diff --git a/index.html b/index.html
index ccc4eef..9df2bb2 100644
--- a/index.html
+++ b/index.html
@@ -24,6 +24,7 @@
+

woman-in-the-middle-attack

...a space for Angeliki Diakrousi to unpack

  • Let's Talk About Unspeakable Things
@@ -89,6 +90,16 @@ transcribing
+
+
+
+
+
diff --git a/project/audio/description-alex1-A.srt b/project/audio/description-alex1-A.srt
new file mode 100644
index 0000000..0f0434a
--- /dev/null
+++ b/project/audio/description-alex1-A.srt
@@ -0,0 +1,58 @@
+1
+00:00:11,740 --> 00:00:18,060
+Em... I hear... It sounds like walking and it's quite windy outside
+
+2
+00:00:18,620 --> 00:00:25,460
+But it sounds like... It's... The microphone is inside or something
+
+3
+00:00:26,980 --> 00:00:29,340
+It's probably music
+
+4
+00:00:30,320 --> 00:00:35,700
+From a car maybe or from some radio?
+
+5
+00:00:37,600 --> 00:00:42,400
+It's outside on the street. I hear some cars in the background
+
+6
+00:00:48,700 --> 00:00:53,600
+Mmmm... It sounds like gardening sounds
+
+7
+00:00:54,820 --> 00:00:59,040
+There are people in the background or maybe it's bikes
+
+8
+00:01:05,840 --> 00:01:08,500
+Oh I hear a bird in the background
+
+9
+00:01:12,120 --> 00:01:16,440
+A very monotone sound
+
+10
+00:01:16,740 --> 00:01:21,180
+It's like driving, maybe driving in a car
+
+11
+00:01:35,120 --> 00:01:36,600
+And some wind around
+It seems like outside but there is some noise in the background
+
+12
+00:01:46,340 --> 00:01:48,460
+It's outside
+
+13
+00:02:04,920 --> 00:02:08,440
+There is some weird sound in the foreground, which I... It's hard to describe [chuckling]
+Oh, I hear children in the background crying
+
+14
+00:02:14,900 --> 00:02:19,580
+Again it sounds like there is a kindergarten or school nearby
+
diff --git a/project/overlapping-interface.html b/project/overlapping-interface.html
index 8286b30..3408a6d 100644
--- a/project/overlapping-interface.html
+++ b/project/overlapping-interface.html
@@ -15,6 +15,27 @@ a {
   text-decoration:none;
 }
+#transcriptWrapper {
+  overflow: hidden;
+}
+#transcriptWrapper3 {
+  overflow: hidden;
+}
+#transcript3 > div {
+  transition: all .8s ease;
+  list-style-type: disc;
+}
+.speaking3 {
+  font-weight: bold;
+}
+
+#transcript > div {
+  transition: all .8s ease;
+  list-style-type: disc;
+}
+.speaking {
+  font-weight: bold;
+}
@@ -26,19 +47,17 @@

    -
    - - + + +
    +
    @@ -54,8 +73,9 @@ Experiment: I listen and record carefully sounds from the area that are coming f
    -
    +

    Lidia

    +
    00:00:16,540 Someone with a bag
    @@ -65,11 +85,13 @@ Experiment: I listen and record carefully sounds from the area that are coming f
    00:02:16,680 Children?
    -
    +
    -
    + +

    Eugenie

    +
    00:00:01,800 Children outside
    00:00:10,760 I think it's outside
    00:00:13,700 Shoes, walking people
    @@ -82,7 +104,7 @@ Experiment: I listen and record carefully sounds from the area that are coming f
    00:01:30,900 People in a room. Inside. They are moving, close
    00:01:54,620 It's inside somewhere, somewhere...
    00:02:15,400 Children in a swimming pool
    -
    +
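A minimal sketch of how these cues can be consumed, assuming the project's Python tooling (the path and helper names below are illustrative, not code from this commit): each SRT block is a cue number, a start --> end timestamp pair, and one or more description lines, which map directly onto the timestamp-plus-description entries the Lidia and Eugenie lists above render.

# -*- coding: utf-8 -*-
# Sketch only: parse SRT cues such as description-alex1-A.srt into
# (start_seconds, end_seconds, text) triples for the overlapping interface.
import re

TS = r"(\d+):(\d+):(\d+),(\d+)"
CUE_TIMES = re.compile(TS + r" --> " + TS)

def to_seconds(h, m, s, ms):
    return int(h) * 3600 + int(m) * 60 + int(s) + int(ms) / 1000.0

def parse_srt(path):
    cues = []
    with open(path) as srt:
        for block in srt.read().strip().split("\n\n"):
            lines = block.splitlines()
            match = CUE_TIMES.match(lines[1])  # lines[0] is the cue number
            if match:
                start = to_seconds(*match.groups()[:4])
                end = to_seconds(*match.groups()[4:])
                cues.append((start, end, " ".join(lines[2:])))
    return cues

# parse_srt("project/audio/description-alex1-A.srt")[0]
# -> (11.74, 18.06, "Em... I hear... It sounds like walking and it's quite windy outside")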
@@ -194,7 +216,7 @@ a1.addEventListener("play", function(){
 })
 a1.addEventListener("pause", function(){
-  but.innerHTML="play"
+  but.innerHTML="listen"
 })
 
 var links=document.querySelectorAll("div.sub a")
@@ -216,8 +238,47 @@ for (var i=0, l=links.length; i
+
+
+
+
+
+
+
diff --git a/project/venv/bin/activate b/project/venv/bin/activate
new file mode 100644
index 0000000..4651d62
--- /dev/null
+++ b/project/venv/bin/activate
@@ -0,0 +1,78 @@
+# This file must be used with "source bin/activate" *from bash*
+# you cannot run it directly
+
+deactivate () {
+    unset -f pydoc >/dev/null 2>&1
+
+    # reset old environment variables
+    # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
+    if ! [ -z "${_OLD_VIRTUAL_PATH+_}" ] ; then
+        PATH="$_OLD_VIRTUAL_PATH"
+        export PATH
+        unset _OLD_VIRTUAL_PATH
+    fi
+    if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
+        PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
+        export PYTHONHOME
+        unset _OLD_VIRTUAL_PYTHONHOME
+    fi
+
+    # This should detect bash and zsh, which have a hash command that must
+    # be called to get it to forget past commands.  Without forgetting
+    # past commands the $PATH changes we made may not be respected
+    if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
+        hash -r 2>/dev/null
+    fi
+
+    if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
+        PS1="$_OLD_VIRTUAL_PS1"
+        export PS1
+        unset _OLD_VIRTUAL_PS1
+    fi
+
+    unset VIRTUAL_ENV
+    if [ ! "${1-}" = "nondestructive" ] ; then
+        # Self destruct!
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV="/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv"
+export VIRTUAL_ENV
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/bin:$PATH"
+export PATH
+
+# unset PYTHONHOME if set
+if ! [ -z "${PYTHONHOME+_}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
+    _OLD_VIRTUAL_PS1="${PS1-}"
+    if [ "x" != x ] ; then
+        PS1="${PS1-}"
+    else
+        PS1="(`basename \"$VIRTUAL_ENV\"`) ${PS1-}"
+    fi
+    export PS1
+fi
+
+# Make sure to unalias pydoc if it's already there
+alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true
+
+pydoc () {
+    python -m pydoc "$@"
+}
+
+# This should detect bash and zsh, which have a hash command that must
+# be called to get it to forget past commands.  Without forgetting
+# past commands the $PATH changes we made may not be respected
+if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
+    hash -r 2>/dev/null
+fi
diff --git a/project/venv/bin/activate.csh b/project/venv/bin/activate.csh
new file mode 100644
index 0000000..f88e00e
--- /dev/null
+++ b/project/venv/bin/activate.csh
@@ -0,0 +1,42 @@
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+# Created by Davide Di Blasi <davidedb@gmail.com>.
+
+set newline='\
+'
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'
+
+# Unset irrelevant variables.
+deactivate nondestructive + +setenv VIRTUAL_ENV "/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv" + +set _OLD_VIRTUAL_PATH="$PATH:q" +setenv PATH "$VIRTUAL_ENV:q/bin:$PATH:q" + + + +if ("" != "") then + set env_name = "" +else + set env_name = "$VIRTUAL_ENV:t:q" +endif + +# Could be in a non-interactive environment, +# in which case, $prompt is undefined and we wouldn't +# care about the prompt anyway. +if ( $?prompt ) then + set _OLD_VIRTUAL_PROMPT="$prompt:q" +if ( "$prompt:q" =~ *"$newline:q"* ) then + : +else + set prompt = "[$env_name:q] $prompt:q" +endif +endif + +unset env_name + +alias pydoc python -m pydoc + +rehash diff --git a/project/venv/bin/activate.fish b/project/venv/bin/activate.fish new file mode 100644 index 0000000..35e7e19 --- /dev/null +++ b/project/venv/bin/activate.fish @@ -0,0 +1,101 @@ +# This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*. +# Do not run it directly. + +function _bashify_path -d "Converts a fish path to something bash can recognize" + set fishy_path $argv + set bashy_path $fishy_path[1] + for path_part in $fishy_path[2..-1] + set bashy_path "$bashy_path:$path_part" + end + echo $bashy_path +end + +function _fishify_path -d "Converts a bash path to something fish can recognize" + echo $argv | tr ':' '\n' +end + +function deactivate -d 'Exit virtualenv mode and return to the normal environment.' + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling + if test (echo $FISH_VERSION | tr "." "\n")[1] -lt 3 + set -gx PATH (_fishify_path $_OLD_VIRTUAL_PATH) + else + set -gx PATH $_OLD_VIRTUAL_PATH + end + set -e _OLD_VIRTUAL_PATH + end + + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`. + set -l fish_function_path + + # Erase virtualenv's `fish_prompt` and restore the original. + functions -e fish_prompt + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + set -e _OLD_FISH_PROMPT_OVERRIDE + end + + set -e VIRTUAL_ENV + + if test "$argv[1]" != 'nondestructive' + # Self-destruct! + functions -e pydoc + functions -e deactivate + functions -e _bashify_path + functions -e _fishify_path + end +end + +# Unset irrelevant variables. +deactivate nondestructive + +set -gx VIRTUAL_ENV "/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv" + +# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling +if test (echo $FISH_VERSION | tr "." "\n")[1] -lt 3 + set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH) +else + set -gx _OLD_VIRTUAL_PATH $PATH +end +set -gx PATH "$VIRTUAL_ENV/bin" $PATH + +# Unset `$PYTHONHOME` if set. +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +function pydoc + python -m pydoc $argv +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # Copy the current `fish_prompt` function as `_old_fish_prompt`. + functions -c fish_prompt _old_fish_prompt + + function fish_prompt + # Save the current $status, for fish_prompts that display it. + set -l old_status $status + + # Prompt override provided? + # If not, just prepend the environment name. 
+ if test -n "" + printf '%s%s' "" (set_color normal) + else + printf '%s(%s) ' (set_color normal) (basename "$VIRTUAL_ENV") + end + + # Restore the original $status + echo "exit $old_status" | source + _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" +end diff --git a/project/venv/bin/activate.ps1 b/project/venv/bin/activate.ps1 new file mode 100644 index 0000000..6d8ae2a --- /dev/null +++ b/project/venv/bin/activate.ps1 @@ -0,0 +1,60 @@ +# This file must be dot sourced from PoSh; you cannot run it directly. Do this: . ./activate.ps1 + +$script:THIS_PATH = $myinvocation.mycommand.path +$script:BASE_DIR = split-path (resolve-path "$THIS_PATH/..") -Parent + +function global:deactivate([switch] $NonDestructive) +{ + if (test-path variable:_OLD_VIRTUAL_PATH) + { + $env:PATH = $variable:_OLD_VIRTUAL_PATH + remove-variable "_OLD_VIRTUAL_PATH" -scope global + } + + if (test-path function:_old_virtual_prompt) + { + $function:prompt = $function:_old_virtual_prompt + remove-item function:\_old_virtual_prompt + } + + if ($env:VIRTUAL_ENV) + { + $old_env = split-path $env:VIRTUAL_ENV -leaf + remove-item env:VIRTUAL_ENV -erroraction silentlycontinue + } + + if (!$NonDestructive) + { + # Self destruct! + remove-item function:deactivate + remove-item function:pydoc + } +} + +function global:pydoc +{ + python -m pydoc $args +} + +# unset irrelevant variables +deactivate -nondestructive + +$VIRTUAL_ENV = $BASE_DIR +$env:VIRTUAL_ENV = $VIRTUAL_ENV + +$global:_OLD_VIRTUAL_PATH = $env:PATH +$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH +if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) +{ + function global:_old_virtual_prompt + { + "" + } + $function:_old_virtual_prompt = $function:prompt + function global:prompt + { + # Add a prefix to the current prompt, but don't discard it. + write-host "($( split-path $env:VIRTUAL_ENV -leaf )) " -nonewline + & $function:_old_virtual_prompt + } +} diff --git a/project/venv/bin/activate_this.py b/project/venv/bin/activate_this.py new file mode 100644 index 0000000..59b5d72 --- /dev/null +++ b/project/venv/bin/activate_this.py @@ -0,0 +1,46 @@ +"""Activate virtualenv for current interpreter: + +Use exec(open(this_file).read(), {'__file__': this_file}). + +This can be used when you must use an existing Python interpreter, not the virtualenv bin/python. 
+""" +import os +import site +import sys + +try: + __file__ +except NameError: + raise AssertionError("You must use exec(open(this_file).read(), {'__file__': this_file}))") + +# prepend bin to PATH (this file is inside the bin directory) +bin_dir = os.path.dirname(os.path.abspath(__file__)) +os.environ["PATH"] = os.pathsep.join([bin_dir] + os.environ.get("PATH", "").split(os.pathsep)) + +base = os.path.dirname(bin_dir) + +# virtual env is right above bin directory +os.environ["VIRTUAL_ENV"] = base + +# add the virtual environments site-package to the host python import mechanism +IS_PYPY = hasattr(sys, "pypy_version_info") +IS_JYTHON = sys.platform.startswith("java") +if IS_JYTHON: + site_packages = os.path.join(base, "Lib", "site-packages") +elif IS_PYPY: + site_packages = os.path.join(base, "site-packages") +else: + IS_WIN = sys.platform == "win32" + if IS_WIN: + site_packages = os.path.join(base, "Lib", "site-packages") + else: + site_packages = os.path.join(base, "lib", "python{}".format(sys.version[:3]), "site-packages") + +prev = set(sys.path) +site.addsitedir(site_packages) +sys.real_prefix = sys.prefix +sys.prefix = base + +# Move the added items to the front of the path, in place +new = list(sys.path) +sys.path[:] = [i for i in new if i not in prev] + [i for i in new if i in prev] diff --git a/project/venv/bin/easy_install b/project/venv/bin/easy_install new file mode 100755 index 0000000..e70a64d --- /dev/null +++ b/project/venv/bin/easy_install @@ -0,0 +1,10 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from setuptools.command.easy_install import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/project/venv/bin/easy_install-2.7 b/project/venv/bin/easy_install-2.7 new file mode 100755 index 0000000..e70a64d --- /dev/null +++ b/project/venv/bin/easy_install-2.7 @@ -0,0 +1,10 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from setuptools.command.easy_install import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/project/venv/bin/f2py b/project/venv/bin/f2py new file mode 100755 index 0000000..a33f775 --- /dev/null +++ b/project/venv/bin/f2py @@ -0,0 +1,10 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from numpy.f2py.f2py2e import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/project/venv/bin/f2py2 b/project/venv/bin/f2py2 new file mode 100755 index 0000000..a33f775 --- /dev/null +++ b/project/venv/bin/f2py2 @@ -0,0 +1,10 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from numpy.f2py.f2py2e import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/project/venv/bin/f2py2.7 b/project/venv/bin/f2py2.7 new file mode 100755 index 0000000..a33f775 --- /dev/null +++ b/project/venv/bin/f2py2.7 @@ -0,0 +1,10 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from numpy.f2py.f2py2e import main 
+ +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/project/venv/bin/pip b/project/venv/bin/pip new file mode 100755 index 0000000..881f992 --- /dev/null +++ b/project/venv/bin/pip @@ -0,0 +1,10 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from pip._internal import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/project/venv/bin/pip2 b/project/venv/bin/pip2 new file mode 100755 index 0000000..881f992 --- /dev/null +++ b/project/venv/bin/pip2 @@ -0,0 +1,10 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from pip._internal import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/project/venv/bin/pip2.7 b/project/venv/bin/pip2.7 new file mode 100755 index 0000000..881f992 --- /dev/null +++ b/project/venv/bin/pip2.7 @@ -0,0 +1,10 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from pip._internal import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/project/venv/bin/python b/project/venv/bin/python new file mode 100755 index 0000000..39388a1 Binary files /dev/null and b/project/venv/bin/python differ diff --git a/project/venv/bin/python-config b/project/venv/bin/python-config new file mode 100755 index 0000000..ee561ac --- /dev/null +++ b/project/venv/bin/python-config @@ -0,0 +1,78 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python + +import sys +import getopt +import sysconfig + +valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags', + 'ldflags', 'help'] + +if sys.version_info >= (3, 2): + valid_opts.insert(-1, 'extension-suffix') + valid_opts.append('abiflags') +if sys.version_info >= (3, 3): + valid_opts.append('configdir') + + +def exit_with_usage(code=1): + sys.stderr.write("Usage: {0} [{1}]\n".format( + sys.argv[0], '|'.join('--'+opt for opt in valid_opts))) + sys.exit(code) + +try: + opts, args = getopt.getopt(sys.argv[1:], '', valid_opts) +except getopt.error: + exit_with_usage() + +if not opts: + exit_with_usage() + +pyver = sysconfig.get_config_var('VERSION') +getvar = sysconfig.get_config_var + +opt_flags = [flag for (flag, val) in opts] + +if '--help' in opt_flags: + exit_with_usage(code=0) + +for opt in opt_flags: + if opt == '--prefix': + print(sysconfig.get_config_var('prefix')) + + elif opt == '--exec-prefix': + print(sysconfig.get_config_var('exec_prefix')) + + elif opt in ('--includes', '--cflags'): + flags = ['-I' + sysconfig.get_path('include'), + '-I' + sysconfig.get_path('platinclude')] + if opt == '--cflags': + flags.extend(getvar('CFLAGS').split()) + print(' '.join(flags)) + + elif opt in ('--libs', '--ldflags'): + abiflags = getattr(sys, 'abiflags', '') + libs = ['-lpython' + pyver + abiflags] + libs += getvar('LIBS').split() + libs += getvar('SYSLIBS').split() + # add the prefix/lib/pythonX.Y/config dir, but only if there is no + # shared library in prefix/lib/. 
+ if opt == '--ldflags': + if not getvar('Py_ENABLE_SHARED'): + libs.insert(0, '-L' + getvar('LIBPL')) + if not getvar('PYTHONFRAMEWORK'): + libs.extend(getvar('LINKFORSHARED').split()) + print(' '.join(libs)) + + elif opt == '--extension-suffix': + ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') + if ext_suffix is None: + ext_suffix = sysconfig.get_config_var('SO') + print(ext_suffix) + + elif opt == '--abiflags': + if not getattr(sys, 'abiflags', None): + exit_with_usage() + print(sys.abiflags) + + elif opt == '--configdir': + print(sysconfig.get_config_var('LIBPL')) diff --git a/project/venv/bin/python2 b/project/venv/bin/python2 new file mode 120000 index 0000000..d8654aa --- /dev/null +++ b/project/venv/bin/python2 @@ -0,0 +1 @@ +python \ No newline at end of file diff --git a/project/venv/bin/python2.7 b/project/venv/bin/python2.7 new file mode 120000 index 0000000..d8654aa --- /dev/null +++ b/project/venv/bin/python2.7 @@ -0,0 +1 @@ +python \ No newline at end of file diff --git a/project/venv/bin/wheel b/project/venv/bin/wheel new file mode 100755 index 0000000..951b3ca --- /dev/null +++ b/project/venv/bin/wheel @@ -0,0 +1,10 @@ +#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys + +from wheel.cli import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/project/venv/include/python2.7 b/project/venv/include/python2.7 new file mode 120000 index 0000000..ad4ca80 --- /dev/null +++ b/project/venv/include/python2.7 @@ -0,0 +1 @@ +/usr/include/python2.7 \ No newline at end of file diff --git a/project/venv/lib/python2.7/UserDict.py b/project/venv/lib/python2.7/UserDict.py new file mode 120000 index 0000000..1dcde33 --- /dev/null +++ b/project/venv/lib/python2.7/UserDict.py @@ -0,0 +1 @@ +/usr/lib/python2.7/UserDict.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/UserDict.pyc b/project/venv/lib/python2.7/UserDict.pyc new file mode 100644 index 0000000..f9d01c1 Binary files /dev/null and b/project/venv/lib/python2.7/UserDict.pyc differ diff --git a/project/venv/lib/python2.7/_abcoll.py b/project/venv/lib/python2.7/_abcoll.py new file mode 120000 index 0000000..e39c38d --- /dev/null +++ b/project/venv/lib/python2.7/_abcoll.py @@ -0,0 +1 @@ +/usr/lib/python2.7/_abcoll.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/_abcoll.pyc b/project/venv/lib/python2.7/_abcoll.pyc new file mode 100644 index 0000000..a1cafa0 Binary files /dev/null and b/project/venv/lib/python2.7/_abcoll.pyc differ diff --git a/project/venv/lib/python2.7/_weakrefset.py b/project/venv/lib/python2.7/_weakrefset.py new file mode 120000 index 0000000..a3c1cd4 --- /dev/null +++ b/project/venv/lib/python2.7/_weakrefset.py @@ -0,0 +1 @@ +/usr/lib/python2.7/_weakrefset.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/_weakrefset.pyc b/project/venv/lib/python2.7/_weakrefset.pyc new file mode 100644 index 0000000..0730d67 Binary files /dev/null and b/project/venv/lib/python2.7/_weakrefset.pyc differ diff --git a/project/venv/lib/python2.7/abc.py b/project/venv/lib/python2.7/abc.py new file mode 120000 index 0000000..cb3e5d1 --- /dev/null +++ b/project/venv/lib/python2.7/abc.py @@ -0,0 +1 @@ +/usr/lib/python2.7/abc.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/abc.pyc b/project/venv/lib/python2.7/abc.pyc new file mode 100644 index 0000000..a974b9e Binary files /dev/null and 
b/project/venv/lib/python2.7/abc.pyc differ diff --git a/project/venv/lib/python2.7/codecs.py b/project/venv/lib/python2.7/codecs.py new file mode 120000 index 0000000..50169dc --- /dev/null +++ b/project/venv/lib/python2.7/codecs.py @@ -0,0 +1 @@ +/usr/lib/python2.7/codecs.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/codecs.pyc b/project/venv/lib/python2.7/codecs.pyc new file mode 100644 index 0000000..900ca90 Binary files /dev/null and b/project/venv/lib/python2.7/codecs.pyc differ diff --git a/project/venv/lib/python2.7/copy_reg.py b/project/venv/lib/python2.7/copy_reg.py new file mode 120000 index 0000000..5dc0af3 --- /dev/null +++ b/project/venv/lib/python2.7/copy_reg.py @@ -0,0 +1 @@ +/usr/lib/python2.7/copy_reg.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/copy_reg.pyc b/project/venv/lib/python2.7/copy_reg.pyc new file mode 100644 index 0000000..4869f65 Binary files /dev/null and b/project/venv/lib/python2.7/copy_reg.pyc differ diff --git a/project/venv/lib/python2.7/distutils/__init__.py b/project/venv/lib/python2.7/distutils/__init__.py new file mode 100644 index 0000000..b9b0f24 --- /dev/null +++ b/project/venv/lib/python2.7/distutils/__init__.py @@ -0,0 +1,134 @@ +import os +import sys +import warnings + +# opcode is not a virtualenv module, so we can use it to find the stdlib +# Important! To work on pypy, this must be a module that resides in the +# lib-python/modified-x.y.z directory +import opcode + +dirname = os.path.dirname + +distutils_path = os.path.join(os.path.dirname(opcode.__file__), "distutils") +if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)): + warnings.warn("The virtualenv distutils package at %s appears to be in the same location as the system distutils?") +else: + __path__.insert(0, distutils_path) # noqa: F821 + if sys.version_info < (3, 4): + import imp + + real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ("", "", imp.PKG_DIRECTORY)) + else: + import importlib.machinery + + distutils_path = os.path.join(distutils_path, "__init__.py") + loader = importlib.machinery.SourceFileLoader("_virtualenv_distutils", distutils_path) + if sys.version_info < (3, 5): + import types + + real_distutils = types.ModuleType(loader.name) + else: + import importlib.util + + spec = importlib.util.spec_from_loader(loader.name, loader) + real_distutils = importlib.util.module_from_spec(spec) + loader.exec_module(real_distutils) + + # Copy the relevant attributes + try: + __revision__ = real_distutils.__revision__ + except AttributeError: + pass + __version__ = real_distutils.__version__ + +from distutils import dist, sysconfig # isort:skip + +try: + basestring +except NameError: + basestring = str + +# patch build_ext (distutils doesn't know how to get the libs directory +# path on windows - it hardcodes the paths around the patched sys.prefix) + +if sys.platform == "win32": + from distutils.command.build_ext import build_ext as old_build_ext + + class build_ext(old_build_ext): + def finalize_options(self): + if self.library_dirs is None: + self.library_dirs = [] + elif isinstance(self.library_dirs, basestring): + self.library_dirs = self.library_dirs.split(os.pathsep) + + self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs")) + old_build_ext.finalize_options(self) + + from distutils.command import build_ext as build_ext_module + + build_ext_module.build_ext = build_ext + +# distutils.dist patches: + +old_find_config_files = 
dist.Distribution.find_config_files + + +def find_config_files(self): + found = old_find_config_files(self) + if os.name == "posix": + user_filename = ".pydistutils.cfg" + else: + user_filename = "pydistutils.cfg" + user_filename = os.path.join(sys.prefix, user_filename) + if os.path.isfile(user_filename): + for item in list(found): + if item.endswith("pydistutils.cfg"): + found.remove(item) + found.append(user_filename) + return found + + +dist.Distribution.find_config_files = find_config_files + +# distutils.sysconfig patches: + +old_get_python_inc = sysconfig.get_python_inc + + +def sysconfig_get_python_inc(plat_specific=0, prefix=None): + if prefix is None: + prefix = sys.real_prefix + return old_get_python_inc(plat_specific, prefix) + + +sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__ +sysconfig.get_python_inc = sysconfig_get_python_inc + +old_get_python_lib = sysconfig.get_python_lib + + +def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None): + if standard_lib and prefix is None: + prefix = sys.real_prefix + return old_get_python_lib(plat_specific, standard_lib, prefix) + + +sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__ +sysconfig.get_python_lib = sysconfig_get_python_lib + +old_get_config_vars = sysconfig.get_config_vars + + +def sysconfig_get_config_vars(*args): + real_vars = old_get_config_vars(*args) + if sys.platform == "win32": + lib_dir = os.path.join(sys.real_prefix, "libs") + if isinstance(real_vars, dict) and "LIBDIR" not in real_vars: + real_vars["LIBDIR"] = lib_dir # asked for all + elif isinstance(real_vars, list) and "LIBDIR" in args: + real_vars = real_vars + [lib_dir] # asked for list + return real_vars + + +sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__ +sysconfig.get_config_vars = sysconfig_get_config_vars diff --git a/project/venv/lib/python2.7/distutils/__init__.pyc b/project/venv/lib/python2.7/distutils/__init__.pyc new file mode 100644 index 0000000..bfb8eaa Binary files /dev/null and b/project/venv/lib/python2.7/distutils/__init__.pyc differ diff --git a/project/venv/lib/python2.7/distutils/distutils.cfg b/project/venv/lib/python2.7/distutils/distutils.cfg new file mode 100644 index 0000000..1af230e --- /dev/null +++ b/project/venv/lib/python2.7/distutils/distutils.cfg @@ -0,0 +1,6 @@ +# This is a config file local to this virtualenv installation +# You may include options that will be used by all distutils commands, +# and by easy_install. 
For instance: +# +# [easy_install] +# find_links = http://mylocalsite diff --git a/project/venv/lib/python2.7/encodings b/project/venv/lib/python2.7/encodings new file mode 120000 index 0000000..1250ad8 --- /dev/null +++ b/project/venv/lib/python2.7/encodings @@ -0,0 +1 @@ +/usr/lib/python2.7/encodings \ No newline at end of file diff --git a/project/venv/lib/python2.7/fnmatch.py b/project/venv/lib/python2.7/fnmatch.py new file mode 120000 index 0000000..ec3e10c --- /dev/null +++ b/project/venv/lib/python2.7/fnmatch.py @@ -0,0 +1 @@ +/usr/lib/python2.7/fnmatch.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/fnmatch.pyc b/project/venv/lib/python2.7/fnmatch.pyc new file mode 100644 index 0000000..37c070f Binary files /dev/null and b/project/venv/lib/python2.7/fnmatch.pyc differ diff --git a/project/venv/lib/python2.7/genericpath.py b/project/venv/lib/python2.7/genericpath.py new file mode 120000 index 0000000..cb8897c --- /dev/null +++ b/project/venv/lib/python2.7/genericpath.py @@ -0,0 +1 @@ +/usr/lib/python2.7/genericpath.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/genericpath.pyc b/project/venv/lib/python2.7/genericpath.pyc new file mode 100644 index 0000000..929f820 Binary files /dev/null and b/project/venv/lib/python2.7/genericpath.pyc differ diff --git a/project/venv/lib/python2.7/lib-dynload b/project/venv/lib/python2.7/lib-dynload new file mode 120000 index 0000000..c706a1e --- /dev/null +++ b/project/venv/lib/python2.7/lib-dynload @@ -0,0 +1 @@ +/usr/lib/python2.7/lib-dynload \ No newline at end of file diff --git a/project/venv/lib/python2.7/linecache.py b/project/venv/lib/python2.7/linecache.py new file mode 120000 index 0000000..943c429 --- /dev/null +++ b/project/venv/lib/python2.7/linecache.py @@ -0,0 +1 @@ +/usr/lib/python2.7/linecache.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/linecache.pyc b/project/venv/lib/python2.7/linecache.pyc new file mode 100644 index 0000000..aad96e2 Binary files /dev/null and b/project/venv/lib/python2.7/linecache.pyc differ diff --git a/project/venv/lib/python2.7/locale.py b/project/venv/lib/python2.7/locale.py new file mode 120000 index 0000000..92c243c --- /dev/null +++ b/project/venv/lib/python2.7/locale.py @@ -0,0 +1 @@ +/usr/lib/python2.7/locale.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/locale.pyc b/project/venv/lib/python2.7/locale.pyc new file mode 100644 index 0000000..bdfa23c Binary files /dev/null and b/project/venv/lib/python2.7/locale.pyc differ diff --git a/project/venv/lib/python2.7/no-global-site-packages.txt b/project/venv/lib/python2.7/no-global-site-packages.txt new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/ntpath.py b/project/venv/lib/python2.7/ntpath.py new file mode 120000 index 0000000..5659ae1 --- /dev/null +++ b/project/venv/lib/python2.7/ntpath.py @@ -0,0 +1 @@ +/usr/lib/python2.7/ntpath.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/ntpath.pyc b/project/venv/lib/python2.7/ntpath.pyc new file mode 100644 index 0000000..799ce32 Binary files /dev/null and b/project/venv/lib/python2.7/ntpath.pyc differ diff --git a/project/venv/lib/python2.7/orig-prefix.txt b/project/venv/lib/python2.7/orig-prefix.txt new file mode 100644 index 0000000..e25db58 --- /dev/null +++ b/project/venv/lib/python2.7/orig-prefix.txt @@ -0,0 +1 @@ +/usr \ No newline at end of file diff --git a/project/venv/lib/python2.7/os.py b/project/venv/lib/python2.7/os.py new file mode 120000 index 
0000000..950fc8d --- /dev/null +++ b/project/venv/lib/python2.7/os.py @@ -0,0 +1 @@ +/usr/lib/python2.7/os.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/os.pyc b/project/venv/lib/python2.7/os.pyc new file mode 100644 index 0000000..f84c6e5 Binary files /dev/null and b/project/venv/lib/python2.7/os.pyc differ diff --git a/project/venv/lib/python2.7/posixpath.py b/project/venv/lib/python2.7/posixpath.py new file mode 120000 index 0000000..30cb8ca --- /dev/null +++ b/project/venv/lib/python2.7/posixpath.py @@ -0,0 +1 @@ +/usr/lib/python2.7/posixpath.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/posixpath.pyc b/project/venv/lib/python2.7/posixpath.pyc new file mode 100644 index 0000000..3bc4de5 Binary files /dev/null and b/project/venv/lib/python2.7/posixpath.pyc differ diff --git a/project/venv/lib/python2.7/re.py b/project/venv/lib/python2.7/re.py new file mode 120000 index 0000000..56a0731 --- /dev/null +++ b/project/venv/lib/python2.7/re.py @@ -0,0 +1 @@ +/usr/lib/python2.7/re.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/re.pyc b/project/venv/lib/python2.7/re.pyc new file mode 100644 index 0000000..53d7398 Binary files /dev/null and b/project/venv/lib/python2.7/re.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/INSTALLER b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/LICENSE b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/LICENSE new file mode 100644 index 0000000..e8f8805 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2017-2019 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/METADATA new file mode 100644 index 0000000..e4ea8f1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/METADATA @@ -0,0 +1,40 @@ +Metadata-Version: 2.1 +Name: PyYAML +Version: 5.1 +Summary: YAML parser and emitter for Python +Home-page: https://github.com/yaml/pyyaml +Author: Kirill Simonov +Author-email: xi@resolvent.net +License: MIT +Download-URL: https://pypi.org/project/PyYAML/ +Platform: Any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* + +YAML is a data serialization format designed for human readability +and interaction with scripting languages. PyYAML is a YAML parser +and emitter for Python. + +PyYAML features a complete YAML 1.1 parser, Unicode support, pickle +support, capable extension API, and sensible error messages. PyYAML +supports standard YAML tags and provides Python-specific tags that +allow to represent an arbitrary Python object. + +PyYAML is applicable for a broad range of tasks from complex +configuration files to object serialization and persistence. 
+ diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/RECORD new file mode 100644 index 0000000..6d74479 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/RECORD @@ -0,0 +1,40 @@ +PyYAML-5.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +PyYAML-5.1.dist-info/LICENSE,sha256=oq25yVm3l0lKDvgL32DiLbJ0nuPgwJCFVuPrVI-WfFY,1101 +PyYAML-5.1.dist-info/METADATA,sha256=33vAx8vSzjUMXGHO2jp0u8tvgkRqfAH3-OEDSpjfIx8,1704 +PyYAML-5.1.dist-info/RECORD,, +PyYAML-5.1.dist-info/WHEEL,sha256=HMBxudg1Bdignf3xswgnPWbkfRGI13bogQQwWC4xJ5w,105 +PyYAML-5.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11 +yaml/__init__.py,sha256=Ke2gASae4i0HLUxbz8l2CUR3__qpspvBBI1gltOxQbE,12182 +yaml/__init__.pyc,, +yaml/composer.py,sha256=TtvWJCaffdVHhHx_muLguloDUwg1NUbo-GlJ0me6xuA,4923 +yaml/composer.pyc,, +yaml/constructor.py,sha256=HrBLwPOrijfcmurB4Ww71xrTtMKehs0-xh3NNlQzveY,26834 +yaml/constructor.pyc,, +yaml/cyaml.py,sha256=6MZKSJHTWwoct5urK9qbGXAhM1xz_Boawlf5SVMC85o,3842 +yaml/cyaml.pyc,, +yaml/dumper.py,sha256=yMFwlojOY-M4EMUT8FiROY2QC2GAGQO7Iiky0xBlJFc,2833 +yaml/dumper.pyc,, +yaml/emitter.py,sha256=09BwSivdd4BbPgx2rbFcoMOa9H1yZxqY1YILjOXvFeo,43427 +yaml/emitter.pyc,, +yaml/error.py,sha256=7K-NdIv0qNKPKbnXxEg0L_b9K7nYDORr3rzm8_b-iBY,2559 +yaml/error.pyc,, +yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445 +yaml/events.pyc,, +yaml/loader.py,sha256=T61DzDkbfResZoQfVy1U3vN0_mKFyBALB9et-478-w4,2055 +yaml/loader.pyc,, +yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440 +yaml/nodes.pyc,, +yaml/parser.py,sha256=sgXahZA3DkySYnaC4D_zcl3l2y4Y5R40icWtdwkF_NE,25542 +yaml/parser.pyc,, +yaml/reader.py,sha256=y6cj-OcZjnFDO1u6Blmi-tKoi-3Pjvmo2PikMnO5FFc,6850 +yaml/reader.pyc,, +yaml/representer.py,sha256=BaJcXjs4a1yjYMnCKxJ39TZlWahVu2OE7K6dEEy0Bqo,17791 +yaml/representer.pyc,, +yaml/resolver.py,sha256=5Z3boiMikL6Qt6fS5Mt8fHym0GxbW7CMT2f2fnD1ZPQ,9122 +yaml/resolver.pyc,, +yaml/scanner.py,sha256=Umb0oXZGFIf814tr_CUPPfKfeWUZj9ucqz06q20v0Fg,52027 +yaml/scanner.pyc,, +yaml/serializer.py,sha256=tRsRwfu5E9fpLU7LY3vBQf2prt77hwnYlMt5dnBJLig,4171 +yaml/serializer.pyc,, +yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573 +yaml/tokens.pyc,, diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/WHEEL new file mode 100644 index 0000000..e28e857 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.1) +Root-Is-Purelib: false +Tag: cp27-cp27mu-linux_x86_64 + diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/top_level.txt new file mode 100644 index 0000000..e6475e9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_yaml +yaml diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/DESCRIPTION.rst b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..e118723 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/DESCRIPTION.rst @@ -0,0 +1,3 @@ +UNKNOWN + + diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/INSTALLER 
b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/METADATA new file mode 100644 index 0000000..dff5bc5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/METADATA @@ -0,0 +1,14 @@ +Metadata-Version: 2.0 +Name: TextGrid +Version: 1.4 +Summary: Praat TextGrid manipulation. +Home-page: UNKNOWN +Author: Kyle Gorman et al. +Author-email: kylebgorman@gmail.com +License: UNKNOWN +Description-Content-Type: UNKNOWN +Platform: UNKNOWN + +UNKNOWN + + diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/RECORD new file mode 100644 index 0000000..f28014d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/RECORD @@ -0,0 +1,13 @@ +TextGrid-1.4.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10 +TextGrid-1.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +TextGrid-1.4.dist-info/METADATA,sha256=IFHsDhZZFuOJdumPEBkSTUqAlDSJMDUJW05UhdRiGjg,250 +TextGrid-1.4.dist-info/RECORD,, +TextGrid-1.4.dist-info/WHEEL,sha256=9Z5Xm-eel1bTS7e6ogYiKz0zmPEqDwIypurdHN1hR40,116 +TextGrid-1.4.dist-info/metadata.json,sha256=Nue5ZSguO37SYRnPFE8MCpWaMwa5J-nxt6us3cS4MHc,372 +TextGrid-1.4.dist-info/top_level.txt,sha256=XHJGg7FS4QKvUdqLkmC2SmknY5vhVIxj0ooAPxOaMuY,9 +textgrid/__init__.py,sha256=k-l1bvhZB1An_ijRtB4kbjWCuBWjxlmzgeb-9ybsyZ4,79 +textgrid/__init__.pyc,, +textgrid/exceptions.py,sha256=pUSc3V9Vc4dzReiBF1lE7ANOjIijvkSDYKQYyFCajus,43 +textgrid/exceptions.pyc,, +textgrid/textgrid.py,sha256=eVCsld7TCG_BcBS8RhsEtWf-273CmqW96-ddyuZN2v4,33505 +textgrid/textgrid.pyc,, diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/WHEEL new file mode 100644 index 0000000..ab4a09e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/metadata.json b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/metadata.json new file mode 100644 index 0000000..301f94e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/metadata.json @@ -0,0 +1 @@ +{"description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "kylebgorman@gmail.com", "name": "Kyle Gorman et al.", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "generator": "bdist_wheel (0.29.0)", "metadata_version": "2.0", "name": "TextGrid", "summary": "Praat TextGrid manipulation.", "version": "1.4"} \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/top_level.txt new file mode 100644 index 0000000..62dbf12 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/top_level.txt @@ -0,0 +1 @@ +textgrid diff --git 
a/project/venv/lib/python2.7/site-packages/easy_install.py b/project/venv/lib/python2.7/site-packages/easy_install.py new file mode 100644 index 0000000..d87e984 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/easy_install.py @@ -0,0 +1,5 @@ +"""Run the EasyInstall command""" + +if __name__ == '__main__': + from setuptools.command.easy_install import main + main() diff --git a/project/venv/lib/python2.7/site-packages/easy_install.pyc b/project/venv/lib/python2.7/site-packages/easy_install.pyc new file mode 100644 index 0000000..33e67ee Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/easy_install.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/INSTALLER b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/METADATA new file mode 100644 index 0000000..84454d7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/METADATA @@ -0,0 +1,54 @@ +Metadata-Version: 2.1 +Name: numpy +Version: 1.16.2 +Summary: NumPy is the fundamental package for array computing with Python. +Home-page: https://www.numpy.org +Author: Travis E. Oliphant et al. +Maintainer: NumPy Developers +Maintainer-email: numpy-discussion@python.org +License: BSD +Download-URL: https://pypi.python.org/pypi/numpy +Platform: Windows +Platform: Linux +Platform: Solaris +Platform: Mac OS-X +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development +Classifier: Topic :: Scientific/Engineering +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* + +It provides: + +- a powerful N-dimensional array object +- sophisticated (broadcasting) functions +- tools for integrating C/C++ and Fortran code +- useful linear algebra, Fourier transform, and random number capabilities +- and much more + +Besides its obvious scientific uses, NumPy can also be used as an efficient +multi-dimensional container of generic data. Arbitrary data-types can be +defined. This allows NumPy to seamlessly and speedily integrate with a wide +variety of databases. + +All NumPy wheels distributed on PyPI are BSD licensed. 
+ + + diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/RECORD new file mode 100644 index 0000000..5931327 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/RECORD @@ -0,0 +1,803 @@ +../../../bin/f2py,sha256=WCOexfKhCMTNtQGohYPYG2qfcYBxXcHfYqRTX9uw8x0,286 +../../../bin/f2py2,sha256=WCOexfKhCMTNtQGohYPYG2qfcYBxXcHfYqRTX9uw8x0,286 +../../../bin/f2py2.7,sha256=WCOexfKhCMTNtQGohYPYG2qfcYBxXcHfYqRTX9uw8x0,286 +numpy-1.16.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +numpy-1.16.2.dist-info/METADATA,sha256=MyfSLq4okJQpnE7_0yoqjdO3I3YvMALqZOnJdFCvr7U,1948 +numpy-1.16.2.dist-info/RECORD,, +numpy-1.16.2.dist-info/WHEEL,sha256=M5Ujap42zjfAFnpJOoFU72TFHuBKh-JF0Rqu5vZhkVE,110 +numpy-1.16.2.dist-info/entry_points.txt,sha256=EvEfDEP2oBMNGDgk_sl6nu5nHwbMn1G_bN_aoiCI4ZY,113 +numpy-1.16.2.dist-info/top_level.txt,sha256=4J9lbBMLnAiyxatxh8iRKV5Entd_6-oqbO7pzJjMsPw,6 +numpy/.libs/libgfortran-ed201abd.so.3.0.0,sha256=xkH_25s9lTuTrEf3fE8GZK68sYSPsIlP3SJ6Qze9aIM,1023960 +numpy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so,sha256=MgisyqBW3cKbBpB8GwsyuN2JKQUJtVPllGByvSYLck4,29764696 +numpy/LICENSE.txt,sha256=75wRgMQl3M781Yx0TIDCm5QIlvSAKTqjJnit7QWANzQ,46470 +numpy/__config__.py,sha256=6JW9C97RVmv2Rqmvmsh704QBrLHxkyJpFHadrlll9LE,1554 +numpy/__config__.pyc,, +numpy/__init__.py,sha256=6bqV6_Ot0ysgHlDh0mhaoowho85c5GC5OEtyZrScd9U,7110 +numpy/__init__.pyc,, +numpy/_distributor_init.py,sha256=IgPkSK3H9bgjFeUfWuXhjKrgetQl5ztUW-rTyjGHK3c,331 +numpy/_distributor_init.pyc,, +numpy/_globals.py,sha256=p8xxERZsxjGPUWV9pMY3jz75NZxDLppGeKaHbYGCDqM,2379 +numpy/_globals.pyc,, +numpy/_pytesttester.py,sha256=eLWMwBiqamHoev8-VlmtvCaxV_gitqK7js-UkjUW4qs,6854 +numpy/_pytesttester.pyc,, +numpy/compat/__init__.py,sha256=MHle4gJcrXh1w4SNv0mz5rbUTAjAzHnyO3rtbSW3AUo,498 +numpy/compat/__init__.pyc,, +numpy/compat/_inspect.py,sha256=xEImUFhm4VAzT2LJj2Va_yDAHJsdy0RwSi1JwOOhykU,7513 +numpy/compat/_inspect.pyc,, +numpy/compat/py3k.py,sha256=pBEgOIEsaycTciDcN33Dt2WdNA8c3JBtxkSuzA2g8TQ,6663 +numpy/compat/py3k.pyc,, +numpy/compat/setup.py,sha256=REJcwNU7EbfwBFS1FHazGJcUhh50_5gYttr3BSczCiM,382 +numpy/compat/setup.pyc,, +numpy/compat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/compat/tests/__init__.pyc,, +numpy/compat/tests/test_compat.py,sha256=KI-7Ru3Ia5TwGBmBWTM3Qjq6G_CFOzX1uId--9MecXo,691 +numpy/compat/tests/test_compat.pyc,, +numpy/conftest.py,sha256=KLI46jUH5d7LGWeUI5WuGbAL1_2TukfRWKnlue4Ncrk,1686 +numpy/conftest.pyc,, +numpy/core/__init__.py,sha256=_QCag_dScAatINZX8mlDcGdabAemkT8iq9GiWWgWisE,5928 +numpy/core/__init__.pyc,, +numpy/core/_add_newdocs.py,sha256=I_ndaFJhI6iD5ySeC2HGpGRdPgv5JIP2vd6_Wo2tbjo,203038 +numpy/core/_add_newdocs.pyc,, +numpy/core/_aliased_types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/_aliased_types.pyc,, +numpy/core/_dtype.py,sha256=rvaeOf-ypqhRrb4_Aljt2CXX6uZrX0STQ0pQwlXTKv4,9688 +numpy/core/_dtype.pyc,, +numpy/core/_dtype_ctypes.py,sha256=srVPSI6kJvXjzIvkBQDU1itAfT-vCZKeyjgCF3yC-6Q,3448 +numpy/core/_dtype_ctypes.pyc,, +numpy/core/_dummy.so,sha256=6ymA-LVfcdEzHE71a6qp91sQ-WTxcqP2rx3vXPIsapM,15911 +numpy/core/_internal.py,sha256=U3CSOXeUC79XPV-ZaxlhniUt3q5iFJXlQVFeIcwcsY4,27779 +numpy/core/_internal.pyc,, +numpy/core/_methods.py,sha256=3QDUJ2FAN317yU5fE6lPdFgBVrnBaiYnUkuTu1vWmoU,5083 +numpy/core/_methods.pyc,, +numpy/core/_multiarray_tests.so,sha256=Yb9JupRaApkHMPry22_gupVhAqw2wWk0D3Mmo4b9OY4,546243 
+numpy/core/_multiarray_umath.so,sha256=yDVY4aDlT98Wlvgo1tYgO3L51DSaQfgZFRNH2inY1IE,19667840 +numpy/core/_operand_flag_tests.so,sha256=FkVNAuX0uAk6KjUMNJiSURvBXs33ytk_HPE-dd_zrjs,29842 +numpy/core/_rational_tests.so,sha256=VFcpJ5c5MPOkT8HbLcHVO2_Juz0nGM14R7byTD5cVjk,261212 +numpy/core/_string_helpers.py,sha256=NGGGhaFdU5eGiUAj3GTIBoOgWs4r9aTNlsE2r9NgX6Q,2855 +numpy/core/_string_helpers.pyc,, +numpy/core/_struct_ufunc_tests.so,sha256=1r1lYPRdWLixFyn7aczDqsLn2X_EOMl_n5lMp9zkLcI,30069 +numpy/core/_type_aliases.py,sha256=FA2Pz5OKqcLl1QKLJNu-ETHIzQ1ii3LH5pSdHhZkfZA,9181 +numpy/core/_type_aliases.pyc,, +numpy/core/_umath_tests.so,sha256=TddxXbUXnLoLhXFUJx9Tn0WZi4sY5Fb9OG6Zxi0swyM,82716 +numpy/core/arrayprint.py,sha256=9-GT05iiKZvMAjhoN1AUraQ1BzRaWmmq5_n0eWPp9L4,60310 +numpy/core/arrayprint.pyc,, +numpy/core/cversions.py,sha256=ukYNpkei0Coi7DOcbroXuDoXc6kl5odxmcy_39pszA0,413 +numpy/core/cversions.pyc,, +numpy/core/defchararray.py,sha256=gRmZlrryuLgNQLsFjMzZz4X2hFnTZzzqfkpiTWwvthI,71118 +numpy/core/defchararray.pyc,, +numpy/core/einsumfunc.py,sha256=-P82bltMMGjUCRpYjJYohGdPpxLExucGlWJLMA7XxDw,51207 +numpy/core/einsumfunc.pyc,, +numpy/core/fromnumeric.py,sha256=gza7xG7bWMWNtrQ0mV4719FWHpOAmk27LnhXsdQ5Ics,109555 +numpy/core/fromnumeric.pyc,, +numpy/core/function_base.py,sha256=_1hlf2CCd-eXq5p7sJz56RhzrqyMmzBQjfti-i-uhbQ,16336 +numpy/core/function_base.pyc,, +numpy/core/generate_numpy_api.py,sha256=0JBYTvekUeJyhp7QMKtWJSK-L6lVNhev16y0F2qX2pU,7470 +numpy/core/generate_numpy_api.pyc,, +numpy/core/getlimits.py,sha256=zGUdeXCw8GAWly8MCTL6xWEyoBDszviDpc47SbS_BhY,18936 +numpy/core/getlimits.pyc,, +numpy/core/include/numpy/__multiarray_api.h,sha256=MS8l193p6aGZSSuV6ube6F9Ms7Wsn5TKVv9WHM7eCkw,60958 +numpy/core/include/numpy/__ufunc_api.h,sha256=szHiF_4UY3EY8wH0q1YW9UmoakOHQTsHog75MJwEyTg,12143 +numpy/core/include/numpy/_neighborhood_iterator_imp.h,sha256=hNiUJ3gmJRxdjByk5R5jmLeBKpNfaP_29KLHFuTrSIA,1861 +numpy/core/include/numpy/_numpyconfig.h,sha256=bDiTLQ972ZWQBEpx6OM8riS64nSAelKa2kIimnXm_Ss,1010 +numpy/core/include/numpy/arrayobject.h,sha256=SXj-2avTHV8mNWvv7sOYHLKkRKcafDG7_HNpQNot1GE,164 +numpy/core/include/numpy/arrayscalars.h,sha256=vC7QCznlT8vkyvxbIh4QNwi1LR7UkP7GJ1j_0ZiJa1E,3509 +numpy/core/include/numpy/halffloat.h,sha256=ohvyl3Kz3mB1hW3MRzxwPDH-0L9WWM_eKhvYLjtT_2w,1878 +numpy/core/include/numpy/multiarray_api.txt,sha256=Panvwe-mLDLFw9WU90x2M7nqrCE99JzBD186Xa5R4po,56385 +numpy/core/include/numpy/ndarrayobject.h,sha256=ZVCR5RE1W4QUJ8X6jeai-9gwWvgDkImR8ZEH1XI2wl0,11507 +numpy/core/include/numpy/ndarraytypes.h,sha256=eHZA7lbctLPjUPdyXu6ITCnyOmlt88IIG2rZkjs4DAA,64719 +numpy/core/include/numpy/noprefix.h,sha256=YE-lWegAdZKI5lf44AW5jiWbnmO6hircWzj_WMFrLT4,6786 +numpy/core/include/numpy/npy_1_7_deprecated_api.h,sha256=LLeZKLuJADU3RDfT04pu5FCxCBU5cEzY5Q9phR_HL78,4715 +numpy/core/include/numpy/npy_3kcompat.h,sha256=exFgMT6slmo2Zg3bFsY3mKLUrrkg3KU_66gUmu5IYKk,14666 +numpy/core/include/numpy/npy_common.h,sha256=FIVNq2pSdIRJsoi56GAruahhfp1OwafweqCDhakUc8w,37277 +numpy/core/include/numpy/npy_cpu.h,sha256=3frXChwN0Cxca-sAeTTOJCiZ6_2q1EuggUwqEotdXLg,3879 +numpy/core/include/numpy/npy_endian.h,sha256=HHanBydLvLC2anJJySvy6wZ_lYaC_xI6GNwT8cJ78rE,2596 +numpy/core/include/numpy/npy_interrupt.h,sha256=Eyddk806h30jxgymbr44b7eIZKrHXtNzXpPtUPp2Ng8,3439 +numpy/core/include/numpy/npy_math.h,sha256=AeaXjX76YzIMT67EdZMnjG1--x61UI2htuGtGwgfA24,18838 +numpy/core/include/numpy/npy_no_deprecated_api.h,sha256=X-wRYdpuwIuerTnBblKjR7Dqsv8rqxn01RFLVWUHvi8,567 
+numpy/core/include/numpy/npy_os.h,sha256=cEvEvpD92EeFjsjRelw1dXJaHYL-0yPJDuz3VeSJs4E,817 +numpy/core/include/numpy/numpyconfig.h,sha256=J5BLHoCyhe383tIM4YriMgYDjOPC4xWzRvqBPyNCTOE,1207 +numpy/core/include/numpy/old_defines.h,sha256=7eiZoi7JrdVT9LXKCoeta5AoIncGa98GcVlWqDrLjwk,6306 +numpy/core/include/numpy/oldnumeric.h,sha256=Yo-LiSzVfDK2YyhlH41ff4gS0m-lv8XjI4JcAzpdy94,708 +numpy/core/include/numpy/ufunc_api.txt,sha256=2d31yVD80vKEgf5Pr5JtDjnVjc2DLfiXRWQuq7KT5wc,6889 +numpy/core/include/numpy/ufuncobject.h,sha256=ocjHj2QCTYkbxIdSmPi-2k3vzKNJ96oB7giwiWFS2i0,13051 +numpy/core/include/numpy/utils.h,sha256=KqJzngAvarYV3oZQu5fY0ARPVihUP7FsZjdljysaSUk,729 +numpy/core/info.py,sha256=SjDs9EfOswEy-ABgUr9f09v83sUdhmwFXRlaZbOGCnA,4692 +numpy/core/info.pyc,, +numpy/core/lib/libnpymath.a,sha256=b-gdLaZupHyZrHs-1v0f7TG5QQrmR0RwgJjmiGO3Y0o,350538 +numpy/core/lib/npy-pkg-config/mlib.ini,sha256=_LsWV1eStNqwhdiYPa2538GL46dnfVwT4MrI1zbsoFw,147 +numpy/core/lib/npy-pkg-config/npymath.ini,sha256=kamUNrYKAmXqQa8BcNv7D5sLqHh6bnChM0_5rZCsTfY,360 +numpy/core/machar.py,sha256=StiB_u3QIWj55RQASESDLjv_8650DE8vuCd7evhlmDI,10854 +numpy/core/machar.pyc,, +numpy/core/memmap.py,sha256=4SEtSehRX8SJtGseMP1hm4LgrRR4oLa20wlRLUy4aJU,11612 +numpy/core/memmap.pyc,, +numpy/core/multiarray.py,sha256=26mdC_rn84U2WwIjf6mZX7rW_YugkEHRIDZBkKrRImQ,50606 +numpy/core/multiarray.pyc,, +numpy/core/numeric.py,sha256=OaLQxu1NQmiehpC5BHRLMBIKXUqiqmDTaqhIgvN8K5M,92560 +numpy/core/numeric.pyc,, +numpy/core/numerictypes.py,sha256=8esH4zpSWgjUnfpe9CK0D5MmBPojh6iWLGEDza9Pi2E,17849 +numpy/core/numerictypes.pyc,, +numpy/core/overrides.py,sha256=OSkSrhC7dl63cK9Pr0qt4X5rMsB025MNoPV-_o15VNM,6658 +numpy/core/overrides.pyc,, +numpy/core/records.py,sha256=6N9oqz53J_5eGPsR9DxltkYxJdzraA2dJil0xBiqJm4,30418 +numpy/core/records.pyc,, +numpy/core/setup.py,sha256=3vqfmmG_JLENcuBVmjzpiznKMciwKcd1a6B0MOE7z2I,41384 +numpy/core/setup.pyc,, +numpy/core/setup_common.py,sha256=8Ywhbj-uud98QbEgIZEQGvmpD6S2Ve-dlOYqb_9lkmw,16571 +numpy/core/setup_common.pyc,, +numpy/core/shape_base.py,sha256=4Sjp5Jjs5BfLJ0rU6PUrxl-hNk6ye5IcrpbUNbD667U,28273 +numpy/core/shape_base.pyc,, +numpy/core/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/tests/__init__.pyc,, +numpy/core/tests/_locales.py,sha256=GQro3bha8c5msgQyvNzmDUrNwqS2cGkKKuN4gg4c6tI,2266 +numpy/core/tests/_locales.pyc,, +numpy/core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 +numpy/core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 +numpy/core/tests/test_abc.py,sha256=cpIqt3VFBZLHbuNpO4NuyCGgd--k1zij5aasu7FV77I,2402 +numpy/core/tests/test_abc.pyc,, +numpy/core/tests/test_api.py,sha256=k7pN6IGolFQHBT7CmaRZHJqR59amWk6QcVK4RdqQkKc,18902 +numpy/core/tests/test_api.pyc,, +numpy/core/tests/test_arrayprint.py,sha256=PPUppm9m6cPx2mjU0lQevTdnLSq0_6JIe8LyumN3ujM,34701 +numpy/core/tests/test_arrayprint.pyc,, +numpy/core/tests/test_datetime.py,sha256=1h-LfflxFQCvWsZcQwK8Ry8mWW0mziuIQ1n-NX1Qqtc,101949 +numpy/core/tests/test_datetime.pyc,, +numpy/core/tests/test_defchararray.py,sha256=L5EoOBTZVrRU1Vju5IhY8BSUlBOGPzEViKJwyQSlpXo,25481 +numpy/core/tests/test_defchararray.pyc,, +numpy/core/tests/test_deprecations.py,sha256=BkNax57OUQmuEvqnR4Lv2p2y0UNpXPHhsGAR6a4qjmQ,21435 +numpy/core/tests/test_deprecations.pyc,, +numpy/core/tests/test_dtype.py,sha256=lPGPQGw4_6ZPn4ns_7nmJNumlvUv_DALMvm8pymaqWY,43520 +numpy/core/tests/test_dtype.pyc,, +numpy/core/tests/test_einsum.py,sha256=bg9t1Hu_z0G8xGWqJuIOqS21QMR76xdQl8xQhlejXPc,44090 
+numpy/core/tests/test_einsum.pyc,, +numpy/core/tests/test_errstate.py,sha256=5vN5Xiv9cILQ0j62P7DrZPvTAiAddRY4EfoCUGQqgMk,1335 +numpy/core/tests/test_errstate.pyc,, +numpy/core/tests/test_extint128.py,sha256=-0zEInkai1qRhXI0bdHCguU_meD3s6Td4vUIBwirYQI,5709 +numpy/core/tests/test_extint128.pyc,, +numpy/core/tests/test_function_base.py,sha256=L7fBYrSEjcZdVDF1I811LWwXRAGxfcOv8b68VDxZGQo,12977 +numpy/core/tests/test_function_base.pyc,, +numpy/core/tests/test_getlimits.py,sha256=2fBK7Slo67kP6bThcN9bOKmeX9gGPQVUE17jGVydoXk,4427 +numpy/core/tests/test_getlimits.pyc,, +numpy/core/tests/test_half.py,sha256=Hnlj7T4-kAKT-2gPa7-vNrbOOJ6qfZoaq0jW4CbCtRs,22300 +numpy/core/tests/test_half.pyc,, +numpy/core/tests/test_indexerrors.py,sha256=0Ku3Sy5jcaE3D2KsyDrFTvgQzMv2dyWja3hc4t5-n_k,4857 +numpy/core/tests/test_indexerrors.pyc,, +numpy/core/tests/test_indexing.py,sha256=i0P9N0vV6RKU_n-sT8whMeG-6ydRVKAREg-b3veKKXM,50602 +numpy/core/tests/test_indexing.pyc,, +numpy/core/tests/test_item_selection.py,sha256=pMjd_8v5YC4WVjLLIrhmisPOG_DMw4j3YGEZ-UY1kOA,3599 +numpy/core/tests/test_item_selection.pyc,, +numpy/core/tests/test_longdouble.py,sha256=Qzlzxtk0NTXUq2loU_RPFVruc3hwjmbOJy-VatXr0F0,6314 +numpy/core/tests/test_longdouble.pyc,, +numpy/core/tests/test_machar.py,sha256=FrKeGhC7j-z9tApS_uI1E0DUkzieKIdUHMQPfCSM0t8,1141 +numpy/core/tests/test_machar.pyc,, +numpy/core/tests/test_mem_overlap.py,sha256=AyBz4pm7HhTDdlW2pq9FR1AO0E5QAYdKpBoWbOdSrco,29505 +numpy/core/tests/test_mem_overlap.pyc,, +numpy/core/tests/test_memmap.py,sha256=mYreq9HqWDz5Z9r6mmdvz4iB6VnUR61ytVCzzd3mhc0,7316 +numpy/core/tests/test_memmap.pyc,, +numpy/core/tests/test_multiarray.py,sha256=2mzeCjnNFSIXEa0ZAp0myFEcQ31ENi4DHH8ERNI8Tn4,303271 +numpy/core/tests/test_multiarray.pyc,, +numpy/core/tests/test_nditer.py,sha256=e2vzCwKhLjcjgE4zPv7YgFpCE78NWdDJpL__fCR-prU,112097 +numpy/core/tests/test_nditer.pyc,, +numpy/core/tests/test_numeric.py,sha256=EwIsuf6vPm9YsbvmoHOklKEgGuRuf8hBs003FPk-Seo,102550 +numpy/core/tests/test_numeric.pyc,, +numpy/core/tests/test_numerictypes.py,sha256=5NMUrwVqBH_qUFk-62QhUxMwg7FPPdm9hcPdftE_4zo,18526 +numpy/core/tests/test_numerictypes.pyc,, +numpy/core/tests/test_overrides.py,sha256=qRa8AOOBnejS6ZfW7vP3g-jioFdIwBBJ3hYrPZXy7ks,13196 +numpy/core/tests/test_overrides.pyc,, +numpy/core/tests/test_print.py,sha256=Q53dqbjQQIlCzRp_1ZY0A-ptP7FlbBZVPeMeMLX0cVg,6876 +numpy/core/tests/test_print.pyc,, +numpy/core/tests/test_records.py,sha256=jFWlwrYqBGKSdmtq55Qqrg8dg-3wvcagr9KRXCC2wMA,17734 +numpy/core/tests/test_records.pyc,, +numpy/core/tests/test_regression.py,sha256=DObYUilkZkErqk67UGsRfhgaNLma7vAZhS73uFJjmaE,87021 +numpy/core/tests/test_regression.pyc,, +numpy/core/tests/test_scalar_ctors.py,sha256=kjyYllJHyhMQGT49Xbjjc2tuFHXcQIM-PAZExMWczq8,2294 +numpy/core/tests/test_scalar_ctors.pyc,, +numpy/core/tests/test_scalarbuffer.py,sha256=0U9U95ogctbQb9ggbmgiQ2UmzXEpO6mbXnn4ciuAO50,3561 +numpy/core/tests/test_scalarbuffer.pyc,, +numpy/core/tests/test_scalarinherit.py,sha256=iP5lLn-z1vtongCvDt-JBnHwNWNREx4ovu12Iy-mGiA,1838 +numpy/core/tests/test_scalarinherit.pyc,, +numpy/core/tests/test_scalarmath.py,sha256=Z14wE1NY6S3UEBVIlNj3KfOsuteXSZliHZKTqnU-nWM,27257 +numpy/core/tests/test_scalarmath.pyc,, +numpy/core/tests/test_scalarprint.py,sha256=AissnDOK_noK09FFQp-oioDk3WjgKkTuGf1-vUWDfFg,15469 +numpy/core/tests/test_scalarprint.pyc,, +numpy/core/tests/test_shape_base.py,sha256=8NAbKisPKVc3z9CdPr_oCVRDWjjvui_sAGLCXweTkR4,24351 +numpy/core/tests/test_shape_base.pyc,, 
+numpy/core/tests/test_ufunc.py,sha256=tkfbCdwV_tDSLcZl79CsTev2cjBBP-o3bTwrOYeVjEo,77403 +numpy/core/tests/test_ufunc.pyc,, +numpy/core/tests/test_umath.py,sha256=AOt3f7EnCM8RZJ8enlkgNQ6IT8SkrYx-a98g5Lq46cM,108290 +numpy/core/tests/test_umath.pyc,, +numpy/core/tests/test_umath_complex.py,sha256=QJHRggzmZw0eFoqyc83I8mdcU5ayPLI9kAmj3MUPEF8,19323 +numpy/core/tests/test_umath_complex.pyc,, +numpy/core/tests/test_unicode.py,sha256=PvWt5NLjgwulCgXakHEKMJ2pSpTLbUWgz9dZExEcSJ8,13656 +numpy/core/tests/test_unicode.pyc,, +numpy/core/umath.py,sha256=rdEijDCvJClpWvMFCUVRUlrAQQ_lq4jeNJla-hTfpFU,1919 +numpy/core/umath.pyc,, +numpy/core/umath_tests.py,sha256=Sr6VQTbH-sOMlXy-tg1-Unht7MKaaV4wtAYR6mQYNbU,455 +numpy/core/umath_tests.pyc,, +numpy/ctypeslib.py,sha256=-uBRC3nl-33m3g3cPEJ_yh73Z191ErweCnoWDYGWcVw,17206 +numpy/ctypeslib.pyc,, +numpy/distutils/__config__.py,sha256=6JW9C97RVmv2Rqmvmsh704QBrLHxkyJpFHadrlll9LE,1554 +numpy/distutils/__config__.pyc,, +numpy/distutils/__init__.py,sha256=b93HZiRpHfSC9E-GPiXk6PWDwQ3STJ4rlzvx6PhHH1k,1092 +numpy/distutils/__init__.pyc,, +numpy/distutils/__version__.py,sha256=SSRZKvGfvg_GpYbXWtI5gaTK0NGW9nBBCyNghaaXBh8,151 +numpy/distutils/__version__.pyc,, +numpy/distutils/_shell_utils.py,sha256=zKjy56kw6erYPK71h-afpX9HYn2ZeQHSMpvvYiVwOu0,2603 +numpy/distutils/_shell_utils.pyc,, +numpy/distutils/ccompiler.py,sha256=ytnf5_oaWfnssvfJQr0aXntNDXUcvZLCasOnwAiYKA0,29537 +numpy/distutils/ccompiler.pyc,, +numpy/distutils/command/__init__.py,sha256=l5r9aYwIEq1D-JJc8WFUxABk6Ip28FpRK_ok7wSLRZE,1098 +numpy/distutils/command/__init__.pyc,, +numpy/distutils/command/autodist.py,sha256=1oytviCdsUjm3YxLLVePvWEqxyz0eDRRqvfokDm2sXQ,2048 +numpy/distutils/command/autodist.pyc,, +numpy/distutils/command/bdist_rpm.py,sha256=rhhIyFzkd5NGi6lZaft44EBPZB3zZFRDc75klJYnbw8,775 +numpy/distutils/command/bdist_rpm.pyc,, +numpy/distutils/command/build.py,sha256=6Q9bDubq5WfwR1K5woDFXed692szD0Rq-5Ckv2xpoK4,1618 +numpy/distutils/command/build.pyc,, +numpy/distutils/command/build_clib.py,sha256=_Y3upI_slekgMk2CI2vplOXj5p1_aEHa-F9_nJ0HOgg,13389 +numpy/distutils/command/build_clib.pyc,, +numpy/distutils/command/build_ext.py,sha256=QXS_Z1vrpvTrTjUb6m9n1upo9zljo4o5yd27Su5LJfQ,25949 +numpy/distutils/command/build_ext.pyc,, +numpy/distutils/command/build_py.py,sha256=7TBGLz0va0PW6sEX-aUjsXdzvhuSbJGgIrMim1JTwu4,1210 +numpy/distutils/command/build_py.pyc,, +numpy/distutils/command/build_scripts.py,sha256=ze19jHBhC3JggKLbL9wgs9I3mG7ls-V2NbykvleNwgQ,1731 +numpy/distutils/command/build_scripts.pyc,, +numpy/distutils/command/build_src.py,sha256=aUl2Zf8WktMCr8f1u6YoAlblNSVGjv-lz_7yTfOKC80,30908 +numpy/distutils/command/build_src.pyc,, +numpy/distutils/command/config.py,sha256=DxvvFqUtKPCXzHCfC2DOQcMBm67YkRdXyWeBa3bYFQE,19094 +numpy/distutils/command/config.pyc,, +numpy/distutils/command/config_compiler.py,sha256=SKJTEk_Y_Da-dVYOHAdf4c3yXxjlE1dsr-hJxY0m0PU,4435 +numpy/distutils/command/config_compiler.pyc,, +numpy/distutils/command/develop.py,sha256=nYM5yjhKtGKh_3wZwrvEQBLYHKldz64aU-0iSycSkXA,641 +numpy/distutils/command/develop.pyc,, +numpy/distutils/command/egg_info.py,sha256=pdiCFQiQuIpf_xmVk9Njl7iowY9CxGn9KRbU-A9eBfg,987 +numpy/distutils/command/egg_info.pyc,, +numpy/distutils/command/install.py,sha256=yBj3NM6sctAbG3QR5Y4qPs7YjxpW7EoKeMPEkNWf2qU,3127 +numpy/distutils/command/install.pyc,, +numpy/distutils/command/install_clib.py,sha256=6tUO3FbF_b_e_Ly31qod9rB4yHA2z8m2mh6qry1a4yk,1315 +numpy/distutils/command/install_clib.pyc,, +numpy/distutils/command/install_data.py,sha256=7iWTw93ty2sBPwHwg_EEhgQhZSZe6SsKdfTS9RbUR9A,914 
+numpy/distutils/command/install_data.pyc,, +numpy/distutils/command/install_headers.py,sha256=NbZwt-Joo80z_1TfxA-mIWXm2L9Mmh4ZLht7HAuveoo,985 +numpy/distutils/command/install_headers.pyc,, +numpy/distutils/command/sdist.py,sha256=tHmlb0RzD8x04dswPXEua9H_b6GuHWY1V3hYkwJDKvA,799 +numpy/distutils/command/sdist.pyc,, +numpy/distutils/compat.py,sha256=xzkW8JgJgGTmye34QCYTIkLfsXBvmPu4tvgCwXNdiU0,218 +numpy/distutils/compat.pyc,, +numpy/distutils/conv_template.py,sha256=5VAAMSjzrSe_mCxzMHVW6GQZ0ATqQr5N9EFYhuTeQvg,9702 +numpy/distutils/conv_template.pyc,, +numpy/distutils/core.py,sha256=9GNNyWDTCqfnD7Jp2tzp9vOBVyeJmF8lsgv_xdlt59g,8230 +numpy/distutils/core.pyc,, +numpy/distutils/cpuinfo.py,sha256=AHJuQeg78_P5EReO1kLd-MAohvB-GfV8zuRh7F8hltI,23015 +numpy/distutils/cpuinfo.pyc,, +numpy/distutils/exec_command.py,sha256=laMoxZ17D5I0cnkUce94wpfgTl1j3xWHn_A_jisdcu8,10795 +numpy/distutils/exec_command.pyc,, +numpy/distutils/extension.py,sha256=q_NjgW-sOoeEBbeSEJwFh411mTgsF7BzGYso61Wf0qg,2967 +numpy/distutils/extension.pyc,, +numpy/distutils/fcompiler/__init__.py,sha256=v3zk6W_xQXCI5H00aVBYDi5IgSug7zrGQKxpl74Qs_k,40154 +numpy/distutils/fcompiler/__init__.pyc,, +numpy/distutils/fcompiler/absoft.py,sha256=AKbj5uGr8dpGDLzRIJbdUnXXAtF_5k4JqnqwTWvy-tQ,5565 +numpy/distutils/fcompiler/absoft.pyc,, +numpy/distutils/fcompiler/compaq.py,sha256=djulalEdV6b58ofcEw14Uoq5-aNgblJMqLIzNwmJ2SE,4109 +numpy/distutils/fcompiler/compaq.pyc,, +numpy/distutils/fcompiler/environment.py,sha256=A6AdFm6GwL5znym5qito1o3brZEgDTJs3AeCRUxu0fA,3309 +numpy/distutils/fcompiler/environment.pyc,, +numpy/distutils/fcompiler/g95.py,sha256=K68RRAvOvyKoh-jsD9J4ZDsHltrGnJ_AllxULhy6iOE,1396 +numpy/distutils/fcompiler/g95.pyc,, +numpy/distutils/fcompiler/gnu.py,sha256=oHipJDyfisSK9_Kdkv1Av8hDHY3UbLALgWfBO7cXkPA,20804 +numpy/distutils/fcompiler/gnu.pyc,, +numpy/distutils/fcompiler/hpux.py,sha256=xpNfy7vCKWPnJ5M3JPnjMAewKBAfKN5hFX3hvEL2zaM,1419 +numpy/distutils/fcompiler/hpux.pyc,, +numpy/distutils/fcompiler/ibm.py,sha256=66gCrBbbVvqu_LvdX7a9MA15NqNIY2DXPOPUFrU1zRc,3595 +numpy/distutils/fcompiler/ibm.pyc,, +numpy/distutils/fcompiler/intel.py,sha256=WlsBtvZnLpFke7oTpMCDYFlccNSUWWkB2p422iwQURU,6861 +numpy/distutils/fcompiler/intel.pyc,, +numpy/distutils/fcompiler/lahey.py,sha256=pJ0-xgtYwyYXgt8JlN8PFeYYEWB3vOmFkNx6UUFXzuM,1393 +numpy/distutils/fcompiler/lahey.pyc,, +numpy/distutils/fcompiler/mips.py,sha256=IxLojWR1oi0VW93PxPpHQXRwZcYffD1dunllQW2w19A,1780 +numpy/distutils/fcompiler/mips.pyc,, +numpy/distutils/fcompiler/nag.py,sha256=eiTvBopdCgVh5-HDTryVbRrYvf4r_Sqse1mruTt5Blo,2608 +numpy/distutils/fcompiler/nag.pyc,, +numpy/distutils/fcompiler/none.py,sha256=N6adoFAf8inIQfCDEBzK5cGI3hLIWWpHmQXux8iJDfA,824 +numpy/distutils/fcompiler/none.pyc,, +numpy/distutils/fcompiler/pathf95.py,sha256=Xf1JMB30PDSoNpA1Y-vKPRBeNO0XfSi0dvVQvvdjfUQ,1127 +numpy/distutils/fcompiler/pathf95.pyc,, +numpy/distutils/fcompiler/pg.py,sha256=G0uNPfedmbkYWfChg1UbxBKqo25RenzSVJN1BUtRDw0,4232 +numpy/distutils/fcompiler/pg.pyc,, +numpy/distutils/fcompiler/sun.py,sha256=21DQ6Rprr9rEp4pp7Np8kCwOc0Xfqdxa1iX0O-yPJPM,1643 +numpy/distutils/fcompiler/sun.pyc,, +numpy/distutils/fcompiler/vast.py,sha256=LJ21-WIJsiquLtjdDaNsJqblwN5wuM2FZsYl1R40vN8,1733 +numpy/distutils/fcompiler/vast.pyc,, +numpy/distutils/from_template.py,sha256=671F-qa8R1gbJUe1tCZFjw64K7J98ZnfeSV1HvWbZas,7979 +numpy/distutils/from_template.pyc,, +numpy/distutils/info.py,sha256=lNxUhbJnzWjA47P2I_9NW-tuVrjGzL62jHDlQJ3pp6E,157 +numpy/distutils/info.pyc,, 
+numpy/distutils/intelccompiler.py,sha256=1qzr6PMxi0UkR0NUY3rt3gqww9GwJ-Gbe91yxQKlieU,4291 +numpy/distutils/intelccompiler.pyc,, +numpy/distutils/lib2def.py,sha256=RWD0EpuUHoxIuc9VyyDCH2d73jgsdGG2PBKVisanlVU,3502 +numpy/distutils/lib2def.pyc,, +numpy/distutils/line_endings.py,sha256=aBO2e754iin4Ylo7FNwlBg6nPudXMnQZYdhVhf-E3aA,2053 +numpy/distutils/line_endings.pyc,, +numpy/distutils/log.py,sha256=yHzdtNdTg6YtvO50Hu-Le5WJ7Typ2TvaCYabelTaUO0,2745 +numpy/distutils/log.pyc,, +numpy/distutils/mingw/gfortran_vs2003_hack.c,sha256=cbsN3Lk9Hkwzr9c-yOP2xEBg1_ml1X7nwAMDWxGjzc8,77 +numpy/distutils/mingw32ccompiler.py,sha256=4fU0Qe_BcIZOFnjwy423AfEHXVepbyayWu89-UxDGGY,25178 +numpy/distutils/mingw32ccompiler.pyc,, +numpy/distutils/misc_util.py,sha256=vUYKvhcqAd10VZOCm0Gw2heGfVTnndNj64BYiPbtbQA,82905 +numpy/distutils/misc_util.pyc,, +numpy/distutils/msvc9compiler.py,sha256=TuPYjPFp3nYQSIG1goNxuOly7o3VMx-H35POMpycB3k,2258 +numpy/distutils/msvc9compiler.pyc,, +numpy/distutils/msvccompiler.py,sha256=7EUlHbgdKBBJG3AzgE94AQeUFnj0HcD6M7_YPN7vdCs,1994 +numpy/distutils/msvccompiler.pyc,, +numpy/distutils/npy_pkg_config.py,sha256=k3lxSOC_InRBSGddbfbvMLRTGqnE-LliNXakwdZ3AH8,13154 +numpy/distutils/npy_pkg_config.pyc,, +numpy/distutils/numpy_distribution.py,sha256=lbnEW1OxWxC_1n2sKd0Q3fC5QnNdFuAkNAlvXF99zIQ,700 +numpy/distutils/numpy_distribution.pyc,, +numpy/distutils/pathccompiler.py,sha256=FjNouOTL8u4gLMbJW7GdT0RlsD2nXV1_SEBNZj9QdpQ,779 +numpy/distutils/pathccompiler.pyc,, +numpy/distutils/setup.py,sha256=q3DcCZNkK_jHsC0imocewd4uCKQWWXjkzd4nkBmkMFI,611 +numpy/distutils/setup.pyc,, +numpy/distutils/system_info.py,sha256=vwtulLGu7HBcUtEjlBkssLCy0vxlUx_2yNBmf0eLD_U,89628 +numpy/distutils/system_info.pyc,, +numpy/distutils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/distutils/tests/__init__.pyc,, +numpy/distutils/tests/test_exec_command.py,sha256=SH9RaWmUnSu8uGEjwyoTrzqoVPclTUnM6UTGSKxW8qc,7146 +numpy/distutils/tests/test_exec_command.pyc,, +numpy/distutils/tests/test_fcompiler.py,sha256=t26JUHwJxl_m-ALPa5XABgVFrMthBw7gRKN4yYX1hYQ,2678 +numpy/distutils/tests/test_fcompiler.pyc,, +numpy/distutils/tests/test_fcompiler_gnu.py,sha256=O57uCEHeQIS0XF8GloEas3OlaOfmIHDWEtgYS_q3x48,2218 +numpy/distutils/tests/test_fcompiler_gnu.pyc,, +numpy/distutils/tests/test_fcompiler_intel.py,sha256=fOjd_jv0Od6bZyzFf4YpZMcnFva0OZK7yJV_4Hebb6A,1140 +numpy/distutils/tests/test_fcompiler_intel.pyc,, +numpy/distutils/tests/test_fcompiler_nagfor.py,sha256=5-Num0A3cN7_NS3BlAgYt174S-OGOWRLL9rXtv-h_fA,1176 +numpy/distutils/tests/test_fcompiler_nagfor.pyc,, +numpy/distutils/tests/test_from_template.py,sha256=SDYoe0XUpAayyEQDq7ZhrvEEz7U9upJDLYzhcdoVifc,1103 +numpy/distutils/tests/test_from_template.pyc,, +numpy/distutils/tests/test_misc_util.py,sha256=8LIm12X83HmvgmpvJJ9inaU7FlGt287VwDM-rMKCOv4,3316 +numpy/distutils/tests/test_misc_util.pyc,, +numpy/distutils/tests/test_npy_pkg_config.py,sha256=wa0QMQ9JAye87t2gDbFaBHp0HGpNFgwxJrJ30ZrHvNk,2639 +numpy/distutils/tests/test_npy_pkg_config.pyc,, +numpy/distutils/tests/test_shell_utils.py,sha256=we9P8AvjCQky1NRDP3sXAJnNUek7rDmMR4Ar9cg9iSk,2030 +numpy/distutils/tests/test_shell_utils.pyc,, +numpy/distutils/tests/test_system_info.py,sha256=Asv6c-N1I2JQHAcBZuObsoBlaaKOVepkhyFAmGp0zow,7730 +numpy/distutils/tests/test_system_info.pyc,, +numpy/distutils/unixccompiler.py,sha256=M7Hn3ANMo8iP-sZtSAebI3RCLp0ViRYxawAbck0hlQM,5177 +numpy/distutils/unixccompiler.pyc,, +numpy/doc/__init__.py,sha256=BDpxTM0iw2F4thjBkYqjIXX57F5KfIaH8xMd67N6Jh0,574 +numpy/doc/__init__.pyc,, 
+numpy/doc/basics.py,sha256=5ygY_jESZpg9TENO0dszCttlq5v5IXvXjL2pWg5Atms,9658 +numpy/doc/basics.pyc,, +numpy/doc/broadcasting.py,sha256=0uofJxPfkwsaQaTSju8TwiOpsmXSw2F3bzG8CdkKviU,5603 +numpy/doc/broadcasting.pyc,, +numpy/doc/byteswapping.py,sha256=ivf9jUApDmMijOj1f5BGYkGCRVh4OLa_Wybbcl3A9Zw,5349 +numpy/doc/byteswapping.pyc,, +numpy/doc/constants.py,sha256=G-xVDfqRId16dKXe1Owy6-tlexIzJUTuir2yu3iQgMc,9290 +numpy/doc/constants.pyc,, +numpy/doc/creation.py,sha256=6FUALDWgqPWObcW-ZHDQMAnfo42I60rRR9pDpwb4-YE,5496 +numpy/doc/creation.pyc,, +numpy/doc/glossary.py,sha256=D5Ljv1ZOOIj-O0Peg2E2QaUBV3mZb60bJOI_4gQmFTo,13583 +numpy/doc/glossary.pyc,, +numpy/doc/indexing.py,sha256=qhhsiAeG_7Y2rgziwj515Fsw0wFL4dq1quT-ja3-0zs,15669 +numpy/doc/indexing.pyc,, +numpy/doc/internals.py,sha256=xYp6lv4yyV0ZIo_qCvLCAWxDa0rhu7FNrTmpXY1isO4,9669 +numpy/doc/internals.pyc,, +numpy/doc/misc.py,sha256=JWJqyiYL2qoSMVAb0QC8w_Pm5l7ZLxx2Z9D5ilgU4Uo,6191 +numpy/doc/misc.pyc,, +numpy/doc/structured_arrays.py,sha256=Kr2n-4TQjfVVBoOLt3Lv30e6j5et9y8zYXZx3wg_hCc,26108 +numpy/doc/structured_arrays.pyc,, +numpy/doc/subclassing.py,sha256=AqtEltybX__ghj91b73QgXcGpYd8gGlwoO-R7SQDwe8,28561 +numpy/doc/subclassing.pyc,, +numpy/doc/ufuncs.py,sha256=vsAkCLEMh7Qa_3x4WbDMY3IQsDCLdOCuB_6P2aEcVLg,5427 +numpy/doc/ufuncs.pyc,, +numpy/dual.py,sha256=SZ3DLWXQFv1lRKN1TlG487xmexpJFa7faaBdnnexm3E,1865 +numpy/dual.pyc,, +numpy/f2py/__init__.py,sha256=nHuShe3wj5HQ2Xyb42DEorG3DK63HaXRNUizZgqB83g,3101 +numpy/f2py/__init__.pyc,, +numpy/f2py/__main__.py,sha256=mnksAcMyLdK0So_DseQn0zalhnA7LflS7hHvo7QCVjU,134 +numpy/f2py/__main__.pyc,, +numpy/f2py/__version__.py,sha256=rEHB9hlWmpryhNa0EmMnlAlDCGI4GXILC9CZUEV3Wew,254 +numpy/f2py/__version__.pyc,, +numpy/f2py/auxfuncs.py,sha256=mDvaBo3Y8tYpXLZfq8DCv6UZ3-2JqWc_iNBZRxGesb0,21826 +numpy/f2py/auxfuncs.pyc,, +numpy/f2py/capi_maps.py,sha256=FgizIHORFdaX5eIVZEQSlC9kVAidh0jfKoJYMK4Z86E,31416 +numpy/f2py/capi_maps.pyc,, +numpy/f2py/cb_rules.py,sha256=un1xn8goj4jFL8FzxRwWSAzpr0CVcvwObVUKdIGJyaA,22946 +numpy/f2py/cb_rules.pyc,, +numpy/f2py/cfuncs.py,sha256=NRxuXAaryWHOFh5205BvvDjajituolH6FvtsumCltvI,45114 +numpy/f2py/cfuncs.pyc,, +numpy/f2py/common_rules.py,sha256=DOCOo4brpFaKNll8hOjG_vCYuOfKyTBYMItaDC_osEc,4981 +numpy/f2py/common_rules.pyc,, +numpy/f2py/crackfortran.py,sha256=eiNE5IpgI8pYEocd2R3U7iays2lTid0x8ClkAsKB67Q,128927 +numpy/f2py/crackfortran.pyc,, +numpy/f2py/diagnose.py,sha256=VNuNTGnQaXn9Fn2jlueYt47634CvLQSaAWJWy_Nxwnw,5295 +numpy/f2py/diagnose.pyc,, +numpy/f2py/f2py2e.py,sha256=w9zSJG3tnCMyOrgZJqhZiEUoAnnI7oU61kYZzvsLdfo,23983 +numpy/f2py/f2py2e.pyc,, +numpy/f2py/f2py_testing.py,sha256=8rkBjUsNhBavpoBgi_bqDS8H8tBdd5BR8hrE6ENsIAo,1523 +numpy/f2py/f2py_testing.pyc,, +numpy/f2py/f90mod_rules.py,sha256=YFK4MPkGHBxshAInbcapnumX3qlu0h6ya6GQpS8zWLk,9850 +numpy/f2py/f90mod_rules.pyc,, +numpy/f2py/func2subr.py,sha256=Oy12rqUa1vcXvzR6g8yx8jSYDwfKt5Jqiebf1QaWX1o,9224 +numpy/f2py/func2subr.pyc,, +numpy/f2py/info.py,sha256=Mk1-neqpqYQ6njoVUCKHmMkyFkAqYeWH4cGZr8NfKiI,136 +numpy/f2py/info.pyc,, +numpy/f2py/rules.py,sha256=WijCZZXIQSbV5wRHGGgjUvQlh4gQ9tKaqbSYTjvOyRk,58526 +numpy/f2py/rules.pyc,, +numpy/f2py/setup.py,sha256=qNCIqRPcpEUhJBjihtEXEe4Iil4XDYVRAI_sZm7xZhM,2444 +numpy/f2py/setup.pyc,, +numpy/f2py/src/fortranobject.c,sha256=VtPYFyh0jv5N432cFTZGonH7qwkUWJRdDUwZZ6RnqnU,35984 +numpy/f2py/src/fortranobject.h,sha256=ltMxueNeETQtEYSA_E7bpRtF8Jj1xuOBS-YNhjBMfOw,5227 +numpy/f2py/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/f2py/tests/__init__.pyc,, 
+numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=L_Z0GMyfYIoRiS0FIBlzy_nxlFoNbkLAatObVmKeGsk,9025 +numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=But9r9m4iL7EGq_haMW8IiQ4VivH0TgUozxX4pPvdpE,29 +numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=oBwbGSlbr9MkFyhVO2aldjc01dr9GHrMrSiRQek8U64,460 +numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=rfzw3QdI-eaDSl-hslCgGpd5tHftJOVhXvb21Y9Gf6M,499 +numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=rmT9k4jP9Ru1PLcGqepw9Jc6P9XNXM0axY7o4hi9lUw,269 +numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=r08JeTVmTTExA-hYZ6HzaxVwBn1GMbPAuuwBhBDtJUk,130 +numpy/f2py/tests/src/common/block.f,sha256=GQ0Pd-VMX3H3a-__f2SuosSdwNXHpBqoGnQDjf8aG9g,224 +numpy/f2py/tests/src/kind/foo.f90,sha256=zIHpw1KdkWbTzbXb73hPbCg4N2Htj3XL8DIwM7seXpo,347 +numpy/f2py/tests/src/mixed/foo.f,sha256=90zmbSHloY1XQYcPb8B5d9bv9mCZx8Z8AMTtgDwJDz8,85 +numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=pxKuPzxF3Kn5khyFq9ayCsQiolxB3SaNtcWaK5j6Rv4,179 +numpy/f2py/tests/src/mixed/foo_free.f90,sha256=fIQ71wrBc00JUAVUj_r3QF9SdeNniBiMw6Ly7CGgPWU,139 +numpy/f2py/tests/src/parameter/constant_both.f90,sha256=-bBf2eqHb-uFxgo6Q7iAtVUUQzrGFqzhHDNaxwSICfQ,1939 +numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=re7pfzcuaquiOia53UT7qNNrTYu2euGKOF4IhoLmT6g,469 +numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=nEmMLitKoSAG7gBBEQLWumogN-KS3DBZOAZJWcSDnFw,612 +numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=IcxESVLKJUZ1k9uYKoSb8Hfm9-O_4rVnlkiUU2diy8Q,609 +numpy/f2py/tests/src/parameter/constant_real.f90,sha256=quNbDsM1Ts2rN4WtPO67S9Xi_8l2cXabWRO00CPQSSQ,610 +numpy/f2py/tests/src/regression/inout.f90,sha256=CpHpgMrf0bqA1W3Ozo3vInDz0RP904S7LkpdAH6ODck,277 +numpy/f2py/tests/src/size/foo.f90,sha256=IlFAQazwBRr3zyT7v36-tV0-fXtB1d7WFp6S1JVMstg,815 +numpy/f2py/tests/src/string/char.f90,sha256=ihr_BH9lY7eXcQpHHDQhFoKcbu7VMOX5QP2Tlr7xlaM,618 +numpy/f2py/tests/test_array_from_pyobj.py,sha256=gLSX9JuF_8NNboUQRzRF3IYC7pWJ06Mw8m6sy2wQvCQ,22083 +numpy/f2py/tests/test_array_from_pyobj.pyc,, +numpy/f2py/tests/test_assumed_shape.py,sha256=QhSsSJ4gzrgACSO-dyasMPhJSfa7PzDAxAd9yN0M6zI,949 +numpy/f2py/tests/test_assumed_shape.pyc,, +numpy/f2py/tests/test_block_docstring.py,sha256=lbRnFEGeseQ-WP9grC8Y4J6xKu-Nwgp0_fSVuYn19Hw,568 +numpy/f2py/tests/test_block_docstring.pyc,, +numpy/f2py/tests/test_callback.py,sha256=d5366rZPJMowFLd7AiwUMRdhKTe8BMeakcp1plQNBJI,3994 +numpy/f2py/tests/test_callback.pyc,, +numpy/f2py/tests/test_common.py,sha256=tLmi1JrfwFdTcBlUInxTn04f6Hf8eSB00sWRoKJvHrM,868 +numpy/f2py/tests/test_common.pyc,, +numpy/f2py/tests/test_compile_function.py,sha256=gQG9PeMaXCcjlc233cEJb5eMoNFfvzAdkHf8qb7Css0,4256 +numpy/f2py/tests/test_compile_function.pyc,, +numpy/f2py/tests/test_kind.py,sha256=G6u6EWjVHenmPju3RQCa9bSeCJGDul3VyXFgp2_Yc7w,1078 +numpy/f2py/tests/test_kind.pyc,, +numpy/f2py/tests/test_mixed.py,sha256=nUgGcvcbyd_NB6NuwFCIH8ze5eeMztC-fE5lCHXc9Bg,931 +numpy/f2py/tests/test_mixed.pyc,, +numpy/f2py/tests/test_parameter.py,sha256=_wX-gM-XGxA_mfDBM8np9NLjYiCF6LJbglwKf09JbdM,3976 +numpy/f2py/tests/test_parameter.pyc,, +numpy/f2py/tests/test_quoted_character.py,sha256=Q0oDtl3STQqzSap5VYPpfzJJ72NtQchm6Vg-bwuoBl4,1029 +numpy/f2py/tests/test_quoted_character.pyc,, +numpy/f2py/tests/test_regression.py,sha256=lPQUKx5RrVtGhyIvIcWS5GgA_CgQypabuuna-Q1z3hs,764 +numpy/f2py/tests/test_regression.pyc,, +numpy/f2py/tests/test_return_character.py,sha256=4a_JeEtY1AkT-Q-01iaZyqWLDGmZGW17d88JNFZoXTc,3864 +numpy/f2py/tests/test_return_character.pyc,, 
+numpy/f2py/tests/test_return_complex.py,sha256=FO4oflCncNIft36R3Fe9uiyDtryiB-_d2PLMH3x64I4,4779 +numpy/f2py/tests/test_return_complex.pyc,, +numpy/f2py/tests/test_return_integer.py,sha256=cyyAbyHUepwYeyXlgIa2FD4B7A2dHnpp2jwx8ZDQiZQ,4749 +numpy/f2py/tests/test_return_integer.pyc,, +numpy/f2py/tests/test_return_logical.py,sha256=u3dazkOU1oz9kZKYXBd2GWaEr02MYfjGdLrb7kT8MiY,4974 +numpy/f2py/tests/test_return_logical.pyc,, +numpy/f2py/tests/test_return_real.py,sha256=QVRKzeO44ZuIlV8EycmtXaHT_i0rnX2bi3rOh7py4GM,5619 +numpy/f2py/tests/test_return_real.pyc,, +numpy/f2py/tests/test_semicolon_split.py,sha256=v7YFx-oTbXUZZ4qjdblCYeVVtkD1YYa4CbuEf2LTOLs,1580 +numpy/f2py/tests/test_semicolon_split.pyc,, +numpy/f2py/tests/test_size.py,sha256=GV7S4tl8FhK60T_EpX86yVQo_bMVTdyOTB8fGVIQ24o,1352 +numpy/f2py/tests/test_size.pyc,, +numpy/f2py/tests/test_string.py,sha256=LTQC9AFVsUAuJVFuH3Wltl-NfFIilVl0KvBNnEgdnmo,676 +numpy/f2py/tests/test_string.pyc,, +numpy/f2py/tests/util.py,sha256=u06FJvpEGZM6P9WaZWkfTxR5TSdjCm7eXku45MO5R_o,9436 +numpy/f2py/tests/util.pyc,, +numpy/f2py/use_rules.py,sha256=L6nTSJnxougQ2PVAzR7s-1spidcfDp9tzLIFAJe3gUI,3652 +numpy/f2py/use_rules.pyc,, +numpy/fft/__init__.py,sha256=KGWBTdw_6ckUIfniIdikkgBwDy8riaGID8x4cdOf_Ds,252 +numpy/fft/__init__.pyc,, +numpy/fft/fftpack.py,sha256=lsjJM82Zdhh9t68dQrzO6iPihdgE_QK3GtSSfpYEcxI,47089 +numpy/fft/fftpack.pyc,, +numpy/fft/fftpack_lite.so,sha256=SHB5OV2xfkGP6xHoRsRM6r2F39PpCrYdhlkhrZ-YtQE,149826 +numpy/fft/helper.py,sha256=6Q_SGRP2hukwWRJjiL5OidSfdJIkvZo7AePSqYMLgJI,9710 +numpy/fft/helper.pyc,, +numpy/fft/info.py,sha256=831NwiCI33uiLx21G7kFCwzZuFxDfmU8n-2LG4FJm2w,7235 +numpy/fft/info.pyc,, +numpy/fft/setup.py,sha256=VR1boee7xZd3lOQVRJ3083I0kYqq_-RCo6CK6UK8Lso,550 +numpy/fft/setup.pyc,, +numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/fft/tests/__init__.pyc,, +numpy/fft/tests/test_fftpack.py,sha256=Ub9oaoyEILrev0kZoEh0hTaYlf-orOUtfKIsNbANwoU,7019 +numpy/fft/tests/test_fftpack.pyc,, +numpy/fft/tests/test_helper.py,sha256=q8y7x0IaXRW2ai0dCEmhW48MRkQ6k8bSegti5gCE6ac,9774 +numpy/fft/tests/test_helper.pyc,, +numpy/lib/__init__.py,sha256=NgwUuJaACbQF-qd9VLy6CEPwcZHk1GEdWPW0UhZPQl8,1358 +numpy/lib/__init__.pyc,, +numpy/lib/_datasource.py,sha256=qKNDjPS0q8CJ4HD9I6IPgJEmr2MN2m-yinyYOJBqZ1w,25523 +numpy/lib/_datasource.pyc,, +numpy/lib/_iotools.py,sha256=JERUFZ1Xja8mq9T7AQdtO2RwoblLmmDcfhnL9iT0xOE,32683 +numpy/lib/_iotools.pyc,, +numpy/lib/_version.py,sha256=8ouI5DbgX1RuNbPhVX_Fn14_v7ZiwwQ1grQPX3_bXBs,4866 +numpy/lib/_version.pyc,, +numpy/lib/arraypad.py,sha256=dAhAzdE0AXL26VMyOj0XIzh_XqcjWv6SDSDaAGSFxyQ,45097 +numpy/lib/arraypad.pyc,, +numpy/lib/arraysetops.py,sha256=gpQmyj62NhyGms41X4J0L2qF5apzPqSCRsSN1Os8AVo,24175 +numpy/lib/arraysetops.pyc,, +numpy/lib/arrayterator.py,sha256=niYNI2qhySUT5j_3gl07pPbkmY4GJqdwIGaMZyPil84,7191 +numpy/lib/arrayterator.pyc,, +numpy/lib/financial.py,sha256=fYFVxvZmJJUxyWarmOpNz8rhFfs_Gv3vx8iB9L2LVwc,25985 +numpy/lib/financial.pyc,, +numpy/lib/format.py,sha256=-LoQbL9aFfVv38VLi99Pga5zY1tO4qdsog7VGc2N4wE,30723 +numpy/lib/format.pyc,, +numpy/lib/function_base.py,sha256=oCjNZGSGegRLTaEMmOJwYoTTWR1eZuVEO9HJLCk26Gc,156000 +numpy/lib/function_base.pyc,, +numpy/lib/histograms.py,sha256=0FviiX5FbM5sxHjbHHE54LQLday_AoX4LCF_N8_x4bk,39375 +numpy/lib/histograms.pyc,, +numpy/lib/index_tricks.py,sha256=OymHt6Mzi16XZBWSvLdnwGr9ywaPker-3Dq2vQHXN7I,29087 +numpy/lib/index_tricks.pyc,, +numpy/lib/info.py,sha256=oVczF_pC_CMZC2h2adb2HHza_1qF3qI065j4RBrd-I4,6616 +numpy/lib/info.pyc,, 
+numpy/lib/mixins.py,sha256=GeOiq01E663Z_06xQfIUYKpl2JPkswqhaQEernjnO_Q,7268 +numpy/lib/mixins.pyc,, +numpy/lib/nanfunctions.py,sha256=7LsFmh0meOwBJpzoNnR1V8e2nAJv61A8Ib9EWmYFRLg,57741 +numpy/lib/nanfunctions.pyc,, +numpy/lib/npyio.py,sha256=PqeSplzcbS7aZQdlYTxYAFPfJkPwJuS1I1f_sfEEpO0,84661 +numpy/lib/npyio.pyc,, +numpy/lib/polynomial.py,sha256=Gm-XIvalJS2B1KMRvyBddziWHtd8ZCmWfxZlJ0kOiyc,40292 +numpy/lib/polynomial.pyc,, +numpy/lib/recfunctions.py,sha256=OQ3lGqoHCauSXqgGJaGH0Bl1eNV_SdoAz02P0_7L2lw,54851 +numpy/lib/recfunctions.pyc,, +numpy/lib/scimath.py,sha256=axf_K8DphkbuKZXA6K2A5fbyIu1BdKQ6P74iFW9YpTc,14698 +numpy/lib/scimath.pyc,, +numpy/lib/setup.py,sha256=os9eV9wSzwTQlfxeoQ33gYQ4wOj1_6EvqcROc8PyGbE,379 +numpy/lib/setup.pyc,, +numpy/lib/shape_base.py,sha256=T1RaVDs9X7GUBclWBf2SZkLZhY3xbpctMv8rtt0gdM0,37967 +numpy/lib/shape_base.pyc,, +numpy/lib/stride_tricks.py,sha256=P7koCHdGLg31K2aQPIPcAmqLKKsnY-HZw_eS3hqUpZA,9123 +numpy/lib/stride_tricks.pyc,, +numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/lib/tests/__init__.pyc,, +numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 +numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 +numpy/lib/tests/data/py3-objarr.npy,sha256=pTTVh8ezp-lwAK3fkgvdKU8Arp5NMKznVD-M6Ex_uA0,341 +numpy/lib/tests/data/py3-objarr.npz,sha256=qQR0gS57e9ta16d_vCQjaaKM74gPdlwCPkp55P-qrdw,449 +numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 +numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 +numpy/lib/tests/test__datasource.py,sha256=5LwfmvIysaLHlCYkmsj46S7YRF2zRG4BmKSjjJr6fdE,11463 +numpy/lib/tests/test__datasource.pyc,, +numpy/lib/tests/test__iotools.py,sha256=7ocNK0I-XKtiJLxnq2Fq_Yszi-e-70Km7crI28Jyqww,13714 +numpy/lib/tests/test__iotools.pyc,, +numpy/lib/tests/test__version.py,sha256=eCeeSqb8G3WNtCgkM3XGz9Zszyye-KFDlNQ7EY2J_UY,2055 +numpy/lib/tests/test__version.pyc,, +numpy/lib/tests/test_arraypad.py,sha256=ZIcLFH5ykizK2XT6WN1f9CyR48tqHQnx_b2Ojurs7eo,49320 +numpy/lib/tests/test_arraypad.pyc,, +numpy/lib/tests/test_arraysetops.py,sha256=WsH_vJI5guzG9Mix0y-kwVceTZ8e-aKJxEaf_RNTcbE,22157 +numpy/lib/tests/test_arraysetops.pyc,, +numpy/lib/tests/test_arrayterator.py,sha256=run7iWWbvoHGGsDv_uB6G8QENFzOCSgUIxAMVp7ZMu4,1357 +numpy/lib/tests/test_arrayterator.pyc,, +numpy/lib/tests/test_financial.py,sha256=8cCdlpNixwl1Wrgblemxi3ndTxX_Sq2yr-30lSURnq0,17098 +numpy/lib/tests/test_financial.pyc,, +numpy/lib/tests/test_format.py,sha256=m8X4eNPyby2wOIENzkW8apHugUi5WRkzHZO15h9MSDU,35376 +numpy/lib/tests/test_format.pyc,, +numpy/lib/tests/test_function_base.py,sha256=3qeRuK68MKs7Xkxe61M2ozQkCiSnTDpGFeJbrp807Uc,117817 +numpy/lib/tests/test_function_base.pyc,, +numpy/lib/tests/test_histograms.py,sha256=0EhLLKOADkyQZTucCcXfYyAC5X1n5_vglMid0VJO-DE,33415 +numpy/lib/tests/test_histograms.pyc,, +numpy/lib/tests/test_index_tricks.py,sha256=Ql-AUXqIgQSdLjbeRI7GmaiVVtdgH0YoR0m5DGS_V-g,16322 +numpy/lib/tests/test_index_tricks.pyc,, +numpy/lib/tests/test_io.py,sha256=Z2bkX6n7a7lIKS4hfWxf20XyCFQfjFK8PAjEocnpOI0,96988 +numpy/lib/tests/test_io.pyc,, +numpy/lib/tests/test_mixins.py,sha256=YNIKF716Jz7V8FJ8Zzww_F6laTD8j3A6SBxCXqt6rAQ,7233 +numpy/lib/tests/test_mixins.pyc,, +numpy/lib/tests/test_nanfunctions.py,sha256=wS-i0JsIwMb0p35vW2qGC_dpewDFVzI2YTnJ25pr1O8,36179 +numpy/lib/tests/test_nanfunctions.pyc,, +numpy/lib/tests/test_packbits.py,sha256=W4gtoYBa5LbevvbXL9lvrZRT5Wt1fqyI3J7oDLtza_A,12851 
+numpy/lib/tests/test_packbits.pyc,, +numpy/lib/tests/test_polynomial.py,sha256=C7XhyAVCbh_5_HWiZ5XPEVOq_WGtBa8hvmcmVYE3QDc,10055 +numpy/lib/tests/test_polynomial.pyc,, +numpy/lib/tests/test_recfunctions.py,sha256=3ExnP2zwR3y9mO3rZ_ueqIQ5aMitvje76U-xWUG_PjY,38471 +numpy/lib/tests/test_recfunctions.pyc,, +numpy/lib/tests/test_regression.py,sha256=96pKecYGHPZwAoHV3_kLvl3gIb0PN0m33R0H3dd7uSk,8472 +numpy/lib/tests/test_regression.pyc,, +numpy/lib/tests/test_shape_base.py,sha256=nVUzbHADBmrOaOHeh5fA27gjxsnOC3r-S5lyo1n5MV8,23979 +numpy/lib/tests/test_shape_base.pyc,, +numpy/lib/tests/test_stride_tricks.py,sha256=HUp9YL7eBTRfT8gs6iraMl6M3YvoDxfFmkkwwmroing,15392 +numpy/lib/tests/test_stride_tricks.pyc,, +numpy/lib/tests/test_twodim_base.py,sha256=toC7eTjEuZxygJwQub0tC7_uGCtLVVMCHfa6EUkGJU4,17524 +numpy/lib/tests/test_twodim_base.pyc,, +numpy/lib/tests/test_type_check.py,sha256=KxnoWjY3iGTnr0pDWEah73ZAx_6a85S9SSnkKIG-sn0,13509 +numpy/lib/tests/test_type_check.pyc,, +numpy/lib/tests/test_ufunclike.py,sha256=VFt_8BDH7q80yXmYJSn1crolIMizKFN3mAJcigaazLU,3350 +numpy/lib/tests/test_ufunclike.pyc,, +numpy/lib/tests/test_utils.py,sha256=kIH7i6N_Gtsk8FgIHTuYvASeGxQB15UYPJwqvWBPWkY,2474 +numpy/lib/tests/test_utils.pyc,, +numpy/lib/twodim_base.py,sha256=sFRiYvhrOG9EY_YMlTblXb4aJKZaE3WLUx1WBKoIgG4,27339 +numpy/lib/twodim_base.pyc,, +numpy/lib/type_check.py,sha256=XsSXtj8bHk22iq4NNZpwqMKPM9FoddtYNWKFi63oZqc,18073 +numpy/lib/type_check.pyc,, +numpy/lib/ufunclike.py,sha256=1df-LT8UlC_SRmc06DhAnsUZLHROx0p56jw6GUwcap8,7156 +numpy/lib/ufunclike.pyc,, +numpy/lib/user_array.py,sha256=7nJPlDfP-04Lcq8iH_cqBbSEsx5cHCcj-2Py-oh-5t0,7817 +numpy/lib/user_array.pyc,, +numpy/lib/utils.py,sha256=L_JtNhuRwM6_4YyhM5jCpeH36j_lj5-t6XJT3pB4In0,36161 +numpy/lib/utils.pyc,, +numpy/linalg/__init__.py,sha256=P2q5fyWhZEc-xhcruFEcHWmYhSBOWSr63i9UjE8x3fk,2326 +numpy/linalg/__init__.pyc,, +numpy/linalg/_umath_linalg.so,sha256=o-tN4TdHWHggXAxfA1nBDiHBJqqyuW6o09zBFoFwzAw,872112 +numpy/linalg/info.py,sha256=AbXPYYabJK5In0F9IMk-oVWZgDyEaoU45Wnq6RtuCJs,1198 +numpy/linalg/info.pyc,, +numpy/linalg/lapack_lite.so,sha256=BNhR9YGqEaCh68JU5TlJnmxJGqZdgZ7_ZCS_lQwRay4,108832 +numpy/linalg/linalg.py,sha256=rZuKNk2u7c65Cp8yssB1BEeCSPauDClcgYF6N4dY2So,85094 +numpy/linalg/linalg.pyc,, +numpy/linalg/setup.py,sha256=k1X4EfRWACFtJYfb8Wiol_-pPnEMtqURxQ8H9FwFHWg,1878 +numpy/linalg/setup.pyc,, +numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/linalg/tests/__init__.pyc,, +numpy/linalg/tests/test_build.py,sha256=xKcJ8JmGk-zTqxxMhDX5GFsw-ptn8uwOUOcxaTUuPHc,1704 +numpy/linalg/tests/test_build.pyc,, +numpy/linalg/tests/test_deprecations.py,sha256=eGYDVF3rmGQyDEMGOc-p_zc84Cx1I3jQPyaJe7xOvEc,706 +numpy/linalg/tests/test_deprecations.pyc,, +numpy/linalg/tests/test_linalg.py,sha256=y87Nka03PZAqUl5GqzOb4LsLbgbGib5V9ehdoTIPLRI,69977 +numpy/linalg/tests/test_linalg.pyc,, +numpy/linalg/tests/test_regression.py,sha256=zz7lprqDg7yU-z1d6AOdCDH3Tjqgw82QGiaPM7peixY,5671 +numpy/linalg/tests/test_regression.pyc,, +numpy/ma/__init__.py,sha256=fcmMCElT3MmCkjIGVhXyEAbjuWe_j1NVUiE65eAMvy0,1470 +numpy/ma/__init__.pyc,, +numpy/ma/bench.py,sha256=q3y_e1wpHVEdg0iIxrBshWVt2LOFfYi6q-eIJ3RSVrU,4942 +numpy/ma/bench.pyc,, +numpy/ma/core.py,sha256=UVL30fxCdddsnMlLOgcCxl-ca_-Iqm6uyN8QHnczcH4,256431 +numpy/ma/core.pyc,, +numpy/ma/extras.py,sha256=iNaY5jpgYLssKzzgS7FfhlZ3BM59gjLbsNb7CUylLDU,56986 +numpy/ma/extras.pyc,, +numpy/ma/mrecords.py,sha256=j8EituvbyOFG5oiTwHBnVdQX1mhD_qByBezBeB_R1hM,26937 +numpy/ma/mrecords.pyc,, 
+numpy/ma/setup.py,sha256=zkieH8BeiGVXl3Wlt_WeP9kciZlyAZY20DDu4SGk4b4,429 +numpy/ma/setup.pyc,, +numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/ma/tests/__init__.pyc,, +numpy/ma/tests/test_core.py,sha256=pM6Wn3jxw6cPV7wFlNKWyyAYFDAGAm0oYtw2txBudKw,196219 +numpy/ma/tests/test_core.pyc,, +numpy/ma/tests/test_deprecations.py,sha256=StN-maPV6dwIPn1LmJ_Fd9l_Ysrbzvl8BZy6zYeUru8,2340 +numpy/ma/tests/test_deprecations.pyc,, +numpy/ma/tests/test_extras.py,sha256=f7wFcowBM60pnNwIJ221W9X6AWNS9pRK_VjVXV5Wqio,66791 +numpy/ma/tests/test_extras.pyc,, +numpy/ma/tests/test_mrecords.py,sha256=SX0_-SgRqIQBlPaiDNVD0-oBAot0N9aLPWM7Gj4U804,19966 +numpy/ma/tests/test_mrecords.pyc,, +numpy/ma/tests/test_old_ma.py,sha256=85NJMKj-TG8WGSMFT2KbDEsBsrgV58URkFUd1v9iuBc,32351 +numpy/ma/tests/test_old_ma.pyc,, +numpy/ma/tests/test_regression.py,sha256=AGAA97e9_0q1VHSTOx6qIsh1qA56GzEKhWZWveuHf3w,2993 +numpy/ma/tests/test_regression.pyc,, +numpy/ma/tests/test_subclassing.py,sha256=GEqxbqfodv1823cRitfJK3qMWTbDWIpv2HwuVB_kpXk,12997 +numpy/ma/tests/test_subclassing.pyc,, +numpy/ma/testutils.py,sha256=meyy8_0sx4g2sebsVO1PrFSc6ogLzEU7vjOuu2VjY1U,10365 +numpy/ma/testutils.pyc,, +numpy/ma/timer_comparison.py,sha256=Q1AyfHzNrWzVTrx6ebL9HgpQEkEJPHAkbWuTK_0bBkQ,15586 +numpy/ma/timer_comparison.pyc,, +numpy/ma/version.py,sha256=KpJAmUE1s1TpbgqgdBpDoslxm7kOMpczLjEzLMGv9Ag,380 +numpy/ma/version.pyc,, +numpy/matlib.py,sha256=bfk5RflWhOjnBKhpU4L-WDafyzoNIy5-K-8MMyIauN8,9809 +numpy/matlib.pyc,, +numpy/matrixlib/__init__.py,sha256=W-2bi7zuMWQY5U1ikwfaBPubrcYkbxzPzzIeYz3RYPA,284 +numpy/matrixlib/__init__.pyc,, +numpy/matrixlib/defmatrix.py,sha256=1tR1FsgapRQ2XbiAvV4ik4mwkKLJA1y6ABeztSeYr2k,30660 +numpy/matrixlib/defmatrix.pyc,, +numpy/matrixlib/setup.py,sha256=7DS-rWnyWlLTuOj31UuhkyW8QhLQ7KD5wirtWT_DUhc,437 +numpy/matrixlib/setup.pyc,, +numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/matrixlib/tests/__init__.pyc,, +numpy/matrixlib/tests/test_defmatrix.py,sha256=FRkFPpDpgUEzEAgShORCVhPOuqclxBftHyEW5z2oV4o,15315 +numpy/matrixlib/tests/test_defmatrix.pyc,, +numpy/matrixlib/tests/test_interaction.py,sha256=y0ldcMIKCeT_tRo_uON6Cvxuff-M4MxmqnzA0kDFHYU,12179 +numpy/matrixlib/tests/test_interaction.pyc,, +numpy/matrixlib/tests/test_masked_matrix.py,sha256=4uslUEOdw_ACZ9R-VKX2m82HlDeNFrBs-zP8uuWj5gI,8962 +numpy/matrixlib/tests/test_masked_matrix.pyc,, +numpy/matrixlib/tests/test_matrix_linalg.py,sha256=XYsAcC02YgvlfqAQOLY2hOuggeRlRhkztNsLYWGb4QQ,2125 +numpy/matrixlib/tests/test_matrix_linalg.pyc,, +numpy/matrixlib/tests/test_multiarray.py,sha256=jM-cFU_ktanoyJ0ScRYv5xwohhE3pKpVhBBtd31b-IQ,628 +numpy/matrixlib/tests/test_multiarray.pyc,, +numpy/matrixlib/tests/test_numeric.py,sha256=YPq5f11MUAV6WcLQbl8xKWcm17lMj9SJ09mamqGCpxA,515 +numpy/matrixlib/tests/test_numeric.pyc,, +numpy/matrixlib/tests/test_regression.py,sha256=ou1TP5bFNpjRaL2-zQxzS11ChwvAkCVp3k71SBtOO9M,1001 +numpy/matrixlib/tests/test_regression.pyc,, +numpy/polynomial/__init__.py,sha256=boBgsbz2Rr49pBTyGNT3TnLRTPSauyjBNeCVGek7oUM,1134 +numpy/polynomial/__init__.pyc,, +numpy/polynomial/_polybase.py,sha256=GEr4yD6OnPtNo98Mf0p5r8vt_nEmTeqTRG3pljAADbQ,32779 +numpy/polynomial/_polybase.pyc,, +numpy/polynomial/chebyshev.py,sha256=79hcfdqUHgAtBofN4ZZHTkOJNzYeJ12KsCgZm4zng6I,67424 +numpy/polynomial/chebyshev.pyc,, +numpy/polynomial/hermite.py,sha256=14FITYGMDQzZLsAIu6TwIeS_Eh4O3dTizXHRmVLddAI,58113 +numpy/polynomial/hermite.pyc,, 
+numpy/polynomial/hermite_e.py,sha256=NuXm8lrR5s8yli_bUauvgUsX0p0yM-EwtvNj6srObtU,58237 +numpy/polynomial/hermite_e.pyc,, +numpy/polynomial/laguerre.py,sha256=o3ZvgUi9ivpWGqPIk6hCAGcs8P9mljAxP54oXpQfYiM,56513 +numpy/polynomial/laguerre.pyc,, +numpy/polynomial/legendre.py,sha256=UvhOne6SyvBKj5DIYGyfD9bu_RcMqADY5ezbVkhBR8s,57701 +numpy/polynomial/legendre.pyc,, +numpy/polynomial/polynomial.py,sha256=9XK_JxmTv4IjCgtJ03dd6ASNDIe8H0crEOa7MbstUTI,53443 +numpy/polynomial/polynomial.pyc,, +numpy/polynomial/polyutils.py,sha256=2qA03OFnMiXQj6aF0vIUWN2B99D4yd-dEMSMOYDOLf0,11529 +numpy/polynomial/polyutils.pyc,, +numpy/polynomial/setup.py,sha256=PKIUV6Jh7_0jBboPp3IHPmp6LWVs4tbIkdu_FtmI_5U,385 +numpy/polynomial/setup.pyc,, +numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/polynomial/tests/__init__.pyc,, +numpy/polynomial/tests/test_chebyshev.py,sha256=ntXcwISDcgmlGGfMhwYWiWdjqtTVVUfFdQHm-Msx5yc,20719 +numpy/polynomial/tests/test_chebyshev.pyc,, +numpy/polynomial/tests/test_classes.py,sha256=F07n2iV0_XPK2JC_8egdnO6FDETpttR7gW6e-8zDtEs,20056 +numpy/polynomial/tests/test_classes.pyc,, +numpy/polynomial/tests/test_hermite.py,sha256=OQNcZGOg1FoqVVXrouHqanG2d2zCUEUdhutRV9WaIOs,18758 +numpy/polynomial/tests/test_hermite.pyc,, +numpy/polynomial/tests/test_hermite_e.py,sha256=W1akH58gTi9VrXh2GiyJfPhpeJzf57XnAFVIgM33hp4,19092 +numpy/polynomial/tests/test_hermite_e.pyc,, +numpy/polynomial/tests/test_laguerre.py,sha256=fyNsRBmdHn1Sd0uQaoXeL70kJb6nQCMeYE5X7utxkS0,17692 +numpy/polynomial/tests/test_laguerre.pyc,, +numpy/polynomial/tests/test_legendre.py,sha256=Np3xtAMN5zlioM5HFsyLR4tdAJW0ibvfJbz9QHWMkOo,18456 +numpy/polynomial/tests/test_legendre.pyc,, +numpy/polynomial/tests/test_polynomial.py,sha256=xbrdofA-XCwZhsyPL5tMKDEZ5cWzxNp6Pz4SV_4nKEU,19552 +numpy/polynomial/tests/test_polynomial.pyc,, +numpy/polynomial/tests/test_polyutils.py,sha256=GzRz3leypd2UrWE-EwuIWL0lbbj6ks6Mjli3tozDN9U,3081 +numpy/polynomial/tests/test_polyutils.pyc,, +numpy/polynomial/tests/test_printing.py,sha256=_7O-05q3JEjdxmuzBdWxligQVdC6qGygKmbhfiYW9KQ,2067 +numpy/polynomial/tests/test_printing.pyc,, +numpy/random/__init__.py,sha256=RvKHC6GpPCEcZdGLYiDG3RH5vEbJfOkL4fM0igfBAAA,6053 +numpy/random/__init__.pyc,, +numpy/random/info.py,sha256=OzPLVv_aA7kxLu9WdGiRqO2_yA2163PWQi3Lwwrhs3E,109 +numpy/random/info.pyc,, +numpy/random/mtrand.so,sha256=m1nTioYCsspPoM5cqrMt5FkkqiGErpwL3ulG-HyKuYY,3190922 +numpy/random/randomkit.h,sha256=GOfc27td8dO8YM0WeB_qM313pouCDUt9Ad7nc_lgKI0,6799 +numpy/random/setup.py,sha256=Zm-rZze8r6GWKT-o9tYq2DVym0AMh2tNwE_s6m1Z-Bc,2286 +numpy/random/setup.pyc,, +numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/__init__.pyc,, +numpy/random/tests/test_random.py,sha256=0V5bRWCOZiMrbv96Lhq-nR0R-o8yedZ8Jgy1ewfP_Gc,66167 +numpy/random/tests/test_random.pyc,, +numpy/random/tests/test_regression.py,sha256=Eb0wEE5cbGklIwcSTNkPI6CpawspICJsx3lYtTDxl7A,5671 +numpy/random/tests/test_regression.pyc,, +numpy/setup.py,sha256=lsyhnRXfo0ybq63nVUX8HnYhQ1mI0bSic-mk-lK3wnc,920 +numpy/setup.pyc,, +numpy/testing/__init__.py,sha256=MHRK5eimwrC9RE723HlOcOQGxu5HAmQ-qwlcVX1sZ1k,632 +numpy/testing/__init__.pyc,, +numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__init__.pyc,, +numpy/testing/_private/decorators.py,sha256=JSIBsQH4t1rdMcr1-Cf2jBJ6CXzIGEFyZoWxUJuXI7M,9015 +numpy/testing/_private/decorators.pyc,, 
+numpy/testing/_private/noseclasses.py,sha256=nYtV16KcoqAcHswfYO-u6bRIrDBvCvpqjCNfl7zk-SA,14601 +numpy/testing/_private/noseclasses.pyc,, +numpy/testing/_private/nosetester.py,sha256=S1nEtDBvNT87Zrt8XmuSVIBWpanJwjtD1YiRlcf7eoA,20515 +numpy/testing/_private/nosetester.pyc,, +numpy/testing/_private/parameterized.py,sha256=S_cqBegd7kdwVq1kg_DAnywwFPT_g1bjDJ6-LMq0LO4,18316 +numpy/testing/_private/parameterized.pyc,, +numpy/testing/_private/utils.py,sha256=TNDvuuT_SN73Hr07ODXOvRk8wgueYWF1uYdSSjIJhsk,78209 +numpy/testing/_private/utils.pyc,, +numpy/testing/decorators.py,sha256=BEktn0PuVlmgUQ_zGVNXu0wQYh3W0_bu61LnQPrxY20,428 +numpy/testing/decorators.pyc,, +numpy/testing/noseclasses.py,sha256=iZmGKPHAGQIshsEONB-oLt7gHPzx2Bg57oat_M4M5XE,423 +numpy/testing/noseclasses.pyc,, +numpy/testing/nosetester.py,sha256=as3E0khSkTseCRpyvtOSSq4fJY1K1lrrAyIcXOErTMo,583 +numpy/testing/nosetester.pyc,, +numpy/testing/print_coercion_tables.py,sha256=F44AObcou_xytUWszku8t1bWuui-4I_18o7Z7zW8l18,2705 +numpy/testing/print_coercion_tables.pyc,, +numpy/testing/setup.py,sha256=9PnlgcejccUBzaGPi9Po-ElhmuQMAmWCBRdvCDwiKYw,676 +numpy/testing/setup.pyc,, +numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/tests/__init__.pyc,, +numpy/testing/tests/test_decorators.py,sha256=JB3wOfG4SHAvGNBJgEnotP6Y7uHgNq00-Ji8-vpTL0M,5921 +numpy/testing/tests/test_decorators.pyc,, +numpy/testing/tests/test_doctesting.py,sha256=sKBXwuRZwMFSiem3R9egBzzSUB81kkpw9y-Y07iqU2M,1413 +numpy/testing/tests/test_doctesting.pyc,, +numpy/testing/tests/test_utils.py,sha256=I8zSV7NrpCwC4hF9DXqR22OqMUF65Rg6oljOXBWp010,52756 +numpy/testing/tests/test_utils.pyc,, +numpy/testing/utils.py,sha256=3Z2wHEc2f-0lZrdDueAdbe96KQw1DqM_aFosea9VRtY,1232 +numpy/testing/utils.pyc,, +numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/tests/__init__.pyc,, +numpy/tests/test_ctypeslib.py,sha256=Fy_dBd80RrBufyeXISkBu6kS3X700qOD5ob0pDjRssg,12276 +numpy/tests/test_ctypeslib.pyc,, +numpy/tests/test_matlib.py,sha256=WKILeEOe3NdKP_XAy-uCs4VEi7r_ghQ7NUhIgH1LzoM,2158 +numpy/tests/test_matlib.pyc,, +numpy/tests/test_numpy_version.py,sha256=VtTTZAPnsJ8xtKLy1qYqIwrpcjTtqJ9xP9qP5-p8DbU,647 +numpy/tests/test_numpy_version.pyc,, +numpy/tests/test_public_api.py,sha256=CmikwegnRWXrMO7vvsZ4Z8O8bwDsfuq5tfOWO-0ccs8,3457 +numpy/tests/test_public_api.pyc,, +numpy/tests/test_reloading.py,sha256=7sDoPGkvvZas7FhT4oOURt75A5JAcr_Ws2uoJ9cVMiY,1304 +numpy/tests/test_reloading.pyc,, +numpy/tests/test_scripts.py,sha256=SxlQPb8EttfP4V5iGJyXMBtDWTS3EcYVBN-JWDTtSy4,1637 +numpy/tests/test_scripts.pyc,, +numpy/tests/test_warnings.py,sha256=ye4TBGnOuPAZyu5bS5JDxYV5hLglUQQfKSrMWwY_phI,2594 +numpy/tests/test_warnings.pyc,, +numpy/version.py,sha256=5fSzfrOy5GglDQLyQSQBrK0HuGT2cGqu3DYSwiL72Hw,294 +numpy/version.pyc,, diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/WHEEL new file mode 100644 index 0000000..295a0ca --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.1) +Root-Is-Purelib: false +Tag: cp27-cp27mu-manylinux1_x86_64 + diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/entry_points.txt b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/entry_points.txt new file mode 100644 index 0000000..bddf93b --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +f2py = numpy.f2py.f2py2e:main +f2py2 = numpy.f2py.f2py2e:main +f2py2.7 = numpy.f2py.f2py2e:main + diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/top_level.txt new file mode 100644 index 0000000..24ce15a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/top_level.txt @@ -0,0 +1 @@ +numpy diff --git a/project/venv/lib/python2.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 b/project/venv/lib/python2.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 new file mode 100755 index 0000000..ea741fc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so b/project/venv/lib/python2.7/site-packages/numpy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so new file mode 100755 index 0000000..a12ec70 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/LICENSE.txt b/project/venv/lib/python2.7/site-packages/numpy/LICENSE.txt new file mode 100644 index 0000000..8e6cc62 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/LICENSE.txt @@ -0,0 +1,940 @@ +Copyright (c) 2005-2019, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +The NumPy repository and source distributions bundle several libraries that are +compatibly licensed. We list these here. 
+
+Name: Numpydoc
+Files: doc/sphinxext/numpydoc/*
+License: 2-clause BSD
+  For details, see doc/sphinxext/LICENSE.txt
+
+Name: scipy-sphinx-theme
+Files: doc/scipy-sphinx-theme/*
+License: 3-clause BSD, PSF and Apache 2.0
+  For details, see doc/scipy-sphinx-theme/LICENSE.txt
+
+Name: lapack-lite
+Files: numpy/linalg/lapack_lite/*
+License: 3-clause BSD
+  For details, see numpy/linalg/lapack_lite/LICENSE.txt
+
+Name: tempita
+Files: tools/npy_tempita/*
+License: BSD derived
+  For details, see tools/npy_tempita/license.txt
+
+Name: dragon4
+Files: numpy/core/src/multiarray/dragon4.c
+License: One of a kind
+  For license text, see numpy/core/src/multiarray/dragon4.c
+
+----
+
+This binary distribution of NumPy also bundles the following software:
+
+
+Name: OpenBLAS
+Files: .libs/libopenb*.so
+Description: bundled as a dynamically linked library
+Availability: https://github.com/xianyi/OpenBLAS/
+License: 3-clause BSD
+  Copyright (c) 2011-2014, The OpenBLAS Project
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+  3. Neither the name of the OpenBLAS project nor the names of
+     its contributors may be used to endorse or promote products
+     derived from this software without specific prior written
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+  OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: LAPACK
+Files: .libs/libopenb*.so
+Description: bundled in OpenBLAS
+Availability: https://github.com/xianyi/OpenBLAS/
+License: 3-clause BSD
+  Copyright (c) 1992-2013 The University of Tennessee and The University
+                          of Tennessee Research Foundation. All rights
+                          reserved.
+  Copyright (c) 2000-2013 The University of California Berkeley. All
+                          rights reserved.
+  Copyright (c) 2006-2013 The University of Colorado Denver. All rights
+                          reserved.
+
+  $COPYRIGHT$
+
+  Additional copyrights may follow
+
+  $HEADER$
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+
+  - Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+  - Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer listed
+    in this license in the documentation and/or other materials
+    provided with the distribution.
+
+  - Neither the name of the copyright holders nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+  The copyright holders provide no reassurances that the source code
+  provided does not infringe any patent, copyright, or any other
+  intellectual property rights of third parties. The copyright holders
+  disclaim any liability to any recipient for claims brought against
+  recipient by any third party for infringement of that parties
+  intellectual property rights.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: GCC runtime library
+Files: .libs/libgfortran*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/viewcvs/gcc/
+License: GPLv3 + runtime exception
+  Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+  Libgfortran is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3, or (at your option)
+  any later version.
+
+  Libgfortran is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  Under Section 7 of GPL version 3, you are granted additional
+  permissions described in the GCC Runtime Library Exception, version
+  3.1, as published by the Free Software Foundation.
+
+  You should have received a copy of the GNU General Public License and
+  a copy of the GCC Runtime Library Exception along with this program;
+  see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+  <http://www.gnu.org/licenses/>.
+
+----
+
+Full text of license texts referred to above follows (that they are
+listed below does not necessarily imply the conditions apply to the
+present binary release):
+
+----
+
+GCC RUNTIME LIBRARY EXCEPTION
+
+Version 3.1, 31 March 2009
+
+Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+This GCC Runtime Library Exception ("Exception") is an additional
+permission under section 7 of the GNU General Public License, version
+3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+bears a notice placed by the copyright holder of the file stating that
+the file is governed by GPLv3 along with this Exception.
+
+When you use GCC to compile a program, GCC may combine portions of
+certain GCC header files and runtime libraries with the compiled
+program. The purpose of this Exception is to allow compilation of
+non-GPL (including proprietary) programs to use, in this way, the
+header files and runtime libraries covered by this Exception.
+
+0. Definitions.
+
+A file is an "Independent Module" if it either requires the Runtime
+Library for execution after a Compilation Process, or makes use of an
+interface provided by the Runtime Library, but is not otherwise based
+on the Runtime Library.
+
+"GCC" means a version of the GNU Compiler Collection, with or without
+modifications, governed by version 3 (or a specified later version) of
+the GNU General Public License (GPL) with the option of using any
+subsequent versions published by the FSF.
+
+"GPL-compatible Software" is software whose conditions of propagation,
+modification and use would permit combination with GCC in accord with
+the license of GCC.
+
+"Target Code" refers to output from any compiler for a real or virtual
+target processor architecture, in executable form or suitable for
+input to an assembler, loader, linker and/or execution
+phase. Notwithstanding that, Target Code does not include data in any
+format that is used as a compiler intermediate representation, or used
+for producing a compiler intermediate representation.
+
+The "Compilation Process" transforms code entirely represented in
+non-intermediate languages designed for human-written code, and/or in
+Java Virtual Machine byte code, into Target Code. Thus, for example,
+use of source code generators and preprocessors need not be considered
+part of the Compilation Process, since the Compilation Process can be
+understood as starting with the output of the generators or
+preprocessors.
+
+A Compilation Process is "Eligible" if it is done using GCC, alone or
+with other GPL-compatible software, or if it is done without using any
+work based on GCC. For example, using non-GPL-compatible Software to
+optimize any GCC intermediate representations would not qualify as an
+Eligible Compilation Process.
+
+1. Grant of Additional Permission.
+
+You have permission to propagate a work of Target Code formed by
+combining the Runtime Library with Independent Modules, even if such
+propagation would otherwise violate the terms of GPLv3, provided that
+all Target Code was generated by Eligible Compilation Processes. You
+may then convey such a combination under terms of your choice,
+consistent with the licensing of the Independent Modules.
+
+2. No Weakening of GCC Copyleft.
+
+The availability of this Exception does not imply any general
+presumption that third-party software is unaffected by the copyleft
+requirements of the license of GCC.
+
+----
+
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/project/venv/lib/python2.7/site-packages/numpy/__config__.py b/project/venv/lib/python2.7/site-packages/numpy/__config__.py new file mode 100644 index 0000000..adfef3f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/__config__.py @@ -0,0 +1,39 @@ +# This file is generated by numpy's setup.py +# It contains system_info results at the time of building this package.
+__all__ = ["get_info","show"] + + + +import os +import sys + +extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') + +if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + os.environ.setdefault('PATH', '') + os.environ['PATH'] += os.pathsep + extra_dll_dir + +lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} +blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} +blis_info={} +openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} +openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} +lapack_mkl_info={} +blas_mkl_info={} + +def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + +def show(): + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... " + v[-60:] + print(" %s = %s" % (k,v)) + \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/numpy/__config__.pyc b/project/venv/lib/python2.7/site-packages/numpy/__config__.pyc new file mode 100644 index 0000000..73697b7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/__config__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/__init__.py new file mode 100644 index 0000000..ba88c73 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/__init__.py @@ -0,0 +1,219 @@ +""" +NumPy +===== + +Provides + 1. An array object of arbitrary homogeneous items + 2. Fast mathematical operations over arrays + 3. Linear Algebra, Fourier Transforms, Random Number Generation + +How to use the documentation +---------------------------- +Documentation is available in two forms: docstrings provided +with the code, and a loose standing reference guide, available from +`the NumPy homepage `_. + +We recommend exploring the docstrings using +`IPython `_, an advanced Python shell with +TAB-completion and introspection capabilities. See below for further +instructions. + +The docstring examples assume that `numpy` has been imported as `np`:: + + >>> import numpy as np + +Code snippets are indicated by three greater-than signs:: + + >>> x = 42 + >>> x = x + 1 + +Use the built-in ``help`` function to view a function's docstring:: + + >>> help(np.sort) + ... # doctest: +SKIP + +For some objects, ``np.info(obj)`` may provide additional help. This is +particularly true if you see the line "Help on ufunc object:" at the top +of the help() page. Ufuncs are implemented in C, not Python, for speed. +The native Python help() does not know how to view their help, but our +np.info() function does. + +To search for documents containing a keyword, do:: + + >>> np.lookfor('keyword') + ... # doctest: +SKIP + +General-purpose documents like a glossary and help on the basic concepts +of numpy are available under the ``doc`` sub-module:: + + >>> from numpy import doc + >>> help(doc) + ... 
# doctest: +SKIP + +Available subpackages +--------------------- +doc + Topical documentation on broadcasting, indexing, etc. +lib + Basic functions used by several sub-packages. +random + Core Random Tools +linalg + Core Linear Algebra Tools +fft + Core FFT routines +polynomial + Polynomial tools +testing + NumPy testing tools +f2py + Fortran to Python Interface Generator. +distutils + Enhancements to distutils with support for + Fortran compilers and more. + +Utilities +--------- +test + Run numpy unittests +show_config + Show numpy build configuration +dual + Overwrite certain functions with high-performance Scipy tools +matlib + Make everything matrices. +__version__ + NumPy version string + +Viewing documentation using IPython +----------------------------------- +Start IPython with the NumPy profile (``ipython -p numpy``), which will +import `numpy` under the alias `np`. Then, use the ``cpaste`` command to +paste examples into the shell. To see which functions are available in +`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use +``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow +down the list. To view the docstring for a function, use +``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view +the source code). + +Copies vs. in-place operation +----------------------------- +Most of the functions in `numpy` return a copy of the array argument +(e.g., `np.sort`). In-place versions of these functions are often +available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. +Exceptions to this rule are documented. + +""" +from __future__ import division, absolute_import, print_function + +import sys +import warnings + +from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning +from ._globals import _NoValue + +# We first need to detect if we're being called as part of the numpy setup +# procedure itself in a reliable manner. +try: + __NUMPY_SETUP__ +except NameError: + __NUMPY_SETUP__ = False + +if __NUMPY_SETUP__: + sys.stderr.write('Running from numpy source directory.\n') +else: + try: + from numpy.__config__ import show as show_config + except ImportError: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) + + from .version import git_revision as __git_revision__ + from .version import version as __version__ + + __all__ = ['ModuleDeprecationWarning', + 'VisibleDeprecationWarning'] + + # Allow distributors to run custom init code + from . import _distributor_init + + from . import core + from .core import * + from . import compat + from . import lib + from .lib import * + from . import linalg + from . import fft + from . import polynomial + from . import random + from . import ctypeslib + from . import ma + from .
import matrixlib as _mat + from .matrixlib import * + from .compat import long + + # Make these accessible from the numpy namespace + # but not imported by `from numpy import *` + if sys.version_info[0] >= 3: + from builtins import bool, int, float, complex, object, str + unicode = str + else: + from __builtin__ import bool, int, float, complex, object, unicode, str + + from .core import round, abs, max, min + # now that numpy modules are imported, can initialize limits + core.getlimits._register_known_types() + + __all__.extend(['__version__', 'show_config']) + __all__.extend(core.__all__) + __all__.extend(_mat.__all__) + __all__.extend(lib.__all__) + __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma']) + + # Filter out harmless Cython warnings + warnings.filterwarnings("ignore", message="numpy.dtype size changed") + warnings.filterwarnings("ignore", message="numpy.ufunc size changed") + warnings.filterwarnings("ignore", message="numpy.ndarray size changed") + + # oldnumeric and numarray were removed in 1.9. In case some packages import + # but do not use them, we define them here for backward compatibility. + oldnumeric = 'removed' + numarray = 'removed' + + # We don't actually use this ourselves anymore, but I'm not 100% sure that + # no-one else in the world is using it (though I hope not) + from .testing import Tester + + # Pytest testing + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + + def _sanity_check(): + """ + Quick sanity checks for common bugs caused by environment. + There are some cases, e.g. with a wrong BLAS ABI, that cause wrong + results under specific runtime conditions that are not necessarily + achieved during test suite runs, and it is useful to catch those early. + + See https://github.com/numpy/numpy/issues/8577 and other + similar bug reports. + + """ + try: + x = ones(2, dtype=float32) + if not abs(x.dot(x) - 2.0) < 1e-5: + raise AssertionError() + except AssertionError: + msg = ("The current Numpy installation ({!r}) fails to " + "pass simple sanity checks. This can be caused, for example, " + "by an incorrect BLAS library being linked in, or by mixing " + "package managers (pip, conda, apt, ...). Search closed " + "numpy issues for similar problems.") + raise RuntimeError(msg.format(__file__)) + + _sanity_check() + del _sanity_check diff --git a/project/venv/lib/python2.7/site-packages/numpy/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/__init__.pyc new file mode 100644 index 0000000..310aea8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.py b/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.py new file mode 100644 index 0000000..d893ba3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.py @@ -0,0 +1,10 @@ +""" Distributor init file + +Distributors: you can add custom code here to support particular distributions +of numpy. + +For example, this is a good place to put any checks for hardware requirements. + +The numpy standard source distribution will not put code in this file, so you +can safely replace this file with your own version.
+""" diff --git a/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.pyc b/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.pyc new file mode 100644 index 0000000..bcdf59a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/_globals.py b/project/venv/lib/python2.7/site-packages/numpy/_globals.py new file mode 100644 index 0000000..f5c0761 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/_globals.py @@ -0,0 +1,81 @@ +""" +Module defining global singleton classes. + +This module raises a RuntimeError if an attempt to reload it is made. In that +way the identities of the classes defined here are fixed and will remain so +even if numpy itself is reloaded. In particular, a function like the following +will still work correctly after numpy is reloaded:: + + def foo(arg=np._NoValue): + if arg is np._NoValue: + ... + +That was not the case when the singleton classes were defined in the numpy +``__init__.py`` file. See gh-7844 for a discussion of the reload problem that +motivated this module. + +""" +from __future__ import division, absolute_import, print_function + +__ALL__ = [ + 'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue' + ] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class ModuleDeprecationWarning(DeprecationWarning): + """Module deprecation warning. + + The nose tester turns ordinary Deprecation warnings into test failures. + That makes it hard to deprecate whole modules, because they get + imported by default. So this is a special Deprecation warning that the + nose tester will let pass without making tests fail. + + """ + + +ModuleDeprecationWarning.__module__ = 'numpy' + + +class VisibleDeprecationWarning(UserWarning): + """Visible deprecation warning. + + By default, python will not show deprecation warnings, so this class + can be used when a very visible warning is helpful, for example because + the usage is most likely a user bug. + + """ + + +VisibleDeprecationWarning.__module__ = 'numpy' + + +class _NoValueType(object): + """Special keyword value. + + The instance of this class may be used as the default value assigned to a + deprecated keyword in order to check if it has been given a user defined + value. + """ + __instance = None + def __new__(cls): + # ensure that only one instance exists + if not cls.__instance: + cls.__instance = super(_NoValueType, cls).__new__(cls) + return cls.__instance + + # needed for python 2 to preserve identity through a pickle + def __reduce__(self): + return (self.__class__, ()) + + def __repr__(self): + return "" + + +_NoValue = _NoValueType() diff --git a/project/venv/lib/python2.7/site-packages/numpy/_globals.pyc b/project/venv/lib/python2.7/site-packages/numpy/_globals.pyc new file mode 100644 index 0000000..87566c2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/_globals.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.py b/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.py new file mode 100644 index 0000000..8d1a381 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.py @@ -0,0 +1,209 @@ +""" +Pytest test running. + +This module implements the ``test()`` function for NumPy modules. 
The usual +boilerplate for doing that is to put the following in the module +``__init__.py`` file:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__).test + del PytestTester + + +Warnings filtering and other runtime settings should be dealt with in the +``pytest.ini`` file in the numpy repo root. The behavior of the test depends on +whether or not that file is found as follows: + +* ``pytest.ini`` is present (develop mode) + All warnings except those explicitly filtered out are raised as errors. +* ``pytest.ini`` is absent (release mode) + DeprecationWarnings and PendingDeprecationWarnings are ignored, other + warnings are passed through. + +In practice, tests run from the numpy repo are run in develop mode. That +includes the standard ``python runtests.py`` invocation. + +This module is imported by every numpy subpackage, so it lies at the top level +to simplify circular import issues. For the same reason, it contains no numpy +imports at module scope, instead importing numpy within function calls. +""" +from __future__ import division, absolute_import, print_function + +import sys +import os + +__all__ = ['PytestTester'] + + + +def _show_numpy_info(): + import numpy as np + + print("NumPy version %s" % np.__version__) + relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous + print("NumPy relaxed strides checking option:", relaxed_strides) + + +class PytestTester(object): + """ + Pytest test runner. + + This class is made available in ``numpy.testing``, and a test function + is typically added to a package's __init__.py like so:: + + from numpy.testing import PytestTester + test = PytestTester(__name__).test + del PytestTester + + Calling this test function finds and runs all tests associated with the + module and all its sub-modules. + + Attributes + ---------- + module_name : str + Full path to the package to test. + + Parameters + ---------- + module_name : module name + The name of the module to test. + + """ + def __init__(self, module_name): + self.module_name = module_name + + def __call__(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, durations=-1, tests=None): + """ + Run tests for module using pytest. + + Parameters + ---------- + label : {'fast', 'full'}, optional + Identifies the tests to run. When set to 'fast', tests decorated + with `pytest.mark.slow` are skipped, when 'full', the slow marker + is ignored. + verbose : int, optional + Verbosity value for test outputs, in the range 1-3. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytest. + doctests : bool, optional + .. note:: Not supported + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + Requires installation of (pip) pytest-cov. + durations : int, optional + If < 0, do nothing; if 0, report the time of all tests; if > 0, + report the time of the `durations` slowest tests. Default is -1. + tests : test or list of tests + Tests to be executed with pytest '--pyargs' + + Returns + ------- + result : bool + Return True on success, False otherwise. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for + it. For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + ... + 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds + >>> result + True + + """ + import pytest + import warnings + + #FIXME This is no longer needed?
Assume it was for use in tests. + # cap verbosity at 3, which is equivalent to the pytest '-vv' option + #from . import utils + #verbose = min(int(verbose), 3) + #utils.verbose = verbose + # + + module = sys.modules[self.module_name] + module_path = os.path.abspath(module.__path__[0]) + + # setup the pytest arguments + pytest_args = ["-l"] + + # offset verbosity. The "-q" cancels a "-v". + pytest_args += ["-q"] + + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + with warnings.catch_warnings(): + warnings.simplefilter("always") + from numpy.distutils import cpuinfo + + # Filter out annoying import messages. Want these in both develop and + # release mode. + pytest_args += [ + "-W ignore:Not importing directory", + "-W ignore:numpy.dtype size changed", + "-W ignore:numpy.ufunc size changed", + "-W ignore::UserWarning:cpuinfo", + ] + + # When testing matrices, ignore their PendingDeprecationWarnings + pytest_args += [ + "-W ignore:the matrix subclass is not", + ] + + # Ignore python2.7 -3 warnings + pytest_args += [ + r"-W ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning", + r"-W ignore:in 3\.x, __setslice__:DeprecationWarning", + r"-W ignore:in 3\.x, __getslice__:DeprecationWarning", + r"-W ignore:buffer\(\) not supported in 3\.x:DeprecationWarning", + r"-W ignore:CObject type is not supported in 3\.x:DeprecationWarning", + r"-W ignore:comparing unequal types not supported in 3\.x:DeprecationWarning", + r"-W ignore:the commands module has been removed in Python 3\.0:DeprecationWarning", + r"-W ignore:The 'new' module has been removed in Python 3\.0:DeprecationWarning", + ] + + + if doctests: + raise ValueError("Doctests not supported") + + if extra_argv: + pytest_args += list(extra_argv) + + if verbose > 1: + pytest_args += ["-" + "v"*(verbose - 1)] + + if coverage: + pytest_args += ["--cov=" + module_path] + + if label == "fast": + pytest_args += ["-m", "not slow"] + elif label != "full": + pytest_args += ["-m", label] + + if durations >= 0: + pytest_args += ["--durations=%s" % durations] + + if tests is None: + tests = [self.module_name] + + pytest_args += ["--pyargs"] + list(tests) + + + # run tests. + _show_numpy_info() + + try: + code = pytest.main(pytest_args) + except SystemExit as exc: + code = exc.code + + return code == 0 diff --git a/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.pyc b/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.pyc new file mode 100644 index 0000000..871cade Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.py new file mode 100644 index 0000000..5b371f5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.py @@ -0,0 +1,20 @@ +""" +Compatibility module. + +This module contains duplicated code from Python itself or 3rd party +extensions, which may be included for the following reasons: + + * compatibility + * we may only need a small subset of the copied library/module + +""" +from __future__ import division, absolute_import, print_function + +from . import _inspect +from . 
import py3k +from ._inspect import getargspec, formatargspec +from .py3k import * + +__all__ = [] +__all__.extend(_inspect.__all__) +__all__.extend(py3k.__all__) diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.pyc new file mode 100644 index 0000000..49a0de9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.py b/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.py new file mode 100644 index 0000000..439d0d2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.py @@ -0,0 +1,193 @@ +"""Subset of inspect module from upstream python + +We use this instead of upstream because upstream inspect is slow to import, and +significantly contributes to numpy import times. Importing this copy has almost +no overhead. + +""" +from __future__ import division, absolute_import, print_function + +import types + +__all__ = ['getargspec', 'formatargspec'] + +# ----------------------------------------------------------- type-checking +def ismethod(object): + """Return true if the object is an instance method. + + Instance method objects provide these attributes: + __doc__ documentation string + __name__ name with which this method was defined + im_class class object in which this method belongs + im_func function object containing implementation of method + im_self instance to which this method is bound, or None + + """ + return isinstance(object, types.MethodType) + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + func_code code object containing compiled function bytecode + func_defaults tuple of any default values for arguments + func_doc (same as __doc__) + func_globals global namespace in which this function was defined + func_name (same as __name__) + + """ + return isinstance(object, types.FunctionType) + +def iscode(object): + """Return true if the object is a code object. + + Code objects provide these attributes: + co_argcount number of arguments (not including * or ** args) + co_code string of raw compiled bytecode + co_consts tuple of constants used in the bytecode + co_filename name of file in which this code object was created + co_firstlineno number of first line in Python source code + co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg + co_lnotab encoded mapping of line numbers to bytecode indices + co_name name with which this code object was defined + co_names tuple of names of local variables + co_nlocals number of local variables + co_stacksize virtual machine stack space required + co_varnames tuple of names of arguments and local variables + + """ + return isinstance(object, types.CodeType) + +# ------------------------------------------------ argument list extraction +# These constants are from Python's compile.h. +CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 + +def getargs(co): + """Get information about the arguments accepted by a code object. + + Three things are returned: (args, varargs, varkw), where 'args' is + a list of argument names (possibly containing nested lists), and + 'varargs' and 'varkw' are the names of the * and ** arguments or None. 
+ + """ + + if not iscode(co): + raise TypeError('arg is not a code object') + + nargs = co.co_argcount + names = co.co_varnames + args = list(names[:nargs]) + + # The following acrobatics are for anonymous (tuple) arguments. + # Which we do not need to support, so remove to avoid importing + # the dis module. + for i in range(nargs): + if args[i][:1] in ['', '.']: + raise TypeError("tuple function arguments are not supported") + varargs = None + if co.co_flags & CO_VARARGS: + varargs = co.co_varnames[nargs] + nargs = nargs + 1 + varkw = None + if co.co_flags & CO_VARKEYWORDS: + varkw = co.co_varnames[nargs] + return args, varargs, varkw + +def getargspec(func): + """Get the names and default values of a function's arguments. + + A tuple of four things is returned: (args, varargs, varkw, defaults). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'defaults' is an n-tuple of the default values of the last n arguments. + + """ + + if ismethod(func): + func = func.__func__ + if not isfunction(func): + raise TypeError('arg is not a Python function') + args, varargs, varkw = getargs(func.__code__) + return args, varargs, varkw, func.__defaults__ + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame. + + """ + args, varargs, varkw = getargs(frame.f_code) + return args, varargs, varkw, frame.f_locals + +def joinseq(seq): + if len(seq) == 1: + return '(' + seq[0] + ',)' + else: + return '(' + ', '.join(seq) + ')' + +def strseq(object, convert, join=joinseq): + """Recursively walk a sequence, stringifying each element. + + """ + if type(object) in [list, tuple]: + return join([strseq(_o, convert, join) for _o in object]) + else: + return convert(object) + +def formatargspec(args, varargs=None, varkw=None, defaults=None, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargspec. + + The first four arguments are (args, varargs, varkw, defaults). The + other four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i in range(len(args)): + spec = strseq(args[i], formatarg, join) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(varargs)) + if varkw is not None: + specs.append(formatvarkw(varkw)) + return '(' + ', '.join(specs) + ')' + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). 
The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + def convert(name, locals=locals, + formatarg=formatarg, formatvalue=formatvalue): + return formatarg(name) + formatvalue(locals[name]) + specs = [strseq(arg, convert, join) for arg in args] + + if varargs: + specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) + if varkw: + specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) + return '(' + ', '.join(specs) + ')' diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.pyc new file mode 100644 index 0000000..c453d4d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.py b/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.py new file mode 100644 index 0000000..0672927 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.py @@ -0,0 +1,243 @@ +""" +Python 3 compatibility tools. + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', + 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', + 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', + 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', + 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] + +import sys +try: + from pathlib import Path, PurePath +except ImportError: + Path = PurePath = None + +if sys.version_info[0] >= 3: + import io + + long = int + integer_types = (int,) + basestring = str + unicode = str + bytes = bytes + + def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + + def asbytes(s): + if isinstance(s, bytes): + return s + return str(s).encode('latin1') + + def asstr(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + + def isfileobj(f): + return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)) + + def open_latin1(filename, mode='r'): + return open(filename, mode=mode, encoding='iso-8859-1') + + def sixu(s): + return s + + strchar = 'U' + + +else: + bytes = str + long = long + basestring = basestring + unicode = unicode + integer_types = (int, long) + asbytes = str + asstr = str + strchar = 'S' + + def isfileobj(f): + return isinstance(f, file) + + def asunicode(s): + if isinstance(s, unicode): + return s + return str(s).decode('ascii') + + def open_latin1(filename, mode='r'): + return open(filename, mode=mode) + + def sixu(s): + return unicode(s, 'unicode_escape') + + +def getexception(): + return sys.exc_info()[1] + +def asbytes_nested(x): + if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): + return [asbytes_nested(y) for y in x] + else: + return asbytes(x) + +def asunicode_nested(x): + if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): + return [asunicode_nested(y) for y in x] + else: + return asunicode(x) + +def is_pathlib_path(obj): + """ + Check whether obj is a pathlib.Path object. + + Prefer using `isinstance(obj, os_PathLike)` instead of this function. + """ + return Path is not None and isinstance(obj, Path) + +# from Python 3.7 +class contextlib_nullcontext(object): + """Context manager that does no additional processing. 
+ + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True + """ + + def __init__(self, enter_result=None): + self.enter_result = enter_result + + def __enter__(self): + return self.enter_result + + def __exit__(self, *excinfo): + pass + + +if sys.version_info[0] >= 3 and sys.version_info[1] >= 4: + def npy_load_module(name, fn, info=None): + """ + Load a module. + + .. versionadded:: 1.11.2 + + Parameters + ---------- + name : str + Full module name. + fn : str + Path to module file. + info : tuple, optional + Only here for backward compatibility with Python 2.*. + + Returns + ------- + mod : module + + """ + import importlib.machinery + return importlib.machinery.SourceFileLoader(name, fn).load_module() +else: + def npy_load_module(name, fn, info=None): + """ + Load a module. + + .. versionadded:: 1.11.2 + + Parameters + ---------- + name : str + Full module name. + fn : str + Path to module file. + info : tuple, optional + Information as returned by `imp.find_module` + (suffix, mode, type). + + Returns + ------- + mod : module + + """ + import imp + import os + if info is None: + path = os.path.dirname(fn) + fo, fn, info = imp.find_module(name, [path]) + else: + fo = open(fn, info[1]) + try: + mod = imp.load_module(name, fo, fn, info) + finally: + fo.close() + return mod + +# backport abc.ABC +import abc +if sys.version_info[:2] >= (3, 4): + abc_ABC = abc.ABC +else: + abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) + + +# Backport os.fs_path, os.PathLike, and PurePath.__fspath__ +if sys.version_info[:2] >= (3, 6): + import os + os_fspath = os.fspath + os_PathLike = os.PathLike +else: + def _PurePath__fspath__(self): + return str(self) + + class os_PathLike(abc_ABC): + """Abstract base class for implementing the file system path protocol.""" + + @abc.abstractmethod + def __fspath__(self): + """Return the file system path representation of the object.""" + raise NotImplementedError + + @classmethod + def __subclasshook__(cls, subclass): + if PurePath is not None and issubclass(subclass, PurePath): + return True + return hasattr(subclass, '__fspath__') + + + def os_fspath(path): + """Return the path representation of a path-like object. + If str or bytes is passed in, it is returned unchanged. Otherwise the + os.PathLike interface is used to get the path representation. If the + path representation is not str or bytes, TypeError is raised. If the + provided path is not str, bytes, or os.PathLike, TypeError is raised. + """ + if isinstance(path, (unicode, bytes)): + return path + + # Work from the object's type to match method resolution of other magic + # methods. 
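+    # Looking up __fspath__ on type(path) rather than on the instance
+    # mirrors how the interpreter resolves other magic methods: an
+    # attribute named __fspath__ set on the instance itself is ignored.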
+ path_type = type(path) + try: + path_repr = path_type.__fspath__(path) + except AttributeError: + if hasattr(path_type, '__fspath__'): + raise + elif PurePath is not None and issubclass(path_type, PurePath): + return _PurePath__fspath__(path) + else: + raise TypeError("expected str, bytes or os.PathLike object, " + "not " + path_type.__name__) + if isinstance(path_repr, (unicode, bytes)): + return path_repr + else: + raise TypeError("expected {}.__fspath__() to return str or bytes, " + "not {}".format(path_type.__name__, + type(path_repr).__name__)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.pyc new file mode 100644 index 0000000..94e7c18 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/setup.py b/project/venv/lib/python2.7/site-packages/numpy/compat/setup.py new file mode 100644 index 0000000..8828574 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/compat/setup.py @@ -0,0 +1,12 @@ +from __future__ import division, print_function + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('compat', parent_package, top_path) + config.add_data_dir('tests') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/setup.pyc new file mode 100644 index 0000000..20a6b64 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.pyc new file mode 100644 index 0000000..1d7fa18 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.py b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.py new file mode 100644 index 0000000..9bb316a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.py @@ -0,0 +1,26 @@ +from __future__ import division, absolute_import, print_function + +from os.path import join + +from numpy.compat import isfileobj, os_fspath +from numpy.testing import assert_ +from numpy.testing import tempdir + + +def test_isfileobj(): + with tempdir(prefix="numpy_test_compat_") as folder: + filename = join(folder, 'a.bin') + + with open(filename, 'wb') as f: + assert_(isfileobj(f)) + + with open(filename, 'ab') as f: + assert_(isfileobj(f)) + + with open(filename, 'rb') as f: + assert_(isfileobj(f)) + + +def test_os_fspath_strings(): + for string_path in (b'/a/b/c.d', u'/a/b/c.d'): + assert_(os_fspath(string_path) == string_path) diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.pyc new file mode 100644 index 0000000..74cb905 Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/conftest.py b/project/venv/lib/python2.7/site-packages/numpy/conftest.py new file mode 100644 index 0000000..4d4d055 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/conftest.py @@ -0,0 +1,60 @@ +""" +Pytest configuration and fixtures for the Numpy test suite. +""" +from __future__ import division, absolute_import, print_function + +import pytest +import numpy + +from numpy.core._multiarray_tests import get_fpu_mode + + +_old_fpu_mode = None +_collect_results = {} + + +#FIXME when yield tests are gone. +@pytest.hookimpl() +def pytest_itemcollected(item): + """ + Check FPU precision mode was not changed during test collection. + + The clumsy way we do it here is mainly necessary because numpy + still uses yield tests, which can execute code at test collection + time. + """ + global _old_fpu_mode + + mode = get_fpu_mode() + + if _old_fpu_mode is None: + _old_fpu_mode = mode + elif mode != _old_fpu_mode: + _collect_results[item] = (_old_fpu_mode, mode) + _old_fpu_mode = mode + + +@pytest.fixture(scope="function", autouse=True) +def check_fpu_mode(request): + """ + Check FPU precision mode was not changed during the test. + """ + old_mode = get_fpu_mode() + yield + new_mode = get_fpu_mode() + + if old_mode != new_mode: + raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" + " during the test".format(old_mode, new_mode)) + + collect_result = _collect_results.get(request.node) + if collect_result is not None: + old_mode, new_mode = collect_result + raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" + " when collecting the test".format(old_mode, + new_mode)) + + +@pytest.fixture(autouse=True) +def add_np(doctest_namespace): + doctest_namespace['np'] = numpy diff --git a/project/venv/lib/python2.7/site-packages/numpy/conftest.pyc b/project/venv/lib/python2.7/site-packages/numpy/conftest.pyc new file mode 100644 index 0000000..88ac4a8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/conftest.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/core/__init__.py new file mode 100644 index 0000000..c6a4e93 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/__init__.py @@ -0,0 +1,171 @@ +from __future__ import division, absolute_import, print_function + +from .info import __doc__ +from numpy.version import version as __version__ + +import os + +# on Windows NumPy loads an important OpenBLAS-related DLL +# and the code below aims to alleviate issues with DLL +# path resolution portability with an absolute path DLL load +if os.name == 'nt': + from ctypes import WinDLL + import glob + # convention for storing / loading the DLL from + # numpy/.libs/, if present + libs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', '.libs')) + DLL_filenames = [] + if os.path.isdir(libs_path): + for filename in glob.glob(os.path.join(libs_path, '*openblas*dll')): + # NOTE: would it change behavior to load ALL + # DLLs at this path vs. the name restriction? 
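+            # ctypes.WinDLL calls LoadLibrary immediately with the absolute
+            # path, so the OpenBLAS DLL is already resident (and found
+            # regardless of the process PATH) by the time the extension
+            # modules that depend on it are imported.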
+ WinDLL(os.path.abspath(filename)) + DLL_filenames.append(filename) + if len(DLL_filenames) > 1: + import warnings + warnings.warn("loaded more than 1 DLL from .libs:\n%s" % + "\n".join(DLL_filenames), + stacklevel=1) + +# disables OpenBLAS affinity setting of the main thread that limits +# python threads or processes to one core +env_added = [] +for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: + if envkey not in os.environ: + os.environ[envkey] = '1' + env_added.append(envkey) + +try: + from . import multiarray +except ImportError as exc: + import sys + msg = """ + +IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! + +Importing the multiarray numpy extension module failed. Most +likely you are trying to import a failed build of numpy. +Here is how to proceed: +- If you're working with a numpy git repository, try `git clean -xdf` + (removes all files not under version control) and rebuild numpy. +- If you are simply trying to use the numpy version that you have installed: + your installation is broken - please reinstall numpy. +- If you have already reinstalled and that did not fix the problem, then: + 1. Check that you are using the Python you expect (you're using %s), + and that you have no directories in your PATH or PYTHONPATH that can + interfere with the Python and numpy versions you're trying to use. + 2. If (1) looks fine, you can open a new issue at + https://github.com/numpy/numpy/issues. Please include details on: + - how you installed Python + - how you installed numpy + - your operating system + - whether or not you have multiple versions of Python installed + - if you built from source, your compiler versions and ideally a build log + + Note: this error has many possible causes, so please don't comment on + an existing issue about this - open a new one instead. + +Original error was: %s +""" % (sys.executable, exc) + raise ImportError(msg) +finally: + for envkey in env_added: + del os.environ[envkey] +del envkey +del env_added +del os + +from . import umath + +# Check that multiarray,umath are pure python modules wrapping +# _multiarray_umath and not either of the old c-extension modules +if not (hasattr(multiarray, '_multiarray_umath') and + hasattr(umath, '_multiarray_umath')): + import sys + path = sys.modules['numpy'].__path__ + msg = ("Something is wrong with the numpy installation. " + "While importing we detected an older version of " + "numpy in {}. One method of fixing this is to repeatedly uninstall " + "numpy until none is found, then reinstall this version.") + raise ImportError(msg.format(path)) + +from . import numerictypes as nt +multiarray.set_typeDict(nt.sctypeDict) +from . import numeric +from .numeric import * +from . import fromnumeric +from .fromnumeric import * +from . import defchararray as char +from . import records as rec +from .records import * +from .memmap import * +from .defchararray import chararray +from . import function_base +from .function_base import * +from . import machar +from .machar import * +from . import getlimits +from .getlimits import * +from . import shape_base +from .shape_base import * +from . import einsumfunc +from .einsumfunc import * +del nt + +from .fromnumeric import amax as max, amin as min, round_ as round +from .numeric import absolute as abs + +# do this after everything else, to minimize the chance of this misleadingly +# appearing in an import-time traceback +from . import _add_newdocs +# add these for module-freeze analysis (like PyInstaller) +from . import _dtype_ctypes +from . 
import _internal +from . import _dtype +from . import _methods + +__all__ = ['char', 'rec', 'memmap'] +__all__ += numeric.__all__ +__all__ += fromnumeric.__all__ +__all__ += rec.__all__ +__all__ += ['chararray'] +__all__ += function_base.__all__ +__all__ += machar.__all__ +__all__ += getlimits.__all__ +__all__ += shape_base.__all__ +__all__ += einsumfunc.__all__ + +# Make it possible so that ufuncs can be pickled +# Here are the loading and unloading functions +# The name numpy.core._ufunc_reconstruct must be +# available for unpickling to work. +def _ufunc_reconstruct(module, name): + # The `fromlist` kwarg is required to ensure that `mod` points to the + # inner-most module rather than the parent package when module name is + # nested. This makes it possible to pickle non-toplevel ufuncs such as + # scipy.special.expit for instance. + mod = __import__(module, fromlist=[name]) + return getattr(mod, name) + +def _ufunc_reduce(func): + from pickle import whichmodule + name = func.__name__ + return _ufunc_reconstruct, (whichmodule(func, name), name) + + +import sys +if sys.version_info[0] >= 3: + import copyreg +else: + import copy_reg as copyreg + +copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct) +# Unclutter namespace (must keep _ufunc_reconstruct for unpickling) +del copyreg +del sys +del _ufunc_reduce + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/__init__.pyc new file mode 100644 index 0000000..d3cd51b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.py b/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.py new file mode 100644 index 0000000..a242a74 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.py @@ -0,0 +1,6936 @@ +""" +This is only meant to add docs to objects defined in C-extension modules. +The purpose is to allow easier editing of the docstrings without +requiring a re-compile. + +NOTE: Many of the methods of ndarray have corresponding functions. + If you update these docstrings, please keep also the ones in + core/fromnumeric.py, core/defmatrix.py up-to-date. + +""" +from __future__ import division, absolute_import, print_function + +from numpy.core import numerictypes as _numerictypes +from numpy.core import dtype +from numpy.core.function_base import add_newdoc + +############################################################################### +# +# flatiter +# +# flatiter needs a toplevel description +# +############################################################################### + +add_newdoc('numpy.core', 'flatiter', + """ + Flat iterator object to iterate over arrays. + + A `flatiter` iterator is returned by ``x.flat`` for any array `x`. + It allows iterating over the array as if it were a 1-D array, + either in a for-loop or by calling its `next` method. + + Iteration is done in row-major, C-style order (the last + index varying the fastest). The iterator can also be indexed using + basic slicing or advanced indexing. + + See Also + -------- + ndarray.flat : Return a flat iterator over an array. + ndarray.flatten : Returns a flattened copy of an array. + + Notes + ----- + A `flatiter` iterator can not be constructed directly from Python code + by calling the `flatiter` constructor. 
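+    A minimal sketch of this restriction (an illustrative addition, not an
+    upstream example): ``x.flat`` is the supported entry point, and calling
+    the type directly fails::
+
+        >>> isinstance(np.arange(3).flat, np.flatiter)  # x.flat is the entry point
+        True
+        >>> np.flatiter()  # illustrative: direct construction is rejected
+        Traceback (most recent call last):
+            ...
+        TypeError: cannot create 'numpy.flatiter' instances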
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> type(fl)
+    <type 'numpy.flatiter'>
+    >>> for item in fl:
+    ...     print(item)
+    ...
+    0
+    1
+    2
+    3
+    4
+    5
+
+    >>> fl[2:4]
+    array([2, 3])
+
+    """)
+
+# flatiter attributes
+
+add_newdoc('numpy.core', 'flatiter', ('base',
+    """
+    A reference to the array that is iterated over.
+
+    Examples
+    --------
+    >>> x = np.arange(5)
+    >>> fl = x.flat
+    >>> fl.base is x
+    True
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('coords',
+    """
+    An N-dimensional tuple of current coordinates.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.coords
+    (0, 0)
+    >>> fl.next()
+    0
+    >>> fl.coords
+    (0, 1)
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('index',
+    """
+    Current flat index into the array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.index
+    0
+    >>> fl.next()
+    0
+    >>> fl.index
+    1
+
+    """))
+
+# flatiter functions
+
+add_newdoc('numpy.core', 'flatiter', ('__array__',
+    """__array__(type=None) Get array from iterator
+
+    """))
+
+
+add_newdoc('numpy.core', 'flatiter', ('copy',
+    """
+    copy()
+
+    Get a copy of the iterator as a 1-D array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> fl = x.flat
+    >>> fl.copy()
+    array([0, 1, 2, 3, 4, 5])
+
+    """))
+
+
+###############################################################################
+#
+# nditer
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'nditer',
+    """
+    Efficient multi-dimensional iterator object to iterate over arrays.
+    To get started using this object, see the
+    :ref:`introductory guide to array iteration <arrays.nditer>`.
+
+    Parameters
+    ----------
+    op : ndarray or sequence of array_like
+        The array(s) to iterate over.
+    flags : sequence of str, optional
+        Flags to control the behavior of the iterator.
+
+        * "buffered" enables buffering when required.
+        * "c_index" causes a C-order index to be tracked.
+        * "f_index" causes a Fortran-order index to be tracked.
+        * "multi_index" causes a multi-index, or a tuple of indices
+          with one per iteration dimension, to be tracked.
+        * "common_dtype" causes all the operands to be converted to
+          a common data type, with copying or buffering as necessary.
+        * "copy_if_overlap" causes the iterator to determine if read
+          operands have overlap with write operands, and make temporary
+          copies as necessary to avoid overlap. False positives (needless
+          copying) are possible in some cases.
+        * "delay_bufalloc" delays allocation of the buffers until
+          a reset() call is made. Allows "allocate" operands to
+          be initialized before their values are copied into the buffers.
+        * "external_loop" causes the `values` given to be
+          one-dimensional arrays with multiple values instead of
+          zero-dimensional arrays.
+        * "grow_inner" allows the `value` array sizes to be made
+          larger than the buffer size when both "buffered" and
+          "external_loop" are used.
+        * "ranged" allows the iterator to be restricted to a sub-range
+          of the iterindex values.
+        * "refs_ok" enables iteration of reference types, such as
+          object arrays.
+        * "reduce_ok" enables iteration of "readwrite" operands
+          which are broadcasted, also known as reduction operands.
+        * "zerosize_ok" allows `itersize` to be zero.
+    op_flags : list of list of str, optional
+        This is a list of flags for each operand. At minimum, one of
+        "readonly", "readwrite", or "writeonly" must be specified.
+ + * "readonly" indicates the operand will only be read from. + * "readwrite" indicates the operand will be read from and written to. + * "writeonly" indicates the operand will only be written to. + * "no_broadcast" prevents the operand from being broadcasted. + * "contig" forces the operand data to be contiguous. + * "aligned" forces the operand data to be aligned. + * "nbo" forces the operand data to be in native byte order. + * "copy" allows a temporary read-only copy if required. + * "updateifcopy" allows a temporary read-write copy if required. + * "allocate" causes the array to be allocated if it is None + in the `op` parameter. + * "no_subtype" prevents an "allocate" operand from using a subtype. + * "arraymask" indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * 'writemasked' indicates that only elements where the chosen + 'arraymask' operand is True will be written to. + * "overlap_assume_elementwise" can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when "copy_if_overlap" is present. + op_dtypes : dtype or tuple of dtype(s), optional + The required data type(s) of the operands. If copying or buffering + is enabled, the data will be converted to/from their original types. + order : {'C', 'F', 'A', 'K'}, optional + Controls the iteration order. 'C' means C order, 'F' means + Fortran order, 'A' means 'F' order if all the arrays are Fortran + contiguous, 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. This also + affects the element memory order of "allocate" operands, as they + are allocated to be compatible with iteration order. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when making a copy + or buffering. Setting this to 'unsafe' is not recommended, + as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + op_axes : list of list of ints, optional + If provided, is a list of ints or None for each operands. + The list of axes for an operand is a mapping from the dimensions + of the iterator to the dimensions of the operand. A value of + -1 can be placed for entries, causing that dimension to be + treated as "newaxis". + itershape : tuple of ints, optional + The desired shape of the iterator. This allows "allocate" operands + with a dimension mapped by op_axes not corresponding to a dimension + of a different operand to get a value not equal to 1 for that + dimension. + buffersize : int, optional + When buffering is enabled, controls the size of the temporary + buffers. Set to 0 for the default value. + + Attributes + ---------- + dtypes : tuple of dtype(s) + The data types of the values provided in `value`. This may be + different from the operand data types if buffering is enabled. + Valid only before the iterator is closed. + finished : bool + Whether the iteration over the operands is finished or not. 
+    has_delayed_bufalloc : bool
+        If True, the iterator was created with the "delay_bufalloc" flag,
+        and no reset() function was called on it yet.
+    has_index : bool
+        If True, the iterator was created with either the "c_index" or
+        the "f_index" flag, and the property `index` can be used to
+        retrieve it.
+    has_multi_index : bool
+        If True, the iterator was created with the "multi_index" flag,
+        and the property `multi_index` can be used to retrieve it.
+    index
+        When the "c_index" or "f_index" flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and `has_index` is False.
+    iterationneedsapi : bool
+        Whether iteration requires access to the Python API, for example
+        if one of the operands is an object array.
+    iterindex : int
+        An index which matches the order of iteration.
+    itersize : int
+        Size of the iterator.
+    itviews
+        Structured view(s) of `operands` in memory, matching the reordered
+        and optimized iterator access pattern. Valid only before the iterator
+        is closed.
+    multi_index
+        When the "multi_index" flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and `has_multi_index` is False.
+    ndim : int
+        The iterator's dimension.
+    nop : int
+        The number of iterator operands.
+    operands : tuple of operand(s)
+        The array(s) to be iterated over. Valid only before the iterator is
+        closed.
+    shape : tuple of ints
+        Shape tuple, the shape of the iterator.
+    value
+        Value of `operands` at current iteration. Normally, this is a
+        tuple of array scalars, but if the flag "external_loop" is used,
+        it is a tuple of one dimensional arrays.
+
+    Notes
+    -----
+    `nditer` supersedes `flatiter`. The iterator implementation behind
+    `nditer` is also exposed by the NumPy C API.
+
+    The Python exposure supplies two iteration interfaces, one which follows
+    the Python iterator protocol, and another which mirrors the C-style
+    do-while pattern. The native Python approach is better in most cases, but
+    if you need the iterator's coordinates or index, use the C-style pattern.
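+    A small sketch of the index-tracking flags described above (an
+    illustrative addition, not an upstream example)::
+
+        >>> a = np.arange(6).reshape(2, 3)
+        >>> it = np.nditer(a, flags=['multi_index'])
+        >>> while not it.finished:
+        ...     print(it.multi_index, it[0])  # multi-index plus current item
+        ...     _ = it.iternext()             # returns False when exhausted
+        (0, 0) 0
+        (0, 1) 1
+        (0, 2) 2
+        (1, 0) 3
+        (1, 1) 4
+        (1, 2) 5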
+ + Examples + -------- + Here is how we might write an ``iter_add`` function, using the + Python iterator protocol:: + + def iter_add_py(x, y, out=None): + addop = np.add + it = np.nditer([x, y, out], [], + [['readonly'], ['readonly'], ['writeonly','allocate']]) + with it: + for (a, b, c) in it: + addop(a, b, out=c) + return it.operands[2] + + Here is the same function, but following the C-style pattern:: + + def iter_add(x, y, out=None): + addop = np.add + + it = np.nditer([x, y, out], [], + [['readonly'], ['readonly'], ['writeonly','allocate']]) + with it: + while not it.finished: + addop(it[0], it[1], out=it[2]) + it.iternext() + + return it.operands[2] + + Here is an example outer product function:: + + def outer_it(x, y, out=None): + mulop = np.multiply + + it = np.nditer([x, y, out], ['external_loop'], + [['readonly'], ['readonly'], ['writeonly', 'allocate']], + op_axes=[list(range(x.ndim)) + [-1] * y.ndim, + [-1] * x.ndim + list(range(y.ndim)), + None]) + with it: + for (a, b, c) in it: + mulop(a, b, out=c) + return it.operands[2] + + >>> a = np.arange(2)+1 + >>> b = np.arange(3)+1 + >>> outer_it(a,b) + array([[1, 2, 3], + [2, 4, 6]]) + + Here is an example function which operates like a "lambda" ufunc:: + + def luf(lamdaexpr, *args, **kwargs): + "luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)" + nargs = len(args) + op = (kwargs.get('out',None),) + args + it = np.nditer(op, ['buffered','external_loop'], + [['writeonly','allocate','no_broadcast']] + + [['readonly','nbo','aligned']]*nargs, + order=kwargs.get('order','K'), + casting=kwargs.get('casting','safe'), + buffersize=kwargs.get('buffersize',0)) + while not it.finished: + it[0] = lamdaexpr(*it[1:]) + it.iternext() + return it.operands[0] + + >>> a = np.arange(5) + >>> b = np.ones(5) + >>> luf(lambda i,j:i*i + j/2, a, b) + array([ 0.5, 1.5, 4.5, 9.5, 16.5]) + + If operand flags `"writeonly"` or `"readwrite"` are used the operands may + be views into the original data with the `WRITEBACKIFCOPY` flag. In this case + nditer must be used as a context manager or the nditer.close + method must be called before using the result. The temporary + data will be written back to the original data when the `__exit__` + function is called but not before: + + >>> a = np.arange(6, dtype='i4')[::-2] + >>> with nditer(a, [], + ... [['writeonly', 'updateifcopy']], + ... casting='unsafe', + ... op_dtypes=[np.dtype('f4')]) as i: + ... x = i.operands[0] + ... x[:] = [-1, -2, -3] + ... # a still unchanged here + >>> a, x + array([-1, -2, -3]), array([-1, -2, -3]) + + It is important to note that once the iterator is exited, dangling + references (like `x` in the example) may or may not share data with + the original data `a`. If writeback semantics were active, i.e. if + `x.base.flags.writebackifcopy` is `True`, then exiting the iterator + will sever the connection between `x` and `a`, writing to `x` will + no longer write to `a`. If writeback semantics are not active, then + `x.data` will still point at some part of `a.data`, and writing to + one will affect the other. + + """) + +# nditer methods + +add_newdoc('numpy.core', 'nditer', ('copy', + """ + copy() + + Get a copy of the iterator in its current state. 
+ + Examples + -------- + >>> x = np.arange(10) + >>> y = x + 1 + >>> it = np.nditer([x, y]) + >>> it.next() + (array(0), array(1)) + >>> it2 = it.copy() + >>> it2.next() + (array(1), array(2)) + + """)) + +add_newdoc('numpy.core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + +add_newdoc('numpy.core', 'nditer', ('debug_print', + """ + debug_print() + + Print the current state of the `nditer` instance and debug info to stdout. + + """)) + +add_newdoc('numpy.core', 'nditer', ('enable_external_loop', + """ + enable_external_loop() + + When the "external_loop" was not used during construction, but + is desired, this modifies the iterator to behave as if the flag + was specified. + + """)) + +add_newdoc('numpy.core', 'nditer', ('iternext', + """ + iternext() + + Check whether iterations are left, and perform a single internal iteration + without returning the result. Used in the C-style pattern do-while + pattern. For an example, see `nditer`. + + Returns + ------- + iternext : bool + Whether or not there are iterations left. + + """)) + +add_newdoc('numpy.core', 'nditer', ('remove_axis', + """ + remove_axis(i) + + Removes axis `i` from the iterator. Requires that the flag "multi_index" + be enabled. + + """)) + +add_newdoc('numpy.core', 'nditer', ('remove_multi_index', + """ + remove_multi_index() + + When the "multi_index" flag was specified, this removes it, allowing + the internal iteration structure to be optimized further. + + """)) + +add_newdoc('numpy.core', 'nditer', ('reset', + """ + reset() + + Reset the iterator to its initial state. + + """)) + +add_newdoc('numpy.core', 'nested_iters', + """ + Create nditers for use in nested loops + + Create a tuple of `nditer` objects which iterate in nested loops over + different axes of the op argument. The first iterator is used in the + outermost loop, the last in the innermost loop. Advancing one will change + the subsequent iterators to point at its new element. + + Parameters + ---------- + op : ndarray or sequence of array_like + The array(s) to iterate over. + + axes : list of list of int + Each item is used as an "op_axes" argument to an nditer + + flags, op_flags, op_dtypes, order, casting, buffersize (optional) + See `nditer` parameters of the same name + + Returns + ------- + iters : tuple of nditer + An nditer for each item in `axes`, outermost first + + See Also + -------- + nditer + + Examples + -------- + + Basic usage. Note how y is the "flattened" version of + [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified + the first iter's axes as [1] + + >>> a = np.arange(12).reshape(2, 3, 2) + >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) + >>> for x in i: + ... print(i.multi_index) + ... for y in j: + ... print('', j.multi_index, y) + + (0,) + (0, 0) 0 + (0, 1) 1 + (1, 0) 6 + (1, 1) 7 + (1,) + (0, 0) 2 + (0, 1) 3 + (1, 0) 8 + (1, 1) 9 + (2,) + (0, 0) 4 + (0, 1) 5 + (1, 0) 10 + (1, 1) 11 + + """) + +add_newdoc('numpy.core', 'nditer', ('close', + """ + close() + + Resolve all writeback semantics in writeable operands. + + See Also + -------- + + :ref:`nditer-context-manager` + + """)) + + +############################################################################### +# +# broadcast +# +############################################################################### + +add_newdoc('numpy.core', 'broadcast', + """ + Produce an object that mimics broadcasting. + + Parameters + ---------- + in1, in2, ... 
: array_like + Input parameters. + + Returns + ------- + b : broadcast object + Broadcast the input parameters against one another, and + return an object that encapsulates the result. + Amongst others, it has ``shape`` and ``nd`` properties, and + may be used as an iterator. + + See Also + -------- + broadcast_arrays + broadcast_to + + Examples + -------- + + Manually adding two vectors, using broadcasting: + + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + + >>> out = np.empty(b.shape) + >>> out.flat = [u+v for (u,v) in b] + >>> out + array([[ 5., 6., 7.], + [ 6., 7., 8.], + [ 7., 8., 9.]]) + + Compare against built-in broadcasting: + + >>> x + y + array([[5, 6, 7], + [6, 7, 8], + [7, 8, 9]]) + + """) + +# attributes + +add_newdoc('numpy.core', 'broadcast', ('index', + """ + current index in broadcasted result + + Examples + -------- + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> b.next(), b.next(), b.next() + ((1, 4), (1, 5), (1, 6)) + >>> b.index + 3 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('iters', + """ + tuple of iterators along ``self``'s "components." + + Returns a tuple of `numpy.flatiter` objects, one for each "component" + of ``self``. + + See Also + -------- + numpy.flatiter + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> row, col = b.iters + >>> row.next(), col.next() + (1, 4) + + """)) + +add_newdoc('numpy.core', 'broadcast', ('ndim', + """ + Number of dimensions of broadcasted result. Alias for `nd`. + + .. versionadded:: 1.12.0 + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.ndim + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('nd', + """ + Number of dimensions of broadcasted result. For code intended for NumPy + 1.12.0 and later the more consistent `ndim` is preferred. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.nd + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('numiter', + """ + Number of iterators possessed by the broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.numiter + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('shape', + """ + Shape of broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.shape + (3, 3) + + """)) + +add_newdoc('numpy.core', 'broadcast', ('size', + """ + Total size of broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.size + 9 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('reset', + """ + reset() + + Reset the broadcasted result's iterator(s). 
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.index
+    0
+    >>> b.next(), b.next(), b.next()
+    ((1, 4), (2, 4), (3, 4))
+    >>> b.index
+    3
+    >>> b.reset()
+    >>> b.index
+    0
+
+    """))
+
+###############################################################################
+#
+# numpy functions
+#
+###############################################################################
+
+add_newdoc('numpy.core.multiarray', 'array',
+    """
+    array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
+
+    Create an array.
+
+    Parameters
+    ----------
+    object : array_like
+        An array, any object exposing the array interface, an object whose
+        __array__ method returns an array, or any (nested) sequence.
+    dtype : data-type, optional
+        The desired data-type for the array. If not given, then the type will
+        be determined as the minimum type required to hold the objects in the
+        sequence. This argument can only be used to 'upcast' the array. For
+        downcasting, use the .astype(t) method.
+    copy : bool, optional
+        If true (default), then the object is copied. Otherwise, a copy will
+        only be made if __array__ returns a copy, if obj is a nested sequence,
+        or if a copy is needed to satisfy any of the other requirements
+        (`dtype`, `order`, etc.).
+    order : {'K', 'A', 'C', 'F'}, optional
+        Specify the memory layout of the array. If object is not an array, the
+        newly created array will be in C order (row major) unless 'F' is
+        specified, in which case it will be in Fortran order (column major).
+        If object is an array the following holds.
+
+        ===== ========= ===================================================
+        order  no copy                 copy=True
+        ===== ========= ===================================================
+        'K'   unchanged F & C order preserved, otherwise most similar order
+        'A'   unchanged F order if input is F and not C, otherwise C order
+        'C'   C order   C order
+        'F'   F order   F order
+        ===== ========= ===================================================
+
+        When ``copy=False`` and a copy is made for other reasons, the result is
+        the same as if ``copy=True``, with some exceptions for `A`, see the
+        Notes section. The default order is 'K'.
+    subok : bool, optional
+        If True, then sub-classes will be passed-through, otherwise
+        the returned array will be forced to be a base-class array (default).
+    ndmin : int, optional
+        Specifies the minimum number of dimensions that the resulting
+        array should have. Ones will be pre-pended to the shape as
+        needed to meet this requirement.
+
+    Returns
+    -------
+    out : ndarray
+        An array object satisfying the specified requirements.
+
+    See Also
+    --------
+    empty_like : Return an empty array with shape and type of input.
+    ones_like : Return an array of ones with shape and type of input.
+    zeros_like : Return an array of zeros with shape and type of input.
+    full_like : Return a new array with shape of input filled with value.
+    empty : Return a new uninitialized array.
+    ones : Return a new array setting values to one.
+    zeros : Return a new array setting values to zero.
+    full : Return a new array of given shape filled with value.
+
+
+    Notes
+    -----
+    When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
+    and a copy is forced by a change in dtype, then the order of the result is
+    not necessarily 'C' as expected. This is likely a bug.
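+    As a short sketch of the documented `order` behaviour above (an
+    illustrative addition, not an upstream example)::
+
+        >>> f = np.asfortranarray(np.ones((2, 3)))
+        >>> np.array(f, order='A').flags['F_CONTIGUOUS']  # 'A' keeps F order
+        True
+        >>> np.array(f, order='C').flags['C_CONTIGUOUS']  # 'C' forces C order
+        True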
+
+    Examples
+    --------
+    >>> np.array([1, 2, 3])
+    array([1, 2, 3])
+
+    Upcasting:
+
+    >>> np.array([1, 2, 3.0])
+    array([ 1.,  2.,  3.])
+
+    More than one dimension:
+
+    >>> np.array([[1, 2], [3, 4]])
+    array([[1, 2],
+           [3, 4]])
+
+    Minimum dimensions 2:
+
+    >>> np.array([1, 2, 3], ndmin=2)
+    array([[1, 2, 3]])
+
+    Type provided:
+
+    >>> np.array([1, 2, 3], dtype=complex)
+    array([ 1.+0.j,  2.+0.j,  3.+0.j])
+
+    Data-type consisting of more than one element:
+
+    >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
+    >>> x['a']
+    array([1, 3])
+
+    Creating an array from sub-classes:
+
+    >>> np.array(np.mat('1 2; 3 4'))
+    array([[1, 2],
+           [3, 4]])
+
+    >>> np.array(np.mat('1 2; 3 4'), subok=True)
+    matrix([[1, 2],
+            [3, 4]])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'empty',
+    """
+    empty(shape, dtype=float, order='C')
+
+    Return a new array of given shape and type, without initializing entries.
+
+    Parameters
+    ----------
+    shape : int or tuple of int
+        Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
+    dtype : data-type, optional
+        Desired output data-type for the array, e.g., `numpy.int8`. Default is
+        `numpy.float64`.
+    order : {'C', 'F'}, optional, default: 'C'
+        Whether to store multi-dimensional data in row-major
+        (C-style) or column-major (Fortran-style) order in
+        memory.
+
+    Returns
+    -------
+    out : ndarray
+        Array of uninitialized (arbitrary) data of the given shape, dtype, and
+        order. Object arrays will be initialized to None.
+
+    See Also
+    --------
+    empty_like : Return an empty array with shape and type of input.
+    ones : Return a new array setting values to one.
+    zeros : Return a new array setting values to zero.
+    full : Return a new array of given shape filled with value.
+
+
+    Notes
+    -----
+    `empty`, unlike `zeros`, does not set the array values to zero,
+    and may therefore be marginally faster. On the other hand, it requires
+    the user to manually set all the values in the array, and should be
+    used with caution.
+
+    Examples
+    --------
+    >>> np.empty([2, 2])
+    array([[ -9.74499359e+001,   6.69583040e-309],
+           [  2.13182611e-314,   3.06959433e-309]])         #random
+
+    >>> np.empty([2, 2], dtype=int)
+    array([[-1073741821, -1067949133],
+           [  496041986,    19249760]])                     #random
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'scalar',
+    """
+    scalar(dtype, obj)
+
+    Return a new scalar array of the given type initialized with obj.
+
+    This function is meant mainly for pickle support. `dtype` must be a
+    valid data-type descriptor. If `dtype` corresponds to an object
+    descriptor, then `obj` can be any object, otherwise `obj` must be a
+    string. If `obj` is not given, it will be interpreted as None for object
+    type and as zeros for all other types.
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'zeros',
+    """
+    zeros(shape, dtype=float, order='C')
+
+    Return a new array of given shape and type, filled with zeros.
+
+    Parameters
+    ----------
+    shape : int or tuple of ints
+        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+    dtype : data-type, optional
+        The desired data-type for the array, e.g., `numpy.int8`. Default is
+        `numpy.float64`.
+    order : {'C', 'F'}, optional, default: 'C'
+        Whether to store multi-dimensional data in row-major
+        (C-style) or column-major (Fortran-style) order in
+        memory.
+
+    Returns
+    -------
+    out : ndarray
+        Array of zeros with the given shape, dtype, and order.
+
+    See Also
+    --------
+    zeros_like : Return an array of zeros with shape and type of input.
+    empty : Return a new uninitialized array.
+    ones : Return a new array setting values to one.
+    full : Return a new array of given shape filled with value.
+
+    Examples
+    --------
+    >>> np.zeros(5)
+    array([ 0.,  0.,  0.,  0.,  0.])
+
+    >>> np.zeros((5,), dtype=int)
+    array([0, 0, 0, 0, 0])
+
+    >>> np.zeros((2, 1))
+    array([[ 0.],
+           [ 0.]])
+
+    >>> s = (2,2)
+    >>> np.zeros(s)
+    array([[ 0.,  0.],
+           [ 0.,  0.]])
+
+    >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
+    array([(0, 0), (0, 0)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromstring',
+    """
+    fromstring(string, dtype=float, count=-1, sep='')
+
+    A new 1-D array initialized from text data in a string.
+
+    Examples
+    --------
+    >>> np.fromstring('1 2', dtype=int, sep=' ')
+    array([1, 2])
+    >>> np.fromstring('1, 2', dtype=int, sep=',')
+    array([1, 2])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'compare_chararrays',
+    """
+    compare_chararrays(a, b, cmp_op, rstrip)
+
+    Performs element-wise comparison of two string arrays using the
+    comparison operator specified by `cmp_op`.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Arrays to be compared.
+    cmp_op : {"<", "<=", "==", ">=", ">", "!="}
+        Type of comparison.
+    rstrip : Boolean
+        If True, the spaces at the end of Strings are removed before the comparison.
+
+    Returns
+    -------
+    out : ndarray
+        The output array of type Boolean with the same shape as a and b.
+
+    Raises
+    ------
+    ValueError
+        If `cmp_op` is not valid.
+    TypeError
+        If at least one of `a` or `b` is a non-string array.
+
+    Examples
+    --------
+    >>> a = np.array(["a", "b", "cde"])
+    >>> b = np.array(["a", "a", "dec"])
+    >>> np.compare_chararrays(a, b, ">", True)
+    array([False,  True, False])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromiter',
+    """
+    fromiter(iterable, dtype, count=-1)
+
+    Create a new 1-dimensional array from an iterable object.
+
+    Parameters
+    ----------
+    iterable : iterable object
+        An iterable object providing data for the array.
+    dtype : data-type
+        The data-type of the returned array.
+    count : int, optional
+        The number of items to read from *iterable*. The default is -1,
+        which means all data is read.
+
+    Returns
+    -------
+    out : ndarray
+        The output array.
+
+    Notes
+    -----
+    Specify `count` to improve performance. It allows ``fromiter`` to
+    pre-allocate the output array, instead of resizing it on demand.
+
+    Examples
+    --------
+    >>> iterable = (x*x for x in range(5))
+    >>> np.fromiter(iterable, float)
+    array([  0.,   1.,   4.,   9.,  16.])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromfile',
+    """
+    fromfile(file, dtype=float, count=-1, sep='')
+
+    Construct an array from data in a text or binary file.
+
+    A highly efficient way of reading binary data with a known data-type,
+    as well as parsing simply formatted text files. Data written using the
+    `tofile` method can be read using this function.
+
+    Parameters
+    ----------
+    file : file or str
+        Open file object or filename.
+    dtype : data-type
+        Data type of the returned array.
+        For binary files, it is used to determine the size and byte-order
+        of the items in the file.
+    count : int
+        Number of items to read. ``-1`` means all items (i.e., the complete
+        file).
+    sep : str
+        Separator between items if file is a text file.
+        Empty ("") separator means the file should be treated as binary.
+        Spaces (" ") in the separator match zero or more whitespace characters.
+        A separator consisting only of spaces must match at least one
+        whitespace.
+
+    See also
+    --------
+    load, save
+    ndarray.tofile
+    loadtxt : More flexible way of loading data from a text file.
+
+    Notes
+    -----
+    Do not rely on the combination of `tofile` and `fromfile` for
+    data storage, as the binary files generated are not platform
+    independent. In particular, no byte-order or data-type information is
+    saved. Data can be stored in the platform independent ``.npy`` format
+    using `save` and `load` instead.
+
+    Examples
+    --------
+    Construct an ndarray:
+
+    >>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
+    ...                ('temp', float)])
+    >>> x = np.zeros((1,), dtype=dt)
+    >>> x['time']['min'] = 10; x['temp'] = 98.25
+    >>> x
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
+
+    Save the raw data to disk:
+
+    >>> import os
+    >>> fname = os.tmpnam()
+    >>> x.tofile(fname)
+
+    Read the raw data from disk:
+
+    >>> np.fromfile(fname, dtype=dt)
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
+
+    The recommended way to store and load data:
+
+    >>> np.save(fname, x)
+    >>> np.load(fname + '.npy')
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'frombuffer',
+    """
+    frombuffer(buffer, dtype=float, count=-1, offset=0)
+
+    Interpret a buffer as a 1-dimensional array.
+
+    Parameters
+    ----------
+    buffer : buffer_like
+        An object that exposes the buffer interface.
+    dtype : data-type, optional
+        Data-type of the returned array; default: float.
+    count : int, optional
+        Number of items to read. ``-1`` means all data in the buffer.
+    offset : int, optional
+        Start reading the buffer from this offset (in bytes); default: 0.
+
+    Notes
+    -----
+    If the buffer has data that is not in machine byte-order, this should
+    be specified as part of the data-type, e.g.::
+
+      >>> dt = np.dtype(int)
+      >>> dt = dt.newbyteorder('>')
+      >>> np.frombuffer(buf, dtype=dt)
+
+    The data of the resulting array will not be byteswapped, but will be
+    interpreted correctly.
+
+    Examples
+    --------
+    >>> s = 'hello world'
+    >>> np.frombuffer(s, dtype='S1', count=5, offset=6)
+    array(['w', 'o', 'r', 'l', 'd'],
+          dtype='|S1')
+
+    >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
+    array([1, 2], dtype=uint8)
+    >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
+    array([1, 2, 3], dtype=uint8)
+
+    """)
+
+add_newdoc('numpy.core', 'fastCopyAndTranspose',
+    """_fastCopyAndTranspose(a)""")
+
+add_newdoc('numpy.core.multiarray', 'correlate',
+    """cross_correlate(a,v, mode=0)""")
+
+add_newdoc('numpy.core.multiarray', 'arange',
+    """
+    arange([start,] stop[, step,], dtype=None)
+
+    Return evenly spaced values within a given interval.
+
+    Values are generated within the half-open interval ``[start, stop)``
+    (in other words, the interval including `start` but excluding `stop`).
+    For integer arguments the function is equivalent to the Python built-in
+    `range` function, but returns an ndarray rather than a list.
+
+    When using a non-integer step, such as 0.1, the results will often not
+    be consistent. It is better to use `numpy.linspace` for these cases.
+
+    Parameters
+    ----------
+    start : number, optional
+        Start of interval. The interval includes this value. The default
+        start value is 0.
+    stop : number
+        End of interval. The interval does not include this value, except
+        in some cases where `step` is not an integer and floating point
+        round-off affects the length of `out`.
+    step : number, optional
+        Spacing between values. For any output `out`, this is the distance
+        between two adjacent values, ``out[i+1] - out[i]``. The default
+        step size is 1. If `step` is specified as a positional argument,
+        `start` must also be given.
+    dtype : dtype
+        The type of the output array. If `dtype` is not given, infer the data
+        type from the other input arguments.
+
+    Returns
+    -------
+    arange : ndarray
+        Array of evenly spaced values.
+
+        For floating point arguments, the length of the result is
+        ``ceil((stop - start)/step)``. Because of floating point overflow,
+        this rule may result in the last element of `out` being greater
+        than `stop`.
+
+    See Also
+    --------
+    linspace : Evenly spaced numbers with careful handling of endpoints.
+    ogrid: Arrays of evenly spaced numbers in N-dimensions.
+    mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
+
+    Examples
+    --------
+    >>> np.arange(3)
+    array([0, 1, 2])
+    >>> np.arange(3.0)
+    array([ 0.,  1.,  2.])
+    >>> np.arange(3,7)
+    array([3, 4, 5, 6])
+    >>> np.arange(3,7,2)
+    array([3, 5])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
+    """_get_ndarray_c_version()
+
+    Return the compile time NDARRAY_VERSION number.
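+
+    For illustration (not an upstream example), the version is a plain
+    integer that can be compared across builds::
+
+        >>> isinstance(np.core.multiarray._get_ndarray_c_version(), int)
+        True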
+ + """) + +add_newdoc('numpy.core.multiarray', '_reconstruct', + """_reconstruct(subtype, shape, dtype) + + Construct an empty array. Used by Pickles. + + """) + + +add_newdoc('numpy.core.multiarray', 'set_string_function', + """ + set_string_function(f, repr=1) + + Internal method to set a function to be used when pretty printing arrays. + + """) + +add_newdoc('numpy.core.multiarray', 'set_numeric_ops', + """ + set_numeric_ops(op1=func1, op2=func2, ...) + + Set numerical operators for array objects. + + .. deprecated:: 1.16 + + For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`. + For ndarray subclasses, define the ``__array_ufunc__`` method and + override the relevant ufunc. + + Parameters + ---------- + op1, op2, ... : callable + Each ``op = func`` pair describes an operator to be replaced. + For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace + addition by modulus 5 addition. + + Returns + ------- + saved_ops : list of callables + A list of all operators, stored before making replacements. + + Notes + ----- + .. WARNING:: + Use with care! Incorrect usage may lead to memory errors. + + A function replacing an operator cannot make use of that operator. + For example, when replacing add, you may not use ``+``. Instead, + directly call ufuncs. + + Examples + -------- + >>> def add_mod5(x, y): + ... return np.add(x, y) % 5 + ... + >>> old_funcs = np.set_numeric_ops(add=add_mod5) + + >>> x = np.arange(12).reshape((3, 4)) + >>> x + x + array([[0, 2, 4, 1], + [3, 0, 2, 4], + [1, 3, 0, 2]]) + + >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators + + """) + +add_newdoc('numpy.core.multiarray', 'promote_types', + """ + promote_types(type1, type2) + + Returns the data type with the smallest size and smallest scalar + kind to which both ``type1`` and ``type2`` may be safely cast. + The returned data type is always in native byte order. + + This function is symmetric, but rarely associative. + + Parameters + ---------- + type1 : dtype or dtype specifier + First data type. + type2 : dtype or dtype specifier + Second data type. + + Returns + ------- + out : dtype + The promoted data type. + + Notes + ----- + .. versionadded:: 1.6.0 + + Starting in NumPy 1.9, promote_types function now returns a valid string + length when given an integer or float dtype as one argument and a string + dtype as another argument. Previously it always returned the input string + dtype, even if it wasn't long enough to store the max integer/float value + converted to a string. + + See Also + -------- + result_type, dtype, can_cast + + Examples + -------- + >>> np.promote_types('f4', 'f8') + dtype('float64') + + >>> np.promote_types('i8', 'f4') + dtype('float64') + + >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') + dtype('S11') + + An example of a non-associative case: + + >>> p = np.promote_types + >>> p('S', p('i1', 'u1')) + dtype('S6') + >>> p(p('S', 'i1'), 'u1') + dtype('S4') + + """) + +add_newdoc('numpy.core.multiarray', 'newbuffer', + """ + newbuffer(size) + + Return a new uninitialized buffer object. + + Parameters + ---------- + size : int + Size in bytes of returned buffer object. + + Returns + ------- + newbuffer : buffer object + Returned, uninitialized buffer object of `size` bytes. + + """) + +add_newdoc('numpy.core.multiarray', 'getbuffer', + """ + getbuffer(obj [,offset[, size]]) + + Create a buffer object from the given object referencing a slice of + length size starting at offset. + + Default is the entire buffer. 
A read-write buffer is attempted followed + by a read-only buffer. + + Parameters + ---------- + obj : object + + offset : int, optional + + size : int, optional + + Returns + ------- + buffer_obj : buffer + + Examples + -------- + >>> buf = np.getbuffer(np.ones(5), 1, 3) + >>> len(buf) + 3 + >>> buf[0] + '\\x00' + >>> buf + + + """) + +add_newdoc('numpy.core.multiarray', 'c_einsum', + """ + c_einsum(subscripts, *operands, out=None, dtype=None, order='K', + casting='safe') + + *This documentation shadows that of the native python implementation of the `einsum` function, + except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.* + + Evaluates the Einstein summation convention on the operands. + + Using the Einstein summation convention, many common multi-dimensional, + linear algebraic array operations can be represented in a simple fashion. + In *implicit* mode `einsum` computes these values. + + In *explicit* mode, `einsum` provides further flexibility to compute + other array operations that might not be considered classical Einstein + summation operations, by disabling, or forcing summation over specified + subscript labels. + + See the notes and examples for clarification. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation as comma separated list of + subscript labels. An implicit (classical Einstein summation) + calculation is performed unless the explicit indicator '->' is + included as well as subscript labels of the precise output form. + operands : list of array_like + These are the arrays for the operation. + out : ndarray, optional + If provided, the calculation is done into this array. + dtype : {data-type, None}, optional + If provided, forces the calculation to use the data type specified. + Note that you may have to also give a more liberal `casting` + parameter to allow the conversions. Default is None. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the output. 'C' means it should + be C contiguous. 'F' means it should be Fortran contiguous, + 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. + 'K' means it should be as close to the layout as the inputs as + is possible, including arbitrarily permuted axes. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Setting this to + 'unsafe' is not recommended, as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Default is 'safe'. + optimize : {False, True, 'greedy', 'optimal'}, optional + Controls if intermediate optimization should occur. No optimization + will occur if False and True will default to the 'greedy' algorithm. + Also accepts an explicit contraction list from the ``np.einsum_path`` + function. See ``np.einsum_path`` for more details. Defaults to False. + + Returns + ------- + output : ndarray + The calculation based on the Einstein summation convention. + + See Also + -------- + einsum_path, dot, inner, outer, tensordot, linalg.multi_dot + + Notes + ----- + .. 
versionadded:: 1.6.0 + + The Einstein summation convention can be used to compute + many multi-dimensional, linear algebraic array operations. `einsum` + provides a succinct way of representing these. + + A non-exhaustive list of these operations, + which can be computed by `einsum`, is shown below along with examples: + + * Trace of an array, :py:func:`numpy.trace`. + * Return a diagonal, :py:func:`numpy.diag`. + * Array axis summations, :py:func:`numpy.sum`. + * Transpositions and permutations, :py:func:`numpy.transpose`. + * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. + * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. + * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. + * Tensor contractions, :py:func:`numpy.tensordot`. + * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. + + The subscripts string is a comma-separated list of subscript labels, + where each label refers to a dimension of the corresponding operand. + Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` + is equivalent to :py:func:`np.inner(a,b) `. If a label + appears only once, it is not summed, so ``np.einsum('i', a)`` produces a + view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` + describes traditional matrix multiplication and is equivalent to + :py:func:`np.matmul(a,b) `. Repeated subscript labels in one + operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent + to :py:func:`np.trace(a) `. + + In *implicit mode*, the chosen subscripts are important + since the axes of the output are reordered alphabetically. This + means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while + ``np.einsum('ji', a)`` takes its transpose. Additionally, + ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, + ``np.einsum('ij,jh', a, b)`` returns the transpose of the + multiplication since subscript 'h' precedes subscript 'i'. + + In *explicit mode* the output can be directly controlled by + specifying output subscript labels. This requires the + identifier '->' as well as the list of output subscript labels. + This feature increases the flexibility of the function since + summing can be disabled or forced when required. The call + ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) `, + and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) `. + The difference is that `einsum` does not allow broadcasting by default. + Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the + order of the output subscript labels and therefore returns matrix + multiplication, unlike the example above in implicit mode. + + To enable and control broadcasting, use an ellipsis. Default + NumPy-style broadcasting is done by adding an ellipsis + to the left of each term, like ``np.einsum('...ii->...i', a)``. + To take the trace along the first and last axes, + you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix + product with the left-most indices instead of rightmost, one can do + ``np.einsum('ij...,jk...->ik...', a, b)``. + + When there is only one operand, no axes are summed, and no output + parameter is provided, a view into the operand is returned instead + of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` + produces a view (changed in version 1.10.0). 
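+
+    As a short illustration of the two modes (array shapes chosen only
+    for demonstration):
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> b = np.arange(12).reshape(3, 4)
+    >>> np.einsum('ij,jk', a, b).shape      # implicit: output labels 'ik'
+    (2, 4)
+    >>> np.einsum('ij,jk->ki', a, b).shape  # explicit: transposed output
+    (4, 2)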
+ + `einsum` also provides an alternative way to provide the subscripts + and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. + If the output shape is not provided in this format `einsum` will be + calculated in implicit mode, otherwise it will be performed explicitly. + The examples below have corresponding `einsum` calls with the two + parameter methods. + + .. versionadded:: 1.10.0 + + Views returned from einsum are now writeable whenever the input array + is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now + have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` + and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal + of a 2D array. + + Examples + -------- + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + + Writeable returned arrays (since version 
1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + """) + + +############################################################################## +# +# Documentation for ndarray attributes and methods +# +############################################################################## + + +############################################################################## +# +# ndarray object +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', + """ + ndarray(shape, dtype=float, buffer=None, offset=0, + strides=None, order=None) + + An array object represents a multidimensional, homogeneous array + of fixed-size items. An associated data-type object describes the + format of each element in the array (its byte-order, how many bytes it + occupies in memory, whether it is an integer, a floating point number, + or something else, etc.) + + Arrays should be constructed using `array`, `zeros` or `empty` (refer + to the See Also section below). The parameters given here refer to + a low-level method (`ndarray(...)`) for instantiating an array. + + For more information, refer to the `numpy` module and examine the + methods and attributes of an array. + + Parameters + ---------- + (for the __new__ method; see Notes below) + + shape : tuple of ints + Shape of created array. + dtype : data-type, optional + Any object that can be interpreted as a numpy data type. + buffer : object exposing buffer interface, optional + Used to fill the array with data. + offset : int, optional + Offset of array data in buffer. + strides : tuple of ints, optional + Strides of data in memory. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Attributes + ---------- + T : ndarray + Transpose of the array. + data : buffer + The array's elements, in memory. + dtype : dtype object + Describes the format of the elements in the array. + flags : dict + Dictionary containing information related to memory use, e.g., + 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. + flat : numpy.flatiter object + Flattened version of the array as an iterator. The iterator + allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for + assignment examples; TODO). + imag : ndarray + Imaginary part of the array. + real : ndarray + Real part of the array. + size : int + Number of elements in the array. + itemsize : int + The memory use of each array element in bytes. + nbytes : int + The total number of bytes required to store the array data, + i.e., ``itemsize * size``. + ndim : int + The array's number of dimensions. + shape : tuple of ints + Shape of the array. + strides : tuple of ints + The step-size required to move from one element to the next in + memory. For example, a contiguous ``(3, 4)`` array of type + ``int16`` in C-order has strides ``(8, 2)``. This implies that + to move from element to element in memory requires jumps of 2 bytes. + To move from row-to-row, one needs to jump 8 bytes at a time + (``2 * 4``). 
+ ctypes : ctypes object + Class containing properties of the array needed for interaction + with ctypes. + base : ndarray + If the array is a view into another array, that array is its `base` + (unless that array is also a view). The `base` array is where the + array data is actually stored. + + See Also + -------- + array : Construct an array. + zeros : Create an array, each element of which is zero. + empty : Create an array, but leave its allocated memory unchanged (i.e., + it contains "garbage"). + dtype : Create a data-type. + + Notes + ----- + There are two modes of creating an array using ``__new__``: + + 1. If `buffer` is None, then only `shape`, `dtype`, and `order` + are used. + 2. If `buffer` is an object exposing the buffer interface, then + all keywords are interpreted. + + No ``__init__`` method is needed because the array is fully initialized + after the ``__new__`` method. + + Examples + -------- + These examples illustrate the low-level `ndarray` constructor. Refer + to the `See Also` section above for easier ways of constructing an + ndarray. + + First mode, `buffer` is None: + + >>> np.ndarray(shape=(2,2), dtype=float, order='F') + array([[ -1.13698227e+002, 4.25087011e-303], + [ 2.88528414e-306, 3.27025015e-309]]) #random + + Second mode: + + >>> np.ndarray((2,), buffer=np.array([1,2,3]), + ... offset=np.int_().itemsize, + ... dtype=int) # offset = 1*itemsize, i.e. skip first element + array([2, 3]) + + """) + + +############################################################################## +# +# ndarray attributes +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', + """Array protocol: Python side.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', + """None.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', + """Array priority.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', + """Array protocol: C-struct side.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_', + """Allow the array to be interpreted as a ctypes object by returning the + data-memory location as an integer + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('base', + """ + Base object if memory is from some other object. + + Examples + -------- + The base of an array that owns its memory is None: + + >>> x = np.array([1,2,3,4]) + >>> x.base is None + True + + Slicing creates a view, whose memory is shared with x: + + >>> y = x[2:] + >>> y.base is x + True + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', + """ + An object to simplify the interaction of the array with the ctypes + module. + + This attribute creates an object that makes it easier to use arrays + when calling shared libraries with the ctypes module. The returned + object has, among others, data, shape, and strides attributes (see + Notes below) which themselves return ctypes objects that can be used + as arguments to a shared library. + + Parameters + ---------- + None + + Returns + ------- + c : Python object + Possessing attributes data, shape, strides, etc. + + See Also + -------- + numpy.ctypeslib + + Notes + ----- + Below are the public attributes of this object which were documented + in "Guide to NumPy" (we have omitted undocumented public attributes, + as well as documented private attributes): + + .. autoattribute:: numpy.core._internal._ctypes.data + + .. 
autoattribute:: numpy.core._internal._ctypes.shape
+
+    .. autoattribute:: numpy.core._internal._ctypes.strides
+
+    .. automethod:: numpy.core._internal._ctypes.data_as
+
+    .. automethod:: numpy.core._internal._ctypes.shape_as
+
+    .. automethod:: numpy.core._internal._ctypes.strides_as
+
+    If the ctypes module is not available, then the ctypes attribute
+    of array objects still returns something useful, but ctypes objects
+    are not returned and errors may be raised instead. In particular,
+    the object will still have the ``_as_parameter_`` attribute which will
+    return an integer equal to the data attribute.
+
+    Examples
+    --------
+    >>> import ctypes
+    >>> x
+    array([[0, 1],
+           [2, 3]])
+    >>> x.ctypes.data
+    30439712
+    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
+    <ctypes.LP_c_long object at 0x...>
+    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
+    c_long(0)
+    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
+    c_longlong(4294967296L)
+    >>> x.ctypes.shape
+    <numpy.core._internal.c_long_Array_2 object at 0x...>
+    >>> x.ctypes.shape_as(ctypes.c_long)
+    <numpy.core._internal.c_long_Array_2 object at 0x...>
+    >>> x.ctypes.strides
+    <numpy.core._internal.c_long_Array_2 object at 0x...>
+    >>> x.ctypes.strides_as(ctypes.c_longlong)
+    <numpy.core._internal.c_longlong_Array_2 object at 0x...>
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
+    """Python buffer object pointing to the start of the array's data."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
+    """
+    Data-type of the array's elements.
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    d : numpy dtype object
+
+    See Also
+    --------
+    numpy.dtype
+
+    Examples
+    --------
+    >>> x
+    array([[0, 1],
+           [2, 3]])
+    >>> x.dtype
+    dtype('int32')
+    >>> type(x.dtype)
+    <class 'numpy.dtype'>
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
+    """
+    The imaginary part of the array.
+
+    Examples
+    --------
+    >>> x = np.sqrt([1+0j, 0+1j])
+    >>> x.imag
+    array([ 0.        ,  0.70710678])
+    >>> x.imag.dtype
+    dtype('float64')
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
+    """
+    Length of one array element in bytes.
+
+    Examples
+    --------
+    >>> x = np.array([1,2,3], dtype=np.float64)
+    >>> x.itemsize
+    8
+    >>> x = np.array([1,2,3], dtype=np.complex128)
+    >>> x.itemsize
+    16
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
+    """
+    Information about the memory layout of the array.
+
+    Attributes
+    ----------
+    C_CONTIGUOUS (C)
+        The data is in a single, C-style contiguous segment.
+    F_CONTIGUOUS (F)
+        The data is in a single, Fortran-style contiguous segment.
+    OWNDATA (O)
+        The array owns the memory it uses or borrows it from another object.
+    WRITEABLE (W)
+        The data area can be written to.  Setting this to False locks
+        the data, making it read-only.  A view (slice, etc.) inherits WRITEABLE
+        from its base array at creation time, but a view of a writeable
+        array may be subsequently locked while the base array remains writeable.
+        (The opposite is not true, in that a view of a locked array may not
+        be made writeable.  However, currently, locking a base object does not
+        lock any views that already reference it, so under that circumstance it
+        is possible to alter the contents of a locked array via a previously
+        created writeable view onto it.)  Attempting to change a non-writeable
+        array raises a RuntimeError exception.
+    ALIGNED (A)
+        The data and all elements are aligned appropriately for the hardware.
+    WRITEBACKIFCOPY (X)
+        This array is a copy of some other array. The C-API function
+        PyArray_ResolveWritebackIfCopy must be called before deallocating
+        this array; at that point the base array will be updated with the
+        contents of this array.
+    UPDATEIFCOPY (U)
+        (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
+        When this array is
+        deallocated, the base array will be updated with the contents of
+        this array.
+    FNC
+        F_CONTIGUOUS and not C_CONTIGUOUS.
+    FORC
+        F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
+    BEHAVED (B)
+        ALIGNED and WRITEABLE.
+    CARRAY (CA)
+        BEHAVED and C_CONTIGUOUS.
+    FARRAY (FA)
+        BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
+
+    Notes
+    -----
+    The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
+    or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
+    names are only supported in dictionary access.
+
+    Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
+    changed by the user, via direct assignment to the attribute or dictionary
+    entry, or by calling `ndarray.setflags`.
+
+    The array flags cannot be set arbitrarily:
+
+    - UPDATEIFCOPY can only be set ``False``.
+    - WRITEBACKIFCOPY can only be set ``False``.
+    - ALIGNED can only be set ``True`` if the data is truly aligned.
+    - WRITEABLE can only be set ``True`` if the array owns its own memory
+      or the ultimate owner of the memory exposes a writeable buffer
+      interface or is a string.
+
+    Arrays can be both C-style and Fortran-style contiguous simultaneously.
+    This is clear for 1-dimensional arrays, but can also be true for higher
+    dimensional arrays.
+
+    Even for contiguous arrays a stride for a given dimension
+    ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
+    or the array has no elements.
+    It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+    for C-style contiguous arrays or that ``self.strides[0] == self.itemsize``
+    for Fortran-style contiguous arrays.
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
+    """
+    A 1-D iterator over the array.
+
+    This is a `numpy.flatiter` instance, which acts similarly to, but is not
+    a subclass of, Python's built-in iterator object.
+
+    See Also
+    --------
+    flatten : Return a copy of the array collapsed into one dimension.
+
+    flatiter
+
+    Examples
+    --------
+    >>> x = np.arange(1, 7).reshape(2, 3)
+    >>> x
+    array([[1, 2, 3],
+           [4, 5, 6]])
+    >>> x.flat[3]
+    4
+    >>> x.T
+    array([[1, 4],
+           [2, 5],
+           [3, 6]])
+    >>> x.T.flat[3]
+    5
+    >>> type(x.flat)
+    <class 'numpy.flatiter'>
+
+    An assignment example:
+
+    >>> x.flat = 3; x
+    array([[3, 3, 3],
+           [3, 3, 3]])
+    >>> x.flat[[1,4]] = 1; x
+    array([[3, 1, 3],
+           [3, 1, 3]])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
+    """
+    Total bytes consumed by the elements of the array.
+
+    Notes
+    -----
+    Does not include memory consumed by non-element attributes of the
+    array object.
+
+    Examples
+    --------
+    >>> x = np.zeros((3,5,2), dtype=np.complex128)
+    >>> x.nbytes
+    480
+    >>> np.prod(x.shape) * x.itemsize
+    480
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
+    """
+    Number of array dimensions.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> x.ndim
+    1
+    >>> y = np.zeros((2, 3, 4))
+    >>> y.ndim
+    3
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
+    """
+    The real part of the array.
+
+    Examples
+    --------
+    >>> x = np.sqrt([1+0j, 0+1j])
+    >>> x.real
+    array([ 1.        ,  0.70710678])
+    >>> x.real.dtype
+    dtype('float64')
+
+    See Also
+    --------
+    numpy.real : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
+    """
+    Tuple of array dimensions.
+
+    The shape property is usually used to get the current shape of an array,
+    but may also be used to reshape the array in-place by assigning a tuple of
+    array dimensions to it.  As with `numpy.reshape`, one of the new shape
+    dimensions can be -1, in which case its value is inferred from the size of
+    the array and the remaining dimensions. Reshaping an array in-place will
+    fail if a copy is required.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3, 4])
+    >>> x.shape
+    (4,)
+    >>> y = np.zeros((2, 3, 4))
+    >>> y.shape
+    (2, 3, 4)
+    >>> y.shape = (3, 8)
+    >>> y
+    array([[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]])
+    >>> y.shape = (3, 6)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: total size of new array must be unchanged
+    >>> np.zeros((4,2))[::2].shape = (-1,)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    AttributeError: incompatible shape for a non-contiguous array
+
+    See Also
+    --------
+    numpy.reshape : similar function
+    ndarray.reshape : similar method
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
+    """
+    Number of elements in the array.
+
+    Equal to ``np.prod(a.shape)``, i.e., the product of the array's
+    dimensions.
+
+    Notes
+    -----
+    `a.size` returns a standard arbitrary precision Python integer. This
+    may not be the case with other methods of obtaining the same value
+    (like the suggested ``np.prod(a.shape)``, which returns an instance
+    of ``np.int_``), and may be relevant if the value is used further in
+    calculations that may overflow a fixed size integer type.
+
+    Examples
+    --------
+    >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
+    >>> x.size
+    30
+    >>> np.prod(x.shape)
+    30
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
+    """
+    Tuple of bytes to step in each dimension when traversing an array.
+
+    The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
+    is::
+
+        offset = sum(np.array(i) * a.strides)
+
+    A more detailed explanation of strides can be found in the
+    "ndarray.rst" file in the NumPy reference guide.
+
+    Notes
+    -----
+    Imagine an array of 32-bit integers (each 4 bytes)::
+
+      x = np.array([[0, 1, 2, 3, 4],
+                    [5, 6, 7, 8, 9]], dtype=np.int32)
+
+    This array is stored in memory as 40 bytes, one after the other
+    (known as a contiguous block of memory).  The strides of an array tell
+    us how many bytes we have to skip in memory to move to the next position
+    along a certain axis.  For example, we have to skip 4 bytes (1 value) to
+    move to the next column, but 20 bytes (5 values) to get to the same
+    position in the next row.  As such, the strides for the array `x` will be
+    ``(20, 4)``.
+
+    See Also
+    --------
+    numpy.lib.stride_tricks.as_strided
+
+    Examples
+    --------
+    >>> y = np.reshape(np.arange(2*3*4), (2,3,4))
+    >>> y
+    array([[[ 0,  1,  2,  3],
+            [ 4,  5,  6,  7],
+            [ 8,  9, 10, 11]],
+           [[12, 13, 14, 15],
+            [16, 17, 18, 19],
+            [20, 21, 22, 23]]])
+    >>> y.strides
+    (48, 16, 4)
+    >>> y[1,1,1]
+    17
+    >>> offset=sum(y.strides * np.array((1,1,1)))
+    >>> offset/y.itemsize
+    17
+
+    >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
+    >>> x.strides
+    (32, 4, 224, 1344)
+    >>> i = np.array([3,5,2,2])
+    >>> offset = sum(i * x.strides)
+    >>> x[3,5,2,2]
+    813
+    >>> offset / x.itemsize
+    813
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
+    """
+    Same as self.transpose(), except that self is returned if
+    self.ndim < 2.
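+    For a 2-D array this is the standard matrix transpose, so that
+    ``x.T[i, j] == x[j, i]``, as the examples below illustrate.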
+ + Examples + -------- + >>> x = np.array([[1.,2.],[3.,4.]]) + >>> x + array([[ 1., 2.], + [ 3., 4.]]) + >>> x.T + array([[ 1., 3.], + [ 2., 4.]]) + >>> x = np.array([1.,2.,3.,4.]) + >>> x + array([ 1., 2., 3., 4.]) + >>> x.T + array([ 1., 2., 3., 4.]) + + """)) + + +############################################################################## +# +# ndarray methods +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', + """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. + + Returns either a new reference to self if dtype is not given or a new array + of provided data type if dtype is different from the current dtype of the + array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', + """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', + """a.__array_wrap__(obj) -> Object of same type as ndarray object a. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', + """a.__copy__() + + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + + Equivalent to ``a.copy(order='K')``. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', + """a.__deepcopy__(memo, /) -> Deep copy of array. + + Used if :func:`copy.deepcopy` is called on an array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', + """a.__reduce__() + + For pickling. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', + """a.__setstate__(state, /) + + For unpickling. + + The `state` argument must be a sequence that contains the following + elements: + + Parameters + ---------- + version : int + optional pickle version. If omitted defaults to 0. + shape : tuple + dtype : data-type + isFortran : bool + rawdata : string or list + a binary string with the data (or a list if 'a' is an object array) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('all', + """ + a.all(axis=None, out=None, keepdims=False) + + Returns True if all elements evaluate to True. + + Refer to `numpy.all` for full documentation. + + See Also + -------- + numpy.all : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('any', + """ + a.any(axis=None, out=None, keepdims=False) + + Returns True if any of the elements of `a` evaluate to True. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.any : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', + """ + a.argmax(axis=None, out=None) + + Return indices of the maximum values along the given axis. + + Refer to `numpy.argmax` for full documentation. + + See Also + -------- + numpy.argmax : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', + """ + a.argmin(axis=None, out=None) + + Return indices of the minimum values along the given axis of `a`. + + Refer to `numpy.argmin` for detailed documentation. + + See Also + -------- + numpy.argmin : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', + """ + a.argsort(axis=-1, kind='quicksort', order=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. 
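+    For instance, for a 1-D array ``x``, ``x[x.argsort()]`` gives a
+    sorted copy of ``x``.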
+ + See Also + -------- + numpy.argsort : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', + """ + a.argpartition(kth, axis=-1, kind='introselect', order=None) + + Returns the indices that would partition this array. + + Refer to `numpy.argpartition` for full documentation. + + .. versionadded:: 1.8.0 + + See Also + -------- + numpy.argpartition : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', + """ + a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) + + Copy of the array, cast to a specified type. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout order of the result. + 'C' means C order, 'F' means Fortran order, 'A' + means 'F' order if all the arrays are Fortran contiguous, + 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'unsafe' + for backwards compatibility. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + subok : bool, optional + If True, then sub-classes will be passed-through (default), otherwise + the returned array will be forced to be a base-class array. + copy : bool, optional + By default, astype always returns a newly allocated array. If this + is set to false, and the `dtype`, `order`, and `subok` + requirements are satisfied, the input array is returned instead + of a copy. + + Returns + ------- + arr_t : ndarray + Unless `copy` is False and the other conditions for returning the input + array are satisfied (see description for `copy` input parameter), `arr_t` + is a new array of the same shape as the input array, with dtype, order + given by `dtype`, `order`. + + Notes + ----- + Starting in NumPy 1.9, astype method now returns an error if the string + dtype to cast to is not long enough in 'safe' casting mode to hold the max + value of integer/float array that is being casted. Previously the casting + was allowed even if the result was truncated. + + Raises + ------ + ComplexWarning + When casting from complex to float or int. To avoid this, + one should use ``a.real.astype(t)``. + + Examples + -------- + >>> x = np.array([1, 2, 2.5]) + >>> x + array([ 1. , 2. , 2.5]) + + >>> x.astype(int) + array([1, 2, 2]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', + """ + a.byteswap(inplace=False) + + Swap the bytes of the array elements + + Toggle between low-endian and big-endian data representation by + returning a byteswapped array, optionally swapped in-place. + + Parameters + ---------- + inplace : bool, optional + If ``True``, swap bytes in-place, default is ``False``. + + Returns + ------- + out : ndarray + The byteswapped array. If `inplace` is ``True``, this is + a view to self. 
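+
+    Notes
+    -----
+    ``byteswap`` changes the underlying bytes, and therefore the values the
+    array holds under its current dtype.  To keep the element values and
+    flip only the byte order of the dtype, combine it with `newbyteorder`,
+    e.g. ``A.byteswap().newbyteorder()``.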
+ + Examples + -------- + >>> A = np.array([1, 256, 8755], dtype=np.int16) + >>> map(hex, A) + ['0x1', '0x100', '0x2233'] + >>> A.byteswap(inplace=True) + array([ 256, 1, 13090], dtype=int16) + >>> map(hex, A) + ['0x100', '0x1', '0x3322'] + + Arrays of strings are not swapped + + >>> A = np.array(['ceg', 'fac']) + >>> A.byteswap() + array(['ceg', 'fac'], + dtype='|S3') + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', + """ + a.choose(choices, out=None, mode='raise') + + Use an index array to construct a new array from a set of choices. + + Refer to `numpy.choose` for full documentation. + + See Also + -------- + numpy.choose : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', + """ + a.clip(min=None, max=None, out=None) + + Return an array whose values are limited to ``[min, max]``. + One of max or min must be given. + + Refer to `numpy.clip` for full documentation. + + See Also + -------- + numpy.clip : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', + """ + a.compress(condition, axis=None, out=None) + + Return selected slices of this array along given axis. + + Refer to `numpy.compress` for full documentation. + + See Also + -------- + numpy.compress : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', + """ + a.conj() + + Complex-conjugate all elements. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', + """ + a.conjugate() + + Return the complex conjugate, element-wise. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', + """ + a.copy(order='C') + + Return a copy of the array. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :func:`numpy.copy` are very + similar, but have different default values for their order= + arguments.) + + See also + -------- + numpy.copy + numpy.copyto + + Examples + -------- + >>> x = np.array([[1,2,3],[4,5,6]], order='F') + + >>> y = x.copy() + + >>> x.fill(0) + + >>> x + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y + array([[1, 2, 3], + [4, 5, 6]]) + + >>> y.flags['C_CONTIGUOUS'] + True + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', + """ + a.cumprod(axis=None, dtype=None, out=None) + + Return the cumulative product of the elements along the given axis. + + Refer to `numpy.cumprod` for full documentation. + + See Also + -------- + numpy.cumprod : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', + """ + a.cumsum(axis=None, dtype=None, out=None) + + Return the cumulative sum of the elements along the given axis. + + Refer to `numpy.cumsum` for full documentation. + + See Also + -------- + numpy.cumsum : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', + """ + a.diagonal(offset=0, axis1=0, axis2=1) + + Return specified diagonals. In NumPy 1.9 the returned array is a + read-only view instead of a copy as in previous NumPy versions. 
In + a future version the read-only restriction will be removed. + + Refer to :func:`numpy.diagonal` for full documentation. + + See Also + -------- + numpy.diagonal : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', + """ + a.dot(b, out=None) + + Dot product of two arrays. + + Refer to `numpy.dot` for full documentation. + + See Also + -------- + numpy.dot : equivalent function + + Examples + -------- + >>> a = np.eye(2) + >>> b = np.ones((2, 2)) * 2 + >>> a.dot(b) + array([[ 2., 2.], + [ 2., 2.]]) + + This array method can be conveniently chained: + + >>> a.dot(b).dot(b) + array([[ 8., 8.], + [ 8., 8.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', + """a.dump(file) + + Dump a pickle of the array to the specified file. + The array can be read back with pickle.load or numpy.load. + + Parameters + ---------- + file : str + A string naming the dump file. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', + """ + a.dumps() + + Returns the pickle of the array as a string. + pickle.loads or numpy.loads will convert the string back to an array. + + Parameters + ---------- + None + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', + """ + a.fill(value) + + Fill the array with a scalar value. + + Parameters + ---------- + value : scalar + All elements of `a` will be assigned this value. + + Examples + -------- + >>> a = np.array([1, 2]) + >>> a.fill(0) + >>> a + array([0, 0]) + >>> a = np.empty(2) + >>> a.fill(1) + >>> a + array([ 1., 1.]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', + """ + a.flatten(order='C') + + Return a copy of the array collapsed into one dimension. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. + 'F' means to flatten in column-major (Fortran- + style) order. 'A' means to flatten in column-major + order if `a` is Fortran *contiguous* in memory, + row-major order otherwise. 'K' means to flatten + `a` in the order the elements occur in memory. + The default is 'C'. + + Returns + ------- + y : ndarray + A copy of the input array, flattened to one dimension. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the array. + + Examples + -------- + >>> a = np.array([[1,2], [3,4]]) + >>> a.flatten() + array([1, 2, 3, 4]) + >>> a.flatten('F') + array([1, 3, 2, 4]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', + """ + a.getfield(dtype, offset=0) + + Returns a field of the given array as a certain type. + + A field is a view of the array data with a given data-type. The values in + the view are determined by the given type and the offset into the current + array in bytes. The offset needs to be such that the view dtype fits in the + array dtype; for example an array of dtype complex128 has 16-byte elements. + If taking a view with a 32-bit integer (4 bytes), the offset needs to be + between 0 and 12 bytes. + + Parameters + ---------- + dtype : str or dtype + The data type of the view. The dtype size of the view can not be larger + than that of the array itself. + offset : int + Number of bytes to skip before beginning the element view. 
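+
+    Returns
+    -------
+    out : ndarray
+        A view of the array's data, of type `dtype`, starting `offset`
+        bytes into each array element.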
+ + Examples + -------- + >>> x = np.diag([1.+1.j]*2) + >>> x[1, 1] = 2 + 4.j + >>> x + array([[ 1.+1.j, 0.+0.j], + [ 0.+0.j, 2.+4.j]]) + >>> x.getfield(np.float64) + array([[ 1., 0.], + [ 0., 2.]]) + + By choosing an offset of 8 bytes we can select the complex part of the + array for our view: + + >>> x.getfield(np.float64, offset=8) + array([[ 1., 0.], + [ 0., 4.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('item', + """ + a.item(*args) + + Copy an element of an array to a standard Python scalar and return it. + + Parameters + ---------- + \\*args : Arguments (variable number and type) + + * none: in this case, the method only works for arrays + with one element (`a.size == 1`), which element is + copied into a standard Python scalar object and returned. + + * int_type: this argument is interpreted as a flat index into + the array, specifying which element to copy and return. + + * tuple of int_types: functions as does a single int_type argument, + except that the argument is interpreted as an nd-index into the + array. + + Returns + ------- + z : Standard Python scalar object + A copy of the specified element of the array as a suitable + Python scalar + + Notes + ----- + When the data type of `a` is longdouble or clongdouble, item() returns + a scalar array object because there is no available Python scalar that + would not lose information. Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned. + + `item` is very similar to a[args], except, instead of an array scalar, + a standard Python scalar is returned. This can be useful for speeding up + access to elements of the array and doing arithmetic on elements of the + array using Python's optimized math. + + Examples + -------- + >>> x = np.random.randint(9, size=(3, 3)) + >>> x + array([[3, 1, 7], + [2, 8, 3], + [8, 5, 3]]) + >>> x.item(3) + 2 + >>> x.item(7) + 5 + >>> x.item((0, 1)) + 1 + >>> x.item((2, 2)) + 3 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', + """ + a.itemset(*args) + + Insert scalar into an array (scalar is cast to array's dtype, if possible) + + There must be at least 1 argument, and define the last argument + as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster + than ``a[args] = item``. The item should be a scalar value and `args` + must select a single item in the array `a`. + + Parameters + ---------- + \\*args : Arguments + If one argument: a scalar, only used in case `a` is of size 1. + If two arguments: the last argument is the value to be set + and must be a scalar, the first argument specifies a single array + element location. It is either an int or a tuple. + + Notes + ----- + Compared to indexing syntax, `itemset` provides some speed increase + for placing a scalar into a particular location in an `ndarray`, + if you must do this. However, generally this is discouraged: + among other problems, it complicates the appearance of the code. + Also, when using `itemset` (and `item`) inside a loop, be sure + to assign the methods to a local variable to avoid the attribute + look-up at each loop iteration. + + Examples + -------- + >>> x = np.random.randint(9, size=(3, 3)) + >>> x + array([[3, 1, 7], + [2, 8, 3], + [8, 5, 3]]) + >>> x.itemset(4, 0) + >>> x.itemset((2, 2), 9) + >>> x + array([[3, 1, 7], + [2, 0, 3], + [8, 5, 9]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('max', + """ + a.max(axis=None, out=None, keepdims=False) + + Return the maximum along a given axis. 
+ + Refer to `numpy.amax` for full documentation. + + See Also + -------- + numpy.amax : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', + """ + a.mean(axis=None, dtype=None, out=None, keepdims=False) + + Returns the average of the array elements along given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('min', + """ + a.min(axis=None, out=None, keepdims=False) + + Return the minimum along a given axis. + + Refer to `numpy.amin` for full documentation. + + See Also + -------- + numpy.amin : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'shares_memory', + """ + shares_memory(a, b, max_work=None) + + Determine if two arrays share memory + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem (maximum number + of candidate solutions to consider). The following special + values are recognized: + + max_work=MAY_SHARE_EXACT (default) + The problem is solved exactly. In this case, the function returns + True only if there is an element shared between the arrays. + max_work=MAY_SHARE_BOUNDS + Only the memory bounds of a and b are checked. + + Raises + ------ + numpy.TooHardError + Exceeded max_work. + + Returns + ------- + out : bool + + See Also + -------- + may_share_memory + + Examples + -------- + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + + """) + + +add_newdoc('numpy.core.multiarray', 'may_share_memory', + """ + may_share_memory(a, b, max_work=None) + + Determine if two arrays might share memory + + A return of True does not necessarily mean that the two arrays + share any element. It just means that they *might*. + + Only the memory bounds of a and b are checked by default. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem. See + `shares_memory` for details. Default for ``may_share_memory`` + is to do a bounds check. + + Returns + ------- + out : bool + + See Also + -------- + shares_memory + + Examples + -------- + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + >>> x = np.zeros([3, 4]) + >>> np.may_share_memory(x[:,0], x[:,1]) + True + + """) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', + """ + arr.newbyteorder(new_order='S') + + Return the array with the same data viewed with a different byte order. + + Equivalent to:: + + arr.view(arr.dtype.newbytorder(new_order)) + + Changes are also made in all fields and sub-arrays of the array data + type. + + + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order specifications + below. `new_order` codes can be any of: + + * 'S' - swap dtype from current to opposite endian + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * {'|', 'I'} - ignore (no change to byte order) + + The default value ('S') results in swapping the current + byte order. The code does a case-insensitive check on the first + letter of `new_order` for the alternatives above. For example, + any of 'B' or 'b' or 'biggish' are valid to specify big-endian. + + + Returns + ------- + new_arr : array + New array object with the dtype reflecting given change to the + byte order. 
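+
+    Examples
+    --------
+    A small doctest sketch; `A` is illustrative data:
+
+    >>> A = np.array([1, 256], dtype='<i2')
+    >>> A.newbyteorder().dtype
+    dtype('>i2')
+    >>> A.newbyteorder()        # same bytes, read with swapped byte order
+    array([256,   1], dtype=int16)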
+ + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', + """ + a.nonzero() + + Return the indices of the elements that are non-zero. + + Refer to `numpy.nonzero` for full documentation. + + See Also + -------- + numpy.nonzero : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', + """ + a.prod(axis=None, dtype=None, out=None, keepdims=False) + + Return the product of the array elements over the given axis + + Refer to `numpy.prod` for full documentation. + + See Also + -------- + numpy.prod : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', + """ + a.ptp(axis=None, out=None, keepdims=False) + + Peak to peak (maximum - minimum) value along a given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('put', + """ + a.put(indices, values, mode='raise') + + Set ``a.flat[n] = values[n]`` for all `n` in indices. + + Refer to `numpy.put` for full documentation. + + See Also + -------- + numpy.put : equivalent function + + """)) + +add_newdoc('numpy.core.multiarray', 'copyto', + """ + copyto(dst, src, casting='same_kind', where=True) + + Copies values from one array to another, broadcasting as necessary. + + Raises a TypeError if the `casting` rule is violated, and if + `where` is provided, it selects which elements to copy. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dst : ndarray + The array into which values are copied. + src : array_like + The array from which values are copied. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when copying. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `dst`, and selects elements to copy from `src` to `dst` + wherever it contains the value True. + + """) + +add_newdoc('numpy.core.multiarray', 'putmask', + """ + putmask(a, mask, values) + + Changes elements of an array based on conditional and input values. + + Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. + + If `values` is not the same size as `a` and `mask` then it will repeat. + This gives behavior different from ``a[mask] = values``. + + Parameters + ---------- + a : array_like + Target array. + mask : array_like + Boolean mask array. It has to be the same shape as `a`. + values : array_like + Values to put into `a` where `mask` is True. If `values` is smaller + than `a` it will be repeated. + + See Also + -------- + place, put, take, copyto + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> np.putmask(x, x>2, x**2) + >>> x + array([[ 0, 1, 2], + [ 9, 16, 25]]) + + If `values` is smaller than `a` it is repeated: + + >>> x = np.arange(5) + >>> np.putmask(x, x>1, [-33, -44]) + >>> x + array([ 0, 1, -33, -44, -33]) + + """) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', + """ + a.ravel([order]) + + Return a flattened array. + + Refer to `numpy.ravel` for full documentation. 
+ + See Also + -------- + numpy.ravel : equivalent function + + ndarray.flat : a flat iterator on the array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', + """ + a.repeat(repeats, axis=None) + + Repeat elements of an array. + + Refer to `numpy.repeat` for full documentation. + + See Also + -------- + numpy.repeat : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', + """ + a.reshape(shape, order='C') + + Returns an array containing the same data with a new shape. + + Refer to `numpy.reshape` for full documentation. + + See Also + -------- + numpy.reshape : equivalent function + + Notes + ----- + Unlike the free function `numpy.reshape`, this method on `ndarray` allows + the elements of the shape parameter to be passed in as separate arguments. + For example, ``a.reshape(10, 11)`` is equivalent to + ``a.reshape((10, 11))``. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', + """ + a.resize(new_shape, refcheck=True) + + Change shape and size of array in-place. + + Parameters + ---------- + new_shape : tuple of ints, or `n` ints + Shape of resized array. + refcheck : bool, optional + If False, reference count will not be checked. Default is True. + + Returns + ------- + None + + Raises + ------ + ValueError + If `a` does not own its own data or references or views to it exist, + and the data memory must be changed. + PyPy only: will always raise if the data memory must be changed, since + there is no reliable way to determine if references or views to it + exist. + + SystemError + If the `order` keyword argument is specified. This behaviour is a + bug in NumPy. + + See Also + -------- + resize : Return a new array with the specified shape. + + Notes + ----- + This reallocates space for the data area if necessary. + + Only contiguous arrays (data elements consecutive in memory) can be + resized. + + The purpose of the reference count check is to make sure you + do not use this array as a buffer for another Python object and then + reallocate the memory. However, reference counts can increase in + other ways so if you are sure that you have not shared the memory + for this array with another Python object, then you may safely set + `refcheck` to False. + + Examples + -------- + Shrinking an array: array is flattened (in the order that the data are + stored in memory), resized, and reshaped: + + >>> a = np.array([[0, 1], [2, 3]], order='C') + >>> a.resize((2, 1)) + >>> a + array([[0], + [1]]) + + >>> a = np.array([[0, 1], [2, 3]], order='F') + >>> a.resize((2, 1)) + >>> a + array([[0], + [2]]) + + Enlarging an array: as above, but missing entries are filled with zeros: + + >>> b = np.array([[0, 1], [2, 3]]) + >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple + >>> b + array([[0, 1, 2], + [3, 0, 0]]) + + Referencing an array prevents resizing... + + >>> c = a + >>> a.resize((1, 1)) + Traceback (most recent call last): + ... + ValueError: cannot resize an array that has been referenced ... + + Unless `refcheck` is False: + + >>> a.resize((1, 1), refcheck=False) + >>> a + array([[0]]) + >>> c + array([[0]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('round', + """ + a.round(decimals=0, out=None) + + Return `a` with each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. 
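+    For example, ``np.array([0.5, 1.5, 2.5]).round()`` gives
+    ``array([0., 2., 2.])``, since NumPy rounds half to even.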
+ + See Also + -------- + numpy.around : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', + """ + a.searchsorted(v, side='left', sorter=None) + + Find indices where elements of v should be inserted in a to maintain order. + + For full documentation, see `numpy.searchsorted` + + See Also + -------- + numpy.searchsorted : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', + """ + a.setfield(val, dtype, offset=0) + + Put a value into a specified place in a field defined by a data-type. + + Place `val` into `a`'s field defined by `dtype` and beginning `offset` + bytes into the field. + + Parameters + ---------- + val : object + Value to be placed in field. + dtype : dtype object + Data-type of the field in which to place `val`. + offset : int, optional + The number of bytes into the field at which to place `val`. + + Returns + ------- + None + + See Also + -------- + getfield + + Examples + -------- + >>> x = np.eye(3) + >>> x.getfield(np.float64) + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> x.setfield(3, np.int32) + >>> x.getfield(np.int32) + array([[3, 3, 3], + [3, 3, 3], + [3, 3, 3]]) + >>> x + array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323], + [ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323], + [ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]]) + >>> x.setfield(np.eye(3), np.int32) + >>> x + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', + """ + a.setflags(write=None, align=None, uic=None) + + Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY), + respectively. + + These Boolean-valued flags affect how numpy interprets the memory + area used by `a` (see Notes below). The ALIGNED flag can only + be set to True if the data is actually aligned according to the type. + The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set + to True. The flag WRITEABLE can only be set to True if the array owns its + own memory, or the ultimate owner of the memory exposes a writeable buffer + interface, or is a string. (The exception for string is made so that + unpickling can be done without copying memory.) + + Parameters + ---------- + write : bool, optional + Describes whether or not `a` can be written to. + align : bool, optional + Describes whether or not `a` is aligned properly for its type. + uic : bool, optional + Describes whether or not `a` is a copy of another "base" array. + + Notes + ----- + Array flags provide information about how the memory area used + for the array is to be interpreted. There are 7 Boolean flags + in use, only four of which can be changed by the user: + WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED. + + WRITEABLE (W) the data area can be written to; + + ALIGNED (A) the data and strides are aligned appropriately for the hardware + (as determined by the compiler); + + UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY; + + WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced + by .base). When the C-API function PyArray_ResolveWritebackIfCopy is + called, the base array will be updated with the contents of this array. + + All flags can be accessed using the single (upper case) letter as well + as the full name. 
+
+    Examples
+    --------
+    >>> y
+    array([[3, 1, 7],
+           [2, 0, 0],
+           [8, 5, 9]])
+    >>> y.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : True
+      WRITEABLE : True
+      ALIGNED : True
+      WRITEBACKIFCOPY : False
+      UPDATEIFCOPY : False
+    >>> y.setflags(write=0, align=0)
+    >>> y.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : True
+      WRITEABLE : False
+      ALIGNED : False
+      WRITEBACKIFCOPY : False
+      UPDATEIFCOPY : False
+    >>> y.setflags(uic=1)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: cannot set WRITEBACKIFCOPY flag to True
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
+    """
+    a.sort(axis=-1, kind='quicksort', order=None)
+
+    Sort an array, in-place.
+
+    Parameters
+    ----------
+    axis : int, optional
+        Axis along which to sort. Default is -1, which means sort along the
+        last axis.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. Default is 'quicksort'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc.  A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    See Also
+    --------
+    numpy.sort : Return a sorted copy of an array.
+    argsort : Indirect sort.
+    lexsort : Indirect stable sort on multiple keys.
+    searchsorted : Find elements in sorted array.
+    partition: Partial sort.
+
+    Notes
+    -----
+    See ``sort`` for notes on the different sorting algorithms.
+
+    Examples
+    --------
+    >>> a = np.array([[1,4], [3,1]])
+    >>> a.sort(axis=1)
+    >>> a
+    array([[1, 4],
+           [1, 3]])
+    >>> a.sort(axis=0)
+    >>> a
+    array([[1, 3],
+           [1, 4]])
+
+    Use the `order` keyword to specify a field to use when sorting a
+    structured array:
+
+    >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
+    >>> a.sort(order='y')
+    >>> a
+    array([('c', 1), ('a', 2)],
+          dtype=[('x', '|S1'), ('y', '<i8')])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
+    """
+    a.partition(kth, axis=-1, kind='introselect', order=None)
+
+    Rearranges the elements in the array in such a way that the value of the
+    element in kth position is in the position it would be in a sorted array.
+    All elements smaller than the kth element are moved before this element
+    and all equal or greater are moved behind it. The ordering of the
+    elements in the two partitions is undefined.
+
+    .. versionadded:: 1.8.0
+
+    Parameters
+    ----------
+    kth : int or sequence of ints
+        Element index to partition by. The kth element value will be in its
+        final sorted position and all smaller elements will be moved before
+        it and all equal or greater elements behind it.
+        The order of all elements in the partitions is undefined.
+        If provided with a sequence of kth it will partition all elements
+        indexed by kth of them into their sorted position at once.
+    axis : int, optional
+        Axis along which to sort. Default is -1, which means sort along the
+        last axis.
+    kind : {'introselect'}, optional
+        Selection algorithm. Default is 'introselect'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need to be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    See Also
+    --------
+    numpy.partition : Return a partitioned copy of an array.
+    argpartition : Indirect partition.
+    sort : Full sort.
+
+    Notes
+    -----
+    See ``np.partition`` for notes on the different algorithms.
+
+    Examples
+    --------
+    >>> a = np.array([3, 4, 2, 1])
+    >>> a.partition(3)
+    >>> a
+    array([2, 1, 3, 4])
+
+    >>> a.partition((1, 3))
+    >>> a
+    array([1, 2, 3, 4])
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
+    """
+    a.squeeze(axis=None)
+
+    Remove single-dimensional entries from the shape of `a`.
+
+    Refer to `numpy.squeeze` for full documentation.
+
+    See Also
+    --------
+    numpy.squeeze : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
+    """
+    a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
+
+    Returns the standard deviation of the array elements along given axis.
+
+    Refer to `numpy.std` for full documentation.
+
+    See Also
+    --------
+    numpy.std : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
+    """
+    a.sum(axis=None, dtype=None, out=None, keepdims=False)
+
+    Return the sum of the array elements over the given axis.
+
+    Refer to `numpy.sum` for full documentation.
+
+    See Also
+    --------
+    numpy.sum : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
+    """
+    a.swapaxes(axis1, axis2)
+
+    Return a view of the array with `axis1` and `axis2` interchanged.
+
+    Refer to `numpy.swapaxes` for full documentation.
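+    For example, if `a` has shape ``(2, 3, 4)``, ``a.swapaxes(0, 2)`` is a
+    view of `a` with shape ``(4, 3, 2)``.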
+
+    See Also
+    --------
+    numpy.swapaxes : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
+    """
+    a.take(indices, axis=None, out=None, mode='raise')
+
+    Return an array formed from the elements of `a` at the given indices.
+
+    Refer to `numpy.take` for full documentation.
+
+    See Also
+    --------
+    numpy.take : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
+    """
+    a.tofile(fid, sep="", format="%s")
+
+    Write array to a file as text or binary (default).
+
+    Data is always written in 'C' order, independent of the order of `a`.
+    The data produced by this method can be recovered using the function
+    fromfile().
+
+    Parameters
+    ----------
+    fid : file or str
+        An open file object, or a string containing a filename.
+    sep : str
+        Separator between array items for text output.
+        If "" (empty), a binary file is written, equivalent to
+        ``file.write(a.tobytes())``.
+    format : str
+        Format string for text file output.
+        Each entry in the array is formatted to text by first converting
+        it to the closest Python type, and then using "format" % item.
+
+    Notes
+    -----
+    This is a convenience function for quick storage of array data.
+    Information on endianness and precision is lost, so this method is not a
+    good choice for files intended to archive data or transport data between
+    machines with different endianness. Some of these problems can be overcome
+    by outputting the data as text files, at the expense of speed and file
+    size.
+
+    When fid is a file object, array contents are directly written to the
+    file, bypassing the file object's ``write`` method. As a result, tofile
+    cannot be used with file objects supporting compression (e.g., GzipFile)
+    or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
+    """
+    a.tolist()
+
+    Return the array as a (possibly nested) list.
+
+    Return a copy of the array data as a (nested) Python list.
+    Data items are converted to the nearest compatible Python type.
+
+    Parameters
+    ----------
+    none
+
+    Returns
+    -------
+    y : list
+        The possibly nested list of array elements.
+
+    Notes
+    -----
+    The array may be recreated, ``a = np.array(a.tolist())``.
+
+    Examples
+    --------
+    >>> a = np.array([1, 2])
+    >>> a.tolist()
+    [1, 2]
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> list(a)
+    [array([1, 2]), array([3, 4])]
+    >>> a.tolist()
+    [[1, 2], [3, 4]]
+
+    """))
+
+
+tobytesdoc = """
+    a.{name}(order='C')
+
+    Construct Python bytes containing the raw data bytes in the array.
+
+    Constructs Python bytes showing a copy of the raw contents of
+    data memory. The bytes object can be produced in either 'C' or 'Fortran',
+    or 'Any' order (the default is 'C'-order). 'Any' order means C-order
+    unless the F_CONTIGUOUS flag in the array is set, in which case it
+    means 'Fortran' order.
+
+    {deprecated}
+
+    Parameters
+    ----------
+    order : {{'C', 'F', None}}, optional
+        Order of the data for multidimensional arrays:
+        C, Fortran, or the same as for the original array.
+
+    Returns
+    -------
+    s : bytes
+        Python bytes exhibiting a copy of `a`'s raw data.
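+
+    Notes
+    -----
+    A round-trip back to an array is possible with ``np.frombuffer`` (an
+    illustrative sketch added by the editor, not part of the upstream text):
+
+    >>> x = np.array([1, 2], dtype=np.int32)
+    >>> np.frombuffer(x.{name}(), dtype=np.int32)
+    array([1, 2], dtype=int32)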
+ + Examples + -------- + >>> x = np.array([[0, 1], [2, 3]]) + >>> x.tobytes() + b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00' + >>> x.tobytes('C') == x.tobytes() + True + >>> x.tobytes('F') + b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00' + + """ + +add_newdoc('numpy.core.multiarray', 'ndarray', + ('tostring', tobytesdoc.format(name='tostring', + deprecated= + 'This function is a compatibility ' + 'alias for tobytes. Despite its ' + 'name it returns bytes not ' + 'strings.'))) +add_newdoc('numpy.core.multiarray', 'ndarray', + ('tobytes', tobytesdoc.format(name='tobytes', + deprecated='.. versionadded:: 1.9.0'))) + +add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', + """ + a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) + + Return the sum along diagonals of the array. + + Refer to `numpy.trace` for full documentation. + + See Also + -------- + numpy.trace : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', + """ + a.transpose(*axes) + + Returns a view of the array with axes transposed. + + For a 1-D array, this has no effect. (To change between column and + row vectors, first cast the 1-D array into a matrix object.) + For a 2-D array, this is the usual matrix transpose. + For an n-D array, if axes are given, their order indicates how the + axes are permuted (see Examples). If axes are not provided and + ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then + ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``. + + Parameters + ---------- + axes : None, tuple of ints, or `n` ints + + * None or no argument: reverses the order of the axes. + + * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s + `i`-th axis becomes `a.transpose()`'s `j`-th axis. + + * `n` ints: same as an n-tuple of the same ints (this form is + intended simply as a "convenience" alternative to the tuple form) + + Returns + ------- + out : ndarray + View of `a`, with axes suitably permuted. + + See Also + -------- + ndarray.T : Array property returning the array transposed. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.transpose() + array([[1, 3], + [2, 4]]) + >>> a.transpose((1, 0)) + array([[1, 3], + [2, 4]]) + >>> a.transpose(1, 0) + array([[1, 3], + [2, 4]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('var', + """ + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False) + + Returns the variance of the array elements, along given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('view', + """ + a.view(dtype=None, type=None) + + New view of array with the same data. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. The + default, None, results in the view having the same data-type as `a`. + This argument can also be specified as an ndarray sub-class, which + then specifies the type of the returned object (this is equivalent to + setting the ``type`` parameter). + type : Python type, optional + Type of the returned view, e.g., ndarray or matrix. Again, the + default None results in type preservation. 
+
+    Notes
+    -----
+    ``a.view()`` is used two different ways:
+
+    ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+    of the array's memory with a different data-type.  This can cause a
+    reinterpretation of the bytes of memory.
+
+    ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+    returns an instance of `ndarray_subclass` that looks at the same array
+    (same shape, dtype, etc.)  This does not cause a reinterpretation of the
+    memory.
+
+    For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+    bytes per entry than the previous dtype (for example, converting a
+    regular array to a structured array), then the behavior of the view
+    cannot be predicted just from the superficial appearance of ``a`` (shown
+    by ``print(a)``). It also depends on exactly how ``a`` is stored in
+    memory.  Therefore if ``a`` is C-ordered versus fortran-ordered, versus
+    defined as a slice or transpose, etc., the view may give different
+    results.
+
+
+    Examples
+    --------
+    >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+
+    Viewing array data using a different type and dtype:
+
+    >>> y = x.view(dtype=np.int16, type=np.matrix)
+    >>> y
+    matrix([[513]], dtype=int16)
+    >>> print(type(y))
+    <class 'numpy.matrixlib.defmatrix.matrix'>
+
+    Creating a view on a structured array so it can be used in calculations
+
+    >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+    >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+    >>> xv
+    array([[1, 2],
+           [3, 4]], dtype=int8)
+    >>> xv.mean(0)
+    array([ 2.,  3.])
+
+    Making changes to the view changes the underlying array
+
+    >>> xv[0,1] = 20
+    >>> print(x)
+    [(1, 20) (3, 4)]
+
+    Using a view to convert an array to a recarray:
+
+    >>> z = x.view(np.recarray)
+    >>> z.a
+    array([1], dtype=int8)
+
+    Views share data:
+
+    >>> x[0] = (9, 10)
+    >>> z[0]
+    (9, 10)
+
+    Views that change the dtype size (bytes per entry) should normally be
+    avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+    >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
+    >>> y = x[:, 0:2]
+    >>> y
+    array([[1, 2],
+           [4, 5]], dtype=int16)
+    >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: new type not compatible with array.
+    >>> z = y.copy()
+    >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+    array([[(1, 2)],
+           [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+    """))
+
+
+##############################################################################
+#
+# umath functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.umath', 'frompyfunc',
+    """
+    frompyfunc(func, nin, nout)
+
+    Takes an arbitrary Python function and returns a NumPy ufunc.
+
+    Can be used, for example, to add broadcasting to a built-in Python
+    function (see Examples section).
+
+    Parameters
+    ----------
+    func : Python function object
+        An arbitrary Python function.
+    nin : int
+        The number of input arguments.
+    nout : int
+        The number of objects returned by `func`.
+
+    Returns
+    -------
+    out : ufunc
+        Returns a NumPy universal function (``ufunc``) object.
+
+    See Also
+    --------
+    vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
+
+    Notes
+    -----
+    The returned ufunc always returns PyObject arrays.
+
+    Examples
+    --------
+    Use frompyfunc to add broadcasting to the Python function ``oct``:
+
+    >>> oct_array = np.frompyfunc(oct, 1, 1)
+    >>> oct_array(np.array((10, 30, 100)))
+    array([012, 036, 0144], dtype=object)
+    >>> np.array((oct(10), oct(30), oct(100))) # for comparison
+    array(['012', '036', '0144'],
+          dtype='|S4')
+
+    """)
+
+add_newdoc('numpy.core.umath', 'geterrobj',
+    """
+    geterrobj()
+
+    Return the current object that defines floating-point error handling.
+
+    The error object contains all information that defines the error handling
+    behavior in NumPy. `geterrobj` is used internally by the other
+    functions that get and set error handling behavior (`geterr`, `seterr`,
+    `geterrcall`, `seterrcall`).
+
+    Returns
+    -------
+    errobj : list
+        The error object, a list containing three elements:
+        [internal numpy buffer size, error mask, error callback function].
+
+        The error mask is a single integer that holds the treatment information
+        on all four floating point errors. The information for each error type
+        is contained in three bits of the integer. If we print it in base 8, we
+        can see what treatment is set for "invalid", "under", "over", and
+        "divide" (in that order). The printed string can be interpreted with
+
+        * 0 : 'ignore'
+        * 1 : 'warn'
+        * 2 : 'raise'
+        * 3 : 'call'
+        * 4 : 'print'
+        * 5 : 'log'
+
+    See Also
+    --------
+    seterrobj, seterr, geterr, seterrcall, geterrcall
+    getbufsize, setbufsize
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> np.geterrobj()  # first get the defaults
+    [10000, 0, None]
+
+    >>> def err_handler(type, flag):
+    ...     print("Floating point error (%s), with flag %s" % (type, flag))
+    ...
+    >>> old_bufsize = np.setbufsize(20000)
+    >>> old_err = np.seterr(divide='raise')
+    >>> old_handler = np.seterrcall(err_handler)
+    >>> np.geterrobj()
+    [20000, 2, <function err_handler at 0x...>]
+
+    >>> old_err = np.seterr(all='ignore')
+    >>> np.base_repr(np.geterrobj()[1], 8)
+    '0'
+    >>> old_err = np.seterr(divide='warn', over='log', under='call',
+    ...                     invalid='print')
+    >>> np.base_repr(np.geterrobj()[1], 8)
+    '4351'
+
+    """)
+
+add_newdoc('numpy.core.umath', 'seterrobj',
+    """
+    seterrobj(errobj)
+
+    Set the object that defines floating-point error handling.
+
+    The error object contains all information that defines the error handling
+    behavior in NumPy. `seterrobj` is used internally by the other
+    functions that set error handling behavior (`seterr`, `seterrcall`).
+
+    Parameters
+    ----------
+    errobj : list
+        The error object, a list containing three elements:
+        [internal numpy buffer size, error mask, error callback function].
+
+        The error mask is a single integer that holds the treatment information
+        on all four floating point errors. The information for each error type
+        is contained in three bits of the integer. If we print it in base 8, we
+        can see what treatment is set for "invalid", "under", "over", and
+        "divide" (in that order). The printed string can be interpreted with
+
+        * 0 : 'ignore'
+        * 1 : 'warn'
+        * 2 : 'raise'
+        * 3 : 'call'
+        * 4 : 'print'
+        * 5 : 'log'
+
+    See Also
+    --------
+    geterrobj, seterr, geterr, seterrcall, geterrcall
+    getbufsize, setbufsize
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> old_errobj = np.geterrobj()  # first get the defaults
+    >>> old_errobj
+    [10000, 0, None]
+
+    >>> def err_handler(type, flag):
+    ...     print("Floating point error (%s), with flag %s" % (type, flag))
+    ...
+    >>> new_errobj = [20000, 12, err_handler]
+    >>> np.seterrobj(new_errobj)
+    >>> np.base_repr(12, 8)  # int for divide=4 ('print') and over=1 ('warn')
+    '14'
+    >>> np.geterr()
+    {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
+    >>> np.geterrcall() is err_handler
+    True
+
+    """)
+
+
+##############################################################################
+#
+# compiled_base functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'add_docstring',
+    """
+    add_docstring(obj, docstring)
+
+    Add a docstring to a built-in obj if possible.
+    If the obj already has a docstring, raise a RuntimeError.
+    If this routine does not know how to add a docstring to the object,
+    raise a TypeError.
+    """)
+
+add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
+    """
+    add_ufunc_docstring(ufunc, new_docstring)
+
+    Replace the docstring for a ufunc with new_docstring.
+    This method will only work if the current docstring for
+    the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
+
+    Parameters
+    ----------
+    ufunc : numpy.ufunc
+        A ufunc whose current doc is NULL.
+    new_docstring : string
+        The new docstring for the ufunc.
+
+    Notes
+    -----
+    This method allocates memory for new_docstring on
+    the heap. Technically this creates a memory leak, since this
+    memory will not be reclaimed until the end of the program
+    even if the ufunc itself is removed. However this will only
+    be a problem if the user is repeatedly creating ufuncs with
+    no documentation, adding documentation via add_newdoc_ufunc,
+    and then throwing away the ufunc.
+    """)
+
+add_newdoc('numpy.core.multiarray', 'packbits',
+    """
+    packbits(myarray, axis=None)
+
+    Packs the elements of a binary-valued array into bits in a uint8 array.
+
+    The result is padded to full bytes by inserting zero bits at the end.
+
+    Parameters
+    ----------
+    myarray : array_like
+        An array of integers or booleans whose elements should be packed to
+        bits.
+    axis : int, optional
+        The dimension over which bit-packing is done.
+        ``None`` implies packing the flattened array.
+
+    Returns
+    -------
+    packed : ndarray
+        Array of type uint8 whose elements represent bits corresponding to the
+        logical (0 or nonzero) value of the input elements. The shape of
+        `packed` has the same number of dimensions as the input (unless `axis`
+        is None, in which case the output is 1-D).
+
+    See Also
+    --------
+    unpackbits: Unpacks elements of a uint8 array into a binary-valued output
+                array.
+
+    Examples
+    --------
+    >>> a = np.array([[[1,0,1],
+    ...                [0,1,0]],
+    ...               [[1,1,0],
+    ...                [0,0,1]]])
+    >>> b = np.packbits(a, axis=-1)
+    >>> b
+    array([[[160],[64]],[[192],[32]]], dtype=uint8)
+
+    Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
+    and 32 = 0010 0000.
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'unpackbits',
+    """
+    unpackbits(myarray, axis=None)
+
+    Unpacks elements of a uint8 array into a binary-valued output array.
+
+    Each element of `myarray` represents a bit-field that should be unpacked
+    into a binary-valued output array. The shape of the output array is either
+    1-D (if `axis` is None) or the same shape as the input array with unpacking
+    done along the axis specified.
+
+    Parameters
+    ----------
+    myarray : ndarray, uint8 type
+        Input array.
+    axis : int, optional
+        The dimension over which bit-unpacking is done.
+        ``None`` implies unpacking the flattened array.
+
+    Returns
+    -------
+    unpacked : ndarray, uint8 type
+        The elements are binary-valued (0 or 1).
+
+    See Also
+    --------
+    packbits : Packs the elements of a binary-valued array into bits in a uint8
+               array.
+
+    Examples
+    --------
+    >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
+    >>> a
+    array([[ 2],
+           [ 7],
+           [23]], dtype=uint8)
+    >>> b = np.unpackbits(a, axis=1)
+    >>> b
+    array([[0, 0, 0, 0, 0, 0, 1, 0],
+           [0, 0, 0, 0, 0, 1, 1, 1],
+           [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
+
+    """)
+
+add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
+    """
+    format_float_OSprintf_g(val, precision)
+
+    Print a floating point scalar using the system's printf function,
+    equivalent to:
+
+        printf("%.*g", precision, val);
+
+    for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
+    method is designed to help cross-validate the format_float_* methods.
+
+    Parameters
+    ----------
+    val : python float or numpy floating scalar
+        Value to format.
+
+    precision : non-negative integer, optional
+        Precision given to printf.
+
+    Returns
+    -------
+    rep : string
+        The string representation of the floating point value
+
+    See Also
+    --------
+    format_float_scientific
+    format_float_positional
+    """)
+
+
+##############################################################################
+#
+# Documentation for ufunc attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ufunc object
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc',
+    """
+    Functions that operate element by element on whole arrays.
+
+    To see the documentation for a specific ufunc, use `info`.  For
+    example, ``np.info(np.sin)``.  Because ufuncs are written in C
+    (for speed) and linked into Python with NumPy's ufunc facility,
+    Python's help() function finds this page whenever help() is called
+    on a ufunc.
+
+    A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
+
+    Calling ufuncs:
+    ===============
+
+    op(*x[, out], where=True, **kwargs)
+    Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
+
+    The broadcasting rules are:
+
+    * Dimensions of length 1 may be prepended to either array.
+    * Arrays may be repeated along dimensions of length 1.
+
+    Parameters
+    ----------
+    *x : array_like
+        Input arrays.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        Alternate array object(s) in which to put the result; if provided, it
+        must have a shape that the inputs broadcast to. A tuple of arrays
+        (possible only as a keyword argument) must have length equal to the
+        number of outputs; use `None` for uninitialized outputs to be
+        allocated by the ufunc.
+    where : array_like, optional
+        Values of True indicate to calculate the ufunc at that position, values
+        of False indicate to leave the value in the output alone.  Note that if
+        an uninitialized return array is created via the default ``out=None``,
+        then the elements where the values are False will remain uninitialized.
+    **kwargs
+        For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
+
+    Returns
+    -------
+    r : ndarray or tuple of ndarray
+        `r` will have the shape that the arrays in `x` broadcast to; if `out` is
+        provided, it will be returned. If not, `r` will be allocated and
+        may contain uninitialized values. If the function has more than one
+        output, then the result will be a tuple of arrays.
+
+    """)
+
+
+##############################################################################
+#
+# ufunc attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc', ('identity',
+    """
+    The identity value.
+
+    Data attribute containing the identity element for the ufunc, if it has one.
+    If it does not, the attribute value is None.
+
+    Examples
+    --------
+    >>> np.add.identity
+    0
+    >>> np.multiply.identity
+    1
+    >>> np.power.identity
+    1
+    >>> print(np.exp.identity)
+    None
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('nargs',
+    """
+    The number of arguments.
+
+    Data attribute containing the number of arguments the ufunc takes, including
+    optional ones.
+
+    Notes
+    -----
+    Typically this value will be one more than what you might expect because all
+    ufuncs take the optional "out" argument.
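+
+    In other words, ``nargs`` equals ``nin + nout`` (an editor's note, added
+    for clarity):
+
+    >>> np.add.nin + np.add.nout
+    3
+    >>> np.add.nargs
+    3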
+ + Examples + -------- + >>> np.add.nargs + 3 + >>> np.multiply.nargs + 3 + >>> np.power.nargs + 3 + >>> np.exp.nargs + 2 + """)) + +add_newdoc('numpy.core', 'ufunc', ('nin', + """ + The number of inputs. + + Data attribute containing the number of arguments the ufunc treats as input. + + Examples + -------- + >>> np.add.nin + 2 + >>> np.multiply.nin + 2 + >>> np.power.nin + 2 + >>> np.exp.nin + 1 + """)) + +add_newdoc('numpy.core', 'ufunc', ('nout', + """ + The number of outputs. + + Data attribute containing the number of arguments the ufunc treats as output. + + Notes + ----- + Since all ufuncs can take output arguments, this will always be (at least) 1. + + Examples + -------- + >>> np.add.nout + 1 + >>> np.multiply.nout + 1 + >>> np.power.nout + 1 + >>> np.exp.nout + 1 + + """)) + +add_newdoc('numpy.core', 'ufunc', ('ntypes', + """ + The number of types. + + The number of numerical NumPy types - of which there are 18 total - on which + the ufunc can operate. + + See Also + -------- + numpy.ufunc.types + + Examples + -------- + >>> np.add.ntypes + 18 + >>> np.multiply.ntypes + 18 + >>> np.power.ntypes + 17 + >>> np.exp.ntypes + 7 + >>> np.remainder.ntypes + 14 + + """)) + +add_newdoc('numpy.core', 'ufunc', ('types', + """ + Returns a list with types grouped input->output. + + Data attribute listing the data-type "Domain-Range" groupings the ufunc can + deliver. The data-types are given using the character codes. + + See Also + -------- + numpy.ufunc.ntypes + + Examples + -------- + >>> np.add.types + ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', + 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', + 'GG->G', 'OO->O'] + + >>> np.multiply.types + ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', + 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', + 'GG->G', 'OO->O'] + + >>> np.power.types + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', + 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', + 'OO->O'] + + >>> np.exp.types + ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] + + >>> np.remainder.types + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', + 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] + + """)) + +add_newdoc('numpy.core', 'ufunc', ('signature', + """ + Definition of the core elements a generalized ufunc operates on. + + The signature determines how the dimensions of each input/output array + are split into core and loop dimensions: + + 1. Each dimension in the signature is matched to a dimension of the + corresponding passed-in array, starting from the end of the shape tuple. + 2. Core dimensions assigned to the same label in the signature must have + exactly matching sizes, no broadcasting is performed. + 3. The core dimensions are removed from all inputs and the remaining + dimensions are broadcast together, defining the loop dimensions. + + Notes + ----- + Generalized ufuncs are used internally in many linalg functions, and in + the testing suite; the examples below are taken from these. + For ufuncs that operate on scalars, the signature is `None`, which is + equivalent to '()' for every argument. 
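+
+    As a smaller illustration (an editor's sketch, using the same test
+    module as the examples below), ``inner1d`` consumes one core dimension
+    ``n`` from each input and produces scalar cores, so a pair of ``(3, 4)``
+    inputs yields a loop dimension of 3:
+
+    >>> np.core.umath_tests.inner1d.signature
+    '(n),(n)->()'
+    >>> np.core.umath_tests.inner1d(np.ones((3, 4)), np.ones((3, 4)))
+    array([ 4.,  4.,  4.])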
+
+    Examples
+    --------
+    >>> np.core.umath_tests.matrix_multiply.signature
+    '(m,n),(n,p)->(m,p)'
+    >>> np.linalg._umath_linalg.det.signature
+    '(m,m)->()'
+    >>> np.add.signature is None
+    True  # equivalent to '(),()->()'
+    """))
+
+##############################################################################
+#
+# ufunc methods
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc', ('reduce',
+    """
+    reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial)
+
+    Reduces `a`'s dimension by one, by applying ufunc along one axis.
+
+    Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`.  Then
+    :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+    the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
+    ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+    For a one-dimensional array, reduce produces results equivalent to:
+    ::
+
+     r = op.identity # op = ufunc
+     for i in range(len(A)):
+       r = op(r, A[i])
+     return r
+
+    For example, add.reduce() is equivalent to sum().
+
+    Parameters
+    ----------
+    a : array_like
+        The array to act on.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a reduction is performed.
+        The default (`axis` = 0) is to perform a reduction over the first
+        dimension of the input array. `axis` may be negative, in
+        which case it counts from the last to the first axis.
+
+        .. versionadded:: 1.7.0
+
+        If this is `None`, a reduction is performed over all the axes.
+        If this is a tuple of ints, a reduction is performed on multiple
+        axes, instead of a single axis or all the axes as before.
+
+        For operations which are either not commutative or not associative,
+        doing a reduction over multiple axes is not well-defined. The
+        ufuncs do not currently raise an exception in this case, but will
+        likely do so in the future.
+    dtype : data-type code, optional
+        The type used to represent the intermediate results. Defaults
+        to the data-type of the output array if this is provided, or
+        the data-type of the input array if no output array is provided.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        A location into which the result is stored. If not provided or `None`,
+        a freshly-allocated array is returned. For consistency with
+        :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+        1-element tuple.
+
+        .. versionchanged:: 1.13.0
+           Tuples are allowed for keyword argument.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `arr`.
+
+        .. versionadded:: 1.7.0
+    initial : scalar, optional
+        The value with which to start the reduction.
+        If the ufunc has no identity or the dtype is object, this defaults
+        to None - otherwise it defaults to ufunc.identity.
+        If ``None`` is given, the first element of the reduction is used,
+        and an error is thrown if the reduction is empty.
+
+        .. versionadded:: 1.15.0
+
+    Returns
+    -------
+    r : ndarray
+        The reduced array. If `out` was supplied, `r` is a reference to it.
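+
+    Notes
+    -----
+    A short illustration of `keepdims` (an editor's addition, not part of
+    the upstream docstring):
+
+    >>> np.add.reduce(np.arange(6).reshape(2, 3), keepdims=True)
+    array([[3, 5, 7]])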
+
+    Examples
+    --------
+    >>> np.multiply.reduce([2,3,5])
+    30
+
+    A multi-dimensional array example:
+
+    >>> X = np.arange(8).reshape((2,2,2))
+    >>> X
+    array([[[0, 1],
+            [2, 3]],
+           [[4, 5],
+            [6, 7]]])
+    >>> np.add.reduce(X, 0)
+    array([[ 4,  6],
+           [ 8, 10]])
+    >>> np.add.reduce(X) # confirm: default axis value is 0
+    array([[ 4,  6],
+           [ 8, 10]])
+    >>> np.add.reduce(X, 1)
+    array([[ 2,  4],
+           [10, 12]])
+    >>> np.add.reduce(X, 2)
+    array([[ 1,  5],
+           [ 9, 13]])
+
+    You can use the ``initial`` keyword argument to initialize the reduction
+    with a different value.
+
+    >>> np.add.reduce([10], initial=5)
+    15
+    >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
+    array([14., 14.])
+
+    Allows reductions of empty arrays where they would normally fail, i.e.
+    for ufuncs without an identity.
+
+    >>> np.minimum.reduce([], initial=np.inf)
+    inf
+    >>> np.minimum.reduce([])
+    Traceback (most recent call last):
+        ...
+    ValueError: zero-size array to reduction operation minimum which has no identity
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('accumulate',
+    """
+    accumulate(array, axis=0, dtype=None, out=None)
+
+    Accumulate the result of applying the operator to all elements.
+
+    For a one-dimensional array, accumulate produces results equivalent to::
+
+      r = np.empty(len(A))
+      t = op.identity        # op = the ufunc being applied to A's elements
+      for i in range(len(A)):
+          t = op(t, A[i])
+          r[i] = t
+      return r
+
+    For example, add.accumulate() is equivalent to np.cumsum().
+
+    For a multi-dimensional array, accumulate is applied along only one
+    axis (axis zero by default; see Examples below) so repeated use is
+    necessary if one wants to accumulate over multiple axes.
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    axis : int, optional
+        The axis along which to apply the accumulation; default is zero.
+    dtype : data-type code, optional
+        The data-type used to represent the intermediate results. Defaults
+        to the data-type of the output array if such is provided, or the
+        data-type of the input array if no output array is provided.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        A location into which the result is stored. If not provided or `None`,
+        a freshly-allocated array is returned. For consistency with
+        :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+        1-element tuple.
+
+        .. versionchanged:: 1.13.0
+           Tuples are allowed for keyword argument.
+
+    Returns
+    -------
+    r : ndarray
+        The accumulated values. If `out` was supplied, `r` is a reference to
+        `out`.
+
+    Examples
+    --------
+    1-D array examples:
+
+    >>> np.add.accumulate([2, 3, 5])
+    array([ 2,  5, 10])
+    >>> np.multiply.accumulate([2, 3, 5])
+    array([ 2,  6, 30])
+
+    2-D array examples:
+
+    >>> I = np.eye(2)
+    >>> I
+    array([[ 1.,  0.],
+           [ 0.,  1.]])
+
+    Accumulate along axis 0 (rows), down columns:
+
+    >>> np.add.accumulate(I, 0)
+    array([[ 1.,  0.],
+           [ 1.,  1.]])
+    >>> np.add.accumulate(I) # no axis specified = axis zero
+    array([[ 1.,  0.],
+           [ 1.,  1.]])
+
+    Accumulate along axis 1 (columns), through rows:
+
+    >>> np.add.accumulate(I, 1)
+    array([[ 1.,  1.],
+           [ 0.,  1.]])
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('reduceat',
+    """
+    reduceat(a, indices, axis=0, dtype=None, out=None)
+
+    Performs a (local) reduce with specified slices over a single axis.
+ + For i in ``range(len(indices))``, `reduceat` computes + ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th + generalized "row" parallel to `axis` in the final result (i.e., in a + 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if + `axis = 1`, it becomes the i-th column). There are three exceptions to this: + + * when ``i = len(indices) - 1`` (so for the last index), + ``indices[i+1] = a.shape[axis]``. + * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is + simply ``a[indices[i]]``. + * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. + + The shape of the output depends on the size of `indices`, and may be + larger than `a` (this happens if ``len(indices) > a.shape[axis]``). + + Parameters + ---------- + a : array_like + The array to act on. + indices : array_like + Paired indices, comma separated (not colon), specifying slices to + reduce. + axis : int, optional + The axis along which to apply the reduceat. + dtype : data-type code, optional + The type used to represent the intermediate results. Defaults + to the data type of the output array if this is provided, or + the data type of the input array if no output array is provided. + out : ndarray, None, or tuple of ndarray and None, optional + A location into which the result is stored. If not provided or `None`, + a freshly-allocated array is returned. For consistency with + :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a + 1-element tuple. + + .. versionchanged:: 1.13.0 + Tuples are allowed for keyword argument. + + Returns + ------- + r : ndarray + The reduced values. If `out` was supplied, `r` is a reference to + `out`. + + Notes + ----- + A descriptive example: + + If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as + ``ufunc.reduceat(a, indices)[::2]`` where `indices` is + ``range(len(array) - 1)`` with a zero placed + in every other element: + ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. + + Don't be fooled by this attribute's name: `reduceat(a)` is not + necessarily smaller than `a`. + + Examples + -------- + To take the running sum of four successive values: + + >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] + array([ 6, 10, 14, 18]) + + A 2-D example: + + >>> x = np.linspace(0, 15, 16).reshape(4,4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [ 12., 13., 14., 15.]]) + + :: + + # reduce such that the result has the following five rows: + # [row1 + row2 + row3] + # [row4] + # [row2] + # [row3] + # [row1 + row2 + row3 + row4] + + >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) + array([[ 12., 15., 18., 21.], + [ 12., 13., 14., 15.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [ 24., 28., 32., 36.]]) + + :: + + # reduce such that result has the following two columns: + # [col1 * col2 * col3, col4] + + >>> np.multiply.reduceat(x, [0, 3], 1) + array([[ 0., 3.], + [ 120., 7.], + [ 720., 11.], + [ 2184., 15.]]) + + """)) + +add_newdoc('numpy.core', 'ufunc', ('outer', + """ + outer(A, B, **kwargs) + + Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. + + Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of + ``op.outer(A, B)`` is an array of dimension M + N such that: + + .. 
math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
+       op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
+
+    For `A` and `B` one-dimensional, this is equivalent to::
+
+      r = empty(len(A),len(B))
+      for i in range(len(A)):
+          for j in range(len(B)):
+              r[i,j] = op(A[i], B[j]) # op = ufunc in question
+
+    Parameters
+    ----------
+    A : array_like
+        First array
+    B : array_like
+        Second array
+    kwargs : any
+        Arguments to pass on to the ufunc. Typically `dtype` or `out`.
+
+    Returns
+    -------
+    r : ndarray
+        Output array
+
+    See Also
+    --------
+    numpy.outer
+
+    Examples
+    --------
+    >>> np.multiply.outer([1, 2, 3], [4, 5, 6])
+    array([[ 4,  5,  6],
+           [ 8, 10, 12],
+           [12, 15, 18]])
+
+    A multi-dimensional example:
+
+    >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> A.shape
+    (2, 3)
+    >>> B = np.array([[1, 2, 3, 4]])
+    >>> B.shape
+    (1, 4)
+    >>> C = np.multiply.outer(A, B)
+    >>> C.shape; C
+    (2, 3, 1, 4)
+    array([[[[ 1,  2,  3,  4]],
+            [[ 2,  4,  6,  8]],
+            [[ 3,  6,  9, 12]]],
+           [[[ 4,  8, 12, 16]],
+            [[ 5, 10, 15, 20]],
+            [[ 6, 12, 18, 24]]]])
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('at',
+    """
+    at(a, indices, b=None)
+
+    Performs unbuffered in place operation on operand 'a' for elements
+    specified by 'indices'. For addition ufunc, this method is equivalent to
+    ``a[indices] += b``, except that results are accumulated for elements that
+    are indexed more than once. For example, ``a[[0,0]] += 1`` will only
+    increment the first element once because of buffering, whereas
+    ``add.at(a, [0,0], 1)`` will increment the first element twice.
+
+    .. versionadded:: 1.8.0
+
+    Parameters
+    ----------
+    a : array_like
+        The array to perform in place operation on.
+    indices : array_like or tuple
+        Array like index object or slice object for indexing into first
+        operand. If first operand has multiple dimensions, indices can be a
+        tuple of array like index objects or slice objects.
+    b : array_like
+        Second operand for ufuncs requiring two operands. Operand must be
+        broadcastable over first operand after indexing or slicing.
+
+    Examples
+    --------
+    Set items 0 and 1 to their negative values:
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> np.negative.at(a, [0, 1])
+    >>> a
+    array([-1, -2,  3,  4])
+
+    Increment items 0 and 1, and increment item 2 twice:
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> np.add.at(a, [0, 1, 2, 2], 1)
+    >>> a
+    array([2, 3, 5, 4])
+
+    Add items 0 and 1 in first array to second array,
+    and store results in first array:
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> b = np.array([1, 2])
+    >>> np.add.at(a, [0, 1], b)
+    >>> a
+    array([2, 4, 3, 4])
+
+    """))
+
+##############################################################################
+#
+# Documentation for dtype attributes and methods
+#
+##############################################################################
+
+##############################################################################
+#
+# dtype object
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype',
+    """
+    dtype(obj, align=False, copy=False)
+
+    Create a data type object.
+
+    A numpy array is homogeneous, and contains elements described by a
+    dtype object. A dtype object can be constructed from different
+    combinations of fundamental numeric types.
+
+    Parameters
+    ----------
+    obj
+        Object to be converted to a data type object.
+    align : bool, optional
+        Add padding to the fields to match what a C compiler would output
+        for a similar C-struct.  Can be ``True`` only if `obj` is a
+        dictionary or a comma-separated string.  If a struct dtype is being
+        created, this also sets a sticky alignment flag ``isalignedstruct``.
+    copy : bool, optional
+        Make a new copy of the data-type object. If ``False``, the result
+        may just be a reference to a built-in data-type object.
+
+    See also
+    --------
+    result_type
+
+    Examples
+    --------
+    Using array-scalar type:
+
+    >>> np.dtype(np.int16)
+    dtype('int16')
+
+    Structured type, one field name 'f1', containing int16:
+
+    >>> np.dtype([('f1', np.int16)])
+    dtype([('f1', '<i2')])
+
+    Structured type, one field named 'f1', in itself containing a structured
+    type with one field:
+
+    >>> np.dtype([('f1', [('f1', np.int16)])])
+    dtype([('f1', [('f1', '<i2')])])
+
+    Structured type, two fields: the first field contains an unsigned int, the
+    second an int32:
+
+    >>> np.dtype([('f1', np.uint), ('f2', np.int32)])
+    dtype([('f1', '<u4'), ('f2', '<i4')])
+
+    Using array-protocol type strings:
+
+    >>> np.dtype([('a','f8'),('b','S10')])
+    dtype([('a', '<f8'), ('b', '|S10')])
+
+    Using comma-separated field formats.  The shape is (2,3):
+
+    >>> np.dtype("i4, (2,3)f8")
+    dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+    Using tuples.  ``int`` is a fixed type, 3 the field's shape.  ``void``
+    is a flexible type, here of size 10:
+
+    >>> np.dtype([('hello',(int,3)),('world',np.void,10)])
+    dtype([('hello', '<i4', 3), ('world', '|V10')])
+
+    Subdivide ``int16`` into 2 ``int8``'s, called x and y.  0 and 1 are
+    the offsets in bytes:
+
+    >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+    dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
+
+    Using dictionaries.  Two fields named 'gender' and 'age':
+
+    >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+    dtype([('gender', '|S1'), ('age', '|u1')])
+
+    Offsets in bytes, here 0 and 25:
+
+    >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+    dtype([('surname', '|S25'), ('age', '|u1')])
+
+    """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
+    """
+    The required alignment (bytes) of this data-type according to the compiler.
+
+    More information is available in the C-API section of the manual.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
+    """
+    A character indicating the byte-order of this data-type object.
+
+    One of:
+
+    ===  ==============
+    '='  native
+    '<'  little-endian
+    '>'  big-endian
+    '|'  not applicable
+    ===  ==============
+
+    All built-in data-type objects have byteorder either '=' or '|'.
+
+    Examples
+    --------
+
+    >>> dt = np.dtype('i2')
+    >>> dt.byteorder
+    '='
+    >>> # endian is not relevant for 8 bit numbers
+    >>> np.dtype('i1').byteorder
+    '|'
+    >>> # or ASCII strings
+    >>> np.dtype('S2').byteorder
+    '|'
+    >>> # Even if specific code is given, and it is native
+    >>> # '=' is the byteorder
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = sys_is_le and '<' or '>'
+    >>> swapped_code = sys_is_le and '>' or '<'
+    >>> dt = np.dtype(native_code + 'i2')
+    >>> dt.byteorder
+    '='
+    >>> # Swapped code shows up as itself
+    >>> dt = np.dtype(swapped_code + 'i2')
+    >>> dt.byteorder == swapped_code
+    True
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('char',
+    """A unique character code for each of the 21 different built-in types."""))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
+    """
+    `__array_interface__` description of the data-type.
+
+    The format is that required by the 'descr' key in the
+    `__array_interface__` attribute.
+
+    Warning: This attribute exists specifically for `__array_interface__`,
+    and is not a datatype description compatible with `np.dtype`.
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
+    """
+    Dictionary of named fields defined for this data type, or ``None``.
+
+    The dictionary is indexed by keys that are the names of the fields.
+    Each entry in the dictionary is a tuple fully describing the field::
+
+      (dtype, offset[, title])
+
+    Offset is limited to C int, which is signed and usually 32 bits.
+ If present, the optional title can be any object (if it is a string + or unicode then it will also be a key in the fields dictionary, + otherwise it's meta-data). Notice also that the first two elements + of the tuple can be passed directly as arguments to the ``ndarray.getfield`` + and ``ndarray.setfield`` methods. + + See Also + -------- + ndarray.getfield, ndarray.setfield + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> print(dt.fields) + {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('flags', + """ + Bit-flags describing how this data type is to be interpreted. + + Bit-masks are in `numpy.core.multiarray` as the constants + `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, + `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation + of these flags is in C-API documentation; they are largely useful + for user-defined data-types. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', + """ + Boolean indicating whether this dtype contains any reference-counted + objects in any fields or sub-dtypes. + + Recall that what is actually in the ndarray memory representing + the Python object is the memory address of that object (a pointer). + Special handling may be required, and this attribute is useful for + distinguishing data types that may contain arbitrary Python objects + and data-types that won't. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', + """ + Integer indicating how this dtype relates to the built-in dtypes. + + Read-only. + + = ======================================================================== + 0 if this is a structured array type, with fields + 1 if this is a dtype compiled into numpy (such as ints, floats etc) + 2 if the dtype is for a user-defined numpy type + A user-defined type uses the numpy C-API machinery to extend + numpy to handle a new array type. See + :ref:`user.user-defined-data-types` in the NumPy manual. + = ======================================================================== + + Examples + -------- + >>> dt = np.dtype('i2') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype('f8') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.isbuiltin + 0 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', + """ + Boolean indicating whether the byte order of this dtype is native + to the platform. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct', + """ + Boolean indicating whether the dtype is a struct which maintains + field alignment. This flag is sticky, so when combining multiple + structs together, it is preserved and produces new dtypes which + are also aligned. + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', + """ + The element size of this data-type object. + + For 18 of the 21 types this number is fixed by the data-type. + For the flexible data-types, this number can be anything. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('kind', + """ + A character code (one of 'biufcmMOSUV') identifying the general kind of data. + + = ====================== + b boolean + i signed integer + u unsigned integer + f floating-point + c complex floating-point + m timedelta + M datetime + O object + S (byte-)string + U Unicode + V void + = ====================== + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('name', + """ + A bit-width name for this data-type. 
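+
+    For example (an illustrative addition by the editor):
+
+    >>> np.dtype(np.float64).name
+    'float64'
+    >>> np.dtype(np.int16).name
+    'int16'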
+ + Un-sized flexible data-type objects do not have this attribute. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('names', + """ + Ordered list of field names, or ``None`` if there are no fields. + + The names are ordered according to increasing byte offset. This can be + used, for example, to walk through all of the named fields in offset order. + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.names + ('name', 'grades') + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('num', + """ + A unique number for each of the 21 different built-in types. + + These are roughly ordered from least-to-most precision. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('shape', + """ + Shape tuple of the sub-array if this data type describes a sub-array, + and ``()`` otherwise. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('ndim', + """ + Number of dimensions of the sub-array if this data type describes a + sub-array, and ``0`` otherwise. + + .. versionadded:: 1.13.0 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('str', + """The array-protocol typestring of this data-type object.""")) + +add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', + """ + Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and + None otherwise. + + The *shape* is the fixed shape of the sub-array described by this + data type, and *item_dtype* the data type of the array. + + If a field whose dtype object has this attribute is retrieved, + then the extra dimensions implied by *shape* are tacked on to + the end of the retrieved array. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('type', + """The type object used to instantiate a scalar of this data-type.""")) + +############################################################################## +# +# dtype methods +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', + """ + newbyteorder(new_order='S') + + Return a new dtype with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order specifications + below. The default value ('S') results in swapping the current + byte order. `new_order` codes can be any of: + + * 'S' - swap dtype from current to opposite endian + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * {'|', 'I'} - ignore (no change to byte order) + + The code does a case-insensitive check on the first letter of + `new_order` for these alternatives. For example, any of '>' + or 'B' or 'b' or 'brian' are valid to specify big-endian. + + Returns + ------- + new_dtype : dtype + New dtype object with the given change to the byte order. + + Notes + ----- + Changes are also made in all fields and sub-arrays of the data type. 
+
+    Examples
+    --------
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = sys_is_le and '<' or '>'
+    >>> swapped_code = sys_is_le and '>' or '<'
+    >>> native_dt = np.dtype(native_code+'i2')
+    >>> swapped_dt = np.dtype(swapped_code+'i2')
+    >>> native_dt.newbyteorder('S') == swapped_dt
+    True
+    >>> native_dt.newbyteorder() == swapped_dt
+    True
+    >>> native_dt == swapped_dt.newbyteorder('S')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('=')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('N')
+    True
+    >>> native_dt == native_dt.newbyteorder('|')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+    True
+
+    """))
+
+
+##############################################################################
+#
+# Datetime-related Methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar',
+    """
+    busdaycalendar(weekmask='1111100', holidays=None)
+
+    A business day calendar object that efficiently stores information
+    defining valid days for the busday family of functions.
+
+    The default valid days are Monday through Friday ("business days").
+    A busdaycalendar object can be specified with any set of weekly
+    valid days, plus an optional set of "holiday" dates that will always
+    be invalid.
+
+    Once a busdaycalendar object is created, the weekmask and holidays
+    cannot be modified.
+
+    .. versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days. May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates, no matter which
+        weekday they fall upon.  Holiday dates may be specified in any
+        order, and NaT (not-a-time) dates are ignored.  This list is
+        saved in a normalized form that is suited for fast calculations
+        of valid days.
+
+    Returns
+    -------
+    out : busdaycalendar
+        A business day calendar object containing the specified
+        weekmask and holidays values.
+
+    See Also
+    --------
+    is_busday : Returns a boolean array indicating valid days.
+    busday_offset : Applies an offset counted in valid days.
+    busday_count : Counts how many valid days are in a half-open date range.
+
+    Attributes
+    ----------
+    Note: once a busdaycalendar object is created, you cannot modify the
+    weekmask or holidays.  The attributes return copies of internal data.
+    weekmask : (copy) seven-element array of bool
+    holidays : (copy) sorted array of datetime64[D]
+
+    Examples
+    --------
+    >>> # Some important days in July
+    ... bdd = np.busdaycalendar(
+    ...             holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+    >>> # Default is Monday to Friday weekdays
+    ... bdd.weekmask
+    array([ True,  True,  True,  True,  True, False, False], dtype='bool')
+    >>> # Any holidays already on the weekend are removed
+    ... bdd.holidays
+    array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
+    """)
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
+    """A copy of the seven-element boolean mask indicating valid days."""))
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
+    """A copy of the holiday array indicating additional invalid days."""))
+
+add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
+    """
+    normalize_axis_index(axis, ndim, msg_prefix=None)
+
+    Normalizes an axis index, `axis`, such that it is a valid positive index
+    into the shape of an array with `ndim` dimensions. Raises an AxisError
+    with an appropriate message if this is not possible.
+
+    Used internally by all axis-checking logic.
+
+    .. versionadded:: 1.13.0
+
+    Parameters
+    ----------
+    axis : int
+        The un-normalized index of the axis. Can be negative
+    ndim : int
+        The number of dimensions of the array that `axis` should be normalized
+        against
+    msg_prefix : str
+        A prefix to put before the message, typically the name of the argument
+
+    Returns
+    -------
+    normalized_axis : int
+        The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+    Raises
+    ------
+    AxisError
+        If the axis index is invalid, when `-ndim <= axis < ndim` is false.
+
+    Examples
+    --------
+    >>> normalize_axis_index(0, ndim=3)
+    0
+    >>> normalize_axis_index(1, ndim=3)
+    1
+    >>> normalize_axis_index(-1, ndim=3)
+    2
+
+    >>> normalize_axis_index(3, ndim=3)
+    Traceback (most recent call last):
+    ...
+    AxisError: axis 3 is out of bounds for array of dimension 3
+    >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
+    Traceback (most recent call last):
+    ...
+    AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
+    """)
+
+add_newdoc('numpy.core.multiarray', 'datetime_data',
+    """
+    datetime_data(dtype, /)
+
+    Get information about the step size of a date or time type.
+
+    The returned tuple can be passed as the second argument of
+    `numpy.datetime64` and `numpy.timedelta64`.
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype object, which must be a `datetime64` or `timedelta64` type.
+
+    Returns
+    -------
+    unit : str
+        The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
+        is based.
+    count : int
+        The number of base units in a step.
+
+    Examples
+    --------
+    >>> dt_25s = np.dtype('timedelta64[25s]')
+    >>> np.datetime_data(dt_25s)
+    ('s', 25)
+    >>> np.array(10, dt_25s).astype('timedelta64[s]')
+    array(250, dtype='timedelta64[s]')
+
+    The result can be used to construct a datetime that uses the same units
+    as a timedelta
+
+    >>> np.datetime64('2010', np.datetime_data(dt_25s))
+    numpy.datetime64('2010-01-01T00:00:00', '25s')
+    """)
+
+
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+    """
+    Base class for numpy scalar types.
+
+    Class from which most (all?) numpy scalar types are derived.  For
+    consistency, exposes the same API as `ndarray`, despite many
+    consequent attributes being either "get-only," or completely irrelevant.
+    This is the class from which it is strongly suggested users should derive
+    custom scalar types.
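+
+    For instance, every concrete scalar type is a subclass of `generic`
+    (an editor's illustration):
+
+    >>> issubclass(np.float64, np.generic)
+    True
+    >>> isinstance(np.float64(1.0), np.generic)
+    True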
+
+    """)
+
+# Attributes
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('T',
+    """
+    Not implemented (virtual attribute)
+
+    Class generic exists solely to derive numpy scalars from, and possesses,
+    albeit unimplemented, all the attributes of the ndarray class so as to
+    provide a uniform API.
+
+    See Also
+    --------
+    The corresponding attribute of the derived class of interest.
+
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('base',
+    """
+    Not implemented (virtual attribute)
+
+    Class generic exists solely to derive numpy scalars from, and possesses,
+    albeit unimplemented, all the attributes of the ndarray class so as to
+    provide a uniform API.
+
+    See Also
+    --------
+    The corresponding attribute of the derived class of interest.
+
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('data',
+    """Pointer to start of data."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
+    """Get array data-descriptor."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
+    """The integer value of flags."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
+    """A 1-D view of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
+    """The imaginary part of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
+    """The length of one element in bytes."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
+    """The length of the scalar in bytes."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
+    """The number of array dimensions."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('real',
+    """The real part of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
+    """Tuple of array dimensions."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('size',
+    """The number of elements in the gentype."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
+    """Tuple of bytes steps in each dimension."""))
+
+# Methods
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('all',
+    """
+    Not implemented (virtual attribute)
+
+    Class generic exists solely to derive numpy scalars from, and possesses,
+    albeit unimplemented, all the attributes of the ndarray class
+    so as to provide a uniform API.
+
+    See Also
+    --------
+    The corresponding attribute of the derived class of interest.
+
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('any',
+    """
+    Not implemented (virtual attribute)
+
+    Class generic exists solely to derive numpy scalars from, and possesses,
+    albeit unimplemented, all the attributes of the ndarray class
+    so as to provide a uniform API.
+
+    See Also
+    --------
+    The corresponding attribute of the derived class of interest.
+
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
+    """
+    Not implemented (virtual attribute)
+
+    Class generic exists solely to derive numpy scalars from, and possesses,
+    albeit unimplemented, all the attributes of the ndarray class
+    so as to provide a uniform API.
+
+    See Also
+    --------
+    The corresponding attribute of the derived class of interest.
+
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
+    """
+    Not implemented (virtual attribute)
+
+    Class generic exists solely to derive numpy scalars from, and possesses,
+    albeit unimplemented, all the attributes of the ndarray class
+    so as to provide a uniform API.
+
+    See Also
+    --------
+    The corresponding attribute of the derived class of interest.
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('astype', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class so as to + provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('choose', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('clip', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('compress', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('copy', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. 
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dump', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('fill', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('item', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. 
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('max', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('mean', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('min', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', + """ + newbyteorder(new_order='S') + + Return a new `dtype` with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + The `new_order` code can be any from the following: + + * 'S' - swap dtype from current to opposite endian + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * {'|', 'I'} - ignore (no change to byte order) + + Parameters + ---------- + new_order : str, optional + Byte order to force; a value from the byte order specifications + above. The default value ('S') results in swapping the current + byte order. The code does a case-insensitive check on the first + letter of `new_order` for the alternatives above. For example, + any of 'B' or 'b' or 'biggish' are valid to specify big-endian. + + + Returns + ------- + new_dtype : dtype + New `dtype` object with the given change to the byte order. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('prod', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('put', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. 
+ + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('resize', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('round', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class so as to + provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sort', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. 
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('std', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sum', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('take', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('trace', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. 
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('var', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('view', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + + +############################################################################## +# +# Documentation for scalar type abstract base classes in type hierarchy +# +############################################################################## + + +add_newdoc('numpy.core.numerictypes', 'number', + """ + Abstract base class of all numeric scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'integer', + """ + Abstract base class of all integer scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'signedinteger', + """ + Abstract base class of all signed integer scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'unsignedinteger', + """ + Abstract base class of all unsigned integer scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'inexact', + """ + Abstract base class of all numeric scalar types with a (potentially) + inexact representation of the values in its range, such as + floating-point numbers. + + """) + +add_newdoc('numpy.core.numerictypes', 'floating', + """ + Abstract base class of all floating-point scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'complexfloating', + """ + Abstract base class of all complex number scalar types that are made up of + floating-point numbers. + + """) + +add_newdoc('numpy.core.numerictypes', 'flexible', + """ + Abstract base class of all scalar types without predefined length. + The actual size of these types depends on the specific `np.dtype` + instantiation. + + """) + +add_newdoc('numpy.core.numerictypes', 'character', + """ + Abstract base class of all character string scalar types. 
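+
+    For instance (an editor's aside, hedged; it assumes only that numpy is
+    importable as ``np``):
+
+    >>> issubclass(np.str_, np.character)
+    True
+    >>> issubclass(np.bytes_, np.character)
+    True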
+
+    """)
+
+
+##############################################################################
+#
+# Documentation for concrete scalar classes
+#
+##############################################################################
+
+def numeric_type_aliases(aliases):
+    def type_aliases_gen():
+        for alias, doc in aliases:
+            try:
+                alias_type = getattr(_numerictypes, alias)
+            except AttributeError:
+                # The set of aliases that actually exist varies between platforms
+                pass
+            else:
+                yield (alias_type, alias, doc)
+    return list(type_aliases_gen())
+
+
+possible_aliases = numeric_type_aliases([
+    ('int8', '8-bit signed integer (-128 to 127)'),
+    ('int16', '16-bit signed integer (-32768 to 32767)'),
+    ('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
+    ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
+    ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
+    ('uint8', '8-bit unsigned integer (0 to 255)'),
+    ('uint16', '16-bit unsigned integer (0 to 65535)'),
+    ('uint32', '32-bit unsigned integer (0 to 4294967295)'),
+    ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
+    ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
+    ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
+    ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
+    ('float64', '64-bit-precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
+    ('float96', '96-bit extended-precision floating-point number type'),
+    ('float128', '128-bit extended-precision floating-point number type'),
+    ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
+    ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
+    ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
+    ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
+    ])
+
+
+def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
+    o = getattr(_numerictypes, obj)
+
+    character_code = dtype(o).char
+    canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n    ".format(obj)
+    alias_doc = ''.join("Alias: ``np.{}``.\n    ".format(alias) for alias in fixed_aliases)
+    alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n    ".format(alias, doc)
+                         for (alias_type, alias, doc) in possible_aliases if alias_type is o)
+
+    docstring = """
+    {doc}
+    Character code: ``'{character_code}'``.
+    {canonical_name_doc}{alias_doc}
+    """.format(doc=doc.strip(), character_code=character_code,
+               canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
+
+    add_newdoc('numpy.core.numerictypes', obj, docstring)
+
+
+add_newdoc_for_scalar_type('bool_', ['bool8'],
+    """
+    Boolean type (True or False), stored as a byte.
+    """)
+
+add_newdoc_for_scalar_type('byte', [],
+    """
+    Signed integer type, compatible with C ``char``.
+    """)
+
+add_newdoc_for_scalar_type('short', [],
+    """
+    Signed integer type, compatible with C ``short``.
+    """)
+
+add_newdoc_for_scalar_type('intc', [],
+    """
+    Signed integer type, compatible with C ``int``.
+    """)
+
+add_newdoc_for_scalar_type('int_', [],
+    """
+    Signed integer type, compatible with Python `int` and C ``long``.
+    """)
+
+add_newdoc_for_scalar_type('longlong', [],
+    """
+    Signed integer type, compatible with C ``long long``.
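+
+    A hedged aside (an editor's example, not part of the original
+    docstring): the character code that the ``add_newdoc_for_scalar_type``
+    helper above looks up for this type is fixed across platforms:
+
+    >>> np.dtype(np.longlong).char
+    'q'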
+    """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned char``.
+    """)
+
+add_newdoc_for_scalar_type('ushort', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned short``.
+    """)
+
+add_newdoc_for_scalar_type('uintc', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned int``.
+    """)
+
+add_newdoc_for_scalar_type('uint', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long``.
+    """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+    """)
+
+add_newdoc_for_scalar_type('half', [],
+    """
+    Half-precision floating-point number type.
+    """)
+
+add_newdoc_for_scalar_type('single', [],
+    """
+    Single-precision floating-point number type, compatible with C ``float``.
+    """)
+
+add_newdoc_for_scalar_type('double', ['float_'],
+    """
+    Double-precision floating-point number type, compatible with Python `float`
+    and C ``double``.
+    """)
+
+add_newdoc_for_scalar_type('longdouble', ['longfloat'],
+    """
+    Extended-precision floating-point number type, compatible with C
+    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+    """)
+
+add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
+    """
+    Complex number type composed of two single-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
+    """
+    Complex number type composed of two double-precision floating-point
+    numbers, compatible with Python `complex`.
+    """)
+
+add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
+    """
+    Complex number type composed of two extended-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('object_', [],
+    """
+    Any Python object.
+    """)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.pyc
new file mode 100644
index 0000000..e6ee6c7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.py b/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.pyc
new file mode 100644
index 0000000..6bfc0e6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.py b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.py
new file mode 100644
index 0000000..3a12c8f
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.py
@@ -0,0 +1,341 @@
+"""
+A place for code to be called from the implementation of np.dtype
+
+String handling is much easier to do correctly in Python.
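+
+An editor's illustration of the str/repr pair this module implements
+(a hedged sketch, not part of the original docstring; it assumes numpy is
+importable):
+
+    >>> import numpy as np
+    >>> str(np.dtype(np.float64))
+    'float64'
+    >>> repr(np.dtype(np.float64))
+    "dtype('float64')"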
+""" +from __future__ import division, absolute_import, print_function + +import sys + +import numpy as np + + +_kind_to_stem = { + 'u': 'uint', + 'i': 'int', + 'c': 'complex', + 'f': 'float', + 'b': 'bool', + 'V': 'void', + 'O': 'object', + 'M': 'datetime', + 'm': 'timedelta' +} +if sys.version_info[0] >= 3: + _kind_to_stem.update({ + 'S': 'bytes', + 'U': 'str' + }) +else: + _kind_to_stem.update({ + 'S': 'string', + 'U': 'unicode' + }) + + +def _kind_name(dtype): + try: + return _kind_to_stem[dtype.kind] + except KeyError: + raise RuntimeError( + "internal dtype error, unknown kind {!r}" + .format(dtype.kind) + ) + + +def __str__(dtype): + if dtype.fields is not None: + return _struct_str(dtype, include_align=True) + elif dtype.subdtype: + return _subarray_str(dtype) + elif issubclass(dtype.type, np.flexible) or not dtype.isnative: + return dtype.str + else: + return dtype.name + + +def __repr__(dtype): + arg_str = _construction_repr(dtype, include_align=False) + if dtype.isalignedstruct: + arg_str = arg_str + ", align=True" + return "dtype({})".format(arg_str) + + +def _unpack_field(dtype, offset, title=None): + """ + Helper function to normalize the items in dtype.fields. + + Call as: + + dtype, offset, title = _unpack_field(*dtype.fields[name]) + """ + return dtype, offset, title + + +def _isunsized(dtype): + # PyDataType_ISUNSIZED + return dtype.itemsize == 0 + + +def _construction_repr(dtype, include_align=False, short=False): + """ + Creates a string repr of the dtype, excluding the 'dtype()' part + surrounding the object. This object may be a string, a list, or + a dict depending on the nature of the dtype. This + is the object passed as the first parameter to the dtype + constructor, and if no additional constructor parameters are + given, will reproduce the exact memory layout. + + Parameters + ---------- + short : bool + If true, this creates a shorter repr using 'kind' and 'itemsize', instead + of the longer type name. + + include_align : bool + If true, this includes the 'align=True' parameter + inside the struct dtype construction dict when needed. Use this flag + if you want a proper repr string without the 'dtype()' part around it. + + If false, this does not preserve the + 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for + struct arrays like the regular repr does, because the 'align' + flag is not part of first dtype constructor parameter. This + mode is intended for a full 'repr', where the 'align=True' is + provided as the second parameter. + """ + if dtype.fields is not None: + return _struct_str(dtype, include_align=include_align) + elif dtype.subdtype: + return _subarray_str(dtype) + else: + return _scalar_str(dtype, short=short) + + +def _scalar_str(dtype, short): + byteorder = _byte_order_str(dtype) + + if dtype.type == np.bool_: + if short: + return "'?'" + else: + return "'bool'" + + elif dtype.type == np.object_: + # The object reference may be different sizes on different + # platforms, so it should never include the itemsize here. 
return "'O'"
+
+    elif dtype.type == np.string_:
+        if _isunsized(dtype):
+            return "'S'"
+        else:
+            return "'S%d'" % dtype.itemsize
+
+    elif dtype.type == np.unicode_:
+        if _isunsized(dtype):
+            return "'%sU'" % byteorder
+        else:
+            return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
+
+    # unlike the other types, subclasses of void are preserved - but
+    # historically the repr does not actually reveal the subclass
+    elif issubclass(dtype.type, np.void):
+        if _isunsized(dtype):
+            return "'V'"
+        else:
+            return "'V%d'" % dtype.itemsize
+
+    elif dtype.type == np.datetime64:
+        return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+    elif dtype.type == np.timedelta64:
+        return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+    elif np.issubdtype(dtype, np.number):
+        # Short repr with endianness, like '<f8'
+        if short or dtype.byteorder not in ('=', '|'):
+            return "'%s%s%d'" % (byteorder, dtype.kind, dtype.itemsize)
+
+        # Longer repr, like 'float64'
+        else:
+            return "'%s'" % np.dtype(dtype).name
+
+    elif dtype.isbuiltin == 2:
+        return dtype.type.__name__
+
+    else:
+        raise RuntimeError(
+            "Internal error: NumPy dtype unrecognized type number")
+
+
+def _byte_order_str(dtype):
+    """ Normalize byteorder to '<' or '>' """
+    # hack to obtain the native and swapped byte order characters
+    swapped = np.dtype(int).newbyteorder('s')
+    native = swapped.newbyteorder('s')
+
+    byteorder = dtype.byteorder
+    if byteorder == '=':
+        return native.byteorder
+    if byteorder == 's':
+        # TODO: this path can never be reached
+        return swapped.byteorder
+    elif byteorder == '|':
+        return ''
+    else:
+        return byteorder
+
+
+def _datetime_metadata_str(dtype):
+    # TODO: this duplicates the C append_metastr_to_string
+    unit, count = np.datetime_data(dtype)
+    if unit == 'generic':
+        return ''
+    elif count == 1:
+        return '[{}]'.format(unit)
+    else:
+        return '[{}{}]'.format(count, unit)
+
+
+def _struct_dict_str(dtype, includealignedflag):
+    # unpack the fields dictionary into lists
+    names = dtype.names
+    fld_dtypes = []
+    offsets = []
+    titles = []
+    for name in names:
+        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
+        fld_dtypes.append(fld_dtype)
+        offsets.append(offset)
+        titles.append(title)
+
+    # Build up a string to make the dictionary
+
+    # First, the names
+    ret = "{'names':["
+    ret += ",".join(repr(name) for name in names)
+
+    # Second, the formats
+    ret += "], 'formats':["
+    ret += ",".join(
+        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
+
+    # Third, the offsets
+    ret += "], 'offsets':["
+    ret += ",".join("%d" % offset for offset in offsets)
+
+    # Fourth, the titles
+    if any(title is not None for title in titles):
+        ret += "], 'titles':["
+        ret += ",".join(repr(title) for title in titles)
+
+    # Fifth, the itemsize
+    ret += "], 'itemsize':%d" % dtype.itemsize
+
+    if (includealignedflag and dtype.isalignedstruct):
+        # Finally, the aligned flag
+        ret += ", 'aligned':True}"
+    else:
+        ret += "}"
+
+    return ret
+
+
+def _is_packed(dtype):
+    """
+    Checks whether the structured data type in 'dtype'
+    has a simple layout, where all the fields are in order,
+    and follow each other with no alignment padding.
+
+    When this returns true, the dtype can be reconstructed
+    from a list of the field names and dtypes with no additional
+    dtype parameters.
+
+    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
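+
+    A sketch of the distinction (an editor's example, hedged; it assumes
+    numpy is importable as np and this private helper in scope):
+
+    >>> _is_packed(np.dtype('i1,i8'))
+    True
+    >>> _is_packed(np.dtype('i1,i8', align=True))  # gap after the i1 field
+    False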
+ """ + total_offset = 0 + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + if fld_offset != total_offset: + return False + total_offset += fld_dtype.itemsize + if total_offset != dtype.itemsize: + return False + return True + + +def _struct_list_str(dtype): + items = [] + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + item = "(" + if title is not None: + item += "({!r}, {!r}), ".format(title, name) + else: + item += "{!r}, ".format(name) + # Special case subarray handling here + if fld_dtype.subdtype is not None: + base, shape = fld_dtype.subdtype + item += "{}, {}".format( + _construction_repr(base, short=True), + shape + ) + else: + item += _construction_repr(fld_dtype, short=True) + + item += ")" + items.append(item) + + return "[" + ", ".join(items) + "]" + + +def _struct_str(dtype, include_align): + # The list str representation can't include the 'align=' flag, + # so if it is requested and the struct has the aligned flag set, + # we must use the dict str instead. + if not (include_align and dtype.isalignedstruct) and _is_packed(dtype): + sub = _struct_list_str(dtype) + + else: + sub = _struct_dict_str(dtype, include_align) + + # If the data type isn't the default, void, show it + if dtype.type != np.void: + return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub) + else: + return sub + + +def _subarray_str(dtype): + base, shape = dtype.subdtype + return "({}, {})".format( + _construction_repr(base, short=True), + shape + ) + + +def _name_get(dtype): + # provides dtype.name.__get__ + + if dtype.isbuiltin == 2: + # user dtypes don't promise to do anything special + return dtype.type.__name__ + + # Builtin classes are documented as returning a "bit name" + name = dtype.type.__name__ + + # handle bool_, str_, etc + if name[-1] == '_': + name = name[:-1] + + # append bit counts to str, unicode, and void + if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype): + name += "{}".format(dtype.itemsize * 8) + + # append metadata to datetimes + elif dtype.type in (np.datetime64, np.timedelta64): + name += _datetime_metadata_str(dtype) + + return name diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.pyc new file mode 100644 index 0000000..fd10efd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.py b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.py new file mode 100644 index 0000000..0852b1e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.py @@ -0,0 +1,113 @@ +""" +Conversion from ctypes to dtype. 
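+
+For orientation, the public entry point defined at the bottom of this module
+behaves roughly like this (an editor's sketch, hedged; the ``<i4`` byte order
+shown assumes a little-endian platform):
+
+    >>> import ctypes
+    >>> class Point(ctypes.Structure):
+    ...     _fields_ = [('x', ctypes.c_int32), ('y', ctypes.c_int32)]
+    >>> dtype_from_ctypes_type(Point)
+    dtype([('x', '<i4'), ('y', '<i4')], align=True)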
+
+In an ideal world, we could achieve this through the PEP3118 buffer protocol,
+something like::
+
+    def dtype_from_ctypes_type(t):
+        # needed to ensure that the shape of `t` is within memoryview.format
+        class DummyStruct(ctypes.Structure):
+            _fields_ = [('a', t)]
+
+        # empty to avoid memory allocation
+        ctype_0 = (DummyStruct * 0)()
+        mv = memoryview(ctype_0)
+
+        # convert the struct, and slice back out the field
+        return _dtype_from_pep3118(mv.format)['a']
+
+Unfortunately, this fails because:
+
+* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
+* PEP3118 cannot represent unions, but both numpy and ctypes can
+* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
+"""
+import _ctypes
+import ctypes
+
+import numpy as np
+
+
+def _from_ctypes_array(t):
+    return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
+
+
+def _from_ctypes_structure(t):
+    for item in t._fields_:
+        if len(item) > 2:
+            raise TypeError(
+                "ctypes bitfields have no dtype equivalent")
+
+    if hasattr(t, "_pack_"):
+        formats = []
+        offsets = []
+        names = []
+        current_offset = 0
+        for fname, ftyp in t._fields_:
+            names.append(fname)
+            formats.append(dtype_from_ctypes_type(ftyp))
+            # Each type has a default offset, this is platform dependent
+            # for some types.
+            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
+            current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
+            offsets.append(current_offset)
+            current_offset += ctypes.sizeof(ftyp)
+
+        return np.dtype(dict(
+            formats=formats,
+            offsets=offsets,
+            names=names,
+            itemsize=ctypes.sizeof(t)))
+    else:
+        fields = []
+        for fname, ftyp in t._fields_:
+            fields.append((fname, dtype_from_ctypes_type(ftyp)))
+
+        # by default, ctypes structs are aligned
+        return np.dtype(fields, align=True)
+
+
+def _from_ctypes_scalar(t):
+    """
+    Return the dtype type with endianness included if it's the case
+    """
+    if getattr(t, '__ctype_be__', None) is t:
+        return np.dtype('>' + t._type_)
+    elif getattr(t, '__ctype_le__', None) is t:
+        return np.dtype('<' + t._type_)
+    else:
+        return np.dtype(t._type_)
+
+
+def _from_ctypes_union(t):
+    formats = []
+    offsets = []
+    names = []
+    for fname, ftyp in t._fields_:
+        names.append(fname)
+        formats.append(dtype_from_ctypes_type(ftyp))
+        offsets.append(0)  # Union fields are offset to 0
+
+    return np.dtype(dict(
+        formats=formats,
+        offsets=offsets,
+        names=names,
+        itemsize=ctypes.sizeof(t)))
+
+
+def dtype_from_ctypes_type(t):
+    """
+    Construct a dtype object from a ctypes type
+    """
+    if issubclass(t, _ctypes.Array):
+        return _from_ctypes_array(t)
+    elif issubclass(t, _ctypes._Pointer):
+        raise TypeError("ctypes pointers have no dtype equivalent")
+    elif issubclass(t, _ctypes.Structure):
+        return _from_ctypes_structure(t)
+    elif issubclass(t, _ctypes.Union):
+        return _from_ctypes_union(t)
+    elif isinstance(getattr(t, '_type_', None), str):
+        return _from_ctypes_scalar(t)
+    else:
+        raise NotImplementedError(
+            "Unknown ctypes type {}".format(t.__name__))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.pyc
new file mode 100644
index 0000000..5e64300
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dummy.so b/project/venv/lib/python2.7/site-packages/numpy/core/_dummy.so
new file mode 100755
index 0000000..093ed33
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_dummy.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_internal.py b/project/venv/lib/python2.7/site-packages/numpy/core/_internal.py
new file mode 100644
index 0000000..1d3bb55
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/_internal.py
@@ -0,0 +1,928 @@
+"""
+A place for internal code
+
+Some things are more easily handled in Python.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import re
+import sys
+
+from numpy.compat import unicode
+from numpy.core.overrides import set_module
+from .multiarray import dtype, array, ndarray
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+if (sys.byteorder == 'little'):
+    _nbo = b'<'
+else:
+    _nbo = b'>'
+
+def _makenames_list(adict, align):
+    allfields = []
+    fnames = list(adict.keys())
+    for fname in fnames:
+        obj = adict[fname]
+        n = len(obj)
+        if not isinstance(obj, tuple) or n not in [2, 3]:
+            raise ValueError("entry not a 2- or 3- tuple")
+        if (n > 2) and (obj[2] == fname):
+            continue
+        num = int(obj[1])
+        if (num < 0):
+            raise ValueError("invalid offset.")
+        format = dtype(obj[0], align=align)
+        if (n > 2):
+            title = obj[2]
+        else:
+            title = None
+        allfields.append((fname, format, num, title))
+    # sort by offsets
+    allfields.sort(key=lambda x: x[2])
+    names = [x[0] for x in allfields]
+    formats = [x[1] for x in allfields]
+    offsets = [x[2] for x in allfields]
+    titles = [x[3] for x in allfields]
+
+    return names, formats, offsets, titles
+
+# Called in PyArray_DescrConverter function when
+# a dictionary without "names" and "formats"
+# fields is used as a data-type descriptor.
+def _usefields(adict, align):
+    try:
+        names = adict[-1]
+    except KeyError:
+        names = None
+    if names is None:
+        names, formats, offsets, titles = _makenames_list(adict, align)
+    else:
+        formats = []
+        offsets = []
+        titles = []
+        for name in names:
+            res = adict[name]
+            formats.append(res[0])
+            offsets.append(res[1])
+            if (len(res) > 2):
+                titles.append(res[2])
+            else:
+                titles.append(None)
+
+    return dtype({"names": names,
+                  "formats": formats,
+                  "offsets": offsets,
+                  "titles": titles}, align)
+
+
+# construct an array_protocol descriptor list
+# from the fields attribute of a descriptor
+# This calls itself recursively but should eventually hit
+# a descriptor that has no fields and then return
+# a simple typestring
+
+def _array_descr(descriptor):
+    fields = descriptor.fields
+    if fields is None:
+        subdtype = descriptor.subdtype
+        if subdtype is None:
+            if descriptor.metadata is None:
+                return descriptor.str
+            else:
+                new = descriptor.metadata.copy()
+                if new:
+                    return (descriptor.str, new)
+                else:
+                    return descriptor.str
+        else:
+            return (_array_descr(subdtype[0]), subdtype[1])
+
+    names = descriptor.names
+    ordered_fields = [fields[x] + (x,) for x in names]
+    result = []
+    offset = 0
+    for field in ordered_fields:
+        if field[1] > offset:
+            num = field[1] - offset
+            result.append(('', '|V%d' % num))
+            offset += num
+        elif field[1] < offset:
+            raise ValueError(
+                "dtype.descr is not defined for types with overlapping or "
+                "out-of-order fields")
+        if len(field) > 3:
+            name = (field[2], field[3])
+        else:
+            name = field[2]
+        if field[0].subdtype:
+            tup = (name, _array_descr(field[0].subdtype[0]),
+                   field[0].subdtype[1])
+        else:
+            tup = (name, _array_descr(field[0]))
+        offset += field[0].itemsize
+        result.append(tup)
+
+    if descriptor.itemsize > offset:
+        num = descriptor.itemsize - offset
+        result.append(('', '|V%d' % num))
+
+    return result
+
+# Build a new array from the information in a pickle.
+# Note that the name numpy.core._internal._reconstruct is embedded in
+# pickles of ndarrays made with NumPy before release 1.0
+# so don't remove the name here, or you'll
+# break backward compatibility.
+def _reconstruct(subtype, shape, dtype):
+    return ndarray.__new__(subtype, shape, dtype)
+
+
+# format_re was originally from numarray by J. Todd Miller
+
+format_re = re.compile(br'(?P<order1>[<>|=]?)'
+                       br'(?P<repeats> *[(]?[ ,0-9L]*[)]? *)'
+                       br'(?P<order2>[<>|=]?)'
+                       br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
+sep_re = re.compile(br'\s*,\s*')
+space_re = re.compile(br'\s+$')
+
+# astr is a string (perhaps comma separated)
+
+_convorder = {b'=': _nbo}
+
+def _commastring(astr):
+    startindex = 0
+    result = []
+    while startindex < len(astr):
+        mo = format_re.match(astr, pos=startindex)
+        try:
+            (order1, repeats, order2, dtype) = mo.groups()
+        except (TypeError, AttributeError):
+            raise ValueError('format number %d of "%s" is not recognized' %
+                             (len(result)+1, astr))
+        startindex = mo.end()
+        # Separator or ending padding
+        if startindex < len(astr):
+            if space_re.match(astr, pos=startindex):
+                startindex = len(astr)
+            else:
+                mo = sep_re.match(astr, pos=startindex)
+                if not mo:
+                    raise ValueError(
+                        'format number %d of "%s" is not recognized' %
+                        (len(result)+1, astr))
+                startindex = mo.end()
+
+        if order2 == b'':
+            order = order1
+        elif order1 == b'':
+            order = order2
+        else:
+            order1 = _convorder.get(order1, order1)
+            order2 = _convorder.get(order2, order2)
+            if (order1 != order2):
+                raise ValueError(
+                    'inconsistent byte-order specification %s and %s' %
+                    (order1, order2))
+            order = order1
+
+        if order in [b'|', b'=', _nbo]:
+            order = b''
+        dtype = order + dtype
+        if (repeats == b''):
+            newitem = dtype
+        else:
+            newitem = (dtype, eval(repeats))
+        result.append(newitem)
+
+    return result
+
+class dummy_ctype(object):
+    def __init__(self, cls):
+        self._cls = cls
+    def __mul__(self, other):
+        return self
+    def __call__(self, *other):
+        return self._cls(other)
+    def __eq__(self, other):
+        return self._cls == other._cls
+    def __ne__(self, other):
+        return self._cls != other._cls
+
+def _getintp_ctype():
+    val = _getintp_ctype.cache
+    if val is not None:
+        return val
+    if ctypes is None:
+        import numpy as np
+        val = dummy_ctype(np.intp)
+    else:
+        char = dtype('p').char
+        if (char == 'i'):
+            val = ctypes.c_int
+        elif char == 'l':
+            val = ctypes.c_long
+        elif char == 'q':
+            val = ctypes.c_longlong
+        else:
+            val = ctypes.c_long
+    _getintp_ctype.cache = val
+    return val
+_getintp_ctype.cache = None
+
+# Used for .ctypes attribute of ndarray
+
+class _missing_ctypes(object):
+    def cast(self, num, obj):
+        return num.value
+
+    class c_void_p(object):
+        def __init__(self, ptr):
+            self.value = ptr
+
+
+class _unsafe_first_element_pointer(object):
+    """
+    Helper to allow viewing an array as a ctypes pointer to the first element
+
+    This avoids:
+     * dealing with strides
+     * `.view` rejecting object-containing arrays
+     * `memoryview` not supporting overlapping fields
+    """
+    def __init__(self, arr):
+        self.base = arr
+
+    @property
+    def __array_interface__(self):
+        i = dict(
+            shape=(),
+            typestr='|V0',
+            data=(self.base.__array_interface__['data'][0], False),
+            strides=(),
+            version=3,
+        )
+        return i
+
+
+def _get_void_ptr(arr):
+    """
+    Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array
+    """
+    import numpy as np
+    # convert to a 0d array that has a data pointer referring to the start
+    # of arr.
This holds a reference to arr. + simple_arr = np.asarray(_unsafe_first_element_pointer(arr)) + + # create a `char[0]` using the same memory. + c_arr = (ctypes.c_char * 0).from_buffer(simple_arr) + + # finally cast to void* + return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p) + + +class _ctypes(object): + def __init__(self, array, ptr=None): + self._arr = array + + if ctypes: + self._ctypes = ctypes + # get a void pointer to the buffer, which keeps the array alive + self._data = _get_void_ptr(array) + assert self._data.value == ptr + else: + # fake a pointer-like object that holds onto the reference + self._ctypes = _missing_ctypes() + self._data = self._ctypes.c_void_p(ptr) + self._data._objects = array + + if self._arr.ndim == 0: + self._zerod = True + else: + self._zerod = False + + def data_as(self, obj): + """ + Return the data pointer cast to a particular c-types object. + For example, calling ``self._as_parameter_`` is equivalent to + ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a + pointer to a ctypes array of floating-point data: + ``self.data_as(ctypes.POINTER(ctypes.c_double))``. + + The returned pointer will keep a reference to the array. + """ + return self._ctypes.cast(self._data, obj) + + def shape_as(self, obj): + """ + Return the shape tuple as an array of some other c-types + type. For example: ``self.shape_as(ctypes.c_short)``. + """ + if self._zerod: + return None + return (obj*self._arr.ndim)(*self._arr.shape) + + def strides_as(self, obj): + """ + Return the strides tuple as an array of some other + c-types type. For example: ``self.strides_as(ctypes.c_longlong)``. + """ + if self._zerod: + return None + return (obj*self._arr.ndim)(*self._arr.strides) + + @property + def data(self): + """ + A pointer to the memory area of the array as a Python integer. + This memory area may contain data that is not aligned, or not in correct + byte-order. The memory area may not even be writeable. The array + flags and data-type of this array should be respected when passing this + attribute to arbitrary C-code to avoid trouble that can include Python + crashing. User Beware! The value of this attribute is exactly the same + as ``self._array_interface_['data'][0]``. + + Note that unlike `data_as`, a reference will not be kept to the array: + code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a + pointer to a deallocated array, and should be spelt + ``(a + b).ctypes.data_as(ctypes.c_void_p)`` + """ + return self._data.value + + @property + def shape(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the C-integer corresponding to ``dtype('p')`` on this + platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or + `ctypes.c_longlong` depending on the platform. + The c_intp type is defined accordingly in `numpy.ctypeslib`. + The ctypes array contains the shape of the underlying array. + """ + return self.shape_as(_getintp_ctype()) + + @property + def strides(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the same as for the shape attribute. This ctypes array + contains the strides information from the underlying array. This strides + information is important for showing how many bytes must be jumped to + get to the next element in the array. 
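+
+        A short illustration (an editor's sketch, hedged; it assumes numpy
+        is importable as np):
+
+        >>> a = np.zeros((2, 3), dtype=np.int32)
+        >>> tuple(a.ctypes.strides)
+        (12, 4)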
+ """ + return self.strides_as(_getintp_ctype()) + + @property + def _as_parameter_(self): + """ + Overrides the ctypes semi-magic method + + Enables `c_func(some_array.ctypes)` + """ + return self._data + + # kept for compatibility + get_data = data.fget + get_shape = shape.fget + get_strides = strides.fget + get_as_parameter = _as_parameter_.fget + + +def _newnames(datatype, order): + """ + Given a datatype and an order object, return a new names tuple, with the + order indicated + """ + oldnames = datatype.names + nameslist = list(oldnames) + if isinstance(order, (str, unicode)): + order = [order] + seen = set() + if isinstance(order, (list, tuple)): + for name in order: + try: + nameslist.remove(name) + except ValueError: + if name in seen: + raise ValueError("duplicate field name: %s" % (name,)) + else: + raise ValueError("unknown field name: %s" % (name,)) + seen.add(name) + return tuple(list(order) + nameslist) + raise ValueError("unsupported order value: %s" % (order,)) + +def _copy_fields(ary): + """Return copy of structured array with padding between fields removed. + + Parameters + ---------- + ary : ndarray + Structured array from which to remove padding bytes + + Returns + ------- + ary_copy : ndarray + Copy of ary with padding bytes removed + """ + dt = ary.dtype + copy_dtype = {'names': dt.names, + 'formats': [dt.fields[name][0] for name in dt.names]} + return array(ary, dtype=copy_dtype, copy=True) + +def _getfield_is_safe(oldtype, newtype, offset): + """ Checks safety of getfield for object arrays. + + As in _view_is_safe, we need to check that memory containing objects is not + reinterpreted as a non-object datatype and vice versa. + + Parameters + ---------- + oldtype : data-type + Data type of the original ndarray. + newtype : data-type + Data type of the field being accessed by ndarray.getfield + offset : int + Offset of the field being accessed by ndarray.getfield + + Raises + ------ + TypeError + If the field access is invalid + + """ + if newtype.hasobject or oldtype.hasobject: + if offset == 0 and newtype == oldtype: + return + if oldtype.names: + for name in oldtype.names: + if (oldtype.fields[name][1] == offset and + oldtype.fields[name][0] == newtype): + return + raise TypeError("Cannot get/set field of an object array") + return + +def _view_is_safe(oldtype, newtype): + """ Checks safety of a view involving object arrays, for example when + doing:: + + np.zeros(10, dtype=oldtype).view(newtype) + + Parameters + ---------- + oldtype : data-type + Data type of original ndarray + newtype : data-type + Data type of the view + + Raises + ------ + TypeError + If the new type is incompatible with the old type. + + """ + + # if the types are equivalent, there is no problem. 
+ # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) + if oldtype == newtype: + return + + if newtype.hasobject or oldtype.hasobject: + raise TypeError("Cannot change data-type for object array.") + return + +# Given a string containing a PEP 3118 format specifier, +# construct a NumPy dtype + +_pep3118_native_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'h', + 'H': 'H', + 'i': 'i', + 'I': 'I', + 'l': 'l', + 'L': 'L', + 'q': 'q', + 'Q': 'Q', + 'e': 'e', + 'f': 'f', + 'd': 'd', + 'g': 'g', + 'Zf': 'F', + 'Zd': 'D', + 'Zg': 'G', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) + +_pep3118_standard_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'i2', + 'H': 'u2', + 'i': 'i4', + 'I': 'u4', + 'l': 'i4', + 'L': 'u4', + 'q': 'i8', + 'Q': 'u8', + 'e': 'f2', + 'f': 'f', + 'd': 'd', + 'Zf': 'F', + 'Zd': 'D', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) + +_pep3118_unsupported_map = { + 'u': 'UCS-2 strings', + '&': 'pointers', + 't': 'bitfields', + 'X': 'function pointers', +} + +class _Stream(object): + def __init__(self, s): + self.s = s + self.byteorder = '@' + + def advance(self, n): + res = self.s[:n] + self.s = self.s[n:] + return res + + def consume(self, c): + if self.s[:len(c)] == c: + self.advance(len(c)) + return True + return False + + def consume_until(self, c): + if callable(c): + i = 0 + while i < len(self.s) and not c(self.s[i]): + i = i + 1 + return self.advance(i) + else: + i = self.s.index(c) + res = self.advance(i) + self.advance(len(c)) + return res + + @property + def next(self): + return self.s[0] + + def __bool__(self): + return bool(self.s) + __nonzero__ = __bool__ + + +def _dtype_from_pep3118(spec): + stream = _Stream(spec) + dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) + return dtype + +def __dtype_from_pep3118(stream, is_subdtype): + field_spec = dict( + names=[], + formats=[], + offsets=[], + itemsize=0 + ) + offset = 0 + common_alignment = 1 + is_padding = False + + # Parse spec + while stream: + value = None + + # End of structure, bail out to upper level + if stream.consume('}'): + break + + # Sub-arrays (1) + shape = None + if stream.consume('('): + shape = stream.consume_until(')') + shape = tuple(map(int, shape.split(','))) + + # Byte order + if stream.next in ('@', '=', '<', '>', '^', '!'): + byteorder = stream.advance(1) + if byteorder == '!': + byteorder = '>' + stream.byteorder = byteorder + + # Byte order characters also control native vs. 
standard type sizes + if stream.byteorder in ('@', '^'): + type_map = _pep3118_native_map + type_map_chars = _pep3118_native_typechars + else: + type_map = _pep3118_standard_map + type_map_chars = _pep3118_standard_typechars + + # Item sizes + itemsize_str = stream.consume_until(lambda c: not c.isdigit()) + if itemsize_str: + itemsize = int(itemsize_str) + else: + itemsize = 1 + + # Data types + is_padding = False + + if stream.consume('T{'): + value, align = __dtype_from_pep3118( + stream, is_subdtype=True) + elif stream.next in type_map_chars: + if stream.next == 'Z': + typechar = stream.advance(2) + else: + typechar = stream.advance(1) + + is_padding = (typechar == 'x') + dtypechar = type_map[typechar] + if dtypechar in 'USV': + dtypechar += '%d' % itemsize + itemsize = 1 + numpy_byteorder = {'@': '=', '^': '='}.get( + stream.byteorder, stream.byteorder) + value = dtype(numpy_byteorder + dtypechar) + align = value.alignment + elif stream.next in _pep3118_unsupported_map: + desc = _pep3118_unsupported_map[stream.next] + raise NotImplementedError( + "Unrepresentable PEP 3118 data type {!r} ({})" + .format(stream.next, desc)) + else: + raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s) + + # + # Native alignment may require padding + # + # Here we assume that the presence of a '@' character implicitly implies + # that the start of the array is *already* aligned. + # + extra_offset = 0 + if stream.byteorder == '@': + start_padding = (-offset) % align + intra_padding = (-value.itemsize) % align + + offset += start_padding + + if intra_padding != 0: + if itemsize > 1 or (shape is not None and _prod(shape) > 1): + # Inject internal padding to the end of the sub-item + value = _add_trailing_padding(value, intra_padding) + else: + # We can postpone the injection of internal padding, + # as the item appears at most once + extra_offset += intra_padding + + # Update common alignment + common_alignment = _lcm(align, common_alignment) + + # Convert itemsize to sub-array + if itemsize != 1: + value = dtype((value, (itemsize,))) + + # Sub-arrays (2) + if shape is not None: + value = dtype((value, shape)) + + # Field name + if stream.consume(':'): + name = stream.consume_until(':') + else: + name = None + + if not (is_padding and name is None): + if name is not None and name in field_spec['names']: + raise RuntimeError("Duplicate field name '%s' in PEP3118 format" + % name) + field_spec['names'].append(name) + field_spec['formats'].append(value) + field_spec['offsets'].append(offset) + + offset += value.itemsize + offset += extra_offset + + field_spec['itemsize'] = offset + + # extra final padding for aligned types + if stream.byteorder == '@': + field_spec['itemsize'] += (-offset) % common_alignment + + # Check if this was a simple 1-item type, and unwrap it + if (field_spec['names'] == [None] + and field_spec['offsets'][0] == 0 + and field_spec['itemsize'] == field_spec['formats'][0].itemsize + and not is_subdtype): + ret = field_spec['formats'][0] + else: + _fix_names(field_spec) + ret = dtype(field_spec) + + # Finished + return ret, common_alignment + +def _fix_names(field_spec): + """ Replace names which are None with the next unused f%d name """ + names = field_spec['names'] + for i, name in enumerate(names): + if name is not None: + continue + + j = 0 + while True: + name = 'f{}'.format(j) + if name not in names: + break + j = j + 1 + names[i] = name + +def _add_trailing_padding(value, padding): + """Inject the specified number of padding bytes at the end of a dtype""" + if 
value.fields is None: + field_spec = dict( + names=['f0'], + formats=[value], + offsets=[0], + itemsize=value.itemsize + ) + else: + fields = value.fields + names = value.names + field_spec = dict( + names=names, + formats=[fields[name][0] for name in names], + offsets=[fields[name][1] for name in names], + itemsize=value.itemsize + ) + + field_spec['itemsize'] += padding + return dtype(field_spec) + +def _prod(a): + p = 1 + for x in a: + p *= x + return p + +def _gcd(a, b): + """Calculate the greatest common divisor of a and b""" + while b: + a, b = b, a % b + return a + +def _lcm(a, b): + return a // _gcd(a, b) * b + +# Exception used in shares_memory() +@set_module('numpy') +class TooHardError(RuntimeError): + pass + +@set_module('numpy') +class AxisError(ValueError, IndexError): + """ Axis supplied was invalid. """ + def __init__(self, axis, ndim=None, msg_prefix=None): + # single-argument form just delegates to base class + if ndim is None and msg_prefix is None: + msg = axis + + # do the string formatting here, to save work in the C code + else: + msg = ("axis {} is out of bounds for array of dimension {}" + .format(axis, ndim)) + if msg_prefix is not None: + msg = "{}: {}".format(msg_prefix, msg) + + super(AxisError, self).__init__(msg) + + +def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): + """ Format the error message for when __array_ufunc__ gives up. """ + args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] + + ['{}={!r}'.format(k, v) + for k, v in kwargs.items()]) + args = inputs + kwargs.get('out', ()) + types_string = ', '.join(repr(type(arg).__name__) for arg in args) + return ('operand type(s) all returned NotImplemented from ' + '__array_ufunc__({!r}, {!r}, {}): {}' + .format(ufunc, method, args_string, types_string)) + + +def array_function_errmsg_formatter(public_api, types): + """ Format the error message for when __array_ufunc__ gives up. """ + func_name = '{}.{}'.format(public_api.__module__, public_api.__name__) + return ("no implementation found for '{}' on types that implement " + '__array_function__: {}'.format(func_name, list(types))) + + +def _ufunc_doc_signature_formatter(ufunc): + """ + Builds a signature string which resembles PEP 457 + + This is used to construct the first line of the docstring + """ + + # input arguments are simple + if ufunc.nin == 1: + in_args = 'x' + else: + in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin)) + + # output arguments are both keyword or positional + if ufunc.nout == 0: + out_args = ', /, out=()' + elif ufunc.nout == 1: + out_args = ', /, out=None' + else: + out_args = '[, {positional}], / [, out={default}]'.format( + positional=', '.join( + 'out{}'.format(i+1) for i in range(ufunc.nout)), + default=repr((None,)*ufunc.nout) + ) + + # keyword only args depend on whether this is a gufunc + kwargs = ( + ", casting='same_kind'" + ", order='K'" + ", dtype=None" + ", subok=True" + "[, signature" + ", extobj]" + ) + if ufunc.signature is None: + kwargs = ", where=True" + kwargs + + # join all the parts together + return '{name}({in_args}{out_args}, *{kwargs})'.format( + name=ufunc.__name__, + in_args=in_args, + out_args=out_args, + kwargs=kwargs + ) + + +def npy_ctypes_check(cls): + # determine if a class comes from ctypes, in order to work around + # a bug in the buffer protocol for those objects, bpo-10746 + try: + # ctypes class are new-style, so have an __mro__. This probably fails + # for ctypes classes with multiple inheritance. 
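+        # (For instance, ctypes.c_int.__mro__ is (c_int, _SimpleCData,
+        # _CData, object), so __mro__[-2] picks out the _ctypes base class.)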
+ ctype_base = cls.__mro__[-2] + # right now, they're part of the _ctypes module + return 'ctypes' in ctype_base.__module__ + except Exception: + return False + + +class recursive(object): + ''' + A decorator class for recursive nested functions. + Naive recursive nested functions hold a reference to themselves: + + def outer(*args): + def stringify_leaky(arg0, *arg1): + if len(arg1) > 0: + return stringify_leaky(*arg1) # <- HERE + return str(arg0) + stringify_leaky(*args) + + This design pattern creates a reference cycle that is difficult for a + garbage collector to resolve. The decorator class prevents the + cycle by passing the nested function in as an argument `self`: + + def outer(*args): + @recursive + def stringify(self, arg0, *arg1): + if len(arg1) > 0: + return self(*arg1) + return str(arg0) + stringify(*args) + + ''' + def __init__(self, func): + self.func = func + def __call__(self, *args, **kwargs): + return self.func(self, *args, **kwargs) + diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_internal.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_internal.pyc new file mode 100644 index 0000000..71188ad Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_internal.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_methods.py b/project/venv/lib/python2.7/site-packages/numpy/core/_methods.py new file mode 100644 index 0000000..33f6d01 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/_methods.py @@ -0,0 +1,156 @@ +""" +Array methods which are called by both the C-code for the method +and the Python code for the NumPy-namespace function + +""" +from __future__ import division, absolute_import, print_function + +import warnings + +from numpy.core import multiarray as mu +from numpy.core import umath as um +from numpy.core.numeric import asanyarray +from numpy.core import numerictypes as nt +from numpy._globals import _NoValue + +# save those O(100) nanoseconds! 
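+# (Binding the bound ``reduce`` methods once at import time skips two
+# attribute lookups (``um.<ufunc>`` and ``.reduce``) on every call; for
+# the very small reductions these helpers serve, that Python-level
+# overhead is comparable to the arithmetic itself.)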
+umr_maximum = um.maximum.reduce +umr_minimum = um.minimum.reduce +umr_sum = um.add.reduce +umr_prod = um.multiply.reduce +umr_any = um.logical_or.reduce +umr_all = um.logical_and.reduce + +# avoid keyword arguments to speed up parsing, saves about 15%-20% for very +# small reductions +def _amax(a, axis=None, out=None, keepdims=False, + initial=_NoValue): + return umr_maximum(a, axis, None, out, keepdims, initial) + +def _amin(a, axis=None, out=None, keepdims=False, + initial=_NoValue): + return umr_minimum(a, axis, None, out, keepdims, initial) + +def _sum(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue): + return umr_sum(a, axis, dtype, out, keepdims, initial) + +def _prod(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue): + return umr_prod(a, axis, dtype, out, keepdims, initial) + +def _any(a, axis=None, dtype=None, out=None, keepdims=False): + return umr_any(a, axis, dtype, out, keepdims) + +def _all(a, axis=None, dtype=None, out=None, keepdims=False): + return umr_all(a, axis, dtype, out, keepdims) + +def _count_reduce_items(arr, axis): + if axis is None: + axis = tuple(range(arr.ndim)) + if not isinstance(axis, tuple): + axis = (axis,) + items = 1 + for ax in axis: + items *= arr.shape[ax] + return items + +def _mean(a, axis=None, dtype=None, out=None, keepdims=False): + arr = asanyarray(a) + + is_float16_result = False + rcount = _count_reduce_items(arr, axis) + # Make this warning show up first + if rcount == 0: + warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None: + if issubclass(arr.dtype.type, (nt.integer, nt.bool_)): + dtype = mu.dtype('f8') + elif issubclass(arr.dtype.type, nt.float16): + dtype = mu.dtype('f4') + is_float16_result = True + + ret = umr_sum(arr, axis, dtype, out, keepdims) + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + if is_float16_result and out is None: + ret = arr.dtype.type(ret) + elif hasattr(ret, 'dtype'): + if is_float16_result: + ret = arr.dtype.type(ret / rcount) + else: + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + arr = asanyarray(a) + + rcount = _count_reduce_items(arr, axis) + # Make this warning show up on top. + if ddof >= rcount: + warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, + stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)): + dtype = mu.dtype('f8') + + # Compute the mean. + # Note that if dtype is not of inexact type then arraymean will + # not be either. + arrmean = umr_sum(arr, axis, dtype, keepdims=True) + if isinstance(arrmean, mu.ndarray): + arrmean = um.true_divide( + arrmean, rcount, out=arrmean, casting='unsafe', subok=False) + else: + arrmean = arrmean.dtype.type(arrmean / rcount) + + # Compute sum of squared deviations from mean + # Note that x may not be inexact and that we need it to be an array, + # not a scalar. + x = asanyarray(arr - arrmean) + if issubclass(arr.dtype.type, nt.complexfloating): + x = um.multiply(x, um.conjugate(x), out=x).real + else: + x = um.multiply(x, x, out=x) + ret = umr_sum(x, axis, dtype, out, keepdims) + + # Compute degrees of freedom and make sure it is not negative. 
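+    # (ddof can equal or exceed the number of items reduced over, e.g.
+    # ddof=1 with a single sample; the clamp below keeps the divisor
+    # non-negative, so the division emits the usual invalid-value warning
+    # instead of silently returning a negative variance.)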
+ rcount = max([rcount - ddof, 0]) + + # divide by degrees of freedom + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims) + + if isinstance(ret, mu.ndarray): + ret = um.sqrt(ret, out=ret) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(um.sqrt(ret)) + else: + ret = um.sqrt(ret) + + return ret + +def _ptp(a, axis=None, out=None, keepdims=False): + return um.subtract( + umr_maximum(a, axis, None, out, keepdims), + umr_minimum(a, axis, None, None, keepdims), + out + ) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_methods.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_methods.pyc new file mode 100644 index 0000000..c1d5c2f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_methods.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_tests.so new file mode 100755 index 0000000..b019305 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_tests.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_umath.so b/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_umath.so new file mode 100755 index 0000000..81bffb6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_umath.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_operand_flag_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_operand_flag_tests.so new file mode 100755 index 0000000..93291dd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_operand_flag_tests.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_rational_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_rational_tests.so new file mode 100755 index 0000000..08a153a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_rational_tests.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.py b/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.py new file mode 100644 index 0000000..45e6a73 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.py @@ -0,0 +1,100 @@ +""" +String-handling utilities to avoid locale-dependence. + +Used primarily to generate type name aliases. +""" +# "import string" is costly to import! +# Construct the translation tables directly +# "A" = chr(65), "a" = chr(97) +_all_chars = [chr(_m) for _m in range(256)] +_ascii_upper = _all_chars[65:65+26] +_ascii_lower = _all_chars[97:97+26] +LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:]) +UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:]) + + +def english_lower(s): + """ Apply English case rules to convert ASCII strings to all lower case. + + This is an internal utility function to replace calls to str.lower() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. 
Thus, "I".lower() != "i" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + lowered : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_lower + >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' + >>> english_lower('') + '' + """ + lowered = s.translate(LOWER_TABLE) + return lowered + + +def english_upper(s): + """ Apply English case rules to convert ASCII strings to all upper case. + + This is an internal utility function to replace calls to str.upper() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + uppered : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_upper + >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' + >>> english_upper('') + '' + """ + uppered = s.translate(UPPER_TABLE) + return uppered + + +def english_capitalize(s): + """ Apply English case rules to convert the first character of an ASCII + string to upper case. + + This is an internal utility function to replace calls to str.capitalize() + such that we can avoid changing behavior with changing locales. + + Parameters + ---------- + s : str + + Returns + ------- + capitalized : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_capitalize + >>> english_capitalize('int8') + 'Int8' + >>> english_capitalize('Int8') + 'Int8' + >>> english_capitalize('') + '' + """ + if s: + return english_upper(s[0]) + s[1:] + else: + return s diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.pyc new file mode 100644 index 0000000..d0d1785 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_struct_ufunc_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_struct_ufunc_tests.so new file mode 100755 index 0000000..a29a741 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_struct_ufunc_tests.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.py b/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.py new file mode 100644 index 0000000..d6e1a1f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.py @@ -0,0 +1,282 @@ +""" +Due to compatibility, numpy has a very large number of different naming +conventions for the scalar types (those subclassing from `numpy.generic`). +This file produces a convoluted set of dictionaries mapping names to types, +and sometimes other mappings too. + +.. data:: allTypes + A dictionary of names to types that will be exposed as attributes through + ``np.core.numerictypes.*`` + +.. data:: sctypeDict + Similar to `allTypes`, but maps a broader set of aliases to their types. + +.. data:: sctypeNA + NumArray-compatible names for the scalar types. Contains not only + ``name: type`` mappings, but ``char: name`` mappings too. + + .. deprecated:: 1.16 + +.. 
data:: sctypes
+   A dictionary keyed by a "type group" string, providing a list of types
+   under that group.
+
+"""
+import warnings
+import sys
+
+from numpy.compat import unicode
+from numpy._globals import VisibleDeprecationWarning
+from numpy.core._string_helpers import english_lower, english_capitalize
+from numpy.core.multiarray import typeinfo, dtype
+from numpy.core._dtype import _kind_name
+
+
+sctypeDict = {}      # Contains all leaf-node scalar types with aliases
+class TypeNADict(dict):
+    def __getitem__(self, key):
+        # 2018-06-24, 1.16
+        warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
+                      'of numpy', VisibleDeprecationWarning, stacklevel=2)
+        return dict.__getitem__(self, key)
+    def get(self, key, default=None):
+        # 2018-06-24, 1.16
+        warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
+                      'of numpy', VisibleDeprecationWarning, stacklevel=2)
+        return dict.get(self, key, default)
+
+sctypeNA = TypeNADict()  # Contains all leaf-node types -> numarray type equivalences
+allTypes = {}            # Collect the types we will add to the module
+
+
+# separate the actual type info from the abstract base classes
+_abstract_types = {}
+_concrete_typeinfo = {}
+for k, v in typeinfo.items():
+    # make all the keys lowercase too
+    k = english_lower(k)
+    if isinstance(v, type):
+        _abstract_types[k] = v
+    else:
+        _concrete_typeinfo[k] = v
+
+_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
+
+
+def _bits_of(obj):
+    try:
+        info = next(v for v in _concrete_typeinfo.values() if v.type is obj)
+    except StopIteration:
+        if obj in _abstract_types.values():
+            raise ValueError("Cannot count the bits of an abstract type")
+
+        # some third-party type - make a best-guess
+        return dtype(obj).itemsize * 8
+    else:
+        return info.bits
+
+
+def bitname(obj):
+    """Return a bit-width name for a given type object"""
+    bits = _bits_of(obj)
+    dt = dtype(obj)
+    char = dt.kind
+    base = _kind_name(dt)
+
+    if base == 'object':
+        bits = 0
+
+    if bits != 0:
+        char = "%s%d" % (char, bits // 8)
+
+    return base, bits, char
+
+
+def _add_types():
+    for name, info in _concrete_typeinfo.items():
+        # define C-name and insert typenum and typechar references also
+        allTypes[name] = info.type
+        sctypeDict[name] = info.type
+        sctypeDict[info.char] = info.type
+        sctypeDict[info.num] = info.type
+
+    for name, cls in _abstract_types.items():
+        allTypes[name] = cls
+_add_types()
+
+# This is the priority order used to assign the bit-sized NPY_INTxx names, which
+# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
+# consistent.
+# If two C types have the same size, then the earliest one in this list is used
+# as the sized name.
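+# (Concretely: on a typical LP64 Linux build 'long' and 'longlong' are both
+# 64 bits, and because 'long' comes first in the list below the sized
+# aliases resolve to C long, e.g. np.dtype(np.int64).char == 'l' there,
+# while 'q' (longlong) keeps only its unsized names.)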
+_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte'] +_uint_ctypes = list('u' + t for t in _int_ctypes) + +def _add_aliases(): + for name, info in _concrete_typeinfo.items(): + # these are handled by _add_integer_aliases + if name in _int_ctypes or name in _uint_ctypes: + continue + + # insert bit-width version for this class (if relevant) + base, bit, char = bitname(info.type) + + myname = "%s%d" % (base, bit) + + # ensure that (c)longdouble does not overwrite the aliases assigned to + # (c)double + if name in ('longdouble', 'clongdouble') and myname in allTypes: + continue + + base_capitalize = english_capitalize(base) + if base == 'complex': + na_name = '%s%d' % (base_capitalize, bit//2) + elif base == 'bool': + na_name = base_capitalize + else: + na_name = "%s%d" % (base_capitalize, bit) + + allTypes[myname] = info.type + + # add mapping for both the bit name and the numarray name + sctypeDict[myname] = info.type + sctypeDict[na_name] = info.type + + # add forward, reverse, and string mapping to numarray + sctypeNA[na_name] = info.type + sctypeNA[info.type] = na_name + sctypeNA[info.char] = na_name + + sctypeDict[char] = info.type + sctypeNA[char] = na_name +_add_aliases() + +def _add_integer_aliases(): + seen_bits = set() + for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes): + i_info = _concrete_typeinfo[i_ctype] + u_info = _concrete_typeinfo[u_ctype] + bits = i_info.bits # same for both + + for info, charname, intname, Intname in [ + (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits), + (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]: + if bits not in seen_bits: + # sometimes two different types have the same number of bits + # if so, the one iterated over first takes precedence + allTypes[intname] = info.type + sctypeDict[intname] = info.type + sctypeDict[Intname] = info.type + sctypeDict[charname] = info.type + sctypeNA[Intname] = info.type + sctypeNA[charname] = info.type + sctypeNA[info.type] = Intname + sctypeNA[info.char] = Intname + + seen_bits.add(bits) + +_add_integer_aliases() + +# We use these later +void = allTypes['void'] + +# +# Rework the Python names (so that float and complex and int are consistent +# with Python usage) +# +def _set_up_aliases(): + type_pairs = [('complex_', 'cdouble'), + ('int0', 'intp'), + ('uint0', 'uintp'), + ('single', 'float'), + ('csingle', 'cfloat'), + ('singlecomplex', 'cfloat'), + ('float_', 'double'), + ('intc', 'int'), + ('uintc', 'uint'), + ('int_', 'long'), + ('uint', 'ulong'), + ('cfloat', 'cdouble'), + ('longfloat', 'longdouble'), + ('clongfloat', 'clongdouble'), + ('longcomplex', 'clongdouble'), + ('bool_', 'bool'), + ('bytes_', 'string'), + ('string_', 'string'), + ('unicode_', 'unicode'), + ('object_', 'object')] + if sys.version_info[0] >= 3: + type_pairs.extend([('str_', 'unicode')]) + else: + type_pairs.extend([('str_', 'string')]) + for alias, t in type_pairs: + allTypes[alias] = allTypes[t] + sctypeDict[alias] = sctypeDict[t] + # Remove aliases overriding python types and modules + to_remove = ['ulong', 'object', 'int', 'float', + 'complex', 'bool', 'string', 'datetime', 'timedelta'] + if sys.version_info[0] >= 3: + to_remove.extend(['bytes', 'str']) + else: + to_remove.extend(['unicode', 'long']) + + for t in to_remove: + try: + del allTypes[t] + del sctypeDict[t] + except KeyError: + pass +_set_up_aliases() + + +sctypes = {'int': [], + 'uint':[], + 'float':[], + 'complex':[], + 'others':[bool, object, bytes, unicode, void]} + +def _add_array_type(typename, bits): + try: + t = 
allTypes['%s%d' % (typename, bits)] + except KeyError: + pass + else: + sctypes[typename].append(t) + +def _set_array_types(): + ibytes = [1, 2, 4, 8, 16, 32, 64] + fbytes = [2, 4, 8, 10, 12, 16, 32, 64] + for bytes in ibytes: + bits = 8*bytes + _add_array_type('int', bits) + _add_array_type('uint', bits) + for bytes in fbytes: + bits = 8*bytes + _add_array_type('float', bits) + _add_array_type('complex', 2*bits) + _gi = dtype('p') + if _gi.type not in sctypes['int']: + indx = 0 + sz = _gi.itemsize + _lst = sctypes['int'] + while (indx < len(_lst) and sz >= _lst[indx](0).itemsize): + indx += 1 + sctypes['int'].insert(indx, _gi.type) + sctypes['uint'].insert(indx, dtype('P').type) +_set_array_types() + + +# Add additional strings to the sctypeDict +_toadd = ['int', 'float', 'complex', 'bool', 'object'] +if sys.version_info[0] >= 3: + _toadd.extend(['str', 'bytes', ('a', 'bytes_')]) +else: + _toadd.extend(['string', ('str', 'string_'), 'unicode', ('a', 'string_')]) + +for name in _toadd: + if isinstance(name, tuple): + sctypeDict[name[0]] = allTypes[name[1]] + else: + sctypeDict[name] = allTypes['%s_' % name] + +del _toadd, name diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.pyc new file mode 100644 index 0000000..adc74dc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_umath_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_umath_tests.so new file mode 100755 index 0000000..0b30cd2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_umath_tests.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.py b/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.py new file mode 100644 index 0000000..6a71de2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.py @@ -0,0 +1,1631 @@ +"""Array printing function + +$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ["array2string", "array_str", "array_repr", "set_string_function", + "set_printoptions", "get_printoptions", "printoptions", + "format_float_positional", "format_float_scientific"] +__docformat__ = 'restructuredtext' + +# +# Written by Konrad Hinsen +# last revision: 1996-3-13 +# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) +# and by Perry Greenfield 2000-4-1 for numarray +# and by Travis Oliphant 2005-8-22 for numpy + + +# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy +# scalars but for different purposes. scalartypes.c.src has str/reprs for when +# the scalar is printed on its own, while arrayprint.py has strs for when +# scalars are printed inside an ndarray. Only the latter strs are currently +# user-customizable. + +import sys +import functools +import numbers +if sys.version_info[0] >= 3: + try: + from _thread import get_ident + except ImportError: + from _dummy_thread import get_ident +else: + try: + from thread import get_ident + except ImportError: + from dummy_thread import get_ident + +import numpy as np +from . import numerictypes as _nt +from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat +from . 
import multiarray +from .multiarray import (array, dragon4_positional, dragon4_scientific, + datetime_as_string, datetime_data, ndarray, + set_legacy_print_mode) +from .fromnumeric import ravel, any +from .numeric import concatenate, asarray, errstate +from .numerictypes import (longlong, intc, int_, float_, complex_, bool_, + flexible) +from .overrides import array_function_dispatch, set_module +import warnings +import contextlib + +_format_options = { + 'edgeitems': 3, # repr N leading and trailing items of each dimension + 'threshold': 1000, # total items > triggers array summarization + 'floatmode': 'maxprec', + 'precision': 8, # precision of floating point representations + 'suppress': False, # suppress printing small floating values in exp format + 'linewidth': 75, + 'nanstr': 'nan', + 'infstr': 'inf', + 'sign': '-', + 'formatter': None, + 'legacy': False} + +def _make_options_dict(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + sign=None, formatter=None, floatmode=None, legacy=None): + """ make a dictionary out of the non-None arguments, plus sanity checks """ + + options = {k: v for k, v in locals().items() if v is not None} + + if suppress is not None: + options['suppress'] = bool(suppress) + + modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] + if floatmode not in modes + [None]: + raise ValueError("floatmode option must be one of " + + ", ".join('"{}"'.format(m) for m in modes)) + + if sign not in [None, '-', '+', ' ']: + raise ValueError("sign option must be one of ' ', '+', or '-'") + + if legacy not in [None, False, '1.13']: + warnings.warn("legacy printing option can currently only be '1.13' or " + "`False`", stacklevel=3) + if threshold is not None: + # forbid the bad threshold arg suggested by stack overflow, gh-12351 + if not isinstance(threshold, numbers.Number) or np.isnan(threshold): + raise ValueError("threshold must be numeric and non-NAN, try " + "sys.maxsize for untruncated representation") + return options + + +@set_module('numpy') +def set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + formatter=None, sign=None, floatmode=None, **kwarg): + """ + Set printing options. + + These options determine the way floating point numbers, arrays and + other NumPy objects are displayed. + + Parameters + ---------- + precision : int or None, optional + Number of digits of precision for floating point output (default 8). + May be `None` if `floatmode` is not `fixed`, to print as many digits as + necessary to uniquely specify the value. + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr (default 1000). + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension (default 3). + linewidth : int, optional + The number of characters per line for the purpose of inserting + line breaks (default 75). + suppress : bool, optional + If True, always print floating point numbers using fixed point + notation, in which case numbers equal to zero in the current precision + will print as zero. If False, then scientific notation is used when + absolute value of the smallest number is < 1e-4 or the ratio of the + maximum absolute value to the minimum is > 1e3. The default is False. + nanstr : str, optional + String representation of floating point not-a-number (default nan). 
+ infstr : str, optional + String representation of floating point infinity (default inf). + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. (default '-') + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'numpystr' : types `numpy.string_` and `numpy.unicode_` + - 'object' : `np.object_` arrays + - 'str' : all other strings + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'str' and 'numpystr' + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. Can take the following values: + + * 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + * 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + * 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + * 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + .. versionadded:: 1.14.0 + + See Also + -------- + get_printoptions, set_string_function, array2string + + Notes + ----- + `formatter` is always reset with a call to `set_printoptions`. + + Examples + -------- + Floating point precision can be set: + + >>> np.set_printoptions(precision=4) + >>> print(np.array([1.123456789])) + [ 1.1235] + + Long arrays can be summarised: + + >>> np.set_printoptions(threshold=5) + >>> print(np.arange(10)) + [0 1 2 ..., 7 8 9] + + Small results can be suppressed: + + >>> eps = np.finfo(float).eps + >>> x = np.arange(4.) 
+    >>> x**2 - (x + eps)**2
+    array([ -4.9304e-32,  -4.4409e-16,   0.0000e+00,   0.0000e+00])
+    >>> np.set_printoptions(suppress=True)
+    >>> x**2 - (x + eps)**2
+    array([-0., -0.,  0.,  0.])
+
+    A custom formatter can be used to display array elements as desired:
+
+    >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
+    >>> x = np.arange(3)
+    >>> x
+    array([int: 0, int: -1, int: -2])
+    >>> np.set_printoptions()  # formatter gets reset
+    >>> x
+    array([0, 1, 2])
+
+    To put back the default options, you can use:
+
+    >>> np.set_printoptions(edgeitems=3,infstr='inf',
+    ... linewidth=75, nanstr='nan', precision=8,
+    ... suppress=False, threshold=1000, formatter=None)
+    """
+    legacy = kwarg.pop('legacy', None)
+    if kwarg:
+        msg = "set_printoptions() got unexpected keyword argument '{}'"
+        raise TypeError(msg.format(kwarg.popitem()[0]))
+
+    opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
+                             suppress, nanstr, infstr, sign, formatter,
+                             floatmode, legacy)
+    # formatter is always reset
+    opt['formatter'] = formatter
+    _format_options.update(opt)
+
+    # set the C variable for legacy mode
+    if _format_options['legacy'] == '1.13':
+        set_legacy_print_mode(113)
+        # reset the sign option in legacy mode to avoid confusion
+        _format_options['sign'] = '-'
+    elif _format_options['legacy'] is False:
+        set_legacy_print_mode(0)
+
+
+@set_module('numpy')
+def get_printoptions():
+    """
+    Return the current print options.
+
+    Returns
+    -------
+    print_opts : dict
+        Dictionary of current print options with keys
+
+        - precision : int
+        - threshold : int
+        - edgeitems : int
+        - linewidth : int
+        - suppress : bool
+        - nanstr : str
+        - infstr : str
+        - formatter : dict of callables
+        - sign : str
+
+        For a full description of these options, see `set_printoptions`.
+
+    See Also
+    --------
+    set_printoptions, set_string_function
+
+    """
+    return _format_options.copy()
+
+
+@set_module('numpy')
+@contextlib.contextmanager
+def printoptions(*args, **kwargs):
+    """Context manager for setting print options.
+
+    Set print options for the scope of the `with` block, and restore the old
+    options at the end. See `set_printoptions` for the full description of
+    available options.
+
+    Examples
+    --------
+
+    >>> with np.printoptions(precision=2):
+    ...     print(np.array([2.0]) / 3)
+    [0.67]
+
+    The `as`-clause of the `with`-statement gives the current print options:
+
+    >>> with np.printoptions(precision=2) as opts:
+    ...     assert_equal(opts, np.get_printoptions())
+
+    See Also
+    --------
+    set_printoptions, get_printoptions
+
+    """
+    opts = np.get_printoptions()
+    try:
+        np.set_printoptions(*args, **kwargs)
+        yield np.get_printoptions()
+    finally:
+        np.set_printoptions(**opts)
+
+
+def _leading_trailing(a, edgeitems, index=()):
+    """
+    Keep only the N-D corners (leading and trailing edges) of an array.
+
+    Should be passed a base-class ndarray, since it makes no guarantees about
+    preserving subclasses.
+ """ + axis = len(index) + if axis == a.ndim: + return a[index] + + if a.shape[axis] > 2*edgeitems: + return concatenate(( + _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]), + _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) + ), axis=axis) + else: + return _leading_trailing(a, edgeitems, index + np.index_exp[:]) + + +def _object_format(o): + """ Object arrays containing lists should be printed unambiguously """ + if type(o) is list: + fmt = 'list({!r})' + else: + fmt = '{!r}' + return fmt.format(o) + +def repr_format(x): + return repr(x) + +def str_format(x): + return str(x) + +def _get_formatdict(data, **opt): + prec, fmode = opt['precision'], opt['floatmode'] + supp, sign = opt['suppress'], opt['sign'] + legacy = opt['legacy'] + + # wrapped in lambdas to avoid taking a code path with the wrong type of data + formatdict = { + 'bool': lambda: BoolFormat(data), + 'int': lambda: IntegerFormat(data), + 'float': lambda: + FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'longfloat': lambda: + FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'complexfloat': lambda: + ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'longcomplexfloat': lambda: + ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'datetime': lambda: DatetimeFormat(data, legacy=legacy), + 'timedelta': lambda: TimedeltaFormat(data), + 'object': lambda: _object_format, + 'void': lambda: str_format, + 'numpystr': lambda: repr_format, + 'str': lambda: str} + + # we need to wrap values in `formatter` in a lambda, so that the interface + # is the same as the above values. + def indirect(x): + return lambda: x + + formatter = opt['formatter'] + if formatter is not None: + fkeys = [k for k in formatter.keys() if formatter[k] is not None] + if 'all' in fkeys: + for key in formatdict.keys(): + formatdict[key] = indirect(formatter['all']) + if 'int_kind' in fkeys: + for key in ['int']: + formatdict[key] = indirect(formatter['int_kind']) + if 'float_kind' in fkeys: + for key in ['float', 'longfloat']: + formatdict[key] = indirect(formatter['float_kind']) + if 'complex_kind' in fkeys: + for key in ['complexfloat', 'longcomplexfloat']: + formatdict[key] = indirect(formatter['complex_kind']) + if 'str_kind' in fkeys: + for key in ['numpystr', 'str']: + formatdict[key] = indirect(formatter['str_kind']) + for key in formatdict.keys(): + if key in fkeys: + formatdict[key] = indirect(formatter[key]) + + return formatdict + +def _get_format_function(data, **options): + """ + find the right formatting function for the dtype_ + """ + dtype_ = data.dtype + dtypeobj = dtype_.type + formatdict = _get_formatdict(data, **options) + if issubclass(dtypeobj, _nt.bool_): + return formatdict['bool']() + elif issubclass(dtypeobj, _nt.integer): + if issubclass(dtypeobj, _nt.timedelta64): + return formatdict['timedelta']() + else: + return formatdict['int']() + elif issubclass(dtypeobj, _nt.floating): + if issubclass(dtypeobj, _nt.longfloat): + return formatdict['longfloat']() + else: + return formatdict['float']() + elif issubclass(dtypeobj, _nt.complexfloating): + if issubclass(dtypeobj, _nt.clongfloat): + return formatdict['longcomplexfloat']() + else: + return formatdict['complexfloat']() + elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): + return formatdict['numpystr']() + elif issubclass(dtypeobj, _nt.datetime64): + return formatdict['datetime']() + elif issubclass(dtypeobj, _nt.object_): + return formatdict['object']() + elif 
issubclass(dtypeobj, _nt.void): + if dtype_.names is not None: + return StructuredVoidFormat.from_data(data, **options) + else: + return formatdict['void']() + else: + return formatdict['numpystr']() + + +def _recursive_guard(fillvalue='...'): + """ + Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs + + Decorates a function such that if it calls itself with the same first + argument, it returns `fillvalue` instead of recursing. + + Largely copied from reprlib.recursive_repr + """ + + def decorating_function(f): + repr_running = set() + + @functools.wraps(f) + def wrapper(self, *args, **kwargs): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + return f(self, *args, **kwargs) + finally: + repr_running.discard(key) + + return wrapper + + return decorating_function + + +# gracefully handle recursive calls, when object arrays contain themselves +@_recursive_guard() +def _array2string(a, options, separator=' ', prefix=""): + # The formatter __init__s in _get_format_function cannot deal with + # subclasses yet, and we also need to avoid recursion issues in + # _formatArray with subclasses which return 0d arrays in place of scalars + data = asarray(a) + if a.shape == (): + a = data + + if a.size > options['threshold']: + summary_insert = "..." + data = _leading_trailing(data, options['edgeitems']) + else: + summary_insert = "" + + # find the right formatting function for the array + format_function = _get_format_function(data, **options) + + # skip over "[" + next_line_prefix = " " + # skip over array( + next_line_prefix += " "*len(prefix) + + lst = _formatArray(a, format_function, options['linewidth'], + next_line_prefix, separator, options['edgeitems'], + summary_insert, options['legacy']) + return lst + + +def _array2string_dispatcher( + a, max_line_width=None, precision=None, + suppress_small=None, separator=None, prefix=None, + style=None, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix=None, + **kwarg): + return (a,) + + +@array_function_dispatch(_array2string_dispatcher, module='numpy') +def array2string(a, max_line_width=None, precision=None, + suppress_small=None, separator=' ', prefix="", + style=np._NoValue, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix="", + **kwarg): + """ + Return a string representation of an array. + + Parameters + ---------- + a : array_like + Input array. + max_line_width : int, optional + The maximum number of columns the string should span. Newline + characters splits the string appropriately after array elements. + precision : int or None, optional + Floating point precision. Default is the current printing + precision (usually 8), which can be altered using `set_printoptions`. + suppress_small : bool, optional + Represent very small numbers as zero. A number is "very small" if it + is smaller than the current printing precision. + separator : str, optional + Inserted between elements. + prefix : str, optional + suffix: str, optional + The length of the prefix and suffix strings are used to respectively + align and wrap the output. An array is typically printed as:: + + prefix + array2string(a) + suffix + + The output is left-padded by the length of the prefix string, and + wrapping is forced at the column ``max_line_width - len(suffix)``. + It should be noted that the content of prefix and suffix strings are + not included in the output. + style : _NoValue, optional + Has no effect, do not use. + + .. 
deprecated:: 1.14.0 + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'void' : type `numpy.void` + - 'numpystr' : types `numpy.string_` and `numpy.unicode_` + - 'str' : all other strings + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'str' and 'numpystr' + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension. + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. Can take the following values: + + - 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + - 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + - 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + - 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + .. versionadded:: 1.14.0 + + Returns + ------- + array_str : str + String representation of the array. + + Raises + ------ + TypeError + if a callable in `formatter` does not return a string. + + See Also + -------- + array_str, array_repr, set_printoptions, get_printoptions + + Notes + ----- + If a formatter is specified for a certain type, the `precision` keyword is + ignored for that type. + + This is a very flexible function; `array_repr` and `array_str` are using + `array2string` internally so keywords with the same name should work + identically in all three functions. + + Examples + -------- + >>> x = np.array([1e-16,1,2,3]) + >>> print(np.array2string(x, precision=2, separator=',', + ... suppress_small=True)) + [ 0., 1., 2., 3.] + + >>> x = np.arange(3.) 
+    >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
+    '[0.00 1.00 2.00]'
+
+    >>> x = np.arange(3)
+    >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
+    '[0x0L 0x1L 0x2L]'
+
+    """
+    legacy = kwarg.pop('legacy', None)
+    if kwarg:
+        msg = "array2string() got unexpected keyword argument '{}'"
+        raise TypeError(msg.format(kwarg.popitem()[0]))
+
+    overrides = _make_options_dict(precision, threshold, edgeitems,
+                                   max_line_width, suppress_small, None, None,
+                                   sign, formatter, floatmode, legacy)
+    options = _format_options.copy()
+    options.update(overrides)
+
+    if options['legacy'] == '1.13':
+        if style is np._NoValue:
+            style = repr
+
+        if a.shape == () and not a.dtype.names:
+            return style(a.item())
+    elif style is not np._NoValue:
+        # Deprecation 11-9-2017 v1.14
+        warnings.warn("'style' argument is deprecated and no longer functional"
+                      " except in 1.13 'legacy' mode",
+                      DeprecationWarning, stacklevel=3)
+
+    if options['legacy'] != '1.13':
+        options['linewidth'] -= len(suffix)
+
+    # treat as a null array if any of shape elements == 0
+    if a.size == 0:
+        return "[]"
+
+    return _array2string(a, options, separator, prefix)
+
+
+def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
+    needs_wrap = len(line) + len(word) > line_width
+    if legacy != '1.13':
+        # don't wrap lines if it won't help
+        if len(line) <= len(next_line_prefix):
+            needs_wrap = False
+
+    if needs_wrap:
+        s += line.rstrip() + "\n"
+        line = next_line_prefix
+    line += word
+    return s, line
+
+
+def _formatArray(a, format_function, line_width, next_line_prefix,
+                 separator, edge_items, summary_insert, legacy):
+    """formatArray is designed for two modes of operation:
+
+    1. Full output
+
+    2. Summarized output
+
+    """
+    def recurser(index, hanging_indent, curr_width):
+        """
+        By using this local function, we don't need to recurse with all the
+        arguments. 
Since this function is not created recursively, the cost is + not significant + """ + axis = len(index) + axes_left = a.ndim - axis + + if axes_left == 0: + return format_function(a[index]) + + # when recursing, add a space to align with the [ added, and reduce the + # length of the line by 1 + next_hanging_indent = hanging_indent + ' ' + if legacy == '1.13': + next_width = curr_width + else: + next_width = curr_width - len(']') + + a_len = a.shape[axis] + show_summary = summary_insert and 2*edge_items < a_len + if show_summary: + leading_items = edge_items + trailing_items = edge_items + else: + leading_items = 0 + trailing_items = a_len + + # stringify the array with the hanging indent on the first line too + s = '' + + # last axis (rows) - wrap elements if they would not fit on one line + if axes_left == 1: + # the length up until the beginning of the separator / bracket + if legacy == '1.13': + elem_width = curr_width - len(separator.rstrip()) + else: + elem_width = curr_width - max(len(separator.rstrip()), len(']')) + + line = hanging_indent + for i in range(leading_items): + word = recurser(index + (i,), next_hanging_indent, next_width) + s, line = _extendLine( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if show_summary: + s, line = _extendLine( + s, line, summary_insert, elem_width, hanging_indent, legacy) + if legacy == '1.13': + line += ", " + else: + line += separator + + for i in range(trailing_items, 1, -1): + word = recurser(index + (-i,), next_hanging_indent, next_width) + s, line = _extendLine( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if legacy == '1.13': + # width of the separator is not considered on 1.13 + elem_width = curr_width + word = recurser(index + (-1,), next_hanging_indent, next_width) + s, line = _extendLine( + s, line, word, elem_width, hanging_indent, legacy) + + s += line + + # other axes - insert newlines between rows + else: + s = '' + line_sep = separator.rstrip() + '\n'*(axes_left - 1) + + for i in range(leading_items): + nested = recurser(index + (i,), next_hanging_indent, next_width) + s += hanging_indent + nested + line_sep + + if show_summary: + if legacy == '1.13': + # trailing space, fixed nbr of newlines, and fixed separator + s += hanging_indent + summary_insert + ", \n" + else: + s += hanging_indent + summary_insert + line_sep + + for i in range(trailing_items, 1, -1): + nested = recurser(index + (-i,), next_hanging_indent, + next_width) + s += hanging_indent + nested + line_sep + + nested = recurser(index + (-1,), next_hanging_indent, next_width) + s += hanging_indent + nested + + # remove the hanging indent, and wrap in [] + s = '[' + s[len(hanging_indent):] + ']' + return s + + try: + # invoke the recursive part with an initial index and prefix + return recurser(index=(), + hanging_indent=next_line_prefix, + curr_width=line_width) + finally: + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). 
To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + recurser = None + +def _none_or_positive_arg(x, name): + if x is None: + return -1 + if x < 0: + raise ValueError("{} must be >= 0".format(name)) + return x + +class FloatingFormat(object): + """ Formatter for subtypes of np.floating """ + def __init__(self, data, precision, floatmode, suppress_small, sign=False, + **kwarg): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + self._legacy = kwarg.get('legacy', False) + if self._legacy == '1.13': + # when not 0d, legacy does not support '-' + if data.shape != () and sign == '-': + sign = ' ' + + self.floatmode = floatmode + if floatmode == 'unique': + self.precision = None + else: + self.precision = precision + + self.precision = _none_or_positive_arg(self.precision, 'precision') + + self.suppress_small = suppress_small + self.sign = sign + self.exp_format = False + self.large_exponent = False + + self.fillFormat(data) + + def fillFormat(self, data): + # only the finite values are used to compute the number of digits + finite_vals = data[isfinite(data)] + + # choose exponential mode based on the non-zero finite values: + abs_non_zero = absolute(finite_vals[finite_vals != 0]) + if len(abs_non_zero) != 0: + max_val = np.max(abs_non_zero) + min_val = np.min(abs_non_zero) + with errstate(over='ignore'): # division can overflow + if max_val >= 1.e8 or (not self.suppress_small and + (min_val < 0.0001 or max_val/min_val > 1000.)): + self.exp_format = True + + # do a first pass of printing all the numbers, to determine sizes + if len(finite_vals) == 0: + self.pad_left = 0 + self.pad_right = 0 + self.trim = '.' + self.exp_size = -1 + self.unique = True + elif self.exp_format: + trim, unique = '.', True + if self.floatmode == 'fixed' or self._legacy == '1.13': + trim, unique = 'k', False + strs = (dragon4_scientific(x, precision=self.precision, + unique=unique, trim=trim, sign=self.sign == '+') + for x in finite_vals) + frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) + int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) + self.exp_size = max(len(s) for s in exp_strs) - 1 + + self.trim = 'k' + self.precision = max(len(s) for s in frac_part) + + # for back-compat with np 1.13, use 2 spaces & sign and full prec + if self._legacy == '1.13': + self.pad_left = 3 + else: + # this should be only 1 or 2. Can be calculated from sign. + self.pad_left = max(len(s) for s in int_part) + # pad_right is only needed for nan length calculation + self.pad_right = self.exp_size + 2 + self.precision + + self.unique = False + else: + # first pass printing to determine sizes + trim, unique = '.', True + if self.floatmode == 'fixed': + trim, unique = 'k', False + strs = (dragon4_positional(x, precision=self.precision, + fractional=True, + unique=unique, trim=trim, + sign=self.sign == '+') + for x in finite_vals) + int_part, frac_part = zip(*(s.split('.') for s in strs)) + if self._legacy == '1.13': + self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) + else: + self.pad_left = max(len(s) for s in int_part) + self.pad_right = max(len(s) for s in frac_part) + self.exp_size = -1 + + if self.floatmode in ['fixed', 'maxprec_equal']: + self.precision = self.pad_right + self.unique = False + self.trim = 'k' + else: + self.unique = True + self.trim = '.' 
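+        # (At this point pad_left/pad_right hold the widest integer and
+        # fractional fields seen among the finite values; the adjustments
+        # below only ever widen them, for a leading sign space and for the
+        # nan/inf strings.)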
+ + if self._legacy != '1.13': + # account for sign = ' ' by adding one to pad_left + if self.sign == ' ' and not any(np.signbit(finite_vals)): + self.pad_left += 1 + + # if there are non-finite values, may need to increase pad_left + if data.size != finite_vals.size: + neginf = self.sign != '-' or any(data[isinf(data)] < 0) + nanlen = len(_format_options['nanstr']) + inflen = len(_format_options['infstr']) + neginf + offset = self.pad_right + 1 # +1 for decimal pt + self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset) + + def __call__(self, x): + if not np.isfinite(x): + with errstate(invalid='ignore'): + if np.isnan(x): + sign = '+' if self.sign == '+' else '' + ret = sign + _format_options['nanstr'] + else: # isinf + sign = '-' if x < 0 else '+' if self.sign == '+' else '' + ret = sign + _format_options['infstr'] + return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret + + if self.exp_format: + return dragon4_scientific(x, + precision=self.precision, + unique=self.unique, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + exp_digits=self.exp_size) + else: + return dragon4_positional(x, + precision=self.precision, + unique=self.unique, + fractional=True, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + pad_right=self.pad_right) + +# for back-compatibility, we keep the classes for each float type too +class FloatFormat(FloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn("FloatFormat has been replaced by FloatingFormat", + DeprecationWarning, stacklevel=2) + super(FloatFormat, self).__init__(*args, **kwargs) + + +class LongFloatFormat(FloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn("LongFloatFormat has been replaced by FloatingFormat", + DeprecationWarning, stacklevel=2) + super(LongFloatFormat, self).__init__(*args, **kwargs) + + +@set_module('numpy') +def format_float_scientific(x, precision=None, unique=True, trim='k', + sign=False, pad_left=None, exp_digits=None): + """ + Format a floating-point scalar as a decimal string in scientific notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + was omitted, print all necessary digits, otherwise digit generation is + cut off after `precision` digits and the remaining value is rounded. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value. + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. 
+ pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + exp_digits : non-negative integer, optional + Pad the exponent with zeros until it contains at least this many digits. + If omitted, the exponent will be at least 2 digits. + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_positional + + Examples + -------- + >>> np.format_float_scientific(np.float32(np.pi)) + '3.1415927e+00' + >>> s = np.float32(1.23e24) + >>> np.format_float_scientific(s, unique=False, precision=15) + '1.230000071797338e+24' + >>> np.format_float_scientific(s, exp_digits=4) + '1.23e+0024' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') + return dragon4_scientific(x, precision=precision, unique=unique, + trim=trim, sign=sign, pad_left=pad_left, + exp_digits=exp_digits) + + +@set_module('numpy') +def format_float_positional(x, precision=None, unique=True, + fractional=True, trim='k', sign=False, + pad_left=None, pad_right=None): + """ + Format a floating-point scalar as a decimal string in positional notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + was omitted, print out all necessary digits, otherwise digit generation + is cut off after `precision` digits and the remaining value is rounded. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value. + fractional : boolean, optional + If `True`, the cutoff of `precision` digits refers to the total number + of digits after the decimal point, including leading zeros. + If `False`, `precision` refers to the total number of significant + digits, before or after the decimal point, ignoring leading zeros. + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + pad_right : non-negative integer, optional + Pad the right side of the string with whitespace until at least that + many characters are to the right of the decimal point. 
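A hedged sketch of the four `trim` modes listed above, using the positional formatter on the same value (outputs assumed for numpy >= 1.14):

import numpy as np

np.format_float_positional(1.0, precision=2, unique=False, trim='k')  # '1.00'
np.format_float_positional(1.0, precision=2, unique=False, trim='.')  # '1.'
np.format_float_positional(1.0, precision=2, unique=False, trim='0')  # '1.0'
np.format_float_positional(1.0, precision=2, unique=False, trim='-')  # '1'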
+ + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_scientific + + Examples + -------- + >>> np.format_float_positional(np.float32(np.pi)) + '3.1415927' + >>> np.format_float_positional(np.float16(np.pi)) + '3.14' + >>> np.format_float_positional(np.float16(0.3)) + '0.3' + >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) + '0.3000488281' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + pad_right = _none_or_positive_arg(pad_right, 'pad_right') + return dragon4_positional(x, precision=precision, unique=unique, + fractional=fractional, trim=trim, + sign=sign, pad_left=pad_left, + pad_right=pad_right) + + +class IntegerFormat(object): + def __init__(self, data): + if data.size > 0: + max_str_len = max(len(str(np.max(data))), + len(str(np.min(data)))) + else: + max_str_len = 0 + self.format = '%{}d'.format(max_str_len) + + def __call__(self, x): + return self.format % x + + +class BoolFormat(object): + def __init__(self, data, **kwargs): + # add an extra space so " True" and "False" have the same length and + # array elements align nicely when printed, except in 0d arrays + self.truestr = ' True' if data.shape != () else 'True' + + def __call__(self, x): + return self.truestr if x else "False" + + +class ComplexFloatingFormat(object): + """ Formatter for subtypes of np.complexfloating """ + def __init__(self, x, precision, floatmode, suppress_small, + sign=False, **kwarg): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + floatmode_real = floatmode_imag = floatmode + if kwarg.get('legacy', False) == '1.13': + floatmode_real = 'maxprec_equal' + floatmode_imag = 'maxprec' + + self.real_format = FloatingFormat(x.real, precision, floatmode_real, + suppress_small, sign=sign, **kwarg) + self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag, + suppress_small, sign='+', **kwarg) + + def __call__(self, x): + r = self.real_format(x.real) + i = self.imag_format(x.imag) + + # add the 'j' before the terminal whitespace in i + sp = len(i.rstrip()) + i = i[:sp] + 'j' + i[sp:] + + return r + i + +# for back-compatibility, we keep the classes for each complex type too +class ComplexFormat(ComplexFloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn( + "ComplexFormat has been replaced by ComplexFloatingFormat", + DeprecationWarning, stacklevel=2) + super(ComplexFormat, self).__init__(*args, **kwargs) + +class LongComplexFormat(ComplexFloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn( + "LongComplexFormat has been replaced by ComplexFloatingFormat", + DeprecationWarning, stacklevel=2) + super(LongComplexFormat, self).__init__(*args, **kwargs) + + +class _TimelikeFormat(object): + def __init__(self, data): + non_nat = data[~isnat(data)] + if len(non_nat) > 0: + # Max str length of non-NaT elements + max_str_len = max(len(self._format_non_nat(np.max(non_nat))), + len(self._format_non_nat(np.min(non_nat)))) + else: + max_str_len = 0 + if len(non_nat) < data.size: + # data contains a NaT + max_str_len = max(max_str_len, 5) + self._format = '%{}s'.format(max_str_len) + self._nat = "'NaT'".rjust(max_str_len) + + def _format_non_nat(self, x): + # override in subclass + raise NotImplementedError + + def __call__(self, x): + if isnat(x): + return self._nat + else: + return self._format % self._format_non_nat(x) + + +class 
DatetimeFormat(_TimelikeFormat): + def __init__(self, x, unit=None, timezone=None, casting='same_kind', + legacy=False): + # Get the unit from the dtype + if unit is None: + if x.dtype.kind == 'M': + unit = datetime_data(x.dtype)[0] + else: + unit = 's' + + if timezone is None: + timezone = 'naive' + self.timezone = timezone + self.unit = unit + self.casting = casting + self.legacy = legacy + + # must be called after the above are configured + super(DatetimeFormat, self).__init__(x) + + def __call__(self, x): + if self.legacy == '1.13': + return self._format_non_nat(x) + return super(DatetimeFormat, self).__call__(x) + + def _format_non_nat(self, x): + return "'%s'" % datetime_as_string(x, + unit=self.unit, + timezone=self.timezone, + casting=self.casting) + + +class TimedeltaFormat(_TimelikeFormat): + def _format_non_nat(self, x): + return str(x.astype('i8')) + + +class SubArrayFormat(object): + def __init__(self, format_function): + self.format_function = format_function + + def __call__(self, arr): + if arr.ndim <= 1: + return "[" + ", ".join(self.format_function(a) for a in arr) + "]" + return "[" + ", ".join(self.__call__(a) for a in arr) + "]" + + +class StructuredVoidFormat(object): + """ + Formatter for structured np.void objects. + + This does not work on structured alias types like np.dtype(('i4', 'i2,i2')), + as alias scalars lose their field information, and the implementation + relies upon np.void.__getitem__. + """ + def __init__(self, format_functions): + self.format_functions = format_functions + + @classmethod + def from_data(cls, data, **options): + """ + This is a second way to initialize StructuredVoidFormat, using the raw data + as input. Added to avoid changing the signature of __init__. + """ + format_functions = [] + for field_name in data.dtype.names: + format_function = _get_format_function(data[field_name], **options) + if data.dtype[field_name].shape != (): + format_function = SubArrayFormat(format_function) + format_functions.append(format_function) + return cls(format_functions) + + def __call__(self, x): + str_fields = [ + format_function(field) + for field, format_function in zip(x, self.format_functions) + ] + if len(str_fields) == 1: + return "({},)".format(str_fields[0]) + else: + return "({})".format(", ".join(str_fields)) + + +# for backwards compatibility +class StructureFormat(StructuredVoidFormat): + def __init__(self, *args, **kwargs): + # NumPy 1.14, 2018-02-14 + warnings.warn( + "StructureFormat has been replaced by StructuredVoidFormat", + DeprecationWarning, stacklevel=2) + super(StructureFormat, self).__init__(*args, **kwargs) + + +def _void_scalar_repr(x): + """ + Implements the repr for structured-void scalars. It is called from the + scalartypes.c.src code, and is placed here because it uses the elementwise + formatters defined above. + """ + return StructuredVoidFormat.from_data(array(x), **_format_options)(x) + + +_typelessdata = [int_, float_, complex_, bool_] +if issubclass(intc, int): + _typelessdata.append(intc) +if issubclass(longlong, int): + _typelessdata.append(longlong) + + +def dtype_is_implied(dtype): + """ + Determine if the given dtype is implied by the representation of its values. + + Parameters + ---------- + dtype : dtype + Data type + + Returns + ------- + implied : bool + True if the dtype is implied by the representation of its values. 
+ + Examples + -------- + >>> np.core.arrayprint.dtype_is_implied(int) + True + >>> np.array([1, 2, 3], int) + array([1, 2, 3]) + >>> np.core.arrayprint.dtype_is_implied(np.int8) + False + >>> np.array([1, 2, 3], np.int8) + array([1, 2, 3], dtype=np.int8) + """ + dtype = np.dtype(dtype) + if _format_options['legacy'] == '1.13' and dtype.type == bool_: + return False + + # not just void types can be structured, and names are not part of the repr + if dtype.names is not None: + return False + + return dtype.type in _typelessdata + + +def dtype_short_repr(dtype): + """ + Convert a dtype to a short form which evaluates to the same dtype. + + The intent is roughly that the following holds + + >>> from numpy import * + >>> assert eval(dtype_short_repr(dt)) == dt + """ + if dtype.names is not None: + # structured dtypes give a list or tuple repr + return str(dtype) + elif issubclass(dtype.type, flexible): + # handle these separately so they don't give garbage like str256 + return "'%s'" % str(dtype) + + typename = dtype.name + # quote typenames which can't be represented as python variable names + if typename and not (typename[0].isalpha() and typename.isalnum()): + typename = repr(typename) + + return typename + + +def _array_repr_implementation( + arr, max_line_width=None, precision=None, suppress_small=None, + array2string=array2string): + """Internal version of array_repr() that allows overriding array2string.""" + if max_line_width is None: + max_line_width = _format_options['linewidth'] + + if type(arr) is not ndarray: + class_name = type(arr).__name__ + else: + class_name = "array" + + skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0 + + prefix = class_name + "(" + suffix = ")" if skipdtype else "," + + if (_format_options['legacy'] == '1.13' and + arr.shape == () and not arr.dtype.names): + lst = repr(arr.item()) + elif arr.size > 0 or arr.shape == (0,): + lst = array2string(arr, max_line_width, precision, suppress_small, + ', ', prefix, suffix=suffix) + else: # show zero-length shape unless it is (0,) + lst = "[], shape=%s" % (repr(arr.shape),) + + arr_str = prefix + lst + suffix + + if skipdtype: + return arr_str + + dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype)) + + # compute whether we should put dtype on a new line: Do so if adding the + # dtype would extend the last line past max_line_width. + # Note: This line gives the correct result even when rfind returns -1. + last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) + spacer = " " + if _format_options['legacy'] == '1.13': + if issubclass(arr.dtype.type, flexible): + spacer = '\n' + ' '*len(class_name + "(") + elif last_line_len + len(dtype_str) + 1 > max_line_width: + spacer = '\n' + ' '*len(class_name + "(") + + return arr_str + spacer + dtype_str + + +def _array_repr_dispatcher( + arr, max_line_width=None, precision=None, suppress_small=None): + return (arr,) + + +@array_function_dispatch(_array_repr_dispatcher, module='numpy') +def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): + """ + Return the string representation of an array. + + Parameters + ---------- + arr : ndarray + Input array. + max_line_width : int, optional + The maximum number of columns the string should span. Newline + characters split the string appropriately after array elements. + precision : int, optional + Floating point precision. Default is the current printing precision + (usually 8), which can be altered using `set_printoptions`. 
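A short sketch of how `dtype_short_repr` (defined above) leaves plain type names bare, quotes flexible types, and falls back to str() for structured dtypes; `numpy.core.arrayprint` is a private module, so this is illustrative only:

import numpy as np
from numpy.core.arrayprint import dtype_short_repr

dtype_short_repr(np.dtype(np.int8))        # 'int8'
dtype_short_repr(np.dtype('S5'))           # "'|S5'"  (flexible types are quoted)
dtype_short_repr(np.dtype([('a', 'i4')]))  # "[('a', '<i4')]"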
+ suppress_small : bool, optional + Represent very small numbers as zero, default is False. Very small + is defined by `precision`, if the precision is 8 then + numbers smaller than 5e-9 are represented as zero. + + Returns + ------- + string : str + The string representation of an array. + + See Also + -------- + array_str, array2string, set_printoptions + + Examples + -------- + >>> np.array_repr(np.array([1,2])) + 'array([1, 2])' + >>> np.array_repr(np.ma.array([0.])) + 'MaskedArray([ 0.])' + >>> np.array_repr(np.array([], np.int32)) + 'array([], dtype=int32)' + + >>> x = np.array([1e-6, 4e-7, 2, 3]) + >>> np.array_repr(x, precision=6, suppress_small=True) + 'array([ 0.000001, 0. , 2. , 3. ])' + + """ + return _array_repr_implementation( + arr, max_line_width, precision, suppress_small) + + +_guarded_str = _recursive_guard()(str) + + +def _array_str_implementation( + a, max_line_width=None, precision=None, suppress_small=None, + array2string=array2string): + """Internal version of array_str() that allows overriding array2string.""" + if (_format_options['legacy'] == '1.13' and + a.shape == () and not a.dtype.names): + return str(a.item()) + + # the str of 0d arrays is a special case: It should appear like a scalar, + # so floats are not truncated by `precision`, and strings are not wrapped + # in quotes. So we return the str of the scalar value. + if a.shape == (): + # obtain a scalar and call str on it, avoiding problems for subclasses + # for which indexing with () returns a 0d instead of a scalar by using + # ndarray's getindex. Also guard against recursive 0d object arrays. + return _guarded_str(np.ndarray.__getitem__(a, ())) + + return array2string(a, max_line_width, precision, suppress_small, ' ', "") + + +def _array_str_dispatcher( + a, max_line_width=None, precision=None, suppress_small=None): + return (a,) + + +@array_function_dispatch(_array_str_dispatcher, module='numpy') +def array_str(a, max_line_width=None, precision=None, suppress_small=None): + """ + Return a string representation of the data in an array. + + The data in the array is returned as a single string. This function is + similar to `array_repr`, the difference being that `array_repr` also + returns information on the kind of array and its data type. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. The + default is, indirectly, 75. + precision : int, optional + Floating point precision. Default is the current printing precision + (usually 8), which can be altered using `set_printoptions`. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + + See Also + -------- + array2string, array_repr, set_printoptions + + Examples + -------- + >>> np.array_str(np.arange(3)) + '[0 1 2]' + + """ + return _array_str_implementation( + a, max_line_width, precision, suppress_small) + + +# needed if __array_function__ is disabled +_array2string_impl = getattr(array2string, '__wrapped__', array2string) +_default_array_str = functools.partial(_array_str_implementation, + array2string=_array2string_impl) +_default_array_repr = functools.partial(_array_repr_implementation, + array2string=_array2string_impl) + + +def set_string_function(f, repr=True): + """ + Set a Python function to be used when pretty printing arrays. 
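The practical difference between the two entry points above, in one sketch:

import numpy as np

np.array_repr(np.arange(3))   # 'array([0, 1, 2])' -- class name, dtype if needed
np.array_str(np.arange(3))    # '[0 1 2]'          -- data only, space-separated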
+ + Parameters + ---------- + f : function or None + Function to be used to pretty print arrays. The function should expect + a single array argument and return a string of the representation of + the array. If None, the function is reset to the default NumPy function + to print arrays. + repr : bool, optional + If True (default), the function for pretty printing (``__repr__``) + is set, if False the function that returns the default string + representation (``__str__``) is set. + + See Also + -------- + set_printoptions, get_printoptions + + Examples + -------- + >>> def pprint(arr): + ... return 'HA! - What are you going to do now?' + ... + >>> np.set_string_function(pprint) + >>> a = np.arange(10) + >>> a + HA! - What are you going to do now? + >>> print(a) + [0 1 2 3 4 5 6 7 8 9] + + We can reset the function to the default: + + >>> np.set_string_function(None) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + `repr` affects either pretty printing or normal string representation. + Note that ``__repr__`` is still affected by setting ``__str__`` + because the width of each array element in the returned string becomes + equal to the length of the result of ``__str__()``. + + >>> x = np.arange(4) + >>> np.set_string_function(lambda x:'random', repr=False) + >>> x.__str__() + 'random' + >>> x.__repr__() + 'array([ 0, 1, 2, 3])' + + """ + if f is None: + if repr: + return multiarray.set_string_function(_default_array_repr, 1) + else: + return multiarray.set_string_function(_default_array_str, 0) + else: + return multiarray.set_string_function(f, repr) + +set_string_function(_default_array_str, 0) +set_string_function(_default_array_repr, 1) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.pyc new file mode 100644 index 0000000..9f94b0d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/cversions.py b/project/venv/lib/python2.7/site-packages/numpy/core/cversions.py new file mode 100644 index 0000000..7995dd9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/cversions.py @@ -0,0 +1,15 @@ +"""Simple script to compute the api hash of the current API. + +The API has is defined by numpy_api_order and ufunc_api_order. + +""" +from __future__ import division, absolute_import, print_function + +from os.path import dirname + +from code_generators.genapi import fullapi_hash +from code_generators.numpy_api import full_api + +if __name__ == '__main__': + curdir = dirname(__file__) + print(fullapi_hash(full_api)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/cversions.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/cversions.pyc new file mode 100644 index 0000000..f4ef3cb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/cversions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.py b/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.py new file mode 100644 index 0000000..12ba3f0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.py @@ -0,0 +1,2835 @@ +""" +This module contains a set of functions for vectorized string +operations and methods. + +.. note:: + The `chararray` class exists for backwards compatibility with + Numarray, it is not recommended for new development. 
Starting from numpy + 1.4, if one needs arrays of strings, it is recommended to use arrays of + `dtype` `object_`, `string_` or `unicode_`, and use the free functions + in the `numpy.char` module for fast vectorized string operations. + +Some methods will only be available if the corresponding string method is +available in your version of Python. + +The preferred alias for `defchararray` is `numpy.char`. + +""" +from __future__ import division, absolute_import, print_function + +import functools +import sys +from .numerictypes import string_, unicode_, integer, object_, bool_, character +from .numeric import ndarray, compare_chararrays +from .numeric import array as narray +from numpy.core.multiarray import _vec_string +from numpy.core.overrides import set_module +from numpy.core import overrides +from numpy.compat import asbytes, long +import numpy + +__all__ = [ + 'chararray', 'equal', 'not_equal', 'greater_equal', 'less_equal', + 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', + 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs', + 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', + 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition', + 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit', + 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase', + 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal', + 'array', 'asarray' + ] + + +_globalvar = 0 +if sys.version_info[0] >= 3: + _unicode = str + _bytes = bytes +else: + _unicode = unicode + _bytes = str +_len = len + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.char') + + +def _use_unicode(*args): + """ + Helper function for determining the output type of some string + operations. + + For an operation on two ndarrays, if at least one is unicode, the + result should be unicode. + """ + for x in args: + if (isinstance(x, _unicode) or + issubclass(numpy.asarray(x).dtype.type, unicode_)): + return unicode_ + return string_ + +def _to_string_or_unicode_array(result): + """ + Helper function to cast a result back into a string or unicode array + if an object array must be used as an intermediary. + """ + return numpy.asarray(result.tolist()) + +def _clean_args(*args): + """ + Helper function for delegating arguments to Python string + functions. + + Many of the Python string operations that have optional arguments + do not use 'None' to indicate a default value. In these cases, + we need to remove all `None` arguments, and those following them. + """ + newargs = [] + for chk in args: + if chk is None: + break + newargs.append(chk) + return newargs + +def _get_num_chars(a): + """ + Helper function that returns the number of characters per field in + a string or unicode array. This is to abstract out the fact that + for a unicode array this is itemsize / 4. + """ + if issubclass(a.dtype.type, unicode_): + return a.itemsize // 4 + return a.itemsize + + +def _binary_op_dispatcher(x1, x2): + return (x1, x2) + + +@array_function_dispatch(_binary_op_dispatcher) +def equal(x1, x2): + """ + Return (x1 == x2) element-wise. + + Unlike `numpy.equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. 
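A minimal sketch of the itemsize bookkeeping that `_get_num_chars` (above) hides: unicode arrays store four bytes per character, hence the `itemsize // 4`:

import numpy as np

np.array(['abc']).itemsize    # 3 on Python 2 ('|S3'): one byte per character
np.array([u'abc']).itemsize   # 12 ('<U3'): four bytes per character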
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ not_equal, greater_equal, less_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '==', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def not_equal(x1, x2):
+ """
+ Return (x1 != x2) element-wise.
+
+ Unlike `numpy.not_equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ equal, greater_equal, less_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '!=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater_equal(x1, x2):
+ """
+ Return (x1 >= x2) element-wise.
+
+ Unlike `numpy.greater_equal`, this comparison is performed by
+ first stripping whitespace characters from the end of the string.
+ This behavior is provided for backward-compatibility with
+ numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ equal, not_equal, less_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '>=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less_equal(x1, x2):
+ """
+ Return (x1 <= x2) element-wise.
+
+ Unlike `numpy.less_equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '<=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater(x1, x2):
+ """
+ Return (x1 > x2) element-wise.
+
+ Unlike `numpy.greater`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, less_equal, less
+ """
+ return compare_chararrays(x1, x2, '>', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less(x1, x2):
+ """
+ Return (x1 < x2) element-wise.
+
+ Unlike `numpy.less`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
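A sketch of the trailing-whitespace behavior these comparisons share, versus plain elementwise `==`:

import numpy as np

a = np.array(['abc ', 'x'])
b = np.array(['abc', 'x'])
a == b               # array([False,  True]) -- exact comparison
np.char.equal(a, b)  # array([ True,  True]) -- trailing whitespace stripped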
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, less_equal, greater
+ """
+ return compare_chararrays(x1, x2, '<', True)
+
+
+def _unary_op_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def str_len(a):
+ """
+ Return len(a) element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of integers
+
+ See also
+ --------
+ __builtin__.len
+ """
+ return _vec_string(a, integer, '__len__')
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def add(x1, x2):
+ """
+ Return element-wise string concatenation for two arrays of str or unicode.
+
+ Arrays `x1` and `x2` must have the same shape.
+
+ Parameters
+ ----------
+ x1 : array_like of str or unicode
+ Input array.
+ x2 : array_like of str or unicode
+ Input array.
+
+ Returns
+ -------
+ add : ndarray
+ Output array of `string_` or `unicode_`, depending on input types
+ of the same shape as `x1` and `x2`.
+
+ """
+ arr1 = numpy.asarray(x1)
+ arr2 = numpy.asarray(x2)
+ out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
+ dtype = _use_unicode(arr1, arr2)
+ return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))
+
+
+def _multiply_dispatcher(a, i):
+ return (a,)
+
+
+@array_function_dispatch(_multiply_dispatcher)
+def multiply(a, i):
+ """
+ Return (a * i), that is, string multiple concatenation,
+ element-wise.
+
+ Values in `i` less than 0 are treated as 0 (which yields an
+ empty string).
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ i : array_like of ints
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input types
+
+ """
+ a_arr = numpy.asarray(a)
+ i_arr = numpy.asarray(i)
+ if not issubclass(i_arr.dtype.type, integer):
+ raise ValueError("Can only multiply by integers")
+ out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
+
+
+def _mod_dispatcher(a, values):
+ return (a, values)
+
+
+@array_function_dispatch(_mod_dispatcher)
+def mod(a, values):
+ """
+ Return (a % values), that is, pre-Python 2.6 string formatting
+ (interpolation), element-wise for a pair of array_likes of str
+ or unicode.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ values : array_like of values
+ These values will be element-wise interpolated into the string.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input types
+
+ See also
+ --------
+ str.__mod__
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, '__mod__', (values,)))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def capitalize(a):
+ """
+ Return a copy of `a` with only the first character of each element
+ capitalized.
+
+ Calls `str.capitalize` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+ Input array of strings to capitalize.
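The `out_size` arithmetic in `add` and `multiply` above sizes the result dtype up front; a quick sketch (Python 2 byte strings assumed):

import numpy as np

a = np.array(['ab'])    # itemsize 2
b = np.array(['cdef'])  # itemsize 4
np.char.add(a, b)       # dtype '|S6': out_size = 2 + 4
np.char.multiply(a, 3)  # dtype '|S6': out_size = 2 * max(3, 0)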
+ + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input + types + + See also + -------- + str.capitalize + + Examples + -------- + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], + dtype='|S4') + >>> np.char.capitalize(c) + array(['A1b2', '1b2a', 'B2a1', '2a1b'], + dtype='|S4') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'capitalize') + + +def _center_dispatcher(a, width, fillchar=None): + return (a,) + + +@array_function_dispatch(_center_dispatcher) +def center(a, width, fillchar=' '): + """ + Return a copy of `a` with its elements centered in a string of + length `width`. + + Calls `str.center` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + width : int + The length of the resulting strings + fillchar : str or unicode, optional + The padding character to use (default is space). + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input + types + + See also + -------- + str.center + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = long(numpy.max(width_arr.flat)) + if numpy.issubdtype(a_arr.dtype, numpy.string_): + fillchar = asbytes(fillchar) + return _vec_string( + a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar)) + + +def _count_dispatcher(a, sub, start=None, end=None): + return (a,) + + +@array_function_dispatch(_count_dispatcher) +def count(a, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + Calls `str.count` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + sub : str or unicode + The substring to search for. + + start, end : int, optional + Optional arguments `start` and `end` are interpreted as slice + notation to specify the range in which to count. + + Returns + ------- + out : ndarray + Output array of ints. + + See also + -------- + str.count + + Examples + -------- + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], + dtype='|S7') + >>> np.char.count(c, 'A') + array([3, 1, 1]) + >>> np.char.count(c, 'aA') + array([3, 1, 0]) + >>> np.char.count(c, 'A', start=1, end=4) + array([2, 1, 1]) + >>> np.char.count(c, 'A', start=1, end=3) + array([1, 0, 0]) + + """ + return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end)) + + +def _code_dispatcher(a, encoding=None, errors=None): + return (a,) + + +@array_function_dispatch(_code_dispatcher) +def decode(a, encoding=None, errors=None): + """ + Calls `str.decode` element-wise. + + The set of available codecs comes from the Python standard library, + and may be extended at runtime. For more information, see the + :mod:`codecs` module. + + Parameters + ---------- + a : array_like of str or unicode + + encoding : str, optional + The name of an encoding + + errors : str, optional + Specifies how to handle encoding errors + + Returns + ------- + out : ndarray + + See also + -------- + str.decode + + Notes + ----- + The type of the result will depend on the encoding specified. 
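A hedged round-trip sketch for the codec pair above, using the same cp037 codec as the example that follows:

import numpy as np

c = np.array([u'aAaAaA'])
e = np.char.encode(c, encoding='cp037')  # EBCDIC bytes
np.char.decode(e, encoding='cp037')      # recovers the original text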
+ + Examples + -------- + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], + dtype='|S7') + >>> np.char.encode(c, encoding='cp037') + array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@', + '\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'], + dtype='|S7') + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, 'decode', _clean_args(encoding, errors))) + + +@array_function_dispatch(_code_dispatcher) +def encode(a, encoding=None, errors=None): + """ + Calls `str.encode` element-wise. + + The set of available codecs comes from the Python standard library, + and may be extended at runtime. For more information, see the codecs + module. + + Parameters + ---------- + a : array_like of str or unicode + + encoding : str, optional + The name of an encoding + + errors : str, optional + Specifies how to handle encoding errors + + Returns + ------- + out : ndarray + + See also + -------- + str.encode + + Notes + ----- + The type of the result will depend on the encoding specified. + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, 'encode', _clean_args(encoding, errors))) + + +def _endswith_dispatcher(a, suffix, start=None, end=None): + return (a,) + + +@array_function_dispatch(_endswith_dispatcher) +def endswith(a, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `a` ends with `suffix`, otherwise `False`. + + Calls `str.endswith` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + suffix : str + + start, end : int, optional + With optional `start`, test beginning at that position. With + optional `end`, stop comparing at that position. + + Returns + ------- + out : ndarray + Outputs an array of bools. + + See also + -------- + str.endswith + + Examples + -------- + >>> s = np.array(['foo', 'bar']) + >>> s[0] = 'foo' + >>> s[1] = 'bar' + >>> s + array(['foo', 'bar'], + dtype='|S3') + >>> np.char.endswith(s, 'ar') + array([False, True]) + >>> np.char.endswith(s, 'a', start=1, end=2) + array([False, True]) + + """ + return _vec_string( + a, bool_, 'endswith', [suffix, start] + _clean_args(end)) + + +def _expandtabs_dispatcher(a, tabsize=None): + return (a,) + + +@array_function_dispatch(_expandtabs_dispatcher) +def expandtabs(a, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + Calls `str.expandtabs` element-wise. + + Return a copy of each string element where all tab characters are + replaced by one or more spaces, depending on the current column + and the given `tabsize`. The column number is reset to zero after + each newline occurring in the string. This doesn't understand other + non-printing characters or escape sequences. + + Parameters + ---------- + a : array_like of str or unicode + Input array + tabsize : int, optional + Replace tabs with `tabsize` number of spaces. If not given defaults + to 8 spaces. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See also + -------- + str.expandtabs + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, 'expandtabs', (tabsize,))) + + +@array_function_dispatch(_count_dispatcher) +def find(a, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring `sub` is found. + + Calls `str.find` element-wise. 
+
+ For each element, return the lowest index in the string where
+ substring `sub` is found, such that `sub` is contained in the
+ range [`start`, `end`].
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+ Optional arguments `start` and `end` are interpreted as in
+ slice notation.
+
+ Returns
+ -------
+ out : ndarray or int
+ Output array of ints. Returns -1 if `sub` is not found.
+
+ See also
+ --------
+ str.find
+
+ """
+ return _vec_string(
+ a, integer, 'find', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_count_dispatcher)
+def index(a, sub, start=0, end=None):
+ """
+ Like `find`, but raises `ValueError` when the substring is not found.
+
+ Calls `str.index` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+
+ Returns
+ -------
+ out : ndarray
+ Output array of ints. Raises `ValueError` if `sub` is not found.
+
+ See also
+ --------
+ find, str.find
+
+ """
+ return _vec_string(
+ a, integer, 'index', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalnum(a):
+ """
+ Returns true for each element if all characters in the string are
+ alphanumeric and there is at least one character, false otherwise.
+
+ Calls `str.isalnum` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.isalnum
+ """
+ return _vec_string(a, bool_, 'isalnum')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalpha(a):
+ """
+ Returns true for each element if all characters in the string are
+ alphabetic and there is at least one character, false otherwise.
+
+ Calls `str.isalpha` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.isalpha
+ """
+ return _vec_string(a, bool_, 'isalpha')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdigit(a):
+ """
+ Returns true for each element if all characters in the string are
+ digits and there is at least one character, false otherwise.
+
+ Calls `str.isdigit` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.isdigit
+ """
+ return _vec_string(a, bool_, 'isdigit')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def islower(a):
+ """
+ Returns true for each element if all cased characters in the
+ string are lowercase and there is at least one cased character,
+ false otherwise.
+
+ Calls `str.islower` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.islower
+ """
+ return _vec_string(a, bool_, 'islower')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isspace(a):
+ """
+ Returns true for each element if there are only whitespace
+ characters in the string and there is at least one character,
+ false otherwise.
+
+ Calls `str.isspace` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
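The contrast the two docstrings above describe, in a sketch: `find` encodes failure as -1, while `index` raises:

import numpy as np

s = np.array(['foo', 'bar'])
np.char.find(s, 'o')   # array([ 1, -1])
np.char.index(s, 'o')  # raises ValueError: substring not found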
+ + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See also + -------- + str.isspace + """ + return _vec_string(a, bool_, 'isspace') + + +@array_function_dispatch(_unary_op_dispatcher) +def istitle(a): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + Call `str.istitle` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See also + -------- + str.istitle + """ + return _vec_string(a, bool_, 'istitle') + + +@array_function_dispatch(_unary_op_dispatcher) +def isupper(a): + """ + Returns true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. + + Call `str.isupper` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See also + -------- + str.isupper + """ + return _vec_string(a, bool_, 'isupper') + + +def _join_dispatcher(sep, seq): + return (sep, seq) + + +@array_function_dispatch(_join_dispatcher) +def join(sep, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + Calls `str.join` element-wise. + + Parameters + ---------- + sep : array_like of str or unicode + seq : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input types + + See also + -------- + str.join + """ + return _to_string_or_unicode_array( + _vec_string(sep, object_, 'join', (seq,))) + + + +def _just_dispatcher(a, width, fillchar=None): + return (a,) + + +@array_function_dispatch(_just_dispatcher) +def ljust(a, width, fillchar=' '): + """ + Return an array with the elements of `a` left-justified in a + string of length `width`. + + Calls `str.ljust` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + width : int + The length of the resulting strings + fillchar : str or unicode, optional + The character to use for padding + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See also + -------- + str.ljust + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = long(numpy.max(width_arr.flat)) + if numpy.issubdtype(a_arr.dtype, numpy.string_): + fillchar = asbytes(fillchar) + return _vec_string( + a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar)) + + +@array_function_dispatch(_unary_op_dispatcher) +def lower(a): + """ + Return an array with the elements converted to lowercase. + + Call `str.lower` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array. 
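A one-line sketch of `join` above, which runs `str.join` over the characters of each element:

import numpy as np

np.char.join('-', np.array(['abc', 'xyz']))  # array(['a-b-c', 'x-y-z'], dtype='|S5')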
+ + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See also + -------- + str.lower + + Examples + -------- + >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c + array(['A1B C', '1BCA', 'BCA1'], + dtype='|S5') + >>> np.char.lower(c) + array(['a1b c', '1bca', 'bca1'], + dtype='|S5') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'lower') + + +def _strip_dispatcher(a, chars=None): + return (a,) + + +@array_function_dispatch(_strip_dispatcher) +def lstrip(a, chars=None): + """ + For each element in `a`, return a copy with the leading characters + removed. + + Calls `str.lstrip` element-wise. + + Parameters + ---------- + a : array-like, {str, unicode} + Input array. + + chars : {str, unicode}, optional + The `chars` argument is a string specifying the set of + characters to be removed. If omitted or None, the `chars` + argument defaults to removing whitespace. The `chars` argument + is not a prefix; rather, all combinations of its values are + stripped. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See also + -------- + str.lstrip + + Examples + -------- + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], + dtype='|S7') + + The 'a' variable is unstripped from c[1] because whitespace leading. + + >>> np.char.lstrip(c, 'a') + array(['AaAaA', ' aA ', 'bBABba'], + dtype='|S7') + + + >>> np.char.lstrip(c, 'A') # leaves c unchanged + array(['aAaAaA', ' aA ', 'abBABba'], + dtype='|S7') + >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all() + ... # XXX: is this a regression? this line now returns False + ... # np.char.lstrip(c,'') does not modify c at all. + True + >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all() + True + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,)) + + +def _partition_dispatcher(a, sep): + return (a,) + + +@array_function_dispatch(_partition_dispatcher) +def partition(a, sep): + """ + Partition each element in `a` around `sep`. + + Calls `str.partition` element-wise. + + For each element in `a`, split the element as the first + occurrence of `sep`, and return 3 strings containing the part + before the separator, the separator itself, and the part after + the separator. If the separator is not found, return 3 strings + containing the string itself, followed by two empty strings. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array + sep : {str, unicode} + Separator to split each string element in `a`. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type. + The output array will have an extra dimension with 3 + elements per input element. + + See also + -------- + str.partition + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, 'partition', (sep,))) + + +def _replace_dispatcher(a, old, new, count=None): + return (a,) + + +@array_function_dispatch(_replace_dispatcher) +def replace(a, old, new, count=None): + """ + For each element in `a`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + Calls `str.replace` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + old, new : str or unicode + + count : int, optional + If the optional argument `count` is given, only the first + `count` occurrences are replaced. 
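A sketch of the extra output axis that `partition` (above) adds, with three fields per input element:

import numpy as np

np.char.partition(np.array(['a-b-c']), '-')
# array([['a', '-', 'b-c']], dtype='|S3') -- shape (1, 3)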
+ + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See also + -------- + str.replace + + """ + return _to_string_or_unicode_array( + _vec_string( + a, object_, 'replace', [old, new] + _clean_args(count))) + + +@array_function_dispatch(_count_dispatcher) +def rfind(a, sub, start=0, end=None): + """ + For each element in `a`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + Calls `str.rfind` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + sub : str or unicode + + start, end : int, optional + Optional arguments `start` and `end` are interpreted as in + slice notation. + + Returns + ------- + out : ndarray + Output array of ints. Return -1 on failure. + + See also + -------- + str.rfind + + """ + return _vec_string( + a, integer, 'rfind', [sub, start] + _clean_args(end)) + + +@array_function_dispatch(_count_dispatcher) +def rindex(a, sub, start=0, end=None): + """ + Like `rfind`, but raises `ValueError` when the substring `sub` is + not found. + + Calls `str.rindex` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + sub : str or unicode + + start, end : int, optional + + Returns + ------- + out : ndarray + Output array of ints. + + See also + -------- + rfind, str.rindex + + """ + return _vec_string( + a, integer, 'rindex', [sub, start] + _clean_args(end)) + + +@array_function_dispatch(_just_dispatcher) +def rjust(a, width, fillchar=' '): + """ + Return an array with the elements of `a` right-justified in a + string of length `width`. + + Calls `str.rjust` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + width : int + The length of the resulting strings + fillchar : str or unicode, optional + The character to use for padding + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See also + -------- + str.rjust + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = long(numpy.max(width_arr.flat)) + if numpy.issubdtype(a_arr.dtype, numpy.string_): + fillchar = asbytes(fillchar) + return _vec_string( + a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar)) + + +@array_function_dispatch(_partition_dispatcher) +def rpartition(a, sep): + """ + Partition (split) each element around the right-most separator. + + Calls `str.rpartition` element-wise. + + For each element in `a`, split the element as the last + occurrence of `sep`, and return 3 strings containing the part + before the separator, the separator itself, and the part after + the separator. If the separator is not found, return 3 strings + containing the string itself, followed by two empty strings. + + Parameters + ---------- + a : array_like of str or unicode + Input array + sep : str or unicode + Right-most separator to split each element in array. + + Returns + ------- + out : ndarray + Output array of string or unicode, depending on input + type. The output array will have an extra dimension with + 3 elements per input element. + + See also + -------- + str.rpartition + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, 'rpartition', (sep,))) + + +def _split_dispatcher(a, sep=None, maxsplit=None): + return (a,) + + +@array_function_dispatch(_split_dispatcher) +def rsplit(a, sep=None, maxsplit=None): + """ + For each element in `a`, return a list of the words in the + string, using `sep` as the delimiter string. 
+ + Calls `str.rsplit` element-wise. + + Except for splitting from the right, `rsplit` + behaves like `split`. + + Parameters + ---------- + a : array_like of str or unicode + + sep : str or unicode, optional + If `sep` is not specified or `None`, any whitespace string + is a separator. + maxsplit : int, optional + If `maxsplit` is given, at most `maxsplit` splits are done, + the rightmost ones. + + Returns + ------- + out : ndarray + Array of list objects + + See also + -------- + str.rsplit, split + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, object_, 'rsplit', [sep] + _clean_args(maxsplit)) + + +def _strip_dispatcher(a, chars=None): + return (a,) + + +@array_function_dispatch(_strip_dispatcher) +def rstrip(a, chars=None): + """ + For each element in `a`, return a copy with the trailing + characters removed. + + Calls `str.rstrip` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + chars : str or unicode, optional + The `chars` argument is a string specifying the set of + characters to be removed. If omitted or None, the `chars` + argument defaults to removing whitespace. The `chars` argument + is not a suffix; rather, all combinations of its values are + stripped. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See also + -------- + str.rstrip + + Examples + -------- + >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c + array(['aAaAaA', 'abBABba'], + dtype='|S7') + >>> np.char.rstrip(c, 'a') + array(['aAaAaA', 'abBABb'], + dtype='|S7') + >>> np.char.rstrip(c, 'A') + array(['aAaAa', 'abBABba'], + dtype='|S7') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,)) + + +@array_function_dispatch(_split_dispatcher) +def split(a, sep=None, maxsplit=None): + """ + For each element in `a`, return a list of the words in the + string, using `sep` as the delimiter string. + + Calls `str.split` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + sep : str or unicode, optional + If `sep` is not specified or `None`, any whitespace string is a + separator. + + maxsplit : int, optional + If `maxsplit` is given, at most `maxsplit` splits are done. + + Returns + ------- + out : ndarray + Array of list objects + + See also + -------- + str.split, rsplit + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, object_, 'split', [sep] + _clean_args(maxsplit)) + + +def _splitlines_dispatcher(a, keepends=None): + return (a,) + + +@array_function_dispatch(_splitlines_dispatcher) +def splitlines(a, keepends=None): + """ + For each element in `a`, return a list of the lines in the + element, breaking at line boundaries. + + Calls `str.splitlines` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + keepends : bool, optional + Line breaks are not included in the resulting list unless + keepends is given and true. 
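As the comments above note, `split` and `rsplit` return object arrays of lists, since rows may split into different lengths; a sketch:

import numpy as np

np.char.split(np.array(['a b', 'c d e']))
# array([['a', 'b'], ['c', 'd', 'e']], dtype=object)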
+ + Returns + ------- + out : ndarray + Array of list objects + + See also + -------- + str.splitlines + + """ + return _vec_string( + a, object_, 'splitlines', _clean_args(keepends)) + + +def _startswith_dispatcher(a, prefix, start=None, end=None): + return (a,) + + +@array_function_dispatch(_startswith_dispatcher) +def startswith(a, prefix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `a` starts with `prefix`, otherwise `False`. + + Calls `str.startswith` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + prefix : str + + start, end : int, optional + With optional `start`, test beginning at that position. With + optional `end`, stop comparing at that position. + + Returns + ------- + out : ndarray + Array of booleans + + See also + -------- + str.startswith + + """ + return _vec_string( + a, bool_, 'startswith', [prefix, start] + _clean_args(end)) + + +@array_function_dispatch(_strip_dispatcher) +def strip(a, chars=None): + """ + For each element in `a`, return a copy with the leading and + trailing characters removed. + + Calls `str.strip` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + chars : str or unicode, optional + The `chars` argument is a string specifying the set of + characters to be removed. If omitted or None, the `chars` + argument defaults to removing whitespace. The `chars` argument + is not a prefix or suffix; rather, all combinations of its + values are stripped. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See also + -------- + str.strip + + Examples + -------- + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], + dtype='|S7') + >>> np.char.strip(c) + array(['aAaAaA', 'aA', 'abBABba'], + dtype='|S7') + >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads + array(['AaAaA', ' aA ', 'bBABb'], + dtype='|S7') + >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails + array(['aAaAa', ' aA ', 'abBABba'], + dtype='|S7') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars)) + + +@array_function_dispatch(_unary_op_dispatcher) +def swapcase(a): + """ + Return element-wise a copy of the string with + uppercase characters converted to lowercase and vice versa. + + Calls `str.swapcase` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See also + -------- + str.swapcase + + Examples + -------- + >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c + array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], + dtype='|S5') + >>> np.char.swapcase(c) + array(['A1b C', '1B cA', 'B cA1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'swapcase') + + +@array_function_dispatch(_unary_op_dispatcher) +def title(a): + """ + Return element-wise title cased version of string or unicode. + + Title case words start with uppercase characters, all remaining cased + characters are lowercase. + + Calls `str.title` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array. 
+ + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See also + -------- + str.title + + Examples + -------- + >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c + array(['a1b c', '1b ca', 'b ca1', 'ca1b'], + dtype='|S5') + >>> np.char.title(c) + array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'title') + + +def _translate_dispatcher(a, table, deletechars=None): + return (a,) + + +@array_function_dispatch(_translate_dispatcher) +def translate(a, table, deletechars=None): + """ + For each element in `a`, return a copy of the string where all + characters occurring in the optional argument `deletechars` are + removed, and the remaining characters have been mapped through the + given translation table. + + Calls `str.translate` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + table : str of length 256 + + deletechars : str + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See also + -------- + str.translate + + """ + a_arr = numpy.asarray(a) + if issubclass(a_arr.dtype.type, unicode_): + return _vec_string( + a_arr, a_arr.dtype, 'translate', (table,)) + else: + return _vec_string( + a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars)) + + +@array_function_dispatch(_unary_op_dispatcher) +def upper(a): + """ + Return an array with the elements converted to uppercase. + + Calls `str.upper` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See also + -------- + str.upper + + Examples + -------- + >>> c = np.array(['a1b c', '1bca', 'bca1']); c + array(['a1b c', '1bca', 'bca1'], + dtype='|S5') + >>> np.char.upper(c) + array(['A1B C', '1BCA', 'BCA1'], + dtype='|S5') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'upper') + + +def _zfill_dispatcher(a, width): + return (a,) + + +@array_function_dispatch(_zfill_dispatcher) +def zfill(a, width): + """ + Return the numeric string left-filled with zeros + + Calls `str.zfill` element-wise. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array. + width : int + Width of string to left-fill elements in `a`. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See also + -------- + str.zfill + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = long(numpy.max(width_arr.flat)) + return _vec_string( + a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,)) + + +@array_function_dispatch(_unary_op_dispatcher) +def isnumeric(a): + """ + For each element, return True if there are only numeric + characters in the element. + + Calls `unicode.isnumeric` element-wise. + + Numeric characters include digit characters, and all characters + that have the Unicode numeric value property, e.g. ``U+2155, + VULGAR FRACTION ONE FIFTH``. + + Parameters + ---------- + a : array_like, unicode + Input array. + + Returns + ------- + out : ndarray, bool + Array of booleans of same shape as `a`. 
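+        Byte-string (``S``-dtype) input is rejected with a ``TypeError``;
+        the numeric property is defined for Unicode strings only.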
+
+    See also
+    --------
+    unicode.isnumeric
+
+    """
+    if _use_unicode(a) != unicode_:
+        raise TypeError("isnumeric is only available for Unicode strings and arrays")
+    return _vec_string(a, bool_, 'isnumeric')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdecimal(a):
+    """
+    For each element, return True if there are only decimal
+    characters in the element.
+
+    Calls `unicode.isdecimal` element-wise.
+
+    Decimal characters include digit characters, and all characters
+    that can be used to form decimal-radix numbers,
+    e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
+
+    Parameters
+    ----------
+    a : array_like, unicode
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, bool
+        Array of booleans identical in shape to `a`.
+
+    See also
+    --------
+    unicode.isdecimal
+
+    """
+    if _use_unicode(a) != unicode_:
+        raise TypeError("isdecimal is only available for Unicode strings and arrays")
+    return _vec_string(a, bool_, 'isdecimal')
+
+
+@set_module('numpy')
+class chararray(ndarray):
+    """
+    chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
+              strides=None, order=None)
+
+    Provides a convenient view on arrays of string and unicode values.
+
+    .. note::
+       The `chararray` class exists for backwards compatibility with
+       Numarray; it is not recommended for new development. Starting from numpy
+       1.4, if one needs arrays of strings, it is recommended to use arrays of
+       `dtype` `object_`, `string_` or `unicode_`, and use the free functions
+       in the `numpy.char` module for fast vectorized string operations.
+
+    Versus a regular NumPy array of type `str` or `unicode`, this
+    class adds the following functionality:
+
+    1) values automatically have whitespace removed from the end
+       when indexed
+
+    2) comparison operators automatically remove whitespace from the
+       end when comparing values
+
+    3) vectorized string operations are provided as methods
+       (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
+
+    chararrays should be created using `numpy.char.array` or
+    `numpy.char.asarray`, rather than this constructor directly.
+
+    This constructor creates the array, using `buffer` (with `offset`
+    and `strides`) if it is not ``None``. If `buffer` is ``None``, then
+    constructs a new array with `strides` in "C order", unless both
+    ``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides`
+    is in "Fortran order".
+
+    Methods
+    -------
+    astype
+    argsort
+    copy
+    count
+    decode
+    dump
+    dumps
+    encode
+    endswith
+    expandtabs
+    fill
+    find
+    flatten
+    getfield
+    index
+    isalnum
+    isalpha
+    isdecimal
+    isdigit
+    islower
+    isnumeric
+    isspace
+    istitle
+    isupper
+    item
+    join
+    ljust
+    lower
+    lstrip
+    nonzero
+    put
+    ravel
+    repeat
+    replace
+    reshape
+    resize
+    rfind
+    rindex
+    rjust
+    rsplit
+    rstrip
+    searchsorted
+    setfield
+    setflags
+    sort
+    split
+    splitlines
+    squeeze
+    startswith
+    strip
+    swapaxes
+    swapcase
+    take
+    title
+    tofile
+    tolist
+    tostring
+    translate
+    transpose
+    upper
+    view
+    zfill
+
+    Parameters
+    ----------
+    shape : tuple
+        Shape of the array.
+    itemsize : int, optional
+        Length of each array element, in number of characters. Default is 1.
+    unicode : bool, optional
+        Are the array elements of type unicode (True) or string (False).
+        Default is False.
+    buffer : int, optional
+        Memory address of the start of the array data. Default is None,
+        in which case a new array is created.
+    offset : int, optional
+        Fixed stride displacement from the beginning of an axis.
+        Default is 0. Needs to be >=0.
+ strides : array_like of ints, optional + Strides for the array (see `ndarray.strides` for full description). + Default is None. + order : {'C', 'F'}, optional + The order in which the array data is stored in memory: 'C' -> + "row major" order (the default), 'F' -> "column major" + (Fortran) order. + + Examples + -------- + >>> charar = np.chararray((3, 3)) + >>> charar[:] = 'a' + >>> charar + chararray([['a', 'a', 'a'], + ['a', 'a', 'a'], + ['a', 'a', 'a']], + dtype='|S1') + + >>> charar = np.chararray(charar.shape, itemsize=5) + >>> charar[:] = 'abc' + >>> charar + chararray([['abc', 'abc', 'abc'], + ['abc', 'abc', 'abc'], + ['abc', 'abc', 'abc']], + dtype='|S5') + + """ + def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, + offset=0, strides=None, order='C'): + global _globalvar + + if unicode: + dtype = unicode_ + else: + dtype = string_ + + # force itemsize to be a Python long, since using NumPy integer + # types results in itemsize.itemsize being used as the size of + # strings in the new array. + itemsize = long(itemsize) + + if sys.version_info[0] >= 3 and isinstance(buffer, _unicode): + # On Py3, unicode objects do not have the buffer interface + filler = buffer + buffer = None + else: + filler = None + + _globalvar = 1 + if buffer is None: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + order=order) + else: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + buffer=buffer, + offset=offset, strides=strides, + order=order) + if filler is not None: + self[...] = filler + _globalvar = 0 + return self + + def __array_finalize__(self, obj): + # The b is a special case because it is used for reconstructing. + if not _globalvar and self.dtype.char not in 'SUbc': + raise ValueError("Can only create a chararray from string data.") + + def __getitem__(self, obj): + val = ndarray.__getitem__(self, obj) + + if isinstance(val, character): + temp = val.rstrip() + if _len(temp) == 0: + val = '' + else: + val = temp + + return val + + # IMPLEMENTATION NOTE: Most of the methods of this class are + # direct delegations to the free functions in this module. + # However, those that return an array of strings should instead + # return a chararray, so some extra wrapping is required. + + def __eq__(self, other): + """ + Return (self == other) element-wise. + + See also + -------- + equal + """ + return equal(self, other) + + def __ne__(self, other): + """ + Return (self != other) element-wise. + + See also + -------- + not_equal + """ + return not_equal(self, other) + + def __ge__(self, other): + """ + Return (self >= other) element-wise. + + See also + -------- + greater_equal + """ + return greater_equal(self, other) + + def __le__(self, other): + """ + Return (self <= other) element-wise. + + See also + -------- + less_equal + """ + return less_equal(self, other) + + def __gt__(self, other): + """ + Return (self > other) element-wise. + + See also + -------- + greater + """ + return greater(self, other) + + def __lt__(self, other): + """ + Return (self < other) element-wise. + + See also + -------- + less + """ + return less(self, other) + + def __add__(self, other): + """ + Return (self + other), that is string concatenation, + element-wise for a pair of array_likes of str or unicode. + + See also + -------- + add + """ + return asarray(add(self, other)) + + def __radd__(self, other): + """ + Return (other + self), that is string concatenation, + element-wise for a pair of array_likes of `string_` or `unicode_`. 
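+        This is the hook that makes an expression such as
+        ``'prefix' + arr`` work when only the right-hand operand is a
+        chararray.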
+ + See also + -------- + add + """ + return asarray(add(numpy.asarray(other), self)) + + def __mul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __rmul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __mod__(self, i): + """ + Return (self % i), that is pre-Python 2.6 string formatting + (iterpolation), element-wise for a pair of array_likes of `string_` + or `unicode_`. + + See also + -------- + mod + """ + return asarray(mod(self, i)) + + def __rmod__(self, other): + return NotImplemented + + def argsort(self, axis=-1, kind='quicksort', order=None): + """ + Return the indices that sort the array lexicographically. + + For full documentation see `numpy.argsort`, for which this method is + in fact merely a "thin wrapper." + + Examples + -------- + >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') + >>> c = c.view(np.chararray); c + chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], + dtype='|S5') + >>> c[c.argsort()] + chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], + dtype='|S5') + + """ + return self.__array__().argsort(axis, kind, order) + argsort.__doc__ = ndarray.argsort.__doc__ + + def capitalize(self): + """ + Return a copy of `self` with only the first character of each element + capitalized. + + See also + -------- + char.capitalize + + """ + return asarray(capitalize(self)) + + def center(self, width, fillchar=' '): + """ + Return a copy of `self` with its elements centered in a + string of length `width`. + + See also + -------- + center + """ + return asarray(center(self, width, fillchar)) + + def count(self, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + See also + -------- + char.count + + """ + return count(self, sub, start, end) + + def decode(self, encoding=None, errors=None): + """ + Calls `str.decode` element-wise. + + See also + -------- + char.decode + + """ + return decode(self, encoding, errors) + + def encode(self, encoding=None, errors=None): + """ + Calls `str.encode` element-wise. + + See also + -------- + char.encode + + """ + return encode(self, encoding, errors) + + def endswith(self, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` ends with `suffix`, otherwise `False`. + + See also + -------- + char.endswith + + """ + return endswith(self, suffix, start, end) + + def expandtabs(self, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + See also + -------- + char.expandtabs + + """ + return asarray(expandtabs(self, tabsize)) + + def find(self, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring `sub` is found. + + See also + -------- + char.find + + """ + return find(self, sub, start, end) + + def index(self, sub, start=0, end=None): + """ + Like `find`, but raises `ValueError` when the substring is not found. + + See also + -------- + char.index + + """ + return index(self, sub, start, end) + + def isalnum(self): + """ + Returns true for each element if all characters in the string + are alphanumeric and there is at least one character, false + otherwise. 
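+        For example, ``'abc123'`` maps to True, while ``'abc 123'``
+        maps to False (the space is neither alphabetic nor numeric).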
+ + See also + -------- + char.isalnum + + """ + return isalnum(self) + + def isalpha(self): + """ + Returns true for each element if all characters in the string + are alphabetic and there is at least one character, false + otherwise. + + See also + -------- + char.isalpha + + """ + return isalpha(self) + + def isdigit(self): + """ + Returns true for each element if all characters in the string are + digits and there is at least one character, false otherwise. + + See also + -------- + char.isdigit + + """ + return isdigit(self) + + def islower(self): + """ + Returns true for each element if all cased characters in the + string are lowercase and there is at least one cased character, + false otherwise. + + See also + -------- + char.islower + + """ + return islower(self) + + def isspace(self): + """ + Returns true for each element if there are only whitespace + characters in the string and there is at least one character, + false otherwise. + + See also + -------- + char.isspace + + """ + return isspace(self) + + def istitle(self): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + See also + -------- + char.istitle + + """ + return istitle(self) + + def isupper(self): + """ + Returns true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. + + See also + -------- + char.isupper + + """ + return isupper(self) + + def join(self, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + See also + -------- + char.join + + """ + return join(self, seq) + + def ljust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` left-justified in a + string of length `width`. + + See also + -------- + char.ljust + + """ + return asarray(ljust(self, width, fillchar)) + + def lower(self): + """ + Return an array with the elements of `self` converted to + lowercase. + + See also + -------- + char.lower + + """ + return asarray(lower(self)) + + def lstrip(self, chars=None): + """ + For each element in `self`, return a copy with the leading characters + removed. + + See also + -------- + char.lstrip + + """ + return asarray(lstrip(self, chars)) + + def partition(self, sep): + """ + Partition each element in `self` around `sep`. + + See also + -------- + partition + """ + return asarray(partition(self, sep)) + + def replace(self, old, new, count=None): + """ + For each element in `self`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + See also + -------- + char.replace + + """ + return asarray(replace(self, old, new, count)) + + def rfind(self, sub, start=0, end=None): + """ + For each element in `self`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + See also + -------- + char.rfind + + """ + return rfind(self, sub, start, end) + + def rindex(self, sub, start=0, end=None): + """ + Like `rfind`, but raises `ValueError` when the substring `sub` is + not found. + + See also + -------- + char.rindex + + """ + return rindex(self, sub, start, end) + + def rjust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` + right-justified in a string of length `width`. 
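+        For example, with ``width=5`` and ``fillchar='-'`` the element
+        ``'ab'`` becomes ``'---ab'``.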
+ + See also + -------- + char.rjust + + """ + return asarray(rjust(self, width, fillchar)) + + def rpartition(self, sep): + """ + Partition each element in `self` around `sep`. + + See also + -------- + rpartition + """ + return asarray(rpartition(self, sep)) + + def rsplit(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in + the string, using `sep` as the delimiter string. + + See also + -------- + char.rsplit + + """ + return rsplit(self, sep, maxsplit) + + def rstrip(self, chars=None): + """ + For each element in `self`, return a copy with the trailing + characters removed. + + See also + -------- + char.rstrip + + """ + return asarray(rstrip(self, chars)) + + def split(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in the + string, using `sep` as the delimiter string. + + See also + -------- + char.split + + """ + return split(self, sep, maxsplit) + + def splitlines(self, keepends=None): + """ + For each element in `self`, return a list of the lines in the + element, breaking at line boundaries. + + See also + -------- + char.splitlines + + """ + return splitlines(self, keepends) + + def startswith(self, prefix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` starts with `prefix`, otherwise `False`. + + See also + -------- + char.startswith + + """ + return startswith(self, prefix, start, end) + + def strip(self, chars=None): + """ + For each element in `self`, return a copy with the leading and + trailing characters removed. + + See also + -------- + char.strip + + """ + return asarray(strip(self, chars)) + + def swapcase(self): + """ + For each element in `self`, return a copy of the string with + uppercase characters converted to lowercase and vice versa. + + See also + -------- + char.swapcase + + """ + return asarray(swapcase(self)) + + def title(self): + """ + For each element in `self`, return a titlecased version of the + string: words start with uppercase characters, all remaining cased + characters are lowercase. + + See also + -------- + char.title + + """ + return asarray(title(self)) + + def translate(self, table, deletechars=None): + """ + For each element in `self`, return a copy of the string where + all characters occurring in the optional argument + `deletechars` are removed, and the remaining characters have + been mapped through the given translation table. + + See also + -------- + char.translate + + """ + return asarray(translate(self, table, deletechars)) + + def upper(self): + """ + Return an array with the elements of `self` converted to + uppercase. + + See also + -------- + char.upper + + """ + return asarray(upper(self)) + + def zfill(self, width): + """ + Return the numeric string left-filled with zeros in a string of + length `width`. + + See also + -------- + char.zfill + + """ + return asarray(zfill(self, width)) + + def isnumeric(self): + """ + For each element in `self`, return True if there are only + numeric characters in the element. + + See also + -------- + char.isnumeric + + """ + return isnumeric(self) + + def isdecimal(self): + """ + For each element in `self`, return True if there are only + decimal characters in the element. + + See also + -------- + char.isdecimal + + """ + return isdecimal(self) + + +def array(obj, itemsize=None, copy=True, unicode=None, order=None): + """ + Create a `chararray`. + + .. note:: + This class is provided for numarray backward-compatibility. 
+ New code (not concerned with numarray compatibility) should use + arrays of type `string_` or `unicode_` and use the free functions + in :mod:`numpy.char ` for fast + vectorized string operations instead. + + Versus a regular NumPy array of type `str` or `unicode`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + copy : bool, optional + If true (default), then the object is copied. Otherwise, a copy + will only be made if __array__ returns a copy, if obj is a + nested sequence, or if a copy is needed to satisfy any of the other + requirements (`itemsize`, unicode, `order`, etc.). + + unicode : bool, optional + When true, the resulting `chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + `None` and `obj` is one of the following: + + - a `chararray`, + - an ndarray of type `str` or `unicode` + - a Python str or unicode object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). If order is 'A', then the returned array may + be in any order (either C-, Fortran-contiguous, or even + discontiguous). + """ + if isinstance(obj, (_bytes, _unicode)): + if unicode is None: + if isinstance(obj, _unicode): + unicode = True + else: + unicode = False + + if itemsize is None: + itemsize = _len(obj) + shape = _len(obj) // itemsize + + if unicode: + if sys.maxunicode == 0xffff: + # On a narrow Python build, the buffer for Unicode + # strings is UCS2, which doesn't match the buffer for + # NumPy Unicode types, which is ALWAYS UCS4. + # Therefore, we need to convert the buffer. On Python + # 2.6 and later, we can use the utf_32 codec. Earlier + # versions don't have that codec, so we convert to a + # numerical array that matches the input buffer, and + # then use NumPy to convert it to UCS4. All of this + # should happen in native endianness. + obj = obj.encode('utf_32') + else: + obj = _unicode(obj) + else: + # Let the default Unicode -> string encoding (if any) take + # precedence. + obj = _bytes(obj) + + return chararray(shape, itemsize=itemsize, unicode=unicode, + buffer=obj, order=order) + + if isinstance(obj, (list, tuple)): + obj = numpy.asarray(obj) + + if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character): + # If we just have a vanilla chararray, create a chararray + # view around it. 
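+        # (The view shares the original buffer, so nothing is copied
+        # here; the copy/astype logic below decides whether the
+        # requested itemsize, unicode setting or order still forces a
+        # real copy.)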
+ if not isinstance(obj, chararray): + obj = obj.view(chararray) + + if itemsize is None: + itemsize = obj.itemsize + # itemsize is in 8-bit chars, so for Unicode, we need + # to divide by the size of a single Unicode character, + # which for NumPy is always 4 + if issubclass(obj.dtype.type, unicode_): + itemsize //= 4 + + if unicode is None: + if issubclass(obj.dtype.type, unicode_): + unicode = True + else: + unicode = False + + if unicode: + dtype = unicode_ + else: + dtype = string_ + + if order is not None: + obj = numpy.asarray(obj, order=order) + if (copy or + (itemsize != obj.itemsize) or + (not unicode and isinstance(obj, unicode_)) or + (unicode and isinstance(obj, string_))): + obj = obj.astype((dtype, long(itemsize))) + return obj + + if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object): + if itemsize is None: + # Since no itemsize was specified, convert the input array to + # a list so the ndarray constructor will automatically + # determine the itemsize for us. + obj = obj.tolist() + # Fall through to the default case + + if unicode: + dtype = unicode_ + else: + dtype = string_ + + if itemsize is None: + val = narray(obj, dtype=dtype, order=order, subok=True) + else: + val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True) + return val.view(chararray) + + +def asarray(obj, itemsize=None, unicode=None, order=None): + """ + Convert the input to a `chararray`, copying the data only if + necessary. + + Versus a regular NumPy array of type `str` or `unicode`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``,``%``) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + unicode : bool, optional + When true, the resulting `chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + `None` and `obj` is one of the following: + + - a `chararray`, + - an ndarray of type `str` or 'unicode` + - a Python str or unicode object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). 
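+
+    Examples
+    --------
+    A minimal sketch (the exact `repr` formatting differs between NumPy
+    versions and Python 2/3 builds):
+
+    >>> np.char.asarray(['hello', 'world'])
+    chararray(['hello', 'world'],
+          dtype='|S5')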
+    """
+    return array(obj, itemsize, copy=False,
+                 unicode=unicode, order=order)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.pyc
new file mode 100644
index 0000000..b1dcebe
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.py b/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.py
new file mode 100644
index 0000000..c4fc77e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.py
@@ -0,0 +1,1422 @@
+"""
+Implementation of optimized einsum.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import itertools
+
+from numpy.compat import basestring
+from numpy.core.multiarray import c_einsum
+from numpy.core.numeric import asanyarray, tensordot
+from numpy.core.overrides import array_function_dispatch
+
+__all__ = ['einsum', 'einsum_path']
+
+einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+einsum_symbols_set = set(einsum_symbols)
+
+
+def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
+    """
+    Computes the number of FLOPS in the contraction.
+
+    Parameters
+    ----------
+    idx_contraction : iterable
+        The indices involved in the contraction
+    inner : bool
+        Does this contraction require an inner product?
+    num_terms : int
+        The number of terms in a contraction
+    size_dictionary : dict
+        The size of each of the indices in idx_contraction
+
+    Returns
+    -------
+    flop_count : int
+        The total number of FLOPS required for the contraction.
+
+    Examples
+    --------
+
+    >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
+    30
+
+    >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
+    60
+
+    """
+
+    overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
+    op_factor = max(1, num_terms - 1)
+    if inner:
+        op_factor += 1
+
+    return overall_size * op_factor
+
+def _compute_size_by_dict(indices, idx_dict):
+    """
+    Computes the product of the elements in indices based on the dictionary
+    idx_dict.
+
+    Parameters
+    ----------
+    indices : iterable
+        Indices to base the product on.
+    idx_dict : dictionary
+        Dictionary of index sizes
+
+    Returns
+    -------
+    ret : int
+        The resulting product.
+
+    Examples
+    --------
+    >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
+    90
+
+    """
+    ret = 1
+    for i in indices:
+        ret *= idx_dict[i]
+    return ret
+
+
+def _find_contraction(positions, input_sets, output_set):
+    """
+    Finds the contraction for a given set of input and output sets.
+
+    Parameters
+    ----------
+    positions : iterable
+        Integer positions of terms used in the contraction.
+ input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + + Returns + ------- + new_result : set + The indices of the resulting contraction + remaining : list + List of sets that have not been contracted, the new set is appended to + the end of this list + idx_removed : set + Indices removed from the entire contraction + idx_contraction : set + The indices used in the current contraction + + Examples + -------- + + # A simple dot product test case + >>> pos = (0, 1) + >>> isets = [set('ab'), set('bc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) + + # A more complex case with additional terms in the contraction + >>> pos = (0, 2) + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) + """ + + idx_contract = set() + idx_remain = output_set.copy() + remaining = [] + for ind, value in enumerate(input_sets): + if ind in positions: + idx_contract |= value + else: + remaining.append(value) + idx_remain |= value + + new_result = idx_remain & idx_contract + idx_removed = (idx_contract - new_result) + remaining.append(new_result) + + return (new_result, remaining, idx_removed, idx_contract) + + +def _optimal_path(input_sets, output_set, idx_dict, memory_limit): + """ + Computes all possible pair contractions, sieves the results based + on ``memory_limit`` and returns the lowest cost path. This algorithm + scales factorial with respect to the elements in the list ``input_sets``. + + Parameters + ---------- + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + idx_dict : dictionary + Dictionary of index sizes + memory_limit : int + The maximum number of elements in a temporary array + + Returns + ------- + path : list + The optimal contraction order within the memory limit constraint. 
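+
+    See Also
+    --------
+    _greedy_path : Cheaper polynomial-time heuristic; the default for
+        ``optimize='greedy'``.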
+
+    Examples
+    --------
+    >>> isets = [set('abd'), set('ac'), set('bdc')]
+    >>> oset = set()
+    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+    >>> _optimal_path(isets, oset, idx_sizes, 5000)
+    [(0, 2), (0, 1)]
+    """
+
+    full_results = [(0, [], input_sets)]
+    for iteration in range(len(input_sets) - 1):
+        iter_results = []
+
+        # Compute all unique pairs
+        for curr in full_results:
+            cost, positions, remaining = curr
+            for con in itertools.combinations(range(len(input_sets) - iteration), 2):
+
+                # Find the contraction
+                cont = _find_contraction(con, remaining, output_set)
+                new_result, new_input_sets, idx_removed, idx_contract = cont
+
+                # Sieve the results based on memory_limit
+                new_size = _compute_size_by_dict(new_result, idx_dict)
+                if new_size > memory_limit:
+                    continue
+
+                # Build (total_cost, positions, indices_remaining)
+                total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
+                new_pos = positions + [con]
+                iter_results.append((total_cost, new_pos, new_input_sets))
+
+        # Update combinatorial list, if we did not find anything return best
+        # path + remaining contractions
+        if iter_results:
+            full_results = iter_results
+        else:
+            path = min(full_results, key=lambda x: x[0])[1]
+            path += [tuple(range(len(input_sets) - iteration))]
+            return path
+
+    # If we have not found anything return single einsum contraction
+    if len(full_results) == 0:
+        return [tuple(range(len(input_sets)))]
+
+    path = min(full_results, key=lambda x: x[0])[1]
+    return path
+
+def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
+    """Compute the cost (removed size + flops) and resultant indices for
+    performing the contraction specified by ``positions``.
+
+    Parameters
+    ----------
+    positions : tuple of int
+        The locations of the proposed tensors to contract.
+    input_sets : list of sets
+        The indices found on each tensor.
+    output_set : set
+        The output indices of the expression.
+    idx_dict : dict
+        Mapping of each index to its size.
+    memory_limit : int
+        The total allowed size for an intermediary tensor.
+    path_cost : int
+        The contraction cost so far.
+    naive_cost : int
+        The cost of the unoptimized expression.
+
+    Returns
+    -------
+    cost : (int, int)
+        A tuple containing the size of any indices removed, and the flop cost.
+    positions : tuple of int
+        The locations of the proposed tensors to contract.
+    new_input_sets : list of sets
+        The resulting new list of indices if this proposed contraction is performed.
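+
+    Notes
+    -----
+    ``None`` is returned instead when the candidate is sieved out, either
+    because the intermediate result would exceed ``memory_limit`` or
+    because the accumulated ``path_cost`` would exceed ``naive_cost``.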
+
+    """
+
+    # Find the contraction
+    contract = _find_contraction(positions, input_sets, output_set)
+    idx_result, new_input_sets, idx_removed, idx_contract = contract
+
+    # Sieve the results based on memory_limit
+    new_size = _compute_size_by_dict(idx_result, idx_dict)
+    if new_size > memory_limit:
+        return None
+
+    # Build sort tuple
+    old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
+    removed_size = sum(old_sizes) - new_size
+
+    # NB: removed_size used to be just the size of any removed indices i.e.:
+    #     helpers.compute_size_by_dict(idx_removed, idx_dict)
+    cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
+    sort = (-removed_size, cost)
+
+    # Sieve based on total cost as well
+    if (path_cost + cost) > naive_cost:
+        return None
+
+    # Add contraction to possible choices
+    return [sort, positions, new_input_sets]
+
+
+def _update_other_results(results, best):
+    """Update the positions and provisional input_sets of ``results`` based on
+    performing the contraction result ``best``. Remove any involving the tensors
+    contracted.
+
+    Parameters
+    ----------
+    results : list
+        List of contraction results produced by ``_parse_possible_contraction``.
+    best : list
+        The best contraction of ``results``, i.e. the one that will be performed.
+
+    Returns
+    -------
+    mod_results : list
+        The list of modified results, updated with outcome of ``best`` contraction.
+    """
+
+    best_con = best[1]
+    bx, by = best_con
+    mod_results = []
+
+    for cost, (x, y), con_sets in results:
+
+        # Ignore results involving tensors just contracted
+        if x in best_con or y in best_con:
+            continue
+
+        # Update the input_sets
+        del con_sets[by - int(by > x) - int(by > y)]
+        del con_sets[bx - int(bx > x) - int(bx > y)]
+        con_sets.insert(-1, best[2][-1])
+
+        # Update the position indices
+        mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
+        mod_results.append((cost, mod_con, con_sets))
+
+    return mod_results
+
+def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
+    """
+    Finds the path by contracting the best pair until the input list is
+    exhausted. The best pair is found by minimizing the tuple
+    ``(-prod(indices_removed), cost)``.  What this amounts to is prioritizing
+    matrix multiplication or inner product operations, then Hadamard like
+    operations, and finally outer operations. Outer products are limited by
+    ``memory_limit``. This algorithm scales cubically with respect to the
+    number of elements in the list ``input_sets``.
+
+    Parameters
+    ----------
+    input_sets : list
+        List of sets that represent the lhs side of the einsum subscript
+    output_set : set
+        Set that represents the rhs side of the overall einsum subscript
+    idx_dict : dictionary
+        Dictionary of index sizes
+    memory_limit : int
+        The maximum number of elements in a temporary array
+
+    Returns
+    -------
+    path : list
+        The greedy contraction order within the memory limit constraint.
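+
+    See Also
+    --------
+    _optimal_path : Exhaustive search over all contraction orders.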
+
+    Examples
+    --------
+    >>> isets = [set('abd'), set('ac'), set('bdc')]
+    >>> oset = set()
+    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+    >>> _greedy_path(isets, oset, idx_sizes, 5000)
+    [(0, 2), (0, 1)]
+    """
+
+    # Handle trivial cases that leaked through
+    if len(input_sets) == 1:
+        return [(0,)]
+    elif len(input_sets) == 2:
+        return [(0, 1)]
+
+    # Build up a naive cost
+    contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
+    idx_result, new_input_sets, idx_removed, idx_contract = contract
+    naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
+
+    # Initially iterate over all pairs
+    comb_iter = itertools.combinations(range(len(input_sets)), 2)
+    known_contractions = []
+
+    path_cost = 0
+    path = []
+
+    for iteration in range(len(input_sets) - 1):
+
+        # Iterate over all pairs on first step, only previously found pairs on subsequent steps
+        for positions in comb_iter:
+
+            # Always initially ignore outer products
+            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
+                continue
+
+            result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
+                                                 naive_cost)
+            if result is not None:
+                known_contractions.append(result)
+
+        # If we do not have an inner contraction, rescan pairs including outer products
+        if len(known_contractions) == 0:
+
+            # Then check the outer products
+            for positions in itertools.combinations(range(len(input_sets)), 2):
+                result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
+                                                     path_cost, naive_cost)
+                if result is not None:
+                    known_contractions.append(result)
+
+            # If we still did not find any remaining contractions, default back to einsum like behavior
+            if len(known_contractions) == 0:
+                path.append(tuple(range(len(input_sets))))
+                break
+
+        # Sort based on first index
+        best = min(known_contractions, key=lambda x: x[0])
+
+        # Now propagate as many unused contractions as possible to next iteration
+        known_contractions = _update_other_results(known_contractions, best)
+
+        # Next iteration only compute contractions with the new tensor
+        # All other contractions have been accounted for
+        input_sets = best[2]
+        new_tensor_pos = len(input_sets) - 1
+        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
+
+        # Update path and total cost
+        path.append(best[1])
+        path_cost += best[0][1]
+
+    return path
+
+
+def _can_dot(inputs, result, idx_removed):
+    """
+    Checks if we can use a BLAS (np.tensordot) call and whether it is
+    beneficial to do so.
+
+    Parameters
+    ----------
+    inputs : list of str
+        Specifies the subscripts for summation.
+    result : str
+        Resulting summation.
+    idx_removed : set
+        Indices that are removed in the summation
+
+
+    Returns
+    -------
+    type : bool
+        Returns true if BLAS should and can be used, else False
+
+    Notes
+    -----
+    If the operation is BLAS level 1 or 2 and the data is not already
+    aligned we default back to einsum as the memory movement to copy is
+    more costly than the operation itself.
+ + + Examples + -------- + + # Standard GEMM operation + >>> _can_dot(['ij', 'jk'], 'ik', set('j')) + True + + # Can use the standard BLAS, but requires odd data movement + >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) + False + + # DDOT where the memory is not aligned + >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) + False + + """ + + # All `dot` calls remove indices + if len(idx_removed) == 0: + return False + + # BLAS can only handle two operands + if len(inputs) != 2: + return False + + input_left, input_right = inputs + + for c in set(input_left + input_right): + # can't deal with repeated indices on same input or more than 2 total + nl, nr = input_left.count(c), input_right.count(c) + if (nl > 1) or (nr > 1) or (nl + nr > 2): + return False + + # can't do implicit summation or dimension collapse e.g. + # "ab,bc->c" (implicitly sum over 'a') + # "ab,ca->ca" (take diagonal of 'a') + if nl + nr - 1 == int(c in result): + return False + + # Build a few temporaries + set_left = set(input_left) + set_right = set(input_right) + keep_left = set_left - idx_removed + keep_right = set_right - idx_removed + rs = len(idx_removed) + + # At this point we are a DOT, GEMV, or GEMM operation + + # Handle inner products + + # DDOT with aligned data + if input_left == input_right: + return True + + # DDOT without aligned data (better to use einsum) + if set_left == set_right: + return False + + # Handle the 4 possible (aligned) GEMV or GEMM cases + + # GEMM or GEMV no transpose + if input_left[-rs:] == input_right[:rs]: + return True + + # GEMM or GEMV transpose both + if input_left[:rs] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose right + if input_left[-rs:] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose left + if input_left[:rs] == input_right[:rs]: + return True + + # Einsum is faster than GEMV if we have to copy data + if not keep_left or not keep_right: + return False + + # We are a matrix-matrix product, but we need to copy data + return True + + +def _parse_einsum_input(operands): + """ + A reproduction of einsum c side einsum parsing in python. + + Returns + ------- + input_strings : str + Parsed input strings + output_string : str + Parsed output string + operands : list of array_like + The operands to use in the numpy contraction + + Examples + -------- + The operand list is simplified to reduce printing: + + >>> a = np.random.rand(4, 4) + >>> b = np.random.rand(4, 4, 4) + >>> __parse_einsum_input(('...a,...a->...', a, b)) + ('za,xza', 'xz', [a, b]) + + >>> __parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) + ('za,xza', 'xz', [a, b]) + """ + + if len(operands) == 0: + raise ValueError("No input operands") + + if isinstance(operands[0], basestring): + subscripts = operands[0].replace(" ", "") + operands = [asanyarray(v) for v in operands[1:]] + + # Ensure all characters are valid + for s in subscripts: + if s in '.,->': + continue + if s not in einsum_symbols: + raise ValueError("Character %s is not a valid symbol." % s) + + else: + tmp_operands = list(operands) + operand_list = [] + subscript_list = [] + for p in range(len(operands) // 2): + operand_list.append(tmp_operands.pop(0)) + subscript_list.append(tmp_operands.pop(0)) + + output_list = tmp_operands[-1] if len(tmp_operands) else None + operands = [asanyarray(v) for v in operand_list] + subscripts = "" + last = len(subscript_list) - 1 + for num, sub in enumerate(subscript_list): + for s in sub: + if s is Ellipsis: + subscripts += "..." 
+ elif isinstance(s, int): + subscripts += einsum_symbols[s] + else: + raise TypeError("For this input type lists must contain " + "either int or Ellipsis") + if num != last: + subscripts += "," + + if output_list is not None: + subscripts += "->" + for s in output_list: + if s is Ellipsis: + subscripts += "..." + elif isinstance(s, int): + subscripts += einsum_symbols[s] + else: + raise TypeError("For this input type lists must contain " + "either int or Ellipsis") + # Check for proper "->" + if ("-" in subscripts) or (">" in subscripts): + invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) + if invalid or (subscripts.count("->") != 1): + raise ValueError("Subscripts can only contain one '->'.") + + # Parse ellipses + if "." in subscripts: + used = subscripts.replace(".", "").replace(",", "").replace("->", "") + unused = list(einsum_symbols_set - set(used)) + ellipse_inds = "".join(unused) + longest = 0 + + if "->" in subscripts: + input_tmp, output_sub = subscripts.split("->") + split_subscripts = input_tmp.split(",") + out_sub = True + else: + split_subscripts = subscripts.split(',') + out_sub = False + + for num, sub in enumerate(split_subscripts): + if "." in sub: + if (sub.count(".") != 3) or (sub.count("...") != 1): + raise ValueError("Invalid Ellipses.") + + # Take into account numerical values + if operands[num].shape == (): + ellipse_count = 0 + else: + ellipse_count = max(operands[num].ndim, 1) + ellipse_count -= (len(sub) - 3) + + if ellipse_count > longest: + longest = ellipse_count + + if ellipse_count < 0: + raise ValueError("Ellipses lengths do not match.") + elif ellipse_count == 0: + split_subscripts[num] = sub.replace('...', '') + else: + rep_inds = ellipse_inds[-ellipse_count:] + split_subscripts[num] = sub.replace('...', rep_inds) + + subscripts = ",".join(split_subscripts) + if longest == 0: + out_ellipse = "" + else: + out_ellipse = ellipse_inds[-longest:] + + if out_sub: + subscripts += "->" + output_sub.replace("...", out_ellipse) + else: + # Special care for outputless ellipses + output_subscript = "" + tmp_subscripts = subscripts.replace(",", "") + for s in sorted(set(tmp_subscripts)): + if s not in (einsum_symbols): + raise ValueError("Character %s is not a valid symbol." % s) + if tmp_subscripts.count(s) == 1: + output_subscript += s + normal_inds = ''.join(sorted(set(output_subscript) - + set(out_ellipse))) + + subscripts += "->" + out_ellipse + normal_inds + + # Build output string if does not exist + if "->" in subscripts: + input_subscripts, output_subscript = subscripts.split("->") + else: + input_subscripts = subscripts + # Build output subscripts + tmp_subscripts = subscripts.replace(",", "") + output_subscript = "" + for s in sorted(set(tmp_subscripts)): + if s not in einsum_symbols: + raise ValueError("Character %s is not a valid symbol." % s) + if tmp_subscripts.count(s) == 1: + output_subscript += s + + # Make sure output subscripts are in the input + for char in output_subscript: + if char not in input_subscripts: + raise ValueError("Output character %s did not appear in the input" + % char) + + # Make sure number operands is equivalent to the number of terms + if len(input_subscripts.split(',')) != len(operands): + raise ValueError("Number of einsum subscripts must be equal to the " + "number of operands.") + + return (input_subscripts, output_subscript, operands) + + +def _einsum_path_dispatcher(*operands, **kwargs): + # NOTE: technically, we should only dispatch on array-like arguments, not + # subscripts (given as strings). 
But separating operands into + # arrays/subscripts is a little tricky/slow (given einsum's two supported + # signatures), so as a practical shortcut we dispatch on everything. + # Strings will be ignored for dispatching since they don't define + # __array_function__. + return operands + + +@array_function_dispatch(_einsum_path_dispatcher, module='numpy') +def einsum_path(*operands, **kwargs): + """ + einsum_path(subscripts, *operands, optimize='greedy') + + Evaluates the lowest cost contraction order for an einsum expression by + considering the creation of intermediate arrays. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation. + *operands : list of array_like + These are the arrays for the operation. + optimize : {bool, list, tuple, 'greedy', 'optimal'} + Choose the type of path. If a tuple is provided, the second argument is + assumed to be the maximum intermediate size created. If only a single + argument is provided the largest input or output array size is used + as a maximum intermediate size. + + * if a list is given that starts with ``einsum_path``, uses this as the + contraction path + * if False no optimization is taken + * if True defaults to the 'greedy' algorithm + * 'optimal' An algorithm that combinatorially explores all possible + ways of contracting the listed tensors and choosest the least costly + path. Scales exponentially with the number of terms in the + contraction. + * 'greedy' An algorithm that chooses the best pair contraction + at each step. Effectively, this algorithm searches the largest inner, + Hadamard, and then outer products at each step. Scales cubically with + the number of terms in the contraction. Equivalent to the 'optimal' + path for most contractions. + + Default is 'greedy'. + + Returns + ------- + path : list of tuples + A list representation of the einsum path. + string_repr : str + A printable representation of the einsum path. + + Notes + ----- + The resulting path indicates which terms of the input contraction should be + contracted first, the result of this contraction is then appended to the + end of the contraction list. This list can then be iterated over until all + intermediate contractions are complete. + + See Also + -------- + einsum, linalg.multi_dot + + Examples + -------- + + We can begin with a chain dot example. In this case, it is optimal to + contract the ``b`` and ``c`` tensors first as represented by the first + element of the path ``(1, 2)``. The resulting tensor is added to the end + of the contraction and the remaining contraction ``(0, 1)`` is then + completed. + + >>> a = np.random.rand(2, 2) + >>> b = np.random.rand(2, 5) + >>> c = np.random.rand(5, 2) + >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') + >>> print(path_info[0]) + ['einsum_path', (1, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ij,jk,kl->il + Naive scaling: 4 + Optimized scaling: 3 + Naive FLOP count: 1.600e+02 + Optimized FLOP count: 5.600e+01 + Theoretical speedup: 2.857 + Largest intermediate: 4.000e+00 elements + ------------------------------------------------------------------------- + scaling current remaining + ------------------------------------------------------------------------- + 3 kl,jk->jl ij,jl->il + 3 jl,ij->il il->il + + + A more complex index transformation example. 
+ + >>> I = np.random.rand(10, 10, 10, 10) + >>> C = np.random.rand(10, 10) + >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, + optimize='greedy') + + >>> print(path_info[0]) + ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ea,fb,abcd,gc,hd->efgh + Naive scaling: 8 + Optimized scaling: 5 + Naive FLOP count: 8.000e+08 + Optimized FLOP count: 8.000e+05 + Theoretical speedup: 1000.000 + Largest intermediate: 1.000e+04 elements + -------------------------------------------------------------------------- + scaling current remaining + -------------------------------------------------------------------------- + 5 abcd,ea->bcde fb,gc,hd,bcde->efgh + 5 bcde,fb->cdef gc,hd,cdef->efgh + 5 cdef,gc->defg hd,defg->efgh + 5 defg,hd->efgh efgh->efgh + """ + + # Make sure all keywords are valid + valid_contract_kwargs = ['optimize', 'einsum_call'] + unknown_kwargs = [k for (k, v) in kwargs.items() if k + not in valid_contract_kwargs] + if len(unknown_kwargs): + raise TypeError("Did not understand the following kwargs:" + " %s" % unknown_kwargs) + + # Figure out what the path really is + path_type = kwargs.pop('optimize', True) + if path_type is True: + path_type = 'greedy' + if path_type is None: + path_type = False + + memory_limit = None + + # No optimization or a named path algorithm + if (path_type is False) or isinstance(path_type, basestring): + pass + + # Given an explicit path + elif len(path_type) and (path_type[0] == 'einsum_path'): + pass + + # Path tuple with memory limit + elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and + isinstance(path_type[1], (int, float))): + memory_limit = int(path_type[1]) + path_type = path_type[0] + + else: + raise TypeError("Did not understand the path: %s" % str(path_type)) + + # Hidden option, only einsum should call this + einsum_call_arg = kwargs.pop("einsum_call", False) + + # Python side parsing + input_subscripts, output_subscript, operands = _parse_einsum_input(operands) + + # Build a few useful list and sets + input_list = input_subscripts.split(',') + input_sets = [set(x) for x in input_list] + output_set = set(output_subscript) + indices = set(input_subscripts.replace(',', '')) + + # Get length of each unique dimension and ensure all dimensions are correct + dimension_dict = {} + broadcast_indices = [[] for x in range(len(input_list))] + for tnum, term in enumerate(input_list): + sh = operands[tnum].shape + if len(sh) != len(term): + raise ValueError("Einstein sum subscript %s does not contain the " + "correct number of indices for operand %d." + % (input_subscripts[tnum], tnum)) + for cnum, char in enumerate(term): + dim = sh[cnum] + + # Build out broadcast indices + if dim == 1: + broadcast_indices[tnum].append(char) + + if char in dimension_dict.keys(): + # For broadcasting cases we always want the largest dim size + if dimension_dict[char] == 1: + dimension_dict[char] = dim + elif dim not in (1, dimension_dict[char]): + raise ValueError("Size of label '%s' for operand %d (%d) " + "does not match previous terms (%d)." 
+                                     % (char, tnum, dimension_dict[char], dim))
+            else:
+                dimension_dict[char] = dim
+
+    # Convert broadcast inds to sets
+    broadcast_indices = [set(x) for x in broadcast_indices]
+
+    # Compute size of each input array plus the output array
+    size_list = [_compute_size_by_dict(term, dimension_dict)
+                 for term in input_list + [output_subscript]]
+    max_size = max(size_list)
+
+    if memory_limit is None:
+        memory_arg = max_size
+    else:
+        memory_arg = memory_limit
+
+    # Compute naive cost
+    # This isn't quite right, need to look into exactly how einsum does this
+    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
+    naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
+
+    # Compute the path
+    if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
+        # Nothing to be optimized, leave it to einsum
+        path = [tuple(range(len(input_list)))]
+    elif path_type == "greedy":
+        path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
+    elif path_type == "optimal":
+        path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
+    elif path_type[0] == 'einsum_path':
+        path = path_type[1:]
+    else:
+        raise KeyError("Path name %s not found" % path_type)
+
+    cost_list, scale_list, size_list, contraction_list = [], [], [], []
+
+    # Build contraction tuple (positions, gemm, einsum_str, remaining)
+    for cnum, contract_inds in enumerate(path):
+        # Make sure we remove inds from right to left
+        contract_inds = tuple(sorted(list(contract_inds), reverse=True))
+
+        contract = _find_contraction(contract_inds, input_sets, output_set)
+        out_inds, input_sets, idx_removed, idx_contract = contract
+
+        cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
+        cost_list.append(cost)
+        scale_list.append(len(idx_contract))
+        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
+
+        bcast = set()
+        tmp_inputs = []
+        for x in contract_inds:
+            tmp_inputs.append(input_list.pop(x))
+            bcast |= broadcast_indices.pop(x)
+
+        new_bcast_inds = bcast - idx_removed
+
+        # If we're broadcasting, nix blas
+        if not len(idx_removed & bcast):
+            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+        else:
+            do_blas = False
+
+        # Last contraction
+        if (cnum - len(path)) == -1:
+            idx_result = output_subscript
+        else:
+            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
+            idx_result = "".join([x[1] for x in sorted(sort_result)])
+
+        input_list.append(idx_result)
+        broadcast_indices.append(new_bcast_inds)
+        einsum_str = ",".join(tmp_inputs) + "->" + idx_result
+
+        contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
+        contraction_list.append(contraction)
+
+    opt_cost = sum(cost_list) + 1
+
+    if einsum_call_arg:
+        return (operands, contraction_list)
+
+    # Return the path along with a nice string representation
+    overall_contraction = input_subscripts + "->" + output_subscript
+    header = ("scaling", "current", "remaining")
+
+    speedup = naive_cost / opt_cost
+    max_i = max(size_list)
+
+    path_print  = "  Complete contraction:  %s\n" % overall_contraction
+    path_print += "         Naive scaling:  %d\n" % len(indices)
+    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
+    path_print += "      Naive FLOP count:  %.3e\n" % naive_cost
+    path_print += "  Optimized FLOP count:  %.3e\n" % opt_cost
+    path_print += "   Theoretical speedup:  %3.3f\n" % speedup
+    path_print += "  Largest intermediate:  %.3e elements\n" % max_i
+    path_print += "-" * 74 + "\n"
+    path_print += "%6s %24s %40s\n" % header
+    path_print += 
"-" * 74 + + for n, contraction in enumerate(contraction_list): + inds, idx_rm, einsum_str, remaining, blas = contraction + remaining_str = ",".join(remaining) + "->" + output_subscript + path_run = (scale_list[n], einsum_str, remaining_str) + path_print += "\n%4d %24s %40s" % path_run + + path = ['einsum_path'] + path + return (path, path_print) + + +def _einsum_dispatcher(*operands, **kwargs): + # Arguably we dispatch on more arguments that we really should; see note in + # _einsum_path_dispatcher for why. + for op in operands: + yield op + yield kwargs.get('out') + + +# Rewrite einsum to handle different cases +@array_function_dispatch(_einsum_dispatcher, module='numpy') +def einsum(*operands, **kwargs): + """ + einsum(subscripts, *operands, out=None, dtype=None, order='K', + casting='safe', optimize=False) + + Evaluates the Einstein summation convention on the operands. + + Using the Einstein summation convention, many common multi-dimensional, + linear algebraic array operations can be represented in a simple fashion. + In *implicit* mode `einsum` computes these values. + + In *explicit* mode, `einsum` provides further flexibility to compute + other array operations that might not be considered classical Einstein + summation operations, by disabling, or forcing summation over specified + subscript labels. + + See the notes and examples for clarification. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation as comma separated list of + subscript labels. An implicit (classical Einstein summation) + calculation is performed unless the explicit indicator '->' is + included as well as subscript labels of the precise output form. + operands : list of array_like + These are the arrays for the operation. + out : ndarray, optional + If provided, the calculation is done into this array. + dtype : {data-type, None}, optional + If provided, forces the calculation to use the data type specified. + Note that you may have to also give a more liberal `casting` + parameter to allow the conversions. Default is None. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the output. 'C' means it should + be C contiguous. 'F' means it should be Fortran contiguous, + 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. + 'K' means it should be as close to the layout as the inputs as + is possible, including arbitrarily permuted axes. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Setting this to + 'unsafe' is not recommended, as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Default is 'safe'. + optimize : {False, True, 'greedy', 'optimal'}, optional + Controls if intermediate optimization should occur. No optimization + will occur if False and True will default to the 'greedy' algorithm. + Also accepts an explicit contraction list from the ``np.einsum_path`` + function. See ``np.einsum_path`` for more details. Defaults to False. + + Returns + ------- + output : ndarray + The calculation based on the Einstein summation convention. 
+ + See Also + -------- + einsum_path, dot, inner, outer, tensordot, linalg.multi_dot + + Notes + ----- + .. versionadded:: 1.6.0 + + The Einstein summation convention can be used to compute + many multi-dimensional, linear algebraic array operations. `einsum` + provides a succinct way of representing these. + + A non-exhaustive list of these operations, + which can be computed by `einsum`, is shown below along with examples: + + * Trace of an array, :py:func:`numpy.trace`. + * Return a diagonal, :py:func:`numpy.diag`. + * Array axis summations, :py:func:`numpy.sum`. + * Transpositions and permutations, :py:func:`numpy.transpose`. + * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. + * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. + * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. + * Tensor contractions, :py:func:`numpy.tensordot`. + * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. + + The subscripts string is a comma-separated list of subscript labels, + where each label refers to a dimension of the corresponding operand. + Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` + is equivalent to :py:func:`np.inner(a,b) `. If a label + appears only once, it is not summed, so ``np.einsum('i', a)`` produces a + view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` + describes traditional matrix multiplication and is equivalent to + :py:func:`np.matmul(a,b) `. Repeated subscript labels in one + operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent + to :py:func:`np.trace(a) `. + + In *implicit mode*, the chosen subscripts are important + since the axes of the output are reordered alphabetically. This + means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while + ``np.einsum('ji', a)`` takes its transpose. Additionally, + ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, + ``np.einsum('ij,jh', a, b)`` returns the transpose of the + multiplication since subscript 'h' precedes subscript 'i'. + + In *explicit mode* the output can be directly controlled by + specifying output subscript labels. This requires the + identifier '->' as well as the list of output subscript labels. + This feature increases the flexibility of the function since + summing can be disabled or forced when required. The call + ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) `, + and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) `. + The difference is that `einsum` does not allow broadcasting by default. + Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the + order of the output subscript labels and therefore returns matrix + multiplication, unlike the example above in implicit mode. + + To enable and control broadcasting, use an ellipsis. Default + NumPy-style broadcasting is done by adding an ellipsis + to the left of each term, like ``np.einsum('...ii->...i', a)``. + To take the trace along the first and last axes, + you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix + product with the left-most indices instead of rightmost, one can do + ``np.einsum('ij...,jk...->ik...', a, b)``. + + When there is only one operand, no axes are summed, and no output + parameter is provided, a view into the operand is returned instead + of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` + produces a view (changed in version 1.10.0). 
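+
+    As a quick illustration of the two modes (an editor's example, not
+    part of the upstream docstring; both calls compute the same matrix
+    product):
+
+    >>> a = np.arange(4).reshape(2, 2)
+    >>> np.einsum('ij,jk', a, a)          # implicit output 'ik'
+    array([[ 2,  3],
+           [ 6, 11]])
+    >>> np.einsum('ij,jk->ik', a, a)      # explicit, same result
+    array([[ 2,  3],
+           [ 6, 11]])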
+ + `einsum` also provides an alternative way to provide the subscripts + and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. + If the output shape is not provided in this format `einsum` will be + calculated in implicit mode, otherwise it will be performed explicitly. + The examples below have corresponding `einsum` calls with the two + parameter methods. + + .. versionadded:: 1.10.0 + + Views returned from einsum are now writeable whenever the input array + is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now + have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` + and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal + of a 2D array. + + .. versionadded:: 1.12.0 + + Added the ``optimize`` argument which will optimize the contraction order + of an einsum expression. For a contraction with three or more operands this + can greatly increase the computational efficiency at the cost of a larger + memory footprint during computation. + + Typically a 'greedy' algorithm is applied which empirical tests have shown + returns the optimal path in the majority of cases. In some cases 'optimal' + will return the superlative path through a more expensive, exhaustive search. + For iterative calculations it may be advisable to calculate the optimal path + once and reuse that path by supplying it as an argument. An example is given + below. + + See :py:func:`numpy.einsum_path` for more details. + + Examples + -------- + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> 
np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + Chained array operations. For more complicated contractions, speed ups + might be achieved by repeatedly computing a 'greedy' path or pre-computing the + 'optimal' path and repeatedly applying it, using an + `einsum_path` insertion (since version 1.12.0). Performance improvements can be + particularly significant with larger arrays: + + >>> a = np.ones(64).reshape(2,4,8) + # Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) + >>> for iteration in range(500): + ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) + # Sub-optimal `einsum` (due to repeated path calculation time): ~330ms + >>> for iteration in range(500): + ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') + # Greedy `einsum` (faster optimal path approximation): ~160ms + >>> for iteration in range(500): + ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') + # Optimal `einsum` (best usage pattern in some use cases): ~110ms + >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] + >>> for iteration in range(500): + ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) + + """ + + # Grab non-einsum kwargs; do not optimize by default. 
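+    # Editor's note (illustrative, not upstream): optimize=False
+    # short-circuits to the compiled c_einsum below, while any other
+    # value routes through einsum_path(..., einsum_call=True) so that
+    # each pairwise contraction can be handed to tensordot/BLAS wherever
+    # the per-step 'blas' flag allows it.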
+ optimize_arg = kwargs.pop('optimize', False) + + # If no optimization, run pure einsum + if optimize_arg is False: + return c_einsum(*operands, **kwargs) + + valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting'] + einsum_kwargs = {k: v for (k, v) in kwargs.items() if + k in valid_einsum_kwargs} + + # Make sure all keywords are valid + valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs + unknown_kwargs = [k for (k, v) in kwargs.items() if + k not in valid_contract_kwargs] + + if len(unknown_kwargs): + raise TypeError("Did not understand the following kwargs: %s" + % unknown_kwargs) + + # Special handeling if out is specified + specified_out = False + out_array = einsum_kwargs.pop('out', None) + if out_array is not None: + specified_out = True + + # Build the contraction list and operand + operands, contraction_list = einsum_path(*operands, optimize=optimize_arg, + einsum_call=True) + + handle_out = False + + # Start contraction loop + for num, contraction in enumerate(contraction_list): + inds, idx_rm, einsum_str, remaining, blas = contraction + tmp_operands = [operands.pop(x) for x in inds] + + # Do we need to deal with the output? + handle_out = specified_out and ((num + 1) == len(contraction_list)) + + # Call tensordot if still possible + if blas: + # Checks have already been handled + input_str, results_index = einsum_str.split('->') + input_left, input_right = input_str.split(',') + + tensor_result = input_left + input_right + for s in idx_rm: + tensor_result = tensor_result.replace(s, "") + + # Find indices to contract over + left_pos, right_pos = [], [] + for s in sorted(idx_rm): + left_pos.append(input_left.find(s)) + right_pos.append(input_right.find(s)) + + # Contract! + new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos))) + + # Build a new view if needed + if (tensor_result != results_index) or handle_out: + if handle_out: + einsum_kwargs["out"] = out_array + new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs) + + # Call einsum + else: + # If out was specified + if handle_out: + einsum_kwargs["out"] = out_array + + # Do the contraction + new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs) + + # Append new items and dereference what we can + operands.append(new_view) + del tmp_operands, new_view + + if specified_out: + return out_array + else: + return operands[0] diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.pyc new file mode 100644 index 0000000..8368b3f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.py b/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.py new file mode 100644 index 0000000..59a820d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.py @@ -0,0 +1,3459 @@ +"""Module containing non-deprecated functions borrowed from Numeric. + +""" +from __future__ import division, absolute_import, print_function + +import functools +import types +import warnings + +import numpy as np +from .. import VisibleDeprecationWarning +from . import multiarray as mu +from . import overrides +from . import umath as um +from . import numerictypes as nt +from .numeric import asarray, array, asanyarray, concatenate +from . 
import _methods + +_dt_ = nt.sctype2char + +# functions that are methods +__all__ = [ + 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', + 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', + 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', + 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', + 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_', + 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', + 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', +] + +_gentype = types.GeneratorType +# save away Python sum +_sum_ = sum + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +# functions that are now methods +def _wrapit(obj, method, *args, **kwds): + try: + wrap = obj.__array_wrap__ + except AttributeError: + wrap = None + result = getattr(asarray(obj), method)(*args, **kwds) + if wrap: + if not isinstance(result, mu.ndarray): + result = asarray(result) + result = wrap(result) + return result + + +def _wrapfunc(obj, method, *args, **kwds): + try: + return getattr(obj, method)(*args, **kwds) + + # An AttributeError occurs if the object does not have + # such a method in its class. + + # A TypeError occurs if the object does have such a method + # in its class, but its signature is not identical to that + # of NumPy's. This situation has occurred in the case of + # a downstream library like 'pandas'. + except (AttributeError, TypeError): + return _wrapit(obj, method, *args, **kwds) + + +def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs): + passkwargs = {k: v for k, v in kwargs.items() + if v is not np._NoValue} + + if type(obj) is not mu.ndarray: + try: + reduction = getattr(obj, method) + except AttributeError: + pass + else: + # This branch is needed for reductions like any which don't + # support a dtype. + if dtype is not None: + return reduction(axis=axis, dtype=dtype, out=out, **passkwargs) + else: + return reduction(axis=axis, out=out, **passkwargs) + + return ufunc.reduce(obj, axis, dtype, out, **passkwargs) + + +def _take_dispatcher(a, indices, axis=None, out=None, mode=None): + return (a, out) + + +@array_function_dispatch(_take_dispatcher) +def take(a, indices, axis=None, out=None, mode='raise'): + """ + Take elements from an array along an axis. + + When axis is not None, this function does the same thing as "fancy" + indexing (indexing arrays using arrays); however, it can be easier to use + if you need elements along a given axis. A call such as + ``np.take(arr, indices, axis=3)`` is equivalent to + ``arr[:,:,:,indices,...]``. + + Explained without fancy indexing, this is equivalent to the following use + of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of + indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + Nj = indices.shape + for ii in ndindex(Ni): + for jj in ndindex(Nj): + for kk in ndindex(Nk): + out[ii + jj + kk] = a[ii + (indices[jj],) + kk] + + Parameters + ---------- + a : array_like (Ni..., M, Nk...) + The source array. + indices : array_like (Nj...) + The indices of the values to extract. + + .. versionadded:: 1.8.0 + + Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : ndarray, optional (Ni..., Nj..., Nk...) + If provided, the result will be placed in this array. It should + be of the appropriate shape and dtype. 
+ mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The returned array has the same type as `a`. + + See Also + -------- + compress : Take elements using a boolean mask + ndarray.take : equivalent method + take_along_axis : Take elements by matching the array and the index arrays + + Notes + ----- + + By eliminating the inner loop in the description above, and using `s_` to + build simple slice objects, `take` can be expressed in terms of applying + fancy indexing to each 1-d slice:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nj): + out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] + + For this reason, it is equivalent to (but faster than) the following use + of `apply_along_axis`:: + + out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a) + + Examples + -------- + >>> a = [4, 3, 5, 7, 6, 8] + >>> indices = [0, 1, 4] + >>> np.take(a, indices) + array([4, 3, 6]) + + In this example if `a` is an ndarray, "fancy" indexing can be used. + + >>> a = np.array(a) + >>> a[indices] + array([4, 3, 6]) + + If `indices` is not one dimensional, the output also has these dimensions. + + >>> np.take(a, [[0, 1], [2, 3]]) + array([[4, 3], + [5, 7]]) + """ + return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) + + +def _reshape_dispatcher(a, newshape, order=None): + return (a,) + + +# not deprecated --- copy if necessary, view otherwise +@array_function_dispatch(_reshape_dispatcher) +def reshape(a, newshape, order='C'): + """ + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is + inferred from the length of the array and remaining dimensions. + order : {'C', 'F', 'A'}, optional + Read the elements of `a` using this index order, and place the + elements into the reshaped array using this index order. 'C' + means to read / write the elements using C-like index order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to read / write the + elements using Fortran-like index order, with the first index + changing fastest, and the last index changing slowest. Note that + the 'C' and 'F' options take no account of the memory layout of + the underlying array, and only refer to the order of indexing. + 'A' means to read / write the elements in Fortran-like index + order if `a` is Fortran *contiguous* in memory, C-like order + otherwise. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. Note there is no guarantee of the *memory layout* (C- or + Fortran- contiguous) of the returned array. + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + It is not always possible to change the shape of an array without + copying the data. 
If you want an error to be raised when the data is copied, + you should assign the new shape to the shape attribute of the array:: + + >>> a = np.zeros((10, 2)) + # A transpose makes the array non-contiguous + >>> b = a.T + # Taking a view makes it possible to modify the shape without modifying + # the initial object. + >>> c = b.view() + >>> c.shape = (20) + AttributeError: incompatible shape for a non-contiguous array + + The `order` keyword gives the index ordering both for *fetching* the values + from `a`, and then *placing* the values into the output array. + For example, let's say you have an array: + + >>> a = np.arange(6).reshape((3, 2)) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + + You can think of reshaping as first raveling the array (using the given + index order), then inserting the elements from the raveled array into the + new array using the same kind of index ordering as was used for the + raveling. + + >>> np.reshape(a, (2, 3)) # C-like index ordering + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering + array([[0, 4, 3], + [2, 1, 5]]) + >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') + array([[0, 4, 3], + [2, 1, 5]]) + + Examples + -------- + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> np.reshape(a, 6) + array([1, 2, 3, 4, 5, 6]) + >>> np.reshape(a, 6, order='F') + array([1, 4, 2, 5, 3, 6]) + + >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 + array([[1, 2], + [3, 4], + [5, 6]]) + """ + return _wrapfunc(a, 'reshape', newshape, order=order) + + +def _choose_dispatcher(a, choices, out=None, mode=None): + yield a + for c in choices: + yield c + yield out + + +@array_function_dispatch(_choose_dispatcher) +def choose(a, choices, out=None, mode='raise'): + """ + Construct an array from an index array and a set of arrays to choose from. + + First of all, if confused or uncertain, definitely look at the Examples - + in its full generality, this function is less simple than it might + seem from the following code description (below ndi = + `numpy.lib.index_tricks`): + + ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. + + But this omits some subtleties. Here is a fully general summary: + + Given an "index" array (`a`) of integers and a sequence of `n` arrays + (`choices`), `a` and each choice array are first broadcast, as necessary, + to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = + 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` + for each `i`. Then, a new array with shape ``Ba.shape`` is created as + follows: + + * if ``mode=raise`` (the default), then, first of all, each element of + `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that + `i` (in that range) is the value at the `(j0, j1, ..., jm)` position + in `Ba` - then the value at the same position in the new array is the + value in `Bchoices[i]` at that same position; + + * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) + integer; modular arithmetic is used to map integers outside the range + `[0, n-1]` back into that range; and then the new array is constructed + as above; + + * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) + integer; negative integers are mapped to 0; values greater than `n-1` + are mapped to `n-1`; and then the new array is constructed as above. 
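+
+    As a compact illustration (editor's example): with two choice arrays,
+    the out-of-range index 2 raises under the default mode but is clipped
+    to 1 under ``mode='clip'``:
+
+    >>> np.choose([0, 2], [[1, 2], [3, 4]], mode='clip')
+    array([1, 4])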
+ + Parameters + ---------- + a : int array + This array must contain integers in `[0, n-1]`, where `n` is the number + of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any + integers are permissible. + choices : sequence of arrays + Choice arrays. `a` and all of the choices must be broadcastable to the + same shape. If `choices` is itself an array (not recommended), then + its outermost dimension (i.e., the one corresponding to + ``choices.shape[0]``) is taken as defining the "sequence". + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + mode : {'raise' (default), 'wrap', 'clip'}, optional + Specifies how indices outside `[0, n-1]` will be treated: + + * 'raise' : an exception is raised + * 'wrap' : value becomes value mod `n` + * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 + + Returns + ------- + merged_array : array + The merged result. + + Raises + ------ + ValueError: shape mismatch + If `a` and each choice array are not all broadcastable to the same + shape. + + See Also + -------- + ndarray.choose : equivalent method + + Notes + ----- + To reduce the chance of misinterpretation, even though the following + "abuse" is nominally supported, `choices` should neither be, nor be + thought of as, a single array, i.e., the outermost sequence-like container + should be either a list or a tuple. + + Examples + -------- + + >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], + ... [20, 21, 22, 23], [30, 31, 32, 33]] + >>> np.choose([2, 3, 1, 0], choices + ... # the first element of the result will be the first element of the + ... # third (2+1) "array" in choices, namely, 20; the second element + ... # will be the second element of the fourth (3+1) choice array, i.e., + ... # 31, etc. + ... ) + array([20, 31, 12, 3]) + >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) + array([20, 31, 12, 3]) + >>> # because there are 4 choice arrays + >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) + array([20, 1, 12, 3]) + >>> # i.e., 0 + + A couple examples illustrating how choose broadcasts: + + >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] + >>> choices = [-10, 10] + >>> np.choose(a, choices) + array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]]) + + >>> # With thanks to Anne Archibald + >>> a = np.array([0, 1]).reshape((2,1,1)) + >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) + >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) + >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 + array([[[ 1, 1, 1, 1, 1], + [ 2, 2, 2, 2, 2], + [ 3, 3, 3, 3, 3]], + [[-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5]]]) + + """ + return _wrapfunc(a, 'choose', choices, out=out, mode=mode) + + +def _repeat_dispatcher(a, repeats, axis=None): + return (a,) + + +@array_function_dispatch(_repeat_dispatcher) +def repeat(a, repeats, axis=None): + """ + Repeat elements of an array. + + Parameters + ---------- + a : array_like + Input array. + repeats : int or array of ints + The number of repetitions for each element. `repeats` is broadcasted + to fit the shape of the given axis. + axis : int, optional + The axis along which to repeat values. By default, use the + flattened input array, and return a flat output array. + + Returns + ------- + repeated_array : ndarray + Output array which has the same shape as `a`, except along + the given axis. + + See Also + -------- + tile : Tile an array. 
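+
+    Notes
+    -----
+    `repeats` is broadcast against the selected axis, so it may be a
+    scalar or an array whose length matches that axis; in the latter
+    case each element gives the repetition count for the corresponding
+    slice, as in the last example below.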
+ + Examples + -------- + >>> np.repeat(3, 4) + array([3, 3, 3, 3]) + >>> x = np.array([[1,2],[3,4]]) + >>> np.repeat(x, 2) + array([1, 1, 2, 2, 3, 3, 4, 4]) + >>> np.repeat(x, 3, axis=1) + array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> np.repeat(x, [1, 2], axis=0) + array([[1, 2], + [3, 4], + [3, 4]]) + + """ + return _wrapfunc(a, 'repeat', repeats, axis=axis) + + +def _put_dispatcher(a, ind, v, mode=None): + return (a, ind, v) + + +@array_function_dispatch(_put_dispatcher) +def put(a, ind, v, mode='raise'): + """ + Replaces specified elements of an array with given values. + + The indexing works on the flattened target array. `put` is roughly + equivalent to: + + :: + + a.flat[ind] = v + + Parameters + ---------- + a : ndarray + Target array. + ind : array_like + Target indices, interpreted as integers. + v : array_like + Values to place in `a` at target indices. If `v` is shorter than + `ind` it will be repeated as necessary. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + See Also + -------- + putmask, place + put_along_axis : Put elements by matching the array and the index arrays + + Examples + -------- + >>> a = np.arange(5) + >>> np.put(a, [0, 2], [-44, -55]) + >>> a + array([-44, 1, -55, 3, 4]) + + >>> a = np.arange(5) + >>> np.put(a, 22, -5, mode='clip') + >>> a + array([ 0, 1, 2, 3, -5]) + + """ + try: + put = a.put + except AttributeError: + raise TypeError("argument 1 must be numpy.ndarray, " + "not {name}".format(name=type(a).__name__)) + + return put(ind, v, mode=mode) + + +def _swapaxes_dispatcher(a, axis1, axis2): + return (a,) + + +@array_function_dispatch(_swapaxes_dispatcher) +def swapaxes(a, axis1, axis2): + """ + Interchange two axes of an array. + + Parameters + ---------- + a : array_like + Input array. + axis1 : int + First axis. + axis2 : int + Second axis. + + Returns + ------- + a_swapped : ndarray + For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is + returned; otherwise a new array is created. For earlier NumPy + versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + Examples + -------- + >>> x = np.array([[1,2,3]]) + >>> np.swapaxes(x,0,1) + array([[1], + [2], + [3]]) + + >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + >>> np.swapaxes(x,0,2) + array([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]]) + + """ + return _wrapfunc(a, 'swapaxes', axis1, axis2) + + +def _transpose_dispatcher(a, axes=None): + return (a,) + + +@array_function_dispatch(_transpose_dispatcher) +def transpose(a, axes=None): + """ + Permute the dimensions of an array. + + Parameters + ---------- + a : array_like + Input array. + axes : list of ints, optional + By default, reverse the dimensions, otherwise permute the axes + according to the values given. + + Returns + ------- + p : ndarray + `a` with its axes permuted. A view is returned whenever + possible. + + See Also + -------- + moveaxis + argsort + + Notes + ----- + Use `transpose(a, argsort(axes))` to invert the transposition of tensors + when using the `axes` keyword argument. 
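+
+    For example (editor's illustration of that inversion identity):
+
+    >>> axes = (1, 2, 0)
+    >>> x = np.ones((1, 2, 3))
+    >>> np.transpose(np.transpose(x, axes), np.argsort(axes)).shape
+    (1, 2, 3)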
+ + Transposing a 1-D array returns an unchanged view of the original array. + + Examples + -------- + >>> x = np.arange(4).reshape((2,2)) + >>> x + array([[0, 1], + [2, 3]]) + + >>> np.transpose(x) + array([[0, 2], + [1, 3]]) + + >>> x = np.ones((1, 2, 3)) + >>> np.transpose(x, (1, 0, 2)).shape + (2, 1, 3) + + """ + return _wrapfunc(a, 'transpose', axes) + + +def _partition_dispatcher(a, kth, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_partition_dispatcher) +def partition(a, kth, axis=-1, kind='introselect', order=None): + """ + Return a partitioned copy of an array. + + Creates a copy of the array with its elements rearranged in such a + way that the value of the element in k-th position is in the + position it would be in a sorted array. All elements smaller than + the k-th element are moved before this element and all equal or + greater are moved behind it. The ordering of the elements in the two + partitions is undefined. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array to be sorted. + kth : int or sequence of ints + Element index to partition by. The k-th value of the element + will be in its final sorted position and all smaller elements + will be moved before it and all equal or greater elements behind + it. The order of all elements in the partitions is undefined. If + provided with a sequence of k-th it will partition all elements + indexed by k-th of them into their sorted position at once. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument + specifies which fields to compare first, second, etc. A single + field can be specified as a string. Not all fields need be + specified, but unspecified fields will still be used, in the + order in which they come up in the dtype, to break ties. + + Returns + ------- + partitioned_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.partition : Method to sort an array in-place. + argpartition : Indirect partition. + sort : Full sorting + + Notes + ----- + The various selection algorithms are characterized by their average + speed, worst case performance, work space size, and whether they are + stable. A stable sort keeps items with the same key in the same + relative order. The available algorithms have the following + properties: + + ================= ======= ============= ============ ======= + kind speed worst case work space stable + ================= ======= ============= ============ ======= + 'introselect' 1 O(n) 0 no + ================= ======= ============= ============ ======= + + All the partition algorithms make temporary copies of the data when + partitioning along any but the last axis. Consequently, + partitioning along the last axis is faster and uses less space than + partitioning along any other axis. + + The sort order for complex numbers is lexicographic. If both the + real and imaginary parts are non-nan then the order is determined by + the real parts except when they are equal, in which case the order + is determined by the imaginary parts. 
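+
+    A typical use (editor's note) is picking out an order statistic in
+    linear average time without a full sort, e.g. a median candidate via
+    ``np.partition(a, len(a) // 2)``.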
+
+    Examples
+    --------
+    >>> a = np.array([3, 4, 2, 1])
+    >>> np.partition(a, 3)
+    array([2, 1, 3, 4])
+
+    >>> np.partition(a, (1, 3))
+    array([1, 2, 3, 4])
+
+    """
+    if axis is None:
+        # flatten returns (1, N) for np.matrix, so always use the last axis
+        a = asanyarray(a).flatten()
+        axis = -1
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.partition(kth, axis=axis, kind=kind, order=order)
+    return a
+
+
+def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_argpartition_dispatcher)
+def argpartition(a, kth, axis=-1, kind='introselect', order=None):
+    """
+    Perform an indirect partition along the given axis using the
+    algorithm specified by the `kind` keyword. It returns an array of
+    indices of the same shape as `a` that index data along the given
+    axis in partitioned order.
+
+    .. versionadded:: 1.8.0
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    kth : int or sequence of ints
+        Element index to partition by. The k-th element will be in its
+        final sorted position and all smaller elements will be moved
+        before it and all larger elements behind it. The order of all
+        elements in the partitions is undefined. If provided with a
+        sequence of k-th it will partition all of them into their sorted
+        position at once.
+    axis : int or None, optional
+        Axis along which to sort. The default is -1 (the last axis). If
+        None, the flattened array is used.
+    kind : {'introselect'}, optional
+        Selection algorithm. Default is 'introselect'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument
+        specifies which fields to compare first, second, etc. A single
+        field can be specified as a string, and not all fields need be
+        specified, but unspecified fields will still be used, in the
+        order in which they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that partition `a` along the specified axis.
+        If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
+        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+        always yields the partitioned `a`, irrespective of dimensionality.
+
+    See Also
+    --------
+    partition : Describes partition algorithms used.
+    ndarray.partition : Inplace partition.
+    argsort : Full indirect sort
+
+    Notes
+    -----
+    See `partition` for notes on the different selection algorithms.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> x = np.array([3, 4, 2, 1])
+    >>> x[np.argpartition(x, 3)]
+    array([2, 1, 3, 4])
+    >>> x[np.argpartition(x, (1, 3))]
+    array([1, 2, 3, 4])
+
+    >>> x = [3, 4, 2, 1]
+    >>> np.array(x)[np.argpartition(x, 3)]
+    array([2, 1, 3, 4])
+
+    """
+    return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
+
+
+def _sort_dispatcher(a, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_sort_dispatcher)
+def sort(a, axis=-1, kind='quicksort', order=None):
+    """
+    Return a sorted copy of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be sorted.
+    axis : int or None, optional
+        Axis along which to sort. If None, the array is flattened before
+        sorting. The default is -1, which sorts along the last axis.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. Default is 'quicksort'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc.
A single field can + be specified as a string, and not all fields need be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + partition : Partial sort. + + Notes + ----- + The various sorting algorithms are characterized by their average speed, + worst case performance, work space size, and whether they are stable. A + stable sort keeps items with the same key in the same relative + order. The three available algorithms have the following + properties: + + =========== ======= ============= ============ ======== + kind speed worst case work space stable + =========== ======= ============= ============ ======== + 'quicksort' 1 O(n^2) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'heapsort' 3 O(n*log(n)) 0 no + =========== ======= ============= ============ ======== + + All the sort algorithms make temporary copies of the data when + sorting along any but the last axis. Consequently, sorting along + the last axis is faster and uses less space than sorting along + any other axis. + + The sort order for complex numbers is lexicographic. If both the real + and imaginary parts are non-nan then the order is determined by the + real parts except when they are equal, in which case the order is + determined by the imaginary parts. + + Previous to numpy 1.4.0 sorting real and complex arrays containing nan + values led to undefined behaviour. In numpy versions >= 1.4.0 nan + values are sorted to the end. The extended sort order is: + + * Real: [R, nan] + * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] + + where R is a non-nan real value. Complex values with the same nan + placements are sorted according to the non-nan part if it exists. + Non-nan values are sorted as before. + + .. versionadded:: 1.12.0 + + quicksort has been changed to an introsort which will switch + heapsort when it does not make enough progress. This makes its + worst case O(n*log(n)). + + 'stable' automatically choses the best stable sorting algorithm + for the data type being sorted. It is currently mapped to + merge sort. + + Examples + -------- + >>> a = np.array([[1,4],[3,1]]) + >>> np.sort(a) # sort along the last axis + array([[1, 4], + [1, 3]]) + >>> np.sort(a, axis=None) # sort the flattened array + array([1, 1, 3, 4]) + >>> np.sort(a, axis=0) # sort along the first axis + array([[1, 1], + [3, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + + >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] + >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), + ... 
('Galahad', 1.7, 38)]
+    >>> a = np.array(values, dtype=dtype)       # create a structured array
+    >>> np.sort(a, order='height')                        # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+           ('Lancelot', 1.8999999999999999, 38)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    Sort by age, then height if ages are equal:
+
+    >>> np.sort(a, order=['age', 'height'])               # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+           ('Arthur', 1.8, 41)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    """
+    if axis is None:
+        # flatten returns (1, N) for np.matrix, so always use the last axis
+        a = asanyarray(a).flatten()
+        axis = -1
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.sort(axis=axis, kind=kind, order=order)
+    return a
+
+
+def _argsort_dispatcher(a, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
+def argsort(a, axis=-1, kind='quicksort', order=None):
+    """
+    Returns the indices that would sort an array.
+
+    Perform an indirect sort along the given axis using the algorithm
+    specified by the `kind` keyword. It returns an array of indices of
+    the same shape as `a` that index data along the given axis in
+    sorted order.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    axis : int or None, optional
+        Axis along which to sort. The default is -1 (the last axis). If
+        None, the flattened array is used.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that sort `a` along the specified `axis`.
+        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+        always yields the sorted `a`, irrespective of dimensionality.
+
+    See Also
+    --------
+    sort : Describes sorting algorithms used.
+    lexsort : Indirect stable sort with multiple keys.
+    ndarray.sort : Inplace sort.
+    argpartition : Indirect partial sort.
+
+    Notes
+    -----
+    See `sort` for notes on the different sorting algorithms.
+
+    As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in `sort`.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> x = np.array([3, 1, 2])
+    >>> np.argsort(x)
+    array([1, 2, 0])
+
+    Two-dimensional array:
+
+    >>> x = np.array([[0, 3], [2, 2]])
+    >>> x
+    array([[0, 3],
+           [2, 2]])
+
+    >>> np.argsort(x, axis=0)  # sorts along first axis (down)
+    array([[0, 1],
+           [1, 0]])
+
+    >>> np.argsort(x, axis=1)  # sorts along last axis (across)
+    array([[0, 1],
+           [0, 1]])
+
+    Indices of the sorted elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
+    >>> ind
+    (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
+    >>> x[ind]  # same as np.sort(x, axis=None)
+    array([0, 2, 2, 3])
+
+    Sorting with keys:
+
+    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+    >>> x
+    array([(1, 0), (0, 1)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    >>> np.argsort(x, order=('x','y'))
+    array([1, 0])
+
+    >>> np.argsort(x, order=('y','x'))
+    array([0, 1])
+
+    """
+    return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
+
+
+def _argmax_dispatcher(a, axis=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
+def argmax(a, axis=None, out=None):
+    """
+    Returns the indices of the maximum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as `a.shape`
+        with the dimension along `axis` removed.
+
+    See Also
+    --------
+    ndarray.argmax, argmin
+    amax : The maximum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+
+    Notes
+    -----
+    In case of multiple occurrences of the maximum values, the indices
+    corresponding to the first occurrence are returned.
+
+    Examples
+    --------
+    >>> a = np.arange(6).reshape(2,3) + 10
+    >>> a
+    array([[10, 11, 12],
+           [13, 14, 15]])
+    >>> np.argmax(a)
+    5
+    >>> np.argmax(a, axis=0)
+    array([1, 1, 1])
+    >>> np.argmax(a, axis=1)
+    array([2, 2])
+
+    Indexes of the maximal elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
+    >>> ind
+    (1, 2)
+    >>> a[ind]
+    15
+
+    >>> b = np.arange(6)
+    >>> b[1] = 5
+    >>> b
+    array([0, 5, 2, 3, 4, 5])
+    >>> np.argmax(b)  # Only the first occurrence is returned.
+    1
+
+    """
+    return _wrapfunc(a, 'argmax', axis=axis, out=out)
+
+
+def _argmin_dispatcher(a, axis=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_argmin_dispatcher)
+def argmin(a, axis=None, out=None):
+    """
+    Returns the indices of the minimum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+ + Returns + ------- + index_array : ndarray of ints + Array of indices into the array. It has the same shape as `a.shape` + with the dimension along `axis` removed. + + See Also + -------- + ndarray.argmin, argmax + amin : The minimum value along a given axis. + unravel_index : Convert a flat index into an index tuple. + + Notes + ----- + In case of multiple occurrences of the minimum values, the indices + corresponding to the first occurrence are returned. + + Examples + -------- + >>> a = np.arange(6).reshape(2,3) + 10 + >>> a + array([[10, 11, 12], + [13, 14, 15]]) + >>> np.argmin(a) + 0 + >>> np.argmin(a, axis=0) + array([0, 0, 0]) + >>> np.argmin(a, axis=1) + array([0, 0]) + + Indices of the minimum elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) + >>> ind + (0, 0) + >>> a[ind] + 10 + + >>> b = np.arange(6) + 10 + >>> b[4] = 10 + >>> b + array([10, 11, 12, 13, 10, 15]) + >>> np.argmin(b) # Only the first occurrence is returned. + 0 + + """ + return _wrapfunc(a, 'argmin', axis=axis, out=out) + + +def _searchsorted_dispatcher(a, v, side=None, sorter=None): + return (a, v, sorter) + + +@array_function_dispatch(_searchsorted_dispatcher) +def searchsorted(a, v, side='left', sorter=None): + """ + Find indices where elements should be inserted to maintain order. + + Find the indices into a sorted array `a` such that, if the + corresponding elements in `v` were inserted before the indices, the + order of `a` would be preserved. + + Assuming that `a` is sorted: + + ====== ============================ + `side` returned index `i` satisfies + ====== ============================ + left ``a[i-1] < v <= a[i]`` + right ``a[i-1] <= v < a[i]`` + ====== ============================ + + Parameters + ---------- + a : 1-D array_like + Input array. If `sorter` is None, then it must be sorted in + ascending order, otherwise `sorter` must be an array of indices + that sort it. + v : array_like + Values to insert into `a`. + side : {'left', 'right'}, optional + If 'left', the index of the first suitable location found is given. + If 'right', return the last such index. If there is no suitable + index, return either 0 or N (where N is the length of `a`). + sorter : 1-D array_like, optional + Optional array of integer indices that sort array a into ascending + order. They are typically the result of argsort. + + .. versionadded:: 1.7.0 + + Returns + ------- + indices : array of ints + Array of insertion points with the same shape as `v`. + + See Also + -------- + sort : Return a sorted copy of an array. + histogram : Produce histogram from 1-D data. + + Notes + ----- + Binary search is used to find the required insertion points. + + As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing + `nan` values. The enhanced sort order is documented in `sort`. + + This function is a faster version of the builtin python `bisect.bisect_left` + (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions, + which is also vectorized in the `v` argument. + + Examples + -------- + >>> np.searchsorted([1,2,3,4,5], 3) + 2 + >>> np.searchsorted([1,2,3,4,5], 3, side='right') + 3 + >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) + array([0, 5, 1, 2]) + + """ + return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) + + +def _resize_dispatcher(a, new_shape): + return (a,) + + +@array_function_dispatch(_resize_dispatcher) +def resize(a, new_shape): + """ + Return a new array with the specified shape. 
+ + If the new array is larger than the original array, then the new + array is filled with repeated copies of `a`. Note that this behavior + is different from a.resize(new_shape) which fills with zeros instead + of repeated copies of `a`. + + Parameters + ---------- + a : array_like + Array to be resized. + + new_shape : int or tuple of int + Shape of resized array. + + Returns + ------- + reshaped_array : ndarray + The new array is formed from the data in the old array, repeated + if necessary to fill out the required number of elements. The + data are repeated in the order that they are stored in memory. + + See Also + -------- + ndarray.resize : resize an array in-place. + + Notes + ----- + Warning: This functionality does **not** consider axes separately, + i.e. it does not apply interpolation/extrapolation. + It fills the return array with the required number of elements, taken + from `a` as they are laid out in memory, disregarding strides and axes. + (This is in case the new shape is smaller. For larger, see above.) + This functionality is therefore not suitable to resize images, + or data where each axis represents a separate and distinct entity. + + Examples + -------- + >>> a=np.array([[0,1],[2,3]]) + >>> np.resize(a,(2,3)) + array([[0, 1, 2], + [3, 0, 1]]) + >>> np.resize(a,(1,4)) + array([[0, 1, 2, 3]]) + >>> np.resize(a,(2,4)) + array([[0, 1, 2, 3], + [0, 1, 2, 3]]) + + """ + if isinstance(new_shape, (int, nt.integer)): + new_shape = (new_shape,) + a = ravel(a) + Na = len(a) + total_size = um.multiply.reduce(new_shape) + if Na == 0 or total_size == 0: + return mu.zeros(new_shape, a.dtype) + + n_copies = int(total_size / Na) + extra = total_size % Na + + if extra != 0: + n_copies = n_copies + 1 + extra = Na - extra + + a = concatenate((a,) * n_copies) + if extra > 0: + a = a[:-extra] + + return reshape(a, new_shape) + + +def _squeeze_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_squeeze_dispatcher) +def squeeze(a, axis=None): + """ + Remove single-dimensional entries from the shape of an array. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + .. versionadded:: 1.7.0 + + Selects a subset of the single-dimensional entries in the + shape. If an axis is selected with shape entry greater than + one, an error is raised. + + Returns + ------- + squeezed : ndarray + The input array, but with all or a subset of the + dimensions of length 1 removed. This is always `a` itself + or a view into `a`. + + Raises + ------ + ValueError + If `axis` is not `None`, and an axis being squeezed is not of length 1 + + See Also + -------- + expand_dims : The inverse operation, adding singleton dimensions + reshape : Insert, remove, and combine dimensions, and resize existing ones + + Examples + -------- + >>> x = np.array([[[0], [1], [2]]]) + >>> x.shape + (1, 3, 1) + >>> np.squeeze(x).shape + (3,) + >>> np.squeeze(x, axis=0).shape + (3, 1) + >>> np.squeeze(x, axis=1).shape + Traceback (most recent call last): + ... + ValueError: cannot select an axis to squeeze out which has size not equal to one + >>> np.squeeze(x, axis=2).shape + (1, 3) + + """ + try: + squeeze = a.squeeze + except AttributeError: + return _wrapit(a, 'squeeze') + if axis is None: + return squeeze() + else: + return squeeze(axis=axis) + + +def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None): + return (a,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(a, offset=0, axis1=0, axis2=1): + """ + Return specified diagonals. 
+ + If `a` is 2-D, returns the diagonal of `a` with the given offset, + i.e., the collection of elements of the form ``a[i, i+offset]``. If + `a` has more than two dimensions, then the axes specified by `axis1` + and `axis2` are used to determine the 2-D sub-array whose diagonal is + returned. The shape of the resulting array can be determined by + removing `axis1` and `axis2` and appending an index to the right equal + to the size of the resulting diagonals. + + In versions of NumPy prior to 1.7, this function always returned a new, + independent array containing a copy of the values in the diagonal. + + In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, + but depending on this fact is deprecated. Writing to the resulting + array continues to work as it used to, but a FutureWarning is issued. + + Starting in NumPy 1.9 it returns a read-only view on the original array. + Attempting to write to the resulting array will produce an error. + + In some future release, it will return a read/write view and writing to + the returned array will alter your original array. The returned array + will have the same type as the input array. + + If you don't write to the array returned by this function, then you can + just ignore all of the above. + + If you depend on the current behavior, then we suggest copying the + returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead + of just ``np.diagonal(a)``. This will work with both past and future + versions of NumPy. + + Parameters + ---------- + a : array_like + Array from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be positive or + negative. Defaults to main diagonal (0). + axis1 : int, optional + Axis to be used as the first axis of the 2-D sub-arrays from which + the diagonals should be taken. Defaults to first axis (0). + axis2 : int, optional + Axis to be used as the second axis of the 2-D sub-arrays from + which the diagonals should be taken. Defaults to second axis (1). + + Returns + ------- + array_of_diagonals : ndarray + If `a` is 2-D, then a 1-D array containing the diagonal and of the + same type as `a` is returned unless `a` is a `matrix`, in which case + a 1-D array rather than a (2-D) `matrix` is returned in order to + maintain backward compatibility. + + If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` + are removed, and a new axis inserted at the end corresponding to the + diagonal. + + Raises + ------ + ValueError + If the dimension of `a` is less than 2. + + See Also + -------- + diag : MATLAB work-a-like for 1-D and 2-D arrays. + diagflat : Create diagonal arrays. + trace : Sum along diagonals. + + Examples + -------- + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> a.diagonal() + array([0, 3]) + >>> a.diagonal(1) + array([1]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2,2,2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> a.diagonal(0, # Main diagonals of two arrays created by skipping + ... 0, # across the outer(left)-most axis last and + ... 1) # the "middle" (row) axis first. + array([[0, 6], + [1, 7]]) + + The sub-arrays whose main diagonals we just obtained; note that each + corresponds to fixing the right-most (column) axis, and that the + diagonals are "packed" in rows. 
+ + >>> a[:,:,0] # main diagonal is [0 6] + array([[0, 2], + [4, 6]]) + >>> a[:,:,1] # main diagonal is [1 7] + array([[1, 3], + [5, 7]]) + + """ + if isinstance(a, np.matrix): + # Make diagonal of matrix 1-D to preserve backward compatibility. + return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + else: + return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + + +def _trace_dispatcher( + a, offset=None, axis1=None, axis2=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_trace_dispatcher) +def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + Return the sum along diagonals of the array. + + If `a` is 2-D, the sum along its diagonal with the given offset + is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. + + If `a` has more than two dimensions, then the axes specified by axis1 and + axis2 are used to determine the 2-D sub-arrays whose traces are returned. + The shape of the resulting array is the same as that of `a` with `axis1` + and `axis2` removed. + + Parameters + ---------- + a : array_like + Input array, from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be both positive + and negative. Defaults to 0. + axis1, axis2 : int, optional + Axes to be used as the first and second axis of the 2-D sub-arrays + from which the diagonals should be taken. Defaults are the first two + axes of `a`. + dtype : dtype, optional + Determines the data-type of the returned array and of the accumulator + where the elements are summed. If dtype has the value None and `a` is + of integer type of precision less than the default integer + precision, then the default integer precision is used. Otherwise, + the precision is the same as that of `a`. + out : ndarray, optional + Array into which the output is placed. Its type is preserved and + it must be of the right shape to hold the output. + + Returns + ------- + sum_along_diagonals : ndarray + If `a` is 2-D, the sum along the diagonal is returned. If `a` has + larger dimensions, then an array of sums along diagonals is returned. + + See Also + -------- + diag, diagonal, diagflat + + Examples + -------- + >>> np.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2,2,2)) + >>> np.trace(a) + array([6, 8]) + + >>> a = np.arange(24).reshape((2,2,2,3)) + >>> np.trace(a).shape + (2, 3) + + """ + if isinstance(a, np.matrix): + # Get trace of matrix via an array to preserve backward compatibility. + return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) + else: + return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) + + +def _ravel_dispatcher(a, order=None): + return (a,) + + +@array_function_dispatch(_ravel_dispatcher) +def ravel(a, order='C'): + """Return a contiguous flattened array. + + A 1-D array, containing the elements of the input, is returned. A copy is + made only if needed. + + As of NumPy 1.10, the returned array will have the same type as the input + array. (for example, a masked array will be returned for a masked array + input) + + Parameters + ---------- + a : array_like + Input array. The elements in `a` are read in the order specified by + `order`, and packed as a 1-D array. + order : {'C','F', 'A', 'K'}, optional + + The elements of `a` are read using this index order. 
'C' means + to index the elements in row-major, C-style order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to index the elements + in column-major, Fortran-style order, with the + first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of + the memory layout of the underlying array, and only refer to + the order of axis indexing. 'A' means to read the elements in + Fortran-like index order if `a` is Fortran *contiguous* in + memory, C-like order otherwise. 'K' means to read the + elements in the order they occur in memory, except for + reversing the data when strides are negative. By default, 'C' + index order is used. + + Returns + ------- + y : array_like + y is an array of the same subtype as `a`, with shape ``(a.size,)``. + Note that matrices are special cased for backward compatibility, if `a` + is a matrix, then y is a 1-D ndarray. + + See Also + -------- + ndarray.flat : 1-D iterator over an array. + ndarray.flatten : 1-D array copy of the elements of an array + in row-major order. + ndarray.reshape : Change the shape of an array without changing its data. + + Notes + ----- + In row-major, C-style order, in two dimensions, the row index + varies the slowest, and the column index the quickest. This can + be generalized to multiple dimensions, where row-major order + implies that the index along the first axis varies slowest, and + the index along the last quickest. The opposite holds for + column-major, Fortran-style index ordering. + + When a view is desired in as many cases as possible, ``arr.reshape(-1)`` + may be preferable. + + Examples + -------- + It is equivalent to ``reshape(-1, order=order)``. + + >>> x = np.array([[1, 2, 3], [4, 5, 6]]) + >>> print(np.ravel(x)) + [1 2 3 4 5 6] + + >>> print(x.reshape(-1)) + [1 2 3 4 5 6] + + >>> print(np.ravel(x, order='F')) + [1 4 2 5 3 6] + + When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: + + >>> print(np.ravel(x.T)) + [1 4 2 5 3 6] + >>> print(np.ravel(x.T, order='A')) + [1 2 3 4 5 6] + + When ``order`` is 'K', it will preserve orderings that are neither 'C' + nor 'F', but won't reverse axes: + + >>> a = np.arange(3)[::-1]; a + array([2, 1, 0]) + >>> a.ravel(order='C') + array([2, 1, 0]) + >>> a.ravel(order='K') + array([2, 1, 0]) + + >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a + array([[[ 0, 2, 4], + [ 1, 3, 5]], + [[ 6, 8, 10], + [ 7, 9, 11]]]) + >>> a.ravel(order='C') + array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) + >>> a.ravel(order='K') + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + """ + if isinstance(a, np.matrix): + return asarray(a).ravel(order=order) + else: + return asanyarray(a).ravel(order=order) + + +def _nonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_nonzero_dispatcher) +def nonzero(a): + """ + Return the indices of the elements that are non-zero. + + Returns a tuple of arrays, one for each dimension of `a`, + containing the indices of the non-zero elements in that + dimension. The values in `a` are always tested and returned in + row-major, C-style order. The corresponding non-zero + values can be obtained with:: + + a[nonzero(a)] + + To group the indices by element, rather than dimension, use:: + + transpose(nonzero(a)) + + The result of this is always a 2-D array, with a row for + each non-zero element. + + Parameters + ---------- + a : array_like + Input array. 
+
+    Returns
+    -------
+    tuple_of_arrays : tuple
+        Indices of elements that are non-zero.
+
+    See Also
+    --------
+    flatnonzero :
+        Return indices that are non-zero in the flattened version of the input
+        array.
+    ndarray.nonzero :
+        Equivalent ndarray method.
+    count_nonzero :
+        Counts the number of non-zero elements in the input array.
+
+    Examples
+    --------
+    >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
+    >>> x
+    array([[3, 0, 0],
+           [0, 4, 0],
+           [5, 6, 0]])
+    >>> np.nonzero(x)
+    (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
+
+    >>> x[np.nonzero(x)]
+    array([3, 4, 5, 6])
+    >>> np.transpose(np.nonzero(x))
+    array([[0, 0],
+           [1, 1],
+           [2, 0],
+           [2, 1]])
+
+    A common use for ``nonzero`` is to find the indices of an array where
+    a condition is True. Given an array `a`, the condition ``a > 3`` is a
+    boolean array and, since False is interpreted as 0, ``np.nonzero(a > 3)``
+    yields the indices of `a` where the condition is true.
+
+    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+    >>> a > 3
+    array([[False, False, False],
+           [ True,  True,  True],
+           [ True,  True,  True]])
+    >>> np.nonzero(a > 3)
+    (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+    Using this result to index `a` is equivalent to using the mask directly:
+
+    >>> a[np.nonzero(a > 3)]
+    array([4, 5, 6, 7, 8, 9])
+    >>> a[a > 3]  # prefer this spelling
+    array([4, 5, 6, 7, 8, 9])
+
+    ``nonzero`` can also be called as a method of the array.
+
+    >>> (a > 3).nonzero()
+    (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+    """
+    return _wrapfunc(a, 'nonzero')
+
+
+def _shape_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_shape_dispatcher)
+def shape(a):
+    """
+    Return the shape of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+
+    Returns
+    -------
+    shape : tuple of ints
+        The elements of the shape tuple give the lengths of the
+        corresponding array dimensions.
+
+    See Also
+    --------
+    alen
+    ndarray.shape : Equivalent array method.
+
+    Examples
+    --------
+    >>> np.shape(np.eye(3))
+    (3, 3)
+    >>> np.shape([[1, 2]])
+    (1, 2)
+    >>> np.shape([0])
+    (1,)
+    >>> np.shape(0)
+    ()
+
+    >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+    >>> np.shape(a)
+    (2,)
+    >>> a.shape
+    (2,)
+
+    """
+    try:
+        result = a.shape
+    except AttributeError:
+        result = asarray(a).shape
+    return result
+
+
+def _compress_dispatcher(condition, a, axis=None, out=None):
+    return (condition, a, out)
+
+
+@array_function_dispatch(_compress_dispatcher)
+def compress(condition, a, axis=None, out=None):
+    """
+    Return selected slices of an array along given axis.
+
+    When working along a given axis, a slice along that axis is returned in
+    `output` for each index where `condition` evaluates to True. When
+    working on a 1-D array, `compress` is equivalent to `extract`.
+
+    Parameters
+    ----------
+    condition : 1-D array of bools
+        Array that selects which entries to return. If len(condition)
+        is less than the size of `a` along the given axis, then output is
+        truncated to the length of the condition array.
+    a : array_like
+        Array from which to extract a part.
+    axis : int, optional
+        Axis along which to take slices. If None (default), work on the
+        flattened array.
+    out : ndarray, optional
+        Output array. Its type is preserved and it must be of the right
+        shape to hold the output.
+
+    Returns
+    -------
+    compressed_array : ndarray
+        A copy of `a` without the slices along axis for which `condition`
+        is false.
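(The `compress` docstring continues below.) Two equivalences worth noting from the functions above, verifiable with standard NumPy alone: ``np.transpose(np.nonzero(a))`` is what `np.argwhere` is documented to return, and `compress` with a boolean condition matches boolean-mask indexing along the same axis.

import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

# Index pairs grouped per element, two equivalent spellings:
assert (np.transpose(np.nonzero(a > 3)) == np.argwhere(a > 3)).all()

# compress along an axis is boolean-mask indexing in disguise:
mask = np.array([False, True, True])
assert (np.compress(mask, a, axis=0) == a[mask, :]).all()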
+ + See Also + -------- + take, choose, diag, diagonal, select + ndarray.compress : Equivalent method in ndarray + np.extract: Equivalent method when working on 1-D arrays + numpy.doc.ufuncs : Section "Output arguments" + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4], [5, 6]]) + >>> a + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.compress([0, 1], a, axis=0) + array([[3, 4]]) + >>> np.compress([False, True, True], a, axis=0) + array([[3, 4], + [5, 6]]) + >>> np.compress([False, True], a, axis=1) + array([[2], + [4], + [6]]) + + Working on the flattened array does not return slices along an axis but + selects elements. + + >>> np.compress([False, True], a) + array([2]) + + """ + return _wrapfunc(a, 'compress', condition, axis=axis, out=out) + + +def _clip_dispatcher(a, a_min, a_max, out=None): + return (a, a_min, a_max) + + +@array_function_dispatch(_clip_dispatcher) +def clip(a, a_min, a_max, out=None): + """ + Clip (limit) the values in an array. + + Given an interval, values outside the interval are clipped to + the interval edges. For example, if an interval of ``[0, 1]`` + is specified, values smaller than 0 become 0, and values larger + than 1 become 1. + + Parameters + ---------- + a : array_like + Array containing elements to clip. + a_min : scalar or array_like or `None` + Minimum value. If `None`, clipping is not performed on lower + interval edge. Not more than one of `a_min` and `a_max` may be + `None`. + a_max : scalar or array_like or `None` + Maximum value. If `None`, clipping is not performed on upper + interval edge. Not more than one of `a_min` and `a_max` may be + `None`. If `a_min` or `a_max` are array_like, then the three + arrays will be broadcasted to match their shapes. + out : ndarray, optional + The results will be placed in this array. It may be the input + array for in-place clipping. `out` must be of the right shape + to hold the output. Its type is preserved. + + Returns + ------- + clipped_array : ndarray + An array with the elements of `a`, but where values + < `a_min` are replaced with `a_min`, and those > `a_max` + with `a_max`. + + See Also + -------- + numpy.doc.ufuncs : Section "Output arguments" + + Examples + -------- + >>> a = np.arange(10) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 3, 6, out=a) + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) + array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) + + """ + return _wrapfunc(a, 'clip', a_min, a_max, out=out) + + +def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None): + return (a, out) + + +@array_function_dispatch(_sum_dispatcher) +def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue): + """ + Sum of array elements over a given axis. + + Parameters + ---------- + a : array_like + Elements to sum. + axis : None or int or tuple of ints, optional + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If axis is a tuple of ints, a sum is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + dtype : dtype, optional + The type of the returned array and of the accumulator in which the + elements are summed. 
The dtype of `a` is used by default unless `a` + has an integer dtype of less precision than the default platform + integer. In that case, if `a` is signed then the platform integer + is used while if `a` is unsigned then an unsigned integer of the + same precision as the platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + Returns + ------- + sum_along_axis : ndarray + An array with the same shape as `a`, with the specified + axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar + is returned. If an output array is specified, a reference to + `out` is returned. + + See Also + -------- + ndarray.sum : Equivalent method. + + cumsum : Cumulative sum of array elements. + + trapz : Integration of array values using the composite trapezoidal rule. + + mean, average + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + The sum of an empty array is the neutral element 0: + + >>> np.sum([]) + 0.0 + + Examples + -------- + >>> np.sum([0.5, 1.5]) + 2.0 + >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) + 1 + >>> np.sum([[0, 1], [0, 5]]) + 6 + >>> np.sum([[0, 1], [0, 5]], axis=0) + array([0, 6]) + >>> np.sum([[0, 1], [0, 5]], axis=1) + array([1, 5]) + + If the accumulator is too small, overflow occurs: + + >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) + -128 + + You can also start the sum with a value other than zero: + + >>> np.sum([10], initial=5) + 15 + """ + if isinstance(a, _gentype): + # 2018-02-25, 1.15.0 + warnings.warn( + "Calling np.sum(generator) is deprecated, and in the future will give a different result. " + "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.", + DeprecationWarning, stacklevel=2) + + res = _sum_(a) + if out is not None: + out[...] = res + return out + return res + + return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims, + initial=initial) + + +def _any_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_any_dispatcher) +def any(a, axis=None, out=None, keepdims=np._NoValue): + """ + Test whether any array element along a given axis evaluates to True. + + Returns single boolean unless `axis` is not ``None`` + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical OR reduction is performed. + The default (`axis` = `None`) is to perform a logical OR over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. 
versionadded:: 1.7.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output and its type is preserved + (e.g., if it is of type float, then it will remain so, returning + 1.0 for True and 0.0 for False, regardless of the type of `a`). + See `doc.ufuncs` (Section "Output arguments") for details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `any` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + any : bool or ndarray + A new boolean or `ndarray` is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.any : equivalent method + + all : Test whether all elements along a given axis evaluate to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity evaluate + to `True` because these are not equal to zero. + + Examples + -------- + >>> np.any([[True, False], [True, True]]) + True + + >>> np.any([[True, False], [False, False]], axis=0) + array([ True, False]) + + >>> np.any([-1, 0, 5]) + True + + >>> np.any(np.nan) + True + + >>> o=np.array([False]) + >>> z=np.any([-1, 4, 5], out=o) + >>> z, o + (array([ True]), array([ True])) + >>> # Check now that z is a reference to o + >>> z is o + True + >>> id(z), id(o) # identity of z and o # doctest: +SKIP + (191614240, 191614240) + + """ + return _wrapreduction(a, np.logical_or, 'any', axis, None, out, keepdims=keepdims) + + +def _all_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_all_dispatcher) +def all(a, axis=None, out=None, keepdims=np._NoValue): + """ + Test whether all array elements along a given axis evaluate to True. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical AND reduction is performed. + The default (`axis` = `None`) is to perform a logical AND over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. + It must have the same shape as the expected output and its + type is preserved (e.g., if ``dtype(out)`` is float, the result + will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section + "Output arguments") for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `all` method of sub-classes of + `ndarray`, however any non-default value will be. 
If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + all : ndarray, bool + A new boolean or array is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.all : equivalent method + + any : Test whether any element along a given axis evaluates to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to `True` because these are not equal to zero. + + Examples + -------- + >>> np.all([[True,False],[True,True]]) + False + + >>> np.all([[True,False],[True,True]], axis=0) + array([ True, False]) + + >>> np.all([-1, 4, 5]) + True + + >>> np.all([1.0, np.nan]) + True + + >>> o=np.array([False]) + >>> z=np.all([-1, 4, 5], out=o) + >>> id(z), id(o), z # doctest: +SKIP + (28293632, 28293632, array([ True])) + + """ + return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims) + + +def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumsum_dispatcher) +def cumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of the elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See `doc.ufuncs` + (Section "Output arguments") for more details. + + Returns + ------- + cumsum_along_axis : ndarray. + A new array holding the result is returned unless `out` is + specified, in which case a reference to `out` is returned. The + result has the same size as `a`, and the same shape as `a` if + `axis` is not None or `a` is a 1-d array. + + + See Also + -------- + sum : Sum array elements. + + trapz : Integration of array values using the composite trapezoidal rule. + + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.cumsum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + """ + return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out) + + +def _ptp_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_ptp_dispatcher) +def ptp(a, axis=None, out=None, keepdims=np._NoValue): + """ + Range of values (maximum - minimum) along an axis. + + The name of the function comes from the acronym for 'peak to peak'. + + Parameters + ---------- + a : array_like + Input values. 
+ axis : None or int or tuple of ints, optional + Axis along which to find the peaks. By default, flatten the + array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.15.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : array_like + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type of the output values will be cast if necessary. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `ptp` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + ptp : ndarray + A new array holding the result, unless `out` was + specified, in which case a reference to `out` is returned. + + Examples + -------- + >>> x = np.arange(4).reshape((2,2)) + >>> x + array([[0, 1], + [2, 3]]) + + >>> np.ptp(x, axis=0) + array([2, 2]) + + >>> np.ptp(x, axis=1) + array([1, 1]) + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is not mu.ndarray: + try: + ptp = a.ptp + except AttributeError: + pass + else: + return ptp(axis=axis, out=out, **kwargs) + return _methods._ptp(a, axis=axis, out=out, **kwargs) + + +def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None): + return (a, out) + + +@array_function_dispatch(_amax_dispatcher) +def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): + """ + Return the maximum of an array or maximum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, the maximum is selected over multiple axes, + instead of a single axis or all the axes as before. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See `doc.ufuncs` (Section "Output arguments") for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `amax` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The minimum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + + Returns + ------- + amax : ndarray or scalar + Maximum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is given, the result is an array of dimension + ``a.ndim - 1``. + + See Also + -------- + amin : + The minimum value of an array along a given axis, propagating any NaNs. 
+ nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + argmax : + Return the indices of the maximum values. + + nanmin, minimum, fmin + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding max value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmax. + + Don't use `amax` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than + ``amax(a, axis=0)``. + + Examples + -------- + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.amax(a) # Maximum of the flattened array + 3 + >>> np.amax(a, axis=0) # Maxima along the first axis + array([2, 3]) + >>> np.amax(a, axis=1) # Maxima along the second axis + array([1, 3]) + + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> np.amax(b) + nan + >>> np.nanmax(b) + 4.0 + + You can use an initial value to compute the maximum of an empty slice, or + to initialize it to a different value: + + >>> np.max([[-50], [10]], axis=-1, initial=0) + array([ 0, 10]) + + Notice that the initial value is used as one of the elements for which the + maximum is determined, unlike for the default argument Python's max + function, which is only used for empty iterables. + + >>> np.max([5], initial=6) + 6 + >>> max([5], default=6) + 5 + """ + return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims, + initial=initial) + + +def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None): + return (a, out) + + +@array_function_dispatch(_amin_dispatcher) +def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue): + """ + Return the minimum of an array or minimum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, the minimum is selected over multiple axes, + instead of a single axis or all the axes as before. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See `doc.ufuncs` (Section "Output arguments") for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `amin` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The maximum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + Returns + ------- + amin : ndarray or scalar + Minimum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is given, the result is an array of dimension + ``a.ndim - 1``. + + See Also + -------- + amax : + The maximum value of an array along a given axis, propagating any NaNs. + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. 
+    minimum :
+        Element-wise minimum of two arrays, propagating any NaNs.
+    fmin :
+        Element-wise minimum of two arrays, ignoring any NaNs.
+    argmin :
+        Return the indices of the minimum values.
+
+    nanmax, maximum, fmax
+
+    Notes
+    -----
+    NaN values are propagated, that is, if at least one item is NaN, the
+    corresponding min value will be NaN as well. To ignore NaN values
+    (MATLAB behavior), please use nanmin.
+
+    Don't use `amin` for element-wise comparison of 2 arrays; when
+    ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
+    ``amin(a, axis=0)``.
+
+    Examples
+    --------
+    >>> a = np.arange(4).reshape((2,2))
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> np.amin(a)           # Minimum of the flattened array
+    0
+    >>> np.amin(a, axis=0)   # Minima along the first axis
+    array([0, 1])
+    >>> np.amin(a, axis=1)   # Minima along the second axis
+    array([0, 2])
+
+    >>> b = np.arange(5, dtype=float)
+    >>> b[2] = np.NaN
+    >>> np.amin(b)
+    nan
+    >>> np.nanmin(b)
+    0.0
+
+    >>> np.min([[-50], [10]], axis=-1, initial=0)
+    array([-50,   0])
+
+    Notice that the initial value is used as one of the elements for which
+    the minimum is determined, unlike for the default argument of Python's
+    min function, which is only used for empty iterables.
+
+    Notice that this isn't the same as Python's ``default`` argument.
+
+    >>> np.min([6], initial=5)
+    5
+    >>> min([6], default=5)
+    6
+    """
+    return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+                          keepdims=keepdims, initial=initial)
+
+
+def _alen_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_alen_dispatcher)
+def alen(a):
+    """
+    Return the length of the first dimension of the input array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+
+    Returns
+    -------
+    alen : int
+        Length of the first dimension of `a`.
+
+    See Also
+    --------
+    shape, size
+
+    Examples
+    --------
+    >>> a = np.zeros((7,4,5))
+    >>> a.shape[0]
+    7
+    >>> np.alen(a)
+    7
+
+    """
+    try:
+        return len(a)
+    except TypeError:
+        return len(array(a, ndmin=1))
+
+
+def _prod_dispatcher(
+        a, axis=None, dtype=None, out=None, keepdims=None, initial=None):
+    return (a, out)
+
+
+@array_function_dispatch(_prod_dispatcher)
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
+    """
+    Return the product of array elements over a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a product is performed. The default,
+        axis=None, will calculate the product of all the elements in the
+        input array. If axis is negative it counts from the last to the
+        first axis.
+
+        .. versionadded:: 1.7.0
+
+        If axis is a tuple of ints, a product is performed on all of the
+        axes specified in the tuple instead of a single axis or all the
+        axes as before.
+    dtype : dtype, optional
+        The type of the returned array, as well as of the accumulator in
+        which the elements are multiplied. The dtype of `a` is used by
+        default unless `a` has an integer dtype of less precision than the
+        default platform integer. In that case, if `a` is signed then the
+        platform integer is used while if `a` is unsigned then an unsigned
+        integer of the same precision as the platform integer is used.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output, but the type of the output
+        values will be cast if necessary.
+ keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `prod` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + Returns + ------- + product_along_axis : ndarray, see `dtype` parameter above. + An array shaped as `a` but with the specified axis removed. + Returns a reference to `out` if specified. + + See Also + -------- + ndarray.prod : equivalent method + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. That means that, on a 32-bit platform: + + >>> x = np.array([536870910, 536870910, 536870910, 536870910]) + >>> np.prod(x) # random + 16 + + The product of an empty array is the neutral element 1: + + >>> np.prod([]) + 1.0 + + Examples + -------- + By default, calculate the product of all elements: + + >>> np.prod([1.,2.]) + 2.0 + + Even when the input array is two-dimensional: + + >>> np.prod([[1.,2.],[3.,4.]]) + 24.0 + + But we can also specify the axis over which to multiply: + + >>> np.prod([[1.,2.],[3.,4.]], axis=1) + array([ 2., 12.]) + + If the type of `x` is unsigned, then the output type is + the unsigned platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.uint8) + >>> np.prod(x).dtype == np.uint + True + + If `x` is of a signed integer type, then the output type + is the default platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.int8) + >>> np.prod(x).dtype == int + True + + You can also start the product with a value other than one: + + >>> np.prod([1, 2], initial=5) + 10 + """ + return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims, + initial=initial) + + +def _cumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumprod_dispatcher) +def cumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + cumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case a reference to out is returned. + + See Also + -------- + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. 
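(The `cumprod` examples continue below.) To make the modular-arithmetic note above concrete, here is a small demonstration that is not part of the original docstring. It also shows the dtype rule described earlier: small integer inputs are widened to the default platform integer unless a dtype is forced.

import numpy as np

x = np.array([100, 100], dtype=np.int8)
print(np.cumprod(x))                 # [  100 10000]: int8 input is widened
                                     # to the platform integer by default
print(np.cumprod(x, dtype=np.int8))  # [100  16]: forcing int8 wraps 10000
                                     # modulo 2**8, with no error raised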
+ + Examples + -------- + >>> a = np.array([1,2,3]) + >>> np.cumprod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumprod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of `a`: + + >>> np.cumprod(a, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of `a`: + + >>> np.cumprod(a,axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out) + + +def _ndim_dispatcher(a): + return (a,) + + +@array_function_dispatch(_ndim_dispatcher) +def ndim(a): + """ + Return the number of dimensions of an array. + + Parameters + ---------- + a : array_like + Input array. If it is not already an ndarray, a conversion is + attempted. + + Returns + ------- + number_of_dimensions : int + The number of dimensions in `a`. Scalars are zero-dimensional. + + See Also + -------- + ndarray.ndim : equivalent method + shape : dimensions of array + ndarray.shape : dimensions of array + + Examples + -------- + >>> np.ndim([[1,2,3],[4,5,6]]) + 2 + >>> np.ndim(np.array([[1,2,3],[4,5,6]])) + 2 + >>> np.ndim(1) + 0 + + """ + try: + return a.ndim + except AttributeError: + return asarray(a).ndim + + +def _size_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_size_dispatcher) +def size(a, axis=None): + """ + Return the number of elements along a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which the elements are counted. By default, give + the total number of elements. + + Returns + ------- + element_count : int + Number of elements along the specified axis. + + See Also + -------- + shape : dimensions of array + ndarray.shape : dimensions of array + ndarray.size : number of elements in array + + Examples + -------- + >>> a = np.array([[1,2,3],[4,5,6]]) + >>> np.size(a) + 6 + >>> np.size(a,1) + 3 + >>> np.size(a,0) + 2 + + """ + if axis is None: + try: + return a.size + except AttributeError: + return asarray(a).size + else: + try: + return a.shape[axis] + except AttributeError: + return asarray(a).shape[axis] + + +def _around_dispatcher(a, decimals=None, out=None): + return (a, out) + + +@array_function_dispatch(_around_dispatcher) +def around(a, decimals=0, out=None): + """ + Evenly round to the given number of decimals. + + Parameters + ---------- + a : array_like + Input data. + decimals : int, optional + Number of decimal places to round to (default: 0). If + decimals is negative, it specifies the number of positions to + the left of the decimal point. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. See `doc.ufuncs` (Section + "Output arguments") for details. + + Returns + ------- + rounded_array : ndarray + An array of the same type as `a`, containing the rounded values. + Unless `out` was specified, a new array is created. A reference to + the result is returned. + + The real and imaginary parts of complex numbers are rounded + separately. The result of rounding a float is a float. 
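(The `around` docstring continues below.) A quick check on `ndim` and `size` from a little earlier: both accept anything convertible to an array, so plain nested lists work without constructing an ndarray first.

import numpy as np

data = [[1, 2, 3], [4, 5, 6]]    # plain nested lists
print(np.ndim(data))             # 2
print(np.size(data))             # 6 elements in total
print(np.size(data, axis=0))     # 2 rows along axis 0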
+ + See Also + -------- + ndarray.round : equivalent method + + ceil, fix, floor, rint, trunc + + + Notes + ----- + For values exactly halfway between rounded decimal values, NumPy + rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, + -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due + to the inexact representation of decimal fractions in the IEEE + floating point standard [1]_ and errors introduced when scaling + by powers of ten. + + References + ---------- + .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, + https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF + .. [2] "How Futile are Mindless Assessments of + Roundoff in Floating-Point Computation?", William Kahan, + https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf + + Examples + -------- + >>> np.around([0.37, 1.64]) + array([ 0., 2.]) + >>> np.around([0.37, 1.64], decimals=1) + array([ 0.4, 1.6]) + >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value + array([ 0., 2., 2., 4., 4.]) + >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned + array([ 1, 2, 3, 11]) + >>> np.around([1,2,3,11], decimals=-1) + array([ 0, 0, 0, 10]) + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_mean_dispatcher) +def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Compute the arithmetic mean along the specified axis. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which the means are computed. The default is to + compute the mean of the flattened array. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a mean is performed over multiple axes, + instead of a single axis or all the axes as before. + dtype : data-type, optional + Type to use in computing the mean. For integer inputs, the default + is `float64`; for floating point inputs, it is the same as the + input dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + See `doc.ufuncs` for details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `mean` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. + + See Also + -------- + average : Weighted average + std, var, nanmean, nanstd, nanvar + + Notes + ----- + The arithmetic mean is the sum of the elements along the axis divided + by the number of elements. 
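(The `mean` notes continue below.) The round-half-to-even rule and the negative-`decimals` behavior described in the `around` notes above are easy to verify; a short check, not from the original file:

import numpy as np

# Ties go to the nearest even value ("banker's rounding"):
print(np.around([0.5, 1.5, 2.5, 3.5]))       # [ 0.  2.  2.  4.]
# Negative decimals round to the left of the decimal point:
print(np.around([1, 22, 387], decimals=-1))  # [  0  20 390]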
+ + Note that for floating-point input, the mean is computed using the + same precision the input has. Depending on the input data, this can + cause the results to be inaccurate, especially for `float32` (see + example below). Specifying a higher-precision accumulator using the + `dtype` keyword can alleviate this issue. + + By default, `float16` results are computed using `float32` intermediates + for extra precision. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.mean(a) + 2.5 + >>> np.mean(a, axis=0) + array([ 2., 3.]) + >>> np.mean(a, axis=1) + array([ 1.5, 3.5]) + + In single precision, `mean` can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.mean(a) + 0.54999924 + + Computing the mean in float64 is more accurate: + + >>> np.mean(a, dtype=np.float64) + 0.55000000074505806 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is not mu.ndarray: + try: + mean = a.mean + except AttributeError: + pass + else: + return mean(axis=axis, dtype=dtype, out=out, **kwargs) + + return _methods._mean(a, axis=axis, dtype=dtype, + out=out, **kwargs) + + +def _std_dispatcher( + a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_std_dispatcher) +def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): + """ + Compute the standard deviation along the specified axis. + + Returns the standard deviation, a measure of the spread of a distribution, + of the array elements. The standard deviation is computed for the + flattened array by default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of these values. + axis : None or int or tuple of ints, optional + Axis or axes along which the standard deviation is computed. The + default is to compute the standard deviation of the flattened array. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a standard deviation is performed over + multiple axes, instead of a single axis or all the axes as before. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it is + the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : int, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `std` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard deviation, + otherwise return a reference to the output array. 
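(The `std` docstring continues below.) The single-precision caveat that both `mean` and `std` raise is easy to reproduce, and a wider accumulator passed through `dtype` is the remedy the docstrings recommend. A runnable restatement of the docstrings' own example:

import numpy as np

a = np.zeros((2, 512 * 512), dtype=np.float32)
a[0, :] = 1.0
a[1, :] = 0.1
print(np.mean(a))                    # about 0.54999924, drifting from 0.55
print(np.mean(a, dtype=np.float64))  # about 0.55000000074505806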
+
+    See Also
+    --------
+    var, mean, nanmean, nanstd, nanvar
+    numpy.doc.ufuncs : Section "Output arguments"
+
+    Notes
+    -----
+    The standard deviation is the square root of the average of the squared
+    deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
+
+    The average squared deviation is normally calculated as
+    ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
+    the divisor ``N - ddof`` is used instead. In standard statistical
+    practice, ``ddof=1`` provides an unbiased estimator of the variance
+    of the infinite population. ``ddof=0`` provides a maximum likelihood
+    estimate of the variance for normally distributed variables. The
+    standard deviation computed in this function is the square root of
+    the estimated variance, so even with ``ddof=1``, it will not be an
+    unbiased estimate of the standard deviation per se.
+
+    Note that, for complex numbers, `std` takes the absolute
+    value before squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the *std* is computed using the same
+    precision the input has. Depending on the input data, this can cause
+    the results to be inaccurate, especially for float32 (see example below).
+    Specifying a higher-accuracy accumulator using the `dtype` keyword can
+    alleviate this issue.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> np.std(a)
+    1.1180339887498949
+    >>> np.std(a, axis=0)
+    array([ 1.,  1.])
+    >>> np.std(a, axis=1)
+    array([ 0.5,  0.5])
+
+    In single precision, std() can be inaccurate:
+
+    >>> a = np.zeros((2, 512*512), dtype=np.float32)
+    >>> a[0, :] = 1.0
+    >>> a[1, :] = 0.1
+    >>> np.std(a)
+    0.45000005
+
+    Computing the standard deviation in float64 is more accurate:
+
+    >>> np.std(a, dtype=np.float64)
+    0.44999999925494177
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+
+    if type(a) is not mu.ndarray:
+        try:
+            std = a.std
+        except AttributeError:
+            pass
+        else:
+            return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
+    return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+                         **kwargs)
+
+
+def _var_dispatcher(
+        a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+    return (a, out)
+
+
+@array_function_dispatch(_var_dispatcher)
+def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
+    """
+    Compute the variance along the specified axis.
+
+    Returns the variance of the array elements, a measure of the spread of a
+    distribution. The variance is computed for the flattened array by
+    default, otherwise over the specified axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose variance is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which the variance is computed. The default is to
+        compute the variance of the flattened array.
+
+        .. versionadded:: 1.7.0
+
+        If this is a tuple of ints, a variance is performed over multiple axes,
+        instead of a single axis or all the axes as before.
+    dtype : data-type, optional
+        Type to use in computing the variance. For arrays of integer type
+        the default is `float64`; for arrays of float types it is the same as
+        the array type.
+    out : ndarray, optional
+        Alternate output array in which to place the result. It must have
+        the same shape as the expected output, but the type is cast if
+        necessary.
+ ddof : int, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `var` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + variance : ndarray, see dtype parameter above + If ``out=None``, returns a new array containing the variance; + otherwise, a reference to the output array is returned. + + See Also + -------- + std , mean, nanmean, nanstd, nanvar + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.var(a) + 1.25 + >>> np.var(a, axis=0) + array([ 1., 1.]) + >>> np.var(a, axis=1) + array([ 0.25, 0.25]) + + In single precision, var() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.var(a) + 0.20250003 + + Computing the variance in float64 is more accurate: + + >>> np.var(a, dtype=np.float64) + 0.20249999932944759 + >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 + 0.2025 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + + if type(a) is not mu.ndarray: + try: + var = a.var + + except AttributeError: + pass + else: + return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) + + +# Aliases of other functions. These have their own definitions only so that +# they can have unique docstrings. + +@array_function_dispatch(_around_dispatcher) +def round_(a, decimals=0, out=None): + """ + Round an array to the given number of decimals. + + See Also + -------- + around : equivalent function; see for details. + """ + return around(a, decimals=decimals, out=out) + + +@array_function_dispatch(_prod_dispatcher, verify=False) +def product(*args, **kwargs): + """ + Return the product of array elements over a given axis. + + See Also + -------- + prod : equivalent function; see for details. 
+ """ + return prod(*args, **kwargs) + + +@array_function_dispatch(_cumprod_dispatcher, verify=False) +def cumproduct(*args, **kwargs): + """ + Return the cumulative product over the given axis. + + See Also + -------- + cumprod : equivalent function; see for details. + """ + return cumprod(*args, **kwargs) + + +@array_function_dispatch(_any_dispatcher, verify=False) +def sometrue(*args, **kwargs): + """ + Check whether some values are true. + + Refer to `any` for full documentation. + + See Also + -------- + any : equivalent function; see for details. + """ + return any(*args, **kwargs) + + +@array_function_dispatch(_all_dispatcher, verify=False) +def alltrue(*args, **kwargs): + """ + Check if all elements of input array are true. + + See Also + -------- + numpy.all : Equivalent function; see for details. + """ + return all(*args, **kwargs) + + +@array_function_dispatch(_ndim_dispatcher) +def rank(a): + """ + Return the number of dimensions of an array. + + .. note:: + This function is deprecated in NumPy 1.9 to avoid confusion with + `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function + should be used instead. + + See Also + -------- + ndim : equivalent non-deprecated function + + Notes + ----- + In the old Numeric package, `rank` was the term used for the number of + dimensions, but in NumPy `ndim` is used instead. + """ + # 2014-04-12, 1.9 + warnings.warn( + "`rank` is deprecated; use the `ndim` attribute or function instead. " + "To find the rank of a matrix see `numpy.linalg.matrix_rank`.", + VisibleDeprecationWarning, stacklevel=2) + return ndim(a) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.pyc new file mode 100644 index 0000000..88d2d5e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/function_base.py b/project/venv/lib/python2.7/site-packages/numpy/core/function_base.py new file mode 100644 index 0000000..b68fd40 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/function_base.py @@ -0,0 +1,462 @@ +from __future__ import division, absolute_import, print_function + +import functools +import warnings +import operator + +from . import numeric as _nx +from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, + TooHardError, asanyarray) +from numpy.core.multiarray import add_docstring +from numpy.core import overrides + +__all__ = ['logspace', 'linspace', 'geomspace'] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _index_deprecate(i, stacklevel=2): + try: + i = operator.index(i) + except TypeError: + msg = ("object of type {} cannot be safely interpreted as " + "an integer.".format(type(i))) + i = int(i) + stacklevel += 1 + warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel) + return i + + +def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, + dtype=None, axis=None): + return (start, stop) + + +@array_function_dispatch(_linspace_dispatcher) +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, + axis=0): + """ + Return evenly spaced numbers over a specified interval. + + Returns `num` evenly spaced samples, calculated over the + interval [`start`, `stop`]. + + The endpoint of the interval can optionally be excluded. + + .. versionchanged:: 1.16.0 + Non-scalar `start` and `stop` are now supported. 
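(The `linspace` parameter list follows below.) A note on the alias block that closes fromnumeric.py above: `round_`, `product`, `cumproduct`, `sometrue` and `alltrue` are legacy spellings kept only so they can carry their own docstrings, and `rank` additionally warns when called. New code is expected to use the canonical names:

import numpy as np

x = np.array([[1, 2], [3, 4]])
print(np.prod(x))     # instead of np.product
print(np.cumprod(x))  # instead of np.cumproduct
print(np.any(x > 3))  # instead of np.sometrue
print(np.all(x > 0))  # instead of np.alltrue
print(np.ndim(x))     # instead of the deprecated np.rank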
+ + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The end value of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced samples, so that `stop` is excluded. Note that the step + size changes when `endpoint` is False. + num : int, optional + Number of samples to generate. Default is 50. Must be non-negative. + endpoint : bool, optional + If True, `stop` is the last sample. Otherwise, it is not included. + Default is True. + retstep : bool, optional + If True, return (`samples`, `step`), where `step` is the spacing + between samples. + dtype : dtype, optional + The type of the output array. If `dtype` is not given, infer the data + type from the other input arguments. + + .. versionadded:: 1.9.0 + + axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + + .. versionadded:: 1.16.0 + + Returns + ------- + samples : ndarray + There are `num` equally spaced samples in the closed interval + ``[start, stop]`` or the half-open interval ``[start, stop)`` + (depending on whether `endpoint` is True or False). + step : float, optional + Only returned if `retstep` is True + + Size of spacing between samples. + + + See Also + -------- + arange : Similar to `linspace`, but uses a step size (instead of the + number of samples). + geomspace : Similar to `linspace`, but with numbers spaced evenly on a log + scale (a geometric progression). + logspace : Similar to `geomspace`, but with the end points specified as + logarithms. + + Examples + -------- + >>> np.linspace(2.0, 3.0, num=5) + array([ 2. , 2.25, 2.5 , 2.75, 3. ]) + >>> np.linspace(2.0, 3.0, num=5, endpoint=False) + array([ 2. , 2.2, 2.4, 2.6, 2.8]) + >>> np.linspace(2.0, 3.0, num=5, retstep=True) + (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 8 + >>> y = np.zeros(N) + >>> x1 = np.linspace(0, 10, N, endpoint=True) + >>> x2 = np.linspace(0, 10, N, endpoint=False) + >>> plt.plot(x1, y, 'o') + [] + >>> plt.plot(x2, y + 0.5, 'o') + [] + >>> plt.ylim([-0.5, 1]) + (-0.5, 1) + >>> plt.show() + + """ + # 2016-02-25, 1.12 + num = _index_deprecate(num) + if num < 0: + raise ValueError("Number of samples, %s, must be non-negative." % num) + div = (num - 1) if endpoint else num + + # Convert float/complex array scalars to float, gh-3504 + # and make sure one can use variables that have an __array_interface__, gh-6634 + start = asanyarray(start) * 1.0 + stop = asanyarray(stop) * 1.0 + + dt = result_type(start, stop, float(num)) + if dtype is None: + dtype = dt + + delta = stop - start + y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * delta.ndim) + # In-place multiplication y *= delta/div is faster, but prevents the multiplicant + # from overriding what class is produced, and thus prevents, e.g. use of Quantities, + # see gh-7142. Hence, we multiply in place only for standard scalar types. 
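+    # In other words: when delta is a plain scalar it is safe to scale y in
+    # place, but when start/stop were array_like (so delta is an ndarray or
+    # a subclass such as an astropy Quantity, gh-7142), the out-of-place
+    # "y = y * delta" lets delta's type participate in choosing the type of
+    # the result.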
+ _mult_inplace = _nx.isscalar(delta) + if num > 1: + step = delta / div + if _nx.any(step == 0): + # Special handling for denormal numbers, gh-5437 + y /= div + if _mult_inplace: + y *= delta + else: + y = y * delta + else: + if _mult_inplace: + y *= step + else: + y = y * step + else: + # 0 and 1 item long sequences have an undefined step + step = NaN + # Multiply with delta to allow possible override of output class. + y = y * delta + + y += start + + if endpoint and num > 1: + y[-1] = stop + + if axis != 0: + y = _nx.moveaxis(y, 0, axis) + + if retstep: + return y.astype(dtype, copy=False), step + else: + return y.astype(dtype, copy=False) + + +def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None, + dtype=None, axis=None): + return (start, stop) + + +@array_function_dispatch(_logspace_dispatcher) +def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, + axis=0): + """ + Return numbers spaced evenly on a log scale. + + In linear space, the sequence starts at ``base ** start`` + (`base` to the power of `start`) and ends with ``base ** stop`` + (see `endpoint` below). + + .. versionchanged:: 1.16.0 + Non-scalar `start` and `stop` are now supported. + + Parameters + ---------- + start : array_like + ``base ** start`` is the starting value of the sequence. + stop : array_like + ``base ** stop`` is the final value of the sequence, unless `endpoint` + is False. In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length `num`) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + base : float, optional + The base of the log space. The step size between the elements in + ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. + Default is 10.0. + dtype : dtype + The type of the output array. If `dtype` is not given, infer the data + type from the other input arguments. + axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + + .. versionadded:: 1.16.0 + + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + arange : Similar to linspace, with the step size specified instead of the + number of samples. Note that, when used with a float endpoint, the + endpoint may or may not be included. + linspace : Similar to logspace, but with the samples uniformly distributed + in linear space, instead of log space. + geomspace : Similar to logspace, but with endpoints specified directly. + + Notes + ----- + Logspace is equivalent to the code + + >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) + ... # doctest: +SKIP + >>> power(base, y).astype(dtype) + ... # doctest: +SKIP + + Examples + -------- + >>> np.logspace(2.0, 3.0, num=4) + array([ 100. , 215.443469 , 464.15888336, 1000. ]) + >>> np.logspace(2.0, 3.0, num=4, endpoint=False) + array([ 100. , 177.827941 , 316.22776602, 562.34132519]) + >>> np.logspace(2.0, 3.0, num=4, base=2.0) + array([ 4. , 5.0396842 , 6.34960421, 8. 
]) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 10 + >>> x1 = np.logspace(0.1, 1, N, endpoint=True) + >>> x2 = np.logspace(0.1, 1, N, endpoint=False) + >>> y = np.zeros(N) + >>> plt.plot(x1, y, 'o') + [] + >>> plt.plot(x2, y + 0.5, 'o') + [] + >>> plt.ylim([-0.5, 1]) + (-0.5, 1) + >>> plt.show() + + """ + y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis) + if dtype is None: + return _nx.power(base, y) + return _nx.power(base, y).astype(dtype, copy=False) + + +def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None, + axis=None): + return (start, stop) + + +@array_function_dispatch(_geomspace_dispatcher) +def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): + """ + Return numbers spaced evenly on a log scale (a geometric progression). + + This is similar to `logspace`, but with endpoints specified directly. + Each output sample is a constant multiple of the previous. + + .. versionchanged:: 1.16.0 + Non-scalar `start` and `stop` are now supported. + + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The final value of the sequence, unless `endpoint` is False. + In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length `num`) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + dtype : dtype + The type of the output array. If `dtype` is not given, infer the data + type from the other input arguments. + axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + + .. versionadded:: 1.16.0 + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + logspace : Similar to geomspace, but with endpoints specified using log + and base. + linspace : Similar to geomspace, but with arithmetic instead of geometric + progression. + arange : Similar to linspace, with the step size specified instead of the + number of samples. + + Notes + ----- + If the inputs or dtype are complex, the output will follow a logarithmic + spiral in the complex plane. (There are an infinite number of spirals + passing through two points; the output will follow the shortest such path.) + + Examples + -------- + >>> np.geomspace(1, 1000, num=4) + array([ 1., 10., 100., 1000.]) + >>> np.geomspace(1, 1000, num=3, endpoint=False) + array([ 1., 10., 100.]) + >>> np.geomspace(1, 1000, num=4, endpoint=False) + array([ 1. , 5.62341325, 31.6227766 , 177.827941 ]) + >>> np.geomspace(1, 256, num=9) + array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) + + Note that the above may not produce exact integers: + + >>> np.geomspace(1, 256, num=9, dtype=int) + array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) + >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) + + Negative, decreasing, and complex inputs are allowed: + + >>> np.geomspace(1000, 1, num=4) + array([ 1000., 100., 10., 1.]) + >>> np.geomspace(-1000, -1, num=4) + array([-1000., -100., -10., -1.]) + >>> np.geomspace(1j, 1000j, num=4) # Straight line + array([ 0. +1.j, 0. +10.j, 0. 
+100.j, 0.+1000.j]) + >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle + array([-1.00000000+0.j , -0.70710678+0.70710678j, + 0.00000000+1.j , 0.70710678+0.70710678j, + 1.00000000+0.j ]) + + Graphical illustration of ``endpoint`` parameter: + + >>> import matplotlib.pyplot as plt + >>> N = 10 + >>> y = np.zeros(N) + >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') + >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') + >>> plt.axis([0.5, 2000, 0, 3]) + >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') + >>> plt.show() + + """ + start = asanyarray(start) + stop = asanyarray(stop) + if _nx.any(start == 0) or _nx.any(stop == 0): + raise ValueError('Geometric sequence cannot include zero') + + dt = result_type(start, stop, float(num), _nx.zeros((), dtype)) + if dtype is None: + dtype = dt + else: + # complex to dtype('complex128'), for instance + dtype = _nx.dtype(dtype) + + # Promote both arguments to the same dtype in case, for instance, one is + # complex and another is negative and log would produce NaN otherwise. + # Copy since we may change things in-place further down. + start = start.astype(dt, copy=True) + stop = stop.astype(dt, copy=True) + + out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt) + # Avoid negligible real or imaginary parts in output by rotating to + # positive real, calculating, then undoing rotation + if _nx.issubdtype(dt, _nx.complexfloating): + all_imag = (start.real == 0.) & (stop.real == 0.) + if _nx.any(all_imag): + start[all_imag] = start[all_imag].imag + stop[all_imag] = stop[all_imag].imag + out_sign[all_imag] = 1j + + both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1) + if _nx.any(both_negative): + _nx.negative(start, out=start, where=both_negative) + _nx.negative(stop, out=stop, where=both_negative) + _nx.negative(out_sign, out=out_sign, where=both_negative) + + log_start = _nx.log10(start) + log_stop = _nx.log10(stop) + result = out_sign * logspace(log_start, log_stop, num=num, + endpoint=endpoint, base=10.0, dtype=dtype) + if axis != 0: + result = _nx.moveaxis(result, 0, axis) + + return result.astype(dtype, copy=False) + + +#always succeed +def add_newdoc(place, obj, doc): + """ + Adds documentation to obj which is in module place. + + If doc is a string add it to obj as a docstring + + If doc is a tuple, then the first element is interpreted as + an attribute of obj and the second as the docstring + (method, docstring) + + If doc is a list, then each element of the list should be a + sequence of length two --> [(method1, docstring1), + (method2, docstring2), ...] + + This routine never raises an error. + + This routine cannot modify read-only docstrings, as appear + in new-style classes or built-in functions. Because this + routine never raises an error the caller must check manually + that the docstrings were changed. 
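+
+    A minimal sketch of the three accepted forms (the docstring texts and
+    target names here are placeholders):
+
+    add_newdoc('numpy.core', 'linspace', "Return evenly spaced numbers.")
+    add_newdoc('numpy.core', 'ndarray', ('transpose', "Permute the axes."))
+    add_newdoc('numpy.core', 'ndarray', [('all', "..."), ('any', "...")])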
+ """ + try: + new = getattr(__import__(place, globals(), {}, [obj]), obj) + if isinstance(doc, str): + add_docstring(new, doc.strip()) + elif isinstance(doc, tuple): + add_docstring(getattr(new, doc[0]), doc[1].strip()) + elif isinstance(doc, list): + for val in doc: + add_docstring(getattr(new, val[0]), val[1].strip()) + except Exception: + pass diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/function_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/function_base.pyc new file mode 100644 index 0000000..7fa35ae Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/function_base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.py b/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.py new file mode 100644 index 0000000..5e04fb8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.py @@ -0,0 +1,254 @@ +from __future__ import division, print_function + +import os +import genapi + +from genapi import \ + TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi + +import numpy_api + +# use annotated api when running under cpychecker +h_template = r""" +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +%s + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern void **PyArray_API; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +void **PyArray_API; +#else +static void **PyArray_API=NULL; +#endif +#endif + +%s + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + int st; + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + PyObject *c_api = NULL; + + if (numpy == NULL) { + return -1; + } + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); + return -1; + } + +#if PY_VERSION_HEX >= 0x03000000 + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); +#else + if (!PyCObject_Check(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); +#endif + Py_DECREF(c_api); + if (PyArray_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); + return -1; + } + + /* Perform runtime check of C API version */ + if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "ABI version 0x%%x but this version of numpy is 0x%%x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "API version 0x%%x but this version of numpy is 0x%%x", \ + (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); + return -1; + } + + /* + * Perform runtime check of endianness and 
check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "big endian, but detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "little endian, but detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#if PY_VERSION_HEX >= 0x03000000 +#define NUMPY_IMPORT_ARRAY_RETVAL NULL +#else +#define NUMPY_IMPORT_ARRAY_RETVAL +#endif + +#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } + +#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } + +#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } + +#endif + +#endif +""" + + +c_template = r""" +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyArray_API[] = { +%s +}; +""" + +c_api_header = """ +=========== +NumPy C-API +=========== +""" + +def generate_api(output_dir, force=False): + basename = 'multiarray_api' + + h_file = os.path.join(output_dir, '__%s.h' % basename) + c_file = os.path.join(output_dir, '__%s.c' % basename) + d_file = os.path.join(output_dir, '%s.txt' % basename) + targets = (h_file, c_file, d_file) + + sources = numpy_api.multiarray_api + + if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])): + return targets + else: + do_generate_api(targets, sources) + + return targets + +def do_generate_api(targets, sources): + header_file = targets[0] + c_file = targets[1] + doc_file = targets[2] + + global_vars = sources[0] + scalar_bool_values = sources[1] + types_api = sources[2] + multiarray_funcs = sources[3] + + multiarray_api = sources[:] + + module_list = [] + extension_list = [] + init_list = [] + + # Check multiarray api indexes + multiarray_api_index = genapi.merge_api_dicts(multiarray_api) + genapi.check_api_dict(multiarray_api_index) + + numpyapi_list = genapi.get_api_functions('NUMPY_API', + multiarray_funcs) + + # FIXME: ordered_funcs_api is unused + ordered_funcs_api = genapi.order_dict(multiarray_funcs) + + # Create dict name -> *Api instance + api_name = 'PyArray_API' + multiarray_api_dict = {} + for f in numpyapi_list: + name = f.name + index = multiarray_funcs[name][0] + annotations = multiarray_funcs[name][1:] + multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations, + f.return_type, + f.args, api_name) + + for name, val in global_vars.items(): + index, type = val + multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name) + + for name, val in scalar_bool_values.items(): + index = val[0] + multiarray_api_dict[name] = BoolValuesApi(name, index, api_name) + + for name, val in types_api.items(): + index = val[0] + multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) + + if len(multiarray_api_dict) != len(multiarray_api_index): + keys_dict = set(multiarray_api_dict.keys()) + keys_index = 
set(multiarray_api_index.keys()) + raise AssertionError( + "Multiarray API size mismatch - " + "index has extra keys {}, dict has extra keys {}" + .format(keys_index - keys_dict, keys_dict - keys_index) + ) + + extension_list = [] + for name, index in genapi.order_dict(multiarray_api_index): + api_item = multiarray_api_dict[name] + extension_list.append(api_item.define_from_array_api_string()) + init_list.append(api_item.array_api_define()) + module_list.append(api_item.internal_define()) + + # Write to header + s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) + genapi.write_file(header_file, s) + + # Write to c-code + s = c_template % ',\n'.join(init_list) + genapi.write_file(c_file, s) + + # write to documentation + s = c_api_header + for func in numpyapi_list: + s += func.to_ReST() + s += '\n\n' + genapi.write_file(doc_file, s) + + return targets diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.pyc new file mode 100644 index 0000000..8a03d2b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.py b/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.py new file mode 100644 index 0000000..544b8b3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.py @@ -0,0 +1,550 @@ +"""Machine limits for Float32 and Float64 and (long double) if available... + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['finfo', 'iinfo'] + +import warnings + +from .machar import MachAr +from .overrides import set_module +from . import numeric +from . import numerictypes as ntypes +from .numeric import array, inf +from .umath import log10, exp2 +from . import umath + + +def _fr0(a): + """fix rank-0 --> rank-1""" + if a.ndim == 0: + a = a.copy() + a.shape = (1,) + return a + + +def _fr1(a): + """fix rank > 0 --> rank-0""" + if a.size == 1: + a = a.copy() + a.shape = () + return a + +class MachArLike(object): + """ Object to simulate MachAr instance """ + + def __init__(self, + ftype, + **kwargs): + params = _MACHAR_PARAMS[ftype] + float_conv = lambda v: array([v], ftype) + float_to_float = lambda v : _fr1(float_conv(v)) + float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype)) + + self.title = params['title'] + # Parameter types same as for discovered MachAr object. 
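+        # The keyword arguments split in two groups: eps, epsneg, huge, tiny
+        # and ibeta are popped and converted below, while the remaining
+        # integer parameters (machep, negep, minexp, maxexp, it, iexp, irnd,
+        # ngrd) are attached verbatim by the __dict__.update(kwargs) call.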
+ self.epsilon = self.eps = float_to_float(kwargs.pop('eps')) + self.epsneg = float_to_float(kwargs.pop('epsneg')) + self.xmax = self.huge = float_to_float(kwargs.pop('huge')) + self.xmin = self.tiny = float_to_float(kwargs.pop('tiny')) + self.ibeta = params['itype'](kwargs.pop('ibeta')) + self.__dict__.update(kwargs) + self.precision = int(-log10(self.eps)) + self.resolution = float_to_float(float_conv(10) ** (-self.precision)) + self._str_eps = float_to_str(self.eps) + self._str_epsneg = float_to_str(self.epsneg) + self._str_xmin = float_to_str(self.xmin) + self._str_xmax = float_to_str(self.xmax) + self._str_resolution = float_to_str(self.resolution) + +_convert_to_float = { + ntypes.csingle: ntypes.single, + ntypes.complex_: ntypes.float_, + ntypes.clongfloat: ntypes.longfloat + } + +# Parameters for creating MachAr / MachAr-like objects +_title_fmt = 'numpy {} precision floating point number' +_MACHAR_PARAMS = { + ntypes.double: dict( + itype = ntypes.int64, + fmt = '%24.16e', + title = _title_fmt.format('double')), + ntypes.single: dict( + itype = ntypes.int32, + fmt = '%15.7e', + title = _title_fmt.format('single')), + ntypes.longdouble: dict( + itype = ntypes.longlong, + fmt = '%s', + title = _title_fmt.format('long double')), + ntypes.half: dict( + itype = ntypes.int16, + fmt = '%12.5e', + title = _title_fmt.format('half'))} + +# Key to identify the floating point type. Key is result of +# ftype('-0.1').newbyteorder('<').tobytes() +# See: +# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure +_KNOWN_TYPES = {} +def _register_type(machar, bytepat): + _KNOWN_TYPES[bytepat] = machar +_float_ma = {} + +def _register_known_types(): + # Known parameters for float16 + # See docstring of MachAr class for description of parameters. 
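+    # For IEEE 754 binary16 the values follow directly from the format:
+    # 10 explicit significand bits give eps = 2**-10, the smallest normal
+    # number is tiny = 2**-14, and the largest finite value is
+    # huge = (2 - 2**-10) * 2**15 = 65504.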
+ f16 = ntypes.float16 + float16_ma = MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + it=10, + iexp=5, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f16(-10)), + epsneg=exp2(f16(-11)), + huge=f16(65504), + tiny=f16(2 ** -14)) + _register_type(float16_ma, b'f\xae') + _float_ma[16] = float16_ma + + # Known parameters for float32 + f32 = ntypes.float32 + float32_ma = MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + it=23, + iexp=8, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f32(-23)), + epsneg=exp2(f32(-24)), + huge=f32((1 - 2 ** -24) * 2**128), + tiny=exp2(f32(-126))) + _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') + _float_ma[32] = float32_ma + + # Known parameters for float64 + f64 = ntypes.float64 + epsneg_f64 = 2.0 ** -53.0 + tiny_f64 = 2.0 ** -1022.0 + float64_ma = MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + it=52, + iexp=11, + ibeta=2, + irnd=5, + ngrd=0, + eps=2.0 ** -52.0, + epsneg=epsneg_f64, + huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), + tiny=tiny_f64) + _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') + _float_ma[64] = float64_ma + + # Known parameters for IEEE 754 128-bit binary float + ld = ntypes.longdouble + epsneg_f128 = exp2(ld(-113)) + tiny_f128 = exp2(ld(-16382)) + # Ignore runtime error when this is not f128 + with numeric.errstate(all='ignore'): + huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) + float128_ma = MachArLike(ld, + machep=-112, + negep=-113, + minexp=-16382, + maxexp=16384, + it=112, + iexp=15, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-112)), + epsneg=epsneg_f128, + huge=huge_f128, + tiny=tiny_f128) + # IEEE 754 128-bit binary float + _register_type(float128_ma, + b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') + _register_type(float128_ma, + b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') + _float_ma[128] = float128_ma + + # Known parameters for float80 (Intel 80-bit extended precision) + epsneg_f80 = exp2(ld(-64)) + tiny_f80 = exp2(ld(-16382)) + # Ignore runtime error when this is not f80 + with numeric.errstate(all='ignore'): + huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) + float80_ma = MachArLike(ld, + machep=-63, + negep=-64, + minexp=-16382, + maxexp=16384, + it=63, + iexp=15, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-63)), + epsneg=epsneg_f80, + huge=huge_f80, + tiny=tiny_f80) + # float80, first 10 bytes containing actual storage + _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') + _float_ma[80] = float80_ma + + # Guessed / known parameters for double double; see: + # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic + # These numbers have the same exponent range as float64, but extended number of + # digits in the significand. + huge_dd = (umath.nextafter(ld(inf), ld(0)) + if hasattr(umath, 'nextafter') # Missing on some platforms? + else float64_ma.huge) + float_dd_ma = MachArLike(ld, + machep=-105, + negep=-106, + minexp=-1022, + maxexp=1024, + it=105, + iexp=11, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-105)), + epsneg= exp2(ld(-106)), + huge=huge_dd, + tiny=exp2(ld(-1022))) + # double double; low, high order (e.g. PPC 64) + _register_type(float_dd_ma, + b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') + # double double; high, low order (e.g. 
PPC 64 le) + _register_type(float_dd_ma, + b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') + _float_ma['dd'] = float_dd_ma + + +def _get_machar(ftype): + """ Get MachAr instance or MachAr-like instance + + Get parameters for floating point type, by first trying signatures of + various known floating point types, then, if none match, attempting to + identify parameters by analysis. + + Parameters + ---------- + ftype : class + Numpy floating point type class (e.g. ``np.float64``) + + Returns + ------- + ma_like : instance of :class:`MachAr` or :class:`MachArLike` + Object giving floating point parameters for `ftype`. + + Warns + ----- + UserWarning + If the binary signature of the float type is not in the dictionary of + known float types. + """ + params = _MACHAR_PARAMS.get(ftype) + if params is None: + raise ValueError(repr(ftype)) + # Detect known / suspected types + key = ftype('-0.1').newbyteorder('<').tobytes() + ma_like = _KNOWN_TYPES.get(key) + # Could be 80 bit == 10 byte extended precision, where last bytes can be + # random garbage. Try comparing first 10 bytes to pattern. + if ma_like is None and ftype == ntypes.longdouble: + ma_like = _KNOWN_TYPES.get(key[:10]) + if ma_like is not None: + return ma_like + # Fall back to parameter discovery + warnings.warn( + 'Signature {} for {} does not match any known type: ' + 'falling back to type probe function'.format(key, ftype), + UserWarning, stacklevel=2) + return _discovered_machar(ftype) + + +def _discovered_machar(ftype): + """ Create MachAr instance with found information on float types + """ + params = _MACHAR_PARAMS[ftype] + return MachAr(lambda v: array([v], ftype), + lambda v:_fr0(v.astype(params['itype']))[0], + lambda v:array(_fr0(v)[0], ftype), + lambda v: params['fmt'] % array(_fr0(v)[0], ftype), + params['title']) + + +@set_module('numpy') +class finfo(object): + """ + finfo(dtype) + + Machine limits for floating point types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + eps : float + The smallest representable positive number such that + ``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating + point type. + epsneg : floating point number of the appropriate type + The smallest representable positive number such that + ``1.0 - epsneg != 1.0``. + iexp : int + The number of bits in the exponent portion of the floating point + representation. + machar : MachAr + The object which calculated these parameters and holds more + detailed information. + machep : int + The exponent that yields `eps`. + max : floating point number of the appropriate type + The largest representable number. + maxexp : int + The smallest positive power of the base (2) that causes overflow. + min : floating point number of the appropriate type + The smallest representable number, typically ``-max``. + minexp : int + The most negative power of the base (2) consistent with there + being no leading 0's in the mantissa. + negep : int + The exponent that yields `epsneg`. + nexp : int + The number of bits in the exponent including its sign and bias. + nmant : int + The number of bits in the mantissa. + precision : int + The approximate number of decimal digits to which this kind of + float is precise. + resolution : floating point number of the appropriate type + The approximate decimal resolution of this type, i.e., + ``10**-precision``. + tiny : float + The smallest positive usable number. Type of `tiny` is an + appropriate floating point type. 
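+
+    A short sketch of typical use (the values shown are the IEEE 754
+    binary64 constants):
+
+    >>> np.finfo(np.float64).eps
+    2.220446049250313e-16
+    >>> np.finfo(np.float64).max
+    1.7976931348623157e+308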
+ + Parameters + ---------- + dtype : float, dtype, or instance + Kind of floating point data-type about which to get information. + + See Also + -------- + MachAr : The implementation of the tests that produce this information. + iinfo : The equivalent for integer data types. + + Notes + ----- + For developers of NumPy: do not instantiate this at the module level. + The initial calculation of these parameters is expensive and negatively + impacts import times. These objects are cached, so calling ``finfo()`` + repeatedly inside your functions is not a problem. + + """ + + _finfo_cache = {} + + def __new__(cls, dtype): + try: + dtype = numeric.dtype(dtype) + except TypeError: + # In case a float instance was given + dtype = numeric.dtype(type(dtype)) + + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + return obj + dtypes = [dtype] + newdtype = numeric.obj2sctype(dtype) + if newdtype is not dtype: + dtypes.append(newdtype) + dtype = newdtype + if not issubclass(dtype, numeric.inexact): + raise ValueError("data type %r not inexact" % (dtype)) + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + return obj + if not issubclass(dtype, numeric.floating): + newdtype = _convert_to_float[dtype] + if newdtype is not dtype: + dtypes.append(newdtype) + dtype = newdtype + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + return obj + obj = object.__new__(cls)._init(dtype) + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + + def _init(self, dtype): + self.dtype = numeric.dtype(dtype) + machar = _get_machar(dtype) + + for word in ['precision', 'iexp', + 'maxexp', 'minexp', 'negep', + 'machep']: + setattr(self, word, getattr(machar, word)) + for word in ['tiny', 'resolution', 'epsneg']: + setattr(self, word, getattr(machar, word).flat[0]) + self.bits = self.dtype.itemsize * 8 + self.max = machar.huge.flat[0] + self.min = -self.max + self.eps = machar.eps.flat[0] + self.nexp = machar.iexp + self.nmant = machar.it + self.machar = machar + self._str_tiny = machar._str_xmin.strip() + self._str_max = machar._str_xmax.strip() + self._str_epsneg = machar._str_epsneg.strip() + self._str_eps = machar._str_eps.strip() + self._str_resolution = machar._str_resolution.strip() + return self + + def __str__(self): + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'precision = %(precision)3s resolution = %(_str_resolution)s\n' + 'machep = %(machep)6s eps = %(_str_eps)s\n' + 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' + 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' + 'maxexp = %(maxexp)6s max = %(_str_max)s\n' + 'nexp = %(nexp)6s min = -max\n' + '---------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + def __repr__(self): + c = self.__class__.__name__ + d = self.__dict__.copy() + d['klass'] = c + return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," + " max=%(_str_max)s, dtype=%(dtype)s)") % d) + + +@set_module('numpy') +class iinfo(object): + """ + iinfo(type) + + Machine limits for integer types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + min : int + The smallest integer expressible by the type. + max : int + The largest integer expressible by the type. + + Parameters + ---------- + int_type : integer type, dtype, or instance + The kind of integer data type to get information about. + + See Also + -------- + finfo : The equivalent for floating point data types. 
+ + Examples + -------- + With types: + + >>> ii16 = np.iinfo(np.int16) + >>> ii16.min + -32768 + >>> ii16.max + 32767 + >>> ii32 = np.iinfo(np.int32) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + With instances: + + >>> ii32 = np.iinfo(np.int32(10)) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + """ + + _min_vals = {} + _max_vals = {} + + def __init__(self, int_type): + try: + self.dtype = numeric.dtype(int_type) + except TypeError: + self.dtype = numeric.dtype(type(int_type)) + self.kind = self.dtype.kind + self.bits = self.dtype.itemsize * 8 + self.key = "%s%d" % (self.kind, self.bits) + if self.kind not in 'iu': + raise ValueError("Invalid integer data type %r." % (self.kind,)) + + def min(self): + """Minimum value of given dtype.""" + if self.kind == 'u': + return 0 + else: + try: + val = iinfo._min_vals[self.key] + except KeyError: + val = int(-(1 << (self.bits-1))) + iinfo._min_vals[self.key] = val + return val + + min = property(min) + + def max(self): + """Maximum value of given dtype.""" + try: + val = iinfo._max_vals[self.key] + except KeyError: + if self.kind == 'u': + val = int((1 << self.bits) - 1) + else: + val = int((1 << (self.bits-1)) - 1) + iinfo._max_vals[self.key] = val + return val + + max = property(max) + + def __str__(self): + """String representation.""" + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'min = %(min)s\n' + 'max = %(max)s\n' + '---------------------------------------------------------------\n' + ) + return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} + + def __repr__(self): + return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, + self.min, self.max, self.dtype) + diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.pyc new file mode 100644 index 0000000..0eb30ab Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__multiarray_api.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__multiarray_api.h new file mode 100644 index 0000000..e5b1cf2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__multiarray_api.h @@ -0,0 +1,1554 @@ + +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ + (void); +extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArray_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; + +extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; + +extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; + +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; + +extern NPY_NO_EXPORT 
PyTypeObject PySignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; + +NPY_NO_EXPORT int PyArray_SetNumericOps \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_GetNumericOps \ + (void); +NPY_NO_EXPORT int PyArray_INCREF \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_XDECREF \ + (PyArrayObject *); +NPY_NO_EXPORT void PyArray_SetStringFunction \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ + (int); +NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ + (int); +NPY_NO_EXPORT char * PyArray_Zero \ + (PyArrayObject *); +NPY_NO_EXPORT char * PyArray_One \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_CastToType \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_CastTo \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CastAnyTo \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CanCastSafely \ + (int, int); +NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_ObjectType \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ + (PyObject *, int *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \ + (PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_Size \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Scalar \ + (void *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_ScalarAsCtype \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_CastScalarToCtype \ + (PyObject *, void *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_CastScalarDirect \ + (PyObject *, PyArray_Descr *, void *, int); +NPY_NO_EXPORT PyObject * PyArray_ScalarFromObject \ + (PyObject *); +NPY_NO_EXPORT 
PyArray_VectorUnaryFunc * PyArray_GetCastFunc \ + (PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromDims \ + (int, int *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \ + (int, int *, PyArray_Descr *, char *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ + (PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromFile \ + (FILE *, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromString \ + (char *, npy_intp, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ + (PyObject *, PyArray_Descr *, npy_intp, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ + (PyObject *, PyArray_Descr *, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_GetField \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) int PyArray_SetField \ + (PyArrayObject *, PyArray_Descr *, int, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Byteswap \ + (PyArrayObject *, npy_bool); +NPY_NO_EXPORT PyObject * PyArray_Resize \ + (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER); +NPY_NO_EXPORT int PyArray_MoveInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyAnyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_NewCopy \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_ToList \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ToString \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT int PyArray_ToFile \ + (PyArrayObject *, FILE *, char *, char *); +NPY_NO_EXPORT int PyArray_Dump \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Dumps \ + (PyObject *, int); +NPY_NO_EXPORT int PyArray_ValidType \ + (int); +NPY_NO_EXPORT void PyArray_UpdateFlags \ + (PyArrayObject *, int); +NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_New \ + (PyTypeObject *, int, npy_intp *, int, npy_intp *, void *, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(1) NPY_GCC_NONNULL(2) PyObject * PyArray_NewFromDescr \ + (PyTypeObject *, PyArray_Descr *, int, npy_intp *, npy_intp *, void *, int, PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ + (PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ + (int); +NPY_NO_EXPORT double PyArray_GetPriority \ + (PyObject *, double); +NPY_NO_EXPORT PyObject * PyArray_IterNew \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MultiIterNew \ + (int, ...); +NPY_NO_EXPORT int PyArray_PyIntAsInt \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ + (PyObject *); +NPY_NO_EXPORT int PyArray_Broadcast \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT void PyArray_FillObjectArray \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT int PyArray_FillWithScalar \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ + (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *); +NPY_NO_EXPORT PyArray_Descr * 
PyArray_DescrNewByteorder \ + (PyArray_Descr *, char); +NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ + (PyObject *, int *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ + (PyObject *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \ + (int, PyArrayObject **); +NPY_NO_EXPORT int PyArray_CanCoerceScalar \ + (int, int, NPY_SCALARKIND); +NPY_NO_EXPORT PyObject * PyArray_NewFlagsObject \ + (PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ + (PyTypeObject *, PyTypeObject *); +NPY_NO_EXPORT int PyArray_CompareUCS4 \ + (npy_ucs4 *, npy_ucs4 *, size_t); +NPY_NO_EXPORT int PyArray_RemoveSmallest \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_ElementStrides \ + (PyObject *); +NPY_NO_EXPORT void PyArray_Item_INCREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_Item_XDECREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT PyObject * PyArray_FieldNames \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Transpose \ + (PyArrayObject *, PyArray_Dims *); +NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ + (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutTo \ + (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutMask \ + (PyArrayObject *, PyObject*, PyObject*); +NPY_NO_EXPORT PyObject * PyArray_Repeat \ + (PyArrayObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Choose \ + (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT int PyArray_Sort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgSort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ + (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMax \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMin \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Reshape \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Newshape \ + (PyArrayObject *, PyArray_Dims *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Squeeze \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ + (PyArrayObject *, PyArray_Descr *, PyTypeObject *); +NPY_NO_EXPORT PyObject * PyArray_SwapAxes \ + (PyArrayObject *, int, int); +NPY_NO_EXPORT PyObject * PyArray_Max \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Min \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Ptp \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Mean \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Trace \ + (PyArrayObject *, int, int, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Diagonal \ + (PyArrayObject *, int, int, int); +NPY_NO_EXPORT PyObject * PyArray_Clip \ + (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Conjugate \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Nonzero \ + (PyArrayObject *); 
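+
+/*
+ * Note that everything declared above is only compiled as real prototypes
+ * when building multiarray itself; in client extension modules each name
+ * resolves instead to a slot in the PyArray_API pointer table defined in
+ * the #else branch below.
+ */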
+NPY_NO_EXPORT PyObject * PyArray_Std \ + (PyArrayObject *, int, int, PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Sum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumSum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Prod \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumProd \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_All \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Any \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Compress \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Flatten \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Ravel \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ + (npy_intp *, int); +NPY_NO_EXPORT int PyArray_MultiplyIntList \ + (int *, int); +NPY_NO_EXPORT void * PyArray_GetPtr \ + (PyArrayObject *, npy_intp*); +NPY_NO_EXPORT int PyArray_CompareLists \ + (npy_intp *, npy_intp *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ + (PyObject **, void *, npy_intp *, int, PyArray_Descr*); +NPY_NO_EXPORT int PyArray_As1D \ + (PyObject **, char **, int *, int); +NPY_NO_EXPORT int PyArray_As2D \ + (PyObject **, char ***, int *, int *, int); +NPY_NO_EXPORT int PyArray_Free \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_Converter \ + (PyObject *, PyObject **); +NPY_NO_EXPORT int PyArray_IntpFromSequence \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT PyObject * PyArray_Concatenate \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_CopyAndTranspose \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Correlate \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT int PyArray_TypestrConvert \ + (int, int); +NPY_NO_EXPORT int PyArray_DescrConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_IntpConverter \ + (PyObject *, PyArray_Dims *); +NPY_NO_EXPORT int PyArray_BufferConverter \ + (PyObject *, PyArray_Chunk *); +NPY_NO_EXPORT int PyArray_AxisConverter \ + (PyObject *, int *); +NPY_NO_EXPORT int PyArray_BoolConverter \ + (PyObject *, npy_bool *); +NPY_NO_EXPORT int PyArray_ByteorderConverter \ + (PyObject *, char *); +NPY_NO_EXPORT int PyArray_OrderConverter \ + (PyObject *, NPY_ORDER *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ + (int, npy_intp *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ + (int, npy_intp *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_Where \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Arange \ + (double, double, double, int); +NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ + (PyObject *, PyObject *, PyObject *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_SortkindConverter \ + (PyObject *, NPY_SORTKIND *); +NPY_NO_EXPORT PyObject * PyArray_LexSort \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Round \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ + (int, int); +NPY_NO_EXPORT int PyArray_RegisterDataType \ 
+ (PyArray_Descr *); +NPY_NO_EXPORT int PyArray_RegisterCastFunc \ + (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); +NPY_NO_EXPORT int PyArray_RegisterCanCast \ + (PyArray_Descr *, int, NPY_SCALARKIND); +NPY_NO_EXPORT void PyArray_InitArrFuncs \ + (PyArray_ArrFuncs *); +NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ + (int, npy_intp *); +NPY_NO_EXPORT int PyArray_TypeNumFromName \ + (char *); +NPY_NO_EXPORT int PyArray_ClipmodeConverter \ + (PyObject *, NPY_CLIPMODE *); +NPY_NO_EXPORT int PyArray_OutputConverter \ + (PyObject *, PyArrayObject **); +NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT void _PyArray_SigintHandler \ + (int); +NPY_NO_EXPORT void* _PyArray_GetSigintBuf \ + (void); +NPY_NO_EXPORT int PyArray_DescrAlignConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_SearchsideConverter \ + (PyObject *, void *); +NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ + (PyArrayObject *, int *, int); +NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ + (npy_intp *, int); +NPY_NO_EXPORT int PyArray_CompareString \ + (char *, char *, size_t); +NPY_NO_EXPORT PyObject * PyArray_MultiIterFromObjects \ + (PyObject **, int, int, ...); +NPY_NO_EXPORT int PyArray_GetEndianness \ + (void); +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ + (void); +NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ + (PyArrayIterObject *, npy_intp *, int, PyArrayObject*); +extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; + +NPY_NO_EXPORT void PyArray_SetDatetimeParseFunction \ + (PyObject *); +NPY_NO_EXPORT void PyArray_DatetimeToDatetimeStruct \ + (npy_datetime, NPY_DATETIMEUNIT, npy_datetimestruct *); +NPY_NO_EXPORT void PyArray_TimedeltaToTimedeltaStruct \ + (npy_timedelta, NPY_DATETIMEUNIT, npy_timedeltastruct *); +NPY_NO_EXPORT npy_datetime PyArray_DatetimeStructToDatetime \ + (NPY_DATETIMEUNIT, npy_datetimestruct *); +NPY_NO_EXPORT npy_datetime PyArray_TimedeltaStructToTimedelta \ + (NPY_DATETIMEUNIT, npy_timedeltastruct *); +NPY_NO_EXPORT NpyIter * NpyIter_New \ + (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); +NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); +NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); +NPY_NO_EXPORT NpyIter * NpyIter_Copy \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Deallocate \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Reset \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_ResetBasePointers \ + (NpyIter *, char **, char **); +NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ + (NpyIter *, npy_intp, npy_intp, char 
**); +NPY_NO_EXPORT int NpyIter_GetNDim \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetNOp \ + (NpyIter *); +NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ + (NpyIter *, char **); +NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetIterIndexRange \ + (NpyIter *, npy_intp *, npy_intp *); +NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIterIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetShape \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ + (NpyIter *); +NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT void NpyIter_GetReadFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_GetWriteFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_DebugPrint \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT int NpyIter_RemoveAxis \ + (NpyIter *, int); +NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ + (NpyIter *, int); +NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \ + (NpyIter *); +NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ + (NpyIter *, npy_intp, npy_intp *); +NPY_NO_EXPORT int PyArray_CastingConverter \ + (PyObject *, NPY_CASTING *); +NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ + (npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **); +NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ + (PyArrayObject *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ + (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ + (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) NPY_GCC_NONNULL(1) PyObject * PyArray_NewLikeArray \ + (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject \ + (PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *); +NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ + (PyObject *, NPY_CLIPMODE *, int); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ + (PyObject *, PyObject *, PyArrayObject*); +NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ + (NpyIter *, int); +NPY_NO_EXPORT 
NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \ + (int, npy_intp *, npy_stride_sort_item *); +NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ + (PyArrayObject *, npy_bool *); +NPY_NO_EXPORT void PyArray_DebugPrint \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ + (PyArrayObject *, const char *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT void * PyDataMem_NEW \ + (size_t); +NPY_NO_EXPORT void PyDataMem_FREE \ + (void *); +NPY_NO_EXPORT void * PyDataMem_RENEW \ + (void *, size_t); +NPY_NO_EXPORT PyDataMem_EventHookFunc * PyDataMem_SetEventHook \ + (PyDataMem_EventHookFunc *, void *, void **); +extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; + +NPY_NO_EXPORT void PyArray_MapIterSwapAxes \ + (PyArrayMapIterObject *, PyArrayObject **, int); +NPY_NO_EXPORT PyObject * PyArray_MapIterArray \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_MapIterNext \ + (PyArrayMapIterObject *); +NPY_NO_EXPORT int PyArray_Partition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT int PyArray_SelectkindConverter \ + (PyObject *, NPY_SELECTKIND *); +NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ + (size_t, size_t); +NPY_NO_EXPORT NPY_GCC_NONNULL(1) int PyArray_CheckAnyScalarExact \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \ + (PyArrayObject *, PyArrayObject *); + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern void **PyArray_API; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +void **PyArray_API; +#else +static void **PyArray_API=NULL; +#endif +#endif + +#define PyArray_GetNDArrayCVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[0]) +#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1]) +#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) +#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) +#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) +#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) +#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) +#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) +#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) +#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) +#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) +#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) +#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) +#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13]) +#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) +#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) +#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) +#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) +#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) +#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19]) +#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20]) +#define PyShortArrType_Type 
(*(PyTypeObject *)PyArray_API[21]) +#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) +#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) +#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) +#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) +#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) +#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) +#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) +#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) +#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) +#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) +#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) +#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) +#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) +#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) +#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) +#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) +#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) +#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) +#define PyArray_SetNumericOps \ + (*(int (*)(PyObject *)) \ + PyArray_API[40]) +#define PyArray_GetNumericOps \ + (*(PyObject * (*)(void)) \ + PyArray_API[41]) +#define PyArray_INCREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[42]) +#define PyArray_XDECREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[43]) +#define PyArray_SetStringFunction \ + (*(void (*)(PyObject *, int)) \ + PyArray_API[44]) +#define PyArray_DescrFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[45]) +#define PyArray_TypeObjectFromType \ + (*(PyObject * (*)(int)) \ + PyArray_API[46]) +#define PyArray_Zero \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[47]) +#define PyArray_One \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[48]) +#define PyArray_CastToType \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[49]) +#define PyArray_CastTo \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[50]) +#define PyArray_CastAnyTo \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[51]) +#define PyArray_CanCastSafely \ + (*(int (*)(int, int)) \ + PyArray_API[52]) +#define PyArray_CanCastTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[53]) +#define PyArray_ObjectType \ + (*(int (*)(PyObject *, int)) \ + PyArray_API[54]) +#define PyArray_DescrFromObject \ + (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[55]) +#define PyArray_ConvertToCommonType \ + (*(PyArrayObject ** (*)(PyObject *, int *)) \ + PyArray_API[56]) +#define PyArray_DescrFromScalar \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[57]) +#define PyArray_DescrFromTypeObject \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[58]) +#define PyArray_Size \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[59]) +#define PyArray_Scalar \ + (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ + PyArray_API[60]) +#define PyArray_FromScalar \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[61]) +#define PyArray_ScalarAsCtype \ + (*(void (*)(PyObject *, void *)) \ + PyArray_API[62]) +#define PyArray_CastScalarToCtype \ + (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ + PyArray_API[63]) +#define PyArray_CastScalarDirect \ + (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \ + PyArray_API[64]) +#define PyArray_ScalarFromObject \ + 
(*(PyObject * (*)(PyObject *)) \ + PyArray_API[65]) +#define PyArray_GetCastFunc \ + (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \ + PyArray_API[66]) +#define PyArray_FromDims \ + (*(PyObject * (*)(int, int *, int)) \ + PyArray_API[67]) +#define PyArray_FromDimsAndDataAndDescr \ + (*(PyObject * (*)(int, int *, PyArray_Descr *, char *)) \ + PyArray_API[68]) +#define PyArray_FromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[69]) +#define PyArray_EnsureArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[70]) +#define PyArray_EnsureAnyArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[71]) +#define PyArray_FromFile \ + (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[72]) +#define PyArray_FromString \ + (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[73]) +#define PyArray_FromBuffer \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ + PyArray_API[74]) +#define PyArray_FromIter \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ + PyArray_API[75]) +#define PyArray_Return \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[76]) +#define PyArray_GetField \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[77]) +#define PyArray_SetField \ + (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ + PyArray_API[78]) +#define PyArray_Byteswap \ + (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ + PyArray_API[79]) +#define PyArray_Resize \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER)) \ + PyArray_API[80]) +#define PyArray_MoveInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[81]) +#define PyArray_CopyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[82]) +#define PyArray_CopyAnyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[83]) +#define PyArray_CopyObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[84]) +#define PyArray_NewCopy \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[85]) +#define PyArray_ToList \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[86]) +#define PyArray_ToString \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[87]) +#define PyArray_ToFile \ + (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ + PyArray_API[88]) +#define PyArray_Dump \ + (*(int (*)(PyObject *, PyObject *, int)) \ + PyArray_API[89]) +#define PyArray_Dumps \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[90]) +#define PyArray_ValidType \ + (*(int (*)(int)) \ + PyArray_API[91]) +#define PyArray_UpdateFlags \ + (*(void (*)(PyArrayObject *, int)) \ + PyArray_API[92]) +#define PyArray_New \ + (*(PyObject * (*)(PyTypeObject *, int, npy_intp *, int, npy_intp *, void *, int, int, PyObject *)) \ + PyArray_API[93]) +#define PyArray_NewFromDescr \ + (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp *, npy_intp *, void *, int, PyObject *)) \ + PyArray_API[94]) +#define PyArray_DescrNew \ + (*(PyArray_Descr * (*)(PyArray_Descr *)) \ + PyArray_API[95]) +#define PyArray_DescrNewFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[96]) +#define PyArray_GetPriority \ + (*(double (*)(PyObject *, double)) \ + PyArray_API[97]) +#define PyArray_IterNew \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[98]) +#define PyArray_MultiIterNew \ + (*(PyObject * (*)(int, ...)) \ + PyArray_API[99]) +#define PyArray_PyIntAsInt \ + (*(int (*)(PyObject *)) 
\ + PyArray_API[100]) +#define PyArray_PyIntAsIntp \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[101]) +#define PyArray_Broadcast \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[102]) +#define PyArray_FillObjectArray \ + (*(void (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[103]) +#define PyArray_FillWithScalar \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[104]) +#define PyArray_CheckStrides \ + (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)) \ + PyArray_API[105]) +#define PyArray_DescrNewByteorder \ + (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ + PyArray_API[106]) +#define PyArray_IterAllButAxis \ + (*(PyObject * (*)(PyObject *, int *)) \ + PyArray_API[107]) +#define PyArray_CheckFromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[108]) +#define PyArray_FromArray \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[109]) +#define PyArray_FromInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[110]) +#define PyArray_FromStructInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[111]) +#define PyArray_FromArrayAttr \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ + PyArray_API[112]) +#define PyArray_ScalarKind \ + (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ + PyArray_API[113]) +#define PyArray_CanCoerceScalar \ + (*(int (*)(int, int, NPY_SCALARKIND)) \ + PyArray_API[114]) +#define PyArray_NewFlagsObject \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[115]) +#define PyArray_CanCastScalar \ + (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ + PyArray_API[116]) +#define PyArray_CompareUCS4 \ + (*(int (*)(npy_ucs4 *, npy_ucs4 *, size_t)) \ + PyArray_API[117]) +#define PyArray_RemoveSmallest \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[118]) +#define PyArray_ElementStrides \ + (*(int (*)(PyObject *)) \ + PyArray_API[119]) +#define PyArray_Item_INCREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[120]) +#define PyArray_Item_XDECREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[121]) +#define PyArray_FieldNames \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[122]) +#define PyArray_Transpose \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ + PyArray_API[123]) +#define PyArray_TakeFrom \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[124]) +#define PyArray_PutTo \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ + PyArray_API[125]) +#define PyArray_PutMask \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ + PyArray_API[126]) +#define PyArray_Repeat \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ + PyArray_API[127]) +#define PyArray_Choose \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[128]) +#define PyArray_Sort \ + (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[129]) +#define PyArray_ArgSort \ + (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[130]) +#define PyArray_SearchSorted \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ + PyArray_API[131]) +#define PyArray_ArgMax \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[132]) +#define PyArray_ArgMin \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[133]) +#define PyArray_Reshape \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + 
PyArray_API[134]) +#define PyArray_Newshape \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ + PyArray_API[135]) +#define PyArray_Squeeze \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[136]) +#define PyArray_View \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ + PyArray_API[137]) +#define PyArray_SwapAxes \ + (*(PyObject * (*)(PyArrayObject *, int, int)) \ + PyArray_API[138]) +#define PyArray_Max \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[139]) +#define PyArray_Min \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[140]) +#define PyArray_Ptp \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[141]) +#define PyArray_Mean \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[142]) +#define PyArray_Trace \ + (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \ + PyArray_API[143]) +#define PyArray_Diagonal \ + (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ + PyArray_API[144]) +#define PyArray_Clip \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ + PyArray_API[145]) +#define PyArray_Conjugate \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[146]) +#define PyArray_Nonzero \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[147]) +#define PyArray_Std \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ + PyArray_API[148]) +#define PyArray_Sum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[149]) +#define PyArray_CumSum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[150]) +#define PyArray_Prod \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[151]) +#define PyArray_CumProd \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[152]) +#define PyArray_All \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[153]) +#define PyArray_Any \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[154]) +#define PyArray_Compress \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ + PyArray_API[155]) +#define PyArray_Flatten \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[156]) +#define PyArray_Ravel \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[157]) +#define PyArray_MultiplyList \ + (*(npy_intp (*)(npy_intp *, int)) \ + PyArray_API[158]) +#define PyArray_MultiplyIntList \ + (*(int (*)(int *, int)) \ + PyArray_API[159]) +#define PyArray_GetPtr \ + (*(void * (*)(PyArrayObject *, npy_intp*)) \ + PyArray_API[160]) +#define PyArray_CompareLists \ + (*(int (*)(npy_intp *, npy_intp *, int)) \ + PyArray_API[161]) +#define PyArray_AsCArray \ + (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ + PyArray_API[162]) +#define PyArray_As1D \ + (*(int (*)(PyObject **, char **, int *, int)) \ + PyArray_API[163]) +#define PyArray_As2D \ + (*(int (*)(PyObject **, char ***, int *, int *, int)) \ + PyArray_API[164]) +#define PyArray_Free \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[165]) +#define PyArray_Converter \ + (*(int (*)(PyObject *, PyObject **)) \ + PyArray_API[166]) +#define PyArray_IntpFromSequence \ + (*(int (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[167]) +#define PyArray_Concatenate \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[168]) +#define 
PyArray_InnerProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[169]) +#define PyArray_MatrixProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[170]) +#define PyArray_CopyAndTranspose \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[171]) +#define PyArray_Correlate \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[172]) +#define PyArray_TypestrConvert \ + (*(int (*)(int, int)) \ + PyArray_API[173]) +#define PyArray_DescrConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[174]) +#define PyArray_DescrConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[175]) +#define PyArray_IntpConverter \ + (*(int (*)(PyObject *, PyArray_Dims *)) \ + PyArray_API[176]) +#define PyArray_BufferConverter \ + (*(int (*)(PyObject *, PyArray_Chunk *)) \ + PyArray_API[177]) +#define PyArray_AxisConverter \ + (*(int (*)(PyObject *, int *)) \ + PyArray_API[178]) +#define PyArray_BoolConverter \ + (*(int (*)(PyObject *, npy_bool *)) \ + PyArray_API[179]) +#define PyArray_ByteorderConverter \ + (*(int (*)(PyObject *, char *)) \ + PyArray_API[180]) +#define PyArray_OrderConverter \ + (*(int (*)(PyObject *, NPY_ORDER *)) \ + PyArray_API[181]) +#define PyArray_EquivTypes \ + (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[182]) +#define PyArray_Zeros \ + (*(PyObject * (*)(int, npy_intp *, PyArray_Descr *, int)) \ + PyArray_API[183]) +#define PyArray_Empty \ + (*(PyObject * (*)(int, npy_intp *, PyArray_Descr *, int)) \ + PyArray_API[184]) +#define PyArray_Where \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ + PyArray_API[185]) +#define PyArray_Arange \ + (*(PyObject * (*)(double, double, double, int)) \ + PyArray_API[186]) +#define PyArray_ArangeObj \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \ + PyArray_API[187]) +#define PyArray_SortkindConverter \ + (*(int (*)(PyObject *, NPY_SORTKIND *)) \ + PyArray_API[188]) +#define PyArray_LexSort \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[189]) +#define PyArray_Round \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[190]) +#define PyArray_EquivTypenums \ + (*(unsigned char (*)(int, int)) \ + PyArray_API[191]) +#define PyArray_RegisterDataType \ + (*(int (*)(PyArray_Descr *)) \ + PyArray_API[192]) +#define PyArray_RegisterCastFunc \ + (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ + PyArray_API[193]) +#define PyArray_RegisterCanCast \ + (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ + PyArray_API[194]) +#define PyArray_InitArrFuncs \ + (*(void (*)(PyArray_ArrFuncs *)) \ + PyArray_API[195]) +#define PyArray_IntTupleFromIntp \ + (*(PyObject * (*)(int, npy_intp *)) \ + PyArray_API[196]) +#define PyArray_TypeNumFromName \ + (*(int (*)(char *)) \ + PyArray_API[197]) +#define PyArray_ClipmodeConverter \ + (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ + PyArray_API[198]) +#define PyArray_OutputConverter \ + (*(int (*)(PyObject *, PyArrayObject **)) \ + PyArray_API[199]) +#define PyArray_BroadcastToShape \ + (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[200]) +#define _PyArray_SigintHandler \ + (*(void (*)(int)) \ + PyArray_API[201]) +#define _PyArray_GetSigintBuf \ + (*(void* (*)(void)) \ + PyArray_API[202]) +#define PyArray_DescrAlignConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[203]) +#define PyArray_DescrAlignConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[204]) +#define 
PyArray_SearchsideConverter \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[205]) +#define PyArray_CheckAxis \ + (*(PyObject * (*)(PyArrayObject *, int *, int)) \ + PyArray_API[206]) +#define PyArray_OverflowMultiplyList \ + (*(npy_intp (*)(npy_intp *, int)) \ + PyArray_API[207]) +#define PyArray_CompareString \ + (*(int (*)(char *, char *, size_t)) \ + PyArray_API[208]) +#define PyArray_MultiIterFromObjects \ + (*(PyObject * (*)(PyObject **, int, int, ...)) \ + PyArray_API[209]) +#define PyArray_GetEndianness \ + (*(int (*)(void)) \ + PyArray_API[210]) +#define PyArray_GetNDArrayCFeatureVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[211]) +#define PyArray_Correlate2 \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[212]) +#define PyArray_NeighborhoodIterNew \ + (*(PyObject* (*)(PyArrayIterObject *, npy_intp *, int, PyArrayObject*)) \ + PyArray_API[213]) +#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) +#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) +#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) +#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) +#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) +#define PyArray_SetDatetimeParseFunction \ + (*(void (*)(PyObject *)) \ + PyArray_API[219]) +#define PyArray_DatetimeToDatetimeStruct \ + (*(void (*)(npy_datetime, NPY_DATETIMEUNIT, npy_datetimestruct *)) \ + PyArray_API[220]) +#define PyArray_TimedeltaToTimedeltaStruct \ + (*(void (*)(npy_timedelta, NPY_DATETIMEUNIT, npy_timedeltastruct *)) \ + PyArray_API[221]) +#define PyArray_DatetimeStructToDatetime \ + (*(npy_datetime (*)(NPY_DATETIMEUNIT, npy_datetimestruct *)) \ + PyArray_API[222]) +#define PyArray_TimedeltaStructToTimedelta \ + (*(npy_datetime (*)(NPY_DATETIMEUNIT, npy_timedeltastruct *)) \ + PyArray_API[223]) +#define NpyIter_New \ + (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ + PyArray_API[224]) +#define NpyIter_MultiNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ + PyArray_API[225]) +#define NpyIter_AdvancedNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ + PyArray_API[226]) +#define NpyIter_Copy \ + (*(NpyIter * (*)(NpyIter *)) \ + PyArray_API[227]) +#define NpyIter_Deallocate \ + (*(int (*)(NpyIter *)) \ + PyArray_API[228]) +#define NpyIter_HasDelayedBufAlloc \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[229]) +#define NpyIter_HasExternalLoop \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[230]) +#define NpyIter_EnableExternalLoop \ + (*(int (*)(NpyIter *)) \ + PyArray_API[231]) +#define NpyIter_GetInnerStrideArray \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[232]) +#define NpyIter_GetInnerLoopSizePtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[233]) +#define NpyIter_Reset \ + (*(int (*)(NpyIter *, char **)) \ + PyArray_API[234]) +#define NpyIter_ResetBasePointers \ + (*(int (*)(NpyIter *, char **, char **)) \ + PyArray_API[235]) +#define NpyIter_ResetToIterIndexRange \ + (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ + PyArray_API[236]) +#define NpyIter_GetNDim \ + (*(int (*)(NpyIter *)) \ + PyArray_API[237]) +#define NpyIter_GetNOp \ + (*(int (*)(NpyIter *)) \ + PyArray_API[238]) +#define NpyIter_GetIterNext \ + (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ + PyArray_API[239]) +#define NpyIter_GetIterSize \ + (*(npy_intp 
(*)(NpyIter *)) \ + PyArray_API[240]) +#define NpyIter_GetIterIndexRange \ + (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ + PyArray_API[241]) +#define NpyIter_GetIterIndex \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[242]) +#define NpyIter_GotoIterIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[243]) +#define NpyIter_HasMultiIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[244]) +#define NpyIter_GetShape \ + (*(int (*)(NpyIter *, npy_intp *)) \ + PyArray_API[245]) +#define NpyIter_GetGetMultiIndex \ + (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ + PyArray_API[246]) +#define NpyIter_GotoMultiIndex \ + (*(int (*)(NpyIter *, npy_intp *)) \ + PyArray_API[247]) +#define NpyIter_RemoveMultiIndex \ + (*(int (*)(NpyIter *)) \ + PyArray_API[248]) +#define NpyIter_HasIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[249]) +#define NpyIter_IsBuffered \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[250]) +#define NpyIter_IsGrowInner \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[251]) +#define NpyIter_GetBufferSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[252]) +#define NpyIter_GetIndexPtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[253]) +#define NpyIter_GotoIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[254]) +#define NpyIter_GetDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[255]) +#define NpyIter_GetDescrArray \ + (*(PyArray_Descr ** (*)(NpyIter *)) \ + PyArray_API[256]) +#define NpyIter_GetOperandArray \ + (*(PyArrayObject ** (*)(NpyIter *)) \ + PyArray_API[257]) +#define NpyIter_GetIterView \ + (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ + PyArray_API[258]) +#define NpyIter_GetReadFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[259]) +#define NpyIter_GetWriteFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[260]) +#define NpyIter_DebugPrint \ + (*(void (*)(NpyIter *)) \ + PyArray_API[261]) +#define NpyIter_IterationNeedsAPI \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[262]) +#define NpyIter_GetInnerFixedStrideArray \ + (*(void (*)(NpyIter *, npy_intp *)) \ + PyArray_API[263]) +#define NpyIter_RemoveAxis \ + (*(int (*)(NpyIter *, int)) \ + PyArray_API[264]) +#define NpyIter_GetAxisStrideArray \ + (*(npy_intp * (*)(NpyIter *, int)) \ + PyArray_API[265]) +#define NpyIter_RequiresBuffering \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[266]) +#define NpyIter_GetInitialDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[267]) +#define NpyIter_CreateCompatibleStrides \ + (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \ + PyArray_API[268]) +#define PyArray_CastingConverter \ + (*(int (*)(PyObject *, NPY_CASTING *)) \ + PyArray_API[269]) +#define PyArray_CountNonzero \ + (*(npy_intp (*)(PyArrayObject *)) \ + PyArray_API[270]) +#define PyArray_PromoteTypes \ + (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[271]) +#define PyArray_MinScalarType \ + (*(PyArray_Descr * (*)(PyArrayObject *)) \ + PyArray_API[272]) +#define PyArray_ResultType \ + (*(PyArray_Descr * (*)(npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **)) \ + PyArray_API[273]) +#define PyArray_CanCastArrayTo \ + (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[274]) +#define PyArray_CanCastTypeTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[275]) +#define PyArray_EinsteinSum \ + (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ + PyArray_API[276]) +#define 
PyArray_NewLikeArray \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ + PyArray_API[277]) +#define PyArray_GetArrayParamsFromObject \ + (*(int (*)(PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *)) \ + PyArray_API[278]) +#define PyArray_ConvertClipmodeSequence \ + (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ + PyArray_API[279]) +#define PyArray_MatrixProduct2 \ + (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ + PyArray_API[280]) +#define NpyIter_IsFirstVisit \ + (*(npy_bool (*)(NpyIter *, int)) \ + PyArray_API[281]) +#define PyArray_SetBaseObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[282]) +#define PyArray_CreateSortedStridePerm \ + (*(void (*)(int, npy_intp *, npy_stride_sort_item *)) \ + PyArray_API[283]) +#define PyArray_RemoveAxesInPlace \ + (*(void (*)(PyArrayObject *, npy_bool *)) \ + PyArray_API[284]) +#define PyArray_DebugPrint \ + (*(void (*)(PyArrayObject *)) \ + PyArray_API[285]) +#define PyArray_FailUnlessWriteable \ + (*(int (*)(PyArrayObject *, const char *)) \ + PyArray_API[286]) +#define PyArray_SetUpdateIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[287]) +#define PyDataMem_NEW \ + (*(void * (*)(size_t)) \ + PyArray_API[288]) +#define PyDataMem_FREE \ + (*(void (*)(void *)) \ + PyArray_API[289]) +#define PyDataMem_RENEW \ + (*(void * (*)(void *, size_t)) \ + PyArray_API[290]) +#define PyDataMem_SetEventHook \ + (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \ + PyArray_API[291]) +#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) +#define PyArray_MapIterSwapAxes \ + (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \ + PyArray_API[293]) +#define PyArray_MapIterArray \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[294]) +#define PyArray_MapIterNext \ + (*(void (*)(PyArrayMapIterObject *)) \ + PyArray_API[295]) +#define PyArray_Partition \ + (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[296]) +#define PyArray_ArgPartition \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[297]) +#define PyArray_SelectkindConverter \ + (*(int (*)(PyObject *, NPY_SELECTKIND *)) \ + PyArray_API[298]) +#define PyDataMem_NEW_ZEROED \ + (*(void * (*)(size_t, size_t)) \ + PyArray_API[299]) +#define PyArray_CheckAnyScalarExact \ + (*(int (*)(PyObject *)) \ + PyArray_API[300]) +#define PyArray_MapIterArrayCopyIfOverlap \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ + PyArray_API[301]) +#define PyArray_ResolveWritebackIfCopy \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[302]) +#define PyArray_SetWritebackIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[303]) + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + int st; + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + PyObject *c_api = NULL; + + if (numpy == NULL) { + return -1; + } + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); + return -1; + } + +#if PY_VERSION_HEX >= 0x03000000 + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); +#else + if 
(!PyCObject_Check(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); +#endif + Py_DECREF(c_api); + if (PyArray_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); + return -1; + } + + /* Perform runtime check of C API version */ + if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "ABI version 0x%x but this version of numpy is 0x%x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "API version 0x%x but this version of numpy is 0x%x", \ + (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); + return -1; + } + + /* + * Perform runtime check of endianness and check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "big endian, but detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "little endian, but detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#if PY_VERSION_HEX >= 0x03000000 +#define NUMPY_IMPORT_ARRAY_RETVAL NULL +#else +#define NUMPY_IMPORT_ARRAY_RETVAL +#endif + +#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } + +#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } + +#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } + +#endif + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__ufunc_api.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__ufunc_api.h new file mode 100644 index 0000000..22d2ba1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__ufunc_api.h @@ -0,0 +1,326 @@ + +#ifdef _UMATHMODULE + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ + (PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *); +NPY_NO_EXPORT int PyUFunc_GenericFunction \ + (PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **); +NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *); 
+NPY_NO_EXPORT void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT int PyUFunc_GetPyValues \ + (char *, int *, int *, PyObject **); +NPY_NO_EXPORT int PyUFunc_checkfperr \ + (int, PyObject *, int *); +NPY_NO_EXPORT void PyUFunc_clearfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_getfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_handlefperr \ + (int, PyObject *, int, int *); +NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ + (PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *); +NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \ + (void **, size_t); +NPY_NO_EXPORT void PyUFunc_e_e \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_ValidateCasting \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ + (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *, PyObject *); + +#else + +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) +extern void **PyUFunc_API; +#else +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +void **PyUFunc_API; +#else +static void **PyUFunc_API=NULL; +#endif +#endif + +#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) +#define PyUFunc_FromFuncAndData \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char 
*, int, int, int, int, const char *, const char *, int)) \ + PyUFunc_API[1]) +#define PyUFunc_RegisterLoopForType \ + (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *)) \ + PyUFunc_API[2]) +#define PyUFunc_GenericFunction \ + (*(int (*)(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **)) \ + PyUFunc_API[3]) +#define PyUFunc_f_f_As_d_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[4]) +#define PyUFunc_d_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[5]) +#define PyUFunc_f_f \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[6]) +#define PyUFunc_g_g \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[7]) +#define PyUFunc_F_F_As_D_D \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[8]) +#define PyUFunc_F_F \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[9]) +#define PyUFunc_D_D \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[10]) +#define PyUFunc_G_G \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[11]) +#define PyUFunc_O_O \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[12]) +#define PyUFunc_ff_f_As_dd_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[13]) +#define PyUFunc_ff_f \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[14]) +#define PyUFunc_dd_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[15]) +#define PyUFunc_gg_g \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[16]) +#define PyUFunc_FF_F_As_DD_D \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[17]) +#define PyUFunc_DD_D \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[18]) +#define PyUFunc_FF_F \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[19]) +#define PyUFunc_GG_G \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[20]) +#define PyUFunc_OO_O \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[21]) +#define PyUFunc_O_O_method \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[22]) +#define PyUFunc_OO_O_method \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[23]) +#define PyUFunc_On_Om \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[24]) +#define PyUFunc_GetPyValues \ + (*(int (*)(char *, int *, int *, PyObject **)) \ + PyUFunc_API[25]) +#define PyUFunc_checkfperr \ + (*(int (*)(int, PyObject *, int *)) \ + PyUFunc_API[26]) +#define PyUFunc_clearfperr \ + (*(void (*)(void)) \ + PyUFunc_API[27]) +#define PyUFunc_getfperr \ + (*(int (*)(void)) \ + PyUFunc_API[28]) +#define PyUFunc_handlefperr \ + (*(int (*)(int, PyObject *, int, int *)) \ + PyUFunc_API[29]) +#define PyUFunc_ReplaceLoopBySignature \ + (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)) \ + PyUFunc_API[30]) +#define PyUFunc_FromFuncAndDataAndSignature \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \ + PyUFunc_API[31]) +#define PyUFunc_SetUsesArraysAsData \ + (*(int (*)(void **, size_t)) \ + PyUFunc_API[32]) +#define PyUFunc_e_e \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[33]) +#define PyUFunc_e_e_As_f_f \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + 
PyUFunc_API[34]) +#define PyUFunc_e_e_As_d_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[35]) +#define PyUFunc_ee_e \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[36]) +#define PyUFunc_ee_e_As_ff_f \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[37]) +#define PyUFunc_ee_e_As_dd_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[38]) +#define PyUFunc_DefaultTypeResolver \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ + PyUFunc_API[39]) +#define PyUFunc_ValidateCasting \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \ + PyUFunc_API[40]) +#define PyUFunc_RegisterLoopForDescr \ + (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ + PyUFunc_API[41]) +#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *, PyObject *)) \ + PyUFunc_API[42]) + +static NPY_INLINE int +_import_umath(void) +{ + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + PyObject *c_api = NULL; + + if (numpy == NULL) { + PyErr_SetString(PyExc_ImportError, + "numpy.core._multiarray_umath failed to import"); + return -1; + } + c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); + return -1; + } + +#if PY_VERSION_HEX >= 0x03000000 + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); +#else + if (!PyCObject_Check(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object"); + Py_DECREF(c_api); + return -1; + } + PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); +#endif + Py_DECREF(c_api); + if (PyUFunc_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); + return -1; + } + return 0; +} + +#if PY_VERSION_HEX >= 0x03000000 +#define NUMPY_IMPORT_UMATH_RETVAL NULL +#else +#define NUMPY_IMPORT_UMATH_RETVAL +#endif + +#define import_umath() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy.core.umath failed to import");\ + return NUMPY_IMPORT_UMATH_RETVAL;\ + }\ + } while(0) + +#define import_umath1(ret) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy.core.umath failed to import");\ + return ret;\ + }\ + } while(0) + +#define import_umath2(ret, msg) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError, msg);\ + return ret;\ + }\ + } while(0) + +#define import_ufunc() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy.core.umath failed to import");\ + }\ + } while(0) + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h new file mode 100644 index 0000000..e8860cb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h @@ -0,0 +1,90 @@ +#ifndef 
_NPY_INCLUDE_NEIGHBORHOOD_IMP
+#error You should not include this header directly
+#endif
+/*
+ * Private API (here for inline)
+ */
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
+
+/*
+ * Update to next item of the iterator
+ *
+ * Note: this simply increments the coordinates vector, last dimension
+ * incremented first, i.e., for dimension 3
+ * ...
+ * -1, -1, -1
+ * -1, -1, 0
+ * -1, -1, 1
+ * ....
+ * -1, 0, -1
+ * -1, 0, 0
+ * ....
+ * 0, -1, -1
+ * 0, -1, 0
+ * ....
+ */
+#define _UPDATE_COORD_ITER(c) \
+    wb = iter->coordinates[c] < iter->bounds[c][1]; \
+    if (wb) { \
+        iter->coordinates[c] += 1; \
+        return 0; \
+    } \
+    else { \
+        iter->coordinates[c] = iter->bounds[c][0]; \
+    }
+
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
+{
+    npy_intp i, wb;
+
+    for (i = iter->nd - 1; i >= 0; --i) {
+        _UPDATE_COORD_ITER(i)
+    }
+
+    return 0;
+}
+
+/*
+ * Version optimized for 2d arrays, manual loop unrolling
+ */
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
+{
+    npy_intp wb;
+
+    _UPDATE_COORD_ITER(1)
+    _UPDATE_COORD_ITER(0)
+
+    return 0;
+}
+#undef _UPDATE_COORD_ITER
+
+/*
+ * Advance to the next neighbour
+ */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
+{
+    _PyArrayNeighborhoodIter_IncrCoord (iter);
+    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+    return 0;
+}
+
+/*
+ * Reset functions
+ */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
+{
+    npy_intp i;
+
+    for (i = 0; i < iter->nd; ++i) {
+        iter->coordinates[i] = iter->bounds[i][0];
+    }
+    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+    return 0;
+}
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_numpyconfig.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_numpyconfig.h
new file mode 100644
index 0000000..edb7e37
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_numpyconfig.h
@@ -0,0 +1,32 @@
+#define NPY_HAVE_ENDIAN_H 1
+#define NPY_SIZEOF_SHORT SIZEOF_SHORT
+#define NPY_SIZEOF_INT SIZEOF_INT
+#define NPY_SIZEOF_LONG SIZEOF_LONG
+#define NPY_SIZEOF_FLOAT 4
+#define NPY_SIZEOF_COMPLEX_FLOAT 8
+#define NPY_SIZEOF_DOUBLE 8
+#define NPY_SIZEOF_COMPLEX_DOUBLE 16
+#define NPY_SIZEOF_LONGDOUBLE 16
+#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+#define NPY_SIZEOF_PY_INTPTR_T 8
+#define NPY_SIZEOF_OFF_T 8
+#define NPY_SIZEOF_PY_LONG_LONG 8
+#define NPY_SIZEOF_LONGLONG 8
+#define NPY_NO_SMP 0
+#define NPY_HAVE_DECL_ISNAN
+#define NPY_HAVE_DECL_ISINF
+#define NPY_HAVE_DECL_ISFINITE
+#define NPY_HAVE_DECL_SIGNBIT
+#define NPY_USE_C99_COMPLEX 1
+#define NPY_HAVE_COMPLEX_DOUBLE 1
+#define NPY_HAVE_COMPLEX_FLOAT 1
+#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
+#define NPY_RELAXED_STRIDES_CHECKING 1
+#define NPY_USE_C99_FORMATS 1
+#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
+#define NPY_ABI_VERSION 0x01000009
+#define NPY_API_VERSION 0x0000000D
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS 1
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayobject.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayobject.h
new file mode 100644
index 0000000..4f46d6b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayobject.h
@@ -0,0 +1,11 @@
+#ifndef Py_ARRAYOBJECT_H +#define Py_ARRAYOBJECT_H + +#include "ndarrayobject.h" +#include "npy_interrupt.h" + +#ifdef NPY_NO_PREFIX +#include "noprefix.h" +#endif + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayscalars.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayscalars.h new file mode 100644 index 0000000..64450e7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayscalars.h @@ -0,0 +1,175 @@ +#ifndef _NPY_ARRAYSCALARS_H_ +#define _NPY_ARRAYSCALARS_H_ + +#ifndef _MULTIARRAYMODULE +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; +#endif + + +typedef struct { + PyObject_HEAD + signed char obval; +} PyByteScalarObject; + + +typedef struct { + PyObject_HEAD + short obval; +} PyShortScalarObject; + + +typedef struct { + PyObject_HEAD + int obval; +} PyIntScalarObject; + + +typedef struct { + PyObject_HEAD + long obval; +} PyLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longlong obval; +} PyLongLongScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned char obval; +} PyUByteScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned short obval; +} PyUShortScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned int obval; +} PyUIntScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned long obval; +} PyULongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_ulonglong obval; +} PyULongLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_half obval; +} PyHalfScalarObject; + + +typedef struct { + PyObject_HEAD + float obval; +} PyFloatScalarObject; + + +typedef struct { + PyObject_HEAD + double obval; +} PyDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longdouble obval; +} PyLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cfloat obval; +} PyCFloatScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cdouble obval; +} PyCDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_clongdouble obval; +} PyCLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + PyObject * obval; +} PyObjectScalarObject; + +typedef struct { + PyObject_HEAD + npy_datetime obval; + PyArray_DatetimeMetaData obmeta; +} PyDatetimeScalarObject; + +typedef struct { + PyObject_HEAD + npy_timedelta obval; + PyArray_DatetimeMetaData obmeta; +} PyTimedeltaScalarObject; + + +typedef struct { + PyObject_HEAD + char obval; +} PyScalarObject; + +#define PyStringScalarObject PyStringObject +#define PyUnicodeScalarObject PyUnicodeObject + +typedef struct { + PyObject_VAR_HEAD + char *obval; + PyArray_Descr *descr; + int flags; + PyObject *base; +} PyVoidScalarObject; + +/* Macros + PyScalarObject + PyArrType_Type + are defined in ndarrayobject.h +*/ + +#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) +#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) +#define PyArrayScalar_FromLong(i) \ + ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ + return Py_INCREF(PyArrayScalar_FromLong(i)), \ + PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_FALSE \ + return Py_INCREF(PyArrayScalar_False), \ + PyArrayScalar_False +#define PyArrayScalar_RETURN_TRUE \ + return Py_INCREF(PyArrayScalar_True), \ + PyArrayScalar_True + +#define PyArrayScalar_New(cls) \ + Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) +#define PyArrayScalar_VAL(obj, cls) \ + ((Py##cls##ScalarObject 
*)obj)->obval
+#define PyArrayScalar_ASSIGN(obj, cls, val) \
+    PyArrayScalar_VAL(obj, cls) = val
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/halffloat.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/halffloat.h
new file mode 100644
index 0000000..ab0d221
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/halffloat.h
@@ -0,0 +1,70 @@
+#ifndef __NPY_HALFFLOAT_H__
+#define __NPY_HALFFLOAT_H__
+
+#include <Python.h>
+#include <numpy/npy_math.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Half-precision routines
+ */
+
+/* Conversions */
+float npy_half_to_float(npy_half h);
+double npy_half_to_double(npy_half h);
+npy_half npy_float_to_half(float f);
+npy_half npy_double_to_half(double d);
+/* Comparisons */
+int npy_half_eq(npy_half h1, npy_half h2);
+int npy_half_ne(npy_half h1, npy_half h2);
+int npy_half_le(npy_half h1, npy_half h2);
+int npy_half_lt(npy_half h1, npy_half h2);
+int npy_half_ge(npy_half h1, npy_half h2);
+int npy_half_gt(npy_half h1, npy_half h2);
+/* faster *_nonan variants for when you know h1 and h2 are not NaN */
+int npy_half_eq_nonan(npy_half h1, npy_half h2);
+int npy_half_lt_nonan(npy_half h1, npy_half h2);
+int npy_half_le_nonan(npy_half h1, npy_half h2);
+/* Miscellaneous functions */
+int npy_half_iszero(npy_half h);
+int npy_half_isnan(npy_half h);
+int npy_half_isinf(npy_half h);
+int npy_half_isfinite(npy_half h);
+int npy_half_signbit(npy_half h);
+npy_half npy_half_copysign(npy_half x, npy_half y);
+npy_half npy_half_spacing(npy_half h);
+npy_half npy_half_nextafter(npy_half x, npy_half y);
+npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus);
+
+/*
+ * Half-precision constants
+ */
+
+#define NPY_HALF_ZERO   (0x0000u)
+#define NPY_HALF_PZERO  (0x0000u)
+#define NPY_HALF_NZERO  (0x8000u)
+#define NPY_HALF_ONE    (0x3c00u)
+#define NPY_HALF_NEGONE (0xbc00u)
+#define NPY_HALF_PINF   (0x7c00u)
+#define NPY_HALF_NINF   (0xfc00u)
+#define NPY_HALF_NAN    (0x7e00u)
+
+#define NPY_MAX_HALF    (0x7bffu)
+
+/*
+ * Bit-level conversions
+ */
+
+npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);
+npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);
+npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);
+npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/multiarray_api.txt b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/multiarray_api.txt
new file mode 100644
index 0000000..7c45394
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/multiarray_api.txt
@@ -0,0 +1,2501 @@
+
+===========
+NumPy C-API
+===========
+::
+
+  unsigned int
+  PyArray_GetNDArrayCVersion(void )
+
+
+Included at the very first so not auto-grabbed and thus not labeled.
+
+::
+
+  int
+  PyArray_SetNumericOps(PyObject *dict)
+
+Set internal structure with number functions that all arrays will use
+
+::
+
+  PyObject *
+  PyArray_GetNumericOps(void )
+
+Get dictionary showing number functions that all arrays will use
+
+::
+
+  int
+  PyArray_INCREF(PyArrayObject *mp)
+
+For object arrays, increment all internal references.
+
+::
+
+  int
+  PyArray_XDECREF(PyArrayObject *mp)
+
+Decrement all internal references for object arrays.
+(or arrays with object fields)
+
+::
+
+  void
+  PyArray_SetStringFunction(PyObject *op, int repr)
+
+Set the array print function to be a Python function.
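An illustrative usage sketch, not part of the generated listing: the PyArray_INCREF/PyArray_XDECREF pair above can bracket raw access to an object-dtype array. The helper name retain_object_items is hypothetical, and the snippet assumes import_array() has already run in the module init.

::

    #include <Python.h>
    #include <numpy/arrayobject.h>

    /* Hypothetical helper: pin the PyObject* items of an object-dtype
       array while other code works with the raw data buffer. */
    static int
    retain_object_items(PyArrayObject *arr)
    {
        if (PyArray_INCREF(arr) < 0) {   /* bump every internal reference */
            return -1;
        }
        /* ... hand PyArray_DATA(arr), here an array of PyObject*, to a
           consumer that keeps its own copies of the pointers ... */
        return PyArray_XDECREF(arr);     /* drop the extra references */
    }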
+
+::
+
+  PyArray_Descr *
+  PyArray_DescrFromType(int type)
+
+Get the PyArray_Descr structure for a type.
+
+::
+
+  PyObject *
+  PyArray_TypeObjectFromType(int type)
+
+Get a typeobject from a type-number -- can return NULL.
+
+New reference
+
+::
+
+  char *
+  PyArray_Zero(PyArrayObject *arr)
+
+Get pointer to zero of correct type for array.
+
+::
+
+  char *
+  PyArray_One(PyArrayObject *arr)
+
+Get pointer to one of correct type for array
+
+::
+
+  PyObject *
+  PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int
+                     is_f_order)
+
+For backward compatibility
+
+Cast an array using typecode structure.
+steals reference to dtype --- cannot be NULL
+
+This function always makes a copy of arr, even if the dtype
+doesn't change.
+
+::
+
+  int
+  PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp)
+
+Cast to an already created array.
+
+::
+
+  int
+  PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp)
+
+Cast to an already created array. Arrays don't have to be "broadcastable"
+Only requirement is they have the same number of elements.
+
+::
+
+  int
+  PyArray_CanCastSafely(int fromtype, int totype)
+
+Check the type coercion rules.
+
+::
+
+  npy_bool
+  PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to)
+
+leaves reference count alone --- cannot be NULL
+
+PyArray_CanCastTypeTo is equivalent to this, but adds a 'casting'
+parameter.
+
+::
+
+  int
+  PyArray_ObjectType(PyObject *op, int minimum_type)
+
+Return the typecode of the array a Python object would be converted to
+
+Returns the type number the result should have, or NPY_NOTYPE on error.
+
+::
+
+  PyArray_Descr *
+  PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype)
+
+new reference -- accepts NULL for mintype
+
+::
+
+  PyArrayObject **
+  PyArray_ConvertToCommonType(PyObject *op, int *retn)
+
+
+::
+
+  PyArray_Descr *
+  PyArray_DescrFromScalar(PyObject *sc)
+
+Return descr object from array scalar.
+
+New reference
+
+::
+
+  PyArray_Descr *
+  PyArray_DescrFromTypeObject(PyObject *type)
+
+
+::
+
+  npy_intp
+  PyArray_Size(PyObject *op)
+
+Compute the size of an array (in number of items)
+
+::
+
+  PyObject *
+  PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
+
+Get scalar-equivalent to a region of memory described by a descriptor.
+
+::
+
+  PyObject *
+  PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode)
+
+Get 0-dim array from scalar
+
+0-dim array from array-scalar object
+always contains a copy of the data
+unless outcode is NULL, it is of void type and the referrer does
+not own it either.
+
+steals reference to outcode
+
+::
+
+  void
+  PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr)
+
+Convert to c-type
+
+no error checking is performed -- ctypeptr must be same type as scalar
+in case of flexible type, the data is not copied
+into ctypeptr which is expected to be a pointer to pointer
+
+::
+
+  int
+  PyArray_CastScalarToCtype(PyObject *scalar, void
+                            *ctypeptr, PyArray_Descr *outcode)
+
+Cast Scalar to c-type
+
+The output buffer must be large enough to receive the value
+Even for flexible types which is different from ScalarAsCtype
+where only a reference for flexible types is returned
+
+This may not work right on narrow builds for NumPy unicode scalars.
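A minimal sketch of the scalar-to-C-type path just described, illustrative only and not part of the generated listing. The helper name scalar_to_double is hypothetical, and the descriptor reference is assumed to be borrowed (not stolen) by the cast call, hence the Py_DECREF afterwards.

::

    #include <Python.h>
    #include <numpy/arrayobject.h>

    /* Hypothetical helper: coerce any NumPy array scalar to a C double. */
    static int
    scalar_to_double(PyObject *scalar, double *out)
    {
        PyArray_Descr *d = PyArray_DescrFromType(NPY_DOUBLE);  /* new ref */
        int ret;
        if (d == NULL) {
            return -1;
        }
        ret = PyArray_CastScalarToCtype(scalar, (void *)out, d);
        Py_DECREF(d);  /* reference assumed borrowed by the cast call */
        return ret;
    }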
+
+::
+
+  int
+  PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr
+                           *indescr, void *ctypeptr, int outtype)
+
+Cast Scalar to c-type
+
+::
+
+  PyObject *
+  PyArray_ScalarFromObject(PyObject *object)
+
+Get an Array Scalar From a Python Object
+
+Returns NULL if unsuccessful but error is only set if another error occurred.
+Currently only Numeric-like objects are supported.
+
+::
+
+  PyArray_VectorUnaryFunc *
+  PyArray_GetCastFunc(PyArray_Descr *descr, int type_num)
+
+Get a cast function to cast from the input descriptor to the
+output type_number (must be a registered data-type).
+Returns NULL if unsuccessful.
+
+::
+
+  PyObject *
+  PyArray_FromDims(int nd, int *d, int type)
+
+Construct an empty array from dimensions and typenum
+
+::
+
+  PyObject *
+  PyArray_FromDimsAndDataAndDescr(int nd, int *d, PyArray_Descr
+                                  *descr, char *data)
+
+Like FromDimsAndData but uses the Descr structure instead of typecode
+as input.
+
+::
+
+  PyObject *
+  PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int
+                  min_depth, int max_depth, int flags, PyObject
+                  *context)
+
+Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags
+Steals a reference to newtype --- which can be NULL
+
+::
+
+  PyObject *
+  PyArray_EnsureArray(PyObject *op)
+
+This is a quick wrapper around
+PyArray_FromAny(op, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL)
+that special cases Arrays and PyArray_Scalars up front.
+It *steals a reference* to the object and guarantees that the result
+is of PyArray_Type. Because it decrefs op if any conversion needs to
+take place, it can be used like PyArray_EnsureArray(some_function(...)).
+
+::
+
+  PyObject *
+  PyArray_EnsureAnyArray(PyObject *op)
+
+
+::
+
+  PyObject *
+  PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char
+                   *sep)
+
+
+Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an
+array corresponding to the data encoded in that file.
+
+If the dtype is NULL, the default array type is used (double).
+If non-null, the reference is stolen, and if dtype->subarray is true,
+dtype will be decrefed even on success.
+
+The number of elements to read is given as ``num``; if it is < 0, then
+as many as possible are read.
+
+If ``sep`` is NULL or empty, then binary data is assumed, else
+text data, with ``sep`` as the separator between elements. Whitespace in
+the separator matches any length of whitespace in the text, and a match
+for whitespace around the separator is added.
+
+For memory-mapped files, use the buffer interface. No more data than
+necessary is read by this routine.
+
+::
+
+  PyObject *
+  PyArray_FromString(char *data, npy_intp slen, PyArray_Descr
+                     *dtype, npy_intp num, char *sep)
+
+
+Given a pointer to a string ``data``, a string length ``slen``, and
+a ``PyArray_Descr``, return an array corresponding to the data
+encoded in that string.
+
+If the dtype is NULL, the default array type is used (double).
+If non-null, the reference is stolen.
+
+If ``slen`` is < 0, then the end of string is used for text data.
+It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs
+would be the norm).
+
+The number of elements to read is given as ``num``; if it is < 0, then
+as many as possible are read.
+
+If ``sep`` is NULL or empty, then binary data is assumed, else
+text data, with ``sep`` as the separator between elements. Whitespace in
+the separator matches any length of whitespace in the text, and a match
+for whitespace around the separator is added.
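+
+As a usage sketch of PyArray_FromAny (hypothetical helper; FromAny steals
+the reference to the descriptor, so no DECREF is needed here):
+
+::
+
+  /* Coerce an arbitrary Python object to a C-contiguous double array. */
+  static PyObject *
+  as_double_array(PyObject *op)
+  {
+      PyArray_Descr *dtype = PyArray_DescrFromType(NPY_DOUBLE);
+      return PyArray_FromAny(op, dtype, 0, 0, NPY_ARRAY_CARRAY, NULL);
+  }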
+ +:: + + PyObject * + PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, npy_intp + count, npy_intp offset) + + +:: + + PyObject * + PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) + + +steals a reference to dtype (which cannot be NULL) + +:: + + PyObject * + PyArray_Return(PyArrayObject *mp) + + +Return either an array or the appropriate Python object if the array +is 0d and matches a Python type. +steals reference to mp + +:: + + PyObject * + PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int + offset) + +Get a subset of bytes from each element of the array +steals reference to typed, must not be NULL + +:: + + int + PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int + offset, PyObject *val) + +Set a subset of bytes from each element of the array +steals reference to dtype, must not be NULL + +:: + + PyObject * + PyArray_Byteswap(PyArrayObject *self, npy_bool inplace) + + +:: + + PyObject * + PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int + refcheck, NPY_ORDER order) + +Resize (reallocate data). Only works if nothing else is referencing this +array and it is contiguous. If refcheck is 0, then the reference count is +not checked and assumed to be 1. You still must own this data and have no +weak-references and no base object. + +:: + + int + PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src) + +Move the memory of one array into another, allowing for overlapping data. + +Returns 0 on success, negative on failure. + +:: + + int + PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src) + +Copy an Array into another array. +Broadcast to the destination shape if necessary. + +Returns 0 on success, -1 on failure. + +:: + + int + PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src) + +Copy an Array into another array -- memory must not overlap +Does not require src and dest to have "broadcastable" shapes +(only the same number of elements). + +TODO: For NumPy 2.0, this could accept an order parameter which +only allows NPY_CORDER and NPY_FORDER. Could also rename +this to CopyAsFlat to make the name more intuitive. + +Returns 0 on success, -1 on error. + +:: + + int + PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) + + +:: + + PyObject * + PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order) + +Copy an array. + +:: + + PyObject * + PyArray_ToList(PyArrayObject *self) + +To List + +:: + + PyObject * + PyArray_ToString(PyArrayObject *self, NPY_ORDER order) + + +:: + + int + PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) + +To File + +:: + + int + PyArray_Dump(PyObject *self, PyObject *file, int protocol) + + +:: + + PyObject * + PyArray_Dumps(PyObject *self, int protocol) + + +:: + + int + PyArray_ValidType(int type) + +Is the typenum valid? + +:: + + void + PyArray_UpdateFlags(PyArrayObject *ret, int flagmask) + +Update Several Flags at once. + +:: + + PyObject * + PyArray_New(PyTypeObject *subtype, int nd, npy_intp *dims, int + type_num, npy_intp *strides, void *data, int itemsize, int + flags, PyObject *obj) + +Generic new array creation routine. + +:: + + PyObject * + PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int + nd, npy_intp *dims, npy_intp *strides, void + *data, int flags, PyObject *obj) + +Generic new array creation routine. + +steals a reference to descr. On failure or when dtype->subarray is +true, dtype will be decrefed. 
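+
+A minimal sketch of the creation routine above (``n`` is an assumed element
+count; NewFromDescr steals the reference to ``descr``):
+
+::
+
+  /* Allocate a fresh 1-D float64 array of n elements. */
+  npy_intp dims[1] = {n};
+  PyArray_Descr *descr = PyArray_DescrFromType(NPY_DOUBLE);
+  PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims,
+                                       NULL, NULL, 0, NULL);
+  if (arr == NULL) {
+      return NULL;
+  }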
+ +:: + + PyArray_Descr * + PyArray_DescrNew(PyArray_Descr *base) + +base cannot be NULL + +:: + + PyArray_Descr * + PyArray_DescrNewFromType(int type_num) + + +:: + + double + PyArray_GetPriority(PyObject *obj, double default_) + +Get Priority from object + +:: + + PyObject * + PyArray_IterNew(PyObject *obj) + +Get Iterator. + +:: + + PyObject * + PyArray_MultiIterNew(int n, ... ) + +Get MultiIterator, + +:: + + int + PyArray_PyIntAsInt(PyObject *o) + + +:: + + npy_intp + PyArray_PyIntAsIntp(PyObject *o) + + +:: + + int + PyArray_Broadcast(PyArrayMultiIterObject *mit) + + +:: + + void + PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) + +Assumes contiguous + +:: + + int + PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) + + +:: + + npy_bool + PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp + offset, npy_intp *dims, npy_intp *newstrides) + + +:: + + PyArray_Descr * + PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) + + +returns a copy of the PyArray_Descr structure with the byteorder +altered: +no arguments: The byteorder is swapped (in all subfields as well) +single argument: The byteorder is forced to the given state +(in all subfields as well) + +Valid states: ('big', '>') or ('little' or '<') +('native', or '=') + +If a descr structure with | is encountered it's own +byte-order is not changed but any fields are: + + +Deep bytorder change of a data-type descriptor +Leaves reference count of self unchanged --- does not DECREF self *** + +:: + + PyObject * + PyArray_IterAllButAxis(PyObject *obj, int *inaxis) + +Get Iterator that iterates over all but one axis (don't use this with +PyArray_ITER_GOTO1D). The axis will be over-written if negative +with the axis having the smallest stride. + +:: + + PyObject * + PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int + min_depth, int max_depth, int requires, PyObject + *context) + +steals a reference to descr -- accepts NULL + +:: + + PyObject * + PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int + flags) + +steals reference to newtype --- acc. NULL + +:: + + PyObject * + PyArray_FromInterface(PyObject *origin) + + +:: + + PyObject * + PyArray_FromStructInterface(PyObject *input) + + +:: + + PyObject * + PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject + *context) + + +:: + + NPY_SCALARKIND + PyArray_ScalarKind(int typenum, PyArrayObject **arr) + +ScalarKind + +Returns the scalar kind of a type number, with an +optional tweak based on the scalar value itself. +If no scalar is provided, it returns INTPOS_SCALAR +for both signed and unsigned integers, otherwise +it checks the sign of any signed integer to choose +INTNEG_SCALAR when appropriate. + +:: + + int + PyArray_CanCoerceScalar(int thistype, int neededtype, NPY_SCALARKIND + scalar) + + +Determines whether the data type 'thistype', with +scalar kind 'scalar', can be coerced into 'neededtype'. + +:: + + PyObject * + PyArray_NewFlagsObject(PyObject *obj) + + +Get New ArrayFlagsObject + +:: + + npy_bool + PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) + +See if array scalars can be cast. + +TODO: For NumPy 2.0, add a NPY_CASTING parameter. + +:: + + int + PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) + + +:: + + int + PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) + +Adjusts previously broadcasted iterators so that the axis with +the smallest sum of iterator strides is not iterated over. +Returns dimension which is smallest in the range [0,multi->nd). 
+A -1 is returned if multi->nd == 0.
+
+don't use with PyArray_ITER_GOTO1D because factors are not adjusted
+
+::
+
+  int
+  PyArray_ElementStrides(PyObject *obj)
+
+
+::
+
+  void
+  PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
+
+XINCREF all objects in a single array item. This is complicated for
+structured datatypes where the position of objects needs to be extracted.
+The function is executed recursively for each nested field or subarray
+dtype, such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`
+
+::
+
+  void
+  PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
+
+
+XDECREF all objects in a single array item. This is complicated for
+structured datatypes where the position of objects needs to be extracted.
+The function is executed recursively for each nested field or subarray
+dtype, such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`
+
+::
+
+  PyObject *
+  PyArray_FieldNames(PyObject *fields)
+
+Return the tuple of ordered field names from a dictionary.
+
+::
+
+  PyObject *
+  PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute)
+
+Return Transpose.
+
+::
+
+  PyObject *
+  PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int
+                   axis, PyArrayObject *out, NPY_CLIPMODE clipmode)
+
+Take
+
+::
+
+  PyObject *
+  PyArray_PutTo(PyArrayObject *self, PyObject *values0, PyObject
+                *indices0, NPY_CLIPMODE clipmode)
+
+Put values into an array
+
+::
+
+  PyObject *
+  PyArray_PutMask(PyArrayObject *self, PyObject *values0, PyObject *mask0)
+
+Put values into an array according to a mask.
+
+::
+
+  PyObject *
+  PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
+
+Repeat the array.
+
+::
+
+  PyObject *
+  PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject
+                 *out, NPY_CLIPMODE clipmode)
+
+
+::
+
+  int
+  PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+
+Sort an array in-place
+
+::
+
+  PyObject *
+  PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+
+ArgSort an array
+
+::
+
+  PyObject *
+  PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE
+                       side, PyObject *perm)
+
+
+Search the sorted array op1 for the location of the items in op2. The
+result is an array of indexes, one for each element in op2, such that if
+the item were to be inserted in op1 just before that index the array
+would still be in sorted order.
+
+Parameters
+----------
+op1 : PyArrayObject *
+    Array to be searched, must be 1-D.
+op2 : PyObject *
+    Array of items whose insertion indexes in op1 are wanted
+side : {NPY_SEARCHLEFT, NPY_SEARCHRIGHT}
+    If NPY_SEARCHLEFT, return first valid insertion indexes
+    If NPY_SEARCHRIGHT, return last valid insertion indexes
+perm : PyObject *
+    Permutation array that sorts op1 (optional)
+
+Returns
+-------
+ret : PyObject *
+    New reference to npy_intp array containing indexes where items in op2
+    could be validly inserted into op1. NULL on error.
+
+Notes
+-----
+Binary search is used to find the indexes.
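+
+A hedged sketch combining the sorting and searching entries above (``keys``
+and ``values`` are assumed array pointers; error handling abbreviated):
+
+::
+
+  /* Sort keys in place, then find left insertion points for values. */
+  if (PyArray_Sort(keys, 0, NPY_QUICKSORT) < 0) {
+      return NULL;
+  }
+  PyObject *idx = PyArray_SearchSorted(keys, (PyObject *)values,
+                                       NPY_SEARCHLEFT, NULL);
+  if (idx == NULL) {
+      return NULL;
+  }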
+ +:: + + PyObject * + PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) + +ArgMax + +:: + + PyObject * + PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out) + +ArgMin + +:: + + PyObject * + PyArray_Reshape(PyArrayObject *self, PyObject *shape) + +Reshape + +:: + + PyObject * + PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER + order) + +New shape for an array + +:: + + PyObject * + PyArray_Squeeze(PyArrayObject *self) + + +return a new view of the array object with all of its unit-length +dimensions squeezed out if needed, otherwise +return the same array. + +:: + + PyObject * + PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject + *pytype) + +View +steals a reference to type -- accepts NULL + +:: + + PyObject * + PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) + +SwapAxes + +:: + + PyObject * + PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) + +Max + +:: + + PyObject * + PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) + +Min + +:: + + PyObject * + PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) + +Ptp + +:: + + PyObject * + PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +Mean + +:: + + PyObject * + PyArray_Trace(PyArrayObject *self, int offset, int axis1, int + axis2, int rtype, PyArrayObject *out) + +Trace + +:: + + PyObject * + PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int + axis2) + +Diagonal + +In NumPy versions prior to 1.7, this function always returned a copy of +the diagonal array. In 1.7, the code has been updated to compute a view +onto 'self', but it still copies this array before returning, as well as +setting the internal WARN_ON_WRITE flag. In a future version, it will +simply return a view onto self. + +:: + + PyObject * + PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject + *max, PyArrayObject *out) + +Clip + +:: + + PyObject * + PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) + +Conjugate + +:: + + PyObject * + PyArray_Nonzero(PyArrayObject *self) + +Nonzero + +TODO: In NumPy 2.0, should make the iteration order a parameter. 
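+
+For example, one of the reductions above in use (``self`` is an assumed
+``PyArrayObject *``):
+
+::
+
+  /* Mean over axis 0, accumulating in double; no output array given. */
+  PyObject *mean = PyArray_Mean(self, 0, NPY_DOUBLE, NULL);
+  if (mean == NULL) {
+      return NULL;
+  }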
+ +:: + + PyObject * + PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out, int variance) + +Set variance to 1 to by-pass square-root calculation and return variance +Std + +:: + + PyObject * + PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +Sum + +:: + + PyObject * + PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +CumSum + +:: + + PyObject * + PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +Prod + +:: + + PyObject * + PyArray_CumProd(PyArrayObject *self, int axis, int + rtype, PyArrayObject *out) + +CumProd + +:: + + PyObject * + PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) + +All + +:: + + PyObject * + PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) + +Any + +:: + + PyObject * + PyArray_Compress(PyArrayObject *self, PyObject *condition, int + axis, PyArrayObject *out) + +Compress + +:: + + PyObject * + PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) + +Flatten + +:: + + PyObject * + PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order) + +Ravel +Returns a contiguous array + +:: + + npy_intp + PyArray_MultiplyList(npy_intp *l1, int n) + +Multiply a List + +:: + + int + PyArray_MultiplyIntList(int *l1, int n) + +Multiply a List of ints + +:: + + void * + PyArray_GetPtr(PyArrayObject *obj, npy_intp*ind) + +Produce a pointer into array + +:: + + int + PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n) + +Compare Lists + +:: + + int + PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int + nd, PyArray_Descr*typedescr) + +Simulate a C-array +steals a reference to typedescr -- can be NULL + +:: + + int + PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) + +Convert to a 1D C-array + +:: + + int + PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int + typecode) + +Convert to a 2D C-array + +:: + + int + PyArray_Free(PyObject *op, void *ptr) + +Free pointers created if As2D is called + +:: + + int + PyArray_Converter(PyObject *object, PyObject **address) + + +Useful to pass as converter function for O& processing in PyArgs_ParseTuple. + +This conversion function can be used with the "O&" argument for +PyArg_ParseTuple. It will immediately return an object of array type +or will convert to a NPY_ARRAY_CARRAY any other object. + +If you use PyArray_Converter, you must DECREF the array when finished +as you get a new reference to it. + +:: + + int + PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) + +PyArray_IntpFromSequence +Returns the number of integers converted or -1 if an error occurred. +vals must be large enough to hold maxvals + +:: + + PyObject * + PyArray_Concatenate(PyObject *op, int axis) + +Concatenate + +Concatenate an arbitrary Python sequence into an array. +op is a python object supporting the sequence interface. +Its elements will be concatenated together to form a single +multidimensional array. If axis is NPY_MAXDIMS or bigger, then +each sequence object will be flattened before concatenation + +:: + + PyObject * + PyArray_InnerProduct(PyObject *op1, PyObject *op2) + +Numeric.innerproduct(a,v) + +:: + + PyObject * + PyArray_MatrixProduct(PyObject *op1, PyObject *op2) + +Numeric.matrixproduct(a,v) +just like inner product but does the swapaxes stuff on the fly + +:: + + PyObject * + PyArray_CopyAndTranspose(PyObject *op) + +Copy and Transpose + +Could deprecate this function, as there isn't a speed benefit over +calling Transpose and then Copy. 
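+
+A sketch of the O& idiom described for PyArray_Converter above, inside a
+hypothetical METH_VARARGS function (the converted array is a new reference
+and must be DECREF'd when done):
+
+::
+
+  PyObject *arr = NULL;
+  if (!PyArg_ParseTuple(args, "O&", PyArray_Converter, &arr)) {
+      return NULL;
+  }
+  /* ... use arr as an NPY_ARRAY_CARRAY ... */
+  Py_DECREF(arr);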
+ +:: + + PyObject * + PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) + +Numeric.correlate(a1,a2,mode) + +:: + + int + PyArray_TypestrConvert(int itemsize, int gentype) + +Typestr converter + +:: + + int + PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) + +Get typenum from an object -- None goes to NPY_DEFAULT_TYPE +This function takes a Python object representing a type and converts it +to a the correct PyArray_Descr * structure to describe the type. + +Many objects can be used to represent a data-type which in NumPy is +quite a flexible concept. + +This is the central code that converts Python objects to +Type-descriptor objects that are used throughout numpy. + +Returns a new reference in *at, but the returned should not be +modified as it may be one of the canonical immutable objects or +a reference to the input obj. + +:: + + int + PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) + +Get typenum from an object -- None goes to NULL + +:: + + int + PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) + +Get intp chunk from sequence + +This function takes a Python sequence object and allocates and +fills in an intp array with the converted values. + +Remember to free the pointer seq.ptr when done using +PyDimMem_FREE(seq.ptr)** + +:: + + int + PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) + +Get buffer chunk from object + +this function takes a Python object which exposes the (single-segment) +buffer interface and returns a pointer to the data segment + +You should increment the reference count by one of buf->base +if you will hang on to a reference + +You only get a borrowed reference to the object. Do not free the +memory... + +:: + + int + PyArray_AxisConverter(PyObject *obj, int *axis) + +Get axis from an object (possibly None) -- a converter function, + +See also PyArray_ConvertMultiAxis, which also handles a tuple of axes. + +:: + + int + PyArray_BoolConverter(PyObject *object, npy_bool *val) + +Convert an object to true / false + +:: + + int + PyArray_ByteorderConverter(PyObject *obj, char *endian) + +Convert object to endian + +:: + + int + PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) + +Convert an object to FORTRAN / C / ANY / KEEP + +:: + + unsigned char + PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) + + +This function returns true if the two typecodes are +equivalent (same basic kind and same itemsize). + +:: + + PyObject * + PyArray_Zeros(int nd, npy_intp *dims, PyArray_Descr *type, int + is_f_order) + +Zeros + +steals a reference to type. On failure or when dtype->subarray is +true, dtype will be decrefed. +accepts NULL type + +:: + + PyObject * + PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int + is_f_order) + +Empty + +accepts NULL type +steals referenct to type + +:: + + PyObject * + PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) + +Where + +:: + + PyObject * + PyArray_Arange(double start, double stop, double step, int type_num) + +Arange, + +:: + + PyObject * + PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject + *step, PyArray_Descr *dtype) + + +ArangeObj, + +this doesn't change the references + +:: + + int + PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) + +Convert object to sort kind + +:: + + PyObject * + PyArray_LexSort(PyObject *sort_keys, int axis) + +LexSort an array providing indices that will sort a collection of arrays +lexicographically. 
The first key is sorted on first, followed by the second key +-- requires that arg"merge"sort is available for each sort_key + +Returns an index array that shows the indexes for the lexicographic sort along +the given axis. + +:: + + PyObject * + PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) + +Round + +:: + + unsigned char + PyArray_EquivTypenums(int typenum1, int typenum2) + + +:: + + int + PyArray_RegisterDataType(PyArray_Descr *descr) + +Register Data type +Does not change the reference count of descr + +:: + + int + PyArray_RegisterCastFunc(PyArray_Descr *descr, int + totype, PyArray_VectorUnaryFunc *castfunc) + +Register Casting Function +Replaces any function currently stored. + +:: + + int + PyArray_RegisterCanCast(PyArray_Descr *descr, int + totype, NPY_SCALARKIND scalar) + +Register a type number indicating that a descriptor can be cast +to it safely + +:: + + void + PyArray_InitArrFuncs(PyArray_ArrFuncs *f) + +Initialize arrfuncs to NULL + +:: + + PyObject * + PyArray_IntTupleFromIntp(int len, npy_intp *vals) + +PyArray_IntTupleFromIntp + +:: + + int + PyArray_TypeNumFromName(char *str) + + +:: + + int + PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val) + +Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP + +:: + + int + PyArray_OutputConverter(PyObject *object, PyArrayObject **address) + +Useful to pass as converter function for O& processing in +PyArgs_ParseTuple for output arrays + +:: + + PyObject * + PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd) + +Get Iterator broadcast to a particular shape + +:: + + void + _PyArray_SigintHandler(int signum) + + +:: + + void* + _PyArray_GetSigintBuf(void ) + + +:: + + int + PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at) + + +Get type-descriptor from an object forcing alignment if possible +None goes to DEFAULT type. + +any object with the .fields attribute and/or .itemsize attribute (if the +.fields attribute does not give the total size -- i.e. a partial record +naming). If itemsize is given it must be >= size computed from fields + +The .fields attribute must return a convertible dictionary if present. +Result inherits from NPY_VOID. + +:: + + int + PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at) + + +Get type-descriptor from an object forcing alignment if possible +None goes to NULL. + +:: + + int + PyArray_SearchsideConverter(PyObject *obj, void *addr) + +Convert object to searchsorted side + +:: + + PyObject * + PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags) + +PyArray_CheckAxis + +check that axis is valid +convert 0-d arrays to 1-d arrays + +:: + + npy_intp + PyArray_OverflowMultiplyList(npy_intp *l1, int n) + +Multiply a List of Non-negative numbers with over-flow detection. + +:: + + int + PyArray_CompareString(char *s1, char *s2, size_t len) + + +:: + + PyObject * + PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ... ) + +Get MultiIterator from array of Python objects and any additional + +PyObject **mps -- array of PyObjects +int n - number of PyObjects in the array +int nadd - number of additional arrays to include in the iterator. + +Returns a multi-iterator object. 
+
+::
+
+  int
+  PyArray_GetEndianness(void )
+
+
+::
+
+  unsigned int
+  PyArray_GetNDArrayCFeatureVersion(void )
+
+Returns the built-in (at compilation time) C API version
+
+::
+
+  PyObject *
+  PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode)
+
+correlate(a1,a2,mode)
+
+This function computes the usual correlation (correlate(a1, a2) !=
+correlate(a2, a1)) and conjugates the second argument for complex inputs.
+
+::
+
+  PyObject*
+  PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp
+                              *bounds, int mode, PyArrayObject *fill)
+
+A Neighborhood Iterator object.
+
+::
+
+  void
+  PyArray_SetDatetimeParseFunction(PyObject *op)
+
+This function is scheduled to be removed
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  void
+  PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT
+                                   fr, npy_datetimestruct *result)
+
+Fill the datetime struct from the value and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  void
+  PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT
+                                     fr, npy_timedeltastruct *result)
+
+Fill the timedelta struct from the timedelta value and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  npy_datetime
+  PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT
+                                   fr, npy_datetimestruct *d)
+
+Create a datetime value from a filled datetime struct and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  npy_datetime
+  PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT
+                                     fr, npy_timedeltastruct *d)
+
+Create a timedelta value from a filled timedelta struct and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  NpyIter *
+  NpyIter_New(PyArrayObject *op, npy_uint32 flags, NPY_ORDER
+              order, NPY_CASTING casting, PyArray_Descr *dtype)
+
+Allocate a new iterator for one array object.
+
+::
+
+  NpyIter *
+  NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32
+                   flags, NPY_ORDER order, NPY_CASTING
+                   casting, npy_uint32 *op_flags, PyArray_Descr
+                   **op_request_dtypes)
+
+Allocate a new iterator for more than one array object, using
+standard NumPy broadcasting rules and the default buffer size.
+
+::
+
+  NpyIter *
+  NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32
+                      flags, NPY_ORDER order, NPY_CASTING
+                      casting, npy_uint32 *op_flags, PyArray_Descr
+                      **op_request_dtypes, int oa_ndim, int
+                      **op_axes, npy_intp *itershape, npy_intp
+                      buffersize)
+
+Allocate a new iterator for multiple array objects, and advanced
+options for controlling the broadcasting, shape, and buffer size.
+
+::
+
+  NpyIter *
+  NpyIter_Copy(NpyIter *iter)
+
+Makes a copy of the iterator
+
+::
+
+  int
+  NpyIter_Deallocate(NpyIter *iter)
+
+Deallocate an iterator
+
+::
+
+  npy_bool
+  NpyIter_HasDelayedBufAlloc(NpyIter *iter)
+
+Whether the buffer allocation is being delayed
+
+::
+
+  npy_bool
+  NpyIter_HasExternalLoop(NpyIter *iter)
+
+Whether the iterator handles the inner loop
+
+::
+
+  int
+  NpyIter_EnableExternalLoop(NpyIter *iter)
+
+Removes the inner loop handling (so HasExternalLoop returns true)
+
+::
+
+  npy_intp *
+  NpyIter_GetInnerStrideArray(NpyIter *iter)
+
+Get the array of strides for the inner loop (when HasExternalLoop is true)
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+  npy_intp *
+  NpyIter_GetInnerLoopSizePtr(NpyIter *iter)
+
+Get a pointer to the size of the inner loop (when HasExternalLoop is true)
+
+This function may be safely called without holding the Python GIL.
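+
+To illustrate the iterator API above, a minimal single-operand loop
+(assumes ``arr`` is a non-empty double array; NULL checks on the iterator
+and the iternext function are abbreviated):
+
+::
+
+  NpyIter *iter = NpyIter_New(arr, NPY_ITER_READONLY, NPY_KEEPORDER,
+                              NPY_NO_CASTING, NULL);
+  NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+  char **dataptr = NpyIter_GetDataPtrArray(iter);
+  do {
+      double v = *(double *)dataptr[0];
+      /* ... consume v ... */
+  } while (iternext(iter));
+  NpyIter_Deallocate(iter);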
+ +:: + + int + NpyIter_Reset(NpyIter *iter, char **errmsg) + +Resets the iterator to its initial state + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + int + NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char + **errmsg) + +Resets the iterator to its initial state, with new base data pointers. +This function requires great caution. + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + int + NpyIter_ResetToIterIndexRange(NpyIter *iter, npy_intp istart, npy_intp + iend, char **errmsg) + +Resets the iterator to a new iterator index range + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + int + NpyIter_GetNDim(NpyIter *iter) + +Gets the number of dimensions being iterated + +:: + + int + NpyIter_GetNOp(NpyIter *iter) + +Gets the number of operands being iterated + +:: + + NpyIter_IterNextFunc * + NpyIter_GetIterNext(NpyIter *iter, char **errmsg) + +Compute the specialized iteration function for an iterator + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + npy_intp + NpyIter_GetIterSize(NpyIter *iter) + +Gets the number of elements being iterated + +:: + + void + NpyIter_GetIterIndexRange(NpyIter *iter, npy_intp *istart, npy_intp + *iend) + +Gets the range of iteration indices being iterated + +:: + + npy_intp + NpyIter_GetIterIndex(NpyIter *iter) + +Gets the current iteration index + +:: + + int + NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex) + +Sets the iterator position to the specified iterindex, +which matches the iteration order of the iterator. + +Returns NPY_SUCCEED on success, NPY_FAIL on failure. + +:: + + npy_bool + NpyIter_HasMultiIndex(NpyIter *iter) + +Whether the iterator is tracking a multi-index + +:: + + int + NpyIter_GetShape(NpyIter *iter, npy_intp *outshape) + +Gets the broadcast shape if a multi-index is being tracked by the iterator, +otherwise gets the shape of the iteration as Fortran-order +(fastest-changing index first). + +The reason Fortran-order is returned when a multi-index +is not enabled is that this is providing a direct view into how +the iterator traverses the n-dimensional space. The iterator organizes +its memory from fastest index to slowest index, and when +a multi-index is enabled, it uses a permutation to recover the original +order. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + NpyIter_GetMultiIndexFunc * + NpyIter_GetGetMultiIndex(NpyIter *iter, char **errmsg) + +Compute a specialized get_multi_index function for the iterator + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + int + NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp *multi_index) + +Sets the iterator to the specified multi-index, which must have the +correct number of entries for 'ndim'. 
It is only valid +when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation +fails if the multi-index is out of bounds. + +Returns NPY_SUCCEED on success, NPY_FAIL on failure. + +:: + + int + NpyIter_RemoveMultiIndex(NpyIter *iter) + +Removes multi-index support from an iterator. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + npy_bool + NpyIter_HasIndex(NpyIter *iter) + +Whether the iterator is tracking an index + +:: + + npy_bool + NpyIter_IsBuffered(NpyIter *iter) + +Whether the iterator is buffered + +:: + + npy_bool + NpyIter_IsGrowInner(NpyIter *iter) + +Whether the inner loop can grow if buffering is unneeded + +:: + + npy_intp + NpyIter_GetBufferSize(NpyIter *iter) + +Gets the size of the buffer, or 0 if buffering is not enabled + +:: + + npy_intp * + NpyIter_GetIndexPtr(NpyIter *iter) + +Get a pointer to the index, if it is being tracked + +:: + + int + NpyIter_GotoIndex(NpyIter *iter, npy_intp flat_index) + +If the iterator is tracking an index, sets the iterator +to the specified index. + +Returns NPY_SUCCEED on success, NPY_FAIL on failure. + +:: + + char ** + NpyIter_GetDataPtrArray(NpyIter *iter) + +Get the array of data pointers (1 per object being iterated) + +This function may be safely called without holding the Python GIL. + +:: + + PyArray_Descr ** + NpyIter_GetDescrArray(NpyIter *iter) + +Get the array of data type pointers (1 per object being iterated) + +:: + + PyArrayObject ** + NpyIter_GetOperandArray(NpyIter *iter) + +Get the array of objects being iterated + +:: + + PyArrayObject * + NpyIter_GetIterView(NpyIter *iter, npy_intp i) + +Returns a view to the i-th object with the iterator's internal axes + +:: + + void + NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags) + +Gets an array of read flags (1 per object being iterated) + +:: + + void + NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags) + +Gets an array of write flags (1 per object being iterated) + +:: + + void + NpyIter_DebugPrint(NpyIter *iter) + +For debugging + +:: + + npy_bool + NpyIter_IterationNeedsAPI(NpyIter *iter) + +Whether the iteration loop, and in particular the iternext() +function, needs API access. If this is true, the GIL must +be retained while iterating. + +:: + + void + NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides) + +Get an array of strides which are fixed. Any strides which may +change during iteration receive the value NPY_MAX_INTP. Once +the iterator is ready to iterate, call this to get the strides +which will always be fixed in the inner loop, then choose optimized +inner loop functions which take advantage of those fixed strides. + +This function may be safely called without holding the Python GIL. + +:: + + int + NpyIter_RemoveAxis(NpyIter *iter, int axis) + +Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX +was set for iterator creation, and does not work if buffering is +enabled. This function also resets the iterator to its initial state. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + npy_intp * + NpyIter_GetAxisStrideArray(NpyIter *iter, int axis) + +Gets the array of strides for the specified axis. +If the iterator is tracking a multi-index, gets the strides +for the axis specified, otherwise gets the strides for +the iteration axis as Fortran order (fastest-changing axis first). + +Returns NULL if an error occurs. + +:: + + npy_bool + NpyIter_RequiresBuffering(NpyIter *iter) + +Whether the iteration could be done with no buffering. 
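+
+A sketch of the GIL pattern implied by NpyIter_IterationNeedsAPI above
+(the NPY_BEGIN_THREADS macros are assumed from the NumPy headers):
+
+::
+
+  NPY_BEGIN_THREADS_DEF;
+  if (!NpyIter_IterationNeedsAPI(iter)) {
+      NPY_BEGIN_THREADS;    /* release the GIL for the inner loop */
+  }
+  /* ... run the iteration loop ... */
+  NPY_END_THREADS;          /* no-op if the GIL was never released */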
+ +:: + + char ** + NpyIter_GetInitialDataPtrArray(NpyIter *iter) + +Get the array of data pointers (1 per object being iterated), +directly into the arrays (never pointing to a buffer), for starting +unbuffered iteration. This always returns the addresses for the +iterator position as reset to iterator index 0. + +These pointers are different from the pointers accepted by +NpyIter_ResetBasePointers, because the direction along some +axes may have been reversed, requiring base offsets. + +This function may be safely called without holding the Python GIL. + +:: + + int + NpyIter_CreateCompatibleStrides(NpyIter *iter, npy_intp + itemsize, npy_intp *outstrides) + +Builds a set of strides which are the same as the strides of an +output array created using the NPY_ITER_ALLOCATE flag, where NULL +was passed for op_axes. This is for data packed contiguously, +but not necessarily in C or Fortran order. This should be used +together with NpyIter_GetShape and NpyIter_GetNDim. + +A use case for this function is to match the shape and layout of +the iterator and tack on one or more dimensions. For example, +in order to generate a vector per input value for a numerical gradient, +you pass in ndim*itemsize for itemsize, then add another dimension to +the end with size ndim and stride itemsize. To do the Hessian matrix, +you do the same thing but add two dimensions, or take advantage of +the symmetry and pack it into 1 dimension with a particular encoding. + +This function may only be called if the iterator is tracking a multi-index +and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from +being iterated in reverse order. + +If an array is created with this method, simply adding 'itemsize' +for each iteration will traverse the new array matching the +iterator. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + int + PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) + +Convert any Python object, *obj*, to an NPY_CASTING enum. + +:: + + npy_intp + PyArray_CountNonzero(PyArrayObject *self) + +Counts the number of non-zero elements in the array. + +Returns -1 on error. + +:: + + PyArray_Descr * + PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) + +Produces the smallest size and lowest kind type to which both +input types can be cast. + +:: + + PyArray_Descr * + PyArray_MinScalarType(PyArrayObject *arr) + +If arr is a scalar (has 0 dimensions) with a built-in number data type, +finds the smallest type size/kind which can still represent its data. +Otherwise, returns the array's data type. + + +:: + + PyArray_Descr * + PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, npy_intp + ndtypes, PyArray_Descr **dtypes) + +Produces the result type of a bunch of inputs, using the UFunc +type promotion rules. Use this function when you have a set of +input arrays, and need to determine an output array dtype. + +If all the inputs are scalars (have 0 dimensions) or the maximum "kind" +of the scalars is greater than the maximum "kind" of the arrays, does +a regular type promotion. + +Otherwise, does a type promotion on the MinScalarType +of all the inputs. Data types passed directly are treated as array +types. + + +:: + + npy_bool + PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr + *to, NPY_CASTING casting) + +Returns 1 if the array object may be cast to the given data type using +the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in +that it handles scalar arrays (0 dimensions) specially, by checking +their value. 
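+
+For instance, a hedged use of the promotion helper above (``a`` and ``b``
+are assumed ``PyArrayObject *``; PromoteTypes returns a new reference):
+
+::
+
+  PyArray_Descr *common = PyArray_PromoteTypes(PyArray_DESCR(a),
+                                               PyArray_DESCR(b));
+  if (common == NULL) {
+      return NULL;
+  }
+  /* e.g. pass common to PyArray_NewFromDescr, which steals it */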
+ +:: + + npy_bool + PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr + *to, NPY_CASTING casting) + +Returns true if data of type 'from' may be cast to data of type +'to' according to the rule 'casting'. + +:: + + PyArrayObject * + PyArray_EinsteinSum(char *subscripts, npy_intp nop, PyArrayObject + **op_in, PyArray_Descr *dtype, NPY_ORDER + order, NPY_CASTING casting, PyArrayObject *out) + +This function provides summation of array elements according to +the Einstein summation convention. For example: +- trace(a) -> einsum("ii", a) +- transpose(a) -> einsum("ji", a) +- multiply(a,b) -> einsum(",", a, b) +- inner(a,b) -> einsum("i,i", a, b) +- outer(a,b) -> einsum("i,j", a, b) +- matvec(a,b) -> einsum("ij,j", a, b) +- matmat(a,b) -> einsum("ij,jk", a, b) + +subscripts: The string of subscripts for einstein summation. +nop: The number of operands +op_in: The array of operands +dtype: Either NULL, or the data type to force the calculation as. +order: The order for the calculation/the output axes. +casting: What kind of casts should be permitted. +out: Either NULL, or an array into which the output should be placed. + +By default, the labels get placed in alphabetical order +at the end of the output. So, if c = einsum("i,j", a, b) +then c[i,j] == a[i]*b[j], but if c = einsum("j,i", a, b) +then c[i,j] = a[j]*b[i]. + +Alternatively, you can control the output order or prevent +an axis from being summed/force an axis to be summed by providing +indices for the output. This allows us to turn 'trace' into +'diag', for example. +- diag(a) -> einsum("ii->i", a) +- sum(a, axis=0) -> einsum("i...->", a) + +Subscripts at the beginning and end may be specified by +putting an ellipsis "..." in the middle. For example, +the function einsum("i...i", a) takes the diagonal of +the first and last dimensions of the operand, and +einsum("ij...,jk...->ik...") takes the matrix product using +the first two indices of each operand instead of the last two. + +When there is only one operand, no axes being summed, and +no output parameter, this function returns a view +into the operand instead of making a copy. + +:: + + PyObject * + PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER + order, PyArray_Descr *dtype, int subok) + +Creates a new array with the same shape as the provided one, +with possible memory layout order and data type changes. + +prototype - The array the new one should be like. +order - NPY_CORDER - C-contiguous result. +NPY_FORTRANORDER - Fortran-contiguous result. +NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise. +NPY_KEEPORDER - Keeps the axis ordering of prototype. +dtype - If not NULL, overrides the data type of the result. +subok - If 1, use the prototype's array subtype, otherwise +always create a base-class array. + +NOTE: If dtype is not NULL, steals the dtype reference. On failure or when +dtype->subarray is true, dtype will be decrefed. + +:: + + int + PyArray_GetArrayParamsFromObject(PyObject *op, PyArray_Descr + *requested_dtype, npy_bool + writeable, PyArray_Descr + **out_dtype, int *out_ndim, npy_intp + *out_dims, PyArrayObject + **out_arr, PyObject *context) + +Retrieves the array parameters for viewing/converting an arbitrary +PyObject* to a NumPy array. This allows the "innate type and shape" +of Python list-of-lists to be discovered without +actually converting to an array. + +In some cases, such as structured arrays and the __array__ interface, +a data type needs to be used to make sense of the object. 
When +this is needed, provide a Descr for 'requested_dtype', otherwise +provide NULL. This reference is not stolen. Also, if the requested +dtype doesn't modify the interpretation of the input, out_dtype will +still get the "innate" dtype of the object, not the dtype passed +in 'requested_dtype'. + +If writing to the value in 'op' is desired, set the boolean +'writeable' to 1. This raises an error when 'op' is a scalar, list +of lists, or other non-writeable 'op'. + +Result: When success (0 return value) is returned, either out_arr +is filled with a non-NULL PyArrayObject and +the rest of the parameters are untouched, or out_arr is +filled with NULL, and the rest of the parameters are +filled. + +Typical usage: + +PyArrayObject *arr = NULL; +PyArray_Descr *dtype = NULL; +int ndim = 0; +npy_intp dims[NPY_MAXDIMS]; + +if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype, +&ndim, dims, &arr, NULL) < 0) { +return NULL; +} +if (arr == NULL) { +... validate/change dtype, validate flags, ndim, etc ... +// Could make custom strides here too +arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim, +dims, NULL, +is_f_order ? NPY_ARRAY_F_CONTIGUOUS : 0, +NULL); +if (arr == NULL) { +return NULL; +} +if (PyArray_CopyObject(arr, op) < 0) { +Py_DECREF(arr); +return NULL; +} +} +else { +... in this case the other parameters weren't filled, just +validate and possibly copy arr itself ... +} +... use arr ... + +:: + + int + PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE + *modes, int n) + +Convert an object to an array of n NPY_CLIPMODE values. +This is intended to be used in functions where a different mode +could be applied to each axis, like in ravel_multi_index. + +:: + + PyObject * + PyArray_MatrixProduct2(PyObject *op1, PyObject + *op2, PyArrayObject*out) + +Numeric.matrixproduct2(a,v,out) +just like inner product but does the swapaxes stuff on the fly + +:: + + npy_bool + NpyIter_IsFirstVisit(NpyIter *iter, int iop) + +Checks to see whether this is the first time the elements +of the specified reduction operand which the iterator points at are +being seen for the first time. The function returns +a reasonable answer for reduction operands and when buffering is +disabled. The answer may be incorrect for buffered non-reduction +operands. + +This function is intended to be used in EXTERNAL_LOOP mode only, +and will produce some wrong answers when that mode is not enabled. + +If this function returns true, the caller should also +check the inner loop stride of the operand, because if +that stride is 0, then only the first element of the innermost +external loop is being visited for the first time. + +WARNING: For performance reasons, 'iop' is not bounds-checked, +it is not confirmed that 'iop' is actually a reduction +operand, and it is not confirmed that EXTERNAL_LOOP +mode is enabled. These checks are the responsibility of +the caller, and should be done outside of any inner loops. + +:: + + int + PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj) + +Sets the 'base' attribute of the array. This steals a reference +to 'obj'. + +Returns 0 on success, -1 on failure. + +:: + + void + PyArray_CreateSortedStridePerm(int ndim, npy_intp + *strides, npy_stride_sort_item + *out_strideperm) + + +This function populates the first ndim elements +of strideperm with sorted descending by their absolute values. +For example, the stride array (4, -2, 12) becomes +[(2, 12), (0, 4), (1, -2)]. 
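+
+A sketch of the base handoff described for PyArray_SetBaseObject above
+(``view`` and ``base`` are assumed objects; the reference to base is
+stolen, hence the INCREF of a borrowed reference first):
+
+::
+
+  Py_INCREF(base);
+  if (PyArray_SetBaseObject((PyArrayObject *)view, base) < 0) {
+      Py_DECREF(view);
+      return NULL;
+  }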
+ +:: + + void + PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags) + + +Removes the axes flagged as True from the array, +modifying it in place. If an axis flagged for removal +has a shape entry bigger than one, this effectively selects +index zero for that axis. + +WARNING: If an axis flagged for removal has a shape equal to zero, +the array will point to invalid memory. The caller must +validate this! +If an axis flagged for removal has a shape larger than one, +the aligned flag (and in the future the contiguous flags), +may need explicit update. +(check also NPY_RELAXED_STRIDES_CHECKING) + +For example, this can be used to remove the reduction axes +from a reduction result once its computation is complete. + +:: + + void + PyArray_DebugPrint(PyArrayObject *obj) + +Prints the raw data of the ndarray in a form useful for debugging +low-level C issues. + +:: + + int + PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name) + + +This function does nothing if obj is writeable, and raises an exception +(and returns -1) if obj is not writeable. It may also do other +house-keeping, such as issuing warnings on arrays which are transitioning +to become views. Always call this function at some point before writing to +an array. + +'name' is a name for the array, used to give better error +messages. Something like "assignment destination", "output array", or even +just "array". + +:: + + int + PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) + + +Precondition: 'arr' is a copy of 'base' (though possibly with different +strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the +->base pointer on 'arr', so that when 'arr' is destructed, it will copy any +changes back to 'base'. DEPRECATED, use PyArray_SetWritebackIfCopyBase + +Steals a reference to 'base'. + +Returns 0 on success, -1 on failure. + +:: + + void * + PyDataMem_NEW(size_t size) + +Allocates memory for array data. + +:: + + void + PyDataMem_FREE(void *ptr) + +Free memory for array data. + +:: + + void * + PyDataMem_RENEW(void *ptr, size_t size) + +Reallocate/resize memory for array data. + +:: + + PyDataMem_EventHookFunc * + PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void + *user_data, void **old_data) + +Sets the allocation event hook for numpy array data. +Takes a PyDataMem_EventHookFunc *, which has the signature: +void hook(void *old, void *new, size_t size, void *user_data). +Also takes a void *user_data, and void **old_data. + +Returns a pointer to the previous hook or NULL. If old_data is +non-NULL, the previous user_data pointer will be copied to it. + +If not NULL, hook will be called at the end of each PyDataMem_NEW/FREE/RENEW: +result = PyDataMem_NEW(size) -> (*hook)(NULL, result, size, user_data) +PyDataMem_FREE(ptr) -> (*hook)(ptr, NULL, 0, user_data) +result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data) + +When the hook is called, the GIL will be held by the calling +thread. The hook should be written to be reentrant, if it performs +operations that might cause new allocation events (such as the +creation/destruction numpy objects, or creating/destroying Python +objects which might cause a gc) + +:: + + void + PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject + **ret, int getmap) + + +:: + + PyObject * + PyArray_MapIterArray(PyArrayObject *a, PyObject *index) + + +Use advanced indexing to iterate an array. 
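+
+As the PyArray_FailUnlessWriteable entry above suggests, a typical guard
+before any write path (sketch; ``out`` is an assumed ``PyArrayObject *``):
+
+::
+
+  if (PyArray_FailUnlessWriteable(out, "output array") < 0) {
+      return NULL;   /* exception already set */
+  }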
+ +:: + + void + PyArray_MapIterNext(PyArrayMapIterObject *mit) + +This function needs to update the state of the map iterator +and point mit->dataptr to the memory-location of the next object + +Note that this function never handles an extra operand but provides +compatibility for an old (exposed) API. + +:: + + int + PyArray_Partition(PyArrayObject *op, PyArrayObject *ktharray, int + axis, NPY_SELECTKIND which) + +Partition an array in-place + +:: + + PyObject * + PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int + axis, NPY_SELECTKIND which) + +ArgPartition an array + +:: + + int + PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind) + +Convert object to select kind + +:: + + void * + PyDataMem_NEW_ZEROED(size_t size, size_t elsize) + +Allocates zeroed memory for array data. + +:: + + int + PyArray_CheckAnyScalarExact(PyObject *obj) + +return true an object is exactly a numpy scalar + +:: + + PyObject * + PyArray_MapIterArrayCopyIfOverlap(PyArrayObject *a, PyObject + *index, int + copy_if_overlap, PyArrayObject + *extra_op) + + +Same as PyArray_MapIterArray, but: + +If copy_if_overlap != 0, check if `a` has memory overlap with any of the +arrays in `index` and with `extra_op`. If yes, make copies as appropriate +to avoid problems if `a` is modified during the iteration. +`iter->array` may contain a copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set). + +:: + + int + PyArray_ResolveWritebackIfCopy(PyArrayObject *self) + + +If WRITEBACKIFCOPY and self has data, reset the base WRITEABLE flag, +copy the local data to base, release the local data, and set flags +appropriately. Return 0 if not relevant, 1 if success, < 0 on failure + +:: + + int + PyArray_SetWritebackIfCopyBase(PyArrayObject *arr, PyArrayObject + *base) + + +Precondition: 'arr' is a copy of 'base' (though possibly with different +strides, ordering, etc.). This function sets the WRITEBACKIFCOPY flag and the +->base pointer on 'arr', call PyArray_ResolveWritebackIfCopy to copy any +changes back to 'base' before deallocating the array. + +Steals a reference to 'base'. + +Returns 0 on success, -1 on failure. + diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarrayobject.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarrayobject.h new file mode 100644 index 0000000..45f008b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarrayobject.h @@ -0,0 +1,285 @@ +/* + * DON'T INCLUDE THIS DIRECTLY. + */ + +#ifndef NPY_NDARRAYOBJECT_H +#define NPY_NDARRAYOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "ndarraytypes.h" + +/* Includes the "function" C-API -- these are all stored in a + list of pointers --- one for each file + The two lists are concatenated into one in multiarray. 
+ + They are available as import_array() +*/ + +#include "__multiarray_api.h" + + +/* C-API that requires previous API to be defined */ + +#define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type) + +#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) +#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) + +#define PyArray_HasArrayInterfaceType(op, type, context, out) \ + ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromArrayAttr(op, type, context)) != \ + Py_NotImplemented)) + +#define PyArray_HasArrayInterface(op, out) \ + PyArray_HasArrayInterfaceType(op, NULL, NULL, out) + +#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ + (PyArray_NDIM((PyArrayObject *)op) == 0)) + +#define PyArray_IsScalar(obj, cls) \ + (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) + +#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ + PyArray_IsZeroDim(m)) +#if PY_MAJOR_VERSION >= 3 +#define PyArray_IsPythonNumber(obj) \ + (PyFloat_Check(obj) || PyComplex_Check(obj) || \ + PyLong_Check(obj) || PyBool_Check(obj)) +#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \ + || PyArray_IsScalar((obj), Integer)) +#define PyArray_IsPythonScalar(obj) \ + (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \ + PyUnicode_Check(obj)) +#else +#define PyArray_IsPythonNumber(obj) \ + (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \ + PyLong_Check(obj) || PyBool_Check(obj)) +#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \ + || PyLong_Check(obj) \ + || PyArray_IsScalar((obj), Integer)) +#define PyArray_IsPythonScalar(obj) \ + (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \ + PyUnicode_Check(obj)) +#endif + +#define PyArray_IsAnyScalar(obj) \ + (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) + +#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ + PyArray_CheckScalar(obj)) + + +#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \ + Py_INCREF(m), (m) : \ + (PyArrayObject *)(PyArray_Copy(m))) + +#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ + PyArray_CompareLists(PyArray_DIMS(a1), \ + PyArray_DIMS(a2), \ + PyArray_NDIM(a1))) + +#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) +#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) +#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) + +#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ + NULL) + +#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ + PyArray_DescrFromType(type), 0, 0, 0, NULL) + +#define PyArray_FROM_OTF(m, type, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) + +#define PyArray_FROMANY(m, type, min, max, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? 
\ + (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) + +#define PyArray_ZEROS(m, dims, type, is_f_order) \ + PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_EMPTY(m, dims, type, is_f_order) \ + PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ + PyArray_NBYTES(obj)) +#ifndef PYPY_VERSION +#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) +#define NPY_REFCOUNT PyArray_REFCOUNT +#endif +#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE) + +#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT, NULL) + +#define PyArray_EquivArrTypes(a1, a2) \ + PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) + +#define PyArray_EquivByteorders(b1, b2) \ + (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) + +#define PyArray_SimpleNew(nd, dims, typenum) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) + +#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ + data, 0, NPY_ARRAY_CARRAY, NULL) + +#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ + PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ + NULL, NULL, 0, NULL) + +#define PyArray_ToScalar(data, arr) \ + PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) + + +/* These might be faster without the dereferencing of obj + going on inside -- of course an optimizing compiler should + inline the constants inside a for loop making it a moot point +*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0])) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1])) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2])) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2] + \ + (l)*PyArray_STRIDES(obj)[3])) + +/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */ +static NPY_INLINE void +PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) +{ + PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; + if (fa && fa->base) { + if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || + (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) { + PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); + Py_DECREF(fa->base); + fa->base = NULL; + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); + PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); + } + } +} + +#define PyArray_DESCR_REPLACE(descr) do { \ + PyArray_Descr *_new_; \ + _new_ = PyArray_DescrNew(descr); \ + Py_XDECREF(descr); \ + descr = _new_; \ + } while(0) + +/* Copy should always return contiguous array */ +#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) + +#define PyArray_FromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_BEHAVED | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_CopyFromObject(op, type, 
min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_ENSURECOPY | \ + NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_Cast(mp, type_num) \ + PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) + +#define PyArray_Take(ap, items, axis) \ + PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) + +#define PyArray_Put(ap, items, values) \ + PyArray_PutTo(ap, items, values, NPY_RAISE) + +/* Compatibility with old Numeric stuff -- don't use in new code */ + +#define PyArray_FromDimsAndData(nd, d, type, data) \ + PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ + data) + + +/* + Check to see if this key in the dictionary is the "title" + entry of the tuple (i.e. a duplicate dictionary entry in the fields + dict. +*/ + +static NPY_INLINE int +NPY_TITLE_KEY_check(PyObject *key, PyObject *value) +{ + PyObject *title; + if (PyTuple_GET_SIZE(value) != 3) { + return 0; + } + title = PyTuple_GET_ITEM(value, 2); + if (key == title) { + return 1; + } +#ifdef PYPY_VERSION + /* + * On PyPy, dictionary keys do not always preserve object identity. + * Fall back to comparison by value. + */ + if (PyUnicode_Check(title) && PyUnicode_Check(key)) { + return PyUnicode_Compare(title, key) == 0 ? 1 : 0; + } +#if PY_VERSION_HEX < 0x03000000 + if (PyString_Check(title) && PyString_Check(key)) { + return PyObject_Compare(title, key) == 0 ? 1 : 0; + } +#endif +#endif + return 0; +} + +/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */ +#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value))) + +#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) +#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) + +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION) +static NPY_INLINE void +PyArray_XDECREF_ERR(PyArrayObject *arr) +{ + /* 2017-Nov-10 1.14 */ + DEPRECATE("PyArray_XDECREF_ERR is deprecated, call " + "PyArray_DiscardWritebackIfCopy then Py_XDECREF instead"); + PyArray_DiscardWritebackIfCopy(arr); + Py_XDECREF(arr); +} +#endif + + +#ifdef __cplusplus +} +#endif + + +#endif /* NPY_NDARRAYOBJECT_H */ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarraytypes.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarraytypes.h new file mode 100644 index 0000000..b0b749c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarraytypes.h @@ -0,0 +1,1838 @@ +#ifndef NDARRAYTYPES_H +#define NDARRAYTYPES_H + +#include "npy_common.h" +#include "npy_endian.h" +#include "npy_cpu.h" +#include "utils.h" + +#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + +#ifndef __has_extension +#define __has_extension(x) 0 +#endif + +#if !defined(_NPY_NO_DEPRECATIONS) && \ + ((defined(__GNUC__)&& __GNUC__ >= 6) || \ + __has_extension(attribute_deprecated_with_message)) +#define NPY_ATTR_DEPRECATE(text) __attribute__ ((deprecated (text))) +#else +#define NPY_ATTR_DEPRECATE(text) +#endif + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. 
+ * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. + */ +#define NPY_FEATURE_VERSION NPY_API_VERSION + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. + */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR NPY_ATTR_DEPRECATE("Use NPY_STRING"), + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; +#ifdef _MSC_VER +#pragma deprecated(NPY_CHAR) +#endif + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. 
+ */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0 +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4 +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1) + +/* The FR in the unit names stands for frequency */ +typedef enum { + /* Force signed enum type, must be -1 for code compatibility */ + NPY_FR_ERROR = -1, /* error or undetermined */ + + /* Start of valid units */ + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10, /* nanoseconds */ + NPY_FR_ps = 11, /* picoseconds */ + NPY_FR_fs = 12, /* femtoseconds */ + NPY_FR_as = 13, /* attoseconds */ + NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. 
*/ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. + */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 + /* numpy sometimes calls PyArray_malloc() with the GIL released. On Python + 3.3 and older, it was safe to call PyMem_Malloc() with the GIL released. + On Python 3.4 and newer, it's better to use PyMem_RawMalloc() to be able + to use tracemalloc. On Python 3.6, calling PyMem_Malloc() with the GIL + released is now a fatal error in debug mode. 
*/ +# if PY_VERSION_HEX >= 0x03040000 +# define PyArray_malloc PyMem_RawMalloc +# define PyArray_free PyMem_RawFree +# define PyArray_realloc PyMem_RawRealloc +# else +# define PyArray_malloc PyMem_Malloc +# define PyArray_free PyMem_Free +# define PyArray_realloc PyMem_Realloc +# endif +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. + */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. Should return 0 on success + * and -1 on failure. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. */ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). 
+ */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; + /* Cached hash value (-1 if not yet computed). + * This was added for NumPy 2.0.0. + */ + npy_hash_t hash; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. Except in the + * case of WRITEBACKIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that should be + * decref'd on deletion + * + * For WRITEBACKIFCOPY flag this is an + * array to-be-updated upon calling + * PyArray_ResolveWritebackIfCopy + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. 
+ */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This is not used internally. */ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional + * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements + * and the array is contiguous if ndarray.squeeze() is contiguous. + * I.e. dimensions for which `ndarray.shape[dimension] == 1` are + * ignored. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. 
+ */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropriate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when PyArray_ResolveWritebackIfCopy is called. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 /* Deprecated in 1.14 */ +#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. + */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. + */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +/* the variable is used in some places, so always define it */ +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do { if (_save) \ + { PyEval_RestoreThread(_save); _save = NULL;} } while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define 
NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 +/* + * If output operands overlap with other operands (based on heuristics that + * has false positives but no false negatives), make temporary copies to + * eliminate overlap. + */ +#define NPY_ITER_COPY_IF_OVERLAP 0x00002000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 +/* Assume iterator order data access for COPY_IF_OVERLAP */ +#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + 
(it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp) (ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to PyArray_Broadcast must be binary compatible + * with this structure. 
+ */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + + +/* + * Store the information needed for fancy-indexing over an array. The + * fields are slightly unordered to keep consec, dataptr and subspace + * where they were originally. + */ +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; /* number of index-array + iterators */ + npy_intp size; /* size of broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + NpyIter *outer; /* index objects + iterator */ + void *unused[NPY_MAXDIMS - 2]; + PyArrayObject *array; + /* Flat iterator for the indexed array. For compatibility solely. */ + PyArrayIterObject *ait; + + /* + * Subspace array. For binary compatibility (was an iterator, + * but only the check for NULL should be used). + */ + PyArrayObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + npy_intp fancy_strides[NPY_MAXDIMS]; + + /* pointer when all fancy indices are 0 */ + char *baseoffset; + + /* + * after binding consec denotes at which axis the fancy axes + * are inserted. + */ + int consec; + char *dataptr; + + int nd_fancy; + npy_intp fancy_dims[NPY_MAXDIMS]; + + /* Whether the iterator (any of the iterators) requires API */ + int needs_api; + + /* + * Extra op information. + */ + PyArrayObject *extra_op; + PyArray_Descr *extra_op_dtype; /* desired dtype */ + npy_uint32 *extra_op_flags; /* Iterator flags */ + + NpyIter *extra_op_iter; + NpyIter_IterNextFunc *extra_op_next; + char **extra_op_ptrs; + + /* + * Information about the iteration state. 
+ */ + NpyIter_IterNextFunc *outer_next; + char **outer_ptrs; + npy_intp *outer_strides; + + /* + * Information about the subspace iterator. + */ + NpyIter *subspace_iter; + NpyIter_IterNextFunc *subspace_next; + char **subspace_ptrs; + npy_intp *subspace_strides; + + /* Count for the external loop (which ever it is) for API iteration */ + npy_intp iter_count; + +} PyArrayMapIterObject; + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS]; + + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static NPY_INLINE int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define _NPY_INCLUDE_NEIGHBORHOOD_IMP +#include "_neighborhood_iterator_imp.h" +#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) +/* + * Changing access macros into functions, to allow for future hiding + * of the internal memory layout. This later hiding will allow the 2.x series + * to change the internal representation of arrays without affecting + * ABI compatibility. + */ + +static NPY_INLINE int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static NPY_INLINE void * +PyArray_DATA(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE char * +PyArray_BYTES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE npy_intp * +PyArray_DIMS(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static NPY_INLINE npy_intp * +PyArray_STRIDES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static NPY_INLINE npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static NPY_INLINE npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject * +PyArray_BASE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static NPY_INLINE NPY_RETURNS_BORROWED_REF PyArray_Descr * +PyArray_DESCR(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + +static NPY_INLINE npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->elsize; +} + +static NPY_INLINE int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static NPY_INLINE int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & flags) == flags; +} + +static NPY_INLINE PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return ((PyArrayObject_fields *)arr)->descr->f->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +static NPY_INLINE int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return ((PyArrayObject_fields *)arr)->descr->f->setitem( + v, itemptr, arr); +} + +#else + +/* These macros are deprecated as of NumPy 1.7. 
*/ +#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) +#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) +#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) +#define PyArray_ITEMSIZE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->type_num) +#define PyArray_GETITEM(obj,itemptr) \ + PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) +#endif + +static NPY_INLINE PyArray_Descr * +PyArray_DTYPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE npy_intp * +PyArray_SHAPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. 
+ */ +static NPY_INLINE void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || \ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) +#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) +#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0) +#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) 
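+
+/*
+ * A minimal usage sketch (illustration only; `arr` is a hypothetical
+ * PyArrayObject * and is not declared in this header). The three macro
+ * families above are layered views of one test: PyArray_IS* reads the
+ * type number through PyArray_TYPE, PyDataType_IS* reads it from a
+ * descriptor, and PyTypeNum_IS* tests the number directly, so the
+ * following checks are equivalent:
+ *
+ *     PyArray_ISFLOAT(arr)
+ *     PyDataType_ISFLOAT(PyArray_DESCR(arr))
+ *     PyTypeNum_ISFLOAT(PyArray_TYPE(arr))
+ */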
+#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) +#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) + +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's returned pointed by the + * PyCObject attribute of an array __array_struct__. See + * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. + ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ + + int nd; /* number of dimensions */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + + int itemsize; /* size of each element */ + + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). 
ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + +/* + * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. + * See the documentation for PyDataMem_SetEventHook. + */ +typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, + void *user_data); + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." +#endif +#define NPY_DEPRECATED_INCLUDES +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +#include "npy_1_7_deprecated_api.h" +#endif +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. + * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + */ +#undef NPY_DEPRECATED_INCLUDES + +#endif /* NPY_ARRAYTYPES_H */ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/noprefix.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/noprefix.h new file mode 100644 index 0000000..041f301 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/noprefix.h @@ -0,0 +1,212 @@ +#ifndef NPY_NOPREFIX_H +#define NPY_NOPREFIX_H + +/* + * You can directly include noprefix.h as a backward + * compatibility measure + */ +#ifndef NPY_NO_PREFIX +#include "ndarrayobject.h" +#include "npy_interrupt.h" +#endif + +#define SIGSETJMP NPY_SIGSETJMP +#define SIGLONGJMP NPY_SIGLONGJMP +#define SIGJMP_BUF NPY_SIGJMP_BUF + +#define MAX_DIMS NPY_MAXDIMS + +#define longlong npy_longlong +#define ulonglong npy_ulonglong +#define Bool npy_bool +#define longdouble npy_longdouble +#define byte npy_byte + +#ifndef _BSD_SOURCE +#define ushort npy_ushort +#define uint npy_uint +#define ulong npy_ulong +#endif + +#define ubyte npy_ubyte +#define ushort npy_ushort +#define uint npy_uint +#define ulong npy_ulong +#define cfloat npy_cfloat +#define cdouble npy_cdouble +#define clongdouble npy_clongdouble +#define Int8 npy_int8 +#define UInt8 npy_uint8 +#define Int16 npy_int16 +#define UInt16 npy_uint16 +#define Int32 npy_int32 +#define UInt32 npy_uint32 +#define Int64 npy_int64 +#define UInt64 npy_uint64 +#define Int128 npy_int128 +#define UInt128 npy_uint128 +#define Int256 npy_int256 +#define UInt256 npy_uint256 +#define Float16 npy_float16 +#define Complex32 npy_complex32 +#define Float32 npy_float32 +#define Complex64 npy_complex64 +#define Float64 npy_float64 +#define Complex128 npy_complex128 +#define Float80 npy_float80 +#define Complex160 npy_complex160 +#define Float96 npy_float96 +#define Complex192 npy_complex192 +#define Float128 npy_float128 +#define Complex256 npy_complex256 +#define intp npy_intp +#define uintp npy_uintp +#define datetime npy_datetime +#define timedelta npy_timedelta + +#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG +#define 
SIZEOF_INTP NPY_SIZEOF_INTP +#define SIZEOF_UINTP NPY_SIZEOF_UINTP +#define SIZEOF_HALF NPY_SIZEOF_HALF +#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE +#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME +#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA + +#define LONGLONG_FMT NPY_LONGLONG_FMT +#define ULONGLONG_FMT NPY_ULONGLONG_FMT +#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX +#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX + +#define MAX_INT8 127 +#define MIN_INT8 -128 +#define MAX_UINT8 255 +#define MAX_INT16 32767 +#define MIN_INT16 -32768 +#define MAX_UINT16 65535 +#define MAX_INT32 2147483647 +#define MIN_INT32 (-MAX_INT32 - 1) +#define MAX_UINT32 4294967295U +#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) +#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) +#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) +#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) +#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) +#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) +#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) +#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) +#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) + +#define MAX_BYTE NPY_MAX_BYTE +#define MIN_BYTE NPY_MIN_BYTE +#define MAX_UBYTE NPY_MAX_UBYTE +#define MAX_SHORT NPY_MAX_SHORT +#define MIN_SHORT NPY_MIN_SHORT +#define MAX_USHORT NPY_MAX_USHORT +#define MAX_INT NPY_MAX_INT +#define MIN_INT NPY_MIN_INT +#define MAX_UINT NPY_MAX_UINT +#define MAX_LONG NPY_MAX_LONG +#define MIN_LONG NPY_MIN_LONG +#define MAX_ULONG NPY_MAX_ULONG +#define MAX_LONGLONG NPY_MAX_LONGLONG +#define MIN_LONGLONG NPY_MIN_LONGLONG +#define MAX_ULONGLONG NPY_MAX_ULONGLONG +#define MIN_DATETIME NPY_MIN_DATETIME +#define MAX_DATETIME NPY_MAX_DATETIME +#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA +#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA + +#define BITSOF_BOOL NPY_BITSOF_BOOL +#define BITSOF_CHAR NPY_BITSOF_CHAR +#define BITSOF_SHORT NPY_BITSOF_SHORT +#define BITSOF_INT NPY_BITSOF_INT +#define BITSOF_LONG NPY_BITSOF_LONG +#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG +#define BITSOF_HALF NPY_BITSOF_HALF +#define BITSOF_FLOAT NPY_BITSOF_FLOAT +#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE +#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE +#define BITSOF_DATETIME NPY_BITSOF_DATETIME +#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA + +#define _pya_malloc PyArray_malloc +#define _pya_free PyArray_free +#define _pya_realloc PyArray_realloc + +#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF +#define BEGIN_THREADS NPY_BEGIN_THREADS +#define END_THREADS NPY_END_THREADS +#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF +#define ALLOW_C_API NPY_ALLOW_C_API +#define DISABLE_C_API NPY_DISABLE_C_API + +#define PY_FAIL NPY_FAIL +#define PY_SUCCEED NPY_SUCCEED + +#ifndef TRUE +#define TRUE NPY_TRUE +#endif + +#ifndef FALSE +#define FALSE NPY_FALSE +#endif + +#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT + +#define CONTIGUOUS NPY_CONTIGUOUS +#define C_CONTIGUOUS NPY_C_CONTIGUOUS +#define FORTRAN NPY_FORTRAN +#define F_CONTIGUOUS NPY_F_CONTIGUOUS +#define OWNDATA NPY_OWNDATA +#define FORCECAST NPY_FORCECAST +#define ENSURECOPY NPY_ENSURECOPY +#define ENSUREARRAY NPY_ENSUREARRAY +#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES +#define ALIGNED NPY_ALIGNED +#define NOTSWAPPED NPY_NOTSWAPPED +#define WRITEABLE NPY_WRITEABLE +#define UPDATEIFCOPY NPY_UPDATEIFCOPY +#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY 
+#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR +#define BEHAVED NPY_BEHAVED +#define BEHAVED_NS NPY_BEHAVED_NS +#define CARRAY NPY_CARRAY +#define CARRAY_RO NPY_CARRAY_RO +#define FARRAY NPY_FARRAY +#define FARRAY_RO NPY_FARRAY_RO +#define DEFAULT NPY_DEFAULT +#define IN_ARRAY NPY_IN_ARRAY +#define OUT_ARRAY NPY_OUT_ARRAY +#define INOUT_ARRAY NPY_INOUT_ARRAY +#define IN_FARRAY NPY_IN_FARRAY +#define OUT_FARRAY NPY_OUT_FARRAY +#define INOUT_FARRAY NPY_INOUT_FARRAY +#define UPDATE_ALL NPY_UPDATE_ALL + +#define OWN_DATA NPY_OWNDATA +#define BEHAVED_FLAGS NPY_BEHAVED +#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS +#define CARRAY_FLAGS_RO NPY_CARRAY_RO +#define CARRAY_FLAGS NPY_CARRAY +#define FARRAY_FLAGS NPY_FARRAY +#define FARRAY_FLAGS_RO NPY_FARRAY_RO +#define DEFAULT_FLAGS NPY_DEFAULT +#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS + +#ifndef MIN +#define MIN PyArray_MIN +#endif +#ifndef MAX +#define MAX PyArray_MAX +#endif +#define MAX_INTP NPY_MAX_INTP +#define MIN_INTP NPY_MIN_INTP +#define MAX_UINTP NPY_MAX_UINTP +#define INTP_FMT NPY_INTP_FMT + +#ifndef PYPY_VERSION +#define REFCOUNT PyArray_REFCOUNT +#define MAX_ELSIZE NPY_MAX_ELSIZE +#endif + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h new file mode 100644 index 0000000..a6ee212 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h @@ -0,0 +1,133 @@ +#ifndef _NPY_1_7_DEPRECATED_API_H +#define _NPY_1_7_DEPRECATED_API_H + +#ifndef NPY_DEPRECATED_INCLUDES +#error "Should never include npy_*_*_deprecated_api directly." +#endif + +/* Emit a warning if the user did not specifically request the old API */ +#ifndef NPY_NO_DEPRECATED_API +#if defined(_WIN32) +#define _WARN___STR2__(x) #x +#define _WARN___STR1__(x) _WARN___STR2__(x) +#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " +#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ + "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") +#elif defined(__GNUC__) +#warning "Using deprecated NumPy API, disable it with " \ + "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" +#endif +/* TODO: How to do this warning message for other compilers? */ +#endif + +/* + * This header exists to collect all dangerous/deprecated NumPy API + * as of NumPy 1.7. + * + * This is an attempt to remove bad API, the proliferation of macros, + * and namespace pollution currently produced by the NumPy headers. + */ + +/* These array flags are deprecated as of NumPy 1.7 */ +#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS +#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS + +/* + * The consistent NPY_ARRAY_* names which don't pollute the NPY_* + * namespace were added in NumPy 1.7. + * + * These versions of the carray flags are deprecated, but + * probably should only be removed after two releases instead of one. 
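+ *
+ * A minimal sketch of the intended consumer-side usage (an
+ * illustration, not text from the upstream header): defining the
+ * opt-out macro named in the warning above, before any NumPy
+ * include, suppresses this whole compatibility layer, e.g.
+ *
+ *     #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ *     #include <numpy/arrayobject.h>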
+ */ +#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS +#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS +#define NPY_OWNDATA NPY_ARRAY_OWNDATA +#define NPY_FORCECAST NPY_ARRAY_FORCECAST +#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY +#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY +#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES +#define NPY_ALIGNED NPY_ARRAY_ALIGNED +#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED +#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE +#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY +#define NPY_BEHAVED NPY_ARRAY_BEHAVED +#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS +#define NPY_CARRAY NPY_ARRAY_CARRAY +#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO +#define NPY_FARRAY NPY_ARRAY_FARRAY +#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO +#define NPY_DEFAULT NPY_ARRAY_DEFAULT +#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY +#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY +#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY +#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY +#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY +#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY +#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL + +/* This way of accessing the default type is deprecated as of NumPy 1.7 */ +#define PyArray_DEFAULT NPY_DEFAULT_TYPE + +/* These DATETIME bits aren't used internally */ +#if PY_VERSION_HEX >= 0x03000000 +#define PyDataType_GetDatetimeMetaData(descr) \ + ((descr->metadata == NULL) ? NULL : \ + ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ + PyDict_GetItemString( \ + descr->metadata, NPY_METADATA_DTSTR), NULL)))) +#else +#define PyDataType_GetDatetimeMetaData(descr) \ + ((descr->metadata == NULL) ? NULL : \ + ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \ + PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) +#endif + +/* + * Deprecated as of NumPy 1.7, this kind of shortcut doesn't + * belong in the public API. + */ +#define NPY_AO PyArrayObject + +/* + * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't + * belong in the public API. + */ +#define fortran fortran_ + +/* + * Deprecated as of NumPy 1.7, as it is a namespace-polluting + * macro. + */ +#define FORTRAN_IF PyArray_FORTRAN_IF + +/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */ +#define NPY_METADATA_DTSTR "__timeunit__" + +/* + * Deprecated as of NumPy 1.7. + * The reasoning: + * - These are for datetime, but there's no datetime "namespace". + * - They just turn NPY_STR_ into "", which is just + * making something simple be indirected. + */ +#define NPY_STR_Y "Y" +#define NPY_STR_M "M" +#define NPY_STR_W "W" +#define NPY_STR_D "D" +#define NPY_STR_h "h" +#define NPY_STR_m "m" +#define NPY_STR_s "s" +#define NPY_STR_ms "ms" +#define NPY_STR_us "us" +#define NPY_STR_ns "ns" +#define NPY_STR_ps "ps" +#define NPY_STR_fs "fs" +#define NPY_STR_as "as" + +/* + * The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be + * removed in the next major release. + */ +#include "old_defines.h" + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h new file mode 100644 index 0000000..832bc05 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h @@ -0,0 +1,577 @@ +/* + * This is a convenience header file providing compatibility utilities + * for supporting Python 2 and Python 3 in the same code base. + * + * If you want to use this for your own projects, it's recommended to make a + * copy of it. 
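+ *
+ * A hedged sketch of what this header buys a consumer (illustrative
+ * only, not upstream text): after
+ *
+ *     #include "numpy/npy_3kcompat.h"
+ *
+ * extension code written against the Python 2 names keeps compiling
+ * under Python 3; PyString_FromString("abc"), for example, expands
+ * to PyBytes_FromString("abc") there via the defines below.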
Although the stuff below is unlikely to change, we don't provide + * strong backwards compatibility guarantees at the moment. + */ + +#ifndef _NPY_3KCOMPAT_H_ +#define _NPY_3KCOMPAT_H_ + +#include +#include + +#if PY_VERSION_HEX >= 0x03000000 +#ifndef NPY_PY3K +#define NPY_PY3K 1 +#endif +#endif + +#include "numpy/npy_common.h" +#include "numpy/ndarrayobject.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * PyInt -> PyLong + */ + +#if defined(NPY_PY3K) +/* Return True only if the long fits in a C long */ +static NPY_INLINE int PyInt_Check(PyObject *op) { + int overflow = 0; + if (!PyLong_Check(op)) { + return 0; + } + PyLong_AsLongAndOverflow(op, &overflow); + return (overflow == 0); +} + +#define PyInt_FromLong PyLong_FromLong +#define PyInt_AsLong PyLong_AsLong +#define PyInt_AS_LONG PyLong_AsLong +#define PyInt_AsSsize_t PyLong_AsSsize_t + +/* NOTE: + * + * Since the PyLong type is very different from the fixed-range PyInt, + * we don't define PyInt_Type -> PyLong_Type. + */ +#endif /* NPY_PY3K */ + +/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */ +#ifdef NPY_PY3K +# define NpySlice_GetIndicesEx PySlice_GetIndicesEx +#else +# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \ + PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength) +#endif + +/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */ +#if (PY_VERSION_HEX < 0x02070B00) || \ + ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400)) + #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x)) +#else + #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) +#endif + +/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */ +#if PY_VERSION_HEX < 0x03050200 + #define Py_SETREF(op, op2) \ + do { \ + PyObject *_py_tmp = (PyObject *)(op); \ + (op) = (op2); \ + Py_DECREF(_py_tmp); \ + } while (0) +#endif + +/* + * PyString -> PyBytes + */ + +#if defined(NPY_PY3K) + +#define PyString_Type PyBytes_Type +#define PyString_Check PyBytes_Check +#define PyStringObject PyBytesObject +#define PyString_FromString PyBytes_FromString +#define PyString_FromStringAndSize PyBytes_FromStringAndSize +#define PyString_AS_STRING PyBytes_AS_STRING +#define PyString_AsStringAndSize PyBytes_AsStringAndSize +#define PyString_FromFormat PyBytes_FromFormat +#define PyString_Concat PyBytes_Concat +#define PyString_ConcatAndDel PyBytes_ConcatAndDel +#define PyString_AsString PyBytes_AsString +#define PyString_GET_SIZE PyBytes_GET_SIZE +#define PyString_Size PyBytes_Size + +#define PyUString_Type PyUnicode_Type +#define PyUString_Check PyUnicode_Check +#define PyUStringObject PyUnicodeObject +#define PyUString_FromString PyUnicode_FromString +#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize +#define PyUString_FromFormat PyUnicode_FromFormat +#define PyUString_Concat PyUnicode_Concat2 +#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel +#define PyUString_GET_SIZE PyUnicode_GET_SIZE +#define PyUString_Size PyUnicode_Size +#define PyUString_InternFromString PyUnicode_InternFromString +#define PyUString_Format PyUnicode_Format + +#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) + +#else + +#define PyBytes_Type PyString_Type +#define PyBytes_Check PyString_Check +#define PyBytesObject PyStringObject +#define PyBytes_FromString PyString_FromString +#define PyBytes_FromStringAndSize PyString_FromStringAndSize +#define PyBytes_AS_STRING PyString_AS_STRING +#define PyBytes_AsStringAndSize 
PyString_AsStringAndSize +#define PyBytes_FromFormat PyString_FromFormat +#define PyBytes_Concat PyString_Concat +#define PyBytes_ConcatAndDel PyString_ConcatAndDel +#define PyBytes_AsString PyString_AsString +#define PyBytes_GET_SIZE PyString_GET_SIZE +#define PyBytes_Size PyString_Size + +#define PyUString_Type PyString_Type +#define PyUString_Check PyString_Check +#define PyUStringObject PyStringObject +#define PyUString_FromString PyString_FromString +#define PyUString_FromStringAndSize PyString_FromStringAndSize +#define PyUString_FromFormat PyString_FromFormat +#define PyUString_Concat PyString_Concat +#define PyUString_ConcatAndDel PyString_ConcatAndDel +#define PyUString_GET_SIZE PyString_GET_SIZE +#define PyUString_Size PyString_Size +#define PyUString_InternFromString PyString_InternFromString +#define PyUString_Format PyString_Format + +#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) + +#endif /* NPY_PY3K */ + + +static NPY_INLINE void +PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) +{ + Py_SETREF(*left, PyUnicode_Concat(*left, right)); + Py_DECREF(right); +} + +static NPY_INLINE void +PyUnicode_Concat2(PyObject **left, PyObject *right) +{ + Py_SETREF(*left, PyUnicode_Concat(*left, right)); +} + +/* + * PyFile_* compatibility + */ + +/* + * Get a FILE* handle to the file represented by the Python object + */ +static NPY_INLINE FILE* +npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) +{ + int fd, fd2, unbuf; + PyObject *ret, *os, *io, *io_raw; + npy_off_t pos; + FILE *handle; + + /* For Python 2 PyFileObject, use PyFile_AsFile */ +#if !defined(NPY_PY3K) + if (PyFile_Check(file)) { + return PyFile_AsFile(file); + } +#endif + + /* Flush first to ensure things end up in the file in the correct order */ + ret = PyObject_CallMethod(file, "flush", ""); + if (ret == NULL) { + return NULL; + } + Py_DECREF(ret); + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return NULL; + } + + /* + * The handle needs to be dup'd because we have to call fclose + * at the end + */ + os = PyImport_ImportModule("os"); + if (os == NULL) { + return NULL; + } + ret = PyObject_CallMethod(os, "dup", "i", fd); + Py_DECREF(os); + if (ret == NULL) { + return NULL; + } + fd2 = PyNumber_AsSsize_t(ret, NULL); + Py_DECREF(ret); + + /* Convert to FILE* handle */ +#ifdef _WIN32 + handle = _fdopen(fd2, mode); +#else + handle = fdopen(fd2, mode); +#endif + if (handle == NULL) { + PyErr_SetString(PyExc_IOError, + "Getting a FILE* from a Python file object failed"); + return NULL; + } + + /* Record the original raw file handle position */ + *orig_pos = npy_ftell(handle); + if (*orig_pos == -1) { + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + fclose(handle); + return NULL; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + fclose(handle); + return NULL; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return handle; + } + else { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + fclose(handle); + return NULL; + } + } + + /* Seek raw handle to the Python-side position */ + ret = PyObject_CallMethod(file, "tell", ""); + if (ret == NULL) { + fclose(handle); + return NULL; + } + pos = PyLong_AsLongLong(ret); + Py_DECREF(ret); + if (PyErr_Occurred()) { + fclose(handle); + return NULL; 
+ } + if (npy_fseek(handle, pos, SEEK_SET) == -1) { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + fclose(handle); + return NULL; + } + return handle; +} + +/* + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static NPY_INLINE int +npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) +{ + int fd, unbuf; + PyObject *ret, *io, *io_raw; + npy_off_t position; + + /* For Python 2 PyFileObject, do nothing */ +#if !defined(NPY_PY3K) + if (PyFile_Check(file)) { + return 0; + } +#endif + + position = npy_ftell(handle); + + /* Close the FILE* handle */ + fclose(handle); + + /* + * Restore original file handle position, in order to not confuse + * Python-side data structures + */ + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return -1; + } + + if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { + + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + return -1; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + return -1; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return 0; + } + else { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + return -1; + } + } + + if (position == -1) { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + return -1; + } + + /* Seek Python-side handle to the FILE* handle position */ + ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +static NPY_INLINE int +npy_PyFile_Check(PyObject *file) +{ + int fd; + /* For Python 2, check if it is a PyFileObject */ +#if !defined(NPY_PY3K) + if (PyFile_Check(file)) { + return 1; + } +#endif + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + PyErr_Clear(); + return 0; + } + return 1; +} + +static NPY_INLINE PyObject* +npy_PyFile_OpenFile(PyObject *filename, const char *mode) +{ + PyObject *open; + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + if (open == NULL) { + return NULL; + } + return PyObject_CallFunction(open, "Os", filename, mode); +} + +static NPY_INLINE int +npy_PyFile_CloseFile(PyObject *file) +{ + PyObject *ret; + + ret = PyObject_CallMethod(file, "close", NULL); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + + +/* This is a copy of _PyErr_ChainExceptions + */ +static NPY_INLINE void +npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + /* only py3 supports this anyway */ + #ifdef NPY_PY3K + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetContext(val2, val); + PyErr_Restore(exc2, val2, tb2); + #endif + } + else { + PyErr_Restore(exc, val, tb); + } +} + + +/* This is a copy of _PyErr_ChainExceptions, with: + * - a minimal implementation for python 2 + * - __cause__ used instead of __context__ + */ +static NPY_INLINE void +npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + /* only py3 supports this anyway */ + #ifdef NPY_PY3K + PyObject *exc2, 
*val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetCause(val2, val); + PyErr_Restore(exc2, val2, tb2); + #endif + } + else { + PyErr_Restore(exc, val, tb); + } +} + +/* + * PyObject_Cmp + */ +#if defined(NPY_PY3K) +static NPY_INLINE int +PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) +{ + int v; + v = PyObject_RichCompareBool(i1, i2, Py_LT); + if (v == 1) { + *cmp = -1; + return 1; + } + else if (v == -1) { + return -1; + } + + v = PyObject_RichCompareBool(i1, i2, Py_GT); + if (v == 1) { + *cmp = 1; + return 1; + } + else if (v == -1) { + return -1; + } + + v = PyObject_RichCompareBool(i1, i2, Py_EQ); + if (v == 1) { + *cmp = 0; + return 1; + } + else { + *cmp = 0; + return -1; + } +} +#endif + +/* + * PyCObject functions adapted to PyCapsules. + * + * The main job here is to get rid of the improved error handling + * of PyCapsules. It's a shame... + */ +#if PY_VERSION_HEX >= 0x03000000 + +static NPY_INLINE PyObject * +NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) +{ + PyObject *ret = PyCapsule_New(ptr, NULL, dtor); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static NPY_INLINE PyObject * +NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) +{ + PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); + if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { + PyErr_Clear(); + Py_DECREF(ret); + ret = NULL; + } + return ret; +} + +static NPY_INLINE void * +NpyCapsule_AsVoidPtr(PyObject *obj) +{ + void *ret = PyCapsule_GetPointer(obj, NULL); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static NPY_INLINE void * +NpyCapsule_GetDesc(PyObject *obj) +{ + return PyCapsule_GetContext(obj); +} + +static NPY_INLINE int +NpyCapsule_Check(PyObject *ptr) +{ + return PyCapsule_CheckExact(ptr); +} + +#else + +static NPY_INLINE PyObject * +NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)) +{ + return PyCObject_FromVoidPtr(ptr, dtor); +} + +static NPY_INLINE PyObject * +NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, + void (*dtor)(void *, void *)) +{ + return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor); +} + +static NPY_INLINE void * +NpyCapsule_AsVoidPtr(PyObject *ptr) +{ + return PyCObject_AsVoidPtr(ptr); +} + +static NPY_INLINE void * +NpyCapsule_GetDesc(PyObject *obj) +{ + return PyCObject_GetDesc(obj); +} + +static NPY_INLINE int +NpyCapsule_Check(PyObject *ptr) +{ + return PyCObject_Check(ptr); +} + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _NPY_3KCOMPAT_H_ */ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_common.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_common.h new file mode 100644 index 0000000..64aaaac --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_common.h @@ -0,0 +1,1083 @@ +#ifndef _NPY_COMMON_H_ +#define _NPY_COMMON_H_ + +/* numpconfig.h is auto-generated */ +#include "numpyconfig.h" +#ifdef HAVE_NPY_CONFIG_H +#include +#endif + +/* need Python.h for npy_intp, npy_uintp */ +#include + +/* + * using static inline modifiers when defining npy_math functions + * allows the compiler to make optimizations when possible + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD +#ifndef NPY_INLINE_MATH +#define NPY_INLINE_MATH 1 +#endif +#endif + +/* + * gcc does not unroll even 
with -O3 + * use with care, unrolling on modern cpus rarely speeds things up + */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS +#define NPY_GCC_UNROLL_LOOPS \ + __attribute__((optimize("unroll-loops"))) +#else +#define NPY_GCC_UNROLL_LOOPS +#endif + +/* highest gcc optimization level, enabled autovectorizer */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 +#define NPY_GCC_OPT_3 __attribute__((optimize("O3"))) +#else +#define NPY_GCC_OPT_3 +#endif + +/* compile target attributes */ +#if defined HAVE_ATTRIBUTE_TARGET_AVX && defined HAVE_LINK_AVX +#define NPY_GCC_TARGET_AVX __attribute__((target("avx"))) +#else +#define NPY_GCC_TARGET_AVX +#endif +#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2 +#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2"))) +#else +#define NPY_GCC_TARGET_AVX2 +#endif + +/* + * mark an argument (starting from 1) that must not be NULL and is not checked + * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check + */ +#ifdef HAVE_ATTRIBUTE_NONNULL +#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n))) +#else +#define NPY_GCC_NONNULL(n) +#endif + +#if defined HAVE_XMMINTRIN_H && defined HAVE__MM_LOAD_PS +#define NPY_HAVE_SSE_INTRINSICS +#endif + +#if defined HAVE_EMMINTRIN_H && defined HAVE__MM_LOAD_PD +#define NPY_HAVE_SSE2_INTRINSICS +#endif + +/* + * give a hint to the compiler which branch is more likely or unlikely + * to occur, e.g. rare error cases: + * + * if (NPY_UNLIKELY(failure == 0)) + * return NULL; + * + * the double !! is to cast the expression (e.g. NULL) to a boolean required by + * the intrinsic + */ +#ifdef HAVE___BUILTIN_EXPECT +#define NPY_LIKELY(x) __builtin_expect(!!(x), 1) +#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define NPY_LIKELY(x) (x) +#define NPY_UNLIKELY(x) (x) +#endif + +#ifdef HAVE___BUILTIN_PREFETCH +/* unlike _mm_prefetch also works on non-x86 */ +#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc)) +#else +#ifdef HAVE__MM_PREFETCH +/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */ +#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \ + (loc == 1 ? _MM_HINT_T2 : \ + (loc == 2 ? _MM_HINT_T1 : \ + (loc == 3 ? _MM_HINT_T0 : -1)))) +#else +#define NPY_PREFETCH(x, rw,loc) +#endif +#endif + +#if defined(_MSC_VER) + #define NPY_INLINE __inline +#elif defined(__GNUC__) + #if defined(__STRICT_ANSI__) + #define NPY_INLINE __inline__ + #else + #define NPY_INLINE inline + #endif +#else + #define NPY_INLINE +#endif + +#ifdef HAVE___THREAD + #define NPY_TLS __thread +#else + #ifdef HAVE___DECLSPEC_THREAD_ + #define NPY_TLS __declspec(thread) + #else + #define NPY_TLS + #endif +#endif + +#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE + #define NPY_RETURNS_BORROWED_REF \ + __attribute__((cpychecker_returns_borrowed_ref)) +#else + #define NPY_RETURNS_BORROWED_REF +#endif + +#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE + #define NPY_STEALS_REF_TO_ARG(n) \ + __attribute__((cpychecker_steals_reference_to_arg(n))) +#else + #define NPY_STEALS_REF_TO_ARG(n) +#endif + +/* 64 bit file position support, also on win-amd64. 
Ticket #1660 */ +#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \ + defined(__MINGW32__) || defined(__MINGW64__) + #include + +/* mingw based on 3.4.5 has lseek but not ftell/fseek */ +#if defined(__MINGW32__) || defined(__MINGW64__) +extern int __cdecl _fseeki64(FILE *, long long, int); +extern long long __cdecl _ftelli64(FILE *); +#endif + + #define npy_fseek _fseeki64 + #define npy_ftell _ftelli64 + #define npy_lseek _lseeki64 + #define npy_off_t npy_int64 + + #if NPY_SIZEOF_INT == 8 + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_LONG == 8 + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_LONGLONG == 8 + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#else +#ifdef HAVE_FSEEKO + #define npy_fseek fseeko +#else + #define npy_fseek fseek +#endif +#ifdef HAVE_FTELLO + #define npy_ftell ftello +#else + #define npy_ftell ftell +#endif + #include + #define npy_lseek lseek + #define npy_off_t off_t + + #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT + #define NPY_OFF_T_PYFMT "h" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#endif + +/* enums for detected endianness */ +enum { + NPY_CPU_UNKNOWN_ENDIAN, + NPY_CPU_LITTLE, + NPY_CPU_BIG +}; + +/* + * This is to typedef npy_intp to the appropriate pointer size for this + * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. + */ +typedef Py_intptr_t npy_intp; +typedef Py_uintptr_t npy_uintp; + +/* + * Define sizes that were not defined in numpyconfig.h. + */ +#define NPY_SIZEOF_CHAR 1 +#define NPY_SIZEOF_BYTE 1 +#define NPY_SIZEOF_DATETIME 8 +#define NPY_SIZEOF_TIMEDELTA 8 +#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T +#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T +#define NPY_SIZEOF_HALF 2 +#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT +#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE +#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE + +#ifdef constchar +#undef constchar +#endif + +#define NPY_SSIZE_T_PYFMT "n" +#define constchar char + +/* NPY_INTP_FMT Note: + * Unlike the other NPY_*_FMT macros which are used with + * PyOS_snprintf, NPY_INTP_FMT is used with PyErr_Format and + * PyString_Format. These functions use different formatting + * codes which are portably specified according to the Python + * documentation. See ticket #1795. 
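+ *
+ * A short usage sketch (illustrative, not upstream text): the macro
+ * is spliced into the format string by string-literal concatenation,
+ *
+ *     npy_intp n = PyArray_SIZE(arr);
+ *     PyErr_Format(PyExc_ValueError,
+ *                  "unexpected size %" NPY_INTP_FMT, n);
+ *
+ * where arr is assumed to be some PyArrayObject *.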
+ */ +#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT + #define NPY_INTP NPY_INT + #define NPY_UINTP NPY_UINT + #define PyIntpArrType_Type PyIntArrType_Type + #define PyUIntpArrType_Type PyUIntArrType_Type + #define NPY_MAX_INTP NPY_MAX_INT + #define NPY_MIN_INTP NPY_MIN_INT + #define NPY_MAX_UINTP NPY_MAX_UINT + #define NPY_INTP_FMT "d" +#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG + #define NPY_INTP NPY_LONG + #define NPY_UINTP NPY_ULONG + #define PyIntpArrType_Type PyLongArrType_Type + #define PyUIntpArrType_Type PyULongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONG + #define NPY_MIN_INTP NPY_MIN_LONG + #define NPY_MAX_UINTP NPY_MAX_ULONG + #define NPY_INTP_FMT "ld" +#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG) + #define NPY_INTP NPY_LONGLONG + #define NPY_UINTP NPY_ULONGLONG + #define PyIntpArrType_Type PyLongLongArrType_Type + #define PyUIntpArrType_Type PyULongLongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONGLONG + #define NPY_MIN_INTP NPY_MIN_LONGLONG + #define NPY_MAX_UINTP NPY_MAX_ULONGLONG + #define NPY_INTP_FMT "lld" +#endif + +/* + * We can only use C99 formats for npy_int_p if it is the same as + * intp_t, hence the condition on HAVE_UNITPTR_T + */ +#if (NPY_USE_C99_FORMATS) == 1 \ + && (defined HAVE_UINTPTR_T) \ + && (defined HAVE_INTTYPES_H) + #include + #undef NPY_INTP_FMT + #define NPY_INTP_FMT PRIdPTR +#endif + + +/* + * Some platforms don't define bool, long long, or long double. + * Handle that here. + */ +#define NPY_BYTE_FMT "hhd" +#define NPY_UBYTE_FMT "hhu" +#define NPY_SHORT_FMT "hd" +#define NPY_USHORT_FMT "hu" +#define NPY_INT_FMT "d" +#define NPY_UINT_FMT "u" +#define NPY_LONG_FMT "ld" +#define NPY_ULONG_FMT "lu" +#define NPY_HALF_FMT "g" +#define NPY_FLOAT_FMT "g" +#define NPY_DOUBLE_FMT "g" + + +#ifdef PY_LONG_LONG +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +# ifdef _MSC_VER +# define NPY_LONGLONG_FMT "I64d" +# define NPY_ULONGLONG_FMT "I64u" +# else +# define NPY_LONGLONG_FMT "lld" +# define NPY_ULONGLONG_FMT "llu" +# endif +# ifdef _MSC_VER +# define NPY_LONGLONG_SUFFIX(x) (x##i64) +# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) +# else +# define NPY_LONGLONG_SUFFIX(x) (x##LL) +# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) +# endif +#else +typedef long npy_longlong; +typedef unsigned long npy_ulonglong; +# define NPY_LONGLONG_SUFFIX(x) (x##L) +# define NPY_ULONGLONG_SUFFIX(x) (x##UL) +#endif + + +typedef unsigned char npy_bool; +#define NPY_FALSE 0 +#define NPY_TRUE 1 + + +#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + typedef double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "g" +#else + typedef long double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "Lg" +#endif + +#ifndef Py_USING_UNICODE +#error Must use Python with unicode enabled. +#endif + + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +/* + * Hash value compatibility. + * As of Python 3.2 hash values are of type Py_hash_t. + * Previous versions use C long. 
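+ *
+ * For example (an illustrative sketch), a tp_hash slot can be
+ * declared portably against either convention with the typedef
+ * below:
+ *
+ *     static npy_hash_t my_hash(PyObject *self);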
+ */ +#if PY_VERSION_HEX < 0x03020000 +typedef long npy_hash_t; +#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG +#else +typedef Py_hash_t npy_hash_t; +#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP +#endif + +/* + * Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being + * able to do .real/.imag. Will have to convert code first. + */ +#if 0 +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE) +typedef complex npy_cdouble; +#else +typedef struct { double real, imag; } npy_cdouble; +#endif + +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT) +typedef complex float npy_cfloat; +#else +typedef struct { float real, imag; } npy_cfloat; +#endif + +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE) +typedef complex long double npy_clongdouble; +#else +typedef struct {npy_longdouble real, imag;} npy_clongdouble; +#endif +#endif +#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE +#error npy_cdouble definition is not compatible with C99 complex definition ! \ + Please contact NumPy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { double real, imag; } npy_cdouble; + +#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT +#error npy_cfloat definition is not compatible with C99 complex definition ! \ + Please contact NumPy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { float real, imag; } npy_cfloat; + +#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE +#error npy_clongdouble definition is not compatible with C99 complex definition ! \ + Please contact NumPy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { npy_longdouble real, imag; } npy_clongdouble; + +/* + * numarray-style bit-width typedefs + */ +#define NPY_MAX_INT8 127 +#define NPY_MIN_INT8 -128 +#define NPY_MAX_UINT8 255 +#define NPY_MAX_INT16 32767 +#define NPY_MIN_INT16 -32768 +#define NPY_MAX_UINT16 65535 +#define NPY_MAX_INT32 2147483647 +#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) +#define NPY_MAX_UINT32 4294967295U +#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) +#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) +#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) +#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) +#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) +#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) +#define NPY_MIN_DATETIME NPY_MIN_INT64 +#define NPY_MAX_DATETIME NPY_MAX_INT64 +#define NPY_MIN_TIMEDELTA NPY_MIN_INT64 +#define NPY_MAX_TIMEDELTA NPY_MAX_INT64 + + /* Need to find the number of bits for each type and + make definitions accordingly. + + C states that sizeof(char) == 1 by definition + + So, just using the sizeof keyword won't help. + + It also looks like Python itself uses sizeof(char) quite a + bit, which by definition should be 1 all the time. 
+ + Idea: Make Use of CHAR_BIT which should tell us how many + BITS per CHARACTER + */ + + /* Include platform definitions -- These are in the C89/90 standard */ +#include +#define NPY_MAX_BYTE SCHAR_MAX +#define NPY_MIN_BYTE SCHAR_MIN +#define NPY_MAX_UBYTE UCHAR_MAX +#define NPY_MAX_SHORT SHRT_MAX +#define NPY_MIN_SHORT SHRT_MIN +#define NPY_MAX_USHORT USHRT_MAX +#define NPY_MAX_INT INT_MAX +#ifndef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#endif +#define NPY_MIN_INT INT_MIN +#define NPY_MAX_UINT UINT_MAX +#define NPY_MAX_LONG LONG_MAX +#define NPY_MIN_LONG LONG_MIN +#define NPY_MAX_ULONG ULONG_MAX + +#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT) +#define NPY_BITSOF_CHAR CHAR_BIT +#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT) +#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT) +#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT) +#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT) +#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT) +#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT) +#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT) +#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT) +#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT) +#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT) +#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT) +#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT) + +#if NPY_BITSOF_LONG == 8 +#define NPY_INT8 NPY_LONG +#define NPY_UINT8 NPY_ULONG + typedef long npy_int8; + typedef unsigned long npy_uint8; +#define PyInt8ScalarObject PyLongScalarObject +#define PyInt8ArrType_Type PyLongArrType_Type +#define PyUInt8ScalarObject PyULongScalarObject +#define PyUInt8ArrType_Type PyULongArrType_Type +#define NPY_INT8_FMT NPY_LONG_FMT +#define NPY_UINT8_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 16 +#define NPY_INT16 NPY_LONG +#define NPY_UINT16 NPY_ULONG + typedef long npy_int16; + typedef unsigned long npy_uint16; +#define PyInt16ScalarObject PyLongScalarObject +#define PyInt16ArrType_Type PyLongArrType_Type +#define PyUInt16ScalarObject PyULongScalarObject +#define PyUInt16ArrType_Type PyULongArrType_Type +#define NPY_INT16_FMT NPY_LONG_FMT +#define NPY_UINT16_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 32 +#define NPY_INT32 NPY_LONG +#define NPY_UINT32 NPY_ULONG + typedef long npy_int32; + typedef unsigned long npy_uint32; + typedef unsigned long npy_ucs4; +#define PyInt32ScalarObject PyLongScalarObject +#define PyInt32ArrType_Type PyLongArrType_Type +#define PyUInt32ScalarObject PyULongScalarObject +#define PyUInt32ArrType_Type PyULongArrType_Type +#define NPY_INT32_FMT NPY_LONG_FMT +#define NPY_UINT32_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 64 +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG + typedef long npy_int64; + typedef unsigned long npy_uint64; +#define PyInt64ScalarObject PyLongScalarObject +#define PyInt64ArrType_Type PyLongArrType_Type +#define PyUInt64ScalarObject PyULongScalarObject +#define PyUInt64ArrType_Type PyULongArrType_Type +#define NPY_INT64_FMT NPY_LONG_FMT +#define NPY_UINT64_FMT NPY_ULONG_FMT +#define MyPyLong_FromInt64 PyLong_FromLong +#define MyPyLong_AsInt64 PyLong_AsLong +#elif NPY_BITSOF_LONG == 128 +#define NPY_INT128 NPY_LONG +#define NPY_UINT128 NPY_ULONG + typedef long npy_int128; + typedef unsigned long npy_uint128; +#define PyInt128ScalarObject 
PyLongScalarObject +#define PyInt128ArrType_Type PyLongArrType_Type +#define PyUInt128ScalarObject PyULongScalarObject +#define PyUInt128ArrType_Type PyULongArrType_Type +#define NPY_INT128_FMT NPY_LONG_FMT +#define NPY_UINT128_FMT NPY_ULONG_FMT +#endif + +#if NPY_BITSOF_LONGLONG == 8 +# ifndef NPY_INT8 +# define NPY_INT8 NPY_LONGLONG +# define NPY_UINT8 NPY_ULONGLONG + typedef npy_longlong npy_int8; + typedef npy_ulonglong npy_uint8; +# define PyInt8ScalarObject PyLongLongScalarObject +# define PyInt8ArrType_Type PyLongLongArrType_Type +# define PyUInt8ScalarObject PyULongLongScalarObject +# define PyUInt8ArrType_Type PyULongLongArrType_Type +#define NPY_INT8_FMT NPY_LONGLONG_FMT +#define NPY_UINT8_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT8 +# define NPY_MIN_LONGLONG NPY_MIN_INT8 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 +#elif NPY_BITSOF_LONGLONG == 16 +# ifndef NPY_INT16 +# define NPY_INT16 NPY_LONGLONG +# define NPY_UINT16 NPY_ULONGLONG + typedef npy_longlong npy_int16; + typedef npy_ulonglong npy_uint16; +# define PyInt16ScalarObject PyLongLongScalarObject +# define PyInt16ArrType_Type PyLongLongArrType_Type +# define PyUInt16ScalarObject PyULongLongScalarObject +# define PyUInt16ArrType_Type PyULongLongArrType_Type +#define NPY_INT16_FMT NPY_LONGLONG_FMT +#define NPY_UINT16_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT16 +# define NPY_MIN_LONGLONG NPY_MIN_INT16 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 +#elif NPY_BITSOF_LONGLONG == 32 +# ifndef NPY_INT32 +# define NPY_INT32 NPY_LONGLONG +# define NPY_UINT32 NPY_ULONGLONG + typedef npy_longlong npy_int32; + typedef npy_ulonglong npy_uint32; + typedef npy_ulonglong npy_ucs4; +# define PyInt32ScalarObject PyLongLongScalarObject +# define PyInt32ArrType_Type PyLongLongArrType_Type +# define PyUInt32ScalarObject PyULongLongScalarObject +# define PyUInt32ArrType_Type PyULongLongArrType_Type +#define NPY_INT32_FMT NPY_LONGLONG_FMT +#define NPY_UINT32_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT32 +# define NPY_MIN_LONGLONG NPY_MIN_INT32 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 +#elif NPY_BITSOF_LONGLONG == 64 +# ifndef NPY_INT64 +# define NPY_INT64 NPY_LONGLONG +# define NPY_UINT64 NPY_ULONGLONG + typedef npy_longlong npy_int64; + typedef npy_ulonglong npy_uint64; +# define PyInt64ScalarObject PyLongLongScalarObject +# define PyInt64ArrType_Type PyLongLongArrType_Type +# define PyUInt64ScalarObject PyULongLongScalarObject +# define PyUInt64ArrType_Type PyULongLongArrType_Type +#define NPY_INT64_FMT NPY_LONGLONG_FMT +#define NPY_UINT64_FMT NPY_ULONGLONG_FMT +# define MyPyLong_FromInt64 PyLong_FromLongLong +# define MyPyLong_AsInt64 PyLong_AsLongLong +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT64 +# define NPY_MIN_LONGLONG NPY_MIN_INT64 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 +#elif NPY_BITSOF_LONGLONG == 128 +# ifndef NPY_INT128 +# define NPY_INT128 NPY_LONGLONG +# define NPY_UINT128 NPY_ULONGLONG + typedef npy_longlong npy_int128; + typedef npy_ulonglong npy_uint128; +# define PyInt128ScalarObject PyLongLongScalarObject +# define PyInt128ArrType_Type PyLongLongArrType_Type +# define PyUInt128ScalarObject PyULongLongScalarObject +# define PyUInt128ArrType_Type PyULongLongArrType_Type +#define NPY_INT128_FMT NPY_LONGLONG_FMT +#define NPY_UINT128_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT128 +# define NPY_MIN_LONGLONG NPY_MIN_INT128 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 +#elif NPY_BITSOF_LONGLONG == 256 +# define NPY_INT256 
NPY_LONGLONG +# define NPY_UINT256 NPY_ULONGLONG + typedef npy_longlong npy_int256; + typedef npy_ulonglong npy_uint256; +# define PyInt256ScalarObject PyLongLongScalarObject +# define PyInt256ArrType_Type PyLongLongArrType_Type +# define PyUInt256ScalarObject PyULongLongScalarObject +# define PyUInt256ArrType_Type PyULongLongArrType_Type +#define NPY_INT256_FMT NPY_LONGLONG_FMT +#define NPY_UINT256_FMT NPY_ULONGLONG_FMT +# define NPY_MAX_LONGLONG NPY_MAX_INT256 +# define NPY_MIN_LONGLONG NPY_MIN_INT256 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 +#endif + +#if NPY_BITSOF_INT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_INT +#define NPY_UINT8 NPY_UINT + typedef int npy_int8; + typedef unsigned int npy_uint8; +# define PyInt8ScalarObject PyIntScalarObject +# define PyInt8ArrType_Type PyIntArrType_Type +# define PyUInt8ScalarObject PyUIntScalarObject +# define PyUInt8ArrType_Type PyUIntArrType_Type +#define NPY_INT8_FMT NPY_INT_FMT +#define NPY_UINT8_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_INT +#define NPY_UINT16 NPY_UINT + typedef int npy_int16; + typedef unsigned int npy_uint16; +# define PyInt16ScalarObject PyIntScalarObject +# define PyInt16ArrType_Type PyIntArrType_Type +# define PyUInt16ScalarObject PyIntUScalarObject +# define PyUInt16ArrType_Type PyIntUArrType_Type +#define NPY_INT16_FMT NPY_INT_FMT +#define NPY_UINT16_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT + typedef int npy_int32; + typedef unsigned int npy_uint32; + typedef unsigned int npy_ucs4; +# define PyInt32ScalarObject PyIntScalarObject +# define PyInt32ArrType_Type PyIntArrType_Type +# define PyUInt32ScalarObject PyUIntScalarObject +# define PyUInt32ArrType_Type PyUIntArrType_Type +#define NPY_INT32_FMT NPY_INT_FMT +#define NPY_UINT32_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_INT +#define NPY_UINT64 NPY_UINT + typedef int npy_int64; + typedef unsigned int npy_uint64; +# define PyInt64ScalarObject PyIntScalarObject +# define PyInt64ArrType_Type PyIntArrType_Type +# define PyUInt64ScalarObject PyUIntScalarObject +# define PyUInt64ArrType_Type PyUIntArrType_Type +#define NPY_INT64_FMT NPY_INT_FMT +#define NPY_UINT64_FMT NPY_UINT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_INT == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_INT +#define NPY_UINT128 NPY_UINT + typedef int npy_int128; + typedef unsigned int npy_uint128; +# define PyInt128ScalarObject PyIntScalarObject +# define PyInt128ArrType_Type PyIntArrType_Type +# define PyUInt128ScalarObject PyUIntScalarObject +# define PyUInt128ArrType_Type PyUIntArrType_Type +#define NPY_INT128_FMT NPY_INT_FMT +#define NPY_UINT128_FMT NPY_UINT_FMT +#endif +#endif + +#if NPY_BITSOF_SHORT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_SHORT +#define NPY_UINT8 NPY_USHORT + typedef short npy_int8; + typedef unsigned short npy_uint8; +# define PyInt8ScalarObject PyShortScalarObject +# define PyInt8ArrType_Type PyShortArrType_Type +# define PyUInt8ScalarObject PyUShortScalarObject +# define PyUInt8ArrType_Type PyUShortArrType_Type +#define NPY_INT8_FMT NPY_SHORT_FMT +#define NPY_UINT8_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT + typedef short npy_int16; + typedef unsigned short npy_uint16; +# define PyInt16ScalarObject PyShortScalarObject 
+# define PyInt16ArrType_Type PyShortArrType_Type +# define PyUInt16ScalarObject PyUShortScalarObject +# define PyUInt16ArrType_Type PyUShortArrType_Type +#define NPY_INT16_FMT NPY_SHORT_FMT +#define NPY_UINT16_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_SHORT +#define NPY_UINT32 NPY_USHORT + typedef short npy_int32; + typedef unsigned short npy_uint32; + typedef unsigned short npy_ucs4; +# define PyInt32ScalarObject PyShortScalarObject +# define PyInt32ArrType_Type PyShortArrType_Type +# define PyUInt32ScalarObject PyUShortScalarObject +# define PyUInt32ArrType_Type PyUShortArrType_Type +#define NPY_INT32_FMT NPY_SHORT_FMT +#define NPY_UINT32_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_SHORT +#define NPY_UINT64 NPY_USHORT + typedef short npy_int64; + typedef unsigned short npy_uint64; +# define PyInt64ScalarObject PyShortScalarObject +# define PyInt64ArrType_Type PyShortArrType_Type +# define PyUInt64ScalarObject PyUShortScalarObject +# define PyUInt64ArrType_Type PyUShortArrType_Type +#define NPY_INT64_FMT NPY_SHORT_FMT +#define NPY_UINT64_FMT NPY_USHORT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_SHORT == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_SHORT +#define NPY_UINT128 NPY_USHORT + typedef short npy_int128; + typedef unsigned short npy_uint128; +# define PyInt128ScalarObject PyShortScalarObject +# define PyInt128ArrType_Type PyShortArrType_Type +# define PyUInt128ScalarObject PyUShortScalarObject +# define PyUInt128ArrType_Type PyUShortArrType_Type +#define NPY_INT128_FMT NPY_SHORT_FMT +#define NPY_UINT128_FMT NPY_USHORT_FMT +#endif +#endif + + +#if NPY_BITSOF_CHAR == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE + typedef signed char npy_int8; + typedef unsigned char npy_uint8; +# define PyInt8ScalarObject PyByteScalarObject +# define PyInt8ArrType_Type PyByteArrType_Type +# define PyUInt8ScalarObject PyUByteScalarObject +# define PyUInt8ArrType_Type PyUByteArrType_Type +#define NPY_INT8_FMT NPY_BYTE_FMT +#define NPY_UINT8_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_BYTE +#define NPY_UINT16 NPY_UBYTE + typedef signed char npy_int16; + typedef unsigned char npy_uint16; +# define PyInt16ScalarObject PyByteScalarObject +# define PyInt16ArrType_Type PyByteArrType_Type +# define PyUInt16ScalarObject PyUByteScalarObject +# define PyUInt16ArrType_Type PyUByteArrType_Type +#define NPY_INT16_FMT NPY_BYTE_FMT +#define NPY_UINT16_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_BYTE +#define NPY_UINT32 NPY_UBYTE + typedef signed char npy_int32; + typedef unsigned char npy_uint32; + typedef unsigned char npy_ucs4; +# define PyInt32ScalarObject PyByteScalarObject +# define PyInt32ArrType_Type PyByteArrType_Type +# define PyUInt32ScalarObject PyUByteScalarObject +# define PyUInt32ArrType_Type PyUByteArrType_Type +#define NPY_INT32_FMT NPY_BYTE_FMT +#define NPY_UINT32_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_BYTE +#define NPY_UINT64 NPY_UBYTE + typedef signed char npy_int64; + typedef unsigned char npy_uint64; +# define PyInt64ScalarObject PyByteScalarObject +# define PyInt64ArrType_Type PyByteArrType_Type +# define PyUInt64ScalarObject PyUByteScalarObject +# define PyUInt64ArrType_Type PyUByteArrType_Type +#define NPY_INT64_FMT 
NPY_BYTE_FMT +#define NPY_UINT64_FMT NPY_UBYTE_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_CHAR == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_BYTE +#define NPY_UINT128 NPY_UBYTE + typedef signed char npy_int128; + typedef unsigned char npy_uint128; +# define PyInt128ScalarObject PyByteScalarObject +# define PyInt128ArrType_Type PyByteArrType_Type +# define PyUInt128ScalarObject PyUByteScalarObject +# define PyUInt128ArrType_Type PyUByteArrType_Type +#define NPY_INT128_FMT NPY_BYTE_FMT +#define NPY_UINT128_FMT NPY_UBYTE_FMT +#endif +#endif + + + +#if NPY_BITSOF_DOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_DOUBLE +#define NPY_COMPLEX64 NPY_CDOUBLE + typedef double npy_float32; + typedef npy_cdouble npy_complex64; +# define PyFloat32ScalarObject PyDoubleScalarObject +# define PyComplex64ScalarObject PyCDoubleScalarObject +# define PyFloat32ArrType_Type PyDoubleArrType_Type +# define PyComplex64ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX128 NPY_CDOUBLE + typedef double npy_float64; + typedef npy_cdouble npy_complex128; +# define PyFloat64ScalarObject PyDoubleScalarObject +# define PyComplex128ScalarObject PyCDoubleScalarObject +# define PyFloat64ArrType_Type PyDoubleArrType_Type +# define PyComplex128ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_DOUBLE +#define NPY_COMPLEX160 NPY_CDOUBLE + typedef double npy_float80; + typedef npy_cdouble npy_complex160; +# define PyFloat80ScalarObject PyDoubleScalarObject +# define PyComplex160ScalarObject PyCDoubleScalarObject +# define PyFloat80ArrType_Type PyDoubleArrType_Type +# define PyComplex160ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_DOUBLE +#define NPY_COMPLEX192 NPY_CDOUBLE + typedef double npy_float96; + typedef npy_cdouble npy_complex192; +# define PyFloat96ScalarObject PyDoubleScalarObject +# define PyComplex192ScalarObject PyCDoubleScalarObject +# define PyFloat96ArrType_Type PyDoubleArrType_Type +# define PyComplex192ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_DOUBLE +#define NPY_COMPLEX256 NPY_CDOUBLE + typedef double npy_float128; + typedef npy_cdouble npy_complex256; +# define PyFloat128ScalarObject PyDoubleScalarObject +# define PyComplex256ScalarObject PyCDoubleScalarObject +# define PyFloat128ArrType_Type PyDoubleArrType_Type +# define PyComplex256ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT +#endif +#endif + + + +#if NPY_BITSOF_FLOAT == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_COMPLEX64 NPY_CFLOAT + typedef float npy_float32; + typedef npy_cfloat npy_complex64; +# define PyFloat32ScalarObject PyFloatScalarObject +# define PyComplex64ScalarObject PyCFloatScalarObject +# define PyFloat32ArrType_Type PyFloatArrType_Type +# define PyComplex64ArrType_Type PyCFloatArrType_Type +#define 
NPY_FLOAT32_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_FLOAT +#define NPY_COMPLEX128 NPY_CFLOAT + typedef float npy_float64; + typedef npy_cfloat npy_complex128; +# define PyFloat64ScalarObject PyFloatScalarObject +# define PyComplex128ScalarObject PyCFloatScalarObject +# define PyFloat64ArrType_Type PyFloatArrType_Type +# define PyComplex128ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT64_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_FLOAT +#define NPY_COMPLEX160 NPY_CFLOAT + typedef float npy_float80; + typedef npy_cfloat npy_complex160; +# define PyFloat80ScalarObject PyFloatScalarObject +# define PyComplex160ScalarObject PyCFloatScalarObject +# define PyFloat80ArrType_Type PyFloatArrType_Type +# define PyComplex160ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT80_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_FLOAT +#define NPY_COMPLEX192 NPY_CFLOAT + typedef float npy_float96; + typedef npy_cfloat npy_complex192; +# define PyFloat96ScalarObject PyFloatScalarObject +# define PyComplex192ScalarObject PyCFloatScalarObject +# define PyFloat96ArrType_Type PyFloatArrType_Type +# define PyComplex192ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT96_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_FLOAT +#define NPY_COMPLEX256 NPY_CFLOAT + typedef float npy_float128; + typedef npy_cfloat npy_complex256; +# define PyFloat128ScalarObject PyFloatScalarObject +# define PyComplex256ScalarObject PyCFloatScalarObject +# define PyFloat128ArrType_Type PyFloatArrType_Type +# define PyComplex256ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT128_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT +#endif +#endif + +/* half/float16 isn't a floating-point type in C */ +#define NPY_FLOAT16 NPY_HALF +typedef npy_uint16 npy_half; +typedef npy_half npy_float16; + +#if NPY_BITSOF_LONGDOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_LONGDOUBLE +#define NPY_COMPLEX64 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float32; + typedef npy_clongdouble npy_complex64; +# define PyFloat32ScalarObject PyLongDoubleScalarObject +# define PyComplex64ScalarObject PyCLongDoubleScalarObject +# define PyFloat32ArrType_Type PyLongDoubleArrType_Type +# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_LONGDOUBLE +#define NPY_COMPLEX128 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float64; + typedef npy_clongdouble npy_complex128; +# define PyFloat64ScalarObject PyLongDoubleScalarObject +# define PyComplex128ScalarObject PyCLongDoubleScalarObject +# define PyFloat64ArrType_Type PyLongDoubleArrType_Type +# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_LONGDOUBLE +#define NPY_COMPLEX160 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float80; + typedef npy_clongdouble npy_complex160; +# define PyFloat80ScalarObject 
PyLongDoubleScalarObject +# define PyComplex160ScalarObject PyCLongDoubleScalarObject +# define PyFloat80ArrType_Type PyLongDoubleArrType_Type +# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_LONGDOUBLE +#define NPY_COMPLEX192 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float96; + typedef npy_clongdouble npy_complex192; +# define PyFloat96ScalarObject PyLongDoubleScalarObject +# define PyComplex192ScalarObject PyCLongDoubleScalarObject +# define PyFloat96ArrType_Type PyLongDoubleArrType_Type +# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_LONGDOUBLE +#define NPY_COMPLEX256 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float128; + typedef npy_clongdouble npy_complex256; +# define PyFloat128ScalarObject PyLongDoubleScalarObject +# define PyComplex256ScalarObject PyCLongDoubleScalarObject +# define PyFloat128ArrType_Type PyLongDoubleArrType_Type +# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 256 +#define NPY_FLOAT256 NPY_LONGDOUBLE +#define NPY_COMPLEX512 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float256; + typedef npy_clongdouble npy_complex512; +# define PyFloat256ScalarObject PyLongDoubleScalarObject +# define PyComplex512ScalarObject PyCLongDoubleScalarObject +# define PyFloat256ArrType_Type PyLongDoubleArrType_Type +# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT +#endif + +/* datetime typedefs */ +typedef npy_int64 npy_timedelta; +typedef npy_int64 npy_datetime; +#define NPY_DATETIME_FMT NPY_INT64_FMT +#define NPY_TIMEDELTA_FMT NPY_INT64_FMT + +/* End of typedefs for numarray style bit-width names */ + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_cpu.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_cpu.h new file mode 100644 index 0000000..5edd8f4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_cpu.h @@ -0,0 +1,118 @@ +/* + * This set (target) cpu specific macros: + * - Possible values: + * NPY_CPU_X86 + * NPY_CPU_AMD64 + * NPY_CPU_PPC + * NPY_CPU_PPC64 + * NPY_CPU_PPC64LE + * NPY_CPU_SPARC + * NPY_CPU_S390 + * NPY_CPU_IA64 + * NPY_CPU_HPPA + * NPY_CPU_ALPHA + * NPY_CPU_ARMEL + * NPY_CPU_ARMEB + * NPY_CPU_SH_LE + * NPY_CPU_SH_BE + * NPY_CPU_ARCEL + * NPY_CPU_ARCEB + * NPY_CPU_RISCV64 + */ +#ifndef _NPY_CPUARCH_H_ +#define _NPY_CPUARCH_H_ + +#include "numpyconfig.h" +#include <string.h> /* for memcpy */ + +#if defined( __i386__ ) || defined(i386) || defined(_M_IX86) + /* + * __i386__ is defined by gcc and Intel compiler on Linux, + * _M_IX86 by VS compiler, + * i386 by Sun compilers on opensolaris at least + */ + #define NPY_CPU_X86 +#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) + /* + * both __x86_64__ and __amd64__ are defined by gcc + * __x86_64 defined by sun compiler on opensolaris at least + * _M_AMD64 defined by MS compiler + */ + #define NPY_CPU_AMD64 +#elif defined(__powerpc64__) && 
defined(__LITTLE_ENDIAN__) + #define NPY_CPU_PPC64LE +#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_PPC64 +#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) + /* + * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, + * but can't find it ATM + * _ARCH_PPC is used by at least gcc on AIX + * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check + * for those specifically first before defaulting to ppc + */ + #define NPY_CPU_PPC +#elif defined(__sparc__) || defined(__sparc) + /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */ + #define NPY_CPU_SPARC +#elif defined(__s390__) + #define NPY_CPU_S390 +#elif defined(__ia64) + #define NPY_CPU_IA64 +#elif defined(__hppa) + #define NPY_CPU_HPPA +#elif defined(__alpha__) + #define NPY_CPU_ALPHA +#elif defined(__arm__) || defined(__aarch64__) + #if defined(__ARMEB__) || defined(__AARCH64EB__) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEB_AARCH32 + #elif defined(__ARM_64BIT_STATE) + #define NPY_CPU_ARMEB_AARCH64 + #else + #define NPY_CPU_ARMEB + #endif + #elif defined(__ARMEL__) || defined(__AARCH64EL__) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEL_AARCH32 + #elif defined(__ARM_64BIT_STATE) + #define NPY_CPU_ARMEL_AARCH64 + #else + #define NPY_CPU_ARMEL + #endif + #else + # error Unknown ARM CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) + #endif +#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_SH_LE +#elif defined(__sh__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_SH_BE +#elif defined(__MIPSEL__) + #define NPY_CPU_MIPSEL +#elif defined(__MIPSEB__) + #define NPY_CPU_MIPSEB +#elif defined(__or1k__) + #define NPY_CPU_OR1K +#elif defined(__mc68000__) + #define NPY_CPU_M68K +#elif defined(__arc__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_ARCEL +#elif defined(__arc__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_ARCEB +#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64 + #define NPY_CPU_RISCV64 +#else + #error Unknown CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) +#endif + +#define NPY_COPY_PYOBJECT_PTR(dst, src) memcpy(dst, src, sizeof(PyObject *)) + +#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)) +#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1 +#else +#define NPY_CPU_HAVE_UNALIGNED_ACCESS 0 +#endif + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_endian.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_endian.h new file mode 100644 index 0000000..44cdffd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_endian.h @@ -0,0 +1,72 @@ +#ifndef _NPY_ENDIAN_H_ +#define _NPY_ENDIAN_H_ + +/* + * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in + * endian.h + */ + +#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H) + /* Use endian.h if available */ + + #if defined(NPY_HAVE_ENDIAN_H) + #include <endian.h> + #elif defined(NPY_HAVE_SYS_ENDIAN_H) + #include <sys/endian.h> + #endif + + #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN) + #define NPY_BYTE_ORDER BYTE_ORDER + #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN + #define NPY_BIG_ENDIAN BIG_ENDIAN + #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN) + #define NPY_BYTE_ORDER _BYTE_ORDER + #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN + #define NPY_BIG_ENDIAN _BIG_ENDIAN + #elif 
defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) + #define NPY_BYTE_ORDER __BYTE_ORDER + #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN + #define NPY_BIG_ENDIAN __BIG_ENDIAN + #endif +#endif + +#ifndef NPY_BYTE_ORDER + /* Set endianness info using target CPU */ + #include "npy_cpu.h" + + #define NPY_LITTLE_ENDIAN 1234 + #define NPY_BIG_ENDIAN 4321 + + #if defined(NPY_CPU_X86) \ + || defined(NPY_CPU_AMD64) \ + || defined(NPY_CPU_IA64) \ + || defined(NPY_CPU_ALPHA) \ + || defined(NPY_CPU_ARMEL) \ + || defined(NPY_CPU_ARMEL_AARCH32) \ + || defined(NPY_CPU_ARMEL_AARCH64) \ + || defined(NPY_CPU_SH_LE) \ + || defined(NPY_CPU_MIPSEL) \ + || defined(NPY_CPU_PPC64LE) \ + || defined(NPY_CPU_ARCEL) \ + || defined(NPY_CPU_RISCV64) + #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN + #elif defined(NPY_CPU_PPC) \ + || defined(NPY_CPU_SPARC) \ + || defined(NPY_CPU_S390) \ + || defined(NPY_CPU_HPPA) \ + || defined(NPY_CPU_PPC64) \ + || defined(NPY_CPU_ARMEB) \ + || defined(NPY_CPU_ARMEB_AARCH32) \ + || defined(NPY_CPU_ARMEB_AARCH64) \ + || defined(NPY_CPU_SH_BE) \ + || defined(NPY_CPU_MIPSEB) \ + || defined(NPY_CPU_OR1K) \ + || defined(NPY_CPU_M68K) \ + || defined(NPY_CPU_ARCEB) + #define NPY_BYTE_ORDER NPY_BIG_ENDIAN + #else + #error Unknown CPU: can not set endianness + #endif +#endif + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_interrupt.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_interrupt.h new file mode 100644 index 0000000..40cb7ac --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_interrupt.h @@ -0,0 +1,117 @@ + +/* Signal handling: + +This header file defines macros that allow your code to handle +interrupts received during processing. Interrupts that +could reasonably be handled: + +SIGINT, SIGABRT, SIGALRM, SIGSEGV + +****Warning*************** + +Do not allow code that creates temporary memory or increases reference +counts of Python objects to be interrupted unless you handle it +differently. + +************************** + +The mechanism for handling interrupts is conceptually simple: + + - replace the signal handler with our own home-grown version + and store the old one. + - run the code to be interrupted -- if an interrupt occurs + the handler should basically just cause a return to the + calling function for finish work. + - restore the old signal handler + +Of course, every code that allows interrupts must account for +returning via the interrupt and handle clean-up correctly. But, +even still, the simple paradigm is complicated by at least three +factors. + + 1) platform portability (i.e. Microsoft says not to use longjmp + to return from signal handling. They have a __try and __except + extension to C instead but what about mingw?). + + 2) how to handle threads: apparently whether signals are delivered to + every thread of the process or the "invoking" thread is platform + dependent. --- we don't handle threads for now. + + 3) do we need to worry about re-entrance. For now, assume the + code will not call-back into itself. + +Ideas: + + 1) Start by implementing an approach that works on platforms that + can use setjmp and longjmp functionality and does nothing + on other platforms. + + 2) Ignore threads --- i.e. do not mix interrupt handling and threads + + 3) Add a default signal_handler function to the C-API but have the rest + use macros. 
+ + +Simple Interface: + + +In your C-extension: around a block of code you want to be interruptible +with a SIGINT + +NPY_SIGINT_ON +[code] +NPY_SIGINT_OFF + +In order for this to work correctly, the +[code] block must not allocate any memory or alter the reference count of any +Python objects. In other words [code] must be interruptible so that continuation +after NPY_SIGINT_OFF will only be "missing some computations" + +Interrupt handling does not work well with threads. + +*/ + +/* Add signal handling macros + Make the global variable and signal handler part of the C-API +*/ + +#ifndef NPY_INTERRUPT_H +#define NPY_INTERRUPT_H + +#ifndef NPY_NO_SIGNAL + +#include <setjmp.h> +#include <signal.h> + +#ifndef sigsetjmp + +#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1) +#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) +#define NPY_SIGJMP_BUF jmp_buf + +#else + +#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) +#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) +#define NPY_SIGJMP_BUF sigjmp_buf + +#endif + +# define NPY_SIGINT_ON { \ + PyOS_sighandler_t _npy_sig_save; \ + _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ + if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ + 1) == 0) { \ + +# define NPY_SIGINT_OFF } \ + PyOS_setsig(SIGINT, _npy_sig_save); \ + } + +#else /* NPY_NO_SIGNAL */ + +#define NPY_SIGINT_ON +#define NPY_SIGINT_OFF + +#endif /* HAVE_SIGSETJMP */ + +#endif /* NPY_INTERRUPT_H */ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_math.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_math.h new file mode 100644 index 0000000..582390c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_math.h @@ -0,0 +1,551 @@ +#ifndef __NPY_MATH_C99_H_ +#define __NPY_MATH_C99_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <math.h> +#ifdef __SUNPRO_CC +#include <sunmath.h> +#endif +#ifdef HAVE_NPY_CONFIG_H +#include <npy_config.h> +#endif +#include <numpy/npy_common.h> + +/* By adding static inline specifiers to npy_math function definitions when + appropriate, compiler is given the opportunity to optimize */ +#if NPY_INLINE_MATH +#define NPY_INPLACE NPY_INLINE static +#else +#define NPY_INPLACE +#endif + + +/* + * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 + * for INFINITY) + * + * XXX: I should test whether INFINITY and NAN are available on the platform + */ +NPY_INLINE static float __npy_inff(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; + return __bint.__f; +} + +NPY_INLINE static float __npy_nanf(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; + return __bint.__f; +} + +NPY_INLINE static float __npy_pzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL}; + return __bint.__f; +} + +NPY_INLINE static float __npy_nzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL}; + return __bint.__f; +} + +#define NPY_INFINITYF __npy_inff() +#define NPY_NANF __npy_nanf() +#define NPY_PZEROF __npy_pzerof() +#define NPY_NZEROF __npy_nzerof() + +#define NPY_INFINITY ((npy_double)NPY_INFINITYF) +#define NPY_NAN ((npy_double)NPY_NANF) +#define NPY_PZERO ((npy_double)NPY_PZEROF) +#define NPY_NZERO ((npy_double)NPY_NZEROF) + +#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF) +#define NPY_NANL ((npy_longdouble)NPY_NANF) +#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF) +#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) + +/* + * Useful constants + */ +#define NPY_E 
2.718281828459045235360287471352662498 /* e */ +#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ +#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ +#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ +#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ +#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ +#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ +#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ +#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ +#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ +#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ +#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ +#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ + +#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ +#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ +#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ +#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ +#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ +#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ +#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ +#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ +#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ +#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ +#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */ +#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ +#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ + +#define NPY_El 2.718281828459045235360287471352662498L /* e */ +#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ +#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ +#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ +#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ +#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ +#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ +#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ +#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ +#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ +#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */ +#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ +#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ + +/* + * C99 double math funcs + */ +NPY_INPLACE double npy_sin(double x); +NPY_INPLACE double npy_cos(double x); +NPY_INPLACE double npy_tan(double x); +NPY_INPLACE double npy_sinh(double x); +NPY_INPLACE double npy_cosh(double x); +NPY_INPLACE double npy_tanh(double x); + +NPY_INPLACE double npy_asin(double x); +NPY_INPLACE double npy_acos(double x); +NPY_INPLACE double npy_atan(double x); + +NPY_INPLACE double npy_log(double x); +NPY_INPLACE double npy_log10(double x); +NPY_INPLACE double npy_exp(double x); +NPY_INPLACE double npy_sqrt(double x); +NPY_INPLACE double npy_cbrt(double x); + +NPY_INPLACE double npy_fabs(double x); +NPY_INPLACE double npy_ceil(double x); +NPY_INPLACE double npy_fmod(double x, double y); +NPY_INPLACE double 
npy_floor(double x); + +NPY_INPLACE double npy_expm1(double x); +NPY_INPLACE double npy_log1p(double x); +NPY_INPLACE double npy_hypot(double x, double y); +NPY_INPLACE double npy_acosh(double x); +NPY_INPLACE double npy_asinh(double xx); +NPY_INPLACE double npy_atanh(double x); +NPY_INPLACE double npy_rint(double x); +NPY_INPLACE double npy_trunc(double x); +NPY_INPLACE double npy_exp2(double x); +NPY_INPLACE double npy_log2(double x); + +NPY_INPLACE double npy_atan2(double x, double y); +NPY_INPLACE double npy_pow(double x, double y); +NPY_INPLACE double npy_modf(double x, double* y); +NPY_INPLACE double npy_frexp(double x, int* y); +NPY_INPLACE double npy_ldexp(double n, int y); + +NPY_INPLACE double npy_copysign(double x, double y); +double npy_nextafter(double x, double y); +double npy_spacing(double x); + +/* + * IEEE 754 fpu handling. Those are guaranteed to be macros + */ + +/* use builtins to avoid function calls in tight loops + * only available if npy_config.h is available (= numpys own build) */ +#if HAVE___BUILTIN_ISNAN + #define npy_isnan(x) __builtin_isnan(x) +#else + #ifndef NPY_HAVE_DECL_ISNAN + #define npy_isnan(x) ((x) != (x)) + #else + #if defined(_MSC_VER) && (_MSC_VER < 1900) + #define npy_isnan(x) _isnan((x)) + #else + #define npy_isnan(x) isnan(x) + #endif + #endif +#endif + + +/* only available if npy_config.h is available (= numpys own build) */ +#if HAVE___BUILTIN_ISFINITE + #define npy_isfinite(x) __builtin_isfinite(x) +#else + #ifndef NPY_HAVE_DECL_ISFINITE + #ifdef _MSC_VER + #define npy_isfinite(x) _finite((x)) + #else + #define npy_isfinite(x) !npy_isnan((x) + (-x)) + #endif + #else + #define npy_isfinite(x) isfinite((x)) + #endif +#endif + +/* only available if npy_config.h is available (= numpys own build) */ +#if HAVE___BUILTIN_ISINF + #define npy_isinf(x) __builtin_isinf(x) +#else + #ifndef NPY_HAVE_DECL_ISINF + #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) + #else + #if defined(_MSC_VER) && (_MSC_VER < 1900) + #define npy_isinf(x) (!_finite((x)) && !_isnan((x))) + #else + #define npy_isinf(x) isinf((x)) + #endif + #endif +#endif + +#ifndef NPY_HAVE_DECL_SIGNBIT + int _npy_signbit_f(float x); + int _npy_signbit_d(double x); + int _npy_signbit_ld(long double x); + #define npy_signbit(x) \ + (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ + : sizeof (x) == sizeof (double) ? 
_npy_signbit_d (x) \ + : _npy_signbit_f (x)) +#else + #define npy_signbit(x) signbit((x)) +#endif + +/* + * float C99 math functions + */ +NPY_INPLACE float npy_sinf(float x); +NPY_INPLACE float npy_cosf(float x); +NPY_INPLACE float npy_tanf(float x); +NPY_INPLACE float npy_sinhf(float x); +NPY_INPLACE float npy_coshf(float x); +NPY_INPLACE float npy_tanhf(float x); +NPY_INPLACE float npy_fabsf(float x); +NPY_INPLACE float npy_floorf(float x); +NPY_INPLACE float npy_ceilf(float x); +NPY_INPLACE float npy_rintf(float x); +NPY_INPLACE float npy_truncf(float x); +NPY_INPLACE float npy_sqrtf(float x); +NPY_INPLACE float npy_cbrtf(float x); +NPY_INPLACE float npy_log10f(float x); +NPY_INPLACE float npy_logf(float x); +NPY_INPLACE float npy_expf(float x); +NPY_INPLACE float npy_expm1f(float x); +NPY_INPLACE float npy_asinf(float x); +NPY_INPLACE float npy_acosf(float x); +NPY_INPLACE float npy_atanf(float x); +NPY_INPLACE float npy_asinhf(float x); +NPY_INPLACE float npy_acoshf(float x); +NPY_INPLACE float npy_atanhf(float x); +NPY_INPLACE float npy_log1pf(float x); +NPY_INPLACE float npy_exp2f(float x); +NPY_INPLACE float npy_log2f(float x); + +NPY_INPLACE float npy_atan2f(float x, float y); +NPY_INPLACE float npy_hypotf(float x, float y); +NPY_INPLACE float npy_powf(float x, float y); +NPY_INPLACE float npy_fmodf(float x, float y); + +NPY_INPLACE float npy_modff(float x, float* y); +NPY_INPLACE float npy_frexpf(float x, int* y); +NPY_INPLACE float npy_ldexpf(float x, int y); + +NPY_INPLACE float npy_copysignf(float x, float y); +float npy_nextafterf(float x, float y); +float npy_spacingf(float x); + +/* + * long double C99 math functions + */ +NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_sinhl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_coshl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_tanhl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_fabsl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_floorl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_ceill(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_rintl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_truncl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_cbrtl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_log10l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_logl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_expm1l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_asinl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_acosl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_atanl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_asinhl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_acoshl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_atanhl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_log1pl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_exp2l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x); + +NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); + +NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); +NPY_INPLACE 
npy_longdouble npy_frexpl(npy_longdouble x, int* y); +NPY_INPLACE npy_longdouble npy_ldexpl(npy_longdouble x, int y); + +NPY_INPLACE npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_spacingl(npy_longdouble x); + +/* + * Non standard functions + */ +NPY_INPLACE double npy_deg2rad(double x); +NPY_INPLACE double npy_rad2deg(double x); +NPY_INPLACE double npy_logaddexp(double x, double y); +NPY_INPLACE double npy_logaddexp2(double x, double y); +NPY_INPLACE double npy_divmod(double x, double y, double *modulus); +NPY_INPLACE double npy_heaviside(double x, double h0); + +NPY_INPLACE float npy_deg2radf(float x); +NPY_INPLACE float npy_rad2degf(float x); +NPY_INPLACE float npy_logaddexpf(float x, float y); +NPY_INPLACE float npy_logaddexp2f(float x, float y); +NPY_INPLACE float npy_divmodf(float x, float y, float *modulus); +NPY_INPLACE float npy_heavisidef(float x, float h0); + +NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y, + npy_longdouble *modulus); +NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); + +#define npy_degrees npy_rad2deg +#define npy_degreesf npy_rad2degf +#define npy_degreesl npy_rad2degl + +#define npy_radians npy_deg2rad +#define npy_radiansf npy_deg2radf +#define npy_radiansl npy_deg2radl + +/* + * Complex declarations + */ + +/* + * C99 specifies that complex numbers have the same representation as + * an array of two elements, where the first element is the real part + * and the second element is the imaginary part. 
+ */ +#define __NPY_CPACK_IMP(x, y, type, ctype) \ + union { \ + ctype z; \ + type a[2]; \ + } z1;; \ + \ + z1.a[0] = (x); \ + z1.a[1] = (y); \ + \ + return z1.z; + +static NPY_INLINE npy_cdouble npy_cpack(double x, double y) +{ + __NPY_CPACK_IMP(x, y, double, npy_cdouble); +} + +static NPY_INLINE npy_cfloat npy_cpackf(float x, float y) +{ + __NPY_CPACK_IMP(x, y, float, npy_cfloat); +} + +static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y) +{ + __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble); +} +#undef __NPY_CPACK_IMP + +/* + * Same remark as above, but in the other direction: extract first/second + * member of complex number, assuming a C99-compatible representation + * + * Those are defineds as static inline, and such as a reasonable compiler would + * most likely compile this to one or two instructions (on CISC at least) + */ +#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \ + union { \ + ctype z; \ + type a[2]; \ + } __z_repr; \ + __z_repr.z = z; \ + \ + return __z_repr.a[index]; + +static NPY_INLINE double npy_creal(npy_cdouble z) +{ + __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble); +} + +static NPY_INLINE double npy_cimag(npy_cdouble z) +{ + __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble); +} + +static NPY_INLINE float npy_crealf(npy_cfloat z) +{ + __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat); +} + +static NPY_INLINE float npy_cimagf(npy_cfloat z) +{ + __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat); +} + +static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z) +{ + __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble); +} + +static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z) +{ + __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble); +} +#undef __NPY_CEXTRACT_IMP + +/* + * Double precision complex functions + */ +double npy_cabs(npy_cdouble z); +double npy_carg(npy_cdouble z); + +npy_cdouble npy_cexp(npy_cdouble z); +npy_cdouble npy_clog(npy_cdouble z); +npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y); + +npy_cdouble npy_csqrt(npy_cdouble z); + +npy_cdouble npy_ccos(npy_cdouble z); +npy_cdouble npy_csin(npy_cdouble z); +npy_cdouble npy_ctan(npy_cdouble z); + +npy_cdouble npy_ccosh(npy_cdouble z); +npy_cdouble npy_csinh(npy_cdouble z); +npy_cdouble npy_ctanh(npy_cdouble z); + +npy_cdouble npy_cacos(npy_cdouble z); +npy_cdouble npy_casin(npy_cdouble z); +npy_cdouble npy_catan(npy_cdouble z); + +npy_cdouble npy_cacosh(npy_cdouble z); +npy_cdouble npy_casinh(npy_cdouble z); +npy_cdouble npy_catanh(npy_cdouble z); + +/* + * Single precision complex functions + */ +float npy_cabsf(npy_cfloat z); +float npy_cargf(npy_cfloat z); + +npy_cfloat npy_cexpf(npy_cfloat z); +npy_cfloat npy_clogf(npy_cfloat z); +npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); + +npy_cfloat npy_csqrtf(npy_cfloat z); + +npy_cfloat npy_ccosf(npy_cfloat z); +npy_cfloat npy_csinf(npy_cfloat z); +npy_cfloat npy_ctanf(npy_cfloat z); + +npy_cfloat npy_ccoshf(npy_cfloat z); +npy_cfloat npy_csinhf(npy_cfloat z); +npy_cfloat npy_ctanhf(npy_cfloat z); + +npy_cfloat npy_cacosf(npy_cfloat z); +npy_cfloat npy_casinf(npy_cfloat z); +npy_cfloat npy_catanf(npy_cfloat z); + +npy_cfloat npy_cacoshf(npy_cfloat z); +npy_cfloat npy_casinhf(npy_cfloat z); +npy_cfloat npy_catanhf(npy_cfloat z); + + +/* + * Extended precision complex functions + */ +npy_longdouble npy_cabsl(npy_clongdouble z); +npy_longdouble npy_cargl(npy_clongdouble z); + +npy_clongdouble npy_cexpl(npy_clongdouble z); +npy_clongdouble npy_clogl(npy_clongdouble z); +npy_clongdouble npy_cpowl(npy_clongdouble x, 
npy_clongdouble y); + +npy_clongdouble npy_csqrtl(npy_clongdouble z); + +npy_clongdouble npy_ccosl(npy_clongdouble z); +npy_clongdouble npy_csinl(npy_clongdouble z); +npy_clongdouble npy_ctanl(npy_clongdouble z); + +npy_clongdouble npy_ccoshl(npy_clongdouble z); +npy_clongdouble npy_csinhl(npy_clongdouble z); +npy_clongdouble npy_ctanhl(npy_clongdouble z); + +npy_clongdouble npy_cacosl(npy_clongdouble z); +npy_clongdouble npy_casinl(npy_clongdouble z); +npy_clongdouble npy_catanl(npy_clongdouble z); + +npy_clongdouble npy_cacoshl(npy_clongdouble z); +npy_clongdouble npy_casinhl(npy_clongdouble z); +npy_clongdouble npy_catanhl(npy_clongdouble z); + + +/* + * Functions that set the floating point error + * status word. + */ + +/* + * platform-dependent code translates floating point + * status to an integer sum of these values + */ +#define NPY_FPE_DIVIDEBYZERO 1 +#define NPY_FPE_OVERFLOW 2 +#define NPY_FPE_UNDERFLOW 4 +#define NPY_FPE_INVALID 8 + +int npy_clear_floatstatus_barrier(char*); +int npy_get_floatstatus_barrier(char*); +/* + * use caution with these - clang and gcc8.1 are known to reorder calls + * to this form of the function which can defeat the check. The _barrier + * form of the call is preferable, where the argument is + * (char*)&local_variable + */ +int npy_clear_floatstatus(void); +int npy_get_floatstatus(void); + +void npy_set_floatstatus_divbyzero(void); +void npy_set_floatstatus_overflow(void); +void npy_set_floatstatus_underflow(void); +void npy_set_floatstatus_invalid(void); + +#ifdef __cplusplus +} +#endif + +#if NPY_INLINE_MATH +#include "npy_math_internal.h" +#endif + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h new file mode 100644 index 0000000..6183dc2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h @@ -0,0 +1,19 @@ +/* + * This include file is provided for inclusion in Cython *.pyd files where + * one would like to define the NPY_NO_DEPRECATED_API macro. It can be + * included by + * + * cdef extern from "npy_no_deprecated_api.h": pass + * + */ +#ifndef NPY_NO_DEPRECATED_API + +/* put this check here since there may be multiple includes in C extensions. */ +#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \ + defined(OLD_DEFINES_H) +#error "npy_no_deprecated_api.h" must be first among numpy includes. 
+#else +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_os.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_os.h new file mode 100644 index 0000000..9228c39 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_os.h @@ -0,0 +1,30 @@ +#ifndef _NPY_OS_H_ +#define _NPY_OS_H_ + +#if defined(linux) || defined(__linux) || defined(__linux__) + #define NPY_OS_LINUX +#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__) || defined(__DragonFly__) + #define NPY_OS_BSD + #ifdef __FreeBSD__ + #define NPY_OS_FREEBSD + #elif defined(__NetBSD__) + #define NPY_OS_NETBSD + #elif defined(__OpenBSD__) + #define NPY_OS_OPENBSD + #elif defined(__DragonFly__) + #define NPY_OS_DRAGONFLY + #endif +#elif defined(sun) || defined(__sun) + #define NPY_OS_SOLARIS +#elif defined(__CYGWIN__) + #define NPY_OS_CYGWIN +#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) + #define NPY_OS_WIN32 +#elif defined(__APPLE__) + #define NPY_OS_DARWIN +#else + #define NPY_OS_UNKNOWN +#endif + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/numpyconfig.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/numpyconfig.h new file mode 100644 index 0000000..ab198f3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/numpyconfig.h @@ -0,0 +1,41 @@ +#ifndef _NPY_NUMPYCONFIG_H_ +#define _NPY_NUMPYCONFIG_H_ + +#include "_numpyconfig.h" + +/* + * On Mac OS X, because there is only one configuration stage for all the archs + * in universal builds, any macro which depends on the arch needs to be + * hardcoded + */ +#ifdef __APPLE__ + #undef NPY_SIZEOF_LONG + #undef NPY_SIZEOF_PY_INTPTR_T + + #ifdef __LP64__ + #define NPY_SIZEOF_LONG 8 + #define NPY_SIZEOF_PY_INTPTR_T 8 + #else + #define NPY_SIZEOF_LONG 4 + #define NPY_SIZEOF_PY_INTPTR_T 4 + #endif +#endif + +/** + * To help with the NPY_NO_DEPRECATED_API macro, we include API version + * numbers for specific versions of NumPy. To exclude all API that was + * deprecated as of 1.7, add the following before #including any NumPy + * headers: + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + */ +#define NPY_1_7_API_VERSION 0x00000007 +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_1_9_API_VERSION 0x00000008 +#define NPY_1_10_API_VERSION 0x00000008 +#define NPY_1_11_API_VERSION 0x00000008 +#define NPY_1_12_API_VERSION 0x00000008 +#define NPY_1_13_API_VERSION 0x00000008 +#define NPY_1_14_API_VERSION 0x00000008 +#define NPY_1_15_API_VERSION 0x00000008 + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/old_defines.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/old_defines.h new file mode 100644 index 0000000..abf8159 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/old_defines.h @@ -0,0 +1,187 @@ +/* This header is deprecated as of NumPy 1.7 */ +#ifndef OLD_DEFINES_H +#define OLD_DEFINES_H + +#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION +#error The header "old_defines.h" is deprecated as of NumPy 1.7. 
+#endif + +#define NDARRAY_VERSION NPY_VERSION + +#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE +#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE +#define PyArray_BUFSIZE NPY_BUFSIZE + +#define PyArray_PRIORITY NPY_PRIORITY +#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY +#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE + +#define NPY_MAX PyArray_MAX +#define NPY_MIN PyArray_MIN + +#define PyArray_TYPES NPY_TYPES +#define PyArray_BOOL NPY_BOOL +#define PyArray_BYTE NPY_BYTE +#define PyArray_UBYTE NPY_UBYTE +#define PyArray_SHORT NPY_SHORT +#define PyArray_USHORT NPY_USHORT +#define PyArray_INT NPY_INT +#define PyArray_UINT NPY_UINT +#define PyArray_LONG NPY_LONG +#define PyArray_ULONG NPY_ULONG +#define PyArray_LONGLONG NPY_LONGLONG +#define PyArray_ULONGLONG NPY_ULONGLONG +#define PyArray_HALF NPY_HALF +#define PyArray_FLOAT NPY_FLOAT +#define PyArray_DOUBLE NPY_DOUBLE +#define PyArray_LONGDOUBLE NPY_LONGDOUBLE +#define PyArray_CFLOAT NPY_CFLOAT +#define PyArray_CDOUBLE NPY_CDOUBLE +#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE +#define PyArray_OBJECT NPY_OBJECT +#define PyArray_STRING NPY_STRING +#define PyArray_UNICODE NPY_UNICODE +#define PyArray_VOID NPY_VOID +#define PyArray_DATETIME NPY_DATETIME +#define PyArray_TIMEDELTA NPY_TIMEDELTA +#define PyArray_NTYPES NPY_NTYPES +#define PyArray_NOTYPE NPY_NOTYPE +#define PyArray_CHAR NPY_CHAR +#define PyArray_USERDEF NPY_USERDEF +#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES + +#define PyArray_INTP NPY_INTP +#define PyArray_UINTP NPY_UINTP + +#define PyArray_INT8 NPY_INT8 +#define PyArray_UINT8 NPY_UINT8 +#define PyArray_INT16 NPY_INT16 +#define PyArray_UINT16 NPY_UINT16 +#define PyArray_INT32 NPY_INT32 +#define PyArray_UINT32 NPY_UINT32 + +#ifdef NPY_INT64 +#define PyArray_INT64 NPY_INT64 +#define PyArray_UINT64 NPY_UINT64 +#endif + +#ifdef NPY_INT128 +#define PyArray_INT128 NPY_INT128 +#define PyArray_UINT128 NPY_UINT128 +#endif + +#ifdef NPY_FLOAT16 +#define PyArray_FLOAT16 NPY_FLOAT16 +#define PyArray_COMPLEX32 NPY_COMPLEX32 +#endif + +#ifdef NPY_FLOAT80 +#define PyArray_FLOAT80 NPY_FLOAT80 +#define PyArray_COMPLEX160 NPY_COMPLEX160 +#endif + +#ifdef NPY_FLOAT96 +#define PyArray_FLOAT96 NPY_FLOAT96 +#define PyArray_COMPLEX192 NPY_COMPLEX192 +#endif + +#ifdef NPY_FLOAT128 +#define PyArray_FLOAT128 NPY_FLOAT128 +#define PyArray_COMPLEX256 NPY_COMPLEX256 +#endif + +#define PyArray_FLOAT32 NPY_FLOAT32 +#define PyArray_COMPLEX64 NPY_COMPLEX64 +#define PyArray_FLOAT64 NPY_FLOAT64 +#define PyArray_COMPLEX128 NPY_COMPLEX128 + + +#define PyArray_TYPECHAR NPY_TYPECHAR +#define PyArray_BOOLLTR NPY_BOOLLTR +#define PyArray_BYTELTR NPY_BYTELTR +#define PyArray_UBYTELTR NPY_UBYTELTR +#define PyArray_SHORTLTR NPY_SHORTLTR +#define PyArray_USHORTLTR NPY_USHORTLTR +#define PyArray_INTLTR NPY_INTLTR +#define PyArray_UINTLTR NPY_UINTLTR +#define PyArray_LONGLTR NPY_LONGLTR +#define PyArray_ULONGLTR NPY_ULONGLTR +#define PyArray_LONGLONGLTR NPY_LONGLONGLTR +#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR +#define PyArray_HALFLTR NPY_HALFLTR +#define PyArray_FLOATLTR NPY_FLOATLTR +#define PyArray_DOUBLELTR NPY_DOUBLELTR +#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR +#define PyArray_CFLOATLTR NPY_CFLOATLTR +#define PyArray_CDOUBLELTR NPY_CDOUBLELTR +#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR +#define PyArray_OBJECTLTR NPY_OBJECTLTR +#define PyArray_STRINGLTR NPY_STRINGLTR +#define PyArray_STRINGLTR2 NPY_STRINGLTR2 +#define PyArray_UNICODELTR NPY_UNICODELTR +#define PyArray_VOIDLTR NPY_VOIDLTR +#define PyArray_DATETIMELTR NPY_DATETIMELTR +#define 
PyArray_TIMEDELTALTR NPY_TIMEDELTALTR +#define PyArray_CHARLTR NPY_CHARLTR +#define PyArray_INTPLTR NPY_INTPLTR +#define PyArray_UINTPLTR NPY_UINTPLTR +#define PyArray_GENBOOLLTR NPY_GENBOOLLTR +#define PyArray_SIGNEDLTR NPY_SIGNEDLTR +#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR +#define PyArray_FLOATINGLTR NPY_FLOATINGLTR +#define PyArray_COMPLEXLTR NPY_COMPLEXLTR + +#define PyArray_QUICKSORT NPY_QUICKSORT +#define PyArray_HEAPSORT NPY_HEAPSORT +#define PyArray_MERGESORT NPY_MERGESORT +#define PyArray_SORTKIND NPY_SORTKIND +#define PyArray_NSORTS NPY_NSORTS + +#define PyArray_NOSCALAR NPY_NOSCALAR +#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR +#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR +#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR +#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR +#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR +#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR +#define PyArray_SCALARKIND NPY_SCALARKIND +#define PyArray_NSCALARKINDS NPY_NSCALARKINDS + +#define PyArray_ANYORDER NPY_ANYORDER +#define PyArray_CORDER NPY_CORDER +#define PyArray_FORTRANORDER NPY_FORTRANORDER +#define PyArray_ORDER NPY_ORDER + +#define PyDescr_ISBOOL PyDataType_ISBOOL +#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED +#define PyDescr_ISSIGNED PyDataType_ISSIGNED +#define PyDescr_ISINTEGER PyDataType_ISINTEGER +#define PyDescr_ISFLOAT PyDataType_ISFLOAT +#define PyDescr_ISNUMBER PyDataType_ISNUMBER +#define PyDescr_ISSTRING PyDataType_ISSTRING +#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX +#define PyDescr_ISPYTHON PyDataType_ISPYTHON +#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE +#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF +#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED +#define PyDescr_ISOBJECT PyDataType_ISOBJECT +#define PyDescr_HASFIELDS PyDataType_HASFIELDS + +#define PyArray_LITTLE NPY_LITTLE +#define PyArray_BIG NPY_BIG +#define PyArray_NATIVE NPY_NATIVE +#define PyArray_SWAP NPY_SWAP +#define PyArray_IGNORE NPY_IGNORE + +#define PyArray_NATBYTE NPY_NATBYTE +#define PyArray_OPPBYTE NPY_OPPBYTE + +#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE + +#define PyArray_USE_PYMEM NPY_USE_PYMEM + +#define PyArray_RemoveLargest PyArray_RemoveSmallest + +#define PyArray_UCS4 npy_ucs4 + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/oldnumeric.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/oldnumeric.h new file mode 100644 index 0000000..38530fa --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/oldnumeric.h @@ -0,0 +1,25 @@ +#include "arrayobject.h" + +#ifndef PYPY_VERSION +#ifndef REFCOUNT +# define REFCOUNT NPY_REFCOUNT +# define MAX_ELSIZE 16 +#endif +#endif + +#define PyArray_UNSIGNED_TYPES +#define PyArray_SBYTE NPY_BYTE +#define PyArray_CopyArray PyArray_CopyInto +#define _PyArray_multiply_list PyArray_MultiplyIntList +#define PyArray_ISSPACESAVER(m) NPY_FALSE +#define PyScalarArray_Check PyArray_CheckScalar + +#define CONTIGUOUS NPY_CONTIGUOUS +#define OWN_DIMENSIONS 0 +#define OWN_STRIDES 0 +#define OWN_DATA NPY_OWNDATA +#define SAVESPACE 0 +#define SAVESPACEBIT 0 + +#undef import_array +#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufunc_api.txt b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufunc_api.txt new file mode 100644 index 0000000..883fb77 --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufunc_api.txt @@ -0,0 +1,337 @@ + +================= +NumPy Ufunc C-API +================= +:: + + PyObject * + PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void + **data, char *types, int ntypes, int nin, int + nout, int identity, const char *name, const + char *doc, int unused) + + +:: + + int + PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int + usertype, PyUFuncGenericFunction + function, int *arg_types, void *data) + + +:: + + int + PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject + *kwds, PyArrayObject **op) + + +This generic function is called with the ufunc object, the arguments to it, +and an array of (pointers to) PyArrayObjects which are NULL. + +'op' is an array of at least NPY_MAXARGS PyArrayObject *. + +:: + + void + PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + int + PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject + **errobj) + + +On return, if errobj is populated with a non-NULL value, the caller +owns a new reference to errobj. 
+ +:: + + int + PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) + + +:: + + void + PyUFunc_clearfperr() + + +:: + + int + PyUFunc_getfperr(void ) + + +:: + + int + PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int + *first) + + +:: + + int + PyUFunc_ReplaceLoopBySignature(PyUFuncObject + *func, PyUFuncGenericFunction + newfunc, int + *signature, PyUFuncGenericFunction + *oldfunc) + + +:: + + PyObject * + PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void + **data, char *types, int + ntypes, int nin, int nout, int + identity, const char *name, const + char *doc, int unused, const char + *signature) + + +:: + + int + PyUFunc_SetUsesArraysAsData(void **data, size_t i) + + +:: + + void + PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + int + PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING + casting, PyArrayObject + **operands, PyObject + *type_tup, PyArray_Descr **out_dtypes) + + +This function applies the default type resolution rules +for the provided ufunc. + +Returns 0 on success, -1 on error. + +:: + + int + PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING + casting, PyArrayObject + **operands, PyArray_Descr **dtypes) + + +Validates that the input operands can be cast to +the input types, and the output types can be cast to +the output operands where provided. + +Returns 0 on success, -1 (with exception raised) on validation failure. + +:: + + int + PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, PyArray_Descr + *user_dtype, PyUFuncGenericFunction + function, PyArray_Descr + **arg_dtypes, void *data) + + +:: + + PyObject * + PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction + *func, void + **data, char + *types, int ntypes, int + nin, int nout, int + identity, const char + *name, const char + *doc, int unused, const + char + *signature, PyObject + *identity_value) + + diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufuncobject.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufuncobject.h new file mode 100644 index 0000000..90d837a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufuncobject.h @@ -0,0 +1,377 @@ +#ifndef Py_UFUNCOBJECT_H +#define Py_UFUNCOBJECT_H + +#include <numpy/npy_math.h> +#include <numpy/npy_common.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * The legacy generic inner loop for a standard element-wise or + * generalized ufunc. + */ +typedef void (*PyUFuncGenericFunction) + (char **args, + npy_intp *dimensions, + npy_intp *strides, + void *innerloopdata); + +/* + * The most generic one-dimensional inner loop for + * a masked standard element-wise ufunc. "Masked" here means that it skips + * doing calculations on any items for which the maskptr array has a true + * value. 
+ */ +typedef void (PyUFunc_MaskedStridedInnerLoopFunc)( + char **dataptrs, npy_intp *strides, + char *maskptr, npy_intp mask_stride, + npy_intp count, + NpyAuxData *innerloopdata); + +/* Forward declaration for the type resolver and loop selector typedefs */ +struct _tagPyUFuncObject; + +/* + * Given the operands for calling a ufunc, should determine the + * calculation input and output data types and return an inner loop function. + * This function should validate that the casting rule is being followed, + * and fail if it is not. + * + * For backwards compatibility, the regular type resolution function does not + * support auxiliary data with object semantics. The type resolution call + * which returns a masked generic function returns a standard NpyAuxData + * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros + * work. + * + * ufunc: The ufunc object. + * casting: The 'casting' parameter provided to the ufunc. + * operands: An array of length (ufunc->nin + ufunc->nout), + * with the output parameters possibly NULL. + * type_tup: Either NULL, or the type_tup passed to the ufunc. + * out_dtypes: An array which should be populated with new + * references to (ufunc->nin + ufunc->nout) new + * dtypes, one for each input and output. These + * dtypes should all be in native-endian format. + * + * Should return 0 on success, -1 on failure (with exception set), + * or -2 if Py_NotImplemented should be returned. + */ +typedef int (PyUFunc_TypeResolutionFunc)( + struct _tagPyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + +/* + * Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc, + * and an array of fixed strides (the array will contain NPY_MAX_INTP for + * strides which are not necessarily fixed), returns an inner loop + * with associated auxiliary data. + * + * For backwards compatibility, there is a variant of the inner loop + * selection which returns an inner loop irrespective of the strides, + * and with a void* static auxiliary data instead of an NpyAuxData * + * dynamically allocatable auxiliary data. + * + * ufunc: The ufunc object. + * dtypes: An array which has been populated with dtypes, + * in most cases by the type resolution function + * for the same ufunc. + * fixed_strides: For each input/output, either the stride that + * will be used every time the function is called + * or NPY_MAX_INTP if the stride might change or + * is not known ahead of time. The loop selection + * function may use this stride to pick inner loops + * which are optimized for contiguous or 0-stride + * cases. + * out_innerloop: Should be populated with the correct ufunc inner + * loop for the given type. + * out_innerloopdata: Should be populated with the void* data to + * be passed into the out_innerloop function. + * out_needs_api: If the inner loop needs to use the Python API, + * should set the to 1, otherwise should leave + * this untouched. 
+ */ +typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)( + struct _tagPyUFuncObject *ufunc, + PyArray_Descr **dtypes, + PyUFuncGenericFunction *out_innerloop, + void **out_innerloopdata, + int *out_needs_api); +typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)( + struct _tagPyUFuncObject *ufunc, + PyArray_Descr **dtypes, + PyArray_Descr *mask_dtype, + npy_intp *fixed_strides, + npy_intp fixed_mask_stride, + PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop, + NpyAuxData **out_innerloopdata, + int *out_needs_api); + +typedef struct _tagPyUFuncObject { + PyObject_HEAD + /* + * nin: Number of inputs + * nout: Number of outputs + * nargs: Always nin + nout (Why is it stored?) + */ + int nin, nout, nargs; + + /* Identity for reduction, either PyUFunc_One or PyUFunc_Zero */ + int identity; + + /* Array of one-dimensional core loops */ + PyUFuncGenericFunction *functions; + /* Array of funcdata that gets passed into the functions */ + void **data; + /* The number of elements in 'functions' and 'data' */ + int ntypes; + + /* Used to be unused field 'check_return' */ + int reserved1; + + /* The name of the ufunc */ + const char *name; + + /* Array of type numbers, of size ('nargs' * 'ntypes') */ + char *types; + + /* Documentation string */ + const char *doc; + + void *ptr; + PyObject *obj; + PyObject *userloops; + + /* generalized ufunc parameters */ + + /* 0 for scalar ufunc; 1 for generalized ufunc */ + int core_enabled; + /* number of distinct dimension names in signature */ + int core_num_dim_ix; + + /* + * dimension indices of input/output argument k are stored in + * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] + */ + + /* numbers of core dimensions of each argument */ + int *core_num_dims; + /* + * dimension indices in a flatted form; indices + * are in the range of [0,core_num_dim_ix) + */ + int *core_dim_ixs; + /* + * positions of 1st core dimensions of each + * argument in core_dim_ixs, equivalent to cumsum(core_num_dims) + */ + int *core_offsets; + /* signature string for printing purpose */ + char *core_signature; + + /* + * A function which resolves the types and fills an array + * with the dtypes for the inputs and outputs. + */ + PyUFunc_TypeResolutionFunc *type_resolver; + /* + * A function which returns an inner loop written for + * NumPy 1.6 and earlier ufuncs. This is for backwards + * compatibility, and may be NULL if inner_loop_selector + * is specified. + */ + PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector; + /* + * This was blocked off to be the "new" inner loop selector in 1.7, + * but this was never implemented. (This is also why the above + * selector is called the "legacy" selector.) + */ + void *reserved2; + /* + * A function which returns a masked inner loop for the ufunc. + */ + PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector; + + /* + * List of flags for each operand when ufunc is called by nditer object. + * These flags will be used in addition to the default flags for each + * operand set by nditer object. + */ + npy_uint32 *op_flags; + + /* + * List of global flags used when ufunc is called by nditer object. + * These flags will be used in addition to the default global flags + * set by nditer object. + */ + npy_uint32 iter_flags; + + /* New in NPY_API_VERSION 0x0000000D and above */ + + /* + * for each core_num_dim_ix distinct dimension names, + * the possible "frozen" size (-1 if not frozen). 
+ */ + npy_intp *core_dim_sizes; + + /* + * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags + */ + npy_uint32 *core_dim_flags; + + /* Identity for reduction, when identity == PyUFunc_IdentityValue */ + PyObject *identity_value; + +} PyUFuncObject; + +#include "arrayobject.h" +/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */ +/* the core dimension's size will be determined by the operands. */ +#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002 +/* the core dimension may be absent */ +#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004 +/* flags inferred during execution */ +#define UFUNC_CORE_DIM_MISSING 0x00040000 + +#define UFUNC_ERR_IGNORE 0 +#define UFUNC_ERR_WARN 1 +#define UFUNC_ERR_RAISE 2 +#define UFUNC_ERR_CALL 3 +#define UFUNC_ERR_PRINT 4 +#define UFUNC_ERR_LOG 5 + + /* Python side integer mask */ + +#define UFUNC_MASK_DIVIDEBYZERO 0x07 +#define UFUNC_MASK_OVERFLOW 0x3f +#define UFUNC_MASK_UNDERFLOW 0x1ff +#define UFUNC_MASK_INVALID 0xfff + +#define UFUNC_SHIFT_DIVIDEBYZERO 0 +#define UFUNC_SHIFT_OVERFLOW 3 +#define UFUNC_SHIFT_UNDERFLOW 6 +#define UFUNC_SHIFT_INVALID 9 + + +#define UFUNC_OBJ_ISOBJECT 1 +#define UFUNC_OBJ_NEEDS_API 2 + + /* Default user error mode */ +#define UFUNC_ERR_DEFAULT \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID) + +#if NPY_ALLOW_THREADS +#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); +#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); +#else +#define NPY_LOOP_BEGIN_THREADS +#define NPY_LOOP_END_THREADS +#endif + +/* + * UFunc has unit of 0, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_Zero 0 +/* + * UFunc has unit of 1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_One 1 +/* + * UFunc has unit of -1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. Intended for + * bitwise_and reduction. + */ +#define PyUFunc_MinusOne 2 +/* + * UFunc has no unit, and the order of operations cannot be reordered. + * This case does not allow reduction with multiple axes at once. + */ +#define PyUFunc_None -1 +/* + * UFunc has no unit, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_ReorderableNone -2 +/* + * UFunc unit is in identity_value, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_IdentityValue -3 + + +#define UFUNC_REDUCE 0 +#define UFUNC_ACCUMULATE 1 +#define UFUNC_REDUCEAT 2 +#define UFUNC_OUTER 3 + + +typedef struct { + int nin; + int nout; + PyObject *callable; +} PyUFunc_PyFuncData; + +/* A linked-list of function information for + user-defined 1-d loops. 
+ */ +typedef struct _loop1d_info { + PyUFuncGenericFunction func; + void *data; + int *arg_types; + struct _loop1d_info *next; + int nargs; + PyArray_Descr **arg_dtypes; +} PyUFunc_Loop1d; + + +#include "__ufunc_api.h" + +#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" + +#define UFUNC_CHECK_ERROR(arg) \ + do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \ + ((arg)->errormask && \ + PyUFunc_checkfperr((arg)->errormask, \ + (arg)->errobj, \ + &(arg)->first))) \ + goto fail;} while (0) + +/* + * THESE MACROS ARE DEPRECATED. + * Use npy_set_floatstatus_* in the npymath library. + */ +#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO +#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW +#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW +#define UFUNC_FPE_INVALID NPY_FPE_INVALID + +#define UFUNC_CHECK_STATUS(ret) \ + { \ + ret = npy_clear_floatstatus(); \ + } +#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() +#define generate_overflow_error() npy_set_floatstatus_overflow() + + /* Make sure it gets defined if it isn't already */ +#ifndef UFUNC_NOFPE +/* Clear the floating point exception default of Borland C++ */ +#if defined(__BORLANDC__) +#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); +#else +#define UFUNC_NOFPE +#endif +#endif + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_UFUNCOBJECT_H */ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/utils.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/utils.h new file mode 100644 index 0000000..32218b8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/utils.h @@ -0,0 +1,21 @@ +#ifndef __NUMPY_UTILS_HEADER__ +#define __NUMPY_UTILS_HEADER__ + +#ifndef __COMP_NPY_UNUSED + #if defined(__GNUC__) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + # elif defined(__ICC) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + # elif defined(__clang__) + #define __COMP_NPY_UNUSED __attribute__ ((unused)) + #else + #define __COMP_NPY_UNUSED + #endif +#endif + +/* Use this to tag a variable as not used. It will remove unused variable + * warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable + * to avoid accidental use */ +#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED + +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/info.py b/project/venv/lib/python2.7/site-packages/numpy/core/info.py new file mode 100644 index 0000000..c6f7bbc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/info.py @@ -0,0 +1,87 @@ +"""Defines a multi-dimensional array and useful procedures for Numerical computation. + +Functions + +- array - NumPy Array construction +- zeros - Return an array of all zeros +- empty - Return an uninitialized array +- shape - Return shape of sequence or array +- rank - Return number of dimensions +- size - Return number of elements in entire array or a + certain dimension +- fromstring - Construct array from (byte) string +- take - Select sub-arrays using sequence of indices +- put - Set sub-arrays using sequence of 1-D indices +- putmask - Set portion of arrays using a mask +- reshape - Return array with new shape +- repeat - Repeat elements of array +- choose - Construct new array from indexed array tuple +- correlate - Correlate two 1-d arrays +- searchsorted - Search for element in 1-d array +- sum - Total sum over a specified dimension +- average - Average, possibly weighted, over axis or array. 
+- cumsum - Cumulative sum over a specified dimension +- product - Total product over a specified dimension +- cumproduct - Cumulative product over a specified dimension +- alltrue - Logical and over an entire axis +- sometrue - Logical or over an entire axis +- allclose - Tests if sequences are essentially equal + +More Functions: + +- arange - Return regularly spaced array +- asarray - Guarantee NumPy array +- convolve - Convolve two 1-d arrays +- swapaxes - Exchange axes +- concatenate - Join arrays together +- transpose - Permute axes +- sort - Sort elements of array +- argsort - Indices of sorted array +- argmax - Index of largest value +- argmin - Index of smallest value +- inner - Innerproduct of two arrays +- dot - Dot product (matrix multiplication) +- outer - Outerproduct of two arrays +- resize - Return array with arbitrary new shape +- indices - Tuple of indices +- fromfunction - Construct array from universal function +- diagonal - Return diagonal array +- trace - Trace of array +- dump - Dump array to file object (pickle) +- dumps - Return pickled string representing data +- load - Return array stored in file object +- loads - Return array from pickled string +- ravel - Return array as 1-D +- nonzero - Indices of nonzero elements for 1-D array +- shape - Shape of array +- where - Construct array from binary result +- compress - Elements of array where condition is true +- clip - Clip array between two values +- ones - Array of all ones +- identity - 2-D identity array (matrix) + +(Universal) Math Functions + + add logical_or exp + subtract logical_xor log + multiply logical_not log10 + divide maximum sin + divide_safe minimum sinh + conjugate bitwise_and sqrt + power bitwise_or tan + absolute bitwise_xor tanh + negative invert ceil + greater left_shift fabs + greater_equal right_shift floor + less arccos arctan2 + less_equal arcsin fmod + equal arctan hypot + not_equal cos around + logical_and cosh sign + arccosh arcsinh arctanh + +""" +from __future__ import division, absolute_import, print_function + +depends = ['testing'] +global_symbols = ['*'] diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/info.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/info.pyc new file mode 100644 index 0000000..32adae4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/lib/libnpymath.a b/project/venv/lib/python2.7/site-packages/numpy/core/lib/libnpymath.a new file mode 100644 index 0000000..bbbf777 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/lib/libnpymath.a differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini b/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini new file mode 100644 index 0000000..5840f5e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini @@ -0,0 +1,12 @@ +[meta] +Name = mlib +Description = Math library used with this version of numpy +Version = 1.0 + +[default] +Libs=-lm +Cflags= + +[msvc] +Libs=m.lib +Cflags= diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini b/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini new file mode 100644 index 0000000..3e465ad --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini @@ -0,0 +1,20 @@ +[meta] +Name=npymath 
+Description=Portable, core math library implementing C99 standard +Version=0.1 + +[variables] +pkgname=numpy.core +prefix=${pkgdir} +libdir=${prefix}/lib +includedir=${prefix}/include + +[default] +Libs=-L${libdir} -lnpymath +Cflags=-I${includedir} +Requires=mlib + +[msvc] +Libs=/LIBPATH:${libdir} npymath.lib +Cflags=/INCLUDE:${includedir} +Requires=mlib diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/machar.py b/project/venv/lib/python2.7/site-packages/numpy/core/machar.py new file mode 100644 index 0000000..91fb4ed --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/machar.py @@ -0,0 +1,344 @@ +""" +Machine arithmetics - determine the parameters of the +floating-point arithmetic system + +Author: Pearu Peterson, September 2003 + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['MachAr'] + +from numpy.core.fromnumeric import any +from numpy.core.numeric import errstate +from numpy.core.overrides import set_module + +# Need to speed this up...especially for longfloat + +@set_module('numpy') +class MachAr(object): + """ + Diagnosing machine parameters. + + Attributes + ---------- + ibeta : int + Radix in which numbers are represented. + it : int + Number of base-`ibeta` digits in the floating point mantissa M. + machep : int + Exponent of the smallest (most negative) power of `ibeta` that, + added to 1.0, gives something different from 1.0 + eps : float + Floating-point number ``beta**machep`` (floating point precision) + negep : int + Exponent of the smallest power of `ibeta` that, subtracted + from 1.0, gives something different from 1.0. + epsneg : float + Floating-point number ``beta**negep``. + iexp : int + Number of bits in the exponent (including its sign and bias). + minexp : int + Smallest (most negative) power of `ibeta` consistent with there + being no leading zeros in the mantissa. + xmin : float + Floating point number ``beta**minexp`` (the smallest [in + magnitude] usable floating value). + maxexp : int + Smallest (positive) power of `ibeta` that causes overflow. + xmax : float + ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] + usable floating value). + irnd : int + In ``range(6)``, information on what kind of rounding is done + in addition, and on how underflow is handled. + ngrd : int + Number of 'guard digits' used when truncating the product + of two mantissas to fit the representation. + epsilon : float + Same as `eps`. + tiny : float + Same as `xmin`. + huge : float + Same as `xmax`. + precision : float + ``- int(-log10(eps))`` + resolution : float + ``- 10**(-precision)`` + + Parameters + ---------- + float_conv : function, optional + Function that converts an integer or integer array to a float + or float array. Default is `float`. + int_conv : function, optional + Function that converts a float or float array to an integer or + integer array. Default is `int`. + float_to_float : function, optional + Function that converts a float array to float. Default is `float`. + Note that this does not seem to do anything useful in the current + implementation. + float_to_str : function, optional + Function that converts a single float to a string. Default is + ``lambda v:'%24.16e' %v``. + title : str, optional + Title that is printed in the string representation of `MachAr`. + + See Also + -------- + finfo : Machine limits for floating point types. + iinfo : Machine limits for integer types. + + References + ---------- + .. 
[1] Press, Teukolsky, Vetterling and Flannery, + "Numerical Recipes in C++," 2nd ed, + Cambridge University Press, 2002, p. 31. + + """ + + def __init__(self, float_conv=float,int_conv=int, + float_to_float=float, + float_to_str=lambda v:'%24.16e' % v, + title='Python floating point number'): + """ + + float_conv - convert integer to float (array) + int_conv - convert float (array) to integer + float_to_float - convert float array to float + float_to_str - convert array float to str + title - description of used floating point numbers + + """ + # We ignore all errors here because we are purposely triggering + # underflow to detect the properties of the runninng arch. + with errstate(under='ignore'): + self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) + + def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): + max_iterN = 10000 + msg = "Did not converge after %d tries with %s" + one = float_conv(1) + two = one + one + zero = one - one + + # Do we really need to do this? Aren't they 2 and 2.0? + # Determine ibeta and beta + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + b = one + for _ in range(max_iterN): + b = b + b + temp = a + b + itemp = int_conv(temp-a) + if any(itemp != 0): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + ibeta = itemp + beta = float_conv(ibeta) + + # Determine it and irnd + it = -1 + b = one + for _ in range(max_iterN): + it = it + 1 + b = b * beta + temp = b + one + temp1 = temp - b + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + + betah = beta / two + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + temp = a + betah + irnd = 0 + if any(temp-a != zero): + irnd = 1 + tempa = a + beta + temp = tempa + betah + if irnd == 0 and any(temp-tempa != zero): + irnd = 2 + + # Determine negep and epsneg + negep = it + 3 + betain = one / beta + a = one + for i in range(negep): + a = a * betain + b = a + for _ in range(max_iterN): + temp = one - a + if any(temp-one != zero): + break + a = a * beta + negep = negep - 1 + # Prevent infinite loop on PPC with gcc 4.0: + if negep < 0: + raise RuntimeError("could not determine machine tolerance " + "for 'negep', locals() -> %s" % (locals())) + else: + raise RuntimeError(msg % (_, one.dtype)) + negep = -negep + epsneg = a + + # Determine machep and eps + machep = - it - 3 + a = b + + for _ in range(max_iterN): + temp = one + a + if any(temp-one != zero): + break + a = a * beta + machep = machep + 1 + else: + raise RuntimeError(msg % (_, one.dtype)) + eps = a + + # Determine ngrd + ngrd = 0 + temp = one + eps + if irnd == 0 and any(temp*one - one != zero): + ngrd = 1 + + # Determine iexp + i = 0 + k = 1 + z = betain + t = one + eps + nxres = 0 + for _ in range(max_iterN): + y = z + z = y*y + a = z*one # Check here for underflow + temp = z*t + if any(a+a == zero) or any(abs(z) >= y): + break + temp1 = temp * betain + if any(temp1*beta == z): + break + i = i + 1 + k = k + k + else: + raise RuntimeError(msg % (_, one.dtype)) + if ibeta != 10: + iexp = i + 1 + mx = k + k + else: + iexp = 2 + iz = ibeta + while k >= iz: + iz = iz * ibeta + iexp = iexp + 1 + mx = iz + iz - 1 + + # Determine minexp and xmin + for _ in range(max_iterN): + xmin = y + y = y * betain + a = y * one + temp = y * 
t + if any((a + a) != zero) and any(abs(y) < xmin): + k = k + 1 + temp1 = temp * betain + if any(temp1*beta == y) and any(temp != y): + nxres = 3 + xmin = y + break + else: + break + else: + raise RuntimeError(msg % (_, one.dtype)) + minexp = -k + + # Determine maxexp, xmax + if mx <= k + k - 3 and ibeta != 10: + mx = mx + mx + iexp = iexp + 1 + maxexp = mx + minexp + irnd = irnd + nxres + if irnd >= 2: + maxexp = maxexp - 2 + i = maxexp + minexp + if ibeta == 2 and not i: + maxexp = maxexp - 1 + if i > 20: + maxexp = maxexp - 1 + if any(a != y): + maxexp = maxexp - 2 + xmax = one - epsneg + if any(xmax*one != xmax): + xmax = one - beta*epsneg + xmax = xmax / (xmin*beta*beta*beta) + i = maxexp + minexp + 3 + for j in range(i): + if ibeta == 2: + xmax = xmax + xmax + else: + xmax = xmax * beta + + self.ibeta = ibeta + self.it = it + self.negep = negep + self.epsneg = float_to_float(epsneg) + self._str_epsneg = float_to_str(epsneg) + self.machep = machep + self.eps = float_to_float(eps) + self._str_eps = float_to_str(eps) + self.ngrd = ngrd + self.iexp = iexp + self.minexp = minexp + self.xmin = float_to_float(xmin) + self._str_xmin = float_to_str(xmin) + self.maxexp = maxexp + self.xmax = float_to_float(xmax) + self._str_xmax = float_to_str(xmax) + self.irnd = irnd + + self.title = title + # Commonly used parameters + self.epsilon = self.eps + self.tiny = self.xmin + self.huge = self.xmax + + import math + self.precision = int(-math.log10(float_to_float(self.eps))) + ten = two + two + two + two + two + resolution = ten ** (-self.precision) + self.resolution = float_to_float(resolution) + self._str_resolution = float_to_str(resolution) + + def __str__(self): + fmt = ( + 'Machine parameters for %(title)s\n' + '---------------------------------------------------------------------\n' + 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' + 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' + 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' + 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' + 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' + '---------------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + +if __name__ == '__main__': + print(MachAr()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/machar.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/machar.pyc new file mode 100644 index 0000000..3aaaa4b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/machar.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/memmap.py b/project/venv/lib/python2.7/site-packages/numpy/core/memmap.py new file mode 100644 index 0000000..82bc470 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/memmap.py @@ -0,0 +1,334 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from .numeric import uint8, ndarray, dtype +from numpy.compat import ( + long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path +) +from numpy.core.overrides import set_module + +__all__ = ['memmap'] + +dtypedescr = dtype +valid_filemodes = ["r", "c", "r+", "w+"] +writeable_filemodes = ["r+", "w+"] + +mode_equivalents = { + "readonly":"r", + "copyonwrite":"c", + "readwrite":"r+", + "write":"w+" + } + + +@set_module('numpy') +class memmap(ndarray): + """Create a memory-map to an array stored in a *binary* file on disk. 
+ + Memory-mapped files are used for accessing small segments of large files + on disk, without reading the entire file into memory. NumPy's + memmap's are array-like objects. This differs from Python's ``mmap`` + module, which uses file-like objects. + + This subclass of ndarray has some unpleasant interactions with + some operations, because it doesn't quite fit properly as a subclass. + An alternative to using this subclass is to create the ``mmap`` + object yourself, then create an ndarray with ndarray.__new__ directly, + passing the object created in its 'buffer=' parameter. + + This class may at some point be turned into a factory function + which returns a view into an mmap buffer. + + Delete the memmap instance to close the memmap file. + + + Parameters + ---------- + filename : str, file-like object, or pathlib.Path instance + The file name or file object to be used as the array data buffer. + dtype : data-type, optional + The data-type used to interpret the file contents. + Default is `uint8`. + mode : {'r+', 'r', 'w+', 'c'}, optional + The file is opened in this mode: + + +------+-------------------------------------------------------------+ + | 'r' | Open existing file for reading only. | + +------+-------------------------------------------------------------+ + | 'r+' | Open existing file for reading and writing. | + +------+-------------------------------------------------------------+ + | 'w+' | Create or overwrite existing file for reading and writing. | + +------+-------------------------------------------------------------+ + | 'c' | Copy-on-write: assignments affect data in memory, but | + | | changes are not saved to disk. The file on disk is | + | | read-only. | + +------+-------------------------------------------------------------+ + + Default is 'r+'. + offset : int, optional + In the file, array data starts at this offset. Since `offset` is + measured in bytes, it should normally be a multiple of the byte-size + of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of + file are valid; The file will be extended to accommodate the + additional data. By default, ``memmap`` will start at the beginning of + the file, even if ``filename`` is a file pointer ``fp`` and + ``fp.tell() != 0``. + shape : tuple, optional + The desired shape of the array. If ``mode == 'r'`` and the number + of remaining bytes after `offset` is not a multiple of the byte-size + of `dtype`, you must specify `shape`. By default, the returned array + will be 1-D with the number of elements determined by file size + and data-type. + order : {'C', 'F'}, optional + Specify the order of the ndarray memory layout: + :term:`row-major`, C-style or :term:`column-major`, + Fortran-style. This only has an effect if the shape is + greater than 1-D. The default order is 'C'. + + Attributes + ---------- + filename : str or pathlib.Path instance + Path to the mapped file. + offset : int + Offset position in the file. + mode : str + File mode. + + Methods + ------- + flush + Flush any changes in memory to file on disk. + When you delete a memmap object, flush is called first to write + changes to disk before removing the object. + + + See also + -------- + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + The memmap object can be used anywhere an ndarray is accepted. + Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns + ``True``. + + Memory-mapped files cannot be larger than 2GB on 32-bit systems. 
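The alternative construction mentioned above, wrapping an ``mmap`` buffer in a plain ndarray yourself, looks roughly like this (a minimal sketch; the file name, dtype and shape are illustrative assumptions, and the file must already be large enough to back the requested shape):

    import mmap
    import numpy as np

    # Map an existing binary file and view it as a base-class ndarray,
    # bypassing the memmap subclass and its interactions entirely.
    with open('data.bin', 'r+b') as f:
        buf = mmap.mmap(f.fileno(), 0)   # length 0 maps the whole file
    arr = np.ndarray((3, 4), dtype='float32', buffer=buf)

Because ``arr`` is a plain ndarray, there is no flush-on-delete behaviour; the ``mmap`` object itself owns the mapping and stays alive for as long as the array references it.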
+ + When a memmap causes a file to be created or extended beyond its + current size in the filesystem, the contents of the new part are + unspecified. On systems with POSIX filesystem semantics, the extended + part will be filled with zero bytes. + + Examples + -------- + >>> data = np.arange(12, dtype='float32') + >>> data.resize((3,4)) + + This example uses a temporary file so that doctest doesn't write + files to your directory. You would use a 'normal' filename. + + >>> from tempfile import mkdtemp + >>> import os.path as path + >>> filename = path.join(mkdtemp(), 'newfile.dat') + + Create a memmap with dtype and shape that matches our data: + + >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp + memmap([[ 0., 0., 0., 0.], + [ 0., 0., 0., 0.], + [ 0., 0., 0., 0.]], dtype=float32) + + Write data to memmap array: + + >>> fp[:] = data[:] + >>> fp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + >>> fp.filename == path.abspath(filename) + True + + Deletion flushes memory changes to disk before removing the object: + + >>> del fp + + Load the memmap and verify data was stored: + + >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Read-only memmap: + + >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr.flags.writeable + False + + Copy-on-write memmap: + + >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc.flags.writeable + True + + It's possible to assign to copy-on-write array, but values are only + written into the memory copy of the array, and not written to disk: + + >>> fpc + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + >>> fpc[0,:] = 0 + >>> fpc + memmap([[ 0., 0., 0., 0.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + File on disk is unchanged: + + >>> fpr + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Offset into a memmap: + + >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo + memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) + + """ + + __array_priority__ = -100.0 + + def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + shape=None, order='C'): + # Import here to minimize 'import numpy' overhead + import mmap + import os.path + try: + mode = mode_equivalents[mode] + except KeyError: + if mode not in valid_filemodes: + raise ValueError("mode must be one of %s" % + (valid_filemodes + list(mode_equivalents.keys()))) + + if mode == 'w+' and shape is None: + raise ValueError("shape must be given") + + if hasattr(filename, 'read'): + f_ctx = contextlib_nullcontext(filename) + else: + f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b') + + with f_ctx as fid: + fid.seek(0, 2) + flen = fid.tell() + descr = dtypedescr(dtype) + _dbytes = descr.itemsize + + if shape is None: + bytes = flen - offset + if bytes % _dbytes: + raise ValueError("Size of available data is not a " + "multiple of the data-type size.") + size = bytes // _dbytes + shape = (size,) + else: + if not isinstance(shape, tuple): + shape = (shape,) + size = np.intp(1) # avoid default choice of np.int_, which might overflow + for k in shape: + size *= k + + bytes = long(offset + size*_dbytes) + + if mode == 'w+' or (mode == 'r+' and flen < bytes): + fid.seek(bytes - 1, 0) + fid.write(b'\0') + fid.flush() 
+ + if mode == 'c': + acc = mmap.ACCESS_COPY + elif mode == 'r': + acc = mmap.ACCESS_READ + else: + acc = mmap.ACCESS_WRITE + + start = offset - offset % mmap.ALLOCATIONGRANULARITY + bytes -= start + array_offset = offset - start + mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) + + self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + offset=array_offset, order=order) + self._mmap = mm + self.offset = offset + self.mode = mode + + if is_pathlib_path(filename): + # special case - if we were constructed with a pathlib.path, + # then filename is a path object, not a string + self.filename = filename.resolve() + elif hasattr(fid, "name") and isinstance(fid.name, basestring): + # py3 returns int for TemporaryFile().name + self.filename = os.path.abspath(fid.name) + # same as memmap copies (e.g. memmap + 1) + else: + self.filename = None + + return self + + def __array_finalize__(self, obj): + if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): + self._mmap = obj._mmap + self.filename = obj.filename + self.offset = obj.offset + self.mode = obj.mode + else: + self._mmap = None + self.filename = None + self.offset = None + self.mode = None + + def flush(self): + """ + Write any changes in the array to the file on disk. + + For further information, see `memmap`. + + Parameters + ---------- + None + + See Also + -------- + memmap + + """ + if self.base is not None and hasattr(self.base, 'flush'): + self.base.flush() + + def __array_wrap__(self, arr, context=None): + arr = super(memmap, self).__array_wrap__(arr, context) + + # Return a memmap if a memmap was given as the output of the + # ufunc. Leave the arr class unchanged if self is not a memmap + # to keep original memmap subclasses behavior + if self is arr or type(self) is not memmap: + return arr + # Return scalar instead of 0d memmap, e.g. for np.sum with + # axis=None + if arr.shape == (): + return arr[()] + # Return ndarray otherwise + return arr.view(np.ndarray) + + def __getitem__(self, index): + res = super(memmap, self).__getitem__(index) + if type(res) is memmap and res._mmap is None: + return res.view(type=ndarray) + return res diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/memmap.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/memmap.pyc new file mode 100644 index 0000000..917dc40 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/memmap.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.py b/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.py new file mode 100644 index 0000000..7908969 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.py @@ -0,0 +1,1567 @@ +""" +Create the numpy.core.multiarray namespace for backward compatibility. In v1.16 +the multiarray and umath c-extension modules were merged into a single +_multiarray_umath extension module. So we replicate the old namespace +by importing from the extension module. + +""" + +import functools +import warnings + +from . import overrides +from . 
import _multiarray_umath +import numpy as np +from numpy.core._multiarray_umath import * +from numpy.core._multiarray_umath import ( + _fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string, + _ARRAY_API, _monotonicity + ) + +__all__ = [ + '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', + 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', + 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', + 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose', + '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity', + 'add_docstring', 'arange', 'array', 'bincount', 'broadcast', + 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', + 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', + 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', + 'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', + 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', + 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner', + 'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort', + 'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', + 'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits', + 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', + 'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops', + 'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt', + 'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot', + 'where', 'zeros'] + +# For backward compatibility, make sure pickle imports these functions from here +_reconstruct.__module__ = 'numpy.core.multiarray' +scalar.__module__ = 'numpy.core.multiarray' + + +arange.__module__ = 'numpy' +array.__module__ = 'numpy' +datetime_data.__module__ = 'numpy' +empty.__module__ = 'numpy' +frombuffer.__module__ = 'numpy' +fromfile.__module__ = 'numpy' +fromiter.__module__ = 'numpy' +frompyfunc.__module__ = 'numpy' +fromstring.__module__ = 'numpy' +geterrobj.__module__ = 'numpy' +may_share_memory.__module__ = 'numpy' +nested_iters.__module__ = 'numpy' +promote_types.__module__ = 'numpy' +set_numeric_ops.__module__ = 'numpy' +seterrobj.__module__ = 'numpy' +zeros.__module__ = 'numpy' + + +# We can't verify dispatcher signatures because NumPy's C functions don't +# support introspection. +array_function_from_c_func_and_dispatcher = functools.partial( + overrides.array_function_from_dispatcher, + module='numpy', docs_from_dispatcher=True, verify=False) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) +def empty_like(prototype, dtype=None, order=None, subok=None): + """ + empty_like(prototype, dtype=None, order='K', subok=True) + + Return a new array with the same shape and type as a given array. + + Parameters + ---------- + prototype : array_like + The shape and data-type of `prototype` define these same attributes + of the returned array. + dtype : data-type, optional + Overrides the data type of the result. + + .. versionadded:: 1.6.0 + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran + contiguous, 'C' otherwise. 'K' means match the layout of ``prototype`` + as closely as possible. + + .. versionadded:: 1.6.0 + subok : bool, optional. 
+ If True, then the newly created array will use the sub-class + type of 'a', otherwise it will be a base-class array. Defaults + to True. + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data with the same + shape and type as `prototype`. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + + Notes + ----- + This function does *not* initialize the returned array; to do that use + `zeros_like` or `ones_like` instead. It may be marginally faster than + the functions that do set the array values. + + Examples + -------- + >>> a = ([1,2,3], [4,5,6]) # a is array-like + >>> np.empty_like(a) + array([[-1073741821, -1073741821, 3], #random + [ 0, 0, -1073741821]]) + >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) + >>> np.empty_like(a) + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random + [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) + + """ + return (prototype,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) +def concatenate(arrays, axis=None, out=None): + """ + concatenate((a1, a2, ...), axis=0, out=None) + + Join a sequence of arrays along an existing axis. + + Parameters + ---------- + a1, a2, ... : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. If axis is None, + arrays are flattened before use. Default is 0. + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what concatenate would have returned if no + out argument were specified. + + Returns + ------- + res : ndarray + The concatenated array. + + See Also + -------- + ma.concatenate : Concatenate function that preserves input masks. + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. + split : Split array into a list of multiple sub-arrays of equal size. + hsplit : Split array into multiple sub-arrays horizontally (column wise) + vsplit : Split array into multiple sub-arrays vertically (row wise) + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + stack : Stack a sequence of arrays along a new axis. + hstack : Stack arrays in sequence horizontally (column wise) + vstack : Stack arrays in sequence vertically (row wise) + dstack : Stack arrays in sequence depth wise (along third dimension) + block : Assemble arrays from blocks. + + Notes + ----- + When one or more of the arrays to be concatenated is a MaskedArray, + this function will return a MaskedArray object instead of an ndarray, + but the input masks are *not* preserved. In cases where a MaskedArray + is expected as input, use the ma.concatenate function from the masked + array module instead. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> b = np.array([[5, 6]]) + >>> np.concatenate((a, b), axis=0) + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.concatenate((a, b.T), axis=1) + array([[1, 2, 5], + [3, 4, 6]]) + >>> np.concatenate((a, b), axis=None) + array([1, 2, 3, 4, 5, 6]) + + This function will not preserve masking of MaskedArray inputs. 
+ + >>> a = np.ma.arange(3) + >>> a[1] = np.ma.masked + >>> b = np.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + array([2, 3, 4]) + >>> np.concatenate([a, b]) + masked_array(data=[0, 1, 2, 2, 3, 4], + mask=False, + fill_value=999999) + >>> np.ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) +def inner(a, b): + """ + inner(a, b) + + Inner product of two arrays. + + Ordinary inner product of vectors for 1-D arrays (without complex + conjugation), in higher dimensions a sum product over the last axes. + + Parameters + ---------- + a, b : array_like + If `a` and `b` are nonscalar, their last dimensions must match. + + Returns + ------- + out : ndarray + `out.shape = a.shape[:-1] + b.shape[:-1]` + + Raises + ------ + ValueError + If the last dimension of `a` and `b` has different size. + + See Also + -------- + tensordot : Sum products over arbitrary axes. + dot : Generalised matrix product, using second last dimension of `b`. + einsum : Einstein summation convention. + + Notes + ----- + For vectors (1-D arrays) it computes the ordinary inner-product:: + + np.inner(a, b) = sum(a[:]*b[:]) + + More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: + + np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) + + or explicitly:: + + np.inner(a, b)[i0,...,ir-1,j0,...,js-1] + = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) + + In addition `a` or `b` may be scalars, in which case:: + + np.inner(a,b) = a*b + + Examples + -------- + Ordinary inner product for vectors: + + >>> a = np.array([1,2,3]) + >>> b = np.array([0,1,0]) + >>> np.inner(a, b) + 2 + + A multidimensional example: + + >>> a = np.arange(24).reshape((2,3,4)) + >>> b = np.arange(4) + >>> np.inner(a, b) + array([[ 14, 38, 62], + [ 86, 110, 134]]) + + An example where `b` is a scalar: + + >>> np.inner(np.eye(2), 7) + array([[ 7., 0.], + [ 0., 7.]]) + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) +def where(condition, x=None, y=None): + """ + where(condition, [x, y]) + + Return elements chosen from `x` or `y` depending on `condition`. + + .. note:: + When only `condition` is provided, this function is a shorthand for + ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be + preferred, as it behaves correctly for subclasses. The rest of this + documentation covers only the case where all three arguments are + provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : ndarray + An array with elements from `x` where `condition` is True, and elements + from `y` elsewhere. 
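A note on the bodies of these wrappers: functions such as ``concatenate``, ``inner`` and ``where`` only ``return`` a tuple (or list) of their array-like arguments. They are dispatchers; the decorator pairs each one with the C implementation, and the returned tuple lists exactly the arguments that are checked for an ``__array_function__`` override. A hedged sketch of the protocol from the user side (in the NumPy 1.16 vintage of this file, dispatch is still gated behind the ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1`` environment variable; the class below is a toy illustration, not part of the source):

    import numpy as np

    # A toy type that intercepts NumPy functions dispatched through
    # __array_function__; `func` is the public NumPy function object.
    class Intercepted(object):
        def __array_function__(self, func, types, args, kwargs):
            return 'intercepted: %s' % func.__name__

    print(np.concatenate([Intercepted(), Intercepted()]))
    # -> intercepted: concatenate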
+ + See Also + -------- + choose + nonzero : The function that is called when x and y are omitted + + Notes + ----- + If all the arrays are 1-D, `where` is equivalent to:: + + [xv if c else yv + for c, xv, yv in zip(condition, x, y)] + + Examples + -------- + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.where(a < 5, a, 10*a) + array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) + + This can be used on multidimensional arrays too: + + >>> np.where([[True, False], [True, True]], + ... [[1, 2], [3, 4]], + ... [[9, 8], [7, 6]]) + array([[1, 8], + [3, 4]]) + + The shapes of x, y, and the condition are broadcast together: + + >>> x, y = np.ogrid[:3, :4] + >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast + array([[10, 0, 0, 0], + [10, 11, 1, 1], + [10, 11, 12, 2]]) + + >>> a = np.array([[0, 1, 2], + ... [0, 2, 4], + ... [0, 3, 6]]) + >>> np.where(a < 4, a, -1) # -1 is broadcast + array([[ 0, 1, 2], + [ 0, 2, -1], + [ 0, 3, -1]]) + """ + return (condition, x, y) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) +def lexsort(keys, axis=None): + """ + lexsort(keys, axis=-1) + + Perform an indirect stable sort using a sequence of keys. + + Given multiple sorting keys, which can be interpreted as columns in a + spreadsheet, lexsort returns an array of integer indices that describes + the sort order by multiple columns. The last key in the sequence is used + for the primary sort order, the second-to-last key for the secondary sort + order, and so on. The keys argument must be a sequence of objects that + can be converted to arrays of the same shape. If a 2D array is provided + for the keys argument, it's rows are interpreted as the sorting keys and + sorting is according to the last row, second last row etc. + + Parameters + ---------- + keys : (k, N) array or tuple containing k (N,)-shaped sequences + The `k` different "columns" to be sorted. The last column (or row if + `keys` is a 2D array) is the primary sort key. + axis : int, optional + Axis to be indirectly sorted. By default, sort over the last axis. + + Returns + ------- + indices : (N,) ndarray of ints + Array of indices that sort the keys along the specified axis. + + See Also + -------- + argsort : Indirect sort. + ndarray.sort : In-place sort. + sort : Return a sorted copy of an array. + + Examples + -------- + Sort names: first by surname, then by name. + + >>> surnames = ('Hertz', 'Galilei', 'Hertz') + >>> first_names = ('Heinrich', 'Galileo', 'Gustav') + >>> ind = np.lexsort((first_names, surnames)) + >>> ind + array([1, 2, 0]) + + >>> [surnames[i] + ", " + first_names[i] for i in ind] + ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] + + Sort two columns of numbers: + + >>> a = [1,5,1,4,3,4,4] # First column + >>> b = [9,4,0,4,0,2,1] # Second column + >>> ind = np.lexsort((b,a)) # Sort by a, then by b + >>> print(ind) + [2 0 4 6 5 3 1] + + >>> [(a[i],b[i]) for i in ind] + [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] + + Note that sorting is first according to the elements of ``a``. + Secondary sorting is according to the elements of ``b``. + + A normal ``argsort`` would have yielded: + + >>> [(a[i],b[i]) for i in np.argsort(a)] + [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] + + Structured arrays are sorted lexically by ``argsort``: + + >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], + ... 
dtype=np.dtype([('x', int), ('y', int)])) + + >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) + array([2, 0, 4, 6, 5, 3, 1]) + + """ + if isinstance(keys, tuple): + return keys + else: + return (keys,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) +def can_cast(from_, to, casting=None): + """ + can_cast(from_, to, casting='safe') + + Returns True if cast between data types can occur according to the + casting rule. If from is a scalar or array scalar, also returns + True if the scalar value can be cast without overflow or truncation + to an integer. + + Parameters + ---------- + from_ : dtype, dtype specifier, scalar, or array + Data type, scalar, or array to cast from. + to : dtype or dtype specifier + Data type to cast to. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Returns + ------- + out : bool + True if cast can occur according to the casting rule. + + Notes + ----- + Starting in NumPy 1.9, can_cast function now returns False in 'safe' + casting mode for integer/float dtype and string dtype if the string dtype + length is not long enough to store the max integer/float value converted + to a string. Previously can_cast in 'safe' mode returned True for + integer/float dtype and a string dtype of any length. + + See also + -------- + dtype, result_type + + Examples + -------- + Basic examples + + >>> np.can_cast(np.int32, np.int64) + True + >>> np.can_cast(np.float64, complex) + True + >>> np.can_cast(complex, float) + False + + >>> np.can_cast('i8', 'f8') + True + >>> np.can_cast('i8', 'f4') + False + >>> np.can_cast('i4', 'S4') + False + + Casting scalars + + >>> np.can_cast(100, 'i1') + True + >>> np.can_cast(150, 'i1') + False + >>> np.can_cast(150, 'u1') + True + + >>> np.can_cast(3.5e100, np.float32) + False + >>> np.can_cast(1000.0, np.float32) + True + + Array scalar checks the value, array does not + + >>> np.can_cast(np.array(1000.0), np.float32) + True + >>> np.can_cast(np.array([1000.0]), np.float32) + False + + Using the casting rules + + >>> np.can_cast('i8', 'i8', 'no') + True + >>> np.can_cast('i8', 'no') + False + + >>> np.can_cast('i8', 'equiv') + True + >>> np.can_cast('i8', 'equiv') + False + + >>> np.can_cast('i8', 'safe') + True + >>> np.can_cast('i4', 'safe') + False + + >>> np.can_cast('i4', 'same_kind') + True + >>> np.can_cast('u4', 'same_kind') + False + + >>> np.can_cast('u4', 'unsafe') + True + + """ + return (from_,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) +def min_scalar_type(a): + """ + min_scalar_type(a) + + For scalar ``a``, returns the data type with the smallest size + and smallest scalar kind which can hold its value. For non-scalar + array ``a``, returns the vector's dtype unmodified. + + Floating point values are not demoted to integers, + and complex values are not demoted to floats. + + Parameters + ---------- + a : scalar or array_like + The value whose minimal data type is to be found. + + Returns + ------- + out : dtype + The minimal data type. + + Notes + ----- + .. 
versionadded:: 1.6.0 + + See Also + -------- + result_type, promote_types, dtype, can_cast + + Examples + -------- + >>> np.min_scalar_type(10) + dtype('uint8') + + >>> np.min_scalar_type(-260) + dtype('int16') + + >>> np.min_scalar_type(3.1) + dtype('float16') + + >>> np.min_scalar_type(1e50) + dtype('float64') + + >>> np.min_scalar_type(np.arange(4,dtype='f8')) + dtype('float64') + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type) +def result_type(*arrays_and_dtypes): + """ + result_type(*arrays_and_dtypes) + + Returns the type that results from applying the NumPy + type promotion rules to the arguments. + + Type promotion in NumPy works similarly to the rules in languages + like C++, with some slight differences. When both scalars and + arrays are used, the array's type takes precedence and the actual value + of the scalar is taken into account. + + For example, calculating 3*a, where a is an array of 32-bit floats, + intuitively should result in a 32-bit float output. If the 3 is a + 32-bit integer, the NumPy rules indicate it can't convert losslessly + into a 32-bit float, so a 64-bit float should be the result type. + By examining the value of the constant, '3', we see that it fits in + an 8-bit integer, which can be cast losslessly into the 32-bit float. + + Parameters + ---------- + arrays_and_dtypes : list of arrays and dtypes + The operands of some operation whose result type is needed. + + Returns + ------- + out : dtype + The result type. + + See also + -------- + dtype, promote_types, min_scalar_type, can_cast + + Notes + ----- + .. versionadded:: 1.6.0 + + The specific algorithm used is as follows. + + Categories are determined by first checking which of boolean, + integer (int/uint), or floating point (float/complex) the maximum + kind of all the arrays and the scalars are. + + If there are only scalars or the maximum category of the scalars + is higher than the maximum category of the arrays, + the data types are combined with :func:`promote_types` + to produce the return value. + + Otherwise, `min_scalar_type` is called on each array, and + the resulting data types are all combined with :func:`promote_types` + to produce the return value. + + The set of int values is not a subset of the uint values for types + with the same number of bits, something not reflected in + :func:`min_scalar_type`, but handled as a special case in `result_type`. + + Examples + -------- + >>> np.result_type(3, np.arange(7, dtype='i1')) + dtype('int8') + + >>> np.result_type('i4', 'c8') + dtype('complex128') + + >>> np.result_type(3.0, -2) + dtype('float64') + + """ + return arrays_and_dtypes + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) +def dot(a, b, out=None): + """ + dot(a, b, out=None) + + Dot product of two arrays. Specifically, + + - If both `a` and `b` are 1-D arrays, it is inner product of vectors + (without complex conjugation). + + - If both `a` and `b` are 2-D arrays, it is matrix multiplication, + but using :func:`matmul` or ``a @ b`` is preferred. + + - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply` + and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred. + + - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over + the last axis of `a` and `b`. 
+ + - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a + sum product over the last axis of `a` and the second-to-last axis of `b`:: + + dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) + + Parameters + ---------- + a : array_like + First argument. + b : array_like + Second argument. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of `a` and `b`. If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + If `out` is given, then it is returned. + + Raises + ------ + ValueError + If the last dimension of `a` is not the same size as + the second-to-last dimension of `b`. + + See Also + -------- + vdot : Complex-conjugating dot product. + tensordot : Sum products over arbitrary axes. + einsum : Einstein summation convention. + matmul : '@' operator as method with out parameter. + + Examples + -------- + >>> np.dot(3, 4) + 12 + + Neither argument is complex-conjugated: + + >>> np.dot([2j, 3j], [2j, 3j]) + (-13+0j) + + For 2-D arrays it is the matrix product: + + >>> a = [[1, 0], [0, 1]] + >>> b = [[4, 1], [2, 2]] + >>> np.dot(a, b) + array([[4, 1], + [2, 2]]) + + >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) + >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) + >>> np.dot(a, b)[2,3,2,1,2,2] + 499128 + >>> sum(a[2,3,2,:] * b[1,2,:,2]) + 499128 + + """ + return (a, b, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) +def vdot(a, b): + """ + vdot(a, b) + + Return the dot product of two vectors. + + The vdot(`a`, `b`) function handles complex numbers differently than + dot(`a`, `b`). If the first argument is complex the complex conjugate + of the first argument is used for the calculation of the dot product. + + Note that `vdot` handles multidimensional arrays differently than `dot`: + it does *not* perform a matrix product, but flattens input arguments + to 1-D vectors first. Consequently, it should only be used for vectors. + + Parameters + ---------- + a : array_like + If `a` is complex the complex conjugate is taken before calculation + of the dot product. + b : array_like + Second argument to the dot product. + + Returns + ------- + output : ndarray + Dot product of `a` and `b`. Can be an int, float, or + complex depending on the types of `a` and `b`. + + See Also + -------- + dot : Return the dot product without using the complex conjugate of the + first argument. + + Examples + -------- + >>> a = np.array([1+2j,3+4j]) + >>> b = np.array([5+6j,7+8j]) + >>> np.vdot(a, b) + (70-8j) + >>> np.vdot(b, a) + (70+8j) + + Note that higher-dimensional arrays are flattened! + + >>> a = np.array([[1, 4], [5, 6]]) + >>> b = np.array([[4, 1], [2, 2]]) + >>> np.vdot(a, b) + 30 + >>> np.vdot(b, a) + 30 + >>> 1*4 + 4*1 + 5*2 + 6*2 + 30 + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) +def bincount(x, weights=None, minlength=None): + """ + bincount(x, weights=None, minlength=0) + + Count number of occurrences of each value in array of non-negative ints. + + The number of bins (of size 1) is one larger than the largest value in + `x`. 
If `minlength` is specified, there will be at least this number + of bins in the output array (though it will be longer if necessary, + depending on the contents of `x`). + Each bin gives the number of occurrences of its index value in `x`. + If `weights` is specified the input array is weighted by it, i.e. if a + value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead + of ``out[n] += 1``. + + Parameters + ---------- + x : array_like, 1 dimension, nonnegative ints + Input array. + weights : array_like, optional + Weights, array of the same shape as `x`. + minlength : int, optional + A minimum number of bins for the output array. + + .. versionadded:: 1.6.0 + + Returns + ------- + out : ndarray of ints + The result of binning the input array. + The length of `out` is equal to ``np.amax(x)+1``. + + Raises + ------ + ValueError + If the input is not 1-dimensional, or contains elements with negative + values, or if `minlength` is negative. + TypeError + If the type of the input is float or complex. + + See Also + -------- + histogram, digitize, unique + + Examples + -------- + >>> np.bincount(np.arange(5)) + array([1, 1, 1, 1, 1]) + >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) + array([1, 3, 1, 1, 0, 0, 0, 1]) + + >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) + >>> np.bincount(x).size == np.amax(x)+1 + True + + The input array needs to be of integer dtype, otherwise a + TypeError is raised: + + >>> np.bincount(np.arange(5, dtype=float)) + Traceback (most recent call last): + File "", line 1, in + TypeError: array cannot be safely cast to required type + + A possible use of ``bincount`` is to perform sums over + variable-size chunks of an array, using the ``weights`` keyword. + + >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights + >>> x = np.array([0, 1, 1, 2, 2, 2]) + >>> np.bincount(x, weights=w) + array([ 0.3, 0.7, 1.1]) + + """ + return (x, weights) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) +def ravel_multi_index(multi_index, dims, mode=None, order=None): + """ + ravel_multi_index(multi_index, dims, mode='raise', order='C') + + Converts a tuple of index arrays into an array of flat + indices, applying boundary modes to the multi-index. + + Parameters + ---------- + multi_index : tuple of array_like + A tuple of integer arrays, one array for each dimension. + dims : tuple of ints + The shape of array into which the indices from ``multi_index`` apply. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices are handled. Can specify + either one mode or a tuple of modes, one mode per index. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + In 'clip' mode, a negative index which would normally + wrap will clip to 0 instead. + order : {'C', 'F'}, optional + Determines whether the multi-index should be viewed as + indexing in row-major (C-style) or column-major + (Fortran-style) order. + + Returns + ------- + raveled_indices : ndarray + An array of indices into the flattened version of an array + of dimensions ``dims``. + + See Also + -------- + unravel_index + + Notes + ----- + .. 
versionadded:: 1.6.0 + + Examples + -------- + >>> arr = np.array([[3,6,6],[4,5,1]]) + >>> np.ravel_multi_index(arr, (7,6)) + array([22, 41, 37]) + >>> np.ravel_multi_index(arr, (7,6), order='F') + array([31, 41, 13]) + >>> np.ravel_multi_index(arr, (4,6), mode='clip') + array([22, 23, 19]) + >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) + array([12, 13, 13]) + + >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) + 1621 + """ + return multi_index + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) +def unravel_index(indices, shape=None, order=None, dims=None): + """ + unravel_index(indices, shape, order='C') + + Converts a flat index or array of flat indices into a tuple + of coordinate arrays. + + Parameters + ---------- + indices : array_like + An integer array whose elements are indices into the flattened + version of an array of dimensions ``shape``. Before version 1.6.0, + this function accepted just one index value. + shape : tuple of ints + The shape of the array to use for unraveling ``indices``. + + .. versionchanged:: 1.16.0 + Renamed from ``dims`` to ``shape``. + + order : {'C', 'F'}, optional + Determines whether the indices should be viewed as indexing in + row-major (C-style) or column-major (Fortran-style) order. + + .. versionadded:: 1.6.0 + + Returns + ------- + unraveled_coords : tuple of ndarray + Each array in the tuple has the same shape as the ``indices`` + array. + + See Also + -------- + ravel_multi_index + + Examples + -------- + >>> np.unravel_index([22, 41, 37], (7,6)) + (array([3, 6, 6]), array([4, 5, 1])) + >>> np.unravel_index([31, 41, 13], (7,6), order='F') + (array([3, 6, 6]), array([4, 5, 1])) + + >>> np.unravel_index(1621, (6,7,8,9)) + (3, 1, 4, 1) + + """ + if dims is not None: + warnings.warn("'shape' argument should be used instead of 'dims'", + DeprecationWarning, stacklevel=3) + return (indices,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) +def copyto(dst, src, casting=None, where=None): + """ + copyto(dst, src, casting='same_kind', where=True) + + Copies values from one array to another, broadcasting as necessary. + + Raises a TypeError if the `casting` rule is violated, and if + `where` is provided, it selects which elements to copy. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dst : ndarray + The array into which values are copied. + src : array_like + The array from which values are copied. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when copying. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `dst`, and selects elements to copy from `src` to `dst` + wherever it contains the value True. + """ + return (dst, src, where) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) +def putmask(a, mask, values): + """ + putmask(a, mask, values) + + Changes elements of an array based on conditional and input values. + + Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. + + If `values` is not the same size as `a` and `mask` then it will repeat. 
+ This gives behavior different from ``a[mask] = values``. + + Parameters + ---------- + a : array_like + Target array. + mask : array_like + Boolean mask array. It has to be the same shape as `a`. + values : array_like + Values to put into `a` where `mask` is True. If `values` is smaller + than `a` it will be repeated. + + See Also + -------- + place, put, take, copyto + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> np.putmask(x, x>2, x**2) + >>> x + array([[ 0, 1, 2], + [ 9, 16, 25]]) + + If `values` is smaller than `a` it is repeated: + + >>> x = np.arange(5) + >>> np.putmask(x, x>1, [-33, -44]) + >>> x + array([ 0, 1, -33, -44, -33]) + + """ + return (a, mask, values) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) +def packbits(myarray, axis=None): + """ + packbits(myarray, axis=None) + + Packs the elements of a binary-valued array into bits in a uint8 array. + + The result is padded to full bytes by inserting zero bits at the end. + + Parameters + ---------- + myarray : array_like + An array of integers or booleans whose elements should be packed to + bits. + axis : int, optional + The dimension over which bit-packing is done. + ``None`` implies packing the flattened array. + + Returns + ------- + packed : ndarray + Array of type uint8 whose elements represent bits corresponding to the + logical (0 or nonzero) value of the input elements. The shape of + `packed` has the same number of dimensions as the input (unless `axis` + is None, in which case the output is 1-D). + + See Also + -------- + unpackbits: Unpacks elements of a uint8 array into a binary-valued output + array. + + Examples + -------- + >>> a = np.array([[[1,0,1], + ... [0,1,0]], + ... [[1,1,0], + ... [0,0,1]]]) + >>> b = np.packbits(a, axis=-1) + >>> b + array([[[160],[64]],[[192],[32]]], dtype=uint8) + + Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, + and 32 = 0010 0000. + + """ + return (myarray,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) +def unpackbits(myarray, axis=None): + """ + unpackbits(myarray, axis=None) + + Unpacks elements of a uint8 array into a binary-valued output array. + + Each element of `myarray` represents a bit-field that should be unpacked + into a binary-valued output array. The shape of the output array is either + 1-D (if `axis` is None) or the same shape as the input array with unpacking + done along the axis specified. + + Parameters + ---------- + myarray : ndarray, uint8 type + Input array. + axis : int, optional + The dimension over which bit-unpacking is done. + ``None`` implies unpacking the flattened array. + + Returns + ------- + unpacked : ndarray, uint8 type + The elements are binary-valued (0 or 1). + + See Also + -------- + packbits : Packs the elements of a binary-valued array into bits in a uint8 + array. 
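+
+    Notes
+    -----
+    A round-trip sketch (an illustrative aside, not part of the upstream
+    docstring): unpacking and then repacking along the same axis recovers
+    the original bytes:
+
+    >>> a = np.array([[160], [64]], dtype=np.uint8)
+    >>> np.packbits(np.unpackbits(a, axis=1), axis=1)
+    array([[160],
+           [ 64]], dtype=uint8)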
+ + Examples + -------- + >>> a = np.array([[2], [7], [23]], dtype=np.uint8) + >>> a + array([[ 2], + [ 7], + [23]], dtype=uint8) + >>> b = np.unpackbits(a, axis=1) + >>> b + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) + + """ + return (myarray,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) +def shares_memory(a, b, max_work=None): + """ + shares_memory(a, b, max_work=None) + + Determine if two arrays share memory + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem (maximum number + of candidate solutions to consider). The following special + values are recognized: + + max_work=MAY_SHARE_EXACT (default) + The problem is solved exactly. In this case, the function returns + True only if there is an element shared between the arrays. + max_work=MAY_SHARE_BOUNDS + Only the memory bounds of a and b are checked. + + Raises + ------ + numpy.TooHardError + Exceeded max_work. + + Returns + ------- + out : bool + + See Also + -------- + may_share_memory + + Examples + -------- + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) +def may_share_memory(a, b, max_work=None): + """ + may_share_memory(a, b, max_work=None) + + Determine if two arrays might share memory + + A return of True does not necessarily mean that the two arrays + share any element. It just means that they *might*. + + Only the memory bounds of a and b are checked by default. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem. See + `shares_memory` for details. Default for ``may_share_memory`` + is to do a bounds check. + + Returns + ------- + out : bool + + See Also + -------- + shares_memory + + Examples + -------- + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + >>> x = np.zeros([3, 4]) + >>> np.may_share_memory(x[:,0], x[:,1]) + True + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) +def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): + """ + is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) + + Calculates which of the given dates are valid days, and which are not. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. 
+ out : array of bool, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of bool + An array with the same shape as ``dates``, containing True for + each valid day, and False for each invalid day. + + See Also + -------- + busdaycalendar: An object that specifies a custom set of valid days. + busday_offset : Applies an offset counted in valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> # The weekdays are Friday, Saturday, and Monday + ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + array([False, False, True], dtype='bool') + """ + return (dates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) +def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, + busdaycal=None, out=None): + """ + busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) + + First adjusts the date to fall on a valid day according to + the ``roll`` rule, then applies offsets to the given dates + counted in valid days. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + offsets : array_like of int + The array of offsets, which is broadcast with ``dates``. + roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional + How to treat dates that do not fall on a valid day. The default + is 'raise'. + + * 'raise' means to raise an exception for an invalid day. + * 'nat' means to return a NaT (not-a-time) for an invalid day. + * 'forward' and 'following' mean to take the first valid day + later in time. + * 'backward' and 'preceding' mean to take the first valid day + earlier in time. + * 'modifiedfollowing' means to take the first valid day + later in time unless it is across a Month boundary, in which + case to take the first valid day earlier in time. + * 'modifiedpreceding' means to take the first valid day + earlier in time unless it is across a Month boundary, in which + case to take the first valid day later in time. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of datetime64[D], optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of datetime64[D] + An array with a shape from broadcasting ``dates`` and ``offsets`` + together, containing the dates with offsets applied. + + See Also + -------- + busdaycalendar: An object that specifies a custom set of valid days. 
+ is_busday : Returns a boolean array indicating valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> # First business day in October 2011 (not accounting for holidays) + ... np.busday_offset('2011-10', 0, roll='forward') + numpy.datetime64('2011-10-03','D') + >>> # Last business day in February 2012 (not accounting for holidays) + ... np.busday_offset('2012-03', -1, roll='forward') + numpy.datetime64('2012-02-29','D') + >>> # Third Wednesday in January 2011 + ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') + numpy.datetime64('2011-01-19','D') + >>> # 2012 Mother's Day in Canada and the U.S. + ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') + numpy.datetime64('2012-05-13','D') + + >>> # First business day on or after a date + ... np.busday_offset('2011-03-20', 0, roll='forward') + numpy.datetime64('2011-03-21','D') + >>> np.busday_offset('2011-03-22', 0, roll='forward') + numpy.datetime64('2011-03-22','D') + >>> # First business day after a date + ... np.busday_offset('2011-03-20', 1, roll='backward') + numpy.datetime64('2011-03-21','D') + >>> np.busday_offset('2011-03-22', 1, roll='backward') + numpy.datetime64('2011-03-23','D') + """ + return (dates, offsets, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) +def busday_count(begindates, enddates, weekmask=None, holidays=None, + busdaycal=None, out=None): + """ + busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None) + + Counts the number of valid days between `begindates` and + `enddates`, not including the day of `enddates`. + + If ``enddates`` specifies a date value that is earlier than the + corresponding ``begindates`` date value, the count will be negative. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + begindates : array_like of datetime64[D] + The array of the first dates for counting. + enddates : array_like of datetime64[D] + The array of the end dates for counting, which are excluded + from the count themselves. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of int, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of int + An array with a shape from broadcasting ``begindates`` and ``enddates`` + together, containing the number of valid days between + the begin and end dates. + + See Also + -------- + busdaycalendar: An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_offset : Applies an offset counted in valid days. 
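+
+    Notes
+    -----
+    A minimal sketch of the sign convention (illustrative, not part of the
+    upstream docstring): swapping the begin and end dates negates the count,
+    since January 2011 has 21 weekdays:
+
+    >>> np.busday_count('2011-02', '2011-01')
+    -21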
+ + Examples + -------- + >>> # Number of weekdays in January 2011 + ... np.busday_count('2011-01', '2011-02') + 21 + >>> # Number of weekdays in 2011 + ... np.busday_count('2011', '2012') + 260 + >>> # Number of Saturdays in 2011 + ... np.busday_count('2011', '2012', weekmask='Sat') + 53 + """ + return (begindates, enddates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher( + _multiarray_umath.datetime_as_string) +def datetime_as_string(arr, unit=None, timezone=None, casting=None): + """ + datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') + + Convert an array of datetimes into an array of strings. + + Parameters + ---------- + arr : array_like of datetime64 + The array of UTC timestamps to format. + unit : str + One of None, 'auto', or a :ref:`datetime unit `. + timezone : {'naive', 'UTC', 'local'} or tzinfo + Timezone information to use when displaying the datetime. If 'UTC', end + with a Z to indicate UTC time. If 'local', convert to the local timezone + first, and suffix with a +-#### timezone offset. If a tzinfo object, + then do as with 'local', but use the specified timezone. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'} + Casting to allow when changing between datetime units. + + Returns + ------- + str_arr : ndarray + An array of strings the same shape as `arr`. + + Examples + -------- + >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') + >>> d + array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', + '2002-10-27T07:30'], dtype='datetime64[m]') + + Setting the timezone to UTC shows the same information, but with a Z suffix + + >>> np.datetime_as_string(d, timezone='UTC') + array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z', + '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) + array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', + '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='>> np.datetime_as_string(d, unit='h') + array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'], + dtype='>> np.datetime_as_string(d, unit='s') + array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00', + '2002-10-27T07:30:00'], dtype='>> np.datetime_as_string(d, unit='h', casting='safe') + TypeError: Cannot create a datetime string as units 'h' from a NumPy + datetime with units 'm' according to the rule 'safe' + """ + return (arr,) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.pyc new file mode 100644 index 0000000..79a3d61 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/numeric.py b/project/venv/lib/python2.7/site-packages/numpy/core/numeric.py new file mode 100644 index 0000000..8768cbe --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/numeric.py @@ -0,0 +1,3100 @@ +from __future__ import division, absolute_import, print_function + +try: + # Accessing collections abstract classes from collections + # has been deprecated since Python 3.3 + import collections.abc as collections_abc +except ImportError: + import collections as collections_abc +import functools +import itertools +import operator +import sys +import warnings +import numbers + +import numpy as np +from . 
import multiarray +from .multiarray import ( + _fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS, + BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE, + WRAP, arange, array, broadcast, can_cast, compare_chararrays, + concatenate, copyto, dot, dtype, empty, + empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring, + inner, int_asbuffer, lexsort, matmul, may_share_memory, + min_scalar_type, ndarray, nditer, nested_iters, promote_types, + putmask, result_type, set_numeric_ops, shares_memory, vdot, where, + zeros, normalize_axis_index) +if sys.version_info[0] < 3: + from .multiarray import newbuffer, getbuffer + +from . import overrides +from . import umath +from .overrides import set_module +from .umath import (multiply, invert, sin, UFUNC_BUFSIZE_DEFAULT, + ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, + ERR_LOG, ERR_DEFAULT, PINF, NAN) +from . import numerictypes +from .numerictypes import longlong, intc, int_, float_, complex_, bool_ +from ._internal import TooHardError, AxisError + +bitwise_not = invert +ufunc = type(sin) +newaxis = None + +if sys.version_info[0] >= 3: + if sys.version_info[1] in (6, 7): + try: + import pickle5 as pickle + except ImportError: + import pickle + else: + import pickle + basestring = str + import builtins +else: + import cPickle as pickle + import __builtin__ as builtins + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def loads(*args, **kwargs): + # NumPy 1.15.0, 2017-12-10 + warnings.warn( + "np.core.numeric.loads is deprecated, use pickle.loads instead", + DeprecationWarning, stacklevel=2) + return pickle.loads(*args, **kwargs) + + +__all__ = [ + 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', + 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', + 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where', + 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort', + 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type', + 'result_type', 'asarray', 'asanyarray', 'ascontiguousarray', + 'asfortranarray', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', + 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', + 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'require', + 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', + 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', + 'identity', 'allclose', 'compare_chararrays', 'putmask', 'seterr', + 'geterr', 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', + 'errstate', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', + 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', + 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', + 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', + 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError'] + +if sys.version_info[0] < 3: + __all__.extend(['getbuffer', 'newbuffer']) + + +@set_module('numpy') +class ComplexWarning(RuntimeWarning): + """ + The warning raised when casting a complex dtype to a real dtype. + + As implemented, casting a complex number to a real discards its imaginary + part, but this behavior may not be what the user actually wants. 
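+
+    An illustrative trigger (a sketch, not part of the upstream text; the
+    warning itself is emitted on stderr rather than returned):
+
+    >>> _ = np.asarray([1 + 2j]).astype(float)  # emits ComplexWarning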
+ + """ + pass + + +def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None): + return (a,) + + +@array_function_dispatch(_zeros_like_dispatcher) +def zeros_like(a, dtype=None, order='K', subok=True): + """ + Return an array of zeros with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + + .. versionadded:: 1.6.0 + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + + .. versionadded:: 1.6.0 + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of 'a', otherwise it will be a base-class array. Defaults + to True. + + Returns + ------- + out : ndarray + Array of zeros with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + full_like : Return a new array with shape of input filled with value. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.zeros_like(x) + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([ 0., 1., 2.]) + >>> np.zeros_like(y) + array([ 0., 0., 0.]) + + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok) + # needed instead of a 0 to get same result as zeros for for string dtypes + z = zeros(1, dtype=res.dtype) + multiarray.copyto(res, z, casting='unsafe') + return res + + +@set_module('numpy') +def ones(shape, dtype=None, order='C'): + """ + Return a new array of given shape and type, filled with ones. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: C + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + + Returns + ------- + out : ndarray + Array of ones with the given shape, dtype, and order. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + empty : Return a new uninitialized array. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + + Examples + -------- + >>> np.ones(5) + array([ 1., 1., 1., 1., 1.]) + + >>> np.ones((5,), dtype=int) + array([1, 1, 1, 1, 1]) + + >>> np.ones((2, 1)) + array([[ 1.], + [ 1.]]) + + >>> s = (2,2) + >>> np.ones(s) + array([[ 1., 1.], + [ 1., 1.]]) + + """ + a = empty(shape, dtype, order) + multiarray.copyto(a, 1, casting='unsafe') + return a + + +def _ones_like_dispatcher(a, dtype=None, order=None, subok=None): + return (a,) + + +@array_function_dispatch(_ones_like_dispatcher) +def ones_like(a, dtype=None, order='K', subok=True): + """ + Return an array of ones with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. 
+ dtype : data-type, optional + Overrides the data type of the result. + + .. versionadded:: 1.6.0 + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + + .. versionadded:: 1.6.0 + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of 'a', otherwise it will be a base-class array. Defaults + to True. + + Returns + ------- + out : ndarray + Array of ones with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + ones : Return a new array setting values to one. + + Examples + -------- + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.ones_like(x) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([ 0., 1., 2.]) + >>> np.ones_like(y) + array([ 1., 1., 1.]) + + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok) + multiarray.copyto(res, 1, casting='unsafe') + return res + + +@set_module('numpy') +def full(shape, fill_value, dtype=None, order='C'): + """ + Return a new array of given shape and type, filled with `fill_value`. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + fill_value : scalar + Fill value. + dtype : data-type, optional + The desired data-type for the array The default, `None`, means + `np.array(fill_value).dtype`. + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. + + Returns + ------- + out : ndarray + Array of `fill_value` with the given shape, dtype, and order. + + See Also + -------- + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> np.full((2, 2), np.inf) + array([[ inf, inf], + [ inf, inf]]) + >>> np.full((2, 2), 10) + array([[10, 10], + [10, 10]]) + + """ + if dtype is None: + dtype = array(fill_value).dtype + a = empty(shape, dtype, order) + multiarray.copyto(a, fill_value, casting='unsafe') + return a + + +def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None): + return (a,) + + +@array_function_dispatch(_full_like_dispatcher) +def full_like(a, fill_value, dtype=None, order='K', subok=True): + """ + Return a full array with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + fill_value : scalar + Fill value. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of 'a', otherwise it will be a base-class array. Defaults + to True. 
+ + Returns + ------- + out : ndarray + Array of `fill_value` with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> x = np.arange(6, dtype=int) + >>> np.full_like(x, 1) + array([1, 1, 1, 1, 1, 1]) + >>> np.full_like(x, 0.1) + array([0, 0, 0, 0, 0, 0]) + >>> np.full_like(x, 0.1, dtype=np.double) + array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + >>> np.full_like(x, np.nan, dtype=np.double) + array([ nan, nan, nan, nan, nan, nan]) + + >>> y = np.arange(6, dtype=np.double) + >>> np.full_like(y, 0.1) + array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok) + multiarray.copyto(res, fill_value, casting='unsafe') + return res + + +def _count_nonzero_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_count_nonzero_dispatcher) +def count_nonzero(a, axis=None): + """ + Counts the number of non-zero values in the array ``a``. + + The word "non-zero" is in reference to the Python 2.x + built-in method ``__nonzero__()`` (renamed ``__bool__()`` + in Python 3.x) of Python objects that tests an object's + "truthfulness". For example, any number is considered + truthful if it is nonzero, whereas any string is considered + truthful if it is not the empty string. Thus, this function + (recursively) counts how many elements in ``a`` (and in + sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` + method evaluated to ``True``. + + Parameters + ---------- + a : array_like + The array for which to count non-zeros. + axis : int or tuple, optional + Axis or tuple of axes along which to count non-zeros. + Default is None, meaning that non-zeros will be counted + along a flattened version of ``a``. + + .. versionadded:: 1.12.0 + + Returns + ------- + count : int or array of int + Number of non-zero values in the array along a given axis. + Otherwise, the total number of non-zero values in the array + is returned. + + See Also + -------- + nonzero : Return the coordinates of all the non-zero values. + + Examples + -------- + >>> np.count_nonzero(np.eye(4)) + 4 + >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]]) + 5 + >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=0) + array([1, 1, 1, 1, 1]) + >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1) + array([2, 3]) + + """ + if axis is None: + return multiarray.count_nonzero(a) + + a = asanyarray(a) + + # TODO: this works around .astype(bool) not working properly (gh-9847) + if np.issubdtype(a.dtype, np.character): + a_bool = a != a.dtype.type() + else: + a_bool = a.astype(np.bool_, copy=False) + + return a_bool.sum(axis=axis, dtype=np.intp) + + +@set_module('numpy') +def asarray(a, dtype=None, order=None): + """Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major (C-style) or + column-major (Fortran-style) memory representation. + Defaults to 'C'. + + Returns + ------- + out : ndarray + Array interpretation of `a`. 
No copy is performed if the input + is already an ndarray with matching dtype and order. If `a` is a + subclass of ndarray, a base class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.recarray, np.ndarray) + True + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order) + + +@set_module('numpy') +def asanyarray(a, dtype=None, order=None): + """Convert the input to an ndarray, but pass ndarray subclasses through. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes scalars, lists, lists of tuples, tuples, tuples of tuples, + tuples of lists, and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major (C-style) or column-major + (Fortran-style) memory representation. Defaults to 'C'. + + Returns + ------- + out : ndarray or an ndarray subclass + Array interpretation of `a`. If `a` is an ndarray or a subclass + of ndarray, it is returned as-is and no copy is performed. + + See Also + -------- + asarray : Similar function which always returns ndarrays. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and + Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asanyarray(a) + array([1, 2]) + + Instances of `ndarray` subclasses are passed through as-is: + + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, subok=True) + + +@set_module('numpy') +def ascontiguousarray(a, dtype=None): + """ + Return a contiguous array (ndim >= 1) in memory (C order). + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + Data-type of returned array. + + Returns + ------- + out : ndarray + Contiguous array of same shape and content as `a`, with type `dtype` + if specified. + + See Also + -------- + asfortranarray : Convert input to an ndarray with column-major + memory order. 
+ require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> np.ascontiguousarray(x, dtype=np.float32) + array([[ 0., 1., 2.], + [ 3., 4., 5.]], dtype=float32) + >>> x.flags['C_CONTIGUOUS'] + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """ + return array(a, dtype, copy=False, order='C', ndmin=1) + + +@set_module('numpy') +def asfortranarray(a, dtype=None): + """ + Return an array (ndim >= 1) laid out in Fortran order in memory. + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + By default, the data-type is inferred from the input data. + + Returns + ------- + out : ndarray + The input `a` in Fortran, or column-major, order. + + See Also + -------- + ascontiguousarray : Convert input to a contiguous (C order) array. + asanyarray : Convert input to an ndarray with either row or + column-major memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> y = np.asfortranarray(x) + >>> x.flags['F_CONTIGUOUS'] + False + >>> y.flags['F_CONTIGUOUS'] + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """ + return array(a, dtype, copy=False, order='F', ndmin=1) + + +@set_module('numpy') +def require(a, dtype=None, requirements=None): + """ + Return an ndarray of the provided type that satisfies requirements. + + This function is useful to be sure that an array with the correct flags + is returned for passing to compiled code (perhaps through ctypes). + + Parameters + ---------- + a : array_like + The object to be converted to a type-and-requirement-satisfying array. + dtype : data-type + The required data-type. If None preserve the current dtype. If your + application requires the data to be in native byteorder, include + a byteorder specification as a part of the dtype specification. + requirements : str or list of str + The requirements list can be any of the following + + * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array + * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array + * 'ALIGNED' ('A') - ensure a data-type aligned array + * 'WRITEABLE' ('W') - ensure a writable array + * 'OWNDATA' ('O') - ensure an array that owns its own data + * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass + + See Also + -------- + asarray : Convert input to an ndarray. + asanyarray : Convert to an ndarray, but pass through ndarray subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + ndarray.flags : Information about the memory layout of the array. + + Notes + ----- + The returned array will be guaranteed to have the listed requirements + by making a copy if needed. 
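+
+    For instance (an illustrative aside, not part of the upstream
+    docstring), an array that already satisfies the requirements is
+    passed through without a copy:
+
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> np.require(x, requirements=['C']) is x
+    True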
+ + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + UPDATEIFCOPY : False + + >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) + >>> y.flags + C_CONTIGUOUS : False + F_CONTIGUOUS : True + OWNDATA : True + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + UPDATEIFCOPY : False + + """ + possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', + 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', + 'A': 'A', 'ALIGNED': 'A', + 'W': 'W', 'WRITEABLE': 'W', + 'O': 'O', 'OWNDATA': 'O', + 'E': 'E', 'ENSUREARRAY': 'E'} + if not requirements: + return asanyarray(a, dtype=dtype) + else: + requirements = {possible_flags[x.upper()] for x in requirements} + + if 'E' in requirements: + requirements.remove('E') + subok = False + else: + subok = True + + order = 'A' + if requirements >= {'C', 'F'}: + raise ValueError('Cannot specify both "C" and "F" order') + elif 'F' in requirements: + order = 'F' + requirements.remove('F') + elif 'C' in requirements: + order = 'C' + requirements.remove('C') + + arr = array(a, dtype=dtype, order=order, copy=False, subok=subok) + + for prop in requirements: + if not arr.flags[prop]: + arr = arr.copy(order) + break + return arr + + +@set_module('numpy') +def isfortran(a): + """ + Returns True if the array is Fortran contiguous but *not* C contiguous. + + This function is obsolete and, because of changes due to relaxed stride + checking, its return value for the same array may differ for versions + of NumPy >= 1.10.0 and previous versions. If you only want to check if an + array is Fortran contiguous use ``a.flags.f_contiguous`` instead. + + Parameters + ---------- + a : ndarray + Input array. + + + Examples + -------- + + np.array allows to specify whether the array is written in C-contiguous + order (last index varies the fastest), or FORTRAN-contiguous order in + memory (first index varies the fastest). + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + + >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN') + >>> b + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(b) + True + + + The transpose of a C-ordered array is a FORTRAN-ordered array. + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + >>> b = a.T + >>> b + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.isfortran(b) + True + + C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. + + >>> np.isfortran(np.array([1, 2], order='FORTRAN')) + False + + """ + return a.flags.fnc + + +def _argwhere_dispatcher(a): + return (a,) + + +@array_function_dispatch(_argwhere_dispatcher) +def argwhere(a): + """ + Find the indices of array elements that are non-zero, grouped by element. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + index_array : ndarray + Indices of elements that are non-zero. Indices are grouped by element. + + See Also + -------- + where, nonzero + + Notes + ----- + ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. + + The output of ``argwhere`` is not suitable for indexing arrays. + For this purpose use ``nonzero(a)`` instead. 
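+
+    A quick check of that equivalence (illustrative only):
+
+    >>> a = np.array([0, 5, 0, 7])
+    >>> np.array_equal(np.argwhere(a), np.transpose(np.nonzero(a)))
+    True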
+ + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.argwhere(x>1) + array([[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + """ + return transpose(nonzero(a)) + + +def _flatnonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_flatnonzero_dispatcher) +def flatnonzero(a): + """ + Return indices that are non-zero in the flattened version of a. + + This is equivalent to np.nonzero(np.ravel(a))[0]. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + res : ndarray + Output array, containing the indices of the elements of `a.ravel()` + that are non-zero. + + See Also + -------- + nonzero : Return the indices of the non-zero elements of the input array. + ravel : Return a 1-D array containing the elements of the input array. + + Examples + -------- + >>> x = np.arange(-2, 3) + >>> x + array([-2, -1, 0, 1, 2]) + >>> np.flatnonzero(x) + array([0, 1, 3, 4]) + + Use the indices of the non-zero elements as an index array to extract + these elements: + + >>> x.ravel()[np.flatnonzero(x)] + array([-2, -1, 1, 2]) + + """ + return np.nonzero(np.ravel(a))[0] + + +_mode_from_name_dict = {'v': 0, + 's': 1, + 'f': 2} + + +def _mode_from_name(mode): + if isinstance(mode, basestring): + return _mode_from_name_dict[mode.lower()[0]] + return mode + + +def _correlate_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_correlate_dispatcher) +def correlate(a, v, mode='valid'): + """ + Cross-correlation of two 1-dimensional sequences. + + This function computes the correlation as generally defined in signal + processing texts:: + + c_{av}[k] = sum_n a[n+k] * conj(v[n]) + + with a and v sequences being zero-padded where necessary and conj being + the conjugate. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + old_behavior : bool + `old_behavior` was removed in NumPy 1.10. If you need the old + behavior, use `multiarray.correlate`. + + Returns + ------- + out : ndarray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + convolve : Discrete, linear convolution of two one-dimensional sequences. + multiarray.correlate : Old, no conjugate, version of correlate. + + Notes + ----- + The definition of correlation above is not unique and sometimes correlation + may be defined differently. Another common definition is:: + + c'_{av}[k] = sum_n a[n] conj(v[n+k]) + + which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``. + + Examples + -------- + >>> np.correlate([1, 2, 3], [0, 1, 0.5]) + array([ 3.5]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") + array([ 2. , 3.5, 3. ]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") + array([ 0.5, 2. , 3.5, 3. , 0. 
]) + + Using complex sequences: + + >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') + array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) + + Note that you get the time reversed, complex conjugated result + when the two input sequences change places, i.e., + ``c_{va}[k] = c^{*}_{av}[-k]``: + + >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') + array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) + + """ + mode = _mode_from_name(mode) + return multiarray.correlate2(a, v, mode) + + +def _convolve_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_convolve_dispatcher) +def convolve(a, v, mode='full'): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + The convolution operator is often seen in signal processing, where it + models the effect of a linear time-invariant system on a signal [1]_. In + probability theory, the sum of two independent random variables is + distributed according to the convolution of their individual + distributions. + + If `v` is longer than `a`, the arrays are swapped before computation. + + Parameters + ---------- + a : (N,) array_like + First one-dimensional input array. + v : (M,) array_like + Second one-dimensional input array. + mode : {'full', 'valid', 'same'}, optional + 'full': + By default, mode is 'full'. This returns the convolution + at each point of overlap, with an output shape of (N+M-1,). At + the end-points of the convolution, the signals do not overlap + completely, and boundary effects may be seen. + + 'same': + Mode 'same' returns output of length ``max(M, N)``. Boundary + effects are still visible. + + 'valid': + Mode 'valid' returns output of length + ``max(M, N) - min(M, N) + 1``. The convolution product is only given + for points where the signals overlap completely. Values outside + the signal boundary have no effect. + + Returns + ------- + out : ndarray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier + Transform. + scipy.linalg.toeplitz : Used to construct the convolution operator. + polymul : Polynomial multiplication. Same output as convolve, but also + accepts poly1d objects as input. + + Notes + ----- + The discrete convolution operation is defined as + + .. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m] + + It can be shown that a convolution :math:`x(t) * y(t)` in time/space + is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier + domain, after appropriate padding (padding is necessary to prevent + circular convolution). Since multiplication is more efficient (faster) + than convolution, the function `scipy.signal.fftconvolve` exploits the + FFT to calculate the convolution of large data-sets. + + References + ---------- + .. [1] Wikipedia, "Convolution", + https://en.wikipedia.org/wiki/Convolution + + Examples + -------- + Note how the convolution operator flips the second array + before "sliding" the two across one another: + + >>> np.convolve([1, 2, 3], [0, 1, 0.5]) + array([ 0. , 1. , 2.5, 4. , 1.5]) + + Only return the middle values of the convolution. + Contains boundary effects, where zeros are taken + into account: + + >>> np.convolve([1,2,3],[0,1,0.5], 'same') + array([ 1. , 2.5, 4. 
]) + + The two arrays are of the same length, so there + is only one position where they completely overlap: + + >>> np.convolve([1,2,3],[0,1,0.5], 'valid') + array([ 2.5]) + + """ + a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1) + if (len(v) > len(a)): + a, v = v, a + if len(a) == 0: + raise ValueError('a cannot be empty') + if len(v) == 0: + raise ValueError('v cannot be empty') + mode = _mode_from_name(mode) + return multiarray.correlate(a, v[::-1], mode) + + +def _outer_dispatcher(a, b, out=None): + return (a, b, out) + + +@array_function_dispatch(_outer_dispatcher) +def outer(a, b, out=None): + """ + Compute the outer product of two vectors. + + Given two vectors, ``a = [a0, a1, ..., aM]`` and + ``b = [b0, b1, ..., bN]``, + the outer product [1]_ is:: + + [[a0*b0 a0*b1 ... a0*bN ] + [a1*b0 . + [ ... . + [aM*b0 aM*bN ]] + + Parameters + ---------- + a : (M,) array_like + First input vector. Input is flattened if + not already 1-dimensional. + b : (N,) array_like + Second input vector. Input is flattened if + not already 1-dimensional. + out : (M, N) ndarray, optional + A location where the result is stored + + .. versionadded:: 1.9.0 + + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = a[i] * b[j]`` + + See also + -------- + inner + einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. + ufunc.outer : A generalization to N dimensions and other operations. + ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent. + + References + ---------- + .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd + ed., Baltimore, MD, Johns Hopkins University Press, 1996, + pg. 8. + + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.outer(x, [1, 2, 3]) + array([[a, aa, aaa], + [b, bb, bbb], + [c, cc, ccc]], dtype=object) + + """ + a = asarray(a) + b = asarray(b) + return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out) + + +def _tensordot_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensordot_dispatcher) +def tensordot(a, b, axes=2): + """ + Compute tensor dot product along specified axes for arrays >= 1-D. + + Given two tensors (arrays of dimension greater than or equal to one), + `a` and `b`, and an array_like object containing two array_like + objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s + elements (components) over the axes specified by ``a_axes`` and + ``b_axes``. The third argument can be a single non-negative + integer_like scalar, ``N``; if it is such, then the last ``N`` + dimensions of `a` and the first ``N`` dimensions of `b` are summed + over. 
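+
+    For 2-D arrays this means ``axes=1`` reduces to the ordinary matrix
+    product (a minimal illustration, not part of the upstream docstring):
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> b = np.arange(6).reshape(3, 2)
+    >>> np.tensordot(a, b, axes=1)
+    array([[10, 13],
+           [28, 40]])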
+ + Parameters + ---------- + a, b : array_like, len(shape) >= 1 + Tensors to "dot". + + axes : int or (2,) array_like + * integer_like + If an int N, sum over the last N axes of `a` and the first N axes + of `b` in order. The sizes of the corresponding axes must match. + * (2,) array_like + Or, a list of axes to be summed over, first sequence applying to `a`, + second to `b`. Both elements array_like must be of the same length. + + See Also + -------- + dot, einsum + + Notes + ----- + Three common use cases are: + * ``axes = 0`` : tensor product :math:`a\\otimes b` + * ``axes = 1`` : tensor dot product :math:`a\\cdot b` + * ``axes = 2`` : (default) tensor double contraction :math:`a:b` + + When `axes` is integer_like, the sequence for evaluation will be: first + the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and + Nth axis in `b` last. + + When there is more than one axis to sum over - and they are not the last + (first) axes of `a` (`b`) - the argument `axes` should consist of + two sequences of the same length, with the first axis to sum over given + first in both sequences, the second axis second, and so forth. + + Examples + -------- + A "traditional" example: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) + >>> c.shape + (5, 2) + >>> c + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> # A slower but equivalent way of computing the same... + >>> d = np.zeros((5,2)) + >>> for i in range(5): + ... for j in range(2): + ... for k in range(3): + ... for n in range(4): + ... d[i,j] += a[k,n,i] * b[n,k,j] + >>> c == d + array([[ True, True], + [ True, True], + [ True, True], + [ True, True], + [ True, True]]) + + An extended example taking advantage of the overloading of + and \\*: + + >>> a = np.array(range(1, 9)) + >>> a.shape = (2, 2, 2) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A.shape = (2, 2) + >>> a; A + array([[[1, 2], + [3, 4]], + [[5, 6], + [7, 8]]]) + array([[a, b], + [c, d]], dtype=object) + + >>> np.tensordot(a, A) # third argument default is 2 for double-contraction + array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object) + + >>> np.tensordot(a, A, 1) + array([[[acc, bdd], + [aaacccc, bbbdddd]], + [[aaaaacccccc, bbbbbdddddd], + [aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object) + + >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) + array([[[[[a, b], + [c, d]], + ... 
+ + >>> np.tensordot(a, A, (0, 1)) + array([[[abbbbb, cddddd], + [aabbbbbb, ccdddddd]], + [[aaabbbbbbb, cccddddddd], + [aaaabbbbbbbb, ccccdddddddd]]], dtype=object) + + >>> np.tensordot(a, A, (2, 1)) + array([[[abb, cdd], + [aaabbbb, cccdddd]], + [[aaaaabbbbbb, cccccdddddd], + [aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object) + + >>> np.tensordot(a, A, ((0, 1), (0, 1))) + array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object) + + >>> np.tensordot(a, A, ((2, 1), (1, 0))) + array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object) + + """ + try: + iter(axes) + except Exception: + axes_a = list(range(-axes, 0)) + axes_b = list(range(0, axes)) + else: + axes_a, axes_b = axes + try: + na = len(axes_a) + axes_a = list(axes_a) + except TypeError: + axes_a = [axes_a] + na = 1 + try: + nb = len(axes_b) + axes_b = list(axes_b) + except TypeError: + axes_b = [axes_b] + nb = 1 + + a, b = asarray(a), asarray(b) + as_ = a.shape + nda = a.ndim + bs = b.shape + ndb = b.ndim + equal = True + if na != nb: + equal = False + else: + for k in range(na): + if as_[axes_a[k]] != bs[axes_b[k]]: + equal = False + break + if axes_a[k] < 0: + axes_a[k] += nda + if axes_b[k] < 0: + axes_b[k] += ndb + if not equal: + raise ValueError("shape-mismatch for sum") + + # Move the axes to sum over to the end of "a" + # and to the front of "b" + notin = [k for k in range(nda) if k not in axes_a] + newaxes_a = notin + axes_a + N2 = 1 + for axis in axes_a: + N2 *= as_[axis] + newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2) + olda = [as_[axis] for axis in notin] + + notin = [k for k in range(ndb) if k not in axes_b] + newaxes_b = axes_b + notin + N2 = 1 + for axis in axes_b: + N2 *= bs[axis] + newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin]))) + oldb = [bs[axis] for axis in notin] + + at = a.transpose(newaxes_a).reshape(newshape_a) + bt = b.transpose(newaxes_b).reshape(newshape_b) + res = dot(at, bt) + return res.reshape(olda + oldb) + + +def _roll_dispatcher(a, shift, axis=None): + return (a,) + + +@array_function_dispatch(_roll_dispatcher) +def roll(a, shift, axis=None): + """ + Roll array elements along a given axis. + + Elements that roll beyond the last position are re-introduced at + the first. + + Parameters + ---------- + a : array_like + Input array. + shift : int or tuple of ints + The number of places by which elements are shifted. If a tuple, + then `axis` must be a tuple of the same size, and each of the + given axes is shifted by the corresponding number. If an int + while `axis` is a tuple of ints, then the same value is used for + all given axes. + axis : int or tuple of ints, optional + Axis or axes along which elements are shifted. By default, the + array is flattened before shifting, after which the original + shape is restored. + + Returns + ------- + res : ndarray + Output array, with the same shape as `a`. + + See Also + -------- + rollaxis : Roll the specified axis backwards, until it lies in a + given position. + + Notes + ----- + .. versionadded:: 1.12.0 + + Supports rolling over multiple dimensions simultaneously. 
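+
+    A multi-axis sketch (illustrative only, not part of the upstream
+    docstring): tuple arguments roll each listed axis by its own shift:
+
+    >>> m = np.arange(6).reshape(2, 3)
+    >>> np.roll(m, (1, 1), axis=(0, 1))
+    array([[5, 3, 4],
+           [2, 0, 1]])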
+ + Examples + -------- + >>> x = np.arange(10) + >>> np.roll(x, 2) + array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) + + >>> x2 = np.reshape(x, (2,5)) + >>> x2 + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> np.roll(x2, 1) + array([[9, 0, 1, 2, 3], + [4, 5, 6, 7, 8]]) + >>> np.roll(x2, 1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, 1, axis=1) + array([[4, 0, 1, 2, 3], + [9, 5, 6, 7, 8]]) + + """ + a = asanyarray(a) + if axis is None: + return roll(a.ravel(), shift, 0).reshape(a.shape) + + else: + axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) + broadcasted = broadcast(shift, axis) + if broadcasted.ndim > 1: + raise ValueError( + "'shift' and 'axis' should be scalars or 1D sequences") + shifts = {ax: 0 for ax in range(a.ndim)} + for sh, ax in broadcasted: + shifts[ax] += sh + + rolls = [((slice(None), slice(None)),)] * a.ndim + for ax, offset in shifts.items(): + offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. + if offset: + # (original, result), (original, result) + rolls[ax] = ((slice(None, -offset), slice(offset, None)), + (slice(-offset, None), slice(None, offset))) + + result = empty_like(a) + for indices in itertools.product(*rolls): + arr_index, res_index = zip(*indices) + result[res_index] = a[arr_index] + + return result + + +def _rollaxis_dispatcher(a, axis, start=None): + return (a,) + + +@array_function_dispatch(_rollaxis_dispatcher) +def rollaxis(a, axis, start=0): + """ + Roll the specified axis backwards, until it lies in a given position. + + This function continues to be supported for backward compatibility, but you + should prefer `moveaxis`. The `moveaxis` function was added in NumPy + 1.11. + + Parameters + ---------- + a : ndarray + Input array. + axis : int + The axis to roll backwards. The positions of the other axes do not + change relative to one another. + start : int, optional + The axis is rolled until it lies before this position. The default, + 0, results in a "complete" roll. + + Returns + ------- + res : ndarray + For NumPy >= 1.10.0 a view of `a` is always returned. For earlier + NumPy versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + See Also + -------- + moveaxis : Move array axes to new positions. + roll : Roll the elements of an array by a number of positions along a + given axis. + + Examples + -------- + >>> a = np.ones((3,4,5,6)) + >>> np.rollaxis(a, 3, 1).shape + (3, 6, 4, 5) + >>> np.rollaxis(a, 2).shape + (5, 3, 4, 6) + >>> np.rollaxis(a, 1, 4).shape + (3, 5, 6, 4) + + """ + n = a.ndim + axis = normalize_axis_index(axis, n) + if start < 0: + start += n + msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" + if not (0 <= start < n + 1): + raise AxisError(msg % ('start', -n, 'start', n + 1, start)) + if axis < start: + # it's been removed + start -= 1 + if axis == start: + return a[...] + axes = list(range(0, n)) + axes.remove(axis) + axes.insert(start, axis) + return a.transpose(axes) + + +def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): + """ + Normalizes an axis argument into a tuple of non-negative integer axes. + + This handles shorthands such as ``1`` and converts them to ``(1,)``, + as well as performing the handling of negative indices covered by + `normalize_axis_index`. + + By default, this forbids axes from being specified multiple times. + + Used internally by multi-axis-checking logic. + + .. 
versionadded:: 1.13.0 + + Parameters + ---------- + axis : int, iterable of int + The un-normalized index or indices of the axis. + ndim : int + The number of dimensions of the array that `axis` should be normalized + against. + argname : str, optional + A prefix to put before the error message, typically the name of the + argument. + allow_duplicate : bool, optional + If False, the default, disallow an axis from being specified twice. + + Returns + ------- + normalized_axes : tuple of int + The normalized axis index, such that `0 <= normalized_axis < ndim` + + Raises + ------ + AxisError + If any axis provided is out of range + ValueError + If an axis is repeated + + See also + -------- + normalize_axis_index : normalizing a single scalar axis + """ + # Optimization to speed-up the most common cases. + if type(axis) not in (tuple, list): + try: + axis = [operator.index(axis)] + except TypeError: + pass + # Going via an iterator directly is slower than via list comprehension. + axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis]) + if not allow_duplicate and len(set(axis)) != len(axis): + if argname: + raise ValueError('repeated axis in `{}` argument'.format(argname)) + else: + raise ValueError('repeated axis') + return axis + + +def _moveaxis_dispatcher(a, source, destination): + return (a,) + + +@array_function_dispatch(_moveaxis_dispatcher) +def moveaxis(a, source, destination): + """ + Move axes of an array to new positions. + + Other axes remain in their original order. + + .. versionadded:: 1.11.0 + + Parameters + ---------- + a : np.ndarray + The array whose axes should be reordered. + source : int or sequence of int + Original positions of the axes to move. These must be unique. + destination : int or sequence of int + Destination positions for each of the original axes. These must also be + unique. + + Returns + ------- + result : np.ndarray + Array with moved axes. This array is a view of the input array. + + See Also + -------- + transpose: Permute the dimensions of an array. + swapaxes: Interchange two axes of an array. + + Examples + -------- + + >>> x = np.zeros((3, 4, 5)) + >>> np.moveaxis(x, 0, -1).shape + (4, 5, 3) + >>> np.moveaxis(x, -1, 0).shape + (5, 3, 4) + + These all achieve the same result: + + >>> np.transpose(x).shape + (5, 4, 3) + >>> np.swapaxes(x, 0, -1).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1], [-1, -2]).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape + (5, 4, 3) + + """ + try: + # allow duck-array types if they define transpose + transpose = a.transpose + except AttributeError: + a = asarray(a) + transpose = a.transpose + + source = normalize_axis_tuple(source, a.ndim, 'source') + destination = normalize_axis_tuple(destination, a.ndim, 'destination') + if len(source) != len(destination): + raise ValueError('`source` and `destination` arguments must have ' + 'the same number of elements') + + order = [n for n in range(a.ndim) if n not in source] + + for dest, src in sorted(zip(destination, source)): + order.insert(dest, src) + + result = transpose(order) + return result + + +# fix hack in scipy which imports this function +def _move_axis_to_0(a, axis): + return moveaxis(a, axis, 0) + + +def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None): + return (a, b) + + +@array_function_dispatch(_cross_dispatcher) +def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): + """ + Return the cross product of two (arrays of) vectors. 
+ + The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular + to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors + are defined by the last axis of `a` and `b` by default, and these axes + can have dimensions 2 or 3. Where the dimension of either `a` or `b` is + 2, the third component of the input vector is assumed to be zero and the + cross product calculated accordingly. In cases where both input vectors + have dimension 2, the z-component of the cross product is returned. + + Parameters + ---------- + a : array_like + Components of the first vector(s). + b : array_like + Components of the second vector(s). + axisa : int, optional + Axis of `a` that defines the vector(s). By default, the last axis. + axisb : int, optional + Axis of `b` that defines the vector(s). By default, the last axis. + axisc : int, optional + Axis of `c` containing the cross product vector(s). Ignored if + both input vectors have dimension 2, as the return is scalar. + By default, the last axis. + axis : int, optional + If defined, the axis of `a`, `b` and `c` that defines the vector(s) + and cross product(s). Overrides `axisa`, `axisb` and `axisc`. + + Returns + ------- + c : ndarray + Vector cross product(s). + + Raises + ------ + ValueError + When the dimension of the vector(s) in `a` and/or `b` does not + equal 2 or 3. + + See Also + -------- + inner : Inner product + outer : Outer product. + ix_ : Construct index arrays. + + Notes + ----- + .. versionadded:: 1.9.0 + + Supports full broadcasting of the inputs. + + Examples + -------- + Vector cross-product. + + >>> x = [1, 2, 3] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([-3, 6, -3]) + + One vector with dimension 2. + + >>> x = [1, 2] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Equivalently: + + >>> x = [1, 2, 0] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Both vectors with dimension 2. + + >>> x = [1,2] + >>> y = [4,5] + >>> np.cross(x, y) + -3 + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the `right-hand rule`. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + The orientation of `c` can be changed using the `axisc` keyword. + + >>> np.cross(x, y, axisc=0) + array([[-3, 3], + [ 6, -6], + [-3, 3]]) + + Change the vector definition of `x` and `y` using `axisa` and `axisb`. 
+ + >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) + >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[ -6, 12, -6], + [ 0, 0, 0], + [ 6, -12, 6]]) + >>> np.cross(x, y, axisa=0, axisb=0) + array([[-24, 48, -24], + [-30, 60, -30], + [-36, 72, -36]]) + + """ + if axis is not None: + axisa, axisb, axisc = (axis,) * 3 + a = asarray(a) + b = asarray(b) + # Check axisa and axisb are within bounds + axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa') + axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb') + + # Move working axis to the end of the shape + a = moveaxis(a, axisa, -1) + b = moveaxis(b, axisb, -1) + msg = ("incompatible dimensions for cross product\n" + "(dimension must be 2 or 3)") + if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): + raise ValueError(msg) + + # Create the output array + shape = broadcast(a[..., 0], b[..., 0]).shape + if a.shape[-1] == 3 or b.shape[-1] == 3: + shape += (3,) + # Check axisc is within bounds + axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') + dtype = promote_types(a.dtype, b.dtype) + cp = empty(shape, dtype) + + # create local aliases for readability + a0 = a[..., 0] + a1 = a[..., 1] + if a.shape[-1] == 3: + a2 = a[..., 2] + b0 = b[..., 0] + b1 = b[..., 1] + if b.shape[-1] == 3: + b2 = b[..., 2] + if cp.ndim != 0 and cp.shape[-1] == 3: + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + if a.shape[-1] == 2: + if b.shape[-1] == 2: + # a0 * b1 - a1 * b0 + multiply(a0, b1, out=cp) + cp -= a1 * b0 + return cp + else: + assert b.shape[-1] == 3 + # cp0 = a1 * b2 - 0 (a2 = 0) + # cp1 = 0 - a0 * b2 (a2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + multiply(a0, b2, out=cp1) + negative(cp1, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + else: + assert a.shape[-1] == 3 + if b.shape[-1] == 3: + # cp0 = a1 * b2 - a2 * b1 + # cp1 = a2 * b0 - a0 * b2 + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + tmp = array(a2 * b1) + cp0 -= tmp + multiply(a2, b0, out=cp1) + multiply(a0, b2, out=tmp) + cp1 -= tmp + multiply(a0, b1, out=cp2) + multiply(a1, b0, out=tmp) + cp2 -= tmp + else: + assert b.shape[-1] == 2 + # cp0 = 0 - a2 * b1 (b2 = 0) + # cp1 = a2 * b0 - 0 (b2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a2, b1, out=cp0) + negative(cp0, out=cp0) + multiply(a2, b0, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + + return moveaxis(cp, -1, axisc) + + +little_endian = (sys.byteorder == 'little') + + +@set_module('numpy') +def indices(dimensions, dtype=int): + """ + Return an array representing the indices of a grid. + + Compute an array where the subarrays contain index values 0,1,... + varying only along the corresponding axis. + + Parameters + ---------- + dimensions : sequence of ints + The shape of the grid. + dtype : dtype, optional + Data type of the result. + + Returns + ------- + grid : ndarray + The array of grid indices, + ``grid.shape = (len(dimensions),) + tuple(dimensions)``. + + See Also + -------- + mgrid, meshgrid + + Notes + ----- + The output shape is obtained by prepending the number of dimensions + in front of the tuple of dimensions, i.e. if `dimensions` is a tuple + ``(r0, ..., rN-1)`` of length ``N``, the output shape is + ``(N,r0,...,rN-1)``. + + The subarrays ``grid[k]`` contains the N-D array of indices along the + ``k-th`` axis. 
Explicitly:: + + grid[k,i0,i1,...,iN-1] = ik + + Examples + -------- + >>> grid = np.indices((2, 3)) + >>> grid.shape + (2, 2, 3) + >>> grid[0] # row indices + array([[0, 0, 0], + [1, 1, 1]]) + >>> grid[1] # column indices + array([[0, 1, 2], + [0, 1, 2]]) + + The indices can be used as an index into an array. + + >>> x = np.arange(20).reshape(5, 4) + >>> row, col = np.indices((2, 3)) + >>> x[row, col] + array([[0, 1, 2], + [4, 5, 6]]) + + Note that it would be more straightforward in the above example to + extract the required elements directly with ``x[:2, :3]``. + + """ + dimensions = tuple(dimensions) + N = len(dimensions) + shape = (1,)*N + res = empty((N,)+dimensions, dtype=dtype) + for i, dim in enumerate(dimensions): + res[i] = arange(dim, dtype=dtype).reshape( + shape[:i] + (dim,) + shape[i+1:] + ) + return res + + +@set_module('numpy') +def fromfunction(function, shape, **kwargs): + """ + Construct an array by executing a function over each coordinate. + + The resulting array therefore has a value ``fn(x, y, z)`` at + coordinate ``(x, y, z)``. + + Parameters + ---------- + function : callable + The function is called with N parameters, where N is the rank of + `shape`. Each parameter represents the coordinates of the array + varying along a specific axis. For example, if `shape` + were ``(2, 2)``, then the parameters would be + ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` + shape : (N,) tuple of ints + Shape of the output array, which also determines the shape of + the coordinate arrays passed to `function`. + dtype : data-type, optional + Data-type of the coordinate arrays passed to `function`. + By default, `dtype` is float. + + Returns + ------- + fromfunction : any + The result of the call to `function` is passed back directly. + Therefore the shape of `fromfunction` is completely determined by + `function`. If `function` returns a scalar value, the shape of + `fromfunction` would not match the `shape` parameter. + + See Also + -------- + indices, meshgrid + + Notes + ----- + Keywords other than `dtype` are passed to `function`. + + Examples + -------- + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + array([[ True, False, False], + [False, True, False], + [False, False, True]]) + + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4]]) + + """ + dtype = kwargs.pop('dtype', float) + args = indices(shape, dtype=dtype) + return function(*args, **kwargs) + + +def _frombuffer(buf, dtype, shape, order): + return frombuffer(buf, dtype=dtype).reshape(shape, order=order) + + +@set_module('numpy') +def isscalar(num): + """ + Returns True if the type of `num` is a scalar type. + + Parameters + ---------- + num : any + Input argument, can be of any type and shape. + + Returns + ------- + val : bool + True if `num` is a scalar type, False if it is not. + + See Also + -------- + ndim : Get the number of dimensions of an array + + Notes + ----- + In almost all cases ``np.ndim(x) == 0`` should be used instead of this + function, as that will also return true for 0d arrays. This is how + numpy overloads functions in the style of the ``dx`` arguments to `gradient` + and the ``bins`` argument to `histogram`. 
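+
+    For example, a 0d array passes the ``np.ndim(x) == 0`` check even
+    though it is not a scalar::
+
+        >>> np.ndim(np.array(3.1)) == 0
+        True
+        >>> np.isscalar(np.array(3.1))
+        False
+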
Some key differences: + + +--------------------------------------+---------------+-------------------+ + | x |``isscalar(x)``|``np.ndim(x) == 0``| + +======================================+===============+===================+ + | PEP 3141 numeric objects (including | ``True`` | ``True`` | + | builtins) | | | + +--------------------------------------+---------------+-------------------+ + | builtin string and buffer objects | ``True`` | ``True`` | + +--------------------------------------+---------------+-------------------+ + | other builtin objects, like | ``False`` | ``True`` | + | `pathlib.Path`, `Exception`, | | | + | the result of `re.compile` | | | + +--------------------------------------+---------------+-------------------+ + | third-party objects like | ``False`` | ``True`` | + | `matplotlib.figure.Figure` | | | + +--------------------------------------+---------------+-------------------+ + | zero-dimensional numpy arrays | ``False`` | ``True`` | + +--------------------------------------+---------------+-------------------+ + | other numpy arrays | ``False`` | ``False`` | + +--------------------------------------+---------------+-------------------+ + | `list`, `tuple`, and other sequence | ``False`` | ``False`` | + | objects | | | + +--------------------------------------+---------------+-------------------+ + + Examples + -------- + >>> np.isscalar(3.1) + True + >>> np.isscalar(np.array(3.1)) + False + >>> np.isscalar([3.1]) + False + >>> np.isscalar(False) + True + >>> np.isscalar('numpy') + True + + NumPy supports PEP 3141 numbers: + + >>> from fractions import Fraction + >>> isscalar(Fraction(5, 17)) + True + >>> from numbers import Number + >>> isscalar(Number()) + True + + """ + return (isinstance(num, generic) + or type(num) in ScalarType + or isinstance(num, numbers.Number)) + + +@set_module('numpy') +def binary_repr(num, width=None): + """ + Return the binary representation of the input number as a string. + + For negative numbers, if width is not given, a minus sign is added to the + front. If width is given, the two's complement of the number is + returned, with respect to that width. + + In a two's-complement system negative numbers are represented by the two's + complement of the absolute value. This is the most common method of + representing signed integers on computers [1]_. A N-bit two's-complement + system can represent every integer in the range + :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. + + Parameters + ---------- + num : int + Only an integer decimal number can be used. + width : int, optional + The length of the returned string if `num` is positive, or the length + of the two's complement if `num` is negative, provided that `width` is + at least a sufficient number of bits for `num` to be represented in the + designated form. + + If the `width` value is insufficient, it will be ignored, and `num` will + be returned in binary (`num` > 0) or two's complement (`num` < 0) form + with its width equal to the minimum number of bits needed to represent + the number in the designated form. This behavior is deprecated and will + later raise an error. + + .. deprecated:: 1.12.0 + + Returns + ------- + bin : str + Binary representation of `num` or two's complement of `num`. + + See Also + -------- + base_repr: Return a string representation of a number in the given base + system. + bin: Python's built-in binary representation generator of an integer. + + Notes + ----- + `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x + faster. 
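+
+    For instance, the two produce identical strings for non-negative
+    inputs::
+
+        >>> np.binary_repr(37)
+        '100101'
+        >>> np.base_repr(37, base=2)
+        '100101'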
+ + References + ---------- + .. [1] Wikipedia, "Two's complement", + https://en.wikipedia.org/wiki/Two's_complement + + Examples + -------- + >>> np.binary_repr(3) + '11' + >>> np.binary_repr(-3) + '-11' + >>> np.binary_repr(3, width=4) + '0011' + + The two's complement is returned when the input number is negative and + width is specified: + + >>> np.binary_repr(-3, width=3) + '101' + >>> np.binary_repr(-3, width=5) + '11101' + + """ + def warn_if_insufficient(width, binwidth): + if width is not None and width < binwidth: + warnings.warn( + "Insufficient bit width provided. This behavior " + "will raise an error in the future.", DeprecationWarning, + stacklevel=3) + + if num == 0: + return '0' * (width or 1) + + elif num > 0: + binary = bin(num)[2:] + binwidth = len(binary) + outwidth = (binwidth if width is None + else max(binwidth, width)) + warn_if_insufficient(width, binwidth) + return binary.zfill(outwidth) + + else: + if width is None: + return '-' + bin(-num)[2:] + + else: + poswidth = len(bin(-num)[2:]) + + # See gh-8679: remove extra digit + # for numbers at boundaries. + if 2**(poswidth - 1) == -num: + poswidth -= 1 + + twocomp = 2**(poswidth + 1) + num + binary = bin(twocomp)[2:] + binwidth = len(binary) + + outwidth = max(binwidth, width) + warn_if_insufficient(width, binwidth) + return '1' * (outwidth - binwidth) + binary + + +@set_module('numpy') +def base_repr(number, base=2, padding=0): + """ + Return a string representation of a number in the given base system. + + Parameters + ---------- + number : int + The value to convert. Positive and negative values are handled. + base : int, optional + Convert `number` to the `base` number system. The valid range is 2-36, + the default value is 2. + padding : int, optional + Number of zeros padded on the left. Default is 0 (no padding). + + Returns + ------- + out : str + String representation of `number` in `base` system. + + See Also + -------- + binary_repr : Faster version of `base_repr` for base 2. + + Examples + -------- + >>> np.base_repr(5) + '101' + >>> np.base_repr(6, 5) + '11' + >>> np.base_repr(7, base=5, padding=3) + '00012' + + >>> np.base_repr(10, base=16) + 'A' + >>> np.base_repr(32, base=16) + '20' + + """ + digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + if base > len(digits): + raise ValueError("Bases greater than 36 not handled in base_repr.") + elif base < 2: + raise ValueError("Bases less than 2 not handled in base_repr.") + + num = abs(number) + res = [] + while num: + res.append(digits[num % base]) + num //= base + if padding: + res.append('0' * padding) + if number < 0: + res.append('-') + return ''.join(reversed(res or '0')) + + +def load(file): + """ + Wrapper around cPickle.load which accepts either a file-like object or + a filename. + + Note that the NumPy binary format is not based on pickle/cPickle anymore. + For details on the preferred way of loading and saving files, see `load` + and `save`. 
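+
+    A rough equivalent using the standard library directly (the file name
+    ``'data.pkl'`` below is only a placeholder)::
+
+        import pickle
+
+        with open('data.pkl', 'rb') as f:  # placeholder path
+            obj = pickle.load(f)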
+ + See Also + -------- + load, save + + """ + # NumPy 1.15.0, 2017-12-10 + warnings.warn( + "np.core.numeric.load is deprecated, use pickle.load instead", + DeprecationWarning, stacklevel=2) + if isinstance(file, type("")): + file = open(file, "rb") + return pickle.load(file) + + +# These are all essentially abbreviations +# These might wind up in a special abbreviations module + + +def _maketup(descr, val): + dt = dtype(descr) + # Place val in all scalar tuples: + fields = dt.fields + if fields is None: + return val + else: + res = [_maketup(fields[name][0], val) for name in dt.names] + return tuple(res) + + +@set_module('numpy') +def identity(n, dtype=None): + """ + Return the identity array. + + The identity array is a square array with ones on + the main diagonal. + + Parameters + ---------- + n : int + Number of rows (and columns) in `n` x `n` output. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + + Returns + ------- + out : ndarray + `n` x `n` array with its main diagonal set to one, + and all other elements 0. + + Examples + -------- + >>> np.identity(3) + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + """ + from numpy import eye + return eye(n, dtype=dtype) + + +def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b) + + +@array_function_dispatch(_allclose_dispatcher) +def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + If either array contains one or more NaNs, False is returned. + Infs are treated as equal if they are in the same place and of the same + sign in both arrays. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : float + The relative tolerance parameter (see Notes). + atol : float + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + .. versionadded:: 1.10.0 + + Returns + ------- + allclose : bool + Returns True if the two arrays are equal within the given + tolerance; False otherwise. + + See Also + -------- + isclose, all, any, equal + + Notes + ----- + If the following equation is element-wise True, then allclose returns + True. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + The above equation is not symmetric in `a` and `b`, so that + ``allclose(a, b)`` might be different from ``allclose(b, a)`` in + some rare cases. + + The comparison of `a` and `b` uses standard broadcasting, which + means that `a` and `b` need not have the same shape in order for + ``allclose(a, b)`` to evaluate to True. The same is true for + `equal` but not `array_equal`. 
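+
+    For example, broadcasting allows a scalar to be compared against a
+    whole array::
+
+        >>> np.allclose(1e10, [1.00001e10, 1e10])
+        True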
+ + Examples + -------- + >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) + False + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) + True + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) + False + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) + False + >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + True + + """ + res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) + return bool(res) + + +def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b) + + +@array_function_dispatch(_isclose_dispatcher) +def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns a boolean array where two arrays are element-wise equal within a + tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + .. warning:: The default `atol` is not appropriate for comparing numbers + that are much smaller than one (see Notes). + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : float + The relative tolerance parameter (see Notes). + atol : float + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + y : array_like + Returns a boolean array of where `a` and `b` are equal within the + given tolerance. If both `a` and `b` are scalars, returns a single + boolean value. + + See Also + -------- + allclose + + Notes + ----- + .. versionadded:: 1.7.0 + + For finite values, isclose uses the following equation to test whether + two floating point values are equivalent. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Unlike the built-in `math.isclose`, the above equation is not symmetric + in `a` and `b` -- it assumes `b` is the reference value -- so that + `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore, + the default value of atol is not zero, and is used to determine what + small values should be considered close to zero. The default value is + appropriate for expected values of order unity: if the expected values + are significantly smaller than one, it can result in false positives. + `atol` should be carefully selected for the use case at hand. A zero value + for `atol` will result in `False` if either `a` or `b` is zero. + + Examples + -------- + >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) + array([True, False]) + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) + array([True, True]) + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) + array([False, True]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) + array([True, False]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + array([True, True]) + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) + array([ True, False], dtype=bool) + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) + array([False, False], dtype=bool) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) + array([ True, True], dtype=bool) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) + array([False, True], dtype=bool) + """ + def within_tol(x, y, atol, rtol): + with errstate(invalid='ignore'): + return less_equal(abs(x-y), atol + rtol * abs(y)) + + x = asanyarray(a) + y = asanyarray(b) + + # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). + # This will cause casting of x later. 
Also, make sure to allow subclasses + # (e.g., for numpy.ma). + dt = multiarray.result_type(y, 1.) + y = array(y, dtype=dt, copy=False, subok=True) + + xfin = isfinite(x) + yfin = isfinite(y) + if all(xfin) and all(yfin): + return within_tol(x, y, atol, rtol) + else: + finite = xfin & yfin + cond = zeros_like(finite, subok=True) + # Because we're using boolean indexing, x & y must be the same shape. + # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in + # lib.stride_tricks, though, so we can't import it here. + x = x * ones_like(cond) + y = y * ones_like(cond) + # Avoid subtraction with infinite/nan values... + cond[finite] = within_tol(x[finite], y[finite], atol, rtol) + # Check for equality of infinite values... + cond[~finite] = (x[~finite] == y[~finite]) + if equal_nan: + # Make NaN == NaN + both_nan = isnan(x) & isnan(y) + + # Needed to treat masked arrays correctly. = True would not work. + cond[both_nan] = both_nan[both_nan] + + return cond[()] # Flatten 0d arrays to scalars + + +def _array_equal_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_array_equal_dispatcher) +def array_equal(a1, a2): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + if a1.shape != a2.shape: + return False + return bool(asarray(a1 == a2).all()) + + +def _array_equiv_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_array_equiv_dispatcher) +def array_equiv(a1, a2): + """ + Returns True if input arrays are shape consistent and all elements equal. + + Shape consistent means they are either the same shape, or one input array + can be broadcasted to create the same shape as the other one. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + out : bool + True if equivalent, False otherwise. + + Examples + -------- + >>> np.array_equiv([1, 2], [1, 2]) + True + >>> np.array_equiv([1, 2], [1, 3]) + False + + Showing the shape equivalence: + + >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) + True + >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) + False + + >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + try: + multiarray.broadcast(a1, a2) + except Exception: + return False + + return bool(asarray(a1 == a2).all()) + + +_errdict = {"ignore": ERR_IGNORE, + "warn": ERR_WARN, + "raise": ERR_RAISE, + "call": ERR_CALL, + "print": ERR_PRINT, + "log": ERR_LOG} + +_errdict_rev = {value: key for key, value in _errdict.items()} + + +@set_module('numpy') +def seterr(all=None, divide=None, over=None, under=None, invalid=None): + """ + Set how floating-point errors are handled. + + Note that operations on integer scalar types (such as `int16`) are + handled like floating point, and are affected by these settings. 
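+
+    For instance, with overflow errors ignored, an `int16` product simply
+    wraps around::
+
+        >>> with np.errstate(over='ignore'):
+        ...     np.int16(32000) * np.int16(3)
+        30464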
+
+    Parameters
+    ----------
+    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Set treatment for all types of floating-point errors at once:
+
+        - ignore: Take no action when the exception occurs.
+        - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
+        - raise: Raise a `FloatingPointError`.
+        - call: Call a function specified using the `seterrcall` function.
+        - print: Print a warning directly to ``stdout``.
+        - log: Record error in a Log object specified by `seterrcall`.
+
+        The default is not to change the current behavior.
+    divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Treatment for division by zero.
+    over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Treatment for floating-point overflow.
+    under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Treatment for floating-point underflow.
+    invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Treatment for invalid floating-point operation.
+
+    Returns
+    -------
+    old_settings : dict
+        Dictionary containing the old settings.
+
+    See also
+    --------
+    seterrcall : Set a callback function for the 'call' mode.
+    geterr, geterrcall, errstate
+
+    Notes
+    -----
+    The floating-point exceptions are defined in the IEEE 754 standard [1]_:
+
+    - Division by zero: infinite result obtained from finite numbers.
+    - Overflow: result too large to be expressed.
+    - Underflow: result so close to zero that some precision
+      was lost.
+    - Invalid operation: result is not an expressible number, typically
+      indicates that a NaN was produced.
+
+    .. [1] https://en.wikipedia.org/wiki/IEEE_754
+
+    Examples
+    --------
+    >>> old_settings = np.seterr(all='ignore')  #seterr to known value
+    >>> np.seterr(over='raise')
+    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
+     'under': 'ignore'}
+    >>> np.seterr(**old_settings)  # reset to default
+    {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore',
+     'under': 'ignore'}
+
+    >>> np.int16(32000) * np.int16(3)
+    30464
+    >>> old_settings = np.seterr(all='warn', over='raise')
+    >>> np.int16(32000) * np.int16(3)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    FloatingPointError: overflow encountered in short_scalars
+
+    >>> old_settings = np.seterr(all='print')
+    >>> np.geterr()
+    {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
+    >>> np.int16(32000) * np.int16(3)
+    Warning: overflow encountered in short_scalars
+    30464
+
+    """
+
+    pyvals = umath.geterrobj()
+    old = geterr()
+
+    if divide is None:
+        divide = all or old['divide']
+    if over is None:
+        over = all or old['over']
+    if under is None:
+        under = all or old['under']
+    if invalid is None:
+        invalid = all or old['invalid']
+
+    maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
+                 (_errdict[over] << SHIFT_OVERFLOW) +
+                 (_errdict[under] << SHIFT_UNDERFLOW) +
+                 (_errdict[invalid] << SHIFT_INVALID))
+
+    pyvals[1] = maskvalue
+    umath.seterrobj(pyvals)
+    return old
+
+
+@set_module('numpy')
+def geterr():
+    """
+    Get the current way of handling floating-point errors.
+
+    Returns
+    -------
+    res : dict
+        A dictionary with keys "divide", "over", "under", and "invalid",
+        whose values are from the strings "ignore", "print", "log", "warn",
+        "raise", and "call". The keys represent possible floating-point
+        exceptions, and the values define how these exceptions are handled.
+ + See Also + -------- + geterrcall, seterr, seterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> np.geterr() + {'over': 'warn', 'divide': 'warn', 'invalid': 'warn', + 'under': 'ignore'} + >>> np.arange(3.) / np.arange(3.) + array([ NaN, 1., 1.]) + + >>> oldsettings = np.seterr(all='warn', over='raise') + >>> np.geterr() + {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'} + >>> np.arange(3.) / np.arange(3.) + __main__:1: RuntimeWarning: invalid value encountered in divide + array([ NaN, 1., 1.]) + + """ + maskvalue = umath.geterrobj()[1] + mask = 7 + res = {} + val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask + res['divide'] = _errdict_rev[val] + val = (maskvalue >> SHIFT_OVERFLOW) & mask + res['over'] = _errdict_rev[val] + val = (maskvalue >> SHIFT_UNDERFLOW) & mask + res['under'] = _errdict_rev[val] + val = (maskvalue >> SHIFT_INVALID) & mask + res['invalid'] = _errdict_rev[val] + return res + + +@set_module('numpy') +def setbufsize(size): + """ + Set the size of the buffer used in ufuncs. + + Parameters + ---------- + size : int + Size of buffer. + + """ + if size > 10e6: + raise ValueError("Buffer size, %s, is too big." % size) + if size < 5: + raise ValueError("Buffer size, %s, is too small." % size) + if size % 16 != 0: + raise ValueError("Buffer size, %s, is not a multiple of 16." % size) + + pyvals = umath.geterrobj() + old = getbufsize() + pyvals[0] = size + umath.seterrobj(pyvals) + return old + + +@set_module('numpy') +def getbufsize(): + """ + Return the size of the buffer used in ufuncs. + + Returns + ------- + getbufsize : int + Size of ufunc buffer in bytes. + + """ + return umath.geterrobj()[0] + + +@set_module('numpy') +def seterrcall(func): + """ + Set the floating-point error callback function or log object. + + There are two ways to capture floating-point error messages. The first + is to set the error-handler to 'call', using `seterr`. Then, set + the function to call using this function. + + The second is to set the error-handler to 'log', using `seterr`. + Floating-point errors then trigger a call to the 'write' method of + the provided object. + + Parameters + ---------- + func : callable f(err, flag) or object with write method + Function to call upon floating-point errors ('call'-mode) or + object whose 'write' method is used to log such message ('log'-mode). + + The call function takes two arguments. The first is a string describing + the type of error (such as "divide by zero", "overflow", "underflow", + or "invalid value"), and the second is the status flag. The flag is a + byte, whose four least-significant bits indicate the type of error, one + of "divide", "over", "under", "invalid":: + + [0 0 0 0 divide over under invalid] + + In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. + + If an object is provided, its write method should take one argument, + a string. + + Returns + ------- + h : callable, log instance or None + The old error handler. + + See Also + -------- + seterr, geterr, geterrcall + + Examples + -------- + Callback upon error: + + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + ... 
+
+    >>> saved_handler = np.seterrcall(err_handler)
+    >>> save_err = np.seterr(all='call')
+
+    >>> np.array([1, 2, 3]) / 0.0
+    Floating point error (divide by zero), with flag 1
+    array([ Inf, Inf, Inf])
+
+    >>> np.seterrcall(saved_handler)
+    <function err_handler at 0x...>
+    >>> np.seterr(**save_err)
+    {'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'}
+
+    Log error message:
+
+    >>> class Log(object):
+    ...     def write(self, msg):
+    ...         print("LOG: %s" % msg)
+    ...
+
+    >>> log = Log()
+    >>> saved_handler = np.seterrcall(log)
+    >>> save_err = np.seterr(all='log')
+
+    >>> np.array([1, 2, 3]) / 0.0
+    LOG: Warning: divide by zero encountered in divide
+
+    array([ Inf, Inf, Inf])
+
+    >>> np.seterrcall(saved_handler)
+    <__main__.Log object at 0x...>
+    >>> np.seterr(**save_err)
+    {'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'}
+
+    """
+    if func is not None and not isinstance(func, collections_abc.Callable):
+        if not hasattr(func, 'write') or not isinstance(func.write, collections_abc.Callable):
+            raise ValueError("Only callable can be used as callback")
+    pyvals = umath.geterrobj()
+    old = geterrcall()
+    pyvals[2] = func
+    umath.seterrobj(pyvals)
+    return old
+
+
+@set_module('numpy')
+def geterrcall():
+    """
+    Return the current callback function used on floating-point errors.
+
+    When the error handling for a floating-point error (one of "divide",
+    "over", "under", or "invalid") is set to 'call' or 'log', the function
+    that is called or the log instance that is written to is returned by
+    `geterrcall`. This function or log instance has been set with
+    `seterrcall`.
+
+    Returns
+    -------
+    errobj : callable, log instance or None
+        The current error handler. If no handler was set through `seterrcall`,
+        ``None`` is returned.
+
+    See Also
+    --------
+    seterrcall, seterr, geterr
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> np.geterrcall()  # we did not yet set a handler, returns None
+
+    >>> oldsettings = np.seterr(all='call')
+    >>> def err_handler(type, flag):
+    ...     print("Floating point error (%s), with flag %s" % (type, flag))
+    >>> oldhandler = np.seterrcall(err_handler)
+    >>> np.array([1, 2, 3]) / 0.0
+    Floating point error (divide by zero), with flag 1
+    array([ Inf, Inf, Inf])
+
+    >>> cur_handler = np.geterrcall()
+    >>> cur_handler is err_handler
+    True
+
+    """
+    return umath.geterrobj()[2]
+
+
+class _unspecified(object):
+    pass
+
+
+_Unspecified = _unspecified()
+
+
+@set_module('numpy')
+class errstate(object):
+    """
+    errstate(**kwargs)
+
+    Context manager for floating-point error handling.
+
+    Using an instance of `errstate` as a context manager allows statements in
+    that context to execute with a known error handling behavior. Upon entering
+    the context the error handling is set with `seterr` and `seterrcall`, and
+    upon exiting it is reset to what it was before.
+
+    Parameters
+    ----------
+    kwargs : {divide, over, under, invalid}
+        Keyword arguments. The valid keywords are the possible floating-point
+        exceptions. Each keyword should have a string value that defines the
+        treatment for the particular error. Possible values are
+        {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
+
+    See Also
+    --------
+    seterr, geterr, seterrcall, geterrcall
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
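+
+    For example, `errstate` contexts nest, with each level restored on
+    exit::
+
+        >>> with np.errstate(all='ignore'):
+        ...     with np.errstate(divide='raise'):
+        ...         np.geterr()['divide']
+        'raise'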
+
+    Examples
+    --------
+    >>> olderr = np.seterr(all='ignore')  # Set error handling to known state.
+
+    >>> np.arange(3) / 0.
+    array([ NaN, Inf, Inf])
+    >>> with np.errstate(divide='warn'):
+    ...     np.arange(3) / 0.
+    ...
+    __main__:2: RuntimeWarning: divide by zero encountered in divide
+    array([ NaN, Inf, Inf])
+
+    >>> np.sqrt(-1)
+    nan
+    >>> with np.errstate(invalid='raise'):
+    ...     np.sqrt(-1)
+    Traceback (most recent call last):
+      File "<stdin>", line 2, in <module>
+    FloatingPointError: invalid value encountered in sqrt
+
+    Outside the context the error handling behavior has not changed:
+
+    >>> np.geterr()
+    {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
+     'under': 'ignore'}
+
+    """
+    # Note that we don't want to run the above doctests because they will fail
+    # without a from __future__ import with_statement
+
+    def __init__(self, **kwargs):
+        self.call = kwargs.pop('call', _Unspecified)
+        self.kwargs = kwargs
+
+    def __enter__(self):
+        self.oldstate = seterr(**self.kwargs)
+        if self.call is not _Unspecified:
+            self.oldcall = seterrcall(self.call)
+
+    def __exit__(self, *exc_info):
+        seterr(**self.oldstate)
+        if self.call is not _Unspecified:
+            seterrcall(self.oldcall)
+
+
+def _setdef():
+    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
+    umath.seterrobj(defval)
+
+
+# set the default values
+_setdef()
+
+Inf = inf = infty = Infinity = PINF
+nan = NaN = NAN
+False_ = bool_(False)
+True_ = bool_(True)
+
+
+def extend_all(module):
+    existing = set(__all__)
+    mall = getattr(module, '__all__')
+    for a in mall:
+        if a not in existing:
+            __all__.append(a)
+
+
+from .umath import *
+from .numerictypes import *
+from . import fromnumeric
+from .fromnumeric import *
+from . import arrayprint
+from .arrayprint import *
+extend_all(fromnumeric)
+extend_all(umath)
+extend_all(numerictypes)
+extend_all(arrayprint)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/numeric.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/numeric.pyc
new file mode 100644
index 0000000..51516e5
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/numeric.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.py b/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.py
new file mode 100644
index 0000000..f00f922
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.py
@@ -0,0 +1,675 @@
+"""
+numerictypes: Define the numeric type objects
+
+This module is designed so "from numerictypes import \\*" is safe.
+Exported symbols include: + + Dictionary with all registered number types (including aliases): + typeDict + + Type objects (not all will be available, depends on platform): + see variable sctypes for which ones you have + + Bit-width names + + int8 int16 int32 int64 int128 + uint8 uint16 uint32 uint64 uint128 + float16 float32 float64 float96 float128 float256 + complex32 complex64 complex128 complex192 complex256 complex512 + datetime64 timedelta64 + + c-based names + + bool_ + + object_ + + void, str_, unicode_ + + byte, ubyte, + short, ushort + intc, uintc, + intp, uintp, + int_, uint, + longlong, ulonglong, + + single, csingle, + float_, complex_, + longfloat, clongfloat, + + As part of the type-hierarchy: xx -- is bit-width + + generic + +-> bool_ (kind=b) + +-> number + | +-> integer + | | +-> signedinteger (intxx) (kind=i) + | | | byte + | | | short + | | | intc + | | | intp int0 + | | | int_ + | | | longlong + | | \\-> unsignedinteger (uintxx) (kind=u) + | | ubyte + | | ushort + | | uintc + | | uintp uint0 + | | uint_ + | | ulonglong + | +-> inexact + | +-> floating (floatxx) (kind=f) + | | half + | | single + | | float_ (double) + | | longfloat + | \\-> complexfloating (complexxx) (kind=c) + | csingle (singlecomplex) + | complex_ (cfloat, cdouble) + | clongfloat (longcomplex) + +-> flexible + | +-> character + | | str_ (string_, bytes_) (kind=S) [Python 2] + | | unicode_ (kind=U) [Python 2] + | | + | | bytes_ (string_) (kind=S) [Python 3] + | | str_ (unicode_) (kind=U) [Python 3] + | | + | \\-> void (kind=V) + \\-> object_ (not used much) (kind=O) + +""" +from __future__ import division, absolute_import, print_function + +import types as _types +import sys +import numbers +import warnings + +from numpy.compat import bytes, long +from numpy.core.multiarray import ( + typeinfo, ndarray, array, empty, dtype, datetime_data, + datetime_as_string, busday_offset, busday_count, is_busday, + busdaycalendar + ) +from numpy.core.overrides import set_module + +# we add more at the bottom +__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes', + 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', + 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', + 'issubdtype', 'datetime_data', 'datetime_as_string', + 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', + ] + +# we don't need all these imports, but we need to keep them for compatibility +# for users using np.core.numerictypes.UPPER_TABLE +from ._string_helpers import ( + english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE +) + +from ._type_aliases import ( + sctypeDict, + sctypeNA, + allTypes, + bitname, + sctypes, + _concrete_types, + _concrete_typeinfo, + _bits_of, +) +from ._dtype import _kind_name + +# we don't export these for import *, but we do want them accessible +# as numerictypes.bool, etc. +if sys.version_info[0] >= 3: + from builtins import bool, int, float, complex, object, str + unicode = str +else: + from __builtin__ import bool, int, float, complex, object, unicode, str + + +# We use this later +generic = allTypes['generic'] + +genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', + 'int32', 'uint32', 'int64', 'uint64', 'int128', + 'uint128', 'float16', + 'float32', 'float64', 'float80', 'float96', 'float128', + 'float256', + 'complex32', 'complex64', 'complex128', 'complex160', + 'complex192', 'complex256', 'complex512', 'object'] + +def maximum_sctype(t): + """ + Return the scalar type of highest precision of the same kind as the input. 
+
+    Parameters
+    ----------
+    t : dtype or dtype specifier
+        The input data type. This can be a `dtype` object or an object that
+        is convertible to a `dtype`.
+
+    Returns
+    -------
+    out : dtype
+        The highest precision data type of the same kind (`dtype.kind`) as `t`.
+
+    See Also
+    --------
+    obj2sctype, mintypecode, sctype2char
+    dtype
+
+    Examples
+    --------
+    >>> np.maximum_sctype(int)
+    <type 'numpy.int64'>
+    >>> np.maximum_sctype(np.uint8)
+    <type 'numpy.uint64'>
+    >>> np.maximum_sctype(complex)
+    <type 'numpy.complex192'>
+
+    >>> np.maximum_sctype(str)
+    <type 'numpy.string_'>
+
+    >>> np.maximum_sctype('i2')
+    <type 'numpy.int64'>
+    >>> np.maximum_sctype('f4')
+    <type 'numpy.float96'>
+
+    """
+    g = obj2sctype(t)
+    if g is None:
+        return t
+    t = g
+    base = _kind_name(dtype(t))
+    if base in sctypes:
+        return sctypes[base][-1]
+    else:
+        return t
+
+
+@set_module('numpy')
+def issctype(rep):
+    """
+    Determines whether the given object represents a scalar data-type.
+
+    Parameters
+    ----------
+    rep : any
+        If `rep` is an instance of a scalar dtype, True is returned. If not,
+        False is returned.
+
+    Returns
+    -------
+    out : bool
+        Boolean result of check whether `rep` is a scalar dtype.
+
+    See Also
+    --------
+    issubsctype, issubdtype, obj2sctype, sctype2char
+
+    Examples
+    --------
+    >>> np.issctype(np.int32)
+    True
+    >>> np.issctype(list)
+    False
+    >>> np.issctype(1.1)
+    False
+
+    Strings are also a scalar type:
+
+    >>> np.issctype(np.dtype('str'))
+    True
+
+    """
+    if not isinstance(rep, (type, dtype)):
+        return False
+    try:
+        res = obj2sctype(rep)
+        if res and res != object_:
+            return True
+        return False
+    except Exception:
+        return False
+
+
+@set_module('numpy')
+def obj2sctype(rep, default=None):
+    """
+    Return the scalar dtype or NumPy equivalent of Python type of an object.
+
+    Parameters
+    ----------
+    rep : any
+        The object of which the type is returned.
+    default : any, optional
+        If given, this is returned for objects whose types can not be
+        determined. If not given, None is returned for those objects.
+
+    Returns
+    -------
+    dtype : dtype or Python type
+        The data type of `rep`.
+
+    See Also
+    --------
+    sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
+
+    Examples
+    --------
+    >>> np.obj2sctype(np.int32)
+    <type 'numpy.int32'>
+    >>> np.obj2sctype(np.array([1., 2.]))
+    <type 'numpy.float64'>
+    >>> np.obj2sctype(np.array([1.j]))
+    <type 'numpy.complex128'>
+
+    >>> np.obj2sctype(dict)
+    <type 'numpy.object_'>
+    >>> np.obj2sctype('string')
+    <type 'numpy.string_'>
+
+    >>> np.obj2sctype(1, default=list)
+    <type 'list'>
+
+    """
+    # prevent abstract classes being upcast
+    if isinstance(rep, type) and issubclass(rep, generic):
+        return rep
+    # extract dtype from arrays
+    if isinstance(rep, ndarray):
+        return rep.dtype.type
+    # fall back on dtype to convert
+    try:
+        res = dtype(rep)
+    except Exception:
+        return default
+    else:
+        return res.type
+
+
+@set_module('numpy')
+def issubclass_(arg1, arg2):
+    """
+    Determine if a class is a subclass of a second class.
+
+    `issubclass_` is equivalent to the Python built-in ``issubclass``,
+    except that it returns False instead of raising a TypeError if one
+    of the arguments is not a class.
+
+    Parameters
+    ----------
+    arg1 : class
+        Input class. True is returned if `arg1` is a subclass of `arg2`.
+    arg2 : class or tuple of classes.
+        Input class. If a tuple of classes, True is returned if `arg1` is a
+        subclass of any of the tuple elements.
+
+    Returns
+    -------
+    out : bool
+        Whether `arg1` is a subclass of `arg2` or not.
+ + See Also + -------- + issubsctype, issubdtype, issctype + + Examples + -------- + >>> np.issubclass_(np.int32, int) + True + >>> np.issubclass_(np.int32, float) + False + + """ + try: + return issubclass(arg1, arg2) + except TypeError: + return False + + +@set_module('numpy') +def issubsctype(arg1, arg2): + """ + Determine if the first argument is a subclass of the second argument. + + Parameters + ---------- + arg1, arg2 : dtype or dtype specifier + Data-types. + + Returns + ------- + out : bool + The result. + + See Also + -------- + issctype, issubdtype,obj2sctype + + Examples + -------- + >>> np.issubsctype('S8', str) + True + >>> np.issubsctype(np.array([1]), int) + True + >>> np.issubsctype(np.array([1]), float) + False + + """ + return issubclass(obj2sctype(arg1), obj2sctype(arg2)) + + +@set_module('numpy') +def issubdtype(arg1, arg2): + """ + Returns True if first argument is a typecode lower/equal in type hierarchy. + + Parameters + ---------- + arg1, arg2 : dtype_like + dtype or string representing a typecode. + + Returns + ------- + out : bool + + See Also + -------- + issubsctype, issubclass_ + numpy.core.numerictypes : Overview of numpy type hierarchy. + + Examples + -------- + >>> np.issubdtype('S1', np.string_) + True + >>> np.issubdtype(np.float64, np.float32) + False + + """ + if not issubclass_(arg1, generic): + arg1 = dtype(arg1).type + if not issubclass_(arg2, generic): + arg2_orig = arg2 + arg2 = dtype(arg2).type + if not isinstance(arg2_orig, dtype): + # weird deprecated behaviour, that tried to infer np.floating from + # float, and similar less obvious things, such as np.generic from + # basestring + mro = arg2.mro() + arg2 = mro[1] if len(mro) > 1 else mro[0] + + def type_repr(x): + """ Helper to produce clear error messages """ + if not isinstance(x, type): + return repr(x) + elif issubclass(x, generic): + return "np.{}".format(x.__name__) + else: + return x.__name__ + + # 1.14, 2017-08-01 + warnings.warn( + "Conversion of the second argument of issubdtype from `{raw}` " + "to `{abstract}` is deprecated. In future, it will be treated " + "as `{concrete} == np.dtype({raw}).type`.".format( + raw=type_repr(arg2_orig), + abstract=type_repr(arg2), + concrete=type_repr(dtype(arg2_orig).type) + ), + FutureWarning, stacklevel=2 + ) + + return issubclass(arg1, arg2) + + +# This dictionary allows look up based on any alias for an array data-type +class _typedict(dict): + """ + Base object for a dictionary for look-up with any alias for an array dtype. + + Instances of `_typedict` can not be used as dictionaries directly, + first they have to be populated. + + """ + + def __getitem__(self, obj): + return dict.__getitem__(self, obj2sctype(obj)) + +nbytes = _typedict() +_alignment = _typedict() +_maxvals = _typedict() +_minvals = _typedict() +def _construct_lookups(): + for name, info in _concrete_typeinfo.items(): + obj = info.type + nbytes[obj] = info.bits // 8 + _alignment[obj] = info.alignment + if len(info) > 5: + _maxvals[obj] = info.max + _minvals[obj] = info.min + else: + _maxvals[obj] = None + _minvals[obj] = None + +_construct_lookups() + + +@set_module('numpy') +def sctype2char(sctype): + """ + Return the string representation of a scalar dtype. + + Parameters + ---------- + sctype : scalar dtype or object + If a scalar dtype, the corresponding string character is + returned. If an object, `sctype2char` tries to infer its scalar type + and then return the corresponding string character. 
+ + Returns + ------- + typechar : str + The string character corresponding to the scalar type. + + Raises + ------ + ValueError + If `sctype` is an object for which the type can not be inferred. + + See Also + -------- + obj2sctype, issctype, issubsctype, mintypecode + + Examples + -------- + >>> for sctype in [np.int32, float, complex, np.string_, np.ndarray]: + ... print(np.sctype2char(sctype)) + l + d + D + S + O + + >>> x = np.array([1., 2-1.j]) + >>> np.sctype2char(x) + 'D' + >>> np.sctype2char(list) + 'O' + + """ + sctype = obj2sctype(sctype) + if sctype is None: + raise ValueError("unrecognized type") + if sctype not in _concrete_types: + # for compatibility + raise KeyError(sctype) + return dtype(sctype).char + +# Create dictionary of casting functions that wrap sequences +# indexed by type or type character +cast = _typedict() +for key in _concrete_types: + cast[key] = lambda x, k=key: array(x, copy=False).astype(k) + +try: + ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType, + _types.LongType, _types.BooleanType, + _types.StringType, _types.UnicodeType, _types.BufferType] +except AttributeError: + # Py3K + ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] + +ScalarType.extend(_concrete_types) +ScalarType = tuple(ScalarType) + + +# Now add the types we've determined to this module +for key in allTypes: + globals()[key] = allTypes[key] + __all__.append(key) + +del key + +typecodes = {'Character':'c', + 'Integer':'bhilqp', + 'UnsignedInteger':'BHILQP', + 'Float':'efdg', + 'Complex':'FDG', + 'AllInteger':'bBhHiIlLqQpP', + 'AllFloat':'efdgFDG', + 'Datetime': 'Mm', + 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} + +# backwards compatibility --- deprecated name +typeDict = sctypeDict +typeNA = sctypeNA + +# b -> boolean +# u -> unsigned integer +# i -> signed integer +# f -> floating point +# c -> complex +# M -> datetime +# m -> timedelta +# S -> string +# U -> Unicode string +# V -> record +# O -> Python object +_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm'] + +__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O' +__len_test_types = len(__test_types) + +# Keep incrementing until a common type both can be coerced to +# is found. Otherwise, return None +def _find_common_coerce(a, b): + if a > b: + return a + try: + thisind = __test_types.index(a.char) + except ValueError: + return None + return _can_coerce_all([a, b], start=thisind) + +# Find a data-type that all data-types in a list can be coerced to +def _can_coerce_all(dtypelist, start=0): + N = len(dtypelist) + if N == 0: + return None + if N == 1: + return dtypelist[0] + thisind = start + while thisind < __len_test_types: + newdtype = dtype(__test_types[thisind]) + numcoerce = len([x for x in dtypelist if newdtype >= x]) + if numcoerce == N: + return newdtype + thisind += 1 + return None + +def _register_types(): + numbers.Integral.register(integer) + numbers.Complex.register(inexact) + numbers.Real.register(floating) + numbers.Number.register(number) + +_register_types() + + +@set_module('numpy') +def find_common_type(array_types, scalar_types): + """ + Determine common type following standard coercion rules. + + Parameters + ---------- + array_types : sequence + A list of dtypes or dtype convertible objects representing arrays. + scalar_types : sequence + A list of dtypes or dtype convertible objects representing scalars. 
+ + Returns + ------- + datatype : dtype + The common data type, which is the maximum of `array_types` ignoring + `scalar_types`, unless the maximum of `scalar_types` is of a + different kind (`dtype.kind`). If the kind is not understood, then + None is returned. + + See Also + -------- + dtype, common_type, can_cast, mintypecode + + Examples + -------- + >>> np.find_common_type([], [np.int64, np.float32, complex]) + dtype('complex128') + >>> np.find_common_type([np.int64, np.float32], []) + dtype('float64') + + The standard casting rules ensure that a scalar cannot up-cast an + array unless the scalar is of a fundamentally different kind of data + (i.e. under a different hierarchy in the data type hierarchy) then + the array: + + >>> np.find_common_type([np.float32], [np.int64, np.float64]) + dtype('float32') + + Complex is of a different type, so it up-casts the float in the + `array_types` argument: + + >>> np.find_common_type([np.float32], [complex]) + dtype('complex128') + + Type specifier strings are convertible to dtypes and can therefore + be used instead of dtypes: + + >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8']) + dtype('complex128') + + """ + array_types = [dtype(x) for x in array_types] + scalar_types = [dtype(x) for x in scalar_types] + + maxa = _can_coerce_all(array_types) + maxsc = _can_coerce_all(scalar_types) + + if maxa is None: + return maxsc + + if maxsc is None: + return maxa + + try: + index_a = _kind_list.index(maxa.kind) + index_sc = _kind_list.index(maxsc.kind) + except ValueError: + return None + + if index_sc > index_a: + return _find_common_coerce(maxsc, maxa) + else: + return maxa diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.pyc new file mode 100644 index 0000000..18638de Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/overrides.py b/project/venv/lib/python2.7/site-packages/numpy/core/overrides.py new file mode 100644 index 0000000..c55174e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/overrides.py @@ -0,0 +1,187 @@ +"""Implementation of __array_function__ overrides from NEP-18.""" +import collections +import functools +import os + +from numpy.core._multiarray_umath import ( + add_docstring, implement_array_function, _get_implementing_args) +from numpy.compat._inspect import getargspec + + +ENABLE_ARRAY_FUNCTION = bool( + int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 0))) + + +add_docstring( + implement_array_function, + """ + Implement a function with checks for __array_function__ overrides. + + All arguments are required, and can only be passed by position. + + Arguments + --------- + implementation : function + Function that implements the operation on NumPy array without + overrides when called like ``implementation(*args, **kwargs)``. + public_api : function + Function exposed by NumPy's public API originally called like + ``public_api(*args, **kwargs)`` on which arguments are now being + checked. + relevant_args : iterable + Iterable of arguments to check for __array_function__ methods. + args : tuple + Arbitrary positional arguments originally passed into ``public_api``. + kwargs : dict + Arbitrary keyword arguments originally passed into ``public_api``. + + Returns + ------- + Result from calling ``implementation()`` or an ``__array_function__`` + method, as appropriate. 
+
+    Raises
+    ------
+    TypeError : if no implementation is found.
+    """)
+
+
+# exposed for testing purposes; used internally by implement_array_function
+add_docstring(
+    _get_implementing_args,
+    """
+    Collect arguments on which to call __array_function__.
+
+    Parameters
+    ----------
+    relevant_args : iterable of array-like
+        Iterable of possibly array-like arguments to check for
+        __array_function__ methods.
+
+    Returns
+    -------
+    Sequence of arguments with __array_function__ methods, in the order in
+    which they should be called.
+    """)
+
+
+ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
+
+
+def verify_matching_signatures(implementation, dispatcher):
+    """Verify that a dispatcher function has the right signature."""
+    implementation_spec = ArgSpec(*getargspec(implementation))
+    dispatcher_spec = ArgSpec(*getargspec(dispatcher))
+
+    if (implementation_spec.args != dispatcher_spec.args or
+            implementation_spec.varargs != dispatcher_spec.varargs or
+            implementation_spec.keywords != dispatcher_spec.keywords or
+            (bool(implementation_spec.defaults) !=
+             bool(dispatcher_spec.defaults)) or
+            (implementation_spec.defaults is not None and
+             len(implementation_spec.defaults) !=
+             len(dispatcher_spec.defaults))):
+        raise RuntimeError('implementation and dispatcher for %s have '
+                           'different function signatures' % implementation)
+
+    if implementation_spec.defaults is not None:
+        if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
+            raise RuntimeError('dispatcher functions can only use None for '
+                               'default argument values')
+
+
+def set_module(module):
+    """Decorator for overriding __module__ on a function or class.
+
+    Example usage::
+
+        @set_module('numpy')
+        def example():
+            pass
+
+        assert example.__module__ == 'numpy'
+    """
+    def decorator(func):
+        if module is not None:
+            func.__module__ = module
+        return func
+    return decorator
+
+
+def array_function_dispatch(dispatcher, module=None, verify=True,
+                            docs_from_dispatcher=False):
+    """Decorator for adding dispatch with the __array_function__ protocol.
+
+    See NEP-18 for example usage.
+
+    Parameters
+    ----------
+    dispatcher : callable
+        Function that when called like ``dispatcher(*args, **kwargs)`` with
+        arguments from the NumPy function call returns an iterable of
+        array-like arguments to check for ``__array_function__``.
+    module : str, optional
+        __module__ attribute to set on new function, e.g., ``module='numpy'``.
+        By default, module is copied from the decorated function.
+    verify : bool, optional
+        If True, verify that the signatures of the dispatcher and decorated
+        function match exactly: all required and optional arguments should
+        appear in order with the same names, but the default values for all
+        optional arguments should be ``None``. Only disable verification if
+        the dispatcher's signature needs to deviate for some particular
+        reason, e.g., because the function has a signature like
+        ``func(*args, **kwargs)``.
+    docs_from_dispatcher : bool, optional
+        If True, copy docs from the dispatcher function onto the dispatched
+        function, rather than from the implementation. This is useful for
+        functions defined in C, which otherwise don't have docstrings.
+
+    Returns
+    -------
+    Function suitable for decorating the implementation of a NumPy function.
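A short sketch of how this decorator is typically wired up; the names `_clipped_sum_dispatcher` and `clipped_sum` are illustrative, not NumPy API. The dispatcher mirrors the implementation's signature, uses `None` for every default (as `verify_matching_signatures` above enforces), and returns only the arguments worth scanning for `__array_function__`:

from numpy.core.overrides import array_function_dispatch

def _clipped_sum_dispatcher(a, limit=None):
    # Only `a` can carry an __array_function__ override.
    return (a,)

@array_function_dispatch(_clipped_sum_dispatcher, module='numpy')
def clipped_sum(a, limit=None):
    """Sum `a`, optionally clipping the result at `limit`."""
    total = a.sum()
    return total if limit is None else min(total, limit)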
+ """ + + if not ENABLE_ARRAY_FUNCTION: + # __array_function__ requires an explicit opt-in for now + def decorator(implementation): + if module is not None: + implementation.__module__ = module + if docs_from_dispatcher: + add_docstring(implementation, dispatcher.__doc__) + return implementation + return decorator + + def decorator(implementation): + if verify: + verify_matching_signatures(implementation, dispatcher) + + if docs_from_dispatcher: + add_docstring(implementation, dispatcher.__doc__) + + @functools.wraps(implementation) + def public_api(*args, **kwargs): + relevant_args = dispatcher(*args, **kwargs) + return implement_array_function( + implementation, public_api, relevant_args, args, kwargs) + + if module is not None: + public_api.__module__ = module + + # TODO: remove this when we drop Python 2 support (functools.wraps + # adds __wrapped__ automatically in later versions) + public_api.__wrapped__ = implementation + + return public_api + + return decorator + + +def array_function_from_dispatcher( + implementation, module=None, verify=True, docs_from_dispatcher=True): + """Like array_function_dispatcher, but with function arguments flipped.""" + + def decorator(dispatcher): + return array_function_dispatch( + dispatcher, module, verify=verify, + docs_from_dispatcher=docs_from_dispatcher)(implementation) + return decorator diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/overrides.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/overrides.pyc new file mode 100644 index 0000000..2f836f9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/overrides.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/records.py b/project/venv/lib/python2.7/site-packages/numpy/core/records.py new file mode 100644 index 0000000..5898bb1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/records.py @@ -0,0 +1,879 @@ +""" +Record Arrays +============= +Record arrays expose the fields of structured arrays as properties. + +Most commonly, ndarrays contain elements of a single type, e.g. floats, +integers, bools etc. However, it is possible for elements to be combinations +of these using structured types, such as:: + + >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)]) + >>> a + array([(1, 2.0), (1, 2.0)], + dtype=[('x', '>> a['x'] + array([1, 1]) + + >>> a['y'] + array([ 2., 2.]) + +Record arrays allow us to access fields as properties:: + + >>> ar = np.rec.array(a) + + >>> ar.x + array([1, 1]) + + >>> ar.y + array([ 2., 2.]) + +""" +from __future__ import division, absolute_import, print_function + +import sys +import os +import warnings + +from . import numeric as sb +from . 
import numerictypes as nt +from numpy.compat import isfileobj, bytes, long, unicode, os_fspath +from numpy.core.overrides import set_module +from .arrayprint import get_printoptions + +# All of the functions allow formats to be a dtype +__all__ = ['record', 'recarray', 'format_parser'] + + +ndarray = sb.ndarray + +_byteorderconv = {'b':'>', + 'l':'<', + 'n':'=', + 'B':'>', + 'L':'<', + 'N':'=', + 'S':'s', + 's':'s', + '>':'>', + '<':'<', + '=':'=', + '|':'|', + 'I':'|', + 'i':'|'} + +# formats regular expression +# allows multidimension spec with a tuple syntax in front +# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' +# are equally allowed + +numfmt = nt.typeDict + +def find_duplicate(list): + """Find duplication in a list, return a list of duplicated elements""" + dup = [] + for i in range(len(list)): + if (list[i] in list[i + 1:]): + if (list[i] not in dup): + dup.append(list[i]) + return dup + + +@set_module('numpy') +class format_parser(object): + """ + Class to convert formats, names, titles description to a dtype. + + After constructing the format_parser object, the dtype attribute is + the converted data-type: + ``dtype = format_parser(formats, names, titles).dtype`` + + Attributes + ---------- + dtype : dtype + The converted data-type. + + Parameters + ---------- + formats : str or list of str + The format description, either specified as a string with + comma-separated format descriptions in the form ``'f8, i4, a5'``, or + a list of format description strings in the form + ``['f8', 'i4', 'a5']``. + names : str or list/tuple of str + The field names, either specified as a comma-separated string in the + form ``'col1, col2, col3'``, or as a list or tuple of strings in the + form ``['col1', 'col2', 'col3']``. + An empty list can be used, in that case default field names + ('f0', 'f1', ...) are used. + titles : sequence + Sequence of title strings. An empty list can be used to leave titles + out. + aligned : bool, optional + If True, align the fields by padding as the C-compiler would. + Default is False. + byteorder : str, optional + If specified, all the fields will be changed to the + provided byte-order. Otherwise, the default byte-order is + used. For all available string specifiers, see `dtype.newbyteorder`. + + See Also + -------- + dtype, typename, sctype2char + + Examples + -------- + >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + ... ['T1', 'T2', 'T3']).dtype + dtype([(('T1', 'col1'), '>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + ... []).dtype + dtype([('col1', '>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype + dtype([('f0', ' len(titles)): + self._titles += [None] * (self._nfields - len(titles)) + + def _createdescr(self, byteorder): + descr = sb.dtype({'names':self._names, + 'formats':self._f_formats, + 'offsets':self._offsets, + 'titles':self._titles}) + if (byteorder is not None): + byteorder = _byteorderconv[byteorder[0]] + descr = descr.newbyteorder(byteorder) + + self._descr = descr + +class record(nt.void): + """A data-type scalar that allows field access as attribute lookup. 
+ """ + + # manually set name and module so that this class's type shows up + # as numpy.record when printed + __name__ = 'record' + __module__ = 'numpy' + + def __repr__(self): + if get_printoptions()['legacy'] == '1.13': + return self.__str__() + return super(record, self).__repr__() + + def __str__(self): + if get_printoptions()['legacy'] == '1.13': + return str(self.item()) + return super(record, self).__str__() + + def __getattribute__(self, attr): + if attr in ['setfield', 'getfield', 'dtype']: + return nt.void.__getattribute__(self, attr) + try: + return nt.void.__getattribute__(self, attr) + except AttributeError: + pass + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + obj = self.getfield(*res[:2]) + # if it has fields return a record, + # otherwise return the object + try: + dt = obj.dtype + except AttributeError: + #happens if field is Object type + return obj + if dt.fields: + return obj.view((self.__class__, obj.dtype.fields)) + return obj + else: + raise AttributeError("'record' object has no " + "attribute '%s'" % attr) + + def __setattr__(self, attr, val): + if attr in ['setfield', 'getfield', 'dtype']: + raise AttributeError("Cannot set '%s' attribute" % attr) + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + return self.setfield(val, *res[:2]) + else: + if getattr(self, attr, None): + return nt.void.__setattr__(self, attr, val) + else: + raise AttributeError("'record' object has no " + "attribute '%s'" % attr) + + def __getitem__(self, indx): + obj = nt.void.__getitem__(self, indx) + + # copy behavior of record.__getattribute__, + if isinstance(obj, nt.void) and obj.dtype.fields: + return obj.view((self.__class__, obj.dtype.fields)) + else: + # return a single element + return obj + + def pprint(self): + """Pretty-print all fields.""" + # pretty-print all fields + names = self.dtype.names + maxlen = max(len(name) for name in names) + fmt = '%% %ds: %%s' % maxlen + rows = [fmt % (name, getattr(self, name)) for name in names] + return "\n".join(rows) + +# The recarray is almost identical to a standard array (which supports +# named fields already) The biggest difference is that it can use +# attribute-lookup to find the fields and it is constructed using +# a record. + +# If byteorder is given it forces a particular byteorder on all +# the fields (and any subfields) + +class recarray(ndarray): + """Construct an ndarray that allows field access using attributes. + + Arrays may have a data-types containing fields, analogous + to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, + where each entry in the array is a pair of ``(int, float)``. Normally, + these attributes are accessed using dictionary lookups such as ``arr['x']`` + and ``arr['y']``. Record arrays allow the fields to be accessed as members + of the array, using ``arr.x`` and ``arr.y``. + + Parameters + ---------- + shape : tuple + Shape of output array. + dtype : data-type, optional + The desired data-type. By default, the data-type is determined + from `formats`, `names`, `titles`, `aligned` and `byteorder`. + formats : list of data-types, optional + A list containing the data-types for the different columns, e.g. + ``['i4', 'f8', 'i4']``. `formats` does *not* support the new + convention of using types directly, i.e. ``(int, float, int)``. + Note that `formats` must be a list, not a tuple. + Given that `formats` is somewhat limited, we recommend specifying + `dtype` instead. 
+ names : tuple of str, optional + The name of each column, e.g. ``('x', 'y', 'z')``. + buf : buffer, optional + By default, a new array is created of the given shape and data-type. + If `buf` is specified and is an object exposing the buffer interface, + the array will use the memory from the existing buffer. In this case, + the `offset` and `strides` keywords are available. + + Other Parameters + ---------------- + titles : tuple of str, optional + Aliases for column names. For example, if `names` were + ``('x', 'y', 'z')`` and `titles` is + ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then + ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. + byteorder : {'<', '>', '='}, optional + Byte-order for all fields. + aligned : bool, optional + Align the fields in memory as the C-compiler would. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + offset : int, optional + Start reading buffer (`buf`) from this offset onwards. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Returns + ------- + rec : recarray + Empty array of the given shape and type. + + See Also + -------- + rec.fromrecords : Construct a record array from data. + record : fundamental data-type for `recarray`. + format_parser : determine a data-type from formats, names, titles. + + Notes + ----- + This constructor can be compared to ``empty``: it creates a new record + array but does not fill it with data. To create a record array from data, + use one of the following methods: + + 1. Create a standard ndarray and convert it to a record array, + using ``arr.view(np.recarray)`` + 2. Use the `buf` keyword. + 3. Use `np.rec.fromrecords`. + + Examples + -------- + Create an array with two fields, ``x`` and ``y``: + + >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)]) + >>> x + array([(1.0, 2), (3.0, 4)], + dtype=[('x', '>> x['x'] + array([ 1., 3.]) + + View the array as a record array: + + >>> x = x.view(np.recarray) + + >>> x.x + array([ 1., 3.]) + + >>> x.y + array([2, 4]) + + Create a new, empty record array: + + >>> np.recarray((2,), + ... 
dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP + rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), + (3471280, 1.2134086255804012e-316, 0)], + dtype=[('x', ' 0 or self.shape == (0,): + lst = sb.array2string( + self, separator=', ', prefix=prefix, suffix=',') + else: + # show zero-length shape unless it is (0,) + lst = "[], shape=%s" % (repr(self.shape),) + + lf = '\n'+' '*len(prefix) + if get_printoptions()['legacy'] == '1.13': + lf = ' ' + lf # trailing space + return fmt % (lst, lf, repr_dtype) + + def field(self, attr, val=None): + if isinstance(attr, int): + names = ndarray.__getattribute__(self, 'dtype').names + attr = names[attr] + + fielddict = ndarray.__getattribute__(self, 'dtype').fields + + res = fielddict[attr][:2] + + if val is None: + obj = self.getfield(*res) + if obj.dtype.fields: + return obj + return obj.view(ndarray) + else: + return self.setfield(val, *res) + + +def fromarrays(arrayList, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """ create a record array from a (flat) list of arrays + + >>> x1=np.array([1,2,3,4]) + >>> x2=np.array(['a','dd','xyz','12']) + >>> x3=np.array([1.1,2,3,4]) + >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') + >>> print(r[1]) + (2, 'dd', 2.0) + >>> x1[1]=34 + >>> r.a + array([1, 2, 3, 4]) + """ + + arrayList = [sb.asarray(x) for x in arrayList] + + if shape is None or shape == 0: + shape = arrayList[0].shape + + if isinstance(shape, int): + shape = (shape,) + + if formats is None and dtype is None: + # go through each object in the list to see if it is an ndarray + # and determine the formats. + formats = [] + for obj in arrayList: + if not isinstance(obj, ndarray): + raise ValueError("item in the array list must be an ndarray.") + formats.append(obj.dtype.str) + formats = ','.join(formats) + + if dtype is not None: + descr = sb.dtype(dtype) + _names = descr.names + else: + parsed = format_parser(formats, names, titles, aligned, byteorder) + _names = parsed._names + descr = parsed._descr + + # Determine shape from data-type. + if len(descr) != len(arrayList): + raise ValueError("mismatch between the number of fields " + "and the number of arrays") + + d0 = descr[0].shape + nn = len(d0) + if nn > 0: + shape = shape[:-nn] + + for k, obj in enumerate(arrayList): + nn = descr[k].ndim + testshape = obj.shape[:obj.ndim - nn] + if testshape != shape: + raise ValueError("array-shape mismatch in array %d" % k) + + _array = recarray(shape, descr) + + # populate the record array (makes a copy) + for i in range(len(arrayList)): + _array[_names[i]] = arrayList[i] + + return _array + +def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None): + """ create a recarray from a list of records in text form + + The data in the same field can be heterogeneous, they will be promoted + to the highest data type. This method is intended for creating + smaller record arrays. If used to create large array without formats + defined + + r=fromrecords([(2,3.,'abc')]*100000) + + it can be slow. + + If formats is None, then this will auto-detect formats. Use list of + tuples rather than list of lists for faster processing. + + >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], + ... 
names='col1,col2,col3') + >>> print(r[0]) + (456, 'dbe', 1.2) + >>> r.col1 + array([456, 2]) + >>> r.col2 + array(['dbe', 'de'], + dtype='|S3') + >>> import pickle + >>> print(pickle.loads(pickle.dumps(r))) + [(456, 'dbe', 1.2) (2, 'de', 1.3)] + """ + + if formats is None and dtype is None: # slower + obj = sb.array(recList, dtype=object) + arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])] + return fromarrays(arrlist, formats=formats, shape=shape, names=names, + titles=titles, aligned=aligned, byteorder=byteorder) + + if dtype is not None: + descr = sb.dtype((record, dtype)) + else: + descr = format_parser(formats, names, titles, aligned, byteorder)._descr + + try: + retval = sb.array(recList, dtype=descr) + except (TypeError, ValueError): + if (shape is None or shape == 0): + shape = len(recList) + if isinstance(shape, (int, long)): + shape = (shape,) + if len(shape) > 1: + raise ValueError("Can only deal with 1-d array.") + _array = recarray(shape, descr) + for k in range(_array.size): + _array[k] = tuple(recList[k]) + # list of lists instead of list of tuples ? + # 2018-02-07, 1.14.1 + warnings.warn( + "fromrecords expected a list of tuples, may have received a list " + "of lists instead. In the future that will raise an error", + FutureWarning, stacklevel=2) + return _array + else: + if shape is not None and retval.shape != shape: + retval.shape = shape + + res = retval.view(recarray) + + return res + + +def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """ create a (read-only) record array from binary data contained in + a string""" + + if dtype is None and formats is None: + raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder)._descr + + itemsize = descr.itemsize + if (shape is None or shape == 0 or shape == -1): + shape = (len(datastring) - offset) // itemsize + + _array = recarray(shape, descr, buf=datastring, offset=offset) + return _array + +def get_remaining_size(fd): + try: + fn = fd.fileno() + except AttributeError: + return os.path.getsize(fd.name) - fd.tell() + st = os.fstat(fn) + size = st.st_size - fd.tell() + return size + +def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create an array from binary file data + + If file is a string or a path-like object then that file is opened, + else it is assumed to be a file object. The file object must + support random access (i.e. it must have tell and seek methods). + + >>> from tempfile import TemporaryFile + >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a[5] = (0.5,10,'abcde') + >>> + >>> fd=TemporaryFile() + >>> a = a.newbyteorder('<') + >>> a.tofile(fd) + >>> + >>> fd.seek(0) + >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, + ... 
byteorder='<') + >>> print(r[5]) + (0.5, 10, 'abcde') + >>> r.shape + (10,) + """ + + if dtype is None and formats is None: + raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") + + if (shape is None or shape == 0): + shape = (-1,) + elif isinstance(shape, (int, long)): + shape = (shape,) + + if isfileobj(fd): + # file already opened + name = 0 + else: + # open file + fd = open(os_fspath(fd), 'rb') + name = 1 + + if (offset > 0): + fd.seek(offset, 1) + size = get_remaining_size(fd) + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder)._descr + + itemsize = descr.itemsize + + shapeprod = sb.array(shape).prod(dtype=nt.intp) + shapesize = shapeprod * itemsize + if shapesize < 0: + shape = list(shape) + shape[shape.index(-1)] = size // -shapesize + shape = tuple(shape) + shapeprod = sb.array(shape).prod(dtype=nt.intp) + + nbytes = shapeprod * itemsize + + if nbytes > size: + raise ValueError( + "Not enough bytes left in file for specified shape and type") + + # create the array + _array = recarray(shape, descr) + nbytesread = fd.readinto(_array.data) + if nbytesread != nbytes: + raise IOError("Didn't read as many bytes as expected") + if name: + fd.close() + + return _array + +def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, copy=True): + """Construct a record array from a wide-variety of objects. + """ + + if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and + (formats is None) and (dtype is None)): + raise ValueError("Must define formats (or dtype) if object is " + "None, string, or an open file") + + kwds = {} + if dtype is not None: + dtype = sb.dtype(dtype) + elif formats is not None: + dtype = format_parser(formats, names, titles, + aligned, byteorder)._descr + else: + kwds = {'formats': formats, + 'names': names, + 'titles': titles, + 'aligned': aligned, + 'byteorder': byteorder + } + + if obj is None: + if shape is None: + raise ValueError("Must define a shape if obj is None") + return recarray(shape, dtype, buf=obj, offset=offset, strides=strides) + + elif isinstance(obj, bytes): + return fromstring(obj, dtype, shape=shape, offset=offset, **kwds) + + elif isinstance(obj, (list, tuple)): + if isinstance(obj[0], (tuple, list)): + return fromrecords(obj, dtype=dtype, shape=shape, **kwds) + else: + return fromarrays(obj, dtype=dtype, shape=shape, **kwds) + + elif isinstance(obj, recarray): + if dtype is not None and (obj.dtype != dtype): + new = obj.view(dtype) + else: + new = obj + if copy: + new = new.copy() + return new + + elif isfileobj(obj): + return fromfile(obj, dtype=dtype, shape=shape, offset=offset) + + elif isinstance(obj, ndarray): + if dtype is not None and (obj.dtype != dtype): + new = obj.view(dtype) + else: + new = obj + if copy: + new = new.copy() + return new.view(recarray) + + else: + interface = getattr(obj, "__array_interface__", None) + if interface is None or not isinstance(interface, dict): + raise ValueError("Unknown input type") + obj = sb.array(obj) + if dtype is not None and (obj.dtype != dtype): + obj = obj.view(dtype) + return obj.view(recarray) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/records.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/records.pyc new file mode 100644 index 0000000..e08c31c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/records.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/numpy/core/setup.py b/project/venv/lib/python2.7/site-packages/numpy/core/setup.py new file mode 100644 index 0000000..9ccca62 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/setup.py @@ -0,0 +1,974 @@ +from __future__ import division, print_function + +import os +import sys +import pickle +import copy +import warnings +import platform +from os.path import join +from numpy.distutils import log +from distutils.dep_util import newer +from distutils.sysconfig import get_config_var +from numpy._build_utils.apple_accelerate import ( + uses_accelerate_framework, get_sgemv_fix + ) +from numpy.compat import npy_load_module +from setup_common import * + +# Set to True to enable relaxed strides checking. This (mostly) means +# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags. +NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0") + +# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a +# bogus value for affected strides in order to help smoke out bad stride usage +# when relaxed stride checking is enabled. +NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0") +NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING + +# XXX: ugly, we use a class to avoid calling twice some expensive functions in +# config.h/numpyconfig.h. I don't see a better way because distutils force +# config.h generation inside an Extension class, and as such sharing +# configuration information between extensions is not easy. +# Using a pickled-based memoize does not work because config_cmd is an instance +# method, which cPickle does not like. +# +# Use pickle in all cases, as cPickle is gone in python3 and the difference +# in time is only in build. -- Charles Harris, 2013-03-30 + +class CallOnceOnly(object): + def __init__(self): + self._check_types = None + self._check_ieee_macros = None + self._check_complex = None + + def check_types(self, *a, **kw): + if self._check_types is None: + out = check_types(*a, **kw) + self._check_types = pickle.dumps(out) + else: + out = copy.deepcopy(pickle.loads(self._check_types)) + return out + + def check_ieee_macros(self, *a, **kw): + if self._check_ieee_macros is None: + out = check_ieee_macros(*a, **kw) + self._check_ieee_macros = pickle.dumps(out) + else: + out = copy.deepcopy(pickle.loads(self._check_ieee_macros)) + return out + + def check_complex(self, *a, **kw): + if self._check_complex is None: + out = check_complex(*a, **kw) + self._check_complex = pickle.dumps(out) + else: + out = copy.deepcopy(pickle.loads(self._check_complex)) + return out + +def pythonlib_dir(): + """return path where libpython* is.""" + if sys.platform == 'win32': + return os.path.join(sys.prefix, "libs") + else: + return get_config_var('LIBDIR') + +def is_npy_no_signal(): + """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration + header.""" + return sys.platform == 'win32' + +def is_npy_no_smp(): + """Return True if the NPY_NO_SMP symbol must be defined in public + header (when SMP support cannot be reliably enabled).""" + # Perhaps a fancier check is in order here. + # so that threads are only enabled if there + # are actually multiple CPUS? -- but + # threaded code can be nice even on a single + # CPU so that long-calculating code doesn't + # block. 
+ return 'NPY_NOSMP' in os.environ + +def win32_checks(deflist): + from numpy.distutils.misc_util import get_build_architecture + a = get_build_architecture() + + # Distutils hack on AMD64 on windows + print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % + (a, os.name, sys.platform)) + if a == 'AMD64': + deflist.append('DISTUTILS_USE_SDK') + + # On win32, force long double format string to be 'g', not + # 'Lg', since the MS runtime does not support long double whose + # size is > sizeof(double) + if a == "Intel" or a == "AMD64": + deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') + +def check_math_capabilities(config, moredefs, mathlibs): + def check_func(func_name): + return config.check_func(func_name, libraries=mathlibs, + decl=True, call=True) + + def check_funcs_once(funcs_name): + decl = dict([(f, True) for f in funcs_name]) + st = config.check_funcs_once(funcs_name, libraries=mathlibs, + decl=decl, call=decl) + if st: + moredefs.extend([(fname2def(f), 1) for f in funcs_name]) + return st + + def check_funcs(funcs_name): + # Use check_funcs_once first, and if it does not work, test func per + # func. Return success only if all the functions are available + if not check_funcs_once(funcs_name): + # Global check failed, check func per func + for f in funcs_name: + if check_func(f): + moredefs.append((fname2def(f), 1)) + return 0 + else: + return 1 + + #use_msvc = config.check_decl("_MSC_VER") + + if not check_funcs_once(MANDATORY_FUNCS): + raise SystemError("One of the required function to build numpy is not" + " available (the list is %s)." % str(MANDATORY_FUNCS)) + + # Standard functions which may not be available and for which we have a + # replacement implementation. Note that some of these are C99 functions. + + # XXX: hack to circumvent cpp pollution from python: python put its + # config.h in the public namespace, so we have a clash for the common + # functions we test. We remove every function tested by python's + # autoconf, hoping their own test are correct + for f in OPTIONAL_STDFUNCS_MAYBE: + if config.check_decl(fname2def(f), + headers=["Python.h", "math.h"]): + OPTIONAL_STDFUNCS.remove(f) + + check_funcs(OPTIONAL_STDFUNCS) + + for h in OPTIONAL_HEADERS: + if config.check_func("", decl=False, call=False, headers=[h]): + h = h.replace(".", "_").replace(os.path.sep, "_") + moredefs.append((fname2def(h), 1)) + + for tup in OPTIONAL_INTRINSICS: + headers = None + if len(tup) == 2: + f, args, m = tup[0], tup[1], fname2def(tup[0]) + elif len(tup) == 3: + f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0]) + else: + f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3]) + if config.check_func(f, decl=False, call=True, call_args=args, + headers=headers): + moredefs.append((m, 1)) + + for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES: + if config.check_gcc_function_attribute(dec, fn): + moredefs.append((fname2def(fn), 1)) + + for fn in OPTIONAL_VARIABLE_ATTRIBUTES: + if config.check_gcc_variable_attribute(fn): + m = fn.replace("(", "_").replace(")", "_") + moredefs.append((fname2def(m), 1)) + + # C99 functions: float and long double versions + check_funcs(C99_FUNCS_SINGLE) + check_funcs(C99_FUNCS_EXTENDED) + +def check_complex(config, mathlibs): + priv = [] + pub = [] + + try: + if os.uname()[0] == "Interix": + warnings.warn("Disabling broken complex support. See #1365", stacklevel=2) + return priv, pub + except Exception: + # os.uname not available on all platforms. 
blanket except ugly but safe + pass + + # Check for complex support + st = config.check_header('complex.h') + if st: + priv.append(('HAVE_COMPLEX_H', 1)) + pub.append(('NPY_USE_C99_COMPLEX', 1)) + + for t in C99_COMPLEX_TYPES: + st = config.check_type(t, headers=["complex.h"]) + if st: + pub.append(('NPY_HAVE_%s' % type2def(t), 1)) + + def check_prec(prec): + flist = [f + prec for f in C99_COMPLEX_FUNCS] + decl = dict([(f, True) for f in flist]) + if not config.check_funcs_once(flist, call=decl, decl=decl, + libraries=mathlibs): + for f in flist: + if config.check_func(f, call=True, decl=True, + libraries=mathlibs): + priv.append((fname2def(f), 1)) + else: + priv.extend([(fname2def(f), 1) for f in flist]) + + check_prec('') + check_prec('f') + check_prec('l') + + return priv, pub + +def check_ieee_macros(config): + priv = [] + pub = [] + + macros = [] + + def _add_decl(f): + priv.append(fname2def("decl_%s" % f)) + pub.append('NPY_%s' % fname2def("decl_%s" % f)) + + # XXX: hack to circumvent cpp pollution from python: python put its + # config.h in the public namespace, so we have a clash for the common + # functions we test. We remove every function tested by python's + # autoconf, hoping their own test are correct + _macros = ["isnan", "isinf", "signbit", "isfinite"] + for f in _macros: + py_symbol = fname2def("decl_%s" % f) + already_declared = config.check_decl(py_symbol, + headers=["Python.h", "math.h"]) + if already_declared: + if config.check_macro_true(py_symbol, + headers=["Python.h", "math.h"]): + pub.append('NPY_%s' % fname2def("decl_%s" % f)) + else: + macros.append(f) + # Normally, isnan and isinf are macro (C99), but some platforms only have + # func, or both func and macro version. Check for macro only, and define + # replacement ones if not found. + # Note: including Python.h is necessary because it modifies some math.h + # definitions + for f in macros: + st = config.check_decl(f, headers=["Python.h", "math.h"]) + if st: + _add_decl(f) + + return priv, pub + +def check_types(config_cmd, ext, build_dir): + private_defines = [] + public_defines = [] + + # Expected size (in number of bytes) for each type. This is an + # optimization: those are only hints, and an exhaustive search for the size + # is done if the hints are wrong. + expected = {'short': [2], 'int': [4], 'long': [8, 4], + 'float': [4], 'double': [8], 'long double': [16, 12, 8], + 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8], + 'off_t': [8, 4]} + + # Check we have the python header (-dev* packages on Linux) + result = config_cmd.check_header('Python.h') + if not result: + python = 'python' + if '__pypy__' in sys.builtin_module_names: + python = 'pypy' + raise SystemError( + "Cannot compile 'Python.h'. 
Perhaps you need to " + "install {0}-dev|{0}-devel.".format(python)) + res = config_cmd.check_header("endian.h") + if res: + private_defines.append(('HAVE_ENDIAN_H', 1)) + public_defines.append(('NPY_HAVE_ENDIAN_H', 1)) + res = config_cmd.check_header("sys/endian.h") + if res: + private_defines.append(('HAVE_SYS_ENDIAN_H', 1)) + public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1)) + + # Check basic types sizes + for type in ('short', 'int', 'long'): + res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"]) + if res: + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type))) + else: + res = config_cmd.check_type_size(type, expected=expected[type]) + if res >= 0: + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + for type in ('float', 'double', 'long double'): + already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), + headers=["Python.h"]) + res = config_cmd.check_type_size(type, expected=expected[type]) + if res >= 0: + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) + if not already_declared and not type == 'long double': + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + # Compute size of corresponding complex type: used to check that our + # definition is binary compatible with C99 complex type (check done at + # build time in npy_common.h) + complex_def = "struct {%s __x; %s __y;}" % (type, type) + res = config_cmd.check_type_size(complex_def, + expected=[2 * x for x in expected[type]]) + if res >= 0: + public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % complex_def) + + for type in ('Py_intptr_t', 'off_t'): + res = config_cmd.check_type_size(type, headers=["Python.h"], + library_dirs=[pythonlib_dir()], + expected=expected[type]) + + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + # We check declaration AND type because that's how distutils does it. + if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): + res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], + library_dirs=[pythonlib_dir()], + expected=expected['PY_LONG_LONG']) + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') + + res = config_cmd.check_type_size('long long', + expected=expected['long long']) + if res >= 0: + #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" 
% 'long long') + + if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): + raise RuntimeError( + "Config wo CHAR_BIT is not supported" + ", please contact the maintainers") + + return private_defines, public_defines + +def check_mathlib(config_cmd): + # Testing the C math library + mathlibs = [] + mathlibs_choices = [[], ['m'], ['cpml']] + mathlib = os.environ.get('MATHLIB') + if mathlib: + mathlibs_choices.insert(0, mathlib.split(',')) + for libs in mathlibs_choices: + if config_cmd.check_func("exp", libraries=libs, decl=True, call=True): + mathlibs = libs + break + else: + raise EnvironmentError("math library missing; rerun " + "setup.py after setting the " + "MATHLIB env variable") + return mathlibs + +def visibility_define(config): + """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty + string).""" + hide = '__attribute__((visibility("hidden")))' + if config.check_gcc_function_attribute(hide, 'hideme'): + return hide + else: + return '' + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration, dot_join + from numpy.distutils.system_info import get_info + + config = Configuration('core', parent_package, top_path) + local_dir = config.local_path + codegen_dir = join(local_dir, 'code_generators') + + if is_released(config): + warnings.simplefilter('error', MismatchCAPIWarning) + + # Check whether we have a mismatch between the set C API VERSION and the + # actual C API VERSION + check_api_version(C_API_VERSION, codegen_dir) + + generate_umath_py = join(codegen_dir, 'generate_umath.py') + n = dot_join(config.name, 'generate_umath') + generate_umath = npy_load_module('_'.join(n.split('.')), + generate_umath_py, ('.py', 'U', 1)) + + header_dir = 'include/numpy' # this is relative to config.path_in_package + + cocache = CallOnceOnly() + + def generate_config_h(ext, build_dir): + target = join(build_dir, header_dir, 'config.h') + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) + + if newer(__file__, target): + config_cmd = config.get_config_cmd() + log.info('Generating %s', target) + + # Check sizeof + moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir) + + # Check math library and C99 math funcs availability + mathlibs = check_mathlib(config_cmd) + moredefs.append(('MATHLIB', ','.join(mathlibs))) + + check_math_capabilities(config_cmd, moredefs, mathlibs) + moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) + moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) + + # Signal check + if is_npy_no_signal(): + moredefs.append('__NPY_PRIVATE_NO_SIGNAL') + + # Windows checks + if sys.platform == 'win32' or os.name == 'nt': + win32_checks(moredefs) + + # C99 restrict keyword + moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict())) + + # Inline check + inline = config_cmd.check_inline() + + # Use relaxed stride checking + if NPY_RELAXED_STRIDES_CHECKING: + moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) + + # Use bogus stride debug aid when relaxed strides are enabled + if NPY_RELAXED_STRIDES_DEBUG: + moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) + + # Get long double representation + rep = check_long_double_representation(config_cmd) + moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) + + # Py3K check + if sys.version_info[0] == 3: + moredefs.append(('NPY_PY3K', 1)) + + # Generate the config.h file from moredefs + target_f = open(target, 'w') + for d in moredefs: + if isinstance(d, str): + target_f.write('#define %s\n' % (d)) + else: + 
target_f.write('#define %s %s\n' % (d[0], d[1])) + + # define inline to our keyword, or nothing + target_f.write('#ifndef __cplusplus\n') + if inline == 'inline': + target_f.write('/* #undef inline */\n') + else: + target_f.write('#define inline %s\n' % inline) + target_f.write('#endif\n') + + # add the guard to make sure config.h is never included directly, + # but always through npy_config.h + target_f.write(""" +#ifndef _NPY_NPY_CONFIG_H_ +#error config.h should never be included directly, include npy_config.h instead +#endif +""") + + target_f.close() + print('File:', target) + target_f = open(target) + print(target_f.read()) + target_f.close() + print('EOF') + else: + mathlibs = [] + target_f = open(target) + for line in target_f: + s = '#define MATHLIB' + if line.startswith(s): + value = line[len(s):].strip() + if value: + mathlibs.extend(value.split(',')) + target_f.close() + + # Ugly: this can be called within a library and not an extension, + # in which case there is no libraries attributes (and none is + # needed). + if hasattr(ext, 'libraries'): + ext.libraries.extend(mathlibs) + + incl_dir = os.path.dirname(target) + if incl_dir not in config.numpy_include_dirs: + config.numpy_include_dirs.append(incl_dir) + + return target + + def generate_numpyconfig_h(ext, build_dir): + """Depends on config.h: generate_config_h has to be called before !""" + # put common include directory in build_dir on search path + # allows using code generation in headers headers + config.add_include_dirs(join(build_dir, "src", "common")) + config.add_include_dirs(join(build_dir, "src", "npymath")) + + target = join(build_dir, header_dir, '_numpyconfig.h') + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) + if newer(__file__, target): + config_cmd = config.get_config_cmd() + log.info('Generating %s', target) + + # Check sizeof + ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir) + + if is_npy_no_signal(): + moredefs.append(('NPY_NO_SIGNAL', 1)) + + if is_npy_no_smp(): + moredefs.append(('NPY_NO_SMP', 1)) + else: + moredefs.append(('NPY_NO_SMP', 0)) + + mathlibs = check_mathlib(config_cmd) + moredefs.extend(cocache.check_ieee_macros(config_cmd)[1]) + moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1]) + + if NPY_RELAXED_STRIDES_CHECKING: + moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) + + if NPY_RELAXED_STRIDES_DEBUG: + moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) + + # Check whether we can use inttypes (C99) formats + if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']): + moredefs.append(('NPY_USE_C99_FORMATS', 1)) + + # visibility check + hidden_visibility = visibility_define(config_cmd) + moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility)) + + # Add the C API/ABI versions + moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION)) + moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION)) + + # Add moredefs to header + target_f = open(target, 'w') + for d in moredefs: + if isinstance(d, str): + target_f.write('#define %s\n' % (d)) + else: + target_f.write('#define %s %s\n' % (d[0], d[1])) + + # Define __STDC_FORMAT_MACROS + target_f.write(""" +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS 1 +#endif +""") + target_f.close() + + # Dump the numpyconfig.h header to stdout + print('File: %s' % target) + target_f = open(target) + print(target_f.read()) + target_f.close() + print('EOF') + config.add_data_files((header_dir, target)) + return target + + def generate_api_func(module_name): + def 
generate_api(ext, build_dir): + script = join(codegen_dir, module_name + '.py') + sys.path.insert(0, codegen_dir) + try: + m = __import__(module_name) + log.info('executing %s', script) + h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir)) + finally: + del sys.path[0] + config.add_data_files((header_dir, h_file), + (header_dir, doc_file)) + return (h_file,) + return generate_api + + generate_numpy_api = generate_api_func('generate_numpy_api') + generate_ufunc_api = generate_api_func('generate_ufunc_api') + + config.add_include_dirs(join(local_dir, "src", "common")) + config.add_include_dirs(join(local_dir, "src")) + config.add_include_dirs(join(local_dir)) + + config.add_data_files('include/numpy/*.h') + config.add_include_dirs(join('src', 'npymath')) + config.add_include_dirs(join('src', 'multiarray')) + config.add_include_dirs(join('src', 'umath')) + config.add_include_dirs(join('src', 'npysort')) + + config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in process + config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")]) + if sys.platform[:3] == "aix": + config.add_define_macros([("_LARGE_FILES", None)]) + else: + config.add_define_macros([("_FILE_OFFSET_BITS", "64")]) + config.add_define_macros([('_LARGEFILE_SOURCE', '1')]) + config.add_define_macros([('_LARGEFILE64_SOURCE', '1')]) + + config.numpy_include_dirs.extend(config.paths('include')) + + deps = [join('src', 'npymath', '_signbit.c'), + join('include', 'numpy', '*object.h'), + join(codegen_dir, 'genapi.py'), + ] + + ####################################################################### + # dummy module # + ####################################################################### + + # npymath needs the config.h and numpyconfig.h files to be generated, but + # build_clib cannot handle generate_config_h and generate_numpyconfig_h + # (don't ask). Because clib are generated before extensions, we have to + # explicitly add an extension which has generate_config_h and + # generate_numpyconfig_h as sources *before* adding npymath. + + config.add_extension('_dummy', + sources=[join('src', 'dummymodule.c'), + generate_config_h, + generate_numpyconfig_h, + generate_numpy_api] + ) + + ####################################################################### + # npymath library # + ####################################################################### + + subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")]) + + def get_mathlib_info(*args): + # Another ugly hack: the mathlib info is known once build_src is run, + # but we cannot use add_installed_pkg_config here either, so we only + # update the substitution dictionary during npymath build + config_cmd = config.get_config_cmd() + + # Check that the toolchain works, to fail early if it doesn't + # (avoid late errors with MATHLIB which are confusing if the + # compiler does not work). 
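The "fail early" toolchain check described above (link a trivial C program before trusting any further probes, which `config_cmd.try_link` does just below) can be approximated outside numpy.distutils with the plain distutils compiler interface. A rough standalone sketch under that assumption; it makes no claim to matching `config_cmd`'s exact behaviour:

# Standalone approximation of the toolchain sanity check.
import os
import tempfile
from distutils.ccompiler import new_compiler
from distutils.errors import CCompilerError, DistutilsError

def toolchain_can_link():
    tmpdir = tempfile.mkdtemp()
    src = os.path.join(tmpdir, 'main.c')
    with open(src, 'w') as f:
        f.write('int main(void) { return 0; }\n')
    cc = new_compiler()
    try:
        objects = cc.compile([src], output_dir=tmpdir)
        cc.link_executable(objects, os.path.join(tmpdir, 'a'))
        return True
    except (CCompilerError, DistutilsError):
        return False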
+ st = config_cmd.try_link('int main(void) { return 0;}') + if not st: + raise RuntimeError("Broken toolchain: cannot link a simple C program") + mlibs = check_mathlib(config_cmd) + + posix_mlib = ' '.join(['-l%s' % l for l in mlibs]) + msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs]) + subst_dict["posix_mathlib"] = posix_mlib + subst_dict["msvc_mathlib"] = msvc_mlib + + npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'), + join('src', 'npymath', 'npy_math.c'), + join('src', 'npymath', 'ieee754.c.src'), + join('src', 'npymath', 'npy_math_complex.c.src'), + join('src', 'npymath', 'halffloat.c') + ] + + # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977. + is_msvc = platform.system() == 'Windows' + config.add_installed_library('npymath', + sources=npymath_sources + [get_mathlib_info], + install_dir='lib', + build_info={ + 'include_dirs' : [], # empty list required for creating npy_math_internal.h + 'extra_compiler_args' : (['/GL-'] if is_msvc else []), + }) + config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", + subst_dict) + config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", + subst_dict) + + ####################################################################### + # npysort library # + ####################################################################### + + # This library is created for the build but it is not installed + npysort_sources = [join('src', 'common', 'npy_sort.h.src'), + join('src', 'npysort', 'quicksort.c.src'), + join('src', 'npysort', 'mergesort.c.src'), + join('src', 'npysort', 'heapsort.c.src'), + join('src', 'common', 'npy_partition.h.src'), + join('src', 'npysort', 'selection.c.src'), + join('src', 'common', 'npy_binsearch.h.src'), + join('src', 'npysort', 'binsearch.c.src'), + ] + config.add_library('npysort', + sources=npysort_sources, + include_dirs=[]) + + ####################################################################### + # multiarray_tests module # + ####################################################################### + + config.add_extension('_multiarray_tests', + sources=[join('src', 'multiarray', '_multiarray_tests.c.src'), + join('src', 'common', 'mem_overlap.c')], + depends=[join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_extint128.h')], + libraries=['npymath']) + + ####################################################################### + # _multiarray_umath module - common part # + ####################################################################### + + common_deps = [ + join('src', 'common', 'array_assign.h'), + join('src', 'common', 'binop_override.h'), + join('src', 'common', 'cblasfuncs.h'), + join('src', 'common', 'lowlevel_strided_loops.h'), + join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_cblas.h'), + join('src', 'common', 'npy_config.h'), + join('src', 'common', 'npy_ctypes.h'), + join('src', 'common', 'npy_extint128.h'), + join('src', 'common', 'npy_import.h'), + join('src', 'common', 'npy_longdouble.h'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'common', 'ucsnarrow.h'), + join('src', 'common', 'ufunc_override.h'), + join('src', 'common', 'umathmodule.h'), + join('src', 'common', 'numpyos.h'), + ] + + common_src = [ + join('src', 'common', 'array_assign.c'), + join('src', 'common', 'mem_overlap.c'), + join('src', 'common', 'npy_longdouble.c'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'common', 'ucsnarrow.c'), + join('src', 'common', 'ufunc_override.c'), + join('src', 'common', 'numpyos.c'), + 
] + + blas_info = get_info('blas_opt', 0) + if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []): + extra_info = blas_info + # These files are also in MANIFEST.in so that they are always in + # the source distribution independently of HAVE_CBLAS. + common_src.extend([join('src', 'common', 'cblasfuncs.c'), + join('src', 'common', 'python_xerbla.c'), + ]) + if uses_accelerate_framework(blas_info): + common_src.extend(get_sgemv_fix()) + else: + extra_info = {} + + ####################################################################### + # _multiarray_umath module - multiarray part # + ####################################################################### + + multiarray_deps = [ + join('src', 'multiarray', 'arrayobject.h'), + join('src', 'multiarray', 'arraytypes.h'), + join('src', 'multiarray', 'arrayfunction_override.h'), + join('src', 'multiarray', 'buffer.h'), + join('src', 'multiarray', 'calculation.h'), + join('src', 'multiarray', 'common.h'), + join('src', 'multiarray', 'convert_datatype.h'), + join('src', 'multiarray', 'convert.h'), + join('src', 'multiarray', 'conversion_utils.h'), + join('src', 'multiarray', 'ctors.h'), + join('src', 'multiarray', 'descriptor.h'), + join('src', 'multiarray', 'dragon4.h'), + join('src', 'multiarray', 'getset.h'), + join('src', 'multiarray', 'hashdescr.h'), + join('src', 'multiarray', 'iterators.h'), + join('src', 'multiarray', 'mapping.h'), + join('src', 'multiarray', 'methods.h'), + join('src', 'multiarray', 'multiarraymodule.h'), + join('src', 'multiarray', 'nditer_impl.h'), + join('src', 'multiarray', 'number.h'), + join('src', 'multiarray', 'refcount.h'), + join('src', 'multiarray', 'scalartypes.h'), + join('src', 'multiarray', 'sequence.h'), + join('src', 'multiarray', 'shape.h'), + join('src', 'multiarray', 'strfuncs.h'), + join('src', 'multiarray', 'typeinfo.h'), + join('src', 'multiarray', 'usertypes.h'), + join('src', 'multiarray', 'vdot.h'), + join('include', 'numpy', 'arrayobject.h'), + join('include', 'numpy', '_neighborhood_iterator_imp.h'), + join('include', 'numpy', 'npy_endian.h'), + join('include', 'numpy', 'arrayscalars.h'), + join('include', 'numpy', 'noprefix.h'), + join('include', 'numpy', 'npy_interrupt.h'), + join('include', 'numpy', 'npy_3kcompat.h'), + join('include', 'numpy', 'npy_math.h'), + join('include', 'numpy', 'halffloat.h'), + join('include', 'numpy', 'npy_common.h'), + join('include', 'numpy', 'npy_os.h'), + join('include', 'numpy', 'utils.h'), + join('include', 'numpy', 'ndarrayobject.h'), + join('include', 'numpy', 'npy_cpu.h'), + join('include', 'numpy', 'numpyconfig.h'), + join('include', 'numpy', 'ndarraytypes.h'), + join('include', 'numpy', 'npy_1_7_deprecated_api.h'), + # add library sources as distuils does not consider libraries + # dependencies + ] + npysort_sources + npymath_sources + + multiarray_src = [ + join('src', 'multiarray', 'alloc.c'), + join('src', 'multiarray', 'arrayobject.c'), + join('src', 'multiarray', 'arraytypes.c.src'), + join('src', 'multiarray', 'array_assign_scalar.c'), + join('src', 'multiarray', 'array_assign_array.c'), + join('src', 'multiarray', 'arrayfunction_override.c'), + join('src', 'multiarray', 'buffer.c'), + join('src', 'multiarray', 'calculation.c'), + join('src', 'multiarray', 'compiled_base.c'), + join('src', 'multiarray', 'common.c'), + join('src', 'multiarray', 'convert.c'), + join('src', 'multiarray', 'convert_datatype.c'), + join('src', 'multiarray', 'conversion_utils.c'), + join('src', 'multiarray', 'ctors.c'), + join('src', 'multiarray', 
'datetime.c'), + join('src', 'multiarray', 'datetime_strings.c'), + join('src', 'multiarray', 'datetime_busday.c'), + join('src', 'multiarray', 'datetime_busdaycal.c'), + join('src', 'multiarray', 'descriptor.c'), + join('src', 'multiarray', 'dragon4.c'), + join('src', 'multiarray', 'dtype_transfer.c'), + join('src', 'multiarray', 'einsum.c.src'), + join('src', 'multiarray', 'flagsobject.c'), + join('src', 'multiarray', 'getset.c'), + join('src', 'multiarray', 'hashdescr.c'), + join('src', 'multiarray', 'item_selection.c'), + join('src', 'multiarray', 'iterators.c'), + join('src', 'multiarray', 'lowlevel_strided_loops.c.src'), + join('src', 'multiarray', 'mapping.c'), + join('src', 'multiarray', 'methods.c'), + join('src', 'multiarray', 'multiarraymodule.c'), + join('src', 'multiarray', 'nditer_templ.c.src'), + join('src', 'multiarray', 'nditer_api.c'), + join('src', 'multiarray', 'nditer_constr.c'), + join('src', 'multiarray', 'nditer_pywrap.c'), + join('src', 'multiarray', 'number.c'), + join('src', 'multiarray', 'refcount.c'), + join('src', 'multiarray', 'sequence.c'), + join('src', 'multiarray', 'shape.c'), + join('src', 'multiarray', 'scalarapi.c'), + join('src', 'multiarray', 'scalartypes.c.src'), + join('src', 'multiarray', 'strfuncs.c'), + join('src', 'multiarray', 'temp_elide.c'), + join('src', 'multiarray', 'typeinfo.c'), + join('src', 'multiarray', 'usertypes.c'), + join('src', 'multiarray', 'vdot.c'), + ] + + ####################################################################### + # _multiarray_umath module - umath part # + ####################################################################### + + def generate_umath_c(ext, build_dir): + target = join(build_dir, header_dir, '__umath_generated.c') + dir = os.path.dirname(target) + if not os.path.exists(dir): + os.makedirs(dir) + script = generate_umath_py + if newer(script, target): + f = open(target, 'w') + f.write(generate_umath.make_code(generate_umath.defdict, + generate_umath.__file__)) + f.close() + return [] + + umath_src = [ + join('src', 'umath', 'umathmodule.c'), + join('src', 'umath', 'reduction.c'), + join('src', 'umath', 'funcs.inc.src'), + join('src', 'umath', 'simd.inc.src'), + join('src', 'umath', 'loops.h.src'), + join('src', 'umath', 'loops.c.src'), + join('src', 'umath', 'matmul.h.src'), + join('src', 'umath', 'matmul.c.src'), + join('src', 'umath', 'ufunc_object.c'), + join('src', 'umath', 'extobj.c'), + join('src', 'umath', 'cpuid.c'), + join('src', 'umath', 'scalarmath.c.src'), + join('src', 'umath', 'ufunc_type_resolution.c'), + join('src', 'umath', 'override.c'), + ] + + umath_deps = [ + generate_umath_py, + join('include', 'numpy', 'npy_math.h'), + join('include', 'numpy', 'halffloat.h'), + join('src', 'multiarray', 'common.h'), + join('src', 'multiarray', 'number.h'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'umath', 'simd.inc.src'), + join('src', 'umath', 'override.h'), + join(codegen_dir, 'generate_ufunc_api.py'), + ] + + config.add_extension('_multiarray_umath', + sources=multiarray_src + umath_src + + npymath_sources + common_src + + [generate_config_h, + generate_numpyconfig_h, + generate_numpy_api, + join(codegen_dir, 'generate_numpy_api.py'), + join('*.py'), + generate_umath_c, + generate_ufunc_api, + ], + depends=deps + multiarray_deps + umath_deps + + common_deps, + libraries=['npymath', 'npysort'], + extra_info=extra_info) + + ####################################################################### + # umath_tests module # + 
####################################################################### + + config.add_extension('_umath_tests', + sources=[join('src', 'umath', '_umath_tests.c.src')]) + + ####################################################################### + # custom rational dtype module # + ####################################################################### + + config.add_extension('_rational_tests', + sources=[join('src', 'umath', '_rational_tests.c.src')]) + + ####################################################################### + # struct_ufunc_test module # + ####################################################################### + + config.add_extension('_struct_ufunc_tests', + sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')]) + + + ####################################################################### + # operand_flag_tests module # + ####################################################################### + + config.add_extension('_operand_flag_tests', + sources=[join('src', 'umath', '_operand_flag_tests.c.src')]) + + config.add_data_dir('tests') + config.add_data_dir('tests/data') + + config.make_svn_version_py() + + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/setup.pyc new file mode 100644 index 0000000..2466efd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.py b/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.py new file mode 100644 index 0000000..f837df1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.py @@ -0,0 +1,401 @@ +from __future__ import division, absolute_import, print_function + +# Code common to build tools +import sys +import warnings +import copy +import binascii + +from numpy.distutils.misc_util import mingw32 + + +#------------------- +# Versioning support +#------------------- +# How to change C_API_VERSION ? +# - increase C_API_VERSION value +# - record the hash for the new C API with the script cversions.py +# and add the hash to cversions.txt +# The hash values are used to remind developers when the C API number was not +# updated - generates a MismatchCAPIWarning warning which is turned into an +# exception for released version. + +# Binary compatibility version number. This number is increased whenever the +# C-API is changed such that binary compatibility is broken, i.e. whenever a +# recompile of extension modules is needed. +C_ABI_VERSION = 0x01000009 + +# Minor API version. This number is increased whenever a change is made to the +# C-API -- whether it breaks binary compatibility or not. Some changes, such +# as adding a function pointer to the end of the function table, can be made +# without breaking binary compatibility. In this case, only the C_API_VERSION +# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is +# broken, both C_API_VERSION and C_ABI_VERSION should be increased. 
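+# (Editor's note, illustrative example only, not part of the NumPy sources:
+# under the policy above, appending a new function pointer at the end of the
+# API table would bump C_API_VERSION from 0x0000000d to 0x0000000e while
+# C_ABI_VERSION stays 0x01000009; changing the layout of an existing struct
+# would require bumping both numbers.)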
+#
+# 0x00000008 - 1.7.x
+# 0x00000009 - 1.8.x
+# 0x00000009 - 1.9.x
+# 0x0000000a - 1.10.x
+# 0x0000000a - 1.11.x
+# 0x0000000a - 1.12.x
+# 0x0000000b - 1.13.x
+# 0x0000000c - 1.14.x
+# 0x0000000c - 1.15.x
+# 0x0000000d - 1.16.x
+C_API_VERSION = 0x0000000d
+
+class MismatchCAPIWarning(Warning):
+    pass
+
+def is_released(config):
+    """Return True if a released version of numpy is detected."""
+    from distutils.version import LooseVersion
+
+    v = config.get_version('../version.py')
+    if v is None:
+        raise ValueError("Could not get version")
+    pv = LooseVersion(vstring=v).version
+    if len(pv) > 3:
+        return False
+    return True
+
+def get_api_versions(apiversion, codegen_dir):
+    """
+    Return current C API checksum and the recorded checksum.
+
+    Return current C API checksum and the recorded checksum for the given
+    C API version.
+
+    """
+    # Compute the hash of the current API as defined in the .txt files in
+    # code_generators
+    sys.path.insert(0, codegen_dir)
+    try:
+        m = __import__('genapi')
+        numpy_api = __import__('numpy_api')
+        curapi_hash = m.fullapi_hash(numpy_api.full_api)
+        apis_hash = m.get_versions_hash()
+    finally:
+        del sys.path[0]
+
+    return curapi_hash, apis_hash[apiversion]
+
+def check_api_version(apiversion, codegen_dir):
+    """Emits a MismatchCAPIWarning if the C API version needs updating."""
+    curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
+
+    # If the hashes differ, it means that the api .txt files in
+    # codegen_dir have been updated without the API version being
+    # updated. Any modification in those .txt files should be reflected
+    # in the api and eventually abi versions.
+    # To compute the checksum of the current API, use
+    # code_generators/cversions.py script
+    if not curapi_hash == api_hash:
+        msg = ("API mismatch detected, the C API version "
+               "numbers have to be updated. Current C api version is %d, "
+               "with checksum %s, but recorded checksum for C API version %d in "
+               "codegen_dir/cversions.txt is %s. If functions were added in the "
+               "C API, you have to update C_API_VERSION in %s."
+               )
+        warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
+                             __file__),
+                      MismatchCAPIWarning, stacklevel=2)
+# Mandatory functions: if not found, fail the build
+MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
+                   "floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
+                   "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
+
+# Standard functions which may not be available and for which we have a
+# replacement implementation. Note that some of these are C99 functions.
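+# (Editor's note, illustrative only, not part of the NumPy sources: each
+# optional function listed below is typically probed with a small
+# compile/link test at build time; a successful probe emits a HAVE_* macro
+# named via fname2def() defined later in this file, e.g.
+# fname2def("expm1") == "HAVE_EXPM1".)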
+OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
+                     "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
+                     "copysign", "nextafter", "ftello", "fseeko",
+                     "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate",
+                     "backtrace", "madvise"]
+
+
+OPTIONAL_HEADERS = [
+# sse headers only enabled automatically on amd64/x32 builds
+                "xmmintrin.h",  # SSE
+                "emmintrin.h",  # SSE2
+                "features.h",  # for glibc version linux
+                "xlocale.h",  # see GH#8367
+                "dlfcn.h",  # dladdr
+                "sys/mman.h",  # madvise
+]
+
+# optional gcc compiler builtins and their call arguments, and optionally a
+# required header and definition name (HAVE_ prepended)
+# call arguments are required as the compiler will do strict signature checking
+OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
+                       ("__builtin_isinf", '5.'),
+                       ("__builtin_isfinite", '5.'),
+                       ("__builtin_bswap32", '5u'),
+                       ("__builtin_bswap64", '5u'),
+                       ("__builtin_expect", '5, 0'),
+                       ("__builtin_mul_overflow", '5, 5, (int*)5'),
+                       # broken on OSX 10.11, make sure it's not optimized away
+                       ("volatile int r = __builtin_cpu_supports", '"sse"',
+                        "stdio.h", "__BUILTIN_CPU_SUPPORTS"),
+                       # MMX only needed for icc, but some clangs don't have it
+                       ("_m_from_int64", '0', "emmintrin.h"),
+                       ("_mm_load_ps", '(float*)0', "xmmintrin.h"),  # SSE
+                       ("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
+                        "xmmintrin.h"),  # SSE
+                       ("_mm_load_pd", '(double*)0', "emmintrin.h"),  # SSE2
+                       ("__builtin_prefetch", "(float*)0, 0, 3"),
+                       # check that the linker can handle avx
+                       ("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"',
+                        "stdio.h", "LINK_AVX"),
+                       ("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"',
+                        "stdio.h", "LINK_AVX2"),
+                       ("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"),
+                       ]
+
+# function attributes
+# tested via "int %s %s(void *);" % (attribute, name)
+# function name will be converted to HAVE_ preprocessor macro
+OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
+                                 'attribute_optimize_unroll_loops'),
+                                ('__attribute__((optimize("O3")))',
+                                 'attribute_optimize_opt_3'),
+                                ('__attribute__((nonnull (1)))',
+                                 'attribute_nonnull'),
+                                ('__attribute__((target ("avx")))',
+                                 'attribute_target_avx'),
+                                ('__attribute__((target ("avx2")))',
+                                 'attribute_target_avx2'),
+                                ]
+
+# variable attributes tested via "int %s a" % attribute
+OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
+
+# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
+OPTIONAL_STDFUNCS_MAYBE = [
+    "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign",
+    "ftello", "fseeko"
+    ]
+
+# C99 functions: float and long double versions
+C99_FUNCS = [
+    "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil",
+    "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1",
+    "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2",
+    "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign",
+    "nextafter", "cbrt"
+    ]
+C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
+C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
+C99_COMPLEX_TYPES = [
+    'complex double', 'complex float', 'complex long double'
+    ]
+C99_COMPLEX_FUNCS = [
+    "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan",
+    "catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow",
+    "cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh"
+    ]
+
+def fname2def(name):
+    return "HAVE_%s" % name.upper()
+
+def sym2def(symbol):
+    define = symbol.replace(' ', '')
+    return define.upper()
+
+def type2def(symbol):
+    define = symbol.replace(' 
', '_') + return define.upper() + +# Code to detect long double representation taken from MPFR m4 macro +def check_long_double_representation(cmd): + cmd._check_compiler() + body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} + + # Disable whole program optimization (the default on vs2015, with python 3.5+) + # which generates intermediary object files and prevents checking the + # float representation. + if sys.platform == "win32" and not mingw32(): + try: + cmd.compiler.compile_options.remove("/GL") + except (AttributeError, ValueError): + pass + + # Disable multi-file interprocedural optimization in the Intel compiler on Linux + # which generates intermediary object files and prevents checking the + # float representation. + elif (sys.platform != "win32" + and cmd.compiler.compiler_type.startswith('intel') + and '-ipo' in cmd.compiler.cc_exe): + newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '') + cmd.compiler.set_executables( + compiler=newcompiler, + compiler_so=newcompiler, + compiler_cxx=newcompiler, + linker_exe=newcompiler, + linker_so=newcompiler + ' -shared' + ) + + # We need to use _compile because we need the object filename + src, obj = cmd._compile(body, None, None, 'c') + try: + ltype = long_double_representation(pyod(obj)) + return ltype + except ValueError: + # try linking to support CC="gcc -flto" or icc -ipo + # struct needs to be volatile so it isn't optimized away + body = body.replace('struct', 'volatile struct') + body += "int main(void) { return 0; }\n" + src, obj = cmd._compile(body, None, None, 'c') + cmd.temp_files.append("_configtest") + cmd.compiler.link_executable([obj], "_configtest") + ltype = long_double_representation(pyod("_configtest")) + return ltype + finally: + cmd._clean() + +LONG_DOUBLE_REPRESENTATION_SRC = r""" +/* "before" is 16 bytes to ensure there's no padding between it and "x". + * We're not expecting any "long double" bigger than 16 bytes or with + * alignment requirements stricter than 16 bytes. */ +typedef %(type)s test_type; + +struct { + char before[16]; + test_type x; + char after[8]; +} foo = { + { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, + -123456789.0, + { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } +}; +""" + +def pyod(filename): + """Python implementation of the od UNIX utility (od -b, more exactly). + + Parameters + ---------- + filename : str + name of the file to get the dump from. + + Returns + ------- + out : seq + list of lines of od output + + Note + ---- + We only implement enough to get the necessary information for long double + representation, this is not intended as a compatible replacement for od. 
+ """ + def _pyod2(): + out = [] + + fid = open(filename, 'rb') + try: + yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()] + for i in range(0, len(yo), 16): + line = ['%07d' % int(oct(i))] + line.extend(['%03d' % c for c in yo[i:i+16]]) + out.append(" ".join(line)) + return out + finally: + fid.close() + + def _pyod3(): + out = [] + + fid = open(filename, 'rb') + try: + yo2 = [oct(o)[2:] for o in fid.read()] + for i in range(0, len(yo2), 16): + line = ['%07d' % int(oct(i)[2:])] + line.extend(['%03d' % int(c) for c in yo2[i:i+16]]) + out.append(" ".join(line)) + return out + finally: + fid.close() + + if sys.version_info[0] < 3: + return _pyod2() + else: + return _pyod3() + +_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', + '001', '043', '105', '147', '211', '253', '315', '357'] +_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] + +_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] +_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] +_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353', + '031', '300', '000', '000'] +_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353', + '031', '300', '000', '000', '000', '000', '000', '000'] +_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171', + '242', '240', '000', '000', '000', '000'] +_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000', + '000', '000', '000', '000', '000', '000', '000', '000'] +_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1] +_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] + + ['000'] * 8) +_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] + + ['000'] * 8) + +def long_double_representation(lines): + """Given a binary dump as given by GNU od -b, look for long double + representation.""" + + # Read contains a list of 32 items, each item is a byte (in octal + # representation, as a string). We 'slide' over the output until read is of + # the form before_seq + content + after_sequence, where content is the long double + # representation: + # - content is 12 bytes: 80 bits Intel representation + # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision + # - content is 8 bytes: same as double (not implemented yet) + read = [''] * 32 + saw = None + for line in lines: + # we skip the first word, as od -b output an index at the beginning of + # each line + for w in line.split()[1:]: + read.pop(0) + read.append(w) + + # If the end of read is equal to the after_sequence, read contains + # the long double + if read[-8:] == _AFTER_SEQ: + saw = copy.copy(read) + # if the content was 12 bytes, we only have 32 - 8 - 12 = 12 + # "before" bytes. In other words the first 4 "before" bytes went + # past the sliding window. + if read[:12] == _BEFORE_SEQ[4:]: + if read[12:-8] == _INTEL_EXTENDED_12B: + return 'INTEL_EXTENDED_12_BYTES_LE' + if read[12:-8] == _MOTOROLA_EXTENDED_12B: + return 'MOTOROLA_EXTENDED_12_BYTES_BE' + # if the content was 16 bytes, we are left with 32-8-16 = 16 + # "before" bytes, so 8 went past the sliding window. 
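+                # (Editor's note, illustrative example, not part of NumPy:
+                # on a platform where long double is a plain IEEE 754
+                # double, the 8-byte payload -123456789.0 dumps as octal
+                # 000 000 000 124 064 157 235 301, i.e. _IEEE_DOUBLE_LE,
+                # and the read[:16] == _BEFORE_SEQ branch below returns
+                # 'IEEE_DOUBLE_LE'.)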
+ elif read[:8] == _BEFORE_SEQ[8:]: + if read[8:-8] == _INTEL_EXTENDED_16B: + return 'INTEL_EXTENDED_16_BYTES_LE' + elif read[8:-8] == _IEEE_QUAD_PREC_BE: + return 'IEEE_QUAD_BE' + elif read[8:-8] == _IEEE_QUAD_PREC_LE: + return 'IEEE_QUAD_LE' + elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE: + return 'IBM_DOUBLE_DOUBLE_LE' + elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE: + return 'IBM_DOUBLE_DOUBLE_BE' + # if the content was 8 bytes, left with 32-8-8 = 16 bytes + elif read[:16] == _BEFORE_SEQ: + if read[16:-8] == _IEEE_DOUBLE_LE: + return 'IEEE_DOUBLE_LE' + elif read[16:-8] == _IEEE_DOUBLE_BE: + return 'IEEE_DOUBLE_BE' + + if saw is not None: + raise ValueError("Unrecognized format (%s)" % saw) + else: + # We never detected the after_sequence + raise ValueError("Could not lock sequences (%s)" % saw) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.pyc new file mode 100644 index 0000000..3557770 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.py b/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.py new file mode 100644 index 0000000..d20afd8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.py @@ -0,0 +1,888 @@ +from __future__ import division, absolute_import, print_function + +__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', + 'stack', 'vstack'] + +import functools +import operator +import types +import warnings + +from . import numeric as _nx +from . import overrides +from .numeric import array, asanyarray, newaxis +from .multiarray import normalize_axis_index + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _atleast_1d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_1d_dispatcher) +def atleast_1d(*arys): + """ + Convert inputs to arrays with at least one dimension. + + Scalar inputs are converted to 1-dimensional arrays, whilst + higher-dimensional inputs are preserved. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or list of arrays, each with ``a.ndim >= 1``. + Copies are made only if necessary. + + See Also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> np.atleast_1d(1.0) + array([ 1.]) + + >>> x = np.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + array([[ 0., 1., 2.], + [ 3., 4., 5.], + [ 6., 7., 8.]]) + >>> np.atleast_1d(x) is x + True + + >>> np.atleast_1d(1, [3, 4]) + [array([1]), array([3, 4])] + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1) + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def _atleast_2d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_2d_dispatcher) +def atleast_2d(*arys): + """ + View inputs as arrays with at least two dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted + to arrays. Arrays that already have two or more dimensions are + preserved. + + Returns + ------- + res, res2, ... : ndarray + An array, or list of arrays, each with ``a.ndim >= 2``. + Copies are avoided where possible, and views with two or more + dimensions are returned. 
+ + See Also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> np.atleast_2d(3.0) + array([[ 3.]]) + + >>> x = np.arange(3.0) + >>> np.atleast_2d(x) + array([[ 0., 1., 2.]]) + >>> np.atleast_2d(x).base is x + True + + >>> np.atleast_2d(1, [1, 2], [[1, 2]]) + [array([[1]]), array([[1, 2]]), array([[1, 2]])] + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1) + elif ary.ndim == 1: + result = ary[newaxis,:] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def _atleast_3d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_3d_dispatcher) +def atleast_3d(*arys): + """ + View inputs as arrays with at least three dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted to + arrays. Arrays that already have three or more dimensions are + preserved. + + Returns + ------- + res1, res2, ... : ndarray + An array, or list of arrays, each with ``a.ndim >= 3``. Copies are + avoided where possible, and views with three or more dimensions are + returned. For example, a 1-D array of shape ``(N,)`` becomes a view + of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a + view of shape ``(M, N, 1)``. + + See Also + -------- + atleast_1d, atleast_2d + + Examples + -------- + >>> np.atleast_3d(3.0) + array([[[ 3.]]]) + + >>> x = np.arange(3.0) + >>> np.atleast_3d(x).shape + (1, 3, 1) + + >>> x = np.arange(12.0).reshape(4,3) + >>> np.atleast_3d(x).shape + (4, 3, 1) + >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself + True + + >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): + ... print(arr, arr.shape) + ... + [[[1] + [2]]] (1, 2, 1) + [[[1] + [2]]] (1, 2, 1) + [[[1 2]]] (1, 1, 2) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1, 1) + elif ary.ndim == 1: + result = ary[newaxis,:, newaxis] + elif ary.ndim == 2: + result = ary[:,:, newaxis] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def _arrays_for_stack_dispatcher(arrays, stacklevel=4): + if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): + warnings.warn('arrays to stack must be passed as a "sequence" type ' + 'such as list or tuple. Support for non-sequence ' + 'iterables such as generators is deprecated as of ' + 'NumPy 1.16 and will raise an error in the future.', + FutureWarning, stacklevel=stacklevel) + return () + return arrays + + +def _warn_for_nonsequence(arrays): + if not overrides.ENABLE_ARRAY_FUNCTION: + _arrays_for_stack_dispatcher(arrays, stacklevel=4) + + +def _vhstack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_vhstack_dispatcher) +def vstack(tup): + """ + Stack arrays in sequence vertically (row wise). + + This is equivalent to concatenation along the first axis after 1-D arrays + of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by + `vsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the first axis. 
+ 1-D arrays must have the same length. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 2-D. + + See Also + -------- + stack : Join a sequence of arrays along a new axis. + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + concatenate : Join a sequence of arrays along an existing axis. + vsplit : Split array into a list of multiple sub-arrays vertically. + block : Assemble arrays from blocks. + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> b = np.array([2, 3, 4]) + >>> np.vstack((a,b)) + array([[1, 2, 3], + [2, 3, 4]]) + + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[2], [3], [4]]) + >>> np.vstack((a,b)) + array([[1], + [2], + [3], + [2], + [3], + [4]]) + + """ + _warn_for_nonsequence(tup) + return _nx.concatenate([atleast_2d(_m) for _m in tup], 0) + + +@array_function_dispatch(_vhstack_dispatcher) +def hstack(tup): + """ + Stack arrays in sequence horizontally (column wise). + + This is equivalent to concatenation along the second axis, except for 1-D + arrays where it concatenates along the first axis. Rebuilds arrays divided + by `hsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the second axis, + except 1-D arrays which can be any length. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays. + + See Also + -------- + stack : Join a sequence of arrays along a new axis. + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third axis). + concatenate : Join a sequence of arrays along an existing axis. + hsplit : Split array along second axis. + block : Assemble arrays from blocks. + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.hstack((a,b)) + array([1, 2, 3, 2, 3, 4]) + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.hstack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + + """ + _warn_for_nonsequence(tup) + arrs = [atleast_1d(_m) for _m in tup] + # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" + if arrs and arrs[0].ndim == 1: + return _nx.concatenate(arrs, 0) + else: + return _nx.concatenate(arrs, 1) + + +def _stack_dispatcher(arrays, axis=None, out=None): + arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6) + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_dispatch(_stack_dispatcher) +def stack(arrays, axis=0, out=None): + """ + Join a sequence of arrays along a new axis. + + The `axis` parameter specifies the index of the new axis in the dimensions + of the result. For example, if ``axis=0`` it will be the first dimension + and if ``axis=-1`` it will be the last dimension. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + arrays : sequence of array_like + Each array must have the same shape. + axis : int, optional + The axis in the result array along which the input arrays are stacked. 
out : ndarray, optional
+        If provided, the destination to place the result. The shape must be
+        correct, matching that of what stack would have returned if no
+        out argument were specified.
+
+    Returns
+    -------
+    stacked : ndarray
+        The stacked array has one more dimension than the input arrays.
+
+    See Also
+    --------
+    concatenate : Join a sequence of arrays along an existing axis.
+    split : Split array into a list of multiple sub-arrays of equal size.
+    block : Assemble arrays from blocks.
+
+    Examples
+    --------
+    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
+    >>> np.stack(arrays, axis=0).shape
+    (10, 3, 4)
+
+    >>> np.stack(arrays, axis=1).shape
+    (3, 10, 4)
+
+    >>> np.stack(arrays, axis=2).shape
+    (3, 4, 10)
+
+    >>> a = np.array([1, 2, 3])
+    >>> b = np.array([2, 3, 4])
+    >>> np.stack((a, b))
+    array([[1, 2, 3],
+           [2, 3, 4]])
+
+    >>> np.stack((a, b), axis=-1)
+    array([[1, 2],
+           [2, 3],
+           [3, 4]])
+
+    """
+    _warn_for_nonsequence(arrays)
+    arrays = [asanyarray(arr) for arr in arrays]
+    if not arrays:
+        raise ValueError('need at least one array to stack')
+
+    shapes = {arr.shape for arr in arrays}
+    if len(shapes) != 1:
+        raise ValueError('all input arrays must have the same shape')
+
+    result_ndim = arrays[0].ndim + 1
+    axis = normalize_axis_index(axis, result_ndim)
+
+    sl = (slice(None),) * axis + (_nx.newaxis,)
+    expanded_arrays = [arr[sl] for arr in arrays]
+    return _nx.concatenate(expanded_arrays, axis=axis, out=out)
+
+
+def _block_format_index(index):
+    """
+    Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
+    """
+    idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
+    return 'arrays' + idx_str
+
+
+def _block_check_depths_match(arrays, parent_index=[]):
+    """
+    Recursive function checking that the depths of nested lists in `arrays`
+    all match. Mismatch raises a ValueError as described in the block
+    docstring below.
+
+    The entire index (rather than just the depth) needs to be calculated
+    for each innermost list, in case an error needs to be raised, so that
+    the index of the offending list can be printed as part of the error.
+
+    Parameters
+    ----------
+    arrays : nested list of arrays
+        The arrays to check
+    parent_index : list of int
+        The full index of `arrays` within the nested lists passed to
+        `_block_check_depths_match` at the top of the recursion.
+
+    Returns
+    -------
+    first_index : list of int
+        The full index of an element from the bottom of the nesting in
+        `arrays`. If any element at the bottom is an empty list, this will
+        refer to it, and the last index along the empty axis will be `None`.
+    max_arr_ndim : int
+        The maximum of the ndims of the arrays nested in `arrays`.
+    final_size: int
+        The number of elements in the final array. This is used to motivate
+        the choice of algorithm, based on benchmarking wisdom.
+
+    """
+    if type(arrays) is tuple:
+        # not strictly necessary, but saves us from:
+        #  - more than one way to do things - no point treating tuples like
+        #    lists
+        #  - horribly confusing behaviour that results when tuples are
+        #    treated like ndarray
+        raise TypeError(
+            '{} is a tuple. 
'
+            'Only lists can be used to arrange blocks, and np.block does '
+            'not allow implicit conversion from tuple to ndarray.'.format(
+                _block_format_index(parent_index)
+            )
+        )
+    elif type(arrays) is list and len(arrays) > 0:
+        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
+                      for i, arr in enumerate(arrays))
+
+        first_index, max_arr_ndim, final_size = next(idxs_ndims)
+        for index, ndim, size in idxs_ndims:
+            final_size += size
+            if ndim > max_arr_ndim:
+                max_arr_ndim = ndim
+            if len(index) != len(first_index):
+                raise ValueError(
+                    "List depths are mismatched. First element was at depth "
+                    "{}, but there is an element at depth {} ({})".format(
+                        len(first_index),
+                        len(index),
+                        _block_format_index(index)
+                    )
+                )
+            # propagate our flag that indicates an empty list at the bottom
+            if index[-1] is None:
+                first_index = index
+
+        return first_index, max_arr_ndim, final_size
+    elif type(arrays) is list and len(arrays) == 0:
+        # We've 'bottomed out' on an empty list
+        return parent_index + [None], 0, 0
+    else:
+        # We've 'bottomed out' - arrays is either a scalar or an array
+        size = _nx.size(arrays)
+        return parent_index, _nx.ndim(arrays), size
+
+
+def _atleast_nd(a, ndim):
+    # Ensures `a` has at least `ndim` dimensions by prepending
+    # ones to `a.shape` as necessary
+    return array(a, ndmin=ndim, copy=False, subok=True)
+
+
+def _accumulate(values):
+    # Helper function because Python 2.7 doesn't have
+    # itertools.accumulate
+    value = 0
+    accumulated = []
+    for v in values:
+        value += v
+        accumulated.append(value)
+    return accumulated
+
+
+def _concatenate_shapes(shapes, axis):
+    """Given array shapes, return the resulting shape and slice prefixes.
+
+    These help in nested concatenation.
+
+    Returns
+    -------
+    shape: tuple of int
+        This tuple satisfies:
+        ```
+        shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
+        shape == concatenate(arrs, axis).shape
+        ```
+
+    slice_prefixes: tuple of (slice(start, end), )
+        For a list of arrays being concatenated, this returns the slice
+        in the larger array at axis that needs to be sliced into.
+
+        For example, the following holds:
+        ```
+        ret = concatenate([a, b, c], axis)
+        _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)
+
+        ret[(slice(None),) * axis + sl_a] == a
+        ret[(slice(None),) * axis + sl_b] == b
+        ret[(slice(None),) * axis + sl_c] == c
+        ```
+
+        These are called slice prefixes since they are used in the recursive
+        blocking algorithm to compute the left-most slices during the
+        recursion. Therefore, they must be prepended to the rest of the slice
+        that was computed deeper in the recursion.
+
+        These are returned as tuples to ensure that they can quickly be added
+        to existing slice tuples without creating a new tuple every time.
+
+    """
+    # Cache a result that will be reused.
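+    # (Editor's note, illustrative example, not part of NumPy: for
+    # shapes == [(2, 3), (2, 4)] and axis == 1, the code below computes
+    # shape == (2, 7) and slice_prefixes == [(slice(0, 3),),
+    # (slice(3, 7),)], so each input owns a disjoint window of the
+    # output along axis 1.)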
+    shape_at_axis = [shape[axis] for shape in shapes]
+
+    # Take a shape, any shape
+    first_shape = shapes[0]
+    first_shape_pre = first_shape[:axis]
+    first_shape_post = first_shape[axis+1:]
+
+    if any(shape[:axis] != first_shape_pre or
+           shape[axis+1:] != first_shape_post for shape in shapes):
+        raise ValueError(
+            'Mismatched array shapes in block along axis {}.'.format(axis))
+
+    shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
+
+    offsets_at_axis = _accumulate(shape_at_axis)
+    slice_prefixes = [(slice(start, end),)
+                      for start, end in zip([0] + offsets_at_axis,
+                                            offsets_at_axis)]
+    return shape, slice_prefixes
+
+
+def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
+    """
+    Returns the shape of the final array, along with a list
+    of slices and a list of arrays that can be used for assignment inside the
+    new array
+
+    Parameters
+    ----------
+    arrays : nested list of arrays
+        The arrays to check
+    max_depth : int
+        The number of nested lists
+    result_ndim : int
+        The number of dimensions in the final array.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape that the final array will take on.
+    slices: list of tuple of slices
+        The slices into the full array required for assignment. These are
+        required to be prepended with ``(Ellipsis, )`` to obtain the correct
+        final index.
+    arrays: list of ndarray
+        The data to assign to each slice of the full array
+
+    """
+    if depth < max_depth:
+        shapes, slices, arrays = zip(
+            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
+              for arr in arrays])
+
+        axis = result_ndim - max_depth + depth
+        shape, slice_prefixes = _concatenate_shapes(shapes, axis)
+
+        # Prepend the slice prefix and flatten the slices
+        slices = [slice_prefix + the_slice
+                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
+                  for the_slice in inner_slices]
+
+        # Flatten the array list
+        arrays = functools.reduce(operator.add, arrays)
+
+        return shape, slices, arrays
+    else:
+        # We've 'bottomed out' - arrays is either a scalar or an array
+        # type(arrays) is not list
+        # Return the slice and the array inside a list to be consistent with
+        # the recursive case.
+        arr = _atleast_nd(arrays, result_ndim)
+        return arr.shape, [()], [arr]
+
+
+def _block(arrays, max_depth, result_ndim, depth=0):
+    """
+    Internal implementation of block based on repeated concatenation.
+    `arrays` is the argument passed to
+    block. `max_depth` is the depth of nested lists within `arrays` and
+    `result_ndim` is the greatest of the dimensions of the arrays in
+    `arrays` and the depth of the lists in `arrays` (see block docstring
+    for details).
+    """
+    if depth < max_depth:
+        arrs = [_block(arr, max_depth, result_ndim, depth+1)
+                for arr in arrays]
+        return _nx.concatenate(arrs, axis=-(max_depth-depth))
+    else:
+        # We've 'bottomed out' - arrays is either a scalar or an array
+        # type(arrays) is not list
+        return _atleast_nd(arrays, result_ndim)
+
+
+def _block_dispatcher(arrays):
+    # Use type(...) is list to match the behavior of np.block(), which special
+    # cases list specifically rather than allowing for generic iterables or
+    # tuple. Also, we know that list.__array_function__ will never exist.
+    if type(arrays) is list:
+        for subarrays in arrays:
+            for subarray in _block_dispatcher(subarrays):
+                yield subarray
+    else:
+        yield arrays
+
+
+@array_function_dispatch(_block_dispatcher)
+def block(arrays):
+    """
+    Assemble an nd-array from nested lists of blocks.
+ + Blocks in the innermost lists are concatenated (see `concatenate`) along + the last dimension (-1), then these are concatenated along the + second-last dimension (-2), and so on until the outermost list is reached. + + Blocks can be of any dimension, but will not be broadcasted using the normal + rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` + the same for all blocks. This is primarily useful for working with scalars, + and means that code like ``np.block([v, 1])`` is valid, where + ``v.ndim == 1``. + + When the nested list is two levels deep, this allows block matrices to be + constructed from their components. + + .. versionadded:: 1.13.0 + + Parameters + ---------- + arrays : nested list of array_like or scalars (but not tuples) + If passed a single ndarray or scalar (a nested list of depth 0), this + is returned unmodified (and not copied). + + Elements shapes must match along the appropriate axes (without + broadcasting), but leading 1s will be prepended to the shape as + necessary to make the dimensions match. + + Returns + ------- + block_array : ndarray + The array assembled from the given blocks. + + The dimensionality of the output is equal to the greatest of: + * the dimensionality of all the inputs + * the depth to which the input list is nested + + Raises + ------ + ValueError + * If list depths are mismatched - for instance, ``[[a, b], c]`` is + illegal, and should be spelt ``[[a, b], [c]]`` + * If lists are empty - for instance, ``[[a, b], []]`` + + See Also + -------- + concatenate : Join a sequence of arrays together. + stack : Stack arrays in sequence along a new dimension. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + vsplit : Split array into a list of multiple sub-arrays vertically. + + Notes + ----- + + When called with only scalars, ``np.block`` is equivalent to an ndarray + call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to + ``np.array([[1, 2], [3, 4]])``. + + This function does not enforce that the blocks lie on a fixed grid. + ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: + + AAAbb + AAAbb + cccDD + + But is also allowed to produce, for some ``a, b, c, d``:: + + AAAbb + AAAbb + cDDDD + + Since concatenation happens along the last axis first, `block` is _not_ + capable of producing the following directly:: + + AAAbb + cccbb + cccDD + + Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is + equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. + + Examples + -------- + The most common use of this function is to build a block matrix + + >>> A = np.eye(2) * 2 + >>> B = np.eye(3) * 3 + >>> np.block([ + ... [A, np.zeros((2, 3))], + ... [np.ones((3, 2)), B ] + ... 
])
+    array([[ 2.,  0.,  0.,  0.,  0.],
+           [ 0.,  2.,  0.,  0.,  0.],
+           [ 1.,  1.,  3.,  0.,  0.],
+           [ 1.,  1.,  0.,  3.,  0.],
+           [ 1.,  1.,  0.,  0.,  3.]])
+
+    With a list of depth 1, `block` can be used as `hstack`
+
+    >>> np.block([1, 2, 3])    # hstack([1, 2, 3])
+    array([1, 2, 3])
+
+    >>> a = np.array([1, 2, 3])
+    >>> b = np.array([2, 3, 4])
+    >>> np.block([a, b, 10])   # hstack([a, b, 10])
+    array([1, 2, 3, 2, 3, 4, 10])
+
+    >>> A = np.ones((2, 2), int)
+    >>> B = 2 * A
+    >>> np.block([A, B])       # hstack([A, B])
+    array([[1, 1, 2, 2],
+           [1, 1, 2, 2]])
+
+    With a list of depth 2, `block` can be used in place of `vstack`:
+
+    >>> a = np.array([1, 2, 3])
+    >>> b = np.array([2, 3, 4])
+    >>> np.block([[a], [b]])   # vstack([a, b])
+    array([[1, 2, 3],
+           [2, 3, 4]])
+
+    >>> A = np.ones((2, 2), int)
+    >>> B = 2 * A
+    >>> np.block([[A], [B]])   # vstack([A, B])
+    array([[1, 1],
+           [1, 1],
+           [2, 2],
+           [2, 2]])
+
+    It can also be used in place of `atleast_1d` and `atleast_2d`
+
+    >>> a = np.array(0)
+    >>> b = np.array([1])
+    >>> np.block([a])          # atleast_1d(a)
+    array([0])
+    >>> np.block([b])          # atleast_1d(b)
+    array([1])
+
+    >>> np.block([[a]])        # atleast_2d(a)
+    array([[0]])
+    >>> np.block([[b]])        # atleast_2d(b)
+    array([[1]])
+
+
+    """
+    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
+
+    # It was found through benchmarking that making an array of final size
+    # around 256x256 was faster by straight concatenation on an
+    # i7-7700HQ processor and dual channel ram 2400MHz.
+    # The dtype used did not seem to matter much.
+    #
+    # A 2D array using repeated concatenation requires 2 copies of the array.
+    #
+    # The fastest algorithm will depend on the ratio of CPU power to memory
+    # speed.
+    # One can monitor the results of the benchmark
+    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
+    # to tune this parameter until a C version of the `_block_info_recursion`
+    # algorithm is implemented which would likely be faster than the python
+    # version.
+    if list_ndim * final_size > (2 * 512 * 512):
+        return _block_slicing(arrays, list_ndim, result_ndim)
+    else:
+        return _block_concatenate(arrays, list_ndim, result_ndim)
+
+
+# These helper functions are mostly used for testing.
+# They allow us to write tests that directly call `_block_slicing`
+# or `_block_concatenate` without blocking large arrays to force the wisdom
+# to trigger the desired path.
+def _block_setup(arrays):
+    """
+    Returns
+    (`arrays`, list_ndim, result_ndim, final_size)
+    """
+    bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
+    list_ndim = len(bottom_index)
+    if bottom_index and bottom_index[-1] is None:
+        raise ValueError(
+            'List at {} cannot be empty'.format(
+                _block_format_index(bottom_index)
+            )
+        )
+    result_ndim = max(arr_ndim, list_ndim)
+    return arrays, list_ndim, result_ndim, final_size
+
+
+def _block_slicing(arrays, list_ndim, result_ndim):
+    shape, slices, arrays = _block_info_recursion(
+        arrays, list_ndim, result_ndim)
+    dtype = _nx.result_type(*[arr.dtype for arr in arrays])
+
+    # Test preferring F only in the case that all input arrays are F
+    F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
+    C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
+    order = 'F' if F_order and not C_order else 'C'
+    result = _nx.empty(shape=shape, dtype=dtype, order=order)
+    # Note: In a c implementation, the function
+    # PyArray_CreateMultiSortedStridePerm could be used for more advanced
+    # guessing of the desired order.
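+    # (Editor's note, illustrative example, not part of NumPy: for
+    # np.block([[A, B], [C, D]]) with A.shape == (2, 3), the recursion
+    # above yields the window (slice(0, 2), slice(0, 3)) for A, and the
+    # loop below performs result[(Ellipsis,) + that_window] = A.)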
+
+    for the_slice, arr in zip(slices, arrays):
+        result[(Ellipsis,) + the_slice] = arr
+    return result
+
+
+def _block_concatenate(arrays, list_ndim, result_ndim):
+    result = _block(arrays, list_ndim, result_ndim)
+    if list_ndim == 0:
+        # Catch an edge case where _block returns a view because
+        # `arrays` is a single numpy array and not a list of numpy arrays.
+        # This might copy scalars or lists twice, but this isn't a likely
+        # use case for those interested in performance
+        result = result.copy()
+    return result
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.pyc
new file mode 100644
index 0000000..dd1aa41
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.pyc
new file mode 100644
index 0000000..e809cf8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.py
new file mode 100644
index 0000000..52e4ff3
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.py
@@ -0,0 +1,76 @@
+"""Provide class for testing in French locale
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import locale
+
+import pytest
+
+__all__ = ['CommaDecimalPointLocale']
+
+
+def find_comma_decimal_point_locale():
+    """See if platform has a decimal point as comma locale.
+
+    Find a locale that uses a comma instead of a period as the
+    decimal point.
+
+    Returns
+    -------
+    old_locale: str
+        Locale when the function was called.
+    new_locale: {str, None}
+        First French locale found, None if none found.
+
+    """
+    if sys.platform == 'win32':
+        locales = ['FRENCH']
+    else:
+        locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
+
+    old_locale = locale.getlocale(locale.LC_NUMERIC)
+    new_locale = None
+    try:
+        for loc in locales:
+            try:
+                locale.setlocale(locale.LC_NUMERIC, loc)
+                new_locale = loc
+                break
+            except locale.Error:
+                pass
+    finally:
+        locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
+    return old_locale, new_locale
+
+
+class CommaDecimalPointLocale(object):
+    """Sets LC_NUMERIC to a locale with comma as decimal point.
+
+    Classes derived from this class have setup and teardown methods that run
+    tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
+    the decimal point instead of periods ('.'). On exit the locale is restored
+    to the initial locale. It also serves as context manager with the same
+    effect. If no such locale is available, the test is skipped.
+
+    ..
versionadded:: 1.15.0 + + """ + (cur_locale, tst_locale) = find_comma_decimal_point_locale() + + def setup(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def teardown(self): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) + + def __enter__(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def __exit__(self, type, value, traceback): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.pyc new file mode 100644 index 0000000..ba68e27 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/astype_copy.pkl b/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/astype_copy.pkl new file mode 100644 index 0000000..7397c97 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/astype_copy.pkl differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/recarray_from_file.fits b/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/recarray_from_file.fits new file mode 100644 index 0000000..ca48ee8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/recarray_from_file.fits differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.py new file mode 100644 index 0000000..d9c61b0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.py @@ -0,0 +1,56 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import assert_ + +import numbers + +import numpy as np +from numpy.core.numerictypes import sctypes + +class TestABC(object): + def test_abstract(self): + assert_(issubclass(np.number, numbers.Number)) + + assert_(issubclass(np.inexact, numbers.Complex)) + assert_(issubclass(np.complexfloating, numbers.Complex)) + assert_(issubclass(np.floating, numbers.Real)) + + assert_(issubclass(np.integer, numbers.Integral)) + assert_(issubclass(np.signedinteger, numbers.Integral)) + assert_(issubclass(np.unsignedinteger, numbers.Integral)) + + def test_floats(self): + for t in sctypes['float']: + assert_(isinstance(t(), numbers.Real), + "{0} is not instance of Real".format(t.__name__)) + assert_(issubclass(t, numbers.Real), + "{0} is not subclass of Real".format(t.__name__)) + assert_(not isinstance(t(), numbers.Rational), + "{0} is instance of Rational".format(t.__name__)) + assert_(not issubclass(t, numbers.Rational), + "{0} is subclass of Rational".format(t.__name__)) + + def test_complex(self): + for t in sctypes['complex']: + assert_(isinstance(t(), numbers.Complex), + "{0} is not instance of Complex".format(t.__name__)) + assert_(issubclass(t, numbers.Complex), + "{0} is not subclass of Complex".format(t.__name__)) + assert_(not isinstance(t(), numbers.Real), + "{0} is instance of Real".format(t.__name__)) + assert_(not issubclass(t, numbers.Real), + "{0} is subclass of Real".format(t.__name__)) + + def test_int(self): + for t in sctypes['int']: + assert_(isinstance(t(), numbers.Integral), + "{0} is not instance of 
Integral".format(t.__name__)) + assert_(issubclass(t, numbers.Integral), + "{0} is not subclass of Integral".format(t.__name__)) + + def test_uint(self): + for t in sctypes['uint']: + assert_(isinstance(t(), numbers.Integral), + "{0} is not instance of Integral".format(t.__name__)) + assert_(issubclass(t, numbers.Integral), + "{0} is not subclass of Integral".format(t.__name__)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.pyc new file mode 100644 index 0000000..f904b25 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.py new file mode 100644 index 0000000..9755e7b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.py @@ -0,0 +1,516 @@ +from __future__ import division, absolute_import, print_function + +import sys + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT + ) + +# Switch between new behaviour when NPY_RELAXED_STRIDES_CHECKING is set. +NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous + + +def test_array_array(): + tobj = type(object) + ones11 = np.ones((1, 1), np.float64) + tndarray = type(ones11) + # Test is_ndarray + assert_equal(np.array(ones11, dtype=np.float64), ones11) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tndarray) + np.array(ones11) + assert_equal(old_refcount, sys.getrefcount(tndarray)) + + # test None + assert_equal(np.array(None, dtype=np.float64), + np.array(np.nan, dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tobj) + np.array(None, dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(tobj)) + + # test scalar + assert_equal(np.array(1.0, dtype=np.float64), + np.ones((), dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(np.float64) + np.array(np.array(1.0, dtype=np.float64), dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(np.float64)) + + # test string + S2 = np.dtype((str, 2)) + S3 = np.dtype((str, 3)) + S5 = np.dtype((str, 5)) + assert_equal(np.array("1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array("1.0").dtype, S3) + assert_equal(np.array("1.0", dtype=str).dtype, S3) + assert_equal(np.array("1.0", dtype=S2), np.array("1.")) + assert_equal(np.array("1", dtype=S5), np.ones((), dtype=S5)) + + # test unicode + _unicode = globals().get("unicode") + if _unicode: + U2 = np.dtype((_unicode, 2)) + U3 = np.dtype((_unicode, 3)) + U5 = np.dtype((_unicode, 5)) + assert_equal(np.array(_unicode("1.0"), dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array(_unicode("1.0")).dtype, U3) + assert_equal(np.array(_unicode("1.0"), dtype=_unicode).dtype, U3) + assert_equal(np.array(_unicode("1.0"), dtype=U2), + np.array(_unicode("1."))) + assert_equal(np.array(_unicode("1"), dtype=U5), + np.ones((), dtype=U5)) + + builtins = getattr(__builtins__, '__dict__', __builtins__) + assert_(hasattr(builtins, 'get')) + + # test buffer + _buffer = builtins.get("buffer") + if _buffer and sys.version_info[:3] >= (2, 7, 5): + # This test fails for earlier versions of Python. + # Evidently a bug got fixed in 2.7.5. 
+ dat = np.array(_buffer('1.0'), dtype=np.float64) + assert_equal(dat, [49.0, 46.0, 48.0]) + assert_(dat.dtype.type is np.float64) + + dat = np.array(_buffer(b'1.0')) + assert_equal(dat, [49, 46, 48]) + assert_(dat.dtype.type is np.uint8) + + # test memoryview, new version of buffer + _memoryview = builtins.get("memoryview") + if _memoryview: + dat = np.array(_memoryview(b'1.0'), dtype=np.float64) + assert_equal(dat, [49.0, 46.0, 48.0]) + assert_(dat.dtype.type is np.float64) + + dat = np.array(_memoryview(b'1.0')) + assert_equal(dat, [49, 46, 48]) + assert_(dat.dtype.type is np.uint8) + + # test array interface + a = np.array(100.0, dtype=np.float64) + o = type("o", (object,), + dict(__array_interface__=a.__array_interface__)) + assert_equal(np.array(o, dtype=np.float64), a) + + # test array_struct interface + a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], + dtype=[('f0', int), ('f1', float), ('f2', str)]) + o = type("o", (object,), + dict(__array_struct__=a.__array_struct__)) + ## wasn't what I expected... is np.array(o) supposed to equal a ? + ## instead we get a array([...], dtype=">V18") + assert_equal(bytes(np.array(o).data), bytes(a.data)) + + # test array + o = type("o", (object,), + dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))() + assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) + + # test recursion + nested = 1.5 + for i in range(np.MAXDIMS): + nested = [nested] + + # no error + np.array(nested) + + # Exceeds recursion limit + assert_raises(ValueError, np.array, [nested], dtype=np.float64) + + # Try with lists... + assert_equal(np.array([None] * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([[None]] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array([1.0] * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([[1.0]] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + # Try with tuples + assert_equal(np.array((None,) * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,)] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array((1.0,) * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([(1.0,)] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + +def test_fastCopyAndTranspose(): + # 0D array + a = np.array(2) + b = np.fastCopyAndTranspose(a) + assert_equal(b, a.T) + assert_(b.flags.owndata) + + # 1D array + a = np.array([3, 2, 7, 0]) + b = np.fastCopyAndTranspose(a) + assert_equal(b, a.T) + assert_(b.flags.owndata) + + # 2D array + a = np.arange(6).reshape(2, 
3) + b = np.fastCopyAndTranspose(a) + assert_equal(b, a.T) + assert_(b.flags.owndata) + +def test_array_astype(): + a = np.arange(6, dtype='f4').reshape(2, 3) + # Default behavior: allows unsafe casts, keeps memory layout, + # always copies. + b = a.astype('i4') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.strides, b.strides) + b = a.T.astype('i4') + assert_equal(a.T, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.T.strides, b.strides) + b = a.astype('f4') + assert_equal(a, b) + assert_(not (a is b)) + + # copy=False parameter can sometimes skip a copy + b = a.astype('f4', copy=False) + assert_(a is b) + + # order parameter allows overriding of the memory layout, + # forcing a copy if the layout is wrong + b = a.astype('f4', order='F', copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(b.flags.f_contiguous) + + b = a.astype('f4', order='C', copy=False) + assert_equal(a, b) + assert_(a is b) + assert_(b.flags.c_contiguous) + + # casting parameter allows catching bad casts + b = a.astype('c8', casting='safe') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('c8')) + + assert_raises(TypeError, a.astype, 'i4', casting='safe') + + # subok=False passes through a non-subclassed array + b = a.astype('f4', subok=0, copy=False) + assert_(a is b) + + class MyNDArray(np.ndarray): + pass + + a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray) + + # subok=True passes through a subclass + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), MyNDArray) + + # subok=False never returns a subclass + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not MyNDArray) + + # Make sure converting from string object to fixed length string + # does not truncate. 
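+    # (Editor's note, illustrative: astype('S') with no explicit length
+    # infers the itemsize from the data, so the 100-character object
+    # string below becomes dtype 'S100' instead of being truncated.)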
+ a = np.array([b'a'*100], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S100')) + a = np.array([u'a'*100], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U100')) + + # Same test as above but for strings shorter than 64 characters + a = np.array([b'a'*10], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S10')) + a = np.array([u'a'*10], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U10')) + + a = np.array(123456789012345678901234567890, dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='O').astype('U') + assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) + + a = np.array([123456789012345678901234567890], dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array([123456789012345678901234567890], dtype='O').astype('U') + assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) + + a = np.array(123456789012345678901234567890, dtype='S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='U') + assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) + + a = np.array(u'a\u0140', dtype='U') + b = np.ndarray(buffer=a, dtype='uint32', shape=2) + assert_(b.size == 2) + + a = np.array([1000], dtype='i4') + assert_raises(TypeError, a.astype, 'S1', casting='safe') + + a = np.array(1000, dtype='i4') + assert_raises(TypeError, a.astype, 'U1', casting='safe') + +def test_copyto_fromscalar(): + a = np.arange(6, dtype='f4').reshape(2, 3) + + # Simple copy + np.copyto(a, 1.5) + assert_equal(a, 1.5) + np.copyto(a.T, 2.5) + assert_equal(a, 2.5) + + # Where-masked copy + mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?') + np.copyto(a, 3.5, where=mask) + assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) + mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') + np.copyto(a.T, 4.5, where=mask) + assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) + +def test_copyto(): + a = np.arange(6, dtype='i4').reshape(2, 3) + + # Simple copy + np.copyto(a, [[3, 1, 5], [6, 2, 1]]) + assert_equal(a, [[3, 1, 5], [6, 2, 1]]) + + # Overlapping copy should work + np.copyto(a[:, :2], a[::-1, 1::-1]) + assert_equal(a, [[2, 6, 5], [1, 3, 1]]) + + # Defaults to 'same_kind' casting + assert_raises(TypeError, np.copyto, a, 1.5) + + # Force a copy with 'unsafe' casting, truncating 1.5 to 1 + np.copyto(a, 1.5, casting='unsafe') + assert_equal(a, 1) + + # Copying with a mask + np.copyto(a, 3, where=[True, False, True]) + assert_equal(a, [[3, 1, 3], [3, 1, 3]]) + + # Casting rule still applies with a mask + assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) + + # Lists of integer 0's and 1's is ok too + np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) + assert_equal(a, [[3, 4, 4], [4, 1, 3]]) + + # Overlapping copy with mask should work + np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]]) + assert_equal(a, [[3, 4, 4], [4, 3, 3]]) + + # 'dst' must be an array + assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) + +def test_copyto_permut(): + # test explicit overflow case + pad = 500 + l = [True] * pad + [True, True, True, True] + r = np.zeros(len(l)-pad) + d = np.ones(len(l)-pad) + mask = np.array(l)[pad:] + np.copyto(r, d, where=mask[::-1]) + + # test all permutation of possible masks, 9 should be 
sufficient for + # current 4 byte unrolled code + power = 9 + d = np.ones(power) + for i in range(2**power): + r = np.zeros(power) + l = [(i & x) != 0 for x in range(power)] + mask = np.array(l) + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=mask[::-1]) + assert_array_equal(r == 1, l[::-1]) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::2]) + assert_array_equal(r[::2] == 1, l[::2]) + assert_equal(r[::2].sum(), sum(l[::2])) + + r = np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::-2]) + assert_array_equal(r[::2] == 1, l[::-2]) + assert_equal(r[::2].sum(), sum(l[::-2])) + + for c in [0xFF, 0x7F, 0x02, 0x10]: + r = np.zeros(power) + mask = np.array(l) + imask = np.array(l).view(np.uint8) + imask[mask != 0] = c + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=True) + assert_equal(r.sum(), r.size) + r = np.ones(power) + d = np.zeros(power) + np.copyto(r, d, where=False) + assert_equal(r.sum(), r.size) + +def test_copy_order(): + a = np.arange(24).reshape(2, 1, 3, 4) + b = a.copy(order='F') + c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) + + def check_copy_result(x, y, ccontig, fcontig, strides=False): + assert_(not (x is y)) + assert_equal(x, y) + assert_equal(res.flags.c_contiguous, ccontig) + assert_equal(res.flags.f_contiguous, fcontig) + # This check is impossible only because + # NPY_RELAXED_STRIDES_CHECKING changes the strides actively + if not NPY_RELAXED_STRIDES_CHECKING: + if strides: + assert_equal(x.strides, y.strides) + else: + assert_(x.strides != y.strides) + + # Validate the initial state of a, b, and c + assert_(a.flags.c_contiguous) + assert_(not a.flags.f_contiguous) + assert_(not b.flags.c_contiguous) + assert_(b.flags.f_contiguous) + assert_(not c.flags.c_contiguous) + assert_(not c.flags.f_contiguous) + + # Copy with order='C' + res = a.copy(order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = c.copy(order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + res = np.copy(a, order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = np.copy(c, order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + + # Copy with order='F' + res = a.copy(order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = b.copy(order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + res = np.copy(a, order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = np.copy(b, order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + + # Copy with order='K' + res = a.copy(order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, 
strides=True) + res = np.copy(a, order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + +def test_contiguous_flags(): + a = np.ones((4, 4, 1))[::2,:,:] + if NPY_RELAXED_STRIDES_CHECKING: + a.strides = a.strides[:2] + (-123,) + b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) + + def check_contig(a, ccontig, fcontig): + assert_(a.flags.c_contiguous == ccontig) + assert_(a.flags.f_contiguous == fcontig) + + # Check if new arrays are correct: + check_contig(a, False, False) + check_contig(b, False, False) + if NPY_RELAXED_STRIDES_CHECKING: + check_contig(np.empty((2, 2, 0, 2, 2)), True, True) + check_contig(np.array([[[1], [2]]], order='F'), True, True) + else: + check_contig(np.empty((2, 2, 0, 2, 2)), True, False) + check_contig(np.array([[[1], [2]]], order='F'), False, True) + check_contig(np.empty((2, 2)), True, False) + check_contig(np.empty((2, 2), order='F'), False, True) + + # Check that np.array creates correct contiguous flags: + check_contig(np.array(a, copy=False), False, False) + check_contig(np.array(a, copy=False, order='C'), True, False) + check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True) + + if NPY_RELAXED_STRIDES_CHECKING: + # Check slicing update of flags and : + check_contig(a[0], True, True) + check_contig(a[None, ::4, ..., None], True, True) + check_contig(b[0, 0, ...], False, True) + check_contig(b[:,:, 0:0,:,:], True, True) + else: + # Check slicing update of flags: + check_contig(a[0], True, False) + # Would be nice if this was C-Contiguous: + check_contig(a[None, 0, ..., None], False, False) + check_contig(b[0, 0, 0, ...], False, True) + + # Test ravel and squeeze. 
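+    # [Editor's note] A contiguous 1-D array is trivially both C- and
+    # F-contiguous at once, which is what the two checks below rely on,
+    # e.g. np.ones((1, 3, 1)).squeeze() has shape (3,) and both flags set.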
+    check_contig(a.ravel(), True, True)
+    check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
+
+def test_broadcast_arrays():
+    # Test user defined dtypes
+    a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
+    b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
+    result = np.broadcast_arrays(a, b)
+    assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
+    assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.pyc
new file mode 100644
index 0000000..28be9b0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.py
new file mode 100644
index 0000000..f2b8fdc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.py
@@ -0,0 +1,893 @@
+# -*- coding: utf-8 -*-
+from __future__ import division, absolute_import, print_function
+
+import sys
+import gc
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
+    assert_raises_regex,
+    )
+import textwrap
+
+class TestArrayRepr(object):
+    def test_nan_inf(self):
+        x = np.array([np.nan, np.inf])
+        assert_equal(repr(x), 'array([nan, inf])')
+
+    def test_subclass(self):
+        class sub(np.ndarray): pass
+
+        # one dimensional
+        x1d = np.array([1, 2]).view(sub)
+        assert_equal(repr(x1d), 'sub([1, 2])')
+
+        # two dimensional
+        x2d = np.array([[1, 2], [3, 4]]).view(sub)
+        assert_equal(repr(x2d),
+            'sub([[1, 2],\n'
+            '     [3, 4]])')
+
+        # two dimensional with flexible dtype
+        xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
+        assert_equal(repr(xstruct),
+            "sub([[(1,), (1,)],\n"
+            "     [(1,), (1,)]], dtype=[('a', '<i4')])"
+        )
+
+    def test_0d_object_subclass(self):
+        # make sure that subclasses which return 0d arrays
+        # from __getitem__ are printed correctly
+        class sub(np.ndarray):
+            def __new__(cls, inp):
+                obj = np.asarray(inp).view(cls)
+                return obj
+
+            def __getitem__(self, ind):
+                ret = super(sub, self).__getitem__(ind)
+                return sub(ret)
+
+        x = sub(1)
+        assert_equal(repr(x), 'sub(1)')
+        assert_equal(str(x), '1')
+
+        x = sub([1, 1])
+        assert_equal(repr(x), 'sub([1, 1])')
+        assert_equal(str(x), '[1 1]')
+
+        # check it works properly with object arrays too
+        x = sub(None)
+        assert_equal(repr(x), 'sub(None, dtype=object)')
+        assert_equal(str(x), 'None')
+
+        # plus recursive object arrays (even depth > 1)
+        y = sub(None)
+        x[()] = y
+        y[()] = x
+        assert_equal(repr(x),
+            'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
+        assert_equal(str(x), '...')
+        x[()] = 0 # resolve circular references for garbage collector
+
+        # nested 0d-subclass-object
+        x = sub(None)
+        x[()] = sub(None)
+        assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
+        assert_equal(str(x), 'None')
+
+        # gh-10663
+        class DuckCounter(np.ndarray):
+            def __getitem__(self, item):
+                result = super(DuckCounter, self).__getitem__(item)
+                if not isinstance(result, DuckCounter):
+                    result = result[...].view(DuckCounter)
+                return result
+
+            def to_string(self):
+                return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
+
+            def __str__(self):
+                if self.shape == ():
+                    return self.to_string()
+                else:
+                    fmt = {'all': lambda x: x.to_string()}
+                    return np.array2string(self, formatter=fmt)
+
+        dc = np.arange(5).view(DuckCounter)
+        assert_equal(str(dc), "[zero one two many many]")
+        assert_equal(str(dc[0]), "zero")
+
+    def test_self_containing(self):
+        arr0d = np.array(None)
+        arr0d[()] = arr0d
+        assert_equal(repr(arr0d),
+            'array(array(..., dtype=object), dtype=object)')
+        arr0d[()] = 0 # resolve recursion for garbage collector
+
+        arr1d = np.array([None, None])
+        arr1d[1] = arr1d
+        assert_equal(repr(arr1d),
+            'array([None, array(..., dtype=object)], dtype=object)')
+        arr1d[1] = 0 # resolve recursion for garbage collector
+
+        first = np.array(None)
+        second = np.array(None)
+        first[()] = second
+        second[()] = first
+        assert_equal(repr(first),
+            'array(array(array(..., dtype=object), dtype=object), dtype=object)')
+        first[()] 
= 0 # resolve circular references for garbage collector + + def test_containing_list(self): + # printing square brackets directly would be ambiguuous + arr1d = np.array([None, None]) + arr1d[0] = [1, 2] + arr1d[1] = [3] + assert_equal(repr(arr1d), + 'array([list([1, 2]), list([3])], dtype=object)') + + def test_void_scalar_recursion(self): + # gh-9345 + repr(np.void(b'test')) # RecursionError ? + + def test_fieldless_structured(self): + # gh-10366 + no_fields = np.dtype([]) + arr_no_fields = np.empty(4, dtype=no_fields) + assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') + + +class TestComplexArray(object): + def test_str(self): + rvals = [0, 1, -1, np.inf, -np.inf, np.nan] + cvals = [complex(rp, ip) for rp in rvals for ip in rvals] + dtypes = [np.complex64, np.cdouble, np.clongdouble] + actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] + wanted = [ + '[0.+0.j]', '[0.+0.j]', '[0.+0.j]', + '[0.+1.j]', '[0.+1.j]', '[0.+1.j]', + '[0.-1.j]', '[0.-1.j]', '[0.-1.j]', + '[0.+infj]', '[0.+infj]', '[0.+infj]', + '[0.-infj]', '[0.-infj]', '[0.-infj]', + '[0.+nanj]', '[0.+nanj]', '[0.+nanj]', + '[1.+0.j]', '[1.+0.j]', '[1.+0.j]', + '[1.+1.j]', '[1.+1.j]', '[1.+1.j]', + '[1.-1.j]', '[1.-1.j]', '[1.-1.j]', + '[1.+infj]', '[1.+infj]', '[1.+infj]', + '[1.-infj]', '[1.-infj]', '[1.-infj]', + '[1.+nanj]', '[1.+nanj]', '[1.+nanj]', + '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]', + '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]', + '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]', + '[-1.+infj]', '[-1.+infj]', '[-1.+infj]', + '[-1.-infj]', '[-1.-infj]', '[-1.-infj]', + '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]', + '[inf+0.j]', '[inf+0.j]', '[inf+0.j]', + '[inf+1.j]', '[inf+1.j]', '[inf+1.j]', + '[inf-1.j]', '[inf-1.j]', '[inf-1.j]', + '[inf+infj]', '[inf+infj]', '[inf+infj]', + '[inf-infj]', '[inf-infj]', '[inf-infj]', + '[inf+nanj]', '[inf+nanj]', '[inf+nanj]', + '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]', + '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]', + '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]', + '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', + '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', + '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', + '[nan+0.j]', '[nan+0.j]', '[nan+0.j]', + '[nan+1.j]', '[nan+1.j]', '[nan+1.j]', + '[nan-1.j]', '[nan-1.j]', '[nan-1.j]', + '[nan+infj]', '[nan+infj]', '[nan+infj]', + '[nan-infj]', '[nan-infj]', '[nan-infj]', + '[nan+nanj]', '[nan+nanj]', '[nan+nanj]'] + + for res, val in zip(actual, wanted): + assert_equal(res, val) + +class TestArray2String(object): + def test_basic(self): + """Basic test of array2string.""" + a = np.arange(3) + assert_(np.array2string(a) == '[0 1 2]') + assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') + assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') + + def test_unexpected_kwarg(self): + # ensure than an appropriate TypeError + # is raised when array2string receives + # an unexpected kwarg + + with assert_raises_regex(TypeError, 'nonsense'): + np.array2string(np.array([1, 2, 3]), + nonsense=None) + + def test_format_function(self): + """Test custom format function for each element in array.""" + def _format_function(x): + if np.abs(x) < 1: + return '.' + elif np.abs(x) < 2: + return 'o' + else: + return 'O' + + x = np.arange(3) + if sys.version_info[0] >= 3: + x_hex = "[0x0 0x1 0x2]" + x_oct = "[0o0 0o1 0o2]" + else: + x_hex = "[0x0L 0x1L 0x2L]" + x_oct = "[0L 01L 02L]" + assert_(np.array2string(x, formatter={'all':_format_function}) == + "[. 
o O]") + assert_(np.array2string(x, formatter={'int_kind':_format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == + "[0.0000 1.0000 2.0000]") + assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), + x_hex) + assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), + x_oct) + + x = np.arange(3.) + assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == + "[0.00 1.00 2.00]") + assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == + "[0.00 1.00 2.00]") + + s = np.array(['abc', 'def']) + assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == + '[abcabc defdef]') + + # check for backcompat that using FloatFormat works and emits warning + with assert_warns(DeprecationWarning): + fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False) + assert_equal(np.array2string(x, formatter={'float_kind': fmt}), + '[0. 1. 2.]') + + def test_structure_format(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_equal(np.array2string(x), + "[('Sarah', [8., 7.]) ('John', [6., 7.])]") + + np.set_printoptions(legacy='1.13') + try: + # for issue #5692 + A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + ('NaT',) ('NaT',) ('NaT',)]""") + ) + finally: + np.set_printoptions(legacy=False) + + # same again, but with non-legacy behavior + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',)]""") + ) + + # and again, with timedeltas + A = np.full(10, 123456, dtype=[("A", "m8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") + ) + + # See #8160 + struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)]) + assert_equal(np.array2string(struct_int), + "[([ 1, -1],) ([123, 1],)]") + struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)], + dtype=[('B', 'i4', (2, 2))]) + assert_equal(np.array2string(struct_2dint), + "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]") + + # See #8172 + array_scalar = np.array( + (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) + assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") + + def test_unstructured_void_repr(self): + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, + 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8') + assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") + assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") + assert_equal(repr(a), + r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n" + r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") + + assert_equal(eval(repr(a), vars(np)), a) + assert_equal(eval(repr(a[0]), vars(np)), a[0]) + + def test_edgeitems_kwarg(self): + # previously the global print options would be taken over the kwarg + arr = np.zeros(3, int) + assert_equal( + np.array2string(arr, edgeitems=1, threshold=0), + "[0 ... 
0]" + ) + + def test_summarize_1d(self): + A = np.arange(1001) + strA = '[ 0 1 2 ... 998 999 1000]' + assert_equal(str(A), strA) + + reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' + assert_equal(repr(A), reprA) + + def test_summarize_2d(self): + A = np.arange(1002).reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ + ' [ 501, 502, 503, ..., 999, 1000, 1001]])' + assert_equal(repr(A), reprA) + + def test_linewidth(self): + a = np.full(6, 1) + + def make_str(a, width, **kw): + return np.array2string(a, separator="", max_line_width=width, **kw) + + assert_equal(make_str(a, 8, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 7, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n' + ' 11]') + + assert_equal(make_str(a, 8), '[111111]') + assert_equal(make_str(a, 7), '[11111\n' + ' 1]') + assert_equal(make_str(a, 5), '[111\n' + ' 111]') + + b = a[None,None,:] + + assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n' + ' 1]]]') + + assert_equal(make_str(b, 12), '[[[111111]]]') + assert_equal(make_str(b, 9), '[[[111\n' + ' 111]]]') + assert_equal(make_str(b, 8), '[[[11\n' + ' 11\n' + ' 11]]]') + + def test_wide_element(self): + a = np.array(['xxxxx']) + assert_equal( + np.array2string(a, max_line_width=5), + "['xxxxx']" + ) + assert_equal( + np.array2string(a, max_line_width=5, legacy='1.13'), + "[ 'xxxxx']" + ) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_refcount(self): + # make sure we do not hold references to the array due to a recursive + # closure (gh-10620) + gc.disable() + a = np.arange(2) + r1 = sys.getrefcount(a) + np.array2string(a) + np.array2string(a) + r2 = sys.getrefcount(a) + gc.collect() + gc.enable() + assert_(r1 == r2) + +class TestPrintOptions(object): + """Test getting and setting global print options.""" + + def setup(self): + self.oldopts = np.get_printoptions() + + def teardown(self): + np.set_printoptions(**self.oldopts) + + def test_basic(self): + x = np.array([1.5, 0, 1.234567890]) + assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])") + np.set_printoptions(precision=4) + assert_equal(repr(x), "array([1.5 , 0. 
, 1.2346])") + + def test_precision_zero(self): + np.set_printoptions(precision=0) + for values, string in ( + ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."), + ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."), + ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."), + ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")): + x = np.array(values) + assert_equal(repr(x), "array([%s])" % string) + + def test_formatter(self): + x = np.arange(3) + np.set_printoptions(formatter={'all':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + + def test_formatter_reset(self): + x = np.arange(3) + np.set_printoptions(formatter={'all':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + np.set_printoptions(formatter={'int':None}) + assert_equal(repr(x), "array([0, 1, 2])") + + np.set_printoptions(formatter={'all':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + np.set_printoptions(formatter={'all':None}) + assert_equal(repr(x), "array([0, 1, 2])") + + np.set_printoptions(formatter={'int':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + np.set_printoptions(formatter={'int_kind':None}) + assert_equal(repr(x), "array([0, 1, 2])") + + x = np.arange(3.) + np.set_printoptions(formatter={'float':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1.0, 0.0, 1.0])") + np.set_printoptions(formatter={'float_kind':None}) + assert_equal(repr(x), "array([0., 1., 2.])") + + def test_0d_arrays(self): + unicode = type(u'') + + assert_equal(unicode(np.array(u'café', '= 3: + assert_equal(repr(np.array('café', '= 3 else '|S4' + assert_equal(repr(np.ones(3, dtype=styp)), + "array(['1', '1', '1'], dtype='{}')".format(styp)) + assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\ + array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'], + dtype='{}')""".format(styp))) + + def test_linewidth_repr(self): + a = np.full(7, fill_value=2) + np.set_printoptions(linewidth=17) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2])""") + ) + np.set_printoptions(linewidth=17, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, 2])""") + ) + + a = np.full(8, fill_value=2) + + np.set_printoptions(linewidth=18, legacy=False) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2, 2])""") + ) + + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, 2, + 2, 2, 2, 2])""") + ) + + def test_linewidth_str(self): + a = np.full(18, fill_value=2) + np.set_printoptions(linewidth=18) + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 + 2 2]""") + ) + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 2]""") + ) + + def test_edgeitems(self): + np.set_printoptions(edgeitems=1, threshold=1) + a = np.arange(27).reshape((3, 3, 3)) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + + [[18, ..., 20], + ..., + [24, ..., 26]]])""") + ) + + b = np.zeros((3, 3, 1, 1)) + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[0.]], + + ..., + + [[0.]]], + + + ..., + + + [[[0.]], + + ..., + + [[0.]]]])""") + ) + + # 1.13 had extra trailing spaces, and was missing newlines + np.set_printoptions(legacy='1.13') + + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + 
..., + [[18, ..., 20], + ..., + [24, ..., 26]]])""") + ) + + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[ 0.]], + + ..., + [[ 0.]]], + + + ..., + [[[ 0.]], + + ..., + [[ 0.]]]])""") + ) + + def test_bad_args(self): + assert_raises(ValueError, np.set_printoptions, threshold='nan') + assert_raises(ValueError, np.set_printoptions, threshold=u'1') + assert_raises(ValueError, np.set_printoptions, threshold=b'1') + +def test_unicode_object_array(): + import sys + if sys.version_info[0] >= 3: + expected = "array(['é'], dtype=object)" + else: + expected = "array([u'\\xe9'], dtype=object)" + x = np.array([u'\xe9'], dtype=object) + assert_equal(repr(x), expected) + + +class TestContextManager(object): + def test_ctx_mgr(self): + # test that context manager actuall works + with np.printoptions(precision=2): + s = str(np.array([2.0]) / 3) + assert_equal(s, '[0.67]') + + def test_ctx_mgr_restores(self): + # test that print options are actually restrored + opts = np.get_printoptions() + with np.printoptions(precision=opts['precision'] - 1, + linewidth=opts['linewidth'] - 4): + pass + assert_equal(np.get_printoptions(), opts) + + def test_ctx_mgr_exceptions(self): + # test that print options are restored even if an exception is raised + opts = np.get_printoptions() + try: + with np.printoptions(precision=2, linewidth=11): + raise ValueError + except ValueError: + pass + assert_equal(np.get_printoptions(), opts) + + def test_ctx_mgr_as_smth(self): + opts = {"precision": 2} + with np.printoptions(**opts) as ctx: + saved_opts = ctx.copy() + assert_equal({k: saved_opts[k] for k in opts}, opts) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.pyc new file mode 100644 index 0000000..303f866 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_datetime.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_datetime.py new file mode 100644 index 0000000..9832b42 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_datetime.py @@ -0,0 +1,2222 @@ +from __future__ import division, absolute_import, print_function + + +import numpy +import numpy as np +import datetime +import pytest +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_warns, suppress_warnings, + assert_raises_regex, + ) +from numpy.core.numeric import pickle + +# Use pytz to test out various time zones if available +try: + from pytz import timezone as tz + _has_pytz = True +except ImportError: + _has_pytz = False + +try: + RecursionError +except NameError: + RecursionError = RuntimeError # python < 3.5 + + +class TestDateTime(object): + def test_datetime_dtype_creation(self): + for unit in ['Y', 'M', 'W', 'D', + 'h', 'm', 's', 'ms', 'us', + 'ns', 'ps', 'fs', 'as']: + dt1 = np.dtype('M8[750%s]' % unit) + assert_(dt1 == np.dtype('datetime64[750%s]' % unit)) + dt2 = np.dtype('m8[%s]' % unit) + assert_(dt2 == np.dtype('timedelta64[%s]' % unit)) + + # Generic units shouldn't add [] to the end + assert_equal(str(np.dtype("M8")), "datetime64") + + # Should be possible to specify the endianness + assert_equal(np.dtype("=M8"), np.dtype("M8")) + assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]")) + assert_(np.dtype(">M8") == np.dtype("M8") or + np.dtype("M8[D]") == np.dtype("M8[D]") or + np.dtype("M8") != np.dtype("m8") == 
np.dtype("m8") or + np.dtype("m8[D]") == np.dtype("m8[D]") or + np.dtype("m8") != np.dtype(" Scalars + assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]')) + assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]')) + assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]')) + assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]')) + assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]')) + + # Arrays -> Scalars + assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]')) + assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]')) + assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]')) + assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]')) + assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]')) + + def test_days_creation(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)/4 + 3 - 365) + assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)/4 + 3) + assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)/4 + 3 + 366) + assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), + (1900-1970)*365 - (1970-1900)//4) + assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), + (1900-1970)*365 - (1970-1900)//4 + 365) + assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1) + assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1) + assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365) + assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365) + assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365) + assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365) + assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1) + assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1) + assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4) + assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 366) + assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), + (2400 - 1970)*365 + (2400 - 1972)//4 - 3) + assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), + (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366) + + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29) + assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28) + assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21) + + def test_days_to_pydate(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('O'), + datetime.date(1599, 1, 1)) + assert_equal(np.array('1600', dtype='M8[D]').astype('O'), + datetime.date(1600, 1, 1)) + assert_equal(np.array('1601', dtype='M8[D]').astype('O'), + datetime.date(1601, 1, 1)) + assert_equal(np.array('1900', dtype='M8[D]').astype('O'), + datetime.date(1900, 1, 1)) + assert_equal(np.array('1901', dtype='M8[D]').astype('O'), + datetime.date(1901, 1, 1)) + assert_equal(np.array('2000', dtype='M8[D]').astype('O'), + datetime.date(2000, 1, 1)) + assert_equal(np.array('2001', 
dtype='M8[D]').astype('O'), + datetime.date(2001, 1, 1)) + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), + datetime.date(1600, 2, 29)) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), + datetime.date(1600, 3, 1)) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), + datetime.date(2001, 3, 22)) + + def test_dtype_comparison(self): + assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) + assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) + assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) + assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) + + def test_pydatetime_creation(self): + a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') + assert_equal(a[0], a[1]) + # Will fail if the date changes during the exact right moment + a = np.array(['today', datetime.date.today()], dtype='M8[D]') + assert_equal(a[0], a[1]) + # datetime.datetime.now() returns local time, not UTC + #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') + #assert_equal(a[0], a[1]) + + # we can give a datetime.date time units + assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), + np.array(np.datetime64('1960-03-12T00:00:00'))) + + def test_datetime_string_conversion(self): + a = ['2011-03-16', '1920-01-01', '2013-05-19'] + str_a = np.array(a, dtype='S') + uni_a = np.array(a, dtype='U') + dt_a = np.array(a, dtype='M') + + # String to datetime + assert_equal(dt_a, str_a.astype('M')) + assert_equal(dt_a.dtype, str_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = str_a + assert_equal(dt_a, dt_b) + + # Datetime to string + assert_equal(str_a, dt_a.astype('S0')) + str_b = np.empty_like(str_a) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + # Unicode to datetime + assert_equal(dt_a, uni_a.astype('M')) + assert_equal(dt_a.dtype, uni_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = uni_a + assert_equal(dt_a, dt_b) + + # Datetime to unicode + assert_equal(uni_a, dt_a.astype('U')) + uni_b = np.empty_like(uni_a) + uni_b[...] = dt_a + assert_equal(uni_a, uni_b) + + # Datetime to long string - gh-9712 + assert_equal(str_a, dt_a.astype((np.string_, 128))) + str_b = np.empty(str_a.shape, dtype=(np.string_, 128)) + str_b[...] 
= dt_a + assert_equal(str_a, str_b) + + def test_datetime_array_str(self): + a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M') + assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']") + + a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') + assert_equal(np.array2string(a, separator=', ', + formatter={'datetime': lambda x: + "'%s'" % np.datetime_as_string(x, timezone='UTC')}), + "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") + + # Check that one NaT doesn't corrupt subsequent entries + a = np.array(['2010', 'NaT', '2030']).astype('M') + assert_equal(str(a), "['2010' 'NaT' '2030']") + + def test_timedelta_array_str(self): + a = np.array([-1, 0, 100], dtype='m') + assert_equal(str(a), "[ -1 0 100]") + a = np.array(['NaT', 'NaT'], dtype='m') + assert_equal(str(a), "['NaT' 'NaT']") + # Check right-alignment with NaTs + a = np.array([-1, 'NaT', 0], dtype='m') + assert_equal(str(a), "[ -1 'NaT' 0]") + a = np.array([-1, 'NaT', 1234567], dtype='m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + + # Test with other byteorder: + a = np.array([-1, 'NaT', 1234567], dtype='>m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + a = np.array([-1, 'NaT', 1234567], dtype=''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + + def test_setstate(self): + "Verify that datetime dtype __setstate__ can handle bad arguments" + dt = np.dtype('>M8[us]') + assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + + def test_dtype_promotion(self): + # datetime datetime computes the metadata gcd + # timedelta timedelta computes the metadata gcd + for mM in ['m', 'M']: + assert_equal( + np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')), + np.dtype(mM+'8[2Y]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')), + np.dtype(mM+'8[3Y]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')), + np.dtype(mM+'8[2M]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')), + np.dtype(mM+'8[1D]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')), + np.dtype(mM+'8[s]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')), + np.dtype(mM+'8[7s]')) + # timedelta timedelta raises when there is no reasonable gcd + assert_raises(TypeError, np.promote_types, + np.dtype('m8[Y]'), np.dtype('m8[D]')) + assert_raises(TypeError, np.promote_types, + np.dtype('m8[M]'), np.dtype('m8[W]')) + # timedelta timedelta may overflow with big unit ranges + assert_raises(OverflowError, np.promote_types, + np.dtype('m8[W]'), np.dtype('m8[fs]')) + assert_raises(OverflowError, np.promote_types, + np.dtype('m8[s]'), np.dtype('m8[as]')) + + def test_cast_overflow(self): + # gh-4486 + def cast(): + numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("= self.B)) + assert_(np.all(self.A <= self.B)) + assert_(not np.any(self.A > self.B)) + assert_(not np.any(self.A < self.B)) + assert_(not np.any(self.A != self.B)) + +class TestChar(object): + def setup(self): + self.A = np.array('abc1', dtype='c').view(np.chararray) + + def test_it(self): + assert_equal(self.A.shape, (4,)) + assert_equal(self.A.upper()[:2].tobytes(), 
b'AB') + +class TestComparisons(object): + def setup(self): + self.A = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.chararray) + self.B = np.array([['efg', '123 '], + ['051', 'tuv']]).view(np.chararray) + + def test_not_equal(self): + assert_array_equal((self.A != self.B), [[True, False], [True, True]]) + + def test_equal(self): + assert_array_equal((self.A == self.B), [[False, True], [False, False]]) + + def test_greater_equal(self): + assert_array_equal((self.A >= self.B), [[False, True], [True, True]]) + + def test_less_equal(self): + assert_array_equal((self.A <= self.B), [[True, True], [False, False]]) + + def test_greater(self): + assert_array_equal((self.A > self.B), [[False, False], [True, True]]) + + def test_less(self): + assert_array_equal((self.A < self.B), [[True, False], [False, False]]) + +class TestComparisonsMixed1(TestComparisons): + """Ticket #1276""" + + def setup(self): + TestComparisons.setup(self) + self.B = np.array([['efg', '123 '], + ['051', 'tuv']], np.unicode_).view(np.chararray) + +class TestComparisonsMixed2(TestComparisons): + """Ticket #1276""" + + def setup(self): + TestComparisons.setup(self) + self.A = np.array([['abc', '123'], + ['789', 'xyz']], np.unicode_).view(np.chararray) + +class TestInformation(object): + def setup(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) + self.B = np.array([[u' \u03a3 ', u''], + [u'12345', u'MixedCase'], + [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) + + def test_len(self): + assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + + def test_count(self): + assert_(issubclass(self.A.count('').dtype.type, np.integer)) + assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + # Python doesn't seem to like counting NULL characters + # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) + # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + + def test_endswith(self): + assert_(issubclass(self.A.endswith('').dtype.type, np.bool_)) + assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.endswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + def test_find(self): + assert_(issubclass(self.A.find('a').dtype.type, np.integer)) + assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]]) + assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]]) + + def test_index(self): + + def fail(): + self.A.index('a') + + assert_raises(ValueError, fail) + assert_(np.char.index('abcba', 'b') == 1) + assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) + + def test_isalnum(self): + assert_(issubclass(self.A.isalnum().dtype.type, np.bool_)) + assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + + def test_isalpha(self): + 
assert_(issubclass(self.A.isalpha().dtype.type, np.bool_)) + assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + + def test_isdigit(self): + assert_(issubclass(self.A.isdigit().dtype.type, np.bool_)) + assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + + def test_islower(self): + assert_(issubclass(self.A.islower().dtype.type, np.bool_)) + assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + + def test_isspace(self): + assert_(issubclass(self.A.isspace().dtype.type, np.bool_)) + assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + + def test_istitle(self): + assert_(issubclass(self.A.istitle().dtype.type, np.bool_)) + assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + + def test_isupper(self): + assert_(issubclass(self.A.isupper().dtype.type, np.bool_)) + assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + + def test_rfind(self): + assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) + assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + + def test_rindex(self): + + def fail(): + self.A.rindex('a') + + assert_raises(ValueError, fail) + assert_(np.char.rindex('abcba', 'b') == 3) + assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) + + def test_startswith(self): + assert_(issubclass(self.A.startswith('').dtype.type, np.bool_)) + assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.startswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + +class TestMethods(object): + def setup(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.chararray) + self.B = np.array([[u' \u03a3 ', u''], + [u'12345', u'MixedCase'], + [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) + + def test_capitalize(self): + tgt = [[b' abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.capitalize().dtype.type, np.string_)) + assert_array_equal(self.A.capitalize(), tgt) + + tgt = [[u' \u03c3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_)) + assert_array_equal(self.B.capitalize(), tgt) + + def test_center(self): + assert_(issubclass(self.A.center(10).dtype.type, np.string_)) + C = self.A.center([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.center(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.center(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO ', b' FOO '], + [b' FOO ', b' FOO ']] + assert_(issubclass(C.dtype.type, np.string_)) + assert_array_equal(C, tgt) + + def test_decode(self): + if sys.version_info[0] >= 3: + A = np.char.array([b'\\u03a3']) + assert_(A.decode('unicode-escape')[0] == '\u03a3') + else: + with suppress_warnings() as sup: + if sys.py3kwarning: + sup.filter(DeprecationWarning, "'hex_codec'") + A = np.char.array(['736563726574206d657373616765']) + assert_(A.decode('hex_codec')[0] == 'secret message') + + 
def test_encode(self): + B = self.B.encode('unicode_escape') + assert_(B[0][0] == str(' \\u03a3 ').encode('latin1')) + + def test_expandtabs(self): + T = self.A.expandtabs() + assert_(T[2, 0] == b'123 345 \0') + + def test_join(self): + if sys.version_info[0] >= 3: + # NOTE: list(b'123') == [49, 50, 51] + # so that b','.join(b'123') results to an error on Py3 + A0 = self.A.decode('ascii') + else: + A0 = self.A + + A = np.char.join([',', '#'], A0) + if sys.version_info[0] >= 3: + assert_(issubclass(A.dtype.type, np.unicode_)) + else: + assert_(issubclass(A.dtype.type, np.string_)) + tgt = np.array([[' ,a,b,c, ', ''], + ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], + ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) + assert_array_equal(np.char.join([',', '#'], A0), tgt) + + def test_ljust(self): + assert_(issubclass(self.A.ljust(10).dtype.type, np.string_)) + + C = self.A.ljust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.ljust(20, b'#') + assert_array_equal(C.startswith(b'#'), [ + [False, True], [False, False], [False, False]]) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.ljust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b'FOO ', b'FOO '], + [b'FOO ', b'FOO ']] + assert_(issubclass(C.dtype.type, np.string_)) + assert_array_equal(C, tgt) + + def test_lower(self): + tgt = [[b' abc ', b''], + [b'12345', b'mixedcase'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.lower().dtype.type, np.string_)) + assert_array_equal(self.A.lower(), tgt) + + tgt = [[u' \u03c3 ', u''], + [u'12345', u'mixedcase'], + [u'123 \t 345 \0 ', u'upper']] + assert_(issubclass(self.B.lower().dtype.type, np.unicode_)) + assert_array_equal(self.B.lower(), tgt) + + def test_lstrip(self): + tgt = [[b'abc ', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.lstrip().dtype.type, np.string_)) + assert_array_equal(self.A.lstrip(), tgt) + + tgt = [[b' abc', b''], + [b'2345', b'ixedCase'], + [b'23 \t 345 \x00', b'UPPER']] + assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + + tgt = [[u'\u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_)) + assert_array_equal(self.B.lstrip(), tgt) + + def test_partition(self): + P = self.A.partition([b'3', b'M']) + tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] + assert_(issubclass(P.dtype.type, np.string_)) + assert_array_equal(P, tgt) + + def test_replace(self): + R = self.A.replace([b'3', b'a'], + [b'##########', b'@']) + tgt = [[b' abc ', b''], + [b'12##########45', b'MixedC@se'], + [b'12########## \t ##########45 \x00', b'UPPER']] + assert_(issubclass(R.dtype.type, np.string_)) + assert_array_equal(R, tgt) + + if sys.version_info[0] < 3: + # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3 + R = self.A.replace(b'a', u'\u03a3') + tgt = [[u' \u03a3bc ', ''], + ['12345', u'MixedC\u03a3se'], + ['123 \t 345 \x00', 'UPPER']] + assert_(issubclass(R.dtype.type, np.unicode_)) + assert_array_equal(R, tgt) + + def test_rjust(self): + assert_(issubclass(self.A.rjust(10).dtype.type, np.string_)) + + C = self.A.rjust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.rjust(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_array_equal(C.endswith(b'#'), + [[False, True], [False, False], [False, False]]) + + C = np.char.rjust(b'FOO', [[10, 20], [15, 8]]) + tgt 
= [[b' FOO', b' FOO'], + [b' FOO', b' FOO']] + assert_(issubclass(C.dtype.type, np.string_)) + assert_array_equal(C, tgt) + + def test_rpartition(self): + P = self.A.rpartition([b'3', b'M']) + tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] + assert_(issubclass(P.dtype.type, np.string_)) + assert_array_equal(P, tgt) + + def test_rsplit(self): + A = self.A.rsplit(b'3') + tgt = [[[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_rstrip(self): + assert_(issubclass(self.A.rstrip().dtype.type, np.string_)) + + tgt = [[b' abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_array_equal(self.A.rstrip(), tgt) + + tgt = [[b' abc ', b''], + [b'1234', b'MixedCase'], + [b'123 \t 345 \x00', b'UPP'] + ] + assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + + tgt = [[u' \u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_)) + assert_array_equal(self.B.rstrip(), tgt) + + def test_strip(self): + tgt = [[b'abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_(issubclass(self.A.strip().dtype.type, np.string_)) + assert_array_equal(self.A.strip(), tgt) + + tgt = [[b' abc ', b''], + [b'234', b'ixedCas'], + [b'23 \t 345 \x00', b'UPP']] + assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + + tgt = [[u'\u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.strip().dtype.type, np.unicode_)) + assert_array_equal(self.B.strip(), tgt) + + def test_split(self): + A = self.A.split(b'3') + tgt = [ + [[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_splitlines(self): + A = np.char.array(['abc\nfds\nwer']).splitlines() + assert_(issubclass(A.dtype.type, np.object_)) + assert_(A.shape == (1,)) + assert_(len(A[0]) == 3) + + def test_swapcase(self): + tgt = [[b' ABC ', b''], + [b'12345', b'mIXEDcASE'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.swapcase().dtype.type, np.string_)) + assert_array_equal(self.A.swapcase(), tgt) + + tgt = [[u' \u03c3 ', u''], + [u'12345', u'mIXEDcASE'], + [u'123 \t 345 \0 ', u'upper']] + assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_)) + assert_array_equal(self.B.swapcase(), tgt) + + def test_title(self): + tgt = [[b' Abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.title().dtype.type, np.string_)) + assert_array_equal(self.A.title(), tgt) + + tgt = [[u' \u03a3 ', u''], + [u'12345', u'Mixedcase'], + [u'123 \t 345 \0 ', u'Upper']] + assert_(issubclass(self.B.title().dtype.type, np.unicode_)) + assert_array_equal(self.B.title(), tgt) + + def test_upper(self): + tgt = [[b' ABC ', b''], + [b'12345', b'MIXEDCASE'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.upper().dtype.type, np.string_)) + assert_array_equal(self.A.upper(), tgt) + + tgt = [[u' \u03a3 ', u''], + [u'12345', u'MIXEDCASE'], + [u'123 \t 345 \0 ', u'UPPER']] + assert_(issubclass(self.B.upper().dtype.type, np.unicode_)) + assert_array_equal(self.B.upper(), tgt) + + def test_isnumeric(self): + + def fail(): + self.A.isnumeric() + + assert_raises(TypeError, fail) + 
assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_)) + assert_array_equal(self.B.isnumeric(), [ + [False, False], [True, False], [False, False]]) + + def test_isdecimal(self): + + def fail(): + self.A.isdecimal() + + assert_raises(TypeError, fail) + assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_)) + assert_array_equal(self.B.isdecimal(), [ + [False, False], [True, False], [False, False]]) + + +class TestOperations(object): + def setup(self): + self.A = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.chararray) + self.B = np.array([['efg', '456'], + ['051', 'tuv']]).view(np.chararray) + + def test_add(self): + AB = np.array([['abcefg', '123456'], + ['789051', 'xyztuv']]).view(np.chararray) + assert_array_equal(AB, (self.A + self.B)) + assert_(len((self.A + self.B)[0][0]) == 6) + + def test_radd(self): + QA = np.array([['qabc', 'q123'], + ['q789', 'qxyz']]).view(np.chararray) + assert_array_equal(QA, ('q' + self.A)) + + def test_mul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) + + assert_array_equal(Ar, (self.A * r)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + A*ob + + def test_rmul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) + assert_array_equal(Ar, (r * self.A)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + ob * A + + def test_mod(self): + """Ticket #856""" + F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray) + C = np.array([[3, 7], [19, 1]]) + FC = np.array([['3', '7.000000'], + ['19', '1']]).view(np.chararray) + assert_array_equal(FC, F % C) + + A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray) + A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray) + assert_array_equal(A1, (A % 1)) + + A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray) + assert_array_equal(A2, (A % [[1, 2], [3, 4]])) + + def test_rmod(self): + assert_(("%s" % self.A) == str(self.A)) + assert_(("%r" % self.A) == repr(self.A)) + + for ob in [42, object()]: + with assert_raises_regex( + TypeError, "unsupported operand type.* and 'chararray'"): + ob % self.A + + def test_slice(self): + """Regression test for https://github.com/numpy/numpy/issues/5982""" + + arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']], + dtype='S4').view(np.chararray) + sl1 = arr[:] + assert_array_equal(sl1, arr) + assert_(sl1.base is arr) + assert_(sl1.base.base is arr.base) + + sl2 = arr[:, :] + assert_array_equal(sl2, arr) + assert_(sl2.base is arr) + assert_(sl2.base.base is arr.base) + + assert_(arr[0, 0] == b'abc') + + +def test_empty_indexing(): + """Regression test for ticket 1948.""" + # Check that indexing a chararray with an empty list/array returns an + # empty chararray instead of a chararray with a single empty string in it. 
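+    # [Editor's sketch, not from the upstream suite] Fancy indexing with an
+    # empty index keeps the dtype but selects zero elements, e.g. (assuming
+    # NumPy 1.16-era behaviour) np.chararray((4,))[[]].shape == (0,).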
+ s = np.chararray((4,)) + assert_(s[[]].size == 0) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.pyc new file mode 100644 index 0000000..b0941bf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.py new file mode 100644 index 0000000..edb5d5e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.py @@ -0,0 +1,535 @@ +""" +Tests related to deprecation warnings. Also a convenient place +to document how deprecations should eventually be turned into errors. + +""" +from __future__ import division, absolute_import, print_function + +import datetime +import sys +import operator +import warnings +import pytest + +import numpy as np +from numpy.testing import ( + assert_raises, assert_warns, assert_ + ) + +try: + import pytz + _has_pytz = True +except ImportError: + _has_pytz = False + + +class _DeprecationTestCase(object): + # Just as warning: warnings uses re.match, so the start of this message + # must match. + message = '' + warning_cls = DeprecationWarning + + def setup(self): + self.warn_ctx = warnings.catch_warnings(record=True) + self.log = self.warn_ctx.__enter__() + + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. + # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + + def teardown(self): + self.warn_ctx.__exit__() + + def assert_deprecated(self, function, num=1, ignore_others=False, + function_fails=False, + exceptions=np._NoValue, + args=(), kwargs={}): + """Test if DeprecationWarnings are given and raised. + + This first checks if the function when called gives `num` + DeprecationWarnings, after that it tries to raise these + DeprecationWarnings and compares them with `exceptions`. + The exceptions can be different for cases where this code path + is simply not anticipated and the exception is replaced. + + Parameters + ---------- + function : callable + The function to test + num : int + Number of DeprecationWarnings to expect. This should normally be 1. + ignore_others : bool + Whether warnings of the wrong type should be ignored (note that + the message is not checked) + function_fails : bool + If the function would normally fail, setting this will check for + warnings inside a try/except block. + exceptions : Exception or tuple of Exceptions + Exception to expect when turning the warnings into an error. + The default checks for DeprecationWarnings. If exceptions is + empty the function is expected to run successfully. 
+ args : tuple
+ Arguments for `function`
+ kwargs : dict
+ Keyword arguments for `function`
+ """
+ # reset the log
+ self.log[:] = []
+
+ if exceptions is np._NoValue:
+ exceptions = (self.warning_cls,)
+
+ try:
+ function(*args, **kwargs)
+ except (Exception if function_fails else tuple()):
+ pass
+
+ # just in case, clear the registry
+ num_found = 0
+ for warning in self.log:
+ if warning.category is self.warning_cls:
+ num_found += 1
+ elif not ignore_others:
+ raise AssertionError(
+ "expected %s but got: %s" %
+ (self.warning_cls.__name__, warning.category))
+ if num is not None and num_found != num:
+ msg = "%i warnings found but %i expected." % (len(self.log), num)
+ lst = [str(w.category) for w in self.log]
+ raise AssertionError("\n".join([msg] + lst))
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error", message=self.message,
+ category=self.warning_cls)
+ try:
+ function(*args, **kwargs)
+ if exceptions != tuple():
+ raise AssertionError(
+ "No error raised during function call")
+ except exceptions:
+ if exceptions == tuple():
+ raise AssertionError(
+ "Error raised during function call")
+
+ def assert_not_deprecated(self, function, args=(), kwargs={}):
+ """Test that warnings are not raised.
+
+ This is just a shorthand for:
+
+ self.assert_deprecated(function, num=0, ignore_others=True,
+ exceptions=tuple(), args=args, kwargs=kwargs)
+ """
+ self.assert_deprecated(function, num=0, ignore_others=True,
+ exceptions=tuple(), args=args, kwargs=kwargs)
+
+
+class _VisibleDeprecationTestCase(_DeprecationTestCase):
+ warning_cls = np.VisibleDeprecationWarning
+
+
+class TestNonTupleNDIndexDeprecation(object):
+ def test_basic(self):
+ a = np.zeros((5, 5))
+ with warnings.catch_warnings():
+ warnings.filterwarnings('always')
+ assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_warns(FutureWarning, a.__getitem__, [slice(None)])
+
+ warnings.filterwarnings('error')
+ assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_raises(FutureWarning, a.__getitem__, [slice(None)])
+
+ # a[[0, 1]] always was advanced indexing, so no error/warning
+ a[[0, 1]]
+
+
+class TestRankDeprecation(_DeprecationTestCase):
+ """Test that np.rank is deprecated. The function should simply be
+ removed. The VisibleDeprecationWarning may become unnecessary.
+ """
+
+ def test(self):
+ a = np.arange(10)
+ assert_warns(np.VisibleDeprecationWarning, np.rank, a)
+
+
+class TestComparisonDeprecations(_DeprecationTestCase):
+ """This tests the deprecation of non-element-wise comparison logic.
+ This used to mean that when an error occurred during element-wise comparison
+ (i.e. broadcasting) NotImplemented was returned, but also in the comparison
+ itself, False was given instead of the error.
+
+ Also test FutureWarning for the None comparison.
+ """
+
+ message = "elementwise.* comparison failed; .*"
+
+ def test_normal_types(self):
+ for op in (operator.eq, operator.ne):
+ # Broadcasting errors:
+ self.assert_deprecated(op, args=(np.zeros(3), []))
+ a = np.zeros(3, dtype='i,i')
+ # (warning is issued a couple of times here)
+ self.assert_deprecated(op, args=(a, a[:-1]), num=None)
+
+ # Element comparison error (numpy array can't be compared).
+ a = np.array([1, np.array([1,2,3])], dtype=object) + b = np.array([1, np.array([1,2,3])], dtype=object) + self.assert_deprecated(op, args=(a, b), num=None) + + def test_string(self): + # For two string arrays, strings always raised the broadcasting error: + a = np.array(['a', 'b']) + b = np.array(['a', 'b', 'c']) + assert_raises(ValueError, lambda x, y: x == y, a, b) + + # The empty list is not cast to string, and this used to pass due + # to dtype mismatch; now (2018-06-21) it correctly leads to a + # FutureWarning. + assert_warns(FutureWarning, lambda: a == []) + + def test_void_dtype_equality_failures(self): + class NotArray(object): + def __array__(self): + raise TypeError + + # Needed so Python 3 does not raise DeprecationWarning twice. + def __ne__(self, other): + return NotImplemented + + self.assert_deprecated(lambda: np.arange(2) == NotArray()) + self.assert_deprecated(lambda: np.arange(2) != NotArray()) + + struct1 = np.zeros(2, dtype="i4,i4") + struct2 = np.zeros(2, dtype="i4,i4,i4") + + assert_warns(FutureWarning, lambda: struct1 == 1) + assert_warns(FutureWarning, lambda: struct1 == struct2) + assert_warns(FutureWarning, lambda: struct1 != 1) + assert_warns(FutureWarning, lambda: struct1 != struct2) + + def test_array_richcompare_legacy_weirdness(self): + # It doesn't really work to use assert_deprecated here, b/c part of + # the point of assert_deprecated is to check that when warnings are + # set to "error" mode then the error is propagated -- which is good! + # But here we are testing a bunch of code that is deprecated *because* + # it has the habit of swallowing up errors and converting them into + # different warnings. So assert_warns will have to be sufficient. + assert_warns(FutureWarning, lambda: np.arange(2) == "a") + assert_warns(FutureWarning, lambda: np.arange(2) != "a") + # No warning for scalar comparisons + with warnings.catch_warnings(): + warnings.filterwarnings("error") + assert_(not (np.array(0) == "a")) + assert_(np.array(0) != "a") + assert_(not (np.int16(0) == "a")) + assert_(np.int16(0) != "a") + + for arg1 in [np.asarray(0), np.int16(0)]: + struct = np.zeros(2, dtype="i4,i4") + for arg2 in [struct, "a"]: + for f in [operator.lt, operator.le, operator.gt, operator.ge]: + if sys.version_info[0] >= 3: + # py3 + with warnings.catch_warnings() as l: + warnings.filterwarnings("always") + assert_raises(TypeError, f, arg1, arg2) + assert_(not l) + else: + # py2 + assert_warns(DeprecationWarning, f, arg1, arg2) + + +class TestDatetime64Timezone(_DeprecationTestCase): + """Parsing of datetime64 with timezones deprecated in 1.11.0, because + datetime64 is now timezone naive rather than UTC only. + + It will be quite a while before we can remove this, because, at the very + least, a lot of existing code uses the 'Z' modifier to avoid conversion + from local time to UTC, even if otherwise it handles time in a timezone + naive fashion. + """ + def test_string(self): + self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',)) + self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',)) + + @pytest.mark.skipif(not _has_pytz, + reason="The pytz module is not available.") + def test_datetime(self): + tz = pytz.timezone('US/Eastern') + dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz) + self.assert_deprecated(np.datetime64, args=(dt,)) + + +class TestNonCContiguousViewDeprecation(_DeprecationTestCase): + """View of non-C-contiguous arrays deprecated in 1.11.0. 
+ + The deprecation will not be raised for arrays that are both C and F + contiguous, as C contiguous is dominant. There are more such arrays + with relaxed stride checking than without so the deprecation is not + as visible with relaxed stride checking in force. + """ + + def test_fortran_contiguous(self): + self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,)) + self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,)) + + +class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase): + """Invalid arguments to the ORDER parameter in array.flatten() should not be + allowed and should raise an error. However, in the interests of not breaking + code that may inadvertently pass invalid arguments to this parameter, a + DeprecationWarning will be issued instead for the time being to give developers + time to refactor relevant code. + """ + + def test_flatten_array_non_string_arg(self): + x = np.zeros((3, 5)) + self.message = ("Non-string object detected for " + "the array ordering. Please pass " + "in 'C', 'F', 'A', or 'K' instead") + self.assert_deprecated(x.flatten, args=(np.pi,)) + + def test_flatten_array_invalid_string_arg(self): + # Tests that a DeprecationWarning is raised + # when a string of length greater than one + # starting with "C", "F", "A", or "K" (case- + # and unicode-insensitive) is passed in for + # the ORDER parameter. Otherwise, a TypeError + # will be raised! + + x = np.zeros((3, 5)) + self.message = ("Non length-one string passed " + "in for the array ordering. Please " + "pass in 'C', 'F', 'A', or 'K' instead") + self.assert_deprecated(x.flatten, args=("FACK",)) + + +class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase): + """Assigning the 'data' attribute of an ndarray is unsafe as pointed + out in gh-7093. Eventually, such assignment should NOT be allowed, but + in the interests of maintaining backwards compatibility, only a Deprecation- + Warning will be raised instead for the time being to give developers time to + refactor relevant code. + """ + + def test_data_attr_assignment(self): + a = np.arange(10) + b = np.linspace(0, 1, 10) + + self.message = ("Assigning the 'data' attribute is an " + "inherently unsafe operation and will " + "be removed in the future.") + self.assert_deprecated(a.__setattr__, args=('data', b.data)) + + +class TestLinspaceInvalidNumParameter(_DeprecationTestCase): + """Argument to the num parameter in linspace that cannot be + safely interpreted as an integer is deprecated in 1.12.0. + + Argument to the num parameter in linspace that cannot be + safely interpreted as an integer should not be allowed. + In the interest of not breaking code that passes + an argument that could still be interpreted as an integer, a + DeprecationWarning will be issued for the time being to give + developers time to refactor relevant code. + """ + def test_float_arg(self): + # 2016-02-25, PR#7328 + self.assert_deprecated(np.linspace, args=(0, 10, 2.5)) + + +class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase): + """ + If a 'width' parameter is passed into ``binary_repr`` that is insufficient to + represent the number in base 2 (positive) or 2's complement (negative) form, + the function used to silently ignore the parameter and return a representation + using the minimal number of bits needed for the form in question. Such behavior + is now considered unsafe from a user perspective and will raise an error in the future. 
+ """ + + def test_insufficient_width_positive(self): + args = (10,) + kwargs = {'width': 2} + + self.message = ("Insufficient bit width provided. This behavior " + "will raise an error in the future.") + self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) + + def test_insufficient_width_negative(self): + args = (-5,) + kwargs = {'width': 2} + + self.message = ("Insufficient bit width provided. This behavior " + "will raise an error in the future.") + self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) + + +class TestNumericStyleTypecodes(_DeprecationTestCase): + """ + Deprecate the old numeric-style dtypes, which are especially + confusing for complex types, e.g. Complex32 -> complex64. When the + deprecation cycle is complete, the check for the strings should be + removed from PyArray_DescrConverter in descriptor.c, and the + deprecated keys should not be added as capitalized aliases in + _add_aliases in numerictypes.py. + """ + def test_all_dtypes(self): + deprecated_types = [ + 'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64', + 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64', + 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0' + ] + if sys.version_info[0] < 3: + deprecated_types.extend(['Unicode0', 'String0']) + + for dt in deprecated_types: + self.assert_deprecated(np.dtype, exceptions=(TypeError,), + args=(dt,)) + + +class TestTestDeprecated(object): + def test_assert_deprecated(self): + test_case_instance = _DeprecationTestCase() + test_case_instance.setup() + assert_raises(AssertionError, + test_case_instance.assert_deprecated, + lambda: None) + + def foo(): + warnings.warn("foo", category=DeprecationWarning, stacklevel=2) + + test_case_instance.assert_deprecated(foo) + test_case_instance.teardown() + + +class TestClassicIntDivision(_DeprecationTestCase): + """ + See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2 + if used for division + List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html + """ + def test_int_dtypes(self): + #scramble types and do some mix and match testing + deprecated_types = [ + 'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16', + 'intp', 'int64', 'uint32', 'int16' + ] + if sys.version_info[0] < 3 and sys.py3kwarning: + import operator as op + dt2 = 'bool_' + for dt1 in deprecated_types: + a = np.array([1,2,3], dtype=dt1) + b = np.array([1,2,3], dtype=dt2) + self.assert_deprecated(op.div, args=(a,b)) + dt2 = dt1 + + +class TestNonNumericConjugate(_DeprecationTestCase): + """ + Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes, + which conflicts with the error behavior of np.conjugate. 
+ """ + def test_conjugate(self): + for a in np.array(5), np.array(5j): + self.assert_not_deprecated(a.conjugate) + for a in (np.array('s'), np.array('2016', 'M'), + np.array((1, 2), [('a', int), ('b', int)])): + self.assert_deprecated(a.conjugate) + + +class TestNPY_CHAR(_DeprecationTestCase): + # 2017-05-03, 1.13.0 + def test_npy_char_deprecation(self): + from numpy.core._multiarray_tests import npy_char_deprecation + self.assert_deprecated(npy_char_deprecation) + assert_(npy_char_deprecation() == 'S1') + + +class Test_UPDATEIFCOPY(_DeprecationTestCase): + """ + v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use + WRITEBACKIFCOPY instead + """ + def test_npy_updateifcopy_deprecation(self): + from numpy.core._multiarray_tests import npy_updateifcopy_deprecation + arr = np.arange(9).reshape(3, 3) + v = arr.T + self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,)) + + +class TestDatetimeEvent(_DeprecationTestCase): + # 2017-08-11, 1.14.0 + def test_3_tuple(self): + for cls in (np.datetime64, np.timedelta64): + # two valid uses - (unit, num) and (unit, num, den, None) + self.assert_not_deprecated(cls, args=(1, ('ms', 2))) + self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None))) + + # trying to use the event argument, removed in 1.7.0, is deprecated + # it used to be a uint8 + self.assert_deprecated(cls, args=(1, ('ms', 2, 'event'))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 63))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event'))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63))) + + +class TestTruthTestingEmptyArrays(_DeprecationTestCase): + # 2017-09-25, 1.14.0 + message = '.*truth value of an empty array is ambiguous.*' + + def test_1d(self): + self.assert_deprecated(bool, args=(np.array([]),)) + + def test_2d(self): + self.assert_deprecated(bool, args=(np.zeros((1, 0)),)) + self.assert_deprecated(bool, args=(np.zeros((0, 1)),)) + self.assert_deprecated(bool, args=(np.zeros((0, 0)),)) + + +class TestBincount(_DeprecationTestCase): + # 2017-06-01, 1.14.0 + def test_bincount_minlength(self): + self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) + + +class TestGeneratorSum(_DeprecationTestCase): + # 2018-02-25, 1.15.0 + def test_generator_sum(self): + self.assert_deprecated(np.sum, args=((i for i in range(5)),)) + + +class TestSctypeNA(_VisibleDeprecationTestCase): + # 2018-06-24, 1.16 + def test_sctypeNA(self): + self.assert_deprecated(lambda: np.sctypeNA['?']) + self.assert_deprecated(lambda: np.typeNA['?']) + self.assert_deprecated(lambda: np.typeNA.get('?')) + + +class TestPositiveOnNonNumerical(_DeprecationTestCase): + # 2018-06-28, 1.16.0 + def test_positive_on_non_number(self): + self.assert_deprecated(operator.pos, args=(np.array('foo'),)) + +class TestFromstring(_DeprecationTestCase): + # 2017-10-19, 1.14 + def test_fromstring(self): + self.assert_deprecated(np.fromstring, args=('\x00'*80,)) + +class Test_GetSet_NumericOps(_DeprecationTestCase): + # 2018-09-20, 1.16.0 + def test_get_numeric_ops(self): + from numpy.core._multiarray_tests import getset_numericops + self.assert_deprecated(getset_numericops, num=2) + + # empty kwargs prevents any state actually changing which would break + # other tests. 
+ self.assert_deprecated(np.set_numeric_ops, kwargs={}) + assert_raises(ValueError, np.set_numeric_ops, add='abc') diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.pyc new file mode 100644 index 0000000..5e429f2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.py new file mode 100644 index 0000000..8f37119 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.py @@ -0,0 +1,1122 @@ +from __future__ import division, absolute_import, print_function + +import sys +import operator +import pytest +import ctypes +import gc + +import numpy as np +from numpy.core._rational_tests import rational +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT) +from numpy.core.numeric import pickle + +def assert_dtype_equal(a, b): + assert_equal(a, b) + assert_equal(hash(a), hash(b), + "two equivalent types do not hash to the same value !") + +def assert_dtype_not_equal(a, b): + assert_(a != b) + assert_(hash(a) != hash(b), + "two different types hash to the same value !") + +class TestBuiltin(object): + @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object, + np.unicode]) + def test_run(self, t): + """Only test hash runs at all.""" + dt = np.dtype(t) + hash(dt) + + @pytest.mark.parametrize('t', [int, float]) + def test_dtype(self, t): + # Make sure equivalent byte order char hash the same (e.g. < and = on + # little endian) + dt = np.dtype(t) + dt2 = dt.newbyteorder("<") + dt3 = dt.newbyteorder(">") + if dt == dt2: + assert_(dt.byteorder != dt2.byteorder, "bogus test") + assert_dtype_equal(dt, dt2) + else: + assert_(dt.byteorder != dt3.byteorder, "bogus test") + assert_dtype_equal(dt, dt3) + + def test_equivalent_dtype_hashing(self): + # Make sure equivalent dtypes with different type num hash equal + uintp = np.dtype(np.uintp) + if uintp.itemsize == 4: + left = uintp + right = np.dtype(np.uint32) + else: + left = uintp + right = np.dtype(np.ulonglong) + assert_(left == right) + assert_(hash(left) == hash(right)) + + def test_invalid_types(self): + # Make sure invalid type strings raise an error + + assert_raises(TypeError, np.dtype, 'O3') + assert_raises(TypeError, np.dtype, 'O5') + assert_raises(TypeError, np.dtype, 'O7') + assert_raises(TypeError, np.dtype, 'b3') + assert_raises(TypeError, np.dtype, 'h4') + assert_raises(TypeError, np.dtype, 'I5') + assert_raises(TypeError, np.dtype, 'e3') + assert_raises(TypeError, np.dtype, 'f5') + + if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: + assert_raises(TypeError, np.dtype, 'g12') + elif np.dtype('g').itemsize == 12: + assert_raises(TypeError, np.dtype, 'g16') + + if np.dtype('l').itemsize == 8: + assert_raises(TypeError, np.dtype, 'l4') + assert_raises(TypeError, np.dtype, 'L4') + else: + assert_raises(TypeError, np.dtype, 'l8') + assert_raises(TypeError, np.dtype, 'L8') + + if np.dtype('q').itemsize == 8: + assert_raises(TypeError, np.dtype, 'q4') + assert_raises(TypeError, np.dtype, 'Q4') + else: + assert_raises(TypeError, np.dtype, 'q8') + assert_raises(TypeError, np.dtype, 'Q8') + + def test_bad_param(self): + # Can't give a size that's too small + assert_raises(ValueError, np.dtype, + {'names':['f0', 
'f1'], + 'formats':['i4', 'i1'], + 'offsets':[0, 4], + 'itemsize':4}) + # If alignment is enabled, the alignment (4) must divide the itemsize + assert_raises(ValueError, np.dtype, + {'names':['f0', 'f1'], + 'formats':['i4', 'i1'], + 'offsets':[0, 4], + 'itemsize':9}, align=True) + # If alignment is enabled, the individual fields must be aligned + assert_raises(ValueError, np.dtype, + {'names':['f0', 'f1'], + 'formats':['i1', 'f4'], + 'offsets':[0, 2]}, align=True) + + def test_field_order_equality(self): + x = np.dtype({'names': ['A', 'B'], + 'formats': ['i4', 'f4'], + 'offsets': [0, 4]}) + y = np.dtype({'names': ['B', 'A'], + 'formats': ['f4', 'i4'], + 'offsets': [4, 0]}) + assert_equal(x == y, False) + +class TestRecord(object): + def test_equivalent_record(self): + """Test whether equivalent record dtypes hash the same.""" + a = np.dtype([('yo', int)]) + b = np.dtype([('yo', int)]) + assert_dtype_equal(a, b) + + def test_different_names(self): + # In theory, they may hash the same (collision) ? + a = np.dtype([('yo', int)]) + b = np.dtype([('ye', int)]) + assert_dtype_not_equal(a, b) + + def test_different_titles(self): + # In theory, they may hash the same (collision) ? + a = np.dtype({'names': ['r', 'b'], + 'formats': ['u1', 'u1'], + 'titles': ['Red pixel', 'Blue pixel']}) + b = np.dtype({'names': ['r', 'b'], + 'formats': ['u1', 'u1'], + 'titles': ['RRed pixel', 'Blue pixel']}) + assert_dtype_not_equal(a, b) + + def test_mutate(self): + # Mutating a dtype should reset the cached hash value + a = np.dtype([('yo', int)]) + b = np.dtype([('yo', int)]) + c = np.dtype([('ye', int)]) + assert_dtype_equal(a, b) + assert_dtype_not_equal(a, c) + a.names = ['ye'] + assert_dtype_equal(a, c) + assert_dtype_not_equal(a, b) + state = b.__reduce__()[2] + a.__setstate__(state) + assert_dtype_equal(a, b) + assert_dtype_not_equal(a, c) + + def test_not_lists(self): + """Test if an appropriate exception is raised when passing bad values to + the dtype constructor. 
+ """ + assert_raises(TypeError, np.dtype, + dict(names={'A', 'B'}, formats=['f8', 'i4'])) + assert_raises(TypeError, np.dtype, + dict(names=['A', 'B'], formats={'f8', 'i4'})) + + def test_aligned_size(self): + # Check that structured dtypes get padded to an aligned size + dt = np.dtype('i4, i1', align=True) + assert_equal(dt.itemsize, 8) + dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True) + assert_equal(dt.itemsize, 8) + dt = np.dtype({'names':['f0', 'f1'], + 'formats':['i4', 'u1'], + 'offsets':[0, 4]}, align=True) + assert_equal(dt.itemsize, 8) + dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True) + assert_equal(dt.itemsize, 8) + # Nesting should preserve that alignment + dt1 = np.dtype([('f0', 'i4'), + ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), + ('f2', 'i1')], align=True) + assert_equal(dt1.itemsize, 20) + dt2 = np.dtype({'names':['f0', 'f1', 'f2'], + 'formats':['i4', + [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], + 'i1'], + 'offsets':[0, 4, 16]}, align=True) + assert_equal(dt2.itemsize, 20) + dt3 = np.dtype({'f0': ('i4', 0), + 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), + 'f2': ('i1', 16)}, align=True) + assert_equal(dt3.itemsize, 20) + assert_equal(dt1, dt2) + assert_equal(dt2, dt3) + # Nesting should preserve packing + dt1 = np.dtype([('f0', 'i4'), + ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), + ('f2', 'i1')], align=False) + assert_equal(dt1.itemsize, 11) + dt2 = np.dtype({'names':['f0', 'f1', 'f2'], + 'formats':['i4', + [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], + 'i1'], + 'offsets':[0, 4, 10]}, align=False) + assert_equal(dt2.itemsize, 11) + dt3 = np.dtype({'f0': ('i4', 0), + 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), + 'f2': ('i1', 10)}, align=False) + assert_equal(dt3.itemsize, 11) + assert_equal(dt1, dt2) + assert_equal(dt2, dt3) + # Array of subtype should preserve alignment + dt1 = np.dtype([('a', '|i1'), + ('b', [('f0', 'f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(str(dt), + "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])]") + + # If the sticky aligned flag is set to True, it makes the + # str() function use a dict representation with an 'aligned' flag + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], + (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])], + align=True) + assert_equal(str(dt), + "{'names':['top','bottom'], " + "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,))," + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]], " + "'offsets':[0,76800], " + "'itemsize':80000, " + "'aligned':True}") + assert_equal(np.dtype(eval(str(dt))), dt) + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) + assert_equal(str(dt), + "[(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')]") + + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(repr(dt), + "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " 
+ "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])])") + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, + align=True) + assert_equal(repr(dt), + "dtype([(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')], align=True)") + + def test_repr_structured_not_packed(self): + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['= 3, reason="Python 2 only") + def test_dtype_str_with_long_in_shape(self): + # Pull request #376, should not error + np.dtype('(1L,)i4') + + def test_base_dtype_with_object_type(self): + # Issue gh-2798, should not error. + np.array(['a'], dtype="O").astype(("O", [("name", "O")])) + + def test_empty_string_to_object(self): + # Pull request #4722 + np.array(["", ""]).astype(object) + + def test_void_subclass_unsized(self): + dt = np.dtype(np.record) + assert_equal(repr(dt), "dtype('V')") + assert_equal(str(dt), '|V0') + assert_equal(dt.name, 'record') + + def test_void_subclass_sized(self): + dt = np.dtype((np.record, 2)) + assert_equal(repr(dt), "dtype('V2')") + assert_equal(str(dt), '|V2') + assert_equal(dt.name, 'record16') + + def test_void_subclass_fields(self): + dt = np.dtype((np.record, [('a', 'f4', (2, 1)), ('b', 'u4')]) + self.check(BigEndStruct, expected) + + def test_little_endian_structure_packed(self): + class LittleEndStruct(ctypes.LittleEndianStructure): + _fields_ = [ + ('one', ctypes.c_uint8), + ('two', ctypes.c_uint32) + ] + _pack_ = 1 + expected = np.dtype([('one', 'u1'), ('two', 'B'), + ('b', '>H') + ], align=True) + self.check(PaddedStruct, expected) + + def test_simple_endian_types(self): + self.check(ctypes.c_uint16.__ctype_le__, np.dtype('u2')) + self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1')) + self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1')) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.pyc new file mode 100644 index 0000000..083ecde Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.py new file mode 100644 index 0000000..3be4a8a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.py @@ -0,0 +1,1001 @@ +from __future__ import division, absolute_import, print_function + +import itertools + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_raises, suppress_warnings + ) + +# Setup for optimize einsum +chars = 'abcdefghij' +sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) +global_size_dict = dict(zip(chars, sizes)) + + +class TestEinsum(object): + def test_einsum_errors(self): + for do_opt in [True, False]: + # Need enough arguments + assert_raises(ValueError, np.einsum, optimize=do_opt) + assert_raises(ValueError, np.einsum, "", optimize=do_opt) + + # subscripts must be a string + assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt) + + # out parameter must be an array + assert_raises(TypeError, np.einsum, "", 0, out='test', + optimize=do_opt) + + # order parameter must be a valid order + assert_raises(TypeError, np.einsum, "", 0, order='W', + optimize=do_opt) + + # casting parameter must be a valid 
casting
+ assert_raises(ValueError, np.einsum, "", 0, casting='blah',
+ optimize=do_opt)
+
+ # dtype parameter must be a valid dtype
+ assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
+ optimize=do_opt)
+
+ # other keyword arguments are rejected
+ assert_raises(TypeError, np.einsum, "", 0, bad_arg=0,
+ optimize=do_opt)
+
+ # issue 4528 revealed a segfault with this call
+ assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt)
+
+ # number of operands must match count in subscripts string
+ assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ",", 0, [0], [0],
+ optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt)
+
+ # can't have more subscripts than dimensions in the operand
+ assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt)
+
+ # invalid ellipsis
+ assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
+
+ # invalid subscript character
+ assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
+
+ # output subscripts must appear in input
+ assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
+
+ # output subscripts may only be specified once
+ assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
+ optimize=do_opt)
+
+ # dimensions must match when being collapsed
+ assert_raises(ValueError, np.einsum, "ii",
+ np.arange(6).reshape(2, 3), optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ii->i",
+ np.arange(6).reshape(2, 3), optimize=do_opt)
+
+ # broadcasting to new dimensions must be enabled explicitly
+ assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
+ optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
+ out=np.arange(4).reshape(2, 2), optimize=do_opt)
+
+ def test_einsum_views(self):
+ # pass-through
+ for do_opt in [True, False]:
+ a = np.arange(6)
+ a.shape = (2, 3)
+
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(b.base is a)
+
+ b = np.einsum(a, [Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+
+ b = np.einsum("ij", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a)
+
+ b = np.einsum(a, [0, 1], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a)
+
+ # output is writeable whenever input is writeable
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(b.flags['WRITEABLE'])
+ a.flags['WRITEABLE'] = False
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(not b.flags['WRITEABLE'])
+
+ # transpose
+ a = np.arange(6)
+ a.shape = (2, 3)
+
+ b = np.einsum("ji", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.T)
+
+ b = np.einsum(a, [1, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.T)
+
+ # diagonal
+ a = np.arange(9)
+ a.shape = (3, 3)
+
+ b = np.einsum("ii->i", a, optimize=do_opt)
+
assert_(b.base is a) + assert_equal(b, [a[i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0], [0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i] for i in range(3)]) + + # diagonal with various ways of broadcasting an additional dimension + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("...ii->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum("ii...->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum("...ii->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("jii->ij", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("ii...->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + # triple diagonal + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("iii->i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + # swap axes + a = np.arange(24) + a.shape = (2, 3, 4) + + b = np.einsum("ijk->jik", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + def check_einsum_sums(self, dtype, do_opt=False): + # Check various sums. Does many sizes to exercise unrolled loops. 
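+ # A rough map of the identities exercised below (a sketch, assuming
+ # only standard einsum semantics, nothing specific to this file):
+ # np.einsum("i->", a) ~ np.sum(a, axis=-1)
+ # np.einsum("ii", a) ~ np.trace(a), for square a
+ # np.einsum("i,i", a, b) ~ np.dot(a, b)
+ # np.einsum("i,j", a, b) ~ np.outer(a, b)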
+ + # sum(a, axis=-1) + for n in range(1, 17): + a = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i->", a, optimize=do_opt), + np.sum(a, axis=-1).astype(dtype)) + assert_equal(np.einsum(a, [0], [], optimize=do_opt), + np.sum(a, axis=-1).astype(dtype)) + + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("...i->...", a, optimize=do_opt), + np.sum(a, axis=-1).astype(dtype)) + assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), + np.sum(a, axis=-1).astype(dtype)) + + # sum(a, axis=0) + for n in range(1, 17): + a = np.arange(2*n, dtype=dtype).reshape(2, n) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), + np.sum(a, axis=0).astype(dtype)) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), + np.sum(a, axis=0).astype(dtype)) + + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), + np.sum(a, axis=0).astype(dtype)) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), + np.sum(a, axis=0).astype(dtype)) + + # trace(a) + for n in range(1, 17): + a = np.arange(n*n, dtype=dtype).reshape(n, n) + assert_equal(np.einsum("ii", a, optimize=do_opt), + np.trace(a).astype(dtype)) + assert_equal(np.einsum(a, [0, 0], optimize=do_opt), + np.trace(a).astype(dtype)) + + # multiply(a, b) + assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case + for n in range(1, 17): + a = np.arange(3 * n, dtype=dtype).reshape(3, n) + b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("..., ...", a, b, optimize=do_opt), + np.multiply(a, b)) + assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt), + np.multiply(a, b)) + + # inner(a,b) + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) + assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), + np.inner(a, b)) + + for n in range(1, 11): + a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt), + np.inner(a.T, b.T).T) + assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), + np.inner(a.T, b.T).T) + + # outer(a,b) + for n in range(1, 17): + a = np.arange(3, dtype=dtype)+1 + b = np.arange(n, dtype=dtype)+1 + assert_equal(np.einsum("i,j", a, b, optimize=do_opt), + np.outer(a, b)) + assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), + np.outer(a, b)) + + # Suppress the complex warnings for the 'as f8' tests + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning) + + # matvec(a,b) / a.dot(b) where a is matrix, b is vector + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt), + np.dot(a, b)) + + c = np.arange(4, dtype=dtype) + np.einsum("ij,j", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] 
= 0 + np.einsum(a, [0, 1], b, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), + np.dot(b.T, a.T)) + assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), + np.dot(b.T, a.T)) + + c = np.arange(4, dtype=dtype) + np.einsum("ji,j", a.T, b.T, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a.T, [1, 0], b.T, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + + # matmat(a,b) / a.dot(b) where a is matrix, b is matrix + for n in range(1, 17): + if n < 8 or dtype != 'f2': + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) + assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), + np.dot(a, b)) + + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) + c = np.arange(24, dtype=dtype).reshape(4, 6) + np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', + optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + # matrix triple product (note this is not currently an efficient + # way to multiply 3 matrices) + a = np.arange(12, dtype=dtype).reshape(3, 4) + b = np.arange(20, dtype=dtype).reshape(4, 5) + c = np.arange(30, dtype=dtype).reshape(5, 6) + if dtype != 'f2': + assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), + a.dot(b).dot(c)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], + optimize=do_opt), a.dot(b).dot(c)) + + d = np.arange(18, dtype=dtype).reshape(3, 6) + np.einsum("ij,jk,kl", a, b, c, out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + d[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + # tensordot(a, b) + if np.dtype(dtype) != np.dtype('f2'): + a = np.arange(60, dtype=dtype).reshape(3, 4, 5) + b = np.arange(24, dtype=dtype).reshape(4, 3, 2) + assert_equal(np.einsum("ijk, jil -> kl", a, b), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + + c = np.arange(10, dtype=dtype).reshape(5, 2) + np.einsum("ijk,jil->kl", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + c[...] 
= 0 + np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + + # logical_and(logical_and(a!=0, b!=0), c!=0) + a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype) + b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype) + c = np.array([True, True, False, True, True, False, True, True]) + assert_equal(np.einsum("i,i,i->i", a, b, c, + dtype='?', casting='unsafe', optimize=do_opt), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], + dtype='?', casting='unsafe'), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + + a = np.arange(9, dtype=dtype) + assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) + assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) + assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) + assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) + + # Various stride0, contiguous, and SSE aligned variants + for n in range(1, 25): + a = np.arange(n, dtype=dtype) + if np.dtype(dtype).itemsize > 1: + assert_equal(np.einsum("...,...", a, a, optimize=do_opt), + np.multiply(a, a)) + assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) + assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a) + assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a) + assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a)) + assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a)) + + assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), + np.multiply(a[1:], a[:-1])) + assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), + np.dot(a[1:], a[:-1])) + assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:]) + assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:]) + assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), + 2*np.sum(a[1:])) + assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), + 2*np.sum(a[1:])) + + # An object array, summed as the data type + a = np.arange(9, dtype=object) + + b = np.einsum("i->", a, dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + assert_equal(b.dtype, np.dtype(dtype)) + + b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + assert_equal(b.dtype, np.dtype(dtype)) + + # A case which was failing (ticket #1885) + p = np.arange(2) + 1 + q = np.arange(4).reshape(2, 2) + 3 + r = np.arange(4).reshape(2, 2) + 7 + assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) + + # singleton dimensions broadcast (gh-10343) + p = np.ones((10,2)) + q = np.ones((1,2)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + np.einsum('ij,ij->j', p, q, optimize=False)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + [10.] * 2) + + # a blas-compatible contraction broadcasting case which was failing + # for optimize=True (ticket #10930) + x = np.array([2., 3.]) + y = np.array([4.]) + assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) + assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) 
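+ # (sketch of the arithmetic behind the expected 20. above, assuming
+ # only ordinary broadcasting: y has shape (1,) and stretches to x's
+ # shape (2,), so "i, i" sums the products 2*4 + 3*4 == 20)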
+ + # all-ones array was bypassing bug (ticket #10930) + p = np.ones((1, 5)) / 2 + q = np.ones((5, 5)) / 2 + for optimize in (True, False): + assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, + optimize=optimize), + np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize)) + assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize), + np.full((1, 5), 1.25)) + + # Cases which were failing (gh-10899) + x = np.eye(2, dtype=dtype) + y = np.ones(2, dtype=dtype) + assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize), + [2.]) # contig_contig_outstride0_two + assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize), + [2.]) # stride0_contig_outstride0_two + assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize), + [2.]) # contig_stride0_outstride0_two + + def test_einsum_sums_int8(self): + self.check_einsum_sums('i1') + + def test_einsum_sums_uint8(self): + self.check_einsum_sums('u1') + + def test_einsum_sums_int16(self): + self.check_einsum_sums('i2') + + def test_einsum_sums_uint16(self): + self.check_einsum_sums('u2') + + def test_einsum_sums_int32(self): + self.check_einsum_sums('i4') + self.check_einsum_sums('i4', True) + + def test_einsum_sums_uint32(self): + self.check_einsum_sums('u4') + self.check_einsum_sums('u4', True) + + def test_einsum_sums_int64(self): + self.check_einsum_sums('i8') + + def test_einsum_sums_uint64(self): + self.check_einsum_sums('u8') + + def test_einsum_sums_float16(self): + self.check_einsum_sums('f2') + + def test_einsum_sums_float32(self): + self.check_einsum_sums('f4') + + def test_einsum_sums_float64(self): + self.check_einsum_sums('f8') + self.check_einsum_sums('f8', True) + + def test_einsum_sums_longdouble(self): + self.check_einsum_sums(np.longdouble) + + def test_einsum_sums_cfloat64(self): + self.check_einsum_sums('c8') + self.check_einsum_sums('c8', True) + + def test_einsum_sums_cfloat128(self): + self.check_einsum_sums('c16') + + def test_einsum_sums_clongdouble(self): + self.check_einsum_sums(np.clongdouble) + + def test_einsum_misc(self): + # This call used to crash because of a bug in + # PyArray_AssignZero + a = np.ones((1, 2)) + b = np.ones((2, 2, 1)) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) + + # Regression test for issue #10369 (test unicode inputs with Python 2) + assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], + optimize=u'greedy'), 20) + + # The iterator had an issue with buffering this reduction + a = np.ones((5, 12, 4, 2, 3), np.int64) + b = np.ones((5, 12, 11), np.int64) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), + np.einsum('ijklm,ijn->', a, b)) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True), + np.einsum('ijklm,ijn->', a, b, optimize=True)) + + # Issue #2027, was a problem in the contiguous 3-argument + # inner loop implementation + a = np.arange(1, 3) + b = np.arange(1, 5).reshape(2, 2) + c = np.arange(1, 9).reshape(4, 2) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + + def test_subscript_range(self): + # 
Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used + # when creating a subscript from arrays + a = np.ones((2, 3)) + b = np.ones((3, 4)) + np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) + np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) + np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) + assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) + assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) + + def test_einsum_broadcast(self): + # Issue #2455 change in handling ellipsis + # remove the 'middle broadcast' error + # only use the 'RIGHT' iteration in prepare_op_axes + # adds auto broadcast on left where it belongs + # broadcast on right has to be explicit + # We need to test the optimized parsing as well + + A = np.arange(2 * 3 * 4).reshape(2, 3, 4) + B = np.arange(3) + ref = np.einsum('ijk,j->ijk', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error + + A = np.arange(12).reshape((4, 3)) + B = np.arange(6).reshape((3, 2)) + ref = np.einsum('ik,kj->ij', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) + assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error + + dims = [2, 3, 4, 5] + a = np.arange(np.prod(dims)).reshape(dims) + v = np.arange(dims[2]) + ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) + assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) + + J, K, M = 160, 160, 120 + A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) + B = np.arange(J * K * M * 3).reshape(J, K, M, 3) + ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('...lmn,lmno->...o', A, B, + optimize=opt), ref) # used to raise error + + def test_einsum_fixedstridebug(self): + # Issue #4485 obscure einsum bug + # This case revealed a bug in nditer where it reported a stride + # as 'fixed' (0) when it was in fact not fixed during processing + # (0 or 4). The reason for the bug was that the check for a fixed + # stride was using the information from the 2D inner loop reuse + # to restrict the iteration dimensions it had to validate to be + # the same, but that 2D inner loop reuse logic is only triggered + # during the buffer copying step, and hence it was invalid to + # rely on those values. The fix is to check all the dimensions + # of the stride in question, which in the test case reveals that + # the stride is not fixed. + # + # NOTE: This test is triggered by the fact that the default buffersize, + # used by einsum, is 8192, and 3*2731 = 8193, is larger than that + # and results in a mismatch between the buffering and the + # striding for operand A. 
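+ # (a sketch of the chunking arithmetic implied by the NOTE: with the
+ # default 8192-element buffer, an 8193-element iteration presumably
+ # splits into one full 8192-element pass plus a 1-element remainder,
+ # which is what exposes the bogus fixed-stride assumption)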
+ A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+ B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
+ es = np.einsum('cl, cpx->lpx', A, B)
+ tp = np.tensordot(A, B, axes=(0, 0))
+ assert_equal(es, tp)
+ # The following is the original test case from the bug report,
+ # made repeatable by changing random arrays to aranges.
+ A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
+ B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
+ es = np.einsum('cl, cpxy->lpxy', A, B)
+ tp = np.tensordot(A, B, axes=(0, 0))
+ assert_equal(es, tp)
+
+ def test_einsum_fixed_collapsingbug(self):
+ # Issue #5147.
+ # The bug only occurred when the output argument of einsum was used.
+ x = np.random.normal(0, 1, (5, 5, 5, 5))
+ y1 = np.zeros((5, 5))
+ np.einsum('aabb->ab', x, out=y1)
+ idx = np.arange(5)
+ y2 = x[idx[:, None], idx[:, None], idx, idx]
+ assert_equal(y1, y2)
+
+ def test_einsum_all_contig_non_contig_output(self):
+ # Issue gh-5907, tests that the all contiguous special case
+ # actually checks the contiguity of the output
+ x = np.ones((5, 5))
+ out = np.ones(10)[::2]
+ correct_base = np.ones(10)
+ correct_base[::2] = 5
+ # Always worked (inner iteration is done with 0-stride):
+ np.einsum('mi,mi,mi->m', x, x, x, out=out)
+ assert_array_equal(out.base, correct_base)
+ # Example 1:
+ out = np.ones(10)[::2]
+ np.einsum('im,im,im->m', x, x, x, out=out)
+ assert_array_equal(out.base, correct_base)
+ # Example 2, buffering causes x to be contiguous but
+ # special cases do not catch the operation before:
+ out = np.ones((2, 2, 2))[..., 0]
+ correct_base = np.ones((2, 2, 2))
+ correct_base[..., 0] = 2
+ x = np.ones((2, 2), np.float32)
+ np.einsum('ij,jk->ik', x, x, out=out)
+ assert_array_equal(out.base, correct_base)
+
+ def test_small_boolean_arrays(self):
+ # See gh-5946.
+ # Use array of True embedded in False.
+ a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
+ a[...]
= True + out = np.zeros((16, 1, 1), dtype=np.bool_)[:2] + tgt = np.ones((2, 1, 1), dtype=np.bool_) + res = np.einsum('...ij,...jk->...ik', a, a, out=out) + assert_equal(res, tgt) + + def test_out_is_res(self): + a = np.arange(9).reshape(3, 3) + res = np.einsum('...ij,...jk->...ik', a, a, out=a) + assert res is a + + def optimize_compare(self, subscripts, operands=None): + # Tests all paths of the optimization function against + # conventional einsum + if operands is None: + args = [subscripts] + terms = subscripts.split('->')[0].split(',') + for term in terms: + dims = [global_size_dict[x] for x in term] + args.append(np.random.rand(*dims)) + else: + args = [subscripts] + operands + + noopt = np.einsum(*args, optimize=False) + opt = np.einsum(*args, optimize='greedy') + assert_almost_equal(opt, noopt) + opt = np.einsum(*args, optimize='optimal') + assert_almost_equal(opt, noopt) + + def test_hadamard_like_products(self): + # Hadamard outer products + self.optimize_compare('a,ab,abc->abc') + self.optimize_compare('a,b,ab->ab') + + def test_index_transformations(self): + # Simple index transformation cases + self.optimize_compare('ea,fb,gc,hd,abcd->efgh') + self.optimize_compare('ea,fb,abcd,gc,hd->efgh') + self.optimize_compare('abcd,ea,fb,gc,hd->efgh') + + def test_complex(self): + # Long test cases + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac') + self.optimize_compare('abhe,hidj,jgba,hiab,gab') + self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac') + self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad') + self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb') + self.optimize_compare('bdhe,acad,hiab,agac,hibd') + + def test_collapse(self): + # Inner products + self.optimize_compare('ab,ab,c->') + self.optimize_compare('ab,ab,c->c') + self.optimize_compare('ab,ab,cd,cd->') + self.optimize_compare('ab,ab,cd,cd->ac') + self.optimize_compare('ab,ab,cd,cd->cd') + self.optimize_compare('ab,ab,cd,cd,ef,ef->') + + def test_expand(self): + # Outer products + self.optimize_compare('ab,cd,ef->abcdef') + self.optimize_compare('ab,cd,ef->acdf') + self.optimize_compare('ab,cd,de->abcde') + self.optimize_compare('ab,cd,de->be') + self.optimize_compare('ab,bcd,cd->abcd') + self.optimize_compare('ab,bcd,cd->abd') + + def test_edge_cases(self): + # Difficult edge cases for optimization + self.optimize_compare('eb,cb,fb->cef') + self.optimize_compare('dd,fb,be,cdb->cef') + self.optimize_compare('bca,cdb,dbf,afc->') + self.optimize_compare('dcc,fce,ea,dbf->ab') + self.optimize_compare('fdf,cdd,ccd,afe->ae') + self.optimize_compare('abcd,ad') + self.optimize_compare('ed,fcd,ff,bcf->be') + self.optimize_compare('baa,dcf,af,cde->be') + self.optimize_compare('bd,db,eac->ace') + self.optimize_compare('fff,fae,bef,def->abd') + self.optimize_compare('efc,dbc,acf,fd->abe') + self.optimize_compare('ba,ac,da->bcd') + + def test_inner_product(self): + # Inner products + self.optimize_compare('ab,ab') + self.optimize_compare('ab,ba') + self.optimize_compare('abc,abc') + self.optimize_compare('abc,bac') + self.optimize_compare('abc,cba') + + def test_random_cases(self): + # Randomly built test cases + self.optimize_compare('aab,fa,df,ecc->bde') + self.optimize_compare('ecb,fef,bad,ed->ac') + self.optimize_compare('bcf,bbb,fbf,fc->') + self.optimize_compare('bb,ff,be->e') + self.optimize_compare('bcb,bb,fc,fff->') + self.optimize_compare('fbb,dfd,fc,fc->') + 
self.optimize_compare('afd,ba,cc,dc->bf')
+        self.optimize_compare('adb,bc,fa,cfc->d')
+        self.optimize_compare('bbd,bda,fc,db->acf')
+        self.optimize_compare('dba,ead,cad->bce')
+        self.optimize_compare('aef,fbc,dca->bde')
+
+    def test_combined_views_mapping(self):
+        # gh-10792
+        a = np.arange(9).reshape(1, 1, 3, 1, 3)
+        b = np.einsum('bbcdc->d', a)
+        assert_equal(b, [12])
+
+    def test_broadcasting_dot_cases(self):
+        # Ensures broadcasting cases are not mistaken for GEMM
+
+        a = np.random.rand(1, 5, 4)
+        b = np.random.rand(4, 6)
+        c = np.random.rand(5, 6)
+        d = np.random.rand(10)
+
+        self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
+        self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
+
+        e = np.random.rand(1, 1, 5, 4)
+        f = np.random.rand(7, 7)
+        self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
+        self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
+
+        # Edge case found in gh-11308
+        g = np.arange(64).reshape(2, 4, 8)
+        self.optimize_compare('obk,ijk->ioj', operands=[g, g])
+
+
+class TestEinsumPath(object):
+    def build_operands(self, string, size_dict=global_size_dict):
+
+        # Builds views based on the initial operands
+        operands = [string]
+        terms = string.split('->')[0].split(',')
+        for term in terms:
+            dims = [size_dict[x] for x in term]
+            operands.append(np.random.rand(*dims))
+
+        return operands
+
+    def assert_path_equal(self, comp, benchmark):
+        # Checks that two contraction paths (lists of tuples) are equivalent
+        ret = (len(comp) == len(benchmark))
+        assert_(ret)
+        for pos in range(len(comp) - 1):
+            ret &= isinstance(comp[pos + 1], tuple)
+            ret &= (comp[pos + 1] == benchmark[pos + 1])
+        assert_(ret)
+
+    def test_memory_constraints(self):
+        # Ensure memory constraints are satisfied
+
+        outer_test = self.build_operands('a,b,c->abc')
+
+        path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+        path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+        long_test = self.build_operands('acdf,jbje,gihb,hfac')
+        path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+        path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+    def test_long_paths(self):
+        # Long complex cases
+
+        # Long test 1
+        long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+        path, path_str = np.einsum_path(*long_test1, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*long_test1, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+        # Long test 2
+        long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
+        path, path_str = np.einsum_path(*long_test2, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*long_test2, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+    def test_edge_paths(self):
+        # Difficult edge cases
+
+        # Edge test1
+        edge_test1 = self.build_operands('eb,cb,fb->cef')
+        path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+        path,
path_str = np.einsum_path(*edge_test1, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+        # Edge test2
+        edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
+        path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+        # Edge test3
+        edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
+        path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        # Edge test4
+        edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
+        path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        # Edge test5
+        edge_test5 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
+                                         size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
+        path, path_str = np.einsum_path(*edge_test5, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+        path, path_str = np.einsum_path(*edge_test5, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+    def test_path_type_input(self):
+        # Test explicit path handling
+        path_test = self.build_operands('dcc,fce,ea,dbf->ab')
+
+        path, path_str = np.einsum_path(*path_test, optimize=False)
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+        path, path_str = np.einsum_path(*path_test, optimize=True)
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+        exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
+        path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+        self.assert_path_equal(path, exp_path)
+
+        # Double check einsum works on the input path
+        noopt = np.einsum(*path_test, optimize=False)
+        opt = np.einsum(*path_test, optimize=exp_path)
+        assert_almost_equal(noopt, opt)
+
+    def test_spaces(self):
+        # gh-10794
+        arr = np.array([[1]])
+        for sp in itertools.product(['', ' '], repeat=4):
+            # no error for any spacing
+            np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
+
+
+def test_overlap():
+    a = np.arange(9, dtype=int).reshape(3, 3)
+    b = np.arange(9, dtype=int).reshape(3, 3)
+    d = np.dot(a, b)
+    # sanity check
+    c = np.einsum('ij,jk->ik', a, b)
+    assert_equal(c, d)
+    # gh-10080, out overlaps one of the operands
+    c = np.einsum('ij,jk->ik', a, b, out=b)
+    assert_equal(c, d)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.pyc
new file mode 100644
index 0000000..d1232e0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.py
new file mode 100644
index 0000000..670d485
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.py
@@ -0,0 +1,41 @@
+from __future__ import division, absolute_import, print_function
+
+import platform
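The file being added here exercises np.errstate, numpy's context manager for scoping the floating-point error policy. A minimal standalone sketch of the behaviour under test (editorial example, not part of the vendored file):

    import numpy as np

    # Inside the block, divide-by-zero raises instead of warning;
    # the previous error policy is restored on exit.
    with np.errstate(divide='raise'):
        try:
            np.array(1.0) / np.array(0.0)
        except FloatingPointError as exc:
            print('caught:', exc)
    np.array(1.0) / np.array(0.0)   # default policy again: warns, yields inf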
+import pytest + +import numpy as np +from numpy.testing import assert_, assert_raises + + +class TestErrstate(object): + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_invalid(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(invalid='ignore'): + np.sqrt(a) + # While this should fail! + with assert_raises(FloatingPointError): + np.sqrt(a) + + def test_divide(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(divide='ignore'): + a // 0 + # While this should fail! + with assert_raises(FloatingPointError): + a // 0 + + def test_errcall(self): + def foo(*args): + print(args) + + olderrcall = np.geterrcall() + with np.errstate(call=foo): + assert_(np.geterrcall() is foo, 'call is not foo') + with np.errstate(call=None): + assert_(np.geterrcall() is None, 'call is not None') + assert_(np.geterrcall() is olderrcall, 'call is not olderrcall') diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.pyc new file mode 100644 index 0000000..64952ea Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.py new file mode 100644 index 0000000..7c454a6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.py @@ -0,0 +1,221 @@ +from __future__ import division, absolute_import, print_function + +import itertools +import contextlib +import operator +import pytest + +import numpy as np +import numpy.core._multiarray_tests as mt + +from numpy.testing import assert_raises, assert_equal + + +INT64_MAX = np.iinfo(np.int64).max +INT64_MIN = np.iinfo(np.int64).min +INT64_MID = 2**32 + +# int128 is not two's complement, the sign bit is separate +INT128_MAX = 2**128 - 1 +INT128_MIN = -INT128_MAX +INT128_MID = 2**64 + +INT64_VALUES = ( + [INT64_MIN + j for j in range(20)] + + [INT64_MAX - j for j in range(20)] + + [INT64_MID + j for j in range(-20, 20)] + + [2*INT64_MID + j for j in range(-20, 20)] + + [INT64_MID//2 + j for j in range(-20, 20)] + + list(range(-70, 70)) +) + +INT128_VALUES = ( + [INT128_MIN + j for j in range(20)] + + [INT128_MAX - j for j in range(20)] + + [INT128_MID + j for j in range(-20, 20)] + + [2*INT128_MID + j for j in range(-20, 20)] + + [INT128_MID//2 + j for j in range(-20, 20)] + + list(range(-70, 70)) + + [False] # negative zero +) + +INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0] + + +@contextlib.contextmanager +def exc_iter(*args): + """ + Iterate over Cartesian product of *args, and if an exception is raised, + add information of the current iterate. 
+ """ + + value = [None] + + def iterate(): + for v in itertools.product(*args): + value[0] = v + yield v + + try: + yield iterate() + except Exception: + import traceback + msg = "At: %r\n%s" % (repr(value[0]), + traceback.format_exc()) + raise AssertionError(msg) + + +def test_safe_binop(): + # Test checked arithmetic routines + + ops = [ + (operator.add, 1), + (operator.sub, 2), + (operator.mul, 3) + ] + + with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it: + for xop, a, b in it: + pyop, op = xop + c = pyop(a, b) + + if not (INT64_MIN <= c <= INT64_MAX): + assert_raises(OverflowError, mt.extint_safe_binop, a, b, op) + else: + d = mt.extint_safe_binop(a, b, op) + if c != d: + # assert_equal is slow + assert_equal(d, c) + + +def test_to_128(): + with exc_iter(INT64_VALUES) as it: + for a, in it: + b = mt.extint_to_128(a) + if a != b: + assert_equal(b, a) + + +def test_to_64(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if not (INT64_MIN <= a <= INT64_MAX): + assert_raises(OverflowError, mt.extint_to_64, a) + else: + b = mt.extint_to_64(a) + if a != b: + assert_equal(b, a) + + +def test_mul_64_64(): + with exc_iter(INT64_VALUES, INT64_VALUES) as it: + for a, b in it: + c = a * b + d = mt.extint_mul_64_64(a, b) + if c != d: + assert_equal(d, c) + + +def test_add_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a + b + if not (INT128_MIN <= c <= INT128_MAX): + assert_raises(OverflowError, mt.extint_add_128, a, b) + else: + d = mt.extint_add_128(a, b) + if c != d: + assert_equal(d, c) + + +def test_sub_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a - b + if not (INT128_MIN <= c <= INT128_MAX): + assert_raises(OverflowError, mt.extint_sub_128, a, b) + else: + d = mt.extint_sub_128(a, b) + if c != d: + assert_equal(d, c) + + +def test_neg_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + b = -a + c = mt.extint_neg_128(a) + if b != c: + assert_equal(c, b) + + +def test_shl_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if a < 0: + b = -(((-a) << 1) & (2**128-1)) + else: + b = (a << 1) & (2**128-1) + c = mt.extint_shl_128(a) + if b != c: + assert_equal(c, b) + + +def test_shr_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if a < 0: + b = -((-a) >> 1) + else: + b = a >> 1 + c = mt.extint_shr_128(a) + if b != c: + assert_equal(c, b) + + +def test_gt_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a > b + d = mt.extint_gt_128(a, b) + if c != d: + assert_equal(d, c) + + +@pytest.mark.slow +def test_divmod_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + if a >= 0: + c, cr = divmod(a, b) + else: + c, cr = divmod(-a, b) + c = -c + cr = -cr + + d, dr = mt.extint_divmod_128_64(a, b) + + if c != d or d != dr or b*d + dr != a: + assert_equal(d, c) + assert_equal(dr, cr) + assert_equal(b*d + dr, a) + + +def test_floordiv_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + c = a // b + d = mt.extint_floordiv_128_64(a, b) + + if c != d: + assert_equal(d, c) + + +def test_ceildiv_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + c = (a + b - 1) // b + d = mt.extint_ceildiv_128_64(a, b) + + if c != d: + assert_equal(d, c) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.pyc new file mode 100644 index 0000000..77946a7 Binary 
files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_function_base.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_function_base.py new file mode 100644 index 0000000..459baca --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_function_base.py @@ -0,0 +1,364 @@ +from __future__ import division, absolute_import, print_function + +from numpy import ( + logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan, + ndarray, sqrt, nextafter, stack + ) +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, + suppress_warnings + ) + + +class PhysicalQuantity(float): + def __new__(cls, value): + return float.__new__(cls, value) + + def __add__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) + float(self)) + __radd__ = __add__ + + def __sub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(self) - float(x)) + + def __rsub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) - float(self)) + + def __mul__(self, x): + return PhysicalQuantity(float(x) * float(self)) + __rmul__ = __mul__ + + def __div__(self, x): + return PhysicalQuantity(float(self) / float(x)) + + def __rdiv__(self, x): + return PhysicalQuantity(float(x) / float(self)) + + +class PhysicalQuantity2(ndarray): + __array_priority__ = 10 + + +class TestLogspace(object): + + def test_basic(self): + y = logspace(0, 6) + assert_(len(y) == 50) + y = logspace(0, 6, num=100) + assert_(y[-1] == 10 ** 6) + y = logspace(0, 6, endpoint=0) + assert_(y[-1] < 10 ** 6) + y = logspace(0, 6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + def test_start_stop_array(self): + start = array([0., 1.]) + stop = array([6., 7.]) + t1 = logspace(start, stop, 6) + t2 = stack([logspace(_start, _stop, 6) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = logspace(start, stop[0], 6) + t4 = stack([logspace(_start, stop[0], 6) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = logspace(start, stop, 6, axis=-1) + assert_equal(t5, t2.T) + + def test_dtype(self): + y = logspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = logspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = logspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(logspace(a, b), logspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + ls = logspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0)) + ls = logspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0, 1)) + + +class TestGeomspace(object): + + def test_basic(self): + y = geomspace(1, 1e6) + assert_(len(y) == 50) + y = geomspace(1, 1e6, num=100) + assert_(y[-1] == 10 ** 6) + y = geomspace(1, 1e6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = geomspace(1, 1e6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + y = geomspace(8, 2, num=3) + assert_allclose(y, [8, 4, 2]) + assert_array_equal(y.imag, 0) + + y = geomspace(-1, -100, num=3) + assert_array_equal(y, [-1, -10, -100]) + assert_array_equal(y.imag, 0) 
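These checks pin down geomspace's defining property: successive samples differ by a constant ratio, r = (stop/start)**(1/(num-1)). A quick standalone illustration (editorial, not part of the vendored file):

    import numpy as np

    print(np.geomspace(1, 1e6, num=7))  # [1.e0 1.e1 ... 1.e6], ratio 10
    print(np.geomspace(8, 2, num=3))    # [8. 4. 2.], ratio 1/2 (decreasing)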
+ + y = geomspace(-100, -1, num=3) + assert_array_equal(y, [-100, -10, -1]) + assert_array_equal(y.imag, 0) + + def test_complex(self): + # Purely imaginary + y = geomspace(1j, 16j, num=5) + assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) + assert_array_equal(y.real, 0) + + y = geomspace(-4j, -324j, num=5) + assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) + assert_array_equal(y.real, 0) + + y = geomspace(1+1j, 1000+1000j, num=4) + assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j]) + + y = geomspace(-1+1j, -1000+1000j, num=4) + assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j]) + + # Logarithmic spirals + y = geomspace(-1, 1, num=3, dtype=complex) + assert_allclose(y, [-1, 1j, +1]) + + y = geomspace(0+3j, -3+0j, 3) + assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) + y = geomspace(0+3j, 3+0j, 3) + assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j]) + y = geomspace(-3+0j, 0-3j, 3) + assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j]) + y = geomspace(0+3j, -3+0j, 3) + assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) + y = geomspace(-2-3j, 5+7j, 7) + assert_allclose(y, [-2-3j, -0.29058977-4.15771027j, + 2.08885354-4.34146838j, 4.58345529-3.16355218j, + 6.41401745-0.55233457j, 6.75707386+3.11795092j, + 5+7j]) + + # Type promotion should prevent the -5 from becoming a NaN + y = geomspace(3j, -5, 2) + assert_allclose(y, [3j, -5]) + y = geomspace(-5, 3j, 2) + assert_allclose(y, [-5, 3j]) + + def test_dtype(self): + y = geomspace(1, 1e6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = geomspace(1, 1e6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = geomspace(1, 1e6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + # Native types + y = geomspace(1, 1e6, dtype=float) + assert_equal(y.dtype, dtype('float_')) + y = geomspace(1, 1e6, dtype=complex) + assert_equal(y.dtype, dtype('complex')) + + def test_start_stop_array_scalar(self): + lim1 = array([120, 100], dtype="int8") + lim2 = array([-120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = geomspace(lim1[0], lim1[1], 5) + t2 = geomspace(lim2[0], lim2[1], 5) + t3 = geomspace(lim3[0], lim3[1], 5) + t4 = geomspace(120.0, 100.0, 5) + t5 = geomspace(-120.0, -100.0, 5) + t6 = geomspace(1200.0, 1000.0, 5) + + # t3 uses float32, t6 uses float64 + assert_allclose(t1, t4, rtol=1e-2) + assert_allclose(t2, t5, rtol=1e-2) + assert_allclose(t3, t6, rtol=1e-5) + + def test_start_stop_array(self): + # Try to use all special cases. 
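For context, test_start_stop_array below depends on a numpy 1.16 feature: start and stop may themselves be arrays, with the samples laid out along a new axis (axis=0 by default). A standalone illustration (editorial, not part of the vendored file):

    import numpy as np

    t = np.geomspace(np.array([1., 32.]), np.array([1e4, 2.]), num=5)
    print(t.shape)   # (5, 2): one column per (start, stop) pair
    print(t[:, 1])   # [32. 16.  8.  4.  2.]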
+ start = array([1.e0, 32., 1j, -4j, 1+1j, -1]) + stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1]) + t1 = geomspace(start, stop, 5) + t2 = stack([geomspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = geomspace(start, stop[0], 5) + t4 = stack([geomspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = geomspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + gs = geomspace(a, b) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0)) + gs = geomspace(a, b, 1) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0, 1)) + + def test_bounds(self): + assert_raises(ValueError, geomspace, 0, 10) + assert_raises(ValueError, geomspace, 10, 0) + assert_raises(ValueError, geomspace, 0, 0) + + +class TestLinspace(object): + + def test_basic(self): + y = linspace(0, 10) + assert_(len(y) == 50) + y = linspace(2, 10, num=100) + assert_(y[-1] == 10) + y = linspace(2, 10, endpoint=0) + assert_(y[-1] < 10) + assert_raises(ValueError, linspace, 0, 10, num=-1) + + def test_corner(self): + y = list(linspace(0, 1, 1)) + assert_(y == [0.0], y) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*safely interpreted as an integer") + y = list(linspace(0, 1, 2.5)) + assert_(y == [0.0, 1.0]) + + def test_type(self): + t1 = linspace(0, 1, 0).dtype + t2 = linspace(0, 1, 1).dtype + t3 = linspace(0, 1, 2).dtype + assert_equal(t1, t2) + assert_equal(t2, t3) + + def test_dtype(self): + y = linspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = linspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = linspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_start_stop_array_scalar(self): + lim1 = array([-120, 100], dtype="int8") + lim2 = array([120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = linspace(lim1[0], lim1[1], 5) + t2 = linspace(lim2[0], lim2[1], 5) + t3 = linspace(lim3[0], lim3[1], 5) + t4 = linspace(-120.0, 100.0, 5) + t5 = linspace(120.0, -100.0, 5) + t6 = linspace(1200.0, 1000.0, 5) + assert_equal(t1, t4) + assert_equal(t2, t5) + assert_equal(t3, t6) + + def test_start_stop_array(self): + start = array([-120, 120], dtype="int8") + stop = array([100, -100], dtype="int8") + t1 = linspace(start, stop, 5) + t2 = stack([linspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = linspace(start, stop[0], 5) + t4 = stack([linspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = linspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_complex(self): + lim1 = linspace(1 + 2j, 3 + 4j, 5) + t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j]) + lim2 = linspace(1j, 10, 5) + t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j]) + assert_equal(lim1, t1) + assert_equal(lim2, t2) + + def test_physical_quantities(self): + a = PhysicalQuantity(0.0) + b = PhysicalQuantity(1.0) + assert_equal(linspace(a, b), linspace(0.0, 1.0)) + + def test_subclass(self): + a = array(0).view(PhysicalQuantity2) + b = array(1).view(PhysicalQuantity2) + ls = linspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, 
linspace(0.0, 1.0))
+        ls = linspace(a, b, 1)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, linspace(0.0, 1.0, 1))
+
+    def test_array_interface(self):
+        # Regression test for https://github.com/numpy/numpy/pull/6659
+        # Ensure that start/stop can be objects that implement
+        # __array_interface__ and are convertible to numeric scalars
+
+        class Arrayish(object):
+            """
+            A generic object that supports the __array_interface__ and hence
+            can in principle be converted to a numeric scalar, but is not
+            otherwise recognized as numeric, but also happens to support
+            multiplication by floats.
+
+            Data should be an object that implements the buffer interface,
+            and contains at least 4 bytes.
+            """
+
+            def __init__(self, data):
+                self._data = data
+
+            @property
+            def __array_interface__(self):
+                return {'shape': (), 'typestr': '
[... source garbled here: the remainder of test_function_base.py and most of test_getlimits.py are missing; only the closing assertions of test_getlimits.py survive below ...]
+        assert_(info.nmant > 1)
+        assert_(info.minexp < -1)
+        assert_(info.maxexp > 1)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_getlimits.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_getlimits.pyc
new file mode 100644
index 0000000..c428a4d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_getlimits.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.py
new file mode 100644
index 0000000..7707125
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.py
@@ -0,0 +1,518 @@
+from __future__ import division, absolute_import, print_function
+
+import platform
+import pytest
+
+import numpy as np
+from numpy import uint16, float16, float32, float64
+from numpy.testing import assert_, assert_equal
+
+
+def assert_raises_fpe(strmatch, callable, *args, **kwargs):
+    try:
+        callable(*args, **kwargs)
+    except FloatingPointError as exc:
+        assert_(str(exc).find(strmatch) >= 0,
+                "Did not raise floating point %s error" % strmatch)
+    else:
+        assert_(False,
+                "Did not raise floating point %s error" % strmatch)
+
+class TestHalf(object):
+    def setup(self):
+        # An array of all possible float16 values
+        self.all_f16 = np.arange(0x10000, dtype=uint16)
+        self.all_f16.dtype = float16
+        self.all_f32 = np.array(self.all_f16, dtype=float32)
+        self.all_f64 = np.array(self.all_f16, dtype=float64)
+
+        # An array of all non-NaN float16 values, in sorted order
+        self.nonan_f16 = np.concatenate(
+            (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
+             np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
+        self.nonan_f16.dtype = float16
+        self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
+        self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
+
+        # An array of all finite float16 values, in sorted order
+        self.finite_f16 = self.nonan_f16[1:-1]
+        self.finite_f32 = self.nonan_f32[1:-1]
+        self.finite_f64 = self.nonan_f64[1:-1]
+
+    def test_half_conversions(self):
+        """Checks that all 16-bit values survive conversion
+           to/from 32-bit and 64-bit float"""
+        # Because the underlying routines preserve the NaN bits, every
+        # value is preserved when converting to/from other floats.
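Condensed, the round-trip property this test asserts (editorial sketch, not part of the vendored file):

    import numpy as np

    bits = np.arange(0x10000, dtype=np.uint16)       # every float16 bit pattern
    back = bits.view(np.float16).astype(np.float32).astype(np.float16)
    assert np.array_equal(back.view(np.uint16), bits)  # NaN payloads intact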
+
+        # Convert from float32 back to float16
+        b = np.array(self.all_f32, dtype=float16)
+        assert_equal(self.all_f16.view(dtype=uint16),
+                     b.view(dtype=uint16))
+
+        # Convert from float64 back to float16
+        b = np.array(self.all_f64, dtype=float16)
+        assert_equal(self.all_f16.view(dtype=uint16),
+                     b.view(dtype=uint16))
+
+        # Convert float16 to longdouble and back
+        # This doesn't necessarily preserve the extra NaN bits,
+        # so exclude NaNs.
+        a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
+        b = np.array(a_ld, dtype=float16)
+        assert_equal(self.nonan_f16.view(dtype=uint16),
+                     b.view(dtype=uint16))
+
+        # Check the range for which all integers can be represented
+        i_int = np.arange(-2048, 2049)
+        i_f16 = np.array(i_int, dtype=float16)
+        j = np.array(i_f16, dtype=int)
+        assert_equal(i_int, j)
+
+    @pytest.mark.parametrize("offset", [None, "up", "down"])
+    @pytest.mark.parametrize("shift", [None, "up", "down"])
+    @pytest.mark.parametrize("float_t", [np.float32, np.float64])
+    def test_half_conversion_rounding(self, float_t, shift, offset):
+        # Assumes that round to even is used during casting.
+        max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
+
+        # Test all (positive) finite numbers; denormals are the most
+        # interesting, however:
+        f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
+        f16s_float = f16s_patterns.view(np.float16).astype(float_t)
+
+        # Shift the values by half a bit up or down (or do not shift),
+        if shift == "up":
+            f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
+        elif shift == "down":
+            f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
+        else:
+            f16s_float = f16s_float[1:-1]
+
+        # Increase the float by a minimal value:
+        if offset == "up":
+            f16s_float = np.nextafter(f16s_float, float_t(1e50))
+        elif offset == "down":
+            f16s_float = np.nextafter(f16s_float, float_t(-1e50))
+
+        # Convert back to float16 and its bit pattern:
+        res_patterns = f16s_float.astype(np.float16).view(np.uint16)
+
+        # The above calculation tries the original values, or the exact
+        # mid points between the float16 values. It then further offsets them
+        # by as little as possible. If no offset occurs, "round to even"
+        # logic will be necessary; an arbitrarily small offset should always
+        # cause normal up/down rounding.
+
+        # Calculate the expected pattern:
+        cmp_patterns = f16s_patterns[1:-1].copy()
+
+        if shift == "down" and offset != "up":
+            shift_pattern = -1
+        elif shift == "up" and offset != "down":
+            shift_pattern = 1
+        else:
+            # There cannot be a shift, either shift is None, so all rounding
+            # will go back to original, or shift is reduced by offset too much.
+            shift_pattern = 0
+
+        # If rounding occurs, is it normal rounding or round to even?
+        if offset is None:
+            # Round to even occurs, modify only non-even, cast to allow + (-1)
+            cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
+        else:
+            cmp_patterns.view(np.int16)[...] += shift_pattern
+
+        assert_equal(res_patterns, cmp_patterns)
+
+    @pytest.mark.parametrize(["float_t", "uint_t", "bits"],
+                             [(np.float32, np.uint32, 23),
+                              (np.float64, np.uint64, 52)])
+    def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
+        # Test specifically that all bits are considered when deciding
+        # whether round to even should occur (i.e. no bits are lost at the
+        # end). Compare also gh-12721.
The most bits can get lost for the + # smallest denormal: + smallest_value = np.uint16(1).view(np.float16).astype(float_t) + assert smallest_value == 2**-24 + + # Will be rounded to zero based on round to even rule: + rounded_to_zero = smallest_value / float_t(2) + assert rounded_to_zero.astype(np.float16) == 0 + + # The significand will be all 0 for the float_t, test that we do not + # lose the lower ones of these: + for i in range(bits): + # slightly increasing the value should make it round up: + larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i) + larger_value = larger_pattern.view(float_t) + assert larger_value.astype(np.float16) == smallest_value + + def test_nans_infs(self): + with np.errstate(all='ignore'): + # Check some of the ufuncs + assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) + assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) + assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) + assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.spacing(float16(65504)), np.inf) + + # Check comparisons of all values with NaN + nan = float16(np.nan) + + assert_(not (self.all_f16 == nan).any()) + assert_(not (nan == self.all_f16).any()) + + assert_((self.all_f16 != nan).all()) + assert_((nan != self.all_f16).all()) + + assert_(not (self.all_f16 < nan).any()) + assert_(not (nan < self.all_f16).any()) + + assert_(not (self.all_f16 <= nan).any()) + assert_(not (nan <= self.all_f16).any()) + + assert_(not (self.all_f16 > nan).any()) + assert_(not (nan > self.all_f16).any()) + + assert_(not (self.all_f16 >= nan).any()) + assert_(not (nan >= self.all_f16).any()) + + def test_half_values(self): + """Confirms a small number of known half values""" + a = np.array([1.0, -1.0, + 2.0, -2.0, + 0.0999755859375, 0.333251953125, # 1/10, 1/3 + 65504, -65504, # Maximum magnitude + 2.0**(-14), -2.0**(-14), # Minimum normal + 2.0**(-24), -2.0**(-24), # Minimum subnormal + 0, -1/1e1000, # Signed zeros + np.inf, -np.inf]) + b = np.array([0x3c00, 0xbc00, + 0x4000, 0xc000, + 0x2e66, 0x3555, + 0x7bff, 0xfbff, + 0x0400, 0x8400, + 0x0001, 0x8001, + 0x0000, 0x8000, + 0x7c00, 0xfc00], dtype=uint16) + b.dtype = float16 + assert_equal(a, b) + + def test_half_rounding(self): + """Checks that rounding when converting to half is correct""" + a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal + 2.0**-25, # Underflows to zero (nearest even mode) + 2.0**-26, # Underflows to zero + 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) + 1.0+2.0**-11, # rounds to 1.0 (nearest even mode) + 1.0+2.0**-12, # rounds to 1.0 + 65519, # rounds to 65504 + 65520], # rounds to inf + dtype=float64) + rounded = [2.0**-24, + 0.0, + 0.0, + 1.0+2.0**(-10), + 1.0, + 1.0, + 65504, + np.inf] + + # Check float64->float16 rounding + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + # Check float32->float16 rounding + a = np.array(a, dtype=float32) + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + def test_half_correctness(self): + """Take every finite float16, and check the casting functions with + a manual conversion.""" + + # Create an array of all finite float16s + a_bits = self.finite_f16.view(dtype=uint16) + + # Convert to 64-bit float manually + a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) + a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15 + a_man = (a_bits & 0x03ff) * 2.0**(-10) + # Implicit bit of normalized floats + a_man[a_exp != -15] += 1 + # Denormalized exponent is -14 + a_exp[a_exp == -15] = -14 + + 
a_manual = a_sgn * a_man * 2.0**a_exp + + a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + if len(a32_fail) != 0: + bad_index = a32_fail[0] + assert_equal(self.finite_f32, a_manual, + "First non-equal is half value %x -> %g != %g" % + (self.finite_f16[bad_index], + self.finite_f32[bad_index], + a_manual[bad_index])) + + a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] + if len(a64_fail) != 0: + bad_index = a64_fail[0] + assert_equal(self.finite_f64, a_manual, + "First non-equal is half value %x -> %g != %g" % + (self.finite_f16[bad_index], + self.finite_f64[bad_index], + a_manual[bad_index])) + + def test_half_ordering(self): + """Make sure comparisons are working right""" + + # All non-NaN float16 values in reverse order + a = self.nonan_f16[::-1].copy() + + # 32-bit float copy + b = np.array(a, dtype=float32) + + # Should sort the same + a.sort() + b.sort() + assert_equal(a, b) + + # Comparisons should work + assert_((a[:-1] <= a[1:]).all()) + assert_(not (a[:-1] > a[1:]).any()) + assert_((a[1:] >= a[:-1]).all()) + assert_(not (a[1:] < a[:-1]).any()) + # All != except for +/-0 + assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2) + assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2) + + def test_half_funcs(self): + """Test the various ArrFuncs""" + + # fill + assert_equal(np.arange(10, dtype=float16), + np.arange(10, dtype=float32)) + + # fillwithscalar + a = np.zeros((5,), dtype=float16) + a.fill(1) + assert_equal(a, np.ones((5,), dtype=float16)) + + # nonzero and copyswap + a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + a = a.byteswap().newbyteorder() + assert_equal(a.nonzero()[0], + [2, 5, 6]) + + # dot + a = np.arange(0, 10, 0.5, dtype=float16) + b = np.ones((20,), dtype=float16) + assert_equal(np.dot(a, b), + 95) + + # argmax + a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 4) + a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 5) + + # getitem + a = np.arange(10, dtype=float16) + for i in range(10): + assert_equal(a.item(i), i) + + def test_spacing_nextafter(self): + """Test np.spacing and np.nextafter""" + # All non-negative finite #'s + a = np.arange(0x7c00, dtype=uint16) + hinf = np.array((np.inf,), dtype=float16) + a_f16 = a.view(dtype=float16) + + assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1]) + + assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) + assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) + + # switch to negatives + a |= 0x8000 + + assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) + assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:]) + + assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) + assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) + + def test_half_ufuncs(self): + """Test the various ufuncs""" + + a = np.array([0, 1, 2, 4, 2], dtype=float16) + b = np.array([-2, 5, 1, 4, 3], dtype=float16) + c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) + + assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) + assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) + assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) + assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) + + assert_equal(np.equal(a, b), [False, False, False, True, False]) + assert_equal(np.not_equal(a, b), [True, True, True, 
False, True]) + assert_equal(np.less(a, b), [False, True, False, False, True]) + assert_equal(np.less_equal(a, b), [False, True, False, True, True]) + assert_equal(np.greater(a, b), [True, False, True, False, False]) + assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) + assert_equal(np.logical_and(a, b), [False, True, True, True, True]) + assert_equal(np.logical_or(a, b), [True, True, True, True, True]) + assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) + assert_equal(np.logical_not(a), [True, False, False, False, False]) + + assert_equal(np.isnan(c), [False, False, False, True, False]) + assert_equal(np.isinf(c), [False, False, True, False, False]) + assert_equal(np.isfinite(c), [True, True, False, False, True]) + assert_equal(np.signbit(b), [True, False, False, False, False]) + + assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) + + assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) + + x = np.maximum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [0, 5, 1, 0, 6]) + + assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) + + x = np.minimum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [-2, -1, -np.inf, 0, 3]) + + assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) + assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) + assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) + assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) + + assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) + assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) + assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2])) + assert_equal(np.square(b), [4, 25, 1, 16, 9]) + assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) + assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) + assert_equal(np.conjugate(b), b) + assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) + assert_equal(np.negative(b), [2, -5, -1, -4, -3]) + assert_equal(np.positive(b), b) + assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) + assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) + assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) + assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) + + def test_half_coercion(self): + """Test that half gets coerced properly with the other types""" + a16 = np.array((1,), dtype=float16) + a32 = np.array((1,), dtype=float32) + b16 = float16(1) + b32 = float32(1) + + assert_equal(np.power(a16, 2).dtype, float16) + assert_equal(np.power(a16, 2.0).dtype, float16) + assert_equal(np.power(a16, b16).dtype, float16) + assert_equal(np.power(a16, b32).dtype, float16) + assert_equal(np.power(a16, a16).dtype, float16) + assert_equal(np.power(a16, a32).dtype, float32) + + assert_equal(np.power(b16, 2).dtype, float64) + assert_equal(np.power(b16, 2.0).dtype, float64) + assert_equal(np.power(b16, b16).dtype, float16) + assert_equal(np.power(b16, b32).dtype, float32) + assert_equal(np.power(b16, a16).dtype, float16) + assert_equal(np.power(b16, a32).dtype, float32) + + assert_equal(np.power(a32, a16).dtype, float32) + assert_equal(np.power(a32, b16).dtype, float32) + assert_equal(np.power(b32, a16).dtype, float16) + assert_equal(np.power(b32, b16).dtype, float32) + + @pytest.mark.skipif(platform.machine() == "armv5tel", + reason="See gh-413.") + def test_half_fpe(self): + with np.errstate(all='raise'): + sx16 = np.array((1e-4,), dtype=float16) + bx16 = np.array((1e4,), dtype=float16) + sy16 = float16(1e-4) + by16 = float16(1e4) + + # Underflow errors + assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) + 
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(-2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14+2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(-2.**-14-2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14+2**-23), float16(4)) + + # Overflow errors + assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) + assert_raises_fpe('overflow', lambda a, b:a+b, + float16(65504), float16(17)) + assert_raises_fpe('overflow', lambda a, b:a-b, + float16(-65504), float16(17)) + assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.spacing, float16(65504)) + + # Invalid value errors + assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.nan)) + assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0)) + assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0)) + assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan)) + + # These should not raise + float16(65472)+float16(32) + float16(2**-13)/float16(2) + float16(2**-14)/float16(2**10) + np.spacing(float16(-65504)) + np.nextafter(float16(65504), float16(-np.inf)) + np.nextafter(float16(-65504), float16(np.inf)) + float16(2**-14)/float16(2**10) + float16(-2**-14)/float16(2**10) + float16(2**-14+2**-23)/float16(2) + float16(-2**-14-2**-23)/float16(2) + + def test_half_array_interface(self): + """Test that half is compatible with __array_interface__""" + class Dummy: + pass + + a = np.ones((1,), dtype=float16) + b = Dummy() + b.__array_interface__ = a.__array_interface__ + c = np.array(b) + assert_(c.dtype == float16) + assert_equal(a, c) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.pyc new file mode 100644 index 0000000..700c4f3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.py new file mode 100644 index 0000000..63b43c4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.py @@ -0,0 +1,123 @@ +from __future__ 
import division, absolute_import, print_function + +import numpy as np +from numpy.testing import assert_raises + +class TestIndexErrors(object): + '''Tests to exercise indexerrors not covered by other tests.''' + + def test_arraytypes_fasttake(self): + 'take from a 0-length dimension' + x = np.empty((2, 3, 0, 4)) + assert_raises(IndexError, x.take, [0], axis=2) + assert_raises(IndexError, x.take, [1], axis=2) + assert_raises(IndexError, x.take, [0], axis=2, mode='wrap') + assert_raises(IndexError, x.take, [0], axis=2, mode='clip') + + def test_take_from_object(self): + # Check exception taking from object array + d = np.zeros(5, dtype=object) + assert_raises(IndexError, d.take, [6]) + + # Check exception taking from 0-d array + d = np.zeros((5, 0), dtype=object) + assert_raises(IndexError, d.take, [1], axis=1) + assert_raises(IndexError, d.take, [0], axis=1) + assert_raises(IndexError, d.take, [0]) + assert_raises(IndexError, d.take, [0], mode='wrap') + assert_raises(IndexError, d.take, [0], mode='clip') + + def test_multiindex_exceptions(self): + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.item, 20) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.item, (0, 0)) + + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.itemset, 20, 0) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.itemset, (0, 0), 0) + + def test_put_exceptions(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + + def test_iterators_exceptions(self): + "cases in iterators.c" + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a[0, 5, None, 2]) + assert_raises(IndexError, lambda: a[0, 5, 0, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) + assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) + + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a[0, 0, None, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + + def test_mapping(self): + "cases from mapping.c" + + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros((0, 10)) + assert_raises(IndexError, lambda: a[12]) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(10, 20)]) + assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, 0)]) + assert_raises(IndexError, lambda: assign(a, 
(1, 0), 1)) + + a = np.zeros((10,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + a = np.zeros((0,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(1, [1, 20])]) + assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, [0, 1])]) + assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) + + def test_methods(self): + "cases from methods.c" + + a = np.zeros((3, 3)) + assert_raises(IndexError, lambda: a.item(100)) + assert_raises(IndexError, lambda: a.itemset(100, 1)) + a = np.zeros((0, 3)) + assert_raises(IndexError, lambda: a.item(100)) + assert_raises(IndexError, lambda: a.itemset(100, 1)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.pyc new file mode 100644 index 0000000..4ad649a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.py new file mode 100644 index 0000000..99792ce --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.py @@ -0,0 +1,1325 @@ +from __future__ import division, absolute_import, print_function + +import sys +import warnings +import functools +import operator +import pytest + +import numpy as np +from numpy.core._multiarray_tests import array_indexing +from itertools import product +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, assert_warns, + HAS_REFCOUNT, suppress_warnings, + ) + + +class TestIndexing(object): + def test_index_no_floats(self): + a = np.array([[[5]]]) + + assert_raises(IndexError, lambda: a[0.0]) + assert_raises(IndexError, lambda: a[0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0]) + assert_raises(IndexError, lambda: a[0.0,:]) + assert_raises(IndexError, lambda: a[:, 0.0]) + assert_raises(IndexError, lambda: a[:, 0.0,:]) + assert_raises(IndexError, lambda: a[0.0,:,:]) + assert_raises(IndexError, lambda: a[0, 0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0, 0]) + assert_raises(IndexError, lambda: a[0, 0.0, 0]) + assert_raises(IndexError, lambda: a[-1.4]) + assert_raises(IndexError, lambda: a[0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0]) + assert_raises(IndexError, lambda: a[-1.4,:]) + assert_raises(IndexError, lambda: a[:, -1.4]) + assert_raises(IndexError, lambda: a[:, -1.4,:]) + assert_raises(IndexError, lambda: a[-1.4,:,:]) + assert_raises(IndexError, lambda: a[0, 0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0, 0]) + assert_raises(IndexError, lambda: a[0, -1.4, 0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0,:]) + + def test_slicing_no_floats(self): + a = np.array([[5]]) + + # start as float. + assert_raises(TypeError, lambda: a[0.0:]) + assert_raises(TypeError, lambda: a[0:, 0.0:2]) + assert_raises(TypeError, lambda: a[0.0::2, :0]) + assert_raises(TypeError, lambda: a[0.0:1:2,:]) + assert_raises(TypeError, lambda: a[:, 0.0:]) + # stop as float. 
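Together, test_index_no_floats and test_slicing_no_floats pin down how non-integer indices fail: a float used as an index raises IndexError, while a float used as a slice bound raises TypeError. Standalone (editorial, not part of the vendored file):

    import numpy as np

    a = np.arange(10)
    try:
        a[1.5]        # float index -> IndexError
    except IndexError as exc:
        print(exc)
    try:
        a[1.5:4]      # float slice bound -> TypeError
    except TypeError as exc:
        print(exc)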
+ assert_raises(TypeError, lambda: a[:0.0]) + assert_raises(TypeError, lambda: a[:0, 1:2.0]) + assert_raises(TypeError, lambda: a[:0.0:2, :0]) + assert_raises(TypeError, lambda: a[:0.0,:]) + assert_raises(TypeError, lambda: a[:, 0:4.0:2]) + # step as float. + assert_raises(TypeError, lambda: a[::1.0]) + assert_raises(TypeError, lambda: a[0:, :2:2.0]) + assert_raises(TypeError, lambda: a[1::4.0, :0]) + assert_raises(TypeError, lambda: a[::5.0,:]) + assert_raises(TypeError, lambda: a[:, 0:4:2.0]) + # mixed. + assert_raises(TypeError, lambda: a[1.0:2:2.0]) + assert_raises(TypeError, lambda: a[1.0::2.0]) + assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) + assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) + assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:]) + assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) + # should still get the DeprecationWarning if step = 0. + assert_raises(TypeError, lambda: a[::0.0]) + + def test_index_no_array_to_index(self): + # No non-scalar arrays. + a = np.array([[[1]]]) + + assert_raises(TypeError, lambda: a[a:a:a]) + + def test_none_index(self): + # `None` index adds newaxis + a = np.array([1, 2, 3]) + assert_equal(a[None], a[np.newaxis]) + assert_equal(a[None].ndim, a.ndim + 1) + + def test_empty_tuple_index(self): + # Empty tuple index creates a view + a = np.array([1, 2, 3]) + assert_equal(a[()], a) + assert_(a[()].base is a) + a = np.array(0) + assert_(isinstance(a[()], np.int_)) + + def test_void_scalar_empty_tuple(self): + s = np.zeros((), dtype='V4') + assert_equal(s[()].dtype, s.dtype) + assert_equal(s[()], s) + assert_equal(type(s[...]), np.ndarray) + + def test_same_kind_index_casting(self): + # Indexes should be cast with same-kind and not safe, even if that + # is somewhat unsafe. So test various different code paths. + index = np.arange(5) + u_index = index.astype(np.uintp) + arr = np.arange(10) + + assert_array_equal(arr[index], arr[u_index]) + arr[u_index] = np.arange(5) + assert_array_equal(arr, np.arange(10)) + + arr = np.arange(10).reshape(5, 2) + assert_array_equal(arr[index], arr[u_index]) + + arr[u_index] = np.arange(5)[:,None] + assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) + + arr = np.arange(25).reshape(5, 5) + assert_array_equal(arr[u_index, u_index], arr[index, index]) + + def test_empty_fancy_index(self): + # Empty list index creates an empty array + # with the same dtype (but with weird shape) + a = np.array([1, 2, 3]) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([], dtype=np.intp) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([]) + assert_raises(IndexError, a.__getitem__, b) + + def test_ellipsis_index(self): + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + assert_(a[...] is not a) + assert_equal(a[...], a) + # `a[...]` was `a` in numpy <1.9. 
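Restated standalone (editorial, not part of the vendored file): since numpy 1.9, `a[...]` returns a fresh view whose base is `a` rather than `a` itself, so identity checks fail while memory is still shared:

    import numpy as np

    a = np.arange(9).reshape(3, 3)
    v = a[...]
    assert v is not a and v.base is a
    v[0, 0] = 99
    assert a[0, 0] == 99   # writes through the view reach a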
+ assert_(a[...].base is a) + + # Slicing with ellipsis can skip an + # arbitrary number of dimensions + assert_equal(a[0, ...], a[0]) + assert_equal(a[0, ...], a[0,:]) + assert_equal(a[..., 0], a[:, 0]) + + # Slicing with ellipsis always results + # in an array, not a scalar + assert_equal(a[0, ..., 1], np.array(2)) + + # Assignment with `(Ellipsis,)` on 0-d arrays + b = np.array(1) + b[(Ellipsis,)] = 2 + assert_equal(b, 2) + + def test_single_int_index(self): + # Single integer index selects one row + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[0], [1, 2, 3]) + assert_equal(a[-1], [7, 8, 9]) + + # Index out of bounds produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 30) + # Index overflow produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 64) + + def test_single_bool_index(self): + # Single boolean index + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[np.array(True)], a[None]) + assert_equal(a[np.array(False)], a[None][0:0]) + + def test_boolean_shape_mismatch(self): + arr = np.ones((5, 4, 3)) + + index = np.array([True]) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.array([False] * 6) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.zeros((4, 4), dtype=bool) + assert_raises(IndexError, arr.__getitem__, index) + + assert_raises(IndexError, arr.__getitem__, (slice(None), index)) + + def test_boolean_indexing_onedim(self): + # Indexing a 2-dimensional array with + # boolean array of length one + a = np.array([[ 0., 0., 0.]]) + b = np.array([ True], dtype=bool) + assert_equal(a[b], a) + # boolean assignment + a[b] = 1. + assert_equal(a, [[1., 1., 1.]]) + + def test_boolean_assignment_value_mismatch(self): + # A boolean assignment should fail when the shape of the values + # cannot be broadcast to the subscription. (see also gh-3458) + a = np.arange(4) + + def f(a, v): + a[a > -1] = v + + assert_raises(ValueError, f, a, []) + assert_raises(ValueError, f, a, [1, 2, 3]) + assert_raises(ValueError, f, a[:1], [1, 2, 3]) + + def test_boolean_assignment_needs_api(self): + # See also gh-7666 + # This caused a segfault on Python 2 due to the GIL not being + # held when the iterator does not need it, but the transfer function + # does + arr = np.zeros(1000) + indx = np.zeros(1000, dtype=bool) + indx[:100] = True + arr[indx] = np.ones(100, dtype=object) + + expected = np.zeros(1000) + expected[:100] = 1 + assert_array_equal(arr, expected) + + def test_boolean_indexing_twodim(self): + # Indexing a 2-dimensional array with + # 2-dimensional boolean array + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + b = np.array([[ True, False, True], + [False, True, False], + [ True, False, True]]) + assert_equal(a[b], [1, 3, 5, 7, 9]) + assert_equal(a[b[1]], [[4, 5, 6]]) + assert_equal(a[b[0]], a[b[2]]) + + # boolean assignment + a[b] = 0 + assert_equal(a, [[0, 2, 0], + [4, 0, 6], + [0, 8, 0]]) + + def test_reverse_strides_and_subspace_bufferinit(self): + # This tests that the strides are not reversed for simple and + # subspace fancy indexing. + a = np.ones(5) + b = np.zeros(5, dtype=np.intp)[::-1] + c = np.arange(5)[::-1] + + a[b] = c + # If the strides are not reversed, the 0 in the arange comes last. 
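The assertion that follows relies on fancy assignment applying writes in the order the index array lists them, so with duplicate indices the last write wins. Standalone (editorial, not part of the vendored file):

    import numpy as np

    a = np.ones(5)
    a[np.zeros(5, dtype=np.intp)] = np.array([4., 3., 2., 1., 0.])
    assert a[0] == 0.0   # five writes to a[0]; the final one remains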
+        assert_equal(a[0], 0)
+
+        # This also tests that the subspace buffer is initialized:
+        a = np.ones((5, 2))
+        c = np.arange(10).reshape(5, 2)[::-1]
+        a[b, :] = c
+        assert_equal(a[0], [0, 1])
+
+    def test_reversed_strides_result_allocation(self):
+        # Test a bug when calculating the output strides for a result array
+        # when the subspace size was 1 (and test other cases as well)
+        a = np.arange(10)[:, None]
+        i = np.arange(10)[::-1]
+        assert_array_equal(a[i], a[i.copy('C')])
+
+        a = np.arange(20).reshape(-1, 2)
+
+    def test_uncontiguous_subspace_assignment(self):
+        # During development there was a bug activating a skip logic
+        # based on ndim instead of size.
+        a = np.full((3, 4, 2), -1)
+        b = np.full((3, 4, 2), -1)
+
+        a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
+        b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
+
+        assert_equal(a, b)
+
+    def test_too_many_fancy_indices_special_case(self):
+        # Just documents behaviour, this is a small limitation.
+        a = np.ones((1,) * 32)  # 32 is NPY_MAXDIMS
+        assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
+
+    def test_scalar_array_bool(self):
+        # NumPy bools can be used as boolean index (python ones as of yet not)
+        a = np.array(1)
+        assert_equal(a[np.bool_(True)], a[np.array(True)])
+        assert_equal(a[np.bool_(False)], a[np.array(False)])
+
+        # After deprecating bools as integers:
+        #a = np.array([0,1,2])
+        #assert_equal(a[True, :], a[None, :])
+        #assert_equal(a[:, True], a[:, None])
+        #
+        #assert_(not np.may_share_memory(a, a[True, :]))
+
+    def test_everything_returns_views(self):
+        # Before `...` would return a itself.
+        a = np.arange(5)
+
+        assert_(a is not a[()])
+        assert_(a is not a[...])
+        assert_(a is not a[:])
+
+    def test_broaderrors_indexing(self):
+        a = np.zeros((5, 5))
+        assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
+        assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
+
+    def test_trivial_fancy_out_of_bounds(self):
+        a = np.zeros(5)
+        ind = np.ones(20, dtype=np.intp)
+        ind[-1] = 10
+        assert_raises(IndexError, a.__getitem__, ind)
+        assert_raises(IndexError, a.__setitem__, ind, 0)
+        ind = np.ones(20, dtype=np.intp)
+        ind[0] = 11
+        assert_raises(IndexError, a.__getitem__, ind)
+        assert_raises(IndexError, a.__setitem__, ind, 0)
+
+    def test_trivial_fancy_not_possible(self):
+        # Test that the fast path for trivial assignment is not incorrectly
+        # used when the index is not contiguous or 1D, see also gh-11467.
+        a = np.arange(6)
+        idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
+        assert_array_equal(a[idx], idx)
+
+        # this case must not go into the fast path, note that idx is
+        # a non-contiguous, non-1D array here.
+        a[idx] = -1
+        res = np.arange(6)
+        res[0] = -1
+        res[3] = -1
+        assert_array_equal(a, res)
+
+    def test_nonbaseclass_values(self):
+        class SubClass(np.ndarray):
+            def __array_finalize__(self, old):
+                # Have array finalize do funny things
+                self.fill(99)
+
+        a = np.zeros((5, 5))
+        s = a.copy().view(type=SubClass)
+        s.fill(1)
+
+        a[[0, 1, 2, 3, 4], :] = s
+        assert_((a == 1).all())
+
+        # Subspace is last, so transposing might want to finalize
+        a[:, [0, 1, 2, 3, 4]] = s
+        assert_((a == 1).all())
+
+        a.fill(0)
+        a[...]
= s + assert_((a == 1).all()) + + def test_subclass_writeable(self): + d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], + dtype=[('target', 'S20'), ('V_mag', '>f4')]) + ind = np.array([False, True, True], dtype=bool) + assert_(d[ind].flags.writeable) + ind = np.array([0, 1]) + assert_(d[ind].flags.writeable) + assert_(d[...].flags.writeable) + assert_(d[0].flags.writeable) + + def test_memory_order(self): + # This is not necessary to preserve. Memory layouts for + # more complex indices are not as simple. + a = np.arange(10) + b = np.arange(10).reshape(5,2).T + assert_(a[b].flags.f_contiguous) + + # Takes a different implementation branch: + a = a.reshape(-1, 1) + assert_(a[b, 0].flags.f_contiguous) + + def test_scalar_return_type(self): + # Full scalar indices should return scalars and object + # arrays should not call PyArray_Return on their items + class Zero(object): + # The most basic valid indexing + def __index__(self): + return 0 + + z = Zero() + + class ArrayLike(object): + # Simple array, should behave like the array + def __array__(self): + return np.array(0) + + a = np.zeros(()) + assert_(isinstance(a[()], np.float_)) + a = np.zeros(1) + assert_(isinstance(a[z], np.float_)) + a = np.zeros((1, 1)) + assert_(isinstance(a[z, np.array(0)], np.float_)) + assert_(isinstance(a[z, ArrayLike()], np.float_)) + + # And object arrays do not call it too often: + b = np.array(0) + a = np.array(0, dtype=object) + a[()] = b + assert_(isinstance(a[()], np.ndarray)) + a = np.array([b, None]) + assert_(isinstance(a[z], np.ndarray)) + a = np.array([[b, None]]) + assert_(isinstance(a[z, np.array(0)], np.ndarray)) + assert_(isinstance(a[z, ArrayLike()], np.ndarray)) + + def test_small_regressions(self): + # Reference count of intp for index checks + a = np.array([0]) + if HAS_REFCOUNT: + refcount = sys.getrefcount(np.dtype(np.intp)) + # item setting always checks indices in separate function: + a[np.array([0], dtype=np.intp)] = 1 + a[np.array([0], dtype=np.uint8)] = 1 + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.intp), 1) + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.uint8), 1) + + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) + + def test_unaligned(self): + v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] + d = v.view(np.dtype("S8")) + # unaligned source + x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] + x = x.view(np.dtype("S8")) + x[...] = np.array("b" * 8, dtype="S") + b = np.arange(d.size) + #trivial + assert_equal(d[b], d) + d[b] = x + # nontrivial + # unaligned index array + b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] + b = b.view(np.intp)[:d.size] + b[...] 
= np.arange(d.size) + assert_equal(d[b.astype(np.int16)], d) + d[b.astype(np.int16)] = x + # boolean + d[b % 2 == 0] + d[b % 2 == 0] = x[::2] + + def test_tuple_subclass(self): + arr = np.ones((5, 5)) + + # A tuple subclass should also be an nd-index + class TupleSubclass(tuple): + pass + index = ([1], [1]) + index = TupleSubclass(index) + assert_(arr[index].shape == (1,)) + # Unlike the non nd-index: + assert_(arr[index,].shape != (1,)) + + def test_broken_sequence_not_nd_index(self): + # See gh-5063: + # If we have an object which claims to be a sequence, but fails + # on item getting, this should not be converted to an nd-index (tuple) + # If this object happens to be a valid index otherwise, it should work + # This object here is very dubious and probably bad though: + class SequenceLike(object): + def __index__(self): + return 0 + + def __len__(self): + return 1 + + def __getitem__(self, item): + raise IndexError('Not possible') + + arr = np.arange(10) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + # also test that field indexing does not segfault + # for a similar reason, by indexing a structured array + arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')]) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + def test_indexing_array_weird_strides(self): + # See also gh-6221 + # the shapes used here come from the issue and create the correct + # size for the iterator buffering size. + x = np.ones(10) + x2 = np.ones((10, 2)) + ind = np.arange(10)[:, None, None, None] + ind = np.broadcast_to(ind, (10, 55, 4, 4)) + + # single advanced index case + assert_array_equal(x[ind], x[ind.copy()]) + # higher dimensional advanced index + zind = np.zeros(4, dtype=np.intp) + assert_array_equal(x2[ind, zind], x2[ind.copy(), zind]) + + def test_indexing_array_negative_strides(self): + # From gh-8264, + # core dumps if negative strides are used in iteration + arro = np.zeros((4, 4)) + arr = arro[::-1, ::-1] + + slices = (slice(None), [0, 1, 2, 3]) + arr[slices] = 10 + assert_array_equal(arr, 10.) + +class TestFieldIndexing(object): + def test_scalar_return_type(self): + # Field access on an array should return an array, even if it + # is 0-d. + a = np.zeros((), [('a','f8')]) + assert_(isinstance(a['a'], np.ndarray)) + assert_(isinstance(a[['a']], np.ndarray)) + + +class TestBroadcastedAssignments(object): + def assign(self, a, ind, val): + a[ind] = val + return a + + def test_prepending_ones(self): + a = np.zeros((3, 2)) + + a[...] = np.ones((1, 3, 2)) + # Fancy with subspace with and without transpose + a[[0, 1, 2], :] = np.ones((1, 3, 2)) + a[:, [0, 1]] = np.ones((1, 3, 2)) + # Fancy without subspace (with broadcasting) + a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) + + def test_prepend_not_one(self): + assign = self.assign + s_ = np.s_ + a = np.zeros(5) + + # Too large and not only ones. 
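+        # A prepended length-1 axis would be silently broadcast away; a
+        # prepended length-2 axis cannot be, so these must raise.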
+ assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1))) + + def test_simple_broadcasting_errors(self): + assign = self.assign + s_ = np.s_ + a = np.zeros((5, 1)) + + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) + + def test_index_is_larger(self): + # Simple case of fancy index broadcasting of the index. + a = np.zeros((5, 5)) + a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] + + assert_((a[:3, :3] == [2, 3, 4]).all()) + + def test_broadcast_subspace(self): + a = np.zeros((100, 100)) + v = np.arange(100)[:,None] + b = np.arange(100)[::-1] + a[b] = v + assert_((a[::-1] == v).all()) + + +class TestSubclasses(object): + def test_basic(self): + # Test that indexing in various ways produces SubClass instances, + # and that the base is set up correctly: the original subclass + # instance for views, and a new ndarray for advanced/boolean indexing + # where a copy was made (latter a regression test for gh-11983). + class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s_slice = s[:3] + assert_(type(s_slice) is SubClass) + assert_(s_slice.base is s) + assert_array_equal(s_slice, a[:3]) + + s_fancy = s[[0, 1, 2]] + assert_(type(s_fancy) is SubClass) + assert_(s_fancy.base is not s) + assert_(type(s_fancy.base) is np.ndarray) + assert_array_equal(s_fancy, a[[0, 1, 2]]) + assert_array_equal(s_fancy.base, a[[0, 1, 2]]) + + s_bool = s[s > 0] + assert_(type(s_bool) is SubClass) + assert_(s_bool.base is not s) + assert_(type(s_bool.base) is np.ndarray) + assert_array_equal(s_bool, a[a > 0]) + assert_array_equal(s_bool.base, a[a > 0]) + + def test_finalize_gets_full_info(self): + # Array finalize should be called on the filled array. + class SubClass(np.ndarray): + def __array_finalize__(self, old): + self.finalize_status = np.array(self) + self.old = old + + s = np.arange(10).view(SubClass) + new_s = s[:3] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[[0,1,2,3]] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[s > 0] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_slice_decref_getsetslice(self): + # See gh-10066, a temporary slice object should be discarted. + # This test is only really interesting on Python 2 since + # it goes through `__set/getslice__` here and can probably be + # removed. Use 0:7 to make sure it is never None:7. 
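+        # KeepIndexObject records the index it was called with in self.indx,
+        # so both its value and its reference count can be checked afterwards.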
+ class KeepIndexObject(np.ndarray): + def __getitem__(self, indx): + self.indx = indx + if indx == slice(0, 7): + raise ValueError + + def __setitem__(self, indx, val): + self.indx = indx + if indx == slice(0, 4): + raise ValueError + + k = np.array([1]).view(KeepIndexObject) + k[0:5] + assert_equal(k.indx, slice(0, 5)) + assert_equal(sys.getrefcount(k.indx), 2) + try: + k[0:7] + raise AssertionError + except ValueError: + # The exception holds a reference to the slice so clear on Py2 + if hasattr(sys, 'exc_clear'): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + sys.exc_clear() + assert_equal(k.indx, slice(0, 7)) + assert_equal(sys.getrefcount(k.indx), 2) + + k[0:3] = 6 + assert_equal(k.indx, slice(0, 3)) + assert_equal(sys.getrefcount(k.indx), 2) + try: + k[0:4] = 2 + raise AssertionError + except ValueError: + # The exception holds a reference to the slice so clear on Py2 + if hasattr(sys, 'exc_clear'): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + sys.exc_clear() + assert_equal(k.indx, slice(0, 4)) + assert_equal(sys.getrefcount(k.indx), 2) + + +class TestFancyIndexingCast(object): + def test_boolean_index_cast_assign(self): + # Setup the boolean index and float arrays. + shape = (8, 63) + bool_index = np.zeros(shape).astype(bool) + bool_index[0, 1] = True + zero_array = np.zeros(shape) + + # Assigning float is fine. + zero_array[bool_index] = np.array([1]) + assert_equal(zero_array[0, 1], 1) + + # Fancy indexing works, although we get a cast warning. + assert_warns(np.ComplexWarning, + zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) + assert_equal(zero_array[0, 1], 2) # No complex part + + # Cast complex to float, throwing away the imaginary portion. + assert_warns(np.ComplexWarning, + zero_array.__setitem__, bool_index, np.array([1j])) + assert_equal(zero_array[0, 1], 0) + +class TestFancyIndexingEquivalence(object): + def test_object_assign(self): + # Check that the field and object special case using copyto is active. + # The right hand side cannot be converted to an array here. + a = np.arange(5, dtype=object) + b = a.copy() + a[:3] = [1, (1,2), 3] + b[[0, 1, 2]] = [1, (1,2), 3] + assert_array_equal(a, b) + + # test same for subspace fancy indexing + b = np.arange(5, dtype=object)[None, :] + b[[0], :3] = [[1, (1,2), 3]] + assert_array_equal(a, b[0]) + + # Check that swapping of axes works. + # There was a bug that made the later assignment throw a ValueError + # do to an incorrectly transposed temporary right hand side (gh-5714) + b = b.T + b[:3, [0]] = [[1], [(1,2)], [3]] + assert_array_equal(a, b[:, 0]) + + # Another test for the memory order of the subspace + arr = np.ones((3, 4, 5), dtype=object) + # Equivalent slicing assignment for comparison + cmp_arr = arr.copy() + cmp_arr[:1, ...] = [[[1], [2], [3], [4]]] + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + arr = arr.copy('F') + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + + def test_cast_equivalence(self): + # Yes, normal slicing uses unsafe casting. + a = np.arange(5) + b = a.copy() + + a[:3] = np.array(['2', '-3', '-1']) + b[[0, 2, 1]] = np.array(['2', '-1', '-3']) + assert_array_equal(a, b) + + # test the same for subspace fancy indexing + b = np.arange(5)[None, :] + b[[0], :3] = np.array([['2', '-3', '-1']]) + assert_array_equal(a, b[0]) + + +class TestMultiIndexingAutomated(object): + """ + These tests use code to mimic the C-Code indexing for selection. 
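+    (The mimic rebuilds each selection with reshape/transpose/`take`, so it
+    serves as an independent reference result for `arr[index]`.)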
+ + NOTE: + + * This still lacks tests for complex item setting. + * If you change behavior of indexing, you might want to modify + these tests to try more combinations. + * Behavior was written to match numpy version 1.8. (though a + first version matched 1.7.) + * Only tuple indices are supported by the mimicking code. + (and tested as of writing this) + * Error types should match most of the time as long as there + is only one error. For multiple errors, what gets raised + will usually not be the same one. They are *not* tested. + + Update 2016-11-30: It is probably not worth maintaining this test + indefinitely and it can be dropped if maintenance becomes a burden. + + """ + + def setup(self): + self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + self.b = np.empty((3, 0, 5, 6)) + self.complex_indices = ['skip', Ellipsis, + 0, + # Boolean indices, up to 3-d for some special cases of eating up + # dimensions, also need to test all False + np.array([True, False, False]), + np.array([[True, False], [False, True]]), + np.array([[[False, False], [False, False]]]), + # Some slices: + slice(-5, 5, 2), + slice(1, 1, 100), + slice(4, -1, -2), + slice(None, None, -3), + # Some Fancy indexes: + np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast + np.array([0, 1, -2]), + np.array([[2], [0], [1]]), + np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), + np.array([2, -1], dtype=np.int8), + np.zeros([1]*31, dtype=int), # trigger too large array. + np.array([0., 1.])] # invalid datatype + # Some simpler indices that still cover a bit more + self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), + 'skip'] + # Very simple ones to fill the rest: + self.fill_indices = [slice(None, None), 0] + + def _get_multi_index(self, arr, indices): + """Mimic multi dimensional indexing. + + Parameters + ---------- + arr : ndarray + Array to be indexed. + indices : tuple of index objects + + Returns + ------- + out : ndarray + An array equivalent to the indexing operation (but always a copy). + `arr[indices]` should be identical. + no_copy : bool + Whether the indexing operation requires a copy. If this is `True`, + `np.may_share_memory(arr, arr[indices])` should be `True` (with + some exceptions for scalars and possibly 0-d arrays). + + Notes + ----- + While the function may mostly match the errors of normal indexing this + is generally not the case. + """ + in_indices = list(indices) + indices = [] + # if False, this is a fancy or boolean index + no_copy = True + # number of fancy/scalar indexes that are not consecutive + num_fancy = 0 + # number of dimensions indexed by a "fancy" index + fancy_dim = 0 + # NOTE: This is a funny twist (and probably OK to change). + # The boolean array has illegal indexes, but this is + # allowed if the broadcast fancy-indices are 0-sized. + # This variable is to catch that case. + error_unless_broadcast_to_empty = False + + # We need to handle Ellipsis and make arrays from indices, also + # check if this is fancy indexing (set no_copy). + ndim = 0 + ellipsis_pos = None # define here mostly to replace all but first. 
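+        # First pass: normalize the indices (sequences become intp arrays),
+        # flag fancy/boolean indexing via no_copy, count the dimensions
+        # consumed, and remember where the first Ellipsis sits.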
+ for i, indx in enumerate(in_indices): + if indx is None: + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + no_copy = False + if indx.ndim == 0: + raise IndexError + # boolean indices can have higher dimensions + ndim += indx.ndim + fancy_dim += indx.ndim + continue + if indx is Ellipsis: + if ellipsis_pos is None: + ellipsis_pos = i + continue # do not increment ndim counter + raise IndexError + if isinstance(indx, slice): + ndim += 1 + continue + if not isinstance(indx, np.ndarray): + # This could be open for changes in numpy. + # numpy should maybe raise an error if casting to intp + # is not safe. It rejects np.array([1., 2.]) but not + # [1., 2.] as index (same for ie. np.take). + # (Note the importance of empty lists if changing this here) + try: + indx = np.array(indx, dtype=np.intp) + except ValueError: + raise IndexError + in_indices[i] = indx + elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i': + raise IndexError('arrays used as indices must be of ' + 'integer (or boolean) type') + if indx.ndim != 0: + no_copy = False + ndim += 1 + fancy_dim += 1 + + if arr.ndim - ndim < 0: + # we can't take more dimensions then we have, not even for 0-d + # arrays. since a[()] makes sense, but not a[(),]. We will + # raise an error later on, unless a broadcasting error occurs + # first. + raise IndexError + + if ndim == 0 and None not in in_indices: + # Well we have no indexes or one Ellipsis. This is legal. + return arr.copy(), no_copy + + if ellipsis_pos is not None: + in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] * + (arr.ndim - ndim)) + + for ax, indx in enumerate(in_indices): + if isinstance(indx, slice): + # convert to an index array + indx = np.arange(*indx.indices(arr.shape[ax])) + indices.append(['s', indx]) + continue + elif indx is None: + # this is like taking a slice with one element from a new axis: + indices.append(['n', np.array([0], dtype=np.intp)]) + arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:])) + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + if indx.shape != arr.shape[ax:ax+indx.ndim]: + raise IndexError + + try: + flat_indx = np.ravel_multi_index(np.nonzero(indx), + arr.shape[ax:ax+indx.ndim], mode='raise') + except Exception: + error_unless_broadcast_to_empty = True + # fill with 0s instead, and raise error later + flat_indx = np.array([0]*indx.sum(), dtype=np.intp) + # concatenate axis into a single one: + if indx.ndim != 0: + arr = arr.reshape((arr.shape[:ax] + + (np.prod(arr.shape[ax:ax+indx.ndim]),) + + arr.shape[ax+indx.ndim:])) + indx = flat_indx + else: + # This could be changed, a 0-d boolean index can + # make sense (even outside the 0-d indexed array case) + # Note that originally this is could be interpreted as + # integer in the full integer special case. + raise IndexError + else: + # If the index is a singleton, the bounds check is done + # before the broadcasting. This used to be different in <1.9 + if indx.ndim == 0: + if indx >= arr.shape[ax] or indx < -arr.shape[ax]: + raise IndexError + if indx.ndim == 0: + # The index is a scalar. This used to be two fold, but if + # fancy indexing was active, the check was done later, + # possibly after broadcasting it away (1.7. or earlier). + # Now it is always done. + if indx >= arr.shape[ax] or indx < - arr.shape[ax]: + raise IndexError + if (len(indices) > 0 and + indices[-1][0] == 'f' and + ax != ellipsis_pos): + # NOTE: There could still have been a 0-sized Ellipsis + # between them. Checked that with ellipsis_pos. 
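+                # Group consecutive fancy indices together so they are
+                # broadcast against each other later on.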
+ indices[-1].append(indx) + else: + # We have a fancy index that is not after an existing one. + # NOTE: A 0-d array triggers this as well, while one may + # expect it to not trigger it, since a scalar would not be + # considered fancy indexing. + num_fancy += 1 + indices.append(['f', indx]) + + if num_fancy > 1 and not no_copy: + # We have to flush the fancy indexes left + new_indices = indices[:] + axes = list(range(arr.ndim)) + fancy_axes = [] + new_indices.insert(0, ['f']) + ni = 0 + ai = 0 + for indx in indices: + ni += 1 + if indx[0] == 'f': + new_indices[0].extend(indx[1:]) + del new_indices[ni] + ni -= 1 + for ax in range(ai, ai + len(indx[1:])): + fancy_axes.append(ax) + axes.remove(ax) + ai += len(indx) - 1 # axis we are at + indices = new_indices + # and now we need to transpose arr: + arr = arr.transpose(*(fancy_axes + axes)) + + # We only have one 'f' index now and arr is transposed accordingly. + # Now handle newaxis by reshaping... + ax = 0 + for indx in indices: + if indx[0] == 'f': + if len(indx) == 1: + continue + # First of all, reshape arr to combine fancy axes into one: + orig_shape = arr.shape + orig_slice = orig_shape[ax:ax + len(indx[1:])] + arr = arr.reshape((arr.shape[:ax] + + (np.prod(orig_slice).astype(int),) + + arr.shape[ax + len(indx[1:]):])) + + # Check if broadcasting works + res = np.broadcast(*indx[1:]) + # unfortunately the indices might be out of bounds. So check + # that first, and use mode='wrap' then. However only if + # there are any indices... + if res.size != 0: + if error_unless_broadcast_to_empty: + raise IndexError + for _indx, _size in zip(indx[1:], orig_slice): + if _indx.size == 0: + continue + if np.any(_indx >= _size) or np.any(_indx < -_size): + raise IndexError + if len(indx[1:]) == len(orig_slice): + if np.product(orig_slice) == 0: + # Work around for a crash or IndexError with 'wrap' + # in some 0-sized cases. + try: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='raise') + except Exception: + # This happens with 0-sized orig_slice (sometimes?) + # here it is a ValueError, but indexing gives a: + raise IndexError('invalid index into 0-sized') + else: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='wrap') + else: + # Maybe never happens... + raise ValueError + arr = arr.take(mi.ravel(), axis=ax) + try: + arr = arr.reshape((arr.shape[:ax] + + mi.shape + + arr.shape[ax+1:])) + except ValueError: + # too many dimensions, probably + raise IndexError + ax += mi.ndim + continue + + # If we are here, we have a 1D array for take: + arr = arr.take(indx[1], axis=ax) + ax += 1 + + return arr, no_copy + + def _check_multi_index(self, arr, index): + """Check a multi index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be a reshaped arange. + index : tuple of indexing objects + Index being tested. + """ + # Test item getting + try: + mimic_get, no_copy = self._get_multi_index(arr, index) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _check_single_index(self, arr, index): + """Check a single index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be an arange. + index : indexing object + Index being tested. 
Must be a single index and not a tuple + of indexing objects (see also `_check_multi_index`). + """ + try: + mimic_get, no_copy = self._get_multi_index(arr, (index,)) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _compare_index_result(self, arr, index, mimic_get, no_copy): + """Compare mimicked result to indexing result. + """ + arr = arr.copy() + indexed_arr = arr[index] + assert_array_equal(indexed_arr, mimic_get) + # Check if we got a view, unless its a 0-sized or 0-d array. + # (then its not a view, and that does not matter) + if indexed_arr.size != 0 and indexed_arr.ndim != 0: + assert_(np.may_share_memory(indexed_arr, arr) == no_copy) + # Check reference count of the original array + if HAS_REFCOUNT: + if no_copy: + # refcount increases by one: + assert_equal(sys.getrefcount(arr), 3) + else: + assert_equal(sys.getrefcount(arr), 2) + + # Test non-broadcast setitem: + b = arr.copy() + b[index] = mimic_get + 1000 + if b.size == 0: + return # nothing to compare here... + if no_copy and indexed_arr.ndim != 0: + # change indexed_arr in-place to manipulate original: + indexed_arr += 1000 + assert_array_equal(arr, b) + return + # Use the fact that the array is originally an arange: + arr.flat[indexed_arr.ravel()] += 1000 + assert_array_equal(arr, b) + + def test_boolean(self): + a = np.array(5) + assert_equal(a[np.array(True)], 5) + a[np.array(True)] = 1 + assert_equal(a, 1) + # NOTE: This is different from normal broadcasting, as + # arr[boolean_array] works like in a multi index. Which means + # it is aligned to the left. This is probably correct for + # consistency with arr[boolean_array,] also no broadcasting + # is done at all + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool),)) + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + + def test_multidim(self): + # Automatically test combinations with complex indexes on 2nd (or 1st) + # spot and the simple ones in one other spot. + with warnings.catch_warnings(): + # This is so that np.array(True) is not accepted in a full integer + # index, when running the file separately. 
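+            # Escalate the deprecation warnings to errors so that deprecated
+            # index types fail loudly during the automated sweep below.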
+ warnings.filterwarnings('error', '', DeprecationWarning) + warnings.filterwarnings('error', '', np.VisibleDeprecationWarning) + + def isskip(idx): + return isinstance(idx, str) and idx == "skip" + + for simple_pos in [0, 2, 3]: + tocheck = [self.fill_indices, self.complex_indices, + self.fill_indices, self.fill_indices] + tocheck[simple_pos] = self.simple_indices + for index in product(*tocheck): + index = tuple(i for i in index if not isskip(i)) + self._check_multi_index(self.a, index) + self._check_multi_index(self.b, index) + + # Check very simple item getting: + self._check_multi_index(self.a, (0, 0, 0, 0)) + self._check_multi_index(self.b, (0, 0, 0, 0)) + # Also check (simple cases of) too many indices: + assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + + def test_1d(self): + a = np.arange(10) + for index in self.complex_indices: + self._check_single_index(a, index) + +class TestFloatNonIntegerArgument(object): + """ + These test that ``TypeError`` is raised when you try to use + non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` + and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. + + """ + def test_valid_indexing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[np.array([0])] + a[[0, 0]] + a[:, [0, 0]] + a[:, 0,:] + a[:,:,:] + + def test_valid_slicing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[::] + a[0:] + a[:2] + a[0:2] + a[::2] + a[1::2] + a[:2:2] + a[1:2:2] + + def test_non_integer_argument_errors(self): + a = np.array([[5]]) + + assert_raises(TypeError, np.reshape, a, (1., 1., -1)) + assert_raises(TypeError, np.reshape, a, (np.array(1.), -1)) + assert_raises(TypeError, np.take, a, [0], 1.) + assert_raises(TypeError, np.take, a, [0], np.float64(1.)) + + def test_non_integer_sequence_multiplication(self): + # NumPy scalar sequence multiply should not work with non-integers + def mult(a, b): + return a * b + + assert_raises(TypeError, mult, [1], np.float_(3)) + # following should be OK + mult([1], np.int_(3)) + + def test_reduce_axis_float_index(self): + d = np.zeros((3,3,3)) + assert_raises(TypeError, np.min, d, 0.5) + assert_raises(TypeError, np.min, d, (0.5, 1)) + assert_raises(TypeError, np.min, d, (1, 2.2)) + assert_raises(TypeError, np.min, d, (.2, 1.2)) + + +class TestBooleanIndexing(object): + # Using a boolean as integer argument/indexing is an error. + def test_bool_as_int_argument_errors(self): + a = np.array([[[1]]]) + + assert_raises(TypeError, np.reshape, a, (True, -1)) + assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1)) + # Note that operator.index(np.array(True)) does not work, a boolean + # array is thus also deprecated, but not with the same message: + assert_raises(TypeError, operator.index, np.array(True)) + assert_warns(DeprecationWarning, operator.index, np.True_) + assert_raises(TypeError, np.take, args=(a, [0], False)) + + def test_boolean_indexing_weirdness(self): + # Weird boolean indexing things + a = np.ones((2, 3, 4)) + a[False, True, ...].shape == (0, 2, 3, 4) + a[True, [0, 1], True, True, [1], [[2]]] == (1, 2) + assert_raises(IndexError, lambda: a[False, [0, 1], ...]) + + +class TestArrayToIndexDeprecation(object): + """Creating an an index from array not 0-D is an error. 
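+    (Only 0-d integer arrays implement `__index__`; anything larger raises
+    TypeError, as checked below.)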
+ + """ + def test_array_to_index_error(self): + # so no exception is expected. The raising is effectively tested above. + a = np.array([[[1]]]) + + assert_raises(TypeError, operator.index, np.array([1])) + assert_raises(TypeError, np.reshape, a, (a, -1)) + assert_raises(TypeError, np.take, a, [0], a) + + +class TestNonIntegerArrayLike(object): + """Tests that array_likes only valid if can safely cast to integer. + + For instance, lists give IndexError when they cannot be safely cast to + an integer. + + """ + def test_basic(self): + a = np.arange(10) + + assert_raises(IndexError, a.__getitem__, [0.5, 1.5]) + assert_raises(IndexError, a.__getitem__, (['1', '2'],)) + + # The following is valid + a.__getitem__([]) + + +class TestMultipleEllipsisError(object): + """An index can only have a single ellipsis. + + """ + def test_basic(self): + a = np.arange(10) + assert_raises(IndexError, lambda: a[..., ...]) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,)) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,)) + + +class TestCApiAccess(object): + def test_getitem(self): + subscript = functools.partial(array_indexing, 0) + + # 0-d arrays don't work: + assert_raises(IndexError, subscript, np.ones(()), 0) + # Out of bound values: + assert_raises(IndexError, subscript, np.ones(10), 11) + assert_raises(IndexError, subscript, np.ones(10), -11) + assert_raises(IndexError, subscript, np.ones((10, 10)), 11) + assert_raises(IndexError, subscript, np.ones((10, 10)), -11) + + a = np.arange(10) + assert_array_equal(a[4], subscript(a, 4)) + a = a.reshape(5, 2) + assert_array_equal(a[-4], subscript(a, -4)) + + def test_setitem(self): + assign = functools.partial(array_indexing, 1) + + # Deletion is impossible: + assert_raises(ValueError, assign, np.ones(10), 0) + # 0-d arrays don't work: + assert_raises(IndexError, assign, np.ones(()), 0, 0) + # Out of bound values: + assert_raises(IndexError, assign, np.ones(10), 11, 0) + assert_raises(IndexError, assign, np.ones(10), -11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) + + a = np.arange(10) + assign(a, 4, 10) + assert_(a[4] == 10) + + a = a.reshape(5, 2) + assign(a, 4, 10) + assert_array_equal(a[-1], [10, 10]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.pyc new file mode 100644 index 0000000..2c6bb27 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.py new file mode 100644 index 0000000..3bc24fc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.py @@ -0,0 +1,87 @@ +from __future__ import division, absolute_import, print_function + +import sys + +import numpy as np +from numpy.testing import ( + assert_, assert_raises, assert_array_equal, HAS_REFCOUNT + ) + + +class TestTake(object): + def test_simple(self): + a = [[1, 2], [3, 4]] + a_str = [[b'1', b'2'], [b'3', b'4']] + modes = ['raise', 'wrap', 'clip'] + indices = [-1, 4] + index_arrays = [np.empty(0, dtype=np.intp), + np.empty(tuple(), dtype=np.intp), + np.empty((1, 1), dtype=np.intp)] + real_indices = {'raise': {-1: 1, 4: IndexError}, + 'wrap': {-1: 1, 4: 0}, + 'clip': {-1: 0, 4: 1}} + # Currently all types 
but object, use the same function generation. + # So it should not be necessary to test all. However test also a non + # refcounted struct on top of object. + types = int, object, np.dtype([('', 'i', 2)]) + for t in types: + # ta works, even if the array may be odd if buffer interface is used + ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) + tresult = list(ta.T.copy()) + for index_array in index_arrays: + if index_array.size != 0: + tresult[0].shape = (2,) + index_array.shape + tresult[1].shape = (2,) + index_array.shape + for mode in modes: + for index in indices: + real_index = real_indices[mode][index] + if real_index is IndexError and index_array.size != 0: + index_array.put(0, index) + assert_raises(IndexError, ta.take, index_array, + mode=mode, axis=1) + elif index_array.size != 0: + index_array.put(0, index) + res = ta.take(index_array, mode=mode, axis=1) + assert_array_equal(res, tresult[real_index]) + else: + res = ta.take(index_array, mode=mode, axis=1) + assert_(res.shape == (2,) + index_array.shape) + + def test_refcounting(self): + objects = [object() for i in range(10)] + for mode in ('raise', 'clip', 'wrap'): + a = np.array(objects) + b = np.array([2, 2, 4, 5, 3, 5]) + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == 3 for o in objects)) + # not contiguous, example: + a = np.array(objects * 2)[::2] + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == 3 for o in objects)) + + def test_unicode_mode(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.take, 5, mode=k) + + def test_empty_partition(self): + # In reference to github issue #6530 + a_original = np.array([0, 2, 4, 6, 8, 10]) + a = a_original.copy() + + # An empty partition should be a successful no-op + a.partition(np.array([], dtype=np.int16)) + + assert_array_equal(a, a_original) + + def test_empty_argpartition(self): + # In reference to github issue #6530 + a = np.array([0, 2, 4, 6, 8, 10]) + a = a.argpartition(np.array([], dtype=np.int16)) + + b = np.array([0, 1, 2, 3, 4, 5]) + assert_array_equal(a, b) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.pyc new file mode 100644 index 0000000..f2edaf1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.py new file mode 100644 index 0000000..cf50d5d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.py @@ -0,0 +1,207 @@ +from __future__ import division, absolute_import, print_function + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, temppath, + ) +from numpy.core.tests._locales import CommaDecimalPointLocale + +LD_INFO = np.finfo(np.longdouble) +longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) + + +_o = 1 + LD_INFO.eps +string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o))) +del _o + + +def test_scalar_extraction(): + """Confirm that extracting a value doesn't convert to python float""" + o = 1 + LD_INFO.eps + a = np.array([o, o, o]) + assert_equal(a[1], o) + + +# Conversions string -> long double + +# 0.1 not exactly 
representable in base 2 floating point. +repr_precision = len(repr(np.longdouble(0.1))) +# +2 from macro block starting around line 842 in scalartypes.c.src. +@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision, + reason="repr precision not enough to show eps") +def test_repr_roundtrip(): + # We will only see eps in repr if within printing precision. + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o)) + + +def test_unicode(): + np.longdouble(u"1.2") + + +def test_string(): + np.longdouble("1.2") + + +def test_bytes(): + np.longdouble(b"1.2") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_repr_roundtrip_bytes(): + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(repr(o).encode("ascii")), o) + + +def test_bogus_string(): + assert_raises(ValueError, np.longdouble, "spam") + assert_raises(ValueError, np.longdouble, "1.0 flub") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_fromstring(): + o = 1 + LD_INFO.eps + s = (" " + repr(o))*5 + a = np.array([o]*5) + assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, + err_msg="reading '%s'" % s) + + +def test_fromstring_bogus(): + assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "), + np.array([1., 2., 3.])) + + +def test_fromstring_empty(): + assert_equal(np.fromstring("xxxxx", sep="x"), + np.array([])) + + +def test_fromstring_missing(): + assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), + np.array([1])) + + +class TestFileBased(object): + + ldbl = 1 + LD_INFO.eps + tgt = np.array([ldbl]*5) + out = ''.join([repr(t) + '\n' for t in tgt]) + + def test_fromfile_bogus(self): + with temppath() as path: + with open(path, 'wt') as f: + f.write("1. 2. 3. 
flop 4.\n") + res = np.fromfile(path, dtype=float, sep=" ") + assert_equal(res, np.array([1., 2., 3.])) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_fromfile(self): + with temppath() as path: + with open(path, 'wt') as f: + f.write(self.out) + res = np.fromfile(path, dtype=np.longdouble, sep="\n") + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_genfromtxt(self): + with temppath() as path: + with open(path, 'wt') as f: + f.write(self.out) + res = np.genfromtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_loadtxt(self): + with temppath() as path: + with open(path, 'wt') as f: + f.write(self.out) + res = np.loadtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_tofile_roundtrip(self): + with temppath() as path: + self.tgt.tofile(path, sep=" ") + res = np.fromfile(path, dtype=np.longdouble, sep=" ") + assert_equal(res, self.tgt) + + +# Conversions long double -> string + + +def test_repr_exact(): + o = 1 + LD_INFO.eps + assert_(repr(o) != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_format(): + o = 1 + LD_INFO.eps + assert_("{0:.40g}".format(o) != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_percent(): + o = 1 + LD_INFO.eps + assert_("%.40g" % o != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, + reason="array repr problem") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_array_repr(): + o = 1 + LD_INFO.eps + a = np.array([o]) + b = np.array([1], dtype=np.longdouble) + if not np.all(a != b): + raise ValueError("precision loss creating arrays") + assert_(repr(a) != repr(b)) + +# +# Locale tests: scalar types formatting should be independent of the locale +# + +class TestCommaDecimalPointLocale(CommaDecimalPointLocale): + + def test_repr_roundtrip_foreign(self): + o = 1.5 + assert_equal(o, np.longdouble(repr(o))) + + def test_fromstring_foreign_repr(self): + f = 1.234 + a = np.fromstring(repr(f), dtype=float, sep=" ") + assert_equal(a[0], f) + + def test_fromstring_best_effort_float(self): + assert_equal(np.fromstring("1,234", dtype=float, sep=" "), + np.array([1.])) + + def test_fromstring_best_effort(self): + assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), + np.array([1.])) + + def test_fromstring_foreign(self): + s = "1.234" + a = np.fromstring(s, dtype=np.longdouble, sep=" ") + assert_equal(a[0], np.longdouble(s)) + + def test_fromstring_foreign_sep(self): + a = np.array([1, 2, 3, 4]) + b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",") + assert_array_equal(a, b) + + def test_fromstring_foreign_value(self): + b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") + assert_array_equal(b[0], 1) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.pyc new file mode 100644 index 0000000..5c804d4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.pyc differ diff 
--git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.py
new file mode 100644
index 0000000..ab8800c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.py
@@ -0,0 +1,32 @@
+"""
+Test machar. Given recent changes to hardcode type data, we might want to get
+rid of both MachAr and this test at some point.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+from numpy.core.machar import MachAr
+import numpy.core.numerictypes as ntypes
+from numpy import errstate, array
+
+
+class TestMachAr(object):
+    def _run_machar_highprec(self):
+        # Instantiate MachAr instance with high enough precision to cause
+        # underflow
+        try:
+            hiprec = ntypes.float96
+            MachAr(lambda v: array([v], hiprec))
+        except AttributeError:
+            # Fixme, this needs to raise a 'skip' exception.
+            "Skipping test: no ntypes.float96 available on this platform."
+
+    def test_underflow(self):
+        # Regression test for #759:
+        # instantiating MachAr for dtype = np.float96 raises spurious warning.
+        with errstate(all='raise'):
+            try:
+                self._run_machar_highprec()
+            except FloatingPointError as e:
+                msg = "Caught %s exception, should not have been raised." % e
+                raise AssertionError(msg)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.pyc
new file mode 100644
index 0000000..47bcff3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.py
new file mode 100644
index 0000000..3c8e0e7
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.py
@@ -0,0 +1,950 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import itertools
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
+from numpy.core import _umath_tests
+from numpy.lib.stride_tricks import as_strided
+from numpy.compat import long
+from numpy.testing import (
+    assert_, assert_raises, assert_equal, assert_array_equal
+    )
+
+if sys.version_info[0] >= 3:
+    xrange = range
+
+
+ndims = 2
+size = 10
+shape = tuple([size] * ndims)
+
+MAY_SHARE_BOUNDS = 0
+MAY_SHARE_EXACT = -1
+
+
+def _indices_for_nelems(nelems):
+    """Returns slices of length nelems, from start onwards, in direction sign."""
+
+    if nelems == 0:
+        return [size // 2]  # int index
+
+    res = []
+    for step in (1, 2):
+        for sign in (-1, 1):
+            start = size // 2 - nelems * step * sign // 2
+            stop = start + nelems * step * sign
+            res.append(slice(start, stop, step * sign))
+
+    return res
+
+
+def _indices_for_axis():
+    """Returns (src, dst) pairs of indices."""
+
+    res = []
+    for nelems in (0, 2, 3):
+        ind = _indices_for_nelems(nelems)
+
+        # no itertools.product available in Py2.4
+        res.extend([(a, b) for a in ind for b in ind])  # all assignments of size "nelems"
+
+    return res
+
+
+def _indices(ndims):
+    """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ...
) index pairs.""" + + ind = _indices_for_axis() + + # no itertools.product available in Py2.4 + + res = [[]] + for i in range(ndims): + newres = [] + for elem in ind: + for others in res: + newres.append([elem] + others) + res = newres + + return res + + +def _check_assignment(srcidx, dstidx): + """Check assignment arr[dstidx] = arr[srcidx] works.""" + + arr = np.arange(np.product(shape)).reshape(shape) + + cpy = arr.copy() + + cpy[dstidx] = arr[srcidx] + arr[dstidx] = arr[srcidx] + + assert_(np.all(arr == cpy), + 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)) + + +def test_overlapping_assignments(): + # Test automatically generated assignments which overlap in memory. + + inds = _indices(ndims) + + for ind in inds: + srcidx = tuple([a[0] for a in ind]) + dstidx = tuple([a[1] for a in ind]) + + _check_assignment(srcidx, dstidx) + + +@pytest.mark.slow +def test_diophantine_fuzz(): + # Fuzz test the diophantine solver + rng = np.random.RandomState(1234) + + max_int = np.iinfo(np.intp).max + + for ndim in range(10): + feasible_count = 0 + infeasible_count = 0 + + min_count = 500//(ndim + 1) + + while min(feasible_count, infeasible_count) < min_count: + # Ensure big and small integer problems + A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6 + U_max = rng.randint(0, 11, dtype=np.intp)**6 + + A_max = min(max_int, A_max) + U_max = min(max_int-1, U_max) + + A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp)) + for j in range(ndim)) + U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp)) + for j in range(ndim)) + + b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U))) + b = rng.randint(-1, b_ub+2, dtype=np.intp) + + if ndim == 0 and feasible_count < min_count: + b = 0 + + X = solve_diophantine(A, U, b) + + if X is None: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is None, (A, U, b, X_simplified)) + + # Check no solution exists (provided the problem is + # small enough so that brute force checking doesn't + # take too long) + try: + ranges = tuple(xrange(0, a*ub+1, a) for a, ub in zip(A, U)) + except OverflowError: + # xrange on 32-bit Python 2 may overflow + continue + + size = 1 + for r in ranges: + size *= len(r) + if size < 100000: + assert_(not any(sum(w) == b for w in itertools.product(*ranges))) + infeasible_count += 1 + else: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is not None, (A, U, b, X_simplified)) + + # Check validity + assert_(sum(a*x for a, x in zip(A, X)) == b) + assert_(all(0 <= x <= ub for x, ub in zip(X, U))) + feasible_count += 1 + + +def test_diophantine_overflow(): + # Smoke test integer overflow detection + max_intp = np.iinfo(np.intp).max + max_int64 = np.iinfo(np.int64).max + + if max_int64 <= max_intp: + # Check that the algorithm works internally in 128-bit; + # solving this problem requires large intermediate numbers + A = (max_int64//2, max_int64//2 - 10) + U = (max_int64//2, max_int64//2 - 10) + b = 2*(max_int64//2) - 10 + + assert_equal(solve_diophantine(A, U, b), (1, 1)) + + +def check_may_share_memory_exact(a, b): + got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + assert_equal(np.may_share_memory(a, b), + np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS)) + + a.fill(0) + b.fill(0) + a.fill(1) + exact = b.any() + + err_msg = "" + if got != exact: + err_msg = " " + "\n ".join([ + "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],), 
+ "shape_a = %r" % (a.shape,), + "shape_b = %r" % (b.shape,), + "strides_a = %r" % (a.strides,), + "strides_b = %r" % (b.strides,), + "size_a = %r" % (a.size,), + "size_b = %r" % (b.size,) + ]) + + assert_equal(got, exact, err_msg=err_msg) + + +def test_may_share_memory_manual(): + # Manual test cases for may_share_memory + + # Base arrays + xs0 = [ + np.zeros([13, 21, 23, 22], dtype=np.int8), + np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:] + ] + + # Generate all negative stride combinations + xs = [] + for x in xs0: + for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)): + xp = x[ss] + xs.append(xp) + + for x in xs: + # The default is a simple extent check + assert_(np.may_share_memory(x[:,0,:], x[:,1,:])) + assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None)) + + # Exact checks + check_may_share_memory_exact(x[:,0,:], x[:,1,:]) + check_may_share_memory_exact(x[:,::7], x[:,3::3]) + + try: + xp = x.ravel() + if xp.flags.owndata: + continue + xp = xp.view(np.int16) + except ValueError: + continue + + # 0-size arrays cannot overlap + check_may_share_memory_exact(x.ravel()[6:6], + xp.reshape(13, 21, 23, 11)[:,::7]) + + # Test itemsize is dealt with + check_may_share_memory_exact(x[:,::7], + xp.reshape(13, 21, 23, 11)) + check_may_share_memory_exact(x[:,::7], + xp.reshape(13, 21, 23, 11)[:,3::3]) + check_may_share_memory_exact(x.ravel()[6:7], + xp.reshape(13, 21, 23, 11)[:,::7]) + + # Check unit size + x = np.zeros([1], dtype=np.int8) + check_may_share_memory_exact(x, x) + check_may_share_memory_exact(x, x.copy()) + + +def iter_random_view_pairs(x, same_steps=True, equal_size=False): + rng = np.random.RandomState(1234) + + if equal_size and same_steps: + raise ValueError() + + def random_slice(n, step): + start = rng.randint(0, n+1, dtype=np.intp) + stop = rng.randint(start, n+1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + def random_slice_fixed_size(n, step, size): + start = rng.randint(0, n+1 - size*step) + stop = start + (size-1)*step + 1 + if rng.randint(0, 2) == 0: + stop, start = start-1, stop-1 + if stop < 0: + stop = None + step *= -1 + return slice(start, stop, step) + + # First a few regular views + yield x, x + for j in range(1, 7, 3): + yield x[j:], x[:-j] + yield x[...,j:], x[...,:-j] + + # An array with zero stride internal overlap + strides = list(x.strides) + strides[0] = 0 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # An array with non-zero stride internal overlap + strides = list(x.strides) + if strides[0] > 1: + strides[0] = 1 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # Then discontiguous views + while True: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + + t1 = np.arange(x.ndim) + rng.shuffle(t1) + + if equal_size: + t2 = t1 + else: + t2 = np.arange(x.ndim) + rng.shuffle(t2) + + a = x[s1] + + if equal_size: + if a.size == 0: + continue + + steps2 = tuple(rng.randint(1, max(2, p//(1+pa))) + if rng.randint(0, 5) == 0 else 1 + for p, s, pa in zip(x.shape, s1, a.shape)) + s2 = tuple(random_slice_fixed_size(p, s, pa) + for p, s, pa in zip(x.shape, steps2, a.shape)) + elif same_steps: + steps2 = steps + else: + steps2 = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in 
range(x.ndim)) + + if not equal_size: + s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2)) + + a = a.transpose(t1) + b = x[s2].transpose(t2) + + yield a, b + + +def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): + # Check that overlap problems with common strides are solved with + # little work. + x = np.zeros([17,34,71,97], dtype=np.int16) + + feasible = 0 + infeasible = 0 + + pair_iter = iter_random_view_pairs(x, same_steps) + + while min(feasible, infeasible) < min_count: + a, b = next(pair_iter) + + bounds_overlap = np.may_share_memory(a, b) + may_share_answer = np.may_share_memory(a, b) + easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b)) + exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + if easy_answer != exact_answer: + # assert_equal is slow... + assert_equal(easy_answer, exact_answer) + + if may_share_answer != bounds_overlap: + assert_equal(may_share_answer, bounds_overlap) + + if bounds_overlap: + if exact_answer: + feasible += 1 + else: + infeasible += 1 + + +@pytest.mark.slow +def test_may_share_memory_easy_fuzz(): + # Check that overlap problems with common strides are always + # solved with little work. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1, + same_steps=True, + min_count=2000) + + +@pytest.mark.slow +def test_may_share_memory_harder_fuzz(): + # Overlap problems with not necessarily common strides take more + # work. + # + # The work bound below can't be reduced much. Harder problems can + # also exist but not be detected here, as the set of problems + # comes from RNG. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2, + same_steps=False, + min_count=2000) + + +def test_shares_memory_api(): + x = np.zeros([4, 5, 6], dtype=np.int8) + + assert_equal(np.shares_memory(x, x), True) + assert_equal(np.shares_memory(x, x.copy()), False) + + a = x[:,::2,::3] + b = x[:,::3,::2] + assert_equal(np.shares_memory(a, b), True) + assert_equal(np.shares_memory(a, b, max_work=None), True) + assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1) + assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1)) + + +def test_may_share_memory_bad_max_work(): + x = np.zeros([1]) + assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100) + assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100) + + +def test_internal_overlap_diophantine(): + def check(A, U, exists=None): + X = solve_diophantine(A, U, 0, require_ub_nontrivial=1) + + if exists is None: + exists = (X is not None) + + if X is not None: + assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U))) + assert_(all(0 <= x <= u for x, u in zip(X, U))) + assert_(any(x != u//2 for x, u in zip(X, U))) + + if exists: + assert_(X is not None, repr(X)) + else: + assert_(X is None, repr(X)) + + # Smoke tests + check((3, 2), (2*2, 3*2), exists=True) + check((3*2, 2), (15*2, (3-1)*2), exists=False) + + +def test_internal_overlap_slices(): + # Slicing an array never generates internal overlap + + x = np.zeros([17,34,71,97], dtype=np.int16) + + rng = np.random.RandomState(1234) + + def random_slice(n, step): + start = rng.randint(0, n+1, dtype=np.intp) + stop = rng.randint(start, n+1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + cases = 0 + min_count = 5000 + + while cases < min_count: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if 
rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + t1 = np.arange(x.ndim) + rng.shuffle(t1) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + a = x[s1].transpose(t1) + + assert_(not internal_overlap(a)) + cases += 1 + + +def check_internal_overlap(a, manual_expected=None): + got = internal_overlap(a) + + # Brute-force check + m = set() + ranges = tuple(xrange(n) for n in a.shape) + for v in itertools.product(*ranges): + offset = sum(s*w for s, w in zip(a.strides, v)) + if offset in m: + expected = True + break + else: + m.add(offset) + else: + expected = False + + # Compare + if got != expected: + assert_equal(got, expected, err_msg=repr((a.strides, a.shape))) + if manual_expected is not None and expected != manual_expected: + assert_equal(expected, manual_expected) + return got + + +def test_internal_overlap_manual(): + # Stride tricks can construct arrays with internal overlap + + # We don't care about memory bounds, the array is not + # read/write accessed + x = np.arange(1).astype(np.int8) + + # Check low-dimensional special cases + + check_internal_overlap(x, False) # 1-dim + check_internal_overlap(x.reshape([]), False) # 0-dim + + a = as_strided(x, strides=(3, 4), shape=(4, 4)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(3, 4), shape=(5, 4)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0,), shape=(0,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(1,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(2,)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(87, 22)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(1, 22)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0, -9993), shape=(0, 22)) + check_internal_overlap(a, False) + + +def test_internal_overlap_fuzz(): + # Fuzz check; the brute-force check is fairly slow + + x = np.arange(1).astype(np.int8) + + overlap = 0 + no_overlap = 0 + min_count = 100 + + rng = np.random.RandomState(1234) + + while min(overlap, no_overlap) < min_count: + ndim = rng.randint(1, 4, dtype=np.intp) + + strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) + for j in range(ndim)) + shape = tuple(rng.randint(1, 30, dtype=np.intp) + for j in range(ndim)) + + a = as_strided(x, strides=strides, shape=shape) + result = check_internal_overlap(a) + + if result: + overlap += 1 + else: + no_overlap += 1 + + +def test_non_ndarray_inputs(): + # Regression check for gh-5604 + + class MyArray(object): + def __init__(self, data): + self.data = data + + @property + def __array_interface__(self): + return self.data.__array_interface__ + + class MyArray2(object): + def __init__(self, data): + self.data = data + + def __array__(self): + return self.data + + for cls in [MyArray, MyArray2]: + x = np.arange(5) + + assert_(np.may_share_memory(cls(x[::2]), x[1::2])) + assert_(not np.shares_memory(cls(x[::2]), x[1::2])) + + assert_(np.shares_memory(cls(x[1::3]), x[::2])) + assert_(np.may_share_memory(cls(x[1::3]), x[::2])) + + +def view_element_first_byte(x): + """Construct an array viewing the first byte of each element of `x`""" + from numpy.lib.stride_tricks import DummyArray + interface = dict(x.__array_interface__) + interface['typestr'] = '|b1' + interface['descr'] = [('', '|b1')] + return np.asarray(DummyArray(interface, x)) + + +def assert_copy_equivalent(operation, args, out, **kwargs): + """ + Check that operation(*args, out=out) produces 
results + equivalent to out[...] = operation(*args, out=out.copy()) + """ + + kwargs['out'] = out + kwargs2 = dict(kwargs) + kwargs2['out'] = out.copy() + + out_orig = out.copy() + out[...] = operation(*args, **kwargs2) + expected = out.copy() + out[...] = out_orig + + got = operation(*args, **kwargs).copy() + + if (got != expected).any(): + assert_equal(got, expected) + + +class TestUFunc(object): + """ + Test ufunc call memory overlap handling + """ + + def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, + count=5000): + shapes = [7, 13, 8, 21, 29, 32] + + rng = np.random.RandomState(1234) + + for ndim in range(1, 6): + x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = count // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + a_orig = a.copy() + b_orig = b.copy() + + if get_out_axis_size is None: + assert_copy_equivalent(operation, [a], out=b) + + if np.shares_memory(a, b): + overlapping += 1 + else: + for axis in itertools.chain(range(ndim), [None]): + a[...] = a_orig + b[...] = b_orig + + # Determine size for reduction axis (None if scalar) + outsize, scalarize = get_out_axis_size(a, b, axis) + if outsize == 'skip': + continue + + # Slice b to get an output array of the correct size + sl = [slice(None)] * ndim + if axis is None: + if outsize is None: + sl = [slice(0, 1)] + [0]*(ndim - 1) + else: + sl = [slice(0, outsize)] + [0]*(ndim - 1) + else: + if outsize is None: + k = b.shape[axis]//2 + if ndim == 1: + sl[axis] = slice(k, k + 1) + else: + sl[axis] = k + else: + assert b.shape[axis] >= outsize + sl[axis] = slice(0, outsize) + b_out = b[tuple(sl)] + + if scalarize: + b_out = b_out.reshape([]) + + if np.shares_memory(a, b_out): + overlapping += 1 + + # Check result + assert_copy_equivalent(operation, [a], out=b_out, axis=axis) + + @pytest.mark.slow + def test_unary_ufunc_call_fuzz(self): + self.check_unary_fuzz(np.invert, None, np.int16) + + def test_binary_ufunc_accumulate_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # accumulate doesn't support this + else: + return a.shape[axis], False + + self.check_unary_fuzz(np.add.accumulate, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduce_fuzz(self): + def get_out_axis_size(a, b, axis): + return None, (axis is None or a.ndim == 1) + + self.check_unary_fuzz(np.add.reduce, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # reduceat doesn't support this + else: + return a.shape[axis], False + + def do_reduceat(a, out, axis): + if axis is None: + size = len(a) + step = size//len(out) + else: + size = a.shape[axis] + step = a.shape[axis] // out.shape[axis] + idx = np.arange(0, size, step) + return np.add.reduceat(a, idx, out=out, axis=axis) + + self.check_unary_fuzz(do_reduceat, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_manual(self): + def check(ufunc, a, ind, out): + c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy()) + c2 = ufunc.reduceat(a, ind, out=out) + assert_array_equal(c1, c2) + + # Exactly same input/output arrays + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1].copy(), a) + + # Overlap with index + a = np.arange(10000, 
dtype=np.int16) + check(np.add, a, a[::-1], a) + + def test_unary_gufunc_fuzz(self): + shapes = [7, 13, 8, 21, 29, 32] + gufunc = _umath_tests.euclidean_pdist + + rng = np.random.RandomState(1234) + + for ndim in range(2, 6): + x = rng.rand(*shapes[:ndim]) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = 500 // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2: + continue + + # Ensure the shapes are so that euclidean_pdist is happy + if b.shape[-1] > b.shape[-2]: + b = b[...,0,:] + else: + b = b[...,:,0] + + n = a.shape[-2] + p = n * (n - 1) // 2 + if p <= b.shape[-1] and p > 0: + b = b[...,:p] + else: + n = max(2, int(np.sqrt(b.shape[-1]))//2) + p = n * (n - 1) // 2 + a = a[...,:n,:] + b = b[...,:p] + + # Call + if np.shares_memory(a, b): + overlapping += 1 + + with np.errstate(over='ignore', invalid='ignore'): + assert_copy_equivalent(gufunc, [a], out=b) + + def test_ufunc_at_manual(self): + def check(ufunc, a, ind, b=None): + a0 = a.copy() + if b is None: + ufunc.at(a0, ind.copy()) + c1 = a0.copy() + ufunc.at(a, ind) + c2 = a.copy() + else: + ufunc.at(a0, ind.copy(), b.copy()) + c1 = a0.copy() + ufunc.at(a, ind, b) + c2 = a.copy() + assert_array_equal(c1, c2) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.invert, a[::-1], a) + + # Overlap with second data array + a = np.arange(100, dtype=np.int16) + ind = np.arange(0, 100, 2, dtype=np.int16) + check(np.add, a, ind, a[25:75]) + + def test_unary_ufunc_1d_manual(self): + # Exercise branches in PyArray_EQUIVALENTLY_ITERABLE + + def check(a, b): + a_orig = a.copy() + b_orig = b.copy() + + b0 = b.copy() + c1 = ufunc(a, out=b0) + c2 = ufunc(a, out=b) + assert_array_equal(c1, c2) + + # Trigger "fancy ufunc loop" code path + mask = view_element_first_byte(b).view(np.bool_) + + a[...] = a_orig + b[...] = b_orig + c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy() + + a[...] = a_orig + b[...] = b_orig + c2 = ufunc(a, out=b, where=mask.copy()).copy() + + # Also, mask overlapping with output + a[...] = a_orig + b[...] 
= b_orig + c3 = ufunc(a, out=b, where=mask).copy() + + assert_array_equal(c1, c2) + assert_array_equal(c1, c3) + + dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, np.complex64, np.complex128] + dtypes = [np.dtype(x) for x in dtypes] + + for dtype in dtypes: + if np.issubdtype(dtype, np.integer): + ufunc = np.invert + else: + ufunc = np.reciprocal + + n = 1000 + k = 10 + indices = [ + np.index_exp[:n], + np.index_exp[k:k+n], + np.index_exp[n-1::-1], + np.index_exp[k+n-1:k-1:-1], + np.index_exp[:2*n:2], + np.index_exp[k:k+2*n:2], + np.index_exp[2*n-1::-2], + np.index_exp[k+2*n-1:k-1:-2], + ] + + for xi, yi in itertools.product(indices, indices): + v = np.arange(1, 1 + n*2 + k, dtype=dtype) + x = v[xi] + y = v[yi] + + with np.errstate(all='ignore'): + check(x, y) + + # Scalar cases + check(x[:1], y) + check(x[-1:], y) + check(x[:1].reshape([]), y) + check(x[-1:].reshape([]), y) + + def test_unary_ufunc_where_same(self): + # Check behavior at wheremask overlap + ufunc = np.invert + + def check(a, out, mask): + c1 = ufunc(a, out=out.copy(), where=mask.copy()) + c2 = ufunc(a, out=out, where=mask) + assert_array_equal(c1, c2) + + # Check behavior with same input and output arrays + x = np.arange(100).astype(np.bool_) + check(x, x, x) + check(x, x.copy(), x) + check(x, x, x.copy()) + + @pytest.mark.slow + def test_binary_ufunc_1d_manual(self): + ufunc = np.add + + def check(a, b, c): + c0 = c.copy() + c1 = ufunc(a, b, out=c0) + c2 = ufunc(a, b, out=c) + assert_array_equal(c1, c2) + + for dtype in [np.int8, np.int16, np.int32, np.int64, + np.float32, np.float64, np.complex64, np.complex128]: + # Check different data dependency orders + + n = 1000 + k = 10 + + indices = [] + for p in [1, 2]: + indices.extend([ + np.index_exp[:p*n:p], + np.index_exp[k:k+p*n:p], + np.index_exp[p*n-1::-p], + np.index_exp[k+p*n-1:k-1:-p], + ]) + + for x, y, z in itertools.product(indices, indices, indices): + v = np.arange(6*n).astype(dtype) + x = v[x] + y = v[y] + z = v[z] + + check(x, y, z) + + # Scalar cases + check(x[:1], y, z) + check(x[-1:], y, z) + check(x[:1].reshape([]), y, z) + check(x[-1:].reshape([]), y, z) + check(x, y[:1], z) + check(x, y[-1:], z) + check(x, y[:1].reshape([]), z) + check(x, y[-1:].reshape([]), z) + + def test_inplace_op_simple_manual(self): + rng = np.random.RandomState(1234) + x = rng.rand(200, 200) # bigger than bufsize + + x += x.T + assert_array_equal(x - x.T, 0) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.pyc new file mode 100644 index 0000000..4c41a10 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.py new file mode 100644 index 0000000..990d0ae --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.py @@ -0,0 +1,206 @@ +from __future__ import division, absolute_import, print_function + +import sys +import os +import shutil +import mmap +import pytest +from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp + +from numpy import ( + memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply) +from numpy.compat import Path + +from numpy import arange, allclose, asarray +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, 
suppress_warnings + ) + +class TestMemmap(object): + def setup(self): + self.tmpfp = NamedTemporaryFile(prefix='mmap') + self.tempdir = mkdtemp() + self.shape = (3, 4) + self.dtype = 'float32' + self.data = arange(12, dtype=self.dtype) + self.data.resize(self.shape) + + def teardown(self): + self.tmpfp.close() + shutil.rmtree(self.tempdir) + + def test_roundtrip(self): + # Write data to file + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp # Test __del__ machinery, which handles cleanup + + # Read data back from file + newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', + shape=self.shape) + assert_(allclose(self.data, newfp)) + assert_array_equal(self.data, newfp) + assert_equal(newfp.flags.writeable, False) + + def test_open_with_filename(self): + tmpname = mktemp('', 'mmap', dir=self.tempdir) + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp + + def test_unnamed_file(self): + with TemporaryFile() as f: + fp = memmap(f, dtype=self.dtype, shape=self.shape) + del fp + + def test_attributes(self): + offset = 1 + mode = "w+" + fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, + shape=self.shape, offset=offset) + assert_equal(offset, fp.offset) + assert_equal(mode, fp.mode) + del fp + + def test_filename(self): + tmpname = mktemp('', 'mmap', dir=self.tempdir) + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + abspath = os.path.abspath(tmpname) + fp[:] = self.data[:] + assert_equal(abspath, fp.filename) + b = fp[:1] + assert_equal(abspath, b.filename) + del b + del fp + + @pytest.mark.skipif(Path is None, reason="No pathlib.Path") + def test_path(self): + tmpname = mktemp('', 'mmap', dir=self.tempdir) + fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', + shape=self.shape) + # os.path.realpath does not resolve symlinks on Windows + # see: https://bugs.python.org/issue9949 + # use Path.resolve, just as memmap class does internally + abspath = str(Path(tmpname).resolve()) + fp[:] = self.data[:] + assert_equal(abspath, str(fp.filename.resolve())) + b = fp[:1] + assert_equal(abspath, str(b.filename.resolve())) + del b + del fp + + def test_filename_fileobj(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", + shape=self.shape) + assert_equal(fp.filename, self.tmpfp.name) + + @pytest.mark.skipif(sys.platform == 'gnu0', + reason="Known to fail on hurd") + def test_flush(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + assert_equal(fp[0], self.data[0]) + fp.flush() + + def test_del(self): + # Make sure a view does not delete the underlying mmap + fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp_base[0] = 5 + fp_view = fp_base[0:1] + assert_equal(fp_view[0], 5) + del fp_view + # Should still be able to access and assign values after + # deleting the view + assert_equal(fp_base[0], 5) + fp_base[0] = 6 + assert_equal(fp_base[0], 6) + + def test_arithmetic_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = (fp + 10) + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_indexing_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = fp[(1, 2), (2, 3)] + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_slicing_keeps_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + 
shape=self.shape) + assert_(fp[:2, :2]._mmap is fp._mmap) + + def test_view(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + new1 = fp.view() + new2 = new1.view() + assert_(new1.base is fp) + assert_(new2.base is fp) + new_array = asarray(fp) + assert_(new_array.base is fp) + + def test_ufunc_return_ndarray(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + with suppress_warnings() as sup: + sup.filter(FutureWarning, "np.average currently does not preserve") + for unary_op in [sum, average, product]: + result = unary_op(fp) + assert_(isscalar(result)) + assert_(result.__class__ is self.data[0, 0].__class__) + + assert_(unary_op(fp, axis=0).__class__ is ndarray) + assert_(unary_op(fp, axis=1).__class__ is ndarray) + + for binary_op in [add, subtract, multiply]: + assert_(binary_op(fp, self.data).__class__ is ndarray) + assert_(binary_op(self.data, fp).__class__ is ndarray) + assert_(binary_op(fp, fp).__class__ is ndarray) + + fp += 1 + assert(fp.__class__ is memmap) + add(fp, 1, out=fp) + assert(fp.__class__ is memmap) + + def test_getitem(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + assert_(fp[1:, :-1].__class__ is memmap) + # Fancy indexing returns a copy that is not memmapped + assert_(fp[[0, 1]].__class__ is ndarray) + + def test_memmap_subclass(self): + class MemmapSubClass(memmap): + pass + + fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + # We keep previous behavior for subclasses of memmap, i.e. the + # ufunc and __getitem__ output is never turned into a ndarray + assert_(sum(fp, axis=0).__class__ is MemmapSubClass) + assert_(sum(fp).__class__ is MemmapSubClass) + assert_(fp[1:, :-1].__class__ is MemmapSubClass) + assert(fp[[0, 1]].__class__ is MemmapSubClass) + + def test_mmap_offset_greater_than_allocation_granularity(self): + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_(fp.offset == offset) + + def test_no_shape(self): + self.tmpfp.write(b'a'*16) + mm = memmap(self.tmpfp, dtype='float64') + assert_equal(mm.shape, (2,)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.pyc new file mode 100644 index 0000000..833c00c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py new file mode 100644 index 0000000..7dd35c7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py @@ -0,0 +1,8095 @@ +from __future__ import division, absolute_import, print_function + +try: + # Accessing collections abstract classes from collections + # has been deprecated since Python 3.3 + import collections.abc as collections_abc +except ImportError: + import collections as collections_abc +import tempfile +import sys +import shutil +import warnings +import operator +import io +import itertools +import functools +import ctypes +import os +import gc +import weakref +import pytest +from contextlib import contextmanager + +from numpy.core.numeric import pickle + +if sys.version_info[0] >= 3: + import builtins +else: + import __builtin__ as builtins +from decimal import Decimal + 
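(An aside on the try/except near the top of this file: it is the usual Python 2/3 shim for the collections abstract base classes, so the rest of the module can use a single alias on either interpreter. A minimal standalone sketch of what the alias buys, using only the stdlib:)

    try:
        import collections.abc as collections_abc  # Python 3.3+
    except ImportError:
        import collections as collections_abc      # Python 2
    # ABC checks now spell the same either way:
    assert isinstance([1, 2, 3], collections_abc.Sequence)
    assert isinstance({'a': 1}, collections_abc.Mapping)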
+import numpy as np +from numpy.compat import strchar, unicode +import numpy.core._multiarray_tests as _multiarray_tests +from numpy.testing import ( + assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, + assert_array_equal, assert_raises_regex, assert_array_almost_equal, + assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring, + temppath, suppress_warnings + ) +from numpy.core.tests._locales import CommaDecimalPointLocale + +# Need to test an object that does not fully implement math interface +from datetime import timedelta, datetime + + +if sys.version_info[:2] > (3, 2): + # In Python 3.3 the representation of empty shape, strides and sub-offsets + # is an empty tuple instead of None. + # https://docs.python.org/dev/whatsnew/3.3.html#api-changes + EMPTY = () +else: + EMPTY = None + + +def _aligned_zeros(shape, dtype=float, order="C", align=None): + """ + Allocate a new ndarray with aligned memory. + + The ndarray is guaranteed *not* aligned to twice the requested alignment. + Eg, if align=4, guarantees it is not aligned to 8. If align=None uses + dtype.alignment.""" + dtype = np.dtype(dtype) + if dtype == np.dtype(object): + # Can't do this, fall back to standard allocation (which + # should always be sufficiently aligned) + if align is not None: + raise ValueError("object array alignment not supported") + return np.zeros(shape, dtype=dtype, order=order) + if align is None: + align = dtype.alignment + if not hasattr(shape, '__len__'): + shape = (shape,) + size = functools.reduce(operator.mul, shape) * dtype.itemsize + buf = np.empty(size + 2*align + 1, np.uint8) + + ptr = buf.__array_interface__['data'][0] + offset = ptr % align + if offset != 0: + offset = align - offset + if (ptr % (2*align)) == 0: + offset += align + + # Note: slices producing 0-size arrays do not necessarily change + # data pointer --- so we use and allocate size+1 + buf = buf[offset:offset+size+1][:-1] + data = np.ndarray(shape, dtype, buf, order=order) + data.fill(0) + return data + + +class TestFlags(object): + def setup(self): + self.a = np.arange(10) + + def test_writeable(self): + mydict = locals() + self.a.flags.writeable = False + assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) + assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) + self.a.flags.writeable = True + self.a[0] = 5 + self.a[0] = 0 + + def test_writeable_from_readonly(self): + # gh-9440 - make sure fromstring, from buffer on readonly buffers + # set writeable False + data = b'\x00' * 100 + vals = np.frombuffer(data, 'B') + assert_raises(ValueError, vals.setflags, write=True) + types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + values = np.core.records.fromstring(data, types) + vals = values['vals'] + assert_raises(ValueError, vals.setflags, write=True) + + def test_writeable_from_buffer(self): + data = bytearray(b'\x00' * 100) + vals = np.frombuffer(data, 'B') + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + values = np.core.records.fromstring(data, types) + vals = values['vals'] + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + + @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies") + def test_writeable_pickle(self): + import pickle + # Small arrays will be copied 
without setting base. + # See condition for using PyArray_SetBaseObject in + # array_setstate. + a = np.arange(1000) + for v in range(pickle.HIGHEST_PROTOCOL): + vals = pickle.loads(pickle.dumps(a, v)) + assert_(vals.flags.writeable) + assert_(isinstance(vals.base, bytes)) + + def test_otherflags(self): + assert_equal(self.a.flags.carray, True) + assert_equal(self.a.flags['C'], True) + assert_equal(self.a.flags.farray, False) + assert_equal(self.a.flags.behaved, True) + assert_equal(self.a.flags.fnc, False) + assert_equal(self.a.flags.forc, True) + assert_equal(self.a.flags.owndata, True) + assert_equal(self.a.flags.writeable, True) + assert_equal(self.a.flags.aligned, True) + with assert_warns(DeprecationWarning): + assert_equal(self.a.flags.updateifcopy, False) + with assert_warns(DeprecationWarning): + assert_equal(self.a.flags['U'], False) + assert_equal(self.a.flags['UPDATEIFCOPY'], False) + assert_equal(self.a.flags.writebackifcopy, False) + assert_equal(self.a.flags['X'], False) + assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + + def test_string_align(self): + a = np.zeros(4, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + # not power of two are accessed byte-wise and thus considered aligned + a = np.zeros(5, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + + def test_void_align(self): + a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) + assert_(a.flags.aligned) + + +class TestHash(object): + # see #3793 + def test_int(self): + for st, ut, s in [(np.int8, np.uint8, 8), + (np.int16, np.uint16, 16), + (np.int32, np.uint32, 32), + (np.int64, np.uint64, 64)]: + for i in range(1, s): + assert_equal(hash(st(-2**i)), hash(-2**i), + err_msg="%r: -2**%d" % (st, i)) + assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (st, i - 1)) + assert_equal(hash(st(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (st, i)) + + i = max(i - 1, 1) + assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (ut, i - 1)) + assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (ut, i)) + + +class TestAttributes(object): + def setup(self): + self.one = np.arange(10) + self.two = np.arange(20).reshape(4, 5) + self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + + def test_attributes(self): + assert_equal(self.one.shape, (10,)) + assert_equal(self.two.shape, (4, 5)) + assert_equal(self.three.shape, (2, 5, 6)) + self.three.shape = (10, 3, 2) + assert_equal(self.three.shape, (10, 3, 2)) + self.three.shape = (2, 5, 6) + assert_equal(self.one.strides, (self.one.itemsize,)) + num = self.two.itemsize + assert_equal(self.two.strides, (5*num, num)) + num = self.three.itemsize + assert_equal(self.three.strides, (30*num, 6*num, num)) + assert_equal(self.one.ndim, 1) + assert_equal(self.two.ndim, 2) + assert_equal(self.three.ndim, 3) + num = self.two.itemsize + assert_equal(self.two.size, 20) + assert_equal(self.two.nbytes, 20*num) + assert_equal(self.two.itemsize, self.two.dtype.itemsize) + assert_equal(self.two.base, np.arange(20)) + + def test_dtypeattr(self): + assert_equal(self.one.dtype, np.dtype(np.int_)) + assert_equal(self.three.dtype, np.dtype(np.float_)) + assert_equal(self.one.dtype.char, 'l') + assert_equal(self.three.dtype.char, 'd') + assert_(self.three.dtype.str[0] in '<>') + assert_equal(self.one.dtype.str[1], 'i') + assert_equal(self.three.dtype.str[1], 'f') + + def test_int_subclassing(self): + # Regression test for https://github.com/numpy/numpy/pull/3526 + + numpy_int = np.int_(0) + + if 
sys.version_info[0] >= 3: + # On Py3k int_ should not inherit from int, because it's not + # fixed-width anymore + assert_equal(isinstance(numpy_int, int), False) + else: + # Otherwise, it should inherit from int... + assert_equal(isinstance(numpy_int, int), True) + + # ... and fast-path checks on C-API level should also work + from numpy.core._multiarray_tests import test_int_subclass + assert_equal(test_int_subclass(numpy_int), True) + + def test_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + return np.ndarray(size, buffer=x, dtype=int, + offset=offset*x.itemsize, + strides=strides*x.itemsize) + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(ValueError, make_array, 8, 3, 1) + assert_equal(make_array(8, 3, 0), np.array([3]*8)) + # Check behavior reported in gh-2503: + assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) + make_array(0, 0, 10) + + def test_set_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + try: + r = np.ndarray([size], dtype=int, buffer=x, + offset=offset*x.itemsize) + except Exception as e: + raise RuntimeError(e) + r.strides = strides = strides*x.itemsize + return r + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(RuntimeError, make_array, 8, 3, 1) + # Check that the true extent of the array is used. + # Test relies on as_strided base not exposing a buffer. + x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + + def set_strides(arr, strides): + arr.strides = strides + + assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) + + # Test for offset calculations: + x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + shape=(10,), strides=(-1,)) + assert_raises(ValueError, set_strides, x[::-1], -1) + a = x[::-1] + a.strides = 1 + a[::2].strides = 2 + + def test_fill(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = np.empty((3, 2, 1), t) + y = np.empty((3, 2, 1), t) + x.fill(1) + y[...] = 1 + assert_equal(x, y) + + def test_fill_max_uint64(self): + x = np.empty((3, 2, 1), dtype=np.uint64) + y = np.empty((3, 2, 1), dtype=np.uint64) + value = 2**64 - 1 + y[...] 
= value + x.fill(value) + assert_array_equal(x, y) + + def test_fill_struct_array(self): + # Filling from a scalar + x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') + x.fill(x[0]) + assert_equal(x['f1'][1], x['f1'][0]) + # Filling from a tuple that can be converted + # to a scalar + x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) + x.fill((3.5, -2)) + assert_array_equal(x['a'], [3.5, 3.5]) + assert_array_equal(x['b'], [-2, -2]) + + +class TestArrayConstruction(object): + def test_array(self): + d = np.ones(6) + r = np.array([d, d]) + assert_equal(r, np.ones((2, 6))) + + d = np.ones(6) + tgt = np.ones((2, 6)) + r = np.array([d, d]) + assert_equal(r, tgt) + tgt[1] = 2 + r = np.array([d, d + 1]) + assert_equal(r, tgt) + + d = np.ones(6) + r = np.array([[d, d]]) + assert_equal(r, np.ones((1, 2, 6))) + + d = np.ones(6) + r = np.array([[d, d], [d, d]]) + assert_equal(r, np.ones((2, 2, 6))) + + d = np.ones((6, 6)) + r = np.array([d, d]) + assert_equal(r, np.ones((2, 6, 6))) + + d = np.ones((6, )) + r = np.array([[d, d + 1], d + 2]) + assert_equal(len(r), 2) + assert_equal(r[0], [d, d + 1]) + assert_equal(r[1], d + 2) + + tgt = np.ones((2, 3), dtype=bool) + tgt[0, 2] = False + tgt[1, 0:2] = False + r = np.array([[True, True, False], [False, False, True]]) + assert_equal(r, tgt) + r = np.array([[True, False], [True, False], [False, True]]) + assert_equal(r, tgt.T) + + def test_array_empty(self): + assert_raises(TypeError, np.array) + + def test_array_copy_false(self): + d = np.array([1, 2, 3]) + e = np.array(d, copy=False) + d[1] = 3 + assert_array_equal(e, [1, 3, 3]) + e = np.array(d, copy=False, order='F') + d[1] = 4 + assert_array_equal(e, [1, 4, 3]) + e[2] = 7 + assert_array_equal(d, [1, 4, 7]) + + def test_array_copy_true(self): + d = np.array([[1,2,3], [1, 2, 3]]) + e = np.array(d, copy=True) + d[0, 1] = 3 + e[0, 2] = -7 + assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) + assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) + e = np.array(d, copy=True, order='F') + d[0, 1] = 5 + e[0, 2] = 7 + assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) + assert_array_equal(d, [[1, 5, 3], [1,2,3]]) + + def test_array_cont(self): + d = np.ones(10)[::2] + assert_(np.ascontiguousarray(d).flags.c_contiguous) + assert_(np.ascontiguousarray(d).flags.f_contiguous) + assert_(np.asfortranarray(d).flags.c_contiguous) + assert_(np.asfortranarray(d).flags.f_contiguous) + d = np.ones((10, 10))[::2,::2] + assert_(np.ascontiguousarray(d).flags.c_contiguous) + assert_(np.asfortranarray(d).flags.f_contiguous) + + +class TestAssignment(object): + def test_assignment_broadcasting(self): + a = np.arange(6).reshape(2, 3) + + # Broadcasting the input to the output + a[...] = np.arange(3) + assert_equal(a, [[0, 1, 2], [0, 1, 2]]) + a[...] = np.arange(2).reshape(2, 1) + assert_equal(a, [[0, 0, 0], [1, 1, 1]]) + + # For compatibility with <= 1.5, a limited version of broadcasting + # the output to the input. + # + # This behavior is inconsistent with NumPy broadcasting + # in general, because it only uses one of the two broadcasting + # rules (adding a new "1" dimension to the left of the shape), + # applied to the output instead of an input. In NumPy 2.0, this kind + # of broadcasting assignment will likely be disallowed. + a[...] = np.arange(6)[::-1].reshape(1, 2, 3) + assert_equal(a, [[5, 4, 3], [2, 1, 0]]) + # The other type of broadcasting would require a reduction operation. + + def assign(a, b): + a[...] 
= b
+
+        assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
+
+    def test_assignment_errors(self):
+        # Address issue #2276
+        class C:
+            pass
+        a = np.zeros(1)
+
+        def assign(v):
+            a[0] = v
+
+        assert_raises((AttributeError, TypeError), assign, C())
+        assert_raises(ValueError, assign, [1])
+
+    def test_unicode_assignment(self):
+        # gh-5049
+        from numpy.core.numeric import set_string_function
+
+        @contextmanager
+        def inject_str(s):
+            """ replace ndarray.__str__ temporarily """
+            set_string_function(lambda x: s, repr=False)
+            try:
+                yield
+            finally:
+                set_string_function(None, repr=False)
+
+        a1d = np.array([u'test'])
+        a0d = np.array(u'done')
+        with inject_str(u'bad'):
+            a1d[0] = a0d  # previously this would invoke __str__
+        assert_equal(a1d[0], u'done')
+
+        # this would crash for the same reason
+        np.array([np.array(u'\xe5\xe4\xf6')])
+
+    def test_stringlike_empty_list(self):
+        # gh-8902
+        u = np.array([u'done'])
+        b = np.array([b'done'])
+
+        class bad_sequence(object):
+            def __getitem__(self): pass
+            def __len__(self): raise RuntimeError
+
+        assert_raises(ValueError, operator.setitem, u, 0, [])
+        assert_raises(ValueError, operator.setitem, b, 0, [])
+
+        assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
+        assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
+
+    def test_longdouble_assignment(self):
+        # only relevant if longdouble is larger than float
+        # we're looking for loss of precision
+
+        for dtype in (np.longdouble, np.longcomplex):
+            # gh-8902
+            tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
+            tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
+
+            # construction
+            tiny1d = np.array([tinya])
+            assert_equal(tiny1d[0], tinya)
+
+            # scalar = scalar
+            tiny1d[0] = tinyb
+            assert_equal(tiny1d[0], tinyb)
+
+            # 0d = scalar
+            tiny1d[0, ...] = tinya
+            assert_equal(tiny1d[0], tinya)
+
+            # 0d = 0d
+            tiny1d[0, ...] = tinyb[...]
+            assert_equal(tiny1d[0], tinyb)
+
+            # scalar = 0d
+            tiny1d[0] = tinyb[...]
+            assert_equal(tiny1d[0], tinyb)
+
+            arr = np.array([np.array(tinya)])
+            assert_equal(arr[0], tinya)
+
+    def test_cast_to_string(self):
+        # cast to str should do "str(scalar)", not "str(scalar.item())"
+        # Example: In python2, str(float) is truncated, so we want to avoid
+        # str(np.float64(...).item()) as this would incorrectly truncate.
+        a = np.zeros(1, dtype='S20')
+        a[:] = np.array(['1.12345678901234567890'], dtype='f8')
+        assert_equal(a[0], b"1.1234567890123457")
+
+
+class TestDtypedescr(object):
+    def test_construction(self):
+        d1 = np.dtype('i4')
+        assert_equal(d1, np.dtype(np.int32))
+        d2 = np.dtype('f8')
+        assert_equal(d2, np.dtype(np.float64))
+
+    def test_byteorders(self):
+        assert_(np.dtype('<i4') != np.dtype('>i4'))
+        assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
+
+    def test_structured_non_void(self):
+        fields = [('a', '<i2'), ('b', '<i2')]
+
+
+class TestCreation(object):
+    @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+    def test_sequence_long(self):
+        assert_equal(np.array([long(4), long(4)]).dtype, np.long)
+        assert_equal(np.array([long(4), 2**80]).dtype, object)
+        assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
+        assert_equal(np.array([2**80, long(4)]).dtype, object)
+
+    def test_non_sequence_sequence(self):
+        """Should not segfault.
+
+        Class Fail breaks the sequence protocol for new style classes, i.e.,
+        those derived from object. Class Map is a mapping type indicated by
+        raising a ValueError. At some point we may raise a warning instead
+        of an error in the Fail case.
+ + """ + class Fail(object): + def __len__(self): + return 1 + + def __getitem__(self, index): + raise ValueError() + + class Map(object): + def __len__(self): + return 1 + + def __getitem__(self, index): + raise KeyError() + + a = np.array([Map()]) + assert_(a.shape == (1,)) + assert_(a.dtype == np.dtype(object)) + assert_raises(ValueError, np.array, [Fail()]) + + def test_no_len_object_type(self): + # gh-5100, want object array from iterable object without len() + class Point2: + def __init__(self): + pass + + def __getitem__(self, ind): + if ind in [0, 1]: + return ind + else: + raise IndexError() + d = np.array([Point2(), Point2(), Point2()]) + assert_equal(d.dtype, np.dtype(object)) + + def test_false_len_sequence(self): + # gh-7264, segfault for this example + class C: + def __getitem__(self, i): + raise IndexError + def __len__(self): + return 42 + + assert_raises(ValueError, np.array, C()) # segfault? + + def test_failed_len_sequence(self): + # gh-7393 + class A(object): + def __init__(self, data): + self._data = data + def __getitem__(self, item): + return type(self)(self._data[item]) + def __len__(self): + return len(self._data) + + # len(d) should give 3, but len(d[0]) will fail + d = A([1,2,3]) + assert_equal(len(np.array(d)), 3) + + def test_array_too_big(self): + # Test that array creation succeeds for arrays addressable by intp + # on the byte level and fails for too large arrays. + buf = np.zeros(100) + + max_bytes = np.iinfo(np.intp).max + for dtype in ["intp", "S20", "b"]: + dtype = np.dtype(dtype) + itemsize = dtype.itemsize + + np.ndarray(buffer=buf, strides=(0,), + shape=(max_bytes//itemsize,), dtype=dtype) + assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,), + shape=(max_bytes//itemsize + 1,), dtype=dtype) + + def test_jagged_ndim_object(self): + # Lists of mismatching depths are treated as object arrays + a = np.array([[1], 2, 3]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + a = np.array([1, [2], 3]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + a = np.array([1, 2, [3]]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + def test_jagged_shape_object(self): + # The jagged dimension of a list is turned into an object array + a = np.array([[1, 1], [2], [3]]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + a = np.array([[1], [2, 2], [3]]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + a = np.array([[1], [2], [3, 3]]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + +class TestStructured(object): + def test_subarray_field_access(self): + a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) + a['a'] = np.arange(60).reshape(3, 5, 2, 2) + + # Since the subarray is always in C-order, a transpose + # does not swap the subarray: + assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) + + # In Fortran order, the subarray gets appended + # like in all other cases, not prepended as a special case + b = a.copy(order='F') + assert_equal(a['a'].shape, b['a'].shape) + assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) + + def test_subarray_comparison(self): + # Check that comparisons between record arrays with + # multi-dimensional field types work properly + a = np.rec.fromrecords( + [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], + dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))]) + b = a.copy() + assert_equal(a == b, [True, True]) + assert_equal(a != b, [False, False]) + b[1].b = 'c' + assert_equal(a == b, [True, 
False]) + assert_equal(a != b, [False, True]) + for i in range(3): + b[0].a = a[0].a + b[0].a[i] = 5 + assert_equal(a == b, [False, False]) + assert_equal(a != b, [True, True]) + for i in range(2): + for j in range(2): + b = a.copy() + b[0].c[i, j] = 10 + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + # Check that broadcasting with a subarray works + a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) + b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) + assert_equal(a == b, [[True, True, False], [False, False, True]]) + assert_equal(b == a, [[True, True, False], [False, False, True]]) + a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) + b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) + assert_equal(a == b, [[True, True, False], [False, False, True]]) + assert_equal(b == a, [[True, True, False], [False, False, True]]) + a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) + b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) + assert_equal(a == b, [[True, False, False], [False, False, True]]) + assert_equal(b == a, [[True, False, False], [False, False, True]]) + + # Check that broadcasting Fortran-style arrays with a subarray work + a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') + b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) + assert_equal(a == b, [[True, False, False], [False, False, True]]) + assert_equal(b == a, [[True, False, False], [False, False, True]]) + + # Check that incompatible sub-array shapes don't result to broadcasting + x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) + y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) + # This comparison invokes deprecated behaviour, and will probably + # start raising an error eventually. What we really care about in this + # test is just that it doesn't return True. + with suppress_warnings() as sup: + sup.filter(FutureWarning, "elementwise == comparison failed") + assert_equal(x == y, False) + + x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) + y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) + # This comparison invokes deprecated behaviour, and will probably + # start raising an error eventually. What we really care about in this + # test is just that it doesn't return True. 
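(An aside on the behaviour this check pins down, as a minimal standalone sketch: on the NumPy 1.x line, comparing structured arrays whose corresponding sub-array field shapes differ is not resolved by broadcasting; the elementwise comparison fails, a FutureWarning is emitted, and the result degrades to the scalar False. The suppress_warnings block below filters exactly that warning.)

    import numpy as np
    x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
    y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
    print(x == y)   # False -- a scalar, not a boolean array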
+        with suppress_warnings() as sup:
+            sup.filter(FutureWarning, "elementwise == comparison failed")
+            assert_equal(x == y, False)
+
+        # Check that structured arrays that are different only in
+        # byte-order work
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
+        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
+        assert_equal(a == b, [False, True])
+
+    def test_casting(self):
+        # Check that casting a structured array to change its byte order
+        # works
+        a = np.array([(1,)], dtype=[('a', '<i4')])
+        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
+        b = a.astype([('a', '>i4')])
+        assert_equal(b, a.byteswap().newbyteorder())
+        assert_equal(a['a'][0], b['a'][0])
+
+        # Check that equality comparison works on structured arrays if
+        # they are 'equiv'-castable
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
+        b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
+        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+        assert_equal(a == b, [True, True])
+
+        # Check that 'equiv' casting can change byte order
+        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+        c = a.astype(b.dtype, casting='equiv')
+        assert_equal(a == c, [True, True])
+
+        # Check that 'safe' casting can change byte order and up-cast
+        # fields
+        t = [('a', '<i8'), ('b', '>f8')]
+        assert_(np.can_cast(a.dtype, t, casting='safe'))
+        c = a.astype(t, casting='safe')
+        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+                     [True, True])
+
+        # Check that 'same_kind' casting can change byte order and
+        # change field widths within a "kind"
+        t = [('a', '<i4'), ('b', '>f4')]
+        assert_(np.can_cast(a.dtype, t, casting='same_kind'))
+        c = a.astype(t, casting='same_kind')
+        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+                     [True, True])
+
+        # Check that casting fails if the casting rule should fail on
+        # any of the fields
+        t = [('a', '>i8'), ('b', '<f4')]
+        assert_(not np.can_cast(a.dtype, t, casting='safe'))
+        t = [('a', '<i2'), ('b', '<f8')]
+        assert_(not np.can_cast(a.dtype, t, casting='equiv'))
+        t = [('a', '<i8'), ('b', '<f4')]
+        assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
+        assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
+
+        # Check that non-'unsafe' casting can't change the set of field names
+        for casting in ['no', 'equiv', 'safe', 'same_kind']:
+            t = [('a', '>i4')]
+            assert_(not np.can_cast(a.dtype, t, casting=casting))
+            t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
+            assert_(not np.can_cast(a.dtype, t, casting=casting))
+
+
+class TestBool(object):
+    def _test_cast_from_flexible(self, dtype):
+        # empty string -> false
+        for n in range(3):
+            v = np.array(b'', (dtype, n))
+            assert_equal(bool(v), False)
+            assert_equal(bool(v[()]), False)
+            assert_equal(v.astype(bool), False)
+            assert_(isinstance(v.astype(bool), np.ndarray))
+            assert_(v[()].astype(bool) is np.False_)
+
+        # anything else -> true
+        for n in range(1, 4):
+            for val in [b'a', b'0', b' ']:
+                v = np.array(val, (dtype, n))
+                assert_equal(bool(v), True)
+                assert_equal(bool(v[()]), True)
+                assert_equal(v.astype(bool), True)
+                assert_(isinstance(v.astype(bool), np.ndarray))
+                assert_(v[()].astype(bool) is np.True_)
+
+    def test_cast_from_void(self):
+        self._test_cast_from_flexible(np.void)
+
+    @pytest.mark.xfail(reason="See gh-9847")
+    def test_cast_from_unicode(self):
+        self._test_cast_from_flexible(np.unicode_)
+
+    @pytest.mark.xfail(reason="See gh-9847")
+    def test_cast_from_bytes(self):
+        self._test_cast_from_flexible(np.bytes_)
+
+
+class TestZeroSizeFlexible(object):
+    @staticmethod
+    def _zeros(shape, dtype=str):
+        dtype = np.dtype(dtype)
+        if dtype == np.void:
+            return np.zeros(shape, dtype=(dtype, 0))
+
+        # not constructable directly
+        dtype = np.dtype([('x', dtype, 0)])
+        return np.zeros(shape, dtype=dtype)['x']
+
+    def test_create(self):
+        zs = self._zeros(10, bytes)
+        assert_equal(zs.itemsize, 0)
+        zs = self._zeros(10, np.void)
+        assert_equal(zs.itemsize, 0)
+        zs = self._zeros(10, unicode)
+        assert_equal(zs.itemsize, 0)
+
+    def _test_sort_partition(self, name, kinds, **kwargs):
+        # Previously, these would all hang
+        for dt in [bytes, np.void, unicode]:
+            zs = self._zeros(10, dt)
+            sort_method = getattr(zs, name)
+            sort_func = getattr(np, name)
+            for kind in kinds:
+                sort_method(kind=kind, **kwargs)
+                sort_func(zs, kind=kind,
**kwargs) + + def test_sort(self): + self._test_sort_partition('sort', kinds='qhm') + + def test_argsort(self): + self._test_sort_partition('argsort', kinds='qhm') + + def test_partition(self): + self._test_sort_partition('partition', kinds=['introselect'], kth=2) + + def test_argpartition(self): + self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) + + def test_resize(self): + # previously an error + for dt in [bytes, np.void, unicode]: + zs = self._zeros(10, dt) + zs.resize(25) + zs.resize((10, 10)) + + def test_view(self): + for dt in [bytes, np.void, unicode]: + zs = self._zeros(10, dt) + + # viewing as itself should be allowed + assert_equal(zs.view(dt).dtype, np.dtype(dt)) + + # viewing as any non-empty type gives an empty result + assert_equal(zs.view((dt, 1)).shape, (0,)) + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for dt in [bytes, np.void, unicode]: + zs = self._zeros(10, dt) + p = pickle.dumps(zs, protocol=proto) + zs2 = pickle.loads(p) + + assert_equal(zs.dtype, zs2.dtype) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_pickle_with_buffercallback(self): + array = np.arange(10) + buffers = [] + bytes_string = pickle.dumps(array, buffer_callback=buffers.append, + protocol=5) + array_from_buffer = pickle.loads(bytes_string, buffers=buffers) + # when using pickle protocol 5 with buffer callbacks, + # array_from_buffer is reconstructed from a buffer holding a view + # to the initial array's data, so modifying an element in array + # should modify it in array_from_buffer too. + array[0] = -1 + assert array_from_buffer[0] == -1, array_from_buffer[0] + + +class TestMethods(object): + def test_compress(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = arr.compress([0, 1, 0, 1, 0], axis=1) + assert_equal(out, tgt) + + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=1) + assert_equal(out, tgt) + + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1]) + assert_equal(out, 1) + + def test_choose(self): + x = 2*np.ones((3,), dtype=int) + y = 3*np.ones((3,), dtype=int) + x2 = 2*np.ones((2, 3), dtype=int) + y2 = 3*np.ones((2, 3), dtype=int) + ind = np.array([0, 0, 1]) + + A = ind.choose((x, y)) + assert_equal(A, [2, 2, 3]) + + A = ind.choose((x2, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + A = ind.choose((x, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + oned = np.ones(1) + # gh-12031, caused SEGFAULT + assert_raises(TypeError, oned.choose,np.void(0), [oned]) + + def test_prod(self): + ba = [1, 2, 10, 11, 6, 5, 4] + ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] + + for ctype in [np.int16, np.uint16, np.int32, np.uint32, + np.float32, np.float64, np.complex64, np.complex128]: + a = np.array(ba, ctype) + a2 = np.array(ba2, ctype) + if ctype in ['1', 'b']: + assert_raises(ArithmeticError, a.prod) + assert_raises(ArithmeticError, a2.prod, axis=1) + else: + assert_equal(a.prod(axis=0), 26400) + assert_array_equal(a2.prod(axis=0), + np.array([50, 36, 84, 180], ctype)) + assert_array_equal(a2.prod(axis=-1), + np.array([24, 1890, 600], ctype)) + + def test_repeat(self): + m = np.array([1, 2, 3, 4, 5, 6]) + m_rect = m.reshape((2, 3)) + + A = m.repeat([1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + A = m.repeat(2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + A = 
m_rect.repeat([2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = m_rect.repeat([1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + A = m_rect.repeat(2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = m_rect.repeat(2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + def test_reshape(self): + arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(arr.reshape(2, 6), tgt) + + tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + assert_equal(arr.reshape(3, 4), tgt) + + tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] + assert_equal(arr.reshape((3, 4), order='F'), tgt) + + tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] + assert_equal(arr.T.reshape((3, 4), order='C'), tgt) + + def test_round(self): + def check_round(arr, expected, *round_args): + assert_equal(arr.round(*round_args), expected) + # With output array + out = np.zeros_like(arr) + res = arr.round(*round_args, out=out) + assert_equal(out, expected) + assert_equal(out, res) + + check_round(np.array([1.2, 1.5]), [1, 2]) + check_round(np.array(1.5), 2) + check_round(np.array([12.2, 15.5]), [10, 20], -1) + check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) + # Complex rounding + check_round(np.array([4.5 + 1.5j]), [4 + 2j]) + check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + + def test_squeeze(self): + a = np.array([[[1], [2], [3]]]) + assert_equal(a.squeeze(), [1, 2, 3]) + assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) + assert_raises(ValueError, a.squeeze, axis=(1,)) + assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) + + def test_transpose(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(a.transpose(), [[1, 3], [2, 4]]) + assert_raises(ValueError, lambda: a.transpose(0)) + assert_raises(ValueError, lambda: a.transpose(0, 0)) + assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) + + def test_sort(self): + # test ordering for floats and complex containing nans. It is only + # necessary to check the less-than comparison, so sorts that + # only follow the insertion sort path are sufficient. We only + # test doubles and complex doubles as the logic is the same. + + # check doubles + msg = "Test real sort order with nans" + a = np.array([np.nan, 1, 0]) + b = np.sort(a) + assert_equal(b, a[::-1], msg) + # check complex + msg = "Test complex sort order with nans" + a = np.zeros(9, dtype=np.complex128) + a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] + a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] + b = np.sort(a) + assert_equal(b, a[::-1], msg) + + # all c scalar sorts use the same code with different types + # so it suffices to run a quick check with one type. The number + # of sorted items must be greater than ~50 to check the actual + # algorithm because quick and merge sort fall over to insertion + # sort for small arrays. + a = np.arange(101) + b = a[::-1].copy() + for kind in ['q', 'm', 'h']: + msg = "scalar sort, kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test complex sorts. These use the same code as the scalars + # but the compare function differs. 
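(For orientation, a standalone sketch of the ordering that the differing compare function implements: NumPy sorts complex values lexicographically, real part first and imaginary part second, with nans placed last. The checks below build arrays where the real or the imaginary part alone decides the order.)

    import numpy as np
    a = np.array([2+1j, 1+3j, 1+2j])
    print(np.sort(a))   # [1.+2.j 1.+3.j 2.+1.j]: ties on the real part fall back to imag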
+ ai = a*1j + 1 + bi = b*1j + 1 + for kind in ['q', 'm', 'h']: + msg = "complex sort, real part == 1, kind=%s" % kind + c = ai.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + c = bi.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + ai = a + 1j + bi = b + 1j + for kind in ['q', 'm', 'h']: + msg = "complex sort, imag part == 1, kind=%s" % kind + c = ai.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + c = bi.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + + # test sorting of complex arrays requiring byte-swapping, gh-5441 + for endianness in '<>': + for dt in np.typecodes['Complex']: + arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) + c = arr.copy() + c.sort() + msg = 'byte-swapped complex sort, dtype={0}'.format(dt) + assert_equal(c, arr, msg) + + # test string sorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)]) + b = a[::-1].copy() + for kind in ['q', 'm', 'h']: + msg = "string sort, kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test unicode sorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) + b = a[::-1].copy() + for kind in ['q', 'm', 'h']: + msg = "unicode sort, kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test object array sorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "object sort, kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test record array sorts. + dt = np.dtype([('f', float), ('i', int)]) + a = np.array([(i, i) for i in range(101)], dtype=dt) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "object sort, kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test datetime64 sorts. + a = np.arange(0, 101, dtype='datetime64[D]') + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "datetime64 sort, kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test timedelta64 sorts. + a = np.arange(0, 101, dtype='timedelta64[D]') + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "timedelta64 sort, kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + # check axis handling. This should be the same for all type + # specific sorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 0], [3, 2]]) + c = np.array([[2, 3], [0, 1]]) + d = a.copy() + d.sort(axis=0) + assert_equal(d, b, "test sort with axis=0") + d = a.copy() + d.sort(axis=1) + assert_equal(d, c, "test sort with axis=1") + d = a.copy() + d.sort() + assert_equal(d, c, "test sort with default axis") + + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = 'test empty array sort with axis={0}'.format(axis) + assert_equal(np.sort(a, axis=axis), a, msg) + msg = 'test empty array sort with axis=None' + assert_equal(np.sort(a, axis=None), a.ravel(), msg) + + # test generic class with bogus ordering, + # should not segfault. 
+        class Boom(object):
+            def __lt__(self, other):
+                return True
+
+        a = np.array([Boom()]*100, dtype=object)
+        for kind in ['q', 'm', 'h']:
+            msg = "bogus comparison object sort, kind=%s" % kind
+            c.sort(kind=kind)
+
+    def test_void_sort(self):
+        # gh-8210 - previously segfaulted
+        for i in range(4):
+            rand = np.random.randint(256, size=4000, dtype=np.uint8)
+            arr = rand.view('V4')
+            arr[::-1].sort()
+
+        dt = np.dtype([('val', 'i4', (1,))])
+        for i in range(4):
+            rand = np.random.randint(256, size=4000, dtype=np.uint8)
+            arr = rand.view(dt)
+            arr[::-1].sort()
+
+    def test_sort_raises(self):
+        #gh-9404
+        arr = np.array([0, datetime.now(), 1], dtype=object)
+        for kind in ['q', 'm', 'h']:
+            assert_raises(TypeError, arr.sort, kind=kind)
+        #gh-3879
+        class Raiser(object):
+            def raises_anything(*args, **kwargs):
+                raise TypeError("SOMETHING ERRORED")
+            __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
+        arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
+        np.random.shuffle(arr)
+        for kind in ['q', 'm', 'h']:
+            assert_raises(TypeError, arr.sort, kind=kind)
+
+    def test_sort_degraded(self):
+        # test degraded dataset would take minutes to run with normal qsort
+        d = np.arange(1000000)
+        do = d.copy()
+        x = d
+        # create a median of 3 killer where each median is the sorted second
+        # last element of the quicksort partition
+        while x.size > 3:
+            mid = x.size // 2
+            x[mid], x[-2] = x[-2], x[mid]
+            x = x[:-2]
+
+        assert_equal(np.sort(d), do)
+        assert_equal(d[np.argsort(d)], do)
+
+    def test_copy(self):
+        def assert_fortran(arr):
+            assert_(arr.flags.fortran)
+            assert_(arr.flags.f_contiguous)
+            assert_(not arr.flags.c_contiguous)
+
+        def assert_c(arr):
+            assert_(not arr.flags.fortran)
+            assert_(not arr.flags.f_contiguous)
+            assert_(arr.flags.c_contiguous)
+
+        a = np.empty((2, 2), order='F')
+        # Test copying a Fortran array
+        assert_c(a.copy())
+        assert_c(a.copy('C'))
+        assert_fortran(a.copy('F'))
+        assert_fortran(a.copy('A'))
+
+        # Now test starting with a C array.
+        a = np.empty((2, 2), order='C')
+        assert_c(a.copy())
+        assert_c(a.copy('C'))
+        assert_fortran(a.copy('F'))
+        assert_c(a.copy('A'))
+
+    def test_sort_order(self):
+        # Test sorting an array with fields
+        x1 = np.array([21, 32, 14])
+        x2 = np.array(['my', 'first', 'name'])
+        x3 = np.array([3.1, 4.5, 6.2])
+        r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
+
+        r.sort(order=['id'])
+        assert_equal(r.id, np.array([14, 21, 32]))
+        assert_equal(r.word, np.array(['name', 'my', 'first']))
+        assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
+
+        r.sort(order=['word'])
+        assert_equal(r.id, np.array([32, 21, 14]))
+        assert_equal(r.word, np.array(['first', 'my', 'name']))
+        assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
+
+        r.sort(order=['number'])
+        assert_equal(r.id, np.array([21, 32, 14]))
+        assert_equal(r.word, np.array(['my', 'first', 'name']))
+        assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
+
+        assert_raises_regex(ValueError, 'duplicate',
+                            lambda: r.sort(order=['id', 'id']))
+
+        if sys.byteorder == 'little':
+            strtype = '>i2'
+        else:
+            strtype = '<i2'
+
+    def test_argsort(self):
+        # test argsort of complex arrays requiring byte-swapping, gh-5441
+        for endianness in '<>':
+            for dt in np.typecodes['Complex']:
+                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
+                msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
+                assert_equal(arr.argsort(),
+                             np.arange(len(arr), dtype=np.intp), msg)
+
+        # test string argsorts.
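(A one-line sketch of the property all of these argsort checks rely on: argsort returns the permutation that sorts the array, so indexing with it must agree with sort. The string and unicode blocks below exercise the same property on non-numeric data.)

    import numpy as np
    v = np.array([3, 1, 2])
    idx = v.argsort(kind='q')            # same 'q'/'m'/'h' kind codes as above
    assert (v[idx] == np.sort(v)).all()  # applying the permutation sorts v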
+ s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)]) + b = a[::-1].copy() + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'm', 'h']: + msg = "string argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test unicode argsorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'm', 'h']: + msg = "unicode argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test object array argsorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'm', 'h']: + msg = "object argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test structured array argsorts. + dt = np.dtype([('f', float), ('i', int)]) + a = np.array([(i, i) for i in range(101)], dtype=dt) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'm', 'h']: + msg = "structured array argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test datetime64 argsorts. + a = np.arange(0, 101, dtype='datetime64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = "datetime64 argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test timedelta64 argsorts. + a = np.arange(0, 101, dtype='timedelta64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = "timedelta64 argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # check axis handling. This should be the same for all type + # specific argsorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 1], [0, 0]]) + c = np.array([[1, 0], [1, 0]]) + assert_equal(a.copy().argsort(axis=0), b) + assert_equal(a.copy().argsort(axis=1), c) + assert_equal(a.copy().argsort(), c) + + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = 'test empty array argsort with axis={0}'.format(axis) + assert_equal(np.argsort(a, axis=axis), + np.zeros_like(a, dtype=np.intp), msg) + msg = 'test empty array argsort with axis=None' + assert_equal(np.argsort(a, axis=None), + np.zeros_like(a.ravel(), dtype=np.intp), msg) + + # check that stable argsorts are stable + r = np.arange(100) + # scalars + a = np.zeros(100) + assert_equal(a.argsort(kind='m'), r) + # complex + a = np.zeros(100, dtype=complex) + assert_equal(a.argsort(kind='m'), r) + # string + a = np.array(['aaaaaaaaa' for i in range(100)]) + assert_equal(a.argsort(kind='m'), r) + # unicode + a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode) + assert_equal(a.argsort(kind='m'), r) + + def test_sort_unicode_kind(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.sort, kind=k) + assert_raises(ValueError, d.argsort, kind=k) + + def test_searchsorted(self): + # test for floats and complex containing nans. The logic is the + # same for all float types so only test double types for now. 
+ # The search sorted routines use the compare functions for the + # array type, so this checks if that is consistent with the sort + # order. + + # check double + a = np.array([0, 1, np.nan]) + msg = "Test real searchsorted with nans, side='l'" + b = a.searchsorted(a, side='l') + assert_equal(b, np.arange(3), msg) + msg = "Test real searchsorted with nans, side='r'" + b = a.searchsorted(a, side='r') + assert_equal(b, np.arange(1, 4), msg) + # check double complex + a = np.zeros(9, dtype=np.complex128) + a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] + a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] + msg = "Test complex searchsorted with nans, side='l'" + b = a.searchsorted(a, side='l') + assert_equal(b, np.arange(9), msg) + msg = "Test complex searchsorted with nans, side='r'" + b = a.searchsorted(a, side='r') + assert_equal(b, np.arange(1, 10), msg) + msg = "Test searchsorted with little endian, side='l'" + a = np.array([0, 128], dtype=' p[:, i]).all(), + msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) + aae(p, d1[np.arange(d1.shape[0])[:, None], + np.argpartition(d1, i, axis=1, kind=k)]) + + p = np.partition(d0, i, axis=0, kind=k) + aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) + # array_less does not seem to work right + at((p[:i, :] <= p[i, :]).all(), + msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) + at((p[i + 1:, :] > p[i, :]).all(), + msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) + aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), + np.arange(d0.shape[1])[None, :]]) + + # check inplace + dc = d.copy() + dc.partition(i, kind=k) + assert_equal(dc, np.partition(d, i, kind=k)) + dc = d0.copy() + dc.partition(i, axis=0, kind=k) + assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) + dc = d1.copy() + dc.partition(i, axis=1, kind=k) + assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) + + def assert_partitioned(self, d, kth): + prev = 0 + for k in np.sort(kth): + assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) + assert_((d[k:] >= d[k]).all(), + msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) + prev = k + 1 + + def test_partition_iterative(self): + d = np.arange(17) + kth = (0, 1, 2, 429, 231) + assert_raises(ValueError, d.partition, kth) + assert_raises(ValueError, d.argpartition, kth) + d = np.arange(10).reshape((2, 5)) + assert_raises(ValueError, d.partition, kth, axis=0) + assert_raises(ValueError, d.partition, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=None) + + d = np.array([3, 4, 2, 1]) + p = np.partition(d, (0, 3)) + self.assert_partitioned(p, (0, 3)) + self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + assert_array_equal(p, np.partition(d, (-3, -1))) + assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) + + d = np.arange(17) + np.random.shuffle(d) + d.partition(range(d.size)) + assert_array_equal(np.arange(17), d) + np.random.shuffle(d) + assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) + + # test unsorted kth + d = np.arange(17) + np.random.shuffle(d) + keys = np.array([1, 3, 8, -2]) + np.random.shuffle(d) + p = np.partition(d, keys) + self.assert_partitioned(p, keys) + p = d[np.argpartition(d, keys)] + self.assert_partitioned(p, keys) + np.random.shuffle(keys) + assert_array_equal(np.partition(d, keys), p) + assert_array_equal(d[np.argpartition(d, keys)], p) + + # equal kth + d = np.arange(20)[::-1] + self.assert_partitioned(np.partition(d, [5]*4), [5]) + self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), + 
[5]*4 + [6, 13]) + self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) + self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], + [5]*4 + [6, 13]) + + d = np.arange(12) + np.random.shuffle(d) + d1 = np.tile(np.arange(12), (4, 1)) + map(np.random.shuffle, d1) + d0 = np.transpose(d1) + + kth = (1, 6, 7, -1) + p = np.partition(d1, kth, axis=1) + pa = d1[np.arange(d1.shape[0])[:, None], + d1.argpartition(kth, axis=1)] + assert_array_equal(p, pa) + for i in range(d1.shape[0]): + self.assert_partitioned(p[i,:], kth) + p = np.partition(d0, kth, axis=0) + pa = d0[np.argpartition(d0, kth, axis=0), + np.arange(d0.shape[1])[None,:]] + assert_array_equal(p, pa) + for i in range(d0.shape[1]): + self.assert_partitioned(p[:, i], kth) + + def test_partition_cdtype(self): + d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.9, 38)], + dtype=[('name', '|S10'), ('height', ' (numpy ufunc, has_in_place_version, preferred_dtype) + ops = { + 'add': (np.add, True, float), + 'sub': (np.subtract, True, float), + 'mul': (np.multiply, True, float), + 'truediv': (np.true_divide, True, float), + 'floordiv': (np.floor_divide, True, float), + 'mod': (np.remainder, True, float), + 'divmod': (np.divmod, False, float), + 'pow': (np.power, True, int), + 'lshift': (np.left_shift, True, int), + 'rshift': (np.right_shift, True, int), + 'and': (np.bitwise_and, True, int), + 'xor': (np.bitwise_xor, True, int), + 'or': (np.bitwise_or, True, int), + # 'ge': (np.less_equal, False), + # 'gt': (np.less, False), + # 'le': (np.greater_equal, False), + # 'lt': (np.greater, False), + # 'eq': (np.equal, False), + # 'ne': (np.not_equal, False), + } + if sys.version_info >= (3, 5): + ops['matmul'] = (np.matmul, False, float) + + class Coerced(Exception): + pass + + def array_impl(self): + raise Coerced + + def op_impl(self, other): + return "forward" + + def rop_impl(self, other): + return "reverse" + + def iop_impl(self, other): + return "in-place" + + def array_ufunc_impl(self, ufunc, method, *args, **kwargs): + return ("__array_ufunc__", ufunc, method, args, kwargs) + + # Create an object with the given base, in the given module, with a + # bunch of placeholder __op__ methods, and optionally a + # __array_ufunc__ and __array_priority__. + def make_obj(base, array_priority=False, array_ufunc=False, + alleged_module="__main__"): + class_namespace = {"__array__": array_impl} + if array_priority is not False: + class_namespace["__array_priority__"] = array_priority + for op in ops: + class_namespace["__{0}__".format(op)] = op_impl + class_namespace["__r{0}__".format(op)] = rop_impl + class_namespace["__i{0}__".format(op)] = iop_impl + if array_ufunc is not False: + class_namespace["__array_ufunc__"] = array_ufunc + eval_namespace = {"base": base, + "class_namespace": class_namespace, + "__name__": alleged_module, + } + MyType = eval("type('MyType', (base,), class_namespace)", + eval_namespace) + if issubclass(MyType, np.ndarray): + # Use this range to avoid special case weirdnesses around + # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. 
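+        # Aside added in editing (not from the numpy suite): among the
+        # behaviours this harness probes is __array_ufunc__ = None, which
+        # makes ndarray binops return NotImplemented so that Python falls
+        # back to the other operand's reflected method. Sketch with an
+        # invented class, never called by the suite:
+        def _aside_array_ufunc_none():
+            import numpy as np
+
+            class Deferring(object):
+                __array_ufunc__ = None
+
+                def __radd__(self, other):
+                    return "reverse"
+
+            assert np.arange(3) + Deferring() == "reverse"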
+ return np.arange(3, 7).reshape(2, 2).view(MyType) + else: + return MyType() + + def check(obj, binop_override_expected, ufunc_override_expected, + inplace_override_expected, check_scalar=True): + for op, (ufunc, has_inplace, dtype) in ops.items(): + err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' + % (op, ufunc, has_inplace, dtype)) + check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] + if check_scalar: + check_objs.append(check_objs[0][0]) + for arr in check_objs: + arr_method = getattr(arr, "__{0}__".format(op)) + + def first_out_arg(result): + if op == "divmod": + assert_(isinstance(result, tuple)) + return result[0] + else: + return result + + # arr __op__ obj + if binop_override_expected: + assert_equal(arr_method(obj), NotImplemented, err_msg) + elif ufunc_override_expected: + assert_equal(arr_method(obj)[0], "__array_ufunc__", + err_msg) + else: + if (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_method(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_method, obj, err_msg=err_msg) + # obj __op__ arr + arr_rmethod = getattr(arr, "__r{0}__".format(op)) + if ufunc_override_expected: + res = arr_rmethod(obj) + assert_equal(res[0], "__array_ufunc__", + err_msg=err_msg) + assert_equal(res[1], ufunc, err_msg=err_msg) + else: + if (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_rmethod(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + # __array_ufunc__ = "asdf" creates a TypeError + assert_raises((TypeError, Coerced), + arr_rmethod, obj, err_msg=err_msg) + + # arr __iop__ obj + # array scalars don't have in-place operators + if has_inplace and isinstance(arr, np.ndarray): + arr_imethod = getattr(arr, "__i{0}__".format(op)) + if inplace_override_expected: + assert_equal(arr_method(obj), NotImplemented, + err_msg=err_msg) + elif ufunc_override_expected: + res = arr_imethod(obj) + assert_equal(res[0], "__array_ufunc__", err_msg) + assert_equal(res[1], ufunc, err_msg) + assert_(type(res[-1]["out"]) is tuple, err_msg) + assert_(res[-1]["out"][0] is arr, err_msg) + else: + if (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + assert_(arr_imethod(obj) is arr, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_imethod, obj, + err_msg=err_msg) + + op_fn = getattr(operator, op, None) + if op_fn is None: + op_fn = getattr(operator, op + "_", None) + if op_fn is None: + op_fn = getattr(builtins, op) + assert_equal(op_fn(obj, arr), "forward", err_msg) + if not isinstance(obj, np.ndarray): + if binop_override_expected: + assert_equal(op_fn(arr, obj), "reverse", err_msg) + elif ufunc_override_expected: + assert_equal(op_fn(arr, obj)[0], "__array_ufunc__", + err_msg) + if ufunc_override_expected: + assert_equal(ufunc(obj, arr)[0], "__array_ufunc__", + err_msg) + + # No array priority, no array_ufunc -> nothing called + check(make_obj(object), False, False, False) + # Negative array priority, no array_ufunc -> nothing called + # (has to be very negative, because scalar priority is -1000000.0) + check(make_obj(object, array_priority=-2**30), False, False, False) + # Positive array priority, no array_ufunc -> binops and iops only + check(make_obj(object, array_priority=1), True, False, True) + # ndarray ignores array_priority for 
ndarray subclasses + check(make_obj(np.ndarray, array_priority=1), False, False, False, + check_scalar=False) + # Positive array_priority and array_ufunc -> array_ufunc only + check(make_obj(object, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + check(make_obj(np.ndarray, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + # array_ufunc set to None -> defer binops only + check(make_obj(object, array_ufunc=None), True, False, False) + check(make_obj(np.ndarray, array_ufunc=None), True, False, False, + check_scalar=False) + + def test_ufunc_override_normalize_signature(self): + # gh-5674 + class SomeClass(object): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return kw + + a = SomeClass() + kw = np.add(a, [1]) + assert_('sig' not in kw and 'signature' not in kw) + kw = np.add(a, [1], sig='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + kw = np.add(a, [1], signature='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + + def test_array_ufunc_index(self): + # Check that index is set appropriately, also if only an output + # is passed on (latter is another regression tests for github bug 4753) + # This also checks implicitly that 'out' is always a tuple. + class CheckIndex(object): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + for i, a in enumerate(inputs): + if a is self: + return i + # calls below mean we must be in an output. + for j, a in enumerate(kw['out']): + if a is self: + return (j,) + + a = CheckIndex() + dummy = np.arange(2.) + # 1 input, 1 output + assert_equal(np.sin(a), 0) + assert_equal(np.sin(dummy, a), (0,)) + assert_equal(np.sin(dummy, out=a), (0,)) + assert_equal(np.sin(dummy, out=(a,)), (0,)) + assert_equal(np.sin(a, a), 0) + assert_equal(np.sin(a, out=a), 0) + assert_equal(np.sin(a, out=(a,)), 0) + # 1 input, 2 outputs + assert_equal(np.modf(dummy, a), (0,)) + assert_equal(np.modf(dummy, None, a), (1,)) + assert_equal(np.modf(dummy, dummy, a), (1,)) + assert_equal(np.modf(dummy, out=(a, None)), (0,)) + assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) + assert_equal(np.modf(dummy, out=(None, a)), (1,)) + assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) + assert_equal(np.modf(a, out=(dummy, a)), 0) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', DeprecationWarning) + assert_equal(np.modf(dummy, out=a), (0,)) + assert_(w[0].category is DeprecationWarning) + assert_raises(ValueError, np.modf, dummy, out=(a,)) + + # 2 inputs, 1 output + assert_equal(np.add(a, dummy), 0) + assert_equal(np.add(dummy, a), 1) + assert_equal(np.add(dummy, dummy, a), (0,)) + assert_equal(np.add(dummy, a, a), 1) + assert_equal(np.add(dummy, dummy, out=a), (0,)) + assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) + assert_equal(np.add(a, dummy, out=a), 0) + + def test_out_override(self): + # regression test for github bug 4753 + class OutClass(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + if 'out' in kw: + tmp_kw = kw.copy() + tmp_kw.pop('out') + func = getattr(ufunc, method) + kw['out'][0][...] 
= func(*inputs, **tmp_kw) + + A = np.array([0]).view(OutClass) + B = np.array([5]) + C = np.array([6]) + np.multiply(C, B, A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + A[0] = 0 + np.multiply(C, B, out=A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + + def test_pow_override_with_errors(self): + # regression test for gh-9112 + class PowerOnly(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + if ufunc is not np.power: + raise NotImplementedError + return "POWER!" + # explicit cast to float, to ensure the fast power path is taken. + a = np.array(5., dtype=np.float64).view(PowerOnly) + assert_equal(a ** 2.5, "POWER!") + with assert_raises(NotImplementedError): + a ** 0.5 + with assert_raises(NotImplementedError): + a ** 0 + with assert_raises(NotImplementedError): + a ** 1 + with assert_raises(NotImplementedError): + a ** -1 + with assert_raises(NotImplementedError): + a ** 2 + + def test_pow_array_object_dtype(self): + # test pow on arrays of object dtype + class SomeClass(object): + def __init__(self, num=None): + self.num = num + + # want to ensure a fast pow path is not taken + def __mul__(self, other): + raise AssertionError('__mul__ should not be called') + + def __div__(self, other): + raise AssertionError('__div__ should not be called') + + def __pow__(self, exp): + return SomeClass(num=self.num ** exp) + + def __eq__(self, other): + if isinstance(other, SomeClass): + return self.num == other.num + + __rpow__ = __pow__ + + def pow_for(exp, arr): + return np.array([x ** exp for x in arr]) + + obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) + + assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) + assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) + assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) + assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) + assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + + def test_pos_array_ufunc_override(self): + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*[i.view(np.ndarray) for + i in inputs], **kwargs) + tst = np.array('foo').view(A) + with assert_raises(TypeError): + +tst + + +class TestTemporaryElide(object): + # elision is only triggered on relatively large arrays + + def test_extension_incref_elide(self): + # test extension (e.g. 
cython) calling PyNumber_* slots without + # increasing the reference counts + # + # def incref_elide(a): + # d = input.copy() # refcount 1 + # return d, d + d # PyNumber_Add without increasing refcount + from numpy.core._multiarray_tests import incref_elide + d = np.ones(100000) + orig, res = incref_elide(d) + d + d + # the return original should not be changed to an inplace operation + assert_array_equal(orig, d) + assert_array_equal(res, d + d) + + def test_extension_incref_elide_stack(self): + # scanning if the refcount == 1 object is on the python stack to check + # that we are called directly from python is flawed as object may still + # be above the stack pointer and we have no access to the top of it + # + # def incref_elide_l(d): + # return l[4] + l[4] # PyNumber_Add without increasing refcount + from numpy.core._multiarray_tests import incref_elide_l + # padding with 1 makes sure the object on the stack is not overwritten + l = [1, 1, 1, 1, np.ones(100000)] + res = incref_elide_l(l) + # the return original should not be changed to an inplace operation + assert_array_equal(l[4], np.ones(100000)) + assert_array_equal(res, l[4] + l[4]) + + def test_temporary_with_cast(self): + # check that we don't elide into a temporary which would need casting + d = np.ones(200000, dtype=np.int64) + assert_equal(((d + d) + 2**222).dtype, np.dtype('O')) + + r = ((d + d) / 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = np.true_divide((d + d), 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) / 2.) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) // 2) + assert_equal(r.dtype, np.dtype(np.int64)) + + # commutative elision into the astype result + f = np.ones(100000, dtype=np.float32) + assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8')) + + # no elision into lower type + d = f.astype(np.float64) + assert_equal(((f + f) + d).dtype, d.dtype) + l = np.ones(100000, dtype=np.longdouble) + assert_equal(((d + d) + l).dtype, l.dtype) + + # test unary abs with different output dtype + for dt in (np.complex64, np.complex128, np.clongdouble): + c = np.ones(100000, dtype=dt) + r = abs(c * 2.0) + assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) + + def test_elide_broadcast(self): + # test no elision on broadcast to higher dimension + # only triggers elision code path in debug mode as triggering it in + # normal mode needs 256kb large matching dimension, so a lot of memory + d = np.ones((2000, 1), dtype=int) + b = np.ones((2000), dtype=bool) + r = (1 - d) + b + assert_equal(r, 1) + assert_equal(r.shape, (2000, 2000)) + + def test_elide_scalar(self): + # check inplace op does not create ndarray from scalars + a = np.bool_() + assert_(type(~(a & a)) is np.bool_) + + def test_elide_scalar_readonly(self): + # The imaginary part of a real array is readonly. This needs to go + # through fast_scalar_power which is only called for powers of + # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for + # elision which can be gotten for the imaginary part of a real + # array. Should not error. 
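+        # Aside added in editing (not from the numpy suite): "elision" here
+        # means that for large operands an expression such as a + b + a may
+        # reuse the temporary of (a + b) for the second add once its
+        # refcount drops to 1 -- an internal optimisation that must never
+        # change results. Minimal sketch, helper name ours:
+        def _aside_elision_is_invisible():
+            import numpy as np
+            a = np.ones(200000)  # large enough for the elision fast path
+            b = np.ones(200000)
+            assert ((a + b) + a)[0] == 3.0  # value identical either way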
+ a = np.empty(100000, dtype=np.float64) + a.imag ** 2 + + def test_elide_readonly(self): + # don't try to elide readonly temporaries + r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0 + assert_equal(r, 0) + + def test_elide_updateifcopy(self): + a = np.ones(2**20)[::2] + b = a.flat.__array__() + 1 + del b + assert_equal(a, 1) + + +class TestCAPI(object): + def test_IsPythonScalar(self): + from numpy.core._multiarray_tests import IsPythonScalar + assert_(IsPythonScalar(b'foobar')) + assert_(IsPythonScalar(1)) + assert_(IsPythonScalar(2**80)) + assert_(IsPythonScalar(2.)) + assert_(IsPythonScalar("a")) + + +class TestSubscripting(object): + def test_test_zero_rank(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x[0], np.int_)) + if sys.version_info[0] < 3: + assert_(isinstance(x[0], int)) + assert_(type(x[0, ...]) is np.ndarray) + + +class TestPickling(object): + def test_highest_available_pickle_protocol(self): + try: + import pickle5 + except ImportError: + pickle5 = None + + if sys.version_info[:2] >= (3, 8) or pickle5 is not None: + assert pickle.HIGHEST_PROTOCOL >= 5 + else: + assert pickle.HIGHEST_PROTOCOL < 5 + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, + reason=('this tests the error messages when trying to' + 'protocol 5 although it is not available')) + def test_correct_protocol5_error_message(self): + array = np.arange(10) + + if sys.version_info[:2] in ((3, 6), (3, 7)): + # For the specific case of python3.6 and 3.7, raise a clear import + # error about the pickle5 backport when trying to use protocol=5 + # without the pickle5 package + with pytest.raises(ImportError): + array.__reduce_ex__(5) + + elif sys.version_info[:2] < (3, 6): + # when calling __reduce_ex__ explicitly with protocol=5 on python + # raise a ValueError saying that protocol 5 is not available for + # this python version + with pytest.raises(ValueError): + array.__reduce_ex__(5) + + def test_record_array_with_object_dtype(self): + my_object = object() + + arr_with_object = np.array( + [(my_object, 1, 2.0)], + dtype=[('a', object), ('b', int), ('c', float)]) + arr_without_object = np.array( + [('xxx', 1, 2.0)], + dtype=[('a', str), ('b', int), ('c', float)]) + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_arr_with_object = pickle.loads( + pickle.dumps(arr_with_object, protocol=proto)) + depickled_arr_without_object = pickle.loads( + pickle.dumps(arr_without_object, protocol=proto)) + + assert_equal(arr_with_object.dtype, + depickled_arr_with_object.dtype) + assert_equal(arr_without_object.dtype, + depickled_arr_without_object.dtype) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_f_contiguous_array(self): + f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') + buffers = [] + + # When using pickle protocol 5, Fortran-contiguous arrays can be + # serialized using out-of-band buffers + bytes_string = pickle.dumps(f_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_f_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(f_contiguous_array, depickled_f_contiguous_array) + + def test_non_contiguous_array(self): + non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] + assert not non_contiguous_array.flags.c_contiguous + assert not non_contiguous_array.flags.f_contiguous + + # make sure non-contiguous arrays can be pickled-depickled + # using any protocol + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + 
depickled_non_contiguous_array = pickle.loads( + pickle.dumps(non_contiguous_array, protocol=proto)) + + assert_equal(non_contiguous_array, depickled_non_contiguous_array) + + def test_roundtrip(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + carray = np.array([[2, 9], [7, 0], [3, 8]]) + DATA = [ + carray, + np.transpose(carray), + np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), + ('c', float)]) + ] + + refs = [weakref.ref(a) for a in DATA] + for a in DATA: + assert_equal( + a, pickle.loads(pickle.dumps(a, protocol=proto)), + err_msg="%r" % a) + del a, DATA, carray + gc.collect() + # check for reference leaks (gh-12793) + for ref in refs: + assert ref() is None + + def _loads(self, obj): + if sys.version_info[0] >= 3: + return pickle.loads(obj, encoding='latin1') + else: + return pickle.loads(obj) + + # version 0 pickles, using protocol=2 to pickle + # version 0 doesn't have a version field + def test_version0_int8(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' + a = np.array([1, 2, 3, 4], dtype=np.int8) + p = self._loads(s) + assert_equal(a, p) + + def test_version0_float32(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + def test_mixed(self): + g1 = np.array(["spam", "spa", "spammer", "and eggs"]) + g2 = "spam" + assert_array_equal(g1 == g2, [x == g2 for x in g1]) + assert_array_equal(g1 != g2, [x != g2 for x in g1]) + assert_array_equal(g1 < g2, [x < g2 for x in g1]) + assert_array_equal(g1 > g2, [x > g2 for x in g1]) + assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) + assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) + + def test_unicode(self): + g1 = np.array([u"This", u"is", u"example"]) + g2 = np.array([u"This", u"was", u"example"]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + +class TestArgmax(object): + + nan_arr = [ + ([0, 1, 2, 3, np.nan], 4), + ([0, 1, 2, np.nan, 3], 3), + ([np.nan, 0, 1, 2, 3], 0), + ([np.nan, 0, np.nan, 2, 3], 0), + ([0, 1, 2, 3, complex(0, np.nan)], 4), + ([0, 1, 2, 3, complex(np.nan, 0)], 4), + ([0, 1, 2, complex(np.nan, 0), 3], 3), + ([0, 1, 2, complex(0, np.nan), 3], 3), + ([complex(0, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), + + ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), + ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), + ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), + + ([np.datetime64('1923-04-14T12:43:12'), + np.datetime64('1994-06-21T14:43:15'), + np.datetime64('2001-10-15T04:10:32'), + 
np.datetime64('1995-11-25T16:02:16'), + np.datetime64('2005-01-04T03:14:12'), + np.datetime64('2041-12-03T14:05:03')], 5), + ([np.datetime64('1935-09-14T04:40:11'), + np.datetime64('1949-10-12T12:32:11'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('2015-11-20T12:20:59'), + np.datetime64('1932-09-23T10:10:13'), + np.datetime64('2014-10-10T03:50:30')], 3), + # Assorted tests with NaTs + ([np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('NaT'), + np.datetime64('2015-09-23T10:10:13'), + np.datetime64('1932-10-10T03:50:30')], 4), + ([np.datetime64('2059-03-14T12:43:12'), + np.datetime64('1996-09-21T14:43:15'), + np.datetime64('NaT'), + np.datetime64('2022-12-25T16:02:16'), + np.datetime64('1963-10-04T03:14:12'), + np.datetime64('2013-05-08T18:15:23')], 0), + ([np.timedelta64(2, 's'), + np.timedelta64(1, 's'), + np.timedelta64('NaT', 's'), + np.timedelta64(3, 's')], 3), + ([np.timedelta64('NaT', 's')] * 3, 0), + + ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), + timedelta(days=-1, seconds=23)], 0), + ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), + timedelta(days=5, seconds=14)], 1), + ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), + timedelta(days=10, seconds=43)], 2), + + ([False, False, False, False, True], 4), + ([False, False, False, True, False], 3), + ([True, False, False, False, False], 0), + ([True, False, True, False, False], 0), + ] + + def test_all(self): + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + for i in range(a.ndim): + amax = a.max(i) + aargmax = a.argmax(i) + axes = list(range(a.ndim)) + axes.remove(i) + assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes)))) + + def test_combinations(self): + for arr, pos in self.nan_arr: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + max_val = np.max(arr) + + assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) + assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr) + + def test_output_shape(self): + # see also gh-616 + a = np.ones((10, 5)) + # Check some simple shape mismatches + out = np.ones(11, dtype=np.int_) + assert_raises(ValueError, a.argmax, -1, out) + + out = np.ones((2, 5), dtype=np.int_) + assert_raises(ValueError, a.argmax, -1, out) + + # these could be relaxed possibly (used to allow even the previous) + out = np.ones((1, 10), dtype=np.int_) + assert_raises(ValueError, a.argmax, -1, out) + + out = np.ones(10, dtype=np.int_) + a.argmax(-1, out=out) + assert_equal(out, a.argmax(-1)) + + def test_argmax_unicode(self): + d = np.zeros(6031, dtype='= cmin)) + assert_(np.all(x <= cmax)) + + def _clip_type(self, type_group, array_max, + clip_min, clip_max, inplace=False, + expected_min=None, expected_max=None): + if expected_min is None: + expected_min = clip_min + if expected_max is None: + expected_max = clip_max + + for T in np.sctypes[type_group]: + if sys.byteorder == 'little': + byte_orders = ['=', '>'] + else: + byte_orders = ['<', '='] + + for byteorder in byte_orders: + dtype = np.dtype(T).newbyteorder(byteorder) + + x = (np.random.random(1000) * array_max).astype(dtype) + if inplace: + x.clip(clip_min, clip_max, x) + else: + x = x.clip(clip_min, clip_max) + byteorder = '=' + + if x.dtype.byteorder == '|': + byteorder = '|' + assert_equal(x.dtype.byteorder, byteorder) + self._check_range(x, expected_min, expected_max) + return x + + def test_basic(self): + for inplace in [False, True]: + self._clip_type( + 'float', 1024, -12.8, 
100.2, inplace=inplace) + self._clip_type( + 'float', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'int', 1024, -120, 100.5, inplace=inplace) + self._clip_type( + 'int', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'uint', 1024, 0, 0, inplace=inplace) + self._clip_type( + 'uint', 1024, -120, 100, inplace=inplace, expected_min=0) + + def test_record_array(self): + rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], + dtype=[('x', '= 3)) + x = val.clip(min=3) + assert_(np.all(x >= 3)) + x = val.clip(max=4) + assert_(np.all(x <= 4)) + + def test_nan(self): + input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) + result = input_arr.clip(-1, 1) + expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) + assert_array_equal(result, expected) + + +class TestCompress(object): + def test_axis(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = np.compress([0, 1, 0, 1, 0], arr, axis=1) + assert_equal(out, tgt) + + def test_truncate(self): + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=1) + assert_equal(out, tgt) + + def test_flatten(self): + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr) + assert_equal(out, 1) + + +class TestPutmask(object): + def tst_basic(self, x, T, mask, val): + np.putmask(x, mask, val) + assert_equal(x[mask], T(val)) + assert_equal(x.dtype, T) + + def test_ip_types(self): + unchecked_types = [bytes, unicode, np.void, object] + + x = np.random.random(1000)*100 + mask = x < 40 + + for val in [-100, 0, 15]: + for types in np.sctypes.values(): + for T in types: + if T not in unchecked_types: + self.tst_basic(x.copy().astype(T), T, mask, val) + + def test_mask_size(self): + assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) + + @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', 'i4', 'f8'), ('z', ' 16MB + d = np.zeros(4 * 1024 ** 2) + d.tofile(self.filename) + assert_equal(os.path.getsize(self.filename), d.nbytes) + assert_array_equal(d, np.fromfile(self.filename)) + # check offset + with open(self.filename, "r+b") as f: + f.seek(d.nbytes) + d.tofile(f) + assert_equal(os.path.getsize(self.filename), d.nbytes * 2) + # check append mode (gh-8329) + open(self.filename, "w").close() # delete file contents + with open(self.filename, "ab") as f: + d.tofile(f) + assert_array_equal(d, np.fromfile(self.filename)) + with open(self.filename, "ab") as f: + d.tofile(f) + assert_equal(os.path.getsize(self.filename), d.nbytes * 2) + + def test_io_open_buffered_fromfile(self): + # gh-6632 + self.x.tofile(self.filename) + with io.open(self.filename, 'rb', buffering=-1) as f: + y = np.fromfile(f, dtype=self.dtype) + assert_array_equal(y, self.x.flat) + + def test_file_position_after_fromfile(self): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE//8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE*8] + + for size in sizes: + f = open(self.filename, 'wb') + f.seek(size-1) + f.write(b'\0') + f.close() + + for mode in ['rb', 'r+b']: + err_msg = "%d %s" % (size, mode) + + f = open(self.filename, mode) + f.read(2) + np.fromfile(f, dtype=np.float64, count=1) + pos = f.tell() + f.close() + assert_equal(pos, 10, err_msg=err_msg) + + def test_file_position_after_tofile(self): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE//8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE*8] + + for size in sizes: + err_msg = "%d" % (size,) + + f = open(self.filename, 'wb') + f.seek(size-1) + 
f.write(b'\0') + f.seek(10) + f.write(b'12') + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + f.close() + assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) + + f = open(self.filename, 'r+b') + f.read(2) + f.seek(0, 1) # seek between read&write required by ANSI C + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + f.close() + assert_equal(pos, 10, err_msg=err_msg) + + def test_load_object_array_fromfile(self): + # gh-12300 + with open(self.filename, 'w') as f: + # Ensure we have a file with consistent contents + pass + + with open(self.filename, 'rb') as f: + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, f, dtype=object) + + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, self.filename, dtype=object) + + def _check_from(self, s, value, **kw): + if 'sep' not in kw: + y = np.frombuffer(s, **kw) + else: + y = np.fromstring(s, **kw) + assert_array_equal(y, value) + + f = open(self.filename, 'wb') + f.write(s) + f.close() + y = np.fromfile(self.filename, **kw) + assert_array_equal(y, value) + + def test_nan(self): + self._check_from( + b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + sep=' ') + + def test_inf(self): + self._check_from( + b"inf +inf -inf infinity -Infinity iNfInItY -inF", + [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], + sep=' ') + + def test_numbers(self): + self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133", + [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') + + def test_binary(self): + self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', + np.array([1, 2, 3, 4]), + dtype=' 1 minute on mechanical hard drive + def test_big_binary(self): + """Test workarounds for 32-bit limited fwrite, fseek, and ftell + calls in windows. These normally would hang doing something like this. 
+ See http://projects.scipy.org/numpy/ticket/1660""" + if sys.platform != 'win32': + return + try: + # before workarounds, only up to 2**32-1 worked + fourgbplus = 2**32 + 2**16 + testbytes = np.arange(8, dtype=np.int8) + n = len(testbytes) + flike = tempfile.NamedTemporaryFile() + f = flike.file + np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) + flike.seek(0) + a = np.fromfile(f, dtype=np.int8) + flike.close() + assert_(len(a) == fourgbplus) + # check only start and end for speed: + assert_((a[:n] == testbytes).all()) + assert_((a[-n:] == testbytes).all()) + except (MemoryError, ValueError): + pass + + def test_string(self): + self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',') + + def test_counted_string(self): + self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') + self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',') + self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') + + def test_string_with_ws(self): + self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') + + def test_counted_string_with_ws(self): + self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int, + sep=' ') + + def test_ascii(self): + self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') + self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') + + def test_malformed(self): + self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ') + + def test_long_sep(self): + self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') + + def test_dtype(self): + v = np.array([1, 2, 3, 4], dtype=np.int_) + self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_) + + def test_dtype_bool(self): + # can't use _check_from because fromstring can't handle True/False + v = np.array([True, False, True, False], dtype=np.bool_) + s = b'1,0,-2.3,0' + f = open(self.filename, 'wb') + f.write(s) + f.close() + y = np.fromfile(self.filename, sep=',', dtype=np.bool_) + assert_(y.dtype == '?') + assert_array_equal(y, v) + + def test_tofile_sep(self): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + f = open(self.filename, 'w') + x.tofile(f, sep=',') + f.close() + f = open(self.filename, 'r') + s = f.read() + f.close() + #assert_equal(s, '1.51,2.0,3.51,4.0') + y = np.array([float(p) for p in s.split(',')]) + assert_array_equal(x,y) + + def test_tofile_format(self): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + f = open(self.filename, 'w') + x.tofile(f, sep=',', format='%.2f') + f.close() + f = open(self.filename, 'r') + s = f.read() + f.close() + assert_equal(s, '1.51,2.00,3.51,4.00') + + def test_locale(self): + with CommaDecimalPointLocale(): + self.test_numbers() + self.test_nan() + self.test_inf() + self.test_counted_string() + self.test_ascii() + self.test_malformed() + self.test_tofile_sep() + self.test_tofile_format() + + +class TestFromBuffer(object): + @pytest.mark.parametrize('byteorder', ['<', '>']) + @pytest.mark.parametrize('dtype', [float, int, complex]) + def test_basic(self, byteorder, dtype): + dt = np.dtype(dtype).newbyteorder(byteorder) + x = (np.random.random((4, 7)) * 5).astype(dt) + buf = x.tobytes() + assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat) + + def test_empty(self): + assert_array_equal(np.frombuffer(b''), np.array([])) + + +class TestFlat(object): + def setup(self): + a0 = np.arange(20.0) + a = a0.reshape(4, 5) + a0.shape = (4, 5) + a.flags.writeable = False + self.a = a + self.b = a[::2, ::2] + self.a0 = a0 + self.b0 = a0[::2, ::2] + + def test_contiguous(self): + testpassed = False + try: + self.a.flat[12] = 100.0 + except 
ValueError: + testpassed = True + assert_(testpassed) + assert_(self.a.flat[12] == 12.0) + + def test_discontiguous(self): + testpassed = False + try: + self.b.flat[4] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(self.b.flat[4] == 12.0) + + def test___array__(self): + c = self.a.flat.__array__() + d = self.b.flat.__array__() + e = self.a0.flat.__array__() + f = self.b0.flat.__array__() + + assert_(c.flags.writeable is False) + assert_(d.flags.writeable is False) + # for 1.14 all are set to non-writeable on the way to replacing the + # UPDATEIFCOPY array returned for non-contiguous arrays. + assert_(e.flags.writeable is True) + assert_(f.flags.writeable is False) + with assert_warns(DeprecationWarning): + assert_(c.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + assert_(d.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + assert_(e.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + # UPDATEIFCOPY is removed. + assert_(f.flags.updateifcopy is False) + assert_(c.flags.writebackifcopy is False) + assert_(d.flags.writebackifcopy is False) + assert_(e.flags.writebackifcopy is False) + assert_(f.flags.writebackifcopy is False) + + +class TestResize(object): + def test_basic(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + if IS_PYPY: + x.resize((5, 5), refcheck=False) + else: + x.resize((5, 5)) + assert_array_equal(x.flat[:9], + np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) + assert_array_equal(x[9:].flat, 0) + + def test_check_reference(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + y = x + assert_raises(ValueError, x.resize, (5, 1)) + del y # avoid pyflakes unused variable warning. + + def test_int_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, refcheck=False) + else: + x.resize(3) + assert_array_equal(x, np.eye(3)[0,:]) + + def test_none_shape(self): + x = np.eye(3) + x.resize(None) + assert_array_equal(x, np.eye(3)) + x.resize() + assert_array_equal(x, np.eye(3)) + + def test_0d_shape(self): + # to it multiple times to test it does not break alloc cache gh-9216 + for i in range(10): + x = np.empty((1,)) + x.resize(()) + assert_equal(x.shape, ()) + assert_equal(x.size, 1) + x = np.empty(()) + x.resize((1,)) + assert_equal(x.shape, (1,)) + assert_equal(x.size, 1) + + def test_invalid_arguments(self): + assert_raises(TypeError, np.eye(3).resize, 'hi') + assert_raises(ValueError, np.eye(3).resize, -1) + assert_raises(TypeError, np.eye(3).resize, order=1) + assert_raises(TypeError, np.eye(3).resize, refcheck='hi') + + def test_freeform_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, 2, 1, refcheck=False) + else: + x.resize(3, 2, 1) + assert_(x.shape == (3, 2, 1)) + + def test_zeros_appended(self): + x = np.eye(3) + if IS_PYPY: + x.resize(2, 3, 3, refcheck=False) + else: + x.resize(2, 3, 3) + assert_array_equal(x[0], np.eye(3)) + assert_array_equal(x[1], np.zeros((3, 3))) + + def test_obj_obj(self): + # check memory is initialized on resize, gh-4857 + a = np.ones(10, dtype=[('k', object, 2)]) + if IS_PYPY: + a.resize(15, refcheck=False) + else: + a.resize(15,) + assert_equal(a.shape, (15,)) + assert_array_equal(a['k'][-5:], 0) + assert_array_equal(a['k'][:-5], 1) + + def test_empty_view(self): + # check that sizes containing a zero don't trigger a reallocate for + # already empty arrays + x = np.zeros((10, 0), int) + x_view = x[...] 
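+        # Aside added in editing (not from the numpy suite): resize refuses
+        # to reallocate while other references exist, since the data buffer
+        # may move; refcheck=False waives that check, as the surrounding
+        # tests rely on under PyPy. Hypothetical helper, never called:
+        def _aside_resize_refcheck():
+            import numpy as np
+            x = np.arange(4)
+            y = x  # a second reference pins the buffer
+            try:
+                x.resize(8)  # raises ValueError: references exist
+            except ValueError:
+                pass
+            x.resize(8, refcheck=False)  # caller vouches it is safe
+            assert x.size == 8 and y.size == 8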
+ x_view.resize((0, 10)) + x_view.resize((0, 100)) + + def test_check_weakref(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + xref = weakref.ref(x) + assert_raises(ValueError, x.resize, (5, 1)) + del xref # avoid pyflakes unused variable warning. + + +class TestRecord(object): + def test_field_rename(self): + dt = np.dtype([('f', float), ('i', int)]) + dt.names = ['p', 'q'] + assert_equal(dt.names, ['p', 'q']) + + def test_multiple_field_name_occurrence(self): + def test_dtype_init(): + np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) + + # Error raised when multiple fields have the same name + assert_raises(ValueError, test_dtype_init) + + @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") + def test_bytes_fields(self): + # Bytes are not allowed in field names and not recognized in titles + # on Py3 + assert_raises(TypeError, np.dtype, [(b'a', int)]) + assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) + + dt = np.dtype([((b'a', 'b'), int)]) + assert_raises(TypeError, dt.__getitem__, b'a') + + x = np.array([(1,), (2,), (3,)], dtype=dt) + assert_raises(IndexError, x.__getitem__, b'a') + + y = x[0] + assert_raises(IndexError, y.__getitem__, b'a') + + @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3") + def test_multiple_field_name_unicode(self): + def test_dtype_unicode(): + np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) + + # Error raised when multiple fields have the same name(unicode included) + assert_raises(ValueError, test_dtype_unicode) + + @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") + def test_unicode_field_titles(self): + # Unicode field titles are added to field dict on Py2 + title = u'b' + dt = np.dtype([((title, 'a'), int)]) + dt[title] + dt['a'] + x = np.array([(1,), (2,), (3,)], dtype=dt) + x[title] + x['a'] + y = x[0] + y[title] + y['a'] + + @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2") + def test_unicode_field_names(self): + # Unicode field names are converted to ascii on Python 2: + encodable_name = u'b' + assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b') + assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b') + + # But raises UnicodeEncodeError if it can't be encoded: + nonencodable_name = u'\uc3bc' + assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)]) + assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)]) + + def test_fromarrays_unicode(self): + # A single name string provided to fromarrays() is allowed to be unicode + # on both Python 2 and 3: + x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4') + assert_equal(x['a'][0], 0) + assert_equal(x['b'][0], 1) + + def test_unicode_order(self): + # Test that we can sort with order as a unicode field name in both Python 2 and + # 3: + name = u'b' + x = np.array([1, 3, 2], dtype=[(name, int)]) + x.sort(order=name) + assert_equal(x[u'b'], np.array([1, 2, 3])) + + def test_field_names(self): + # Test unicode and 8-bit / byte strings can be used + a = np.zeros((1,), dtype=[('f1', 'i4'), + ('f2', 'i4'), + ('f3', [('sf1', 'i4')])]) + is_py3 = sys.version_info[0] >= 3 + if is_py3: + funcs = (str,) + # byte string indexing fails gracefully + assert_raises(IndexError, a.__setitem__, b'f1', 1) + assert_raises(IndexError, a.__getitem__, b'f1') + assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) + assert_raises(IndexError, a['f1'].__getitem__, b'sf1') + else: + funcs = (str, unicode) + for func in funcs: + b = a.copy() + fn1 = 
func('f1') + b[fn1] = 1 + assert_equal(b[fn1], 1) + fnn = func('not at all') + assert_raises(ValueError, b.__setitem__, fnn, 1) + assert_raises(ValueError, b.__getitem__, fnn) + b[0][fn1] = 2 + assert_equal(b[fn1], 2) + # Subfield + assert_raises(ValueError, b[0].__setitem__, fnn, 1) + assert_raises(ValueError, b[0].__getitem__, fnn) + # Subfield + fn3 = func('f3') + sfn1 = func('sf1') + b[fn3][sfn1] = 1 + assert_equal(b[fn3][sfn1], 1) + assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) + assert_raises(ValueError, b[fn3].__getitem__, fnn) + # multiple subfields + fn2 = func('f2') + b[fn2] = 3 + + assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) + assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) + + # non-ascii unicode field indexing is well behaved + if not is_py3: + pytest.skip('non ascii unicode field indexing skipped; ' + 'raises segfault on python 2.x') + else: + assert_raises(ValueError, a.__setitem__, u'\u03e0', 1) + assert_raises(ValueError, a.__getitem__, u'\u03e0') + + def test_record_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + a.flags.writeable = False + b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) + b.flags.writeable = False + c = np.array([(1, 2), (3, 4)], dtype='i1,i2') + c.flags.writeable = False + assert_(hash(a[0]) == hash(a[1])) + assert_(hash(a[0]) == hash(b[0])) + assert_(hash(a[0]) != hash(b[1])) + assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0]) + + def test_record_no_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + assert_raises(TypeError, hash, a[0]) + + def test_empty_structure_creation(self): + # make sure these do not raise errors (gh-5631) + np.array([()], dtype={'names': [], 'formats': [], + 'offsets': [], 'itemsize': 12}) + np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], + 'offsets': [], 'itemsize': 12}) + + def test_multifield_indexing_view(self): + a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')]) + v = a[['a', 'c']] + assert_(v.base is a) + assert_(v.dtype == np.dtype({'names': ['a', 'c'], + 'formats': ['i4', 'u4'], + 'offsets': [0, 8]})) + v[:] = (4,5) + assert_equal(a[0].item(), (4, 1, 5)) + +class TestView(object): + def test_basic(self): + x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], + dtype=[('r', np.int8), ('g', np.int8), + ('b', np.int8), ('a', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype=' 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + A = np.zeros((0, 3)) + for f in self.funcs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(A, axis=axis)).all()) + assert_(len(w) > 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(A, axis=axis), np.zeros([])) + + def test_mean_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * mat.shape[axis] + assert_almost_equal(res, tgt) + for axis in [None]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * np.prod(mat.shape) + assert_almost_equal(res, tgt) + + def test_mean_float16(self): + # This fail if the sum inside mean is done in float16 instead + # of float32. 
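+        # Aside added in editing (not from the numpy suite): float16 cannot
+        # represent consecutive integers above 2048, so a float16-precision
+        # running sum of 100000 ones saturates there; mean() avoids this by
+        # accumulating in float32. Sketch of the failure mode, helper ours:
+        def _aside_float16_saturation():
+            import numpy as np
+            ones = np.ones(100000, dtype=np.float16)
+            assert ones.sum(dtype=np.float16) == 2048.0  # 2048 + 1 -> 2048
+            assert ones.mean() == 1.0  # float32 accumulator is exact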
+ assert_(_mean(np.ones(100000, dtype='float16')) == 1) + + def test_var_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_std_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + tgt = np.sqrt(_var(mat, axis=axis)) + res = _std(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_subclass(self): + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + + def __array_finalize__(self, obj): + self.info = getattr(obj, "info", '') + + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + res = dat.mean(1) + assert_(res.info == dat.info) + res = dat.std(1) + assert_(res.info == dat.info) + res = dat.var(1) + assert_(res.info == dat.info) + +class TestVdot(object): + def test_basic(self): + dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] + dt_complex = np.typecodes['Complex'] + + # test real + a = np.eye(3) + for dt in dt_numeric + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test complex + a = np.eye(3) * 1j + for dt in dt_complex + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test boolean + b = np.eye(3, dtype=bool) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), True) + + def test_vdot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.vdot(a, a) + + # integer arrays are exact + assert_equal(np.vdot(a, b), res) + assert_equal(np.vdot(b, a), res) + assert_equal(np.vdot(b, b), res) + + def test_vdot_uncontiguous(self): + for size in [2, 1000]: + # Different sizes match different branches in vdot. 
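+        # Aside added in editing (not from the numpy suite): unlike dot,
+        # vdot always flattens both operands and conjugates the first, so
+        # vdot(a, b) == dot(a.conj().ravel(), b.ravel()). Helper name ours:
+        def _aside_vdot_conjugates():
+            import numpy as np
+            a = np.array([1 + 2j, 3 + 4j])
+            b = np.array([5 + 6j, 7 + 8j])
+            assert np.vdot(a, b) == np.dot(a.conj(), b)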
+ a = np.zeros((size, 2, 2)) + b = np.zeros((size, 2, 2)) + a[:, 0, 0] = np.arange(size) + b[:, 0, 0] = np.arange(size) + 1 + # Make a and b uncontiguous: + a = a[..., 0] + b = b[..., 0] + + assert_equal(np.vdot(a, b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy()), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy(), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy('F'), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy('F')), + np.vdot(a.flatten(), b.flatten())) + + +class TestDot(object): + def setup(self): + np.random.seed(128) + self.A = np.random.rand(4, 2) + self.b1 = np.random.rand(2, 1) + self.b2 = np.random.rand(2) + self.b3 = np.random.rand(1, 2) + self.b4 = np.random.rand(4) + self.N = 7 + + def test_dotmatmat(self): + A = self.A + res = np.dot(A.transpose(), A) + tgt = np.array([[1.45046013, 0.86323640], + [0.86323640, 0.84934569]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec(self): + A, b1 = self.A, self.b1 + res = np.dot(A, b1) + tgt = np.array([[0.32114320], [0.04889721], + [0.15696029], [0.33612621]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec2(self): + A, b2 = self.A, self.b2 + res = np.dot(A, b2) + tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat(self): + A, b4 = self.A, self.b4 + res = np.dot(b4, A) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat2(self): + b3, A = self.b3, self.A + res = np.dot(b3, A.transpose()) + tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat3(self): + A, b4 = self.A, self.b4 + res = np.dot(A.transpose(), b4) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecouter(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b1, b3) + tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecinner(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b3, b1) + tgt = np.array([[ 0.23129668]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect1(self): + b1 = np.ones((3, 1)) + b2 = [5.3] + res = np.dot(b1, b2) + tgt = np.array([5.3, 5.3, 5.3]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect2(self): + b1 = np.ones((3, 1)).transpose() + b2 = [6.2] + res = np.dot(b2, b1) + tgt = np.array([6.2, 6.2, 6.2]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar(self): + np.random.seed(100) + b1 = np.random.rand(1, 1) + b2 = np.random.rand(1, 4) + res = np.dot(b1, b2) + tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar2(self): + np.random.seed(100) + b1 = np.random.rand(4, 1) + b2 = np.random.rand(1, 1) + res = np.dot(b1, b2) + tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_all(self): + dims = [(), (1,), (1, 1)] + dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] + for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): + b1 = np.zeros(dim1) + b2 = np.zeros(dim2) + res = np.dot(b1, b2) + tgt = np.zeros(dim) + assert_(res.shape == tgt.shape) + assert_almost_equal(res, tgt, decimal=self.N) + 
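+    # Aside added in editing (not from the numpy suite): dot's out= argument
+    # only accepts a result array of exactly the right shape, dtype and
+    # C-contiguous layout, as the error cases below exercise; when it fits,
+    # the very same array comes back. Hypothetical sketch, never called:
+    def _aside_dot_out(self):
+        import numpy as np
+        a = np.ones((2, 3))
+        b = np.ones((3, 4))
+        r = np.empty((2, 4))
+        assert np.dot(a, b, out=r) is r  # written in place, no copy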
+ def test_vecobject(self): + class Vec(object): + def __init__(self, sequence=None): + if sequence is None: + sequence = [] + self.array = np.array(sequence) + + def __add__(self, other): + out = Vec() + out.array = self.array + other.array + return out + + def __sub__(self, other): + out = Vec() + out.array = self.array - other.array + return out + + def __mul__(self, other): # with scalar + out = Vec(self.array.copy()) + out.array *= other + return out + + def __rmul__(self, other): + return self*other + + U_non_cont = np.transpose([[1., 1.], [1., 2.]]) + U_cont = np.ascontiguousarray(U_non_cont) + x = np.array([Vec([1., 0.]), Vec([0., 1.])]) + zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) + zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) + assert_equal(zeros[0].array, zeros_test[0].array) + assert_equal(zeros[1].array, zeros_test[1].array) + + def test_dot_2args(self): + from numpy.core.multiarray import dot + + a = np.array([[1, 2], [3, 4]], dtype=float) + b = np.array([[1, 0], [1, 1]], dtype=float) + c = np.array([[3, 2], [7, 4]], dtype=float) + + d = dot(a, b) + assert_allclose(c, d) + + def test_dot_3args(self): + from numpy.core.multiarray import dot + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 32)) + for i in range(12): + dot(f, v, r) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(r), 2) + r2 = dot(f, v, out=None) + assert_array_equal(r2, r) + assert_(r is dot(f, v, out=r)) + + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = dot(f, v) + assert_(r is dot(f, v, r)) + assert_array_equal(r2, r) + + def test_dot_3args_errors(self): + from numpy.core.multiarray import dot + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 31)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32, 1024)) + assert_raises(ValueError, dot, f, v, r) + assert_raises(ValueError, dot, f, v, r.T) + + r = np.empty((1024, 64)) + assert_raises(ValueError, dot, f, v, r[:, ::2]) + assert_raises(ValueError, dot, f, v, r[:, :32]) + + r = np.empty((1024, 32), dtype=np.float32) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024, 32), dtype=int) + assert_raises(ValueError, dot, f, v, r) + + def test_dot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.dot(a, a) + + # integer arrays are exact + assert_equal(np.dot(a, b), res) + assert_equal(np.dot(b, a), res) + assert_equal(np.dot(b, b), res) + + def test_accelerate_framework_sgemv_fix(self): + + def aligned_array(shape, align, dtype, order='C'): + d = dtype(0) + N = np.prod(shape) + tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) + address = tmp.__array_interface__["data"][0] + for offset in range(align): + if (address + offset) % align == 0: + break + tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) + return tmp.reshape(shape, order=order) + + def as_aligned(arr, align, dtype, order='C'): + aligned = aligned_array(arr.shape, align, dtype, order) + aligned[:] = arr[:] + return aligned + + def assert_dot_close(A, X, desired): + assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) + + m = aligned_array(100, 15, np.float32) + s = aligned_array((100, 100), 15, np.float32) + np.dot(s, m) # this will always segfault if the bug 
is present + + testdata = itertools.product((15,32), (10000,), (200,89), ('C','F')) + for align, m, n, a_order in testdata: + # Calculation in double precision + A_d = np.random.rand(m, n) + X_d = np.random.rand(n) + desired = np.dot(A_d, X_d) + # Calculation with aligned single precision + A_f = as_aligned(A_d, align, np.float32, order=a_order) + X_f = as_aligned(X_d, align, np.float32) + assert_dot_close(A_f, X_f, desired) + # Strided A rows + A_d_2 = A_d[::2] + desired = np.dot(A_d_2, X_d) + A_f_2 = A_f[::2] + assert_dot_close(A_f_2, X_f, desired) + # Strided A columns, strided X vector + A_d_22 = A_d_2[:, ::2] + X_d_2 = X_d[::2] + desired = np.dot(A_d_22, X_d_2) + A_f_22 = A_f_2[:, ::2] + X_f_2 = X_f[::2] + assert_dot_close(A_f_22, X_f_2, desired) + # Check the strides are as expected + if a_order == 'F': + assert_equal(A_f_22.strides, (8, 8 * m)) + else: + assert_equal(A_f_22.strides, (8 * n, 8)) + assert_equal(X_f_2.strides, (8,)) + # Strides in A rows + cols only + X_f_2c = as_aligned(X_f_2, align, np.float32) + assert_dot_close(A_f_22, X_f_2c, desired) + # Strides just in A cols + A_d_12 = A_d[:, ::2] + desired = np.dot(A_d_12, X_d_2) + A_f_12 = A_f[:, ::2] + assert_dot_close(A_f_12, X_f_2c, desired) + # Strides in A cols and X + assert_dot_close(A_f_12, X_f_2, desired) + + +class MatmulCommon(object): + """Common tests for '@' operator and numpy.matmul. + + """ + # Should work with these types. Will want to add + # "O" at some point + types = "?bhilqBHILQefdgFDG" + + def test_exceptions(self): + dims = [ + ((1,), (2,)), # mismatched vector vector + ((2, 1,), (2,)), # mismatched matrix vector + ((2,), (1, 2)), # mismatched vector matrix + ((1, 2), (3, 1)), # mismatched matrix matrix + ((1,), ()), # vector scalar + ((), (1)), # scalar vector + ((1, 1), ()), # matrix scalar + ((), (1, 1)), # scalar matrix + ((2, 2, 1), (3, 1, 2)), # cannot broadcast + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + assert_raises(ValueError, self.matmul, a, b) + + def test_shapes(self): + dims = [ + ((1, 1), (2, 1, 1)), # broadcast first argument + ((2, 1, 1), (1, 1)), # broadcast second argument + ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + res = self.matmul(a, b) + assert_(res.shape == (2, 1, 1)) + + # vector vector returns scalars. 
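+        # --- Editorial aside, not part of the vendored suite: a minimal sketch of
+        # the matmul shape rules exercised here, using only public NumPy API.
+        # Stacked matrices broadcast against a single matrix, mismatched core
+        # dimensions raise ValueError instead of broadcasting, and 1-D @ 1-D
+        # contracts to a 0-d scalar:
+        #
+        #     import numpy as np
+        #     assert np.matmul(np.ones((2, 1, 1)), np.ones((1, 1))).shape == (2, 1, 1)
+        #     assert np.array(np.matmul(np.ones(2), np.ones(2))).shape == ()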
+ for dt in self.types: + a = np.ones((2,), dtype=dt) + b = np.ones((2,), dtype=dt) + c = self.matmul(a, b) + assert_(np.array(c).shape == ()) + + def test_result_types(self): + mat = np.ones((1,1)) + vec = np.ones((1,)) + for dt in self.types: + m = mat.astype(dt) + v = vec.astype(dt) + for arg in [(m, v), (v, m), (m, m)]: + res = self.matmul(*arg) + assert_(res.dtype == dt) + + # vector vector returns scalars + res = self.matmul(v, v) + assert_(type(res) is np.dtype(dt).type) + + def test_scalar_output(self): + vec1 = np.array([2]) + vec2 = np.array([3, 4]).reshape(1, -1) + tgt = np.array([6, 8]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt) + res = self.matmul(v2.T, v1) + assert_equal(res, tgt) + + # boolean type + vec = np.array([True, True], dtype='?').reshape(1, -1) + res = self.matmul(vec[:, 0], vec) + assert_equal(res, True) + + def test_vector_vector_values(self): + vec1 = np.array([1, 2]) + vec2 = np.array([3, 4]).reshape(-1, 1) + tgt1 = np.array([11]) + tgt2 = np.array([[3, 6], [4, 8]]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt1) + # no broadcast, we must make v1 into a 2d ndarray + res = self.matmul(v2, v1.reshape(1, -1)) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, True], dtype='?') + res = self.matmul(vec, vec) + assert_equal(res, True) + + def test_vector_matrix_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([7, 10]) + tgt2 = np.stack([tgt1]*2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(v, m1) + assert_equal(res, tgt1) + res = self.matmul(v, m2) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1]*2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_vector_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([5, 11]) + tgt2 = np.stack([tgt1]*2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(m1, v) + assert_equal(res, tgt1) + res = self.matmul(m2, v) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1]*2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_matrix_values(self): + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.array([[1, 0], [1, 1]]) + mat12 = np.stack([mat1, mat2], axis=0) + mat21 = np.stack([mat2, mat1], axis=0) + tgt11 = np.array([[7, 10], [15, 22]]) + tgt12 = np.array([[3, 2], [7, 4]]) + tgt21 = np.array([[1, 2], [4, 6]]) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + for dt in self.types[1:]: + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + m12 = mat12.astype(dt) + m21 = mat21.astype(dt) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, 
tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + # boolean type + m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_) + m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_) + m12 = np.stack([m1, m2], axis=0) + m21 = np.stack([m2, m1], axis=0) + tgt11 = m1 + tgt12 = m1 + tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + +class TestMatmul(MatmulCommon): + matmul = np.matmul + + def test_out_arg(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + tgt = np.dot(a, b) + + # test as positional argument + msg = "out positional argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out) + assert_array_equal(out, tgt, err_msg=msg) + + # test as keyword argument + msg = "out keyword argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out=out) + assert_array_equal(out, tgt, err_msg=msg) + + # test out with not allowed type cast (safe casting) + msg = "Cannot cast ufunc matmul output" + out = np.zeros((5, 2), dtype=np.int32) + assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out) + + # test out with type upcast to complex + out = np.zeros((5, 2), dtype=np.complex128) + c = self.matmul(a, b, out=out) + assert_(c is out) + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning, '') + c = c.astype(tgt.dtype) + assert_array_equal(c, tgt) + + def test_out_contiguous(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + v = np.array([1, 3], dtype=float) + tgt = np.dot(a, b) + tgt_mv = np.dot(a, v) + + # test out non-contiguous + out = np.ones((5, 2, 2), dtype=float) + c = self.matmul(a, b, out=out[..., 0]) + assert c.base is out + assert_array_equal(c, tgt) + c = self.matmul(a, v, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + c = self.matmul(v, a.T, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + + # test out contiguous in only last dim + out = np.ones((10, 2), dtype=float) + c = self.matmul(a, b, out=out[::2, :]) + assert_array_equal(c, tgt) + + # test transposes of out, args + out = np.ones((5, 2), dtype=float) + c = self.matmul(b.T, a.T, out=out.T) + assert_array_equal(out, tgt) + + m1 = np.arange(15.).reshape(5, 3) + m2 = np.arange(21.).reshape(3, 7) + m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous + vc = np.arange(10.) + vr = np.arange(6.) 
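+    # --- Editorial aside, not part of the vendored suite: the out= semantics
+    # checked in test_out_arg above, as a minimal sketch with public NumPy API.
+    # matmul hands back the out array itself, and a float result aimed at an
+    # integer out raises TypeError because only safe casts are allowed:
+    #
+    #     import numpy as np
+    #     a = np.ones((5, 2)); b = np.array([[1., 3.], [5., 7.]])
+    #     out = np.zeros((5, 2))
+    #     assert np.matmul(a, b, out=out) is out
+    #     # np.matmul(a, b, out=np.zeros((5, 2), dtype=np.int32)) -> TypeError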
+ m0 = np.zeros((3, 0)) + @pytest.mark.parametrize('args', ( + # matrix-matrix + (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), + # matrix-matrix-transpose, contiguous and non + (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T), + (m3, m3.T), (m3.T, m3), + # matrix-matrix non-contiguous + (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T), + # vector-matrix, matrix-vector, contiguous + (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T), + # vector-matrix, matrix-vector, vector non-contiguous + (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T), + # vector-matrix, matrix-vector, matrix non-contiguous + (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T), + # vector-matrix, matrix-vector, both non-contiguous + (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T), + # size == 0 + (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T), + )) + def test_dot_equivalent(self, args): + r1 = np.matmul(*args) + r2 = np.dot(*args) + assert_equal(r1, r2) + + r3 = np.matmul(args[0].copy(), args[1].copy()) + assert_equal(r1, r3) + + + +if sys.version_info[:2] >= (3, 5): + class TestMatmulOperator(MatmulCommon): + import operator + matmul = operator.matmul + + def test_array_priority_override(self): + + class A(object): + __array_priority__ = 1000 + + def __matmul__(self, other): + return "A" + + def __rmatmul__(self, other): + return "A" + + a = A() + b = np.ones(2) + assert_equal(self.matmul(a, b), "A") + assert_equal(self.matmul(b, a), "A") + + def test_matmul_raises(self): + assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) + assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) + assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc')) + + def test_matmul_inplace(): + # It would be nice to support in-place matmul eventually, but for now + # we don't have a working implementation, so better just to error out + # and nudge people to writing "a = a @ b". + a = np.eye(3) + b = np.eye(3) + assert_raises(TypeError, a.__imatmul__, b) + import operator + assert_raises(TypeError, operator.imatmul, a, b) + # we avoid writing the token `exec` so as not to crash python 2's + # parser + exec_ = getattr(builtins, "exec") + assert_raises(TypeError, exec_, "a @= b", globals(), locals()) + + def test_matmul_axes(): + a = np.arange(3*4*5).reshape(3, 4, 5) + c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) + assert c.shape == (3, 4, 4) + d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) + assert d.shape == (4, 4, 3) + e = np.swapaxes(d, 0, 2) + assert_array_equal(e, c) + f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) + assert f.shape == (4, 5) + + +class TestInner(object): + + def test_inner_type_mismatch(self): + c = 1. + A = np.array((1,1), dtype='i,i') + + assert_raises(TypeError, np.inner, c, A) + assert_raises(TypeError, np.inner, A, c) + + def test_inner_scalar_and_vector(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + vec = np.array([1, 2], dtype=dt) + desired = np.array([3, 6], dtype=dt) + assert_equal(np.inner(vec, sca), desired) + assert_equal(np.inner(sca, vec), desired) + + def test_vecself(self): + # Ticket 844. 
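+        # --- Editorial aside, not part of the vendored suite: np.inner contracts
+        # over the last axis of both operands, so for 2-D inputs it is A @ B.T.
+        # A minimal sketch with public NumPy API:
+        #
+        #     import numpy as np
+        #     assert np.inner(np.array([1, 2]), np.array([3, 4])) == 11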
+ # Inner product of a vector with itself segfaults or give + # meaningless result + a = np.zeros(shape=(1, 80), dtype=np.float64) + p = np.inner(a, a) + assert_almost_equal(p, 0, decimal=14) + + def test_inner_product_with_various_contiguities(self): + # github issue 6532 + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + # check an inner product involving a matrix transpose + A = np.array([[1, 2], [3, 4]], dtype=dt) + B = np.array([[1, 3], [2, 4]], dtype=dt) + C = np.array([1, 1], dtype=dt) + desired = np.array([4, 6], dtype=dt) + assert_equal(np.inner(A.T, C), desired) + assert_equal(np.inner(C, A.T), desired) + assert_equal(np.inner(B, C), desired) + assert_equal(np.inner(C, B), desired) + # check a matrix product + desired = np.array([[7, 10], [15, 22]], dtype=dt) + assert_equal(np.inner(A, B), desired) + # check the syrk vs. gemm paths + desired = np.array([[5, 11], [11, 25]], dtype=dt) + assert_equal(np.inner(A, A), desired) + assert_equal(np.inner(A, A.copy()), desired) + # check an inner product involving an aliased and reversed view + a = np.arange(5).astype(dt) + b = a[::-1] + desired = np.array(10, dtype=dt).item() + assert_equal(np.inner(b, a), desired) + + def test_3d_tensor(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + a = np.arange(24).reshape(2,3,4).astype(dt) + b = np.arange(24, 48).reshape(2,3,4).astype(dt) + desired = np.array( + [[[[ 158, 182, 206], + [ 230, 254, 278]], + + [[ 566, 654, 742], + [ 830, 918, 1006]], + + [[ 974, 1126, 1278], + [1430, 1582, 1734]]], + + [[[1382, 1598, 1814], + [2030, 2246, 2462]], + + [[1790, 2070, 2350], + [2630, 2910, 3190]], + + [[2198, 2542, 2886], + [3230, 3574, 3918]]]], + dtype=dt + ) + assert_equal(np.inner(a, b), desired) + assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) + + +class TestAlen(object): + def test_basic(self): + m = np.array([1, 2, 3]) + assert_equal(np.alen(m), 3) + + m = np.array([[1, 2, 3], [4, 5, 7]]) + assert_equal(np.alen(m), 2) + + m = [1, 2, 3] + assert_equal(np.alen(m), 3) + + m = [[1, 2, 3], [4, 5, 7]] + assert_equal(np.alen(m), 2) + + def test_singleton(self): + assert_equal(np.alen(5), 1) + + +class TestChoose(object): + def setup(self): + self.x = 2*np.ones((3,), dtype=int) + self.y = 3*np.ones((3,), dtype=int) + self.x2 = 2*np.ones((2, 3), dtype=int) + self.y2 = 3*np.ones((2, 3), dtype=int) + self.ind = [0, 0, 1] + + def test_basic(self): + A = np.choose(self.ind, (self.x, self.y)) + assert_equal(A, [2, 2, 3]) + + def test_broadcast1(self): + A = np.choose(self.ind, (self.x2, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + def test_broadcast2(self): + A = np.choose(self.ind, (self.x, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + +class TestRepeat(object): + def setup(self): + self.m = np.array([1, 2, 3, 4, 5, 6]) + self.m_rect = self.m.reshape((2, 3)) + + def test_basic(self): + A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + def test_broadcast1(self): + A = np.repeat(self.m, 2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + def test_axis_spec(self): + A = np.repeat(self.m_rect, [2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + def test_broadcast2(self): + A = np.repeat(self.m_rect, 2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, 2, axis=1) + 
assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + +# TODO: test for multidimensional +NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} + + +@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object']) +class TestNeighborhoodIter(object): + # Simple, 2d tests + def test_simple2d(self, dt): + # Test zero and one padding for simple data type + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), + np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), + np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), + np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), + np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), + np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), + np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) + assert_array_equal(l, r) + + def test_mirror2d(self, dt): + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), + np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Simple, 1d tests + def test_simple(self, dt): + # Test padding with constant values + x = np.linspace(1, 5, 5).astype(dt) + r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[4], NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test mirror modes + def test_mirror(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[1], NEIGH_MODE['mirror']) + assert_([i.dtype == dt for i in l]) + assert_array_equal(l, r) + + # Circular mode + def test_circular(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + +# Test stacking neighborhood iterators +class TestStackedNeighborhoodIter(object): + # Simple, 1d test: stacking 2 constant-padded neigh iterators + def test_simple_const(self): + dt = np.float64 + # Test zero and one padding for simple data type + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0], dtype=dt), + np.array([0], dtype=dt), + np.array([1], dtype=dt), + np.array([2], 
dtype=dt), + np.array([3], dtype=dt), + np.array([0], dtype=dt), + np.array([0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([1, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) + assert_array_equal(l, r) + + # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # mirror padding + def test_simple_mirror(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 1], dtype=dt), + np.array([1, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 3], dtype=dt), + np.array([3, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # circular padding + def test_simple_circular(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 3, 1], dtype=dt), + np.array([3, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 1], dtype=dt), + np.array([3, 1, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 
3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator + # being strictly within the array + def test_simple_strict_within(self): + dt = np.float64 + # Stacking zero on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + +class TestWarnings(object): + + def test_complex_warning(self): + x = np.array([1, 2]) + y = np.array([1-2j, 1+2j]) + + with warnings.catch_warnings(): + warnings.simplefilter("error", np.ComplexWarning) + assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) + assert_equal(x, [1, 2]) + + +class TestMinScalarType(object): + + def test_usigned_shortshort(self): + dt = np.min_scalar_type(2**8-1) + wanted = np.dtype('uint8') + assert_equal(wanted, dt) + + def test_usigned_short(self): + dt = np.min_scalar_type(2**16-1) + wanted = np.dtype('uint16') + assert_equal(wanted, dt) + + def test_usigned_int(self): + dt = np.min_scalar_type(2**32-1) + wanted = np.dtype('uint32') + assert_equal(wanted, dt) + + def test_usigned_longlong(self): + dt = np.min_scalar_type(2**63-1) + wanted = np.dtype('uint64') + assert_equal(wanted, dt) + + def test_object(self): + dt = np.min_scalar_type(2**64) + wanted = np.dtype('O') + assert_equal(wanted, dt) + + +from numpy.core._internal import _dtype_from_pep3118 + + +class TestPEP3118Dtype(object): + def _check(self, spec, wanted): + dt = np.dtype(wanted) + actual = _dtype_from_pep3118(spec) + assert_equal(actual, dt, + err_msg="spec %r != dtype %r" % (spec, wanted)) + + def test_native_padding(self): + align = np.dtype('i').alignment + for j in range(8): + if j == 0: + s = 'bi' + else: + s = 'b%dxi' % j + self._check('@'+s, {'f0': ('i1', 0), + 'f1': ('i', align*(1 + j//align))}) + self._check('='+s, {'f0': ('i1', 0), + 'f1': ('i', 1+j)}) + + def test_native_padding_2(self): + # Native padding should work also for structs and sub-arrays + self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) + self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) + + def test_trailing_padding(self): + # Trailing padding should be included, *and*, the item size + # should match the alignment if in aligned mode + align = np.dtype('i').alignment + size = 
np.dtype('i').itemsize + + def aligned(n): + return align*(1 + (n-1)//align) + + base = dict(formats=['i'], names=['f0']) + + self._check('ix', dict(itemsize=aligned(size + 1), **base)) + self._check('ixx', dict(itemsize=aligned(size + 2), **base)) + self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) + self._check('ixxxx', dict(itemsize=aligned(size + 4), **base)) + self._check('i7x', dict(itemsize=aligned(size + 7), **base)) + + self._check('^ix', dict(itemsize=size + 1, **base)) + self._check('^ixx', dict(itemsize=size + 2, **base)) + self._check('^ixxx', dict(itemsize=size + 3, **base)) + self._check('^ixxxx', dict(itemsize=size + 4, **base)) + self._check('^i7x', dict(itemsize=size + 7, **base)) + + def test_native_padding_3(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), + ('sub', np.dtype('b,i')), ('c', 'i')], + align=True) + self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) + + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), + ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) + self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) + + def test_padding_with_array_inside_struct(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), + ('d', 'i')], + align=True) + self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) + + def test_byteorder_inside_struct(self): + # The byte order after @T{=i} should be '=', not '@'. + # Check this by noting the absence of native alignment. + self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), + 'f1': ('i', 5)}) + + def test_intra_padding(self): + # Natively aligned sub-arrays may require some internal padding + align = np.dtype('i').alignment + size = np.dtype('i').itemsize + + def aligned(n): + return (align*(1 + (n-1)//align)) + + self._check('(3)T{ix}', (dict( + names=['f0'], + formats=['i'], + offsets=[0], + itemsize=aligned(size + 1) + ), (3,))) + + def test_char_vs_string(self): + dt = np.dtype('c') + self._check('c', dt) + + dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')]) + self._check('4c4s', dt) + + def test_field_order(self): + # gh-9053 - previously, we relied on dictionary key order + self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')]) + self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')]) + + def test_unnamed_fields(self): + self._check('ii', [('f0', 'i'), ('f1', 'i')]) + self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')]) + + self._check('i', 'i') + self._check('i:f0:', [('f0', 'i')]) + + +class TestNewBufferProtocol(object): + """ Test PEP3118 buffers """ + + def _check_roundtrip(self, obj): + obj = np.asarray(obj) + x = memoryview(obj) + y = np.asarray(x) + y2 = np.array(x) + assert_(not y.flags.owndata) + assert_(y2.flags.owndata) + + assert_equal(y.dtype, obj.dtype) + assert_equal(y.shape, obj.shape) + assert_array_equal(obj, y) + + assert_equal(y2.dtype, obj.dtype) + assert_equal(y2.shape, obj.shape) + assert_array_equal(obj, y2) + + def test_roundtrip(self): + x = np.array([1, 2, 3, 4, 5], dtype='i4') + self._check_roundtrip(x) + + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + self._check_roundtrip(x) + + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + self._check_roundtrip(x) + + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + 
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + b'aaaa', 'bbbb', b'xxx', True, 1.0)], + dtype=dt) + self._check_roundtrip(x) + + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='>i2') + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='') + x = np.zeros(4, dtype=dt) + self._check_roundtrip(x) + + def test_roundtrip_scalar(self): + # Issue #4015. + self._check_roundtrip(0) + + def test_invalid_buffer_format(self): + # datetime64 cannot be used fully in a buffer yet + # Should be fixed in the next Numpy major release + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(3, dt) + assert_raises((ValueError, BufferError), memoryview, a) + assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]')) + + def test_export_simple_1d(self): + x = np.array([1, 2, 3, 4, 5], dtype='i') + y = memoryview(x) + assert_equal(y.format, 'i') + assert_equal(y.shape, (5,)) + assert_equal(y.ndim, 1) + assert_equal(y.strides, (4,)) + assert_equal(y.suboffsets, EMPTY) + assert_equal(y.itemsize, 4) + + def test_export_simple_nd(self): + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + y = memoryview(x) + assert_equal(y.format, 'd') + assert_equal(y.shape, (2, 2)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (16, 8)) + assert_equal(y.suboffsets, EMPTY) + assert_equal(y.itemsize, 8) + + def test_export_discontiguous(self): + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + y = memoryview(x) + assert_equal(y.format, 'f') + assert_equal(y.shape, (3, 3)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (36, 4)) + assert_equal(y.suboffsets, EMPTY) + assert_equal(y.itemsize, 4) + + def test_export_record(self): + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + b'aaaa', 'bbbb', b' ', True, 1.0)], + dtype=dt) + y = memoryview(x) + assert_equal(y.shape, (1,)) + assert_equal(y.ndim, 1) + assert_equal(y.suboffsets, EMPTY) + + sz = sum([np.dtype(b).itemsize for a, b in dt]) + if np.dtype('l').itemsize == 4: + assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + else: + assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides + if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): + assert_equal(y.strides, (sz,)) + assert_equal(y.itemsize, sz) + + def test_export_subarray(self): + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) + y = memoryview(x) + assert_equal(y.format, 'T{(2,2)i:a:}') + assert_equal(y.shape, EMPTY) + assert_equal(y.ndim, 0) + assert_equal(y.strides, EMPTY) + assert_equal(y.suboffsets, EMPTY) + assert_equal(y.itemsize, 16) + + def test_export_endian(self): + x = np.array([1, 2, 3], dtype='>i') + y = memoryview(x) + if sys.byteorder == 'little': + assert_equal(y.format, '>i') + else: + assert_equal(y.format, 'i') + + x = np.array([1, 2, 3], dtype=' 2: + with assert_raises_regex( + NotImplementedError, + r"Unrepresentable .* 'u' \(UCS-2 strings\)" + ): + raise 
exc.__cause__ + + def test_ctypes_integer_via_memoryview(self): + # gh-11150, due to bpo-10746 + for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}: + value = c_integer(42) + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) + np.asarray(value) + + def test_ctypes_struct_via_memoryview(self): + # gh-10528 + class foo(ctypes.Structure): + _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)] + f = foo(a=1, b=2) + + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) + arr = np.asarray(f) + + assert_equal(arr['a'], 1) + assert_equal(arr['b'], 2) + f.a = 3 + assert_equal(arr['a'], 3) + + +class TestArrayAttributeDeletion(object): + + def test_multiarray_writable_attributes_deletion(self): + # ticket #2046, should not seqfault, raise AttributeError + a = np.ones(2) + attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Assigning the 'data' attribute") + for s in attr: + assert_raises(AttributeError, delattr, a, s) + + def test_multiarray_not_writable_attributes_deletion(self): + a = np.ones(2) + attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base", + "ctypes", "T", "__array_interface__", "__array_struct__", + "__array_priority__", "__array_finalize__"] + for s in attr: + assert_raises(AttributeError, delattr, a, s) + + def test_multiarray_flags_writable_attribute_deletion(self): + a = np.ones(2).flags + attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable'] + for s in attr: + assert_raises(AttributeError, delattr, a, s) + + def test_multiarray_flags_not_writable_attribute_deletion(self): + a = np.ones(2).flags + attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran", + "owndata", "fnc", "forc", "behaved", "carray", "farray", + "num"] + for s in attr: + assert_raises(AttributeError, delattr, a, s) + + +class TestArrayInterface(): + class Foo(object): + def __init__(self, value): + self.value = value + self.iface = {'typestr': 'f8'} + + def __float__(self): + return float(self.value) + + @property + def __array_interface__(self): + return self.iface + + + f = Foo(0.5) + + @pytest.mark.parametrize('val, iface, expected', [ + (f, {}, 0.5), + ([f], {}, [0.5]), + ([f, f], {}, [0.5, 0.5]), + (f, {'shape': ()}, 0.5), + (f, {'shape': None}, TypeError), + (f, {'shape': (1, 1)}, [[0.5]]), + (f, {'shape': (2,)}, ValueError), + (f, {'strides': ()}, 0.5), + (f, {'strides': (2,)}, ValueError), + (f, {'strides': 16}, TypeError), + ]) + def test_scalar_interface(self, val, iface, expected): + # Test scalar coercion within the array interface + self.f.iface = {'typestr': 'f8'} + self.f.iface.update(iface) + if HAS_REFCOUNT: + pre_cnt = sys.getrefcount(np.dtype('f8')) + if isinstance(expected, type): + assert_raises(expected, np.array, val) + else: + result = np.array(val) + assert_equal(np.array(val), expected) + assert result.dtype == 'f8' + del result + if HAS_REFCOUNT: + post_cnt = sys.getrefcount(np.dtype('f8')) + assert_equal(pre_cnt, post_cnt) + +def test_interface_no_shape(): + class ArrayLike(object): + array = np.array(1) + __array_interface__ = array.__array_interface__ + assert_equal(np.array(ArrayLike()), 1) + + +def test_array_interface_itemsize(): + # See gh-6361 + my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], + 'offsets': [0, 8], 'itemsize': 16}) + a = np.ones(10, dtype=my_dtype) + descr_t = 
np.dtype(a.__array_interface__['descr']) + typestr_t = np.dtype(a.__array_interface__['typestr']) + assert_equal(descr_t.itemsize, typestr_t.itemsize) + + +def test_array_interface_empty_shape(): + # See gh-7994 + arr = np.array([1, 2, 3]) + interface1 = dict(arr.__array_interface__) + interface1['shape'] = () + + class DummyArray1(object): + __array_interface__ = interface1 + + # NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting + # the interface data to bytes would invoke the bug this tests for, that + # __array_interface__ with shape=() is not allowed if the data is an object + # exposing the buffer interface + interface2 = dict(interface1) + interface2['data'] = arr[0].tobytes() + + class DummyArray2(object): + __array_interface__ = interface2 + + arr1 = np.asarray(DummyArray1()) + arr2 = np.asarray(DummyArray2()) + arr3 = arr[:1].reshape(()) + assert_equal(arr1, arr2) + assert_equal(arr1, arr3) + + +def test_flat_element_deletion(): + it = np.ones(3).flat + try: + del it[1] + del it[1:2] + except TypeError: + pass + except Exception: + raise AssertionError + + +def test_scalar_element_deletion(): + a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) + assert_raises(ValueError, a[0].__delitem__, 'x') + + +class TestMemEventHook(object): + def test_mem_seteventhook(self): + # The actual tests are within the C code in + # multiarray/_multiarray_tests.c.src + _multiarray_tests.test_pydatamem_seteventhook_start() + # force an allocation and free of a numpy array + # needs to be larger then limit of small memory cacher in ctors.c + a = np.zeros(1000) + del a + gc.collect() + _multiarray_tests.test_pydatamem_seteventhook_end() + +class TestMapIter(object): + def test_mapiter(self): + # The actual tests are within the C code in + # multiarray/_multiarray_tests.c.src + + a = np.arange(12).reshape((3, 4)).astype(float) + index = ([1, 1, 2, 0], + [0, 0, 2, 3]) + vals = [50, 50, 30, 16] + + _multiarray_tests.test_inplace_increment(a, index, vals) + assert_equal(a, [[0.00, 1., 2.0, 19.], + [104., 5., 6.0, 7.0], + [8.00, 9., 40., 11.]]) + + b = np.arange(6).astype(float) + index = (np.array([1, 2, 0]),) + vals = [50, 4, 100.1] + _multiarray_tests.test_inplace_increment(b, index, vals) + assert_equal(b, [100.1, 51., 6., 3., 4., 5.]) + + +class TestAsCArray(object): + def test_1darray(self): + array = np.arange(24, dtype=np.double) + from_c = _multiarray_tests.test_as_c_array(array, 3) + assert_equal(array[3], from_c) + + def test_2darray(self): + array = np.arange(24, dtype=np.double).reshape(3, 8) + from_c = _multiarray_tests.test_as_c_array(array, 2, 4) + assert_equal(array[2, 4], from_c) + + def test_3darray(self): + array = np.arange(24, dtype=np.double).reshape(2, 3, 4) + from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3) + assert_equal(array[1, 2, 3], from_c) + + +class TestConversion(object): + def test_array_scalar_relational_operation(self): + # All integer + for dt1 in np.typecodes['AllInteger']: + assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + # Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(-1 
!= np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + + # Unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + # Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + def test_to_bool_scalar(self): + assert_equal(bool(np.array([False])), False) + assert_equal(bool(np.array([True])), True) + assert_equal(bool(np.array([[42]])), True) + assert_raises(ValueError, bool, np.array([1, 2])) + + class NotConvertible(object): + def __bool__(self): + raise NotImplementedError + __nonzero__ = __bool__ # python 2 + + assert_raises(NotImplementedError, bool, np.array(NotConvertible())) + assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) + + self_containing = np.array([None]) + self_containing[0] = self_containing + try: + Error = RecursionError + except NameError: + Error = RuntimeError # python < 3.5 + assert_raises(Error, bool, self_containing) # previously stack overflow + self_containing[0] = None # resolve circular reference + + def test_to_int_scalar(self): + # gh-9972 means that these aren't always the same + int_funcs = (int, lambda x: x.__int__()) + for int_func in int_funcs: + assert_equal(int_func(np.array([1])), 1) + assert_equal(int_func(np.array([0])), 0) + assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1, 2])) + + # gh-9972 + assert_equal(4, int_func(np.array('4'))) + assert_equal(5, int_func(np.bytes_(b'5'))) + assert_equal(6, int_func(np.unicode_(u'6'))) + + class HasTrunc: + def __trunc__(self): + return 3 + assert_equal(3, int_func(np.array(HasTrunc()))) + assert_equal(3, int_func(np.array([HasTrunc()]))) + + class NotConvertible(object): + def __int__(self): + raise NotImplementedError + assert_raises(NotImplementedError, + int_func, np.array(NotConvertible())) + assert_raises(NotImplementedError, + int_func, np.array([NotConvertible()])) + + +class TestWhere(object): + def test_basic(self): + dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, + np.longdouble, np.clongdouble] + for dt in dts: + c = np.ones(53, dtype=bool) + assert_equal(np.where( c, dt(0), dt(1)), dt(0)) + assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) + assert_equal(np.where(True, dt(0), dt(1)), dt(0)) + assert_equal(np.where(False, dt(0), dt(1)), dt(1)) + d = np.ones_like(c).astype(dt) + e = np.zeros_like(d) + r = d.astype(dt) + c[7] = False + r[7] = e[7] + assert_equal(np.where(c, e, e), e) + assert_equal(np.where(c, d, e), r) + assert_equal(np.where(c, d, e[0]), r) + assert_equal(np.where(c, d[0], e), r) + assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) + assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) + assert_equal(np.where(c[::3], d[::3], 
e[::3]), r[::3]) + assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) + assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) + assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) + assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) + + def test_exotic(self): + # object + assert_array_equal(np.where(True, None, None), np.array(None)) + # zero sized + m = np.array([], dtype=bool).reshape(0, 3) + b = np.array([], dtype=np.float64).reshape(0, 3) + assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) + + # object cast + d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, + 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, + 1.267, 0.229, -1.39, 0.487]) + nan = float('NaN') + e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, + 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], + dtype=object) + m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, + 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) + + r = e[:] + r[np.where(m)] = d[np.where(m)] + assert_array_equal(np.where(m, d, e), r) + + r = e[:] + r[np.where(~m)] = d[np.where(~m)] + assert_array_equal(np.where(m, e, d), r) + + assert_array_equal(np.where(m, e, e), e) + + # minimal dtype result with NaN scalar (e.g required by pandas) + d = np.array([1., 2.], dtype=np.float32) + e = float('NaN') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('-Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + # also check upcast + e = float(1e150) + assert_equal(np.where(True, d, e).dtype, np.float64) + + def test_ndim(self): + c = [True, False] + a = np.zeros((2, 25)) + b = np.ones((2, 25)) + r = np.where(np.array(c)[:,np.newaxis], a, b) + assert_array_equal(r[0], a[0]) + assert_array_equal(r[1], b[0]) + + a = a.T + b = b.T + r = np.where(c, a, b) + assert_array_equal(r[:,0], a[:,0]) + assert_array_equal(r[:,1], b[:,0]) + + def test_dtype_mix(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + a = np.uint32(1) + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + a = a.astype(np.float32) + b = b.astype(np.int64) + assert_equal(np.where(c, a, b), r) + + # non bool mask + c = c.astype(int) + c[c != 0] = 34242324 + assert_equal(np.where(c, a, b), r) + # invert + tmpmask = c != 0 + c[c == 0] = 41247212 + c[tmpmask] = 0 + assert_equal(np.where(c, b, a), r) + + def test_foreign(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + a = np.ones(1, dtype='>i4') + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + b = b.astype('>f8') + assert_equal(np.where(c, a, b), r) + + a = a.astype('i4') + assert_equal(np.where(c, a, b), r) + + def test_error(self): + c = [True, True] + a = np.ones((4, 5)) + b = np.ones((5, 5)) + assert_raises(ValueError, np.where, c, a, a) + assert_raises(ValueError, np.where, c[0], a, b) + + def test_string(self): + # gh-4778 check strings are properly filled with nulls + a = np.array("abc") + b = np.array("x" * 753) + assert_equal(np.where(True, a, b), "abc") + assert_equal(np.where(False, b, a), "abc") + + # check 
native datatype sized strings + a = np.array("abcd") + b = np.array("x" * 8) + assert_equal(np.where(True, a, b), "abcd") + assert_equal(np.where(False, b, a), "abcd") + + def test_empty_result(self): + # pass empty where result through an assignment which reads the data of + # empty arrays, error detectable with valgrind, see gh-8922 + x = np.zeros((1, 1)) + ibad = np.vstack(np.where(x == 99.)) + assert_array_equal(ibad, + np.atleast_2d(np.array([[],[]], dtype=np.intp))) + + def test_largedim(self): + # invalid read regression gh-9304 + shape = [10, 2, 3, 4, 5, 6] + np.random.seed(2) + array = np.random.rand(*shape) + + for i in range(10): + benchmark = array.nonzero() + result = array.nonzero() + assert_array_equal(benchmark, result) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf(object): + + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) + + def check_array(self, dtype): + elem_size = dtype(0).itemsize + + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) + + def test_array_int32(self): + self.check_array(np.int32) + + def test_array_int64(self): + self.check_array(np.int64) + + def test_array_float32(self): + self.check_array(np.float32) + + def test_array_float64(self): + self.check_array(np.float64) + + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + + def test_reshape(self): + d = np.ones(100) + assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) + assert_(old < sys.getsizeof(d)) + + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestHashing(object): + + def test_arrays_not_hashable(self): + x = np.ones(3) + assert_raises(TypeError, hash, x) + + def test_collections_hashable(self): + x = np.array([]) + assert_(not isinstance(x, collections_abc.Hashable)) + + +class TestArrayPriority(object): + # This will go away when __array_priority__ is settled, meanwhile + # it serves to check unintended changes. + op = operator + binary_ops = [ + op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, + op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, + op.ge, op.lt, op.le, op.ne, op.eq + ] + + # See #7949. Don't use "/" operator With -3 switch, since python reports it + # as a DeprecationWarning + if sys.version_info[0] < 3 and not sys.py3kwarning: + binary_ops.append(op.div) + + class Foo(np.ndarray): + __array_priority__ = 100. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Bar(np.ndarray): + __array_priority__ = 101. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Other(object): + __array_priority__ = 1000. 
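+        # --- Editorial aside, not part of the vendored suite: the delegation rule
+        # this class exercises, as a minimal sketch (the class name Loud is
+        # hypothetical). A higher __array_priority__ plus a reflected method makes
+        # ndarray's binary op return NotImplemented, so Python falls through to
+        # the other operand:
+        #
+        #     import numpy as np
+        #     class Loud(object):
+        #         __array_priority__ = 1000.
+        #         def __radd__(self, other):
+        #             return "Loud wins"
+        #     assert np.ones(2) + Loud() == "Loud wins"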
+ + def _all(self, other): + return self.__class__() + + __add__ = __radd__ = _all + __sub__ = __rsub__ = _all + __mul__ = __rmul__ = _all + __pow__ = __rpow__ = _all + __div__ = __rdiv__ = _all + __mod__ = __rmod__ = _all + __truediv__ = __rtruediv__ = _all + __floordiv__ = __rfloordiv__ = _all + __and__ = __rand__ = _all + __xor__ = __rxor__ = _all + __or__ = __ror__ = _all + __lshift__ = __rlshift__ = _all + __rshift__ = __rrshift__ = _all + __eq__ = _all + __ne__ = _all + __gt__ = _all + __ge__ = _all + __lt__ = _all + __le__ = _all + + def test_ndarray_subclass(self): + a = np.array([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_ndarray_other(self): + a = np.array([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + def test_subclass_subclass(self): + a = self.Foo([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_subclass_other(self): + a = self.Foo([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + +class TestBytestringArrayNonzero(object): + + def test_empty_bstring_array_is_falsey(self): + assert_(not np.array([''], dtype=str)) + + def test_whitespace_bstring_array_is_falsey(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0\0' + assert_(not a) + + def test_all_null_bstring_array_is_falsey(self): + a = np.array(['spam'], dtype=str) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_bstring_array_is_truthy(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0 \0' + assert_(a) + + +class TestUnicodeArrayNonzero(object): + + def test_empty_ustring_array_is_falsey(self): + assert_(not np.array([''], dtype=np.unicode)) + + def test_whitespace_ustring_array_is_falsey(self): + a = np.array(['eggs'], dtype=np.unicode) + a[0] = ' \0\0' + assert_(not a) + + def test_all_null_ustring_array_is_falsey(self): + a = np.array(['eggs'], dtype=np.unicode) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_ustring_array_is_truthy(self): + a = np.array(['eggs'], dtype=np.unicode) + a[0] = ' \0 \0' + assert_(a) + + +class TestFormat(object): + + def test_0d(self): + a = np.array(np.pi) + assert_equal('{:0.3g}'.format(a), '3.14') + assert_equal('{:0.3g}'.format(a[()]), '3.14') + + def test_1d_no_format(self): + a = np.array([np.pi]) + assert_equal('{}'.format(a), str(a)) + + def test_1d_format(self): + # until gh-5543, ensure that the behaviour matches what it used to be + a = np.array([np.pi]) + if sys.version_info[:2] >= (3, 4): + assert_raises(TypeError, '{:30}'.format, a) + else: + with suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + res = '{:30}'.format(a) + dst = object.__format__(a, '30') + assert_equal(res, dst) + +class TestCTypes(object): + + def test_ctypes_is_available(self): + test_arr = np.array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(ctypes, test_arr.ctypes._ctypes) + assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + + def test_ctypes_is_not_available(self): + from numpy.core import _internal + _internal.ctypes = None + try: + test_arr = np.array([[1, 2, 3], [4, 5, 6]]) + + assert_(isinstance(test_arr.ctypes._ctypes, + 
_internal._missing_ctypes)) + assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + finally: + _internal.ctypes = ctypes + + def _make_readonly(x): + x.flags.writeable = False + return x + + @pytest.mark.parametrize('arr', [ + np.array([1, 2, 3]), + np.array([['one', 'two'], ['three', 'four']]), + np.array((1, 2), dtype='i4,i4'), + np.zeros((2,), dtype= + np.dtype(dict( + formats=['2, [44, 55]) + assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) + # hit one of the failing paths + assert_raises(ValueError, np.place, a, a>20, []) + + def test_put_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + np.put(a, [0, 2], [44, 55]) + assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) + + def test_putmask_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + # uses arr_putmask + np.putmask(a, a>2, a**2) + assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) + + def test_take_mode_raise(self): + a = np.arange(6, dtype='int') + out = np.empty(2, dtype='int') + np.take(a, [0, 2], out=out, mode='raise') + assert_equal(out, np.array([0, 2])) + + def test_choose_mod_raise(self): + a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) + out = np.empty((3,3), dtype='int') + choices = [-10, 10] + np.choose(a, choices, out=out, mode='raise') + assert_equal(out, np.array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]])) + + def test_flatiter__array__(self): + a = np.arange(9).reshape(3,3) + b = a.T.flat + c = b.__array__() + # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics + del c + + def test_dot_out(self): + # if HAVE_CBLAS, will use WRITEBACKIFCOPY + a = np.arange(9, dtype=float).reshape(3,3) + b = np.dot(a, a, out=a) + assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) + + def test_view_assign(self): + from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve + + arr = np.arange(9).reshape(3, 3).T + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_resolve(arr_wb) + # arr changes after resolve, even though we assigned to arr_wb + assert_equal(arr, -100) + # after resolve, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + # assigning to arr_wb does not get transferred to arr + arr_wb[...] = 100 + assert_equal(arr, -100) + + def test_dealloc_warning(self): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + arr = np.arange(9).reshape(3, 3) + v = arr.T + _multiarray_tests.npy_abuse_writebackifcopy(v) + assert len(sup.log) == 1 + + def test_view_discard_refcount(self): + from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard + + arr = np.arange(9).reshape(3, 3).T + orig = arr.copy() + if HAS_REFCOUNT: + arr_cnt = sys.getrefcount(arr) + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_discard(arr_wb) + # arr remains unchanged after discard + assert_equal(arr, orig) + # after discard, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + if HAS_REFCOUNT: + assert_equal(arr_cnt, sys.getrefcount(arr)) + # assigning to arr_wb does not get transferred to arr + arr_wb[...] 
= 100 + assert_equal(arr, orig) + + +class TestArange(object): + def test_infinite(self): + assert_raises_regex( + ValueError, "size exceeded", + np.arange, 0, np.inf + ) + + def test_nan_step(self): + assert_raises_regex( + ValueError, "cannot compute length", + np.arange, 0, 1, np.nan + ) + + def test_zero_step(self): + assert_raises(ZeroDivisionError, np.arange, 0, 10, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0) + + # empty range + assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) + + +class TestArrayFinalize(object): + """ Tests __array_finalize__ """ + + def test_receives_base(self): + # gh-11237 + class SavesBase(np.ndarray): + def __array_finalize__(self, obj): + self.saved_base = self.base + + a = np.array(1).view(SavesBase) + assert_(a.saved_base is a.base) + + def test_lifetime_on_error(self): + # gh-11237 + class RaisesInFinalize(np.ndarray): + def __array_finalize__(self, obj): + # crash, but keep this object alive + raise Exception(self) + + # a plain object can't be weakref'd + class Dummy(object): pass + + # get a weak reference to an object within an array + obj_arr = np.array(Dummy()) + obj_ref = weakref.ref(obj_arr[()]) + + # get an array that crashed in __array_finalize__ + with assert_raises(Exception) as e: + obj_arr.view(RaisesInFinalize) + if sys.version_info.major == 2: + # prevent an extra reference being kept + sys.exc_clear() + + obj_subarray = e.exception.args[0] + del e + assert_(isinstance(obj_subarray, RaisesInFinalize)) + + # reference should still be held by obj_arr + gc.collect() + assert_(obj_ref() is not None, "object should not already be dead") + + del obj_arr + gc.collect() + assert_(obj_ref() is not None, "obj_arr should not hold the last reference") + + del obj_subarray + gc.collect() + assert_(obj_ref() is None, "no references should remain") + + +def test_orderconverter_with_nonASCII_unicode_ordering(): + # gh-7475 + a = np.arange(5) + assert_raises(ValueError, a.flatten, order=u'\xe2') + + +def test_equal_override(): + # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which + # did not respect overrides with __array_priority__ or __array_ufunc__. + # The PR fixed this for __array_priority__ and __array_ufunc__ = None. 
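+ # In both cases ndarray.__eq__ and __ne__ return NotImplemented, so
+ # Python falls back to the reflected MyAlwaysEqual methods: a high
+ # __array_priority__ makes the ndarray defer outright, and
+ # __array_ufunc__ = None makes np.equal/np.not_equal refuse the operand.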
+ class MyAlwaysEqual(object): + def __eq__(self, other): + return "eq" + + def __ne__(self, other): + return "ne" + + class MyAlwaysEqualOld(MyAlwaysEqual): + __array_priority__ = 10000 + + class MyAlwaysEqualNew(MyAlwaysEqual): + __array_ufunc__ = None + + array = np.array([(0, 1), (2, 3)], dtype='i4,i4') + for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew: + my_always_equal = my_always_equal_cls() + assert_equal(my_always_equal == array, 'eq') + assert_equal(array == my_always_equal, 'eq') + assert_equal(my_always_equal != array, 'ne') + assert_equal(array != my_always_equal, 'ne') + + +def test_npymath_complex(): + # Smoketest npymath functions + from numpy.core._multiarray_tests import ( + npy_cabs, npy_carg) + + funcs = {npy_cabs: np.absolute, + npy_carg: np.angle} + vals = (1, np.inf, -np.inf, np.nan) + types = (np.complex64, np.complex128, np.clongdouble) + + for fun, npfun in funcs.items(): + for x, y in itertools.product(vals, vals): + for t in types: + z = t(complex(x, y)) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + + +def test_npymath_real(): + # Smoketest npymath functions + from numpy.core._multiarray_tests import ( + npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) + + funcs = {npy_log10: np.log10, + npy_cosh: np.cosh, + npy_sinh: np.sinh, + npy_tan: np.tan, + npy_tanh: np.tanh} + vals = (1, np.inf, -np.inf, np.nan) + types = (np.float32, np.float64, np.longdouble) + + with np.errstate(all='ignore'): + for fun, npfun in funcs.items(): + for x, t in itertools.product(vals, types): + z = t(x) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + +def test_uintalignment_and_alignment(): + # alignment code needs to satisfy these requirements: + # 1. numpy structs match C struct layout + # 2. ufuncs/casting is safe wrt aligned access + # 3. copy code is safe wrt "uint aligned" access + # + # Complex types are the main problem, whose alignment may not be the same + # as their "uint alignment". + # + # This test might only fail on certain platforms, where uint64 alignment is + # not equal to complex64 alignment. The last two checks will only fail + # for DEBUG=1. + + d1 = np.dtype('u1,c8', align=True) + d2 = np.dtype('u4,c8', align=True) + d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True) + + assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False) + + # check that C struct matches numpy struct size + s = _multiarray_tests.get_struct_alignments() + for d, (alignment, size) in zip([d1,d2,d3], s): + assert_equal(d.alignment, alignment) + assert_equal(d.itemsize, size) + + # check that ufuncs don't complain in debug mode + # (this is probably OK if the aligned flag is true above) + src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often + np.exp(src) # assert fails? + + # check that copy code doesn't complain in debug mode + dst = np.zeros((2,2), dtype='c8') + dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails? + +class TestAlignment(object): + # adapted from scipy._lib.tests.test__util.test__aligned_zeros + # Checks that unusual memory alignments don't trip up numpy.
+ # In particular, check RELAXED_STRIDES don't trip alignment assertions in + # NDEBUG mode for size-0 arrays (gh-12503) + + def check(self, shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError() + + def test_various_alignments(self): + for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: + if dtype == 'O': + # object dtype can't be misaligned + continue + for shape in [n, (1, 2, 3, n)]: + self.check(shape, np.dtype(dtype), order, align) + + def test_strided_loop_alignments(self): + # particularly test that complex64 and float128 use right alignment + # code-paths, since these are particularly problematic. It is useful to + # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run. + for align in [1, 2, 4, 8, 12, 16, None]: + xf64 = _aligned_zeros(3, np.float64) + + xc64 = _aligned_zeros(3, np.complex64, align=align) + xf128 = _aligned_zeros(3, np.longdouble, align=align) + + # test casting, both to and from misaligned + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning, "Casting complex values") + xc64.astype('f8') + xf64.astype(np.complex64) + test = xc64 + xf64 + + xf128.astype('f8') + xf64.astype(np.longdouble) + test = xf128 + xf64 + + test = xf128 + xc64 + + # test copy, both to and from misaligned + # contig copy + xf64[:] = xf64.copy() + xc64[:] = xc64.copy() + xf128[:] = xf128.copy() + # strided copy + xf64[::2] = xf64[::2].copy() + xc64[::2] = xc64[::2].copy() + xf128[::2] = xf128[::2].copy() + +def test_getfield(): + a = np.arange(32, dtype='uint16') + if sys.byteorder == 'little': + i = 0 + j = 1 + else: + i = 1 + j = 0 + b = a.getfield('int8', i) + assert_equal(b, a) + b = a.getfield('int8', j) + assert_equal(b, 0) + pytest.raises(ValueError, a.getfield, 'uint8', -1) + pytest.raises(ValueError, a.getfield, 'uint8', 16) + pytest.raises(ValueError, a.getfield, 'uint64', 0) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.pyc new file mode 100644 index 0000000..bcf8596 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.py new file mode 100644 index 0000000..26fd9c3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.py @@ -0,0 +1,2861 @@ +from __future__ import division, absolute_import, print_function + +import sys +import pytest + +import numpy as np +import numpy.core._multiarray_tests as _multiarray_tests +from numpy import array, arange, nditer, all +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, + HAS_REFCOUNT, suppress_warnings + ) + + +def iter_multi_index(i): 
+ ret = [] + while not i.finished: + ret.append(i.multi_index) + i.iternext() + return ret + +def iter_indices(i): + ret = [] + while not i.finished: + ret.append(i.index) + i.iternext() + return ret + +def iter_iterindices(i): + ret = [] + while not i.finished: + ret.append(i.iterindex) + i.iternext() + return ret + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_iter_refcount(): + # Make sure the iterator doesn't leak + + # Basic + a = arange(6) + dt = np.dtype('f4').newbyteorder() + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='unsafe', + op_dtypes=[dt]) as it: + assert_(not it.iterationneedsapi) + assert_(sys.getrefcount(a) > rc_a) + assert_(sys.getrefcount(dt) > rc_dt) + # del 'it' + it = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + # With a copy + a = arange(6, dtype='f4') + dt = np.dtype('f4') + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + it = nditer(a, [], + [['readwrite']], + op_dtypes=[dt]) + rc2_a = sys.getrefcount(a) + rc2_dt = sys.getrefcount(dt) + it2 = it.copy() + assert_(sys.getrefcount(a) > rc2_a) + assert_(sys.getrefcount(dt) > rc2_dt) + it = None + assert_equal(sys.getrefcount(a), rc2_a) + assert_equal(sys.getrefcount(dt), rc2_dt) + it2 = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + del it2 # avoid pyflakes unused variable warning + +def test_iter_best_order(): + # The iterator should always find the iteration order + # with increasing memory addresses + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, [], [['readonly']]) + assert_equal([x for x in i], a) + # Fortran-order + i = nditer(aview.T, [], [['readonly']]) + assert_equal([x for x in i], a) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) + assert_equal([x for x in i], a) + +def test_iter_c_order(): + # Test forcing C order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='C') + assert_equal([x for x in i], aview.ravel(order='C')) + # Fortran-order + i = nditer(aview.T, order='C') + assert_equal([x for x in i], aview.T.ravel(order='C')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='C') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='C')) + +def test_iter_f_order(): + # Test forcing F order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index 
= [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='F') + assert_equal([x for x in i], aview.ravel(order='F')) + # Fortran-order + i = nditer(aview.T, order='F') + assert_equal([x for x in i], aview.T.ravel(order='F')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='F') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='F')) + +def test_iter_c_or_f_order(): + # Test forcing any contiguous (C or F) order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='A') + assert_equal([x for x in i], aview.ravel(order='A')) + # Fortran-order + i = nditer(aview.T, order='A') + assert_equal([x for x in i], aview.T.ravel(order='A')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='A') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='A')) + +def test_iter_best_order_multi_index_1d(): + # The multi-indices should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) + # 1D reversed order + i = nditer(a[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) + +def test_iter_best_order_multi_index_2d(): + # The multi-indices should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) + +def test_iter_best_order_multi_index_3d(): + # The multi-indices should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), 
['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), + (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), + (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), + (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), + (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), + (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), + (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), + (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), + (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + +def test_iter_best_order_c_index_1d(): + # The C index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_c_index_2d(): + # The C index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, 
::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) + +def test_iter_best_order_c_index_3d(): + # The C index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + +def test_iter_best_order_f_index_1d(): + # The Fortran index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_f_index_2d(): + # The Fortran index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + +def test_iter_best_order_f_index_3d(): + # The Fortran index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D reversed C-order + i = 
nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + +def test_iter_no_inner_full_coalesce(): + # Check no_inner iterators which coalesce into a single inner loop + + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + size = np.prod(shape) + a = arange(size) + # Test each combination of forward and backwards indexing + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Fortran-order + i = nditer(aview.T, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), + ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + +def test_iter_no_inner_dim_coalescing(): + # Check no_inner iterators whose dimensions may not coalesce completely + + # Skipping the last element in a dimension prevents coalescing + # with the next-bigger dimension + a = arange(24).reshape(2, 3, 4)[:,:, :-1] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (3,)) + a = arange(24).reshape(2, 3, 4)[:, :-1,:] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (8,)) + a = arange(24).reshape(2, 3, 4)[:-1,:,:] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (12,)) + + # Even with lots of 1-sized dimensions, should still coalesce + a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (24,)) + +def test_iter_dim_coalescing(): + # Check that the correct number of dimensions are coalesced + + # Tracking a multi-index disables coalescing + a = arange(24).reshape(2, 3, 4) + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # A tracked index can allow coalescing if it's compatible with the array + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['f_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.T.swapaxes(0, 1), 
['f_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # When C or F order is forced, coalescing may still occur + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, order='C') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='C') + assert_equal(i.ndim, 3) + i = nditer(a3d, order='F') + assert_equal(i.ndim, 3) + i = nditer(a3d.T, order='F') + assert_equal(i.ndim, 1) + i = nditer(a3d, order='A') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='A') + assert_equal(i.ndim, 1) + +def test_iter_broadcasting(): + # Standard NumPy broadcasting rules + + # 1D with scalar + i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (6,)) + + # 2D with scalar + i = nditer([arange(6).reshape(2, 3), np.int32(2)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 1D + i = nditer([arange(6).reshape(2, 3), arange(3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 2D + i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + + # 3D with scalar + i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 1D + i = nditer([arange(3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 2D + i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 3D + i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), + arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']]*3) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + +def test_iter_itershape(): + # Check that allocated outputs work with a specified shape + a = np.arange(6, dtype='i2').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (2, 3, 4)) + assert_equal(i.operands[1].strides, (24, 8, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + 
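+ # The allocated output mirrors the memory order of a.T ('i2', itemsize 2):
+ # the new last axis is innermost (stride 2), then the transposed first
+ # axis (stride 8), then the second (stride 24), as the next assert checks.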
assert_equal(i.operands[1].strides, (8, 24, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + order='F', + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (2, 6, 12)) + + # If we specify 1 in the itershape, it shouldn't allow broadcasting + # of that dimension to a bigger value + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, 1, 4)) + # Test bug that for no op_axes but itershape, they are NULLed correctly + i = np.nditer([np.ones(2), None, None], itershape=(2,)) + +def test_iter_broadcasting_errors(): + # Check that errors are thrown for bad broadcasting shapes + + # 1D with 1D + assert_raises(ValueError, nditer, [arange(2), arange(3)], + [], [['readonly']]*2) + # 2D with 1D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(2)], + [], [['readonly']]*2) + # 2D with 2D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], + [], [['readonly']]*2) + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], + [], [['readonly']]*2) + # 3D with 3D + assert_raises(ValueError, nditer, + [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], + [], [['readonly']]*2) + assert_raises(ValueError, nditer, + [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], + [], [['readonly']]*2) + + # Verify that the error message mentions the right shapes + try: + nditer([arange(2).reshape(1, 2, 1), + arange(3).reshape(1, 3), + arange(6).reshape(2, 3)], + [], + [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the 3rd operand + assert_(msg.find('(2,3)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) + # The message should contain the broadcast shape + assert_(msg.find('(1,2,3)') >= 0, + 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) + + try: + nditer([arange(6).reshape(2, 3), arange(2)], + [], + [['readonly'], ['readonly']], + op_axes=[[0, 1], [0, np.newaxis]], + itershape=(4, 3)) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain "shape->remappedshape" for each operand + assert_(msg.find('(2,3)->(2,3)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) + assert_(msg.find('(2,)->(2,newaxis)') >= 0, + ('Message "%s" doesn\'t contain remapped operand shape' + + '(2,)->(2,newaxis)') % msg) + # The message should contain the itershape parameter + assert_(msg.find('(4,3)') >= 0, + 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) + + try: + nditer([np.zeros((2, 1, 1)), np.zeros((2,))], + [], + [['writeonly', 'no_broadcast'], ['readonly']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the bad operand + assert_(msg.find('(2,1,1)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) + # The message should contain the broadcast shape + assert_(msg.find('(2,1,2)') >= 0, + 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) + +def test_iter_flags_errors(): + # Check that bad combinations of flags produce errors + + a = arange(6) + + # Not enough operands + 
assert_raises(ValueError, nditer, [], [], []) + # Too many operands + assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) + # Bad global flag + assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) + # Bad op flag + assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) + # Bad order parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') + # Bad casting parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') + # op_flags must match ops + assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) + # Cannot track both a C and an F index + assert_raises(ValueError, nditer, a, + ['c_index', 'f_index'], [['readonly']]) + # Inner iteration and multi-indices/indices are incompatible + assert_raises(ValueError, nditer, a, + ['external_loop', 'multi_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'c_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'f_index'], [['readonly']]) + # Must specify exactly one of readwrite/readonly/writeonly per operand + assert_raises(ValueError, nditer, a, [], [[]]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, + [], [['readonly', 'writeonly', 'readwrite']]) + # Python scalars are always readonly + assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) + assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) + # Array scalars are always readonly + assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) + assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) + # Check readonly array + a.flags.writeable = False + assert_raises(ValueError, nditer, a, [], [['writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readwrite']]) + a.flags.writeable = True + # Multi-indices available only with the multi_index flag + i = nditer(arange(6), [], [['readonly']]) + assert_raises(ValueError, lambda i:i.multi_index, i) + # Index available only with an index flag + assert_raises(ValueError, lambda i:i.index, i) + # GotoCoords and GotoIndex incompatible with buffering or no_inner + + def assign_multi_index(i): + i.multi_index = (0,) + + def assign_index(i): + i.index = 0 + + def assign_iterindex(i): + i.iterindex = 0 + + def assign_iterrange(i): + i.iterrange = (0, 1) + i = nditer(arange(6), ['external_loop']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterindex, i) + assert_raises(ValueError, assign_iterrange, i) + i = nditer(arange(6), ['buffered']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterrange, i) + # Can't iterate if size is zero + assert_raises(ValueError, nditer, np.array([])) + +def test_iter_slice(): + a, b, c = np.arange(3), np.arange(3), np.arange(3.) + i = nditer([a, b, c], [], ['readwrite']) + with i: + i[0:2] = (3, 3) + assert_equal(a, [3, 1, 2]) + assert_equal(b, [3, 1, 2]) + assert_equal(c, [0, 1, 2]) + i[1] = 12 + assert_equal(i[0:2], [3, 12]) + +def test_iter_assign_mapping(): + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + it.operands[0][...] 
= 3 + it.operands[0][...] = 14 + assert_equal(a, 14) + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + x = it.operands[0][-1:1] + x[...] = 14 + it.operands[0][...] = -1234 + assert_equal(a, -1234) + # check for no warnings on dealloc + x = None + it = None + +def test_iter_nbo_align_contig(): + # Check that byte order, alignment, and contig changes work + + # Byte order change by requesting a specific dtype + a = np.arange(6, dtype='f4') + au = a.byteswap().newbyteorder() + assert_(a.dtype.byteorder != au.dtype.byteorder) + i = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + with i: + # context manager triggers UPDATEIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 2 + assert_equal(au, [2]*6) + del i # should not raise a warning + # Byte order change by requesting NBO + a = np.arange(6, dtype='f4') + au = a.byteswap().newbyteorder() + assert_(a.dtype.byteorder != au.dtype.byteorder) + with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], + casting='equiv') as i: + # context manager triggers UPDATEIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 12345 + i.operands[0][:] = 2 + assert_equal(au, [2]*6) + + # Unaligned input + a = np.zeros((6*4+1,), dtype='i1')[1:] + a.dtype = 'f4' + a[:] = np.arange(6, dtype='f4') + assert_(not a.flags.aligned) + # Without 'aligned', shouldn't copy + i = nditer(a, [], [['readonly']]) + assert_(not i.operands[0].flags.aligned) + assert_equal(i.operands[0], a) + # With 'aligned', should make a copy + with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i: + assert_(i.operands[0].flags.aligned) + # context manager triggers UPDATEIFCOPY on i at exit + assert_equal(i.operands[0], a) + i.operands[0][:] = 3 + assert_equal(a, [3]*6) + + # Discontiguous input + a = arange(12) + # If it is contiguous, shouldn't copy + i = nditer(a[:6], [], [['readonly']]) + assert_(i.operands[0].flags.contiguous) + assert_equal(i.operands[0], a[:6]) + # If it isn't contiguous, should buffer + i = nditer(a[::2], ['buffered', 'external_loop'], + [['readonly', 'contig']], + buffersize=10) + assert_(i[0].flags.contiguous) + assert_equal(i[0], a[::2]) + +def test_iter_array_cast(): + # Check that arrays are cast as requested + + # No cast 'f4' -> 'f4' + a = np.arange(6, dtype='f4').reshape(2, 3) + i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) + with i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + + # Byte-order cast '<f4' -> '>f4' + a = np.arange(6, dtype='<f4').reshape(2, 3) + with nditer(a, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('>f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('>f4')) + + # Safe case 'f4' -> 'f8' + a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + # The memory layout of the temporary should match a (a is (48,4,16)) + # except negative strides get flipped to positive strides.
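+ # (a has f4 strides (48, 4, 16); the f8 temporary doubles the itemsize,
+ # giving (96, 8, 32) with the same axis ordering.)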
+ assert_equal(i.operands[0].strides, (96, 8, 32)) + a = a[::-1,:, ::-1] + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + assert_equal(i.operands[0].strides, (96, 8, 32)) + + # Same-kind cast 'f8' -> 'f4' -> 'f8' + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + assert_equal(i.operands[0].strides, (4, 16, 48)) + # Check that WRITEBACKIFCOPY is activated at exit + i.operands[0][2, 1, 1] = -12.5 + assert_(a[2, 1, 1] != -12.5) + assert_equal(a[2, 1, 1], -12.5) + + a = np.arange(6, dtype='i4')[::-2] + with nditer(a, [], + [['writeonly', 'updateifcopy']], + casting='unsafe', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0].dtype, np.dtype('f4')) + # Even though the stride was negative in 'a', it + # becomes positive in the temporary + assert_equal(i.operands[0].strides, (4,)) + i.operands[0][:] = [1, 2, 3] + assert_equal(a, [1, 2, 3]) + +def test_iter_array_cast_errors(): + # Check that invalid casts are caught + + # Need to enable copying for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly']], op_dtypes=[np.dtype('f8')]) + # Also need to allow casting for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='equiv', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='no', + op_dtypes=[np.dtype('f4')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + # '<f4' -> '>f4' should not work with casting='no' + assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('>f4')]) + # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], + [['writeonly', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + +def test_iter_scalar_cast(): + # Check that scalars are cast as requested + + # No cast 'f4' -> 'f4' + i = nditer(np.float32(2.5), [], [['readonly']], + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Safe cast 'f4' -> 'f8' + i = nditer(np.float32(2.5), [], + [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.value.dtype, np.dtype('f8')) + assert_equal(i.value, 2.5) + # Same-kind cast 'f8' -> 'f4' + i = nditer(np.float64(2.5), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) +
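+ # The 'copy' flag lets the iterator read from a cast temporary, so the
+ # narrowing f8 -> f4 cast is accepted under 'same_kind'.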
assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Unsafe cast 'f8' -> 'i4' + i = nditer(np.float64(3.0), [], + [['readonly', 'copy']], + casting='unsafe', + op_dtypes=[np.dtype('i4')]) + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.value.dtype, np.dtype('i4')) + assert_equal(i.value, 3) + # Readonly scalars may be cast even without setting COPY or BUFFERED + i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) + assert_equal(i[0].dtype, np.dtype('f8')) + assert_equal(i[0], 3.) + +def test_iter_scalar_cast_errors(): + # Check that invalid casts are caught + + # Need to allow copying/buffering for write casts of scalars to occur + assert_raises(TypeError, nditer, np.float32(2), [], + [['readwrite']], op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, 2.5, [], + [['readwrite']], op_dtypes=[np.dtype('f4')]) + # 'f8' -> 'f4' isn't a safe cast if the value would overflow + assert_raises(TypeError, nditer, np.float64(1e60), [], + [['readonly']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, np.float32(2), [], + [['readonly']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + +def test_iter_object_arrays_basic(): + # Check that object arrays work + + obj = {'a':3,'b':'d'} + a = np.array([[1, 2, 3], None, obj, None], dtype='O') + if HAS_REFCOUNT: + rc = sys.getrefcount(obj) + + # Need to allow references for object arrays + assert_raises(TypeError, nditer, a) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a, ['refs_ok'], ['readonly']) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a) + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readonly'], order='C') + assert_(i.iterationneedsapi) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readwrite'], order='C') + with i: + for x in i: + x[...] = None + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_(sys.getrefcount(obj) == rc-1) + assert_equal(a, np.array([None]*4, dtype='O')) + +def test_iter_object_arrays_conversions(): + # Conversions to/from objects + a = np.arange(6, dtype='O') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + a = np.arange(6, dtype='i4') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + # Non-contiguous object array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) + a = a['a'] + a[:] = np.arange(6) + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + #Non-contiguous value array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) + a = a['a'] + a[:] = np.arange(6) + 98172488 + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + ob = i[0][()] + if HAS_REFCOUNT: + rc = sys.getrefcount(ob) + for x in i: + x[...] 
+= 1 + if HAS_REFCOUNT: + assert_(sys.getrefcount(ob) == rc-1) + assert_equal(a, np.arange(6)+98172489) + +def test_iter_common_dtype(): + # Check that the iterator finds a common data type correctly + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='same_kind') + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.dtypes[1], np.dtype('f4')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('u4')) + assert_equal(i.dtypes[1], np.dtype('u4')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')) + assert_equal(i.dtypes[1], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), + array([2j], dtype='c8'), array([9], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*4, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + assert_equal(i.dtypes[3], np.dtype('c16')) + assert_equal(i.value, (3, -12, 2j, 9)) + + # When allocating outputs, other outputs aren't factored in + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.dtypes[1], np.dtype('i4')) + assert_equal(i.dtypes[2], np.dtype('c16')) + # But, if common data types are requested, they are + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], + ['common_dtype'], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + +def test_iter_copy_if_overlap(): + # Ensure the iterator makes copies on read/write overlap, if requested + + # Copy not needed, 1 op + for flag in ['readonly', 'writeonly', 'readwrite']: + a = arange(10) + i = nditer([a], ['copy_if_overlap'], [[flag]]) + with i: + assert_(i.operands[0] is a) + + # Copy needed, 2 ops, read-write overlap + x = arange(10) + a = x[1:] + b = x[:-1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy not needed with elementwise, 2 ops, exactly same arrays + x = arange(10) + a = x + b = x + i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], + ['readwrite', 'overlap_assume_elementwise']]) + with i: + assert_(i.operands[0] is a and i.operands[1] is b) + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) + + # Copy not needed, 2 ops, no overlap + x = arange(10) + a = x[::2] + b = x[1::2] + i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) + 
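+ # x[::2] and x[1::2] interleave in the same buffer but share no element,
+ # so copy_if_overlap leaves both operands in place.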
assert_(i.operands[0] is a and i.operands[1] is b) + + # Copy needed, 2 ops, read-write overlap + x = arange(4, dtype=np.int8) + a = x[3:] + b = x.view(np.int32)[:1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy needed, 3 ops, read-write overlap + for flag in ['writeonly', 'readwrite']: + x = np.ones([10, 10]) + a = x + b = x.T + c = x + with nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], [flag]]) as i: + a2, b2, c2 = i.operands + assert_(not np.shares_memory(a2, c2)) + assert_(not np.shares_memory(b2, c2)) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = x.T + c = x + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = np.ones([10, 10]) + c = x.T + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, write-only overlap + x = np.arange(7) + a = x[:3] + b = x[3:6] + c = x[4:7] + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['writeonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + +def test_iter_op_axes(): + # Check that custom axes work + + # Reverse the axes + a = arange(6).reshape(2, 3) + i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) + assert_(all([x == y for (x, y) in i])) + a = arange(24).reshape(2, 3, 4) + i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) + assert_(all([x == y for (x, y) in i])) + + # Broadcast 1D to any dimension + a = arange(1, 31).reshape(2, 3, 5) + b = arange(1, 3) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) + b = arange(1, 4) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) + b = arange(1, 6) + i = nditer([a, b], [], [['readonly']]*2, + op_axes=[None, [np.newaxis, np.newaxis, 0]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) + + # Inner product-style broadcasting + a = arange(24).reshape(2, 3, 4) + b = arange(40).reshape(5, 2, 4) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) + assert_equal(i.shape, (2, 3, 5, 2)) + + # Matrix product-style broadcasting + a = arange(12).reshape(3, 4) + b = arange(20).reshape(4, 5) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, -1], [-1, 1]]) + assert_equal(i.shape, (3, 5)) + +def test_iter_op_axes_errors(): + # Check that custom axes throws errors for bad inputs + + # Wrong number of items in op_axes + a = arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0], [1], [0]]) + # Out of bounds items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[2, 1], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [2, -1]]) + # Duplicate items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 0], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + 
op_axes=[[0, 1], [1, 1]]) + + # Different sized arrays in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [0, 1, 0]]) + + # Non-broadcastable dimensions in the result + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [1, 0]]) + +def test_iter_copy(): + # Check that copying the iterator works correctly + a = arange(24).reshape(2, 3, 4) + + # Simple iterator + i = nditer(a) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Buffered iterator + i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (3, 9) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (2, 18) + next(i) + next(i) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Casting iterator + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + + a = arange(24, dtype='<i4').reshape(2, 3, 4) + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='>f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + +def test_iter_buffered_cast_byteswapped(): + # Test that buffering can handle a cast which requires swap->cast->swap + + a = np.arange(10, dtype='f4').newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2*np.arange(10, dtype='f4')) + + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning) + + a = np.arange(10, dtype='f8').newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='unsafe', + op_dtypes=[np.dtype('c8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2*np.arange(10, dtype='f8')) + +def test_iter_buffered_cast_byteswapped_complex(): + # Test that buffering can handle a cast which requires swap->cast->copy + + a = np.arange(10, dtype='c8').newbyteorder().byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype='c8') + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) + + a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f4')], + buffersize=7) + with i: + for v in i: + v[...]
*= 2 + assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) + +def test_iter_buffered_cast_structured_type(): + # Tests buffering of structured types + + # simple -> struct type (duplicates the value) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.arange(3, dtype='f4') + 0.5 + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [np.array(x) for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + + # object -> struct type + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.zeros((3,), dtype='O') + a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) + a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) + a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) + if HAS_REFCOUNT: + rc = sys.getrefcount(a[0]) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [x.copy() for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(a[0]), rc) + + # single-field struct type -> simple + sdt = [('a', 'f4')] + a = np.array([(5.5,), (8,)], dtype=sdt) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4') + assert_equal([x_[()] for x_ in i], [5, 8]) + + # make sure multi-field struct type -> simple doesn't work + sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + assert_raises(ValueError, lambda: ( + nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4'))) + + # struct type -> struct type (field-wise copy) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + assert_equal([np.array(x_) for x_ in i], + [np.array((1, 2, 3), dtype=sdt2), + np.array((4, 5, 6), dtype=sdt2)]) + + # make sure struct type -> struct type with different + # number of fields fails + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('b', 'O'), ('a', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + + assert_raises(ValueError, lambda : ( + nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2))) + + +def test_iter_buffered_cast_subarray(): + # Tests buffering of subarrays + + # one element -> many (copies it to all) + sdt1 = [('a', 'f4')] + sdt2 = [('a', 'f8', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + for x, count in zip(i, list(range(6))): + assert_(np.all(x['a'] == count)) + + # one element -> many -> back (copies it to all) + sdt1 = [('a', 'O', (1, 1))] + sdt2 = 
[('a', 'O', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_(np.all(x['a'] == count)) + x['a'][0] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + x['a'] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'f8', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> one element (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> matching shape (straightforward copy) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a']) + count += 1 + + # vector -> smaller vector (truncates) + sdt1 = [('a', 'f8', (6,))] + sdt2 = [('a', 'f4', (2,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*6).reshape(6, 6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a'][:2]) + count += 1 + + # vector -> bigger vector (pads with zeros) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (6,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2], a[count]['a']) + assert_equal(x['a'][2:], [0, 0, 0, 0]) + count += 1 + + # vector -> matrix (broadcasts) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][0], a[count]['a']) + assert_equal(x['a'][1], a[count]['a']) + count += 1 + + # vector -> matrix (broadcasts and zero-pads) + sdt1 = [('a', 'f8', (2, 1))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2, 1) + i = nditer(a, ['buffered', 
'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) + assert_equal(x['a'][2,:], [0, 0]) + count += 1 + + # matrix -> matrix (truncates and zero-pads) + sdt1 = [('a', 'f8', (2, 3))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2*3).reshape(6, 2, 3) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) + assert_equal(x['a'][2,:], [0, 0]) + count += 1 + +def test_iter_buffering_badwriteback(): + # Writing back from a buffer cannot combine elements + + # a needs write buffering, but had a broadcast dimension + a = np.arange(6).reshape(2, 3, 1) + b = np.arange(12).reshape(2, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + + # But if a is readonly, it's fine + nditer([a, b], ['buffered', 'external_loop'], + [['readonly'], ['writeonly']], + order='C') + + # If a has just one element, it's fine too (constant 0 stride, a reduction) + a = np.arange(1).reshape(1, 1, 1) + nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['writeonly']], + order='C') + + # check that it fails on other dimensions too + a = np.arange(6).reshape(1, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + a = np.arange(4).reshape(2, 1, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + +def test_iter_buffering_string(): + # Safe casting disallows shrinking strings + a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) + assert_equal(a.dtype, np.dtype('S4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='S2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') + assert_equal(i[0], b'abc') + assert_equal(i[0].dtype, np.dtype('S6')) + + a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode) + assert_equal(a.dtype, np.dtype('U4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='U2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') + assert_equal(i[0], u'abc') + assert_equal(i[0].dtype, np.dtype('U6')) + +def test_iter_buffering_growinner(): + # Test that the inner loop grows when no buffering is needed + a = np.arange(30) + i = nditer(a, ['buffered', 'growinner', 'external_loop'], + buffersize=5) + # Should end up with just one inner loop here + assert_equal(i[0].size, a.size) + + +@pytest.mark.slow +def test_iter_buffered_reduce_reuse(): + # large enough array for all views, including negative strides. + a = np.arange(2*3**5)[3**5:3**5+1] + flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] + op_flags = [('readonly',), ('readwrite', 'allocate')] + op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] + # wrong dtype to force buffering + op_dtypes = [float, a.dtype] + + def get_params(): + for xs in range(-3**2, 3**2 + 1): + for ys in range(xs, 3**2 + 1): + for op_axes in op_axes_list: + # last stride is reduced and because of that not + # important for this test, as it is the inner stride. 
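+                    # xs and ys may be zero or negative, so the strided views
+                    # built below can overlap themselves; the slice of a above
+                    # leaves a margin of 3**5 elements in front and nearly as
+                    # many behind, so every view stays inside the buffer.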
+ strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) + arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) + + for skip in [0, 1]: + yield arr, op_axes, skip + + for arr, op_axes, skip in get_params(): + nditer2 = np.nditer([arr.copy(), None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + op_dtypes=op_dtypes) + with nditer2: + nditer2.operands[-1][...] = 0 + nditer2.reset() + nditer2.iterindex = skip + + for (a2_in, b2_in) in nditer2: + b2_in += a2_in.astype(np.int_) + + comp_res = nditer2.operands[-1] + + for bufsize in range(0, 3**3): + nditer1 = np.nditer([arr, None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + buffersize=bufsize, op_dtypes=op_dtypes) + with nditer1: + nditer1.operands[-1][...] = 0 + nditer1.reset() + nditer1.iterindex = skip + + for (a1_in, b1_in) in nditer1: + b1_in += a1_in.astype(np.int_) + + res = nditer1.operands[-1] + assert_array_equal(res, comp_res) + + +def test_iter_no_broadcast(): + # Test that the no_broadcast flag works + a = np.arange(24).reshape(2, 3, 4) + b = np.arange(6).reshape(2, 3, 1) + c = np.arange(12).reshape(3, 4) + + nditer([a, b, c], [], + [['readonly', 'no_broadcast'], + ['readonly'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) + + +class TestIterNested(object): + + def test_basic(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_reorder(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + + def test_flip_axes(self): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = 
np.nested_iters(a, [[0, 2], [1]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+        # In 'C' order, flipping axes is disabled
+        i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
+
+        i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
+
+        i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
+
+    def test_broadcast(self):
+        # Test nested iteration with broadcasting
+        a = arange(2).reshape(2, 1)
+        b = arange(3).reshape(1, 3)
+
+        i, j = np.nested_iters([a, b], [[0], [1]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
+
+        i, j = np.nested_iters([a, b], [[1], [0]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
+
+    def test_dtype_copy(self):
+        # Test nested iteration with a copy to change dtype
+
+        # copy
+        a = arange(6, dtype='i4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                               op_flags=['readonly', 'copy'],
+                               op_dtypes='f8')
+        assert_equal(j[0].dtype, np.dtype('f8'))
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
+        vals = None
+
+        # writebackifcopy - using context manager
+        a = arange(6, dtype='f4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                               op_flags=['readwrite', 'updateifcopy'],
+                               casting='same_kind',
+                               op_dtypes='f8')
+        with i, j:
+            assert_equal(j[0].dtype, np.dtype('f8'))
+            for x in i:
+                for y in j:
+                    y[...] += 1
+            assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+        assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+        # writebackifcopy - using close()
+        a = arange(6, dtype='f4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                               op_flags=['readwrite', 'updateifcopy'],
+                               casting='same_kind',
+                               op_dtypes='f8')
+        assert_equal(j[0].dtype, np.dtype('f8'))
+        for x in i:
+            for y in j:
+                y[...] += 1
+        assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+        i.close()
+        j.close()
+        assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+    def test_dtype_buffered(self):
+        # Test nested iteration with buffering to change dtype
+
+        a = arange(6, dtype='f4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                               flags=['buffered'],
+                               op_flags=['readwrite'],
+                               casting='same_kind',
+                               op_dtypes='f8')
+        assert_equal(j[0].dtype, np.dtype('f8'))
+        for x in i:
+            for y in j:
+                y[...] += 1
+        assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+    def test_0d(self):
+        a = np.arange(12).reshape(2, 3, 2)
+        i, j = np.nested_iters(a, [[], [1, 0, 2]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
+
+        i, j = np.nested_iters(a, [[1, 0, 2], []])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
+
+        i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
+        vals = []
+        for x in i:
+            for y in j:
+                vals.append([z for z in k])
+        assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+    def test_iter_nested_iters_dtype_buffered(self):
+        # Test nested iteration with buffering to change dtype
+
+        a = arange(6, dtype='f4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                               flags=['buffered'],
+                               op_flags=['readwrite'],
+                               casting='same_kind',
+                               op_dtypes='f8')
+        with i, j:
+            assert_equal(j[0].dtype, np.dtype('f8'))
+            for x in i:
+                for y in j:
+                    y[...]
+= 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + +def test_iter_reduction_error(): + + a = np.arange(6) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + + a = np.arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, None], ['external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + +def test_iter_reduction(): + # Test doing reductions with the iterator + + a = np.arange(6) + i = nditer([a, None], ['reduce_ok'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + a = np.arange(6).reshape(2, 3) + i = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Reduction shape/strides for the output + assert_equal(i[1].shape, (6,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] = y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + # This is a tricky reduction case for the buffering double loop + # to handle + a = np.ones((2, 3, 5)) + it1 = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]]) + it2 = nditer([a, None], ['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]], buffersize=10) + with it1, it2: + it1.operands[1].fill(0) + it2.operands[1].fill(0) + it2.reset() + for x in it1: + x[1][...] += x[0] + for x in it2: + x[1][...] += x[0] + assert_equal(it1.operands[1], it2.operands[1]) + assert_equal(it2.operands[1].sum(), a.size) + +def test_iter_buffering_reduction(): + # Test doing buffered reductions with the iterator + + a = np.arange(6) + b = np.array(0., dtype='f8').byteswap().newbyteorder() + i = nditer([a, b], ['reduce_ok', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0], [-1]]) + with i: + assert_equal(i[1].dtype, np.dtype('f8')) + assert_(i[1].dtype != b.dtype) + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(b, np.sum(a)) + + a = np.arange(6).reshape(2, 3) + b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() + i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0, 1], [0, -1]]) + # Reduction shape/strides for the output + with i: + assert_equal(i[1].shape, (3,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] 
= y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + assert_equal(b, np.sum(a, axis=1)) + + # Iterator inner double loop was wrong on this one + p = np.arange(2) + 1 + it = np.nditer([p, None], + ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[-1, 0], [-1, -1]], + itershape=(2, 2)) + with it: + it.operands[1].fill(0) + it.reset() + assert_equal(it[0], [1, 2, 1, 2]) + + # Iterator inner loop should take argument contiguity into account + x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0) + x[...] = np.arange(x.size).reshape(x.shape) + y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) + y_base_copy = y_base.copy() + y = y_base[::2,:,None] + + it = np.nditer([y, x], + ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['readonly']]) + with it: + for a, b in it: + a.fill(2) + + assert_equal(y_base[1::2], y_base_copy[1::2]) + assert_equal(y_base[::2], 2) + +def test_iter_buffering_reduction_reuse_reduce_loops(): + # There was a bug triggering reuse of the reduce loop inappropriately, + # which caused processing to happen in unnecessarily small chunks + # and overran the buffer. + + a = np.zeros((2, 7)) + b = np.zeros((1, 7)) + it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], + op_flags=[['readonly'], ['readwrite']], + buffersize=5) + + with it: + bufsizes = [x.shape[0] for x, y in it] + assert_equal(bufsizes, [5, 2, 5, 2]) + assert_equal(sum(bufsizes), a.size) + +def test_iter_writemasked_badinput(): + a = np.zeros((2, 3)) + b = np.zeros((3,)) + m = np.array([[True, True, False], [False, True, False]]) + m2 = np.array([True, True, False]) + m3 = np.array([0, 1, 1], dtype='u1') + mbad1 = np.array([0, 1, 1], dtype='i1') + mbad2 = np.array([0, 1, 1], dtype='f4') + + # Need an 'arraymask' if any operand is 'writemasked' + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite', 'writemasked'], ['readonly']]) + + # A 'writemasked' operand must not be readonly + assert_raises(ValueError, nditer, [a, m], [], + [['readonly', 'writemasked'], ['readonly', 'arraymask']]) + + # 'writemasked' and 'arraymask' may not be used together + assert_raises(ValueError, nditer, [a, m], [], + [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) + + # 'arraymask' may only be specified once + assert_raises(ValueError, nditer, [a, m, m2], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask'], + ['readonly', 'arraymask']]) + + # An 'arraymask' with nothing 'writemasked' also doesn't make sense + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite'], ['readonly', 'arraymask']]) + + # A writemasked reduction requires a similarly smaller mask + assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # But this should work with a smaller/equal mask to the reduction operand + np.nditer([a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # The arraymask itself cannot be a reduction + assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readwrite', 'arraymask']]) + + # A uint8 mask is ok too + np.nditer([a, m3], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # An int8 mask isn't ok + assert_raises(TypeError, np.nditer, [a, 
mbad1], ['buffered'],
+                  [['readwrite', 'writemasked'],
+                   ['readonly', 'arraymask']],
+                  op_dtypes=['f4', None],
+                  casting='same_kind')
+    # A float32 mask isn't ok
+    assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
+                  [['readwrite', 'writemasked'],
+                   ['readonly', 'arraymask']],
+                  op_dtypes=['f4', None],
+                  casting='same_kind')
+
+def test_iter_writemasked():
+    a = np.zeros((3,), dtype='f8')
+    msk = np.array([True, True, False])
+
+    # When buffering is unused, 'writemasked' effectively does nothing.
+    # It's up to the user of the iterator to obey the requested semantics.
+    it = np.nditer([a, msk], [],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']])
+    with it:
+        for x, m in it:
+            x[...] = 1
+    # Because we violated the semantics, all the values became 1
+    assert_equal(a, [1, 1, 1])
+
+    # Even if buffering is enabled, we still may be accessing the array
+    # directly.
+    it = np.nditer([a, msk], ['buffered'],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']])
+    with it:
+        for x, m in it:
+            x[...] = 2.5
+    # Because we violated the semantics, all the values became 2.5
+    assert_equal(a, [2.5, 2.5, 2.5])
+
+    # If buffering will definitely happen, for instance because of
+    # a cast, only the items selected by the mask will be copied back from
+    # the buffer.
+    it = np.nditer([a, msk], ['buffered'],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']],
+                   op_dtypes=['i8', None],
+                   casting='unsafe')
+    with it:
+        for x, m in it:
+            x[...] = 3
+    # Even though we violated the semantics, only the selected values
+    # were copied back
+    assert_equal(a, [3, 3, 2.5])
+
+def test_iter_non_writable_attribute_deletion():
+    it = np.nditer(np.ones(2))
+    attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
+            "iterationneedsapi", "has_multi_index", "has_index", "dtypes",
+            "ndim", "nop", "itersize", "finished"]
+
+    for s in attr:
+        assert_raises(AttributeError, delattr, it, s)
+
+
+def test_iter_writable_attribute_deletion():
+    it = np.nditer(np.ones(2))
+    attr = ["multi_index", "index", "iterrange", "iterindex"]
+    for s in attr:
+        assert_raises(AttributeError, delattr, it, s)
+
+
+def test_iter_element_deletion():
+    it = np.nditer(np.ones(3))
+    try:
+        del it[1]
+        del it[1:2]
+    except TypeError:
+        pass
+    except Exception:
+        raise AssertionError
+
+def test_iter_allocated_array_dtypes():
+    # If the dtype of an allocated output has a shape, the shape gets
+    # tacked onto the end of the result.
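+    # e.g. below, op_dtypes=[None, ('i4', (2,))] over the length-3 input
+    # allocates an output of shape (3, 2): each iteration pairs one input
+    # scalar with one length-2 output row.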
+ it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))]) + for a, b in it: + b[0] = a - 1 + b[1] = a + 1 + assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]]) + + # Make sure this works for scalars too + it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))]) + for a, b, c in it: + c[0, 0] = a - b + c[0, 1] = a + b + c[1, 0] = a * b + c[1, 1] = a / b + assert_equal(it.operands[2], [[8, 12], [20, 5]]) + + +def test_0d_iter(): + # Basic test for iteration of 0-d arrays: + i = nditer([2, 3], ['multi_index'], [['readonly']]*2) + assert_equal(i.ndim, 0) + assert_equal(next(i), (2, 3)) + assert_equal(i.multi_index, ()) + assert_equal(i.iterindex, 0) + assert_raises(StopIteration, next, i) + # test reset: + i.reset() + assert_equal(next(i), (2, 3)) + assert_raises(StopIteration, next, i) + + # test forcing to 0-d + i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()]) + assert_equal(i.ndim, 0) + assert_equal(len(i), 1) + # note that itershape=(), still behaves like None due to the conversions + + # Test a more complex buffered casting case (same as another test above) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.array(0.5, dtype='f4') + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', op_dtypes=sdt) + vals = next(i) + assert_equal(vals['a'], 0.5) + assert_equal(vals['b'], 0) + assert_equal(vals['c'], [[(0.5)]*3]*2) + assert_equal(vals['d'], 0.5) + + +def test_iter_too_large(): + # The total size of the iterator must not exceed the maximum intp due + # to broadcasting. Dividing by 1024 will keep it small enough to + # give a legal array. + size = np.iinfo(np.intp).max // 1024 + arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,)) + assert_raises(ValueError, nditer, (arr, arr[:, None])) + # test the same for multiindex. That may get more interesting when + # removing 0 dimensional axis is allowed (since an iterator can grow then) + assert_raises(ValueError, nditer, + (arr, arr[:, None]), flags=['multi_index']) + + +def test_iter_too_large_with_multiindex(): + # When a multi index is being tracked, the error is delayed this + # checks the delayed error messages and getting below that by + # removing an axis. + base_size = 2**10 + num = 1 + while base_size**num < np.iinfo(np.intp).max: + num += 1 + + shape_template = [1, 1] * num + arrays = [] + for i in range(num): + shape = shape_template[:] + shape[i * 2] = 2**10 + arrays.append(np.empty(shape)) + arrays = tuple(arrays) + + # arrays are now too large to be broadcast. The different modes test + # different nditer functionality with or without GIL. 
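+    # each of the num arrays has a single axis of length 2**10 (at a
+    # different position) and 1s elsewhere, so broadcasting them together
+    # would produce 2**(10*num) elements, chosen above to exceed
+    # np.iinfo(np.intp).max.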
+ for mode in range(6): + with assert_raises(ValueError): + _multiarray_tests.test_nditer_too_large(arrays, -1, mode) + # but if we do nothing with the nditer, it can be constructed: + _multiarray_tests.test_nditer_too_large(arrays, -1, 7) + + # When an axis is removed, things should work again (half the time): + for i in range(num): + for mode in range(6): + # an axis with size 1024 is removed: + _multiarray_tests.test_nditer_too_large(arrays, i*2, mode) + # an axis with size 1 is removed: + with assert_raises(ValueError): + _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode) + +def test_writebacks(): + a = np.arange(6, dtype='f4') + au = a.byteswap().newbyteorder() + assert_(a.dtype.byteorder != au.dtype.byteorder) + it = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + with it: + it.operands[0][:] = 100 + assert_equal(au, 100) + # do it again, this time raise an error, + it = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + try: + with it: + assert_equal(au.flags.writeable, False) + it.operands[0][:] = 0 + raise ValueError('exit context manager on exception') + except: + pass + assert_equal(au, 0) + assert_equal(au.flags.writeable, True) + # cannot reuse i outside context manager + assert_raises(ValueError, getattr, it, 'operands') + + it = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + with it: + x = it.operands[0] + x[:] = 6 + assert_(x.flags.writebackifcopy) + assert_equal(au, 6) + assert_(not x.flags.writebackifcopy) + x[:] = 123 # x.data still valid + assert_equal(au, 6) # but not connected to au + + it = nditer(au, [], + [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + # reentering works + with it: + with it: + for x in it: + x[...] = 123 + + it = nditer(au, [], + [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + # make sure exiting the inner context manager closes the iterator + with it: + with it: + for x in it: + x[...] = 123 + assert_raises(ValueError, getattr, it, 'operands') + # do not crash if original data array is decrefed + it = nditer(au, [], + [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + del au + with it: + for x in it: + x[...] 
= 123
+    # make sure we cannot reenter the closed iterator
+    enter = it.__enter__
+    assert_raises(RuntimeError, enter)
+
+def test_close_equivalent():
+    ''' using a context manager and using nditer.close are equivalent
+    '''
+    def add_close(x, y, out=None):
+        addop = np.add
+        it = np.nditer([x, y, out], [],
+                       [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+        for (a, b, c) in it:
+            addop(a, b, out=c)
+        ret = it.operands[2]
+        it.close()
+        return ret
+
+    def add_context(x, y, out=None):
+        addop = np.add
+        it = np.nditer([x, y, out], [],
+                       [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+        with it:
+            for (a, b, c) in it:
+                addop(a, b, out=c)
+            return it.operands[2]
+    z = add_close(range(5), range(5))
+    assert_equal(z, range(0, 10, 2))
+    z = add_context(range(5), range(5))
+    assert_equal(z, range(0, 10, 2))
+
+def test_close_raises():
+    it = np.nditer(np.arange(3))
+    assert_equal(next(it), 0)
+    it.close()
+    assert_raises(StopIteration, next, it)
+    assert_raises(ValueError, getattr, it, 'operands')
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_warn_noclose():
+    a = np.arange(6, dtype='f4')
+    au = a.byteswap().newbyteorder()
+    with suppress_warnings() as sup:
+        sup.record(RuntimeWarning)
+        it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
+                       casting='equiv', op_dtypes=[np.dtype('f4')])
+        del it
+        assert len(sup.log) == 1
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.pyc
new file mode 100644
index 0000000..e623a89
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.py
new file mode 100644
index 0000000..3753472
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.py
@@ -0,0 +1,2750 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import warnings
+import itertools
+import platform
+import pytest
+from decimal import Decimal
+
+import numpy as np
+from numpy.core import umath
+from numpy.random import rand, randint, randn
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_raises_regex,
+    assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+    HAS_REFCOUNT
+    )
+
+
+class TestResize(object):
+    def test_copies(self):
+        A = np.array([[1, 2], [3, 4]])
+        Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
+        assert_equal(np.resize(A, (2, 4)), Ar1)
+
+        Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
+        assert_equal(np.resize(A, (4, 2)), Ar2)
+
+        Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
+        assert_equal(np.resize(A, (4, 3)), Ar3)
+
+    def test_zeroresize(self):
+        A = np.array([[1, 2], [3, 4]])
+        Ar = np.resize(A, (0,))
+        assert_array_equal(Ar, np.array([]))
+        assert_equal(A.dtype, Ar.dtype)
+
+        Ar = np.resize(A, (0, 2))
+        assert_equal(Ar.shape, (0, 2))
+
+        Ar = np.resize(A, (2, 0))
+        assert_equal(Ar.shape, (2, 0))
+
+    def test_reshape_from_zero(self):
+        # See also gh-6740
+        A = np.zeros(0, dtype=[('a', np.float32, 1)])
+        Ar = np.resize(A, (2, 1))
+        assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
+        assert_equal(A.dtype, Ar.dtype)
+
+
+class TestNonarrayArgs(object):
+    # check that non-array arguments to functions wrap them in arrays
+    def test_choose(self):
+        choices = [[0, 1, 2],
+                   [3, 4, 5],
+                   [5, 6,
7]] + tgt = [5, 1, 5] + a = [2, 0, 1] + + out = np.choose(a, choices) + assert_equal(out, tgt) + + def test_clip(self): + arr = [-1, 5, 2, 3, 10, -4, -9] + out = np.clip(arr, 2, 7) + tgt = [2, 5, 2, 3, 7, 2, 2] + assert_equal(out, tgt) + + def test_compress(self): + arr = [[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]] + tgt = [[5, 6, 7, 8, 9]] + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + def test_count_nonzero(self): + arr = [[0, 1, 7, 0, 0], + [3, 0, 0, 2, 19]] + tgt = np.array([2, 3]) + out = np.count_nonzero(arr, axis=1) + assert_equal(out, tgt) + + def test_cumproduct(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720]))) + + def test_diagonal(self): + a = [[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]] + out = np.diagonal(a) + tgt = [0, 5, 10] + + assert_equal(out, tgt) + + def test_mean(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.mean(A) == 3.5) + assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5]))) + assert_(np.all(np.mean(A, 1) == np.array([2., 5.]))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.mean([]))) + assert_(w[0].category is RuntimeWarning) + + def test_ptp(self): + a = [3, 4, 5, 10, -3, -5, 6.0] + assert_equal(np.ptp(a, axis=0), 15.0) + + def test_prod(self): + arr = [[1, 2, 3, 4], + [5, 6, 7, 9], + [10, 3, 4, 5]] + tgt = [24, 1890, 600] + + assert_equal(np.prod(arr, axis=-1), tgt) + + def test_ravel(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + assert_equal(np.ravel(a), tgt) + + def test_repeat(self): + a = [1, 2, 3] + tgt = [1, 1, 2, 2, 3, 3] + + out = np.repeat(a, 2) + assert_equal(out, tgt) + + def test_reshape(self): + arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(np.reshape(arr, (2, 6)), tgt) + + def test_round(self): + arr = [1.56, 72.54, 6.35, 3.25] + tgt = [1.6, 72.5, 6.4, 3.2] + assert_equal(np.around(arr, decimals=1), tgt) + + def test_searchsorted(self): + arr = [-8, -5, -1, 3, 6, 10] + out = np.searchsorted(arr, 0) + assert_equal(out, 3) + + def test_size(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.size(A) == 6) + assert_(np.size(A, 0) == 2) + assert_(np.size(A, 1) == 3) + + def test_squeeze(self): + A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] + assert_(np.squeeze(A).shape == (3, 3)) + + def test_std(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.std(A), 1.707825127659933) + assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5])) + assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.std([]))) + assert_(w[0].category is RuntimeWarning) + + def test_swapaxes(self): + tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]] + a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] + out = np.swapaxes(a, 0, 2) + assert_equal(out, tgt) + + def test_sum(self): + m = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + tgt = [[6], [15], [24]] + out = np.sum(m, axis=1, keepdims=True) + + assert_equal(tgt, out) + + def test_take(self): + tgt = [2, 3, 5] + indices = [1, 2, 4] + a = [1, 2, 3, 4, 5] + + out = np.take(a, indices) + assert_equal(out, tgt) + + def test_trace(self): + c = [[1, 2], [3, 4], [5, 6]] + assert_equal(np.trace(c), 5) + + def test_transpose(self): + arr = [[1, 2], [3, 4], [5, 6]] + tgt = [[1, 3, 5], [2, 4, 6]] + 
assert_equal(np.transpose(arr, (1, 0)), tgt) + + def test_var(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.var(A), 2.9166666666666665) + assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25])) + assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.var([]))) + assert_(w[0].category is RuntimeWarning) + + +class TestIsscalar(object): + def test_isscalar(self): + assert_(np.isscalar(3.1)) + assert_(np.isscalar(np.int16(12345))) + assert_(np.isscalar(False)) + assert_(np.isscalar('numpy')) + assert_(not np.isscalar([3.1])) + assert_(not np.isscalar(None)) + + # PEP 3141 + from fractions import Fraction + assert_(np.isscalar(Fraction(5, 17))) + from numbers import Number + assert_(np.isscalar(Number())) + + +class TestBoolScalar(object): + def test_logical(self): + f = np.False_ + t = np.True_ + s = "xyz" + assert_((t and s) is s) + assert_((f and s) is f) + + def test_bitwise_or(self): + f = np.False_ + t = np.True_ + assert_((t | t) is t) + assert_((f | t) is t) + assert_((t | f) is t) + assert_((f | f) is f) + + def test_bitwise_and(self): + f = np.False_ + t = np.True_ + assert_((t & t) is t) + assert_((f & t) is f) + assert_((t & f) is f) + assert_((f & f) is f) + + def test_bitwise_xor(self): + f = np.False_ + t = np.True_ + assert_((t ^ t) is f) + assert_((f ^ t) is t) + assert_((t ^ f) is t) + assert_((f ^ f) is f) + + +class TestBoolArray(object): + def setup(self): + # offset for simd tests + self.t = np.array([True] * 41, dtype=bool)[1::] + self.f = np.array([False] * 41, dtype=bool)[1::] + self.o = np.array([False] * 42, dtype=bool)[2::] + self.nm = self.f.copy() + self.im = self.t.copy() + self.nm[3] = True + self.nm[-2] = True + self.im[3] = False + self.im[-2] = False + + def test_all_any(self): + assert_(self.t.all()) + assert_(self.t.any()) + assert_(not self.f.all()) + assert_(not self.f.any()) + assert_(self.nm.any()) + assert_(self.im.any()) + assert_(not self.nm.all()) + assert_(not self.im.all()) + # check bad element in all positions + for i in range(256 - 7): + d = np.array([False] * 256, dtype=bool)[7::] + d[i] = True + assert_(np.any(d)) + e = np.array([True] * 256, dtype=bool)[7::] + e[i] = False + assert_(not np.all(e)) + assert_array_equal(e, ~d) + # big array test for blocked libc loops + for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: + d = np.array([False] * 100043, dtype=bool) + d[i] = True + assert_(np.any(d), msg="%r" % i) + e = np.array([True] * 100043, dtype=bool) + e[i] = False + assert_(not np.all(e), msg="%r" % i) + + def test_logical_not_abs(self): + assert_array_equal(~self.t, self.f) + assert_array_equal(np.abs(~self.t), self.f) + assert_array_equal(np.abs(~self.f), self.t) + assert_array_equal(np.abs(self.f), self.f) + assert_array_equal(~np.abs(self.f), self.t) + assert_array_equal(~np.abs(self.t), self.f) + assert_array_equal(np.abs(~self.nm), self.im) + np.logical_not(self.t, out=self.o) + assert_array_equal(self.o, self.f) + np.abs(self.t, out=self.o) + assert_array_equal(self.o, self.t) + + def test_logical_and_or_xor(self): + assert_array_equal(self.t | self.t, self.t) + assert_array_equal(self.f | self.f, self.f) + assert_array_equal(self.t | self.f, self.t) + assert_array_equal(self.f | self.t, self.t) + np.logical_or(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t & self.t, self.t) + assert_array_equal(self.f & self.f, 
self.f) + assert_array_equal(self.t & self.f, self.f) + assert_array_equal(self.f & self.t, self.f) + np.logical_and(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t ^ self.t, self.f) + assert_array_equal(self.f ^ self.f, self.f) + assert_array_equal(self.t ^ self.f, self.t) + assert_array_equal(self.f ^ self.t, self.t) + np.logical_xor(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.f) + + assert_array_equal(self.nm & self.t, self.nm) + assert_array_equal(self.im & self.f, False) + assert_array_equal(self.nm & True, self.nm) + assert_array_equal(self.im & False, self.f) + assert_array_equal(self.nm | self.t, self.t) + assert_array_equal(self.im | self.f, self.im) + assert_array_equal(self.nm | True, self.t) + assert_array_equal(self.im | False, self.im) + assert_array_equal(self.nm ^ self.t, self.im) + assert_array_equal(self.im ^ self.f, self.im) + assert_array_equal(self.nm ^ True, self.im) + assert_array_equal(self.im ^ False, self.im) + + +class TestBoolCmp(object): + def setup(self): + self.f = np.ones(256, dtype=np.float32) + self.ef = np.ones(self.f.size, dtype=bool) + self.d = np.ones(128, dtype=np.float64) + self.ed = np.ones(self.d.size, dtype=bool) + # generate values for all permutation of 256bit simd vectors + s = 0 + for i in range(32): + self.f[s:s+8] = [i & 2**x for x in range(8)] + self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)] + s += 8 + s = 0 + for i in range(16): + self.d[s:s+4] = [i & 2**x for x in range(4)] + self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)] + s += 4 + + self.nf = self.f.copy() + self.nd = self.d.copy() + self.nf[self.ef] = np.nan + self.nd[self.ed] = np.nan + + self.inff = self.f.copy() + self.infd = self.d.copy() + self.inff[::3][self.ef[::3]] = np.inf + self.infd[::3][self.ed[::3]] = np.inf + self.inff[1::3][self.ef[1::3]] = -np.inf + self.infd[1::3][self.ed[1::3]] = -np.inf + self.inff[2::3][self.ef[2::3]] = np.nan + self.infd[2::3][self.ed[2::3]] = np.nan + self.efnonan = self.ef.copy() + self.efnonan[2::3] = False + self.ednonan = self.ed.copy() + self.ednonan[2::3] = False + + self.signf = self.f.copy() + self.signd = self.d.copy() + self.signf[self.ef] *= -1. + self.signd[self.ed] *= -1. + self.signf[1::6][self.ef[1::6]] = -np.inf + self.signd[1::6][self.ed[1::6]] = -np.inf + self.signf[3::6][self.ef[3::6]] = -np.nan + self.signd[3::6][self.ed[3::6]] = -np.nan + self.signf[4::6][self.ef[4::6]] = -0. + self.signd[4::6][self.ed[4::6]] = -0. 
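+        # the patterns built above cover every nonzero/NaN/inf/sign-bit lane
+        # combination for 8-lane float32 and 4-lane float64 SIMD blocks; the
+        # tests below check each comparison path at every alignment offset.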
+ + def test_float(self): + # offset for alignment test + for i in range(4): + assert_array_equal(self.f[i:] > 0, self.ef[i:]) + assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) + assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) + assert_array_equal(-self.f[i:] < 0, self.ef[i:]) + assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) + r = self.f[i:] != 0 + assert_array_equal(r, self.ef[i:]) + r2 = self.f[i:] != np.zeros_like(self.f[i:]) + r3 = 0 != self.f[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) + assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) + assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) + assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) + assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) + + def test_double(self): + # offset for alignment test + for i in range(2): + assert_array_equal(self.d[i:] > 0, self.ed[i:]) + assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) + assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) + assert_array_equal(-self.d[i:] < 0, self.ed[i:]) + assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) + r = self.d[i:] != 0 + assert_array_equal(r, self.ed[i:]) + r2 = self.d[i:] != np.zeros_like(self.d[i:]) + r3 = 0 != self.d[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) + assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) + assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) + assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) + assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) + + +class TestSeterr(object): + def test_default(self): + err = np.geterr() + assert_equal(err, + dict(divide='warn', + invalid='warn', + over='warn', + under='ignore') + ) + + def test_set(self): + with np.errstate(): + err = np.seterr() + old = np.seterr(divide='print') + assert_(err == old) + new = np.seterr() + assert_(new['divide'] == 'print') + np.seterr(over='raise') + assert_(np.geterr()['over'] == 'raise') + assert_(new['divide'] == 'print') + np.seterr(**old) + assert_(np.geterr() == old) + + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_divide_err(self): + with np.errstate(divide='raise'): + with assert_raises(FloatingPointError): + np.array([1.]) / np.array([0.]) + + np.seterr(divide='ignore') + np.array([1.]) / np.array([0.]) + + def test_errobj(self): + olderrobj = np.geterrobj() + self.called = 0 + try: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with np.errstate(divide='warn'): + np.seterrobj([20000, 1, None]) + np.array([1.]) / np.array([0.]) + assert_equal(len(w), 1) + + def log_err(*args): + self.called += 1 + extobj_err = args + assert_(len(extobj_err) == 2) + assert_("divide" in extobj_err[0]) + + with np.errstate(divide='ignore'): + np.seterrobj([20000, 3, log_err]) + np.array([1.]) / np.array([0.]) + assert_equal(self.called, 1) + + np.seterrobj(olderrobj) + with 
np.errstate(divide='ignore'): + np.divide(1., 0., extobj=[20000, 3, log_err]) + assert_equal(self.called, 2) + finally: + np.seterrobj(olderrobj) + del self.called + + def test_errobj_noerrmask(self): + # errmask = 0 has a special code path for the default + olderrobj = np.geterrobj() + try: + # set errobj to something non default + np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, + umath.ERR_DEFAULT + 1, None]) + # call a ufunc + np.isnan(np.array([6])) + # same with the default, lots of times to get rid of possible + # pre-existing stack in the code + for i in range(10000): + np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT, + None]) + np.isnan(np.array([6])) + finally: + np.seterrobj(olderrobj) + + +class TestFloatExceptions(object): + def assert_raises_fpe(self, fpeerr, flop, x, y): + ftype = type(x) + try: + flop(x, y) + assert_(False, + "Type %s did not raise fpe error '%s'." % (ftype, fpeerr)) + except FloatingPointError as exc: + assert_(str(exc).find(fpeerr) >= 0, + "Type %s raised wrong fpe error '%s'." % (ftype, exc)) + + def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): + # Check that fpe exception is raised. + # + # Given a floating operation `flop` and two scalar values, check that + # the operation raises the floating point exception specified by + # `fpeerr`. Tests all variants with 0-d array scalars as well. + + self.assert_raises_fpe(fpeerr, flop, sc1, sc2) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) + self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) + + def test_floating_exceptions(self): + # Test basic arithmetic function errors + with np.errstate(all='raise'): + # Test for all real and complex float types + for typecode in np.typecodes['AllFloat']: + ftype = np.obj2sctype(typecode) + if np.dtype(ftype).kind == 'f': + # Get some extreme values for the type + fi = np.finfo(ftype) + ft_tiny = fi.tiny + ft_max = fi.max + ft_eps = fi.eps + underflow = 'underflow' + divbyzero = 'divide by zero' + else: + # 'c', complex, corresponding real dtype + rtype = type(ftype(0).real) + fi = np.finfo(rtype) + ft_tiny = ftype(fi.tiny) + ft_max = ftype(fi.max) + ft_eps = ftype(fi.eps) + # The complex types raise different exceptions + underflow = '' + divbyzero = '' + overflow = 'overflow' + invalid = 'invalid' + + self.assert_raises_fpe(underflow, + lambda a, b: a/b, ft_tiny, ft_max) + self.assert_raises_fpe(underflow, + lambda a, b: a*b, ft_tiny, ft_tiny) + self.assert_raises_fpe(overflow, + lambda a, b: a*b, ft_max, ftype(2)) + self.assert_raises_fpe(overflow, + lambda a, b: a/b, ft_max, ftype(0.5)) + self.assert_raises_fpe(overflow, + lambda a, b: a+b, ft_max, ft_max*ft_eps) + self.assert_raises_fpe(overflow, + lambda a, b: a-b, -ft_max, ft_max*ft_eps) + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) + self.assert_raises_fpe(divbyzero, + lambda a, b: a/b, ftype(1), ftype(0)) + self.assert_raises_fpe(invalid, + lambda a, b: a/b, ftype(np.inf), ftype(np.inf)) + self.assert_raises_fpe(invalid, + lambda a, b: a/b, ftype(0), ftype(0)) + self.assert_raises_fpe(invalid, + lambda a, b: a-b, ftype(np.inf), ftype(np.inf)) + self.assert_raises_fpe(invalid, + lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)) + self.assert_raises_fpe(invalid, + lambda a, b: a*b, ftype(0), ftype(np.inf)) + + def test_warnings(self): + # test warning code path + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with np.errstate(all="warn"): + np.divide(1, 0.) 
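+                # the divide above emits the first warning; each operation
+                # that follows should append exactly one more entry to w.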
+ assert_equal(len(w), 1) + assert_("divide by zero" in str(w[0].message)) + np.array(1e300) * np.array(1e300) + assert_equal(len(w), 2) + assert_("overflow" in str(w[-1].message)) + np.array(np.inf) - np.array(np.inf) + assert_equal(len(w), 3) + assert_("invalid value" in str(w[-1].message)) + np.array(1e-300) * np.array(1e-300) + assert_equal(len(w), 4) + assert_("underflow" in str(w[-1].message)) + + +class TestTypes(object): + def check_promotion_cases(self, promote_func): + # tests that the scalars get coerced correctly. + b = np.bool_(0) + i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) + u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) + f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) + c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) + + # coercion within the same kind + assert_equal(promote_func(i8, i16), np.dtype(np.int16)) + assert_equal(promote_func(i32, i8), np.dtype(np.int32)) + assert_equal(promote_func(i16, i64), np.dtype(np.int64)) + assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) + assert_equal(promote_func(f32, f64), np.dtype(np.float64)) + assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) + assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) + assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) + assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) + assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) + + # coercion between kinds + assert_equal(promote_func(b, i32), np.dtype(np.int32)) + assert_equal(promote_func(b, u8), np.dtype(np.uint8)) + assert_equal(promote_func(i8, u8), np.dtype(np.int16)) + assert_equal(promote_func(u8, i32), np.dtype(np.int32)) + assert_equal(promote_func(i64, u32), np.dtype(np.int64)) + assert_equal(promote_func(u64, i32), np.dtype(np.float64)) + assert_equal(promote_func(i32, f32), np.dtype(np.float64)) + assert_equal(promote_func(i64, f32), np.dtype(np.float64)) + assert_equal(promote_func(f32, i16), np.dtype(np.float32)) + assert_equal(promote_func(f32, u32), np.dtype(np.float64)) + assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) + assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) + assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) + + # coercion between scalars and 1-D arrays + assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) + assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) + assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32)) + assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) + assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8)) + assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32)) + assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32)) + assert_equal(promote_func(np.int32(-1), np.array([u64])), + np.dtype(np.float64)) + assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32)) + assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32)) + assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64)) + assert_equal(promote_func(fld, np.array([c64])), + np.dtype(np.complex64)) + assert_equal(promote_func(c64, np.array([f64])), + np.dtype(np.complex128)) + assert_equal(promote_func(np.complex64(3j), np.array([f64])), + np.dtype(np.complex128)) + + # coercion between scalars and 1-D arrays, where + # the scalar has greater kind than the array + assert_equal(promote_func(np.array([b]), f64), 
np.dtype(np.float64)) + assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) + assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) + assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) + + # uint and int are treated as the same "kind" for + # the purposes of array-scalar promotion. + assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16)) + + # float and complex are treated as the same "kind" for + # the purposes of array-scalar promotion, so that you can do + # (0j + float32array) to get a complex64 array instead of + # a complex128 array. + assert_equal(promote_func(np.array([f32]), c128), + np.dtype(np.complex64)) + + def test_coercion(self): + def res_type(a, b): + return np.add(a, b).dtype + + self.check_promotion_cases(res_type) + + # Use-case: float/complex scalar * bool/int8 array + # shouldn't narrow the float/complex type + for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: + b = 1.234 * a + assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + b = np.longdouble(1.234) * a + assert_equal(b.dtype, np.dtype(np.longdouble), + "array type %s" % a.dtype) + b = np.float64(1.234) * a + assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + b = np.float32(1.234) * a + assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) + b = np.float16(1.234) * a + assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) + + b = 1.234j * a + assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + b = np.clongdouble(1.234j) * a + assert_equal(b.dtype, np.dtype(np.clongdouble), + "array type %s" % a.dtype) + b = np.complex128(1.234j) * a + assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + b = np.complex64(1.234j) * a + assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) + + # The following use-case is problematic, and to resolve its + # tricky side-effects requires more changes. 
+ #
+ # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
+ # a float32, shouldn't promote to float64
+ #
+ # a = np.array([1.0, 1.5], dtype=np.float32)
+ # t = np.array([True, False])
+ # b = t*a
+ # assert_equal(b, [1.0, 0.0])
+ # assert_equal(b.dtype, np.dtype('f4'))
+ # b = (1-t)*a
+ # assert_equal(b, [0.0, 1.5])
+ # assert_equal(b.dtype, np.dtype('f4'))
+ #
+ # Probably ~t (bitwise negation) is more proper to use here,
+ # but this is arguably less intuitive to understand at a glance, and
+ # would fail if 't' is actually an integer array instead of boolean:
+ #
+ # b = (~t)*a
+ # assert_equal(b, [0.0, 1.5])
+ # assert_equal(b.dtype, np.dtype('f4'))
+
+ def test_result_type(self):
+ self.check_promotion_cases(np.result_type)
+ assert_(np.result_type(None) == np.dtype(None))
+
+ def test_promote_types_endian(self):
+ # promote_types should always return native-endian types
+ assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
+ assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
+
+ assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
+ assert_equal(np.promote_types('<U16', '>i8'), np.dtype('U21'))
+ assert_equal(np.promote_types('<S5', '>U8'), np.dtype('U8'))
+ assert_equal(np.promote_types('<U8', '>S5'), np.dtype('U8'))
+ assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
+ assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
+
+ assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
+ assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
+ assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
+ assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
+
+ def test_promote_types_strings(self):
+ assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
+ assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
+ assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
+ assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
+ assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
+ assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
+ assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
+ assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
+ assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
+ assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
+ assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
+ assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
+ assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
+ assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
+ assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
+ assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
+ assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
+ assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
+ assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
+ assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
+ assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
+ assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
+ assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
+ assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
+ assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
+ assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
+ assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
+
+ def test_can_cast(self):
+ assert_(np.can_cast(np.int32, np.int64))
+ assert_(np.can_cast(np.float64, complex))
+ assert_(not np.can_cast(complex, float))
+
+ assert_(np.can_cast('i8', 'f8'))
+ assert_(not np.can_cast('i8', 'f4'))
+ assert_(np.can_cast('i4', 'S11'))
+
+ assert_(np.can_cast('i8', 'i8', 'no'))
+ assert_(not np.can_cast('<i8', '>i8', 'no'))
+
+ assert_(np.can_cast('<i8', '>i8', 'equiv'))
+ assert_(not np.can_cast('<i4', '>i8', 'equiv'))
+
+ assert_(np.can_cast('<i4', '>i8', 'safe'))
+ assert_(not np.can_cast('<i8', '>i4', 'safe'))
+
+ assert_(np.can_cast('<i8', '>i4', 'same_kind'))
+ assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
+
+ assert_(np.can_cast('<i8', '>u4', 'unsafe'))
+
+ assert_(np.can_cast('bool', 'S5'))
+ assert_(not np.can_cast('bool', 'S4'))
+
+ assert_(np.can_cast('b', 'S4'))
+ assert_(not np.can_cast('b', 'S3'))
+
+ assert_(np.can_cast('u1', 'S3'))
+ assert_(not np.can_cast('u1', 'S2'))
+ assert_(np.can_cast('u2', 'S5'))
+ assert_(not np.can_cast('u2', 'S4'))
+ assert_(np.can_cast('u4', 'S10'))
+ assert_(not np.can_cast('u4', 'S9'))
+ assert_(np.can_cast('u8', 'S20'))
+ assert_(not np.can_cast('u8', 'S19'))
+
+ assert_(np.can_cast('i1', 'S4'))
+ assert_(not np.can_cast('i1', 'S3'))
+ assert_(np.can_cast('i2', 'S6'))
+ assert_(not np.can_cast('i2', 'S5'))
+ assert_(np.can_cast('i4', 'S11'))
+ assert_(not np.can_cast('i4', 'S10'))
+ assert_(np.can_cast('i8', 'S21'))
+ assert_(not np.can_cast('i8', 'S20'))
+
+ assert_(np.can_cast('bool', 'S5'))
+ assert_(not np.can_cast('bool', 'S4'))
+
+ assert_(np.can_cast('b', 'U4'))
+ assert_(not np.can_cast('b', 'U3'))
+
+ assert_(np.can_cast('u1', 'U3'))
+ assert_(not np.can_cast('u1', 'U2'))
+ assert_(np.can_cast('u2', 'U5'))
+ assert_(not np.can_cast('u2', 'U4'))
+ assert_(np.can_cast('u4', 'U10'))
+ assert_(not np.can_cast('u4', 'U9'))
+ assert_(np.can_cast('u8', 'U20'))
+ assert_(not np.can_cast('u8', 'U19'))
+
+ assert_(np.can_cast('i1', 'U4'))
+ assert_(not np.can_cast('i1', 'U3'))
+ assert_(np.can_cast('i2', 'U6'))
+ assert_(not np.can_cast('i2', 'U5'))
+ assert_(np.can_cast('i4', 'U11'))
+ assert_(not np.can_cast('i4', 'U10'))
+ assert_(np.can_cast('i8', 'U21'))
+ assert_(not np.can_cast('i8', 'U20'))
+
+ assert_raises(TypeError, np.can_cast, 'i4', None)
+ assert_raises(TypeError, np.can_cast, None, 'i4')
+
+ # Also test keyword arguments
+ assert_(np.can_cast(from_=np.int32, to=np.int64))
+
+ def test_can_cast_values(self):
+ # gh-5917
+ for dt in np.sctypes['int'] + np.sctypes['uint']:
+ ii = np.iinfo(dt)
+ assert_(np.can_cast(ii.min, dt))
+ assert_(np.can_cast(ii.max, dt))
+ assert_(not np.can_cast(ii.min - 1, dt))
+ assert_(not np.can_cast(ii.max + 1, dt))
+
+ for dt in np.sctypes['float']:
+ fi = np.finfo(dt)
+ assert_(np.can_cast(fi.min, dt))
+ assert_(np.can_cast(fi.max, dt))
+
+
+ # Custom exception class to test exception propagation in fromiter
+ class NIterError(Exception):
+ pass
+
+
+ class TestFromiter(object):
+ def makegen(self):
+ for x in range(24):
+ yield x**2
+
+ def test_types(self):
+ ai32 = np.fromiter(self.makegen(), np.int32)
+ ai64 = np.fromiter(self.makegen(), np.int64)
+ af = np.fromiter(self.makegen(), float)
+ assert_(ai32.dtype == np.dtype(np.int32))
+ assert_(ai64.dtype == np.dtype(np.int64))
+ assert_(af.dtype == np.dtype(float))
+
+ def test_lengths(self):
+ expected = np.array(list(self.makegen()))
+ a = np.fromiter(self.makegen(), int)
+ a20 = np.fromiter(self.makegen(), int, 20)
+ assert_(len(a) == len(expected))
+ assert_(len(a20) == 20)
+ assert_raises(ValueError, np.fromiter,
+ self.makegen(), int, len(expected) + 10)
+
+ def test_values(self):
+ expected = np.array(list(self.makegen()))
+ a = np.fromiter(self.makegen(), int)
+ a20 = np.fromiter(self.makegen(), int, 20)
+
assert_(np.alltrue(a == expected, axis=0)) + assert_(np.alltrue(a20 == expected[:20], axis=0)) + + def load_data(self, n, eindex): + # Utility method for the issue 2592 tests. + # Raise an exception at the desired index in the iterator. + for e in range(n): + if e == eindex: + raise NIterError('error at index %s' % eindex) + yield e + + def test_2592(self): + # Test iteration exceptions are correctly raised. + count, eindex = 10, 5 + assert_raises(NIterError, np.fromiter, + self.load_data(count, eindex), dtype=int, count=count) + + def test_2592_edge(self): + # Test iter. exceptions, edge case (exception at end of iterator). + count = 10 + eindex = count-1 + assert_raises(NIterError, np.fromiter, + self.load_data(count, eindex), dtype=int, count=count) + + +class TestNonzero(object): + def test_nonzero_trivial(self): + assert_equal(np.count_nonzero(np.array([])), 0) + assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) + assert_equal(np.nonzero(np.array([])), ([],)) + + assert_equal(np.count_nonzero(np.array(0)), 0) + assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0) + assert_equal(np.nonzero(np.array(0)), ([],)) + assert_equal(np.count_nonzero(np.array(1)), 1) + assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1) + assert_equal(np.nonzero(np.array(1)), ([0],)) + + def test_nonzero_onedim(self): + x = np.array([1, 0, 2, -1, 0, 0, 8]) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) + + x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], + dtype=[('a', 'i4'), ('b', 'i2')]) + assert_equal(np.count_nonzero(x['a']), 3) + assert_equal(np.count_nonzero(x['b']), 4) + assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) + assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) + + def test_nonzero_twodim(self): + x = np.array([[0, 1, 0], [2, 0, 3]]) + assert_equal(np.count_nonzero(x), 3) + assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) + + x = np.eye(3) + assert_equal(np.count_nonzero(x), 3) + assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) + + x = np.array([[(0, 1), (0, 0), (1, 11)], + [(1, 1), (1, 0), (0, 0)], + [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) + assert_equal(np.count_nonzero(x['a']), 4) + assert_equal(np.count_nonzero(x['b']), 5) + assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) + assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) + + assert_(not x['a'].T.flags.aligned) + assert_equal(np.count_nonzero(x['a'].T), 4) + assert_equal(np.count_nonzero(x['b'].T), 5) + assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) + assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) + + def test_sparse(self): + # test special sparse condition boolean code path + for i in range(20): + c = np.zeros(200, dtype=bool) + c[i::20] = True + assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) + + c = np.zeros(400, dtype=bool) + c[10 + i:20 + i] = True + c[20 + i*2] = True + assert_equal(np.nonzero(c)[0], + np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2]))) + + def test_return_type(self): + class C(np.ndarray): + pass + + for view in (C, np.ndarray): + for nd in range(1, 4): + shape = tuple(range(2, 2+nd)) + x = np.arange(np.prod(shape)).reshape(shape).view(view) + for nzx in (np.nonzero(x), x.nonzero()): + for nzx_i in nzx: + assert_(type(nzx_i) is np.ndarray) + assert_(nzx_i.flags.writeable) + + def test_count_nonzero_axis(self): + # Basic check of functionality + m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]]) + + 
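+ # [Editor's note -- an illustrative comment, not part of the upstream
+ # test file: axis=0 counts nonzeros down each of the five columns of m
+ # (one apiece), while axis=1 counts along the two rows (2 and 3), which
+ # is exactly what the two expected arrays below encode.]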
expected = np.array([1, 1, 1, 1, 1]) + assert_equal(np.count_nonzero(m, axis=0), expected) + + expected = np.array([2, 3]) + assert_equal(np.count_nonzero(m, axis=1), expected) + + assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1)) + assert_raises(TypeError, np.count_nonzero, m, axis='foo') + assert_raises(np.AxisError, np.count_nonzero, m, axis=3) + assert_raises(TypeError, np.count_nonzero, + m, axis=np.array([[1], [2]])) + + def test_count_nonzero_axis_all_dtypes(self): + # More thorough test that the axis argument is respected + # for all dtypes and responds correctly when presented with + # either integer or tuple arguments for axis + msg = "Mismatch for dtype: %s" + + def assert_equal_w_dt(a, b, err_msg): + assert_equal(a.dtype, b.dtype, err_msg=err_msg) + assert_equal(a, b, err_msg=err_msg) + + for dt in np.typecodes['All']: + err_msg = msg % (np.dtype(dt).name,) + + if dt != 'V': + if dt != 'M': + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + + else: # np.zeros doesn't work for np.datetime64 + m = np.array(['1970-01-01'] * 9) + m = m.reshape((3, 3)) + + m[0, 0] = '1970-01-12' + m[1, 0] = '1970-01-12' + m = m.astype(dt) + + expected = np.array([2, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([1, 1, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(2) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + if dt == 'V': + # There are no 'nonzero' objects for np.void, so the testing + # setup is slightly different for this dtype + m = np.array([np.void(1)] * 6).reshape((2, 3)) + + expected = np.array([0, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(0) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + def test_count_nonzero_axis_consistent(self): + # Check that the axis behaviour for valid axes in + # non-special cases is consistent (and therefore + # correct) by checking it against an integer array + # that is then casted to the generic object dtype + from itertools import combinations, permutations + + axis = (0, 1, 2, 3) + size = (5, 5, 5, 5) + msg = "Mismatch for axis: %s" + + rng = np.random.RandomState(1234) + m = rng.randint(-100, 100, size=size) + n = m.astype(object) + + for length in range(len(axis)): + for combo in combinations(axis, length): + for perm in permutations(combo): + assert_equal( + np.count_nonzero(m, axis=perm), + np.count_nonzero(n, axis=perm), + err_msg=msg % (perm,)) + + def test_countnonzero_axis_empty(self): + a = np.array([[0, 0, 1], [1, 0, 1]]) + assert_equal(np.count_nonzero(a, axis=()), a.astype(bool)) + + def test_array_method(self): + # Tests that the array method + # call to nonzero works + m = np.array([[1, 0, 0], [4, 0, 6]]) + tgt = [[0, 1, 1], [0, 0, 2]] + + assert_equal(m.nonzero(), tgt) + + def test_nonzero_invalid_object(self): + # gh-9295 + a = np.array([np.array([1, 2]), 3]) + assert_raises(ValueError, 
np.nonzero, a) + + class BoolErrors: + def __bool__(self): + raise ValueError("Not allowed") + def __nonzero__(self): + raise ValueError("Not allowed") + + assert_raises(ValueError, np.nonzero, np.array([BoolErrors()])) + + +class TestIndex(object): + def test_boolean(self): + a = rand(3, 5, 8) + V = rand(5, 8) + g1 = randint(0, 5, size=15) + g2 = randint(0, 8, size=15) + V[g1, g2] = -V[g1, g2] + assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + + def test_boolean_edgecase(self): + a = np.array([], dtype='int32') + b = np.array([], dtype='bool') + c = a[b] + assert_equal(c, []) + assert_equal(c.dtype, np.dtype('int32')) + + +class TestBinaryRepr(object): + def test_zero(self): + assert_equal(np.binary_repr(0), '0') + + def test_positive(self): + assert_equal(np.binary_repr(10), '1010') + assert_equal(np.binary_repr(12522), + '11000011101010') + assert_equal(np.binary_repr(10736848), + '101000111101010011010000') + + def test_negative(self): + assert_equal(np.binary_repr(-1), '-1') + assert_equal(np.binary_repr(-10), '-1010') + assert_equal(np.binary_repr(-12522), + '-11000011101010') + assert_equal(np.binary_repr(-10736848), + '-101000111101010011010000') + + def test_sufficient_width(self): + assert_equal(np.binary_repr(0, width=5), '00000') + assert_equal(np.binary_repr(10, width=7), '0001010') + assert_equal(np.binary_repr(-5, width=7), '1111011') + + def test_neg_width_boundaries(self): + # see gh-8670 + + # Ensure that the example in the issue does not + # break before proceeding to a more thorough test. + assert_equal(np.binary_repr(-128, width=8), '10000000') + + for width in range(1, 11): + num = -2**(width - 1) + exp = '1' + (width - 1) * '0' + assert_equal(np.binary_repr(num, width=width), exp) + + +class TestBaseRepr(object): + def test_base3(self): + assert_equal(np.base_repr(3**5, 3), '100000') + + def test_positive(self): + assert_equal(np.base_repr(12, 10), '12') + assert_equal(np.base_repr(12, 10, 4), '000012') + assert_equal(np.base_repr(12, 4), '30') + assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW') + + def test_negative(self): + assert_equal(np.base_repr(-12, 10), '-12') + assert_equal(np.base_repr(-12, 10, 4), '-000012') + assert_equal(np.base_repr(-12, 4), '-30') + + def test_base_range(self): + with assert_raises(ValueError): + np.base_repr(1, 1) + with assert_raises(ValueError): + np.base_repr(1, 37) + + +class TestArrayComparisons(object): + def test_array_equal(self): + res = np.array_equal(np.array([1, 2]), np.array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equal(np.array([1, 2]), np.array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equal(np.array([1, 2]), np.array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1')) + assert_(res) + assert_(type(res) is bool) + res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'), + np.array([('a', 1)], dtype='S1,u4')) + assert_(res) + assert_(type(res) is bool) + + def test_none_compares_elementwise(self): + a = np.array([None, 1, None], dtype=object) + assert_equal(a == None, [True, False, True]) + assert_equal(a != None, [False, True, False]) + + a = np.ones(3) + assert_equal(a == None, [False, False, False]) + assert_equal(a != None, [True, True, True]) + + def test_array_equiv(self): + res = np.array_equiv(np.array([1, 2]), 
np.array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + + res = np.array_equiv(np.array([1, 1]), np.array([1])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([2])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + assert_(not res) + assert_(type(res) is bool) + + +def assert_array_strict_equal(x, y): + assert_array_equal(x, y) + # Check flags, 32 bit arches typically don't provide 16 byte alignment + if ((x.dtype.alignment <= 8 or + np.intp().dtype.itemsize != 4) and + sys.platform != 'win32'): + assert_(x.flags == y.flags) + else: + assert_(x.flags.owndata == y.flags.owndata) + assert_(x.flags.writeable == y.flags.writeable) + assert_(x.flags.c_contiguous == y.flags.c_contiguous) + assert_(x.flags.f_contiguous == y.flags.f_contiguous) + assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) + # check endianness + assert_(x.dtype.isnative == y.dtype.isnative) + + +class TestClip(object): + def setup(self): + self.nr = 5 + self.nc = 3 + + def fastclip(self, a, m, M, out=None): + if out is None: + return a.clip(m, M) + else: + return a.clip(m, M, out) + + def clip(self, a, m, M, out=None): + # use slow-clip + selector = np.less(a, m) + 2*np.greater(a, M) + return selector.choose((a, m, M), out=out) + + # Handy functions + def _generate_data(self, n, m): + return randn(n, m) + + def _generate_data_complex(self, n, m): + return randn(n, m) + 1.j * rand(n, m) + + def _generate_flt_data(self, n, m): + return (randn(n, m)).astype(np.float32) + + def _neg_byteorder(self, a): + a = np.asarray(a) + if sys.byteorder == 'little': + a = a.astype(a.dtype.newbyteorder('>')) + else: + a = a.astype(a.dtype.newbyteorder('<')) + return a + + def _generate_non_native_data(self, n, m): + data = randn(n, m) + data = self._neg_byteorder(data) + assert_(not data.dtype.isnative) + return data + + def _generate_int_data(self, n, m): + return (10 * rand(n, m)).astype(np.int64) + + def _generate_int32_data(self, n, m): + return (10 * rand(n, m)).astype(np.int32) + + # Now the real test cases + def test_simple_double(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.1 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_int(self): + # Test native int input with scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(int) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_array_double(self): + # Test native double input with array min/max. + a = self._generate_data(self.nr, self.nc) + m = np.zeros(a.shape) + M = m + 0.5 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_nonnative(self): + # Test non native double input with scalar min/max. 
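+ # [Editor's note -- illustrative, not part of the upstream test file:
+ # "non native" means byte-swapped storage, e.g. _neg_byteorder() on a
+ # little-endian machine returns data viewed as '>f8', so clip has to
+ # take a casting path instead of the fast native-double loop.]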
+ a = self._generate_non_native_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_equal(ac, act)
+
+ # Test native double input with non native double scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = self._neg_byteorder(0.6)
+ assert_(not M.dtype.isnative)
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_equal(ac, act)
+
+ def test_simple_complex(self):
+ # Test native complex input with native double scalar min/max.
+ a = 3 * self._generate_data_complex(self.nr, self.nc)
+ m = -0.5
+ M = 1.
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ # Test native input with complex double scalar min/max.
+ a = 3 * self._generate_data(self.nr, self.nc)
+ m = -0.5 + 1.j
+ M = 1. + 2.j
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_complex(self):
+ # Address Issue gh-5354 for clipping complex arrays
+ # Test native complex input without explicit min/max
+ # ie, either min=None or max=None
+ a = np.ones(10, dtype=complex)
+ m = a.min()
+ M = a.max()
+ am = self.fastclip(a, m, None)
+ aM = self.fastclip(a, None, M)
+ assert_array_strict_equal(am, a)
+ assert_array_strict_equal(aM, a)
+
+ def test_clip_non_contig(self):
+ # Test clip for non contiguous native input and native scalar min/max.
+ a = self._generate_data(self.nr * 2, self.nc * 3)
+ a = a[::2, ::3]
+ assert_(not a.flags['F_CONTIGUOUS'])
+ assert_(not a.flags['C_CONTIGUOUS'])
+ ac = self.fastclip(a, -1.6, 1.7)
+ act = self.clip(a, -1.6, 1.7)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_out(self):
+ # Test native double input with scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = np.zeros(a.shape)
+ act = np.zeros(a.shape)
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int32_inout(self):
+ # Test native int32 input with double min/max and int32 out.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.float64(0)
+ M = np.float64(2)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int64_out(self):
+ # Test native int32 input with int32 scalar min/max and int64 out.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.int32(-1)
+ M = np.int32(1)
+ ac = np.zeros(a.shape, dtype=np.int64)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int64_inout(self):
+ # Test native int32 input with double array min/max and int32 out.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.zeros(a.shape, np.float64)
+ M = np.float64(1)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int32_out(self):
+ # Test native double input with scalar min/max and int out.
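+ # [Editor's note -- illustrative, not part of the upstream test file:
+ # with an int32 `out` buffer the clipped doubles are cast into integers
+ # on assignment, so the fast clip and the choose()-based reference clip
+ # defined above must truncate the same way.]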
+ a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_inplace_01(self): + # Test native double input with array min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_simple_inplace_02(self): + # Test native double input with scalar min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_noncontig_inplace(self): + # Test non contiguous double input with double scalar min/max in-place. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_equal(a, ac) + + def test_type_cast_01(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_02(self): + # Test native int32 input with int32 scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(np.int32) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_03(self): + # Test native int32 input with float64 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = -2 + M = 4 + ac = self.fastclip(a, np.float64(m), np.float64(M)) + act = self.clip(a, np.float64(m), np.float64(M)) + assert_array_strict_equal(ac, act) + + def test_type_cast_04(self): + # Test native int32 input with float32 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float32(-2) + M = np.float32(4) + act = self.fastclip(a, m, M) + ac = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_05(self): + # Test native int32 with double arrays min/max. + a = self._generate_int_data(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m * np.zeros(a.shape), M) + act = self.clip(a, m * np.zeros(a.shape), M) + assert_array_strict_equal(ac, act) + + def test_type_cast_06(self): + # Test native with NON native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.5 + m_s = self._neg_byteorder(m) + M = 1. + act = self.clip(a, m_s, M) + ac = self.fastclip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_07(self): + # Test NON native with native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + act = a_s.clip(m, M) + ac = self.fastclip(a_s, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_08(self): + # Test NON native with native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + ac = self.fastclip(a_s, m, M) + act = a_s.clip(m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_09(self): + # Test native with NON native array min/max. 
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5 * np.ones(a.shape)
+ M = 1.
+ m_s = self._neg_byteorder(m)
+ assert_(not m_s.dtype.isnative)
+ ac = self.fastclip(a, m_s, M)
+ act = self.clip(a, m_s, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_10(self):
+ # Test native int32 with float min/max and float out for output argument.
+ a = self._generate_int_data(self.nr, self.nc)
+ b = np.zeros(a.shape, dtype=np.float32)
+ m = np.float32(-0.5)
+ M = np.float32(1)
+ act = self.clip(a, m, M, out=b)
+ ac = self.fastclip(a, m, M, out=b)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_11(self):
+ # Test non native with native scalar, min/max, out non native
+ a = self._generate_non_native_data(self.nr, self.nc)
+ b = a.copy()
+ b = b.astype(b.dtype.newbyteorder('>'))
+ bt = b.copy()
+ m = -0.5
+ M = 1.
+ self.fastclip(a, m, M, out=b)
+ self.clip(a, m, M, out=bt)
+ assert_array_strict_equal(b, bt)
+
+ def test_type_cast_12(self):
+ # Test native int32 input and min/max and float out
+ a = self._generate_int_data(self.nr, self.nc)
+ b = np.zeros(a.shape, dtype=np.float32)
+ m = np.int32(0)
+ M = np.int32(1)
+ act = self.clip(a, m, M, out=b)
+ ac = self.fastclip(a, m, M, out=b)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_simple(self):
+ # Test native double input with scalar min/max
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = np.zeros(a.shape)
+ act = np.zeros(a.shape)
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_simple2(self):
+ # Test native int32 input with double min/max and int32 out
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.float64(0)
+ M = np.float64(2)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_simple_int32(self):
+ # Test native int32 input with int32 scalar min/max and int64 out
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.int32(-1)
+ M = np.int32(1)
+ ac = np.zeros(a.shape, dtype=np.int64)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_array_int32(self):
+ # Test native int32 input with double array min/max and int32 out
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.zeros(a.shape, np.float64)
+ M = np.float64(1)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_array_outint32(self):
+ # Test native double input with scalar min/max and int out
+ a = self._generate_data(self.nr, self.nc)
+ m = -1.0
+ M = 2.0
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_transposed(self):
+ # Test that the out argument works when transposed
+ a = np.arange(16).reshape(4, 4)
+ out = np.empty_like(a).T
+ a.clip(4, 10, out=out)
+ expected = self.clip(a, 4, 10)
+ assert_array_equal(out, expected)
+
+ def test_clip_with_out_memory_overlap(self):
+ # Test that the out argument works when it has memory overlap
+ a = np.arange(16).reshape(4, 4)
+ ac = a.copy()
+ a[:-1].clip(4, 10, out=a[1:])
+ expected = self.clip(ac[:-1], 4, 10)
+ assert_array_equal(a[1:], expected)
+
+ def test_clip_inplace_array(self):
+ # Test native double input with array
min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_inplace_simple(self): + # Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_func_takes_out(self): + # Ensure that the clip() function takes an out=argument. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + a2 = np.clip(a, m, M, out=a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a2, ac) + assert_(a2 is a) + + def test_clip_nan(self): + d = np.arange(7.) + assert_equal(d.clip(min=np.nan), d) + assert_equal(d.clip(max=np.nan), d) + assert_equal(d.clip(min=np.nan, max=np.nan), d) + assert_equal(d.clip(min=-2, max=np.nan), d) + assert_equal(d.clip(min=np.nan, max=10), d) + + +class TestAllclose(object): + rtol = 1e-5 + atol = 1e-8 + + def setup(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown(self): + np.seterr(**self.olderr) + + def tst_allclose(self, x, y): + assert_(np.allclose(x, y), "%s and %s not close" % (x, y)) + + def tst_not_allclose(self, x, y): + assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y)) + + def test_ip_allclose(self): + # Parametric test factory. + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1+rtol+atol]), + (arr, arr + arr*rtol), + (arr, arr + arr*rtol + atol*2), + (aran, aran + aran*rtol), + (np.inf, np.inf), + (np.inf, [np.inf])] + + for (x, y) in data: + self.tst_allclose(x, y) + + def test_ip_not_allclose(self): + # Parametric test factory. 
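+ # [Editor's note -- illustrative, not part of the upstream test file:
+ # np.allclose treats x and y as close when |x - y| <= atol + rtol*|y|,
+ # so a pair like ([1], [1 + rtol + atol*2]) lands just outside that
+ # bound and has to be rejected here.]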
+ aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([np.inf, 0], [1, np.inf]), + ([np.inf, 0], [1, 0]), + ([np.inf, np.inf], [1, np.inf]), + ([np.inf, np.inf], [1, 0]), + ([-np.inf, 0], [np.inf, 0]), + ([np.nan, 0], [np.nan, 0]), + ([atol*2], [0]), + ([1], [1+rtol+atol*2]), + (aran, aran + aran*atol + atol*2), + (np.array([np.inf, 1]), np.array([0, np.inf]))] + + for (x, y) in data: + self.tst_not_allclose(x, y) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.allclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_min_int(self): + # Could make problems because of abs(min_int) == min_int + min_int = np.iinfo(np.int_).min + a = np.array([min_int], dtype=np.int_) + assert_(np.allclose(a, a)) + + def test_equalnan(self): + x = np.array([1.0, np.nan]) + assert_(np.allclose(x, x, equal_nan=True)) + + def test_return_class_is_ndarray(self): + # Issue gh-6475 + # Check that allclose does not preserve subtypes + class Foo(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + a = Foo([1]) + assert_(type(np.allclose(a, a)) is bool) + + +class TestIsclose(object): + rtol = 1e-5 + atol = 1e-8 + + def setup(self): + atol = self.atol + rtol = self.rtol + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + self.all_close_tests = [ + ([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr*rtol), + (arr, arr + arr*rtol + atol), + (aran, aran + aran*rtol), + (np.inf, np.inf), + (np.inf, [np.inf]), + ([np.inf, -np.inf], [np.inf, -np.inf]), + ] + self.none_close_tests = [ + ([np.inf, 0], [1, np.inf]), + ([np.inf, -np.inf], [1, 0]), + ([np.inf, np.inf], [1, -np.inf]), + ([np.inf, np.inf], [1, 0]), + ([np.nan, 0], [np.nan, -np.inf]), + ([atol*2], [0]), + ([1], [1 + rtol + atol*2]), + (aran, aran + rtol*1.1*aran + atol*1.1), + (np.array([np.inf, 1]), np.array([0, np.inf])), + ] + self.some_close_tests = [ + ([np.inf, 0], [np.inf, atol*2]), + ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]), + (np.arange(3), [0, 1, 2.1]), + (np.nan, [np.nan, np.nan, np.nan]), + ([0], [atol, np.inf, -np.inf, np.nan]), + (0, [atol, np.inf, -np.inf, np.nan]), + ] + self.some_close_results = [ + [True, False], + [True, False, False], + [True, True, False], + [False, False, False], + [True, False, False, False], + [True, False, False, False], + ] + + def test_ip_isclose(self): + self.setup() + tests = self.some_close_tests + results = self.some_close_results + for (x, y), result in zip(tests, results): + assert_array_equal(np.isclose(x, y), result) + + def tst_all_isclose(self, x, y): + assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y)) + + def tst_none_isclose(self, x, y): + msg = "%s and %s shouldn't be close" + assert_(not np.any(np.isclose(x, y)), msg % (x, y)) + + def tst_isclose_allclose(self, x, y): + msg = "isclose.all() and allclose aren't same for %s and %s" + msg2 = "isclose and allclose aren't same for %s and %s" + if np.isscalar(x) and np.isscalar(y): + assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) + else: + assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + + def test_ip_all_isclose(self): + self.setup() + for (x, y) in self.all_close_tests: + self.tst_all_isclose(x, y) + + def test_ip_none_isclose(self): + self.setup() + for (x, y) in self.none_close_tests: + self.tst_none_isclose(x, y) + + def 
test_ip_isclose_allclose(self): + self.setup() + tests = (self.all_close_tests + self.none_close_tests + + self.some_close_tests) + for (x, y) in tests: + self.tst_isclose_allclose(x, y) + + def test_equal_nan(self): + assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True]) + arr = np.array([1.0, np.nan]) + assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True]) + + def test_masked_arrays(self): + # Make sure to test the output type when arguments are interchanged. + + x = np.ma.masked_where([True, True, False], np.arange(3)) + assert_(type(x) is type(np.isclose(2, x))) + assert_(type(x) is type(np.isclose(x, 2))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan]) + assert_(type(x) is type(np.isclose(np.inf, x))) + assert_(type(x) is type(np.isclose(x, np.inf))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(np.nan, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + y = np.isclose(x, np.nan, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(x, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + def test_scalar_return(self): + assert_(np.isscalar(np.isclose(1, 1))) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.isclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_non_finite_scalar(self): + # GH7014, when two scalars are compared the output should also be a + # scalar + assert_(np.isclose(np.inf, -np.inf) is np.False_) + assert_(np.isclose(0, np.inf) is np.False_) + assert_(type(np.isclose(0, np.inf)) is np.bool_) + + +class TestStdVar(object): + def setup(self): + self.A = np.array([1, -1, 1, -1]) + self.real_var = 1 + + def test_basic(self): + assert_almost_equal(np.var(self.A), self.real_var) + assert_almost_equal(np.std(self.A)**2, self.real_var) + + def test_scalars(self): + assert_equal(np.var(1), 0) + assert_equal(np.std(1), 0) + + def test_ddof1(self): + assert_almost_equal(np.var(self.A, ddof=1), + self.real_var*len(self.A)/float(len(self.A)-1)) + assert_almost_equal(np.std(self.A, ddof=1)**2, + self.real_var*len(self.A)/float(len(self.A)-1)) + + def test_ddof2(self): + assert_almost_equal(np.var(self.A, ddof=2), + self.real_var*len(self.A)/float(len(self.A)-2)) + assert_almost_equal(np.std(self.A, ddof=2)**2, + self.real_var*len(self.A)/float(len(self.A)-2)) + + def test_out_scalar(self): + d = np.arange(10) + out = np.array(0.) + r = np.std(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.var(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.mean(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + + +class TestStdVarComplex(object): + def test_basic(self): + A = np.array([1, 1.j, -1, -1.j]) + real_var = 1 + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) + + def test_scalars(self): + assert_equal(np.var(1j), 0) + assert_equal(np.std(1j), 0) + + +class TestCreationFuncs(object): + # Test ones, zeros, empty and full. 
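+ # [Editor's note -- illustrative, not part of the upstream test file:
+ # check_function below sweeps the cross product of sizes (0, 1, 2),
+ # ndim 0..9, C/F memory order and every fixed-size dtype, checking the
+ # dtype, the contiguity flag and (for np.full) the fill value.]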
+
+ def setup(self):
+ dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
+ # void, bytes, str
+ variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
+ self.dtypes = sorted(dtypes - variable_sized |
+ {np.dtype(tp.str.replace("0", str(i)))
+ for tp in variable_sized for i in range(1, 10)},
+ key=lambda dtype: dtype.str)
+ self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
+ self.ndims = 10
+
+ def check_function(self, func, fill_value=None):
+ par = ((0, 1, 2),
+ range(self.ndims),
+ self.orders,
+ self.dtypes)
+ fill_kwarg = {}
+ if fill_value is not None:
+ fill_kwarg = {'fill_value': fill_value}
+
+ for size, ndims, order, dtype in itertools.product(*par):
+ shape = ndims * [size]
+
+ # do not fill void type
+ if fill_kwarg and dtype.str.startswith('|V'):
+ continue
+
+ arr = func(shape, order=order, dtype=dtype,
+ **fill_kwarg)
+
+ assert_equal(arr.dtype, dtype)
+ assert_(getattr(arr.flags, self.orders[order]))
+
+ if fill_value is not None:
+ if dtype.str.startswith('|S'):
+ val = str(fill_value)
+ else:
+ val = fill_value
+ assert_equal(arr, dtype.type(val))
+
+ def test_zeros(self):
+ self.check_function(np.zeros)
+
+ def test_ones(self):
+ self.check_function(np.ones)
+
+ def test_empty(self):
+ self.check_function(np.empty)
+
+ def test_full(self):
+ self.check_function(np.full, 0)
+ self.check_function(np.full, 1)
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_for_reference_leak(self):
+ # Make sure we have an object for reference
+ dim = 1
+ beg = sys.getrefcount(dim)
+ np.zeros([dim]*10)
+ assert_(sys.getrefcount(dim) == beg)
+ np.ones([dim]*10)
+ assert_(sys.getrefcount(dim) == beg)
+ np.empty([dim]*10)
+ assert_(sys.getrefcount(dim) == beg)
+ np.full([dim]*10, 0)
+ assert_(sys.getrefcount(dim) == beg)
+
+
+ class TestLikeFuncs(object):
+ '''Test ones_like, zeros_like, empty_like and full_like'''
+
+ def setup(self):
+ self.data = [
+ # Array scalars
+ (np.array(3.), None),
+ (np.array(3), 'f8'),
+ # 1D arrays
+ (np.arange(6, dtype='f4'), None),
+ (np.arange(6), 'c16'),
+ # 2D C-layout arrays
+ (np.arange(6).reshape(2, 3), None),
+ (np.arange(6).reshape(3, 2), 'i1'),
+ # 2D F-layout arrays
+ (np.arange(6).reshape((2, 3), order='F'), None),
+ (np.arange(6).reshape((3, 2), order='F'), 'i1'),
+ # 3D C-layout arrays
+ (np.arange(24).reshape(2, 3, 4), None),
+ (np.arange(24).reshape(4, 3, 2), 'f4'),
+ # 3D F-layout arrays
+ (np.arange(24).reshape((2, 3, 4), order='F'), None),
+ (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
+ # 3D non-C/F-layout arrays
+ (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
+ (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
+ ]
+
+ def compare_array_value(self, dz, value, fill_value):
+ if value is not None:
+ if fill_value:
+ try:
+ z = dz.dtype.type(value)
+ except OverflowError:
+ pass
+ else:
+ assert_(np.all(dz == z))
+ else:
+ assert_(np.all(dz == value))
+
+ def check_like_function(self, like_function, value, fill_value=False):
+ if fill_value:
+ fill_kwarg = {'fill_value': value}
+ else:
+ fill_kwarg = {}
+ for d, dtype in self.data:
+ # default (K) order, dtype
+ dz = like_function(d, dtype=dtype, **fill_kwarg)
+ assert_equal(dz.shape, d.shape)
+ assert_equal(np.array(dz.strides)*d.dtype.itemsize,
+ np.array(d.strides)*dz.dtype.itemsize)
+ assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
+ assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
+ if dtype is None:
+ assert_equal(dz.dtype, d.dtype)
+ else:
+ assert_equal(dz.dtype,
np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # C order, default dtype + dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # F order, default dtype + dz = like_function(d, order='F', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # A order + dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + if d.flags.f_contiguous: + assert_(dz.flags.f_contiguous) + else: + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # Test the 'subok' parameter + class MyNDArray(np.ndarray): + pass + + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + + b = like_function(a, **fill_kwarg) + assert_(type(b) is MyNDArray) + + b = like_function(a, subok=False, **fill_kwarg) + assert_(type(b) is not MyNDArray) + + def test_ones_like(self): + self.check_like_function(np.ones_like, 1) + + def test_zeros_like(self): + self.check_like_function(np.zeros_like, 0) + + def test_empty_like(self): + self.check_like_function(np.empty_like, None) + + def test_filled_like(self): + self.check_like_function(np.full_like, 0, True) + self.check_like_function(np.full_like, 1, True) + self.check_like_function(np.full_like, 1000, True) + self.check_like_function(np.full_like, 123.456, True) + self.check_like_function(np.full_like, np.inf, True) + + +class TestCorrelate(object): + def _setup(self, dt): + self.x = np.array([1, 2, 3, 4, 5], dtype=dt) + self.xs = np.arange(1, 20)[::3] + self.y = np.array([-1, -2, -3], dtype=dt) + self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt) + self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) + self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) + self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) + self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) + self.zs = np.array([-3., -14., -30., -48., -66., -84., + -102., -54., -19.], dtype=dt) + + def test_float(self): + self._setup(float) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.x, self.y[:-1], 'full') + assert_array_almost_equal(z, self.z1_4) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + z = np.correlate(self.x[::-1], self.y, 'full') + assert_array_almost_equal(z, self.z1r) + z = np.correlate(self.y, self.x[::-1], 'full') + assert_array_almost_equal(z, self.z2r) + z = np.correlate(self.xs, self.y, 'full') + assert_array_almost_equal(z, self.zs) + + def test_object(self): + self._setup(Decimal) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.correlate(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_complex(self): + x = np.array([1, 2, 3, 4+1j], dtype=complex) + y = np.array([-1, -2j, 3+1j], 
dtype=complex) + r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex) + r_z = r_z[::-1].conjugate() + z = np.correlate(y, x, mode='full') + assert_array_almost_equal(z, r_z) + + +class TestConvolve(object): + def test_object(self): + d = [1.] * 100 + k = [1.] * 3 + assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3)) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.convolve(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + +class TestArgwhere(object): + def test_2D(self): + x = np.arange(6).reshape((2, 3)) + assert_array_equal(np.argwhere(x > 1), + [[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + def test_list(self): + assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) + + +class TestStringFunction(object): + + def test_set_string_function(self): + a = np.array([1]) + np.set_string_function(lambda x: "FOO", repr=True) + assert_equal(repr(a), "FOO") + np.set_string_function(None, repr=True) + assert_equal(repr(a), "array([1])") + + np.set_string_function(lambda x: "FOO", repr=False) + assert_equal(str(a), "FOO") + np.set_string_function(None, repr=False) + assert_equal(str(a), "[1]") + + +class TestRoll(object): + def test_roll1d(self): + x = np.arange(10) + xr = np.roll(x, 2) + assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) + + def test_roll2d(self): + x2 = np.reshape(np.arange(10), (2, 5)) + x2r = np.roll(x2, 1) + assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) + + x2r = np.roll(x2, 1, axis=0) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, 1, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + # Roll multiple axes at once. + x2r = np.roll(x2, 1, axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (-1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (0, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + x2r = np.roll(x2, (0, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]])) + + x2r = np.roll(x2, (1, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (-1, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]])) + + # Roll the same axis multiple times. + x2r = np.roll(x2, 1, axis=(0, 0)) + assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])) + + x2r = np.roll(x2, 1, axis=(1, 1)) + assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]])) + + # Roll more than one turn in either direction. 
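+ # [Editor's note -- illustrative, not part of the upstream test file:
+ # shifts are taken modulo the axis length, so on this length-5 axis a
+ # roll of 6 and a roll of -4 both reduce to a roll of 1.]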
+ x2r = np.roll(x2, 6, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, -4, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ def test_roll_empty(self):
+ x = np.array([])
+ assert_equal(np.roll(x, 1), np.array([]))
+
+
+ class TestRollaxis(object):
+
+ # expected shape indexed by (axis, start) for array of
+ # shape (1, 2, 3, 4)
+ tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
+ (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
+ (0, 4): (2, 3, 4, 1),
+ (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
+ (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
+ (1, 4): (1, 3, 4, 2),
+ (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
+ (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
+ (2, 4): (1, 2, 4, 3),
+ (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
+ (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
+ (3, 4): (1, 2, 3, 4)}
+
+ def test_exceptions(self):
+ a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
+ assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
+ assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
+ assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
+ assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
+
+ def test_results(self):
+ a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
+ aind = np.indices(a.shape)
+ assert_(a.flags['OWNDATA'])
+ for (i, j) in self.tgtshape:
+ # positive axis, positive start
+ res = np.rollaxis(a, axis=i, start=j)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))
+ assert_(not res.flags['OWNDATA'])
+
+ # negative axis, positive start
+ ip = i + 1
+ res = np.rollaxis(a, axis=-ip, start=j)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(4 - ip, j)])
+ assert_(not res.flags['OWNDATA'])
+
+ # positive axis, negative start
+ jp = j + 1 if j < 4 else j
+ res = np.rollaxis(a, axis=i, start=-jp)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(i, 4 - jp)])
+ assert_(not res.flags['OWNDATA'])
+
+ # negative axis, negative start
+ ip = i + 1
+ jp = j + 1 if j < 4 else j
+ res = np.rollaxis(a, axis=-ip, start=-jp)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
+ assert_(not res.flags['OWNDATA'])
+
+
+ class TestMoveaxis(object):
+ def test_move_to_end(self):
+ x = np.random.randn(5, 6, 7)
+ for source, expected in [(0, (6, 7, 5)),
+ (1, (5, 7, 6)),
+ (2, (5, 6, 7)),
+ (-1, (5, 6, 7))]:
+ actual = np.moveaxis(x, source, -1).shape
+ assert_equal(actual, expected)
+
+ def test_move_new_position(self):
+ x = np.random.randn(1, 2, 3, 4)
+ for source, destination, expected in [
+ (0, 1, (2, 1, 3, 4)),
+ (1, 2, (1, 3, 2, 4)),
+ (1, -1, (1, 3, 4, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, expected)
+
+ def test_preserve_order(self):
+ x = np.zeros((1, 2, 3, 4))
+ for source, destination in [
+ (0, 0),
+ (3, -1),
+ (-1, 3),
+ ([0, -1], [0, -1]),
+ ([2, 0], [2, 0]),
+ (range(4), range(4)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, (1, 2, 3, 4))
+
+ def test_move_multiples(self):
+ x = np.zeros((0, 1, 2, 3))
+ for source, destination, expected in [
+ ([0, 1], [2, 3], (2, 3, 0, 1)),
+ ([2, 3], [0, 1], (2, 3, 0, 1)),
+ ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
+ ([3, 0], [1, 0], (0, 3, 1, 2)),
+ ([0, 3], [0, 1], (0, 3, 1, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, expected)
+
+ def test_errors(self):
+ x = np.random.randn(1, 2, 3)
+ assert_raises_regex(np.AxisError, 'source.*out of bounds',
+ np.moveaxis, x, 3, 0)
+ assert_raises_regex(np.AxisError, 'source.*out of bounds',
+ np.moveaxis, x, -4, 0)
+ assert_raises_regex(np.AxisError, 'destination.*out of bounds',
+ np.moveaxis, x, 0, 5)
+ assert_raises_regex(ValueError, 'repeated axis in `source`',
+ np.moveaxis, x, [0, 0], [0, 1])
+ assert_raises_regex(ValueError, 'repeated axis in `destination`',
+ np.moveaxis, x, [0, 1], [1, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, 0, [0, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, [0, 1], [0])
+
+ def test_array_likes(self):
+ x = np.ma.zeros((1, 2, 3))
+ result = np.moveaxis(x, 0, 0)
+ assert_equal(x.shape, result.shape)
+ assert_(isinstance(result, np.ma.MaskedArray))
+
+ x = [1, 2, 3]
+ result = np.moveaxis(x, 0, 0)
+ assert_equal(x, list(result))
+ assert_(isinstance(result, np.ndarray))
+
+
+ class TestCross(object):
+ def test_2x2(self):
+ u = [1, 2]
+ v = [3, 4]
+ z = -2
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_2x3(self):
+ u = [1, 2]
+ v = [3, 4, 5]
+ z = np.array([10, -5, -2])
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_3x3(self):
+ u = [1, 2, 3]
+ v = [4, 5, 6]
+ z = np.array([-3, 6, -3])
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_broadcasting(self):
+ # Ticket #2624 (Trac #2032)
+ u = np.tile([1, 2], (11, 1))
+ v = np.tile([3, 4], (11, 1))
+ z = -2
+ assert_equal(np.cross(u, v), z)
+ assert_equal(np.cross(v, u), -z)
+ assert_equal(np.cross(u, u), 0)
+
+ u = np.tile([1, 2], (11, 1)).T
+ v = np.tile([3, 4, 5], (11, 1))
+ z = np.tile([10, -5, -2], (11, 1))
+ assert_equal(np.cross(u, v, axisa=0), z)
+ assert_equal(np.cross(v, u.T), -z)
+ assert_equal(np.cross(v, v), 0)
+
+ u = np.tile([1, 2, 3], (11, 1)).T
+ v = np.tile([3, 4], (11, 1)).T
+ z = np.tile([-12, 9, -2], (11, 1))
+ assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
+ assert_equal(np.cross(v.T, u.T), -z)
+ assert_equal(np.cross(u.T, u.T), 0)
+
+ u = np.tile([1, 2, 3], (5, 1))
+ v = np.tile([4, 5, 6], (5, 1)).T
+ z = np.tile([-3, 6, -3], (5, 1))
+ assert_equal(np.cross(u, v, axisb=0), z)
+ assert_equal(np.cross(v.T, u), -z)
+ assert_equal(np.cross(u, u), 0)
+
+ def test_broadcasting_shapes(self):
+ u = np.ones((2, 1, 3))
+ v = np.ones((5, 3))
+ assert_equal(np.cross(u, v).shape, (2, 5, 3))
+ u = np.ones((10, 3, 5))
+ v = np.ones((2, 5))
+ assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
+ assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)
+ u = np.ones((10, 3, 5, 7))
+ v = np.ones((5, 7, 2))
+ assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
+ assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)
+ # gh-5885
+ u = np.ones((3, 4, 2))
+ for axisc in range(-2, 2):
+ assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
+
+
+ def test_outer_out_param():
+ arr1 = np.ones((5,))
+ arr2 = np.ones((2,))
+ arr3 = np.linspace(-2, 2, 5)
+ out1 = np.ndarray(shape=(5,5))
+ out2 = np.ndarray(shape=(2, 5))
+ res1 = np.outer(arr1, arr3, out1)
+ assert_equal(res1, out1)
+ assert_equal(np.outer(arr2,
arr3, out2), out2) + + +class TestRequire(object): + flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS', + 'F', 'F_CONTIGUOUS', 'FORTRAN', + 'A', 'ALIGNED', + 'W', 'WRITEABLE', + 'O', 'OWNDATA'] + + def generate_all_false(self, dtype): + arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)]) + arr.setflags(write=False) + a = arr['a'] + assert_(not a.flags['C']) + assert_(not a.flags['F']) + assert_(not a.flags['O']) + assert_(not a.flags['W']) + assert_(not a.flags['A']) + return a + + def set_and_check_flag(self, flag, dtype, arr): + if dtype is None: + dtype = arr.dtype + b = np.require(arr, dtype, [flag]) + assert_(b.flags[flag]) + assert_(b.dtype == dtype) + + # a further call to np.require ought to return the same array + # unless OWNDATA is specified. + c = np.require(b, None, [flag]) + if flag[0] != 'O': + assert_(c is b) + else: + assert_(c.flags[flag]) + + def test_require_each(self): + + id = ['f8', 'i4'] + fd = [None, 'f8', 'c16'] + for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names): + a = self.generate_all_false(idtype) + self.set_and_check_flag(flag, fdtype, a) + + def test_unknown_requirement(self): + a = self.generate_all_false('f8') + assert_raises(KeyError, np.require, a, None, 'Q') + + def test_non_array_input(self): + a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O']) + assert_(a.flags['O']) + assert_(a.flags['C']) + assert_(a.flags['A']) + assert_(a.dtype == 'i4') + assert_equal(a, [1, 2, 3, 4]) + + def test_C_and_F_simul(self): + a = self.generate_all_false('f8') + assert_raises(ValueError, np.require, a, None, ['C', 'F']) + + def test_ensure_array(self): + class ArraySubclass(np.ndarray): + pass + + a = ArraySubclass((2, 2)) + b = np.require(a, None, ['E']) + assert_(type(b) is np.ndarray) + + def test_preserve_subtype(self): + class ArraySubclass(np.ndarray): + pass + + for flag in self.flag_names: + a = ArraySubclass((2, 2)) + self.set_and_check_flag(flag, None, a) + + +class TestBroadcast(object): + def test_broadcast_in_args(self): + # gh-5881 + arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), + np.empty((5, 1, 7))] + mits = [np.broadcast(*arrs), + np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])), + np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])] + for mit in mits: + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 4) + for a, ia in zip(arrs, mit.iters): + assert_(a is ia.base) + + def test_broadcast_single_arg(self): + # gh-6899 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 1) + assert_(arrs[0] is mit.iters[0].base) + + def test_number_of_arguments(self): + arr = np.empty((5,)) + for j in range(35): + arrs = [arr] * j + if j < 1 or j > 32: + assert_raises(ValueError, np.broadcast, *arrs) + else: + mit = np.broadcast(*arrs) + assert_equal(mit.numiter, j) + + +class TestKeepdims(object): + + class sub_array(np.ndarray): + def sum(self, axis=None, dtype=None, out=None): + return np.ndarray.sum(self, axis, dtype, out, keepdims=True) + + def test_raise(self): + sub_class = self.sub_array + x = np.arange(30).view(sub_class) + assert_raises(TypeError, np.sum, x, keepdims=True) + + +class TestTensordot(object): + + def test_zero_dimension(self): + # Test resolution to issue #5663 + a = np.ndarray((3,0)) + b = np.ndarray((0,4)) + td = np.tensordot(a, b, (1, 0)) + assert_array_equal(td, np.dot(a, b)) + 
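# [illustrative aside, not part of the vendored file] with shapes (3, 0) and
# (0, 4) the contracted axis has length zero, so dot, tensordot and einsum
# must all agree on a (3, 4) result of zeros: each output cell is a sum over
# an empty axis, which is 0.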
assert_array_equal(td, np.einsum('ij,jk', a, b)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.pyc new file mode 100644 index 0000000..ddf73ce Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.py new file mode 100644 index 0000000..71f7b71 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.py @@ -0,0 +1,499 @@ +from __future__ import division, absolute_import, print_function + +import sys +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', (u'NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), b'dd', (u'OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + + +byteorder = {'little':'<', 'big':'>'}[sys.byteorder] + +def normalize_descr(descr): + "Normalize a description adding the platform byteorder." 
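# [illustrative aside, not part of the vendored file] on a little-endian
# machine this helper turns [('x', 'i4', (2,)), ('z', 'u1')] into
# [('x', '<i4', (2,)), ('z', '|u1')]: multi-byte kinds gain the platform
# '<'/'>' prefix, one-byte kinds (and 'S'/'V'/'b') the byteorder-neutral '|',
# which is exactly what ndarray.dtype.descr reports and what the assertions
# in the classes below compare against.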
+ + out = [] + for item in descr: + dtype = item[1] + if isinstance(dtype, str): + if dtype[0] not in ['|', '<', '>']: + onebyte = dtype[1:] == "1" + if onebyte or dtype[0] in ['S', 'V', 'b']: + dtype = "|" + dtype + else: + dtype = byteorder + dtype + if len(item) > 2 and np.prod(item[2]) > 1: + nitem = (item[0], dtype, item[2]) + else: + nitem = (item[0], dtype) + out.append(nitem) + elif isinstance(dtype, list): + l = normalize_descr(dtype) + out.append((item[0], l)) + else: + raise ValueError("Expected a str or list and got %s" % + (type(item))) + return out + + +############################################################ +# Creation tests +############################################################ + +class CreateZeros(object): + """Check the creation of heterogeneous arrays zero-valued""" + + def test_zeros0D(self): + """Check creation of 0-dimensional objects""" + h = np.zeros((), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype.fields['x'][0].name[:4] == 'void') + assert_(h.dtype.fields['x'][0].char == 'V') + assert_(h.dtype.fields['x'][0].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((), dtype='u1')) + + def test_zerosSD(self): + """Check creation of single-dimensional objects""" + h = np.zeros((2,), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['y'].name[:4] == 'void') + assert_(h.dtype['y'].char == 'V') + assert_(h.dtype['y'].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2,), dtype='u1')) + + def test_zerosMD(self): + """Check creation of multi-dimensional objects""" + h = np.zeros((2, 3), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['z'].name == 'uint8') + assert_(h.dtype['z'].char == 'B') + assert_(h.dtype['z'].type == np.uint8) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) + + +class TestCreateZerosPlain(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (plain)""" + _descr = Pdescr + +class TestCreateZerosNested(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (nested)""" + _descr = Ndescr + + +class CreateValues(object): + """Check the creation of heterogeneous arrays with values""" + + def test_tuple(self): + """Check creation from tuples""" + h = np.array(self._buffer, dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (2,)) + else: + assert_(h.shape == ()) + + def test_list_of_tuple(self): + """Check creation from list of tuples""" + h = np.array([self._buffer], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 2)) + else: + assert_(h.shape == (1,)) + + def test_list_of_list_of_tuple(self): + """Check creation from list of list of tuples""" + h = np.array([[self._buffer]], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 1, 2)) + else: + assert_(h.shape == (1, 1)) + + +class TestCreateValuesPlainSingle(CreateValues): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestCreateValuesPlainMultiple(CreateValues): + """Check the creation of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class 
TestCreateValuesNestedSingle(CreateValues): + """Check the creation of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = 0 + _buffer = NbufferT[0] + +class TestCreateValuesNestedMultiple(CreateValues): + """Check the creation of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = 1 + _buffer = NbufferT + + +############################################################ +# Reading tests +############################################################ + +class ReadValuesPlain(object): + """Check the reading of values in heterogeneous arrays (plain)""" + + def test_access_fields(self): + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][1], + self._buffer[1][1]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][2], + self._buffer[1][2]], dtype='u1')) + + +class TestReadValuesPlainSingle(ReadValuesPlain): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestReadValuesPlainMultiple(ReadValuesPlain): + """Check the values of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class ReadValuesNested(object): + """Check the reading of values in heterogeneous arrays (nested)""" + + def test_access_top_fields(self): + """Check reading the top fields of a nested array""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[4], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][4], + self._buffer[1][4]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][5], + self._buffer[1][5]], dtype='u1')) + + def test_nested1_acessors(self): + """Check reading the nested fields of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_equal(h['Info']['value'], + np.array(self._buffer[1][0], dtype='c16')) + assert_equal(h['Info']['y2'], + np.array(self._buffer[1][1], dtype='f8')) + assert_equal(h['info']['Name'], + np.array(self._buffer[3][0], dtype='U2')) + assert_equal(h['info']['Value'], + np.array(self._buffer[3][1], dtype='c16')) + else: + assert_equal(h['Info']['value'], + np.array([self._buffer[0][1][0], + self._buffer[1][1][0]], + dtype='c16')) + assert_equal(h['Info']['y2'], + np.array([self._buffer[0][1][1], + self._buffer[1][1][1]], + dtype='f8')) + assert_equal(h['info']['Name'], + np.array([self._buffer[0][3][0], + self._buffer[1][3][0]], + dtype='U2')) + assert_equal(h['info']['Value'], + np.array([self._buffer[0][3][1], + self._buffer[1][3][1]], + dtype='c16')) + + def test_nested2_acessors(self): + """Check reading the nested fields of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + 
assert_equal(h['Info']['Info2']['value'], + np.array(self._buffer[1][2][1], dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array(self._buffer[1][2][3], dtype='u4')) + else: + assert_equal(h['Info']['Info2']['value'], + np.array([self._buffer[0][1][2][1], + self._buffer[1][1][2][1]], + dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array([self._buffer[0][1][2][3], + self._buffer[1][1][2][3]], + dtype='u4')) + + def test_nested1_descriptor(self): + """Check access nested descriptors of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['value'].name == 'complex128') + assert_(h.dtype['Info']['y2'].name == 'float64') + if sys.version_info[0] >= 3: + assert_(h.dtype['info']['Name'].name == 'str256') + else: + assert_(h.dtype['info']['Name'].name == 'unicode256') + assert_(h.dtype['info']['Value'].name == 'complex128') + + def test_nested2_descriptor(self): + """Check access nested descriptors of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['Info2']['value'].name == 'void256') + assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') + + +class TestReadValuesNestedSingle(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = False + _buffer = NbufferT[0] + +class TestReadValuesNestedMultiple(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = True + _buffer = NbufferT + +class TestEmptyField(object): + def test_assign(self): + a = np.arange(10, dtype=np.float32) + a.dtype = [("int", "<0i4"), ("float", "<2f4")] + assert_(a['int'].shape == (5, 0)) + assert_(a['float'].shape == (5, 2)) + +class TestCommonType(object): + def test_scalar_loses1(self): + res = np.find_common_type(['f4', 'f4', 'i2'], ['f8']) + assert_(res == 'f4') + + def test_scalar_loses2(self): + res = np.find_common_type(['f4', 'f4'], ['i8']) + assert_(res == 'f4') + + def test_scalar_wins(self): + res = np.find_common_type(['f4', 'f4', 'i2'], ['c8']) + assert_(res == 'c8') + + def test_scalar_wins2(self): + res = np.find_common_type(['u4', 'i4', 'i4'], ['f4']) + assert_(res == 'f8') + + def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose + res = np.find_common_type(['u8', 'i8', 'i8'], ['f8']) + assert_(res == 'f8') + +class TestMultipleFields(object): + def setup(self): + self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + + def _bad_call(self): + return self.ary['f0', 'f1'] + + def test_no_tuple(self): + assert_raises(IndexError, self._bad_call) + + def test_return(self): + res = self.ary[['f0', 'f2']].tolist() + assert_(res == [(1, 3), (5, 7)]) + + +class TestIsSubDType(object): + # scalar types can be promoted into dtypes + wrappers = [np.dtype, lambda x: x] + + def test_both_abstract(self): + assert_(np.issubdtype(np.floating, np.inexact)) + assert_(not np.issubdtype(np.inexact, np.floating)) + + def test_same(self): + for cls in (np.float32, np.int32): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(np.issubdtype(w1(cls), w2(cls))) + + def test_subclass(self): + # note we cannot promote floating to a dtype, as it would turn into a + # concrete type + for w in self.wrappers: + assert_(np.issubdtype(w(np.float32), np.floating)) + assert_(np.issubdtype(w(np.float64), np.floating)) + + def test_subclass_backwards(self): + for w in self.wrappers: + assert_(not np.issubdtype(np.floating, 
w(np.float32))) + assert_(not np.issubdtype(np.floating, w(np.float64))) + + def test_sibling_class(self): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(not np.issubdtype(w1(np.float32), w2(np.float64))) + assert_(not np.issubdtype(w1(np.float64), w2(np.float32))) + + +class TestSctypeDict(object): + def test_longdouble(self): + assert_(np.sctypeDict['f8'] is not np.longdouble) + assert_(np.sctypeDict['c16'] is not np.clongdouble) + + +class TestBitName(object): + def test_abstract(self): + assert_raises(ValueError, np.core.numerictypes.bitname, np.floating) + + +class TestMaximumSctype(object): + + # note that parametrizing with sctype['int'] and similar would skip types + # with the same size (gh-11923) + + @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong]) + def test_int(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1]) + + @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong]) + def test_uint(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1]) + + @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) + def test_float(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1]) + + @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) + def test_complex(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1]) + + @pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void]) + def test_other(self, t): + assert_equal(np.maximum_sctype(t), t) + + +class Test_sctype2char(object): + # This function is old enough that we're really just documenting the quirks + # at this point. + + def test_scalar_type(self): + assert_equal(np.sctype2char(np.double), 'd') + assert_equal(np.sctype2char(np.int_), 'l') + assert_equal(np.sctype2char(np.unicode_), 'U') + assert_equal(np.sctype2char(np.bytes_), 'S') + + def test_other_type(self): + assert_equal(np.sctype2char(float), 'd') + assert_equal(np.sctype2char(list), 'O') + assert_equal(np.sctype2char(np.ndarray), 'O') + + def test_third_party_scalar_type(self): + from numpy.core._rational_tests import rational + assert_raises(KeyError, np.sctype2char, rational) + assert_raises(KeyError, np.sctype2char, rational(1)) + + def test_array_instance(self): + assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd') + + def test_abstract_type(self): + assert_raises(KeyError, np.sctype2char, np.floating) + + def test_non_type(self): + assert_raises(ValueError, np.sctype2char, 1) + +@pytest.mark.parametrize("rep, expected", [ + (np.int32, True), + (list, False), + (1.1, False), + (str, True), + (np.dtype(np.float64), True), + (np.dtype((np.int16, (3, 4))), True), + (np.dtype([('a', np.int8)]), True), + ]) +def test_issctype(rep, expected): + # ensure proper identification of scalar + # data-types by issctype() + actual = np.issctype(rep) + assert_equal(actual, expected) + + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") +class TestDocStrings(object): + def test_platform_dependent_aliases(self): + if np.int64 is np.int_: + assert_('int64' in np.int_.__doc__) + elif np.int64 is np.longlong: + assert_('int64' in np.longlong.__doc__) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.pyc new file mode 100644 index 0000000..486c537 Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.py new file mode 100644 index 0000000..8f1c165 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.py @@ -0,0 +1,392 @@ +from __future__ import division, absolute_import, print_function + +import inspect +import sys + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_raises_regex) +from numpy.core.overrides import ( + _get_implementing_args, array_function_dispatch, + verify_matching_signatures, ENABLE_ARRAY_FUNCTION) +from numpy.core.numeric import pickle +import pytest + + +requires_array_function = pytest.mark.skipif( + not ENABLE_ARRAY_FUNCTION, + reason="__array_function__ dispatch not enabled.") + + +def _return_not_implemented(self, *args, **kwargs): + return NotImplemented + + +# need to define this at the top level to test pickling +@array_function_dispatch(lambda array: (array,)) +def dispatched_one_arg(array): + """Docstring.""" + return 'original' + + +@array_function_dispatch(lambda array1, array2: (array1, array2)) +def dispatched_two_arg(array1, array2): + """Docstring.""" + return 'original' + + +@requires_array_function +class TestGetImplementingArgs(object): + + def test_ndarray(self): + array = np.array(1) + + args = _get_implementing_args([array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, 1]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([1, array]) + assert_equal(list(args), [array]) + + def test_ndarray_subclasses(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + array = np.array(1).view(np.ndarray) + override_sub = np.array(1).view(OverrideSub) + no_override_sub = np.array(1).view(NoOverrideSub) + + args = _get_implementing_args([array, override_sub]) + assert_equal(list(args), [override_sub, array]) + + args = _get_implementing_args([array, no_override_sub]) + assert_equal(list(args), [no_override_sub, array]) + + args = _get_implementing_args( + [override_sub, no_override_sub]) + assert_equal(list(args), [override_sub, no_override_sub]) + + def test_ndarray_and_duck_array(self): + + class Other(object): + __array_function__ = _return_not_implemented + + array = np.array(1) + other = Other() + + args = _get_implementing_args([other, array]) + assert_equal(list(args), [other, array]) + + args = _get_implementing_args([array, other]) + assert_equal(list(args), [array, other]) + + def test_ndarray_subclass_and_duck_array(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class Other(object): + __array_function__ = _return_not_implemented + + array = np.array(1) + subarray = np.array(1).view(OverrideSub) + other = Other() + + assert_equal(_get_implementing_args([array, subarray, other]), + [subarray, array, other]) + assert_equal(_get_implementing_args([array, other, subarray]), + [subarray, array, other]) + + def test_many_duck_arrays(self): + + class A(object): + __array_function__ = _return_not_implemented + + class B(A): + __array_function__ = _return_not_implemented + + class C(A): + __array_function__ = _return_not_implemented + + class D(object): + 
__array_function__ = _return_not_implemented + + a = A() + b = B() + c = C() + d = D() + + assert_equal(_get_implementing_args([1]), []) + assert_equal(_get_implementing_args([a]), [a]) + assert_equal(_get_implementing_args([a, 1]), [a]) + assert_equal(_get_implementing_args([a, a, a]), [a]) + assert_equal(_get_implementing_args([a, d, a]), [a, d]) + assert_equal(_get_implementing_args([a, b]), [b, a]) + assert_equal(_get_implementing_args([b, a]), [b, a]) + assert_equal(_get_implementing_args([a, b, c]), [b, c, a]) + assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) + + def test_too_many_duck_arrays(self): + namespace = dict(__array_function__=_return_not_implemented) + types = [type('A' + str(i), (object,), namespace) for i in range(33)] + relevant_args = [t() for t in types] + + actual = _get_implementing_args(relevant_args[:32]) + assert_equal(actual, relevant_args[:32]) + + with assert_raises_regex(TypeError, 'distinct argument types'): + _get_implementing_args(relevant_args) + + +@requires_array_function +class TestNDArrayArrayFunction(object): + + def test_method(self): + + class Other(object): + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + array = np.array([1]) + other = Other() + no_override_sub = array.view(NoOverrideSub) + override_sub = array.view(OverrideSub) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray,), + args=(array, 1.), kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, Other), + args=(array, other), kwargs={}) + assert_(result is NotImplemented) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, NoOverrideSub), + args=(array, no_override_sub), + kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, OverrideSub), + args=(array, override_sub), + kwargs={}) + assert_equal(result, 'original') + + with assert_raises_regex(TypeError, 'no implementation found'): + np.concatenate((array, other)) + + expected = np.concatenate((array, array)) + result = np.concatenate((array, no_override_sub)) + assert_equal(result, expected.view(NoOverrideSub)) + result = np.concatenate((array, override_sub)) + assert_equal(result, expected.view(OverrideSub)) + + def test_no_wrapper(self): + array = np.array(1) + func = dispatched_one_arg.__wrapped__ + with assert_raises_regex(AttributeError, '__wrapped__'): + array.__array_function__(func=func, + types=(np.ndarray,), + args=(array,), kwargs={}) + + +@requires_array_function +class TestArrayFunctionDispatch(object): + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + roundtripped = pickle.loads( + pickle.dumps(dispatched_one_arg, protocol=proto)) + assert_(roundtripped is dispatched_one_arg) + + def test_name_and_docstring(self): + assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg') + if sys.flags.optimize < 2: + assert_equal(dispatched_one_arg.__doc__, 'Docstring.') + + def test_interface(self): + + class MyArray(object): + def __array_function__(self, func, types, args, kwargs): + return (self, func, types, args, kwargs) + + original = MyArray() + (obj, func, types, args, kwargs) = dispatched_one_arg(original) + assert_(obj is original) + assert_(func is dispatched_one_arg) + assert_equal(set(types), {MyArray}) + # assert_equal uses the 
overloaded np.iscomplexobj() internally + assert_(args == (original,)) + assert_equal(kwargs, {}) + + def test_not_implemented(self): + + class MyArray(object): + def __array_function__(self, func, types, args, kwargs): + return NotImplemented + + array = MyArray() + with assert_raises_regex(TypeError, 'no implementation found'): + dispatched_one_arg(array) + + +@requires_array_function +class TestVerifyMatchingSignatures(object): + + def test_verify_matching_signatures(self): + + verify_matching_signatures(lambda x: 0, lambda x: 0) + verify_matching_signatures(lambda x=None: 0, lambda x=None: 0) + verify_matching_signatures(lambda x=1: 0, lambda x=None: 0) + + with assert_raises(RuntimeError): + verify_matching_signatures(lambda a: 0, lambda b: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x: 0, lambda x=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=None: 0, lambda y=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=1: 0, lambda y=1: 0) + + def test_array_function_dispatch(self): + + with assert_raises(RuntimeError): + @array_function_dispatch(lambda x: (x,)) + def f(y): + pass + + # should not raise + @array_function_dispatch(lambda x: (x,), verify=False) + def f(y): + pass + + +def _new_duck_type_and_implements(): + """Create a duck array type and implements functions.""" + HANDLED_FUNCTIONS = {} + + class MyArray(object): + def __array_function__(self, func, types, args, kwargs): + if func not in HANDLED_FUNCTIONS: + return NotImplemented + if not all(issubclass(t, MyArray) for t in types): + return NotImplemented + return HANDLED_FUNCTIONS[func](*args, **kwargs) + + def implements(numpy_function): + """Register an __array_function__ implementations.""" + def decorator(func): + HANDLED_FUNCTIONS[numpy_function] = func + return func + return decorator + + return (MyArray, implements) + + +@requires_array_function +class TestArrayFunctionImplementation(object): + + def test_one_arg(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(dispatched_one_arg) + def _(array): + return 'myarray' + + assert_equal(dispatched_one_arg(1), 'original') + assert_equal(dispatched_one_arg(MyArray()), 'myarray') + + def test_optional_args(self): + MyArray, implements = _new_duck_type_and_implements() + + @array_function_dispatch(lambda array, option=None: (array,)) + def func_with_option(array, option='default'): + return option + + @implements(func_with_option) + def my_array_func_with_option(array, new_option='myarray'): + return new_option + + # we don't need to implement every option on __array_function__ + # implementations + assert_equal(func_with_option(1), 'default') + assert_equal(func_with_option(1, option='extra'), 'extra') + assert_equal(func_with_option(MyArray()), 'myarray') + with assert_raises(TypeError): + func_with_option(MyArray(), option='extra') + + # but new options on implementations can't be used + result = my_array_func_with_option(MyArray(), new_option='yes') + assert_equal(result, 'yes') + with assert_raises(TypeError): + func_with_option(MyArray(), new_option='no') + + def test_not_implemented(self): + MyArray, implements = _new_duck_type_and_implements() + + @array_function_dispatch(lambda array: (array,), module='my') + def func(array): + return array + + array = np.array(1) + assert_(func(array) is array) + assert_equal(func.__module__, 'my') + + with assert_raises_regex( + TypeError, "no implementation found for 'my.func'"): + func(MyArray()) + + 
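The dispatch tests above all circle around NEP 18's `__array_function__` protocol: NumPy collects the distinct argument types, offers the call to each type's `__array_function__` in turn, and raises the "no implementation found" TypeError once everything has returned NotImplemented. What follows is a minimal editor's sketch of the registration pattern these tests model, not code from the repository; every name in it (DuckArray, HANDLED, implements, _sum) is invented for illustration, and on the NumPy 1.16 vendored here the dispatch only runs with NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 set in the environment (it is on by default from NumPy 1.17):

    import numpy as np

    HANDLED = {}  # maps NumPy functions to this duck type's implementations


    def implements(np_function):
        # Hypothetical helper: register an __array_function__ implementation.
        def decorator(func):
            HANDLED[np_function] = func
            return func
        return decorator


    class DuckArray(object):
        def __init__(self, data):
            self.data = np.asarray(data)

        def __array_function__(self, func, types, args, kwargs):
            # Decline functions we don't handle and foreign duck types,
            # so NumPy can fall back or raise its usual TypeError.
            if func not in HANDLED:
                return NotImplemented
            if not all(issubclass(t, (DuckArray, np.ndarray)) for t in types):
                return NotImplemented
            return HANDLED[func](*args, **kwargs)


    @implements(np.sum)
    def _sum(arr, *args, **kwargs):
        # Delegate to the wrapped ndarray.
        return np.sum(arr.data, *args, **kwargs)


    # With dispatch enabled, np.sum(DuckArray([1, 2, 3])) returns 6 via _sum.

Returning NotImplemented for unknown functions or foreign types is what lets mixed expressions fall through to the "no implementation found" error these tests assert on.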
+class TestNDArrayMethods(object): + + def test_repr(self): + # gh-12162: should still be defined even if __array_function__ doesn't + # implement np.array_repr() + + class MyArray(np.ndarray): + def __array_function__(*args, **kwargs): + return NotImplemented + + array = np.array(1).view(MyArray) + assert_equal(repr(array), 'MyArray(1)') + assert_equal(str(array), '1') + + +class TestNumPyFunctions(object): + + def test_set_module(self): + assert_equal(np.sum.__module__, 'numpy') + assert_equal(np.char.equal.__module__, 'numpy.char') + assert_equal(np.fft.fft.__module__, 'numpy.fft') + assert_equal(np.linalg.solve.__module__, 'numpy.linalg') + + @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 3 only") + def test_inspect_sum(self): + signature = inspect.signature(np.sum) + assert_('axis' in signature.parameters) + + @requires_array_function + def test_override_sum(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(np.sum) + def _(array): + return 'yes' + + assert_equal(np.sum(MyArray()), 'yes') diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.pyc new file mode 100644 index 0000000..7140b53 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.py new file mode 100644 index 0000000..c5c091e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.py @@ -0,0 +1,205 @@ +from __future__ import division, absolute_import, print_function + +import sys + +import pytest + +import numpy as np +from numpy.testing import assert_, assert_equal +from numpy.core.tests._locales import CommaDecimalPointLocale + + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_types(tp): + """ Check formatting. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(float(x)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e16).itemsize > 4: + assert_equal(str(tp(1e16)), str(float('1e16')), + err_msg='Failed str formatting for type %s' % tp) + else: + ref = '1e+16' + assert_equal(str(tp(1e16)), ref, + err_msg='Failed str formatting for type %s' % tp) + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_nan_inf_float(tp): + """ Check formatting of nan & inf. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [np.inf, -np.inf, np.nan]: + assert_equal(str(tp(x)), _REF[x], + err_msg='Failed str formatting for type %s' % tp) + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_types(tp): + """Check formatting of complex types. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. 
+ + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(complex(x)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x*1j)), str(complex(x*1j)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e16).itemsize > 8: + assert_equal(str(tp(1e16)), str(complex(1e16)), + err_msg='Failed str formatting for type %s' % tp) + else: + ref = '(1e+16+0j)' + assert_equal(str(tp(1e16)), ref, + err_msg='Failed str formatting for type %s' % tp) + + +@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_inf_nan(dtype): + """Check inf/nan formatting of complex types.""" + TESTS = { + complex(np.inf, 0): "(inf+0j)", + complex(0, np.inf): "infj", + complex(-np.inf, 0): "(-inf+0j)", + complex(0, -np.inf): "-infj", + complex(np.inf, 1): "(inf+1j)", + complex(1, np.inf): "(1+infj)", + complex(-np.inf, 1): "(-inf+1j)", + complex(1, -np.inf): "(1-infj)", + complex(np.nan, 0): "(nan+0j)", + complex(0, np.nan): "nanj", + complex(-np.nan, 0): "(nan+0j)", + complex(0, -np.nan): "nanj", + complex(np.nan, 1): "(nan+1j)", + complex(1, np.nan): "(1+nanj)", + complex(-np.nan, 1): "(nan+1j)", + complex(1, -np.nan): "(1+nanj)", + } + for c, s in TESTS.items(): + assert_equal(str(dtype(c)), s) + + +# print tests +def _test_redirected_print(x, tp, ref=None): + file = StringIO() + file_tp = StringIO() + stdout = sys.stdout + try: + sys.stdout = file_tp + print(tp(x)) + sys.stdout = file + if ref: + print(ref) + else: + print(x) + finally: + sys.stdout = stdout + + assert_equal(file.getvalue(), file_tp.getvalue(), + err_msg='print failed for type%s' % tp) + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_type_print(tp): + """Check formatting when using print """ + for x in [0, 1, -1, 1e20]: + _test_redirected_print(float(x), tp) + + for x in [np.inf, -np.inf, np.nan]: + _test_redirected_print(float(x), tp, _REF[x]) + + if tp(1e16).itemsize > 4: + _test_redirected_print(float(1e16), tp) + else: + ref = '1e+16' + _test_redirected_print(float(1e16), tp, ref) + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_type_print(tp): + """Check formatting when using print """ + # We do not create complex with inf/nan directly because the feature is + # missing in python < 2.6 + for x in [0, 1, -1, 1e20]: + _test_redirected_print(complex(x), tp) + + if tp(1e16).itemsize > 8: + _test_redirected_print(complex(1e16), tp) + else: + ref = '(1e+16+0j)' + _test_redirected_print(complex(1e16), tp, ref) + + _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') + _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') + _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') + + +def test_scalar_format(): + """Test the str.format method with NumPy scalar types""" + tests = [('{0}', True, np.bool_), + ('{0}', False, np.bool_), + ('{0:d}', 130, np.uint8), + ('{0:d}', 50000, np.uint16), + ('{0:d}', 3000000000, np.uint32), + ('{0:d}', 15000000000000000000, np.uint64), + ('{0:d}', -120, np.int8), + ('{0:d}', -30000, np.int16), + ('{0:d}', -2000000000, np.int32), + ('{0:d}', -7000000000000000000, np.int64), + ('{0:g}', 1.5, np.float16), + ('{0:g}', 1.5, np.float32), + ('{0:g}', 1.5, np.float64), + ('{0:g}', 1.5, np.longdouble), + ('{0:g}', 1.5+0.5j, np.complex64), + ('{0:g}', 1.5+0.5j, np.complex128), + ('{0:g}', 1.5+0.5j, np.clongdouble)] + + for (fmat, val, 
valtype) in tests: + try: + assert_equal(fmat.format(val), fmat.format(valtype(val)), + "failed with val %s, type %s" % (val, valtype)) + except ValueError as e: + assert_(False, + "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % + (fmat, repr(val), repr(valtype), str(e))) + + +# +# Locale tests: scalar types formatting should be independent of the locale +# + +class TestCommaDecimalPointLocale(CommaDecimalPointLocale): + + def test_locale_single(self): + assert_equal(str(np.float32(1.2)), str(float(1.2))) + + def test_locale_double(self): + assert_equal(str(np.double(1.2)), str(float(1.2))) + + def test_locale_longdouble(self): + assert_equal(str(np.longdouble('1.2')), str(float(1.2))) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.pyc new file mode 100644 index 0000000..8249c52 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_records.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_records.py new file mode 100644 index 0000000..c059ef5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_records.py @@ -0,0 +1,452 @@ +from __future__ import division, absolute_import, print_function + +import sys +try: + # Accessing collections abstract classes from collections + # has been deprecated since Python 3.3 + import collections.abc as collections_abc +except ImportError: + import collections as collections_abc +import textwrap +from os import path +import pytest + +import numpy as np +from numpy.compat import Path +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_array_almost_equal, + assert_raises, temppath + ) +from numpy.core.numeric import pickle + + +class TestFromrecords(object): + def test_fromrecords(self): + r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], + names='col1,col2,col3') + assert_equal(r[0].item(), (456, 'dbe', 1.2)) + assert_equal(r['col1'].dtype.kind, 'i') + if sys.version_info[0] >= 3: + assert_equal(r['col2'].dtype.kind, 'U') + assert_equal(r['col2'].dtype.itemsize, 12) + else: + assert_equal(r['col2'].dtype.kind, 'S') + assert_equal(r['col2'].dtype.itemsize, 3) + assert_equal(r['col3'].dtype.kind, 'f') + + def test_fromrecords_0len(self): + """ Verify fromrecords works with a 0-length input """ + dtype = [('a', float), ('b', float)] + r = np.rec.fromrecords([], dtype=dtype) + assert_equal(r.shape, (0,)) + + def test_fromrecords_2d(self): + data = [ + [(1, 2), (3, 4), (5, 6)], + [(6, 5), (4, 3), (2, 1)] + ] + expected_a = [[1, 3, 5], [6, 4, 2]] + expected_b = [[2, 4, 6], [5, 3, 1]] + + # try with dtype + r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)]) + assert_equal(r1['a'], expected_a) + assert_equal(r1['b'], expected_b) + + # try with names + r2 = np.rec.fromrecords(data, names=['a', 'b']) + assert_equal(r2['a'], expected_a) + assert_equal(r2['b'], expected_b) + + assert_equal(r1, r2) + + def test_method_array(self): + r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big') + assert_equal(r[1].item(), (25444, b'efg', 1633837924)) + + def test_method_array2(self): + r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), + (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') + assert_equal(r[1].item(), (2, 22.0, b'b')) + + def test_recarray_slices(self): + 
+        r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
+                         (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
+        assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
+
+    def test_recarray_fromarrays(self):
+        x1 = np.array([1, 2, 3, 4])
+        x2 = np.array(['a', 'dd', 'xyz', '12'])
+        x3 = np.array([1.1, 2, 3, 4])
+        r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
+        assert_equal(r[1].item(), (2, 'dd', 2.0))
+        x1[1] = 34
+        assert_equal(r.a, np.array([1, 2, 3, 4]))
+
+    def test_recarray_fromfile(self):
+        data_dir = path.join(path.dirname(__file__), 'data')
+        filename = path.join(data_dir, 'recarray_from_file.fits')
+        fd = open(filename, 'rb')
+        fd.seek(2880 * 2)
+        r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+        fd.seek(2880 * 2)
+        r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+        fd.close()
+        assert_equal(r1, r2)
+
+    def test_recarray_from_obj(self):
+        count = 10
+        a = np.zeros(count, dtype='O')
+        b = np.zeros(count, dtype='f8')
+        c = np.zeros(count, dtype='f8')
+        for i in range(len(a)):
+            a[i] = list(range(1, 10))
+
+        mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
+        for i in range(len(a)):
+            assert_((mine.date[i] == list(range(1, 10))))
+            assert_((mine.data1[i] == 0.0))
+            assert_((mine.data2[i] == 0.0))
+
+    def test_recarray_repr(self):
+        a = np.array([(1, 0.1), (2, 0.2)],
+                     dtype=[('foo', '<i4'), ('bar', '<f8')])
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_regression.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_regression.py
new file mode 100644
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_regression.py
+class TestRegression(object):
+    def test_endian_bool_indexing(self):
+        # Ticket #105
+        a = np.arange(10., dtype='>f8')
+        b = np.arange(10., dtype='<f8')
+        xa = np.where((a > 2) & (a < 6))
+        xb = np.where((b > 2) & (b < 6))
+        ya = ((a > 2) & (a < 6))
+        yb = ((b > 2) & (b < 6))
+        assert_array_almost_equal(xa, ya.nonzero())
+        assert_array_almost_equal(xb, yb.nonzero())
+        assert_(np.all(a[ya] > 0.5))
+        assert_(np.all(b[yb] > 0.5))
+
+    def test_endian_where(self):
+        # GitHub issue #369
+        net = np.zeros(3, dtype='>f4')
+        net[1] = 0.00458849
+        net[2] = 0.605202
+        max_net = net.max()
+        test = np.where(net <= 0., max_net, net)
+        correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
+        assert_array_almost_equal(test, correct)
+
+    def test_endian_recarray(self):
+        # Ticket #2185
+        dt = np.dtype([
+            ('head', '>u4'),
+            ('data', '>u4', 2),
+        ])
+        buf = np.recarray(1, dtype=dt)
+        buf[0]['head'] = 1
+        buf[0]['data'][:] = [1, 1]
+
+        h = buf[0]['head']
+        d = buf[0]['data'][0]
+        buf[0]['head'] = h
+        buf[0]['data'][0] = d
+        assert_(buf[0]['head'] == 1)
+
+    def test_mem_dot(self):
+        # Ticket #106
+        x = np.random.randn(0, 1)
+        y = np.random.randn(10, 1)
+        # Dummy array to detect bad memory access:
+        _z = np.ones(10)
+        _dummy = np.empty((0, 10))
+        z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
+        np.dot(x, np.transpose(y), out=z)
+        assert_equal(_z, np.ones(10))
+        # Do the same for the built-in dot:
+        np.core.multiarray.dot(x, np.transpose(y), out=z)
+        assert_equal(_z, np.ones(10))
+
+    def test_arange_endian(self):
+        # Ticket #111
+        ref = np.arange(10)
+        x = np.arange(10, dtype='<f4')
+        assert_array_equal(ref, x)
+
+    def test_pickle_py2_bytes_encoding(self):
+        # Check that arrays and scalars pickled on Py2 are
+        # unpickleable on Py3 using encoding='bytes'
+        if sys.version_info[:2] >= (3, 4):
+            # encoding='bytes' was added in Py3.4
+            for original, data in test_data:
+                result = pickle.loads(data, encoding='bytes')
+                assert_equal(result, original)
+
+                if isinstance(result, np.ndarray) and result.dtype.names:
+                    for name in result.dtype.names:
+                        assert_(isinstance(name, str))
+
+    def test_pickle_dtype(self):
+        # Ticket #251
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            pickle.dumps(float, protocol=proto)
+
+    def test_swap_real(self):
+        # Ticket #265
+        assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
+        assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
+
+    def test_object_array_from_list(self):
+        # Ticket #270
+        assert_(np.array([1, 'A', None]).shape == (3,))
+
+    def test_multiple_assign(self):
+        # Ticket #273
+        a = np.zeros((3, 1), int)
+        a[[1, 2]] = 1
+
+    def test_empty_array_type(self):
+        assert_equal(np.array([]).dtype, np.zeros(0).dtype)
+
+    def test_void_copyswap(self):
+        dt = np.dtype([('one', '<i4'), ('two', '<i4')])
+        x = np.array((1, 2), dtype=dt)
+        x = x.byteswap()
+        assert_(x['one'] > 1 and x['two'] > 2)
+
+    def test_method_args(self):
+        # Make sure methods and functions have same default axis
+        # keyword and arguments
+        funcs1 = 
['argmax', 'argmin', 'sum', ('product', 'prod'), + ('sometrue', 'any'), + ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'), + 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean', + 'round', 'min', 'max', 'argsort', 'sort'] + funcs2 = ['compress', 'take', 'repeat'] + + for func in funcs1: + arr = np.random.rand(8, 7) + arr2 = arr.copy() + if isinstance(func, tuple): + func_meth = func[1] + func = func[0] + else: + func_meth = func + res1 = getattr(arr, func_meth)() + res2 = getattr(np, func)(arr2) + if res1 is None: + res1 = arr + + if res1.dtype.kind in 'uib': + assert_((res1 == res2).all(), func) + else: + assert_(abs(res1-res2).max() < 1e-8, func) + + for func in funcs2: + arr1 = np.random.rand(8, 7) + arr2 = np.random.rand(8, 7) + res1 = None + if func == 'compress': + arr1 = arr1.ravel() + res1 = getattr(arr2, func)(arr1) + else: + arr2 = (15*arr2).astype(int).ravel() + if res1 is None: + res1 = getattr(arr1, func)(arr2) + res2 = getattr(np, func)(arr1, arr2) + assert_(abs(res1-res2).max() < 1e-8, func) + + def test_mem_lexsort_strings(self): + # Ticket #298 + lst = ['abc', 'cde', 'fgh'] + np.lexsort((lst,)) + + def test_fancy_index(self): + # Ticket #302 + x = np.array([1, 2])[np.array([0])] + assert_equal(x.shape, (1,)) + + def test_recarray_copy(self): + # Ticket #312 + dt = [('x', np.int16), ('y', np.float64)] + ra = np.array([(1, 2.3)], dtype=dt) + rb = np.rec.array(ra, dtype=dt) + rb['x'] = 2. + assert_(ra['x'] != rb['x']) + + def test_rec_fromarray(self): + # Ticket #322 + x1 = np.array([[1, 2], [3, 4], [5, 6]]) + x2 = np.array(['a', 'dd', 'xyz']) + x3 = np.array([1.1, 2, 3]) + np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") + + def test_object_array_assign(self): + x = np.empty((2, 2), object) + x.flat[2] = (1, 2, 3) + assert_equal(x.flat[2], (1, 2, 3)) + + def test_ndmin_float64(self): + # Ticket #324 + x = np.array([1, 2, 3], dtype=np.float64) + assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) + assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) + + def test_ndmin_order(self): + # Issue #465 and related checks + assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) + assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) + + def test_mem_axis_minimization(self): + # Ticket #327 + data = np.arange(5) + data = np.add.outer(data, data) + + def test_mem_float_imag(self): + # Ticket #330 + np.float64(1.0).imag + + def test_dtype_tuple(self): + # Ticket #334 + assert_(np.dtype('i4') == np.dtype(('i4', ()))) + + def test_dtype_posttuple(self): + # Ticket #335 + np.dtype([('col1', '()i4')]) + + def test_numeric_carray_compare(self): + # Ticket #341 + assert_equal(np.array(['X'], 'c'), b'X') + + def test_string_array_size(self): + # Ticket #342 + assert_raises(ValueError, + np.array, [['X'], ['X', 'X', 'X']], '|S1') + + def test_dtype_repr(self): + # Ticket #344 + dt1 = np.dtype(('uint32', 2)) + dt2 = np.dtype(('uint32', (2,))) + assert_equal(dt1.__repr__(), dt2.__repr__()) + + def test_reshape_order(self): + # Make sure reshape order works. 
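# [illustrative aside, not part of the vendored file] order='F' fills the
# result column-major, so np.arange(6).reshape(2, 3, order='F') places 0..5
# down the columns: first column [0, 1], second [2, 3], third [4, 5],
# i.e. [[0, 2, 4], [1, 3, 5]] as asserted below.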
+ a = np.arange(6).reshape(2, 3, order='F') + assert_equal(a, [[0, 2, 4], [1, 3, 5]]) + a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + b = a[:, 1] + assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) + + def test_reshape_zero_strides(self): + # Issue #380, test reshaping of zero strided arrays + a = np.ones(1) + a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) + assert_(a.reshape(5, 1).strides[0] == 0) + + def test_reshape_zero_size(self): + # GitHub Issue #2700, setting shape failed for 0-sized arrays + a = np.ones((0, 2)) + a.shape = (-1, 2) + + # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. + # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous. + @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max, + reason="Using relaxed stride checking") + def test_reshape_trailing_ones_strides(self): + # GitHub issue gh-2949, bad strides for trailing ones of new shape + a = np.zeros(12, dtype=np.int32)[::2] # not contiguous + strides_c = (16, 8, 8, 8) + strides_f = (8, 24, 48, 48) + assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) + assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) + assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) + + def test_repeat_discont(self): + # Ticket #352 + a = np.arange(12).reshape(4, 3)[:, 2] + assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) + + def test_array_index(self): + # Make sure optimization is not called in this case. + a = np.array([1, 2, 3]) + a2 = np.array([[1, 2, 3]]) + assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)]) + + def test_object_argmax(self): + a = np.array([1, 2, 3], dtype=object) + assert_(a.argmax() == 2) + + def test_recarray_fields(self): + # Ticket #372 + dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) + dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) + for a in [np.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)]), + np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), + np.rec.fromarrays([(1, 2), (3, 4)])]: + assert_(a.dtype in [dt0, dt1]) + + def test_random_shuffle(self): + # Ticket #374 + a = np.arange(5).reshape((5, 1)) + b = a.copy() + np.random.shuffle(b) + assert_equal(np.sort(b, axis=0), a) + + def test_refcount_vdot(self): + # Changeset #3443 + _assert_valid_refcount(np.vdot) + + def test_startswith(self): + ca = np.char.array(['Hi', 'There']) + assert_equal(ca.startswith('H'), [True, False]) + + def test_noncommutative_reduce_accumulate(self): + # Ticket #413 + tosubtract = np.arange(5) + todivide = np.array([2.0, 0.5, 0.25]) + assert_equal(np.subtract.reduce(tosubtract), -10) + assert_equal(np.divide.reduce(todivide), 16.0) + assert_array_equal(np.subtract.accumulate(tosubtract), + np.array([0, -1, -3, -6, -10])) + assert_array_equal(np.divide.accumulate(todivide), + np.array([2., 4., 16.])) + + def test_convolve_empty(self): + # Convolve should raise an error for empty input array. 
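# [illustrative aside, not part of the vendored file] np.convolve insists on
# non-empty 1-D inputs; a convolution over zero samples is undefined, so both
# argument orders are expected to raise ValueError.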
+        assert_raises(ValueError, np.convolve, [], [1])
+        assert_raises(ValueError, np.convolve, [1], [])
+
+    def test_multidim_byteswap(self):
+        # Ticket #449
+        r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
+        assert_array_equal(r.byteswap(),
+                           np.array([(256, (0, 256, 512))], r.dtype))
+
+    def test_string_NULL(self):
+        # Changeset 3557
+        assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
+                     'a\x00\x0b\x0c')
+
+    def test_junk_in_string_fields_of_recarray(self):
+        # Ticket #483
+        r = np.array([[b'abc']], dtype=[('var1', '|S20')])
+        assert_(asbytes(r['var1'][0][0]) == b'abc')
+
+    def test_take_output(self):
+        # Ensure that 'take' honours output parameter.
+        x = np.arange(12).reshape((3, 4))
+        a = np.take(x, [0, 2], axis=1)
+        b = np.zeros_like(a)
+        np.take(x, [0, 2], axis=1, out=b)
+        assert_array_equal(a, b)
+
+    def test_take_object_fail(self):
+        # Issue gh-3001
+        d = 123.
+        a = np.array([d, 1], dtype=object)
+        if HAS_REFCOUNT:
+            ref_d = sys.getrefcount(d)
+        try:
+            a.take([0, 100])
+        except IndexError:
+            pass
+        if HAS_REFCOUNT:
+            assert_(ref_d == sys.getrefcount(d))
+
+    def test_array_str_64bit(self):
+        # Ticket #501
+        s = np.array([1, np.nan], dtype=np.float64)
+        with np.errstate(all='raise'):
+            np.array_str(s)  # Should succeed
+
+    def test_frompyfunc_endian(self):
+        # Ticket #503
+        from math import radians
+        uradians = np.frompyfunc(radians, 1, 1)
+        big_endian = np.array([83.4, 83.5], dtype='>f8')
+        little_endian = np.array([83.4, 83.5], dtype='<f8')
+        assert_almost_equal(uradians(big_endian).astype(float),
+                            uradians(little_endian).astype(float))
+
+    def test_object_casting(self):
+        # This used to trigger the object-type version of
+        # the bitwise_or operation, because float64 -> object
+        # casting succeeds
+        def rs():
+            x = np.ones([484, 286])
+            y = np.zeros([484, 286])
+            x |= y
+
+        assert_raises(TypeError, rs)
+
+    def test_unicode_scalar(self):
+        # Ticket #600
+        x = np.array(["DROND", "DROND1"], dtype="U6")
+        el = x[1]
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            new = pickle.loads(pickle.dumps(el, protocol=proto))
+            assert_equal(new, el)
+
+    def test_arange_non_native_dtype(self):
+        # Ticket #616
+        for T in ('>f4', '<f4'):
+            dt = np.dtype(T)
+            assert_equal(np.arange(0, dtype=dt).dtype, dt)
+            assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
+            assert_equal(np.arange(5, dtype=dt).dtype, dt)
+
+    def test_bool_flat_indexing_invalid_nr_elements(self):
+        s = np.ones(10, dtype=float)
+        x = np.array((15,), dtype=float)
+
+        def ia(x, s, v):
+            x[(s > 0)] = v
+
+        assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
+        assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+
+        # Old special case (different code path):
+        assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
+        assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
+
+    def test_mem_scalar_indexing(self):
+        # Ticket #603
+        x = np.array([0], dtype=float)
+        index = np.array(0, dtype=np.int32)
+        x[index]
+
+    def test_binary_repr_0_width(self):
+        assert_equal(np.binary_repr(0, width=3), '000')
+
+    def test_fromstring(self):
+        assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
+                     [12, 9, 9])
+
+    def test_searchsorted_variable_length(self):
+        x = np.array(['a', 'aa', 'b'])
+        y = np.array(['d', 'e'])
+        assert_equal(x.searchsorted(y), [3, 3])
+
+    def test_string_argsort_with_zeros(self):
+        # Check argsort for strings containing zeros.
+        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+        assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
+        assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
+
+    def test_string_sort_with_zeros(self):
+        # Check sort for strings containing zeros.
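# [illustrative aside, not part of the vendored file] the buffer
# b"\x00\x02\x00\x01" viewed as '|S2' holds the two-byte strings b'\x00\x02'
# and b'\x00\x01'; a correct sort has to compare embedded NUL bytes rather
# than stopping at them C-string style, so the expected order is the pair
# swapped, which is what the y buffer below encodes.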
+        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+        y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
+        assert_array_equal(np.sort(x, kind="q"), y)
+
+    def test_copy_detection_zero_dim(self):
+        # Ticket #658
+        np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+    def test_flat_byteorder(self):
+        # Ticket #657
+        x = np.arange(10)
+        assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
+        assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
+
+    def test_uint64_from_negative(self):
+        assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
+
+    def test_sign_bit(self):
+        x = np.array([0, -0.0, 0])
+        assert_equal(str(np.abs(x)), '[0. 0. 0.]')
+
+    def test_flat_index_byteswap(self):
+        for dt in (np.dtype('<i4'), np.dtype('>i4')):
+            x = np.array([-1, 0, 1], dtype=dt)
+            assert_equal(x.flat[0].dtype, x[0].dtype)
+
+    def test_copy_detection_corner_case(self):
+        # Ticket #658
+        np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+    # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
+    # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
+    # 0-sized reshape itself is tested elsewhere.
+    @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+                        reason="Using relaxed stride checking")
+    def test_copy_detection_corner_case2(self):
+        # Ticket #771: strides are not set correctly when reshaping 0-sized
+        # arrays
+        b = np.indices((0, 3, 4)).T.reshape(-1, 3)
+        assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
+
+    def test_object_array_refcounting(self):
+        # Ticket #633
+        if not hasattr(sys, 'getrefcount'):
+            return
+
+        # NB. this is probably CPython-specific
+
+        cnt = sys.getrefcount
+
+        a = object()
+        b = object()
+        c = object()
+
+        cnt0_a = cnt(a)
+        cnt0_b = cnt(b)
+        cnt0_c = cnt(c)
+
+        # -- 0d -> 1-d broadcast slice assignment
+
+        arr = np.zeros(5, dtype=np.object_)
+
+        arr[:] = a
+        assert_equal(cnt(a), cnt0_a + 5)
+
+        arr[:] = b
+        assert_equal(cnt(a), cnt0_a)
+        assert_equal(cnt(b), cnt0_b + 5)
+
+        arr[:2] = c
+        assert_equal(cnt(b), cnt0_b + 3)
+        assert_equal(cnt(c), cnt0_c + 2)
+
+        del arr
+
+        # -- 1-d -> 2-d broadcast slice assignment
+
+        arr = np.zeros((5, 2), dtype=np.object_)
+        arr0 = np.zeros(2, dtype=np.object_)
+
+        arr0[0] = a
+        assert_(cnt(a) == cnt0_a + 1)
+        arr0[1] = b
+        assert_(cnt(b) == cnt0_b + 1)
+
+        arr[:, :] = arr0
+        assert_(cnt(a) == cnt0_a + 6)
+        assert_(cnt(b) == cnt0_b + 6)
+
+        arr[:, 0] = None
+        assert_(cnt(a) == cnt0_a + 1)
+
+        del arr, arr0
+
+        # -- 2-d copying + flattening
+
+        arr = np.zeros((5, 2), dtype=np.object_)
+
+        arr[:, 0] = a
+        arr[:, 1] = b
+        assert_(cnt(a) == cnt0_a + 5)
+        assert_(cnt(b) == cnt0_b + 5)
+
+        arr2 = arr.copy()
+        assert_(cnt(a) == cnt0_a + 10)
+        assert_(cnt(b) == cnt0_b + 10)
+
+        arr2 = arr[:, 0].copy()
+        assert_(cnt(a) == cnt0_a + 10)
+        assert_(cnt(b) == cnt0_b + 5)
+
+        arr2 = arr.flatten()
+        assert_(cnt(a) == cnt0_a + 10)
+        assert_(cnt(b) == cnt0_b + 10)
+
+        del arr, arr2
+
+        # -- concatenate, repeat, take, choose
+
+        arr1 = np.zeros((5, 1), dtype=np.object_)
+        arr2 = np.zeros((5, 1), dtype=np.object_)
+
+        arr1[...] = a
+        arr2[...] = b
+ arr2[...] = b
+ assert_(cnt(a) == cnt0_a + 5)
+ assert_(cnt(b) == cnt0_b + 5)
+
+ tmp = np.concatenate((arr1, arr2))
+ assert_(cnt(a) == cnt0_a + 5 + 5)
+ assert_(cnt(b) == cnt0_b + 5 + 5)
+
+ tmp = arr1.repeat(3, axis=0)
+ assert_(cnt(a) == cnt0_a + 5 + 3*5)
+
+ tmp = arr1.take([1, 2, 3], axis=0)
+ assert_(cnt(a) == cnt0_a + 5 + 3)
+
+ x = np.array([[0], [1], [0], [1], [1]], int)
+ tmp = x.choose(arr1, arr2)
+ assert_(cnt(a) == cnt0_a + 5 + 2)
+ assert_(cnt(b) == cnt0_b + 5 + 3)
+
+ del tmp # Avoid pyflakes unused variable warning
+
+ def test_mem_custom_float_to_array(self):
+ # Ticket 702
+ class MyFloat(object):
+ def __float__(self):
+ return 1.0
+
+ tmp = np.atleast_1d([MyFloat()])
+ tmp.astype(float) # Should succeed
+
+ def test_object_array_refcount_self_assign(self):
+ # Ticket #711
+ class VictimObject(object):
+ deleted = False
+
+ def __del__(self):
+ self.deleted = True
+
+ d = VictimObject()
+ arr = np.zeros(5, dtype=np.object_)
+ arr[:] = d
+ del d
+ arr[:] = arr # refcount of 'd' might hit zero here
+ assert_(not arr[0].deleted)
+ arr[:] = arr # trying to induce a segfault by doing it again...
+ assert_(not arr[0].deleted)
+
+ def test_mem_fromiter_invalid_dtype_string(self):
+ x = [1, 2, 3]
+ assert_raises(ValueError,
+ np.fromiter, [xi for xi in x], dtype='S')
+
+ def test_reduce_big_object_array(self):
+ # Ticket #713
+ oldsize = np.setbufsize(10*16)
+ a = np.array([None]*161, object)
+ assert_(not np.any(a))
+ np.setbufsize(oldsize)
+
+ def test_mem_0d_array_index(self):
+ # Ticket #714
+ np.zeros(10)[np.array(0)]
+
+ def test_nonnative_endian_fill(self):
+ # Non-native endian arrays were incorrectly filled with scalars
+ # before r5034.
+ if sys.byteorder == 'little':
+ dtype = np.dtype('>i4')
+ else:
+ dtype = np.dtype('<i4')
+ x = np.empty([4, 4], dtype=dtype)
+ x.fill(-1)
+ assert_equal(x, np.array([-1] * 16, dtype=dtype).reshape(4, 4))
+
+ def test_dot_alignment_sse2(self):
+ # Test for ticket #551, changeset r5140
+ x = np.zeros((30, 40))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ y = pickle.loads(pickle.dumps(x, protocol=proto))
+ # y is now typically not aligned on a 8-byte boundary
+ z = np.ones((1, y.shape[0]))
+ # This shouldn't cause a segmentation fault:
+ np.dot(z, y)
+
+ def test_astype_copy(self):
+ # Ticket #788, changeset r5155
+ # The test data file was generated by scipy.io.savemat.
+ # The dtype is float64, but the isbuiltin attribute is 0.
+ data_dir = path.join(path.dirname(__file__), 'data')
+ filename = path.join(data_dir, "astype_copy.pkl")
+ if sys.version_info[0] >= 3:
+ f = open(filename, 'rb')
+ xp = pickle.load(f, encoding='latin1')
+ f.close()
+ else:
+ f = open(filename)
+ xp = pickle.load(f)
+ f.close()
+ xpd = xp.astype(np.float64)
+ assert_((xp.__array_interface__['data'][0] !=
+ xpd.__array_interface__['data'][0]))
+
+ def test_compress_small_type(self):
+ # Ticket #789, changeset 5217.
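+ # [Editorial aside, not in the upstream numpy file: "cannot cast
+ # safely" refers to NumPy's casting rules, e.g.
+ # np.can_cast(np.int64, np.float32, casting='safe') is False because
+ # large int64 values lose precision, so compressing the default-int
+ # array below into a np.single out= buffer has to be rejected.]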
+ # compress with out argument segfaulted if cannot cast safely + import numpy as np + a = np.array([[1, 2], [3, 4]]) + b = np.zeros((2, 1), dtype=np.single) + try: + a.compress([True, False], axis=1, out=b) + raise AssertionError("compress with an out which cannot be " + "safely casted should not return " + "successfully") + except TypeError: + pass + + def test_attributes(self): + # Ticket #791 + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + assert_(dat.info == 'jubba') + dat.resize((4, 2)) + assert_(dat.info == 'jubba') + dat.sort() + assert_(dat.info == 'jubba') + dat.fill(2) + assert_(dat.info == 'jubba') + dat.put([2, 3, 4], [6, 3, 4]) + assert_(dat.info == 'jubba') + dat.setfield(4, np.int32, 0) + assert_(dat.info == 'jubba') + dat.setflags() + assert_(dat.info == 'jubba') + assert_(dat.all(1).info == 'jubba') + assert_(dat.any(1).info == 'jubba') + assert_(dat.argmax(1).info == 'jubba') + assert_(dat.argmin(1).info == 'jubba') + assert_(dat.argsort(1).info == 'jubba') + assert_(dat.astype(TestArray).info == 'jubba') + assert_(dat.byteswap().info == 'jubba') + assert_(dat.clip(2, 7).info == 'jubba') + assert_(dat.compress([0, 1, 1]).info == 'jubba') + assert_(dat.conj().info == 'jubba') + assert_(dat.conjugate().info == 'jubba') + assert_(dat.copy().info == 'jubba') + dat2 = TestArray([2, 3, 1, 0], 'jubba') + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + assert_(dat2.choose(choices).info == 'jubba') + assert_(dat.cumprod(1).info == 'jubba') + assert_(dat.cumsum(1).info == 'jubba') + assert_(dat.diagonal().info == 'jubba') + assert_(dat.flatten().info == 'jubba') + assert_(dat.getfield(np.int32, 0).info == 'jubba') + assert_(dat.imag.info == 'jubba') + assert_(dat.max(1).info == 'jubba') + assert_(dat.mean(1).info == 'jubba') + assert_(dat.min(1).info == 'jubba') + assert_(dat.newbyteorder().info == 'jubba') + assert_(dat.prod(1).info == 'jubba') + assert_(dat.ptp(1).info == 'jubba') + assert_(dat.ravel().info == 'jubba') + assert_(dat.real.info == 'jubba') + assert_(dat.repeat(2).info == 'jubba') + assert_(dat.reshape((2, 4)).info == 'jubba') + assert_(dat.round().info == 'jubba') + assert_(dat.squeeze().info == 'jubba') + assert_(dat.std(1).info == 'jubba') + assert_(dat.sum(1).info == 'jubba') + assert_(dat.swapaxes(0, 1).info == 'jubba') + assert_(dat.take([2, 3, 5]).info == 'jubba') + assert_(dat.transpose().info == 'jubba') + assert_(dat.T.info == 'jubba') + assert_(dat.var(1).info == 'jubba') + assert_(dat.view(TestArray).info == 'jubba') + # These methods do not preserve subclasses + assert_(type(dat.nonzero()[0]) is np.ndarray) + assert_(type(dat.nonzero()[1]) is np.ndarray) + + def test_recarray_tolist(self): + # Ticket #793, changeset r5215 + # Comparisons fail for NaN, so we can't use random memory + # for the test. 
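+ # [Editorial aside, not in the upstream numpy file: the buffer is 40
+ # bytes because one "i4,f8,f8" record is 4 + 8 + 8 = 20 bytes and two
+ # records are allocated; an all-zero buffer guarantees that no f8
+ # field holds a NaN bit pattern, which would make the tolist()
+ # comparisons below fail.]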
+ buf = np.zeros(40, dtype=np.int8) + a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf) + b = a.tolist() + assert_( a[0].tolist() == b[0]) + assert_( a[1].tolist() == b[1]) + + def test_nonscalar_item_method(self): + # Make sure that .item() fails graciously when it should + a = np.arange(5) + assert_raises(ValueError, a.item) + + def test_char_array_creation(self): + a = np.array('123', dtype='c') + b = np.array([b'1', b'2', b'3']) + assert_equal(a, b) + + def test_unaligned_unicode_access(self): + # Ticket #825 + for i in range(1, 9): + msg = 'unicode offset: %d chars' % i + t = np.dtype([('a', 'S%d' % i), ('b', 'U2')]) + x = np.array([(b'a', u'b')], dtype=t) + if sys.version_info[0] >= 3: + assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) + else: + assert_equal(str(x), "[('a', u'b')]", err_msg=msg) + + def test_sign_for_complex_nan(self): + # Ticket 794. + with np.errstate(invalid='ignore'): + C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan]) + have = np.sign(C) + want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan]) + assert_equal(have, want) + + def test_for_equal_names(self): + # Ticket #674 + dt = np.dtype([('foo', float), ('bar', float)]) + a = np.zeros(10, dt) + b = list(a.dtype.names) + b[0] = "notfoo" + a.dtype.names = b + assert_(a.dtype.names[0] == "notfoo") + assert_(a.dtype.names[1] == "bar") + + def test_for_object_scalar_creation(self): + # Ticket #816 + a = np.object_() + b = np.object_(3) + b2 = np.object_(3.0) + c = np.object_([4, 5]) + d = np.object_([None, {}, []]) + assert_(a is None) + assert_(type(b) is int) + assert_(type(b2) is float) + assert_(type(c) is np.ndarray) + assert_(c.dtype == object) + assert_(d.dtype == object) + + def test_array_resize_method_system_error(self): + # Ticket #840 - order should be an invalid keyword. + x = np.array([[0, 1], [2, 3]]) + assert_raises(TypeError, x.resize, (2, 2), order='C') + + def test_for_zero_length_in_choose(self): + "Ticket #882" + a = np.array(1) + assert_raises(ValueError, lambda x: x.choose([]), a) + + def test_array_ndmin_overflow(self): + "Ticket #947." + assert_raises(ValueError, lambda: np.array([1], ndmin=33)) + + def test_void_scalar_with_titles(self): + # No ticket + data = [('john', 4), ('mary', 5)] + dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] + arr = np.array(data, dtype=dtype1) + assert_(arr[0][0] == 'john') + assert_(arr[0][1] == 4) + + def test_void_scalar_constructor(self): + #Issue #1550 + + #Create test string data, construct void scalar from data and assert + #that void scalar contains original data. + test_string = np.array("test") + test_string_void_scalar = np.core.multiarray.scalar( + np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes()) + + assert_(test_string_void_scalar.view(test_string.dtype) == test_string) + + #Create record scalar, construct from data and assert that + #reconstructed scalar is correct. 
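+ # [Editorial aside, not in the upstream numpy file:
+ # np.core.multiarray.scalar(dtype, data) rebuilds a scalar from its
+ # dtype plus the raw bytes returned by .tobytes(); it is the same
+ # constructor that pickled NumPy scalars go through, which is why the
+ # pickle round-trip is exercised right after.]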
+ test_record = np.ones((), "i,i") + test_record_void_scalar = np.core.multiarray.scalar( + test_record.dtype, test_record.tobytes()) + + assert_(test_record_void_scalar == test_record) + + # Test pickle and unpickle of void and record scalars + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_(pickle.loads( + pickle.dumps(test_string, protocol=proto)) == test_string) + assert_(pickle.loads( + pickle.dumps(test_record, protocol=proto)) == test_record) + + def test_blasdot_uninitialized_memory(self): + # Ticket #950 + for m in [0, 1, 2]: + for n in [0, 1, 2]: + for k in range(3): + # Try to ensure that x->data contains non-zero floats + x = np.array([123456789e199], dtype=np.float64) + if IS_PYPY: + x.resize((m, 0), refcheck=False) + else: + x.resize((m, 0)) + y = np.array([123456789e199], dtype=np.float64) + if IS_PYPY: + y.resize((0, n), refcheck=False) + else: + y.resize((0, n)) + + # `dot` should just return zero (m, n) matrix + z = np.dot(x, y) + assert_(np.all(z == 0)) + assert_(z.shape == (m, n)) + + def test_zeros(self): + # Regression test for #1061. + # Set a size which cannot fit into a 64 bits signed integer + sz = 2 ** 64 + with assert_raises_regex(ValueError, + 'Maximum allowed dimension exceeded'): + np.empty(sz) + + def test_huge_arange(self): + # Regression test for #1062. + # Set a size which cannot fit into a 64 bits signed integer + sz = 2 ** 64 + with assert_raises_regex(ValueError, + 'Maximum allowed size exceeded'): + np.arange(sz) + assert_(np.size == sz) + + def test_fromiter_bytes(self): + # Ticket #1058 + a = np.fromiter(list(range(10)), dtype='b') + b = np.fromiter(list(range(10)), dtype='B') + assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + + def test_array_from_sequence_scalar_array(self): + # Ticket #1078: segfaults when creating an array with a sequence of + # 0d arrays. + a = np.array((np.ones(2), np.array(2))) + assert_equal(a.shape, (2,)) + assert_equal(a.dtype, np.dtype(object)) + assert_equal(a[0], np.ones(2)) + assert_equal(a[1], np.array(2)) + + a = np.array(((1,), np.array(1))) + assert_equal(a.shape, (2,)) + assert_equal(a.dtype, np.dtype(object)) + assert_equal(a[0], (1,)) + assert_equal(a[1], np.array(1)) + + def test_array_from_sequence_scalar_array2(self): + # Ticket #1081: weird array with strange input... + t = np.array([np.array([]), np.array(0, object)]) + assert_equal(t.shape, (2,)) + assert_equal(t.dtype, np.dtype(object)) + + def test_array_too_big(self): + # Ticket #1080. + assert_raises(ValueError, np.zeros, [975]*7, np.int8) + assert_raises(ValueError, np.zeros, [26244]*5, np.int8) + + def test_dtype_keyerrors_(self): + # Ticket #1106. + dt = np.dtype([('f1', np.uint)]) + assert_raises(KeyError, dt.__getitem__, "f2") + assert_raises(IndexError, dt.__getitem__, 1) + assert_raises(TypeError, dt.__getitem__, 0.0) + + def test_lexsort_buffer_length(self): + # Ticket #1217, don't segfault. + a = np.ones(100, dtype=np.int8) + b = np.ones(100, dtype=np.int32) + i = np.lexsort((a[::-1], b)) + assert_equal(i, np.arange(100, dtype=int)) + + def test_object_array_to_fixed_string(self): + # Ticket #1235. 
+ a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) + b = np.array(a, dtype=(np.str_, 8)) + assert_equal(a, b) + c = np.array(a, dtype=(np.str_, 5)) + assert_equal(c, np.array(['abcde', 'ijklm'])) + d = np.array(a, dtype=(np.str_, 12)) + assert_equal(a, d) + e = np.empty((2, ), dtype=(np.str_, 8)) + e[:] = a[:] + assert_equal(a, e) + + def test_unicode_to_string_cast(self): + # Ticket #1240. + a = np.array([[u'abc', u'\u03a3'], + [u'asdf', u'erw']], + dtype='U') + assert_raises(UnicodeEncodeError, np.array, a, 'S4') + + def test_mixed_string_unicode_array_creation(self): + a = np.array(['1234', u'123']) + assert_(a.itemsize == 16) + a = np.array([u'123', '1234']) + assert_(a.itemsize == 16) + a = np.array(['1234', u'123', '12345']) + assert_(a.itemsize == 20) + a = np.array([u'123', '1234', u'12345']) + assert_(a.itemsize == 20) + a = np.array([u'123', '1234', u'1234']) + assert_(a.itemsize == 16) + + def test_misaligned_objects_segfault(self): + # Ticket #1198 and #1267 + a1 = np.zeros((10,), dtype='O,c') + a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') + a1['f0'] = a2 + repr(a1) + np.argmax(a1['f0']) + a1['f0'][1] = "FOO" + a1['f0'] = "FOO" + np.array(a1['f0'], dtype='S') + np.nonzero(a1['f0']) + a1.sort() + copy.deepcopy(a1) + + def test_misaligned_scalars_segfault(self): + # Ticket #1267 + s1 = np.array(('a', 'Foo'), dtype='c,O') + s2 = np.array(('b', 'Bar'), dtype='c,O') + s1['f1'] = s2['f1'] + s1['f1'] = 'Baz' + + def test_misaligned_dot_product_objects(self): + # Ticket #1267 + # This didn't require a fix, but it's worth testing anyway, because + # it may fail if .dot stops enforcing the arrays to be BEHAVED + a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') + b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') + np.dot(a['f0'], b['f0']) + + def test_byteswap_complex_scalar(self): + # Ticket #1259 and gh-441 + for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]: + z = np.array([2.2-1.1j], dtype) + x = z[0] # always native-endian + y = x.byteswap() + if x.dtype.byteorder == z.dtype.byteorder: + # little-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder())) + else: + # big-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype)) + # double check real and imaginary parts: + assert_equal(x.real, y.real.byteswap()) + assert_equal(x.imag, y.imag.byteswap()) + + def test_structured_arrays_with_objects1(self): + # Ticket #1299 + stra = 'aaaa' + strb = 'bbbb' + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(x[0, 1] == x[0, 0]) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_structured_arrays_with_objects2(self): + # Ticket #1299 second test + stra = 'aaaa' + strb = 'bbbb' + numb = sys.getrefcount(strb) + numa = sys.getrefcount(stra) + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(sys.getrefcount(strb) == numb) + assert_(sys.getrefcount(stra) == numa + 2) + + def test_duplicate_title_and_name(self): + # Ticket #1254 + dtspec = [(('a', 'a'), 'i'), ('b', 'i')] + assert_raises(ValueError, np.dtype, dtspec) + + def test_signed_integer_division_overflow(self): + # Ticket #1317. 
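+ # [Editorial aside, not in the upstream numpy file: in two's
+ # complement the true quotient np.iinfo(t).min // -1 is one larger
+ # than np.iinfo(t).max (e.g. -128 // -1 would be 128 for np.int8), so
+ # the division overflows; the test only checks that this is
+ # survivable with the divide warning suppressed.]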
+ def test_type(t):
+ min = np.array([np.iinfo(t).min])
+ min //= -1
+
+ with np.errstate(divide="ignore"):
+ for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):
+ test_type(t)
+
+ def test_buffer_hashlib(self):
+ try:
+ from hashlib import md5
+ except ImportError:
+ from md5 import new as md5
+
+ x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
+ assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
+
+ def test_0d_string_scalar(self):
+ # Bug #1436; the following should succeed
+ np.asarray('x', '>c')
+
+ def test_log1p_compiler_shenanigans(self):
+ # Check if log1p is behaving on 32 bit intel systems.
+ assert_(np.isfinite(np.log1p(np.exp2(-53))))
+
+ def test_fromiter_comparison(self):
+ a = np.fromiter(list(range(10)), dtype='b')
+ b = np.fromiter(list(range(10)), dtype='B')
+ assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+ assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+ def test_fromstring_crash(self):
+ # Ticket #1345: the following should not cause a crash
+ np.fromstring(b'aa, aa, 1.0', sep=',')
+
+ def test_ticket_1539(self):
+ dtypes = [x for x in np.typeDict.values()
+ if (issubclass(x, np.number)
+ and not issubclass(x, np.timedelta64))]
+ a = np.array([], np.bool_) # not x[0] because it is unordered
+ failures = []
+
+ for x in dtypes:
+ b = a.astype(x)
+ for y in dtypes:
+ c = a.astype(y)
+ try:
+ np.dot(b, c)
+ except TypeError:
+ failures.append((x, y))
+ if failures:
+ raise AssertionError("Failures: %r" % failures)
+
+ def test_ticket_1538(self):
+ x = np.finfo(np.float32)
+ for name in 'eps epsneg max min resolution tiny'.split():
+ assert_equal(type(getattr(x, name)), np.float32,
+ err_msg=name)
+
+ def test_ticket_1434(self):
+ # Check that the out= argument in var and std has an effect
+ data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
+ out = np.zeros((3,))
+
+ ret = data.var(axis=1, out=out)
+ assert_(ret is out)
+ assert_array_equal(ret, data.var(axis=1))
+
+ ret = data.std(axis=1, out=out)
+ assert_(ret is out)
+ assert_array_equal(ret, data.std(axis=1))
+
+ def test_complex_nan_maximum(self):
+ cnan = complex(0, np.nan)
+ assert_equal(np.maximum(1, cnan), cnan)
+
+ def test_subclass_int_tuple_assignment(self):
+ # ticket #1563
+ class Subclass(np.ndarray):
+ def __new__(cls, i):
+ return np.ones((i,)).view(cls)
+
+ x = Subclass(5)
+ x[(0,)] = 2 # shouldn't raise an exception
+ assert_equal(x[0], 2)
+
+ def test_ufunc_no_unnecessary_views(self):
+ # ticket #1548
+ class Subclass(np.ndarray):
+ pass
+ x = np.array([1, 2, 3]).view(Subclass)
+ y = np.add(x, x, x)
+ assert_equal(id(x), id(y))
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_take_refcount(self):
+ # ticket #939
+ a = np.arange(16, dtype=float)
+ a.shape = (4, 4)
+ lut = np.ones((5 + 3, 4), float)
+ rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
+ c1 = sys.getrefcount(rgba)
+ try:
+ lut.take(a, axis=0, mode='clip', out=rgba)
+ except TypeError:
+ pass
+ c2 = sys.getrefcount(rgba)
+ assert_equal(c1, c2)
+
+ def test_fromfile_tofile_seeks(self):
+ # On Python 3, tofile/fromfile used to get (#1610) the Python
+ # file handle out of sync
+ f0 = tempfile.NamedTemporaryFile()
+ f = f0.file
+ f.write(np.arange(255, dtype='u1').tobytes())
+
+ f.seek(20)
+ ret = np.fromfile(f, count=4, dtype='u1')
+ assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
+ assert_equal(f.tell(), 24)
+
+ f.seek(40)
+ np.array([1, 2, 3], dtype='u1').tofile(f)
+ assert_equal(f.tell(), 43)
+
+ f.seek(40)
+ data = f.read(3)
+ assert_equal(data, b"\x01\x02\x03")
+
+ f.seek(80)
+ f.read(4)
+ data = np.fromfile(f, dtype='u1', count=4)
+ assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
+ + f.close() + + def test_complex_scalar_warning(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1+2j) + assert_warns(np.ComplexWarning, float, x) + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning) + assert_equal(float(x), float(x.real)) + + def test_complex_scalar_complex_cast(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1+2j) + assert_equal(complex(x), 1+2j) + + def test_complex_boolean_cast(self): + # Ticket #2218 + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp) + assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) + assert_(np.any(x)) + assert_(np.all(x[1:])) + + def test_uint_int_conversion(self): + x = 2**64 - 1 + assert_equal(int(np.uint64(x)), x) + + def test_duplicate_field_names_assign(self): + ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8') + ra.dtype.names = ('f1', 'f2') + repr(ra) # should not cause a segmentation fault + assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) + + def test_eq_string_and_object_array(self): + # From e-mail thread "__eq__ with str and object" (Keith Goodman) + a1 = np.array(['a', 'b'], dtype=object) + a2 = np.array(['a', 'c']) + assert_array_equal(a1 == a2, [True, False]) + assert_array_equal(a2 == a1, [True, False]) + + def test_nonzero_byteswap(self): + a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) + a.dtype = np.float32 + assert_equal(a.nonzero()[0], [1]) + a = a.byteswap().newbyteorder() + assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap + + def test_find_common_type_boolean(self): + # Ticket #1695 + assert_(np.find_common_type([], ['?', '?']) == '?') + + def test_empty_mul(self): + a = np.array([1.]) + a[1:1] *= 2 + assert_equal(a, [1.]) + + def test_array_side_effect(self): + # The second use of itemsize was throwing an exception because in + # ctors.c, discover_itemsize was calling PyObject_Length without + # checking the return code. This failed to get the length of the + # number 2, and the exception hung around until something checked + # PyErr_Occurred() and returned an error. 
+ assert_equal(np.dtype('S10').itemsize, 10) + np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_) + assert_equal(np.dtype('S10').itemsize, 10) + + def test_any_float(self): + # all and any for floats + a = np.array([0.1, 0.9]) + assert_(np.any(a)) + assert_(np.all(a)) + + def test_large_float_sum(self): + a = np.arange(10000, dtype='f') + assert_equal(a.sum(dtype='d'), a.astype('d').sum()) + + def test_ufunc_casting_out(self): + a = np.array(1.0, dtype=np.float32) + b = np.array(1.0, dtype=np.float64) + c = np.array(1.0, dtype=np.float32) + np.add(a, b, out=c) + assert_equal(c, 2.0) + + def test_array_scalar_contiguous(self): + # Array scalars are both C and Fortran contiguous + assert_(np.array(1.0).flags.c_contiguous) + assert_(np.array(1.0).flags.f_contiguous) + assert_(np.array(np.float32(1.0)).flags.c_contiguous) + assert_(np.array(np.float32(1.0)).flags.f_contiguous) + + def test_squeeze_contiguous(self): + # Similar to GitHub issue #387 + a = np.zeros((1, 2)).squeeze() + b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze() + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.f_contiguous) + + def test_squeeze_axis_handling(self): + # Issue #10779 + # Ensure proper handling of objects + # that don't support axis specification + # when squeezing + + class OldSqueeze(np.ndarray): + + def __new__(cls, + input_array): + obj = np.asarray(input_array).view(cls) + return obj + + # it is perfectly reasonable that prior + # to numpy version 1.7.0 a subclass of ndarray + # might have been created that did not expect + # squeeze to have an axis argument + # NOTE: this example is somewhat artificial; + # it is designed to simulate an old API + # expectation to guard against regression + def squeeze(self): + return super(OldSqueeze, self).squeeze() + + oldsqueeze = OldSqueeze(np.array([[1],[2],[3]])) + + # if no axis argument is specified the old API + # expectation should give the correct result + assert_equal(np.squeeze(oldsqueeze), + np.array([1,2,3])) + + # likewise, axis=None should work perfectly well + # with the old API expectation + assert_equal(np.squeeze(oldsqueeze, axis=None), + np.array([1,2,3])) + + # however, specification of any particular axis + # should raise a TypeError in the context of the + # old API specification, even when using a valid + # axis specification like 1 for this array + with assert_raises(TypeError): + # this would silently succeed for array + # subclasses / objects that did not support + # squeeze axis argument handling before fixing + # Issue #10779 + np.squeeze(oldsqueeze, axis=1) + + # check for the same behavior when using an invalid + # axis specification -- in this case axis=0 does not + # have size 1, but the priority should be to raise + # a TypeError for the axis argument and NOT a + # ValueError for squeezing a non-empty dimension + with assert_raises(TypeError): + np.squeeze(oldsqueeze, axis=0) + + # the new API knows how to handle the axis + # argument and will return a ValueError if + # attempting to squeeze an axis that is not + # of length 1 + with assert_raises(ValueError): + np.squeeze(np.array([[1],[2],[3]]), axis=0) + + def test_reduce_contiguous(self): + # GitHub issue #387 + a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) + b = np.add.reduce(np.zeros((2, 1, 2)), 1) + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.c_contiguous) + + def test_object_array_self_reference(self): + # Object arrays with references to themselves can cause problems + a = np.array(0, 
dtype=object) + a[()] = a + assert_raises(RecursionError, int, a) + assert_raises(RecursionError, long, a) + assert_raises(RecursionError, float, a) + if sys.version_info.major == 2: + # in python 3, this falls back on operator.index, which fails on + # on dtype=object + assert_raises(RecursionError, oct, a) + assert_raises(RecursionError, hex, a) + a[()] = None + + def test_object_array_circular_reference(self): + # Test the same for a circular reference. + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + b[()] = a + assert_raises(RecursionError, int, a) + # NumPy has no tp_traverse currently, so circular references + # cannot be detected. So resolve it: + a[()] = None + + # This was causing a to become like the above + a = np.array(0, dtype=object) + a[...] += 1 + assert_equal(a, 1) + + def test_object_array_nested(self): + # but is fine with a reference to a different array + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + assert_equal(int(a), int(0)) + assert_equal(long(a), long(0)) + assert_equal(float(a), float(0)) + if sys.version_info.major == 2: + # in python 3, this falls back on operator.index, which fails on + # on dtype=object + assert_equal(oct(a), oct(0)) + assert_equal(hex(a), hex(0)) + + def test_object_array_self_copy(self): + # An object array being copied into itself DECREF'ed before INCREF'ing + # causing segmentation faults (gh-3787) + a = np.array(object(), dtype=object) + np.copyto(a, a) + if HAS_REFCOUNT: + assert_(sys.getrefcount(a[()]) == 2) + a[()].__class__ # will segfault if object was deleted + + def test_zerosize_accumulate(self): + "Ticket #1733" + x = np.array([[42, 0]], dtype=np.uint32) + assert_equal(np.add.accumulate(x[:-1, 0]), []) + + def test_objectarray_setfield(self): + # Setfield should not overwrite Object fields with non-Object data + x = np.array([1, 2, 3], dtype=object) + assert_raises(TypeError, x.setfield, 4, np.int32, 0) + + def test_setting_rank0_string(self): + "Ticket #1736" + s1 = b"hello1" + s2 = b"hello2" + a = np.zeros((), dtype="S10") + a[()] = s1 + assert_equal(a, np.array(s1)) + a[()] = np.array(s2) + assert_equal(a, np.array(s2)) + + a = np.zeros((), dtype='f4') + a[()] = 3 + assert_equal(a, np.array(3)) + a[()] = np.array(4) + assert_equal(a, np.array(4)) + + def test_string_astype(self): + "Ticket #1748" + s1 = b'black' + s2 = b'white' + s3 = b'other' + a = np.array([[s1], [s2], [s3]]) + assert_equal(a.dtype, np.dtype('S5')) + b = a.astype(np.dtype('S0')) + assert_equal(b.dtype, np.dtype('S5')) + + def test_ticket_1756(self): + # Ticket #1756 + s = b'0123456789abcdef' + a = np.array([s]*5) + for i in range(1, 17): + a1 = np.array(a, "|S%d" % i) + a2 = np.array([s[:i]]*5) + assert_equal(a1, a2) + + def test_fields_strides(self): + "gh-2355" + r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') + assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) + assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) + assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) + assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) + + def test_alignment_update(self): + # Check that alignment flag is updated on stride setting + a = np.arange(10) + assert_(a.flags.aligned) + a.strides = 3 + assert_(not a.flags.aligned) + + def test_ticket_1770(self): + "Should not segfault on python 3k" + import numpy as np + try: + a = np.zeros((1,), dtype=[('f1', 'f')]) + a['f1'] = 1 + a['f2'] = 1 + except ValueError: + pass + except Exception: + raise AssertionError + + def 
test_ticket_1608(self): + "x.flat shouldn't modify data" + x = np.array([[1, 2], [3, 4]]).T + np.array(x.flat) + assert_equal(x, [[1, 3], [2, 4]]) + + def test_pickle_string_overwrite(self): + import re + + data = np.array([1], dtype='b') + blob = pickle.dumps(data, protocol=1) + data = pickle.loads(blob) + + # Check that loads does not clobber interned strings + s = re.sub("a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + data[0] = 0xbb + s = re.sub("a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + + def test_pickle_bytes_overwrite(self): + if sys.version_info[0] >= 3: + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + data = np.array([1], dtype='b') + data = pickle.loads(pickle.dumps(data, protocol=proto)) + data[0] = 0xdd + bytestring = "\x01 ".encode('ascii') + assert_equal(bytestring[0:1], '\x01'.encode('ascii')) + + def test_pickle_py2_array_latin1_hack(self): + # Check that unpickling hacks in Py3 that support + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) + data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n" + b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" + b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" + b"p13\ntp14\nb.") + if sys.version_info[0] >= 3: + # This should work: + result = pickle.loads(data, encoding='latin1') + assert_array_equal(result, np.array([129], dtype='b')) + # Should not segfault: + assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + + def test_pickle_py2_scalar_latin1_hack(self): + # Check that scalar unpickling hack in Py3 that supports + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(...) + datas = [ + # (original, python2_pickle, koi8r_validity) + (np.unicode_('\u6bd2'), + (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" + b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n" + b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.float64(9e123), + (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n" + b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n" + b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1 + (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n" + b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n" + b"tp8\nRp9\n."), + 'different'), + ] + if sys.version_info[0] >= 3: + for original, data, koi8r_validity in datas: + result = pickle.loads(data, encoding='latin1') + assert_equal(result, original) + + # Decoding under non-latin1 encoding (e.g.) KOI8-R can + # produce bad results, but should not segfault. 
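+ # [Editorial aside, not in the upstream numpy file: latin1 maps every
+ # byte 0x00-0xff to the Unicode code point of the same value, so any
+ # byte string decodes losslessly; koi8-r assigns the high bytes to
+ # Cyrillic letters and box-drawing symbols instead (the b'\x9c' case
+ # above), so the same pickle stream can decode to different values or
+ # fail outright, as checked below.]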
+ if koi8r_validity == 'different': + # Unicode code points happen to lie within latin1, + # but are different in koi8-r, resulting to silent + # bogus results + result = pickle.loads(data, encoding='koi8-r') + assert_(result != original) + elif koi8r_validity == 'invalid': + # Unicode code points outside latin1, so results + # to an encoding exception + assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') + else: + raise ValueError(koi8r_validity) + + def test_structured_type_to_object(self): + a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') + a_obj = np.empty((2,), dtype=object) + a_obj[0] = (0, 1) + a_obj[1] = (3, 2) + # astype records -> object + assert_equal(a_rec.astype(object), a_obj) + # '=' records -> object + b = np.empty_like(a_obj) + b[...] = a_rec + assert_equal(b, a_obj) + # '=' object -> records + b = np.empty_like(a_rec) + b[...] = a_obj + assert_equal(b, a_rec) + + def test_assign_obj_listoflists(self): + # Ticket # 1870 + # The inner list should get assigned to the object elements + a = np.zeros(4, dtype=object) + b = a.copy() + a[0] = [1] + a[1] = [2] + a[2] = [3] + a[3] = [4] + b[...] = [[1], [2], [3], [4]] + assert_equal(a, b) + # The first dimension should get broadcast + a = np.zeros((2, 2), dtype=object) + a[...] = [[1, 2]] + assert_equal(a, [[1, 2], [1, 2]]) + + def test_memoryleak(self): + # Ticket #1917 - ensure that array data doesn't leak + for i in range(1000): + # 100MB times 1000 would give 100GB of memory usage if it leaks + a = np.empty((100000000,), dtype='i1') + del a + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_ufunc_reduce_memoryleak(self): + a = np.arange(6) + acnt = sys.getrefcount(a) + np.add.reduce(a) + assert_equal(sys.getrefcount(a), acnt) + + def test_search_sorted_invalid_arguments(self): + # Ticket #2021, should not segfault. + x = np.arange(0, 4, dtype='datetime64[D]') + assert_raises(TypeError, x.searchsorted, 1) + + def test_string_truncation(self): + # Ticket #1990 - Data can be truncated in creation of an array from a + # mixed sequence of numeric values and strings + for val in [True, 1234, 123.4, complex(1, 234)]: + for tostr in [asunicode, asbytes]: + b = np.array([val, tostr('xx')]) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xx'), val]) + assert_equal(tostr(b[1]), tostr(val)) + + # test also with longer strings + b = np.array([val, tostr('xxxxxxxxxx')]) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xxxxxxxxxx'), val]) + assert_equal(tostr(b[1]), tostr(val)) + + def test_string_truncation_ucs2(self): + # Ticket #2081. Python compiled with two byte unicode + # can lead to truncation if itemsize is not properly + # adjusted for NumPy's four byte unicode. + if sys.version_info[0] >= 3: + a = np.array(['abcd']) + else: + a = np.array([u'abcd']) + assert_equal(a.dtype.itemsize, 16) + + def test_unique_stable(self): + # Ticket #2063 must always choose stable sort for argsort to + # get consistent results + v = np.array(([0]*5 + [1]*6 + [2]*6)*4) + res = np.unique(v, return_index=True) + tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11])) + assert_equal(res, tgt) + + def test_unicode_alloc_dealloc_match(self): + # Ticket #1578, the mismatch only showed up when running + # python-debug for python versions >= 2.7, and then as + # a core dump and error message. 
+ a = np.array(['abc'], dtype=np.unicode)[0] + del a + + def test_refcount_error_in_clip(self): + # Ticket #1588 + a = np.zeros((2,), dtype='>i2').clip(min=0) + x = a + a + # This used to segfault: + y = str(x) + # Check the final string: + assert_(y == "[0 0]") + + def test_searchsorted_wrong_dtype(self): + # Ticket #2189, it used to segfault, so we check that it raises the + # proper exception. + a = np.array([('a', 1)], dtype='S1, int') + assert_raises(TypeError, np.searchsorted, a, 1.2) + # Ticket #2066, similar problem: + dtype = np.format_parser(['i4', 'i4'], [], []) + a = np.recarray((2, ), dtype) + assert_raises(TypeError, np.searchsorted, a, 1) + + def test_complex64_alignment(self): + # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment + dtt = np.complex64 + arr = np.arange(10, dtype=dtt) + # 2D array + arr2 = np.reshape(arr, (2, 5)) + # Fortran write followed by (C or F) read caused bus error + data_str = arr2.tobytes('F') + data_back = np.ndarray(arr2.shape, + arr2.dtype, + buffer=data_str, + order='F') + assert_array_equal(arr2, data_back) + + def test_structured_count_nonzero(self): + arr = np.array([0, 1]).astype('i4, (2)i4')[:1] + count = np.count_nonzero(arr) + assert_equal(count, 0) + + def test_copymodule_preserves_f_contiguity(self): + a = np.empty((2, 2), order='F') + b = copy.copy(a) + c = copy.deepcopy(a) + assert_(b.flags.fortran) + assert_(b.flags.f_contiguous) + assert_(c.flags.fortran) + assert_(c.flags.f_contiguous) + + def test_fortran_order_buffer(self): + import numpy as np + a = np.array([['Hello', 'Foob']], dtype='U5', order='F') + arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a) + arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'], + [u'F', u'o', u'o', u'b', u'']]]) + assert_array_equal(arr, arr2) + + def test_assign_from_sequence_error(self): + # Ticket #4024. + arr = np.array([1, 2, 3]) + assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9]) + arr.__setitem__(slice(None), [9]) + assert_equal(arr, [9, 9, 9]) + + def test_format_on_flex_array_element(self): + # Ticket #4369. 
+ dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
+ arr = np.array([('2000-01-01', 1)], dt)
+ formatted = '{0}'.format(arr[0])
+ assert_equal(formatted, str(arr[0]))
+
+ def test_richcompare_crash(self):
+ # gh-4613
+ import operator as op
+
+ # dummy class where __array__ throws exception
+ class Foo(object):
+ __array_priority__ = 1002
+
+ def __array__(self, *args, **kwargs):
+ raise Exception()
+
+ rhs = Foo()
+ lhs = np.array(1)
+ for f in [op.lt, op.le, op.gt, op.ge]:
+ if sys.version_info[0] >= 3:
+ assert_raises(TypeError, f, lhs, rhs)
+ elif not sys.py3kwarning:
+ # With -3 switch in python 2, DeprecationWarning is raised
+ # which we are not interested in
+ f(lhs, rhs)
+ assert_(not op.eq(lhs, rhs))
+ assert_(op.ne(lhs, rhs))
+
+ def test_richcompare_scalar_and_subclass(self):
+ # gh-4709
+ class Foo(np.ndarray):
+ def __eq__(self, other):
+ return "OK"
+
+ x = np.array([1, 2, 3]).view(Foo)
+ assert_equal(10 == x, "OK")
+ assert_equal(np.int32(10) == x, "OK")
+ assert_equal(np.array([10]) == x, "OK")
+
+ def test_pickle_empty_string(self):
+ # gh-3926
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ test_string = np.string_('')
+ assert_equal(pickle.loads(
+ pickle.dumps(test_string, protocol=proto)), test_string)
+
+ def test_frompyfunc_many_args(self):
+ # gh-5672
+
+ def passer(*args):
+ pass
+
+ assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
+
+ def test_repeat_broadcasting(self):
+ # gh-5743
+ a = np.arange(60).reshape(3, 4, 5)
+ for axis in chain(range(-a.ndim, a.ndim), [None]):
+ assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
+
+ def test_frompyfunc_nout_0(self):
+ # gh-2014
+
+ def f(x):
+ x[0], x[-1] = x[-1], x[0]
+
+ uf = np.frompyfunc(f, 1, 0)
+ a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
+ assert_equal(uf(a), ())
+ assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_leak_in_structured_dtype_comparison(self):
+ # gh-6250
+ recordtype = np.dtype([('a', np.float64),
+ ('b', np.int32),
+ ('d', (str, 5))])
+
+ # Simple case
+ a = np.zeros(2, dtype=recordtype)
+ for i in range(100):
+ a == a
+ assert_(sys.getrefcount(a) < 10)
+
+ # The case in the bug report.
+ before = sys.getrefcount(a)
+ u, v = a[0], a[1]
+ u == v
+ del u, v
+ gc.collect()
+ after = sys.getrefcount(a)
+ assert_equal(before, after)
+
+ def test_empty_percentile(self):
+ # gh-6530 / gh-6553
+ assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
+
+ def test_void_compare_segfault(self):
+ # gh-6922. The following should not segfault
+ a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
+ a.sort()
+
+ @pytest.mark.parametrize('val', [
+ # arrays and scalars
+ np.ones((10, 10), dtype='int32'),
+ np.uint64(10),
+ ])
+ @pytest.mark.parametrize('protocol',
+ range(2, pickle.HIGHEST_PROTOCOL + 1)
+ )
+ def test_pickle_module(self, protocol, val):
+ # gh-12837
+ s = pickle.dumps(val, protocol)
+ assert b'_multiarray_umath' not in s
+ if protocol == 5 and len(val.shape) > 0:
+ # unpickling ndarray goes through _frombuffer for protocol 5
+ assert b'numpy.core.numeric' in s
+ else:
+ assert b'numpy.core.multiarray' in s
+
+ def test_object_casting_errors(self):
+ # gh-11993
+ arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
+ assert_raises(TypeError, arr.astype, 'c8')
+
+ def test_eff1d_casting(self):
+ # gh-12711
+ x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
+ res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+ assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
+ assert_raises(ValueError, np.ediff1d, x, to_begin=(1<<20))
+ assert_raises(ValueError, np.ediff1d, x, to_end=(1<<20))
+
+ def test_pickle_datetime64_array(self):
+ # gh-12745 (would fail with pickle5 installed)
+ d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
+ arr = np.array([d])
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ dumped = pickle.dumps(arr, protocol=proto)
+ assert_equal(pickle.loads(dumped), arr)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_regression.pyc
new file mode 100644
index 0000000..38ee5dd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_regression.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.py
new file mode 100644
index 0000000..b21bc9d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.py
@@ -0,0 +1,65 @@
+"""
+Test the scalar constructors, which also do type-coercion
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import platform
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_equal, assert_almost_equal, assert_raises, assert_warns,
+ )
+
+class TestFromString(object):
+ def test_floating(self):
+ # Ticket #640, floats from string
+ fsingle = np.single('1.234')
+ fdouble = np.double('1.234')
+ flongdouble = np.longdouble('1.234')
+ assert_almost_equal(fsingle, 1.234)
+ assert_almost_equal(fdouble, 1.234)
+ assert_almost_equal(flongdouble, 1.234)
+
+ def test_floating_overflow(self):
+ """ Strings containing an unrepresentable float overflow """
+ fhalf = np.half('1e10000')
+ assert_equal(fhalf, np.inf)
+ fsingle = np.single('1e10000')
+ assert_equal(fsingle, np.inf)
+ fdouble = np.double('1e10000')
+ assert_equal(fdouble, np.inf)
+ flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
+ assert_equal(flongdouble, np.inf)
+
+ fhalf = np.half('-1e10000')
+ assert_equal(fhalf, -np.inf)
+ fsingle = np.single('-1e10000')
+ assert_equal(fsingle, -np.inf)
+ fdouble = np.double('-1e10000')
+ assert_equal(fdouble, -np.inf)
+ flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
+ assert_equal(flongdouble, -np.inf)
+
+ @pytest.mark.skipif((sys.version_info[0] >= 3)
+ or (sys.platform == "win32"
+ and platform.architecture()[0] == "64bit"),
+ reason="numpy.intp('0xff', 16) not supported on Py3 "
+ "or 64 bit Windows")
+ def test_intp(self):
+ # Ticket #99
+ i_width = np.int_(0).nbytes*2 - 1
+ np.intp('0x' + 'f'*i_width, 16)
+ assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
+ assert_raises(ValueError, np.intp, '0x1', 32)
+ assert_equal(255, np.intp('0xFF', 16))
+
+
+class TestFromInt(object):
+ def test_intp(self):
+ # Ticket
#99 + assert_equal(1024, np.intp(1024)) + + def test_uint64_from_negative(self): + assert_equal(np.uint64(-2), np.uint64(18446744073709551614)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.pyc new file mode 100644 index 0000000..f48f1c8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.py new file mode 100644 index 0000000..cd520d9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.py @@ -0,0 +1,105 @@ +""" +Test scalar buffer interface adheres to PEP 3118 +""" +import sys +import numpy as np +import pytest + +from numpy.testing import assert_, assert_equal, assert_raises + +# PEP3118 format strings for native (standard alignment and byteorder) types +scalars_and_codes = [ + (np.bool_, '?'), + (np.byte, 'b'), + (np.short, 'h'), + (np.intc, 'i'), + (np.int_, 'l'), + (np.longlong, 'q'), + (np.ubyte, 'B'), + (np.ushort, 'H'), + (np.uintc, 'I'), + (np.uint, 'L'), + (np.ulonglong, 'Q'), + (np.half, 'e'), + (np.single, 'f'), + (np.double, 'd'), + (np.longdouble, 'g'), + (np.csingle, 'Zf'), + (np.cdouble, 'Zd'), + (np.clongdouble, 'Zg'), +] +scalars_only, codes_only = zip(*scalars_and_codes) + + +@pytest.mark.skipif(sys.version_info.major < 3, + reason="Python 2 scalars lack a buffer interface") +class TestScalarPEP3118(object): + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_match_array(self, scalar): + x = scalar() + a = np.array([], dtype=np.dtype(scalar)) + mv_x = memoryview(x) + mv_a = memoryview(a) + assert_equal(mv_x.format, mv_a.format) + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_dim(self, scalar): + x = scalar() + mv_x = memoryview(x) + assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) + def test_scalar_known_code(self, scalar, code): + x = scalar() + mv_x = memoryview(x) + assert_equal(mv_x.format, code) + + def test_void_scalar_structured_data(self): + dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert_(isinstance(x, np.void)) + mv_x = memoryview(x) + expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize + expected_size += 2 * np.dtype((np.float64, 1)).itemsize + assert_equal(mv_x.itemsize, expected_size) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + # check scalar format string against ndarray format string + a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_(isinstance(a, np.ndarray)) + mv_a = memoryview(a) + assert_equal(mv_x.itemsize, mv_a.itemsize) + assert_equal(mv_x.format, mv_a.format) + + def test_datetime_memoryview(self): + # gh-11656 + # Values verified with v1.13.3, shape is not () as in test_scalar_dim + def as_dict(m): + return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize, + ndim=m.ndim, format=m.format) + + dt1 = np.datetime64('2016-01-01') + dt2 = 
np.datetime64('2017-01-01') + expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, + 'shape': (8,), 'format': 'B'} + v = memoryview(dt1) + res = as_dict(v) + assert_equal(res, expected) + + v = memoryview(dt2 - dt1) + res = as_dict(v) + assert_equal(res, expected) + + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(1, dt) + # Fails to create a PEP 3118 valid buffer + assert_raises((ValueError, BufferError), memoryview, a[0]) + diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.pyc new file mode 100644 index 0000000..78a56ce Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.py new file mode 100644 index 0000000..9e32cf6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +""" Test printing of scalar types. + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import assert_ + + +class A(object): + pass +class B(A, np.float64): + pass + +class C(B): + pass +class D(C, B): + pass + +class B0(np.float64, A): + pass +class C0(B0): + pass + +class TestInherit(object): + def test_init(self): + x = B(1.0) + assert_(str(x) == '1.0') + y = C(2.0) + assert_(str(y) == '2.0') + z = D(3.0) + assert_(str(z) == '3.0') + + def test_init2(self): + x = B0(1.0) + assert_(str(x) == '1.0') + y = C0(2.0) + assert_(str(y) == '2.0') + + +class TestCharacter(object): + def test_char_radd(self): + # GH issue 9620, reached gentype_add and raise TypeError + np_s = np.string_('abc') + np_u = np.unicode_('abc') + s = b'def' + u = u'def' + assert_(np_s.__radd__(np_s) is NotImplemented) + assert_(np_s.__radd__(np_u) is NotImplemented) + assert_(np_s.__radd__(s) is NotImplemented) + assert_(np_s.__radd__(u) is NotImplemented) + assert_(np_u.__radd__(np_s) is NotImplemented) + assert_(np_u.__radd__(np_u) is NotImplemented) + assert_(np_u.__radd__(s) is NotImplemented) + assert_(np_u.__radd__(u) is NotImplemented) + assert_(s + np_s == b'defabc') + assert_(u + np_u == u'defabc') + + + class Mystr(str, np.generic): + # would segfault + pass + + ret = s + Mystr('abc') + assert_(type(ret) is type(s)) + + def test_char_repeat(self): + np_s = np.string_('abc') + np_u = np.unicode_('abc') + np_i = np.int(5) + res_s = b'abc' * 5 + res_u = u'abc' * 5 + assert_(np_s * np_i == res_s) + assert_(np_u * np_i == res_u) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.pyc new file mode 100644 index 0000000..eaade3e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.py new file mode 100644 index 0000000..51bcf2b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.py @@ -0,0 +1,666 @@ +from __future__ import division, absolute_import, print_function + +import sys +import warnings +import itertools +import operator +import platform 
+import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_almost_equal, + assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, + assert_warns + ) + +types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, + np.int_, np.uint, np.longlong, np.ulonglong, + np.single, np.double, np.longdouble, np.csingle, + np.cdouble, np.clongdouble] + +floating_types = np.floating.__subclasses__() +complex_floating_types = np.complexfloating.__subclasses__() + + +# This compares scalarmath against ufuncs. + +class TestTypes(object): + def test_types(self): + for atype in types: + a = atype(1) + assert_(a == 1, "error with %r: got %r" % (atype, a)) + + def test_type_add(self): + # list of types + for k, atype in enumerate(types): + a_scalar = atype(3) + a_array = np.array([3], dtype=atype) + for l, btype in enumerate(types): + b_scalar = btype(1) + b_array = np.array([1], dtype=btype) + c_scalar = a_scalar + b_scalar + c_array = a_array + b_array + # It was comparing the type numbers, but the new ufunc + # function-finding mechanism finds the lowest function + # to which both inputs can be cast - which produces 'l' + # when you do 'q' + 'b'. The old function finding mechanism + # skipped ahead based on the first argument, but that + # does not produce properly symmetric results... + assert_equal(c_scalar.dtype, c_array.dtype, + "error with types (%d/'%c' + %d/'%c')" % + (k, np.dtype(atype).char, l, np.dtype(btype).char)) + + def test_type_create(self): + for k, atype in enumerate(types): + a = np.array([1, 2, 3], atype) + b = atype([1, 2, 3]) + assert_equal(a, b) + + def test_leak(self): + # test leak of scalar objects + # a leak would show up in valgrind as still-reachable of ~2.6MB + for i in range(200000): + np.add(1, 1) + + +class TestBaseMath(object): + def test_blocked(self): + # test alignments offsets for simd instructions + # alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]: + for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt, + type='binary', + max_size=sz): + exp1 = np.ones_like(inp1) + inp1[...] = np.ones_like(inp1) + inp2[...] = np.zeros_like(inp2) + assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg) + assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg) + assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg) + + np.add(inp1, inp2, out=out) + assert_almost_equal(out, exp1, err_msg=msg) + + inp2[...] += np.arange(inp2.size, dtype=dt) + 1 + assert_almost_equal(np.square(inp2), + np.multiply(inp2, inp2), err_msg=msg) + # skip true divide for ints + if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning): + assert_almost_equal(np.reciprocal(inp2), + np.divide(1, inp2), err_msg=msg) + + inp1[...] = np.ones_like(inp1) + np.add(inp1, 2, out=out) + assert_almost_equal(out, exp1 + 2, err_msg=msg) + inp2[...] 
= np.ones_like(inp2) + np.add(2, inp2, out=out) + assert_almost_equal(out, exp1 + 2, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_almost_equal(d + d, d * 2) + np.add(d, d, out=o) + np.add(np.ones_like(d), d, out=o) + np.add(d, np.ones_like(d), out=o) + np.add(np.ones_like(d), d) + np.add(d, np.ones_like(d)) + + +class TestPower(object): + def test_small_types(self): + for t in [np.int8, np.int16, np.float16]: + a = t(3) + b = a ** 4 + assert_(b == 81, "error with %r: got %r" % (t, b)) + + def test_large_types(self): + for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: + a = t(51) + b = a ** 4 + msg = "error with %r: got %r" % (t, b) + if np.issubdtype(t, np.integer): + assert_(b == 6765201, msg) + else: + assert_almost_equal(b, 6765201, err_msg=msg) + + def test_integers_to_negative_integer_power(self): + # Note that the combination of uint64 with a signed integer + # has common type np.float64. The other combinations should all + # raise a ValueError for integer ** negative integer. + exp = [np.array(-1, dt)[()] for dt in 'bhilq'] + + # 1 ** -1 possible special case + base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, 1.) + + # -1 ** -1 possible special case + base = [np.array(-1, dt)[()] for dt in 'bhilq'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, -1.) 
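+ # [Editorial aside, not in the upstream numpy file: uint64 is the
+ # exception above because no integer dtype can hold both uint64 and a
+ # signed exponent, so np.result_type(np.uint64, np.int64) is float64
+ # and the negative power is computed in floating point; all other
+ # integer pairings stay integral, where a negative exponent raises
+ # ValueError.]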
+ + # 2 ** -1 perhaps generic + base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, .5) + + def test_mixed_types(self): + typelist = [np.int8, np.int16, np.float16, + np.float32, np.float64, np.int8, + np.int16, np.int32, np.int64] + for t1 in typelist: + for t2 in typelist: + a = t1(3) + b = t2(2) + result = a**b + msg = ("error with %r and %r:" + "got %r, expected %r") % (t1, t2, result, 9) + if np.issubdtype(np.dtype(result), np.integer): + assert_(result == 9, msg) + else: + assert_almost_equal(result, 9, err_msg=msg) + + def test_modular_power(self): + # modular power is not implemented, so ensure it errors + a = 5 + b = 4 + c = 10 + expected = pow(a, b, c) # noqa: F841 + for t in (np.int32, np.float32, np.complex64): + # note that 3-operand power only dispatches on the first argument + assert_raises(TypeError, operator.pow, t(a), b, c) + assert_raises(TypeError, operator.pow, np.array(t(a)), b, c) + + +def floordiv_and_mod(x, y): + return (x // y, x % y) + + +def _signs(dt): + if dt in np.typecodes['UnsignedInteger']: + return (+1,) + else: + return (+1, -1) + + +class TestModulus(object): + + def test_modulus_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for op in [floordiv_and_mod, divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*71, dtype=dt1)[()] + b = np.array(sg2*19, dtype=dt2)[()] + div, rem = op(a, b) + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_modulus_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = list(divmod(*t) for t in arg) + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. 
+ tgtdiv, tgtrem = np.array(tgt, dtype=float).T + tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) + tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) + + for op in [floordiv_and_mod, divmod]: + for dt in np.typecodes['Float']: + msg = 'op: %s, dtype: %s' % (op.__name__, dt) + fa = a.astype(dt) + fb = b.astype(dt) + # use list comprehension so a_ and b_ are scalars + div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) + assert_equal(div, tgtdiv, err_msg=msg) + assert_equal(rem, tgtrem, err_msg=msg) + + def test_float_modulus_roundoff(self): + # gh-6127 + dt = np.typecodes['Float'] + for op in [floordiv_and_mod, divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*78*6e-8, dtype=dt1)[()] + b = np.array(sg2*6e-8, dtype=dt2)[()] + div, rem = op(a, b) + # Equal assertion should hold when fmod is used + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_modulus_corner_cases(self): + # Check remainder magnitude. + for dt in np.typecodes['Float']: + b = np.array(1.0, dtype=dt) + a = np.nextafter(np.array(0.0, dtype=dt), -b) + rem = operator.mod(a, b) + assert_(rem <= b, 'dt: %s' % dt) + rem = operator.mod(-a, -b) + assert_(rem >= -b, 'dt: %s' % dt) + + # Check nans, inf + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in remainder") + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + rem = operator.mod(fone, fzer) + assert_(np.isnan(rem), 'dt: %s' % dt) + # MSVC 2008 returns NaN here, so disable the check. + #rem = operator.mod(fone, finf) + #assert_(rem == fone, 'dt: %s' % dt) + rem = operator.mod(fone, fnan) + assert_(np.isnan(rem), 'dt: %s' % dt) + rem = operator.mod(finf, fone) + assert_(np.isnan(rem), 'dt: %s' % dt) + + +class TestComplexDivision(object): + def test_zero_division(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + a = t(0.0) + b = t(1.0) + assert_(np.isinf(b/a)) + b = t(complex(np.inf, np.inf)) + assert_(np.isinf(b/a)) + b = t(complex(np.inf, np.nan)) + assert_(np.isinf(b/a)) + b = t(complex(np.nan, np.inf)) + assert_(np.isinf(b/a)) + b = t(complex(np.nan, np.nan)) + assert_(np.isnan(b/a)) + b = t(0.) 
+ assert_(np.isnan(b/a)) + + def test_signed_zeros(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = ( + (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)), + (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)), + (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)), + (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)), + (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0)) + ) + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + def test_branches(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = list() + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by else condition as neither are == 0 + data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0))) + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by if condition as both are == 0 + # is performed in test_zero_division(), so this is skipped + + # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) + data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0))) + + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + +class TestConversion(object): + def test_int_from_long(self): + l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] + li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] + for T in [None, np.float64, np.int64]: + a = np.array(l, dtype=T) + assert_equal([int(_m) for _m in a], li) + + a = np.array(l[:3], dtype=np.uint64) + assert_equal([int(_m) for _m in a], li[:3]) + + def test_iinfo_long_values(self): + for code in 'bBhH': + res = np.array(np.iinfo(code).max + 1, dtype=code) + tgt = np.iinfo(code).min + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.array(np.iinfo(code).max, dtype=code) + tgt = np.iinfo(code).max + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.typeDict[code](np.iinfo(code).max) + tgt = np.iinfo(code).max + assert_(res == tgt) + + def test_int_raise_behaviour(self): + def overflow_error_func(dtype): + np.typeDict[dtype](np.iinfo(dtype).max + 1) + + for code in 'lLqQ': + assert_raises(OverflowError, overflow_error_func, code) + + def test_int_from_infinite_longdouble(self): + # gh-627 + x = np.longdouble(np.inf) + assert_raises(OverflowError, int, x) + with suppress_warnings() as sup: + sup.record(np.ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, int, x) + assert_equal(len(sup.log), 1) + + @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") + def test_int_from_infinite_longdouble___int__(self): + x = np.longdouble(np.inf) + assert_raises(OverflowError, x.__int__) + with suppress_warnings() as sup: + sup.record(np.ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, x.__int__) + 
assert_equal(len(sup.log), 1) + + @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") + @pytest.mark.skipif(platform.machine().startswith("ppc64"), + reason="IBM double double") + def test_int_from_huge_longdouble(self): + # Produce a longdouble that would overflow a double, + # use exponent that avoids bug in Darwin pow function. + exp = np.finfo(np.double).maxexp - 1 + huge_ld = 2 * 1234 * np.longdouble(2) ** exp + huge_i = 2 * 1234 * 2 ** exp + assert_(huge_ld != np.inf) + assert_equal(int(huge_ld), huge_i) + + def test_int_from_longdouble(self): + x = np.longdouble(1.5) + assert_equal(int(x), 1) + x = np.longdouble(-10.5) + assert_equal(int(x), -10) + + def test_numpy_scalar_relational_operators(self): + # All integer + for dt1 in np.typecodes['AllInteger']: + assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + #Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + + #unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + #Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + def test_scalar_comparison_to_none(self): + # Scalars should just return False and not give a warnings. + # The comparisons are flagged by pep8, ignore that. + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', FutureWarning) + assert_(not np.float32(1) == None) + assert_(not np.str_('test') == None) + # This is dubious (see below): + assert_(not np.datetime64('NaT') == None) + + assert_(np.float32(1) != None) + assert_(np.str_('test') != None) + # This is dubious (see below): + assert_(np.datetime64('NaT') != None) + assert_(len(w) == 0) + + # For documentation purposes, this is why the datetime is dubious. + # At the time of deprecation this was no behaviour change, but + # it has to be considered when the deprecations are done. 
+ assert_(np.equal(np.datetime64('NaT'), None)) + + +#class TestRepr(object): +# def test_repr(self): +# for t in types: +# val = t(1197346475.0137341) +# val_repr = repr(val) +# val2 = eval(val_repr) +# assert_equal( val, val2 ) + + +class TestRepr(object): + def _test_type_repr(self, t): + finfo = np.finfo(t) + last_fraction_bit_idx = finfo.nexp + finfo.nmant + last_exponent_bit_idx = finfo.nexp + storage_bytes = np.dtype(t).itemsize*8 + # could add some more types to the list below + for which in ['small denorm', 'small norm']: + # Values from https://en.wikipedia.org/wiki/IEEE_754 + constr = np.array([0x00]*storage_bytes, dtype=np.uint8) + if which == 'small denorm': + byte = last_fraction_bit_idx // 8 + bytebit = 7-(last_fraction_bit_idx % 8) + constr[byte] = 1 << bytebit + elif which == 'small norm': + byte = last_exponent_bit_idx // 8 + bytebit = 7-(last_exponent_bit_idx % 8) + constr[byte] = 1 << bytebit + else: + raise ValueError('hmm') + val = constr.view(t)[0] + val_repr = repr(val) + val2 = t(eval(val_repr)) + if not (val2 == 0 and val < 1e-100): + assert_equal(val, val2) + + def test_float_repr(self): + # long double test cannot work, because eval goes through a python + # float + for t in [np.float32, np.float64]: + self._test_type_repr(t) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf(object): + + def test_equal_nbytes(self): + for type in types: + x = type(0) + assert_(sys.getsizeof(x) > x.nbytes) + + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestMultiply(object): + def test_seq_repeat(self): + # Test that basic sequences get repeated when multiplied with + # numpy integers. And errors are raised when multiplied with others. + # Some of this behaviour may be controversial and could be open for + # change. + accepted_types = set(np.typecodes["AllInteger"]) + deprecated_types = {'?'} + forbidden_types = ( + set(np.typecodes["All"]) - accepted_types - deprecated_types) + forbidden_types -= {'V'} # can't default-construct void scalars + + for seq_type in (list, tuple): + seq = seq_type([1, 2, 3]) + for numpy_type in accepted_types: + i = np.dtype(numpy_type).type(2) + assert_equal(seq * i, seq * int(i)) + assert_equal(i * seq, int(i) * seq) + + for numpy_type in deprecated_types: + i = np.dtype(numpy_type).type() + assert_equal( + assert_warns(DeprecationWarning, operator.mul, seq, i), + seq * int(i)) + assert_equal( + assert_warns(DeprecationWarning, operator.mul, i, seq), + int(i) * seq) + + for numpy_type in forbidden_types: + i = np.dtype(numpy_type).type() + assert_raises(TypeError, operator.mul, seq, i) + assert_raises(TypeError, operator.mul, i, seq) + + def test_no_seq_repeat_basic_array_like(self): + # Test that an array-like which does not know how to be multiplied + # does not attempt sequence repeat (raise TypeError). + # See also gh-7428. + class ArrayLike(object): + def __init__(self, arr): + self.arr = arr + def __array__(self): + return self.arr + + # Test for simple ArrayLike above and memoryviews (original report) + for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))): + assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.)) + assert_array_equal(np.float32(3.) 
* arr_like, np.full(3, 3.)) + assert_array_equal(arr_like * np.int_(3), np.full(3, 3)) + assert_array_equal(np.int_(3) * arr_like, np.full(3, 3)) + + +class TestNegative(object): + def test_exceptions(self): + a = np.ones((), dtype=np.bool_)[()] + assert_raises(TypeError, operator.neg, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + assert_equal(operator.neg(a) + a, 0) + + +class TestSubtract(object): + def test_exceptions(self): + a = np.ones((), dtype=np.bool_)[()] + assert_raises(TypeError, operator.sub, a, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + assert_equal(operator.sub(a, a), 0) + + +class TestAbs(object): + def _test_abs_func(self, absfunc): + for tp in floating_types + complex_floating_types: + x = tp(-1.5) + assert_equal(absfunc(x), 1.5) + x = tp(0.0) + res = absfunc(x) + # assert_equal() checks zero signedness + assert_equal(res, 0.0) + x = tp(-0.0) + res = absfunc(x) + assert_equal(res, 0.0) + + x = tp(np.finfo(tp).max) + assert_equal(absfunc(x), x.real) + + x = tp(np.finfo(tp).tiny) + assert_equal(absfunc(x), x.real) + + x = tp(np.finfo(tp).min) + assert_equal(absfunc(x), -x.real) + + def test_builtin_abs(self): + self._test_abs_func(abs) + + def test_numpy_abs(self): + self._test_abs_func(np.abs) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.pyc new file mode 100644 index 0000000..39484a4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.py new file mode 100644 index 0000000..cde1355 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.py @@ -0,0 +1,326 @@ +# -*- coding: utf-8 -*- +""" Test printing of scalar types. + +""" +from __future__ import division, absolute_import, print_function + +import code, sys +import platform +import pytest + +from tempfile import TemporaryFile +import numpy as np +from numpy.testing import assert_, assert_equal, suppress_warnings + +class TestRealScalars(object): + def test_str(self): + svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] + styps = [np.float16, np.float32, np.float64, np.longdouble] + wanted = [ + ['0.0', '0.0', '0.0', '0.0' ], + ['-0.0', '-0.0', '-0.0', '-0.0'], + ['1.0', '1.0', '1.0', '1.0' ], + ['-1.0', '-1.0', '-1.0', '-1.0'], + ['inf', 'inf', 'inf', 'inf' ], + ['-inf', '-inf', '-inf', '-inf'], + ['nan', 'nan', 'nan', 'nan']] + + for wants, val in zip(wanted, svals): + for want, styp in zip(wants, styps): + msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val)) + assert_equal(str(styp(val)), want, err_msg=msg) + + def test_scalar_cutoffs(self): + # test that both the str and repr of np.float64 behaves + # like python floats in python3. 
Note that in python2
+ # the str has truncated digits, but we do not do this
+ def check(v):
+ # we compare str to repr, to avoid python2 truncation behavior
+ assert_equal(str(np.float64(v)), repr(v))
+ assert_equal(repr(np.float64(v)), repr(v))
+
+ # check we use the same number of significant digits
+ check(1.12345678901234567890)
+ check(0.0112345678901234567890)
+
+ # check switch from scientific output to positional and back
+ check(1e-5)
+ check(1e-4)
+ check(1e15)
+ check(1e16)
+
+ def test_py2_float_print(self):
+ # gh-10753
+ # In python2, the python float type implements an obsolete method
+ # tp_print, which overrides tp_repr and tp_str when using "print" to
+ # output to a "real file" (i.e., not a StringIO). Make sure we don't
+ # inherit it.
+ x = np.double(0.1999999999999)
+ with TemporaryFile('r+t') as f:
+ print(x, file=f)
+ f.seek(0)
+ output = f.read()
+ assert_equal(output, str(x) + '\n')
+ # In python2 the value float('0.1999999999999') prints with reduced
+ # precision as '0.2', but we want numpy's np.double('0.1999999999999')
+ # to print the unique value, '0.1999999999999'.
+
+ # gh-11031
+ # Only in the python2 interactive shell and when stdout is a "real"
+ # file, the output of the last command is printed to stdout without
+ # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
+ # x` are potentially different. Make sure they are the same. The only
+ # way I found to get prompt-like output is using an actual prompt from
+ # the 'code' module. Again, must use tempfile to get a "real" file.
+
+ # dummy user-input which enters one line and then ctrl-Ds.
+ def userinput():
+ yield 'np.sqrt(2)'
+ raise EOFError
+ gen = userinput()
+ input_func = lambda prompt="": next(gen)
+
+ with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
+ orig_stdout, orig_stderr = sys.stdout, sys.stderr
+ sys.stdout, sys.stderr = fo, fe
+
+ # py2 code.interact sends irrelevant internal DeprecationWarnings
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ code.interact(local={'np': np}, readfunc=input_func, banner='')
+
+ sys.stdout, sys.stderr = orig_stdout, orig_stderr
+
+ fo.seek(0)
+ capture = fo.read().strip()
+
+ assert_equal(capture, repr(np.sqrt(2)))
+
+ def test_dragon4(self):
+ # these tests are adapted from Ryan Juckett's dragon4 implementation,
+ # see dragon4.c for details.
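+ # As a rough guide to the helpers defined below (illustrative values,
+ # not from the original file): in the default unique mode the printers
+ # emit just enough digits to round-trip, e.g.
+ # >>> np.format_float_positional(np.float64(0.5))
+ # '0.5'
+ # >>> np.format_float_scientific(np.float32(0.5))
+ # '5.e-01'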
+ + fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k) + fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k) + fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k) + fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k) + + preckwd = lambda prec: {'unique': False, 'precision': prec} + + assert_equal(fpos32('1.0'), "1.") + assert_equal(fsci32('1.0'), "1.e+00") + assert_equal(fpos32('10.234'), "10.234") + assert_equal(fpos32('-10.234'), "-10.234") + assert_equal(fsci32('10.234'), "1.0234e+01") + assert_equal(fsci32('-10.234'), "-1.0234e+01") + assert_equal(fpos32('1000.0'), "1000.") + assert_equal(fpos32('1.0', precision=0), "1.") + assert_equal(fsci32('1.0', precision=0), "1.e+00") + assert_equal(fpos32('10.234', precision=0), "10.") + assert_equal(fpos32('-10.234', precision=0), "-10.") + assert_equal(fsci32('10.234', precision=0), "1.e+01") + assert_equal(fsci32('-10.234', precision=0), "-1.e+01") + assert_equal(fpos32('10.234', precision=2), "10.23") + assert_equal(fsci32('-10.234', precision=2), "-1.02e+01") + assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)), + '9.9999999999999995e-08') + assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)), + '9.8813129168249309e-324') + assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), + '9.9999999999999694e-311') + + + # test rounding + # 3.1415927410 is closest float32 to np.pi + assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), + "3.1415927410") + assert_equal(fsci32('3.14159265358979323846', **preckwd(10)), + "3.1415927410e+00") + assert_equal(fpos64('3.14159265358979323846', **preckwd(10)), + "3.1415926536") + assert_equal(fsci64('3.14159265358979323846', **preckwd(10)), + "3.1415926536e+00") + # 299792448 is closest float32 to 299792458 + assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000") + assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08") + assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000") + assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08") + + assert_equal(fpos32('3.14159265358979323846', **preckwd(25)), + "3.1415927410125732421875000") + assert_equal(fpos64('3.14159265358979323846', **preckwd(50)), + "3.14159265358979311599796346854418516159057617187500") + assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") + + + # smallest numbers + assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), + "0.00000000000000000000000000000000000000000000140129846432" + "4817070923729583289916131280261941876515771757068283889791" + "08268586060148663818836212158203125") + assert_equal(fpos64(0.5**(1022 + 52), unique=False, precision=1074), + "0.00000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000049406564584124654417656" + "8792868221372365059802614324764425585682500675507270208751" + "8652998363616359923797965646954457177309266567103559397963" + "9877479601078187812630071319031140452784581716784898210368" + "8718636056998730723050006387409153564984387312473397273169" + "6151400317153853980741262385655911710266585566867681870395" + "6031062493194527159149245532930545654440112748012970999954" + "1931989409080416563324524757147869014726780159355238611550" + 
"1348035264934720193790268107107491703332226844753335720832" + "4319360923828934583680601060115061698097530783422773183292" + "4790498252473077637592724787465608477820373446969953364701" + "7972677717585125660551199131504891101451037862738167250955" + "8373897335989936648099411642057026370902792427675445652290" + "87538682506419718265533447265625") + + # largest numbers + assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)), + "340282346638528859811704183484516925440.") + assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), + "1797693134862315708145274237317043567980705675258449965989" + "1747680315726078002853876058955863276687817154045895351438" + "2464234321326889464182768467546703537516986049910576551282" + "0762454900903893289440758685084551339423045832369032229481" + "6580855933212334827479782620414472316873817718091929988125" + "0404026184124858368.") + # Warning: In unique mode only the integer digits necessary for + # uniqueness are computed, the rest are 0. Should we change this? + assert_equal(fpos32(np.finfo(np.float32).max, precision=0), + "340282350000000000000000000000000000000.") + + # test trailing zeros + assert_equal(fpos32('1.0', unique=False, precision=3), "1.000") + assert_equal(fpos64('1.0', unique=False, precision=3), "1.000") + assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fpos32('1.5', unique=False, precision=3), "1.500") + assert_equal(fpos64('1.5', unique=False, precision=3), "1.500") + assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00") + assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00") + # gh-10713 + assert_equal(fpos64('324', unique=False, precision=5, fractional=False), "324.00") + + def test_dragon4_interface(self): + tps = [np.float16, np.float32, np.float64] + if hasattr(np, 'float128'): + tps.append(np.float128) + + fpos = np.format_float_positional + fsci = np.format_float_scientific + + for tp in tps: + # test padding + assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") + assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") + assert_equal(fpos(tp('-10.2'), + pad_left=4, pad_right=4), " -10.2 ") + + # test exp_digits + assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") + + # test fixed (non-unique) mode + assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") + assert_equal(fsci(tp('1.0'), unique=False, precision=4), + "1.0000e+00") + + # test trimming + # trim of 'k' or '.' only affects non-unique mode, since unique + # mode will not output trailing 0s. 
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), + "1.0000") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), + "1.") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), + "1.2" if tp != np.float16 else "1.2002") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), + "1.0") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='0'), "1.0") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), + "1") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='-'), "1") + + @pytest.mark.skipif(not platform.machine().startswith("ppc64"), + reason="only applies to ppc float128 values") + def test_ppc64_ibm_double_double128(self): + # check that the precision decreases once we get into the subnormal + # range. Unlike float64, this starts around 1e-292 instead of 1e-308, + # which happens when the first double is normal and the second is + # subnormal. + x = np.float128('2.123123123123123123123123123123123e-286') + got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)] + expected = [ + "1.06156156156156156156156156156157e-286", + "1.06156156156156156156156156156158e-287", + "1.06156156156156156156156156156159e-288", + "1.0615615615615615615615615615616e-289", + "1.06156156156156156156156156156157e-290", + "1.06156156156156156156156156156156e-291", + "1.0615615615615615615615615615616e-292", + "1.0615615615615615615615615615615e-293", + "1.061561561561561561561561561562e-294", + "1.06156156156156156156156156155e-295", + "1.0615615615615615615615615616e-296", + "1.06156156156156156156156156e-297", + "1.06156156156156156156156157e-298", + "1.0615615615615615615615616e-299", + "1.06156156156156156156156e-300", + "1.06156156156156156156155e-301", + "1.0615615615615615615616e-302", + "1.061561561561561561562e-303", + "1.06156156156156156156e-304", + "1.0615615615615615618e-305", + "1.06156156156156156e-306", + "1.06156156156156157e-307", + "1.0615615615615616e-308", + "1.06156156156156e-309", + "1.06156156156157e-310", + "1.0615615615616e-311", + "1.06156156156e-312", + "1.06156156154e-313", + "1.0615615616e-314", + "1.06156156e-315", + "1.06156155e-316", + "1.061562e-317", + "1.06156e-318", + "1.06155e-319", + "1.0617e-320", + "1.06e-321", + "1.04e-322", + "1e-323", + "0.0", + "0.0"] + assert_equal(got, expected) + + # Note: we follow glibc behavior, but it (or gcc) might not be right. 
+ # In particular we can get two values that print the same but are not
+ # equal:
+ a = np.float128('2')/np.float128('3')
+ b = np.float128(str(a))
+ assert_equal(str(a), str(b))
+ assert_(a != b)
+
+ def test_float32_roundtrip(self):
+ # gh-9360
+ x = np.float32(1024 - 2**-14)
+ y = np.float32(1024 - 2**-13)
+ assert_(repr(x) != repr(y))
+ assert_equal(np.float32(repr(x)), x)
+ assert_equal(np.float32(repr(y)), y)
+
+ def test_float64_vs_python(self):
+ # gh-2643, gh-6136, gh-6908
+ assert_equal(repr(np.float64(0.1)), repr(0.1))
+ assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.pyc
new file mode 100644
index 0000000..c2b6ac7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.py
new file mode 100644
index 0000000..b996321
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.py
@@ -0,0 +1,706 @@
+from __future__ import division, absolute_import, print_function
+
+import pytest
+import sys
+import numpy as np
+from numpy.core import (
+ array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
+ newaxis, concatenate, stack
+ )
+from numpy.core.shape_base import (_block_dispatcher, _block_setup,
+ _block_concatenate, _block_slicing)
+from numpy.testing import (
+ assert_, assert_raises, assert_array_equal, assert_equal,
+ assert_raises_regex, assert_warns
+ )
+
+from numpy.compat import long
+
+class TestAtleast1d(object):
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [array([1]), array([2])]
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [array([1, 2]), array([2, 3])]
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_3D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_r1array(self):
+ """ Test to make sure it is equivalent to Travis O's r1array function
+ """
+ assert_(atleast_1d(3).shape == (1,))
+ assert_(atleast_1d(3j).shape == (1,))
+ assert_(atleast_1d(long(3)).shape == (1,))
+ assert_(atleast_1d(3.0).shape == (1,))
+ assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
+
+
+class TestAtleast2d(object):
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [array([[1]]), array([[2]])]
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [array([[1, 2]]), array([[2, 3]])]
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_3D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = 
array([a, a]) + b = array([b, b]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_r2array(self): + """ Test to make sure equivalent Travis O's r2array function + """ + assert_(atleast_2d(3).shape == (1, 1)) + assert_(atleast_2d([3j, 1]).shape == (1, 2)) + assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2)) + + +class TestAtleast3d(object): + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_3d(a), atleast_3d(b)] + desired = [array([[[1]]]), array([[[2]]])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [array([[[1], [2]]]), array([[[2], [3]]])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [a[:,:, newaxis], b[:,:, newaxis]] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + +class TestHstack(object): + def test_non_iterable(self): + assert_raises(TypeError, hstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, hstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], [2]]) + res = hstack([a, b]) + desired = array([[1, 1], [2, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with assert_warns(FutureWarning): + hstack((np.arange(3) for _ in range(2))) + if sys.version_info.major > 2: + # map returns a list on Python 2 + with assert_warns(FutureWarning): + hstack(map(lambda x: x, np.ones((3, 2)))) + + +class TestVstack(object): + def test_non_iterable(self): + assert_raises(TypeError, vstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, vstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], [2]]) + res = vstack([a, b]) + desired = array([[1], [2], [1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = array([1, 2]) + b = array([1, 2]) + res = vstack([a, b]) + desired = array([[1, 2], [1, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with assert_warns(FutureWarning): + vstack((np.arange(3) for _ in range(2))) + + +class TestConcatenate(object): + def test_returns_copy(self): + a = np.eye(3) + b = np.concatenate([a]) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_exceptions(self): + # test axis must be in bounds + for ndim in [1, 2, 3]: + a = np.ones((1,)*ndim) + np.concatenate((a, a), axis=0) # OK + assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim) + assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) + + # Scalars cannot be concatenated + assert_raises(ValueError, 
concatenate, (0,))
+ assert_raises(ValueError, concatenate, (np.array(0),))
+
+ # test shapes must match except for concatenation axis
+ a = np.ones((1, 2, 3))
+ b = np.ones((2, 2, 3))
+ axis = list(range(3))
+ for i in range(3):
+ np.concatenate((a, b), axis=axis[0]) # OK
+ assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
+ assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
+ a = np.moveaxis(a, -1, 0)
+ b = np.moveaxis(b, -1, 0)
+ axis.append(axis.pop(0))
+
+ # No arrays to concatenate raises ValueError
+ assert_raises(ValueError, concatenate, ())
+
+ def test_concatenate_axis_None(self):
+ a = np.arange(4, dtype=np.float64).reshape((2, 2))
+ b = list(range(3))
+ c = ['x']
+ r = np.concatenate((a, a), axis=None)
+ assert_equal(r.dtype, a.dtype)
+ assert_equal(r.ndim, 1)
+ r = np.concatenate((a, b), axis=None)
+ assert_equal(r.size, a.size + len(b))
+ assert_equal(r.dtype, a.dtype)
+ r = np.concatenate((a, b, c), axis=None)
+ d = array(['0.0', '1.0', '2.0', '3.0',
+ '0', '1', '2', 'x'])
+ assert_array_equal(r, d)
+
+ out = np.zeros(a.size + len(b))
+ r = np.concatenate((a, b), axis=None)
+ rout = np.concatenate((a, b), axis=None, out=out)
+ assert_(out is rout)
+ assert_equal(r, rout)
+
+ def test_large_concatenate_axis_None(self):
+ # When no axis is given, concatenate uses flattened versions.
+ # This also had a bug with many arrays (see gh-5979).
+ x = np.arange(1, 100)
+ r = np.concatenate(x, None)
+ assert_array_equal(x, r)
+
+ # This should probably be deprecated:
+ r = np.concatenate(x, 100) # axis is >= MAXDIMS
+ assert_array_equal(x, r)
+
+ def test_concatenate(self):
+ # Test concatenate function
+ # One sequence returns unmodified (but as array)
+ r4 = list(range(4))
+ assert_array_equal(concatenate((r4,)), r4)
+ # Any sequence
+ assert_array_equal(concatenate((tuple(r4),)), r4)
+ assert_array_equal(concatenate((array(r4),)), r4)
+ # 1D default concatenation
+ r3 = list(range(3))
+ assert_array_equal(concatenate((r4, r3)), r4 + r3)
+ # Mixed sequence types
+ assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
+ assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
+ # Explicit axis specification
+ assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
+ # Including negative
+ assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
+ # 2D
+ a23 = array([[10, 11, 12], [13, 14, 15]])
+ a13 = array([[0, 1, 2]])
+ res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
+ assert_array_equal(concatenate((a23, a13)), res)
+ assert_array_equal(concatenate((a23, a13), 0), res)
+ assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
+ assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
+ # Arrays must match shape
+ assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
+ # 3D
+ res = arange(2 * 3 * 7).reshape((2, 3, 7))
+ a0 = res[..., :4]
+ a1 = res[..., 4:6]
+ a2 = res[..., 6:]
+ assert_array_equal(concatenate((a0, a1, a2), 2), res)
+ assert_array_equal(concatenate((a0, a1, a2), -1), res)
+ assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+
+ out = res.copy()
+ rout = concatenate((a0, a1, a2), 2, out=out)
+ assert_(out is rout)
+ assert_equal(res, rout)
+
+ def test_bad_out_shape(self):
+ a = array([1, 2])
+ b = array([3, 4])
+
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
+ concatenate((a, b), out=np.empty(4))
+
+ def test_out_dtype(self):
+ out = np.empty(4, np.float32)
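+ # (sketch) float64 results may be written into a float32 or complex64
+ # out array, since those are 'same_kind' casts; the float -> int32
+ # case below is an unsafe cast and must raise TypeError.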
+ res = concatenate((array([1, 2]), array([3, 4])), out=out) + assert_(out is res) + + out = np.empty(4, np.complex64) + res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out) + assert_(out is res) + + # invalid cast + out = np.empty(4, np.int32) + assert_raises(TypeError, concatenate, + (array([0.1, 0.2]), array([0.3, 0.4])), out=out) + + +def test_stack(): + # non-iterable input + assert_raises(TypeError, stack, 1) + + # 0d input + for input_ in [(1, 2, 3), + [np.int32(1), np.int32(2), np.int32(3)], + [np.array(1), np.array(2), np.array(3)]]: + assert_array_equal(stack(input_), [1, 2, 3]) + # 1d input examples + a = np.array([1, 2, 3]) + b = np.array([4, 5, 6]) + r1 = array([[1, 2, 3], [4, 5, 6]]) + assert_array_equal(np.stack((a, b)), r1) + assert_array_equal(np.stack((a, b), axis=1), r1.T) + # all input types + assert_array_equal(np.stack(list([a, b])), r1) + assert_array_equal(np.stack(array([a, b])), r1) + # all shapes for 1d input + arrays = [np.random.randn(3) for _ in range(10)] + axes = [0, 1, -1, -2] + expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2) + assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3) + # all shapes for 2d input + arrays = [np.random.randn(3, 4) for _ in range(10)] + axes = [0, 1, 2, -1, -2, -3] + expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10), + (3, 4, 10), (3, 10, 4), (10, 3, 4)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + # empty arrays + assert_(stack([[], [], []]).shape == (3, 0)) + assert_(stack([[], [], []], axis=1).shape == (0, 3)) + # out + out = np.zeros_like(r1) + np.stack((a, b), out=out) + assert_array_equal(out, r1) + # edge cases + assert_raises_regex(ValueError, 'need at least one array', stack, []) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [1, np.arange(3)]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.zeros((3, 3)), np.zeros(3)], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(2), np.arange(3)]) + # generator is deprecated + with assert_warns(FutureWarning): + result = stack((x for x in range(3))) + assert_array_equal(result, np.array([0, 1, 2])) + + +class TestBlock(object): + @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing']) + def block(self, request): + # blocking small arrays and large arrays go through different paths. + # the algorithm is triggered depending on the number of element + # copies required. + # We define a test fixture that forces most tests to go through + # both code paths. + # Ultimately, this should be removed if a single algorithm is found + # to be faster for both small and large arrays. 
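+ # For reference, the behaviour both paths must reproduce (a minimal
+ # sketch): np.block assembles a nested list of arrays into one array,
+ # e.g.
+ # >>> np.block([[np.ones((2, 2)), np.zeros((2, 2))],
+ # ... [np.zeros((2, 2)), np.ones((2, 2))]]).shape
+ # (4, 4)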
+ def _block_force_concatenate(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_concatenate(arrays, list_ndim, result_ndim) + + def _block_force_slicing(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_slicing(arrays, list_ndim, result_ndim) + + if request.param == 'force_concatenate': + return _block_force_concatenate + elif request.param == 'force_slicing': + return _block_force_slicing + elif request.param == 'block': + return block + else: + raise ValueError('Unknown blocking request. There is a typo in the tests.') + + def test_returns_copy(self, block): + a = np.eye(3) + b = block(a) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_block_total_size_estimate(self, block): + _, _, _, total_size = _block_setup([1]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1]]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1, 1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1], [1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1, 2], [3, 4]]) + assert total_size == 4 + + def test_block_simple_row_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + desired = np.array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + result = block([a_2d, b_2d]) + assert_equal(desired, result) + + def test_block_simple_column_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + expected = np.array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + result = block([[a_2d], [b_2d]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_row_wise(self, block): + # # # 1-D vectors are treated as row arrays + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([1, 2, 3, 2, 3, 4]) + result = block([a, b]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_multiple_rows(self, block): + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3, 2, 3, 4], + [1, 2, 3, 2, 3, 4]]) + result = block([[a, b], [a, b]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_column_wise(self, block): + # # # 1-D vectors are treated as row arrays + a_1d = np.array([1, 2, 3]) + b_1d = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3], + [2, 3, 4]]) + result = block([[a_1d], [b_1d]]) + assert_equal(expected, result) + + def test_block_mixed_1d_and_2d(self, block): + a_2d = np.ones((2, 2)) + b_1d = np.array([2, 2]) + result = block([[a_2d], [b_1d]]) + expected = np.array([[1, 1], + [1, 1], + [2, 2]]) + assert_equal(expected, result) + + def test_block_complicated(self, block): + # a bit more complicated + one_2d = np.array([[1, 1, 1]]) + two_2d = np.array([[2, 2, 2]]) + three_2d = np.array([[3, 3, 3, 3, 3, 3]]) + four_1d = np.array([4, 4, 4, 4, 4, 4]) + five_0d = np.array(5) + six_1d = np.array([6, 6, 6, 6, 6]) + zero_2d = np.zeros((2, 6)) + + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 3, 3, 3], + [4, 4, 4, 4, 4, 4], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + result = block([[one_2d, two_2d], + [three_2d], + [four_1d], + [five_0d, six_1d], + [zero_2d]]) + assert_equal(result, expected) + + def test_nested(self, block): + one = np.array([1, 1, 1]) + two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) + three = np.array([3, 3, 3]) + four = np.array([4, 4, 4]) + five = np.array(5) + six = np.array([6, 6, 6, 6, 6]) + zero = np.zeros((2, 6)) + + result = block([ + [ + block([ + [one], + [three], + [four] + ]), + two + ], + [five, six], + [zero] + ]) + expected = 
np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 2, 2, 2], + [4, 4, 4, 2, 2, 2], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + assert_equal(result, expected) + + def test_3d(self, block): + a000 = np.ones((2, 2, 2), int) * 1 + + a100 = np.ones((3, 2, 2), int) * 2 + a010 = np.ones((2, 3, 2), int) * 3 + a001 = np.ones((2, 2, 3), int) * 4 + + a011 = np.ones((2, 3, 3), int) * 5 + a101 = np.ones((3, 2, 3), int) * 6 + a110 = np.ones((3, 3, 2), int) * 7 + + a111 = np.ones((3, 3, 3), int) * 8 + + result = block([ + [ + [a000, a001], + [a010, a011], + ], + [ + [a100, a101], + [a110, a111], + ] + ]) + expected = array([[[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]]]) + + assert_array_equal(result, expected) + + def test_block_with_mismatched_shape(self, block): + a = np.array([0, 0]) + b = np.eye(2) + assert_raises(ValueError, block, [a, b]) + assert_raises(ValueError, block, [b, a]) + + to_block = [[np.ones((2,3)), np.ones((2,2))], + [np.ones((2,2)), np.ones((2,2))]] + assert_raises(ValueError, block, to_block) + def test_no_lists(self, block): + assert_equal(block(1), np.array(1)) + assert_equal(block(np.eye(3)), np.eye(3)) + + def test_invalid_nesting(self, block): + msg = 'depths are mismatched' + assert_raises_regex(ValueError, msg, block, [1, [2]]) + assert_raises_regex(ValueError, msg, block, [1, []]) + assert_raises_regex(ValueError, msg, block, [[1], 2]) + assert_raises_regex(ValueError, msg, block, [[], 2]) + assert_raises_regex(ValueError, msg, block, [ + [[1], [2]], + [[3, 4]], + [5] # missing brackets + ]) + + def test_empty_lists(self, block): + assert_raises_regex(ValueError, 'empty', block, []) + assert_raises_regex(ValueError, 'empty', block, [[]]) + assert_raises_regex(ValueError, 'empty', block, [[1], []]) + + def test_tuple(self, block): + assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4])) + assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)]) + + def test_different_ndims(self, block): + a = 1. + b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 1, 3)) + + result = block([a, b, c]) + expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_different_ndims_depths(self, block): + a = 1. 
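+ # (block broadcasts inputs to a common ndim by prepending length-1
+ # axes, which is what lets a scalar, a (1, 2) array and a (1, 2, 3)
+ # array nest together here)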
+ b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 2, 3)) + + result = block([[a, b], [c]]) + expected = np.array([[[1., 2., 2.], + [3., 3., 3.], + [3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_block_memory_order(self, block): + # 3D + arr_c = np.zeros((3,)*3, order='C') + arr_f = np.zeros((3,)*3, order='F') + + b_c = [[[arr_c, arr_c], + [arr_c, arr_c]], + [[arr_c, arr_c], + [arr_c, arr_c]]] + + b_f = [[[arr_f, arr_f], + [arr_f, arr_f]], + [[arr_f, arr_f], + [arr_f, arr_f]]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + arr_c = np.zeros((3, 3), order='C') + arr_f = np.zeros((3, 3), order='F') + # 2D + b_c = [[arr_c, arr_c], + [arr_c, arr_c]] + + b_f = [[arr_f, arr_f], + [arr_f, arr_f]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + +def test_block_dispatcher(): + class ArrayLike(object): + pass + a = ArrayLike() + b = ArrayLike() + c = ArrayLike() + assert_equal(list(_block_dispatcher(a)), [a]) + assert_equal(list(_block_dispatcher([a])), [a]) + assert_equal(list(_block_dispatcher([a, b])), [a, b]) + assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c]) + # don't recurse into non-lists + assert_equal(list(_block_dispatcher((a, b))), [(a, b)]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.pyc new file mode 100644 index 0000000..3dbb3af Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.py new file mode 100644 index 0000000..b83b8cc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.py @@ -0,0 +1,1859 @@ +from __future__ import division, absolute_import, print_function + +import warnings +import itertools + +import numpy as np +import numpy.core._umath_tests as umt +import numpy.linalg._umath_linalg as uml +import numpy.core._operand_flag_tests as opflag_tests +import numpy.core._rational_tests as _rational_tests +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, + assert_almost_equal, assert_array_almost_equal, assert_no_warnings, + assert_allclose, + ) +from numpy.core.numeric import pickle + + +class TestUfuncKwargs(object): + def test_kwarg_exact(self): + assert_raises(TypeError, np.add, 1, 2, castingx='safe') + assert_raises(TypeError, np.add, 1, 2, dtypex=int) + assert_raises(TypeError, np.add, 1, 2, extobjx=[4096]) + assert_raises(TypeError, np.add, 1, 2, outx=None) + assert_raises(TypeError, np.add, 1, 2, sigx='ii->i') + assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i') + assert_raises(TypeError, np.add, 1, 2, subokx=False) + assert_raises(TypeError, np.add, 1, 2, wherex=[True]) + + def test_sig_signature(self): + assert_raises(ValueError, np.add, 1, 2, sig='ii->i', + signature='ii->i') + + def test_sig_dtype(self): + assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i', + dtype=int) + assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i', + dtype=int) + + def test_extobj_refcount(self): + # Should not segfault with USE_DEBUG. 
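+ # (extobj is the optional [buffer size, error mask, error callback]
+ # list consulted by ufuncs; the bogus 'parrot' keyword must raise
+ # TypeError without leaking a reference to the extobj list)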
+ assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True) + + +class TestUfunc(object): + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_(pickle.loads(pickle.dumps(np.sin, + protocol=proto)) is np.sin) + + # Check that ufunc not defined in the top level numpy namespace + # such as numpy.core._rational_tests.test_add can also be pickled + res = pickle.loads(pickle.dumps(_rational_tests.test_add, + protocol=proto)) + assert_(res is _rational_tests.test_add) + + def test_pickle_withstring(self): + astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n" + b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") + assert_(pickle.loads(astring) is np.cos) + + def test_reduceat_shifting_sum(self): + L = 6 + x = np.arange(L) + idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() + assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) + + def test_generic_loops(self): + """Test generic loops. + + The loops to be tested are: + + PyUFunc_ff_f_As_dd_d + PyUFunc_ff_f + PyUFunc_dd_d + PyUFunc_gg_g + PyUFunc_FF_F_As_DD_D + PyUFunc_DD_D + PyUFunc_FF_F + PyUFunc_GG_G + PyUFunc_OO_O + PyUFunc_OO_O_method + PyUFunc_f_f_As_d_d + PyUFunc_d_d + PyUFunc_f_f + PyUFunc_g_g + PyUFunc_F_F_As_D_D + PyUFunc_F_F + PyUFunc_D_D + PyUFunc_G_G + PyUFunc_O_O + PyUFunc_O_O_method + PyUFunc_On_Om + + Where: + + f -- float + d -- double + g -- long double + F -- complex float + D -- complex double + G -- complex long double + O -- python object + + It is difficult to assure that each of these loops is entered from the + Python level as the special cased loops are a moving target and the + corresponding types are architecture dependent. We probably need to + define C level testing ufuncs to get at them. For the time being, I've + just looked at the signatures registered in the build directory to find + relevant functions. + + Fixme, currently untested: + + PyUFunc_ff_f_As_dd_d + PyUFunc_FF_F_As_DD_D + PyUFunc_f_f_As_d_d + PyUFunc_F_F_As_D_D + PyUFunc_On_Om + + """ + fone = np.exp + ftwo = lambda x, y: x**y + fone_val = 1 + ftwo_val = 1 + # check unary PyUFunc_f_f. + msg = "PyUFunc_f_f" + x = np.zeros(10, dtype=np.single)[0::2] + assert_almost_equal(fone(x), fone_val, err_msg=msg) + # check unary PyUFunc_d_d. + msg = "PyUFunc_d_d" + x = np.zeros(10, dtype=np.double)[0::2] + assert_almost_equal(fone(x), fone_val, err_msg=msg) + # check unary PyUFunc_g_g. + msg = "PyUFunc_g_g" + x = np.zeros(10, dtype=np.longdouble)[0::2] + assert_almost_equal(fone(x), fone_val, err_msg=msg) + # check unary PyUFunc_F_F. + msg = "PyUFunc_F_F" + x = np.zeros(10, dtype=np.csingle)[0::2] + assert_almost_equal(fone(x), fone_val, err_msg=msg) + # check unary PyUFunc_D_D. + msg = "PyUFunc_D_D" + x = np.zeros(10, dtype=np.cdouble)[0::2] + assert_almost_equal(fone(x), fone_val, err_msg=msg) + # check unary PyUFunc_G_G. + msg = "PyUFunc_G_G" + x = np.zeros(10, dtype=np.clongdouble)[0::2] + assert_almost_equal(fone(x), fone_val, err_msg=msg) + + # check binary PyUFunc_ff_f. + msg = "PyUFunc_ff_f" + x = np.ones(10, dtype=np.single)[0::2] + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) + # check binary PyUFunc_dd_d. + msg = "PyUFunc_dd_d" + x = np.ones(10, dtype=np.double)[0::2] + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) + # check binary PyUFunc_gg_g. + msg = "PyUFunc_gg_g" + x = np.ones(10, dtype=np.longdouble)[0::2] + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) + # check binary PyUFunc_FF_F. 
+ msg = "PyUFunc_FF_F" + x = np.ones(10, dtype=np.csingle)[0::2] + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) + # check binary PyUFunc_DD_D. + msg = "PyUFunc_DD_D" + x = np.ones(10, dtype=np.cdouble)[0::2] + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) + # check binary PyUFunc_GG_G. + msg = "PyUFunc_GG_G" + x = np.ones(10, dtype=np.clongdouble)[0::2] + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) + + # class to use in testing object method loops + class foo(object): + def conjugate(self): + return np.bool_(1) + + def logical_xor(self, obj): + return np.bool_(1) + + # check unary PyUFunc_O_O + msg = "PyUFunc_O_O" + x = np.ones(10, dtype=object)[0::2] + assert_(np.all(np.abs(x) == 1), msg) + # check unary PyUFunc_O_O_method + msg = "PyUFunc_O_O_method" + x = np.zeros(10, dtype=object)[0::2] + for i in range(len(x)): + x[i] = foo() + assert_(np.all(np.conjugate(x) == True), msg) + + # check binary PyUFunc_OO_O + msg = "PyUFunc_OO_O" + x = np.ones(10, dtype=object)[0::2] + assert_(np.all(np.add(x, x) == 2), msg) + # check binary PyUFunc_OO_O_method + msg = "PyUFunc_OO_O_method" + x = np.zeros(10, dtype=object)[0::2] + for i in range(len(x)): + x[i] = foo() + assert_(np.all(np.logical_xor(x, x)), msg) + + # check PyUFunc_On_Om + # fixme -- I don't know how to do this yet + + def test_all_ufunc(self): + """Try to check presence and results of all ufuncs. + + The list of ufuncs comes from generate_umath.py and is as follows: + + ===== ==== ============= =============== ======================== + done args function types notes + ===== ==== ============= =============== ======================== + n 1 conjugate nums + O + n 1 absolute nums + O complex -> real + n 1 negative nums + O + n 1 sign nums + O -> int + n 1 invert bool + ints + O flts raise an error + n 1 degrees real + M cmplx raise an error + n 1 radians real + M cmplx raise an error + n 1 arccos flts + M + n 1 arccosh flts + M + n 1 arcsin flts + M + n 1 arcsinh flts + M + n 1 arctan flts + M + n 1 arctanh flts + M + n 1 cos flts + M + n 1 sin flts + M + n 1 tan flts + M + n 1 cosh flts + M + n 1 sinh flts + M + n 1 tanh flts + M + n 1 exp flts + M + n 1 expm1 flts + M + n 1 log flts + M + n 1 log10 flts + M + n 1 log1p flts + M + n 1 sqrt flts + M real x < 0 raises error + n 1 ceil real + M + n 1 trunc real + M + n 1 floor real + M + n 1 fabs real + M + n 1 rint flts + M + n 1 isnan flts -> bool + n 1 isinf flts -> bool + n 1 isfinite flts -> bool + n 1 signbit real -> bool + n 1 modf real -> (frac, int) + n 1 logical_not bool + nums + M -> bool + n 2 left_shift ints + O flts raise an error + n 2 right_shift ints + O flts raise an error + n 2 add bool + nums + O boolean + is || + n 2 subtract bool + nums + O boolean - is ^ + n 2 multiply bool + nums + O boolean * is & + n 2 divide nums + O + n 2 floor_divide nums + O + n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d + n 2 fmod nums + M + n 2 power nums + O + n 2 greater bool + nums + O -> bool + n 2 greater_equal bool + nums + O -> bool + n 2 less bool + nums + O -> bool + n 2 less_equal bool + nums + O -> bool + n 2 equal bool + nums + O -> bool + n 2 not_equal bool + nums + O -> bool + n 2 logical_and bool + nums + M -> bool + n 2 logical_or bool + nums + M -> bool + n 2 logical_xor bool + nums + M -> bool + n 2 maximum bool + nums + O + n 2 minimum bool + nums + O + n 2 bitwise_and bool + ints + O flts raise an error + n 2 bitwise_or bool + ints + O flts raise an error + n 2 bitwise_xor bool + ints + O flts raise an error + n 2 arctan2 real + M + n 2 
remainder ints + real + O + n 2 hypot real + M + ===== ==== ============= =============== ======================== + + Types other than those listed will be accepted, but they are cast to + the smallest compatible type for which the function is defined. The + casting rules are: + + bool -> int8 -> float32 + ints -> double + + """ + pass + + # from include/numpy/ufuncobject.h + size_inferred = 2 + can_ignore = 4 + def test_signature0(self): + # the arguments to test_signature are: nin, nout, core_signature + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i),(i)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 1, 0)) + assert_equal(ixs, (0, 0)) + assert_equal(flags, (self.size_inferred,)) + assert_equal(sizes, (-1,)) + + def test_signature1(self): + # empty core signature; treat as plain ufunc (with trivial core) + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(),()->()") + assert_equal(enabled, 0) + assert_equal(num_dims, (0, 0, 0)) + assert_equal(ixs, ()) + assert_equal(flags, ()) + assert_equal(sizes, ()) + + def test_signature2(self): + # more complicated names for variables + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1,i2),(J_1)->(_kAB)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 1)) + assert_equal(ixs, (0, 1, 2, 3)) + assert_equal(flags, (self.size_inferred,)*4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature3(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, u"(i1, i12), (J_1)->(i12, i2)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 2)) + assert_equal(ixs, (0, 1, 2, 1, 3)) + assert_equal(flags, (self.size_inferred,)*4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature4(self): + # matrix_multiply signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n,k),(k,m)->(n,m)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred,)*3) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature5(self): + # matmul signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n?,k),(k,m?)->(n?,m?)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred | self.can_ignore, + self.size_inferred, + self.size_inferred | self.can_ignore)) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature6(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "(3)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature7(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3),(03,3),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (0, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature8(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3?),(3?,3?),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature_failure0(self): + # in the following calls, a ValueError should be raised because + # of error in core 
signature + # FIXME These should be using assert_raises + + # error: extra parenthesis + msg = "core_sig: extra parenthesis" + try: + ret = umt.test_signature(2, 1, "((i)),(i)->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: + pass + + def test_signature_failure1(self): + # error: parenthesis matching + msg = "core_sig: parenthesis matching" + try: + ret = umt.test_signature(2, 1, "(i),)i(->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: + pass + + def test_signature_failure2(self): + # error: incomplete signature. letters outside of parenthesis are ignored + msg = "core_sig: incomplete signature" + try: + ret = umt.test_signature(2, 1, "(i),->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: + pass + + def test_signature_failure3(self): + # error: incomplete signature. 2 output arguments are specified + msg = "core_sig: incomplete signature" + try: + ret = umt.test_signature(2, 2, "(i),(i)->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: + pass + + def test_get_signature(self): + assert_equal(umt.inner1d.signature, "(i),(i)->()") + + def test_forced_sig(self): + a = 0.5*np.arange(3, dtype='f8') + assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) + assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), + casting='unsafe'), [0, 0, 1]) + + b = np.zeros((3,), dtype='f8') + np.add(a, 0.5, out=b) + assert_equal(b, [0.5, 1, 1.5]) + b[:] = 0 + np.add(a, 0.5, sig='i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + + def test_true_divide(self): + a = np.array(10) + b = np.array(20) + tgt = np.array(0.5) + + for tc in 'bhilqBHILQefdgFDG': + dt = np.dtype(tc) + aa = a.astype(dt) + bb = b.astype(dt) + + # Check result value and dtype. + for x, y in itertools.product([aa, -aa], [bb, -bb]): + + # Check with no output type specified + if tc in 'FDG': + tgt = complex(x)/complex(y) + else: + tgt = float(x)/float(y) + + res = np.true_divide(x, y) + rtol = max(np.finfo(res).resolution, 1e-15) + assert_allclose(res, tgt, rtol=rtol) + + if tc in 'bhilqBHILQ': + assert_(res.dtype.name == 'float64') + else: + assert_(res.dtype.name == dt.name ) + + # Check with output type specified. This also checks for the + # incorrect casts in issue gh-3484 because the unary '-' does + # not change types, even for unsigned types, Hence casts in the + # ufunc from signed to unsigned and vice versa will lead to + # errors in the values. + for tcout in 'bhilqBHILQ': + dtout = np.dtype(tcout) + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + + for tcout in 'efdg': + dtout = np.dtype(tcout) + if tc in 'FDG': + # Casting complex to float is not allowed + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + else: + tgt = float(x)/float(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + atol = max(np.finfo(dtout).tiny, 3e-308) + # Some test values result in invalid for float16. 
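+                        # (Illustrative note: float16 spans only about
+                        # +/-65504, so the negated unsigned inputs above wrap
+                        # to huge values that overflow to inf when cast, and
+                        # inf/inf then yields nan, i.e. an 'invalid' result.)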
+ with np.errstate(invalid='ignore'): + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res) and tcout == 'e': + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + for tcout in 'FDG': + dtout = np.dtype(tcout) + tgt = complex(x)/complex(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + atol = max(np.finfo(dtout).tiny, 3e-308) + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res): + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + # Check booleans + a = np.ones((), dtype=np.bool_) + res = np.true_divide(a, a) + assert_(res == 1.0) + assert_(res.dtype.name == 'float64') + res = np.true_divide(~a, a) + assert_(res == 0.0) + assert_(res.dtype.name == 'float64') + + def test_sum_stability(self): + a = np.ones(500, dtype=np.float32) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) + + a = np.ones(500, dtype=np.float64) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) + + def test_sum(self): + for dt in (int, np.float16, np.float32, np.float64, np.longdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) + d = np.arange(1, v + 1, dtype=dt) + + # warning if sum overflows, which it does in float16 + overflow = not np.isfinite(tgt) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + assert_almost_equal(np.sum(d), tgt) + assert_equal(len(w), 1 * overflow) + + assert_almost_equal(np.sum(d[::-1]), tgt) + assert_equal(len(w), 2 * overflow) + + d = np.ones(500, dtype=dt) + assert_almost_equal(np.sum(d[::2]), 250.) + assert_almost_equal(np.sum(d[1::2]), 250.) + assert_almost_equal(np.sum(d[::3]), 167.) + assert_almost_equal(np.sum(d[1::3]), 167.) + assert_almost_equal(np.sum(d[::-2]), 250.) + assert_almost_equal(np.sum(d[-1::-2]), 250.) + assert_almost_equal(np.sum(d[::-3]), 167.) + assert_almost_equal(np.sum(d[-1::-3]), 167.) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + d += d + assert_almost_equal(d, 2.) + + def test_sum_complex(self): + for dt in (np.complex64, np.complex128, np.clongdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j) + d = np.empty(v, dtype=dt) + d.real = np.arange(1, v + 1) + d.imag = -np.arange(1, v + 1) + assert_almost_equal(np.sum(d), tgt) + assert_almost_equal(np.sum(d[::-1]), tgt) + + d = np.ones(500, dtype=dt) + 1j + assert_almost_equal(np.sum(d[::2]), 250. + 250j) + assert_almost_equal(np.sum(d[1::2]), 250. + 250j) + assert_almost_equal(np.sum(d[::3]), 167. + 167j) + assert_almost_equal(np.sum(d[1::3]), 167. + 167j) + assert_almost_equal(np.sum(d[::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[::-3]), 167. + 167j) + assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + 1j + d += d + assert_almost_equal(d, 2. 
+ 2j)
+
+    def test_sum_initial(self):
+        # Integer, single axis
+        assert_equal(np.sum([3], initial=2), 5)
+
+        # Floating point
+        assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
+
+        # Multiple non-adjacent axes
+        assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
+                     [12, 12, 12])
+
+    def test_inner1d(self):
+        a = np.arange(6).reshape((2, 3))
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
+        a = np.arange(6)
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
+
+    def test_broadcast(self):
+        msg = "broadcast"
+        a = np.arange(4).reshape((2, 1, 2))
+        b = np.arange(4).reshape((1, 2, 2))
+        assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+        msg = "extend & broadcast loop dimensions"
+        b = np.arange(4).reshape((2, 2))
+        assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+        # Broadcast in core dimensions should fail
+        a = np.arange(8).reshape((4, 2))
+        b = np.arange(4).reshape((4, 1))
+        assert_raises(ValueError, umt.inner1d, a, b)
+        # Extend core dimensions should fail
+        a = np.arange(8).reshape((4, 2))
+        b = np.array(7)
+        assert_raises(ValueError, umt.inner1d, a, b)
+        # Broadcast should fail
+        a = np.arange(2).reshape((2, 1, 1))
+        b = np.arange(3).reshape((3, 1, 1))
+        assert_raises(ValueError, umt.inner1d, a, b)
+
+    def test_type_cast(self):
+        msg = "type cast"
+        a = np.arange(6, dtype='short').reshape((2, 3))
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+                           err_msg=msg)
+        msg = "type cast on one argument"
+        a = np.arange(6).reshape((2, 3))
+        b = a + 0.1
+        assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1),
+                                  err_msg=msg)
+
+    def test_endian(self):
+        msg = "big endian"
+        a = np.arange(6, dtype='>i4').reshape((2, 3))
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+                           err_msg=msg)
+        msg = "little endian"
+        a = np.arange(6, dtype='<i4').reshape((2, 3))
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+                           err_msg=msg)
+
+    def test_axes_argument(self):
+        # inner1d signature: '(i),(i)->()'
+        inner1d = umt.inner1d
+        a = np.arange(27.).reshape((3, 3, 3))
+        b = np.arange(10., 19.).reshape((3, 1, 3))
+        # basic tests on inputs (outputs tested below with matrix_multiply).
+        c = inner1d(a, b)
+        assert_array_equal(c, (a * b).sum(-1))
+        # default
+        c = inner1d(a, b, axes=[(-1,), (-1,), ()])
+        assert_array_equal(c, (a * b).sum(-1))
+        # integers ok for single axis.
+        c = inner1d(a, b, axes=[-1, -1, ()])
+        assert_array_equal(c, (a * b).sum(-1))
+        # mix fine
+        c = inner1d(a, b, axes=[(-1,), -1, ()])
+        assert_array_equal(c, (a * b).sum(-1))
+        # can omit last axis.
+        c = inner1d(a, b, axes=[-1, -1])
+        assert_array_equal(c, (a * b).sum(-1))
+        # can pass in other types of integer (with __index__ protocol)
+        c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
+        assert_array_equal(c, (a * b).sum(-1))
+        # swap some axes
+        c = inner1d(a, b, axes=[0, 0])
+        assert_array_equal(c, (a * b).sum(0))
+        c = inner1d(a, b, axes=[0, 2])
+        assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+        # Check errors for improperly constructed axes arguments.
+        # should have list.
+        assert_raises(TypeError, inner1d, a, b, axes=-1)
+        # needs enough elements
+        assert_raises(ValueError, inner1d, a, b, axes=[-1])
+        # should pass in indices.
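+        # (Floats such as -1.0 are rejected even when integer-valued: each
+        # axes entry must support the __index__ protocol, as exercised above
+        # with np.int8 and a 0-d int32 array.)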
+ assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0]) + assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1]) + assert_raises(TypeError, inner1d, a, b, axes=[None, 1]) + # cannot pass an index unless there is only one dimension + # (output is wrong in this case) + assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1]) + # or pass in generally the wrong number of axes + assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)]) + assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()]) + # axes need to have same length. + assert_raises(ValueError, inner1d, a, b, axes=[0, 1]) + + # matrix_multiply signature: '(m,n),(n,p)->(m,p)' + mm = umt.matrix_multiply + a = np.arange(12).reshape((2, 3, 2)) + b = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # Sanity check. + c = mm(a, b) + assert_array_equal(c, np.matmul(a, b)) + # Default axes. + c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)]) + assert_array_equal(c, np.matmul(a, b)) + # Default with explicit axes. + c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)]) + assert_array_equal(c, np.matmul(a, b)) + # swap some axes. + c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)]) + assert_array_equal(c, np.matmul(a.transpose(1, 0, 2), + b.transpose(0, 3, 1, 2))) + # Default with output array. + c = np.empty((2, 2, 3, 1)) + d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b)) + # Transposed output array + c = np.empty((1, 2, 2, 3)) + d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2)) + # Check errors for improperly constructed axes arguments. + # wrong argument + assert_raises(TypeError, mm, a, b, axis=1) + # axes should be list + assert_raises(TypeError, mm, a, b, axes=1) + assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1))) + # list needs to have right length + assert_raises(ValueError, mm, a, b, axes=[]) + assert_raises(ValueError, mm, a, b, axes=[(-2, -1)]) + # list should contain tuples for multiple axes + assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1]) + assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1]) + assert_raises(TypeError, + mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]]) + assert_raises(TypeError, + mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]]) + assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None]) + # tuples should not have duplicated values + assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)]) + # arrays should have enough axes. + z = np.zeros((2, 2)) + assert_raises(ValueError, mm, z, z[0]) + assert_raises(ValueError, mm, z, z, out=z[:, 0]) + assert_raises(ValueError, mm, z[1], z, axes=[0, 1]) + assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1]) + # Regular ufuncs should not accept axes. + assert_raises(TypeError, np.add, 1., 1., axes=[0]) + # should be able to deal with bad unrelated kwargs. + assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True) + + def test_axis_argument(self): + # inner1d signature: '(i),(i)->()' + inner1d = umt.inner1d + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = inner1d(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, axis=-1) + assert_array_equal(c, (a * b).sum(-1)) + out = np.zeros_like(c) + d = inner1d(a, b, axis=-1, out=out) + assert_(d is out) + assert_array_equal(d, c) + c = inner1d(a, b, axis=0) + assert_array_equal(c, (a * b).sum(0)) + # Sanity checks on innerwt and cumsum. 
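+        # (innerwt is a weighted inner product with core signature
+        # '(i),(i),(i)->()'; cumsum maps '(n)->(n)', so unlike inner1d it
+        # keeps the core dimension in its output.)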
+ a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, axis=0), + np.sum(a * b * w, axis=0)) + assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0)) + assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1)) + out = np.empty_like(a) + b = umt.cumsum(a, out=out, axis=0) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=0)) + b = umt.cumsum(a, out=out, axis=1) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=-1)) + # Check errors. + # Cannot pass in both axis and axes. + assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0]) + # Not an integer. + assert_raises(TypeError, inner1d, a, b, axis=[0]) + # more than 1 core dimensions. + mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, axis=1) + # Output wrong size in axis. + out = np.empty((1, 2, 3), dtype=a.dtype) + assert_raises(ValueError, umt.cumsum, a, out=out, axis=0) + # Regular ufuncs should not accept axis. + assert_raises(TypeError, np.add, 1., 1., axis=0) + + def test_keepdims_argument(self): + # inner1d signature: '(i),(i)->()' + inner1d = umt.inner1d + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = inner1d(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + out = np.zeros_like(c) + d = inner1d(a, b, keepdims=True, out=out) + assert_(d is out) + assert_array_equal(d, c) + # Now combined with axis and axes. + c = inner1d(a, b, axis=-1, keepdims=False) + assert_array_equal(c, (a * b).sum(-1, keepdims=False)) + c = inner1d(a, b, axis=-1, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = inner1d(a, b, axis=0, keepdims=False) + assert_array_equal(c, (a * b).sum(0, keepdims=False)) + c = inner1d(a, b, axis=0, keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = inner1d(a, b, axes=[0, 0], keepdims=False) + assert_array_equal(c, (a * b).sum(0)) + c = inner1d(a, b, axes=[0, 0, 0], keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = inner1d(a, b, axes=[0, 2], keepdims=False) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + c = inner1d(a, b, axes=[0, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = inner1d(a, b, axes=[0, 2, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = inner1d(a, b, axes=[0, 2, 0], keepdims=True) + assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True)) + # Hardly useful, but should work. + c = inner1d(a, b, axes=[0, 2, 1], keepdims=True) + assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1)) + .sum(1, keepdims=True)) + # Check with two core dimensions. 
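+        # (uml.det and uml.slogdet operate on '(m,m)' cores, so keepdims=True
+        # restores both core dimensions as size-1 axes -- hence the
+        # expected[:, np.newaxis, np.newaxis] comparisons below.)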
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected = uml.det(a) + c = uml.det(a, keepdims=False) + assert_array_equal(c, expected) + c = uml.det(a, keepdims=True) + assert_array_equal(c, expected[:, np.newaxis, np.newaxis]) + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected_s, expected_l = uml.slogdet(a) + cs, cl = uml.slogdet(a, keepdims=False) + assert_array_equal(cs, expected_s) + assert_array_equal(cl, expected_l) + cs, cl = uml.slogdet(a, keepdims=True) + assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis]) + assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis]) + # Sanity check on innerwt. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, keepdims=True), + np.sum(a * b * w, axis=-1, keepdims=True)) + assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True), + np.sum(a * b * w, axis=0, keepdims=True)) + # Check errors. + # Not a boolean + assert_raises(TypeError, inner1d, a, b, keepdims='true') + # More than 1 core dimension, and core output dimensions. + mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, keepdims=True) + assert_raises(TypeError, mm, a, b, keepdims=False) + # Regular ufuncs should not accept keepdims. + assert_raises(TypeError, np.add, 1., 1., keepdims=False) + + def test_innerwt(self): + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + a = np.arange(100, 124).reshape((2, 3, 4)) + b = np.arange(200, 224).reshape((2, 3, 4)) + w = np.arange(300, 324).reshape((2, 3, 4)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + + def test_innerwt_empty(self): + """Test generalized ufunc with zero-sized operands""" + a = np.array([], dtype='f8') + b = np.array([], dtype='f8') + w = np.array([], dtype='f8') + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + + def test_cross1d(self): + """Test with fixed-sized signature.""" + a = np.eye(3) + assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3))) + out = np.zeros((3, 3)) + result = umt.cross1d(a[0], a, out) + assert_(result is out) + assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1]))) + assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4)) + assert_raises(ValueError, umt.cross1d, a, np.arange(4.)) + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4))) + + def test_can_ignore_signature(self): + # Comparing the effects of ? in signature: + # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there. + # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p. + mat = np.arange(12).reshape((2, 3, 2)) + single_vec = np.arange(2) + col_vec = single_vec[:, np.newaxis] + col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # matrix @ single column vector with proper dimension + mm_col_vec = umt.matrix_multiply(mat, col_vec) + # matmul does the same thing + matmul_col_vec = umt.matmul(mat, col_vec) + assert_array_equal(matmul_col_vec, mm_col_vec) + # matrix @ vector without dimension making it a column vector. + # matrix multiply fails -> missing core dim. + assert_raises(ValueError, umt.matrix_multiply, mat, single_vec) + # matmul mimicker passes, and returns a vector. + matmul_col = umt.matmul(mat, single_vec) + assert_array_equal(matmul_col, mm_col_vec.squeeze()) + # Now with a column array: same as for column vector, + # broadcasting sensibly. 
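+        # (Here mat has shape (2, 3, 2) and col_vec_array (2, 2, 2, 1): the
+        # loop dimensions (2,) and (2, 2) broadcast to (2, 2), and the
+        # (3, 2) @ (2, 1) cores give a (2, 2, 3, 1) result.)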
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array) + matmul_col_vec = umt.matmul(mat, col_vec_array) + assert_array_equal(matmul_col_vec, mm_col_vec) + # As above, but for row vector + single_vec = np.arange(3) + row_vec = single_vec[np.newaxis, :] + row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1 + # row vector @ matrix + mm_row_vec = umt.matrix_multiply(row_vec, mat) + matmul_row_vec = umt.matmul(row_vec, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # single row vector @ matrix + assert_raises(ValueError, umt.matrix_multiply, single_vec, mat) + matmul_row = umt.matmul(single_vec, mat) + assert_array_equal(matmul_row, mm_row_vec.squeeze()) + # row vector array @ matrix + mm_row_vec = umt.matrix_multiply(row_vec_array, mat) + matmul_row_vec = umt.matmul(row_vec_array, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # Now for vector combinations + # row vector @ column vector + col_vec = row_vec.T + col_vec_array = row_vec_array.swapaxes(-2, -1) + mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec) + matmul_row_col_vec = umt.matmul(row_vec, col_vec) + assert_array_equal(matmul_row_col_vec, mm_row_col_vec) + # single row vector @ single col vector + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec) + matmul_row_col = umt.matmul(single_vec, single_vec) + assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze()) + # row vector array @ matrix + mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array) + matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array) + assert_array_equal(matmul_row_col_array, mm_row_col_array) + # Finally, check that things are *not* squeezed if one gives an + # output. + out = np.zeros_like(mm_row_col_array) + out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + out[:] = 0 + out = umt.matmul(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + # And check one cannot put missing dimensions back. + out = np.zeros_like(mm_row_col_vec) + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec, + out) + # But fine for matmul, since it is just a broadcast. 
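+        # (matmul's m? and p? dimensions are flagged as ignorable, so the
+        # size-1 dimensions of out simply broadcast against the 1-d inputs,
+        # where matrix_multiply insists both core dimensions be present.)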
+ out = umt.matmul(single_vec, single_vec, out) + assert_array_equal(out, mm_row_col_vec.squeeze()) + + def test_matrix_multiply(self): + self.compare_matrix_multiply_results(np.long) + self.compare_matrix_multiply_results(np.double) + + def test_matrix_multiply_umath_empty(self): + res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0))) + assert_array_equal(res, np.zeros((0, 0))) + res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10))) + assert_array_equal(res, np.zeros((10, 10))) + + def compare_matrix_multiply_results(self, tp): + d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) + d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) + msg = "matrix multiply on type %s" % d1.dtype.name + + def permute_n(n): + if n == 1: + return ([0],) + ret = () + base = permute_n(n-1) + for perm in base: + for i in range(n): + new = perm + [n-1] + new[n-1] = new[i] + new[i] = n-1 + ret += (new,) + return ret + + def slice_n(n): + if n == 0: + return ((),) + ret = () + base = slice_n(n-1) + for sl in base: + ret += (sl+(slice(None),),) + ret += (sl+(slice(0, 1),),) + return ret + + def broadcastable(s1, s2): + return s1 == s2 or s1 == 1 or s2 == 1 + + permute_3 = permute_n(3) + slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,) + + ref = True + for p1 in permute_3: + for p2 in permute_3: + for s1 in slice_3: + for s2 in slice_3: + a1 = d1.transpose(p1)[s1] + a2 = d2.transpose(p2)[s2] + ref = ref and a1.base is not None + ref = ref and a2.base is not None + if (a1.shape[-1] == a2.shape[-2] and + broadcastable(a1.shape[0], a2.shape[0])): + assert_array_almost_equal( + umt.matrix_multiply(a1, a2), + np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * + a1[..., np.newaxis,:], axis=-1), + err_msg=msg + ' %s %s' % (str(a1.shape), + str(a2.shape))) + + assert_equal(ref, True, err_msg="reference check") + + def test_euclidean_pdist(self): + a = np.arange(12, dtype=float).reshape(4, 3) + out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) + umt.euclidean_pdist(a, out) + b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) + b = b[~np.tri(a.shape[0], dtype=bool)] + assert_almost_equal(out, b) + # An output array is required to determine p with signature (n,d)->(p) + assert_raises(ValueError, umt.euclidean_pdist, a) + + def test_cumsum(self): + a = np.arange(10) + result = umt.cumsum(a) + assert_array_equal(result, a.cumsum()) + + def test_object_logical(self): + a = np.array([3, None, True, False, "test", ""], dtype=object) + assert_equal(np.logical_or(a, None), + np.array([x or None for x in a], dtype=object)) + assert_equal(np.logical_or(a, True), + np.array([x or True for x in a], dtype=object)) + assert_equal(np.logical_or(a, 12), + np.array([x or 12 for x in a], dtype=object)) + assert_equal(np.logical_or(a, "blah"), + np.array([x or "blah" for x in a], dtype=object)) + + assert_equal(np.logical_and(a, None), + np.array([x and None for x in a], dtype=object)) + assert_equal(np.logical_and(a, True), + np.array([x and True for x in a], dtype=object)) + assert_equal(np.logical_and(a, 12), + np.array([x and 12 for x in a], dtype=object)) + assert_equal(np.logical_and(a, "blah"), + np.array([x and "blah" for x in a], dtype=object)) + + assert_equal(np.logical_not(a), + np.array([not x for x in a], dtype=object)) + + assert_equal(np.logical_or.reduce(a), 3) + assert_equal(np.logical_and.reduce(a), None) + + def test_object_comparison(self): + class HasComparisons(object): + def __eq__(self, other): + return '==' + + arr0d = np.array(HasComparisons()) + assert_equal(arr0d == arr0d, True) + 
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast + assert_equal(np.equal(arr0d, arr0d, dtype=object), '==') + + arr1d = np.array([HasComparisons()]) + assert_equal(arr1d == arr1d, np.array([True])) + assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) + + def test_object_array_reduction(self): + # Reductions on object arrays + a = np.array(['a', 'b', 'c'], dtype=object) + assert_equal(np.sum(a), 'abc') + assert_equal(np.max(a), 'c') + assert_equal(np.min(a), 'a') + a = np.array([True, False, True], dtype=object) + assert_equal(np.sum(a), 2) + assert_equal(np.prod(a), 0) + assert_equal(np.any(a), True) + assert_equal(np.all(a), False) + assert_equal(np.max(a), True) + assert_equal(np.min(a), False) + assert_equal(np.array([[1]], dtype=object).sum(), 1) + assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2]) + assert_equal(np.array([1], dtype=object).sum(initial=1), 2) + + def test_object_array_accumulate_inplace(self): + # Checks that in-place accumulates work, see also gh-7402 + arr = np.ones(4, dtype=object) + arr[:] = [[1] for i in range(4)] + # Twice reproduced also for tuples: + np.add.accumulate(arr, out=arr) + np.add.accumulate(arr, out=arr) + assert_array_equal(arr, np.array([[1]*i for i in [1, 3, 6, 10]])) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + np.add.accumulate(arr, out=arr, axis=-1) + np.add.accumulate(arr, out=arr, axis=-1) + assert_array_equal(arr[0, :], np.array([[2]*i for i in [1, 3, 6, 10]])) + + def test_object_array_reduceat_inplace(self): + # Checks that in-place reduceats work, see also gh-7465 + arr = np.empty(4, dtype=object) + arr[:] = [[1] for i in range(4)] + out = np.empty(4, dtype=object) + out[:] = [[1] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr) + np.add.reduceat(arr, np.arange(4), out=arr) + assert_array_equal(arr, out) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + out = np.ones((2, 4), dtype=object) + out[0, :] = [[2] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + assert_array_equal(arr, out) + + def test_zerosize_reduction(self): + # Test with default dtype and object dtype + for a in [[], np.array([], dtype=object)]: + assert_equal(np.sum(a), 0) + assert_equal(np.prod(a), 1) + assert_equal(np.any(a), False) + assert_equal(np.all(a), True) + assert_raises(ValueError, np.max, a) + assert_raises(ValueError, np.min, a) + + def test_axis_out_of_bounds(self): + a = np.array([False, False]) + assert_raises(np.AxisError, a.all, axis=1) + a = np.array([False, False]) + assert_raises(np.AxisError, a.all, axis=-2) + + a = np.array([False, False]) + assert_raises(np.AxisError, a.any, axis=1) + a = np.array([False, False]) + assert_raises(np.AxisError, a.any, axis=-2) + + def test_scalar_reduction(self): + # The functions 'sum', 'prod', etc allow specifying axis=0 + # even for scalars + assert_equal(np.sum(3, axis=0), 3) + assert_equal(np.prod(3.5, axis=0), 3.5) + assert_equal(np.any(True, axis=0), True) + assert_equal(np.all(False, axis=0), False) + assert_equal(np.max(3, axis=0), 3) + assert_equal(np.min(2.5, axis=0), 2.5) + + # Check scalar behaviour for ufuncs without an identity + assert_equal(np.power.reduce(3), 3) + + # Make sure that scalars are coming out 
from this operation + assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) + + # check if scalars/0-d arrays get cast + assert_(type(np.any(0, axis=0)) is np.bool_) + + # assert that 0-d arrays get wrapped + class MyArray(np.ndarray): + pass + a = np.array(1).view(MyArray) + assert_(type(np.any(a)) is MyArray) + + def test_casting_out_param(self): + # Test that it's possible to do casts on output + a = np.ones((200, 100), np.int64) + b = np.ones((200, 100), np.int64) + c = np.ones((200, 100), np.float64) + np.add(a, b, out=c) + assert_equal(c, 2) + + a = np.zeros(65536) + b = np.zeros(65536, dtype=np.float32) + np.subtract(a, 0, out=b) + assert_equal(b, 0) + + def test_where_param(self): + # Test that the where= ufunc parameter works with regular arrays + a = np.arange(7) + b = np.ones(7) + c = np.zeros(7) + np.add(a, b, out=c, where=(a % 2 == 1)) + assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) + + a = np.arange(4).reshape(2, 2) + 2 + np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) + assert_equal(a, [[2, 27], [16, 5]]) + # Broadcasting the where= parameter + np.subtract(a, 2, out=a, where=[True, False]) + assert_equal(a, [[0, 27], [14, 5]]) + + def test_where_param_buffer_output(self): + # This test is temporarily skipped because it requires + # adding masking features to the nditer to work properly + + # With casting on output + a = np.ones(10, np.int64) + b = np.ones(10, np.int64) + c = 1.5 * np.ones(10, np.float64) + np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) + assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) + + def test_where_param_alloc(self): + # With casting and allocated output + a = np.array([1], dtype=np.int64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + # No casting and allocated output + a = np.array([1], dtype=np.float64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + def check_identityless_reduction(self, a): + # np.minimum.reduce is an identityless reduction + + # Verify that it sees the zero at various positions + a[...] = 1 + a[1, 0, 0] = 0 + assert_equal(np.minimum.reduce(a, axis=None), 0) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) + assert_equal(np.minimum.reduce(a, axis=0), + [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=1), + [[1, 1, 1, 1], [0, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=2), + [[1, 1, 1], [0, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=()), a) + + a[...] = 1 + a[0, 1, 0] = 0 + assert_equal(np.minimum.reduce(a, axis=None), 0) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) + assert_equal(np.minimum.reduce(a, axis=0), + [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=1), + [[0, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=2), + [[1, 0, 1], [1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=()), a) + + a[...] 
= 1 + a[0, 0, 1] = 0 + assert_equal(np.minimum.reduce(a, axis=None), 0) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) + assert_equal(np.minimum.reduce(a, axis=0), + [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=1), + [[1, 0, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=2), + [[0, 1, 1], [1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=()), a) + + def test_identityless_reduction_corder(self): + a = np.empty((2, 3, 4), order='C') + self.check_identityless_reduction(a) + + def test_identityless_reduction_forder(self): + a = np.empty((2, 3, 4), order='F') + self.check_identityless_reduction(a) + + def test_identityless_reduction_otherorder(self): + a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) + self.check_identityless_reduction(a) + + def test_identityless_reduction_noncontig(self): + a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) + a = a[1:, 1:, 1:] + self.check_identityless_reduction(a) + + def test_identityless_reduction_noncontig_unaligned(self): + a = np.empty((3*4*5*8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + self.check_identityless_reduction(a) + + def test_initial_reduction(self): + # np.minimum.reduce is an identityless reduction + + # For cases like np.maximum(np.abs(...), initial=0) + # More generally, a supremum over non-negative numbers. + assert_equal(np.maximum.reduce([], initial=0), 0) + + # For cases like reduction of an empty array over the reals. + assert_equal(np.minimum.reduce([], initial=np.inf), np.inf) + assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf) + + # Random tests + assert_equal(np.minimum.reduce([5], initial=4), 4) + assert_equal(np.maximum.reduce([4], initial=5), 5) + assert_equal(np.maximum.reduce([5], initial=4), 5) + assert_equal(np.minimum.reduce([4], initial=5), 4) + + # Check initial=None raises ValueError for both types of ufunc reductions + assert_raises(ValueError, np.minimum.reduce, [], initial=None) + assert_raises(ValueError, np.add.reduce, [], initial=None) + + # Check that np._NoValue gives default behavior. + assert_equal(np.add.reduce([], initial=np._NoValue), 0) + + # Check that initial kwarg behaves as intended for dtype=object + a = np.array([10], dtype=object) + res = np.add.reduce(a, initial=5) + assert_equal(res, 15) + + def test_identityless_reduction_nonreorderable(self): + a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]]) + + res = np.divide.reduce(a, axis=0) + assert_equal(res, [8.0, 4.0, 8.0]) + + res = np.divide.reduce(a, axis=1) + assert_equal(res, [2.0, 8.0]) + + res = np.divide.reduce(a, axis=()) + assert_equal(res, a) + + assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) + + def test_reduce_zero_axis(self): + # If we have a n x m array and do a reduction with axis=1, then we are + # doing n reductions, and each reduction takes an m-element array. 
For
+        # a reduction operation without an identity:
+        #   n > 0, m > 0: fine
+        #   n = 0, m > 0: fine, doing 0 reductions of m-element arrays
+        #   n > 0, m = 0: can't reduce a 0-element array, ValueError
+        #   n = 0, m = 0: can't reduce a 0-element array, ValueError (for
+        #                 consistency with the above case)
+        # This test doesn't actually look at return values, it just checks
+        # that we get an error in exactly those cases where we expect one,
+        # and assumes the calculations themselves are done correctly.
+
+        def ok(f, *args, **kwargs):
+            f(*args, **kwargs)
+
+        def err(f, *args, **kwargs):
+            assert_raises(ValueError, f, *args, **kwargs)
+
+        def t(expect, func, n, m):
+            expect(func, np.zeros((n, m)), axis=1)
+            expect(func, np.zeros((m, n)), axis=0)
+            expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
+            expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
+            expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
+            expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
+            expect(func, np.zeros((m // 3, m // 3, m // 3,
+                                   n // 2, n // 2)),
+                   axis=(0, 1, 2))
+            # Check what happens if the inner (resp. outer) dimensions are a
+            # mix of zero and non-zero:
+            expect(func, np.zeros((10, m, n)), axis=(0, 1))
+            expect(func, np.zeros((10, n, m)), axis=(0, 2))
+            expect(func, np.zeros((m, 10, n)), axis=0)
+            expect(func, np.zeros((10, m, n)), axis=1)
+            expect(func, np.zeros((10, n, m)), axis=2)
+
+        # np.maximum is just an arbitrary ufunc with no reduction identity
+        assert_equal(np.maximum.identity, None)
+        t(ok, np.maximum.reduce, 30, 30)
+        t(ok, np.maximum.reduce, 0, 30)
+        t(err, np.maximum.reduce, 30, 0)
+        t(err, np.maximum.reduce, 0, 0)
+        err(np.maximum.reduce, [])
+        np.maximum.reduce(np.zeros((0, 0)), axis=())
+
+        # all of the combinations are fine for a reduction that has an
+        # identity
+        t(ok, np.add.reduce, 30, 30)
+        t(ok, np.add.reduce, 0, 30)
+        t(ok, np.add.reduce, 30, 0)
+        t(ok, np.add.reduce, 0, 0)
+        np.add.reduce([])
+        np.add.reduce(np.zeros((0, 0)), axis=())
+
+        # OTOH, accumulate always makes sense for any combination of n and m,
+        # because it maps an m-element array to an m-element array. These
+        # tests are simpler because accumulate doesn't accept multiple axes.
+        for uf in (np.maximum, np.add):
+            uf.accumulate(np.zeros((30, 0)), axis=0)
+            uf.accumulate(np.zeros((0, 30)), axis=0)
+            uf.accumulate(np.zeros((30, 30)), axis=0)
+            uf.accumulate(np.zeros((0, 0)), axis=0)
+
+    def test_safe_casting(self):
+        # In old versions of numpy, in-place operations used the 'unsafe'
+        # casting rules. In versions >= 1.10, 'same_kind' is the default
+        # and an exception is raised instead of a warning when 'same_kind'
+        # is not satisfied.
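+        # (For instance, np.can_cast(np.float64, np.int64,
+        # casting='same_kind') is False, which is why the in-place float
+        # additions below must raise, while casting='unsafe' opts back in
+        # to the old behaviour.)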
+ a = np.array([1, 2, 3], dtype=int) + # Non-in-place addition is fine + assert_array_equal(assert_no_warnings(np.add, a, 1.1), + [2.1, 3.1, 4.1]) + assert_raises(TypeError, np.add, a, 1.1, out=a) + + def add_inplace(a, b): + a += b + + assert_raises(TypeError, add_inplace, a, 1.1) + # Make sure that explicitly overriding the exception is allowed: + assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") + assert_array_equal(a, [2, 3, 4]) + + def test_ufunc_custom_out(self): + # Test ufunc with built in input types and custom output type + + a = np.array([0, 1, 2], dtype='i8') + b = np.array([0, 1, 2], dtype='i8') + c = np.empty(3, dtype=_rational_tests.rational) + + # Output must be specified so numpy knows what + # ufunc signature to look for + result = _rational_tests.test_add(a, b, c) + target = np.array([0, 2, 4], dtype=_rational_tests.rational) + assert_equal(result, target) + + # no output type should raise TypeError + with assert_raises(TypeError): + _rational_tests.test_add(a, b) + + def test_operand_flags(self): + a = np.arange(16, dtype='l').reshape(4, 4) + b = np.arange(9, dtype='l').reshape(3, 3) + opflag_tests.inplace_add(a[:-1, :-1], b) + assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], + [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l')) + + a = np.array(0) + opflag_tests.inplace_add(a, 3) + assert_equal(a, 3) + opflag_tests.inplace_add(a, [3, 4]) + assert_equal(a, 10) + + def test_struct_ufunc(self): + import numpy.core._struct_ufunc_tests as struct_ufunc + + a = np.array([(1, 2, 3)], dtype='u8,u8,u8') + b = np.array([(1, 2, 3)], dtype='u8,u8,u8') + + result = struct_ufunc.add_triplet(a, b) + assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) + + def test_custom_ufunc(self): + a = np.array( + [_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + b = np.array( + [_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + + result = _rational_tests.test_add_rationals(a, b) + expected = np.array( + [_rational_tests.rational(1), + _rational_tests.rational(2, 3), + _rational_tests.rational(1, 2)], + dtype=_rational_tests.rational) + assert_equal(result, expected) + + def test_custom_ufunc_forced_sig(self): + # gh-9351 - looking for a non-first userloop would previously hang + with assert_raises(TypeError): + np.multiply(_rational_tests.rational(1), 1, + signature=(_rational_tests.rational, int, None)) + + def test_custom_array_like(self): + + class MyThing(object): + __array_priority__ = 1000 + + rmul_count = 0 + getitem_count = 0 + + def __init__(self, shape): + self.shape = shape + + def __len__(self): + return self.shape[0] + + def __getitem__(self, i): + MyThing.getitem_count += 1 + if not isinstance(i, tuple): + i = (i,) + if len(i) > self.ndim: + raise IndexError("boo") + + return MyThing(self.shape[len(i):]) + + def __rmul__(self, other): + MyThing.rmul_count += 1 + return self + + np.float64(5)*MyThing((3, 3)) + assert_(MyThing.rmul_count == 1, MyThing.rmul_count) + assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) + + def test_inplace_fancy_indexing(self): + + a = np.arange(10) + np.add.at(a, [2, 5, 2], 1) + assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) + + a = np.arange(10) + b = np.array([100, 100, 100]) + np.add.at(a, [2, 5, 2], b) + assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 
300, 300]]) + np.add.at(a, (slice(None), [1, 2, 1]), b) + assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) + assert_equal(a, + [[[0, 401, 202], + [3, 404, 205], + [6, 407, 208]], + + [[9, 410, 211], + [12, 413, 214], + [15, 416, 217]], + + [[18, 419, 220], + [21, 422, 223], + [24, 425, 226]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, ([1, 2, 1], slice(None)), b) + assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [203, 404, 605], + [106, 207, 308]], + + [[9, 10, 11], + [212, 413, 614], + [115, 216, 317]], + + [[18, 19, 20], + [221, 422, 623], + [124, 225, 326]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (0, [1, 2, 1]), b) + assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, ([1, 2, 1], 0, slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [3, 4, 5], + [6, 7, 8]], + + [[209, 410, 611], + [12, 13, 14], + [15, 16, 17]], + + [[118, 219, 320], + [21, 22, 23], + [24, 25, 26]]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), slice(None)), b) + assert_equal(a, + [[[100, 201, 302], + [103, 204, 305], + [106, 207, 308]], + + [[109, 210, 311], + [112, 213, 314], + [115, 216, 317]], + + [[118, 219, 320], + [121, 222, 323], + [124, 225, 326]]]) + + a = np.arange(10) + np.negative.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9]) + + # Test 0-dim array + a = np.array(0) + np.add.at(a, (), 1) + assert_equal(a, 1) + + assert_raises(IndexError, np.add.at, a, 0, 1) + assert_raises(IndexError, np.add.at, a, [], 1) + + # Test mixed dtypes + a = np.arange(10) + np.power.at(a, [1, 2, 3, 2], 3.5) + assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) + + # Test boolean indexing and boolean ufuncs + a = np.arange(10) + index = a % 2 == 0 + np.equal.at(a, index, [0, 2, 4, 6, 8]) + assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) + + # Test unary operator + a = np.arange(10, dtype='u4') + np.invert.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) + + # Test empty subspace + orig = np.arange(4) + a = orig[:, None][:, 0:0] + np.add.at(a, [0, 1], 3) + assert_array_equal(orig, np.arange(4)) + + # Test with swapped byte order + index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) + values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) + np.add.at(values, index, 3) + assert_array_equal(values, [1, 8, 6, 4]) + + # Test exception thrown + values = np.array(['a', 1], dtype=object) + assert_raises(TypeError, np.add.at, values, [0, 1], 1) + assert_array_equal(values, np.array(['a', 1], dtype=object)) + + # Test multiple output ufuncs raise error, gh-5665 + assert_raises(ValueError, np.modf.at, np.arange(10), [1]) + + def test_reduce_arguments(self): + f = np.add.reduce + d = np.ones((5,2), dtype=int) + o = np.ones((2,), dtype=d.dtype) + r = o * 5 + assert_equal(f(d), r) + # a, axis=0, dtype=None, out=None, keepdims=False + assert_equal(f(d, axis=0), r) + assert_equal(f(d, 0), r) + assert_equal(f(d, 0, dtype=None), r) + assert_equal(f(d, 0, dtype='i'), r) + assert_equal(f(d, 
0, 'i'), r)
+        assert_equal(f(d, 0, None), r)
+        assert_equal(f(d, 0, None, out=None), r)
+        assert_equal(f(d, 0, None, out=o), r)
+        assert_equal(f(d, 0, None, o), r)
+        assert_equal(f(d, 0, None, None), r)
+        assert_equal(f(d, 0, None, None, keepdims=False), r)
+        assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
+        assert_equal(f(d, 0, None, None, False, 0), r)
+        assert_equal(f(d, 0, None, None, False, initial=0), r)
+        # multiple keywords
+        assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
+        assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
+        assert_equal(f(d, 0, None, out=None, keepdims=False), r)
+        assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0), r)
+
+        # too little
+        assert_raises(TypeError, f)
+        # too much
+        assert_raises(TypeError, f, d, 0, None, None, False, 0, 1)
+        # invalid axis
+        assert_raises(TypeError, f, d, "invalid")
+        assert_raises(TypeError, f, d, axis="invalid")
+        assert_raises(TypeError, f, d, axis="invalid", dtype=None,
+                      keepdims=True)
+        # invalid dtype
+        assert_raises(TypeError, f, d, 0, "invalid")
+        assert_raises(TypeError, f, d, dtype="invalid")
+        assert_raises(TypeError, f, d, dtype="invalid", out=None)
+        # invalid out
+        assert_raises(TypeError, f, d, 0, None, "invalid")
+        assert_raises(TypeError, f, d, out="invalid")
+        assert_raises(TypeError, f, d, out="invalid", dtype=None)
+        # keepdims is coerced to a boolean, so no value is invalid
+        # assert_raises(TypeError, f, d, 0, None, None, "invalid")
+        # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
+        # invalid mix
+        assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
+                      out=None)
+
+        # invalid keyword
+        assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
+        assert_raises(TypeError, f, d, invalid=0)
+        assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
+                      out=None)
+        assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
+                      out=None, invalid=0)
+        assert_raises(TypeError, f, d, axis=0, dtype=None,
+                      out=None, invalid=0)
+
+    def test_structured_equal(self):
+        # https://github.com/numpy/numpy/issues/4855
+
+        class MyA(np.ndarray):
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return getattr(ufunc, method)(*(input.view(np.ndarray)
+                                                for input in inputs), **kwargs)
+        a = np.arange(12.).reshape(4, 3)
+        ra = a.view(dtype=('f8,f8,f8')).squeeze()
+        mra = ra.view(MyA)
+
+        target = np.array([ True, False, False, False], dtype=bool)
+        assert_equal(np.all(target == (mra == ra[0])), True)
+
+    def test_scalar_equal(self):
+        # Scalar comparisons should always work, without deprecation warnings,
+        # even when the ufunc fails.
+        a = np.array(0.)
+        b = np.array('a')
+        assert_(a != b)
+        assert_(b != a)
+        assert_(not (a == b))
+        assert_(not (b == a))
+
+    def test_NotImplemented_not_returned(self):
+        # See gh-5964 and gh-2091. Some of these functions are not operator
+        # related and were fixed for other reasons in the past.
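+        # (Each ufunc below must raise TypeError outright rather than return
+        # NotImplemented: a ufunc is not a binary operator, so a returned
+        # NotImplemented would leak out to callers as an ordinary value.)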
+ binary_funcs = [ + np.power, np.add, np.subtract, np.multiply, np.divide, + np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or, + np.bitwise_xor, np.left_shift, np.right_shift, np.fmax, + np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2, + np.logical_and, np.logical_or, np.logical_xor, np.maximum, + np.minimum, np.mod, + np.greater, np.greater_equal, np.less, np.less_equal, + np.equal, np.not_equal] + + a = np.array('1') + b = 1 + c = np.array([1., 2.]) + for f in binary_funcs: + assert_raises(TypeError, f, a, b) + assert_raises(TypeError, f, c, a) + + def test_reduce_noncontig_output(self): + # Check that reduction deals with non-contiguous output arrays + # appropriately. + # + # gh-8036 + + x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8) + x = x[4:6,1:11:6,1:5].transpose(1, 2, 0) + y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4) + y = y_base[::2,:] + + y_base_copy = y_base.copy() + + r0 = np.add.reduce(x, out=y.copy(), axis=2) + r1 = np.add.reduce(x, out=y, axis=2) + + # The results should match, and y_base shouldn't get clobbered + assert_equal(r0, r1) + assert_equal(y_base[1,:], y_base_copy[1,:]) + assert_equal(y_base[3,:], y_base_copy[3,:]) + + def test_no_doc_string(self): + # gh-9337 + assert_('\n' not in umt.inner1d_no_doc.__doc__) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.pyc new file mode 100644 index 0000000..01d6f07 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath.py new file mode 100644 index 0000000..eb6a67f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath.py @@ -0,0 +1,2920 @@ +from __future__ import division, absolute_import, print_function + +import platform +import warnings +import fnmatch +import itertools +import pytest + +import numpy.core.umath as ncu +from numpy.core import _umath_tests as ncu_tests +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_raises_regex, + assert_array_equal, assert_almost_equal, assert_array_almost_equal, + assert_allclose, assert_no_warnings, suppress_warnings, + _gen_alignment_data + ) + + +def on_powerpc(): + """ True if we are running on a Power PC platform.""" + return platform.processor() == 'powerpc' or \ + platform.machine().startswith('ppc') + + +class _FilterInvalids(object): + def setup(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown(self): + np.seterr(**self.olderr) + + +class TestConstants(object): + def test_pi(self): + assert_allclose(ncu.pi, 3.141592653589793, 1e-15) + + def test_e(self): + assert_allclose(ncu.e, 2.718281828459045, 1e-15) + + def test_euler_gamma(self): + assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) + + +class TestOut(object): + def test_out_subok(self): + for subok in (True, False): + a = np.array(0.5) + o = np.empty(()) + + r = np.add(a, 2, o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=(o,), subok=subok) + assert_(r is o) + + d = np.array(5.7) + o1 = np.empty(()) + o2 = np.empty((), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, None, subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, None, o2, subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, o1, o2, 
subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, out=(o1, o2), subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', DeprecationWarning) + r1, r2 = np.frexp(d, out=o1, subok=subok) + assert_(r1 is o1) + assert_(w[0].category is DeprecationWarning) + + assert_raises(ValueError, np.add, a, 2, o, o, subok=subok) + assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, None, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(), subok=subok) + assert_raises(TypeError, np.add, a, 2, [], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=[], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok) + o.flags.writeable = False + assert_raises(ValueError, np.add, a, 2, o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok) + + def test_out_wrap_subok(self): + class ArrayWrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls, arr): + return np.asarray(arr).view(cls).copy() + + def __array_wrap__(self, arr, context): + return arr.view(type(self)) + + for subok in (True, False): + a = ArrayWrap([0.5]) + + r = np.add(a, 2, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=(None,), subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + d = ArrayWrap([5.7]) + o1 = np.empty((1,)) + o2 = np.empty((1,), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, o1, None, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, None, o2, subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', DeprecationWarning) + r1, r2 = np.frexp(d, out=o1, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + assert_(w[0].category is DeprecationWarning) + + +class TestComparisons(object): + def test_ignore_object_identity_in_equal(self): + # Check error raised when comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. + a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.equal, a, a) + + # Check error raised when comparing identical non-comparable objects. 
+ class FunkyType(object): + def __eq__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.equal, a, a) + + # Check identity doesn't override comparison mismatch. + a = np.array([np.nan], dtype=object) + assert_equal(np.equal(a, a), [False]) + + def test_ignore_object_identity_in_not_equal(self): + # Check error raised when comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. + a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.not_equal, a, a) + + # Check error raised when comparing identical non-comparable objects. + class FunkyType(object): + def __ne__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.not_equal, a, a) + + # Check identity doesn't override comparison mismatch. + a = np.array([np.nan], dtype=object) + assert_equal(np.not_equal(a, a), [True]) + + +class TestAdd(object): + def test_reduce_alignment(self): + # gh-9876 + # make sure arrays with weird strides work with the optimizations in + # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a + # 4 byte offset, even though its itemsize is 8. + a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)]) + a['a'] = -1 + assert_equal(a['b'].sum(), 0) + + +class TestDivision(object): + def test_division_int(self): + # int division should follow Python + x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) + if 5 / 10 == 0.5: + assert_equal(x / 100, [0.05, 0.1, 0.9, 1, + -0.05, -0.1, -0.9, -1, -1.2]) + else: + assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) + + def test_division_complex(self): + # check that implementation is correct + msg = "Complex division implementation check" + x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128) + assert_almost_equal(x**2/x, x, err_msg=msg) + # check overflow, underflow + msg = "Complex division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = x**2/x + assert_almost_equal(y/x, [1, 1], err_msg=msg) + + def test_zero_division_complex(self): + with np.errstate(invalid="ignore", divide="ignore"): + x = np.array([0.0], dtype=np.complex128) + y = 1.0/x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.nan)/x + assert_(np.isinf(y)[0]) + y = complex(np.nan, np.inf)/x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.inf)/x + assert_(np.isinf(y)[0]) + y = 0.0/x + assert_(np.isnan(y)[0]) + + def test_floor_division_complex(self): + # check that implementation is correct + msg = "Complex floor division implementation check" + x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) + y = np.array([0., -1., 0., 0.], dtype=np.complex128) + assert_equal(np.floor_divide(x**2, x), y, err_msg=msg) + # check overflow, underflow + msg = "Complex floor division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = np.floor_divide(x**2, x) + assert_equal(y, [1.e+110, 0], err_msg=msg) + + def test_floor_division_signed_zero(self): + # Check that the sign bit is correctly set when dividing positive and + # negative zero by one. 
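+        # (IEEE 754 signed zeros: 0.0 // 1 is +0.0 with signbit 0, while
+        # -0.0 // 1 is -0.0 with signbit 1.)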
+ x = np.zeros(10) + assert_equal(np.signbit(x//1), 0) + assert_equal(np.signbit((-x)//1), 1) + +def floor_divide_and_remainder(x, y): + return (np.floor_divide(x, y), np.remainder(x, y)) + + +def _signs(dt): + if dt in np.typecodes['UnsignedInteger']: + return (+1,) + else: + return (+1, -1) + + +class TestRemainder(object): + + def test_remainder_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for op in [floor_divide_and_remainder, np.divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*71, dtype=dt1) + b = np.array(sg2*19, dtype=dt2) + div, rem = op(a, b) + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_remainder_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = list(divmod(*t) for t in arg) + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. + tgtdiv, tgtrem = np.array(tgt, dtype=float).T + tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) + tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) + + for op in [floor_divide_and_remainder, np.divmod]: + for dt in np.typecodes['Float']: + msg = 'op: %s, dtype: %s' % (op.__name__, dt) + fa = a.astype(dt) + fb = b.astype(dt) + div, rem = op(fa, fb) + assert_equal(div, tgtdiv, err_msg=msg) + assert_equal(rem, tgtrem, err_msg=msg) + + def test_float_remainder_roundoff(self): + # gh-6127 + dt = np.typecodes['Float'] + for op in [floor_divide_and_remainder, np.divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*78*6e-8, dtype=dt1) + b = np.array(sg2*6e-8, dtype=dt2) + div, rem = op(a, b) + # Equal assertion should hold when fmod is used + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_remainder_corner_cases(self): + # Check remainder magnitude. + for dt in np.typecodes['Float']: + b = np.array(1.0, dtype=dt) + a = np.nextafter(np.array(0.0, dtype=dt), -b) + rem = np.remainder(a, b) + assert_(rem <= b, 'dt: %s' % dt) + rem = np.remainder(-a, -b) + assert_(rem >= -b, 'dt: %s' % dt) + + # Check nans, inf + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in remainder") + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + rem = np.remainder(fone, fzer) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + # MSVC 2008 returns NaN here, so disable the check. 
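+                # (Python's own modulo agrees with the disabled expectation:
+                # for finite x >= 0, x % inf == x, so remainder(1.0, inf)
+                # should be 1.0 on a conforming libm.)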
+ #rem = np.remainder(fone, finf) + #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem)) + rem = np.remainder(fone, fnan) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + rem = np.remainder(finf, fone) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + + +class TestCbrt(object): + def test_cbrt_scalar(self): + assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5) + + def test_cbrt(self): + x = np.array([1., 2., -3., np.inf, -np.inf]) + assert_almost_equal(np.cbrt(x**3), x) + + assert_(np.isnan(np.cbrt(np.nan))) + assert_equal(np.cbrt(np.inf), np.inf) + assert_equal(np.cbrt(-np.inf), -np.inf) + + +class TestPower(object): + def test_power_float(self): + x = np.array([1., 2., 3.]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_equal(x**2, [1., 4., 9.]) + y = x.copy() + y **= 2 + assert_equal(y, [1., 4., 9.]) + assert_almost_equal(x**(-1), [1., 0.5, 1./3]) + assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) + + for out, inp, msg in _gen_alignment_data(dtype=np.float32, + type='unary', + max_size=11): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + for out, inp, msg in _gen_alignment_data(dtype=np.float64, + type='unary', + max_size=7): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + def test_power_complex(self): + x = np.array([1+2j, 2+3j, 3+4j]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) + assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) + assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) + assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) + assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) + assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, + (-117-44j)/15625]) + assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), + ncu.sqrt(3+4j)]) + norm = 1./((x**14)[0]) + assert_almost_equal(x**14 * norm, + [i * norm for i in [-76443+16124j, 23161315+58317492j, + 5583548873 + 2465133864j]]) + + # Ticket #836 + def assert_complex_equal(x, y): + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + for z in [complex(0, np.inf), complex(1, np.inf)]: + z = np.array([z], dtype=np.complex_) + with np.errstate(invalid="ignore"): + assert_complex_equal(z**1, z) + assert_complex_equal(z**2, z*z) + assert_complex_equal(z**3, z*z*z) + + def test_power_zero(self): + # ticket #1271 + zero = np.array([0j]) + one = np.array([1+0j]) + cnan = np.array([complex(np.nan, np.nan)]) + # FIXME cinf not tested. 
+ #cinf = np.array([complex(np.inf, 0)]) + + def assert_complex_equal(x, y): + x, y = np.asarray(x), np.asarray(y) + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + # positive powers + for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, p), zero) + + # zero power + assert_complex_equal(np.power(zero, 0), one) + with np.errstate(invalid="ignore"): + assert_complex_equal(np.power(zero, 0+1j), cnan) + + # negative power + for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, -p), cnan) + assert_complex_equal(np.power(zero, -1+0.2j), cnan) + + def test_fast_power(self): + x = np.array([1, 2, 3], np.int16) + res = x**2.0 + assert_((x**2.00001).dtype is res.dtype) + assert_array_equal(res, [1, 4, 9]) + # check the inplace operation on the casted copy doesn't mess with x + assert_(not np.may_share_memory(res, x)) + assert_array_equal(x, [1, 2, 3]) + + # Check that the fast path ignores 1-element not 0-d arrays + res = x ** np.array([[[2]]]) + assert_equal(res.shape, (1, 1, 3)) + + def test_integer_power(self): + a = np.array([15, 15], 'i8') + b = np.power(a, a) + assert_equal(b, [437893890380859375, 437893890380859375]) + + def test_integer_power_with_integer_zero_exponent(self): + dtypes = np.typecodes['Integer'] + for dt in dtypes: + arr = np.arange(-10, 10, dtype=dt) + assert_equal(np.power(arr, 0), np.ones_like(arr)) + + dtypes = np.typecodes['UnsignedInteger'] + for dt in dtypes: + arr = np.arange(10, dtype=dt) + assert_equal(np.power(arr, 0), np.ones_like(arr)) + + def test_integer_power_of_1(self): + dtypes = np.typecodes['AllInteger'] + for dt in dtypes: + arr = np.arange(10, dtype=dt) + assert_equal(np.power(1, arr), np.ones_like(arr)) + + def test_integer_power_of_zero(self): + dtypes = np.typecodes['AllInteger'] + for dt in dtypes: + arr = np.arange(1, 10, dtype=dt) + assert_equal(np.power(0, arr), np.zeros_like(arr)) + + def test_integer_to_negative_power(self): + dtypes = np.typecodes['Integer'] + for dt in dtypes: + a = np.array([0, 1, 2, 3], dtype=dt) + b = np.array([0, 1, 2, -3], dtype=dt) + one = np.array(1, dtype=dt) + minusone = np.array(-1, dtype=dt) + assert_raises(ValueError, np.power, a, b) + assert_raises(ValueError, np.power, a, minusone) + assert_raises(ValueError, np.power, one, b) + assert_raises(ValueError, np.power, one, minusone) + + +class TestFloat_power(object): + def test_type_conversion(self): + arg_type = '?bhilBHILefdgFDG' + res_type = 'ddddddddddddgDDG' + for dtin, dtout in zip(arg_type, res_type): + msg = "dtin: %s, dtout: %s" % (dtin, dtout) + arg = np.ones(1, dtype=dtin) + res = np.float_power(arg, arg) + assert_(res.dtype.name == np.dtype(dtout).name, msg) + + +class TestLog2(object): + def test_log2_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.log2(xf), yf) + + def test_log2_ints(self): + # a good log2 implementation should provide this, + # might fail on OS with bad libm + for i in range(1, 65): + v = np.log2(2.**i) + assert_equal(v, float(i), err_msg='at exponent %d' % i) + + def test_log2_special(self): + assert_equal(np.log2(1.), 0.) 
+ assert_equal(np.log2(np.inf), np.inf) + assert_(np.isnan(np.log2(np.nan))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.log2(-1.))) + assert_(np.isnan(np.log2(-np.inf))) + assert_equal(np.log2(0.), -np.inf) + assert_(w[0].category is RuntimeWarning) + assert_(w[1].category is RuntimeWarning) + assert_(w[2].category is RuntimeWarning) + + +class TestExp2(object): + def test_exp2_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.exp2(yf), xf) + + +class TestLogAddExp2(_FilterInvalids): + # Need test for intermediate precisions + def test_logaddexp2_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log2(np.array(x, dtype=dt)) + yf = np.log2(np.array(y, dtype=dt)) + zf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_) + + def test_logaddexp2_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp2(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, 0))) + assert_(np.isnan(np.logaddexp2(0, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) + + +class TestLog(object): + def test_log_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.log(xf), yf) + + +class TestExp(object): + def test_exp_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.exp(yf), xf) + + +class TestLogAddExp(_FilterInvalids): + def test_logaddexp_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log(np.array(x, dtype=dt)) + yf = np.log(np.array(y, dtype=dt)) + zf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_) + + def test_logaddexp_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 
1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, 0))) + assert_(np.isnan(np.logaddexp(0, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp.identity, -np.inf) + assert_equal(np.logaddexp.reduce([]), -np.inf) + + +class TestLog1p(object): + def test_log1p(self): + assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) + assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) + + def test_special(self): + with np.errstate(invalid="ignore", divide="ignore"): + assert_equal(ncu.log1p(np.nan), np.nan) + assert_equal(ncu.log1p(np.inf), np.inf) + assert_equal(ncu.log1p(-1.), -np.inf) + assert_equal(ncu.log1p(-2.), np.nan) + assert_equal(ncu.log1p(-np.inf), np.nan) + + +class TestExpm1(object): + def test_expm1(self): + assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) + assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) + + def test_special(self): + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(0.), 0.) + assert_equal(ncu.expm1(-0.), -0.) + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(-np.inf), -1.) + + +class TestHypot(object): + def test_simple(self): + assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) + assert_almost_equal(ncu.hypot(0, 0), 0) + + def test_reduce(self): + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0) + assert_equal(ncu.hypot.reduce([]), 0.0) + + +def assert_hypot_isnan(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isnan(ncu.hypot(x, y)), + "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))) + + +def assert_hypot_isinf(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isinf(ncu.hypot(x, y)), + "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))) + + +class TestHypotSpecialValues(object): + def test_nan_outputs(self): + assert_hypot_isnan(np.nan, np.nan) + assert_hypot_isnan(np.nan, 1) + + def test_nan_outputs2(self): + assert_hypot_isinf(np.nan, np.inf) + assert_hypot_isinf(np.inf, np.nan) + assert_hypot_isinf(np.inf, 0) + assert_hypot_isinf(0, np.inf) + assert_hypot_isinf(np.inf, np.inf) + assert_hypot_isinf(np.inf, 23.0) + + def test_no_fpe(self): + assert_no_warnings(ncu.hypot, np.inf, 0) + + +def assert_arctan2_isnan(x, y): + assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_ispinf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_isninf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_ispzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_isnzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))) + + 
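+# A small usage sketch for the helpers above (an editorial illustration, not
+# part of the upstream suite): arctan2 is bounded by [-pi, pi], so the corner
+# cases worth asserting are NaN propagation and the sign of zero, which only
+# np.signbit can observe because +0.0 == -0.0 compare equal. The leading
+# underscore keeps it out of test collection; call it by hand if curious.
+def _arctan2_helper_demo():
+    assert_arctan2_isnan(np.nan, 0.0)       # NaN input propagates to NaN
+    assert_arctan2_ispzero(np.PZERO, 1.0)   # arctan2(+0, x>0) -> +0.0
+    assert_arctan2_isnzero(np.NZERO, 1.0)   # arctan2(-0, x>0) -> -0.0
+
+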
+class TestArctan2SpecialValues(object): + def test_one_one(self): + # atan2(1, 1) returns pi/4. + assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) + assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) + + def test_zero_nzero(self): + # atan2(+-0, -0) returns +-pi. + assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi) + assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi) + + def test_zero_pzero(self): + # atan2(+-0, +0) returns +-0. + assert_arctan2_ispzero(np.PZERO, np.PZERO) + assert_arctan2_isnzero(np.NZERO, np.PZERO) + + def test_zero_negative(self): + # atan2(+-0, x) returns +-pi for x < 0. + assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi) + assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi) + + def test_zero_positive(self): + # atan2(+-0, x) returns +-0 for x > 0. + assert_arctan2_ispzero(np.PZERO, 1) + assert_arctan2_isnzero(np.NZERO, 1) + + def test_positive_zero(self): + # atan2(y, +-0) returns +pi/2 for y > 0. + assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi) + + def test_negative_zero(self): + # atan2(y, +-0) returns -pi/2 for y < 0. + assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi) + + def test_any_ninf(self): + # atan2(+-y, -infinity) returns +-pi for finite y > 0. + assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi) + assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) + + def test_any_pinf(self): + # atan2(+-y, +infinity) returns +-0 for finite y > 0. + assert_arctan2_ispzero(1, np.inf) + assert_arctan2_isnzero(-1, np.inf) + + def test_inf_any(self): + # atan2(+-infinity, x) returns +-pi/2 for finite x. + assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) + + def test_inf_ninf(self): + # atan2(+-infinity, -infinity) returns +-3*pi/4. + assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) + + def test_inf_pinf(self): + # atan2(+-infinity, +infinity) returns +-pi/4. + assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) + + def test_nan_any(self): + # atan2(nan, x) returns nan for any x, including inf + assert_arctan2_isnan(np.nan, np.inf) + assert_arctan2_isnan(np.inf, np.nan) + assert_arctan2_isnan(np.nan, np.nan) + + +class TestLdexp(object): + def _check_ldexp(self, tp): + assert_almost_equal(ncu.ldexp(np.array(2., np.float32), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float64), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), + np.array(3, tp)), 16.) + + def test_ldexp(self): + # The default Python int type should work + assert_almost_equal(ncu.ldexp(2., 3), 16.) 
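+        # (ldexp(x, n) scales by an exact power of two, so 2.0 * 2**3 == 16.0
+        # holds exactly; assert_almost_equal is just the suite's convention.)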
+ # The following int types should all be accepted + self._check_ldexp(np.int8) + self._check_ldexp(np.int16) + self._check_ldexp(np.int32) + self._check_ldexp('i') + self._check_ldexp('l') + + def test_ldexp_overflow(self): + # silence warning emitted on overflow + with np.errstate(over="ignore"): + imax = np.iinfo(np.dtype('l')).max + imin = np.iinfo(np.dtype('l')).min + assert_equal(ncu.ldexp(2., imax), np.inf) + assert_equal(ncu.ldexp(2., imin), 0) + + +class TestMaximum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.maximum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.maximum.reduce([1, 2j]), 1) + assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. + for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.maximum(x, y) == 1.0) + assert_(np.maximum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.maximum(arg1, arg2), arg2) + + +class TestMinimum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.minimum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.minimum.reduce([1, 2j]), 2j) + assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
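+        # (Object arrays compare via Python's rich comparisons; every ordering
+        # test against NaN is False, so the non-NaN operand should come back.)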
+ for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.minimum(x, y) == 1.0) + assert_(np.minimum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.minimum(arg1, arg2), arg1) + + +class TestFmax(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmax.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 9) + assert_equal(func(tmp2), 9) + + def test_reduce_complex(self): + assert_equal(np.fmax.reduce([1, 2j]), 1) + assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmax(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmax(arg1, arg2), out) + + +class TestFmin(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmin.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 1) + assert_equal(func(tmp2), 1) + + def test_reduce_complex(self): + assert_equal(np.fmin.reduce([1, 2j]), 2j) + assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmin(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmin(arg1, arg2), out) + + +class TestBool(object): + def test_exceptions(self): + a = np.ones(1, dtype=np.bool_) + assert_raises(TypeError, np.negative, a) + assert_raises(TypeError, np.positive, a) + assert_raises(TypeError, np.subtract, a, a) + + def test_truth_table_logical(self): + # 2, 3 and 4 serves as true values + input1 = [0, 0, 3, 2] + input2 = [0, 4, 0, 2] + + typecodes = (np.typecodes['AllFloat'] + + np.typecodes['AllInteger'] + + '?') # boolean + for dtype in map(np.dtype, typecodes): + arg1 = np.asarray(input1, dtype=dtype) + arg2 = np.asarray(input2, dtype=dtype) + + # OR + out = [False, 
True, True, True] + for func in (np.logical_or, np.maximum): + assert_equal(func(arg1, arg2).astype(bool), out) + # AND + out = [False, False, False, True] + for func in (np.logical_and, np.minimum): + assert_equal(func(arg1, arg2).astype(bool), out) + # XOR + out = [False, True, True, False] + for func in (np.logical_xor, np.not_equal): + assert_equal(func(arg1, arg2).astype(bool), out) + + def test_truth_table_bitwise(self): + arg1 = [False, False, True, True] + arg2 = [False, True, False, True] + + out = [False, True, True, True] + assert_equal(np.bitwise_or(arg1, arg2), out) + + out = [False, False, False, True] + assert_equal(np.bitwise_and(arg1, arg2), out) + + out = [False, True, True, False] + assert_equal(np.bitwise_xor(arg1, arg2), out) + + def test_reduce(self): + none = np.array([0, 0, 0, 0], bool) + some = np.array([1, 0, 1, 1], bool) + every = np.array([1, 1, 1, 1], bool) + empty = np.array([], bool) + + arrs = [none, some, every, empty] + + for arr in arrs: + assert_equal(np.logical_and.reduce(arr), all(arr)) + + for arr in arrs: + assert_equal(np.logical_or.reduce(arr), any(arr)) + + for arr in arrs: + assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1) + + +class TestBitwiseUFuncs(object): + + bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O'] + + def test_values(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1], dtype=dt) + msg = "dt = '%s'" % dt.char + + assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) + assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg) + + assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg) + + def test_types(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1], dtype=dt) + msg = "dt = '%s'" % dt.char + + assert_(np.bitwise_not(zeros).dtype == dt, msg) + assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg) + + def test_identity(self): + assert_(np.bitwise_or.identity == 0, 'bitwise_or') + assert_(np.bitwise_xor.identity == 0, 'bitwise_xor') + assert_(np.bitwise_and.identity == -1, 'bitwise_and') + + def test_reduction(self): + binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and) + + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1], dtype=dt) + for f in binary_funcs: + msg = "dt: '%s', f: '%s'" % (dt, f) + assert_equal(f.reduce(zeros), zeros, err_msg=msg) + assert_equal(f.reduce(ones), ones, err_msg=msg) + + # Test empty reduction, no object dtype + for dt in self.bitwise_types[:-1]: + # No object array types + empty = np.array([], dtype=dt) + for f in binary_funcs: + msg = "dt: '%s', f: '%s'" % (dt, f) + tgt = np.array(f.identity, dtype=dt) + res = f.reduce(empty) + 
assert_equal(res, tgt, err_msg=msg)
+                assert_(res.dtype == tgt.dtype, msg)
+
+        # Empty object arrays use the identity.  Note that the types may
+        # differ, the actual type used is determined by the assign_identity
+        # function and is not the same as the type returned by the identity
+        # method.
+        for f in binary_funcs:
+            msg = "f: '%s'" % (f,)
+            empty = np.array([], dtype=object)
+            tgt = f.identity
+            res = f.reduce(empty)
+            assert_equal(res, tgt, err_msg=msg)
+
+        # Non-empty object arrays do not use the identity
+        for f in binary_funcs:
+            msg = "f: '%s'" % (f,)
+            btype = np.array([True], dtype=object)
+            assert_(type(f.reduce(btype)) is bool, msg)
+
+
+class TestInt(object):
+    def test_logical_not(self):
+        x = np.ones(10, dtype=np.int16)
+        o = np.ones(10 * 2, dtype=bool)
+        tgt = o.copy()
+        tgt[::2] = False
+        os = o[::2]
+        assert_array_equal(np.logical_not(x, out=os), False)
+        assert_array_equal(o, tgt)
+
+
+class TestFloatingPoint(object):
+    def test_floating_point(self):
+        assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
+
+
+class TestDegrees(object):
+    def test_degrees(self):
+        assert_almost_equal(ncu.degrees(np.pi), 180.0)
+        assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
+
+
+class TestRadians(object):
+    def test_radians(self):
+        assert_almost_equal(ncu.radians(180.0), np.pi)
+        assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
+
+
+class TestHeaviside(object):
+    def test_heaviside(self):
+        x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
+        expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
+        expected1 = expectedhalf.copy()
+        expected1[0, 2] = 1
+
+        h = ncu.heaviside(x, 0.5)
+        assert_equal(h, expectedhalf)
+
+        h = ncu.heaviside(x, 1.0)
+        assert_equal(h, expected1)
+
+        x = x.astype(np.float32)
+
+        h = ncu.heaviside(x, np.float32(0.5))
+        assert_equal(h, expectedhalf.astype(np.float32))
+
+        h = ncu.heaviside(x, np.float32(1.0))
+        assert_equal(h, expected1.astype(np.float32))
+
+
+class TestSign(object):
+    def test_sign(self):
+        a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
+        out = np.zeros(a.shape)
+        tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
+
+        with np.errstate(invalid='ignore'):
+            res = ncu.sign(a)
+            assert_equal(res, tgt)
+            res = ncu.sign(a, out)
+            assert_equal(res, tgt)
+            assert_equal(out, tgt)
+
+    def test_sign_dtype_object(self):
+        # In reference to github issue #6229
+
+        foo = np.array([-.1, 0, .1])
+        a = np.sign(foo.astype(object))
+        b = np.sign(foo)
+
+        assert_array_equal(a, b)
+
+    def test_sign_dtype_nan_object(self):
+        # In reference to github issue #6229
+        def test_nan():
+            foo = np.array([np.nan])
+            # FIXME: a not used
+            a = np.sign(foo.astype(object))
+
+        assert_raises(TypeError, test_nan)
+
+
+class TestMinMax(object):
+    def test_minmax_blocked(self):
+        # simd tests on max/min, test all alignments, slow but important
+        # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
+        for dt, sz in [(np.float32, 15), (np.float64, 7)]:
+            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
+                                                     max_size=sz):
+                for i in range(inp.size):
+                    inp[:] = np.arange(inp.size, dtype=dt)
+                    inp[i] = np.nan
+                    emsg = lambda: '%r\n%s' % (inp, msg)
+                    with suppress_warnings() as sup:
+                        sup.filter(RuntimeWarning,
+                                   "invalid value encountered in reduce")
+                        assert_(np.isnan(inp.max()), msg=emsg)
+                        assert_(np.isnan(inp.min()), msg=emsg)
+
+                    inp[i] = 1e10
+                    assert_equal(inp.max(), 1e10, err_msg=msg)
+                    inp[i] = -1e10
+                    assert_equal(inp.min(), -1e10, err_msg=msg)
+
+    def test_lower_align(self):
+        # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        assert_equal(d.max(), d[0])
+        assert_equal(d.min(), d[0])
+
+    def test_reduce_reorder(self):
+        # gh-10370, gh-11029: some compilers reorder the call to
+        # npy_getfloatstatus and put it before the call to an intrinsic
+        # function that causes invalid status to be set. Also make sure
+        # warnings are not emitted
+        for n in (2, 4, 8, 16, 32):
+            for dt in (np.float32, np.float16, np.complex64):
+                for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
+                    assert_equal(np.min(r), np.nan)
+
+    def test_minimize_no_warns(self):
+        a = np.minimum(np.nan, 1)
+        assert_equal(a, np.nan)
+
+
+class TestAbsoluteNegative(object):
+    def test_abs_neg_blocked(self):
+        # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
+        for dt, sz in [(np.float32, 11), (np.float64, 5)]:
+            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
+                                                     max_size=sz):
+                tgt = [ncu.absolute(i) for i in inp]
+                np.absolute(inp, out=out)
+                assert_equal(out, tgt, err_msg=msg)
+                assert_((out >= 0).all())
+
+                tgt = [-1*(i) for i in inp]
+                np.negative(inp, out=out)
+                assert_equal(out, tgt, err_msg=msg)
+
+                for v in [np.nan, -np.inf, np.inf]:
+                    for i in range(inp.size):
+                        d = np.arange(inp.size, dtype=dt)
+                        inp[:] = -d
+                        inp[i] = v
+                        d[i] = -v if v == -np.inf else v
+                        assert_array_equal(np.abs(inp), d, err_msg=msg)
+                        np.abs(inp, out=out)
+                        assert_array_equal(out, d, err_msg=msg)
+
+                        assert_array_equal(-inp, -1*inp, err_msg=msg)
+                        d = -1 * inp
+                        np.negative(inp, out=out)
+                        assert_array_equal(out, d, err_msg=msg)
+
+    def test_lower_align(self):
+        # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        assert_equal(np.abs(d), d)
+        assert_equal(np.negative(d), -d)
+        np.negative(d, out=d)
+        np.negative(np.ones_like(d), out=d)
+        np.abs(d, out=d)
+        np.abs(np.ones_like(d), out=d)
+
+
+class TestPositive(object):
+    def test_valid(self):
+        valid_dtypes = [int, float, complex, object]
+        for dtype in valid_dtypes:
+            x = np.arange(5, dtype=dtype)
+            result = np.positive(x)
+            assert_equal(x, result, err_msg=str(dtype))
+
+    def test_invalid(self):
+        with assert_raises(TypeError):
+            np.positive(True)
+        with assert_raises(TypeError):
+            np.positive(np.datetime64('2000-01-01'))
+        with assert_raises(TypeError):
+            np.positive(np.array(['foo'], dtype=str))
+        with assert_raises(TypeError):
+            np.positive(np.array(['bar'], dtype=object))
+
+
+class TestSpecialMethods(object):
+    def test_wrap(self):
+
+        class with_wrap(object):
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr, context):
+                r = with_wrap()
+                r.arr = arr
+                r.context = context
+                return r
+
+        a = with_wrap()
+        x = ncu.minimum(a, a)
+        assert_equal(x.arr, np.zeros(1))
+        func, args, i = x.context
+        assert_(func is ncu.minimum)
+        assert_equal(len(args), 2)
+        assert_equal(args[0], a)
+        assert_equal(args[1], a)
+        assert_equal(i, 0)
+
+    def test_wrap_and_prepare_out(self):
+        # Calling convention for out should not affect how special methods are
+        # called
+
+        class StoreArrayPrepareWrap(np.ndarray):
+            _wrap_args = None
+            _prepare_args = None
+            def __new__(cls):
+                return np.empty(()).view(cls)
+            def __array_wrap__(self, obj, context):
+                self._wrap_args = context[1]
+                return obj
+            def __array_prepare__(self, obj, context):
+                self._prepare_args = context[1]
+                return obj
+            @property
+            def args(self):
+                # We need to ensure these are fetched at the same time, before
+                # any other ufuncs are called by the assertions
+                return (self._prepare_args, self._wrap_args)
+            def __repr__(self):
+                return "a"  # for short test output
+
+        def do_test(f_call, f_expected):
+            a = StoreArrayPrepareWrap()
+            f_call(a)
+            p, w = a.args
+            expected = f_expected(a)
+            try:
+                assert_equal(p, expected)
+                assert_equal(w, expected)
+            except AssertionError as e:
+                # assert_equal produces truly useless error messages
+                raise AssertionError("\n".join([
+                    "Bad arguments passed in ufunc call",
+                    " expected: {}".format(expected),
+                    " __array_prepare__ got: {}".format(p),
+                    " __array_wrap__ got: {}".format(w)
+                ]))
+
+        # method not on the out argument
+        do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
+        do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
+        do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
+        do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
+
+        # method on the out argument
+        do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
+        do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
+        do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
+
+    def test_wrap_with_iterable(self):
+        # test fix for bug #1026:
+
+        class with_wrap(np.ndarray):
+            __array_priority__ = 10
+
+            def __new__(cls):
+                return np.asarray(1).view(cls).copy()
+
+            def __array_wrap__(self, arr, context):
+                return arr.view(type(self))
+
+        a = with_wrap()
+        x = ncu.multiply(a, (1, 2, 3))
+        assert_(isinstance(x, with_wrap))
+        assert_array_equal(x, np.array((1, 2, 3)))
+
+    def test_priority_with_scalar(self):
+        # test fix for bug #826:
+
+        class A(np.ndarray):
+            __array_priority__ = 10
+
+            def __new__(cls):
+                return np.asarray(1.0, 'float64').view(cls).copy()
+
+        a = A()
+        x = np.float64(1)*a
+        assert_(isinstance(x, A))
+        assert_array_equal(x, np.array(1))
+
+    def test_old_wrap(self):
+
+        class with_wrap(object):
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr):
+                r = with_wrap()
+                r.arr = arr
+                return r
+
+        a = with_wrap()
+        x = ncu.minimum(a, a)
+        assert_equal(x.arr, np.zeros(1))
+
+    def test_priority(self):
+
+        class A(object):
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr, context):
+                r = type(self)()
+                r.arr = arr
+                r.context = context
+                return r
+
+        class B(A):
+            __array_priority__ = 20.
+
+        class C(A):
+            __array_priority__ = 40.
+
+        x = np.zeros(1)
+        a = A()
+        b = B()
+        c = C()
+        f = ncu.minimum
+        assert_(type(f(x, x)) is np.ndarray)
+        assert_(type(f(x, a)) is A)
+        assert_(type(f(x, b)) is B)
+        assert_(type(f(x, c)) is C)
+        assert_(type(f(a, x)) is A)
+        assert_(type(f(b, x)) is B)
+        assert_(type(f(c, x)) is C)
+
+        assert_(type(f(a, a)) is A)
+        assert_(type(f(a, b)) is B)
+        assert_(type(f(b, a)) is B)
+        assert_(type(f(b, b)) is B)
+        assert_(type(f(b, c)) is C)
+        assert_(type(f(c, b)) is C)
+        assert_(type(f(c, c)) is C)
+
+        assert_(type(ncu.exp(a)) is A)
+        assert_(type(ncu.exp(b)) is B)
+        assert_(type(ncu.exp(c)) is C)
+
+    def test_failing_wrap(self):
+
+        class A(object):
+            def __array__(self):
+                return np.zeros(2)
+
+            def __array_wrap__(self, arr, context):
+                raise RuntimeError
+
+        a = A()
+        assert_raises(RuntimeError, ncu.maximum, a, a)
+        assert_raises(RuntimeError, ncu.maximum.reduce, a)
+
+    def test_failing_out_wrap(self):
+
+        singleton = np.array([1.0])
+
+        class Ok(np.ndarray):
+            def __array_wrap__(self, obj):
+                return singleton
+
+        class Bad(np.ndarray):
+            def __array_wrap__(self, obj):
+                raise RuntimeError
+
+        ok = np.empty(1).view(Ok)
+        bad = np.empty(1).view(Bad)
+
+        # double-free (segfault) of "ok" if "bad" raises an exception
+        for i in range(10):
+            assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
+
+    def test_none_wrap(self):
+        # Tests that issue #8507 is resolved. Previously, this would segfault
+
+        class A(object):
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr, context=None):
+                return None
+
+        a = A()
+        assert_equal(ncu.maximum(a, a), None)
+
+    def test_default_prepare(self):
+
+        class with_wrap(object):
+            __array_priority__ = 10
+
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr, context):
+                return arr
+
+        a = with_wrap()
+        x = ncu.minimum(a, a)
+        assert_equal(x, np.zeros(1))
+        assert_equal(type(x), np.ndarray)
+
+    def test_prepare(self):
+
+        class with_prepare(np.ndarray):
+            __array_priority__ = 10
+
+            def __array_prepare__(self, arr, context):
+                # make sure we can return a new
+                return np.array(arr).view(type=with_prepare)
+
+        a = np.array(1).view(type=with_prepare)
+        x = np.add(a, a)
+        assert_equal(x, np.array(2))
+        assert_equal(type(x), with_prepare)
+
+    def test_prepare_out(self):
+
+        class with_prepare(np.ndarray):
+            __array_priority__ = 10
+
+            def __array_prepare__(self, arr, context):
+                return np.array(arr).view(type=with_prepare)
+
+        a = np.array([1]).view(type=with_prepare)
+        x = np.add(a, a, a)
+        # Returned array is new, because of the strange
+        # __array_prepare__ above
+        assert_(not np.shares_memory(x, a))
+        assert_equal(x, np.array([2]))
+        assert_equal(type(x), with_prepare)
+
+    def test_failing_prepare(self):
+
+        class A(object):
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_prepare__(self, arr, context=None):
+                raise RuntimeError
+
+        a = A()
+        assert_raises(RuntimeError, ncu.maximum, a, a)
+
+    def test_array_with_context(self):
+
+        class A(object):
+            def __array__(self, dtype=None, context=None):
+                func, args, i = context
+                self.func = func
+                self.args = args
+                self.i = i
+                return np.zeros(1)
+
+        class B(object):
+            def __array__(self, dtype=None):
+                return np.zeros(1, dtype)
+
+        class C(object):
+            def __array__(self):
+                return np.zeros(1)
+
+        a = A()
+        ncu.maximum(np.zeros(1), a)
+        assert_(a.func is ncu.maximum)
+        assert_equal(a.args[0], 0)
+        assert_(a.args[1] is a)
+        assert_(a.i == 1)
+        assert_equal(ncu.maximum(a, B()), 0)
+        assert_equal(ncu.maximum(a, C()), 0)
+
+    def test_ufunc_override(self):
+        # check
override works even with instance with high priority. + class A(object): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return self, func, method, inputs, kwargs + + class MyNDArray(np.ndarray): + __array_priority__ = 100 + + a = A() + b = np.array([1]).view(MyNDArray) + res0 = np.multiply(a, b) + res1 = np.multiply(b, b, out=a) + + # self + assert_equal(res0[0], a) + assert_equal(res1[0], a) + assert_equal(res0[1], np.multiply) + assert_equal(res1[1], np.multiply) + assert_equal(res0[2], '__call__') + assert_equal(res1[2], '__call__') + assert_equal(res0[3], (a, b)) + assert_equal(res1[3], (b, b)) + assert_equal(res0[4], {}) + assert_equal(res1[4], {'out': (a,)}) + + def test_ufunc_override_mro(self): + + # Some multi arg functions for testing. + def tres_mul(a, b, c): + return a * b * c + + def quatro_mul(a, b, c, d): + return a * b * c * d + + # Make these into ufuncs. + three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) + four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) + + class A(object): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "A" + + class ASub(A): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "ASub" + + class B(object): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "B" + + class C(object): + def __init__(self): + self.count = 0 + + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + class CSub(C): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + a = A() + a_sub = ASub() + b = B() + c = C() + + # Standard + res = np.multiply(a, a_sub) + assert_equal(res, "ASub") + res = np.multiply(a_sub, b) + assert_equal(res, "ASub") + + # With 1 NotImplemented + res = np.multiply(c, a) + assert_equal(res, "A") + assert_equal(c.count, 1) + # Check our counter works, so we can trust tests below. + res = np.multiply(c, a) + assert_equal(c.count, 2) + + # Both NotImplemented. + c = C() + c_sub = CSub() + assert_raises(TypeError, np.multiply, c, c_sub) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = c_sub.count = 0 + assert_raises(TypeError, np.multiply, c_sub, c) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, c, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, 2, c) + assert_equal(c.count, 1) + + # Ternary testing. + assert_equal(three_mul_ufunc(a, 1, 2), "A") + assert_equal(three_mul_ufunc(1, a, 2), "A") + assert_equal(three_mul_ufunc(1, 2, a), "A") + + assert_equal(three_mul_ufunc(a, a, 6), "A") + assert_equal(three_mul_ufunc(a, 2, a), "A") + assert_equal(three_mul_ufunc(a, 2, b), "A") + assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") + assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") + c.count = 0 + assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") + assert_equal(c.count, 1) + c.count = 0 + assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") + assert_equal(c.count, 0) + + c.count = 0 + assert_equal(three_mul_ufunc(a, b, c), "A") + assert_equal(c.count, 0) + c_sub.count = 0 + assert_equal(three_mul_ufunc(a, b, c_sub), "A") + assert_equal(c_sub.count, 0) + assert_equal(three_mul_ufunc(1, 2, b), "B") + + assert_raises(TypeError, three_mul_ufunc, 1, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) + + # Quaternary testing. 
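+        # (Same dispatch rules with four operands: arguments are tried in
+        # order, except that a subclass's __array_ufunc__ is consulted before
+        # its superclass's, which is why ASub can win from any position.)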
+ assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, 3), "A") + assert_equal(four_mul_ufunc(1, 1, a, 3), "A") + assert_equal(four_mul_ufunc(1, 1, 2, a), "A") + + assert_equal(four_mul_ufunc(a, b, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, b), "A") + assert_equal(four_mul_ufunc(b, 1, a, 3), "B") + assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") + assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") + + c = C() + c_sub = CSub() + assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + c2 = C() + c.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + assert_equal(c2.count, 0) + c.count = c2.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 0) + assert_equal(c2.count, 1) + + def test_ufunc_override_methods(self): + + class A(object): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + # __call__ + a = A() + res = np.multiply.__call__(1, a, foo='bar', answer=42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, a)) + assert_equal(res[4], {'foo': 'bar', 'answer': 42}) + + # __call__, wrong args + assert_raises(TypeError, np.multiply, a) + assert_raises(TypeError, np.multiply, a, a, a, a) + assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a') + assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0]) + + # reduce, positional args + res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0'}) + + # reduce, kwargs + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', + keepdims='keep0', initial='init0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0', + 'initial': 'init0'}) + + # reduce, output equal to None removed, but not other explicit ones, + # even if they are at their default value. + res = np.multiply.reduce(a, 0, None, None, False) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False}) + res = np.multiply.reduce(a, out=None, axis=0, keepdims=True) + assert_equal(res[4], {'axis': 0, 'keepdims': True}) + res = np.multiply.reduce(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + res = np.multiply.reduce(a, 0, None, None, False, 2) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': 2}) + # np._NoValue ignored for initial. + res = np.multiply.reduce(a, 0, None, None, False, np._NoValue) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False}) + # None kept for initial. 
+ res = np.multiply.reduce(a, 0, None, None, False, None) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': None}) + + # reduce, wrong args + assert_raises(ValueError, np.multiply.reduce, a, out=()) + assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') + + # accumulate, pos args + res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, kwargs + res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, output equal to None removed. + res = np.multiply.accumulate(a, 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1') + assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'}) + res = np.multiply.accumulate(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # accumulate, wrong args + assert_raises(ValueError, np.multiply.accumulate, a, out=()) + assert_raises(ValueError, np.multiply.accumulate, a, + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.accumulate, a, + 'axis0', axis='axis0') + + # reduceat, pos args + res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, kwargs + res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, output equal to None removed. 
+ res = np.multiply.reduceat(a, [4, 2], 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt') + assert_equal(res[4], {'axis': None, 'dtype': 'dt'}) + res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,)) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # reduceat, wrong args + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=()) + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, [4, 2], + 'axis0', axis='axis0') + + # outer + res = np.multiply.outer(a, 42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'outer') + assert_equal(res[3], (a, 42)) + assert_equal(res[4], {}) + + # outer, wrong args + assert_raises(TypeError, np.multiply.outer, a) + assert_raises(TypeError, np.multiply.outer, a, a, a, a) + assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a') + + # at + res = np.multiply.at(a, [4, 2], 'b0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'at') + assert_equal(res[3], (a, [4, 2], 'b0')) + + # at, wrong args + assert_raises(TypeError, np.multiply.at, a) + assert_raises(TypeError, np.multiply.at, a, a, a, a) + + def test_ufunc_override_out(self): + + class A(object): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return kwargs + + class B(object): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return kwargs + + a = A() + b = B() + res0 = np.multiply(a, b, 'out_arg') + res1 = np.multiply(a, b, out='out_arg') + res2 = np.multiply(2, b, 'out_arg') + res3 = np.multiply(3, b, out='out_arg') + res4 = np.multiply(a, 4, 'out_arg') + res5 = np.multiply(a, 5, out='out_arg') + + assert_equal(res0['out'][0], 'out_arg') + assert_equal(res1['out'][0], 'out_arg') + assert_equal(res2['out'][0], 'out_arg') + assert_equal(res3['out'][0], 'out_arg') + assert_equal(res4['out'][0], 'out_arg') + assert_equal(res5['out'][0], 'out_arg') + + # ufuncs with multiple output modf and frexp. + res6 = np.modf(a, 'out0', 'out1') + res7 = np.frexp(a, 'out0', 'out1') + assert_equal(res6['out'][0], 'out0') + assert_equal(res6['out'][1], 'out1') + assert_equal(res7['out'][0], 'out0') + assert_equal(res7['out'][1], 'out1') + + # While we're at it, check that default output is never passed on. + assert_(np.sin(a, None) == {}) + assert_(np.sin(a, out=None) == {}) + assert_(np.sin(a, out=(None,)) == {}) + assert_(np.modf(a, None) == {}) + assert_(np.modf(a, None, None) == {}) + assert_(np.modf(a, out=(None, None)) == {}) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', DeprecationWarning) + assert_(np.modf(a, out=None) == {}) + assert_(w[0].category is DeprecationWarning) + + # don't give positional and output argument, or too many arguments. + # wrong number of arguments in the tuple is an error too. 
+        assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
+        assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
+        assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+        assert_raises(ValueError, np.multiply, a, out=())
+        assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
+        assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
+        assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+        assert_raises(ValueError, np.modf, a, out=('one',))
+
+    def test_ufunc_override_exception(self):
+
+        class A(object):
+            def __array_ufunc__(self, *a, **kwargs):
+                raise ValueError("oops")
+
+        a = A()
+        assert_raises(ValueError, np.negative, 1, out=a)
+        assert_raises(ValueError, np.negative, a)
+        assert_raises(ValueError, np.divide, 1., a)
+
+    def test_ufunc_override_not_implemented(self):
+
+        class A(object):
+            def __array_ufunc__(self, *args, **kwargs):
+                return NotImplemented
+
+        msg = ("operand type(s) all returned NotImplemented from "
+               "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
+        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+            np.negative(A())
+
+        msg = ("operand type(s) all returned NotImplemented from "
+               "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
+               "out=(1,)): 'A', 'object', 'int'")
+        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+            np.add(A(), object(), out=1)
+
+    def test_ufunc_override_disabled(self):
+
+        class OptOut(object):
+            __array_ufunc__ = None
+
+        opt_out = OptOut()
+
+        # ufuncs always raise
+        msg = "operand 'OptOut' does not support ufuncs"
+        with assert_raises_regex(TypeError, msg):
+            np.add(opt_out, 1)
+        with assert_raises_regex(TypeError, msg):
+            np.add(1, opt_out)
+        with assert_raises_regex(TypeError, msg):
+            np.negative(opt_out)
+
+        # opt-outs still hold even when other arguments have pathological
+        # __array_ufunc__ implementations
+
+        class GreedyArray(object):
+            def __array_ufunc__(self, *args, **kwargs):
+                return self
+
+        greedy = GreedyArray()
+        assert_(np.negative(greedy) is greedy)
+        with assert_raises_regex(TypeError, msg):
+            np.add(greedy, opt_out)
+        with assert_raises_regex(TypeError, msg):
+            np.add(greedy, 1, out=opt_out)
+
+    def test_gufunc_override(self):
+        # gufuncs are just ufunc instances, but follow a different path,
+        # so check __array_ufunc__ overrides them properly.
+        class A(object):
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return self, ufunc, method, inputs, kwargs
+
+        inner1d = ncu_tests.inner1d
+        a = A()
+        res = inner1d(a, a)
+        assert_equal(res[0], a)
+        assert_equal(res[1], inner1d)
+        assert_equal(res[2], '__call__')
+        assert_equal(res[3], (a, a))
+        assert_equal(res[4], {})
+
+        res = inner1d(1, 1, out=a)
+        assert_equal(res[0], a)
+        assert_equal(res[1], inner1d)
+        assert_equal(res[2], '__call__')
+        assert_equal(res[3], (1, 1))
+        assert_equal(res[4], {'out': (a,)})
+
+        # wrong number of arguments in the tuple is an error too.
+        assert_raises(TypeError, inner1d, a, out='two')
+        assert_raises(TypeError, inner1d, a, a, 'one', out='two')
+        assert_raises(TypeError, inner1d, a, a, 'one', 'two')
+        assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
+        assert_raises(ValueError, inner1d, a, a, out=())
+
+    def test_ufunc_override_with_super(self):
+        # NOTE: this class is given as an example in doc/subclassing.py;
+        # if you make any changes here, do update it there too.
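+        # (The pattern below: strip A-typed inputs and outputs down to plain
+        # ndarray views, defer the real work to super().__array_ufunc__, then
+        # re-wrap the results and record which argument positions were As.)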
+ class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + args = [] + in_no = [] + for i, input_ in enumerate(inputs): + if isinstance(input_, A): + in_no.append(i) + args.append(input_.view(np.ndarray)) + else: + args.append(input_) + + outputs = kwargs.pop('out', None) + out_no = [] + if outputs: + out_args = [] + for j, output in enumerate(outputs): + if isinstance(output, A): + out_no.append(j) + out_args.append(output.view(np.ndarray)) + else: + out_args.append(output) + kwargs['out'] = tuple(out_args) + else: + outputs = (None,) * ufunc.nout + + info = {} + if in_no: + info['inputs'] = in_no + if out_no: + info['outputs'] = out_no + + results = super(A, self).__array_ufunc__(ufunc, method, + *args, **kwargs) + if results is NotImplemented: + return NotImplemented + + if method == 'at': + if isinstance(inputs[0], A): + inputs[0].info = info + return + + if ufunc.nout == 1: + results = (results,) + + results = tuple((np.asarray(result).view(A) + if output is None else output) + for result, output in zip(results, outputs)) + if results and isinstance(results[0], A): + results[0].info = info + + return results[0] if len(results) == 1 else results + + class B(object): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if any(isinstance(input_, A) for input_ in inputs): + return "A!" + else: + return NotImplemented + + d = np.arange(5.) + # 1 input, 1 output + a = np.arange(5.).view(A) + b = np.sin(a) + check = np.sin(d) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0]}) + b = np.sin(d, out=(a,)) + assert_(np.all(check == b)) + assert_equal(b.info, {'outputs': [0]}) + assert_(b is a) + a = np.arange(5.).view(A) + b = np.sin(a, out=a) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0], 'outputs': [0]}) + + # 1 input, 2 outputs + a = np.arange(5.).view(A) + b1, b2 = np.modf(a) + assert_equal(b1.info, {'inputs': [0]}) + b1, b2 = np.modf(d, out=(None, a)) + assert_(b2 is a) + assert_equal(b1.info, {'outputs': [1]}) + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c1, c2 = np.modf(a, out=(a, b)) + assert_(c1 is a) + assert_(c2 is b) + assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]}) + + # 2 input, 1 output + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c = np.add(a, b, out=a) + assert_(c is a) + assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]}) + # some tests with a non-ndarray subclass + a = np.arange(5.) + b = B() + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_raises(TypeError, np.add, a, b) + a = a.view(A) + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!") + assert_(np.add(a, b) == "A!") + # regression check for gh-9102 -- tests ufunc.reduce implicitly. 
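+        # (Implicitly because ndarray.any() and ndarray.max() used below are
+        # routed through np.logical_or.reduce and np.maximum.reduce, so they
+        # reach the same __array_ufunc__ with method='reduce'.)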
+        d = np.array([[1, 2, 3], [1, 2, 3]])
+        a = d.view(A)
+        c = a.any()
+        check = d.any()
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        c = a.max()
+        check = d.max()
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.array(0).view(A)
+        c = a.max(out=b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        check = a.max(axis=0)
+        b = np.zeros_like(check).view(A)
+        c = a.max(axis=0, out=b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        # simple explicit tests of reduce, accumulate, reduceat
+        check = np.add.reduce(d, axis=1)
+        c = np.add.reduce(a, axis=1)
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.zeros_like(c)
+        c = np.add.reduce(a, 1, None, b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        check = np.add.accumulate(d, axis=0)
+        c = np.add.accumulate(a, axis=0)
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.zeros_like(c)
+        c = np.add.accumulate(a, 0, None, b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        indices = [0, 2, 1]
+        check = np.add.reduceat(d, indices, axis=1)
+        c = np.add.reduceat(a, indices, axis=1)
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.zeros_like(c)
+        c = np.add.reduceat(a, indices, 1, None, b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        # and a few tests for at
+        d = np.array([[1, 2, 3], [1, 2, 3]])
+        check = d.copy()
+        a = d.copy().view(A)
+        np.add.at(check, ([0, 1], [0, 2]), 1.)
+        np.add.at(a, ([0, 1], [0, 2]), 1.)
+        assert_equal(a, check)
+        assert_equal(a.info, {'inputs': [0]})
+        b = np.array(1.).view(A)
+        a = d.copy().view(A)
+        np.add.at(a, ([0, 1], [0, 2]), b)
+        assert_equal(a, check)
+        assert_equal(a.info, {'inputs': [0, 2]})
+
+
+class TestChoose(object):
+    def test_mixed(self):
+        c = np.array([True, True])
+        a = np.array([True, True])
+        assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
+
+
+class TestRationalFunctions(object):
+    def test_lcm(self):
+        self._test_lcm_inner(np.int16)
+        self._test_lcm_inner(np.uint16)
+
+    def test_lcm_object(self):
+        self._test_lcm_inner(np.object_)
+
+    def test_gcd(self):
+        self._test_gcd_inner(np.int16)
+        self._test_gcd_inner(np.uint16)
+
+    def test_gcd_object(self):
+        self._test_gcd_inner(np.object_)
+
+    def _test_lcm_inner(self, dtype):
+        # basic use
+        a = np.array([12, 120], dtype=dtype)
+        b = np.array([20, 200], dtype=dtype)
+        assert_equal(np.lcm(a, b), [60, 600])
+
+        if not issubclass(dtype, np.unsignedinteger):
+            # negatives are ignored
+            a = np.array([12, -12, 12, -12], dtype=dtype)
+            b = np.array([20, 20, -20, -20], dtype=dtype)
+            assert_equal(np.lcm(a, b), [60]*4)
+
+        # reduce
+        a = np.array([3, 12, 20], dtype=dtype)
+        assert_equal(np.lcm.reduce(a), 60)
+
+        # broadcasting, and a test including 0
+        a = np.arange(6).astype(dtype)
+        b = 20
+        assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
+
+    def _test_gcd_inner(self, dtype):
+        # basic use
+        a = np.array([12, 120], dtype=dtype)
+        b = np.array([20, 200], dtype=dtype)
+        assert_equal(np.gcd(a, b), [4, 40])
+
+        if not issubclass(dtype, np.unsignedinteger):
+            # negatives are ignored
+            a = np.array([12, -12, 12, -12], dtype=dtype)
+            b = np.array([20, 20, -20, -20], dtype=dtype)
+            assert_equal(np.gcd(a, b), [4]*4)
+
+        # reduce
+        a = np.array([15, 25, 35], dtype=dtype)
+        assert_equal(np.gcd.reduce(a), 5)
+
+        # broadcasting, and a test
including 0 + a = np.arange(6).astype(dtype) + b = 20 + assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5]) + + def test_lcm_overflow(self): + # verify that we don't overflow when a*b does overflow + big = np.int32(np.iinfo(np.int32).max // 11) + a = 2*big + b = 5*big + assert_equal(np.lcm(a, b), 10*big) + + def test_gcd_overflow(self): + for dtype in (np.int32, np.int64): + # verify that we don't overflow when taking abs(x) + # not relevant for lcm, where the result is unrepresentable anyway + a = dtype(np.iinfo(dtype).min) # negative power of two + q = -(a // 4) + assert_equal(np.gcd(a, q*3), q) + assert_equal(np.gcd(a, -q*3), q) + + def test_decimal(self): + from decimal import Decimal + a = np.array([1, 1, -1, -1]) * Decimal('0.20') + b = np.array([1, -1, 1, -1]) * Decimal('0.12') + + assert_equal(np.gcd(a, b), 4*[Decimal('0.04')]) + assert_equal(np.lcm(a, b), 4*[Decimal('0.60')]) + + def test_float(self): + # not well-defined on float due to rounding errors + assert_raises(TypeError, np.gcd, 0.3, 0.4) + assert_raises(TypeError, np.lcm, 0.3, 0.4) + + def test_builtin_long(self): + # sanity check that array coercion is alright for builtin longs + assert_equal(np.array(2**200).item(), 2**200) + + # expressed as prime factors + a = np.array(2**100 * 3**5) + b = np.array([2**100 * 5**7, 2**50 * 3**10]) + assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) + assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) + + assert_equal(np.gcd(2**100, 3**100), 1) + + +class TestComplexFunctions(object): + funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, + np.arctanh, np.sin, np.cos, np.tan, np.exp, + np.exp2, np.log, np.sqrt, np.log10, np.log2, + np.log1p] + + def test_it(self): + for f in self.funcs: + if f is np.arccosh: + x = 1.5 + else: + x = .5 + fr = f(x) + fz = f(complex(x)) + assert_almost_equal(fz.real, fr, err_msg='real part %s' % f) + assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f) + + def test_precisions_consistent(self): + z = 1 + 1j + for f in self.funcs: + fcf = f(np.csingle(z)) + fcd = f(np.cdouble(z)) + fcl = f(np.clongdouble(z)) + assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f) + assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s' % f) + + def test_branch_cuts(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True) + + _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1) + _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1) + + def test_branch_cuts_complex64(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, 
-1, True, np.complex64) + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + + _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) + + def test_against_cmath(self): + import cmath + + points = [-1-1j, -1+1j, +1-1j, +1+1j] + name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', + 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} + atol = 4*np.finfo(complex).eps + for func in self.funcs: + fname = func.__name__.split('.')[-1] + cname = name_map.get(fname, fname) + try: + cfunc = getattr(cmath, cname) + except AttributeError: + continue + for p in points: + a = complex(func(np.complex_(p))) + b = cfunc(p) + assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b)) + + @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex]) + def test_loss_of_precision(self, dtype): + """Check loss of precision in complex arc* functions""" + + # Check against known-good functions + + info = np.finfo(dtype) + real_dtype = dtype(0.).real.dtype + eps = info.eps + + def check(x, rtol): + x = x.astype(real_dtype) + + z = x.astype(dtype) + d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsinh')) + + z = (1j*x).astype(dtype) + d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsin')) + + z = x.astype(dtype) + d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctanh')) + + z = (1j*x).astype(dtype) + d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctan')) + + # The switchover was chosen as 1e-3; hence there can be up to + # ~eps/1e-3 of relative cancellation error before it + + x_series = np.logspace(-20, -3.001, 200) + x_basic = np.logspace(-2.999, 0, 10, endpoint=False) + + if dtype is np.longcomplex: + # It's not guaranteed that the system-provided arc functions + # are accurate down to a few epsilons. (Eg. on Linux 64-bit) + # So, give more leeway for long complex tests here: + # Can use 2.1 for > Ubuntu LTS Trusty (2014), glibc = 2.19. 
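+            # (Hence the deliberately loose 50*eps bound below, versus the
+            # 2.1*eps used for single and double precision.)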
+ check(x_series, 50.0*eps) + else: + check(x_series, 2.1*eps) + check(x_basic, 2.0*eps/1e-3) + + # Check a few points + + z = np.array([1e-5*(1+1j)], dtype=dtype) + p = 9.999999999333333333e-6 + 1.000000000066666666e-5j + d = np.absolute(1-np.arctanh(z)/p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j + d = np.absolute(1-np.arcsinh(z)/p) + assert_(np.all(d < 1e-15)) + + p = 9.999999999333333333e-6j + 1.000000000066666666e-5 + d = np.absolute(1-np.arctan(z)/p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 + d = np.absolute(1-np.arcsin(z)/p) + assert_(np.all(d < 1e-15)) + + # Check continuity across switchover points + + def check(func, z0, d=1): + z0 = np.asarray(z0, dtype=dtype) + zp = z0 + abs(z0) * d * eps * 2 + zm = z0 - abs(z0) * d * eps * 2 + assert_(np.all(zp != zm), (zp, zm)) + + # NB: the cancellation error at the switchover is at least eps + good = (abs(func(zp) - func(zm)) < 2*eps) + assert_(np.all(good), (func, z0[~good])) + + for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): + pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3) + if rp != 0 or ip != 0] + check(func, pts, 1) + check(func, pts, 1j) + check(func, pts, 1+1j) + + +class TestAttributes(object): + def test_attributes(self): + add = ncu.add + assert_equal(add.__name__, 'add') + assert_(add.ntypes >= 18) # don't fail if types added + assert_('ii->i' in add.types) + assert_equal(add.nin, 2) + assert_equal(add.nout, 1) + assert_equal(add.identity, 0) + + def test_doc(self): + # don't bother checking the long list of kwargs, which are likely to + # change + assert_(ncu.add.__doc__.startswith( + "add(x1, x2, /, out=None, *, where=True")) + assert_(ncu.frexp.__doc__.startswith( + "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True")) + + +class TestSubclass(object): + + def test_subclass_op(self): + + class simple(np.ndarray): + def __new__(subtype, shape): + self = np.ndarray.__new__(subtype, shape, dtype=object) + self.fill(0) + return self + + a = simple((3, 4)) + assert_equal(a+a, a) + +def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, + dtype=complex): + """ + Check for a branch cut in a function. + + Assert that `x0` lies on a branch cut of function `f` and `f` is + continuous from the direction `dx`. 
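+    Concretely, f(x0 + delta*dx) must agree with f(x0), while f(x0 - delta*dx)
+    may differ only by the sign flips given by `re_sign` and `im_sign`.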
+ + Parameters + ---------- + f : func + Function to check + x0 : array-like + Point on branch cut + dx : array-like + Direction to check continuity in + re_sign, im_sign : {1, -1} + Change of sign of the real or imaginary part expected + sig_zero_ok : bool + Whether to check if the branch cut respects signed zero (if applicable) + dtype : dtype + Dtype to check (should be complex) + + """ + x0 = np.atleast_1d(x0).astype(dtype) + dx = np.atleast_1d(dx).astype(dtype) + + if np.dtype(dtype).char == 'F': + scale = np.finfo(dtype).eps * 1e2 + atol = np.float32(1e-2) + else: + scale = np.finfo(dtype).eps * 1e3 + atol = 1e-4 + + y0 = f(x0) + yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) + ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) + + assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) + + if sig_zero_ok: + # check that signed zeros also work as a displacement + jr = (x0.real == 0) & (dx.real != 0) + ji = (x0.imag == 0) & (dx.imag != 0) + if np.any(jr): + x = x0[jr] + x.real = np.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym)) + assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym)) + + if np.any(ji): + x = x0[ji] + x.imag = np.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym)) + assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym)) + +def test_copysign(): + assert_(np.copysign(1, -1) == -1) + with np.errstate(divide="ignore"): + assert_(1 / np.copysign(0, -1) < 0) + assert_(1 / np.copysign(0, 1) > 0) + assert_(np.signbit(np.copysign(np.nan, -1))) + assert_(not np.signbit(np.copysign(np.nan, 1))) + +def _test_nextafter(t): + one = t(1) + two = t(2) + zero = t(0) + eps = np.finfo(t).eps + assert_(np.nextafter(one, two) - one == eps) + assert_(np.nextafter(one, zero) - one < 0) + assert_(np.isnan(np.nextafter(np.nan, one))) + assert_(np.isnan(np.nextafter(one, np.nan))) + assert_(np.nextafter(one, one) == one) + +def test_nextafter(): + return _test_nextafter(np.float64) + + +def test_nextafterf(): + return _test_nextafter(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_nextafterl(): + return _test_nextafter(np.longdouble) + + +def test_nextafter_0(): + for t, direction in itertools.product(np.sctypes['float'], (1, -1)): + tiny = np.finfo(t).tiny + assert_(0. 
< direction * np.nextafter(t(0), t(direction)) < tiny) + assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0) + +def _test_spacing(t): + one = t(1) + eps = np.finfo(t).eps + nan = t(np.nan) + inf = t(np.inf) + with np.errstate(invalid='ignore'): + assert_(np.spacing(one) == eps) + assert_(np.isnan(np.spacing(nan))) + assert_(np.isnan(np.spacing(inf))) + assert_(np.isnan(np.spacing(-inf))) + assert_(np.spacing(t(1e30)) != 0) + +def test_spacing(): + return _test_spacing(np.float64) + +def test_spacingf(): + return _test_spacing(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_spacingl(): + return _test_spacing(np.longdouble) + +def test_spacing_gfortran(): + # Reference from this fortran file, built with gfortran 4.3.3 on linux + # 32bits: + # PROGRAM test_spacing + # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) + # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) + # + # WRITE(*,*) spacing(0.00001_DBL) + # WRITE(*,*) spacing(1.0_DBL) + # WRITE(*,*) spacing(1000._DBL) + # WRITE(*,*) spacing(10500._DBL) + # + # WRITE(*,*) spacing(0.00001_SGL) + # WRITE(*,*) spacing(1.0_SGL) + # WRITE(*,*) spacing(1000._SGL) + # WRITE(*,*) spacing(10500._SGL) + # END PROGRAM + ref = {np.float64: [1.69406589450860068E-021, + 2.22044604925031308E-016, + 1.13686837721616030E-013, + 1.81898940354585648E-012], + np.float32: [9.09494702E-13, + 1.19209290E-07, + 6.10351563E-05, + 9.76562500E-04]} + + for dt, dec_ in zip([np.float32, np.float64], (10, 20)): + x = np.array([1e-5, 1, 1000, 10500], dtype=dt) + assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_) + +def test_nextafter_vs_spacing(): + # XXX: spacing does not handle long double yet + for t in [np.float32, np.float64]: + for _f in [1, 1e-5, 1000]: + f = t(_f) + f1 = t(_f + 1) + assert_(np.nextafter(f, f1) - f == np.spacing(f)) + +def test_pos_nan(): + """Check np.nan is a positive nan.""" + assert_(np.signbit(np.nan) == 0) + +def test_reduceat(): + """Test bug in reduceat when structured arrays are not copied.""" + db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) + a = np.empty([100], dtype=db) + a['name'] = 'Simple' + a['time'] = 10 + a['value'] = 100 + indx = [0, 7, 15, 25] + + h2 = [] + val1 = indx[0] + for val2 in indx[1:]: + h2.append(np.add.reduce(a['value'][val1:val2])) + val1 = val2 + h2.append(np.add.reduce(a['value'][val1:])) + h2 = np.array(h2) + + # test buffered -- this should work + h1 = np.add.reduceat(a['value'], indx) + assert_array_almost_equal(h1, h2) + + # This is when the error occurs. 
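+    # (Shrinking the ufunc buffer via np.setbufsize(32) below exercises the
+    # unbuffered path -- the one that originally hit the uncopied
+    # structured-field bug this test guards against.)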
+ # test no buffer + np.setbufsize(32) + h1 = np.add.reduceat(a['value'], indx) + np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT) + assert_array_almost_equal(h1, h2) + +def test_reduceat_empty(): + """Reduceat should work with empty arrays""" + indices = np.array([], 'i4') + x = np.array([], 'f8') + result = np.add.reduceat(x, indices) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0,)) + # Another case with a slightly different zero-sized shape + x = np.ones((5, 2)) + result = np.add.reduceat(x, [], axis=0) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0, 2)) + result = np.add.reduceat(x, [], axis=1) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (5, 0)) + +def test_complex_nan_comparisons(): + nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] + fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), + complex(1, 1), complex(-1, -1), complex(0, 0)] + + with np.errstate(invalid='ignore'): + for x in nans + fins: + x = np.array([x]) + for y in nans + fins: + y = np.array([y]) + + if np.isfinite(x) and np.isfinite(y): + continue + + assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) + assert_equal(x > y, False, err_msg="%r > %r" % (x, y)) + assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) + assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) + assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) + + +def test_rint_big_int(): + # np.rint bug for large integer values on Windows 32-bit and MKL + # https://github.com/numpy/numpy/issues/6685 + val = 4607998452777363968 + # This is exactly representable in floating point + assert_equal(val, int(float(val))) + # Rint should not change the value + assert_equal(val, np.rint(val)) + + +def test_signaling_nan_exceptions(): + with assert_no_warnings(): + a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff') + np.isnan(a) diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath.pyc new file mode 100644 index 0000000..547bbca Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath_complex.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath_complex.py new file mode 100644 index 0000000..785ae8c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath_complex.py @@ -0,0 +1,543 @@ +from __future__ import division, absolute_import, print_function + +import sys +import platform +import pytest + +import numpy as np +import numpy.core.umath as ncu +from numpy.testing import ( + assert_raises, assert_equal, assert_array_equal, assert_almost_equal + ) + +# TODO: branch cuts (use Pauli code) +# TODO: conj 'symmetry' +# TODO: FPU exceptions + +# At least on Windows the results of many complex functions are not conforming +# to the C99 standard. See ticket 1574. +# Ditto for Solaris (ticket 1642) and OS X on PowerPC. 
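+# As a concrete probe in the same spirit as the check below: on a conforming
+# platform np.exp(complex(np.inf, 0)) comes back as (inf+0j) with a zero
+# imaginary part, and np.log(complex(-0., 0.)) carries an imaginary part of
+# exactly pi.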
+#FIXME: this will probably change when we require full C99 compatibility
+with np.errstate(all='ignore'):
+    functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
+                            or (np.log(complex(np.NZERO, 0)).imag != np.pi))
+# TODO: replace with a check on whether platform-provided C99 funcs are used
+xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
+
+# TODO This can be xfail once the generator functions are removed.
+platform_skip = pytest.mark.skipif(xfail_complex_tests,
+                                   reason="Inadequate C99 complex support")
+
+
+
+class TestCexp(object):
+    def test_simple(self):
+        check = check_complex_value
+        f = np.exp
+
+        check(f, 1, 0, np.exp(1), 0, False)
+        check(f, 0, 1, np.cos(1), np.sin(1), False)
+
+        ref = np.exp(1) * complex(np.cos(1), np.sin(1))
+        check(f, 1, 1, ref.real, ref.imag, False)
+
+    @platform_skip
+    def test_special_values(self):
+        # C99: Section G 6.3.1
+
+        check = check_complex_value
+        f = np.exp
+
+        # cexp(+-0 + 0i) is 1 + 0i
+        check(f, np.PZERO, 0, 1, 0, False)
+        check(f, np.NZERO, 0, 1, 0, False)
+
+        # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
+        # exception
+        check(f, 1, np.inf, np.nan, np.nan)
+        check(f, -1, np.inf, np.nan, np.nan)
+        check(f, 0, np.inf, np.nan, np.nan)
+
+        # cexp(inf + 0i) is inf + 0i
+        check(f, np.inf, 0, np.inf, 0)
+
+        # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
+        check(f, -np.inf, 1, np.PZERO, np.PZERO)
+        check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO)
+
+        # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
+        check(f, np.inf, 1, np.inf, np.inf)
+        check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf)
+
+        # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
+        def _check_ninf_inf(dummy):
+            msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(-np.inf, np.inf)))
+                if z.real != 0 or z.imag != 0:
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_ninf_inf(None)
+
+        # cexp(inf + inf i) is +-inf + NaNi and raises 'invalid' FPU exception
+        def _check_inf_inf(dummy):
+            msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(np.inf, np.inf)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_inf_inf(None)
+
+        # cexp(-inf + nan i) is +-0 +- 0i
+        def _check_ninf_nan(dummy):
+            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(-np.inf, np.nan)))
+                if z.real != 0 or z.imag != 0:
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_ninf_nan(None)
+
+        # cexp(inf + nan i) is +-inf + nan
+        def _check_inf_nan(dummy):
+            msgform = "cexp(inf, nan) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(np.inf, np.nan)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_inf_nan(None)
+
+        # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
+        # ex)
+        check(f, np.nan, 1, np.nan, np.nan)
+        check(f, np.nan, -1, np.nan, np.nan)
+
+        check(f, np.nan, np.inf, np.nan, np.nan)
+        check(f, np.nan, -np.inf, np.nan, np.nan)
+
+        # cexp(nan + nani) is nan + nani
+        check(f, np.nan, np.nan, np.nan, np.nan)
+
+    # TODO This can be xfail once the generator functions are removed.
+ @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") + def test_special_values2(self): + # XXX: most implementations get it wrong here (including glibc <= 2.10) + # cexp(nan + 0i) is nan + 0i + check = check_complex_value + f = np.exp + + check(f, np.nan, 0, np.nan, 0) + +class TestClog(object): + def test_simple(self): + x = np.array([1+0j, 1+2j]) + y_r = np.log(np.abs(x)) + 1j * np.angle(x) + y = np.log(x) + for i in range(len(x)): + assert_almost_equal(y[i], y_r[i]) + + @platform_skip + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_special_values(self): + xl = [] + yl = [] + + # From C99 std (Sec 6.3.2) + # XXX: check exceptions raised + # --- raise for invalid fails. + + # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' + # floating-point exception. + with np.errstate(divide='raise'): + x = np.array([np.NZERO], dtype=complex) + y = complex(-np.inf, np.pi) + assert_raises(FloatingPointError, np.log, x) + with np.errstate(divide='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' + # floating-point exception. + with np.errstate(divide='raise'): + x = np.array([0], dtype=complex) + y = complex(-np.inf, 0) + assert_raises(FloatingPointError, np.log, x) + with np.errstate(divide='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(x + i inf returns +inf + i pi /2, for finite x. + x = np.array([complex(1, np.inf)], dtype=complex) + y = complex(np.inf, 0.5 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-1, np.inf)], dtype=complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(x + iNaN) returns NaN + iNaN and optionally raises the + # 'invalid' floating- point exception, for finite x. + with np.errstate(invalid='raise'): + x = np.array([complex(1., np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + with np.errstate(invalid='raise'): + x = np.array([np.inf + 1j * np.nan], dtype=complex) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. + x = np.array([-np.inf + 1j], dtype=complex) + y = complex(np.inf, np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. + x = np.array([np.inf + 1j], dtype=complex) + y = complex(np.inf, 0) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(- inf + i inf) returns +inf + i3pi /4. + x = np.array([complex(-np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.75 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + i inf) returns +inf + ipi /4. + x = np.array([complex(np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.25 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+/- inf + iNaN) returns +inf + iNaN. 
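+        # (Only the real part is pinned to +inf here; the NaN imaginary part
+        # simply propagates.)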
+ x = np.array([complex(np.inf, np.nan)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-np.inf, np.nan)], dtype=complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iy) returns NaN + iNaN and optionally raises the + # 'invalid' floating-point exception, for finite y. + x = np.array([complex(np.nan, 1)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + i inf) returns +inf + iNaN. + x = np.array([complex(np.nan, np.inf)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iNaN) returns NaN + iNaN. + x = np.array([complex(np.nan, np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(conj(z)) = conj(clog(z)). + xa = np.array(xl, dtype=complex) + ya = np.array(yl, dtype=complex) + with np.errstate(divide='ignore'): + for i in range(len(xa)): + assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) + + +class TestCsqrt(object): + + def test_simple(self): + # sqrt(1) + check_complex_value(np.sqrt, 1, 0, 1, 0) + + # sqrt(1i) + rres = 0.5*np.sqrt(2) + ires = rres + check_complex_value(np.sqrt, 0, 1, rres, ires, False) + + # sqrt(-1) + check_complex_value(np.sqrt, -1, 0, 0, 1) + + def test_simple_conjugate(self): + ref = np.conj(np.sqrt(complex(1, 1))) + + def f(z): + return np.sqrt(np.conj(z)) + + check_complex_value(f, 1, 1, ref.real, ref.imag, False) + + #def test_branch_cut(self): + # _check_branch_cut(f, -1, 0, 1, -1) + + @platform_skip + def test_special_values(self): + # C99: Sec G 6.4.2 + + check = check_complex_value + f = np.sqrt + + # csqrt(+-0 + 0i) is 0 + 0i + check(f, np.PZERO, 0, 0, 0) + check(f, np.NZERO, 0, 0, 0) + + # csqrt(x + infi) is inf + infi for any x (including NaN) + check(f, 1, np.inf, np.inf, np.inf) + check(f, -1, np.inf, np.inf, np.inf) + + check(f, np.PZERO, np.inf, np.inf, np.inf) + check(f, np.NZERO, np.inf, np.inf, np.inf) + check(f, np.inf, np.inf, np.inf, np.inf) + check(f, -np.inf, np.inf, np.inf, np.inf) + check(f, -np.nan, np.inf, np.inf, np.inf) + + # csqrt(x + nani) is nan + nani for any finite x + check(f, 1, np.nan, np.nan, np.nan) + check(f, -1, np.nan, np.nan, np.nan) + check(f, 0, np.nan, np.nan, np.nan) + + # csqrt(-inf + yi) is +0 + infi for any finite y > 0 + check(f, -np.inf, 1, np.PZERO, np.inf) + + # csqrt(inf + yi) is +inf + 0i for any finite y > 0 + check(f, np.inf, 1, np.inf, np.PZERO) + + # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) + def _check_ninf_nan(dummy): + msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" + z = np.sqrt(np.array(complex(-np.inf, np.nan))) + #Fixme: ugly workaround for isinf bug. 
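+            # (The guard below presumably silences the spurious 'invalid'
+            # warning some platforms raise when np.isnan/np.isinf touch the
+            # nan +- infj value being inspected.)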
+ with np.errstate(invalid='ignore'): + if not (np.isnan(z.real) and np.isinf(z.imag)): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_nan(None) + + # csqrt(+inf + nani) is inf + nani + check(f, np.inf, np.nan, np.inf, np.nan) + + # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x + # + nani) + check(f, np.nan, 0, np.nan, np.nan) + check(f, np.nan, 1, np.nan, np.nan) + check(f, np.nan, np.nan, np.nan, np.nan) + + # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch + # cuts first) + +class TestCpow(object): + def setup(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) + y_r = x ** 2 + y = np.power(x, 2) + for i in range(len(x)): + assert_almost_equal(y[i], y_r[i]) + + def test_scalar(self): + x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + lx = list(range(len(x))) + # Compute the values for complex type in python + p_r = [complex(x[i]) ** complex(y[i]) for i in lx] + # Substitute a result allowed by C99 standard + p_r[4] = complex(np.inf, np.nan) + # Do the same with numpy complex scalars + n_r = [x[i] ** y[i] for i in lx] + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + + def test_array(self): + x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + lx = list(range(len(x))) + # Compute the values for complex type in python + p_r = [complex(x[i]) ** complex(y[i]) for i in lx] + # Substitute a result allowed by C99 standard + p_r[4] = complex(np.inf, np.nan) + # Do the same with numpy arrays + n_r = x ** y + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + +class TestCabs(object): + def setup(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) + y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) + y = np.abs(x) + for i in range(len(x)): + assert_almost_equal(y[i], y_r[i]) + + def test_fabs(self): + # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) + x = np.array([1+0j], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(1, np.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.inf, np.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.nan, np.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + def test_cabs_inf_nan(self): + x, y = [], [] + + # cabs(+-nan + nani) returns nan + x.append(np.nan) + y.append(np.nan) + check_real_value(np.abs, np.nan, np.nan, np.nan) + + x.append(np.nan) + y.append(-np.nan) + check_real_value(np.abs, -np.nan, np.nan, np.nan) + + # According to C99 standard, if exactly one of the real/part is inf and + # the other nan, then cabs should return inf + x.append(np.inf) + y.append(np.nan) + check_real_value(np.abs, np.inf, np.nan, np.inf) + + x.append(-np.inf) + y.append(np.nan) + check_real_value(np.abs, -np.inf, np.nan, np.inf) + + # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) + def f(a): + return np.abs(np.conj(a)) + + def g(a, b): + return np.abs(complex(a, b)) + + xa = np.array(x, dtype=complex) + for i in range(len(xa)): + ref = g(x[i], y[i]) + check_real_value(f, x[i], y[i], ref) + +class TestCarg(object): + def 
test_simple(self):
+        check_real_value(ncu._arg, 1, 0, 0, False)
+        check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
+
+        check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
+        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+
+    # TODO This can be xfail once the generator functions are removed.
+    @pytest.mark.skip(
+        reason="Complex arithmetic with signed zero fails on most platforms")
+    def test_zero(self):
+        # carg(-0 +- 0i) returns +- pi
+        check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False)
+        check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False)
+
+        # carg(+0 +- 0i) returns +- 0
+        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+        check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO)
+
+        # carg(x +- 0i) returns +- 0 for x > 0
+        check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False)
+        check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False)
+
+        # carg(x +- 0i) returns +- pi for x < 0
+        check_real_value(ncu._arg, -1, np.PZERO, np.pi, False)
+        check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False)
+
+        # carg(+- 0 + yi) returns pi/2 for y > 0
+        check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False)
+        check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False)
+
+        # carg(+- 0 + yi) returns -pi/2 for y < 0
+        check_real_value(ncu._arg, np.PZERO, -1, -0.5 * np.pi, False)
+        check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False)
+
+    #def test_branch_cuts(self):
+    #    _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
+
+    def test_special_values(self):
+        # carg(-np.inf +- yi) returns +-pi for finite y > 0
+        check_real_value(ncu._arg, -np.inf, 1, np.pi, False)
+        check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
+
+        # carg(np.inf +- yi) returns +-0 for finite y > 0
+        check_real_value(ncu._arg, np.inf, 1, np.PZERO, False)
+        check_real_value(ncu._arg, np.inf, -1, np.NZERO, False)
+
+        # carg(x +- np.infi) returns +-pi/2 for finite x
+        check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False)
+        check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
+
+        # carg(-np.inf +- np.infi) returns +-3pi/4
+        check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False)
+        check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
+
+        # carg(np.inf +- np.infi) returns +-pi/4
+        check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False)
+        check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
+
+        # carg(x + yi) returns np.nan if x or y is nan
+        check_real_value(ncu._arg, np.nan, 0, np.nan, False)
+        check_real_value(ncu._arg, 0, np.nan, np.nan, False)
+
+        check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
+        check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
+
+
+def check_real_value(f, x1, y1, x, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    if exact:
+        assert_equal(f(z1), x)
+    else:
+        assert_almost_equal(f(z1), x)
+
+
+def check_complex_value(f, x1, y1, x2, y2, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    z2 = complex(x2, y2)
+    with np.errstate(invalid='ignore'):
+        if exact:
+            assert_equal(f(z1), z2)
+        else:
+            assert_almost_equal(f(z1), z2)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath_complex.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath_complex.pyc
new file mode 100644
index 0000000..5ce8eb1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath_complex.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_unicode.py
b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_unicode.py
new file mode 100644
index 0000000..2ffd880
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_unicode.py
@@ -0,0 +1,396 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import numpy as np
+from numpy.compat import unicode
+from numpy.testing import assert_, assert_equal, assert_array_equal
+
+# Guess the UCS length for this python interpreter
+if sys.version_info[:2] >= (3, 3):
+    # Python 3.3 uses a flexible string representation
+    ucs4 = False
+
+    def buffer_length(arr):
+        if isinstance(arr, unicode):
+            arr = str(arr)
+            if not arr:
+                charmax = 0
+            else:
+                charmax = max([ord(c) for c in arr])
+            if charmax < 256:
+                size = 1
+            elif charmax < 65536:
+                size = 2
+            else:
+                size = 4
+            return size * len(arr)
+        v = memoryview(arr)
+        if v.shape is None:
+            return len(v) * v.itemsize
+        else:
+            return np.prod(v.shape) * v.itemsize
+else:
+    if len(buffer(u'u')) == 4:
+        ucs4 = True
+    else:
+        ucs4 = False
+
+    def buffer_length(arr):
+        if isinstance(arr, np.ndarray):
+            return len(arr.data)
+        return len(buffer(arr))
+
+# In both cases below we need to make sure that the byte swapped value (as
+# UCS4) is still a valid unicode:
+# Value that can be represented in UCS2 interpreters
+ucs2_value = u'\u0900'
+# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
+ucs4_value = u'\U00100900'
+
+
+def test_string_cast():
+    str_arr = np.array(["1234", "1234\0\0"], dtype='S')
+    uni_arr1 = str_arr.astype('>U')
+    uni_arr2 = str_arr.astype('<U')
+
+>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
+
+Our result type, an ndarray that must be of type double, 1-dimensional,
+and C-contiguous in memory:
+
+>>> array_1d_double = np.ctypeslib.ndpointer(
+...                          dtype=np.double,
+...                          ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
+
+Our C-function typically takes an array and updates its values
+in-place. For example::
+
+    void foo_func(double* x, int length)
+    {
+        int i;
+        for (i = 0; i < length; i++) {
+            x[i] = i*i;
+        }
+    }
+
+We wrap it using:
+
+>>> _lib.foo_func.restype = None #doctest: +SKIP
+>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
+
+Then, we're ready to call ``foo_func``:
+
+>>> out = np.empty(15, dtype=np.double)
+>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
+           'c_intp', 'as_ctypes', 'as_array']
+
+import os
+from numpy import (
+    integer, ndarray, dtype as _dtype, deprecate, array, frombuffer
+)
+from numpy.core.multiarray import _flagdict, flagsobj
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+if ctypes is None:
+    def _dummy(*args, **kwds):
+        """
+        Dummy object that raises an ImportError if ctypes is not available.
+
+        Raises
+        ------
+        ImportError
+            If ctypes is not available.
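+            (All of the public entry points assigned below --
+            ctypes_load_library, load_library, as_ctypes and as_array --
+            are replaced by this stub in that case.)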
+
+        """
+        raise ImportError("ctypes is not available.")
+    ctypes_load_library = _dummy
+    load_library = _dummy
+    as_ctypes = _dummy
+    as_array = _dummy
+    from numpy import intp as c_intp
+    _ndptr_base = object
+else:
+    import numpy.core._internal as nic
+    c_intp = nic._getintp_ctype()
+    del nic
+    _ndptr_base = ctypes.c_void_p
+
+    # Adapted from Albert Strasheim
+    def load_library(libname, loader_path):
+        """
+        It is possible to load a library using
+        >>> lib = ctypes.cdll[<full_path_name>]
+
+        But there are cross-platform considerations, such as library file extensions,
+        plus the fact Windows will just load the first library it finds with that name.
+        NumPy supplies the load_library function as a convenience.
+
+        Parameters
+        ----------
+        libname : str
+            Name of the library, which can have 'lib' as a prefix,
+            but without an extension.
+        loader_path : str
+            Where the library can be found.
+
+        Returns
+        -------
+        ctypes.cdll[libpath] : library object
+            A ctypes library object
+
+        Raises
+        ------
+        OSError
+            If there is no library with the expected extension, or the
+            library is defective and cannot be loaded.
+        """
+        if ctypes.__version__ < '1.0.1':
+            import warnings
+            warnings.warn("All features of ctypes interface may not work " \
+                          "with ctypes < 1.0.1", stacklevel=2)
+
+        ext = os.path.splitext(libname)[1]
+        if not ext:
+            # Try to load library with platform-specific name, otherwise
+            # default to libname.[so|pyd]. Sometimes, these files are built
+            # erroneously on non-linux platforms.
+            from numpy.distutils.misc_util import get_shared_lib_extension
+            so_ext = get_shared_lib_extension()
+            libname_ext = [libname + so_ext]
+            # mac, windows and linux >= py3.2 shared library and loadable
+            # module have different extensions so try both
+            so_ext2 = get_shared_lib_extension(is_python_ext=True)
+            if not so_ext2 == so_ext:
+                libname_ext.insert(0, libname + so_ext2)
+        else:
+            libname_ext = [libname]
+
+        loader_path = os.path.abspath(loader_path)
+        if not os.path.isdir(loader_path):
+            libdir = os.path.dirname(loader_path)
+        else:
+            libdir = loader_path
+
+        for ln in libname_ext:
+            libpath = os.path.join(libdir, ln)
+            if os.path.exists(libpath):
+                try:
+                    return ctypes.cdll[libpath]
+                except OSError:
+                    ## defective lib file
+                    raise
+        ## if no successful return in the libname_ext loop:
+        raise OSError("no file with expected extension")
+
+    ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
+                                    'load_library')
+
+def _num_fromflags(flaglist):
+    num = 0
+    for val in flaglist:
+        num += _flagdict[val]
+    return num
+
+_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
+              'OWNDATA', 'UPDATEIFCOPY', 'WRITEBACKIFCOPY']
+def _flags_fromnum(num):
+    res = []
+    for key in _flagnames:
+        value = _flagdict[key]
+        if (num & value):
+            res.append(key)
+    return res
+
+
+class _ndptr(_ndptr_base):
+    @classmethod
+    def from_param(cls, obj):
+        if not isinstance(obj, ndarray):
+            raise TypeError("argument must be an ndarray")
+        if cls._dtype_ is not None \
+               and obj.dtype != cls._dtype_:
+            raise TypeError("array must have data type %s" % cls._dtype_)
+        if cls._ndim_ is not None \
+               and obj.ndim != cls._ndim_:
+            raise TypeError("array must have %d dimension(s)" % cls._ndim_)
+        if cls._shape_ is not None \
+               and obj.shape != cls._shape_:
+            raise TypeError("array must have shape %s" % str(cls._shape_))
+        if cls._flags_ is not None \
+               and ((obj.flags.num & cls._flags_) != cls._flags_):
+            raise TypeError("array must have flags %s" %
+                            _flags_fromnum(cls._flags_))
+        return obj.ctypes
+
+
+class
_concrete_ndptr(_ndptr): + """ + Like _ndptr, but with `_shape_` and `_dtype_` specified. + + Notably, this means the pointer has enough information to reconstruct + the array, which is not generally true. + """ + def _check_retval_(self): + """ + This method is called when this class is used as the .restype + attribute for a shared-library function, to automatically wrap the + pointer into an array. + """ + return self.contents + + @property + def contents(self): + """ + Get an ndarray viewing the data pointed to by this pointer. + + This mirrors the `contents` attribute of a normal ctypes pointer + """ + full_dtype = _dtype((self._dtype_, self._shape_)) + full_ctype = ctypes.c_char * full_dtype.itemsize + buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents + return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0) + + +# Factory for an array-checking class with from_param defined for +# use with ctypes argtypes mechanism +_pointer_type_cache = {} +def ndpointer(dtype=None, ndim=None, shape=None, flags=None): + """ + Array-checking restype/argtypes. + + An ndpointer instance is used to describe an ndarray in restypes + and argtypes specifications. This approach is more flexible than + using, for example, ``POINTER(c_double)``, since several restrictions + can be specified, which are verified upon calling the ctypes function. + These include data type, number of dimensions, shape and flags. If a + given array does not satisfy the specified restrictions, + a ``TypeError`` is raised. + + Parameters + ---------- + dtype : data-type, optional + Array data-type. + ndim : int, optional + Number of array dimensions. + shape : tuple of ints, optional + Array shape. + flags : str or tuple of str + Array flags; may be one or more of: + + - C_CONTIGUOUS / C / CONTIGUOUS + - F_CONTIGUOUS / F / FORTRAN + - OWNDATA / O + - WRITEABLE / W + - ALIGNED / A + - WRITEBACKIFCOPY / X + - UPDATEIFCOPY / U + + Returns + ------- + klass : ndpointer type object + A type object, which is an ``_ndtpr`` instance containing + dtype, ndim, shape and flags information. + + Raises + ------ + TypeError + If a given array does not satisfy the specified restrictions. + + Examples + -------- + >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64, + ... ndim=1, + ... flags='C_CONTIGUOUS')] + ... #doctest: +SKIP + >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64)) + ... 
#doctest: +SKIP + + """ + + # normalize dtype to an Optional[dtype] + if dtype is not None: + dtype = _dtype(dtype) + + # normalize flags to an Optional[int] + num = None + if flags is not None: + if isinstance(flags, str): + flags = flags.split(',') + elif isinstance(flags, (int, integer)): + num = flags + flags = _flags_fromnum(num) + elif isinstance(flags, flagsobj): + num = flags.num + flags = _flags_fromnum(num) + if num is None: + try: + flags = [x.strip().upper() for x in flags] + except Exception: + raise TypeError("invalid flags specification") + num = _num_fromflags(flags) + + # normalize shape to an Optional[tuple] + if shape is not None: + try: + shape = tuple(shape) + except TypeError: + # single integer -> 1-tuple + shape = (shape,) + + cache_key = (dtype, ndim, shape, num) + + try: + return _pointer_type_cache[cache_key] + except KeyError: + pass + + # produce a name for the new type + if dtype is None: + name = 'any' + elif dtype.names: + name = str(id(dtype)) + else: + name = dtype.str + if ndim is not None: + name += "_%dd" % ndim + if shape is not None: + name += "_"+"x".join(str(x) for x in shape) + if flags is not None: + name += "_"+"_".join(flags) + + if dtype is not None and shape is not None: + base = _concrete_ndptr + else: + base = _ndptr + + klass = type("ndpointer_%s"%name, (base,), + {"_dtype_": dtype, + "_shape_" : shape, + "_ndim_" : ndim, + "_flags_" : num}) + _pointer_type_cache[cache_key] = klass + return klass + + +if ctypes is not None: + def _ctype_ndarray(element_type, shape): + """ Create an ndarray of the given element type and shape """ + for dim in shape[::-1]: + element_type = dim * element_type + # prevent the type name include np.ctypeslib + element_type.__module__ = None + return element_type + + + def _get_scalar_type_map(): + """ + Return a dictionary mapping native endian scalar dtype to ctypes types + """ + ct = ctypes + simple_types = [ + ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong, + ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong, + ct.c_float, ct.c_double, + ct.c_bool, + ] + return {_dtype(ctype): ctype for ctype in simple_types} + + + _scalar_type_map = _get_scalar_type_map() + + + def _ctype_from_dtype_scalar(dtype): + # swapping twice ensure that `=` is promoted to <, >, or | + dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S') + dtype_native = dtype.newbyteorder('=') + try: + ctype = _scalar_type_map[dtype_native] + except KeyError: + raise NotImplementedError( + "Converting {!r} to a ctypes type".format(dtype) + ) + + if dtype_with_endian.byteorder == '>': + ctype = ctype.__ctype_be__ + elif dtype_with_endian.byteorder == '<': + ctype = ctype.__ctype_le__ + + return ctype + + + def _ctype_from_dtype_subarray(dtype): + element_dtype, shape = dtype.subdtype + ctype = _ctype_from_dtype(element_dtype) + return _ctype_ndarray(ctype, shape) + + + def _ctype_from_dtype_structured(dtype): + # extract offsets of each field + field_data = [] + for name in dtype.names: + field_dtype, offset = dtype.fields[name][:2] + field_data.append((offset, name, _ctype_from_dtype(field_dtype))) + + # ctypes doesn't care about field order + field_data = sorted(field_data, key=lambda f: f[0]) + + if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data): + # union, if multiple fields all at address 0 + size = 0 + _fields_ = [] + for offset, name, ctype in field_data: + _fields_.append((name, ctype)) + size = max(size, ctypes.sizeof(ctype)) + + # pad to the right size + if dtype.itemsize != 
size: + _fields_.append(('', ctypes.c_char * dtype.itemsize)) + + # we inserted manual padding, so always `_pack_` + return type('union', (ctypes.Union,), dict( + _fields_=_fields_, + _pack_=1, + __module__=None, + )) + else: + last_offset = 0 + _fields_ = [] + for offset, name, ctype in field_data: + padding = offset - last_offset + if padding < 0: + raise NotImplementedError("Overlapping fields") + if padding > 0: + _fields_.append(('', ctypes.c_char * padding)) + + _fields_.append((name, ctype)) + last_offset = offset + ctypes.sizeof(ctype) + + + padding = dtype.itemsize - last_offset + if padding > 0: + _fields_.append(('', ctypes.c_char * padding)) + + # we inserted manual padding, so always `_pack_` + return type('struct', (ctypes.Structure,), dict( + _fields_=_fields_, + _pack_=1, + __module__=None, + )) + + + def _ctype_from_dtype(dtype): + if dtype.fields is not None: + return _ctype_from_dtype_structured(dtype) + elif dtype.subdtype is not None: + return _ctype_from_dtype_subarray(dtype) + else: + return _ctype_from_dtype_scalar(dtype) + + + def as_ctypes_type(dtype): + """ + Convert a dtype into a ctypes type. + + Parameters + ---------- + dtype : dtype + The dtype to convert + + Returns + ------- + ctypes + A ctype scalar, union, array, or struct + + Raises + ------ + NotImplementedError + If the conversion is not possible + + Notes + ----- + This function does not losslessly round-trip in either direction. + + ``np.dtype(as_ctypes_type(dt))`` will: + - insert padding fields + - reorder fields to be sorted by offset + - discard field titles + + ``as_ctypes_type(np.dtype(ctype))`` will: + - discard the class names of ``Structure``s and ``Union``s + - convert single-element ``Union``s into single-element ``Structure``s + - insert padding fields + + """ + return _ctype_from_dtype(_dtype(dtype)) + + + def as_array(obj, shape=None): + """ + Create a numpy array from a ctypes array or POINTER. + + The numpy array shares the memory with the ctypes object. + + The shape parameter must be given if converting from a ctypes POINTER. + The shape parameter is ignored if converting from a ctypes array + """ + if isinstance(obj, ctypes._Pointer): + # convert pointers to an array of the desired shape + if shape is None: + raise TypeError( + 'as_array() requires a shape argument when called on a ' + 'pointer') + p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) + obj = ctypes.cast(obj, p_arr_type).contents + + return array(obj, copy=False) + + + def as_ctypes(obj): + """Create and return a ctypes object from a numpy array. 
Actually + anything that exposes the __array_interface__ is accepted.""" + ai = obj.__array_interface__ + if ai["strides"]: + raise TypeError("strided arrays not supported") + if ai["version"] != 3: + raise TypeError("only __array_interface__ version 3 supported") + addr, readonly = ai["data"] + if readonly: + raise TypeError("readonly arrays unsupported") + + dtype = _dtype((ai["typestr"], ai["shape"])) + result = as_ctypes_type(dtype).from_address(addr) + result.__keep = obj + return result diff --git a/project/venv/lib/python2.7/site-packages/numpy/ctypeslib.pyc b/project/venv/lib/python2.7/site-packages/numpy/ctypeslib.pyc new file mode 100644 index 0000000..b3c75fa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ctypeslib.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/__config__.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/__config__.py new file mode 100644 index 0000000..adfef3f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/__config__.py @@ -0,0 +1,39 @@ +# This file is generated by numpy's setup.py +# It contains system_info results at the time of building this package. +__all__ = ["get_info","show"] + + + +import os +import sys + +extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') + +if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + os.environ.setdefault('PATH', '') + os.environ['PATH'] += os.pathsep + extra_dll_dir + +lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} +blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} +blis_info={} +openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} +openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]} +lapack_mkl_info={} +blas_mkl_info={} + +def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + +def show(): + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... " + v[-60:] + print(" %s = %s" % (k,v)) + \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/__config__.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/__config__.pyc new file mode 100644 index 0000000..e2607ca Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/__config__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/__init__.py new file mode 100644 index 0000000..5551475 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/__init__.py @@ -0,0 +1,34 @@ +from __future__ import division, absolute_import, print_function + +from .__version__ import version as __version__ +# Must import local ccompiler ASAP in order to get +# customized CCompiler.spawn effective. +from . import ccompiler +from . 
import unixccompiler
+
+from .info import __doc__
+from .npy_pkg_config import *
+
+# If numpy is installed, add distutils.test()
+try:
+    from . import __config__
+    # Normally numpy is installed if the above import works, but an interrupted
+    # in-place build could also have left a __config__.py. In that case the
+    # next import may still fail, so keep it inside the try block.
+    from numpy._pytesttester import PytestTester
+    test = PytestTester(__name__)
+    del PytestTester
+except ImportError:
+    pass
+
+
+def customized_fcompiler(plat=None, compiler=None):
+    from numpy.distutils.fcompiler import new_fcompiler
+    c = new_fcompiler(plat=plat, compiler=compiler)
+    c.customize()
+    return c
+
+def customized_ccompiler(plat=None, compiler=None):
+    c = ccompiler.new_compiler(plat=plat, compiler=compiler)
+    c.customize('')
+    return c
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/__init__.pyc
new file mode 100644
index 0000000..83e9681
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/__version__.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/__version__.py
new file mode 100644
index 0000000..969decb
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/__version__.py
@@ -0,0 +1,6 @@
+from __future__ import division, absolute_import, print_function
+
+major = 0
+minor = 4
+micro = 0
+version = '%(major)d.%(minor)d.%(micro)d' % (locals())
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/__version__.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/__version__.pyc
new file mode 100644
index 0000000..e67bcce
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/__version__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/_shell_utils.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/_shell_utils.py
new file mode 100644
index 0000000..5ef8749
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/_shell_utils.py
@@ -0,0 +1,91 @@
+"""
+Helper functions for interacting with the shell, and consuming shell-style
+parameters provided in config files.
+"""
+import os
+import shlex
+import subprocess
+try:
+    from shlex import quote
+except ImportError:
+    from pipes import quote
+
+__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
+
+
+class CommandLineParser:
+    """
+    An object that knows how to split and join command-line arguments.
+
+    It must be true that ``argv == split(join(argv))`` for all ``argv``.
+    The reverse needn't be true - `join(split(cmd))` may result in the addition
+    or removal of unnecessary escaping.
+    """
+    @staticmethod
+    def join(argv):
+        """ Join a list of arguments into a command line string """
+        raise NotImplementedError
+
+    @staticmethod
+    def split(cmd):
+        """ Split a command line string into a list of arguments """
+        raise NotImplementedError
+
+
+class WindowsParser:
+    """
+    The parsing behavior used by `subprocess.call("string")` on Windows, which
+    matches the Microsoft C/C++ runtime.
+
+    Note that this is _not_ the behavior of cmd.
+ """ + @staticmethod + def join(argv): + # note that list2cmdline is specific to the windows syntax + return subprocess.list2cmdline(argv) + + @staticmethod + def split(cmd): + import ctypes # guarded import for systems without ctypes + try: + ctypes.windll + except AttributeError: + raise NotImplementedError + + # Windows has special parsing rules for the executable (no quotes), + # that we do not care about - insert a dummy element + if not cmd: + return [] + cmd = 'dummy ' + cmd + + CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW + CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) + CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) + + nargs = ctypes.c_int() + lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) + args = [lpargs[i] for i in range(nargs.value)] + assert not ctypes.windll.kernel32.LocalFree(lpargs) + + # strip the element we inserted + assert args[0] == "dummy" + return args[1:] + + +class PosixParser: + """ + The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. + """ + @staticmethod + def join(argv): + return ' '.join(quote(arg) for arg in argv) + + @staticmethod + def split(cmd): + return shlex.split(cmd, posix=True) + + +if os.name == 'nt': + NativeParser = WindowsParser +elif os.name == 'posix': + NativeParser = PosixParser diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/_shell_utils.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/_shell_utils.pyc new file mode 100644 index 0000000..19572d0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/_shell_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/ccompiler.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/ccompiler.py new file mode 100644 index 0000000..100d0d0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/ccompiler.py @@ -0,0 +1,858 @@ +from __future__ import division, absolute_import, print_function + +import os +import re +import sys +import types +import shlex +import time +import subprocess +from copy import copy +from distutils import ccompiler +from distutils.ccompiler import * +from distutils.errors import DistutilsExecError, DistutilsModuleError, \ + DistutilsPlatformError, CompileError +from distutils.sysconfig import customize_compiler +from distutils.version import LooseVersion + +from numpy.distutils import log +from numpy.distutils.compat import get_exception +from numpy.distutils.exec_command import ( + filepath_from_subprocess_output, forward_bytes_to_stdout +) +from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ + get_num_build_jobs, \ + _commandline_dep_string + +# globals for parallel build management +try: + import threading +except ImportError: + import dummy_threading as threading +_job_semaphore = None +_global_lock = threading.Lock() +_processing_files = set() + + +def _needs_build(obj, cc_args, extra_postargs, pp_opts): + """ + Check if an objects needs to be rebuild based on its dependencies + + Parameters + ---------- + obj : str + object file + + Returns + ------- + bool + """ + # defined in unixcompiler.py + dep_file = obj + '.d' + if not os.path.exists(dep_file): + return True + + # dep_file is a makefile containing 'object: dependencies' + # formatted like posix shell (spaces escaped, \ line continuations) + # the last line contains the compiler commandline arguments as some + # projects may compile an extension multiple times with 
different + # arguments + with open(dep_file, "r") as f: + lines = f.readlines() + + cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) + last_cmdline = lines[-1] + if last_cmdline != cmdline: + return True + + contents = ''.join(lines[:-1]) + deps = [x for x in shlex.split(contents, posix=True) + if x != "\n" and not x.endswith(":")] + + try: + t_obj = os.stat(obj).st_mtime + + # check if any of the dependencies is newer than the object + # the dependencies includes the source used to create the object + for f in deps: + if os.stat(f).st_mtime > t_obj: + return True + except OSError: + # no object counts as newer (shouldn't happen if dep_file exists) + return True + + return False + + +def replace_method(klass, method_name, func): + if sys.version_info[0] < 3: + m = types.MethodType(func, None, klass) + else: + # Py3k does not have unbound method anymore, MethodType does not work + m = lambda self, *args, **kw: func(self, *args, **kw) + setattr(klass, method_name, m) + + +###################################################################### +## Method that subclasses may redefine. But don't call this method, +## it i private to CCompiler class and may return unexpected +## results if used elsewhere. So, you have been warned.. + +def CCompiler_find_executables(self): + """ + Does nothing here, but is called by the get_version method and can be + overridden by subclasses. In particular it is redefined in the `FCompiler` + class where more documentation can be found. + + """ + pass + + +replace_method(CCompiler, 'find_executables', CCompiler_find_executables) + + +# Using customized CCompiler.spawn. +def CCompiler_spawn(self, cmd, display=None): + """ + Execute a command in a sub-process. + + Parameters + ---------- + cmd : str + The command to execute. + display : str or sequence of str, optional + The text to add to the log file kept by `numpy.distutils`. + If not given, `display` is equal to `cmd`. + + Returns + ------- + None + + Raises + ------ + DistutilsExecError + If the command failed, i.e. the exit status was not 0. + + """ + if display is None: + display = cmd + if is_sequence(display): + display = ' '.join(list(display)) + log.info(display) + try: + subprocess.check_output(cmd) + except subprocess.CalledProcessError as exc: + o = exc.output + s = exc.returncode + except OSError: + # OSError doesn't have the same hooks for the exception + # output, but exec_command() historically would use an + # empty string for EnvironmentError (base class for + # OSError) + o = b'' + # status previously used by exec_command() for parent + # of OSError + s = 127 + else: + # use a convenience return here so that any kind of + # caught exception will execute the default code after the + # try / except block, which handles various exceptions + return None + + if is_sequence(cmd): + cmd = ' '.join(list(cmd)) + + forward_bytes_to_stdout(o) + + if re.search(b'Too many open files', o): + msg = '\nTry rerunning setup command until build succeeds.' + else: + msg = '' + raise DistutilsExecError('Command "%s" failed with exit status %d%s' % + (cmd, s, msg)) + +replace_method(CCompiler, 'spawn', CCompiler_spawn) + +def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): + """ + Return the name of the object files for the given source files. + + Parameters + ---------- + source_filenames : list of str + The list of paths to source files. Paths can be either relative or + absolute, this is handled transparently. 
+ strip_dir : bool, optional + Whether to strip the directory from the returned paths. If True, + the file name prepended by `output_dir` is returned. Default is False. + output_dir : str, optional + If given, this path is prepended to the returned paths to the + object files. + + Returns + ------- + obj_names : list of str + The list of paths to the object files corresponding to the source + files in `source_filenames`. + + """ + if output_dir is None: + output_dir = '' + obj_names = [] + for src_name in source_filenames: + base, ext = os.path.splitext(os.path.normpath(src_name)) + base = os.path.splitdrive(base)[1] # Chop off the drive + base = base[os.path.isabs(base):] # If abs, chop off leading / + if base.startswith('..'): + # Resolve starting relative path components, middle ones + # (if any) have been handled by os.path.normpath above. + i = base.rfind('..')+2 + d = base[:i] + d = os.path.basename(os.path.abspath(d)) + base = d + base[i:] + if ext not in self.src_extensions: + raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) + if strip_dir: + base = os.path.basename(base) + obj_name = os.path.join(output_dir, base + self.obj_extension) + obj_names.append(obj_name) + return obj_names + +replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) + +def CCompiler_compile(self, sources, output_dir=None, macros=None, + include_dirs=None, debug=0, extra_preargs=None, + extra_postargs=None, depends=None): + """ + Compile one or more source files. + + Please refer to the Python distutils API reference for more details. + + Parameters + ---------- + sources : list of str + A list of filenames + output_dir : str, optional + Path to the output directory. + macros : list of tuples + A list of macro definitions. + include_dirs : list of str, optional + The directories to add to the default include file search path for + this compilation only. + debug : bool, optional + Whether or not to output debug symbols in or alongside the object + file(s). + extra_preargs, extra_postargs : ? + Extra pre- and post-arguments. + depends : list of str, optional + A list of file names that all targets depend on. + + Returns + ------- + objects : list of str + A list of object file names, one per source file `sources`. + + Raises + ------ + CompileError + If compilation fails. + + """ + # This method is effective only with Python >=2.3 distutils. + # Any changes here should be applied also to fcompiler.compile + # method to support pre Python 2.3 distutils. 
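The body that follows throttles compilation with a process-wide semaphore and an in-flight set, so parallel extension builds never exceed the job limit and never compile the same object file twice at once. A standalone sketch of that pattern (NUM_JOBS and the sleep standing in for the compiler call are illustrative):

import threading
import time
from multiprocessing.pool import ThreadPool

NUM_JOBS = 4
_job_semaphore = threading.Semaphore(NUM_JOBS)   # caps concurrent compiles
_lock = threading.Lock()
_in_progress = set()                             # objects being built right now

def compile_one(obj):
    # wait if another worker is already building this object
    while True:
        with _lock:
            if obj not in _in_progress:
                _in_progress.add(obj)
                break
        time.sleep(0.1)
    try:
        with _job_semaphore:        # at most NUM_JOBS compiles at a time
            time.sleep(0.01)        # stand-in for the real compiler call
    finally:
        with _lock:
            _in_progress.remove(obj)

pool = ThreadPool(NUM_JOBS)
pool.map(compile_one, ['a.o', 'b.o', 'c.o'])
pool.close()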
+ global _job_semaphore + + jobs = get_num_build_jobs() + + # setup semaphore to not exceed number of compile jobs when parallelized at + # extension level (python >= 3.5) + with _global_lock: + if _job_semaphore is None: + _job_semaphore = threading.Semaphore(jobs) + + if not sources: + return [] + # FIXME:RELATIVE_IMPORT + if sys.version_info[0] < 3: + from .fcompiler import FCompiler, is_f_file, has_f90_header + else: + from numpy.distutils.fcompiler import (FCompiler, is_f_file, + has_f90_header) + if isinstance(self, FCompiler): + display = [] + for fc in ['f77', 'f90', 'fix']: + fcomp = getattr(self, 'compiler_'+fc) + if fcomp is None: + continue + display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) + display = '\n'.join(display) + else: + ccomp = self.compiler_so + display = "C compiler: %s\n" % (' '.join(ccomp),) + log.info(display) + macros, objects, extra_postargs, pp_opts, build = \ + self._setup_compile(output_dir, macros, include_dirs, sources, + depends, extra_postargs) + cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) + display = "compile options: '%s'" % (' '.join(cc_args)) + if extra_postargs: + display += "\nextra options: '%s'" % (' '.join(extra_postargs)) + log.info(display) + + def single_compile(args): + obj, (src, ext) = args + if not _needs_build(obj, cc_args, extra_postargs, pp_opts): + return + + # check if we are currently already processing the same object + # happens when using the same source in multiple extensions + while True: + # need explicit lock as there is no atomic check and add with GIL + with _global_lock: + # file not being worked on, start working + if obj not in _processing_files: + _processing_files.add(obj) + break + # wait for the processing to end + time.sleep(0.1) + + try: + # retrieve slot from our #job semaphore and build + with _job_semaphore: + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + finally: + # register being done processing + with _global_lock: + _processing_files.remove(obj) + + + if isinstance(self, FCompiler): + objects_to_build = list(build.keys()) + f77_objects, other_objects = [], [] + for obj in objects: + if obj in objects_to_build: + src, ext = build[obj] + if self.compiler_type=='absoft': + obj = cyg2win32(obj) + src = cyg2win32(src) + if is_f_file(src) and not has_f90_header(src): + f77_objects.append((obj, (src, ext))) + else: + other_objects.append((obj, (src, ext))) + + # f77 objects can be built in parallel + build_items = f77_objects + # build f90 modules serial, module files are generated during + # compilation and may be used by files later in the list so the + # ordering is important + for o in other_objects: + single_compile(o) + else: + build_items = build.items() + + if len(build) > 1 and jobs > 1: + # build parallel + import multiprocessing.pool + pool = multiprocessing.pool.ThreadPool(jobs) + pool.map(single_compile, build_items) + pool.close() + else: + # build serial + for o in build_items: + single_compile(o) + + # Return *all* object filenames, not just the ones we just built. + return objects + +replace_method(CCompiler, 'compile', CCompiler_compile) + +def CCompiler_customize_cmd(self, cmd, ignore=()): + """ + Customize compiler using distutils command. + + Parameters + ---------- + cmd : class instance + An instance inheriting from `distutils.cmd.Command`. + ignore : sequence of str, optional + List of `CCompiler` commands (without ``'set_'``) that should not be + altered. 
Strings that are checked for are: + ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', + 'rpath', 'link_objects')``. + + Returns + ------- + None + + """ + log.info('customize %s using %s' % (self.__class__.__name__, + cmd.__class__.__name__)) + def allow(attr): + return getattr(cmd, attr, None) is not None and attr not in ignore + + if allow('include_dirs'): + self.set_include_dirs(cmd.include_dirs) + if allow('define'): + for (name, value) in cmd.define: + self.define_macro(name, value) + if allow('undef'): + for macro in cmd.undef: + self.undefine_macro(macro) + if allow('libraries'): + self.set_libraries(self.libraries + cmd.libraries) + if allow('library_dirs'): + self.set_library_dirs(self.library_dirs + cmd.library_dirs) + if allow('rpath'): + self.set_runtime_library_dirs(cmd.rpath) + if allow('link_objects'): + self.set_link_objects(cmd.link_objects) + +replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) + +def _compiler_to_string(compiler): + props = [] + mx = 0 + keys = list(compiler.executables.keys()) + for key in ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch', + 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: + if key not in keys: + keys.append(key) + for key in keys: + if hasattr(compiler, key): + v = getattr(compiler, key) + mx = max(mx, len(key)) + props.append((key, repr(v))) + fmt = '%-' + repr(mx+1) + 's = %s' + lines = [fmt % prop for prop in props] + return '\n'.join(lines) + +def CCompiler_show_customization(self): + """ + Print the compiler customizations to stdout. + + Parameters + ---------- + None + + Returns + ------- + None + + Notes + ----- + Printing is only done if the distutils log threshold is < 2. + + """ + if 0: + for attrname in ['include_dirs', 'define', 'undef', + 'libraries', 'library_dirs', + 'rpath', 'link_objects']: + attr = getattr(self, attrname, None) + if not attr: + continue + log.info("compiler '%s' is set to %s" % (attrname, attr)) + try: + self.get_version() + except Exception: + pass + if log._global_log.threshold<2: + print('*'*80) + print(self.__class__) + print(_compiler_to_string(self)) + print('*'*80) + +replace_method(CCompiler, 'show_customization', CCompiler_show_customization) + +def CCompiler_customize(self, dist, need_cxx=0): + """ + Do any platform-specific customization of a compiler instance. + + This method calls `distutils.sysconfig.customize_compiler` for + platform-specific customization, as well as optionally remove a flag + to suppress spurious warnings in case C++ code is being compiled. + + Parameters + ---------- + dist : object + This parameter is not used for anything. + need_cxx : bool, optional + Whether or not C++ has to be compiled. If so (True), the + ``"-Wstrict-prototypes"`` option is removed to prevent spurious + warnings. Default is False. + + Returns + ------- + None + + Notes + ----- + All the default options used by distutils can be extracted with:: + + from distutils import sysconfig + sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', + 'CCSHARED', 'LDSHARED', 'SO') + + """ + # See FCompiler.customize for suggested usage. + log.info('customize %s' % (self.__class__.__name__)) + customize_compiler(self) + if need_cxx: + # In general, distutils uses -Wstrict-prototypes, but this option is + # not valid for C++ code, only for C. Remove it if it's there to + # avoid a spurious warning on every compilation. 
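Besides the flag handling just below, this method ends by probing whether the compiler understands gcc-style dependency generation, by test-compiling a throwaway file. That probe as a standalone helper; invoking a bare `cc` is an assumption for illustration, the real code drives the compiler object:

import os
import shutil
import subprocess
import tempfile

def supports_auto_depends(compiler='cc'):
    # try to compile a trivial file with -MMD/-MF; success means the
    # compiler can emit makefile-style dependency files
    tmpdir = tempfile.mkdtemp()
    try:
        fn = os.path.join(tmpdir, 'file.c')
        with open(fn, 'w') as f:
            f.write('int a;\n')
        rc = subprocess.call([compiler, '-MMD', '-MF', fn + '.d',
                              '-c', fn, '-o', fn + '.o'])
        return rc == 0
    except OSError:
        return False   # compiler executable not found
    finally:
        shutil.rmtree(tmpdir)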
+ try: + self.compiler_so.remove('-Wstrict-prototypes') + except (AttributeError, ValueError): + pass + + if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: + if not self.compiler_cxx: + if self.compiler[0].startswith('gcc'): + a, b = 'gcc', 'g++' + else: + a, b = 'cc', 'c++' + self.compiler_cxx = [self.compiler[0].replace(a, b)]\ + + self.compiler[1:] + else: + if hasattr(self, 'compiler'): + log.warn("#### %s #######" % (self.compiler,)) + if not hasattr(self, 'compiler_cxx'): + log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) + + + # check if compiler supports gcc style automatic dependencies + # run on every extension so skip for known good compilers + if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or + 'g++' in self.compiler[0] or + 'clang' in self.compiler[0]): + self._auto_depends = True + elif os.name == 'posix': + import tempfile + import shutil + tmpdir = tempfile.mkdtemp() + try: + fn = os.path.join(tmpdir, "file.c") + with open(fn, "w") as f: + f.write("int a;\n") + self.compile([fn], output_dir=tmpdir, + extra_preargs=['-MMD', '-MF', fn + '.d']) + self._auto_depends = True + except CompileError: + self._auto_depends = False + finally: + shutil.rmtree(tmpdir) + + return + +replace_method(CCompiler, 'customize', CCompiler_customize) + +def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): + """ + Simple matching of version numbers, for use in CCompiler and FCompiler. + + Parameters + ---------- + pat : str, optional + A regular expression matching version numbers. + Default is ``r'[-.\\d]+'``. + ignore : str, optional + A regular expression matching patterns to skip. + Default is ``''``, in which case nothing is skipped. + start : str, optional + A regular expression matching the start of where to start looking + for version numbers. + Default is ``''``, in which case searching is started at the + beginning of the version string given to `matcher`. + + Returns + ------- + matcher : callable + A function that is appropriate to use as the ``.version_match`` + attribute of a `CCompiler` class. `matcher` takes a single parameter, + a version string. + + """ + def matcher(self, version_string): + # version string may appear in the second line, so getting rid + # of new lines: + version_string = version_string.replace('\n', ' ') + pos = 0 + if start: + m = re.match(start, version_string) + if not m: + return None + pos = m.end() + while True: + m = re.search(pat, version_string[pos:]) + if not m: + return None + if ignore and re.match(ignore, m.group(0)): + pos = m.end() + continue + break + return m.group(0) + return matcher + +def CCompiler_get_version(self, force=False, ok_status=[0]): + """ + Return compiler version, or None if compiler is not available. + + Parameters + ---------- + force : bool, optional + If True, force a new determination of the version, even if the + compiler already has a version attribute. Default is False. + ok_status : list of int, optional + The list of status values returned by the version look-up process + for which a version string is returned. If the status value is not + in `ok_status`, None is returned. Default is ``[0]``. + + Returns + ------- + version : str or None + Version string, in the format of `distutils.version.LooseVersion`. 
+ + """ + if not force and hasattr(self, 'version'): + return self.version + self.find_executables() + try: + version_cmd = self.version_cmd + except AttributeError: + return None + if not version_cmd or not version_cmd[0]: + return None + try: + matcher = self.version_match + except AttributeError: + try: + pat = self.version_pattern + except AttributeError: + return None + def matcher(version_string): + m = re.match(pat, version_string) + if not m: + return None + version = m.group('version') + return version + + try: + output = subprocess.check_output(version_cmd) + except subprocess.CalledProcessError as exc: + output = exc.output + status = exc.returncode + except OSError: + # match the historical returns for a parent + # exception class caught by exec_command() + status = 127 + output = b'' + else: + # output isn't actually a filepath but we do this + # for now to match previous distutils behavior + output = filepath_from_subprocess_output(output) + status = 0 + + version = None + if status in ok_status: + version = matcher(output) + if version: + version = LooseVersion(version) + self.version = version + return version + +replace_method(CCompiler, 'get_version', CCompiler_get_version) + +def CCompiler_cxx_compiler(self): + """ + Return the C++ compiler. + + Parameters + ---------- + None + + Returns + ------- + cxx : class instance + The C++ compiler, as a `CCompiler` instance. + + """ + if self.compiler_type in ('msvc', 'intelw', 'intelemw'): + return self + + cxx = copy(self) + cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:] + if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]: + # AIX needs the ld_so_aix script included with Python + cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ + + cxx.linker_so[2:] + else: + cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] + return cxx + +replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) + +compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', + "Intel C Compiler for 32-bit applications") +compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', + "Intel C Itanium Compiler for Itanium-based applications") +compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', + "Intel C Compiler for 64-bit applications") +compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', + "Intel C Compiler for 32-bit applications on Windows") +compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', + "Intel C Compiler for 64-bit applications on Windows") +compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', + "PathScale Compiler for SiCortex-based applications") +ccompiler._default_compilers += (('linux.*', 'intel'), + ('linux.*', 'intele'), + ('linux.*', 'intelem'), + ('linux.*', 'pathcc'), + ('nt', 'intelw'), + ('nt', 'intelemw')) + +if sys.platform == 'win32': + compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', + "Mingw32 port of GNU C Compiler for Win32"\ + "(for MSC built Python)") + if mingw32(): + # On windows platforms, we want to default to mingw32 (gcc) + # because msvc can't build blitz stuff. + log.info('Setting mingw32 as default compiler for nt.') + ccompiler._default_compilers = (('nt', 'mingw32'),) \ + + ccompiler._default_compilers + + +_distutils_new_compiler = new_compiler +def new_compiler (plat=None, + compiler=None, + verbose=0, + dry_run=0, + force=0): + # Try first C compilers from numpy.distutils. 
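For orientation, what the matcher built by `simple_version_match` above does, reduced to a loop: flatten the version banner, take the first version-looking token, and skip anything the ignore pattern rules out. The banner and the ignore pattern below are made up for illustration:

import re

def find_version(banner, pat=r'[-.\d]+', ignore=r'^77$'):
    banner = banner.replace('\n', ' ')   # version may sit on a later line
    pos = 0
    while True:
        m = re.search(pat, banner[pos:])
        if not m:
            return None
        if ignore and re.match(ignore, m.group(0)):
            pos += m.end()               # skip e.g. the '77' in 'Fortran 77'
            continue
        return m.group(0)

print(find_version('GNU Fortran 77 compiler, version 3.4.6'))  # -> 3.4.6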
+ if plat is None: + plat = os.name + try: + if compiler is None: + compiler = get_default_compiler(plat) + (module_name, class_name, long_description) = compiler_class[compiler] + except KeyError: + msg = "don't know how to compile C/C++ code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler" % compiler + raise DistutilsPlatformError(msg) + module_name = "numpy.distutils." + module_name + try: + __import__ (module_name) + except ImportError: + msg = str(get_exception()) + log.info('%s in numpy.distutils; trying from distutils', + str(msg)) + module_name = module_name[6:] + try: + __import__(module_name) + except ImportError: + msg = str(get_exception()) + raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ + module_name) + try: + module = sys.modules[module_name] + klass = vars(module)[class_name] + except KeyError: + raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + + "in module '%s'") % (class_name, module_name)) + compiler = klass(None, dry_run, force) + log.debug('new_compiler returns %s' % (klass)) + return compiler + +ccompiler.new_compiler = new_compiler + +_distutils_gen_lib_options = gen_lib_options +def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): + # the version of this function provided by CPython allows the following + # to return lists, which are unpacked automatically: + # - compiler.runtime_library_dir_option + # our version extends the behavior to: + # - compiler.library_dir_option + # - compiler.library_option + # - compiler.find_library_file + r = _distutils_gen_lib_options(compiler, library_dirs, + runtime_library_dirs, libraries) + lib_opts = [] + for i in r: + if is_sequence(i): + lib_opts.extend(list(i)) + else: + lib_opts.append(i) + return lib_opts +ccompiler.gen_lib_options = gen_lib_options + +# Also fix up the various compiler modules, which do +# from distutils.ccompiler import gen_lib_options +# Don't bother with mwerks, as we don't support Classic Mac. +for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: + _m = sys.modules.get('distutils.' + _cc + 'compiler') + if _m is not None: + setattr(_m, 'gen_lib_options', gen_lib_options) + + +##Fix distutils.util.split_quoted: +# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears +# that removing this fix causes f2py problems on Windows XP (see ticket #723). +# Specifically, on WinXP when gfortran is installed in a directory path, which +# contains spaces, then f2py is unable to find it. 
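The failure behind ticket #723, in miniature: a compiler installed under a path with spaces must survive word-splitting as a single argument. shlex shows the behaviour the patched split_quoted below restores (the path is illustrative); the numpy variant additionally keeps the quotes around quoted words that contain no whitespace:

import shlex

cmd = '"C:/Program Files/gfortran/bin/gfortran.exe" -O2 -c file.f'
print(shlex.split(cmd))
# -> ['C:/Program Files/gfortran/bin/gfortran.exe', '-O2', '-c', 'file.f']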
+import string +_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace) +_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'") +_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"') +_has_white_re = re.compile(r'\s') +def split_quoted(s): + s = s.strip() + words = [] + pos = 0 + + while s: + m = _wordchars_re.match(s, pos) + end = m.end() + if end == len(s): + words.append(s[:end]) + break + + if s[end] in string.whitespace: # unescaped, unquoted whitespace: now + words.append(s[:end]) # we definitely have a word delimiter + s = s[end:].lstrip() + pos = 0 + + elif s[end] == '\\': # preserve whatever is being escaped; + # will become part of the current word + s = s[:end] + s[end+1:] + pos = end+1 + + else: + if s[end] == "'": # slurp singly-quoted string + m = _squote_re.match(s, end) + elif s[end] == '"': # slurp doubly-quoted string + m = _dquote_re.match(s, end) + else: + raise RuntimeError("this can't happen (bad char '%c')" % s[end]) + + if m is None: + raise ValueError("bad string (mismatched %s quotes?)" % s[end]) + + (beg, end) = m.span() + if _has_white_re.search(s[beg+1:end-1]): + s = s[:beg] + s[beg+1:end-1] + s[end:] + pos = m.end() - 2 + else: + # Keeping quotes when a quoted word does not contain + # white-space. XXX: send a patch to distutils + pos = m.end() + + if pos >= len(s): + words.append(s) + break + + return words +ccompiler.split_quoted = split_quoted +##Fix distutils.util.split_quoted: diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/ccompiler.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/ccompiler.pyc new file mode 100644 index 0000000..ca528e8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/ccompiler.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/__init__.py new file mode 100644 index 0000000..76a2600 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/__init__.py @@ -0,0 +1,43 @@ +"""distutils.command + +Package containing implementation of all the standard Distutils +commands. 
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
+
+distutils_all = [ #'build_py',
+                  'clean',
+                  'install_clib',
+                  'install_scripts',
+                  'bdist',
+                  'bdist_dumb',
+                  'bdist_wininst',
+                ]
+
+__import__('distutils.command', globals(), locals(), distutils_all)
+
+__all__ = ['build',
+           'config_compiler',
+           'config',
+           'build_src',
+           'build_py',
+           'build_ext',
+           'build_clib',
+           'build_scripts',
+           'install',
+           'install_data',
+           'install_headers',
+           'install_lib',
+           'bdist_rpm',
+           'sdist',
+          ] + distutils_all
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/__init__.pyc
new file mode 100644
index 0000000..c304ddd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/autodist.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/autodist.py
new file mode 100644
index 0000000..d5e7896
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/autodist.py
@@ -0,0 +1,96 @@
+"""This module implements additional tests ala autoconf which can be useful.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+
+# We put them here since they could be easily reused outside numpy.distutils
+
+def check_inline(cmd):
+    """Return the inline identifier (may be empty)."""
+    cmd._check_compiler()
+    body = """
+#ifndef __cplusplus
+static %(inline)s int static_func (void)
+{
+    return 0;
+}
+%(inline)s int nostatic_func (void)
+{
+    return 0;
+}
+#endif"""
+
+    for kw in ['inline', '__inline__', '__inline']:
+        st = cmd.try_compile(body % {'inline': kw}, None, None)
+        if st:
+            return kw
+
+    return ''
+
+def check_restrict(cmd):
+    """Return the restrict identifier (may be empty)."""
+    cmd._check_compiler()
+    body = """
+static int static_func (char * %(restrict)s a)
+{
+    return 0;
+}
+"""
+
+    for kw in ['restrict', '__restrict__', '__restrict']:
+        st = cmd.try_compile(body % {'restrict': kw}, None, None)
+        if st:
+            return kw
+
+    return ''
+
+def check_compiler_gcc4(cmd):
+    """Return True if the C compiler is GCC 4.x."""
+    cmd._check_compiler()
+    body = """
+int
+main()
+{
+#if (! 
defined __GNUC__) || (__GNUC__ < 4) +#error gcc >= 4 required +#endif + return 0; +} +""" + return cmd.try_compile(body, None, None) + + +def check_gcc_function_attribute(cmd, attribute, name): + """Return True if the given function attribute is supported.""" + cmd._check_compiler() + body = """ +#pragma GCC diagnostic error "-Wattributes" +#pragma clang diagnostic error "-Wattributes" + +int %s %s(void*); + +int +main() +{ + return 0; +} +""" % (attribute, name) + return cmd.try_compile(body, None, None) != 0 + +def check_gcc_variable_attribute(cmd, attribute): + """Return True if the given variable attribute is supported.""" + cmd._check_compiler() + body = """ +#pragma GCC diagnostic error "-Wattributes" +#pragma clang diagnostic error "-Wattributes" + +int %s foo; + +int +main() +{ + return 0; +} +""" % (attribute, ) + return cmd.try_compile(body, None, None) != 0 diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/autodist.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/autodist.pyc new file mode 100644 index 0000000..c2c8604 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/autodist.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/bdist_rpm.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/bdist_rpm.py new file mode 100644 index 0000000..3e52a50 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/bdist_rpm.py @@ -0,0 +1,24 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +if 'setuptools' in sys.modules: + from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm +else: + from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm + +class bdist_rpm(old_bdist_rpm): + + def _make_spec_file(self): + spec_file = old_bdist_rpm._make_spec_file(self) + + # Replace hardcoded setup.py script name + # with the real setup script name. 
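The replacement below, reduced to its core: when the build was launched by a script not literally named setup.py, every reference in the generated spec file is patched to the real name. A standalone illustration (the spec lines are made up):

import os
import sys

spec_file = ['%define name example',
             'python setup.py build',
             'python setup.py install --root=%{buildroot}']
setup_py = os.path.basename(sys.argv[0])
if setup_py != 'setup.py':
    # same line-by-line substitution as _make_spec_file performs here
    spec_file = [line.replace('setup.py', setup_py) for line in spec_file]
print(spec_file)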
+ setup_py = os.path.basename(sys.argv[0]) + if setup_py == 'setup.py': + return spec_file + new_spec_file = [] + for line in spec_file: + line = line.replace('setup.py', setup_py) + new_spec_file.append(line) + return new_spec_file diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/bdist_rpm.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/bdist_rpm.pyc new file mode 100644 index 0000000..d9ca0fc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/bdist_rpm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build.py new file mode 100644 index 0000000..3d71015 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build.py @@ -0,0 +1,47 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +from distutils.command.build import build as old_build +from distutils.util import get_platform +from numpy.distutils.command.config_compiler import show_fortran_compilers + +class build(old_build): + + sub_commands = [('config_cc', lambda *args: True), + ('config_fc', lambda *args: True), + ('build_src', old_build.has_ext_modules), + ] + old_build.sub_commands + + user_options = old_build.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('parallel=', 'j', + "number of parallel jobs"), + ] + + help_options = old_build.help_options + [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + def initialize_options(self): + old_build.initialize_options(self) + self.fcompiler = None + self.parallel = None + + def finalize_options(self): + if self.parallel: + try: + self.parallel = int(self.parallel) + except ValueError: + raise ValueError("--parallel/-j argument must be an integer") + build_scripts = self.build_scripts + old_build.finalize_options(self) + plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) + if build_scripts is None: + self.build_scripts = os.path.join(self.build_base, + 'scripts' + plat_specifier) + + def run(self): + old_build.run(self) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build.pyc new file mode 100644 index 0000000..40391f3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_clib.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_clib.py new file mode 100644 index 0000000..910493a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_clib.py @@ -0,0 +1,323 @@ +""" Modified version of build_clib that handles fortran source files. 
+""" +from __future__ import division, absolute_import, print_function + +import os +from glob import glob +import shutil +from distutils.command.build_clib import build_clib as old_build_clib +from distutils.errors import DistutilsSetupError, DistutilsError, \ + DistutilsFileError + +from numpy.distutils import log +from distutils.dep_util import newer_group +from numpy.distutils.misc_util import filter_sources, has_f_sources,\ + has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \ + get_numpy_include_dirs + +# Fix Python distutils bug sf #1718574: +_l = old_build_clib.user_options +for _i in range(len(_l)): + if _l[_i][0] in ['build-clib', 'build-temp']: + _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] +# + + +class build_clib(old_build_clib): + + description = "build C/C++/F libraries used by Python extensions" + + user_options = old_build_clib.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('inplace', 'i', 'Build in-place'), + ('parallel=', 'j', + "number of parallel jobs"), + ] + + boolean_options = old_build_clib.boolean_options + ['inplace'] + + def initialize_options(self): + old_build_clib.initialize_options(self) + self.fcompiler = None + self.inplace = 0 + self.parallel = None + + def finalize_options(self): + if self.parallel: + try: + self.parallel = int(self.parallel) + except ValueError: + raise ValueError("--parallel/-j argument must be an integer") + old_build_clib.finalize_options(self) + self.set_undefined_options('build', ('parallel', 'parallel')) + + def have_f_sources(self): + for (lib_name, build_info) in self.libraries: + if has_f_sources(build_info.get('sources', [])): + return True + return False + + def have_cxx_sources(self): + for (lib_name, build_info) in self.libraries: + if has_cxx_sources(build_info.get('sources', [])): + return True + return False + + def run(self): + if not self.libraries: + return + + # Make sure that library sources are complete. + languages = [] + + # Make sure that extension sources are complete. 
+ self.run_command('build_src') + + for (lib_name, build_info) in self.libraries: + l = build_info.get('language', None) + if l and l not in languages: + languages.append(l) + + from distutils.ccompiler import new_compiler + self.compiler = new_compiler(compiler=self.compiler, + dry_run=self.dry_run, + force=self.force) + self.compiler.customize(self.distribution, + need_cxx=self.have_cxx_sources()) + + libraries = self.libraries + self.libraries = None + self.compiler.customize_cmd(self) + self.libraries = libraries + + self.compiler.show_customization() + + if self.have_f_sources(): + from numpy.distutils.fcompiler import new_fcompiler + self._f_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90='f90' in languages, + c_compiler=self.compiler) + if self._f_compiler is not None: + self._f_compiler.customize(self.distribution) + + libraries = self.libraries + self.libraries = None + self._f_compiler.customize_cmd(self) + self.libraries = libraries + + self._f_compiler.show_customization() + else: + self._f_compiler = None + + self.build_libraries(self.libraries) + + if self.inplace: + for l in self.distribution.installed_libraries: + libname = self.compiler.library_filename(l.name) + source = os.path.join(self.build_clib, libname) + target = os.path.join(l.target_dir, libname) + self.mkpath(l.target_dir) + shutil.copy(source, target) + + def get_source_files(self): + self.check_library_list(self.libraries) + filenames = [] + for lib in self.libraries: + filenames.extend(get_lib_source_files(lib)) + return filenames + + def build_libraries(self, libraries): + for (lib_name, build_info) in libraries: + self.build_a_library(build_info, lib_name, libraries) + + def build_a_library(self, build_info, lib_name, libraries): + # default compilers + compiler = self.compiler + fcompiler = self._f_compiler + + sources = build_info.get('sources') + if sources is None or not is_sequence(sources): + raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + + "'sources' must be present and must be " + + "a list of source filenames") % lib_name) + sources = list(sources) + + c_sources, cxx_sources, f_sources, fmodule_sources \ + = filter_sources(sources) + requiref90 = not not fmodule_sources or \ + build_info.get('language', 'c') == 'f90' + + # save source type information so that build_ext can use it. 
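The up-to-date check a few lines below hinges on `distutils.dep_util.newer_group`: rebuild only when some dependency is newer than the target, with `'newer'` making missing dependencies count as modified. In isolation (paths made up):

from distutils.dep_util import newer_group

deps = ['src/helpers.f90', 'src/helpers.h']
if newer_group(deps, 'build/libfortran_helpers.a', missing='newer'):
    print('rebuild needed')   # also true when the target does not exist yet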
+ source_languages = [] + if c_sources: + source_languages.append('c') + if cxx_sources: + source_languages.append('c++') + if requiref90: + source_languages.append('f90') + elif f_sources: + source_languages.append('f77') + build_info['source_languages'] = source_languages + + lib_file = compiler.library_filename(lib_name, + output_dir=self.build_clib) + depends = sources + build_info.get('depends', []) + if not (self.force or newer_group(depends, lib_file, 'newer')): + log.debug("skipping '%s' library (up-to-date)", lib_name) + return + else: + log.info("building '%s' library", lib_name) + + config_fc = build_info.get('config_fc', {}) + if fcompiler is not None and config_fc: + log.info('using additional config_fc from setup script ' + 'for fortran compiler: %s' + % (config_fc,)) + from numpy.distutils.fcompiler import new_fcompiler + fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=requiref90, + c_compiler=self.compiler) + if fcompiler is not None: + dist = self.distribution + base_config_fc = dist.get_option_dict('config_fc').copy() + base_config_fc.update(config_fc) + fcompiler.customize(base_config_fc) + + # check availability of Fortran compilers + if (f_sources or fmodule_sources) and fcompiler is None: + raise DistutilsError("library %s has Fortran sources" + " but no Fortran compiler found" % (lib_name)) + + if fcompiler is not None: + fcompiler.extra_f77_compile_args = build_info.get( + 'extra_f77_compile_args') or [] + fcompiler.extra_f90_compile_args = build_info.get( + 'extra_f90_compile_args') or [] + + macros = build_info.get('macros') + include_dirs = build_info.get('include_dirs') + if include_dirs is None: + include_dirs = [] + extra_postargs = build_info.get('extra_compiler_args') or [] + + include_dirs.extend(get_numpy_include_dirs()) + # where compiled F90 module files are: + module_dirs = build_info.get('module_dirs') or [] + module_build_dir = os.path.dirname(lib_file) + if requiref90: + self.mkpath(module_build_dir) + + if compiler.compiler_type == 'msvc': + # this hack works around the msvc compiler attributes + # problem, msvc uses its own convention :( + c_sources += cxx_sources + cxx_sources = [] + + objects = [] + if c_sources: + log.info("compiling C sources") + objects = compiler.compile(c_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs) + + if cxx_sources: + log.info("compiling C++ sources") + cxx_compiler = compiler.cxx_compiler() + cxx_objects = cxx_compiler.compile(cxx_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs) + objects.extend(cxx_objects) + + if f_sources or fmodule_sources: + extra_postargs = [] + f_objects = [] + + if requiref90: + if fcompiler.module_dir_switch is None: + existing_modules = glob('*.mod') + extra_postargs += fcompiler.module_options( + module_dirs, module_build_dir) + + if fmodule_sources: + log.info("compiling Fortran 90 module sources") + f_objects += fcompiler.compile(fmodule_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs) + + if requiref90 and self._f_compiler.module_dir_switch is None: + # move new compiled F90 module files to module_build_dir + for f in glob('*.mod'): + if f in existing_modules: + continue + t = os.path.join(module_build_dir, f) + if os.path.abspath(f) 
== os.path.abspath(t): + continue + if os.path.isfile(t): + os.remove(t) + try: + self.move_file(f, module_build_dir) + except DistutilsFileError: + log.warn('failed to move %r to %r' + % (f, module_build_dir)) + + if f_sources: + log.info("compiling Fortran sources") + f_objects += fcompiler.compile(f_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs) + else: + f_objects = [] + + if f_objects and not fcompiler.can_ccompiler_link(compiler): + # Default linker cannot link Fortran object files, and results + # need to be wrapped later. Instead of creating a real static + # library, just keep track of the object files. + listfn = os.path.join(self.build_clib, + lib_name + '.fobjects') + with open(listfn, 'w') as f: + f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) + + listfn = os.path.join(self.build_clib, + lib_name + '.cobjects') + with open(listfn, 'w') as f: + f.write("\n".join(os.path.abspath(obj) for obj in objects)) + + # create empty "library" file for dependency tracking + lib_fname = os.path.join(self.build_clib, + lib_name + compiler.static_lib_extension) + with open(lib_fname, 'wb') as f: + pass + else: + # assume that default linker is suitable for + # linking Fortran object files + objects.extend(f_objects) + compiler.create_static_lib(objects, lib_name, + output_dir=self.build_clib, + debug=self.debug) + + # fix library dependencies + clib_libraries = build_info.get('libraries', []) + for lname, binfo in libraries: + if lname in clib_libraries: + clib_libraries.extend(binfo.get('libraries', [])) + if clib_libraries: + build_info['libraries'] = clib_libraries diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_clib.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_clib.pyc new file mode 100644 index 0000000..d4624da Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_clib.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_ext.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_ext.py new file mode 100644 index 0000000..ab9d585 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_ext.py @@ -0,0 +1,598 @@ +""" Modified version of build_ext that handles fortran source files. 
+ +""" +from __future__ import division, absolute_import, print_function + +import os +import subprocess +from glob import glob + +from distutils.dep_util import newer_group +from distutils.command.build_ext import build_ext as old_build_ext +from distutils.errors import DistutilsFileError, DistutilsSetupError,\ + DistutilsError +from distutils.file_util import copy_file + +from numpy.distutils import log +from numpy.distutils.exec_command import filepath_from_subprocess_output +from numpy.distutils.system_info import combine_paths, system_info +from numpy.distutils.misc_util import filter_sources, has_f_sources, \ + has_cxx_sources, get_ext_source_files, \ + get_numpy_include_dirs, is_sequence, get_build_architecture, \ + msvc_version +from numpy.distutils.command.config_compiler import show_fortran_compilers + + + +class build_ext (old_build_ext): + + description = "build C/C++/F extensions (compile/link to build directory)" + + user_options = old_build_ext.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('parallel=', 'j', + "number of parallel jobs"), + ] + + help_options = old_build_ext.help_options + [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + def initialize_options(self): + old_build_ext.initialize_options(self) + self.fcompiler = None + self.parallel = None + + def finalize_options(self): + if self.parallel: + try: + self.parallel = int(self.parallel) + except ValueError: + raise ValueError("--parallel/-j argument must be an integer") + + # Ensure that self.include_dirs and self.distribution.include_dirs + # refer to the same list object. finalize_options will modify + # self.include_dirs, but self.distribution.include_dirs is used + # during the actual build. + # self.include_dirs is None unless paths are specified with + # --include-dirs. + # The include paths will be passed to the compiler in the order: + # numpy paths, --include-dirs paths, Python include path. + if isinstance(self.include_dirs, str): + self.include_dirs = self.include_dirs.split(os.pathsep) + incl_dirs = self.include_dirs or [] + if self.distribution.include_dirs is None: + self.distribution.include_dirs = [] + self.include_dirs = self.distribution.include_dirs + self.include_dirs.extend(incl_dirs) + + old_build_ext.finalize_options(self) + self.set_undefined_options('build', ('parallel', 'parallel')) + + def run(self): + if not self.extensions: + return + + # Make sure that extension sources are complete. + self.run_command('build_src') + + if self.distribution.has_c_libraries(): + if self.inplace: + if self.distribution.have_run.get('build_clib'): + log.warn('build_clib already run, it is too late to ' + 'ensure in-place build of build_clib') + build_clib = self.distribution.get_command_obj( + 'build_clib') + else: + build_clib = self.distribution.get_command_obj( + 'build_clib') + build_clib.inplace = 1 + build_clib.ensure_finalized() + build_clib.run() + self.distribution.have_run['build_clib'] = 1 + + else: + self.run_command('build_clib') + build_clib = self.get_finalized_command('build_clib') + self.library_dirs.append(build_clib.build_clib) + else: + build_clib = None + + # Not including C libraries to the list of + # extension libraries automatically to prevent + # bogus linking commands. Extensions must + # explicitly specify the C libraries that they use. 
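The explicit wiring the comment above asks for, as it would look in a numpy.distutils setup script: the extension names the helper library itself rather than inheriting it. Names here are illustrative:

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('example', parent_package, top_path)
    # built by build_clib ...
    config.add_library('fortran_helpers', sources=['src/helpers.f90'])
    # ... and linked in only because the extension asks for it
    config.add_extension('fastmod',
                         sources=['src/fastmod.c'],
                         libraries=['fortran_helpers'])
    return config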
+ + from distutils.ccompiler import new_compiler + from numpy.distutils.fcompiler import new_fcompiler + + compiler_type = self.compiler + # Initialize C compiler: + self.compiler = new_compiler(compiler=compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force) + self.compiler.customize(self.distribution) + self.compiler.customize_cmd(self) + self.compiler.show_customization() + + # Setup directory for storing generated extra DLL files on Windows + self.extra_dll_dir = os.path.join(self.build_temp, '.libs') + if not os.path.isdir(self.extra_dll_dir): + os.makedirs(self.extra_dll_dir) + + # Create mapping of libraries built by build_clib: + clibs = {} + if build_clib is not None: + for libname, build_info in build_clib.libraries or []: + if libname in clibs and clibs[libname] != build_info: + log.warn('library %r defined more than once,' + ' overwriting build_info\n%s... \nwith\n%s...' + % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) + clibs[libname] = build_info + # .. and distribution libraries: + for libname, build_info in self.distribution.libraries or []: + if libname in clibs: + # build_clib libraries have a precedence before distribution ones + continue + clibs[libname] = build_info + + # Determine if C++/Fortran 77/Fortran 90 compilers are needed. + # Update extension libraries, library_dirs, and macros. + all_languages = set() + for ext in self.extensions: + ext_languages = set() + c_libs = [] + c_lib_dirs = [] + macros = [] + for libname in ext.libraries: + if libname in clibs: + binfo = clibs[libname] + c_libs += binfo.get('libraries', []) + c_lib_dirs += binfo.get('library_dirs', []) + for m in binfo.get('macros', []): + if m not in macros: + macros.append(m) + + for l in clibs.get(libname, {}).get('source_languages', []): + ext_languages.add(l) + if c_libs: + new_c_libs = ext.libraries + c_libs + log.info('updating extension %r libraries from %r to %r' + % (ext.name, ext.libraries, new_c_libs)) + ext.libraries = new_c_libs + ext.library_dirs = ext.library_dirs + c_lib_dirs + if macros: + log.info('extending extension %r defined_macros with %r' + % (ext.name, macros)) + ext.define_macros = ext.define_macros + macros + + # determine extension languages + if has_f_sources(ext.sources): + ext_languages.add('f77') + if has_cxx_sources(ext.sources): + ext_languages.add('c++') + l = ext.language or self.compiler.detect_language(ext.sources) + if l: + ext_languages.add(l) + # reset language attribute for choosing proper linker + if 'c++' in ext_languages: + ext_language = 'c++' + elif 'f90' in ext_languages: + ext_language = 'f90' + elif 'f77' in ext_languages: + ext_language = 'f77' + else: + ext_language = 'c' # default + if l and l != ext_language and ext.language: + log.warn('resetting extension %r language from %r to %r.' 
%
+                         (ext.name, l, ext_language))
+            ext.language = ext_language
+            # global language
+            all_languages.update(ext_languages)
+
+        need_f90_compiler = 'f90' in all_languages
+        need_f77_compiler = 'f77' in all_languages
+        need_cxx_compiler = 'c++' in all_languages
+
+        # Initialize C++ compiler:
+        if need_cxx_compiler:
+            self._cxx_compiler = new_compiler(compiler=compiler_type,
+                                              verbose=self.verbose,
+                                              dry_run=self.dry_run,
+                                              force=self.force)
+            compiler = self._cxx_compiler
+            compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
+            compiler.customize_cmd(self)
+            compiler.show_customization()
+            self._cxx_compiler = compiler.cxx_compiler()
+        else:
+            self._cxx_compiler = None
+
+        # Initialize Fortran 77 compiler:
+        if need_f77_compiler:
+            ctype = self.fcompiler
+            self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
+                                               verbose=self.verbose,
+                                               dry_run=self.dry_run,
+                                               force=self.force,
+                                               requiref90=False,
+                                               c_compiler=self.compiler)
+            fcompiler = self._f77_compiler
+            if fcompiler:
+                ctype = fcompiler.compiler_type
+                fcompiler.customize(self.distribution)
+            if fcompiler and fcompiler.get_version():
+                fcompiler.customize_cmd(self)
+                fcompiler.show_customization()
+            else:
+                self.warn('f77_compiler=%s is not available.' %
+                          (ctype))
+                self._f77_compiler = None
+        else:
+            self._f77_compiler = None
+
+        # Initialize Fortran 90 compiler:
+        if need_f90_compiler:
+            ctype = self.fcompiler
+            self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
+                                               verbose=self.verbose,
+                                               dry_run=self.dry_run,
+                                               force=self.force,
+                                               requiref90=True,
+                                               c_compiler=self.compiler)
+            fcompiler = self._f90_compiler
+            if fcompiler:
+                ctype = fcompiler.compiler_type
+                fcompiler.customize(self.distribution)
+            if fcompiler and fcompiler.get_version():
+                fcompiler.customize_cmd(self)
+                fcompiler.show_customization()
+            else:
+                self.warn('f90_compiler=%s is not available.' %
+                          (ctype))
+                self._f90_compiler = None
+        else:
+            self._f90_compiler = None
+
+        # Build extensions
+        self.build_extensions()
+
+        # Copy over any extra DLL files
+        # FIXME: In the case where there are more than two packages,
+        # we blindly assume that both packages need all of the libraries,
+        # resulting in a larger wheel than is required. This should be fixed,
+        # but it's so rare that I won't bother to handle it.
+        pkg_roots = {
+            self.get_ext_fullname(ext.name).split('.')[0]
+            for ext in self.extensions
+        }
+        for pkg_root in pkg_roots:
+            shared_lib_dir = os.path.join(pkg_root, '.libs')
+            if not self.inplace:
+                shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir)
+            for fn in os.listdir(self.extra_dll_dir):
+                if not os.path.isdir(shared_lib_dir):
+                    os.makedirs(shared_lib_dir)
+                if not fn.lower().endswith('.dll'):
+                    continue
+                runtime_lib = os.path.join(self.extra_dll_dir, fn)
+                copy_file(runtime_lib, shared_lib_dir)
+
+    def swig_sources(self, sources):
+        # Do nothing. Swig sources have been handled in build_src command.
+ return sources + + def build_extension(self, ext): + sources = ext.sources + if sources is None or not is_sequence(sources): + raise DistutilsSetupError( + ("in 'ext_modules' option (extension '%s'), " + + "'sources' must be present and must be " + + "a list of source filenames") % ext.name) + sources = list(sources) + + if not sources: + return + + fullname = self.get_ext_fullname(ext.name) + if self.inplace: + modpath = fullname.split('.') + package = '.'.join(modpath[0:-1]) + base = modpath[-1] + build_py = self.get_finalized_command('build_py') + package_dir = build_py.get_package_dir(package) + ext_filename = os.path.join(package_dir, + self.get_ext_filename(base)) + else: + ext_filename = os.path.join(self.build_lib, + self.get_ext_filename(fullname)) + depends = sources + ext.depends + + if not (self.force or newer_group(depends, ext_filename, 'newer')): + log.debug("skipping '%s' extension (up-to-date)", ext.name) + return + else: + log.info("building '%s' extension", ext.name) + + extra_args = ext.extra_compile_args or [] + macros = ext.define_macros[:] + for undef in ext.undef_macros: + macros.append((undef,)) + + c_sources, cxx_sources, f_sources, fmodule_sources = \ + filter_sources(ext.sources) + + if self.compiler.compiler_type == 'msvc': + if cxx_sources: + # Needed to compile kiva.agg._agg extension. + extra_args.append('/Zm1000') + # this hack works around the msvc compiler attributes + # problem, msvc uses its own convention :( + c_sources += cxx_sources + cxx_sources = [] + + # Set Fortran/C++ compilers for compilation and linking. + if ext.language == 'f90': + fcompiler = self._f90_compiler + elif ext.language == 'f77': + fcompiler = self._f77_compiler + else: # in case ext.language is c++, for instance + fcompiler = self._f90_compiler or self._f77_compiler + if fcompiler is not None: + fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( + ext, 'extra_f77_compile_args') else [] + fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( + ext, 'extra_f90_compile_args') else [] + cxx_compiler = self._cxx_compiler + + # check for the availability of required compilers + if cxx_sources and cxx_compiler is None: + raise DistutilsError("extension %r has C++ sources" + "but no C++ compiler found" % (ext.name)) + if (f_sources or fmodule_sources) and fcompiler is None: + raise DistutilsError("extension %r has Fortran sources " + "but no Fortran compiler found" % (ext.name)) + if ext.language in ['f77', 'f90'] and fcompiler is None: + self.warn("extension %r has Fortran libraries " + "but no Fortran linker found, using default linker" % (ext.name)) + if ext.language == 'c++' and cxx_compiler is None: + self.warn("extension %r has C++ libraries " + "but no C++ linker found, using default linker" % (ext.name)) + + kws = {'depends': ext.depends} + output_dir = self.build_temp + + include_dirs = ext.include_dirs + get_numpy_include_dirs() + + c_objects = [] + if c_sources: + log.info("compiling C sources") + c_objects = self.compiler.compile(c_sources, + output_dir=output_dir, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args, + **kws) + + if cxx_sources: + log.info("compiling C++ sources") + c_objects += cxx_compiler.compile(cxx_sources, + output_dir=output_dir, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args, + **kws) + + extra_postargs = [] + f_objects = [] + if fmodule_sources: + log.info("compiling Fortran 90 module sources") + 
module_dirs = ext.module_dirs[:] + module_build_dir = os.path.join( + self.build_temp, os.path.dirname( + self.get_ext_filename(fullname))) + + self.mkpath(module_build_dir) + if fcompiler.module_dir_switch is None: + existing_modules = glob('*.mod') + extra_postargs += fcompiler.module_options( + module_dirs, module_build_dir) + f_objects += fcompiler.compile(fmodule_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs, + depends=ext.depends) + + if fcompiler.module_dir_switch is None: + for f in glob('*.mod'): + if f in existing_modules: + continue + t = os.path.join(module_build_dir, f) + if os.path.abspath(f) == os.path.abspath(t): + continue + if os.path.isfile(t): + os.remove(t) + try: + self.move_file(f, module_build_dir) + except DistutilsFileError: + log.warn('failed to move %r to %r' % + (f, module_build_dir)) + if f_sources: + log.info("compiling Fortran sources") + f_objects += fcompiler.compile(f_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs, + depends=ext.depends) + + if f_objects and not fcompiler.can_ccompiler_link(self.compiler): + unlinkable_fobjects = f_objects + objects = c_objects + else: + unlinkable_fobjects = [] + objects = c_objects + f_objects + + if ext.extra_objects: + objects.extend(ext.extra_objects) + extra_args = ext.extra_link_args or [] + libraries = self.get_libraries(ext)[:] + library_dirs = ext.library_dirs[:] + + linker = self.compiler.link_shared_object + # Always use system linker when using MSVC compiler. + if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): + # expand libraries with fcompiler libraries as we are + # not using fcompiler linker + self._libs_with_msvc_and_fortran( + fcompiler, libraries, library_dirs) + + elif ext.language in ['f77', 'f90'] and fcompiler is not None: + linker = fcompiler.link_shared_object + if ext.language == 'c++' and cxx_compiler is not None: + linker = cxx_compiler.link_shared_object + + if fcompiler is not None: + objects, libraries = self._process_unlinkable_fobjects( + objects, libraries, + fcompiler, library_dirs, + unlinkable_fobjects) + + linker(objects, ext_filename, + libraries=libraries, + library_dirs=library_dirs, + runtime_library_dirs=ext.runtime_library_dirs, + extra_postargs=extra_args, + export_symbols=self.get_export_symbols(ext), + debug=self.debug, + build_temp=self.build_temp, + target_lang=ext.language) + + def _add_dummy_mingwex_sym(self, c_sources): + build_src = self.get_finalized_command("build_src").build_src + build_clib = self.get_finalized_command("build_clib").build_clib + objects = self.compiler.compile([os.path.join(build_src, + "gfortran_vs2003_hack.c")], + output_dir=self.build_temp) + self.compiler.create_static_lib( + objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) + + def _process_unlinkable_fobjects(self, objects, libraries, + fcompiler, library_dirs, + unlinkable_fobjects): + libraries = list(libraries) + objects = list(objects) + unlinkable_fobjects = list(unlinkable_fobjects) + + # Expand possible fake static libraries to objects + for lib in list(libraries): + for libdir in library_dirs: + fake_lib = os.path.join(libdir, lib + '.fobjects') + if os.path.isfile(fake_lib): + # Replace fake static library + libraries.remove(lib) + with open(fake_lib, 'r') as f: + unlinkable_fobjects.extend(f.read().splitlines()) + + # Expand C objects + c_lib = os.path.join(libdir, 
lib + '.cobjects') + with open(c_lib, 'r') as f: + objects.extend(f.read().splitlines()) + + # Wrap unlinkable objects to a linkable one + if unlinkable_fobjects: + fobjects = [os.path.relpath(obj) for obj in unlinkable_fobjects] + wrapped = fcompiler.wrap_unlinkable_objects( + fobjects, output_dir=self.build_temp, + extra_dll_dir=self.extra_dll_dir) + objects.extend(wrapped) + + return objects, libraries + + def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, + c_library_dirs): + if fcompiler is None: + return + + for libname in c_libraries: + if libname.startswith('msvc'): + continue + fileexists = False + for libdir in c_library_dirs or []: + libfile = os.path.join(libdir, '%s.lib' % (libname)) + if os.path.isfile(libfile): + fileexists = True + break + if fileexists: + continue + # make g77-compiled static libs available to MSVC + fileexists = False + for libdir in c_library_dirs: + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) + if os.path.isfile(libfile): + # copy libname.a file to name.lib so that MSVC linker + # can find it + libfile2 = os.path.join(self.build_temp, libname + '.lib') + copy_file(libfile, libfile2) + if self.build_temp not in c_library_dirs: + c_library_dirs.append(self.build_temp) + fileexists = True + break + if fileexists: + continue + log.warn('could not find library %r in directories %s' + % (libname, c_library_dirs)) + + # Always use system linker when using MSVC compiler. + f_lib_dirs = [] + for dir in fcompiler.library_dirs: + # correct path when compiling in Cygwin but with normal Win + # Python + if dir.startswith('/usr/lib'): + try: + dir = subprocess.check_output(['cygpath', '-w', dir]) + except (OSError, subprocess.CalledProcessError): + pass + else: + dir = filepath_from_subprocess_output(dir) + f_lib_dirs.append(dir) + c_library_dirs.extend(f_lib_dirs) + + # make g77-compiled static libs available to MSVC + for lib in fcompiler.libraries: + if not lib.startswith('msvc'): + c_libraries.append(lib) + p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') + if p: + dst_name = os.path.join(self.build_temp, lib + '.lib') + if not os.path.isfile(dst_name): + copy_file(p[0], dst_name) + if self.build_temp not in c_library_dirs: + c_library_dirs.append(self.build_temp) + + def get_source_files(self): + self.check_extensions_list(self.extensions) + filenames = [] + for ext in self.extensions: + filenames.extend(get_ext_source_files(ext)) + return filenames + + def get_outputs(self): + self.check_extensions_list(self.extensions) + + outputs = [] + for ext in self.extensions: + if not ext.sources: + continue + fullname = self.get_ext_fullname(ext.name) + outputs.append(os.path.join(self.build_lib, + self.get_ext_filename(fullname))) + return outputs diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_ext.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_ext.pyc new file mode 100644 index 0000000..f1bd2c7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_ext.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_py.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_py.py new file mode 100644 index 0000000..54dcde4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_py.py @@ -0,0 +1,33 @@ +from __future__ import division, absolute_import, print_function + +from distutils.command.build_py import build_py as old_build_py +from 
numpy.distutils.misc_util import is_string + +class build_py(old_build_py): + + def run(self): + build_src = self.get_finalized_command('build_src') + if build_src.py_modules_dict and self.packages is None: + self.packages = list(build_src.py_modules_dict.keys ()) + old_build_py.run(self) + + def find_package_modules(self, package, package_dir): + modules = old_build_py.find_package_modules(self, package, package_dir) + + # Find build_src generated *.py files. + build_src = self.get_finalized_command('build_src') + modules += build_src.py_modules_dict.get(package, []) + + return modules + + def find_modules(self): + old_py_modules = self.py_modules[:] + new_py_modules = [_m for _m in self.py_modules if is_string(_m)] + self.py_modules[:] = new_py_modules + modules = old_build_py.find_modules(self) + self.py_modules[:] = old_py_modules + + return modules + + # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple + # and item[2] is source file. diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_py.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_py.pyc new file mode 100644 index 0000000..90fa401 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_py.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_scripts.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_scripts.py new file mode 100644 index 0000000..c8b25fc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_scripts.py @@ -0,0 +1,51 @@ +""" Modified version of build_scripts that handles building scripts from functions. + +""" +from __future__ import division, absolute_import, print_function + +from distutils.command.build_scripts import build_scripts as old_build_scripts +from numpy.distutils import log +from numpy.distutils.misc_util import is_string + +class build_scripts(old_build_scripts): + + def generate_scripts(self, scripts): + new_scripts = [] + func_scripts = [] + for script in scripts: + if is_string(script): + new_scripts.append(script) + else: + func_scripts.append(script) + if not func_scripts: + return new_scripts + + build_dir = self.build_dir + self.mkpath(build_dir) + for func in func_scripts: + script = func(build_dir) + if not script: + continue + if is_string(script): + log.info(" adding '%s' to scripts" % (script,)) + new_scripts.append(script) + else: + [log.info(" adding '%s' to scripts" % (s,)) for s in script] + new_scripts.extend(list(script)) + return new_scripts + + def run (self): + if not self.scripts: + return + + self.scripts = self.generate_scripts(self.scripts) + # Now make sure that the distribution object has this list of scripts. + # setuptools' develop command requires that this be a list of filenames, + # not functions. 
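+        # For example, a setup.py might pass scripts=[make_script], where
+        # make_script(build_dir) is a callable returning the path of a file
+        # it wrote under build_dir (make_script is a hypothetical name);
+        # generate_scripts() above has already replaced any such callables
+        # with plain filenames.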
+ self.distribution.scripts = self.scripts + + return old_build_scripts.run(self) + + def get_source_files(self): + from numpy.distutils.misc_util import get_script_files + return get_script_files(self.scripts) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_scripts.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_scripts.pyc new file mode 100644 index 0000000..592ae9f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_scripts.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_src.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_src.py new file mode 100644 index 0000000..668bc23 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_src.py @@ -0,0 +1,774 @@ +""" Build swig and f2py sources. +""" +from __future__ import division, absolute_import, print_function + +import os +import re +import sys +import shlex +import copy + +from distutils.command import build_ext +from distutils.dep_util import newer_group, newer +from distutils.util import get_platform +from distutils.errors import DistutilsError, DistutilsSetupError + + +# this import can't be done here, as it uses numpy stuff only available +# after it's installed +#import numpy.f2py +from numpy.distutils import log +from numpy.distutils.misc_util import ( + fortran_ext_match, appendpath, is_string, is_sequence, get_cmd + ) +from numpy.distutils.from_template import process_file as process_f_file +from numpy.distutils.conv_template import process_file as process_c_file + +def subst_vars(target, source, d): + """Substitute any occurrence of @foo@ by d['foo'] from source file into + target.""" + var = re.compile('@([a-zA-Z_]+)@') + fs = open(source, 'r') + try: + ft = open(target, 'w') + try: + for l in fs: + m = var.search(l) + if m: + ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) + else: + ft.write(l) + finally: + ft.close() + finally: + fs.close() + +class build_src(build_ext.build_ext): + + description = "build sources from SWIG, F2PY files or a function" + + user_options = [ + ('build-src=', 'd', "directory to \"build\" sources to"), + ('f2py-opts=', None, "list of f2py command line options"), + ('swig=', None, "path to the SWIG executable"), + ('swig-opts=', None, "list of SWIG command line options"), + ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), + ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete + ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete + ('force', 'f', "forcibly build everything (ignore file timestamps)"), + ('inplace', 'i', + "ignore build-lib and put compiled extensions into the source " + + "directory alongside your pure Python modules"), + ] + + boolean_options = ['force', 'inplace'] + + help_options = [] + + def initialize_options(self): + self.extensions = None + self.package = None + self.py_modules = None + self.py_modules_dict = None + self.build_src = None + self.build_lib = None + self.build_base = None + self.force = None + self.inplace = None + self.package_dir = None + self.f2pyflags = None # obsolete + self.f2py_opts = None + self.swigflags = None # obsolete + self.swig_opts = None + self.swig_cpp = None + self.swig = None + + def finalize_options(self): + self.set_undefined_options('build', + ('build_base', 'build_base'), + ('build_lib', 'build_lib'), + 
('force', 'force')) + if self.package is None: + self.package = self.distribution.ext_package + self.extensions = self.distribution.ext_modules + self.libraries = self.distribution.libraries or [] + self.py_modules = self.distribution.py_modules or [] + self.data_files = self.distribution.data_files or [] + + if self.build_src is None: + plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) + self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) + + # py_modules_dict is used in build_py.find_package_modules + self.py_modules_dict = {} + + if self.f2pyflags: + if self.f2py_opts: + log.warn('ignoring --f2pyflags as --f2py-opts already used') + else: + self.f2py_opts = self.f2pyflags + self.f2pyflags = None + if self.f2py_opts is None: + self.f2py_opts = [] + else: + self.f2py_opts = shlex.split(self.f2py_opts) + + if self.swigflags: + if self.swig_opts: + log.warn('ignoring --swigflags as --swig-opts already used') + else: + self.swig_opts = self.swigflags + self.swigflags = None + + if self.swig_opts is None: + self.swig_opts = [] + else: + self.swig_opts = shlex.split(self.swig_opts) + + # use options from build_ext command + build_ext = self.get_finalized_command('build_ext') + if self.inplace is None: + self.inplace = build_ext.inplace + if self.swig_cpp is None: + self.swig_cpp = build_ext.swig_cpp + for c in ['swig', 'swig_opt']: + o = '--'+c.replace('_', '-') + v = getattr(build_ext, c, None) + if v: + if getattr(self, c): + log.warn('both build_src and build_ext define %s option' % (o)) + else: + log.info('using "%s=%s" option from build_ext command' % (o, v)) + setattr(self, c, v) + + def run(self): + log.info("build_src") + if not (self.extensions or self.libraries): + return + self.build_sources() + + def build_sources(self): + + if self.inplace: + self.get_package_dir = \ + self.get_finalized_command('build_py').get_package_dir + + self.build_py_modules_sources() + + for libname_info in self.libraries: + self.build_library_sources(*libname_info) + + if self.extensions: + self.check_extensions_list(self.extensions) + + for ext in self.extensions: + self.build_extension_sources(ext) + + self.build_data_files_sources() + self.build_npy_pkg_config() + + def build_data_files_sources(self): + if not self.data_files: + return + log.info('building data_files sources') + from numpy.distutils.misc_util import get_data_files + new_data_files = [] + for data in self.data_files: + if isinstance(data, str): + new_data_files.append(data) + elif isinstance(data, tuple): + d, files = data + if self.inplace: + build_dir = self.get_package_dir('.'.join(d.split(os.sep))) + else: + build_dir = os.path.join(self.build_src, d) + funcs = [f for f in files if hasattr(f, '__call__')] + files = [f for f in files if not hasattr(f, '__call__')] + for f in funcs: + if f.__code__.co_argcount==1: + s = f(build_dir) + else: + s = f() + if s is not None: + if isinstance(s, list): + files.extend(s) + elif isinstance(s, str): + files.append(s) + else: + raise TypeError(repr(s)) + filenames = get_data_files((d, files)) + new_data_files.append((d, filenames)) + else: + raise TypeError(repr(data)) + self.data_files[:] = new_data_files + + + def _build_npy_pkg_config(self, info, gd): + template, install_dir, subst_dict = info + template_dir = os.path.dirname(template) + for k, v in gd.items(): + subst_dict[k] = v + + if self.inplace == 1: + generated_dir = os.path.join(template_dir, install_dir) + else: + generated_dir = os.path.join(self.build_src, template_dir, + install_dir) + generated = 
os.path.basename(os.path.splitext(template)[0])
+        generated_path = os.path.join(generated_dir, generated)
+        if not os.path.exists(generated_dir):
+            os.makedirs(generated_dir)
+
+        subst_vars(generated_path, template, subst_dict)
+
+        # Where to install relative to the install prefix
+        full_install_dir = os.path.join(template_dir, install_dir)
+        return full_install_dir, generated_path
+
+    def build_npy_pkg_config(self):
+        log.info('build_src: building npy-pkg config files')
+
+        # XXX: another ugly workaround to circumvent distutils brain damage. We
+        # need the install prefix here, but finalizing the options of the
+        # install command when only building sources causes an error. Instead,
+        # we copy the install command instance, and finalize the copy so that
+        # it does not disrupt how distutils wants to do things with the
+        # original install command instance.
+        install_cmd = copy.copy(get_cmd('install'))
+        if not install_cmd.finalized == 1:
+            install_cmd.finalize_options()
+        build_npkg = False
+        if self.inplace == 1:
+            top_prefix = '.'
+            build_npkg = True
+        elif hasattr(install_cmd, 'install_libbase'):
+            top_prefix = install_cmd.install_libbase
+            build_npkg = True
+
+        if build_npkg:
+            for pkg, infos in self.distribution.installed_pkg_config.items():
+                pkg_path = self.distribution.package_dir[pkg]
+                prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
+                d = {'prefix': prefix}
+                for info in infos:
+                    install_dir, generated = self._build_npy_pkg_config(info, d)
+                    self.distribution.data_files.append((install_dir,
+                        [generated]))
+
+    def build_py_modules_sources(self):
+        if not self.py_modules:
+            return
+        log.info('building py_modules sources')
+        new_py_modules = []
+        for source in self.py_modules:
+            if is_sequence(source) and len(source)==3:
+                package, module_base, source = source
+                if self.inplace:
+                    build_dir = self.get_package_dir(package)
+                else:
+                    build_dir = os.path.join(self.build_src,
+                                             os.path.join(*package.split('.')))
+                if hasattr(source, '__call__'):
+                    target = os.path.join(build_dir, module_base + '.py')
+                    source = source(target)
+                if source is None:
+                    continue
+                modules = [(package, module_base, source)]
+                if package not in self.py_modules_dict:
+                    self.py_modules_dict[package] = []
+                self.py_modules_dict[package] += modules
+            else:
+                new_py_modules.append(source)
+        self.py_modules[:] = new_py_modules
+
+    def build_library_sources(self, lib_name, build_info):
+        sources = list(build_info.get('sources', []))
+
+        if not sources:
+            return
+
+        log.info('building library "%s" sources' % (lib_name))
+
+        sources = self.generate_sources(sources, (lib_name, build_info))
+
+        sources = self.template_sources(sources, (lib_name, build_info))
+
+        sources, h_files = self.filter_h_files(sources)
+
+        if h_files:
+            log.info('%s - nothing done with h_files = %s',
+                     self.package, h_files)
+
+        #for f in h_files:
+        #    self.distribution.headers.append((lib_name,f))
+
+        build_info['sources'] = sources
+        return
+
+    def build_extension_sources(self, ext):
+
+        sources = list(ext.sources)
+
+        log.info('building extension "%s" sources' % (ext.name))
+
+        fullname = self.get_ext_fullname(ext.name)
+
+        modpath = fullname.split('.')
+        package = '.'.join(modpath[0:-1])
+
+        if self.inplace:
+            self.ext_target_dir = self.get_package_dir(package)
+
+        sources = self.generate_sources(sources, ext)
+        sources = self.template_sources(sources, ext)
+        sources = self.swig_sources(sources, ext)
+        sources = self.f2py_sources(sources, ext)
+        sources = self.pyrex_sources(sources, ext)
+
+        sources, py_files = 
self.filter_py_files(sources) + + if package not in self.py_modules_dict: + self.py_modules_dict[package] = [] + modules = [] + for f in py_files: + module = os.path.splitext(os.path.basename(f))[0] + modules.append((package, module, f)) + self.py_modules_dict[package] += modules + + sources, h_files = self.filter_h_files(sources) + + if h_files: + log.info('%s - nothing done with h_files = %s', + package, h_files) + #for f in h_files: + # self.distribution.headers.append((package,f)) + + ext.sources = sources + + def generate_sources(self, sources, extension): + new_sources = [] + func_sources = [] + for source in sources: + if is_string(source): + new_sources.append(source) + else: + func_sources.append(source) + if not func_sources: + return new_sources + if self.inplace and not is_sequence(extension): + build_dir = self.ext_target_dir + else: + if is_sequence(extension): + name = extension[0] + # if 'include_dirs' not in extension[1]: + # extension[1]['include_dirs'] = [] + # incl_dirs = extension[1]['include_dirs'] + else: + name = extension.name + # incl_dirs = extension.include_dirs + #if self.build_src not in incl_dirs: + # incl_dirs.append(self.build_src) + build_dir = os.path.join(*([self.build_src]\ + +name.split('.')[:-1])) + self.mkpath(build_dir) + for func in func_sources: + source = func(extension, build_dir) + if not source: + continue + if is_sequence(source): + [log.info(" adding '%s' to sources." % (s,)) for s in source] + new_sources.extend(source) + else: + log.info(" adding '%s' to sources." % (source,)) + new_sources.append(source) + + return new_sources + + def filter_py_files(self, sources): + return self.filter_files(sources, ['.py']) + + def filter_h_files(self, sources): + return self.filter_files(sources, ['.h', '.hpp', '.inc']) + + def filter_files(self, sources, exts = []): + new_sources = [] + files = [] + for source in sources: + (base, ext) = os.path.splitext(source) + if ext in exts: + files.append(source) + else: + new_sources.append(source) + return new_sources, files + + def template_sources(self, sources, extension): + new_sources = [] + if is_sequence(extension): + depends = extension[1].get('depends') + include_dirs = extension[1].get('include_dirs') + else: + depends = extension.depends + include_dirs = extension.include_dirs + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.src': # Template file + if self.inplace: + target_dir = os.path.dirname(base) + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + self.mkpath(target_dir) + target_file = os.path.join(target_dir, os.path.basename(base)) + if (self.force or newer_group([source] + depends, target_file)): + if _f_pyf_ext_match(base): + log.info("from_template:> %s" % (target_file)) + outstr = process_f_file(source) + else: + log.info("conv_template:> %s" % (target_file)) + outstr = process_c_file(source) + fid = open(target_file, 'w') + fid.write(outstr) + fid.close() + if _header_ext_match(target_file): + d = os.path.dirname(target_file) + if d not in include_dirs: + log.info(" adding '%s' to include_dirs." 
% (d)) + include_dirs.append(d) + new_sources.append(target_file) + else: + new_sources.append(source) + return new_sources + + def pyrex_sources(self, sources, extension): + """Pyrex not supported; this remains for Cython support (see below)""" + new_sources = [] + ext_name = extension.name.split('.')[-1] + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.pyx': + target_file = self.generate_a_pyrex_source(base, ext_name, + source, + extension) + new_sources.append(target_file) + else: + new_sources.append(source) + return new_sources + + def generate_a_pyrex_source(self, base, ext_name, source, extension): + """Pyrex is not supported, but some projects monkeypatch this method. + + That allows compiling Cython code, see gh-6955. + This method will remain here for compatibility reasons. + """ + return [] + + def f2py_sources(self, sources, extension): + new_sources = [] + f2py_sources = [] + f_sources = [] + f2py_targets = {} + target_dirs = [] + ext_name = extension.name.split('.')[-1] + skip_f2py = 0 + + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.pyf': # F2PY interface file + if self.inplace: + target_dir = os.path.dirname(base) + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + if os.path.isfile(source): + name = get_f2py_modulename(source) + if name != ext_name: + raise DistutilsSetupError('mismatch of extension names: %s ' + 'provides %r but expected %r' % ( + source, name, ext_name)) + target_file = os.path.join(target_dir, name+'module.c') + else: + log.debug(' source %s does not exist: skipping f2py\'ing.' \ + % (source)) + name = ext_name + skip_f2py = 1 + target_file = os.path.join(target_dir, name+'module.c') + if not os.path.isfile(target_file): + log.warn(' target %s does not exist:\n '\ + 'Assuming %smodule.c was generated with '\ + '"build_src --inplace" command.' \ + % (target_file, name)) + target_dir = os.path.dirname(base) + target_file = os.path.join(target_dir, name+'module.c') + if not os.path.isfile(target_file): + raise DistutilsSetupError("%r missing" % (target_file,)) + log.info(' Yes! Using %r as up-to-date target.' \ + % (target_file)) + target_dirs.append(target_dir) + f2py_sources.append(source) + f2py_targets[source] = target_file + new_sources.append(target_file) + elif fortran_ext_match(ext): + f_sources.append(source) + else: + new_sources.append(source) + + if not (f2py_sources or f_sources): + return new_sources + + for d in target_dirs: + self.mkpath(d) + + f2py_options = extension.f2py_options + self.f2py_opts + + if self.distribution.libraries: + for name, build_info in self.distribution.libraries: + if name in extension.libraries: + f2py_options.extend(build_info.get('f2py_options', [])) + + log.info("f2py options: %s" % (f2py_options)) + + if f2py_sources: + if len(f2py_sources) != 1: + raise DistutilsSetupError( + 'only one .pyf file is allowed per extension module but got'\ + ' more: %r' % (f2py_sources,)) + source = f2py_sources[0] + target_file = f2py_targets[source] + target_dir = os.path.dirname(target_file) or '.' 
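+            # A single extension gets exactly one .pyf interface file; f2py
+            # turns it into <name>module.c in target_dir, and the timestamp
+            # check below skips regeneration when the interface is unchanged.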
+ depends = [source] + extension.depends + if (self.force or newer_group(depends, target_file, 'newer')) \ + and not skip_f2py: + log.info("f2py: %s" % (source)) + import numpy.f2py + numpy.f2py.run_main(f2py_options + + ['--build-dir', target_dir, source]) + else: + log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) + else: + #XXX TODO: --inplace support for sdist command + if is_sequence(extension): + name = extension[0] + else: name = extension.name + target_dir = os.path.join(*([self.build_src]\ + +name.split('.')[:-1])) + target_file = os.path.join(target_dir, ext_name + 'module.c') + new_sources.append(target_file) + depends = f_sources + extension.depends + if (self.force or newer_group(depends, target_file, 'newer')) \ + and not skip_f2py: + log.info("f2py:> %s" % (target_file)) + self.mkpath(target_dir) + import numpy.f2py + numpy.f2py.run_main(f2py_options + ['--lower', + '--build-dir', target_dir]+\ + ['-m', ext_name]+f_sources) + else: + log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ + % (target_file)) + + if not os.path.isfile(target_file): + raise DistutilsError("f2py target file %r not generated" % (target_file,)) + + build_dir = os.path.join(self.build_src, target_dir) + target_c = os.path.join(build_dir, 'fortranobject.c') + target_h = os.path.join(build_dir, 'fortranobject.h') + log.info(" adding '%s' to sources." % (target_c)) + new_sources.append(target_c) + if build_dir not in extension.include_dirs: + log.info(" adding '%s' to include_dirs." % (build_dir)) + extension.include_dirs.append(build_dir) + + if not skip_f2py: + import numpy.f2py + d = os.path.dirname(numpy.f2py.__file__) + source_c = os.path.join(d, 'src', 'fortranobject.c') + source_h = os.path.join(d, 'src', 'fortranobject.h') + if newer(source_c, target_c) or newer(source_h, target_h): + self.mkpath(os.path.dirname(target_c)) + self.copy_file(source_c, target_c) + self.copy_file(source_h, target_h) + else: + if not os.path.isfile(target_c): + raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) + if not os.path.isfile(target_h): + raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) + + for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: + filename = os.path.join(target_dir, ext_name + name_ext) + if os.path.isfile(filename): + log.info(" adding '%s' to sources." % (filename)) + f_sources.append(filename) + + return new_sources + f_sources + + def swig_sources(self, sources, extension): + # Assuming SWIG 1.3.14 or later. 
See compatibility note in + # http://www.swig.org/Doc1.3/Python.html#Python_nn6 + + new_sources = [] + swig_sources = [] + swig_targets = {} + target_dirs = [] + py_files = [] # swig generated .py files + target_ext = '.c' + if '-c++' in extension.swig_opts: + typ = 'c++' + is_cpp = True + extension.swig_opts.remove('-c++') + elif self.swig_cpp: + typ = 'c++' + is_cpp = True + else: + typ = None + is_cpp = False + skip_swig = 0 + ext_name = extension.name.split('.')[-1] + + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.i': # SWIG interface file + # the code below assumes that the sources list + # contains not more than one .i SWIG interface file + if self.inplace: + target_dir = os.path.dirname(base) + py_target_dir = self.ext_target_dir + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + py_target_dir = target_dir + if os.path.isfile(source): + name = get_swig_modulename(source) + if name != ext_name[1:]: + raise DistutilsSetupError( + 'mismatch of extension names: %s provides %r' + ' but expected %r' % (source, name, ext_name[1:])) + if typ is None: + typ = get_swig_target(source) + is_cpp = typ=='c++' + else: + typ2 = get_swig_target(source) + if typ2 is None: + log.warn('source %r does not define swig target, assuming %s swig target' \ + % (source, typ)) + elif typ!=typ2: + log.warn('expected %r but source %r defines %r swig target' \ + % (typ, source, typ2)) + if typ2=='c++': + log.warn('resetting swig target to c++ (some targets may have .c extension)') + is_cpp = True + else: + log.warn('assuming that %r has c++ swig target' % (source)) + if is_cpp: + target_ext = '.cpp' + target_file = os.path.join(target_dir, '%s_wrap%s' \ + % (name, target_ext)) + else: + log.warn(' source %s does not exist: skipping swig\'ing.' \ + % (source)) + name = ext_name[1:] + skip_swig = 1 + target_file = _find_swig_target(target_dir, name) + if not os.path.isfile(target_file): + log.warn(' target %s does not exist:\n '\ + 'Assuming %s_wrap.{c,cpp} was generated with '\ + '"build_src --inplace" command.' \ + % (target_file, name)) + target_dir = os.path.dirname(base) + target_file = _find_swig_target(target_dir, name) + if not os.path.isfile(target_file): + raise DistutilsSetupError("%r missing" % (target_file,)) + log.warn(' Yes! Using %r as up-to-date target.' 
\ + % (target_file)) + target_dirs.append(target_dir) + new_sources.append(target_file) + py_files.append(os.path.join(py_target_dir, name+'.py')) + swig_sources.append(source) + swig_targets[source] = new_sources[-1] + else: + new_sources.append(source) + + if not swig_sources: + return new_sources + + if skip_swig: + return new_sources + py_files + + for d in target_dirs: + self.mkpath(d) + + swig = self.swig or self.find_swig() + swig_cmd = [swig, "-python"] + extension.swig_opts + if is_cpp: + swig_cmd.append('-c++') + for d in extension.include_dirs: + swig_cmd.append('-I'+d) + for source in swig_sources: + target = swig_targets[source] + depends = [source] + extension.depends + if self.force or newer_group(depends, target, 'newer'): + log.info("%s: %s" % (os.path.basename(swig) \ + + (is_cpp and '++' or ''), source)) + self.spawn(swig_cmd + self.swig_opts \ + + ["-o", target, '-outdir', py_target_dir, source]) + else: + log.debug(" skipping '%s' swig interface (up-to-date)" \ + % (source)) + + return new_sources + py_files + +_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match +_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match + +#### SWIG related auxiliary functions #### +_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', + re.I).match +_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search +_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search + +def get_swig_target(source): + f = open(source, 'r') + result = None + line = f.readline() + if _has_cpp_header(line): + result = 'c++' + if _has_c_header(line): + result = 'c' + f.close() + return result + +def get_swig_modulename(source): + f = open(source, 'r') + name = None + for line in f: + m = _swig_module_name_match(line) + if m: + name = m.group('name') + break + f.close() + return name + +def _find_swig_target(target_dir, name): + for ext in ['.cpp', '.c']: + target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) + if os.path.isfile(target): + break + return target + +#### F2PY related auxiliary functions #### + +_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', + re.I).match +_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' + r'__user__[\w_]*)', re.I).match + +def get_f2py_modulename(source): + name = None + f = open(source) + for line in f: + m = _f2py_module_name_match(line) + if m: + if _f2py_user_module_name_match(line): # skip *__user__* names + continue + name = m.group('name') + break + f.close() + return name + +########################################## diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_src.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_src.pyc new file mode 100644 index 0000000..40ecf21 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/build_src.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config.py new file mode 100644 index 0000000..d9b1e84 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config.py @@ -0,0 +1,506 @@ +# Added Fortran compiler support to config. Currently useful only for +# try_compile call. try_run works but is untested for most of Fortran +# compilers (they must define linker_exe first). 
+# Pearu Peterson
+from __future__ import division, absolute_import, print_function
+
+import os, signal
+import warnings
+import sys
+import subprocess
+
+from distutils.command.config import config as old_config
+from distutils.command.config import LANG_EXT
+from distutils import log
+from distutils.file_util import copy_file
+from distutils.ccompiler import CompileError, LinkError
+import distutils
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.mingw32ccompiler import generate_manifest
+from numpy.distutils.command.autodist import (check_gcc_function_attribute,
+                                              check_gcc_variable_attribute,
+                                              check_inline,
+                                              check_restrict,
+                                              check_compiler_gcc4)
+from numpy.distutils.compat import get_exception
+
+LANG_EXT['f77'] = '.f'
+LANG_EXT['f90'] = '.f90'
+
+class config(old_config):
+    old_config.user_options += [
+        ('fcompiler=', None, "specify the Fortran compiler type"),
+        ]
+
+    def initialize_options(self):
+        self.fcompiler = None
+        old_config.initialize_options(self)
+
+    def _check_compiler (self):
+        old_config._check_compiler(self)
+        from numpy.distutils.fcompiler import FCompiler, new_fcompiler
+
+        if sys.platform == 'win32' and (self.compiler.compiler_type in
+                                        ('msvc', 'intelw', 'intelemw')):
+            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
+            # initialize calls query_vcvarsall, which throws an IOError, and
+            # causes an error along the way without much information. We try to
+            # catch it here, hoping it is early enough, and print a helpful
+            # message instead of Error: None.
+            if not self.compiler.initialized:
+                try:
+                    self.compiler.initialize()
+                except IOError:
+                    e = get_exception()
+                    msg = """\
+Could not initialize compiler instance: do you have Visual Studio
+installed? If you are trying to build with MinGW, please use "python setup.py
+build -c mingw32" instead. If you have Visual Studio installed, check it is
+correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
+VS 2010 for >= 3.3).
+
+Original exception was: %s, and the Compiler class was %s
+============================================================================""" \
+                          % (e, self.compiler.__class__.__name__)
+                    print ("""\
+============================================================================""")
+                    raise distutils.errors.DistutilsPlatformError(msg)
+
+            # After MSVC is initialized, add an explicit /MANIFEST to linker
+            # flags. See issues gh-4245 and gh-4101 for details. Also
+            # relevant are issues 4431 and 16296 on the Python bug tracker.
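+            # When get_build_version() reports 10 or newer (VS 2010+), the
+            # loop below appends /MANIFEST to both shared-link flag lists so
+            # the linker still emits a manifest file.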
+ from distutils import msvc9compiler + if msvc9compiler.get_build_version() >= 10: + for ldflags in [self.compiler.ldflags_shared, + self.compiler.ldflags_shared_debug]: + if '/MANIFEST' not in ldflags: + ldflags.append('/MANIFEST') + + if not isinstance(self.fcompiler, FCompiler): + self.fcompiler = new_fcompiler(compiler=self.fcompiler, + dry_run=self.dry_run, force=1, + c_compiler=self.compiler) + if self.fcompiler is not None: + self.fcompiler.customize(self.distribution) + if self.fcompiler.get_version(): + self.fcompiler.customize_cmd(self) + self.fcompiler.show_customization() + + def _wrap_method(self, mth, lang, args): + from distutils.ccompiler import CompileError + from distutils.errors import DistutilsExecError + save_compiler = self.compiler + if lang in ['f77', 'f90']: + self.compiler = self.fcompiler + try: + ret = mth(*((self,)+args)) + except (DistutilsExecError, CompileError): + str(get_exception()) + self.compiler = save_compiler + raise CompileError + self.compiler = save_compiler + return ret + + def _compile (self, body, headers, include_dirs, lang): + src, obj = self._wrap_method(old_config._compile, lang, + (body, headers, include_dirs, lang)) + # _compile in unixcompiler.py sometimes creates .d dependency files. + # Clean them up. + self.temp_files.append(obj + '.d') + return src, obj + + def _link (self, body, + headers, include_dirs, + libraries, library_dirs, lang): + if self.compiler.compiler_type=='msvc': + libraries = (libraries or [])[:] + library_dirs = (library_dirs or [])[:] + if lang in ['f77', 'f90']: + lang = 'c' # always use system linker when using MSVC compiler + if self.fcompiler: + for d in self.fcompiler.library_dirs or []: + # correct path when compiling in Cygwin but with + # normal Win Python + if d.startswith('/usr/lib'): + try: + d = subprocess.check_output(['cygpath', + '-w', d]) + except (OSError, subprocess.CalledProcessError): + pass + else: + d = filepath_from_subprocess_output(d) + library_dirs.append(d) + for libname in self.fcompiler.libraries or []: + if libname not in libraries: + libraries.append(libname) + for libname in libraries: + if libname.startswith('msvc'): continue + fileexists = False + for libdir in library_dirs or []: + libfile = os.path.join(libdir, '%s.lib' % (libname)) + if os.path.isfile(libfile): + fileexists = True + break + if fileexists: continue + # make g77-compiled static libs available to MSVC + fileexists = False + for libdir in library_dirs: + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) + if os.path.isfile(libfile): + # copy libname.a file to name.lib so that MSVC linker + # can find it + libfile2 = os.path.join(libdir, '%s.lib' % (libname)) + copy_file(libfile, libfile2) + self.temp_files.append(libfile2) + fileexists = True + break + if fileexists: continue + log.warn('could not find library %r in directories %s' \ + % (libname, library_dirs)) + elif self.compiler.compiler_type == 'mingw32': + generate_manifest(self) + return self._wrap_method(old_config._link, lang, + (body, headers, include_dirs, + libraries, library_dirs, lang)) + + def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): + self._check_compiler() + return self.try_compile( + "/* we need a dummy line to make distutils happy */", + [header], include_dirs) + + def check_decl(self, symbol, + headers=None, include_dirs=None): + self._check_compiler() + body = """ +int main(void) +{ +#ifndef %s + (void) %s; +#endif + ; + return 0; +}""" % (symbol, symbol) + + return self.try_compile(body, headers, 
include_dirs) + + def check_macro_true(self, symbol, + headers=None, include_dirs=None): + self._check_compiler() + body = """ +int main(void) +{ +#if %s +#else +#error false or undefined macro +#endif + ; + return 0; +}""" % (symbol,) + + return self.try_compile(body, headers, include_dirs) + + def check_type(self, type_name, headers=None, include_dirs=None, + library_dirs=None): + """Check type availability. Return True if the type can be compiled, + False otherwise""" + self._check_compiler() + + # First check the type can be compiled + body = r""" +int main(void) { + if ((%(name)s *) 0) + return 0; + if (sizeof (%(name)s)) + return 0; +} +""" % {'name': type_name} + + st = False + try: + try: + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + st = True + except distutils.errors.CompileError: + st = False + finally: + self._clean() + + return st + + def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): + """Check size of a given type.""" + self._check_compiler() + + # First check the type can be compiled + body = r""" +typedef %(type)s npy_check_sizeof_type; +int main (void) +{ + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; + test_array [0] = 0 + + ; + return 0; +} +""" + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + self._clean() + + if expected: + body = r""" +typedef %(type)s npy_check_sizeof_type; +int main (void) +{ + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; + test_array [0] = 0 + + ; + return 0; +} +""" + for size in expected: + try: + self._compile(body % {'type': type_name, 'size': size}, + headers, include_dirs, 'c') + self._clean() + return size + except CompileError: + pass + + # this fails to *compile* if size > sizeof(type) + body = r""" +typedef %(type)s npy_check_sizeof_type; +int main (void) +{ + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; + test_array [0] = 0 + + ; + return 0; +} +""" + + # The principle is simple: we first find low and high bounds of size + # for the type, where low/high are looked up on a log scale. Then, we + # do a binary search to find the exact size between low and high + low = 0 + mid = 0 + while True: + try: + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + break + except CompileError: + #log.info("failure to test for bound %d" % mid) + low = mid + 1 + mid = 2 * mid + 1 + + high = mid + # Binary search: + while low != high: + mid = (high - low) // 2 + low + try: + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + high = mid + except CompileError: + low = mid + 1 + return low + + def check_func(self, func, + headers=None, include_dirs=None, + libraries=None, library_dirs=None, + decl=False, call=False, call_args=None): + # clean up distutils's config a bit: add void to main(), and + # return a value. + self._check_compiler() + body = [] + if decl: + if type(decl) == str: + body.append(decl) + else: + body.append("int %s (void);" % func) + # Handle MSVC intrinsics: force MS compiler to make a function call. + # Useful to test for some functions when built with optimization on, to + # avoid build error because the intrinsic and our 'fake' test + # declaration do not match. 
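+        # The probe that gets compiled looks roughly like this (for a
+        # hypothetical function 'foo' with decl=True and call=True):
+        #
+        #   int foo (void);
+        #   #ifdef _MSC_VER
+        #   #pragma function(foo)
+        #   #endif
+        #   int main (void) {
+        #     foo();
+        #     return 0;
+        #   }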
+ body.append("#ifdef _MSC_VER") + body.append("#pragma function(%s)" % func) + body.append("#endif") + body.append("int main (void) {") + if call: + if call_args is None: + call_args = '' + body.append(" %s(%s);" % (func, call_args)) + else: + body.append(" %s;" % func) + body.append(" return 0;") + body.append("}") + body = '\n'.join(body) + "\n" + + return self.try_link(body, headers, include_dirs, + libraries, library_dirs) + + def check_funcs_once(self, funcs, + headers=None, include_dirs=None, + libraries=None, library_dirs=None, + decl=False, call=False, call_args=None): + """Check a list of functions at once. + + This is useful to speed up things, since all the functions in the funcs + list will be put in one compilation unit. + + Arguments + --------- + funcs : seq + list of functions to test + include_dirs : seq + list of header paths + libraries : seq + list of libraries to link the code snippet to + library_dirs : seq + list of library paths + decl : dict + for every (key, value), the declaration in the value will be + used for function in key. If a function is not in the + dictionary, no declaration will be used. + call : dict + for every item (f, value), if the value is True, a call will be + done to the function f. + """ + self._check_compiler() + body = [] + if decl: + for f, v in decl.items(): + if v: + body.append("int %s (void);" % f) + + # Handle MS intrinsics. See check_func for more info. + body.append("#ifdef _MSC_VER") + for func in funcs: + body.append("#pragma function(%s)" % func) + body.append("#endif") + + body.append("int main (void) {") + if call: + for f in funcs: + if f in call and call[f]: + if not (call_args and f in call_args and call_args[f]): + args = '' + else: + args = call_args[f] + body.append(" %s(%s);" % (f, args)) + else: + body.append(" %s;" % f) + else: + for f in funcs: + body.append(" %s;" % f) + body.append(" return 0;") + body.append("}") + body = '\n'.join(body) + "\n" + + return self.try_link(body, headers, include_dirs, + libraries, library_dirs) + + def check_inline(self): + """Return the inline keyword recognized by the compiler, empty string + otherwise.""" + return check_inline(self) + + def check_restrict(self): + """Return the restrict keyword recognized by the compiler, empty string + otherwise.""" + return check_restrict(self) + + def check_compiler_gcc4(self): + """Return True if the C compiler is gcc >= 4.""" + return check_compiler_gcc4(self) + + def check_gcc_function_attribute(self, attribute, name): + return check_gcc_function_attribute(self, attribute, name) + + def check_gcc_variable_attribute(self, attribute): + return check_gcc_variable_attribute(self, attribute) + + def get_output(self, body, headers=None, include_dirs=None, + libraries=None, library_dirs=None, + lang="c", use_tee=None): + """Try to compile, link to an executable, and run a program + built from 'body' and 'headers'. Returns the exit status code + of the program and its output. 
+ """ + # 2008-11-16, RemoveMe + warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ + "Usage of get_output is deprecated: please do not \n" \ + "use it anymore, and avoid configuration checks \n" \ + "involving running executable on the target machine.\n" \ + "+++++++++++++++++++++++++++++++++++++++++++++++++\n", + DeprecationWarning, stacklevel=2) + self._check_compiler() + exitcode, output = 255, '' + try: + grabber = GrabStdout() + try: + src, obj, exe = self._link(body, headers, include_dirs, + libraries, library_dirs, lang) + grabber.restore() + except Exception: + output = grabber.data + grabber.restore() + raise + exe = os.path.join('.', exe) + try: + # specify cwd arg for consistency with + # historic usage pattern of exec_command() + # also, note that exe appears to be a string, + # which exec_command() handled, but we now + # use a list for check_output() -- this assumes + # that exe is always a single command + output = subprocess.check_output([exe], cwd='.') + except subprocess.CalledProcessError as exc: + exitstatus = exc.returncode + output = '' + except OSError: + # preserve the EnvironmentError exit status + # used historically in exec_command() + exitstatus = 127 + output = '' + else: + output = filepath_from_subprocess_output(output) + if hasattr(os, 'WEXITSTATUS'): + exitcode = os.WEXITSTATUS(exitstatus) + if os.WIFSIGNALED(exitstatus): + sig = os.WTERMSIG(exitstatus) + log.error('subprocess exited with signal %d' % (sig,)) + if sig == signal.SIGINT: + # control-C + raise KeyboardInterrupt + else: + exitcode = exitstatus + log.info("success!") + except (CompileError, LinkError): + log.info("failure.") + self._clean() + return exitcode, output + +class GrabStdout(object): + + def __init__(self): + self.sys_stdout = sys.stdout + self.data = '' + sys.stdout = self + + def write (self, data): + self.sys_stdout.write(data) + self.data += data + + def flush (self): + self.sys_stdout.flush() + + def restore(self): + sys.stdout = self.sys_stdout diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config.pyc new file mode 100644 index 0000000..b3fc5f4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config_compiler.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config_compiler.py new file mode 100644 index 0000000..bf17006 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config_compiler.py @@ -0,0 +1,128 @@ +from __future__ import division, absolute_import, print_function + +from distutils.core import Command +from numpy.distutils import log + +#XXX: Linker flags + +def show_fortran_compilers(_cache=None): + # Using cache to prevent infinite recursion. + if _cache: + return + elif _cache is None: + _cache = [] + _cache.append(1) + from numpy.distutils.fcompiler import show_fcompilers + import distutils.core + dist = distutils.core._setup_distribution + show_fcompilers(dist) + +class config_fc(Command): + """ Distutils command to hold user specified options + to Fortran compilers. + + config_fc command is used by the FCompiler.customize() method. 
+    """
+
+    description = "specify Fortran 77/Fortran 90 compiler information"
+
+    user_options = [
+        ('fcompiler=', None, "specify Fortran compiler type"),
+        ('f77exec=', None, "specify F77 compiler command"),
+        ('f90exec=', None, "specify F90 compiler command"),
+        ('f77flags=', None, "specify F77 compiler flags"),
+        ('f90flags=', None, "specify F90 compiler flags"),
+        ('opt=', None, "specify optimization flags"),
+        ('arch=', None, "specify architecture specific optimization flags"),
+        ('debug', 'g', "compile with debugging information"),
+        ('noopt', None, "compile without optimization"),
+        ('noarch', None, "compile without arch-dependent optimization"),
+        ]
+
+    help_options = [
+        ('help-fcompiler', None, "list available Fortran compilers",
+         show_fortran_compilers),
+        ]
+
+    boolean_options = ['debug', 'noopt', 'noarch']
+
+    def initialize_options(self):
+        self.fcompiler = None
+        self.f77exec = None
+        self.f90exec = None
+        self.f77flags = None
+        self.f90flags = None
+        self.opt = None
+        self.arch = None
+        self.debug = None
+        self.noopt = None
+        self.noarch = None
+
+    def finalize_options(self):
+        log.info('unifying config_fc, config, build_clib, build_ext, build commands --fcompiler options')
+        build_clib = self.get_finalized_command('build_clib')
+        build_ext = self.get_finalized_command('build_ext')
+        config = self.get_finalized_command('config')
+        build = self.get_finalized_command('build')
+        cmd_list = [self, config, build_clib, build_ext, build]
+        for a in ['fcompiler']:
+            l = []
+            for c in cmd_list:
+                v = getattr(c, a)
+                if v is not None:
+                    if not isinstance(v, str): v = v.compiler_type
+                    if v not in l: l.append(v)
+            if not l: v1 = None
+            else: v1 = l[0]
+            if len(l)>1:
+                log.warn('  commands have different --%s options: %s'\
+                         ', using first in list as default' % (a, l))
+            if v1:
+                for c in cmd_list:
+                    if getattr(c, a) is None: setattr(c, a, v1)
+
+    def run(self):
+        # Do nothing.
+        return
+
+class config_cc(Command):
+    """ Distutils command to hold user specified options
+    to C/C++ compilers.
+    """
+
+    description = "specify C/C++ compiler information"
+
+    user_options = [
+        ('compiler=', None, "specify C/C++ compiler type"),
+        ]
+
+    def initialize_options(self):
+        self.compiler = None
+
+    def finalize_options(self):
+        log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options')
+        build_clib = self.get_finalized_command('build_clib')
+        build_ext = self.get_finalized_command('build_ext')
+        config = self.get_finalized_command('config')
+        build = self.get_finalized_command('build')
+        cmd_list = [self, config, build_clib, build_ext, build]
+        for a in ['compiler']:
+            l = []
+            for c in cmd_list:
+                v = getattr(c, a)
+                if v is not None:
+                    if not isinstance(v, str): v = v.compiler_type
+                    if v not in l: l.append(v)
+            if not l: v1 = None
+            else: v1 = l[0]
+            if len(l)>1:
+                log.warn('  commands have different --%s options: %s'\
+                         ', using first in list as default' % (a, l))
+            if v1:
+                for c in cmd_list:
+                    if getattr(c, a) is None: setattr(c, a, v1)
+        return
+
+    def run(self):
+        # Do nothing.
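+        # All the work happens in finalize_options(), which pushes a single
+        # --compiler choice onto the config, build_clib, build_ext and build
+        # commands; run() itself has nothing left to do.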
+ return diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config_compiler.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config_compiler.pyc new file mode 100644 index 0000000..657be62 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/config_compiler.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/develop.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/develop.py new file mode 100644 index 0000000..1410ab2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/develop.py @@ -0,0 +1,17 @@ +""" Override the develop command from setuptools so we can ensure that our +generated files (from build_src or build_scripts) are properly converted to real +files with filenames. + +""" +from __future__ import division, absolute_import, print_function + +from setuptools.command.develop import develop as old_develop + +class develop(old_develop): + __doc__ = old_develop.__doc__ + def install_for_development(self): + # Build sources in-place, too. + self.reinitialize_command('build_src', inplace=1) + # Make sure scripts are built. + self.run_command('build_scripts') + old_develop.install_for_development(self) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/develop.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/develop.pyc new file mode 100644 index 0000000..b4fbe06 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/develop.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/egg_info.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/egg_info.py new file mode 100644 index 0000000..18673ec --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/egg_info.py @@ -0,0 +1,27 @@ +from __future__ import division, absolute_import, print_function + +import sys + +from setuptools.command.egg_info import egg_info as _egg_info + +class egg_info(_egg_info): + def run(self): + if 'sdist' in sys.argv: + import warnings + import textwrap + msg = textwrap.dedent(""" + `build_src` is being run, this may lead to missing + files in your sdist! You want to use distutils.sdist + instead of the setuptools version: + + from distutils.command.sdist import sdist + cmdclass={'sdist': sdist}" + + See numpy's setup.py or gh-7131 for details.""") + warnings.warn(msg, UserWarning, stacklevel=2) + + # We need to ensure that build_src has been executed in order to give + # setuptools' egg_info command real filenames instead of functions which + # generate files. 
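+        # (Running build_src first converts generator functions into real
+        # files on disk, which egg_info can then record in SOURCES.txt.)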
+ self.run_command("build_src") + _egg_info.run(self) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/egg_info.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/egg_info.pyc new file mode 100644 index 0000000..555ed96 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/egg_info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install.py new file mode 100644 index 0000000..a1dd477 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install.py @@ -0,0 +1,82 @@ +from __future__ import division, absolute_import, print_function + +import sys +if 'setuptools' in sys.modules: + import setuptools.command.install as old_install_mod + have_setuptools = True +else: + import distutils.command.install as old_install_mod + have_setuptools = False +from distutils.file_util import write_file + +old_install = old_install_mod.install + +class install(old_install): + + # Always run install_clib - the command is cheap, so no need to bypass it; + # but it's not run by setuptools -- so it's run again in install_data + sub_commands = old_install.sub_commands + [ + ('install_clib', lambda x: True) + ] + + def finalize_options (self): + old_install.finalize_options(self) + self.install_lib = self.install_libbase + + def setuptools_run(self): + """ The setuptools version of the .run() method. + + We must pull in the entire code so we can override the level used in the + _getframe() call since we wrap this call by one more level. + """ + from distutils.command.install import install as distutils_install + + # Explicit request for old-style install? Just do it + if self.old_and_unmanageable or self.single_version_externally_managed: + return distutils_install.run(self) + + # Attempt to detect whether we were called from setup() or by another + # command. If we were called by setup(), our caller will be the + # 'run_command' method in 'distutils.dist', and *its* caller will be + # the 'run_commands' method. If we were called any other way, our + # immediate caller *might* be 'run_command', but it won't have been + # called by 'run_commands'. This is slightly kludgy, but seems to + # work. + # + caller = sys._getframe(3) + caller_module = caller.f_globals.get('__name__', '') + caller_name = caller.f_code.co_name + + if caller_module != 'distutils.dist' or caller_name!='run_commands': + # We weren't called from the command line or setup(), so we + # should run in backward-compatibility mode to support bdist_* + # commands. + distutils_install.run(self) + else: + self.do_egg_install() + + def run(self): + if not have_setuptools: + r = old_install.run(self) + else: + r = self.setuptools_run() + if self.record: + # bdist_rpm fails when INSTALLED_FILES contains + # paths with spaces. Such paths must be enclosed + # with double-quotes. 
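+            # For example (illustrative path only), a record line such as
+            #   /usr/lib/python2.7/site-packages/my pkg/data.txt
+            # is rewritten below as
+            #   "/usr/lib/python2.7/site-packages/my pkg/data.txt"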
+ f = open(self.record, 'r') + lines = [] + need_rewrite = False + for l in f: + l = l.rstrip() + if ' ' in l: + need_rewrite = True + l = '"%s"' % (l) + lines.append(l) + f.close() + if need_rewrite: + self.execute(write_file, + (self.record, lines), + "re-writing list of installed files to '%s'" % + self.record) + return r diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install.pyc new file mode 100644 index 0000000..a07c226 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_clib.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_clib.py new file mode 100644 index 0000000..662aa00 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_clib.py @@ -0,0 +1,39 @@ +from __future__ import division, absolute_import, print_function + +import os +from distutils.core import Command +from distutils.ccompiler import new_compiler +from numpy.distutils.misc_util import get_cmd + +class install_clib(Command): + description = "Command to install installable C libraries" + + user_options = [] + + def initialize_options(self): + self.install_dir = None + self.outfiles = [] + + def finalize_options(self): + self.set_undefined_options('install', ('install_lib', 'install_dir')) + + def run (self): + build_clib_cmd = get_cmd("build_clib") + build_dir = build_clib_cmd.build_clib + + # We need the compiler to get the library name -> filename association + if not build_clib_cmd.compiler: + compiler = new_compiler(compiler=None) + compiler.customize(self.distribution) + else: + compiler = build_clib_cmd.compiler + + for l in self.distribution.installed_libraries: + target_dir = os.path.join(self.install_dir, l.target_dir) + name = compiler.library_filename(l.name) + source = os.path.join(build_dir, name) + self.mkpath(target_dir) + self.outfiles.append(self.copy_file(source, target_dir)[0]) + + def get_outputs(self): + return self.outfiles diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_clib.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_clib.pyc new file mode 100644 index 0000000..cb2b4f6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_clib.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_data.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_data.py new file mode 100644 index 0000000..996cf7e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_data.py @@ -0,0 +1,26 @@ +from __future__ import division, absolute_import, print_function + +import sys +have_setuptools = ('setuptools' in sys.modules) + +from distutils.command.install_data import install_data as old_install_data + +#data installer with improved intelligence over distutils +#data files are copied into the project directory instead +#of willy-nilly +class install_data (old_install_data): + + def run(self): + old_install_data.run(self) + + if have_setuptools: + # Run install_clib again, since setuptools does not run sub-commands + # of install automatically + self.run_command('install_clib') + + def finalize_options (self): + self.set_undefined_options('install', + ('install_lib', 'install_dir'), + 
('root', 'root'), + ('force', 'force'), + ) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_data.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_data.pyc new file mode 100644 index 0000000..a0dadbd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_data.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_headers.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_headers.py new file mode 100644 index 0000000..f3f58aa --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_headers.py @@ -0,0 +1,27 @@ +from __future__ import division, absolute_import, print_function + +import os +from distutils.command.install_headers import install_headers as old_install_headers + +class install_headers (old_install_headers): + + def run (self): + headers = self.distribution.headers + if not headers: + return + + prefix = os.path.dirname(self.install_dir) + for header in headers: + if isinstance(header, tuple): + # Kind of a hack, but I don't know where else to change this... + if header[0] == 'numpy.core': + header = ('numpy', header[1]) + if os.path.splitext(header[1])[1] == '.inc': + continue + d = os.path.join(*([prefix]+header[0].split('.'))) + header = header[1] + else: + d = self.install_dir + self.mkpath(d) + (out, _) = self.copy_file(header, d) + self.outfiles.append(out) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_headers.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_headers.pyc new file mode 100644 index 0000000..d166a95 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/install_headers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/sdist.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/sdist.py new file mode 100644 index 0000000..bfaab1c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/sdist.py @@ -0,0 +1,29 @@ +from __future__ import division, absolute_import, print_function + +import sys +if 'setuptools' in sys.modules: + from setuptools.command.sdist import sdist as old_sdist +else: + from distutils.command.sdist import sdist as old_sdist + +from numpy.distutils.misc_util import get_data_files + +class sdist(old_sdist): + + def add_defaults (self): + old_sdist.add_defaults(self) + + dist = self.distribution + + if dist.has_data_files(): + for data in dist.data_files: + self.filelist.extend(get_data_files(data)) + + if dist.has_headers(): + headers = [] + for h in dist.headers: + if isinstance(h, str): headers.append(h) + else: headers.append(h[1]) + self.filelist.extend(headers) + + return diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/command/sdist.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/sdist.pyc new file mode 100644 index 0000000..15ecae1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/command/sdist.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/compat.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/compat.py new file mode 100644 index 0000000..9a81cd3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/compat.py @@ -0,0 +1,10 @@ +"""Small modules to cope with python 2 vs 3 incompatibilities 
inside +numpy.distutils + +""" +from __future__ import division, absolute_import, print_function + +import sys + +def get_exception(): + return sys.exc_info()[1] diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/compat.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/compat.pyc new file mode 100644 index 0000000..1830095 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/conv_template.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/conv_template.py new file mode 100644 index 0000000..b33e315 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/conv_template.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python +""" +takes templated file .xxx.src and produces .xxx file where .xxx is +.i or .c or .h, using the following template rules + +/**begin repeat -- on a line by itself marks the start of a repeated code + segment +/**end repeat**/ -- on a line by itself marks it's end + +After the /**begin repeat and before the */, all the named templates are placed +these should all have the same number of replacements + +Repeat blocks can be nested, with each nested block labeled with its depth, +i.e. +/**begin repeat1 + *.... + */ +/**end repeat1**/ + +When using nested loops, you can optionally exclude particular +combinations of the variables using (inside the comment portion of the inner loop): + + :exclude: var1=value1, var2=value2, ... + +This will exclude the pattern where var1 is value1 and var2 is value2 when +the result is being generated. + + +In the main body each replace will use one entry from the list of named replacements + + Note that all #..# forms in a block must have the same number of + comma-separated entries. + +Example: + + An input file containing + + /**begin repeat + * #a = 1,2,3# + * #b = 1,2,3# + */ + + /**begin repeat1 + * #c = ted, jim# + */ + @a@, @b@, @c@ + /**end repeat1**/ + + /**end repeat**/ + + produces + + line 1 "template.c.src" + + /* + ********************************************************************* + ** This file was autogenerated from a template DO NOT EDIT!!** + ** Changes should be made to the original source (.src) file ** + ********************************************************************* + */ + + #line 9 + 1, 1, ted + + #line 9 + 1, 1, jim + + #line 9 + 2, 2, ted + + #line 9 + 2, 2, jim + + #line 9 + 3, 3, ted + + #line 9 + 3, 3, jim + +""" +from __future__ import division, absolute_import, print_function + + +__all__ = ['process_str', 'process_file'] + +import os +import sys +import re + +from numpy.distutils.compat import get_exception + +# names for replacement that are already global. +global_names = {} + +# header placed at the front of head processed file +header =\ +""" +/* + ***************************************************************************** + ** This file was autogenerated from a template DO NOT EDIT!!!! ** + ** Changes should be made to the original source (.src) file ** + ***************************************************************************** + */ + +""" +# Parse string for repeat loops +def parse_structure(astr, level): + """ + The returned line number is from the beginning of the string, starting + at zero. Returns an empty list if no loops found. 
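+
+    Each entry of the returned list is a 5-tuple, as assembled in the
+    loop below: (block start, end of the header line, start of the end
+    marker, end of the end-marker line, running line number).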
+ + """ + if level == 0 : + loopbeg = "/**begin repeat" + loopend = "/**end repeat**/" + else : + loopbeg = "/**begin repeat%d" % level + loopend = "/**end repeat%d**/" % level + + ind = 0 + line = 0 + spanlist = [] + while True: + start = astr.find(loopbeg, ind) + if start == -1: + break + start2 = astr.find("*/", start) + start2 = astr.find("\n", start2) + fini1 = astr.find(loopend, start2) + fini2 = astr.find("\n", fini1) + line += astr.count("\n", ind, start2+1) + spanlist.append((start, start2+1, fini1, fini2+1, line)) + line += astr.count("\n", start2+1, fini2) + ind = fini2 + spanlist.sort() + return spanlist + + +def paren_repl(obj): + torep = obj.group(1) + numrep = obj.group(2) + return ','.join([torep]*int(numrep)) + +parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)") +plainrep = re.compile(r"([^*]+)\*(\d+)") +def parse_values(astr): + # replaces all occurrences of '(a,b,c)*4' in astr + # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate + # empty values, i.e., ()*4 yields ',,,'. The result is + # split at ',' and a list of values returned. + astr = parenrep.sub(paren_repl, astr) + # replaces occurrences of xxx*3 with xxx, xxx, xxx + astr = ','.join([plainrep.sub(paren_repl, x.strip()) + for x in astr.split(',')]) + return astr.split(',') + + +stripast = re.compile(r"\n\s*\*?") +named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") +exclude_vars_re = re.compile(r"(\w*)=(\w*)") +exclude_re = re.compile(":exclude:") +def parse_loop_header(loophead) : + """Find all named replacements in the header + + Returns a list of dictionaries, one for each loop iteration, + where each key is a name to be substituted and the corresponding + value is the replacement string. + + Also return a list of exclusions. The exclusions are dictionaries + of key value pairs. There can be more than one exclusion. + [{'var1':'value1', 'var2', 'value2'[,...]}, ...] + + """ + # Strip out '\n' and leading '*', if any, in continuation lines. + # This should not effect code previous to this change as + # continuation lines were not allowed. 
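+    # For example (sketch), a continued header such as
+    #     #a = 1, 2,
+    #      * 3#
+    # collapses to "#a = 1, 2, 3#" before the names are parsed out.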
+ loophead = stripast.sub("", loophead) + # parse out the names and lists of values + names = [] + reps = named_re.findall(loophead) + nsub = None + for rep in reps: + name = rep[0] + vals = parse_values(rep[1]) + size = len(vals) + if nsub is None : + nsub = size + elif nsub != size : + msg = "Mismatch in number of values, %d != %d\n%s = %s" + raise ValueError(msg % (nsub, size, name, vals)) + names.append((name, vals)) + + + # Find any exclude variables + excludes = [] + + for obj in exclude_re.finditer(loophead): + span = obj.span() + # find next newline + endline = loophead.find('\n', span[1]) + substr = loophead[span[1]:endline] + ex_names = exclude_vars_re.findall(substr) + excludes.append(dict(ex_names)) + + # generate list of dictionaries, one for each template iteration + dlist = [] + if nsub is None : + raise ValueError("No substitution variables found") + for i in range(nsub): + tmp = {name: vals[i] for name, vals in names} + dlist.append(tmp) + return dlist + +replace_re = re.compile(r"@([\w]+)@") +def parse_string(astr, env, level, line) : + lineno = "#line %d\n" % line + + # local function for string replacement, uses env + def replace(match): + name = match.group(1) + try : + val = env[name] + except KeyError: + msg = 'line %d: no definition of key "%s"'%(line, name) + raise ValueError(msg) + return val + + code = [lineno] + struct = parse_structure(astr, level) + if struct : + # recurse over inner loops + oldend = 0 + newlevel = level + 1 + for sub in struct: + pref = astr[oldend:sub[0]] + head = astr[sub[0]:sub[1]] + text = astr[sub[1]:sub[2]] + oldend = sub[3] + newline = line + sub[4] + code.append(replace_re.sub(replace, pref)) + try : + envlist = parse_loop_header(head) + except ValueError: + e = get_exception() + msg = "line %d: %s" % (newline, e) + raise ValueError(msg) + for newenv in envlist : + newenv.update(env) + newcode = parse_string(text, newenv, newlevel, newline) + code.extend(newcode) + suff = astr[oldend:] + code.append(replace_re.sub(replace, suff)) + else : + # replace keys + code.append(replace_re.sub(replace, astr)) + code.append('\n') + return ''.join(code) + +def process_str(astr): + code = [header] + code.extend(parse_string(astr, global_names, 0, 1)) + return ''.join(code) + + +include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" + r"(?P[\w\d./\\]+[.]src)['\"]", re.I) + +def resolve_includes(source): + d = os.path.dirname(source) + fid = open(source) + lines = [] + for line in fid: + m = include_src_re.match(line) + if m: + fn = m.group('name') + if not os.path.isabs(fn): + fn = os.path.join(d, fn) + if os.path.isfile(fn): + print('Including file', fn) + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + fid.close() + return lines + +def process_file(source): + lines = resolve_includes(source) + sourcefile = os.path.normcase(source).replace("\\", "\\\\") + try: + code = process_str(''.join(lines)) + except ValueError: + e = get_exception() + raise ValueError('In "%s" loop at %s' % (sourcefile, e)) + return '#line 1 "%s"\n%s' % (sourcefile, code) + + +def unique_key(adict): + # this obtains a unique key given a dictionary + # currently it works by appending together n of the letters of the + # current keys and increasing n until a unique key is found + # -- not particularly quick + allkeys = list(adict.keys()) + done = False + n = 1 + while not done: + newkey = "".join([x[:n] for x in allkeys]) + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +def main(): + try: + file = 
sys.argv[1] + except IndexError: + fid = sys.stdin + outfile = sys.stdout + else: + fid = open(file, 'r') + (base, ext) = os.path.splitext(file) + newname = base + outfile = open(newname, 'w') + + allstr = fid.read() + try: + writestr = process_str(allstr) + except ValueError: + e = get_exception() + raise ValueError("In %s loop at %s" % (file, e)) + outfile.write(writestr) + +if __name__ == "__main__": + main() diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/conv_template.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/conv_template.pyc new file mode 100644 index 0000000..a7e3f97 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/conv_template.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/core.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/core.py new file mode 100644 index 0000000..70cc37c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/core.py @@ -0,0 +1,217 @@ +from __future__ import division, absolute_import, print_function + +import sys +from distutils.core import * + +if 'setuptools' in sys.modules: + have_setuptools = True + from setuptools import setup as old_setup + # easy_install imports math, it may be picked up from cwd + from setuptools.command import easy_install + try: + # very old versions of setuptools don't have this + from setuptools.command import bdist_egg + except ImportError: + have_setuptools = False +else: + from distutils.core import setup as old_setup + have_setuptools = False + +import warnings +import distutils.core +import distutils.dist + +from numpy.distutils.extension import Extension +from numpy.distutils.numpy_distribution import NumpyDistribution +from numpy.distutils.command import config, config_compiler, \ + build, build_py, build_ext, build_clib, build_src, build_scripts, \ + sdist, install_data, install_headers, install, bdist_rpm, \ + install_clib +from numpy.distutils.misc_util import get_data_files, is_sequence, is_string + +numpy_cmdclass = {'build': build.build, + 'build_src': build_src.build_src, + 'build_scripts': build_scripts.build_scripts, + 'config_cc': config_compiler.config_cc, + 'config_fc': config_compiler.config_fc, + 'config': config.config, + 'build_ext': build_ext.build_ext, + 'build_py': build_py.build_py, + 'build_clib': build_clib.build_clib, + 'sdist': sdist.sdist, + 'install_data': install_data.install_data, + 'install_headers': install_headers.install_headers, + 'install_clib': install_clib.install_clib, + 'install': install.install, + 'bdist_rpm': bdist_rpm.bdist_rpm, + } +if have_setuptools: + # Use our own versions of develop and egg_info to ensure that build_src is + # handled appropriately. + from numpy.distutils.command import develop, egg_info + numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg + numpy_cmdclass['develop'] = develop.develop + numpy_cmdclass['easy_install'] = easy_install.easy_install + numpy_cmdclass['egg_info'] = egg_info.egg_info + +def _dict_append(d, **kws): + for k, v in kws.items(): + if k not in d: + d[k] = v + continue + dv = d[k] + if isinstance(dv, tuple): + d[k] = dv + tuple(v) + elif isinstance(dv, list): + d[k] = dv + list(v) + elif isinstance(dv, dict): + _dict_append(dv, **v) + elif is_string(dv): + d[k] = dv + v + else: + raise TypeError(repr(type(dv))) + +def _command_line_ok(_cache=None): + """ Return True if command line does not contain any + help or display requests. 
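+
+    For example, invocations such as ``python setup.py --help`` or
+    ``python setup.py --version`` make this return False.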
+ """ + if _cache: + return _cache[0] + elif _cache is None: + _cache = [] + ok = True + display_opts = ['--'+n for n in Distribution.display_option_names] + for o in Distribution.display_options: + if o[1]: + display_opts.append('-'+o[1]) + for arg in sys.argv: + if arg.startswith('--help') or arg=='-h' or arg in display_opts: + ok = False + break + _cache.append(ok) + return ok + +def get_distribution(always=False): + dist = distutils.core._setup_distribution + # XXX Hack to get numpy installable with easy_install. + # The problem is easy_install runs it's own setup(), which + # sets up distutils.core._setup_distribution. However, + # when our setup() runs, that gets overwritten and lost. + # We can't use isinstance, as the DistributionWithoutHelpCommands + # class is local to a function in setuptools.command.easy_install + if dist is not None and \ + 'DistributionWithoutHelpCommands' in repr(dist): + dist = None + if always and dist is None: + dist = NumpyDistribution() + return dist + +def setup(**attr): + + cmdclass = numpy_cmdclass.copy() + + new_attr = attr.copy() + if 'cmdclass' in new_attr: + cmdclass.update(new_attr['cmdclass']) + new_attr['cmdclass'] = cmdclass + + if 'configuration' in new_attr: + # To avoid calling configuration if there are any errors + # or help request in command in the line. + configuration = new_attr.pop('configuration') + + old_dist = distutils.core._setup_distribution + old_stop = distutils.core._setup_stop_after + distutils.core._setup_distribution = None + distutils.core._setup_stop_after = "commandline" + try: + dist = setup(**new_attr) + finally: + distutils.core._setup_distribution = old_dist + distutils.core._setup_stop_after = old_stop + if dist.help or not _command_line_ok(): + # probably displayed help, skip running any commands + return dist + + # create setup dictionary and append to new_attr + config = configuration() + if hasattr(config, 'todict'): + config = config.todict() + _dict_append(new_attr, **config) + + # Move extension source libraries to libraries + libraries = [] + for ext in new_attr.get('ext_modules', []): + new_libraries = [] + for item in ext.libraries: + if is_sequence(item): + lib_name, build_info = item + _check_append_ext_library(libraries, lib_name, build_info) + new_libraries.append(lib_name) + elif is_string(item): + new_libraries.append(item) + else: + raise TypeError("invalid description of extension module " + "library %r" % (item,)) + ext.libraries = new_libraries + if libraries: + if 'libraries' not in new_attr: + new_attr['libraries'] = [] + for item in libraries: + _check_append_library(new_attr['libraries'], item) + + # sources in ext_modules or libraries may contain header files + if ('ext_modules' in new_attr or 'libraries' in new_attr) \ + and 'headers' not in new_attr: + new_attr['headers'] = [] + + # Use our custom NumpyDistribution class instead of distutils' one + new_attr['distclass'] = NumpyDistribution + + return old_setup(**new_attr) + +def _check_append_library(libraries, item): + for libitem in libraries: + if is_sequence(libitem): + if is_sequence(item): + if item[0]==libitem[0]: + if item[1] is libitem[1]: + return + warnings.warn("[0] libraries list contains %r with" + " different build_info" % (item[0],), + stacklevel=2) + break + else: + if item==libitem[0]: + warnings.warn("[1] libraries list contains %r with" + " no build_info" % (item[0],), + stacklevel=2) + break + else: + if is_sequence(item): + if item[0]==libitem: + warnings.warn("[2] libraries list contains %r with" + " no 
build_info" % (item[0],), + stacklevel=2) + break + else: + if item==libitem: + return + libraries.append(item) + +def _check_append_ext_library(libraries, lib_name, build_info): + for item in libraries: + if is_sequence(item): + if item[0]==lib_name: + if item[1] is build_info: + return + warnings.warn("[3] libraries list contains %r with" + " different build_info" % (lib_name,), + stacklevel=2) + break + elif item==lib_name: + warnings.warn("[4] libraries list contains %r with" + " no build_info" % (lib_name,), + stacklevel=2) + break + libraries.append((lib_name, build_info)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/core.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/core.pyc new file mode 100644 index 0000000..e129dfb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/core.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/cpuinfo.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/cpuinfo.py new file mode 100644 index 0000000..5802993 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/cpuinfo.py @@ -0,0 +1,693 @@ +#!/usr/bin/env python +""" +cpuinfo + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['cpu'] + +import sys, re, types +import os + +if sys.version_info[0] >= 3: + from subprocess import getstatusoutput +else: + from commands import getstatusoutput + +import warnings +import platform + +from numpy.distutils.compat import get_exception + +def getoutput(cmd, successful_status=(0,), stacklevel=1): + try: + status, output = getstatusoutput(cmd) + except EnvironmentError: + e = get_exception() + warnings.warn(str(e), UserWarning, stacklevel=stacklevel) + return False, "" + if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: + return True, output + return False, output + +def command_info(successful_status=(0,), stacklevel=1, **kw): + info = {} + for key in kw: + ok, output = getoutput(kw[key], successful_status=successful_status, + stacklevel=stacklevel+1) + if ok: + info[key] = output.strip() + return info + +def command_by_line(cmd, successful_status=(0,), stacklevel=1): + ok, output = getoutput(cmd, successful_status=successful_status, + stacklevel=stacklevel+1) + if not ok: + return + for line in output.splitlines(): + yield line.strip() + +def key_value_from_command(cmd, sep, successful_status=(0,), + stacklevel=1): + d = {} + for line in command_by_line(cmd, successful_status=successful_status, + stacklevel=stacklevel+1): + l = [s.strip() for s in line.split(sep, 1)] + if len(l) == 2: + d[l[0]] = l[1] + return d + +class CPUInfoBase(object): + """Holds CPU information and provides methods for requiring + the availability of various CPU features. 
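+
+    A minimal usage sketch (attribute lookups are resolved by
+    ``__getattr__`` below, which maps e.g. ``is_Intel`` to the
+    ``_is_Intel`` method of the platform-specific subclass)::
+
+        from numpy.distutils.cpuinfo import cpu
+        if cpu.is_Intel() and cpu.has_sse2():
+            pass  # e.g. enable SSE2-specific compile flags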
+ """ + + def _try_call(self, func): + try: + return func() + except Exception: + pass + + def __getattr__(self, name): + if not name.startswith('_'): + if hasattr(self, '_'+name): + attr = getattr(self, '_'+name) + if isinstance(attr, types.MethodType): + return lambda func=self._try_call,attr=attr : func(attr) + else: + return lambda : None + raise AttributeError(name) + + def _getNCPUs(self): + return 1 + + def __get_nbits(self): + abits = platform.architecture()[0] + nbits = re.compile(r'(\d+)bit').search(abits).group(1) + return nbits + + def _is_32bit(self): + return self.__get_nbits() == '32' + + def _is_64bit(self): + return self.__get_nbits() == '64' + +class LinuxCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = [ {} ] + ok, output = getoutput('uname -m') + if ok: + info[0]['uname_m'] = output.strip() + try: + fo = open('/proc/cpuinfo') + except EnvironmentError: + e = get_exception() + warnings.warn(str(e), UserWarning, stacklevel=2) + else: + for line in fo: + name_value = [s.strip() for s in line.split(':', 1)] + if len(name_value) != 2: + continue + name, value = name_value + if not info or name in info[-1]: # next processor + info.append({}) + info[-1][name] = value + fo.close() + self.__class__.info = info + + def _not_impl(self): pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['vendor_id']=='AuthenticAMD' + + def _is_AthlonK6_2(self): + return self._is_AMD() and self.info[0]['model'] == '2' + + def _is_AthlonK6_3(self): + return self._is_AMD() and self.info[0]['model'] == '3' + + def _is_AthlonK6(self): + return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None + + def _is_AthlonK7(self): + return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None + + def _is_AthlonMP(self): + return re.match(r'.*?Athlon\(tm\) MP\b', + self.info[0]['model name']) is not None + + def _is_AMD64(self): + return self.is_AMD() and self.info[0]['family'] == '15' + + def _is_Athlon64(self): + return re.match(r'.*?Athlon\(tm\) 64\b', + self.info[0]['model name']) is not None + + def _is_AthlonHX(self): + return re.match(r'.*?Athlon HX\b', + self.info[0]['model name']) is not None + + def _is_Opteron(self): + return re.match(r'.*?Opteron\b', + self.info[0]['model name']) is not None + + def _is_Hammer(self): + return re.match(r'.*?Hammer\b', + self.info[0]['model name']) is not None + + # Alpha + + def _is_Alpha(self): + return self.info[0]['cpu']=='Alpha' + + def _is_EV4(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' + + def _is_EV5(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' + + def _is_EV56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' + + def _is_PCA56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' + + # Intel + + #XXX + _is_i386 = _not_impl + + def _is_Intel(self): + return self.info[0]['vendor_id']=='GenuineIntel' + + def _is_i486(self): + return self.info[0]['cpu']=='i486' + + def _is_i586(self): + return self.is_Intel() and self.info[0]['cpu family'] == '5' + + def _is_i686(self): + return self.is_Intel() and self.info[0]['cpu family'] == '6' + + def _is_Celeron(self): + return re.match(r'.*?Celeron', + self.info[0]['model name']) is not None + + def _is_Pentium(self): + return re.match(r'.*?Pentium', + self.info[0]['model name']) is not None + + def _is_PentiumII(self): + return re.match(r'.*?Pentium.*?II\b', + self.info[0]['model name']) is not None + + def _is_PentiumPro(self): + return 
re.match(r'.*?PentiumPro\b', + self.info[0]['model name']) is not None + + def _is_PentiumMMX(self): + return re.match(r'.*?Pentium.*?MMX\b', + self.info[0]['model name']) is not None + + def _is_PentiumIII(self): + return re.match(r'.*?Pentium.*?III\b', + self.info[0]['model name']) is not None + + def _is_PentiumIV(self): + return re.match(r'.*?Pentium.*?(IV|4)\b', + self.info[0]['model name']) is not None + + def _is_PentiumM(self): + return re.match(r'.*?Pentium.*?M\b', + self.info[0]['model name']) is not None + + def _is_Prescott(self): + return self.is_PentiumIV() and self.has_sse3() + + def _is_Nocona(self): + return self.is_Intel() \ + and (self.info[0]['cpu family'] == '6' \ + or self.info[0]['cpu family'] == '15' ) \ + and (self.has_sse3() and not self.has_ssse3())\ + and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None + + def _is_Core2(self): + return self.is_64bit() and self.is_Intel() and \ + re.match(r'.*?Core\(TM\)2\b', \ + self.info[0]['model name']) is not None + + def _is_Itanium(self): + return re.match(r'.*?Itanium\b', + self.info[0]['family']) is not None + + def _is_XEON(self): + return re.match(r'.*?XEON\b', + self.info[0]['model name'], re.IGNORECASE) is not None + + _is_Xeon = _is_XEON + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_fdiv_bug(self): + return self.info[0]['fdiv_bug']=='yes' + + def _has_f00f_bug(self): + return self.info[0]['f00f_bug']=='yes' + + def _has_mmx(self): + return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None + + def _has_sse(self): + return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None + + def _has_sse2(self): + return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None + + def _has_sse3(self): + return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None + + def _has_ssse3(self): + return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None + + def _has_3dnow(self): + return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None + + def _has_3dnowext(self): + return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None + +class IRIXCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = key_value_from_command('sysconf', sep=' ', + successful_status=(0, 1)) + self.__class__.info = info + + def _not_impl(self): pass + + def _is_singleCPU(self): + return self.info.get('NUM_PROCESSORS') == '1' + + def _getNCPUs(self): + return int(self.info.get('NUM_PROCESSORS', 1)) + + def __cputype(self, n): + return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) + def _is_r2000(self): return self.__cputype(2000) + def _is_r3000(self): return self.__cputype(3000) + def _is_r3900(self): return self.__cputype(3900) + def _is_r4000(self): return self.__cputype(4000) + def _is_r4100(self): return self.__cputype(4100) + def _is_r4300(self): return self.__cputype(4300) + def _is_r4400(self): return self.__cputype(4400) + def _is_r4600(self): return self.__cputype(4600) + def _is_r4650(self): return self.__cputype(4650) + def _is_r5000(self): return self.__cputype(5000) + def _is_r6000(self): return self.__cputype(6000) + def _is_r8000(self): return self.__cputype(8000) + def _is_r10000(self): return self.__cputype(10000) + def _is_r12000(self): return self.__cputype(12000) + def _is_rorion(self): return self.__cputype('orion') + + def get_ip(self): + try: return self.info.get('MACHINE') + except Exception: pass + def __machine(self, n): + 
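+        # e.g. MACHINE == 'IP22' matches n == 22 (compared lowercased below).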
return self.info.get('MACHINE').lower() == 'ip%s' % (n) + def _is_IP19(self): return self.__machine(19) + def _is_IP20(self): return self.__machine(20) + def _is_IP21(self): return self.__machine(21) + def _is_IP22(self): return self.__machine(22) + def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() + def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() + def _is_IP24(self): return self.__machine(24) + def _is_IP25(self): return self.__machine(25) + def _is_IP26(self): return self.__machine(26) + def _is_IP27(self): return self.__machine(27) + def _is_IP28(self): return self.__machine(28) + def _is_IP30(self): return self.__machine(30) + def _is_IP32(self): return self.__machine(32) + def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() + def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() + + +class DarwinCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + machine='machine') + info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') + self.__class__.info = info + + def _not_impl(self): pass + + def _getNCPUs(self): + return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) + + def _is_Power_Macintosh(self): + return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' + + def _is_i386(self): + return self.info['arch']=='i386' + def _is_ppc(self): + return self.info['arch']=='ppc' + + def __machine(self, n): + return self.info['machine'] == 'ppc%s'%n + def _is_ppc601(self): return self.__machine(601) + def _is_ppc602(self): return self.__machine(602) + def _is_ppc603(self): return self.__machine(603) + def _is_ppc603e(self): return self.__machine('603e') + def _is_ppc604(self): return self.__machine(604) + def _is_ppc604e(self): return self.__machine('604e') + def _is_ppc620(self): return self.__machine(620) + def _is_ppc630(self): return self.__machine(630) + def _is_ppc740(self): return self.__machine(740) + def _is_ppc7400(self): return self.__machine(7400) + def _is_ppc7450(self): return self.__machine(7450) + def _is_ppc750(self): return self.__machine(750) + def _is_ppc403(self): return self.__machine(403) + def _is_ppc505(self): return self.__machine(505) + def _is_ppc801(self): return self.__machine(801) + def _is_ppc821(self): return self.__machine(821) + def _is_ppc823(self): return self.__machine(823) + def _is_ppc860(self): return self.__machine(860) + + +class SunOSCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + mach='mach', + uname_i='uname_i', + isainfo_b='isainfo -b', + isainfo_n='isainfo -n', + ) + info['uname_X'] = key_value_from_command('uname -X', sep='=') + for line in command_by_line('psrinfo -v 0'): + m = re.match(r'\s*The (?P
<p>
    [\w\d]+) processor operates at', line) + if m: + info['processor'] = m.group('p') + break + self.__class__.info = info + + def _not_impl(self): pass + + def _is_i386(self): + return self.info['isainfo_n']=='i386' + def _is_sparc(self): + return self.info['isainfo_n']=='sparc' + def _is_sparcv9(self): + return self.info['isainfo_n']=='sparcv9' + + def _getNCPUs(self): + return int(self.info['uname_X'].get('NumCPU', 1)) + + def _is_sun4(self): + return self.info['arch']=='sun4' + + def _is_SUNW(self): + return re.match(r'SUNW', self.info['uname_i']) is not None + def _is_sparcstation5(self): + return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None + def _is_ultra1(self): + return re.match(r'.*Ultra-1', self.info['uname_i']) is not None + def _is_ultra250(self): + return re.match(r'.*Ultra-250', self.info['uname_i']) is not None + def _is_ultra2(self): + return re.match(r'.*Ultra-2', self.info['uname_i']) is not None + def _is_ultra30(self): + return re.match(r'.*Ultra-30', self.info['uname_i']) is not None + def _is_ultra4(self): + return re.match(r'.*Ultra-4', self.info['uname_i']) is not None + def _is_ultra5_10(self): + return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None + def _is_ultra5(self): + return re.match(r'.*Ultra-5', self.info['uname_i']) is not None + def _is_ultra60(self): + return re.match(r'.*Ultra-60', self.info['uname_i']) is not None + def _is_ultra80(self): + return re.match(r'.*Ultra-80', self.info['uname_i']) is not None + def _is_ultraenterprice(self): + return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None + def _is_ultraenterprice10k(self): + return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None + def _is_sunfire(self): + return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None + def _is_ultra(self): + return re.match(r'.*Ultra', self.info['uname_i']) is not None + + def _is_cpusparcv7(self): + return self.info['processor']=='sparcv7' + def _is_cpusparcv8(self): + return self.info['processor']=='sparcv8' + def _is_cpusparcv9(self): + return self.info['processor']=='sparcv9' + +class Win32CPUInfo(CPUInfoBase): + + info = None + pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" + # XXX: what does the value of + # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 + # mean? + + def __init__(self): + if self.info is not None: + return + info = [] + try: + #XXX: Bad style to use so long `try:...except:...`. Fix it! 
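+            # (The loop below enumerates the registry subkeys 0, 1, ... of
+            # self.pkey, collecting one info dict per logical processor.)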
+ if sys.version_info[0] >= 3: + import winreg + else: + import _winreg as winreg + + prgx = re.compile(r"family\s+(?P\d+)\s+model\s+(?P\d+)" + r"\s+stepping\s+(?P\d+)", re.IGNORECASE) + chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) + pnum=0 + while True: + try: + proc=winreg.EnumKey(chnd, pnum) + except winreg.error: + break + else: + pnum+=1 + info.append({"Processor":proc}) + phnd=winreg.OpenKey(chnd, proc) + pidx=0 + while True: + try: + name, value, vtpe=winreg.EnumValue(phnd, pidx) + except winreg.error: + break + else: + pidx=pidx+1 + info[-1][name]=value + if name=="Identifier": + srch=prgx.search(value) + if srch: + info[-1]["Family"]=int(srch.group("FML")) + info[-1]["Model"]=int(srch.group("MDL")) + info[-1]["Stepping"]=int(srch.group("STP")) + except Exception: + print(sys.exc_info()[1], '(ignoring)') + self.__class__.info = info + + def _not_impl(self): pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['VendorIdentifier']=='AuthenticAMD' + + def _is_Am486(self): + return self.is_AMD() and self.info[0]['Family']==4 + + def _is_Am5x86(self): + return self.is_AMD() and self.info[0]['Family']==4 + + def _is_AMDK5(self): + return self.is_AMD() and self.info[0]['Family']==5 \ + and self.info[0]['Model'] in [0, 1, 2, 3] + + def _is_AMDK6(self): + return self.is_AMD() and self.info[0]['Family']==5 \ + and self.info[0]['Model'] in [6, 7] + + def _is_AMDK6_2(self): + return self.is_AMD() and self.info[0]['Family']==5 \ + and self.info[0]['Model']==8 + + def _is_AMDK6_3(self): + return self.is_AMD() and self.info[0]['Family']==5 \ + and self.info[0]['Model']==9 + + def _is_AMDK7(self): + return self.is_AMD() and self.info[0]['Family'] == 6 + + # To reliably distinguish between the different types of AMD64 chips + # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) 
would + # require looking at the 'brand' from cpuid + + def _is_AMD64(self): + return self.is_AMD() and self.info[0]['Family'] == 15 + + # Intel + + def _is_Intel(self): + return self.info[0]['VendorIdentifier']=='GenuineIntel' + + def _is_i386(self): + return self.info[0]['Family']==3 + + def _is_i486(self): + return self.info[0]['Family']==4 + + def _is_i586(self): + return self.is_Intel() and self.info[0]['Family']==5 + + def _is_i686(self): + return self.is_Intel() and self.info[0]['Family']==6 + + def _is_Pentium(self): + return self.is_Intel() and self.info[0]['Family']==5 + + def _is_PentiumMMX(self): + return self.is_Intel() and self.info[0]['Family']==5 \ + and self.info[0]['Model']==4 + + def _is_PentiumPro(self): + return self.is_Intel() and self.info[0]['Family']==6 \ + and self.info[0]['Model']==1 + + def _is_PentiumII(self): + return self.is_Intel() and self.info[0]['Family']==6 \ + and self.info[0]['Model'] in [3, 5, 6] + + def _is_PentiumIII(self): + return self.is_Intel() and self.info[0]['Family']==6 \ + and self.info[0]['Model'] in [7, 8, 9, 10, 11] + + def _is_PentiumIV(self): + return self.is_Intel() and self.info[0]['Family']==15 + + def _is_PentiumM(self): + return self.is_Intel() and self.info[0]['Family'] == 6 \ + and self.info[0]['Model'] in [9, 13, 14] + + def _is_Core2(self): + return self.is_Intel() and self.info[0]['Family'] == 6 \ + and self.info[0]['Model'] in [15, 16, 17] + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_mmx(self): + if self.is_Intel(): + return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ + or (self.info[0]['Family'] in [6, 15]) + elif self.is_AMD(): + return self.info[0]['Family'] in [5, 6, 15] + else: + return False + + def _has_sse(self): + if self.is_Intel(): + return (self.info[0]['Family']==6 and \ + self.info[0]['Model'] in [7, 8, 9, 10, 11]) \ + or self.info[0]['Family']==15 + elif self.is_AMD(): + return (self.info[0]['Family']==6 and \ + self.info[0]['Model'] in [6, 7, 8, 10]) \ + or self.info[0]['Family']==15 + else: + return False + + def _has_sse2(self): + if self.is_Intel(): + return self.is_Pentium4() or self.is_PentiumM() \ + or self.is_Core2() + elif self.is_AMD(): + return self.is_AMD64() + else: + return False + + def _has_3dnow(self): + return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] + + def _has_3dnowext(self): + return self.is_AMD() and self.info[0]['Family'] in [6, 15] + +if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) + cpuinfo = LinuxCPUInfo +elif sys.platform.startswith('irix'): + cpuinfo = IRIXCPUInfo +elif sys.platform == 'darwin': + cpuinfo = DarwinCPUInfo +elif sys.platform.startswith('sunos'): + cpuinfo = SunOSCPUInfo +elif sys.platform.startswith('win32'): + cpuinfo = Win32CPUInfo +elif sys.platform.startswith('cygwin'): + cpuinfo = LinuxCPUInfo +#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. 
+else: + cpuinfo = CPUInfoBase + +cpu = cpuinfo() + +#if __name__ == "__main__": +# +# cpu.is_blaa() +# cpu.is_Intel() +# cpu.is_Alpha() +# +# print('CPU information:'), +# for name in dir(cpuinfo): +# if name[0]=='_' and name[1]!='_': +# r = getattr(cpu,name[1:])() +# if r: +# if r!=1: +# print('%s=%s' %(name[1:],r)) +# else: +# print(name[1:]), +# print() diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/cpuinfo.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/cpuinfo.pyc new file mode 100644 index 0000000..96e410e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/cpuinfo.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/exec_command.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/exec_command.py new file mode 100644 index 0000000..ede347b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/exec_command.py @@ -0,0 +1,333 @@ +""" +exec_command + +Implements exec_command function that is (almost) equivalent to +commands.getstatusoutput function but on NT, DOS systems the +returned status is actually correct (though, the returned status +values may be different by a factor). In addition, exec_command +takes keyword arguments for (re-)defining environment variables. + +Provides functions: + + exec_command --- execute command in a specified directory and + in the modified environment. + find_executable --- locate a command using info from environment + variable PATH. Equivalent to posix `which` + command. + +Author: Pearu Peterson +Created: 11 January 2003 + +Requires: Python 2.x + +Successfully tested on: + +======== ============ ================================================= +os.name sys.platform comments +======== ============ ================================================= +posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 + PyCrust 0.9.3, Idle 1.0.2 +posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 +posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 +posix darwin Darwin 7.2.0, Python 2.3 +nt win32 Windows Me + Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 + Python 2.1.1 Idle 0.8 +nt win32 Windows 98, Python 2.1.1. Idle 0.8 +nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests + fail i.e. redefining environment variables may + not work. FIXED: don't use cygwin echo! + Comment: also `cmd /c echo` will not work + but redefining environment variables do work. +posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) +nt win32 Windows XP, Python 2.3.3 +======== ============ ================================================= + +Known bugs: + +* Tests, that send messages to stderr, fail when executed from MSYS prompt + because the messages are lost at some point. + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['exec_command', 'find_executable'] + +import os +import sys +import subprocess +import locale + +from numpy.distutils.misc_util import is_sequence, make_temp_file +from numpy.distutils import log + +def filepath_from_subprocess_output(output): + """ + Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. + + Inherited from `exec_command`, and possibly incorrect. 
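+
+    A usage sketch (the command is only illustrative)::
+
+        output = subprocess.check_output(['gfortran', '--version'])
+        text = filepath_from_subprocess_output(output)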
+ """ + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + output = output.decode(mylocale, errors='replace') + output = output.replace('\r\n', '\n') + # Another historical oddity + if output[-1:] == '\n': + output = output[:-1] + # stdio uses bytes in python 2, so to avoid issues, we simply + # remove all non-ascii characters + if sys.version_info < (3, 0): + output = output.encode('ascii', errors='replace') + return output + + +def forward_bytes_to_stdout(val): + """ + Forward bytes from a subprocess call to the console, without attempting to + decode them. + + The assumption is that the subprocess call already returned bytes in + a suitable encoding. + """ + if sys.version_info.major < 3: + # python 2 has binary output anyway + sys.stdout.write(val) + elif hasattr(sys.stdout, 'buffer'): + # use the underlying binary output if there is one + sys.stdout.buffer.write(val) + elif hasattr(sys.stdout, 'encoding'): + # round-trip the encoding if necessary + sys.stdout.write(val.decode(sys.stdout.encoding)) + else: + # make a best-guess at the encoding + sys.stdout.write(val.decode('utf8', errors='replace')) + + +def temp_file_name(): + fo, name = make_temp_file() + fo.close() + return name + +def get_pythonexe(): + pythonexe = sys.executable + if os.name in ['nt', 'dos']: + fdir, fn = os.path.split(pythonexe) + fn = fn.upper().replace('PYTHONW', 'PYTHON') + pythonexe = os.path.join(fdir, fn) + assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) + return pythonexe + +def find_executable(exe, path=None, _cache={}): + """Return full path of a executable or None. + + Symbolic links are not followed. + """ + key = exe, path + try: + return _cache[key] + except KeyError: + pass + log.debug('find_executable(%r)' % exe) + orig_exe = exe + + if path is None: + path = os.environ.get('PATH', os.defpath) + if os.name=='posix': + realpath = os.path.realpath + else: + realpath = lambda a:a + + if exe.startswith('"'): + exe = exe[1:-1] + + suffixes = [''] + if os.name in ['nt', 'dos', 'os2']: + fn, ext = os.path.splitext(exe) + extra_suffixes = ['.exe', '.com', '.bat'] + if ext.lower() not in extra_suffixes: + suffixes = extra_suffixes + + if os.path.isabs(exe): + paths = [''] + else: + paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] + + for path in paths: + fn = os.path.join(path, exe) + for s in suffixes: + f_ext = fn+s + if not os.path.islink(f_ext): + f_ext = realpath(f_ext) + if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): + log.info('Found executable %s' % f_ext) + _cache[key] = f_ext + return f_ext + + log.warn('Could not locate executable %s' % orig_exe) + return None + +############################################################ + +def _preserve_environment( names ): + log.debug('_preserve_environment(%r)' % (names)) + env = {name: os.environ.get(name) for name in names} + return env + +def _update_environment( **env ): + log.debug('_update_environment(...)') + for name, value in env.items(): + os.environ[name] = value or '' + +def _supports_fileno(stream): + """ + Returns True if 'stream' supports the file descriptor and allows fileno(). + """ + if hasattr(stream, 'fileno'): + try: + stream.fileno() + return True + except IOError: + return False + else: + return False + +def exec_command(command, execute_in='', use_shell=None, use_tee=None, + _with_python = 1, **env ): + """ + Return (status,output) of executed command. + + Parameters + ---------- + command : str + A concatenated string of executable and arguments. 
+ execute_in : str + Before running command ``cd execute_in`` and after ``cd -``. + use_shell : {bool, None}, optional + If True, execute ``sh -c command``. Default None (True) + use_tee : {bool, None}, optional + If True use tee. Default None (True) + + + Returns + ------- + res : str + Both stdout and stderr messages. + + Notes + ----- + On NT, DOS systems the returned status is correct for external commands. + Wild cards will not work for non-posix systems or when use_shell=0. + + """ + log.debug('exec_command(%r,%s)' % (command,\ + ','.join(['%s=%r'%kv for kv in env.items()]))) + + if use_tee is None: + use_tee = os.name=='posix' + if use_shell is None: + use_shell = os.name=='posix' + execute_in = os.path.abspath(execute_in) + oldcwd = os.path.abspath(os.getcwd()) + + if __name__[-12:] == 'exec_command': + exec_dir = os.path.dirname(os.path.abspath(__file__)) + elif os.path.isfile('exec_command.py'): + exec_dir = os.path.abspath('.') + else: + exec_dir = os.path.abspath(sys.argv[0]) + if os.path.isfile(exec_dir): + exec_dir = os.path.dirname(exec_dir) + + if oldcwd!=execute_in: + os.chdir(execute_in) + log.debug('New cwd: %s' % execute_in) + else: + log.debug('Retaining cwd: %s' % oldcwd) + + oldenv = _preserve_environment( list(env.keys()) ) + _update_environment( **env ) + + try: + st = _exec_command(command, + use_shell=use_shell, + use_tee=use_tee, + **env) + finally: + if oldcwd!=execute_in: + os.chdir(oldcwd) + log.debug('Restored cwd to %s' % oldcwd) + _update_environment(**oldenv) + + return st + + +def _exec_command(command, use_shell=None, use_tee = None, **env): + """ + Internal workhorse for exec_command(). + """ + if use_shell is None: + use_shell = os.name=='posix' + if use_tee is None: + use_tee = os.name=='posix' + + if os.name == 'posix' and use_shell: + # On POSIX, subprocess always uses /bin/sh, override + sh = os.environ.get('SHELL', '/bin/sh') + if is_sequence(command): + command = [sh, '-c', ' '.join(command)] + else: + command = [sh, '-c', command] + use_shell = False + + elif os.name == 'nt' and is_sequence(command): + # On Windows, join the string for CreateProcess() ourselves as + # subprocess does it a bit differently + command = ' '.join(_quote_arg(arg) for arg in command) + + # Inherit environment by default + env = env or None + try: + # universal_newlines is set to False so that communicate() + # will return bytes. We need to decode the output ourselves + # so that Python will not raise a UnicodeDecodeError when + # it encounters an invalid character; rather, we simply replace it + proc = subprocess.Popen(command, shell=use_shell, env=env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=False) + except EnvironmentError: + # Return 127, as os.spawn*() and /bin/sh do + return 127, '' + + text, err = proc.communicate() + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + text = text.decode(mylocale, errors='replace') + text = text.replace('\r\n', '\n') + # Another historical oddity + if text[-1:] == '\n': + text = text[:-1] + + # stdio uses bytes in python 2, so to avoid issues, we simply + # remove all non-ascii characters + if sys.version_info < (3, 0): + text = text.encode('ascii', errors='replace') + + if use_tee and text: + print(text) + return proc.returncode, text + + +def _quote_arg(arg): + """ + Quote the argument for safe use in a shell command line. + """ + # If there is a quote in the string, assume relevants parts of the + # string are already quoted (e.g. 
'-I"C:\\Program Files\\..."') + if '"' not in arg and ' ' in arg: + return '"%s"' % arg + return arg + +############################################################ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/exec_command.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/exec_command.pyc new file mode 100644 index 0000000..719e4b5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/exec_command.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/extension.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/extension.py new file mode 100644 index 0000000..935f3ee --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/extension.py @@ -0,0 +1,93 @@ +"""distutils.extension + +Provides the Extension class, used to describe C/C++ extension +modules in setup scripts. + +Overridden to support f2py. + +""" +from __future__ import division, absolute_import, print_function + +import sys +import re +from distutils.extension import Extension as old_Extension + +if sys.version_info[0] >= 3: + basestring = str + + +cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match +fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match + +class Extension(old_Extension): + def __init__ ( + self, name, sources, + include_dirs=None, + define_macros=None, + undef_macros=None, + library_dirs=None, + libraries=None, + runtime_library_dirs=None, + extra_objects=None, + extra_compile_args=None, + extra_link_args=None, + export_symbols=None, + swig_opts=None, + depends=None, + language=None, + f2py_options=None, + module_dirs=None, + extra_f77_compile_args=None, + extra_f90_compile_args=None,): + + old_Extension.__init__( + self, name, [], + include_dirs=include_dirs, + define_macros=define_macros, + undef_macros=undef_macros, + library_dirs=library_dirs, + libraries=libraries, + runtime_library_dirs=runtime_library_dirs, + extra_objects=extra_objects, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + export_symbols=export_symbols) + + # Avoid assert statements checking that sources contains strings: + self.sources = sources + + # Python 2.4 distutils new features + self.swig_opts = swig_opts or [] + # swig_opts is assumed to be a list. Here we handle the case where it + # is specified as a string instead. 
+        if isinstance(self.swig_opts, basestring):
+            import warnings
+            msg = "swig_opts is specified as a string instead of a list"
+            warnings.warn(msg, SyntaxWarning, stacklevel=2)
+            self.swig_opts = self.swig_opts.split()
+
+        # Python 2.3 distutils new features
+        self.depends = depends or []
+        self.language = language
+
+        # numpy_distutils features
+        self.f2py_options = f2py_options or []
+        self.module_dirs = module_dirs or []
+        self.extra_f77_compile_args = extra_f77_compile_args or []
+        self.extra_f90_compile_args = extra_f90_compile_args or []
+
+        return
+
+    def has_cxx_sources(self):
+        for source in self.sources:
+            if cxx_ext_re(str(source)):
+                return True
+        return False
+
+    def has_f2py_sources(self):
+        for source in self.sources:
+            if fortran_pyf_ext_re(source):
+                return True
+        return False
+
+# class Extension
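A quick sketch of how this f2py-aware Extension is typically used in a numpy.distutils setup script (my own illustration, not part of the vendored file; 'mypkg' and the source file names are hypothetical):

    from numpy.distutils.core import setup
    from numpy.distutils.extension import Extension

    ext = Extension('mypkg.flib',
                    sources=['mypkg/flib.pyf', 'mypkg/flib.f90'],
                    f2py_options=[])

    # .pyf/.f90 sources are recognised by fortran_pyf_ext_re above,
    # which is what routes the build through f2py.
    assert ext.has_f2py_sources()

    setup(name='mypkg', ext_modules=[ext])
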
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/extension.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/extension.pyc
new file mode 100644
index 0000000..be66d9a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/extension.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/__init__.py
new file mode 100644
index 0000000..bd3739a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/__init__.py
@@ -0,0 +1,1032 @@
+"""numpy.distutils.fcompiler
+
+Contains FCompiler, an abstract base class that defines the interface
+for the numpy.distutils Fortran compiler abstraction model.
+
+Terminology:
+
+To be consistent, where the term 'executable' is used, it means the single
+file, like 'gcc', that is executed, and should be a string. In contrast,
+'command' means the entire command line, like ['gcc', '-c', 'file.c'], and
+should be a list.
+
+But note that FCompiler.executables is actually a dictionary of commands.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',
+           'dummy_fortran_file']
+
+import os
+import sys
+import re
+import types
+
+from numpy.compat import open_latin1
+
+from distutils.sysconfig import get_python_lib
+from distutils.fancy_getopt import FancyGetopt
+from distutils.errors import DistutilsModuleError, \
+     DistutilsExecError, CompileError, LinkError, DistutilsPlatformError
+from distutils.util import split_quoted, strtobool
+
+from numpy.distutils.ccompiler import CCompiler, gen_lib_options
+from numpy.distutils import log
+from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
+    make_temp_file, get_shared_lib_extension
+from numpy.distutils.exec_command import find_executable
+from numpy.distutils.compat import get_exception
+from numpy.distutils import _shell_utils
+
+from .environment import EnvironmentConfig
+
+__metaclass__ = type
+
+class CompilerNotFound(Exception):
+    pass
+
+def flaglist(s):
+    if is_string(s):
+        return split_quoted(s)
+    else:
+        return s
+
+def str2bool(s):
+    if is_string(s):
+        return strtobool(s)
+    return bool(s)
+
+def is_sequence_of_strings(seq):
+    return is_sequence(seq) and all_strings(seq)
+
+class FCompiler(CCompiler):
+    """Abstract base class to define the interface that must be implemented
+    by real Fortran compiler classes.
+
+    Methods that subclasses may redefine:
+
+        update_executables(), find_executables(), get_version()
+        get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
+        get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
+        get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
+        get_flags_arch_f90(), get_flags_debug_f90(),
+        get_flags_fix(), get_flags_linker_so()
+
+    DON'T call these methods (except get_version) after
+    constructing a compiler instance or inside any other method.
+    All methods, except update_executables() and find_executables(),
+    may call the get_version() method.
+
+    After constructing a compiler instance, always call customize(dist=None)
+    method that finalizes compiler construction and makes the following
+    attributes available:
+      compiler_f77
+      compiler_f90
+      compiler_fix
+      linker_so
+      archiver
+      ranlib
+      libraries
+      library_dirs
+    """
+
+    # These are the environment variables and distutils keys used.
+    # Each configuration description is
+    # (<hook name or None>, <environment variable>, <key in distutils.cfg>,
+    #  <convert>, <append>)
+    # The hook names are handled by the self._environment_hook method.
+    #   - names starting with 'self.' call methods in this class
+    #   - names starting with 'exe.' return the key in the executables dict
+    #   - names like 'flags.YYY' return self.get_flag_YYY()
+    # convert is either None or a function to convert a string to the
+    # appropriate type used.
+
+    distutils_vars = EnvironmentConfig(
+        distutils_section='config_fc',
+        noopt = (None, None, 'noopt', str2bool, False),
+        noarch = (None, None, 'noarch', str2bool, False),
+        debug = (None, None, 'debug', str2bool, False),
+        verbose = (None, None, 'verbose', str2bool, False),
+    )
+
+    command_vars = EnvironmentConfig(
+        distutils_section='config_fc',
+        compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),
+        compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),
+        compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),
+        version_cmd = ('exe.version_cmd', None, None, None, False),
+        linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),
+        linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),
+        archiver = (None, 'AR', 'ar', None, False),
+        ranlib = (None, 'RANLIB', 'ranlib', None, False),
+    )
+
+    flag_vars = EnvironmentConfig(
+        distutils_section='config_fc',
+        f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),
+        f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),
+        free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),
+        fix = ('flags.fix', None, None, flaglist, False),
+        opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),
+        opt_f77 = ('flags.opt_f77', None, None, flaglist, False),
+        opt_f90 = ('flags.opt_f90', None, None, flaglist, False),
+        arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),
+        arch_f77 = ('flags.arch_f77', None, None, flaglist, False),
+        arch_f90 = ('flags.arch_f90', None, None, flaglist, False),
+        debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),
+        debug_f77 = ('flags.debug_f77', None, None, flaglist, False),
+        debug_f90 = ('flags.debug_f90', None, None, flaglist, False),
+        flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),
+        linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),
+        linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),
+        ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),
+    )
+
+    language_map = {'.f': 'f77',
+                    '.for': 'f77',
+                    '.F': 'f77',    # XXX: needs preprocessor
+                    '.ftn': 'f77',
+                    '.f77': 'f77',
+                    '.f90': 'f90',
+                    '.F90': 'f90',  # XXX: needs preprocessor
+                    '.f95':
'f90', + } + language_order = ['f90', 'f77'] + + + # These will be set by the subclass + + compiler_type = None + compiler_aliases = () + version_pattern = None + + possible_executables = [] + executables = { + 'version_cmd': ["f77", "-v"], + 'compiler_f77': ["f77"], + 'compiler_f90': ["f90"], + 'compiler_fix': ["f90", "-fixed"], + 'linker_so': ["f90", "-shared"], + 'linker_exe': ["f90"], + 'archiver': ["ar", "-cr"], + 'ranlib': None, + } + + # If compiler does not support compiling Fortran 90 then it can + # suggest using another compiler. For example, gnu would suggest + # gnu95 compiler type when there are F90 sources. + suggested_f90_compiler = None + + compile_switch = "-c" + object_switch = "-o " # Ending space matters! It will be stripped + # but if it is missing then object_switch + # will be prefixed to object file name by + # string concatenation. + library_switch = "-o " # Ditto! + + # Switch to specify where module files are created and searched + # for USE statement. Normally it is a string and also here ending + # space matters. See above. + module_dir_switch = None + + # Switch to specify where module files are searched for USE statement. + module_include_switch = '-I' + + pic_flags = [] # Flags to create position-independent code + + src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] + obj_extension = ".o" + + shared_lib_extension = get_shared_lib_extension() + static_lib_extension = ".a" # or .lib + static_lib_format = "lib%s%s" # or %s%s + shared_lib_format = "%s%s" + exe_extension = "" + + _exe_cache = {} + + _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', + 'ranlib'] + + # This will be set by new_fcompiler when called in + # command/{build_ext.py, build_clib.py, config.py} files. + c_compiler = None + + # extra_{f77,f90}_compile_args are set by build_ext.build_extension method + extra_f77_compile_args = [] + extra_f90_compile_args = [] + + def __init__(self, *args, **kw): + CCompiler.__init__(self, *args, **kw) + self.distutils_vars = self.distutils_vars.clone(self._environment_hook) + self.command_vars = self.command_vars.clone(self._environment_hook) + self.flag_vars = self.flag_vars.clone(self._environment_hook) + self.executables = self.executables.copy() + for e in self._executable_keys: + if e not in self.executables: + self.executables[e] = None + + # Some methods depend on .customize() being called first, so + # this keeps track of whether that's happened yet. + self._is_customised = False + + def __copy__(self): + obj = self.__new__(self.__class__) + obj.__dict__.update(self.__dict__) + obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) + obj.command_vars = obj.command_vars.clone(obj._environment_hook) + obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) + obj.executables = obj.executables.copy() + return obj + + def copy(self): + return self.__copy__() + + # Use properties for the attributes used by CCompiler. Setting them + # as attributes from the self.executables dictionary is error-prone, + # so we get them from there each time. 
+    def _command_property(key):
+        def fget(self):
+            assert self._is_customised
+            return self.executables[key]
+        return property(fget=fget)
+    version_cmd = _command_property('version_cmd')
+    compiler_f77 = _command_property('compiler_f77')
+    compiler_f90 = _command_property('compiler_f90')
+    compiler_fix = _command_property('compiler_fix')
+    linker_so = _command_property('linker_so')
+    linker_exe = _command_property('linker_exe')
+    archiver = _command_property('archiver')
+    ranlib = _command_property('ranlib')
+
+    # Make our terminology consistent.
+    def set_executable(self, key, value):
+        self.set_command(key, value)
+
+    def set_commands(self, **kw):
+        for k, v in kw.items():
+            self.set_command(k, v)
+
+    def set_command(self, key, value):
+        if not key in self._executable_keys:
+            raise ValueError(
+                "unknown executable '%s' for class %s" %
+                (key, self.__class__.__name__))
+        if is_string(value):
+            value = split_quoted(value)
+        assert value is None or is_sequence_of_strings(value[1:]), (key, value)
+        self.executables[key] = value
+
+    ######################################################################
+    ## Methods that subclasses may redefine. But don't call these methods!
+    ## They are private to FCompiler class and may return unexpected
+    ## results if used elsewhere. So, you have been warned..
+
+    def find_executables(self):
+        """Go through the self.executables dictionary, and attempt to
+        find and assign appropriate executables.
+
+        Executable names are looked for in the environment (environment
+        variables, the distutils.cfg, and command line), the 0th-element of
+        the command list, and the self.possible_executables list.
+
+        Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
+        or the Fortran 90 compiler executable is used, unless overridden
+        by an environment setting.
+
+        Subclasses should call this if overridden.
+        """
+        assert self._is_customised
+        exe_cache = self._exe_cache
+        def cached_find_executable(exe):
+            if exe in exe_cache:
+                return exe_cache[exe]
+            fc_exe = find_executable(exe)
+            exe_cache[exe] = exe_cache[fc_exe] = fc_exe
+            return fc_exe
+        def verify_command_form(name, value):
+            if value is not None and not is_sequence_of_strings(value):
+                raise ValueError(
+                    "%s value %r is invalid in class %s" %
+                    (name, value, self.__class__.__name__))
+        def set_exe(exe_key, f77=None, f90=None):
+            cmd = self.executables.get(exe_key, None)
+            if not cmd:
+                return None
+            # Note that we get cmd[0] here if the environment doesn't
+            # have anything set
+            exe_from_environ = getattr(self.command_vars, exe_key)
+            if not exe_from_environ:
+                possibles = [f90, f77] + self.possible_executables
+            else:
+                possibles = [exe_from_environ] + self.possible_executables
+
+            seen = set()
+            unique_possibles = []
+            for e in possibles:
+                if e == '<F77>':
+                    e = f77
+                elif e == '<F90>':
+                    e = f90
+                if not e or e in seen:
+                    continue
+                seen.add(e)
+                unique_possibles.append(e)
+
+            for exe in unique_possibles:
+                fc_exe = cached_find_executable(exe)
+                if fc_exe:
+                    cmd[0] = fc_exe
+                    return fc_exe
+            self.set_command(exe_key, None)
+            return None
+
+        ctype = self.compiler_type
+        f90 = set_exe('compiler_f90')
+        if not f90:
+            f77 = set_exe('compiler_f77')
+            if f77:
+                log.warn('%s: no Fortran 90 compiler found' % ctype)
+            else:
+                raise CompilerNotFound('%s: f90 nor f77' % ctype)
+        else:
+            f77 = set_exe('compiler_f77', f90=f90)
+            if not f77:
+                log.warn('%s: no Fortran 77 compiler found' % ctype)
+            set_exe('compiler_fix', f90=f90)
+
+        set_exe('linker_so', f77=f77, f90=f90)
+        set_exe('linker_exe', f77=f77, f90=f90)
+        set_exe('version_cmd', f77=f77, f90=f90)
+        set_exe('archiver')
+        set_exe('ranlib')
+
+    def update_executables(self):
+        """Called at the beginning of customisation. Subclasses should
+        override this if they need to set up the executables dictionary.
+
+        Note that self.find_executables() is run afterwards, so the
+        self.executables dictionary values can contain <F77> or <F90> as
+        the command, which will be replaced by the found F77 or F90
+        compiler.
+        """
+        pass
+
+    def get_flags(self):
+        """List of flags common to all compiler types."""
+        return [] + self.pic_flags
+
+    def _get_command_flags(self, key):
+        cmd = self.executables.get(key, None)
+        if cmd is None:
+            return []
+        return cmd[1:]
+
+    def get_flags_f77(self):
+        """List of Fortran 77 specific flags."""
+        return self._get_command_flags('compiler_f77')
+    def get_flags_f90(self):
+        """List of Fortran 90 specific flags."""
+        return self._get_command_flags('compiler_f90')
+    def get_flags_free(self):
+        """List of Fortran 90 free format specific flags."""
+        return []
+    def get_flags_fix(self):
+        """List of Fortran 90 fixed format specific flags."""
+        return self._get_command_flags('compiler_fix')
+    def get_flags_linker_so(self):
+        """List of linker flags to build a shared library."""
+        return self._get_command_flags('linker_so')
+    def get_flags_linker_exe(self):
+        """List of linker flags to build an executable."""
+        return self._get_command_flags('linker_exe')
+    def get_flags_ar(self):
+        """List of archiver flags."""
""" + return self._get_command_flags('archiver') + def get_flags_opt(self): + """List of architecture independent compiler flags.""" + return [] + def get_flags_arch(self): + """List of architecture dependent compiler flags.""" + return [] + def get_flags_debug(self): + """List of compiler flags to compile with debugging information.""" + return [] + + get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt + get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch + get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug + + def get_libraries(self): + """List of compiler libraries.""" + return self.libraries[:] + def get_library_dirs(self): + """List of compiler library directories.""" + return self.library_dirs[:] + + def get_version(self, force=False, ok_status=[0]): + assert self._is_customised + version = CCompiler.get_version(self, force=force, ok_status=ok_status) + if version is None: + raise CompilerNotFound() + return version + + + ############################################################ + + ## Public methods: + + def customize(self, dist = None): + """Customize Fortran compiler. + + This method gets Fortran compiler specific information from + (i) class definition, (ii) environment, (iii) distutils config + files, and (iv) command line (later overrides earlier). + + This method should be always called after constructing a + compiler instance. But not in __init__ because Distribution + instance is needed for (iii) and (iv). + """ + log.info('customize %s' % (self.__class__.__name__)) + + self._is_customised = True + + self.distutils_vars.use_distribution(dist) + self.command_vars.use_distribution(dist) + self.flag_vars.use_distribution(dist) + + self.update_executables() + + # find_executables takes care of setting the compiler commands, + # version_cmd, linker_so, linker_exe, ar, and ranlib + self.find_executables() + + noopt = self.distutils_vars.get('noopt', False) + noarch = self.distutils_vars.get('noarch', noopt) + debug = self.distutils_vars.get('debug', False) + + f77 = self.command_vars.compiler_f77 + f90 = self.command_vars.compiler_f90 + + f77flags = [] + f90flags = [] + freeflags = [] + fixflags = [] + + if f77: + f77 = _shell_utils.NativeParser.split(f77) + f77flags = self.flag_vars.f77 + if f90: + f90 = _shell_utils.NativeParser.split(f90) + f90flags = self.flag_vars.f90 + freeflags = self.flag_vars.free + # XXX Assuming that free format is default for f90 compiler. + fix = self.command_vars.compiler_fix + # NOTE: this and similar examples are probably just + # exluding --coverage flag when F90 = gfortran --coverage + # instead of putting that flag somewhere more appropriate + # this and similar examples where a Fortran compiler + # environment variable has been customized by CI or a user + # should perhaps eventually be more throughly tested and more + # robustly handled + if fix: + fix = _shell_utils.NativeParser.split(fix) + fixflags = self.flag_vars.fix + f90flags + + oflags, aflags, dflags = [], [], [] + # examine get_flags__ for extra flags + # only add them if the method is different from get_flags_ + def get_flags(tag, flags): + # note that self.flag_vars. 
+            flags.extend(getattr(self.flag_vars, tag))
+            this_get = getattr(self, 'get_flags_' + tag)
+            for name, c, flagvar in [('f77', f77, f77flags),
+                                     ('f90', f90, f90flags),
+                                     ('f90', fix, fixflags)]:
+                t = '%s_%s' % (tag, name)
+                if c and this_get is not getattr(self, 'get_flags_' + t):
+                    flagvar.extend(getattr(self.flag_vars, t))
+        if not noopt:
+            get_flags('opt', oflags)
+            if not noarch:
+                get_flags('arch', aflags)
+        if debug:
+            get_flags('debug', dflags)
+
+        fflags = self.flag_vars.flags + dflags + oflags + aflags
+
+        if f77:
+            self.set_commands(compiler_f77=f77+f77flags+fflags)
+        if f90:
+            self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
+        if fix:
+            self.set_commands(compiler_fix=fix+fixflags+fflags)
+
+
+        #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
+        linker_so = self.linker_so
+        if linker_so:
+            linker_so_flags = self.flag_vars.linker_so
+            if sys.platform.startswith('aix'):
+                python_lib = get_python_lib(standard_lib=1)
+                ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
+                python_exp = os.path.join(python_lib, 'config', 'python.exp')
+                linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
+            self.set_commands(linker_so=linker_so+linker_so_flags)
+
+        linker_exe = self.linker_exe
+        if linker_exe:
+            linker_exe_flags = self.flag_vars.linker_exe
+            self.set_commands(linker_exe=linker_exe+linker_exe_flags)
+
+        ar = self.command_vars.archiver
+        if ar:
+            arflags = self.flag_vars.ar
+            self.set_commands(archiver=[ar]+arflags)
+
+        self.set_library_dirs(self.get_library_dirs())
+        self.set_libraries(self.get_libraries())
+
+    def dump_properties(self):
+        """Print out the attributes of a compiler instance."""
+        props = []
+        for key in list(self.executables.keys()) + \
+                ['version', 'libraries', 'library_dirs',
+                 'object_switch', 'compile_switch']:
+            if hasattr(self, key):
+                v = getattr(self, key)
+                props.append((key, None, '= '+repr(v)))
+        props.sort()
+
+        pretty_printer = FancyGetopt(props)
+        for l in pretty_printer.generate_help("%s instance properties:" \
+                                              % (self.__class__.__name__)):
+            if l[:4]=='  --':
+                l = '  ' + l[4:]
+            print(l)
+
+    ###################
+
+    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+        """Compile 'src' to product 'obj'."""
+        src_flags = {}
+        if is_f_file(src) and not has_f90_header(src):
+            flavor = ':f77'
+            compiler = self.compiler_f77
+            src_flags = get_f77flags(src)
+            extra_compile_args = self.extra_f77_compile_args or []
+        elif is_free_format(src):
+            flavor = ':f90'
+            compiler = self.compiler_f90
+            if compiler is None:
+                raise DistutilsExecError('f90 not supported by %s needed for %s'\
+                      % (self.__class__.__name__, src))
+            extra_compile_args = self.extra_f90_compile_args or []
+        else:
+            flavor = ':fix'
+            compiler = self.compiler_fix
+            if compiler is None:
+                raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\
+                      % (self.__class__.__name__, src))
+            extra_compile_args = self.extra_f90_compile_args or []
+        if self.object_switch[-1]==' ':
+            o_args = [self.object_switch.strip(), obj]
+        else:
+            o_args = [self.object_switch.strip()+obj]
+
+        assert self.compile_switch.strip()
+        s_args = [self.compile_switch, src]
+
+        if extra_compile_args:
+            log.info('extra %s options: %r' \
+                     % (flavor[1:], ' '.join(extra_compile_args)))
+
+        extra_flags = src_flags.get(self.compiler_type, [])
+        if extra_flags:
+            log.info('using compile options from source: %r' \
+                     % ' '.join(extra_flags))
+
+        command = compiler + cc_args + extra_flags + s_args + o_args \
+                  + extra_postargs + extra_compile_args
+
display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, + src) + try: + self.spawn(command, display=display) + except DistutilsExecError: + msg = str(get_exception()) + raise CompileError(msg) + + def module_options(self, module_dirs, module_build_dir): + options = [] + if self.module_dir_switch is not None: + if self.module_dir_switch[-1]==' ': + options.extend([self.module_dir_switch.strip(), module_build_dir]) + else: + options.append(self.module_dir_switch.strip()+module_build_dir) + else: + print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) + print('XXX: Fix module_dir_switch for ', self.__class__.__name__) + if self.module_include_switch is not None: + for d in [module_build_dir]+module_dirs: + options.append('%s%s' % (self.module_include_switch, d)) + else: + print('XXX: module_dirs=%r option ignored' % (module_dirs)) + print('XXX: Fix module_include_switch for ', self.__class__.__name__) + return options + + def library_option(self, lib): + return "-l" + lib + def library_dir_option(self, dir): + return "-L" + dir + + def link(self, target_desc, objects, + output_filename, output_dir=None, libraries=None, + library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None): + objects, output_dir = self._fix_object_args(objects, output_dir) + libraries, library_dirs, runtime_library_dirs = \ + self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) + + lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, + libraries) + if is_string(output_dir): + output_filename = os.path.join(output_dir, output_filename) + elif output_dir is not None: + raise TypeError("'output_dir' must be a string or None") + + if self._need_link(objects, output_filename): + if self.library_switch[-1]==' ': + o_args = [self.library_switch.strip(), output_filename] + else: + o_args = [self.library_switch.strip()+output_filename] + + if is_string(self.objects): + ld_args = objects + [self.objects] + else: + ld_args = objects + self.objects + ld_args = ld_args + lib_opts + o_args + if debug: + ld_args[:0] = ['-g'] + if extra_preargs: + ld_args[:0] = extra_preargs + if extra_postargs: + ld_args.extend(extra_postargs) + self.mkpath(os.path.dirname(output_filename)) + if target_desc == CCompiler.EXECUTABLE: + linker = self.linker_exe[:] + else: + linker = self.linker_so[:] + command = linker + ld_args + try: + self.spawn(command) + except DistutilsExecError: + msg = str(get_exception()) + raise LinkError(msg) + else: + log.debug("skipping %s (up-to-date)", output_filename) + + def _environment_hook(self, name, hook_name): + if hook_name is None: + return None + if is_string(hook_name): + if hook_name.startswith('self.'): + hook_name = hook_name[5:] + hook = getattr(self, hook_name) + return hook() + elif hook_name.startswith('exe.'): + hook_name = hook_name[4:] + var = self.executables[hook_name] + if var: + return var[0] + else: + return None + elif hook_name.startswith('flags.'): + hook_name = hook_name[6:] + hook = getattr(self, 'get_flags_' + hook_name) + return hook() + else: + return hook_name() + + def can_ccompiler_link(self, ccompiler): + """ + Check if the given C compiler can link objects produced by + this compiler. + """ + return True + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. 
+ + Parameters + ---------- + objects : list + List of object files to include. + output_dir : str + Output directory to place generated object files. + extra_dll_dir : str + Output directory to place extra DLL files that need to be + included on Windows. + + Returns + ------- + converted_objects : list of str + List of converted object files. + Note that the number of output files is not necessarily + the same as inputs. + + """ + raise NotImplementedError() + + ## class FCompiler + +_default_compilers = ( + # sys.platform mappings + ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', + 'intelvem', 'intelem', 'flang')), + ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), + ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', + 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor')), + ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), + ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), + ('irix.*', ('mips', 'gnu', 'gnu95',)), + ('aix.*', ('ibm', 'gnu', 'gnu95',)), + # os.name mappings + ('posix', ('gnu', 'gnu95',)), + ('nt', ('gnu', 'gnu95',)), + ('mac', ('gnu95', 'gnu', 'pg')), + ) + +fcompiler_class = None +fcompiler_aliases = None + +def load_all_fcompiler_classes(): + """Cache all the FCompiler classes found in modules in the + numpy.distutils.fcompiler package. + """ + from glob import glob + global fcompiler_class, fcompiler_aliases + if fcompiler_class is not None: + return + pys = os.path.join(os.path.dirname(__file__), '*.py') + fcompiler_class = {} + fcompiler_aliases = {} + for fname in glob(pys): + module_name, ext = os.path.splitext(os.path.basename(fname)) + module_name = 'numpy.distutils.fcompiler.' + module_name + __import__ (module_name) + module = sys.modules[module_name] + if hasattr(module, 'compilers'): + for cname in module.compilers: + klass = getattr(module, cname) + desc = (klass.compiler_type, klass, klass.description) + fcompiler_class[klass.compiler_type] = desc + for alias in klass.compiler_aliases: + if alias in fcompiler_aliases: + raise ValueError("alias %r defined for both %s and %s" + % (alias, klass.__name__, + fcompiler_aliases[alias][1].__name__)) + fcompiler_aliases[alias] = desc + +def _find_existing_fcompiler(compiler_types, + osname=None, platform=None, + requiref90=False, + c_compiler=None): + from numpy.distutils.core import get_distribution + dist = get_distribution(always=True) + for compiler_type in compiler_types: + v = None + try: + c = new_fcompiler(plat=platform, compiler=compiler_type, + c_compiler=c_compiler) + c.customize(dist) + v = c.get_version() + if requiref90 and c.compiler_f90 is None: + v = None + new_compiler = c.suggested_f90_compiler + if new_compiler: + log.warn('Trying %r compiler as suggested by %r ' + 'compiler for f90 support.' % (compiler_type, + new_compiler)) + c = new_fcompiler(plat=platform, compiler=new_compiler, + c_compiler=c_compiler) + c.customize(dist) + v = c.get_version() + if v is not None: + compiler_type = new_compiler + if requiref90 and c.compiler_f90 is None: + raise ValueError('%s does not support compiling f90 codes, ' + 'skipping.' 
% (c.__class__.__name__)) + except DistutilsModuleError: + log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) + except CompilerNotFound: + log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) + if v is not None: + return compiler_type + return None + +def available_fcompilers_for_platform(osname=None, platform=None): + if osname is None: + osname = os.name + if platform is None: + platform = sys.platform + matching_compiler_types = [] + for pattern, compiler_type in _default_compilers: + if re.match(pattern, platform) or re.match(pattern, osname): + for ct in compiler_type: + if ct not in matching_compiler_types: + matching_compiler_types.append(ct) + if not matching_compiler_types: + matching_compiler_types.append('gnu') + return matching_compiler_types + +def get_default_fcompiler(osname=None, platform=None, requiref90=False, + c_compiler=None): + """Determine the default Fortran compiler to use for the given + platform.""" + matching_compiler_types = available_fcompilers_for_platform(osname, + platform) + log.info("get_default_fcompiler: matching types: '%s'", + matching_compiler_types) + compiler_type = _find_existing_fcompiler(matching_compiler_types, + osname=osname, + platform=platform, + requiref90=requiref90, + c_compiler=c_compiler) + return compiler_type + +# Flag to avoid rechecking for Fortran compiler every time +failed_fcompilers = set() + +def new_fcompiler(plat=None, + compiler=None, + verbose=0, + dry_run=0, + force=0, + requiref90=False, + c_compiler = None): + """Generate an instance of some FCompiler subclass for the supplied + platform/compiler combination. + """ + global failed_fcompilers + fcompiler_key = (plat, compiler) + if fcompiler_key in failed_fcompilers: + return None + + load_all_fcompiler_classes() + if plat is None: + plat = os.name + if compiler is None: + compiler = get_default_fcompiler(plat, requiref90=requiref90, + c_compiler=c_compiler) + if compiler in fcompiler_class: + module_name, klass, long_description = fcompiler_class[compiler] + elif compiler in fcompiler_aliases: + module_name, klass, long_description = fcompiler_aliases[compiler] + else: + msg = "don't know how to compile Fortran code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler." % compiler + msg = msg + " Supported compilers are: %s)" \ + % (','.join(fcompiler_class.keys())) + log.warn(msg) + failed_fcompilers.add(fcompiler_key) + return None + + compiler = klass(verbose=verbose, dry_run=dry_run, force=force) + compiler.c_compiler = c_compiler + return compiler + +def show_fcompilers(dist=None): + """Print list of available compilers (used by the "--help-fcompiler" + option to "config_fc"). 
+ """ + if dist is None: + from distutils.dist import Distribution + from numpy.distutils.command.config_compiler import config_fc + dist = Distribution() + dist.script_name = os.path.basename(sys.argv[0]) + dist.script_args = ['config_fc'] + sys.argv[1:] + try: + dist.script_args.remove('--help-fcompiler') + except ValueError: + pass + dist.cmdclass['config_fc'] = config_fc + dist.parse_config_files() + dist.parse_command_line() + compilers = [] + compilers_na = [] + compilers_ni = [] + if not fcompiler_class: + load_all_fcompiler_classes() + platform_compilers = available_fcompilers_for_platform() + for compiler in platform_compilers: + v = None + log.set_verbosity(-2) + try: + c = new_fcompiler(compiler=compiler, verbose=dist.verbose) + c.customize(dist) + v = c.get_version() + except (DistutilsModuleError, CompilerNotFound): + e = get_exception() + log.debug("show_fcompilers: %s not found" % (compiler,)) + log.debug(repr(e)) + + if v is None: + compilers_na.append(("fcompiler="+compiler, None, + fcompiler_class[compiler][2])) + else: + c.dump_properties() + compilers.append(("fcompiler="+compiler, None, + fcompiler_class[compiler][2] + ' (%s)' % v)) + + compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) + compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) + for fc in compilers_ni] + + compilers.sort() + compilers_na.sort() + compilers_ni.sort() + pretty_printer = FancyGetopt(compilers) + pretty_printer.print_help("Fortran compilers found:") + pretty_printer = FancyGetopt(compilers_na) + pretty_printer.print_help("Compilers available for this " + "platform, but not found:") + if compilers_ni: + pretty_printer = FancyGetopt(compilers_ni) + pretty_printer.print_help("Compilers not available on this platform:") + print("For compiler details, run 'config_fc --verbose' setup command.") + + +def dummy_fortran_file(): + fo, name = make_temp_file(suffix='.f') + fo.write(" subroutine dummy()\n end\n") + fo.close() + return name[:-2] + + +is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search +_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search +_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search +_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match + +def is_free_format(file): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. + result = 0 + f = open_latin1(file, 'r') + line = f.readline() + n = 10000 # the number of non-comment lines to scan for hints + if _has_f_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = 1 + while n>0 and line: + line = line.rstrip() + if line and line[0]!='!': + n -= 1 + if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': + result = 1 + break + line = f.readline() + f.close() + return result + +def has_f90_header(src): + f = open_latin1(src, 'r') + line = f.readline() + f.close() + return _has_f90_header(line) or _has_fix_header(line) + +_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P\w+)\s*\)\s*=\s*(?P.*)', re.I) +def get_f77flags(src): + """ + Search the first 20 lines of fortran 77 code for line pattern + `CF77FLAGS()=` + Return a dictionary {:}. 
+ """ + flags = {} + f = open_latin1(src, 'r') + i = 0 + for line in f: + i += 1 + if i>20: break + m = _f77flags_re.match(line) + if not m: continue + fcname = m.group('fcname').strip() + fflags = m.group('fflags').strip() + flags[fcname] = split_quoted(fflags) + f.close() + return flags + +# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags + +if __name__ == '__main__': + show_fcompilers() diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/__init__.pyc new file mode 100644 index 0000000..e3d7a93 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/absoft.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/absoft.py new file mode 100644 index 0000000..d14fee0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/absoft.py @@ -0,0 +1,158 @@ + +# http://www.absoft.com/literature/osxuserguide.pdf +# http://www.absoft.com/documentation.html + +# Notes: +# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py +# generated extension modules (works for f2py v2.45.241_1936 and up) +from __future__ import division, absolute_import, print_function + +import os + +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file +from numpy.distutils.misc_util import cyg2win32 + +compilers = ['AbsoftFCompiler'] + +class AbsoftFCompiler(FCompiler): + + compiler_type = 'absoft' + description = 'Absoft Corp Fortran Compiler' + #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' + version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ + r' (?P[^\s*,]*)(.*?Absoft Corp|)' + + # on windows: f90 -V -c dummy.f + # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 + + # samt5735(8)$ f90 -V -c dummy.f + # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 + # Note that fink installs g77 as f77, so need to use f90 for detection. + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : ["f77"], + 'compiler_fix' : ["f90"], + 'compiler_f90' : ["f90"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + if os.name=='nt': + library_switch = '/out:' #No space after /out:! + + module_dir_switch = None + module_include_switch = '-p' + + def update_executables(self): + f = cyg2win32(dummy_fortran_file()) + self.executables['version_cmd'] = ['', '-V', '-c', + f+'.f', '-o', f+'.o'] + + def get_flags_linker_so(self): + if os.name=='nt': + opt = ['/dll'] + # The "-K shared" switches are being left in for pre-9.0 versions + # of Absoft though I don't think versions earlier than 9 can + # actually be used to build shared libraries. In fact, version + # 8 of Absoft doesn't recognize "-K shared" and will fail. 
+ elif self.get_version() >= '9.0': + opt = ['-shared'] + else: + opt = ["-K", "shared"] + return opt + + def library_dir_option(self, dir): + if os.name=='nt': + return ['-link', '/PATH:%s' % (dir)] + return "-L" + dir + + def library_option(self, lib): + if os.name=='nt': + return '%s.lib' % (lib) + return "-l" + lib + + def get_library_dirs(self): + opt = FCompiler.get_library_dirs(self) + d = os.environ.get('ABSOFT') + if d: + if self.get_version() >= '10.0': + # use shared libraries, the static libraries were not compiled -fPIC + prefix = 'sh' + else: + prefix = '' + if cpu.is_64bit(): + suffix = '64' + else: + suffix = '' + opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) + return opt + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + if self.get_version() >= '11.0': + opt.extend(['af90math', 'afio', 'af77math', 'amisc']) + elif self.get_version() >= '10.0': + opt.extend(['af90math', 'afio', 'af77math', 'U77']) + elif self.get_version() >= '8.0': + opt.extend(['f90math', 'fio', 'f77math', 'U77']) + else: + opt.extend(['fio', 'f90math', 'fmath', 'U77']) + if os.name =='nt': + opt.append('COMDLG32') + return opt + + def get_flags(self): + opt = FCompiler.get_flags(self) + if os.name != 'nt': + opt.extend(['-s']) + if self.get_version(): + if self.get_version()>='8.2': + opt.append('-fpic') + return opt + + def get_flags_f77(self): + opt = FCompiler.get_flags_f77(self) + opt.extend(['-N22', '-N90', '-N110']) + v = self.get_version() + if os.name == 'nt': + if v and v>='8.0': + opt.extend(['-f', '-N15']) + else: + opt.append('-f') + if v: + if v<='4.6': + opt.append('-B108') + else: + # Though -N15 is undocumented, it works with + # Absoft 8.0 on Linux + opt.append('-N15') + return opt + + def get_flags_f90(self): + opt = FCompiler.get_flags_f90(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + if self.get_version(): + if self.get_version()>'4.6': + opt.extend(["-YDEALLOC=ALL"]) + return opt + + def get_flags_fix(self): + opt = FCompiler.get_flags_fix(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + opt.extend(["-f", "fixed"]) + return opt + + def get_flags_opt(self): + opt = ['-O'] + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/absoft.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/absoft.pyc new file mode 100644 index 0000000..dafc02d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/absoft.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/compaq.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/compaq.py new file mode 100644 index 0000000..07d5027 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/compaq.py @@ -0,0 +1,126 @@ + +#http://www.compaq.com/fortran/docs/ +from __future__ import division, absolute_import, print_function + +import os +import sys + +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils.compat import get_exception +from distutils.errors import DistutilsPlatformError + +compilers = ['CompaqFCompiler'] +if os.name != 'posix' or sys.platform[:6] == 'cygwin' : 
+    # Otherwise we'd get a false positive on posix systems with
+    # case-insensitive filesystems (like darwin), because we'll pick
+    # up /bin/df
+    compilers.append('CompaqVisualFCompiler')
+
+class CompaqFCompiler(FCompiler):
+
+    compiler_type = 'compaq'
+    description = 'Compaq Fortran Compiler'
+    version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
+
+    if sys.platform[:5]=='linux':
+        fc_exe = 'fort'
+    else:
+        fc_exe = 'f90'
+
+    executables = {
+        'version_cmd'  : ['<F90>', "-version"],
+        'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
+        'compiler_fix' : [fc_exe, "-fixed"],
+        'compiler_f90' : [fc_exe],
+        'linker_so'    : ['<F90>'],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+        }
+
+    module_dir_switch = '-module ' # not tested
+    module_include_switch = '-I'
+
+    def get_flags(self):
+        return ['-assume no2underscore', '-nomixed_str_len_arg']
+    def get_flags_debug(self):
+        return ['-g', '-check bounds']
+    def get_flags_opt(self):
+        return ['-O4', '-align dcommons', '-assume bigarrays',
+                '-assume nozsize', '-math_library fast']
+    def get_flags_arch(self):
+        return ['-arch host', '-tune host']
+    def get_flags_linker_so(self):
+        if sys.platform[:5]=='linux':
+            return ['-shared']
+        return ['-shared', '-Wl,-expect_unresolved,*']
+
+class CompaqVisualFCompiler(FCompiler):
+
+    compiler_type = 'compaqv'
+    description = 'DIGITAL or Compaq Visual Fortran Compiler'
+    version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
+                       r' Version (?P<version>[^\s]*).*')
+
+    compile_switch = '/compile_only'
+    object_switch = '/object:'
+    library_switch = '/OUT:'      #No space after /OUT:!
+
+    static_lib_extension = ".lib"
+    static_lib_format = "%s%s"
+    module_dir_switch = '/module:'
+    module_include_switch = '/I'
+
+    ar_exe = 'lib.exe'
+    fc_exe = 'DF'
+
+    if sys.platform=='win32':
+        from numpy.distutils.msvccompiler import MSVCCompiler
+
+        try:
+            m = MSVCCompiler()
+            m.initialize()
+            ar_exe = m.lib
+        except DistutilsPlatformError:
+            pass
+        except AttributeError:
+            msg = get_exception()
+            if '_MSVCCompiler__root' in str(msg):
+                print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg))
+            else:
+                raise
+        except IOError:
+            e = get_exception()
+            if not "vcvarsall.bat" in str(e):
+                print("Unexpected IOError in", __file__)
+                raise e
+        except ValueError:
+            e = get_exception()
+            if not "path']" in str(e):
+                print("Unexpected ValueError in", __file__)
+                raise e
+
+    executables = {
+        'version_cmd'  : ['<F90>', "/what"],
+        'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
+        'compiler_fix' : [fc_exe, "/fixed"],
+        'compiler_f90' : [fc_exe],
+        'linker_so'    : ['<F90>'],
+        'archiver'     : [ar_exe, "/OUT:"],
+        'ranlib'       : None
+        }
+
+    def get_flags(self):
+        return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
+                '/names:lowercase', '/assume:underscore']
+    def get_flags_opt(self):
+        return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
+    def get_flags_arch(self):
+        return ['/threads']
+    def get_flags_debug(self):
+        return ['/debug']
+
+if __name__ == '__main__':
+    from distutils import log
+    log.set_verbosity(2)
+    from numpy.distutils import customized_fcompiler
+    print(customized_fcompiler(compiler='compaq').get_version())
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/compaq.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/compaq.pyc
new file mode 100644
index 0000000..d95be3d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/compaq.pyc differ
diff --git 
a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/environment.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/environment.py new file mode 100644 index 0000000..4238f35 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/environment.py @@ -0,0 +1,88 @@ +from __future__ import division, absolute_import, print_function + +import os +import warnings +from distutils.dist import Distribution + +__metaclass__ = type + +class EnvironmentConfig(object): + def __init__(self, distutils_section='ALL', **kw): + self._distutils_section = distutils_section + self._conf_keys = kw + self._conf = None + self._hook_handler = None + + def dump_variable(self, name): + conf_desc = self._conf_keys[name] + hook, envvar, confvar, convert, append = conf_desc + if not convert: + convert = lambda x : x + print('%s.%s:' % (self._distutils_section, name)) + v = self._hook_handler(name, hook) + print(' hook : %s' % (convert(v),)) + if envvar: + v = os.environ.get(envvar, None) + print(' environ: %s' % (convert(v),)) + if confvar and self._conf: + v = self._conf.get(confvar, (None, None))[1] + print(' config : %s' % (convert(v),)) + + def dump_variables(self): + for name in self._conf_keys: + self.dump_variable(name) + + def __getattr__(self, name): + try: + conf_desc = self._conf_keys[name] + except KeyError: + raise AttributeError(name) + return self._get_var(name, conf_desc) + + def get(self, name, default=None): + try: + conf_desc = self._conf_keys[name] + except KeyError: + return default + var = self._get_var(name, conf_desc) + if var is None: + var = default + return var + + def _get_var(self, name, conf_desc): + hook, envvar, confvar, convert, append = conf_desc + var = self._hook_handler(name, hook) + if envvar is not None: + envvar_contents = os.environ.get(envvar) + if envvar_contents is not None: + if var and append: + if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1': + var = var + [envvar_contents] + else: + var = envvar_contents + if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys(): + msg = "{} is used as is, not appended ".format(envvar) + \ + "to flags already defined " + \ + "by numpy.distutils! Use NPY_DISTUTILS_APPEND_FLAGS=1 " + \ + "to obtain appending behavior instead (this " + \ + "behavior will become default in a future release)." 
+                            warnings.warn(msg, UserWarning, stacklevel=3)
+                else:
+                    var = envvar_contents
+        if confvar is not None and self._conf:
+            var = self._conf.get(confvar, (None, var))[1]
+        if convert is not None:
+            var = convert(var)
+        return var
+
+    def clone(self, hook_handler):
+        ec = self.__class__(distutils_section=self._distutils_section,
+                            **self._conf_keys)
+        ec._hook_handler = hook_handler
+        return ec
+
+    def use_distribution(self, dist):
+        if isinstance(dist, Distribution):
+            self._conf = dist.get_option_dict(self._distutils_section)
+        else:
+            self._conf = dist
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/environment.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/environment.pyc
new file mode 100644
index 0000000..6569481
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/environment.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/g95.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/g95.py
new file mode 100644
index 0000000..e7c659b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/g95.py
@@ -0,0 +1,44 @@
+# http://g95.sourceforge.net/
+from __future__ import division, absolute_import, print_function
+
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['G95FCompiler']
+
+class G95FCompiler(FCompiler):
+    compiler_type = 'g95'
+    description = 'G95 Fortran Compiler'
+
+#    version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
+    # $ g95 --version
+    # G95 (GCC 4.0.3 (g95!) May 22 2006)
+
+    version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
+    # $ g95 --version
+    # G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)
+
+    executables = {
+        'version_cmd'  : ["<F90>", "--version"],
+        'compiler_f77' : ["g95", "-ffixed-form"],
+        'compiler_fix' : ["g95", "-ffixed-form"],
+        'compiler_f90' : ["g95"],
+        'linker_so'    : ["<F90>", "-shared"],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+        }
+    pic_flags = ['-fpic']
+    module_dir_switch = '-fmod='
+    module_include_switch = '-I'
+
+    def get_flags(self):
+        return ['-fno-second-underscore']
+    def get_flags_opt(self):
+        return ['-O']
+    def get_flags_debug(self):
+        return ['-g']
+
+if __name__ == '__main__':
+    from distutils import log
+    from numpy.distutils import customized_fcompiler
+    log.set_verbosity(2)
+    print(customized_fcompiler('g95').get_version())
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/g95.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/g95.pyc
new file mode 100644
index 0000000..1beb10a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/g95.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/gnu.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/gnu.py
new file mode 100644
index 0000000..965c670
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/gnu.py
@@ -0,0 +1,564 @@
+from __future__ import division, absolute_import, print_function
+
+import re
+import os
+import sys
+import warnings
+import platform
+import tempfile
+import hashlib
+import base64
+import subprocess
+from subprocess import Popen, PIPE, STDOUT
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.fcompiler import FCompiler
+from numpy.distutils.compat import get_exception
+from numpy.distutils.system_info 
import system_info + +compilers = ['GnuFCompiler', 'Gnu95FCompiler'] + +TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") + +# XXX: handle cross compilation + + +def is_win64(): + return sys.platform == "win32" and platform.architecture()[0] == "64bit" + + +if is_win64(): + #_EXTRAFLAGS = ["-fno-leading-underscore"] + _EXTRAFLAGS = [] +else: + _EXTRAFLAGS = [] + + +class GnuFCompiler(FCompiler): + compiler_type = 'gnu' + compiler_aliases = ('g77', ) + description = 'GNU Fortran 77 compiler' + + def gnu_version_match(self, version_string): + """Handle the different versions of GNU fortran compilers""" + # Strip warning(s) that may be emitted by gfortran + while version_string.startswith('gfortran: warning'): + version_string = version_string[version_string.find('\n') + 1:] + + # Gfortran versions from after 2010 will output a simple string + # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older + # gfortrans may still return long version strings (``-dumpversion`` was + # an alias for ``--version``) + if len(version_string) <= 20: + # Try to find a valid version string + m = re.search(r'([0-9.]+)', version_string) + if m: + # g77 provides a longer version string that starts with GNU + # Fortran + if version_string.startswith('GNU Fortran'): + return ('g77', m.group(1)) + + # gfortran only outputs a version string such as #.#.#, so check + # if the match is at the start of the string + elif m.start() == 0: + return ('gfortran', m.group(1)) + else: + # Output probably from --version, try harder: + m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) + if m: + return ('gfortran', m.group(1)) + m = re.search( + r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) + if m: + v = m.group(1) + if v.startswith('0') or v.startswith('2') or v.startswith('3'): + # the '0' is for early g77's + return ('g77', v) + else: + # at some point in the 4.x series, the ' 95' was dropped + # from the version string + return ('gfortran', v) + + # If still nothing, raise an error to make the problem easy to find. + err = 'A valid Fortran version was not found in this string:\n' + raise ValueError(err + version_string) + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'g77': + return None + return v[1] + + possible_executables = ['g77', 'f77'] + executables = { + 'version_cmd' : [None, "-dumpversion"], + 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], + 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes + 'compiler_fix' : None, + 'linker_so' : [None, "-g", "-Wall"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-g", "-Wall"] + } + module_dir_switch = None + module_include_switch = None + + # Cygwin: f771: warning: -fPIC ignored for target (all code is + # position independent) + if os.name != 'nt' and sys.platform != 'cygwin': + pic_flags = ['-fPIC'] + + # use -mno-cygwin for g77 when Python is not Cygwin-Python + if sys.platform == 'win32': + for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: + executables[key].append('-mno-cygwin') + + g2c = 'g2c' + suggested_f90_compiler = 'gnu95' + + def get_flags_linker_so(self): + opt = self.linker_so[1:] + if sys.platform == 'darwin': + target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) + # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value + # and leave it alone. 
But, distutils will complain if the
+            # environment's value is different from the one in the Python
+            # Makefile used to build Python. We let distutils handle this
+            # error checking.
+            if not target:
+                # If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
+                # we try to get it first from the Python Makefile and then we
+                # fall back to setting it to 10.3 to maximize the set of
+                # versions we can work with.  This is a reasonable default
+                # even when using the official Python dist and those derived
+                # from it.
+                import distutils.sysconfig as sc
+                g = {}
+                try:
+                    get_makefile_filename = sc.get_makefile_filename
+                except AttributeError:
+                    pass  # i.e. PyPy
+                else:
+                    filename = get_makefile_filename()
+                    sc.parse_makefile(filename, g)
+                target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
+                os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
+                if target == '10.3':
+                    s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
+                    warnings.warn(s, stacklevel=2)
+
+            opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
+        else:
+            opt.append("-shared")
+        if sys.platform.startswith('sunos'):
+            # SunOS often has dynamically loaded symbols defined in the
+            # static library libg2c.a  The linker doesn't like this.  To
+            # ignore the problem, use the -mimpure-text flag.  It isn't
+            # the safest thing, but seems to work. 'man gcc' says:
+            # ".. Instead of using -mimpure-text, you should compile all
+            #  source code with -fpic or -fPIC."
+            opt.append('-mimpure-text')
+        return opt
+
+    def get_libgcc_dir(self):
+        try:
+            output = subprocess.check_output(self.compiler_f77 +
+                                             ['-print-libgcc-file-name'])
+        except (OSError, subprocess.CalledProcessError):
+            pass
+        else:
+            output = filepath_from_subprocess_output(output)
+            return os.path.dirname(output)
+        return None
+
+    def get_libgfortran_dir(self):
+        if sys.platform[:5] == 'linux':
+            libgfortran_name = 'libgfortran.so'
+        elif sys.platform == 'darwin':
+            libgfortran_name = 'libgfortran.dylib'
+        else:
+            libgfortran_name = None
+
+        libgfortran_dir = None
+        if libgfortran_name:
+            find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)]
+            try:
+                output = subprocess.check_output(
+                    self.compiler_f77 + find_lib_arg)
+            except (OSError, subprocess.CalledProcessError):
+                pass
+            else:
+                output = filepath_from_subprocess_output(output)
+                libgfortran_dir = os.path.dirname(output)
+        return libgfortran_dir
+
+    def get_library_dirs(self):
+        opt = []
+        if sys.platform[:5] != 'linux':
+            d = self.get_libgcc_dir()
+            if d:
+                # if windows and not cygwin, libg2c lies in a different folder
+                if sys.platform == 'win32' and not d.startswith('/usr/lib'):
+                    d = os.path.normpath(d)
+                    path = os.path.join(d, "lib%s.a" % self.g2c)
+                    if not os.path.exists(path):
+                        root = os.path.join(d, *((os.pardir, ) * 4))
+                        d2 = os.path.abspath(os.path.join(root, 'lib'))
+                        path = os.path.join(d2, "lib%s.a" % self.g2c)
+                        if os.path.exists(path):
+                            opt.append(d2)
+                opt.append(d)
+        # For Macports / Linux, libgfortran and libgcc are not co-located
+        lib_gfortran_dir = self.get_libgfortran_dir()
+        if lib_gfortran_dir:
+            opt.append(lib_gfortran_dir)
+        return opt
+
+    def get_libraries(self):
+        opt = []
+        d = self.get_libgcc_dir()
+        if d is not None:
+            g2c = self.g2c + '-pic'
+            f = self.static_lib_format % (g2c, self.static_lib_extension)
+            if not os.path.isfile(os.path.join(d, f)):
+                g2c = self.g2c
+        else:
+            g2c = self.g2c
+
+        if g2c is not None:
+            opt.append(g2c)
+        c_compiler = self.c_compiler
+        if sys.platform == 'win32' and c_compiler and \
+                c_compiler.compiler_type == 'msvc':
+            opt.append('gcc')
+        if sys.platform == 'darwin':
sys.platform == 'darwin': + opt.append('cc_dynamic') + return opt + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + v = self.get_version() + if v and v <= '3.3.3': + # With this compiler version building Fortran BLAS/LAPACK + # with -O3 caused failures in lib.lapack heevr,syevr tests. + opt = ['-O2'] + else: + opt = ['-O3'] + opt.append('-funroll-loops') + return opt + + def _c_arch_flags(self): + """ Return detected arch flags from CFLAGS """ + from distutils import sysconfig + try: + cflags = sysconfig.get_config_vars()['CFLAGS'] + except KeyError: + return [] + arch_re = re.compile(r"-arch\s+(\w+)") + arch_flags = [] + for arch in arch_re.findall(cflags): + arch_flags += ['-arch', arch] + return arch_flags + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + if sys.platform[:3] == 'aix' or sys.platform == 'win32': + # Linux/Solaris/Unix support RPATH, Windows and AIX do not + raise NotImplementedError + + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + sep = ',' if sys.platform == 'darwin' else '=' + return '-Wl,-rpath%s%s' % (sep, dir) + + +class Gnu95FCompiler(GnuFCompiler): + compiler_type = 'gnu95' + compiler_aliases = ('gfortran', ) + description = 'GNU Fortran 95 compiler' + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'gfortran': + return None + v = v[1] + if v >= '4.': + # gcc-4 series releases do not support -mno-cygwin option + pass + else: + # use -mno-cygwin flag for gfortran when Python is not + # Cygwin-Python + if sys.platform == 'win32': + for key in [ + 'version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe' + ]: + self.executables[key].append('-mno-cygwin') + return v + + possible_executables = ['gfortran', 'f95'] + executables = { + 'version_cmd' : ["", "-dumpversion"], + 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", + "-fno-second-underscore"] + _EXTRAFLAGS, + 'compiler_f90' : [None, "-Wall", "-g", + "-fno-second-underscore"] + _EXTRAFLAGS, + 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", + "-fno-second-underscore"] + _EXTRAFLAGS, + 'linker_so' : ["", "-Wall", "-g"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-Wall"] + } + + module_dir_switch = '-J' + module_include_switch = '-I' + + if sys.platform[:3] == 'aix': + executables['linker_so'].append('-lpthread') + if platform.architecture()[0][:2] == '64': + for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: + executables[key].append('-maix64') + + g2c = 'gfortran' + + def _universal_flags(self, cmd): + """Return a list of -arch flags for every supported architecture.""" + if not sys.platform == 'darwin': + return [] + arch_flags = [] + # get arches the C compiler gets. 
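+        # (editor's sketch, not from the numpy sources: a "universal" macOS
+        # Python is compiled with several -arch flags in CFLAGS, e.g.
+        # "-arch i386 -arch x86_64"; _c_arch_flags() below recovers those so
+        # the Fortran build can target the same set of architectures)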
+ c_archs = self._c_arch_flags() + if "i386" in c_archs: + c_archs[c_archs.index("i386")] = "i686" + # check the arches the Fortran compiler supports, and compare with + # arch flags from C compiler + for arch in ["ppc", "i686", "x86_64", "ppc64"]: + if _can_target(cmd, arch) and arch in c_archs: + arch_flags.extend(["-arch", arch]) + return arch_flags + + def get_flags(self): + flags = GnuFCompiler.get_flags(self) + arch_flags = self._universal_flags(self.compiler_f90) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_flags_linker_so(self): + flags = GnuFCompiler.get_flags_linker_so(self) + arch_flags = self._universal_flags(self.linker_so) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_library_dirs(self): + opt = GnuFCompiler.get_library_dirs(self) + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + target = self.get_target() + if target: + d = os.path.normpath(self.get_libgcc_dir()) + root = os.path.join(d, *((os.pardir, ) * 4)) + path = os.path.join(root, "lib") + mingwdir = os.path.normpath(path) + if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): + opt.append(mingwdir) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) + return opt + + def get_libraries(self): + opt = GnuFCompiler.get_libraries(self) + if sys.platform == 'darwin': + opt.remove('cc_dynamic') + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + if "gcc" in opt: + i = opt.index("gcc") + opt.insert(i + 1, "mingwex") + opt.insert(i + 1, "mingw32") + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + return [] + else: + pass + return opt + + def get_target(self): + try: + output = subprocess.check_output(self.compiler_f77 + ['-v']) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + m = TARGET_R.search(output) + if m: + return m.group(1) + return "" + + def _hash_files(self, filenames): + h = hashlib.sha1() + for fn in filenames: + with open(fn, 'rb') as f: + while True: + block = f.read(131072) + if not block: + break + h.update(block) + text = base64.b32encode(h.digest()) + if sys.version_info[0] >= 3: + text = text.decode('ascii') + return text.rstrip('=') + + def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, + chained_dlls, is_archive): + """Create a wrapper shared library for the given objects + + Return an MSVC-compatible lib + """ + + c_compiler = self.c_compiler + if c_compiler.compiler_type != "msvc": + raise ValueError("This method only supports MSVC") + + object_hash = self._hash_files(list(objects) + list(chained_dlls)) + + if is_win64(): + tag = 'win_amd64' + else: + tag = 'win32' + + basename = 'lib' + os.path.splitext( + os.path.basename(objects[0]))[0][:8] + root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag + dll_name = root_name + '.dll' + def_name = root_name + '.def' + lib_name = root_name + '.lib' + dll_path = os.path.join(extra_dll_dir, dll_name) + def_path = os.path.join(output_dir, def_name) + lib_path = os.path.join(output_dir, lib_name) + + if os.path.isfile(lib_path): + # Nothing to do + return lib_path, dll_path + + if is_archive: + objects = (["-Wl,--whole-archive"] + list(objects) + + ["-Wl,--no-whole-archive"]) + self.link_shared_object( + objects, + dll_name, + output_dir=extra_dll_dir, + extra_postargs=list(chained_dlls) + [ + '-Wl,--allow-multiple-definition', + '-Wl,--output-def,' + def_path, + '-Wl,--export-all-symbols', + '-Wl,--enable-auto-import', + '-static', + '-mlong-double-64', + ]) + + # No PowerPC! + if is_win64(): + specifier = '/MACHINE:X64' + else: + specifier = '/MACHINE:X86' + + # MSVC specific code + lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] + if not c_compiler.initialized: + c_compiler.initialize() + c_compiler.spawn([c_compiler.lib] + lib_args) + + return lib_path, dll_path + + def can_ccompiler_link(self, compiler): + # MSVC cannot link objects compiled by GNU fortran + return compiler.compiler_type not in ("msvc", ) + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. + """ + if self.c_compiler.compiler_type == "msvc": + # Compile a DLL and return the lib for the DLL as + # the object. Also keep track of previous DLLs that + # we have compiled so that we can link against them. + + # If there are .a archives, assume they are self-contained + # static libraries, and build separate DLLs for each + archives = [] + plain_objects = [] + for obj in objects: + if obj.lower().endswith('.a'): + archives.append(obj) + else: + plain_objects.append(obj) + + chained_libs = [] + chained_dlls = [] + for archive in archives[::-1]: + lib, dll = self._link_wrapper_lib( + [archive], + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=True) + chained_libs.insert(0, lib) + chained_dlls.insert(0, dll) + + if not plain_objects: + return chained_libs + + lib, dll = self._link_wrapper_lib( + plain_objects, + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=False) + return [lib] + chained_libs + else: + raise ValueError("Unsupported C compiler") + + +def _can_target(cmd, arch): + """Return true if the architecture supports the -arch flag""" + newcmd = cmd[:] + fid, filename = tempfile.mkstemp(suffix=".f") + os.close(fid) + try: + d = os.path.dirname(filename) + output = os.path.splitext(filename)[0] + ".o" + try: + newcmd.extend(["-arch", arch, "-c", filename]) + p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) + p.communicate() + return p.returncode == 0 + finally: + if os.path.exists(output): + os.remove(output) + finally: + os.remove(filename) + return False + + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + + print(customized_fcompiler('gnu').get_version()) + try: + print(customized_fcompiler('g95').get_version()) + except Exception: + print(get_exception()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/gnu.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/gnu.pyc new file mode 100644 index 0000000..5ddb89d Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/gnu.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/hpux.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/hpux.py new file mode 100644 index 0000000..51bad54 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/hpux.py @@ -0,0 +1,43 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['HPUXFCompiler'] + +class HPUXFCompiler(FCompiler): + + compiler_type = 'hpux' + description = 'HP Fortran 90 Compiler' + version_pattern = r'HP F90 (?P[^\s*,]*)' + + executables = { + 'version_cmd' : ["f90", "+version"], + 'compiler_f77' : ["f90"], + 'compiler_fix' : ["f90"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["ld", "-b"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['+Z'] + def get_flags(self): + return self.pic_flags + ['+ppu', '+DD64'] + def get_flags_opt(self): + return ['-O3'] + def get_libraries(self): + return ['m'] + def get_library_dirs(self): + opt = ['/usr/lib/hpux64'] + return opt + def get_version(self, force=0, ok_status=[256, 0, 1]): + # XXX status==256 may indicate 'unrecognized option' or + # 'no input file'. So, version_cmd needs more work. + return FCompiler.get_version(self, force, ok_status) + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(10) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='hpux').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/hpux.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/hpux.pyc new file mode 100644 index 0000000..2f8662c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/hpux.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/ibm.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/ibm.py new file mode 100644 index 0000000..c4cb2fc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/ibm.py @@ -0,0 +1,100 @@ +from __future__ import division, absolute_import, print_function + +import os +import re +import sys +import subprocess + +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils.exec_command import find_executable +from numpy.distutils.misc_util import make_temp_file +from distutils import log + +compilers = ['IBMFCompiler'] + +class IBMFCompiler(FCompiler): + compiler_type = 'ibm' + description = 'IBM XL Fortran Compiler' + version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P[^\s*]*)' + #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 + + executables = { + 'version_cmd' : ["", "-qversion"], + 'compiler_f77' : ["xlf"], + 'compiler_fix' : ["xlf90", "-qfixed"], + 'compiler_f90' : ["xlf90"], + 'linker_so' : ["xlf95"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_version(self,*args,**kwds): + version = FCompiler.get_version(self,*args,**kwds) + + if version is None and sys.platform.startswith('aix'): + # use lslpp to find out xlf version + lslpp = find_executable('lslpp') + xlf = find_executable('xlf') + if os.path.exists(xlf) and os.path.exists(lslpp): + try: + o = 
subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) + except (OSError, subprocess.CalledProcessError): + pass + else: + m = re.search(r'xlfcmp:(?P\d+([.]\d+)+)', o) + if m: version = m.group('version') + + xlf_dir = '/etc/opt/ibmcmp/xlf' + if version is None and os.path.isdir(xlf_dir): + # linux: + # If the output of xlf does not contain version info + # (that's the case with xlf 8.1, for instance) then + # let's try another method: + l = sorted(os.listdir(xlf_dir)) + l.reverse() + l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] + if l: + from distutils.version import LooseVersion + self.version = version = LooseVersion(l[0]) + return version + + def get_flags(self): + return ['-qextname'] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_linker_so(self): + opt = [] + if sys.platform=='darwin': + opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') + else: + opt.append('-bshared') + version = self.get_version(ok_status=[0, 40]) + if version is not None: + if sys.platform.startswith('aix'): + xlf_cfg = '/etc/xlf.cfg' + else: + xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version + fo, new_cfg = make_temp_file(suffix='_xlf.cfg') + log.info('Creating '+new_cfg) + fi = open(xlf_cfg, 'r') + crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P.*)/crt1.o').match + for line in fi: + m = crt1_match(line) + if m: + fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) + else: + fo.write(line) + fi.close() + fo.close() + opt.append('-F'+new_cfg) + return opt + + def get_flags_opt(self): + return ['-O3'] + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/ibm.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/ibm.pyc new file mode 100644 index 0000000..f1d691c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/ibm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/intel.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/intel.py new file mode 100644 index 0000000..51f6812 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/intel.py @@ -0,0 +1,222 @@ +# http://developer.intel.com/software/products/compilers/flin/ +from __future__ import division, absolute_import, print_function + +import sys + +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file + +compilers = ['IntelFCompiler', 'IntelVisualFCompiler', + 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', + 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] + + +def intel_version_match(type): + # Match against the important stuff in the version string + return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) + + +class BaseIntelFCompiler(FCompiler): + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['', '-FI', '-V', '-c', + f + '.f', '-o', f + '.o'] + + def runtime_library_dir_option(self, dir): + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + return '-Wl,-rpath=%s' % dir + + +class IntelFCompiler(BaseIntelFCompiler): + + compiler_type = 'intel' + compiler_aliases = ('ifort',) + description = 'Intel Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') 
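+    # (editor's note, illustrative only: intel_version_match anchors on the
+    # pattern above, so a banner such as
+    #     Intel(R) Fortran Compiler for applications running on IA-32, Version 11.1
+    # would be accepted, with the trailing version number extracted)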
+ + possible_executables = ['ifort', 'ifc'] + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : [None, "-72", "-w90", "-w95"], + 'compiler_f90' : [None], + 'compiler_fix' : [None, "-FI"], + 'linker_so' : ["", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space! + module_include_switch = '-I' + + def get_flags_free(self): + return ['-FR'] + + def get_flags(self): + return ['-fPIC'] + + def get_flags_opt(self): # Scipy test failures with -O2 + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + return ['-fp-model strict -O1 -{}'.format(mpopt)] + + def get_flags_arch(self): + return [] + + def get_flags_linker_so(self): + opt = FCompiler.get_flags_linker_so(self) + v = self.get_version() + if v and v >= '8.0': + opt.append('-nofor_main') + if sys.platform == 'darwin': + # Here, it's -dynamiclib + try: + idx = opt.index('-shared') + opt.remove('-shared') + except ValueError: + idx = 0 + opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] + return opt + + +class IntelItaniumFCompiler(IntelFCompiler): + compiler_type = 'intele' + compiler_aliases = () + description = 'Intel Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium|IA-64') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + +class IntelEM64TFCompiler(IntelFCompiler): + compiler_type = 'intelem' + compiler_aliases = () + description = 'Intel Fortran Compiler for 64-bit apps' + + version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags(self): + return ['-fPIC'] + + def get_flags_opt(self): # Scipy test failures with -O2 + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + return ['-fp-model strict -O1 -{}'.format(mpopt)] + + def get_flags_arch(self): + return [''] + +# Is there no difference in the version string between the above compilers +# and the Visual compilers? + + +class IntelVisualFCompiler(BaseIntelFCompiler): + compiler_type = 'intelv' + description = 'Intel Visual Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['', '/FI', '/c', + f + '.f', '/o', f + '.o'] + + ar_exe = 'lib.exe' + possible_executables = ['ifort', 'ifl'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None], + 'compiler_fix' : [None], + 'compiler_f90' : [None], + 'linker_so' : [None], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + compile_switch = '/c ' + object_switch = '/Fo' # No space after /Fo! + library_switch = '/OUT:' # No space after /OUT:! 
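+    # (editor's sketch, not from the numpy sources: together with the module
+    # switches just below, a compile step built from these attributes looks
+    # roughly like
+    #     ifort /c /Foobj.obj /module:moddir /Iincdir source.f
+    # hence the warnings above about not adding trailing spaces)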
+ module_dir_switch = '/module:' # No space after /module: + module_include_switch = '/I' + + def get_flags(self): + opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', '/assume:underscore'] + return opt + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['/4Yb', '/d2'] + + def get_flags_opt(self): + return ['/O1'] # Scipy test failures with /O2 + + def get_flags_arch(self): + return ["/arch:IA32", "/QaxSSE3"] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + + +class IntelItaniumVisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelev' + description = 'Intel Visual Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium') + + possible_executables = ['efl'] # XXX this is a wild guess + ar_exe = IntelVisualFCompiler.ar_exe + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI", "-4L72", "-w"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + +class IntelEM64VisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelvem' + description = 'Intel Visual Fortran Compiler for 64-bit apps' + + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + + def get_flags_arch(self): + return [''] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='intel').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/intel.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/intel.pyc new file mode 100644 index 0000000..2fd5d6e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/intel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/lahey.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/lahey.py new file mode 100644 index 0000000..1beb662 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/lahey.py @@ -0,0 +1,47 @@ +from __future__ import division, absolute_import, print_function + +import os + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['LaheyFCompiler'] + +class LaheyFCompiler(FCompiler): + + compiler_type = 'lahey' + description = 'Lahey/Fujitsu Fortran 95 Compiler' + version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' + + executables = { + 'version_cmd' : ["", "--version"], + 'compiler_f77' : ["lf95", "--fix"], + 'compiler_fix' : ["lf95", "--fix"], + 'compiler_f90' : ["lf95"], + 'linker_so' : ["lf95", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def get_flags_opt(self): + return ['-O'] + def get_flags_debug(self): + return ['-g', '--chk', '--chkglobal'] + def get_library_dirs(self): + opt = [] + d = os.environ.get('LAHEY') + if d: + opt.append(os.path.join(d, 'lib')) + return opt + def get_libraries(self): + opt = [] + opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/lahey.pyc 
b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/lahey.pyc new file mode 100644 index 0000000..db9f6fd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/lahey.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/mips.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/mips.py new file mode 100644 index 0000000..da337b2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/mips.py @@ -0,0 +1,56 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler + +compilers = ['MIPSFCompiler'] + +class MIPSFCompiler(FCompiler): + + compiler_type = 'mips' + description = 'MIPSpro Fortran Compiler' + version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' + + executables = { + 'version_cmd' : ["", "-version"], + 'compiler_f77' : ["f77", "-f77"], + 'compiler_fix' : ["f90", "-fixedform"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["f90", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : None + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['-KPIC'] + + def get_flags(self): + return self.pic_flags + ['-n32'] + def get_flags_opt(self): + return ['-O3'] + def get_flags_arch(self): + opt = [] + for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): + if getattr(cpu, 'is_IP%s'%a)(): + opt.append('-TARG:platform=IP%s' % a) + break + return opt + def get_flags_arch_f77(self): + r = None + if cpu.is_r10000(): r = 10000 + elif cpu.is_r12000(): r = 12000 + elif cpu.is_r8000(): r = 8000 + elif cpu.is_r5000(): r = 5000 + elif cpu.is_r4000(): r = 4000 + if r is not None: + return ['r%s' % (r)] + return [] + def get_flags_arch_f90(self): + r = self.get_flags_arch_f77() + if r: + r[0] = '-' + r[0] + return r + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='mips').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/mips.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/mips.pyc new file mode 100644 index 0000000..9f4d651 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/mips.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/nag.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/nag.py new file mode 100644 index 0000000..cb71d54 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/nag.py @@ -0,0 +1,84 @@ +from __future__ import division, absolute_import, print_function + +import sys +import re +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NAGFCompiler', 'NAGFORCompiler'] + +class BaseNAGFCompiler(FCompiler): + version_pattern = r'NAG.* Release (?P[^(\s]*)' + + def version_match(self, version_string): + m = re.search(self.version_pattern, version_string) + if m: + return m.group('version') + else: + return None + + def get_flags_linker_so(self): + return ["-Wl,-shared"] + def get_flags_opt(self): + return ['-O4'] + def get_flags_arch(self): + return [''] + +class NAGFCompiler(BaseNAGFCompiler): + + compiler_type = 'nag' + description = 'NAGWare Fortran 95 Compiler' + + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["f95", "-fixed"], + 'compiler_fix' : ["f95", "-fixed"], + 
'compiler_f90' : ["f95"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_linker_so(self): + if sys.platform == 'darwin': + return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return BaseNAGFCompiler.get_flags_linker_so(self) + def get_flags_arch(self): + version = self.get_version() + if version and version < '5.1': + return ['-target=native'] + else: + return BaseNAGFCompiler.get_flags_arch(self) + def get_flags_debug(self): + return ['-g', '-gline', '-g90', '-nan', '-C'] + +class NAGFORCompiler(BaseNAGFCompiler): + + compiler_type = 'nagfor' + description = 'NAG Fortran Compiler' + + executables = { + 'version_cmd' : ["nagfor", "-V"], + 'compiler_f77' : ["nagfor", "-fixed"], + 'compiler_fix' : ["nagfor", "-fixed"], + 'compiler_f90' : ["nagfor"], + 'linker_so' : ["nagfor"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_debug(self): + version = self.get_version() + if version and version > '6.1': + return ['-g', '-u', '-nan', '-C=all', '-thread_safe', + '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] + else: + return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + compiler = customized_fcompiler(compiler='nagfor') + print(compiler.get_version()) + print(compiler.get_flags_debug()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/nag.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/nag.pyc new file mode 100644 index 0000000..8a63a10 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/nag.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/none.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/none.py new file mode 100644 index 0000000..bdeea15 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/none.py @@ -0,0 +1,30 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils import customized_fcompiler + +compilers = ['NoneFCompiler'] + +class NoneFCompiler(FCompiler): + + compiler_type = 'none' + description = 'Fake Fortran compiler' + + executables = {'compiler_f77': None, + 'compiler_f90': None, + 'compiler_fix': None, + 'linker_so': None, + 'linker_exe': None, + 'archiver': None, + 'ranlib': None, + 'version_cmd': None, + } + + def find_executables(self): + pass + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + print(customized_fcompiler(compiler='none').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/none.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/none.pyc new file mode 100644 index 0000000..967d501 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/none.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pathf95.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pathf95.py new file mode 100644 index 0000000..5de86f6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pathf95.py @@ -0,0 +1,35 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.fcompiler import FCompiler + +compilers 
= ['PathScaleFCompiler'] + +class PathScaleFCompiler(FCompiler): + + compiler_type = 'pathf95' + description = 'PathScale Fortran Compiler' + version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' + + executables = { + 'version_cmd' : ["pathf95", "-version"], + 'compiler_f77' : ["pathf95", "-fixedform"], + 'compiler_fix' : ["pathf95", "-fixedform"], + 'compiler_f90' : ["pathf95"], + 'linker_so' : ["pathf95", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space! + module_include_switch = '-I' + + def get_flags_opt(self): + return ['-O3'] + def get_flags_debug(self): + return ['-g'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pathf95.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pathf95.pyc new file mode 100644 index 0000000..5004d8e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pathf95.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pg.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pg.py new file mode 100644 index 0000000..9c51947 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pg.py @@ -0,0 +1,142 @@ +# http://www.pgroup.com +from __future__ import division, absolute_import, print_function + +import sys + +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file +from sys import platform +from os.path import join, dirname, normpath + +compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] + + +class PGroupFCompiler(FCompiler): + + compiler_type = 'pg' + description = 'Portland Group Fortran Compiler' + version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' + + if platform == 'darwin': + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran", "-dynamiclib"], + 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], + 'compiler_f90': ["pgfortran", "-dynamiclib"], + 'linker_so': ["libtool"], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = [''] + else: + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran"], + 'compiler_fix': ["pgfortran", "-Mfixed"], + 'compiler_f90': ["pgfortran"], + 'linker_so': ["pgfortran"], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = ['-fpic'] + + module_dir_switch = '-module ' + module_include_switch = '-I' + + def get_flags(self): + opt = ['-Minform=inform', '-Mnosecond_underscore'] + return self.pic_flags + opt + + def get_flags_opt(self): + return ['-fast'] + + def get_flags_debug(self): + return ['-g'] + + if platform == 'darwin': + def get_flags_linker_so(self): + return ["-dynamic", '-undefined', 'dynamic_lookup'] + + else: + def get_flags_linker_so(self): + return ["-shared", '-fpic'] + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + + +if sys.version_info >= (3, 5): + import functools + + class PGroupFlangCompiler(FCompiler): + compiler_type = 'flang' + description = 'Portland Group Fortran LLVM Compiler' + version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' + + ar_exe = 'lib.exe' + possible_executables = ['flang'] + + executables = { + 'version_cmd': ["", "--version"], + 'compiler_f77': ["flang"], + 
'compiler_fix': ["flang"], + 'compiler_f90': ["flang"], + 'linker_so': [None], + 'archiver': [ar_exe, "/verbose", "/OUT:"], + 'ranlib': None + } + + library_switch = '/OUT:' # No space after /OUT:! + module_dir_switch = '-module ' # Don't remove ending space! + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + opt.extend(['flang', 'flangrti', 'ompstub']) + return opt + + @functools.lru_cache(maxsize=128) + def get_library_dirs(self): + """List of compiler library directories.""" + opt = FCompiler.get_library_dirs(self) + flang_dir = dirname(self.executables['compiler_f77'][0]) + opt.append(normpath(join(flang_dir, '..', 'lib'))) + + return opt + + def get_flags(self): + return [] + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + return ['-O3'] + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + +else: + from numpy.distutils.fcompiler import CompilerNotFound + + # No point in supporting on older Pythons because not ABI compatible + class PGroupFlangCompiler(FCompiler): + compiler_type = 'flang' + description = 'Portland Group Fortran LLVM Compiler' + + def get_version(self): + raise CompilerNotFound('Flang unsupported on Python < 3.5') + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + if 'flang' in sys.argv: + print(customized_fcompiler(compiler='flang').get_version()) + else: + print(customized_fcompiler(compiler='pg').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pg.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pg.pyc new file mode 100644 index 0000000..2eeca96 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/pg.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/sun.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/sun.py new file mode 100644 index 0000000..561ea85 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/sun.py @@ -0,0 +1,53 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler + +compilers = ['SunFCompiler'] + +class SunFCompiler(FCompiler): + + compiler_type = 'sun' + description = 'Sun or Forte Fortran 95 Compiler' + # ex: + # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 + version_match = simple_version_match( + start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') + + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["f90"], + 'compiler_fix' : ["f90", "-fixed"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["", "-Bdynamic", "-G"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = '-moddir=' + module_include_switch = '-M' + pic_flags = ['-xcode=pic32'] + + def get_flags_f77(self): + ret = ["-ftrap=%none"] + if (self.get_version() or '') >= '7': + ret.append("-f77") + else: + ret.append("-fixed") + return ret + def get_opt(self): + return ['-fast', '-dalign'] + def get_arch(self): + return ['-xtarget=generic'] + def get_libraries(self): + opt = [] + opt.extend(['fsu', 'sunmath', 'mvec']) + return opt + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + +if __name__ == '__main__': + from distutils import log + 
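+    # (editor's note: verbosity 2 maps to the DEBUG threshold in
+    # numpy.distutils.log, so the version probe below runs with full logging)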
log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='sun').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/sun.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/sun.pyc new file mode 100644 index 0000000..eb82556 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/sun.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/vast.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/vast.py new file mode 100644 index 0000000..adc1591 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/vast.py @@ -0,0 +1,54 @@ +from __future__ import division, absolute_import, print_function + +import os + +from numpy.distutils.fcompiler.gnu import GnuFCompiler + +compilers = ['VastFCompiler'] + +class VastFCompiler(GnuFCompiler): + compiler_type = 'vast' + compiler_aliases = () + description = 'Pacific-Sierra Research Fortran 90 Compiler' + version_pattern = (r'\s*Pacific-Sierra Research vf90 ' + r'(Personal|Professional)\s+(?P[^\s]*)') + + # VAST f90 does not support -o with -c. So, object files are created + # to the current directory and then moved to build directory + object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' + + executables = { + 'version_cmd' : ["vf90", "-v"], + 'compiler_f77' : ["g77"], + 'compiler_fix' : ["f90", "-Wv,-ya"], + 'compiler_f90' : ["f90"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def find_executables(self): + pass + + def get_version_cmd(self): + f90 = self.compiler_f90[0] + d, b = os.path.split(f90) + vf90 = os.path.join(d, 'v'+b) + return vf90 + + def get_flags_arch(self): + vast_version = self.get_version() + gnu = GnuFCompiler() + gnu.customize(None) + self.version = gnu.get_version() + opt = GnuFCompiler.get_flags_arch(self) + self.version = vast_version + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='vast').get_version()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/vast.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/vast.pyc new file mode 100644 index 0000000..57159c9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/fcompiler/vast.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/from_template.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/from_template.py new file mode 100644 index 0000000..65c60c4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/from_template.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python +""" + +process_file(filename) + + takes templated file .xxx.src and produces .xxx file where .xxx + is .pyf .f90 or .f using the following template rules: + + '<..>' denotes a template. + + All function and subroutine blocks in a source file with names that + contain '<..>' will be replicated according to the rules in '<..>'. + + The number of comma-separated words in '<..>' will determine the number of + replicates. + + '<..>' may have two different forms, named and short. For example, + + named: + where anywhere inside a block '
<p=d,s,z,c>' is defined, every '<p>
    ' will be replaced with + 'd', 's', 'z', and 'c' for each replicate of the block. + + <_c> is already defined: <_c=s,d,c,z> + <_t> is already defined: <_t=real,double precision,complex,double complex> + + short: + , a short form of the named, useful when no
<p>
    appears inside + a block. + + In general, '<..>' contains a comma separated list of arbitrary + expressions. If these expression must contain a comma|leftarrow|rightarrow, + then prepend the comma|leftarrow|rightarrow with a backslash. + + If an expression matches '\\' then it will be replaced + by -th expression. + + Note that all '<..>' forms in a block must have the same number of + comma-separated entries. + + Predefined named template rules: + + + + + + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['process_str', 'process_file'] + +import os +import sys +import re + +routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) +routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) +function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) + +def parse_structure(astr): + """ Return a list of tuples for each function or subroutine each + tuple is the start and end of a subroutine or function to be + expanded. + """ + + spanlist = [] + ind = 0 + while True: + m = routine_start_re.search(astr, ind) + if m is None: + break + start = m.start() + if function_start_re.match(astr, start, m.end()): + while True: + i = astr.rfind('\n', ind, start) + if i==-1: + break + start = i + if astr[i:i+7]!='\n $': + break + start += 1 + m = routine_end_re.search(astr, m.end()) + ind = end = m and m.end()-1 or len(astr) + spanlist.append((start, end)) + return spanlist + +template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") +named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") +list_re = re.compile(r"<\s*((.*?))\s*>") + +def find_repl_patterns(astr): + reps = named_re.findall(astr) + names = {} + for rep in reps: + name = rep[0].strip() or unique_key(names) + repl = rep[1].replace(r'\,', '@comma@') + thelist = conv(repl) + names[name] = thelist + return names + +def find_and_remove_repl_patterns(astr): + names = find_repl_patterns(astr) + astr = re.subn(named_re, '', astr)[0] + return astr, names + +item_re = re.compile(r"\A\\(?P\d+)\Z") +def conv(astr): + b = astr.split(',') + l = [x.strip() for x in b] + for i in range(len(l)): + m = item_re.match(l[i]) + if m: + j = int(m.group('index')) + l[i] = l[j] + return ','.join(l) + +def unique_key(adict): + """ Obtain a unique key given a dictionary.""" + allkeys = list(adict.keys()) + done = False + n = 1 + while not done: + newkey = '__l%s' % (n) + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') +def expand_sub(substr, names): + substr = substr.replace(r'\>', '@rightarrow@') + substr = substr.replace(r'\<', '@leftarrow@') + lnames = find_repl_patterns(substr) + substr = named_re.sub(r"<\1>", substr) # get rid of definition templates + + def listrepl(mobj): + thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) + if template_name_re.match(thelist): + return "<%s>" % (thelist) + name = None + for key in lnames.keys(): # see if list is already in dictionary + if lnames[key] == thelist: + name = key + if name is None: # this list is not in the dictionary yet + name = unique_key(lnames) + lnames[name] = thelist + return "<%s>" % name + + substr = list_re.sub(listrepl, substr) # convert all lists to named templates + # newnames are constructed as needed + + numsubs = None + base_rule = None + rules = {} + for r in template_re.findall(substr): + if r not in rules: + thelist = lnames.get(r, names.get(r, None)) + if thelist is None: + raise ValueError('No replicates found for <%s>' 
% (r)) + if r not in names and not thelist.startswith('_'): + names[r] = thelist + rule = [i.replace('@comma@', ',') for i in thelist.split(',')] + num = len(rule) + + if numsubs is None: + numsubs = num + rules[r] = rule + base_rule = r + elif num == numsubs: + rules[r] = rule + else: + print("Mismatch in number of replacements (base <%s=%s>)" + " for <%s=%s>. Ignoring." % + (base_rule, ','.join(rules[base_rule]), r, thelist)) + if not rules: + return substr + + def namerepl(mobj): + name = mobj.group(1) + return rules.get(name, (k+1)*[name])[k] + + newstr = '' + for k in range(numsubs): + newstr += template_re.sub(namerepl, substr) + '\n\n' + + newstr = newstr.replace('@rightarrow@', '>') + newstr = newstr.replace('@leftarrow@', '<') + return newstr + +def process_str(allstr): + newstr = allstr + writestr = '' + + struct = parse_structure(newstr) + + oldend = 0 + names = {} + names.update(_special_names) + for sub in struct: + cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) + writestr += cleanedstr + names.update(defs) + writestr += expand_sub(newstr[sub[0]:sub[1]], names) + oldend = sub[1] + writestr += newstr[oldend:] + + return writestr + +include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+[.]src)['\"]", re.I) + +def resolve_includes(source): + d = os.path.dirname(source) + fid = open(source) + lines = [] + for line in fid: + m = include_src_re.match(line) + if m: + fn = m.group('name') + if not os.path.isabs(fn): + fn = os.path.join(d, fn) + if os.path.isfile(fn): + print('Including file', fn) + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + fid.close() + return lines + +def process_file(source): + lines = resolve_includes(source) + return process_str(''.join(lines)) + +_special_names = find_repl_patterns(''' +<_c=s,d,c,z> +<_t=real,double precision,complex,double complex> + + + + + +''') + +def main(): + try: + file = sys.argv[1] + except IndexError: + fid = sys.stdin + outfile = sys.stdout + else: + fid = open(file, 'r') + (base, ext) = os.path.splitext(file) + newname = base + outfile = open(newname, 'w') + + allstr = fid.read() + writestr = process_str(allstr) + outfile.write(writestr) + +if __name__ == "__main__": + main() diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/from_template.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/from_template.pyc new file mode 100644 index 0000000..02699a6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/from_template.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/info.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/info.py new file mode 100644 index 0000000..2f53106 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/info.py @@ -0,0 +1,6 @@ +""" +Enhanced distutils with Fortran compilers support and more. 
+""" +from __future__ import division, absolute_import, print_function + +postpone_import = True diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/info.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/info.pyc new file mode 100644 index 0000000..1b9a837 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/intelccompiler.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/intelccompiler.py new file mode 100644 index 0000000..3386775 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/intelccompiler.py @@ -0,0 +1,113 @@ +from __future__ import division, absolute_import, print_function + +import platform + +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.exec_command import find_executable +from numpy.distutils.ccompiler import simple_version_match +if platform.system() == 'Windows': + from numpy.distutils.msvc9compiler import MSVCCompiler + + +class IntelCCompiler(UnixCCompiler): + """A modified Intel compiler compatible with a GCC-built Python.""" + compiler_type = 'intel' + cc_exe = 'icc' + cc_args = 'fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +class IntelItaniumCCompiler(IntelCCompiler): + compiler_type = 'intele' + + # On Itanium, the Intel Compiler used to be called ecc, let's search for + # it (now it's also icc, so ecc is last in the search). + for cc_exe in map(find_executable, ['icc', 'ecc']): + if cc_exe: + break + + +class IntelEM64TCCompiler(UnixCCompiler): + """ + A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. + """ + compiler_type = 'intelem' + cc_exe = 'icc -m64' + cc_args = '-fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +if platform.system() == 'Windows': + class IntelCCompilerW(MSVCCompiler): + """ + A modified Intel compiler compatible with an MSVC-built Python. 
+ """ + compiler_type = 'intelw' + compiler_cxx = 'icl' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?32,') + self.__version = version_match + + def initialize(self, plat_name=None): + MSVCCompiler.initialize(self, plat_name) + self.cc = self.find_exe('icl.exe') + self.lib = self.find_exe('xilib') + self.linker = self.find_exe('xilink') + self.compile_options = ['/nologo', '/O3', '/MD', '/W3', + '/Qstd=c99'] + self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', + '/Qstd=c99', '/Z7', '/D_DEBUG'] + + class IntelEM64TCCompilerW(IntelCCompilerW): + """ + A modified Intel x86_64 compiler compatible with + a 64bit MSVC-built Python. + """ + compiler_type = 'intelemw' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + self.__version = version_match diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/intelccompiler.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/intelccompiler.pyc new file mode 100644 index 0000000..18dccb7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/intelccompiler.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/lib2def.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/lib2def.py new file mode 100644 index 0000000..2d013a1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/lib2def.py @@ -0,0 +1,115 @@ +from __future__ import division, absolute_import, print_function + +import re +import sys +import subprocess + +__doc__ = """This module generates a DEF file from the symbols in +an MSVC-compiled DLL import library. It correctly discriminates between +data and functions. The data is collected from the output of the program +nm(1). + +Usage: + python lib2def.py [libname.lib] [output.def] +or + python lib2def.py [libname.lib] > output.def + +libname.lib defaults to python.lib and output.def defaults to stdout + +Author: Robert Kern +Last Update: April 30, 1999 +""" + +__version__ = '0.1a' + +py_ver = "%d%d" % tuple(sys.version_info[:2]) + +DEFAULT_NM = 'nm -Cs' + +DEF_HEADER = """LIBRARY python%s.dll +;CODE PRELOAD MOVEABLE DISCARDABLE +;DATA PRELOAD SINGLE + +EXPORTS +""" % py_ver +# the header of the DEF file + +FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) +DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) + +def parse_cmd(): + """Parses the command-line arguments. + +libfile, deffile = parse_cmd()""" + if len(sys.argv) == 3: + if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': + libfile, deffile = sys.argv[1:] + elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': + deffile, libfile = sys.argv[1:] + else: + print("I'm assuming that your first argument is the library") + print("and the second is the DEF file.") + elif len(sys.argv) == 2: + if sys.argv[1][-4:] == '.def': + deffile = sys.argv[1] + libfile = 'python%s.lib' % py_ver + elif sys.argv[1][-4:] == '.lib': + deffile = None + libfile = sys.argv[1] + else: + libfile = 'python%s.lib' % py_ver + deffile = None + return libfile, deffile + +def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]): + """Returns the output of nm_cmd via a pipe. 
+ +nm_output = getnam(nm_cmd = 'nm -Cs py_lib')""" + f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True) + nm_output = f.stdout.read() + f.stdout.close() + return nm_output + +def parse_nm(nm_output): + """Returns a tuple of lists: dlist for the list of data +symbols and flist for the list of function symbols. + +dlist, flist = parse_nm(nm_output)""" + data = DATA_RE.findall(nm_output) + func = FUNC_RE.findall(nm_output) + + flist = [] + for sym in data: + if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): + flist.append(sym) + + dlist = [] + for sym in data: + if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): + dlist.append(sym) + + dlist.sort() + flist.sort() + return dlist, flist + +def output_def(dlist, flist, header, file = sys.stdout): + """Outputs the final DEF file to a file defaulting to stdout. + +output_def(dlist, flist, header, file = sys.stdout)""" + for data_sym in dlist: + header = header + '\t%s DATA\n' % data_sym + header = header + '\n' # blank line + for func_sym in flist: + header = header + '\t%s\n' % func_sym + file.write(header) + +if __name__ == '__main__': + libfile, deffile = parse_cmd() + if deffile is None: + deffile = sys.stdout + else: + deffile = open(deffile, 'w') + nm_cmd = [str(DEFAULT_NM), str(libfile)] + nm_output = getnm(nm_cmd) + dlist, flist = parse_nm(nm_output) + output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/lib2def.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/lib2def.pyc new file mode 100644 index 0000000..9084659 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/lib2def.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/line_endings.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/line_endings.py new file mode 100644 index 0000000..5ecb104 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/line_endings.py @@ -0,0 +1,76 @@ +""" Functions for converting from DOS to UNIX line endings + +""" +from __future__ import division, absolute_import, print_function + +import sys, re, os + +def dos2unix(file): + "Replace CRLF with LF in argument files. Print names of changed files." + if os.path.isdir(file): + print(file, "Directory!") + return + + data = open(file, "rb").read() + if '\0' in data: + print(file, "Binary!") + return + + newdata = re.sub("\r\n", "\n", data) + if newdata != data: + print('dos2unix:', file) + f = open(file, "wb") + f.write(newdata) + f.close() + return file + else: + print(file, 'ok') + +def dos2unix_one_dir(modified_files, dir_name, file_names): + for file in file_names: + full_path = os.path.join(dir_name, file) + file = dos2unix(full_path) + if file is not None: + modified_files.append(file) + +def dos2unix_dir(dir_name): + modified_files = [] + os.path.walk(dir_name, dos2unix_one_dir, modified_files) + return modified_files +#---------------------------------- + +def unix2dos(file): + "Replace LF with CRLF in argument files. Print names of changed files." 
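+    # (editor's gloss, not in the original: the conversion is two regex
+    # passes -- CRLF is first normalised to LF, then every LF is expanded
+    # back to CRLF -- so an already-DOS file round-trips unchanged, e.g.
+    #     "a\r\nb\n" -> "a\nb\n" -> "a\r\nb\r\n")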
+    if os.path.isdir(file):
+        print(file, "Directory!")
+        return
+
+    data = open(file, "rb").read()
+    if '\0' in data:
+        print(file, "Binary!")
+        return
+    newdata = re.sub("\r\n", "\n", data)
+    newdata = re.sub("\n", "\r\n", newdata)
+    if newdata != data:
+        print('unix2dos:', file)
+        f = open(file, "wb")
+        f.write(newdata)
+        f.close()
+        return file
+    else:
+        print(file, 'ok')
+
+def unix2dos_one_dir(modified_files, dir_name, file_names):
+    for file in file_names:
+        full_path = os.path.join(dir_name, file)
+        file = unix2dos(full_path)
+        if file is not None:
+            modified_files.append(file)
+
+def unix2dos_dir(dir_name):
+    modified_files = []
+    os.path.walk(dir_name, unix2dos_one_dir, modified_files)
+    return modified_files
+
+if __name__ == "__main__":
+    dos2unix_dir(sys.argv[1])
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/line_endings.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/line_endings.pyc
new file mode 100644
index 0000000..fca7c28
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/line_endings.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/log.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/log.py
new file mode 100644
index 0000000..37f9fe5
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/log.py
@@ -0,0 +1,93 @@
+# Colored log, requires Python 2.3 or up.
+from __future__ import division, absolute_import, print_function
+
+import sys
+from distutils.log import *
+from distutils.log import Log as old_Log
+from distutils.log import _global_log
+
+if sys.version_info[0] < 3:
+    from .misc_util import (red_text, default_text, cyan_text, green_text,
+                            is_sequence, is_string)
+else:
+    from numpy.distutils.misc_util import (red_text, default_text, cyan_text,
+                                           green_text, is_sequence, is_string)
+
+
+def _fix_args(args,flag=1):
+    if is_string(args):
+        return args.replace('%', '%%')
+    if flag and is_sequence(args):
+        return tuple([_fix_args(a, flag=0) for a in args])
+    return args
+
+
+class Log(old_Log):
+    def _log(self, level, msg, args):
+        if level >= self.threshold:
+            if args:
+                msg = msg % _fix_args(args)
+            if 0:
+                if msg.startswith('copying ') and msg.find(' -> ') != -1:
+                    return
+                if msg.startswith('byte-compiling '):
+                    return
+            print(_global_color_map[level](msg))
+            sys.stdout.flush()
+
+    def good(self, msg, *args):
+        """
+        If we log WARN messages, log this message as a 'nice' anti-warn
+        message.
+
+        """
+        if WARN >= self.threshold:
+            if args:
+                print(green_text(msg % _fix_args(args)))
+            else:
+                print(green_text(msg))
+            sys.stdout.flush()
+
+
+_global_log.__class__ = Log
+
+good = _global_log.good
+
+def set_threshold(level, force=False):
+    prev_level = _global_log.threshold
+    if prev_level > DEBUG or force:
+        # If we're running at DEBUG, don't change the threshold, as there's
+        # likely a good reason why we're running at this level.
+        _global_log.threshold = level
+        if level <= DEBUG:
+            info('set_threshold: setting threshold to DEBUG level,'
+                 ' it can be changed only with force argument')
+    else:
+        info('set_threshold: not changing threshold from DEBUG level'
+             ' %s to %s' % (prev_level, level))
+    return prev_level
+
+
+def set_verbosity(v, force=False):
+    prev_level = _global_log.threshold
+    if v < 0:
+        set_threshold(ERROR, force)
+    elif v == 0:
+        set_threshold(WARN, force)
+    elif v == 1:
+        set_threshold(INFO, force)
+    elif v >= 2:
+        set_threshold(DEBUG, force)
+    return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1)
+
+
+_global_color_map = {
+    DEBUG:cyan_text,
+    INFO:default_text,
+    WARN:red_text,
+    ERROR:red_text,
+    FATAL:red_text
+}
+
+# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold.
+set_verbosity(0, force=True)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/log.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/log.pyc
new file mode 100644
index 0000000..1e555be
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/log.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c b/project/venv/lib/python2.7/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c
new file mode 100644
index 0000000..485a675
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c
@@ -0,0 +1,6 @@
+int _get_output_format(void)
+{
+    return 0;
+}
+
+int _imp____lc_codepage = 0;
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/mingw32ccompiler.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/mingw32ccompiler.py
new file mode 100644
index 0000000..e6bbe19
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/mingw32ccompiler.py
@@ -0,0 +1,656 @@
+"""
+Support code for building Python extensions on Windows.
+
+    # NT stuff
+    # 1. Make sure libpython<version>.a exists for gcc. If not, build it.
+    # 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
+    # 3. Force windows to use g77
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import subprocess
+import re
+
+# Overwrite certain distutils.ccompiler functions:
+import numpy.distutils.ccompiler
+
+if sys.version_info[0] < 3:
+    from . import log
+else:
+    from numpy.distutils import log
+# NT stuff
+# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
+# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
+#    --> this is done in numpy/distutils/ccompiler.py
+# 3.
Force windows to use g77 + +import distutils.cygwinccompiler +from distutils.version import StrictVersion +from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options +from distutils.unixccompiler import UnixCCompiler +from distutils.msvccompiler import get_build_version as get_build_msvc_version +from distutils.errors import (DistutilsExecError, CompileError, + UnknownFileError) +from numpy.distutils.misc_util import (msvc_runtime_library, + msvc_runtime_version, + msvc_runtime_major, + get_build_architecture) + +def get_msvcr_replacement(): + """Replacement for outdated version of get_msvcr from cygwinccompiler""" + msvcr = msvc_runtime_library() + return [] if msvcr is None else [msvcr] + +# monkey-patch cygwinccompiler with our updated version from misc_util +# to avoid getting an exception raised on Python 3.5 +distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement + +# Useful to generate table of symbols from a dll +_START = re.compile(r'\[Ordinal/Name Pointer\] Table') +_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') + +# the same as cygwin plus some additional parameters +class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): + """ A modified MingW32 compiler compatible with an MSVC built Python. + + """ + + compiler_type = 'mingw32' + + def __init__ (self, + verbose=0, + dry_run=0, + force=0): + + distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, + dry_run, force) + + # we need to support 3.2 which doesn't match the standard + # get_versions methods regex + if self.gcc_version is None: + p = subprocess.Popen(['gcc', '-dumpversion'], shell=True, + stdout=subprocess.PIPE) + out_string = p.stdout.read() + p.stdout.close() + result = re.search(r'(\d+\.\d+)', out_string) + if result: + self.gcc_version = StrictVersion(result.group(1)) + + # A real mingw32 doesn't need to specify a different entry point, + # but cygwin 2.91.57 in no-cygwin-mode needs it. + if self.gcc_version <= "2.91.57": + entry_point = '--entry _DllMain@12' + else: + entry_point = '' + + if self.linker_dll == 'dllwrap': + # Commented out '--driver-name g++' part that fixes weird + # g++.exe: g++: No such file or directory + # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). + # If the --driver-name part is required for some environment + # then make the inclusion of this part specific to that + # environment. + self.linker = 'dllwrap' # --driver-name g++' + elif self.linker_dll == 'gcc': + self.linker = 'g++' + + # **changes: eric jones 4/11/01 + # 1. Check for import library on Windows. Build if it doesn't exist. + + build_import_library() + + # Check for custom msvc runtime library on Windows. Build if it doesn't exist. + msvcr_success = build_msvcr_library() + msvcr_dbg_success = build_msvcr_library(debug=True) + if msvcr_success or msvcr_dbg_success: + # add preprocessor statement for using customized msvcr lib + self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') + + # Define the MSVC version as hint for MinGW + msvcr_version = msvc_runtime_version() + if msvcr_version: + self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) + + # MS_WIN64 should be defined when building for amd64 on windows, + # but python headers define it only for MS compilers, which has all + # kind of bad consequences, like using Py_ModuleInit4 instead of + # Py_ModuleInit4_64, etc... 
So we add it here + if get_build_architecture() == 'AMD64': + if self.gcc_version < "4.0": + self.set_executables( + compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0' + ' -Wall -Wstrict-prototypes', + linker_exe='gcc -g -mno-cygwin', + linker_so='gcc -g -mno-cygwin -shared') + else: + # gcc-4 series releases do not support -mno-cygwin option + self.set_executables( + compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', + linker_exe='gcc -g', + linker_so='gcc -g -shared') + else: + if self.gcc_version <= "3.0.0": + self.set_executables( + compiler='gcc -mno-cygwin -O2 -w', + compiler_so='gcc -mno-cygwin -mdll -O2 -w' + ' -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='%s -mno-cygwin -mdll -static %s' % + (self.linker, entry_point)) + elif self.gcc_version < "4.0": + self.set_executables( + compiler='gcc -mno-cygwin -O2 -Wall', + compiler_so='gcc -mno-cygwin -O2 -Wall' + ' -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='g++ -mno-cygwin -shared') + else: + # gcc-4 series releases do not support -mno-cygwin option + self.set_executables(compiler='gcc -O2 -Wall', + compiler_so='gcc -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ ', + linker_so='g++ -shared') + # added for python2.3 support + # we can't pass it through set_executables because pre 2.2 would fail + self.compiler_cxx = ['g++'] + + # Maybe we should also append -mthreads, but then the finished dlls + # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support + # thread-safe exception handling on `Mingw32') + + # no additional libraries needed + #self.dll_libraries=[] + return + + # __init__ () + + def link(self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + export_symbols = None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + # Include the appropriate MSVC runtime library if Python was built + # with MSVC >= 7.0 (MinGW standard is msvcrt) + runtime_library = msvc_runtime_library() + if runtime_library: + if not libraries: + libraries = [] + libraries.append(runtime_library) + args = (self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + None, #export_symbols, we do this in our def-file + debug, + extra_preargs, + extra_postargs, + build_temp, + target_lang) + if self.gcc_version < "3.0.0": + func = distutils.cygwinccompiler.CygwinCCompiler.link + else: + func = UnixCCompiler.link + func(*args[:func.__code__.co_argcount]) + return + + def object_filenames (self, + source_filenames, + strip_dir=0, + output_dir=''): + if output_dir is None: output_dir = '' + obj_names = [] + for src_name in source_filenames: + # use normcase to make sure '.rc' is really '.rc' and not '.RC' + (base, ext) = os.path.splitext (os.path.normcase(src_name)) + + # added these lines to strip off windows drive letters + # without it, .o files are placed next to .c files + # instead of the build directory + drv, base = os.path.splitdrive(base) + if drv: + base = base[1:] + + if ext not in (self.src_extensions + ['.rc', '.res']): + raise UnknownFileError( + "unknown file type '%s' (from '%s')" % \ + (ext, src_name)) + if strip_dir: + base = os.path.basename (base) + if ext == '.res' or ext == '.rc': + # these need to be compiled to object files + obj_names.append (os.path.join (output_dir, + base + 
ext + self.obj_extension))
+            else:
+                obj_names.append (os.path.join (output_dir,
+                                                base + self.obj_extension))
+        return obj_names
+
+    # object_filenames ()
+
+
+def find_python_dll():
+    # We can't do much here:
+    # - find it in the virtualenv (sys.prefix)
+    # - find it in python main dir (sys.base_prefix, if in a virtualenv)
+    # - sys.real_prefix is main dir for virtualenvs in Python 2.7
+    # - in system32,
+    # - otherwise (Sxs), I don't know how to get it.
+    stems = [sys.prefix]
+    if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
+        stems.append(sys.base_prefix)
+    elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
+        stems.append(sys.real_prefix)
+
+    sub_dirs = ['', 'lib', 'bin']
+    # generate possible combinations of directory trees and sub-directories
+    lib_dirs = []
+    for stem in stems:
+        for folder in sub_dirs:
+            lib_dirs.append(os.path.join(stem, folder))
+
+    # add system directory as well
+    if 'SYSTEMROOT' in os.environ:
+        lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32'))
+
+    # search in the file system for possible candidates
+    major_version, minor_version = tuple(sys.version_info[:2])
+    patterns = ['python%d%d.dll']
+
+    for pat in patterns:
+        dllname = pat % (major_version, minor_version)
+        print("Looking for %s" % dllname)
+        for folder in lib_dirs:
+            dll = os.path.join(folder, dllname)
+            if os.path.exists(dll):
+                return dll
+
+    raise ValueError("%s not found in %s" % (dllname, lib_dirs))
+
+def dump_table(dll):
+    st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE)
+    return st.stdout.readlines()
+
+def generate_def(dll, dfile):
+    """Given a dll file location, get all its exported symbols and dump them
+    into the given def file.
+
+    The .def file will be overwritten"""
+    dump = dump_table(dll)
+    for i in range(len(dump)):
+        if _START.match(dump[i].decode()):
+            break
+    else:
+        raise ValueError("Symbol table not found")
+
+    syms = []
+    for j in range(i+1, len(dump)):
+        m = _TABLE.match(dump[j].decode())
+        if m:
+            syms.append((int(m.group(1).strip()), m.group(2)))
+        else:
+            break
+
+    if len(syms) == 0:
+        log.warn('No symbols found in %s' % dll)
+
+    d = open(dfile, 'w')
+    d.write('LIBRARY %s\n' % os.path.basename(dll))
+    d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
+    d.write(';DATA PRELOAD SINGLE\n')
+    d.write('\nEXPORTS\n')
+    for s in syms:
+        #d.write('@%d %s\n' % (s[0], s[1]))
+        d.write('%s\n' % s[1])
+    d.close()
+
+def find_dll(dll_name):
+
+    arch = {'AMD64' : 'amd64',
+            'Intel' : 'x86'}[get_build_architecture()]
+
+    def _find_dll_in_winsxs(dll_name):
+        # Walk through the WinSxS directory to find the dll.
+        winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'),
+                                   'winsxs')
+        if not os.path.exists(winsxs_path):
+            return None
+        for root, dirs, files in os.walk(winsxs_path):
+            if dll_name in files and arch in root:
+                return os.path.join(root, dll_name)
+        return None
+
+    def _find_dll_in_path(dll_name):
+        # First, look in the Python directory, then scan PATH for
+        # the given dll name.
+        for path in [sys.prefix] + os.environ['PATH'].split(';'):
+            filepath = os.path.join(path, dll_name)
+            if os.path.exists(filepath):
+                return os.path.abspath(filepath)
+
+    return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)
+
+def build_msvcr_library(debug=False):
+    if os.name != 'nt':
+        return False
+
+    # If the version number is None, then we couldn't find the MSVC runtime at
+    # all, because we are running on a Python distribution which is custom
+    # compiled; trust that the compiler is the same as the one available to us
+    # now, and that it is capable of linking with the correct runtime without
+    # any extra options.
+    msvcr_ver = msvc_runtime_major()
+    if msvcr_ver is None:
+        log.debug('Skip building import library: '
+                  'Runtime is not compiled with MSVC')
+        return False
+
+    # Skip using a custom library for versions < MSVC 8.0
+    if msvcr_ver < 80:
+        log.debug('Skip building msvcr library:'
+                  ' custom functionality not present')
+        return False
+
+    msvcr_name = msvc_runtime_library()
+    if debug:
+        msvcr_name += 'd'
+
+    # Skip if custom library already exists
+    out_name = "lib%s.a" % msvcr_name
+    out_file = os.path.join(sys.prefix, 'libs', out_name)
+    if os.path.isfile(out_file):
+        log.debug('Skip building msvcr library: "%s" exists' %
+                  (out_file,))
+        return True
+
+    # Find the msvcr dll
+    msvcr_dll_name = msvcr_name + '.dll'
+    dll_file = find_dll(msvcr_dll_name)
+    if not dll_file:
+        log.warn('Cannot build msvcr library: "%s" not found' %
+                 msvcr_dll_name)
+        return False
+
+    def_name = "lib%s.def" % msvcr_name
+    def_file = os.path.join(sys.prefix, 'libs', def_name)
+
+    log.info('Building msvcr library: "%s" (from %s)' \
+             % (out_file, dll_file))
+
+    # Generate a symbol definition file from the msvcr dll
+    generate_def(dll_file, def_file)
+
+    # Create a custom mingw library for the given symbol definitions
+    cmd = ['dlltool', '-d', def_file, '-l', out_file]
+    retcode = subprocess.call(cmd)
+
+    # Clean up symbol definitions
+    os.remove(def_file)
+
+    return (not retcode)
+
+def build_import_library():
+    if os.name != 'nt':
+        return
+
+    arch = get_build_architecture()
+    if arch == 'AMD64':
+        return _build_import_library_amd64()
+    elif arch == 'Intel':
+        return _build_import_library_x86()
+    else:
+        raise ValueError("Unhandled arch %s" % arch)
+
+def _check_for_import_lib():
+    """Check if an import library for the Python runtime already exists."""
+    major_version, minor_version = tuple(sys.version_info[:2])
+
+    # patterns for the file name of the library itself
+    patterns = ['libpython%d%d.a',
+                'libpython%d%d.dll.a',
+                'libpython%d.%d.dll.a']
+
+    # directory trees that may contain the library
+    stems = [sys.prefix]
+    if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
+        stems.append(sys.base_prefix)
+    elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
+        stems.append(sys.real_prefix)
+
+    # possible subdirectories within those trees where it is placed
+    sub_dirs = ['libs', 'lib']
+
+    # generate a list of candidate locations
+    candidates = []
+    for pat in patterns:
+        filename = pat % (major_version, minor_version)
+        for stem_dir in stems:
+            for folder in sub_dirs:
+                candidates.append(os.path.join(stem_dir, folder, filename))
+
+    # test the filesystem to see if we can find any of these
+    for fullname in candidates:
+        if os.path.isfile(fullname):
+            # already exists, in location given
+            return (True, fullname)
+
+    # needs to be built, preferred location given first
+    return (False, candidates[0])
+
+def _build_import_library_amd64():
+    out_exists,
out_file = _check_for_import_lib()
+    if out_exists:
+        log.debug('Skip building import library: "%s" exists', out_file)
+        return
+
+    # get the runtime dll for which we are building import library
+    dll_file = find_python_dll()
+    log.info('Building import library (arch=AMD64): "%s" (from %s)' %
+             (out_file, dll_file))
+
+    # generate symbol list from this library
+    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
+    def_file = os.path.join(sys.prefix, 'libs', def_name)
+    generate_def(dll_file, def_file)
+
+    # generate import library from this symbol list
+    cmd = ['dlltool', '-d', def_file, '-l', out_file]
+    subprocess.Popen(cmd)
+
+def _build_import_library_x86():
+    """ Build the import libraries for Mingw32-gcc on Windows
+    """
+    out_exists, out_file = _check_for_import_lib()
+    if out_exists:
+        log.debug('Skip building import library: "%s" exists', out_file)
+        return
+
+    lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
+    lib_file = os.path.join(sys.prefix, 'libs', lib_name)
+    if not os.path.isfile(lib_file):
+        # didn't find library file in virtualenv, try base distribution, too,
+        # and use that instead if found there. for Python 2.7 venvs, the base
+        # directory is in attribute real_prefix instead of base_prefix.
+        if hasattr(sys, 'base_prefix'):
+            base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
+        elif hasattr(sys, 'real_prefix'):
+            base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
+        else:
+            base_lib = ''  # os.path.isfile('') == False
+
+        if os.path.isfile(base_lib):
+            lib_file = base_lib
+        else:
+            log.warn('Cannot build import library: "%s" not found', lib_file)
+            return
+    log.info('Building import library (ARCH=x86): "%s"', out_file)
+
+    from numpy.distutils import lib2def
+
+    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
+    def_file = os.path.join(sys.prefix, 'libs', def_name)
+    nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
+    nm_output = lib2def.getnm(nm_cmd)
+    dlist, flist = lib2def.parse_nm(nm_output)
+    lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
+
+    dll_name = find_python_dll ()
+    args = (dll_name, def_file, out_file)
+    cmd = 'dlltool --dllname "%s" --def "%s" --output-lib "%s"' % args
+    status = os.system(cmd)
+    # for now, fail silently
+    if status:
+        log.warn('Failed to build import library for gcc. Linking will fail.')
+    return
+
+#=====================================
+# Dealing with Visual Studio MANIFESTS
+#=====================================
+
+# Functions to deal with visual studio manifests. Manifests are a mechanism to
+# enforce strong DLL versioning on windows, and have nothing to do with
+# distutils MANIFEST. manifests are XML files with version info, and used by
+# the OS loader; they are necessary when linking against a DLL not in the
+# system path; in particular, official python 2.6 binary is built against the
+# MS runtime 9 (the one from VS 2008), which is not available on most windows
+# systems; python 2.6 installer does install it in the Win SxS (Side by side)
+# directory, but this requires the manifest for this to work. This is a big
+# mess, thanks MS for a wonderful system.
+
+# XXX: ideally, we should use exactly the same version as used by python. I
+# submitted a patch to get this version, but it was only included for python
+# 2.6.1 and above. So for versions below, we use a "best guess".
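+# Illustrative sketch (not part of the upstream sources) of how the manifest
+# helpers below fit together; 'foo.exe.manifest' is a hypothetical file name.
+# The full version string for the requested runtime is looked up in
+# _MSVCRVER_TO_FULLVER, defined next.
+#
+#     xml = msvc_manifest_xml(9, 0)        # manifest asking for MSVCR 9.0
+#     open('foo.exe.manifest', 'w').write(xml)
+#     rc = manifest_rc('foo.exe.manifest', type='exe')   # rc stub embedding it
+#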
+_MSVCRVER_TO_FULLVER = {}
+if sys.platform == 'win32':
+    try:
+        import msvcrt
+        # I took one version in my SxS directory: no idea if it is the good
+        # one, and we can't retrieve it from python
+        _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
+        _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
+        # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
+        # on Windows XP:
+        _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
+        if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
+            major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
+            _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
+            del major, minor, rest
+    except ImportError:
+        # If we are here, means python was not built with MSVC. Not sure what
+        # to do in that case: manifest building will fail, but it should not be
+        # used in that case anyway
+        log.warn('Cannot import msvcrt: using manifest will not be possible')
+
+def msvc_manifest_xml(maj, min):
+    """Given a major and minor version of the MSVCR, returns the
+    corresponding XML file."""
+    try:
+        fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
+    except KeyError:
+        raise ValueError("Version %d,%d of MSVCRT not supported yet" %
+                         (maj, min))
+    # Don't be fooled, it looks like an XML, but it is not. In particular, it
+    # should not have any space before starting, and its size should be
+    # divisible by 4, most likely for alignment constraints when the xml is
+    # embedded in the binary...
+    # This template was copied directly from the python 2.6 binary (using
+    # strings.exe from mingw on python.exe).
+    template = """\
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+  <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+    <security>
+      <requestedPrivileges>
+        <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
+      </requestedPrivileges>
+    </security>
+  </trustInfo>
+  <dependency>
+    <dependentAssembly>
+      <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
+    </dependentAssembly>
+  </dependency>
+</assembly>"""
+
+    return template % {'fullver': fullver, 'maj': maj, 'min': min}
+
+def manifest_rc(name, type='dll'):
+    """Return the rc file used to generate the res file which will be embedded
+    as manifest for given manifest file name, of given type ('dll' or
+    'exe').
+ + Parameters + ---------- + name : str + name of the manifest file to embed + type : str {'dll', 'exe'} + type of the binary which will embed the manifest + + """ + if type == 'dll': + rctype = 2 + elif type == 'exe': + rctype = 1 + else: + raise ValueError("Type %s not supported" % type) + + return """\ +#include "winuser.h" +%d RT_MANIFEST %s""" % (rctype, name) + +def check_embedded_msvcr_match_linked(msver): + """msver is the ms runtime version used for the MANIFEST.""" + # check msvcr major version are the same for linking and + # embedding + maj = msvc_runtime_major() + if maj: + if not maj == int(msver): + raise ValueError( + "Discrepancy between linked msvcr " \ + "(%d) and the one about to be embedded " \ + "(%d)" % (int(msver), maj)) + +def configtest_name(config): + base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) + return os.path.splitext(base)[0] + +def manifest_name(config): + # Get configest name (including suffix) + root = configtest_name(config) + exext = config.compiler.exe_extension + return root + exext + ".manifest" + +def rc_name(config): + # Get configtest name (including suffix) + root = configtest_name(config) + return root + ".rc" + +def generate_manifest(config): + msver = get_build_msvc_version() + if msver is not None: + if msver >= 8: + check_embedded_msvcr_match_linked(msver) + ma = int(msver) + mi = int((msver - ma) * 10) + # Write the manifest file + manxml = msvc_manifest_xml(ma, mi) + man = open(manifest_name(config), "w") + config.temp_files.append(manifest_name(config)) + man.write(manxml) + man.close() diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/mingw32ccompiler.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/mingw32ccompiler.pyc new file mode 100644 index 0000000..b1ae622 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/mingw32ccompiler.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/misc_util.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/misc_util.py new file mode 100644 index 0000000..67a5f72 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/misc_util.py @@ -0,0 +1,2341 @@ +from __future__ import division, absolute_import, print_function + +import os +import re +import sys +import copy +import glob +import atexit +import tempfile +import subprocess +import shutil +import multiprocessing + +import distutils +from distutils.errors import DistutilsError +try: + from threading import local as tlocal +except ImportError: + from dummy_threading import local as tlocal + +# stores temporary directory of each thread to only create one per thread +_tdata = tlocal() + +# store all created temporary directories so they can be deleted on exit +_tmpdirs = [] +def clean_up_temporary_directory(): + if _tmpdirs is not None: + for d in _tmpdirs: + try: + shutil.rmtree(d) + except OSError: + pass + +atexit.register(clean_up_temporary_directory) + +from numpy.distutils.compat import get_exception +from numpy.compat import basestring +from numpy.compat import npy_load_module + +__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', + 'dict_append', 'appendpath', 'generate_config_py', + 'get_cmd', 'allpath', 'get_mathlibs', + 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', + 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', + 'has_f_sources', 'has_cxx_sources', 'filter_sources', + 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', + 
'get_script_files', 'get_lib_source_files', 'get_data_files',
+           'dot_join', 'get_frame', 'minrelpath', 'njoin',
+           'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
+           'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
+           'get_num_build_jobs']
+
+class InstallableLib(object):
+    """
+    Container to hold information on an installable library.
+
+    Parameters
+    ----------
+    name : str
+        Name of the installed library.
+    build_info : dict
+        Dictionary holding build information.
+    target_dir : str
+        Absolute path specifying where to install the library.
+
+    See Also
+    --------
+    Configuration.add_installed_library
+
+    Notes
+    -----
+    The three parameters are stored as attributes with the same names.
+
+    """
+    def __init__(self, name, build_info, target_dir):
+        self.name = name
+        self.build_info = build_info
+        self.target_dir = target_dir
+
+
+def get_num_build_jobs():
+    """
+    Get number of parallel build jobs set by the --parallel command line
+    argument of setup.py.
+    If the command did not receive a setting the environment variable
+    NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
+    processors on the system, with a maximum of 8 (to prevent
+    overloading the system if there are a lot of CPUs).
+
+    Returns
+    -------
+    out : int
+        number of parallel jobs that can be run
+
+    """
+    from numpy.distutils.core import get_distribution
+    try:
+        cpu_count = len(os.sched_getaffinity(0))
+    except AttributeError:
+        cpu_count = multiprocessing.cpu_count()
+    cpu_count = min(cpu_count, 8)
+    envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count))
+    dist = get_distribution()
+    # may be None during configuration
+    if dist is None:
+        return envjobs
+
+    # any of these three may have the job set, take the largest
+    cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
+               getattr(dist.get_command_obj('build_ext'), 'parallel', None),
+               getattr(dist.get_command_obj('build_clib'), 'parallel', None))
+    if all(x is None for x in cmdattr):
+        return envjobs
+    else:
+        return max(x for x in cmdattr if x is not None)
+
+def quote_args(args):
+    # don't use _nt_quote_args as it does not check if
+    # args items already have quotes or not.
+    args = list(args)
+    for i in range(len(args)):
+        a = args[i]
+        if ' ' in a and a[0] not in '"\'':
+            args[i] = '"%s"' % (a)
+    return args
+
+def allpath(name):
+    "Convert a /-separated pathname to one using the OS's path separator."
+    splitted = name.split('/')
+    return os.path.join(*splitted)
+
+def rel_path(path, parent_path):
+    """Return path relative to parent_path."""
+    # Use realpath to avoid issues with symlinked dirs (see gh-7707)
+    pd = os.path.realpath(os.path.abspath(parent_path))
+    apath = os.path.realpath(os.path.abspath(path))
+    if len(apath) < len(pd):
+        return path
+    if apath == pd:
+        return ''
+    if pd == apath[:len(pd)]:
+        assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
+        path = apath[len(pd)+1:]
+    return path
+
+def get_path_from_frame(frame, parent_path=None):
+    """Return path of the module given a frame object from the call stack.
+
+    Returned path is relative to parent_path when given,
+    otherwise it is absolute path.
+    """
+
+    # First, try to find if the file name is in the frame.
+    try:
+        caller_file = eval('__file__', frame.f_globals, frame.f_locals)
+        d = os.path.dirname(os.path.abspath(caller_file))
+    except NameError:
+        # __file__ is not defined, so let's try __name__.
We try this second + # because setuptools spoofs __name__ to be '__main__' even though + # sys.modules['__main__'] might be something else, like easy_install(1). + caller_name = eval('__name__', frame.f_globals, frame.f_locals) + __import__(caller_name) + mod = sys.modules[caller_name] + if hasattr(mod, '__file__'): + d = os.path.dirname(os.path.abspath(mod.__file__)) + else: + # we're probably running setup.py as execfile("setup.py") + # (likely we're building an egg) + d = os.path.abspath('.') + # hmm, should we use sys.argv[0] like in __builtin__ case? + + if parent_path is not None: + d = rel_path(d, parent_path) + + return d or '.' + +def njoin(*path): + """Join two or more pathname components + + - convert a /-separated pathname to one using the OS's path separator. + - resolve `..` and `.` from path. + + Either passing n arguments as in njoin('a','b'), or a sequence + of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. + """ + paths = [] + for p in path: + if is_sequence(p): + # njoin(['a', 'b'], 'c') + paths.append(njoin(*p)) + else: + assert is_string(p) + paths.append(p) + path = paths + if not path: + # njoin() + joined = '' + else: + # njoin('a', 'b') + joined = os.path.join(*path) + if os.path.sep != '/': + joined = joined.replace('/', os.path.sep) + return minrelpath(joined) + +def get_mathlibs(path=None): + """Return the MATHLIB line from numpyconfig.h + """ + if path is not None: + config_file = os.path.join(path, '_numpyconfig.h') + else: + # Look for the file in each of the numpy include directories. + dirs = get_numpy_include_dirs() + for path in dirs: + fn = os.path.join(path, '_numpyconfig.h') + if os.path.exists(fn): + config_file = fn + break + else: + raise DistutilsError('_numpyconfig.h not found in numpy include ' + 'dirs %r' % (dirs,)) + + fid = open(config_file) + mathlibs = [] + s = '#define MATHLIB' + for line in fid: + if line.startswith(s): + value = line[len(s):].strip() + if value: + mathlibs.extend(value.split(',')) + fid.close() + return mathlibs + +def minrelpath(path): + """Resolve `..` and '.' from path. + """ + if not is_string(path): + return path + if '.' not in path: + return path + l = path.split(os.sep) + while l: + try: + i = l.index('.', 1) + except ValueError: + break + del l[i] + j = 1 + while l: + try: + i = l.index('..', j) + except ValueError: + break + if l[i-1]=='..': + j += 1 + else: + del l[i], l[i-1] + j = 1 + if not l: + return '' + return os.sep.join(l) + +def sorted_glob(fileglob): + """sorts output of python glob for https://bugs.python.org/issue30461 + to allow extensions to have reproducible build results""" + return sorted(glob.glob(fileglob)) + +def _fix_paths(paths, local_path, include_non_existing): + assert is_sequence(paths), repr(type(paths)) + new_paths = [] + assert not is_string(paths), repr(paths) + for n in paths: + if is_string(n): + if '*' in n or '?' 
in n:
+                p = sorted_glob(n)
+                p2 = sorted_glob(njoin(local_path, n))
+                if p2:
+                    new_paths.extend(p2)
+                elif p:
+                    new_paths.extend(p)
+                else:
+                    if include_non_existing:
+                        new_paths.append(n)
+                    print('could not resolve pattern in %r: %r' %
+                          (local_path, n))
+            else:
+                n2 = njoin(local_path, n)
+                if os.path.exists(n2):
+                    new_paths.append(n2)
+                else:
+                    if os.path.exists(n):
+                        new_paths.append(n)
+                    elif include_non_existing:
+                        new_paths.append(n)
+                    if not os.path.exists(n):
+                        print('non-existing path in %r: %r' %
+                              (local_path, n))
+
+        elif is_sequence(n):
+            new_paths.extend(_fix_paths(n, local_path, include_non_existing))
+        else:
+            new_paths.append(n)
+    return [minrelpath(p) for p in new_paths]
+
+def gpaths(paths, local_path='', include_non_existing=True):
+    """Apply glob to paths and prepend local_path if needed.
+    """
+    if is_string(paths):
+        paths = (paths,)
+    return _fix_paths(paths, local_path, include_non_existing)
+
+def make_temp_file(suffix='', prefix='', text=True):
+    if not hasattr(_tdata, 'tempdir'):
+        _tdata.tempdir = tempfile.mkdtemp()
+        _tmpdirs.append(_tdata.tempdir)
+    fid, name = tempfile.mkstemp(suffix=suffix,
+                                 prefix=prefix,
+                                 dir=_tdata.tempdir,
+                                 text=text)
+    fo = os.fdopen(fid, 'w')
+    return fo, name
+
+# Hooks for colored terminal output.
+# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
+def terminal_has_colors():
+    if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
+        # Avoid importing curses that causes illegal operation
+        # with a message:
+        #  PYTHON2 caused an invalid page fault in
+        #  module CYGNURSES7.DLL as 015f:18bbfc28
+        # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
+        #          ssh to Win32 machine from debian
+        #          curses.version is 2.2
+        #          CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
+        return 0
+    if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
+        try:
+            import curses
+            curses.setupterm()
+            if (curses.tigetnum("colors") >= 0
+                and curses.tigetnum("pairs") >= 0
+                and ((curses.tigetstr("setf") is not None
+                      and curses.tigetstr("setb") is not None)
+                     or (curses.tigetstr("setaf") is not None
+                         and curses.tigetstr("setab") is not None)
+                     or curses.tigetstr("scp") is not None)):
+                return 1
+        except Exception:
+            pass
+    return 0
+
+if terminal_has_colors():
+    _colour_codes = dict(black=0, red=1, green=2, yellow=3,
+                         blue=4, magenta=5, cyan=6, white=7, default=9)
+    def colour_text(s, fg=None, bg=None, bold=False):
+        seq = []
+        if bold:
+            seq.append('1')
+        if fg:
+            fgcode = 30 + _colour_codes.get(fg.lower(), 0)
+            seq.append(str(fgcode))
+        if bg:
+            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
+            seq.append(str(bgcode))
+        if seq:
+            return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
+        else:
+            return s
+else:
+    def colour_text(s, fg=None, bg=None, bold=False):
+        return s
+
+def default_text(s):
+    return colour_text(s, 'default')
+def red_text(s):
+    return colour_text(s, 'red')
+def green_text(s):
+    return colour_text(s, 'green')
+def yellow_text(s):
+    return colour_text(s, 'yellow')
+def cyan_text(s):
+    return colour_text(s, 'cyan')
+def blue_text(s):
+    return colour_text(s, 'blue')
+
+#########################
+
+def cyg2win32(path):
+    if sys.platform=='cygwin' and path.startswith('/cygdrive'):
+        path = path[10] + ':' + os.path.normcase(path[11:])
+    return path
+
+def mingw32():
+    """Return true when using mingw32 environment.
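+
+    True only on win32 when either ``OSTYPE=msys`` or ``MSYSTEM=MINGW32``
+    is present in the environment (see the checks below).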
+    """
+    if sys.platform=='win32':
+        if os.environ.get('OSTYPE', '')=='msys':
+            return True
+        if os.environ.get('MSYSTEM', '')=='MINGW32':
+            return True
+    return False
+
+def msvc_runtime_version():
+    "Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
+    msc_pos = sys.version.find('MSC v.')
+    if msc_pos != -1:
+        msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
+    else:
+        msc_ver = None
+    return msc_ver
+
+def msvc_runtime_library():
+    "Return name of MSVC runtime library if Python was built with MSVC >= 7"
+    ver = msvc_runtime_major ()
+    if ver:
+        if ver < 140:
+            return "msvcr%i" % ver
+        else:
+            return "vcruntime%i" % ver
+    else:
+        return None
+
+def msvc_runtime_major():
+    "Return major version of MSVC runtime coded like get_build_msvc_version"
+    major = {1300:  70,   # MSVC 7.0
+             1310:  71,   # MSVC 7.1
+             1400:  80,   # MSVC 8
+             1500:  90,   # MSVC 9  (aka 2008)
+             1600: 100,   # MSVC 10 (aka 2010)
+             1900: 140,   # MSVC 14 (aka 2015)
+    }.get(msvc_runtime_version(), None)
+    return major
+
+#########################
+
+#XXX need support for .C that is also C++
+cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
+fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
+f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
+f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
+def _get_f90_modules(source):
+    """Return a list of Fortran f90 module names that
+    given source file defines.
+    """
+    if not f90_ext_match(source):
+        return []
+    modules = []
+    f = open(source, 'r')
+    for line in f:
+        m = f90_module_name_match(line)
+        if m:
+            name = m.group('name')
+            modules.append(name)
+            # break  # XXX can we assume that there is one module per file?
+    f.close()
+    return modules
+
+def is_string(s):
+    return isinstance(s, basestring)
+
+def all_strings(lst):
+    """Return True if all items in lst are string objects. """
+    for item in lst:
+        if not is_string(item):
+            return False
+    return True
+
+def is_sequence(seq):
+    if is_string(seq):
+        return False
+    try:
+        len(seq)
+    except Exception:
+        return False
+    return True
+
+def is_glob_pattern(s):
+    return is_string(s) and ('*' in s or '?' in s)
+
+def as_list(seq):
+    if is_sequence(seq):
+        return list(seq)
+    else:
+        return [seq]
+
+def get_language(sources):
+    # not used in numpy/scipy packages, use build_ext.detect_language instead
+    """Determine language value (c,f77,f90) from sources """
+    language = None
+    for source in sources:
+        if isinstance(source, str):
+            if f90_ext_match(source):
+                language = 'f90'
+                break
+            elif fortran_ext_match(source):
+                language = 'f77'
+    return language
+
+def has_f_sources(sources):
+    """Return True if sources contains Fortran files """
+    for source in sources:
+        if fortran_ext_match(source):
+            return True
+    return False
+
+def has_cxx_sources(sources):
+    """Return True if sources contains C++ files """
+    for source in sources:
+        if cxx_ext_match(source):
+            return True
+    return False
+
+def filter_sources(sources):
+    """Return four lists of filenames containing
+    C, C++, Fortran, and Fortran 90 module sources,
+    respectively.
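+
+    For example (an illustrative sketch; ``m.f90`` is assumed to define a
+    Fortran module, which requires reading the file)::
+
+        filter_sources(['a.c', 'b.cpp', 'c.f', 'm.f90'])
+        # -> (['a.c'], ['b.cpp'], ['c.f'], ['m.f90'])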
+ """ + c_sources = [] + cxx_sources = [] + f_sources = [] + fmodule_sources = [] + for source in sources: + if fortran_ext_match(source): + modules = _get_f90_modules(source) + if modules: + fmodule_sources.append(source) + else: + f_sources.append(source) + elif cxx_ext_match(source): + cxx_sources.append(source) + else: + c_sources.append(source) + return c_sources, cxx_sources, f_sources, fmodule_sources + + +def _get_headers(directory_list): + # get *.h files from list of directories + headers = [] + for d in directory_list: + head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? + headers.extend(head) + return headers + +def _get_directories(list_of_sources): + # get unique directories from list of sources. + direcs = [] + for f in list_of_sources: + d = os.path.split(f) + if d[0] != '' and not d[0] in direcs: + direcs.append(d[0]) + return direcs + +def _commandline_dep_string(cc_args, extra_postargs, pp_opts): + """ + Return commandline representation used to determine if a file needs + to be recompiled + """ + cmdline = 'commandline: ' + cmdline += ' '.join(cc_args) + cmdline += ' '.join(extra_postargs) + cmdline += ' '.join(pp_opts) + '\n' + return cmdline + + +def get_dependencies(sources): + #XXX scan sources for include statements + return _get_headers(_get_directories(sources)) + +def is_local_src_dir(directory): + """Return true if directory is local directory. + """ + if not is_string(directory): + return False + abs_dir = os.path.abspath(directory) + c = os.path.commonprefix([os.getcwd(), abs_dir]) + new_dir = abs_dir[len(c):].split(os.sep) + if new_dir and not new_dir[0]: + new_dir = new_dir[1:] + if new_dir and new_dir[0]=='build': + return False + new_dir = os.sep.join(new_dir) + return os.path.isdir(new_dir) + +def general_source_files(top_path): + pruned_directories = {'CVS':1, '.svn':1, 'build':1} + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for f in filenames: + if not prune_file_pat.search(f): + yield os.path.join(dirpath, f) + +def general_source_directories_files(top_path): + """Return a directory name relative to top_path and + files contained. + """ + pruned_directories = ['CVS', '.svn', 'build'] + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for d in dirnames: + dpath = os.path.join(dirpath, d) + rpath = rel_path(dpath, top_path) + files = [] + for f in os.listdir(dpath): + fn = os.path.join(dpath, f) + if os.path.isfile(fn) and not prune_file_pat.search(fn): + files.append(fn) + yield rpath, files + dpath = top_path + rpath = rel_path(dpath, top_path) + filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ + if not prune_file_pat.search(f)] + files = [f for f in filenames if os.path.isfile(f)] + yield rpath, files + + +def get_ext_source_files(ext): + # Get sources and any include files in the same directory. 
+ filenames = [] + sources = [_m for _m in ext.sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + for d in ext.depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_script_files(scripts): + scripts = [_m for _m in scripts if is_string(_m)] + return scripts + +def get_lib_source_files(lib): + filenames = [] + sources = lib[1].get('sources', []) + sources = [_m for _m in sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + depends = lib[1].get('depends', []) + for d in depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_shared_lib_extension(is_python_ext=False): + """Return the correct file extension for shared libraries. + + Parameters + ---------- + is_python_ext : bool, optional + Whether the shared library is a Python extension. Default is False. + + Returns + ------- + so_ext : str + The shared library extension. + + Notes + ----- + For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, + and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on + POSIX systems according to PEP 3149. For Python 3.2 this is implemented on + Linux, but not on OS X. + + """ + confvars = distutils.sysconfig.get_config_vars() + # SO is deprecated in 3.3.1, use EXT_SUFFIX instead + so_ext = confvars.get('EXT_SUFFIX', None) + if so_ext is None: + so_ext = confvars.get('SO', '') + + if not is_python_ext: + # hardcode known values, config vars (including SHLIB_SUFFIX) are + # unreliable (see #3182) + # darwin, windows and debug linux are wrong in 3.3.1 and older + if (sys.platform.startswith('linux') or + sys.platform.startswith('gnukfreebsd')): + so_ext = '.so' + elif sys.platform.startswith('darwin'): + so_ext = '.dylib' + elif sys.platform.startswith('win'): + so_ext = '.dll' + else: + # fall back to config vars for unknown platforms + # fix long extension for Python >=3.2, see PEP 3149. + if 'SOABI' in confvars: + # Does nothing unless SOABI config var exists + so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) + + return so_ext + +def get_data_files(data): + if is_string(data): + return [data] + sources = data[1] + filenames = [] + for s in sources: + if hasattr(s, '__call__'): + continue + if is_local_src_dir(s): + filenames.extend(list(general_source_files(s))) + elif is_string(s): + if os.path.isfile(s): + filenames.append(s) + else: + print('Not existing data file:', s) + else: + raise TypeError(repr(s)) + return filenames + +def dot_join(*args): + return '.'.join([a for a in args if a]) + +def get_frame(level=0): + """Return frame object from call stack with given level. 
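+
+    ``get_frame(0)`` is the frame of the caller itself, ``get_frame(1)``
+    that of the caller's caller, and so on.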
+    """
+    try:
+        return sys._getframe(level+1)
+    except AttributeError:
+        frame = sys.exc_info()[2].tb_frame
+        for _ in range(level+1):
+            frame = frame.f_back
+        return frame
+
+
+######################
+
+class Configuration(object):
+
+    _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
+                  'libraries', 'headers', 'scripts', 'py_modules',
+                  'installed_libraries', 'define_macros']
+    _dict_keys = ['package_dir', 'installed_pkg_config']
+    _extra_keys = ['name', 'version']
+
+    numpy_include_dirs = []
+
+    def __init__(self,
+                 package_name=None,
+                 parent_name=None,
+                 top_path=None,
+                 package_path=None,
+                 caller_level=1,
+                 setup_name='setup.py',
+                 **attrs):
+        """Construct configuration instance of a package.
+
+        package_name -- name of the package
+                        Ex.: 'distutils'
+        parent_name  -- name of the parent package
+                        Ex.: 'numpy'
+        top_path     -- directory of the toplevel package
+                        Ex.: the directory where the numpy package source sits
+        package_path -- directory of package. Will be computed by magic from the
+                        directory of the caller module if not specified
+                        Ex.: the directory where numpy.distutils is
+        caller_level -- frame level to caller namespace, internal parameter.
+        """
+        self.name = dot_join(parent_name, package_name)
+        self.version = None
+
+        caller_frame = get_frame(caller_level)
+        self.local_path = get_path_from_frame(caller_frame, top_path)
+        # local_path -- directory of a file (usually setup.py) that
+        # defines a configuration() function.
+        if top_path is None:
+            top_path = self.local_path
+            self.local_path = ''
+        if package_path is None:
+            package_path = self.local_path
+        elif os.path.isdir(njoin(self.local_path, package_path)):
+            package_path = njoin(self.local_path, package_path)
+        if not os.path.isdir(package_path or '.'):
+            raise ValueError("%r is not a directory" % (package_path,))
+        self.top_path = top_path
+        self.package_path = package_path
+        # this is the relative path in the installed package
+        self.path_in_package = os.path.join(*self.name.split('.'))
+
+        self.list_keys = self._list_keys[:]
+        self.dict_keys = self._dict_keys[:]
+
+        for n in self.list_keys:
+            v = copy.copy(attrs.get(n, []))
+            setattr(self, n, as_list(v))
+
+        for n in self.dict_keys:
+            v = copy.copy(attrs.get(n, {}))
+            setattr(self, n, v)
+
+        known_keys = self.list_keys + self.dict_keys
+        self.extra_keys = self._extra_keys[:]
+        for n in attrs.keys():
+            if n in known_keys:
+                continue
+            a = attrs[n]
+            setattr(self, n, a)
+            if isinstance(a, list):
+                self.list_keys.append(n)
+            elif isinstance(a, dict):
+                self.dict_keys.append(n)
+            else:
+                self.extra_keys.append(n)
+
+        if os.path.exists(njoin(package_path, '__init__.py')):
+            self.packages.append(self.name)
+            self.package_dir[self.name] = package_path
+
+        self.options = dict(
+            ignore_setup_xxx_py = False,
+            assume_default_configuration = False,
+            delegate_options_to_subpackages = False,
+            quiet = False,
+            )
+
+        caller_instance = None
+        for i in range(1, 3):
+            try:
+                f = get_frame(i)
+            except ValueError:
+                break
+            try:
+                caller_instance = eval('self', f.f_globals, f.f_locals)
+                break
+            except NameError:
+                pass
+        if isinstance(caller_instance, self.__class__):
+            if caller_instance.options['delegate_options_to_subpackages']:
+                self.set_options(**caller_instance.options)
+
+        self.setup_name = setup_name
+
+    def todict(self):
+        """
+        Return a dictionary compatible with the keyword arguments of distutils
+        setup function.
+ + Examples + -------- + >>> setup(**config.todict()) #doctest: +SKIP + """ + + self._optimize_data_files() + d = {} + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for n in known_keys: + a = getattr(self, n) + if a: + d[n] = a + return d + + def info(self, message): + if not self.options['quiet']: + print(message) + + def warn(self, message): + sys.stderr.write('Warning: %s' % (message,)) + + def set_options(self, **options): + """ + Configure Configuration instance. + + The following options are available: + - ignore_setup_xxx_py + - assume_default_configuration + - delegate_options_to_subpackages + - quiet + + """ + for key, value in options.items(): + if key in self.options: + self.options[key] = value + else: + raise ValueError('Unknown option: '+key) + + def get_distribution(self): + """Return the distutils distribution object for self.""" + from numpy.distutils.core import get_distribution + return get_distribution() + + def _wildcard_get_subpackage(self, subpackage_name, + parent_name, + caller_level = 1): + l = subpackage_name.split('.') + subpackage_path = njoin([self.local_path]+l) + dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] + config_list = [] + for d in dirs: + if not os.path.isfile(njoin(d, '__init__.py')): + continue + if 'build' in d.split(os.sep): + continue + n = '.'.join(d.split(os.sep)[-len(l):]) + c = self.get_subpackage(n, + parent_name = parent_name, + caller_level = caller_level+1) + config_list.extend(c) + return config_list + + def _get_configuration_from_setup_py(self, setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = 1): + # In case setup_py imports local modules: + sys.path.insert(0, os.path.dirname(setup_py)) + try: + setup_name = os.path.splitext(os.path.basename(setup_py))[0] + n = dot_join(self.name, subpackage_name, setup_name) + setup_module = npy_load_module('_'.join(n.split('.')), + setup_py, + ('.py', 'U', 1)) + if not hasattr(setup_module, 'configuration'): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s does not define configuration())'\ + % (setup_module)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level + 1) + else: + pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) + args = (pn,) + def fix_args_py2(args): + if setup_module.configuration.__code__.co_argcount > 1: + args = args + (self.top_path,) + return args + def fix_args_py3(args): + if setup_module.configuration.__code__.co_argcount > 1: + args = args + (self.top_path,) + return args + if sys.version_info[0] < 3: + args = fix_args_py2(args) + else: + args = fix_args_py3(args) + config = setup_module.configuration(*args) + if config.name!=dot_join(parent_name, subpackage_name): + self.warn('Subpackage %r configuration returned as %r' % \ + (dot_join(parent_name, subpackage_name), config.name)) + finally: + del sys.path[0] + return config + + def get_subpackage(self,subpackage_name, + subpackage_path=None, + parent_name=None, + caller_level = 1): + """Return list of subpackage configurations. + + Parameters + ---------- + subpackage_name : str or None + Name of the subpackage to get the configuration. '*' in + subpackage_name is handled as a wildcard. + subpackage_path : str + If None, then the path is assumed to be the local path plus the + subpackage_name. If a setup.py file is not found in the + subpackage_path, then a default configuration is used. 
+        parent_name : str
+            Parent name.
+        """
+        if subpackage_name is None:
+            if subpackage_path is None:
+                raise ValueError(
+                    "either subpackage_name or subpackage_path must be specified")
+            subpackage_name = os.path.basename(subpackage_path)
+
+        # handle wildcards
+        l = subpackage_name.split('.')
+        if subpackage_path is None and '*' in subpackage_name:
+            return self._wildcard_get_subpackage(subpackage_name,
+                                                 parent_name,
+                                                 caller_level = caller_level+1)
+        assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
+        if subpackage_path is None:
+            subpackage_path = njoin([self.local_path] + l)
+        else:
+            subpackage_path = njoin([subpackage_path] + l[:-1])
+            subpackage_path = self.paths([subpackage_path])[0]
+        setup_py = njoin(subpackage_path, self.setup_name)
+        if not self.options['ignore_setup_xxx_py']:
+            if not os.path.isfile(setup_py):
+                setup_py = njoin(subpackage_path,
+                                 'setup_%s.py' % (subpackage_name))
+        if not os.path.isfile(setup_py):
+            if not self.options['assume_default_configuration']:
+                self.warn('Assuming default configuration '\
+                          '(%s/{setup_%s,setup}.py was not found)' \
+                          % (os.path.dirname(setup_py), subpackage_name))
+            config = Configuration(subpackage_name, parent_name,
+                                   self.top_path, subpackage_path,
+                                   caller_level = caller_level+1)
+        else:
+            config = self._get_configuration_from_setup_py(
+                setup_py,
+                subpackage_name,
+                subpackage_path,
+                parent_name,
+                caller_level = caller_level + 1)
+        if config:
+            return [config]
+        else:
+            return []
+
+    def add_subpackage(self,subpackage_name,
+                       subpackage_path=None,
+                       standalone = False):
+        """Add a sub-package to the current Configuration instance.
+
+        This is useful in a setup.py script for adding sub-packages to a
+        package.
+
+        Parameters
+        ----------
+        subpackage_name : str
+            name of the subpackage
+        subpackage_path : str
+            if given, the subpackage path such as the subpackage is in
+            subpackage_path / subpackage_name. If None, the subpackage is
+            assumed to be located in the local path / subpackage_name.
+        standalone : bool
+        """
+
+        if standalone:
+            parent_name = None
+        else:
+            parent_name = self.name
+        config_list = self.get_subpackage(subpackage_name, subpackage_path,
+                                          parent_name = parent_name,
+                                          caller_level = 2)
+        if not config_list:
+            self.warn('No configuration returned, assuming unavailable.')
+        for config in config_list:
+            d = config
+            if isinstance(config, Configuration):
+                d = config.todict()
+            assert isinstance(d, dict), repr(type(d))
+
+            self.info('Appending %s configuration to %s' \
+                      % (d.get('name'), self.name))
+            self.dict_append(**d)
+
+        dist = self.get_distribution()
+        if dist is not None:
+            self.warn('distutils distribution has been initialized,'\
+                      ' it may be too late to add a subpackage '+ subpackage_name)
+
+    def add_data_dir(self, data_path):
+        """Recursively add files under data_path to data_files list.
+
+        Recursively add files under data_path to the list of data_files to be
+        installed (and distributed). The data_path can be either a relative
+        path-name, or an absolute path-name, or a 2-tuple where the first
+        argument shows where in the install directory the data directory
+        should be installed to.
+
+        Parameters
+        ----------
+        data_path : seq or str
+            Argument can be either
+
+                * 2-sequence (<datadir suffix>, <path to data directory>)
+                * path to data directory where python datadir suffix defaults
+                  to package dir.
+
+        Notes
+        -----
+        Rules for installation paths::
+
+            foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
+            (gun, foo/bar) -> parent/gun
+            foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
+            (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
+            (gun/*, foo/*) -> parent/gun/a, parent/gun/b
+            /foo/bar -> (bar, /foo/bar) -> parent/bar
+            (gun, /foo/bar) -> parent/gun
+            (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
+
+        Examples
+        --------
+        For example suppose the source directory contains fun/foo.dat and
+        fun/bar/car.dat:
+
+        >>> self.add_data_dir('fun')                       #doctest: +SKIP
+        >>> self.add_data_dir(('sun', 'fun'))              #doctest: +SKIP
+        >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
+
+        Will install data-files to the locations::
+
+            <package install directory>/
+              fun/
+                foo.dat
+                bar/
+                  car.dat
+              sun/
+                foo.dat
+                bar/
+                  car.dat
+              gun/
+                foo.dat
+                car.dat
+
+        """
+        if is_sequence(data_path):
+            d, data_path = data_path
+        else:
+            d = None
+        if is_sequence(data_path):
+            [self.add_data_dir((d, p)) for p in data_path]
+            return
+        if not is_string(data_path):
+            raise TypeError("not a string: %r" % (data_path,))
+        if d is None:
+            if os.path.isabs(data_path):
+                return self.add_data_dir((os.path.basename(data_path), data_path))
+            return self.add_data_dir((data_path, data_path))
+        paths = self.paths(data_path, include_non_existing=False)
+        if is_glob_pattern(data_path):
+            if is_glob_pattern(d):
+                pattern_list = allpath(d).split(os.sep)
+                pattern_list.reverse()
+                # /a/*//b/ -> /a/*/b
+                rl = list(range(len(pattern_list)-1)); rl.reverse()
+                for i in rl:
+                    if not pattern_list[i]:
+                        del pattern_list[i]
+                #
+                for path in paths:
+                    if not os.path.isdir(path):
+                        print('Not a directory, skipping', path)
+                        continue
+                    rpath = rel_path(path, self.local_path)
+                    path_list = rpath.split(os.sep)
+                    path_list.reverse()
+                    target_list = []
+                    i = 0
+                    for s in pattern_list:
+                        if is_glob_pattern(s):
+                            if i>=len(path_list):
+                                raise ValueError('cannot fill pattern %r with %r' \
+                                      % (d, path))
+                            target_list.append(path_list[i])
+                        else:
+                            assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
+                            target_list.append(s)
+                        i += 1
+                    if path_list[i:]:
+                        self.warn('mismatch of pattern_list=%s and path_list=%s'\
+                                  % (pattern_list, path_list))
+                    target_list.reverse()
+                    self.add_data_dir((os.sep.join(target_list), path))
+            else:
+                for path in paths:
+                    self.add_data_dir((d, path))
+            return
+        assert not is_glob_pattern(d), repr(d)
+
+        dist = self.get_distribution()
+        if dist is not None and dist.data_files is not None:
+            data_files = dist.data_files
+        else:
+            data_files = self.data_files
+
+        for path in paths:
+            for d1, f in list(general_source_directories_files(path)):
+                target_path = os.path.join(self.path_in_package, d, d1)
+                data_files.append((target_path, f))
+
+    def _optimize_data_files(self):
+        data_dict = {}
+        for p, files in self.data_files:
+            if p not in data_dict:
+                data_dict[p] = set()
+            for f in files:
+                data_dict[p].add(f)
+        self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
+
+    def add_data_files(self,*files):
+        """Add data files to configuration data_files.
+
+        Parameters
+        ----------
+        files : sequence
+            Argument(s) can be either
+
+                * 2-sequence (<datadir prefix>, <path to data file(s)>)
+                * paths to data files where python datadir prefix defaults
+                  to package dir.
+
+        Notes
+        -----
+        The form of each element of the files sequence is very flexible
+        allowing many combinations of where to get the files from the package
+        and where they should ultimately be installed on the system.
+        The most basic usage is for an element of the files argument sequence
+        to be a simple filename. This will cause that file from the local path
+        to be installed to the installation path of the self.name package
+        (package path). The file argument can also be a relative path in which
+        case the entire relative path will be installed into the package
+        directory. Finally, the file can be an absolute path name in which
+        case the file will be found at the absolute path name but installed
+        to the package path.
+
+        This basic behavior can be augmented by passing a 2-tuple in as the
+        file argument. The first element of the tuple should specify the
+        relative path (under the package install directory) where the
+        remaining sequence of files should be installed to (it has nothing to
+        do with the file-names in the source distribution). The second element
+        of the tuple is the sequence of files that should be installed. The
+        files in this sequence can be filenames, relative paths, or absolute
+        paths. For absolute paths the file will be installed in the top-level
+        package installation directory (regardless of the first argument).
+        Filenames and relative path names will be installed in the package
+        install directory under the path name given as the first element of
+        the tuple.
+
+        Rules for installation paths:
+
+          #. file.txt -> (., file.txt)-> parent/file.txt
+          #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
+          #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
+          #. ``*``.txt -> parent/a.txt, parent/b.txt
+          #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
+          #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
+          #. (sun, file.txt) -> parent/sun/file.txt
+          #. (sun, bar/file.txt) -> parent/sun/file.txt
+          #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
+          #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+          #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+          #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+
+        An additional feature is that the path to a data-file can actually be
+        a function that takes no arguments and returns the actual path(s) to
+        the data-files. This is useful when the data files are generated while
+        building the package.
+
+        Examples
+        --------
+        Add files to the list of data_files to be included with the package.
+
+        >>> self.add_data_files('foo.dat',
+        ...     ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
+        ...     'bar/cat.dat',
+        ...     '/full/path/to/can.dat')   #doctest: +SKIP
+
+        will install these data files to::
+
+            <package install directory>/
+             foo.dat
+             fun/
+               gun.dat
+               nun/
+                 pun.dat
+             sun.dat
+             bar/
+               cat.dat
+             can.dat
+
+        where <package install directory> is the package (or sub-package)
+        directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
+        \\Python2.4 \\Lib \\site-packages \\mypackage') or
+        '/usr/lib/python2.4/site-packages/mypackage/mysubpackage' ('C:
+        \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
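+
+        The callable form described in the Notes above can be sketched as
+        follows (``generate_table`` and the file name are hypothetical):
+
+        >>> def generate_table():
+        ...     with open('table.dat', 'w') as f:
+        ...         f.write('1 2 3')
+        ...     return 'table.dat'
+        >>> self.add_data_files(('data', generate_table))  #doctest: +SKIP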
+        """
+
+        if len(files)>1:
+            for f in files:
+                self.add_data_files(f)
+            return
+        assert len(files)==1
+        if is_sequence(files[0]):
+            d, files = files[0]
+        else:
+            d = None
+        if is_string(files):
+            filepat = files
+        elif is_sequence(files):
+            if len(files)==1:
+                filepat = files[0]
+            else:
+                for f in files:
+                    self.add_data_files((d, f))
+                return
+        else:
+            raise TypeError(repr(type(files)))
+
+        if d is None:
+            if hasattr(filepat, '__call__'):
+                d = ''
+            elif os.path.isabs(filepat):
+                d = ''
+            else:
+                d = os.path.dirname(filepat)
+            self.add_data_files((d, files))
+            return
+
+        paths = self.paths(filepat, include_non_existing=False)
+        if is_glob_pattern(filepat):
+            if is_glob_pattern(d):
+                pattern_list = d.split(os.sep)
+                pattern_list.reverse()
+                for path in paths:
+                    path_list = path.split(os.sep)
+                    path_list.reverse()
+                    path_list.pop() # filename
+                    target_list = []
+                    i = 0
+                    for s in pattern_list:
+                        if is_glob_pattern(s):
+                            target_list.append(path_list[i])
+                            i += 1
+                        else:
+                            target_list.append(s)
+                    target_list.reverse()
+                    self.add_data_files((os.sep.join(target_list), path))
+            else:
+                self.add_data_files((d, paths))
+            return
+        assert not is_glob_pattern(d), repr((d, filepat))
+
+        dist = self.get_distribution()
+        if dist is not None and dist.data_files is not None:
+            data_files = dist.data_files
+        else:
+            data_files = self.data_files
+
+        data_files.append((os.path.join(self.path_in_package, d), paths))
+
+    ### XXX Implement add_py_modules
+
+    def add_define_macros(self, macros):
+        """Add define macros to configuration
+
+        Add the given sequence of macro (name, value) pairs to the beginning
+        of the define_macros list. This list will be visible to all extension
+        modules of the current package.
+        """
+        dist = self.get_distribution()
+        if dist is not None:
+            if not hasattr(dist, 'define_macros'):
+                dist.define_macros = []
+            dist.define_macros.extend(macros)
+        else:
+            self.define_macros.extend(macros)
+
+
+    def add_include_dirs(self,*paths):
+        """Add paths to configuration include directories.
+
+        Add the given sequence of paths to the beginning of the include_dirs
+        list. This list will be visible to all extension modules of the
+        current package.
+        """
+        include_dirs = self.paths(paths)
+        dist = self.get_distribution()
+        if dist is not None:
+            if dist.include_dirs is None:
+                dist.include_dirs = []
+            dist.include_dirs.extend(include_dirs)
+        else:
+            self.include_dirs.extend(include_dirs)
+
+    def add_headers(self,*files):
+        """Add installable headers to configuration.
+
+        Add the given sequence of files to the beginning of the headers list.
+        By default, headers will be installed under <python-
+        include>/<self.name.replace('.','/')>/ directory. If an item of files
+        is a tuple, then its first argument specifies the actual installation
+        location relative to the <python-include> path.
+
+        Parameters
+        ----------
+        files : str or seq
+            Argument(s) can be either:
+
+                * 2-sequence (<includedir suffix>, <path to header file(s)>)
+                * path(s) to header file(s) where python includedir suffix will
+                  default to package name.
+        """
+        headers = []
+        for path in files:
+            if is_string(path):
+                [headers.append((self.name, p)) for p in self.paths(path)]
+            else:
+                if not isinstance(path, (tuple, list)) or len(path) != 2:
+                    raise TypeError(repr(path))
+                [headers.append((path[0], p)) for p in self.paths(path[1])]
+        dist = self.get_distribution()
+        if dist is not None:
+            if dist.headers is None:
+                dist.headers = []
+            dist.headers.extend(headers)
+        else:
+            self.headers.extend(headers)
+
+    def paths(self,*paths,**kws):
+        """Apply glob to paths and prepend local_path if needed.
+
+        Applies glob.glob(...)
to each path in the sequence (if needed) and + pre-pends the local_path if needed. Because this is called on all + source lists, this allows wildcard characters to be specified in lists + of sources for extension modules and libraries and scripts and allows + path-names be relative to the source directory. + + """ + include_non_existing = kws.get('include_non_existing', True) + return gpaths(paths, + local_path = self.local_path, + include_non_existing=include_non_existing) + + def _fix_paths_dict(self, kw): + for k in kw.keys(): + v = kw[k] + if k in ['sources', 'depends', 'include_dirs', 'library_dirs', + 'module_dirs', 'extra_objects']: + new_v = self.paths(v) + kw[k] = new_v + + def add_extension(self,name,sources,**kw): + """Add extension to configuration. + + Create and add an Extension instance to the ext_modules list. This + method also takes the following optional keyword arguments that are + passed on to the Extension constructor. + + Parameters + ---------- + name : str + name of the extension + sources : seq + list of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + include_dirs : + define_macros : + undef_macros : + library_dirs : + libraries : + runtime_library_dirs : + extra_objects : + extra_compile_args : + extra_link_args : + extra_f77_compile_args : + extra_f90_compile_args : + export_symbols : + swig_opts : + depends : + The depends list contains paths to files or directories that the + sources of the extension module depend on. If any path in the + depends list is newer than the extension module, then the module + will be rebuilt. + language : + f2py_options : + module_dirs : + extra_info : dict or list + dict or list of dict of keywords to be appended to keywords. + + Notes + ----- + The self.paths(...) method is applied to all lists that may contain + paths. 
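+
+        For example, a minimal sketch (``foo.c`` is a hypothetical source
+        file; ``info`` as returned by `get_info`):
+
+        >>> info = get_info('npymath')  #doctest: +SKIP
+        >>> config.add_extension('_foo', sources=['foo.c'],
+        ...                      extra_info=info)  #doctest: +SKIP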
+ """ + ext_args = copy.copy(kw) + ext_args['name'] = dot_join(self.name, name) + ext_args['sources'] = sources + + if 'extra_info' in ext_args: + extra_info = ext_args['extra_info'] + del ext_args['extra_info'] + if isinstance(extra_info, dict): + extra_info = [extra_info] + for info in extra_info: + assert isinstance(info, dict), repr(info) + dict_append(ext_args,**info) + + self._fix_paths_dict(ext_args) + + # Resolve out-of-tree dependencies + libraries = ext_args.get('libraries', []) + libnames = [] + ext_args['libraries'] = [] + for libname in libraries: + if isinstance(libname, tuple): + self._fix_paths_dict(libname[1]) + + # Handle library names of the form libname@relative/path/to/library + if '@' in libname: + lname, lpath = libname.split('@', 1) + lpath = os.path.abspath(njoin(self.local_path, lpath)) + if os.path.isdir(lpath): + c = self.get_subpackage(None, lpath, + caller_level = 2) + if isinstance(c, Configuration): + c = c.todict() + for l in [l[0] for l in c.get('libraries', [])]: + llname = l.split('__OF__', 1)[0] + if llname == lname: + c.pop('name', None) + dict_append(ext_args,**c) + break + continue + libnames.append(libname) + + ext_args['libraries'] = libnames + ext_args['libraries'] + ext_args['define_macros'] = \ + self.define_macros + ext_args.get('define_macros', []) + + from numpy.distutils.core import Extension + ext = Extension(**ext_args) + self.ext_modules.append(ext) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add an extension '+name) + return ext + + def add_library(self,name,sources,**build_info): + """ + Add library to configuration. + + Parameters + ---------- + name : str + Name of the extension. + sources : sequence + List of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + build_info : dict, optional + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * extra_f77_compile_args + * extra_f90_compile_args + * f2py_options + * language + + """ + self._add_library(name, sources, None, build_info) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a library '+ name) + + def _add_library(self, name, sources, install_dir, build_info): + """Common implementation for add_library and add_installed_library. Do + not use directly""" + build_info = copy.copy(build_info) + build_info['sources'] = sources + + # Sometimes, depends is not set up to an empty list by default, and if + # depends is not given to add_library, distutils barfs (#1134) + if not 'depends' in build_info: + build_info['depends'] = [] + + self._fix_paths_dict(build_info) + + # Add to libraries list so that it is build with build_clib + self.libraries.append((name, build_info)) + + def add_installed_library(self, name, sources, install_dir, build_info=None): + """ + Similar to add_library, but the specified library is installed. 
+
+        Most C libraries used with `distutils` are only used to build python
+        extensions, but libraries built through this method will be installed
+        so that they can be reused by third-party packages.
+
+        Parameters
+        ----------
+        name : str
+            Name of the installed library.
+        sources : sequence
+            List of the library's source files. See `add_library` for details.
+        install_dir : str
+            Path to install the library, relative to the current sub-package.
+        build_info : dict, optional
+            The following keys are allowed:
+
+                * depends
+                * macros
+                * include_dirs
+                * extra_compiler_args
+                * extra_f77_compile_args
+                * extra_f90_compile_args
+                * f2py_options
+                * language
+
+        Returns
+        -------
+        None
+
+        See Also
+        --------
+        add_library, add_npy_pkg_config, get_info
+
+        Notes
+        -----
+        The best way to encode the options required to link against the
+        specified C libraries is to use a "libname.ini" file, and use
+        `get_info` to retrieve the required options (see `add_npy_pkg_config`
+        for more information).
+
+        """
+        if not build_info:
+            build_info = {}
+
+        install_dir = os.path.join(self.package_path, install_dir)
+        self._add_library(name, sources, install_dir, build_info)
+        self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
+
+    def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
+        """
+        Generate and install a npy-pkg config file from a template.
+
+        The config file generated from `template` is installed in the
+        given install directory, using `subst_dict` for variable substitution.
+
+        Parameters
+        ----------
+        template : str
+            The path of the template, relative to the current package path.
+        install_dir : str
+            Where to install the npy-pkg config file, relative to the current
+            package path.
+        subst_dict : dict, optional
+            If given, any string of the form ``@key@`` will be replaced by
+            ``subst_dict[key]`` in the template file when installed. The install
+            prefix is always available through the variable ``@prefix@``, since the
+            install prefix is not easy to get reliably from setup.py.
+
+        See also
+        --------
+        add_installed_library, get_info
+
+        Notes
+        -----
+        This works for both standard installs and in-place builds, i.e. the
+        ``@prefix@`` refers to the source directory for in-place builds.
+
+        Examples
+        --------
+        ::
+
+            config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
+
+        Assuming the foo.ini.in file has the following content::
+
+            [meta]
+            Name=@foo@
+            Version=1.0
+            Description=dummy description
+
+            [default]
+            Cflags=-I@prefix@/include
+            Libs=
+
+        The generated file will have the following content::
+
+            [meta]
+            Name=bar
+            Version=1.0
+            Description=dummy description
+
+            [default]
+            Cflags=-Iprefix_dir/include
+            Libs=
+
+        and will be installed as foo.ini in the 'lib' subpath.
+
+        """
+        if subst_dict is None:
+            subst_dict = {}
+        template = os.path.join(self.package_path, template)
+
+        if self.name in self.installed_pkg_config:
+            self.installed_pkg_config[self.name].append((template, install_dir,
+                                                         subst_dict))
+        else:
+            self.installed_pkg_config[self.name] = [(template, install_dir,
+                                                     subst_dict)]
+
+
+    def add_scripts(self,*files):
+        """Add scripts to configuration.
+
+        Add the sequence of files to the beginning of the scripts list.
+        Scripts will be installed under the <prefix>/bin/ directory.
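+
+        For example (the script path is hypothetical):
+
+        >>> config.add_scripts('scripts/compute.py')  #doctest: +SKIP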
+ + """ + scripts = self.paths(files) + dist = self.get_distribution() + if dist is not None: + if dist.scripts is None: + dist.scripts = [] + dist.scripts.extend(scripts) + else: + self.scripts.extend(scripts) + + def dict_append(self,**dict): + for key in self.list_keys: + a = getattr(self, key) + a.extend(dict.get(key, [])) + for key in self.dict_keys: + a = getattr(self, key) + a.update(dict.get(key, {})) + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for key in dict.keys(): + if key not in known_keys: + a = getattr(self, key, None) + if a and a==dict[key]: continue + self.warn('Inheriting attribute %r=%r from %r' \ + % (key, dict[key], dict.get('name', '?'))) + setattr(self, key, dict[key]) + self.extra_keys.append(key) + elif key in self.extra_keys: + self.info('Ignoring attempt to set %r (from %r to %r)' \ + % (key, getattr(self, key), dict[key])) + elif key in known_keys: + # key is already processed above + pass + else: + raise ValueError("Don't know about key=%r" % (key)) + + def __str__(self): + from pprint import pformat + known_keys = self.list_keys + self.dict_keys + self.extra_keys + s = '<'+5*'-' + '\n' + s += 'Configuration of '+self.name+':\n' + known_keys.sort() + for k in known_keys: + a = getattr(self, k, None) + if a: + s += '%s = %s\n' % (k, pformat(a)) + s += 5*'-' + '>' + return s + + def get_config_cmd(self): + """ + Returns the numpy.distutils config command instance. + """ + cmd = get_cmd('config') + cmd.ensure_finalized() + cmd.dump_source = 0 + cmd.noisy = 0 + old_path = os.environ.get('PATH') + if old_path: + path = os.pathsep.join(['.', old_path]) + os.environ['PATH'] = path + return cmd + + def get_build_temp_dir(self): + """ + Return a path to a temporary directory where temporary files should be + placed. + """ + cmd = get_cmd('build') + cmd.ensure_finalized() + return cmd.build_temp + + def have_f77c(self): + """Check for availability of Fortran 77 compiler. + + Use it inside source generating function to ensure that + setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 77 compiler is available (because a simple Fortran 77 + code was able to be compiled successfully). + """ + simple_fortran_subroutine = ''' + subroutine simple + end + ''' + config_cmd = self.get_config_cmd() + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') + return flag + + def have_f90c(self): + """Check for availability of Fortran 90 compiler. + + Use it inside source generating function to ensure that + setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 90 compiler is available (because a simple Fortran + 90 code was able to be compiled successfully) + """ + simple_fortran_subroutine = ''' + subroutine simple + end + ''' + config_cmd = self.get_config_cmd() + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') + return flag + + def append_to(self, extlib): + """Append libraries, include_dirs to extension or library item. + """ + if is_sequence(extlib): + lib_name, build_info = extlib + dict_append(build_info, + libraries=self.libraries, + include_dirs=self.include_dirs) + else: + from numpy.distutils.core import Extension + assert isinstance(extlib, Extension), repr(extlib) + extlib.libraries.extend(self.libraries) + extlib.include_dirs.extend(self.include_dirs) + + def _get_svn_revision(self, path): + """Return path's SVN revision number. 
+        """
+        revision = None
+        m = None
+        cwd = os.getcwd()
+        try:
+            os.chdir(path or '.')
+            p = subprocess.Popen(['svnversion'], shell=True,
+                    stdout=subprocess.PIPE, stderr=None,
+                    close_fds=True)
+            sout = p.stdout
+            m = re.match(r'(?P<revision>\d+)', sout.read())
+        except Exception:
+            pass
+        os.chdir(cwd)
+        if m:
+            revision = int(m.group('revision'))
+            return revision
+        if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
+            entries = njoin(path, '_svn', 'entries')
+        else:
+            entries = njoin(path, '.svn', 'entries')
+        if os.path.isfile(entries):
+            f = open(entries)
+            fstr = f.read()
+            f.close()
+            if fstr[:5] == '<?xml':  # pre 1.4
+                m = re.search(r'revision="(?P<revision>\d+)"', fstr)
+                if m:
+                    revision = int(m.group('revision'))
+            else:  # non-xml entries file --- check to be sure that
+                m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
+                if m:
+                    revision = int(m.group('revision'))
+        return revision
+
+    def _get_hg_revision(self, path):
+        """Return path's Mercurial revision number.
+        """
+        revision = None
+        m = None
+        cwd = os.getcwd()
+        try:
+            os.chdir(path or '.')
+            p = subprocess.Popen(['hg identify --num'], shell=True,
+                    stdout=subprocess.PIPE, stderr=None,
+                    close_fds=True)
+            sout = p.stdout
+            m = re.match(r'(?P<revision>\d+)', sout.read())
+        except Exception:
+            pass
+        os.chdir(cwd)
+        if m:
+            revision = int(m.group('revision'))
+            return revision
+        branch_fn = njoin(path, '.hg', 'branch')
+        branch_cache_fn = njoin(path, '.hg', 'branch.cache')
+
+        if os.path.isfile(branch_fn):
+            branch0 = None
+            f = open(branch_fn)
+            revision0 = f.read().strip()
+            f.close()
+
+            branch_map = {}
+            for line in file(branch_cache_fn, 'r'):
+                branch1, revision1 = line.split()[:2]
+                if revision1==revision0:
+                    branch0 = branch1
+                try:
+                    revision1 = int(revision1)
+                except ValueError:
+                    continue
+                branch_map[branch1] = revision1
+
+            revision = branch_map.get(branch0)
+        return revision
+
+
+    def get_version(self, version_file=None, version_variable=None):
+        """Try to get version string of a package.
+
+        Return a version string of the current package or None if the version
+        information could not be detected.
+
+        Notes
+        -----
+        This method scans files named
+        __version__.py, <packagename>_version.py, version.py, and
+        __svn_version__.py for string variables version, __version__, and
+        <packagename>_version, until a version number is found.
+        """
+        version = getattr(self, 'version', None)
+        if version is not None:
+            return version
+
+        # Get version from version file.
+ if version_file is None: + files = ['__version__.py', + self.name.split('.')[-1]+'_version.py', + 'version.py', + '__svn_version__.py', + '__hg_version__.py'] + else: + files = [version_file] + if version_variable is None: + version_vars = ['version', + '__version__', + self.name.split('.')[-1]+'_version'] + else: + version_vars = [version_variable] + for f in files: + fn = njoin(self.local_path, f) + if os.path.isfile(fn): + info = ('.py', 'U', 1) + name = os.path.splitext(os.path.basename(fn))[0] + n = dot_join(self.name, name) + try: + version_module = npy_load_module('_'.join(n.split('.')), + fn, info) + except ImportError: + msg = get_exception() + self.warn(str(msg)) + version_module = None + if version_module is None: + continue + + for a in version_vars: + version = getattr(version_module, a, None) + if version is not None: + break + if version is not None: + break + + if version is not None: + self.version = version + return version + + # Get version as SVN or Mercurial revision number + revision = self._get_svn_revision(self.local_path) + if revision is None: + revision = self._get_hg_revision(self.local_path) + + if revision is not None: + version = str(revision) + self.version = version + + return version + + def make_svn_version_py(self, delete=True): + """Appends a data function to the data_files list that will generate + __svn_version__.py file to the current package directory. + + Generate package __svn_version__.py file from SVN revision number, + it will be removed after python exits but will be available + when sdist, etc commands are executed. + + Notes + ----- + If __svn_version__.py existed before, nothing is done. + + This is + intended for working with source directories that are in an SVN + repository. + """ + target = njoin(self.local_path, '__svn_version__.py') + revision = self._get_svn_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_svn_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + f = open(target, 'w') + f.write('version = %r\n' % (version)) + f.close() + + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_svn_version_py())) + + def make_hg_version_py(self, delete=True): + """Appends a data function to the data_files list that will generate + __hg_version__.py file to the current package directory. + + Generate package __hg_version__.py file from Mercurial revision, + it will be removed after python exits but will be available + when sdist, etc commands are executed. + + Notes + ----- + If __hg_version__.py existed before, nothing is done. + + This is intended for working with source directories that are + in an Mercurial repository. 
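+
+        A typical call from a ``configuration()`` function (a sketch; the
+        method itself registers the atexit cleanup when `delete` is true):
+
+        >>> config.make_hg_version_py()  #doctest: +SKIP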
+ """ + target = njoin(self.local_path, '__hg_version__.py') + revision = self._get_hg_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_hg_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + f = open(target, 'w') + f.write('version = %r\n' % (version)) + f.close() + + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_hg_version_py())) + + def make_config_py(self,name='__config__'): + """Generate package __config__.py file containing system_info + information used during building the package. + + This file is installed to the + package installation directory. + + """ + self.py_modules.append((self.name, name, generate_config_py)) + + def get_info(self,*names): + """Get resources information. + + Return information (from system_info.get_info) for all of the names in + the argument list in a single dictionary. + """ + from .system_info import get_info, dict_append + info_dict = {} + for a in names: + dict_append(info_dict,**get_info(a)) + return info_dict + + +def get_cmd(cmdname, _cache={}): + if cmdname not in _cache: + import distutils.core + dist = distutils.core._setup_distribution + if dist is None: + from distutils.errors import DistutilsInternalError + raise DistutilsInternalError( + 'setup distribution instance not initialized') + cmd = dist.get_command_obj(cmdname) + _cache[cmdname] = cmd + return _cache[cmdname] + +def get_numpy_include_dirs(): + # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] + include_dirs = Configuration.numpy_include_dirs[:] + if not include_dirs: + import numpy + include_dirs = [ numpy.get_include() ] + # else running numpy/core/setup.py + return include_dirs + +def get_npy_pkg_dir(): + """Return the path where to find the npy-pkg-config directory.""" + # XXX: import here for bootstrapping reasons + import numpy + d = os.path.join(os.path.dirname(numpy.__file__), + 'core', 'lib', 'npy-pkg-config') + return d + +def get_pkg_info(pkgname, dirs=None): + """ + Return library info for the given package. + + Parameters + ---------- + pkgname : str + Name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini). + dirs : sequence, optional + If given, should be a sequence of additional directories where to look + for npy-pkg-config files. Those directories are searched prior to the + NumPy directory. + + Returns + ------- + pkginfo : class instance + The `LibraryInfo` instance containing the build information. + + Raises + ------ + PkgNotFound + If the package is not found. + + See Also + -------- + Configuration.add_npy_pkg_config, Configuration.add_installed_library, + get_info + + """ + from numpy.distutils.npy_pkg_config import read_config + + if dirs: + dirs.append(get_npy_pkg_dir()) + else: + dirs = [get_npy_pkg_dir()] + return read_config(pkgname, dirs) + +def get_info(pkgname, dirs=None): + """ + Return an info dict for a given C library. + + The info dict contains the necessary options to use the C library. + + Parameters + ---------- + pkgname : str + Name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini). 
+    dirs : sequence, optional
+        If given, should be a sequence of additional directories where to look
+        for npy-pkg-config files. Those directories are searched prior to the
+        NumPy directory.
+
+    Returns
+    -------
+    info : dict
+        The dictionary with build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+    get_pkg_info
+
+    Examples
+    --------
+    To get the necessary information for the npymath library from NumPy:
+
+    >>> npymath_info = np.distutils.misc_util.get_info('npymath')
+    >>> npymath_info                                    #doctest: +SKIP
+    {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
+    ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
+
+    This info dict can then be used as input to a `Configuration` instance::
+
+      config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
+
+    """
+    from numpy.distutils.npy_pkg_config import parse_flags
+    pkg_info = get_pkg_info(pkgname, dirs)
+
+    # Translate LibraryInfo instance into a build_info dict
+    info = parse_flags(pkg_info.cflags())
+    for k, v in parse_flags(pkg_info.libs()).items():
+        info[k].extend(v)
+
+    # add_extension expects a specific layout for its extra_info argument
+    info['define_macros'] = info['macros']
+    del info['macros']
+    del info['ignored']
+
+    return info
+
+def is_bootstrapping():
+    if sys.version_info[0] >= 3:
+        import builtins
+    else:
+        import __builtin__ as builtins
+
+    try:
+        builtins.__NUMPY_SETUP__
+        return True
+    except AttributeError:
+        return False
+
+
+#########################
+
+def default_config_dict(name = None, parent_name = None, local_path=None):
+    """Return a configuration dictionary for usage in
+    configuration() function defined in file setup_<name>.py.
+    """
+    import warnings
+    warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
+                  'deprecated default_config_dict(%r,%r,%r)'
+                  % (name, parent_name, local_path,
+                     name, parent_name, local_path,
+                     ), stacklevel=2)
+    c = Configuration(name, parent_name, local_path)
+    return c.todict()
+
+
+def dict_append(d, **kws):
+    for k, v in kws.items():
+        if k in d:
+            ov = d[k]
+            if isinstance(ov, str):
+                d[k] = v
+            else:
+                d[k].extend(v)
+        else:
+            d[k] = v
+
+def appendpath(prefix, path):
+    if os.path.sep != '/':
+        prefix = prefix.replace('/', os.path.sep)
+        path = path.replace('/', os.path.sep)
+    drive = ''
+    if os.path.isabs(path):
+        drive = os.path.splitdrive(prefix)[0]
+        absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
+        pathdrive, path = os.path.splitdrive(path)
+        d = os.path.commonprefix([absprefix, path])
+        if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
+           or os.path.join(path[:len(d)], path[len(d):]) != path:
+            # Handle invalid paths
+            d = os.path.dirname(d)
+        subpath = path[len(d):]
+        if os.path.isabs(subpath):
+            subpath = subpath[1:]
+    else:
+        subpath = path
+    return os.path.normpath(njoin(drive + prefix, subpath))
+
+def generate_config_py(target):
+    """Generate config.py file containing system_info information
+    used during building the package.
+ + Usage: + config['py_modules'].append((packagename, '__config__',generate_config_py)) + """ + from numpy.distutils.system_info import system_info + from distutils.dir_util import mkpath + mkpath(os.path.dirname(target)) + f = open(target, 'w') + f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) + f.write('# It contains system_info results at the time of building this package.\n') + f.write('__all__ = ["get_info","show"]\n\n') + + # For gfortran+msvc combination, extra shared libraries may exist + f.write(""" + +import os +import sys + +extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') + +if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + os.environ.setdefault('PATH', '') + os.environ['PATH'] += os.pathsep + extra_dll_dir + +""") + + for k, i in system_info.saved_results.items(): + f.write('%s=%r\n' % (k, i)) + f.write(r''' +def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + +def show(): + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... " + v[-60:] + print(" %s = %s" % (k,v)) + ''') + + f.close() + return target + +def msvc_version(compiler): + """Return version major and minor of compiler instance if it is + MSVC, raise an exception otherwise.""" + if not compiler.compiler_type == "msvc": + raise ValueError("Compiler instance is not msvc (%s)"\ + % compiler.compiler_type) + return compiler._MSVCCompiler__version + +def get_build_architecture(): + # Importing distutils.msvccompiler triggers a warning on non-Windows + # systems, so delay the import to here. + from distutils.msvccompiler import get_build_architecture + return get_build_architecture() diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/misc_util.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/misc_util.pyc new file mode 100644 index 0000000..db26722 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/misc_util.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/msvc9compiler.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/msvc9compiler.py new file mode 100644 index 0000000..e9cc334 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/msvc9compiler.py @@ -0,0 +1,65 @@ +from __future__ import division, absolute_import, print_function + +import os +from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. + + Parameters + ---------- + old : string + Previous environment string. + new : string + New environment string. + + Returns + ------- + ret : string + Updated environment string. + + """ + if not old: + return new + if new in old: + return old + + # Neither new nor old is empty. Give old priority. 
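+    # For example (hypothetical values):
+    #   _merge('C:\\one', 'C:\\two')          -> 'C:\\one;C:\\two'
+    #   _merge('C:\\one;C:\\two', 'C:\\two')  -> 'C:\\one;C:\\two'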
+ return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self, plat_name=None): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib') + environ_include = os.getenv('include') + _MSVCCompiler.initialize(self, plat_name) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. + if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] + + def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): + ld_args.append('/MANIFEST') + _MSVCCompiler.manifest_setup_ldargs(self, output_filename, + build_temp, ld_args) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/msvc9compiler.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/msvc9compiler.pyc new file mode 100644 index 0000000..3b91d6e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/msvc9compiler.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/msvccompiler.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/msvccompiler.py new file mode 100644 index 0000000..0cb4bf9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/msvccompiler.py @@ -0,0 +1,60 @@ +from __future__ import division, absolute_import, print_function + +import os +from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. + + Parameters + ---------- + old : string + Previous environment string. + new : string + New environment string. + + Returns + ------- + ret : string + Updated environment string. + + """ + if new in old: + return old + if not old: + return new + + # Neither new nor old is empty. Give old priority. + return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib', '') + environ_include = os.getenv('include', '') + _MSVCCompiler.initialize(self) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. 
+ if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/msvccompiler.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/msvccompiler.pyc new file mode 100644 index 0000000..f4eff8c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/msvccompiler.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/npy_pkg_config.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/npy_pkg_config.py new file mode 100644 index 0000000..bfe8b9f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/npy_pkg_config.py @@ -0,0 +1,443 @@ +from __future__ import division, absolute_import, print_function + +import sys +import re +import os + +if sys.version_info[0] < 3: + from ConfigParser import RawConfigParser +else: + from configparser import RawConfigParser + +__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', + 'read_config', 'parse_flags'] + +_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') + +class FormatError(IOError): + """ + Exception thrown when there is a problem parsing a configuration file. + + """ + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +class PkgNotFound(IOError): + """Exception raised when a package can not be located.""" + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +def parse_flags(line): + """ + Parse a line from a config file containing compile flags. + + Parameters + ---------- + line : str + A single line containing one or more compile flags. + + Returns + ------- + d : dict + Dictionary of parsed flags, split into relevant categories. + These categories are the keys of `d`: + + * 'include_dirs' + * 'library_dirs' + * 'libraries' + * 'macros' + * 'ignored' + + """ + d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], + 'macros': [], 'ignored': []} + + flags = (' ' + line).split(' -') + for flag in flags: + flag = '-' + flag + if len(flag) > 0: + if flag.startswith('-I'): + d['include_dirs'].append(flag[2:].strip()) + elif flag.startswith('-L'): + d['library_dirs'].append(flag[2:].strip()) + elif flag.startswith('-l'): + d['libraries'].append(flag[2:].strip()) + elif flag.startswith('-D'): + d['macros'].append(flag[2:].strip()) + else: + d['ignored'].append(flag) + + return d + +def _escape_backslash(val): + return val.replace('\\', '\\\\') + +class LibraryInfo(object): + """ + Object containing build information about a library. + + Parameters + ---------- + name : str + The library name. + description : str + Description of the library. + version : str + Version string. + sections : dict + The sections of the configuration file for the library. The keys are + the section headers, the values the text under each header. + vars : class instance + A `VariableSet` instance, which contains ``(name, value)`` pairs for + variables defined in the configuration file for the library. + requires : sequence, optional + The required libraries for the library to be installed. + + Notes + ----- + All input parameters (except "sections" which is a method) are available as + attributes of the same name. 
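+
+    Examples
+    --------
+    Instances are normally obtained through `read_config` (shown as a
+    sketch, since it needs an installed .ini file):
+
+    >>> info = read_config('npymath')  #doctest: +SKIP
+    >>> info.cflags()  #doctest: +SKIP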
+
+    """
+    def __init__(self, name, description, version, sections, vars, requires=None):
+        self.name = name
+        self.description = description
+        if requires:
+            self.requires = requires
+        else:
+            self.requires = []
+        self.version = version
+        self._sections = sections
+        self.vars = vars
+
+    def sections(self):
+        """
+        Return the section headers of the config file.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        keys : list of str
+            The list of section headers.
+
+        """
+        return list(self._sections.keys())
+
+    def cflags(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['cflags'])
+        return _escape_backslash(val)
+
+    def libs(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['libs'])
+        return _escape_backslash(val)
+
+    def __str__(self):
+        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
+        if self.requires:
+            m.append('Requires: %s' % ",".join(self.requires))
+        else:
+            m.append('Requires:')
+        m.append('Version: %s' % self.version)
+
+        return "\n".join(m)
+
+class VariableSet(object):
+    """
+    Container object for the variables defined in a config file.
+
+    `VariableSet` can be used as a plain dictionary, with the variable names
+    as keys.
+
+    Parameters
+    ----------
+    d : dict
+        Dict of items in the "variables" section of the configuration file.
+
+    """
+    def __init__(self, d):
+        self._raw_data = dict([(k, v) for k, v in d.items()])
+
+        self._re = {}
+        self._re_sub = {}
+
+        self._init_parse()
+
+    def _init_parse(self):
+        for k, v in self._raw_data.items():
+            self._init_parse_var(k, v)
+
+    def _init_parse_var(self, name, value):
+        self._re[name] = re.compile(r'\$\{%s\}' % name)
+        self._re_sub[name] = value
+
+    def interpolate(self, value):
+        # Brute force: we keep interpolating until there is no '${var}' anymore
+        # or until interpolated string is equal to input string
+        def _interpolate(value):
+            for k in self._re.keys():
+                value = self._re[k].sub(self._re_sub[k], value)
+            return value
+        while _VAR.search(value):
+            nvalue = _interpolate(value)
+            if nvalue == value:
+                break
+            value = nvalue
+
+        return value
+
+    def variables(self):
+        """
+        Return the list of variable names.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        names : list of str
+            The names of all variables in the `VariableSet` instance.
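+
+        Examples
+        --------
+        A minimal sketch of typical use:
+
+        >>> vs = VariableSet({'prefix': '/usr/local'})
+        >>> vs.variables()
+        ['prefix']
+        >>> vs.interpolate('-I${prefix}/include')
+        '-I/usr/local/include'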
+
+        """
+        return list(self._raw_data.keys())
+
+    # Emulate a dict to set/get variables values
+    def __getitem__(self, name):
+        return self._raw_data[name]
+
+    def __setitem__(self, name, value):
+        self._raw_data[name] = value
+        self._init_parse_var(name, value)
+
+def parse_meta(config):
+    if not config.has_section('meta'):
+        raise FormatError("No meta section found !")
+
+    d = dict(config.items('meta'))
+
+    for k in ['name', 'description', 'version']:
+        if not k in d:
+            raise FormatError("Option %s (section [meta]) is mandatory, "
+                "but not found" % k)
+
+    if not 'requires' in d:
+        d['requires'] = []
+
+    return d
+
+def parse_variables(config):
+    if not config.has_section('variables'):
+        raise FormatError("No variables section found !")
+
+    d = {}
+
+    for name, value in config.items("variables"):
+        d[name] = value
+
+    return VariableSet(d)
+
+def parse_sections(config):
+    # Unused within this module; return the meta section together with the
+    # remaining (non-meta, non-variables) sections.
+    meta_d = parse_meta(config)
+    r = dict((s, dict(config.items(s))) for s in config.sections()
+             if s not in ('meta', 'variables'))
+    return meta_d, r
+
+def pkg_to_filename(pkg_name):
+    return "%s.ini" % pkg_name
+
+def parse_config(filename, dirs=None):
+    if dirs:
+        filenames = [os.path.join(d, filename) for d in dirs]
+    else:
+        filenames = [filename]
+
+    config = RawConfigParser()
+
+    n = config.read(filenames)
+    if not len(n) >= 1:
+        raise PkgNotFound("Could not find file(s) %s" % str(filenames))
+
+    # Parse meta and variables sections
+    meta = parse_meta(config)
+
+    vars = {}
+    if config.has_section('variables'):
+        for name, value in config.items("variables"):
+            vars[name] = _escape_backslash(value)
+
+    # Parse "normal" sections
+    secs = [s for s in config.sections() if not s in ['meta', 'variables']]
+    sections = {}
+
+    requires = {}
+    for s in secs:
+        d = {}
+        if config.has_option(s, "requires"):
+            requires[s] = config.get(s, 'requires')
+
+        for name, value in config.items(s):
+            d[name] = value
+        sections[s] = d
+
+    return meta, vars, sections, requires
+
+def _read_config_imp(filenames, dirs=None):
+    def _read_config(f):
+        meta, vars, sections, reqs = parse_config(f, dirs)
+        # recursively add sections and variables of required libraries
+        for rname, rvalue in reqs.items():
+            nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
+
+            # Update var dict for variables not in 'top' config file
+            for k, v in nvars.items():
+                if not k in vars:
+                    vars[k] = v
+
+            # Update sec dict
+            for oname, ovalue in nsections[rname].items():
+                if ovalue:
+                    sections[rname][oname] += ' %s' % ovalue
+
+        return meta, vars, sections, reqs
+
+    meta, vars, sections, reqs = _read_config(filenames)
+
+    # FIXME: document this. If pkgname is defined in the variables section, and
+    # there is no pkgdir variable defined, pkgdir is automatically defined to
+    # the path of pkgname. This requires the package to be imported to work
+    if not 'pkgdir' in vars and "pkgname" in vars:
+        pkgname = vars["pkgname"]
+        if not pkgname in sys.modules:
+            raise ValueError("You should import %s to get information on %s" %
+                             (pkgname, meta["name"]))
+
+        mod = sys.modules[pkgname]
+        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
+
+    return LibraryInfo(name=meta["name"], description=meta["description"],
+            version=meta["version"], sections=sections, vars=VariableSet(vars))
+
+# Trivial cache to cache LibraryInfo instances creation. To be really
+# efficient, the cache should be handled in read_config, since a same file can
+# be parsed many time outside LibraryInfo creation, but I doubt this will be a
+# problem in practice
+_CACHE = {}
+def read_config(pkgname, dirs=None):
+    """
+    Return library info for a package from its configuration file.
+
+    Parameters
+    ----------
+    pkgname : str
+        Name of the package (should match the name of the .ini file, without
+        the extension, e.g. foo for the file foo.ini).
+    dirs : sequence, optional
+        If given, should be a sequence of directories - usually including
+        the NumPy base directory - where to look for npy-pkg-config files.
+
+    Returns
+    -------
+    pkginfo : class instance
+        The `LibraryInfo` instance containing the build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    misc_util.get_info, misc_util.get_pkg_info
+
+    Examples
+    --------
+    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
+    >>> type(npymath_info)
+    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
+    >>> print(npymath_info)
+    Name: npymath
+    Description: Portable, core math library implementing C99 standard
+    Requires:
+    Version: 0.1  #random
+
+    """
+    try:
+        return _CACHE[pkgname]
+    except KeyError:
+        v = _read_config_imp(pkg_to_filename(pkgname), dirs)
+        _CACHE[pkgname] = v
+        return v
+
+# TODO:
+#   - implements version comparison (modversion + atleast)
+
+# pkg-config simple emulator - useful for debugging, and maybe later to query
+# the system
+if __name__ == '__main__':
+    import sys
+    from optparse import OptionParser
+    import glob
+
+    parser = OptionParser()
+    parser.add_option("--cflags", dest="cflags", action="store_true",
+                      help="output all preprocessor and compiler flags")
+    parser.add_option("--libs", dest="libs", action="store_true",
+                      help="output all linker flags")
+    parser.add_option("--use-section", dest="section",
+                      help="use this section instead of default for options")
+    parser.add_option("--version", dest="version", action="store_true",
+                      help="output version")
+    parser.add_option("--atleast-version", dest="min_version",
+                      help="Minimal version")
+    parser.add_option("--list-all", dest="list_all", action="store_true",
+                      help="list all packages found in the search path")
+    parser.add_option("--define-variable", dest="define_variable",
+                      help="Replace variable with the given value")
+
+    (options, args) = parser.parse_args(sys.argv)
+
+    if len(args) < 2:
+        raise ValueError("Expect package name on the command line:")
+
+    if options.list_all:
+        files = glob.glob("*.ini")
+        for f in files:
+            info = read_config(f)
+            print("%s\t%s - %s" % (info.name, info.name, info.description))
+
+    pkg_name = args[1]
+    d = os.environ.get('NPY_PKG_CONFIG_PATH')
+    if d:
+        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
+    else:
+        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
+
+    if options.section:
+        section = options.section
+    else:
+        section = "default"
+
+    if options.define_variable:
+        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
+        if not m:
+            raise ValueError("--define-variable option should be of "
+                             "the form --define-variable=foo=bar")
+        else:
+            name = m.group(1)
+            value = m.group(2)
+            info.vars[name] = value
+
+    if options.cflags:
+        print(info.cflags(section))
+    if options.libs:
+        print(info.libs(section))
+    if options.version:
+        print(info.version)
+    if options.min_version:
+        print(info.version >= options.min_version)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/npy_pkg_config.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/npy_pkg_config.pyc
new file mode 100644
index 0000000..c148487
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/npy_pkg_config.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/numpy_distribution.py
b/project/venv/lib/python2.7/site-packages/numpy/distutils/numpy_distribution.py new file mode 100644 index 0000000..6ae19d1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/numpy_distribution.py @@ -0,0 +1,19 @@ +# XXX: Handle setuptools ? +from __future__ import division, absolute_import, print_function + +from distutils.core import Distribution + +# This class is used because we add new files (sconscripts, and so on) with the +# scons command +class NumpyDistribution(Distribution): + def __init__(self, attrs = None): + # A list of (sconscripts, pre_hook, post_hook, src, parent_names) + self.scons_data = [] + # A list of installable libraries + self.installed_libraries = [] + # A dict of pkg_config files to generate/install + self.installed_pkg_config = {} + Distribution.__init__(self, attrs) + + def has_scons_scripts(self): + return bool(self.scons_data) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/numpy_distribution.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/numpy_distribution.pyc new file mode 100644 index 0000000..acd5fa7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/numpy_distribution.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/pathccompiler.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/pathccompiler.py new file mode 100644 index 0000000..fc9872d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/pathccompiler.py @@ -0,0 +1,23 @@ +from __future__ import division, absolute_import, print_function + +from distutils.unixccompiler import UnixCCompiler + +class PathScaleCCompiler(UnixCCompiler): + + """ + PathScale compiler compatible with an gcc built Python. 
+    """
+
+    compiler_type = 'pathcc'
+    cc_exe = 'pathcc'
+    cxx_exe = 'pathCC'
+
+    def __init__ (self, verbose=0, dry_run=0, force=0):
+        UnixCCompiler.__init__ (self, verbose, dry_run, force)
+        cc_compiler = self.cc_exe
+        cxx_compiler = self.cxx_exe
+        self.set_executables(compiler=cc_compiler,
+                             compiler_so=cc_compiler,
+                             compiler_cxx=cxx_compiler,
+                             linker_exe=cc_compiler,
+                             linker_so=cc_compiler + ' -shared')
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/pathccompiler.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/pathccompiler.pyc
new file mode 100644
index 0000000..67069ca
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/pathccompiler.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/setup.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/setup.py
new file mode 100644
index 0000000..82a53bd
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/setup.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+from __future__ import division, print_function
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('distutils', parent_package, top_path)
+    config.add_subpackage('command')
+    config.add_subpackage('fcompiler')
+    config.add_data_dir('tests')
+    config.add_data_files('site.cfg')
+    config.add_data_files('mingw/gfortran_vs2003_hack.c')
+    config.make_config_py()
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/setup.pyc
new file mode 100644
index 0000000..c0616e1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/setup.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/system_info.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/system_info.py
new file mode 100644
index 0000000..2424943
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/system_info.py
@@ -0,0 +1,2555 @@
+#!/usr/bin/env python
+"""
+This file defines a set of system_info classes for getting
+information about various resources (libraries, library directories,
+include directories, etc.) in the system. Currently, the following
+classes are available:
+
+  atlas_info
+  atlas_threads_info
+  atlas_blas_info
+  atlas_blas_threads_info
+  lapack_atlas_info
+  lapack_atlas_threads_info
+  atlas_3_10_info
+  atlas_3_10_threads_info
+  atlas_3_10_blas_info,
+  atlas_3_10_blas_threads_info,
+  lapack_atlas_3_10_info
+  lapack_atlas_3_10_threads_info
+  blas_info
+  lapack_info
+  openblas_info
+  blis_info
+  blas_opt_info       # usage recommended
+  lapack_opt_info     # usage recommended
+  fftw_info,dfftw_info,sfftw_info
+  fftw_threads_info,dfftw_threads_info,sfftw_threads_info
+  djbfft_info
+  x11_info
+  lapack_src_info
+  blas_src_info
+  numpy_info
+  numarray_info
+  numpy_info
+  boost_python_info
+  agg2_info
+  wx_info
+  gdk_pixbuf_xlib_2_info
+  gdk_pixbuf_2_info
+  gdk_x11_2_info
+  gtkp_x11_2_info
+  gtkp_2_info
+  xft_info
+  freetype2_info
+  umfpack_info
+
+Usage:
+    info_dict = get_info(<name>)
+    where <name> is a string 'atlas','x11','fftw','lapack','blas',
+    'lapack_src', 'blas_src', etc. For a complete list of allowed names,
+    see the definition of get_info() function below.
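+
+    For example, a minimal sketch (assuming an optimized LAPACK was
+    actually detected on the build machine):
+
+    >>> from numpy.distutils.system_info import get_info
+    >>> get_info('lapack_opt')  #doctest: +SKIP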
+ + Returned info_dict is a dictionary which is compatible with + distutils.setup keyword arguments. If info_dict == {}, then the + asked resource is not available (system_info could not find it). + + Several *_info classes specify an environment variable to specify + the locations of software. When setting the corresponding environment + variable to 'None' then the software will be ignored, even when it + is available in system. + +Global parameters: + system_info.search_static_first - search static libraries (.a) + in precedence to shared ones (.so, .sl) if enabled. + system_info.verbosity - output the results to stdout if enabled. + +The file 'site.cfg' is looked for in + +1) Directory of main setup.py file being run. +2) Home directory of user running the setup.py file as ~/.numpy-site.cfg +3) System wide directory (location of this file...) + +The first one found is used to get system configuration options The +format is that used by ConfigParser (i.e., Windows .INI style). The +section ALL has options that are the default for each section. The +available sections are fftw, atlas, and x11. Appropriate defaults are +used if nothing is specified. + +The order of finding the locations of resources is the following: + 1. environment variable + 2. section in site.cfg + 3. ALL section in site.cfg +Only the first complete match is returned. + +Example: +---------- +[ALL] +library_dirs = /usr/lib:/usr/local/lib:/opt/lib +include_dirs = /usr/include:/usr/local/include:/opt/include +src_dirs = /usr/local/src:/opt/src +# search static libraries (.a) in preference to shared ones (.so) +search_static_first = 0 + +[fftw] +fftw_libs = rfftw, fftw +fftw_opt_libs = rfftw_threaded, fftw_threaded +# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs + +[atlas] +library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas +# for overriding the names of the atlas libraries +atlas_libs = lapack, f77blas, cblas, atlas + +[x11] +library_dirs = /usr/X11R6/lib +include_dirs = /usr/X11R6/include +---------- + +Authors: + Pearu Peterson , February 2002 + David M. Cooke , April 2002 + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +""" +from __future__ import division, absolute_import, print_function + +import sys +import os +import re +import copy +import warnings +from glob import glob +from functools import reduce +if sys.version_info[0] < 3: + from ConfigParser import NoOptionError + from ConfigParser import RawConfigParser as ConfigParser +else: + from configparser import NoOptionError + from configparser import RawConfigParser as ConfigParser +# It seems that some people are importing ConfigParser from here so is +# good to keep its class name. Use of RawConfigParser is needed in +# order to be able to load path names with percent in them, like +# `feature%2Fcool` which is common on git flow branch names. 
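+# Editor's sketch of the point above (illustration only, not upstream
+# code): the interpolating ConfigParser treats '%' as special and would
+# choke on a value such as 'feature%2Fcool', while RawConfigParser
+# returns it untouched:
+#
+#     cp = ConfigParser()   # the RawConfigParser alias from above
+#     cp.add_section('ALL')
+#     cp.set('ALL', 'library_dirs', '/src/feature%2Fcool/lib')
+#     assert cp.get('ALL', 'library_dirs') == '/src/feature%2Fcool/lib'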
+ +from distutils.errors import DistutilsError +from distutils.dist import Distribution +import distutils.sysconfig +from distutils import log +from distutils.util import get_platform + +from numpy.distutils.exec_command import ( + find_executable, filepath_from_subprocess_output, + get_pythonexe) +from numpy.distutils.misc_util import (is_sequence, is_string, + get_shared_lib_extension) +from numpy.distutils.command.config import config as cmd_config +from numpy.distutils.compat import get_exception +from numpy.distutils import customized_ccompiler +from numpy.distutils import _shell_utils +import distutils.ccompiler +import tempfile +import shutil + + +# Determine number of bits +import platform +_bits = {'32bit': 32, '64bit': 64} +platform_bits = _bits[platform.architecture()[0]] + + +def _c_string_literal(s): + """ + Convert a python string into a literal suitable for inclusion into C code + """ + # only these three characters are forbidden in C strings + s = s.replace('\\', r'\\') + s = s.replace('"', r'\"') + s = s.replace('\n', r'\n') + return '"{}"'.format(s) + + +def libpaths(paths, bits): + """Return a list of library paths valid on 32 or 64 bit systems. + + Inputs: + paths : sequence + A sequence of strings (typically paths) + bits : int + An integer, the only valid values are 32 or 64. A ValueError exception + is raised otherwise. + + Examples: + + Consider a list of directories + >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] + + For a 32-bit platform, this is already valid: + >>> np.distutils.system_info.libpaths(paths,32) + ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] + + On 64 bits, we prepend the '64' postfix + >>> np.distutils.system_info.libpaths(paths,64) + ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', + '/usr/lib64', '/usr/lib'] + """ + if bits not in (32, 64): + raise ValueError("Invalid bit size in libpaths: 32 or 64 only") + + # Handle 32bit case + if bits == 32: + return paths + + # Handle 64bit case + out = [] + for p in paths: + out.extend([p + '64', p]) + + return out + + +if sys.platform == 'win32': + default_lib_dirs = ['C:\\', + os.path.join(distutils.sysconfig.EXEC_PREFIX, + 'libs')] + default_runtime_dirs = [] + default_include_dirs = [] + default_src_dirs = ['.'] + default_x11_lib_dirs = [] + default_x11_include_dirs = [] + _include_dirs = [ + 'include', + 'include/suitesparse', + ] + _lib_dirs = [ + 'lib', + ] + + _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] + _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] + def add_system_root(library_root): + """Add a package manager root to the include directories""" + global default_lib_dirs + global default_include_dirs + + library_root = os.path.normpath(library_root) + + default_lib_dirs.extend( + os.path.join(library_root, d) for d in _lib_dirs) + default_include_dirs.extend( + os.path.join(library_root, d) for d in _include_dirs) + + if sys.version_info >= (3, 3): + # VCpkg is the de-facto package manager on windows for C/C++ + # libraries. If it is on the PATH, then we append its paths here. + # We also don't re-implement shutil.which for Python 2.7 because + # vcpkg doesn't support MSVC 2008. 
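+        # Editor's note: platform.architecture() returns a tuple such as
+        # ('64bit', 'WindowsPE'), so the "== '32bit'" comparison a few
+        # lines below can never be true and the 'x64' specifier is always
+        # chosen; a corrected test would use platform.architecture()[0].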
+ vcpkg = shutil.which('vcpkg') + if vcpkg: + vcpkg_dir = os.path.dirname(vcpkg) + if platform.architecture() == '32bit': + specifier = 'x86' + else: + specifier = 'x64' + + vcpkg_installed = os.path.join(vcpkg_dir, 'installed') + for vcpkg_root in [ + os.path.join(vcpkg_installed, specifier + '-windows'), + os.path.join(vcpkg_installed, specifier + '-windows-static'), + ]: + add_system_root(vcpkg_root) + + # Conda is another popular package manager that provides libraries + conda = shutil.which('conda') + if conda: + conda_dir = os.path.dirname(conda) + add_system_root(os.path.join(conda_dir, '..', 'Library')) + add_system_root(os.path.join(conda_dir, 'Library')) + +else: + default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', + '/opt/local/lib', '/sw/lib'], platform_bits) + default_runtime_dirs = [] + default_include_dirs = ['/usr/local/include', + '/opt/include', '/usr/include', + # path of umfpack under macports + '/opt/local/include/ufsparse', + '/opt/local/include', '/sw/include', + '/usr/include/suitesparse'] + default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] + + default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', + '/usr/lib'], platform_bits) + default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include', + '/usr/include'] + + if os.path.exists('/usr/lib/X11'): + globbed_x11_dir = glob('/usr/lib/*/libX11.so') + if globbed_x11_dir: + x11_so_dir = os.path.split(globbed_x11_dir[0])[0] + default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) + default_x11_include_dirs.extend(['/usr/lib/X11/include', + '/usr/include/X11']) + + import subprocess as sp + tmp = None + try: + # Explicitly open/close file to avoid ResourceWarning when + # tests are run in debug mode Python 3. + tmp = open(os.devnull, 'w') + p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE, + stderr=tmp) + except (OSError, DistutilsError): + # OSError if gcc is not installed, or SandboxViolation (DistutilsError + # subclass) if an old setuptools bug is triggered (see gh-3160). 
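+        # When the call succeeds, the else-branch below appends the printed
+        # multiarch triplet (e.g. 'x86_64-linux-gnu' on Debian-style
+        # systems) to the default library search paths.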
+ pass + else: + triplet = str(p.communicate()[0].decode().strip()) + if p.returncode == 0: + # gcc supports the "-print-multiarch" option + default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] + default_lib_dirs += [os.path.join("/usr/lib/", triplet)] + finally: + if tmp is not None: + tmp.close() + +if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: + default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) + default_include_dirs.append(os.path.join(sys.prefix, 'include')) + default_src_dirs.append(os.path.join(sys.prefix, 'src')) + +default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] +default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] +default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] +default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] + +so_ext = get_shared_lib_extension() + + +def get_standard_file(fname): + """Returns a list of files named 'fname' from + 1) System-wide directory (directory-location of this module) + 2) Users HOME directory (os.environ['HOME']) + 3) Local directory + """ + # System-wide file + filenames = [] + try: + f = __file__ + except NameError: + f = sys.argv[0] + else: + sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], + fname) + if os.path.isfile(sysfile): + filenames.append(sysfile) + + # Home directory + # And look for the user config file + try: + f = os.path.expanduser('~') + except KeyError: + pass + else: + user_file = os.path.join(f, fname) + if os.path.isfile(user_file): + filenames.append(user_file) + + # Local file + if os.path.isfile(fname): + filenames.append(os.path.abspath(fname)) + + return filenames + + +def get_info(name, notfound_action=0): + """ + notfound_action: + 0 - do nothing + 1 - display warning message + 2 - raise error + """ + cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead + 'atlas_threads': atlas_threads_info, # ditto + 'atlas_blas': atlas_blas_info, + 'atlas_blas_threads': atlas_blas_threads_info, + 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead + 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto + 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead + 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto + 'atlas_3_10_blas': atlas_3_10_blas_info, + 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, + 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead + 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto + 'mkl': mkl_info, + # openblas which may or may not have embedded lapack + 'openblas': openblas_info, # use blas_opt instead + # openblas with embedded lapack + 'openblas_lapack': openblas_lapack_info, # use blas_opt instead + 'openblas_clapack': openblas_clapack_info, # use blas_opt instead + 'blis': blis_info, # use blas_opt instead + 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead + 'blas_mkl': blas_mkl_info, # use blas_opt instead + 'accelerate': accelerate_info, # use blas_opt instead + 'x11': x11_info, + 'fft_opt': fft_opt_info, + 'fftw': fftw_info, + 'fftw2': fftw2_info, + 'fftw3': fftw3_info, + 'dfftw': dfftw_info, + 'sfftw': sfftw_info, + 'fftw_threads': fftw_threads_info, + 'dfftw_threads': dfftw_threads_info, + 'sfftw_threads': sfftw_threads_info, + 'djbfft': djbfft_info, + 'blas': blas_info, # use blas_opt instead + 'lapack': lapack_info, # use lapack_opt instead + 'lapack_src': lapack_src_info, + 'blas_src': blas_src_info, + 'numpy': numpy_info, + 'f2py': f2py_info, + 'Numeric': 
Numeric_info, + 'numeric': Numeric_info, + 'numarray': numarray_info, + 'numerix': numerix_info, + 'lapack_opt': lapack_opt_info, + 'blas_opt': blas_opt_info, + 'boost_python': boost_python_info, + 'agg2': agg2_info, + 'wx': wx_info, + 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, + 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, + 'gdk_pixbuf_2': gdk_pixbuf_2_info, + 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, + 'gdk': gdk_info, + 'gdk_2': gdk_2_info, + 'gdk-2.0': gdk_2_info, + 'gdk_x11_2': gdk_x11_2_info, + 'gdk-x11-2.0': gdk_x11_2_info, + 'gtkp_x11_2': gtkp_x11_2_info, + 'gtk+-x11-2.0': gtkp_x11_2_info, + 'gtkp_2': gtkp_2_info, + 'gtk+-2.0': gtkp_2_info, + 'xft': xft_info, + 'freetype2': freetype2_info, + 'umfpack': umfpack_info, + 'amd': amd_info, + }.get(name.lower(), system_info) + return cl().get_info(notfound_action) + + +class NotFoundError(DistutilsError): + """Some third-party program or library is not found.""" + + +class AtlasNotFoundError(NotFoundError): + """ + Atlas (http://math-atlas.sourceforge.net/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [atlas]) or by setting + the ATLAS environment variable.""" + + +class LapackNotFoundError(NotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [lapack]) or by setting + the LAPACK environment variable.""" + + +class LapackSrcNotFoundError(LapackNotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [lapack_src]) or by setting + the LAPACK_SRC environment variable.""" + + +class BlasNotFoundError(NotFoundError): + """ + Blas (http://www.netlib.org/blas/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [blas]) or by setting + the BLAS environment variable.""" + + +class BlasSrcNotFoundError(BlasNotFoundError): + """ + Blas (http://www.netlib.org/blas/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [blas_src]) or by setting + the BLAS_SRC environment variable.""" + + +class FFTWNotFoundError(NotFoundError): + """ + FFTW (http://www.fftw.org/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [fftw]) or by setting + the FFTW environment variable.""" + + +class DJBFFTNotFoundError(NotFoundError): + """ + DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [djbfft]) or by setting + the DJBFFT environment variable.""" + + +class NumericNotFoundError(NotFoundError): + """ + Numeric (https://www.numpy.org/) module not found. + Get it from above location, install it, and retry setup.py.""" + + +class X11NotFoundError(NotFoundError): + """X11 libraries not found.""" + + +class UmfpackNotFoundError(NotFoundError): + """ + UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) + not found. Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [umfpack]) or by setting + the UMFPACK environment variable.""" + + +class system_info(object): + + """ get_info() is the only public method. 
Don't use others. + """ + section = 'ALL' + dir_env_var = None + search_static_first = 0 # XXX: disabled by default, may disappear in + # future unless it is proved to be useful. + verbosity = 1 + saved_results = {} + + notfounderror = NotFoundError + + def __init__(self, + default_lib_dirs=default_lib_dirs, + default_include_dirs=default_include_dirs, + verbosity=1, + ): + self.__class__.info = {} + self.local_prefixes = [] + defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), + 'include_dirs': os.pathsep.join(default_include_dirs), + 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), + 'rpath': '', + 'src_dirs': os.pathsep.join(default_src_dirs), + 'search_static_first': str(self.search_static_first), + 'extra_compile_args': '', 'extra_link_args': ''} + self.cp = ConfigParser(defaults) + self.files = [] + self.files.extend(get_standard_file('.numpy-site.cfg')) + self.files.extend(get_standard_file('site.cfg')) + self.parse_config_files() + + if self.section is not None: + self.search_static_first = self.cp.getboolean( + self.section, 'search_static_first') + assert isinstance(self.search_static_first, int) + + def parse_config_files(self): + self.cp.read(self.files) + if not self.cp.has_section(self.section): + if self.section is not None: + self.cp.add_section(self.section) + + def calc_libraries_info(self): + libs = self.get_libraries() + dirs = self.get_lib_dirs() + # The extensions use runtime_library_dirs + r_dirs = self.get_runtime_lib_dirs() + # Intrinsic distutils use rpath, we simply append both entries + # as though they were one entry + r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) + info = {} + for lib in libs: + i = self.check_libs(dirs, [lib]) + if i is not None: + dict_append(info, **i) + else: + log.info('Library %s was not found. Ignoring' % (lib)) + + if r_dirs: + i = self.check_libs(r_dirs, [lib]) + if i is not None: + # Swap library keywords found to runtime_library_dirs + # the libraries are insisting on the user having defined + # them using the library_dirs, and not necessarily by + # runtime_library_dirs + del i['libraries'] + i['runtime_library_dirs'] = i.pop('library_dirs') + dict_append(info, **i) + else: + log.info('Runtime library %s was not found. Ignoring' % (lib)) + + return info + + def set_info(self, **info): + if info: + lib_info = self.calc_libraries_info() + dict_append(info, **lib_info) + # Update extra information + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + self.saved_results[self.__class__.__name__] = info + + def has_info(self): + return self.__class__.__name__ in self.saved_results + + def calc_extra_info(self): + """ Updates the information in the current information with + respect to these flags: + extra_compile_args + extra_link_args + """ + info = {} + for key in ['extra_compile_args', 'extra_link_args']: + # Get values + opt = self.cp.get(self.section, key) + opt = _shell_utils.NativeParser.split(opt) + if opt: + tmp = {key: opt} + dict_append(info, **tmp) + return info + + def get_info(self, notfound_action=0): + """ Return a dictonary with items that are compatible + with numpy.distutils.setup keyword arguments. 
+ """ + flag = 0 + if not self.has_info(): + flag = 1 + log.info(self.__class__.__name__ + ':') + if hasattr(self, 'calc_info'): + self.calc_info() + if notfound_action: + if not self.has_info(): + if notfound_action == 1: + warnings.warn(self.notfounderror.__doc__, stacklevel=2) + elif notfound_action == 2: + raise self.notfounderror(self.notfounderror.__doc__) + else: + raise ValueError(repr(notfound_action)) + + if not self.has_info(): + log.info(' NOT AVAILABLE') + self.set_info() + else: + log.info(' FOUND:') + + res = self.saved_results.get(self.__class__.__name__) + if self.verbosity > 0 and flag: + for k, v in res.items(): + v = str(v) + if k in ['sources', 'libraries'] and len(v) > 270: + v = v[:120] + '...\n...\n...' + v[-120:] + log.info(' %s = %s', k, v) + log.info('') + + return copy.deepcopy(res) + + def get_paths(self, section, key): + dirs = self.cp.get(section, key).split(os.pathsep) + env_var = self.dir_env_var + if env_var: + if is_sequence(env_var): + e0 = env_var[-1] + for e in env_var: + if e in os.environ: + e0 = e + break + if not env_var[0] == e0: + log.info('Setting %s=%s' % (env_var[0], e0)) + env_var = e0 + if env_var and env_var in os.environ: + d = os.environ[env_var] + if d == 'None': + log.info('Disabled %s: %s', + self.__class__.__name__, '(%s is None)' + % (env_var,)) + return [] + if os.path.isfile(d): + dirs = [os.path.dirname(d)] + dirs + l = getattr(self, '_lib_names', []) + if len(l) == 1: + b = os.path.basename(d) + b = os.path.splitext(b)[0] + if b[:3] == 'lib': + log.info('Replacing _lib_names[0]==%r with %r' \ + % (self._lib_names[0], b[3:])) + self._lib_names[0] = b[3:] + else: + ds = d.split(os.pathsep) + ds2 = [] + for d in ds: + if os.path.isdir(d): + ds2.append(d) + for dd in ['include', 'lib']: + d1 = os.path.join(d, dd) + if os.path.isdir(d1): + ds2.append(d1) + dirs = ds2 + dirs + default_dirs = self.cp.get(self.section, key).split(os.pathsep) + dirs.extend(default_dirs) + ret = [] + for d in dirs: + if len(d) > 0 and not os.path.isdir(d): + warnings.warn('Specified path %s is invalid.' 
% d, stacklevel=2) + continue + + if d not in ret: + ret.append(d) + + log.debug('( %s = %s )', key, ':'.join(ret)) + return ret + + def get_lib_dirs(self, key='library_dirs'): + return self.get_paths(self.section, key) + + def get_runtime_lib_dirs(self, key='runtime_library_dirs'): + path = self.get_paths(self.section, key) + if path == ['']: + path = [] + return path + + def get_include_dirs(self, key='include_dirs'): + return self.get_paths(self.section, key) + + def get_src_dirs(self, key='src_dirs'): + return self.get_paths(self.section, key) + + def get_libs(self, key, default): + try: + libs = self.cp.get(self.section, key) + except NoOptionError: + if not default: + return [] + if is_string(default): + return [default] + return default + return [b for b in [a.strip() for a in libs.split(',')] if b] + + def get_libraries(self, key='libraries'): + if hasattr(self, '_lib_names'): + return self.get_libs(key, default=self._lib_names) + else: + return self.get_libs(key, '') + + def library_extensions(self): + c = customized_ccompiler() + static_exts = [] + if c.compiler_type != 'msvc': + # MSVC doesn't understand binutils + static_exts.append('.a') + if sys.platform == 'win32': + static_exts.append('.lib') # .lib is used by MSVC and others + if self.search_static_first: + exts = static_exts + [so_ext] + else: + exts = [so_ext] + static_exts + if sys.platform == 'cygwin': + exts.append('.dll.a') + if sys.platform == 'darwin': + exts.append('.dylib') + return exts + + def check_libs(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks for all libraries as shared libraries first, then + static (or vice versa if self.search_static_first is True). + """ + exts = self.library_extensions() + info = None + for ext in exts: + info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) + if info is not None: + break + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + return info + + def check_libs2(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks each library for shared or static. + """ + exts = self.library_extensions() + info = self._check_libs(lib_dirs, libs, opt_libs, exts) + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + + return info + + def _find_lib(self, lib_dir, lib, exts): + assert is_string(lib_dir) + # under windows first try without 'lib' prefix + if sys.platform == 'win32': + lib_prefixes = ['', 'lib'] + else: + lib_prefixes = ['lib'] + # for each library name, see if we can find a file for it. + for ext in exts: + for prefix in lib_prefixes: + p = self.combine_paths(lib_dir, prefix + lib + ext) + if p: + break + if p: + assert len(p) == 1 + # ??? splitext on p[0] would do this for cygwin + # doesn't seem correct + if ext == '.dll.a': + lib += '.dll' + if ext == '.lib': + lib = prefix + lib + return lib + + return False + + def _find_libs(self, lib_dirs, libs, exts): + # make sure we preserve the order of libs, as it can be important + found_dirs, found_libs = [], [] + for lib in libs: + for lib_dir in lib_dirs: + found_lib = self._find_lib(lib_dir, lib, exts) + if found_lib: + found_libs.append(found_lib) + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + break + return found_dirs, found_libs + + def _check_libs(self, lib_dirs, libs, opt_libs, exts): + """Find mandatory and optional libs in expected paths. 
+ + Missing optional libraries are silently forgotten. + """ + if not is_sequence(lib_dirs): + lib_dirs = [lib_dirs] + # First, try to find the mandatory libraries + found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) + if len(found_libs) > 0 and len(found_libs) == len(libs): + # Now, check for optional libraries + opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) + found_libs.extend(opt_found_libs) + for lib_dir in opt_found_dirs: + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + info = {'libraries': found_libs, 'library_dirs': found_dirs} + return info + else: + return None + + def combine_paths(self, *args): + """Return a list of existing paths composed by all combinations + of items from the arguments. + """ + return combine_paths(*args, **{'verbosity': self.verbosity}) + + +class fft_opt_info(system_info): + + def calc_info(self): + info = {} + fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') + djbfft_info = get_info('djbfft') + if fftw_info: + dict_append(info, **fftw_info) + if djbfft_info: + dict_append(info, **djbfft_info) + self.set_info(**info) + return + + +class fftw_info(system_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + {'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]}] + + def calc_ver_info(self, ver_param): + """Returns True on successful version detection, else False""" + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + libs = self.get_libs(self.section + '_libs', ver_param['libs']) + info = self.check_libs(lib_dirs, libs) + if info is not None: + flag = 0 + for d in incl_dirs: + if len(self.combine_paths(d, ver_param['includes'])) \ + == len(ver_param['includes']): + dict_append(info, include_dirs=[d]) + flag = 1 + incl_dirs = [d] + break + if flag: + dict_append(info, define_macros=ver_param['macros']) + else: + info = None + if info is not None: + self.set_info(**info) + return True + else: + log.info(' %s not found' % (ver_param['name'])) + return False + + def calc_info(self): + for i in self.ver_info: + if self.calc_ver_info(i): + break + + +class fftw2_info(fftw_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]} + ] + + +class fftw3_info(fftw_info): + #variables to override + section = 'fftw3' + dir_env_var = 'FFTW3' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + ] + + +class dfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw', + 'libs':['drfftw', 'dfftw'], + 'includes':['dfftw.h', 'drfftw.h'], + 'macros':[('SCIPY_DFFTW_H', None)]}] + + +class sfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw', + 'libs':['srfftw', 'sfftw'], + 'includes':['sfftw.h', 'srfftw.h'], + 'macros':[('SCIPY_SFFTW_H', None)]}] + + +class fftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'fftw threads', + 'libs':['rfftw_threads', 'fftw_threads'], + 'includes':['fftw_threads.h', 'rfftw_threads.h'], + 'macros':[('SCIPY_FFTW_THREADS_H', 
None)]}] + + +class dfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw threads', + 'libs':['drfftw_threads', 'dfftw_threads'], + 'includes':['dfftw_threads.h', 'drfftw_threads.h'], + 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] + + +class sfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw threads', + 'libs':['srfftw_threads', 'sfftw_threads'], + 'includes':['sfftw_threads.h', 'srfftw_threads.h'], + 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] + + +class djbfft_info(system_info): + section = 'djbfft' + dir_env_var = 'DJBFFT' + notfounderror = DJBFFTNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + info = None + for d in lib_dirs: + p = self.combine_paths(d, ['djbfft.a']) + if p: + info = {'extra_objects': p} + break + p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) + if p: + info = {'libraries': ['djbfft'], 'library_dirs': [d]} + break + if info is None: + return + for d in incl_dirs: + if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: + dict_append(info, include_dirs=[d], + define_macros=[('SCIPY_DJBFFT_H', None)]) + self.set_info(**info) + return + return + + +class mkl_info(system_info): + section = 'mkl' + dir_env_var = 'MKLROOT' + _lib_mkl = ['mkl_rt'] + + def get_mkl_rootdir(self): + mklroot = os.environ.get('MKLROOT', None) + if mklroot is not None: + return mklroot + paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) + ld_so_conf = '/etc/ld.so.conf' + if os.path.isfile(ld_so_conf): + with open(ld_so_conf, 'r') as f: + for d in f: + d = d.strip() + if d: + paths.append(d) + intel_mkl_dirs = [] + for path in paths: + path_atoms = path.split(os.sep) + for m in path_atoms: + if m.startswith('mkl'): + d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) + intel_mkl_dirs.append(d) + break + for d in paths: + dirs = glob(os.path.join(d, 'mkl', '*')) + dirs += glob(os.path.join(d, 'mkl*')) + for d in dirs: + if os.path.isdir(os.path.join(d, 'lib')): + return d + return None + + def __init__(self): + mklroot = self.get_mkl_rootdir() + if mklroot is None: + system_info.__init__(self) + else: + from .cpuinfo import cpu + if cpu.is_Itanium(): + plt = '64' + elif cpu.is_Intel() and cpu.is_64bit(): + plt = 'intel64' + else: + plt = '32' + system_info.__init__( + self, + default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], + default_include_dirs=[os.path.join(mklroot, 'include')]) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + mkl_libs = self.get_libs('mkl_libs', self._lib_mkl) + info = self.check_libs2(lib_dirs, mkl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + if sys.platform == 'win32': + pass # win32 has no pthread library + else: + dict_append(info, libraries=['pthread']) + self.set_info(**info) + + +class lapack_mkl_info(mkl_info): + pass + + +class blas_mkl_info(mkl_info): + pass + + +class atlas_info(system_info): + section = 'atlas' + dir_env_var = 'ATLAS' + _lib_names = ['f77blas', 'cblas'] + if sys.platform[:7] == 'freebsd': + _lib_atlas = ['atlas_r'] + _lib_lapack = ['alapack_r'] + else: + _lib_atlas = 
['atlas'] + _lib_lapack = ['lapack'] + + notfounderror = AtlasNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', + 'sse', '3dnow', 'sse2']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + atlas_libs = self.get_libs('atlas_libs', + self._lib_names + self._lib_atlas) + lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) + atlas = None + lapack = None + atlas_1 = None + for d in lib_dirs: + # FIXME: lapack_atlas is unused + lapack_atlas = self.check_libs2(d, ['lapack_atlas'], []) + atlas = self.check_libs2(d, atlas_libs, []) + if atlas is not None: + lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) + lapack = self.check_libs2(lib_dirs2, lapack_libs, []) + if lapack is not None: + break + if atlas: + atlas_1 = atlas + log.info(self.__class__) + if atlas is None: + atlas = atlas_1 + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + if lapack is not None: + dict_append(info, **lapack) + dict_append(info, **atlas) + elif 'lapack_atlas' in atlas['libraries']: + dict_append(info, **atlas) + dict_append(info, + define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) + self.set_info(**info) + return + else: + dict_append(info, **atlas) + dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) + message = """ +********************************************************************* + Could not find lapack library within the ATLAS installation. +********************************************************************* +""" + warnings.warn(message, stacklevel=2) + self.set_info(**info) + return + + # Check if lapack library is complete, only warn if it is not. + lapack_dir = lapack['library_dirs'][0] + lapack_name = lapack['libraries'][0] + lapack_lib = None + lib_prefixes = ['lib'] + if sys.platform == 'win32': + lib_prefixes.append('') + for e in self.library_extensions(): + for prefix in lib_prefixes: + fn = os.path.join(lapack_dir, prefix + lapack_name + e) + if os.path.exists(fn): + lapack_lib = fn + break + if lapack_lib: + break + if lapack_lib is not None: + sz = os.stat(lapack_lib)[6] + if sz <= 4000 * 1024: + message = """ +********************************************************************* + Lapack library (from ATLAS) is probably incomplete: + size of %s is %sk (expected >4000k) + + Follow the instructions in the KNOWN PROBLEMS section of the file + numpy/INSTALL.txt. 
+********************************************************************* +""" % (lapack_lib, sz / 1024) + warnings.warn(message, stacklevel=2) + else: + info['language'] = 'f77' + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(info, **atlas_extra_info) + + self.set_info(**info) + + +class atlas_blas_info(atlas_info): + _lib_names = ['f77blas', 'cblas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + atlas_libs = self.get_libs('atlas_libs', + self._lib_names + self._lib_atlas) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + +class atlas_threads_info(atlas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class atlas_blas_threads_info(atlas_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class lapack_atlas_info(atlas_info): + _lib_names = ['lapack_atlas'] + atlas_info._lib_names + + +class lapack_atlas_threads_info(atlas_threads_info): + _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names + + +class atlas_3_10_info(atlas_info): + _lib_names = ['satlas'] + _lib_atlas = _lib_names + _lib_lapack = _lib_names + + +class atlas_3_10_blas_info(atlas_3_10_info): + _lib_names = ['satlas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + atlas_libs = self.get_libs('atlas_libs', + self._lib_names) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + +class atlas_3_10_threads_info(atlas_3_10_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['tatlas'] + _lib_atlas = _lib_names + _lib_lapack = _lib_names + + +class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['tatlas'] + + +class lapack_atlas_3_10_info(atlas_3_10_info): + pass + + +class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): + pass + + +class lapack_info(system_info): + section = 'lapack' + dir_env_var = 'LAPACK' + _lib_names = ['lapack'] + notfounderror = LapackNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + lapack_libs = self.get_libs('lapack_libs', self._lib_names) + info = self.check_libs(lib_dirs, lapack_libs, []) + if info is None: + return + info['language'] = 'f77' + self.set_info(**info) + + +class lapack_src_info(system_info): + section = 'lapack_src' + dir_env_var = 'LAPACK_SRC' + notfounderror = LapackSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) + return [d for d 
in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'dgesv.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. May be ask first. + return + # The following is extracted from LAPACK-3.0/SRC/Makefile. + # Added missing names from lapack-lite-3.1.1/SRC/Makefile + # while keeping removed names for Lapack-3.0 compatibility. + allaux = ''' + ilaenv ieeeck lsame lsamen xerbla + iparmq + ''' # *.f + laux = ''' + bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 + laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 + lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre + larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 + lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 + lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf + stebz stedc steqr sterf + + larra larrc larrd larr larrk larrj larrr laneg laisnan isnan + lazq3 lazq4 + ''' # [s|d]*.f + lasrc = ''' + gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak + gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv + gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 + geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd + gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal + gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd + ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein + hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 + lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb + lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp + laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv + lartv larz larzb larzt laswp lasyf latbs latdf latps latrd + latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv + pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 + potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri + pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs + spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv + sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 + tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs + trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs + tzrqf tzrzf + + lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 + ''' # [s|c|d|z]*.f + sd_lasrc = ''' + laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l + org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr + orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 + ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx + sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd + stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd + sygvx sytd2 sytrd + ''' # [s|d]*.f + cz_lasrc = ''' + bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev + heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv + hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd + hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf + hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 + laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe + laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv + spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq + ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 + unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr + ''' # [c|z]*.f + ####### + sclaux = laux + ' econd ' # s*.f + dzlaux = laux + ' secnd ' # d*.f + slasrc = lasrc + 
sd_lasrc # s*.f + dlasrc = lasrc + sd_lasrc # d*.f + clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f + zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f + oclasrc = ' icmax1 scsum1 ' # *.f + ozlasrc = ' izmax1 dzsum1 ' # *.f + sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ + + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ + + ['c%s.f' % f for f in (clasrc).split()] \ + + ['z%s.f' % f for f in (zlasrc).split()] \ + + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()] + sources = [os.path.join(src_dir, f) for f in sources] + # Lapack 3.1: + src_dir2 = os.path.join(src_dir, '..', 'INSTALL') + sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] + # Lapack 3.2.1: + sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] + # Should we check here actual existence of source files? + # Yes, the file listing is different between 3.0 and 3.1 + # versions. + sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + +atlas_version_c_text = r''' +/* This file is generated from numpy/distutils/system_info.py */ +void ATL_buildinfo(void); +int main(void) { + ATL_buildinfo(); + return 0; +} +''' + +_cached_atlas_version = {} + + +def get_atlas_version(**config): + libraries = config.get('libraries', []) + library_dirs = config.get('library_dirs', []) + key = (tuple(libraries), tuple(library_dirs)) + if key in _cached_atlas_version: + return _cached_atlas_version[key] + c = cmd_config(Distribution()) + atlas_version = None + info = {} + try: + s, o = c.get_output(atlas_version_c_text, + libraries=libraries, library_dirs=library_dirs, + use_tee=(system_info.verbosity > 0)) + if s and re.search(r'undefined reference to `_gfortran', o, re.M): + s, o = c.get_output(atlas_version_c_text, + libraries=libraries + ['gfortran'], + library_dirs=library_dirs, + use_tee=(system_info.verbosity > 0)) + if not s: + warnings.warn(""" +***************************************************** +Linkage with ATLAS requires gfortran. Use + + python setup.py config_fc --fcompiler=gnu95 ... + +when building extension libraries that use ATLAS. +Make sure that -lgfortran is used for C++ extensions. 
+***************************************************** +""", stacklevel=2) + dict_append(info, language='f90', + define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) + except Exception: # failed to get version from file -- maybe on Windows + # look at directory name + for o in library_dirs: + m = re.search(r'ATLAS_(?P\d+[.]\d+[.]\d+)_', o) + if m: + atlas_version = m.group('version') + if atlas_version is not None: + break + + # final choice --- look at ATLAS_VERSION environment + # variable + if atlas_version is None: + atlas_version = os.environ.get('ATLAS_VERSION', None) + if atlas_version: + dict_append(info, define_macros=[( + 'ATLAS_INFO', _c_string_literal(atlas_version)) + ]) + else: + dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) + return atlas_version or '?.?.?', info + + if not s: + m = re.search(r'ATLAS version (?P\d+[.]\d+[.]\d+)', o) + if m: + atlas_version = m.group('version') + if atlas_version is None: + if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): + atlas_version = '3.2.1_pre3.3.6' + else: + log.info('Status: %d', s) + log.info('Output: %s', o) + + if atlas_version == '3.2.1_pre3.3.6': + dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) + else: + dict_append(info, define_macros=[( + 'ATLAS_INFO', _c_string_literal(atlas_version)) + ]) + result = _cached_atlas_version[key] = atlas_version, info + return result + + +class lapack_opt_info(system_info): + + notfounderror = LapackNotFoundError + + def calc_info(self): + + lapack_mkl_info = get_info('lapack_mkl') + if lapack_mkl_info: + self.set_info(**lapack_mkl_info) + return + + openblas_info = get_info('openblas_lapack') + if openblas_info: + self.set_info(**openblas_info) + return + + openblas_info = get_info('openblas_clapack') + if openblas_info: + self.set_info(**openblas_info) + return + + atlas_info = get_info('atlas_3_10_threads') + if not atlas_info: + atlas_info = get_info('atlas_3_10') + if not atlas_info: + atlas_info = get_info('atlas_threads') + if not atlas_info: + atlas_info = get_info('atlas') + + accelerate_info = get_info('accelerate') + if accelerate_info and not atlas_info: + self.set_info(**accelerate_info) + return + + need_lapack = 0 + need_blas = 0 + info = {} + if atlas_info: + l = atlas_info.get('define_macros', []) + if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ + or ('ATLAS_WITHOUT_LAPACK', None) in l: + need_lapack = 1 + info = atlas_info + + else: + warnings.warn(AtlasNotFoundError.__doc__, stacklevel=2) + need_blas = 1 + need_lapack = 1 + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + + if need_lapack: + lapack_info = get_info('lapack') + #lapack_info = {} ## uncomment for testing + if lapack_info: + dict_append(info, **lapack_info) + else: + warnings.warn(LapackNotFoundError.__doc__, stacklevel=2) + lapack_src_info = get_info('lapack_src') + if not lapack_src_info: + warnings.warn(LapackSrcNotFoundError.__doc__, stacklevel=2) + return + dict_append(info, libraries=[('flapack_src', lapack_src_info)]) + + if need_blas: + blas_info = get_info('blas') + if blas_info: + dict_append(info, **blas_info) + else: + warnings.warn(BlasNotFoundError.__doc__, stacklevel=2) + blas_src_info = get_info('blas_src') + if not blas_src_info: + warnings.warn(BlasSrcNotFoundError.__doc__, stacklevel=2) + return + dict_append(info, libraries=[('fblas_src', blas_src_info)]) + + self.set_info(**info) + return + + +class blas_opt_info(system_info): + + notfounderror = BlasNotFoundError + + def calc_info(self): + + blas_mkl_info = get_info('blas_mkl') + if blas_mkl_info: + 
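+            # MKL, when found, is preferred over every later candidate in
+            # this cascade (BLIS, OpenBLAS, ATLAS, Accelerate, plain BLAS).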
self.set_info(**blas_mkl_info) + return + + blis_info = get_info('blis') + if blis_info: + self.set_info(**blis_info) + return + + openblas_info = get_info('openblas') + if openblas_info: + self.set_info(**openblas_info) + return + + atlas_info = get_info('atlas_3_10_blas_threads') + if not atlas_info: + atlas_info = get_info('atlas_3_10_blas') + if not atlas_info: + atlas_info = get_info('atlas_blas_threads') + if not atlas_info: + atlas_info = get_info('atlas_blas') + + accelerate_info = get_info('accelerate') + if accelerate_info and not atlas_info: + self.set_info(**accelerate_info) + return + + need_blas = 0 + info = {} + if atlas_info: + info = atlas_info + else: + warnings.warn(AtlasNotFoundError.__doc__, stacklevel=2) + need_blas = 1 + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + + if need_blas: + blas_info = get_info('blas') + if blas_info: + dict_append(info, **blas_info) + else: + warnings.warn(BlasNotFoundError.__doc__, stacklevel=2) + blas_src_info = get_info('blas_src') + if not blas_src_info: + warnings.warn(BlasSrcNotFoundError.__doc__, stacklevel=2) + return + dict_append(info, libraries=[('fblas_src', blas_src_info)]) + + self.set_info(**info) + return + + +class blas_info(system_info): + section = 'blas' + dir_env_var = 'BLAS' + _lib_names = ['blas'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + blas_libs = self.get_libs('blas_libs', self._lib_names) + info = self.check_libs(lib_dirs, blas_libs, []) + if info is None: + return + else: + info['include_dirs'] = self.get_include_dirs() + if platform.system() == 'Windows': + # The check for windows is needed because has_cblas uses the + # same compiler that was used to compile Python and msvc is + # often not installed when mingw is being used. This rough + # treatment is not desirable, but windows is tricky. + info['language'] = 'f77' # XXX: is it generally true? + else: + lib = self.has_cblas(info) + if lib is not None: + info['language'] = 'c' + info['libraries'] = [lib] + info['define_macros'] = [('HAVE_CBLAS', None)] + self.set_info(**info) + + def has_cblas(self, info): + # primitive cblas check by looking for the header and trying to link + # cblas or blas + res = False + c = customized_ccompiler() + tmpdir = tempfile.mkdtemp() + s = """#include + int main(int argc, const char *argv[]) + { + double a[4] = {1,2,3,4}; + double b[4] = {5,6,7,8}; + return cblas_ddot(4, a, 1, b, 1) > 10; + }""" + src = os.path.join(tmpdir, 'source.c') + try: + with open(src, 'wt') as f: + f.write(s) + + try: + # check we can compile (find headers) + obj = c.compile([src], output_dir=tmpdir, + include_dirs=self.get_include_dirs()) + + # check we can link (find library) + # some systems have separate cblas and blas libs. First + # check for cblas lib, and if not present check for blas lib. 
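+            # A CompileError below means cblas.h itself is missing, so
+            # has_cblas() returns None; a LinkError on -lcblas only falls
+            # through to the plain -lblas attempt.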
+ try: + c.link_executable(obj, os.path.join(tmpdir, "a.out"), + libraries=["cblas"], + library_dirs=info['library_dirs'], + extra_postargs=info.get('extra_link_args', [])) + res = "cblas" + except distutils.ccompiler.LinkError: + c.link_executable(obj, os.path.join(tmpdir, "a.out"), + libraries=["blas"], + library_dirs=info['library_dirs'], + extra_postargs=info.get('extra_link_args', [])) + res = "blas" + except distutils.ccompiler.CompileError: + res = None + finally: + shutil.rmtree(tmpdir) + return res + + +class openblas_info(blas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + notfounderror = BlasNotFoundError + + def check_embedded_lapack(self, info): + return True + + def calc_info(self): + c = customized_ccompiler() + + lib_dirs = self.get_lib_dirs() + + openblas_libs = self.get_libs('libraries', self._lib_names) + if openblas_libs == self._lib_names: # backward compat with 1.8.0 + openblas_libs = self.get_libs('openblas_libs', self._lib_names) + + info = self.check_libs(lib_dirs, openblas_libs, []) + + if c.compiler_type == "msvc" and info is None: + from numpy.distutils.fcompiler import new_fcompiler + f = new_fcompiler(c_compiler=c) + if f and f.compiler_type == 'gnu95': + # Try gfortran-compatible library files + info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) + # Skip lapack check, we'd need build_ext to do it + assume_lapack = True + elif info: + assume_lapack = False + info['language'] = 'c' + + if info is None: + return + + # Add extra info for OpenBLAS + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + + if not (assume_lapack or self.check_embedded_lapack(info)): + return + + info['define_macros'] = [('HAVE_CBLAS', None)] + self.set_info(**info) + + def check_msvc_gfortran_libs(self, library_dirs, libraries): + # First, find the full path to each library directory + library_paths = [] + for library in libraries: + for library_dir in library_dirs: + # MinGW static ext will be .a + fullpath = os.path.join(library_dir, library + '.a') + if os.path.isfile(fullpath): + library_paths.append(fullpath) + break + else: + return None + + # Generate numpy.distutils virtual static library file + tmpdir = os.path.join(os.getcwd(), 'build', 'openblas') + if not os.path.isdir(tmpdir): + os.makedirs(tmpdir) + + info = {'library_dirs': [tmpdir], + 'libraries': ['openblas'], + 'language': 'f77'} + + fake_lib_file = os.path.join(tmpdir, 'openblas.fobjects') + fake_clib_file = os.path.join(tmpdir, 'openblas.cobjects') + with open(fake_lib_file, 'w') as f: + f.write("\n".join(library_paths)) + with open(fake_clib_file, 'w') as f: + pass + + return info + +class openblas_lapack_info(openblas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + notfounderror = BlasNotFoundError + + def check_embedded_lapack(self, info): + res = False + c = customized_ccompiler() + + tmpdir = tempfile.mkdtemp() + s = """void zungqr_(); + int main(int argc, const char *argv[]) + { + zungqr_(); + return 0; + }""" + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + # Add the additional "extra" arguments + try: + extra_args = info['extra_link_args'] + except Exception: + extra_args = [] + if sys.version_info < (3, 5) and sys.version_info > (3, 0) and c.compiler_type == "msvc": + extra_args.append("/MANIFEST") + try: + with open(src, 'wt') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + 
library_dirs=info['library_dirs'], + extra_postargs=extra_args) + res = True + except distutils.ccompiler.LinkError: + res = False + finally: + shutil.rmtree(tmpdir) + return res + +class openblas_clapack_info(openblas_lapack_info): + _lib_names = ['openblas', 'lapack'] + +class blis_info(blas_info): + section = 'blis' + dir_env_var = 'BLIS' + _lib_names = ['blis'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + blis_libs = self.get_libs('libraries', self._lib_names) + if blis_libs == self._lib_names: + blis_libs = self.get_libs('blis_libs', self._lib_names) + + info = self.check_libs2(lib_dirs, blis_libs, []) + if info is None: + return + + # Add include dirs + incl_dirs = self.get_include_dirs() + dict_append(info, + language='c', + define_macros=[('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + self.set_info(**info) + +class accelerate_info(system_info): + section = 'accelerate' + notfounderror = BlasNotFoundError + + def calc_info(self): + # Make possible to enable/disable from config file/env var + libraries = os.environ.get('ACCELERATE') + if libraries: + libraries = [libraries] + else: + libraries = self.get_libs('libraries', ['accelerate', 'veclib']) + libraries = [lib.strip().lower() for lib in libraries] + + if (sys.platform == 'darwin' and + not os.getenv('_PYTHON_HOST_PLATFORM', None)): + # Use the system BLAS from Accelerate or vecLib under OSX + args = [] + link_args = [] + if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ + 'x86_64' in get_platform() or \ + 'i386' in platform.platform(): + intel = 1 + else: + intel = 0 + if (os.path.exists('/System/Library/Frameworks' + '/Accelerate.framework/') and + 'accelerate' in libraries): + if intel: + args.extend(['-msse3']) + else: + args.extend(['-faltivec']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) + elif (os.path.exists('/System/Library/Frameworks' + '/vecLib.framework/') and + 'veclib' in libraries): + if intel: + args.extend(['-msse3']) + else: + args.extend(['-faltivec']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,vecLib']) + + if args: + self.set_info(extra_compile_args=args, + extra_link_args=link_args, + define_macros=[('NO_ATLAS_INFO', 3), + ('HAVE_CBLAS', None)]) + + return + +class blas_src_info(system_info): + section = 'blas_src' + dir_env_var = 'BLAS_SRC' + notfounderror = BlasSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['blas'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'daxpy.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. May be ask first. 
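+            # With no source directory found, set_info() is never called,
+            # so get_info('blas_src') reports this resource as NOT AVAILABLE.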
+ return + blas1 = ''' + caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot + dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 + srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg + dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax + snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap + scabs1 + ''' + blas2 = ''' + cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv + chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv + dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv + sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger + stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc + zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 + ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv + ''' + blas3 = ''' + cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k + dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm + ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm + ''' + sources = [os.path.join(src_dir, f + '.f') \ + for f in (blas1 + blas2 + blas3).split()] + #XXX: should we check here actual existence of source files? + sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + + +class x11_info(system_info): + section = 'x11' + notfounderror = X11NotFoundError + + def __init__(self): + system_info.__init__(self, + default_lib_dirs=default_x11_lib_dirs, + default_include_dirs=default_x11_include_dirs) + + def calc_info(self): + if sys.platform in ['win32']: + return + lib_dirs = self.get_lib_dirs() + include_dirs = self.get_include_dirs() + x11_libs = self.get_libs('x11_libs', ['X11']) + info = self.check_libs(lib_dirs, x11_libs, []) + if info is None: + return + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, 'X11/X.h'): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + self.set_info(**info) + + +class _numpy_info(system_info): + section = 'Numeric' + modulename = 'Numeric' + notfounderror = NumericNotFoundError + + def __init__(self): + include_dirs = [] + try: + module = __import__(self.modulename) + prefix = [] + for name in module.__file__.split(os.sep): + if name == 'lib': + break + prefix.append(name) + + # Ask numpy for its own include path before attempting + # anything else + try: + include_dirs.append(getattr(module, 'get_include')()) + except AttributeError: + pass + + include_dirs.append(distutils.sysconfig.get_python_inc( + prefix=os.sep.join(prefix))) + except ImportError: + pass + py_incl_dir = distutils.sysconfig.get_python_inc() + include_dirs.append(py_incl_dir) + py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) + if py_pincl_dir not in include_dirs: + include_dirs.append(py_pincl_dir) + for d in default_include_dirs: + d = os.path.join(d, os.path.basename(py_incl_dir)) + if d not in include_dirs: + include_dirs.append(d) + system_info.__init__(self, + default_lib_dirs=[], + default_include_dirs=include_dirs) + + def calc_info(self): + try: + module = __import__(self.modulename) + except ImportError: + return + info = {} + macros = [] + for v in ['__version__', 'version']: + vrs = getattr(module, v, None) + if vrs is None: + continue + macros = [(self.modulename.upper() + '_VERSION', + _c_string_literal(vrs)), + (self.modulename.upper(), None)] + break + dict_append(info, define_macros=macros) + include_dirs = self.get_include_dirs() + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, + 
os.path.join(self.modulename, + 'arrayobject.h')): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + if info: + self.set_info(**info) + return + + +class numarray_info(_numpy_info): + section = 'numarray' + modulename = 'numarray' + + +class Numeric_info(_numpy_info): + section = 'Numeric' + modulename = 'Numeric' + + +class numpy_info(_numpy_info): + section = 'numpy' + modulename = 'numpy' + + +class numerix_info(system_info): + section = 'numerix' + + def calc_info(self): + which = None, None + if os.getenv("NUMERIX"): + which = os.getenv("NUMERIX"), "environment var" + # If all the above fail, default to numpy. + if which[0] is None: + which = "numpy", "defaulted" + try: + import numpy # noqa: F401 + which = "numpy", "defaulted" + except ImportError: + msg1 = str(get_exception()) + try: + import Numeric # noqa: F401 + which = "numeric", "defaulted" + except ImportError: + msg2 = str(get_exception()) + try: + import numarray # noqa: F401 + which = "numarray", "defaulted" + except ImportError: + msg3 = str(get_exception()) + log.info(msg1) + log.info(msg2) + log.info(msg3) + which = which[0].strip().lower(), which[1] + if which[0] not in ["numeric", "numarray", "numpy"]: + raise ValueError("numerix selector must be either 'Numeric' " + "or 'numarray' or 'numpy' but the value obtained" + " from the %s was '%s'." % (which[1], which[0])) + os.environ['NUMERIX'] = which[0] + self.set_info(**get_info(which[0])) + + +class f2py_info(system_info): + def calc_info(self): + try: + import numpy.f2py as f2py + except ImportError: + return + f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') + self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], + include_dirs=[f2py_dir]) + return + + +class boost_python_info(system_info): + section = 'boost_python' + dir_env_var = 'BOOST' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['boost*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', + 'module.cpp')): + src_dir = d + break + if not src_dir: + return + py_incl_dirs = [distutils.sysconfig.get_python_inc()] + py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) + if py_pincl_dir not in py_incl_dirs: + py_incl_dirs.append(py_pincl_dir) + srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') + bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) + bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) + info = {'libraries': [('boost_python_src', + {'include_dirs': [src_dir] + py_incl_dirs, + 'sources':bpl_srcs} + )], + 'include_dirs': [src_dir], + } + if info: + self.set_info(**info) + return + + +class agg2_info(system_info): + section = 'agg2' + dir_env_var = 'AGG2' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['agg2*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): + src_dir = d + break + if not src_dir: + return + if sys.platform == 'win32': + agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', + 'win32', 'agg_win32_bmp.cpp')) + else: + agg2_srcs = 
glob(os.path.join(src_dir, 'src', '*.cpp')) + agg2_srcs += [os.path.join(src_dir, 'src', 'platform', + 'X11', + 'agg_platform_support.cpp')] + + info = {'libraries': + [('agg2_src', + {'sources': agg2_srcs, + 'include_dirs': [os.path.join(src_dir, 'include')], + } + )], + 'include_dirs': [os.path.join(src_dir, 'include')], + } + if info: + self.set_info(**info) + return + + +class _pkg_config_info(system_info): + section = None + config_env_var = 'PKG_CONFIG' + default_config_exe = 'pkg-config' + append_config_exe = '' + version_macro_name = None + release_macro_name = None + version_flag = '--modversion' + cflags_flag = '--cflags' + + def get_config_exe(self): + if self.config_env_var in os.environ: + return os.environ[self.config_env_var] + return self.default_config_exe + + def get_config_output(self, config_exe, option): + cmd = config_exe + ' ' + self.append_config_exe + ' ' + option + try: + o = subprocess.check_output(cmd) + except (OSError, subprocess.CalledProcessError): + pass + else: + o = filepath_from_subprocess_output(o) + return o + + def calc_info(self): + config_exe = find_executable(self.get_config_exe()) + if not config_exe: + log.warn('File not found: %s. Cannot determine %s info.' \ + % (config_exe, self.section)) + return + info = {} + macros = [] + libraries = [] + library_dirs = [] + include_dirs = [] + extra_link_args = [] + extra_compile_args = [] + version = self.get_config_output(config_exe, self.version_flag) + if version: + macros.append((self.__class__.__name__.split('.')[-1].upper(), + _c_string_literal(version))) + if self.version_macro_name: + macros.append((self.version_macro_name + '_%s' + % (version.replace('.', '_')), None)) + if self.release_macro_name: + release = self.get_config_output(config_exe, '--release') + if release: + macros.append((self.release_macro_name + '_%s' + % (release.replace('.', '_')), None)) + opts = self.get_config_output(config_exe, '--libs') + if opts: + for opt in opts.split(): + if opt[:2] == '-l': + libraries.append(opt[2:]) + elif opt[:2] == '-L': + library_dirs.append(opt[2:]) + else: + extra_link_args.append(opt) + opts = self.get_config_output(config_exe, self.cflags_flag) + if opts: + for opt in opts.split(): + if opt[:2] == '-I': + include_dirs.append(opt[2:]) + elif opt[:2] == '-D': + if '=' in opt: + n, v = opt[2:].split('=') + macros.append((n, v)) + else: + macros.append((opt[2:], None)) + else: + extra_compile_args.append(opt) + if macros: + dict_append(info, define_macros=macros) + if libraries: + dict_append(info, libraries=libraries) + if library_dirs: + dict_append(info, library_dirs=library_dirs) + if include_dirs: + dict_append(info, include_dirs=include_dirs) + if extra_link_args: + dict_append(info, extra_link_args=extra_link_args) + if extra_compile_args: + dict_append(info, extra_compile_args=extra_compile_args) + if info: + self.set_info(**info) + return + + +class wx_info(_pkg_config_info): + section = 'wx' + config_env_var = 'WX_CONFIG' + default_config_exe = 'wx-config' + append_config_exe = '' + version_macro_name = 'WX_VERSION' + release_macro_name = 'WX_RELEASE' + version_flag = '--version' + cflags_flag = '--cxxflags' + + +class gdk_pixbuf_xlib_2_info(_pkg_config_info): + section = 'gdk_pixbuf_xlib_2' + append_config_exe = 'gdk-pixbuf-xlib-2.0' + version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' + + +class gdk_pixbuf_2_info(_pkg_config_info): + section = 'gdk_pixbuf_2' + append_config_exe = 'gdk-pixbuf-2.0' + version_macro_name = 'GDK_PIXBUF_VERSION' + + +class gdk_x11_2_info(_pkg_config_info): + 
section = 'gdk_x11_2' + append_config_exe = 'gdk-x11-2.0' + version_macro_name = 'GDK_X11_VERSION' + + +class gdk_2_info(_pkg_config_info): + section = 'gdk_2' + append_config_exe = 'gdk-2.0' + version_macro_name = 'GDK_VERSION' + + +class gdk_info(_pkg_config_info): + section = 'gdk' + append_config_exe = 'gdk' + version_macro_name = 'GDK_VERSION' + + +class gtkp_x11_2_info(_pkg_config_info): + section = 'gtkp_x11_2' + append_config_exe = 'gtk+-x11-2.0' + version_macro_name = 'GTK_X11_VERSION' + + +class gtkp_2_info(_pkg_config_info): + section = 'gtkp_2' + append_config_exe = 'gtk+-2.0' + version_macro_name = 'GTK_VERSION' + + +class xft_info(_pkg_config_info): + section = 'xft' + append_config_exe = 'xft' + version_macro_name = 'XFT_VERSION' + + +class freetype2_info(_pkg_config_info): + section = 'freetype2' + append_config_exe = 'freetype2' + version_macro_name = 'FREETYPE2_VERSION' + + +class amd_info(system_info): + section = 'amd' + dir_env_var = 'AMD' + _lib_names = ['amd'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + amd_libs = self.get_libs('amd_libs', self._lib_names) + info = self.check_libs(lib_dirs, amd_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, 'amd.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_AMD_H', None)], + swig_opts=['-I' + inc_dir]) + + self.set_info(**info) + return + + +class umfpack_info(system_info): + section = 'umfpack' + dir_env_var = 'UMFPACK' + notfounderror = UmfpackNotFoundError + _lib_names = ['umfpack'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + umfpack_libs = self.get_libs('umfpack_libs', self._lib_names) + info = self.check_libs(lib_dirs, umfpack_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_UMFPACK_H', None)], + swig_opts=['-I' + inc_dir]) + + dict_append(info, **get_info('amd')) + + self.set_info(**info) + return + + +def combine_paths(*args, **kws): + """ Return a list of existing paths composed by all combinations of + items from arguments. 
+ """ + r = [] + for a in args: + if not a: + continue + if is_string(a): + a = [a] + r.append(a) + args = r + if not args: + return [] + if len(args) == 1: + result = reduce(lambda a, b: a + b, map(glob, args[0]), []) + elif len(args) == 2: + result = [] + for a0 in args[0]: + for a1 in args[1]: + result.extend(glob(os.path.join(a0, a1))) + else: + result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) + log.debug('(paths: %s)', ','.join(result)) + return result + +language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} +inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} + + +def dict_append(d, **kws): + languages = [] + for k, v in kws.items(): + if k == 'language': + languages.append(v) + continue + if k in d: + if k in ['library_dirs', 'include_dirs', + 'extra_compile_args', 'extra_link_args', + 'runtime_library_dirs', 'define_macros']: + [d[k].append(vv) for vv in v if vv not in d[k]] + else: + d[k].extend(v) + else: + d[k] = v + if languages: + l = inv_language_map[max([language_map.get(l, 0) for l in languages])] + d['language'] = l + return + + +def parseCmdLine(argv=(None,)): + import optparse + parser = optparse.OptionParser("usage: %prog [-v] [info objs]") + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', + default=False, + help='be verbose and print more messages') + + opts, args = parser.parse_args(args=argv[1:]) + return opts, args + + +def show_all(argv=None): + import inspect + if argv is None: + argv = sys.argv + opts, args = parseCmdLine(argv) + if opts.verbose: + log.set_threshold(log.DEBUG) + else: + log.set_threshold(log.INFO) + show_only = [] + for n in args: + if n[-5:] != '_info': + n = n + '_info' + show_only.append(n) + show_all = not show_only + _gdict_ = globals().copy() + for name, c in _gdict_.items(): + if not inspect.isclass(c): + continue + if not issubclass(c, system_info) or c is system_info: + continue + if not show_all: + if name not in show_only: + continue + del show_only[show_only.index(name)] + conf = c() + conf.verbosity = 2 + # FIXME: r not used + r = conf.get_info() + if show_only: + log.info('Info classes not defined: %s', ','.join(show_only)) + +if __name__ == "__main__": + show_all() diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/system_info.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/system_info.pyc new file mode 100644 index 0000000..f513353 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/system_info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/__init__.pyc new file mode 100644 index 0000000..a6daeb5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_exec_command.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_exec_command.py new file mode 100644 index 0000000..8bd2650 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_exec_command.py @@ -0,0 +1,215 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +from tempfile import TemporaryFile + +from 
numpy.distutils import exec_command +from numpy.distutils.exec_command import get_pythonexe +from numpy.testing import tempdir, assert_ + +# In python 3 stdout, stderr are text (unicode compliant) devices, so to +# emulate them import StringIO from the io module. +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +class redirect_stdout(object): + """Context manager to redirect stdout for exec_command test.""" + def __init__(self, stdout=None): + self._stdout = stdout or sys.stdout + + def __enter__(self): + self.old_stdout = sys.stdout + sys.stdout = self._stdout + + def __exit__(self, exc_type, exc_value, traceback): + self._stdout.flush() + sys.stdout = self.old_stdout + # note: closing sys.stdout won't close it. + self._stdout.close() + +class redirect_stderr(object): + """Context manager to redirect stderr for exec_command test.""" + def __init__(self, stderr=None): + self._stderr = stderr or sys.stderr + + def __enter__(self): + self.old_stderr = sys.stderr + sys.stderr = self._stderr + + def __exit__(self, exc_type, exc_value, traceback): + self._stderr.flush() + sys.stderr = self.old_stderr + # note: closing sys.stderr won't close it. + self._stderr.close() + +class emulate_nonposix(object): + """Context manager to emulate os.name != 'posix' """ + def __init__(self, osname='non-posix'): + self._new_name = osname + + def __enter__(self): + self._old_name = os.name + os.name = self._new_name + + def __exit__(self, exc_type, exc_value, traceback): + os.name = self._old_name + + +def test_exec_command_stdout(): + # Regression test for gh-2999 and gh-2915. + # There are several packages (nose, scipy.weave.inline, Sage inline + # Fortran) that replace stdout, in which case it doesn't have a fileno + # method. This is tested here, with a do-nothing command that fails if the + # presence of fileno() is assumed in exec_command. + + # The code has a special case for posix systems, so if we are on posix test + # both that the special case works and that the generic code works. 
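+    # (emulate_nonposix, defined above, is what makes the second case
+    # testable on a posix machine: it temporarily swaps os.name to a dummy
+    # value, so exec_command takes its generic spawn path rather than the
+    # posix-only branch.)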
+ + # Test posix version: + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + exec_command.exec_command("cd '.'") + +def test_exec_command_stderr(): + # Test posix version: + with redirect_stdout(TemporaryFile(mode='w+')): + with redirect_stderr(StringIO()): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(TemporaryFile()): + with redirect_stderr(StringIO()): + exec_command.exec_command("cd '.'") + + +class TestExecCommand(object): + def setup(self): + self.pyexe = get_pythonexe() + + def check_nt(self, **kws): + s, o = exec_command.exec_command('cmd /C echo path=%path%') + assert_(s == 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) + assert_(s == 0) + assert_(o == 'win32') + + def check_posix(self, **kws): + s, o = exec_command.exec_command("echo Hello", **kws) + assert_(s == 0) + assert_(o == 'Hello') + + s, o = exec_command.exec_command('echo $AAA', **kws) + assert_(s == 0) + assert_(o == '') + + s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) + assert_(s == 0) + assert_(o == 'Tere') + + s, o = exec_command.exec_command('echo "$AAA"', **kws) + assert_(s == 0) + assert_(o == '') + + if 'BBB' not in os.environ: + os.environ['BBB'] = 'Hi' + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == 'Hi') + + s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) + assert_(s == 0) + assert_(o == 'Hey') + + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == 'Hi') + + del os.environ['BBB'] + + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == '') + + + s, o = exec_command.exec_command('this_is_not_a_command', **kws) + assert_(s != 0) + assert_(o != '') + + s, o = exec_command.exec_command('echo path=$PATH', **kws) + assert_(s == 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % + self.pyexe, **kws) + assert_(s == 0) + assert_(o == 'posix') + + def check_basic(self, *kws): + s, o = exec_command.exec_command( + '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) + assert_(s != 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.stderr.write(\'0\');' + 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % + self.pyexe, **kws) + assert_(s == 0) + assert_(o == '012') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) + assert_(s == 15) + assert_(o == '') + + s, o = exec_command.exec_command( + '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) + assert_(s == 0) + assert_(o == 'Heipa') + + def check_execute_in(self, **kws): + with tempdir() as tmpdir: + fn = "file" + tmpfile = os.path.join(tmpdir, fn) + f = open(tmpfile, 'w') + f.write('Hello') + f.close() + + s, o = exec_command.exec_command( + '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % + (self.pyexe, fn), **kws) + assert_(s != 0) + assert_(o != '') + s, o = exec_command.exec_command( + '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' + 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) + assert_(s == 0) + assert_(o == 'Hello') + + def 
test_basic(self): + with redirect_stdout(StringIO()): + with redirect_stderr(StringIO()): + if os.name == "posix": + self.check_posix(use_tee=0) + self.check_posix(use_tee=1) + elif os.name == "nt": + self.check_nt(use_tee=0) + self.check_nt(use_tee=1) + self.check_execute_in(use_tee=0) + self.check_execute_in(use_tee=1) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_exec_command.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_exec_command.pyc new file mode 100644 index 0000000..4deb336 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_exec_command.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler.py new file mode 100644 index 0000000..ba19a97 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler.py @@ -0,0 +1,81 @@ +from __future__ import division, absolute_import, print_function + +import pytest + +from numpy.testing import assert_, suppress_warnings +import numpy.distutils.fcompiler + +customizable_flags = [ + ('f77', 'F77FLAGS'), + ('f90', 'F90FLAGS'), + ('free', 'FREEFLAGS'), + ('arch', 'FARCH'), + ('debug', 'FDEBUG'), + ('flags', 'FFLAGS'), + ('linker_so', 'LDFLAGS'), +] + + +def test_fcompiler_flags(monkeypatch): + monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') + flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + prev_flags = getattr(flag_vars, opt) + + monkeypatch.setenv(envvar, new_flag) + new_flags = getattr(flag_vars, opt) + + monkeypatch.delenv(envvar) + assert_(new_flags == [new_flag]) + + monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + prev_flags = getattr(flag_vars, opt) + monkeypatch.setenv(envvar, new_flag) + new_flags = getattr(flag_vars, opt) + + monkeypatch.delenv(envvar) + if prev_flags is None: + assert_(new_flags == [new_flag]) + else: + assert_(new_flags == prev_flags + [new_flag]) + + +def test_fcompiler_flags_append_warning(monkeypatch): + # Test to check that the warning for append behavior changing in future + # is triggered. Need to use a real compiler instance so that we have + # non-empty flags to start with (otherwise the "if var and append" check + # will always be false). 
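+    # (The behaviour under test is driven by environment variables; a
+    # hypothetical invocation exercising the append path would look like
+    #     NPY_DISTUTILS_APPEND_FLAGS=1 FFLAGS="-Og" python setup.py build
+    # i.e. -Og is appended to the compiler's default flags instead of
+    # replacing them.)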
+ try: + with suppress_warnings() as sup: + sup.record() + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + fc.customize() + except numpy.distutils.fcompiler.CompilerNotFound: + pytest.skip("gfortran not found, so can't execute this test") + + # Ensure NPY_DISTUTILS_APPEND_FLAGS not defined + monkeypatch.delenv('NPY_DISTUTILS_APPEND_FLAGS', raising=False) + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + with suppress_warnings() as sup: + sup.record() + prev_flags = getattr(fc.flag_vars, opt) + + monkeypatch.setenv(envvar, new_flag) + with suppress_warnings() as sup: + sup.record() + new_flags = getattr(fc.flag_vars, opt) + if prev_flags: + # Check that warning was issued + assert len(sup.log) == 1 + + monkeypatch.delenv(envvar) + assert_(new_flags == [new_flag]) + diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler.pyc new file mode 100644 index 0000000..296f78e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py new file mode 100644 index 0000000..49208aa --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py @@ -0,0 +1,57 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import assert_ + +import numpy.distutils.fcompiler + +g77_version_strings = [ + ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), + ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), + ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), + ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), + ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' + ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), +] + +gfortran_version_strings = [ + ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', + '4.0.3'), + ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), + ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), + ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), + ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), + ('4.8.0', '4.8.0'), + ('4.0.3-7', '4.0.3'), + ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", + '4.9.1'), + ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" + "gfortran: warning: yet another warning\n4.9.1", + '4.9.1'), + ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') +] + +class TestG77Versions(object): + def test_g77_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, version in g77_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_g77(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, _ in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v is None, (vs, v)) + +class TestGFortranVersions(object): + def test_gfortran_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, version in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_gfortran(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, _ in g77_version_strings: + v = fc.version_match(vs) + assert_(v 
is None, (vs, v)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.pyc new file mode 100644 index 0000000..9a6c6a6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_gnu.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.py new file mode 100644 index 0000000..5e014ba --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.py @@ -0,0 +1,32 @@ +from __future__ import division, absolute_import, print_function + +import numpy.distutils.fcompiler +from numpy.testing import assert_ + + +intel_32bit_version_strings = [ + ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" + "running on Intel(R) 32, Version 11.1", '11.1'), +] + +intel_64bit_version_strings = [ + ("Intel(R) Fortran IA-64 Compiler Professional for applications" + "running on IA-64, Version 11.0", '11.0'), + ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" + "running on Intel(R) 64, Version 11.1", '11.1') +] + +class TestIntelFCompilerVersions(object): + def test_32bit_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') + for vs, version in intel_32bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) + + +class TestIntelEM64TFCompilerVersions(object): + def test_64bit_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') + for vs, version in intel_64bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.pyc new file mode 100644 index 0000000..e812829 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_intel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py new file mode 100644 index 0000000..1c93605 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py @@ -0,0 +1,24 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import assert_ +import numpy.distutils.fcompiler + +nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' + '6.2(Chiyoda) Build 6200', '6.2'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.1(Tozai) Build 6136', '6.1'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.0(Hibiya) Build 1021', '6.0'), + ('nagfor', 'NAG Fortran Compiler Release ' + '5.3.2(971)', '5.3.2'), + ('nag', 'NAGWare Fortran 95 compiler Release 5.1' + '(347,355-367,375,380-383,389,394,399,401-402,407,' + '431,435,437,446,459-460,463,472,494,496,503,508,' + '511,517,529,555,557,565)', '5.1')] + +class TestNagFCompilerVersions(object): + def test_version_match(self): + for comp, vs, version in nag_version_strings: + fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) + v = fc.version_match(vs) + assert_(v == version) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.pyc 
b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.pyc new file mode 100644 index 0000000..674e0c9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_from_template.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_from_template.py new file mode 100644 index 0000000..5881754 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_from_template.py @@ -0,0 +1,44 @@ + +from numpy.distutils.from_template import process_str +from numpy.testing import assert_equal + + +pyf_src = """ +python module foo + <_rd=real,double precision> + interface + subroutine foosub(tol) + <_rd>, intent(in,out) :: tol + end subroutine foosub + end interface +end python module foo +""" + +expected_pyf = """ +python module foo + interface + subroutine sfoosub(tol) + real, intent(in,out) :: tol + end subroutine sfoosub + subroutine dfoosub(tol) + double precision, intent(in,out) :: tol + end subroutine dfoosub + end interface +end python module foo +""" + + +def normalize_whitespace(s): + """ + Remove leading and trailing whitespace, and convert internal + stretches of whitespace to a single space. + """ + return ' '.join(s.split()) + + +def test_from_template(): + """Regression test for gh-10712.""" + pyf = process_str(pyf_src) + normalized_pyf = normalize_whitespace(pyf) + normalized_expected_pyf = normalize_whitespace(expected_pyf) + assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_from_template.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_from_template.pyc new file mode 100644 index 0000000..bf74496 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_from_template.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_misc_util.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_misc_util.py new file mode 100644 index 0000000..3e239cf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_misc_util.py @@ -0,0 +1,84 @@ +from __future__ import division, absolute_import, print_function + +from os.path import join, sep, dirname + +from numpy.distutils.misc_util import ( + appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info + ) +from numpy.testing import ( + assert_, assert_equal + ) + +ajoin = lambda *paths: join(*((sep,)+paths)) + +class TestAppendpath(object): + + def test_1(self): + assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) + assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) + assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) + assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) + + def test_2(self): + assert_equal(appendpath('prefix/sub', 'name'), + join('prefix', 'sub', 'name')) + assert_equal(appendpath('prefix/sub', 'sup/name'), + join('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub', '/prefix/name'), + ajoin('prefix', 'sub', 'name')) + + def test_3(self): + assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), + ajoin('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) + 
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) + +class TestMinrelpath(object): + + def test_1(self): + n = lambda path: path.replace('/', sep) + assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) + assert_equal(minrelpath('..'), '..') + assert_equal(minrelpath(n('aa/..')), '') + assert_equal(minrelpath(n('aa/../bb')), 'bb') + assert_equal(minrelpath(n('aa/bb/..')), 'aa') + assert_equal(minrelpath(n('aa/bb/../..')), '') + assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) + assert_equal(minrelpath(n('.././..')), n('../..')) + assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) + +class TestGpaths(object): + + def test_gpaths(self): + local_path = minrelpath(join(dirname(__file__), '..')) + ls = gpaths('command/*.py', local_path) + assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) + f = gpaths('system_info.py', local_path) + assert_(join(local_path, 'system_info.py') == f[0], repr(f)) + +class TestSharedExtension(object): + + def test_get_shared_lib_extension(self): + import sys + ext = get_shared_lib_extension(is_python_ext=False) + if sys.platform.startswith('linux'): + assert_equal(ext, '.so') + elif sys.platform.startswith('gnukfreebsd'): + assert_equal(ext, '.so') + elif sys.platform.startswith('darwin'): + assert_equal(ext, '.dylib') + elif sys.platform.startswith('win'): + assert_equal(ext, '.dll') + # just check for no crash + assert_(get_shared_lib_extension(is_python_ext=True)) + + +def test_installed_npymath_ini(): + # Regression test for gh-7707. If npymath.ini wasn't installed, then this + # will give an error. + info = get_info('npymath') + + assert isinstance(info, dict) + assert "define_macros" in info diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_misc_util.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_misc_util.pyc new file mode 100644 index 0000000..4887268 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_misc_util.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.py new file mode 100644 index 0000000..537e16e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.py @@ -0,0 +1,86 @@ +from __future__ import division, absolute_import, print_function + +import os + +from numpy.distutils.npy_pkg_config import read_config, parse_flags +from numpy.testing import temppath, assert_ + +simple = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[default] +cflags = -I/usr/include +libs = -L/usr/lib +""" +simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', + 'version': '0.1', 'name': 'foo'} + +simple_variable = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[variables] +prefix = /foo/bar +libdir = ${prefix}/lib +includedir = ${prefix}/include + +[default] +cflags = -I${includedir} +libs = -L${libdir} +""" +simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', + 'version': '0.1', 'name': 'foo'} + +class TestLibraryInfo(object): + def test_simple(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_d['cflags']) + assert_(out.libs() == simple_d['libflags']) + assert_(out.name == 
simple_d['name']) + assert_(out.version == simple_d['version']) + + def test_simple_variable(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple_variable) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_variable_d['cflags']) + assert_(out.libs() == simple_variable_d['libflags']) + assert_(out.name == simple_variable_d['name']) + assert_(out.version == simple_variable_d['version']) + out.vars['prefix'] = '/Users/david' + assert_(out.cflags() == '-I/Users/david/include') + +class TestParseFlags(object): + def test_simple_cflags(self): + d = parse_flags("-I/usr/include") + assert_(d['include_dirs'] == ['/usr/include']) + + d = parse_flags("-I/usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + d = parse_flags("-I /usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + def test_simple_lflags(self): + d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) + + d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.pyc new file mode 100644 index 0000000..537b00f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_npy_pkg_config.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_shell_utils.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_shell_utils.py new file mode 100644 index 0000000..a034424 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_shell_utils.py @@ -0,0 +1,79 @@ +from __future__ import division, absolute_import, print_function + +import pytest +import subprocess +import os +import json +import sys + +from numpy.distutils import _shell_utils + +argv_cases = [ + [r'exe'], + [r'path/exe'], + [r'path\exe'], + [r'\\server\path\exe'], + [r'path to/exe'], + [r'path to\exe'], + + [r'exe', '--flag'], + [r'path/exe', '--flag'], + [r'path\exe', '--flag'], + [r'path to/exe', '--flag'], + [r'path to\exe', '--flag'], + + # flags containing literal quotes in their name + [r'path to/exe', '--flag-"quoted"'], + [r'path to\exe', '--flag-"quoted"'], + [r'path to/exe', '"--flag-quoted"'], + [r'path to\exe', '"--flag-quoted"'], +] + + +@pytest.fixture(params=[ + _shell_utils.WindowsParser, + _shell_utils.PosixParser +]) +def Parser(request): + return request.param + + +@pytest.fixture +def runner(Parser): + if Parser != _shell_utils.NativeParser: + pytest.skip('Unable to run with non-native parser') + + if Parser == _shell_utils.WindowsParser: + return lambda cmd: subprocess.check_output(cmd) + elif Parser == _shell_utils.PosixParser: + # posix has no non-shell string parsing + return lambda cmd: subprocess.check_output(cmd, shell=True) + else: + raise NotImplementedError + + +@pytest.mark.parametrize('argv', argv_cases) +def test_join_matches_subprocess(Parser, runner, argv): + """ + Test that join produces strings understood by subprocess + """ + # invoke python to return its arguments as json + cmd = [ + sys.executable, '-c', + 'import json, sys; print(json.dumps(sys.argv[1:]))' + ] + joined 
= Parser.join(cmd + argv) + json_out = runner(joined).decode() + assert json.loads(json_out) == argv + + +@pytest.mark.parametrize('argv', argv_cases) +def test_roundtrip(Parser, argv): + """ + Test that split is the inverse operation of join + """ + try: + joined = Parser.join(argv) + assert argv == Parser.split(joined) + except NotImplementedError: + pytest.skip("Not implemented") diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_shell_utils.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_shell_utils.pyc new file mode 100644 index 0000000..0ff04cd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_shell_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_system_info.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_system_info.py new file mode 100644 index 0000000..f7e275a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_system_info.py @@ -0,0 +1,237 @@ +from __future__ import division, print_function + +import os +import shutil +import pytest +from tempfile import mkstemp, mkdtemp +from subprocess import Popen, PIPE +from distutils.errors import DistutilsError + +from numpy.distutils import ccompiler, customized_ccompiler +from numpy.testing import assert_, assert_equal +from numpy.distutils.system_info import system_info, ConfigParser +from numpy.distutils.system_info import default_lib_dirs, default_include_dirs +from numpy.distutils import _shell_utils + + +def get_class(name, notfound_action=1): + """ + notfound_action: + 0 - do nothing + 1 - display warning message + 2 - raise error + """ + cl = {'temp1': Temp1Info, + 'temp2': Temp2Info + }.get(name.lower(), _system_info) + return cl() + +simple_site = """ +[ALL] +library_dirs = {dir1:s}{pathsep:s}{dir2:s} +libraries = {lib1:s},{lib2:s} +extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os +runtime_library_dirs = {dir1:s} + +[temp1] +library_dirs = {dir1:s} +libraries = {lib1:s} +runtime_library_dirs = {dir1:s} + +[temp2] +library_dirs = {dir2:s} +libraries = {lib2:s} +extra_link_args = -Wl,-rpath={lib2_escaped:s} +rpath = {dir2:s} +""" +site_cfg = simple_site + +fakelib_c_text = """ +/* This file is generated from numpy/distutils/testing/test_system_info.py */ +#include +void foo(void) { + printf("Hello foo"); +} +void bar(void) { + printf("Hello bar"); +} +""" + +def have_compiler(): + """ Return True if there appears to be an executable compiler + """ + compiler = customized_ccompiler() + try: + cmd = compiler.compiler # Unix compilers + except AttributeError: + try: + if not compiler.initialized: + compiler.initialize() # MSVC is different + except (DistutilsError, ValueError): + return False + cmd = [compiler.cc] + try: + p = Popen(cmd, stdout=PIPE, stderr=PIPE) + p.stdout.close() + p.stderr.close() + p.wait() + except OSError: + return False + return True + + +HAVE_COMPILER = have_compiler() + + +class _system_info(system_info): + + def __init__(self, + default_lib_dirs=default_lib_dirs, + default_include_dirs=default_include_dirs, + verbosity=1, + ): + self.__class__.info = {} + self.local_prefixes = [] + defaults = {'library_dirs': '', + 'include_dirs': '', + 'runtime_library_dirs': '', + 'rpath': '', + 'src_dirs': '', + 'search_static_first': "0", + 'extra_compile_args': '', + 'extra_link_args': ''} + self.cp = ConfigParser(defaults) + # We have to parse the config files afterwards + # to have a 
consistent temporary filepath + + def _check_libs(self, lib_dirs, libs, opt_libs, exts): + """Override _check_libs to return with all dirs """ + info = {'libraries': libs, 'library_dirs': lib_dirs} + return info + + +class Temp1Info(_system_info): + """For testing purposes""" + section = 'temp1' + + +class Temp2Info(_system_info): + """For testing purposes""" + section = 'temp2' + + +class TestSystemInfoReading(object): + + def setup(self): + """ Create the libraries """ + # Create 2 sources and 2 libraries + self._dir1 = mkdtemp() + self._src1 = os.path.join(self._dir1, 'foo.c') + self._lib1 = os.path.join(self._dir1, 'libfoo.so') + self._dir2 = mkdtemp() + self._src2 = os.path.join(self._dir2, 'bar.c') + self._lib2 = os.path.join(self._dir2, 'libbar.so') + # Update local site.cfg + global simple_site, site_cfg + site_cfg = simple_site.format(**{ + 'dir1': self._dir1, + 'lib1': self._lib1, + 'dir2': self._dir2, + 'lib2': self._lib2, + 'pathsep': os.pathsep, + 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2]) + }) + # Write site.cfg + fd, self._sitecfg = mkstemp() + os.close(fd) + with open(self._sitecfg, 'w') as fd: + fd.write(site_cfg) + # Write the sources + with open(self._src1, 'w') as fd: + fd.write(fakelib_c_text) + with open(self._src2, 'w') as fd: + fd.write(fakelib_c_text) + # We create all class-instances + + def site_and_parse(c, site_cfg): + c.files = [site_cfg] + c.parse_config_files() + return c + self.c_default = site_and_parse(get_class('default'), self._sitecfg) + self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) + self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) + + def teardown(self): + # Do each removal separately + try: + shutil.rmtree(self._dir1) + except Exception: + pass + try: + shutil.rmtree(self._dir2) + except Exception: + pass + try: + os.remove(self._sitecfg) + except Exception: + pass + + def test_all(self): + # Read in all information in the ALL block + tsi = self.c_default + assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) + assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) + assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) + extra = tsi.calc_extra_info() + assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) + + def test_temp1(self): + # Read in all information in the temp1 block + tsi = self.c_temp1 + assert_equal(tsi.get_lib_dirs(), [self._dir1]) + assert_equal(tsi.get_libraries(), [self._lib1]) + assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) + + def test_temp2(self): + # Read in all information in the temp2 block + tsi = self.c_temp2 + assert_equal(tsi.get_lib_dirs(), [self._dir2]) + assert_equal(tsi.get_libraries(), [self._lib2]) + # Now from rpath and not runtime_library_dirs + assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) + extra = tsi.calc_extra_info() + assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) + + @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") + def test_compile1(self): + # Compile source and link the first source + c = customized_ccompiler() + previousDir = os.getcwd() + try: + # Change directory to not screw up directories + os.chdir(self._dir1) + c.compile([os.path.basename(self._src1)], output_dir=self._dir1) + # Ensure that the object exists + assert_(os.path.isfile(self._src1.replace('.c', '.o')) or + os.path.isfile(self._src1.replace('.c', '.obj'))) + finally: + os.chdir(previousDir) + + @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") + 
@pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), + reason="Fails with MSVC compiler ") + def test_compile2(self): + # Compile source and link the second source + tsi = self.c_temp2 + c = customized_ccompiler() + extra_link_args = tsi.calc_extra_info()['extra_link_args'] + previousDir = os.getcwd() + try: + # Change directory to not screw up directories + os.chdir(self._dir2) + c.compile([os.path.basename(self._src2)], output_dir=self._dir2, + extra_postargs=extra_link_args) + # Ensure that the object exists + assert_(os.path.isfile(self._src2.replace('.c', '.o'))) + finally: + os.chdir(previousDir) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_system_info.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_system_info.pyc new file mode 100644 index 0000000..be7af3b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/tests/test_system_info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/unixccompiler.py b/project/venv/lib/python2.7/site-packages/numpy/distutils/unixccompiler.py new file mode 100644 index 0000000..11b2cce --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/distutils/unixccompiler.py @@ -0,0 +1,139 @@ +""" +unixccompiler - can handle very long argument lists for ar. + +""" +from __future__ import division, absolute_import, print_function + +import os + +from distutils.errors import DistutilsExecError, CompileError +from distutils.unixccompiler import * +from numpy.distutils.ccompiler import replace_method +from numpy.distutils.compat import get_exception +from numpy.distutils.misc_util import _commandline_dep_string + +if sys.version_info[0] < 3: + from . import log +else: + from numpy.distutils import log + +# Note that UnixCCompiler._compile appeared in Python 2.3 +def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): + """Compile a single source files with a Unix-style compiler.""" + # HP ad-hoc fix, see ticket 1383 + ccomp = self.compiler_so + if ccomp[0] == 'aCC': + # remove flags that will trigger ANSI-C mode for aCC + if '-Ae' in ccomp: + ccomp.remove('-Ae') + if '-Aa' in ccomp: + ccomp.remove('-Aa') + # add flags for (almost) sane C++ handling + ccomp += ['-AA'] + self.compiler_so = ccomp + # ensure OPT environment variable is read + if 'OPT' in os.environ: + from distutils.sysconfig import get_config_vars + opt = " ".join(os.environ['OPT'].split()) + gcv_opt = " ".join(get_config_vars('OPT')[0].split()) + ccomp_s = " ".join(self.compiler_so) + if opt not in ccomp_s: + ccomp_s = ccomp_s.replace(gcv_opt, opt) + self.compiler_so = ccomp_s.split() + llink_s = " ".join(self.linker_so) + if opt not in llink_s: + self.linker_so = llink_s.split() + opt.split() + + display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) + + # gcc style automatic dependencies, outputs a makefile (-MF) that lists + # all headers needed by a c file as a side effect of compilation (-MMD) + if getattr(self, '_auto_depends', False): + deps = ['-MMD', '-MF', obj + '.d'] + else: + deps = [] + + try: + self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + + extra_postargs, display = display) + except DistutilsExecError: + msg = str(get_exception()) + raise CompileError(msg) + + # add commandline flags to dependency file + if deps: + with open(obj + '.d', 'a') as f: + f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) + +replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) + 
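+# replace_method (from numpy.distutils.ccompiler) patches the method on the
+# distutils class itself, so every UnixCCompiler created afterwards picks up
+# the NumPy override. A minimal sketch of the same monkey-patching pattern,
+# assuming a plain new-style class (illustrative only, not the real helper):
+#
+#     def patch(klass, name, func):
+#         setattr(klass, name, func)  # func(self, ...) now acts as a method
+#
+# The real helper additionally handles Python 2's unbound-method wrapping.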
+ +def UnixCCompiler_create_static_lib(self, objects, output_libname, + output_dir=None, debug=0, target_lang=None): + """ + Build a static library in a separate sub-process. + + Parameters + ---------- + objects : list or tuple of str + List of paths to object files used to build the static library. + output_libname : str + The library name as an absolute or relative (if `output_dir` is used) + path. + output_dir : str, optional + The path to the output directory. Default is None, in which case + the ``output_dir`` attribute of the UnixCCompiler instance. + debug : bool, optional + This parameter is not used. + target_lang : str, optional + This parameter is not used. + + Returns + ------- + None + + """ + objects, output_dir = self._fix_object_args(objects, output_dir) + + output_filename = \ + self.library_filename(output_libname, output_dir=output_dir) + + if self._need_link(objects, output_filename): + try: + # previous .a may be screwed up; best to remove it first + # and recreate. + # Also, ar on OS X doesn't handle updating universal archives + os.unlink(output_filename) + except (IOError, OSError): + pass + self.mkpath(os.path.dirname(output_filename)) + tmp_objects = objects + self.objects + while tmp_objects: + objects = tmp_objects[:50] + tmp_objects = tmp_objects[50:] + display = '%s: adding %d object files to %s' % ( + os.path.basename(self.archiver[0]), + len(objects), output_filename) + self.spawn(self.archiver + [output_filename] + objects, + display = display) + + # Not many Unices required ranlib anymore -- SunOS 4.x is, I + # think the only major Unix that does. Maybe we need some + # platform intelligence here to skip ranlib if it's not + # needed -- or maybe Python's configure script took care of + # it for us, hence the check for leading colon. + if self.ranlib: + display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), + output_filename) + try: + self.spawn(self.ranlib + [output_filename], + display = display) + except DistutilsExecError: + msg = str(get_exception()) + raise LibError(msg) + else: + log.debug("skipping %s (up-to-date)", output_filename) + return + +replace_method(UnixCCompiler, 'create_static_lib', + UnixCCompiler_create_static_lib) diff --git a/project/venv/lib/python2.7/site-packages/numpy/distutils/unixccompiler.pyc b/project/venv/lib/python2.7/site-packages/numpy/distutils/unixccompiler.pyc new file mode 100644 index 0000000..b8503b8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/distutils/unixccompiler.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/doc/__init__.py new file mode 100644 index 0000000..b6f1fa7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/doc/__init__.py @@ -0,0 +1,28 @@ +from __future__ import division, absolute_import, print_function + +import os + +ref_dir = os.path.join(os.path.dirname(__file__)) + +__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and + not f.startswith('__')) + +for f in __all__: + __import__(__name__ + '.' 
+ f) + +del f, ref_dir + +__doc__ = """\ +Topical documentation +===================== + +The following topics are available: +%s + +You can view them by + +>>> help(np.doc.TOPIC) #doctest: +SKIP + +""" % '\n- '.join([''] + __all__) + +__all__.extend(['__doc__']) diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/__init__.pyc new file mode 100644 index 0000000..704ef63 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/basics.py b/project/venv/lib/python2.7/site-packages/numpy/doc/basics.py new file mode 100644 index 0000000..c87a40c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/doc/basics.py @@ -0,0 +1,303 @@ +""" +============ +Array basics +============ + +Array types and conversions between types +========================================= + +NumPy supports a much greater variety of numerical types than Python does. +This section shows which are available, and how to modify an array's data-type. + +The primitive types supported are tied closely to those in C: + +.. list-table:: + :header-rows: 1 + + * - Numpy type + - C type + - Description + + * - `np.bool` + - ``bool`` + - Boolean (True or False) stored as a byte + + * - `np.byte` + - ``signed char`` + - Platform-defined + + * - `np.ubyte` + - ``unsigned char`` + - Platform-defined + + * - `np.short` + - ``short`` + - Platform-defined + + * - `np.ushort` + - ``unsigned short`` + - Platform-defined + + * - `np.intc` + - ``int`` + - Platform-defined + + * - `np.uintc` + - ``unsigned int`` + - Platform-defined + + * - `np.int_` + - ``long`` + - Platform-defined + + * - `np.uint` + - ``unsigned long`` + - Platform-defined + + * - `np.longlong` + - ``long long`` + - Platform-defined + + * - `np.ulonglong` + - ``unsigned long long`` + - Platform-defined + + * - `np.half` / `np.float16` + - + - Half precision float: + sign bit, 5 bits exponent, 10 bits mantissa + + * - `np.single` + - ``float`` + - Platform-defined single precision float: + typically sign bit, 8 bits exponent, 23 bits mantissa + + * - `np.double` + - ``double`` + - Platform-defined double precision float: + typically sign bit, 11 bits exponent, 52 bits mantissa. + + * - `np.longdouble` + - ``long double`` + - Platform-defined extended-precision float + + * - `np.csingle` + - ``float complex`` + - Complex number, represented by two single-precision floats (real and imaginary components) + + * - `np.cdouble` + - ``double complex`` + - Complex number, represented by two double-precision floats (real and imaginary components). + + * - `np.clongdouble` + - ``long double complex`` + - Complex number, represented by two extended-precision floats (real and imaginary components). + + +Since many of these have platform-dependent definitions, a set of fixed-size +aliases are provided: + +.. 
list-table:: + :header-rows: 1 + + * - Numpy type + - C type + - Description + + * - `np.int8` + - ``int8_t`` + - Byte (-128 to 127) + + * - `np.int16` + - ``int16_t`` + - Integer (-32768 to 32767) + + * - `np.int32` + - ``int32_t`` + - Integer (-2147483648 to 2147483647) + + * - `np.int64` + - ``int64_t`` + - Integer (-9223372036854775808 to 9223372036854775807) + + * - `np.uint8` + - ``uint8_t`` + - Unsigned integer (0 to 255) + + * - `np.uint16` + - ``uint16_t`` + - Unsigned integer (0 to 65535) + + * - `np.uint32` + - ``uint32_t`` + - Unsigned integer (0 to 4294967295) + + * - `np.uint64` + - ``uint64_t`` + - Unsigned integer (0 to 18446744073709551615) + + * - `np.intp` + - ``intptr_t`` + - Integer used for indexing, typically the same as ``ssize_t`` + + * - `np.uintp` + - ``uintptr_t`` + - Integer large enough to hold a pointer + + * - `np.float32` + - ``float`` + - + + * - `np.float64` / `np.float_` + - ``double`` + - Note that this matches the precision of the builtin python `float`. + + * - `np.complex64` + - ``float complex`` + - Complex number, represented by two 32-bit floats (real and imaginary components) + + * - `np.complex128` / `np.complex_` + - ``double complex`` + - Note that this matches the precision of the builtin python `complex`. + + +NumPy numerical types are instances of ``dtype`` (data-type) objects, each +having unique characteristics. Once you have imported NumPy using + + :: + + >>> import numpy as np + +the dtypes are available as ``np.bool_``, ``np.float32``, etc. + +Advanced types, not listed in the table above, are explored in +section :ref:`structured_arrays`. + +There are 5 basic numerical types representing booleans (bool), integers (int), +unsigned integers (uint) floating point (float) and complex. Those with numbers +in their name indicate the bitsize of the type (i.e. how many bits are needed +to represent a single value in memory). Some types, such as ``int`` and +``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit +vs. 64-bit machines). This should be taken into account when interfacing +with low-level code (such as C or Fortran) where the raw memory is addressed. + +Data-types can be used as functions to convert python numbers to array scalars +(see the array scalar section for an explanation), python sequences of numbers +to arrays of that type, or as arguments to the dtype keyword that many numpy +functions or methods accept. Some examples:: + + >>> import numpy as np + >>> x = np.float32(1.0) + >>> x + 1.0 + >>> y = np.int_([1,2,4]) + >>> y + array([1, 2, 4]) + >>> z = np.arange(3, dtype=np.uint8) + >>> z + array([0, 1, 2], dtype=uint8) + +Array types can also be referred to by character codes, mostly to retain +backward compatibility with older packages such as Numeric. Some +documentation may still refer to these, for example:: + + >>> np.array([1, 2, 3], dtype='f') + array([ 1., 2., 3.], dtype=float32) + +We recommend using dtype objects instead. + +To convert the type of an array, use the .astype() method (preferred) or +the type itself as a function. For example: :: + + >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE + array([ 0., 1., 2.]) + >>> np.int8(z) + array([0, 1, 2], dtype=int8) + +Note that, above, we use the *Python* float object as a dtype. NumPy knows +that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``, +that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``. +The other data-types do not have Python equivalents. 
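+
+A quick doctest-style illustration of that mapping (``np.dtype`` accepts
+the builtin type directly)::
+
+    >>> np.dtype(float) == np.dtype(np.float_)
+    True
+    >>> np.dtype(complex) == np.dtype(np.complex_)
+    True
+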
+ +To determine the type of an array, look at the dtype attribute:: + + >>> z.dtype + dtype('uint8') + +dtype objects also contain information about the type, such as its bit-width +and its byte-order. The data type can also be used indirectly to query +properties of the type, such as whether it is an integer:: + + >>> d = np.dtype(int) + >>> d + dtype('int32') + + >>> np.issubdtype(d, np.integer) + True + + >>> np.issubdtype(d, np.floating) + False + + +Array Scalars +============= + +NumPy generally returns elements of arrays as array scalars (a scalar +with an associated dtype). Array scalars differ from Python scalars, but +for the most part they can be used interchangeably (the primary +exception is for versions of Python older than v2.x, where integer array +scalars cannot act as indices for lists and tuples). There are some +exceptions, such as when code requires very specific attributes of a scalar +or when it checks specifically whether a value is a Python scalar. Generally, +problems are easily fixed by explicitly converting array scalars +to Python scalars, using the corresponding Python type function +(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``). + +The primary advantage of using array scalars is that +they preserve the array type (Python may not have a matching scalar type +available, e.g. ``int16``). Therefore, the use of array scalars ensures +identical behaviour between arrays and scalars, irrespective of whether the +value is inside an array or not. NumPy scalars also have many of the same +methods arrays do. + +Extended Precision +================== + +Python's floating-point numbers are usually 64-bit floating-point numbers, +nearly equivalent to ``np.float64``. In some unusual situations it may be +useful to use floating-point numbers with more precision. Whether this +is possible in numpy depends on the hardware and on the development +environment: specifically, x86 machines provide hardware floating-point +with 80-bit precision, and while most C compilers provide this as their +``long double`` type, MSVC (standard for Windows builds) makes +``long double`` identical to ``double`` (64 bits). NumPy makes the +compiler's ``long double`` available as ``np.longdouble`` (and +``np.clongdouble`` for the complex numbers). You can find out what your +numpy provides with ``np.finfo(np.longdouble)``. + +NumPy does not provide a dtype with more precision than C +``long double``\\s; in particular, the 128-bit IEEE quad precision +data type (FORTRAN's ``REAL*16``\\) is not available. + +For efficient memory alignment, ``np.longdouble`` is usually stored +padded with zero bits, either to 96 or 128 bits. Which is more efficient +depends on hardware and development environment; typically on 32-bit +systems they are padded to 96 bits, while on 64-bit systems they are +typically padded to 128 bits. ``np.longdouble`` is padded to the system +default; ``np.float96`` and ``np.float128`` are provided for users who +want specific padding. In spite of the names, ``np.float96`` and +``np.float128`` provide only as much precision as ``np.longdouble``, +that is, 80 bits on most x86 machines and 64 bits in standard +Windows builds. + +Be warned that even if ``np.longdouble`` offers more precision than +python ``float``, it is easy to lose that extra precision, since +python often forces values to pass through ``float``. 
For example, +the ``%`` formatting operator requires its arguments to be converted +to standard python types, and it is therefore impossible to preserve +extended precision even if many decimal places are requested. It can +be useful to test your code with the value +``1 + np.finfo(np.longdouble).eps``. + +""" +from __future__ import division, absolute_import, print_function diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/basics.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/basics.pyc new file mode 100644 index 0000000..bda1b8f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/basics.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/broadcasting.py b/project/venv/lib/python2.7/site-packages/numpy/doc/broadcasting.py new file mode 100644 index 0000000..0bdb6ae --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/doc/broadcasting.py @@ -0,0 +1,182 @@ +""" +======================== +Broadcasting over arrays +======================== + +.. note:: + See `this article + `_ + for illustrations of broadcasting concepts. + + +The term broadcasting describes how numpy treats arrays with different +shapes during arithmetic operations. Subject to certain constraints, +the smaller array is "broadcast" across the larger array so that they +have compatible shapes. Broadcasting provides a means of vectorizing +array operations so that looping occurs in C instead of Python. It does +this without making needless copies of data and usually leads to +efficient algorithm implementations. There are, however, cases where +broadcasting is a bad idea because it leads to inefficient use of memory +that slows computation. + +NumPy operations are usually done on pairs of arrays on an +element-by-element basis. In the simplest case, the two arrays must +have exactly the same shape, as in the following example: + + >>> a = np.array([1.0, 2.0, 3.0]) + >>> b = np.array([2.0, 2.0, 2.0]) + >>> a * b + array([ 2., 4., 6.]) + +NumPy's broadcasting rule relaxes this constraint when the arrays' +shapes meet certain constraints. The simplest broadcasting example occurs +when an array and a scalar value are combined in an operation: + +>>> a = np.array([1.0, 2.0, 3.0]) +>>> b = 2.0 +>>> a * b +array([ 2., 4., 6.]) + +The result is equivalent to the previous example where ``b`` was an array. +We can think of the scalar ``b`` being *stretched* during the arithmetic +operation into an array with the same shape as ``a``. The new elements in +``b`` are simply copies of the original scalar. The stretching analogy is +only conceptual. NumPy is smart enough to use the original scalar value +without actually making copies, so that broadcasting operations are as +memory and computationally efficient as possible. + +The code in the second example is more efficient than that in the first +because broadcasting moves less memory around during the multiplication +(``b`` is a scalar rather than an array). + +General Broadcasting Rules +========================== +When operating on two arrays, NumPy compares their shapes element-wise. +It starts with the trailing dimensions, and works its way forward. Two +dimensions are compatible when + +1) they are equal, or +2) one of them is 1 + +If these conditions are not met, a +``ValueError: operands could not be broadcast together`` exception is +thrown, indicating that the arrays have incompatible shapes. The size of +the resulting array is the maximum size along each dimension of the input +arrays. 
+ +Arrays do not need to have the same *number* of dimensions. For example, +if you have a ``256x256x3`` array of RGB values, and you want to scale +each color in the image by a different value, you can multiply the image +by a one-dimensional array with 3 values. Lining up the sizes of the +trailing axes of these arrays according to the broadcast rules, shows that +they are compatible:: + + Image (3d array): 256 x 256 x 3 + Scale (1d array): 3 + Result (3d array): 256 x 256 x 3 + +When either of the dimensions compared is one, the other is +used. In other words, dimensions with size 1 are stretched or "copied" +to match the other. + +In the following example, both the ``A`` and ``B`` arrays have axes with +length one that are expanded to a larger size during the broadcast +operation:: + + A (4d array): 8 x 1 x 6 x 1 + B (3d array): 7 x 1 x 5 + Result (4d array): 8 x 7 x 6 x 5 + +Here are some more examples:: + + A (2d array): 5 x 4 + B (1d array): 1 + Result (2d array): 5 x 4 + + A (2d array): 5 x 4 + B (1d array): 4 + Result (2d array): 5 x 4 + + A (3d array): 15 x 3 x 5 + B (3d array): 15 x 1 x 5 + Result (3d array): 15 x 3 x 5 + + A (3d array): 15 x 3 x 5 + B (2d array): 3 x 5 + Result (3d array): 15 x 3 x 5 + + A (3d array): 15 x 3 x 5 + B (2d array): 3 x 1 + Result (3d array): 15 x 3 x 5 + +Here are examples of shapes that do not broadcast:: + + A (1d array): 3 + B (1d array): 4 # trailing dimensions do not match + + A (2d array): 2 x 1 + B (3d array): 8 x 4 x 3 # second from last dimensions mismatched + +An example of broadcasting in practice:: + + >>> x = np.arange(4) + >>> xx = x.reshape(4,1) + >>> y = np.ones(5) + >>> z = np.ones((3,4)) + + >>> x.shape + (4,) + + >>> y.shape + (5,) + + >>> x + y + ValueError: operands could not be broadcast together with shapes (4,) (5,) + + >>> xx.shape + (4, 1) + + >>> y.shape + (5,) + + >>> (xx + y).shape + (4, 5) + + >>> xx + y + array([[ 1., 1., 1., 1., 1.], + [ 2., 2., 2., 2., 2.], + [ 3., 3., 3., 3., 3.], + [ 4., 4., 4., 4., 4.]]) + + >>> x.shape + (4,) + + >>> z.shape + (3, 4) + + >>> (x + z).shape + (3, 4) + + >>> x + z + array([[ 1., 2., 3., 4.], + [ 1., 2., 3., 4.], + [ 1., 2., 3., 4.]]) + +Broadcasting provides a convenient way of taking the outer product (or +any other outer operation) of two arrays. The following example shows an +outer addition operation of two 1-d arrays:: + + >>> a = np.array([0.0, 10.0, 20.0, 30.0]) + >>> b = np.array([1.0, 2.0, 3.0]) + >>> a[:, np.newaxis] + b + array([[ 1., 2., 3.], + [ 11., 12., 13.], + [ 21., 22., 23.], + [ 31., 32., 33.]]) + +Here the ``newaxis`` index operator inserts a new axis into ``a``, +making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array +with ``b``, which has shape ``(3,)``, yields a ``4x3`` array. 
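+
+The pairwise comparison described above can be condensed into a few lines of
+Python. The helper below is an illustrative sketch only (``broadcast_shape``
+is a hypothetical name, not a NumPy function), not NumPy's actual
+implementation::
+
+ >>> def broadcast_shape(shape_a, shape_b):
+ ...     # pad the shorter shape with leading 1s, then compare element-wise
+ ...     ndim = max(len(shape_a), len(shape_b))
+ ...     a = (1,) * (ndim - len(shape_a)) + tuple(shape_a)
+ ...     b = (1,) * (ndim - len(shape_b)) + tuple(shape_b)
+ ...     out = []
+ ...     for x, y in zip(a, b):
+ ...         if x != 1 and y != 1 and x != y:
+ ...             raise ValueError("operands could not be broadcast together")
+ ...         out.append(max(x, y))
+ ...     return tuple(out)
+ >>> broadcast_shape((8, 1, 6, 1), (7, 1, 5))
+ (8, 7, 6, 5)
+ >>> broadcast_shape((256, 256, 3), (3,))
+ (256, 256, 3)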
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/broadcasting.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/broadcasting.pyc
new file mode 100644
index 0000000..eb301dd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/broadcasting.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/byteswapping.py b/project/venv/lib/python2.7/site-packages/numpy/doc/byteswapping.py
new file mode 100644
index 0000000..f9491ed
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/doc/byteswapping.py
@@ -0,0 +1,156 @@
+"""
+
+=============================
+ Byteswapping and byte order
+=============================
+
+Introduction to byte ordering and ndarrays
+==========================================
+
+The ``ndarray`` is an object that provides a python array interface to data
+in memory.
+
+It often happens that the memory that you want to view with an array is
+not of the same byte ordering as the computer on which you are running
+Python.
+
+For example, I might be working on a computer with a little-endian CPU -
+such as an Intel Pentium, but I have loaded some data from a file
+written by a computer that is big-endian. Let's say I have loaded 4
+bytes from a file written by a Sun (big-endian) computer. I know that
+these 4 bytes represent two 16-bit integers. On a big-endian machine, a
+two-byte integer is stored with the Most Significant Byte (MSB) first,
+and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
+
+#. MSB integer 1
+#. LSB integer 1
+#. MSB integer 2
+#. LSB integer 2
+
+Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
+3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
+The bytes I have loaded from the file would have these contents:
+
+>>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2)
+>>> big_end_str
+'\\x00\\x01\\x03\\x02'
+
+We might want to use an ``ndarray`` to access these integers. In that
+case, we can create an array around this memory, and tell numpy that
+there are two integers, and that they are 16 bit and big-endian:
+
+>>> import numpy as np
+>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str)
+>>> big_end_arr[0]
+1
+>>> big_end_arr[1]
+770
+
+Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
+(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
+example, if our data represented a single unsigned 4-byte little-endian
+integer, the dtype string would be ``<u4``.
+
+>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str)
+>>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
+True
+
+Returning to our ``big_end_arr`` - in this case our underlying data is
+big-endian (data endianness) and we've set the dtype to match (the dtype
+is also big-endian). However, sometimes you need to flip these around.
+
+.. warning::
+
+    Scalars currently do not include byte order information, so extracting
+    a scalar from an array will return an integer in native byte order.
+    Hence:
+
+    >>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder
+    True
+
+Changing byte ordering
+======================
+
+As you can imagine from the introduction, there are two ways you can
+affect the relationship between the byte ordering of the array and the
+underlying memory it is looking at:
+
+* Change the byte-ordering information in the array dtype so that it
+  interprets the underlying data as being in a different byte order.
+  This is the role of ``arr.newbyteorder()``
+* Change the byte-ordering of the underlying data, leaving the dtype
+  interpretation as it was.  This is what ``arr.byteswap()`` does.
+
+The common situations in which you need to change byte ordering are:
+
+#. Your data and dtype endianness don't match, and you want to change
+   the dtype so that it matches the data.
+#. Your data and dtype endianness don't match, and you want to swap the
+   data so that they match the dtype
+#. Your data and dtype endianness match, but you want the data swapped
+   and the dtype to reflect this
+
+Data and dtype endianness don't match, change dtype to match data
+-----------------------------------------------------------------
+
+We make something where they don't match:
+
+>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str)
+>>> wrong_end_dtype_arr[0]
+256
+
+The obvious fix for this situation is to change the dtype so it gives
+the correct endianness:
+
+>>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
+>>> fixed_end_dtype_arr[0]
+1
+
+Note the array has not changed in memory:
+
+>>> fixed_end_dtype_arr.tobytes() == big_end_str
+True
+
+Data and type endianness don't match, change data to match dtype
+----------------------------------------------------------------
+
+You might want to do this if you need the data in memory to be a certain
+ordering.  For example you might be writing the memory out to a file
+that needs a certain byte ordering.
+
+>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
+>>> fixed_end_mem_arr[0]
+1
+
+Now the array *has* changed in memory:
+
+>>> fixed_end_mem_arr.tobytes() == big_end_str
+False
+
+Data and dtype endianness match, swap data and dtype
+----------------------------------------------------
+
+You may have a correctly specified array dtype, but you need the array
+to have the opposite byte order in memory, and you want the dtype to
+match so the array values make sense. 
In this case you just do both of
+the previous operations:
+
+>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
+>>> swapped_end_arr[0]
+1
+>>> swapped_end_arr.tobytes() == big_end_str
+False
+
+An easier way of casting the data to a specific dtype and byte ordering
+can be achieved with the ndarray astype method:
+
+>>> swapped_end_arr = big_end_arr.astype('<i2')
+>>> swapped_end_arr[0]
+1
+>>> swapped_end_arr.tobytes() == big_end_str
+False
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/byteswapping.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/byteswapping.pyc
new file mode 100644
index 0000000..6516478
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/byteswapping.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/constants.py b/project/venv/lib/python2.7/site-packages/numpy/doc/constants.py
new file mode 100644
index 0000000..21c7a3c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/doc/constants.py
@@ -0,0 +1,417 @@
+# -*- coding: utf-8 -*-
+"""
+=========
+Constants
+=========
+
+.. currentmodule:: numpy
+
+NumPy includes several constants:
+
+%(constant_list)s
+"""
+#
+# Note: the docstring is autogenerated.
+#
+from __future__ import division, absolute_import, print_function
+
+import textwrap, re
+
+# Maintain same format as in numpy.add_newdocs
+constants = []
+def add_newdoc(module, name, doc):
+    constants.append((name, doc))
+
+add_newdoc('numpy', 'Inf',
+    """
+    IEEE 754 floating point representation of (positive) infinity.
+
+    Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+    `inf`. For more details, see `inf`.
+
+    See Also
+    --------
+    inf
+
+    """)
+
+add_newdoc('numpy', 'Infinity',
+    """
+    IEEE 754 floating point representation of (positive) infinity.
+
+    Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+    `inf`. For more details, see `inf`.
+
+    See Also
+    --------
+    inf
+
+    """)
+
+add_newdoc('numpy', 'NAN',
+    """
+    IEEE 754 floating point representation of Not a Number (NaN).
+
+    `NaN` and `NAN` are equivalent definitions of `nan`. Please use
+    `nan` instead of `NAN`.
+
+    See Also
+    --------
+    nan
+
+    """)
+
+add_newdoc('numpy', 'NINF',
+    """
+    IEEE 754 floating point representation of negative infinity.
+
+    Returns
+    -------
+    y : float
+        A floating point representation of negative infinity.
+
+    See Also
+    --------
+    isinf : Shows which elements are positive or negative infinity
+
+    isposinf : Shows which elements are positive infinity
+
+    isneginf : Shows which elements are negative infinity
+
+    isnan : Shows which elements are Not a Number
+
+    isfinite : Shows which elements are finite (not one of Not a Number,
+        positive infinity and negative infinity)
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Also that positive infinity is not equivalent to negative infinity. But
+    infinity is equivalent to positive infinity.
+
+    Examples
+    --------
+    >>> np.NINF
+    -inf
+    >>> np.log(0)
+    -inf
+
+    """)
+
+add_newdoc('numpy', 'NZERO',
+    """
+    IEEE 754 floating point representation of negative zero.
+
+    Returns
+    -------
+    y : float
+        A floating point representation of negative zero.
+
+    See Also
+    --------
+    PZERO : Defines positive zero.
+
+    isinf : Shows which elements are positive or negative infinity.
+ + isposinf : Shows which elements are positive infinity. + + isneginf : Shows which elements are negative infinity. + + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite - not one of + Not a Number, positive infinity and negative infinity. + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). Negative zero is considered to be a finite number. + + Examples + -------- + >>> np.NZERO + -0.0 + >>> np.PZERO + 0.0 + + >>> np.isfinite([np.NZERO]) + array([ True]) + >>> np.isnan([np.NZERO]) + array([False]) + >>> np.isinf([np.NZERO]) + array([False]) + + """) + +add_newdoc('numpy', 'NaN', + """ + IEEE 754 floating point representation of Not a Number (NaN). + + `NaN` and `NAN` are equivalent definitions of `nan`. Please use + `nan` instead of `NaN`. + + See Also + -------- + nan + + """) + +add_newdoc('numpy', 'PINF', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'PZERO', + """ + IEEE 754 floating point representation of positive zero. + + Returns + ------- + y : float + A floating point representation of positive zero. + + See Also + -------- + NZERO : Defines negative zero. + + isinf : Shows which elements are positive or negative infinity. + + isposinf : Shows which elements are positive infinity. + + isneginf : Shows which elements are negative infinity. + + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite - not one of + Not a Number, positive infinity and negative infinity. + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). Positive zero is considered to be a finite number. + + Examples + -------- + >>> np.PZERO + 0.0 + >>> np.NZERO + -0.0 + + >>> np.isfinite([np.PZERO]) + array([ True]) + >>> np.isnan([np.PZERO]) + array([False]) + >>> np.isinf([np.PZERO]) + array([False]) + + """) + +add_newdoc('numpy', 'e', + """ + Euler's constant, base of natural logarithms, Napier's constant. + + ``e = 2.71828182845904523536028747135266249775724709369995...`` + + See Also + -------- + exp : Exponential function + log : Natural logarithm + + References + ---------- + https://en.wikipedia.org/wiki/E_%28mathematical_constant%29 + + """) + +add_newdoc('numpy', 'inf', + """ + IEEE 754 floating point representation of (positive) infinity. + + Returns + ------- + y : float + A floating point representation of positive infinity. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity + + isposinf : Shows which elements are positive infinity + + isneginf : Shows which elements are negative infinity + + isnan : Shows which elements are Not a Number + + isfinite : Shows which elements are finite (not one of Not a Number, + positive infinity and negative infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Also that positive infinity is not equivalent to negative infinity. But + infinity is equivalent to positive infinity. + + `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. + + Examples + -------- + >>> np.inf + inf + >>> np.array([1]) / 0. 
+ array([ Inf]) + + """) + +add_newdoc('numpy', 'infty', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'nan', + """ + IEEE 754 floating point representation of Not a Number (NaN). + + Returns + ------- + y : A floating point representation of Not a Number. + + See Also + -------- + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite (not one of + Not a Number, positive infinity and negative infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + + `NaN` and `NAN` are aliases of `nan`. + + Examples + -------- + >>> np.nan + nan + >>> np.log(-1) + nan + >>> np.log([-1, 1, 2]) + array([ NaN, 0. , 0.69314718]) + + """) + +add_newdoc('numpy', 'newaxis', + """ + A convenient alias for None, useful for indexing arrays. + + See Also + -------- + `numpy.doc.indexing` + + Examples + -------- + >>> newaxis is None + True + >>> x = np.arange(3) + >>> x + array([0, 1, 2]) + >>> x[:, newaxis] + array([[0], + [1], + [2]]) + >>> x[:, newaxis, newaxis] + array([[[0]], + [[1]], + [[2]]]) + >>> x[:, newaxis] * x + array([[0, 0, 0], + [0, 1, 2], + [0, 2, 4]]) + + Outer product, same as ``outer(x, y)``: + + >>> y = np.arange(3, 6) + >>> x[:, newaxis] * y + array([[ 0, 0, 0], + [ 3, 4, 5], + [ 6, 8, 10]]) + + ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``: + + >>> x[newaxis, :].shape + (1, 3) + >>> x[newaxis].shape + (1, 3) + >>> x[None].shape + (1, 3) + >>> x[:, newaxis].shape + (3, 1) + + """) + +add_newdoc('numpy', 'pi', + """ + ``pi = 3.1415926535897932384626433...`` + + References + ---------- + https://en.wikipedia.org/wiki/Pi + + """) + +add_newdoc('numpy', 'euler_gamma', + """ + ``γ = 0.5772156649015328606065120900824024310421...`` + + References + ---------- + https://en.wikipedia.org/wiki/Euler-Mascheroni_constant + + """) + +if __doc__: + constants_str = [] + constants.sort() + for name, doc in constants: + s = textwrap.dedent(doc).replace("\n", "\n ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + new_lines.append('%s.. rubric:: %s' % (m.group(1), prev)) + new_lines.append('') + else: + new_lines.append(line) + s = "\n".join(new_lines) + + # Done. + constants_str.append(""".. 
data:: %s\n %s""" % (name, s)) + constants_str = "\n".join(constants_str) + + __doc__ = __doc__ % dict(constant_list=constants_str) + del constants_str, name, doc + del line, lines, new_lines, m, s, prev + +del constants, add_newdoc diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/constants.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/constants.pyc new file mode 100644 index 0000000..0325cc5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/constants.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/creation.py b/project/venv/lib/python2.7/site-packages/numpy/doc/creation.py new file mode 100644 index 0000000..9ebe938 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/doc/creation.py @@ -0,0 +1,144 @@ +""" +============== +Array Creation +============== + +Introduction +============ + +There are 5 general mechanisms for creating arrays: + +1) Conversion from other Python structures (e.g., lists, tuples) +2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros, + etc.) +3) Reading arrays from disk, either from standard or custom formats +4) Creating arrays from raw bytes through the use of strings or buffers +5) Use of special library functions (e.g., random) + +This section will not cover means of replicating, joining, or otherwise +expanding or mutating existing arrays. Nor will it cover creating object +arrays or structured arrays. Both of those are covered in their own sections. + +Converting Python array_like Objects to NumPy Arrays +==================================================== + +In general, numerical data arranged in an array-like structure in Python can +be converted to arrays through the use of the array() function. The most +obvious examples are lists and tuples. See the documentation for array() for +details for its use. Some objects may support the array-protocol and allow +conversion to arrays this way. A simple way to find out if the object can be +converted to a numpy array using array() is simply to try it interactively and +see if it works! (The Python Way). + +Examples: :: + + >>> x = np.array([2,3,1,0]) + >>> x = np.array([2, 3, 1, 0]) + >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, + and types + >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]]) + +Intrinsic NumPy Array Creation +============================== + +NumPy has built-in functions for creating arrays from scratch: + +zeros(shape) will create an array filled with 0 values with the specified +shape. The default dtype is float64. :: + + >>> np.zeros((2, 3)) + array([[ 0., 0., 0.], [ 0., 0., 0.]]) + +ones(shape) will create an array filled with 1 values. It is identical to +zeros in all other respects. + +arange() will create arrays with regularly incrementing values. Check the +docstring for complete information on the various ways it can be used. A few +examples will be given here: :: + + >>> np.arange(10) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.arange(2, 10, dtype=float) + array([ 2., 3., 4., 5., 6., 7., 8., 9.]) + >>> np.arange(2, 3, 0.1) + array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) + +Note that there are some subtleties regarding the last usage that the user +should be aware of that are described in the arange docstring. + +linspace() will create arrays with a specified number of elements, and +spaced equally between the specified beginning and end values. 
For +example: :: + + >>> np.linspace(1., 4., 6) + array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ]) + +The advantage of this creation function is that one can guarantee the +number of elements and the starting and end point, which arange() +generally will not do for arbitrary start, stop, and step values. + +indices() will create a set of arrays (stacked as a one-higher dimensioned +array), one per dimension with each representing variation in that dimension. +An example illustrates much better than a verbal description: :: + + >>> np.indices((3,3)) + array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]) + +This is particularly useful for evaluating functions of multiple dimensions on +a regular grid. + +Reading Arrays From Disk +======================== + +This is presumably the most common case of large array creation. The details, +of course, depend greatly on the format of data on disk and so this section +can only give general pointers on how to handle various formats. + +Standard Binary Formats +----------------------- + +Various fields have standard formats for array data. The following lists the +ones with known python libraries to read them and return numpy arrays (there +may be others for which it is possible to read and convert to numpy arrays so +check the last section as well) +:: + + HDF5: h5py + FITS: Astropy + +Examples of formats that cannot be read directly but for which it is not hard to +convert are those formats supported by libraries like PIL (able to read and +write many image formats such as jpg, png, etc). + +Common ASCII Formats +------------------------ + +Comma Separated Value files (CSV) are widely used (and an export and import +option for programs like Excel). There are a number of ways of reading these +files in Python. There are CSV functions in Python and functions in pylab +(part of matplotlib). + +More generic ascii files can be read using the io package in scipy. + +Custom Binary Formats +--------------------- + +There are a variety of approaches one can use. If the file has a relatively +simple format then one can write a simple I/O library and use the numpy +fromfile() function and .tofile() method to read and write numpy arrays +directly (mind your byteorder though!) If a good C or C++ library exists that +read the data, one can wrap that library with a variety of techniques though +that certainly is much more work and requires significantly more advanced +knowledge to interface with C or C++. + +Use of Special Libraries +------------------------ + +There are libraries that can be used to generate arrays for special purposes +and it isn't possible to enumerate all of them. The most common uses are use +of the many array generation functions in random that can generate arrays of +random values, and some utility functions to generate special matrices (e.g. +diagonal). 
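+
+As a small illustration of the last two subsections, the sketch below
+round-trips an array through tofile()/fromfile() and builds a diagonal
+matrix (it assumes a writable ``/tmp`` and a little-endian platform; exact
+repr whitespace varies between numpy versions)::
+
+ >>> a = np.arange(6, dtype='<i4')        # byte order pinned explicitly
+ >>> a.tofile('/tmp/a.raw')               # raw bytes, no header
+ >>> np.fromfile('/tmp/a.raw', dtype='<i4')
+ array([0, 1, 2, 3, 4, 5], dtype=int32)
+ >>> np.diag([1., 2., 3.])                # special matrix from a 1-d sequence
+ array([[ 1.,  0.,  0.],
+        [ 0.,  2.,  0.],
+        [ 0.,  0.,  3.]])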
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/creation.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/creation.pyc
new file mode 100644
index 0000000..c944752
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/creation.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/glossary.py b/project/venv/lib/python2.7/site-packages/numpy/doc/glossary.py
new file mode 100644
index 0000000..a3b9423
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/doc/glossary.py
@@ -0,0 +1,450 @@
+"""
+========
+Glossary
+========
+
+.. glossary::
+
+   along an axis
+      Axes are defined for arrays with more than one dimension.  A
+      2-dimensional array has two corresponding axes: the first running
+      vertically downwards across rows (axis 0), and the second running
+      horizontally across columns (axis 1).
+
+      Many operations can take place along one of these axes.  For example,
+      we can sum each row of an array, in which case we operate along
+      columns, or axis 1::
+
+        >>> x = np.arange(12).reshape((3,4))
+
+        >>> x
+        array([[ 0,  1,  2,  3],
+               [ 4,  5,  6,  7],
+               [ 8,  9, 10, 11]])
+
+        >>> x.sum(axis=1)
+        array([ 6, 22, 38])
+
+   array
+      A homogeneous container of numerical elements.  Each element in the
+      array occupies a fixed amount of memory (hence homogeneous), and
+      can be a numerical element of a single type (such as float, int
+      or complex) or a combination (such as ``(float, int, float)``).  Each
+      array has an associated data-type (or ``dtype``), which describes
+      the numerical type of its elements::
+
+        >>> x = np.array([1, 2, 3], float)
+
+        >>> x
+        array([ 1.,  2.,  3.])
+
+        >>> x.dtype # floating point number, 64 bits of memory per element
+        dtype('float64')
+
+
+        # More complicated data type: each array element is a combination of
+        # an integer and a floating point number
+        >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
+        array([(1, 2.0), (3, 4.0)],
+              dtype=[('x', '<i8'), ('y', '<f8')])
+
+   attribute
+      A property of an object that can be accessed using ``obj.attribute``,
+      e.g., ``shape`` is an attribute of an array::
+
+        >>> x = np.array([1, 2, 3])
+        >>> x.shape
+        (3,)
+
+   big-endian
+      When storing a multi-byte value in memory as a sequence of bytes, the
+      sequence addresses/sends/stores the most significant byte first (lowest
+      address) and the least significant byte last (highest address). Common in
+      micro-processors and used for transmission of data over network protocols.
+
+   BLAS
+      `Basic Linear Algebra Subprograms `_
+
+   broadcast
+      NumPy can do operations on arrays whose shapes are mismatched::
+
+        >>> x = np.array([1, 2])
+        >>> y = np.array([[3], [4]])
+
+        >>> x
+        array([1, 2])
+
+        >>> y
+        array([[3],
+               [4]])
+
+        >>> x + y
+        array([[4, 5],
+               [5, 6]])
+
+      See `numpy.doc.broadcasting` for more information.
+
+   C order
+      See `row-major`
+
+   column-major
+      A way to represent items in a N-dimensional array in the 1-dimensional
+      computer memory. In column-major order, the leftmost index "varies the
+      fastest": for example the array::
+
+           [[1, 2, 3],
+            [4, 5, 6]]
+
+      is represented in the column-major order as::
+
+           [1, 4, 2, 5, 3, 6]
+
+      Column-major order is also known as the Fortran order, as the Fortran
+      programming language uses it.
+
+   decorator
+      An operator that transforms a function.  For example, a ``log``
+      decorator may be defined to print debugging information upon
+      function execution::
+
+        >>> def log(f):
+        ...     def new_logging_func(*args, **kwargs):
+        ...         print("Logging call with parameters:", args, kwargs)
+        ...         return f(*args, **kwargs)
+        ...
+        ...     
return new_logging_func + + Now, when we define a function, we can "decorate" it using ``log``:: + + >>> @log + ... def add(a, b): + ... return a + b + + Calling ``add`` then yields: + + >>> add(1, 2) + Logging call with parameters: (1, 2) {} + 3 + + dictionary + Resembling a language dictionary, which provides a mapping between + words and descriptions thereof, a Python dictionary is a mapping + between two objects:: + + >>> x = {1: 'one', 'two': [1, 2]} + + Here, `x` is a dictionary mapping keys to values, in this case + the integer 1 to the string "one", and the string "two" to + the list ``[1, 2]``. The values may be accessed using their + corresponding keys:: + + >>> x[1] + 'one' + + >>> x['two'] + [1, 2] + + Note that dictionaries are not stored in any specific order. Also, + most mutable (see *immutable* below) objects, such as lists, may not + be used as keys. + + For more information on dictionaries, read the + `Python tutorial `_. + + field + In a :term:`structured data type`, each sub-type is called a `field`. + The `field` has a name (a string), a type (any valid :term:`dtype`, and + an optional `title`. See :ref:`arrays.dtypes` + + Fortran order + See `column-major` + + flattened + Collapsed to a one-dimensional array. See `numpy.ndarray.flatten` + for details. + + homogenous + Describes a block of memory comprised of blocks, each block comprised of + items and of the same size, and blocks are interpreted in exactly the + same way. In the simplest case each block contains a single item, for + instance int32 or float64. + + immutable + An object that cannot be modified after execution is called + immutable. Two common examples are strings and tuples. + + instance + A class definition gives the blueprint for constructing an object:: + + >>> class House(object): + ... wall_colour = 'white' + + Yet, we have to *build* a house before it exists:: + + >>> h = House() # build a house + + Now, ``h`` is called a ``House`` instance. An instance is therefore + a specific realisation of a class. + + iterable + A sequence that allows "walking" (iterating) over items, typically + using a loop such as:: + + >>> x = [1, 2, 3] + >>> [item**2 for item in x] + [1, 4, 9] + + It is often used in combination with ``enumerate``:: + >>> keys = ['a','b','c'] + >>> for n, k in enumerate(keys): + ... print("Key %d: %s" % (n, k)) + ... + Key 0: a + Key 1: b + Key 2: c + + list + A Python container that can hold any number of objects or items. + The items do not have to be of the same type, and can even be + lists themselves:: + + >>> x = [2, 2.0, "two", [2, 2.0]] + + The list `x` contains 4 items, each which can be accessed individually:: + + >>> x[2] # the string 'two' + 'two' + + >>> x[3] # a list, containing an integer 2 and a float 2.0 + [2, 2.0] + + It is also possible to select more than one item at a time, + using *slicing*:: + + >>> x[0:2] # or, equivalently, x[:2] + [2, 2.0] + + In code, arrays are often conveniently expressed as nested lists:: + + + >>> np.array([[1, 2], [3, 4]]) + array([[1, 2], + [3, 4]]) + + For more information, read the section on lists in the `Python + tutorial `_. For a mapping + type (key-value), see *dictionary*. + + little-endian + When storing a multi-byte value in memory as a sequence of bytes, the + sequence addresses/sends/stores the least significant byte first (lowest + address) and the most significant byte last (highest address). Common in + x86 processors. 
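+
+      A doctest-style check (illustrative; the output is machine-dependent
+      and assumes an x86 or other little-endian CPU)::
+
+        >>> import sys
+        >>> sys.byteorder
+        'little'
+
+        >>> np.little_endian
+        True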
+ + mask + A boolean array, used to select only certain elements for an operation:: + + >>> x = np.arange(5) + >>> x + array([0, 1, 2, 3, 4]) + + >>> mask = (x > 2) + >>> mask + array([False, False, False, True, True]) + + >>> x[mask] = -1 + >>> x + array([ 0, 1, 2, -1, -1]) + + masked array + Array that suppressed values indicated by a mask:: + + >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True]) + >>> x + masked_array(data = [-- 2.0 --], + mask = [ True False True], + fill_value = 1e+20) + + + >>> x + [1, 2, 3] + masked_array(data = [-- 4.0 --], + mask = [ True False True], + fill_value = 1e+20) + + + + Masked arrays are often used when operating on arrays containing + missing or invalid entries. + + matrix + A 2-dimensional ndarray that preserves its two-dimensional nature + throughout operations. It has certain special operations, such as ``*`` + (matrix multiplication) and ``**`` (matrix power), defined:: + + >>> x = np.mat([[1, 2], [3, 4]]) + >>> x + matrix([[1, 2], + [3, 4]]) + + >>> x**2 + matrix([[ 7, 10], + [15, 22]]) + + method + A function associated with an object. For example, each ndarray has a + method called ``repeat``:: + + >>> x = np.array([1, 2, 3]) + >>> x.repeat(2) + array([1, 1, 2, 2, 3, 3]) + + ndarray + See *array*. + + record array + An :term:`ndarray` with :term:`structured data type` which has been + subclassed as ``np.recarray`` and whose dtype is of type ``np.record``, + making the fields of its data type to be accessible by attribute. + + reference + If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore, + ``a`` and ``b`` are different names for the same Python object. + + row-major + A way to represent items in a N-dimensional array in the 1-dimensional + computer memory. In row-major order, the rightmost index "varies + the fastest": for example the array:: + + [[1, 2, 3], + [4, 5, 6]] + + is represented in the row-major order as:: + + [1, 2, 3, 4, 5, 6] + + Row-major order is also known as the C order, as the C programming + language uses it. New NumPy arrays are by default in row-major order. + + self + Often seen in method signatures, ``self`` refers to the instance + of the associated class. For example: + + >>> class Paintbrush(object): + ... color = 'blue' + ... + ... def paint(self): + ... print("Painting the city %s!" % self.color) + ... + >>> p = Paintbrush() + >>> p.color = 'red' + >>> p.paint() # self refers to 'p' + Painting the city red! + + slice + Used to select only certain elements from a sequence:: + + >>> x = range(5) + >>> x + [0, 1, 2, 3, 4] + + >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) + [1, 2] + + >>> x[1:5:2] # slice from 1 to 5, but skipping every second element + [1, 3] + + >>> x[::-1] # slice a sequence in reverse + [4, 3, 2, 1, 0] + + Arrays may have more than one dimension, each which can be sliced + individually:: + + >>> x = np.array([[1, 2], [3, 4]]) + >>> x + array([[1, 2], + [3, 4]]) + + >>> x[:, 1] + array([2, 4]) + + structure + See :term:`structured data type` + + structured data type + A data type composed of other datatypes + + tuple + A sequence that may contain a variable number of types of any + kind. A tuple is immutable, i.e., once constructed it cannot be + changed. 
Similar to a list, it can be indexed and sliced::
+
+        >>> x = (1, 'one', [1, 2])
+        >>> x
+        (1, 'one', [1, 2])
+
+        >>> x[0]
+        1
+
+        >>> x[:2]
+        (1, 'one')
+
+      A useful concept is "tuple unpacking", which allows variables to
+      be assigned to the contents of a tuple::
+
+        >>> x, y = (1, 2)
+        >>> x, y = 1, 2
+
+      This is often used when a function returns multiple values:
+
+        >>> def return_many():
+        ...     return 1, 'alpha', None
+
+        >>> a, b, c = return_many()
+        >>> a, b, c
+        (1, 'alpha', None)
+
+        >>> a
+        1
+        >>> b
+        'alpha'
+
+   ufunc
+      Universal function.  A fast element-wise array operation.  Examples include
+      ``add``, ``sin`` and ``logical_or``.
+
+   view
+      An array that does not own its data, but refers to another array's
+      data instead.  For example, we may create a view that only shows
+      every second element of another array::
+
+        >>> x = np.arange(5)
+        >>> x
+        array([0, 1, 2, 3, 4])
+
+        >>> y = x[::2]
+        >>> y
+        array([0, 2, 4])
+
+        >>> x[0] = 3 # changing x changes y as well, since y is a view on x
+        >>> y
+        array([3, 2, 4])
+
+   wrapper
+      Python is a high-level (highly abstracted, or English-like) language.
+      This abstraction comes at a price in execution speed, and sometimes
+      it becomes necessary to use lower level languages to do fast
+      computations.  A wrapper is code that provides a bridge between
+      high- and low-level languages, allowing, e.g., Python to execute
+      code written in C or Fortran.
+
+      Examples include ctypes, SWIG and Cython (which wraps C and C++)
+      and f2py (which wraps Fortran).
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/glossary.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/glossary.pyc
new file mode 100644
index 0000000..e28d4eb
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/glossary.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/indexing.py b/project/venv/lib/python2.7/site-packages/numpy/doc/indexing.py
new file mode 100644
index 0000000..087a688
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/doc/indexing.py
@@ -0,0 +1,439 @@
+"""==============
+Array indexing
+==============
+
+Array indexing refers to any use of the square brackets ([]) to index
+array values. There are many options to indexing, which give numpy
+indexing great power, but with power comes some complexity and the
+potential for confusion. This section is just an overview of the
+various options and issues related to indexing. Aside from single
+element indexing, the details on most of these options are to be
+found in related sections.
+
+Assignment vs referencing
+=========================
+
+Most of the following examples show the use of indexing when
+referencing data in an array. The examples work just as well
+when assigning to an array. See the section at the end for
+specific examples and explanations on how assignments work.
+
+Single element indexing
+=======================
+
+Single element indexing for a 1-D array is what one expects. It works
+exactly like that for other standard Python sequences. It is 0-based,
+and accepts negative indices for indexing from the end of the array. ::
+
+ >>> x = np.arange(10)
+ >>> x[2]
+ 2
+ >>> x[-2]
+ 8
+
+Unlike lists and tuples, numpy arrays support multidimensional indexing
+for multidimensional arrays. That means that it is not necessary to
+separate each dimension's index into its own set of square brackets. 
::
+
+ >>> x.shape = (2,5) # now x is 2-dimensional
+ >>> x[1,3]
+ 8
+ >>> x[1,-1]
+ 9
+
+Note that if one indexes a multidimensional array with fewer indices
+than dimensions, one gets a subdimensional array. For example: ::
+
+ >>> x[0]
+ array([0, 1, 2, 3, 4])
+
+That is, each index specified selects the array corresponding to the
+rest of the dimensions selected. In the above example, choosing 0
+means that the remaining dimension of length 5 is being left unspecified,
+and that what is returned is an array of that dimensionality and size.
+It must be noted that the returned array is not a copy of the original,
+but points to the same values in memory as does the original array.
+In this case, the 1-D array at the first position (0) is returned.
+So using a single index on the returned array, results in a single
+element being returned. That is: ::
+
+ >>> x[0][2]
+ 2
+
+So note that ``x[0,2] == x[0][2]`` though the second case is more
+inefficient as a new temporary array is created after the first index
+that is subsequently indexed by 2.
+
+Note to those used to IDL or Fortran memory order as it relates to
+indexing. NumPy uses C-order indexing. That means that the last
+index usually represents the most rapidly changing memory location,
+unlike Fortran or IDL, where the first index represents the most
+rapidly changing location in memory. This difference represents a
+great potential for confusion.
+
+Other indexing options
+======================
+
+It is possible to slice and stride arrays to extract arrays of the
+same number of dimensions, but of different sizes than the original.
+The slicing and striding works exactly the same way it does for lists
+and tuples except that they can be applied to multiple dimensions as
+well. A few examples illustrate it best: ::
+
+ >>> x = np.arange(10)
+ >>> x[2:5]
+ array([2, 3, 4])
+ >>> x[:-7]
+ array([0, 1, 2])
+ >>> x[1:7:2]
+ array([1, 3, 5])
+ >>> y = np.arange(35).reshape(5,7)
+ >>> y[1:5:2,::3]
+ array([[ 7, 10, 13],
+        [21, 24, 27]])
+
+Note that slices of arrays do not copy the internal array data but
+only produce new views of the original data.
+
+It is possible to index arrays with other arrays for the purposes of
+selecting lists of values out of arrays into new arrays. There are
+two different ways of accomplishing this. One uses one or more arrays
+of index values. The other involves giving a boolean array of the proper
+shape to indicate the values to be selected. Index arrays are a very
+powerful tool that allows one to avoid looping over individual elements in
+arrays and thus greatly improve performance.
+
+It is possible to use special features to effectively increase the
+number of dimensions in an array through indexing so the resulting
+array acquires the shape needed for use in an expression or with a
+specific function.
+
+Index arrays
+============
+
+NumPy arrays may be indexed with other arrays (or any other sequence-
+like object that can be converted to an array, such as lists, with the
+exception of tuples; see the end of this document for why this is). The
+use of index arrays ranges from simple, straightforward cases to
+complex, hard-to-understand cases. For all cases of index arrays, what
+is returned is a copy of the original data, not a view as one gets for
+slices.
+
+Index arrays must be of integer type. Each value in the array indicates
+which value in the array to use in place of the index. 
To illustrate: ::
+
+ >>> x = np.arange(10,1,-1)
+ >>> x
+ array([10,  9,  8,  7,  6,  5,  4,  3,  2])
+ >>> x[np.array([3, 3, 1, 8])]
+ array([7, 7, 9, 2])
+
+
+The index array consisting of the values 3, 3, 1 and 8 correspondingly
+creates an array of length 4 (same as the index array) where each index
+is replaced by the value the index array has in the array being indexed.
+
+Negative values are permitted and work as they do with single indices
+or slices: ::
+
+ >>> x[np.array([3,3,-3,8])]
+ array([7, 7, 4, 2])
+
+It is an error to have index values out of bounds: ::
+
+ >>> x[np.array([3, 3, 20, 8])]
+ <type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
+
+Generally speaking, what is returned when index arrays are used is
+an array with the same shape as the index array, but with the type
+and values of the array being indexed.  As an example, we can use a
+multidimensional index array instead: ::
+
+ >>> x[np.array([[1,1],[2,3]])]
+ array([[9, 9],
+        [8, 7]])
+
+Indexing Multi-dimensional arrays
+=================================
+
+Things become more complex when multidimensional arrays are indexed,
+particularly with multidimensional index arrays. These tend to be
+more unusual uses, but they are permitted, and they are useful for some
+problems. We'll start with the simplest multidimensional case (using
+the array y from the previous examples): ::
+
+ >>> y[np.array([0,2,4]), np.array([0,1,2])]
+ array([ 0, 15, 30])
+
+In this case, if the index arrays have a matching shape, and there is
+an index array for each dimension of the array being indexed, the
+resultant array has the same shape as the index arrays, and the values
+correspond to the index set for each position in the index arrays. In
+this example, the first index value is 0 for both index arrays, and
+thus the first value of the resultant array is y[0,0]. The next value
+is y[2,1], and the last is y[4,2].
+
+If the index arrays do not have the same shape, there is an attempt to
+broadcast them to the same shape.  If they cannot be broadcast to the
+same shape, an exception is raised: ::
+
+ >>> y[np.array([0,2,4]), np.array([0,1])]
+ <type 'exceptions.ValueError'>: shape mismatch: objects cannot be
+ broadcast to a single shape
+
+The broadcasting mechanism permits index arrays to be combined with
+scalars for other indices. The effect is that the scalar value is used
+for all the corresponding values of the index arrays: ::
+
+ >>> y[np.array([0,2,4]), 1]
+ array([ 1, 15, 29])
+
+Jumping to the next level of complexity, it is possible to only
+partially index an array with index arrays. It takes a bit of thought
+to understand what happens in such cases. For example if we just use
+one index array with y: ::
+
+ >>> y[np.array([0,2,4])]
+ array([[ 0,  1,  2,  3,  4,  5,  6],
+        [14, 15, 16, 17, 18, 19, 20],
+        [28, 29, 30, 31, 32, 33, 34]])
+
+What results is the construction of a new array where each value of
+the index array selects one row from the array being indexed and the
+resultant array has the resulting shape (number of index elements,
+size of row).
+
+An example of where this may be useful is for a color lookup table
+where we want to map the values of an image into RGB triples for
+display. The lookup table could have a shape (nlookup, 3). Indexing
+such an array with an image with shape (ny, nx) with dtype=np.uint8
+(or any integer type so long as values are within the bounds of the
+lookup table) will result in an array of shape (ny, nx, 3) where a
+triple of RGB values is associated with each pixel location.
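+
+A minimal sketch of that lookup-table idea (the palette values here are
+made up for illustration)::
+
+ >>> palette = np.array([[0, 0, 0],      # index 0 -> black
+ ...                     [255, 0, 0],    # index 1 -> red
+ ...                     [0, 0, 255]])   # index 2 -> blue
+ >>> image = np.array([[0, 1], [2, 0]], dtype=np.uint8)
+ >>> palette[image].shape                # (ny, nx, 3)
+ (2, 2, 3)
+ >>> palette[image][0, 1]                # the pixel at row 0, column 1 is red
+ array([255,   0,   0])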
+ +In general, the shape of the resultant array will be the concatenation +of the shape of the index array (or the shape that all the index arrays +were broadcast to) with the shape of any unused dimensions (those not +indexed) in the array being indexed. + +Boolean or "mask" index arrays +============================== + +Boolean arrays used as indices are treated in a different manner +entirely than index arrays. Boolean arrays must be of the same shape +as the initial dimensions of the array being indexed. In the +most straightforward case, the boolean array has the same shape: :: + + >>> b = y>20 + >>> y[b] + array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]) + +Unlike in the case of integer index arrays, in the boolean case, the +result is a 1-D array containing all the elements in the indexed array +corresponding to all the true elements in the boolean array. The +elements in the indexed array are always iterated and returned in +:term:`row-major` (C-style) order. The result is also identical to +``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy +of the data, not a view as one gets with slices. + +The result will be multidimensional if y has more dimensions than b. +For example: :: + + >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y + array([False, False, False, True, True]) + >>> y[b[:,5]] + array([[21, 22, 23, 24, 25, 26, 27], + [28, 29, 30, 31, 32, 33, 34]]) + +Here the 4th and 5th rows are selected from the indexed array and +combined to make a 2-D array. + +In general, when the boolean array has fewer dimensions than the array +being indexed, this is equivalent to y[b, ...], which means +y is indexed by b followed by as many : as are needed to fill +out the rank of y. +Thus the shape of the result is one dimension containing the number +of True elements of the boolean array, followed by the remaining +dimensions of the array being indexed. + +For example, using a 2-D boolean array of shape (2,3) +with four True elements to select rows from a 3-D array of shape +(2,3,5) results in a 2-D result of shape (4,5): :: + + >>> x = np.arange(30).reshape(2,3,5) + >>> x + array([[[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14]], + [[15, 16, 17, 18, 19], + [20, 21, 22, 23, 24], + [25, 26, 27, 28, 29]]]) + >>> b = np.array([[True, True, False], [False, True, True]]) + >>> x[b] + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [20, 21, 22, 23, 24], + [25, 26, 27, 28, 29]]) + +For further details, consult the numpy reference documentation on array indexing. + +Combining index arrays with slices +================================== + +Index arrays may be combined with slices. For example: :: + + >>> y[np.array([0,2,4]),1:3] + array([[ 1, 2], + [15, 16], + [29, 30]]) + +In effect, the slice is converted to an index array +np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array +to produce a resultant array of shape (3,2). + +Likewise, slicing can be combined with broadcasted boolean indices: :: + + >>> y[b[:,5],1:3] + array([[22, 23], + [29, 30]]) + +Structural indexing tools +========================= + +To facilitate easy matching of array shapes with expressions and in +assignments, the np.newaxis object can be used within array indices +to add new dimensions with a size of 1. For example: :: + + >>> y.shape + (5, 7) + >>> y[:,np.newaxis,:].shape + (5, 1, 7) + +Note that there are no new elements in the array, just that the +dimensionality is increased. 
This can be handy to combine two
+arrays in a way that otherwise would require explicitly reshaping
+operations. For example: ::
+
+ >>> x = np.arange(5)
+ >>> x[:,np.newaxis] + x[np.newaxis,:]
+ array([[0, 1, 2, 3, 4],
+        [1, 2, 3, 4, 5],
+        [2, 3, 4, 5, 6],
+        [3, 4, 5, 6, 7],
+        [4, 5, 6, 7, 8]])
+
+The ellipsis syntax may be used to indicate selecting in full any
+remaining unspecified dimensions. For example: ::
+
+ >>> z = np.arange(81).reshape(3,3,3,3)
+ >>> z[1,...,2]
+ array([[29, 32, 35],
+        [38, 41, 44],
+        [47, 50, 53]])
+
+This is equivalent to: ::
+
+ >>> z[1,:,:,2]
+ array([[29, 32, 35],
+        [38, 41, 44],
+        [47, 50, 53]])
+
+Assigning values to indexed arrays
+==================================
+
+As mentioned, one can select a subset of an array to assign to using
+a single index, slices, and index and mask arrays. The value being
+assigned to the indexed array must be shape consistent (the same shape
+or broadcastable to the shape the index produces). For example, it is
+permitted to assign a constant to a slice: ::
+
+ >>> x = np.arange(10)
+ >>> x[2:7] = 1
+
+or an array of the right size: ::
+
+ >>> x[2:7] = np.arange(5)
+
+Note that assignments may result in changes if assigning
+higher types to lower types (like floats to ints) or even
+exceptions (assigning complex to floats or ints): ::
+
+ >>> x[1] = 1.2
+ >>> x[1]
+ 1
+ >>> x[1] = 1.2j
+ <type 'exceptions.TypeError'>: can't convert complex to long; use
+ long(abs(z))
+
+
+Unlike some of the references (such as array and mask indices)
+assignments are always made to the original data in the array
+(indeed, nothing else would make sense!). Note though, that some
+actions may not work as one may naively expect. This particular
+example is often surprising to people: ::
+
+ >>> x = np.arange(0, 50, 10)
+ >>> x
+ array([ 0, 10, 20, 30, 40])
+ >>> x[np.array([1, 1, 3, 1])] += 1
+ >>> x
+ array([ 0, 11, 20, 31, 40])
+
+Where people expect that the 1st location will be incremented by 3.
+In fact, it will only be incremented by 1. The reason is that
+a new array is extracted from the original (as a temporary) containing
+the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
+and then the temporary is assigned back to the original array. Thus
+the value of the array at x[1]+1 is assigned to x[1] three times,
+rather than being incremented 3 times.
+
+Dealing with variable numbers of indices within programs
+========================================================
+
+The index syntax is very powerful but limiting when dealing with
+a variable number of indices. For example, if you want to write
+a function that can handle arguments with various numbers of
+dimensions without having to write special case code for each
+number of possible dimensions, how can that be done? If one
+supplies to the index a tuple, the tuple will be interpreted
+as a list of indices. For example (using the previous definition
+for the array z): ::
+
+ >>> indices = (1,1,1,1)
+ >>> z[indices]
+ 40
+
+So one can use code to construct tuples of any number of indices
+and then use these within an index.
+
+Slices can be specified within programs by using the slice()
+function in Python. 
+For example: ::
+
+    >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
+    >>> z[indices]
+    array([39, 40])
+
+Likewise, ellipsis can be specified by code by using the Ellipsis
+object: ::
+
+    >>> indices = (1, Ellipsis, 1) # same as [1,...,1]
+    >>> z[indices]
+    array([[28, 31, 34],
+           [37, 40, 43],
+           [46, 49, 52]])
+
+For this reason it is possible to use the output from the np.nonzero()
+function directly as an index, since it always returns a tuple of index
+arrays.
+
+Because of the special treatment of tuples, they are not automatically
+converted to an array as a list would be. As an example: ::
+
+    >>> z[[1,1,1,1]] # produces a large array
+    array([[[[27, 28, 29],
+             [30, 31, 32], ...
+    >>> z[(1,1,1,1)] # returns a single value
+    40
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/indexing.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/indexing.pyc
new file mode 100644
index 0000000..4f469bb
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/indexing.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/internals.py b/project/venv/lib/python2.7/site-packages/numpy/doc/internals.py
new file mode 100644
index 0000000..a14fee7
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/doc/internals.py
@@ -0,0 +1,163 @@
+"""
+===============
+Array Internals
+===============
+
+Internal organization of numpy arrays
+=====================================
+
+It helps to understand a bit about how numpy arrays are handled under the
+covers in order to understand numpy better. This section will not go into
+great detail. Those wishing to understand the full details are referred to
+Travis Oliphant's book "Guide to NumPy".
+
+NumPy arrays consist of two major components, the raw array data (from now on,
+referred to as the data buffer), and the information about the raw array data.
+The data buffer is typically what people think of as arrays in C or Fortran,
+a contiguous (and fixed) block of memory containing fixed-sized data items.
+NumPy also contains a significant set of data that describes how to interpret
+the data in the data buffer. This extra information contains (among other things):
+
+ 1) The basic data element's size in bytes
+ 2) The start of the data within the data buffer (an offset relative to the
+    beginning of the data buffer).
+ 3) The number of dimensions and the size of each dimension
+ 4) The separation between elements for each dimension (the 'stride'). This
+    does not have to be a multiple of the element size
+ 5) The byte order of the data (which may not be the native byte order)
+ 6) Whether the buffer is read-only
+ 7) Information (via the dtype object) about the interpretation of the basic
+    data element. The basic data element may be as simple as an int or a float,
+    or it may be a compound object (e.g., struct-like), a fixed character field,
+    or Python object pointers.
+ 8) Whether the array is to be interpreted as C-order or Fortran-order.
+
+This arrangement allows for very flexible use of arrays. One thing that it allows
+is simple changes of the metadata to change the interpretation of the array buffer.
+Changing the byteorder of the array is a simple change involving no rearrangement
+of the data.
+The shape of the array can be changed very easily without changing
+anything in the data buffer or doing any data copying at all.
+
+Among other things, this makes it possible to create a new array metadata
+object that uses the same data buffer
+to create a new view of that data buffer that has a different interpretation
+of the buffer (e.g., different shape, offset, byte order, strides, etc) but
+shares the same data bytes. Many operations in numpy do just this, such as
+slices. Other operations, such as transpose, don't move data elements
+around in the array, but rather change the information about the shape and
+strides so that the indexing of the array changes, but the data in the
+data buffer doesn't move.
+
+Typically these new versions of the array metadata, sharing the same data
+buffer, are new 'views' into the data buffer. There is a different ndarray
+object, but it uses the same data buffer. This is why it is necessary to force
+copies through use of the .copy() method if one really wants to make a new and
+independent copy of the data buffer.
+
+New views into arrays mean the object reference counts for the data buffer
+increase. Simply doing away with the original array object will not remove the
+data buffer if other views of it still exist.
+
+Multidimensional Array Indexing Order Issues
+============================================
+
+What is the right way to index
+multi-dimensional arrays? Before you jump to conclusions about the one
+true way to index multi-dimensional arrays, it pays to understand why this is
+a confusing issue. This section will try to explain in detail how numpy
+indexing works and why we adopt the convention we do for images, and when it
+may be appropriate to adopt other conventions.
+
+The first thing to understand is
+that there are two conflicting conventions for indexing 2-dimensional arrays.
+Matrix notation uses the first index to indicate which row is being selected and
+the second index to indicate which column is selected. This is opposite to the
+geometrically oriented convention for images, where people generally think the
+first index represents x position (i.e., column) and the second represents y
+position (i.e., row). This alone is the source of much confusion;
+matrix-oriented users and image-oriented users expect two different things with
+regard to indexing.
+
+The second issue to understand is how indices correspond
+to the order in which the array is stored in memory. In Fortran the first index
+is the most rapidly varying index when moving through the elements of a
+two-dimensional array as it is stored in memory. If you adopt the matrix
+convention for indexing, then this means the matrix is stored one column at a
+time (since the first index moves to the next row as it changes). Thus Fortran
+is considered a Column-major language. C has just the opposite convention. In
+C, the last index changes most rapidly as one moves through the array as
+stored in memory. Thus C is a Row-major language. The matrix is stored by
+rows. Note that in both cases this presumes that the matrix convention for
+indexing is being used, i.e., for both Fortran and C, the first index is the
+row. Note that this convention implies that the indexing convention is
+invariant and that the data order changes to keep that so.
+
+But that's not the only way
+to look at it. Suppose one has large two-dimensional arrays (images or
+matrices) stored in data files. Suppose the data are stored by rows rather than
+by columns.
+If we are to preserve our index convention (whether matrix or
+image) that means that depending on the language we use, we may be forced to
+reorder the data if it is read into memory to preserve our indexing
+convention. For example, if we read row-ordered data into memory without
+reordering, it will match the matrix indexing convention for C, but not for
+Fortran. Conversely, it will match the image indexing convention for Fortran,
+but not for C. For C, if one is using data stored in row order, and one wants
+to preserve the image index convention, the data must be reordered when
+reading into memory.
+
+In the end, which you do for Fortran or C depends on
+which is more important: not reordering data, or preserving the indexing
+convention. For large images, reordering data is potentially expensive, and
+often the indexing convention is inverted to avoid that.
+
+The situation with
+numpy makes this issue yet more complicated. The internal machinery of numpy
+arrays is flexible enough to accept any ordering of indices. One can simply
+reorder indices by manipulating the internal stride information for arrays
+without reordering the data at all. NumPy will know how to map the new index
+order to the data without moving the data.
+
+So if this is true, why not choose
+the index order that matches what you most expect? In particular, why not define
+row-ordered images to use the image convention? (This is sometimes referred
+to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
+order options for array ordering in numpy.) The drawback of doing this is
+potential performance penalties. It's common to access the data sequentially,
+either implicitly in array operations or explicitly by looping over rows of an
+image. When that is done, the data will be accessed in non-optimal order.
+As the first index is incremented, what is actually happening is that elements
+spaced far apart in memory are being sequentially accessed, with usually poor
+memory access speeds. For example, consider a two-dimensional image 'im'
+defined so that im[0, 10] represents the value at x=0, y=10. To be consistent
+with usual Python behavior, im[0] would then represent a column at x=0. Yet
+that data would be spread over the whole array since the data are stored in
+row order. Despite the flexibility of numpy's indexing, it can't really paper
+over the fact that basic operations are rendered inefficient because of data
+order, or that getting contiguous subarrays is still awkward (e.g., im[:,0]
+for the first row, vs im[0]). Thus one can't use an idiom such as
+"for row in im"; "for col in im" does work, but doesn't yield contiguous
+column data.
+
+As it turns out, numpy is
+smart enough when dealing with ufuncs to determine which index is the most
+rapidly varying one in memory and uses that for the innermost loop. Thus for
+ufuncs there is no large intrinsic advantage to either approach in most cases.
+On the other hand, use of .flat with a FORTRAN-ordered array will lead to
+non-optimal memory access as adjacent elements in the flattened array (iterator,
+actually) are not contiguous in memory.
+
+Indeed, the fact is that Python
+indexing on lists and other sequences naturally leads to an outside-to-inside
+ordering (the first index gets the largest grouping, the next the next largest,
+and the last gets the smallest element). Since image data are normally stored
+by rows, this corresponds to position within rows being the last item indexed.
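+
+The effect of these stride manipulations can be seen directly. The following
+is a short sketch of our own (not part of the original text); the byte counts
+assume a platform whose default integer is 8 bytes: ::
+
+    >>> a = np.arange(6).reshape(2, 3)   # C (row-major) layout by default
+    >>> a.strides                        # rows are 24 bytes apart, items 8
+    (24, 8)
+    >>> f = np.asfortranarray(a)         # same values, column-major layout
+    >>> f.strides                        # now the first index is the fast one
+    (8, 24)
+    >>> a.T.strides                      # a transpose merely swaps the strides
+    (8, 24)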
+
+If you do want to use Fortran ordering, realize that
+there are two approaches to consider: 1) accept that the first index is just not
+the most rapidly changing in memory and have all your I/O routines reorder
+your data when going from memory to disk or vice versa, or 2) use numpy's
+mechanism for mapping the first index to the most rapidly varying data. We
+recommend the former if possible. The disadvantage of the latter is that many
+of numpy's functions will yield arrays without Fortran ordering unless you are
+careful to use the 'order' keyword. Doing this would be highly inconvenient.
+
+Otherwise we recommend simply learning to reverse the usual order of indices
+when accessing elements of an array. Granted, it goes against the grain, but
+it is more in line with Python semantics and the natural order of the data.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/internals.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/internals.pyc
new file mode 100644
index 0000000..89bebf2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/internals.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/misc.py b/project/venv/lib/python2.7/site-packages/numpy/doc/misc.py
new file mode 100644
index 0000000..a76abe1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/doc/misc.py
@@ -0,0 +1,227 @@
+"""
+=============
+Miscellaneous
+=============
+
+IEEE 754 Floating Point Special Values
+--------------------------------------
+
+Special values defined in numpy: nan, inf.
+
+NaNs can be used as a poor man's mask (if you don't care what the
+original value was).
+
+Note: cannot use equality to test NaNs. E.g.: ::
+
+    >>> myarr = np.array([1., 0., np.nan, 3.])
+    >>> np.nonzero(myarr == np.nan)
+    (array([], dtype=int64),)
+    >>> np.nan == np.nan # is always False! Use special numpy functions instead.
+    False
+    >>> myarr[myarr == np.nan] = 0. # doesn't work
+    >>> myarr
+    array([ 1., 0., NaN, 3.])
+    >>> myarr[np.isnan(myarr)] = 0. # use this instead
+    >>> myarr
+    array([ 1., 0., 0., 3.])
+
+Other related special value functions: ::
+
+    isinf():    True if value is inf
+    isfinite(): True if not nan or inf
+    nan_to_num(): Map nan to 0, inf to max float, -inf to min float
+
+The following correspond to the usual functions except that nans are excluded
+from the results: ::
+
+    nansum()
+    nanmax()
+    nanmin()
+    nanargmax()
+    nanargmin()
+
+    >>> x = np.arange(10.)
+    >>> x[3] = np.nan
+    >>> x.sum()
+    nan
+    >>> np.nansum(x)
+    42.0
+
+How numpy handles numerical exceptions
+--------------------------------------
+
+The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
+and ``'ignore'`` for ``underflow``. But this can be changed, and it can be
+set individually for different kinds of exceptions. The different behaviors
+are:
+
+ - 'ignore' : Take no action when the exception occurs.
+ - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module).
+ - 'raise' : Raise a `FloatingPointError`.
+ - 'call' : Call a function specified using the `seterrcall` function.
+ - 'print' : Print a warning directly to ``stdout``.
+ - 'log' : Record error in a Log object specified by `seterrcall`.
+
+These behaviors can be set for all kinds of errors or specific ones:
+
+ - all : apply to all numeric exceptions
+ - invalid : when NaNs are generated
+ - divide : divide by zero (for integers as well!)
+ - overflow : floating point overflows + - underflow : floating point underflows + +Note that integer divide-by-zero is handled by the same machinery. +These behaviors are set on a per-thread basis. + +Examples +-------- + +:: + + >>> oldsettings = np.seterr(all='warn') + >>> np.zeros(5,dtype=np.float32)/0. + invalid value encountered in divide + >>> j = np.seterr(under='ignore') + >>> np.array([1.e-100])**10 + >>> j = np.seterr(invalid='raise') + >>> np.sqrt(np.array([-1.])) + FloatingPointError: invalid value encountered in sqrt + >>> def errorhandler(errstr, errflag): + ... print("saw stupid error!") + >>> np.seterrcall(errorhandler) + + >>> j = np.seterr(all='call') + >>> np.zeros(5, dtype=np.int32)/0 + FloatingPointError: invalid value encountered in divide + saw stupid error! + >>> j = np.seterr(**oldsettings) # restore previous + ... # error-handling settings + +Interfacing to C +---------------- +Only a survey of the choices. Little detail on how each works. + +1) Bare metal, wrap your own C-code manually. + + - Plusses: + + - Efficient + - No dependencies on other tools + + - Minuses: + + - Lots of learning overhead: + + - need to learn basics of Python C API + - need to learn basics of numpy C API + - need to learn how to handle reference counting and love it. + + - Reference counting often difficult to get right. + + - getting it wrong leads to memory leaks, and worse, segfaults + + - API will change for Python 3.0! + +2) Cython + + - Plusses: + + - avoid learning C API's + - no dealing with reference counting + - can code in pseudo python and generate C code + - can also interface to existing C code + - should shield you from changes to Python C api + - has become the de-facto standard within the scientific Python community + - fast indexing support for arrays + + - Minuses: + + - Can write code in non-standard form which may become obsolete + - Not as flexible as manual wrapping + +3) ctypes + + - Plusses: + + - part of Python standard library + - good for interfacing to existing sharable libraries, particularly + Windows DLLs + - avoids API/reference counting issues + - good numpy support: arrays have all these in their ctypes + attribute: :: + + a.ctypes.data a.ctypes.get_strides + a.ctypes.data_as a.ctypes.shape + a.ctypes.get_as_parameter a.ctypes.shape_as + a.ctypes.get_data a.ctypes.strides + a.ctypes.get_shape a.ctypes.strides_as + + - Minuses: + + - can't use for writing code to be turned into C extensions, only a wrapper + tool. + +4) SWIG (automatic wrapper generator) + + - Plusses: + + - around a long time + - multiple scripting language support + - C++ support + - Good for wrapping large (many functions) existing C libraries + + - Minuses: + + - generates lots of code between Python and the C code + - can cause performance problems that are nearly impossible to optimize + out + - interface files can be hard to write + - doesn't necessarily avoid reference counting issues or needing to know + API's + +5) scipy.weave + + - Plusses: + + - can turn many numpy expressions into C code + - dynamic compiling and loading of generated C code + - can embed pure C code in Python module and have weave extract, generate + interfaces and compile, etc. + + - Minuses: + + - Future very uncertain: it's the only part of Scipy not ported to Python 3 + and is effectively deprecated in favor of Cython. 
+
+6) Psyco
+
+ - Plusses:
+
+   - Turns pure python into efficient machine code through jit-like
+     optimizations
+   - very fast when it optimizes well
+
+ - Minuses:
+
+   - Only on intel (windows?)
+   - Doesn't do much for numpy?
+
+Interfacing to Fortran:
+-----------------------
+The clear choice to wrap Fortran code is
+`f2py <https://docs.scipy.org/doc/numpy/f2py/>`_.
+
+Pyfort is an older alternative, but not supported any longer.
+Fwrap is a newer project that looked promising but isn't being developed any
+longer.
+
+Interfacing to C++:
+-------------------
+ 1) Cython
+ 2) CXX
+ 3) Boost.python
+ 4) SWIG
+ 5) SIP (used mainly in PyQT)
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/misc.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/misc.pyc
new file mode 100644
index 0000000..6b1b8f9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/misc.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/structured_arrays.py b/project/venv/lib/python2.7/site-packages/numpy/doc/structured_arrays.py
new file mode 100644
index 0000000..e92a061
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/doc/structured_arrays.py
@@ -0,0 +1,639 @@
+"""
+=================
+Structured Arrays
+=================
+
+Introduction
+============
+
+Structured arrays are ndarrays whose datatype is a composition of simpler
+datatypes organized as a sequence of named :term:`fields <field>`. For example,
+::
+
+ >>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
+ ...              dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
+ >>> x
+ array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
+       dtype=[('name', 'S10'), ('age', '<i4'), ('weight', '<f4')])
+
+Here ``x`` is a one-dimensional array of length two whose datatype is a
+structure with three fields: 1. A string of length 10 or less named 'name', 2.
+a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'.
+
+If you index ``x`` at position 1 you get a structure::
+
+ >>> x[1]
+ ('Fido', 3, 27.0)
+
+You can access and modify individual fields of a structured array by indexing
+with the field name::
+
+ >>> x['age']
+ array([9, 3], dtype=int32)
+ >>> x['age'] = 5
+ >>> x
+ array([('Rex', 5, 81.0), ('Fido', 5, 27.0)],
+       dtype=[('name', 'S10'), ('age', '<i4'), ('weight', '<f4')])
+
+Structured datatypes are designed to be able to mimic 'structs' in the C
+language, and share a similar memory layout. They are meant for interfacing
+with C code and for low-level manipulation of structured buffers, for example
+for interpreting binary blobs. For these purposes they support specialized
+features such as subarrays, nested datatypes, and unions, and allow control
+over the memory layout of the structure.
+
+Users looking to manipulate tabular data, such as stored in csv files, may find
+other pydata projects more suitable, such as xarray, pandas, or DataArray.
+These provide a high-level interface for tabular data analysis and are better
+optimized for that use. For instance, the C-struct-like memory layout of
+structured arrays in numpy can lead to poor cache behavior in comparison.
+
+Structured Datatypes
+====================
+
+A structured datatype can be thought of as a sequence of bytes of a certain
+length (the structure's :term:`itemsize`) which is interpreted as a collection
+of fields. Each field has a name, a datatype, and a byte offset within the
+structure. The datatype of a field may be any numpy datatype, including other
+structured datatypes, and it may also be a subarray type which behaves like an
+ndarray of a specified shape.
+
+Structured Datatype Creation
+----------------------------
+
+Structured datatypes may be created using the function :func:`numpy.dtype`.
+There are 4 alternative forms of specification which vary in flexibility and
+conciseness. These are further documented in the
+:ref:`Data Type Objects <arrays.dtypes.constructing>` reference page, and in
+summary they are:
+
+1. A list of tuples, one tuple per field
+
+   Each tuple has the form ``(fieldname, datatype, shape)`` where shape is
+   optional. ``fieldname`` is a string (or tuple if titles are used, see
+   :ref:`Field Titles <titles>` below), ``datatype`` may be any object
+   convertible to a datatype, and ``shape`` is a tuple of integers specifying
+   subarray shape.
+
+    >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2,2))])
+    dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
+
+   If ``fieldname`` is the empty string ``''``, the field will be given a
+   default name of the form ``f#``, where ``#`` is the integer index of the
+   field, counting from 0 from the left::
+
+    >>> np.dtype([('x', 'f4'),('', 'i4'),('z', 'i8')])
+    dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
+
+2. A string of comma-separated dtype specifications
+
+   In this shorthand notation any of the simple datatype
+   :ref:`string formats <arrays.dtypes.constructing>` may be used in a string
+   and separated by commas. The itemsize and byte offsets of the fields are
+   determined automatically, and the field names are given the default names
+   ``f0``, ``f1``, etc. ::
+
+    >>> np.dtype('i8,f4,S3')
+    dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
+    >>> np.dtype('3int8, float32, (2,3)float64')
+    dtype([('f0', 'i1', 3), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
+
+3. A dictionary of field parameter arrays
+
+   This is the most flexible form of specification since it allows control
+   over the byte-offsets of the fields and the itemsize of the structure.
+
+   The dictionary has two required keys, 'names' and 'formats', and four
+   optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'. The values
+   for 'names' and 'formats' should respectively be a list of field names and
+   a list of dtype specifications, of the same length. The optional 'offsets'
+   value should be a list of integer byte-offsets, one for each field within
+   the structure. If 'offsets' is not given the offsets are determined
+   automatically. The optional 'itemsize' value should be an integer
+   describing the total size in bytes of the dtype, which must be large
+   enough to contain all the fields. ::
+
+    >>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4','f4']})
+    dtype([('col1', '<i4'), ('col2', '<f4')])
+    >>> np.dtype({'names': ['col1', 'col2'],
+    ...           'formats': ['i4','f4'],
+    ...           'offsets': [0, 4],
+    ...           'itemsize': 12})
+    dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
+
+   The optional 'titles' value should be a list of titles of the same length
+   as 'names', see :ref:`Field Titles <titles>` below.
+
+4. A dictionary of field names
+
+   The use of this form of specification is discouraged, but documented here
+   because older numpy code may use it.
+   The keys of the dictionary are the
+   field names and the values are tuples specifying type and offset::
+
+    >>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)})
+    dtype([(('col1'), 'i1'), (('col2'), '>f4')])
+
+   This form is discouraged because Python dictionaries do not preserve order
+   in Python versions before Python 3.6, and the order of the fields in a
+   structured dtype has meaning. :ref:`Field Titles <titles>` may be
+   specified by using a 3-tuple, see below.
+
+Manipulating and Displaying Structured Datatypes
+------------------------------------------------
+
+The list of field names of a structured datatype can be found in the ``names``
+attribute of the dtype object::
+
+    >>> d = np.dtype([('x', 'i8'), ('y', 'f4')])
+    >>> d.names
+    ('x', 'y')
+
+The field names may be modified by assigning to the ``names`` attribute using a
+sequence of strings of the same length.
+
+The dtype object also has a dictionary-like attribute, ``fields``, whose keys
+are the field names (and :ref:`Field Titles <titles>`, see below) and whose
+values are tuples containing the dtype and byte offset of each field. ::
+
+    >>> d.fields
+    mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
+
+Both the ``names`` and ``fields`` attributes will equal ``None`` for
+unstructured arrays. The recommended way to test if a dtype is structured is
+with `if dt.names is not None` rather than `if dt.names`, to account for dtypes
+with 0 fields.
+
+The string representation of a structured datatype is shown in the "list of
+tuples" form if possible, otherwise numpy falls back to using the more general
+dictionary form.
+
+.. _offsets-and-alignment:
+
+Automatic Byte Offsets and Alignment
+------------------------------------
+
+Numpy uses one of two methods to automatically determine the field byte offsets
+and the overall itemsize of a structured datatype, depending on whether
+``align=True`` was specified as a keyword argument to :func:`numpy.dtype`.
+
+By default (``align=False``), numpy will pack the fields together such that
+each field starts at the byte offset where the previous field ended, and the
+fields are contiguous in memory. ::
+
+    >>> def print_offsets(d):
+    ...     print("offsets:", [d.fields[name][1] for name in d.names])
+    ...     print("itemsize:", d.itemsize)
+    >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2'))
+    offsets: [0, 1, 2, 6, 7, 15]
+    itemsize: 17
+
+If ``align=True`` is set, numpy will pad the structure in the same way many C
+compilers would pad a C-struct. Aligned structures can give a performance
+improvement in some cases, at the cost of increased datatype size. Padding
+bytes are inserted between fields such that each field's byte offset will be a
+multiple of that field's alignment, which is usually equal to the field's size
+in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The
+structure will also have trailing padding added so that its itemsize is a
+multiple of the largest field's alignment. ::
+
+    >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2', align=True))
+    offsets: [0, 1, 4, 8, 16, 24]
+    itemsize: 32
+
+Note that although almost all modern C compilers pad in this way by default,
+padding in C structs is C-implementation-dependent so this memory layout is not
+guaranteed to exactly match that of a corresponding struct in a C program. Some
+work may be needed, either on the numpy side or the C side, to obtain exact
+correspondence.
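+
+One way to spot-check that correspondence on a particular platform is to
+compare numpy's aligned itemsize against ``ctypes.sizeof`` for an equivalent
+struct. This is an illustrative sketch of our own; the field layout is a
+made-up example: ::
+
+    >>> import ctypes
+    >>> class S(ctypes.Structure):
+    ...     _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_int32)]
+    >>> ctypes.sizeof(S)                        # 1 byte + 3 padding + 4
+    8
+    >>> np.dtype('u1,i4', align=True).itemsize  # numpy pads the same way here
+    8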
+ +If offsets were specified using the optional ``offsets`` key in the +dictionary-based dtype specification, setting ``align=True`` will check that +each field's offset is a multiple of its size and that the itemsize is a +multiple of the largest field size, and raise an exception if not. + +If the offsets of the fields and itemsize of a structured array satisfy the +alignment conditions, the array will have the ``ALIGNED`` :ref:`flag +` set. + +A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an +aligned dtype or array to a packed one and vice versa. It takes either a dtype +or structured ndarray as an argument, and returns a copy with fields re-packed, +with or without padding bytes. + +.. _titles: + +Field Titles +------------ + +In addition to field names, fields may also have an associated :term:`title`, +an alternate name, which is sometimes used as an additional description or +alias for the field. The title may be used to index an array, just like a +field name. + +To add titles when using the list-of-tuples form of dtype specification, the +field name may be specified as a tuple of two strings instead of a single +string, which will be the field's title and field name respectively. For +example:: + + >>> np.dtype([(('my title', 'name'), 'f4')]) + +When using the first form of dictionary-based specification, the titles may be +supplied as an extra ``'titles'`` key as described above. When using the second +(discouraged) dictionary-based specification, the title can be supplied by +providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual +2-element tuple:: + + >>> np.dtype({'name': ('i4', 0, 'my title')}) + +The ``dtype.fields`` dictionary will contain :term:`titles` as keys, if any +titles are used. This means effectively that a field with a title will be +represented twice in the fields dictionary. The tuple values for these fields +will also have a third element, the field title. Because of this, and because +the ``names`` attribute preserves the field order while the ``fields`` +attribute may not, it is recommended to iterate through the fields of a dtype +using the ``names`` attribute of the dtype, which will not list titles, as +in:: + + >>> for name in d.names: + ... print(d.fields[name][:2]) + +Union types +----------- + +Structured datatypes are implemented in numpy to have base type +:class:`numpy.void` by default, but it is possible to interpret other numpy +types as structured types using the ``(base_dtype, dtype)`` form of dtype +specification described in +:ref:`Data Type Objects `. Here, ``base_dtype`` is +the desired underlying dtype, and fields and flags will be copied from +``dtype``. This dtype is similar to a 'union' in C. + +Indexing and Assignment to Structured arrays +============================================ + +Assigning data to a Structured Array +------------------------------------ + +There are a number of ways to assign values to a structured array: Using python +tuples, using scalar values, or using other structured arrays. + +Assignment from Python Native Types (Tuples) +```````````````````````````````````````````` + +The simplest way to assign values to a structured array is using python tuples. +Each assigned value should be a tuple of length equal to the number of fields +in the array, and not a list or array as these will trigger numpy's +broadcasting rules. 
+The tuple's elements are assigned to the successive fields
+of the array, from left to right::
+
+    >>> x = np.array([(1,2,3),(4,5,6)], dtype='i8,f4,f8')
+    >>> x[1] = (7,8,9)
+    >>> x
+    array([(1, 2., 3.), (7, 8., 9.)],
+          dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
+
+Assignment from Scalars
+```````````````````````
+
+A scalar assigned to a structured element will be assigned to all fields. This
+happens when a scalar is assigned to an array slice. ::
+
+    >>> x = np.zeros(2, dtype='i8,f4,?,S1')
+    >>> x[:] = 3
+    >>> x
+    array([(3, 3.0, True, b'3'), (3, 3.0, True, b'3')],
+          dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
+    >>> x[:] = np.arange(2)
+    >>> x
+    array([(0, 0.0, False, b'0'), (1, 1.0, True, b'1')],
+          dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
+
+Structured arrays can also be assigned to unstructured arrays, but only if the
+structured datatype has just a single field::
+
+    >>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')])
+    >>> onefield = np.zeros(2, dtype=[('A', 'i4')])
+    >>> nostruct = np.zeros(2, dtype='i4')
+    >>> nostruct[:] = twofield
+    ValueError: Can't cast from structure to non-structure, except if the structure only has a single field.
+    >>> nostruct[:] = onefield
+    >>> nostruct
+    array([0, 0], dtype=int32)
+
+Assignment from other Structured Arrays
+```````````````````````````````````````
+
+Assignment between two structured arrays occurs as if the source elements had
+been converted to tuples and then assigned to the destination elements. That
+is, the first field of the source array is assigned to the first field of the
+destination array, and the second field likewise, and so on, regardless of
+field names. Structured arrays with a different number of fields cannot be
+assigned to each other. Bytes of the destination structure which are not
+included in any of the fields are unaffected. ::
+
+    >>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')])
+    >>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')])
+    >>> b[:] = a
+    >>> b
+    array([(0.0, b'0.0', b''), (0.0, b'0.0', b''), (0.0, b'0.0', b'')],
+          dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')])
+
+Indexing Structured Arrays
+==========================
+
+Accessing Individual Fields
+---------------------------
+
+Individual fields of a structured array may be accessed and modified by
+indexing the array with the field name. ::
+
+    >>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
+    >>> x['foo']
+    array([1, 3])
+    >>> x['foo'] = 10
+    >>> x
+    array([(10, 2.), (10, 4.)],
+          dtype=[('foo', '<i8'), ('bar', '<f4')])
+
+The resulting array is a view into the original array. It shares the same
+memory locations, and writing to the view modifies the original array. ::
+
+    >>> y = x['bar']
+    >>> y[:] = 10.5
+    >>> x
+    array([(10, 10.5), (10, 10.5)],
+          dtype=[('foo', '<i8'), ('bar', '<f4')])
+
+This view has the same dtype and itemsize as the indexed field, so it is
+typically a non-structured array. ::
+
+    >>> y.dtype, y.shape, y.strides
+    (dtype('float32'), (2,), (12,))
+
+If the accessed field is a subarray, the dimensions of the subarray
+are appended to the shape of the result::
+
+    >>> x = np.zeros((2,2), dtype=[('a', np.int32), ('b', np.float64, (3,3))])
+    >>> x['a'].shape
+    (2, 2)
+    >>> x['b'].shape
+    (2, 2, 3, 3)
+
+Accessing Multiple Fields
+`````````````````````````
+
+One can index and assign to a structured array with a multi-field index, where
+the index is a list of field names.
+
+.. warning::
+   The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16.
+
+The result of indexing with a multi-field index is a view into the original
+array, as follows::
+
+    >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
+    >>> a[['a', 'c']]
+    array([(0, 0.), (0, 0.), (0, 0.)],
+         dtype={'names':['a','c'], 'formats':['<i4','<f4'], 'offsets':[0,8], 'itemsize':12})
+
+Assignment to the view modifies the original array. The view's fields will be
+in the order they were indexed. Note that unlike for single-field indexing, the
+datatype of the view has the same itemsize as the original array, and has
+fields at the same offsets as in the original array, and unindexed fields are
+merely missing.
+
+.. warning::
+   In Numpy 1.15, indexing an array with a multi-field index returned a copy of
+   the result above, but with fields packed together in memory as if passed
+   through :func:`numpy.lib.recfunctions.repack_fields`. Code which depends on
+   the packed layout, such as:
+
+   >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
+   >>> a[['a','c']].view('i8')  # Fails in Numpy 1.16
+   ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
+
+   will need to be changed. This code has raised a ``FutureWarning`` since
+   Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7.
+
+   In 1.16 a number of functions have been introduced in the
+   :module:`numpy.lib.recfunctions` module to help users account for this
+   change. These are
+   :func:`numpy.lib.recfunctions.repack_fields`,
+ :func:`numpy.lib.recfunctions.structured_to_unstructured`, + :func:`numpy.lib.recfunctions.unstructured_to_structured`, + :func:`numpy.lib.recfunctions.apply_along_fields`, + :func:`numpy.lib.recfunctions.assign_fields_by_name`, and + :func:`numpy.lib.recfunctions.require_fields`. + + The function :func:`numpy.lib.recfunctions.repack_fields` can always be + used to reproduce the old behavior, as it will return a packed copy of the + structured array. The code above, for example, can be replaced with: + + >>> repack_fields(a[['a','c']]).view('i8') # supported in 1.16 + array([0, 0, 0]) + + Furthermore, numpy now provides a new function + :func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer + and more efficient alternative for users who wish to convert structured + arrays to unstructured arrays, as the view above is often indeded to do. + This function allows safe conversion to an unstructured type taking into + account padding, often avoids a copy, and also casts the datatypes + as needed, unlike the view. Code such as: + + >>> a = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')]) + >>> a[['x', 'z']].view('f4') + + can be made safer by replacing with: + + >>> structured_to_unstructured(a[['x', 'z']]) + array([0, 0, 0]) + + +Assignment to an array with a multi-field index modifies the original array:: + + >>> a[['a', 'c']] = (2, 3) + >>> a + array([(2, 0, 3.0), (2, 0, 3.0), (2, 0, 3.0)], + dtype=[('a', '>> a[['a', 'c']] = a[['c', 'a']] + +Indexing with an Integer to get a Structured Scalar +``````````````````````````````````````````````````` + +Indexing a single element of a structured array (with an integer index) returns +a structured scalar:: + + >>> x = np.array([(1, 2., 3.)], dtype='i,f,f') + >>> scalar = x[0] + >>> scalar + (1, 2., 3.) + >>> type(scalar) + numpy.void + +Unlike other numpy scalars, structured scalars are mutable and act like views +into the original array, such that modifying the scalar will modify the +original array. Structured scalars also support access and assignment by field +name:: + + >>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')]) + >>> s = x[0] + >>> s['bar'] = 100 + >>> x + array([(1, 100.), (3, 4.)], + dtype=[('foo', '>> scalar = np.array([(1, 2., 3.)], dtype='i,f,f')[0] + >>> scalar[0] + 1 + >>> scalar[1] = 4 + +Thus, tuples might be thought of as the native Python equivalent to numpy's +structured types, much like native python integers are the equivalent to +numpy's integer types. Structured scalars may be converted to a tuple by +calling :func:`ndarray.item`:: + + >>> scalar.item(), type(scalar.item()) + ((1, 2.0, 3.0), tuple) + +Viewing Structured Arrays Containing Objects +-------------------------------------------- + +In order to prevent clobbering object pointers in fields of +:class:`numpy.object` type, numpy currently does not allow views of structured +arrays containing objects. + +Structure Comparison +-------------------- + +If the dtypes of two void structured arrays are equal, testing the equality of +the arrays will result in a boolean array with the dimensions of the original +arrays, with elements set to ``True`` where all fields of the corresponding +structures are equal. 
Structured dtypes are equal if the field names, +dtypes and titles are the same, ignoring endianness, and the fields are in +the same order:: + + >>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')]) + >>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')]) + >>> a == b + array([False, False]) + +Currently, if the dtypes of two void structured arrays are not equivalent the +comparison fails, returning the scalar value ``False``. This behavior is +deprecated as of numpy 1.10 and will raise an error or perform elementwise +comparison in the future. + +The ``<`` and ``>`` operators always return ``False`` when comparing void +structured arrays, and arithmetic and bitwise operations are not supported. + +Record Arrays +============= + +As an optional convenience numpy provides an ndarray subclass, +:class:`numpy.recarray`, and associated helper functions in the +:mod:`numpy.rec` submodule, that allows access to fields of structured arrays +by attribute instead of only by index. Record arrays also use a special +datatype, :class:`numpy.record`, that allows field access by attribute on the +structured scalars obtained from the array. + +The simplest way to create a record array is with :func:`numpy.rec.array`:: + + >>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")], + ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')]) + >>> recordarr.bar + array([ 2., 3.], dtype=float32) + >>> recordarr[1:2] + rec.array([(2, 3.0, 'World')], + dtype=[('foo', '>> recordarr[1:2].foo + array([2], dtype=int32) + >>> recordarr.foo[1:2] + array([2], dtype=int32) + >>> recordarr[1].baz + 'World' + +:func:`numpy.rec.array` can convert a wide variety of arguments into record +arrays, including structured arrays:: + + >>> arr = array([(1,2.,'Hello'),(2,3.,"World")], + ... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')]) + >>> recordarr = np.rec.array(arr) + +The :mod:`numpy.rec` module provides a number of other convenience functions for +creating record arrays, see :ref:`record array creation routines +`. + +A record array representation of a structured array can be obtained using the +appropriate :ref:`view`:: + + >>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")], + ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')]) + >>> recordarr = arr.view(dtype=dtype((np.record, arr.dtype)), + ... type=np.recarray) + +For convenience, viewing an ndarray as type :class:`np.recarray` will +automatically convert to :class:`np.record` datatype, so the dtype can be left +out of the view:: + + >>> recordarr = arr.view(np.recarray) + >>> recordarr.dtype + dtype((numpy.record, [('foo', '>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray) + +Record array fields accessed by index or by attribute are returned as a record +array if the field has a structured type but as a plain ndarray otherwise. :: + + >>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))], + ... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])]) + >>> type(recordarr.foo) + + >>> type(recordarr.bar) + + +Note that if a field has the same name as an ndarray attribute, the ndarray +attribute takes precedence. Such fields will be inaccessible by attribute but +will still be accessible by index. 
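+
+For example (a short sketch of our own; the colliding field name is a
+made-up illustration): ::
+
+    >>> badrec = np.rec.array([(1., 2.)],
+    ...                       dtype=[('shape', 'f4'), ('val', 'f4')])
+    >>> badrec.shape             # the ndarray attribute wins
+    (1,)
+    >>> badrec['shape']          # the field is still reachable by index
+    array([1.], dtype=float32)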
+ +""" +from __future__ import division, absolute_import, print_function diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/structured_arrays.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/structured_arrays.pyc new file mode 100644 index 0000000..995cbd1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/structured_arrays.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/subclassing.py b/project/venv/lib/python2.7/site-packages/numpy/doc/subclassing.py new file mode 100644 index 0000000..4b98389 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/doc/subclassing.py @@ -0,0 +1,752 @@ +"""============================= +Subclassing ndarray in python +============================= + +Introduction +------------ + +Subclassing ndarray is relatively simple, but it has some complications +compared to other Python objects. On this page we explain the machinery +that allows you to subclass ndarray, and the implications for +implementing a subclass. + +ndarrays and object creation +============================ + +Subclassing ndarray is complicated by the fact that new instances of +ndarray classes can come about in three different ways. These are: + +#. Explicit constructor call - as in ``MySubClass(params)``. This is + the usual route to Python instance creation. +#. View casting - casting an existing ndarray as a given subclass +#. New from template - creating a new instance from a template + instance. Examples include returning slices from a subclassed array, + creating return types from ufuncs, and copying arrays. See + :ref:`new-from-template` for more details + +The last two are characteristics of ndarrays - in order to support +things like array slicing. The complications of subclassing ndarray are +due to the mechanisms numpy has to support these latter two routes of +instance creation. + +.. _view-casting: + +View casting +------------ + +*View casting* is the standard ndarray mechanism by which you take an +ndarray of any subclass, and return a view of the array as another +(specified) subclass: + +>>> import numpy as np +>>> # create a completely useless ndarray subclass +>>> class C(np.ndarray): pass +>>> # create a standard ndarray +>>> arr = np.zeros((3,)) +>>> # take a view of it, as our useless subclass +>>> c_arr = arr.view(C) +>>> type(c_arr) + + +.. _new-from-template: + +Creating new from template +-------------------------- + +New instances of an ndarray subclass can also come about by a very +similar mechanism to :ref:`view-casting`, when numpy finds it needs to +create a new instance from a template instance. The most obvious place +this has to happen is when you are taking slices of subclassed arrays. +For example: + +>>> v = c_arr[1:] +>>> type(v) # the view is of type 'C' + +>>> v is c_arr # but it's a new instance +False + +The slice is a *view* onto the original ``c_arr`` data. So, when we +take a view from the ndarray, we return a new ndarray, of the same +class, that points to the data in the original. + +There are other points in the use of ndarrays where we need such views, +such as copying arrays (``c_arr.copy()``), creating ufunc output arrays +(see also :ref:`array-wrap`), and reducing methods (like +``c_arr.mean()``. + +Relationship of view casting and new-from-template +-------------------------------------------------- + +These paths both use the same machinery. We make the distinction here, +because they result in different input to your methods. 
Specifically, +:ref:`view-casting` means you have created a new instance of your array +type from any potential subclass of ndarray. :ref:`new-from-template` +means you have created a new instance of your class from a pre-existing +instance, allowing you - for example - to copy across attributes that +are particular to your subclass. + +Implications for subclassing +---------------------------- + +If we subclass ndarray, we need to deal not only with explicit +construction of our array type, but also :ref:`view-casting` or +:ref:`new-from-template`. NumPy has the machinery to do this, and this +machinery that makes subclassing slightly non-standard. + +There are two aspects to the machinery that ndarray uses to support +views and new-from-template in subclasses. + +The first is the use of the ``ndarray.__new__`` method for the main work +of object initialization, rather then the more usual ``__init__`` +method. The second is the use of the ``__array_finalize__`` method to +allow subclasses to clean up after the creation of views and new +instances from templates. + +A brief Python primer on ``__new__`` and ``__init__`` +===================================================== + +``__new__`` is a standard Python method, and, if present, is called +before ``__init__`` when we create a class instance. See the `python +__new__ documentation +`_ for more detail. + +For example, consider the following Python code: + +.. testcode:: + + class C(object): + def __new__(cls, *args): + print('Cls in __new__:', cls) + print('Args in __new__:', args) + return object.__new__(cls, *args) + + def __init__(self, *args): + print('type(self) in __init__:', type(self)) + print('Args in __init__:', args) + +meaning that we get: + +>>> c = C('hello') +Cls in __new__: +Args in __new__: ('hello',) +type(self) in __init__: +Args in __init__: ('hello',) + +When we call ``C('hello')``, the ``__new__`` method gets its own class +as first argument, and the passed argument, which is the string +``'hello'``. After python calls ``__new__``, it usually (see below) +calls our ``__init__`` method, with the output of ``__new__`` as the +first argument (now a class instance), and the passed arguments +following. + +As you can see, the object can be initialized in the ``__new__`` +method or the ``__init__`` method, or both, and in fact ndarray does +not have an ``__init__`` method, because all the initialization is +done in the ``__new__`` method. + +Why use ``__new__`` rather than just the usual ``__init__``? Because +in some cases, as for ndarray, we want to be able to return an object +of some other class. Consider the following: + +.. testcode:: + + class D(C): + def __new__(cls, *args): + print('D cls is:', cls) + print('D args in __new__:', args) + return C.__new__(C, *args) + + def __init__(self, *args): + # we never get here + print('In D __init__') + +meaning that: + +>>> obj = D('hello') +D cls is: +D args in __new__: ('hello',) +Cls in __new__: +Args in __new__: ('hello',) +>>> type(obj) + + +The definition of ``C`` is the same as before, but for ``D``, the +``__new__`` method returns an instance of class ``C`` rather than +``D``. Note that the ``__init__`` method of ``D`` does not get +called. In general, when the ``__new__`` method returns an object of +class other than the class in which it is defined, the ``__init__`` +method of that class is not called. + +This is how subclasses of the ndarray class are able to return views +that preserve the class type. 
When taking a view, the standard +ndarray machinery creates the new ndarray object with something +like:: + + obj = ndarray.__new__(subtype, shape, ... + +where ``subdtype`` is the subclass. Thus the returned view is of the +same class as the subclass, rather than being of class ``ndarray``. + +That solves the problem of returning views of the same type, but now +we have a new problem. The machinery of ndarray can set the class +this way, in its standard methods for taking views, but the ndarray +``__new__`` method knows nothing of what we have done in our own +``__new__`` method in order to set attributes, and so on. (Aside - +why not call ``obj = subdtype.__new__(...`` then? Because we may not +have a ``__new__`` method with the same call signature). + +The role of ``__array_finalize__`` +================================== + +``__array_finalize__`` is the mechanism that numpy provides to allow +subclasses to handle the various ways that new instances get created. + +Remember that subclass instances can come about in these three ways: + +#. explicit constructor call (``obj = MySubClass(params)``). This will + call the usual sequence of ``MySubClass.__new__`` then (if it exists) + ``MySubClass.__init__``. +#. :ref:`view-casting` +#. :ref:`new-from-template` + +Our ``MySubClass.__new__`` method only gets called in the case of the +explicit constructor call, so we can't rely on ``MySubClass.__new__`` or +``MySubClass.__init__`` to deal with the view casting and +new-from-template. It turns out that ``MySubClass.__array_finalize__`` +*does* get called for all three methods of object creation, so this is +where our object creation housekeeping usually goes. + +* For the explicit constructor call, our subclass will need to create a + new ndarray instance of its own class. In practice this means that + we, the authors of the code, will need to make a call to + ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to + ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an + existing array (see below) +* For view casting and new-from-template, the equivalent of + ``ndarray.__new__(MySubClass,...`` is called, at the C level. + +The arguments that ``__array_finalize__`` receives differ for the three +methods of instance creation above. + +The following code allows us to look at the call sequences and arguments: + +.. 
testcode:: + + import numpy as np + + class C(np.ndarray): + def __new__(cls, *args, **kwargs): + print('In __new__ with class %s' % cls) + return super(C, cls).__new__(cls, *args, **kwargs) + + def __init__(self, *args, **kwargs): + # in practice you probably will not need or want an __init__ + # method for your subclass + print('In __init__ with class %s' % self.__class__) + + def __array_finalize__(self, obj): + print('In array_finalize:') + print(' self type is %s' % type(self)) + print(' obj type is %s' % type(obj)) + + +Now: + +>>> # Explicit constructor +>>> c = C((10,)) +In __new__ with class +In array_finalize: + self type is + obj type is +In __init__ with class +>>> # View casting +>>> a = np.arange(10) +>>> cast_a = a.view(C) +In array_finalize: + self type is + obj type is +>>> # Slicing (example of new-from-template) +>>> cv = c[:1] +In array_finalize: + self type is + obj type is + +The signature of ``__array_finalize__`` is:: + + def __array_finalize__(self, obj): + +One sees that the ``super`` call, which goes to +``ndarray.__new__``, passes ``__array_finalize__`` the new object, of our +own class (``self``) as well as the object from which the view has been +taken (``obj``). As you can see from the output above, the ``self`` is +always a newly created instance of our subclass, and the type of ``obj`` +differs for the three instance creation methods: + +* When called from the explicit constructor, ``obj`` is ``None`` +* When called from view casting, ``obj`` can be an instance of any + subclass of ndarray, including our own. +* When called in new-from-template, ``obj`` is another instance of our + own subclass, that we might use to update the new ``self`` instance. + +Because ``__array_finalize__`` is the only method that always sees new +instances being created, it is the sensible place to fill in instance +defaults for new object attributes, among other tasks. + +This may be clearer with an example. + +Simple example - adding an extra attribute to ndarray +----------------------------------------------------- + +.. testcode:: + + import numpy as np + + class InfoArray(np.ndarray): + + def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + strides=None, order=None, info=None): + # Create the ndarray instance of our type, given the usual + # ndarray input arguments. This will call the standard + # ndarray constructor, but return an object of our type. + # It also triggers a call to InfoArray.__array_finalize__ + obj = super(InfoArray, subtype).__new__(subtype, shape, dtype, + buffer, offset, strides, + order) + # set the new 'info' attribute to the value passed + obj.info = info + # Finally, we must return the newly created object: + return obj + + def __array_finalize__(self, obj): + # ``self`` is a new object resulting from + # ndarray.__new__(InfoArray, ...), therefore it only has + # attributes that the ndarray.__new__ constructor gave it - + # i.e. those of a standard ndarray. + # + # We could have got to the ndarray.__new__ call in 3 ways: + # From an explicit constructor - e.g. 
InfoArray(): + # obj is None + # (we're in the middle of the InfoArray.__new__ + # constructor, and self.info will be set when we return to + # InfoArray.__new__) + if obj is None: return + # From view casting - e.g arr.view(InfoArray): + # obj is arr + # (type(obj) can be InfoArray) + # From new-from-template - e.g infoarr[:3] + # type(obj) is InfoArray + # + # Note that it is here, rather than in the __new__ method, + # that we set the default value for 'info', because this + # method sees all creation of default objects - with the + # InfoArray.__new__ constructor, but also with + # arr.view(InfoArray). + self.info = getattr(obj, 'info', None) + # We do not need to return anything + + +Using the object looks like this: + + >>> obj = InfoArray(shape=(3,)) # explicit constructor + >>> type(obj) + + >>> obj.info is None + True + >>> obj = InfoArray(shape=(3,), info='information') + >>> obj.info + 'information' + >>> v = obj[1:] # new-from-template - here - slicing + >>> type(v) + + >>> v.info + 'information' + >>> arr = np.arange(10) + >>> cast_arr = arr.view(InfoArray) # view casting + >>> type(cast_arr) + + >>> cast_arr.info is None + True + +This class isn't very useful, because it has the same constructor as the +bare ndarray object, including passing in buffers and shapes and so on. +We would probably prefer the constructor to be able to take an already +formed ndarray from the usual numpy calls to ``np.array`` and return an +object. + +Slightly more realistic example - attribute added to existing array +------------------------------------------------------------------- + +Here is a class that takes a standard ndarray that already exists, casts +as our type, and adds an extra attribute. + +.. testcode:: + + import numpy as np + + class RealisticInfoArray(np.ndarray): + + def __new__(cls, input_array, info=None): + # Input array is an already formed ndarray instance + # We first cast to be our class type + obj = np.asarray(input_array).view(cls) + # add the new attribute to the created instance + obj.info = info + # Finally, we must return the newly created object: + return obj + + def __array_finalize__(self, obj): + # see InfoArray.__array_finalize__ for comments + if obj is None: return + self.info = getattr(obj, 'info', None) + + +So: + + >>> arr = np.arange(5) + >>> obj = RealisticInfoArray(arr, info='information') + >>> type(obj) + + >>> obj.info + 'information' + >>> v = obj[1:] + >>> type(v) + + >>> v.info + 'information' + +.. _array-ufunc: + +``__array_ufunc__`` for ufuncs +------------------------------ + + .. versionadded:: 1.13 + +A subclass can override what happens when executing numpy ufuncs on it by +overriding the default ``ndarray.__array_ufunc__`` method. This method is +executed *instead* of the ufunc and should return either the result of the +operation, or :obj:`NotImplemented` if the operation requested is not +implemented. + +The signature of ``__array_ufunc__`` is:: + + def __array_ufunc__(ufunc, method, *inputs, **kwargs): + + - *ufunc* is the ufunc object that was called. + - *method* is a string indicating how the Ufunc was called, either + ``"__call__"`` to indicate it was called directly, or one of its + :ref:`methods`: ``"reduce"``, ``"accumulate"``, + ``"reduceat"``, ``"outer"``, or ``"at"``. + - *inputs* is a tuple of the input arguments to the ``ufunc`` + - *kwargs* contains any optional or keyword arguments passed to the + function. This includes any ``out`` arguments, which are always + contained in a tuple. 
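+
+Before the fuller example below, a minimal sketch may help show the dispatch
+mechanics. The class here is our own invention for illustration, not part of
+numpy, and the error output shown is abbreviated:
+
+.. testcode::
+
+    import numpy as np
+
+    class Quiet(np.ndarray):
+        # Handle plain calls only; refusing the other methods hands the
+        # decision back to numpy, which turns NotImplemented into a TypeError.
+        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+            if method != '__call__':
+                return NotImplemented
+            # strip our subclass off the inputs and let ndarray do the work
+            args = [np.asarray(x) if isinstance(x, Quiet) else x
+                    for x in inputs]
+            return super(Quiet, self).__array_ufunc__(ufunc, method,
+                                                      *args, **kwargs)
+
+so that:
+
+>>> q = np.arange(3.).view(Quiet)
+>>> np.add(q, 1)          # plain call: handled, result is a bare ndarray
+array([1., 2., 3.])
+>>> np.add.reduce(q)      # 'reduce' is refused above, so numpy raises
+TypeError: operand type(s) all returned NotImplemented ...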
+
+A typical implementation would convert any inputs or outputs that are
+instances of one's own class, pass everything on to a superclass using
+``super()``, and finally return the results after possible
+back-conversion. An example, taken from the test case
+``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the
+following.
+
+.. testcode::
+
+    import numpy as np
+
+    class A(np.ndarray):
+        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+            args = []
+            in_no = []
+            for i, input_ in enumerate(inputs):
+                if isinstance(input_, A):
+                    in_no.append(i)
+                    args.append(input_.view(np.ndarray))
+                else:
+                    args.append(input_)
+
+            outputs = kwargs.pop('out', None)
+            out_no = []
+            if outputs:
+                out_args = []
+                for j, output in enumerate(outputs):
+                    if isinstance(output, A):
+                        out_no.append(j)
+                        out_args.append(output.view(np.ndarray))
+                    else:
+                        out_args.append(output)
+                kwargs['out'] = tuple(out_args)
+            else:
+                outputs = (None,) * ufunc.nout
+
+            info = {}
+            if in_no:
+                info['inputs'] = in_no
+            if out_no:
+                info['outputs'] = out_no
+
+            results = super(A, self).__array_ufunc__(ufunc, method,
+                                                     *args, **kwargs)
+            if results is NotImplemented:
+                return NotImplemented
+
+            if method == 'at':
+                if isinstance(inputs[0], A):
+                    inputs[0].info = info
+                return
+
+            if ufunc.nout == 1:
+                results = (results,)
+
+            results = tuple((np.asarray(result).view(A)
+                             if output is None else output)
+                            for result, output in zip(results, outputs))
+            if results and isinstance(results[0], A):
+                results[0].info = info
+
+            return results[0] if len(results) == 1 else results
+
+So, this class does not actually do anything interesting: it just
+converts any instances of its own to regular ndarray (otherwise, we'd
+get infinite recursion!), and adds an ``info`` dictionary that tells
+which inputs and outputs it converted. Hence, e.g.,
+
+>>> a = np.arange(5.).view(A)
+>>> b = np.sin(a)
+>>> b.info
+{'inputs': [0]}
+>>> b = np.sin(np.arange(5.), out=(a,))
+>>> b.info
+{'outputs': [0]}
+>>> a = np.arange(5.).view(A)
+>>> b = np.ones(1).view(A)
+>>> c = a + b
+>>> c.info
+{'inputs': [0, 1]}
+>>> a += b
+>>> a.info
+{'inputs': [0, 1], 'outputs': [0]}
+
+Note that another approach would be to use ``getattr(ufunc,
+method)(*inputs, **kwargs)`` instead of the ``super`` call. For this example,
+the result would be identical, but there is a difference if another operand
+also defines ``__array_ufunc__``. E.g., let's assume that we evaluate
+``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has
+an override. If you use ``super`` as in the example,
+``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which
+means it cannot evaluate the result itself. Thus, it will return
+`NotImplemented` and so will our class ``A``. Then, control will be passed
+over to ``b``, which either knows how to deal with us and produces a result,
+or does not and returns `NotImplemented`, raising a ``TypeError``.
+
+If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we
+effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__``
+will be called, but now it sees an ``ndarray`` as the other argument. Likely,
+it will know how to handle this, and return a new instance of the ``B`` class
+to us. Our example class is not set up to handle this, but it might well be
+the best approach if, e.g., one were to re-implement ``MaskedArray`` using
+``__array_ufunc__``.
+ +As a final note: if the ``super`` route is suited to a given class, an +advantage of using it is that it helps in constructing class hierarchies. +E.g., suppose that our other class ``B`` also used the ``super`` in its +``__array_ufunc__`` implementation, and we created a class ``C`` that depended +on both, i.e., ``class C(A, B)`` (with, for simplicity, not another +``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would +pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to +``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to +``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate. + +.. _array-wrap: + +``__array_wrap__`` for ufuncs and other functions +------------------------------------------------- + +Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using +``__array_wrap__`` and ``__array_prepare__``. These two allowed one to +change the output type of a ufunc, but, in contrast to +``__array_ufunc__``, did not allow one to make any changes to the inputs. +It is hoped to eventually deprecate these, but ``__array_wrap__`` is also +used by other numpy functions and methods, such as ``squeeze``, so at the +present time is still needed for full functionality. + +Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of +allowing a subclass to set the type of the return value and update +attributes and metadata. Let's show how this works with an example. First +we return to the simpler example subclass, but with a different name and +some print statements: + +.. testcode:: + + import numpy as np + + class MySubClass(np.ndarray): + + def __new__(cls, input_array, info=None): + obj = np.asarray(input_array).view(cls) + obj.info = info + return obj + + def __array_finalize__(self, obj): + print('In __array_finalize__:') + print(' self is %s' % repr(self)) + print(' obj is %s' % repr(obj)) + if obj is None: return + self.info = getattr(obj, 'info', None) + + def __array_wrap__(self, out_arr, context=None): + print('In __array_wrap__:') + print(' self is %s' % repr(self)) + print(' arr is %s' % repr(out_arr)) + # then just call the parent + return super(MySubClass, self).__array_wrap__(self, out_arr, context) + +We run a ufunc on an instance of our new array: + +>>> obj = MySubClass(np.arange(5), info='spam') +In __array_finalize__: + self is MySubClass([0, 1, 2, 3, 4]) + obj is array([0, 1, 2, 3, 4]) +>>> arr2 = np.arange(5)+1 +>>> ret = np.add(arr2, obj) +In __array_wrap__: + self is MySubClass([0, 1, 2, 3, 4]) + arr is array([1, 3, 5, 7, 9]) +In __array_finalize__: + self is MySubClass([1, 3, 5, 7, 9]) + obj is MySubClass([0, 1, 2, 3, 4]) +>>> ret +MySubClass([1, 3, 5, 7, 9]) +>>> ret.info +'spam' + +Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method +with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result +of the addition. In turn, the default ``__array_wrap__`` +(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``, +and called ``__array_finalize__`` - hence the copying of the ``info`` +attribute. This has all happened at the C level. + +But, we could do anything we wanted: + +.. 
testcode::
+
+    class SillySubClass(np.ndarray):
+
+        def __array_wrap__(self, arr, context=None):
+            return 'I lost your data'
+
+>>> arr1 = np.arange(5)
+>>> obj = arr1.view(SillySubClass)
+>>> arr2 = np.arange(5)
+>>> ret = np.multiply(obj, arr2)
+>>> ret
+'I lost your data'
+
+So, by defining a specific ``__array_wrap__`` method for our subclass,
+we can tweak the output from ufuncs. The ``__array_wrap__`` method
+requires ``self``, then an argument - which is the result of the ufunc -
+and an optional parameter *context*. This parameter is passed in by
+ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc,
+domain of the ufunc), but is not set by other numpy functions. Though
+it is possible to do otherwise, as seen above, ``__array_wrap__`` should
+normally return an instance of its containing class.  See the masked array
+subclass for an implementation.
+
+In addition to ``__array_wrap__``, which is called on the way out of the
+ufunc, there is also an ``__array_prepare__`` method which is called on
+the way into the ufunc, after the output arrays are created but before any
+computation has been performed. The default implementation does nothing
+but pass through the array. ``__array_prepare__`` should not attempt to
+access the array data or resize the array; it is intended for setting the
+output array type, updating attributes and metadata, and performing any
+checks based on the input that may be desired before computation begins.
+Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
+subclass thereof or raise an error.
+
+Extra gotchas - custom ``__del__`` methods and ndarray.base
+-----------------------------------------------------------
+
+One of the problems that ndarray solves is keeping track of memory
+ownership of ndarrays and their views. Consider the case where we have
+created an ndarray, ``arr``, and have taken a slice with ``v = arr[1:]``.
+The two objects are looking at the same memory. NumPy keeps track of
+where the data came from for a particular array or view, with the
+``base`` attribute:
+
+>>> # A normal ndarray, that owns its own data
+>>> arr = np.zeros((4,))
+>>> # In this case, base is None
+>>> arr.base is None
+True
+>>> # We take a view
+>>> v1 = arr[1:]
+>>> # base now points to the array that it derived from
+>>> v1.base is arr
+True
+>>> # Take a view of a view
+>>> v2 = v1[1:]
+>>> # base points to the view it derived from
+>>> v2.base is v1
+True
+
+In general, if the array owns its own memory, as for ``arr`` in this
+case, then ``arr.base`` will be None - there are some exceptions to this
+- see the numpy book for more details.
+
+The ``base`` attribute is useful in being able to tell whether we have
+a view or the original array. This in turn can be useful if we need
+to know whether or not to do some specific cleanup when the subclassed
+array is deleted. For example, we may only want to do the cleanup if
+the original array is deleted, but not the views. For an example of
+how this can work, have a look at the ``memmap`` class in
+``numpy.core``.
+
+Subclassing and Downstream Compatibility
+----------------------------------------
+
+When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray``
+interface, it is your responsibility to decide how aligned your APIs will be
+with those of numpy. 
For convenience, many numpy functions that have a corresponding
+``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking
+if the first argument to a function has a method of the same name. If it exists, the
+method is called instead of coercing the arguments to a numpy array.
+
+For example, if you want your sub-class or duck-type to be compatible with
+numpy's ``sum`` function, the method signature for this object's ``sum`` method
+should be the following:
+
+.. testcode::
+
+   def sum(self, axis=None, dtype=None, out=None, keepdims=False):
+   ...
+
+This is exactly the method signature used by ``np.sum``, so if a user calls
+``np.sum`` on this object, numpy will call the object's own ``sum`` method
+with the arguments enumerated above, and no error will be raised because
+the signatures are fully compatible with each other.
+
+If, however, you deviate from this signature, e.g.:
+
+.. testcode::
+
+   def sum(self, axis=None, dtype=None):
+   ...
+
+then the object is no longer compatible with ``np.sum``: calling ``np.sum``
+will pass in the unexpected arguments ``out`` and ``keepdims``, causing a
+``TypeError`` to be raised.
+
+If you wish to maintain compatibility with numpy and its subsequent versions (which
+might add new keyword arguments) but do not want to surface all of numpy's arguments,
+your function's signature should accept ``**kwargs``. For example:
+
+.. testcode::
+
+   def sum(self, axis=None, dtype=None, **unused_kwargs):
+   ...
+
+This object is now compatible with ``np.sum`` again because any extraneous arguments
+(i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the
+``**unused_kwargs`` parameter.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/subclassing.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/subclassing.pyc
new file mode 100644
index 0000000..f0025cb
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/subclassing.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/ufuncs.py b/project/venv/lib/python2.7/site-packages/numpy/doc/ufuncs.py
new file mode 100644
index 0000000..a112e55
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/doc/ufuncs.py
@@ -0,0 +1,138 @@
+"""
+===================
+Universal Functions
+===================
+
+Ufuncs are, generally speaking, mathematical functions or operations that are
+applied element-by-element to the contents of an array. That is, the result
+in each output array element only depends on the value in the corresponding
+input array (or arrays) and on no other array elements. NumPy comes with a
+large suite of ufuncs, and scipy extends that suite substantially. The simplest
+example is the addition operator: ::
+
+ >>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
+ array([1, 3, 2, 6])
+
+The ufunc module lists all the available ufuncs in numpy. Documentation on
+the specific ufuncs may be found in those modules. This documentation is
+intended to address the more general aspects of ufuncs common to most of
+them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
+have equivalent functions defined (e.g. add() for +).
+
+Type coercion
+=============
+
+What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of
+two different types? What is the type of the result? 
Typically, the result is
+the higher of the two types. For example: ::
+
+  float32 + float64 -> float64
+  int8 + int32 -> int32
+  int16 + float32 -> float32
+  float32 + complex64 -> complex64
+
+There are some less obvious cases, generally involving mixes of types
+(e.g. uints, ints and floats), where a result type of the same bit size
+as the inputs cannot preserve all the information in both. Some examples
+are int32 vs float32 or uint32 vs int32.
+Generally, the result is then a type of larger size than both inputs
+(if one is available). So: ::
+
+  int32 + float32 -> float64
+  uint32 + int32 -> int64
+
+Finally, the type coercion behavior when expressions involve Python
+scalars is different from that seen for arrays. Since Python has a
+limited number of types, combining a Python int with a dtype=np.int8
+array does not coerce to the higher type but instead, the type of the
+array prevails. So the rules for Python scalars combined with arrays are
+that the result will be that of the array equivalent of the Python scalar
+if the Python scalar is of a higher 'kind' than the array (e.g., float
+vs. int); otherwise the resultant type will be that of the array.
+For example: ::
+
+  Python int + int8 -> int8
+  Python float + int8 -> float64
+
+ufunc methods
+=============
+
+Binary ufuncs support 4 methods.
+
+**.reduce(arr)** applies the binary operator to elements of the array in
+  sequence. For example: ::
+
+ >>> np.add.reduce(np.arange(10))  # adds all elements of array
+ 45
+
+For multidimensional arrays, the first dimension is reduced by default: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5))
+ array([ 5,  7,  9, 11, 13])
+
+The axis keyword can be used to specify different axes to reduce: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
+ array([10, 35])
+
+**.accumulate(arr)** applies the binary operator and generates an
+equivalently shaped array that includes the accumulated amount for each
+element of the array. A couple of examples: ::
+
+ >>> np.add.accumulate(np.arange(10))
+ array([ 0,  1,  3,  6, 10, 15, 21, 28, 36, 45])
+ >>> np.multiply.accumulate(np.arange(1,9))
+ array([    1,     2,     6,    24,   120,   720,  5040, 40320])
+
+The behavior for multidimensional arrays is the same as for .reduce(),
+as is the use of the axis keyword.
+
+**.reduceat(arr,indices)** allows one to apply reduce to selected parts
+  of an array. It is a difficult method to understand. See the
+  documentation for details.
+
+**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
+  arr2. It will work on multidimensional arrays (the shape of the result is
+  the concatenation of the two input shapes): ::
+
+ >>> np.multiply.outer(np.arange(3),np.arange(4))
+ array([[0, 0, 0, 0],
+        [0, 1, 2, 3],
+        [0, 2, 4, 6]])
+
+Output arguments
+================
+
+All ufuncs accept an optional output array. The array must be of the expected
+output shape. Beware that if the output array is of a different (and lower)
+type than the computed result, the results may be silently truncated
+or otherwise corrupted in the downcast to the lower type. This facility is
+useful when one wants to avoid creating large temporary arrays by reusing
+the same array memory repeatedly (at the expense of not being able to
+use the more convenient operator notation in expressions). Note that when the
+output argument is used, the ufunc still returns a reference to the result.
+ + >>> x = np.arange(2) + >>> np.add(np.arange(2),np.arange(2.),x) + array([0, 2]) + >>> x + array([0, 2]) + +and & or as ufuncs +================== + +Invariably people try to use the python 'and' and 'or' as logical operators +(and quite understandably). But these operators do not behave as normal +operators since Python treats these quite differently. They cannot be +overloaded with array equivalents. Thus using 'and' or 'or' with an array +results in an error. There are two alternatives: + + 1) use the ufunc functions logical_and() and logical_or(). + 2) use the bitwise operators & and \\|. The drawback of these is that if + the arguments to these operators are not boolean arrays, the result is + likely incorrect. On the other hand, most usages of logical_and and + logical_or are with boolean arrays. As long as one is careful, this is + a convenient way to apply these operators. + +""" +from __future__ import division, absolute_import, print_function diff --git a/project/venv/lib/python2.7/site-packages/numpy/doc/ufuncs.pyc b/project/venv/lib/python2.7/site-packages/numpy/doc/ufuncs.pyc new file mode 100644 index 0000000..1a20a24 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/doc/ufuncs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/dual.py b/project/venv/lib/python2.7/site-packages/numpy/dual.py new file mode 100644 index 0000000..3a16a8e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/dual.py @@ -0,0 +1,71 @@ +""" +Aliases for functions which may be accelerated by Scipy. + +Scipy_ can be built to use accelerated or otherwise improved libraries +for FFTs, linear algebra, and special functions. This module allows +developers to transparently support these accelerated functions when +scipy is available but still support users who have only installed +NumPy. + +.. _Scipy : https://www.scipy.org + +""" +from __future__ import division, absolute_import, print_function + +# This module should be used for functions both in numpy and scipy if +# you want to use the numpy version if available but the scipy version +# otherwise. +# Usage --- from numpy.dual import fft, inv + +__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2', + 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals', + 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0'] + +import numpy.linalg as linpkg +import numpy.fft as fftpkg +from numpy.lib import i0 +import sys + + +fft = fftpkg.fft +ifft = fftpkg.ifft +fftn = fftpkg.fftn +ifftn = fftpkg.ifftn +fft2 = fftpkg.fft2 +ifft2 = fftpkg.ifft2 + +norm = linpkg.norm +inv = linpkg.inv +svd = linpkg.svd +solve = linpkg.solve +det = linpkg.det +eig = linpkg.eig +eigvals = linpkg.eigvals +eigh = linpkg.eigh +eigvalsh = linpkg.eigvalsh +lstsq = linpkg.lstsq +pinv = linpkg.pinv +cholesky = linpkg.cholesky + +_restore_dict = {} + +def register_func(name, func): + if name not in __all__: + raise ValueError("%s not a dual function." % name) + f = sys._getframe(0).f_globals + _restore_dict[name] = f[name] + f[name] = func + +def restore_func(name): + if name not in __all__: + raise ValueError("%s not a dual function." 
% name) + try: + val = _restore_dict[name] + except KeyError: + return + else: + sys._getframe(0).f_globals[name] = val + +def restore_all(): + for name in _restore_dict.keys(): + restore_func(name) diff --git a/project/venv/lib/python2.7/site-packages/numpy/dual.pyc b/project/venv/lib/python2.7/site-packages/numpy/dual.pyc new file mode 100644 index 0000000..f338ce6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/dual.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/__init__.py new file mode 100644 index 0000000..d146739 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/__init__.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +"""Fortran to Python Interface Generator. + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['run_main', 'compile', 'f2py_testing'] + +import sys +import subprocess +import os + +import numpy as np + +from . import f2py2e +from . import f2py_testing +from . import diagnose + +run_main = f2py2e.run_main +main = f2py2e.main + + +def compile(source, + modulename='untitled', + extra_args='', + verbose=True, + source_fn=None, + extension='.f' + ): + """ + Build extension module from a Fortran 77 source string with f2py. + + Parameters + ---------- + source : str or bytes + Fortran source of module / subroutine to compile + + .. versionchanged:: 1.16.0 + Accept str as well as bytes + + modulename : str, optional + The name of the compiled python module + extra_args : str or list, optional + Additional parameters passed to f2py + + .. versionchanged:: 1.16.0 + A list of args may also be provided. + + verbose : bool, optional + Print f2py output to screen + source_fn : str, optional + Name of the file where the fortran source is written. + The default is to use a temporary file with the extension + provided by the `extension` parameter + extension : {'.f', '.f90'}, optional + Filename extension if `source_fn` is not provided. + The extension tells which fortran standard is used. + The default is `.f`, which implies F77 standard. + + .. versionadded:: 1.11.0 + + Returns + ------- + result : int + 0 on success + + Examples + -------- + .. 
include:: compile_session.dat + :literal: + + """ + import tempfile + import shlex + + if source_fn is None: + f, fname = tempfile.mkstemp(suffix=extension) + # f is a file descriptor so need to close it + # carefully -- not with .close() directly + os.close(f) + else: + fname = source_fn + + if not isinstance(source, str): + source = str(source, 'utf-8') + try: + with open(fname, 'w') as f: + f.write(source) + + args = ['-c', '-m', modulename, f.name] + + if isinstance(extra_args, np.compat.basestring): + is_posix = (os.name == 'posix') + extra_args = shlex.split(extra_args, posix=is_posix) + + args.extend(extra_args) + + c = [sys.executable, + '-c', + 'import numpy.f2py as f2py2e;f2py2e.main()'] + args + try: + output = subprocess.check_output(c) + except subprocess.CalledProcessError as exc: + status = exc.returncode + output = '' + except OSError: + # preserve historic status code used by exec_command() + status = 127 + output = '' + else: + status = 0 + if verbose: + print(output) + finally: + if source_fn is None: + os.remove(fname) + return status + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/__init__.pyc new file mode 100644 index 0000000..cebffad Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/__main__.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/__main__.py new file mode 100644 index 0000000..708f7f3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/__main__.py @@ -0,0 +1,6 @@ +# See http://cens.ioc.ee/projects/f2py2e/ +from __future__ import division, print_function + +from numpy.f2py.f2py2e import main + +main() diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/__main__.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/__main__.pyc new file mode 100644 index 0000000..914bb10 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/__main__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/__version__.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/__version__.py new file mode 100644 index 0000000..49a2199 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/__version__.py @@ -0,0 +1,10 @@ +from __future__ import division, absolute_import, print_function + +major = 2 + +try: + from __svn_version__ import version + version_info = (major, version) + version = '%s_%s' % version_info +except (ImportError, ValueError): + version = str(major) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/__version__.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/__version__.pyc new file mode 100644 index 0000000..c6f4652 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/__version__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/auxfuncs.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/auxfuncs.py new file mode 100644 index 0000000..404bdbd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/auxfuncs.py @@ -0,0 +1,854 @@ +#!/usr/bin/env python +""" + +Auxiliary functions for f2py2e. 
+ +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) LICENSE. + + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/07/24 19:01:55 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +import pprint +import sys +import types +from functools import reduce + +from . import __version__ +from . import cfuncs + +__all__ = [ + 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', + 'getargs2', 'getcallprotoargument', 'getcallstatement', + 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', + 'getusercode1', 'hasbody', 'hascallstatement', 'hascommon', + 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', + 'isallocatable', 'isarray', 'isarrayofstrings', 'iscomplex', + 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', + 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', + 'isfunction_wrap', 'isint1array', 'isinteger', 'isintent_aux', + 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict', + 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace', + 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', + 'islogicalfunction', 'islong_complex', 'islong_double', + 'islong_doublefunction', 'islong_long', 'islong_longfunction', + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', + 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', + 'isstringarray', 'isstringfunction', 'issubroutine', + 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', + 'isunsigned_chararray', 'isunsigned_long_long', + 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', + 'replace', 'show', 'stripcomma', 'throw_error', +] + + +f2py_version = __version__.version + + +errmess = sys.stderr.write +show = pprint.pprint + +options = {} +debugoptions = [] +wrapfuncs = 1 + + +def outmess(t): + if options.get('verbose', 1): + sys.stdout.write(t) + + +def debugcapi(var): + return 'capi' in debugoptions + + +def _isstring(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def isstring(var): + return _isstring(var) and not isarray(var) + + +def ischaracter(var): + return isstring(var) and 'charselector' not in var + + +def isstringarray(var): + return isarray(var) and _isstring(var) + + +def isarrayofstrings(var): + # leaving out '*' for now so that `character*(*) a(m)` and `character + # a(m,*)` are treated differently. Luckily `character**` is illegal. 
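+    # i.e. the last dimension must be the assumed size '(*)' for this to hold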
+ return isstringarray(var) and var['dimension'][-1] == '(*)' + + +def isarray(var): + return 'dimension' in var and not isexternal(var) + + +def isscalar(var): + return not (isarray(var) or isstring(var) or isexternal(var)) + + +def iscomplex(var): + return isscalar(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def islogical(var): + return isscalar(var) and var.get('typespec') == 'logical' + + +def isinteger(var): + return isscalar(var) and var.get('typespec') == 'integer' + + +def isreal(var): + return isscalar(var) and var.get('typespec') == 'real' + + +def get_kind(var): + try: + return var['kindselector']['*'] + except KeyError: + try: + return var['kindselector']['kind'] + except KeyError: + pass + + +def islong_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') not in ['integer', 'logical']: + return 0 + return get_kind(var) == '8' + + +def isunsigned_char(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-1' + + +def isunsigned_short(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-2' + + +def isunsigned(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-4' + + +def isunsigned_long_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-8' + + +def isdouble(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '8' + + +def islong_double(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '16' + + +def islong_complex(var): + if not iscomplex(var): + return 0 + return get_kind(var) == '32' + + +def iscomplexarray(var): + return isarray(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def isint1array(var): + return isarray(var) and var.get('typespec') == 'integer' \ + and get_kind(var) == '1' + + +def isunsigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-1' + + +def isunsigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-2' + + +def isunsignedarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-4' + + +def isunsigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-8' + + +def issigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '1' + + +def issigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '2' + + +def issigned_array(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '4' + + +def issigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '8' + + +def isallocatable(var): + return 'attrspec' in var and 'allocatable' in var['attrspec'] + + +def ismutable(var): + return not ('dimension' not in var or isstring(var)) + + +def ismoduleroutine(rout): + return 'modulename' in rout + + +def ismodule(rout): + return 'block' in rout and 'module' == rout['block'] + + +def isfunction(rout): + return 'block' in rout and 
'function' == rout['block'] + +def isfunction_wrap(rout): + if isintent_c(rout): + return 0 + return wrapfuncs and isfunction(rout) and (not isexternal(rout)) + + +def issubroutine(rout): + return 'block' in rout and 'subroutine' == rout['block'] + + +def issubroutine_wrap(rout): + if isintent_c(rout): + return 0 + return issubroutine(rout) and hasassumedshape(rout) + + +def hasassumedshape(rout): + if rout.get('hasassumedshape'): + return True + for a in rout['args']: + for d in rout['vars'].get(a, {}).get('dimension', []): + if d == ':': + rout['hasassumedshape'] = True + return True + return False + + +def isroutine(rout): + return isfunction(rout) or issubroutine(rout) + + +def islogicalfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islogical(rout['vars'][a]) + return 0 + + +def islong_longfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_long(rout['vars'][a]) + return 0 + + +def islong_doublefunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_double(rout['vars'][a]) + return 0 + + +def iscomplexfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return iscomplex(rout['vars'][a]) + return 0 + + +def iscomplexfunction_warn(rout): + if iscomplexfunction(rout): + outmess("""\ + ************************************************************** + Warning: code with a function returning complex value + may not work correctly with your Fortran compiler. + Run the following test before using it in your applications: + $(f2py install dir)/test-site/{b/runme_scalar,e/runme} + When using GNU gcc/g77 compilers, codes should work correctly. 
+ **************************************************************\n""") + return 1 + return 0 + + +def isstringfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return isstring(rout['vars'][a]) + return 0 + + +def hasexternals(rout): + return 'externals' in rout and rout['externals'] + + +def isthreadsafe(rout): + return 'f2pyenhancements' in rout and \ + 'threadsafe' in rout['f2pyenhancements'] + + +def hasvariables(rout): + return 'vars' in rout and rout['vars'] + + +def isoptional(var): + return ('attrspec' in var and 'optional' in var['attrspec'] and + 'required' not in var['attrspec']) and isintent_nothide(var) + + +def isexternal(var): + return 'attrspec' in var and 'external' in var['attrspec'] + + +def isrequired(var): + return not isoptional(var) and isintent_nothide(var) + + +def isintent_in(var): + if 'intent' not in var: + return 1 + if 'hide' in var['intent']: + return 0 + if 'inplace' in var['intent']: + return 0 + if 'in' in var['intent']: + return 1 + if 'out' in var['intent']: + return 0 + if 'inout' in var['intent']: + return 0 + if 'outin' in var['intent']: + return 0 + return 1 + + +def isintent_inout(var): + return ('intent' in var and ('inout' in var['intent'] or + 'outin' in var['intent']) and 'in' not in var['intent'] and + 'hide' not in var['intent'] and 'inplace' not in var['intent']) + + +def isintent_out(var): + return 'out' in var.get('intent', []) + + +def isintent_hide(var): + return ('intent' in var and ('hide' in var['intent'] or + ('out' in var['intent'] and 'in' not in var['intent'] and + (not l_or(isintent_inout, isintent_inplace)(var))))) + +def isintent_nothide(var): + return not isintent_hide(var) + + +def isintent_c(var): + return 'c' in var.get('intent', []) + + +def isintent_cache(var): + return 'cache' in var.get('intent', []) + + +def isintent_copy(var): + return 'copy' in var.get('intent', []) + + +def isintent_overwrite(var): + return 'overwrite' in var.get('intent', []) + + +def isintent_callback(var): + return 'callback' in var.get('intent', []) + + +def isintent_inplace(var): + return 'inplace' in var.get('intent', []) + + +def isintent_aux(var): + return 'aux' in var.get('intent', []) + + +def isintent_aligned4(var): + return 'aligned4' in var.get('intent', []) + + +def isintent_aligned8(var): + return 'aligned8' in var.get('intent', []) + + +def isintent_aligned16(var): + return 'aligned16' in var.get('intent', []) + +isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', + isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', + isintent_cache: 'INTENT_CACHE', + isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', + isintent_inplace: 'INTENT_INPLACE', + isintent_aligned4: 'INTENT_ALIGNED4', + isintent_aligned8: 'INTENT_ALIGNED8', + isintent_aligned16: 'INTENT_ALIGNED16', + } + + +def isprivate(var): + return 'attrspec' in var and 'private' in var['attrspec'] + + +def hasinitvalue(var): + return '=' in var + + +def hasinitvalueasstring(var): + if not hasinitvalue(var): + return 0 + return var['='][0] in ['"', "'"] + + +def hasnote(var): + return 'note' in var + + +def hasresultnote(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return hasnote(rout['vars'][a]) + return 0 + + +def hascommon(rout): + return 'common' in rout + + +def containscommon(rout): + if hascommon(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + 
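+            # recurse into any nested blocks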
if containscommon(b): + return 1 + return 0 + + +def containsmodule(block): + if ismodule(block): + return 1 + if not hasbody(block): + return 0 + for b in block['body']: + if containsmodule(b): + return 1 + return 0 + + +def hasbody(rout): + return 'body' in rout + + +def hascallstatement(rout): + return getcallstatement(rout) is not None + + +def istrue(var): + return 1 + + +def isfalse(var): + return 0 + + +class F2PYError(Exception): + pass + + +class throw_error(object): + + def __init__(self, mess): + self.mess = mess + + def __call__(self, var): + mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) + raise F2PYError(mess) + + +def l_and(*f): + l, l2 = 'lambda v', [] + for i in range(len(f)): + l = '%s,f%d=f[%d]' % (l, i, i) + l2.append('f%d(v)' % (i)) + return eval('%s:%s' % (l, ' and '.join(l2))) + + +def l_or(*f): + l, l2 = 'lambda v', [] + for i in range(len(f)): + l = '%s,f%d=f[%d]' % (l, i, i) + l2.append('f%d(v)' % (i)) + return eval('%s:%s' % (l, ' or '.join(l2))) + + +def l_not(f): + return eval('lambda v,f=f:not f(v)') + + +def isdummyroutine(rout): + try: + return rout['f2pyenhancements']['fortranname'] == '' + except KeyError: + return 0 + + +def getfortranname(rout): + try: + name = rout['f2pyenhancements']['fortranname'] + if name == '': + raise KeyError + if not name: + errmess('Failed to use fortranname from %s\n' % + (rout['f2pyenhancements'])) + raise KeyError + except KeyError: + name = rout['name'] + return name + + +def getmultilineblock(rout, blockname, comment=1, counter=0): + try: + r = rout['f2pyenhancements'].get(blockname) + except KeyError: + return + if not r: + return + if counter > 0 and isinstance(r, str): + return + if isinstance(r, list): + if counter >= len(r): + return + r = r[counter] + if r[:3] == "'''": + if comment: + r = '\t/* start ' + blockname + \ + ' multiline (' + repr(counter) + ') */\n' + r[3:] + else: + r = r[3:] + if r[-3:] == "'''": + if comment: + r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/' + else: + r = r[:-3] + else: + errmess("%s multiline block should end with `'''`: %s\n" + % (blockname, repr(r))) + return r + + +def getcallstatement(rout): + return getmultilineblock(rout, 'callstatement') + + +def getcallprotoargument(rout, cb_map={}): + r = getmultilineblock(rout, 'callprotoargument', comment=0) + if r: + return r + if hascallstatement(rout): + outmess( + 'warning: callstatement is defined without callprotoargument\n') + return + from .capi_maps import getctype + arg_types, arg_types2 = [], [] + if l_and(isstringfunction, l_not(isfunction_wrap))(rout): + arg_types.extend(['char*', 'size_t']) + for n in rout['args']: + var = rout['vars'][n] + if isintent_callback(var): + continue + if n in cb_map: + ctype = cb_map[n] + '_typedef' + else: + ctype = getctype(var) + if l_and(isintent_c, l_or(isscalar, iscomplex))(var): + pass + elif isstring(var): + pass + else: + ctype = ctype + '*' + if isstring(var) or isarrayofstrings(var): + arg_types2.append('size_t') + arg_types.append(ctype) + + proto_args = ','.join(arg_types + arg_types2) + if not proto_args: + proto_args = 'void' + return proto_args + + +def getusercode(rout): + return getmultilineblock(rout, 'usercode') + + +def getusercode1(rout): + return getmultilineblock(rout, 'usercode', counter=1) + + +def getpymethoddef(rout): + return getmultilineblock(rout, 'pymethoddef') + + +def getargs(rout): + sortargs, args = [], [] + if 'args' in rout: + args = rout['args'] + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) 
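+            # arguments not named in sortvars keep their original relative order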
+ for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = rout['args'] + return args, sortargs + + +def getargs2(rout): + sortargs, args = [], rout.get('args', []) + auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a]) + and a not in args] + args = auxvars + args + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = auxvars + rout['args'] + return args, sortargs + + +def getrestdoc(rout): + if 'f2pymultilines' not in rout: + return None + k = None + if rout['block'] == 'python module': + k = rout['block'], rout['name'] + return rout['f2pymultilines'].get(k, None) + + +def gentitle(name): + l = (80 - len(name) - 6) // 2 + return '/*%s %s %s*/' % (l * '*', name, l * '*') + + +def flatlist(l): + if isinstance(l, list): + return reduce(lambda x, y, f=flatlist: x + f(y), l, []) + return [l] + + +def stripcomma(s): + if s and s[-1] == ',': + return s[:-1] + return s + + +def replace(str, d, defaultsep=''): + if isinstance(d, list): + return [replace(str, _m, defaultsep) for _m in d] + if isinstance(str, list): + return [replace(_m, d, defaultsep) for _m in str] + for k in 2 * list(d.keys()): + if k == 'separatorsfor': + continue + if 'separatorsfor' in d and k in d['separatorsfor']: + sep = d['separatorsfor'][k] + else: + sep = defaultsep + if isinstance(d[k], list): + str = str.replace('#%s#' % (k), sep.join(flatlist(d[k]))) + else: + str = str.replace('#%s#' % (k), d[k]) + return str + + +def dictappend(rd, ar): + if isinstance(ar, list): + for a in ar: + rd = dictappend(rd, a) + return rd + for k in ar.keys(): + if k[0] == '_': + continue + if k in rd: + if isinstance(rd[k], str): + rd[k] = [rd[k]] + if isinstance(rd[k], list): + if isinstance(ar[k], list): + rd[k] = rd[k] + ar[k] + else: + rd[k].append(ar[k]) + elif isinstance(rd[k], dict): + if isinstance(ar[k], dict): + if k == 'separatorsfor': + for k1 in ar[k].keys(): + if k1 not in rd[k]: + rd[k][k1] = ar[k][k1] + else: + rd[k] = dictappend(rd[k], ar[k]) + else: + rd[k] = ar[k] + return rd + + +def applyrules(rules, d, var={}): + ret = {} + if isinstance(rules, list): + for r in rules: + rr = applyrules(r, d, var) + ret = dictappend(ret, rr) + if '_break' in rr: + break + return ret + if '_check' in rules and (not rules['_check'](var)): + return ret + if 'need' in rules: + res = applyrules({'needs': rules['need']}, d, var) + if 'needs' in res: + cfuncs.append_needs(res['needs']) + + for k in rules.keys(): + if k == 'separatorsfor': + ret[k] = rules[k] + continue + if isinstance(rules[k], str): + ret[k] = replace(rules[k], d) + elif isinstance(rules[k], list): + ret[k] = [] + for i in rules[k]: + ar = applyrules({k: i}, d, var) + if k in ar: + ret[k].append(ar[k]) + elif k[0] == '_': + continue + elif isinstance(rules[k], dict): + ret[k] = [] + for k1 in rules[k].keys(): + if isinstance(k1, types.FunctionType) and k1(var): + if isinstance(rules[k][k1], list): + for i in rules[k][k1]: + if isinstance(i, dict): + res = applyrules({'supertext': i}, d, var) + if 'supertext' in res: + i = res['supertext'] + else: + i = '' + ret[k].append(replace(i, d)) + else: + i = rules[k][k1] + if isinstance(i, dict): + res = applyrules({'supertext': i}, d) + if 'supertext' in res: + i = res['supertext'] + else: + i = '' + ret[k].append(replace(i, d)) + else: + errmess('applyrules: ignoring rule %s.\n' % repr(rules[k])) + if isinstance(ret[k], list): + if len(ret[k]) == 1: + ret[k] = 
ret[k][0] + if ret[k] == []: + del ret[k] + return ret diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/auxfuncs.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/auxfuncs.pyc new file mode 100644 index 0000000..6824537 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/auxfuncs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/capi_maps.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/capi_maps.py new file mode 100644 index 0000000..c41dd77 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/capi_maps.py @@ -0,0 +1,837 @@ +#!/usr/bin/env python +""" + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 10:57:33 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.60 $"[10:-1] + +from . import __version__ +f2py_version = __version__.version + +import copy +import re +import os +import sys +from .crackfortran import markoutercomma +from . import cb_rules + +# The eviroment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * + +__all__ = [ + 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', + 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', + 'cb_sign2map', 'cb_routsign2map', 'common_sign2map' +] + + +# Numarray and Numeric users should set this False +using_newcore = True + +depargs = [] +lcb_map = {} +lcb2_map = {} +# forced casting: mainly caused by the fact that Python or Numeric +# C/APIs do not support the corresponding C types. +c2py_map = {'double': 'float', + 'float': 'float', # forced casting + 'long_double': 'float', # forced casting + 'char': 'int', # forced casting + 'signed_char': 'int', # forced casting + 'unsigned_char': 'int', # forced casting + 'short': 'int', # forced casting + 'unsigned_short': 'int', # forced casting + 'int': 'int', # (forced casting) + 'long': 'int', + 'long_long': 'long', + 'unsigned': 'int', # forced casting + 'complex_float': 'complex', # forced casting + 'complex_double': 'complex', + 'complex_long_double': 'complex', # forced casting + 'string': 'string', + } +c2capi_map = {'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_DOUBLE', # forced casting + 'char': 'NPY_STRING', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'long_long': 'NPY_LONG', # forced casting + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', # forced casting + 'string': 'NPY_STRING'} + +# These new maps aren't used anyhere yet, but should be by default +# unless building numeric or numarray extensions. 
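+# using_newcore is set True above, so these replacement maps take effect by default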
+if using_newcore: + c2capi_map = {'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_LONGDOUBLE', + 'char': 'NPY_BYTE', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'unsigned_long': 'NPY_ULONG', + 'long_long': 'NPY_LONGLONG', + 'unsigned_long_long': 'NPY_ULONGLONG', + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', + 'string':'NPY_STRING' + + } +c2pycode_map = {'double': 'd', + 'float': 'f', + 'long_double': 'd', # forced casting + 'char': '1', + 'signed_char': '1', + 'unsigned_char': 'b', + 'short': 's', + 'unsigned_short': 'w', + 'int': 'i', + 'unsigned': 'u', + 'long': 'l', + 'long_long': 'L', + 'complex_float': 'F', + 'complex_double': 'D', + 'complex_long_double': 'D', # forced casting + 'string': 'c' + } +if using_newcore: + c2pycode_map = {'double': 'd', + 'float': 'f', + 'long_double': 'g', + 'char': 'b', + 'unsigned_char': 'B', + 'signed_char': 'b', + 'short': 'h', + 'unsigned_short': 'H', + 'int': 'i', + 'unsigned': 'I', + 'long': 'l', + 'unsigned_long': 'L', + 'long_long': 'q', + 'unsigned_long_long': 'Q', + 'complex_float': 'F', + 'complex_double': 'D', + 'complex_long_double': 'G', + 'string': 'S'} +c2buildvalue_map = {'double': 'd', + 'float': 'f', + 'char': 'b', + 'signed_char': 'b', + 'short': 'h', + 'int': 'i', + 'long': 'l', + 'long_long': 'L', + 'complex_float': 'N', + 'complex_double': 'N', + 'complex_long_double': 'N', + 'string': 'z'} + +if sys.version_info[0] >= 3: + # Bytes, not Unicode strings + c2buildvalue_map['string'] = 'y' + +if using_newcore: + # c2buildvalue_map=??? + pass + +f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', + '12': 'long_double', '16': 'long_double'}, + 'integer': {'': 'int', '1': 'signed_char', '2': 'short', + '4': 'int', '8': 'long_long', + '-1': 'unsigned_char', '-2': 'unsigned_short', + '-4': 'unsigned', '-8': 'unsigned_long_long'}, + 'complex': {'': 'complex_float', '8': 'complex_float', + '16': 'complex_double', '24': 'complex_long_double', + '32': 'complex_long_double'}, + 'complexkind': {'': 'complex_float', '4': 'complex_float', + '8': 'complex_double', '12': 'complex_long_double', + '16': 'complex_long_double'}, + 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', + '8': 'long_long'}, + 'double complex': {'': 'complex_double'}, + 'double precision': {'': 'double'}, + 'byte': {'': 'char'}, + 'character': {'': 'string'} + } + +if os.path.isfile('.f2py_f2cmap'): + # User defined additions to f2cmap_all. + # .f2py_f2cmap must contain a dictionary of dictionaries, only. For + # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is + # interpreted as C 'float'. This feature is useful for F90/95 users if + # they use PARAMETERSs in type specifications. 
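+    # parse the user map defensively; malformed entries are reported and skipped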
+ try: + outmess('Reading .f2py_f2cmap ...\n') + f = open('.f2py_f2cmap', 'r') + d = eval(f.read(), {}, {}) + f.close() + for k, d1 in list(d.items()): + for k1 in list(d1.keys()): + d1[k1.lower()] = d1[k1] + d[k.lower()] = d[k] + for k in list(d.keys()): + if k not in f2cmap_all: + f2cmap_all[k] = {} + for k1 in list(d[k].keys()): + if d[k][k1] in c2py_map: + if k1 in f2cmap_all[k]: + outmess( + "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" % (k, k1, f2cmap_all[k][k1], d[k][k1])) + f2cmap_all[k][k1] = d[k][k1] + outmess('\tMapping "%s(kind=%s)" to "%s"\n' % + (k, k1, d[k][k1])) + else: + errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % ( + k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) + outmess('Successfully applied user defined changes from .f2py_f2cmap\n') + except Exception as msg: + errmess( + 'Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg)) + +cformat_map = {'double': '%g', + 'float': '%g', + 'long_double': '%Lg', + 'char': '%d', + 'signed_char': '%d', + 'unsigned_char': '%hhu', + 'short': '%hd', + 'unsigned_short': '%hu', + 'int': '%d', + 'unsigned': '%u', + 'long': '%ld', + 'unsigned_long': '%lu', + 'long_long': '%ld', + 'complex_float': '(%g,%g)', + 'complex_double': '(%g,%g)', + 'complex_long_double': '(%Lg,%Lg)', + 'string': '%s', + } + +# Auxiliary functions + + +def getctype(var): + """ + Determines C type + """ + ctype = 'void' + if isfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getctype(var['vars'][a]) + else: + errmess('getctype: function %s has no return value?!\n' % a) + elif issubroutine(var): + return ctype + elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: + typespec = var['typespec'].lower() + f2cmap = f2cmap_all[typespec] + ctype = f2cmap[''] # default type + if 'kindselector' in var: + if '*' in var['kindselector']: + try: + ctype = f2cmap[var['kindselector']['*']] + except KeyError: + errmess('getctype: "%s %s %s" not supported.\n' % + (var['typespec'], '*', var['kindselector']['*'])) + elif 'kind' in var['kindselector']: + if typespec + 'kind' in f2cmap_all: + f2cmap = f2cmap_all[typespec + 'kind'] + try: + ctype = f2cmap[var['kindselector']['kind']] + except KeyError: + if typespec in f2cmap_all: + f2cmap = f2cmap_all[typespec] + try: + ctype = f2cmap[str(var['kindselector']['kind'])] + except KeyError: + errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' + % (typespec, var['kindselector']['kind'], ctype, + typespec, var['kindselector']['kind'], os.getcwd())) + + else: + if not isexternal(var): + errmess( + 'getctype: No C-type found in "%s", assuming void.\n' % var) + return ctype + + +def getstrlength(var): + if isstringfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getstrlength(var['vars'][a]) + else: + errmess('getstrlength: function %s has no return value?!\n' % a) + if not isstring(var): + errmess( + 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) + len = '1' + if 'charselector' in var: + a = var['charselector'] + if '*' in a: + len = a['*'] + elif 'len' in a: + len = a['len'] + if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len): + if isintent_hide(var): + errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( + repr(var))) + len = '-1' + return len + + +def getarrdims(a, var, 
verbose=0): + global depargs + ret = {} + if isstring(var) and not isarray(var): + ret['dims'] = getstrlength(var) + ret['size'] = ret['dims'] + ret['rank'] = '1' + elif isscalar(var): + ret['size'] = '1' + ret['rank'] = '0' + ret['dims'] = '' + elif isarray(var): + dim = copy.copy(var['dimension']) + ret['size'] = '*'.join(dim) + try: + ret['size'] = repr(eval(ret['size'])) + except Exception: + pass + ret['dims'] = ','.join(dim) + ret['rank'] = repr(len(dim)) + ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] + for i in range(len(dim)): # solve dim for dependencies + v = [] + if dim[i] in depargs: + v = [dim[i]] + else: + for va in depargs: + if re.match(r'.*?\b%s\b.*' % va, dim[i]): + v.append(va) + for va in v: + if depargs.index(va) > depargs.index(a): + dim[i] = '*' + break + ret['setdims'], i = '', -1 + for d in dim: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['setdims'], i, d) + if ret['setdims']: + ret['setdims'] = ret['setdims'][:-1] + ret['cbsetdims'], i = '', -1 + for d in var['dimension']: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, d) + elif isintent_in(var): + outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' + % (d)) + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, 0) + elif verbose: + errmess( + 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d))) + if ret['cbsetdims']: + ret['cbsetdims'] = ret['cbsetdims'][:-1] +# if not isintent_c(var): +# var['dimension'].reverse() + return ret + + +def getpydocsign(a, var): + global lcb_map + if isfunction(var): + if 'result' in var: + af = var['result'] + else: + af = var['name'] + if af in var['vars']: + return getpydocsign(af, var['vars'][af]) + else: + errmess('getctype: function %s has no return value?!\n' % af) + return '', '' + sig, sigout = a, a + opt = '' + if isintent_in(var): + opt = 'input' + elif isintent_inout(var): + opt = 'in/output' + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + init = '' + ctype = getctype(var) + + if hasinitvalue(var): + init, showinit = getinit(a, var) + init = ', optional\\n Default: %s' % showinit + if isscalar(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], + c2pycode_map[ctype], init) + else: + sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) + sigout = '%s : %s' % (out_a, c2py_map[ctype]) + elif isstring(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( + a, opt, getstrlength(var), init) + else: + sig = '%s : %s string(len=%s)%s' % ( + a, opt, getstrlength(var), init) + sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, + c2pycode_map[ + ctype], + ','.join(dim), init) + if a == out_a: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ + % (a, rank, c2pycode_map[ctype], ','.join(dim)) + else: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ + % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) + elif isexternal(var): + ua = '' + if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: + ua = lcb2_map[lcb_map[a]]['argname'] + if not ua == a: + ua = ' => %s' % ua + 
else: + ua = '' + sig = '%s : call-back function%s' % (a, ua) + sigout = sig + else: + errmess( + 'getpydocsign: Could not resolve docsignature for "%s".\\n' % a) + return sig, sigout + + +def getarrdocsign(a, var): + ctype = getctype(var) + if isstring(var) and (not isarray(var)): + sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, + getstrlength(var)) + elif isscalar(var): + sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], + c2pycode_map[ctype],) + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, + c2pycode_map[ + ctype], + ','.join(dim)) + return sig + + +def getinit(a, var): + if isstring(var): + init, showinit = '""', "''" + else: + init, showinit = '', '' + if hasinitvalue(var): + init = var['='] + showinit = init + if iscomplex(var) or iscomplexarray(var): + ret = {} + + try: + v = var["="] + if ',' in v: + ret['init.r'], ret['init.i'] = markoutercomma( + v[1:-1]).split('@,@') + else: + v = eval(v, {}, {}) + ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) + except Exception: + raise ValueError( + 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) + if isarray(var): + init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( + ret['init.r'], ret['init.i']) + elif isstring(var): + if not init: + init, showinit = '""', "''" + if init[0] == "'": + init = '"%s"' % (init[1:-1].replace('"', '\\"')) + if init[0] == '"': + showinit = "'%s'" % (init[1:-1]) + return init, showinit + + +def sign2map(a, var): + """ + varname,ctype,atype + init,init.r,init.i,pytype + vardebuginfo,vardebugshowvalue,varshowvalue + varrfromat + intent + """ + global lcb_map, cb_map + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} + intent_flags = [] + for f, s in isintent_dict.items(): + if f(var): + intent_flags.append('F2PY_%s' % s) + if intent_flags: + # XXX: Evaluate intent_flags here. 
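+        # joining with '|' yields a single C bitmask expression of F2PY_INTENT_* flags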
+ ret['intent'] = '|'.join(intent_flags) + else: + ret['intent'] = 'F2PY_INTENT_IN' + if isarray(var): + ret['varrformat'] = 'N' + elif ret['ctype'] in c2buildvalue_map: + ret['varrformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['varrformat'] = 'O' + ret['init'], ret['showinit'] = getinit(a, var) + if hasinitvalue(var) and iscomplex(var) and not isarray(var): + ret['init.r'], ret['init.i'] = markoutercomma( + ret['init'][1:-1]).split('@,@') + if isexternal(var): + ret['cbnamekey'] = a + if a in lcb_map: + ret['cbname'] = lcb_map[a] + ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] + ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] + ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] + ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] + else: + ret['cbname'] = a + errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( + a, list(lcb_map.keys()))) + if isstring(var): + ret['length'] = getstrlength(var) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + dim = copy.copy(var['dimension']) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + # Debug info + if debugcapi(var): + il = [isintent_in, 'input', isintent_out, 'output', + isintent_inout, 'inoutput', isrequired, 'required', + isoptional, 'optional', isintent_hide, 'hidden', + iscomplex, 'complex scalar', + l_and(isscalar, l_not(iscomplex)), 'scalar', + isstring, 'string', isarray, 'array', + iscomplexarray, 'complex array', isstringarray, 'string array', + iscomplexfunction, 'complex function', + l_and(isfunction, l_not(iscomplexfunction)), 'function', + isexternal, 'callback', + isintent_callback, 'callback', + isintent_aux, 'auxiliary', + ] + rl = [] + for i in range(0, len(il), 2): + if il[i](var): + rl.append(il[i + 1]) + if isstring(var): + rl.append('slen(%s)=%s' % (a, ret['length'])) + if isarray(var): + ddim = ','.join( + map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) + rl.append('dims(%s)' % ddim) + if isexternal(var): + ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( + a, ret['cbname'], ','.join(rl)) + else: + ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( + ret['ctype'], a, ret['showinit'], ','.join(rl)) + if isscalar(var): + if ret['ctype'] in cformat_map: + ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstring(var): + ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isexternal(var): + ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) + if ret['ctype'] in cformat_map: + ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isstring(var): + ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + return ret + + +def routsign2map(rout): + """ + name,NAME,begintitle,endtitle + rname,ctype,rformat + routdebugshowvalue + """ + global lcb_map + name = rout['name'] + fname = getfortranname(rout) + ret = {'name': name, + 'texname': name.replace('_', '\\_'), + 'name_lower': name.lower(), + 'NAME': name.upper(), + 'begintitle': gentitle(name), + 'endtitle': gentitle('end of %s' % name), + 'fortranname': fname, + 'FORTRANNAME': fname.upper(), + 'callstatement': getcallstatement(rout) or '', + 'usercode': getusercode(rout) or '', + 'usercode1': getusercode1(rout) or '', + } + if '_' in fname: + ret['F_FUNC'] = 'F_FUNC_US' + else: + ret['F_FUNC'] = 'F_FUNC' + if '_' 
in name: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' + else: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' + lcb_map = {} + if 'use' in rout: + for u in rout['use'].keys(): + if u in cb_rules.cb_map: + for un in cb_rules.cb_map[u]: + ln = un[0] + if 'map' in rout['use'][u]: + for k in rout['use'][u]['map'].keys(): + if rout['use'][u]['map'][k] == un[0]: + ln = k + break + lcb_map[ln] = un[1] + elif 'externals' in rout and rout['externals']: + errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( + ret['name'], repr(rout['externals']))) + ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + ret['ctype'] = getctype(rout['vars'][a]) + if hasresultnote(rout): + ret['resultnote'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + if ret['ctype'] in c2buildvalue_map: + ret['rformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['rformat'] = 'O' + errmess('routsign2map: no c2buildvalue key for type %s\n' % + (repr(ret['ctype']))) + if debugcapi(rout): + if ret['ctype'] in cformat_map: + ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isstringfunction(rout): + ret['rlength'] = getstrlength(rout['vars'][a]) + if ret['rlength'] == '-1': + errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( + repr(rout['name']))) + ret['rlength'] = '10' + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def modsign2map(m): + """ + modulename + """ + if ismodule(m): + ret = {'f90modulename': m['name'], + 'F90MODULENAME': m['name'].upper(), + 'texf90modulename': m['name'].replace('_', '\\_')} + else: + ret = {'modulename': m['name'], + 'MODULENAME': m['name'].upper(), + 'texmodulename': m['name'].replace('_', '\\_')} + ret['restdoc'] = getrestdoc(m) or [] + if hasnote(m): + ret['note'] = m['note'] + ret['usercode'] = getusercode(m) or '' + ret['usercode1'] = getusercode1(m) or '' + if m['body']: + ret['interface_usercode'] = getusercode(m['body'][0]) or '' + else: + ret['interface_usercode'] = '' + ret['pymethoddef'] = getpymethoddef(m) or '' + if 'coutput' in m: + ret['coutput'] = m['coutput'] + if 'f2py_wrapper_output' in m: + ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] + return ret + + +def cb_sign2map(a, var, index=None): + ret = {'varname': a} + ret['varname_i'] = ret['varname'] + ret['ctype'] = getctype(var) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + return ret + + +def cb_routsign2map(rout, um): + """ + name,begintitle,endtitle,argname + ctype,rctype,maxnofargs,nofoptargs,returncptr + """ + ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), + 'returncptr': ''} + if isintent_callback(rout): + if '_' in rout['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + ret['callbackname'] = '%s(%s,%s)' \ + % (F_FUNC, + rout['name'].lower(), 
+ rout['name'].upper(), + ) + ret['static'] = 'extern' + else: + ret['callbackname'] = ret['name'] + ret['static'] = 'static' + ret['argname'] = rout['name'] + ret['begintitle'] = gentitle(ret['name']) + ret['endtitle'] = gentitle('end of %s' % ret['name']) + ret['ctype'] = getctype(rout) + ret['rctype'] = 'void' + if ret['ctype'] == 'string': + ret['rctype'] = 'void' + else: + ret['rctype'] = ret['ctype'] + if ret['rctype'] != 'void': + if iscomplexfunction(rout): + ret['returncptr'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +return_value= +#endif +""" + else: + ret['returncptr'] = 'return_value=' + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['strlength'] = getstrlength(rout) + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if hasnote(rout['vars'][a]): + ret['note'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + if iscomplexfunction(rout): + ret['rctype'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +#ctype# +#else +void +#endif +""" + else: + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + nofargs = 0 + nofoptargs = 0 + if 'args' in rout and 'vars' in rout: + for a in rout['args']: + var = rout['vars'][a] + if l_or(isintent_in, isintent_inout)(var): + nofargs = nofargs + 1 + if isoptional(var): + nofoptargs = nofoptargs + 1 + ret['maxnofargs'] = repr(nofargs) + ret['nofoptargs'] = repr(nofoptargs) + if hasnote(rout) and isfunction(rout) and 'result' in rout: + ret['routnote'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def common_sign2map(a, var): # obsolute + ret = {'varname': a, 'ctype': getctype(var)} + if isstringarray(var): + ret['ctype'] = 'char' + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + elif isstring(var): + ret['size'] = getstrlength(var) + ret['rank'] = '1' + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + # for strings this returns 0-rank but actually is 1-rank + ret['arrdocstr'] = getarrdocsign(a, var) + return ret diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/capi_maps.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/capi_maps.pyc new file mode 100644 index 0000000..2924e6b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/capi_maps.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/cb_rules.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/cb_rules.py new file mode 100644 index 0000000..183d7c2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/cb_rules.py @@ -0,0 +1,578 @@ +#!/usr/bin/env python +""" + +Build call-back mechanism for f2py2e. + +Copyright 2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/07/20 11:27:58 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +from . 
import __version__ +from .auxfuncs import ( + applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray, + iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c, + isintent_hide, isintent_in, isintent_inout, isintent_nothide, + isintent_out, isoptional, isrequired, isscalar, isstring, + isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace, + stripcomma, throw_error +) +from . import cfuncs + +f2py_version = __version__.version + + +################## Rules for callback function ############## + +cb_routine_rules = { + 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', + 'body': """ +#begintitle# +PyObject *#name#_capi = NULL;/*was Py_None*/ +PyTupleObject *#name#_args_capi = NULL; +int #name#_nofargs = 0; +jmp_buf #name#_jmpbuf; +/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ +#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { +\tPyTupleObject *capi_arglist = #name#_args_capi; +\tPyObject *capi_return = NULL; +\tPyObject *capi_tmp = NULL; +\tPyObject *capi_arglist_list = NULL; +\tint capi_j,capi_i = 0; +\tint capi_longjmp_ok = 1; +#decl# +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_clock(); +#endif +\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); +\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi); +\tif (#name#_capi==NULL) { +\t\tcapi_longjmp_ok = 0; +\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); +\t} +\tif (#name#_capi==NULL) { +\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); +\t\tgoto capi_fail; +\t} +\tif (F2PyCapsule_Check(#name#_capi)) { +\t#name#_typedef #name#_cptr; +\t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi); +\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); +\t#return# +\t} +\tif (capi_arglist==NULL) { +\t\tcapi_longjmp_ok = 0; +\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); +\t\tif (capi_tmp) { +\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); +\t\t\tif (capi_arglist==NULL) { +\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); +\t\t\t\tgoto capi_fail; +\t\t\t} +\t\t} else { +\t\t\tPyErr_Clear(); +\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); +\t\t} +\t} +\tif (capi_arglist == NULL) { +\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); +\t\tgoto capi_fail; +\t} +#setdims# +#ifdef PYPY_VERSION +#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) +\tcapi_arglist_list = PySequence_List(capi_arglist); +\tif (capi_arglist_list == NULL) goto capi_fail; +#else +#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) +#endif +#pyobjfrom# +#undef CAPI_ARGLIST_SETITEM +#ifdef PYPY_VERSION +\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); +#else +\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +#endif +\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_call_clock(); +#endif +#ifdef PYPY_VERSION +\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist_list); +\tPy_DECREF(capi_arglist_list); +\tcapi_arglist_list = NULL; +#else +\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist); +#endif +#ifdef 
F2PY_REPORT_ATEXIT +f2py_cb_stop_call_clock(); +#endif +\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return); +\tif (capi_return == NULL) { +\t\tfprintf(stderr,\"capi_return is NULL\\n\"); +\t\tgoto capi_fail; +\t} +\tif (capi_return == Py_None) { +\t\tPy_DECREF(capi_return); +\t\tcapi_return = Py_BuildValue(\"()\"); +\t} +\telse if (!PyTuple_Check(capi_return)) { +\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return); +\t} +\tcapi_j = PyTuple_Size(capi_return); +\tcapi_i = 0; +#frompyobj# +\tCFUNCSMESS(\"cb:#name#:successful\\n\"); +\tPy_DECREF(capi_return); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_clock(); +#endif +\tgoto capi_return_pt; +capi_fail: +\tfprintf(stderr,\"Call-back #name# failed.\\n\"); +\tPy_XDECREF(capi_return); +\tPy_XDECREF(capi_arglist_list); +\tif (capi_longjmp_ok) +\t\tlongjmp(#name#_jmpbuf,-1); +capi_return_pt: +\t; +#return# +} +#endtitle# +""", + 'need': ['setjmp.h', 'CFUNCSMESS'], + 'maxnofargs': '#maxnofargs#', + 'nofoptargs': '#nofoptargs#', + 'docstr': """\ +\tdef #argname#(#docsignature#): return #docreturn#\\n\\ +#docstrsigns#""", + 'latexdocstr': """ +{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} +#routnote# + +#latexdocstrsigns#""", + 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#' +} +cb_rout_rules = [ + { # Init + 'separatorsfor': {'decl': '\n', + 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', + 'args_td': ',', 'optargs_td': '', + 'args_nm': ',', 'optargs_nm': '', + 'frompyobj': '\n', 'setdims': '\n', + 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', + 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', + 'args_td': [], 'optargs_td': '', 'strarglens_td': '', + 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', + 'noargs': '', + 'setdims': '/*setdims*/', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\tRequired arguments:', + 'docstropt': '\tOptional arguments:', + 'docstrout': '\tReturn objects:', + 'docstrcbs': '\tCall-back functions:', + 'docreturn': '', 'docsign': '', 'docsignopt': '', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { # Function + 'decl': '\t#ctype# return_value;', + 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'}, + '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', + {debugcapi: + '\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], + 'return': '\treturn return_value;', + '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) + }, + { # String function + 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, + 'args': '#ctype# return_value,int return_value_len', + 'args_nm': 'return_value,&return_value_len', + 'args_td': '#ctype# ,int', + 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->\\"");'}, + """\tif (capi_j>capi_i) 
+\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", + {debugcapi: + '\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSTRFROMPYTUPLE'], + 'return': 'return;', + '_check': isstringfunction + }, + { # Complex function + 'optargs': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# *return_value +#endif +""", + 'optargs_nm': """ +#ifndef F2PY_CB_RETURNCOMPLEX +return_value +#endif +""", + 'optargs_td': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# * +#endif +""", + 'decl': """ +#ifdef F2PY_CB_RETURNCOMPLEX +\t#ctype# return_value; +#endif +""", + 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'}, + """\ +\tif (capi_j>capi_i) +#ifdef F2PY_CB_RETURNCOMPLEX +\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); +#else +\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); +#endif +""", + {debugcapi: """ +#ifdef F2PY_CB_RETURNCOMPLEX +\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); +#else +\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); +#endif + +"""} + ], + 'return': """ +#ifdef F2PY_CB_RETURNCOMPLEX +\treturn return_value; +#else +\treturn; +#endif +""", + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], + '_check': iscomplexfunction + }, + {'docstrout': '\t\t#pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasnote: '--- #note#'}], + 'docreturn': '#rname#,', + '_check': isfunction}, + {'_check': issubroutine, 'return': 'return;'} +] + +cb_arg_rules = [ + { # Doc + 'docstropt': {l_and(isoptional, isintent_nothide): '\t\t#pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): '\t\t#pydocsign#'}, + 'docstrout': {isintent_out: '\t\t#pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'}, + 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'}, + 'depend': '' + }, + { + 'args': { + l_and(isscalar, isintent_c): '#ctype# #varname_i#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi', + isarray: '#ctype# *#varname_i#', + isstring: '#ctype# #varname_i#' + }, + 'args_nm': { + l_and(isscalar, isintent_c): '#varname_i#', + l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi', + isarray: '#varname_i#', + isstring: '#varname_i#' + }, + 'args_td': { + l_and(isscalar, isintent_c): '#ctype#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *', + isarray: '#ctype# *', + isstring: '#ctype#' + }, + # untested with multiple args + 'strarglens': {isstring: ',int #varname_i#_cb_len'}, + 'strarglens_td': {isstring: ',int'}, # untested with multiple args + # untested with multiple args + 'strarglens_nm': {isstring: ',#varname_i#_cb_len'}, + }, + { # Scalars + 'decl': {l_not(isintent_c): '\t#ctype# 
#varname_i#=(*#varname_i#_cb_capi);'}, + 'error': {l_and(isintent_c, isintent_out, + throw_error('intent(c,out) is forbidden for callback scalar arguments')): + ''}, + 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'}, + {isintent_out: + '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, + {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): + '\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))): + '\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, + {l_and(debugcapi, l_and(iscomplex, isintent_c)): + '\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, + {l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))): + '\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, + ], + 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, + {debugcapi: 'CFUNCSMESS'}], + '_check': isscalar + }, { + 'pyobjfrom': [{isintent_in: """\ +\tif (#name#_nofargs>capi_i) +\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) +\t\t\tgoto capi_fail;"""}, + {isintent_inout: """\ +\tif (#name#_nofargs>capi_i) +\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) +\t\t\tgoto capi_fail;"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}, + {iscomplex: '#ctype#'}], + '_check': l_and(isscalar, isintent_nothide), + '_optional': '' + }, { # String + 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->\\"");'}, + """\tif (capi_j>capi_i) +\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", + {debugcapi: + '\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, + ], + 'need': ['#ctype#', 'GETSTRFROMPYTUPLE', + {debugcapi: 'CFUNCSMESS'}, 'string.h'], + '_check': l_and(isstring, isintent_out) + }, { + 'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, + {isintent_in: """\ +\tif (#name#_nofargs>capi_i) +\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) +\t\t\tgoto capi_fail;"""}, + {isintent_inout: """\ +\tif (#name#_nofargs>capi_i) { +\t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len}; +\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) +\t\t\tgoto capi_fail; +\t}"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}], + '_check': l_and(isstring, isintent_nothide), + '_optional': '' + }, + # Array ... + { + 'decl': '\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', + 'setdims': '\t#cbsetdims#;', + '_check': isarray, + '_depend': '' + }, + { + 'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, + {isintent_c: """\ +\tif (#name#_nofargs>capi_i) { +\t\tint itemsize_ = #atype# == NPY_STRING ? 1 : 0; +\t\t/*XXX: Hmm, what will destroy this array??? */ +\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_CARRAY,NULL); +""", + l_not(isintent_c): """\ +\tif (#name#_nofargs>capi_i) { +\t\tint itemsize_ = #atype# == NPY_STRING ? 
1 : 0; +\t\t/*XXX: Hmm, what will destroy this array??? */ +\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_FARRAY,NULL); +""", + }, + """ +\t\tif (tmp_arr==NULL) +\t\t\tgoto capi_fail; +\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) +\t\t\tgoto capi_fail; +}"""], + '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), + '_optional': '', + }, { + 'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'}, + """\tif (capi_j>capi_i) { +\t\tPyArrayObject *rv_cb_arr = NULL; +\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; +\t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", + {isintent_c: '|F2PY_INTENT_C'}, + """,capi_tmp); +\t\tif (rv_cb_arr == NULL) { +\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\"); +\t\t\tgoto capi_fail; +\t\t} +\t\tMEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); +\t\tif (capi_tmp != (PyObject *)rv_cb_arr) { +\t\t\tPy_DECREF(rv_cb_arr); +\t\t} +\t}""", + {debugcapi: '\tfprintf(stderr,"<-.\\n");'}, + ], + 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}], + '_check': l_and(isarray, isintent_out) + }, { + 'docreturn': '#varname#,', + '_check': isintent_out + } +] + +################## Build call-back module ############# +cb_map = {} + + +def buildcallbacks(m): + global cb_map + cb_map[m['name']] = [] + for bi in m['body']: + if bi['block'] == 'interface': + for b in bi['body']: + if b: + buildcallback(b, m['name']) + else: + errmess('warning: empty body for %s\n' % (m['name'])) + + +def buildcallback(rout, um): + global cb_map + from . import capi_maps + + outmess('\tConstructing call-back function "cb_%s_in_%s"\n' % + (rout['name'], um)) + args, depargs = getargs(rout) + capi_maps.depargs = depargs + var = rout['vars'] + vrd = capi_maps.cb_routsign2map(rout, um) + rd = dictappend({}, vrd) + cb_map[um].append([rout['name'], rd['name']]) + for r in cb_rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + savevrd = {} + for i, a in enumerate(args): + vrd = capi_maps.cb_sign2map(a, var[a], index=i) + savevrd[a] = vrd + for r in cb_arg_rules: + if '_depend' in r: + continue + if '_optional' in r and isoptional(var[a]): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in args: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' in r: + continue + if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' not in r: + continue + if '_optional' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'args' in rd and 'optargs' in rd: + if isinstance(rd['optargs'], list): + rd['optargs'] = rd['optargs'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_nm'] = rd['optargs_nm'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_td'] = rd['optargs_td'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + if 
isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + optargs = stripcomma(replace('#docsignopt#', + {'docsignopt': rd['docsignopt']} + )) + if optargs == '': + rd['docsignature'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignature'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_') + rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ') + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + if 'args' not in rd: + rd['args'] = '' + rd['args_td'] = '' + rd['args_nm'] = '' + if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): + rd['noargs'] = 'void' + + ar = applyrules(cb_routine_rules, rd) + cfuncs.callbacks[rd['name']] = ar['body'] + if isinstance(ar['need'], str): + ar['need'] = [ar['need']] + + if 'need' in rd: + for t in cfuncs.typedefs.keys(): + if t in rd['need']: + ar['need'].append(t) + + cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs'] + ar['need'].append(rd['name'] + '_typedef') + cfuncs.needs[rd['name']] = ar['need'] + + capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], + 'nofoptargs': ar['nofoptargs'], + 'docstr': ar['docstr'], + 'latexdocstr': ar['latexdocstr'], + 'argname': rd['argname'] + } + outmess('\t %s\n' % (ar['docstrshort'])) + return +################## Build call-back function ############# diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/cb_rules.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/cb_rules.pyc new file mode 100644 index 0000000..bfee393 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/cb_rules.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/cfuncs.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/cfuncs.py new file mode 100644 index 0000000..d59b630 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/cfuncs.py @@ -0,0 +1,1262 @@ +#!/usr/bin/env python +""" + +C declarations, CPP macros, and C functions for f2py2e. +Only required declarations/macros/functions will be used. + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 11:42:34 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +import sys +import copy + +from . 
import __version__ + +f2py_version = __version__.version +errmess = sys.stderr.write + +##################### Definitions ################## + +outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], + 'userincludes': [], + 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], + 'commonhooks': []} +needs = {} +includes0 = {'includes0': '/*need_includes0*/'} +includes = {'includes': '/*need_includes*/'} +userincludes = {'userincludes': '/*need_userincludes*/'} +typedefs = {'typedefs': '/*need_typedefs*/'} +typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} +cppmacros = {'cppmacros': '/*need_cppmacros*/'} +cfuncs = {'cfuncs': '/*need_cfuncs*/'} +callbacks = {'callbacks': '/*need_callbacks*/'} +f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', + 'initf90modhooksstatic': '/*initf90modhooksstatic*/', + 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', + } +commonhooks = {'commonhooks': '/*need_commonhooks*/', + 'initcommonhooks': '/*need_initcommonhooks*/', + } + +############ Includes ################### + +includes0['math.h'] = '#include ' +includes0['string.h'] = '#include ' +includes0['setjmp.h'] = '#include ' + +includes['Python.h'] = '#include "Python.h"' +needs['arrayobject.h'] = ['Python.h'] +includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API +#include "arrayobject.h"''' + +includes['arrayobject.h'] = '#include "fortranobject.h"' +includes['stdarg.h'] = '#include ' + +############# Type definitions ############### + +typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' +typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' +typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' +typedefs['signed_char'] = 'typedef signed char signed_char;' +typedefs['long_long'] = """\ +#ifdef _WIN32 +typedef __int64 long_long; +#else +typedef long long long_long; +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['unsigned_long_long'] = """\ +#ifdef _WIN32 +typedef __uint64 long_long; +#else +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['long_double'] = """\ +#ifndef _LONG_DOUBLE +typedef long double long_double; +#endif +""" +typedefs[ + 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' +typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' +typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' +typedefs['string'] = """typedef char * string;""" + + +############### CPP macros #################### +cppmacros['CFUNCSMESS'] = """\ +#ifdef DEBUGCFUNCS +#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); +#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +#else +#define CFUNCSMESS(mess) +#define CFUNCSMESSPY(mess,obj) +#endif +""" +cppmacros['F_FUNC'] = """\ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F +#else +#define F_FUNC(f,F) _##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F##_ +#else +#define F_FUNC(f,F) _##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F +#else +#define F_FUNC(f,F) f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F##_ +#else +#define F_FUNC(f,F) f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) 
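+/* g77 convention: Fortran names that already contain an underscore get a
+   second trailing underscore, so F_FUNC_US first appends one to both the
+   lower- and upper-case spellings before applying the usual F_FUNC rule. */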
+#else +#define F_FUNC_US(f,F) F_FUNC(f,F) +#endif +""" +cppmacros['F_WRAPPEDFUNC'] = """\ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) +#else +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) +#endif +""" +cppmacros['F_MODFUNC'] = """\ +#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f +#else +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f +#else +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) f ## .in. ## m +#else +#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ +#endif +#endif +/* +#if defined(UPPERCASE_FORTRAN) +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) +#else +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) +#endif +*/ + +#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) +""" +cppmacros['SWAPUNSAFE'] = """\ +#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) +""" +cppmacros['SWAP'] = """\ +#define SWAP(a,b,t) {\\ + t *c;\\ + c = a;\\ + a = b;\\ + b = c;} +""" +# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & +# NPY_ARRAY_C_CONTIGUOUS)' +cppmacros['PRINTPYOBJERR'] = """\ +#define PRINTPYOBJERR(obj)\\ + fprintf(stderr,\"#modulename#.error is related to \");\\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +""" +cppmacros['MINMAX'] = """\ +#ifndef max +#define max(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef min +#define min(a,b) ((a < b) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef MIN +#define MIN(a,b) ((a < b) ? (a) : (b)) +#endif +""" +needs['len..'] = ['f2py_size'] +cppmacros['len..'] = """\ +#define rank(var) var ## _Rank +#define shape(var,dim) var ## _Dims[dim] +#define old_rank(var) (PyArray_NDIM((PyArrayObject *)(capi_ ## var ## _tmp))) +#define old_shape(var,dim) PyArray_DIM(((PyArrayObject *)(capi_ ## var ## _tmp)),dim) +#define fshape(var,dim) shape(var,rank(var)-dim-1) +#define len(var) shape(var,0) +#define flen(var) fshape(var,0) +#define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) +/* #define index(i) capi_i ## i */ +#define slen(var) capi_ ## var ## _len +#define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1) +""" +needs['f2py_size'] = ['stdarg.h'] +cfuncs['f2py_size'] = """\ +static int f2py_size(PyArrayObject* var, ...) 
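+/* Implements the f2py size(var[, dim]) helper declared above: dim == -1
+   (the sentinel appended by the size(...) macro) yields the total element
+   count, a 1-based dim in range yields that dimension's extent, and an
+   out-of-range dim prints a diagnostic and returns 0. */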
+{ + npy_int sz = 0; + npy_int dim; + npy_int rank; + va_list argp; + va_start(argp, var); + dim = va_arg(argp, npy_int); + if (dim==-1) + { + sz = PyArray_SIZE(var); + } + else + { + rank = PyArray_NDIM(var); + if (dim>=1 && dim<=rank) + sz = PyArray_DIM(var, dim-1); + else + fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank); + } + va_end(argp); + return sz; +} +""" + +cppmacros[ + 'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyInt_FromLong(v))' +cppmacros[ + 'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyInt_FromLong(v))' +needs['pyobj_from_int1'] = ['signed_char'] +cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyInt_FromLong(v))' +cppmacros[ + 'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))' +needs['pyobj_from_long_long1'] = ['long_long'] +cppmacros['pyobj_from_long_long1'] = """\ +#ifdef HAVE_LONG_LONG +#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) +#else +#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. +#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) +#endif +""" +needs['pyobj_from_long_double1'] = ['long_double'] +cppmacros[ + 'pyobj_from_long_double1'] = '#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' +cppmacros[ + 'pyobj_from_double1'] = '#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' +cppmacros[ + 'pyobj_from_float1'] = '#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' +needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] +cppmacros[ + 'pyobj_from_complex_long_double1'] = '#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' +needs['pyobj_from_complex_double1'] = ['complex_double'] +cppmacros[ + 'pyobj_from_complex_double1'] = '#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' +needs['pyobj_from_complex_float1'] = ['complex_float'] +cppmacros[ + 'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' +needs['pyobj_from_string1'] = ['string'] +cppmacros[ + 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyString_FromString((char *)v))' +needs['pyobj_from_string1size'] = ['string'] +cppmacros[ + 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUString_FromStringAndSize((char *)v, len))' +needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYPYARRAYTEMPLATE'] = """\ +/* New SciPy */ +#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; + +#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_INT: *(int *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BOOL: *(npy_bool 
*)(PyArray_DATA(arr))=(*v!=0); break;\\ + case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_SHORT: *(short *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ + default: return -2;\\ + };\\ + return 1 +""" + +needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\ +#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {\\ + *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ + *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ + return 1;\\ + }\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r;*(double *)(PyArray_DATA(arr)+sizeof(double))=(*v).i;break;\\ + case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=(*v).r;*(float *)(PyArray_DATA(arr)+sizeof(float))=(*v).i;break;\\ + case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONG: *(long *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_INT: *(int *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_SHORT: *(short *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ + default: return -2;\\ + };\\ + return -1; +""" +# cppmacros['NUMFROMARROBJ']="""\ +# define NUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# 
(PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ +# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ +# cppmacros['CNUMFROMARROBJ']="""\ +# define CNUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ + + +needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] +cppmacros['GETSTRFROMPYTUPLE'] = """\ +#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ + PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ + if (rv_cb_str == NULL)\\ + goto capi_fail;\\ + if (PyString_Check(rv_cb_str)) {\\ + str[len-1]='\\0';\\ + STRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ + } else {\\ + PRINTPYOBJERR(rv_cb_str);\\ + PyErr_SetString(#modulename#_error,\"string object expected\");\\ + goto capi_fail;\\ + }\\ + } +""" +cppmacros['GETSCALARFROMPYTUPLE'] = """\ +#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ + if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ + if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ + goto capi_fail;\\ + } +""" + +cppmacros['FAILNULL'] = """\\ +#define FAILNULL(p) do { \\ + if ((p) == NULL) { \\ + PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ + goto capi_fail; \\ + } \\ +} while (0) +""" +needs['MEMCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['MEMCOPY'] = """\ +#define MEMCOPY(to,from,n)\\ + do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) +""" +cppmacros['STRINGMALLOC'] = """\ +#define STRINGMALLOC(str,len)\\ + if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ + PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ + goto capi_fail;\\ + } else {\\ + (str)[len] = '\\0';\\ + } +""" +cppmacros['STRINGFREE'] = """\ +#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) +""" +needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPYN'] = """\ +#define STRINGCOPYN(to,from,buf_size) \\ + do { \\ + int _m = (buf_size); \\ + char *_to = (to); \\ + char *_from = (from); \\ + FAILNULL(_to); FAILNULL(_from); \\ + (void)strncpy(_to, _from, sizeof(char)*_m); \\ + _to[_m-1] = '\\0'; \\ + /* Padding with spaces instead of nulls */ \\ + for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\ + _to[_m] = ' '; \\ + } \\ + } while (0) +""" +needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPY'] = """\ +#define STRINGCOPY(to,from)\\ + do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) +""" +cppmacros['CHECKGENERIC'] = """\ +#define CHECKGENERIC(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKARRAY'] = """\ +#define CHECKARRAY(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSTRING'] = """\ +#define CHECKSTRING(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + 
sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + PyErr_SetString(#modulename#_error, errstring);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSCALAR'] = """\ +#define CHECKSCALAR(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + PyErr_SetString(#modulename#_error,errstring);\\ + /*goto capi_fail;*/\\ + } else """ +# cppmacros['CHECKDIMS']="""\ +# define CHECKDIMS(dims,rank) \\ +# for (int i=0;i<(rank);i++)\\ +# if (dims[i]<0) {\\ +# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ +# goto capi_fail;\\ +# } +# """ +cppmacros[ + 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' +cppmacros['OLDPYNUM'] = """\ +#ifdef OLDPYNUM +#error You need to install Numeric Python version 13 or higher. Get it from http:/sourceforge.net/project/?group_id=1369 +#endif +""" +################# C functions ############### + +cfuncs['calcarrindex'] = """\ +static int calcarrindex(int *i,PyArrayObject *arr) { + int k,ii = i[0]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['calcarrindextr'] = """\ +static int calcarrindextr(int *i,PyArrayObject *arr) { + int k,ii = i[PyArray_NDIM(arr)-1]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['forcomb'] = """\ +static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; +static int initforcomb(npy_intp *dims,int nd,int tr) { + int k; + if (dims==NULL) return 0; + if (nd<0) return 0; + forcombcache.nd = nd; + forcombcache.d = dims; + forcombcache.tr = tr; + if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + for (k=1;k= 0x03000000 + else if (PyUnicode_Check(obj)) { + tmp = PyUnicode_AsASCIIString(obj); + } + else { + PyObject *tmp2; + tmp2 = PyObject_Str(obj); + if (tmp2) { + tmp = PyUnicode_AsASCIIString(tmp2); + Py_DECREF(tmp2); + } + else { + tmp = NULL; + } + } +#else + else { + tmp = PyObject_Str(obj); + } +#endif + if (tmp == NULL) goto capi_fail; + if (*len == -1) + *len = PyString_GET_SIZE(tmp); + STRINGMALLOC(*str,*len); + STRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); + Py_DECREF(tmp); + return 1; +capi_fail: + Py_XDECREF(tmp); + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" +needs['char_from_pyobj'] = ['int_from_pyobj'] +cfuncs['char_from_pyobj'] = """\ +static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { + int i=0; + if (int_from_pyobj(&i,obj,errmess)) { + *v = (char)i; + return 1; + } + return 0; +} +""" +needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] +cfuncs['signed_char_from_pyobj'] = """\ +static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) { + int i=0; + if (int_from_pyobj(&i,obj,errmess)) { + *v = (signed_char)i; + return 1; + } + return 0; +} +""" +needs['short_from_pyobj'] = ['int_from_pyobj'] +cfuncs['short_from_pyobj'] = """\ +static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { + int i=0; + if (int_from_pyobj(&i,obj,errmess)) { + *v = (short)i; + return 1; + } + return 0; +} +""" +cfuncs['int_from_pyobj'] = """\ 
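+/* Converts a Python object to a C int: plain ints are read directly,
+   other numbers are coerced through PyNumber_Int, and complex numbers
+   (via their real part) or the first element of a sequence are retried
+   recursively; returns 1 on success, 0 with a Python exception set. */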
+static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) { + PyObject* tmp = NULL; + if (PyInt_Check(obj)) { + *v = (int)PyInt_AS_LONG(obj); + return 1; + } + tmp = PyNumber_Int(obj); + if (tmp) { + *v = PyInt_AS_LONG(tmp); + Py_DECREF(tmp); + return 1; + } + if (PyComplex_Check(obj)) + tmp = PyObject_GetAttrString(obj,\"real\"); + else if (PyString_Check(obj) || PyUnicode_Check(obj)) + /*pass*/; + else if (PySequence_Check(obj)) + tmp = PySequence_GetItem(obj,0); + if (tmp) { + PyErr_Clear(); + if (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" +cfuncs['long_from_pyobj'] = """\ +static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { + PyObject* tmp = NULL; + if (PyInt_Check(obj)) { + *v = PyInt_AS_LONG(obj); + return 1; + } + tmp = PyNumber_Int(obj); + if (tmp) { + *v = PyInt_AS_LONG(tmp); + Py_DECREF(tmp); + return 1; + } + if (PyComplex_Check(obj)) + tmp = PyObject_GetAttrString(obj,\"real\"); + else if (PyString_Check(obj) || PyUnicode_Check(obj)) + /*pass*/; + else if (PySequence_Check(obj)) + tmp = PySequence_GetItem(obj,0); + if (tmp) { + PyErr_Clear(); + if (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" +needs['long_long_from_pyobj'] = ['long_long'] +cfuncs['long_long_from_pyobj'] = """\ +static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { + PyObject* tmp = NULL; + if (PyLong_Check(obj)) { + *v = PyLong_AsLongLong(obj); + return (!PyErr_Occurred()); + } + if (PyInt_Check(obj)) { + *v = (long_long)PyInt_AS_LONG(obj); + return 1; + } + tmp = PyNumber_Long(obj); + if (tmp) { + *v = PyLong_AsLongLong(tmp); + Py_DECREF(tmp); + return (!PyErr_Occurred()); + } + if (PyComplex_Check(obj)) + tmp = PyObject_GetAttrString(obj,\"real\"); + else if (PyString_Check(obj) || PyUnicode_Check(obj)) + /*pass*/; + else if (PySequence_Check(obj)) + tmp = PySequence_GetItem(obj,0); + if (tmp) { + PyErr_Clear(); + if (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" +needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] +cfuncs['long_double_from_pyobj'] = """\ +static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { + double d=0; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, LongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(obj)); + return 1; + } + } + if (double_from_pyobj(&d,obj,errmess)) { + *v = (long_double)d; + return 1; + } + return 0; +} +""" +cfuncs['double_from_pyobj'] = """\ +static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { + PyObject* tmp = NULL; + if (PyFloat_Check(obj)) { +#ifdef __sgi + *v = PyFloat_AsDouble(obj); +#else + *v = PyFloat_AS_DOUBLE(obj); +#endif + return 1; + } + tmp = PyNumber_Float(obj); + if (tmp) { +#ifdef __sgi + *v = PyFloat_AsDouble(tmp); +#else + *v = PyFloat_AS_DOUBLE(tmp); +#endif + Py_DECREF(tmp); + return 1; + } + if (PyComplex_Check(obj)) + tmp = 
PyObject_GetAttrString(obj,\"real\"); + else if (PyString_Check(obj) || PyUnicode_Check(obj)) + /*pass*/; + else if (PySequence_Check(obj)) + tmp = PySequence_GetItem(obj,0); + if (tmp) { + PyErr_Clear(); + if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" +needs['float_from_pyobj'] = ['double_from_pyobj'] +cfuncs['float_from_pyobj'] = """\ +static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { + double d=0.0; + if (double_from_pyobj(&d,obj,errmess)) { + *v = (float)d; + return 1; + } + return 0; +} +""" +needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', + 'complex_double_from_pyobj'] +cfuncs['complex_long_double_from_pyobj'] = """\ +static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { + complex_double cd={0.0,0.0}; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, CLongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { + (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; + (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; + return 1; + } + } + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (long_double)cd.r; + (*v).i = (long_double)cd.i; + return 1; + } + return 0; +} +""" +needs['complex_double_from_pyobj'] = ['complex_double'] +cfuncs['complex_double_from_pyobj'] = """\ +static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { + Py_complex c; + if (PyComplex_Check(obj)) { + c=PyComplex_AsCComplex(obj); + (*v).r=c.real, (*v).i=c.imag; + return 1; + } + if (PyArray_IsScalar(obj, ComplexFloating)) { + if (PyArray_IsScalar(obj, CFloat)) { + npy_cfloat new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)new.real; + (*v).i = (double)new.imag; + } + else if (PyArray_IsScalar(obj, CLongDouble)) { + npy_clongdouble new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)new.real; + (*v).i = (double)new.imag; + } + else { /* if (PyArray_IsScalar(obj, CDouble)) */ + PyArray_ScalarAsCtype(obj, v); + } + return 1; + } + if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ + PyObject *arr; + if (PyArray_Check(obj)) { + arr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); + } + else { + arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); + } + if (arr==NULL) return 0; + (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; + (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; + return 1; + } + /* Python does not provide PyNumber_Complex function :-( */ + (*v).i=0.0; + if (PyFloat_Check(obj)) { +#ifdef __sgi + (*v).r = PyFloat_AsDouble(obj); +#else + (*v).r = PyFloat_AS_DOUBLE(obj); +#endif + return 1; + } + if (PyInt_Check(obj)) { + (*v).r = (double)PyInt_AS_LONG(obj); + return 1; + } + if (PyLong_Check(obj)) { + (*v).r = PyLong_AsDouble(obj); + return (!PyErr_Occurred()); + } + if (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { + PyObject *tmp = PySequence_GetItem(obj,0); + if (tmp) { + if (complex_double_from_pyobj(v,tmp,errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) + err = PyExc_TypeError; + PyErr_SetString(err,errmess); + } + return 0; +} +""" +needs['complex_float_from_pyobj'] = [ + 'complex_float', 'complex_double_from_pyobj'] 
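+# The `needs` entries above form a dependency graph over these snippets:
+# get_needs() (defined at the bottom of this module) orders them so that
+# requesting complex_float_from_pyobj also emits the complex_float typedef
+# and complex_double_from_pyobj before it. A rough sketch of the resolution
+# idea (hypothetical helper, not part of f2py):
+#
+#     def transitive_needs(name, seen=None):
+#         seen = set() if seen is None else seen
+#         for dep in needs.get(name, []):
+#             if dep not in seen:
+#                 seen.add(dep)
+#                 transitive_needs(dep, seen)
+#         return seen
+#
+# e.g. transitive_needs('complex_float_from_pyobj') also picks up
+# 'complex_double', inherited from complex_double_from_pyobj.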
+cfuncs['complex_float_from_pyobj'] = """\ +static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { + complex_double cd={0.0,0.0}; + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (float)cd.r; + (*v).i = (float)cd.i; + return 1; + } + return 0; +} +""" +needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] +cfuncs[ + 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] +cfuncs[ + 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' +needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' +needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' +needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' +needs['try_pyarr_from_long_long'] = [ + 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] +cfuncs[ + 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' +needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' +needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' +needs['try_pyarr_from_complex_float'] = [ + 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] +cfuncs[ + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' +needs['try_pyarr_from_complex_double'] = [ + 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] +cfuncs[ + 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + +needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] +cfuncs['create_cb_arglist'] = """\ +static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { + PyObject *tmp = NULL; + PyObject *tmp_fun = NULL; + int tot,opt,ext,siz,i,di=0; + CFUNCSMESS(\"create_cb_arglist\\n\"); + tot=opt=ext=siz=0; + /* Get the total number of arguments */ + if (PyFunction_Check(fun)) + tmp_fun = fun; + else { + di = 1; + if (PyObject_HasAttrString(fun,\"im_func\")) { + tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); + } + else if 
(PyObject_HasAttrString(fun,\"__call__\")) { + tmp = PyObject_GetAttrString(fun,\"__call__\"); + if (PyObject_HasAttrString(tmp,\"im_func\")) + tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); + else { + tmp_fun = fun; /* built-in function */ + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + } + Py_XDECREF(tmp); + } + else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + tmp_fun = fun; + } + else if (F2PyCapsule_Check(fun)) { + tot = maxnofargs; + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + if(ext>0) { + fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); + goto capi_fail; + } + tmp_fun = fun; + } + } +if (tmp_fun==NULL) { +fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name)); +goto capi_fail; +} +#if PY_VERSION_HEX >= 0x03000000 + if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) +#else + if (PyObject_HasAttrString(tmp_fun,\"func_code\")) { + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) +#endif + tot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; + Py_XDECREF(tmp); + } + /* Get the number of optional arguments */ +#if PY_VERSION_HEX >= 0x03000000 + if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { + if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) +#else + if (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) { + if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) +#endif + opt = PyTuple_Size(tmp); + Py_XDECREF(tmp); + } + /* Get the number of extra arguments */ + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + /* Calculate the size of call-backs argument list */ + siz = MIN(maxnofargs+ext,tot); + *nofargs = MAX(0,siz-ext); +#ifdef DEBUGCFUNCS + fprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); +#endif + if (siz 0: + if outneeds[n][0] not in needs: + out.append(outneeds[n][0]) + del outneeds[n][0] + else: + flag = 0 + for k in outneeds[n][1:]: + if k in needs[outneeds[n][0]]: + flag = 1 + break + if flag: + outneeds[n] = outneeds[n][1:] + [outneeds[n][0]] + else: + out.append(outneeds[n][0]) + del outneeds[n][0] + if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \ + and outneeds[n] != []: + print(n, saveout) + errmess( + 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') + out = out + saveout + break + saveout = copy.copy(outneeds[n]) + if out == []: + out = [n] + res[n] = out + return res diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/cfuncs.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/cfuncs.pyc new file mode 100644 index 0000000..0ba9471 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/cfuncs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/common_rules.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/common_rules.py new file mode 100644 index 0000000..62c1ba2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/common_rules.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python +""" + +Build common block mechanism for 
f2py2e. + +Copyright 2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 10:57:33 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.19 $"[10:-1] + +from . import __version__ +f2py_version = __version__.version + +from .auxfuncs import ( + hasbody, hascommon, hasnote, isintent_hide, outmess +) +from . import capi_maps +from . import func2subr +from .crackfortran import rmbadname + + +def findcommonblocks(block, top=1): + ret = [] + if hascommon(block): + for key, value in block['common'].items(): + vars_ = {v: block['vars'][v] for v in value} + ret.append((key, value, vars_)) + elif hasbody(block): + for b in block['body']: + ret = ret + findcommonblocks(b, 0) + if top: + tret = [] + names = [] + for t in ret: + if t[0] not in names: + names.append(t[0]) + tret.append(t) + return tret + return ret + + +def buildhooks(m): + ret = {'commonhooks': [], 'initcommonhooks': [], + 'docs': ['"COMMON blocks:\\n"']} + fwrap = [''] + + def fadd(line, s=fwrap): + s[0] = '%s\n %s' % (s[0], line) + chooks = [''] + + def cadd(line, s=chooks): + s[0] = '%s\n%s' % (s[0], line) + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = '%s\n%s' % (s[0], line) + doc = [''] + + def dadd(line, s=doc): + s[0] = '%s\n%s' % (s[0], line) + for (name, vnames, vars) in findcommonblocks(m): + lower_name = name.lower() + hnames, inames = [], [] + for n in vnames: + if isintent_hide(vars[n]): + hnames.append(n) + else: + inames.append(n) + if hnames: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( + name, ','.join(inames), ','.join(hnames))) + else: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( + name, ','.join(inames))) + fadd('subroutine f2pyinit%s(setupfunc)' % name) + fadd('external setupfunc') + for n in vnames: + fadd(func2subr.var2fixfortran(vars, n)) + if name == '_BLNK_': + fadd('common %s' % (','.join(vnames))) + else: + fadd('common /%s/ %s' % (name, ','.join(vnames))) + fadd('call setupfunc(%s)' % (','.join(inames))) + fadd('end\n') + cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) + idims = [] + for n in inames: + ct = capi_maps.getctype(vars[n]) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, vars[n]) + if dm['dims']: + idims.append('(%s)' % (dm['dims'])) + else: + idims.append('') + dms = dm['dims'].strip() + if not dms: + dms = '-1' + cadd('\t{\"%s\",%s,{{%s}},%s},' % (n, dm['rank'], dms, at)) + cadd('\t{NULL}\n};') + inames1 = rmbadname(inames) + inames1_tps = ','.join(['char *' + s for s in inames1]) + cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) + cadd('\tint i_f2py=0;') + for n in inames1: + cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n)) + cadd('}') + if '_' in lower_name: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' + % (F_FUNC, lower_name, name.upper(), + ','.join(['char*'] * len(inames1)))) + cadd('static void f2py_init_%s(void) {' % name) + cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, lower_name, name.upper(), name)) + cadd('}\n') + iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( + name, name, name)) + tname = name.replace('_', '\\_') + dadd('\\subsection{Common block \\texttt{%s}}\n' % 
(tname)) + dadd('\\begin{description}') + for n in inames: + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, vars[n]))) + if hasnote(vars[n]): + note = vars[n]['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd('--- %s' % (note)) + dadd('\\end{description}') + ret['docs'].append( + '"\t/%s/ %s\\n"' % (name, ','.join(map(lambda v, d: v + d, inames, idims)))) + ret['commonhooks'] = chooks + ret['initcommonhooks'] = ihooks + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fwrap[0] diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/common_rules.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/common_rules.pyc new file mode 100644 index 0000000..5ce924f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/common_rules.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/crackfortran.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/crackfortran.py new file mode 100644 index 0000000..c4a6505 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/crackfortran.py @@ -0,0 +1,3345 @@ +#!/usr/bin/env python +""" +crackfortran --- read fortran (77,90) code and extract declaration information. + +Copyright 1999-2004 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/09/27 07:13:49 $ +Pearu Peterson + + +Usage of crackfortran: +====================== +Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h + -m ,--ignore-contains +Functions: crackfortran, crack2fortran +The following Fortran statements/constructions are supported +(or will be if needed): + block data,byte,call,character,common,complex,contains,data, + dimension,double complex,double precision,end,external,function, + implicit,integer,intent,interface,intrinsic, + logical,module,optional,parameter,private,public, + program,real,(sequence?),subroutine,type,use,virtual, + include,pythonmodule +Note: 'virtual' is mapped to 'dimension'. +Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). +Note: code after 'contains' will be ignored until its scope ends. +Note: 'common' statement is extended: dimensions are moved to variable definitions +Note: f2py directive: f2py is read as +Note: pythonmodule is introduced to represent Python module + +Usage: + `postlist=crackfortran(files)` + `postlist` contains declaration information read from the list of files `files`. 
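+
+  A minimal usage sketch (hypothetical file name 'foo.f'; which keys each
+  block carries depends on the code being cracked):
+
+      from numpy.f2py.crackfortran import crackfortran
+      postlist = crackfortran(['foo.f'])
+      for block in postlist:
+          print(block['block'], block.get('name'))
+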
+  `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
+
+  `postlist` has the following structure:
+ *** it is a list of dictionaries containing `blocks':
+     B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
+          'implicit','externals','interfaced','common','sortvars',
+          'commonvars','note']}
+     B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
+                  'program' | 'block data' | 'type' | 'pythonmodule'
+     B['body'] --- list containing `subblocks' with the same structure as `blocks'
+     B['parent_block'] --- dictionary of a parent block:
+                             C['body'][<index>]['parent_block'] is C
+     B['vars'] --- dictionary of variable definitions
+     B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
+     B['name'] --- name of the block (not if B['block']=='interface')
+     B['prefix'] --- prefix string (only if B['block']=='function')
+     B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
+     B['result'] --- name of the return value (only if B['block']=='function')
+     B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
+     B['externals'] --- list of variables being external
+     B['interfaced'] --- list of variables being external and defined
+     B['common'] --- dictionary of common blocks (list of objects)
+     B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
+     B['from'] --- string showing the 'parents' of the current block
+     B['use'] --- dictionary of modules used in current block:
+         {<modulename>:{['only':<0|1>],['map':{<local_name>:<use_name>,...}]}}
+     B['note'] --- list of LaTeX comments on the block
+     B['f2pyenhancements'] --- optional dictionary
+          {'threadsafe':'','fortranname':<name>,
+           'callstatement':<C-expr>|<multi-line block>,
+           'callprotoargument':<C-expr-list>,
+           'usercode':<multi-line block>|<list of multi-line blocks>,
+           'pymethoddef:<multi-line block>'
+           }
+     B['entry'] --- dictionary {entryname:argslist,..}
+     B['varnames'] --- list of variable names given in the order of reading the
+                       Fortran code, useful for derived types.
+     B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
+ *** Variable definition is a dictionary
+     D = B['vars'][<name>] =
+         {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
+     D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
+                     'double precision' | 'integer' | 'logical' | 'real' | 'type'
+     D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
+                       'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
+                       'optional','required', etc)
+     K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
+         'complex' | 'integer' | 'logical' | 'real' )
+     C = D['charselector'] = {['*','len','kind']}
+         (only if D['typespec']=='character')
+     D['='] --- initialization expression string
+     D['typename'] --- name of the type if D['typespec']=='type'
+     D['dimension'] --- list of dimension bounds
+     D['intent'] --- list of intent specifications
+     D['depend'] --- list of variable names on which current variable depends on
+     D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
+     D['note'] --- list of LaTeX comments on the variable
+ *** Meaning of kind/char selectors (few examples):
+     D['typespec>']*K['*']
+     D['typespec'](kind=K['kind'])
+     character*C['*']
+     character(len=C['len'],kind=C['kind'])
+     (see also fortran type declaration statement formats below)
+
+Fortran 90 type declaration statement format (F77 is subset of F90)
+====================================================================
+(Main source: IBM XL Fortran 5.1 Language Reference Manual)
+type declaration = <typespec> [[<attrspec>]::] <entitydecl>
+<typespec> = byte                          |
+             character[<charselector>]     |
+             complex[<kindselector>]       |
+             double complex                |
+             double precision              |
+             integer[<kindselector>]       |
+             logical[<kindselector>]       |
+             real[<kindselector>]          |
+             type(<typename>)
+<charselector> = * <charlen>               |
+             ([len=]<len>[,[kind=]<kind>]) |
+             (kind=<kind>[,len=<len>])
+<kindselector> = * <intlen>                |
+             ([kind=]<kind>)
+<attrspec> = comma separated list of attributes.
+             Only the following attributes are used in
+             building up the interface:
+                external
+                (parameter --- affects '=' key)
+                optional
+                intent
+             Other attributes are ignored.
+<intentspec> = in | out | inout
+<arrayspec> = comma separated list of dimension bounds.
+<entitydecl> = <name> [[*<charlen>][(<arrayspec>)]
+                      | [(<arrayspec>)]*<charlen>]
+                      [/<init_expr>/ | =<init_expr>]
+                      [,<entitydecl>]
+
+In addition, the following attributes are used: check,depend,note
+
+TODO:
+    * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
+      -> 'real x(2)')
+      The above may be solved by creating an appropriate preprocessor program, for example.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import string
+import fileinput
+import re
+import os
+import copy
+import platform
+
+from . import __version__
+
+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
+from .auxfuncs import *
+
+
+f2py_version = __version__.version
+
+# Global flags:
+strictf77 = 1           # Ignore `!' comments unless line[0]=='!'
+sourcecodeform = 'fix'  # 'fix','free'
+quiet = 0               # Be verbose if 0 (Obsolete: not used any more)
+verbose = 1             # Be quiet if 0, extra verbose if > 1.
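+# NOTE: a minimal sketch of driving these module-level flags directly
+# (hypothetical file name 'foo.f'); they are normally set from the command
+# line keys documented in the module docstring above before crackfortran()
+# is called:
+#
+#     import numpy.f2py.crackfortran as cf
+#     cf.verbose = 2        # extra verbose output
+#     cf.dolowercase = 0    # keep the original identifier case
+#     blocks = cf.crackfortran(['foo.f'])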
+tabchar = 4 * ' ' +pyffilename = '' +f77modulename = '' +skipemptyends = 0 # for old F77 programs without 'program' statement +ignorecontains = 1 +dolowercase = 1 +debug = [] + +# Global variables +beginpattern = '' +currentfilename = '' +expectbegin = 1 +f90modulevars = {} +filepositiontext = '' +gotnextfile = 1 +groupcache = None +groupcounter = 0 +grouplist = {groupcounter: []} +groupname = '' +include_paths = [] +neededmodule = -1 +onlyfuncs = [] +previous_context = None +skipblocksuntil = -1 +skipfuncs = [] +skipfunctions = [] +usermodules = [] + + +def reset_global_f2py_vars(): + global groupcounter, grouplist, neededmodule, expectbegin + global skipblocksuntil, usermodules, f90modulevars, gotnextfile + global filepositiontext, currentfilename, skipfunctions, skipfuncs + global onlyfuncs, include_paths, previous_context + global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename + global f77modulename, skipemptyends, ignorecontains, dolowercase, debug + + # flags + strictf77 = 1 + sourcecodeform = 'fix' + quiet = 0 + verbose = 1 + tabchar = 4 * ' ' + pyffilename = '' + f77modulename = '' + skipemptyends = 0 + ignorecontains = 1 + dolowercase = 1 + debug = [] + # variables + groupcounter = 0 + grouplist = {groupcounter: []} + neededmodule = -1 + expectbegin = 1 + skipblocksuntil = -1 + usermodules = [] + f90modulevars = {} + gotnextfile = 1 + filepositiontext = '' + currentfilename = '' + skipfunctions = [] + skipfuncs = [] + onlyfuncs = [] + include_paths = [] + previous_context = None + + +def outmess(line, flag=1): + global filepositiontext + + if not verbose: + return + if not quiet: + if flag: + sys.stdout.write(filepositiontext) + sys.stdout.write(line) + +re._MAXCACHE = 50 +defaultimplicitrules = {} +for c in "abcdefghopqrstuvwxyz$_": + defaultimplicitrules[c] = {'typespec': 'real'} +for c in "ijklmn": + defaultimplicitrules[c] = {'typespec': 'integer'} +del c +badnames = {} +invbadnames = {} +for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', + 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', + 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', + 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', + 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', + 'max', 'min', + 'flen', 'fshape', + 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', + 'type', 'default']: + badnames[n] = n + '_bn' + invbadnames[n + '_bn'] = n + + +def rmbadname1(name): + if name in badnames: + errmess('rmbadname1: Replacing "%s" with "%s".\n' % + (name, badnames[name])) + return badnames[name] + return name + + +def rmbadname(names): + return [rmbadname1(_m) for _m in names] + + +def undo_rmbadname1(name): + if name in invbadnames: + errmess('undo_rmbadname1: Replacing "%s" with "%s".\n' + % (name, invbadnames[name])) + return invbadnames[name] + return name + + +def undo_rmbadname(names): + return [undo_rmbadname1(_m) for _m in names] + + +def getextension(name): + i = name.rfind('.') + if i == -1: + return '' + if '\\' in name[i:]: + return '' + if '/' in name[i:]: + return '' + return name[i + 1:] + +is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search +_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search +_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search +_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match + + +def is_free_format(file): + """Check if 
file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. + result = 0 + with open(file, 'r') as f: + line = f.readline() + n = 15 # the number of non-comment lines to scan for hints + if _has_f_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = 1 + while n > 0 and line: + if line[0] != '!' and line.strip(): + n -= 1 + if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': + result = 1 + break + line = f.readline() + return result + + +# Read fortran (77,90) code +def readfortrancode(ffile, dowithline=show, istop=1): + """ + Read fortran codes from files and + 1) Get rid of comments, line continuations, and empty lines; lower cases. + 2) Call dowithline(line) on every line. + 3) Recursively call itself when statement \"include ''\" is met. + """ + global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 + global beginpattern, quiet, verbose, dolowercase, include_paths + + if not istop: + saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase + if ffile == []: + return + localdolowercase = dolowercase + cont = 0 + finalline = '' + ll = '' + includeline = re.compile( + r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")', re.I) + cont1 = re.compile(r'(?P.*)&\s*\Z') + cont2 = re.compile(r'(\s*&|)(?P.*)') + mline_mark = re.compile(r".*?'''") + if istop: + dowithline('', -1) + ll, l1 = '', '' + spacedigits = [' '] + [str(_m) for _m in range(10)] + filepositiontext = '' + fin = fileinput.FileInput(ffile) + while True: + l = fin.readline() + if not l: + break + if fin.isfirstline(): + filepositiontext = '' + currentfilename = fin.filename() + gotnextfile = 1 + l1 = l + strictf77 = 0 + sourcecodeform = 'fix' + ext = os.path.splitext(currentfilename)[1] + if is_f_file(currentfilename) and \ + not (_has_f90_header(l) or _has_fix_header(l)): + strictf77 = 1 + elif is_free_format(currentfilename) and not _has_fix_header(l): + sourcecodeform = 'free' + if strictf77: + beginpattern = beginpattern77 + else: + beginpattern = beginpattern90 + outmess('\tReading file %s (format:%s%s)\n' + % (repr(currentfilename), sourcecodeform, + strictf77 and ',strict' or '')) + + l = l.expandtabs().replace('\xa0', ' ') + # Get rid of newline characters + while not l == '': + if l[-1] not in "\n\r\f": + break + l = l[:-1] + if not strictf77: + (l, rl) = split_by_unquoted(l, '!') + l += ' ' + if rl[:5].lower() == '!f2py': # f2py directive + l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + if l.strip() == '': # Skip empty line + cont = 0 + continue + if sourcecodeform == 'fix': + if l[0] in ['*', 'c', '!', 'C', '#']: + if l[1:5].lower() == 'f2py': # f2py directive + l = ' ' + l[5:] + else: # Skip comment line + cont = 0 + continue + elif strictf77: + if len(l) > 72: + l = l[:72] + if not (l[0] in spacedigits): + raise Exception('readfortrancode: Found non-(space,digit) char ' + 'in the first column.\n\tAre you sure that ' + 'this code is in fix form?\n\tline=%s' % repr(l)) + + if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): + # Continuation of a previous line + ll = ll + l[6:] + finalline = '' + origfinalline = '' + else: + if not strictf77: + # F90 continuation + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + # clean up line beginning from possible digits. 
+ l = ' ' + l[5:] + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + # clean up line beginning from possible digits. + l = ' ' + l[5:] + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + + elif sourcecodeform == 'free': + if not cont and ext == '.pyf' and mline_mark.match(l): + l = l + '\n' + while True: + lc = fin.readline() + if not lc: + errmess( + 'Unexpected end of file when reading multiline\n') + break + l = l + lc + if mline_mark.match(lc): + break + l = l.rstrip() + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + raise ValueError( + "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [ + os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + l1 = ll + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % (
+                        repr(fn), os.pathsep.join(include_dirs)))
+        else:
+            dowithline(finalline)
+    filepositiontext = ''
+    fin.close()
+    if istop:
+        dowithline('', 1)
+    else:
+        gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
+            beginpattern, quiet, verbose, dolowercase = saveglobals
+
+# Crack line
+beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
+                  r'\s*(?P<this>(\b(%s)\b))' + \
+                  r'\s*(?P<after>%s)\s*\Z'
+##
+fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
+typespattern = re.compile(
+    beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
+typespattern4implicit = re.compile(beforethisafter % (
+    '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
+#
+functionpattern = re.compile(beforethisafter % (
+    r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
+subroutinepattern = re.compile(beforethisafter % (
+    r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
+# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
+#
+groupbegins77 = r'program|block\s*data'
+beginpattern77 = re.compile(
+    beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
+groupbegins90 = groupbegins77 + \
+    r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
+beginpattern90 = re.compile(
+    beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
+groupends = r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
+endpattern = re.compile(
+    beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end'
+# endifs='end\s*(if|do|where|select|while|forall)'
+endifs = r'(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
+endifpattern = re.compile(
+    beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif'
+#
+implicitpattern = re.compile(
+    beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
+dimensionpattern = re.compile(beforethisafter % (
+    '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
+externalpattern = re.compile(
+    beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
+optionalpattern = re.compile(
+    beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
+requiredpattern = re.compile(
+    beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
+publicpattern = re.compile(
+    beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
+privatepattern = re.compile(
+    beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
+intrisicpattern = re.compile(
+    beforethisafter % ('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic'
+intentpattern = re.compile(beforethisafter % (
+    '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
+parameterpattern = re.compile(
+    beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
+datapattern = re.compile(
+    beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
+callpattern = re.compile(
+    beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
+entrypattern = re.compile(
+    beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
+callfunpattern = re.compile(
+    beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
+commonpattern = re.compile(
+    beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
+usepattern = re.compile(
+    beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
+containspattern = re.compile(
+    beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
+formatpattern = re.compile(
+    beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
+# Non-fortran and f2py-specific statements
+f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
+                                                        'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
+multilinepattern = re.compile(
+    r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
+##
+
+def split_by_unquoted(line, characters):
+    """
+    Splits the line into (line[:i], line[i:]),
+    where i is the index of first occurrence of one of the characters
+    not within quotes, or len(line) if no such index exists
+    """
+    assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
+    r = re.compile(
+        r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
+        r"(?P<after>{char}.*)\Z".format(
+            not_quoted="[^\"'{}]".format(re.escape(characters)),
+            char="[{}]".format(re.escape(characters)),
+            single_quoted=r"('([^'\\]|(\\.))*')",
+            double_quoted=r'("([^"\\]|(\\.))*")'))
+    m = r.match(line)
+    if m:
+        d = m.groupdict()
+        return (d["before"], d["after"])
+    return (line, "")
+
+def _simplifyargs(argsline):
+    a = []
+    for n in markoutercomma(argsline).split('@,@'):
+        for r in '(),':
+            n = n.replace(r, '_')
+        a.append(n)
+    return ','.join(a)
+
+crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)
+
+
+def crackline(line, reset=0):
+    """
+    reset=-1 --- initialize
+    reset=0 --- crack the line
+    reset=1 --- final check if mismatch of blocks occurred
+
+    Cracked data is saved in grouplist[0].
+    """
+    global beginpattern, groupcounter, groupname, groupcache, grouplist
+    global filepositiontext, currentfilename, neededmodule, expectbegin
+    global skipblocksuntil, skipemptyends, previous_context, gotnextfile
+
+    _, has_semicolon = split_by_unquoted(line, ";")
+    if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
+                              multilinepattern[0].match(line)):
+        # XXX: non-zero reset values need testing
+        assert reset == 0, repr(reset)
+        # split line on unquoted semicolons
+        line, semicolon_line = split_by_unquoted(line, ";")
+        while semicolon_line:
+            crackline(line, reset)
+            line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
+        crackline(line, reset)
+        return
+    if reset < 0:
+        groupcounter = 0
+        groupname = {groupcounter: ''}
+        groupcache = {groupcounter: {}}
+        grouplist = {groupcounter: []}
+        groupcache[groupcounter]['body'] = []
+        groupcache[groupcounter]['vars'] = {}
+        groupcache[groupcounter]['block'] = ''
+        groupcache[groupcounter]['name'] = ''
+        neededmodule = -1
+        skipblocksuntil = -1
+        return
+    if reset > 0:
+        fl = 0
+        if f77modulename and neededmodule == groupcounter:
+            fl = 2
+        while groupcounter > fl:
+            outmess('crackline: groupcounter=%s groupname=%s\n' %
+                    (repr(groupcounter), repr(groupname)))
+            outmess(
+                'crackline: Mismatch of blocks encountered.
Trying to fix it by assuming "end" statement.\n') + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 + if f77modulename and neededmodule == groupcounter: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end module + neededmodule = -1 + return + if line == '': + return + flag = 0 + for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, + requiredpattern, + parameterpattern, datapattern, publicpattern, privatepattern, + intrisicpattern, + endifpattern, endpattern, + formatpattern, + beginpattern, functionpattern, subroutinepattern, + implicitpattern, typespattern, commonpattern, + callpattern, usepattern, containspattern, + entrypattern, + f2pyenhancementspattern, + multilinepattern + ]: + m = pat[0].match(line) + if m: + break + flag = flag + 1 + if not m: + re_1 = crackline_re_1 + if 0 <= skipblocksuntil <= groupcounter: + return + if 'externals' in groupcache[groupcounter]: + for name in groupcache[groupcounter]['externals']: + if name in invbadnames: + name = invbadnames[name] + if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: + continue + m1 = re.match( + r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) + if m1: + m2 = re_1.match(m1.group('before')) + a = _simplifyargs(m1.group('args')) + if m2: + line = 'callfun %s(%s) result (%s)' % ( + name, a, m2.group('result')) + else: + line = 'callfun %s(%s)' % (name, a) + m = callfunpattern[0].match(line) + if not m: + outmess( + 'crackline: could not resolve function call for line=%s.\n' % repr(line)) + return + analyzeline(m, 'callfun', line) + return + if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): + previous_context = None + outmess('crackline:%d: No pattern for line\n' % (groupcounter)) + return + elif pat[1] == 'end': + if 0 <= skipblocksuntil < groupcounter: + groupcounter = groupcounter - 1 + if skipblocksuntil <= groupcounter: + return + if groupcounter <= 0: + raise Exception('crackline: groupcounter(=%s) is nonpositive. ' + 'Check the blocks.' 
+                            % (groupcounter))
+        m1 = beginpattern[0].match((line))
+        if (m1) and (not m1.group('this') == groupname[groupcounter]):
+            raise Exception('crackline: End group %s does not match with '
+                            'previous Begin group %s\n\t%s' %
+                            (repr(m1.group('this')), repr(groupname[groupcounter]),
+                             filepositiontext)
+                            )
+        if skipblocksuntil == groupcounter:
+            skipblocksuntil = -1
+        grouplist[groupcounter - 1].append(groupcache[groupcounter])
+        grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+        del grouplist[groupcounter]
+        groupcounter = groupcounter - 1
+        if not skipemptyends:
+            expectbegin = 1
+    elif pat[1] == 'begin':
+        if 0 <= skipblocksuntil <= groupcounter:
+            groupcounter = groupcounter + 1
+            return
+        gotnextfile = 0
+        analyzeline(m, pat[1], line)
+        expectbegin = 0
+    elif pat[1] == 'endif':
+        pass
+    elif pat[1] == 'contains':
+        if ignorecontains:
+            return
+        if 0 <= skipblocksuntil <= groupcounter:
+            return
+        skipblocksuntil = groupcounter
+    else:
+        if 0 <= skipblocksuntil <= groupcounter:
+            return
+        analyzeline(m, pat[1], line)
+
+
+def markouterparen(line):
+    l = ''
+    f = 0
+    for c in line:
+        if c == '(':
+            f = f + 1
+            if f == 1:
+                l = l + '@(@'
+                continue
+        elif c == ')':
+            f = f - 1
+            if f == 0:
+                l = l + '@)@'
+                continue
+        l = l + c
+    return l
+
+
+def markoutercomma(line, comma=','):
+    l = ''
+    f = 0
+    before, after = split_by_unquoted(line, comma + '()')
+    l += before
+    while after:
+        if (after[0] == comma) and (f == 0):
+            l += '@' + comma + '@'
+        else:
+            l += after[0]
+            if after[0] == '(':
+                f += 1
+            elif after[0] == ')':
+                f -= 1
+        before, after = split_by_unquoted(after[1:], comma + '()')
+        l += before
+    assert not f, repr((f, line, l))
+    return l
+
+def unmarkouterparen(line):
+    r = line.replace('@(@', '(').replace('@)@', ')')
+    return r
+
+
+def appenddecl(decl, decl2, force=1):
+    if not decl:
+        decl = {}
+    if not decl2:
+        return decl
+    if decl is decl2:
+        return decl
+    for k in list(decl2.keys()):
+        if k == 'typespec':
+            if force or k not in decl:
+                decl[k] = decl2[k]
+        elif k == 'attrspec':
+            for l in decl2[k]:
+                decl = setattrspec(decl, l, force)
+        elif k == 'kindselector':
+            decl = setkindselector(decl, decl2[k], force)
+        elif k == 'charselector':
+            decl = setcharselector(decl, decl2[k], force)
+        elif k in ['=', 'typename']:
+            if force or k not in decl:
+                decl[k] = decl2[k]
+        elif k == 'note':
+            pass
+        elif k in ['intent', 'check', 'dimension', 'optional', 'required']:
+            errmess('appenddecl: "%s" not implemented.\n' % k)
+        else:
+            raise Exception('appenddecl: Unknown variable definition key:' +
+                            str(k))
+    return decl
+
+selectpattern = re.compile(
+    r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
+nameargspattern = re.compile(
+    r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
+callnameargspattern = re.compile(
+    r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
+real16pattern = re.compile(
+    r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
+real8pattern = re.compile(
+    r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
+
+_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
+
+
+def _is_intent_callback(vdecl):
+    for a in vdecl.get('attrspec', []):
+        if _intentcallbackpattern.match(a):
+            return 1
+    return 0
+
+
+def _resolvenameargspattern(line):
+    line = markouterparen(line)
+    m1 = nameargspattern.match(line)
+    if m1:
+        return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind')
+    m1
= callnameargspattern.match(line) + if m1: + return m1.group('name'), m1.group('args'), None, None + return None, [], None, None + + +def analyzeline(m, case, line): + global groupcounter, groupname, groupcache, grouplist, filepositiontext + global currentfilename, f77modulename, neededinterface, neededmodule + global expectbegin, gotnextfile, previous_context + + block = m.group('this') + if case != 'multiline': + previous_context = None + if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ + and not skipemptyends and groupcounter < 1: + newname = os.path.basename(currentfilename).split('.')[0] + outmess( + 'analyzeline: no group yet. Creating program group with name "%s".\n' % newname) + gotnextfile = 0 + groupcounter = groupcounter + 1 + groupname[groupcounter] = 'program' + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = 'program' + groupcache[groupcounter]['name'] = newname + groupcache[groupcounter]['from'] = 'fromsky' + expectbegin = 0 + if case in ['begin', 'call', 'callfun']: + # Crack line => block,name,args,result + block = block.lower() + if re.match(r'block\s*data', block, re.I): + block = 'block data' + if re.match(r'python\s*module', block, re.I): + block = 'python module' + name, args, result, bind = _resolvenameargspattern(m.group('after')) + if name is None: + if block == 'block data': + name = '_BLOCK_DATA_' + else: + name = '' + if block not in ['interface', 'block data']: + outmess('analyzeline: No name/args pattern found for line.\n') + + previous_context = (block, name, groupcounter) + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + if '' in args: + while '' in args: + args.remove('') + outmess( + 'analyzeline: argument list is malformed (missing argument).\n') + + # end of crack line => block,name,args,result + needmodule = 0 + needinterface = 0 + + if case in ['call', 'callfun']: + needinterface = 1 + if 'args' not in groupcache[groupcounter]: + return + if name not in groupcache[groupcounter]['args']: + return + for it in grouplist[groupcounter]: + if it['name'] == name: + return + if name in groupcache[groupcounter]['interfaced']: + return + block = {'call': 'subroutine', 'callfun': 'function'}[case] + if f77modulename and neededmodule == -1 and groupcounter <= 1: + neededmodule = groupcounter + 2 + needmodule = 1 + if block != 'interface': + needinterface = 1 + # Create new block(s) + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needmodule: + if verbose > 1: + outmess('analyzeline: Creating module block %s\n' % + repr(f77modulename), 0) + groupname[groupcounter] = 'module' + groupcache[groupcounter]['block'] = 'python module' + groupcache[groupcounter]['name'] = f77modulename + groupcache[groupcounter]['from'] = '' + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needinterface: + if verbose > 1: + outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( + groupcounter), 0) + groupname[groupcounter] = 'interface' + groupcache[groupcounter]['block'] = 'interface' + groupcache[groupcounter]['name'] = 'unknown_interface' + groupcache[groupcounter]['from'] = 
'%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupname[groupcounter] = block + groupcache[groupcounter]['block'] = block + if not name: + name = 'unknown_' + block + groupcache[groupcounter]['prefix'] = m.group('before') + groupcache[groupcounter]['name'] = rmbadname1(name) + groupcache[groupcounter]['result'] = result + if groupcounter == 1: + groupcache[groupcounter]['from'] = currentfilename + else: + if f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) + else: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + for k in list(groupcache[groupcounter].keys()): + if not groupcache[groupcounter][k]: + del groupcache[groupcounter][k] + + groupcache[groupcounter]['args'] = args + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['entry'] = {} + # end of creation + if block == 'type': + groupcache[groupcounter]['varnames'] = [] + + if case in ['call', 'callfun']: # set parents variables + if name not in groupcache[groupcounter - 2]['externals']: + groupcache[groupcounter - 2]['externals'].append(name) + groupcache[groupcounter]['vars'] = copy.deepcopy( + groupcache[groupcounter - 2]['vars']) + try: + del groupcache[groupcounter]['vars'][name][ + groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] + except Exception: + pass + if block in ['function', 'subroutine']: # set global attributes + try: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) + except Exception: + pass + if case == 'callfun': # return type + if result and result in groupcache[groupcounter]['vars']: + if not name == result: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) + # if groupcounter>1: # name is interfaced + try: + groupcache[groupcounter - 2]['interfaced'].append(name) + except Exception: + pass + if block == 'function': + t = typespattern[0].match(m.group('before') + ' ' + name) + if t: + typespec, selector, attr, edecl = cracktypespec0( + t.group('this'), t.group('after')) + updatevars(typespec, selector, attr, edecl) + + if case in ['call', 'callfun']: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end routine + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + + elif case == 'entry': + name, args, result, bind = _resolvenameargspattern(m.group('after')) + if name is not None: + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + assert result is None, repr(result) + groupcache[groupcounter]['entry'][name] = args + previous_context = 
('entry', name, groupcounter) + elif case == 'type': + typespec, selector, attr, edecl = cracktypespec0( + block, m.group('after')) + last_name = updatevars(typespec, selector, attr, edecl) + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']: + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip() + i = ll.find('::') + if i < 0 and case == 'intent': + i = markouterparen(ll).find('@)@') - 2 + ll = ll[:i + 1] + '::' + ll[i + 1:] + i = ll.find('::') + if ll[i:] == '::' and 'args' in groupcache[groupcounter]: + outmess('All arguments will have attribute %s%s\n' % + (m.group('this'), ll[:i])) + ll = ll + ','.join(groupcache[groupcounter]['args']) + if i < 0: + i = 0 + pl = '' + else: + pl = ll[:i].strip() + ll = ll[i + 2:] + ch = markoutercomma(pl).split('@,@') + if len(ch) > 1: + pl = ch[0] + outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( + ','.join(ch[1:]))) + last_name = None + + for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: + m1 = namepattern.match(e) + if not m1: + if case in ['public', 'private']: + k = '' + else: + print(m.groupdict()) + outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( + case, repr(e))) + continue + else: + k = rmbadname1(m1.group('name')) + if k not in edecl: + edecl[k] = {} + if case == 'dimension': + ap = case + m1.group('after') + if case == 'intent': + ap = m.group('this') + pl + if _intentcallbackpattern.match(ap): + if k not in groupcache[groupcounter]['args']: + if groupcounter > 1: + if '__user__' not in groupcache[groupcounter - 2]['name']: + outmess( + 'analyzeline: missing __user__ module (could be nothing)\n') + # fixes ticket 1693 + if k != groupcache[groupcounter]['name']: + outmess('analyzeline: appending intent(callback) %s' + ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + groupcache[groupcounter]['args'].append(k) + else: + errmess( + 'analyzeline: intent(callback) %s is ignored' % (k)) + else: + errmess('analyzeline: intent(callback) %s is already' + ' in argument list' % (k)) + if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']: + ap = case + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append(ap) + else: + edecl[k]['attrspec'] = [ap] + if case == 'external': + if groupcache[groupcounter]['block'] == 'program': + outmess('analyzeline: ignoring program arguments\n') + continue + if k not in groupcache[groupcounter]['args']: + continue + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(k) + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'parameter': + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip()[1:-1] + last_name = None + for e in markoutercomma(ll).split('@,@'): + try: + k, initexpr = [x.strip() for x in e.split('=')] + except Exception: + outmess( + 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll)) + continue + params = get_parameters(edecl) + k = rmbadname1(k) + if k not in edecl: + edecl[k] = {} + if '=' in edecl[k] and (not edecl[k]['='] == initexpr): + outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( + k, edecl[k]['='], 
initexpr)) + t = determineexprtype(initexpr, params) + if t: + if t.get('typespec') == 'real': + tt = list(initexpr) + for m in real16pattern.finditer(initexpr): + tt[m.start():m.end()] = list( + initexpr[m.start():m.end()].lower().replace('d', 'e')) + initexpr = ''.join(tt) + elif t.get('typespec') == 'complex': + initexpr = initexpr[1:].lower().replace('d', 'e').\ + replace(',', '+1j*(') + try: + v = eval(initexpr, {}, params) + except (SyntaxError, NameError, TypeError) as msg: + errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' + % (initexpr, msg)) + continue + edecl[k]['='] = repr(v) + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append('parameter') + else: + edecl[k]['attrspec'] = ['parameter'] + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'implicit': + if m.group('after').strip().lower() == 'none': + groupcache[groupcounter]['implicit'] = None + elif m.group('after'): + if 'implicit' in groupcache[groupcounter]: + impl = groupcache[groupcounter]['implicit'] + else: + impl = {} + if impl is None: + outmess( + 'analyzeline: Overwriting earlier "implicit none" statement.\n') + impl = {} + for e in markoutercomma(m.group('after')).split('@,@'): + decl = {} + m1 = re.match( + r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) + if not m1: + outmess( + 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e)) + continue + m2 = typespattern4implicit.match(m1.group('this')) + if not m2: + outmess( + 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) + continue + typespec, selector, attr, edecl = cracktypespec0( + m2.group('this'), m2.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + decl['typespec'] = typespec + decl['kindselector'] = kindselect + decl['charselector'] = charselect + decl['typename'] = typename + for k in list(decl.keys()): + if not decl[k]: + del decl[k] + for r in markoutercomma(m1.group('after')).split('@,@'): + if '-' in r: + try: + begc, endc = [x.strip() for x in r.split('-')] + except Exception: + outmess( + 'analyzeline: expected "-" instead of "%s" in range list of implicit statement\n' % r) + continue + else: + begc = endc = r.strip() + if not len(begc) == len(endc) == 1: + outmess( + 'analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n' % r) + continue + for o in range(ord(begc), ord(endc) + 1): + impl[chr(o)] = decl + groupcache[groupcounter]['implicit'] = impl + elif case == 'data': + ll = [] + dl = '' + il = '' + f = 0 + fc = 1 + inp = 0 + for c in m.group('after'): + if not inp: + if c == "'": + fc = not fc + if c == '/' and fc: + f = f + 1 + continue + if c == '(': + inp = inp + 1 + elif c == ')': + inp = inp - 1 + if f == 0: + dl = dl + c + elif f == 1: + il = il + c + elif f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + dl = c + il = '' + f = 0 + if f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + vars = {} + if 'vars' in groupcache[groupcounter]: + vars = groupcache[groupcounter]['vars'] + last_name = None + for l in ll: + l = [x.strip() for x in l] + if l[0][0] == ',': + l[0] = l[0][1:] + if l[0][0] == '(': + outmess( + 'analyzeline: implied-DO list "%s" is not supported. 
Skipping.\n' % l[0]) + continue + i = 0 + j = 0 + llen = len(l[1]) + for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): + if v[0] == '(': + outmess( + 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) + # XXX: subsequent init expressions may get wrong values. + # Ignoring since data statements are irrelevant for + # wrapping. + continue + fc = 0 + while (i < llen) and (fc or not l[1][i] == ','): + if l[1][i] == "'": + fc = not fc + i = i + 1 + i = i + 1 + if v not in vars: + vars[v] = {} + if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]: + outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % ( + v, vars[v]['='], l[1][j:i - 1])) + vars[v]['='] = l[1][j:i - 1] + j = i + last_name = v + groupcache[groupcounter]['vars'] = vars + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'common': + line = m.group('after').strip() + if not line[0] == '/': + line = '//' + line + cl = [] + f = 0 + bn = '' + ol = '' + for c in line: + if c == '/': + f = f + 1 + continue + if f >= 3: + bn = bn.strip() + if not bn: + bn = '_BLNK_' + cl.append([bn, ol]) + f = f - 2 + bn = '' + ol = '' + if f % 2: + bn = bn + c + else: + ol = ol + c + bn = bn.strip() + if not bn: + bn = '_BLNK_' + cl.append([bn, ol]) + commonkey = {} + if 'common' in groupcache[groupcounter]: + commonkey = groupcache[groupcounter]['common'] + for c in cl: + if c[0] not in commonkey: + commonkey[c[0]] = [] + for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: + if i: + commonkey[c[0]].append(i) + groupcache[groupcounter]['common'] = commonkey + previous_context = ('common', bn, groupcounter) + elif case == 'use': + m1 = re.match( + r'\A\s*(?P\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) + if m1: + mm = m1.groupdict() + if 'use' not in groupcache[groupcounter]: + groupcache[groupcounter]['use'] = {} + name = m1.group('name') + groupcache[groupcounter]['use'][name] = {} + isonly = 0 + if 'list' in mm and mm['list'] is not None: + if 'notonly' in mm and mm['notonly'] is None: + isonly = 1 + groupcache[groupcounter]['use'][name]['only'] = isonly + ll = [x.strip() for x in mm['list'].split(',')] + rl = {} + for l in ll: + if '=' in l: + m2 = re.match( + r'\A\s*(?P\b[\w]+\b)\s*=\s*>\s*(?P\b[\w]+\b)\s*\Z', l, re.I) + if m2: + rl[m2.group('local').strip()] = m2.group( + 'use').strip() + else: + outmess( + 'analyzeline: Not local=>use pattern found in %s\n' % repr(l)) + else: + rl[l] = l + groupcache[groupcounter]['use'][name]['map'] = rl + else: + pass + else: + print(m.groupdict()) + outmess('analyzeline: Could not crack the use statement.\n') + elif case in ['f2pyenhancements']: + if 'f2pyenhancements' not in groupcache[groupcounter]: + groupcache[groupcounter]['f2pyenhancements'] = {} + d = groupcache[groupcounter]['f2pyenhancements'] + if m.group('this') == 'usercode' and 'usercode' in d: + if isinstance(d['usercode'], str): + d['usercode'] = [d['usercode']] + d['usercode'].append(m.group('after')) + else: + d[m.group('this')] = m.group('after') + elif case == 'multiline': + if previous_context is None: + if verbose: + outmess('analyzeline: No context for multiline block.\n') + return + gc = groupcounter + appendmultiline(groupcache[gc], + previous_context[:2], + m.group('this')) + else: + if verbose > 1: + print(m.groupdict()) + outmess('analyzeline: No code implemented for line.\n') + + +def appendmultiline(group, context_name, ml): + if 'f2pymultilines' not in group: + 
group['f2pymultilines'] = {} + d = group['f2pymultilines'] + if context_name not in d: + d[context_name] = [] + d[context_name].append(ml) + return + + +def cracktypespec0(typespec, ll): + selector = None + attr = None + if re.match(r'double\s*complex', typespec, re.I): + typespec = 'double complex' + elif re.match(r'double\s*precision', typespec, re.I): + typespec = 'double precision' + else: + typespec = typespec.strip().lower() + m1 = selectpattern.match(markouterparen(ll)) + if not m1: + outmess( + 'cracktypespec0: no kind/char_selector pattern found for line.\n') + return + d = m1.groupdict() + for k in list(d.keys()): + d[k] = unmarkouterparen(d[k]) + if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: + selector = d['this'] + ll = d['after'] + i = ll.find('::') + if i >= 0: + attr = ll[:i].strip() + ll = ll[i + 2:] + return typespec, selector, attr, ll +##### +namepattern = re.compile(r'\s*(?P\b[\w]+\b)\s*(?P.*)\s*\Z', re.I) +kindselector = re.compile( + r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|[*]\s*(?P.*?))\s*\Z', re.I) +charselector = re.compile( + r'\s*(\((?P.*)\)|[*]\s*(?P.*))\s*\Z', re.I) +lenkindpattern = re.compile( + r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)|))\s*\Z', re.I) +lenarraypattern = re.compile( + r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*[*]\s*(?P.*?)|([*]\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) + + +def removespaces(expr): + expr = expr.strip() + if len(expr) <= 1: + return expr + expr2 = expr[0] + for i in range(1, len(expr) - 1): + if (expr[i] == ' ' and + ((expr[i + 1] in "()[]{}=+-/* ") or + (expr[i - 1] in "()[]{}=+-/* "))): + continue + expr2 = expr2 + expr[i] + expr2 = expr2 + expr[-1] + return expr2 + + +def markinnerspaces(line): + l = '' + f = 0 + cc = '\'' + cb = '' + for c in line: + if cb == '\\' and c in ['\\', '\'', '"']: + l = l + c + cb = c + continue + if f == 0 and c in ['\'', '"']: + cc = c + if c == cc: + f = f + 1 + elif c == cc: + f = f - 1 + elif c == ' ' and f == 1: + l = l + '@_@' + continue + l = l + c + cb = c + return l + + +def updatevars(typespec, selector, attrspec, entitydecl): + global groupcache, groupcounter + + last_name = None + kindselect, charselect, typename = cracktypespec(typespec, selector) + if attrspec: + attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')] + l = [] + c = re.compile(r'(?P[a-zA-Z]+)') + for a in attrspec: + if not a: + continue + m = c.match(a) + if m: + s = m.group('start').lower() + a = s + a[len(s):] + l.append(a) + attrspec = l + el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')] + el1 = [] + for e in el: + for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: + if e1: + el1.append(e1.replace('@_@', ' ')) + for e in el1: + m = namepattern.match(e) + if not m: + outmess( + 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e))) + continue + ename = rmbadname1(m.group('name')) + edecl = {} + if ename in groupcache[groupcounter]['vars']: + edecl = groupcache[groupcounter]['vars'][ename].copy() + not_has_typespec = 'typespec' not in edecl + if not_has_typespec: + edecl['typespec'] = typespec + elif typespec and (not typespec == edecl['typespec']): + outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". 
Ignoring.\n' % ( + ename, edecl['typespec'], typespec)) + if 'kindselector' not in edecl: + edecl['kindselector'] = copy.copy(kindselect) + elif kindselect: + for k in list(kindselect.keys()): + if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): + outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['kindselector'][k], kindselect[k])) + else: + edecl['kindselector'][k] = copy.copy(kindselect[k]) + if 'charselector' not in edecl and charselect: + if not_has_typespec: + edecl['charselector'] = charselect + else: + errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' + % (ename, charselect)) + elif charselect: + for k in list(charselect.keys()): + if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): + outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['charselector'][k], charselect[k])) + else: + edecl['charselector'][k] = copy.copy(charselect[k]) + if 'typename' not in edecl: + edecl['typename'] = typename + elif typename and (not edecl['typename'] == typename): + outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['typename'], typename)) + if 'attrspec' not in edecl: + edecl['attrspec'] = copy.copy(attrspec) + elif attrspec: + for a in attrspec: + if a not in edecl['attrspec']: + edecl['attrspec'].append(a) + else: + edecl['typespec'] = copy.copy(typespec) + edecl['kindselector'] = copy.copy(kindselect) + edecl['charselector'] = copy.copy(charselect) + edecl['typename'] = typename + edecl['attrspec'] = copy.copy(attrspec) + if m.group('after'): + m1 = lenarraypattern.match(markouterparen(m.group('after'))) + if m1: + d1 = m1.groupdict() + for lk in ['len', 'array', 'init']: + if d1[lk + '2'] is not None: + d1[lk] = d1[lk + '2'] + del d1[lk + '2'] + for k in list(d1.keys()): + if d1[k] is not None: + d1[k] = unmarkouterparen(d1[k]) + else: + del d1[k] + if 'len' in d1 and 'array' in d1: + if d1['len'] == '': + d1['len'] = d1['array'] + del d1['array'] + else: + d1['array'] = d1['array'] + ',' + d1['len'] + del d1['len'] + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( + typespec, e, typespec, ename, d1['array'])) + if 'array' in d1: + dm = 'dimension(%s)' % d1['array'] + if 'attrspec' not in edecl or (not edecl['attrspec']): + edecl['attrspec'] = [dm] + else: + edecl['attrspec'].append(dm) + for dm1 in edecl['attrspec']: + if dm1[:9] == 'dimension' and dm1 != dm: + del edecl['attrspec'][-1] + errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' + % (ename, dm1, dm)) + break + + if 'len' in d1: + if typespec in ['complex', 'integer', 'logical', 'real']: + if ('kindselector' not in edecl) or (not edecl['kindselector']): + edecl['kindselector'] = {} + edecl['kindselector']['*'] = d1['len'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + if 'init' in d1: + if '=' in edecl and (not edecl['='] == d1['init']): + outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['='], d1['init'])) + else: + edecl['='] = d1['init'] + else: + outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n' % ( + ename + m.group('after'))) + for k in list(edecl.keys()): + if not edecl[k]: + del edecl[k] + groupcache[groupcounter]['vars'][ename] = edecl + if 'varnames' in groupcache[groupcounter]: + groupcache[groupcounter]['varnames'].append(ename) + last_name = ename + return last_name + + +def cracktypespec(typespec, selector): + kindselect = None + charselect = None + typename = None + if selector: + if typespec in ['complex', 'integer', 'logical', 'real']: + kindselect = kindselector.match(selector) + if not kindselect: + outmess( + 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) + return + kindselect = kindselect.groupdict() + kindselect['*'] = kindselect['kind2'] + del kindselect['kind2'] + for k in list(kindselect.keys()): + if not kindselect[k]: + del kindselect[k] + for k, i in list(kindselect.items()): + kindselect[k] = rmbadname1(i) + elif typespec == 'character': + charselect = charselector.match(selector) + if not charselect: + outmess( + 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) + return + charselect = charselect.groupdict() + charselect['*'] = charselect['charlen'] + del charselect['charlen'] + if charselect['lenkind']: + lenkind = lenkindpattern.match( + markoutercomma(charselect['lenkind'])) + lenkind = lenkind.groupdict() + for lk in ['len', 'kind']: + if lenkind[lk + '2']: + lenkind[lk] = lenkind[lk + '2'] + charselect[lk] = lenkind[lk] + del lenkind[lk + '2'] + del charselect['lenkind'] + for k in list(charselect.keys()): + if not charselect[k]: + del charselect[k] + for k, i in list(charselect.items()): + charselect[k] = rmbadname1(i) + elif typespec == 'type': + typename = re.match(r'\s*\(\s*(?P\w+)\s*\)', selector, re.I) + if typename: + typename = typename.group('name') + else: + outmess('cracktypespec: no typename found in %s\n' % + (repr(typespec + selector))) + else: + outmess('cracktypespec: no selector used for %s\n' % + (repr(selector))) + return kindselect, charselect, typename +###### + + +def setattrspec(decl, attr, force=0): + if not decl: + decl = {} + if not attr: + return decl + if 'attrspec' not in decl: + decl['attrspec'] = [attr] + return decl + if force: + decl['attrspec'].append(attr) + if attr in decl['attrspec']: + return decl + if attr == 'static' and 'automatic' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'automatic' and 'static' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'public' and 'private' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'private' and 'public' not in decl['attrspec']: + decl['attrspec'].append(attr) + else: + decl['attrspec'].append(attr) + return decl + + +def setkindselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'kindselector' not in decl: + decl['kindselector'] = sel + return decl + for k in list(sel.keys()): + if force or k not in decl['kindselector']: + decl['kindselector'][k] = sel[k] + return decl + + +def setcharselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'charselector' not in decl: + decl['charselector'] = sel + return decl + for k in list(sel.keys()): + if force or k not in decl['charselector']: + decl['charselector'][k] = sel[k] + return decl + + +def getblockname(block, unknown='unknown'): + if 'name' in block: + return block['name'] + return unknown + +# post processing + + +def setmesstext(block): + global filepositiontext + + try: + filepositiontext = 'In: %s:%s\n' % 
(block['from'], block['name']) + except Exception: + pass + + +def get_usedict(block): + usedict = {} + if 'parent_block' in block: + usedict = get_usedict(block['parent_block']) + if 'use' in block: + usedict.update(block['use']) + return usedict + + +def get_useparameters(block, param_map=None): + global f90modulevars + + if param_map is None: + param_map = {} + usedict = get_usedict(block) + if not usedict: + return param_map + for usename, mapping in list(usedict.items()): + usename = usename.lower() + if usename not in f90modulevars: + outmess('get_useparameters: no module %s info used by %s\n' % + (usename, block.get('name'))) + continue + mvars = f90modulevars[usename] + params = get_parameters(mvars) + if not params: + continue + # XXX: apply mapping + if mapping: + errmess('get_useparameters: mapping for %s not impl.' % (mapping)) + for k, v in list(params.items()): + if k in param_map: + outmess('get_useparameters: overriding parameter %s with' + ' value from module %s' % (repr(k), repr(usename))) + param_map[k] = v + + return param_map + + +def postcrack2(block, tab='', param_map=None): + global f90modulevars + + if not f90modulevars: + return block + if isinstance(block, list): + ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) + for g in block] + return ret + setmesstext(block) + outmess('%sBlock: %s\n' % (tab, block['name']), 0) + + if param_map is None: + param_map = get_useparameters(block) + + if param_map is not None and 'vars' in block: + vars = block['vars'] + for n in list(vars.keys()): + var = vars[n] + if 'kindselector' in var: + kind = var['kindselector'] + if 'kind' in kind: + val = kind['kind'] + if val in param_map: + kind['kind'] = param_map[val] + new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) + for b in block['body']] + block['body'] = new_body + + return block + + +def postcrack(block, args=None, tab=''): + """ + TODO: + function return values + determine expression types if in argument list + """ + global usermodules, onlyfunctions + + if isinstance(block, list): + gret = [] + uret = [] + for g in block: + setmesstext(g) + g = postcrack(g, tab=tab + '\t') + # sort user routines to appear first + if 'name' in g and '__user__' in g['name']: + uret.append(g) + else: + gret.append(g) + return uret + gret + setmesstext(block) + if not isinstance(block, dict) and 'block' not in block: + raise Exception('postcrack: Expected block dictionary instead of ' + + str(block)) + if 'name' in block and not block['name'] == 'unknown_interface': + outmess('%sBlock: %s\n' % (tab, block['name']), 0) + block = analyzeargs(block) + block = analyzecommon(block) + block['vars'] = analyzevars(block) + block['sortvars'] = sortvarnames(block['vars']) + if 'args' in block and block['args']: + args = block['args'] + block['body'] = analyzebody(block, args, tab=tab) + + userisdefined = [] + if 'use' in block: + useblock = block['use'] + for k in list(useblock.keys()): + if '__user__' in k: + userisdefined.append(k) + else: + useblock = {} + name = '' + if 'name' in block: + name = block['name'] + # and not userisdefined: # Build a __user__ module + if 'externals' in block and block['externals']: + interfaced = [] + if 'interfaced' in block: + interfaced = block['interfaced'] + mvars = copy.copy(block['vars']) + if name: + mname = name + '__user__routines' + else: + mname = 'unknown__user__routines' + if mname in userisdefined: + i = 1 + while '%s_%i' % (mname, i) in userisdefined: + i = i + 1 + mname = '%s_%i' % (mname, i) + interface = {'block': 'interface', 
'body': [], + 'vars': {}, 'name': name + '_user_interface'} + for e in block['externals']: + if e in interfaced: + edef = [] + j = -1 + for b in block['body']: + j = j + 1 + if b['block'] == 'interface': + i = -1 + for bb in b['body']: + i = i + 1 + if 'name' in bb and bb['name'] == e: + edef = copy.copy(bb) + del b['body'][i] + break + if edef: + if not b['body']: + del block['body'][j] + del interfaced[interfaced.index(e)] + break + interface['body'].append(edef) + else: + if e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] + if interface['vars'] or interface['body']: + block['interfaced'] = interfaced + mblock = {'block': 'python module', 'body': [ + interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} + useblock[mname] = {} + usermodules.append(mblock) + if useblock: + block['use'] = useblock + return block + + +def sortvarnames(vars): + indep = [] + dep = [] + for v in list(vars.keys()): + if 'depend' in vars[v] and vars[v]['depend']: + dep.append(v) + else: + indep.append(v) + n = len(dep) + i = 0 + while dep: # XXX: How to catch dependence cycles correctly? + v = dep[0] + fl = 0 + for w in dep[1:]: + if w in vars[v]['depend']: + fl = 1 + break + if fl: + dep = dep[1:] + [v] + i = i + 1 + if i > n: + errmess('sortvarnames: failed to compute dependencies because' + ' of cyclic dependencies between ' + + ', '.join(dep) + '\n') + indep = indep + dep + break + else: + indep.append(v) + dep = dep[1:] + n = len(dep) + i = 0 + return indep + + +def analyzecommon(block): + if not hascommon(block): + return block + commonvars = [] + for k in list(block['common'].keys()): + comvars = [] + for e in block['common'][k]: + m = re.match( + r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z', e, re.I) + if m: + dims = [] + if m.group('dims'): + dims = [x.strip() + for x in markoutercomma(m.group('dims')).split('@,@')] + n = rmbadname1(m.group('name').strip()) + if n in block['vars']: + if 'attrspec' in block['vars'][n]: + block['vars'][n]['attrspec'].append( + 'dimension(%s)' % (','.join(dims))) + else: + block['vars'][n]['attrspec'] = [ + 'dimension(%s)' % (','.join(dims))] + else: + if dims: + block['vars'][n] = { + 'attrspec': ['dimension(%s)' % (','.join(dims))]} + else: + block['vars'][n] = {} + if n not in commonvars: + commonvars.append(n) + else: + n = e + errmess( + 'analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n' % (e, k)) + comvars.append(n) + block['common'][k] = comvars + if 'commonvars' not in block: + block['commonvars'] = commonvars + else: + block['commonvars'] = block['commonvars'] + commonvars + return block + + +def analyzebody(block, args, tab=''): + global usermodules, skipfuncs, onlyfuncs, f90modulevars + + setmesstext(block) + body = [] + for b in block['body']: + b['parent_block'] = block + if b['block'] in ['function', 'subroutine']: + if args is not None and b['name'] not in args: + continue + else: + as_ = b['args'] + if b['name'] in skipfuncs: + continue + if onlyfuncs and b['name'] not in onlyfuncs: + continue + b['saved_interface'] = crack2fortrangen( + b, '\n' + ' ' * 6, as_interface=True) + + else: + as_ = args + b = postcrack(b, as_, tab=tab + '\t') + if b['block'] == 'interface' and not b['body']: + if 'f2pyenhancements' not in b: + continue + if b['block'].replace(' ', '') == 'pythonmodule': + usermodules.append(b) + else: + if b['block'] == 'module': + f90modulevars[b['name']] = b['vars'] + body.append(b) + return body + + +def buildimplicitrules(block): + setmesstext(block) + implicitrules = 
defaultimplicitrules + attrrules = {} + if 'implicit' in block: + if block['implicit'] is None: + implicitrules = None + if verbose > 1: + outmess( + 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) + else: + for k in list(block['implicit'].keys()): + if block['implicit'][k].get('typespec') not in ['static', 'automatic']: + implicitrules[k] = block['implicit'][k] + else: + attrrules[k] = block['implicit'][k]['typespec'] + return implicitrules, attrrules + + +def myeval(e, g=None, l=None): + r = eval(e, g, l) + if type(r) in [type(0), type(0.0)]: + return r + raise ValueError('r=%r' % (r)) + +getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) + + +def getlincoef(e, xset): # e = a*x+b ; x in xset + try: + c = int(myeval(e, {}, {})) + return 0, c, None + except Exception: + pass + if getlincoef_re_1.match(e): + return 1, 0, e + len_e = len(e) + for x in xset: + if len(x) > len_e: + continue + if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): + # skip function calls having x as an argument, e.g max(1, x) + continue + re_1 = re.compile(r'(?P.*?)\b' + x + r'\b(?P.*)', re.I) + m = re_1.match(e) + if m: + try: + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 0, m1.group('after')) + m1 = re_1.match(ee) + b = myeval(ee, {}, {}) + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 1, m1.group('after')) + m1 = re_1.match(ee) + a = myeval(ee, {}, {}) - b + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 0.5, m1.group('after')) + m1 = re_1.match(ee) + c = myeval(ee, {}, {}) + # computing another point to be sure that expression is linear + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 1.5, m1.group('after')) + m1 = re_1.match(ee) + c2 = myeval(ee, {}, {}) + if (a * 0.5 + b == c and a * 1.5 + b == c2): + return a, b, x + except Exception: + pass + break + return None, None, None + +_varname_match = re.compile(r'\A[a-z]\w*\Z').match + + +def getarrlen(dl, args, star='*'): + edl = [] + try: + edl.append(myeval(dl[0], {}, {})) + except Exception: + edl.append(dl[0]) + try: + edl.append(myeval(dl[1], {}, {})) + except Exception: + edl.append(dl[1]) + if isinstance(edl[0], int): + p1 = 1 - edl[0] + if p1 == 0: + d = str(dl[1]) + elif p1 < 0: + d = '%s-%s' % (dl[1], -p1) + else: + d = '%s+%s' % (dl[1], p1) + elif isinstance(edl[1], int): + p1 = 1 + edl[1] + if p1 == 0: + d = '-(%s)' % (dl[0]) + else: + d = '%s-(%s)' % (p1, dl[0]) + else: + d = '%s-(%s)+1' % (dl[1], dl[0]) + try: + return repr(myeval(d, {}, {})), None, None + except Exception: + pass + d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args) + if None not in [d1[0], d2[0]]: + if (d1[0], d2[0]) == (0, 0): + return repr(d2[1] - d1[1] + 1), None, None + b = d2[1] - d1[1] + 1 + d1 = (d1[0], 0, d1[2]) + d2 = (d2[0], b, d2[2]) + if d1[0] == 0 and d2[2] in args: + if b < 0: + return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0]) + elif b: + return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0]) + else: + return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0]) + if d2[0] == 0 and d1[2] in args: + + if b < 0: + return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0]) + elif b: + return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0]) + else: + return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0]) + if d1[2] == d2[2] and d1[2] in args: + a = d2[0] - d1[0] + if not a: + return repr(b), None, None + if b < 0: + return '%s 
* %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a) + elif b: + return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a) + else: + return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a) + if d1[0] == d2[0] == 1: + c = str(d1[2]) + if c not in args: + if _varname_match(c): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) + c = '(%s)' % c + if b == 0: + d = '%s-%s' % (d2[2], c) + elif b < 0: + d = '%s-%s-%s' % (d2[2], c, -b) + else: + d = '%s-%s+%s' % (d2[2], c, b) + elif d1[0] == 0: + c2 = str(d2[2]) + if c2 not in args: + if _varname_match(c2): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) + c2 = '(%s)' % c2 + if d2[0] == 1: + pass + elif d2[0] == -1: + c2 = '-%s' % c2 + else: + c2 = '%s*%s' % (d2[0], c2) + + if b == 0: + d = c2 + elif b < 0: + d = '%s-%s' % (c2, -b) + else: + d = '%s+%s' % (c2, b) + elif d2[0] == 0: + c1 = str(d1[2]) + if c1 not in args: + if _varname_match(c1): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) + c1 = '(%s)' % c1 + if d1[0] == 1: + c1 = '-%s' % c1 + elif d1[0] == -1: + c1 = '+%s' % c1 + elif d1[0] < 0: + c1 = '+%s*%s' % (-d1[0], c1) + else: + c1 = '-%s*%s' % (d1[0], c1) + + if b == 0: + d = c1 + elif b < 0: + d = '%s-%s' % (c1, -b) + else: + d = '%s+%s' % (c1, b) + else: + c1 = str(d1[2]) + if c1 not in args: + if _varname_match(c1): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) + c1 = '(%s)' % c1 + if d1[0] == 1: + c1 = '-%s' % c1 + elif d1[0] == -1: + c1 = '+%s' % c1 + elif d1[0] < 0: + c1 = '+%s*%s' % (-d1[0], c1) + else: + c1 = '-%s*%s' % (d1[0], c1) + + c2 = str(d2[2]) + if c2 not in args: + if _varname_match(c2): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) + c2 = '(%s)' % c2 + if d2[0] == 1: + pass + elif d2[0] == -1: + c2 = '-%s' % c2 + else: + c2 = '%s*%s' % (d2[0], c2) + + if b == 0: + d = '%s%s' % (c2, c1) + elif b < 0: + d = '%s%s-%s' % (c2, c1, -b) + else: + d = '%s%s+%s' % (c2, c1, b) + return d, None, None + +word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) + + +def _get_depend_dict(name, vars, deps): + if name in vars: + words = vars[name].get('depend', []) + + if '=' in vars[name] and not isstring(vars[name]): + for word in word_pattern.findall(vars[name]['=']): + if word not in words and word in vars: + words.append(word) + for word in words[:]: + for w in deps.get(word, []) \ + or _get_depend_dict(word, vars, deps): + if w not in words: + words.append(w) + else: + outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) + words = [] + deps[name] = words + return words + + +def _calc_depend_dict(vars): + names = list(vars.keys()) + depend_dict = {} + for n in names: + _get_depend_dict(n, vars, depend_dict) + return depend_dict + + +def get_sorted_names(vars): + """ + """ + depend_dict = _calc_depend_dict(vars) + names = [] + for name in list(depend_dict.keys()): + if not depend_dict[name]: + names.append(name) + del depend_dict[name] + while depend_dict: + for name, lst in list(depend_dict.items()): + new_lst = [n for n in lst if n in depend_dict] + if not new_lst: + names.append(name) + del depend_dict[name] + else: + depend_dict[name] = new_lst + return [name for name in names if name in vars] + + +def _kind_func(string): + # XXX: return something sensible. 
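
The dependency ordering used by _get_depend_dict and get_sorted_names above is easier to see on a toy input. A minimal self-contained sketch of the same emit-and-prune idea, assuming a plain dict of name -> dependency-list rather than f2py's vars structure (sorted_names and the sample mapping are illustrative, not part of the numpy sources):

    def sorted_names(deps):
        # deps maps a name to the names it depends on, e.g. the
        # result of _calc_depend_dict above.
        deps = {k: [d for d in v if d in deps] for k, v in deps.items()}
        order = []
        while deps:
            ready = [n for n, lst in deps.items() if not lst]
            if not ready:
                # dependency cycle: keep the remaining names as-is
                order.extend(deps)
                break
            for n in ready:
                order.append(n)
                del deps[n]
            for lst in deps.values():
                lst[:] = [d for d in lst if d not in order]
        return order

    # sorted_names({'a': ['n'], 'n': [], 'b': ['a', 'n']}) -> ['n', 'a', 'b']
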
+    if string[0] in "'\"":
+        string = string[1:-1]
+    if real16pattern.match(string):
+        return 8
+    elif real8pattern.match(string):
+        return 4
+    return 'kind(' + string + ')'
+
+
+def _selected_int_kind_func(r):
+    # XXX: This should be processor dependent
+    m = 10 ** r
+    if m <= 2 ** 8:
+        return 1
+    if m <= 2 ** 16:
+        return 2
+    if m <= 2 ** 32:
+        return 4
+    if m <= 2 ** 63:
+        return 8
+    if m <= 2 ** 128:
+        return 16
+    return -1
+
+
+def _selected_real_kind_func(p, r=0, radix=0):
+    # XXX: This should be processor dependent
+    # This is only good for 0 <= p <= 20
+    if p < 7:
+        return 4
+    if p < 16:
+        return 8
+    machine = platform.machine().lower()
+    if machine.startswith(('aarch64', 'power', 'ppc64', 's390x', 'sparc')):
+        if p <= 20:
+            return 16
+    else:
+        if p < 19:
+            return 10
+        elif p <= 20:
+            return 16
+    return -1
+
+
+def get_parameters(vars, global_params={}):
+    params = copy.copy(global_params)
+    g_params = copy.copy(global_params)
+    for name, func in [('kind', _kind_func),
+                       ('selected_int_kind', _selected_int_kind_func),
+                       ('selected_real_kind', _selected_real_kind_func), ]:
+        if name not in g_params:
+            g_params[name] = func
+    param_names = []
+    for n in get_sorted_names(vars):
+        if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
+            param_names.append(n)
+    kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
+    selected_int_kind_re = re.compile(
+        r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
+    selected_kind_re = re.compile(
+        r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
+    for n in param_names:
+        if '=' in vars[n]:
+            v = vars[n]['=']
+            if islogical(vars[n]):
+                v = v.lower()
+                for repl in [
+                        ('.false.', 'False'),
+                        ('.true.', 'True'),
+                        # TODO: test .eq., .neq., etc replacements.
+                ]:
+                    v = v.replace(*repl)
+            v = kind_re.sub(r'kind("\1")', v)
+            v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
+
+            # We need to act according to the data.
+            # The easy case is if the data has a kind-specifier,
+            # then we may easily remove those specifiers.
+            # However, it may be that the user uses other specifiers...(!)
+            is_replaced = False
+            if 'kindselector' in vars[n]:
+                if 'kind' in vars[n]['kindselector']:
+                    orig_v_len = len(v)
+                    v = v.replace('_' + vars[n]['kindselector']['kind'], '')
+                    # Again, this will be true if even a single specifier
+                    # has been replaced, see comment above.
+                    is_replaced = len(v) < orig_v_len
+
+            if not is_replaced:
+                if not selected_kind_re.match(v):
+                    v_ = v.split('_')
+                    # In case there are additive parameters
+                    if len(v_) > 1:
+                        v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '')
+
+            # Currently this will not work for complex numbers.
+ # There is missing code for extracting a complex number, + # which may be defined in either of these: + # a) (Re, Im) + # b) cmplx(Re, Im) + # c) dcmplx(Re, Im) + # d) cmplx(Re, Im, ) + + if isdouble(vars[n]): + tt = list(v) + for m in real16pattern.finditer(v): + tt[m.start():m.end()] = list( + v[m.start():m.end()].lower().replace('d', 'e')) + v = ''.join(tt) + + elif iscomplex(vars[n]): + # FIXME complex numbers may also have exponents + if v[0] == '(' and v[-1] == ')': + # FIXME, unused l looks like potential bug + l = markoutercomma(v[1:-1]).split('@,@') + + try: + params[n] = eval(v, g_params, params) + except Exception as msg: + params[n] = v + outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v))) + if isstring(vars[n]) and isinstance(params[n], int): + params[n] = chr(params[n]) + nl = n.lower() + if nl != n: + params[nl] = params[n] + else: + print(vars[n]) + outmess( + 'get_parameters:parameter %s does not have value?!\n' % (repr(n))) + return params + + +def _eval_length(length, params): + if length in ['(:)', '(*)', '*']: + return '(*)' + return _eval_scalar(length, params) + +_is_kind_number = re.compile(r'\d+_').match + + +def _eval_scalar(value, params): + if _is_kind_number(value): + value = value.split('_')[0] + try: + value = str(eval(value, {}, params)) + except (NameError, SyntaxError): + return value + except Exception as msg: + errmess('"%s" in evaluating %r ' + '(available names: %s)\n' + % (msg, value, list(params.keys()))) + return value + + +def analyzevars(block): + global f90modulevars + + setmesstext(block) + implicitrules, attrrules = buildimplicitrules(block) + vars = copy.copy(block['vars']) + if block['block'] == 'function' and block['name'] not in vars: + vars[block['name']] = {} + if '' in block['vars']: + del vars[''] + if 'attrspec' in block['vars']['']: + gen = block['vars']['']['attrspec'] + for n in list(vars.keys()): + for k in ['public', 'private']: + if k in gen: + vars[n] = setattrspec(vars[n], k) + svars = [] + args = block['args'] + for a in args: + try: + vars[a] + svars.append(a) + except KeyError: + pass + for n in list(vars.keys()): + if n not in args: + svars.append(n) + + params = get_parameters(vars, get_useparameters(block)) + + dep_matches = {} + name_match = re.compile(r'\w[\w\d_$]*').match + for v in list(vars.keys()): + m = name_match(v) + if m: + n = v[m.start():m.end()] + try: + dep_matches[n] + except KeyError: + dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match + for n in svars: + if n[0] in list(attrrules.keys()): + vars[n] = setattrspec(vars[n], attrrules[n[0]]) + if 'typespec' not in vars[n]: + if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): + if implicitrules: + ln0 = n[0].lower() + for k in list(implicitrules[ln0].keys()): + if k == 'typespec' and implicitrules[ln0][k] == 'undefined': + continue + if k not in vars[n]: + vars[n][k] = implicitrules[ln0][k] + elif k == 'attrspec': + for l in implicitrules[ln0][k]: + vars[n] = setattrspec(vars[n], l) + elif n in block['args']: + outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( + repr(n), block['name'])) + + if 'charselector' in vars[n]: + if 'len' in vars[n]['charselector']: + l = vars[n]['charselector']['len'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['charselector']['len'] = l + + if 'kindselector' in vars[n]: + if 'kind' in vars[n]['kindselector']: + l = vars[n]['kindselector']['kind'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + 
vars[n]['kindselector']['kind'] = l + + savelindims = {} + if 'attrspec' in vars[n]: + attr = vars[n]['attrspec'] + attr.reverse() + vars[n]['attrspec'] = [] + dim, intent, depend, check, note = None, None, None, None, None + for a in attr: + if a[:9] == 'dimension': + dim = (a[9:].strip())[1:-1] + elif a[:6] == 'intent': + intent = (a[6:].strip())[1:-1] + elif a[:6] == 'depend': + depend = (a[6:].strip())[1:-1] + elif a[:5] == 'check': + check = (a[5:].strip())[1:-1] + elif a[:4] == 'note': + note = (a[4:].strip())[1:-1] + else: + vars[n] = setattrspec(vars[n], a) + if intent: + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: + # Remove spaces so that 'in out' becomes 'inout' + tmp = c.replace(' ', '') + if tmp not in vars[n]['intent']: + vars[n]['intent'].append(tmp) + intent = None + if note: + note = note.replace('\\n\\n', '\n\n') + note = note.replace('\\n ', '\n') + if 'note' not in vars[n]: + vars[n]['note'] = [note] + else: + vars[n]['note'].append(note) + note = None + if depend is not None: + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): + if c not in vars[n]['depend']: + vars[n]['depend'].append(c) + depend = None + if check is not None: + if 'check' not in vars[n]: + vars[n]['check'] = [] + for c in [x.strip() for x in markoutercomma(check).split('@,@')]: + if c not in vars[n]['check']: + vars[n]['check'].append(c) + check = None + if dim and 'dimension' not in vars[n]: + vars[n]['dimension'] = [] + for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): + star = '*' + if d == ':': + star = ':' + if d in params: + d = str(params[d]) + for p in list(params.keys()): + re_1 = re.compile(r'(?P.*?)\b' + p + r'\b(?P.*)', re.I) + m = re_1.match(d) + while m: + d = m.group('before') + \ + str(params[p]) + m.group('after') + m = re_1.match(d) + if d == star: + dl = [star] + else: + dl = markoutercomma(d, ':').split('@:@') + if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) + dl = ['*'] + d = '*' + if len(dl) == 1 and not dl[0] == star: + dl = ['1', dl[0]] + if len(dl) == 2: + d, v, di = getarrlen(dl, list(block['vars'].keys())) + if d[:4] == '1 * ': + d = d[4:] + if di and di[-4:] == '/(1)': + di = di[:-4] + if v: + savelindims[d] = v, di + vars[n]['dimension'].append(d) + if 'dimension' in vars[n]: + if isintent_c(vars[n]): + shape_macro = 'shape' + else: + shape_macro = 'shape' # 'fshape' + if isstringarray(vars[n]): + if 'charselector' in vars[n]: + d = vars[n]['charselector'] + if '*' in d: + d = d['*'] + errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n' + % (d, n, + ','.join(vars[n]['dimension']), + n, ','.join(vars[n]['dimension'] + [d]))) + vars[n]['dimension'].append(d) + del vars[n]['charselector'] + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + if 'c' not in vars[n]['intent']: + vars[n]['intent'].append('c') + else: + errmess( + "analyzevars: charselector=%r unhandled." 
% (d)) + if 'check' not in vars[n] and 'args' in block and n in block['args']: + flag = 'depend' not in vars[n] + if flag: + vars[n]['depend'] = [] + vars[n]['check'] = [] + if 'dimension' in vars[n]: + #/----< no check + i = -1 + ni = len(vars[n]['dimension']) + for d in vars[n]['dimension']: + ddeps = [] # dependencies of 'd' + ad = '' + pd = '' + if d not in vars: + if d in savelindims: + pd, ad = '(', savelindims[d][1] + d = savelindims[d][0] + else: + for r in block['args']: + if r not in vars: + continue + if re.match(r'.*?\b' + r + r'\b', d, re.I): + ddeps.append(r) + if d in vars: + if 'attrspec' in vars[d]: + for aa in vars[d]['attrspec']: + if aa[:6] == 'depend': + ddeps += aa[6:].strip()[1:-1].split(',') + if 'depend' in vars[d]: + ddeps = ddeps + vars[d]['depend'] + i = i + 1 + if d in vars and ('depend' not in vars[d]) \ + and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ + and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): + vars[d]['depend'] = [n] + if ni > 1: + vars[d]['='] = '%s%s(%s,%s)%s' % ( + pd, shape_macro, n, i, ad) + else: + vars[d]['='] = '%slen(%s)%s' % (pd, n, ad) + # /---< no check + if 1 and 'check' not in vars[d]: + if ni > 1: + vars[d]['check'] = ['%s%s(%s,%i)%s==%s' + % (pd, shape_macro, n, i, ad, d)] + else: + vars[d]['check'] = [ + '%slen(%s)%s>=%s' % (pd, n, ad, d)] + if 'attrspec' not in vars[d]: + vars[d]['attrspec'] = ['optional'] + if ('optional' not in vars[d]['attrspec']) and\ + ('required' not in vars[d]['attrspec']): + vars[d]['attrspec'].append('optional') + elif d not in ['*', ':']: + #/----< no check + if flag: + if d in vars: + if n not in ddeps: + vars[n]['depend'].append(d) + else: + vars[n]['depend'] = vars[n]['depend'] + ddeps + elif isstring(vars[n]): + length = '1' + if 'charselector' in vars[n]: + if '*' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['*'], + params) + vars[n]['charselector']['*'] = length + elif 'len' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['len'], + params) + del vars[n]['charselector']['len'] + vars[n]['charselector']['*'] = length + + if not vars[n]['check']: + del vars[n]['check'] + if flag and not vars[n]['depend']: + del vars[n]['depend'] + if '=' in vars[n]: + if 'attrspec' not in vars[n]: + vars[n]['attrspec'] = [] + if ('optional' not in vars[n]['attrspec']) and \ + ('required' not in vars[n]['attrspec']): + vars[n]['attrspec'].append('optional') + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for v, m in list(dep_matches.items()): + if m(vars[n]['=']): + vars[n]['depend'].append(v) + if not vars[n]['depend']: + del vars[n]['depend'] + if isscalar(vars[n]): + vars[n]['='] = _eval_scalar(vars[n]['='], params) + + for n in list(vars.keys()): + if n == block['name']: # n is block name + if 'note' in vars[n]: + block['note'] = vars[n]['note'] + if block['block'] == 'function': + if 'result' in block and block['result'] in vars: + vars[n] = appenddecl(vars[n], vars[block['result']]) + if 'prefix' in block: + pr = block['prefix'] + ispure = 0 + isrec = 1 + pr1 = pr.replace('pure', '') + ispure = (not pr == pr1) + pr = pr1.replace('recursive', '') + isrec = (not pr == pr1) + m = typespattern[0].match(pr) + if m: + typespec, selector, attr, edecl = cracktypespec0( + m.group('this'), m.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + vars[n]['typespec'] = typespec + if kindselect: + if 'kind' in kindselect: + try: + kindselect['kind'] = eval( + kindselect['kind'], {}, params) + 
except Exception: + pass + vars[n]['kindselector'] = kindselect + if charselect: + vars[n]['charselector'] = charselect + if typename: + vars[n]['typename'] = typename + if ispure: + vars[n] = setattrspec(vars[n], 'pure') + if isrec: + vars[n] = setattrspec(vars[n], 'recursive') + else: + outmess( + 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) + if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: + if 'commonvars' in block: + neededvars = copy.copy(block['args'] + block['commonvars']) + else: + neededvars = copy.copy(block['args']) + for n in list(vars.keys()): + if l_or(isintent_callback, isintent_aux)(vars[n]): + neededvars.append(n) + if 'entry' in block: + neededvars.extend(list(block['entry'].keys())) + for k in list(block['entry'].keys()): + for n in block['entry'][k]: + if n not in neededvars: + neededvars.append(n) + if block['block'] == 'function': + if 'result' in block: + neededvars.append(block['result']) + else: + neededvars.append(block['name']) + if block['block'] in ['subroutine', 'function']: + name = block['name'] + if name in vars and 'intent' in vars[name]: + block['intent'] = vars[name]['intent'] + if block['block'] == 'type': + neededvars.extend(list(vars.keys())) + for n in list(vars.keys()): + if n not in neededvars: + del vars[n] + return vars + +analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) + + +def expr2name(a, block, args=[]): + orig_a = a + a_is_expr = not analyzeargs_re_1.match(a) + if a_is_expr: # `a` is an expression + implicitrules, attrrules = buildimplicitrules(block) + at = determineexprtype(a, block['vars'], implicitrules) + na = 'e_' + for c in a: + c = c.lower() + if c not in string.ascii_lowercase + string.digits: + c = '_' + na = na + c + if na[-1] == '_': + na = na + 'e' + else: + na = na + '_e' + a = na + while a in block['vars'] or a in block['args']: + a = a + 'r' + if a in args: + k = 1 + while a + str(k) in args: + k = k + 1 + a = a + str(k) + if a_is_expr: + block['vars'][a] = at + else: + if a not in block['vars']: + if orig_a in block['vars']: + block['vars'][a] = block['vars'][orig_a] + else: + block['vars'][a] = {} + if 'externals' in block and orig_a in block['externals'] + block['interfaced']: + block['vars'][a] = setattrspec(block['vars'][a], 'external') + return a + + +def analyzeargs(block): + setmesstext(block) + implicitrules, attrrules = buildimplicitrules(block) + if 'args' not in block: + block['args'] = [] + args = [] + for a in block['args']: + a = expr2name(a, block, args) + args.append(a) + block['args'] = args + if 'entry' in block: + for k, args1 in list(block['entry'].items()): + for a in args1: + if a not in block['vars']: + block['vars'][a] = {} + + for b in block['body']: + if b['name'] in args: + if 'externals' not in block: + block['externals'] = [] + if b['name'] not in block['externals']: + block['externals'].append(b['name']) + if 'result' in block and block['result'] not in block['vars']: + block['vars'][block['result']] = {} + return block + +determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P[\w]+)|)\Z', re.I) +determineexprtype_re_3 = re.compile( + r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P[\w]+)|)\Z', re.I) +determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) +determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z', re.I) + + +def _ensure_exprdict(r): + if isinstance(r, int): + return {'typespec': 'integer'} + if isinstance(r, float): + return {'typespec': 'real'} + if 
isinstance(r, complex): + return {'typespec': 'complex'} + if isinstance(r, dict): + return r + raise AssertionError(repr(r)) + + +def determineexprtype(expr, vars, rules={}): + if expr in vars: + return _ensure_exprdict(vars[expr]) + expr = expr.strip() + if determineexprtype_re_1.match(expr): + return {'typespec': 'complex'} + m = determineexprtype_re_2.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + return {'typespec': 'integer'} + m = determineexprtype_re_3.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + return {'typespec': 'real'} + for op in ['+', '-', '*', '/']: + for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: + if e in vars: + return _ensure_exprdict(vars[e]) + t = {} + if determineexprtype_re_4.match(expr): # in parenthesis + t = determineexprtype(expr[1:-1], vars, rules) + else: + m = determineexprtype_re_5.match(expr) + if m: + rn = m.group('name') + t = determineexprtype(m.group('name'), vars, rules) + if t and 'attrspec' in t: + del t['attrspec'] + if not t: + if rn[0] in rules: + return _ensure_exprdict(rules[rn[0]]) + if expr[0] in '\'"': + return {'typespec': 'character', 'charselector': {'*': '*'}} + if not t: + outmess( + 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) + return t + +###### + + +def crack2fortrangen(block, tab='\n', as_interface=False): + global skipfuncs, onlyfuncs + + setmesstext(block) + ret = '' + if isinstance(block, list): + for g in block: + if g and g['block'] in ['function', 'subroutine']: + if g['name'] in skipfuncs: + continue + if onlyfuncs and g['name'] not in onlyfuncs: + continue + ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) + return ret + prefix = '' + name = '' + args = '' + blocktype = block['block'] + if blocktype == 'program': + return '' + argsl = [] + if 'name' in block: + name = block['name'] + if 'args' in block: + vars = block['vars'] + for a in block['args']: + a = expr2name(a, block, argsl) + if not isintent_callback(vars[a]): + argsl.append(a) + if block['block'] == 'function' or argsl: + args = '(%s)' % ','.join(argsl) + f2pyenhancements = '' + if 'f2pyenhancements' in block: + for k in list(block['f2pyenhancements'].keys()): + f2pyenhancements = '%s%s%s %s' % ( + f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) + intent_lst = block.get('intent', [])[:] + if blocktype == 'function' and 'callback' in intent_lst: + intent_lst.remove('callback') + if intent_lst: + f2pyenhancements = '%s%sintent(%s) %s' %\ + (f2pyenhancements, tab + tabchar, + ','.join(intent_lst), name) + use = '' + if 'use' in block: + use = use2fortran(block['use'], tab + tabchar) + common = '' + if 'common' in block: + common = common2fortran(block['common'], tab + tabchar) + if name == 'unknown_interface': + name = '' + result = '' + if 'result' in block: + result = ' result (%s)' % block['result'] + if block['result'] not in argsl: + argsl.append(block['result']) + body = crack2fortrangen(block['body'], tab + tabchar) + vars = vars2fortran( + block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) + mess = '' + if 'from' in block and not as_interface: + mess = '! 
in %s' % block['from'] + if 'entry' in block: + entry_stmts = '' + for k, i in list(block['entry'].items()): + entry_stmts = '%s%sentry %s(%s)' \ + % (entry_stmts, tab + tabchar, k, ','.join(i)) + body = body + entry_stmts + if blocktype == 'block data' and name == '_BLOCK_DATA_': + name = '' + ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( + tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + return ret + + +def common2fortran(common, tab=''): + ret = '' + for k in list(common.keys()): + if k == '_BLNK_': + ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) + else: + ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) + return ret + + +def use2fortran(use, tab=''): + ret = '' + for m in list(use.keys()): + ret = '%s%suse %s,' % (ret, tab, m) + if use[m] == {}: + if ret and ret[-1] == ',': + ret = ret[:-1] + continue + if 'only' in use[m] and use[m]['only']: + ret = '%s only:' % (ret) + if 'map' in use[m] and use[m]['map']: + c = ' ' + for k in list(use[m]['map'].keys()): + if k == use[m]['map'][k]: + ret = '%s%s%s' % (ret, c, k) + c = ',' + else: + ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) + c = ',' + if ret and ret[-1] == ',': + ret = ret[:-1] + return ret + + +def true_intent_list(var): + lst = var['intent'] + ret = [] + for intent in lst: + try: + c = eval('isintent_%s(var)' % intent) + except NameError: + c = 0 + if c: + ret.append(intent) + return ret + + +def vars2fortran(block, vars, args, tab='', as_interface=False): + """ + TODO: + public sub + ... + """ + setmesstext(block) + ret = '' + nout = [] + for a in args: + if a in block['vars']: + nout.append(a) + if 'commonvars' in block: + for a in block['commonvars']: + if a in vars: + if a not in nout: + nout.append(a) + else: + errmess( + 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) + if 'varnames' in block: + nout.extend(block['varnames']) + if not as_interface: + for a in list(vars.keys()): + if a not in nout: + nout.append(a) + for a in nout: + if 'depend' in vars[a]: + for d in vars[a]['depend']: + if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: + errmess( + 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) + if 'externals' in block and a in block['externals']: + if isintent_callback(vars[a]): + ret = '%s%sintent(callback) %s' % (ret, tab, a) + ret = '%s%sexternal %s' % (ret, tab, a) + if isoptional(vars[a]): + ret = '%s%soptional %s' % (ret, tab, a) + if a in vars and 'typespec' not in vars[a]: + continue + cont = 1 + for b in block['body']: + if a == b['name'] and b['block'] == 'function': + cont = 0 + break + if cont: + continue + if a not in vars: + show(vars) + outmess('vars2fortran: No definition for argument "%s".\n' % a) + continue + if a == block['name'] and not block['block'] == 'function': + continue + if 'typespec' not in vars[a]: + if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: + if a in args: + ret = '%s%sexternal %s' % (ret, tab, a) + continue + show(vars[a]) + outmess('vars2fortran: No typespec for argument "%s".\n' % a) + continue + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = '%s(%s)' % (vardef, vars[a]['typename']) + selector = {} + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + if '*' in selector: + if selector['*'] in ['*', ':']: + vardef = '%s*(%s)' % (vardef, selector['*']) + else: + vardef 
= '%s*%s' % (vardef, selector['*'])
+        else:
+            if 'len' in selector:
+                vardef = '%s(len=%s' % (vardef, selector['len'])
+                if 'kind' in selector:
+                    vardef = '%s,kind=%s)' % (vardef, selector['kind'])
+                else:
+                    vardef = '%s)' % (vardef)
+            elif 'kind' in selector:
+                vardef = '%s(kind=%s)' % (vardef, selector['kind'])
+        c = ' '
+        if 'attrspec' in vars[a]:
+            attr = [l for l in vars[a]['attrspec']
+                    if l not in ['external']]
+            if attr:
+                vardef = '%s, %s' % (vardef, ','.join(attr))
+                c = ','
+        if 'dimension' in vars[a]:
+            vardef = '%s%sdimension(%s)' % (
+                vardef, c, ','.join(vars[a]['dimension']))
+            c = ','
+        if 'intent' in vars[a]:
+            lst = true_intent_list(vars[a])
+            if lst:
+                vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst))
+            c = ','
+        if 'check' in vars[a]:
+            vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check']))
+            c = ','
+        if 'depend' in vars[a]:
+            vardef = '%s%sdepend(%s)' % (
+                vardef, c, ','.join(vars[a]['depend']))
+            c = ','
+        if '=' in vars[a]:
+            v = vars[a]['=']
+            if vars[a]['typespec'] in ['complex', 'double complex']:
+                try:
+                    v = eval(v)
+                    v = '(%s,%s)' % (v.real, v.imag)
+                except Exception:
+                    pass
+            vardef = '%s :: %s=%s' % (vardef, a, v)
+        else:
+            vardef = '%s :: %s' % (vardef, a)
+        ret = '%s%s%s' % (ret, tab, vardef)
+    return ret
+######
+
+
+def crackfortran(files):
+    global usermodules
+
+    outmess('Reading fortran codes...\n', 0)
+    readfortrancode(files, crackline)
+    outmess('Post-processing...\n', 0)
+    usermodules = []
+    postlist = postcrack(grouplist[0])
+    outmess('Post-processing (stage 2)...\n', 0)
+    postlist = postcrack2(postlist)
+    return usermodules + postlist
+
+
+def crack2fortran(block):
+    global f2py_version
+
+    pyf = crack2fortrangen(block) + '\n'
+    header = """! -*- f90 -*-
+! Note: the context of this file is case sensitive.
+"""
+    footer = """
+! This file was auto-generated with f2py (version:%s).
+! See http://cens.ioc.ee/projects/f2py2e/
+""" % (f2py_version)
+    return header + pyf + footer
+
+if __name__ == "__main__":
+    files = []
+    funcs = []
+    f = 1
+    f2 = 0
+    f3 = 0
+    showblocklist = 0
+    for l in sys.argv[1:]:
+        if l == '':
+            pass
+        elif l[0] == ':':
+            f = 0
+        elif l == '-quiet':
+            quiet = 1
+            verbose = 0
+        elif l == '-verbose':
+            verbose = 2
+            quiet = 0
+        elif l == '-fix':
+            if strictf77:
+                outmess(
+                    'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
+            skipemptyends = 1
+            sourcecodeform = 'fix'
+        elif l == '-skipemptyends':
+            skipemptyends = 1
+        elif l == '--ignore-contains':
+            ignorecontains = 1
+        elif l == '-f77':
+            strictf77 = 1
+            sourcecodeform = 'fix'
+        elif l == '-f90':
+            strictf77 = 0
+            sourcecodeform = 'free'
+            skipemptyends = 1
+        elif l == '-h':
+            f2 = 1
+        elif l == '-show':
+            showblocklist = 1
+        elif l == '-m':
+            f3 = 1
+        elif l[0] == '-':
+            errmess('Unknown option %s\n' % repr(l))
+        elif f2:
+            f2 = 0
+            pyffilename = l
+        elif f3:
+            f3 = 0
+            f77modulename = l
+        elif f:
+            try:
+                open(l).close()
+                files.append(l)
+            except IOError as detail:
+                errmess('IOError: %s\n' % str(detail))
+        else:
+            funcs.append(l)
+    if not strictf77 and f77modulename and not skipemptyends:
+        outmess("""\
+  Warning: You have specified module name for non Fortran 77 code
+  that should not need one (except if you are scanning F90 code
+  for non module blocks but then you should use flag -skipemptyends
+  and also be sure that the files do not contain programs without program statement).
+""", 0) + + postlist = crackfortran(files) + if pyffilename: + outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) + pyf = crack2fortran(postlist) + with open(pyffilename, 'w') as f: + f.write(pyf) + if showblocklist: + show(postlist) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/crackfortran.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/crackfortran.pyc new file mode 100644 index 0000000..b835e3c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/crackfortran.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/diagnose.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/diagnose.py new file mode 100644 index 0000000..0241fed --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/diagnose.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +from __future__ import division, absolute_import, print_function + +import os +import sys +import tempfile + + +def run_command(cmd): + print('Running %r:' % (cmd)) + os.system(cmd) + print('------') + + +def run(): + _path = os.getcwd() + os.chdir(tempfile.gettempdir()) + print('------') + print('os.name=%r' % (os.name)) + print('------') + print('sys.platform=%r' % (sys.platform)) + print('------') + print('sys.version:') + print(sys.version) + print('------') + print('sys.prefix:') + print(sys.prefix) + print('------') + print('sys.path=%r' % (':'.join(sys.path))) + print('------') + + try: + import numpy + has_newnumpy = 1 + except ImportError: + print('Failed to import new numpy:', sys.exc_info()[1]) + has_newnumpy = 0 + + try: + from numpy.f2py import f2py2e + has_f2py2e = 1 + except ImportError: + print('Failed to import f2py2e:', sys.exc_info()[1]) + has_f2py2e = 0 + + try: + import numpy.distutils + has_numpy_distutils = 2 + except ImportError: + try: + import numpy_distutils + has_numpy_distutils = 1 + except ImportError: + print('Failed to import numpy_distutils:', sys.exc_info()[1]) + has_numpy_distutils = 0 + + if has_newnumpy: + try: + print('Found new numpy version %r in %s' % + (numpy.__version__, numpy.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_f2py2e: + try: + print('Found f2py2e version %r in %s' % + (f2py2e.__version__.version, f2py2e.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_numpy_distutils: + try: + if has_numpy_distutils == 2: + print('Found numpy.distutils version %r in %r' % ( + numpy.distutils.__version__, + numpy.distutils.__file__)) + else: + print('Found numpy_distutils version %r in %r' % ( + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 1: + print( + 'Importing numpy_distutils.command.build_flib ...', end=' ') + import numpy_distutils.command.build_flib as build_flib + print('ok') + print('------') + try: + print( + 'Checking availability of supported Fortran compilers:') + for compiler_class in build_flib.all_compilers: + compiler_class(verbose=1).is_available() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print( + 'error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.fcompiler ...', end=' ') + import numpy.distutils.fcompiler as fcompiler + else: + 
print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler + print('ok') + print('------') + try: + print('Checking availability of supported Fortran compilers:') + fcompiler.show_fcompilers() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.cpuinfo ...', end=' ') + from numpy.distutils.cpuinfo import cpuinfo + print('ok') + print('------') + else: + try: + print( + 'Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + except Exception as msg: + print('error:', msg, '(ignore it)') + print('Importing numpy_distutils.cpuinfo ...', end=' ') + from numpy_distutils.cpuinfo import cpuinfo + print('ok') + print('------') + cpu = cpuinfo() + print('CPU information:', end=' ') + for name in dir(cpuinfo): + if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])(): + print(name[1:], end=' ') + print('------') + except Exception as msg: + print('error:', msg) + print('------') + os.chdir(_path) +if __name__ == "__main__": + run() diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/diagnose.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/diagnose.pyc new file mode 100644 index 0000000..3ccc308 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/diagnose.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py2e.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py2e.py new file mode 100644 index 0000000..4722315 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py2e.py @@ -0,0 +1,685 @@ +#!/usr/bin/env python +""" + +f2py2e - Fortran to Python C/API generator. 2nd Edition. + See __usage__ below. + +Copyright 1999--2011 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 08:31:19 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +import sys +import os +import pprint +import re + +from . import crackfortran +from . import rules +from . import cb_rules +from . import auxfuncs +from . import cfuncs +from . import f90mod_rules +from . import __version__ + +f2py_version = __version__.version +errmess = sys.stderr.write +# outmess=sys.stdout.write +show = pprint.pprint +outmess = auxfuncs.outmess + +try: + from numpy import __version__ as numpy_version +except ImportError: + numpy_version = 'N/A' + +__usage__ = """\ +Usage: + +1) To construct extension module sources: + + f2py [] [[[only:]||[skip:]] \\ + ] \\ + [: ...] + +2) To compile fortran files and build extension modules: + + f2py -c [, , ] + +3) To generate signature files: + + f2py -h ...< same options as in (1) > + +Description: This program generates a Python C/API file (module.c) + that contains wrappers for given fortran functions so that they + can be called from Python. With the -c option the corresponding + extension modules are built. + +Options: + + --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT] + --2d-numeric Use f2py2e tool with Numeric support. + --2d-numarray Use f2py2e tool with Numarray support. + --g3-numpy Use 3rd generation f2py from the separate f2py package. 
+ [NOT AVAILABLE YET] + + -h Write signatures of the fortran routines to file + and exit. You can then edit and use it instead + of . If ==stdout then the + signatures are printed to stdout. + Names of fortran routines for which Python C/API + functions will be generated. Default is all that are found + in . + Paths to fortran/signature files that will be scanned for + in order to determine their signatures. + skip: Ignore fortran functions that follow until `:'. + only: Use only fortran functions that follow until `:'. + : Get back to mode. + + -m Name of the module; f2py generates a Python/C API + file module.c or extension module . + Default is 'untitled'. + + --[no-]lower Do [not] lower the cases in . By default, + --lower is assumed with -h key, and --no-lower without -h key. + + --build-dir All f2py generated files are created in . + Default is tempfile.mkdtemp(). + + --overwrite-signature Overwrite existing signature file. + + --[no-]latex-doc Create (or not) module.tex. + Default is --no-latex-doc. + --short-latex Create 'incomplete' LaTeX document (without commands + \\documentclass, \\tableofcontents, and \\begin{document}, + \\end{document}). + + --[no-]rest-doc Create (or not) module.rst. + Default is --no-rest-doc. + + --debug-capi Create C/API code that reports the state of the wrappers + during runtime. Useful for debugging. + + --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 + functions. --wrap-functions is default because it ensures + maximum portability/compiler independence. + + --include-paths ::... Search include files from the given + directories. + + --help-link [..] List system resources found by system_info.py. See also + --link- switch below. [..] is optional list + of resources names. E.g. try 'f2py --help-link lapack_opt'. + + --quiet Run quietly. + --verbose Run with extra verbosity. + -v Print f2py version ID and exit. + + +numpy.distutils options (only effective with -c): + + --fcompiler= Specify Fortran compiler type by vendor + --compiler= Specify C compiler type (as defined by distutils) + + --help-fcompiler List available Fortran compilers and exit + --f77exec= Specify the path to F77 compiler + --f90exec= Specify the path to F90 compiler + --f77flags= Specify F77 compiler flags + --f90flags= Specify F90 compiler flags + --opt= Specify optimization flags + --arch= Specify architecture specific optimization flags + --noopt Compile without optimization + --noarch Compile without arch-dependent optimization + --debug Compile with debugging information + +Extra options (only effective with -c): + + --link- Link extension module with as defined + by numpy.distutils/system_info.py. E.g. to link + with optimized LAPACK libraries (vecLib on MacOSX, + ATLAS elsewhere), use --link-lapack_opt. + See also --help-link switch. + + -L/path/to/lib/ -l + -D -U + -I/path/to/include/ + .o .so .a + + Using the following macros may be required with non-gcc Fortran + compilers: + -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN + -DUNDERSCORE_G77 + + When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY + interface is printed out at exit (platforms: Linux). + + When using -DF2PY_REPORT_ON_ARRAY_COPY=, a message is + sent to stderr whenever F2PY interface makes a copy of an + array. Integer sets the threshold for array sizes when + a message should be shown. + +Version: %s +numpy Version: %s +Requires: Python 2.3 or higher. 
+License: NumPy license (see LICENSE.txt in the NumPy source code) +Copyright 1999 - 2011 Pearu Peterson all rights reserved. +http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version) + + +def scaninputline(inputline): + files, skipfuncs, onlyfuncs, debug = [], [], [], [] + f, f2, f3, f5, f6, f7, f8, f9 = 1, 0, 0, 0, 0, 0, 0, 0 + verbose = 1 + dolc = -1 + dolatexdoc = 0 + dorestdoc = 0 + wrapfuncs = 1 + buildpath = '.' + include_paths = [] + signsfile, modulename = None, None + options = {'buildpath': buildpath, + 'coutput': None, + 'f2py_wrapper_output': None} + for l in inputline: + if l == '': + pass + elif l == 'only:': + f = 0 + elif l == 'skip:': + f = -1 + elif l == ':': + f = 1 + elif l[:8] == '--debug-': + debug.append(l[8:]) + elif l == '--lower': + dolc = 1 + elif l == '--build-dir': + f6 = 1 + elif l == '--no-lower': + dolc = 0 + elif l == '--quiet': + verbose = 0 + elif l == '--verbose': + verbose += 1 + elif l == '--latex-doc': + dolatexdoc = 1 + elif l == '--no-latex-doc': + dolatexdoc = 0 + elif l == '--rest-doc': + dorestdoc = 1 + elif l == '--no-rest-doc': + dorestdoc = 0 + elif l == '--wrap-functions': + wrapfuncs = 1 + elif l == '--no-wrap-functions': + wrapfuncs = 0 + elif l == '--short-latex': + options['shortlatex'] = 1 + elif l == '--coutput': + f8 = 1 + elif l == '--f2py-wrapper-output': + f9 = 1 + elif l == '--overwrite-signature': + options['h-overwrite'] = 1 + elif l == '-h': + f2 = 1 + elif l == '-m': + f3 = 1 + elif l[:2] == '-v': + print(f2py_version) + sys.exit() + elif l == '--show-compilers': + f5 = 1 + elif l[:8] == '-include': + cfuncs.outneeds['userincludes'].append(l[9:-1]) + cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:] + elif l[:15] in '--include_paths': + outmess( + 'f2py option --include_paths is deprecated, use --include-paths instead.\n') + f7 = 1 + elif l[:15] in '--include-paths': + f7 = 1 + elif l[0] == '-': + errmess('Unknown option %s\n' % repr(l)) + sys.exit() + elif f2: + f2 = 0 + signsfile = l + elif f3: + f3 = 0 + modulename = l + elif f6: + f6 = 0 + buildpath = l + elif f7: + f7 = 0 + include_paths.extend(l.split(os.pathsep)) + elif f8: + f8 = 0 + options["coutput"] = l + elif f9: + f9 = 0 + options["f2py_wrapper_output"] = l + elif f == 1: + try: + open(l).close() + files.append(l) + except IOError as detail: + errmess('IOError: %s. Skipping file "%s".\n' % + (str(detail), l)) + elif f == -1: + skipfuncs.append(l) + elif f == 0: + onlyfuncs.append(l) + if not f5 and not files and not modulename: + print(__usage__) + sys.exit() + if not os.path.isdir(buildpath): + if not verbose: + outmess('Creating build directory %s' % (buildpath)) + os.mkdir(buildpath) + if signsfile: + signsfile = os.path.join(buildpath, signsfile) + if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: + errmess( + 'Signature file "%s" exists!!! 
Use --overwrite-signature to overwrite.\n' % (signsfile))
+        sys.exit()
+
+    options['debug'] = debug
+    options['verbose'] = verbose
+    if dolc == -1 and not signsfile:
+        options['do-lower'] = 0
+    else:
+        options['do-lower'] = dolc
+    if modulename:
+        options['module'] = modulename
+    if signsfile:
+        options['signsfile'] = signsfile
+    if onlyfuncs:
+        options['onlyfuncs'] = onlyfuncs
+    if skipfuncs:
+        options['skipfuncs'] = skipfuncs
+    options['dolatexdoc'] = dolatexdoc
+    options['dorestdoc'] = dorestdoc
+    options['wrapfuncs'] = wrapfuncs
+    options['buildpath'] = buildpath
+    options['include_paths'] = include_paths
+    return files, options
+
+
+def callcrackfortran(files, options):
+    rules.options = options
+    crackfortran.debug = options['debug']
+    crackfortran.verbose = options['verbose']
+    if 'module' in options:
+        crackfortran.f77modulename = options['module']
+    if 'skipfuncs' in options:
+        crackfortran.skipfuncs = options['skipfuncs']
+    if 'onlyfuncs' in options:
+        crackfortran.onlyfuncs = options['onlyfuncs']
+    crackfortran.include_paths[:] = options['include_paths']
+    crackfortran.dolowercase = options['do-lower']
+    postlist = crackfortran.crackfortran(files)
+    if 'signsfile' in options:
+        outmess('Saving signatures to file "%s"\n' % (options['signsfile']))
+        pyf = crackfortran.crack2fortran(postlist)
+        if options['signsfile'][-6:] == 'stdout':
+            sys.stdout.write(pyf)
+        else:
+            f = open(options['signsfile'], 'w')
+            f.write(pyf)
+            f.close()
+    if options["coutput"] is None:
+        for mod in postlist:
+            mod["coutput"] = "%smodule.c" % mod["name"]
+    else:
+        for mod in postlist:
+            mod["coutput"] = options["coutput"]
+    if options["f2py_wrapper_output"] is None:
+        for mod in postlist:
+            mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"]
+    else:
+        for mod in postlist:
+            mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
+    return postlist
+
+
+def buildmodules(lst):
+    cfuncs.buildcfuncs()
+    outmess('Building modules...\n')
+    modules, mnames, isusedby = [], [], {}
+    for i in range(len(lst)):
+        if '__user__' in lst[i]['name']:
+            cb_rules.buildcallbacks(lst[i])
+        else:
+            if 'use' in lst[i]:
+                for u in lst[i]['use'].keys():
+                    if u not in isusedby:
+                        isusedby[u] = []
+                    isusedby[u].append(lst[i]['name'])
+            modules.append(lst[i])
+            mnames.append(lst[i]['name'])
+    ret = {}
+    for i in range(len(mnames)):
+        if mnames[i] in isusedby:
+            outmess('\tSkipping module "%s" which is used by %s.\n' % (
+                mnames[i], ','.join(['"%s"' % s for s in isusedby[mnames[i]]])))
+        else:
+            um = []
+            if 'use' in modules[i]:
+                for u in modules[i]['use'].keys():
+                    if u in isusedby and u in mnames:
+                        um.append(modules[mnames.index(u)])
+                    else:
+                        outmess(
+                            '\tModule "%s" uses nonexisting "%s" which will be ignored.\n' % (mnames[i], u))
+            ret[mnames[i]] = {}
+            dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um))
+    return ret
+
+
+def dict_append(d_out, d_in):
+    for (k, v) in d_in.items():
+        if k not in d_out:
+            d_out[k] = []
+        if isinstance(v, list):
+            d_out[k] = d_out[k] + v
+        else:
+            d_out[k].append(v)
+
+
+def run_main(comline_list):
+    """
+    Equivalent to running::
+
+        f2py <args>
+
+    where ``<args>=string.join(<list>,' ')``, but in Python. Unless
+    ``-h`` is used, this function returns a dictionary containing
+    information on generated modules and their dependencies on source
+    files. For example, the command ``f2py -m scalar scalar.f`` can be
+    executed from Python as follows
+
+    You cannot build extension modules with this function, that is,
+    using ``-c`` is not allowed.
Use ``compile`` command instead + + Examples + -------- + .. include:: run_main_session.dat + :literal: + + """ + crackfortran.reset_global_f2py_vars() + f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__)) + fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') + fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') + files, options = scaninputline(comline_list) + auxfuncs.options = options + postlist = callcrackfortran(files, options) + isusedby = {} + for i in range(len(postlist)): + if 'use' in postlist[i]: + for u in postlist[i]['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(postlist[i]['name']) + for i in range(len(postlist)): + if postlist[i]['block'] == 'python module' and '__user__' in postlist[i]['name']: + if postlist[i]['name'] in isusedby: + # if not quiet: + outmess('Skipping Makefile build for module "%s" which is used by %s\n' % ( + postlist[i]['name'], ','.join(['"%s"' % s for s in isusedby[postlist[i]['name']]]))) + if 'signsfile' in options: + if options['verbose'] > 1: + outmess( + 'Stopping. Edit the signature file and then run f2py on the signature file: ') + outmess('%s %s\n' % + (os.path.basename(sys.argv[0]), options['signsfile'])) + return + for i in range(len(postlist)): + if postlist[i]['block'] != 'python module': + if 'python module' not in options: + errmess( + 'Tip: If your original code is Fortran source then you must use -m option.\n') + raise TypeError('All blocks must be python module blocks but got %s' % ( + repr(postlist[i]['block']))) + auxfuncs.debugoptions = options['debug'] + f90mod_rules.options = options + auxfuncs.wrapfuncs = options['wrapfuncs'] + + ret = buildmodules(postlist) + + for mn in ret.keys(): + dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc}) + return ret + + +def filter_files(prefix, suffix, files, remove_prefix=None): + """ + Filter files by prefix and suffix. + """ + filtered, rest = [], [] + match = re.compile(prefix + r'.*' + suffix + r'\Z').match + if remove_prefix: + ind = len(prefix) + else: + ind = 0 + for file in [x.strip() for x in files]: + if match(file): + filtered.append(file[ind:]) + else: + rest.append(file) + return filtered, rest + + +def get_prefix(module): + p = os.path.dirname(os.path.dirname(module.__file__)) + return p + + +def run_compile(): + """ + Do it all in one call! 
+ """ + import tempfile + + i = sys.argv.index('-c') + del sys.argv[i] + + remove_build_dir = 0 + try: + i = sys.argv.index('--build-dir') + except ValueError: + i = None + if i is not None: + build_dir = sys.argv[i + 1] + del sys.argv[i + 1] + del sys.argv[i] + else: + remove_build_dir = 1 + build_dir = tempfile.mkdtemp() + + _reg1 = re.compile(r'[-][-]link[-]') + sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] + if sysinfo_flags: + sysinfo_flags = [f[7:] for f in sysinfo_flags] + + _reg2 = re.compile( + r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include') + f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] + f2py_flags2 = [] + fl = 0 + for a in sys.argv[1:]: + if a in ['only:', 'skip:']: + fl = 1 + elif a == ':': + fl = 0 + if fl or a == ':': + f2py_flags2.append(a) + if f2py_flags2 and f2py_flags2[-1] != ':': + f2py_flags2.append(':') + f2py_flags.extend(f2py_flags2) + + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] + _reg3 = re.compile( + r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)') + flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in flib_flags] + _reg4 = re.compile( + r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in fc_flags] + + if 1: + del_list = [] + for s in flib_flags: + v = '--fcompiler=' + if s[:len(v)] == v: + from numpy.distutils import fcompiler + fcompiler.load_all_fcompiler_classes() + allowed_keys = list(fcompiler.fcompiler_class.keys()) + nv = ov = s[len(v):].lower() + if ov not in allowed_keys: + vmap = {} # XXX + try: + nv = vmap[ov] + except KeyError: + if ov not in vmap.values(): + print('Unknown vendor: "%s"' % (s[len(v):])) + nv = ov + i = flib_flags.index(s) + flib_flags[i] = '--fcompiler=' + nv + continue + for s in del_list: + i = flib_flags.index(s) + del flib_flags[i] + assert len(flib_flags) <= 2, repr(flib_flags) + + _reg5 = re.compile(r'[-][-](verbose)') + setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in setup_flags] + + if '--quiet' in f2py_flags: + setup_flags.append('--quiet') + + modulename = 'untitled' + sources = sys.argv[1:] + + for optname in ['--include_paths', '--include-paths']: + if optname in sys.argv: + i = sys.argv.index(optname) + f2py_flags.extend(sys.argv[i:i + 2]) + del sys.argv[i + 1], sys.argv[i] + sources = sys.argv[1:] + + if '-m' in sys.argv: + i = sys.argv.index('-m') + modulename = sys.argv[i + 1] + del sys.argv[i + 1], sys.argv[i] + sources = sys.argv[1:] + else: + from numpy.distutils.command.build_src import get_f2py_modulename + pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources) + sources = pyf_files + sources + for f in pyf_files: + modulename = get_f2py_modulename(f) + if modulename: + break + + extra_objects, sources = filter_files('', '[.](o|a|so)', sources) + include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1) + library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) + libraries, sources = filter_files('-l', '', sources, remove_prefix=1) + undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) + define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) + for i in 
range(len(define_macros)): + name_value = define_macros[i].split('=', 1) + if len(name_value) == 1: + name_value.append(None) + if len(name_value) == 2: + define_macros[i] = tuple(name_value) + else: + print('Invalid use of -D:', name_value) + + from numpy.distutils.system_info import get_info + + num_info = {} + if num_info: + include_dirs.extend(num_info.get('include_dirs', [])) + + from numpy.distutils.core import setup, Extension + ext_args = {'name': modulename, 'sources': sources, + 'include_dirs': include_dirs, + 'library_dirs': library_dirs, + 'libraries': libraries, + 'define_macros': define_macros, + 'undef_macros': undef_macros, + 'extra_objects': extra_objects, + 'f2py_options': f2py_flags, + } + + if sysinfo_flags: + from numpy.distutils.misc_util import dict_append + for n in sysinfo_flags: + i = get_info(n) + if not i: + outmess('No %s resources found in system' + ' (try `f2py --help-link`)\n' % (repr(n))) + dict_append(ext_args, **i) + + ext = Extension(**ext_args) + sys.argv = [sys.argv[0]] + setup_flags + sys.argv.extend(['build', + '--build-temp', build_dir, + '--build-base', build_dir, + '--build-platlib', '.']) + if fc_flags: + sys.argv.extend(['config_fc'] + fc_flags) + if flib_flags: + sys.argv.extend(['build_ext'] + flib_flags) + + setup(ext_modules=[ext]) + + if remove_build_dir and os.path.exists(build_dir): + import shutil + outmess('Removing build directory %s\n' % (build_dir)) + shutil.rmtree(build_dir) + + +def main(): + if '--help-link' in sys.argv[1:]: + sys.argv.remove('--help-link') + from numpy.distutils.system_info import show_all + show_all() + return + + # Probably outdated options that were not working before 1.16 + if '--g3-numpy' in sys.argv[1:]: + sys.stderr.write("G3 f2py support is not implemented, yet.\\n") + sys.exit(1) + elif '--2e-numeric' in sys.argv[1:]: + sys.argv.remove('--2e-numeric') + elif '--2e-numarray' in sys.argv[1:]: + # Note that this errors becaust the -DNUMARRAY argument is + # not recognized. Just here for back compatibility and the + # error message. 
+        sys.argv.append("-DNUMARRAY")
+        sys.argv.remove('--2e-numarray')
+    elif '--2e-numpy' in sys.argv[1:]:
+        sys.argv.remove('--2e-numpy')
+    else:
+        pass
+
+    if '-c' in sys.argv[1:]:
+        run_compile()
+    else:
+        run_main(sys.argv[1:])
diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py2e.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py2e.pyc
new file mode 100644
index 0000000..662b46c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py2e.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py_testing.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py_testing.py
new file mode 100644
index 0000000..f5d5fa6
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py_testing.py
@@ -0,0 +1,48 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import re
+
+from numpy.testing import jiffies, memusage
+
+
+def cmdline():
+    m = re.compile(r'\A\d+\Z')
+    args = []
+    repeat = 1
+    for a in sys.argv[1:]:
+        if m.match(a):
+            repeat = eval(a)
+        else:
+            args.append(a)
+    f2py_opts = ' '.join(args)
+    return repeat, f2py_opts
+
+
+def run(runtest, test_functions, repeat=1):
+    l = [(t, repr(t.__doc__.split('\n')[1].strip())) for t in test_functions]
+    start_memusage = memusage()
+    diff_memusage = None
+    start_jiffies = jiffies()
+    i = 0
+    while i < repeat:
+        i += 1
+        for t, fname in l:
+            runtest(t)
+            if start_memusage is None:
+                continue
+            if diff_memusage is None:
+                diff_memusage = memusage() - start_memusage
+            else:
+                diff_memusage2 = memusage() - start_memusage
+                if diff_memusage2 != diff_memusage:
+                    print('memory usage change at step %i:' % i,
+                          diff_memusage2 - diff_memusage,
+                          fname)
+                    diff_memusage = diff_memusage2
+    current_memusage = memusage()
+    print('run', repeat * len(test_functions), 'tests',
+          'in %.2f seconds' % ((jiffies() - start_jiffies) / 100.0))
+    if start_memusage:
+        print('initial virtual memory size:', start_memusage, 'bytes')
+        print('current virtual memory size:', current_memusage, 'bytes')
diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py_testing.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py_testing.pyc
new file mode 100644
index 0000000..d58aa60
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/f2py_testing.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/f90mod_rules.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/f90mod_rules.py
new file mode 100644
index 0000000..85eae80
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/f90mod_rules.py
@@ -0,0 +1,272 @@
+#!/usr/bin/env python
+"""
+
+Build F90 module support for f2py2e.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/02/03 19:30:23 $
+Pearu Peterson
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__version__ = "$Revision: 1.27 $"[10:-1]
+
+f2py_version = 'See `f2py -v`'
+
+import numpy as np
+
+from . import capi_maps
+from . import func2subr
+from .crackfortran import undo_rmbadname, undo_rmbadname1
+
+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
+from .auxfuncs import * + +options = {} + + +def findf90modules(m): + if ismodule(m): + return [m] + if not hasbody(m): + return [] + ret = [] + for b in m['body']: + if ismodule(b): + ret.append(b) + else: + ret = ret + findf90modules(b) + return ret + +fgetdims1 = """\ + external f2pysetdata + logical ns + integer r,i + integer(%d) s(*) + ns = .FALSE. + if (allocated(d)) then + do i=1,r + if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then + ns = .TRUE. + end if + end do + if (ns) then + deallocate(d) + end if + end if + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + +fgetdims2 = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + end if + flag = 1 + call f2pysetdata(d,allocated(d))""" + +fgetdims2_sa = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + !s(r) must be equal to len(d(1)) + end if + flag = 2 + call f2pysetdata(d,allocated(d))""" + + +def buildhooks(pymod): + global fgetdims1, fgetdims2 + from . import rules + ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [], + 'need': ['F_FUNC', 'arrayobject.h'], + 'separatorsfor': {'includes0': '\n', 'includes': '\n'}, + 'docs': ['"Fortran 90/95 modules:\\n"'], + 'latexdoc': []} + fhooks = [''] + + def fadd(line, s=fhooks): + s[0] = '%s\n %s' % (s[0], line) + doc = [''] + + def dadd(line, s=doc): + s[0] = '%s\n%s' % (s[0], line) + for m in findf90modules(pymod): + sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ + m['name']], [] + sargsp = [] + ifargs = [] + mfargs = [] + if hasbody(m): + for b in m['body']: + notvars.append(b['name']) + for n in m['vars'].keys(): + var = m['vars'][n] + if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + onlyvars.append(n) + mfargs.append(n) + outmess('\t\tConstructing F90 module support for "%s"...\n' % + (m['name'])) + if onlyvars: + outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) + chooks = [''] + + def cadd(line, s=chooks): + s[0] = '%s\n%s' % (s[0], line) + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = '%s\n%s' % (s[0], line) + + vrd = capi_maps.modsign2map(m) + cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) + dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) + if hasnote(m): + note = m['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(note) + if onlyvars: + dadd('\\begin{description}') + for n in onlyvars: + var = m['vars'][n] + modobjs.append(n) + ct = capi_maps.getctype(var) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, var) + dms = dm['dims'].replace('*', '-1').strip() + dms = dms.replace(':', '-1').strip() + if not dms: + dms = '-1' + use_fgetdims2 = fgetdims2 + if isstringarray(var): + if 'charselector' in var and 'len' in var['charselector']: + cadd('\t{"%s",%s,{{%s,%s}},%s},' + % (undo_rmbadname1(n), dm['rank'], dms, var['charselector']['len'], at)) + use_fgetdims2 = fgetdims2_sa + else: + cadd('\t{"%s",%s,{{%s}},%s},' % + (undo_rmbadname1(n), dm['rank'], dms, at)) + else: + cadd('\t{"%s",%s,{{%s}},%s},' % + (undo_rmbadname1(n), dm['rank'], dms, at)) + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, var))) + if hasnote(var): + note = var['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd('--- %s' % (note)) + if isallocatable(var): + fargs.append('f2py_%s_getdims_%s' % (m['name'], n)) + efargs.append(fargs[-1]) + sargs.append( + 'void (*%s)(int*,int*,void(*)(char*,int*),int*)' % (n)) + sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)') + 
iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n)) + fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1])) + fadd('use %s, only: d => %s\n' % + (m['name'], undo_rmbadname1(n))) + fadd('integer flag\n') + fhooks[0] = fhooks[0] + fgetdims1 + dms = eval('range(1,%s+1)' % (dm['rank'])) + fadd(' allocate(d(%s))\n' % + (','.join(['s(%s)' % i for i in dms]))) + fhooks[0] = fhooks[0] + use_fgetdims2 + fadd('end subroutine %s' % (fargs[-1])) + else: + fargs.append(n) + sargs.append('char *%s' % (n)) + sargsp.append('char*') + iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n)) + if onlyvars: + dadd('\\end{description}') + if hasbody(m): + for b in m['body']: + if not isroutine(b): + print('Skipping', b['block'], b['name']) + continue + modobjs.append('%s()' % (b['name'])) + b['modulename'] = m['name'] + api, wrap = rules.buildapi(b) + if isfunction(b): + fhooks[0] = fhooks[0] + wrap + fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + else: + if wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) + else: + fargs.append(b['name']) + mfargs.append(fargs[-1]) + api['externroutines'] = [] + ar = applyrules(api, vrd) + ar['docs'] = [] + ar['docshort'] = [] + ret = dictappend(ret, ar) + cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},' % + (b['name'], m['name'], b['name'], m['name'], b['name'])) + sargs.append('char *%s' % (b['name'])) + sargsp.append('char *') + iadd('\tf2py_%s_def[i_f2py++].data = %s;' % + (m['name'], b['name'])) + cadd('\t{NULL}\n};\n') + iadd('}') + ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( + m['name'], ','.join(sargs), ihooks[0]) + if '_' in m['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' + % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) + iadd('static void f2py_init_%s(void) {' % (m['name'])) + iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, m['name'], m['name'].upper(), m['name'])) + iadd('}\n') + ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks + ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( + m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + fadd('') + fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name'])) + if mfargs: + for a in undo_rmbadname(mfargs): + fadd('use %s, only : %s' % (m['name'], a)) + if ifargs: + fadd(' '.join(['interface'] + ifargs)) + fadd('end interface') + fadd('external f2pysetupfunc') + if efargs: + for a in undo_rmbadname(efargs): + fadd('external %s' % (a)) + fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs)))) + fadd('end subroutine f2pyinit%s\n' % (m['name'])) + + dadd('\n'.join(ret['latexdoc']).replace( + r'\subsection{', r'\subsubsection{')) + + ret['latexdoc'] = [] + ret['docs'].append('"\t%s --- %s"' % (m['name'], + ','.join(undo_rmbadname(modobjs)))) + + ret['routine_defs'] = '' + ret['doc'] = [] + ret['docshort'] = [] + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fhooks[0] diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/f90mod_rules.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/f90mod_rules.pyc new file mode 100644 index 0000000..c1412ca Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/numpy/f2py/f90mod_rules.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/func2subr.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/func2subr.py new file mode 100644 index 0000000..6010d5a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/func2subr.py @@ -0,0 +1,299 @@ +#!/usr/bin/env python +""" + +Rules for building C/API module with f2py2e. + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2004/11/26 11:13:06 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.16 $"[10:-1] + +f2py_version = 'See `f2py -v`' + +import copy + +from .auxfuncs import ( + getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in, + isintent_out, islogicalfunction, ismoduleroutine, isscalar, + issubroutine, issubroutine_wrap, outmess, show +) + + +def var2fixfortran(vars, a, fa=None, f90mode=None): + if fa is None: + fa = a + if a not in vars: + show(vars) + outmess('var2fixfortran: No definition for argument "%s".\n' % a) + return '' + if 'typespec' not in vars[a]: + show(vars[a]) + outmess('var2fixfortran: No typespec for argument "%s".\n' % a) + return '' + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = '%s(%s)' % (vardef, vars[a]['typename']) + selector = {} + lk = '' + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + lk = 'kind' + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + lk = 'len' + if '*' in selector: + if f90mode: + if selector['*'] in ['*', ':', '(*)']: + vardef = '%s(len=*)' % (vardef) + else: + vardef = '%s(%s=%s)' % (vardef, lk, selector['*']) + else: + if selector['*'] in ['*', ':']: + vardef = '%s*(%s)' % (vardef, selector['*']) + else: + vardef = '%s*%s' % (vardef, selector['*']) + else: + if 'len' in selector: + vardef = '%s(len=%s' % (vardef, selector['len']) + if 'kind' in selector: + vardef = '%s,kind=%s)' % (vardef, selector['kind']) + else: + vardef = '%s)' % (vardef) + elif 'kind' in selector: + vardef = '%s(kind=%s)' % (vardef, selector['kind']) + + vardef = '%s %s' % (vardef, fa) + if 'dimension' in vars[a]: + vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension'])) + return vardef + + +def createfuncwrapper(rout, signature=0): + assert isfunction(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = '%s\n %s' % (ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + newname = '%sf2pywrap' % (name) + + if newname not in vars: + vars[newname] = vars[name] + args = [newname] + rout['args'][1:] + else: + args = [newname] + rout['args'] + + l = var2fixfortran(vars, name, newname, f90mode) + if l[:13] == 'character*(*)': + if f90mode: + l = 'character(len=10)' + l[13:] + else: + l = 'character*10' + l[13:] + charselect = vars[name]['charselector'] + if 
charselect.get('*', '') == '(*)': + charselect['*'] = '10' + sargs = ', '.join(args) + if f90mode: + add('subroutine f2pywrap_%s_%s (%s)' % + (rout['modulename'], name, sargs)) + if not signature: + add('use %s, only : %s' % (rout['modulename'], fortranname)) + else: + add('subroutine f2pywrap%s (%s)' % (name, sargs)) + if not need_interface: + add('external %s' % (fortranname)) + l = l + ', ' + fortranname + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use '): + add(line) + + args = args[1:] + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s' % (a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isintent_in(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + add(l) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + if islogicalfunction(rout): + add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs)) + else: + add('%s = %s(%s)' % (newname, fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + else: + add('end') + return ret[0] + + +def createsubrwrapper(rout, signature=0): + assert issubroutine(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = '%s\n %s' % (ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + + args = rout['args'] + + sargs = ', '.join(args) + if f90mode: + add('subroutine f2pywrap_%s_%s (%s)' % + (rout['modulename'], name, sargs)) + if not signature: + add('use %s, only : %s' % (rout['modulename'], fortranname)) + else: + add('subroutine f2pywrap%s (%s)' % (name, sargs)) + if not need_interface: + add('external %s' % (fortranname)) + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use '): + add(line) + + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s' % (a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + add('call %s(%s)' % (fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + else: + add('end') + return ret[0] + + +def 
assubr(rout): + if isfunction_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( + name, fortranname)) + rout = copy.copy(rout) + fname = name + rname = fname + if 'result' in rout: + rname = rout['result'] + rout['vars'][fname] = rout['vars'][rname] + fvar = rout['vars'][fname] + if not isintent_out(fvar): + if 'intent' not in fvar: + fvar['intent'] = [] + fvar['intent'].append('out') + flag = 1 + for i in fvar['intent']: + if i.startswith('out='): + flag = 0 + break + if flag: + fvar['intent'].append('out=%s' % (rname)) + rout['args'][:] = [fname] + rout['args'] + return rout, createfuncwrapper(rout) + if issubroutine_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' % ( + name, fortranname)) + rout = copy.copy(rout) + return rout, createsubrwrapper(rout) + return rout, '' diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/func2subr.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/func2subr.pyc new file mode 100644 index 0000000..c0d0c25 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/func2subr.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/info.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/info.py new file mode 100644 index 0000000..c895c5d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/info.py @@ -0,0 +1,6 @@ +"""Fortran to Python Interface Generator. + +""" +from __future__ import division, absolute_import, print_function + +postpone_import = True diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/info.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/info.pyc new file mode 100644 index 0000000..e76c78d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/rules.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/rules.py new file mode 100644 index 0000000..23d36b2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/rules.py @@ -0,0 +1,1473 @@ +#!/usr/bin/env python +""" + +Rules for building C/API module with f2py2e. + +Here is a skeleton of a new wrapper function (13Dec2001): + +wrapper_function(args) + declarations + get_python_arguments, say, `a' and `b' + + get_a_from_python + if (successful) { + + get_b_from_python + if (successful) { + + callfortran + if (successful) { + + put_a_to_python + if (successful) { + + put_b_to_python + if (successful) { + + buildvalue = ... + + } + + } + + } + + } + cleanup_b + + } + cleanup_a + + return buildvalue + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/08/30 08:58:42 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.129 $"[10:-1] + +from . 
import __version__ +f2py_version = __version__.version + +import os +import time +import copy + +from .auxfuncs import ( + applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, + hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote, + isarray, isarrayofstrings, iscomplex, iscomplexarray, + iscomplexfunction, iscomplexfunction_warn, isdummyroutine, isexternal, + isfunction, isfunction_wrap, isint1array, isintent_aux, isintent_c, + isintent_callback, isintent_copy, isintent_hide, isintent_inout, + isintent_nothide, isintent_out, isintent_overwrite, islogical, + islong_complex, islong_double, islong_doublefunction, islong_long, + islong_longfunction, ismoduleroutine, isoptional, isrequired, isscalar, + issigned_long_longarray, isstring, isstringarray, isstringfunction, + issubroutine, issubroutine_wrap, isthreadsafe, isunsigned, + isunsigned_char, isunsigned_chararray, isunsigned_long_long, + isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, + l_and, l_not, l_or, outmess, replace, stripcomma, +) + +from . import capi_maps +from . import cfuncs +from . import common_rules +from . import use_rules +from . import f90mod_rules +from . import func2subr + +options = {} +sepdict = {} +#for k in ['need_cfuncs']: sepdict[k]=',' +for k in ['decl', + 'frompyobj', + 'cleanupfrompyobj', + 'topyarr', 'method', + 'pyobjfrom', 'closepyobjfrom', + 'freemem', + 'userincludes', + 'includes0', 'includes', 'typedefs', 'typedefs_generated', + 'cppmacros', 'cfuncs', 'callbacks', + 'latexdoc', + 'restdoc', + 'routine_defs', 'externroutines', + 'initf2pywraphooks', + 'commonhooks', 'initcommonhooks', + 'f90modhooks', 'initf90modhooks']: + sepdict[k] = '\n' + +#################### Rules for C/API module ################# + +generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time())) +module_rules = { + 'modulebody': """\ +/* File: #modulename#module.c + * This file is auto-generated with f2py (version:#f2py_version#). + * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, + * written by Pearu Peterson . + * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """ + * Do not edit this file directly unless you know what you are doing!!! 
+ */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +""" + gentitle("See f2py2e/cfuncs.py: includes") + """ +#includes# +#includes0# + +""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """ +static PyObject *#modulename#_error; +static PyObject *#modulename#_module; + +""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """ +#typedefs# + +""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """ +#typedefs_generated# + +""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """ +#cppmacros# + +""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """ +#cfuncs# + +""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """ +#userincludes# + +""" + gentitle("See f2py2e/capi_rules.py: usercode") + """ +#usercode# + +/* See f2py2e/rules.py */ +#externroutines# + +""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """ +#usercode1# + +""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """ +#callbacks# + +""" + gentitle("See f2py2e/rules.py: buildapi") + """ +#body# + +""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """ +#f90modhooks# + +""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """ + +""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """ +#commonhooks# + +""" + gentitle("See f2py2e/rules.py") + """ + +static FortranDataDef f2py_routine_defs[] = { +#routine_defs# +\t{NULL} +}; + +static PyMethodDef f2py_module_methods[] = { +#pymethoddef# +\t{NULL,NULL} +}; + +#if PY_VERSION_HEX >= 0x03000000 +static struct PyModuleDef moduledef = { +\tPyModuleDef_HEAD_INIT, +\t"#modulename#", +\tNULL, +\t-1, +\tf2py_module_methods, +\tNULL, +\tNULL, +\tNULL, +\tNULL +}; +#endif + +#if PY_VERSION_HEX >= 0x03000000 +#define RETVAL m +PyMODINIT_FUNC PyInit_#modulename#(void) { +#else +#define RETVAL +PyMODINIT_FUNC init#modulename#(void) { +#endif +\tint i; +\tPyObject *m,*d, *s; +#if PY_VERSION_HEX >= 0x03000000 +\tm = #modulename#_module = PyModule_Create(&moduledef); +#else +\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods); +#endif +\tPy_TYPE(&PyFortran_Type) = &PyType_Type; +\timport_array(); +\tif (PyErr_Occurred()) +\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;} +\td = PyModule_GetDict(m); +\ts = PyString_FromString(\"$R""" + """evision: $\"); +\tPyDict_SetItemString(d, \"__version__\", s); +#if PY_VERSION_HEX >= 0x03000000 +\ts = PyUnicode_FromString( +#else +\ts = PyString_FromString( +#endif +\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); +\tPyDict_SetItemString(d, \"__doc__\", s); +\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); +\tPy_DECREF(s); +\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) +\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i])); +#initf2pywraphooks# +#initf90modhooks# +#initcommonhooks# +#interface_usercode# + +#ifdef F2PY_REPORT_ATEXIT +\tif (! 
PyErr_Occurred()) +\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); +#endif + +\treturn RETVAL; +} +#ifdef __cplusplus +} +#endif +""", + 'separatorsfor': {'latexdoc': '\n\n', + 'restdoc': '\n\n'}, + 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n', + '#modnote#\n', + '#latexdoc#'], + 'restdoc': ['Module #modulename#\n' + '=' * 80, + '\n#restdoc#'] +} + +defmod_rules = [ + {'body': '/*eof body*/', + 'method': '/*eof method*/', + 'externroutines': '/*eof externroutines*/', + 'routine_defs': '/*eof routine_defs*/', + 'initf90modhooks': '/*eof initf90modhooks*/', + 'initf2pywraphooks': '/*eof initf2pywraphooks*/', + 'initcommonhooks': '/*eof initcommonhooks*/', + 'latexdoc': '', + 'restdoc': '', + 'modnote': {hasnote: '#note#', l_not(hasnote): ''}, + } +] + +routine_rules = { + 'separatorsfor': sepdict, + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; +/* #declfortranroutine# */ +static PyObject *#apiname#(const PyObject *capi_self, + PyObject *capi_args, + PyObject *capi_keywds, + #functype# (*f2py_func)(#callprotoargument#)) { +\tPyObject * volatile capi_buildvalue = NULL; +\tvolatile int f2py_success = 1; +#decl# +\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; +#usercode# +#routdebugenter# +#ifdef F2PY_REPORT_ATEXIT +f2py_start_clock(); +#endif +\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ +\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\ +\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL; +#frompyobj# +/*end of frompyobj*/ +#ifdef F2PY_REPORT_ATEXIT +f2py_start_call_clock(); +#endif +#callfortranroutine# +if (PyErr_Occurred()) + f2py_success = 0; +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_call_clock(); +#endif +/*end of callfortranroutine*/ +\t\tif (f2py_success) { +#pyobjfrom# +/*end of pyobjfrom*/ +\t\tCFUNCSMESS(\"Building return value.\\n\"); +\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); +/*closepyobjfrom*/ +#closepyobjfrom# +\t\t} /*if (f2py_success) after callfortranroutine*/ +/*cleanupfrompyobj*/ +#cleanupfrompyobj# +\tif (capi_buildvalue == NULL) { +#routdebugfailure# +\t} else { +#routdebugleave# +\t} +\tCFUNCSMESS(\"Freeing memory.\\n\"); +#freemem# +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_clock(); +#endif +\treturn capi_buildvalue; +} +#endtitle# +""", + 'routine_defs': '#routine_def#', + 'initf2pywraphooks': '#initf2pywraphook#', + 'externroutines': '#declfortranroutine#', + 'doc': '#docreturn##name#(#docsignature#)', + 'docshort': '#docreturn##name#(#docsignatureshort#)', + 'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n', + 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], + 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'}, + 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', + """ +\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} +#routnote# + +#latexdocstrsigns# +"""], + 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80, + + ] +} + +################## Rules for C/API function ############## + +rout_rules = [ + { # Init + 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', + 'routdebugleave': '\n', 'routdebugfailure': '\n', + 'setjmpbuf': ' || ', + 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', + 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'kwlist': 
'', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '', + 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', + 'freemem': '/*freemem*/', + 'docsignshort': '', 'docsignoptshort': '', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\\nParameters\\n----------', + 'docstropt': '\\nOther Parameters\\n----------------', + 'docstrout': '\\nReturns\\n-------', + 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'args_capi': '', 'keys_capi': '', 'functype': '', + 'frompyobj': '/*frompyobj*/', + # this list will be reversed + 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], + 'pyobjfrom': '/*pyobjfrom*/', + # this list will be reversed + 'closepyobjfrom': ['/*end of closepyobjfrom*/'], + 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', + 'routdebugenter': '/*routdebugenter*/', + 'routdebugfailure': '/*routdebugfailure*/', + 'callfortranroutine': '/*callfortranroutine*/', + 'argformat': '', 'keyformat': '', 'need_cfuncs': '', + 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', + 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', + 'initf2pywraphook': '', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { + 'apiname': 'f2py_rout_#modulename#_#name#', + 'pyname': '#modulename#.#name#', + 'decl': '', + '_check': l_not(ismoduleroutine) + }, { + 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#', + 'pyname': '#modulename#.#f90modulename#.#name#', + 'decl': '', + '_check': ismoduleroutine + }, { # Subroutine + 'functype': 'void', + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);', + ismoduleroutine: '', + isdummyroutine: '' + }, + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, + 'callfortranroutine': [ + {debugcapi: [ + """\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ +\t\tif (#setjmpbuf#) { +\t\t\tf2py_success = 0; +\t\t} else {"""}, + {isthreadsafe: '\t\t\tPy_BEGIN_ALLOW_THREADS'}, + {hascallstatement: '''\t\t\t\t#callstatement#; +\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : '\t\t\t\t(*f2py_func)(#callfortran#);'}, + {isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'}, + {hasexternals: """\t\t}"""} + ], + '_check': l_and(issubroutine, l_not(issubroutine_wrap)), + }, { # Wrapped function + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': 
{l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); +#if PY_VERSION_HEX >= 0x03000000 + PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); +#else + PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); +#endif + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ +\tif (#setjmpbuf#) { +\t\tf2py_success = 0; +\t} else {"""}, + {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : '\t(*f2py_func)(#callfortran#);'}, + {hascallstatement: + '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, + {hasexternals: '\t}'} + ], + '_check': isfunction_wrap, + }, { # Wrapped subroutine + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern void #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); +#if PY_VERSION_HEX >= 0x03000000 + PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); +#else + PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); +#endif + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ +\tif (#setjmpbuf#) { +\t\tf2py_success = 0; +\t} else {"""}, + {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : '\t(*f2py_func)(#callfortran#);'}, + {hascallstatement: + '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, + {hasexternals: '\t}'} + ], + '_check': issubroutine_wrap, + }, { # Function + 'functype': '#ctype#', + 'docreturn': {l_not(isintent_hide): '#rname#,'}, + 'docstrout': '#pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasresultnote: '--- #resultnote#'}], + 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ +#ifdef USESCOMPAQFORTRAN +\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); +#else +\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +#endif 
+"""}, + {l_and(debugcapi, l_not(isstringfunction)): """\ +\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +"""} + ], + '_check': l_and(isfunction, l_not(isfunction_wrap)) + }, { # Scalar function + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', + isdummyroutine: '' + }, + 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};', + l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'}, + {iscomplexfunction: + '\tPyObject *#name#_return_value_capi = Py_None;'} + ], + 'callfortranroutine': [ + {hasexternals: """\ +\tif (#setjmpbuf#) { +\t\tf2py_success = 0; +\t} else {"""}, + {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, + {hascallstatement: '''\t#callstatement#; +/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ +'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : '\t#name#_return_value = (*f2py_func)(#callfortran#);'}, + {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, + {hasexternals: '\t}'}, + {l_and(debugcapi, iscomplexfunction) + : '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + 'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, + 'need': [{l_not(isdummyroutine): 'F_FUNC'}, + {iscomplexfunction: 'pyobj_from_#ctype#1'}, + {islong_longfunction: 'long_long'}, + {islong_doublefunction: 'long_double'}], + 'returnformat': {l_not(isintent_hide): '#rformat#'}, + 'return': {iscomplexfunction: ',#name#_return_value_capi', + l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'}, + '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) + }, { # String function # in use for --no-wrap + 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): + '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c): + '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' + }, + 'decl': ['\t#ctype# #name#_return_value = NULL;', + '\tint #name#_return_value_len = 0;'], + 'callfortran':'#name#_return_value,#name#_return_value_len,', + 'callfortranroutine':['\t#name#_return_value_len = #rlength#;', + '\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {', + '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");', + '\t\tf2py_success = 0;', + '\t} else {', + "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';", + '\t}', + '\tif (f2py_success) {', + {hasexternals: """\ +\t\tif (#setjmpbuf#) { 
+\t\t\tf2py_success = 0; +\t\t} else {"""}, + {isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'}, + """\ +#ifdef USESCOMPAQFORTRAN +\t\t(*f2py_func)(#callcompaqfortran#); +#else +\t\t(*f2py_func)(#callfortran#); +#endif +""", + {isthreadsafe: '\t\tPy_END_ALLOW_THREADS'}, + {hasexternals: '\t\t}'}, + {debugcapi: + '\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, + '\t} /* if (f2py_success) after (string)malloc */', + ], + 'returnformat': '#rformat#', + 'return': ',#name#_return_value', + 'freemem': '\tSTRINGFREE(#name#_return_value);', + 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], + '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + }, + { # Debugging + 'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', + 'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', + 'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', + '_check': debugcapi + } +] + +################ Rules for arguments ################## + +typedef_need_dict = {islong_long: 'long_long', + islong_double: 'long_double', + islong_complex: 'complex_long_double', + isunsigned_char: 'unsigned_char', + isunsigned_short: 'unsigned_short', + isunsigned: 'unsigned', + isunsigned_long_long: 'unsigned_long_long', + isunsigned_chararray: 'unsigned_char', + isunsigned_shortarray: 'unsigned_short', + isunsigned_long_longarray: 'unsigned_long_long', + issigned_long_longarray: 'long_long', + } + +aux_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': ['\t/* Processing auxiliary variable #varname# */', + {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + 'need': typedef_need_dict, + }, + # Scalars (not complex) + { # Common + 'decl': '\t#ctype# #varname# = 0;', + 'need': {hasinitvalue: 'math.h'}, + 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, + { + 'return': ',#varname#', + 'docstrout': '#pydocsignout#', + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': l_and(isscalar, l_not(iscomplex), isintent_out), + }, + # Complex scalars + { # Common + 'decl': '\t#ctype# #varname#;', + 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': iscomplex + }, + # String + { # Common + 'decl': ['\t#ctype# #varname# = NULL;', + '\tint slen(#varname#);', + ], + 'need':['len..'], + '_check':isstring + }, + # Array + { # Common + 'decl': ['\t#ctype# *#varname# = NULL;', + '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + '\tconst int #varname#_Rank = #rank#;', + ], + 'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': isunsigned_chararray, + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # 
Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +arg_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': ['\t/* Processing variable #varname# */', + {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + '_depend': '', + 'need': typedef_need_dict, + }, + # Doc signatures + { + 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'}, + 'docstrout': {isintent_out: '#pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'depend': '' + }, + # Required/Optional arguments + { + 'kwlist': '"#varname#",', + 'docsign': '#varname#,', + '_check': l_and(isintent_nothide, l_not(isoptional)) + }, + { + 'kwlistopt': '"#varname#",', + 'docsignopt': '#varname#=#showinit#,', + 'docsignoptshort': '#varname#,', + '_check': l_and(isintent_nothide, isoptional) + }, + # Docstring/BuildValue + { + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': isintent_out + }, + # Externals (call-back functions) + { # Common + 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'}, + 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'}, + 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'}, + 'docstrcbs': '#cbdocstr#', + 'latexdocstrcbs': '\\item[] #cblatexdocstr#', + 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, + 'decl': ['\tPyObject *#varname#_capi = Py_None;', + '\tPyTupleObject *#varname#_xa_capi = NULL;', + '\tPyTupleObject *#varname#_args_capi = NULL;', + '\tint #varname#_nofargs_capi = 0;', + {l_not(isintent_callback): + '\t#cbname#_typedef #varname#_cptr;'} + ], + 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'}, + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'xaformat': {isintent_nothide: 'O!'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi', + 'setjmpbuf': '(setjmp(#cbname#_jmpbuf))', + 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, + 'need': ['#cbname#', 'setjmp.h'], + '_check':isexternal + }, + { + 'frompyobj': [{l_not(isintent_callback): """\ +if(F2PyCapsule_Check(#varname#_capi)) { + #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi); +} else { + #varname#_cptr = #cbname#; +} +"""}, {isintent_callback: """\ +if (#varname#_capi==Py_None) { + #varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); + if (#varname#_capi) { + if (#varname#_xa_capi==NULL) { + if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { + PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); + if (capi_tmp) + #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); + else + #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); + if 
(#varname#_xa_capi==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); + return NULL; + } + } + } + } + if (#varname#_capi==NULL) { + PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); + return NULL; + } +} +"""}, + """\ +\t#varname#_nofargs_capi = #cbname#_nofargs; +\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) { +\t\tjmp_buf #varname#_jmpbuf;""", + {debugcapi: ["""\ +\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs); +\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""", + {l_not(isintent_callback): """\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, + """\ +\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\"); +\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject); +\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject); +\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""", + ], + 'cleanupfrompyobj': + """\ +\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\"); +\t\t#cbname#_capi = #varname#_capi; +\t\tPy_DECREF(#cbname#_args_capi); +\t\t#cbname#_args_capi = #varname#_args_capi; +\t\t#cbname#_nofargs = #varname#_nofargs_capi; +\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf)); +\t}""", + 'need': ['SWAP', 'create_cb_arglist'], + '_check':isexternal, + '_depend':'' + }, + # Scalars (not complex) + { # Common + 'decl': '\t#ctype# #varname# = 0;', + 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, + 'return': {isintent_out: ',#varname#'}, + '_check': l_and(isscalar, l_not(iscomplex)) + }, { + 'need': {hasinitvalue: 'math.h'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, { # Not hidden + 'decl': '\tPyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': {isintent_inout: """\ +\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); +\tif (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide) + }, { + 'frompyobj': [ + # hasinitvalue... + # if pyobj is None: + # varname = init + # else + # from_pyobj(varname) + # + # isoptional and noinitvalue... + # if pyobj is not None: + # from_pyobj(varname) + # else: + # varname is uninitialized + # + # ... 
+ # from_pyobj(varname) + # + {hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else', + '_depend': ''}, + {l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)', + '_depend': ''}, + {l_not(islogical): '''\ +\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); +\tif (f2py_success) {'''}, + {islogical: '''\ +\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); +\t\tf2py_success = 1; +\tif (f2py_success) {'''}, + ], + 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/', + 'need': {l_not(islogical): '#ctype#_from_pyobj'}, + '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), + '_depend': '' + }, { # Hidden + 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, + 'need': typedef_need_dict, + '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), + '_depend': '' + }, { # Common + 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + '_check': l_and(isscalar, l_not(iscomplex)), + '_depend': '' + }, + # Complex scalars + { # Common + 'decl': '\t#ctype# #varname#;', + 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, + 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'return': {isintent_out: ',#varname#_capi'}, + '_check': iscomplex + }, { # Not hidden + 'decl': '\tPyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + 'pyobjfrom': {isintent_inout: """\ +\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); +\t\tif (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, + '_check': l_and(iscomplex, isintent_nothide) + }, { + 'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, + {l_and(isoptional, l_not(hasinitvalue)) + : '\tif (#varname#_capi != Py_None)'}, + '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n\tif (f2py_success) {'], + 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/', + 'need': ['#ctype#_from_pyobj'], + '_check': l_and(iscomplex, isintent_nothide), + '_depend': '' + }, { # Hidden + 'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'}, + '_check': l_and(iscomplex, isintent_hide) + }, { + 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': l_and(iscomplex, isintent_hide), + '_depend': '' + }, { # Common + 'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, + 'need': ['pyobj_from_#ctype#1'], + '_check': iscomplex + }, { + 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + '_check': iscomplex, + '_depend': '' + }, + # String + { # Common + 'decl': ['\t#ctype# #varname# = NULL;', + '\tint slen(#varname#);', + '\tPyObject *#varname#_capi = Py_None;'], + 'callfortran':'#varname#,', + 'callfortranappend':'slen(#varname#),', + 'pyobjfrom':{debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + 'return': {isintent_out: ',#varname#'}, + 'need': ['len..'], # 'STRINGFREE'], + '_check':isstring + }, { # Common + 'frompyobj': """\ 
+\tslen(#varname#) = #length#; +\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\"); +\tif (f2py_success) {""", + 'cleanupfrompyobj': """\ +\t\tSTRINGFREE(#varname#); +\t} /*if (f2py_success) of #varname#*/""", + 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE'], + '_check':isstring, + '_depend':'' + }, { # Not hidden + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': {isintent_inout: '''\ +\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#); +\tif (f2py_success) {'''}, + 'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + '_check': l_and(isstring, isintent_nothide) + }, { # Hidden + '_check': l_and(isstring, isintent_hide) + }, { + 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + '_check': isstring, + '_depend': '' + }, + # Array + { # Common + 'decl': ['\t#ctype# *#varname# = NULL;', + '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + '\tconst int #varname#_Rank = #rank#;', + '\tPyArrayObject *capi_#varname#_tmp = NULL;', + '\tint capi_#varname#_intent = 0;', + ], + 'callfortran':'#varname#,', + 'return':{isintent_out: ',capi_#varname#_tmp'}, + 'need': 'len..', + '_check': isarray + }, { # intent(overwrite) array + 'decl': '\tint capi_overwrite_#varname# = 1;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=1,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', + '_check': l_and(isarray, isintent_overwrite), + }, { + 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_overwrite), + '_depend': '', + }, + { # intent(copy) array + 'decl': '\tint capi_overwrite_#varname# = 0;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=0,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', + '_check': l_and(isarray, isintent_copy), + }, { + 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_copy), + '_depend': '', + }, { + 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray, + '_depend': '' + }, { # Not hidden + 'decl': '\tPyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + '_check': l_and(isarray, isintent_nothide) + }, { + 'frompyobj': ['\t#setdims#;', + '\tcapi_#varname#_intent |= #intent#;', + {isintent_hide: + '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, + {isintent_nothide: + '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, + """\ +\tif (capi_#varname#_tmp == NULL) { +\t\tif (!PyErr_Occurred()) +\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran 
array\" ); +\t} else { +\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp)); +""", + {hasinitvalue: [ + {isintent_nothide: + '\tif (#varname#_capi == Py_None) {'}, + {isintent_hide: '\t{'}, + {iscomplexarray: '\t\t#ctype# capi_c;'}, + """\ +\t\tint *_i,capi_i=0; +\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); +\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) { +\t\t\twhile ((_i = nextforcomb())) +\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */ +\t\t} else { +\t\t\tif (!PyErr_Occurred()) +\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); +\t\t\tf2py_success = 0; +\t\t} +\t} +\tif (f2py_success) {"""]}, + ], + 'cleanupfrompyobj': [ # note that this list will be reversed + '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', + {l_not(l_or(isintent_out, isintent_hide)): """\ +\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { +\t\tPy_XDECREF(capi_#varname#_tmp); }"""}, + {l_and(isintent_hide, l_not(isintent_out)) + : """\t\tPy_XDECREF(capi_#varname#_tmp);"""}, + {hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'}, + ], + '_check': isarray, + '_depend': '' + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': isunsigned_chararray, + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +################# Rules for checking ############### + +check_rules = [ + { + 'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, + 'need': 'len..' + }, { + 'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/', + 'need': 'CHECKSCALAR', + '_check': l_and(isscalar, l_not(iscomplex)), + '_break': '' + }, { + 'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/', + 'need': 'CHECKSTRING', + '_check': isstring, + '_break': '' + }, { + 'need': 'CHECKARRAY', + 'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/', + '_check': isarray, + '_break': '' + }, { + 'need': 'CHECKGENERIC', + 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/', + } +] + +########## Applying the rules. 
No need to modify what follows ############# + +#################### Build C/API module ####################### + + +def buildmodule(m, um): + """ + Return + """ + global f2py_version, options + outmess('\tBuilding module "%s"...\n' % (m['name'])) + ret = {} + mod_rules = defmod_rules[:] + vrd = capi_maps.modsign2map(m) + rd = dictappend({'f2py_version': f2py_version}, vrd) + funcwrappers = [] + funcwrappers2 = [] # F90 codes + for n in m['interfaced']: + nb = None + for bi in m['body']: + if not bi['block'] == 'interface': + errmess('buildmodule: Expected interface block. Skipping.\n') + continue + for b in bi['body']: + if b['name'] == n: + nb = b + break + + if not nb: + errmess( + 'buildmodule: Could not found the body of interfaced routine "%s". Skipping.\n' % (n)) + continue + nb_list = [nb] + if 'entry' in nb: + for k, a in nb['entry'].items(): + nb1 = copy.deepcopy(nb) + del nb1['entry'] + nb1['name'] = k + nb1['args'] = a + nb_list.append(nb1) + for nb in nb_list: + api, wrap = buildapi(nb) + if wrap: + if ismoduleroutine(nb): + funcwrappers2.append(wrap) + else: + funcwrappers.append(wrap) + ar = applyrules(api, vrd) + rd = dictappend(rd, ar) + + # Construct COMMON block support + cr, wrap = common_rules.buildhooks(m) + if wrap: + funcwrappers.append(wrap) + ar = applyrules(cr, vrd) + rd = dictappend(rd, ar) + + # Construct F90 module support + mr, wrap = f90mod_rules.buildhooks(m) + if wrap: + funcwrappers2.append(wrap) + ar = applyrules(mr, vrd) + rd = dictappend(rd, ar) + + for u in um: + ar = use_rules.buildusevars(u, m['use'][u['name']]) + rd = dictappend(rd, ar) + + needs = cfuncs.get_needs() + code = {} + for n in needs.keys(): + code[n] = [] + for k in needs[n]: + c = '' + if k in cfuncs.includes0: + c = cfuncs.includes0[k] + elif k in cfuncs.includes: + c = cfuncs.includes[k] + elif k in cfuncs.userincludes: + c = cfuncs.userincludes[k] + elif k in cfuncs.typedefs: + c = cfuncs.typedefs[k] + elif k in cfuncs.typedefs_generated: + c = cfuncs.typedefs_generated[k] + elif k in cfuncs.cppmacros: + c = cfuncs.cppmacros[k] + elif k in cfuncs.cfuncs: + c = cfuncs.cfuncs[k] + elif k in cfuncs.callbacks: + c = cfuncs.callbacks[k] + elif k in cfuncs.f90modhooks: + c = cfuncs.f90modhooks[k] + elif k in cfuncs.commonhooks: + c = cfuncs.commonhooks[k] + else: + errmess('buildmodule: unknown need %s.\n' % (repr(k))) + continue + code[n].append(c) + mod_rules.append(code) + for r in mod_rules: + if ('_check' in r and r['_check'](m)) or ('_check' not in r): + ar = applyrules(r, vrd, m) + rd = dictappend(rd, ar) + ar = applyrules(module_rules, rd) + + fn = os.path.join(options['buildpath'], vrd['coutput']) + ret['csrc'] = fn + f = open(fn, 'w') + f.write(ar['modulebody'].replace('\t', 2 * ' ')) + f.close() + outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn)) + + if options['dorestdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.rest') + f = open(fn, 'w') + f.write('.. 
-*- rest -*-\n') + f.write('\n'.join(ar['restdoc'])) + f.close() + outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' % + (options['buildpath'], vrd['modulename'])) + if options['dolatexdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.tex') + ret['ltx'] = fn + f = open(fn, 'w') + f.write( + '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version)) + if 'shortlatex' not in options: + f.write( + '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') + f.write('\n'.join(ar['latexdoc'])) + if 'shortlatex' not in options: + f.write('\\end{document}') + f.close() + outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' % + (options['buildpath'], vrd['modulename'])) + if funcwrappers: + wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) + ret['fsrc'] = wn + f = open(wn, 'w') + f.write('C -*- fortran -*-\n') + f.write( + 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f.write( + 'C It contains Fortran 77 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): + if l and l[0] == ' ': + while len(l) >= 66: + lines.append(l[:66] + '\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + f.close() + outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn)) + if funcwrappers2: + wn = os.path.join( + options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename'])) + ret['fsrc'] = wn + f = open(wn, 'w') + f.write('! -*- f90 -*-\n') + f.write( + '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f.write( + '! It contains Fortran 90 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): + if len(l) > 72 and l[0] == ' ': + lines.append(l[:72] + '&\n &') + l = l[72:] + while len(l) > 66: + lines.append(l[:66] + '&\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + f.close() + outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn)) + return ret + +################## Build C/API function ############# + +stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', + 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} + + +def buildapi(rout): + rout, wrap = func2subr.assubr(rout) + args, depargs = getargs2(rout) + capi_maps.depargs = depargs + var = rout['vars'] + + if ismoduleroutine(rout): + outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' % + (rout['modulename'], rout['name'])) + else: + outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name'])) + # Routine + vrd = capi_maps.routsign2map(rout) + rd = dictappend({}, vrd) + for r in rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + + # Args + nth, nthk = 0, 0 + savevrd = {} + for a in args: + vrd = capi_maps.sign2map(a, var[a]) + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + if not isintent_hide(var[a]): + if not isoptional(var[a]): + nth = nth + 1 + vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument' + else: + nthk = nthk + 1 + vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword' + else: + vrd['nth'] = 'hidden' + savevrd[a] = vrd + for r in _rules: + if '_depend' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = 
applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + vrd = savevrd[a] + for r in _rules: + if '_depend' not in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'check' in var[a]: + for c in var[a]['check']: + vrd['check'] = c + ar = applyrules(check_rules, vrd, var[a]) + rd = dictappend(rd, ar) + if isinstance(rd['cleanupfrompyobj'], list): + rd['cleanupfrompyobj'].reverse() + if isinstance(rd['closepyobjfrom'], list): + rd['closepyobjfrom'].reverse() + rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#', + {'docsign': rd['docsign'], + 'docsignopt': rd['docsignopt'], + 'docsignxa': rd['docsignxa']})) + optargs = stripcomma(replace('#docsignopt##docsignxa#', + {'docsignxa': rd['docsignxashort'], + 'docsignopt': rd['docsignoptshort']} + )) + if optargs == '': + rd['docsignatureshort'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_') + rd['latexdocsignatureshort'] = rd[ + 'latexdocsignatureshort'].replace(',', ', ') + cfs = stripcomma(replace('#callfortran##callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + if len(rd['callfortranappend']) > 1: + rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 0,#callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + else: + rd['callcompaqfortran'] = cfs + rd['callfortran'] = cfs + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = ' + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + + # Workaround for Python 2.6, 2.6.1 bug: https://bugs.python.org/issue4720 + if rd['keyformat'] or rd['xaformat']: + argformat = rd['argformat'] + if isinstance(argformat, list): + argformat.append('|') + else: + assert isinstance(argformat, str), repr( + (argformat, type(argformat))) + rd['argformat'] += '|' + + ar = applyrules(routine_rules, rd) + if ismoduleroutine(rout): + outmess('\t\t\t %s\n' % (ar['docshort'])) + else: + outmess('\t\t %s\n' % (ar['docshort'])) + return ar, wrap + + +#################### EOF rules.py ####################### diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/rules.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/rules.pyc new file mode 100644 index 0000000..6cf49e7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/rules.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/setup.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/setup.py new file mode 100644 index 0000000..c0c50ce --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/setup.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python +""" +setup.py for 
installing F2PY + +Usage: + python setup.py install + +Copyright 2001-2005 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Revision: 1.32 $ +$Date: 2005/01/30 17:22:14 $ +Pearu Peterson + +""" +from __future__ import division, print_function + +from numpy.distutils.core import setup +from numpy.distutils.misc_util import Configuration + + +from __version__ import version + + +def configuration(parent_package='', top_path=None): + config = Configuration('f2py', parent_package, top_path) + config.add_data_dir('tests') + config.add_data_files( + 'src/fortranobject.c', + 'src/fortranobject.h') + return config + + +if __name__ == "__main__": + + config = configuration(top_path='') + config = config.todict() + + config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ + "/F2PY-2-latest.tar.gz" + config['classifiers'] = [ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: NumPy License', + 'Natural Language :: English', + 'Operating System :: OS Independent', + 'Programming Language :: C', + 'Programming Language :: Fortran', + 'Programming Language :: Python', + 'Topic :: Scientific/Engineering', + 'Topic :: Software Development :: Code Generators', + ] + setup(version=version, + description="F2PY - Fortran to Python Interface Generator", + author="Pearu Peterson", + author_email="pearu@cens.ioc.ee", + maintainer="Pearu Peterson", + maintainer_email="pearu@cens.ioc.ee", + license="BSD", + platforms="Unix, Windows (mingw|cygwin), Mac OSX", + long_description="""\ +The Fortran to Python Interface Generator, or F2PY for short, is a +command line tool (f2py) for generating Python C/API modules for +wrapping Fortran 77/90/95 subroutines, accessing common blocks from +Python, and calling Python functions from Fortran (call-backs). 
+Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
+          url="http://cens.ioc.ee/projects/f2py2e/",
+          keywords=['Fortran', 'f2py'],
+          **config)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/setup.pyc
new file mode 100644
index 0000000..6c4ccad
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/setup.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/src/fortranobject.c b/project/venv/lib/python2.7/site-packages/numpy/f2py/src/fortranobject.c
new file mode 100644
index 0000000..78b06f0
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/src/fortranobject.c
@@ -0,0 +1,1089 @@
+#define FORTRANOBJECT_C
+#include "fortranobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+
+/*
+  This file implements: FortranObject, array_from_pyobj, copy_ND_array
+
+  Author: Pearu Peterson <pearu@cens.ioc.ee>
+  $Revision: 1.52 $
+  $Date: 2005/07/11 07:44:20 $
+*/
+
+int
+F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj)
+{
+    if (obj==NULL) {
+        fprintf(stderr, "Error loading %s\n", name);
+        if (PyErr_Occurred()) {
+            PyErr_Print();
+            PyErr_Clear();
+        }
+        return -1;
+    }
+    return PyDict_SetItemString(dict, name, obj);
+}
+
+/************************* FortranObject *******************************/
+
+typedef PyObject *(*fortranfunc)(PyObject *,PyObject *,PyObject *,void *);
+
+PyObject *
+PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) {
+    int i;
+    PyFortranObject *fp = NULL;
+    PyObject *v = NULL;
+    if (init!=NULL)              /* Initialize F90 module objects */
+        (*(init))();
+    if ((fp = PyObject_New(PyFortranObject, &PyFortran_Type))==NULL) return NULL;
+    if ((fp->dict = PyDict_New())==NULL) return NULL;
+    fp->len = 0;
+    while (defs[fp->len].name != NULL) fp->len++;
+    if (fp->len == 0) goto fail;
+    fp->defs = defs;
+    for (i=0;i<fp->len;i++)
+        if (fp->defs[i].rank == -1) {          /* Is Fortran routine */
+            v = PyFortranObject_NewAsAttr(&(fp->defs[i]));
+            if (v==NULL) return NULL;
+            PyDict_SetItemString(fp->dict,fp->defs[i].name,v);
+        } else
+        if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not allocatable) */
+            if (fp->defs[i].type == NPY_STRING) {
+                int n = fp->defs[i].rank-1;
+                v = PyArray_New(&PyArray_Type, n, fp->defs[i].dims.d,
+                                NPY_STRING, NULL, fp->defs[i].data, fp->defs[i].dims.d[n],
+                                NPY_ARRAY_FARRAY, NULL);
+            }
+            else {
+                v = PyArray_New(&PyArray_Type, fp->defs[i].rank, fp->defs[i].dims.d,
+                                fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY,
+                                NULL);
+            }
+            if (v==NULL) return NULL;
+            PyDict_SetItemString(fp->dict,fp->defs[i].name,v);
+        }
+    Py_XDECREF(v);
+    return (PyObject *)fp;
+ fail:
+    Py_XDECREF(v);
+    return NULL;
+}
+
+PyObject *
+PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module routines */
+    PyFortranObject *fp = NULL;
+    fp = PyObject_New(PyFortranObject, &PyFortran_Type);
+    if (fp == NULL) return NULL;
+    if ((fp->dict = PyDict_New())==NULL) return NULL;
+    fp->len = 1;
+    fp->defs = defs;
+    return (PyObject *)fp;
+}
+
+/* Fortran methods */
+
+static void
+fortran_dealloc(PyFortranObject *fp) {
+    Py_XDECREF(fp->dict);
+    PyMem_Del(fp);
+}
+
+
+#if PY_VERSION_HEX >= 0x03000000
+#else
+static PyMethodDef fortran_methods[] = {
+    {NULL, NULL}   /* sentinel */
+};
+#endif
+
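+/* A rough sketch of the Python-side behaviour that the functions below
+   implement: an f2py-built extension exposes `fortran` objects whose
+   attribute access goes through fortran_getattr/fortran_setattr and whose
+   docstrings are assembled by fortran_doc.  The module and attribute names
+   here are hypothetical, not taken from this file:
+
+       import somemod                    # hypothetical f2py-built module
+       f = somemod.foo                   # a `fortran` object (PyFortran_Type)
+       print(f.__doc__)                  # string assembled by fortran_doc()
+       somemod.mydata.x = [1., 2., 3.]   # assignment routed via fortran_setattr()
+*/
+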
+/* Returns number of bytes consumed from buf, or -1 on error. */
+static Py_ssize_t
+format_def(char *buf, Py_ssize_t size, FortranDataDef def)
+{
+    char *p = buf;
+    int i, n;
+
+    n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]);
+    if (n < 0 || n >= size) {
+        return -1;
+    }
+    p += n;
+    size -= n;
+
+    for (i = 1; i < def.rank; i++) {
+        n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]);
+        if (n < 0 || n >= size) {
+            return -1;
+        }
+        p += n;
+        size -= n;
+    }
+
+    if (size <= 0) {
+        return -1;
+    }
+
+    *p++ = ')';
+    size--;
+
+    if (def.data == NULL) {
+        static const char notalloc[] = ", not allocated";
+        if (size < sizeof(notalloc)) {
+            return -1;
+        }
+        memcpy(p, notalloc, sizeof(notalloc));
+    }
+
+    return p - buf;
+}
+
+static PyObject *
+fortran_doc(FortranDataDef def)
+{
+    char *buf, *p;
+    PyObject *s = NULL;
+    Py_ssize_t n, origsize, size = 100;
+
+    if (def.doc != NULL) {
+        size += strlen(def.doc);
+    }
+    origsize = size;
+    buf = p = (char *)PyMem_Malloc(size);
+    if (buf == NULL) {
+        return PyErr_NoMemory();
+    }
+
+    if (def.rank == -1) {
+        if (def.doc) {
+            n = strlen(def.doc);
+            if (n > size) {
+                goto fail;
+            }
+            memcpy(p, def.doc, n);
+            p += n;
+            size -= n;
+        }
+        else {
+            n = PyOS_snprintf(p, size, "%s - no docs available", def.name);
+            if (n < 0 || n >= size) {
+                goto fail;
+            }
+            p += n;
+            size -= n;
+        }
+    }
+    else {
+        PyArray_Descr *d = PyArray_DescrFromType(def.type);
+        n = PyOS_snprintf(p, size, "'%c'-", d->type);
+        Py_DECREF(d);
+        if (n < 0 || n >= size) {
+            goto fail;
+        }
+        p += n;
+        size -= n;
+
+        if (def.data == NULL) {
+            n = format_def(p, size, def);
+            if (n < 0) {
+                goto fail;
+            }
+            p += n;
+            size -= n;
+        }
+        else if (def.rank > 0) {
+            n = format_def(p, size, def);
+            if (n < 0) {
+                goto fail;
+            }
+            p += n;
+            size -= n;
+        }
+        else {
+            n = strlen("scalar");
+            if (size < n) {
+                goto fail;
+            }
+            memcpy(p, "scalar", n);
+            p += n;
+            size -= n;
+        }
+    }
+    if (size <= 1) {
+        goto fail;
+    }
+    *p++ = '\n';
+    size--;
+
+    /* p now points one beyond the last character of the string in buf */
+#if PY_VERSION_HEX >= 0x03000000
+    s = PyUnicode_FromStringAndSize(buf, p - buf);
+#else
+    s = PyString_FromStringAndSize(buf, p - buf);
+#endif
+
+    PyMem_Free(buf);
+    return s;
+
+ fail:
+    fprintf(stderr, "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:"
+                    " too long docstring required, increase size\n",
+            p - buf, origsize);
+    PyMem_Free(buf);
+    return NULL;
+}
+
+static FortranDataDef *save_def; /* save pointer of an allocatable array */
+static void set_data(char *d,npy_intp *f) {  /* callback from Fortran */
+    if (*f)                                  /* In fortran f=allocated(d) */
+        save_def->data = d;
+    else
+        save_def->data = NULL;
+    /* printf("set_data: d=%p,f=%d\n",d,*f); */
+}
+
+static PyObject *
+fortran_getattr(PyFortranObject *fp, char *name) {
+    int i,j,k,flag;
+    if (fp->dict != NULL) {
+        PyObject *v = PyDict_GetItemString(fp->dict, name);
+        if (v != NULL) {
+            Py_INCREF(v);
+            return v;
+        }
+    }
+    for (i=0,j=1;i<fp->len && (j=strcmp(name,fp->defs[i].name));i++);
+    if (j==0)
+        if (fp->defs[i].rank!=-1) {          /* F90 allocatable array */
+            if (fp->defs[i].func==NULL) return NULL;
+            for(k=0;k<fp->defs[i].rank;++k)
+                fp->defs[i].dims.d[k]=-1;
+            save_def = &fp->defs[i];
+            (*(fp->defs[i].func))(&fp->defs[i].rank,fp->defs[i].dims.d,set_data,&flag);
+            if (flag==2)
+                k = fp->defs[i].rank + 1;
+            else
+                k = fp->defs[i].rank;
+            if (fp->defs[i].data !=NULL) {   /* array is allocated */
+                PyObject *v = PyArray_New(&PyArray_Type, k, fp->defs[i].dims.d,
+                                          fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY,
+                                          NULL);
+                if (v==NULL) return NULL;
+                /* Py_INCREF(v); */
+                return v;
+            } else {                         /* array is not allocated */
+                Py_RETURN_NONE;
+            }
+        }
+    if (strcmp(name,"__dict__")==0) {
+        Py_INCREF(fp->dict);
+        return fp->dict;
+    }
+    if (strcmp(name,"__doc__")==0) {
+#if PY_VERSION_HEX >= 0x03000000
+        PyObject *s = PyUnicode_FromString(""), *s2, *s3;
+        for (i=0;i<fp->len;i++) {
+            s2 = fortran_doc(fp->defs[i]);
+            s3 = PyUnicode_Concat(s, s2);
+            Py_DECREF(s2);
+            Py_DECREF(s);
+            s = s3;
+        }
+#else
+        PyObject *s = PyString_FromString("");
+        for (i=0;i<fp->len;i++)
+            PyString_ConcatAndDel(&s,fortran_doc(fp->defs[i]));
+#endif
+        if (PyDict_SetItemString(fp->dict, name, s))
+            return NULL;
+        return s;
+    }
+    if ((strcmp(name,"_cpointer")==0) && (fp->len==1)) {
+        PyObject *cobj = F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data),NULL);
+        if (PyDict_SetItemString(fp->dict, name, cobj))
+            return NULL;
+        return cobj;
+    }
+#if PY_VERSION_HEX >= 0x03000000
+    if (1) {
+        PyObject *str, *ret;
+        str = PyUnicode_FromString(name);
+        ret = PyObject_GenericGetAttr((PyObject *)fp, str);
+        Py_DECREF(str);
+        return ret;
+    }
+#else
+    return Py_FindMethod(fortran_methods, (PyObject *)fp, name);
+#endif
+}
+
+static int
+fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) {
+    int i,j,flag;
+    PyArrayObject *arr = NULL;
+    for (i=0,j=1;i<fp->len && (j=strcmp(name,fp->defs[i].name));i++);
+    if (j==0) {
+        if (fp->defs[i].rank==-1) {
+            PyErr_SetString(PyExc_AttributeError,"over-writing fortran routine");
+            return -1;
+        }
+        if (fp->defs[i].func!=NULL) { /* is allocatable array */
+            npy_intp dims[F2PY_MAX_DIMS];
+            int k;
+            save_def = &fp->defs[i];
+            if (v!=Py_None) {    /* set new value (reallocate if needed --
+                                    see f2py generated code for more
+                                    details ) */
+                for(k=0;k<fp->defs[i].rank;k++) dims[k]=-1;
+                if ((arr = array_from_pyobj(fp->defs[i].type,dims,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL)
+                    return -1;
+                (*(fp->defs[i].func))(&fp->defs[i].rank,PyArray_DIMS(arr),set_data,&flag);
+            } else {             /* deallocate */
+                for(k=0;k<fp->defs[i].rank;k++) dims[k]=0;
+                (*(fp->defs[i].func))(&fp->defs[i].rank,dims,set_data,&flag);
+                for(k=0;k<fp->defs[i].rank;k++) dims[k]=-1;
+            }
+            memcpy(fp->defs[i].dims.d,dims,fp->defs[i].rank*sizeof(npy_intp));
+        } else {                 /* not allocatable array */
+            if ((arr = array_from_pyobj(fp->defs[i].type,fp->defs[i].dims.d,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL)
+                return -1;
+        }
+        if (fp->defs[i].data!=NULL) { /* copy Python object to Fortran array */
+            npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d,PyArray_NDIM(arr));
+            if (s==-1)
+                s = PyArray_MultiplyList(PyArray_DIMS(arr),PyArray_NDIM(arr));
+            if (s<0 ||
+                (memcpy(fp->defs[i].data,PyArray_DATA(arr),s*PyArray_ITEMSIZE(arr)))==NULL) {
+                if ((PyObject*)arr!=v) {
+                    Py_DECREF(arr);
+                }
+                return -1;
+            }
+            if ((PyObject*)arr!=v) {
+                Py_DECREF(arr);
+            }
+        } else return (fp->defs[i].func==NULL?-1:0);
+        return 0; /* successful */
+    }
+    if (fp->dict == NULL) {
+        fp->dict = PyDict_New();
+        if (fp->dict == NULL)
+            return -1;
+    }
+    if (v == NULL) {
+        int rv = PyDict_DelItemString(fp->dict, name);
+        if (rv < 0)
+            PyErr_SetString(PyExc_AttributeError,"delete non-existing fortran attribute");
+        return rv;
+    }
+    else
+        return PyDict_SetItemString(fp->dict, name, v);
+}
+
+static PyObject*
+fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) {
+    int i = 0;
+    /*  printf("fortran call
+        name=%s,func=%p,data=%p,%p\n",fp->defs[i].name,
+        fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */
+    if (fp->defs[i].rank==-1) {/* is Fortran routine */
+        if (fp->defs[i].func==NULL) {
+            PyErr_Format(PyExc_RuntimeError, "no function to call");
+            return NULL;
+        }
+        else if (fp->defs[i].data==NULL)
+            /* dummy routine */
+            return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,NULL);
+        else
+            return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,
+                                                        (void *)fp->defs[i].data);
+    }
+    PyErr_Format(PyExc_TypeError, "this fortran object is not callable");
+    return NULL;
+}
+
+static PyObject *
+fortran_repr(PyFortranObject *fp)
+{
+    PyObject *name = NULL, *repr = NULL;
+    name = PyObject_GetAttrString((PyObject *)fp, "__name__");
+    PyErr_Clear();
+#if PY_VERSION_HEX >= 0x03000000
+    if (name != NULL && PyUnicode_Check(name)) {
+        repr = PyUnicode_FromFormat("<fortran %U>", name);
+    }
+    else {
+        repr = PyUnicode_FromString("<fortran object>");
+    }
+#else
+    if (name != NULL && PyString_Check(name)) {
+        repr = PyString_FromFormat("<fortran %s>", PyString_AsString(name));
+    }
+    else {
+        repr = PyString_FromString("<fortran object>");
+    }
+#endif
+    Py_XDECREF(name);
+    return repr;
+}
+
+
+PyTypeObject PyFortran_Type = {
+#if PY_VERSION_HEX >= 0x03000000
+    PyVarObject_HEAD_INIT(NULL, 0)
+#else
+    PyObject_HEAD_INIT(0)
+    0,                            /*ob_size*/
+#endif
+    "fortran",                    /*tp_name*/
+    sizeof(PyFortranObject),      /*tp_basicsize*/
+    0,                            /*tp_itemsize*/
+    /* methods */
+    (destructor)fortran_dealloc,  /*tp_dealloc*/
+    0,                            /*tp_print*/
+    (getattrfunc)fortran_getattr, /*tp_getattr*/
+    (setattrfunc)fortran_setattr, /*tp_setattr*/
+    0,                            /*tp_compare/tp_reserved*/
+    (reprfunc)fortran_repr,       /*tp_repr*/
+    0,                            /*tp_as_number*/
+    0,                            /*tp_as_sequence*/
+    0,                            /*tp_as_mapping*/
+    0,                            /*tp_hash*/
+    (ternaryfunc)fortran_call,    /*tp_call*/
+};
+
+/************************* f2py_report_atexit *******************************/
+
+#ifdef F2PY_REPORT_ATEXIT
+static int passed_time = 0;
+static int passed_counter = 0;
+static int passed_call_time = 0;
+static struct timeb start_time;
+static struct timeb stop_time;
+static struct timeb start_call_time;
+static struct timeb stop_call_time;
+static int cb_passed_time = 0;
+static int cb_passed_counter = 0;
+static int cb_passed_call_time = 0;
+static struct timeb cb_start_time;
+static struct timeb cb_stop_time;
+static struct timeb cb_start_call_time;
+static struct timeb cb_stop_call_time;
+
+extern void f2py_start_clock(void) { ftime(&start_time); }
+extern
+void f2py_start_call_clock(void) {
+    f2py_stop_clock();
+    ftime(&start_call_time);
+}
+extern
+void f2py_stop_clock(void) {
+    ftime(&stop_time);
+    passed_time += 1000*(stop_time.time - start_time.time);
+    passed_time += stop_time.millitm - start_time.millitm;
+}
+extern
+void f2py_stop_call_clock(void) {
+    ftime(&stop_call_time);
+    passed_call_time += 1000*(stop_call_time.time - start_call_time.time);
+    passed_call_time += stop_call_time.millitm - start_call_time.millitm;
+    passed_counter += 1;
+    f2py_start_clock();
+}
+
+extern void f2py_cb_start_clock(void) { ftime(&cb_start_time); }
+extern
+void f2py_cb_start_call_clock(void) {
+    f2py_cb_stop_clock();
+    ftime(&cb_start_call_time);
+}
+extern
+void f2py_cb_stop_clock(void) {
+    ftime(&cb_stop_time);
+    cb_passed_time += 1000*(cb_stop_time.time - cb_start_time.time);
+    cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm;
+}
+extern
+void f2py_cb_stop_call_clock(void) {
+    ftime(&cb_stop_call_time);
+    cb_passed_call_time += 1000*(cb_stop_call_time.time - cb_start_call_time.time);
+    cb_passed_call_time += cb_stop_call_time.millitm - cb_start_call_time.millitm;
+    cb_passed_counter += 1;
+    f2py_cb_start_clock();
+}
+
+static int f2py_report_on_exit_been_here = 0;
+extern
+void f2py_report_on_exit(int exit_flag,void *name) {
+    if (f2py_report_on_exit_been_here) {
+        fprintf(stderr,"
%s\n",(char*)name); + return; + } + f2py_report_on_exit_been_here = 1; + fprintf(stderr," /-----------------------\\\n"); + fprintf(stderr," < F2PY performance report >\n"); + fprintf(stderr," \\-----------------------/\n"); + fprintf(stderr,"Overall time spent in ...\n"); + fprintf(stderr,"(a) wrapped (Fortran/C) functions : %8d msec\n", + passed_call_time); + fprintf(stderr,"(b) f2py interface, %6d calls : %8d msec\n", + passed_counter,passed_time); + fprintf(stderr,"(c) call-back (Python) functions : %8d msec\n", + cb_passed_call_time); + fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n", + cb_passed_counter,cb_passed_time); + + fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", + passed_call_time-cb_passed_call_time-cb_passed_time); + fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); + fprintf(stderr,"Exit status: %d\n",exit_flag); + fprintf(stderr,"Modules : %s\n",(char*)name); +} +#endif + +/********************** report on array copy ****************************/ + +#ifdef F2PY_REPORT_ON_ARRAY_COPY +static void f2py_report_on_array_copy(PyArrayObject* arr) { + const npy_intp arr_size = PyArray_Size((PyObject *)arr); + if (arr_size>F2PY_REPORT_ON_ARRAY_COPY) { + fprintf(stderr,"copied an array: size=%ld, elsize=%"NPY_INTP_FMT"\n", + arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); + } +} +static void f2py_report_on_array_copy_fromany(void) { + fprintf(stderr,"created an array from object\n"); +} + +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR f2py_report_on_array_copy((PyArrayObject *)arr) +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() +#else +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY +#endif + + +/************************* array_from_obj *******************************/ + +/* + * File: array_from_pyobj.c + * + * Description: + * ------------ + * Provides array_from_pyobj function that returns a contiguous array + * object with the given dimensions and required storage order, either + * in row-major (C) or column-major (Fortran) order. The function + * array_from_pyobj is very flexible about its Python object argument + * that can be any number, list, tuple, or array. + * + * array_from_pyobj is used in f2py generated Python extension + * modules. 
+ *
+ * Author: Pearu Peterson <pearu@cens.ioc.ee>
+ * Created: 13-16 January 2002
+ * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $
+ */
+
+static int check_and_fix_dimensions(const PyArrayObject* arr,
+                                    const int rank,
+                                    npy_intp *dims);
+
+static int
+count_negative_dimensions(const int rank,
+                          const npy_intp *dims) {
+    int i=0,r=0;
+    while (i<rank) {
+        if (dims[i] < 0) ++r;
+        ++i;
+    }
+    return r;
+}
+
+#ifdef DEBUG_COPY_ND_ARRAY
+void dump_dims(int rank, npy_intp* dims) {
+    int i;
+    printf("[");
+    for(i=0;i<rank;++i) {
+        printf("%3" NPY_INTP_FMT, dims[i]);
+    }
+    printf("]\n");
+}
+void dump_attrs(const PyArrayObject* obj) {
+    const PyArrayObject_fields *arr = (const PyArrayObject_fields*) obj;
+    int rank = PyArray_NDIM(obj);
+    npy_intp size = PyArray_Size((PyObject *)obj);
+    printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n",
+           rank,arr->flags,size);
+    printf("\tstrides = ");
+    dump_dims(rank,arr->strides);
+    printf("\tdimensions = ");
+    dump_dims(rank,arr->dimensions);
+}
+#endif
+
+#define SWAPTYPE(a,b,t) {t c; c = (a); (a) = (b); (b) = c; }
+
+static int swap_arrays(PyArrayObject* obj1, PyArrayObject* obj2) {
+    PyArrayObject_fields *arr1 = (PyArrayObject_fields*) obj1,
+                         *arr2 = (PyArrayObject_fields*) obj2;
+    SWAPTYPE(arr1->data,arr2->data,char*);
+    SWAPTYPE(arr1->nd,arr2->nd,int);
+    SWAPTYPE(arr1->dimensions,arr2->dimensions,npy_intp*);
+    SWAPTYPE(arr1->strides,arr2->strides,npy_intp*);
+    SWAPTYPE(arr1->base,arr2->base,PyObject*);
+    SWAPTYPE(arr1->descr,arr2->descr,PyArray_Descr*);
+    SWAPTYPE(arr1->flags,arr2->flags,int);
+    /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */
+    return 0;
+}
+
+#define ARRAY_ISCOMPATIBLE(arr,type_num)                              \
+    (  (PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num))      \
+     ||(PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num))          \
+     ||(PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num))      \
+     ||(PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num))            \
+       )
+
+extern
+PyArrayObject* array_from_pyobj(const int type_num,
+                                npy_intp *dims,
+                                const int rank,
+                                const int intent,
+                                PyObject *obj) {
+    /*
+     * Note about reference counting
+     * -----------------------------
+     * If the caller returns the array to Python, it must be done with
+     * Py_BuildValue("N",arr).
+     * Otherwise, if obj!=arr then the caller must call Py_DECREF(arr).
+     *
+     * Note on intent(cache,out,..)
+     * ---------------------
+     * Don't expect correct data when returning intent(cache) array.
+ * + */ + char mess[200]; + PyArrayObject *arr = NULL; + PyArray_Descr *descr; + char typechar; + int elsize; + + if ((intent & F2PY_INTENT_HIDE) + || ((intent & F2PY_INTENT_CACHE) && (obj==Py_None)) + || ((intent & F2PY_OPTIONAL) && (obj==Py_None)) + ) { + /* intent(cache), optional, intent(hide) */ + if (count_negative_dimensions(rank,dims) > 0) { + int i; + strcpy(mess, "failed to create intent(cache|hide)|optional array" + "-- must have defined dimensions but got ("); + for(i=0;ielsize = 1; + descr->type = NPY_CHARLTR; + } + elsize = descr->elsize; + typechar = descr->type; + Py_DECREF(descr); + if (PyArray_Check(obj)) { + arr = (PyArrayObject *)obj; + + if (intent & F2PY_INTENT_CACHE) { + /* intent(cache) */ + if (PyArray_ISONESEGMENT(arr) + && PyArray_ITEMSIZE(arr)>=elsize) { + if (check_and_fix_dimensions(arr, rank, dims)) { + return NULL; + } + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + return arr; + } + strcpy(mess, "failed to initialize intent(cache) array"); + if (!PyArray_ISONESEGMENT(arr)) + strcat(mess, " -- input must be in one segment"); + if (PyArray_ITEMSIZE(arr)type,typechar); + if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) + sprintf(mess+strlen(mess)," -- input not %d-aligned", F2PY_GET_ALIGNMENT(intent)); + PyErr_SetString(PyExc_ValueError,mess); + return NULL; + } + + /* here we have always intent(in) or intent(inplace) */ + + { + PyArrayObject * retarr; + retarr = (PyArrayObject *) \ + PyArray_New(&PyArray_Type, PyArray_NDIM(arr), PyArray_DIMS(arr), type_num, + NULL,NULL,1, + !(intent&F2PY_INTENT_C), + NULL); + if (retarr==NULL) + return NULL; + F2PY_REPORT_ON_ARRAY_COPY_FROMARR; + if (PyArray_CopyInto(retarr, arr)) { + Py_DECREF(retarr); + return NULL; + } + if (intent & F2PY_INTENT_INPLACE) { + if (swap_arrays(arr,retarr)) + return NULL; /* XXX: set exception */ + Py_XDECREF(retarr); + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + } else { + arr = retarr; + } + } + return arr; + } + + if ((intent & F2PY_INTENT_INOUT) || + (intent & F2PY_INTENT_INPLACE) || + (intent & F2PY_INTENT_CACHE)) { + PyErr_SetString(PyExc_TypeError, + "failed to initialize intent(inout|inplace|cache) " + "array, input not an array"); + return NULL; + } + + { + PyArray_Descr * descr = PyArray_DescrFromType(type_num); + /* compatibility with NPY_CHAR */ + if (type_num == NPY_STRING) { + PyArray_DESCR_REPLACE(descr); + if (descr == NULL) { + return NULL; + } + descr->elsize = 1; + descr->type = NPY_CHARLTR; + } + F2PY_REPORT_ON_ARRAY_COPY_FROMANY; + arr = (PyArrayObject *) \ + PyArray_FromAny(obj, descr, 0,0, + ((intent & F2PY_INTENT_C)?NPY_ARRAY_CARRAY:NPY_ARRAY_FARRAY) \ + | NPY_ARRAY_FORCECAST, NULL); + if (arr==NULL) + return NULL; + if (check_and_fix_dimensions(arr, rank, dims)) { + return NULL; + } + return arr; + } + +} + +/*****************************************/ +/* Helper functions for array_from_pyobj */ +/*****************************************/ + +static +int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp *dims) +{ + /* + * This function fills in blanks (that are -1's) in dims list using + * the dimensions from arr. It also checks that non-blank dims will + * match with the corresponding values in arr dimensions. + * + * Returns 0 if the function is successful. + * + * If an error condition is detected, an exception is set and 1 is returned. 
+ */ + const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1; +#ifdef DEBUG_COPY_ND_ARRAY + dump_attrs(arr); + printf("check_and_fix_dimensions:init: dims="); + dump_dims(rank,dims); +#endif + if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ + npy_intp new_size = 1; + int free_axe = -1; + int i; + npy_intp d; + /* Fill dims where -1 or 0; check dimensions; calc new_size; */ + for(i=0;i= 0) { + if (d>1 && dims[i]!=d) { + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); + return 1; + } + if (!dims[i]) dims[i] = 1; + } else { + dims[i] = d ? d : 1; + } + new_size *= dims[i]; + } + for(i=PyArray_NDIM(arr);i1) { + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be %" NPY_INTP_FMT + " but got 0 (not defined).\n", + i, dims[i]); + return 1; + } else if (free_axe<0) + free_axe = i; + else + dims[i] = 1; + if (free_axe>=0) { + dims[free_axe] = arr_size/new_size; + new_size *= dims[free_axe]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT + " (maybe too many free indices)\n", + new_size, arr_size); + return 1; + } + } else if (rank==PyArray_NDIM(arr)) { + npy_intp new_size = 1; + int i; + npy_intp d; + for (i=0; i=0) { + if (d > 1 && d!=dims[i]) { + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); + return 1; + } + if (!dims[i]) dims[i] = 1; + } else dims[i] = d; + new_size *= dims[i]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT "\n", + new_size, arr_size); + return 1; + } + } else { /* [[1,2]] -> [[1],[2]] */ + int i,j; + npy_intp d; + int effrank; + npy_intp size; + for (i=0,effrank=0;i1) ++effrank; + if (dims[rank-1]>=0) + if (effrank>rank) { + PyErr_Format(PyExc_ValueError, + "too many axes: %d (effrank=%d), " + "expected rank=%d\n", + PyArray_NDIM(arr), effrank, rank); + return 1; + } + + for (i=0,j=0;i=PyArray_NDIM(arr)) d = 1; + else d = PyArray_DIM(arr,j++); + if (dims[i]>=0) { + if (d>1 && d!=dims[i]) { + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT + " (real index=%d)\n", + i, dims[i], d, j-1); + return 1; + } + if (!dims[i]) dims[i] = 1; + } else + dims[i] = d; + } + + for (i=rank;i [1,2,3,4] */ + while (j=PyArray_NDIM(arr)) d = 1; + else d = PyArray_DIM(arr,j++); + dims[rank-1] *= d; + } + for (i=0,size=1;i= 0x03000000 +#define PyString_Check PyBytes_Check +#define PyString_GET_SIZE PyBytes_GET_SIZE +#define PyString_AS_STRING PyBytes_AS_STRING +#define PyString_FromString PyBytes_FromString +#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize +#define PyString_ConcatAndDel PyBytes_ConcatAndDel +#define PyString_AsString PyBytes_AsString + +#define PyInt_Check PyLong_Check +#define PyInt_FromLong PyLong_FromLong +#define PyInt_AS_LONG PyLong_AsLong +#define PyInt_AsLong PyLong_AsLong + +#define PyNumber_Int PyNumber_Long + +#else + +#define PyUString_FromStringAndSize PyString_FromStringAndSize +#endif + + +#ifdef F2PY_REPORT_ATEXIT +#include + extern void f2py_start_clock(void); + extern void f2py_stop_clock(void); + extern void f2py_start_call_clock(void); + extern void f2py_stop_call_clock(void); + extern void f2py_cb_start_clock(void); + extern void 
f2py_cb_stop_clock(void); + extern void f2py_cb_start_call_clock(void); + extern void f2py_cb_stop_call_clock(void); + extern void f2py_report_on_exit(int,void*); +#endif + +#ifdef DMALLOC +#include "dmalloc.h" +#endif + +/* Fortran object interface */ + +/* +123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 + +PyFortranObject represents various Fortran objects: +Fortran (module) routines, COMMON blocks, module data. + +Author: Pearu Peterson +*/ + +#define F2PY_MAX_DIMS 40 + +typedef void (*f2py_set_data_func)(char*,npy_intp*); +typedef void (*f2py_void_func)(void); +typedef void (*f2py_init_func)(int*,npy_intp*,f2py_set_data_func,int*); + + /*typedef void* (*f2py_c_func)(void*,...);*/ + +typedef void *(*f2pycfunc)(void); + +typedef struct { + char *name; /* attribute (array||routine) name */ + int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, + || rank=-1 for Fortran routine */ + struct {npy_intp d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ + int type; /* PyArray_ || not used */ + char *data; /* pointer to array || Fortran routine */ + f2py_init_func func; /* initialization function for + allocatable arrays: + func(&rank,dims,set_ptr_func,name,len(name)) + || C/API wrapper for Fortran routine */ + char *doc; /* documentation string; only recommended + for routines. */ +} FortranDataDef; + +typedef struct { + PyObject_HEAD + int len; /* Number of attributes */ + FortranDataDef *defs; /* An array of FortranDataDef's */ + PyObject *dict; /* Fortran object attribute dictionary */ +} PyFortranObject; + +#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) +#define PyFortran_Check1(op) (0==strcmp(Py_TYPE(op)->tp_name,"fortran")) + + extern PyTypeObject PyFortran_Type; + extern int F2PyDict_SetItemString(PyObject* dict, char *name, PyObject *obj); + extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init); + extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs); + +#if PY_VERSION_HEX >= 0x03000000 + +PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); +void * F2PyCapsule_AsVoidPtr(PyObject *obj); +int F2PyCapsule_Check(PyObject *ptr); + +#else + +PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)); +void * F2PyCapsule_AsVoidPtr(PyObject *ptr); +int F2PyCapsule_Check(PyObject *ptr); + +#endif + +#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) +#define F2PY_INTENT_IN 1 +#define F2PY_INTENT_INOUT 2 +#define F2PY_INTENT_OUT 4 +#define F2PY_INTENT_HIDE 8 +#define F2PY_INTENT_CACHE 16 +#define F2PY_INTENT_COPY 32 +#define F2PY_INTENT_C 64 +#define F2PY_OPTIONAL 128 +#define F2PY_INTENT_INPLACE 256 +#define F2PY_INTENT_ALIGNED4 512 +#define F2PY_INTENT_ALIGNED8 1024 +#define F2PY_INTENT_ALIGNED16 2048 + +#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) +#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) +#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) +#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) + +#define F2PY_GET_ALIGNMENT(intent) \ + (F2PY_ALIGN4(intent) ? 4 : \ + (F2PY_ALIGN8(intent) ? 8 : \ + (F2PY_ALIGN16(intent) ? 
16 : 1) )) +#define F2PY_CHECK_ALIGNMENT(arr, intent) ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) + + extern PyArrayObject* array_from_pyobj(const int type_num, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj); + extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out); + +#ifdef DEBUG_COPY_ND_ARRAY + extern void dump_attrs(const PyArrayObject* arr); +#endif + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FORTRANOBJECT_H */ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/__init__.pyc new file mode 100644 index 0000000..2ebc91d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c new file mode 100644 index 0000000..7f46303 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -0,0 +1,224 @@ +/* File: wrapmodule.c + * This file is auto-generated with f2py (version:2_1330). + * Hand edited by Pearu. + * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, + * written by Pearu Peterson . + * See http://cens.ioc.ee/projects/f2py2e/ + * Generation date: Fri Oct 21 22:41:12 2005 + * $Revision:$ + * $Date:$ + * Do not edit this file directly unless you know what you are doing!!! + */ +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** See f2py2e/cfuncs.py: includes ***********************/ +#include "Python.h" +#include "fortranobject.h" +#include + +static PyObject *wrap_error; +static PyObject *wrap_module; + +/************************************ call ************************************/ +static char doc_f2py_rout_wrap_call[] = "\ +Function signature:\n\ + arr = call(type_num,dims,intent,obj)\n\ +Required arguments:\n" +" type_num : input int\n" +" dims : input int-sequence\n" +" intent : input int\n" +" obj : input python object\n" +"Return objects:\n" +" arr : array"; +static PyObject *f2py_rout_wrap_call(PyObject *capi_self, + PyObject *capi_args) { + PyObject * volatile capi_buildvalue = NULL; + int type_num = 0; + npy_intp *dims = NULL; + PyObject *dims_capi = Py_None; + int rank = 0; + int intent = 0; + PyArrayObject *capi_arr_tmp = NULL; + PyObject *arr_capi = Py_None; + int i; + + if (!PyArg_ParseTuple(capi_args,"iOiO|:wrap.call",\ + &type_num,&dims_capi,&intent,&arr_capi)) + return NULL; + rank = PySequence_Length(dims_capi); + dims = malloc(rank*sizeof(npy_intp)); + for (i=0;ikind, + PyArray_DESCR(arr)->type, + PyArray_TYPE(arr), + PyArray_ITEMSIZE(arr), + PyArray_DESCR(arr)->alignment, + PyArray_FLAGS(arr), + PyArray_ITEMSIZE(arr)); +} + +static PyMethodDef f2py_module_methods[] = { + + {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, + {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, + {NULL,NULL} +}; + +#if PY_VERSION_HEX >= 0x03000000 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "test_array_from_pyobj_ext", + NULL, + -1, + f2py_module_methods, + NULL, + NULL, + NULL, + NULL +}; +#endif + +#if PY_VERSION_HEX >= 0x03000000 +#define RETVAL 
m +PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { +#else +#define RETVAL +PyMODINIT_FUNC inittest_array_from_pyobj_ext(void) { +#endif + PyObject *m,*d, *s; +#if PY_VERSION_HEX >= 0x03000000 + m = wrap_module = PyModule_Create(&moduledef); +#else + m = wrap_module = Py_InitModule("test_array_from_pyobj_ext", f2py_module_methods); +#endif + Py_TYPE(&PyFortran_Type) = &PyType_Type; + import_array(); + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap (failed to import numpy)"); + d = PyModule_GetDict(m); + s = PyString_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" +" arr = call(type_num,dims,intent,obj)\n" +"."); + PyDict_SetItemString(d, "__doc__", s); + wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); + Py_DECREF(s); + PyDict_SetItemString(d, "F2PY_INTENT_IN", PyInt_FromLong(F2PY_INTENT_IN)); + PyDict_SetItemString(d, "F2PY_INTENT_INOUT", PyInt_FromLong(F2PY_INTENT_INOUT)); + PyDict_SetItemString(d, "F2PY_INTENT_OUT", PyInt_FromLong(F2PY_INTENT_OUT)); + PyDict_SetItemString(d, "F2PY_INTENT_HIDE", PyInt_FromLong(F2PY_INTENT_HIDE)); + PyDict_SetItemString(d, "F2PY_INTENT_CACHE", PyInt_FromLong(F2PY_INTENT_CACHE)); + PyDict_SetItemString(d, "F2PY_INTENT_COPY", PyInt_FromLong(F2PY_INTENT_COPY)); + PyDict_SetItemString(d, "F2PY_INTENT_C", PyInt_FromLong(F2PY_INTENT_C)); + PyDict_SetItemString(d, "F2PY_OPTIONAL", PyInt_FromLong(F2PY_OPTIONAL)); + PyDict_SetItemString(d, "F2PY_INTENT_INPLACE", PyInt_FromLong(F2PY_INTENT_INPLACE)); + PyDict_SetItemString(d, "NPY_BOOL", PyInt_FromLong(NPY_BOOL)); + PyDict_SetItemString(d, "NPY_BYTE", PyInt_FromLong(NPY_BYTE)); + PyDict_SetItemString(d, "NPY_UBYTE", PyInt_FromLong(NPY_UBYTE)); + PyDict_SetItemString(d, "NPY_SHORT", PyInt_FromLong(NPY_SHORT)); + PyDict_SetItemString(d, "NPY_USHORT", PyInt_FromLong(NPY_USHORT)); + PyDict_SetItemString(d, "NPY_INT", PyInt_FromLong(NPY_INT)); + PyDict_SetItemString(d, "NPY_UINT", PyInt_FromLong(NPY_UINT)); + PyDict_SetItemString(d, "NPY_INTP", PyInt_FromLong(NPY_INTP)); + PyDict_SetItemString(d, "NPY_UINTP", PyInt_FromLong(NPY_UINTP)); + PyDict_SetItemString(d, "NPY_LONG", PyInt_FromLong(NPY_LONG)); + PyDict_SetItemString(d, "NPY_ULONG", PyInt_FromLong(NPY_ULONG)); + PyDict_SetItemString(d, "NPY_LONGLONG", PyInt_FromLong(NPY_LONGLONG)); + PyDict_SetItemString(d, "NPY_ULONGLONG", PyInt_FromLong(NPY_ULONGLONG)); + PyDict_SetItemString(d, "NPY_FLOAT", PyInt_FromLong(NPY_FLOAT)); + PyDict_SetItemString(d, "NPY_DOUBLE", PyInt_FromLong(NPY_DOUBLE)); + PyDict_SetItemString(d, "NPY_LONGDOUBLE", PyInt_FromLong(NPY_LONGDOUBLE)); + PyDict_SetItemString(d, "NPY_CFLOAT", PyInt_FromLong(NPY_CFLOAT)); + PyDict_SetItemString(d, "NPY_CDOUBLE", PyInt_FromLong(NPY_CDOUBLE)); + PyDict_SetItemString(d, "NPY_CLONGDOUBLE", PyInt_FromLong(NPY_CLONGDOUBLE)); + PyDict_SetItemString(d, "NPY_OBJECT", PyInt_FromLong(NPY_OBJECT)); + PyDict_SetItemString(d, "NPY_STRING", PyInt_FromLong(NPY_STRING)); + PyDict_SetItemString(d, "NPY_UNICODE", PyInt_FromLong(NPY_UNICODE)); + PyDict_SetItemString(d, "NPY_VOID", PyInt_FromLong(NPY_VOID)); + PyDict_SetItemString(d, "NPY_NTYPES", PyInt_FromLong(NPY_NTYPES)); + PyDict_SetItemString(d, "NPY_NOTYPE", PyInt_FromLong(NPY_NOTYPE)); + PyDict_SetItemString(d, "NPY_USERDEF", PyInt_FromLong(NPY_USERDEF)); + + PyDict_SetItemString(d, "CONTIGUOUS", PyInt_FromLong(NPY_ARRAY_C_CONTIGUOUS)); + PyDict_SetItemString(d, "FORTRAN", PyInt_FromLong(NPY_ARRAY_F_CONTIGUOUS)); + PyDict_SetItemString(d, "OWNDATA", PyInt_FromLong(NPY_ARRAY_OWNDATA)); 
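+ /* (Editor's note: like the F2PY_INTENT_* and NPY_* entries above, the + NPY_ARRAY_* values exported below are plain ints in the module dict, + so the Python tests can compose flag masks and decode the flags word + returned by array_attrs().) */ 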
+ PyDict_SetItemString(d, "FORCECAST", PyInt_FromLong(NPY_ARRAY_FORCECAST)); + PyDict_SetItemString(d, "ENSURECOPY", PyInt_FromLong(NPY_ARRAY_ENSURECOPY)); + PyDict_SetItemString(d, "ENSUREARRAY", PyInt_FromLong(NPY_ARRAY_ENSUREARRAY)); + PyDict_SetItemString(d, "ALIGNED", PyInt_FromLong(NPY_ARRAY_ALIGNED)); + PyDict_SetItemString(d, "WRITEABLE", PyInt_FromLong(NPY_ARRAY_WRITEABLE)); + PyDict_SetItemString(d, "UPDATEIFCOPY", PyInt_FromLong(NPY_ARRAY_UPDATEIFCOPY)); + PyDict_SetItemString(d, "WRITEBACKIFCOPY", PyInt_FromLong(NPY_ARRAY_WRITEBACKIFCOPY)); + + PyDict_SetItemString(d, "BEHAVED", PyInt_FromLong(NPY_ARRAY_BEHAVED)); + PyDict_SetItemString(d, "BEHAVED_NS", PyInt_FromLong(NPY_ARRAY_BEHAVED_NS)); + PyDict_SetItemString(d, "CARRAY", PyInt_FromLong(NPY_ARRAY_CARRAY)); + PyDict_SetItemString(d, "FARRAY", PyInt_FromLong(NPY_ARRAY_FARRAY)); + PyDict_SetItemString(d, "CARRAY_RO", PyInt_FromLong(NPY_ARRAY_CARRAY_RO)); + PyDict_SetItemString(d, "FARRAY_RO", PyInt_FromLong(NPY_ARRAY_FARRAY_RO)); + PyDict_SetItemString(d, "DEFAULT", PyInt_FromLong(NPY_ARRAY_DEFAULT)); + PyDict_SetItemString(d, "UPDATE_ALL", PyInt_FromLong(NPY_ARRAY_UPDATE_ALL)); + + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap"); + +#ifdef F2PY_REPORT_ATEXIT + on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); +#endif + + return RETVAL; +} +#ifdef __cplusplus +} +#endif diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap new file mode 100644 index 0000000..2665f89 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(rk="double")) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 new file mode 100644 index 0000000..b301710 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 @@ -0,0 +1,34 @@ + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 new file mode 100644 index 0000000..cbe6317 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 @@ -0,0 +1,41 @@ + +module mod + +contains + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum + + +end module mod diff --git 
a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 new file mode 100644 index 0000000..337465a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 @@ -0,0 +1,19 @@ +subroutine sum_with_use(x, res) + use precision + + implicit none + + real(kind=rk), intent(in) :: x(:) + real(kind=rk), intent(out) :: res + + integer :: i + + !print *, "size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + + end subroutine diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 new file mode 100644 index 0000000..ed6c70c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 @@ -0,0 +1,4 @@ +module precision + integer, parameter :: rk = selected_real_kind(8) + integer, parameter :: ik = selected_real_kind(4) +end module diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/common/block.f b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/common/block.f new file mode 100644 index 0000000..7ea7968 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/common/block.f @@ -0,0 +1,11 @@ + SUBROUTINE INITCB + DOUBLE PRECISION LONG + CHARACTER STRING + INTEGER OK + + COMMON /BLOCK/ LONG, STRING, OK + LONG = 1.0 + STRING = '2' + OK = 3 + RETURN + END diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/kind/foo.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/kind/foo.f90 new file mode 100644 index 0000000..d3d15cf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/kind/foo.f90 @@ -0,0 +1,20 @@ + + +subroutine selectedrealkind(p, r, res) + implicit none + + integer, intent(in) :: p, r + !f2py integer :: r=0 + integer, intent(out) :: res + res = selected_real_kind(p, r) + +end subroutine + +subroutine selectedintkind(p, res) + implicit none + + integer, intent(in) :: p + integer, intent(out) :: res + res = selected_int_kind(p) + +end subroutine diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/mixed/foo.f b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/mixed/foo.f new file mode 100644 index 0000000..c347425 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/mixed/foo.f @@ -0,0 +1,5 @@ + subroutine bar11(a) +cf2py intent(out) a + integer a + a = 11 + end diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 new file mode 100644 index 0000000..7543a6a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 @@ -0,0 +1,8 @@ + module foo_fixed + contains + subroutine bar12(a) +!f2py intent(out) a + integer a + a = 12 + end subroutine bar12 + end module foo_fixed diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 new file mode 100644 index 0000000..c1b641f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 @@ -0,0 +1,8 @@ +module foo_free +contains + subroutine bar13(a) + !f2py 
intent(out) a + integer a + a = 13 + end subroutine bar13 +end module foo_free diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 new file mode 100644 index 0000000..ac90ced --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 @@ -0,0 +1,57 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3._sp + real(dp), parameter :: three_d = 3._dp + integer(ii), parameter :: three_i = 3_ii + integer(il), parameter :: three_l = 3_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + + +subroutine foo_no(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3. + real(dp), parameter :: three_d = 3. + integer(ii), parameter :: three_i = 3 + integer(il), parameter :: three_l = 3 + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + +subroutine foo_sum(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 2._sp + 1._sp + real(dp), parameter :: three_d = 1._dp + 2._dp + integer(ii), parameter :: three_i = 2_ii + 1_ii + integer(il), parameter :: three_l = 1_il + 2_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 new file mode 100644 index 0000000..e51f5e9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 @@ -0,0 +1,15 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! 
used, for instance Pi = 3._dp +subroutine foo_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + integer(ii), parameter :: two = 2_ii + integer(ii), parameter :: six = three * 1_ii * two + + x(1) = x(1) + x(2) + x(3) * six + return +end subroutine diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 new file mode 100644 index 0000000..aaa83d2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 @@ -0,0 +1,22 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_long(x) + implicit none + integer, parameter :: ii = selected_int_kind(18) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 new file mode 100644 index 0000000..62c9a5b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correctly intercepted. +! Specifically that types of constants without +! compound kind specs are correctly inferred +! adapted Gibbs iteration code from pymc +! for this test case +subroutine foo_non_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + + integer(ii) maxiterates + parameter (maxiterates=2) + + integer(ii) maxseries + parameter (maxseries=2) + + integer(ii) wasize + parameter (wasize=maxiterates*maxseries) + integer(ii), intent(inout) :: x + dimension x(wasize) + + x(1) = x(1) + x(2) + x(3) + x(4) * wasize + return +end subroutine diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 new file mode 100644 index 0000000..02ac9dd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! 
used, for instance Pi = 3._dp +subroutine foo_single(x) + implicit none + integer, parameter :: rp = selected_real_kind(6) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_double(x) + implicit none + integer, parameter :: rp = selected_real_kind(15) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/regression/inout.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/regression/inout.f90 new file mode 100644 index 0000000..80cdad9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/regression/inout.f90 @@ -0,0 +1,9 @@ +! Check that intent(in out) translates as intent(inout). +! The separation seems to be a common usage. + subroutine foo(x) + implicit none + real(4), intent(in out) :: x + dimension x(3) + x(1) = x(1) + x(2) + x(3) + return + end diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/size/foo.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/size/foo.f90 new file mode 100644 index 0000000..5b66f8c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/size/foo.f90 @@ -0,0 +1,44 @@ + +subroutine foo(a, n, m, b) + implicit none + + real, intent(in) :: a(n, m) + integer, intent(in) :: n, m + real, intent(out) :: b(size(a, 1)) + + integer :: i + + do i = 1, size(b) + b(i) = sum(a(i,:)) + enddo +end subroutine + +subroutine trans(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x,2), size(x,1) ) :: y + integer :: N, M, i, j + N = size(x,1) + M = size(x,2) + DO i=1,N + do j=1,M + y(j,i) = x(i,j) + END DO + END DO +end subroutine trans + +subroutine flatten(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x) ) :: y + integer :: N, M, i, j, k + N = size(x,1) + M = size(x,2) + k = 1 + DO i=1,N + do j=1,M + y(k) = x(i,j) + k = k + 1 + END DO + END DO +end subroutine flatten diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/string/char.f90 b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/string/char.f90 new file mode 100644 index 0000000..bb7985c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/src/string/char.f90 @@ -0,0 +1,29 @@ +MODULE char_test + +CONTAINS + +SUBROUTINE change_strings(strings, n_strs, out_strings) + IMPLICIT NONE + + ! Inputs + INTEGER, INTENT(IN) :: n_strs + CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings + CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings + +!f2py INTEGER, INTENT(IN) :: n_strs +!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings +!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: strings + + ! Misc. 
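+ ! (Editor's note: j below is only a loop counter; the loop copies the + ! first character of each pair unchanged and overwrites the second + ! with 'A'.) 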
+ INTEGER*4 :: j + + + DO j=1, n_strs + out_strings(1,j) = strings(1,j) + out_strings(2,j) = 'A' + END DO + +END SUBROUTINE change_strings + +END MODULE char_test + diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.py new file mode 100644 index 0000000..a800901 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.py @@ -0,0 +1,581 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +import copy +import pytest + +from numpy import ( + array, alltrue, ndarray, zeros, dtype, intp, clongdouble + ) +from numpy.testing import assert_, assert_equal +from numpy.core.multiarray import typeinfo +from . import util + +wrap = None + + +def setup_module(): + """ + Build the required testing extension module + + """ + global wrap + + # Check compiler availability first + if not util.has_c_compiler(): + pytest.skip("No C compiler available") + + if wrap is None: + config_code = """ + config.add_extension('test_array_from_pyobj_ext', + sources=['wrapmodule.c', 'fortranobject.c'], + define_macros=[]) + """ + d = os.path.dirname(__file__) + src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'), + os.path.join(d, '..', 'src', 'fortranobject.c'), + os.path.join(d, '..', 'src', 'fortranobject.h')] + wrap = util.build_module_distutils(src, config_code, + 'test_array_from_pyobj_ext') + + +def flags_info(arr): + flags = wrap.array_attrs(arr)[6] + return flags2names(flags) + + +def flags2names(flags): + info = [] + for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', + 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', + 'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', + 'CARRAY', 'FARRAY' + ]: + if abs(flags) & getattr(wrap, flagname, 0): + info.append(flagname) + return info + + +class Intent(object): + + def __init__(self, intent_list=[]): + self.intent_list = intent_list[:] + flags = 0 + for i in intent_list: + if i == 'optional': + flags |= wrap.F2PY_OPTIONAL + else: + flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper()) + self.flags = flags + + def __getattr__(self, name): + name = name.lower() + if name == 'in_': + name = 'in' + return self.__class__(self.intent_list + [name]) + + def __str__(self): + return 'intent(%s)' % (','.join(self.intent_list)) + + def __repr__(self): + return 'Intent(%r)' % (self.intent_list) + + def is_intent(self, *names): + for name in names: + if name not in self.intent_list: + return False + return True + + def is_intent_exact(self, *names): + return len(self.intent_list) == len(names) and self.is_intent(*names) + +intent = Intent() + +_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', + 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', + 'FLOAT', 'DOUBLE', 'CFLOAT'] + +_cast_dict = {'BOOL': ['BOOL']} +_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] +_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] +_cast_dict['BYTE'] = ['BYTE'] +_cast_dict['UBYTE'] = ['UBYTE'] +_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] +_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] +_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] +_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] + +_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] +_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] + +_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] +_cast_dict['ULONGLONG'] = 
_cast_dict['ULONG'] + ['ULONGLONG'] + +_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] +_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] + +_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] + +# 32 bit system malloc typically does not provide the alignment required by +# 16 byte long double types. This means the inout intent cannot be satisfied, +# and several tests fail, as the alignment flag can be randomly true or false. +# When numpy gains an aligned allocator the tests could be enabled again. +if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and + sys.platform != 'win32'): + _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) + _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ + ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] + _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \ + ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] + _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] + + +class Type(object): + _type_cache = {} + + def __new__(cls, name): + if isinstance(name, dtype): + dtype0 = name + name = None + for n, i in typeinfo.items(): + if not isinstance(i, type) and dtype0.type is i.type: + name = n + break + obj = cls._type_cache.get(name.upper(), None) + if obj is not None: + return obj + obj = object.__new__(cls) + obj._init(name) + cls._type_cache[name.upper()] = obj + return obj + + def _init(self, name): + self.NAME = name.upper() + info = typeinfo[self.NAME] + self.type_num = getattr(wrap, 'NPY_' + self.NAME) + assert_equal(self.type_num, info.num) + self.dtype = info.type + self.elsize = info.bits / 8 + self.dtypechar = info.char + + def cast_types(self): + return [self.__class__(_m) for _m in _cast_dict[self.NAME]] + + def all_types(self): + return [self.__class__(_m) for _m in _type_names] + + def smaller_types(self): + bits = typeinfo[self.NAME].alignment + types = [] + for name in _type_names: + if typeinfo[name].alignment < bits: + types.append(Type(name)) + return types + + def equal_types(self): + bits = typeinfo[self.NAME].alignment + types = [] + for name in _type_names: + if name == self.NAME: + continue + if typeinfo[name].alignment == bits: + types.append(Type(name)) + return types + + def larger_types(self): + bits = typeinfo[self.NAME].alignment + types = [] + for name in _type_names: + if typeinfo[name].alignment > bits: + types.append(Type(name)) + return types + + +class Array(object): + + def __init__(self, typ, dims, intent, obj): + self.type = typ + self.dims = dims + self.intent = intent + self.obj_copy = copy.deepcopy(obj) + self.obj = obj + + # arr.dtypechar may be different from typ.dtypechar + self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) + + assert_(isinstance(self.arr, ndarray), repr(type(self.arr))) + + self.arr_attr = wrap.array_attrs(self.arr) + + if len(dims) > 1: + if self.intent.is_intent('c'): + assert_(intent.flags & wrap.F2PY_INTENT_C) + assert_(not self.arr.flags['FORTRAN'], + repr((self.arr.flags, getattr(obj, 'flags', None)))) + assert_(self.arr.flags['CONTIGUOUS']) + assert_(not self.arr_attr[6] & wrap.FORTRAN) + else: + assert_(not intent.flags & wrap.F2PY_INTENT_C) + assert_(self.arr.flags['FORTRAN']) + assert_(not self.arr.flags['CONTIGUOUS']) + assert_(self.arr_attr[6] & wrap.FORTRAN) + + if obj is None: + self.pyarr = None + self.pyarr_attr = None + return + + if intent.is_intent('cache'): + assert_(isinstance(obj, ndarray), repr(type(obj))) + self.pyarr = array(obj).reshape(*dims).copy() + else: + self.pyarr = array(array(obj, 
dtype=typ.dtypechar).reshape(*dims), + order=self.intent.is_intent('c') and 'C' or 'F') + assert_(self.pyarr.dtype == typ, + repr((self.pyarr.dtype, typ))) + assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) + self.pyarr_attr = wrap.array_attrs(self.pyarr) + + if len(dims) > 1: + if self.intent.is_intent('c'): + assert_(not self.pyarr.flags['FORTRAN']) + assert_(self.pyarr.flags['CONTIGUOUS']) + assert_(not self.pyarr_attr[6] & wrap.FORTRAN) + else: + assert_(self.pyarr.flags['FORTRAN']) + assert_(not self.pyarr.flags['CONTIGUOUS']) + assert_(self.pyarr_attr[6] & wrap.FORTRAN) + + assert_(self.arr_attr[1] == self.pyarr_attr[1]) # nd + assert_(self.arr_attr[2] == self.pyarr_attr[2]) # dimensions + if self.arr_attr[1] <= 1: + assert_(self.arr_attr[3] == self.pyarr_attr[3], + repr((self.arr_attr[3], self.pyarr_attr[3], + self.arr.tobytes(), self.pyarr.tobytes()))) # strides + assert_(self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], + repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr + assert_(self.arr_attr[6] == self.pyarr_attr[6], + repr((self.arr_attr[6], self.pyarr_attr[6], + flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), + flags2names(self.arr_attr[6]), intent))) # flags + + if intent.is_intent('cache'): + assert_(self.arr_attr[5][3] >= self.type.elsize, + repr((self.arr_attr[5][3], self.type.elsize))) + else: + assert_(self.arr_attr[5][3] == self.type.elsize, + repr((self.arr_attr[5][3], self.type.elsize))) + assert_(self.arr_equal(self.pyarr, self.arr)) + + if isinstance(self.obj, ndarray): + if typ.elsize == Type(obj.dtype).elsize: + if not intent.is_intent('copy') and self.arr_attr[1] <= 1: + assert_(self.has_shared_memory()) + + def arr_equal(self, arr1, arr2): + if arr1.shape != arr2.shape: + return False + s = arr1 == arr2 + return alltrue(s.flatten()) + + def __str__(self): + return str(self.arr) + + def has_shared_memory(self): + """Check that created array shares data with input array. 
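+ + (Editor's note, inferred from this method's body: "shares data" + means the two arrays report the same data pointer, i.e. the first + element of wrap.array_attrs() matches for input and result.) 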
+ """ + if self.obj is self.arr: + return True + if not isinstance(self.obj, ndarray): + return False + obj_attr = wrap.array_attrs(self.obj) + return obj_attr[0] == self.arr_attr[0] + + +class TestIntent(object): + + def test_in_out(self): + assert_equal(str(intent.in_.out), 'intent(in,out)') + assert_(intent.in_.c.is_intent('c')) + assert_(not intent.in_.c.is_intent_exact('c')) + assert_(intent.in_.c.is_intent_exact('c', 'in')) + assert_(intent.in_.c.is_intent_exact('in', 'c')) + assert_(not intent.in_.is_intent('c')) + + +class TestSharedMemory(object): + num2seq = [1, 2] + num23seq = [[1, 2, 3], [4, 5, 6]] + + @pytest.fixture(autouse=True, scope='class', params=_type_names) + def setup_type(self, request): + request.cls.type = Type(request.param) + request.cls.array = lambda self, dims, intent, obj: \ + Array(Type(request.param), dims, intent, obj) + + def test_in_from_2seq(self): + a = self.array([2], intent.in_, self.num2seq) + assert_(not a.has_shared_memory()) + + def test_in_from_2casttype(self): + for t in self.type.cast_types(): + obj = array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_, obj) + if t.elsize == self.type.elsize: + assert_( + a.has_shared_memory(), repr((self.type.dtype, t.dtype))) + else: + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_inout_2seq(self): + obj = array(self.num2seq, dtype=self.type.dtype) + a = self.array([len(self.num2seq)], intent.inout, obj) + assert_(a.has_shared_memory()) + + try: + a = self.array([2], intent.in_.inout, self.num2seq) + except TypeError as msg: + if not str(msg).startswith('failed to initialize intent' + '(inout|inplace|cache) array'): + raise + else: + raise SystemError('intent(inout) should have failed on sequence') + + def test_f_inout_23seq(self): + obj = array(self.num23seq, dtype=self.type.dtype, order='F') + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.inout, obj) + assert_(a.has_shared_memory()) + + obj = array(self.num23seq, dtype=self.type.dtype, order='C') + shape = (len(self.num23seq), len(self.num23seq[0])) + try: + a = self.array(shape, intent.in_.inout, obj) + except ValueError as msg: + if not str(msg).startswith('failed to initialize intent' + '(inout) array'): + raise + else: + raise SystemError( + 'intent(inout) should have failed on improper array') + + def test_c_inout_23seq(self): + obj = array(self.num23seq, dtype=self.type.dtype) + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.c.inout, obj) + assert_(a.has_shared_memory()) + + def test_in_copy_from_2casttype(self): + for t in self.type.cast_types(): + obj = array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_c_in_from_23seq(self): + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, self.num23seq) + assert_(not a.has_shared_memory()) + + def test_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_f_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype, order='F') + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, obj) + if t.elsize == self.type.elsize: + assert_(a.has_shared_memory(), repr(t.dtype)) + else: + assert_(not 
a.has_shared_memory(), repr(t.dtype)) + + def test_c_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.c, obj) + if t.elsize == self.type.elsize: + assert_(a.has_shared_memory(), repr(t.dtype)) + else: + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_f_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype, order='F') + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_c_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.c.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_in_cache_from_2casttype(self): + for t in self.type.all_types(): + if t.elsize != self.type.elsize: + continue + obj = array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq),) + a = self.array(shape, intent.in_.c.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + a = self.array(shape, intent.in_.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + obj = array(self.num2seq, dtype=t.dtype, order='F') + a = self.array(shape, intent.in_.c.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + a = self.array(shape, intent.in_.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + try: + a = self.array(shape, intent.in_.cache, obj[::-1]) + except ValueError as msg: + if not str(msg).startswith('failed to initialize' + ' intent(cache) array'): + raise + else: + raise SystemError( + 'intent(cache) should have failed on multisegmented array') + + def test_in_cache_from_2casttype_failure(self): + for t in self.type.all_types(): + if t.elsize >= self.type.elsize: + continue + obj = array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq),) + try: + self.array(shape, intent.in_.cache, obj) # Should succeed + except ValueError as msg: + if not str(msg).startswith('failed to initialize' + ' intent(cache) array'): + raise + else: + raise SystemError( + 'intent(cache) should have failed on smaller array') + + def test_cache_hidden(self): + shape = (2,) + a = self.array(shape, intent.cache.hide, None) + assert_(a.arr.shape == shape) + + shape = (2, 3) + a = self.array(shape, intent.cache.hide, None) + assert_(a.arr.shape == shape) + + shape = (-1, 3) + try: + a = self.array(shape, intent.cache.hide, None) + except ValueError as msg: + if not str(msg).startswith('failed to create intent' + '(cache|hide)|optional array'): + raise + else: + raise SystemError( + 'intent(cache) should have failed on undefined dimensions') + + def test_hidden(self): + shape = (2,) + a = self.array(shape, intent.hide, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + + shape = (2, 3) + a = self.array(shape, intent.hide, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) + + shape = (2, 3) + a = self.array(shape, intent.c.hide, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) + + shape = (-1, 3) + try: + a = self.array(shape, intent.hide, None) + except 
ValueError as msg: + if not str(msg).startswith('failed to create intent' + '(cache|hide)|optional array'): + raise + else: + raise SystemError('intent(hide) should have failed' + ' on undefined dimensions') + + def test_optional_none(self): + shape = (2,) + a = self.array(shape, intent.optional, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + + shape = (2, 3) + a = self.array(shape, intent.optional, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) + + shape = (2, 3) + a = self.array(shape, intent.c.optional, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) + + def test_optional_from_2seq(self): + obj = self.num2seq + shape = (len(obj),) + a = self.array(shape, intent.optional, obj) + assert_(a.arr.shape == shape) + assert_(not a.has_shared_memory()) + + def test_optional_from_23seq(self): + obj = self.num23seq + shape = (len(obj), len(obj[0])) + a = self.array(shape, intent.optional, obj) + assert_(a.arr.shape == shape) + assert_(not a.has_shared_memory()) + + a = self.array(shape, intent.optional.c, obj) + assert_(a.arr.shape == shape) + assert_(not a.has_shared_memory()) + + def test_inplace(self): + obj = array(self.num23seq, dtype=self.type.dtype) + assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) + a.arr[1][2] = 54 + assert_(obj[1][2] == a.arr[1][2] == + array(54, dtype=self.type.dtype), repr((obj, a.arr))) + assert_(a.arr is obj) + assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! + assert_(not obj.flags['CONTIGUOUS']) + + def test_inplace_from_casttype(self): + for t in self.type.cast_types(): + if t is self.type: + continue + obj = array(self.num23seq, dtype=t.dtype) + assert_(obj.dtype.type == t.dtype) + assert_(obj.dtype.type is not self.type.dtype) + assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) + a.arr[1][2] = 54 + assert_(obj[1][2] == a.arr[1][2] == + array(54, dtype=self.type.dtype), repr((obj, a.arr))) + assert_(a.arr is obj) + assert_(obj.flags['FORTRAN']) # obj attributes changed inplace! + assert_(not obj.flags['CONTIGUOUS']) + assert_(obj.dtype.type is self.type.dtype) # obj changed inplace! diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.pyc new file mode 100644 index 0000000..e5b62de Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_array_from_pyobj.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_assumed_shape.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_assumed_shape.py new file mode 100644 index 0000000..460afd6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_assumed_shape.py @@ -0,0 +1,33 @@ +from __future__ import division, absolute_import, print_function + +import os +import pytest + +from numpy.testing import assert_ +from . 
import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestAssumedShapeSumExample(util.F2PyTest): + sources = [_path('src', 'assumed_shape', 'foo_free.f90'), + _path('src', 'assumed_shape', 'foo_use.f90'), + _path('src', 'assumed_shape', 'precision.f90'), + _path('src', 'assumed_shape', 'foo_mod.f90'), + ] + + @pytest.mark.slow + def test_all(self): + r = self.module.fsum([1, 2]) + assert_(r == 3, repr(r)) + r = self.module.sum([1, 2]) + assert_(r == 3, repr(r)) + r = self.module.sum_with_use([1, 2]) + assert_(r == 3, repr(r)) + + r = self.module.mod.sum([1, 2]) + assert_(r == 3, repr(r)) + r = self.module.mod.fsum([1, 2]) + assert_(r == 3, repr(r)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_assumed_shape.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_assumed_shape.pyc new file mode 100644 index 0000000..ec61188 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_assumed_shape.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_block_docstring.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_block_docstring.py new file mode 100644 index 0000000..8fc072a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_block_docstring.py @@ -0,0 +1,23 @@ +from __future__ import division, absolute_import, print_function + +import sys +import pytest +from . import util + +from numpy.testing import assert_equal + +class TestBlockDocString(util.F2PyTest): + code = """ + SUBROUTINE FOO() + INTEGER BAR(2, 3) + + COMMON /BLOCK/ BAR + RETURN + END + """ + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_block_docstring(self): + expected = "'i'-array(2,3)\n" + assert_equal(self.module.block.__doc__, expected) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_block_docstring.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_block_docstring.pyc new file mode 100644 index 0000000..14fe9fc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_block_docstring.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_callback.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_callback.py new file mode 100644 index 0000000..824ef7b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_callback.py @@ -0,0 +1,165 @@ +from __future__ import division, absolute_import, print_function + +import math +import textwrap +import sys +import pytest + +import numpy as np +from numpy.testing import assert_, assert_equal +from . 
import util + + +class TestF77Callback(util.F2PyTest): + code = """ + subroutine t(fun,a) + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine func(a) +cf2py intent(in,out) a + integer a + a = a + 11 + end + + subroutine func0(a) +cf2py intent(out) a + integer a + a = 11 + end + + subroutine t2(a) +cf2py intent(callback) fun + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine string_callback(callback, a) + external callback + double precision callback + double precision a + character*1 r +cf2py intent(out) a + r = 'r' + a = callback(r) + end + + subroutine string_callback_array(callback, cu, lencu, a) + external callback + integer callback + integer lencu + character*8 cu(lencu) + integer a +cf2py intent(out) a + + a = callback(cu, lencu) + end + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't,t2'.split(',')) + def test_all(self, name): + self.check_function(name) + + @pytest.mark.slow + def test_docstring(self): + expected = """ + a = t(fun,[fun_extra_args]) + + Wrapper for ``t``. + + Parameters + ---------- + fun : call-back function + + Other Parameters + ---------------- + fun_extra_args : input tuple, optional + Default: () + + Returns + ------- + a : int + + Notes + ----- + Call-back functions:: + + def fun(): return a + Return objects: + a : int + """ + assert_equal(self.module.t.__doc__, textwrap.dedent(expected).lstrip()) + + def check_function(self, name): + t = getattr(self.module, name) + r = t(lambda: 4) + assert_(r == 4, repr(r)) + r = t(lambda a: 5, fun_extra_args=(6,)) + assert_(r == 5, repr(r)) + r = t(lambda a: a, fun_extra_args=(6,)) + assert_(r == 6, repr(r)) + r = t(lambda a: 5 + a, fun_extra_args=(7,)) + assert_(r == 12, repr(r)) + r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,)) + assert_(r == 180, repr(r)) + r = t(math.degrees, fun_extra_args=(math.pi,)) + assert_(r == 180, repr(r)) + + r = t(self.module.func, fun_extra_args=(6,)) + assert_(r == 17, repr(r)) + r = t(self.module.func0) + assert_(r == 11, repr(r)) + r = t(self.module.func0._cpointer) + assert_(r == 11, repr(r)) + + class A(object): + + def __call__(self): + return 7 + + def mth(self): + return 9 + a = A() + r = t(a) + assert_(r == 7, repr(r)) + r = t(a.mth) + assert_(r == 9, repr(r)) + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback(self): + + def callback(code): + if code == 'r': + return 0 + else: + return 1 + + f = getattr(self.module, 'string_callback') + r = f(callback) + assert_(r == 0, repr(r)) + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback_array(self): + # See gh-10027 + cu = np.zeros((1, 8), 'S1') + + def callback(cu, lencu): + if cu.shape != (lencu, 8): + return 1 + if cu.dtype != 'S1': + return 2 + if not np.all(cu == b''): + return 3 + return 0 + + f = getattr(self.module, 'string_callback_array') + res = f(callback, cu, len(cu)) + assert_(res == 0, repr(res)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_callback.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_callback.pyc new file mode 100644 index 0000000..7a6f95b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_callback.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_common.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_common.py new file 
mode 100644 index 0000000..dcb01b0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_common.py @@ -0,0 +1,27 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +import pytest + +import numpy as np +from . import util + +from numpy.testing import assert_array_equal + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + +class TestCommonBlock(util.F2PyTest): + sources = [_path('src', 'common', 'block.f')] + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_common_block(self): + self.module.initcb() + assert_array_equal(self.module.block.long_bn, + np.array(1.0, dtype=np.float64)) + assert_array_equal(self.module.block.string_bn, + np.array('2', dtype='|S1')) + assert_array_equal(self.module.block.ok, + np.array(3, dtype=np.int32)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_common.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_common.pyc new file mode 100644 index 0000000..aa1c457 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_common.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_compile_function.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_compile_function.py new file mode 100644 index 0000000..36abf05 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_compile_function.py @@ -0,0 +1,125 @@ +"""See https://github.com/numpy/numpy/pull/11937. + +""" +from __future__ import division, absolute_import, print_function + +import sys +import os +import uuid +from importlib import import_module +import pytest + +import numpy.f2py + +from numpy.testing import assert_equal +from . import util + + +def setup_module(): + if sys.platform == 'win32' and sys.version_info[0] < 3: + pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)') + if not util.has_c_compiler(): + pytest.skip("Needs C compiler") + if not util.has_f77_compiler(): + pytest.skip('Needs FORTRAN 77 compiler') + + +# extra_args can be a list (since gh-11937) or string. 
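+# (Editor's note: the parametrization below covers the two spellings that +# gh-11937 made interchangeable, e.g. extra_args=['--noopt', '--debug'] and +# extra_args='--noopt --debug', plus the empty-string default.) 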
+# also test absence of extra_args +@pytest.mark.parametrize( + "extra_args", [['--noopt', '--debug'], '--noopt --debug', ''] + ) +def test_f2py_init_compile(extra_args): + # flush through the f2py __init__ compile() function code path as a + # crude test for input handling following migration from + # exec_command() to subprocess.check_output() in gh-11937 + + # the Fortran 77 syntax requires 6 spaces before any commands, but + # more space may be added. + fsource = """ + integer function foo() + foo = 10 + 5 + return + end + """ + # use various helper functions in util.py to enable robust build / + # compile and reimport cycle in test suite + moddir = util.get_module_dir() + modname = util.get_temp_module_name() + + cwd = os.getcwd() + target = os.path.join(moddir, str(uuid.uuid4()) + '.f') + # try running compile() with and without a source_fn provided so + # that the code path where a temporary file for writing Fortran + # source is created is also explored + for source_fn in [target, None]: + # mimic the path changing behavior used by build_module() in + # util.py, but don't actually use build_module() because it has + # its own invocation of subprocess that circumvents the + # f2py.compile code block under test + try: + os.chdir(moddir) + ret_val = numpy.f2py.compile( + fsource, + modulename=modname, + extra_args=extra_args, + source_fn=source_fn + ) + finally: + os.chdir(cwd) + + # check for compile success return value + assert_equal(ret_val, 0) + + # we are not currently able to import the Python-Fortran + # interface module on Windows / Appveyor, even though we do get + # successful compilation on that platform with Python 3.x + if sys.platform != 'win32': + # check for sensible result of Fortran function; that means + # we can import the module name in Python and retrieve the + # result of the sum operation + return_check = import_module(modname) + calc_result = return_check.foo() + assert_equal(calc_result, 15) + + +def test_f2py_init_compile_failure(): + # verify an appropriate integer status value returned by + # f2py.compile() when invalid Fortran is provided + ret_val = numpy.f2py.compile(b"invalid") + assert_equal(ret_val, 1) + + +def test_f2py_init_compile_bad_cmd(): + # verify that usage of invalid command in f2py.compile() returns + # status value of 127 for historic consistency with exec_command() + # error handling + + # patch the sys Python exe path temporarily to induce an OSError + # downstream. NOTE: how bad of an idea is this patching? 
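+ # (Editor's note: the mechanism, as far as the code shows: replacing + # sys.executable with a nonexistent path makes the spawned compile + # command fail with OSError, which f2py.compile() reports as 127.) 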
+ try: + temp = sys.executable + sys.executable = 'does not exist' + + # the OSError should take precedence over invalid Fortran + ret_val = numpy.f2py.compile(b"invalid") + assert_equal(ret_val, 127) + finally: + sys.executable = temp + + +@pytest.mark.parametrize('fsource', + ['program test_f2py\nend program test_f2py', + b'program test_f2py\nend program test_f2py',]) +def test_compile_from_strings(tmpdir, fsource): + # Make sure we can compile str and bytes gh-12796 + cwd = os.getcwd() + try: + os.chdir(str(tmpdir)) + ret_val = numpy.f2py.compile( + fsource, + modulename='test_compile_from_strings', + extension='.f90') + assert_equal(ret_val, 0) + finally: + os.chdir(cwd) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_compile_function.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_compile_function.pyc new file mode 100644 index 0000000..78d0f51 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_compile_function.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_kind.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_kind.py new file mode 100644 index 0000000..1f7762a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_kind.py @@ -0,0 +1,34 @@ +from __future__ import division, absolute_import, print_function + +import os +import pytest + +from numpy.testing import assert_ +from numpy.f2py.crackfortran import ( + _selected_int_kind_func as selected_int_kind, + _selected_real_kind_func as selected_real_kind + ) +from . import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestKind(util.F2PyTest): + sources = [_path('src', 'kind', 'foo.f90')] + + @pytest.mark.slow + def test_all(self): + selectedrealkind = self.module.selectedrealkind + selectedintkind = self.module.selectedintkind + + for i in range(40): + assert_(selectedintkind(i) in [selected_int_kind(i), -1], + 'selectedintkind(%s): expected %r but got %r' % + (i, selected_int_kind(i), selectedintkind(i))) + + for i in range(20): + assert_(selectedrealkind(i) in [selected_real_kind(i), -1], + 'selectedrealkind(%s): expected %r but got %r' % + (i, selected_real_kind(i), selectedrealkind(i))) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_kind.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_kind.pyc new file mode 100644 index 0000000..e8faab9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_kind.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_mixed.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_mixed.py new file mode 100644 index 0000000..28268ec --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_mixed.py @@ -0,0 +1,38 @@ +from __future__ import division, absolute_import, print_function + +import os +import textwrap +import pytest + +from numpy.testing import assert_, assert_equal +from . 
import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestMixed(util.F2PyTest): + sources = [_path('src', 'mixed', 'foo.f'), + _path('src', 'mixed', 'foo_fixed.f90'), + _path('src', 'mixed', 'foo_free.f90')] + + @pytest.mark.slow + def test_all(self): + assert_(self.module.bar11() == 11) + assert_(self.module.foo_fixed.bar12() == 12) + assert_(self.module.foo_free.bar13() == 13) + + @pytest.mark.slow + def test_docstring(self): + expected = """ + a = bar11() + + Wrapper for ``bar11``. + + Returns + ------- + a : int + """ + assert_equal(self.module.bar11.__doc__, + textwrap.dedent(expected).lstrip()) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_mixed.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_mixed.pyc new file mode 100644 index 0000000..103ae16 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_mixed.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_parameter.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_parameter.py new file mode 100644 index 0000000..6a37868 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_parameter.py @@ -0,0 +1,118 @@ +from __future__ import division, absolute_import, print_function + +import os +import pytest + +import numpy as np +from numpy.testing import assert_raises, assert_equal + +from . import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestParameters(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [_path('src', 'parameter', 'constant_real.f90'), + _path('src', 'parameter', 'constant_integer.f90'), + _path('src', 'parameter', 'constant_both.f90'), + _path('src', 'parameter', 'constant_compound.f90'), + _path('src', 'parameter', 'constant_non_compound.f90'), + ] + + @pytest.mark.slow + def test_constant_real_single(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + assert_raises(ValueError, self.module.foo_single, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo_single(x) + assert_equal(x, [0 + 1 + 2*3, 1, 2]) + + @pytest.mark.slow + def test_constant_real_double(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + assert_raises(ValueError, self.module.foo_double, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_double(x) + assert_equal(x, [0 + 1 + 2*3, 1, 2]) + + @pytest.mark.slow + def test_constant_compound_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + assert_raises(ValueError, self.module.foo_compound_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_compound_int(x) + assert_equal(x, [0 + 1 + 2*6, 1, 2]) + + @pytest.mark.slow + def test_constant_non_compound_int(self): + # check values + x = np.arange(4, dtype=np.int32) + self.module.foo_non_compound_int(x) + assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) + + @pytest.mark.slow + def test_constant_integer_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + assert_raises(ValueError, self.module.foo_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_int(x) + assert_equal(x, [0 + 1 + 2*3, 1, 2]) + + @pytest.mark.slow + def 
test_constant_integer_long(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int64)[::2] + assert_raises(ValueError, self.module.foo_long, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int64) + self.module.foo_long(x) + assert_equal(x, [0 + 1 + 2*3, 1, 2]) + + @pytest.mark.slow + def test_constant_both(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + assert_raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo(x) + assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) + + @pytest.mark.slow + def test_constant_no(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + assert_raises(ValueError, self.module.foo_no, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_no(x) + assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) + + @pytest.mark.slow + def test_constant_sum(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + assert_raises(ValueError, self.module.foo_sum, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_sum(x) + assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_parameter.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_parameter.pyc new file mode 100644 index 0000000..38b9f7f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_parameter.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_quoted_character.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_quoted_character.py new file mode 100644 index 0000000..c9a1c36 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_quoted_character.py @@ -0,0 +1,35 @@ +"""See https://github.com/numpy/numpy/pull/10676. + +""" +from __future__ import division, absolute_import, print_function + +import sys +from importlib import import_module +import pytest + +from numpy.testing import assert_equal +from . 
import util + + +class TestQuotedCharacter(util.F2PyTest): + code = """ + SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) + CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR + PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", + 1 OPENPAR="(", CLOSEPAR=")") + CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 +Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 + OUT1 = SINGLE + OUT2 = DOUBLE + OUT3 = SEMICOL + OUT4 = EXCLA + OUT5 = OPENPAR + OUT6 = CLOSEPAR + RETURN + END + """ + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_quoted_character(self): + assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')')) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_quoted_character.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_quoted_character.pyc new file mode 100644 index 0000000..2db2f5a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_quoted_character.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_regression.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_regression.py new file mode 100644 index 0000000..3adae63 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_regression.py @@ -0,0 +1,29 @@ +from __future__ import division, absolute_import, print_function + +import os +import pytest + +import numpy as np +from numpy.testing import assert_raises, assert_equal + +from . import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestIntentInOut(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [_path('src', 'regression', 'inout.f90')] + + @pytest.mark.slow + def test_inout(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + assert_raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo(x) + assert_equal(x, [3, 1, 2]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_regression.pyc new file mode 100644 index 0000000..85f648c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_regression.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_character.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_character.py new file mode 100644 index 0000000..fc3a58d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_character.py @@ -0,0 +1,146 @@ +from __future__ import division, absolute_import, print_function + +import pytest + +from numpy import array +from numpy.testing import assert_ +from . 
import util + + +class TestReturnCharacter(util.F2PyTest): + + def check_function(self, t): + tname = t.__doc__.split()[0] + if tname in ['t0', 't1', 's0', 's1']: + assert_(t(23) == b'2') + r = t('ab') + assert_(r == b'a', repr(r)) + r = t(array('ab')) + assert_(r == b'a', repr(r)) + r = t(array(77, 'u1')) + assert_(r == b'M', repr(r)) + #assert_(_raises(ValueError, t, array([77,87]))) + #assert_(_raises(ValueError, t, array(77))) + elif tname in ['ts', 'ss']: + assert_(t(23) == b'23 ', repr(t(23))) + assert_(t('123456789abcdef') == b'123456789a') + elif tname in ['t5', 's5']: + assert_(t(23) == b'23 ', repr(t(23))) + assert_(t('ab') == b'ab ', repr(t('ab'))) + assert_(t('123456789abcdef') == b'12345') + else: + raise NotImplementedError + + +class TestF77ReturnCharacter(TestReturnCharacter): + code = """ + function t0(value) + character value + character t0 + t0 = value + end + function t1(value) + character*1 value + character*1 t1 + t1 = value + end + function t5(value) + character*5 value + character*5 t5 + t5 = value + end + function ts(value) + character*(*) value + character*(*) ts + ts = value + end + + subroutine s0(t0,value) + character value + character t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + character*1 value + character*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s5(t5,value) + character*5 value + character*5 t5 +cf2py intent(out) t5 + t5 = value + end + subroutine ss(ts,value) + character*(*) value + character*10 ts +cf2py intent(out) ts + ts = value + end + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name)) + + +class TestF90ReturnCharacter(TestReturnCharacter): + suffix = ".f90" + code = """ +module f90_return_char + contains + function t0(value) + character :: value + character :: t0 + t0 = value + end function t0 + function t1(value) + character(len=1) :: value + character(len=1) :: t1 + t1 = value + end function t1 + function t5(value) + character(len=5) :: value + character(len=5) :: t5 + t5 = value + end function t5 + function ts(value) + character(len=*) :: value + character(len=10) :: ts + ts = value + end function ts + + subroutine s0(t0,value) + character :: value + character :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + character(len=1) :: value + character(len=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s5(t5,value) + character(len=5) :: value + character(len=5) :: t5 +!f2py intent(out) t5 + t5 = value + end subroutine s5 + subroutine ss(ts,value) + character(len=*) :: value + character(len=10) :: ts +!f2py intent(out) ts + ts = value + end subroutine ss +end module f90_return_char + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module.f90_return_char, name)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_character.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_character.pyc new file mode 100644 index 0000000..d56928b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_character.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_complex.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_complex.py new file mode 100644 index 
0000000..43c884d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_complex.py @@ -0,0 +1,169 @@ +from __future__ import division, absolute_import, print_function + +import pytest + +from numpy import array +from numpy.compat import long +from numpy.testing import assert_, assert_raises +from . import util + + +class TestReturnComplex(util.F2PyTest): + + def check_function(self, t): + tname = t.__doc__.split()[0] + if tname in ['t0', 't8', 's0', 's8']: + err = 1e-5 + else: + err = 0.0 + assert_(abs(t(234j) - 234.0j) <= err) + assert_(abs(t(234.6) - 234.6) <= err) + assert_(abs(t(long(234)) - 234.0) <= err) + assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err) + #assert_( abs(t('234')-234.)<=err) + #assert_( abs(t('234.6')-234.6)<=err) + assert_(abs(t(-234) + 234.) <= err) + assert_(abs(t([234]) - 234.) <= err) + assert_(abs(t((234,)) - 234.) <= err) + assert_(abs(t(array(234)) - 234.) <= err) + assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err) + assert_(abs(t(array([234])) - 234.) <= err) + assert_(abs(t(array([[234]])) - 234.) <= err) + assert_(abs(t(array([234], 'b')) + 22.) <= err) + assert_(abs(t(array([234], 'h')) - 234.) <= err) + assert_(abs(t(array([234], 'i')) - 234.) <= err) + assert_(abs(t(array([234], 'l')) - 234.) <= err) + assert_(abs(t(array([234], 'q')) - 234.) <= err) + assert_(abs(t(array([234], 'f')) - 234.) <= err) + assert_(abs(t(array([234], 'd')) - 234.) <= err) + assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err) + assert_(abs(t(array([234], 'D')) - 234.) <= err) + + #assert_raises(TypeError, t, array([234], 'a1')) + assert_raises(TypeError, t, 'abc') + + assert_raises(IndexError, t, []) + assert_raises(IndexError, t, ()) + + assert_raises(TypeError, t, t) + assert_raises(TypeError, t, {}) + + try: + r = t(10 ** 400) + assert_(repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r)) + except OverflowError: + pass + + +class TestF77ReturnComplex(TestReturnComplex): + code = """ + function t0(value) + complex value + complex t0 + t0 = value + end + function t8(value) + complex*8 value + complex*8 t8 + t8 = value + end + function t16(value) + complex*16 value + complex*16 t16 + t16 = value + end + function td(value) + double complex value + double complex td + td = value + end + + subroutine s0(t0,value) + complex value + complex t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s8(t8,value) + complex*8 value + complex*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine s16(t16,value) + complex*16 value + complex*16 t16 +cf2py intent(out) t16 + t16 = value + end + subroutine sd(td,value) + double complex value + double complex td +cf2py intent(out) td + td = value + end + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name)) + + +class TestF90ReturnComplex(TestReturnComplex): + suffix = ".f90" + code = """ +module f90_return_complex + contains + function t0(value) + complex :: value + complex :: t0 + t0 = value + end function t0 + function t8(value) + complex(kind=4) :: value + complex(kind=4) :: t8 + t8 = value + end function t8 + function t16(value) + complex(kind=8) :: value + complex(kind=8) :: t16 + t16 = value + end function t16 + function td(value) + double complex :: value + double complex :: td + td = value + end function td + + subroutine s0(t0,value) + complex :: value + complex :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s8(t8,value) + 
complex(kind=4) :: value + complex(kind=4) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine s16(t16,value) + complex(kind=8) :: value + complex(kind=8) :: t16 +!f2py intent(out) t16 + t16 = value + end subroutine s16 + subroutine sd(td,value) + double complex :: value + double complex :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_complex + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module.f90_return_complex, name)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_complex.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_complex.pyc new file mode 100644 index 0000000..9861df9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_complex.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_integer.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_integer.py new file mode 100644 index 0000000..22f4acf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_integer.py @@ -0,0 +1,181 @@ +from __future__ import division, absolute_import, print_function + +import pytest + +from numpy import array +from numpy.compat import long +from numpy.testing import assert_, assert_raises +from . import util + + +class TestReturnInteger(util.F2PyTest): + + def check_function(self, t): + assert_(t(123) == 123, repr(t(123))) + assert_(t(123.6) == 123) + assert_(t(long(123)) == 123) + assert_(t('123') == 123) + assert_(t(-123) == -123) + assert_(t([123]) == 123) + assert_(t((123,)) == 123) + assert_(t(array(123)) == 123) + assert_(t(array([123])) == 123) + assert_(t(array([[123]])) == 123) + assert_(t(array([123], 'b')) == 123) + assert_(t(array([123], 'h')) == 123) + assert_(t(array([123], 'i')) == 123) + assert_(t(array([123], 'l')) == 123) + assert_(t(array([123], 'B')) == 123) + assert_(t(array([123], 'f')) == 123) + assert_(t(array([123], 'd')) == 123) + + #assert_raises(ValueError, t, array([123],'S3')) + assert_raises(ValueError, t, 'abc') + + assert_raises(IndexError, t, []) + assert_raises(IndexError, t, ()) + + assert_raises(Exception, t, t) + assert_raises(Exception, t, {}) + + if t.__doc__.split()[0] in ['t8', 's8']: + assert_raises(OverflowError, t, 100000000000000000000000) + assert_raises(OverflowError, t, 10000000011111111111111.23) + + +class TestF77ReturnInteger(TestReturnInteger): + code = """ + function t0(value) + integer value + integer t0 + t0 = value + end + function t1(value) + integer*1 value + integer*1 t1 + t1 = value + end + function t2(value) + integer*2 value + integer*2 t2 + t2 = value + end + function t4(value) + integer*4 value + integer*4 t4 + t4 = value + end + function t8(value) + integer*8 value + integer*8 t8 + t8 = value + end + + subroutine s0(t0,value) + integer value + integer t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + integer*1 value + integer*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + integer*2 value + integer*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + integer*4 value + integer*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + integer*8 value + integer*8 t8 +cf2py intent(out) t8 + t8 = value + end + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', + 
't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name)) + + +class TestF90ReturnInteger(TestReturnInteger): + suffix = ".f90" + code = """ +module f90_return_integer + contains + function t0(value) + integer :: value + integer :: t0 + t0 = value + end function t0 + function t1(value) + integer(kind=1) :: value + integer(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + integer(kind=2) :: value + integer(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + integer(kind=4) :: value + integer(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + integer(kind=8) :: value + integer(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + integer :: value + integer :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + integer(kind=1) :: value + integer(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + integer(kind=2) :: value + integer(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + integer(kind=4) :: value + integer(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + integer(kind=8) :: value + integer(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_integer + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', + 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module.f90_return_integer, name)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_integer.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_integer.pyc new file mode 100644 index 0000000..c255d7d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_integer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_logical.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_logical.py new file mode 100644 index 0000000..96f215a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_logical.py @@ -0,0 +1,189 @@ +from __future__ import division, absolute_import, print_function + +import pytest + +from numpy import array +from numpy.compat import long +from numpy.testing import assert_, assert_raises +from . 
import util + + +class TestReturnLogical(util.F2PyTest): + + def check_function(self, t): + assert_(t(True) == 1, repr(t(True))) + assert_(t(False) == 0, repr(t(False))) + assert_(t(0) == 0) + assert_(t(None) == 0) + assert_(t(0.0) == 0) + assert_(t(0j) == 0) + assert_(t(1j) == 1) + assert_(t(234) == 1) + assert_(t(234.6) == 1) + assert_(t(long(234)) == 1) + assert_(t(234.6 + 3j) == 1) + assert_(t('234') == 1) + assert_(t('aaa') == 1) + assert_(t('') == 0) + assert_(t([]) == 0) + assert_(t(()) == 0) + assert_(t({}) == 0) + assert_(t(t) == 1) + assert_(t(-234) == 1) + assert_(t(10 ** 100) == 1) + assert_(t([234]) == 1) + assert_(t((234,)) == 1) + assert_(t(array(234)) == 1) + assert_(t(array([234])) == 1) + assert_(t(array([[234]])) == 1) + assert_(t(array([234], 'b')) == 1) + assert_(t(array([234], 'h')) == 1) + assert_(t(array([234], 'i')) == 1) + assert_(t(array([234], 'l')) == 1) + assert_(t(array([234], 'f')) == 1) + assert_(t(array([234], 'd')) == 1) + assert_(t(array([234 + 3j], 'F')) == 1) + assert_(t(array([234], 'D')) == 1) + assert_(t(array(0)) == 0) + assert_(t(array([0])) == 0) + assert_(t(array([[0]])) == 0) + assert_(t(array([0j])) == 0) + assert_(t(array([1])) == 1) + assert_raises(ValueError, t, array([0, 0])) + + +class TestF77ReturnLogical(TestReturnLogical): + code = """ + function t0(value) + logical value + logical t0 + t0 = value + end + function t1(value) + logical*1 value + logical*1 t1 + t1 = value + end + function t2(value) + logical*2 value + logical*2 t2 + t2 = value + end + function t4(value) + logical*4 value + logical*4 t4 + t4 = value + end +c function t8(value) +c logical*8 value +c logical*8 t8 +c t8 = value +c end + + subroutine s0(t0,value) + logical value + logical t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + logical*1 value + logical*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + logical*2 value + logical*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + logical*4 value + logical*4 t4 +cf2py intent(out) t4 + t4 = value + end +c subroutine s8(t8,value) +c logical*8 value +c logical*8 t8 +cf2py intent(out) t8 +c t8 = value +c end + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't0,t1,t2,t4,s0,s1,s2,s4'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name)) + + +class TestF90ReturnLogical(TestReturnLogical): + suffix = ".f90" + code = """ +module f90_return_logical + contains + function t0(value) + logical :: value + logical :: t0 + t0 = value + end function t0 + function t1(value) + logical(kind=1) :: value + logical(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + logical(kind=2) :: value + logical(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + logical(kind=4) :: value + logical(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + logical(kind=8) :: value + logical(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + logical :: value + logical :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + logical(kind=1) :: value + logical(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + logical(kind=2) :: value + logical(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + logical(kind=4) :: value + logical(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + logical(kind=8) :: value + 
logical(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_logical + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', + 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_logical.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_logical.pyc new file mode 100644 index 0000000..4c9639f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_logical.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_real.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_real.py new file mode 100644 index 0000000..315cfe4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_real.py @@ -0,0 +1,210 @@ +from __future__ import division, absolute_import, print_function + +import platform +import pytest + +from numpy import array +from numpy.compat import long +from numpy.testing import assert_, assert_raises +from . import util + + +class TestReturnReal(util.F2PyTest): + + def check_function(self, t): + if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: + err = 1e-5 + else: + err = 0.0 + assert_(abs(t(234) - 234.0) <= err) + assert_(abs(t(234.6) - 234.6) <= err) + assert_(abs(t(long(234)) - 234.0) <= err) + assert_(abs(t('234') - 234) <= err) + assert_(abs(t('234.6') - 234.6) <= err) + assert_(abs(t(-234) + 234) <= err) + assert_(abs(t([234]) - 234) <= err) + assert_(abs(t((234,)) - 234.) <= err) + assert_(abs(t(array(234)) - 234.) <= err) + assert_(abs(t(array([234])) - 234.) <= err) + assert_(abs(t(array([[234]])) - 234.) <= err) + assert_(abs(t(array([234], 'b')) + 22) <= err) + assert_(abs(t(array([234], 'h')) - 234.) <= err) + assert_(abs(t(array([234], 'i')) - 234.) <= err) + assert_(abs(t(array([234], 'l')) - 234.) <= err) + assert_(abs(t(array([234], 'B')) - 234.) <= err) + assert_(abs(t(array([234], 'f')) - 234.) <= err) + assert_(abs(t(array([234], 'd')) - 234.) 
<= err) + if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: + assert_(t(1e200) == t(1e300)) # inf + + #assert_raises(ValueError, t, array([234], 'S1')) + assert_raises(ValueError, t, 'abc') + + assert_raises(IndexError, t, []) + assert_raises(IndexError, t, ()) + + assert_raises(Exception, t, t) + assert_raises(Exception, t, {}) + + try: + r = t(10 ** 400) + assert_(repr(r) in ['inf', 'Infinity'], repr(r)) + except OverflowError: + pass + + + +@pytest.mark.skipif( + platform.system() == 'Darwin', + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation") +class TestCReturnReal(TestReturnReal): + suffix = ".pyf" + module_name = "c_ext_return_real" + code = """ +python module c_ext_return_real +usercode \'\'\' +float t4(float value) { return value; } +void s4(float *t4, float value) { *t4 = value; } +double t8(double value) { return value; } +void s8(double *t8, double value) { *t8 = value; } +\'\'\' +interface + function t4(value) + real*4 intent(c) :: t4,value + end + function t8(value) + real*8 intent(c) :: t8,value + end + subroutine s4(t4,value) + intent(c) s4 + real*4 intent(out) :: t4 + real*4 intent(c) :: value + end + subroutine s8(t8,value) + intent(c) s8 + real*8 intent(out) :: t8 + real*8 intent(c) :: value + end +end interface +end python module c_ext_return_real + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name)) + + +class TestF77ReturnReal(TestReturnReal): + code = """ + function t0(value) + real value + real t0 + t0 = value + end + function t4(value) + real*4 value + real*4 t4 + t4 = value + end + function t8(value) + real*8 value + real*8 t8 + t8 = value + end + function td(value) + double precision value + double precision td + td = value + end + + subroutine s0(t0,value) + real value + real t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s4(t4,value) + real*4 value + real*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + real*8 value + real*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine sd(td,value) + double precision value + double precision td +cf2py intent(out) td + td = value + end + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name)) + + +class TestF90ReturnReal(TestReturnReal): + suffix = ".f90" + code = """ +module f90_return_real + contains + function t0(value) + real :: value + real :: t0 + t0 = value + end function t0 + function t4(value) + real(kind=4) :: value + real(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + real(kind=8) :: value + real(kind=8) :: t8 + t8 = value + end function t8 + function td(value) + double precision :: value + double precision :: td + td = value + end function td + + subroutine s0(t0,value) + real :: value + real :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s4(t4,value) + real(kind=4) :: value + real(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + real(kind=8) :: value + real(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine sd(td,value) + double precision :: value + double precision :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_real + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) + def 
test_all(self, name): + self.check_function(getattr(self.module.f90_return_real, name)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_real.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_real.pyc new file mode 100644 index 0000000..417cbdd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_return_real.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_semicolon_split.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_semicolon_split.py new file mode 100644 index 0000000..bcd18c8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_semicolon_split.py @@ -0,0 +1,65 @@ +from __future__ import division, absolute_import, print_function + +import platform +import pytest + +from . import util +from numpy.testing import assert_equal + +@pytest.mark.skipif( + platform.system() == 'Darwin', + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation") +class TestMultiline(util.F2PyTest): + suffix = ".pyf" + module_name = "multiline" + code = """ +python module {module} + usercode ''' +void foo(int* x) {{ + char dummy = ';'; + *x = 42; +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + end subroutine foo + end interface +end python module {module} + """.format(module=module_name) + + def test_multiline(self): + assert_equal(self.module.foo(), 42) + + +@pytest.mark.skipif( + platform.system() == 'Darwin', + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation") +class TestCallstatement(util.F2PyTest): + suffix = ".pyf" + module_name = "callstatement" + code = """ +python module {module} + usercode ''' +void foo(int* x) {{ +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + callprotoargument int* + callstatement {{ & + ; & + x = 42; & + }} + end subroutine foo + end interface +end python module {module} + """.format(module=module_name) + + def test_callstatement(self): + assert_equal(self.module.foo(), 42) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_semicolon_split.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_semicolon_split.pyc new file mode 100644 index 0000000..9a5653f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_semicolon_split.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_size.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_size.py new file mode 100644 index 0000000..e2af618 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_size.py @@ -0,0 +1,51 @@ +from __future__ import division, absolute_import, print_function + +import os +import pytest + +from numpy.testing import assert_equal +from . 
import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestSizeSumExample(util.F2PyTest): + sources = [_path('src', 'size', 'foo.f90')] + + @pytest.mark.slow + def test_all(self): + r = self.module.foo([[]]) + assert_equal(r, [0], repr(r)) + + r = self.module.foo([[1, 2]]) + assert_equal(r, [3], repr(r)) + + r = self.module.foo([[1, 2], [3, 4]]) + assert_equal(r, [3, 7], repr(r)) + + r = self.module.foo([[1, 2], [3, 4], [5, 6]]) + assert_equal(r, [3, 7, 11], repr(r)) + + @pytest.mark.slow + def test_transpose(self): + r = self.module.trans([[]]) + assert_equal(r.T, [[]], repr(r)) + + r = self.module.trans([[1, 2]]) + assert_equal(r, [[1], [2]], repr(r)) + + r = self.module.trans([[1, 2, 3], [4, 5, 6]]) + assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r)) + + @pytest.mark.slow + def test_flatten(self): + r = self.module.flatten([[]]) + assert_equal(r, [], repr(r)) + + r = self.module.flatten([[1, 2]]) + assert_equal(r, [1, 2], repr(r)) + + r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) + assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_size.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_size.pyc new file mode 100644 index 0000000..5adcbc8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_size.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_string.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_string.py new file mode 100644 index 0000000..0493c99 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_string.py @@ -0,0 +1,24 @@ +from __future__ import division, absolute_import, print_function + +import os +import pytest + +from numpy.testing import assert_array_equal +import numpy as np +from . 
import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + +class TestString(util.F2PyTest): + sources = [_path('src', 'string', 'char.f90')] + + @pytest.mark.slow + def test_char(self): + strings = np.array(['ab', 'cd', 'ef'], dtype='c').T + inp, out = self.module.char_test.change_strings(strings, strings.shape[1]) + assert_array_equal(inp, strings) + expected = strings.copy() + expected[1, :] = 'AAA' + assert_array_equal(out, expected) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_string.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_string.pyc new file mode 100644 index 0000000..801fb34 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/test_string.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/util.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/util.py new file mode 100644 index 0000000..5fa5dad --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/util.py @@ -0,0 +1,360 @@ +""" +Utility functions for + +- building and importing modules on test time, using a temporary location +- detecting if compilers are present + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import subprocess +import tempfile +import shutil +import atexit +import textwrap +import re +import pytest + +from numpy.compat import asbytes, asstr +from numpy.testing import temppath +from importlib import import_module + +try: + from hashlib import md5 +except ImportError: + from md5 import new as md5 # noqa: F401 + +# +# Maintaining a temporary module directory +# + +_module_dir = None + + +def _cleanup(): + global _module_dir + if _module_dir is not None: + try: + sys.path.remove(_module_dir) + except ValueError: + pass + try: + shutil.rmtree(_module_dir) + except (IOError, OSError): + pass + _module_dir = None + + +def get_module_dir(): + global _module_dir + if _module_dir is None: + _module_dir = tempfile.mkdtemp() + atexit.register(_cleanup) + if _module_dir not in sys.path: + sys.path.insert(0, _module_dir) + return _module_dir + + +def get_temp_module_name(): + # Assume single-threaded, and the module dir usable only by this thread + d = get_module_dir() + for j in range(5403, 9999999): + name = "_test_ext_module_%d" % j + fn = os.path.join(d, name) + if name not in sys.modules and not os.path.isfile(fn + '.py'): + return name + raise RuntimeError("Failed to create a temporary module name") + + +def _memoize(func): + memo = {} + + def wrapper(*a, **kw): + key = repr((a, kw)) + if key not in memo: + try: + memo[key] = func(*a, **kw) + except Exception as e: + memo[key] = e + raise + ret = memo[key] + if isinstance(ret, Exception): + raise ret + return ret + wrapper.__name__ = func.__name__ + return wrapper + +# +# Building modules +# + + +@_memoize +def build_module(source_files, options=[], skip=[], only=[], module_name=None): + """ + Compile and import a f2py module, built from the given files. 
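What build_module automates is, in effect, the manual f2py workflow: compile Fortran source into an extension module, then import it. A minimal sketch of that workflow using the public numpy.f2py.compile helper; the module name demo and the subroutine are invented for illustration, and a working Fortran compiler on the PATH is assumed.

import numpy.f2py

fsource = """
      subroutine double_it(x)
      real*8 x
cf2py intent(in,out) x
      x = 2*x
      end
"""
# Build an extension module named 'demo' (hypothetical name) from the source.
numpy.f2py.compile(fsource, modulename='demo', verbose=False)

import demo
print(demo.double_it(21.0))  # expected: 42.0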
+ + """ + + code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; " + "f2py2e.main()" % repr(sys.path)) + + d = get_module_dir() + + # Copy files + dst_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError("%s is not a file" % fn) + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + fn = os.path.join(os.path.dirname(fn), '.f2py_f2cmap') + if os.path.isfile(fn): + dst = os.path.join(d, os.path.basename(fn)) + if not os.path.isfile(dst): + shutil.copyfile(fn, dst) + + # Prepare options + if module_name is None: + module_name = get_temp_module_name() + f2py_opts = ['-c', '-m', module_name] + options + dst_sources + if skip: + f2py_opts += ['skip:'] + skip + if only: + f2py_opts += ['only:'] + only + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, '-c', code] + f2py_opts + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError("Running f2py failed: %s\n%s" + % (cmd[4:], asstr(out))) + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Import + return import_module(module_name) + + +@_memoize +def build_code(source_code, options=[], skip=[], only=[], suffix=None, + module_name=None): + """ + Compile and import Fortran code using f2py. + + """ + if suffix is None: + suffix = '.f' + with temppath(suffix=suffix) as path: + with open(path, 'w') as f: + f.write(source_code) + return build_module([path], options=options, skip=skip, only=only, + module_name=module_name) + +# +# Check if compilers are available at all... +# + +_compiler_status = None + + +def _get_compiler_status(): + global _compiler_status + if _compiler_status is not None: + return _compiler_status + + _compiler_status = (False, False, False) + + # XXX: this is really ugly. But I don't know how to invoke Distutils + # in a safer way... + code = """ +import os +import sys +sys.path = %(syspath)s + +def configuration(parent_name='',top_path=None): + global config + from numpy.distutils.misc_util import Configuration + config = Configuration('', parent_name, top_path) + return config + +from numpy.distutils.core import setup +setup(configuration=configuration) + +config_cmd = config.get_config_cmd() +have_c = config_cmd.try_compile('void foo() {}') +print('COMPILERS:%%d,%%d,%%d' %% (have_c, + config.have_f77c(), + config.have_f90c())) +sys.exit(99) +""" + code = code % dict(syspath=repr(sys.path)) + + with temppath(suffix='.py') as script: + with open(script, 'w') as f: + f.write(code) + + cmd = [sys.executable, script, 'config'] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + + m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out) + if m: + _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), + bool(int(m.group(3)))) + # Finished + return _compiler_status + + +def has_c_compiler(): + return _get_compiler_status()[0] + + +def has_f77_compiler(): + return _get_compiler_status()[1] + + +def has_f90_compiler(): + return _get_compiler_status()[2] + +# +# Building with distutils +# + + +@_memoize +def build_module_distutils(source_files, config_code, module_name, **kw): + """ + Build a module via distutils and import it. 
+ + """ + from numpy.distutils.misc_util import Configuration + from numpy.distutils.core import setup + + d = get_module_dir() + + # Copy files + dst_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError("%s is not a file" % fn) + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + # Build script + config_code = textwrap.dedent(config_code).replace("\n", "\n ") + + code = """\ +import os +import sys +sys.path = %(syspath)s + +def configuration(parent_name='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('', parent_name, top_path) + %(config_code)s + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) +""" % dict(config_code=config_code, syspath=repr(sys.path)) + + script = os.path.join(d, get_temp_module_name() + '.py') + dst_sources.append(script) + f = open(script, 'wb') + f.write(asbytes(code)) + f.close() + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, script, 'build_ext', '-i'] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError("Running distutils build failed: %s\n%s" + % (cmd[4:], asstr(out))) + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Import + __import__(module_name) + return sys.modules[module_name] + +# +# Unittest convenience +# + + +class F2PyTest(object): + code = None + sources = None + options = [] + skip = [] + only = [] + suffix = '.f' + module = None + module_name = None + + def setup(self): + if sys.platform == 'win32': + pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)') + + if self.module is not None: + return + + # Check compiler availability first + if not has_c_compiler(): + pytest.skip("No C compiler available") + + codes = [] + if self.sources: + codes.extend(self.sources) + if self.code is not None: + codes.append(self.suffix) + + needs_f77 = False + needs_f90 = False + for fn in codes: + if fn.endswith('.f'): + needs_f77 = True + elif fn.endswith('.f90'): + needs_f90 = True + if needs_f77 and not has_f77_compiler(): + pytest.skip("No Fortran 77 compiler available") + if needs_f90 and not has_f90_compiler(): + pytest.skip("No Fortran 90 compiler available") + + # Build the module + if self.code is not None: + self.module = build_code(self.code, options=self.options, + skip=self.skip, only=self.only, + suffix=self.suffix, + module_name=self.module_name) + + if self.sources is not None: + self.module = build_module(self.sources, options=self.options, + skip=self.skip, only=self.only, + module_name=self.module_name) diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/util.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/util.pyc new file mode 100644 index 0000000..69e1a92 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/tests/util.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/use_rules.py b/project/venv/lib/python2.7/site-packages/numpy/f2py/use_rules.py new file mode 100644 index 0000000..6f44f16 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/f2py/use_rules.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python +""" + +Build 'use others module data' mechanism for f2py2e. + +Unfinished. 
+ +Copyright 2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2000/09/10 12:35:43 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.3 $"[10:-1] + +f2py_version = 'See `f2py -v`' + + +from .auxfuncs import ( + applyrules, dictappend, gentitle, hasnote, outmess +) + + +usemodule_rules = { + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ +\t #name# = get_#name#()\\n\\ +Arguments:\\n\\ +#docstr#\"; +extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); +static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { +/*#decl#*/ +\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; +printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); +\treturn Py_BuildValue(\"\"); +capi_fail: +\treturn NULL; +} +""", + 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', + 'need': ['F_MODFUNC'] +} + +################ + + +def buildusevars(m, r): + ret = {} + outmess( + '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name'])) + varsmap = {} + revmap = {} + if 'map' in r: + for k in r['map'].keys(): + if r['map'][k] in revmap: + outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( + r['map'][k], k, revmap[r['map'][k]])) + else: + revmap[r['map'][k]] = k + if 'only' in r and r['only']: + for v in r['map'].keys(): + if r['map'][v] in m['vars']: + + if revmap[r['map'][v]] == v: + varsmap[v] = r['map'][v] + else: + outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' % + (v, r['map'][v])) + else: + outmess( + '\t\t\tNo definition for variable "%s=>%s". 
Skipping.\n' % (v, r['map'][v])) + else: + for v in m['vars'].keys(): + if v in revmap: + varsmap[v] = revmap[v] + else: + varsmap[v] = v + for v in varsmap.keys(): + ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) + return ret + + +def buildusevar(name, realname, vars, usemodulename): + outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( + name, realname)) + ret = {} + vrd = {'name': name, + 'realname': realname, + 'REALNAME': realname.upper(), + 'usemodulename': usemodulename, + 'USEMODULENAME': usemodulename.upper(), + 'texname': name.replace('_', '\\_'), + 'begintitle': gentitle('%s=>%s' % (name, realname)), + 'endtitle': gentitle('end of %s=>%s' % (name, realname)), + 'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename) + } + nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', + 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} + vrd['texnamename'] = name + for i in nummap.keys(): + vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i]) + if hasnote(vars[realname]): + vrd['note'] = vars[realname]['note'] + rd = dictappend({}, vrd) + + print(name, realname, vars[realname]) + ret = applyrules(usemodule_rules, rd) + return ret diff --git a/project/venv/lib/python2.7/site-packages/numpy/f2py/use_rules.pyc b/project/venv/lib/python2.7/site-packages/numpy/f2py/use_rules.pyc new file mode 100644 index 0000000..f9567f0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/f2py/use_rules.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/fft/__init__.py new file mode 100644 index 0000000..44243b4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/fft/__init__.py @@ -0,0 +1,11 @@ +from __future__ import division, absolute_import, print_function + +# To get sub-modules +from .info import __doc__ + +from .fftpack import * +from .helper import * + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/fft/__init__.pyc new file mode 100644 index 0000000..277804a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/fft/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/fftpack.py b/project/venv/lib/python2.7/site-packages/numpy/fft/fftpack.py new file mode 100644 index 0000000..de67593 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/fft/fftpack.py @@ -0,0 +1,1302 @@ +""" +Discrete Fourier Transforms + +Routines in this module: + +fft(a, n=None, axis=-1) +ifft(a, n=None, axis=-1) +rfft(a, n=None, axis=-1) +irfft(a, n=None, axis=-1) +hfft(a, n=None, axis=-1) +ihfft(a, n=None, axis=-1) +fftn(a, s=None, axes=None) +ifftn(a, s=None, axes=None) +rfftn(a, s=None, axes=None) +irfftn(a, s=None, axes=None) +fft2(a, s=None, axes=(-2,-1)) +ifft2(a, s=None, axes=(-2, -1)) +rfft2(a, s=None, axes=(-2,-1)) +irfft2(a, s=None, axes=(-2, -1)) + +i = inverse transform +r = transform of purely real data +h = Hermite transform +n = n-dimensional transform +2 = 2-dimensional transform +(Note: 2D routines are just nD routines with different default +behavior.) + +The underlying code for these functions is an f2c-translated and modified +version of the FFTPACK routines. 
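The letter key above maps one-to-one onto the public numpy.fft names; a quick sketch exercising the convention, assuming nothing beyond numpy itself:

import numpy as np

a = np.random.rand(8)                        # purely real input
A = np.fft.rfft(a)                           # r: real-input transform, length n//2 + 1
assert np.allclose(np.fft.irfft(A, n=8), a)  # i: inverse transform

b = np.random.rand(4, 4)
assert np.allclose(np.fft.fft2(b), np.fft.fftn(b))  # 2: the 2-D default of the n-D routine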
+ +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', + 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] + +import functools + +from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate, + take, sqrt) +from numpy.core.multiarray import normalize_axis_index +from numpy.core import overrides +from . import fftpack_lite as fftpack +from .helper import _FFTCache + +_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32) +_real_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32) + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.fft') + + +def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti, + work_function=fftpack.cfftf, fft_cache=_fft_cache): + a = asarray(a) + axis = normalize_axis_index(axis, a.ndim) + + if n is None: + n = a.shape[axis] + + if n < 1: + raise ValueError("Invalid number of FFT data points (%d) specified." + % n) + + # We have to ensure that only a single thread can access a wsave array + # at any given time. Thus we remove it from the cache and insert it + # again after it has been used. Multiple threads might create multiple + # copies of the wsave array. This is intentional and a limitation of + # the current C code. + wsave = fft_cache.pop_twiddle_factors(n) + if wsave is None: + wsave = init_function(n) + + if a.shape[axis] != n: + s = list(a.shape) + if s[axis] > n: + index = [slice(None)]*len(s) + index[axis] = slice(0, n) + a = a[tuple(index)] + else: + index = [slice(None)]*len(s) + index[axis] = slice(0, s[axis]) + s[axis] = n + z = zeros(s, a.dtype.char) + z[tuple(index)] = a + a = z + + if axis != a.ndim - 1: + a = swapaxes(a, axis, -1) + r = work_function(a, wsave) + if axis != a.ndim - 1: + r = swapaxes(r, axis, -1) + + # As soon as we put wsave back into the cache, another thread could pick it + # up and start using it, so we must not do this until after we're + # completely done using it ourselves. + fft_cache.put_twiddle_factors(n, wsave) + + return r + + +def _unitary(norm): + if norm not in (None, "ortho"): + raise ValueError("Invalid norm value %s, should be None or \"ortho\"." + % norm) + return norm is not None + + +def _fft_dispatcher(a, n=None, axis=None, norm=None): + return (a,) + + +@array_function_dispatch(_fft_dispatcher) +def fft(a, n=None, axis=-1, norm=None): + """ + Compute the one-dimensional discrete Fourier Transform. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) with the efficient Fast Fourier Transform (FFT) + algorithm [CT]. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {None, "ortho"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is None. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + if `axes` is larger than the last axis of `a`. 
+ + See Also + -------- + numpy.fft : for definition of the DFT and conventions used. + ifft : The inverse of `fft`. + fft2 : The two-dimensional FFT. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + fftfreq : Frequency bins for given FFT parameters. + + Notes + ----- + FFT (Fast Fourier Transform) refers to a way the discrete Fourier + Transform (DFT) can be calculated efficiently, by using symmetries in the + calculated terms. The symmetry is highest when `n` is a power of 2, and + the transform is therefore most efficient for these sizes. + + The DFT is defined, with the conventions used in this implementation, in + the documentation for the `numpy.fft` module. + + References + ---------- + .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + + Examples + -------- + >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) + array([ -3.44505240e-16 +1.14383329e-17j, + 8.00000000e+00 -5.71092652e-15j, + 2.33482938e-16 +1.22460635e-16j, + 1.64863782e-15 +1.77635684e-15j, + 9.95839695e-17 +2.33482938e-16j, + 0.00000000e+00 +1.66837030e-15j, + 1.14383329e-17 +1.22460635e-16j, + -1.64863782e-15 +1.77635684e-15j]) + + In this example, real input has an FFT which is Hermitian, i.e., symmetric + in the real part and anti-symmetric in the imaginary part, as described in + the `numpy.fft` documentation: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(256) + >>> sp = np.fft.fft(np.sin(t)) + >>> freq = np.fft.fftfreq(t.shape[-1]) + >>> plt.plot(freq, sp.real, freq, sp.imag) + [, ] + >>> plt.show() + + """ + + a = asarray(a).astype(complex, copy=False) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache) + if _unitary(norm): + output *= 1 / sqrt(n) + return output + + +@array_function_dispatch(_fft_dispatcher) +def ifft(a, n=None, axis=-1, norm=None): + """ + Compute the one-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier transform computed by `fft`. In other words, + ``ifft(fft(a)) == a`` to within numerical accuracy. + For a general description of the algorithm and definitions, + see `numpy.fft`. + + The input should be ordered in the same way as is returned by `fft`, + i.e., + + * ``a[0]`` should contain the zero frequency term, + * ``a[1:n//2]`` should contain the positive-frequency terms, + * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in + increasing order starting from the most negative frequency. + + For an even number of input points, ``A[n//2]`` represents the sum of + the values at the positive and negative Nyquist frequencies, as the two + are aliased together. See `numpy.fft` for details. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + See notes about padding issues. + axis : int, optional + Axis over which to compute the inverse DFT. If not given, the last + axis is used. + norm : {None, "ortho"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is None. 
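The cropping and zero-padding behaviour of n described above is easy to observe; a small sketch with arbitrary values:

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
print(len(np.fft.ifft(a, n=2)))  # 2 -> input was cropped to a[:2]
print(len(np.fft.ifft(a, n=8)))  # 8 -> input was zero-padded at the end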
+ + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axes` is larger than the last axis of `a`. + + See Also + -------- + numpy.fft : An introduction, with definitions and general explanations. + fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse + ifft2 : The two-dimensional inverse FFT. + ifftn : The n-dimensional inverse FFT. + + Notes + ----- + If the input parameter `n` is larger than the size of the input, the input + is padded by appending zeros at the end. Even though this is the common + approach, it might lead to surprising results. If a different padding is + desired, it must be performed before calling `ifft`. + + Examples + -------- + >>> np.fft.ifft([0, 4, 0, 0]) + array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) + + Create and plot a band-limited signal with random phases: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(400) + >>> n = np.zeros((400,), dtype=complex) + >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) + >>> s = np.fft.ifft(n) + >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') + ... + >>> plt.legend(('real', 'imaginary')) + ... + >>> plt.show() + + """ + # The copy may be required for multithreading. + a = array(a, copy=True, dtype=complex) + if n is None: + n = a.shape[axis] + unitary = _unitary(norm) + output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) + return output * (1 / (sqrt(n) if unitary else n)) + + + +@array_function_dispatch(_fft_dispatcher) +def rfft(a, n=None, axis=-1, norm=None): + """ + Compute the one-dimensional discrete Fourier Transform for real input. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) of a real-valued array by means of an efficient algorithm + called the Fast Fourier Transform (FFT). + + Parameters + ---------- + a : array_like + Input array + n : int, optional + Number of points along transformation axis in the input to use. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {None, "ortho"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is None. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + If `n` is even, the length of the transformed axis is ``(n/2)+1``. + If `n` is odd, the length is ``(n+1)/2``. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + irfft : The inverse of `rfft`. + fft : The one-dimensional FFT of general (complex) input. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + + Notes + ----- + When the DFT is computed for purely real input, the output is + Hermitian-symmetric, i.e. the negative frequency terms are just the complex + conjugates of the corresponding positive-frequency terms, and the + negative-frequency terms are therefore redundant. 
This function does not + compute the negative frequency terms, and the length of the transformed + axis of the output is therefore ``n//2 + 1``. + + When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains + the zero-frequency term 0*fs, which is real due to Hermitian symmetry. + + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case. + + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> np.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) + >>> np.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + # The copy may be required for multithreading. + a = array(a, copy=True, dtype=float) + output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, + _real_fft_cache) + if _unitary(norm): + if n is None: + n = a.shape[axis] + output *= 1 / sqrt(n) + return output + + +@array_function_dispatch(_fft_dispatcher) +def irfft(a, n=None, axis=-1, norm=None): + """ + Compute the inverse of the n-point DFT for real input. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e. the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is determined from + the length of the input along the axis specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {None, "ortho"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is None. + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. + fft : The one-dimensional FFT. + irfft2 : The inverse of the two-dimensional FFT of real input. 
+
+    irfftn : The inverse of the *n*-dimensional FFT of real input.
+
+    Notes
+    -----
+    Returns the real valued `n`-point inverse discrete Fourier transform
+    of `a`, where `a` contains the non-negative frequency terms of a
+    Hermitian-symmetric sequence. `n` is the length of the result, not the
+    input.
+
+    If you specify an `n` such that `a` must be zero-padded or truncated, the
+    extra/removed values will be added/removed at high frequencies. One can
+    thus resample a series to `m` points via Fourier interpolation by:
+    ``a_resamp = irfft(rfft(a), m)``.
+
+    Examples
+    --------
+    >>> np.fft.ifft([1, -1j, -1, 1j])
+    array([ 0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j])
+    >>> np.fft.irfft([1, -1j, -1])
+    array([ 0.,  1.,  0.,  0.])
+
+    Notice how the last term in the input to the ordinary `ifft` is the
+    complex conjugate of the second term, and the output has zero imaginary
+    part everywhere.  When calling `irfft`, the negative frequencies are not
+    specified, and the output array is purely real.
+
+    """
+    # The copy may be required for multithreading.
+    a = array(a, copy=True, dtype=complex)
+    if n is None:
+        n = (a.shape[axis] - 1) * 2
+    unitary = _unitary(norm)
+    output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
+                      _real_fft_cache)
+    return output * (1 / (sqrt(n) if unitary else n))
+
+
+@array_function_dispatch(_fft_dispatcher)
+def hfft(a, n=None, axis=-1, norm=None):
+    """
+    Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
+    spectrum.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array.
+    n : int, optional
+        Length of the transformed axis of the output. For `n` output
+        points, ``n//2 + 1`` input points are necessary. If the input is
+        longer than this, it is cropped. If it is shorter than this, it is
+        padded with zeros. If `n` is not given, it is determined from the
+        length of the input along the axis specified by `axis`.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last
+        axis is used.
+    norm : {None, "ortho"}, optional
+        Normalization mode (see `numpy.fft`). Default is None.
+
+        .. versionadded:: 1.10.0
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is `n`, or, if `n` is not given,
+        ``2*m - 2`` where ``m`` is the length of the transformed axis of
+        the input. To get an odd number of output points, `n` must be
+        specified, for instance as ``2*m - 1`` in the typical case.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is larger than the last axis of `a`.
+
+    See also
+    --------
+    rfft : Compute the one-dimensional FFT for real input.
+    ihfft : The inverse of `hfft`.
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain. So here it's `hfft` for
+    which you must supply the length of the result if it is to be odd.
+
+    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
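+
+    As a quick, illustrative check of these identities (a hedged sketch; it
+    assumes only an arbitrary real test vector from ``np.random.rand``):
+
+    >>> a = np.random.rand(5)
+    >>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2*len(a) - 2)), a)
+    True
+    >>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2*len(a) - 1)), a)
+    True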
+
+    Examples
+    --------
+    >>> signal = np.array([1, 2, 3, 4, 3, 2])
+    >>> np.fft.fft(signal)
+    array([ 15.+0.j,  -4.+0.j,   0.+0.j,  -1.-0.j,   0.+0.j,  -4.+0.j])
+    >>> np.fft.hfft(signal[:4]) # Input first half of signal
+    array([ 15.,  -4.,   0.,  -1.,   0.,  -4.])
+    >>> np.fft.hfft(signal, 6)  # Input entire signal and truncate
+    array([ 15.,  -4.,   0.,  -1.,   0.,  -4.])
+
+
+    >>> signal = np.array([[1, 1.j], [-1.j, 2]])
+    >>> np.conj(signal.T) - signal   # check Hermitian symmetry
+    array([[ 0.-0.j,  0.+0.j],
+           [ 0.+0.j,  0.-0.j]])
+    >>> freq_spectrum = np.fft.hfft(signal)
+    >>> freq_spectrum
+    array([[ 1.,  1.],
+           [ 2., -2.]])
+
+    """
+    # The copy may be required for multithreading.
+    a = array(a, copy=True, dtype=complex)
+    if n is None:
+        n = (a.shape[axis] - 1) * 2
+    unitary = _unitary(norm)
+    return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
+
+
+@array_function_dispatch(_fft_dispatcher)
+def ihfft(a, n=None, axis=-1, norm=None):
+    """
+    Compute the inverse FFT of a signal that has Hermitian symmetry.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    n : int, optional
+        Length of the inverse FFT, the number of points along
+        transformation axis in the input to use.  If `n` is smaller than
+        the length of the input, the input is cropped.  If it is larger,
+        the input is padded with zeros. If `n` is not given, the length of
+        the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the inverse FFT. If not given, the last
+        axis is used.
+    norm : {None, "ortho"}, optional
+        Normalization mode (see `numpy.fft`). Default is None.
+
+        .. versionadded:: 1.10.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is ``n//2 + 1``.
+
+    See also
+    --------
+    hfft, irfft
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain. So here it's `hfft` for
+    which you must supply the length of the result if it is to be odd:
+
+    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+    Examples
+    --------
+    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
+    >>> np.fft.ifft(spectrum)
+    array([ 1.+0.j,  2.-0.j,  3.+0.j,  4.+0.j,  3.+0.j,  2.-0.j])
+    >>> np.fft.ihfft(spectrum)
+    array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j])
+
+    """
+    # The copy may be required for multithreading.
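+    # Sketch note (added comment): the body below realizes ihfft through the
+    # identity ihfft(a) == conj(rfft(a)) / n  (or / sqrt(n) when norm="ortho").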
+
+    a = array(a, copy=True, dtype=float)
+    if n is None:
+        n = a.shape[axis]
+    unitary = _unitary(norm)
+    output = conjugate(rfft(a, n, axis))
+    return output * (1 / (sqrt(n) if unitary else n))
+
+
+def _cook_nd_args(a, s=None, axes=None, invreal=0):
+    if s is None:
+        shapeless = 1
+        if axes is None:
+            s = list(a.shape)
+        else:
+            s = take(a.shape, axes)
+    else:
+        shapeless = 0
+    s = list(s)
+    if axes is None:
+        axes = list(range(-len(s), 0))
+    if len(s) != len(axes):
+        raise ValueError("Shape and axes have different lengths.")
+    if invreal and shapeless:
+        s[-1] = (a.shape[axes[-1]] - 1) * 2
+    return s, axes
+
+
+def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
+    a = asarray(a)
+    s, axes = _cook_nd_args(a, s, axes)
+    itl = list(range(len(axes)))
+    itl.reverse()
+    for ii in itl:
+        a = function(a, n=s[ii], axis=axes[ii], norm=norm)
+    return a
+
+
+def _fftn_dispatcher(a, s=None, axes=None, norm=None):
+    return (a,)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fftn(a, s=None, axes=None, norm=None):
+    """
+    Compute the N-dimensional discrete Fourier Transform.
+
+    This function computes the *N*-dimensional discrete Fourier Transform over
+    any number of axes in an *M*-dimensional array by means of the Fast Fourier
+    Transform (FFT).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` mean that the transform over that axis is
+        performed multiple times.
+    norm : {None, "ortho"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is None.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    rfftn : The *n*-dimensional FFT of real input.
+    fft2 : The two-dimensional FFT.
+    fftshift : Shifts zero-frequency terms to centre of array.
+
+    Notes
+    -----
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of all axes, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    See `numpy.fft` for details, definitions and conventions used.
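+
+    As an illustrative sketch (not part of the formal definitions above): the
+    N-dimensional transform factors into repeated one-dimensional FFTs, which
+    is exactly how the `_raw_fftnd` helper composes it:
+
+    >>> x = np.random.rand(4, 6)
+    >>> np.allclose(np.fft.fftn(x),
+    ...             np.fft.fft(np.fft.fft(x, axis=1), axis=0))
+    True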
+
+    Examples
+    --------
+    >>> a = np.mgrid[:3, :3, :3][0]
+    >>> np.fft.fftn(a, axes=(1, 2))
+    array([[[  0.+0.j,   0.+0.j,   0.+0.j],
+            [  0.+0.j,   0.+0.j,   0.+0.j],
+            [  0.+0.j,   0.+0.j,   0.+0.j]],
+           [[  9.+0.j,   0.+0.j,   0.+0.j],
+            [  0.+0.j,   0.+0.j,   0.+0.j],
+            [  0.+0.j,   0.+0.j,   0.+0.j]],
+           [[ 18.+0.j,   0.+0.j,   0.+0.j],
+            [  0.+0.j,   0.+0.j,   0.+0.j],
+            [  0.+0.j,   0.+0.j,   0.+0.j]]])
+    >>> np.fft.fftn(a, (2, 2), axes=(0, 1))
+    array([[[ 2.+0.j,  2.+0.j,  2.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]],
+           [[-2.+0.j, -2.+0.j, -2.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]]])
+
+    >>> import matplotlib.pyplot as plt
+    >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
+    ...                      2 * np.pi * np.arange(200) / 34)
+    >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
+    >>> FS = np.fft.fftn(S)
+    >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
+
+    >>> plt.show()
+
+    """
+
+    return _raw_fftnd(a, s, axes, fft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifftn(a, s=None, axes=None, norm=None):
+    """
+    Compute the N-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the N-dimensional discrete
+    Fourier Transform over any number of axes in an M-dimensional array by
+    means of the Fast Fourier Transform (FFT).  In other words,
+    ``ifftn(fftn(a)) == a`` to within numerical accuracy.
+    For a description of the definitions and conventions used, see `numpy.fft`.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fftn`, i.e. it should have the term for zero frequency
+    in all axes in the low-order corner, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``ifft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.  See notes for issue on `ifft` zero padding.
+    axes : sequence of ints, optional
+        Axes over which to compute the IFFT.  If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` mean that the inverse transform over that
+        axis is performed multiple times.
+    norm : {None, "ortho"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is None.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
+    ifft : The one-dimensional inverse FFT.
+    ifft2 : The two-dimensional inverse FFT.
+    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
+        of array.
+
+    Notes
+    -----
+    See `numpy.fft` for definitions and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension.  Although this is the common
+    approach, it might lead to surprising results.  If another form of zero
+    padding is desired, it must be performed before `ifftn` is called.
+
+    Examples
+    --------
+    >>> a = np.eye(4)
+    >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
+    array([[ 1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],
+           [ 0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
+           [ 0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [ 0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])
+
+
+    Create and plot an image with band-limited frequency content:
+
+    >>> import matplotlib.pyplot as plt
+    >>> n = np.zeros((200,200), dtype=complex)
+    >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
+    >>> im = np.fft.ifftn(n).real
+    >>> plt.imshow(im)
+
+    >>> plt.show()
+
+    """
+
+    return _raw_fftnd(a, s, axes, ifft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fft2(a, s=None, axes=(-2, -1), norm=None):
+    """
+    Compute the 2-dimensional discrete Fourier Transform.
+
+    This function computes the *n*-dimensional discrete Fourier Transform
+    over any axes in an *M*-dimensional array by means of the
+    Fast Fourier Transform (FFT).  By default, the transform is computed over
+    the last two axes of the input array, i.e., a 2-dimensional FFT.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last two
+        axes are used.  A repeated index in `axes` means the transform over
+        that axis is performed multiple times.  A one-element sequence means
+        that a one-dimensional FFT is performed.
+    norm : {None, "ortho"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is None.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    ifft2 : The inverse two-dimensional FFT.
+    fft : The one-dimensional FFT.
+    fftn : The *n*-dimensional FFT.
+    fftshift : Shifts zero-frequency terms to the center of the array.
+        For two-dimensional input, swaps first and third quadrants, and second
+        and fourth quadrants.
+
+    Notes
+    -----
+    `fft2` is just `fftn` with a different default for `axes`.
+
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of the transformed axes, the positive frequency terms
+    in the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    the axes, in order of decreasingly negative frequency.
+
+    See `fftn` for details and a plotting example, and `numpy.fft` for
+    definitions and conventions used.
+
+
+    Examples
+    --------
+    >>> a = np.mgrid[:5, :5][0]
+    >>> np.fft.fft2(a)
+    array([[ 50.0 +0.j        ,   0.0 +0.j        ,   0.0 +0.j        ,
+              0.0 +0.j        ,   0.0 +0.j        ],
+           [-12.5+17.20477401j,   0.0 +0.j        ,   0.0 +0.j        ,
+              0.0 +0.j        ,   0.0 +0.j        ],
+           [-12.5 +4.0614962j ,   0.0 +0.j        ,   0.0 +0.j        ,
+              0.0 +0.j        ,   0.0 +0.j        ],
+           [-12.5 -4.0614962j ,   0.0 +0.j        ,   0.0 +0.j        ,
+              0.0 +0.j        ,   0.0 +0.j        ],
+           [-12.5-17.20477401j,   0.0 +0.j        ,   0.0 +0.j        ,
+              0.0 +0.j        ,   0.0 +0.j        ]])
+
+    """
+
+    return _raw_fftnd(a, s, axes, fft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifft2(a, s=None, axes=(-2, -1), norm=None):
+    """
+    Compute the 2-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the 2-dimensional discrete Fourier
+    Transform over any number of axes in an M-dimensional array by means of
+    the Fast Fourier Transform (FFT).  In other words, ``ifft2(fft2(a)) == a``
+    to within numerical accuracy.  By default, the inverse transform is
+    computed over the last two axes of the input array.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fft2`, i.e. it should have the term for zero frequency
+    in the low-order corner of the two axes, the positive frequency terms in
+    the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    both axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
+        ``s[1]`` to axis 1, etc.).  This corresponds to `n` for ``ifft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.  See notes for issue on `ifft` zero padding.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last two
+        axes are used.  A repeated index in `axes` means the transform over
+        that axis is performed multiple times.  A one-element sequence means
+        that a one-dimensional FFT is performed.
+    norm : {None, "ortho"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is None.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
+    ifftn : The inverse of the *n*-dimensional FFT.
+    fft : The one-dimensional FFT.
+    ifft : The one-dimensional inverse FFT.
+
+    Notes
+    -----
+    `ifft2` is just `ifftn` with a different default for `axes`.
+
+    See `ifftn` for details and a plotting example, and `numpy.fft` for
+    definition and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension.
Although this is the common
+    approach, it might lead to surprising results.  If another form of zero
+    padding is desired, it must be performed before `ifft2` is called.
+
+    Examples
+    --------
+    >>> a = 4 * np.eye(4)
+    >>> np.fft.ifft2(a)
+    array([[ 1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],
+           [ 0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
+           [ 0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [ 0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])
+
+    """
+
+    return _raw_fftnd(a, s, axes, ifft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfftn(a, s=None, axes=None, norm=None):
+    """
+    Compute the N-dimensional discrete Fourier Transform for real input.
+
+    This function computes the N-dimensional discrete Fourier Transform over
+    any number of axes in an M-dimensional real array by means of the Fast
+    Fourier Transform (FFT).  By default, all axes are transformed, with the
+    real transform performed over the last axis, while the remaining
+    transforms are complex.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape (length along each transformed axis) to use from the input.
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
+        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {None, "ortho"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is None.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+        The length of the last axis transformed will be ``s[-1]//2+1``,
+        while the remaining transformed axes will have lengths according to
+        `s`, or unchanged from the input.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
+        of real input.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    rfft : The one-dimensional FFT of real input.
+    fftn : The n-dimensional FFT.
+    rfft2 : The two-dimensional FFT of real input.
+
+    Notes
+    -----
+    The transform for real input is performed over the last transformation
+    axis, as by `rfft`, then the transform over the remaining axes is
+    performed as by `fftn`.  The order of the output is as for `rfft` for the
+    final transformation axis, and as for `fftn` for the remaining
+    transformation axes.
+
+    See `fft` for details, definitions and conventions used.
+
+    Examples
+    --------
+    >>> a = np.ones((2, 2, 2))
+    >>> np.fft.rfftn(a)
+    array([[[ 8.+0.j,  0.+0.j],
+            [ 0.+0.j,  0.+0.j]],
+           [[ 0.+0.j,  0.+0.j],
+            [ 0.+0.j,  0.+0.j]]])
+
+    >>> np.fft.rfftn(a, axes=(2, 0))
+    array([[[ 4.+0.j,  0.+0.j],
+            [ 4.+0.j,  0.+0.j]],
+           [[ 0.+0.j,  0.+0.j],
+            [ 0.+0.j,  0.+0.j]]])
+
+    """
+    # The copy may be required for multithreading.
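+    # Sketch note (added comment): rfft runs once over the last requested
+    # axis, halving it to s[-1]//2 + 1 bins; the loop below then applies the
+    # ordinary complex fft over the remaining axes.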
+
+    a = array(a, copy=True, dtype=float)
+    s, axes = _cook_nd_args(a, s, axes)
+    a = rfft(a, s[-1], axes[-1], norm)
+    for ii in range(len(axes)-1):
+        a = fft(a, s[ii], axes[ii], norm)
+    return a
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfft2(a, s=None, axes=(-2, -1), norm=None):
+    """
+    Compute the 2-dimensional FFT of a real array.
+
+    Parameters
+    ----------
+    a : array
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape of the FFT.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.
+    norm : {None, "ortho"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is None.
+
+    Returns
+    -------
+    out : ndarray
+        The result of the real 2-D FFT.
+
+    See Also
+    --------
+    rfftn : Compute the N-dimensional discrete Fourier Transform for real
+        input.
+
+    Notes
+    -----
+    This is really just `rfftn` with different default behavior.
+    For more details see `rfftn`.
+
+    """
+
+    return rfftn(a, s, axes, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def irfftn(a, s=None, axes=None, norm=None):
+    """
+    Compute the inverse of the N-dimensional FFT of real input.
+
+    This function computes the inverse of the N-dimensional discrete
+    Fourier Transform for real input over any number of axes in an
+    M-dimensional array by means of the Fast Fourier Transform (FFT).  In
+    other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
+    accuracy.  (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
+    and for the same reason.)
+
+    The input should be ordered in the same way as is returned by `rfftn`,
+    i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
+    along all the other axes.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
+        number of input points used along this axis, except for the last axis,
+        where ``s[-1]//2+1`` points of the input are used.
+        Along any axis, if the shape indicated by `s` is smaller than that of
+        the input, the input is cropped.  If it is larger, the input is padded
+        with zeros.  If `s` is not given, the shape of the input along the
+        axes specified by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the inverse FFT.  If not given, the last
+        `len(s)` axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` mean that the inverse transform over that
+        axis is performed multiple times.
+    norm : {None, "ortho"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is None.
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+        The length of each transformed axis is as given by the corresponding
+        element of `s`, or the length of the input in every axis except for the
+        last one if `s` is not given.  In the final transformed axis the length
+        of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
+        length of the final transformed axis of the input.  To get an odd
+        number of output points in the final axis, `s` must be specified.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    rfftn : The forward n-dimensional FFT of real input,
+        of which `irfftn` is the inverse.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    irfft : The inverse of the one-dimensional FFT of real input.
+    irfft2 : The inverse of the two-dimensional FFT of real input.
+
+    Notes
+    -----
+    See `fft` for definitions and conventions used.
+
+    See `rfft` for definitions and conventions used for real input.
+
+    Examples
+    --------
+    >>> a = np.zeros((3, 2, 2))
+    >>> a[0, 0, 0] = 3 * 2 * 2
+    >>> np.fft.irfftn(a)
+    array([[[ 1.,  1.],
+            [ 1.,  1.]],
+           [[ 1.,  1.],
+            [ 1.,  1.]],
+           [[ 1.,  1.],
+            [ 1.,  1.]]])
+
+    """
+    # The copy may be required for multithreading.
+    a = array(a, copy=True, dtype=complex)
+    s, axes = _cook_nd_args(a, s, axes, invreal=1)
+    for ii in range(len(axes)-1):
+        a = ifft(a, s[ii], axes[ii], norm)
+    a = irfft(a, s[-1], axes[-1], norm)
+    return a
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def irfft2(a, s=None, axes=(-2, -1), norm=None):
+    """
+    Compute the 2-dimensional inverse FFT of a real array.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array.
+    s : sequence of ints, optional
+        Shape of the inverse FFT.
+    axes : sequence of ints, optional
+        The axes over which to compute the inverse FFT.
+        Default is the last two axes.
+    norm : {None, "ortho"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is None.
+
+    Returns
+    -------
+    out : ndarray
+        The result of the inverse real 2-D FFT.
+
+    See Also
+    --------
+    irfftn : Compute the inverse of the N-dimensional FFT of real input.
+
+    Notes
+    -----
+    This is really `irfftn` with different defaults.
+    For more details see `irfftn`.
+
+    """
+
+    return irfftn(a, s, axes, norm)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/fftpack.pyc b/project/venv/lib/python2.7/site-packages/numpy/fft/fftpack.pyc
new file mode 100644
index 0000000..e860dcf
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/fft/fftpack.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/fftpack_lite.so b/project/venv/lib/python2.7/site-packages/numpy/fft/fftpack_lite.so
new file mode 100755
index 0000000..65230d5
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/fft/fftpack_lite.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/helper.py b/project/venv/lib/python2.7/site-packages/numpy/fft/helper.py
new file mode 100644
index 0000000..864768d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/fft/helper.py
@@ -0,0 +1,325 @@
+"""
+Discrete Fourier Transforms - helper.py
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import collections
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading
+from numpy.compat import integer_types
+from numpy.core import integer, empty, arange, asarray, roll
+from numpy.core.overrides import array_function_dispatch, set_module
+
+# Created by Pearu Peterson, September 2002
+
+__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
+
+integer_types = integer_types + (integer,)
+
+
+def _fftshift_dispatcher(x, axes=None):
+    return (x,)
+
+
+@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
+def fftshift(x, axes=None):
+    """
+    Shift the zero-frequency component to the center of the spectrum.
+
+    This function swaps half-spaces for all axes listed (defaults to all).
+    Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
+ + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to shift. Default is None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + ifftshift : The inverse of `fftshift`. + + Examples + -------- + >>> freqs = np.fft.fftfreq(10, 0.1) + >>> freqs + array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.]) + >>> np.fft.fftshift(freqs) + array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) + + Shift the zero-frequency component only along the second axis: + + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.fftshift(freqs, axes=(1,)) + array([[ 2., 0., 1.], + [-4., 3., 4.], + [-1., -3., -2.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [dim // 2 for dim in x.shape] + elif isinstance(axes, integer_types): + shift = x.shape[axes] // 2 + else: + shift = [x.shape[ax] // 2 for ax in axes] + + return roll(x, shift, axes) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def ifftshift(x, axes=None): + """ + The inverse of `fftshift`. Although identical for even-length `x`, the + functions differ by one sample for odd-length `x`. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to calculate. Defaults to None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + fftshift : Shift zero-frequency component to the center of the spectrum. + + Examples + -------- + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.ifftshift(np.fft.fftshift(freqs)) + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [-(dim // 2) for dim in x.shape] + elif isinstance(axes, integer_types): + shift = -(x.shape[axes] // 2) + else: + shift = [-(x.shape[ax] // 2) for ax in axes] + + return roll(x, shift, axes) + + +@set_module('numpy.fft') +def fftfreq(n, d=1.0): + """ + Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = np.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = np.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 2.5 , 3.75, -5. 
, -3.75, -2.5 , -1.25])
+
+    """
+    if not isinstance(n, integer_types):
+        raise ValueError("n should be an integer")
+    val = 1.0 / (n * d)
+    results = empty(n, int)
+    N = (n-1)//2 + 1
+    p1 = arange(0, N, dtype=int)
+    results[:N] = p1
+    p2 = arange(-(n//2), 0, dtype=int)
+    results[N:] = p2
+    return results * val
+
+
+@set_module('numpy.fft')
+def rfftfreq(n, d=1.0):
+    """
+    Return the Discrete Fourier Transform sample frequencies
+    (for usage with rfft, irfft).
+
+    The returned float array `f` contains the frequency bin centers in cycles
+    per unit of the sample spacing (with zero at the start).  For instance, if
+    the sample spacing is in seconds, then the frequency unit is cycles/second.
+
+    Given a window length `n` and a sample spacing `d`::
+
+      f = [0, 1, ...,     n/2-1,     n/2] / (d*n)   if n is even
+      f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n)   if n is odd
+
+    Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
+    the Nyquist frequency component is considered to be positive.
+
+    Parameters
+    ----------
+    n : int
+        Window length.
+    d : scalar, optional
+        Sample spacing (inverse of the sampling rate). Defaults to 1.
+
+    Returns
+    -------
+    f : ndarray
+        Array of length ``n//2 + 1`` containing the sample frequencies.
+
+    Examples
+    --------
+    >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
+    >>> fourier = np.fft.rfft(signal)
+    >>> n = signal.size
+    >>> sample_rate = 100
+    >>> freq = np.fft.fftfreq(n, d=1./sample_rate)
+    >>> freq
+    array([  0.,  10.,  20.,  30.,  40., -50., -40., -30., -20., -10.])
+    >>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
+    >>> freq
+    array([  0.,  10.,  20.,  30.,  40.,  50.])
+
+    """
+    if not isinstance(n, integer_types):
+        raise ValueError("n should be an integer")
+    val = 1.0/(n*d)
+    N = n//2 + 1
+    results = arange(0, N, dtype=int)
+    return results * val
+
+
+class _FFTCache(object):
+    """
+    Cache for the FFT twiddle factors as an LRU (least recently used) cache.
+
+    Parameters
+    ----------
+    max_size_in_mb : int
+        Maximum memory usage of the cache before items are being evicted.
+    max_item_count : int
+        Maximum item count of the cache before items are being evicted.
+
+    Notes
+    -----
+    Items will be evicted if either limit has been reached upon getting and
+    setting.  The maximum memory usage is not strictly the given
+    ``max_size_in_mb`` but rather
+    ``max(max_size_in_mb, 1.5 * size_of_largest_item)``.  Thus the cache will
+    never be completely cleared - at least one item will remain and a single
+    large item can cause the cache to retain several smaller items even if the
+    given maximum cache size has been exceeded.
+    """
+    def __init__(self, max_size_in_mb, max_item_count):
+        self._max_size_in_bytes = max_size_in_mb * 1024 ** 2
+        self._max_item_count = max_item_count
+        self._dict = collections.OrderedDict()
+        self._lock = threading.Lock()
+
+    def put_twiddle_factors(self, n, factors):
+        """
+        Store twiddle factors for an FFT of length n in the cache.
+
+        Putting multiple twiddle factors for a certain n will store them
+        multiple times.
+
+        Parameters
+        ----------
+        n : int
+            Data length for the FFT.
+        factors : ndarray
+            The actual twiddle values.
+        """
+        with self._lock:
+            # Pop + later add to move it to the end for LRU behavior.
+            # Internally everything is stored in a dictionary whose values are
+            # lists.
+            try:
+                value = self._dict.pop(n)
+            except KeyError:
+                value = []
+            value.append(factors)
+            self._dict[n] = value
+            self._prune_cache()
+
+    def pop_twiddle_factors(self, n):
+        """
+        Pop twiddle factors for an FFT of length n from the cache.
+
+        Will return None if the requested twiddle factors are not available in
+        the cache.
+
+        Parameters
+        ----------
+        n : int
+            Data length for the FFT.
+
+        Returns
+        -------
+        out : ndarray or None
+            The retrieved twiddle factors if available, else None.
+        """
+        with self._lock:
+            if n not in self._dict or not self._dict[n]:
+                return None
+            # Pop + later add to move it to the end for LRU behavior.
+            all_values = self._dict.pop(n)
+            value = all_values.pop()
+            # Only put back if there are still some arrays left in the list.
+            if all_values:
+                self._dict[n] = all_values
+            return value
+
+    def _prune_cache(self):
+        # Always keep at least one item.
+        while len(self._dict) > 1 and (
+                len(self._dict) > self._max_item_count or self._check_size()):
+            self._dict.popitem(last=False)
+
+    def _check_size(self):
+        item_sizes = [sum(_j.nbytes for _j in _i)
+                      for _i in self._dict.values() if _i]
+        if not item_sizes:
+            return False
+        max_size = max(self._max_size_in_bytes, 1.5 * max(item_sizes))
+        return sum(item_sizes) > max_size
diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/helper.pyc b/project/venv/lib/python2.7/site-packages/numpy/fft/helper.pyc
new file mode 100644
index 0000000..26315c2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/fft/helper.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/info.py b/project/venv/lib/python2.7/site-packages/numpy/fft/info.py
new file mode 100644
index 0000000..cb6526b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/fft/info.py
@@ -0,0 +1,187 @@
+"""
+Discrete Fourier Transform (:mod:`numpy.fft`)
+=============================================
+
+.. currentmodule:: numpy.fft
+
+Standard FFTs
+-------------
+
+.. autosummary::
+   :toctree: generated/
+
+   fft       Discrete Fourier transform.
+   ifft      Inverse discrete Fourier transform.
+   fft2      Discrete Fourier transform in two dimensions.
+   ifft2     Inverse discrete Fourier transform in two dimensions.
+   fftn      Discrete Fourier transform in N-dimensions.
+   ifftn     Inverse discrete Fourier transform in N dimensions.
+
+Real FFTs
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   rfft      Real discrete Fourier transform.
+   irfft     Inverse real discrete Fourier transform.
+   rfft2     Real discrete Fourier transform in two dimensions.
+   irfft2    Inverse real discrete Fourier transform in two dimensions.
+   rfftn     Real discrete Fourier transform in N dimensions.
+   irfftn    Inverse real discrete Fourier transform in N dimensions.
+
+Hermitian FFTs
+--------------
+
+.. autosummary::
+   :toctree: generated/
+
+   hfft      Hermitian discrete Fourier transform.
+   ihfft     Inverse Hermitian discrete Fourier transform.
+
+Helper routines
+---------------
+
+.. autosummary::
+   :toctree: generated/
+
+   fftfreq   Discrete Fourier Transform sample frequencies.
+   rfftfreq  DFT sample frequencies (for usage with rfft, irfft).
+   fftshift  Shift zero-frequency component to center of spectrum.
+   ifftshift Inverse of fftshift.
+
+
+Background information
+----------------------
+
+Fourier analysis is fundamentally a method for expressing a function as a
+sum of periodic components, and for recovering the function from those
+components.  When both the function and its Fourier transform are
+replaced with discretized counterparts, it is called the discrete Fourier
+transform (DFT).
The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. + +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. + +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the default normalization by :math:`1/n`. + +Normalization +------------- +The default normalization has the direct transforms unscaled and the inverse +transforms are scaled by :math:`1/n`. It is possible to obtain unitary +transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is +`None`) so that both direct and inverse transforms will be scaled by +:math:`1/\\sqrt{n}`. 
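+
+As a quick illustration of the ``"ortho"`` mode (a hedged sketch; any real
+or complex input works), the unitary transform preserves the vector norm:
+
+>>> x = np.random.rand(8)
+>>> np.allclose(np.linalg.norm(x), np.linalg.norm(np.fft.fft(x, norm="ortho")))
+True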
+
+Real and Hermitian transforms
+-----------------------------
+
+When the input is purely real, its transform is Hermitian, i.e., the
+component at frequency :math:`f_k` is the complex conjugate of the
+component at frequency :math:`-f_k`, which means that for real
+inputs there is no information in the negative frequency components that
+is not already available from the positive frequency components.
+The family of `rfft` functions is
+designed to operate on real inputs, and exploits this symmetry by
+computing only the positive frequency components, up to and including the
+Nyquist frequency.  Thus, ``n`` input points produce ``n/2+1`` complex
+output points.  The inverses of this family assume the same symmetry of
+their input, and for an output of ``n`` points use ``n/2+1`` input points.
+
+Correspondingly, when the spectrum is purely real, the signal is
+Hermitian.  The `hfft` family of functions exploits this symmetry by
+using ``n/2+1`` complex points in the input (time) domain for ``n`` real
+points in the frequency domain.
+
+In higher dimensions, FFTs are used, e.g., for image analysis and
+filtering.  The computational efficiency of the FFT means that it can
+also be a faster way to compute large convolutions, using the property
+that a convolution in the time domain is equivalent to a point-by-point
+multiplication in the frequency domain.
+
+Higher dimensions
+-----------------
+
+In two dimensions, the DFT is defined as
+
+.. math::
+   A_{kl} =  \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
+   a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
+   \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
+
+which extends in the obvious way to higher dimensions, and the inverses
+in higher dimensions also extend in the same way.
+
+References
+----------
+
+.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+        machine calculation of complex Fourier series," *Math. Comput.*
+        19: 297-301.
+
+.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P.,
+        2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
+        12-13.  Cambridge Univ. Press, Cambridge, UK.
+
+Examples
+--------
+
+For examples, see the various functions.
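+
+As a single end-to-end sketch (an illustrative round trip; the input values
+are arbitrary):
+
+>>> a = [1.0, 2.0, 1.0, -1.0]
+>>> A = np.fft.fft(a)
+>>> np.allclose(np.fft.ifft(A), a)
+True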
+ +""" +from __future__ import division, absolute_import, print_function + +depends = ['core'] diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/info.pyc b/project/venv/lib/python2.7/site-packages/numpy/fft/info.pyc new file mode 100644 index 0000000..a2abfd6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/fft/info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/setup.py b/project/venv/lib/python2.7/site-packages/numpy/fft/setup.py new file mode 100644 index 0000000..cd99a82 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/fft/setup.py @@ -0,0 +1,19 @@ +from __future__ import division, print_function + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('fft', parent_package, top_path) + + config.add_data_dir('tests') + + # Configure fftpack_lite + config.add_extension('fftpack_lite', + sources=['fftpack_litemodule.c', 'fftpack.c'] + ) + + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/fft/setup.pyc new file mode 100644 index 0000000..9e392bb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/fft/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/__init__.pyc new file mode 100644 index 0000000..b700526 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_fftpack.py b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_fftpack.py new file mode 100644 index 0000000..8d6cd84 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_fftpack.py @@ -0,0 +1,185 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.random import random +from numpy.testing import ( + assert_array_almost_equal, assert_array_equal, assert_raises, + ) +import threading +import sys +if sys.version_info[0] >= 3: + import queue +else: + import Queue as queue + + +def fft1(x): + L = len(x) + phase = -2j*np.pi*(np.arange(L)/float(L)) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x*np.exp(phase), axis=1) + + +class TestFFTShift(object): + + def test_fft_n(self): + assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) + + +class TestFFT1D(object): + + def test_fft(self): + x = random(30) + 1j*random(30) + assert_array_almost_equal(fft1(x), np.fft.fft(x)) + assert_array_almost_equal(fft1(x) / np.sqrt(30), + np.fft.fft(x, norm="ortho")) + + def test_ifft(self): + x = random(30) + 1j*random(30) + assert_array_almost_equal(x, np.fft.ifft(np.fft.fft(x))) + assert_array_almost_equal( + x, np.fft.ifft(np.fft.fft(x, norm="ortho"), norm="ortho")) + + def test_fft2(self): + x = random((30, 20)) + 1j*random((30, 20)) + assert_array_almost_equal(np.fft.fft(np.fft.fft(x, axis=1), axis=0), + np.fft.fft2(x)) + assert_array_almost_equal(np.fft.fft2(x) / np.sqrt(30 * 20), + np.fft.fft2(x, norm="ortho")) + + def test_ifft2(self): + x = 
random((30, 20)) + 1j*random((30, 20))
+        assert_array_almost_equal(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
+                                  np.fft.ifft2(x))
+        assert_array_almost_equal(np.fft.ifft2(x) * np.sqrt(30 * 20),
+                                  np.fft.ifft2(x, norm="ortho"))
+
+    def test_fftn(self):
+        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+        assert_array_almost_equal(
+            np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
+            np.fft.fftn(x))
+        assert_array_almost_equal(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
+                                  np.fft.fftn(x, norm="ortho"))
+
+    def test_ifftn(self):
+        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+        assert_array_almost_equal(
+            np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
+            np.fft.ifftn(x))
+        assert_array_almost_equal(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
+                                  np.fft.ifftn(x, norm="ortho"))
+
+    def test_rfft(self):
+        x = random(30)
+        for n in [x.size, 2*x.size]:
+            for norm in [None, 'ortho']:
+                assert_array_almost_equal(
+                    np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
+                    np.fft.rfft(x, n=n, norm=norm))
+            assert_array_almost_equal(np.fft.rfft(x, n=n) / np.sqrt(n),
+                                      np.fft.rfft(x, n=n, norm="ortho"))
+
+    def test_irfft(self):
+        x = random(30)
+        assert_array_almost_equal(x, np.fft.irfft(np.fft.rfft(x)))
+        assert_array_almost_equal(
+            x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"))
+
+    def test_rfft2(self):
+        x = random((30, 20))
+        assert_array_almost_equal(np.fft.fft2(x)[:, :11], np.fft.rfft2(x))
+        assert_array_almost_equal(np.fft.rfft2(x) / np.sqrt(30 * 20),
+                                  np.fft.rfft2(x, norm="ortho"))
+
+    def test_irfft2(self):
+        x = random((30, 20))
+        assert_array_almost_equal(x, np.fft.irfft2(np.fft.rfft2(x)))
+        assert_array_almost_equal(
+            x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"))
+
+    def test_rfftn(self):
+        x = random((30, 20, 10))
+        assert_array_almost_equal(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x))
+        assert_array_almost_equal(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
+                                  np.fft.rfftn(x, norm="ortho"))
+
+    def test_irfftn(self):
+        x = random((30, 20, 10))
+        assert_array_almost_equal(x, np.fft.irfftn(np.fft.rfftn(x)))
+        assert_array_almost_equal(
+            x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"))
+
+    def test_hfft(self):
+        x = random(14) + 1j*random(14)
+        x_herm = np.concatenate((random(1), x, random(1)))
+        x = np.concatenate((x_herm, x[::-1].conj()))
+        assert_array_almost_equal(np.fft.fft(x), np.fft.hfft(x_herm))
+        assert_array_almost_equal(np.fft.hfft(x_herm) / np.sqrt(30),
+                                  np.fft.hfft(x_herm, norm="ortho"))
+
+    def test_ihfft(self):
+        x = random(14) + 1j*random(14)
+        x_herm = np.concatenate((random(1), x, random(1)))
+        x = np.concatenate((x_herm, x[::-1].conj()))
+        assert_array_almost_equal(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)))
+        assert_array_almost_equal(
+            x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
+                                 norm="ortho"))
+
+    def test_all_1d_norm_preserving(self):
+        # verify that round-trip transforms are norm-preserving
+        x = random(30)
+        x_norm = np.linalg.norm(x)
+        n = x.size * 2
+        func_pairs = [(np.fft.fft, np.fft.ifft),
+                      (np.fft.rfft, np.fft.irfft),
+                      # hfft: order so the first function takes x.size samples
+                      #       (necessary for comparison to x_norm above)
+                      (np.fft.ihfft, np.fft.hfft),
+                      ]
+        for forw, back in func_pairs:
+            for n in [x.size, 2*x.size]:
+                for norm in [None, 'ortho']:
+                    tmp = forw(x, n=n, norm=norm)
+                    tmp = back(tmp, n=n, norm=norm)
+                    assert_array_almost_equal(x_norm,
+                                              np.linalg.norm(tmp))
+
+class TestFFTThreadSafe(object):
+    threads = 16
+    input_shape = (800, 200)
+
+    def _test_mtsame(self, func, *args):
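+        # Call `func` concurrently from many threads on identical arguments
+        # and verify each thread gets the single-threaded reference result.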
+ def worker(args, q): + q.put(func(*args)) + + q = queue.Queue() + expected = func(*args) + + # Spin off a bunch of threads to call the same function simultaneously + t = [threading.Thread(target=worker, args=(args, q)) + for i in range(self.threads)] + [x.start() for x in t] + + [x.join() for x in t] + # Make sure all threads returned the correct value + for i in range(self.threads): + assert_array_equal(q.get(timeout=5), expected, + 'Function returned wrong value in multithreaded context') + + def test_fft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.fft, a) + + def test_ifft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.ifft, a) + + def test_rfft(self): + a = np.ones(self.input_shape) + self._test_mtsame(np.fft.rfft, a) + + def test_irfft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.irfft, a) diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_fftpack.pyc b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_fftpack.pyc new file mode 100644 index 0000000..0639125 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_fftpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_helper.py b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_helper.py new file mode 100644 index 0000000..8d315fa --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_helper.py @@ -0,0 +1,248 @@ +"""Test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 + +""" +from __future__ import division, absolute_import, print_function +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_equal +from numpy import fft, pi +from numpy.fft.helper import _FFTCache + + +class TestFFTShift(object): + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + + def test_inverse(self): + for n in [1, 4, 9, 100, 211]: + x = np.random.random((n,)) + assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self): + freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) + assert_array_almost_equal(fft.fftshift(freqs, axes=0), + fft.fftshift(freqs, axes=(0,))) + assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + + assert_array_almost_equal(fft.fftshift(freqs), shifted) + assert_array_almost_equal(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self): + """ Test 2D input, which has uneven dimension sizes """ + freqs = [ + [0, 1], + [2, 3], + [4, 5] + ] + + # shift in dimension 0 + shift_dim0 = [ + [4, 5], + [0, 1], + [2, 3] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = [ + [1, 0], 
+ [3, 2], + [5, 4] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1) + assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs) + + # shift in both dimensions + shift_dim_both = [ + [5, 4], + [1, 0], + [3, 2] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs) + assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) + + def test_equal_to_original(self): + """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """ + from numpy.compat import integer_types + from numpy.core import asarray, concatenate, arange, take + + def original_fftshift(x, axes=None): + """ How fftshift was implemented in v1.14""" + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, integer_types): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + def original_ifftshift(x, axes=None): + """ How ifftshift was implemented in v1.14 """ + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, integer_types): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = n - (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + # create possible 2d array combinations and try all possible keywords + # compare output to original functions + for i in range(16): + for j in range(16): + for axes_keyword in [0, 1, None, (0,), (0, 1)]: + inp = np.random.rand(i, j) + + assert_array_almost_equal(fft.fftshift(inp, axes_keyword), + original_fftshift(inp, axes_keyword)) + + assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), + original_ifftshift(inp, axes_keyword)) + + +class TestFFTFreq(object): + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + assert_array_almost_equal(9*fft.fftfreq(9), x) + assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + assert_array_almost_equal(10*fft.fftfreq(10), x) + assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) + + +class TestRFFTFreq(object): + + def test_definition(self): + x = [0, 1, 2, 3, 4] + assert_array_almost_equal(9*fft.rfftfreq(9), x) + assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, 5] + assert_array_almost_equal(10*fft.rfftfreq(10), x) + assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) + + +class TestIRFFTN(object): + + def test_not_last_axis_success(self): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j*ai + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) + + +class TestFFTCache(object): + + def test_basic_behaviour(self): + c = _FFTCache(max_size_in_mb=1, max_item_count=4) + + # Put + c.put_twiddle_factors(1, np.ones(2, dtype=np.float32)) + c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32)) + + # Get + assert_array_almost_equal(c.pop_twiddle_factors(1), 
+                                  np.ones(2, dtype=np.float32))
+        assert_array_almost_equal(c.pop_twiddle_factors(2),
+                                  np.zeros(2, dtype=np.float32))
+
+        # Nothing should be left.
+        assert_equal(len(c._dict), 0)
+
+        # Now put everything in twice so it can be retrieved once and each will
+        # still have one item left.
+        for _ in range(2):
+            c.put_twiddle_factors(1, np.ones(2, dtype=np.float32))
+            c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32))
+        assert_array_almost_equal(c.pop_twiddle_factors(1),
+                                  np.ones(2, dtype=np.float32))
+        assert_array_almost_equal(c.pop_twiddle_factors(2),
+                                  np.zeros(2, dtype=np.float32))
+        assert_equal(len(c._dict), 2)
+
+    def test_automatic_pruning(self):
+        # That's around 2600 single precision samples.
+        c = _FFTCache(max_size_in_mb=0.01, max_item_count=4)
+
+        c.put_twiddle_factors(1, np.ones(200, dtype=np.float32))
+        c.put_twiddle_factors(2, np.ones(200, dtype=np.float32))
+        assert_equal(list(c._dict.keys()), [1, 2])
+
+        # This is larger than the limit but should still be kept.
+        c.put_twiddle_factors(3, np.ones(3000, dtype=np.float32))
+        assert_equal(list(c._dict.keys()), [1, 2, 3])
+        # Add one more.
+        c.put_twiddle_factors(4, np.ones(3000, dtype=np.float32))
+        # The other three should no longer exist.
+        assert_equal(list(c._dict.keys()), [4])
+
+        # Now test the max item count pruning.
+        c = _FFTCache(max_size_in_mb=0.01, max_item_count=2)
+        c.put_twiddle_factors(2, np.empty(2))
+        c.put_twiddle_factors(1, np.empty(2))
+        # Can still be accessed.
+        assert_equal(list(c._dict.keys()), [2, 1])
+
+        c.put_twiddle_factors(3, np.empty(2))
+        # 1 and 3 can still be accessed - c[2] has been touched least recently
+        # and is thus evicted.
+        assert_equal(list(c._dict.keys()), [1, 3])
+
+        # One last test. We will add a single large item that is slightly
+        # bigger than the cache size. Some small items can still be added.
+        c = _FFTCache(max_size_in_mb=0.01, max_item_count=5)
+        c.put_twiddle_factors(1, np.ones(3000, dtype=np.float32))
+        c.put_twiddle_factors(2, np.ones(2, dtype=np.float32))
+        c.put_twiddle_factors(3, np.ones(2, dtype=np.float32))
+        c.put_twiddle_factors(4, np.ones(2, dtype=np.float32))
+        assert_equal(list(c._dict.keys()), [1, 2, 3, 4])
+
+        # One more big item. This time it is 6 smaller ones but they are
+        # counted as one big item.
+        for _ in range(6):
+            c.put_twiddle_factors(5, np.ones(500, dtype=np.float32))
+        # '1' no longer in the cache. Rest still in the cache.
+        assert_equal(list(c._dict.keys()), [2, 3, 4, 5])
+
+        # Another big item - should now be the only item in the cache.
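# --- Editor's sketch, not part of the patched file: the fftshift/fftfreq
# --- contract the tests above exercise. `n = 8` is an arbitrary choice.
import numpy as np

n = 8
freqs = np.fft.fftfreq(n)                 # [0, 1/8, 2/8, 3/8, -4/8, -3/8, -2/8, -1/8]
shifted = np.fft.fftshift(freqs)          # zero-frequency component moved to the centre
assert (np.diff(shifted) > 0).all()       # shifted axis is monotonically increasing
assert np.allclose(np.fft.ifftshift(shifted), freqs)  # ifftshift exactly inverts fftshift
# (test_automatic_pruning resumes below.)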
+ c.put_twiddle_factors(6, np.ones(4000, dtype=np.float32)) + assert_equal(list(c._dict.keys()), [6]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_helper.pyc b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_helper.pyc new file mode 100644 index 0000000..40dc258 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/fft/tests/test_helper.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/lib/__init__.py new file mode 100644 index 0000000..c175715 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/__init__.py @@ -0,0 +1,51 @@ +from __future__ import division, absolute_import, print_function + +import math + +from .info import __doc__ +from numpy.version import version as __version__ + +from .type_check import * +from .index_tricks import * +from .function_base import * +from .mixins import * +from .nanfunctions import * +from .shape_base import * +from .stride_tricks import * +from .twodim_base import * +from .ufunclike import * +from .histograms import * + +from . import scimath as emath +from .polynomial import * +#import convertcode +from .utils import * +from .arraysetops import * +from .npyio import * +from .financial import * +from .arrayterator import Arrayterator +from .arraypad import * +from ._version import * +from numpy.core._multiarray_umath import tracemalloc_domain + +__all__ = ['emath', 'math', 'tracemalloc_domain'] +__all__ += type_check.__all__ +__all__ += index_tricks.__all__ +__all__ += function_base.__all__ +__all__ += mixins.__all__ +__all__ += shape_base.__all__ +__all__ += stride_tricks.__all__ +__all__ += twodim_base.__all__ +__all__ += ufunclike.__all__ +__all__ += arraypad.__all__ +__all__ += polynomial.__all__ +__all__ += utils.__all__ +__all__ += arraysetops.__all__ +__all__ += npyio.__all__ +__all__ += financial.__all__ +__all__ += nanfunctions.__all__ +__all__ += histograms.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/__init__.pyc new file mode 100644 index 0000000..4a8734c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/_datasource.py b/project/venv/lib/python2.7/site-packages/numpy/lib/_datasource.py new file mode 100644 index 0000000..463266e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/_datasource.py @@ -0,0 +1,795 @@ +"""A file interface for handling local and remote data files. + +The goal of datasource is to abstract some of the file system operations +when dealing with data files so the researcher doesn't have to know all the +low-level details. Through datasource, a researcher can obtain and use a +file with one function call, regardless of location of the file. + +DataSource is meant to augment standard python libraries, not replace them. +It should work seamlessly with standard file IO operations and the os +module. + +DataSource files can originate locally or remotely: + +- local files : '/home/guido/src/local/data.txt' +- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' + +DataSource files can also be compressed or uncompressed. Currently only +gzip, bz2 and xz are supported. 
+ +Example:: + + >>> # Create a DataSource, use os.curdir (default) for local storage. + >>> ds = datasource.DataSource() + >>> + >>> # Open a remote file. + >>> # DataSource downloads the file, stores it locally in: + >>> # './www.google.com/index.html' + >>> # opens the file and returns a file object. + >>> fp = ds.open('http://www.google.com/index.html') + >>> + >>> # Use the file as you normally would + >>> fp.read() + >>> fp.close() + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import warnings +import shutil +import io + +from numpy.core.overrides import set_module + + +_open = open + + +def _check_mode(mode, encoding, newline): + """Check mode and that encoding and newline are compatible. + + Parameters + ---------- + mode : str + File open mode. + encoding : str + File encoding. + newline : str + Newline for text files. + + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + +def _python2_bz2open(fn, mode, encoding, newline): + """Wrapper to open bz2 in text mode. + + Parameters + ---------- + fn : str + File name + mode : {'r', 'w'} + File mode. Note that bz2 Text files are not supported. + encoding : str + Ignored, text bz2 files not supported in Python2. + newline : str + Ignored, text bz2 files not supported in Python2. + """ + import bz2 + + _check_mode(mode, encoding, newline) + + if "t" in mode: + # BZ2File is missing necessary functions for TextIOWrapper + warnings.warn("Assuming latin1 encoding for bz2 text file in Python2", + RuntimeWarning, stacklevel=5) + mode = mode.replace("t", "") + return bz2.BZ2File(fn, mode) + +def _python2_gzipopen(fn, mode, encoding, newline): + """ Wrapper to open gzip in text mode. + + Parameters + ---------- + fn : str, bytes, file + File path or opened file. + mode : str + File mode. The actual files are opened as binary, but will decoded + using the specified `encoding` and `newline`. + encoding : str + Encoding to be used when reading/writing as text. + newline : str + Newline to be used when reading/writing as text. + + """ + import gzip + # gzip is lacking read1 needed for TextIOWrapper + class GzipWrap(gzip.GzipFile): + def read1(self, n): + return self.read(n) + + _check_mode(mode, encoding, newline) + + gz_mode = mode.replace("t", "") + + if isinstance(fn, (str, bytes)): + binary_file = GzipWrap(fn, gz_mode) + elif hasattr(fn, "read") or hasattr(fn, "write"): + binary_file = GzipWrap(None, gz_mode, fileobj=fn) + else: + raise TypeError("filename must be a str or bytes object, or a file") + + if "t" in mode: + return io.TextIOWrapper(binary_file, encoding, newline=newline) + else: + return binary_file + + +# Using a class instead of a module-level dictionary +# to reduce the initial 'import numpy' overhead by +# deferring the import of lzma, bz2 and gzip until needed + +# TODO: .zip support, .tar support? +class _FileOpeners(object): + """ + Container for different methods to open (un-)compressed files. + + `_FileOpeners` contains a dictionary that holds one method for each + supported file format. Attribute lookup is implemented in such a way + that an instance of `_FileOpeners` itself can be indexed with the keys + of that dictionary. 
Currently uncompressed files as well as files + compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. + + Notes + ----- + `_file_openers`, an instance of `_FileOpeners`, is made available for + use in the `_datasource` module. + + Examples + -------- + >>> np.lib._datasource._file_openers.keys() + [None, '.bz2', '.gz', '.xz', '.lzma'] + >>> np.lib._datasource._file_openers['.gz'] is gzip.open + True + + """ + + def __init__(self): + self._loaded = False + self._file_openers = {None: io.open} + + def _load(self): + if self._loaded: + return + + try: + import bz2 + if sys.version_info[0] >= 3: + self._file_openers[".bz2"] = bz2.open + else: + self._file_openers[".bz2"] = _python2_bz2open + except ImportError: + pass + + try: + import gzip + if sys.version_info[0] >= 3: + self._file_openers[".gz"] = gzip.open + else: + self._file_openers[".gz"] = _python2_gzipopen + except ImportError: + pass + + try: + import lzma + self._file_openers[".xz"] = lzma.open + self._file_openers[".lzma"] = lzma.open + except (ImportError, AttributeError): + # There are incompatible backports of lzma that do not have the + # lzma.open attribute, so catch that as well as ImportError. + pass + + self._loaded = True + + def keys(self): + """ + Return the keys of currently supported file openers. + + Parameters + ---------- + None + + Returns + ------- + keys : list + The keys are None for uncompressed files and the file extension + strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression + methods. + + """ + self._load() + return list(self._file_openers.keys()) + + def __getitem__(self, key): + self._load() + return self._file_openers[key] + +_file_openers = _FileOpeners() + +def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): + """ + Open `path` with `mode` and return the file object. + + If ``path`` is an URL, it will be downloaded, stored in the + `DataSource` `destpath` directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. + mode : str, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to + append. Available modes depend on the type of object specified by + path. Default is 'r'. + destpath : str, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + The opened file. + + Notes + ----- + This is a convenience function that instantiates a `DataSource` and + returns the file object from ``DataSource.open(path)``. + + """ + + ds = DataSource(destpath) + return ds.open(path, mode, encoding=encoding, newline=newline) + + +@set_module('numpy') +class DataSource(object): + """ + DataSource(destpath='.') + + A generic data source file (file, http, ftp, ...). + + DataSources can be local files or remote files/URLs. The files may + also be compressed or uncompressed. DataSource hides some of the + low-level details of downloading the file, allowing you to simply pass + in a valid file path (or URL) and obtain a file object. + + Parameters + ---------- + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. 
+ The default path is the current directory. + + Notes + ----- + URLs require a scheme string (``http://``) to be used, without it they + will fail:: + + >>> repos = DataSource() + >>> repos.exists('www.google.com/index.html') + False + >>> repos.exists('http://www.google.com/index.html') + True + + Temporary directories are deleted when the DataSource is deleted. + + Examples + -------- + :: + + >>> ds = DataSource('/home/guido') + >>> urlname = 'http://www.google.com/index.html' + >>> gfile = ds.open('http://www.google.com/index.html') # remote file + >>> ds.abspath(urlname) + '/home/guido/www.google.com/site/index.html' + + >>> ds = DataSource(None) # use with temporary file + >>> ds.open('/home/guido/foobar.txt') + + >>> ds.abspath('/home/guido/foobar.txt') + '/tmp/tmpy4pgsP/home/guido/foobar.txt' + + """ + + def __init__(self, destpath=os.curdir): + """Create a DataSource with a local path at destpath.""" + if destpath: + self._destpath = os.path.abspath(destpath) + self._istmpdest = False + else: + import tempfile # deferring import to improve startup time + self._destpath = tempfile.mkdtemp() + self._istmpdest = True + + def __del__(self): + # Remove temp directories + if hasattr(self, '_istmpdest') and self._istmpdest: + shutil.rmtree(self._destpath) + + def _iszip(self, filename): + """Test if the filename is a zip file by looking at the file extension. + + """ + fname, ext = os.path.splitext(filename) + return ext in _file_openers.keys() + + def _iswritemode(self, mode): + """Test if the given mode will open a file for writing.""" + + # Currently only used to test the bz2 files. + _writemodes = ("w", "+") + for c in mode: + if c in _writemodes: + return True + return False + + def _splitzipext(self, filename): + """Split zip extension from filename and return filename. + + *Returns*: + base, zip_ext : {tuple} + + """ + + if self._iszip(filename): + return os.path.splitext(filename) + else: + return filename, None + + def _possible_names(self, filename): + """Return a tuple containing compressed filename variations.""" + names = [filename] + if not self._iszip(filename): + for zipext in _file_openers.keys(): + if zipext: + names.append(filename+zipext) + return names + + def _isurl(self, path): + """Test if path is a net location. Tests the scheme and netloc.""" + + # We do this here to reduce the 'import numpy' initial import time. + if sys.version_info[0] >= 3: + from urllib.parse import urlparse + else: + from urlparse import urlparse + + # BUG : URLs require a scheme string ('http://') to be used. + # www.google.com will fail. + # Should we prepend the scheme for those that don't have it and + # test that also? Similar to the way we append .gz and test for + # for compressed versions of files. + + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + return bool(scheme and netloc) + + def _cache(self, path): + """Cache the file specified by path. + + Creates a copy of the file in the datasource cache. + + """ + # We import these here because importing urllib2 is slow and + # a significant fraction of numpy's total import time. + if sys.version_info[0] >= 3: + from urllib.request import urlopen + from urllib.error import URLError + else: + from urllib2 import urlopen + from urllib2 import URLError + + upath = self.abspath(path) + + # ensure directory exists + if not os.path.exists(os.path.dirname(upath)): + os.makedirs(os.path.dirname(upath)) + + # TODO: Doesn't handle compressed files! 
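# --- Editor's sketch, not part of the patched file: typical DataSource use as
# --- described in the docstring above. The URL is illustrative and the download
# --- only happens if the network is reachable.
import numpy as np

ds = np.lib._datasource.DataSource(None)   # None -> cache files in a temp directory
url = 'http://www.google.com/index.html'
if ds.exists(url):                         # checks the local cache, then the remote URL
    fp = ds.open(url)                      # downloads into the cache, then opens locally
    fp.close()
# (_cache() continues below.)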
+ if self._isurl(path): + try: + openedurl = urlopen(path) + f = _open(upath, 'wb') + try: + shutil.copyfileobj(openedurl, f) + finally: + f.close() + openedurl.close() + except URLError: + raise URLError("URL not found: %s" % path) + else: + shutil.copyfile(path, upath) + return upath + + def _findfile(self, path): + """Searches for ``path`` and returns full path if found. + + If path is an URL, _findfile will cache a local copy and return the + path to the cached file. If path is a local file, _findfile will + return a path to that local file. + + The search will include possible compressed versions of the file + and return the first occurrence found. + + """ + + # Build list of possible local file paths + if not self._isurl(path): + # Valid local paths + filelist = self._possible_names(path) + # Paths in self._destpath + filelist += self._possible_names(self.abspath(path)) + else: + # Cached URLs in self._destpath + filelist = self._possible_names(self.abspath(path)) + # Remote URLs + filelist = filelist + self._possible_names(path) + + for name in filelist: + if self.exists(name): + if self._isurl(name): + name = self._cache(name) + return name + return None + + def abspath(self, path): + """ + Return absolute path of file in the DataSource directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + Notes + ----- + The functionality is based on `os.path.abspath`. + + """ + # We do this here to reduce the 'import numpy' initial import time. + if sys.version_info[0] >= 3: + from urllib.parse import urlparse + else: + from urlparse import urlparse + + # TODO: This should be more robust. Handles case where path includes + # the destpath, but not other sub-paths. Failing case: + # path = /home/guido/datafile.txt + # destpath = /home/alex/ + # upath = self.abspath(path) + # upath == '/home/alex/home/guido/datafile.txt' + + # handle case where path includes self._destpath + splitpath = path.split(self._destpath, 2) + if len(splitpath) > 1: + path = splitpath[1] + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + netloc = self._sanitize_relative_path(netloc) + upath = self._sanitize_relative_path(upath) + return os.path.join(self._destpath, netloc, upath) + + def _sanitize_relative_path(self, path): + """Return a sanitised relative path for which + os.path.abspath(os.path.join(base, path)).startswith(base) + """ + last = None + path = os.path.normpath(path) + while path != last: + last = path + # Note: os.path.join treats '/' as os.sep on Windows + path = path.lstrip(os.sep).lstrip('/') + path = path.lstrip(os.pardir).lstrip('..') + drive, path = os.path.splitdrive(path) # for Windows + return path + + def exists(self, path): + """ + Test if path exists. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. + + Returns + ------- + out : bool + True if `path` exists. 
+ + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + + # First test for local path + if os.path.exists(path): + return True + + # We import this here because importing urllib2 is slow and + # a significant fraction of numpy's total import time. + if sys.version_info[0] >= 3: + from urllib.request import urlopen + from urllib.error import URLError + else: + from urllib2 import urlopen + from urllib2 import URLError + + # Test cached url + upath = self.abspath(path) + if os.path.exists(upath): + return True + + # Test remote url + if self._isurl(path): + try: + netfile = urlopen(path) + netfile.close() + del(netfile) + return True + except URLError: + return False + return False + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object. + + If `path` is an URL, it will be downloaded, stored in the + `DataSource` directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + + # TODO: There is no support for opening a file for writing which + # doesn't exist yet (creating a file). Should there be? + + # TODO: Add a ``subdir`` parameter for specifying the subdirectory + # used to store URLs in self._destpath. + + if self._isurl(path) and self._iswritemode(mode): + raise ValueError("URLs are not writeable") + + # NOTE: _findfile will fail on a new file opened for writing. + found = self._findfile(path) + if found: + _fname, ext = self._splitzipext(found) + if ext == 'bz2': + mode.replace("+", "") + return _file_openers[ext](found, mode=mode, + encoding=encoding, newline=newline) + else: + raise IOError("%s not found." % path) + + +class Repository (DataSource): + """ + Repository(baseurl, destpath='.') + + A data repository where multiple DataSource's share a base + URL/directory. + + `Repository` extends `DataSource` by prepending a base URL (or + directory) to all the files it handles. Use `Repository` when you will + be working with multiple files from one base URL. Initialize + `Repository` with the base URL, then refer to each file by its filename + only. + + Parameters + ---------- + baseurl : str + Path to the local directory or remote location that contains the + data files. + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Examples + -------- + To analyze all files in the repository, do something like this + (note: this is not self-contained code):: + + >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') + >>> for filename in filelist: + ... fp = repos.open(filename) + ... fp.analyze() + ... 
fp.close() + + Similarly you could use a URL for a repository:: + + >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') + + """ + + def __init__(self, baseurl, destpath=os.curdir): + """Create a Repository with a shared url or directory of baseurl.""" + DataSource.__init__(self, destpath=destpath) + self._baseurl = baseurl + + def __del__(self): + DataSource.__del__(self) + + def _fullpath(self, path): + """Return complete path for path. Prepends baseurl if necessary.""" + splitpath = path.split(self._baseurl, 2) + if len(splitpath) == 1: + result = os.path.join(self._baseurl, path) + else: + result = path # path contains baseurl already + return result + + def _findfile(self, path): + """Extend DataSource method to prepend baseurl to ``path``.""" + return DataSource._findfile(self, self._fullpath(path)) + + def abspath(self, path): + """ + Return absolute path of file in the Repository directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + """ + return DataSource.abspath(self, self._fullpath(path)) + + def exists(self, path): + """ + Test if path exists prepending Repository base URL to path. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + return DataSource.exists(self, self._fullpath(path)) + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object prepending Repository base URL. + + If `path` is an URL, it will be downloaded, stored in the + DataSource directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. This may, but does not have to, + include the `baseurl` with which the `Repository` was + initialized. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + return DataSource.open(self, self._fullpath(path), mode, + encoding=encoding, newline=newline) + + def listdir(self): + """ + List files in the source Repository. + + Returns + ------- + files : list of str + List of file names (not containing a directory part). 
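# --- Editor's sketch, not part of the patched file: Repository resolves bare
# --- file names against a shared base via _fullpath(). '/tmp/data' is a
# --- hypothetical base directory.
import os
import numpy as np

repos = np.lib._datasource.Repository('/tmp/data')
# 'file.txt' is prepended with the base, so the absolute path ends in data/file.txt
assert repos.abspath('file.txt').endswith(os.path.join('data', 'file.txt'))
# (the listdir docstring continues below.)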
+ + Notes + ----- + Does not currently work for remote repositories. + + """ + if self._isurl(self._baseurl): + raise NotImplementedError( + "Directory listing of URLs, not supported yet.") + else: + return os.listdir(self._baseurl) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/_datasource.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/_datasource.pyc new file mode 100644 index 0000000..7b01c30 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/_datasource.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/_iotools.py b/project/venv/lib/python2.7/site-packages/numpy/lib/_iotools.py new file mode 100644 index 0000000..8a042f1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/_iotools.py @@ -0,0 +1,953 @@ +"""A collection of functions designed to help I/O with ascii files. + +""" +from __future__ import division, absolute_import, print_function + +__docformat__ = "restructuredtext en" + +import sys +import numpy as np +import numpy.core.numeric as nx +from numpy.compat import asbytes, asunicode, bytes, basestring + +if sys.version_info[0] >= 3: + from builtins import bool, int, float, complex, object, str + unicode = str +else: + from __builtin__ import bool, int, float, complex, object, unicode, str + + +def _decode_line(line, encoding=None): + """Decode bytes from binary input streams. + + Defaults to decoding from 'latin1'. That differs from the behavior of + np.compat.asunicode that decodes from 'ascii'. + + Parameters + ---------- + line : str or bytes + Line to be decoded. + + Returns + ------- + decoded_line : unicode + Unicode in Python 2, a str (unicode) in Python 3. + + """ + if type(line) is bytes: + if encoding is None: + line = line.decode('latin1') + else: + line = line.decode(encoding) + + return line + + +def _is_string_like(obj): + """ + Check whether obj behaves like a string. + """ + try: + obj + '' + except (TypeError, ValueError): + return False + return True + + +def _is_bytes_like(obj): + """ + Check whether obj behaves like a bytes object. + """ + try: + obj + b'' + except (TypeError, ValueError): + return False + return True + + +def _to_filehandle(fname, flag='r', return_opened=False): + """ + Returns the filehandle corresponding to a string or a file. + If the string ends in '.gz', the file is automatically unzipped. + + Parameters + ---------- + fname : string, filehandle + Name of the file whose filehandle must be returned. + flag : string, optional + Flag indicating the status of the file ('r' for read, 'w' for write). + return_opened : boolean, optional + Whether to return the opening status of the file. + """ + if _is_string_like(fname): + if fname.endswith('.gz'): + import gzip + fhd = gzip.open(fname, flag) + elif fname.endswith('.bz2'): + import bz2 + fhd = bz2.BZ2File(fname) + else: + fhd = file(fname, flag) + opened = True + elif hasattr(fname, 'seek'): + fhd = fname + opened = False + else: + raise ValueError('fname must be a string or file handle') + if return_opened: + return fhd, opened + return fhd + + +def has_nested_fields(ndtype): + """ + Returns whether one or several fields of a dtype are nested. + + Parameters + ---------- + ndtype : dtype + Data-type of a structured array. + + Raises + ------ + AttributeError + If `ndtype` does not have a `names` attribute. 
+ + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) + >>> np.lib._iotools.has_nested_fields(dt) + False + + """ + for name in ndtype.names or (): + if ndtype[name].names: + return True + return False + + +def flatten_dtype(ndtype, flatten_base=False): + """ + Unpack a structured data-type by collapsing nested fields and/or fields + with a shape. + + Note that the field names are lost. + + Parameters + ---------- + ndtype : dtype + The datatype to collapse + flatten_base : bool, optional + If True, transform a field with a shape into several fields. Default is + False. + + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ... ('block', int, (2, 3))]) + >>> np.lib._iotools.flatten_dtype(dt) + [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')] + >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) + [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'), + dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'), + dtype('int32')] + + """ + names = ndtype.names + if names is None: + if flatten_base: + return [ndtype.base] * int(np.prod(ndtype.shape)) + return [ndtype.base] + else: + types = [] + for field in names: + info = ndtype.fields[field] + flat_dt = flatten_dtype(info[0], flatten_base) + types.extend(flat_dt) + return types + + +class LineSplitter(object): + """ + Object to split a string at a given delimiter or at given places. + + Parameters + ---------- + delimiter : str, int, or sequence of ints, optional + If a string, character used to delimit consecutive fields. + If an integer or a sequence of integers, width(s) of each field. + comments : str, optional + Character used to mark the beginning of a comment. Default is '#'. + autostrip : bool, optional + Whether to strip each individual field. Default is True. + + """ + + def autostrip(self, method): + """ + Wrapper to strip each member of the output of `method`. + + Parameters + ---------- + method : function + Function that takes a single argument and returns a sequence of + strings. + + Returns + ------- + wrapped : function + The result of wrapping `method`. `wrapped` takes a single input + argument and returns a list of strings that are stripped of + white-space. + + """ + return lambda input: [_.strip() for _ in method(input)] + # + + def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None): + delimiter = _decode_line(delimiter) + comments = _decode_line(comments) + + self.comments = comments + + # Delimiter is a character + if (delimiter is None) or isinstance(delimiter, basestring): + delimiter = delimiter or None + _handyman = self._delimited_splitter + # Delimiter is a list of field widths + elif hasattr(delimiter, '__iter__'): + _handyman = self._variablewidth_splitter + idx = np.cumsum([0] + list(delimiter)) + delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] + # Delimiter is a single integer + elif int(delimiter): + (_handyman, delimiter) = ( + self._fixedwidth_splitter, int(delimiter)) + else: + (_handyman, delimiter) = (self._delimited_splitter, None) + self.delimiter = delimiter + if autostrip: + self._handyman = self.autostrip(_handyman) + else: + self._handyman = _handyman + self.encoding = encoding + # + + def _delimited_splitter(self, line): + """Chop off comments, strip, and split at delimiter. 
""" + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip(" \r\n") + if not line: + return [] + return line.split(self.delimiter) + # + + def _fixedwidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip("\r\n") + if not line: + return [] + fixed = self.delimiter + slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] + return [line[s] for s in slices] + # + + def _variablewidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + if not line: + return [] + slices = self.delimiter + return [line[s] for s in slices] + # + + def __call__(self, line): + return self._handyman(_decode_line(line, self.encoding)) + + +class NameValidator(object): + """ + Object to validate a list of strings to use as field names. + + The strings are stripped of any non alphanumeric character, and spaces + are replaced by '_'. During instantiation, the user can define a list + of names to exclude, as well as a list of invalid characters. Names in + the exclusion list are appended a '_' character. + + Once an instance has been created, it can be called with a list of + names, and a list of valid names will be created. The `__call__` + method accepts an optional keyword "default" that sets the default name + in case of ambiguity. By default this is 'f', so that names will + default to `f0`, `f1`, etc. + + Parameters + ---------- + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default + list ['return', 'file', 'print']. Excluded names are appended an + underscore: for example, `file` becomes `file_` if supplied. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + case_sensitive : {True, False, 'upper', 'lower'}, optional + * If True, field names are case-sensitive. + * If False or 'upper', field names are converted to upper case. + * If 'lower', field names are converted to lower case. + + The default value is True. + replace_space : '_', optional + Character(s) used in replacement of white spaces. + + Notes + ----- + Calling an instance of `NameValidator` is the same as calling its + method `validate`. + + Examples + -------- + >>> validator = np.lib._iotools.NameValidator() + >>> validator(['file', 'field2', 'with space', 'CaSe']) + ['file_', 'field2', 'with_space', 'CaSe'] + + >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], + deletechars='q', + case_sensitive='False') + >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) + ['excl_', 'field2', 'no_', 'with_space', 'case'] + + """ + # + defaultexcludelist = ['return', 'file', 'print'] + defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + # + + def __init__(self, excludelist=None, deletechars=None, + case_sensitive=None, replace_space='_'): + # Process the exclusion list .. + if excludelist is None: + excludelist = [] + excludelist.extend(self.defaultexcludelist) + self.excludelist = excludelist + # Process the list of characters to delete + if deletechars is None: + delete = self.defaultdeletechars + else: + delete = set(deletechars) + delete.add('"') + self.deletechars = delete + # Process the case option ..... 
+ if (case_sensitive is None) or (case_sensitive is True): + self.case_converter = lambda x: x + elif (case_sensitive is False) or case_sensitive.startswith('u'): + self.case_converter = lambda x: x.upper() + elif case_sensitive.startswith('l'): + self.case_converter = lambda x: x.lower() + else: + msg = 'unrecognized case_sensitive value %s.' % case_sensitive + raise ValueError(msg) + # + self.replace_space = replace_space + + def validate(self, names, defaultfmt="f%i", nbfields=None): + """ + Validate a list of strings as field names for a structured array. + + Parameters + ---------- + names : sequence of str + Strings to be validated. + defaultfmt : str, optional + Default format string, used if validating a given string + reduces its length to zero. + nbfields : integer, optional + Final number of validated names, used to expand or shrink the + initial list of names. + + Returns + ------- + validatednames : list of str + The list of validated field names. + + Notes + ----- + A `NameValidator` instance can be called directly, which is the + same as calling `validate`. For examples, see `NameValidator`. + + """ + # Initial checks .............. + if (names is None): + if (nbfields is None): + return None + names = [] + if isinstance(names, basestring): + names = [names, ] + if nbfields is not None: + nbnames = len(names) + if (nbnames < nbfields): + names = list(names) + [''] * (nbfields - nbnames) + elif (nbnames > nbfields): + names = names[:nbfields] + # Set some shortcuts ........... + deletechars = self.deletechars + excludelist = self.excludelist + case_converter = self.case_converter + replace_space = self.replace_space + # Initializes some variables ... + validatednames = [] + seen = dict() + nbempty = 0 + # + for item in names: + item = case_converter(item).strip() + if replace_space: + item = item.replace(' ', replace_space) + item = ''.join([c for c in item if c not in deletechars]) + if item == '': + item = defaultfmt % nbempty + while item in names: + nbempty += 1 + item = defaultfmt % nbempty + nbempty += 1 + elif item in excludelist: + item += '_' + cnt = seen.get(item, 0) + if cnt > 0: + validatednames.append(item + '_%d' % cnt) + else: + validatednames.append(item) + seen[item] = cnt + 1 + return tuple(validatednames) + # + + def __call__(self, names, defaultfmt="f%i", nbfields=None): + return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) + + +def str2bool(value): + """ + Tries to transform a string supposed to represent a boolean to a boolean. + + Parameters + ---------- + value : str + The string that is transformed to a boolean. + + Returns + ------- + boolval : bool + The boolean representation of `value`. + + Raises + ------ + ValueError + If the string is not 'True' or 'False' (case independent) + + Examples + -------- + >>> np.lib._iotools.str2bool('TRUE') + True + >>> np.lib._iotools.str2bool('false') + False + + """ + value = value.upper() + if value == 'TRUE': + return True + elif value == 'FALSE': + return False + else: + raise ValueError("Invalid boolean") + + +class ConverterError(Exception): + """ + Exception raised when an error occurs in a converter for string values. + + """ + pass + + +class ConverterLockError(ConverterError): + """ + Exception raised when an attempt is made to upgrade a locked converter. + + """ + pass + + +class ConversionWarning(UserWarning): + """ + Warning issued when a string converter has a problem. 
+ + Notes + ----- + In `genfromtxt` a `ConversionWarning` is issued if raising exceptions + is explicitly suppressed with the "invalid_raise" keyword. + + """ + pass + + +class StringConverter(object): + """ + Factory class for function transforming a string into another object + (int, float). + + After initialization, an instance can be called to transform a string + into another object. If the string is recognized as representing a + missing value, a default value is returned. + + Attributes + ---------- + func : function + Function used for the conversion. + default : any + Default value to return when the input corresponds to a missing + value. + type : type + Type of the output. + _status : int + Integer representing the order of the conversion. + _mapper : sequence of tuples + Sequence of tuples (dtype, function, default value) to evaluate in + order. + _locked : bool + Holds `locked` parameter. + + Parameters + ---------- + dtype_or_func : {None, dtype, function}, optional + If a `dtype`, specifies the input data type, used to define a basic + function and a default value for missing data. For example, when + `dtype` is float, the `func` attribute is set to `float` and the + default value to `np.nan`. If a function, this function is used to + convert a string to another object. In this case, it is recommended + to give an associated default value as input. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, `StringConverter` + tries to supply a reasonable default value. + missing_values : {None, sequence of str}, optional + ``None`` or sequence of strings indicating a missing value. If ``None`` + then missing values are indicated by empty entries. The default is + ``None``. + locked : bool, optional + Whether the StringConverter should be locked to prevent automatic + upgrade or not. Default is False. + + """ + # + _mapper = [(nx.bool_, str2bool, False), + (nx.integer, int, -1)] + + # On 32-bit systems, we need to make sure that we explicitly include + # nx.int64 since ns.integer is nx.int32. + if nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize: + _mapper.append((nx.int64, int, -1)) + + _mapper.extend([(nx.floating, float, nx.nan), + (nx.complexfloating, complex, nx.nan + 0j), + (nx.longdouble, nx.longdouble, nx.nan), + (nx.unicode_, asunicode, '???'), + (nx.string_, asbytes, '???')]) + + (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper) + + @classmethod + def _getdtype(cls, val): + """Returns the dtype of the input variable.""" + return np.array(val).dtype + # + + @classmethod + def _getsubdtype(cls, val): + """Returns the type of the dtype of the input variable.""" + return np.array(val).dtype.type + # + # This is a bit annoying. We want to return the "general" type in most + # cases (ie. "string" rather than "S10"), but we want to return the + # specific type for datetime64 (ie. "datetime64[us]" rather than + # "datetime64"). + + @classmethod + def _dtypeortype(cls, dtype): + """Returns dtype for datetime64 and type of dtype otherwise.""" + if dtype.type == np.datetime64: + return dtype + return dtype.type + # + + @classmethod + def upgrade_mapper(cls, func, default=None): + """ + Upgrade the mapper of a StringConverter by adding a new function and + its corresponding default. + + The input function (or sequence of functions) and its associated + default value (if any) is inserted in penultimate position of the + mapper. 
The corresponding type is estimated from the dtype of the
+        default value.
+
+        Parameters
+        ----------
+        func : var
+            Function, or sequence of functions
+
+        Examples
+        --------
+        >>> import dateutil.parser
+        >>> import datetime
+        >>> dateparser = dateutil.parser.parse
+        >>> defaultdate = datetime.date(2000, 1, 1)
+        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
+        """
+        # Func is a single function
+        if hasattr(func, '__call__'):
+            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
+            return
+        elif hasattr(func, '__iter__'):
+            if isinstance(func[0], (tuple, list)):
+                for _ in func:
+                    cls._mapper.insert(-1, _)
+                return
+            if default is None:
+                default = [None] * len(func)
+            else:
+                default = list(default)
+                default.append([None] * (len(func) - len(default)))
+            for (fct, dft) in zip(func, default):
+                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
+    #
+
+    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
+                 locked=False):
+        # Defines a lock for upgrade
+        self._locked = bool(locked)
+        # No input dtype: minimal initialization
+        if dtype_or_func is None:
+            self.func = str2bool
+            self._status = 0
+            self.default = default or False
+            dtype = np.dtype('bool')
+        else:
+            # Is the input a np.dtype ?
+            try:
+                self.func = None
+                dtype = np.dtype(dtype_or_func)
+            except TypeError:
+                # dtype_or_func must be a function, then
+                if not hasattr(dtype_or_func, '__call__'):
+                    errmsg = ("The input argument `dtype` is neither a"
+                              " function nor a dtype (got '%s' instead)")
+                    raise TypeError(errmsg % type(dtype_or_func))
+                # Set the function
+                self.func = dtype_or_func
+                # If we don't have a default, try to guess it or set it to
+                # None
+                if default is None:
+                    try:
+                        default = self.func('0')
+                    except ValueError:
+                        default = None
+                dtype = self._getdtype(default)
+            # Set the status according to the dtype
+            _status = -1
+            for (i, (deftype, func, default_def)) in enumerate(self._mapper):
+                if np.issubdtype(dtype.type, deftype):
+                    _status = i
+                    if default is None:
+                        self.default = default_def
+                    else:
+                        self.default = default
+                    break
+            # if a converter for the specific dtype is available use that
+            last_func = func
+            for (i, (deftype, func, default_def)) in enumerate(self._mapper):
+                if dtype.type == deftype:
+                    _status = i
+                    last_func = func
+                    if default is None:
+                        self.default = default_def
+                    else:
+                        self.default = default
+                    break
+            func = last_func
+            if _status == -1:
+                # We never found a match in the _mapper...
+                _status = 0
+                self.default = default
+            self._status = _status
+            # If the input was a dtype, set the function to the last we saw
+            if self.func is None:
+                self.func = func
+            # If the status is 1 (int), change the function to
+            # something more robust.
+            if self.func == self._mapper[1][1]:
+                if issubclass(dtype.type, np.uint64):
+                    self.func = np.uint64
+                elif issubclass(dtype.type, np.int64):
+                    self.func = np.int64
+                else:
+                    self.func = lambda x: int(float(x))
+        # Store the list of strings corresponding to missing values.
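# --- Editor's sketch, not part of the patched file: the upgrade path driven by
# --- _mapper. A failed conversion promotes the converter to the next, wider type.
from numpy.lib._iotools import StringConverter

conv = StringConverter()            # with no dtype, starts as a bool converter
assert conv('True') is True
val = conv.upgrade('1.5')           # bool and int both fail; float succeeds
assert abs(val - 1.5) < 1e-12
# (__init__ continues below.)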
+ if missing_values is None: + self.missing_values = {''} + else: + if isinstance(missing_values, basestring): + missing_values = missing_values.split(",") + self.missing_values = set(list(missing_values) + ['']) + # + self._callingfunction = self._strict_call + self.type = self._dtypeortype(dtype) + self._checked = False + self._initial_default = default + # + + def _loose_call(self, value): + try: + return self.func(value) + except ValueError: + return self.default + # + + def _strict_call(self, value): + try: + + # We check if we can convert the value using the current function + new_value = self.func(value) + + # In addition to having to check whether func can convert the + # value, we also have to make sure that we don't get overflow + # errors for integers. + if self.func is int: + try: + np.array(value, dtype=self.type) + except OverflowError: + raise ValueError + + # We're still here so we can now return the new value + return new_value + + except ValueError: + if value.strip() in self.missing_values: + if not self._status: + self._checked = False + return self.default + raise ValueError("Cannot convert string '%s'" % value) + # + + def __call__(self, value): + return self._callingfunction(value) + # + + def upgrade(self, value): + """ + Find the best converter for a given string, and return the result. + + The supplied string `value` is converted by testing different + converters in order. First the `func` method of the + `StringConverter` instance is tried, if this fails other available + converters are tried. The order in which these other converters + are tried is determined by the `_status` attribute of the instance. + + Parameters + ---------- + value : str + The string to convert. + + Returns + ------- + out : any + The result of converting `value` with the appropriate converter. + + """ + self._checked = True + try: + return self._strict_call(value) + except ValueError: + # Raise an exception if we locked the converter... + if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + errmsg = "Could not find a valid conversion function" + raise ConverterError(errmsg) + elif _status < _statusmax - 1: + _status += 1 + (self.type, self.func, default) = self._mapper[_status] + self._status = _status + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + return self.upgrade(value) + + def iterupgrade(self, value): + self._checked = True + if not hasattr(value, '__iter__'): + value = (value,) + _strict_call = self._strict_call + try: + for _m in value: + _strict_call(_m) + except ValueError: + # Raise an exception if we locked the converter... 
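# --- Editor's sketch, not part of the patched file: the locked branch above in
# --- action. A locked converter raises instead of promoting to a wider type.
from numpy.lib._iotools import StringConverter, ConverterLockError

conv = StringConverter(int, default=-1, locked=True)
raised = False
try:
    conv.upgrade('not-a-number')    # int conversion fails, upgrade is forbidden
except ConverterLockError:
    raised = True
assert raised
# (iterupgrade continues below.)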
+ if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + raise ConverterError( + "Could not find a valid conversion function" + ) + elif _status < _statusmax - 1: + _status += 1 + (self.type, self.func, default) = self._mapper[_status] + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + self._status = _status + self.iterupgrade(value) + + def update(self, func, default=None, testing_value=None, + missing_values='', locked=False): + """ + Set StringConverter attributes directly. + + Parameters + ---------- + func : function + Conversion function. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, + `StringConverter` tries to supply a reasonable default value. + testing_value : str, optional + A string representing a standard input value of the converter. + This string is used to help defining a reasonable default + value. + missing_values : {sequence of str, None}, optional + Sequence of strings indicating a missing value. If ``None``, then + the existing `missing_values` are cleared. The default is `''`. + locked : bool, optional + Whether the StringConverter should be locked to prevent + automatic upgrade or not. Default is False. + + Notes + ----- + `update` takes the same parameters as the constructor of + `StringConverter`, except that `func` does not accept a `dtype` + whereas `dtype_or_func` in the constructor does. + + """ + self.func = func + self._locked = locked + + # Don't reset the default to None if we can avoid it + if default is not None: + self.default = default + self.type = self._dtypeortype(self._getdtype(default)) + else: + try: + tester = func(testing_value or '1') + except (TypeError, ValueError): + tester = None + self.type = self._dtypeortype(self._getdtype(tester)) + + # Add the missing values to the existing set or clear it. + if missing_values is None: + # Clear all missing values even though the ctor initializes it to + # set(['']) when the argument is None. + self.missing_values = set() + else: + if not np.iterable(missing_values): + missing_values = [missing_values] + if not all(isinstance(v, basestring) for v in missing_values): + raise TypeError("missing_values must be strings or unicode") + self.missing_values.update(missing_values) + + +def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): + """ + Convenience function to create a `np.dtype` object. + + The function processes the input `dtype` and matches it with the given + names. + + Parameters + ---------- + ndtype : var + Definition of the dtype. Can be any string or dictionary recognized + by the `np.dtype` function, or a sequence of types. + names : str or sequence, optional + Sequence of strings to use as field names for a structured dtype. + For convenience, `names` can be a string of a comma-separated list + of names. + defaultfmt : str, optional + Format string used to define missing names, such as ``"f%i"`` + (default) or ``"fields_%02i"``. + validationargs : optional + A series of optional arguments used to initialize a + `NameValidator`. 
+
+    Examples
+    --------
+    >>> np.lib._iotools.easy_dtype(float)
+    dtype('float64')
+    >>> np.lib._iotools.easy_dtype("i4, f8")
+    dtype([('f0', '<i4'), ('f1', '<f8')])
+    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
+    dtype([('field_000', '<i4'), ('field_001', '<f8')])
+
+    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
+    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
+    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
+    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
+
+    """
+    try:
+        ndtype = np.dtype(ndtype)
+    except TypeError:
+        validate = NameValidator(**validationargs)
+        nbfields = len(ndtype)
+        if names is None:
+            names = [''] * len(ndtype)
+        elif isinstance(names, basestring):
+            names = names.split(",")
+        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
+        ndtype = np.dtype(dict(formats=ndtype, names=names))
+    else:
+        nbtypes = len(ndtype)
+        # Explicit names
+        if names is not None:
+            validate = NameValidator(**validationargs)
+            if isinstance(names, basestring):
+                names = names.split(",")
+            # Simple dtype: repeat to match the nb of names
+            if nbtypes == 0:
+                formats = tuple([ndtype.type] * len(names))
+                names = validate(names, defaultfmt=defaultfmt)
+                ndtype = np.dtype(list(zip(names, formats)))
+            # Structured dtype: just validate the names as needed
+            else:
+                ndtype.names = validate(names, nbfields=nbtypes,
+                                        defaultfmt=defaultfmt)
+        # No implicit names
+        elif (nbtypes > 0):
+            validate = NameValidator(**validationargs)
+            # Default initial names : should we change the format ?
+            if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and
+                    (defaultfmt != "f%i")):
+                ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
+            # Explicit initial names : just validate
+            else:
+                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
+    return ndtype
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/_iotools.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/_iotools.pyc
new file mode 100644
index 0000000..a3805df
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/_iotools.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/_version.py b/project/venv/lib/python2.7/site-packages/numpy/lib/_version.py
new file mode 100644
index 0000000..c3563a7
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/_version.py
@@ -0,0 +1,156 @@
+"""Utility to compare (NumPy) version strings.
+
+The NumpyVersion class allows properly comparing numpy version strings.
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import re
+
+from numpy.compat import basestring
+
+
+__all__ = ['NumpyVersion']
+
+
+class NumpyVersion():
+    """Parse and compare numpy version strings.
+
+    NumPy has the following versioning scheme (numbers given are examples; they
+    can be > 9 in principle):
+
+    - Released version: '1.8.0', '1.8.1', etc.
+    - Alpha: '1.8.0a1', '1.8.0a2', etc.
+    - Beta: '1.8.0b1', '1.8.0b2', etc.
+    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
+    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
+    - Development versions after a1: '1.8.0a1.dev-f1234afa',
+                                     '1.8.0b2.dev-f1234afa',
+                                     '1.8.1rc1.dev-f1234afa', etc.
+    - Development versions (no git hash available): '1.8.0.dev-Unknown'
+
+    Comparing needs to be done against a valid version string or other
+    `NumpyVersion` instance. Note that all development versions of the same
+    (pre-)release compare equal.
+
+    .. versionadded:: 1.9.0
+
+    Parameters
+    ----------
+    vstring : str
+        NumPy version string (``np.__version__``).
+
+    Examples
+    --------
+    >>> from numpy.lib import NumpyVersion
+    >>> if NumpyVersion(np.__version__) < '1.7.0':
+    ...     print('skip')
+    skip
+
+    >>> NumpyVersion('1.7')  # raises ValueError, add ".0"
+
+    """
+
+    def __init__(self, vstring):
+        self.vstring = vstring
+        ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
+        if not ver_main:
+            raise ValueError("Not a valid numpy version string")
+
+        self.version = ver_main.group()
+        self.major, self.minor, self.bugfix = [int(x) for x in
+                                               self.version.split('.')]
+        if len(vstring) == ver_main.end():
+            self.pre_release = 'final'
+        else:
+            alpha = re.match(r'a\d', vstring[ver_main.end():])
+            beta = re.match(r'b\d', vstring[ver_main.end():])
+            rc = re.match(r'rc\d', vstring[ver_main.end():])
+            pre_rel = [m for m in [alpha, beta, rc] if m is not None]
+            if pre_rel:
+                self.pre_release = pre_rel[0].group()
+            else:
+                self.pre_release = ''
+
+        self.is_devversion = bool(re.search(r'.dev', vstring))
+
+    def _compare_version(self, other):
+        """Compare major.minor.bugfix"""
+        if self.major == other.major:
+            if self.minor == other.minor:
+                if self.bugfix == other.bugfix:
+                    vercmp = 0
+                elif self.bugfix > other.bugfix:
+                    vercmp = 1
+                else:
+                    vercmp = -1
+            elif self.minor > other.minor:
+                vercmp = 1
+            else:
+                vercmp = -1
+        elif self.major > other.major:
+            vercmp = 1
+        else:
+            vercmp = -1
+
+        return vercmp
+
+    def _compare_pre_release(self, other):
+        """Compare alpha/beta/rc/final."""
+        if self.pre_release == other.pre_release:
+            vercmp = 0
+        elif self.pre_release == 'final':
+            vercmp = 1
+        elif other.pre_release == 'final':
+            vercmp = -1
+        elif self.pre_release > other.pre_release:
+            vercmp = 1
+        else:
+            vercmp = -1
+
+        return vercmp
+
+    def _compare(self, other):
+        if not isinstance(other, (basestring, NumpyVersion)):
+            raise ValueError("Invalid object to compare with NumpyVersion.")
+
+        if isinstance(other, basestring):
+            other = NumpyVersion(other)
+
+        vercmp = self._compare_version(other)
+        if vercmp == 0:
+            # Same x.y.z version, check for alpha/beta/rc
+            vercmp = self._compare_pre_release(other)
+            if vercmp == 0:
+                # Same version and same pre-release, check if dev version
+                if self.is_devversion is other.is_devversion:
+                    vercmp = 0
+                elif self.is_devversion:
+                    vercmp = -1
+                else:
+                    vercmp = 1
+
+        return vercmp
+
+    def __lt__(self, other):
+        return self._compare(other) < 0
+
+    def __le__(self, other):
+        return self._compare(other) <= 0
+
+    def __eq__(self, other):
+        return self._compare(other) == 0
+
+    def __ne__(self, other):
+        return self._compare(other) != 0
+
+    def __gt__(self, other):
+        return self._compare(other) > 0
+
+    def __ge__(self, other):
+        return self._compare(other) >= 0
+
+    def __repr__(self):
+        return "NumpyVersion(%s)" % self.vstring
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/_version.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/_version.pyc
new file mode 100644
index 0000000..ff1b82f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/_version.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/arraypad.py b/project/venv/lib/python2.7/site-packages/numpy/lib/arraypad.py
new file mode 100644
index 0000000..4f63710
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/arraypad.py
@@ -0,0 +1,1355 @@
+"""
+The arraypad module contains a group of functions to pad values onto the edges
+of an n-dimensional array.
+ +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.core.overrides import array_function_dispatch + + +__all__ = ['pad'] + + +############################################################################### +# Private utility functions. + + +def _arange_ndarray(arr, shape, axis, reverse=False): + """ + Create an ndarray of `shape` with increments along specified `axis` + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + shape : tuple of ints + Shape of desired array. Should be equivalent to `arr.shape` except + `shape[axis]` which may have any positive value. + axis : int + Axis to increment along. + reverse : bool + If False, increment in a positive fashion from 1 to `shape[axis]`, + inclusive. If True, the bounds are the same but the order reversed. + + Returns + ------- + padarr : ndarray + Output array sized to pad `arr` along `axis`, with linear range from + 1 to `shape[axis]` along specified `axis`. + + Notes + ----- + The range is deliberately 1-indexed for this specific use case. Think of + this algorithm as broadcasting `np.arange` to a single `axis` of an + arbitrarily shaped ndarray. + + """ + initshape = tuple(1 if i != axis else shape[axis] + for (i, x) in enumerate(arr.shape)) + if not reverse: + padarr = np.arange(1, shape[axis] + 1) + else: + padarr = np.arange(shape[axis], 0, -1) + padarr = padarr.reshape(initshape) + for i, dim in enumerate(shape): + if padarr.shape[i] != dim: + padarr = padarr.repeat(dim, axis=i) + return padarr + + +def _round_ifneeded(arr, dtype): + """ + Rounds arr inplace if destination dtype is integer. + + Parameters + ---------- + arr : ndarray + Input array. + dtype : dtype + The dtype of the destination array. + + """ + if np.issubdtype(dtype, np.integer): + arr.round(out=arr) + + +def _slice_at_axis(shape, sl, axis): + """ + Construct a slice tuple the length of shape, with sl at the specified axis + """ + slice_tup = (slice(None),) + return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1) + + +def _slice_first(shape, n, axis): + """ Construct a slice tuple to take the first n elements along axis """ + return _slice_at_axis(shape, slice(0, n), axis=axis) + + +def _slice_last(shape, n, axis): + """ Construct a slice tuple to take the last n elements along axis """ + dim = shape[axis] # doing this explicitly makes n=0 work + return _slice_at_axis(shape, slice(dim - n, dim), axis=axis) + + +def _do_prepend(arr, pad_chunk, axis): + return np.concatenate( + (pad_chunk.astype(arr.dtype, copy=False), arr), axis=axis) + + +def _do_append(arr, pad_chunk, axis): + return np.concatenate( + (arr, pad_chunk.astype(arr.dtype, copy=False)), axis=axis) + + +def _prepend_const(arr, pad_amt, val, axis=-1): + """ + Prepend constant `val` along `axis` of `arr`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + val : scalar + Constant value to use. For best results should be of type `arr.dtype`; + if not `arr.dtype` will be cast to `arr.dtype`. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` constant `val` prepended along `axis`. 
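The private helpers above all lean on one slice-tuple trick; here is a standalone sketch of it, reimplemented purely for illustration from the docstring of _slice_at_axis:

    import numpy as np

    def slice_at_axis(shape, sl, axis):
        # Full slices everywhere except `axis`, where `sl` applies.
        full = (slice(None),)
        return full * axis + (sl,) + full * (len(shape) - axis - 1)

    a = np.arange(12).reshape(3, 4)
    # Last two columns, i.e. the equivalent of a[:, 2:4]:
    print(a[slice_at_axis(a.shape, slice(2, 4), axis=1)])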
+ + """ + if pad_amt == 0: + return arr + padshape = tuple(x if i != axis else pad_amt + for (i, x) in enumerate(arr.shape)) + return _do_prepend(arr, np.full(padshape, val, dtype=arr.dtype), axis) + + +def _append_const(arr, pad_amt, val, axis=-1): + """ + Append constant `val` along `axis` of `arr`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + val : scalar + Constant value to use. For best results should be of type `arr.dtype`; + if not `arr.dtype` will be cast to `arr.dtype`. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` constant `val` appended along `axis`. + + """ + if pad_amt == 0: + return arr + padshape = tuple(x if i != axis else pad_amt + for (i, x) in enumerate(arr.shape)) + return _do_append(arr, np.full(padshape, val, dtype=arr.dtype), axis) + + + +def _prepend_edge(arr, pad_amt, axis=-1): + """ + Prepend `pad_amt` to `arr` along `axis` by extending edge values. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, extended by `pad_amt` edge values appended along `axis`. + + """ + if pad_amt == 0: + return arr + + edge_slice = _slice_first(arr.shape, 1, axis=axis) + edge_arr = arr[edge_slice] + return _do_prepend(arr, edge_arr.repeat(pad_amt, axis=axis), axis) + + +def _append_edge(arr, pad_amt, axis=-1): + """ + Append `pad_amt` to `arr` along `axis` by extending edge values. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, extended by `pad_amt` edge values prepended along + `axis`. + + """ + if pad_amt == 0: + return arr + + edge_slice = _slice_last(arr.shape, 1, axis=axis) + edge_arr = arr[edge_slice] + return _do_append(arr, edge_arr.repeat(pad_amt, axis=axis), axis) + + +def _prepend_ramp(arr, pad_amt, end, axis=-1): + """ + Prepend linear ramp along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + end : scalar + Constal value to use. For best results should be of type `arr.dtype`; + if not `arr.dtype` will be cast to `arr.dtype`. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values prepended along `axis`. The + prepended region ramps linearly from the edge value to `end`. 
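What _prepend_edge/_append_edge amount to through the public API; a sketch with arbitrary values:

    import numpy as np

    a = np.array([1, 2, 3])
    # 'edge' repeats the boundary values, which the helpers above build
    # from a one-element edge slice followed by repeat().
    print(np.pad(a, (2, 3), 'edge'))                     # [1 1 1 2 3 3 3 3]
    # The same result assembled by hand:
    print(np.concatenate(([a[0]] * 2, a, [a[-1]] * 3)))  # [1 1 1 2 3 3 3 3]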
+ + """ + if pad_amt == 0: + return arr + + # Generate shape for final concatenated array + padshape = tuple(x if i != axis else pad_amt + for (i, x) in enumerate(arr.shape)) + + # Generate an n-dimensional array incrementing along `axis` + ramp_arr = _arange_ndarray(arr, padshape, axis, + reverse=True).astype(np.float64) + + # Appropriate slicing to extract n-dimensional edge along `axis` + edge_slice = _slice_first(arr.shape, 1, axis=axis) + + # Extract edge, and extend along `axis` + edge_pad = arr[edge_slice].repeat(pad_amt, axis) + + # Linear ramp + slope = (end - edge_pad) / float(pad_amt) + ramp_arr = ramp_arr * slope + ramp_arr += edge_pad + _round_ifneeded(ramp_arr, arr.dtype) + + # Ramp values will most likely be float, cast them to the same type as arr + return _do_prepend(arr, ramp_arr, axis) + + +def _append_ramp(arr, pad_amt, end, axis=-1): + """ + Append linear ramp along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + end : scalar + Constal value to use. For best results should be of type `arr.dtype`; + if not `arr.dtype` will be cast to `arr.dtype`. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + appended region ramps linearly from the edge value to `end`. + + """ + if pad_amt == 0: + return arr + + # Generate shape for final concatenated array + padshape = tuple(x if i != axis else pad_amt + for (i, x) in enumerate(arr.shape)) + + # Generate an n-dimensional array incrementing along `axis` + ramp_arr = _arange_ndarray(arr, padshape, axis, + reverse=False).astype(np.float64) + + # Slice a chunk from the edge to calculate stats on + edge_slice = _slice_last(arr.shape, 1, axis=axis) + + # Extract edge, and extend along `axis` + edge_pad = arr[edge_slice].repeat(pad_amt, axis) + + # Linear ramp + slope = (end - edge_pad) / float(pad_amt) + ramp_arr = ramp_arr * slope + ramp_arr += edge_pad + _round_ifneeded(ramp_arr, arr.dtype) + + # Ramp values will most likely be float, cast them to the same type as arr + return _do_append(arr, ramp_arr, axis) + + +def _prepend_max(arr, pad_amt, num, axis=-1): + """ + Prepend `pad_amt` maximum values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + num : int + Depth into `arr` along `axis` to calculate maximum. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + prepended region is the maximum of the first `num` values along + `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _prepend_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + max_slice = _slice_first(arr.shape, num, axis=axis) + + # Extract slice, calculate max + max_chunk = arr[max_slice].max(axis=axis, keepdims=True) + + # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` + return _do_prepend(arr, max_chunk.repeat(pad_amt, axis=axis), axis) + + +def _append_max(arr, pad_amt, num, axis=-1): + """ + Pad one `axis` of `arr` with the maximum of the last `num` elements. 
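A sketch of the ramp construction in _prepend_ramp/_append_ramp (values arbitrary; note the rounding step for integer dtypes):

    import numpy as np

    a = np.array([1, 2, 3])
    # Each pad region ramps linearly from the edge value to end_values,
    # using the slope (end - edge) / pad_amt computed above.
    print(np.pad(a, (3, 2), 'linear_ramp', end_values=(0, 9)))
    # -> [0 0 1 1 2 3 6 9] after _round_ifneeded, since a is integer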
+ + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + num : int + Depth into `arr` along `axis` to calculate maximum. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + appended region is the maximum of the final `num` values along `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _append_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + if num is not None: + max_slice = _slice_last(arr.shape, num, axis=axis) + else: + max_slice = tuple(slice(None) for x in arr.shape) + + # Extract slice, calculate max + max_chunk = arr[max_slice].max(axis=axis, keepdims=True) + + # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` + return _do_append(arr, max_chunk.repeat(pad_amt, axis=axis), axis) + + +def _prepend_mean(arr, pad_amt, num, axis=-1): + """ + Prepend `pad_amt` mean values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + num : int + Depth into `arr` along `axis` to calculate mean. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values prepended along `axis`. The + prepended region is the mean of the first `num` values along `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _prepend_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + mean_slice = _slice_first(arr.shape, num, axis=axis) + + # Extract slice, calculate mean + mean_chunk = arr[mean_slice].mean(axis, keepdims=True) + _round_ifneeded(mean_chunk, arr.dtype) + + # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` + return _do_prepend(arr, mean_chunk.repeat(pad_amt, axis), axis=axis) + + +def _append_mean(arr, pad_amt, num, axis=-1): + """ + Append `pad_amt` mean values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + num : int + Depth into `arr` along `axis` to calculate mean. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + appended region is the maximum of the final `num` values along `axis`. 
+ + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _append_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + if num is not None: + mean_slice = _slice_last(arr.shape, num, axis=axis) + else: + mean_slice = tuple(slice(None) for x in arr.shape) + + # Extract slice, calculate mean + mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True) + _round_ifneeded(mean_chunk, arr.dtype) + + # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` + return _do_append(arr, mean_chunk.repeat(pad_amt, axis), axis=axis) + + +def _prepend_med(arr, pad_amt, num, axis=-1): + """ + Prepend `pad_amt` median values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + num : int + Depth into `arr` along `axis` to calculate median. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values prepended along `axis`. The + prepended region is the median of the first `num` values along `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _prepend_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + med_slice = _slice_first(arr.shape, num, axis=axis) + + # Extract slice, calculate median + med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True) + _round_ifneeded(med_chunk, arr.dtype) + + # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` + return _do_prepend(arr, med_chunk.repeat(pad_amt, axis), axis=axis) + + +def _append_med(arr, pad_amt, num, axis=-1): + """ + Append `pad_amt` median values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + num : int + Depth into `arr` along `axis` to calculate median. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + appended region is the median of the final `num` values along `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _append_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + if num is not None: + med_slice = _slice_last(arr.shape, num, axis=axis) + else: + med_slice = tuple(slice(None) for x in arr.shape) + + # Extract slice, calculate median + med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True) + _round_ifneeded(med_chunk, arr.dtype) + + # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` + return _do_append(arr, med_chunk.repeat(pad_amt, axis), axis=axis) + + +def _prepend_min(arr, pad_amt, num, axis=-1): + """ + Prepend `pad_amt` minimum values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. 
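The statistic modes ('maximum', 'mean', 'median', 'minimum') all share the pattern above; a sketch with 'mean', where stat_length becomes the helpers' num argument:

    import numpy as np

    a = np.array([1, 3, 11])
    # Statistic over the whole axis (num is None):
    print(np.pad(a, (2, 2), 'mean'))                 # [ 5  5  1  3 11  5  5]
    # Statistic over only the two values nearest each edge (num=2):
    print(np.pad(a, (2, 2), 'mean', stat_length=2))  # [ 2  2  1  3 11  7  7]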
+ pad_amt : int + Amount of padding to prepend. + num : int + Depth into `arr` along `axis` to calculate minimum. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values prepended along `axis`. The + prepended region is the minimum of the first `num` values along + `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _prepend_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + min_slice = _slice_first(arr.shape, num, axis=axis) + + # Extract slice, calculate min + min_chunk = arr[min_slice].min(axis=axis, keepdims=True) + + # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` + return _do_prepend(arr, min_chunk.repeat(pad_amt, axis), axis=axis) + + +def _append_min(arr, pad_amt, num, axis=-1): + """ + Append `pad_amt` median values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + num : int + Depth into `arr` along `axis` to calculate minimum. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + appended region is the minimum of the final `num` values along `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _append_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + if num is not None: + min_slice = _slice_last(arr.shape, num, axis=axis) + else: + min_slice = tuple(slice(None) for x in arr.shape) + + # Extract slice, calculate min + min_chunk = arr[min_slice].min(axis=axis, keepdims=True) + + # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` + return _do_append(arr, min_chunk.repeat(pad_amt, axis), axis=axis) + + +def _pad_ref(arr, pad_amt, method, axis=-1): + """ + Pad `axis` of `arr` by reflection. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : tuple of ints, length 2 + Padding to (prepend, append) along `axis`. + method : str + Controls method of reflection; options are 'even' or 'odd'. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` + values appended along `axis`. Both regions are padded with reflected + values from the original array. + + Notes + ----- + This algorithm does not pad with repetition, i.e. the edges are not + repeated in the reflection. For that behavior, use `mode='symmetric'`. + + The modes 'reflect', 'symmetric', and 'wrap' must be padded with a + single function, lest the indexing tricks in non-integer multiples of the + original shape would violate repetition in the final iteration. 
+ + """ + # Implicit booleanness to test for zero (or None) in any scalar type + if pad_amt[0] == 0 and pad_amt[1] == 0: + return arr + + ########################################################################## + # Prepended region + + # Slice off a reverse indexed chunk from near edge to pad `arr` before + ref_slice = _slice_at_axis(arr.shape, slice(pad_amt[0], 0, -1), axis=axis) + + ref_chunk1 = arr[ref_slice] + + # Memory/computationally more expensive, only do this if `method='odd'` + if 'odd' in method and pad_amt[0] > 0: + edge_slice1 = _slice_first(arr.shape, 1, axis=axis) + edge_chunk = arr[edge_slice1] + ref_chunk1 = 2 * edge_chunk - ref_chunk1 + del edge_chunk + + ########################################################################## + # Appended region + + # Slice off a reverse indexed chunk from far edge to pad `arr` after + start = arr.shape[axis] - pad_amt[1] - 1 + end = arr.shape[axis] - 1 + ref_slice = _slice_at_axis(arr.shape, slice(start, end), axis=axis) + rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis) + ref_chunk2 = arr[ref_slice][rev_idx] + + if 'odd' in method: + edge_slice2 = _slice_last(arr.shape, 1, axis=axis) + edge_chunk = arr[edge_slice2] + ref_chunk2 = 2 * edge_chunk - ref_chunk2 + del edge_chunk + + # Concatenate `arr` with both chunks, extending along `axis` + return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis) + + +def _pad_sym(arr, pad_amt, method, axis=-1): + """ + Pad `axis` of `arr` by symmetry. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : tuple of ints, length 2 + Padding to (prepend, append) along `axis`. + method : str + Controls method of symmetry; options are 'even' or 'odd'. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` + values appended along `axis`. Both regions are padded with symmetric + values from the original array. + + Notes + ----- + This algorithm DOES pad with repetition, i.e. the edges are repeated. + For padding without repeated edges, use `mode='reflect'`. + + The modes 'reflect', 'symmetric', and 'wrap' must be padded with a + single function, lest the indexing tricks in non-integer multiples of the + original shape would violate repetition in the final iteration. 
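The practical difference between _pad_ref and _pad_sym, sketched through the public modes:

    import numpy as np

    a = np.array([1, 2, 3, 4])
    # 'reflect' mirrors about the edge value without repeating it...
    print(np.pad(a, (2, 2), 'reflect'))    # [3 2 1 2 3 4 3 2]
    # ...while 'symmetric' repeats the edge value in the mirror image.
    print(np.pad(a, (2, 2), 'symmetric'))  # [2 1 1 2 3 4 4 3]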
+ + """ + # Implicit booleanness to test for zero (or None) in any scalar type + if pad_amt[0] == 0 and pad_amt[1] == 0: + return arr + + ########################################################################## + # Prepended region + + # Slice off a reverse indexed chunk from near edge to pad `arr` before + sym_slice = _slice_first(arr.shape, pad_amt[0], axis=axis) + rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis) + sym_chunk1 = arr[sym_slice][rev_idx] + + # Memory/computationally more expensive, only do this if `method='odd'` + if 'odd' in method and pad_amt[0] > 0: + edge_slice1 = _slice_first(arr.shape, 1, axis=axis) + edge_chunk = arr[edge_slice1] + sym_chunk1 = 2 * edge_chunk - sym_chunk1 + del edge_chunk + + ########################################################################## + # Appended region + + # Slice off a reverse indexed chunk from far edge to pad `arr` after + sym_slice = _slice_last(arr.shape, pad_amt[1], axis=axis) + sym_chunk2 = arr[sym_slice][rev_idx] + + if 'odd' in method: + edge_slice2 = _slice_last(arr.shape, 1, axis=axis) + edge_chunk = arr[edge_slice2] + sym_chunk2 = 2 * edge_chunk - sym_chunk2 + del edge_chunk + + # Concatenate `arr` with both chunks, extending along `axis` + return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis) + + +def _pad_wrap(arr, pad_amt, axis=-1): + """ + Pad `axis` of `arr` via wrapping. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : tuple of ints, length 2 + Padding to (prepend, append) along `axis`. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` + values appended along `axis`. Both regions are padded wrapped values + from the opposite end of `axis`. + + Notes + ----- + This method of padding is also known as 'tile' or 'tiling'. + + The modes 'reflect', 'symmetric', and 'wrap' must be padded with a + single function, lest the indexing tricks in non-integer multiples of the + original shape would violate repetition in the final iteration. + + """ + # Implicit booleanness to test for zero (or None) in any scalar type + if pad_amt[0] == 0 and pad_amt[1] == 0: + return arr + + ########################################################################## + # Prepended region + + # Slice off a reverse indexed chunk from near edge to pad `arr` before + wrap_slice = _slice_last(arr.shape, pad_amt[0], axis=axis) + wrap_chunk1 = arr[wrap_slice] + + ########################################################################## + # Appended region + + # Slice off a reverse indexed chunk from far edge to pad `arr` after + wrap_slice = _slice_first(arr.shape, pad_amt[1], axis=axis) + wrap_chunk2 = arr[wrap_slice] + + # Concatenate `arr` with both chunks, extending along `axis` + return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis) + + +def _as_pairs(x, ndim, as_index=False): + """ + Broadcast `x` to an array with the shape (`ndim`, 2). + + A helper function for `pad` that prepares and validates arguments like + `pad_width` for iteration in pairs. + + Parameters + ---------- + x : {None, scalar, array-like} + The object to broadcast to the shape (`ndim`, 2). + ndim : int + Number of pairs the broadcasted `x` will have. + as_index : bool, optional + If `x` is not None, try to round each element of `x` to an integer + (dtype `np.intp`) and ensure every element is positive. 
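A sketch of the tiling behaviour _pad_wrap implements; the input is treated as one period of a cyclic signal:

    import numpy as np

    a = np.array([1, 2, 3])
    # The front is padded with values from the back and vice versa.
    print(np.pad(a, (2, 4), 'wrap'))  # [2 3 1 2 3 1 2 3 1]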
+ + Returns + ------- + pairs : nested iterables, shape (`ndim`, 2) + The broadcasted version of `x`. + + Raises + ------ + ValueError + If `as_index` is True and `x` contains negative elements. + Or if `x` is not broadcastable to the shape (`ndim`, 2). + """ + if x is None: + # Pass through None as a special case, otherwise np.round(x) fails + # with an AttributeError + return ((None, None),) * ndim + + x = np.array(x) + if as_index: + x = np.round(x).astype(np.intp, copy=False) + + if x.ndim < 3: + # Optimization: Possibly use faster paths for cases where `x` has + # only 1 or 2 elements. `np.broadcast_to` could handle these as well + # but is currently slower + + if x.size == 1: + # x was supplied as a single value + x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2 + if as_index and x < 0: + raise ValueError("index can't contain negative values") + return ((x[0], x[0]),) * ndim + + if x.size == 2 and x.shape != (2, 1): + # x was supplied with a single value for each side + # but except case when each dimension has a single value + # which should be broadcasted to a pair, + # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]] + x = x.ravel() # Ensure x[0], x[1] works + if as_index and (x[0] < 0 or x[1] < 0): + raise ValueError("index can't contain negative values") + return ((x[0], x[1]),) * ndim + + if as_index and x.min() < 0: + raise ValueError("index can't contain negative values") + + # Converting the array with `tolist` seems to improve performance + # when iterating and indexing the result (see usage in `pad`) + return np.broadcast_to(x, (ndim, 2)).tolist() + + +############################################################################### +# Public functions + + +def _pad_dispatcher(array, pad_width, mode, **kwargs): + return (array,) + + +@array_function_dispatch(_pad_dispatcher, module='numpy') +def pad(array, pad_width, mode, **kwargs): + """ + Pads an array. + + Parameters + ---------- + array : array_like of rank N + Input array + pad_width : {sequence, array_like, int} + Number of values padded to the edges of each axis. + ((before_1, after_1), ... (before_N, after_N)) unique pad widths + for each axis. + ((before, after),) yields same before and after pad for each axis. + (pad,) or int is a shortcut for before = after = pad width for all + axes. + mode : str or function + One of the following string values or a user supplied function. + + 'constant' + Pads with a constant value. + 'edge' + Pads with the edge values of array. + 'linear_ramp' + Pads with the linear ramp between end_value and the + array edge value. + 'maximum' + Pads with the maximum value of all or part of the + vector along each axis. + 'mean' + Pads with the mean value of all or part of the + vector along each axis. + 'median' + Pads with the median value of all or part of the + vector along each axis. + 'minimum' + Pads with the minimum value of all or part of the + vector along each axis. + 'reflect' + Pads with the reflection of the vector mirrored on + the first and last values of the vector along each + axis. + 'symmetric' + Pads with the reflection of the vector mirrored + along the edge of the array. + 'wrap' + Pads with the wrap of the vector along the axis. + The first values are used to pad the end and the + end values are used to pad the beginning. + + Padding function, see Notes. + stat_length : sequence or int, optional + Used in 'maximum', 'mean', 'median', and 'minimum'. Number of + values at edge of each axis used to calculate the statistic value. 
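How the documented pad_width forms normalise, per the _as_pairs broadcasting above; a sketch using 'constant' only to make the shapes visible:

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    print(np.pad(a, 1, 'constant').shape)                 # int -> (4, 5)
    print(np.pad(a, (1, 2), 'constant').shape)            # one pair, all axes -> (5, 6)
    print(np.pad(a, ((1, 2), (0, 3)), 'constant').shape)  # pair per axis -> (5, 6)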
+ + ((before_1, after_1), ... (before_N, after_N)) unique statistic + lengths for each axis. + + ((before, after),) yields same before and after statistic lengths + for each axis. + + (stat_length,) or int is a shortcut for before = after = statistic + length for all axes. + + Default is ``None``, to use the entire axis. + constant_values : sequence or int, optional + Used in 'constant'. The values to set the padded values for each + axis. + + ((before_1, after_1), ... (before_N, after_N)) unique pad constants + for each axis. + + ((before, after),) yields same before and after constants for each + axis. + + (constant,) or int is a shortcut for before = after = constant for + all axes. + + Default is 0. + end_values : sequence or int, optional + Used in 'linear_ramp'. The values used for the ending value of the + linear_ramp and that will form the edge of the padded array. + + ((before_1, after_1), ... (before_N, after_N)) unique end values + for each axis. + + ((before, after),) yields same before and after end values for each + axis. + + (constant,) or int is a shortcut for before = after = end value for + all axes. + + Default is 0. + reflect_type : {'even', 'odd'}, optional + Used in 'reflect', and 'symmetric'. The 'even' style is the + default with an unaltered reflection around the edge value. For + the 'odd' style, the extended part of the array is created by + subtracting the reflected values from two times the edge value. + + Returns + ------- + pad : ndarray + Padded array of rank equal to `array` with shape increased + according to `pad_width`. + + Notes + ----- + .. versionadded:: 1.7.0 + + For an array with rank greater than 1, some of the padding of later + axes is calculated from padding of previous axes. This is easiest to + think about with a rank 2 array where the corners of the padded array + are calculated by using padded values from the first axis. + + The padding function, if used, should return a rank 1 array equal in + length to the vector argument with padded values replaced. It has the + following signature:: + + padding_func(vector, iaxis_pad_width, iaxis, kwargs) + + where + + vector : ndarray + A rank 1 array already padded with zeros. Padded values are + vector[:pad_tuple[0]] and vector[-pad_tuple[1]:]. + iaxis_pad_width : tuple + A 2-tuple of ints, iaxis_pad_width[0] represents the number of + values padded at the beginning of vector where + iaxis_pad_width[1] represents the number of values padded at + the end of vector. + iaxis : int + The axis currently being calculated. + kwargs : dict + Any keyword arguments the function requires. 
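The per-mode keywords take the same pair-per-axis forms as pad_width; a sketch with constant_values (the corners take the values of the last axis padded, since axes are processed in order):

    import numpy as np

    a = np.array([[1, 2], [3, 4]])
    # 7 above, 8 below; 9 on the left, 0 on the right:
    print(np.pad(a, 1, 'constant', constant_values=((7, 8), (9, 0))))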
+ + Examples + -------- + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2,3), 'constant', constant_values=(4, 6)) + array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6]) + + >>> np.pad(a, (2, 3), 'edge') + array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5]) + + >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) + array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) + + >>> np.pad(a, (2,), 'maximum') + array([5, 5, 1, 2, 3, 4, 5, 5, 5]) + + >>> np.pad(a, (2,), 'mean') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> np.pad(a, (2,), 'median') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> a = [[1, 2], [3, 4]] + >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') + array([[1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [3, 3, 3, 4, 3, 3, 3], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1]]) + + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'reflect') + array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) + + >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') + array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + >>> np.pad(a, (2, 3), 'symmetric') + array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) + + >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') + array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) + + >>> np.pad(a, (2, 3), 'wrap') + array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) + + >>> def pad_with(vector, pad_width, iaxis, kwargs): + ... pad_value = kwargs.get('padder', 10) + ... vector[:pad_width[0]] = pad_value + ... vector[-pad_width[1]:] = pad_value + ... return vector + >>> a = np.arange(6) + >>> a = a.reshape((2, 3)) + >>> np.pad(a, 2, pad_with) + array([[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]]) + >>> np.pad(a, 2, pad_with, padder=100) + array([[100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 0, 1, 2, 100, 100], + [100, 100, 3, 4, 5, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100]]) + """ + if not np.asarray(pad_width).dtype.kind == 'i': + raise TypeError('`pad_width` must be of integral type.') + + narray = np.array(array) + pad_width = _as_pairs(pad_width, narray.ndim, as_index=True) + + allowedkwargs = { + 'constant': ['constant_values'], + 'edge': [], + 'linear_ramp': ['end_values'], + 'maximum': ['stat_length'], + 'mean': ['stat_length'], + 'median': ['stat_length'], + 'minimum': ['stat_length'], + 'reflect': ['reflect_type'], + 'symmetric': ['reflect_type'], + 'wrap': [], + } + + kwdefaults = { + 'stat_length': None, + 'constant_values': 0, + 'end_values': 0, + 'reflect_type': 'even', + } + + if isinstance(mode, np.compat.basestring): + # Make sure have allowed kwargs appropriate for mode + for key in kwargs: + if key not in allowedkwargs[mode]: + raise ValueError('%s keyword not in allowed keywords %s' % + (key, allowedkwargs[mode])) + + # Set kwarg defaults + for kw in allowedkwargs[mode]: + kwargs.setdefault(kw, kwdefaults[kw]) + + # Need to only normalize particular keywords. 
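The implementation beginning above validates keywords against the allowedkwargs table before anything else; a sketch of the rejection path:

    import numpy as np

    a = np.arange(4)
    try:
        # 'edge' accepts no extra keywords, so this raises ValueError:
        np.pad(a, 1, 'edge', constant_values=7)
    except ValueError as exc:
        print(exc)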
+ for i in kwargs: + if i == 'stat_length': + kwargs[i] = _as_pairs(kwargs[i], narray.ndim, as_index=True) + if i in ['end_values', 'constant_values']: + kwargs[i] = _as_pairs(kwargs[i], narray.ndim) + else: + # Drop back to old, slower np.apply_along_axis mode for user-supplied + # vector function + function = mode + + # Create a new padded array + rank = list(range(narray.ndim)) + total_dim_increase = [np.sum(pad_width[i]) for i in rank] + offset_slices = tuple( + slice(pad_width[i][0], pad_width[i][0] + narray.shape[i]) + for i in rank) + new_shape = np.array(narray.shape) + total_dim_increase + newmat = np.zeros(new_shape, narray.dtype) + + # Insert the original array into the padded array + newmat[offset_slices] = narray + + # This is the core of pad ... + for iaxis in rank: + np.apply_along_axis(function, + iaxis, + newmat, + pad_width[iaxis], + iaxis, + kwargs) + return newmat + + # If we get here, use new padding method + newmat = narray.copy() + + # API preserved, but completely new algorithm which pads by building the + # entire block to pad before/after `arr` with in one step, for each axis. + if mode == 'constant': + for axis, ((pad_before, pad_after), (before_val, after_val)) \ + in enumerate(zip(pad_width, kwargs['constant_values'])): + newmat = _prepend_const(newmat, pad_before, before_val, axis) + newmat = _append_const(newmat, pad_after, after_val, axis) + + elif mode == 'edge': + for axis, (pad_before, pad_after) in enumerate(pad_width): + newmat = _prepend_edge(newmat, pad_before, axis) + newmat = _append_edge(newmat, pad_after, axis) + + elif mode == 'linear_ramp': + for axis, ((pad_before, pad_after), (before_val, after_val)) \ + in enumerate(zip(pad_width, kwargs['end_values'])): + newmat = _prepend_ramp(newmat, pad_before, before_val, axis) + newmat = _append_ramp(newmat, pad_after, after_val, axis) + + elif mode == 'maximum': + for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ + in enumerate(zip(pad_width, kwargs['stat_length'])): + newmat = _prepend_max(newmat, pad_before, chunk_before, axis) + newmat = _append_max(newmat, pad_after, chunk_after, axis) + + elif mode == 'mean': + for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ + in enumerate(zip(pad_width, kwargs['stat_length'])): + newmat = _prepend_mean(newmat, pad_before, chunk_before, axis) + newmat = _append_mean(newmat, pad_after, chunk_after, axis) + + elif mode == 'median': + for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ + in enumerate(zip(pad_width, kwargs['stat_length'])): + newmat = _prepend_med(newmat, pad_before, chunk_before, axis) + newmat = _append_med(newmat, pad_after, chunk_after, axis) + + elif mode == 'minimum': + for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ + in enumerate(zip(pad_width, kwargs['stat_length'])): + newmat = _prepend_min(newmat, pad_before, chunk_before, axis) + newmat = _append_min(newmat, pad_after, chunk_after, axis) + + elif mode == 'reflect': + for axis, (pad_before, pad_after) in enumerate(pad_width): + if narray.shape[axis] == 0: + # Axes with non-zero padding cannot be empty. + if pad_before > 0 or pad_after > 0: + raise ValueError("There aren't any elements to reflect" + " in axis {} of `array`".format(axis)) + # Skip zero padding on empty axes. + continue + + # Recursive padding along any axis where `pad_amt` is too large + # for indexing tricks. We can only safely pad the original axis + # length, to keep the period of the reflections consistent. 
+ if ((pad_before > 0) or + (pad_after > 0)) and newmat.shape[axis] == 1: + # Extending singleton dimension for 'reflect' is legacy + # behavior; it really should raise an error. + newmat = _prepend_edge(newmat, pad_before, axis) + newmat = _append_edge(newmat, pad_after, axis) + continue + + method = kwargs['reflect_type'] + safe_pad = newmat.shape[axis] - 1 + while ((pad_before > safe_pad) or (pad_after > safe_pad)): + pad_iter_b = min(safe_pad, + safe_pad * (pad_before // safe_pad)) + pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) + newmat = _pad_ref(newmat, (pad_iter_b, + pad_iter_a), method, axis) + pad_before -= pad_iter_b + pad_after -= pad_iter_a + safe_pad += pad_iter_b + pad_iter_a + newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis) + + elif mode == 'symmetric': + for axis, (pad_before, pad_after) in enumerate(pad_width): + # Recursive padding along any axis where `pad_amt` is too large + # for indexing tricks. We can only safely pad the original axis + # length, to keep the period of the reflections consistent. + method = kwargs['reflect_type'] + safe_pad = newmat.shape[axis] + while ((pad_before > safe_pad) or + (pad_after > safe_pad)): + pad_iter_b = min(safe_pad, + safe_pad * (pad_before // safe_pad)) + pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) + newmat = _pad_sym(newmat, (pad_iter_b, + pad_iter_a), method, axis) + pad_before -= pad_iter_b + pad_after -= pad_iter_a + safe_pad += pad_iter_b + pad_iter_a + newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis) + + elif mode == 'wrap': + for axis, (pad_before, pad_after) in enumerate(pad_width): + # Recursive padding along any axis where `pad_amt` is too large + # for indexing tricks. We can only safely pad the original axis + # length, to keep the period of the reflections consistent. + safe_pad = newmat.shape[axis] + while ((pad_before > safe_pad) or + (pad_after > safe_pad)): + pad_iter_b = min(safe_pad, + safe_pad * (pad_before // safe_pad)) + pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) + newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis) + + pad_before -= pad_iter_b + pad_after -= pad_iter_a + safe_pad += pad_iter_b + pad_iter_a + newmat = _pad_wrap(newmat, (pad_before, pad_after), axis) + + return newmat diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/arraypad.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/arraypad.pyc new file mode 100644 index 0000000..5778f30 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/arraypad.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/arraysetops.py b/project/venv/lib/python2.7/site-packages/numpy/lib/arraysetops.py new file mode 100644 index 0000000..3356904 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/arraysetops.py @@ -0,0 +1,788 @@ +""" +Set operations for arrays based on sorting. + +:Contains: + unique, + isin, + ediff1d, + intersect1d, + setxor1d, + in1d, + union1d, + setdiff1d + +:Notes: + +For floating point arrays, inaccurate results may appear due to usual round-off +and floating point comparison issues. + +Speed could be gained in some operations by an implementation of +sort(), that can provide directly the permutation vectors, avoiding +thus calls to argsort(). + +To do: Optionally return indices analogously to unique for all functions. 
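Why the while-loops above cap each step at safe_pad: one reflection can extend an axis by at most one period, so larger pad widths are built up iteratively. A sketch where pad_amt exceeds len(a) - 1:

    import numpy as np

    a = np.array([1, 2, 3])
    print(np.pad(a, (7, 0), 'reflect'))
    # -> [2 3 2 1 2 3 2 1 2 3], i.e. _pad_ref applied repeatedly while
    # keeping the reflection period consistent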
+ +:Author: Robert Cimrman + +""" +from __future__ import division, absolute_import, print_function + +import functools + +import numpy as np +from numpy.core import overrides + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique', + 'in1d', 'isin' + ] + + +def _ediff1d_dispatcher(ary, to_end=None, to_begin=None): + return (ary, to_end, to_begin) + + +@array_function_dispatch(_ediff1d_dispatcher) +def ediff1d(ary, to_end=None, to_begin=None): + """ + The differences between consecutive elements of an array. + + Parameters + ---------- + ary : array_like + If necessary, will be flattened before the differences are taken. + to_end : array_like, optional + Number(s) to append at the end of the returned differences. + to_begin : array_like, optional + Number(s) to prepend at the beginning of the returned differences. + + Returns + ------- + ediff1d : ndarray + The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. + + See Also + -------- + diff, gradient + + Notes + ----- + When applied to masked arrays, this function drops the mask information + if the `to_begin` and/or `to_end` parameters are used. + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.ediff1d(x) + array([ 1, 2, 3, -7]) + + >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + array([-99, 1, 2, 3, -7, 88, 99]) + + The returned array is always 1D. + + >>> y = [[1, 2, 4], [1, 6, 24]] + >>> np.ediff1d(y) + array([ 1, 2, -3, 5, 18]) + + """ + # force a 1d array + ary = np.asanyarray(ary).ravel() + + # enforce propagation of the dtype of input + # ary to returned result + dtype_req = ary.dtype + + # fast track default case + if to_begin is None and to_end is None: + return ary[1:] - ary[:-1] + + if to_begin is None: + l_begin = 0 + else: + _to_begin = np.asanyarray(to_begin, dtype=dtype_req) + if not np.all(_to_begin == to_begin): + raise ValueError("cannot convert 'to_begin' to array with dtype " + "'%r' as required for input ary" % dtype_req) + to_begin = _to_begin.ravel() + l_begin = len(to_begin) + + if to_end is None: + l_end = 0 + else: + _to_end = np.asanyarray(to_end, dtype=dtype_req) + # check that casting has not overflowed + if not np.all(_to_end == to_end): + raise ValueError("cannot convert 'to_end' to array with dtype " + "'%r' as required for input ary" % dtype_req) + to_end = _to_end.ravel() + l_end = len(to_end) + + # do the calculation in place and copy to_begin and to_end + l_diff = max(len(ary) - 1, 0) + result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype) + result = ary.__array_wrap__(result) + if l_begin > 0: + result[:l_begin] = to_begin + if l_end > 0: + result[l_begin + l_diff:] = to_end + np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) + return result + + +def _unpack_tuple(x): + """ Unpacks one-element tuples for use as return values """ + if len(x) == 1: + return x[0] + else: + return x + + +def _unique_dispatcher(ar, return_index=None, return_inverse=None, + return_counts=None, axis=None): + return (ar,) + + +@array_function_dispatch(_unique_dispatcher) +def unique(ar, return_index=False, return_inverse=False, + return_counts=False, axis=None): + """ + Find the unique elements of an array. + + Returns the sorted unique elements of an array. 
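The to_begin/to_end handling above enforces the input's dtype; a sketch of both the normal path and the rejection path:

    import numpy as np

    x = np.array([1, 2, 4, 7, 0])
    print(np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])))
    # -> [-99   1   2   3  -7  88  99]
    try:
        np.ediff1d(x, to_begin=1.5)  # 1.5 does not survive the cast to int
    except ValueError as exc:
        print(exc)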
There are three optional + outputs in addition to the unique elements: + + * the indices of the input array that give the unique values + * the indices of the unique array that reconstruct the input array + * the number of times each unique value comes up in the input array + + Parameters + ---------- + ar : array_like + Input array. Unless `axis` is specified, this will be flattened if it + is not already 1-D. + return_index : bool, optional + If True, also return the indices of `ar` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse : bool, optional + If True, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `ar`. + return_counts : bool, optional + If True, also return the number of times each unique item appears + in `ar`. + + .. versionadded:: 1.9.0 + + axis : int or None, optional + The axis to operate on. If None, `ar` will be flattened. If an integer, + the subarrays indexed by the given axis will be flattened and treated + as the elements of a 1-D array with the dimension of the given axis, + see the notes for more details. Object arrays or structured arrays + that contain objects are not supported if the `axis` kwarg is used. The + default is None. + + .. versionadded:: 1.13.0 + + Returns + ------- + unique : ndarray + The sorted unique values. + unique_indices : ndarray, optional + The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + unique_inverse : ndarray, optional + The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + unique_counts : ndarray, optional + The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. + + .. versionadded:: 1.9.0 + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Notes + ----- + When an axis is specified the subarrays indexed by the axis are sorted. + This is done by making the specified axis the first dimension of the array + and then flattening the subarrays in C order. The flattened subarrays are + then viewed as a structured type with each element given a label, with the + effect that we end up with a 1-D array of structured types that can be + treated in the same way as any other 1-D array. The result is that the + flattened subarrays are sorted in lexicographic order starting with the + first element. 
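The three optional outputs of unique() compose back into the input; a short sketch:

    import numpy as np

    a = np.array([2, 1, 2, 3, 1])
    u, index, inverse, counts = np.unique(
        a, return_index=True, return_inverse=True, return_counts=True)
    assert (a[index] == u).all()    # first occurrences yield the values
    assert (u[inverse] == a).all()  # inverse indices rebuild the input
    assert (np.repeat(u, counts) == np.sort(a)).all()  # counts rebuild the multiset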
+ + Examples + -------- + >>> np.unique([1, 1, 2, 2, 3, 3]) + array([1, 2, 3]) + >>> a = np.array([[1, 1], [2, 3]]) + >>> np.unique(a) + array([1, 2, 3]) + + Return the unique rows of a 2D array + + >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]]) + >>> np.unique(a, axis=0) + array([[1, 0, 0], [2, 3, 4]]) + + Return the indices of the original array that give the unique values: + + >>> a = np.array(['a', 'b', 'b', 'c', 'a']) + >>> u, indices = np.unique(a, return_index=True) + >>> u + array(['a', 'b', 'c'], + dtype='|S1') + >>> indices + array([0, 1, 3]) + >>> a[indices] + array(['a', 'b', 'c'], + dtype='|S1') + + Reconstruct the input array from the unique values: + + >>> a = np.array([1, 2, 6, 4, 2, 3, 2]) + >>> u, indices = np.unique(a, return_inverse=True) + >>> u + array([1, 2, 3, 4, 6]) + >>> indices + array([0, 1, 4, 3, 1, 2, 1]) + >>> u[indices] + array([1, 2, 6, 4, 2, 3, 2]) + + """ + ar = np.asanyarray(ar) + if axis is None: + ret = _unique1d(ar, return_index, return_inverse, return_counts) + return _unpack_tuple(ret) + + # axis was specified and not None + try: + ar = np.swapaxes(ar, axis, 0) + except np.AxisError: + # this removes the "axis1" or "axis2" prefix from the error message + raise np.AxisError(axis, ar.ndim) + + # Must reshape to a contiguous 2D array for this to work... + orig_shape, orig_dtype = ar.shape, ar.dtype + ar = ar.reshape(orig_shape[0], -1) + ar = np.ascontiguousarray(ar) + dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])] + + try: + consolidated = ar.view(dtype) + except TypeError: + # There's no good way to do this for object arrays, etc... + msg = 'The axis argument to unique is not supported for dtype {dt}' + raise TypeError(msg.format(dt=ar.dtype)) + + def reshape_uniq(uniq): + uniq = uniq.view(orig_dtype) + uniq = uniq.reshape(-1, *orig_shape[1:]) + uniq = np.swapaxes(uniq, 0, axis) + return uniq + + output = _unique1d(consolidated, return_index, + return_inverse, return_counts) + output = (reshape_uniq(output[0]),) + output[1:] + return _unpack_tuple(output) + + +def _unique1d(ar, return_index=False, return_inverse=False, + return_counts=False): + """ + Find the unique elements of an array, ignoring shape. + """ + ar = np.asanyarray(ar).flatten() + + optional_indices = return_index or return_inverse + + if optional_indices: + perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') + aux = ar[perm] + else: + ar.sort() + aux = ar + mask = np.empty(aux.shape, dtype=np.bool_) + mask[:1] = True + mask[1:] = aux[1:] != aux[:-1] + + ret = (aux[mask],) + if return_index: + ret += (perm[mask],) + if return_inverse: + imask = np.cumsum(mask) - 1 + inv_idx = np.empty(mask.shape, dtype=np.intp) + inv_idx[perm] = imask + ret += (inv_idx,) + if return_counts: + idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) + ret += (np.diff(idx),) + return ret + + +def _intersect1d_dispatcher( + ar1, ar2, assume_unique=None, return_indices=None): + return (ar1, ar2) + + +@array_function_dispatch(_intersect1d_dispatcher) +def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): + """ + Find the intersection of two arrays. + + Return the sorted, unique values that are in both of the input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. Will be flattened if not already 1D. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. 
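The heart of _unique1d above is a sort followed by a neighbour-difference mask; redone standalone as a sketch:

    import numpy as np

    ar = np.array([2, 1, 2, 3, 1])
    aux = np.sort(ar)
    mask = np.empty(aux.shape, dtype=bool)
    mask[:1] = True                  # the first element is always kept
    mask[1:] = aux[1:] != aux[:-1]   # keep elements that differ from the left
    print(aux[mask])                 # [1 2 3]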
+ return_indices : bool + If True, the indices which correspond to the intersection of the two + arrays are returned. The first instance of a value is used if there are + multiple. Default is False. + + .. versionadded:: 1.15.0 + + Returns + ------- + intersect1d : ndarray + Sorted 1D array of common and unique elements. + comm1 : ndarray + The indices of the first occurrences of the common values in `ar1`. + Only provided if `return_indices` is True. + comm2 : ndarray + The indices of the first occurrences of the common values in `ar2`. + Only provided if `return_indices` is True. + + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Examples + -------- + >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) + array([1, 3]) + + To intersect more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([3]) + + To return the indices of the values common to the input arrays + along with the intersected values: + >>> x = np.array([1, 1, 2, 3, 4]) + >>> y = np.array([2, 1, 4, 6]) + >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) + >>> x_ind, y_ind + (array([0, 2, 4]), array([1, 0, 2])) + >>> xy, x[x_ind], y[y_ind] + (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) + + """ + ar1 = np.asanyarray(ar1) + ar2 = np.asanyarray(ar2) + + if not assume_unique: + if return_indices: + ar1, ind1 = unique(ar1, return_index=True) + ar2, ind2 = unique(ar2, return_index=True) + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + else: + ar1 = ar1.ravel() + ar2 = ar2.ravel() + + aux = np.concatenate((ar1, ar2)) + if return_indices: + aux_sort_indices = np.argsort(aux, kind='mergesort') + aux = aux[aux_sort_indices] + else: + aux.sort() + + mask = aux[1:] == aux[:-1] + int1d = aux[:-1][mask] + + if return_indices: + ar1_indices = aux_sort_indices[:-1][mask] + ar2_indices = aux_sort_indices[1:][mask] - ar1.size + if not assume_unique: + ar1_indices = ind1[ar1_indices] + ar2_indices = ind2[ar2_indices] + + return int1d, ar1_indices, ar2_indices + else: + return int1d + + +def _setxor1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setxor1d_dispatcher) +def setxor1d(ar1, ar2, assume_unique=False): + """ + Find the set exclusive-or of two arrays. + + Return the sorted, unique values that are in only one (not both) of the + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setxor1d : ndarray + Sorted 1D array of unique values that are in only one of the input + arrays. + + Examples + -------- + >>> a = np.array([1, 2, 3, 2, 4]) + >>> b = np.array([2, 3, 5, 7, 5]) + >>> np.setxor1d(a,b) + array([1, 4, 5, 7]) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = np.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + + aux.sort() + flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) + return aux[flag[1:] & flag[:-1]] + + +def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None): + return (ar1, ar2) + + +@array_function_dispatch(_in1d_dispatcher) +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of a 1-D array is also present in a second array. 
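A sketch of the set identity that setxor1d realises in a single concatenate-and-sort pass:

    import numpy as np

    a = np.array([1, 2, 3, 2, 4])
    b = np.array([2, 3, 5, 7, 5])
    xor = np.setxor1d(a, b)
    print(xor)  # [1 4 5 7]
    # Equivalent, but with three passes instead of one:
    alt = np.setdiff1d(np.union1d(a, b), np.intersect1d(a, b))
    assert (xor == alt).all()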
+ + Returns a boolean array the same length as `ar1` that is True + where an element of `ar1` is in `ar2` and False otherwise. + + We recommend using :func:`isin` instead of `in1d` for new code. + + Parameters + ---------- + ar1 : (M,) array_like + Input array. + ar2 : array_like + The values against which to test each value of `ar1`. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted (that is, + False where an element of `ar1` is in `ar2` and True otherwise). + Default is False. ``np.in1d(a, b, invert=True)`` is equivalent + to (but is faster than) ``np.invert(in1d(a, b))``. + + .. versionadded:: 1.8.0 + + Returns + ------- + in1d : (M,) ndarray, bool + The values `ar1[in1d]` are in `ar2`. + + See Also + -------- + isin : Version of this function that preserves the + shape of ar1. + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Notes + ----- + `in1d` can be considered as an element-wise function version of the + python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly + equivalent to ``np.array([item in b for item in a])``. + However, this idea fails if `ar2` is a set, or similar (non-sequence) + container: As ``ar2`` is converted to an array, in those cases + ``asarray(ar2)`` is an object array rather than the expected array of + contained values. + + .. versionadded:: 1.4.0 + + Examples + -------- + >>> test = np.array([0, 1, 2, 5, 0]) + >>> states = [0, 2] + >>> mask = np.in1d(test, states) + >>> mask + array([ True, False, True, False, True]) + >>> test[mask] + array([0, 2, 0]) + >>> mask = np.in1d(test, states, invert=True) + >>> mask + array([False, True, False, True, False]) + >>> test[mask] + array([1, 5]) + """ + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # Check if one of the arrays may contain arbitrary objects + contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject + + # This code is run when + # a) the first condition is true, making the code significantly faster + # b) the second condition is true (i.e. `ar1` or `ar2` may contain + # arbitrary objects), since then sorting is not guaranteed to work + if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: + if invert: + mask = np.ones(len(ar1), dtype=bool) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=bool) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + ar1, rev_idx = np.unique(ar1, return_inverse=True) + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. 
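The stable-sort membership trick used by in1d, sketched standalone on the assume_unique path (both inputs duplicate-free); the general path uniquifies first, as the code above shows:

    import numpy as np

    ar1 = np.array([0, 1, 2, 5])
    ar2 = np.array([0, 2])
    ar = np.concatenate((ar1, ar2))
    order = ar.argsort(kind='mergesort')  # stable: ar1 copies sort first
    sar = ar[order]
    # An ar1 value also present in ar2 is immediately followed by its copy:
    flag = np.concatenate((sar[1:] == sar[:-1], [False]))
    ret = np.empty(ar.shape, dtype=bool)
    ret[order] = flag
    print(ret[:len(ar1)])  # [ True False  True False]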
+ order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = np.concatenate((bool_ar, [invert])) + ret = np.empty(ar.shape, dtype=bool) + ret[order] = flag + + if assume_unique: + return ret[:len(ar1)] + else: + return ret[rev_idx] + + +def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None): + return (element, test_elements) + + +@array_function_dispatch(_isin_dispatcher) +def isin(element, test_elements, assume_unique=False, invert=False): + """ + Calculates `element in test_elements`, broadcasting over `element` only. + Returns a boolean array of the same shape as `element` that is True + where an element of `element` is in `test_elements` and False otherwise. + + Parameters + ---------- + element : array_like + Input array. + test_elements : array_like + The values against which to test each value of `element`. + This argument is flattened if it is an array or array_like. + See notes for behavior with non-array-like parameters. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted, as if + calculating `element not in test_elements`. Default is False. + ``np.isin(a, b, invert=True)`` is equivalent to (but faster + than) ``np.invert(np.isin(a, b))``. + + Returns + ------- + isin : ndarray, bool + Has the same shape as `element`. The values `element[isin]` + are in `test_elements`. + + See Also + -------- + in1d : Flattened version of this function. + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Notes + ----- + + `isin` is an element-wise function version of the python keyword `in`. + ``isin(a, b)`` is roughly equivalent to + ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. + + `element` and `test_elements` are converted to arrays if they are not + already. If `test_elements` is a set (or other non-sequence collection) + it will be converted to an object array with one element, rather than an + array of the values contained in `test_elements`. This is a consequence + of the `array` constructor's way of handling non-sequence collections. + Converting the set to a list usually gives the desired behavior. + + .. 
versionadded:: 1.13.0 + + Examples + -------- + >>> element = 2*np.arange(4).reshape((2, 2)) + >>> element + array([[0, 2], + [4, 6]]) + >>> test_elements = [1, 2, 4, 8] + >>> mask = np.isin(element, test_elements) + >>> mask + array([[ False, True], + [ True, False]]) + >>> element[mask] + array([2, 4]) + + The indices of the matched values can be obtained with `nonzero`: + + >>> np.nonzero(mask) + (array([0, 1]), array([1, 0])) + + The test can also be inverted: + + >>> mask = np.isin(element, test_elements, invert=True) + >>> mask + array([[ True, False], + [ False, True]]) + >>> element[mask] + array([0, 6]) + + Because of how `array` handles sets, the following does not + work as expected: + + >>> test_set = {1, 2, 4, 8} + >>> np.isin(element, test_set) + array([[ False, False], + [ False, False]]) + + Casting the set to a list gives the expected result: + + >>> np.isin(element, list(test_set)) + array([[ False, True], + [ True, False]]) + """ + element = np.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + +def _union1d_dispatcher(ar1, ar2): + return (ar1, ar2) + + +@array_function_dispatch(_union1d_dispatcher) +def union1d(ar1, ar2): + """ + Find the union of two arrays. + + Return the unique, sorted array of values that are in either of the two + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. They are flattened if they are not already 1D. + + Returns + ------- + union1d : ndarray + Unique, sorted union of the input arrays. + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Examples + -------- + >>> np.union1d([-1, 0, 1], [-2, 0, 2]) + array([-2, -1, 0, 1, 2]) + + To find the union of more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([1, 2, 3, 4, 6]) + """ + return unique(np.concatenate((ar1, ar2), axis=None)) + + +def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setdiff1d_dispatcher) +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Find the set difference of two arrays. + + Return the unique values in `ar1` that are not in `ar2`. + + Parameters + ---------- + ar1 : array_like + Input array. + ar2 : array_like + Input comparison array. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setdiff1d : ndarray + 1D array of values in `ar1` that are not in `ar2`. The result + is sorted when `assume_unique=False`, but otherwise only sorted + if the input is sorted. + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. 
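# A short sketch of `isin` and `union1d` as defined above; the names and
# values here are illustrative only:
import numpy as np

grid = 2 * np.arange(4).reshape((2, 2))       # [[0, 2], [4, 6]]
mask = np.isin(grid, [1, 2, 4, 8])            # keeps the shape of `grid`
assert (grid[mask] == np.array([2, 4])).all()

merged = np.union1d([-1, 0, 1], [-2, 0, 2])
assert (merged == np.array([-2, -1, 0, 1, 2])).all()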
+ + Examples + -------- + >>> a = np.array([1, 2, 3, 2, 4, 1]) + >>> b = np.array([3, 4, 5, 6]) + >>> np.setdiff1d(a, b) + array([1, 2]) + + """ + if assume_unique: + ar1 = np.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] + diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/arraysetops.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/arraysetops.pyc new file mode 100644 index 0000000..512c2ae Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/arraysetops.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/arrayterator.py b/project/venv/lib/python2.7/site-packages/numpy/lib/arrayterator.py new file mode 100644 index 0000000..f2d4fe9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/arrayterator.py @@ -0,0 +1,225 @@ +""" +A buffered iterator for big arrays. + +This module solves the problem of iterating over a big file-based array +without having to read it into memory. The `Arrayterator` class wraps +an array object, and when iterated it will return sub-arrays with at most +a user-specified number of elements. + +""" +from __future__ import division, absolute_import, print_function + +from operator import mul +from functools import reduce + +from numpy.compat import long + +__all__ = ['Arrayterator'] + + +class Arrayterator(object): + """ + Buffered iterator for big arrays. + + `Arrayterator` creates a buffered iterator for reading big arrays in small + contiguous blocks. The class is useful for objects stored in the + file system. It allows iteration over the object *without* reading + everything in memory; instead, small blocks are read and iterated over. + + `Arrayterator` can be used with any object that supports multidimensional + slices. This includes NumPy arrays, but also variables from + Scientific.IO.NetCDF or pynetcdf for example. + + Parameters + ---------- + var : array_like + The object to iterate over. + buf_size : int, optional + The buffer size. If `buf_size` is supplied, the maximum amount of + data that will be read into memory is `buf_size` elements. + Default is None, which will read as many element as possible + into memory. + + Attributes + ---------- + var + buf_size + start + stop + step + shape + flat + + See Also + -------- + ndenumerate : Multidimensional array iterator. + flatiter : Flat array iterator. + memmap : Create a memory-map to an array stored in a binary file on disk. + + Notes + ----- + The algorithm works by first finding a "running dimension", along which + the blocks will be extracted. Given an array of dimensions + ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the + first dimension will be used. If, on the other hand, + ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. + Blocks are extracted along this dimension, and when the last block is + returned the process continues from the next dimension, until all + elements have been read. + + Examples + -------- + >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + >>> a_itor = np.lib.Arrayterator(a, 2) + >>> a_itor.shape + (3, 4, 5, 6) + + Now we can iterate over ``a_itor``, and it will return arrays of size + two. Since `buf_size` was smaller than any dimension, the first + dimension will be iterated over first: + + >>> for subarr in a_itor: + ... if not subarr.all(): + ... print(subarr, subarr.shape) + ... 
+ [[[[0 1]]]] (1, 1, 1, 2) + + """ + + def __init__(self, var, buf_size=None): + self.var = var + self.buf_size = buf_size + + self.start = [0 for dim in var.shape] + self.stop = [dim for dim in var.shape] + self.step = [1 for dim in var.shape] + + def __getattr__(self, attr): + return getattr(self.var, attr) + + def __getitem__(self, index): + """ + Return a new arrayterator. + + """ + # Fix index, handling ellipsis and incomplete slices. + if not isinstance(index, tuple): + index = (index,) + fixed = [] + length, dims = len(index), self.ndim + for slice_ in index: + if slice_ is Ellipsis: + fixed.extend([slice(None)] * (dims-length+1)) + length = len(fixed) + elif isinstance(slice_, (int, long)): + fixed.append(slice(slice_, slice_+1, 1)) + else: + fixed.append(slice_) + index = tuple(fixed) + if len(index) < dims: + index += (slice(None),) * (dims-len(index)) + + # Return a new arrayterator object. + out = self.__class__(self.var, self.buf_size) + for i, (start, stop, step, slice_) in enumerate( + zip(self.start, self.stop, self.step, index)): + out.start[i] = start + (slice_.start or 0) + out.step[i] = step * (slice_.step or 1) + out.stop[i] = start + (slice_.stop or stop-start) + out.stop[i] = min(stop, out.stop[i]) + return out + + def __array__(self): + """ + Return corresponding data. + + """ + slice_ = tuple(slice(*t) for t in zip( + self.start, self.stop, self.step)) + return self.var[slice_] + + @property + def flat(self): + """ + A 1-D flat iterator for Arrayterator objects. + + This iterator returns elements of the array to be iterated over in + `Arrayterator` one by one. It is similar to `flatiter`. + + See Also + -------- + Arrayterator + flatiter + + Examples + -------- + >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + >>> a_itor = np.lib.Arrayterator(a, 2) + + >>> for subarr in a_itor.flat: + ... if not subarr: + ... print(subarr, type(subarr)) + ... + 0 + + """ + for block in self: + for value in block.flat: + yield value + + @property + def shape(self): + """ + The shape of the array to be iterated over. + + For an example, see `Arrayterator`. 
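# A small sketch of `Arrayterator` in use: iterate over an array in
# blocks of at most `buf_size` elements without materialising the whole
# thing at once. The array here is tiny and purely illustrative.
import numpy as np

a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
it = np.lib.Arrayterator(a, buf_size=24)

total = 0
for block in it:
    assert block.size <= 24        # each block respects the buffer size
    total += block.sum()
assert total == a.sum()            # every element is visited exactly once

# Slicing returns another Arrayterator over the sub-array:
sub = it[0:1, ..., ::2]
assert np.asarray(sub).shape == a[0:1, ..., ::2].shape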
+ + """ + return tuple(((stop-start-1)//step+1) for start, stop, step in + zip(self.start, self.stop, self.step)) + + def __iter__(self): + # Skip arrays with degenerate dimensions + if [dim for dim in self.shape if dim <= 0]: + return + + start = self.start[:] + stop = self.stop[:] + step = self.step[:] + ndims = self.var.ndim + + while True: + count = self.buf_size or reduce(mul, self.shape) + + # iterate over each dimension, looking for the + # running dimension (ie, the dimension along which + # the blocks will be built from) + rundim = 0 + for i in range(ndims-1, -1, -1): + # if count is zero we ran out of elements to read + # along higher dimensions, so we read only a single position + if count == 0: + stop[i] = start[i]+1 + elif count <= self.shape[i]: + # limit along this dimension + stop[i] = start[i] + count*step[i] + rundim = i + else: + # read everything along this dimension + stop[i] = self.stop[i] + stop[i] = min(self.stop[i], stop[i]) + count = count//self.shape[i] + + # yield a block + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + yield self.var[slice_] + + # Update start position, taking care of overflow to + # other dimensions + start[rundim] = stop[rundim] # start where we stopped + for i in range(ndims-1, 0, -1): + if start[i] >= self.stop[i]: + start[i] = self.start[i] + start[i-1] += self.step[i-1] + if start[0] >= self.stop[0]: + return diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/arrayterator.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/arrayterator.pyc new file mode 100644 index 0000000..6842dcd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/arrayterator.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/financial.py b/project/venv/lib/python2.7/site-packages/numpy/lib/financial.py new file mode 100644 index 0000000..e1e2974 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/financial.py @@ -0,0 +1,830 @@ +"""Some simple financial calculations + +patterned after spreadsheet computations. + +There is some complexity in each function +so that the functions behave like ufuncs with +broadcasting and being able to be called with scalars +or arrays (or other sequences). + +Functions support the :class:`decimal.Decimal` type unless +otherwise stated. +""" +from __future__ import division, absolute_import, print_function + +from decimal import Decimal +import functools + +import numpy as np +from numpy.core import overrides + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate', + 'irr', 'npv', 'mirr'] + +_when_to_num = {'end':0, 'begin':1, + 'e':0, 'b':1, + 0:0, 1:1, + 'beginning':1, + 'start':1, + 'finish':0} + +def _convert_when(when): + #Test to see if when has already been converted to ndarray + #This will happen if one function calls another, for example ppmt + if isinstance(when, np.ndarray): + return when + try: + return _when_to_num[when] + except (KeyError, TypeError): + return [_when_to_num[x] for x in when] + + +def _fv_dispatcher(rate, nper, pmt, pv, when=None): + return (rate, nper, pmt, pv) + + +@array_function_dispatch(_fv_dispatcher) +def fv(rate, nper, pmt, pv, when='end'): + """ + Compute the future value. 
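# A quick numeric check of `fv` against the closed-form expression quoted
# in the Notes of this docstring (end-of-period payments, i.e. when=0);
# the figures are the docstring's own savings example:
import numpy as np

rate, nper, pmt, pv = 0.05 / 12, 10 * 12, -100, -100
future = np.fv(rate, nper, pmt, pv)               # about 15692.93
temp = (1 + rate) ** nper
assert np.isclose(future, -(pv * temp + pmt * (temp - 1) / rate))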
+ + Given: + * a present value, `pv` + * an interest `rate` compounded once per period, of which + there are + * `nper` total + * a (fixed) payment, `pmt`, paid either + * at the beginning (`when` = {'begin', 1}) or the end + (`when` = {'end', 0}) of each period + + Return: + the value at the end of the `nper` periods + + Parameters + ---------- + rate : scalar or array_like of shape(M, ) + Rate of interest as decimal (not per cent) per period + nper : scalar or array_like of shape(M, ) + Number of compounding periods + pmt : scalar or array_like of shape(M, ) + Payment + pv : scalar or array_like of shape(M, ) + Present value + when : {{'begin', 1}, {'end', 0}}, {string, int}, optional + When payments are due ('begin' (1) or 'end' (0)). + Defaults to {'end', 0}. + + Returns + ------- + out : ndarray + Future values. If all input is scalar, returns a scalar float. If + any input is array_like, returns future values for each input element. + If multiple inputs are array_like, they all must have the same shape. + + Notes + ----- + The future value is computed by solving the equation:: + + fv + + pv*(1+rate)**nper + + pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 + + or, when ``rate == 0``:: + + fv + pv + pmt * nper == 0 + + References + ---------- + .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + Open Document Format for Office Applications (OpenDocument)v1.2, + Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, + Pre-Draft 12. Organization for the Advancement of Structured Information + Standards (OASIS). Billerica, MA, USA. [ODT Document]. + Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt + + Examples + -------- + What is the future value after 10 years of saving $100 now, with + an additional monthly savings of $100. Assume the interest rate is + 5% (annually) compounded monthly? + + >>> np.fv(0.05/12, 10*12, -100, -100) + 15692.928894335748 + + By convention, the negative sign represents cash flow out (i.e. money not + available today). Thus, saving $100 a month at 5% annual interest leads + to $15,692.93 available to spend in 10 years. + + If any input is array_like, returns an array of equal shape. Let's + compare different interest rates from the example above. + + >>> a = np.array((0.05, 0.06, 0.07))/12 + >>> np.fv(a, 10*12, -100, -100) + array([ 15692.92889434, 16569.87435405, 17509.44688102]) + + """ + when = _convert_when(when) + (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when]) + temp = (1+rate)**nper + fact = np.where(rate == 0, nper, + (1 + rate*when)*(temp - 1)/rate) + return -(pv*temp + pmt*fact) + + +def _pmt_dispatcher(rate, nper, pv, fv=None, when=None): + return (rate, nper, pv, fv) + + +@array_function_dispatch(_pmt_dispatcher) +def pmt(rate, nper, pv, fv=0, when='end'): + """ + Compute the payment against loan principal plus interest. + + Given: + * a present value, `pv` (e.g., an amount borrowed) + * a future value, `fv` (e.g., 0) + * an interest `rate` compounded once per period, of which + there are + * `nper` total + * and (optional) specification of whether payment is made + at the beginning (`when` = {'begin', 1}) or the end + (`when` = {'end', 0}) of each period + + Return: + the (fixed) periodic payment. 
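# A sketch tying `pmt` back to `fv`: the payment pmt() returns should
# drive the balance to the requested future value (0 by default).
# Illustrative mortgage figures:
import numpy as np

rate, nper, pv = 0.075 / 12, 12 * 15, 200000
payment = np.pmt(rate, nper, pv)                  # about -1854.02
assert np.isclose(np.fv(rate, nper, payment, pv), 0.0, atol=1e-6)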
+ + Parameters + ---------- + rate : array_like + Rate of interest (per period) + nper : array_like + Number of compounding periods + pv : array_like + Present value + fv : array_like, optional + Future value (default = 0) + when : {{'begin', 1}, {'end', 0}}, {string, int} + When payments are due ('begin' (1) or 'end' (0)) + + Returns + ------- + out : ndarray + Payment against loan plus interest. If all input is scalar, returns a + scalar float. If any input is array_like, returns payment for each + input element. If multiple inputs are array_like, they all must have + the same shape. + + Notes + ----- + The payment is computed by solving the equation:: + + fv + + pv*(1 + rate)**nper + + pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 + + or, when ``rate == 0``:: + + fv + pv + pmt * nper == 0 + + for ``pmt``. + + Note that computing a monthly mortgage payment is only + one use for this function. For example, pmt returns the + periodic deposit one must make to achieve a specified + future balance given an initial deposit, a fixed, + periodically compounded interest rate, and the total + number of periods. + + References + ---------- + .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + Open Document Format for Office Applications (OpenDocument)v1.2, + Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, + Pre-Draft 12. Organization for the Advancement of Structured Information + Standards (OASIS). Billerica, MA, USA. [ODT Document]. + Available: + http://www.oasis-open.org/committees/documents.php + ?wg_abbrev=office-formulaOpenDocument-formula-20090508.odt + + Examples + -------- + What is the monthly payment needed to pay off a $200,000 loan in 15 + years at an annual interest rate of 7.5%? + + >>> np.pmt(0.075/12, 12*15, 200000) + -1854.0247200054619 + + In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained + today, a monthly payment of $1,854.02 would be required. Note that this + example illustrates usage of `fv` having a default value of 0. + + """ + when = _convert_when(when) + (rate, nper, pv, fv, when) = map(np.array, [rate, nper, pv, fv, when]) + temp = (1 + rate)**nper + mask = (rate == 0) + masked_rate = np.where(mask, 1, rate) + fact = np.where(mask != 0, nper, + (1 + masked_rate*when)*(temp - 1)/masked_rate) + return -(fv + pv*temp) / fact + + +def _nper_dispatcher(rate, pmt, pv, fv=None, when=None): + return (rate, pmt, pv, fv) + + +@array_function_dispatch(_nper_dispatcher) +def nper(rate, pmt, pv, fv=0, when='end'): + """ + Compute the number of periodic payments. + + :class:`decimal.Decimal` type is not supported. + + Parameters + ---------- + rate : array_like + Rate of interest (per period) + pmt : array_like + Payment + pv : array_like + Present value + fv : array_like, optional + Future value + when : {{'begin', 1}, {'end', 0}}, {string, int}, optional + When payments are due ('begin' (1) or 'end' (0)) + + Notes + ----- + The number of periods ``nper`` is computed by solving the equation:: + + fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0 + + but if ``rate = 0`` then:: + + fv + pv + pmt*nper = 0 + + Examples + -------- + If you only had $150/month to pay towards the loan, how long would it take + to pay-off a loan of $8,000 at 7% annual interest? + + >>> print(round(np.nper(0.07/12, -150, 8000), 5)) + 64.07335 + + So, over 64 months would be required to pay off the loan. 
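# The same consistency check works for `nper`: after the computed number
# of periods, the loan above is paid off, i.e. its future value is zero.
import numpy as np

rate, payment, pv = 0.07 / 12, -150, 8000
n = np.nper(rate, payment, pv)                    # about 64.07 periods
assert np.isclose(float(n), 64.07335, atol=1e-4)
assert np.isclose(np.fv(rate, float(n), payment, pv), 0.0, atol=1e-6)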
+ + The same analysis could be done with several different interest rates + and/or payments and/or total amounts to produce an entire table. + + >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12, + ... -150 : -99 : 50 , + ... 8000 : 9001 : 1000])) + array([[[ 64.07334877, 74.06368256], + [ 108.07548412, 127.99022654]], + [[ 66.12443902, 76.87897353], + [ 114.70165583, 137.90124779]]]) + + """ + when = _convert_when(when) + (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when]) + + use_zero_rate = False + with np.errstate(divide="raise"): + try: + z = pmt*(1+rate*when)/rate + except FloatingPointError: + use_zero_rate = True + + if use_zero_rate: + return (-fv + pv) / pmt + else: + A = -(fv + pv)/(pmt+0) + B = np.log((-fv+z) / (pv+z))/np.log(1+rate) + return np.where(rate == 0, A, B) + + +def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None): + return (rate, per, nper, pv, fv) + + +@array_function_dispatch(_ipmt_dispatcher) +def ipmt(rate, per, nper, pv, fv=0, when='end'): + """ + Compute the interest portion of a payment. + + Parameters + ---------- + rate : scalar or array_like of shape(M, ) + Rate of interest as decimal (not per cent) per period + per : scalar or array_like of shape(M, ) + Interest paid against the loan changes during the life or the loan. + The `per` is the payment period to calculate the interest amount. + nper : scalar or array_like of shape(M, ) + Number of compounding periods + pv : scalar or array_like of shape(M, ) + Present value + fv : scalar or array_like of shape(M, ), optional + Future value + when : {{'begin', 1}, {'end', 0}}, {string, int}, optional + When payments are due ('begin' (1) or 'end' (0)). + Defaults to {'end', 0}. + + Returns + ------- + out : ndarray + Interest portion of payment. If all input is scalar, returns a scalar + float. If any input is array_like, returns interest payment for each + input element. If multiple inputs are array_like, they all must have + the same shape. + + See Also + -------- + ppmt, pmt, pv + + Notes + ----- + The total payment is made up of payment against principal plus interest. + + ``pmt = ppmt + ipmt`` + + Examples + -------- + What is the amortization schedule for a 1 year loan of $2500 at + 8.24% interest per year compounded monthly? + + >>> principal = 2500.00 + + The 'per' variable represents the periods of the loan. Remember that + financial equations start the period count at 1! + + >>> per = np.arange(1*12) + 1 + >>> ipmt = np.ipmt(0.0824/12, per, 1*12, principal) + >>> ppmt = np.ppmt(0.0824/12, per, 1*12, principal) + + Each element of the sum of the 'ipmt' and 'ppmt' arrays should equal + 'pmt'. + + >>> pmt = np.pmt(0.0824/12, 1*12, principal) + >>> np.allclose(ipmt + ppmt, pmt) + True + + >>> fmt = '{0:2d} {1:8.2f} {2:8.2f} {3:8.2f}' + >>> for payment in per: + ... index = payment - 1 + ... principal = principal + ppmt[index] + ... 
print(fmt.format(payment, ppmt[index], ipmt[index], principal)) + 1 -200.58 -17.17 2299.42 + 2 -201.96 -15.79 2097.46 + 3 -203.35 -14.40 1894.11 + 4 -204.74 -13.01 1689.37 + 5 -206.15 -11.60 1483.22 + 6 -207.56 -10.18 1275.66 + 7 -208.99 -8.76 1066.67 + 8 -210.42 -7.32 856.25 + 9 -211.87 -5.88 644.38 + 10 -213.32 -4.42 431.05 + 11 -214.79 -2.96 216.26 + 12 -216.26 -1.49 -0.00 + + >>> interestpd = np.sum(ipmt) + >>> np.round(interestpd, 2) + -112.98 + + """ + when = _convert_when(when) + rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper, + pv, fv, when) + total_pmt = pmt(rate, nper, pv, fv, when) + ipmt = _rbl(rate, per, total_pmt, pv, when)*rate + try: + ipmt = np.where(when == 1, ipmt/(1 + rate), ipmt) + ipmt = np.where(np.logical_and(when == 1, per == 1), 0, ipmt) + except IndexError: + pass + return ipmt + + +def _rbl(rate, per, pmt, pv, when): + """ + This function is here to simply have a different name for the 'fv' + function to not interfere with the 'fv' keyword argument within the 'ipmt' + function. It is the 'remaining balance on loan' which might be useful as + it's own function, but is easily calculated with the 'fv' function. + """ + return fv(rate, (per - 1), pmt, pv, when) + + +def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None): + return (rate, per, nper, pv, fv) + + +@array_function_dispatch(_ppmt_dispatcher) +def ppmt(rate, per, nper, pv, fv=0, when='end'): + """ + Compute the payment against loan principal. + + Parameters + ---------- + rate : array_like + Rate of interest (per period) + per : array_like, int + Amount paid against the loan changes. The `per` is the period of + interest. + nper : array_like + Number of compounding periods + pv : array_like + Present value + fv : array_like, optional + Future value + when : {{'begin', 1}, {'end', 0}}, {string, int} + When payments are due ('begin' (1) or 'end' (0)) + + See Also + -------- + pmt, pv, ipmt + + """ + total = pmt(rate, nper, pv, fv, when) + return total - ipmt(rate, per, nper, pv, fv, when) + + +def _pv_dispatcher(rate, nper, pmt, fv=None, when=None): + return (rate, nper, nper, pv, fv) + + +@array_function_dispatch(_pv_dispatcher) +def pv(rate, nper, pmt, fv=0, when='end'): + """ + Compute the present value. + + Given: + * a future value, `fv` + * an interest `rate` compounded once per period, of which + there are + * `nper` total + * a (fixed) payment, `pmt`, paid either + * at the beginning (`when` = {'begin', 1}) or the end + (`when` = {'end', 0}) of each period + + Return: + the value now + + Parameters + ---------- + rate : array_like + Rate of interest (per period) + nper : array_like + Number of compounding periods + pmt : array_like + Payment + fv : array_like, optional + Future value + when : {{'begin', 1}, {'end', 0}}, {string, int}, optional + When payments are due ('begin' (1) or 'end' (0)) + + Returns + ------- + out : ndarray, float + Present value of a series of payments or investments. + + Notes + ----- + The present value is computed by solving the equation:: + + fv + + pv*(1 + rate)**nper + + pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0 + + or, when ``rate = 0``:: + + fv + pv + pmt * nper = 0 + + for `pv`, which is then returned. + + References + ---------- + .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + Open Document Format for Office Applications (OpenDocument)v1.2, + Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, + Pre-Draft 12. Organization for the Advancement of Structured Information + Standards (OASIS). 
Billerica, MA, USA. [ODT Document]. + Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt + + Examples + -------- + What is the present value (e.g., the initial investment) + of an investment that needs to total $15692.93 + after 10 years of saving $100 every month? Assume the + interest rate is 5% (annually) compounded monthly. + + >>> np.pv(0.05/12, 10*12, -100, 15692.93) + -100.00067131625819 + + By convention, the negative sign represents cash flow out + (i.e., money not available today). Thus, to end up with + $15,692.93 in 10 years saving $100 a month at 5% annual + interest, one's initial deposit should also be $100. + + If any input is array_like, ``pv`` returns an array of equal shape. + Let's compare different interest rates in the example above: + + >>> a = np.array((0.05, 0.04, 0.03))/12 + >>> np.pv(a, 10*12, -100, 15692.93) + array([ -100.00067132, -649.26771385, -1273.78633713]) + + So, to end up with the same $15692.93 under the same $100 per month + "savings plan," for annual interest rates of 4% and 3%, one would + need initial investments of $649.27 and $1273.79, respectively. + + """ + when = _convert_when(when) + (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when]) + temp = (1+rate)**nper + fact = np.where(rate == 0, nper, (1+rate*when)*(temp-1)/rate) + return -(fv + pmt*fact)/temp + +# Computed with Sage +# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - +# p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + +# p*((r + 1)^n - 1)*w/r) + +def _g_div_gp(r, n, p, x, y, w): + t1 = (r+1)**n + t2 = (r+1)**(n-1) + return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) / + (n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + + p*(t1 - 1)*w/r)) + + +def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None, + maxiter=None): + return (nper, pmt, pv, fv) + + +# Use Newton's iteration until the change is less than 1e-6 +# for all values or a maximum of 100 iterations is reached. +# Newton's rule is +# r_{n+1} = r_{n} - g(r_n)/g'(r_n) +# where +# g(r) is the formula +# g'(r) is the derivative with respect to r. +@array_function_dispatch(_rate_dispatcher) +def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100): + """ + Compute the rate of interest per period. + + Parameters + ---------- + nper : array_like + Number of compounding periods + pmt : array_like + Payment + pv : array_like + Present value + fv : array_like + Future value + when : {{'begin', 1}, {'end', 0}}, {string, int}, optional + When payments are due ('begin' (1) or 'end' (0)) + guess : Number, optional + Starting guess for solving the rate of interest, default 0.1 + tol : Number, optional + Required tolerance for the solution, default 1e-6 + maxiter : int, optional + Maximum iterations in finding the solution + + Notes + ----- + The rate of interest is computed by iteratively solving the + (non-linear) equation:: + + fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0 + + for ``rate``. + + References + ---------- + Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document + Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated + Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. + Organization for the Advancement of Structured Information Standards + (OASIS). Billerica, MA, USA. [ODT Document]. 
Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt + + """ + when = _convert_when(when) + default_type = Decimal if isinstance(pmt, Decimal) else float + + # Handle casting defaults to Decimal if/when pmt is a Decimal and + # guess and/or tol are not given default values + if guess is None: + guess = default_type('0.1') + + if tol is None: + tol = default_type('1e-6') + + (nper, pmt, pv, fv, when) = map(np.asarray, [nper, pmt, pv, fv, when]) + + rn = guess + iterator = 0 + close = False + while (iterator < maxiter) and not close: + rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when) + diff = abs(rnp1-rn) + close = np.all(diff < tol) + iterator += 1 + rn = rnp1 + if not close: + # Return nan's in array of the same shape as rn + return np.nan + rn + else: + return rn + + +def _irr_dispatcher(values): + return (values,) + + +@array_function_dispatch(_irr_dispatcher) +def irr(values): + """ + Return the Internal Rate of Return (IRR). + + This is the "average" periodically compounded rate of return + that gives a net present value of 0.0; for a more complete explanation, + see Notes below. + + :class:`decimal.Decimal` type is not supported. + + Parameters + ---------- + values : array_like, shape(N,) + Input cash flows per time period. By convention, net "deposits" + are negative and net "withdrawals" are positive. Thus, for + example, at least the first element of `values`, which represents + the initial investment, will typically be negative. + + Returns + ------- + out : float + Internal Rate of Return for periodic input values. + + Notes + ----- + The IRR is perhaps best understood through an example (illustrated + using np.irr in the Examples section below). Suppose one invests 100 + units and then makes the following withdrawals at regular (fixed) + intervals: 39, 59, 55, 20. Assuming the ending value is 0, one's 100 + unit investment yields 173 units; however, due to the combination of + compounding and the periodic withdrawals, the "average" rate of return + is neither simply 0.73/4 nor (1.73)^0.25-1. Rather, it is the solution + (for :math:`r`) of the equation: + + .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2} + + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0 + + In general, for `values` :math:`= [v_0, v_1, ... v_M]`, + irr is the solution of the equation: [G]_ + + .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0 + + References + ---------- + .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., + Addison-Wesley, 2003, pg. 348. + + Examples + -------- + >>> round(irr([-100, 39, 59, 55, 20]), 5) + 0.28095 + >>> round(irr([-100, 0, 0, 74]), 5) + -0.0955 + >>> round(irr([-100, 100, 0, -7]), 5) + -0.0833 + >>> round(irr([-100, 100, 0, 7]), 5) + 0.06206 + >>> round(irr([-5, 10.5, 1, -8, 1]), 5) + 0.0886 + + (Compare with the Example given for numpy.lib.financial.npv) + + """ + # `np.roots` call is why this function does not support Decimal type. + # + # Ultimately Decimal support needs to be added to np.roots, which has + # greater implications on the entire linear algebra module and how it does + # eigenvalue computations. + res = np.roots(values[::-1]) + mask = (res.imag == 0) & (res.real > 0) + if not mask.any(): + return np.nan + res = res[mask].real + # NPV(rate) = 0 can have more than one solution so we return + # only the solution closest to zero. 
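+    # For example, values = [-100, 39, 59, 55, 20] leaves one positive
+    # real root, res ~ 0.781, so the returned rate is 1/res - 1 ~ 0.28095
+    # (matching the docstring example).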
+ rate = 1/res - 1 + rate = rate.item(np.argmin(np.abs(rate))) + return rate + + +def _npv_dispatcher(rate, values): + return (values,) + + +@array_function_dispatch(_npv_dispatcher) +def npv(rate, values): + """ + Returns the NPV (Net Present Value) of a cash flow series. + + Parameters + ---------- + rate : scalar + The discount rate. + values : array_like, shape(M, ) + The values of the time series of cash flows. The (fixed) time + interval between cash flow "events" must be the same as that for + which `rate` is given (i.e., if `rate` is per year, then precisely + a year is understood to elapse between each cash flow event). By + convention, investments or "deposits" are negative, income or + "withdrawals" are positive; `values` must begin with the initial + investment, thus `values[0]` will typically be negative. + + Returns + ------- + out : float + The NPV of the input cash flow series `values` at the discount + `rate`. + + Notes + ----- + Returns the result of: [G]_ + + .. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}} + + References + ---------- + .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., + Addison-Wesley, 2003, pg. 346. + + Examples + -------- + >>> np.npv(0.281,[-100, 39, 59, 55, 20]) + -0.0084785916384548798 + + (Compare with the Example given for numpy.lib.financial.irr) + + """ + values = np.asarray(values) + return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0) + + +def _mirr_dispatcher(values, finance_rate, reinvest_rate): + return (values,) + + +@array_function_dispatch(_mirr_dispatcher) +def mirr(values, finance_rate, reinvest_rate): + """ + Modified internal rate of return. + + Parameters + ---------- + values : array_like + Cash flows (must contain at least one positive and one negative + value) or nan is returned. The first value is considered a sunk + cost at time zero. + finance_rate : scalar + Interest rate paid on the cash flows + reinvest_rate : scalar + Interest rate received on the cash flows upon reinvestment + + Returns + ------- + out : float + Modified internal rate of return + + """ + values = np.asarray(values) + n = values.size + + # Without this explicit cast the 1/(n - 1) computation below + # becomes a float, which causes TypeError when using Decimal + # values. + if isinstance(finance_rate, Decimal): + n = Decimal(n) + + pos = values > 0 + neg = values < 0 + if not (pos.any() and neg.any()): + return np.nan + numer = np.abs(npv(reinvest_rate, values*pos)) + denom = np.abs(npv(finance_rate, values*neg)) + return (numer/denom)**(1/(n - 1))*(1 + reinvest_rate) - 1 diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/financial.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/financial.pyc new file mode 100644 index 0000000..030fbf3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/financial.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/format.py b/project/venv/lib/python2.7/site-packages/numpy/lib/format.py new file mode 100644 index 0000000..10945e5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/format.py @@ -0,0 +1,875 @@ +""" +Binary serialization + +NPY format +========== + +A simple format for saving numpy arrays to disk with the full +information about them. + +The ``.npy`` format is the standard binary file format in NumPy for +persisting a *single* arbitrary NumPy array on disk. 
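# Before the NPY-format code below, one last cross-check of the financial
# routines defined above: discounting a cash flow at its own internal
# rate of return must give a net present value of (approximately) zero.
import numpy as np

flows = [-100, 39, 59, 55, 20]
r = np.irr(flows)                                 # about 0.28095
assert abs(np.npv(r, flows)) < 1e-9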
The format stores all
+of the shape and dtype information necessary to reconstruct the array
+correctly even on another machine with a different architecture.
+The format is designed to be as simple as possible while achieving
+its limited goals.
+
+The ``.npz`` format is the standard format for persisting *multiple* NumPy
+arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
+files, one for each array.
+
+Capabilities
+------------
+
+- Can represent all NumPy arrays including nested record arrays and
+  object arrays.
+
+- Represents the data in its native binary form.
+
+- Supports Fortran-contiguous arrays directly.
+
+- Stores all of the necessary information to reconstruct the array
+  including shape and dtype on a machine of a different
+  architecture.  Both little-endian and big-endian arrays are
+  supported, and a file with little-endian numbers will yield
+  a little-endian array on any machine reading the file. The
+  types are described in terms of their actual sizes. For example,
+  if a machine with a 64-bit C "long int" writes out an array with
+  "long ints", a reading machine with 32-bit C "long ints" will yield
+  an array with 64-bit integers.
+
+- Is straightforward to reverse engineer. Datasets often live longer than
+  the programs that created them. A competent developer should be
+  able to create a solution in their preferred programming language to
+  read most ``.npy`` files that they have been given without much
+  documentation.
+
+- Allows memory-mapping of the data. See `open_memmap`.
+
+- Can be read from a filelike stream object instead of an actual file.
+
+- Stores object arrays, i.e. arrays containing elements that are arbitrary
+  Python objects. Files with object arrays cannot be memory-mapped, but
+  they can be read and written to disk.
+
+Limitations
+-----------
+
+- Arbitrary subclasses of numpy.ndarray are not completely preserved.
+  Subclasses will be accepted for writing, but only the array data will
+  be written out. A regular numpy.ndarray object will be created
+  upon reading the file.
+
+.. warning::
+
+  Due to limitations in the interpretation of structured dtypes, dtypes
+  with fields with empty names will have the names replaced by 'f0', 'f1',
+  etc. Such arrays will not round-trip through the format entirely
+  accurately. The data is intact; only the field names will differ. We are
+  working on a fix for this. This fix will not require a change in the
+  file format. The arrays with such structures can still be saved and
+  restored, and the correct dtype may be restored by using the
+  ``loadedarray.view(correct_dtype)`` method.
+
+File extensions
+---------------
+
+We recommend using the ``.npy`` and ``.npz`` extensions for files saved
+in this format. This is by no means a requirement; applications may wish
+to use these file formats but use an extension specific to the
+application. In the absence of an obvious alternative, however,
+we suggest using ``.npy`` and ``.npz``.
+
+Version numbering
+-----------------
+
+The version numbering of these formats is independent of NumPy version
+numbering. If the format is upgraded, the code in `numpy.io` will still
+be able to read and write Version 1.0 files.
+
+Format Version 1.0
+------------------
+
+The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
+
+The next 1 byte is an unsigned byte: the major version number of the file
+format, e.g. ``\\x01``.
+
+The next 1 byte is an unsigned byte: the minor version number of the file
+format, e.g. ``\\x00``. Note: the version of the file format is not tied
+to the version of the numpy package.
+
+The next 2 bytes form a little-endian unsigned short int: the length of
+the header data HEADER_LEN.
+
+The next HEADER_LEN bytes form the header data describing the array's
+format. It is an ASCII string which contains a Python literal expression
+of a dictionary. It is terminated by a newline (``\\n``) and padded with
+spaces (``\\x20``) to make the total of
+``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
+by 64 for alignment purposes.
+
+The dictionary contains three keys:
+
+    "descr" : dtype.descr
+      An object that can be passed as an argument to the `numpy.dtype`
+      constructor to create the array's dtype.
+    "fortran_order" : bool
+      Whether the array data is Fortran-contiguous or not. Since
+      Fortran-contiguous arrays are a common form of non-C-contiguity,
+      we allow them to be written directly to disk for efficiency.
+    "shape" : tuple of int
+      The shape of the array.
+
+For repeatability and readability, the dictionary keys are sorted in
+alphabetic order. This is for convenience only. A writer SHOULD implement
+this if possible. A reader MUST NOT depend on this.
+
+Following the header comes the array data. If the dtype contains Python
+objects (i.e. ``dtype.hasobject is True``), then the data is a Python
+pickle of the array. Otherwise the data is the contiguous (either C-
+or Fortran-, depending on ``fortran_order``) bytes of the array.
+Consumers can figure out the number of bytes by multiplying the number
+of elements given by the shape (noting that ``shape=()`` means there is
+1 element) by ``dtype.itemsize``.
+
+Format Version 2.0
+------------------
+
+The version 1.0 format only allowed the array header to have a total size of
+65535 bytes. This can be exceeded by structured arrays with a large number of
+columns. The version 2.0 format extends the header size to 4 GiB.
+`numpy.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+The description of the fourth element of the header therefore has become:
+"The next 4 bytes form a little-endian unsigned int: the length of the header
+data HEADER_LEN."
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the `"npy-format" NEP
+<https://www.numpy.org/neps/nep-0001-npy-format.html>`_, however details have
+evolved with time and this document is more current.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import numpy
+import sys
+import io
+import warnings
+from numpy.lib.utils import safe_eval
+from numpy.compat import (
+    asbytes, asstr, isfileobj, long, os_fspath
+    )
+from numpy.core.numeric import pickle
+
+
+MAGIC_PREFIX = b'\x93NUMPY'
+MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
+BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes
+
+# difference between version 1.0 and 2.0 is a 4 byte (I) header length
+# instead of 2 bytes (H) allowing storage of large structured arrays
+
+def _check_version(version):
+    if version not in [(1, 0), (2, 0), None]:
+        msg = "we only support format version (1,0) and (2, 0), not %s"
+        raise ValueError(msg % (version,))
+
+def magic(major, minor):
+    """ Return the magic string for the given file format version.
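# A minimal sketch of the version 1.0 byte layout described above: write
# a tiny array into a buffer and pull the magic string, version and
# header length back out by hand.
import io
import struct
import numpy as np

buf = io.BytesIO()
np.save(buf, np.arange(3))
raw = buf.getvalue()

assert raw[:6] == b'\x93NUMPY'                 # magic string
major, minor = struct.unpack('BB', raw[6:8])   # file format version
(hlen,) = struct.unpack('<H', raw[8:10])       # little-endian header length
header = raw[10:10 + hlen]
assert header.endswith(b'\n')                  # padded, newline-terminated
assert (10 + hlen) % 64 == 0                   # 64-byte (ARRAY_ALIGN) alignment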
+ + Parameters + ---------- + major : int in [0, 255] + minor : int in [0, 255] + + Returns + ------- + magic : str + + Raises + ------ + ValueError if the version cannot be formatted. + """ + if major < 0 or major > 255: + raise ValueError("major version must be 0 <= major < 256") + if minor < 0 or minor > 255: + raise ValueError("minor version must be 0 <= minor < 256") + if sys.version_info[0] < 3: + return MAGIC_PREFIX + chr(major) + chr(minor) + else: + return MAGIC_PREFIX + bytes([major, minor]) + +def read_magic(fp): + """ Read the magic string to get the version of the file format. + + Parameters + ---------- + fp : filelike object + + Returns + ------- + major : int + minor : int + """ + magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") + if magic_str[:-2] != MAGIC_PREFIX: + msg = "the magic string is not correct; expected %r, got %r" + raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) + if sys.version_info[0] < 3: + major, minor = map(ord, magic_str[-2:]) + else: + major, minor = magic_str[-2:] + return major, minor + +def dtype_to_descr(dtype): + """ + Get a serializable descriptor from the dtype. + + The .descr attribute of a dtype object cannot be round-tripped through + the dtype() constructor. Simple types, like dtype('float32'), have + a descr which looks like a record array with one field with '' as + a name. The dtype() constructor interprets this as a request to give + a default name. Instead, we construct descriptor that can be passed to + dtype(). + + Parameters + ---------- + dtype : dtype + The dtype of the array that will be written to disk. + + Returns + ------- + descr : object + An object that can be passed to `numpy.dtype()` in order to + replicate the input dtype. + + """ + if dtype.names is not None: + # This is a record array. The .descr is fine. XXX: parts of the + # record array with an empty name, like padding bytes, still get + # fiddled with. This needs to be fixed in the C implementation of + # dtype(). + return dtype.descr + else: + return dtype.str + +def descr_to_dtype(descr): + ''' + descr may be stored as dtype.descr, which is a list of + (name, format, [shape]) tuples. Offsets are not explicitly saved, rather + empty fields with name,format == '', '|Vn' are added as padding. + + This function reverses the process, eliminating the empty padding fields. + ''' + if isinstance(descr, (str, dict)): + # No padding removal needed + return numpy.dtype(descr) + + fields = [] + offset = 0 + for field in descr: + if len(field) == 2: + name, descr_str = field + dt = descr_to_dtype(descr_str) + else: + name, descr_str, shape = field + dt = numpy.dtype((descr_to_dtype(descr_str), shape)) + + # Ignore padding bytes, which will be void bytes with '' as name + # Once support for blank names is removed, only "if name == ''" needed) + is_pad = (name == '' and dt.type is numpy.void and dt.names is None) + if not is_pad: + fields.append((name, dt, offset)) + + offset += dt.itemsize + + names, formats, offsets = zip(*fields) + # names may be (title, names) tuples + nametups = (n if isinstance(n, tuple) else (None, n) for n in names) + titles, names = zip(*nametups) + return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, + 'offsets': offsets, 'itemsize': offset}) + +def header_data_from_array_1_0(array): + """ Get the dictionary of header metadata from a numpy.ndarray. 
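# A round-trip sketch for the two descriptor helpers defined above,
# assuming they are importable from this module as numpy.lib.format:
import numpy as np
from numpy.lib.format import dtype_to_descr, descr_to_dtype

dt = np.dtype([('x', '<f8'), ('y', '<i4')])
descr = dtype_to_descr(dt)        # a list of (name, format) tuples
assert descr_to_dtype(descr) == dt

# Plain dtypes come back as their string form:
assert dtype_to_descr(np.dtype('<f8')) == '<f8'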
+
+    Parameters
+    ----------
+    array : numpy.ndarray
+
+    Returns
+    -------
+    d : dict
+        This has the appropriate entries for writing its string representation
+        to the header of the file.
+    """
+    d = {'shape': array.shape}
+    if array.flags.c_contiguous:
+        d['fortran_order'] = False
+    elif array.flags.f_contiguous:
+        d['fortran_order'] = True
+    else:
+        # Totally non-contiguous data. We will have to make it C-contiguous
+        # before writing. Note that we need to test for C_CONTIGUOUS first
+        # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
+        d['fortran_order'] = False
+
+    d['descr'] = dtype_to_descr(array.dtype)
+    return d
+
+def _write_array_header(fp, d, version=None):
+    """ Write the header for an array and returns the version used
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string representation
+        to the header of the file.
+    version: tuple or None
+        None means use oldest that works
+        explicit version will raise a ValueError if the format does not
+        allow saving this data.  Default: None
+    Returns
+    -------
+    version : tuple of int
+        the file version which needs to be used to store the data
+    """
+    import struct
+    header = ["{"]
+    for key, value in sorted(d.items()):
+        # Need to use repr here, since we eval these when reading
+        header.append("'%s': %s, " % (key, repr(value)))
+    header.append("}")
+    header = "".join(header)
+    header = asbytes(_filter_header(header))
+
+    hlen = len(header) + 1 # 1 for newline
+    padlen_v1 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<H') + hlen) % ARRAY_ALIGN)
+    padlen_v2 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<I') + hlen) % ARRAY_ALIGN)
+
+    # Which version(s) we write depends on the total header size; v1 has a max
+    # of 65535 bytes, v2 has a max of 4 GiB
+    if hlen + padlen_v1 < 2**16 and version in (None, (1, 0)):
+        version = (1, 0)
+        header_prefix = magic(1, 0) + struct.pack('<H', hlen + padlen_v1)
+        topad = padlen_v1
+    elif hlen + padlen_v2 < 2**32 and version in (None, (2, 0)):
+        version = (2, 0)
+        header_prefix = magic(2, 0) + struct.pack('<I', hlen + padlen_v2)
+        topad = padlen_v2
+    else:
+        msg = "Header length %s too big for version=%s"
+        raise ValueError(msg % (hlen, version))
+
+    # Pad the header with spaces and a final newline so that the magic
+    # string, the header length and the header fall on an ARRAY_ALIGN byte
+    # boundary.
+    fp.write(header_prefix)
+    fp.write(header + b' '*topad + b'\n')
+    return version
+
+def write_array_header_1_0(fp, d):
+    """ Write the header for an array using the 1.0 format.
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string
+        representation to the header of the file.
+    """
+    _write_array_header(fp, d, (1, 0))
+
+def write_array_header_2_0(fp, d):
+    """ Write the header for an array using the 2.0 format.
+        The 2.0 format allows storing very large structured arrays.
+
+    .. versionadded:: 1.9.0
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string
+        representation to the header of the file.
+    """
+    _write_array_header(fp, d, (2, 0))
+
+def read_array_header_1_0(fp):
+    """
+    Read an array header from a filelike object using the 1.0 file format
+    version.
+
+    This will leave the file object located just after the header.
+
+    Parameters
+    ----------
+    fp : filelike object
+        A file object or something with a `.read()` method like a file.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape of the array.
+    fortran_order : bool
+        The array data will be written out directly if it is either
+        C-contiguous or Fortran-contiguous.  Otherwise, it will be made
+        contiguous before writing it out.
+    dtype : dtype
+        The dtype of the file's data.
+
+    Raises
+    ------
+    ValueError
+        If the data is invalid.
+
+    """
+    return _read_array_header(fp, version=(1, 0))
+
+def read_array_header_2_0(fp):
+    """
+    Read an array header from a filelike object using the 2.0 file format
+    version.
+
+    This will leave the file object located just after the header.
+
+    .. versionadded:: 1.9.0
+
+    Parameters
+    ----------
+    fp : filelike object
+        A file object or something with a `.read()` method like a file.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape of the array.
+    fortran_order : bool
+        The array data will be written out directly if it is either
+        C-contiguous or Fortran-contiguous.  Otherwise, it will be made
+        contiguous before writing it out.
+    dtype : dtype
+        The dtype of the file's data.
+
+    Raises
+    ------
+    ValueError
+        If the data is invalid.
+
+    """
+    return _read_array_header(fp, version=(2, 0))
+
+
+def _filter_header(s):
+    """Clean up 'L' in npz header ints.
+
+    Cleans up the 'L' in strings representing integers. Needed to allow npz
+    headers produced in Python2 to be read in Python3.
+
+    Parameters
+    ----------
+    s : byte string
+        Npy file header.
+
+    Returns
+    -------
+    header : str
+        Cleaned up header.
+
+    """
+    import tokenize
+    if sys.version_info[0] >= 3:
+        from io import StringIO
+    else:
+        from StringIO import StringIO
+
+    tokens = []
+    last_token_was_number = False
+    # adding newline as python 2.7.5 workaround
+    string = asstr(s) + "\n"
+    for token in tokenize.generate_tokens(StringIO(string).readline):
+        token_type = token[0]
+        token_string = token[1]
+        if (last_token_was_number and
+                token_type == tokenize.NAME and
+                token_string == "L"):
+            continue
+        else:
+            tokens.append(token)
+        last_token_was_number = (token_type == tokenize.NUMBER)
+    # removing newline (see above) as python 2.7.5 workaround
+    return tokenize.untokenize(tokens)[:-1]
+
+
+def _read_array_header(fp, version):
+    """
+    see read_array_header_1_0
+    """
+    # Read an unsigned, little-endian short int which has the length of the
+    # header.
+    import struct
+    if version == (1, 0):
+        hlength_type = '<H'
+    elif version == (2, 0):
+        hlength_type = '<I'
+    else:
+        raise ValueError("Invalid version %r" % version)
+
+    hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
+    header_length = struct.unpack(hlength_type, hlength_str)[0]
+    header = _read_bytes(fp, header_length, "array header")
+
+    # The header is a pretty-printed string representation of a literal
+    # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte
+    # boundary. The keys are strings.
+    #   "shape" : tuple of int
+    #   "fortran_order" : bool
+    #   "descr" : dtype.descr
+    header = _filter_header(header)
+    try:
+        d = safe_eval(header)
+    except SyntaxError as e:
+        msg = "Cannot parse header: %r\nException: %r"
+        raise ValueError(msg % (header, e))
+    if not isinstance(d, dict):
+        msg = "Header is not a dictionary: %r"
+        raise ValueError(msg % d)
+    keys = sorted(d.keys())
+    if keys != ['descr', 'fortran_order', 'shape']:
+        msg = "Header does not contain the correct keys: %r"
+        raise ValueError(msg % (keys,))
+
+    # Sanity-check the values.
+    if (not isinstance(d['shape'], tuple) or
+            not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
+        msg = "shape is not valid: %r"
+        raise ValueError(msg % (d['shape'],))
+    if not isinstance(d['fortran_order'], bool):
+        msg = "fortran_order is not a valid bool: %r"
+        raise ValueError(msg % (d['fortran_order'],))
+    try:
+        dtype = descr_to_dtype(d['descr'])
+    except TypeError:
+        msg = "descr is not a valid dtype descriptor: %r"
+        raise ValueError(msg % (d['descr'],))
+
+    return d['shape'], d['fortran_order'], dtype
+
+def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
+    """
+    Write an array to an NPY file, including a header.
+
+    If the array is neither C-contiguous nor Fortran-contiguous AND the
+    file_like object is not a real file object, this function will have to
+    copy data in memory.
+
+    Parameters
+    ----------
+    fp : file_like object
+        An open, writable file object, or similar object with a
+        ``.write()`` method.
+    array : ndarray
+        The array to write to disk.
+    version : (int, int) or None, optional
+        The version number of the format. None means use the oldest
+        supported version that is able to store the data.  Default: None
+    allow_pickle : bool, optional
+        Whether to allow writing pickled data. Default: True
+    pickle_kwargs : dict, optional
+        Additional keyword arguments to pass to pickle.dump, excluding
+        'protocol'. These are only useful when pickling objects in object
+        arrays on Python 3 to Python 2 compatible format.
+
+    Raises
+    ------
+    ValueError
+        If the array cannot be persisted. This includes the case of
+        allow_pickle=False and `array` being an object array.
+    Various other errors
+        If the array contains Python objects as part of its dtype, the
+        process of pickling them may raise various errors if the objects
+        are not picklable.
+
+    """
+    _check_version(version)
+    used_ver = _write_array_header(fp, header_data_from_array_1_0(array),
+                                   version)
+    # this warning can be removed when 1.9 has aged enough
+    if version != (2, 0) and used_ver == (2, 0):
+        warnings.warn("Stored array in format 2.0. It can only be"
+                      "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+
+    if array.itemsize == 0:
+        buffersize = 0
+    else:
+        # Set buffer size to 16 MiB to hide the Python loop overhead.
+        buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+
+    if array.dtype.hasobject:
+        # We contain Python objects so we cannot write out the data
+        # directly. Instead, we will pickle it out with version 2 of the
+        # pickle protocol.
+ if not allow_pickle: + raise ValueError("Object arrays cannot be saved when " + "allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + pickle.dump(array, fp, protocol=2, **pickle_kwargs) + elif array.flags.f_contiguous and not array.flags.c_contiguous: + if isfileobj(fp): + array.T.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='F'): + fp.write(chunk.tobytes('C')) + else: + if isfileobj(fp): + array.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='C'): + fp.write(chunk.tobytes('C')) + + +def read_array(fp, allow_pickle=True, pickle_kwargs=None): + """ + Read an array from an NPY file. + + Parameters + ---------- + fp : file_like object + If this is not a real file object, then this may take extra memory + and time. + allow_pickle : bool, optional + Whether to allow reading pickled data. Default: True + pickle_kwargs : dict + Additional keyword arguments to pass to pickle.load. These are only + useful when loading object arrays saved on Python 2 when using + Python 3. + + Returns + ------- + array : ndarray + The array from the data on disk. + + Raises + ------ + ValueError + If the data is invalid, or allow_pickle=False and the file contains + an object array. + + """ + version = read_magic(fp) + _check_version(version) + shape, fortran_order, dtype = _read_array_header(fp, version) + if len(shape) == 0: + count = 1 + else: + count = numpy.multiply.reduce(shape, dtype=numpy.int64) + + # Now read the actual data. + if dtype.hasobject: + # The array contained Python objects. We need to unpickle the data. + if not allow_pickle: + raise ValueError("Object arrays cannot be loaded when " + "allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + try: + array = pickle.load(fp, **pickle_kwargs) + except UnicodeError as err: + if sys.version_info[0] >= 3: + # Friendlier error message + raise UnicodeError("Unpickling a python object failed: %r\n" + "You may need to pass the encoding= option " + "to numpy.load" % (err,)) + raise + else: + if isfileobj(fp): + # We can use the fast fromfile() function. + array = numpy.fromfile(fp, dtype=dtype, count=count) + else: + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. 
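+            # With the default BUFFER_SIZE of 2**18 bytes this means, e.g.,
+            # a float64 array (itemsize 8) is read 32768 elements at a time.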
+ + # Use np.ndarray instead of np.empty since the latter does + # not correctly instantiate zero-width string dtypes; see + # https://github.com/numpy/numpy/pull/6430 + array = numpy.ndarray(count, dtype=dtype) + + if dtype.itemsize > 0: + # If dtype.itemsize == 0 then there's nothing more to read + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) + + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * dtype.itemsize) + data = _read_bytes(fp, read_size, "array data") + array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, + count=read_count) + + if fortran_order: + array.shape = shape[::-1] + array = array.transpose() + else: + array.shape = shape + + return array + + +def open_memmap(filename, mode='r+', dtype=None, shape=None, + fortran_order=False, version=None): + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. + + Parameters + ---------- + filename : str or path-like + The name of the file on disk. This may *not* be a file-like + object. + mode : str, optional + The mode in which to open the file; the default is 'r+'. In + addition to the standard file modes, 'c' is also accepted to mean + "copy on write." See `memmap` for the available mode strings. + dtype : data-type, optional + The data type of the array if we are creating a new file in "write" + mode, if not, `dtype` is ignored. The default value is None, which + results in a data-type of `float64`. + shape : tuple of int + The shape of the array if we are creating a new file in "write" + mode, in which case this parameter is required. Otherwise, this + parameter is ignored and is thus optional. + fortran_order : bool, optional + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False, the default) if we are creating a new file in + "write" mode. + version : tuple of int (major, minor) or None + If the mode is a "write" mode, then this is the version of the file + format used to create the file. None means use the oldest + supported version that is able to store the data. Default: None + + Returns + ------- + marray : memmap + The memory-mapped array. + + Raises + ------ + ValueError + If the data or the mode is invalid. + IOError + If the file is not found or cannot be opened correctly. + + See Also + -------- + memmap + + """ + if isfileobj(filename): + raise ValueError("Filename must be a string or a path-like object." + " Memmap cannot use existing file handles.") + + if 'w' in mode: + # We are creating the file, not reading it. + # Check if we ought to create the file. + _check_version(version) + # Ensure that the given dtype is an authentic dtype object rather + # than just something that can be interpreted as a dtype object. + dtype = numpy.dtype(dtype) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + d = dict( + descr=dtype_to_descr(dtype), + fortran_order=fortran_order, + shape=shape, + ) + # If we got here, then it should be safe to create the file. + fp = open(os_fspath(filename), mode+'b') + try: + used_ver = _write_array_header(fp, d, version) + # this warning can be removed when 1.9 has aged enough + if version != (2, 0) and used_ver == (2, 0): + warnings.warn("Stored array in format 2.0. It can only be" + "read by NumPy >= 1.9", UserWarning, stacklevel=2) + offset = fp.tell() + finally: + fp.close() + else: + # Read the header of the file first. 
+        fp = open(os_fspath(filename), 'rb')
+        try:
+            version = read_magic(fp)
+            _check_version(version)
+
+            shape, fortran_order, dtype = _read_array_header(fp, version)
+            if dtype.hasobject:
+                msg = "Array can't be memory-mapped: Python objects in dtype."
+                raise ValueError(msg)
+            offset = fp.tell()
+        finally:
+            fp.close()
+
+    if fortran_order:
+        order = 'F'
+    else:
+        order = 'C'
+
+    # We need to change a write-only mode to a read-write mode since we've
+    # already written data to the file.
+    if mode == 'w+':
+        mode = 'r+'
+
+    marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
+                          mode=mode, offset=offset)
+
+    return marray
+
+
+def _read_bytes(fp, size, error_template="ran out of data"):
+    """
+    Read from file-like object until size bytes are read.
+    Raises ValueError if EOF is encountered before size bytes are read.
+    Non-blocking objects only supported if they derive from io objects.
+
+    Required as e.g. ZipExtFile in python 2.6 can return less data than
+    requested.
+    """
+    data = bytes()
+    while True:
+        # io files (default in python3) return None or raise on
+        # would-block, python2 file will truncate, probably nothing can be
+        # done about that. note that regular files can't be non-blocking
+        try:
+            r = fp.read(size - len(data))
+            data += r
+            if len(r) == 0 or len(data) == size:
+                break
+        except io.BlockingIOError:
+            pass
+    if len(data) != size:
+        msg = "EOF: reading %s, expected %d bytes got %d"
+        raise ValueError(msg % (error_template, size, len(data)))
+    else:
+        return data
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/format.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/format.pyc
new file mode 100644
index 0000000..e440fec
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/format.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/function_base.py b/project/venv/lib/python2.7/site-packages/numpy/lib/function_base.py
new file mode 100644
index 0000000..d9ce3f8
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/function_base.py
@@ -0,0 +1,4809 @@
+from __future__ import division, absolute_import, print_function
+
+try:
+    # Accessing collections abstract classes from collections
+    # has been deprecated since Python 3.3
+    import collections.abc as collections_abc
+except ImportError:
+    import collections as collections_abc
+import functools
+import re
+import sys
+import warnings
+
+import numpy as np
+import numpy.core.numeric as _nx
+from numpy.core import atleast_1d, transpose
+from numpy.core.numeric import (
+    ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
+    empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
+    integer, isscalar, absolute
+    )
+from numpy.core.umath import (
+    pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
+    mod, exp, not_equal, subtract
+    )
+from numpy.core.fromnumeric import (
+    ravel, nonzero, partition, mean, any, sum
+    )
+from numpy.core.numerictypes import typecodes
+from numpy.core.overrides import set_module
+from numpy.core import overrides
+from numpy.core.function_base import add_newdoc
+from numpy.lib.twodim_base import diag
+from .utils import deprecate
+from numpy.core.multiarray import (
+    _insert, add_docstring, bincount, normalize_axis_index, _monotonicity,
+    interp as compiled_interp, interp_complex as compiled_interp_complex
+    )
+from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
+from numpy.compat import long
+
+if sys.version_info[0] < 3:
+    # Force range to be a 
generator, for np.delete's usage. + range = xrange + import __builtin__ as builtins +else: + import builtins + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +# needed in this module for compatibility +from numpy.lib.histograms import histogram, histogramdd + +__all__ = [ + 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', + 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', + 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', + 'bincount', 'digitize', 'cov', 'corrcoef', + 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', + 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', + 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc', + 'quantile' + ] + + +def _rot90_dispatcher(m, k=None, axes=None): + return (m,) + + +@array_function_dispatch(_rot90_dispatcher) +def rot90(m, k=1, axes=(0,1)): + """ + Rotate an array by 90 degrees in the plane specified by axes. + + Rotation direction is from the first towards the second axis. + + Parameters + ---------- + m : array_like + Array of two or more dimensions. + k : integer + Number of times the array is rotated by 90 degrees. + axes: (2,) array_like + The array is rotated in the plane defined by the axes. + Axes must be different. + + .. versionadded:: 1.12.0 + + Returns + ------- + y : ndarray + A rotated view of `m`. + + See Also + -------- + flip : Reverse the order of elements in an array along the given axis. + fliplr : Flip an array horizontally. + flipud : Flip an array vertically. + + Notes + ----- + rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) + rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) + + Examples + -------- + >>> m = np.array([[1,2],[3,4]], int) + >>> m + array([[1, 2], + [3, 4]]) + >>> np.rot90(m) + array([[2, 4], + [1, 3]]) + >>> np.rot90(m, 2) + array([[4, 3], + [2, 1]]) + >>> m = np.arange(8).reshape((2,2,2)) + >>> np.rot90(m, 1, (1,2)) + array([[[1, 3], + [0, 2]], + [[5, 7], + [4, 6]]]) + + """ + axes = tuple(axes) + if len(axes) != 2: + raise ValueError("len(axes) must be 2.") + + m = asanyarray(m) + + if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: + raise ValueError("Axes must be different.") + + if (axes[0] >= m.ndim or axes[0] < -m.ndim + or axes[1] >= m.ndim or axes[1] < -m.ndim): + raise ValueError("Axes={} out of range for array of ndim={}." + .format(axes, m.ndim)) + + k %= 4 + + if k == 0: + return m[:] + if k == 2: + return flip(flip(m, axes[0]), axes[1]) + + axes_list = arange(0, m.ndim) + (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], + axes_list[axes[0]]) + + if k == 1: + return transpose(flip(m,axes[1]), axes_list) + else: + # k == 3 + return flip(transpose(m, axes_list), axes[1]) + + +def _flip_dispatcher(m, axis=None): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def flip(m, axis=None): + """ + Reverse the order of elements in an array along the given axis. + + The shape of the array is preserved, but the elements are reordered. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + m : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes along which to flip over. The default, + axis=None, will flip over all of the axes of the input array. + If axis is negative it counts from the last to the first axis. + + If axis is a tuple of ints, flipping is performed on all of the axes + specified in the tuple. + + .. 
versionchanged:: 1.15.0 + None and tuples of axes are supported + + Returns + ------- + out : array_like + A view of `m` with the entries of axis reversed. Since a view is + returned, this operation is done in constant time. + + See Also + -------- + flipud : Flip an array vertically (axis=0). + fliplr : Flip an array horizontally (axis=1). + + Notes + ----- + flip(m, 0) is equivalent to flipud(m). + + flip(m, 1) is equivalent to fliplr(m). + + flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. + + flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all + positions. + + flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at + position 0 and position 1. + + Examples + -------- + >>> A = np.arange(8).reshape((2,2,2)) + >>> A + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> flip(A, 0) + array([[[4, 5], + [6, 7]], + [[0, 1], + [2, 3]]]) + >>> flip(A, 1) + array([[[2, 3], + [0, 1]], + [[6, 7], + [4, 5]]]) + >>> np.flip(A) + array([[[7, 6], + [5, 4]], + [[3, 2], + [1, 0]]]) + >>> np.flip(A, (0, 2)) + array([[[5, 4], + [7, 6]], + [[1, 0], + [3, 2]]]) + >>> A = np.random.randn(3,4,5) + >>> np.all(flip(A,2) == A[:,:,::-1,...]) + True + """ + if not hasattr(m, 'ndim'): + m = asarray(m) + if axis is None: + indexer = (np.s_[::-1],) * m.ndim + else: + axis = _nx.normalize_axis_tuple(axis, m.ndim) + indexer = [np.s_[:]] * m.ndim + for ax in axis: + indexer[ax] = np.s_[::-1] + indexer = tuple(indexer) + return m[indexer] + + +@set_module('numpy') +def iterable(y): + """ + Check whether or not an object can be iterated over. + + Parameters + ---------- + y : object + Input object. + + Returns + ------- + b : bool + Return ``True`` if the object has an iterator method or is a + sequence and ``False`` otherwise. + + + Examples + -------- + >>> np.iterable([1, 2, 3]) + True + >>> np.iterable(2) + False + + """ + try: + iter(y) + except TypeError: + return False + return True + + +def _average_dispatcher(a, axis=None, weights=None, returned=None): + return (a, weights) + + +@array_function_dispatch(_average_dispatcher) +def average(a, axis=None, weights=None, returned=False): + """ + Compute the weighted average along the specified axis. + + Parameters + ---------- + a : array_like + Array containing data to be averaged. If `a` is not an array, a + conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. The default, + axis=None, will average over all of the elements of the input array. + If axis is negative it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + returned : bool, optional + Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) + is returned, otherwise only the average is returned. + If `weights=None`, `sum_of_weights` is equivalent to the number of + elements over which the average is taken. 
+
+
+    Returns
+    -------
+    retval, [sum_of_weights] : array_type or double
+        Return the average along the specified axis. When `returned` is `True`,
+        return a tuple with the average as the first element and the sum
+        of the weights as the second element. `sum_of_weights` is of the
+        same type as `retval`. The result dtype follows a general pattern.
+        If `weights` is None, the result dtype will be that of `a`, or ``float64``
+        if `a` is integral. Otherwise, if `weights` is not None and `a` is non-
+        integral, the result type will be the type of lowest precision capable of
+        representing values of both `a` and `weights`. If `a` happens to be
+        integral, the previous rules still apply but the result dtype will
+        at least be ``float64``.
+
+    Raises
+    ------
+    ZeroDivisionError
+        When all weights along axis are zero. See `numpy.ma.average` for a
+        version robust to this type of error.
+    TypeError
+        When the length of 1D `weights` is not the same as the shape of `a`
+        along axis.
+
+    See Also
+    --------
+    mean
+
+    ma.average : average for masked arrays -- useful if your data contains
+                 "missing" values
+    numpy.result_type : Returns the type that results from applying the
+                        numpy type promotion rules to the arguments.
+
+    Examples
+    --------
+    >>> data = range(1,5)
+    >>> data
+    [1, 2, 3, 4]
+    >>> np.average(data)
+    2.5
+    >>> np.average(range(1,11), weights=range(10,0,-1))
+    4.0
+
+    >>> data = np.arange(6).reshape((3,2))
+    >>> data
+    array([[0, 1],
+           [2, 3],
+           [4, 5]])
+    >>> np.average(data, axis=1, weights=[1./4, 3./4])
+    array([ 0.75, 2.75, 4.75])
+    >>> np.average(data, weights=[1./4, 3./4])
+
+    Traceback (most recent call last):
+    ...
+    TypeError: Axis must be specified when shapes of a and weights differ.
+
+    >>> a = np.ones(5, dtype=np.float128)
+    >>> w = np.ones(5, dtype=np.complex64)
+    >>> avg = np.average(a, weights=w)
+    >>> print(avg.dtype)
+    complex256
+    """
+    a = np.asanyarray(a)
+
+    if weights is None:
+        avg = a.mean(axis)
+        scl = avg.dtype.type(a.size/avg.size)
+    else:
+        wgt = np.asanyarray(weights)
+
+        if issubclass(a.dtype.type, (np.integer, np.bool_)):
+            result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
+        else:
+            result_dtype = np.result_type(a.dtype, wgt.dtype)
+
+        # Sanity checks
+        if a.shape != wgt.shape:
+            if axis is None:
+                raise TypeError(
+                    "Axis must be specified when shapes of a and weights "
+                    "differ.")
+            if wgt.ndim != 1:
+                raise TypeError(
+                    "1D weights expected when shapes of a and weights differ.")
+            if wgt.shape[0] != a.shape[axis]:
+                raise ValueError(
+                    "Length of weights not compatible with specified axis.")
+
+            # setup wgt to broadcast along axis
+            wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
+            wgt = wgt.swapaxes(-1, axis)
+
+        scl = wgt.sum(axis=axis, dtype=result_dtype)
+        if np.any(scl == 0.0):
+            raise ZeroDivisionError(
+                "Weights sum to zero, can't be normalized")
+
+        avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
+
+    if returned:
+        if scl.shape != avg.shape:
+            scl = np.broadcast_to(scl, avg.shape).copy()
+        return avg, scl
+    else:
+        return avg
+
+
+@set_module('numpy')
+def asarray_chkfinite(a, dtype=None, order=None):
+    """Convert the input to an array, checking for NaNs or Infs.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays. Success requires no NaNs or Infs.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major (C-style) or
+        column-major (Fortran-style) memory representation.
+        Defaults to 'C'.
+
+    Returns
+    -------
+    out : ndarray
+        Array interpretation of `a`. No copy is performed if the input
+        is already an ndarray. If `a` is a subclass of ndarray, a base
+        class ndarray is returned.
+
+    Raises
+    ------
+    ValueError
+        Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
+
+    See Also
+    --------
+    asarray : Create an array.
+    asanyarray : Similar function which passes through subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfarray : Convert input to a floating point ndarray.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    fromiter : Create an array from an iterator.
+    fromfunction : Construct an array by executing a function on grid
+                   positions.
+
+    Examples
+    --------
+    Convert a list into an array. If all elements are finite
+    ``asarray_chkfinite`` is identical to ``asarray``.
+
+    >>> a = [1, 2]
+    >>> np.asarray_chkfinite(a, dtype=float)
+    array([1., 2.])
+
+    Raises ValueError if array_like contains NaNs or Infs.
+
+    >>> a = [1, 2, np.inf]
+    >>> try:
+    ...     np.asarray_chkfinite(a)
+    ... except ValueError:
+    ...     print('ValueError')
+    ...
+    ValueError
+
+    """
+    a = asarray(a, dtype=dtype, order=order)
+    if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
+        raise ValueError(
+            "array must not contain infs or NaNs")
+    return a
+
+
+def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
+    yield x
+    # support the undocumented behavior of allowing scalars
+    if np.iterable(condlist):
+        for c in condlist:
+            yield c
+
+
+@array_function_dispatch(_piecewise_dispatcher)
+def piecewise(x, condlist, funclist, *args, **kw):
+    """
+    Evaluate a piecewise-defined function.
+
+    Given a set of conditions and corresponding functions, evaluate each
+    function on the input data wherever its condition is true.
+
+    Parameters
+    ----------
+    x : ndarray or scalar
+        The input domain.
+    condlist : list of bool arrays or bool scalars
+        Each boolean array corresponds to a function in `funclist`. Wherever
+        `condlist[i]` is True, `funclist[i](x)` is used as the output value.
+
+        Each boolean array in `condlist` selects a piece of `x`,
+        and should therefore be of the same shape as `x`.
+
+        The length of `condlist` must correspond to that of `funclist`.
+        If one extra function is given, i.e. if
+        ``len(funclist) == len(condlist) + 1``, then that extra function
+        is the default value, used wherever all conditions are false.
+    funclist : list of callables, f(x,*args,**kw), or scalars
+        Each function is evaluated over `x` wherever its corresponding
+        condition is True. It should take a 1d array as input and give a 1d
+        array or a scalar value as output. If, instead of a callable,
+        a scalar is provided then a constant function (``lambda x: scalar``) is
+        assumed.
+    args : tuple, optional
+        Any further arguments given to `piecewise` are passed to the functions
+        upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
+        each function is called as ``f(x, 1, 'a')``.
+    kw : dict, optional
+        Keyword arguments used in calling `piecewise` are passed to the
+        functions upon execution, i.e., if called
+        ``piecewise(..., ..., alpha=1)``, then each function is called as
+        ``f(x, alpha=1)``.
+ + Returns + ------- + out : ndarray + The output is the same shape and type as x and is found by + calling the functions in `funclist` on the appropriate portions of `x`, + as defined by the boolean arrays in `condlist`. Portions not covered + by any condition have a default value of 0. + + + See Also + -------- + choose, select, where + + Notes + ----- + This is similar to choose or select, except that functions are + evaluated on elements of `x` that satisfy the corresponding condition from + `condlist`. + + The result is:: + + |-- + |funclist[0](x[condlist[0]]) + out = |funclist[1](x[condlist[1]]) + |... + |funclist[n2](x[condlist[n2]]) + |-- + + Examples + -------- + Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. + + >>> x = np.linspace(-2.5, 2.5, 6) + >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) + array([-1., -1., -1., 1., 1., 1.]) + + Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for + ``x >= 0``. + + >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) + array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) + + Apply the same function to a scalar value. + + >>> y = -2 + >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) + array(2) + + """ + x = asanyarray(x) + n2 = len(funclist) + + # undocumented: single condition is promoted to a list of one condition + if isscalar(condlist) or ( + not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): + condlist = [condlist] + + condlist = array(condlist, dtype=bool) + n = len(condlist) + + if n == n2 - 1: # compute the "otherwise" condition. + condelse = ~np.any(condlist, axis=0, keepdims=True) + condlist = np.concatenate([condlist, condelse], axis=0) + n += 1 + elif n != n2: + raise ValueError( + "with {} condition(s), either {} or {} functions are expected" + .format(n, n, n+1) + ) + + y = zeros(x.shape, x.dtype) + for k in range(n): + item = funclist[k] + if not isinstance(item, collections_abc.Callable): + y[condlist[k]] = item + else: + vals = x[condlist[k]] + if vals.size > 0: + y[condlist[k]] = item(vals, *args, **kw) + + return y + + +def _select_dispatcher(condlist, choicelist, default=None): + for c in condlist: + yield c + for c in choicelist: + yield c + + +@array_function_dispatch(_select_dispatcher) +def select(condlist, choicelist, default=0): + """ + Return an array drawn from elements in choicelist, depending on conditions. + + Parameters + ---------- + condlist : list of bool ndarrays + The list of conditions which determine from which array in `choicelist` + the output elements are taken. When multiple conditions are satisfied, + the first one encountered in `condlist` is used. + choicelist : list of ndarrays + The list of arrays from which the output elements are taken. It has + to be of the same length as `condlist`. + default : scalar, optional + The element inserted in `output` when all conditions evaluate to False. + + Returns + ------- + output : ndarray + The output at position m is the m-th element of the array in + `choicelist` where the m-th element of the corresponding array in + `condlist` is True. + + See Also + -------- + where : Return elements from one of two arrays depending on condition. + take, choose, compress, diag, diagonal + + Examples + -------- + >>> x = np.arange(10) + >>> condlist = [x<3, x>5] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist) + array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) + + """ + # Check the size of condlist and choicelist are the same, or abort. 
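+    # For the docstring example, condlist=[x<3, x>5] and choicelist=[x, x**2]
+    # both have length 2, so the check below passes.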
+    if len(condlist) != len(choicelist):
+        raise ValueError(
+            'list of cases must be same length as list of conditions')
+
+    # Now that the dtype is known, handle the deprecated select([], []) case
+    if len(condlist) == 0:
+        # 2014-02-24, 1.9
+        warnings.warn("select with an empty condition list is not possible "
+                      "and will be deprecated",
+                      DeprecationWarning, stacklevel=2)
+        return np.asarray(default)[()]
+
+    choicelist = [np.asarray(choice) for choice in choicelist]
+    choicelist.append(np.asarray(default))
+
+    # need to get the result type before broadcasting for correct scalar
+    # behaviour
+    dtype = np.result_type(*choicelist)
+
+    # Convert conditions to arrays and broadcast conditions and choices
+    # as the shape is needed for the result. Doing it separately optimizes
+    # for example when all choices are scalars.
+    condlist = np.broadcast_arrays(*condlist)
+    choicelist = np.broadcast_arrays(*choicelist)
+
+    # If cond array is not an ndarray in boolean format or scalar bool, abort.
+    deprecated_ints = False
+    for i in range(len(condlist)):
+        cond = condlist[i]
+        if cond.dtype.type is not np.bool_:
+            if np.issubdtype(cond.dtype, np.integer):
+                # A previous implementation accepted int ndarrays accidentally.
+                # Supported here deliberately, but deprecated.
+                condlist[i] = condlist[i].astype(bool)
+                deprecated_ints = True
+            else:
+                raise ValueError(
+                    'invalid entry {} in condlist: should be boolean ndarray'.format(i))
+
+    if deprecated_ints:
+        # 2014-02-24, 1.9
+        msg = "select condlists containing integer ndarrays is deprecated " \
+            "and will be removed in the future. Use `.astype(bool)` to " \
+            "convert to bools."
+        warnings.warn(msg, DeprecationWarning, stacklevel=2)
+
+    if choicelist[0].ndim == 0:
+        # This may be common, so avoid the call.
+        result_shape = condlist[0].shape
+    else:
+        result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
+
+    result = np.full(result_shape, choicelist[-1], dtype)
+
+    # Use np.copyto to burn each choicelist array onto result, using the
+    # corresponding condlist as a boolean mask. This is done in reverse
+    # order since the first choice should take precedence.
+    choicelist = choicelist[-2::-1]
+    condlist = condlist[::-1]
+    for choice, cond in zip(choicelist, condlist):
+        np.copyto(result, choice, where=cond)
+
+    return result
+
+
+def _copy_dispatcher(a, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_copy_dispatcher)
+def copy(a, order='K'):
+    """
+    Return an array copy of the given object.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout of the copy. 'C' means C-order,
+        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+        'C' otherwise. 'K' means match the layout of `a` as closely
+        as possible. (Note that this function and :meth:`ndarray.copy` are very
+        similar, but have different default values for their order=
+        arguments.)
+
+    Returns
+    -------
+    arr : ndarray
+        Array interpretation of `a`.
+
+    Notes
+    -----
+    This is equivalent to:
+
+    >>> np.array(a, copy=True)  #doctest: +SKIP
+
+    Examples
+    --------
+    Create an array x, with a reference y and a copy z:
+
+    >>> x = np.array([1, 2, 3])
+    >>> y = x
+    >>> z = np.copy(x)
+
+    Note that, when we modify x, y changes, but not z:
+
+    >>> x[0] = 10
+    >>> x[0] == y[0]
+    True
+    >>> x[0] == z[0]
+    False
+
+    """
+    return array(a, order=order, copy=True)
+
+# Basic operations
+
+
+def _gradient_dispatcher(f, *varargs, **kwargs):
+    yield f
+    for v in varargs:
+        yield v
+
+
+@array_function_dispatch(_gradient_dispatcher)
+def gradient(f, *varargs, **kwargs):
+    """
+    Return the gradient of an N-dimensional array.
+
+    The gradient is computed using second order accurate central differences
+    in the interior points and either first or second order accurate one-sided
+    (forward or backwards) differences at the boundaries.
+    The returned gradient hence has the same shape as the input array.
+
+    Parameters
+    ----------
+    f : array_like
+        An N-dimensional array containing samples of a scalar function.
+    varargs : list of scalar or array, optional
+        Spacing between f values. Default unitary spacing for all dimensions.
+        Spacing can be specified using:
+
+        1. single scalar to specify a sample distance for all dimensions.
+        2. N scalars to specify a constant sample distance for each dimension.
+           i.e. `dx`, `dy`, `dz`, ...
+        3. N arrays to specify the coordinates of the values along each
+           dimension of F. The length of the array must match the size of
+           the corresponding dimension
+        4. Any combination of N scalars/arrays with the meaning of 2. and 3.
+
+        If `axis` is given, the number of varargs must equal the number of axes.
+        Default: 1.
+
+    edge_order : {1, 2}, optional
+        Gradient is calculated using N-th order accurate differences
+        at the boundaries. Default: 1.
+
+        .. versionadded:: 1.9.1
+
+    axis : None or int or tuple of ints, optional
+        Gradient is calculated only along the given axis or axes
+        The default (axis = None) is to calculate the gradient for all the axes
+        of the input array. axis may be negative, in which case it counts from
+        the last to the first axis.
+
+        .. versionadded:: 1.11.0
+
+    Returns
+    -------
+    gradient : ndarray or list of ndarray
+        A set of ndarrays (or a single ndarray if there is only one dimension)
+        corresponding to the derivatives of f with respect to each dimension.
+        Each derivative has the same shape as f.
+
+    Examples
+    --------
+    >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
+    >>> np.gradient(f)
+    array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+    >>> np.gradient(f, 2)
+    array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
+
+    Spacing can be also specified with an array that represents the coordinates
+    of the values F along the dimensions.
+    For instance a uniform spacing:
+
+    >>> x = np.arange(f.size)
+    >>> np.gradient(f, x)
+    array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+
+    Or a non uniform one:
+
+    >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float)
+    >>> np.gradient(f, x)
+    array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5])
+
+    For two dimensional arrays, the return will be two arrays ordered by
+    axis. In this example the first array stands for the gradient in
+    rows and the second one in columns direction:
+
+    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
+    [array([[ 2., 2., -1.],
+           [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
+           [ 1. , 1. , 1. ]])]
+
+    In this example the spacing is also specified:
+    uniform for axis=0 and non uniform for axis=1
+
+    >>> dx = 2.
+ >>> y = [1., 1.5, 3.5] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) + [array([[ 1. , 1. , -0.5], + [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], + [ 2. , 1.7, 0.5]])] + + It is possible to specify how boundaries are treated using `edge_order` + + >>> x = np.array([0, 1, 2, 3, 4]) + >>> f = x**2 + >>> np.gradient(f, edge_order=1) + array([ 1., 2., 4., 6., 7.]) + >>> np.gradient(f, edge_order=2) + array([-0., 2., 4., 6., 8.]) + + The `axis` keyword can be used to specify a subset of axes of which the + gradient is calculated + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) + array([[ 2., 2., -1.], + [ 2., 2., -1.]]) + + Notes + ----- + Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous + derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we + minimize the "consistency error" :math:`\\eta_{i}` between the true gradient + and its estimate from a linear combination of the neighboring grid-points: + + .. math:: + + \\eta_{i} = f_{i}^{\\left(1\\right)} - + \\left[ \\alpha f\\left(x_{i}\\right) + + \\beta f\\left(x_{i} + h_{d}\\right) + + \\gamma f\\left(x_{i}-h_{s}\\right) + \\right] + + By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` + with their Taylor series expansion, this translates into solving + the following the linear system: + + .. math:: + + \\left\\{ + \\begin{array}{r} + \\alpha+\\beta+\\gamma=0 \\\\ + \\beta h_{d}-\\gamma h_{s}=1 \\\\ + \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 + \\end{array} + \\right. + + The resulting approximation of :math:`f_{i}^{(1)}` is the following: + + .. math:: + + \\hat f_{i}^{(1)} = + \\frac{ + h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) + - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} + { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} + + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + + h_{s}h_{d}^{2}}{h_{d} + + h_{s}}\\right) + + It is worth noting that if :math:`h_{s}=h_{d}` + (i.e., data are evenly spaced) + we find the standard second order approximation: + + .. math:: + + \\hat f_{i}^{(1)}= + \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + + \\mathcal{O}\\left(h^{2}\\right) + + With a similar procedure the forward/backward approximations used for + boundaries can be derived. + + References + ---------- + .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics + (Texts in Applied Mathematics). New York: Springer. + .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations + in Geophysical Fluid Dynamics. New York: Springer. + .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids, + Mathematics of Computation 51, no. 184 : 699-706. + `PDF `_. 
+ """ + f = np.asanyarray(f) + N = f.ndim # number of dimensions + + axes = kwargs.pop('axis', None) + if axes is None: + axes = tuple(range(N)) + else: + axes = _nx.normalize_axis_tuple(axes, N) + + len_axes = len(axes) + n = len(varargs) + if n == 0: + # no spacing argument - use 1 in all axes + dx = [1.0] * len_axes + elif n == 1 and np.ndim(varargs[0]) == 0: + # single scalar for all axes + dx = varargs * len_axes + elif n == len_axes: + # scalar or 1d array for each axis + dx = list(varargs) + for i, distances in enumerate(dx): + if np.ndim(distances) == 0: + continue + elif np.ndim(distances) != 1: + raise ValueError("distances must be either scalars or 1d") + if len(distances) != f.shape[axes[i]]: + raise ValueError("when 1d, distances must match " + "the length of the corresponding dimension") + diffx = np.diff(distances) + # if distances are constant reduce to the scalar case + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): + diffx = diffx[0] + dx[i] = diffx + else: + raise TypeError("invalid number of arguments") + + edge_order = kwargs.pop('edge_order', 1) + if kwargs: + raise TypeError('"{}" are not valid keyword arguments.'.format( + '", "'.join(kwargs.keys()))) + if edge_order > 2: + raise ValueError("'edge_order' greater than 2 not supported") + + # use central differences on interior and one-sided differences on the + # endpoints. This preserves second order-accuracy over the full domain. + + outvals = [] + + # create slice objects --- initially all are [:, :, ..., :] + slice1 = [slice(None)]*N + slice2 = [slice(None)]*N + slice3 = [slice(None)]*N + slice4 = [slice(None)]*N + + otype = f.dtype + if otype.type is np.datetime64: + # the timedelta dtype with the same unit information + otype = np.dtype(otype.name.replace('datetime', 'timedelta')) + # view as timedelta to allow addition + f = f.view(otype) + elif otype.type is np.timedelta64: + pass + elif np.issubdtype(otype, np.inexact): + pass + else: + # all other types convert to floating point + otype = np.double + + for axis, ax_dx in zip(axes, dx): + if f.shape[axis] < edge_order + 1: + raise ValueError( + "Shape of array too small to calculate a numerical gradient, " + "at least (edge_order + 1) elements are required.") + # result allocation + out = np.empty_like(f, dtype=otype) + + # spacing for the current axis + uniform_spacing = np.ndim(ax_dx) == 0 + + # Numerical differentiation: 2nd order interior + slice1[axis] = slice(1, -1) + slice2[axis] = slice(None, -2) + slice3[axis] = slice(1, -1) + slice4[axis] = slice(2, None) + + if uniform_spacing: + out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. 
* ax_dx) + else: + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] + a = -(dx2)/(dx1 * (dx1 + dx2)) + b = (dx2 - dx1) / (dx1 * dx2) + c = dx1 / (dx2 * (dx1 + dx2)) + # fix the shape for broadcasting + shape = np.ones(N, dtype=int) + shape[axis] = -1 + a.shape = b.shape = c.shape = shape + # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + # Numerical differentiation: 1st order edges + if edge_order == 1: + slice1[axis] = 0 + slice2[axis] = 1 + slice3[axis] = 0 + dx_0 = ax_dx if uniform_spacing else ax_dx[0] + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + dx_n = ax_dx if uniform_spacing else ax_dx[-1] + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n + + # Numerical differentiation: 2nd order edges + else: + slice1[axis] = 0 + slice2[axis] = 0 + slice3[axis] = 1 + slice4[axis] = 2 + if uniform_spacing: + a = -1.5 / ax_dx + b = 2. / ax_dx + c = -0.5 / ax_dx + else: + dx1 = ax_dx[0] + dx2 = ax_dx[1] + a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) + b = (dx1 + dx2) / (dx1 * dx2) + c = - dx1 / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + slice1[axis] = -1 + slice2[axis] = -3 + slice3[axis] = -2 + slice4[axis] = -1 + if uniform_spacing: + a = 0.5 / ax_dx + b = -2. / ax_dx + c = 1.5 / ax_dx + else: + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] + a = (dx2) / (dx1 * (dx1 + dx2)) + b = - (dx2 + dx1) / (dx1 * dx2) + c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + outvals.append(out) + + # reset the slice object in this dimension to ":" + slice1[axis] = slice(None) + slice2[axis] = slice(None) + slice3[axis] = slice(None) + slice4[axis] = slice(None) + + if len_axes == 1: + return outvals[0] + else: + return outvals + + +def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): + return (a, prepend, append) + + +@array_function_dispatch(_diff_dispatcher) +def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): + """ + Calculate the n-th discrete difference along the given axis. + + The first difference is given by ``out[n] = a[n+1] - a[n]`` along + the given axis, higher differences are calculated by using `diff` + recursively. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend, append : array_like, optional + Values to prepend or append to "a" along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array in along all other axes. Otherwise the + dimension and shape must match "a" except along axis. + + Returns + ------- + diff : ndarray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as the type of the difference + between any two elements of `a`. 
This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + gradient, ediff1d, cumsum + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.diff(u8_arr) + array([255], dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] + array(255, np.uint8) + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.diff(i16_arr) + array([-1], dtype=int16) + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + >>> np.diff(x) + array([1, 1], dtype='timedelta64[D]') + + """ + if n == 0: + return a + if n < 0: + raise ValueError( + "order must be non-negative but got " + repr(n)) + + a = asanyarray(a) + nd = a.ndim + axis = normalize_axis_index(axis, nd) + + combined = [] + if prepend is not np._NoValue: + prepend = np.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.concatenate(combined, axis) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + + op = not_equal if a.dtype == np.bool_ else subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + + return a + + +def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): + return (x, xp, fp) + + +@array_function_dispatch(_interp_dispatcher) +def interp(x, xp, fp, left=None, right=None, period=None): + """ + One-dimensional linear interpolation. + + Returns the one-dimensional piecewise linear interpolant to a function + with given discrete data points (`xp`, `fp`), evaluated at `x`. + + Parameters + ---------- + x : array_like + The x-coordinates at which to evaluate the interpolated values. + + xp : 1-D sequence of floats + The x-coordinates of the data points, must be increasing if argument + `period` is not specified. Otherwise, `xp` is internally sorted after + normalizing the periodic boundaries with ``xp = xp % period``. + + fp : 1-D sequence of float or complex + The y-coordinates of the data points, same length as `xp`. + + left : optional float or complex corresponding to fp + Value to return for `x < xp[0]`, default is `fp[0]`. + + right : optional float or complex corresponding to fp + Value to return for `x > xp[-1]`, default is `fp[-1]`. + + period : None or float, optional + A period for the x-coordinates. This parameter allows the proper + interpolation of angular x-coordinates. 
Parameters `left` and `right`
+        are ignored if `period` is specified.
+
+        .. versionadded:: 1.10.0
+
+    Returns
+    -------
+    y : float or complex (corresponding to fp) or ndarray
+        The interpolated values, same shape as `x`.
+
+    Raises
+    ------
+    ValueError
+        If `xp` and `fp` have different length
+        If `xp` or `fp` are not 1-D sequences
+        If `period == 0`
+
+    Notes
+    -----
+    Does not check that the x-coordinate sequence `xp` is increasing.
+    If `xp` is not increasing, the results are nonsense.
+    A simple check for increasing is::
+
+        np.all(np.diff(xp) > 0)
+
+    Examples
+    --------
+    >>> xp = [1, 2, 3]
+    >>> fp = [3, 2, 0]
+    >>> np.interp(2.5, xp, fp)
+    1.0
+    >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
+    array([ 3. , 3. , 2.5 , 0.56, 0. ])
+    >>> UNDEF = -99.0
+    >>> np.interp(3.14, xp, fp, right=UNDEF)
+    -99.0
+
+    Plot an interpolant to the sine function:
+
+    >>> x = np.linspace(0, 2*np.pi, 10)
+    >>> y = np.sin(x)
+    >>> xvals = np.linspace(0, 2*np.pi, 50)
+    >>> yinterp = np.interp(xvals, x, y)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(x, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(xvals, yinterp, '-x')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.show()
+
+    Interpolation with periodic x-coordinates:
+
+    >>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
+    >>> xp = [190, -190, 350, -350]
+    >>> fp = [5, 10, 3, 4]
+    >>> np.interp(x, xp, fp, period=360)
+    array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
+
+    Complex interpolation:
+
+    >>> x = [1.5, 4.0]
+    >>> xp = [2,3,5]
+    >>> fp = [1.0j, 0, 2+3j]
+    >>> np.interp(x, xp, fp)
+    array([ 0.+1.j , 1.+1.5j])
+
+    """
+
+    fp = np.asarray(fp)
+
+    if np.iscomplexobj(fp):
+        interp_func = compiled_interp_complex
+        input_dtype = np.complex128
+    else:
+        interp_func = compiled_interp
+        input_dtype = np.float64
+
+    if period is not None:
+        if period == 0:
+            raise ValueError("period must be a non-zero value")
+        period = abs(period)
+        left = None
+        right = None
+
+        x = np.asarray(x, dtype=np.float64)
+        xp = np.asarray(xp, dtype=np.float64)
+        fp = np.asarray(fp, dtype=input_dtype)
+
+        if xp.ndim != 1 or fp.ndim != 1:
+            raise ValueError("Data points must be 1-D sequences")
+        if xp.shape[0] != fp.shape[0]:
+            raise ValueError("fp and xp are not of the same length")
+        # normalizing periodic boundaries
+        x = x % period
+        xp = xp % period
+        asort_xp = np.argsort(xp)
+        xp = xp[asort_xp]
+        fp = fp[asort_xp]
+        xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
+        fp = np.concatenate((fp[-1:], fp, fp[0:1]))
+
+    return interp_func(x, xp, fp, left, right)
+
+
+def _angle_dispatcher(z, deg=None):
+    return (z,)
+
+
+@array_function_dispatch(_angle_dispatcher)
+def angle(z, deg=False):
+    """
+    Return the angle of the complex argument.
+
+    Parameters
+    ----------
+    z : array_like
+        A complex number or sequence of complex numbers.
+    deg : bool, optional
+        Return angle in degrees if True, radians if False (default).
+
+    Returns
+    -------
+    angle : ndarray or scalar
+        The counterclockwise angle from the positive real axis on
+        the complex plane, with dtype as numpy.float64.
+
+        .. versionchanged:: 1.16.0
+            This function works on subclasses of ndarray like `ma.array`.
+
+    See Also
+    --------
+    arctan2
+    absolute
+
+    Examples
+    --------
+    >>> np.angle([1.0, 1.0j, 1+1j])        # in radians
+    array([ 0.
, 1.57079633, 0.78539816]) + >>> np.angle(1+1j, deg=True) # in degrees + 45.0 + + """ + z = asanyarray(z) + if issubclass(z.dtype.type, _nx.complexfloating): + zimag = z.imag + zreal = z.real + else: + zimag = 0 + zreal = z + + a = arctan2(zimag, zreal) + if deg: + a *= 180/pi + return a + + +def _unwrap_dispatcher(p, discont=None, axis=None): + return (p,) + + +@array_function_dispatch(_unwrap_dispatcher) +def unwrap(p, discont=pi, axis=-1): + """ + Unwrap by changing deltas between values to 2*pi complement. + + Unwrap radian phase `p` by changing absolute jumps greater than + `discont` to their 2*pi complement along the given axis. + + Parameters + ---------- + p : array_like + Input array. + discont : float, optional + Maximum discontinuity between values, default is ``pi``. + axis : int, optional + Axis along which unwrap will operate, default is the last axis. + + Returns + ------- + out : ndarray + Output array. + + See Also + -------- + rad2deg, deg2rad + + Notes + ----- + If the discontinuity in `p` is smaller than ``pi``, but larger than + `discont`, no unwrapping is done because taking the 2*pi complement + would only make the discontinuity larger. + + Examples + -------- + >>> phase = np.linspace(0, np.pi, num=5) + >>> phase[3:] += np.pi + >>> phase + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) + >>> np.unwrap(phase) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) + + """ + p = asarray(p) + nd = p.ndim + dd = diff(p, axis=axis) + slice1 = [slice(None, None)]*nd # full slices + slice1[axis] = slice(1, None) + slice1 = tuple(slice1) + ddmod = mod(dd + pi, 2*pi) - pi + _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) + ph_correct = ddmod - dd + _nx.copyto(ph_correct, 0, where=abs(dd) < discont) + up = array(p, copy=True, dtype='d') + up[slice1] = p[slice1] + ph_correct.cumsum(axis) + return up + + +def _sort_complex(a): + return (a,) + + +@array_function_dispatch(_sort_complex) +def sort_complex(a): + """ + Sort a complex array using the real part first, then the imaginary part. + + Parameters + ---------- + a : array_like + Input array + + Returns + ------- + out : complex ndarray + Always returns a sorted complex array. + + Examples + -------- + >>> np.sort_complex([5, 3, 6, 2, 1]) + array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) + + >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) + array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) + + """ + b = array(a, copy=True) + b.sort() + if not issubclass(b.dtype.type, _nx.complexfloating): + if b.dtype.char in 'bhBH': + return b.astype('F') + elif b.dtype.char == 'g': + return b.astype('G') + else: + return b.astype('D') + else: + return b + + +def _trim_zeros(filt, trim=None): + return (filt,) + + +@array_function_dispatch(_trim_zeros) +def trim_zeros(filt, trim='fb'): + """ + Trim the leading and/or trailing zeros from a 1-D array or sequence. + + Parameters + ---------- + filt : 1-D array or sequence + Input array. + trim : str, optional + A string with 'f' representing trim from front and 'b' to trim from + back. Default is 'fb', trim zeros from both front and back of the + array. + + Returns + ------- + trimmed : 1-D array or sequence + The result of trimming the input. The input data type is preserved. + + Examples + -------- + >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) + >>> np.trim_zeros(a) + array([1, 2, 3, 0, 2, 1]) + + >>> np.trim_zeros(a, 'b') + array([0, 0, 0, 1, 2, 3, 0, 2, 1]) + + The input data type is preserved, list/tuple in means list/tuple out. 
+ + >>> np.trim_zeros([0, 1, 2, 0]) + [1, 2] + + """ + first = 0 + trim = trim.upper() + if 'F' in trim: + for i in filt: + if i != 0.: + break + else: + first = first + 1 + last = len(filt) + if 'B' in trim: + for i in filt[::-1]: + if i != 0.: + break + else: + last = last - 1 + return filt[first:last] + +def _extract_dispatcher(condition, arr): + return (condition, arr) + + +@array_function_dispatch(_extract_dispatcher) +def extract(condition, arr): + """ + Return the elements of an array that satisfy some condition. + + This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If + `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. + + Note that `place` does the exact opposite of `extract`. + + Parameters + ---------- + condition : array_like + An array whose nonzero or True entries indicate the elements of `arr` + to extract. + arr : array_like + Input array of the same size as `condition`. + + Returns + ------- + extract : ndarray + Rank 1 array of values from `arr` where `condition` is True. + + See Also + -------- + take, put, copyto, compress, place + + Examples + -------- + >>> arr = np.arange(12).reshape((3, 4)) + >>> arr + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> condition = np.mod(arr, 3)==0 + >>> condition + array([[ True, False, False, True], + [False, False, True, False], + [False, True, False, False]]) + >>> np.extract(condition, arr) + array([0, 3, 6, 9]) + + + If `condition` is boolean: + + >>> arr[condition] + array([0, 3, 6, 9]) + + """ + return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) + + +def _place_dispatcher(arr, mask, vals): + return (arr, mask, vals) + + +@array_function_dispatch(_place_dispatcher) +def place(arr, mask, vals): + """ + Change elements of an array based on conditional and input values. + + Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that + `place` uses the first N elements of `vals`, where N is the number of + True values in `mask`, while `copyto` uses the elements where `mask` + is True. + + Note that `extract` does the exact opposite of `place`. + + Parameters + ---------- + arr : ndarray + Array to put data into. + mask : array_like + Boolean mask array. Must have the same size as `a`. + vals : 1-D sequence + Values to put into `a`. Only the first N elements are used, where + N is the number of True values in `mask`. If `vals` is smaller + than N, it will be repeated, and if elements of `a` are to be masked, + this sequence must be non-empty. + + See Also + -------- + copyto, put, take, extract + + Examples + -------- + >>> arr = np.arange(6).reshape(2, 3) + >>> np.place(arr, arr>2, [44, 55]) + >>> arr + array([[ 0, 1, 2], + [44, 55, 44]]) + + """ + if not isinstance(arr, np.ndarray): + raise TypeError("argument 1 must be numpy.ndarray, " + "not {name}".format(name=type(arr).__name__)) + + return _insert(arr, mask, vals) + + +def disp(mesg, device=None, linefeed=True): + """ + Display a message on a device. + + Parameters + ---------- + mesg : str + Message to display. + device : object + Device to write message. If None, defaults to ``sys.stdout`` which is + very similar to ``print``. `device` needs to have ``write()`` and + ``flush()`` methods. + linefeed : bool, optional + Option whether to print a line feed or not. Defaults to True. + + Raises + ------ + AttributeError + If `device` does not have a ``write()`` or ``flush()`` method. 
+ + Examples + -------- + Besides ``sys.stdout``, a file-like object can also be used as it has + both required methods: + + >>> from io import StringIO + >>> buf = StringIO() + >>> np.disp(u'"Display" in a file', device=buf) + >>> buf.getvalue() + '"Display" in a file\\n' + + """ + if device is None: + device = sys.stdout + if linefeed: + device.write('%s\n' % mesg) + else: + device.write('%s' % mesg) + device.flush() + return + + +# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html +_DIMENSION_NAME = r'\w+' +_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) +_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) +_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) +_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) + + +def _parse_gufunc_signature(signature): + """ + Parse string signatures for a generalized universal function. + + Arguments + --------- + signature : string + Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` + for ``np.matmul``. + + Returns + ------- + Tuple of input and output core dimensions parsed from the signature, each + of the form List[Tuple[str, ...]]. + """ + if not re.match(_SIGNATURE, signature): + raise ValueError( + 'not a valid gufunc signature: {}'.format(signature)) + return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) + for arg in re.findall(_ARGUMENT, arg_list)] + for arg_list in signature.split('->')) + + +def _update_dim_sizes(dim_sizes, arg, core_dims): + """ + Incrementally check and update core dimension sizes for a single argument. + + Arguments + --------- + dim_sizes : Dict[str, int] + Sizes of existing core dimensions. Will be updated in-place. + arg : ndarray + Argument to examine. + core_dims : Tuple[str, ...] + Core dimensions for this argument. + """ + if not core_dims: + return + + num_core_dims = len(core_dims) + if arg.ndim < num_core_dims: + raise ValueError( + '%d-dimensional argument does not have enough ' + 'dimensions for all core dimensions %r' + % (arg.ndim, core_dims)) + + core_shape = arg.shape[-num_core_dims:] + for dim, size in zip(core_dims, core_shape): + if dim in dim_sizes: + if size != dim_sizes[dim]: + raise ValueError( + 'inconsistent size for core dimension %r: %r vs %r' + % (dim, size, dim_sizes[dim])) + else: + dim_sizes[dim] = size + + +def _parse_input_dimensions(args, input_core_dims): + """ + Parse broadcast and core dimensions for vectorize with a signature. + + Arguments + --------- + args : Tuple[ndarray, ...] + Tuple of input arguments to examine. + input_core_dims : List[Tuple[str, ...]] + List of core dimensions corresponding to each input. + + Returns + ------- + broadcast_shape : Tuple[int, ...] + Common shape to broadcast all non-core dimensions to. + dim_sizes : Dict[str, int] + Common sizes for named core dimensions. 
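+
+    For example, two inputs with shapes (3, 2) and (2,) and core dims
+    [('n',), ('n',)] give ``broadcast_shape == (3,)`` and
+    ``dim_sizes == {'n': 2}``.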
+ """ + broadcast_args = [] + dim_sizes = {} + for arg, core_dims in zip(args, input_core_dims): + _update_dim_sizes(dim_sizes, arg, core_dims) + ndim = arg.ndim - len(core_dims) + dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) + broadcast_args.append(dummy_array) + broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) + return broadcast_shape, dim_sizes + + +def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): + """Helper for calculating broadcast shapes with core dimensions.""" + return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) + for core_dims in list_of_core_dims] + + +def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes): + """Helper for creating output arrays in vectorize.""" + shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) + arrays = tuple(np.empty(shape, dtype=dtype) + for shape, dtype in zip(shapes, dtypes)) + return arrays + + +@set_module('numpy') +class vectorize(object): + """ + vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, + signature=None) + + Generalized function class. + + Define a vectorized function which takes a nested sequence of objects or + numpy arrays as inputs and returns a single numpy array or a tuple of numpy + arrays. The vectorized function evaluates `pyfunc` over successive tuples + of the input arrays like the python map function, except it uses the + broadcasting rules of numpy. + + The data type of the output of `vectorized` is determined by calling + the function with the first element of the input. This can be avoided + by specifying the `otypes` argument. + + Parameters + ---------- + pyfunc : callable + A python function or method. + otypes : str or list of dtypes, optional + The output data type. It must be specified as either a string of + typecode characters or a list of data type specifiers. There should + be one data type specifier for each output. + doc : str, optional + The docstring for the function. If `None`, the docstring will be the + ``pyfunc.__doc__``. + excluded : set, optional + Set of strings or integers representing the positional or keyword + arguments for which the function will not be vectorized. These will be + passed directly to `pyfunc` unmodified. + + .. versionadded:: 1.7.0 + + cache : bool, optional + If `True`, then cache the first function call that determines the number + of outputs if `otypes` is not provided. + + .. versionadded:: 1.7.0 + + signature : string, optional + Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for + vectorized matrix-vector multiplication. If provided, ``pyfunc`` will + be called with (and expected to return) arrays with shapes given by the + size of corresponding core dimensions. By default, ``pyfunc`` is + assumed to take scalars as input and output. + + .. versionadded:: 1.12.0 + + Returns + ------- + vectorized : callable + Vectorized function. + + Examples + -------- + >>> def myfunc(a, b): + ... "Return a-b if a>b, otherwise return a+b" + ... if a > b: + ... return a - b + ... else: + ... 
return a + b + + >>> vfunc = np.vectorize(myfunc) + >>> vfunc([1, 2, 3, 4], 2) + array([3, 4, 1, 2]) + + The docstring is taken from the input function to `vectorize` unless it + is specified: + + >>> vfunc.__doc__ + 'Return a-b if a>b, otherwise return a+b' + >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') + >>> vfunc.__doc__ + 'Vectorized `myfunc`' + + The output type is determined by evaluating the first element of the input, + unless it is specified: + + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + >>> vfunc = np.vectorize(myfunc, otypes=[float]) + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + + The `excluded` argument can be used to prevent vectorizing over certain + arguments. This can be useful for array-like arguments of a fixed length + such as the coefficients for a polynomial as in `polyval`: + + >>> def mypolyval(p, x): + ... _p = list(p) + ... res = _p.pop(0) + ... while _p: + ... res = res*x + _p.pop(0) + ... return res + >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) + >>> vpolyval(p=[1, 2, 3], x=[0, 1]) + array([3, 6]) + + Positional arguments may also be excluded by specifying their position: + + >>> vpolyval.excluded.add(0) + >>> vpolyval([1, 2, 3], x=[0, 1]) + array([3, 6]) + + The `signature` argument allows for vectorizing functions that act on + non-scalar arrays of fixed length. For example, you can use it for a + vectorized calculation of Pearson correlation coefficient and its p-value: + + >>> import scipy.stats + >>> pearsonr = np.vectorize(scipy.stats.pearsonr, + ... signature='(n),(n)->(),()') + >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) + (array([ 1., -1.]), array([ 0., 0.])) + + Or for a vectorized convolution: + + >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') + >>> convolve(np.eye(4), [1, 2, 1]) + array([[ 1., 2., 1., 0., 0., 0.], + [ 0., 1., 2., 1., 0., 0.], + [ 0., 0., 1., 2., 1., 0.], + [ 0., 0., 0., 1., 2., 1.]]) + + See Also + -------- + frompyfunc : Takes an arbitrary Python function and returns a ufunc + + Notes + ----- + The `vectorize` function is provided primarily for convenience, not for + performance. The implementation is essentially a for loop. + + If `otypes` is not specified, then a call to the function with the + first argument will be used to determine the number of outputs. The + results of this call will be cached if `cache` is `True` to prevent + calling the function twice. However, to implement the cache, the + original function must be wrapped which will slow down subsequent + calls, so only do this if your function is expensive. + + The new keyword argument interface and `excluded` argument support + further degrades performance. + + References + ---------- + .. [1] NumPy Reference, section `Generalized Universal Function API + `_. 
+ """ + + def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, + cache=False, signature=None): + self.pyfunc = pyfunc + self.cache = cache + self.signature = signature + self._ufunc = None # Caching to improve default performance + + if doc is None: + self.__doc__ = pyfunc.__doc__ + else: + self.__doc__ = doc + + if isinstance(otypes, str): + for char in otypes: + if char not in typecodes['All']: + raise ValueError("Invalid otype specified: %s" % (char,)) + elif iterable(otypes): + otypes = ''.join([_nx.dtype(x).char for x in otypes]) + elif otypes is not None: + raise ValueError("Invalid otype specification") + self.otypes = otypes + + # Excluded variable support + if excluded is None: + excluded = set() + self.excluded = set(excluded) + + if signature is not None: + self._in_and_out_core_dims = _parse_gufunc_signature(signature) + else: + self._in_and_out_core_dims = None + + def __call__(self, *args, **kwargs): + """ + Return arrays with the results of `pyfunc` broadcast (vectorized) over + `args` and `kwargs` not in `excluded`. + """ + excluded = self.excluded + if not kwargs and not excluded: + func = self.pyfunc + vargs = args + else: + # The wrapper accepts only positional arguments: we use `names` and + # `inds` to mutate `the_args` and `kwargs` to pass to the original + # function. + nargs = len(args) + + names = [_n for _n in kwargs if _n not in excluded] + inds = [_i for _i in range(nargs) if _i not in excluded] + the_args = list(args) + + def func(*vargs): + for _n, _i in enumerate(inds): + the_args[_i] = vargs[_n] + kwargs.update(zip(names, vargs[len(inds):])) + return self.pyfunc(*the_args, **kwargs) + + vargs = [args[_i] for _i in inds] + vargs.extend([kwargs[_n] for _n in names]) + + return self._vectorize_call(func=func, args=vargs) + + def _get_ufunc_and_otypes(self, func, args): + """Return (ufunc, otypes).""" + # frompyfunc will fail if args is empty + if not args: + raise ValueError('args can not be empty') + + if self.otypes is not None: + otypes = self.otypes + nout = len(otypes) + + # Note logic here: We only *use* self._ufunc if func is self.pyfunc + # even though we set self._ufunc regardless. + if func is self.pyfunc and self._ufunc is not None: + ufunc = self._ufunc + else: + ufunc = self._ufunc = frompyfunc(func, len(args), nout) + else: + # Get number of outputs and output types by calling the function on + # the first entries of args. We also cache the result to prevent + # the subsequent call when the ufunc is evaluated. + # Assumes that ufunc first evaluates the 0th elements in the input + # arrays (the input values are not checked to ensure this) + args = [asarray(arg) for arg in args] + if builtins.any(arg.size == 0 for arg in args): + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + + inputs = [arg.flat[0] for arg in args] + outputs = func(*inputs) + + # Performance note: profiling indicates that -- for simple + # functions at least -- this wrapping can almost double the + # execution time. + # Hence we make it optional. 
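+            # Concretely: ``_cache`` starts out holding the outputs already
+            # computed above, ``_func`` pops and returns that entry on its
+            # first call (the ufunc re-evaluates the 0th elements first),
+            # and every later call falls through to ``func`` itself.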
+ if self.cache: + _cache = [outputs] + + def _func(*vargs): + if _cache: + return _cache.pop() + else: + return func(*vargs) + else: + _func = func + + if isinstance(outputs, tuple): + nout = len(outputs) + else: + nout = 1 + outputs = (outputs,) + + otypes = ''.join([asarray(outputs[_k]).dtype.char + for _k in range(nout)]) + + # Performance note: profiling indicates that creating the ufunc is + # not a significant cost compared with wrapping so it seems not + # worth trying to cache this. + ufunc = frompyfunc(_func, len(args), nout) + + return ufunc, otypes + + def _vectorize_call(self, func, args): + """Vectorized call to `func` over positional `args`.""" + if self.signature is not None: + res = self._vectorize_call_with_signature(func, args) + elif not args: + res = func() + else: + ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + + # Convert args to object arrays first + inputs = [array(a, copy=False, subok=True, dtype=object) + for a in args] + + outputs = ufunc(*inputs) + + if ufunc.nout == 1: + res = array(outputs, copy=False, subok=True, dtype=otypes[0]) + else: + res = tuple([array(x, copy=False, subok=True, dtype=t) + for x, t in zip(outputs, otypes)]) + return res + + def _vectorize_call_with_signature(self, func, args): + """Vectorized call over positional arguments with a signature.""" + input_core_dims, output_core_dims = self._in_and_out_core_dims + + if len(args) != len(input_core_dims): + raise TypeError('wrong number of positional arguments: ' + 'expected %r, got %r' + % (len(input_core_dims), len(args))) + args = tuple(asanyarray(arg) for arg in args) + + broadcast_shape, dim_sizes = _parse_input_dimensions( + args, input_core_dims) + input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, + input_core_dims) + args = [np.broadcast_to(arg, shape, subok=True) + for arg, shape in zip(args, input_shapes)] + + outputs = None + otypes = self.otypes + nout = len(output_core_dims) + + for index in np.ndindex(*broadcast_shape): + results = func(*(arg[index] for arg in args)) + + n_results = len(results) if isinstance(results, tuple) else 1 + + if nout != n_results: + raise ValueError( + 'wrong number of outputs from pyfunc: expected %r, got %r' + % (nout, n_results)) + + if nout == 1: + results = (results,) + + if outputs is None: + for result, core_dims in zip(results, output_core_dims): + _update_dim_sizes(dim_sizes, result, core_dims) + + if otypes is None: + otypes = [asarray(result).dtype for result in results] + + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes) + + for output, result in zip(outputs, results): + output[index] = result + + if outputs is None: + # did not call the function even once + if otypes is None: + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + if builtins.any(dim not in dim_sizes + for dims in output_core_dims + for dim in dims): + raise ValueError('cannot call `vectorize` with a signature ' + 'including new output dimensions on size 0 ' + 'inputs') + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes) + + return outputs[0] if nout == 1 else outputs + + +def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, + fweights=None, aweights=None): + return (m, y, fweights, aweights) + + +@array_function_dispatch(_cov_dispatcher) +def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, + aweights=None): + """ + Estimate a covariance matrix, given data and weights. 
+ + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance + of :math:`x_i`. + + See the notes for an outline of the algorithm. + + Parameters + ---------- + m : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same form + as that of `m`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N - 1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. + ddof : int, optional + If not ``None`` the default value implied by `bias` is overridden. + Note that ``ddof=1`` will return the unbiased estimate, even if both + `fweights` and `aweights` are specified, and ``ddof=0`` will return + the simple average. See the notes for the details. The default value + is ``None``. + + .. versionadded:: 1.5 + fweights : array_like, int, optional + 1-D array of integer frequency weights; the number of times each + observation vector should be repeated. + + .. versionadded:: 1.10 + aweights : array_like, optional + 1-D array of observation vector weights. These relative weights are + typically large for observations considered "important" and smaller for + observations considered less "important". If ``ddof=0`` the array of + weights can be used to assign probabilities to observation vectors. + + .. versionadded:: 1.10 + + Returns + ------- + out : ndarray + The covariance matrix of the variables. + + See Also + -------- + corrcoef : Normalized covariance matrix + + Notes + ----- + Assume that the observations are in the columns of the observation + array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The + steps to compute the weighted covariance are as follows:: + + >>> w = f * a + >>> v1 = np.sum(w) + >>> v2 = np.sum(w * a) + >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1 + >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) + + Note that when ``a == 1``, the normalization factor + ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` + as it should. + + Examples + -------- + Consider two variables, :math:`x_0` and :math:`x_1`, which + correlate perfectly, but in opposite directions: + + >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T + >>> x + array([[0, 1, 2], + [2, 1, 0]]) + + Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance + matrix shows this clearly: + + >>> np.cov(x) + array([[ 1., -1.], + [-1., 1.]]) + + Note that element :math:`C_{0,1}`, which shows the correlation between + :math:`x_0` and :math:`x_1`, is negative. 
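+
+    As a quick check of the normalization described above, switching to
+    ``bias=True`` divides by ``N`` instead of ``N - 1`` (the entries are
+    exactly ``2/3`` here):
+
+    >>> np.cov(x, bias=True)
+    array([[ 0.66666667, -0.66666667],
+           [-0.66666667,  0.66666667]])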
+ + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.stack((x, y), axis=0) + >>> print(np.cov(X)) + [[ 11.71 -4.286 ] + [ -4.286 2.14413333]] + >>> print(np.cov(x, y)) + [[ 11.71 -4.286 ] + [ -4.286 2.14413333]] + >>> print(np.cov(x)) + 11.71 + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError( + "ddof must be integer") + + # Handles complex arrays too + m = np.asarray(m) + if m.ndim > 2: + raise ValueError("m has more than 2 dimensions") + + if y is None: + dtype = np.result_type(m, np.float64) + else: + y = np.asarray(y) + if y.ndim > 2: + raise ValueError("y has more than 2 dimensions") + dtype = np.result_type(m, y, np.float64) + + X = array(m, ndmin=2, dtype=dtype) + if not rowvar and X.shape[0] != 1: + X = X.T + if X.shape[0] == 0: + return np.array([]).reshape(0, 0) + if y is not None: + y = array(y, copy=False, ndmin=2, dtype=dtype) + if not rowvar and y.shape[0] != 1: + y = y.T + X = np.concatenate((X, y), axis=0) + + if ddof is None: + if bias == 0: + ddof = 1 + else: + ddof = 0 + + # Get the product of frequencies and weights + w = None + if fweights is not None: + fweights = np.asarray(fweights, dtype=float) + if not np.all(fweights == np.around(fweights)): + raise TypeError( + "fweights must be integer") + if fweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional fweights") + if fweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and fweights") + if any(fweights < 0): + raise ValueError( + "fweights cannot be negative") + w = fweights + if aweights is not None: + aweights = np.asarray(aweights, dtype=float) + if aweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional aweights") + if aweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and aweights") + if any(aweights < 0): + raise ValueError( + "aweights cannot be negative") + if w is None: + w = aweights + else: + w *= aweights + + avg, w_sum = average(X, axis=1, weights=w, returned=True) + w_sum = w_sum[0] + + # Determine the normalization + if w is None: + fact = X.shape[1] - ddof + elif ddof == 0: + fact = w_sum + elif aweights is None: + fact = w_sum - ddof + else: + fact = w_sum - ddof*sum(w*aweights)/w_sum + + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=2) + fact = 0.0 + + X -= avg[:, None] + if w is None: + X_T = X.T + else: + X_T = (X*w).T + c = dot(X, X_T.conj()) + c *= np.true_divide(1, fact) + return c.squeeze() + + +def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None): + return (x, y) + + +@array_function_dispatch(_corrcoef_dispatcher) +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue): + """ + Return Pearson product-moment correlation coefficients. + + Please refer to the documentation for `cov` for more detail. The + relationship between the correlation coefficient matrix, `R`, and the + covariance matrix, `C`, is + + .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } + + The values of `R` are between -1 and 1, inclusive. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. 
+    rowvar : bool, optional
+        If `rowvar` is True (default), then each row represents a
+        variable, with observations in the columns. Otherwise, the relationship
+        is transposed: each column represents a variable, while the rows
+        contain observations.
+    bias : _NoValue, optional
+        Has no effect, do not use.
+
+        .. deprecated:: 1.10.0
+    ddof : _NoValue, optional
+        Has no effect, do not use.
+
+        .. deprecated:: 1.10.0
+
+    Returns
+    -------
+    R : ndarray
+        The correlation coefficient matrix of the variables.
+
+    See Also
+    --------
+    cov : Covariance matrix
+
+    Notes
+    -----
+    Due to floating point rounding the resulting array may not be Hermitian,
+    the diagonal elements may not be 1, and the elements may not satisfy the
+    inequality abs(a) <= 1. The real and imaginary parts are clipped to the
+    interval [-1, 1] in an attempt to improve on that situation, but this is
+    not much help in the complex case.
+
+    This function accepts but discards arguments `bias` and `ddof`. This is
+    for backwards compatibility with previous versions of this function. These
+    arguments had no effect on the return values of the function and can be
+    safely ignored in this and previous versions of numpy.
+
+    """
+    if bias is not np._NoValue or ddof is not np._NoValue:
+        # 2015-03-15, 1.10
+        warnings.warn('bias and ddof have no effect and are deprecated',
+                      DeprecationWarning, stacklevel=2)
+    c = cov(x, y, rowvar)
+    try:
+        d = diag(c)
+    except ValueError:
+        # scalar covariance
+        # nan if incorrect value (nan, inf, 0), 1 otherwise
+        return c / c
+    stddev = sqrt(d.real)
+    c /= stddev[:, None]
+    c /= stddev[None, :]
+
+    # Clip real and imaginary parts to [-1, 1]. This does not guarantee
+    # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
+    # excessive work.
+    np.clip(c.real, -1, 1, out=c.real)
+    if np.iscomplexobj(c):
+        np.clip(c.imag, -1, 1, out=c.imag)
+
+    return c
+
+
+@set_module('numpy')
+def blackman(M):
+    """
+    Return the Blackman window.
+
+    The Blackman window is a taper formed by using the first three
+    terms of a summation of cosines. It was designed to have close to the
+    minimal leakage possible. It is close to optimal, only slightly worse
+    than a Kaiser window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero or less, an empty
+        array is returned.
+
+    Returns
+    -------
+    out : ndarray
+        The window, with the maximum value normalized to one (the value one
+        appears only if the number of samples is odd).
+
+    See Also
+    --------
+    bartlett, hamming, hanning, kaiser
+
+    Notes
+    -----
+    The Blackman window is defined as
+
+    .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
+
+    Most references to the Blackman window come from the signal processing
+    literature, where it is used as one of many windowing functions for
+    smoothing values. It is also known as an apodization (which means
+    "removing the foot", i.e. smoothing discontinuities at the beginning
+    and end of the sampled signal) or tapering function. It is known as a
+    "near optimal" tapering function, almost as good (by some measures)
+    as the Kaiser window.
+
+    References
+    ----------
+    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
+    Dover Publications, New York.
+
+    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
+    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
+
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> np.blackman(12)
+    array([ -1.38777878e-17,   3.26064346e-02,   1.59903635e-01,
+             4.14397981e-01,   7.36045180e-01,   9.67046769e-01,
+             9.67046769e-01,   7.36045180e-01,   4.14397981e-01,
+             1.59903635e-01,   3.26064346e-02,  -1.38777878e-17])
+
+    Plot the window and the frequency response:
+
+    >>> from numpy.fft import fft, fftshift
+    >>> window = np.blackman(51)
+    >>> plt.plot(window)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Blackman window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Amplitude")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Sample")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.show()
+
+    >>> plt.figure()
+    <matplotlib.figure.Figure object at 0x...>
+    >>> A = fft(window, 2048) / 25.5
+    >>> mag = np.abs(fftshift(A))
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(mag)
+    >>> response = np.clip(response, -100, 100)
+    >>> plt.plot(freq, response)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Frequency response of Blackman window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Magnitude [dB]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.axis('tight')
+    (-0.5, 0.5, -100.0, ...)
+    >>> plt.show()
+
+    """
+    if M < 1:
+        return array([])
+    if M == 1:
+        return ones(1, float)
+    n = arange(0, M)
+    return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
+
+
+@set_module('numpy')
+def bartlett(M):
+    """
+    Return the Bartlett window.
+
+    The Bartlett window is very similar to a triangular window, except
+    that the end points are at zero. It is often used in signal
+    processing for tapering a signal, without generating too much
+    ripple in the frequency domain.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero or less, an
+        empty array is returned.
+
+    Returns
+    -------
+    out : array
+        The triangular window, with the maximum value normalized to one
+        (the value one appears only if the number of samples is odd), with
+        the first and last samples equal to zero.
+
+    See Also
+    --------
+    blackman, hamming, hanning, kaiser
+
+    Notes
+    -----
+    The Bartlett window is defined as
+
+    .. math:: w(n) = \\frac{2}{M-1} \\left(
+              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
+              \\right)
+
+    Most references to the Bartlett window come from the signal
+    processing literature, where it is used as one of many windowing
+    functions for smoothing values. Note that convolution with this
+    window produces linear interpolation. It is also known as an
+    apodization (which means "removing the foot", i.e. smoothing
+    discontinuities at the beginning and end of the sampled signal) or
+    tapering function. The Fourier transform of the Bartlett window is
+    the product of two sinc functions.
+    Note the excellent discussion in Kanasewich.
+
+    References
+    ----------
+    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
+           Biometrika 37, 1-16, 1950.
+    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
+           The University of Alberta Press, 1975, pp. 109-110.
+    .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
+           Processing", Prentice-Hall, 1999, pp. 468-471.
+    .. [4] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+    .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+           "Numerical Recipes", Cambridge University Press, 1986, page 429.
+
+    Examples
+    --------
+    >>> np.bartlett(12)
+    array([ 0.        ,  0.18181818,  0.36363636,  0.54545455,  0.72727273,
+            0.90909091,  0.90909091,  0.72727273,  0.54545455,  0.36363636,
+            0.18181818,  0.        ])
+
+    Plot the window and its frequency response (requires SciPy and matplotlib):
+
+    >>> import matplotlib.pyplot as plt
+    >>> from numpy.fft import fft, fftshift
+    >>> window = np.bartlett(51)
+    >>> plt.plot(window)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Bartlett window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Amplitude")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Sample")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.show()
+
+    >>> plt.figure()
+    <matplotlib.figure.Figure object at 0x...>
+    >>> A = fft(window, 2048) / 25.5
+    >>> mag = np.abs(fftshift(A))
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(mag)
+    >>> response = np.clip(response, -100, 100)
+    >>> plt.plot(freq, response)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Frequency response of Bartlett window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Magnitude [dB]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.axis('tight')
+    (-0.5, 0.5, -100.0, ...)
+    >>> plt.show()
+
+    """
+    if M < 1:
+        return array([])
+    if M == 1:
+        return ones(1, float)
+    n = arange(0, M)
+    return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
+
+
+@set_module('numpy')
+def hanning(M):
+    """
+    Return the Hanning window.
+
+    The Hanning window is a taper formed by using a weighted cosine.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero or less, an
+        empty array is returned.
+
+    Returns
+    -------
+    out : ndarray, shape(M,)
+        The window, with the maximum value normalized to one (the value
+        one appears only if `M` is odd).
+
+    See Also
+    --------
+    bartlett, blackman, hamming, kaiser
+
+    Notes
+    -----
+    The Hanning window is defined as
+
+    .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
+               \\qquad 0 \\leq n \\leq M-1
+
+    The Hanning window was named for Julius von Hann, an Austrian
+    meteorologist. It is also known as the Cosine Bell. Some authors prefer
+    that it be called a Hann window, to help avoid confusion with the very
+    similar Hamming window.
+
+    Most references to the Hanning window come from the signal processing
+    literature, where it is used as one of many windowing functions for
+    smoothing values. It is also known as an apodization (which means
+    "removing the foot", i.e. smoothing discontinuities at the beginning
+    and end of the sampled signal) or tapering function.
+
+    References
+    ----------
+    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
+           spectra, Dover Publications, New York.
+    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
+           The University of Alberta Press, 1975, pp. 106-108.
+    .. [3] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+    .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+           "Numerical Recipes", Cambridge University Press, 1986, page 425.
+
+    Examples
+    --------
+    >>> np.hanning(12)
+    array([ 0.        ,  0.07937323,  0.29229249,  0.57115742,  0.82743037,
+            0.97974649,  0.97974649,  0.82743037,  0.57115742,  0.29229249,
+            0.07937323,  0.        ])
+
+    Plot the window and its frequency response:
+
+    >>> import matplotlib.pyplot as plt
+    >>> from numpy.fft import fft, fftshift
+    >>> window = np.hanning(51)
+    >>> plt.plot(window)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Hann window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Amplitude")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Sample")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.show()
+
+    >>> plt.figure()
+    <matplotlib.figure.Figure object at 0x...>
+    >>> A = fft(window, 2048) / 25.5
+    >>> mag = np.abs(fftshift(A))
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(mag)
+    >>> response = np.clip(response, -100, 100)
+    >>> plt.plot(freq, response)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Frequency response of the Hann window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Magnitude [dB]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.axis('tight')
+    (-0.5, 0.5, -100.0, ...)
+    >>> plt.show()
+
+    """
+    if M < 1:
+        return array([])
+    if M == 1:
+        return ones(1, float)
+    n = arange(0, M)
+    return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
+
+
+@set_module('numpy')
+def hamming(M):
+    """
+    Return the Hamming window.
+
+    The Hamming window is a taper formed by using a weighted cosine.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero or less, an
+        empty array is returned.
+
+    Returns
+    -------
+    out : ndarray
+        The window, with the maximum value normalized to one (the value
+        one appears only if the number of samples is odd).
+
+    See Also
+    --------
+    bartlett, blackman, hanning, kaiser
+
+    Notes
+    -----
+    The Hamming window is defined as
+
+    .. math::  w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
+               \\qquad 0 \\leq n \\leq M-1
+
+    The Hamming window was named for R. W. Hamming, an associate of J. W.
+    Tukey, and is described in Blackman and Tukey. It was recommended for
+    smoothing the truncated autocovariance function in the time domain.
+    Most references to the Hamming window come from the signal processing
+    literature, where it is used as one of many windowing functions for
+    smoothing values. It is also known as an apodization (which means
+    "removing the foot", i.e. smoothing discontinuities at the beginning
+    and end of the sampled signal) or tapering function.
+
+    References
+    ----------
+    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
+           spectra, Dover Publications, New York.
+    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
+           University of Alberta Press, 1975, pp. 109-110.
+    .. [3] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+    .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+           "Numerical Recipes", Cambridge University Press, 1986, page 425.
+
+    Examples
+    --------
+    >>> np.hamming(12)
+    array([ 0.08      ,  0.15302337,  0.34890909,  0.60546483,  0.84123594,
+            0.98136677,  0.98136677,  0.84123594,  0.60546483,  0.34890909,
+            0.15302337,  0.08      ])
+
+    Plot the window and the frequency response:
+
+    >>> import matplotlib.pyplot as plt
+    >>> from numpy.fft import fft, fftshift
+    >>> window = np.hamming(51)
+    >>> plt.plot(window)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Hamming window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Amplitude")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Sample")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.show()
+
+    >>> plt.figure()
+    <matplotlib.figure.Figure object at 0x...>
+    >>> A = fft(window, 2048) / 25.5
+    >>> mag = np.abs(fftshift(A))
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(mag)
+    >>> response = np.clip(response, -100, 100)
+    >>> plt.plot(freq, response)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Frequency response of Hamming window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Magnitude [dB]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.axis('tight')
+    (-0.5, 0.5, -100.0, ...)
+ >>> plt.show() + + """ + if M < 1: + return array([]) + if M == 1: + return ones(1, float) + n = arange(0, M) + return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) + +## Code from cephes for i0 + +_i0A = [ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1 + ] + +_i0B = [ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, + -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1 + ] + + +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x*b1 - b2 + vals[i] + + return 0.5*(b0 - b2) + + +def _i0_1(x): + return exp(x) * _chbevl(x/2.0-2, _i0A) + + +def _i0_2(x): + return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) + + +def _i0_dispatcher(x): + return (x,) + + +@array_function_dispatch(_i0_dispatcher) +def i0(x): + """ + Modified Bessel function of the first kind, order 0. + + Usually denoted :math:`I_0`. This function does broadcast, but will *not* + "up-cast" int dtype arguments unless accompanied by at least one float or + complex dtype argument (see Raises below). + + Parameters + ---------- + x : array_like, dtype float or complex + Argument of the Bessel function. + + Returns + ------- + out : ndarray, shape = x.shape, dtype = x.dtype + The modified Bessel function evaluated at each of the elements of `x`. + + Raises + ------ + TypeError: array cannot be safely cast to required type + If argument consists exclusively of int dtypes. + + See Also + -------- + scipy.special.iv, scipy.special.ive + + Notes + ----- + We use the algorithm published by Clenshaw [1]_ and referenced by + Abramowitz and Stegun [2]_, for which the function domain is + partitioned into the two intervals [0,8] and (8,inf), and Chebyshev + polynomial expansions are employed in each interval. Relative error on + the domain [0,30] using IEEE arithmetic is documented [3]_ as having a + peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). + + References + ---------- + .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in + *National Physical Laboratory Mathematical Tables*, vol. 
5, London: + Her Majesty's Stationery Office, 1962. + .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical + Functions*, 10th printing, New York: Dover, 1964, pp. 379. + http://www.math.sfu.ca/~cbm/aands/page_379.htm + .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html + + Examples + -------- + >>> np.i0([0.]) + array(1.0) + >>> np.i0([0., 1. + 2j]) + array([ 1.00000000+0.j , 0.18785373+0.64616944j]) + + """ + x = atleast_1d(x).copy() + y = empty_like(x) + ind = (x < 0) + x[ind] = -x[ind] + ind = (x <= 8.0) + y[ind] = _i0_1(x[ind]) + ind2 = ~ind + y[ind2] = _i0_2(x[ind2]) + return y.squeeze() + +## End of cephes code for i0 + + +@set_module('numpy') +def kaiser(M, beta): + """ + Return the Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + beta : float + Shape parameter for window. + + Returns + ------- + out : array + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, hanning + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + \\right)/I_0(\\beta) + + with + + .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple + approximation to the DPSS window based on Bessel functions. The Kaiser + window is a very good approximation to the Digital Prolate Spheroidal + Sequence, or Slepian window, which is the transform which maximizes the + energy in the main lobe of the window relative to total energy. + + The Kaiser can approximate many other windows by varying the beta + parameter. + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hanning + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + get returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + John Wiley and Sons, New York, (1966). + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 177-178. + .. 
[3] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> np.kaiser(12, 14)
+    array([  7.72686684e-06,   3.46009194e-03,   4.65200189e-02,
+             2.29737120e-01,   5.99885316e-01,   9.45674898e-01,
+             9.45674898e-01,   5.99885316e-01,   2.29737120e-01,
+             4.65200189e-02,   3.46009194e-03,   7.72686684e-06])
+
+
+    Plot the window and the frequency response:
+
+    >>> from numpy.fft import fft, fftshift
+    >>> window = np.kaiser(51, 14)
+    >>> plt.plot(window)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Kaiser window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Amplitude")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Sample")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.show()
+
+    >>> plt.figure()
+    <matplotlib.figure.Figure object at 0x...>
+    >>> A = fft(window, 2048) / 25.5
+    >>> mag = np.abs(fftshift(A))
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(mag)
+    >>> response = np.clip(response, -100, 100)
+    >>> plt.plot(freq, response)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Frequency response of Kaiser window")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Magnitude [dB]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.axis('tight')
+    (-0.5, 0.5, -100.0, ...)
+    >>> plt.show()
+
+    """
+    from numpy.dual import i0
+    if M == 1:
+        return np.array([1.])
+    n = arange(0, M)
+    alpha = (M-1)/2.0
+    return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
+
+
+def _sinc_dispatcher(x):
+    return (x,)
+
+
+@array_function_dispatch(_sinc_dispatcher)
+def sinc(x):
+    """
+    Return the sinc function.
+
+    The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
+
+    Parameters
+    ----------
+    x : ndarray
+        Array (possibly multi-dimensional) of values for which to
+        calculate ``sinc(x)``.
+
+    Returns
+    -------
+    out : ndarray
+        ``sinc(x)``, which has the same shape as the input.
+
+    Notes
+    -----
+    ``sinc(0)`` is the limit value 1.
+
+    The name sinc is short for "sine cardinal" or "sinus cardinalis".
+
+    The sinc function is used in various signal processing applications,
+    including in anti-aliasing, in the construction of a Lanczos resampling
+    filter, and in interpolation.
+
+    For bandlimited interpolation of discrete-time signals, the ideal
+    interpolation kernel is proportional to the sinc function.
+
+    References
+    ----------
+    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
+           Resource. http://mathworld.wolfram.com/SincFunction.html
+    .. [2] Wikipedia, "Sinc function",
+           https://en.wikipedia.org/wiki/Sinc_function
+
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-4, 4, 41)
+    >>> np.sinc(x)
+    array([ -3.89804309e-17,  -4.92362781e-02,  -8.40918587e-02,
+            -8.90384387e-02,  -5.84680802e-02,   3.89804309e-17,
+             6.68206631e-02,   1.16434881e-01,   1.26137788e-01,
+             8.50444803e-02,  -3.89804309e-17,  -1.03943254e-01,
+            -1.89206682e-01,  -2.16236208e-01,  -1.55914881e-01,
+             3.89804309e-17,   2.33872321e-01,   5.04551152e-01,
+             7.56826729e-01,   9.35489284e-01,   1.00000000e+00,
+             9.35489284e-01,   7.56826729e-01,   5.04551152e-01,
+             2.33872321e-01,   3.89804309e-17,  -1.55914881e-01,
+            -2.16236208e-01,  -1.89206682e-01,  -1.03943254e-01,
+            -3.89804309e-17,   8.50444803e-02,   1.26137788e-01,
+             1.16434881e-01,   6.68206631e-02,   3.89804309e-17,
+            -5.84680802e-02,  -8.90384387e-02,  -8.40918587e-02,
+            -4.92362781e-02,  -3.89804309e-17])
+
+    >>> plt.plot(x, np.sinc(x))
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Sinc Function")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.ylabel("Amplitude")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.xlabel("X")
+    <matplotlib.text.Text object at 0x...>
+    >>> plt.show()
+
+    It works in 2-D as well:
+
+    >>> x = np.linspace(-4, 4, 401)
+    >>> xx = np.outer(x, x)
+    >>> plt.imshow(np.sinc(xx))
+    <matplotlib.image.AxesImage object at 0x...>
+
+    """
+    x = np.asanyarray(x)
+    y = pi * where(x == 0, 1.0e-20, x)
+    return sin(y)/y
+
+
+def _msort_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_msort_dispatcher)
+def msort(a):
+    """
+    Return a copy of an array sorted along the first axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be sorted.
+
+    Returns
+    -------
+    sorted_array : ndarray
+        Array of the same type and shape as `a`.
+
+    See Also
+    --------
+    sort
+
+    Notes
+    -----
+    ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
+
+    """
+    b = array(a, subok=True, copy=True)
+    b.sort(0)
+    return b
+
+
+def _ureduce(a, func, **kwargs):
+    """
+    Internal Function.
+    Call `func` with `a` as first argument swapping the axes to use extended
+    axis on functions that don't support it natively.
+
+    Returns result and a.shape with axis dims set to 1.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    func : callable
+        Reduction function capable of receiving a single axis argument.
+        It is called with `a` as first argument followed by `kwargs`.
+    kwargs : keyword arguments
+        additional keyword arguments to pass to `func`.
+
+    Returns
+    -------
+    result : tuple
+        Result of func(a, **kwargs) and a.shape with axis dims set to 1
+        which can be used to reshape the result to the same shape a ufunc with
+        keepdims=True would produce.
+
+    """
+    a = np.asanyarray(a)
+    axis = kwargs.get('axis', None)
+    if axis is not None:
+        keepdim = list(a.shape)
+        nd = a.ndim
+        axis = _nx.normalize_axis_tuple(axis, nd)
+
+        for ax in axis:
+            keepdim[ax] = 1
+
+        if len(axis) == 1:
+            kwargs['axis'] = axis[0]
+        else:
+            keep = set(range(nd)) - set(axis)
+            nkeep = len(keep)
+            # swap axis that should not be reduced to front
+            for i, s in enumerate(sorted(keep)):
+                a = a.swapaxes(i, s)
+            # merge reduced axis
+            a = a.reshape(a.shape[:nkeep] + (-1,))
+            kwargs['axis'] = -1
+        keepdim = tuple(keepdim)
+    else:
+        keepdim = (1,) * a.ndim
+
+    r = func(a, **kwargs)
+    return r, keepdim
+
+
+def _median_dispatcher(
+        a, axis=None, out=None, overwrite_input=None, keepdims=None):
+    return (a, out)
+
+
+@array_function_dispatch(_median_dispatcher)
+def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
+    """
+    Compute the median along the specified axis.
+
+    Returns the median of the array elements.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    axis : {int, sequence of int, None}, optional
+        Axis or axes along which the medians are computed. The default
+        is to compute the median along a flattened version of the array.
+        A sequence of axes is supported since version 1.9.0.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
+    overwrite_input : bool, optional
+        If True, then allow use of memory of input array `a` for
+        calculations. The input array will be modified by the call to
+        `median`. This will save memory when you do not need to preserve
+        the contents of the input array. Treat the input as undefined,
+        but it will probably be fully or partially sorted. Default is
+        False. If `overwrite_input` is ``True`` and `a` is not already an
+        `ndarray`, an error will be raised.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    median : ndarray
+        A new array holding the result. If the input contains integers
+        or floats smaller than ``float64``, then the output data-type is
+        ``np.float64``. Otherwise, the data-type of the output is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
+
+    See Also
+    --------
+    mean, percentile
+
+    Notes
+    -----
+    Given a vector ``V`` of length ``N``, the median of ``V`` is the
+    middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+    ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
+    two middle values of ``V_sorted`` when ``N`` is even.
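+
+    For instance, for the even-length vector ``[1, 2, 3, 4]`` the two
+    middle values ``2`` and ``3`` are averaged:
+
+    >>> np.median([1, 2, 3, 4])
+    2.5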
+
+    Examples
+    --------
+    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+    >>> a
+    array([[10,  7,  4],
+           [ 3,  2,  1]])
+    >>> np.median(a)
+    3.5
+    >>> np.median(a, axis=0)
+    array([ 6.5,  4.5,  2.5])
+    >>> np.median(a, axis=1)
+    array([ 7.,  2.])
+    >>> m = np.median(a, axis=0)
+    >>> out = np.zeros_like(m)
+    >>> np.median(a, axis=0, out=out)
+    array([ 6.5,  4.5,  2.5])
+    >>> out
+    array([ 6.5,  4.5,  2.5])
+    >>> b = a.copy()
+    >>> np.median(b, axis=1, overwrite_input=True)
+    array([ 7.,  2.])
+    >>> assert not np.all(a==b)
+    >>> b = a.copy()
+    >>> np.median(b, axis=None, overwrite_input=True)
+    3.5
+    >>> assert not np.all(a==b)
+
+    """
+    r, k = _ureduce(a, func=_median, axis=axis, out=out,
+                    overwrite_input=overwrite_input)
+    if keepdims:
+        return r.reshape(k)
+    else:
+        return r
+
+
+def _median(a, axis=None, out=None, overwrite_input=False):
+    # can't reasonably be implemented in terms of percentile as we have to
+    # call mean to not break astropy
+    a = np.asanyarray(a)
+
+    # Set the partition indexes
+    if axis is None:
+        sz = a.size
+    else:
+        sz = a.shape[axis]
+    if sz % 2 == 0:
+        szh = sz // 2
+        kth = [szh - 1, szh]
+    else:
+        kth = [(sz - 1) // 2]
+    # Check if the array contains any nan's
+    if np.issubdtype(a.dtype, np.inexact):
+        kth.append(-1)
+
+    if overwrite_input:
+        if axis is None:
+            part = a.ravel()
+            part.partition(kth)
+        else:
+            a.partition(kth, axis=axis)
+            part = a
+    else:
+        part = partition(a, kth, axis=axis)
+
+    if part.shape == ():
+        # make 0-D arrays work
+        return part.item()
+    if axis is None:
+        axis = 0
+
+    indexer = [slice(None)] * part.ndim
+    index = part.shape[axis] // 2
+    if part.shape[axis] % 2 == 1:
+        # index with slice to allow mean (below) to work
+        indexer[axis] = slice(index, index+1)
+    else:
+        indexer[axis] = slice(index-1, index+1)
+    indexer = tuple(indexer)
+
+    # Check if the array contains any nan's
+    if np.issubdtype(a.dtype, np.inexact) and sz > 0:
+        # warn and return nans like mean would
+        rout = mean(part[indexer], axis=axis, out=out)
+        return np.lib.utils._median_nancheck(part, rout, axis, out)
+    else:
+        # if there are no nans
+        # Use mean in odd and even case to coerce data type
+        # and check, use out array.
+        return mean(part[indexer], axis=axis, out=out)
+
+
+def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+                           interpolation=None, keepdims=None):
+    return (a, q, out)
+
+
+@array_function_dispatch(_percentile_dispatcher)
+def percentile(a, q, axis=None, out=None,
+               overwrite_input=False, interpolation='linear', keepdims=False):
+    """
+    Compute the q-th percentile of the data along the specified axis.
+
+    Returns the q-th percentile(s) of the array elements.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    q : array_like of float
+        Percentile or sequence of percentiles to compute, which must be between
+        0 and 100 inclusive.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the percentiles are computed. The
+        default is to compute the percentile(s) along a flattened
+        version of the array.
+
+        .. versionchanged:: 1.9.0
+            A tuple of axes is supported
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
+    overwrite_input : bool, optional
+        If True, then allow the input array `a` to be modified by intermediate
+        calculations, to save memory.
+        In this case, the contents of the input `a` after this function
+        completes are undefined.
+
+    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+        This optional parameter specifies the interpolation method to
+        use when the desired percentile lies between two data points
+        ``i < j``:
+
+        * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+          is the fractional part of the index surrounded by ``i``
+          and ``j``.
+        * 'lower': ``i``.
+        * 'higher': ``j``.
+        * 'nearest': ``i`` or ``j``, whichever is nearest.
+        * 'midpoint': ``(i + j) / 2``.
+
+        .. versionadded:: 1.9.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left in
+        the result as dimensions with size one. With this option, the
+        result will broadcast correctly against the original array `a`.
+
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    percentile : scalar or ndarray
+        If `q` is a single percentile and `axis=None`, then the result
+        is a scalar. If multiple percentiles are given, first axis of
+        the result corresponds to the percentiles. The other axes are
+        the axes that remain after the reduction of `a`. If the input
+        contains integers or floats smaller than ``float64``, the output
+        data-type is ``float64``. Otherwise, the output data-type is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
+
+    See Also
+    --------
+    mean
+    median : equivalent to ``percentile(..., 50)``
+    nanpercentile
+    quantile : equivalent to percentile, except with q in the range [0, 1].
+
+    Notes
+    -----
+    Given a vector ``V`` of length ``N``, the q-th percentile of
+    ``V`` is the value ``q/100`` of the way from the minimum to the
+    maximum in a sorted copy of ``V``. The values and distances of
+    the two nearest neighbors as well as the `interpolation` parameter
+    will determine the percentile if the normalized ranking does not
+    match the location of ``q`` exactly. This function is the same as
+    the median if ``q=50``, the same as the minimum if ``q=0`` and the
+    same as the maximum if ``q=100``.
+
+    Examples
+    --------
+    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+    >>> a
+    array([[10,  7,  4],
+           [ 3,  2,  1]])
+    >>> np.percentile(a, 50)
+    3.5
+    >>> np.percentile(a, 50, axis=0)
+    array([ 6.5,  4.5,  2.5])
+    >>> np.percentile(a, 50, axis=1)
+    array([ 7.,  2.])
+    >>> np.percentile(a, 50, axis=1, keepdims=True)
+    array([[ 7.],
+           [ 2.]])
+
+    >>> m = np.percentile(a, 50, axis=0)
+    >>> out = np.zeros_like(m)
+    >>> np.percentile(a, 50, axis=0, out=out)
+    array([ 6.5,  4.5,  2.5])
+    >>> m
+    array([ 6.5,  4.5,  2.5])
+
+    >>> b = a.copy()
+    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
+    array([ 7.,  2.])
+    >>> assert not np.all(a == b)
+
+    The different types of interpolation can be visualized graphically:
+
+    .. plot::
+
+        import matplotlib.pyplot as plt
+
+        a = np.arange(4)
+        p = np.linspace(0, 100, 6001)
+        ax = plt.gca()
+        lines = [
+            ('linear', None),
+            ('higher', '--'),
+            ('lower', '--'),
+            ('nearest', '-.'),
+            ('midpoint', '-.'),
+        ]
+        for interpolation, style in lines:
+            ax.plot(
+                p, np.percentile(a, p, interpolation=interpolation),
+                label=interpolation, linestyle=style)
+        ax.set(
+            title='Interpolation methods for list: ' + str(a),
+            xlabel='Percentile',
+            ylabel='List item returned',
+            yticks=a)
+        ax.legend()
+        plt.show()
+
+    """
+    q = np.true_divide(q, 100.0)  # handles the asarray for us too
+    if not _quantile_is_valid(q):
+        raise ValueError("Percentiles must be in the range [0, 100]")
+    return _quantile_unchecked(
+        a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
+def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+                         interpolation=None, keepdims=None):
+    return (a, q, out)
+
+
+@array_function_dispatch(_quantile_dispatcher)
+def quantile(a, q, axis=None, out=None,
+             overwrite_input=False, interpolation='linear', keepdims=False):
+    """
+    Compute the q-th quantile of the data along the specified axis.
+
+    .. versionadded:: 1.15.0
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    q : array_like of float
+        Quantile or sequence of quantiles to compute, which must be between
+        0 and 1 inclusive.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the quantiles are computed. The
+        default is to compute the quantile(s) along a flattened
+        version of the array.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
+    overwrite_input : bool, optional
+        If True, then allow the input array `a` to be modified by intermediate
+        calculations, to save memory. In this case, the contents of the input
+        `a` after this function completes are undefined.
+    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+        This optional parameter specifies the interpolation method to
+        use when the desired quantile lies between two data points
+        ``i < j``:
+
+        * linear: ``i + (j - i) * fraction``, where ``fraction``
+          is the fractional part of the index surrounded by ``i``
+          and ``j``.
+        * lower: ``i``.
+        * higher: ``j``.
+        * nearest: ``i`` or ``j``, whichever is nearest.
+        * midpoint: ``(i + j) / 2``.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left in
+        the result as dimensions with size one. With this option, the
+        result will broadcast correctly against the original array `a`.
+
+    Returns
+    -------
+    quantile : scalar or ndarray
+        If `q` is a single quantile and `axis=None`, then the result
+        is a scalar. If multiple quantiles are given, first axis of
+        the result corresponds to the quantiles. The other axes are
+        the axes that remain after the reduction of `a`. If the input
+        contains integers or floats smaller than ``float64``, the output
+        data-type is ``float64``. Otherwise, the output data-type is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
+
+    See Also
+    --------
+    mean
+    percentile : equivalent to quantile, but with q in the range [0, 100].
+    median : equivalent to ``quantile(..., 0.5)``
+    nanquantile
+
+    Notes
+    -----
+    Given a vector ``V`` of length ``N``, the q-th quantile of
+    ``V`` is the value ``q`` of the way from the minimum to the
+    maximum in a sorted copy of ``V``. The values and distances of
+    the two nearest neighbors as well as the `interpolation` parameter
+    will determine the quantile if the normalized ranking does not
+    match the location of ``q`` exactly. This function is the same as
+    the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the
+    same as the maximum if ``q=1.0``.
+
+    Examples
+    --------
+    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+    >>> a
+    array([[10,  7,  4],
+           [ 3,  2,  1]])
+    >>> np.quantile(a, 0.5)
+    3.5
+    >>> np.quantile(a, 0.5, axis=0)
+    array([ 6.5,  4.5,  2.5])
+    >>> np.quantile(a, 0.5, axis=1)
+    array([ 7.,  2.])
+    >>> np.quantile(a, 0.5, axis=1, keepdims=True)
+    array([[ 7.],
+           [ 2.]])
+    >>> m = np.quantile(a, 0.5, axis=0)
+    >>> out = np.zeros_like(m)
+    >>> np.quantile(a, 0.5, axis=0, out=out)
+    array([ 6.5,  4.5,  2.5])
+    >>> m
+    array([ 6.5,  4.5,  2.5])
+    >>> b = a.copy()
+    >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
+    array([ 7.,  2.])
+    >>> assert not np.all(a == b)
+    """
+    q = np.asanyarray(q)
+    if not _quantile_is_valid(q):
+        raise ValueError("Quantiles must be in the range [0, 1]")
+    return _quantile_unchecked(
+        a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
+def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
+                        interpolation='linear', keepdims=False):
+    """Assumes that q is in [0, 1], and is an ndarray"""
+    r, k = _ureduce(a, func=_quantile_ureduce_func, q=q, axis=axis, out=out,
+                    overwrite_input=overwrite_input,
+                    interpolation=interpolation)
+    if keepdims:
+        return r.reshape(q.shape + k)
+    else:
+        return r
+
+
+def _quantile_is_valid(q):
+    # avoid expensive reductions, relevant for arrays with < O(1000) elements
+    if q.ndim == 1 and q.size < 10:
+        for i in range(q.size):
+            if q[i] < 0.0 or q[i] > 1.0:
+                return False
+    else:
+        # faster than any()
+        if np.count_nonzero(q < 0.0) or np.count_nonzero(q > 1.0):
+            return False
+    return True
+
+
+def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
+                           interpolation='linear', keepdims=False):
+    a = asarray(a)
+    if q.ndim == 0:
+        # Do not allow 0-d arrays because following code fails for scalar
+        zerod = True
+        q = q[None]
+    else:
+        zerod = False
+
+    # prepare a for partitioning
+    if overwrite_input:
+        if axis is None:
+            ap = a.ravel()
+        else:
+            ap = a
+    else:
+        if axis is None:
+            ap = a.flatten()
+        else:
+            ap = a.copy()
+
+    if axis is None:
+        axis = 0
+
+    Nx = ap.shape[axis]
+    indices = q * (Nx - 1)
+
+    # round fractional indices according to interpolation method
+    if interpolation == 'lower':
+        indices = floor(indices).astype(intp)
+    elif interpolation == 'higher':
+        indices = ceil(indices).astype(intp)
+    elif interpolation == 'midpoint':
+        indices = 0.5 * (floor(indices) + ceil(indices))
+    elif interpolation == 'nearest':
+        indices = around(indices).astype(intp)
+    elif interpolation == 'linear':
+        pass  # keep index as fraction and interpolate
+    else:
+        raise ValueError(
+            "interpolation can only be 'linear', 'lower', 'higher', "
+            "'midpoint', or 'nearest'")
+
+    n = np.array(False, dtype=bool)  # check for nan's flag
+    if indices.dtype == intp:  # take the points along axis
+        # Check if the array contains any nan's
+        if np.issubdtype(a.dtype, np.inexact):
+            indices = concatenate((indices, [-1]))
+
+        ap.partition(indices, axis=axis)
+        # ensure axis with q-th is first
+        ap = np.moveaxis(ap, axis, 0)
+        axis = 0
+
+        # Check if the array contains any nan's
+        if np.issubdtype(a.dtype, np.inexact):
+            indices = indices[:-1]
+            n = np.isnan(ap[-1:, ...])
+
+        if zerod:
+            indices = indices[0]
+        r = take(ap, indices, axis=axis, out=out)
+
+    else:  # weight the points above and below the indices
+        indices_below = floor(indices).astype(intp)
+        indices_above = indices_below + 1
+        indices_above[indices_above > Nx - 1] = Nx - 1
+
+        # Check if the array contains any nan's
+        if np.issubdtype(a.dtype, np.inexact):
+            indices_above = concatenate((indices_above, [-1]))
+
+        weights_above = indices - indices_below
+        weights_below = 1.0 - weights_above
+
+        weights_shape = [1, ] * ap.ndim
+        weights_shape[axis] = len(indices)
+        weights_below.shape = weights_shape
+        weights_above.shape = weights_shape
+
+        ap.partition(concatenate((indices_below, indices_above)), axis=axis)
+
+        # ensure axis with q-th is first
+        ap = np.moveaxis(ap, axis, 0)
+        weights_below = np.moveaxis(weights_below, axis, 0)
+        weights_above = np.moveaxis(weights_above, axis, 0)
+        axis = 0
+
+        # Check if the array contains any nan's
+        if np.issubdtype(a.dtype, np.inexact):
+            indices_above = indices_above[:-1]
+            n = np.isnan(ap[-1:, ...])
+
+        x1 = take(ap, indices_below, axis=axis) * weights_below
+        x2 = take(ap, indices_above, axis=axis) * weights_above
+
+        # ensure axis with q-th is first
+        x1 = np.moveaxis(x1, axis, 0)
+        x2 = np.moveaxis(x2, axis, 0)
+
+        if zerod:
+            x1 = x1.squeeze(0)
+            x2 = x2.squeeze(0)
+
+        if out is not None:
+            r = add(x1, x2, out=out)
+        else:
+            r = add(x1, x2)
+
+    if np.any(n):
+        warnings.warn("Invalid value encountered in percentile",
+                      RuntimeWarning, stacklevel=3)
+        if zerod:
+            if ap.ndim == 1:
+                if out is not None:
+                    out[...] = a.dtype.type(np.nan)
+                    r = out
+                else:
+                    r = a.dtype.type(np.nan)
+            else:
+                r[..., n.squeeze(0)] = a.dtype.type(np.nan)
+        else:
+            if r.ndim == 1:
+                r[:] = a.dtype.type(np.nan)
+            else:
+                r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
+
+    return r
+
+
+def _trapz_dispatcher(y, x=None, dx=None, axis=None):
+    return (y, x)
+
+
+@array_function_dispatch(_trapz_dispatcher)
+def trapz(y, x=None, dx=1.0, axis=-1):
+    """
+    Integrate along the given axis using the composite trapezoidal rule.
+
+    Integrate `y` (`x`) along given axis.
+
+    Parameters
+    ----------
+    y : array_like
+        Input array to integrate.
+    x : array_like, optional
+        The sample points corresponding to the `y` values. If `x` is None,
+        the sample points are assumed to be evenly spaced `dx` apart. The
+        default is None.
+    dx : scalar, optional
+        The spacing between sample points when `x` is None. The default is 1.
+    axis : int, optional
+        The axis along which to integrate.
+
+    Returns
+    -------
+    trapz : float
+        Definite integral as approximated by the trapezoidal rule.
+
+    See Also
+    --------
+    sum, cumsum
+
+    Notes
+    -----
+    Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
+    will be taken from the `y` array; by default the x-axis distances between
+    points will be 1.0, but they can alternatively be provided with the `x`
+    array or with the `dx` scalar. The return value will be equal to the
+    combined area under the red lines.
+
+
+    References
+    ----------
+    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
+
+    .. [2] Illustration image:
+    References
+    ----------
+    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
+
+    .. [2] Illustration image:
+           https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
+
+    Examples
+    --------
+    >>> np.trapz([1,2,3])
+    4.0
+    >>> np.trapz([1,2,3], x=[4,6,8])
+    8.0
+    >>> np.trapz([1,2,3], dx=2)
+    8.0
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> a
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.trapz(a, axis=0)
+    array([ 1.5,  2.5,  3.5])
+    >>> np.trapz(a, axis=1)
+    array([ 2.,  8.])
+
+    """
+    y = asanyarray(y)
+    if x is None:
+        d = dx
+    else:
+        x = asanyarray(x)
+        if x.ndim == 1:
+            d = diff(x)
+            # reshape to correct shape
+            shape = [1]*y.ndim
+            shape[axis] = d.shape[0]
+            d = d.reshape(shape)
+        else:
+            d = diff(x, axis=axis)
+    nd = y.ndim
+    slice1 = [slice(None)]*nd
+    slice2 = [slice(None)]*nd
+    slice1[axis] = slice(1, None)
+    slice2[axis] = slice(None, -1)
+    try:
+        ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)
+    except ValueError:
+        # Operations didn't work, cast to ndarray
+        d = np.asarray(d)
+        y = np.asarray(y)
+        ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis)
+    return ret
+
+
+def _meshgrid_dispatcher(*xi, **kwargs):
+    return xi
+
+
+# Based on scitools meshgrid
+@array_function_dispatch(_meshgrid_dispatcher)
+def meshgrid(*xi, **kwargs):
+    """
+    Return coordinate matrices from coordinate vectors.
+
+    Make N-D coordinate arrays for vectorized evaluations of
+    N-D scalar/vector fields over N-D grids, given
+    one-dimensional coordinate arrays x1, x2,..., xn.
+
+    .. versionchanged:: 1.9
+       1-D and 0-D cases are allowed.
+
+    Parameters
+    ----------
+    x1, x2,..., xn : array_like
+        1-D arrays representing the coordinates of a grid.
+    indexing : {'xy', 'ij'}, optional
+        Cartesian ('xy', default) or matrix ('ij') indexing of output.
+        See Notes for more details.
+
+        .. versionadded:: 1.7.0
+    sparse : bool, optional
+        If True a sparse grid is returned in order to conserve memory.
+        Default is False.
+
+        .. versionadded:: 1.7.0
+    copy : bool, optional
+        If False, views into the original arrays are returned in order to
+        conserve memory. Default is True. Please note that
+        ``sparse=False, copy=False`` will likely return non-contiguous
+        arrays. Furthermore, more than one element of a broadcast array
+        may refer to a single memory location. If you need to write to the
+        arrays, make copies first.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    X1, X2,..., XN : ndarray
+        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
+        return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
+        or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
+        with the elements of `xi` repeated to fill the matrix along
+        the first dimension for `x1`, the second for `x2` and so on.
+
+    Notes
+    -----
+    This function supports both indexing conventions through the indexing
+    keyword argument. Giving the string 'ij' returns a meshgrid with
+    matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
+    In the 2-D case with inputs of length M and N, the outputs are of shape
+    (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
+    with inputs of length M, N and P, outputs are of shape (N, M, P) for
+    'xy' indexing and (M, N, P) for 'ij' indexing.
+    The difference is illustrated by the following code snippet::
+
+        xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
+        for i in range(nx):
+            for j in range(ny):
+                # treat xv[i,j], yv[i,j]
+
+        xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
+        for i in range(nx):
+            for j in range(ny):
+                # treat xv[j,i], yv[j,i]
+
+    In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
+
+    See Also
+    --------
+    index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
+                         using indexing notation.
+    index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
+                         using indexing notation.
+
+    Examples
+    --------
+    >>> nx, ny = (3, 2)
+    >>> x = np.linspace(0, 1, nx)
+    >>> y = np.linspace(0, 1, ny)
+    >>> xv, yv = np.meshgrid(x, y)
+    >>> xv
+    array([[ 0. ,  0.5,  1. ],
+           [ 0. ,  0.5,  1. ]])
+    >>> yv
+    array([[ 0.,  0.,  0.],
+           [ 1.,  1.,  1.]])
+    >>> xv, yv = np.meshgrid(x, y, sparse=True)  # make sparse output arrays
+    >>> xv
+    array([[ 0. ,  0.5,  1. ]])
+    >>> yv
+    array([[ 0.],
+           [ 1.]])
+
+    `meshgrid` is very useful to evaluate functions on a grid.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.arange(-5, 5, 0.1)
+    >>> y = np.arange(-5, 5, 0.1)
+    >>> xx, yy = np.meshgrid(x, y, sparse=True)
+    >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
+    >>> h = plt.contourf(x,y,z)
+    >>> plt.show()
+
+    """
+    ndim = len(xi)
+
+    copy_ = kwargs.pop('copy', True)
+    sparse = kwargs.pop('sparse', False)
+    indexing = kwargs.pop('indexing', 'xy')
+
+    if kwargs:
+        raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
+                        % (list(kwargs)[0],))
+
+    if indexing not in ['xy', 'ij']:
+        raise ValueError(
+            "Valid values for `indexing` are 'xy' and 'ij'.")
+
+    s0 = (1,) * ndim
+    output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:])
+              for i, x in enumerate(xi)]
+
+    if indexing == 'xy' and ndim > 1:
+        # switch first and second axis
+        output[0].shape = (1, -1) + s0[2:]
+        output[1].shape = (-1, 1) + s0[2:]
+
+    if not sparse:
+        # Return the full N-D matrix (not only the 1-D vector)
+        output = np.broadcast_arrays(*output, subok=True)
+
+    if copy_:
+        output = [x.copy() for x in output]
+
+    return output
+
+
+def _delete_dispatcher(arr, obj, axis=None):
+    return (arr, obj)
+
+
+@array_function_dispatch(_delete_dispatcher)
+def delete(arr, obj, axis=None):
+    """
+    Return a new array with sub-arrays along an axis deleted. For a one
+    dimensional array, this returns those entries not returned by
+    `arr[obj]`.
+
+    Parameters
+    ----------
+    arr : array_like
+        Input array.
+    obj : slice, int or array of ints
+        Indicate which sub-arrays to remove.
+    axis : int, optional
+        The axis along which to delete the subarray defined by `obj`.
+        If `axis` is None, `obj` is applied to the flattened array.
+
+    Returns
+    -------
+    out : ndarray
+        A copy of `arr` with the elements specified by `obj` removed. Note
+        that `delete` does not occur in-place. If `axis` is None, `out` is
+        a flattened array.
+
+    See Also
+    --------
+    insert : Insert elements into an array.
+    append : Append elements at the end of an array.
+
+    Notes
+    -----
+    Often it is preferable to use a boolean mask. For example:
+
+    >>> mask = np.ones(len(arr), dtype=bool)
+    >>> mask[[0,2,4]] = False
+    >>> result = arr[mask,...]
+
+    Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
+    use of `mask`.
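+
+    With concrete (arbitrary) values, the mask approach looks like this:
+
+    >>> arr = np.arange(5)
+    >>> mask = np.ones(len(arr), dtype=bool)
+    >>> mask[[0, 2, 4]] = False
+    >>> arr[mask]
+    array([1, 3])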
+
+    Examples
+    --------
+    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
+    >>> arr
+    array([[ 1,  2,  3,  4],
+           [ 5,  6,  7,  8],
+           [ 9, 10, 11, 12]])
+    >>> np.delete(arr, 1, 0)
+    array([[ 1,  2,  3,  4],
+           [ 9, 10, 11, 12]])
+
+    >>> np.delete(arr, np.s_[::2], 1)
+    array([[ 2,  4],
+           [ 6,  8],
+           [10, 12]])
+    >>> np.delete(arr, [1,3,5], None)
+    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])
+
+    """
+    wrap = None
+    if type(arr) is not ndarray:
+        try:
+            wrap = arr.__array_wrap__
+        except AttributeError:
+            pass
+
+    arr = asarray(arr)
+    ndim = arr.ndim
+    arrorder = 'F' if arr.flags.fnc else 'C'
+    if axis is None:
+        if ndim != 1:
+            arr = arr.ravel()
+        ndim = arr.ndim
+        axis = -1
+
+    if ndim == 0:
+        # 2013-09-24, 1.9
+        warnings.warn(
+            "in the future the special handling of scalars will be removed "
+            "from delete and raise an error", DeprecationWarning, stacklevel=2)
+        if wrap:
+            return wrap(arr)
+        else:
+            return arr.copy(order=arrorder)
+
+    axis = normalize_axis_index(axis, ndim)
+
+    slobj = [slice(None)]*ndim
+    N = arr.shape[axis]
+    newshape = list(arr.shape)
+
+    if isinstance(obj, slice):
+        start, stop, step = obj.indices(N)
+        xr = range(start, stop, step)
+        numtodel = len(xr)
+
+        if numtodel <= 0:
+            if wrap:
+                return wrap(arr.copy(order=arrorder))
+            else:
+                return arr.copy(order=arrorder)
+
+        # Invert if step is negative:
+        if step < 0:
+            step = -step
+            start = xr[-1]
+            stop = xr[0] + 1
+
+        newshape[axis] -= numtodel
+        new = empty(newshape, arr.dtype, arrorder)
+        # copy initial chunk
+        if start == 0:
+            pass
+        else:
+            slobj[axis] = slice(None, start)
+            new[tuple(slobj)] = arr[tuple(slobj)]
+        # copy end chunk
+        if stop == N:
+            pass
+        else:
+            slobj[axis] = slice(stop-numtodel, None)
+            slobj2 = [slice(None)]*ndim
+            slobj2[axis] = slice(stop, None)
+            new[tuple(slobj)] = arr[tuple(slobj2)]
+        # copy middle pieces
+        if step == 1:
+            pass
+        else:  # use array indexing.
+            keep = ones(stop-start, dtype=bool)
+            keep[:stop-start:step] = False
+            slobj[axis] = slice(start, stop-numtodel)
+            slobj2 = [slice(None)]*ndim
+            slobj2[axis] = slice(start, stop)
+            arr = arr[tuple(slobj2)]
+            slobj2[axis] = keep
+            new[tuple(slobj)] = arr[tuple(slobj2)]
+        if wrap:
+            return wrap(new)
+        else:
+            return new
+
+    _obj = obj
+    obj = np.asarray(obj)
+    # After removing the special handling of booleans and out of
+    # bounds values, the conversion to the array can be removed.
+    if obj.dtype == bool:
+        warnings.warn("in the future insert will treat boolean arrays and "
+                      "array-likes as boolean index instead of casting it "
+                      "to integer", FutureWarning, stacklevel=2)
+        obj = obj.astype(intp)
+    if isinstance(_obj, (int, long, integer)):
+        # optimization for a single value
+        obj = obj.item()
+        if (obj < -N or obj >= N):
+            raise IndexError(
+                "index %i is out of bounds for axis %i with "
+                "size %i" % (obj, axis, N))
+        if (obj < 0):
+            obj += N
+        newshape[axis] -= 1
+        new = empty(newshape, arr.dtype, arrorder)
+        slobj[axis] = slice(None, obj)
+        new[tuple(slobj)] = arr[tuple(slobj)]
+        slobj[axis] = slice(obj, None)
+        slobj2 = [slice(None)]*ndim
+        slobj2[axis] = slice(obj+1, None)
+        new[tuple(slobj)] = arr[tuple(slobj2)]
+    else:
+        if obj.size == 0 and not isinstance(_obj, np.ndarray):
+            obj = obj.astype(intp)
+        if not np.can_cast(obj, intp, 'same_kind'):
+            # obj.size = 1 special case always failed and would just
+            # give superfluous warnings.
+            # 2013-09-24, 1.9
+            warnings.warn(
+                "using a non-integer array as obj in delete will result in an "
+                "error in the future", DeprecationWarning, stacklevel=2)
+            obj = obj.astype(intp)
+        keep = ones(N, dtype=bool)
+
+        # Test if there are out of bound indices, this is deprecated
+        inside_bounds = (obj < N) & (obj >= -N)
+        if not inside_bounds.all():
+            # 2013-09-24, 1.9
+            warnings.warn(
+                "in the future out of bounds indices will raise an error "
+                "instead of being ignored by `numpy.delete`.",
+                DeprecationWarning, stacklevel=2)
+            obj = obj[inside_bounds]
+        positive_indices = obj >= 0
+        if not positive_indices.all():
+            warnings.warn(
+                "in the future negative indices will not be ignored by "
+                "`numpy.delete`.", FutureWarning, stacklevel=2)
+            obj = obj[positive_indices]
+
+        keep[obj, ] = False
+        slobj[axis] = keep
+        new = arr[tuple(slobj)]
+
+    if wrap:
+        return wrap(new)
+    else:
+        return new
+
+
+def _insert_dispatcher(arr, obj, values, axis=None):
+    return (arr, obj, values)
+
+
+@array_function_dispatch(_insert_dispatcher)
+def insert(arr, obj, values, axis=None):
+    """
+    Insert values along the given axis before the given indices.
+
+    Parameters
+    ----------
+    arr : array_like
+        Input array.
+    obj : int, slice or sequence of ints
+        Object that defines the index or indices before which `values` is
+        inserted.
+
+        .. versionadded:: 1.8.0
+
+        Support for multiple insertions when `obj` is a single scalar or a
+        sequence with one element (similar to calling insert multiple
+        times).
+    values : array_like
+        Values to insert into `arr`. If the type of `values` is different
+        from that of `arr`, `values` is converted to the type of `arr`.
+        `values` should be shaped so that ``arr[...,obj,...] = values``
+        is legal.
+    axis : int, optional
+        Axis along which to insert `values`. If `axis` is None then `arr`
+        is flattened first.
+
+    Returns
+    -------
+    out : ndarray
+        A copy of `arr` with `values` inserted. Note that `insert`
+        does not occur in-place: a new array is returned. If
+        `axis` is None, `out` is a flattened array.
+
+    See Also
+    --------
+    append : Append elements at the end of an array.
+    concatenate : Join a sequence of arrays along an existing axis.
+    delete : Delete elements from an array.
+
+    Notes
+    -----
+    Note that for higher dimensional inserts `obj=0` behaves very differently
+    from `obj=[0]`, just like `arr[:,0,:] = values` is different from
+    `arr[:,[0],:] = values`.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
+    >>> a
+    array([[1, 1],
+           [2, 2],
+           [3, 3]])
+    >>> np.insert(a, 1, 5)
+    array([1, 5, 1, 2, 2, 3, 3])
+    >>> np.insert(a, 1, 5, axis=1)
+    array([[1, 5, 1],
+           [2, 5, 2],
+           [3, 5, 3]])
+
+    Difference between sequence and scalars:
+
+    >>> np.insert(a, [1], [[1],[2],[3]], axis=1)
+    array([[1, 1, 1],
+           [2, 2, 2],
+           [3, 3, 3]])
+    >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
+    ...                np.insert(a, [1], [[1],[2],[3]], axis=1))
+    True
+
+    >>> b = a.flatten()
+    >>> b
+    array([1, 1, 2, 2, 3, 3])
+    >>> np.insert(b, [2, 2], [5, 6])
+    array([1, 1, 5, 6, 2, 2, 3, 3])
+
+    >>> np.insert(b, slice(2, 4), [5, 6])
+    array([1, 1, 5, 2, 6, 2, 3, 3])
+
+    >>> np.insert(b, [2, 2], [7.13, False])  # type casting
+    array([1, 1, 7, 0, 2, 2, 3, 3])
+
+    >>> x = np.arange(8).reshape(2, 4)
+    >>> idx = (1, 3)
+    >>> np.insert(x, idx, 999, axis=1)
+    array([[  0, 999,   1,   2, 999,   3],
+           [  4, 999,   5,   6, 999,   7]])
+
+    """
+    wrap = None
+    if type(arr) is not ndarray:
+        try:
+            wrap = arr.__array_wrap__
+        except AttributeError:
+            pass
+
+    arr = asarray(arr)
+    ndim = arr.ndim
+    arrorder = 'F' if arr.flags.fnc else 'C'
+    if axis is None:
+        if ndim != 1:
+            arr = arr.ravel()
+        ndim = arr.ndim
+        axis = ndim - 1
+    elif ndim == 0:
+        # 2013-09-24, 1.9
+        warnings.warn(
+            "in the future the special handling of scalars will be removed "
+            "from insert and raise an error", DeprecationWarning, stacklevel=2)
+        arr = arr.copy(order=arrorder)
+        arr[...] = values
+        if wrap:
+            return wrap(arr)
+        else:
+            return arr
+    else:
+        axis = normalize_axis_index(axis, ndim)
+    slobj = [slice(None)]*ndim
+    N = arr.shape[axis]
+    newshape = list(arr.shape)
+
+    if isinstance(obj, slice):
+        # turn it into a range object
+        indices = arange(*obj.indices(N), **{'dtype': intp})
+    else:
+        # need to copy obj, because indices will be changed in-place
+        indices = np.array(obj)
+        if indices.dtype == bool:
+            # See also delete
+            warnings.warn(
+                "in the future insert will treat boolean arrays and "
+                "array-likes as a boolean index instead of casting it to "
+                "integer", FutureWarning, stacklevel=2)
+            indices = indices.astype(intp)
+            # Code after warning period:
+            #if obj.ndim != 1:
+            #    raise ValueError('boolean array argument obj to insert '
+            #                     'must be one dimensional')
+            #indices = np.flatnonzero(obj)
+        elif indices.ndim > 1:
+            raise ValueError(
+                "index array argument obj to insert must be one dimensional "
+                "or scalar")
+    if indices.size == 1:
+        index = indices.item()
+        if index < -N or index > N:
+            raise IndexError(
+                "index %i is out of bounds for axis %i with "
+                "size %i" % (obj, axis, N))
+        if (index < 0):
+            index += N
+
+        # There are some object array corner cases here, but we cannot avoid
+        # that:
+        values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
+        if indices.ndim == 0:
+            # broadcasting is very different here, since a[:,0,:] = ... behaves
+            # very differently from a[:,[0],:] = ...! This changes values so
+            # that it works like the second case. (here a[:,0:1,:])
+            values = np.moveaxis(values, 0, axis)
+        numnew = values.shape[axis]
+        newshape[axis] += numnew
+        new = empty(newshape, arr.dtype, arrorder)
+        slobj[axis] = slice(None, index)
+        new[tuple(slobj)] = arr[tuple(slobj)]
+        slobj[axis] = slice(index, index+numnew)
+        new[tuple(slobj)] = values
+        slobj[axis] = slice(index+numnew, None)
+        slobj2 = [slice(None)] * ndim
+        slobj2[axis] = slice(index, None)
+        new[tuple(slobj)] = arr[tuple(slobj2)]
+        if wrap:
+            return wrap(new)
+        return new
+    elif indices.size == 0 and not isinstance(obj, np.ndarray):
+        # Can safely cast the empty list to intp
+        indices = indices.astype(intp)
+
+    if not np.can_cast(indices, intp, 'same_kind'):
+        # 2013-09-24, 1.9
+        warnings.warn(
+            "using a non-integer array as obj in insert will result in an "
+            "error in the future", DeprecationWarning, stacklevel=2)
+        indices = indices.astype(intp)
+
+    indices[indices < 0] += N
+
+    numnew = len(indices)
+    order = indices.argsort(kind='mergesort')   # stable sort
+    indices[order] += np.arange(numnew)
+
+    newshape[axis] += numnew
+    old_mask = ones(newshape[axis], dtype=bool)
+    old_mask[indices] = False
+
+    new = empty(newshape, arr.dtype, arrorder)
+    slobj2 = [slice(None)]*ndim
+    slobj[axis] = indices
+    slobj2[axis] = old_mask
+    new[tuple(slobj)] = values
+    new[tuple(slobj2)] = arr
+
+    if wrap:
+        return wrap(new)
+    return new
+
+
+def _append_dispatcher(arr, values, axis=None):
+    return (arr, values)
+
+
+@array_function_dispatch(_append_dispatcher)
+def append(arr, values, axis=None):
+    """
+    Append values to the end of an array.
+
+    Parameters
+    ----------
+    arr : array_like
+        Values are appended to a copy of this array.
+    values : array_like
+        These values are appended to a copy of `arr`. It must be of the
+        correct shape (the same shape as `arr`, excluding `axis`). If
+        `axis` is not specified, `values` can be any shape and will be
+        flattened before use.
+    axis : int, optional
+        The axis along which `values` are appended. If `axis` is not
+        given, both `arr` and `values` are flattened before use.
+
+    Returns
+    -------
+    append : ndarray
+        A copy of `arr` with `values` appended to `axis`. Note that
+        `append` does not occur in-place: a new array is allocated and
+        filled. If `axis` is None, `out` is a flattened array.
+
+    See Also
+    --------
+    insert : Insert elements into an array.
+    delete : Delete elements from an array.
+
+    Examples
+    --------
+    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
+    array([1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+    When `axis` is specified, `values` must have the correct shape.
+
+    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
+    array([[1, 2, 3],
+           [4, 5, 6],
+           [7, 8, 9]])
+    >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
+    Traceback (most recent call last):
+    ...
+    ValueError: arrays must have same number of dimensions
+
+    """
+    arr = asanyarray(arr)
+    if axis is None:
+        if arr.ndim != 1:
+            arr = arr.ravel()
+        values = ravel(values)
+        axis = arr.ndim-1
+    return concatenate((arr, values), axis=axis)
+
+
+def _digitize_dispatcher(x, bins, right=None):
+    return (x, bins)
+
+
+@array_function_dispatch(_digitize_dispatcher)
+def digitize(x, bins, right=False):
+    """
+    Return the indices of the bins to which each value in the input array
+    belongs.
+
+    =========  =============  ============================
+    `right`    order of bins  returned index `i` satisfies
+    =========  =============  ============================
+    ``False``  increasing     ``bins[i-1] <= x < bins[i]``
+    ``True``   increasing     ``bins[i-1] < x <= bins[i]``
+    ``False``  decreasing     ``bins[i-1] > x >= bins[i]``
+    ``True``   decreasing     ``bins[i-1] >= x > bins[i]``
+    =========  =============  ============================
+
+    If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
+    returned as appropriate.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array to be binned. Prior to NumPy 1.10.0, this array had to
+        be 1-dimensional, but can now have any shape.
+    bins : array_like
+        Array of bins. It has to be 1-dimensional and monotonic.
+    right : bool, optional
+        Indicating whether the intervals include the right or the left bin
+        edge. Default behavior is (right==False) indicating that the interval
+        does not include the right edge. The left bin end is open in this
+        case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
+        monotonically increasing bins.
+
+    Returns
+    -------
+    indices : ndarray of ints
+        Output array of indices, of same shape as `x`.
+
+    Raises
+    ------
+    ValueError
+        If `bins` is not monotonic.
+    TypeError
+        If the type of the input is complex.
+
+    See Also
+    --------
+    bincount, histogram, unique, searchsorted
+
+    Notes
+    -----
+    If values in `x` are such that they fall outside the bin range,
+    attempting to index `bins` with the indices that `digitize` returns
+    will result in an IndexError.
+
+    .. versionadded:: 1.10.0
+
+    `np.digitize` is implemented in terms of `np.searchsorted`. This means
+    that a binary search is used to bin the values, which scales much better
+    for a larger number of bins than the previous linear search. It also
+    removes the requirement for the input array to be 1-dimensional.
+
+    For monotonically _increasing_ `bins`, the following are equivalent::
+
+        np.digitize(x, bins, right=True)
+        np.searchsorted(bins, x, side='left')
+
+    Note that as the order of the arguments is reversed, the side must be
+    too. The `searchsorted` call is marginally faster, as it does not do any
+    monotonicity checks. Perhaps more importantly, it supports all dtypes.
+
+    Examples
+    --------
+    >>> x = np.array([0.2, 6.4, 3.0, 1.6])
+    >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
+    >>> inds = np.digitize(x, bins)
+    >>> inds
+    array([1, 4, 3, 2])
+    >>> for n in range(x.size):
+    ...     print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
+    ...
+    0.0 <= 0.2 < 1.0
+    4.0 <= 6.4 < 10.0
+    2.5 <= 3.0 < 4.0
+    1.0 <= 1.6 < 2.5
+
+    >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
+    >>> bins = np.array([0, 5, 10, 15, 20])
+    >>> np.digitize(x,bins,right=True)
+    array([1, 2, 3, 4, 4])
+    >>> np.digitize(x,bins,right=False)
+    array([1, 3, 3, 4, 5])
+    """
+    x = _nx.asarray(x)
+    bins = _nx.asarray(bins)
+
+    # here for compatibility, searchsorted below is happy to take this
+    if np.issubdtype(x.dtype, _nx.complexfloating):
+        raise TypeError("x may not be complex")
+
+    mono = _monotonicity(bins)
+    if mono == 0:
+        raise ValueError("bins must be monotonically increasing or decreasing")
+
+    # this is backwards because the arguments below are swapped
+    side = 'left' if right else 'right'
+    if mono == -1:
+        # reverse the bins, and invert the results
+        return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)
+    else:
+        return _nx.searchsorted(bins, x, side=side)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/function_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/function_base.pyc
new file mode 100644
index 0000000..fa346e2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/function_base.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/histograms.py b/project/venv/lib/python2.7/site-packages/numpy/lib/histograms.py
new file mode 100644
index 0000000..482eabe
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/histograms.py
@@ -0,0 +1,1102 @@
+"""
+Histogram-related functions
+"""
+from __future__ import division, absolute_import, print_function
+
+import functools
+import operator
+import warnings
+
+import numpy as np
+from numpy.compat.py3k import basestring
+from numpy.core import overrides
+
+__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+# range is a keyword argument to many functions, so save the builtin so they can
+# use it.
+_range = range
+
+
+def _hist_bin_sqrt(x, range):
+    """
+    Square root histogram bin estimator.
+
+    Bin width is inversely proportional to the square root of the data
+    size. Used by many programs for its simplicity.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    return x.ptp() / np.sqrt(x.size)
+
+
+def _hist_bin_sturges(x, range):
+    """
+    Sturges histogram bin estimator.
+
+    A very simplistic estimator based on the assumption of normality of
+    the data. This estimator has poor performance for non-normal data,
+    which becomes especially obvious for large data sets. The estimate
+    depends only on the size of the data.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    return x.ptp() / (np.log2(x.size) + 1.0)
+
+
+def _hist_bin_rice(x, range):
+    """
+    Rice histogram bin estimator.
+
+    Another simple estimator with no normality assumption. It has better
+    performance for large data than Sturges, but tends to overestimate
+    the number of bins. The number of bins is proportional to the cube
+    root of data size (asymptotically optimal). The estimate depends
+    only on the size of the data.
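+
+    For example (with illustrative numbers): 1000 samples spanning a range
+    of 10 give a width of ``10 / (2 * 1000 ** (1.0 / 3))``, i.e. 0.5.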
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    return x.ptp() / (2.0 * x.size ** (1.0 / 3))
+
+
+def _hist_bin_scott(x, range):
+    """
+    Scott histogram bin estimator.
+
+    The binwidth is proportional to the standard deviation of the data
+    and inversely proportional to the cube root of data size
+    (asymptotically optimal).
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
+
+
+def _hist_bin_stone(x, range):
+    """
+    Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
+
+    The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
+    The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
+    https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
+
+    This paper by Stone appears to be the origination of this rule.
+    http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+    range : (float, float)
+        The lower and upper range of the bins.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+
+    n = x.size
+    ptp_x = np.ptp(x)
+    if n <= 1 or ptp_x == 0:
+        return 0
+
+    def jhat(nbins):
+        hh = ptp_x / nbins
+        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
+        return (2 - (n + 1) * p_k.dot(p_k)) / hh
+
+    nbins_upper_bound = max(100, int(np.sqrt(n)))
+    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
+    if nbins == nbins_upper_bound:
+        warnings.warn("The number of bins estimated may be suboptimal.", RuntimeWarning, stacklevel=2)
+    return ptp_x / nbins
+
+
+def _hist_bin_doane(x, range):
+    """
+    Doane's histogram bin estimator.
+
+    Improved version of Sturges' formula which works better for
+    non-normal data. See
+    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    if x.size > 2:
+        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
+        sigma = np.std(x)
+        if sigma > 0.0:
+            # These three operations add up to
+            #   g1 = np.mean(((x - np.mean(x)) / sigma)**3)
+            # but use only one temp array instead of three
+            temp = x - np.mean(x)
+            np.true_divide(temp, sigma, temp)
+            np.power(temp, 3, temp)
+            g1 = np.mean(temp)
+            return x.ptp() / (1.0 + np.log2(x.size) +
+                              np.log2(1.0 + np.absolute(g1) / sg1))
+    return 0.0
+
+
+def _hist_bin_fd(x, range):
+    """
+    The Freedman-Diaconis histogram bin estimator.
+
+    The Freedman-Diaconis rule uses interquartile range (IQR) to
+    estimate binwidth. It is considered a variation of the Scott rule
+    with more robustness as the IQR is less affected by outliers than
+    the standard deviation. However, the IQR depends on fewer points
+    than the standard deviation, so it is less accurate, especially for
+    long tailed distributions.
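+
+    For example (with illustrative numbers): 1000 samples with an IQR of
+    1.0 give a width of ``2 * 1.0 / 1000 ** (1.0 / 3)``, i.e. 0.2.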
+
+    If the IQR is 0, this function returns 1 for the number of bins.
+    Binwidth is inversely proportional to the cube root of data size
+    (asymptotically optimal).
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    iqr = np.subtract(*np.percentile(x, [75, 25]))
+    return 2.0 * iqr * x.size ** (-1.0 / 3.0)
+
+
+def _hist_bin_auto(x, range):
+    """
+    Histogram bin estimator that uses the minimum width of the
+    Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
+    and the Sturges estimator if the FD bandwidth is 0.
+
+    The FD estimator is usually the most robust method, but its width
+    estimate tends to be too large for small `x` and bad for data with limited
+    variance. The Sturges estimator is quite good for small (<1000) datasets
+    and is the default in the R language. This method gives good off the shelf
+    behaviour.
+
+    .. versionchanged:: 1.15.0
+    If there is limited variance the IQR can be 0, which results in the
+    FD bin width being 0 too. This is not a valid bin width, so
+    ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
+    If the IQR is 0, it's unlikely any variance based estimators will be of
+    use, so we revert to the sturges estimator, which only uses the size of the
+    dataset in its calculation.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+
+    See Also
+    --------
+    _hist_bin_fd, _hist_bin_sturges
+    """
+    fd_bw = _hist_bin_fd(x, range)
+    sturges_bw = _hist_bin_sturges(x, range)
+    del range  # unused
+    if fd_bw:
+        return min(fd_bw, sturges_bw)
+    else:
+        # limited variance, so we return a len dependent bw estimator
+        return sturges_bw
+
+# Private dict initialized at module load time
+_hist_bin_selectors = {'stone': _hist_bin_stone,
+                       'auto': _hist_bin_auto,
+                       'doane': _hist_bin_doane,
+                       'fd': _hist_bin_fd,
+                       'rice': _hist_bin_rice,
+                       'scott': _hist_bin_scott,
+                       'sqrt': _hist_bin_sqrt,
+                       'sturges': _hist_bin_sturges}
+
+
+def _ravel_and_check_weights(a, weights):
+    """ Check a and weights have matching shapes, and ravel both """
+    a = np.asarray(a)
+
+    # Ensure that the array is a "subtractable" dtype
+    if a.dtype == np.bool_:
+        warnings.warn("Converting input from {} to {} for compatibility."
+                      .format(a.dtype, np.uint8),
+                      RuntimeWarning, stacklevel=2)
+        a = a.astype(np.uint8)
+
+    if weights is not None:
+        weights = np.asarray(weights)
+        if weights.shape != a.shape:
+            raise ValueError(
+                'weights should have the same shape as a.')
+        weights = weights.ravel()
+    a = a.ravel()
+    return a, weights
+
+
+def _get_outer_edges(a, range):
+    """
+    Determine the outer bin edges to use, from either the data or the range
+    argument
+    """
+    if range is not None:
+        first_edge, last_edge = range
+        if first_edge > last_edge:
+            raise ValueError(
+                'max must be larger than min in range parameter.')
+        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+            raise ValueError(
+                "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
+    elif a.size == 0:
+        # handle empty arrays. Can't determine range, so use 0-1.
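+        # (this keeps the edge and width computations downstream well
+        # defined even for empty input)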
+        first_edge, last_edge = 0, 1
+    else:
+        first_edge, last_edge = a.min(), a.max()
+        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+            raise ValueError(
+                "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
+
+    # expand empty range to avoid divide by zero
+    if first_edge == last_edge:
+        first_edge = first_edge - 0.5
+        last_edge = last_edge + 0.5
+
+    return first_edge, last_edge
+
+
+def _unsigned_subtract(a, b):
+    """
+    Subtract two values where a >= b, and produce an unsigned result
+
+    This is needed when finding the difference between the upper and lower
+    bound of an int16 histogram
+    """
+    # coerce to a single type
+    signed_to_unsigned = {
+        np.byte: np.ubyte,
+        np.short: np.ushort,
+        np.intc: np.uintc,
+        np.int_: np.uint,
+        np.longlong: np.ulonglong
+    }
+    dt = np.result_type(a, b)
+    try:
+        dt = signed_to_unsigned[dt.type]
+    except KeyError:
+        return np.subtract(a, b, dtype=dt)
+    else:
+        # we know the inputs are integers, and we are deliberately casting
+        # signed to unsigned
+        return np.subtract(a, b, casting='unsafe', dtype=dt)
+
+
+def _get_bin_edges(a, bins, range, weights):
+    """
+    Computes the bins used internally by `histogram`.
+
+    Parameters
+    ==========
+    a : ndarray
+        Ravelled data array
+    bins, range
+        Forwarded arguments from `histogram`.
+    weights : ndarray, optional
+        Ravelled weights array, or None
+
+    Returns
+    =======
+    bin_edges : ndarray
+        Array of bin edges
+    uniform_bins : (Number, Number, int)
+        The lower bound, upper bound, and number of bins, used in the
+        optimized implementation of `histogram` that works on uniform bins.
+    """
+    # parse the overloaded bins argument
+    n_equal_bins = None
+    bin_edges = None
+
+    if isinstance(bins, basestring):
+        bin_name = bins
+        # if `bins` is a string for an automatic method,
+        # this will replace it with the number of bins calculated
+        if bin_name not in _hist_bin_selectors:
+            raise ValueError(
+                "{!r} is not a valid estimator for `bins`".format(bin_name))
+        if weights is not None:
+            raise TypeError("Automated estimation of the number of "
+                            "bins is not supported for weighted data")
+
+        first_edge, last_edge = _get_outer_edges(a, range)
+
+        # truncate the range if needed
+        if range is not None:
+            keep = (a >= first_edge)
+            keep &= (a <= last_edge)
+            if not np.logical_and.reduce(keep):
+                a = a[keep]
+
+        if a.size == 0:
+            n_equal_bins = 1
+        else:
+            # Do not call selectors on empty arrays
+            width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
+            if width:
+                n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
+            else:
+                # Width can be zero for some estimators, e.g. FD when
+                # the IQR of the data is zero.
+                n_equal_bins = 1
+
+    elif np.ndim(bins) == 0:
+        try:
+            n_equal_bins = operator.index(bins)
+        except TypeError:
+            raise TypeError(
+                '`bins` must be an integer, a string, or an array')
+        if n_equal_bins < 1:
+            raise ValueError('`bins` must be positive, when an integer')
+
+        first_edge, last_edge = _get_outer_edges(a, range)
+
+    elif np.ndim(bins) == 1:
+        bin_edges = np.asarray(bins)
+        if np.any(bin_edges[:-1] > bin_edges[1:]):
+            raise ValueError(
+                '`bins` must increase monotonically, when an array')
+
+    else:
+        raise ValueError('`bins` must be 1d, when an array')
+
+    if n_equal_bins is not None:
+        # gh-10322 means that type resolution rules are dependent on array
+        # shapes. To avoid this causing problems, we pick a type now and stick
+        # with it throughout.
+        bin_type = np.result_type(first_edge, last_edge, a)
+        if np.issubdtype(bin_type, np.integer):
+            bin_type = np.result_type(bin_type, float)
+
+        # bin edges must be computed
+        bin_edges = np.linspace(
+            first_edge, last_edge, n_equal_bins + 1,
+            endpoint=True, dtype=bin_type)
+        return bin_edges, (first_edge, last_edge, n_equal_bins)
+    else:
+        return bin_edges, None
+
+
+def _search_sorted_inclusive(a, v):
+    """
+    Like `searchsorted`, but where the last item in `v` is placed on the right.
+
+    In the context of a histogram, this makes the last bin edge inclusive
+    """
+    return np.concatenate((
+        a.searchsorted(v[:-1], 'left'),
+        a.searchsorted(v[-1:], 'right')
+    ))
+
+
+def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
+    return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_bin_edges_dispatcher)
+def histogram_bin_edges(a, bins=10, range=None, weights=None):
+    r"""
+    Function to calculate only the edges of the bins used by the `histogram` function.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data. The histogram is computed over the flattened array.
+    bins : int or sequence of scalars or str, optional
+        If `bins` is an int, it defines the number of equal-width
+        bins in the given range (10, by default). If `bins` is a
+        sequence, it defines the bin edges, including the rightmost
+        edge, allowing for non-uniform bin widths.
+
+        If `bins` is a string from the list below, `histogram_bin_edges` will use
+        the method chosen to calculate the optimal bin width and
+        consequently the number of bins (see `Notes` for more detail on
+        the estimators) from the data that falls within the requested
+        range. While the bin width will be optimal for the actual data
+        in the range, the number of bins will be computed to fill the
+        entire range, including the empty portions. For visualisation,
+        using the 'auto' option is suggested. Weighted data is not
+        supported for automated bin size selection.
+
+        'auto'
+            Maximum of the 'sturges' and 'fd' estimators. Provides good
+            all around performance.
+
+        'fd' (Freedman Diaconis Estimator)
+            Robust (resilient to outliers) estimator that takes into
+            account data variability and data size.
+
+        'doane'
+            An improved version of Sturges' estimator that works better
+            with non-normal datasets.
+
+        'scott'
+            Less robust estimator that takes into account data
+            variability and data size.
+
+        'stone'
+            Estimator based on leave-one-out cross-validation estimate of
+            the integrated squared error. Can be regarded as a generalization
+            of Scott's rule.
+
+        'rice'
+            Estimator does not take variability into account, only data
+            size. Commonly overestimates number of bins required.
+
+        'sturges'
+            R's default method, only accounts for data size. Only
+            optimal for gaussian data and underestimates number of bins
+            for large non-gaussian datasets.
+
+        'sqrt'
+            Square root (of data size) estimator, used by Excel and
+            other programs for its speed and simplicity.
+
+    range : (float, float), optional
+        The lower and upper range of the bins. If not provided, range
+        is simply ``(a.min(), a.max())``. Values outside the range are
+        ignored. The first element of the range must be less than or
+        equal to the second. `range` affects the automatic bin
+        computation as well. While bin width is computed to be optimal
+        based on the actual data within `range`, the bin count will fill
+        the entire range including portions containing no data.
+
+    weights : array_like, optional
+        An array of weights, of the same shape as `a`. Each value in
+        `a` only contributes its associated weight towards the bin count
+        (instead of 1). This is currently not used by any of the bin estimators,
+        but may be in the future.
+
+    Returns
+    -------
+    bin_edges : array of dtype float
+        The edges to pass into `histogram`
+
+    See Also
+    --------
+    histogram
+
+    Notes
+    -----
+    The methods to estimate the optimal number of bins are well founded
+    in literature, and are inspired by the choices R provides for
+    histogram visualisation. Note that having the number of bins
+    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
+    why it appears in most estimators. These are simply plug-in methods
+    that give good starting points for number of bins. In the equations
+    below, :math:`h` is the binwidth and :math:`n_h` is the number of
+    bins. All estimators that compute bin counts are recast to bin width
+    using the `ptp` of the data. The final bin count is obtained from
+    ``np.round(np.ceil(range / h))``.
+
+    'Auto' (maximum of the 'Sturges' and 'FD' estimators)
+        A compromise to get a good value. For small datasets the Sturges
+        value will usually be chosen, while larger datasets will usually
+        default to FD. Avoids the overly conservative behaviour of FD
+        and Sturges for small and large datasets respectively.
+        Switchover point is usually :math:`a.size \approx 1000`.
+
+    'FD' (Freedman Diaconis Estimator)
+        .. math:: h = 2 \frac{IQR}{n^{1/3}}
+
+        The binwidth is proportional to the interquartile range (IQR)
+        and inversely proportional to cube root of a.size. Can be too
+        conservative for small datasets, but is quite good for large
+        datasets. The IQR is very robust to outliers.
+
+    'Scott'
+        .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
+
+        The binwidth is proportional to the standard deviation of the
+        data and inversely proportional to cube root of ``x.size``. Can
+        be too conservative for small datasets, but is quite good for
+        large datasets. The standard deviation is not very robust to
+        outliers. Values are very similar to the Freedman-Diaconis
+        estimator in the absence of outliers.
+
+    'Rice'
+        .. math:: n_h = 2n^{1/3}
+
+        The number of bins is only proportional to cube root of
+        ``a.size``. It tends to overestimate the number of bins and it
+        does not take into account data variability.
+
+    'Sturges'
+        .. math:: n_h = \log _{2}n+1
+
+        The number of bins is the base 2 log of ``a.size``. This
+        estimator assumes normality of data and is too conservative for
+        larger, non-normal datasets. This is the default method in R's
+        ``hist`` method.
+
+    'Doane'
+        .. math:: n_h = 1 + \log_{2}(n) +
+                        \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
+
+            g_1 = mean[(\frac{x - \mu}{\sigma})^3]
+
+            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
+
+        An improved version of Sturges' formula that produces better
+        estimates for non-normal datasets. This estimator attempts to
+        account for the skew of the data.
+
+    'Sqrt'
+        .. math:: n_h = \sqrt n
+
+        The simplest and fastest estimator. Only takes into account the
+        data size.
+
+    Examples
+    --------
+    >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
+    >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
+    array([0.  , 0.25, 0.5 , 0.75, 1.  ])
+    >>> np.histogram_bin_edges(arr, bins=2)
+    array([0. , 2.5, 5. ])
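+
+    An integer ``bins`` simply divides the detected range evenly
+    (illustrative):
+
+    >>> np.histogram_bin_edges(arr, bins=5)
+    array([0., 1., 2., 3., 4., 5.])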
+
+    For consistency with histogram, an array of pre-computed bins is
+    passed through unmodified:
+
+    >>> np.histogram_bin_edges(arr, [1, 2])
+    array([1, 2])
+
+    This function allows one set of bins to be computed, and reused across
+    multiple histograms:
+
+    >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
+    >>> shared_bins
+    array([0., 1., 2., 3., 4., 5.])
+
+    >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
+    >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
+    >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
+
+    >>> hist_0; hist_1
+    array([1, 1, 0, 1, 0])
+    array([2, 0, 1, 1, 2])
+
+    Which gives more easily comparable results than using separate bins for
+    each histogram:
+
+    >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
+    >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
+    >>> hist_0; hist_1
+    array([1, 1, 1])
+    array([2, 1, 1, 2])
+    >>> bins_0; bins_1
+    array([0., 1., 2., 3.])
+    array([0.  , 1.25, 2.5 , 3.75, 5.  ])
+
+    """
+    a, weights = _ravel_and_check_weights(a, weights)
+    bin_edges, _ = _get_bin_edges(a, bins, range, weights)
+    return bin_edges
+
+
+def _histogram_dispatcher(
+        a, bins=None, range=None, normed=None, weights=None, density=None):
+    return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_dispatcher)
+def histogram(a, bins=10, range=None, normed=None, weights=None,
+              density=None):
+    r"""
+    Compute the histogram of a set of data.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data. The histogram is computed over the flattened array.
+    bins : int or sequence of scalars or str, optional
+        If `bins` is an int, it defines the number of equal-width
+        bins in the given range (10, by default). If `bins` is a
+        sequence, it defines a monotonically increasing array of bin edges,
+        including the rightmost edge, allowing for non-uniform bin widths.
+
+        .. versionadded:: 1.11.0
+
+        If `bins` is a string, it defines the method used to calculate the
+        optimal bin width, as defined by `histogram_bin_edges`.
+
+    range : (float, float), optional
+        The lower and upper range of the bins. If not provided, range
+        is simply ``(a.min(), a.max())``. Values outside the range are
+        ignored. The first element of the range must be less than or
+        equal to the second. `range` affects the automatic bin
+        computation as well. While bin width is computed to be optimal
+        based on the actual data within `range`, the bin count will fill
+        the entire range including portions containing no data.
+    normed : bool, optional
+
+        .. deprecated:: 1.6.0
+
+        This is equivalent to the `density` argument, but produces incorrect
+        results for unequal bin widths. It should not be used.
+
+        .. versionchanged:: 1.15.0
+            DeprecationWarnings are actually emitted.
+
+    weights : array_like, optional
+        An array of weights, of the same shape as `a`. Each value in
+        `a` only contributes its associated weight towards the bin count
+        (instead of 1). If `density` is True, the weights are
+        normalized, so that the integral of the density over the range
+        remains 1.
+    density : bool, optional
+        If ``False``, the result will contain the number of samples in
+        each bin. If ``True``, the result is the value of the
+        probability *density* function at the bin, normalized such that
+        the *integral* over the range is 1. Note that the sum of the
+        histogram values will not be equal to 1 unless bins of unity
+        width are chosen; it is not a probability *mass* function.
+
+        Overrides the ``normed`` keyword if given.
+
+    Returns
+    -------
+    hist : array
+        The values of the histogram. See `density` and `weights` for a
+        description of the possible semantics.
+    bin_edges : array of dtype float
+        Return the bin edges ``(length(hist)+1)``.
+
+
+    See Also
+    --------
+    histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
+
+    Notes
+    -----
+    All but the last (righthand-most) bin is half-open. In other words,
+    if `bins` is::
+
+      [1, 2, 3, 4]
+
+    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
+    the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
+    *includes* 4.
+
+
+    Examples
+    --------
+    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
+    (array([0, 2, 1]), array([0, 1, 2, 3]))
+    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
+    (array([ 0.25,  0.25,  0.25,  0.25]), array([0, 1, 2, 3, 4]))
+    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
+    (array([1, 4, 1]), array([0, 1, 2, 3]))
+
+    >>> a = np.arange(5)
+    >>> hist, bin_edges = np.histogram(a, density=True)
+    >>> hist
+    array([ 0.5,  0. ,  0.5,  0. ,  0. ,  0.5,  0. ,  0.5,  0. ,  0.5])
+    >>> hist.sum()
+    2.4999999999999996
+    >>> np.sum(hist * np.diff(bin_edges))
+    1.0
+
+    .. versionadded:: 1.11.0
+
+    Automated Bin Selection Methods example, using 2 peak random data
+    with 2000 points:
+
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.RandomState(10)  # deterministic random data
+    >>> a = np.hstack((rng.normal(size=1000),
+    ...                rng.normal(loc=5, scale=2, size=1000)))
+    >>> plt.hist(a, bins='auto')  # arguments are passed to np.histogram
+    >>> plt.title("Histogram with 'auto' bins")
+    >>> plt.show()
+
+    """
+    a, weights = _ravel_and_check_weights(a, weights)
+
+    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
+
+    # Histogram is an integer or a float array depending on the weights.
+    if weights is None:
+        ntype = np.dtype(np.intp)
+    else:
+        ntype = weights.dtype
+
+    # We set a block size, as this allows us to iterate over chunks when
+    # computing histograms, to minimize memory usage.
+    BLOCK = 65536
+
+    # The fast path uses bincount, but that only works for certain types
+    # of weight
+    simple_weights = (
+        weights is None or
+        np.can_cast(weights.dtype, np.double) or
+        np.can_cast(weights.dtype, complex)
+    )
+
+    if uniform_bins is not None and simple_weights:
+        # Fast algorithm for equal bins
+        # We now convert values of a to bin indices, under the assumption of
+        # equal bin widths (which is valid here).
+        first_edge, last_edge, n_equal_bins = uniform_bins
+
+        # Initialize empty histogram
+        n = np.zeros(n_equal_bins, ntype)
+
+        # Pre-compute histogram scaling factor
+        norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
+
+        # We iterate over blocks here for two reasons: the first is that for
+        # large arrays, it is actually faster (for example for a 10^8 array it
+        # is 2x as fast) and it results in a memory footprint 3x lower in the
+        # limit of large arrays.
+        for i in _range(0, len(a), BLOCK):
+            tmp_a = a[i:i+BLOCK]
+            if weights is None:
+                tmp_w = None
+            else:
+                tmp_w = weights[i:i + BLOCK]
+
+            # Only include values in the right range
+            keep = (tmp_a >= first_edge)
+            keep &= (tmp_a <= last_edge)
+            if not np.logical_and.reduce(keep):
+                tmp_a = tmp_a[keep]
+                if tmp_w is not None:
+                    tmp_w = tmp_w[keep]
+
+            # This cast ensures no type promotions occur below, which gh-10322
+            # makes unpredictable. Getting it wrong leads to precision errors
+            # like gh-8123.
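+            # (copy=False makes the astype below a no-op when the dtype
+            # already matches)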
+            tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
+
+            # Compute the bin indices, and for values that lie exactly on
+            # last_edge we need to subtract one
+            f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
+            indices = f_indices.astype(np.intp)
+            indices[indices == n_equal_bins] -= 1
+
+            # The index computation is not guaranteed to give exactly
+            # consistent results within ~1 ULP of the bin edges.
+            decrement = tmp_a < bin_edges[indices]
+            indices[decrement] -= 1
+            # The last bin includes the right edge. The other bins do not.
+            increment = ((tmp_a >= bin_edges[indices + 1])
+                         & (indices != n_equal_bins - 1))
+            indices[increment] += 1
+
+            # We now compute the histogram using bincount
+            if ntype.kind == 'c':
+                n.real += np.bincount(indices, weights=tmp_w.real,
+                                      minlength=n_equal_bins)
+                n.imag += np.bincount(indices, weights=tmp_w.imag,
+                                      minlength=n_equal_bins)
+            else:
+                n += np.bincount(indices, weights=tmp_w,
+                                 minlength=n_equal_bins).astype(ntype)
+    else:
+        # Compute via cumulative histogram
+        cum_n = np.zeros(bin_edges.shape, ntype)
+        if weights is None:
+            for i in _range(0, len(a), BLOCK):
+                sa = np.sort(a[i:i+BLOCK])
+                cum_n += _search_sorted_inclusive(sa, bin_edges)
+        else:
+            zero = np.zeros(1, dtype=ntype)
+            for i in _range(0, len(a), BLOCK):
+                tmp_a = a[i:i+BLOCK]
+                tmp_w = weights[i:i+BLOCK]
+                sorting_index = np.argsort(tmp_a)
+                sa = tmp_a[sorting_index]
+                sw = tmp_w[sorting_index]
+                cw = np.concatenate((zero, sw.cumsum()))
+                bin_index = _search_sorted_inclusive(sa, bin_edges)
+                cum_n += cw[bin_index]
+
+        n = np.diff(cum_n)
+
+    # density overrides the normed keyword
+    if density is not None:
+        if normed is not None:
+            # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
+            warnings.warn(
+                "The normed argument is ignored when density is provided. "
+                "In future passing both will result in an error.",
+                DeprecationWarning, stacklevel=2)
+        normed = None
+
+    if density:
+        db = np.array(np.diff(bin_edges), float)
+        return n/db/n.sum(), bin_edges
+    elif normed:
+        # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
+        warnings.warn(
+            "Passing `normed=True` on non-uniform bins has always been "
+            "broken, and computes neither the probability density "
+            "function nor the probability mass function. "
+            "The result is only correct if the bins are uniform, when "
+            "density=True will produce the same result anyway. "
+            "The argument will be removed in a future version of "
+            "numpy.",
+            np.VisibleDeprecationWarning, stacklevel=2)
+
+        # this normalization is incorrect, but
+        db = np.array(np.diff(bin_edges), float)
+        return n/(n*db).sum(), bin_edges
+    else:
+        if normed is not None:
+            # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
+            warnings.warn(
+                "Passing normed=False is deprecated, and has no effect. "
+                "Consider passing the density argument instead.",
+                DeprecationWarning, stacklevel=2)
+        return n, bin_edges
+
+
+def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,
+                            weights=None, density=None):
+    return (sample, bins, weights)
+
+
+@array_function_dispatch(_histogramdd_dispatcher)
+def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
+                density=None):
+    """
+    Compute the multidimensional histogram of some data.
+
+    Parameters
+    ----------
+    sample : (N, D) array, or (D, N) array_like
+        The data to be histogrammed.
+
+        Note the unusual interpretation of sample when an array_like:
+
+        * When an array, each row is a coordinate in a D-dimensional space -
+          such as ``histogramdd(np.array([p1, p2, p3]))``.
+        * When an array_like, each element is the list of values for a single
+          coordinate - such as ``histogramdd((X, Y, Z))``.
+
+        The first form should be preferred.
+
+    bins : sequence or int, optional
+        The bin specification:
+
+        * A sequence of arrays describing the monotonically increasing bin
+          edges along each dimension.
+        * The number of bins for each dimension (nx, ny, ... =bins)
+        * The number of bins for all dimensions (nx=ny=...=bins).
+
+    range : sequence, optional
+        A sequence of length D, each an optional (lower, upper) tuple giving
+        the outer bin edges to be used if the edges are not given explicitly in
+        `bins`.
+        An entry of None in the sequence results in the minimum and maximum
+        values being used for the corresponding dimension.
+        The default, None, is equivalent to passing a tuple of D None values.
+    density : bool, optional
+        If False, the default, returns the number of samples in each bin.
+        If True, returns the probability *density* function at the bin,
+        ``bin_count / sample_count / bin_volume``.
+    normed : bool, optional
+        An alias for the density argument that behaves identically. To avoid
+        confusion with the broken normed argument to `histogram`, `density`
+        should be preferred.
+    weights : (N,) array_like, optional
+        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
+        Weights are normalized to 1 if normed is True. If normed is False,
+        the values of the returned histogram are equal to the sum of the
+        weights belonging to the samples falling into each bin.
+
+    Returns
+    -------
+    H : ndarray
+        The multidimensional histogram of sample x. See normed and weights
+        for the different possible semantics.
+    edges : list
+        A list of D arrays describing the bin edges for each dimension.
+
+    See Also
+    --------
+    histogram: 1-D histogram
+    histogram2d: 2-D histogram
+
+    Examples
+    --------
+    >>> r = np.random.randn(100,3)
+    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
+    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
+    ((5, 8, 4), 6, 9, 5)
+
+    """
+
+    try:
+        # Sample is an ND-array.
+        N, D = sample.shape
+    except (AttributeError, ValueError):
+        # Sample is a sequence of 1D arrays.
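+        # (atleast_2d gives shape (D, N) for a sequence of D 1-D arrays;
+        # the transpose yields the (N, D) layout assumed below)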
+        sample = np.atleast_2d(sample).T
+        N, D = sample.shape
+
+    nbin = np.empty(D, int)
+    edges = D*[None]
+    dedges = D*[None]
+    if weights is not None:
+        weights = np.asarray(weights)
+
+    try:
+        M = len(bins)
+        if M != D:
+            raise ValueError(
+                'The dimension of bins must be equal to the dimension of the '
+                'sample x.')
+    except TypeError:
+        # bins is an integer
+        bins = D*[bins]
+
+    # normalize the range argument
+    if range is None:
+        range = (None,) * D
+    elif len(range) != D:
+        raise ValueError('range argument must have one entry per dimension')
+
+    # Create edge arrays
+    for i in _range(D):
+        if np.ndim(bins[i]) == 0:
+            if bins[i] < 1:
+                raise ValueError(
+                    '`bins[{}]` must be positive, when an integer'.format(i))
+            smin, smax = _get_outer_edges(sample[:,i], range[i])
+            edges[i] = np.linspace(smin, smax, bins[i] + 1)
+        elif np.ndim(bins[i]) == 1:
+            edges[i] = np.asarray(bins[i])
+            if np.any(edges[i][:-1] > edges[i][1:]):
+                raise ValueError(
+                    '`bins[{}]` must be monotonically increasing, when an array'
+                    .format(i))
+        else:
+            raise ValueError(
+                '`bins[{}]` must be a scalar or 1d array'.format(i))
+
+        nbin[i] = len(edges[i]) + 1  # includes an outlier on each end
+        dedges[i] = np.diff(edges[i])
+
+    # Compute the bin number each sample falls into.
+    Ncount = tuple(
+        # avoid np.digitize to work around gh-11022
+        np.searchsorted(edges[i], sample[:, i], side='right')
+        for i in _range(D)
+    )
+
+    # Using digitize, values that fall on an edge are put in the right bin.
+    # For the rightmost bin, we want values equal to the right edge to be
+    # counted in the last bin, and not as an outlier.
+    for i in _range(D):
+        # Find which points are on the rightmost edge.
+        on_edge = (sample[:, i] == edges[i][-1])
+        # Shift these points one bin to the left.
+        Ncount[i][on_edge] -= 1
+
+    # Compute the sample indices in the flattened histogram matrix.
+    # This raises an error if the array is too large.
+    xy = np.ravel_multi_index(Ncount, nbin)
+
+    # Compute the number of repetitions in xy and assign it to the
+    # flattened histmat.
+    hist = np.bincount(xy, weights, minlength=nbin.prod())
+
+    # Shape into a proper matrix
+    hist = hist.reshape(nbin)
+
+    # This preserves the (bad) behavior observed in gh-7845, for now.
+    hist = hist.astype(float, casting='safe')
+
+    # Remove outliers (indices 0 and -1 for each dimension).
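+    # (these are the two extra slots per axis added above via
+    # ``nbin[i] = len(edges[i]) + 1``)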
+    core = D*(slice(1, -1),)
+    hist = hist[core]
+
+    # handle the aliasing normed argument
+    if normed is None:
+        if density is None:
+            density = False
+    elif density is None:
+        # an explicit normed argument was passed, alias it to the new name
+        density = normed
+    else:
+        raise TypeError("Cannot specify both 'normed' and 'density'")
+
+    if density:
+        # calculate the probability density function
+        s = hist.sum()
+        for i in _range(D):
+            shape = np.ones(D, int)
+            shape[i] = nbin[i] - 2
+            hist = hist / dedges[i].reshape(shape)
+        hist /= s
+
+    if (hist.shape != nbin - 2).any():
+        raise RuntimeError(
+            "Internal Shape Error")
+    return hist, edges
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/histograms.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/histograms.pyc
new file mode 100644
index 0000000..d6f1b17
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/histograms.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/index_tricks.py b/project/venv/lib/python2.7/site-packages/numpy/lib/index_tricks.py
new file mode 100644
index 0000000..56abe29
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/index_tricks.py
@@ -0,0 +1,965 @@
+from __future__ import division, absolute_import, print_function
+
+import functools
+import sys
+import math
+
+import numpy.core.numeric as _nx
+from numpy.core.numeric import (
+    asarray, ScalarType, array, alltrue, cumprod, arange, ndim
+    )
+from numpy.core.numerictypes import find_common_type, issubdtype
+
+import numpy.matrixlib as matrixlib
+from .function_base import diff
+from numpy.core.multiarray import ravel_multi_index, unravel_index
+from numpy.core.overrides import set_module
+from numpy.core import overrides, linspace
+from numpy.lib.stride_tricks import as_strided
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+    'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
+    's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
+    'diag_indices', 'diag_indices_from'
+    ]
+
+
+def _ix__dispatcher(*args):
+    return args
+
+
+@array_function_dispatch(_ix__dispatcher)
+def ix_(*args):
+    """
+    Construct an open mesh from multiple sequences.
+
+    This function takes N 1-D sequences and returns N outputs with N
+    dimensions each, such that the shape is 1 in all but one dimension
+    and the dimension with the non-unit shape value cycles through all
+    N dimensions.
+
+    Using `ix_` one can quickly construct index arrays that will index
+    the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
+    ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
+
+    Parameters
+    ----------
+    args : 1-D sequences
+        Each sequence should be of integer or boolean type.
+        Boolean sequences will be interpreted as boolean masks for the
+        corresponding dimension (equivalent to passing in
+        ``np.nonzero(boolean_sequence)``).
+
+    Returns
+    -------
+    out : tuple of ndarrays
+        N arrays with N dimensions each, with N the number of input
+        sequences. Together these arrays form an open mesh.
+ + See Also + -------- + ogrid, mgrid, meshgrid + + Examples + -------- + >>> a = np.arange(10).reshape(2, 5) + >>> a + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> ixgrid = np.ix_([0, 1], [2, 4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + >>> ixgrid[0].shape, ixgrid[1].shape + ((2, 1), (1, 2)) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + >>> ixgrid = np.ix_([True, True], [2, 4]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + """ + out = [] + nd = len(args) + for k, new in enumerate(args): + new = asarray(new) + if new.ndim != 1: + raise ValueError("Cross index must be 1 dimensional") + if new.size == 0: + # Explicitly type empty arrays to avoid float default + new = new.astype(_nx.intp) + if issubdtype(new.dtype, _nx.bool_): + new, = new.nonzero() + new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1)) + out.append(new) + return tuple(out) + +class nd_grid(object): + """ + Construct a multi-dimensional "meshgrid". + + ``grid = nd_grid()`` creates an instance which will return a mesh-grid + when indexed. The dimension and number of the output arrays are equal + to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + If instantiated with an argument of ``sparse=True``, the mesh-grid is + open (or not fleshed out) so that only one-dimension of each returned + argument is greater than 1. + + Parameters + ---------- + sparse : bool, optional + Whether the grid is sparse or not. Default is False. + + Notes + ----- + Two instances of `nd_grid` are made available in the NumPy namespace, + `mgrid` and `ogrid`, approximately defined as:: + + mgrid = nd_grid(sparse=False) + ogrid = nd_grid(sparse=True) + + Users should use these pre-defined instances instead of using `nd_grid` + directly. 
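+
+    For example, ``mgrid[0:5]`` yields ``array([0, 1, 2, 3, 4])`` (stop
+    excluded), while ``mgrid[0:5:5j]`` yields five evenly spaced points
+    with the stop included: ``array([0., 1.25, 2.5, 3.75, 5.])``.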
+ """ + + def __init__(self, sparse=False): + self.sparse = sparse + + def __getitem__(self, key): + try: + size = [] + typ = int + for k in range(len(key)): + step = key[k].step + start = key[k].start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, complex): + size.append(int(abs(step))) + typ = float + else: + size.append( + int(math.ceil((key[k].stop - start)/(step*1.0)))) + if (isinstance(step, float) or + isinstance(start, float) or + isinstance(key[k].stop, float)): + typ = float + if self.sparse: + nn = [_nx.arange(_x, dtype=_t) + for _x, _t in zip(size, (typ,)*len(size))] + else: + nn = _nx.indices(size, typ) + for k in range(len(size)): + step = key[k].step + start = key[k].start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, complex): + step = int(abs(step)) + if step != 1: + step = (key[k].stop - start)/float(step-1) + nn[k] = (nn[k]*step+start) + if self.sparse: + slobj = [_nx.newaxis]*len(size) + for k in range(len(size)): + slobj[k] = slice(None, None) + nn[k] = nn[k][tuple(slobj)] + slobj[k] = _nx.newaxis + return nn + except (IndexError, TypeError): + step = key.step + stop = key.stop + start = key.start + if start is None: + start = 0 + if isinstance(step, complex): + step = abs(step) + length = int(step) + if step != 1: + step = (key.stop-start)/float(step-1) + stop = key.stop + step + return _nx.arange(0, length, 1, float)*step + start + else: + return _nx.arange(start, stop, step) + + +class MGridClass(nd_grid): + """ + `nd_grid` instance which returns a dense multi-dimensional "meshgrid". + + An instance of `numpy.lib.index_tricks.nd_grid` which returns an dense + (or fleshed out) mesh-grid when indexed, so that each returned argument + has the same shape. The dimensions and number of the output arrays are + equal to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ---------- + mesh-grid `ndarrays` all of the same dimensions + + See Also + -------- + numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects + ogrid : like mgrid but returns open (not fleshed out) mesh grids + r_ : array concatenator + + Examples + -------- + >>> np.mgrid[0:5,0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + + """ + def __init__(self): + super(MGridClass, self).__init__(sparse=False) + +mgrid = MGridClass() + +class OGridClass(nd_grid): + """ + `nd_grid` instance which returns an open multi-dimensional "meshgrid". + + An instance of `numpy.lib.index_tricks.nd_grid` which returns an open + (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension + of each returned array is greater than 1. The dimension and number of the + output arrays are equal to the number of indexing dimensions. If the step + length is not a complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 
5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ---------- + mesh-grid `ndarrays` with only one dimension :math:`\\neq 1` + + See Also + -------- + np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects + mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids + r_ : array concatenator + + Examples + -------- + >>> from numpy import ogrid + >>> ogrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + >>> ogrid[0:5,0:5] + [array([[0], + [1], + [2], + [3], + [4]]), array([[0, 1, 2, 3, 4]])] + + """ + def __init__(self): + super(OGridClass, self).__init__(sparse=True) + +ogrid = OGridClass() + + +class AxisConcatenator(object): + """ + Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see `r_`. + """ + # allow ma.mr_ to override this + concatenate = staticmethod(_nx.concatenate) + makemat = staticmethod(matrixlib.matrix) + + def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): + self.axis = axis + self.matrix = matrix + self.trans1d = trans1d + self.ndmin = ndmin + + def __getitem__(self, key): + # handle matrix builder syntax + if isinstance(key, str): + frame = sys._getframe().f_back + mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) + return mymat + + if not isinstance(key, tuple): + key = (key,) + + # copy attributes, since they can be overridden in the first argument + trans1d = self.trans1d + ndmin = self.ndmin + matrix = self.matrix + axis = self.axis + + objs = [] + scalars = [] + arraytypes = [] + scalartypes = [] + + for k, item in enumerate(key): + scalar = False + if isinstance(item, slice): + step = item.step + start = item.start + stop = item.stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, complex): + size = int(abs(step)) + newobj = linspace(start, stop, num=size) + else: + newobj = _nx.arange(start, stop, step) + if ndmin > 1: + newobj = array(newobj, copy=False, ndmin=ndmin) + if trans1d != -1: + newobj = newobj.swapaxes(-1, trans1d) + elif isinstance(item, str): + if k != 0: + raise ValueError("special directives must be the " + "first entry.") + if item in ('r', 'c'): + matrix = True + col = (item == 'c') + continue + if ',' in item: + vec = item.split(',') + try: + axis, ndmin = [int(x) for x in vec[:2]] + if len(vec) == 3: + trans1d = int(vec[2]) + continue + except Exception: + raise ValueError("unknown special directive") + try: + axis = int(item) + continue + except (ValueError, TypeError): + raise ValueError("unknown special directive") + elif type(item) in ScalarType: + newobj = array(item, ndmin=ndmin) + scalars.append(len(objs)) + scalar = True + scalartypes.append(newobj.dtype) + else: + item_ndim = ndim(item) + newobj = array(item, copy=False, subok=True, ndmin=ndmin) + if trans1d != -1 and item_ndim < ndmin: + k2 = ndmin - item_ndim + k1 = trans1d + if k1 < 0: + k1 += k2 + 1 + defaxes = list(range(ndmin)) + axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] + newobj = newobj.transpose(axes) + objs.append(newobj) + if not scalar and isinstance(newobj, _nx.ndarray): + arraytypes.append(newobj.dtype) + + # Ensure that scalars won't up-cast unless warranted + final_dtype = find_common_type(arraytypes, scalartypes) + if final_dtype is not None: + for k in scalars: + objs[k] = objs[k].astype(final_dtype) + + res = self.concatenate(tuple(objs), axis=axis) + + if matrix: + oldndim = res.ndim + 
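# res is an ndarray at this point; wrap it in a matrix, and for the
+            # 'c' directive transpose a 1-D result into a column below
+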
res = self.makemat(res) + if oldndim == 1 and col: + res = res.T + return res + + def __len__(self): + return 0 + +# separate classes are used here instead of just making r_ = concatentor(0), +# etc. because otherwise we couldn't get the doc string to come out right +# in help(r_) + +class RClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the first axis. + + This is a simple way to build up arrays quickly. There are two use cases. + + 1. If the index expression contains comma separated arrays, then stack + them along their first axis. + 2. If the index expression contains slice notation or scalars then create + a 1-D array with a range indicated by the slice notation. + + If slice notation is used, the syntax ``start:stop:step`` is equivalent + to ``np.arange(start, stop, step)`` inside of the brackets. However, if + ``step`` is an imaginary number (i.e. 100j) then its integer portion is + interpreted as a number-of-points desired and the start and stop are + inclusive. In other words ``start:stop:stepj`` is interpreted as + ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. + After expansion of slice notation, all comma separated sequences are + concatenated together. + + Optional character strings placed as the first element of the index + expression can be used to change the output. The strings 'r' or 'c' result + in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) + matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 + (column) matrix is produced. If the result is 2-D then both provide the + same matrix result. + + A string integer specifies which axis to stack multiple comma separated + arrays along. A string of two comma-separated integers allows indication + of the minimum number of dimensions to force each entry into as the + second integer (the axis to concatenate along is still the first integer). + + A string with three comma-separated integers allows specification of the + axis to concatenate along, the minimum number of dimensions to force the + entries to, and which axis should contain the start of the arrays which + are less than the specified number of dimensions. In other words the third + integer allows you to specify where the 1's should be placed in the shape + of the arrays that have their shapes upgraded. By default, they are placed + in the front of the shape tuple. The third argument allows you to specify + where the start of the array should be instead. Thus, a third argument of + '0' would place the 1's at the end of the array shape. Negative integers + specify where in the new shape tuple the last dimension of upgraded arrays + should be placed, so the default is '-1'. + + Parameters + ---------- + Not a function, so takes no parameters + + + Returns + ------- + A concatenated ndarray or matrix. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + c_ : Translates slice objects to concatenation along the second axis. + + Examples + -------- + >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] + array([1, 2, 3, 0, 0, 4, 5, 6]) + >>> np.r_[-1:1:6j, [0]*3, 5, 6] + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) + + String integers specify the axis to concatenate along or the minimum + number of dimensions to force entries into. 
+ + >>> a = np.array([[0, 1, 2], [3, 4, 5]]) + >>> np.r_['-1', a, a] # concatenate along last axis + array([[0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5]]) + >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.r_['0,2,0', [1,2,3], [4,5,6]] + array([[1], + [2], + [3], + [4], + [5], + [6]]) + >>> np.r_['1,2,0', [1,2,3], [4,5,6]] + array([[1, 4], + [2, 5], + [3, 6]]) + + Using 'r' or 'c' as a first string argument creates a matrix. + + >>> np.r_['r',[1,2,3], [4,5,6]] + matrix([[1, 2, 3, 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, 0) + +r_ = RClass() + +class CClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the second axis. + + This is short-hand for ``np.r_['-1,2,0', index expression]``, which is + useful because of its common occurrence. In particular, arrays will be + stacked along their last axis after being upgraded to at least 2-D with + 1's post-pended to the shape (column vectors made out of 1-D arrays). + + See Also + -------- + column_stack : Stack 1-D arrays as columns into a 2-D array. + r_ : For more detailed documentation. + + Examples + -------- + >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] + array([[1, 2, 3, 0, 0, 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) + + +c_ = CClass() + + +@set_module('numpy') +class ndenumerate(object): + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values. + + Parameters + ---------- + arr : ndarray + Input array. + + See Also + -------- + ndindex, flatiter + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> for index, x in np.ndenumerate(a): + ... print(index, x) + (0, 0) 1 + (0, 1) 2 + (1, 0) 3 + (1, 1) 4 + + """ + + def __init__(self, arr): + self.iter = asarray(arr).flat + + def __next__(self): + """ + Standard iterator method, returns the index tuple and array value. + + Returns + ------- + coords : tuple of ints + The indices of the current iteration. + val : scalar + The array element of the current iteration. + + """ + return self.iter.coords, next(self.iter) + + def __iter__(self): + return self + + next = __next__ + + +@set_module('numpy') +class ndindex(object): + """ + An N-dimensional iterator object to index arrays. + + Given the shape of an array, an `ndindex` instance iterates over + the N-dimensional index of the array. At each iteration a tuple + of indices is returned, the last dimension is iterated over first. + + Parameters + ---------- + `*args` : ints + The size of each dimension of the array. + + See Also + -------- + ndenumerate, flatiter + + Examples + -------- + >>> for index in np.ndindex(3, 2, 1): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + """ + + def __init__(self, *shape): + if len(shape) == 1 and isinstance(shape[0], tuple): + shape = shape[0] + x = as_strided(_nx.zeros(1), shape=shape, + strides=_nx.zeros_like(shape)) + self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], + order='C') + + def __iter__(self): + return self + + def ndincr(self): + """ + Increment the multi-dimensional index by one. + + This method is for backward compatibility only: do not use. + """ + next(self) + + def __next__(self): + """ + Standard iterator method, updates the index and returns the index + tuple. 
+ + Returns + ------- + val : tuple of ints + Returns a tuple containing the indices of the current + iteration. + + """ + next(self._it) + return self._it.multi_index + + next = __next__ + + +# You can do all this with slice() plus a few special objects, +# but there's a lot to remember. This version is simpler because +# it uses the standard array indexing syntax. +# +# Written by Konrad Hinsen +# last revision: 1999-7-23 +# +# Cosmetic changes by T. Oliphant 2001 +# +# + +class IndexExpression(object): + """ + A nicer way to build up index tuples for arrays. + + .. note:: + Use one of the two predefined instances `index_exp` or `s_` + rather than directly using `IndexExpression`. + + For any index combination, including slicing and axis insertion, + ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any + array `a`. However, ``np.index_exp[indices]`` can be used anywhere + in Python code and returns a tuple of slice objects that can be + used in the construction of complex index expressions. + + Parameters + ---------- + maketuple : bool + If True, always returns a tuple. + + See Also + -------- + index_exp : Predefined instance that always returns a tuple: + `index_exp = IndexExpression(maketuple=True)`. + s_ : Predefined instance without tuple conversion: + `s_ = IndexExpression(maketuple=False)`. + + Notes + ----- + You can do all this with `slice()` plus a few special objects, + but there's a lot to remember and this version is simpler because + it uses the standard array indexing syntax. + + Examples + -------- + >>> np.s_[2::2] + slice(2, None, 2) + >>> np.index_exp[2::2] + (slice(2, None, 2),) + + >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] + array([2, 4]) + + """ + + def __init__(self, maketuple): + self.maketuple = maketuple + + def __getitem__(self, item): + if self.maketuple and not isinstance(item, tuple): + return (item,) + else: + return item + +index_exp = IndexExpression(maketuple=True) +s_ = IndexExpression(maketuple=False) + +# End contribution from Konrad. + + +# The following functions complement those in twodim_base, but are +# applicable to N-dimensions. + + +def _fill_diagonal_dispatcher(a, val, wrap=None): + return (a,) + + +@array_function_dispatch(_fill_diagonal_dispatcher) +def fill_diagonal(a, val, wrap=False): + """Fill the main diagonal of the given array of any dimensionality. + + For an array `a` with ``a.ndim >= 2``, the diagonal is the list of + locations with indices ``a[i, ..., i]`` all identical. This function + modifies the input array in-place, it does not return a value. + + Parameters + ---------- + a : array, at least 2-D. + Array whose diagonal is to be filled, it gets modified in-place. + + val : scalar + Value to be written on the diagonal, its type must be compatible with + that of the array a. + + wrap : bool + For tall matrices in NumPy version up to 1.6.2, the + diagonal "wrapped" after N columns. You can have this behavior + with this option. This affects only tall matrices. + + See also + -------- + diag_indices, diag_indices_from + + Notes + ----- + .. versionadded:: 1.4.0 + + This functionality can be obtained via `diag_indices`, but internally + this version uses a much faster implementation that never constructs the + indices and uses simple slicing. 
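+
+    In the common 2-D square case this amounts to the slice assignment
+    ``a.flat[::a.shape[1] + 1] = val``.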
+
+    Examples
+    --------
+    >>> a = np.zeros((3, 3), int)
+    >>> np.fill_diagonal(a, 5)
+    >>> a
+    array([[5, 0, 0],
+           [0, 5, 0],
+           [0, 0, 5]])
+
+    The same function can operate on a 4-D array:
+
+    >>> a = np.zeros((3, 3, 3, 3), int)
+    >>> np.fill_diagonal(a, 4)
+
+    We only show a few blocks for clarity:
+
+    >>> a[0, 0]
+    array([[4, 0, 0],
+           [0, 0, 0],
+           [0, 0, 0]])
+    >>> a[1, 1]
+    array([[0, 0, 0],
+           [0, 4, 0],
+           [0, 0, 0]])
+    >>> a[2, 2]
+    array([[0, 0, 0],
+           [0, 0, 0],
+           [0, 0, 4]])
+
+    The wrap option affects only tall matrices:
+
+    >>> # tall matrices no wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [0, 0, 0]])
+
+    >>> # tall matrices wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4, wrap=True)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [4, 0, 0]])
+
+    >>> # wide matrices
+    >>> a = np.zeros((3, 5), int)
+    >>> np.fill_diagonal(a, 4, wrap=True)
+    >>> a
+    array([[4, 0, 0, 0, 0],
+           [0, 4, 0, 0, 0],
+           [0, 0, 4, 0, 0]])
+
+    """
+    if a.ndim < 2:
+        raise ValueError("array must be at least 2-d")
+    end = None
+    if a.ndim == 2:
+        # Explicit, fast formula for the common case.  For 2-d arrays, we
+        # accept rectangular ones.
+        step = a.shape[1] + 1
+        # This is needed so that tall matrices don't have the diagonal wrap.
+        if not wrap:
+            end = a.shape[1] * a.shape[1]
+    else:
+        # For more than d=2, the strided formula is only valid for arrays with
+        # all dimensions equal, so we check first.
+        if not alltrue(diff(a.shape) == 0):
+            raise ValueError("All dimensions of input must be of equal length")
+        step = 1 + (cumprod(a.shape[:-1])).sum()
+
+    # Write the value out into the diagonal.
+    a.flat[:end:step] = val
+
+
+@set_module('numpy')
+def diag_indices(n, ndim=2):
+    """
+    Return the indices to access the main diagonal of an array.
+
+    This returns a tuple of indices that can be used to access the main
+    diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
+    (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
+    ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
+    for ``i = [0..n-1]``.
+
+    Parameters
+    ----------
+    n : int
+        The size, along each dimension, of the arrays for which the returned
+        indices can be used.
+
+    ndim : int, optional
+        The number of dimensions.
+
+    See also
+    --------
+    diag_indices_from
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+    Create a set of indices to access the diagonal of a (4, 4) array:
+
+    >>> di = np.diag_indices(4)
+    >>> di
+    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+    >>> a[di] = 100
+    >>> a
+    array([[100,   1,   2,   3],
+           [  4, 100,   6,   7],
+           [  8,   9, 100,  11],
+           [ 12,  13,  14, 100]])
+
+    Now, we create indices to manipulate a 3-D array:
+
+    >>> d3 = np.diag_indices(2, 3)
+    >>> d3
+    (array([0, 1]), array([0, 1]), array([0, 1]))
+
+    And use it to set the diagonal of an array of zeros to 1:
+
+    >>> a = np.zeros((2, 2, 2), dtype=int)
+    >>> a[d3] = 1
+    >>> a
+    array([[[1, 0],
+            [0, 0]],
+           [[0, 0],
+            [0, 1]]])
+
+    """
+    idx = arange(n)
+    return (idx,) * ndim
+
+
+def _diag_indices_from(arr):
+    return (arr,)
+
+
+@array_function_dispatch(_diag_indices_from)
+def diag_indices_from(arr):
+    """
+    Return the indices to access the main diagonal of an n-dimensional array.
+
+    See `diag_indices` for full details.
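+
+    For example, for a ``(4, 4)`` array this returns the same tuple as
+    ``np.diag_indices(4)``.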
+ + Parameters + ---------- + arr : array, at least 2-D + + See Also + -------- + diag_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not alltrue(diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/index_tricks.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/index_tricks.pyc new file mode 100644 index 0000000..92fdb56 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/index_tricks.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/info.py b/project/venv/lib/python2.7/site-packages/numpy/lib/info.py new file mode 100644 index 0000000..8815a52 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/info.py @@ -0,0 +1,160 @@ +""" +Basic functions used by several sub-packages and +useful to have in the main name-space. + +Type Handling +------------- +================ =================== +iscomplexobj Test for complex object, scalar result +isrealobj Test for real object, scalar result +iscomplex Test for complex elements, array result +isreal Test for real elements, array result +imag Imaginary part +real Real part +real_if_close Turns complex number with tiny imaginary part to real +isneginf Tests for negative infinity, array result +isposinf Tests for positive infinity, array result +isnan Tests for nans, array result +isinf Tests for infinity, array result +isfinite Tests for finite numbers, array result +isscalar True if argument is a scalar +nan_to_num Replaces NaN's with 0 and infinities with large numbers +cast Dictionary of functions to force cast to each type +common_type Determine the minimum common type code for a group + of arrays +mintypecode Return minimal allowed common typecode. +================ =================== + +Index Tricks +------------ +================ =================== +mgrid Method which allows easy construction of N-d + 'mesh-grids' +``r_`` Append and construct arrays: turns slice objects into + ranges and concatenates them, for 2d arrays appends rows. +index_exp Konrad Hinsen's index_expression class instance which + can be useful for building complicated slicing syntax. +================ =================== + +Useful Functions +---------------- +================ =================== +select Extension of where to multiple conditions and choices +extract Extract 1d array from flattened array according to mask +insert Insert 1d array of values into Nd array according to mask +linspace Evenly spaced samples in linear space +logspace Evenly spaced samples in logarithmic space +fix Round x to nearest integer towards zero +mod Modulo mod(x,y) = x % y except keeps sign of y +amax Array maximum along axis +amin Array minimum along axis +ptp Array max-min along axis +cumsum Cumulative sum along axis +prod Product of elements along axis +cumprod Cumluative product along axis +diff Discrete differences along axis +angle Returns angle of complex argument +unwrap Unwrap phase along given axis (1-d algorithm) +sort_complex Sort a complex-array (based on real, then imaginary) +trim_zeros Trim the leading and trailing zeros from 1D array. 
+vectorize A class that wraps a Python function taking scalar + arguments into a generalized function which can handle + arrays of arguments using the broadcast rules of + numerix Python. +================ =================== + +Shape Manipulation +------------------ +================ =================== +squeeze Return a with length-one dimensions removed. +atleast_1d Force arrays to be >= 1D +atleast_2d Force arrays to be >= 2D +atleast_3d Force arrays to be >= 3D +vstack Stack arrays vertically (row on row) +hstack Stack arrays horizontally (column on column) +column_stack Stack 1D arrays as columns into 2D array +dstack Stack arrays depthwise (along third dimension) +stack Stack arrays along a new axis +split Divide array into a list of sub-arrays +hsplit Split into columns +vsplit Split into rows +dsplit Split along third dimension +================ =================== + +Matrix (2D Array) Manipulations +------------------------------- +================ =================== +fliplr 2D array with columns flipped +flipud 2D array with rows flipped +rot90 Rotate a 2D array a multiple of 90 degrees +eye Return a 2D array with ones down a given diagonal +diag Construct a 2D array from a vector, or return a given + diagonal from a 2D array. +mat Construct a Matrix +bmat Build a Matrix from blocks +================ =================== + +Polynomials +----------- +================ =================== +poly1d A one-dimensional polynomial class +poly Return polynomial coefficients from roots +roots Find roots of polynomial given coefficients +polyint Integrate polynomial +polyder Differentiate polynomial +polyadd Add polynomials +polysub Subtract polynomials +polymul Multiply polynomials +polydiv Divide polynomials +polyval Evaluate polynomial at given argument +================ =================== + +Iterators +--------- +================ =================== +Arrayterator A buffered iterator for big arrays. +================ =================== + +Import Tricks +------------- +================ =================== +ppimport Postpone module import until trying to use it +ppimport_attr Postpone module import until trying to use its attribute +ppresolve Import postponed module and return it. +================ =================== + +Machine Arithmetics +------------------- +================ =================== +machar_single Single precision floating point arithmetic parameters +machar_double Double precision floating point arithmetic parameters +================ =================== + +Threading Tricks +---------------- +================ =================== +ParallelExec Execute commands in parallel thread. +================ =================== + +Array Set Operations +----------------------- +Set operations for numeric arrays based on sort() function. + +================ =================== +unique Unique elements of an array. +isin Test whether each element of an ND array is present + anywhere within a second array. +ediff1d Array difference (auxiliary function). +intersect1d Intersection of 1D arrays with unique elements. +setxor1d Set exclusive-or of 1D arrays with unique elements. +in1d Test whether elements in a 1D array are also present in + another array. +union1d Union of 1D arrays with unique elements. +setdiff1d Set difference of 1D arrays with unique elements. 
+================ =================== + +""" +from __future__ import division, absolute_import, print_function + +depends = ['core', 'testing'] +global_symbols = ['*'] diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/info.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/info.pyc new file mode 100644 index 0000000..4f10f74 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/mixins.py b/project/venv/lib/python2.7/site-packages/numpy/lib/mixins.py new file mode 100644 index 0000000..52ad45b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/mixins.py @@ -0,0 +1,182 @@ +"""Mixin classes for custom array types that don't inherit from ndarray.""" +from __future__ import division, absolute_import, print_function + +import sys + +from numpy.core import umath as um + +# Nothing should be exposed in the top-level NumPy module. +__all__ = [] + + +def _disables_array_ufunc(obj): + """True when __array_ufunc__ is set to None.""" + try: + return obj.__array_ufunc__ is None + except AttributeError: + return False + + +def _binary_method(ufunc, name): + """Implement a forward binary method with a ufunc, e.g., __add__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(self, other) + func.__name__ = '__{}__'.format(name) + return func + + +def _reflected_binary_method(ufunc, name): + """Implement a reflected binary method with a ufunc, e.g., __radd__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(other, self) + func.__name__ = '__r{}__'.format(name) + return func + + +def _inplace_binary_method(ufunc, name): + """Implement an in-place binary method with a ufunc, e.g., __iadd__.""" + def func(self, other): + return ufunc(self, other, out=(self,)) + func.__name__ = '__i{}__'.format(name) + return func + + +def _numeric_methods(ufunc, name): + """Implement forward, reflected and inplace binary methods with a ufunc.""" + return (_binary_method(ufunc, name), + _reflected_binary_method(ufunc, name), + _inplace_binary_method(ufunc, name)) + + +def _unary_method(ufunc, name): + """Implement a unary special method with a ufunc.""" + def func(self): + return ufunc(self) + func.__name__ = '__{}__'.format(name) + return func + + +class NDArrayOperatorsMixin(object): + """Mixin defining all operator special methods using __array_ufunc__. + + This class implements the special methods for almost all of Python's + builtin operators defined in the `operator` module, including comparisons + (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by + deferring to the ``__array_ufunc__`` method, which subclasses must + implement. + + It is useful for writing classes that do not inherit from `numpy.ndarray`, + but that should support arithmetic and numpy universal functions like + arrays as described in `A Mechanism for Overriding Ufuncs + <../../neps/nep-0013-ufunc-overrides.html>`_. 
+
+    As a trivial example, consider this implementation of an ``ArrayLike``
+    class that simply wraps a NumPy array and ensures that the result of any
+    arithmetic operation is also an ``ArrayLike`` object::
+
+        class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+            def __init__(self, value):
+                self.value = np.asarray(value)
+
+            # One might also consider adding the built-in list type to this
+            # list, to support operations like np.add(array_like, list)
+            _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                out = kwargs.get('out', ())
+                for x in inputs + out:
+                    # Only support operations with instances of _HANDLED_TYPES.
+                    # Use ArrayLike instead of type(self) for isinstance to
+                    # allow subclasses that don't override __array_ufunc__ to
+                    # handle ArrayLike objects.
+                    if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+                        return NotImplemented
+
+                # Defer to the implementation of the ufunc on unwrapped values.
+                inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+                               for x in inputs)
+                if out:
+                    kwargs['out'] = tuple(
+                        x.value if isinstance(x, ArrayLike) else x
+                        for x in out)
+                result = getattr(ufunc, method)(*inputs, **kwargs)
+
+                if type(result) is tuple:
+                    # multiple return values
+                    return tuple(type(self)(x) for x in result)
+                elif method == 'at':
+                    # no return value
+                    return None
+                else:
+                    # one return value
+                    return type(self)(result)
+
+            def __repr__(self):
+                return '%s(%r)' % (type(self).__name__, self.value)
+
+    In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
+    the result is always another ``ArrayLike``:
+
+    >>> x = ArrayLike([1, 2, 3])
+    >>> x - 1
+    ArrayLike(array([0, 1, 2]))
+    >>> 1 - x
+    ArrayLike(array([ 0, -1, -2]))
+    >>> np.arange(3) - x
+    ArrayLike(array([-1, -1, -1]))
+    >>> x - np.arange(3)
+    ArrayLike(array([1, 1, 1]))
+
+    Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
+    with arbitrary, unrecognized types. This ensures that interactions with
+    ArrayLike preserve a well-defined casting hierarchy.
+
+    .. versionadded:: 1.13
+    """
+    # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
+    # overrides NEP.
+
+    # comparisons don't have reflected and in-place versions
+    __lt__ = _binary_method(um.less, 'lt')
+    __le__ = _binary_method(um.less_equal, 'le')
+    __eq__ = _binary_method(um.equal, 'eq')
+    __ne__ = _binary_method(um.not_equal, 'ne')
+    __gt__ = _binary_method(um.greater, 'gt')
+    __ge__ = _binary_method(um.greater_equal, 'ge')
+
+    # numeric methods
+    __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
+    __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
+    __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
+    __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
+        um.matmul, 'matmul')
+    if sys.version_info.major < 3:
+        # Python 3 uses only __truediv__ and __floordiv__
+        __div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, 'div')
+    __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
+        um.true_divide, 'truediv')
+    __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
+        um.floor_divide, 'floordiv')
+    __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
+    __divmod__ = _binary_method(um.divmod, 'divmod')
+    __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
+    # __idivmod__ does not exist
+    # TODO: handle the optional third argument for __pow__?
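+    # (the generated __pow__ takes just (self, other), so the three-argument
+    # pow(a, b, modulo) form is not supported by this mixin)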
+ __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') + __lshift__, __rlshift__, __ilshift__ = _numeric_methods( + um.left_shift, 'lshift') + __rshift__, __rrshift__, __irshift__ = _numeric_methods( + um.right_shift, 'rshift') + __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') + __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') + __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') + + # unary methods + __neg__ = _unary_method(um.negative, 'neg') + __pos__ = _unary_method(um.positive, 'pos') + __abs__ = _unary_method(um.absolute, 'abs') + __invert__ = _unary_method(um.invert, 'invert') diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/mixins.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/mixins.pyc new file mode 100644 index 0000000..b27218e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/mixins.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/nanfunctions.py b/project/venv/lib/python2.7/site-packages/numpy/lib/nanfunctions.py new file mode 100644 index 0000000..d73d844 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/nanfunctions.py @@ -0,0 +1,1633 @@ +""" +Functions that ignore NaN. + +Functions +--------- + +- `nanmin` -- minimum non-NaN value +- `nanmax` -- maximum non-NaN value +- `nanargmin` -- index of minimum non-NaN value +- `nanargmax` -- index of maximum non-NaN value +- `nansum` -- sum of non-NaN values +- `nanprod` -- product of non-NaN values +- `nancumsum` -- cumulative sum of non-NaN values +- `nancumprod` -- cumulative product of non-NaN values +- `nanmean` -- mean of non-NaN values +- `nanvar` -- variance of non-NaN values +- `nanstd` -- standard deviation of non-NaN values +- `nanmedian` -- median of non-NaN values +- `nanquantile` -- qth quantile of non-NaN values +- `nanpercentile` -- qth percentile of non-NaN values + +""" +from __future__ import division, absolute_import, print_function + +import functools +import warnings +import numpy as np +from numpy.lib import function_base +from numpy.core import overrides + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', + 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', + 'nancumsum', 'nancumprod', 'nanquantile' + ] + + +def _replace_nan(a, val): + """ + If `a` is of inexact type, make a copy of `a`, replace NaNs with + the `val` value, and return the copy together with a boolean mask + marking the locations where NaNs were present. If `a` is not of + inexact type, do nothing and return `a` together with a mask of None. + + Note that scalars will end up as array scalars, which is important + for using the result as the value of the out argument in some + operations. + + Parameters + ---------- + a : array-like + Input array. + val : float + NaN values are set to val before doing the operation. + + Returns + ------- + y : ndarray + If `a` is of inexact type, return a copy of `a` with the NaNs + replaced by the fill value, otherwise return `a`. + mask: {bool, None} + If `a` is of inexact type, return a boolean mask marking locations of + NaNs, otherwise return None. 
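+
+    For example, ``_replace_nan(np.array([1., np.nan]), 0)`` returns
+    ``(array([1., 0.]), array([False, True]))``.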
+
+    """
+    a = np.array(a, subok=True, copy=True)
+
+    if a.dtype == np.object_:
+        # object arrays do not support `isnan` (gh-9009), so make a guess
+        mask = a != a
+    elif issubclass(a.dtype.type, np.inexact):
+        mask = np.isnan(a)
+    else:
+        mask = None
+
+    if mask is not None:
+        np.copyto(a, val, where=mask)
+
+    return a, mask
+
+
+def _copyto(a, val, mask):
+    """
+    Replace values in `a` with NaN where `mask` is True. This differs from
+    copyto in that it will deal with the case where `a` is a numpy scalar.
+
+    Parameters
+    ----------
+    a : ndarray or numpy scalar
+        Array or numpy scalar some of whose values are to be replaced
+        by val.
+    val : numpy scalar
+        Value used as a replacement.
+    mask : ndarray, scalar
+        Boolean array. Where True the corresponding element of `a` is
+        replaced by `val`. Broadcasts.
+
+    Returns
+    -------
+    res : ndarray, scalar
+        Array with elements replaced or scalar `val`.
+
+    """
+    if isinstance(a, np.ndarray):
+        np.copyto(a, val, where=mask, casting='unsafe')
+    else:
+        a = a.dtype.type(val)
+    return a
+
+
+def _remove_nan_1d(arr1d, overwrite_input=False):
+    """
+    Equivalent to ``arr1d[~np.isnan(arr1d)]``, but in a different order
+
+    Presumably faster as it incurs fewer copies
+
+    Parameters
+    ----------
+    arr1d : ndarray
+        Array to remove nans from
+    overwrite_input : bool
+        True if `arr1d` can be modified in place
+
+    Returns
+    -------
+    res : ndarray
+        Array with nan elements removed
+    overwrite_input : bool
+        True if `res` can be modified in place, given the constraint on the
+        input
+    """
+
+    c = np.isnan(arr1d)
+    s = np.nonzero(c)[0]
+    if s.size == arr1d.size:
+        warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4)
+        return arr1d[:0], True
+    elif s.size == 0:
+        return arr1d, overwrite_input
+    else:
+        if not overwrite_input:
+            arr1d = arr1d.copy()
+        # select non-nans at end of array
+        enonan = arr1d[-s.size:][~c[-s.size:]]
+        # fill nans in beginning of array with non-nans of end
+        arr1d[s[:enonan.size]] = enonan
+
+        return arr1d[:-s.size], True
+
+
+def _divide_by_count(a, b, out=None):
+    """
+    Compute a/b ignoring invalid results. If `a` is an array the division
+    is done in place. If `a` is a scalar, then its type is preserved in the
+    output. If out is None, then `a` is used instead so that the
+    division is in place. Note that this is only called with `a` an inexact
+    type.
+
+    Parameters
+    ----------
+    a : {ndarray, numpy scalar}
+        Numerator. Expected to be of inexact type but not checked.
+    b : {ndarray, numpy scalar}
+        Denominator.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
+
+    Returns
+    -------
+    ret : {ndarray, numpy scalar}
+        The return value is a/b. If `a` was an ndarray the division is done
+        in place. If `a` is a numpy scalar, the division preserves its type.
+
+    """
+    with np.errstate(invalid='ignore', divide='ignore'):
+        if isinstance(a, np.ndarray):
+            if out is None:
+                return np.divide(a, b, out=a, casting='unsafe')
+            else:
+                return np.divide(a, b, out=out, casting='unsafe')
+        else:
+            if out is None:
+                return a.dtype.type(a / b)
+            else:
+                # This is questionable, but currently a numpy scalar can
+                # be output to a zero dimensional array.
+                return np.divide(a, b, out=out, casting='unsafe')
+
+
+def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanmin_dispatcher)
+def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
+    """
+    Return the minimum of an array or minimum along an axis, ignoring any
+    NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
+    raised and NaN is returned for that slice.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose minimum is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the minimum is computed. The default is to
+        compute the minimum of the flattened array.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        `doc.ufuncs` for details.
+
+        .. versionadded:: 1.8.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then `keepdims` will
+        be passed through to the `min` method of sub-classes of
+        `ndarray`. If a sub-class's method does not implement
+        `keepdims`, an exception will be raised.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    nanmin : ndarray
+        An array with the same shape as `a`, with the specified axis
+        removed. If `a` is a 0-d array, or if axis is None, an ndarray
+        scalar is returned. The same dtype as `a` is returned.
+
+    See Also
+    --------
+    nanmax :
+        The maximum value of an array along a given axis, ignoring any NaNs.
+    amin :
+        The minimum value of an array along a given axis, propagating any NaNs.
+    fmin :
+        Element-wise minimum of two arrays, ignoring any NaNs.
+    minimum :
+        Element-wise minimum of two arrays, propagating any NaNs.
+    isnan :
+        Shows which elements are Not a Number (NaN).
+    isfinite:
+        Shows which elements are neither NaN nor infinity.
+
+    amax, fmax, maximum
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Positive infinity is treated as a very large number and negative
+    infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type the function is equivalent to np.min.
+ + Examples + -------- + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmin(a) + 1.0 + >>> np.nanmin(a, axis=0) + array([ 1., 2.]) + >>> np.nanmin(a, axis=1) + array([ 1., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmin([1, 2, np.nan, np.inf]) + 1.0 + >>> np.nanmin([1, 2, np.nan, np.NINF]) + -inf + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is np.ndarray and a.dtype != np.object_: + # Fast, but not safe for subclasses of ndarray, or object arrays, + # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) + res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, +np.inf) + res = np.amin(a, axis=axis, out=out, **kwargs) + if mask is None: + return res + + # Check for all-NaN axis + mask = np.all(mask, axis=axis, **kwargs) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2) + return res + + +def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmax_dispatcher) +def nanmax(a, axis=None, out=None, keepdims=np._NoValue): + """ + Return the maximum of an array or maximum along an axis, ignoring any + NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is + raised and NaN is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose maximum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the maximum is computed. The default is to compute + the maximum of the flattened array. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + `doc.ufuncs` for details. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `max` method + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + + .. versionadded:: 1.8.0 + + Returns + ------- + nanmax : ndarray + An array with the same shape as `a`, with the specified axis removed. + If `a` is a 0-d array, or if axis is None, an ndarray scalar is + returned. The same dtype as `a` is returned. + + See Also + -------- + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. + amax : + The maximum value of an array along a given axis, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + isnan : + Shows which elements are Not a Number (NaN). + isfinite: + Shows which elements are neither NaN nor infinity. + + amin, fmin, minimum + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. 
+ Positive infinity is treated as a very large number and negative + infinity is treated as a very small (i.e. negative) number. + + If the input has a integer type the function is equivalent to np.max. + + Examples + -------- + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmax(a) + 3.0 + >>> np.nanmax(a, axis=0) + array([ 3., 2.]) + >>> np.nanmax(a, axis=1) + array([ 2., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmax([1, 2, np.nan, np.NINF]) + 2.0 + >>> np.nanmax([1, 2, np.nan, np.inf]) + inf + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is np.ndarray and a.dtype != np.object_: + # Fast, but not safe for subclasses of ndarray, or object arrays, + # which do not implement isnan (gh-9009), or fmax correctly (gh-8975) + res = np.fmax.reduce(a, axis=axis, out=out, **kwargs) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, -np.inf) + res = np.amax(a, axis=axis, out=out, **kwargs) + if mask is None: + return res + + # Check for all-NaN axis + mask = np.all(mask, axis=axis, **kwargs) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2) + return res + + +def _nanargmin_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_nanargmin_dispatcher) +def nanargmin(a, axis=None): + """ + Return the indices of the minimum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results + cannot be trusted if a slice contains only NaNs and Infs. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmin, nanargmax + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmin(a) + 0 + >>> np.nanargmin(a) + 2 + >>> np.nanargmin(a, axis=0) + array([1, 1]) + >>> np.nanargmin(a, axis=1) + array([1, 0]) + + """ + a, mask = _replace_nan(a, np.inf) + res = np.argmin(a, axis=axis) + if mask is not None: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + return res + + +def _nanargmax_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_nanargmax_dispatcher) +def nanargmax(a, axis=None): + """ + Return the indices of the maximum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the + results cannot be trusted if a slice contains only NaNs and -Infs. + + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. 
+ + See Also + -------- + argmax, nanargmin + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmax(a) + 0 + >>> np.nanargmax(a) + 1 + >>> np.nanargmax(a, axis=0) + array([1, 0]) + >>> np.nanargmax(a, axis=1) + array([1, 1]) + + """ + a, mask = _replace_nan(a, -np.inf) + res = np.argmax(a, axis=axis) + if mask is not None: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + return res + + +def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nansum_dispatcher) +def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. + + In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or + empty. In later versions zero is returned. + + Parameters + ---------- + a : array_like + Array containing numbers whose sum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the sum is computed. The default is to compute the + sum of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + + .. versionadded:: 1.8.0 + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. If provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + `doc.ufuncs` for details. The casting of NaN to integer can yield + unexpected results. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + + .. versionadded:: 1.8.0 + + Returns + ------- + nansum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.sum : Sum across array propagating NaNs. + isnan : Show which elements are NaN. + isfinite: Show which elements are not NaN or +/-inf. + + Notes + ----- + If both positive and negative infinity are present, the sum will be Not + A Number (NaN). 
+ + Examples + -------- + >>> np.nansum(1) + 1 + >>> np.nansum([1]) + 1 + >>> np.nansum([1, np.nan]) + 1.0 + >>> a = np.array([[1, 1], [1, np.nan]]) + >>> np.nansum(a) + 3.0 + >>> np.nansum(a, axis=0) + array([ 2., 1.]) + >>> np.nansum([1, np.nan, np.inf]) + inf + >>> np.nansum([1, np.nan, np.NINF]) + -inf + >>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present + nan + + """ + a, mask = _replace_nan(a, 0) + return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + + +def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanprod_dispatcher) +def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the product of array elements over a given axis treating Not a + Numbers (NaNs) as ones. + + One is returned for slices that are all-NaN or empty. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose product is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the product is computed. The default is to compute + the product of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. If provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + `doc.ufuncs` for details. The casting of NaN to integer can yield + unexpected results. + keepdims : bool, optional + If True, the axes which are reduced are left in the result as + dimensions with size one. With this option, the result will + broadcast correctly against the original `arr`. + + Returns + ------- + nanprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.prod : Product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> np.nanprod(1) + 1 + >>> np.nanprod([1]) + 1 + >>> np.nanprod([1, np.nan]) + 1.0 + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanprod(a) + 6.0 + >>> np.nanprod(a, axis=0) + array([ 3., 2.]) + + """ + a, mask = _replace_nan(a, 1) + return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + + +def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumsum_dispatcher) +def nancumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are + encountered and leading NaNs are replaced by zeros. + + Zeros are returned for slices that are all-NaN or empty. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. 
+ dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See `doc.ufuncs` + (Section "Output arguments") for more details. + + Returns + ------- + nancumsum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.cumsum : Cumulative sum across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> np.nancumsum(1) + array([1]) + >>> np.nancumsum([1]) + array([1]) + >>> np.nancumsum([1, np.nan]) + array([ 1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumsum(a) + array([ 1., 3., 6., 6.]) + >>> np.nancumsum(a, axis=0) + array([[ 1., 2.], + [ 4., 2.]]) + >>> np.nancumsum(a, axis=1) + array([[ 1., 3.], + [ 3., 3.]]) + + """ + a, mask = _replace_nan(a, 0) + return np.cumsum(a, axis=axis, dtype=dtype, out=out) + + +def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumprod_dispatcher) +def nancumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of array elements over a given axis treating Not a + Numbers (NaNs) as one. The cumulative product does not change when NaNs are + encountered and leading NaNs are replaced by ones. + + Ones are returned for slices that are all-NaN or empty. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + nancumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.cumprod : Cumulative product across array propagating NaNs. + isnan : Show which elements are NaN. 
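+
+    Notes
+    -----
+    A leading NaN is treated as one, so it does not affect later
+    products (a minimal sketch of the rule stated above, legacy array
+    repr as in the surrounding examples):
+
+    >>> np.nancumprod([np.nan, 2, 3])
+    array([ 1.,  2.,  6.])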
+ + Examples + -------- + >>> np.nancumprod(1) + array([1]) + >>> np.nancumprod([1]) + array([1]) + >>> np.nancumprod([1, np.nan]) + array([ 1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumprod(a) + array([ 1., 2., 6., 6.]) + >>> np.nancumprod(a, axis=0) + array([[ 1., 2.], + [ 3., 2.]]) + >>> np.nancumprod(a, axis=1) + array([[ 1., 2.], + [ 3., 3.]]) + + """ + a, mask = _replace_nan(a, 1) + return np.cumprod(a, axis=axis, dtype=dtype, out=out) + + +def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmean_dispatcher) +def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Compute the arithmetic mean along the specified axis, ignoring NaNs. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the means are computed. The default is to compute + the mean of the flattened array. + dtype : data-type, optional + Type to use in computing the mean. For integer inputs, the default + is `float64`; for inexact inputs, it is the same as the input + dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + `doc.ufuncs` for details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. Nan is + returned for slices that contain only NaNs. + + See Also + -------- + average : Weighted average + mean : Arithmetic mean taken while not ignoring NaNs + var, nanvar + + Notes + ----- + The arithmetic mean is the sum of the non-NaN elements along the axis + divided by the number of non-NaN elements. + + Note that for floating-point input, the mean is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32`. Specifying a + higher-precision accumulator using the `dtype` keyword can alleviate + this issue. 
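+
+    For example, a double-precision accumulator can be requested
+    explicitly (a minimal sketch of the ``dtype`` note above):
+
+    >>> np.nanmean([1, np.nan, 3], dtype=np.float64)
+    2.0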
+ + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanmean(a) + 2.6666666666666665 + >>> np.nanmean(a, axis=0) + array([ 2., 4.]) + >>> np.nanmean(a, axis=1) + array([ 1., 3.5]) + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims) + tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + avg = _divide_by_count(tot, cnt, out=out) + + isbad = (cnt == 0) + if isbad.any(): + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) + # NaN is the only possible bad value, so no further + # action is needed to handle bad results. + return avg + + +def _nanmedian1d(arr1d, overwrite_input=False): + """ + Private function for rank 1 arrays. Compute the median ignoring NaNs. + See nanmedian for parameter usage + """ + arr1d, overwrite_input = _remove_nan_1d(arr1d, + overwrite_input=overwrite_input) + if arr1d.size == 0: + return np.nan + + return np.median(arr1d, overwrite_input=overwrite_input) + + +def _nanmedian(a, axis=None, out=None, overwrite_input=False): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanmedian for parameter usage + + """ + if axis is None or a.ndim == 1: + part = a.ravel() + if out is None: + return _nanmedian1d(part, overwrite_input) + else: + out[...] = _nanmedian1d(part, overwrite_input) + return out + else: + # for small medians use sort + indexing which is still faster than + # apply_along_axis + # benchmarked with shuffled (50, 50, x) containing a few NaN + if a.shape[axis] < 600: + return _nanmedian_small(a, axis, out, overwrite_input) + result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) + if out is not None: + out[...] = result + return result + + +def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): + """ + sort + indexing median, faster for small medians along multiple + dimensions due to the high overhead of apply_along_axis + + see nanmedian for parameter usage + """ + a = np.ma.masked_array(a, np.isnan(a)) + m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) + for i in range(np.count_nonzero(m.mask.ravel())): + warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3) + if out is not None: + out[...] = m.filled(np.nan) + return out + return m.filled(np.nan) + + +def _nanmedian_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmedian_dispatcher) +def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): + """ + Compute the median along the specified axis, while ignoring NaNs. + + Returns the median of the array elements. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. 
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
+    overwrite_input : bool, optional
+        If True, then allow use of memory of input array `a` for
+        calculations. The input array will be modified by the call to
+        `median`. This will save memory when you do not need to preserve
+        the contents of the input array. Treat the input as undefined,
+        but it will probably be fully or partially sorted. Default is
+        False. If `overwrite_input` is ``True`` and `a` is not already an
+        `ndarray`, an error will be raised.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If this is anything but the default value it will be passed
+        through (in the special case of an empty array) to the
+        `mean` function of the underlying array. If the array is
+        a sub-class and `mean` does not have the kwarg `keepdims` this
+        will raise a RuntimeError.
+
+    Returns
+    -------
+    median : ndarray
+        A new array holding the result. If the input contains integers
+        or floats smaller than ``float64``, then the output data-type is
+        ``np.float64``. Otherwise, the data-type of the output is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
+
+    See Also
+    --------
+    mean, median, percentile
+
+    Notes
+    -----
+    Given a vector ``V`` of length ``N``, the median of ``V`` is the
+    middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+    ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two
+    middle values of ``V_sorted`` when ``N`` is even.
+
+    Examples
+    --------
+    >>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
+    >>> a[0, 1] = np.nan
+    >>> a
+    array([[ 10., nan, 4.],
+           [ 3., 2., 1.]])
+    >>> np.median(a)
+    nan
+    >>> np.nanmedian(a)
+    3.0
+    >>> np.nanmedian(a, axis=0)
+    array([ 6.5, 2., 2.5])
+    >>> np.nanmedian(a, axis=1)
+    array([ 7., 2.])
+    >>> b = a.copy()
+    >>> np.nanmedian(b, axis=1, overwrite_input=True)
+    array([ 7., 2.])
+    >>> assert not np.all(a==b)
+    >>> b = a.copy()
+    >>> np.nanmedian(b, axis=None, overwrite_input=True)
+    3.0
+    >>> assert not np.all(a==b)
+
+    """
+    a = np.asanyarray(a)
+    # apply_along_axis in _nanmedian doesn't handle empty arrays well,
+    # so deal with them upfront
+    if a.size == 0:
+        return np.nanmean(a, axis, out=out, keepdims=keepdims)
+
+    r, k = function_base._ureduce(a, func=_nanmedian, axis=axis, out=out,
+                                  overwrite_input=overwrite_input)
+    if keepdims and keepdims is not np._NoValue:
+        return r.reshape(k)
+    else:
+        return r
+
+
+def _nanpercentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+                              interpolation=None, keepdims=None):
+    return (a, q, out)
+
+
+@array_function_dispatch(_nanpercentile_dispatcher)
+def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
+                  interpolation='linear', keepdims=np._NoValue):
+    """
+    Compute the qth percentile of the data along the specified axis,
+    while ignoring nan values.
+
+    Returns the qth percentile(s) of the array elements.
+
+    .. versionadded:: 1.9.0
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array, containing
+        nan values to be ignored.
+    q : array_like of float
+        Percentile or sequence of percentiles to compute, which must be between
+        0 and 100 inclusive.
+ axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The + default is to compute the percentile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to + use when the desired percentile lies between two data points + ``i < j``: + + * 'linear': ``i + (j - i) * fraction``, where ``fraction`` + is the fractional part of the index surrounded by ``i`` + and ``j``. + * 'lower': ``i``. + * 'higher': ``j``. + * 'nearest': ``i`` or ``j``, whichever is nearest. + * 'midpoint': ``(i + j) / 2``. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + nanmean + nanmedian : equivalent to ``nanpercentile(..., 50)`` + percentile, median, mean + nanquantile : equivalent to nanpercentile, but with q in the range [0, 1]. + + Notes + ----- + Given a vector ``V`` of length ``N``, the ``q``-th percentile of + ``V`` is the value ``q/100`` of the way from the minimum to the + maximum in a sorted copy of ``V``. The values and distances of + the two nearest neighbors as well as the `interpolation` parameter + will determine the percentile if the normalized ranking does not + match the location of ``q`` exactly. This function is the same as + the median if ``q=50``, the same as the minimum if ``q=0`` and the + same as the maximum if ``q=100``. + + Examples + -------- + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[ 10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.percentile(a, 50) + nan + >>> np.nanpercentile(a, 50) + 3.5 + >>> np.nanpercentile(a, 50, axis=0) + array([ 6.5, 2., 2.5]) + >>> np.nanpercentile(a, 50, axis=1, keepdims=True) + array([[ 7.], + [ 2.]]) + >>> m = np.nanpercentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanpercentile(a, 50, axis=0, out=out) + array([ 6.5, 2., 2.5]) + >>> m + array([ 6.5, 2. 
, 2.5]) + + >>> b = a.copy() + >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) + array([ 7., 2.]) + >>> assert not np.all(a==b) + + """ + a = np.asanyarray(a) + q = np.true_divide(q, 100.0) # handles the asarray for us too + if not function_base._quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, interpolation, keepdims) + + +def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + interpolation=None, keepdims=None): + return (a, q, out) + + +@array_function_dispatch(_nanquantile_dispatcher) +def nanquantile(a, q, axis=None, out=None, overwrite_input=False, + interpolation='linear', keepdims=np._NoValue): + """ + Compute the qth quantile of the data along the specified axis, + while ignoring nan values. + Returns the qth quantile(s) of the array elements. + .. versionadded:: 1.15.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored + q : array_like of float + Quantile or sequence of quantiles to compute, which must be between + 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The + default is to compute the quantile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to + use when the desired quantile lies between two data points + ``i < j``: + + * linear: ``i + (j - i) * fraction``, where ``fraction`` + is the fractional part of the index surrounded by ``i`` + and ``j``. + * lower: ``i``. + * higher: ``j``. + * nearest: ``i`` or ``j``, whichever is nearest. + * midpoint: ``(i + j) / 2``. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple quantiles are given, first axis of + the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + quantile + nanmean, nanmedian + nanmedian : equivalent to ``nanquantile(..., 0.5)`` + nanpercentile : same as nanquantile, but with q in the range [0, 100]. 
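+
+    Notes
+    -----
+    When several quantiles are requested, the first axis of the result
+    indexes them (a short sketch of the behaviour described in Returns,
+    legacy array repr as in the surrounding examples):
+
+    >>> np.nanquantile([1., 2., 3., np.nan], [0.25, 0.75])
+    array([ 1.5,  2.5])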
+ + Examples + -------- + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[ 10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.quantile(a, 0.5) + nan + >>> np.nanquantile(a, 0.5) + 3.5 + >>> np.nanquantile(a, 0.5, axis=0) + array([ 6.5, 2., 2.5]) + >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) + array([[ 7.], + [ 2.]]) + >>> m = np.nanquantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanquantile(a, 0.5, axis=0, out=out) + array([ 6.5, 2., 2.5]) + >>> m + array([ 6.5, 2. , 2.5]) + >>> b = a.copy() + >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) + array([ 7., 2.]) + >>> assert not np.all(a==b) + """ + a = np.asanyarray(a) + q = np.asanyarray(q) + if not function_base._quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, interpolation, keepdims) + + +def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, + interpolation='linear', keepdims=np._NoValue): + """Assumes that q is in [0, 1], and is an ndarray""" + # apply_along_axis in _nanpercentile doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + + r, k = function_base._ureduce( + a, func=_nanquantile_ureduce_func, q=q, axis=axis, out=out, + overwrite_input=overwrite_input, interpolation=interpolation + ) + if keepdims and keepdims is not np._NoValue: + return r.reshape(q.shape + k) + else: + return r + + +def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, + interpolation='linear'): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + """ + if axis is None or a.ndim == 1: + part = a.ravel() + result = _nanquantile_1d(part, q, overwrite_input, interpolation) + else: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, interpolation) + # apply_along_axis fills in collapsed axis with results. + # Move that axis to the beginning to match percentile's + # convention. + if q.ndim != 0: + result = np.moveaxis(result, axis, 0) + + if out is not None: + out[...] = result + return result + + +def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation='linear'): + """ + Private function for rank 1 arrays. Compute quantile ignoring NaNs. + See nanpercentile for parameter usage + """ + arr1d, overwrite_input = _remove_nan_1d(arr1d, + overwrite_input=overwrite_input) + if arr1d.size == 0: + return np.full(q.shape, np.nan)[()] # convert to scalar + + return function_base._quantile_unchecked( + arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation) + + +def _nanvar_dispatcher( + a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanvar_dispatcher) +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + Returns the variance of the array elements, a measure of the spread of + a distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. 
If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the variance is computed. The default is to compute + the variance of the flattened array. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float32`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : int, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + + Returns + ------- + variance : ndarray, see dtype parameter above + If `out` is None, return a new array containing the variance, + otherwise return a reference to the output array. If ddof is >= the + number of non-NaN elements in a slice or the slice contains only + NaNs, then the result for that slice is NaN. + + See Also + -------- + std : Standard deviation + mean : Average + var : Variance while not ignoring NaNs + nanstd, nanmean + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite + population. ``ddof=0`` provides a maximum likelihood estimate of the + variance for normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. 
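+
+    With ``ddof=1`` the divisor becomes the number of non-NaN elements
+    minus one (a minimal sketch of the estimator note above):
+
+    >>> np.nanvar([1., 2., 3., np.nan], ddof=1)
+    1.0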
+
+    For this function to work on sub-classes of ndarray, they must define
+    `sum` with the kwarg `keepdims`.
+
+    Examples
+    --------
+    >>> a = np.array([[1, np.nan], [3, 4]])
+    >>> np.nanvar(a)
+    1.5555555555555554
+    >>> np.nanvar(a, axis=0)
+    array([ 1., 0.])
+    >>> np.nanvar(a, axis=1)
+    array([ 0., 0.25])
+
+    """
+    arr, mask = _replace_nan(a, 0)
+    if mask is None:
+        return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
+                      keepdims=keepdims)
+
+    if dtype is not None:
+        dtype = np.dtype(dtype)
+    if dtype is not None and not issubclass(dtype.type, np.inexact):
+        raise TypeError("If a is inexact, then dtype must be inexact")
+    if out is not None and not issubclass(out.dtype.type, np.inexact):
+        raise TypeError("If a is inexact, then out must be inexact")
+
+    # Compute mean
+    if type(arr) is np.matrix:
+        _keepdims = np._NoValue
+    else:
+        _keepdims = True
+    # we need to special case matrix for reverse compatibility
+    # in order for this to work, these sums need to be called with
+    # keepdims=True, however matrix now raises an error in this case, but
+    # the reason that it drops the keepdims kwarg is to force keepdims=True
+    # so this used to work by serendipity.
+    cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims)
+    avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims)
+    avg = _divide_by_count(avg, cnt)
+
+    # Compute squared deviation from mean.
+    np.subtract(arr, avg, out=arr, casting='unsafe')
+    arr = _copyto(arr, 0, mask)
+    if issubclass(arr.dtype.type, np.complexfloating):
+        sqr = np.multiply(arr, arr.conj(), out=arr).real
+    else:
+        sqr = np.multiply(arr, arr, out=arr)
+
+    # Compute variance.
+    var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+    if var.ndim < cnt.ndim:
+        # Subclasses of ndarray may ignore keepdims, so check here.
+        cnt = cnt.squeeze(axis)
+    dof = cnt - ddof
+    var = _divide_by_count(var, dof)
+
+    isbad = (dof <= 0)
+    if np.any(isbad):
+        warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, stacklevel=2)
+        # NaN, inf, or negative numbers are all possible bad
+        # values, so explicitly replace them with NaN.
+        var = _copyto(var, np.nan, isbad)
+    return var
+
+
+def _nanstd_dispatcher(
+        a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanstd_dispatcher)
+def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
+    """
+    Compute the standard deviation along the specified axis, while
+    ignoring NaNs.
+
+    Returns the standard deviation, a measure of the spread of a
+    distribution, of the non-NaN array elements. The standard deviation is
+    computed for the flattened array by default, otherwise over the
+    specified axis.
+
+    For all-NaN slices or slices with zero degrees of freedom, NaN is
+    returned and a `RuntimeWarning` is raised.
+
+    .. versionadded:: 1.8.0
+
+    Parameters
+    ----------
+    a : array_like
+        Calculate the standard deviation of the non-NaN values.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the standard deviation is computed. The default is
+        to compute the standard deviation of the flattened array.
+    dtype : dtype, optional
+        Type to use in computing the standard deviation. For arrays of
+        integer type the default is float64, for arrays of float types it
+        is the same as the array type.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output but the type (of the
+        calculated values) will be cast if necessary.
+ ddof : int, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this value is anything but the default it is passed through + as-is to the relevant functions of the sub-classes. If these + functions do not have a `keepdims` kwarg, a RuntimeError will + be raised. + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard + deviation, otherwise return a reference to the output array. If + ddof is >= the number of non-NaN elements in a slice or the slice + contains only NaNs, then the result for that slice is NaN. + + See Also + -------- + var, mean, std + nanvar, nanmean + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. + + The average squared deviation is normally calculated as + ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is + specified, the divisor ``N - ddof`` is used instead. In standard + statistical practice, ``ddof=1`` provides an unbiased estimator of the + variance of the infinite population. ``ddof=0`` provides a maximum + likelihood estimate of the variance for normally distributed variables. + The standard deviation computed in this function is the square root of + the estimated variance, so even with ``ddof=1``, it will not be an + unbiased estimate of the standard deviation per se. + + Note that, for complex numbers, `std` takes the absolute value before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example + below). Specifying a higher-accuracy accumulator using the `dtype` + keyword can alleviate this issue. + + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanstd(a) + 1.247219128924647 + >>> np.nanstd(a, axis=0) + array([ 1., 0.]) + >>> np.nanstd(a, axis=1) + array([ 0., 0.5]) + + """ + var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims) + if isinstance(var, np.ndarray): + std = np.sqrt(var, out=var) + else: + std = var.dtype.type(np.sqrt(var)) + return std diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/nanfunctions.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/nanfunctions.pyc new file mode 100644 index 0000000..0567d88 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/nanfunctions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/npyio.py b/project/venv/lib/python2.7/site-packages/numpy/lib/npyio.py new file mode 100644 index 0000000..db6a8e5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/npyio.py @@ -0,0 +1,2323 @@ +from __future__ import division, absolute_import, print_function + +import sys +import os +import re +import functools +import itertools +import warnings +import weakref +from operator import itemgetter, index as opindex + +import numpy as np +from . 
import format +from ._datasource import DataSource +from numpy.core import overrides +from numpy.core.multiarray import packbits, unpackbits +from numpy.core.overrides import set_module +from numpy.core._internal import recursive +from ._iotools import ( + LineSplitter, NameValidator, StringConverter, ConverterError, + ConverterLockError, ConversionWarning, _is_string_like, + has_nested_fields, flatten_dtype, easy_dtype, _decode_line + ) + +from numpy.compat import ( + asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode, + os_fspath, os_PathLike + ) +from numpy.core.numeric import pickle + +if sys.version_info[0] >= 3: + from collections.abc import Mapping +else: + from future_builtins import map + from collections import Mapping + + +@set_module('numpy') +def loads(*args, **kwargs): + # NumPy 1.15.0, 2017-12-10 + warnings.warn( + "np.loads is deprecated, use pickle.loads instead", + DeprecationWarning, stacklevel=2) + return pickle.loads(*args, **kwargs) + + +__all__ = [ + 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', + 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', + 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +class BagObj(object): + """ + BagObj(obj) + + Convert attribute look-ups to getitems on the object passed in. + + Parameters + ---------- + obj : class instance + Object on which attribute look-up is performed. + + Examples + -------- + >>> from numpy.lib.npyio import BagObj as BO + >>> class BagDemo(object): + ... def __getitem__(self, key): # An instance of BagObj(BagDemo) + ... # will call this method when any + ... # attribute look-up is required + ... result = "Doesn't matter what you want, " + ... return result + "you're gonna get this" + ... + >>> demo_obj = BagDemo() + >>> bagobj = BO(demo_obj) + >>> bagobj.hello_there + "Doesn't matter what you want, you're gonna get this" + >>> bagobj.I_can_be_anything + "Doesn't matter what you want, you're gonna get this" + + """ + + def __init__(self, obj): + # Use weakref to make NpzFile objects collectable by refcount + self._obj = weakref.proxy(obj) + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, '_obj')[key] + except KeyError: + raise AttributeError(key) + + def __dir__(self): + """ + Enables dir(bagobj) to list the files in an NpzFile. + + This also enables tab-completion in an interpreter or IPython. + """ + return list(object.__getattribute__(self, '_obj').keys()) + + +def zipfile_factory(file, *args, **kwargs): + """ + Create a ZipFile. + + Allows for Zip64, and the `file` argument can accept file, str, or + pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile + constructor. + """ + if not hasattr(file, 'read'): + file = os_fspath(file) + import zipfile + kwargs['allowZip64'] = True + return zipfile.ZipFile(file, *args, **kwargs) + + +class NpzFile(Mapping): + """ + NpzFile(fid) + + A dictionary-like object with lazy-loading of files in the zipped + archive provided on construction. + + `NpzFile` is used to load files in the NumPy ``.npz`` data archive + format. It assumes that files in the archive have a ``.npy`` extension, + other files are ignored. + + The arrays and file strings are lazily loaded on either + getitem access using ``obj['key']`` or attribute lookup using + ``obj.f.key``. 
A list of all files (without ``.npy`` extensions) can
+    be obtained with ``obj.files`` and the ZipFile object itself using
+    ``obj.zip``.
+
+    Attributes
+    ----------
+    files : list of str
+        List of all files in the archive with a ``.npy`` extension.
+    zip : ZipFile instance
+        The ZipFile object initialized with the zipped archive.
+    f : BagObj instance
+        An object on which attribute lookup can be performed as an
+        alternative to getitem access on the `NpzFile` instance itself.
+    allow_pickle : bool, optional
+        Allow loading pickled data. Default: True
+    pickle_kwargs : dict, optional
+        Additional keyword arguments to pass on to pickle.load.
+        These are only useful when loading object arrays saved on
+        Python 2 when using Python 3.
+
+    Parameters
+    ----------
+    fid : file or str
+        The zipped archive to open. This is either a file-like object
+        or a string containing the path to the archive.
+    own_fid : bool, optional
+        Whether NpzFile should close the file handle.
+        Requires that `fid` is a file-like object.
+
+    Examples
+    --------
+    >>> from tempfile import TemporaryFile
+    >>> outfile = TemporaryFile()
+    >>> x = np.arange(10)
+    >>> y = np.sin(x)
+    >>> np.savez(outfile, x=x, y=y)
+    >>> outfile.seek(0)
+
+    >>> npz = np.load(outfile)
+    >>> isinstance(npz, np.lib.npyio.NpzFile)
+    True
+    >>> npz.files
+    ['y', 'x']
+    >>> npz['x']  # getitem access
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+    >>> npz.f.x  # attribute lookup
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+    """
+
+    def __init__(self, fid, own_fid=False, allow_pickle=True,
+                 pickle_kwargs=None):
+        # Import is postponed to here since zipfile depends on gzip, an
+        # optional component of the so-called standard library.
+        _zip = zipfile_factory(fid)
+        self._files = _zip.namelist()
+        self.files = []
+        self.allow_pickle = allow_pickle
+        self.pickle_kwargs = pickle_kwargs
+        for x in self._files:
+            if x.endswith('.npy'):
+                self.files.append(x[:-4])
+            else:
+                self.files.append(x)
+        self.zip = _zip
+        self.f = BagObj(self)
+        if own_fid:
+            self.fid = fid
+        else:
+            self.fid = None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.close()
+
+    def close(self):
+        """
+        Close the file.
+
+        """
+        if self.zip is not None:
+            self.zip.close()
+            self.zip = None
+        if self.fid is not None:
+            self.fid.close()
+            self.fid = None
+        self.f = None  # break reference cycle
+
+    def __del__(self):
+        self.close()
+
+    # Implement the Mapping ABC
+    def __iter__(self):
+        return iter(self.files)
+
+    def __len__(self):
+        return len(self.files)
+
+    def __getitem__(self, key):
+        # FIXME: This seems like it will copy strings around
+        #   more than is strictly necessary. The zipfile
+        #   will read the string and then
+        #   the format.read_array will copy the string
+        #   to another place in memory.
+        #   It would be better if the zipfile could read
+        #   (or at least uncompress) the data
+        #   directly into the array memory.
+        member = False
+        if key in self._files:
+            member = True
+        elif key in self.files:
+            member = True
+            key += '.npy'
+        if member:
+            bytes = self.zip.open(key)
+            magic = bytes.read(len(format.MAGIC_PREFIX))
+            bytes.close()
+            if magic == format.MAGIC_PREFIX:
+                bytes = self.zip.open(key)
+                return format.read_array(bytes,
+                                         allow_pickle=self.allow_pickle,
+                                         pickle_kwargs=self.pickle_kwargs)
+            else:
+                return self.zip.read(key)
+        else:
+            raise KeyError("%s is not a file in the archive" % key)
+
+
+    if sys.version_info.major == 3:
+        # deprecate the python 2 dict apis that we supported by accident in
+        # python 3. 
We forgot to implement itervalues() at all in earlier
+        # versions of numpy, so no need to deprecate it here.
+
+        def iteritems(self):
+            # Numpy 1.15, 2018-02-20
+            warnings.warn(
+                "NpzFile.iteritems is deprecated in python 3, to match the "
+                "removal of dict.iteritems. Use .items() instead.",
+                DeprecationWarning, stacklevel=2)
+            return self.items()
+
+        def iterkeys(self):
+            # Numpy 1.15, 2018-02-20
+            warnings.warn(
+                "NpzFile.iterkeys is deprecated in python 3, to match the "
+                "removal of dict.iterkeys. Use .keys() instead.",
+                DeprecationWarning, stacklevel=2)
+            return self.keys()
+
+
+@set_module('numpy')
+def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
+         encoding='ASCII'):
+    """
+    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
+
+    Parameters
+    ----------
+    file : file-like object, string, or pathlib.Path
+        The file to read. File-like objects must support the
+        ``seek()`` and ``read()`` methods. Pickled files require that the
+        file-like object support the ``readline()`` method as well.
+    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
+        If not None, then memory-map the file, using the given mode (see
+        `numpy.memmap` for a detailed description of the modes). A
+        memory-mapped array is kept on disk. However, it can be accessed
+        and sliced like any ndarray. Memory mapping is especially useful
+        for accessing small fragments of large files without reading the
+        entire file into memory.
+    allow_pickle : bool, optional
+        Allow loading pickled object arrays stored in npy files. Reasons for
+        disallowing pickles include security, as loading pickled data can
+        execute arbitrary code. If pickles are disallowed, loading object
+        arrays will fail.
+        Default: True
+    fix_imports : bool, optional
+        Only useful when loading Python 2 generated pickled files on Python 3,
+        which includes npy/npz files containing object arrays. If `fix_imports`
+        is True, pickle will try to map the old Python 2 names to the new names
+        used in Python 3.
+    encoding : str, optional
+        What encoding to use when reading Python 2 strings. Only useful when
+        loading Python 2 generated pickled files in Python 3, which includes
+        npy/npz files containing object arrays. Values other than 'latin1',
+        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
+        data. Default: 'ASCII'
+
+    Returns
+    -------
+    result : array, tuple, dict, etc.
+        Data stored in the file. For ``.npz`` files, the returned instance
+        of the NpzFile class must be closed to avoid leaking file descriptors.
+
+    Raises
+    ------
+    IOError
+        If the input file does not exist or cannot be read.
+    ValueError
+        The file contains an object array, but ``allow_pickle=False`` was
+        given.
+
+    See Also
+    --------
+    save, savez, savez_compressed, loadtxt
+    memmap : Create a memory-map to an array stored in a file on disk.
+    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+    Notes
+    -----
+    - If the file contains pickle data, then whatever object is stored
+      in the pickle is returned.
+    - If the file is a ``.npy`` file, then a single array is returned.
+    - If the file is a ``.npz`` file, then a dictionary-like object is
+      returned, containing ``{filename: array}`` key-value pairs, one for
+      each file in the archive.
+    - If the file is a ``.npz`` file, the returned value supports the
+      context manager protocol in a similar fashion to the open function::
+
+        with load('foo.npz') as data:
+            a = data['a']
+
+      The underlying file descriptor is closed when exiting the 'with'
+      block.
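+    - A defensive call that refuses pickled object arrays, per the
+      security note on `allow_pickle` above (a sketch; ``'data.npy'`` is
+      a hypothetical file)::
+
+          np.load('data.npy', allow_pickle=False)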
+ + Examples + -------- + Store data to disk, and load it again: + + >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) + >>> np.load('/tmp/123.npy') + array([[1, 2, 3], + [4, 5, 6]]) + + Store compressed data to disk, and load it again: + + >>> a=np.array([[1, 2, 3], [4, 5, 6]]) + >>> b=np.array([1, 2]) + >>> np.savez('/tmp/123.npz', a=a, b=b) + >>> data = np.load('/tmp/123.npz') + >>> data['a'] + array([[1, 2, 3], + [4, 5, 6]]) + >>> data['b'] + array([1, 2]) + >>> data.close() + + Mem-map the stored array, and then access the second row + directly from disk: + + >>> X = np.load('/tmp/123.npy', mmap_mode='r') + >>> X[1, :] + memmap([4, 5, 6]) + + """ + if encoding not in ('ASCII', 'latin1', 'bytes'): + # The 'encoding' value for pickle also affects what encoding + # the serialized binary data of NumPy arrays is loaded + # in. Pickle does not pass on the encoding information to + # NumPy. The unpickling code in numpy.core.multiarray is + # written to assume that unicode data appearing where binary + # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. + # + # Other encoding values can corrupt binary data, and we + # purposefully disallow them. For the same reason, the errors= + # argument is not exposed, as values other than 'strict' + # result can similarly silently corrupt numerical data. + raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") + + if sys.version_info[0] >= 3: + pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) + else: + # Nothing to do on Python 2 + pickle_kwargs = {} + + # TODO: Use contextlib.ExitStack once we drop Python 2 + if hasattr(file, 'read'): + fid = file + own_fid = False + else: + fid = open(os_fspath(file), "rb") + own_fid = True + + try: + # Code to distinguish from NumPy binary files and pickles. + _ZIP_PREFIX = b'PK\x03\x04' + _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this + N = len(format.MAGIC_PREFIX) + magic = fid.read(N) + # If the file size is less than N, we need to make sure not + # to seek past the beginning of the file + fid.seek(-min(N, len(magic)), 1) # back-up + if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX): + # zip-file (assume .npz) + # Transfer file ownership to NpzFile + ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) + own_fid = False + return ret + elif magic == format.MAGIC_PREFIX: + # .npy file + if mmap_mode: + return format.open_memmap(file, mode=mmap_mode) + else: + return format.read_array(fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) + else: + # Try a pickle + if not allow_pickle: + raise ValueError("Cannot load file containing pickled data " + "when allow_pickle=False") + try: + return pickle.load(fid, **pickle_kwargs) + except Exception: + raise IOError( + "Failed to interpret file %s as a pickle" % repr(file)) + finally: + if own_fid: + fid.close() + + +def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): + return (arr,) + + +@array_function_dispatch(_save_dispatcher) +def save(file, arr, allow_pickle=True, fix_imports=True): + """ + Save an array to a binary file in NumPy ``.npy`` format. + + Parameters + ---------- + file : file, str, or pathlib.Path + File or filename to which the data is saved. If file is a file-object, + then the filename is unchanged. If file is a string or Path, a ``.npy`` + extension will be appended to the file name if it does not already + have one. + arr : array_like + Array data to be saved. 
+ allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for disallowing + pickles include security (loading pickled data can execute arbitrary + code) and portability (pickled objects may not be loadable on different + Python installations, for example if the stored objects require libraries + that are not available, and not all pickled data is compatible between + Python 2 and Python 3). + Default: True + fix_imports : bool, optional + Only useful in forcing objects in object arrays on Python 3 to be + pickled in a Python 2 compatible way. If `fix_imports` is True, pickle + will try to map the new Python 3 names to the old module names used in + Python 2, so that the pickle data stream is readable with Python 2. + + See Also + -------- + savez : Save several arrays into a ``.npz`` archive + savetxt, load + + Notes + ----- + For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + + >>> x = np.arange(10) + >>> np.save(outfile, x) + + >>> outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> np.load(outfile) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + own_fid = False + if hasattr(file, 'read'): + fid = file + else: + file = os_fspath(file) + if not file.endswith('.npy'): + file = file + '.npy' + fid = open(file, "wb") + own_fid = True + + if sys.version_info[0] >= 3: + pickle_kwargs = dict(fix_imports=fix_imports) + else: + # Nothing to do on Python 2 + pickle_kwargs = None + + try: + arr = np.asanyarray(arr) + format.write_array(fid, arr, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) + finally: + if own_fid: + fid.close() + + +def _savez_dispatcher(file, *args, **kwds): + for a in args: + yield a + for v in kwds.values(): + yield v + + +@array_function_dispatch(_savez_dispatcher) +def savez(file, *args, **kwds): + """ + Save several arrays into a single file in uncompressed ``.npz`` format. + + If arguments are passed in with no keywords, the corresponding variable + names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword + arguments are given, the corresponding variable names, in the ``.npz`` + file will match the keyword names. + + Parameters + ---------- + file : str or file + Either the file name (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the file name if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Since it is not possible for Python to + know the names of the arrays outside `savez`, the arrays will be saved + with names "arr_0", "arr_1", and so on. These arguments can be any + expression. + kwds : Keyword arguments, optional + Arrays to save to the file. Arrays will be saved in the file with the + keyword names. + + Returns + ------- + None + + See Also + -------- + save : Save a single array to a binary file in NumPy format. + savetxt : Save an array to a file as plain text. + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is not compressed and each file + in the archive contains one variable in ``.npy`` format. For a + description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + When opening the saved ``.npz`` file with `load` a `NpzFile` object is + returned. 
This is a dictionary-like object which can be queried for + its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + + Using `savez` with \\*args, the arrays are saved with default names. + + >>> np.savez(outfile, x, y) + >>> outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['arr_1', 'arr_0'] + >>> npzfile['arr_0'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + Using `savez` with \\**kwds, the arrays are saved with the keyword names. + + >>> outfile = TemporaryFile() + >>> np.savez(outfile, x=x, y=y) + >>> outfile.seek(0) + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['y', 'x'] + >>> npzfile['x'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + _savez(file, args, kwds, False) + + +def _savez_compressed_dispatcher(file, *args, **kwds): + for a in args: + yield a + for v in kwds.values(): + yield v + + +@array_function_dispatch(_savez_compressed_dispatcher) +def savez_compressed(file, *args, **kwds): + """ + Save several arrays into a single file in compressed ``.npz`` format. + + If keyword arguments are given, then filenames are taken from the keywords. + If arguments are passed in with no keywords, then stored file names are + arr_0, arr_1, etc. + + Parameters + ---------- + file : str or file + Either the file name (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the file name if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Since it is not possible for Python to + know the names of the arrays outside `savez`, the arrays will be saved + with names "arr_0", "arr_1", and so on. These arguments can be any + expression. + kwds : Keyword arguments, optional + Arrays to save to the file. Arrays will be saved in the file with the + keyword names. + + Returns + ------- + None + + See Also + -------- + numpy.save : Save a single array to a binary file in NumPy format. + numpy.savetxt : Save an array to a file as plain text. + numpy.savez : Save several arrays into an uncompressed ``.npz`` file format + numpy.load : Load the files created by savez_compressed. + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is compressed with + ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable + in ``.npy`` format. For a description of the ``.npy`` format, see + :py:mod:`numpy.lib.format`. + + + When opening the saved ``.npz`` file with `load` a `NpzFile` object is + returned. This is a dictionary-like object which can be queried for + its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Examples + -------- + >>> test_array = np.random.rand(3, 2) + >>> test_vector = np.random.rand(4) + >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) + >>> loaded = np.load('/tmp/123.npz') + >>> print(np.array_equal(test_array, loaded['a'])) + True + >>> print(np.array_equal(test_vector, loaded['b'])) + True + + """ + _savez(file, args, kwds, True) + + +def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): + # Import is postponed to here since zipfile depends on gzip, an optional + # component of the so-called standard library. 
+
+    import zipfile
+
+    if not hasattr(file, 'read'):
+        file = os_fspath(file)
+        if not file.endswith('.npz'):
+            file = file + '.npz'
+
+    namedict = kwds
+    for i, val in enumerate(args):
+        key = 'arr_%d' % i
+        if key in namedict.keys():
+            raise ValueError(
+                "Cannot use un-named variables and keyword %s" % key)
+        namedict[key] = val
+
+    if compress:
+        compression = zipfile.ZIP_DEFLATED
+    else:
+        compression = zipfile.ZIP_STORED
+
+    zipf = zipfile_factory(file, mode="w", compression=compression)
+
+    if sys.version_info >= (3, 6):
+        # Since Python 3.6 it is possible to write directly to a ZIP file.
+        for key, val in namedict.items():
+            fname = key + '.npy'
+            val = np.asanyarray(val)
+            force_zip64 = val.nbytes >= 2**30
+            with zipf.open(fname, 'w', force_zip64=force_zip64) as fid:
+                format.write_array(fid, val,
+                                   allow_pickle=allow_pickle,
+                                   pickle_kwargs=pickle_kwargs)
+    else:
+        # Stage arrays in a temporary file on disk, before writing to zip.
+
+        # Import deferred for startup time improvement
+        import tempfile
+        # Since target file might be big enough to exceed capacity of a global
+        # temporary directory, create temp file side-by-side with the target file.
+        file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
+        fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
+        os.close(fd)
+        try:
+            for key, val in namedict.items():
+                fname = key + '.npy'
+                fid = open(tmpfile, 'wb')
+                try:
+                    format.write_array(fid, np.asanyarray(val),
+                                       allow_pickle=allow_pickle,
+                                       pickle_kwargs=pickle_kwargs)
+                    fid.close()
+                    fid = None
+                    zipf.write(tmpfile, arcname=fname)
+                except IOError as exc:
+                    raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
+                finally:
+                    if fid:
+                        fid.close()
+        finally:
+            os.remove(tmpfile)
+
+    zipf.close()
+
+
+def _getconv(dtype):
+    """ Find the correct dtype converter. Adapted from matplotlib """
+
+    def floatconv(x):
+        x = x.lower()  # normalize case so the '0x' check below also catches '0X'
+        if '0x' in x:
+            return float.fromhex(x)
+        return float(x)
+
+    typ = dtype.type
+    if issubclass(typ, np.bool_):
+        return lambda x: bool(int(x))
+    if issubclass(typ, np.uint64):
+        return np.uint64
+    if issubclass(typ, np.int64):
+        return np.int64
+    if issubclass(typ, np.integer):
+        return lambda x: int(float(x))
+    elif issubclass(typ, np.longdouble):
+        return np.longdouble
+    elif issubclass(typ, np.floating):
+        return floatconv
+    elif issubclass(typ, complex):
+        return lambda x: complex(asstr(x).replace('+-', '-'))
+    elif issubclass(typ, np.bytes_):
+        return asbytes
+    elif issubclass(typ, np.unicode_):
+        return asunicode
+    else:
+        return asstr
+
+# number of lines loadtxt reads in one chunk, can be overridden for testing
+_loadtxt_chunksize = 50000
+
+
+@set_module('numpy')
+def loadtxt(fname, dtype=float, comments='#', delimiter=None,
+            converters=None, skiprows=0, usecols=None, unpack=False,
+            ndmin=0, encoding='bytes', max_rows=None):
+    """
+    Load data from a text file.
+
+    Each row in the text file must have the same number of values.
+
+    Parameters
+    ----------
+    fname : file, str, or pathlib.Path
+        File, filename, or generator to read. If the filename extension is
+        ``.gz`` or ``.bz2``, the file is first decompressed. Note that
+        generators should return byte strings for Python 3.
+    dtype : data-type, optional
+        Data-type of the resulting array; default: float. If this is a
+        structured data-type, the resulting array will be 1-dimensional, and
+        each row will be interpreted as an element of the array.
In this + case, the number of columns used must match the number of fields in + the data-type. + comments : str or sequence of str, optional + The characters or list of characters used to indicate the start of a + comment. None implies no comments. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is '#'. + delimiter : str, optional + The string used to separate values. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is whitespace. + converters : dict, optional + A dictionary mapping column number to a function that will parse the + column string into the desired value. E.g., if column 0 is a date + string: ``converters = {0: datestr2num}``. Converters can also be + used to provide a default value for missing data (but see also + `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. + Default: None. + skiprows : int, optional + Skip the first `skiprows` lines; default: 0. + usecols : int or sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. + The default, None, results in all columns being read. + + .. versionchanged:: 1.11.0 + When a single column has to be read it is possible to use + an integer instead of a tuple. E.g ``usecols = 3`` reads the + fourth column the same way as ``usecols = (3,)`` would. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = loadtxt(...)``. When used with a structured + data-type, arrays are returned for each field. Default is False. + ndmin : int, optional + The returned array will have at least `ndmin` dimensions. + Otherwise mono-dimensional axes will be squeezed. + Legal values: 0 (default), 1 or 2. + + .. versionadded:: 1.6.0 + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + The special value 'bytes' enables backward compatibility workarounds + that ensures you receive byte arrays as results if possible and passes + 'latin1' encoded strings to converters. Override this value to receive + unicode arrays and pass strings as input to converters. If set to None + the system default is used. The default value is 'bytes'. + + .. versionadded:: 1.14.0 + max_rows : int, optional + Read `max_rows` lines of content after `skiprows` lines. The default + is to read all the lines. + + .. versionadded:: 1.16.0 + + Returns + ------- + out : ndarray + Data read from the text file. + + See Also + -------- + load, fromstring, fromregex + genfromtxt : Load data with missing values handled as specified. + scipy.io.loadmat : reads MATLAB data files + + Notes + ----- + This function aims to be a fast reader for simply formatted files. The + `genfromtxt` function provides more sophisticated handling of, e.g., + lines with missing values. + + .. versionadded:: 1.10.0 + + The strings produced by the Python float.hex method can be used as + input for floats. + + Examples + -------- + >>> from io import StringIO # StringIO behaves like a file object + >>> c = StringIO(u"0 1\\n2 3") + >>> np.loadtxt(c) + array([[ 0., 1.], + [ 2., 3.]]) + + >>> d = StringIO(u"M 21 72\\nF 35 58") + >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), + ... 
'formats': ('S1', 'i4', 'f4')})
+    array([('M', 21, 72.0), ('F', 35, 58.0)],
+          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
+
+    >>> c = StringIO(u"1,0,2\\n3,0,4")
+    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+    >>> x
+    array([ 1., 3.])
+    >>> y
+    array([ 2., 4.])
+
+    """
+    # Type conversions for Py3 convenience
+    if comments is not None:
+        if isinstance(comments, (basestring, bytes)):
+            comments = [comments]
+        comments = [_decode_line(x) for x in comments]
+        # Compile regex for comments beforehand
+        comments = (re.escape(comment) for comment in comments)
+        regex_comments = re.compile('|'.join(comments))
+
+    if delimiter is not None:
+        delimiter = _decode_line(delimiter)
+
+    user_converters = converters
+
+    if encoding == 'bytes':
+        encoding = None
+        byte_converters = True
+    else:
+        byte_converters = False
+
+    if usecols is not None:
+        # Allow usecols to be a single int or a sequence of ints
+        try:
+            usecols_as_list = list(usecols)
+        except TypeError:
+            usecols_as_list = [usecols]
+        for col_idx in usecols_as_list:
+            try:
+                opindex(col_idx)
+            except TypeError as e:
+                e.args = (
+                    "usecols must be an int or a sequence of ints but "
+                    "it contains at least one element of type %s" %
+                    type(col_idx),
+                    )
+                raise
+        # Fall back to existing code
+        usecols = usecols_as_list
+
+    fown = False
+    try:
+        if isinstance(fname, os_PathLike):
+            fname = os_fspath(fname)
+        if _is_string_like(fname):
+            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+            fencoding = getattr(fh, 'encoding', 'latin1')
+            fh = iter(fh)
+            fown = True
+        else:
+            fh = iter(fname)
+            fencoding = getattr(fname, 'encoding', 'latin1')
+    except TypeError:
+        raise ValueError('fname must be a string, file handle, or generator')
+
+    # input may be a python2 io stream
+    if encoding is not None:
+        fencoding = encoding
+    # we must assume local encoding
+    # TODO emit portability warning?
+    elif fencoding is None:
+        import locale
+        fencoding = locale.getpreferredencoding()
+
+    # not to be confused with the flatten_dtype we import...
+    @recursive
+    def flatten_dtype_internal(self, dt):
+        """Unpack a structured data-type, and produce re-packing info."""
+        if dt.names is None:
+            # If the dtype is flattened, return.
+            # If the dtype has a shape, the dtype occurs
+            # in the list more than once.
+            shape = dt.shape
+            if len(shape) == 0:
+                return ([dt.base], None)
+            else:
+                packing = [(shape[-1], list)]
+                if len(shape) > 1:
+                    for dim in dt.shape[-2::-1]:
+                        packing = [(dim*packing[0][0], packing*dim)]
+                return ([dt.base] * int(np.prod(dt.shape)), packing)
+        else:
+            types = []
+            packing = []
+            for field in dt.names:
+                tp, bytes = dt.fields[field]
+                flat_dt, flat_packing = self(tp)
+                types.extend(flat_dt)
+                # Avoid extra nesting for subarrays
+                if tp.ndim > 0:
+                    packing.extend(flat_packing)
+                else:
+                    packing.append((len(flat_dt), flat_packing))
+            return (types, packing)
+
+    @recursive
+    def pack_items(self, items, packing):
+        """Pack items into nested lists based on re-packing info."""
+        if packing is None:
+            return items[0]
+        elif packing is tuple:
+            return tuple(items)
+        elif packing is list:
+            return list(items)
+        else:
+            start = 0
+            ret = []
+            for length, subpacking in packing:
+                ret.append(self(items[start:start+length], subpacking))
+                start += length
+            return tuple(ret)
+
+    def split_line(line):
+        """Chop off comments, strip, and split at delimiter.
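+
+        When comments are given they are removed with the precompiled
+        ``regex_comments`` pattern before splitting; an empty or
+        comment-only line yields an empty list.
+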
""" + line = _decode_line(line, encoding=encoding) + + if comments is not None: + line = regex_comments.split(line, maxsplit=1)[0] + line = line.strip('\r\n') + if line: + return line.split(delimiter) + else: + return [] + + def read_data(chunk_size): + """Parse each line, including the first. + + The file read, `fh`, is a global defined above. + + Parameters + ---------- + chunk_size : int + At most `chunk_size` lines are read at a time, with iteration + until all lines are read. + + """ + X = [] + line_iter = itertools.chain([first_line], fh) + line_iter = itertools.islice(line_iter, max_rows) + for i, line in enumerate(line_iter): + vals = split_line(line) + if len(vals) == 0: + continue + if usecols: + vals = [vals[j] for j in usecols] + if len(vals) != N: + line_num = i + skiprows + 1 + raise ValueError("Wrong number of columns at line %d" + % line_num) + + # Convert each value according to its column and store + items = [conv(val) for (conv, val) in zip(converters, vals)] + + # Then pack it according to the dtype's nesting + items = pack_items(items, packing) + X.append(items) + if len(X) > chunk_size: + yield X + X = [] + if X: + yield X + + try: + # Make sure we're dealing with a proper dtype + dtype = np.dtype(dtype) + defconv = _getconv(dtype) + + # Skip the first `skiprows` lines + for i in range(skiprows): + next(fh) + + # Read until we find a line with some values, and use + # it to estimate the number of columns, N. + first_vals = None + try: + while not first_vals: + first_line = next(fh) + first_vals = split_line(first_line) + except StopIteration: + # End of lines reached + first_line = '' + first_vals = [] + warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2) + N = len(usecols or first_vals) + + dtype_types, packing = flatten_dtype_internal(dtype) + if len(dtype_types) > 1: + # We're dealing with a structured array, each field of + # the dtype matches a column + converters = [_getconv(dt) for dt in dtype_types] + else: + # All fields have the same dtype + converters = [defconv for i in range(N)] + if N > 1: + packing = [(N, tuple)] + + # By preference, use the converters specified by the user + for i, conv in (user_converters or {}).items(): + if usecols: + try: + i = usecols.index(i) + except ValueError: + # Unused converter specified + continue + if byte_converters: + # converters may use decode to workaround numpy's old behaviour, + # so encode the string again before passing to the user converter + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + import functools + converters[i] = functools.partial(tobytes_first, conv=conv) + else: + converters[i] = conv + + converters = [conv if conv is not bytes else + lambda x: x.encode(fencoding) for conv in converters] + + # read data in chunks and fill it into an array via resize + # over-allocating and shrinking the array later may be faster but is + # probably not relevant compared to the cost of actually reading and + # converting the data + X = None + for x in read_data(_loadtxt_chunksize): + if X is None: + X = np.array(x, dtype) + else: + nshape = list(X.shape) + pos = nshape[0] + nshape[0] += len(x) + X.resize(nshape, refcheck=False) + X[pos:, ...] = x + finally: + if fown: + fh.close() + + if X is None: + X = np.array([], dtype) + + # Multicolumn data are returned with shape (1, N, M), i.e. 
+ # (1, 1, M) for a single row - remove the singleton dimension there + if X.ndim == 3 and X.shape[:2] == (1, 1): + X.shape = (1, -1) + + # Verify that the array has at least dimensions `ndmin`. + # Check correctness of the values of `ndmin` + if ndmin not in [0, 1, 2]: + raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) + # Tweak the size and shape of the arrays - remove extraneous dimensions + if X.ndim > ndmin: + X = np.squeeze(X) + # and ensure we have the minimum number of dimensions asked for + # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 + if X.ndim < ndmin: + if ndmin == 1: + X = np.atleast_1d(X) + elif ndmin == 2: + X = np.atleast_2d(X).T + + if unpack: + if len(dtype_types) > 1: + # For structured arrays, return an array for each field. + return [X[field] for field in dtype.names] + else: + return X.T + else: + return X + + +def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None, + header=None, footer=None, comments=None, + encoding=None): + return (X,) + + +@array_function_dispatch(_savetxt_dispatcher) +def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', + footer='', comments='# ', encoding=None): + """ + Save an array to a text file. + + Parameters + ---------- + fname : filename or file handle + If the filename ends in ``.gz``, the file is automatically saved in + compressed gzip format. `loadtxt` understands gzipped files + transparently. + X : 1D or 2D array_like + Data to be saved to a text file. + fmt : str or sequence of strs, optional + A single format (%10.5f), a sequence of formats, or a + multi-format string, e.g. 'Iteration %d -- %10.5f', in which + case `delimiter` is ignored. For complex `X`, the legal options + for `fmt` are: + + * a single specifier, `fmt='%.4e'`, resulting in numbers formatted + like `' (%s+%sj)' % (fmt, fmt)` + * a full string specifying every real and imaginary part, e.g. + `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns + * a list of specifiers, one per column - in this case, the real + and imaginary part must have separate specifiers, + e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns + delimiter : str, optional + String or character separating columns. + newline : str, optional + String or character separating lines. + + .. versionadded:: 1.5.0 + header : str, optional + String that will be written at the beginning of the file. + + .. versionadded:: 1.7.0 + footer : str, optional + String that will be written at the end of the file. + + .. versionadded:: 1.7.0 + comments : str, optional + String that will be prepended to the ``header`` and ``footer`` strings, + to mark them as comments. Default: '# ', as expected by e.g. + ``numpy.loadtxt``. + + .. versionadded:: 1.7.0 + encoding : {None, str}, optional + Encoding used to encode the outputfile. Does not apply to output + streams. If the encoding is something other than 'bytes' or 'latin1' + you will not be able to load the file in NumPy versions < 1.14. Default + is 'latin1'. + + .. versionadded:: 1.14.0 + + + See Also + -------- + save : Save an array to a binary file in NumPy ``.npy`` format + savez : Save several arrays into an uncompressed ``.npz`` archive + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + Further explanation of the `fmt` parameter + (``%[flag]width[.precision]specifier``): + + flags: + ``-`` : left justify + + ``+`` : Forces to precede result with + or -. + + ``0`` : Left pad the number with zeros instead of space (see width). 
+
+    width:
+        Minimum number of characters to be printed. The value is not truncated
+        if it has more characters.
+
+    precision:
+        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
+          digits.
+        - For ``e, E`` and ``f`` specifiers, the number of digits to print
+          after the decimal point.
+        - For ``g`` and ``G``, the maximum number of significant digits.
+        - For ``s``, the maximum number of characters.
+
+    specifiers:
+        ``c`` : character
+
+        ``d`` or ``i`` : signed decimal integer
+
+        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
+
+        ``f`` : decimal floating point
+
+        ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+        ``o`` : signed octal
+
+        ``s`` : string of characters
+
+        ``u`` : unsigned decimal integer
+
+        ``x,X`` : unsigned hexadecimal integer
+
+    This explanation of ``fmt`` is not complete, for an exhaustive
+    specification see [1]_.
+
+    References
+    ----------
+    .. [1] `Format Specification Mini-Language
+           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+           Python Documentation.
+
+    Examples
+    --------
+    >>> x = y = z = np.arange(0.0,5.0,1.0)
+    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
+    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
+    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
+
+    """
+
+    # Py3 conversions first
+    if isinstance(fmt, bytes):
+        fmt = asstr(fmt)
+    delimiter = asstr(delimiter)
+
+    class WriteWrap(object):
+        """Convert to unicode in py2 or to bytes on bytestream inputs.
+
+        """
+        def __init__(self, fh, encoding):
+            self.fh = fh
+            self.encoding = encoding
+            self.do_write = self.first_write
+
+        def close(self):
+            self.fh.close()
+
+        def write(self, v):
+            self.do_write(v)
+
+        def write_bytes(self, v):
+            if isinstance(v, bytes):
+                self.fh.write(v)
+            else:
+                self.fh.write(v.encode(self.encoding))
+
+        def write_normal(self, v):
+            self.fh.write(asunicode(v))
+
+        def first_write(self, v):
+            try:
+                self.write_normal(v)
+                self.write = self.write_normal
+            except TypeError:
+                # input is probably a bytestream
+                self.write_bytes(v)
+                self.write = self.write_bytes
+
+    own_fh = False
+    if isinstance(fname, os_PathLike):
+        fname = os_fspath(fname)
+    if _is_string_like(fname):
+        # datasource doesn't support creating a new file ...
+        open(fname, 'wt').close()
+        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
+        own_fh = True
+        # need to convert str to unicode for text io output
+        if sys.version_info[0] == 2:
+            fh = WriteWrap(fh, encoding or 'latin1')
+    elif hasattr(fname, 'write'):
+        # wrap to handle byte output streams
+        fh = WriteWrap(fname, encoding or 'latin1')
+    else:
+        raise ValueError('fname must be a string or file handle')
+
+    try:
+        X = np.asarray(X)
+
+        # Handle 1-dimensional arrays
+        if X.ndim == 0 or X.ndim > 2:
+            raise ValueError(
+                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
+        elif X.ndim == 1:
+            # Common case -- 1d array of numbers
+            if X.dtype.names is None:
+                X = np.atleast_2d(X).T
+                ncol = 1
+
+            # Complex dtype -- each field indicates a separate column
+            else:
+                ncol = len(X.dtype.descr)
+        else:
+            ncol = X.shape[1]
+
+        iscomplex_X = np.iscomplexobj(X)
+        # `fmt` can be a string with multiple insertion points or a
+        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
+        if type(fmt) in (list, tuple):
+            if len(fmt) != ncol:
+                raise AttributeError('fmt has wrong shape. 
%s' % str(fmt)) + format = asstr(delimiter).join(map(asstr, fmt)) + elif isinstance(fmt, str): + n_fmt_chars = fmt.count('%') + error = ValueError('fmt has wrong number of %% formats: %s' % fmt) + if n_fmt_chars == 1: + if iscomplex_X: + fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol + else: + fmt = [fmt, ] * ncol + format = delimiter.join(fmt) + elif iscomplex_X and n_fmt_chars != (2 * ncol): + raise error + elif ((not iscomplex_X) and n_fmt_chars != ncol): + raise error + else: + format = fmt + else: + raise ValueError('invalid fmt: %r' % (fmt,)) + + if len(header) > 0: + header = header.replace('\n', '\n' + comments) + fh.write(comments + header + newline) + if iscomplex_X: + for row in X: + row2 = [] + for number in row: + row2.append(number.real) + row2.append(number.imag) + s = format % tuple(row2) + newline + fh.write(s.replace('+-', '-')) + else: + for row in X: + try: + v = format % tuple(row) + newline + except TypeError: + raise TypeError("Mismatch between array dtype ('%s') and " + "format specifier ('%s')" + % (str(X.dtype), format)) + fh.write(v) + + if len(footer) > 0: + footer = footer.replace('\n', '\n' + comments) + fh.write(comments + footer + newline) + finally: + if own_fh: + fh.close() + + +@set_module('numpy') +def fromregex(file, regexp, dtype, encoding=None): + """ + Construct an array from a text file, using regular expression parsing. + + The returned array is always a structured array, and is constructed from + all matches of the regular expression in the file. Groups in the regular + expression are converted to fields of the structured array. + + Parameters + ---------- + file : str or file + File name or file object to read. + regexp : str or regexp + Regular expression used to parse the file. + Groups in the regular expression correspond to fields in the dtype. + dtype : dtype or list of dtypes + Dtype for the structured array. + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + + .. versionadded:: 1.14.0 + + Returns + ------- + output : ndarray + The output array, containing the part of the content of `file` that + was matched by `regexp`. `output` is always a structured array. + + Raises + ------ + TypeError + When `dtype` is not a valid dtype for a structured array. + + See Also + -------- + fromstring, loadtxt + + Notes + ----- + Dtypes for structured arrays can be specified in several forms, but all + forms specify at least the data type and field name. For details see + `doc.structured_arrays`. + + Examples + -------- + >>> f = open('test.dat', 'w') + >>> f.write("1312 foo\\n1534 bar\\n444 qux") + >>> f.close() + + >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] + >>> output = np.fromregex('test.dat', regexp, + ... 
[('num', np.int64), ('key', 'S3')])
+    >>> output
+    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
+          dtype=[('num', '<i8'), ('key', '|S3')])
+    >>> output['num']
+    array([1312, 1534,  444], dtype=int64)
+
+    """
+    own_fh = False
+    if not hasattr(file, "read"):
+        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
+        own_fh = True
+
+    try:
+        if not isinstance(dtype, np.dtype):
+            dtype = np.dtype(dtype)
+
+        content = file.read()
+        if isinstance(content, bytes) and isinstance(regexp, np.unicode):
+            regexp = asbytes(regexp)
+        elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
+            regexp = asstr(regexp)
+
+        if not hasattr(regexp, 'match'):
+            regexp = re.compile(regexp)
+        seq = regexp.findall(content)
+        if seq and not isinstance(seq[0], tuple):
+            # Only one group is in the regexp.
+            # Create the new array as a single data-type and then
+            # re-interpret as a single-field structured array.
+            newdtype = np.dtype(dtype[dtype.names[0]])
+            output = np.array(seq, dtype=newdtype)
+            output.dtype = dtype
+        else:
+            output = np.array(seq, dtype=dtype)
+
+        return output
+    finally:
+        if own_fh:
+            file.close()
+
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+@set_module('numpy')
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
+               skip_header=0, skip_footer=0, converters=None,
+               missing_values=None, filling_values=None, usecols=None,
+               names=None, excludelist=None, deletechars=None,
+               replace_space='_', autostrip=False, case_sensitive=True,
+               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
+               invalid_raise=True, max_rows=None, encoding='bytes'):
+    """
+    Load data from a text file, with missing values handled as specified.
+
+    Each line past the first `skip_header` lines is split at the `delimiter`
+    character, and characters following the `comments` character are discarded.
+
+    Parameters
+    ----------
+    fname : file, str, pathlib.Path, list of str, generator
+        File, filename, list, or generator to read.  If the filename
+        extension is `.gz` or `.bz2`, the file is first decompressed. Note
+        that generators must return byte strings in Python 3k.  The strings
+        in a list or produced by a generator are treated as lines.
+    dtype : dtype, optional
+        Data type of the resulting array.
+        If None, the dtypes will be determined by the contents of each
+        column, individually.
+    comments : str, optional
+        The character used to indicate the start of a comment.
+        All the characters occurring on a line after a comment are discarded.
+    delimiter : str, int, or sequence, optional
+        The string used to separate values.  By default, any consecutive
+        whitespaces act as delimiter.  An integer or sequence of integers
+        can also be provided as width(s) of each field.
+    skiprows : int, optional
+        `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
+    skip_header : int, optional
+        The number of lines to skip at the beginning of the file.
+    skip_footer : int, optional
+        The number of lines to skip at the end of the file.
+    converters : variable, optional
+        The set of functions that convert the data of a column to a value.
+        The converters can also be used to provide a default value
+        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
+    missing : variable, optional
+        `missing` was removed in numpy 1.10. Please use `missing_values`
+        instead.
+    missing_values : variable, optional
+        The set of strings corresponding to missing data.
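+        A single string, a comma-separated string, a sequence of strings,
+        or a dict keyed by column index or column name may be given, e.g.
+        ``missing_values={0: "N/A"}`` (illustrative value).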
+    filling_values : variable, optional
+        The set of values to be used as default when the data are missing.
+    usecols : sequence, optional
+        Which columns to read, with 0 being the first.  For example,
+        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
+    names : {None, True, str, sequence}, optional
+        If `names` is True, the field names are read from the first line after
+        the first `skip_header` lines. This line can optionally be preceded
+        by a comment delimiter. If `names` is a sequence or a single-string of
+        comma-separated names, the names will be used to define the field names
+        in a structured dtype. If `names` is None, the names of the dtype
+        fields will be used, if any.
+    excludelist : sequence, optional
+        A list of names to exclude. This list is appended to the default list
+        ['return','file','print']. Excluded names are appended an underscore:
+        for example, `file` would become `file_`.
+    deletechars : str, optional
+        A string combining invalid characters that must be deleted from the
+        names.
+    defaultfmt : str, optional
+        A format used to define default field names, such as "f%i" or "f_%02i".
+    autostrip : bool, optional
+        Whether to automatically strip white spaces from the variables.
+    replace_space : char, optional
+        Character(s) used in replacement of white spaces in the variables
+        names. By default, use a '_'.
+    case_sensitive : {True, False, 'upper', 'lower'}, optional
+        If True, field names are case sensitive.
+        If False or 'upper', field names are converted to upper case.
+        If 'lower', field names are converted to lower case.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = loadtxt(...)``
+    usemask : bool, optional
+        If True, return a masked array.
+        If False, return a regular array.
+    loose : bool, optional
+        If True, do not raise errors for invalid values.
+    invalid_raise : bool, optional
+        If True, an exception is raised if an inconsistency is detected in the
+        number of columns.
+        If False, a warning is emitted and the offending lines are skipped.
+    max_rows : int, optional
+        The maximum number of rows to read. Must not be used with skip_footer
+        at the same time.  If given, the value must be at least 1. Default is
+        to read the entire file.
+
+        .. versionadded:: 1.10.0
+    encoding : str, optional
+        Encoding used to decode the inputfile. Does not apply when `fname` is
+        a file object.  The special value 'bytes' enables backward compatibility
+        workarounds that ensure that you receive byte arrays when possible
+        and passes latin1 encoded strings to converters. Override this value to
+        receive unicode arrays and pass strings as input to converters.  If set
+        to None the system default is used. The default value is 'bytes'.
+
+        .. versionadded:: 1.14.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file. If `usemask` is True, this is a
+        masked array.
+
+    See Also
+    --------
+    numpy.loadtxt : equivalent function when no data is missing.
+
+    Notes
+    -----
+    * When spaces are used as delimiters, or when no delimiter has been given
+      as input, there should not be any missing data between two fields.
+    * When the variables are named (either by a flexible dtype or with `names`),
+      there must not be any header in the file (else a ValueError
+      exception is raised).
+    * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function does remove spaces.
+
+    References
+    ----------
+    .. 
[1] NumPy User Guide, section `I/O with NumPy
+           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+    Examples
+    ---------
+    >>> from io import StringIO
+    >>> import numpy as np
+
+    Comma delimited file with mixed dtype
+
+    >>> s = StringIO(u"1,1.3,abcde")
+    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+    ... ('mystring','S5')], delimiter=",")
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+
+    Using dtype = None
+
+    >>> s.seek(0) # needed for StringIO example only
+    >>> data = np.genfromtxt(s, dtype=None,
+    ... names = ['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+
+    Specifying dtype and names
+
+    >>> s.seek(0)
+    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+    ... names=['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+
+    An example with fixed-width columns
+
+    >>> s = StringIO(u"11.3abcde")
+    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+    ...     delimiter=[1,3,5])
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
+
+    """
+    if usecols:
+        for (i, current) in enumerate(usecols):
+            # if usecols is negative, we have to pull it out of the len of first line
+            if current < 0:
+                usecols[i] = current + len(first_values)
+        # If the dtype is not None, make sure we update it
+        if (dtype is not None) and (len(dtype) > nbcols):
+            descr = dtype.descr
+            dtype = np.dtype([descr[_] for _ in usecols])
+            names = list(dtype.names)
+        # If `names` is not None, update the names
+        elif (names is not None) and (len(names) > nbcols):
+            names = [names[_] for _ in usecols]
+    elif (names is not None) and (dtype is not None):
+        names = list(dtype.names)
+
+    # Process the missing values ...............................
+    # Rename missing_values for convenience
+    user_missing_values = missing_values or ()
+    if isinstance(user_missing_values, bytes):
+        user_missing_values = user_missing_values.decode('latin1')
+
+    # Define the list of missing_values (one column: one list)
+    missing_values = [list(['']) for _ in range(nbcols)]
+
+    # We have a dictionary: process it field by field
+    if isinstance(user_missing_values, dict):
+        # Loop on the items
+        for (key, val) in user_missing_values.items():
+            # Is the key a string ?
+            if _is_string_like(key):
+                try:
+                    # Transform it into an integer
+                    key = names.index(key)
+                except ValueError:
+                    # We couldn't find it: the name must have been dropped
+                    continue
+            # Redefine the key as needed if it's a column number
+            if usecols:
+                try:
+                    key = usecols.index(key)
+                except ValueError:
+                    pass
+            # Transform the value as a list of string
+            if isinstance(val, (list, tuple)):
+                val = [str(_) for _ in val]
+            else:
+                val = [str(val), ]
+            # Add the value(s) to the current list of missing
+            if key is None:
+                # None acts as default
+                for miss in missing_values:
+                    miss.extend(val)
+            else:
+                missing_values[key].extend(val)
+    # We have a sequence : each item matches a column
+    elif isinstance(user_missing_values, (list, tuple)):
+        for (value, entry) in zip(user_missing_values, missing_values):
+            value = str(value)
+            if value not in entry:
+                entry.append(value)
+    # We have a string : apply it to all entries
+    elif isinstance(user_missing_values, basestring):
+        user_value = user_missing_values.split(",")
+        for entry in missing_values:
+            entry.extend(user_value)
+    # We have something else: apply it to all entries
+    else:
+        for entry in missing_values:
+            entry.extend([str(user_missing_values)])
+
+    # Process the filling_values ...............................
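+    # e.g. filling_values={'age': -1} (illustrative name/value) substitutes
+    # -1 for missing entries of that column; a list or tuple applies
+    # per position, a plain scalar to every column, and None keeps the
+    # converter defaults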
+ # Rename the input for convenience + user_filling_values = filling_values + if user_filling_values is None: + user_filling_values = [] + # Define the default + filling_values = [None] * nbcols + # We have a dictionary : update each entry individually + if isinstance(user_filling_values, dict): + for (key, val) in user_filling_values.items(): + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped, + continue + # Redefine the key if it's a column number and usecols is defined + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Add the value to the list + filling_values[key] = val + # We have a sequence : update on a one-to-one basis + elif isinstance(user_filling_values, (list, tuple)): + n = len(user_filling_values) + if (n <= nbcols): + filling_values[:n] = user_filling_values + else: + filling_values = user_filling_values[:nbcols] + # We have something else : use it for all entries + else: + filling_values = [user_filling_values] * nbcols + + # Initialize the converters ................................ + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times the same + # ... converter, instead of 3 different converters. + converters = [StringConverter(None, missing_values=miss, default=fill) + for (miss, fill) in zip(missing_values, filling_values)] + else: + dtype_flat = flatten_dtype(dtype, flatten_base=True) + # Initialize the converters + if len(dtype_flat) > 1: + # Flexible type : get a converter from each dtype + zipit = zip(dtype_flat, missing_values, filling_values) + converters = [StringConverter(dt, locked=True, + missing_values=miss, default=fill) + for (dt, miss, fill) in zipit] + else: + # Set to a default converter (but w/ different missing values) + zipit = zip(missing_values, filling_values) + converters = [StringConverter(dtype, locked=True, + missing_values=miss, default=fill) + for (miss, fill) in zipit] + # Update the converters to use the user-defined ones + uc_update = [] + for (j, conv) in user_converters.items(): + # If the converter is specified by column names, use the index instead + if _is_string_like(j): + try: + j = names.index(j) + i = j + except ValueError: + continue + elif usecols: + try: + i = usecols.index(j) + except ValueError: + # Unused converter specified + continue + else: + i = j + # Find the value to test - first_line is not filtered by usecols: + if len(first_line): + testing_value = first_values[j] + else: + testing_value = None + if conv is bytes: + user_conv = asbytes + elif byte_converters: + # converters may use decode to workaround numpy's old behaviour, + # so encode the string again before passing to the user converter + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + import functools + user_conv = functools.partial(tobytes_first, conv=conv) + else: + user_conv = conv + converters[i].update(user_conv, locked=True, + testing_value=testing_value, + default=filling_values[i], + missing_values=missing_values[i],) + uc_update.append((i, user_conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Fixme: possible error as following variable never used. + # miss_chars = [_.missing_values for _ in converters] + + # Initialize the output lists ... + # ... rows + rows = [] + append_to_rows = rows.append + # ... 
masks + if usemask: + masks = [] + append_to_masks = masks.append + # ... invalid + invalid = [] + append_to_invalid = invalid.append + + # Parse each line + for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): + values = split_line(line) + nbvalues = len(values) + # Skip an empty line + if nbvalues == 0: + continue + if usecols: + # Select only the columns we need + try: + values = [values[_] for _ in usecols] + except IndexError: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + elif nbvalues != nbcols: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple([v.strip() in m + for (v, m) in zip(values, + missing_values)])) + if len(rows) == max_rows: + break + + if own_fhd: + fhd.close() + + # Upgrade the converters (if needed) + if dtype is None: + for (i, converter) in enumerate(converters): + current_column = [itemgetter(i)(_m) for _m in rows] + try: + converter.iterupgrade(current_column) + except ConverterLockError: + errmsg = "Converter #%i is locked and cannot be upgraded: " % i + current_column = map(itemgetter(i), rows) + for (j, value) in enumerate(current_column): + try: + converter.upgrade(value) + except (ConverterError, ValueError): + errmsg += "(occurred line #%i for value '%s')" + errmsg %= (j + 1 + skip_header, value) + raise ConverterError(errmsg) + + # Check that we don't have invalid values + nbinvalid = len(invalid) + if nbinvalid > 0: + nbrows = len(rows) + nbinvalid - skip_footer + # Construct the error message + template = " Line #%%i (got %%i columns instead of %i)" % nbcols + if skip_footer > 0: + nbinvalid_skipped = len([_ for _ in invalid + if _[0] > nbrows + skip_header]) + invalid = invalid[:nbinvalid - nbinvalid_skipped] + skip_footer -= nbinvalid_skipped +# +# nbrows -= skip_footer +# errmsg = [template % (i, nb) +# for (i, nb) in invalid if i < nbrows] +# else: + errmsg = [template % (i, nb) + for (i, nb) in invalid] + if len(errmsg): + errmsg.insert(0, "Some errors were detected !") + errmsg = "\n".join(errmsg) + # Raise an exception ? + if invalid_raise: + raise ValueError(errmsg) + # Issue a warning ? + else: + warnings.warn(errmsg, ConversionWarning, stacklevel=2) + + # Strip the last skip_footer data + if skip_footer > 0: + rows = rows[:-skip_footer] + if usemask: + masks = masks[:-skip_footer] + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + rows = list( + zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + else: + rows = list( + zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + column_types = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(column_types) + if v == np.unicode_] + + if byte_converters and strcolidx: + # convert strings back to bytes for backward compatibility + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated. 
Set the encoding, use None for the " + "system default.", + np.VisibleDeprecationWarning, stacklevel=2) + def encode_unicode_cols(row_tup): + row = list(row_tup) + for i in strcolidx: + row[i] = row[i].encode('latin1') + return tuple(row) + + try: + data = [encode_unicode_cols(r) for r in data] + except UnicodeEncodeError: + pass + else: + for i in strcolidx: + column_types[i] = np.bytes_ + + # Update string types to be the right length + sized_column_types = column_types[:] + for i, col_type in enumerate(column_types): + if np.issubdtype(col_type, np.character): + n_chars = max(len(row[i]) for row in data) + sized_column_types[i] = (col_type, n_chars) + + if names is None: + # If the dtype is uniform (before sizing strings) + base = { + c_type + for c, c_type in zip(converters, column_types) + if c._checked} + if len(base) == 1: + uniform_type, = base + (ddtype, mdtype) = (uniform_type, bool) + else: + ddtype = [(defaultfmt % i, dt) + for (i, dt) in enumerate(sized_column_types)] + if usemask: + mdtype = [(defaultfmt % i, bool) + for (i, dt) in enumerate(sized_column_types)] + else: + ddtype = list(zip(names, sized_column_types)) + mdtype = list(zip(names, [bool] * len(sized_column_types))) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names: + dtype.names = names + # Case 1. We have a structured type + if len(dtype_flat) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if 'O' in (_.char for _ in dtype_flat): + if has_nested_fields(dtype): + raise NotImplementedError( + "Nested fields involving objects are not supported...") + else: + output = np.array(data, dtype=dtype) + else: + rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) + output = rows.view(dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array( + masks, dtype=np.dtype([('', bool) for t in dtype_flat])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for i, ttype in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if np.issubdtype(ttype, np.character): + ttype = (ttype, max(len(row[i]) for row in data)) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. 
+ else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names: + mdtype = [(_, bool) for _ in dtype.names] + else: + mdtype = bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + names = output.dtype.names + if usemask and names: + for (name, conv) in zip(names, converters): + missing_values = [conv(_) for _ in conv.missing_values + if _ != ''] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + if unpack: + return output.squeeze().T + return output.squeeze() + + +def ndfromtxt(fname, **kwargs): + """ + Load ASCII data stored in a file and return it as a single array. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function. + + """ + kwargs['usemask'] = False + return genfromtxt(fname, **kwargs) + + +def mafromtxt(fname, **kwargs): + """ + Load ASCII data stored in a text file and return a masked array. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + """ + kwargs['usemask'] = True + return genfromtxt(fname, **kwargs) + + +def recfromtxt(fname, **kwargs): + """ + Load ASCII data from a file and return it in a record array. + + If ``usemask=False`` a standard `recarray` is returned, + if ``usemask=True`` a MaskedRecords array is returned. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + kwargs.setdefault("dtype", None) + usemask = kwargs.get('usemask', False) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, **kwargs): + """ + Load ASCII data stored in a comma-separated file. + + The returned array is a record array (if ``usemask=False``, see + `recarray`) or a masked record array (if ``usemask=True``, + see `ma.mrecords.MaskedRecords`). + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + # Set default kwargs for genfromtxt as relevant to csv import. 
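+    # recfromcsv('data.csv') (hypothetical file) would therefore read the
+    # header row into lowercase field names, infer each column's dtype, and
+    # return a recarray whose columns are accessible as attributes.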
+ kwargs.setdefault("case_sensitive", "lower") + kwargs.setdefault("names", True) + kwargs.setdefault("delimiter", ",") + kwargs.setdefault("dtype", None) + output = genfromtxt(fname, **kwargs) + + usemask = kwargs.get("usemask", False) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/npyio.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/npyio.pyc new file mode 100644 index 0000000..b66e8d1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/npyio.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/polynomial.py b/project/venv/lib/python2.7/site-packages/numpy/lib/polynomial.py new file mode 100644 index 0000000..e3defdc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/polynomial.py @@ -0,0 +1,1369 @@ +""" +Functions to operate on polynomials. + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', + 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', + 'polyfit', 'RankWarning'] + +import functools +import re +import warnings +import numpy.core.numeric as NX + +from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, + ones) +from numpy.core import overrides +from numpy.core.overrides import set_module +from numpy.lib.twodim_base import diag, vander +from numpy.lib.function_base import trim_zeros +from numpy.lib.type_check import iscomplex, real, imag, mintypecode +from numpy.linalg import eigvals, lstsq, inv + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +@set_module('numpy') +class RankWarning(UserWarning): + """ + Issued by `polyfit` when the Vandermonde matrix is rank deficient. + + For more information, a way to suppress the warning, and an example of + `RankWarning` being issued, see `polyfit`. + + """ + pass + + +def _poly_dispatcher(seq_of_zeros): + return seq_of_zeros + + +@array_function_dispatch(_poly_dispatcher) +def poly(seq_of_zeros): + """ + Find the coefficients of a polynomial with the given sequence of roots. + + Returns the coefficients of the polynomial whose leading coefficient + is one for the given sequence of zeros (multiple roots must be included + in the sequence as many times as their multiplicity; see Examples). + A square matrix (or array, which will be treated as a matrix) can also + be given, in which case the coefficients of the characteristic polynomial + of the matrix are returned. + + Parameters + ---------- + seq_of_zeros : array_like, shape (N,) or (N, N) + A sequence of polynomial roots, or a square array or matrix object. + + Returns + ------- + c : ndarray + 1D array of polynomial coefficients from highest to lowest degree: + + ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` + where c[0] always equals 1. + + Raises + ------ + ValueError + If input is the wrong shape (the input must be a 1-D or square + 2-D array). + + See Also + -------- + polyval : Compute polynomial values. + roots : Return the roots of a polynomial. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + Specifying the roots of a polynomial still leaves one degree of + freedom, typically represented by an undetermined leading + coefficient. 
[1]_ In the case of this function, that coefficient - + the first one in the returned array - is always taken as one. (If + for some reason you have one other point, the only automatic way + presently to leverage that information is to use ``polyfit``.) + + The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` + matrix **A** is given by + + :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, + + where **I** is the `n`-by-`n` identity matrix. [2]_ + + References + ---------- + .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry, + Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. + + .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," + Academic Press, pg. 182, 1980. + + Examples + -------- + Given a sequence of a polynomial's zeros: + + >>> np.poly((0, 0, 0)) # Multiple root example + array([1, 0, 0, 0]) + + The line above represents z**3 + 0*z**2 + 0*z + 0. + + >>> np.poly((-1./2, 0, 1./2)) + array([ 1. , 0. , -0.25, 0. ]) + + The line above represents z**3 - z/4 + + >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) #random + + Given a square array object: + + >>> P = np.array([[0, 1./3], [-1./2, 0]]) + >>> np.poly(P) + array([ 1. , 0. , 0.16666667]) + + Note how in all cases the leading coefficient is always 1. + + """ + seq_of_zeros = atleast_1d(seq_of_zeros) + sh = seq_of_zeros.shape + + if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: + seq_of_zeros = eigvals(seq_of_zeros) + elif len(sh) == 1: + dt = seq_of_zeros.dtype + # Let object arrays slip through, e.g. for arbitrary precision + if dt != object: + seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) + else: + raise ValueError("input must be 1d or non-empty square 2d array.") + + if len(seq_of_zeros) == 0: + return 1.0 + dt = seq_of_zeros.dtype + a = ones((1,), dtype=dt) + for k in range(len(seq_of_zeros)): + a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt), + mode='full') + + if issubclass(a.dtype.type, NX.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = NX.asarray(seq_of_zeros, complex) + if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): + a = a.real.copy() + + return a + + +def _roots_dispatcher(p): + return p + + +@array_function_dispatch(_roots_dispatcher) +def roots(p): + """ + Return the roots of a polynomial with coefficients given in p. + + The values in the rank-1 array `p` are coefficients of a polynomial. + If the length of `p` is n+1 then the polynomial is described by:: + + p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] + + Parameters + ---------- + p : array_like + Rank-1 array of polynomial coefficients. + + Returns + ------- + out : ndarray + An array containing the roots of the polynomial. + + Raises + ------ + ValueError + When `p` cannot be converted to a rank-1 array. + + See also + -------- + poly : Find the coefficients of a polynomial with a given sequence + of roots. + polyval : Compute polynomial values. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + The algorithm relies on computing the eigenvalues of the + companion matrix [1]_. + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. 
+ + Examples + -------- + >>> coeff = [3.2, 2, 1] + >>> np.roots(coeff) + array([-0.3125+0.46351241j, -0.3125-0.46351241j]) + + """ + # If input is scalar, this makes it an array + p = atleast_1d(p) + if p.ndim != 1: + raise ValueError("Input must be a rank-1 array.") + + # find non-zero array entries + non_zero = NX.nonzero(NX.ravel(p))[0] + + # Return an empty array if polynomial is all zeros + if len(non_zero) == 0: + return NX.array([]) + + # find the number of trailing zeros -- this is the number of roots at 0. + trailing_zeros = len(p) - non_zero[-1] - 1 + + # strip leading and trailing zeros + p = p[int(non_zero[0]):int(non_zero[-1])+1] + + # casting: if incoming array isn't floating point, make it floating point. + if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): + p = p.astype(float) + + N = len(p) + if N > 1: + # build companion matrix and find its eigenvalues (the roots) + A = diag(NX.ones((N-2,), p.dtype), -1) + A[0,:] = -p[1:] / p[0] + roots = eigvals(A) + else: + roots = NX.array([]) + + # tack any zeros onto the back of the array + roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) + return roots + + +def _polyint_dispatcher(p, m=None, k=None): + return (p,) + + +@array_function_dispatch(_polyint_dispatcher) +def polyint(p, m=1, k=None): + """ + Return an antiderivative (indefinite integral) of a polynomial. + + The returned order `m` antiderivative `P` of polynomial `p` satisfies + :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` + integration constants `k`. The constants determine the low-order + polynomial part + + .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} + + of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. + + Parameters + ---------- + p : array_like or poly1d + Polynomial to integrate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of the antiderivative. (Default: 1) + k : list of `m` scalars or scalar, optional + Integration constants. They are given in the order of integration: + those corresponding to highest-order terms come first. + + If ``None`` (default), all constants are assumed to be zero. + If `m = 1`, a single scalar can be given instead of a list. + + See Also + -------- + polyder : derivative of a polynomial + poly1d.integ : equivalent method + + Examples + -------- + The defining property of the antiderivative: + + >>> p = np.poly1d([1,1,1]) + >>> P = np.polyint(p) + >>> P + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) + >>> np.polyder(P) == p + True + + The integration constants default to zero, but can be specified: + + >>> P = np.polyint(p, 3) + >>> P(0) + 0.0 + >>> np.polyder(P)(0) + 0.0 + >>> np.polyder(P, 2)(0) + 0.0 + >>> P = np.polyint(p, 3, k=[6,5,3]) + >>> P + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) + + Note that 3 = 6 / 2!, and that the constants are given in the order of + integrations. 
Constant of the highest-order polynomial term comes first:
+
+    >>> np.polyder(P, 2)(0)
+    6.0
+    >>> np.polyder(P, 1)(0)
+    5.0
+    >>> P(0)
+    3.0
+
+    """
+    m = int(m)
+    if m < 0:
+        raise ValueError("Order of integral must be positive (see polyder)")
+    if k is None:
+        k = NX.zeros(m, float)
+    k = atleast_1d(k)
+    if len(k) == 1 and m > 1:
+        k = k[0]*NX.ones(m, float)
+    if len(k) < m:
+        raise ValueError(
+            "k must be a scalar or a rank-1 array of length 1 or >m.")
+
+    truepoly = isinstance(p, poly1d)
+    p = NX.asarray(p)
+    if m == 0:
+        if truepoly:
+            return poly1d(p)
+        return p
+    else:
+        # Note: this must work also with object and integer arrays
+        y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
+        val = polyint(y, m - 1, k=k[1:])
+        if truepoly:
+            return poly1d(val)
+        return val
+
+
+def _polyder_dispatcher(p, m=None):
+    return (p,)
+
+
+@array_function_dispatch(_polyder_dispatcher)
+def polyder(p, m=1):
+    """
+    Return the derivative of the specified order of a polynomial.
+
+    Parameters
+    ----------
+    p : poly1d or sequence
+        Polynomial to differentiate.
+        A sequence is interpreted as polynomial coefficients, see `poly1d`.
+    m : int, optional
+        Order of differentiation (default: 1)
+
+    Returns
+    -------
+    der : poly1d
+        A new polynomial representing the derivative.
+
+    See Also
+    --------
+    polyint : Anti-derivative of a polynomial.
+    poly1d : Class for one-dimensional polynomials.
+
+    Examples
+    --------
+    The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
+
+    >>> p = np.poly1d([1,1,1,1])
+    >>> p2 = np.polyder(p)
+    >>> p2
+    poly1d([3, 2, 1])
+
+    which evaluates to:
+
+    >>> p2(2.)
+    17.0
+
+    We can verify this, approximating the derivative with
+    ``(f(x + h) - f(x))/h``:
+
+    >>> (p(2. + 0.001) - p(2.)) / 0.001
+    17.007000999997857
+
+    The fourth-order derivative of a 3rd-order polynomial is zero:
+
+    >>> np.polyder(p, 2)
+    poly1d([6, 2])
+    >>> np.polyder(p, 3)
+    poly1d([6])
+    >>> np.polyder(p, 4)
+    poly1d([ 0.])
+
+    """
+    m = int(m)
+    if m < 0:
+        raise ValueError("Order of derivative must be positive (see polyint)")
+
+    truepoly = isinstance(p, poly1d)
+    p = NX.asarray(p)
+    n = len(p) - 1
+    y = p[:-1] * NX.arange(n, 0, -1)
+    if m == 0:
+        val = p
+    else:
+        val = polyder(y, m - 1)
+    if truepoly:
+        val = poly1d(val)
+    return val
+
+
+def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
+    return (x, y, w)
+
+
+@array_function_dispatch(_polyfit_dispatcher)
+def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
+    """
+    Least squares polynomial fit.
+
+    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
+    to points `(x, y)`.  Returns a vector of coefficients `p` that minimises
+    the squared error in the order `deg`, `deg-1`, ... `0`.
+
+    The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
+    method is recommended for new code as it is more stable numerically. See
+    the documentation of the method for more information.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points.  Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int
+        Degree of the fitting polynomial
+    rcond : float, optional
+        Relative condition number of the fit.  Singular values smaller than
+        this relative to the largest singular value will be ignored.
The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (M,), optional + Weights to apply to the y-coordinates of the sample points. For + gaussian uncertainties, use 1/sigma (not 1/sigma**2). + cov : bool or str, optional + If given and not `False`, return not just the estimate but also its + covariance matrix. By default, the covariance are scaled by + chi2/sqrt(N-dof), i.e., the weights are presumed to be unreliable + except in a relative sense and everything is scaled such that the + reduced chi2 is unity. This scaling is omitted if ``cov='unscaled'``, + as is relevant for the case that the weights are 1/sigma**2, with + sigma known to be a reliable estimate of the uncertainty. + + Returns + ------- + p : ndarray, shape (deg + 1,) or (deg + 1, K) + Polynomial coefficients, highest power first. If `y` was 2-D, the + coefficients for `k`-th data set are in ``p[:,k]``. + + residuals, rank, singular_values, rcond + Present only if `full` = True. Residuals of the least-squares fit, + the effective rank of the scaled Vandermonde coefficient matrix, + its singular values, and the specified value of `rcond`. For more + details, see `linalg.lstsq`. + + V : ndarray, shape (M,M) or (M,M,K) + Present only if `full` = False and `cov`=True. The covariance + matrix of the polynomial coefficient estimates. The diagonal of + this matrix are the variance estimates for each coefficient. If y + is a 2-D array, then the covariance matrix for the `k`-th data set + are in ``V[:,:,k]`` + + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. + + The warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + polyval : Compute polynomial values. + linalg.lstsq : Computes a least-squares fit. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution minimizes the squared error + + .. math :: + E = \\sum_{j=0}^k |p(x_j) - y_j|^2 + + in the equations:: + + x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] + x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] + ... + x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] + + The coefficient matrix of the coefficients `p` is a Vandermonde matrix. + + `polyfit` issues a `RankWarning` when the least-squares fit is badly + conditioned. This implies that the best fit is not well-defined due + to numerical error. The results may be improved by lowering the polynomial + degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter + can also be set to a value smaller than its default, but the resulting + fit may be spurious: including contributions from the small singular + values can add numerical noise to the result. + + Note that fitting polynomial coefficients is inherently badly conditioned + when the degree of the polynomial is large or the interval of sample points + is badly centered. The quality of the fit should always be checked in these + cases. When polynomial fits are not satisfactory, splines may be a good + alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + .. 
[2] Wikipedia, "Polynomial interpolation", + https://en.wikipedia.org/wiki/Polynomial_interpolation + + Examples + -------- + >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) + >>> z = np.polyfit(x, y, 3) + >>> z + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) + + It is convenient to use `poly1d` objects for dealing with polynomials: + + >>> p = np.poly1d(z) + >>> p(0.5) + 0.6143849206349179 + >>> p(3.5) + -0.34732142857143039 + >>> p(10) + 22.579365079365115 + + High-order polynomials may oscillate wildly: + + >>> p30 = np.poly1d(np.polyfit(x, y, 30)) + /... RankWarning: Polyfit may be poorly conditioned... + >>> p30(4) + -0.80000000000000204 + >>> p30(5) + -0.99999999999999445 + >>> p30(4.5) + -0.10547061179440398 + + Illustration: + + >>> import matplotlib.pyplot as plt + >>> xp = np.linspace(-2, 6, 100) + >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') + >>> plt.ylim(-2,2) + (-2, 2) + >>> plt.show() + + """ + order = int(deg) + 1 + x = NX.asarray(x) + 0.0 + y = NX.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if x.shape[0] != y.shape[0]: + raise TypeError("expected x and y to have same length") + + # set rcond + if rcond is None: + rcond = len(x)*finfo(x.dtype).eps + + # set up least squares equation for powers of x + lhs = vander(x, order) + rhs = y + + # apply weighting + if w is not None: + w = NX.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0]: + raise TypeError("expected w and y to have the same length") + lhs *= w[:, NX.newaxis] + if rhs.ndim == 2: + rhs *= w[:, NX.newaxis] + else: + rhs *= w + + # scale lhs to improve condition number and solve + scale = NX.sqrt((lhs*lhs).sum(axis=0)) + lhs /= scale + c, resids, rank, s = lstsq(lhs, rhs, rcond) + c = (c.T/scale).T # broadcast scale coefficients + + # warn on rank reduction, which indicates an ill conditioned matrix + if rank != order and not full: + msg = "Polyfit may be poorly conditioned" + warnings.warn(msg, RankWarning, stacklevel=2) + + if full: + return c, resids, rank, s, rcond + elif cov: + Vbase = inv(dot(lhs.T, lhs)) + Vbase /= NX.outer(scale, scale) + if cov == "unscaled": + fac = 1 + else: + if len(x) <= order: + raise ValueError("the number of data points must exceed order " + "to scale the covariance matrix") + # note, this used to be: fac = resids / (len(x) - order - 2.0) + # it was decided that the "- 2" (originally justified by "Bayesian + # uncertainty analysis") is not what the user expects + # (see gh-11196 and gh-11197) + fac = resids / (len(x) - order) + if y.ndim == 1: + return c, Vbase * fac + else: + return c, Vbase[:,:, NX.newaxis] * fac + else: + return c + + +def _polyval_dispatcher(p, x): + return (p, x) + + +@array_function_dispatch(_polyval_dispatcher) +def polyval(p, x): + """ + Evaluate a polynomial at specific values. + + If `p` is of length N, this function returns the value: + + ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` + + If `x` is a sequence, then `p(x)` is returned for each element of `x`. + If `x` is another polynomial then the composite polynomial `p(x(t))` + is returned. 
+ + Parameters + ---------- + p : array_like or poly1d object + 1D array of polynomial coefficients (including coefficients equal + to zero) from highest degree to the constant term, or an + instance of poly1d. + x : array_like or poly1d object + A number, an array of numbers, or an instance of poly1d, at + which to evaluate `p`. + + Returns + ------- + values : ndarray or poly1d + If `x` is a poly1d instance, the result is the composition of the two + polynomials, i.e., `x` is "substituted" in `p` and the simplified + result is returned. In addition, the type of `x` - array_like or + poly1d - governs the type of the output: `x` array_like => `values` + array_like, `x` a poly1d object => `values` is also. + + See Also + -------- + poly1d: A polynomial class. + + Notes + ----- + Horner's scheme [1]_ is used to evaluate the polynomial. Even so, + for polynomials of high degree the values may be inaccurate due to + rounding errors. Use carefully. + + References + ---------- + .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. + trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand + Reinhold Co., 1985, pg. 720. + + Examples + -------- + >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 + 76 + >>> np.polyval([3,0,1], np.poly1d(5)) + poly1d([ 76.]) + >>> np.polyval(np.poly1d([3,0,1]), 5) + 76 + >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) + poly1d([ 76.]) + + """ + p = NX.asarray(p) + if isinstance(x, poly1d): + y = 0 + else: + x = NX.asarray(x) + y = NX.zeros_like(x) + for i in range(len(p)): + y = y * x + p[i] + return y + + +def _binary_op_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_binary_op_dispatcher) +def polyadd(a1, a2): + """ + Find the sum of two polynomials. + + Returns the polynomial resulting from the sum of two input polynomials. + Each input must be either a poly1d object or a 1D sequence of polynomial + coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The sum of the inputs. If either input is a poly1d object, then the + output is also a poly1d object. Otherwise, it is a 1D array of + polynomial coefficients from highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + + Examples + -------- + >>> np.polyadd([1, 2], [9, 5, 4]) + array([9, 6, 6]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2]) + >>> p2 = np.poly1d([9, 5, 4]) + >>> print(p1) + 1 x + 2 + >>> print(p2) + 2 + 9 x + 5 x + 4 + >>> print(np.polyadd(p1, p2)) + 2 + 9 x + 6 x + 6 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 + a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) + a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 + NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polysub(a1, a2): + """ + Difference (subtraction) of two polynomials. + + Given two polynomials `a1` and `a2`, returns ``a1 - a2``. + `a1` and `a2` can be either array_like sequences of the polynomials' + coefficients (including coefficients equal to zero), or `poly1d` objects. + + Parameters + ---------- + a1, a2 : array_like or poly1d + Minuend and subtrahend polynomials, respectively. 
+ + Returns + ------- + out : ndarray or poly1d + Array or `poly1d` object of the difference polynomial's coefficients. + + See Also + -------- + polyval, polydiv, polymul, polyadd + + Examples + -------- + .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) + + >>> np.polysub([2, 10, -2], [3, 10, -4]) + array([-1, 0, 2]) + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 - a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) - a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 - NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polymul(a1, a2): + """ + Find the product of two polynomials. + + Finds the polynomial resulting from the multiplication of the two input + polynomials. Each input must be either a poly1d object or a 1D sequence + of polynomial coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The polynomial resulting from the multiplication of the inputs. If + either inputs is a poly1d object, then the output is also a poly1d + object. Otherwise, it is a 1D array of polynomial coefficients from + highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, + polyval + convolve : Array convolution. Same output as polymul, but has parameter + for overlap mode. + + Examples + -------- + >>> np.polymul([1, 2, 3], [9, 5, 1]) + array([ 9, 23, 38, 17, 3]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2, 3]) + >>> p2 = np.poly1d([9, 5, 1]) + >>> print(p1) + 2 + 1 x + 2 x + 3 + >>> print(p2) + 2 + 9 x + 5 x + 1 + >>> print(np.polymul(p1, p2)) + 4 3 2 + 9 x + 23 x + 38 x + 17 x + 3 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1, a2 = poly1d(a1), poly1d(a2) + val = NX.convolve(a1, a2) + if truepoly: + val = poly1d(val) + return val + + +def _polydiv_dispatcher(u, v): + return (u, v) + + +@array_function_dispatch(_polydiv_dispatcher) +def polydiv(u, v): + """ + Returns the quotient and remainder of polynomial division. + + The input arrays are the coefficients (including any coefficients + equal to zero) of the "numerator" (dividend) and "denominator" + (divisor) polynomials, respectively. + + Parameters + ---------- + u : array_like or poly1d + Dividend polynomial's coefficients. + + v : array_like or poly1d + Divisor polynomial's coefficients. + + Returns + ------- + q : ndarray + Coefficients, including those equal to zero, of the quotient. + r : ndarray + Coefficients, including those equal to zero, of the remainder. + + See Also + -------- + poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub, + polyval + + Notes + ----- + Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need + not equal `v.ndim`. In other words, all four possible combinations - + ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, + ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. + + Examples + -------- + .. 
math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + + >>> x = np.array([3.0, 5.0, 2.0]) + >>> y = np.array([2.0, 1.0]) + >>> np.polydiv(x, y) + (array([ 1.5 , 1.75]), array([ 0.25])) + + """ + truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d)) + u = atleast_1d(u) + 0.0 + v = atleast_1d(v) + 0.0 + # w has the common type + w = u[0] + v[0] + m = len(u) - 1 + n = len(v) - 1 + scale = 1. / v[0] + q = NX.zeros((max(m - n + 1, 1),), w.dtype) + r = u.astype(w.dtype) + for k in range(0, m-n+1): + d = scale * r[k] + q[k] = d + r[k:k+n+1] -= d*v + while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): + r = r[1:] + if truepoly: + return poly1d(q), poly1d(r) + return q, r + +_poly_mat = re.compile(r"[*][*]([0-9]*)") +def _raise_power(astr, wrap=70): + # typeset the '**n' exponents of a polynomial string on a raised line + # above the body; used by poly1d.__str__ below + n = 0 + line1 = '' + line2 = '' + output = ' ' + while True: + mat = _poly_mat.search(astr, n) + if mat is None: + break + span = mat.span() + power = mat.groups()[0] + partstr = astr[n:span[0]] + n = span[1] + toadd2 = partstr + ' '*(len(power)-1) + toadd1 = ' '*(len(partstr)-1) + power + if ((len(line2) + len(toadd2) > wrap) or + (len(line1) + len(toadd1) > wrap)): + output += line1 + "\n" + line2 + "\n " + line1 = toadd1 + line2 = toadd2 + else: + line2 += partstr + ' '*(len(power)-1) + line1 += ' '*(len(partstr)-1) + power + output += line1 + "\n" + line2 + return output + astr[n:] + + +@set_module('numpy') +class poly1d(object): + """ + A one-dimensional polynomial class. + + A convenience class, used to encapsulate "natural" operations on + polynomials so that said operations may take on their customary + form in code (see Examples). + + Parameters + ---------- + c_or_r : array_like + The polynomial's coefficients, in decreasing powers, or if + the value of the second parameter is True, the polynomial's + roots (values where the polynomial evaluates to 0). For example, + ``poly1d([1, 2, 3])`` returns an object that represents + :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns + one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x - 6`. + r : bool, optional + If True, `c_or_r` specifies the polynomial's roots; the default + is False. + variable : str, optional + Changes the variable used when printing `p` from `x` to `variable` + (see Examples). 
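+ + Attributes + ---------- + coeffs : ndarray + The polynomial's coefficients, in decreasing powers (returned as a + copy; aliased as ``c``). + order : int + The degree of the polynomial (aliased as ``o``). + variable : str + The name used for the variable when the polynomial is printed. + roots : ndarray + The values for which the polynomial evaluates to 0 (aliased as ``r``).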
+ + Examples + -------- + Construct the polynomial :math:`x^2 + 2x + 3`: + + >>> p = np.poly1d([1, 2, 3]) + >>> print(np.poly1d(p)) + 2 + 1 x + 2 x + 3 + + Evaluate the polynomial at :math:`x = 0.5`: + + >>> p(0.5) + 4.25 + + Find the roots: + + >>> p.r + array([-1.+1.41421356j, -1.-1.41421356j]) + >>> p(p.r) + array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) + + These numbers in the previous line represent (0, 0) to machine precision. + + Show the coefficients: + + >>> p.c + array([1, 2, 3]) + + Display the order (the leading zero-coefficients are removed): + + >>> p.order + 2 + + Show the coefficient of the k-th power in the polynomial + (which is equivalent to ``p.c[-(k+1)]``): + + >>> p[1] + 2 + + Polynomials can be added, subtracted, multiplied, and divided + (returns quotient and remainder): + + >>> p * p + poly1d([ 1, 4, 10, 12, 9]) + + >>> (p**3 + 4) / p + (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.])) + + ``asarray(p)`` gives the coefficient array, so polynomials can be + used in all functions that accept arrays: + + >>> p**2 # square of polynomial + poly1d([ 1, 4, 10, 12, 9]) + + >>> np.square(p) # square of individual coefficients + array([1, 4, 9]) + + The variable used in the string representation of `p` can be modified, + using the `variable` parameter: + + >>> p = np.poly1d([1,2,3], variable='z') + >>> print(p) + 2 + 1 z + 2 z + 3 + + Construct a polynomial from its roots: + + >>> np.poly1d([1, 2], True) + poly1d([ 1, -3, 2]) + + This is the same polynomial as obtained by: + + >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) + poly1d([ 1, -3, 2]) + + """ + __hash__ = None + + @property + def coeffs(self): + """ A copy of the polynomial coefficients """ + return self._coeffs.copy() + + @property + def variable(self): + """ The name of the polynomial variable """ + return self._variable + + # calculated attributes + @property + def order(self): + """ The order or degree of the polynomial """ + return len(self._coeffs) - 1 + + @property + def roots(self): + """ The roots of the polynomial, where self(x) == 0 """ + return roots(self._coeffs) + + # our internal _coeffs property needs to be backed by __dict__['coeffs'] for + # scipy to work correctly. 
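+ # (the public ``coeffs`` property above hands out a copy, while code that + # reaches into ``poly1d.__dict__['coeffs']`` directly sees the live array + # stored by the setter below)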
+ @property + def _coeffs(self): + return self.__dict__['coeffs'] + @_coeffs.setter + def _coeffs(self, coeffs): + self.__dict__['coeffs'] = coeffs + + # alias attributes + r = roots + c = coef = coefficients = coeffs + o = order + + def __init__(self, c_or_r, r=False, variable=None): + if isinstance(c_or_r, poly1d): + self._variable = c_or_r._variable + self._coeffs = c_or_r._coeffs + + if set(c_or_r.__dict__) - set(self.__dict__): + msg = ("In the future extra properties will not be copied " + "across when constructing one poly1d from another") + warnings.warn(msg, FutureWarning, stacklevel=2) + self.__dict__.update(c_or_r.__dict__) + + if variable is not None: + self._variable = variable + return + if r: + c_or_r = poly(c_or_r) + c_or_r = atleast_1d(c_or_r) + if c_or_r.ndim > 1: + raise ValueError("Polynomial must be 1d only.") + c_or_r = trim_zeros(c_or_r, trim='f') + if len(c_or_r) == 0: + c_or_r = NX.array([0.]) + self._coeffs = c_or_r + if variable is None: + variable = 'x' + self._variable = variable + + def __array__(self, t=None): + if t: + return NX.asarray(self.coeffs, t) + else: + return NX.asarray(self.coeffs) + + def __repr__(self): + vals = repr(self.coeffs) + vals = vals[6:-1] + return "poly1d(%s)" % vals + + def __len__(self): + return self.order + + def __str__(self): + thestr = "0" + var = self.variable + + # Remove leading zeros + coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] + N = len(coeffs)-1 + + def fmt_float(q): + s = '%.4g' % q + if s.endswith('.0000'): + s = s[:-5] + return s + + for k in range(len(coeffs)): + if not iscomplex(coeffs[k]): + coefstr = fmt_float(real(coeffs[k])) + elif real(coeffs[k]) == 0: + coefstr = '%sj' % fmt_float(imag(coeffs[k])) + else: + coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])), + fmt_float(imag(coeffs[k]))) + + power = (N-k) + if power == 0: + if coefstr != '0': + newstr = '%s' % (coefstr,) + else: + if k == 0: + newstr = '0' + else: + newstr = '' + elif power == 1: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = var + else: + newstr = '%s %s' % (coefstr, var) + else: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = '%s**%d' % (var, power,) + else: + newstr = '%s %s**%d' % (coefstr, var, power) + + if k > 0: + if newstr != '': + if newstr.startswith('-'): + thestr = "%s - %s" % (thestr, newstr[1:]) + else: + thestr = "%s + %s" % (thestr, newstr) + else: + thestr = newstr + return _raise_power(thestr) + + def __call__(self, val): + return polyval(self.coeffs, val) + + def __neg__(self): + return poly1d(-self.coeffs) + + def __pos__(self): + return self + + def __mul__(self, other): + if isscalar(other): + return poly1d(self.coeffs * other) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __rmul__(self, other): + if isscalar(other): + return poly1d(other * self.coeffs) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __add__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __radd__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __pow__(self, val): + if not isscalar(val) or int(val) != val or val < 0: + raise ValueError("Power to non-negative integers only.") + res = [1] + for _ in range(val): + res = polymul(self.coeffs, res) + return poly1d(res) + + def __sub__(self, other): + other = poly1d(other) + return poly1d(polysub(self.coeffs, other.coeffs)) + + def __rsub__(self, other): + 
other = poly1d(other) + return poly1d(polysub(other.coeffs, self.coeffs)) + + def __div__(self, other): + if isscalar(other): + return poly1d(self.coeffs/other) + else: + other = poly1d(other) + return polydiv(self, other) + + __truediv__ = __div__ + + def __rdiv__(self, other): + if isscalar(other): + return poly1d(other/self.coeffs) + else: + other = poly1d(other) + return polydiv(other, self) + + __rtruediv__ = __rdiv__ + + def __eq__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + if self.coeffs.shape != other.coeffs.shape: + return False + return (self.coeffs == other.coeffs).all() + + def __ne__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + return not self.__eq__(other) + + + def __getitem__(self, val): + ind = self.order - val + if val > self.order: + return 0 + if val < 0: + return 0 + return self.coeffs[ind] + + def __setitem__(self, key, val): + ind = self.order - key + if key < 0: + raise ValueError("Does not support negative powers.") + if key > self.order: + zr = NX.zeros(key-self.order, self.coeffs.dtype) + self._coeffs = NX.concatenate((zr, self.coeffs)) + ind = 0 + self._coeffs[ind] = val + return + + def __iter__(self): + return iter(self.coeffs) + + def integ(self, m=1, k=0): + """ + Return an antiderivative (indefinite integral) of this polynomial. + + Refer to `polyint` for full documentation. + + See Also + -------- + polyint : equivalent function + + """ + return poly1d(polyint(self.coeffs, m=m, k=k)) + + def deriv(self, m=1): + """ + Return a derivative of this polynomial. + + Refer to `polyder` for full documentation. + + See Also + -------- + polyder : equivalent function + + """ + return poly1d(polyder(self.coeffs, m=m)) + +# Stuff to do on module import + +warnings.simplefilter('always', RankWarning) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/polynomial.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/polynomial.pyc new file mode 100644 index 0000000..c5d09f3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/polynomial.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/recfunctions.py b/project/venv/lib/python2.7/site-packages/numpy/lib/recfunctions.py new file mode 100644 index 0000000..fcc0d9a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/recfunctions.py @@ -0,0 +1,1570 @@ +""" +Collection of utilities to manipulate structured arrays. + +Most of these functions were initially implemented by John Hunter for +matplotlib. They have been rewritten and extended for convenience. 
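+ + Most of these functions accept ``usemask`` and ``asrecarray`` flags to + choose between plain ndarrays, masked arrays, and record-array views as + the return type.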
+ +""" +from __future__ import division, absolute_import, print_function + +import sys +import itertools +import numpy as np +import numpy.ma as ma +from numpy import ndarray, recarray +from numpy.ma import MaskedArray +from numpy.ma.mrecords import MaskedRecords +from numpy.core.overrides import array_function_dispatch +from numpy.lib._iotools import _is_string_like +from numpy.compat import basestring +from numpy.testing import suppress_warnings + +if sys.version_info[0] < 3: + from future_builtins import zip + +_check_fill_value = np.ma.core._check_fill_value + + +__all__ = [ + 'append_fields', 'drop_fields', 'find_duplicates', + 'get_fieldstructure', 'join_by', 'merge_arrays', + 'rec_append_fields', 'rec_drop_fields', 'rec_join', + 'recursive_fill_fields', 'rename_fields', 'stack_arrays', + ] + + +def _recursive_fill_fields_dispatcher(input, output): + return (input, output) + + +@array_function_dispatch(_recursive_fill_fields_dispatcher) +def recursive_fill_fields(input, output): + """ + Fills fields from output with fields from input, + with support for nested structures. + + Parameters + ---------- + input : ndarray + Input array. + output : ndarray + Output array. + + Notes + ----- + * `output` should be at least the same size as `input` + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + >>> b = np.zeros((3,), dtype=a.dtype) + >>> rfn.recursive_fill_fields(a, b) + array([(1, 10.0), (2, 20.0), (0, 0.0)], + dtype=[('A', '>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)]) + >>> dt.descr + [(('a', 'A'), '>> get_fieldspec(dt) + [(('a', 'A'), dtype('int32')), ('b', dtype(('>> from numpy.lib import recfunctions as rfn + >>> rfn.get_names(np.empty((1,), dtype=int)) is None + True + >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)])) + ('A', 'B') + >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) + >>> rfn.get_names(adtype) + ('a', ('b', ('ba', 'bb'))) + """ + listnames = [] + names = adtype.names + for name in names: + current = adtype[name] + if current.names: + listnames.append((name, tuple(get_names(current)))) + else: + listnames.append(name) + return tuple(listnames) or None + + +def get_names_flat(adtype): + """ + Returns the field names of the input datatype as a tuple. Nested structure + are flattend beforehand. + + Parameters + ---------- + adtype : dtype + Input datatype + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None + True + >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)])) + ('A', 'B') + >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) + >>> rfn.get_names_flat(adtype) + ('a', 'b', 'ba', 'bb') + """ + listnames = [] + names = adtype.names + for name in names: + listnames.append(name) + current = adtype[name] + if current.names: + listnames.extend(get_names_flat(current)) + return tuple(listnames) or None + + +def flatten_descr(ndtype): + """ + Flatten a structured data-type description. 
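+ + Parameters + ---------- + ndtype : dtype + The structured data-type description to flatten.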
+ + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) + (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32'))) + + """ + names = ndtype.names + if names is None: + return (('', ndtype),) + else: + descr = [] + for field in names: + (typ, _) = ndtype.fields[field] + if typ.names: + descr.extend(flatten_descr(typ)) + else: + descr.append((field, typ)) + return tuple(descr) + + +def _zip_dtype_dispatcher(seqarrays, flatten=None): + return seqarrays + + +@array_function_dispatch(_zip_dtype_dispatcher) +def zip_dtype(seqarrays, flatten=False): + newdtype = [] + if flatten: + for a in seqarrays: + newdtype.extend(flatten_descr(a.dtype)) + else: + for a in seqarrays: + current = a.dtype + if current.names and len(current.names) <= 1: + # special case - dtypes of 0 or 1 field are flattened + newdtype.extend(get_fieldspec(current)) + else: + newdtype.append(('', current)) + return np.dtype(newdtype) + + +@array_function_dispatch(_zip_dtype_dispatcher) +def zip_descr(seqarrays, flatten=False): + """ + Combine the dtype description of a series of arrays. + + Parameters + ---------- + seqarrays : sequence of arrays + Sequence of arrays + flatten : {boolean}, optional + Whether to collapse nested descriptions. + """ + return zip_dtype(seqarrays, flatten=flatten).descr + + +def get_fieldstructure(adtype, lastname=None, parents=None,): + """ + Returns a dictionary with fields indexing lists of their parent fields. + + This function is used to simplify access to fields nested in other fields. + + Parameters + ---------- + adtype : np.dtype + Input datatype + lastname : optional + Last processed field name (used internally during recursion). + parents : dictionary + Dictionary of parent fields (used interbally during recursion). + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = np.dtype([('A', int), + ... ('B', [('BA', int), + ... ('BB', [('BBA', int), ('BBB', int)])])]) + >>> rfn.get_fieldstructure(ndtype) + ... # XXX: possible regression, order of BBA and BBB is swapped + {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + + """ + if parents is None: + parents = {} + names = adtype.names + for name in names: + current = adtype[name] + if current.names: + if lastname: + parents[name] = [lastname, ] + else: + parents[name] = [] + parents.update(get_fieldstructure(current, name, parents)) + else: + lastparent = [_ for _ in (parents.get(lastname, []) or [])] + if lastparent: + lastparent.append(lastname) + elif lastname: + lastparent = [lastname, ] + parents[name] = lastparent or [] + return parents or None + + +def _izip_fields_flat(iterable): + """ + Returns an iterator of concatenated fields from a sequence of arrays, + collapsing any nested structure. + + """ + for element in iterable: + if isinstance(element, np.void): + for f in _izip_fields_flat(tuple(element)): + yield f + else: + yield element + + +def _izip_fields(iterable): + """ + Returns an iterator of concatenated fields from a sequence of arrays. 
+ + """ + for element in iterable: + if (hasattr(element, '__iter__') and + not isinstance(element, basestring)): + for f in _izip_fields(element): + yield f + elif isinstance(element, np.void) and len(tuple(element)) == 1: + for f in _izip_fields(element): + yield f + else: + yield element + + +def _izip_records_dispatcher(seqarrays, fill_value=None, flatten=None): + return seqarrays + + +@array_function_dispatch(_izip_records_dispatcher) +def izip_records(seqarrays, fill_value=None, flatten=True): + """ + Returns an iterator of concatenated items from a sequence of arrays. + + Parameters + ---------- + seqarrays : sequence of arrays + Sequence of arrays. + fill_value : {None, integer} + Value used to pad shorter iterables. + flatten : {True, False}, + Whether to + """ + + # Should we flatten the items, or just use a nested approach + if flatten: + zipfunc = _izip_fields_flat + else: + zipfunc = _izip_fields + + if sys.version_info[0] >= 3: + zip_longest = itertools.zip_longest + else: + zip_longest = itertools.izip_longest + + for tup in zip_longest(*seqarrays, fillvalue=fill_value): + yield tuple(zipfunc(tup)) + + +def _fix_output(output, usemask=True, asrecarray=False): + """ + Private function: return a recarray, a ndarray, a MaskedArray + or a MaskedRecords depending on the input parameters + """ + if not isinstance(output, MaskedArray): + usemask = False + if usemask: + if asrecarray: + output = output.view(MaskedRecords) + else: + output = ma.filled(output) + if asrecarray: + output = output.view(recarray) + return output + + +def _fix_defaults(output, defaults=None): + """ + Update the fill_value and masked data of `output` + from the default given in a dictionary defaults. + """ + names = output.dtype.names + (data, mask, fill_value) = (output.data, output.mask, output.fill_value) + for (k, v) in (defaults or {}).items(): + if k in names: + fill_value[k] = v + data[k][mask[k]] = v + return output + + +def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None, + usemask=None, asrecarray=None): + return seqarrays + + +@array_function_dispatch(_merge_arrays_dispatcher) +def merge_arrays(seqarrays, fill_value=-1, flatten=False, + usemask=False, asrecarray=False): + """ + Merge arrays field by field. + + Parameters + ---------- + seqarrays : sequence of ndarrays + Sequence of arrays + fill_value : {float}, optional + Filling value used to pad missing data on the shorter arrays. + flatten : {False, True}, optional + Whether to collapse nested fields. + usemask : {False, True}, optional + Whether to return a masked array or not. + asrecarray : {False, True}, optional + Whether to return a recarray (MaskedRecords) or not. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) + masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)], + mask = [(False, False) (False, False) (True, False)], + fill_value = (999999, 1e+20), + dtype = [('f0', '>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])), + ... usemask=False) + array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]), + ... np.array([10., 20., 30.])), + ... usemask=False, asrecarray=True) + rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + ... 
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + >>> rfn.drop_fields(a, 'a') + array([((2.0, 3),), ((5.0, 6),)], + dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') + array([(1, (3,)), (4, (6,))], + dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) + array([(1,), (4,)], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) + >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) + array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))], + dtype=[('A', ' 1: + data = merge_arrays(data, flatten=True, usemask=usemask, + fill_value=fill_value) + else: + data = data.pop() + # + output = ma.masked_all( + max(len(base), len(data)), + dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype)) + output = recursive_fill_fields(base, output) + output = recursive_fill_fields(data, output) + # + return _fix_output(output, usemask=usemask, asrecarray=asrecarray) + + +def _rec_append_fields_dispatcher(base, names, data, dtypes=None): + yield base + for d in data: + yield d + + +@array_function_dispatch(_rec_append_fields_dispatcher) +def rec_append_fields(base, names, data, dtypes=None): + """ + Add new fields to an existing array. + + The names of the fields are given with the `names` arguments, + the corresponding values with the `data` arguments. + If a single field is appended, `names`, `data` and `dtypes` do not have + to be lists but just values. + + Parameters + ---------- + base : array + Input array to extend. + names : string, sequence + String or sequence of strings corresponding to the names + of the new fields. + data : array or sequence of arrays + Array or sequence of arrays storing the fields to add to the base. + dtypes : sequence of datatypes, optional + Datatype or sequence of datatypes. + If None, the datatypes are estimated from the `data`. + + See Also + -------- + append_fields + + Returns + ------- + appended_array : np.recarray + """ + return append_fields(base, names, data=data, dtypes=dtypes, + asrecarray=True, usemask=False) + + +def _repack_fields_dispatcher(a, align=None, recurse=None): + return (a,) + + +@array_function_dispatch(_repack_fields_dispatcher) +def repack_fields(a, align=False, recurse=False): + """ + Re-pack the fields of a structured array or dtype in memory. + + The memory layout of structured datatypes allows fields at arbitrary + byte offsets. This means the fields can be separated by padding bytes, + their offsets can be non-monotonically increasing, and they can overlap. + + This method removes any overlaps and reorders the fields in memory so they + have increasing byte offsets, and adds or removes padding bytes depending + on the `align` option, which behaves like the `align` option to `np.dtype`. + + If `align=False`, this method produces a "packed" memory layout in which + each field starts at the byte the previous field ended, and any padding + bytes are removed. + + If `align=True`, this methods produces an "aligned" memory layout in which + each field's offset is a multiple of its alignment, and the total itemsize + is a multiple of the largest alignment, by adding padding bytes as needed. + + Parameters + ---------- + a : ndarray or dtype + array or dtype for which to repack the fields. + align : boolean + If true, use an "aligned" memory layout, otherwise use a "packed" layout. + recurse : boolean + If True, also repack nested structures. 
+ + Returns + ------- + repacked : ndarray or dtype + Copy of `a` with fields repacked, or `a` itself if no repacking was + needed. + + Examples + -------- + + >>> def print_offsets(d): + ... print("offsets:", [d.fields[name][1] for name in d.names]) + ... print("itemsize:", d.itemsize) + ... + >>> dt = np.dtype('u1,i4,f4', align=True) + >>> dt + dtype({'names':['f0','f1','f2'], 'formats':['u1','<i4','<f4'], 'offsets':[0,4,8], 'itemsize':16}, align=True) + >>> print_offsets(dt) + offsets: [0, 4, 8] + itemsize: 16 + >>> packed_dt = repack_fields(dt) + >>> packed_dt + dtype([('f0', 'u1'), ('f1', '<i4'), ('f2', '<f4')]) + >>> print_offsets(packed_dt) + offsets: [0, 1, 5] + itemsize: 13 + + """ + if not isinstance(a, np.dtype): + dt = repack_fields(a.dtype, align=align, recurse=recurse) + return a.astype(dt, copy=False) + + if a.names is None: + return a + + fieldinfo = [] + for name in a.names: + tup = a.fields[name] + if recurse: + fmt = repack_fields(tup[0], align=align, recurse=True) + else: + fmt = tup[0] + + if len(tup) == 3: + name = (tup[2], name) + + fieldinfo.append((name, fmt)) + + dt = np.dtype(fieldinfo, align=align) + return np.dtype((a.type, dt)) + +def _get_fields_and_offsets(dt, offset=0): + """ + Returns a flat list of (dtype, count, offset) tuples of all the + scalar fields in the dtype "dt", including nested fields, in left + to right order. + """ + fields = [] + for name in dt.names: + field = dt.fields[name] + if field[0].names is None: + count = 1 + for size in field[0].shape: + count *= size + fields.append((field[0], count, field[1] + offset)) + else: + fields.extend(_get_fields_and_offsets(field[0], field[1] + offset)) + return fields + + +def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None, + casting=None): + return (arr,) + +@array_function_dispatch(_structured_to_unstructured_dispatcher) +def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): + """ + Converts an n-D structured array into an (n+1)-D unstructured array. + + The new array will have a new last dimension equal in size to the + number of field-elements of the input array. If not supplied, the output + datatype is determined from the numpy type promotion rules applied to all + the field datatypes. + + Nested fields, as well as each element of any subarray fields, all count + as single field-elements. + + Parameters + ---------- + arr : ndarray + Structured array or dtype to convert. Cannot contain object datatype. + dtype : dtype, optional + The dtype of the output unstructured array. + copy : bool, optional + See copy argument to `ndarray.astype`. If true, always return a copy. + If false, and `dtype` requirements are satisfied, a view is returned. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `ndarray.astype`. Controls what kind of data + casting may occur. + + Returns + ------- + unstructured : ndarray + Unstructured array with one more dimension. + + Examples + -------- + + >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a + array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]), + (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])], + dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))]) + + >>> structured_to_unstructured(a) + array([[0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) + array([ 3. , 5.5, 9. , 11. 
]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + fields = _get_fields_and_offsets(arr.dtype) + n_fields = len(fields) + dts, counts, offsets = zip(*fields) + names = ['f{}'.format(n) for n in range(n_fields)] + + if dtype is None: + out_dtype = np.result_type(*[dt.base for dt in dts]) + else: + out_dtype = dtype + + # Use a series of views and casts to convert to an unstructured array: + + # first view using flattened fields (doesn't work for object arrays) + # Note: dts may include a shape for subarrays + flattened_fields = np.dtype({'names': names, + 'formats': dts, + 'offsets': offsets, + 'itemsize': arr.dtype.itemsize}) + with suppress_warnings() as sup: # until 1.16 (gh-12447) + sup.filter(FutureWarning, "Numpy has detected") + arr = arr.view(flattened_fields) + + # next cast to a packed format with all fields converted to new dtype + packed_fields = np.dtype({'names': names, + 'formats': [(out_dtype, c) for c in counts]}) + arr = arr.astype(packed_fields, copy=copy, casting=casting) + + # finally it is safe to view the packed fields as the unstructured type + return arr.view((out_dtype, sum(counts))) + +def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None, + align=None, copy=None, casting=None): + return (arr,) + +@array_function_dispatch(_unstructured_to_structured_dispatcher) +def unstructured_to_structured(arr, dtype=None, names=None, align=False, + copy=False, casting='unsafe'): + """ + Converts an n-D unstructured array into an (n-1)-D structured array. + + The last dimension of the input array is converted into a structure, with + number of field-elements equal to the size of the last dimension of the + input array. By default all output fields have the input array's dtype, but + an output structured dtype with an equal number of field-elements can be + supplied instead. + + Nested fields, as well as each element of any subarray fields, all count + towards the number of field-elements. + + Parameters + ---------- + arr : ndarray + Unstructured array or dtype to convert. + dtype : dtype, optional + The structured dtype of the output array. + names : list of strings, optional + If dtype is not supplied, this specifies the field names for the output + dtype, in order. The field dtypes will be the same as the input array. + align : boolean, optional + Whether to create an aligned memory layout. + copy : bool, optional + See copy argument to `ndarray.astype`. If true, always return a copy. + If false, and `dtype` requirements are satisfied, a view is returned. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `ndarray.astype`. Controls what kind of data + casting may occur. + + Returns + ------- + structured : ndarray + Structured array with fewer dimensions. + + Examples + -------- + + >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a = np.arange(20).reshape((4,5)) + >>> a + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]]) + >>> unstructured_to_structured(a, dt) + array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], + dtype=[('a', '>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> apply_along_fields(np.mean, b) + array([ 2.66666667, 5.33333333, 8.66666667, 11. ]) + >>> apply_along_fields(np.mean, b[['x', 'z']]) + array([ 3. , 5.5, 9. , 11. 
]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + uarr = structured_to_unstructured(arr) + return func(uarr, axis=-1) + # works and avoids axis requirement, but very, very slow: + #return np.apply_along_axis(func, -1, uarr) + +def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None): + return dst, src + +@array_function_dispatch(_assign_fields_by_name_dispatcher) +def assign_fields_by_name(dst, src, zero_unassigned=True): + """ + Assigns values from one structured array to another by field name. + + Normally in numpy >= 1.14, assignment of one structured array to another + copies fields "by position", meaning that the first field from the src is + copied to the first field of the dst, and so on, regardless of field name. + + This function instead copies "by field name", such that fields in the dst + are assigned from the identically named field in the src. This applies + recursively for nested structures. This is how structure assignment worked + in numpy >= 1.6 to <= 1.13. + + Parameters + ---------- + dst : ndarray + src : ndarray + The source and destination arrays during assignment. + zero_unassigned : bool, optional + If True, fields in the dst for which there was no matching + field in the src are filled with the value 0 (zero). This + was the behavior of numpy <= 1.13. If False, those fields + are not modified. + """ + + if dst.dtype.names is None: + dst[...] = src + return + + for name in dst.dtype.names: + if name not in src.dtype.names: + if zero_unassigned: + dst[name] = 0 + else: + assign_fields_by_name(dst[name], src[name], + zero_unassigned) + +def _require_fields_dispatcher(array, required_dtype): + return (array,) + +@array_function_dispatch(_require_fields_dispatcher) +def require_fields(array, required_dtype): + """ + Casts a structured array to a new dtype using assignment by field-name. + + This function assigns from the old to the new array by name, so the + value of a field in the output array is the value of the field with the + same name in the source array. This has the effect of creating a new + ndarray containing only the fields "required" by the required_dtype. + + If a field name in the required_dtype does not exist in the + input array, that field is created and set to 0 in the output array. + + Parameters + ---------- + a : ndarray + array to cast + required_dtype : dtype + datatype for output array + + Returns + ------- + out : ndarray + array with the new dtype, with field values copied from the fields in + the input array with the same name + + Examples + -------- + + >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + >>> require_fields(a, [('b', 'f4'), ('c', 'u1')]) + array([(1., 1), (1., 1), (1., 1), (1., 1)], + dtype=[('b', '>> require_fields(a, [('b', 'f4'), ('newf', 'u1')]) + array([(1., 0), (1., 0), (1., 0), (1., 0)], + dtype=[('b', '>> from numpy.lib import recfunctions as rfn + >>> x = np.array([1, 2,]) + >>> rfn.stack_arrays(x) is x + True + >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) + >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + ... 
dtype=[('A', '|S3'), ('B', float), ('C', float)]) + >>> test = rfn.stack_arrays((z,zz)) + >>> test + masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0) + ('c', 30.0, 300.0)], + mask = [(False, False, True) (False, False, True) (False, False, False) + (False, False, False) (False, False, False)], + fill_value = ('N/A', 1e+20, 1e+20), + dtype = [('A', '|S3'), ('B', ' '%s'" % + (cdtype, fdtype)) + # Only one field: use concatenate + if len(newdescr) == 1: + output = ma.concatenate(seqarrays) + else: + # + output = ma.masked_all((np.sum(nrecords),), newdescr) + offset = np.cumsum(np.r_[0, nrecords]) + seen = [] + for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): + names = a.dtype.names + if names is None: + output['f%i' % len(seen)][i:j] = a + else: + for name in n: + output[name][i:j] = a[name] + if name not in seen: + seen.append(name) + # + return _fix_output(_fix_defaults(output, defaults), + usemask=usemask, asrecarray=asrecarray) + + +def _find_duplicates_dispatcher( + a, key=None, ignoremask=None, return_index=None): + return (a,) + + +@array_function_dispatch(_find_duplicates_dispatcher) +def find_duplicates(a, key=None, ignoremask=True, return_index=False): + """ + Find the duplicates in a structured array along a given key + + Parameters + ---------- + a : array-like + Input array + key : {string, None}, optional + Name of the fields along which to check the duplicates. + If None, the search is performed by records + ignoremask : {True, False}, optional + Whether masked data should be discarded or considered as duplicates. + return_index : {False, True}, optional + Whether to return the indices of the duplicated values. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = [('a', int)] + >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], + ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) + ... # XXX: judging by the output, the ignoremask flag has no effect + """ + a = np.asanyarray(a).ravel() + # Get a dictionary of fields + fields = get_fieldstructure(a.dtype) + # Get the sorting data (by selecting the corresponding field) + base = a + if key: + for f in fields[key]: + base = base[f] + base = base[key] + # Get the sorting indices and the sorted data + sortidx = base.argsort() + sortedbase = base[sortidx] + sorteddata = sortedbase.filled() + # Compare the sorting data + flag = (sorteddata[:-1] == sorteddata[1:]) + # If masked data must be ignored, set the flag to false where needed + if ignoremask: + sortedmask = sortedbase.recordmask + flag[sortedmask[1:]] = False + flag = np.concatenate(([False], flag)) + # We need to take the point on the left as well (else we're missing it) + flag[:-1] = flag[:-1] + flag[1:] + duplicates = a[sortidx][flag] + if return_index: + return (duplicates, sortidx[flag]) + else: + return duplicates + + +def _join_by_dispatcher( + key, r1, r2, jointype=None, r1postfix=None, r2postfix=None, + defaults=None, usemask=None, asrecarray=None): + return (r1, r2) + + +@array_function_dispatch(_join_by_dispatcher) +def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', + defaults=None, usemask=True, asrecarray=False): + """ + Join arrays `r1` and `r2` on key `key`. + + The key should be either a string or a sequence of string corresponding + to the fields used to join the array. An exception is raised if the + `key` field cannot be found in the two input arrays. 
Neither `r1` nor + `r2` should have any duplicates along `key`: the presence of duplicates + will make the output quite unreliable. Note that duplicates are not + looked for by the algorithm. + + Parameters + ---------- + key : {string, sequence} + A string or a sequence of strings corresponding to the fields used + for comparison. + r1, r2 : arrays + Structured arrays. + jointype : {'inner', 'outer', 'leftouter'}, optional + If 'inner', returns the elements common to both r1 and r2. + If 'outer', returns the common elements as well as the elements of + r1 not in r2 and the elements of r2 not in r1. + If 'leftouter', returns the common elements and the elements of r1 + not in r2. + r1postfix : string, optional + String appended to the names of the fields of r1 that are present + in r2 but absent from the key. + r2postfix : string, optional + String appended to the names of the fields of r2 that are present + in r1 but absent from the key. + defaults : {dictionary}, optional + Dictionary mapping field names to the corresponding default values. + usemask : {True, False}, optional + Whether to return a MaskedArray (or MaskedRecords if + `asrecarray==True`) or a ndarray. + asrecarray : {False, True}, optional + Whether to return a recarray (or MaskedRecords if `usemask==True`) + or just a flexible-type ndarray. + + Notes + ----- + * The output is sorted along the key. + * A temporary array is formed by dropping the fields not in the key from + the two arrays and concatenating the result. This array is then + sorted, and the common entries selected. The output is constructed by + filling the fields with the selected entries. Matching is not + preserved if there are some duplicates... + + """ + # Check jointype + if jointype not in ('inner', 'outer', 'leftouter'): + raise ValueError( + "The 'jointype' argument should be in 'inner', " + "'outer' or 'leftouter' (got '%s' instead)" % jointype + ) + # If we have a single key, put it in a tuple + if isinstance(key, basestring): + key = (key,) + + # Check the keys + if len(set(key)) != len(key): + dup = next(x for n,x in enumerate(key) if x in key[n+1:]) + raise ValueError("duplicate join key %r" % dup) + for name in key: + if name not in r1.dtype.names: + raise ValueError('r1 does not have key field %r' % name) + if name not in r2.dtype.names: + raise ValueError('r2 does not have key field %r' % name) + + # Make sure we work with ravelled arrays + r1 = r1.ravel() + r2 = r2.ravel() + # Fixme: nb2 below is never used. Commenting out for pyflakes. 
+ # (nb1, nb2) = (len(r1), len(r2)) + nb1 = len(r1) + (r1names, r2names) = (r1.dtype.names, r2.dtype.names) + + # Check the names for collision + collisions = (set(r1names) & set(r2names)) - set(key) + if collisions and not (r1postfix or r2postfix): + msg = "r1 and r2 contain common names, r1postfix and r2postfix " + msg += "can't both be empty" + raise ValueError(msg) + + # Make temporary arrays of just the keys + # (use order of keys in `r1` for back-compatibility) + key1 = [ n for n in r1names if n in key ] + r1k = _keep_fields(r1, key1) + r2k = _keep_fields(r2, key1) + + # Concatenate the two arrays for comparison + aux = ma.concatenate((r1k, r2k)) + idx_sort = aux.argsort(order=key) + aux = aux[idx_sort] + # + # Get the common keys + flag_in = ma.concatenate(([False], aux[1:] == aux[:-1])) + flag_in[:-1] = flag_in[1:] + flag_in[:-1] + idx_in = idx_sort[flag_in] + idx_1 = idx_in[(idx_in < nb1)] + idx_2 = idx_in[(idx_in >= nb1)] - nb1 + (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) + if jointype == 'inner': + (r1spc, r2spc) = (0, 0) + elif jointype == 'outer': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) + (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) + elif jointype == 'leftouter': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) + # Select the entries from each input + (s1, s2) = (r1[idx_1], r2[idx_2]) + # + # Build the new description of the output array ....... + # Start with the key fields + ndtype = get_fieldspec(r1k.dtype) + + # Add the fields from r1 + for fname, fdtype in get_fieldspec(r1.dtype): + if fname not in key: + ndtype.append((fname, fdtype)) + + # Add the fields from r2 + for fname, fdtype in get_fieldspec(r2.dtype): + # Have we seen the current name already ? + # we need to rebuild this list every time + names = list(name for name, dtype in ndtype) + try: + nameidx = names.index(fname) + except ValueError: + #... we haven't: just add the description to the current list + ndtype.append((fname, fdtype)) + else: + # collision + _, cdtype = ndtype[nameidx] + if fname in key: + # The current field is part of the key: take the largest dtype + ndtype[nameidx] = (fname, max(fdtype, cdtype)) + else: + # The current field is not part of the key: add the suffixes, + # and place the new field adjacent to the old one + ndtype[nameidx:nameidx + 1] = [ + (fname + r1postfix, cdtype), + (fname + r2postfix, fdtype) + ] + # Rebuild a dtype from the new fields + ndtype = np.dtype(ndtype) + # Find the largest nb of common fields : + # r1cmn and r2cmn should be equal, but... 
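+ # taking the max keeps the output long enough even if the two counts + # disagree (e.g. when `key` is not duplicate-free, which the docstring + # warns against)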
+ cmn = max(r1cmn, r2cmn) + # Construct an empty array + output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) + names = output.dtype.names + for f in r1names: + selected = s1[f] + if f not in names or (f in r2names and not r2postfix and f not in key): + f += r1postfix + current = output[f] + current[:r1cmn] = selected[:r1cmn] + if jointype in ('outer', 'leftouter'): + current[cmn:cmn + r1spc] = selected[r1cmn:] + for f in r2names: + selected = s2[f] + if f not in names or (f in r1names and not r1postfix and f not in key): + f += r2postfix + current = output[f] + current[:r2cmn] = selected[:r2cmn] + if (jointype == 'outer') and r2spc: + current[-r2spc:] = selected[r2cmn:] + # Sort and finalize the output + output.sort(order=key) + kwargs = dict(usemask=usemask, asrecarray=asrecarray) + return _fix_output(_fix_defaults(output, defaults), **kwargs) + + +def _rec_join_dispatcher( + key, r1, r2, jointype=None, r1postfix=None, r2postfix=None, + defaults=None): + return (r1, r2) + + +@array_function_dispatch(_rec_join_dispatcher) +def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', + defaults=None): + """ + Join arrays `r1` and `r2` on keys. + Alternative to join_by, that always returns a np.recarray. + + See Also + -------- + join_by : equivalent function + """ + kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, + defaults=defaults, usemask=False, asrecarray=True) + return join_by(key, r1, r2, **kwargs) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/recfunctions.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/recfunctions.pyc new file mode 100644 index 0000000..8451f57 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/recfunctions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/scimath.py b/project/venv/lib/python2.7/site-packages/numpy/lib/scimath.py new file mode 100644 index 0000000..9ca0068 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/scimath.py @@ -0,0 +1,600 @@ +""" +Wrapper functions to more user-friendly calling of certain math functions +whose output data-type is different than the input data-type in certain +domains of the input. + +For example, for functions like `log` with branch cuts, the versions in this +module provide the mathematically valid answers in the complex plane:: + + >>> import math + >>> from numpy.lib import scimath + >>> scimath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions are +correctly handled. See their respective docstrings for specific examples. + +""" +from __future__ import division, absolute_import, print_function + +import numpy.core.numeric as nx +import numpy.core.numerictypes as nt +from numpy.core.numeric import asarray, any +from numpy.core.overrides import array_function_dispatch +from numpy.lib.type_check import isreal + + +__all__ = [ + 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', + 'arctanh' + ] + + +_ln2 = nx.log(2.0) + + +def _tocomplex(arr): + """Convert its input `arr` to a complex array. + + The input is returned as a complex array of the smallest type that will fit + the original data: types like single, byte, short, etc. become csingle, + while others become cdouble. + + A copy of the input is always made. + + Parameters + ---------- + arr : array + + Returns + ------- + array + An array with the same input data as the input but in complex form. 
+ + Examples + -------- + + First, consider an input of type short: + + >>> a = np.array([1,2,3],np.short) + + >>> ac = np.lib.scimath._tocomplex(a); ac + array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> ac.dtype + dtype('complex64') + + If the input is of type double, the output is correspondingly of the + complex double type as well: + + >>> b = np.array([1,2,3],np.double) + + >>> bc = np.lib.scimath._tocomplex(b); bc + array([ 1.+0.j, 2.+0.j, 3.+0.j]) + + >>> bc.dtype + dtype('complex128') + + Note that even if the input was complex to begin with, a copy is still + made, since the astype() method always copies: + + >>> c = np.array([1,2,3],np.csingle) + + >>> cc = np.lib.scimath._tocomplex(c); cc + array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> c *= 2; c + array([ 2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + + >>> cc + array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + """ + if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, + nt.ushort, nt.csingle)): + return arr.astype(nt.csingle) + else: + return arr.astype(nt.cdouble) + + +def _fix_real_lt_zero(x): + """Convert `x` to complex if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_real_lt_zero([-1,2]) + array([-1.+0.j, 2.+0.j]) + + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = _tocomplex(x) + return x + + +def _fix_int_lt_zero(x): + """Convert `x` to double if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_int_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_int_lt_zero([-1,2]) + array([-1., 2.]) + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = x * 1.0 + return x + + +def _fix_real_abs_gt_1(x): + """Convert `x` to complex if it has real components x_i with abs(x_i)>1. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) + array([0, 1]) + + >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) + array([ 0.+0.j, 2.+0.j]) + """ + x = asarray(x) + if any(isreal(x) & (abs(x) > 1)): + x = _tocomplex(x) + return x + + +def _unary_dispatcher(x): + return (x,) + + +@array_function_dispatch(_unary_dispatcher) +def sqrt(x): + """ + Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike `numpy.sqrt` which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray or scalar + The square root of `x`. If `x` was a scalar, so is `out`, + otherwise an array is returned. 
+
+    See Also
+    --------
+    numpy.sqrt
+
+    Examples
+    --------
+    For real, non-negative inputs this works just like `numpy.sqrt`:
+
+    >>> np.lib.scimath.sqrt(1)
+    1.0
+    >>> np.lib.scimath.sqrt([1, 4])
+    array([ 1.,  2.])
+
+    But it automatically handles negative inputs:
+
+    >>> np.lib.scimath.sqrt(-1)
+    (0.0+1.0j)
+    >>> np.lib.scimath.sqrt([-1,4])
+    array([ 0.+1.j,  2.+0.j])
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.sqrt(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log(x):
+    """
+    Compute the natural logarithm of `x`.
+
+    Return the "principal value" (for a description of this, see `numpy.log`)
+    of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
+    returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
+    complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like
+        The value(s) whose log is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The log of the `x` value(s). If `x` was a scalar, so is `out`,
+        otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.log
+
+    Notes
+    -----
+    For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
+    (note, however, that otherwise `numpy.log` and this `log` are identical,
+    i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
+    notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+    >>> np.emath.log(np.exp(1))
+    1.0
+
+    Negative arguments are handled "correctly" (recall that
+    ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
+
+    >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
+    True
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log10(x):
+    """
+    Compute the logarithm base 10 of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
+    is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
+    returns ``inf``). Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+        The value(s) whose log base 10 is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
+        otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.log10
+
+    Notes
+    -----
+    For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
+    (note, however, that otherwise `numpy.log10` and this `log10` are
+    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+
+    (We set the printing precision so the example can be auto-tested)
+
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.log10(10**1)
+    1.0
+
+    >>> np.emath.log10([-10**1, -10**2, 10**2])
+    array([ 1.+1.3644j,  2.+1.3644j,  2.+0.j   ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log10(x)
+
+
+def _logn_dispatcher(n, x):
+    return (n, x,)
+
+
+@array_function_dispatch(_logn_dispatcher)
+def logn(n, x):
+    """
+    Take log base n of x.
+
+    If `x` contains negative inputs, the answer is computed and returned in the
+    complex domain.
+
+    Parameters
+    ----------
+    n : array_like
+        The base(s) in which the log is taken.
+    x : array_like
+        The value(s) whose log base `n` is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The log base `n` of the `x` value(s). If `x` was a scalar, so is
+        `out`, otherwise an array is returned.
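+
+    See Also
+    --------
+    log, log2, log10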
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.lib.scimath.logn(2, [4, 8])
+    array([ 2.,  3.])
+    >>> np.lib.scimath.logn(2, [-4, -8, 8])
+    array([ 2.+4.5324j,  3.+4.5324j,  3.+0.j   ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    n = _fix_real_lt_zero(n)
+    return nx.log(x)/nx.log(n)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log2(x):
+    """
+    Compute the logarithm base 2 of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
+    a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
+    ``inf``). Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like
+        The value(s) whose log base 2 is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
+        otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.log2
+
+    Notes
+    -----
+    For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
+    (note, however, that otherwise `numpy.log2` and this `log2` are
+    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+    We set the printing precision so the example can be auto-tested:
+
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.log2(8)
+    3.0
+    >>> np.emath.log2([-4, -8, 8])
+    array([ 2.+4.5324j,  3.+4.5324j,  3.+0.j   ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log2(x)
+
+
+def _power_dispatcher(x, p):
+    return (x, p)
+
+
+@array_function_dispatch(_power_dispatcher)
+def power(x, p):
+    """
+    Return x to the power p, (x**p).
+
+    If `x` contains negative values, the output is converted to the
+    complex domain.
+
+    Parameters
+    ----------
+    x : array_like
+        The input value(s).
+    p : array_like of ints
+        The power(s) to which `x` is raised. If `x` contains multiple values,
+        `p` has to either be a scalar, or contain the same number of values
+        as `x`. In the latter case, the result is
+        ``x[0]**p[0], x[1]**p[1], ...``.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
+        otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.power
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.lib.scimath.power([2, 4], 2)
+    array([ 4, 16])
+    >>> np.lib.scimath.power([2, 4], -2)
+    array([ 0.25  ,  0.0625])
+    >>> np.lib.scimath.power([-2, 4], 2)
+    array([  4.+0.j,  16.+0.j])
+
+    """
+    x = _fix_real_lt_zero(x)
+    p = _fix_int_lt_zero(p)
+    return nx.power(x, p)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arccos(x):
+    """
+    Compute the inverse cosine of x.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
+    `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[0, \\pi]`.  Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+        The value(s) whose arccos is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
+        is `out`, otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.arccos
+
+    Notes
+    -----
+    For an arccos() that returns ``NAN`` when real `x` is not in the
+    interval ``[-1,1]``, use `numpy.arccos`.
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arccos(1) # a scalar is returned
+    0.0
+
+    >>> np.emath.arccos([1,2])
+    array([ 0.-0.j   ,  0.+1.317j])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arccos(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arcsin(x):
+    """
+    Compute the inverse sine of x.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
+    `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[-\\pi/2, \\pi/2]`.  Otherwise, the complex principal value is
+    returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+        The value(s) whose arcsin is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
+        is `out`, otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.arcsin
+
+    Notes
+    -----
+    For an arcsin() that returns ``NAN`` when real `x` is not in the
+    interval ``[-1,1]``, use `numpy.arcsin`.
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arcsin(0)
+    0.0
+
+    >>> np.emath.arcsin([0,1])
+    array([ 0.    ,  1.5708])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arcsin(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arctanh(x):
+    """
+    Compute the inverse hyperbolic tangent of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arctanh`) of `arctanh(x)`. For real `x` such that
+    `abs(x) < 1`, this is a real number. If `abs(x) > 1`, or if `x` is
+    complex, the result is complex. Finally, `x = 1` returns ``inf`` and
+    `x = -1` returns ``-inf``.
+
+    Parameters
+    ----------
+    x : array_like
+        The value(s) whose arctanh is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
+        a scalar, so is `out`, otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.arctanh
+
+    Notes
+    -----
+    For an arctanh() that returns ``NAN`` when real `x` is not in the
+    interval ``(-1,1)``, use `numpy.arctanh` (the latter, however, does
+    return +/-inf for `x = +/-1`).
+ + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.arctanh(np.eye(2)) + array([[ Inf, 0.], + [ 0., Inf]]) + >>> np.emath.arctanh([1j]) + array([ 0.+0.7854j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arctanh(x) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/scimath.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/scimath.pyc new file mode 100644 index 0000000..b62d51c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/scimath.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/setup.py b/project/venv/lib/python2.7/site-packages/numpy/lib/setup.py new file mode 100644 index 0000000..d342410 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/setup.py @@ -0,0 +1,12 @@ +from __future__ import division, print_function + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('lib', parent_package, top_path) + config.add_data_dir('tests') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/setup.pyc new file mode 100644 index 0000000..4a32112 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/shape_base.py b/project/venv/lib/python2.7/site-packages/numpy/lib/shape_base.py new file mode 100644 index 0000000..f56c4f4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/shape_base.py @@ -0,0 +1,1243 @@ +from __future__ import division, absolute_import, print_function + +import functools +import warnings + +import numpy.core.numeric as _nx +from numpy.core.numeric import ( + asarray, zeros, outer, concatenate, array, asanyarray + ) +from numpy.core.fromnumeric import product, reshape, transpose +from numpy.core.multiarray import normalize_axis_index +from numpy.core import overrides +from numpy.core import vstack, atleast_3d +from numpy.core.shape_base import ( + _arrays_for_stack_dispatcher, _warn_for_nonsequence) +from numpy.lib.index_tricks import ndindex +from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells + + +__all__ = [ + 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', + 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis', + 'put_along_axis' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _make_along_axis_idx(arr_shape, indices, axis): + # compute dimensions to iterate over + if not _nx.issubdtype(indices.dtype, _nx.integer): + raise IndexError('`indices` must be an integer array') + if len(arr_shape) != indices.ndim: + raise ValueError( + "`indices` and `arr` must have the same number of dimensions") + shape_ones = (1,) * indices.ndim + dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim)) + + # build a fancy index, consisting of orthogonal aranges, with the + # requested index inserted at the right location + fancy_index = [] + for dim, n in zip(dest_dims, arr_shape): + if dim is None: + fancy_index.append(indices) + else: + ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:] + fancy_index.append(_nx.arange(n).reshape(ind_shape)) + + 
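+    # At this point `fancy_index` holds `indices` at position `axis` and an
+    # orthogonal arange for every other axis; indexing with the tuple below
+    # broadcasts them into one integer index per 1d slice.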
+    return tuple(fancy_index)
+
+
+def _take_along_axis_dispatcher(arr, indices, axis):
+    return (arr, indices)
+
+
+@array_function_dispatch(_take_along_axis_dispatcher)
+def take_along_axis(arr, indices, axis):
+    """
+    Take values from the input array by matching 1d index and data slices.
+
+    This iterates over matching 1d slices oriented along the specified axis in
+    the index and data arrays, and uses the former to look up values in the
+    latter. These slices can be different lengths.
+
+    Functions returning an index along an axis, like `argsort` and
+    `argpartition`, produce suitable indices for this function.
+
+    .. versionadded:: 1.15.0
+
+    Parameters
+    ----------
+    arr: ndarray (Ni..., M, Nk...)
+        Source array.
+    indices: ndarray (Ni..., J, Nk...)
+        Indices to take along each 1d slice of `arr`. This must match the
+        dimension of arr, but dimensions Ni and Nj only need to broadcast
+        against `arr`.
+    axis: int
+        The axis to take 1d slices along. If axis is None, the input array is
+        treated as if it had first been flattened to 1d, for consistency with
+        `sort` and `argsort`.
+
+    Returns
+    -------
+    out: ndarray (Ni..., J, Nk...)
+        The indexed result.
+
+    Notes
+    -----
+    This is equivalent to (but faster than) the following use of `ndindex` and
+    `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+        Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+        J = indices.shape[axis]  # Need not equal M
+        out = np.empty(Ni + (J,) + Nk)
+
+        for ii in ndindex(Ni):
+            for kk in ndindex(Nk):
+                a_1d       = a      [ii + s_[:,] + kk]
+                indices_1d = indices[ii + s_[:,] + kk]
+                out_1d     = out    [ii + s_[:,] + kk]
+                for j in range(J):
+                    out_1d[j] = a_1d[indices_1d[j]]
+
+    Equivalently, eliminating the inner loop, the last two lines would be::
+
+        out_1d[:] = a_1d[indices_1d]
+
+    See Also
+    --------
+    take : Take along an axis, using the same indices for every 1d slice
+    put_along_axis :
+        Put values into the destination array by matching 1d index and data slices
+
+    Examples
+    --------
+
+    For this sample array
+
+    >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+    We can sort either by using sort directly, or argsort and this function
+
+    >>> np.sort(a, axis=1)
+    array([[10, 20, 30],
+           [40, 50, 60]])
+    >>> ai = np.argsort(a, axis=1); ai
+    array([[0, 2, 1],
+           [1, 2, 0]], dtype=int64)
+    >>> np.take_along_axis(a, ai, axis=1)
+    array([[10, 20, 30],
+           [40, 50, 60]])
+
+    The same works for max and min, if you expand the dimensions:
+
+    >>> np.expand_dims(np.max(a, axis=1), axis=1)
+    array([[30],
+           [60]])
+    >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+    >>> ai
+    array([[1],
+           [0]], dtype=int64)
+    >>> np.take_along_axis(a, ai, axis=1)
+    array([[30],
+           [60]])
+
+    If we want to get the max and min at the same time, we can stack the
+    indices first
+
+    >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)
+    >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)
+    >>> ai = np.concatenate([ai_min, ai_max], axis=1)
+    >>> ai
+    array([[0, 1],
+           [1, 0]], dtype=int64)
+    >>> np.take_along_axis(a, ai, axis=1)
+    array([[10, 30],
+           [40, 60]])
+    """
+    # normalize inputs
+    if axis is None:
+        arr = arr.flat
+        arr_shape = (len(arr),)  # flatiter has no .shape
+        axis = 0
+    else:
+        axis = normalize_axis_index(axis, arr.ndim)
+        arr_shape = arr.shape
+
+    # use the fancy index
+    return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+
+
+def _put_along_axis_dispatcher(arr, indices, values, axis):
+    return (arr, indices, values)
+
+
+@array_function_dispatch(_put_along_axis_dispatcher)
+def 
put_along_axis(arr, indices, values, axis): + """ + Put values into the destination array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to place values into the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + arr: ndarray (Ni..., M, Nk...) + Destination array. + indices: ndarray (Ni..., J, Nk...) + Indices to change along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast + against `arr`. + values: array_like (Ni..., J, Nk...) + values to insert at those indices. Its shape and dimension are + broadcast to match that of `indices`. + axis: int + The axis to take 1d slices along. If axis is None, the destination + array is treated as if a flattened 1d view had been created of it. + + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + values_1d = values [ii + s_[:,] + kk] + for j in range(J): + a_1d[indices_1d[j]] = values_1d[j] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + a_1d[indices_1d] = values_1d + + See Also + -------- + take_along_axis : + Take values from the input array by matching 1d index and data slices + + Examples + -------- + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can replace the maximum values with: + + >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1) + >>> ai + array([[1], + [0]], dtype=int64) + >>> np.put_along_axis(a, ai, 99, axis=1) + >>> a + array([[10, 99, 20], + [99, 40, 50]]) + + """ + # normalize inputs + if axis is None: + arr = arr.flat + axis = 0 + arr_shape = (len(arr),) # flatiter has no .shape + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + + +def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): + return (arr,) + + +@array_function_dispatch(_apply_along_axis_dispatcher) +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + Apply a function to 1-D slices along the given axis. + + Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a` + is a 1-D slice of `arr` along `axis`. + + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + f = func1d(arr[ii + s_[:,] + kk]) + Nj = f.shape + for jj in ndindex(Nj): + out[ii + jj + kk] = f[jj] + + Equivalently, eliminating the inner loop, this can be expressed as:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) + + Parameters + ---------- + func1d : function (M,) -> (Nj...) + This function should accept 1-D arrays. It is applied to 1-D + slices of `arr` along the specified axis. 
+ axis : integer + Axis along which `arr` is sliced. + arr : ndarray (Ni..., M, Nk...) + Input array. + args : any + Additional arguments to `func1d`. + kwargs : any + Additional named arguments to `func1d`. + + .. versionadded:: 1.9.0 + + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The output array. The shape of `out` is identical to the shape of + `arr`, except along the `axis` dimension. This axis is removed, and + replaced with new dimensions equal to the shape of the return value + of `func1d`. So if `func1d` returns a scalar `out` will have one + fewer dimensions than `arr`. + + See Also + -------- + apply_over_axes : Apply a function repeatedly over multiple axes. + + Examples + -------- + >>> def my_func(a): + ... \"\"\"Average first and last element of a 1-D array\"\"\" + ... return (a[0] + a[-1]) * 0.5 + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(my_func, 0, b) + array([ 4., 5., 6.]) + >>> np.apply_along_axis(my_func, 1, b) + array([ 2., 5., 8.]) + + For a function that returns a 1D array, the number of dimensions in + `outarr` is the same as `arr`. + + >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) + >>> np.apply_along_axis(sorted, 1, b) + array([[1, 7, 8], + [3, 4, 9], + [2, 5, 6]]) + + For a function that returns a higher dimensional array, those dimensions + are inserted in place of the `axis` dimension. + + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(np.diag, -1, b) + array([[[1, 0, 0], + [0, 2, 0], + [0, 0, 3]], + [[4, 0, 0], + [0, 5, 0], + [0, 0, 6]], + [[7, 0, 0], + [0, 8, 0], + [0, 0, 9]]]) + """ + # handle negative axes + arr = asanyarray(arr) + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + + # arr, with the iteration axis at the end + in_dims = list(range(nd)) + inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis]) + + # compute indices for the iteration axes, and append a trailing ellipsis to + # prevent 0d arrays decaying to scalars, which fixes gh-8642 + inds = ndindex(inarr_view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + + # invoke the function on the first item + try: + ind0 = next(inds) + except StopIteration: + raise ValueError('Cannot apply_along_axis when any iteration dimensions are 0') + res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs)) + + # build a buffer for storing evaluations of func1d. + # remove the requested axis, and add the new ones on the end. + # laid out so that each write is contiguous. + # for a tuple index inds, buff[inds] = func1d(inarr_view[inds]) + buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype) + + # permutation of axes such that out = buff.transpose(buff_permute) + buff_dims = list(range(buff.ndim)) + buff_permute = ( + buff_dims[0 : axis] + + buff_dims[buff.ndim-res.ndim : buff.ndim] + + buff_dims[axis : buff.ndim-res.ndim] + ) + + # matrices have a nasty __array_prepare__ and __array_wrap__ + if not isinstance(res, matrix): + buff = res.__array_prepare__(buff) + + # save the first result, then compute and save all remaining results + buff[ind0] = res + for ind in inds: + buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs)) + + if not isinstance(res, matrix): + # wrap the array, to preserve subclasses + buff = res.__array_wrap__(buff) + + # finally, rotate the inserted axes back to where they belong + return transpose(buff, buff_permute) + + else: + # matrices have to be transposed first, because they collapse dimensions! 
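+        # Wrapping before the transpose would collapse the n-d buffer to 2-D,
+        # so restore the axis order first and wrap the transposed result.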
+ out_arr = transpose(buff, buff_permute) + return res.__array_wrap__(out_arr) + + +def _apply_over_axes_dispatcher(func, a, axes): + return (a,) + + +@array_function_dispatch(_apply_over_axes_dispatcher) +def apply_over_axes(func, a, axes): + """ + Apply a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, where `axis` is the first + element of `axes`. The result `res` of the function call must have + either the same dimensions as `a` or one less dimension. If `res` + has one less dimension than `a`, a dimension is inserted before + `axis`. The call to `func` is then repeated for each axis in `axes`, + with `res` as the first argument. + + Parameters + ---------- + func : function + This function must take two arguments, `func(a, axis)`. + a : array_like + Input array. + axes : array_like + Axes over which `func` is applied; the elements must be integers. + + Returns + ------- + apply_over_axis : ndarray + The output array. The number of dimensions is the same as `a`, + but the shape can be different. This depends on whether `func` + changes the shape of its output with respect to its input. + + See Also + -------- + apply_along_axis : + Apply a function to 1-D slices of an array along the given axis. + + Notes + ------ + This function is equivalent to tuple axis arguments to reorderable ufuncs + with keepdims=True. Tuple axis arguments to ufuncs have been available since + version 1.7.0. + + Examples + -------- + >>> a = np.arange(24).reshape(2,3,4) + >>> a + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + + Sum over axes 0 and 2. The result has same number of dimensions + as the original array: + + >>> np.apply_over_axes(np.sum, a, [0,2]) + array([[[ 60], + [ 92], + [124]]]) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.sum(a, axis=(0,2), keepdims=True) + array([[[ 60], + [ 92], + [124]]]) + + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +def _expand_dims_dispatcher(a, axis): + return (a,) + + +@array_function_dispatch(_expand_dims_dispatcher) +def expand_dims(a, axis): + """ + Expand the shape of an array. + + Insert a new axis that will appear at the `axis` position in the expanded + array shape. + + .. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor + ``axis > a.ndim`` raised errors or put the new axis where documented. + Those axis values are now deprecated and will raise an AxisError in the + future. + + Parameters + ---------- + a : array_like + Input array. + axis : int + Position in the expanded axes where the new axis is placed. + + Returns + ------- + res : ndarray + Output array. The number of dimensions is one greater than that of + the input array. 
+ + See Also + -------- + squeeze : The inverse operation, removing singleton dimensions + reshape : Insert, remove, and combine dimensions, and resize existing ones + doc.indexing, atleast_1d, atleast_2d, atleast_3d + + Examples + -------- + >>> x = np.array([1,2]) + >>> x.shape + (2,) + + The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``: + + >>> y = np.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + + >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis] + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + Note that some examples may use ``None`` instead of ``np.newaxis``. These + are the same objects: + + >>> np.newaxis is None + True + + """ + if isinstance(a, matrix): + a = asarray(a) + else: + a = asanyarray(a) + + shape = a.shape + if axis > a.ndim or axis < -a.ndim - 1: + # 2017-05-17, 1.13.0 + warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are " + "deprecated and will raise an AxisError in the future.", + DeprecationWarning, stacklevel=2) + # When the deprecation period expires, delete this if block, + if axis < 0: + axis = axis + a.ndim + 1 + # and uncomment the following line. + # axis = normalize_axis_index(axis, a.ndim + 1) + return a.reshape(shape[:axis] + (1,) + shape[axis:]) + + +row_stack = vstack + + +def _column_stack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_column_stack_dispatcher) +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + + """ + _warn_for_nonsequence(tup) + arrays = [] + for v in tup: + arr = array(v, copy=False, subok=True) + if arr.ndim < 2: + arr = array(arr, copy=False, subok=True, ndmin=2).T + arrays.append(arr) + return _nx.concatenate(arrays, 1) + + +def _dstack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_dstack_dispatcher) +def dstack(tup): + """ + Stack arrays in sequence depth wise (along third axis). + + This is equivalent to concatenation along the third axis after 2-D arrays + of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape + `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by + `dsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of arrays + The arrays must have the same shape along all but the third axis. + 1-D or 2-D arrays must have the same shape. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 3-D. + + See Also + -------- + stack : Join a sequence of arrays along a new axis. + vstack : Stack along first axis. 
+ hstack : Stack along second axis. + concatenate : Join a sequence of arrays along an existing axis. + dsplit : Split array along third axis. + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.dstack((a,b)) + array([[[1, 2], + [2, 3], + [3, 4]]]) + + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.dstack((a,b)) + array([[[1, 2]], + [[2, 3]], + [[3, 4]]]) + + """ + _warn_for_nonsequence(tup) + return _nx.concatenate([atleast_3d(_m) for _m in tup], 2) + + +def _replace_zero_by_x_arrays(sub_arys): + for i in range(len(sub_arys)): + if _nx.ndim(sub_arys[i]) == 0: + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + return sub_arys + + +def _array_split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_array_split_dispatcher) +def array_split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays. + + Please refer to the ``split`` documentation. The only difference + between these functions is that ``array_split`` allows + `indices_or_sections` to be an integer that does *not* equally + divide the axis. For an array of length l that should be split + into n sections, it returns l % n sub-arrays of size l//n + 1 + and the rest of size l//n. + + See Also + -------- + split : Split array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(8.0) + >>> np.array_split(x, 3) + [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])] + + >>> x = np.arange(7.0) + >>> np.array_split(x, 3) + [array([ 0., 1., 2.]), array([ 3., 4.]), array([ 5., 6.])] + + """ + try: + Ntotal = ary.shape[axis] + except AttributeError: + Ntotal = len(ary) + try: + # handle array case. + Nsections = len(indices_or_sections) + 1 + div_points = [0] + list(indices_or_sections) + [Ntotal] + except TypeError: + # indices_or_sections is a scalar, not an array. + Nsections = int(indices_or_sections) + if Nsections <= 0: + raise ValueError('number sections must be larger than 0.') + Neach_section, extras = divmod(Ntotal, Nsections) + section_sizes = ([0] + + extras * [Neach_section+1] + + (Nsections-extras) * [Neach_section]) + div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() + + sub_arys = [] + sary = _nx.swapaxes(ary, axis, 0) + for i in range(Nsections): + st = div_points[i] + end = div_points[i + 1] + sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) + + return sub_arys + + +def _split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_split_dispatcher) +def split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays. + + Parameters + ---------- + ary : ndarray + Array to be divided into sub-arrays. + indices_or_sections : int or 1-D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If such a split is not possible, + an error is raised. + + If `indices_or_sections` is a 1-D array of sorted integers, the entries + indicate where along `axis` the array is split. For example, + ``[2, 3]`` would, for ``axis=0``, result in + + - ary[:2] + - ary[2:3] + - ary[3:] + + If an index exceeds the dimension of the array along `axis`, + an empty sub-array is returned correspondingly. + axis : int, optional + The axis along which to split, default is 0. 
+ + Returns + ------- + sub-arrays : list of ndarrays + A list of sub-arrays. + + Raises + ------ + ValueError + If `indices_or_sections` is given as an integer, but + a split does not result in equal division. + + See Also + -------- + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. Does not raise an exception if + an equal division cannot be made. + hsplit : Split array into multiple sub-arrays horizontally (column-wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + + Examples + -------- + >>> x = np.arange(9.0) + >>> np.split(x, 3) + [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])] + + >>> x = np.arange(8.0) + >>> np.split(x, [3, 5, 6, 10]) + [array([ 0., 1., 2.]), + array([ 3., 4.]), + array([ 5.]), + array([ 6., 7.]), + array([], dtype=float64)] + + """ + try: + len(indices_or_sections) + except TypeError: + sections = indices_or_sections + N = ary.shape[axis] + if N % sections: + raise ValueError( + 'array split does not result in an equal division') + res = array_split(ary, indices_or_sections, axis) + return res + + +def _hvdsplit_dispatcher(ary, indices_or_sections): + return (ary, indices_or_sections) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def hsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays horizontally (column-wise). + + Please refer to the `split` documentation. `hsplit` is equivalent + to `split` with ``axis=1``, the array is always split along the second + axis regardless of the array dimension. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [ 12., 13., 14., 15.]]) + >>> np.hsplit(x, 2) + [array([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [ 12., 13.]]), + array([[ 2., 3.], + [ 6., 7.], + [ 10., 11.], + [ 14., 15.]])] + >>> np.hsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [ 12., 13., 14.]]), + array([[ 3.], + [ 7.], + [ 11.], + [ 15.]]), + array([], dtype=float64)] + + With a higher dimensional array the split is still along the second axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[ 0., 1.], + [ 2., 3.]], + [[ 4., 5.], + [ 6., 7.]]]) + >>> np.hsplit(x, 2) + [array([[[ 0., 1.]], + [[ 4., 5.]]]), + array([[[ 2., 3.]], + [[ 6., 7.]]])] + + """ + if _nx.ndim(ary) == 0: + raise ValueError('hsplit only works on arrays of 1 or more dimensions') + if ary.ndim > 1: + return split(ary, indices_or_sections, 1) + else: + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def vsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays vertically (row-wise). + + Please refer to the ``split`` documentation. ``vsplit`` is equivalent + to ``split`` with `axis=0` (default), the array is always split along the + first axis regardless of the array dimension. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. 
+ + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [ 12., 13., 14., 15.]]) + >>> np.vsplit(x, 2) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]]), + array([[ 8., 9., 10., 11.], + [ 12., 13., 14., 15.]])] + >>> np.vsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + array([[ 12., 13., 14., 15.]]), + array([], dtype=float64)] + + With a higher dimensional array the split is still along the first axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[ 0., 1.], + [ 2., 3.]], + [[ 4., 5.], + [ 6., 7.]]]) + >>> np.vsplit(x, 2) + [array([[[ 0., 1.], + [ 2., 3.]]]), + array([[[ 4., 5.], + [ 6., 7.]]])] + + """ + if _nx.ndim(ary) < 2: + raise ValueError('vsplit only works on arrays of 2 or more dimensions') + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def dsplit(ary, indices_or_sections): + """ + Split array into multiple sub-arrays along the 3rd axis (depth). + + Please refer to the `split` documentation. `dsplit` is equivalent + to `split` with ``axis=2``, the array is always split along the third + axis provided the array dimension is greater than or equal to 3. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> x + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [ 12., 13., 14., 15.]]]) + >>> np.dsplit(x, 2) + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [ 12., 13.]]]), + array([[[ 2., 3.], + [ 6., 7.]], + [[ 10., 11.], + [ 14., 15.]]])] + >>> np.dsplit(x, np.array([3, 6])) + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [ 12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[ 11.], + [ 15.]]]), + array([], dtype=float64)] + + """ + if _nx.ndim(ary) < 3: + raise ValueError('dsplit only works on arrays of 3 or more dimensions') + return split(ary, indices_or_sections, 2) + +def get_array_prepare(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None + """ + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_prepare__) for i, x in enumerate(args) + if hasattr(x, '__array_prepare__')) + if wrappers: + return wrappers[-1][-1] + return None + +def get_array_wrap(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None + """ + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_wrap__) for i, x in enumerate(args) + if hasattr(x, '__array_wrap__')) + if wrappers: + return wrappers[-1][-1] + return None + + +def _kron_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_kron_dispatcher) +def kron(a, b): + """ + Kronecker product of two arrays. + + Computes the Kronecker product, a composite array made of blocks of the + second array scaled by the first. + + Parameters + ---------- + a, b : array_like + + Returns + ------- + out : ndarray + + See Also + -------- + outer : The outer product + + Notes + ----- + The function assumes that the number of dimensions of `a` and `b` + are the same, if necessary prepending the smallest with ones. + If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`, + the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`. 
+ The elements are products of elements from `a` and `b`, organized + explicitly by:: + + kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] + + where:: + + kt = it * st + jt, t = 0,...,N + + In the common 2-D case (N=1), the block structure can be visualized:: + + [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], + [ ... ... ], + [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] + + + Examples + -------- + >>> np.kron([1,10,100], [5,6,7]) + array([ 5, 6, 7, 50, 60, 70, 500, 600, 700]) + >>> np.kron([5,6,7], [1,10,100]) + array([ 5, 50, 500, 6, 60, 600, 7, 70, 700]) + + >>> np.kron(np.eye(2), np.ones((2,2))) + array([[ 1., 1., 0., 0.], + [ 1., 1., 0., 0.], + [ 0., 0., 1., 1.], + [ 0., 0., 1., 1.]]) + + >>> a = np.arange(100).reshape((2,5,2,5)) + >>> b = np.arange(24).reshape((2,3,4)) + >>> c = np.kron(a,b) + >>> c.shape + (2, 10, 6, 20) + >>> I = (1,3,0,2) + >>> J = (0,2,1) + >>> J1 = (0,) + J # extend to ndim=4 + >>> S1 = (1,) + b.shape + >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) + >>> c[K] == a[I]*b[J] + True + + """ + b = asanyarray(b) + a = array(a, copy=False, subok=True, ndmin=b.ndim) + ndb, nda = b.ndim, a.ndim + if (nda == 0 or ndb == 0): + return _nx.multiply(a, b) + as_ = a.shape + bs = b.shape + if not a.flags.contiguous: + a = reshape(a, as_) + if not b.flags.contiguous: + b = reshape(b, bs) + nd = ndb + if (ndb != nda): + if (ndb > nda): + as_ = (1,)*(ndb-nda) + as_ + else: + bs = (1,)*(nda-ndb) + bs + nd = nda + result = outer(a, b).reshape(as_+bs) + axis = nd-1 + for _ in range(nd): + result = concatenate(result, axis=axis) + wrapper = get_array_prepare(a, b) + if wrapper is not None: + result = wrapper(result) + wrapper = get_array_wrap(a, b) + if wrapper is not None: + result = wrapper(result) + return result + + +def _tile_dispatcher(A, reps): + return (A, reps) + + +@array_function_dispatch(_tile_dispatcher) +def tile(A, reps): + """ + Construct an array by repeating A the number of times given by reps. + + If `reps` has length ``d``, the result will have dimension of + ``max(d, A.ndim)``. + + If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new + axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, + or shape (1, 1, 3) for 3-D replication. If this is not the desired + behavior, promote `A` to d-dimensions manually before calling this + function. + + If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. + Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as + (1, 1, 2, 2). + + Note : Although tile may be used for broadcasting, it is strongly + recommended to use numpy's broadcasting operations and functions. + + Parameters + ---------- + A : array_like + The input array. + reps : array_like + The number of repetitions of `A` along each axis. + + Returns + ------- + c : ndarray + The tiled output array. + + See Also + -------- + repeat : Repeat elements of an array. 
+    broadcast_to : Broadcast an array to a new shape
+
+    Examples
+    --------
+    >>> a = np.array([0, 1, 2])
+    >>> np.tile(a, 2)
+    array([0, 1, 2, 0, 1, 2])
+    >>> np.tile(a, (2, 2))
+    array([[0, 1, 2, 0, 1, 2],
+           [0, 1, 2, 0, 1, 2]])
+    >>> np.tile(a, (2, 1, 2))
+    array([[[0, 1, 2, 0, 1, 2]],
+           [[0, 1, 2, 0, 1, 2]]])
+
+    >>> b = np.array([[1, 2], [3, 4]])
+    >>> np.tile(b, 2)
+    array([[1, 2, 1, 2],
+           [3, 4, 3, 4]])
+    >>> np.tile(b, (2, 1))
+    array([[1, 2],
+           [3, 4],
+           [1, 2],
+           [3, 4]])
+
+    >>> c = np.array([1,2,3,4])
+    >>> np.tile(c,(4,1))
+    array([[1, 2, 3, 4],
+           [1, 2, 3, 4],
+           [1, 2, 3, 4],
+           [1, 2, 3, 4]])
+    """
+    try:
+        tup = tuple(reps)
+    except TypeError:
+        tup = (reps,)
+    d = len(tup)
+    if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
+        # Fixes the problem that the function does not make a copy if A is a
+        # numpy array and the repetitions are 1 in all dimensions
+        return _nx.array(A, copy=True, subok=True, ndmin=d)
+    else:
+        # Note that no copy of zero-sized arrays is made. However since they
+        # have no data there is no risk of an inadvertent overwrite.
+        c = _nx.array(A, copy=False, subok=True, ndmin=d)
+        if (d < c.ndim):
+            tup = (1,)*(c.ndim-d) + tup
+        shape_out = tuple(s*t for s, t in zip(c.shape, tup))
+        n = c.size
+        if n > 0:
+            for dim_in, nrep in zip(c.shape, tup):
+                if nrep != 1:
+                    c = c.reshape(-1, n).repeat(nrep, 0)
+                n //= dim_in
+        return c.reshape(shape_out)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/shape_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/shape_base.pyc
new file mode 100644
index 0000000..fcd674e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/shape_base.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/stride_tricks.py b/project/venv/lib/python2.7/site-packages/numpy/lib/stride_tricks.py
new file mode 100644
index 0000000..0dc36e4
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/stride_tricks.py
@@ -0,0 +1,268 @@
+"""
+Utilities that manipulate strides to achieve desirable effects.
+
+An explanation of strides can be found in the "ndarray.rst" file in the
+NumPy reference guide.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+from numpy.core.overrides import array_function_dispatch
+
+__all__ = ['broadcast_to', 'broadcast_arrays']
+
+
+class DummyArray(object):
+    """Dummy object that just exists to hang __array_interface__ dictionaries
+    and possibly keep alive a reference to a base array.
+    """
+
+    def __init__(self, interface, base=None):
+        self.__array_interface__ = interface
+        self.base = base
+
+
+def _maybe_view_as_subclass(original_array, new_array):
+    if type(original_array) is not type(new_array):
+        # if input was an ndarray subclass and subclasses were OK,
+        # then view the result as that subclass.
+        new_array = new_array.view(type=type(original_array))
+        # Since we have done something akin to a view from original_array, we
+        # should let the subclass finalize (if it has it implemented, i.e., is
+        # not None).
+        if new_array.__array_finalize__:
+            new_array.__array_finalize__(original_array)
+    return new_array
+
+
+def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
+    """
+    Create a view into the array with the given shape and strides.
+
+    .. warning:: This function has to be used with extreme care, see notes.
+
+    Parameters
+    ----------
+    x : ndarray
+        Array to create a new view of.
+    shape : sequence of int, optional
+        The shape of the new array.
Defaults to ``x.shape``. + strides : sequence of int, optional + The strides of the new array. Defaults to ``x.strides``. + subok : bool, optional + .. versionadded:: 1.10 + + If True, subclasses are preserved. + writeable : bool, optional + .. versionadded:: 1.12 + + If set to False, the returned array will always be readonly. + Otherwise it will be writable if the original array was. It + is advisable to set this to False if possible (see Notes). + + Returns + ------- + view : ndarray + + See also + -------- + broadcast_to: broadcast an array to a given shape. + reshape : reshape an array. + + Notes + ----- + ``as_strided`` creates a view into the array given the exact strides + and shape. This means it manipulates the internal data structure of + ndarray and, if done incorrectly, the array elements can point to + invalid memory and can corrupt results or crash your program. + It is advisable to always use the original ``x.strides`` when + calculating new strides to avoid reliance on a contiguous memory + layout. + + Furthermore, arrays created with this function often contain self + overlapping memory, so that two elements are identical. + Vectorized write operations on such arrays will typically be + unpredictable. They may even give different results for small, large, + or transposed arrays. + Since writing to these arrays has to be tested and done with great + care, you may want to use ``writeable=False`` to avoid accidental write + operations. + + For these reasons it is advisable to avoid ``as_strided`` when + possible. + """ + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=False, subok=subok) + interface = dict(x.__array_interface__) + if shape is not None: + interface['shape'] = tuple(shape) + if strides is not None: + interface['strides'] = tuple(strides) + + array = np.asarray(DummyArray(interface, base=x)) + # The route via `__interface__` does not preserve structured + # dtypes. Since dtype should remain unchanged, we set it explicitly. + array.dtype = x.dtype + + view = _maybe_view_as_subclass(x, array) + + if view.flags.writeable and not writeable: + view.flags.writeable = False + + return view + + +def _broadcast_to(array, shape, subok, readonly): + shape = tuple(shape) if np.iterable(shape) else (shape,) + array = np.array(array, copy=False, subok=subok) + if not shape and array.shape: + raise ValueError('cannot broadcast a non-scalar to a scalar array') + if any(size < 0 for size in shape): + raise ValueError('all elements of broadcast shape must be non-' + 'negative') + needs_writeable = not readonly and array.flags.writeable + extras = ['reduce_ok'] if needs_writeable else [] + op_flag = 'readwrite' if needs_writeable else 'readonly' + it = np.nditer( + (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, + op_flags=[op_flag], itershape=shape, order='C') + with it: + # never really has writebackifcopy semantics + broadcast = it.itviews[0] + result = _maybe_view_as_subclass(array, broadcast) + if needs_writeable and not result.flags.writeable: + result.flags.writeable = True + return result + + +def _broadcast_to_dispatcher(array, shape, subok=None): + return (array,) + + +@array_function_dispatch(_broadcast_to_dispatcher, module='numpy') +def broadcast_to(array, shape, subok=False): + """Broadcast an array to a new shape. + + Parameters + ---------- + array : array_like + The array to broadcast. + shape : tuple + The shape of the desired array. 
+ subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + + Returns + ------- + broadcast : array + A readonly view on the original array with the given shape. It is + typically not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. + + Raises + ------ + ValueError + If the array is not compatible with the new shape according to NumPy's + broadcasting rules. + + Notes + ----- + .. versionadded:: 1.10.0 + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> np.broadcast_to(x, (3, 3)) + array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + """ + return _broadcast_to(array, shape, subok=subok, readonly=True) + + +def _broadcast_shape(*args): + """Returns the shape of the arrays that would result from broadcasting the + supplied arrays against each other. + """ + if not args: + return () + # use the old-iterator because np.nditer does not handle size 0 arrays + # consistently + b = np.broadcast(*args[:32]) + # unfortunately, it cannot handle 32 or more arguments directly + for pos in range(32, len(args), 31): + # ironically, np.broadcast does not properly handle np.broadcast + # objects (it treats them as scalars) + # use broadcasting to avoid allocating the full array + b = broadcast_to(0, b.shape) + b = np.broadcast(b, *args[pos:(pos + 31)]) + return b.shape + + +def _broadcast_arrays_dispatcher(*args, **kwargs): + return args + + +@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy') +def broadcast_arrays(*args, **kwargs): + """ + Broadcast any number of arrays against each other. + + Parameters + ---------- + `*args` : array_likes + The arrays to broadcast. + + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned arrays will be forced to be a base-class array (default). + + Returns + ------- + broadcasted : list of arrays + These arrays are views on the original arrays. They are typically + not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. If you + need to write to the arrays, make copies first. + + Examples + -------- + >>> x = np.array([[1,2,3]]) + >>> y = np.array([[4],[5]]) + >>> np.broadcast_arrays(x, y) + [array([[1, 2, 3], + [1, 2, 3]]), array([[4, 4, 4], + [5, 5, 5]])] + + Here is a useful idiom for getting contiguous copies instead of + non-contiguous views. + + >>> [np.array(a) for a in np.broadcast_arrays(x, y)] + [array([[1, 2, 3], + [1, 2, 3]]), array([[4, 4, 4], + [5, 5, 5]])] + + """ + # nditer is not used here to avoid the limit of 32 arrays. + # Otherwise, something like the following one-liner would suffice: + # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], + # order='C').itviews + + subok = kwargs.pop('subok', False) + if kwargs: + raise TypeError('broadcast_arrays() got an unexpected keyword ' + 'argument {!r}'.format(list(kwargs.keys())[0])) + args = [np.array(_m, copy=False, subok=subok) for _m in args] + + shape = _broadcast_shape(*args) + + if all(array.shape == shape for array in args): + # Common case where nothing needs to be broadcasted. + return args + + # TODO: consider making the results of broadcast_arrays readonly to match + # broadcast_to. This will require a deprecation cycle. 
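+    # Until that deprecation happens, hand back writable views so callers
+    # that assign into the results keep working.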
+ return [_broadcast_to(array, shape, subok=subok, readonly=False) + for array in args] diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/stride_tricks.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/stride_tricks.pyc new file mode 100644 index 0000000..e51e638 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/stride_tricks.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/__init__.pyc new file mode 100644 index 0000000..624b6e7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py2-objarr.npy b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py2-objarr.npy new file mode 100644 index 0000000..12936c9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py2-objarr.npy differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py2-objarr.npz b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py2-objarr.npz new file mode 100644 index 0000000..68a3b53 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py2-objarr.npz differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py3-objarr.npy b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py3-objarr.npy new file mode 100644 index 0000000..6776074 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py3-objarr.npy differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py3-objarr.npz b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py3-objarr.npz new file mode 100644 index 0000000..05eac0b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/py3-objarr.npz differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/python3.npy b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/python3.npy new file mode 100644 index 0000000..7c6997d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/python3.npy differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/win64python2.npy b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/win64python2.npy new file mode 100644 index 0000000..d9bc36a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/data/win64python2.npy differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__datasource.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__datasource.py new file mode 100644 index 0000000..8eac16b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__datasource.py @@ -0,0 +1,378 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +import pytest +from tempfile import mkdtemp, mkstemp, NamedTemporaryFile +from shutil import rmtree + +import numpy.lib._datasource as datasource +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_warns + ) + +if sys.version_info[0] >= 3: + import urllib.request 
as urllib_request + from urllib.parse import urlparse + from urllib.error import URLError +else: + import urllib2 as urllib_request + from urlparse import urlparse + from urllib2 import URLError + + +def urlopen_stub(url, data=None): + '''Stub to replace urlopen for testing.''' + if url == valid_httpurl(): + tmpfile = NamedTemporaryFile(prefix='urltmp_') + return tmpfile + else: + raise URLError('Name or service not known') + +# setup and teardown +old_urlopen = None + + +def setup_module(): + global old_urlopen + + old_urlopen = urllib_request.urlopen + urllib_request.urlopen = urlopen_stub + + +def teardown_module(): + urllib_request.urlopen = old_urlopen + +# A valid website for more robust testing +http_path = 'http://www.google.com/' +http_file = 'index.html' + +http_fakepath = 'http://fake.abc.web/site/' +http_fakefile = 'fake.txt' + +malicious_files = ['/etc/shadow', '../../shadow', + '..\\system.dat', 'c:\\windows\\system.dat'] + +magic_line = b'three is the magic number' + + +# Utility functions used by many tests +def valid_textfile(filedir): + # Generate and return a valid temporary file. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) + os.close(fd) + return path + + +def invalid_textfile(filedir): + # Generate and return an invalid filename. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) + os.close(fd) + os.remove(path) + return path + + +def valid_httpurl(): + return http_path+http_file + + +def invalid_httpurl(): + return http_fakepath+http_fakefile + + +def valid_baseurl(): + return http_path + + +def invalid_baseurl(): + return http_fakepath + + +def valid_httpfile(): + return http_file + + +def invalid_httpfile(): + return http_fakefile + + +class TestDataSourceOpen(object): + def setup(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + fh = self.ds.open(valid_httpurl()) + assert_(fh) + fh.close() + + def test_InvalidHTTP(self): + url = invalid_httpurl() + assert_raises(IOError, self.ds.open, url) + try: + self.ds.open(url) + except IOError as e: + # Regression test for bug fixed in r4342. + assert_(e.errno is None) + + def test_InvalidHTTPCacheURLError(self): + assert_raises(URLError, self.ds._cache, invalid_httpurl()) + + def test_ValidFile(self): + local_file = valid_textfile(self.tmpdir) + fh = self.ds.open(local_file) + assert_(fh) + fh.close() + + def test_InvalidFile(self): + invalid_file = invalid_textfile(self.tmpdir) + assert_raises(IOError, self.ds.open, invalid_file) + + def test_ValidGzipFile(self): + try: + import gzip + except ImportError: + # We don't have the gzip capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for Gzip files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') + fp = gzip.open(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + def test_ValidBz2File(self): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for BZip2 files. 
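+        # (mirrors the gzip case above: write a compressed file directly,
+        # then check that DataSource.open decompresses it transparently)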
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only") + def test_Bz2File_text_mode_warning(self): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for BZip2 files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + with assert_warns(RuntimeWarning): + fp = self.ds.open(filepath, 'rt') + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + +class TestDataSourceExists(object): + def setup(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + assert_(self.ds.exists(valid_httpurl())) + + def test_InvalidHTTP(self): + assert_equal(self.ds.exists(invalid_httpurl()), False) + + def test_ValidFile(self): + # Test valid file in destpath + tmpfile = valid_textfile(self.tmpdir) + assert_(self.ds.exists(tmpfile)) + # Test valid local file not in destpath + localdir = mkdtemp() + tmpfile = valid_textfile(localdir) + assert_(self.ds.exists(tmpfile)) + rmtree(localdir) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.ds.exists(tmpfile), False) + + +class TestDataSourceAbspath(object): + def setup(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.ds = datasource.DataSource(self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_equal(local_path, self.ds.abspath(valid_httpurl())) + + def test_ValidFile(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_equal(tmpfile, self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_equal(tmpfile, self.ds.abspath(tmpfile)) + + def test_InvalidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_(invalidhttp != self.ds.abspath(valid_httpurl())) + + def test_InvalidFile(self): + invalidfile = valid_textfile(self.tmpdir) + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_(invalidfile != self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_(invalidfile != self.ds.abspath(tmpfile)) + + def test_sandboxing(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + + tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) + + assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(tmpfile).startswith(self.tmpdir)) + assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_ValidFile() + 
self.test_InvalidHTTP() + self.test_InvalidFile() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryAbspath(object): + def setup(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.repos._destpath, netloc, + upath.strip(os.sep).strip('/')) + filepath = self.repos.abspath(valid_httpfile()) + assert_equal(local_path, filepath) + + def test_sandboxing(self): + tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) + assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryExists(object): + def setup(self): + self.tmpdir = mkdtemp() + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidFile(self): + # Create local temp file + tmpfile = valid_textfile(self.tmpdir) + assert_(self.repos.exists(tmpfile)) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.repos.exists(tmpfile), False) + + def test_RemoveHTTPFile(self): + assert_(self.repos.exists(valid_httpurl())) + + def test_CachedHTTPFile(self): + localfile = valid_httpurl() + # Create a locally cached temp file with an URL based + # directory structure. This is similar to what Repository.open + # would do. 
+ scheme, netloc, upath, pms, qry, frg = urlparse(localfile) + local_path = os.path.join(self.repos._destpath, netloc) + os.mkdir(local_path, 0o0700) + tmpfile = valid_textfile(local_path) + assert_(self.repos.exists(tmpfile)) + + +class TestOpenFunc(object): + def setup(self): + self.tmpdir = mkdtemp() + + def teardown(self): + rmtree(self.tmpdir) + + def test_DataSourceOpen(self): + local_file = valid_textfile(self.tmpdir) + # Test case where destpath is passed in + fp = datasource.open(local_file, destpath=self.tmpdir) + assert_(fp) + fp.close() + # Test case where default destpath is used + fp = datasource.open(local_file) + assert_(fp) + fp.close() + +def test_del_attr_handling(): + # DataSource __del__ can be called + # even if __init__ fails when the + # Exception object is caught by the + # caller as happens in refguide_check + # is_deprecated() function + + ds = datasource.DataSource() + # simulate failed __init__ by removing key attribute + # produced within __init__ and expected by __del__ + del ds._istmpdest + # should not raise an AttributeError if __del__ + # gracefully handles failed __init__: + ds.__del__() diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__datasource.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__datasource.pyc new file mode 100644 index 0000000..0bb5990 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__datasource.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__iotools.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__iotools.py new file mode 100644 index 0000000..e04fdc8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__iotools.py @@ -0,0 +1,352 @@ +from __future__ import division, absolute_import, print_function + +import time +from datetime import date + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_allclose, assert_raises, + ) +from numpy.lib._iotools import ( + LineSplitter, NameValidator, StringConverter, + has_nested_fields, easy_dtype, flatten_dtype + ) +from numpy.compat import unicode + + +class TestLineSplitter(object): + "Tests the LineSplitter class." 
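+    # LineSplitter strips comments first, then splits what is left: a
+    # delimiter of None or '' means "any run of whitespace", a string splits
+    # on that literal text, and an int or sequence of ints marks fixed-width
+    # fields.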
+
+    def test_no_delimiter(self):
+        "Test LineSplitter w/o delimiter"
+        strg = " 1 2 3 4  5 # test"
+        test = LineSplitter()(strg)
+        assert_equal(test, ['1', '2', '3', '4', '5'])
+        test = LineSplitter('')(strg)
+        assert_equal(test, ['1', '2', '3', '4', '5'])
+
+    def test_space_delimiter(self):
+        "Test space delimiter"
+        strg = " 1 2 3 4  5 # test"
+        test = LineSplitter(' ')(strg)
+        assert_equal(test, ['1', '2', '3', '4', '', '5'])
+        test = LineSplitter('  ')(strg)
+        assert_equal(test, ['1 2 3 4', '5'])
+
+    def test_tab_delimiter(self):
+        "Test tab delimiter"
+        strg = " 1\t 2\t 3\t 4\t 5  6"
+        test = LineSplitter('\t')(strg)
+        assert_equal(test, ['1', '2', '3', '4', '5  6'])
+        strg = " 1  2\t 3  4\t 5  6"
+        test = LineSplitter('\t')(strg)
+        assert_equal(test, ['1  2', '3  4', '5  6'])
+
+    def test_other_delimiter(self):
+        "Test LineSplitter on delimiter"
+        strg = "1,2,3,4,,5"
+        test = LineSplitter(',')(strg)
+        assert_equal(test, ['1', '2', '3', '4', '', '5'])
+        #
+        strg = " 1,2,3,4,,5 # test"
+        test = LineSplitter(',')(strg)
+        assert_equal(test, ['1', '2', '3', '4', '', '5'])
+
+        # gh-11028 bytes comment/delimiters should get encoded
+        strg = b" 1,2,3,4,,5 % test"
+        test = LineSplitter(delimiter=b',', comments=b'%')(strg)
+        assert_equal(test, ['1', '2', '3', '4', '', '5'])
+
+    def test_constant_fixed_width(self):
+        "Test LineSplitter w/ fixed-width fields"
+        strg = "  1  2  3  4     5   # test"
+        test = LineSplitter(3)(strg)
+        assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
+        #
+        strg = "  1     3  4  5  6# test"
+        test = LineSplitter(20)(strg)
+        assert_equal(test, ['1     3  4  5  6'])
+        #
+        strg = "  1     3  4  5  6# test"
+        test = LineSplitter(30)(strg)
+        assert_equal(test, ['1     3  4  5  6'])
+
+    def test_variable_fixed_width(self):
+        strg = "  1     3  4  5  6# test"
+        test = LineSplitter((3, 6, 6, 3))(strg)
+        assert_equal(test, ['1', '3', '4  5', '6'])
+        #
+        strg = "  1     3  4  5  6# test"
+        test = LineSplitter((6, 6, 9))(strg)
+        assert_equal(test, ['1', '3  4', '5  6'])
+
+# -----------------------------------------------------------------------------
+
+
+class TestNameValidator(object):
+
+    def test_case_sensitivity(self):
+        "Test case sensitivity"
+        names = ['A', 'a', 'b', 'c']
+        test = NameValidator().validate(names)
+        assert_equal(test, ['A', 'a', 'b', 'c'])
+        test = NameValidator(case_sensitive=False).validate(names)
+        assert_equal(test, ['A', 'A_1', 'B', 'C'])
+        test = NameValidator(case_sensitive='upper').validate(names)
+        assert_equal(test, ['A', 'A_1', 'B', 'C'])
+        test = NameValidator(case_sensitive='lower').validate(names)
+        assert_equal(test, ['a', 'a_1', 'b', 'c'])
+
+        # check exceptions
+        assert_raises(ValueError, NameValidator, case_sensitive='foobar')
+
+    def test_excludelist(self):
+        "Test excludelist"
+        names = ['dates', 'data', 'Other Data', 'mask']
+        validator = NameValidator(excludelist=['dates', 'data', 'mask'])
+        test = validator.validate(names)
+        assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
+
+    def test_missing_names(self):
+        "Test validate missing names"
+        namelist = ('a', 'b', 'c')
+        validator = NameValidator()
+        assert_equal(validator(namelist), ['a', 'b', 'c'])
+        namelist = ('', 'b', 'c')
+        assert_equal(validator(namelist), ['f0', 'b', 'c'])
+        namelist = ('a', 'b', '')
+        assert_equal(validator(namelist), ['a', 'b', 'f0'])
+        namelist = ('', 'f0', '')
+        assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
+
+    def test_validate_nb_names(self):
+        "Test validate nb names"
+        namelist = ('a', 'b', 'c')
+        validator = NameValidator()
+        assert_equal(validator(namelist, nbfields=1), ('a',))
+
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), + ['a', 'b', 'c', 'g0', 'g1']) + + def test_validate_wo_names(self): + "Test validate no names" + namelist = None + validator = NameValidator() + assert_(validator(namelist) is None) + assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) + +# ----------------------------------------------------------------------------- + + +def _bytes_to_date(s): + return date(*time.strptime(s, "%Y-%m-%d")[:3]) + + +class TestStringConverter(object): + "Test StringConverter" + + def test_creation(self): + "Test creation of a StringConverter" + converter = StringConverter(int, -99999) + assert_equal(converter._status, 1) + assert_equal(converter.default, -99999) + + def test_upgrade(self): + "Tests the upgrade method." + + converter = StringConverter() + assert_equal(converter._status, 0) + + # test int + assert_equal(converter.upgrade('0'), 0) + assert_equal(converter._status, 1) + + # On systems where long defaults to 32-bit, the statuses will be + # offset by one, so we check for this here. + import numpy.core.numeric as nx + status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) + + # test int > 2**32 + assert_equal(converter.upgrade('17179869184'), 17179869184) + assert_equal(converter._status, 1 + status_offset) + + # test float + assert_allclose(converter.upgrade('0.'), 0.0) + assert_equal(converter._status, 2 + status_offset) + + # test complex + assert_equal(converter.upgrade('0j'), complex('0j')) + assert_equal(converter._status, 3 + status_offset) + + # test str + # note that the longdouble type has been skipped, so the + # _status increases by 2. Everything should succeed with + # unicode conversion (5). + for s in ['a', u'a', b'a']: + res = converter.upgrade(s) + assert_(type(res) is unicode) + assert_equal(res, u'a') + assert_equal(converter._status, 5 + status_offset) + + def test_missing(self): + "Tests the use of missing values." 
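+        # Tokens listed in missing_values (plus the empty string) convert to
+        # the converter's default value instead of raising.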
+ converter = StringConverter(missing_values=('missing', + 'missed')) + converter.upgrade('0') + assert_equal(converter('0'), 0) + assert_equal(converter(''), converter.default) + assert_equal(converter('missing'), converter.default) + assert_equal(converter('missed'), converter.default) + try: + converter('miss') + except ValueError: + pass + + def test_upgrademapper(self): + "Tests updatemapper" + dateparser = _bytes_to_date + StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) + convert = StringConverter(dateparser, date(2000, 1, 1)) + test = convert('2001-01-01') + assert_equal(test, date(2001, 1, 1)) + test = convert('2009-01-01') + assert_equal(test, date(2009, 1, 1)) + test = convert('') + assert_equal(test, date(2000, 1, 1)) + + def test_string_to_object(self): + "Make sure that string-to-object functions are properly recognized" + old_mapper = StringConverter._mapper[:] # copy of list + conv = StringConverter(_bytes_to_date) + assert_equal(conv._mapper, old_mapper) + assert_(hasattr(conv, 'default')) + + def test_keep_default(self): + "Make sure we don't lose an explicit default" + converter = StringConverter(None, missing_values='', + default=-999) + converter.upgrade('3.14159265') + assert_equal(converter.default, -999) + assert_equal(converter.type, np.dtype(float)) + # + converter = StringConverter( + None, missing_values='', default=0) + converter.upgrade('3.14159265') + assert_equal(converter.default, 0) + assert_equal(converter.type, np.dtype(float)) + + def test_keep_default_zero(self): + "Check that we don't lose a default of 0" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal(converter.default, 0) + + def test_keep_missing_values(self): + "Check that we're not losing missing values" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal( + converter.missing_values, {'', 'N/A'}) + + def test_int64_dtype(self): + "Check that int64 integer types can be specified" + converter = StringConverter(np.int64, default=0) + val = "-9223372036854775807" + assert_(converter(val) == -9223372036854775807) + val = "9223372036854775807" + assert_(converter(val) == 9223372036854775807) + + def test_uint64_dtype(self): + "Check that uint64 integer types can be specified" + converter = StringConverter(np.uint64, default=0) + val = "9223372043271415339" + assert_(converter(val) == 9223372043271415339) + + +class TestMiscFunctions(object): + + def test_has_nested_dtype(self): + "Test has_nested_dtype" + ndtype = np.dtype(float) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + assert_equal(has_nested_fields(ndtype), True) + + def test_easy_dtype(self): + "Test ndtype on dtypes" + # Simple case + ndtype = float + assert_equal(easy_dtype(ndtype), np.dtype(float)) + # As string w/o names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', "i4"), ('f1', "f8")])) + # As string w/o names but different default format + assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), + np.dtype([('field_000', "i4"), ('field_001', "f8")])) + # As string w/ names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (too many) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (not 
enough) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names=", b"), + np.dtype([('f0', "i4"), ('b', "f8")])) + # ... (with different default format) + assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), + np.dtype([('a', "i4"), ('f00', "f8")])) + # As list of tuples w/o names + ndtype = [('A', int), ('B', float)] + assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) + # As list of tuples w/ names + assert_equal(easy_dtype(ndtype, names="a,b"), + np.dtype([('a', int), ('b', float)])) + # As list of tuples w/ not enough names + assert_equal(easy_dtype(ndtype, names="a"), + np.dtype([('a', int), ('f0', float)])) + # As list of tuples w/ too many names + assert_equal(easy_dtype(ndtype, names="a,b,c"), + np.dtype([('a', int), ('b', float)])) + # As list of types w/o names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', int), ('f1', float), ('f2', float)])) + # As list of types w names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', int), ('b', float), ('c', float)])) + # As simple dtype w/ names + ndtype = np.dtype(float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([(_, float) for _ in ('a', 'b', 'c')])) + # As simple dtype w/o names (but multiple fields) + ndtype = np.dtype(float) + assert_equal( + easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), + np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) + + def test_flatten_dtype(self): + "Testing flatten_dtype" + # Standard dtype + dt = np.dtype([("a", "f8"), ("b", "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + # Recursive dtype + dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) + # dtype with shaped fields + dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, int]) + dt_flat = flatten_dtype(dt, True) + assert_equal(dt_flat, [float] * 2 + [int] * 3) + # dtype w/ titles + dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__iotools.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__iotools.pyc new file mode 100644 index 0000000..944fcd2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__iotools.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__version.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__version.py new file mode 100644 index 0000000..8e66a0c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__version.py @@ -0,0 +1,66 @@ +"""Tests for the NumpyVersion class. + +""" +from __future__ import division, absolute_import, print_function + +from numpy.testing import assert_, assert_raises +from numpy.lib import NumpyVersion + + +def test_main_versions(): + assert_(NumpyVersion('1.8.0') == '1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1']: + assert_(NumpyVersion('1.8.0') < ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert_(NumpyVersion('1.8.0') > ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. 
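+    # NumpyVersion compares release components numerically, so
+    # '1.9.0' < '1.10.0' holds even though plain string comparison
+    # would order them the other way.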
+ assert_(NumpyVersion('1.9.0') < '1.10.0') + assert_(NumpyVersion('1.11.0') < '1.11.1') + assert_(NumpyVersion('1.11.0') == '1.11.0') + assert_(NumpyVersion('1.99.11') < '1.99.12') + + +def test_alpha_beta_rc(): + assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert_(NumpyVersion('1.8.0rc1') < ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert_(NumpyVersion('1.8.0rc1') > ver) + + assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') + + +def test_dev_version(): + assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: + assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') + + +def test_dev_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') + assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') + + +def test_dev0_version(): + assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: + assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') + + +def test_dev0_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') + assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') + + +def test_raises(): + for ver in ['1.9', '1,9.0', '1.7.x']: + assert_raises(ValueError, NumpyVersion, ver) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__version.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__version.pyc new file mode 100644 index 0000000..33a8f7e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test__version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraypad.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraypad.py new file mode 100644 index 0000000..20f6e4a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraypad.py @@ -0,0 +1,1262 @@ +"""Tests for the array padding functions. 
+ +""" +from __future__ import division, absolute_import, print_function + +import pytest + +import numpy as np +from numpy.testing import (assert_array_equal, assert_raises, assert_allclose, + assert_equal) +from numpy.lib import pad +from numpy.lib.arraypad import _as_pairs + + +class TestAsPairs(object): + + def test_single_value(self): + """Test casting for a single value.""" + expected = np.array([[3, 3]] * 10) + for x in (3, [3], [[3]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # Test with dtype=object + obj = object() + assert_equal( + _as_pairs(obj, 10), + np.array([[obj, obj]] * 10) + ) + + def test_two_values(self): + """Test proper casting for two different values.""" + # Broadcasting in the first dimension with numbers + expected = np.array([[3, 4]] * 10) + for x in ([3, 4], [[3, 4]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # and with dtype=object + obj = object() + assert_equal( + _as_pairs(["a", obj], 10), + np.array([["a", obj]] * 10) + ) + + # Broadcasting in the second / last dimension with numbers + assert_equal( + _as_pairs([[3], [4]], 2), + np.array([[3, 3], [4, 4]]) + ) + # and with dtype=object + assert_equal( + _as_pairs([["a"], [obj]], 2), + np.array([["a", "a"], [obj, obj]]) + ) + + def test_with_none(self): + expected = ((None, None), (None, None), (None, None)) + assert_equal( + _as_pairs(None, 3, as_index=False), + expected + ) + assert_equal( + _as_pairs(None, 3, as_index=True), + expected + ) + + def test_pass_through(self): + """Test if `x` already matching desired output are passed through.""" + expected = np.arange(12).reshape((6, 2)) + assert_equal( + _as_pairs(expected, 6), + expected + ) + + def test_as_index(self): + """Test results if `as_index=True`.""" + assert_equal( + _as_pairs([2.6, 3.3], 10, as_index=True), + np.array([[3, 3]] * 10, dtype=np.intp) + ) + assert_equal( + _as_pairs([2.6, 4.49], 10, as_index=True), + np.array([[3, 4]] * 10, dtype=np.intp) + ) + for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]], + [[1, 2]] * 9 + [[1, -2]]): + with pytest.raises(ValueError, match="negative values"): + _as_pairs(x, 10, as_index=True) + + def test_exceptions(self): + """Ensure faulty usage is discovered.""" + with pytest.raises(ValueError, match="more dimensions than allowed"): + _as_pairs([[[3]]], 10) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs([[1, 2], [3, 4]], 3) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs(np.ones((2, 3)), 3) + + +class TestConditionalShortcuts(object): + def test_zero_padding_shortcuts(self): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(0, 0) for axis in test.shape] + modes = ['constant', + 'edge', + 'linear_ramp', + 'maximum', + 'mean', + 'median', + 'minimum', + 'reflect', + 'symmetric', + 'wrap', + ] + for mode in modes: + assert_array_equal(test, pad(test, pad_amt, mode=mode)) + + def test_shallow_statistic_range(self): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(1, 1) for axis in test.shape] + modes = ['maximum', + 'mean', + 'median', + 'minimum', + ] + for mode in modes: + assert_array_equal(pad(test, pad_amt, mode='edge'), + pad(test, pad_amt, mode=mode, stat_length=1)) + + def test_clip_statistic_range(self): + test = np.arange(30).reshape(5, 6) + pad_amt = [(3, 3) for axis in test.shape] + modes = ['maximum', + 'mean', + 'median', + 'minimum', + ] + for mode in modes: + assert_array_equal(pad(test, pad_amt, mode=mode), + pad(test, pad_amt, mode=mode, stat_length=30)) + + 
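+# The statistic modes exercised below ('maximum', 'mean', 'median', 'minimum')
+# fill each pad region with a statistic computed over at most `stat_length`
+# values taken from the corresponding edge. A minimal sketch of that rule:
+#
+#     np.pad(np.arange(4.), (2, 2), mode='mean', stat_length=2)
+#     # -> [0.5, 0.5, 0., 1., 2., 3., 2.5, 2.5]
+#     # left pad = mean of the first 2 values, right pad = mean of the last 2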
+class TestStatistic(object): + def test_check_mean_stat_length(self): + a = np.arange(100).astype('f') + a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) + b = np.array( + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. + ]) + assert_array_equal(a, b) + + def test_check_maximum_1(self): + a = np.arange(100) + a = pad(a, (25, 20), 'maximum') + b = np.array( + [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] + ) + assert_array_equal(a, b) + + def test_check_maximum_2(self): + a = np.arange(100) + 1 + a = pad(a, (25, 20), 'maximum') + b = np.array( + [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_maximum_stat_length(self): + a = np.arange(100) + 1 + a = pad(a, (25, 20), 'maximum', stat_length=10) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_minimum_1(self): + a = np.arange(100) + a = pad(a, (25, 20), 'minimum') + b = np.array( + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 
19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_minimum_2(self): + a = np.arange(100) + 2 + a = pad(a, (25, 20), 'minimum') + b = np.array( + [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + assert_array_equal(a, b) + + def test_check_minimum_stat_length(self): + a = np.arange(100) + 1 + a = pad(a, (25, 20), 'minimum', stat_length=10) + b = np.array( + [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] + ) + assert_array_equal(a, b) + + def test_check_median(self): + a = np.arange(100).astype('f') + a = pad(a, (25, 20), 'median') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + def test_check_median_01(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = pad(a, 1, 'median') + b = np.array( + [[4, 4, 5, 4, 4], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [4, 4, 5, 4, 4]] + ) + assert_array_equal(a, b) + + def test_check_median_02(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = pad(a.T, 1, 'median').T + b = np.array( + [[5, 4, 5, 4, 5], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [5, 4, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_median_stat_length(self): + a = np.arange(100).astype('f') + a[1] = 2. + a[97] = 96. 
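+        # a[1] and a[97] are nudged so that the median of the 3-element left
+        # window ([0., 2., 2.]) is 2. and the median of the 5-element right
+        # window ([95., 96., 96., 98., 99.]) is 96.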
+ a = pad(a, (25, 20), 'median', stat_length=(3, 5)) + b = np.array( + [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., + + 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., + + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] + ) + assert_array_equal(a, b) + + def test_check_mean_shape_one(self): + a = [[4, 5, 6]] + a = pad(a, (5, 7), 'mean', stat_length=2) + b = np.array( + [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_mean_2(self): + a = np.arange(100).astype('f') + a = pad(a, (25, 20), 'mean') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("mode", [ + pytest.param("mean", marks=pytest.mark.xfail(reason="gh-11216")), + "median", + "minimum", + "maximum" + ]) + def test_same_prepend_append(self, mode): + """ Test that appended and prepended values are equal """ + # This test is constructed to trigger floating point rounding errors in + # a way that caused gh-11216 for mode=='mean' + a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64) + a = np.pad(a, (1, 1), mode) + assert_equal(a[0], a[-1]) + + +class TestConstant(object): + def test_check_constant(self): + a = np.arange(100) + a = pad(a, (25, 20), 'constant', constant_values=(10, 20)) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 
54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] + ) + assert_array_equal(a, b) + + def test_check_constant_zeros(self): + a = np.arange(100) + a = pad(a, (25, 20), 'constant') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_constant_float(self): + # If input array is int, but constant_values are float, the dtype of + # the array to be padded is kept + arr = np.arange(30).reshape(5, 6) + test = pad(arr, (1, 2), mode='constant', + constant_values=1.1) + expected = np.array( + [[ 1, 1, 1, 1, 1, 1, 1, 1, 1], + + [ 1, 0, 1, 2, 3, 4, 5, 1, 1], + [ 1, 6, 7, 8, 9, 10, 11, 1, 1], + [ 1, 12, 13, 14, 15, 16, 17, 1, 1], + [ 1, 18, 19, 20, 21, 22, 23, 1, 1], + [ 1, 24, 25, 26, 27, 28, 29, 1, 1], + + [ 1, 1, 1, 1, 1, 1, 1, 1, 1], + [ 1, 1, 1, 1, 1, 1, 1, 1, 1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float2(self): + # If input array is float, and constant_values are float, the dtype of + # the array to be padded is kept - here retaining the float constants + arr = np.arange(30).reshape(5, 6) + arr_float = arr.astype(np.float64) + test = pad(arr_float, ((1, 2), (1, 2)), mode='constant', + constant_values=1.1) + expected = np.array( + [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + + [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], + [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], + [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], + [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], + [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. 
, 1.1, 1.1], + + [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float3(self): + a = np.arange(100, dtype=float) + a = pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) + b = np.array( + [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] + ) + assert_allclose(a, b) + + def test_check_constant_odd_pad_amount(self): + arr = np.arange(30).reshape(5, 6) + test = pad(arr, ((1,), (2,)), mode='constant', + constant_values=3) + expected = np.array( + [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], + + [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3], + [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3], + [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3], + [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3], + [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3], + + [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] + ) + assert_allclose(test, expected) + + def test_check_constant_pad_2d(self): + arr = np.arange(4).reshape(2, 2) + test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant', + constant_values=((1, 2), (3, 4))) + expected = np.array( + [[3, 1, 1, 4, 4, 4], + [3, 0, 1, 4, 4, 4], + [3, 2, 3, 4, 4, 4], + [3, 2, 2, 4, 4, 4], + [3, 2, 2, 4, 4, 4]] + ) + assert_allclose(test, expected) + + def test_check_large_integers(self): + uint64_max = 2 ** 64 - 1 + arr = np.full(5, uint64_max, dtype=np.uint64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, uint64_max, dtype=np.uint64) + assert_array_equal(test, expected) + + int64_max = 2 ** 63 - 1 + arr = np.full(5, int64_max, dtype=np.int64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, int64_max, dtype=np.int64) + assert_array_equal(test, expected) + + def test_check_object_array(self): + arr = np.empty(1, dtype=object) + obj_a = object() + arr[0] = obj_a + obj_b = object() + obj_c = object() + arr = np.pad(arr, pad_width=1, mode='constant', + constant_values=(obj_b, obj_c)) + + expected = np.empty((3,), dtype=object) + expected[0] = obj_b + expected[1] = obj_a + expected[2] = obj_c + + assert_array_equal(arr, expected) + + +class TestLinearRamp(object): + def test_check_simple(self): + a = np.arange(100).astype('f') + a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) + b = np.array( + [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, + 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, + 0.80, 0.64, 0.48, 0.32, 0.16, + + 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, + 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, + 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, + 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, + 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, + 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, + 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 
76.0, 77.0, 78.0, 79.0, + 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, + 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, + + 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, + 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] + ) + assert_allclose(a, b, rtol=1e-5, atol=1e-5) + + def test_check_2d(self): + arr = np.arange(20).reshape(4, 5).astype(np.float64) + test = pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) + expected = np.array( + [[0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], + [0., 0., 0., 1., 2., 3., 4., 2., 0.], + [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], + [0., 5., 10., 11., 12., 13., 14., 7., 0.], + [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], + [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) + assert_allclose(test, expected) + + @pytest.mark.xfail(exceptions=(AssertionError,)) + def test_object_array(self): + from fractions import Fraction + arr = np.array([Fraction(1, 2), Fraction(-1, 2)]) + actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0) + + # deliberately chosen to have a non-power-of-2 denominator such that + # rounding to floats causes a failure. + expected = np.array([ + Fraction( 0, 12), + Fraction( 3, 12), + Fraction( 6, 12), + Fraction(-6, 12), + Fraction(-4, 12), + Fraction(-2, 12), + Fraction(-0, 12), + ]) + assert_equal(actual, expected) + + +class TestReflect(object): + def test_check_simple(self): + a = np.arange(100) + a = pad(a, (25, 20), 'reflect') + b = np.array( + [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = pad(a, (25, 20), 'reflect', reflect_type='odd') + b = np.array( + [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, + -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, + -5, -4, -3, -2, -1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = pad(a, (5, 7), 'reflect') + b = np.array( + [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 
6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = pad(a, (5, 7), 'reflect') + b = np.array( + [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = pad([1, 2, 3], 2, 'reflect') + b = np.array([3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_02(self): + a = pad([1, 2, 3], 3, 'reflect') + b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_03(self): + a = pad([1, 2, 3], 4, 'reflect') + b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_padding_an_empty_array(self): + a = pad(np.zeros((0, 3)), ((0,), (1,)), mode='reflect') + b = np.zeros((0, 5)) + assert_array_equal(a, b) + + +class TestSymmetric(object): + def test_check_simple(self): + a = np.arange(100) + a = pad(a, (25, 20), 'symmetric') + b = np.array( + [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, + 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, + 4, 3, 2, 1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, + 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = pad(a, (25, 20), 'symmetric', reflect_type='odd') + b = np.array( + [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, + -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, + -4, -3, -2, -1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 
5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + + assert_array_equal(a, b) + + def test_check_large_pad_odd(self): + a = [[4, 5, 6], [6, 7, 8]] + a = pad(a, (5, 7), 'symmetric', reflect_type='odd') + b = np.array( + [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = pad([1, 2, 3], 2, 'symmetric') + b = np.array([2, 1, 1, 2, 3, 3, 2]) + assert_array_equal(a, b) + + def test_check_02(self): + a = pad([1, 2, 3], 3, 'symmetric') + b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_03(self): + a = pad([1, 2, 3], 6, 'symmetric') + b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestWrap(object): + def test_check_simple(self): + a = np.arange(100) + a = pad(a, (25, 20), 'wrap') + b = np.array( + [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = np.arange(12) + a = np.reshape(a, (3, 4)) + a = pad(a, (10, 12), 'wrap') + b = np.array( + [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 
11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = pad([1, 2, 3], 3, 'wrap') + b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_02(self): + a = pad([1, 2, 3], 4, 'wrap') + b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) + assert_array_equal(a, b) + + def test_pad_with_zero(self): + a = np.ones((3, 5)) + b = np.pad(a, (0, 5), mode="wrap") + assert_array_equal(a, b[:-5, :-5]) + + +class TestStatLen(object): + def test_check_simple(self): + a = np.arange(30) + a = np.reshape(a, (6, 5)) + a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) + b = np.array( + [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + + [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], + [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], + + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] + ) + assert_array_equal(a, b) + + +class TestEdge(object): + def test_check_simple(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = pad(a, ((2, 3), (3, 2)), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 
6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + def test_check_width_shape_1_2(self): + # Check a pad_width of the form ((1, 2),). + # Regression test for issue gh-7808. + a = np.array([1, 2, 3]) + padded = pad(a, ((1, 2),), 'edge') + expected = np.array([1, 1, 2, 3, 3, 3]) + assert_array_equal(padded, expected) + + a = np.array([[1, 2, 3], [4, 5, 6]]) + padded = pad(a, ((1, 2),), 'edge') + expected = pad(a, ((1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + a = np.arange(24).reshape(2, 3, 4) + padded = pad(a, ((1, 2),), 'edge') + expected = pad(a, ((1, 2), (1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + +class TestZeroPadWidth(object): + def test_zero_pad_width(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + for pad_width in (0, (0, 0), ((0, 0), (0, 0))): + assert_array_equal(arr, pad(arr, pad_width, mode='constant')) + + +class TestLegacyVectorFunction(object): + def test_legacy_vector_functionality(self): + def _padwithtens(vector, pad_width, iaxis, kwargs): + vector[:pad_width[0]] = 10 + vector[-pad_width[1]:] = 10 + return vector + + a = np.arange(6).reshape(2, 3) + a = pad(a, 2, _padwithtens) + b = np.array( + [[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]] + ) + assert_array_equal(a, b) + + +class TestNdarrayPadWidth(object): + def test_check_simple(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = pad(a, np.array(((2, 3), (3, 2))), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + +class TestUnicodeInput(object): + def test_unicode_mode(self): + constant_mode = u'constant' + a = np.pad([1], 2, mode=constant_mode) + b = np.array([0, 0, 1, 0, 0]) + assert_array_equal(a, b) + + +class TestObjectInput(object): + def test_object_input(self): + # Regression test for issue gh-11395. 
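+ # An all-None object array padded with any of these element-reusing + # modes should stay all None; only the shape grows.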
+ a = np.full((4, 3), None) + pad_amt = ((2, 3), (3, 2)) + b = np.full((9, 8), None) + modes = ['edge', + 'symmetric', + 'reflect', + 'wrap', + ] + for mode in modes: + assert_array_equal(pad(a, pad_amt, mode=mode), b) + + +class TestValueError1(object): + def test_check_simple(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(3, )) + assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)), + **kwargs) + + def test_check_negative_stat_length(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(-3, )) + assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)), + **kwargs) + + def test_check_negative_pad_width(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(3, )) + assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), + **kwargs) + + def test_check_empty_array(self): + assert_raises(ValueError, pad, [], 4, mode='reflect') + assert_raises(ValueError, pad, np.ndarray(0), 4, mode='reflect') + assert_raises(ValueError, pad, np.zeros((0, 3)), ((1,), (0,)), + mode='reflect') + + +class TestValueError2(object): + def test_check_negative_pad_amount(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(3, )) + assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), + **kwargs) + + +class TestValueError3(object): + def test_check_kwarg_not_allowed(self): + arr = np.arange(30).reshape(5, 6) + assert_raises(ValueError, pad, arr, 4, mode='mean', + reflect_type='odd') + + def test_mode_not_set(self): + arr = np.arange(30).reshape(5, 6) + assert_raises(TypeError, pad, arr, 4) + + def test_malformed_pad_amount(self): + arr = np.arange(30).reshape(5, 6) + assert_raises(ValueError, pad, arr, (4, 5, 6, 7), mode='constant') + + def test_malformed_pad_amount2(self): + arr = np.arange(30).reshape(5, 6) + assert_raises(ValueError, pad, arr, ((3, 4, 5), (0, 1, 2)), + mode='constant') + + def test_pad_too_many_axes(self): + arr = np.arange(30).reshape(5, 6) + + # Attempt to pad using a 3D array equivalent + bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,))) + assert_raises(ValueError, pad, arr, bad_shape, + mode='constant') + + +class TestTypeError1(object): + def test_float(self): + arr = np.arange(30) + assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2))) + assert_raises(TypeError, pad, arr, np.array(((-2.1, 3), (3, 2)))) + + def test_str(self): + arr = np.arange(30) + assert_raises(TypeError, pad, arr, 'foo') + assert_raises(TypeError, pad, arr, np.array('foo')) + + def test_object(self): + class FooBar(object): + pass + arr = np.arange(30) + assert_raises(TypeError, pad, arr, FooBar()) + + def test_complex(self): + arr = np.arange(30) + assert_raises(TypeError, pad, arr, complex(1, -1)) + assert_raises(TypeError, pad, arr, np.array(complex(1, -1))) + + def test_check_wrong_pad_amount(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(3, )) + assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)), + **kwargs) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraypad.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraypad.pyc new file mode 100644 index 0000000..c7f65e3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraypad.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraysetops.py 
b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraysetops.py new file mode 100644 index 0000000..93d4b27 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraysetops.py @@ -0,0 +1,623 @@ +"""Test functions for 1D array set operations. + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np + +from numpy.testing import (assert_array_equal, assert_equal, + assert_raises, assert_raises_regex) +from numpy.lib.arraysetops import ( + ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin + ) +import pytest + + + +class TestSetOps(object): + + def test_intersect1d(self): + # unique inputs + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([1, 2, 5]) + c = intersect1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + # non-unique inputs + a = np.array([5, 5, 7, 1, 2]) + b = np.array([2, 1, 4, 3, 3, 1, 5]) + + ed = np.array([1, 2, 5]) + c = intersect1d(a, b) + assert_array_equal(c, ed) + assert_array_equal([], intersect1d([], [])) + + def test_intersect1d_array_like(self): + # See gh-11772 + class Test(object): + def __array__(self): + return np.arange(3) + + a = Test() + res = intersect1d(a, a) + assert_array_equal(res, a) + res = intersect1d([1, 2, 3], [1, 2, 3]) + assert_array_equal(res, [1, 2, 3]) + + def test_intersect1d_indices(self): + # unique inputs + a = np.array([1, 2, 3, 4]) + b = np.array([2, 1, 4, 6]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ee = np.array([1, 2, 4]) + assert_array_equal(c, ee) + assert_array_equal(a[i1], ee) + assert_array_equal(b[i2], ee) + + # non-unique inputs + a = np.array([1, 2, 2, 3, 4, 3, 2]) + b = np.array([1, 8, 4, 2, 2, 3, 2, 3]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ef = np.array([1, 2, 3, 4]) + assert_array_equal(c, ef) + assert_array_equal(a[i1], ef) + assert_array_equal(b[i2], ef) + + # non1d, unique inputs + a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) + b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 6, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + # non1d, not assumed to be unique inputs + a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) + b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + def test_setxor1d(self): + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([3, 4, 7]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 2, 3]) + b = np.array([6, 5, 4]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setxor1d([], [])) + + def test_ediff1d(self): + zero_elem = np.array([]) + one_elem = np.array([1]) + two_elem = np.array([1, 2]) + + assert_array_equal([], ediff1d(zero_elem)) + assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) + assert_array_equal([0], ediff1d(zero_elem, to_end=0)) + assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) + assert_array_equal([], 
ediff1d(one_elem)) + assert_array_equal([1], ediff1d(two_elem)) + assert_array_equal([7,1,9], ediff1d(two_elem, to_begin=7, to_end=9)) + assert_array_equal([5,6,1,7,8], ediff1d(two_elem, to_begin=[5,6], to_end=[7,8])) + assert_array_equal([1,9], ediff1d(two_elem, to_end=9)) + assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8])) + assert_array_equal([7,1], ediff1d(two_elem, to_begin=7)) + assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6])) + + @pytest.mark.parametrize("ary, prepend, append", [ + # should fail because trying to cast + # np.nan standard floating point value + # into an integer array: + (np.array([1, 2, 3], dtype=np.int64), + None, + np.nan), + # should fail because attempting + # to downcast to smaller int type: + (np.array([1, 2, 3], dtype=np.int16), + np.array([5, 1<<20, 2], dtype=np.int32), + None), + # should fail because attempting to cast + # two special floating point values + # to integers (on both sides of ary): + (np.array([1., 3., 9.], dtype=np.int8), + np.nan, + np.nan), + ]) + def test_ediff1d_forbidden_type_casts(self, ary, prepend, append): + # verify resolution of gh-11490 + + # specifically, raise an appropriate + # Exception when attempting to append or + # prepend with an incompatible type + msg = 'cannot convert' + with assert_raises_regex(ValueError, msg): + ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + + @pytest.mark.parametrize("ary," + "prepend," + "append," + "expected", [ + (np.array([1, 2, 3], dtype=np.int16), + 0, + None, + np.array([0, 1, 1], dtype=np.int16)), + (np.array([1, 2, 3], dtype=np.int32), + 0, + 0, + np.array([0, 1, 1, 0], dtype=np.int32)), + (np.array([1, 2, 3], dtype=np.int64), + 3, + -9, + np.array([3, 1, 1, -9], dtype=np.int64)), + ]) + def test_ediff1d_scalar_handling(self, + ary, + prepend, + append, + expected): + # maintain backwards-compatibility + # of scalar prepend / append behavior + # in ediff1d following fix for gh-11490 + actual = np.ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + assert_equal(actual, expected) + + + def test_isin(self): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. + def _isin_slow(a, b): + b = np.asarray(b).flatten().tolist() + return a in b + isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1}) + def assert_isin_equal(a, b): + x = isin(a, b) + y = isin_slow(a, b) + assert_array_equal(x, y) + + #multidimensional arrays in both arguments + a = np.arange(24).reshape([2, 3, 4]) + b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) + assert_isin_equal(a, b) + + #array-likes as both arguments + c = [(9, 8), (7, 6)] + d = (9, 7) + assert_isin_equal(c, d) + + #zero-d array: + f = np.array(3) + assert_isin_equal(f, b) + assert_isin_equal(a, f) + assert_isin_equal(f, f) + + #scalar: + assert_isin_equal(5, b) + assert_isin_equal(a, 6) + assert_isin_equal(5, 6) + + #empty array-like: + x = [] + assert_isin_equal(x, b) + assert_isin_equal(a, x) + assert_isin_equal(x, x) + + def test_in1d(self): + # we use two different sizes for the b array here to test the + # two different paths in in1d(). 
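+ # in1d switches between a per-element loop and a sort-based method + # once b grows large enough; mult=1 and mult=10 exercise both paths.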
+ for mult in (1, 10): + # One check without np.array to make sure lists are handled correctly + a = [5, 7, 1, 2] + b = [2, 4, 3, 1, 5] * mult + ec = np.array([True, False, True, True]) + c = in1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + a[0] = 8 + ec = np.array([False, False, True, True]) + c = in1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + a[0], a[3] = 4, 8 + ec = np.array([True, False, True, False]) + c = in1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) + b = [2, 3, 4] * mult + ec = [False, True, False, True, True, True, True, True, True, + False, True, False, False, False] + c = in1d(a, b) + assert_array_equal(c, ec) + + b = b + [5, 5, 4] * mult + ec = [True, True, True, True, True, True, True, True, True, True, + True, False, True, True] + c = in1d(a, b) + assert_array_equal(c, ec) + + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5] * mult) + ec = np.array([True, False, True, True]) + c = in1d(a, b) + assert_array_equal(c, ec) + + a = np.array([5, 7, 1, 1, 2]) + b = np.array([2, 4, 3, 3, 1, 5] * mult) + ec = np.array([True, False, True, True, True]) + c = in1d(a, b) + assert_array_equal(c, ec) + + a = np.array([5, 5]) + b = np.array([2, 2] * mult) + ec = np.array([False, False]) + c = in1d(a, b) + assert_array_equal(c, ec) + + a = np.array([5]) + b = np.array([2]) + ec = np.array([False]) + c = in1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal(in1d([], []), []) + + def test_in1d_char_array(self): + a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b']) + b = np.array(['a', 'c']) + + ec = np.array([True, False, True, False, False, True, False, False]) + c = in1d(a, b) + + assert_array_equal(c, ec) + + def test_in1d_invert(self): + "Test in1d's invert parameter" + # We use two different sizes for the b array here to test the + # two different paths in in1d(). + for mult in (1, 10): + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) + b = [2, 3, 4] * mult + assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + def test_in1d_ravel(self): + # Test that in1d ravels its input arrays. This is not documented + # behavior, however; the test is to ensure consistency. 
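+ # a is 2x3, b is 3x2 and long_b is 30x2; all are flattened before the + # membership test, so only the values matter.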
+ a = np.arange(6).reshape(2, 3) + b = np.arange(3, 9).reshape(3, 2) + long_b = np.arange(3, 63).reshape(30, 2) + ec = np.array([False, False, False, True, True, True]) + + assert_array_equal(in1d(a, b, assume_unique=True), ec) + assert_array_equal(in1d(a, b, assume_unique=False), ec) + assert_array_equal(in1d(a, long_b, assume_unique=True), ec) + assert_array_equal(in1d(a, long_b, assume_unique=False), ec) + + def test_in1d_first_array_is_object(self): + ar1 = [None] + ar2 = np.array([1]*10) + expected = np.array([False]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_second_array_is_object(self): + ar1 = 1 + ar2 = np.array([None]*10) + expected = np.array([False]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_both_arrays_are_object(self): + ar1 = [None] + ar2 = np.array([None]*10) + expected = np.array([True]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_both_arrays_have_structured_dtype(self): + # Test arrays of a structured data type containing an integer field + # and a field of dtype `object` allowing for arbitrary Python objects + dt = np.dtype([('field1', int), ('field2', object)]) + ar1 = np.array([(1, None)], dtype=dt) + ar2 = np.array([(1, None)]*10, dtype=dt) + expected = np.array([True]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_union1d(self): + a = np.array([5, 4, 7, 1, 2]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([1, 2, 3, 4, 5, 7]) + c = union1d(a, b) + assert_array_equal(c, ec) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = np.array([[0, 1, 2], [3, 4, 5]]) + y = np.array([0, 1, 2, 3, 4]) + ez = np.array([0, 1, 2, 3, 4, 5]) + z = union1d(x, y) + assert_array_equal(z, ez) + + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([6, 7]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + a = np.arange(21) + b = np.arange(19) + ec = np.array([19, 20]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setdiff1d([], [])) + a = np.array((), np.uint32) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_unique(self): + a = np.array([3, 2, 1]) + b = np.array([7, 5, 2]) + expected = np.array([3, 1]) + actual = setdiff1d(a, b, assume_unique=True) + assert_equal(actual, expected) + + def test_setdiff1d_char_array(self): + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + def test_manyways(self): + a = np.array([5, 7, 1, 2, 8]) + b = np.array([9, 8, 2, 4, 3, 1, 5]) + + c1 = setxor1d(a, b) + aux1 = intersect1d(a, b) + aux2 = union1d(a, b) + c2 = setdiff1d(aux2, aux1) + assert_array_equal(c1, c2) + + +class TestUnique(object): + + def test_unique_1d(self): + + def check_all(a, b, i1, i2, c, dt): + base_msg = 'check {0} failed for type {1}' + + msg = base_msg.format('values', dt) + v = unique(a) + assert_array_equal(v, b, msg) + + msg = base_msg.format('return_index', dt) + v, j = unique(a, 1, 0, 0) + assert_array_equal(v, b, msg) + assert_array_equal(j, i1, msg) + + msg = base_msg.format('return_inverse', dt) + v, j = unique(a, 0, 1, 0) + assert_array_equal(v, b, msg) + assert_array_equal(j, i2, msg) + + msg = base_msg.format('return_counts', dt) + v, j = unique(a, 0, 0, 1) + assert_array_equal(v, b, msg) + assert_array_equal(j, c, 
msg) + + msg = base_msg.format('return_index and return_inverse', dt) + v, j1, j2 = unique(a, 1, 1, 0) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + + msg = base_msg.format('return_index and return_counts', dt) + v, j1, j2 = unique(a, 1, 0, 1) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, c, msg) + + msg = base_msg.format('return_inverse and return_counts', dt) + v, j1, j2 = unique(a, 0, 1, 1) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i2, msg) + assert_array_equal(j2, c, msg) + + msg = base_msg.format(('return_index, return_inverse ' + 'and return_counts'), dt) + v, j1, j2, j3 = unique(a, 1, 1, 1) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert_array_equal(j3, c, msg) + + a = [5, 7, 1, 2, 1, 5, 7]*10 + b = [1, 2, 5, 7] + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3]*10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + for dt in types: + aa = np.array(a, dt) + bb = np.array(b, dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for object arrays + dt = 'O' + aa = np.empty(len(a), dt) + aa[:] = a + bb = np.empty(len(b), dt) + bb[:] = b + check_all(aa, bb, i1, i2, c, dt) + + # test for structured arrays + dt = [('', 'i'), ('', 'i')] + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for ticket #2799 + aa = [1. + 0.j, 1 - 1.j, 1] + assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + + # test for ticket #4785 + a = [(1, 2), (1, 2), (2, 3)] + unq = [1, 2, 3] + inv = [0, 1, 0, 1, 1, 2] + a1 = unique(a) + assert_array_equal(a1, unq) + a2, a2_inv = unique(a, return_inverse=True) + assert_array_equal(a2, unq) + assert_array_equal(a2_inv, inv) + + # test for chararrays with return_inverse (gh-5099) + a = np.chararray(5) + a[...] 
= '' + a2, a2_inv = np.unique(a, return_inverse=True) + assert_array_equal(a2_inv, np.zeros(5)) + + # test for ticket #9137 + a = [] + a1_idx = np.unique(a, return_index=True)[1] + a2_inv = np.unique(a, return_inverse=True)[1] + a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:] + assert_equal(a1_idx.dtype, np.intp) + assert_equal(a2_inv.dtype, np.intp) + assert_equal(a3_idx.dtype, np.intp) + assert_equal(a3_inv.dtype, np.intp) + + def test_unique_axis_errors(self): + assert_raises(TypeError, self._run_axis_tests, object) + assert_raises(TypeError, self._run_axis_tests, + [('a', int), ('b', object)]) + + assert_raises(np.AxisError, unique, np.arange(10), axis=2) + assert_raises(np.AxisError, unique, np.arange(10), axis=-2) + + def test_unique_axis_list(self): + msg = "Unique failed on list of lists" + inp = [[0, 1, 0], [0, 1, 0]] + inp_arr = np.asarray(inp) + assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) + assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) + + def test_unique_axis(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + types.append([('a', int), ('b', int)]) + types.append([('a', int), ('b', float)]) + + for dtype in types: + self._run_axis_tests(dtype) + + msg = 'Non-bitwise-equal booleans test failed' + data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) + result = np.array([[False, True], [True, True]], dtype=bool) + assert_array_equal(unique(data, axis=0), result, msg) + + msg = 'Negative zero equality test failed' + data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) + result = np.array([[-0.0, 0.0]]) + assert_array_equal(unique(data, axis=0), result, msg) + + def test_unique_masked(self): + # issue 8664 + x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], dtype='uint8') + y = np.ma.masked_equal(x, 0) + + v = np.unique(y) + v2, i, c = np.unique(y, return_index=True, return_counts=True) + + msg = 'Unique returned different results when asked for index' + assert_array_equal(v.data, v2.data, msg) + assert_array_equal(v.mask, v2.mask, msg) + + def test_unique_sort_order_with_axis(self): + # These tests fail if sorting along axis is done by treating subarrays + # as unsigned byte strings. See gh-10495. 
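+ # signed dtypes only: [-1] must sort before [0], which fails if the + # rows are compared as raw unsigned bytes.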
+ fmt = "sort order incorrect for integer type '%s'" + for dt in 'bhilq': + a = np.array([[-1],[0]], dt) + b = np.unique(a, axis=0) + assert_array_equal(a, b, fmt % dt) + + def _run_axis_tests(self, dtype): + data = np.array([[0, 1, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [1, 0, 0, 0]]).astype(dtype) + + msg = 'Unique with 1d array and axis=0 failed' + result = np.array([0, 1]) + assert_array_equal(unique(data), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=0 failed' + result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) + assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=1 failed' + result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) + assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) + + msg = 'Unique with 3d array and axis=2 failed' + data3d = np.dstack([data] * 3) + result = data3d[..., :1] + assert_array_equal(unique(data3d, axis=2), result, msg) + + uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=0" + assert_array_equal(data[idx], uniq, msg) + msg = "Unique's return_inverse=True failed with axis=0" + assert_array_equal(uniq[inv], data) + msg = "Unique's return_counts=True failed with axis=0" + assert_array_equal(cnt, np.array([2, 2]), msg) + + uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=1" + assert_array_equal(data[:, idx], uniq) + msg = "Unique's return_inverse=True failed with axis=1" + assert_array_equal(uniq[:, inv], data) + msg = "Unique's return_counts=True failed with axis=1" + assert_array_equal(cnt, np.array([2, 1, 1]), msg) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraysetops.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraysetops.pyc new file mode 100644 index 0000000..703e8a3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arraysetops.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arrayterator.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arrayterator.py new file mode 100644 index 0000000..2ce4456 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arrayterator.py @@ -0,0 +1,48 @@ +from __future__ import division, absolute_import, print_function + +from operator import mul +from functools import reduce + +import numpy as np +from numpy.random import randint +from numpy.lib import Arrayterator +from numpy.testing import assert_ + + +def test(): + np.random.seed(np.arange(10)) + + # Create a random array + ndims = randint(5)+1 + shape = tuple(randint(10)+1 for dim in range(ndims)) + els = reduce(mul, shape) + a = np.arange(els) + a.shape = shape + + buf_size = randint(2*els) + b = Arrayterator(a, buf_size) + + # Check that each block has at most ``buf_size`` elements + for block in b: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that all elements are iterated correctly + assert_(list(b.flat) == list(a.flat)) + + # Slice arrayterator + start = [randint(dim) for dim in shape] + stop = [randint(dim)+1 for dim in shape] + step = [randint(dim)+1 for dim in shape] + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + c = b[slice_] + d = a[slice_] + + # Check that each block has at most ``buf_size`` elements + for block in c: + 
assert_(len(block.flat) <= (buf_size or els)) + + # Check that the arrayterator is sliced correctly + assert_(np.all(c.__array__() == d)) + + # Check that all elements are iterated correctly + assert_(list(c.flat) == list(d.flat)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arrayterator.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arrayterator.pyc new file mode 100644 index 0000000..656a09c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_arrayterator.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_financial.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_financial.py new file mode 100644 index 0000000..5249150 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_financial.py @@ -0,0 +1,340 @@ +from __future__ import division, absolute_import, print_function + +from decimal import Decimal + +import numpy as np +from numpy.testing import ( + assert_, assert_almost_equal, assert_allclose, assert_equal, assert_raises + ) + + +class TestFinancial(object): + def test_rate(self): + assert_almost_equal( + np.rate(10, 0, -3500, 10000), + 0.1107, 4) + + def test_rate_decimal(self): + rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000')) + assert_equal(Decimal('0.1106908537142689284704528100'), rate) + + def test_irr(self): + v = [-150000, 15000, 25000, 35000, 45000, 60000] + assert_almost_equal(np.irr(v), 0.0524, 2) + v = [-100, 0, 0, 74] + assert_almost_equal(np.irr(v), -0.0955, 2) + v = [-100, 39, 59, 55, 20] + assert_almost_equal(np.irr(v), 0.28095, 2) + v = [-100, 100, 0, -7] + assert_almost_equal(np.irr(v), -0.0833, 2) + v = [-100, 100, 0, 7] + assert_almost_equal(np.irr(v), 0.06206, 2) + v = [-5, 10.5, 1, -8, 1] + assert_almost_equal(np.irr(v), 0.0886, 2) + + # Test that if there is no solution then np.irr returns nan + # Fixes gh-6744 + v = [-1, -2, -3] + assert_equal(np.irr(v), np.nan) + + def test_pv(self): + assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2) + + def test_pv_decimal(self): + assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')), + Decimal('-127128.1709461939327295222005')) + + def test_fv(self): + assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924) + + def test_fv_decimal(self): + assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0), + Decimal('86609.36267304300040536731624')) + + def test_pmt(self): + res = np.pmt(0.08 / 12, 5 * 12, 15000) + tgt = -304.145914 + assert_allclose(res, tgt) + # Test the edge case where rate == 0.0 + res = np.pmt(0.0, 5 * 12, 15000) + tgt = -250.0 + assert_allclose(res, tgt) + # Test the case where we use broadcast and + # the arguments passed in are arrays. + res = np.pmt([[0.0, 0.8], [0.3, 0.8]], [12, 3], [2000, 20000]) + tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]]) + assert_allclose(res, tgt) + + def test_pmt_decimal(self): + res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000) + tgt = Decimal('-304.1459143262052370338701494') + assert_equal(res, tgt) + # Test the edge case where rate == 0.0 + res = np.pmt(Decimal('0'), Decimal('60'), Decimal('15000')) + tgt = -250 + assert_equal(res, tgt) + # Test the case where we use broadcast and + # the arguments passed in are arrays. 
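+ # same broadcast shapes as the float-based test above, with Decimal cells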
+ res = np.pmt([[Decimal('0'), Decimal('0.8')], [Decimal('0.3'), Decimal('0.8')]], + [Decimal('12'), Decimal('3')], [Decimal('2000'), Decimal('20000')]) + tgt = np.array([[Decimal('-166.6666666666666666666666667'), Decimal('-19311.25827814569536423841060')], + [Decimal('-626.9081401700757748402586600'), Decimal('-19311.25827814569536423841060')]]) + + # Cannot use the `assert_allclose` because it uses isfinite under the covers + # which does not support the Decimal type + # See issue: https://github.com/numpy/numpy/issues/9954 + assert_equal(res[0][0], tgt[0][0]) + assert_equal(res[0][1], tgt[0][1]) + assert_equal(res[1][0], tgt[1][0]) + assert_equal(res[1][1], tgt[1][1]) + + def test_ppmt(self): + assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25) + + def test_ppmt_decimal(self): + assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')), + Decimal('-710.2541257864217612489830917')) + + # Two tests showing how Decimal is actually getting at a more exact result + # .23 / 12 does not come out nicely as a float but does as a decimal + def test_ppmt_special_rate(self): + assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036) + + def test_ppmt_special_rate_decimal(self): + # When rounded out to 8 decimal places like the float based test, this should not equal the same value + # as the float, substituted for the decimal + def raise_error_because_not_equal(): + assert_equal( + round(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), 8), + Decimal('-90238044.232277036')) + + assert_raises(AssertionError, raise_error_because_not_equal) + assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), + Decimal('-90238044.2322778884413969909')) + + def test_ipmt(self): + assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67) + + def test_ipmt_decimal(self): + result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000) + assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667')) + + def test_nper(self): + assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), + 21.54, 2) + + def test_nper2(self): + assert_almost_equal(np.nper(0.0, -2000, 0, 100000.), + 50.0, 1) + + def test_npv(self): + assert_almost_equal( + np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]), + 122.89, 2) + + def test_npv_decimal(self): + assert_equal( + np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]), + Decimal('122.894854950942692161628715')) + + def test_mirr(self): + val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000] + assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) + + val = [-120000, 39000, 30000, 21000, 37000, 46000] + assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6) + + val = [100, 200, -50, 300, -200] + assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4) + + val = [39000, 30000, 21000, 37000, 46000] + assert_(np.isnan(np.mirr(val, 0.10, 0.12))) + + def test_mirr_decimal(self): + val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'), + Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'), + Decimal('700'), Decimal('3000')] + assert_equal(np.mirr(val, Decimal('0.08'), Decimal('0.055')), + Decimal('0.066597175031553548874239618')) + + val = [Decimal('-120000'), Decimal('39000'), Decimal('30000'), + Decimal('21000'), Decimal('37000'), Decimal('46000')] + assert_equal(np.mirr(val, Decimal('0.10'), Decimal('0.12')), Decimal('0.126094130365905145828421880')) + + val = 
[Decimal('100'), Decimal('200'), Decimal('-50'), + Decimal('300'), Decimal('-200')] + assert_equal(np.mirr(val, Decimal('0.05'), Decimal('0.06')), Decimal('0.342823387842176663647819868')) + + val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')] + assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12')))) + + def test_when(self): + # begin + assert_equal(np.rate(10, 20, -3500, 10000, 1), + np.rate(10, 20, -3500, 10000, 'begin')) + # end + assert_equal(np.rate(10, 20, -3500, 10000), + np.rate(10, 20, -3500, 10000, 'end')) + assert_equal(np.rate(10, 20, -3500, 10000, 0), + np.rate(10, 20, -3500, 10000, 'end')) + + # begin + assert_equal(np.pv(0.07, 20, 12000, 0, 1), + np.pv(0.07, 20, 12000, 0, 'begin')) + # end + assert_equal(np.pv(0.07, 20, 12000, 0), + np.pv(0.07, 20, 12000, 0, 'end')) + assert_equal(np.pv(0.07, 20, 12000, 0, 0), + np.pv(0.07, 20, 12000, 0, 'end')) + + # begin + assert_equal(np.fv(0.075, 20, -2000, 0, 1), + np.fv(0.075, 20, -2000, 0, 'begin')) + # end + assert_equal(np.fv(0.075, 20, -2000, 0), + np.fv(0.075, 20, -2000, 0, 'end')) + assert_equal(np.fv(0.075, 20, -2000, 0, 0), + np.fv(0.075, 20, -2000, 0, 'end')) + + # begin + assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 1), + np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'begin')) + # end + assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0), + np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end')) + assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 0), + np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end')) + + # begin + assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 1), + np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'begin')) + # end + assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0), + np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end')) + assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 0), + np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end')) + + # begin + assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 1), + np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'begin')) + # end + assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0), + np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end')) + assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 0), + np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end')) + + # begin + assert_equal(np.nper(0.075, -2000, 0, 100000., 1), + np.nper(0.075, -2000, 0, 100000., 'begin')) + # end + assert_equal(np.nper(0.075, -2000, 0, 100000.), + np.nper(0.075, -2000, 0, 100000., 'end')) + assert_equal(np.nper(0.075, -2000, 0, 100000., 0), + np.nper(0.075, -2000, 0, 100000., 'end')) + + def test_decimal_with_when(self): + """Test that decimals are still supported if the when argument is passed""" + # begin + assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('1')), + np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'begin')) + # end + assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000')), + np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end')) + assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('0')), + np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end')) + + # begin + assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('1')), + np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'begin')) + # end + assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')), + np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end')) + assert_equal(np.pv(Decimal('0.07'), 
Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('0')), + np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end')) + + # begin + assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('1')), + np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'begin')) + # end + assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0')), + np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end')) + assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('0')), + np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end')) + + # begin + assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), Decimal('1')), + np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), 'begin')) + # end + assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0')), + np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), 'end')) + assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), Decimal('0')), + np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), 'end')) + + # begin + assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), Decimal('1')), + np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), 'begin')) + # end + assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0')), + np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), 'end')) + assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), Decimal('0')), + np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), 'end')) + + # begin + assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), Decimal('1')).flat[0], + np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), 'begin').flat[0]) + # end + assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0')).flat[0], + np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), 'end').flat[0]) + assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), Decimal('0')).flat[0], + np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), 'end').flat[0]) + + def test_broadcast(self): + assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]), + [21.5449442, 20.76156441], 4) + + assert_almost_equal(np.ipmt(0.1 / 12, list(range(5)), 24, 2000), + [-17.29165168, -16.66666667, -16.03647345, + -15.40102862, -14.76028842], 4) + + assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000), + [-74.998201, -75.62318601, -76.25337923, + -76.88882405, -77.52956425], 4) + + assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000, 0, + [0, 0, 1, 'end', 'begin']), + [-74.998201, -75.62318601, -75.62318601, + 
-76.88882405, -76.88882405], 4) + + def test_broadcast_decimal(self): + # Use almost equal because precision is tested in the explicit tests, this test is to ensure + # broadcast with Decimal is not broken. + assert_almost_equal(np.ipmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')), + [Decimal('-17.29165168'), Decimal('-16.66666667'), Decimal('-16.03647345'), + Decimal('-15.40102862'), Decimal('-14.76028842')], 4) + + assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')), + [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-76.25337923'), + Decimal('-76.88882405'), Decimal('-77.52956425')], 4) + + assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000'), + Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']), + [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'), + Decimal('-76.88882405'), Decimal('-76.88882405')], 4) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_financial.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_financial.pyc new file mode 100644 index 0000000..b36e363 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_financial.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_format.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_format.py new file mode 100644 index 0000000..0775070 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_format.py @@ -0,0 +1,881 @@ +from __future__ import division, absolute_import, print_function + +# doctest +r''' Test the .npy file format. + +Set up: + + >>> import sys + >>> from io import BytesIO + >>> from numpy.lib import format + >>> + >>> scalars = [ + ... np.uint8, + ... np.int8, + ... np.uint16, + ... np.int16, + ... np.uint32, + ... np.int32, + ... np.uint64, + ... np.int64, + ... np.float32, + ... np.float64, + ... np.complex64, + ... np.complex128, + ... object, + ... ] + >>> + >>> basic_arrays = [] + >>> + >>> for scalar in scalars: + ... for endian in '<>': + ... dtype = np.dtype(scalar).newbyteorder(endian) + ... basic = np.arange(15).astype(dtype) + ... basic_arrays.extend([ + ... np.array([], dtype=dtype), + ... np.array(10, dtype=dtype), + ... basic, + ... basic.reshape((3,5)), + ... basic.reshape((3,5)).T, + ... basic.reshape((3,5))[::-1,::2], + ... ]) + ... + >>> + >>> Pdescr = [ + ... ('x', 'i4', (2,)), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> PbufferT = [ + ... ([3,2], [[6.,4.],[6.,4.]], 8), + ... ([4,3], [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> Ndescr = [ + ... ('x', 'i4', (2,)), + ... ('Info', [ + ... ('value', 'c16'), + ... ('y2', 'f8'), + ... ('Info2', [ + ... ('name', 'S2'), + ... ('value', 'c16', (2,)), + ... ('y3', 'f8', (2,)), + ... ('z3', 'u4', (2,))]), + ... ('name', 'S2'), + ... ('z2', 'b1')]), + ... ('color', 'S2'), + ... ('info', [ + ... ('Name', 'U8'), + ... ('Value', 'c16')]), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> NbufferT = [ + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> record_arrays = [ + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + ... 
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + ... ] + +Test the magic string writing. + + >>> format.magic(1, 0) + '\x93NUMPY\x01\x00' + >>> format.magic(0, 0) + '\x93NUMPY\x00\x00' + >>> format.magic(255, 255) + '\x93NUMPY\xff\xff' + >>> format.magic(2, 5) + '\x93NUMPY\x02\x05' + +Test the magic string reading. + + >>> format.read_magic(BytesIO(format.magic(1, 0))) + (1, 0) + >>> format.read_magic(BytesIO(format.magic(0, 0))) + (0, 0) + >>> format.read_magic(BytesIO(format.magic(255, 255))) + (255, 255) + >>> format.read_magic(BytesIO(format.magic(2, 5))) + (2, 5) + +Test the header writing. + + >>> for arr in basic_arrays + record_arrays: + ... f = BytesIO() + ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... print(repr(f.getvalue())) + ... + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} 
\n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': 
(3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" + "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" +''' +import sys +import os +import shutil +import tempfile +import warnings +import pytest +from io import BytesIO + +import numpy as np +from numpy.testing import ( + assert_, assert_array_equal, assert_raises, assert_raises_regex, + ) +from numpy.lib import format + + +tempdir = None + +# Module-level setup. + + +def setup_module(): + global tempdir + tempdir = tempfile.mkdtemp() + + +def teardown_module(): + global tempdir + if tempdir is not None and os.path.isdir(tempdir): + shutil.rmtree(tempdir) + tempdir = None + + +# Generate some basic arrays to test with. +scalars = [ + np.uint8, + np.int8, + np.uint16, + np.int16, + np.uint32, + np.int32, + np.uint64, + np.int64, + np.float32, + np.float64, + np.complex64, + np.complex128, + object, +] +basic_arrays = [] +for scalar in scalars: + for endian in '<>': + dtype = np.dtype(scalar).newbyteorder(endian) + basic = np.arange(1500).astype(dtype) + basic_arrays.extend([ + # Empty + np.array([], dtype=dtype), + # Rank-0 + np.array(10, dtype=dtype), + # 1-D + basic, + # 2-D C-contiguous + basic.reshape((30, 50)), + # 2-D F-contiguous + basic.reshape((30, 50)).T, + # 2-D non-contiguous + basic.reshape((30, 50))[::-1, ::2], + ]) + +# More complicated record arrays. 
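+# (Pdescr/PbufferT and Ndescr/NbufferT below mirror the doctest header +# at the top of this file.)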
+# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), + 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), + 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + +record_arrays = [ + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), +] + + +#BytesIO that reads a random number of bytes at a time +class BytesIOSRandomSize(BytesIO): + def read(self, size=None): + import random + size = random.randint(1, size) + return super(BytesIOSRandomSize, self).read(size) + + +def roundtrip(arr): + f = BytesIO() + format.write_array(f, arr) + f2 = BytesIO(f.getvalue()) + arr2 = format.read_array(f2) + return arr2 + + +def roundtrip_randsize(arr): + f = BytesIO() + format.write_array(f, arr) + f2 = BytesIOSRandomSize(f.getvalue()) + arr2 = format.read_array(f2) + return arr2 + + +def roundtrip_truncated(arr): + f = BytesIO() + format.write_array(f, arr) + #BytesIO is one byte short + f2 = BytesIO(f.getvalue()[0:-1]) + arr2 = format.read_array(f2) + return arr2 + + +def assert_equal_(o1, o2): + assert_(o1 == o2) + + +def test_roundtrip(): + for arr in basic_arrays + record_arrays: + arr2 = roundtrip(arr) + assert_array_equal(arr, arr2) + + +def test_roundtrip_randsize(): + for arr in basic_arrays + record_arrays: + if arr.dtype != object: + arr2 = roundtrip_randsize(arr) + assert_array_equal(arr, arr2) + + +def test_roundtrip_truncated(): + for arr in basic_arrays: + if arr.dtype != object: + assert_raises(ValueError, roundtrip_truncated, arr) + + +def test_long_str(): + # check items larger than internal buffer size, gh-4027 + long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1))) + long_str_arr2 = roundtrip(long_str_arr) + assert_array_equal(long_str_arr, long_str_arr2) + + +@pytest.mark.slow +def test_memmap_roundtrip(): + # Fixme: used to crash on windows + if not (sys.platform == 'win32' or sys.platform == 'cygwin'): + for arr in basic_arrays + record_arrays: + if arr.dtype.hasobject: + # Skip these since they can't be mmap'ed. 
+ continue + # Write it out normally and through mmap. + nfn = os.path.join(tempdir, 'normal.npy') + mfn = os.path.join(tempdir, 'memmap.npy') + fp = open(nfn, 'wb') + try: + format.write_array(fp, arr) + finally: + fp.close() + + fortran_order = ( + arr.flags.f_contiguous and not arr.flags.c_contiguous) + ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype, + shape=arr.shape, fortran_order=fortran_order) + ma[...] = arr + del ma + + # Check that both of these files' contents are the same. + fp = open(nfn, 'rb') + normal_bytes = fp.read() + fp.close() + fp = open(mfn, 'rb') + memmap_bytes = fp.read() + fp.close() + assert_equal_(normal_bytes, memmap_bytes) + + # Check that reading the file using memmap works. + ma = format.open_memmap(nfn, mode='r') + del ma + + +def test_compressed_roundtrip(): + arr = np.random.rand(200, 200) + npz_file = os.path.join(tempdir, 'compressed.npz') + np.savez_compressed(npz_file, arr=arr) + arr1 = np.load(npz_file)['arr'] + assert_array_equal(arr, arr1) + + +# aligned +dt1 = np.dtype('i1, i4, i1', align=True) +# non-aligned, explicit offsets +dt2 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'], + 'offsets': [1, 6]}) +# nested struct-in-struct +dt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]}) +# field with '' name +dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4']*3}) +# titles +dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'], + 'offsets': [1, 6], 'titles': ['aa', 'bb']}) + +@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5]) +def test_load_padded_dtype(dt): + arr = np.zeros(3, dt) + for i in range(3): + arr[i] = i + 5 + npz_file = os.path.join(tempdir, 'aligned.npz') + np.savez(npz_file, arr=arr) + arr1 = np.load(npz_file)['arr'] + assert_array_equal(arr, arr1) + + +def test_python2_python3_interoperability(): + if sys.version_info[0] >= 3: + fname = 'win64python2.npy' + else: + fname = 'python3.npy' + path = os.path.join(os.path.dirname(__file__), 'data', fname) + data = np.load(path) + assert_array_equal(data, np.ones(2)) + +def test_pickle_python2_python3(): + # Test that loading object arrays saved on Python 2 works both on + # Python 2 and Python 3 and vice versa + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + if sys.version_info[0] >= 3: + xrange = range + else: + import __builtin__ + xrange = __builtin__.xrange + + expected = np.array([None, xrange, u'\u512a\u826f', + b'\xe4\xb8\x8d\xe8\x89\xaf'], + dtype=object) + + for fname in ['py2-objarr.npy', 'py2-objarr.npz', + 'py3-objarr.npy', 'py3-objarr.npz']: + path = os.path.join(data_dir, fname) + + for encoding in ['bytes', 'latin1']: + data_f = np.load(path, encoding=encoding) + if fname.endswith('.npz'): + data = data_f['x'] + data_f.close() + else: + data = data_f + + if sys.version_info[0] >= 3: + if encoding == 'latin1' and fname.startswith('py2'): + assert_(isinstance(data[3], str)) + assert_array_equal(data[:-1], expected[:-1]) + # mojibake occurs + assert_array_equal(data[-1].encode(encoding), expected[-1]) + else: + assert_(isinstance(data[3], bytes)) + assert_array_equal(data, expected) + else: + assert_array_equal(data, expected) + + if sys.version_info[0] >= 3: + if fname.startswith('py2'): + if fname.endswith('.npz'): + data = np.load(path) + assert_raises(UnicodeError, data.__getitem__, 'x') + data.close() + data = np.load(path, fix_imports=False, encoding='latin1') + assert_raises(ImportError, data.__getitem__, 'x') + data.close() + else: + assert_raises(UnicodeError, np.load, path) + assert_raises(ImportError, 
np.load, path,
+                              encoding='latin1', fix_imports=False)
+
+
+def test_pickle_disallow():
+    data_dir = os.path.join(os.path.dirname(__file__), 'data')
+
+    path = os.path.join(data_dir, 'py2-objarr.npy')
+    assert_raises(ValueError, np.load, path,
+                  allow_pickle=False, encoding='latin1')
+
+    path = os.path.join(data_dir, 'py2-objarr.npz')
+    f = np.load(path, allow_pickle=False, encoding='latin1')
+    assert_raises(ValueError, f.__getitem__, 'x')
+
+    path = os.path.join(tempdir, 'pickle-disabled.npy')
+    assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
+                  allow_pickle=False)
+
+
+def test_version_2_0():
+    f = BytesIO()
+    # requires more than 2 bytes for header
+    dt = [(("%d" % i) * 100, float) for i in range(500)]
+    d = np.ones(1000, dtype=dt)
+
+    format.write_array(f, d, version=(2, 0))
+    with warnings.catch_warnings(record=True) as w:
+        warnings.filterwarnings('always', '', UserWarning)
+        format.write_array(f, d)
+        assert_(w[0].category is UserWarning)
+
+    # check alignment of data portion
+    f.seek(0)
+    header = f.readline()
+    assert_(len(header) % format.ARRAY_ALIGN == 0)
+
+    f.seek(0)
+    n = format.read_array(f)
+    assert_array_equal(d, n)
+
+    # 1.0 requested but data cannot be saved this way
+    assert_raises(ValueError, format.write_array, f, d, (1, 0))
+
+
+@pytest.mark.slow
+def test_version_2_0_memmap():
+    # requires more than 2 bytes for header
+    dt = [(("%d" % i) * 100, float) for i in range(500)]
+    d = np.ones(1000, dtype=dt)
+    tf = tempfile.mktemp('', 'mmap', dir=tempdir)
+
+    # 1.0 requested but data cannot be saved this way
+    assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype,
+                  shape=d.shape, version=(1, 0))
+
+    ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
+                            shape=d.shape, version=(2, 0))
+    ma[...] = d
+    del ma
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.filterwarnings('always', '', UserWarning)
+        ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
+                                shape=d.shape, version=None)
+        assert_(w[0].category is UserWarning)
+        ma[...] = d
+        del ma
+
+    ma = format.open_memmap(tf, mode='r')
+    assert_array_equal(ma, d)
+
+
+def test_write_version():
+    f = BytesIO()
+    arr = np.arange(1)
+    # These should pass.
+    format.write_array(f, arr, version=(1, 0))
+    format.write_array(f, arr)
+
+    format.write_array(f, arr, version=None)
+    format.write_array(f, arr)
+
+    format.write_array(f, arr, version=(2, 0))
+    format.write_array(f, arr)
+
+    # These should all fail.
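+    # Only format versions (1, 0) and (2, 0) are defined; write_array rejects the rest.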
+ bad_versions = [ + (1, 1), + (0, 0), + (0, 1), + (2, 2), + (255, 255), + ] + for version in bad_versions: + with assert_raises_regex(ValueError, + 'we only support format version.*'): + format.write_array(f, arr, version=version) + + +bad_version_magic = [ + b'\x93NUMPY\x01\x01', + b'\x93NUMPY\x00\x00', + b'\x93NUMPY\x00\x01', + b'\x93NUMPY\x02\x00', + b'\x93NUMPY\x02\x02', + b'\x93NUMPY\xff\xff', +] +malformed_magic = [ + b'\x92NUMPY\x01\x00', + b'\x00NUMPY\x01\x00', + b'\x93numpy\x01\x00', + b'\x93MATLB\x01\x00', + b'\x93NUMPY\x01', + b'\x93NUMPY', + b'', +] + +def test_read_magic(): + s1 = BytesIO() + s2 = BytesIO() + + arr = np.ones((3, 6), dtype=float) + + format.write_array(s1, arr, version=(1, 0)) + format.write_array(s2, arr, version=(2, 0)) + + s1.seek(0) + s2.seek(0) + + version1 = format.read_magic(s1) + version2 = format.read_magic(s2) + + assert_(version1 == (1, 0)) + assert_(version2 == (2, 0)) + + assert_(s1.tell() == format.MAGIC_LEN) + assert_(s2.tell() == format.MAGIC_LEN) + +def test_read_magic_bad_magic(): + for magic in malformed_magic: + f = BytesIO(magic) + assert_raises(ValueError, format.read_array, f) + + +def test_read_version_1_0_bad_magic(): + for magic in bad_version_magic + malformed_magic: + f = BytesIO(magic) + assert_raises(ValueError, format.read_array, f) + + +def test_bad_magic_args(): + assert_raises(ValueError, format.magic, -1, 1) + assert_raises(ValueError, format.magic, 256, 1) + assert_raises(ValueError, format.magic, 1, -1) + assert_raises(ValueError, format.magic, 1, 256) + + +def test_large_header(): + s = BytesIO() + d = {'a': 1, 'b': 2} + format.write_array_header_1_0(s, d) + + s = BytesIO() + d = {'a': 1, 'b': 2, 'c': 'x'*256*256} + assert_raises(ValueError, format.write_array_header_1_0, s, d) + + +def test_read_array_header_1_0(): + s = BytesIO() + + arr = np.ones((3, 6), dtype=float) + format.write_array(s, arr, version=(1, 0)) + + s.seek(format.MAGIC_LEN) + shape, fortran, dtype = format.read_array_header_1_0(s) + + assert_(s.tell() % format.ARRAY_ALIGN == 0) + assert_((shape, fortran, dtype) == ((3, 6), False, float)) + + +def test_read_array_header_2_0(): + s = BytesIO() + + arr = np.ones((3, 6), dtype=float) + format.write_array(s, arr, version=(2, 0)) + + s.seek(format.MAGIC_LEN) + shape, fortran, dtype = format.read_array_header_2_0(s) + + assert_(s.tell() % format.ARRAY_ALIGN == 0) + assert_((shape, fortran, dtype) == ((3, 6), False, float)) + + +def test_bad_header(): + # header of length less than 2 should fail + s = BytesIO() + assert_raises(ValueError, format.read_array_header_1_0, s) + s = BytesIO(b'1') + assert_raises(ValueError, format.read_array_header_1_0, s) + + # header shorter than indicated size should fail + s = BytesIO(b'\x01\x00') + assert_raises(ValueError, format.read_array_header_1_0, s) + + # headers without the exact keys required should fail + d = {"shape": (1, 2), + "descr": "x"} + s = BytesIO() + format.write_array_header_1_0(s, d) + assert_raises(ValueError, format.read_array_header_1_0, s) + + d = {"shape": (1, 2), + "fortran_order": False, + "descr": "x", + "extrakey": -1} + s = BytesIO() + format.write_array_header_1_0(s, d) + assert_raises(ValueError, format.read_array_header_1_0, s) + + +def test_large_file_support(): + if (sys.platform == 'win32' or sys.platform == 'cygwin'): + pytest.skip("Unknown if Windows has sparse filesystems") + # try creating a large sparse file + tf_name = os.path.join(tempdir, 'sparse_file') + try: + # seek past end would work too, but linux truncate somewhat + # increases 
the chances that we have a sparse filesystem and can + # avoid actually writing 5GB + import subprocess as sp + sp.check_call(["truncate", "-s", "5368709120", tf_name]) + except Exception: + pytest.skip("Could not create 5GB large file") + # write a small array to the end + with open(tf_name, "wb") as f: + f.seek(5368709120) + d = np.arange(5) + np.save(f, d) + # read it back + with open(tf_name, "rb") as f: + f.seek(5368709120) + r = np.load(f) + assert_array_equal(r, d) + + +@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8, + reason="test requires 64-bit system") +@pytest.mark.slow +def test_large_archive(): + # Regression test for product of saving arrays with dimensions of array + # having a product that doesn't fit in int32. See gh-7598 for details. + try: + a = np.empty((2**30, 2), dtype=np.uint8) + except MemoryError: + pytest.skip("Could not create large file") + + fname = os.path.join(tempdir, "large_archive") + + with open(fname, "wb") as f: + np.savez(f, arr=a) + + with open(fname, "rb") as f: + new_a = np.load(f)["arr"] + + assert_(a.shape == new_a.shape) + + +def test_empty_npz(): + # Test for gh-9989 + fname = os.path.join(tempdir, "nothing.npz") + np.savez(fname) + np.load(fname) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_format.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_format.pyc new file mode 100644 index 0000000..25fda32 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_format.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_function_base.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_function_base.py new file mode 100644 index 0000000..3d4b0e3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_function_base.py @@ -0,0 +1,3140 @@ +from __future__ import division, absolute_import, print_function + +import operator +import warnings +import sys +import decimal +import pytest + +import numpy as np +from numpy import ma +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose, + assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT, + ) +import numpy.lib.function_base as nfb +from numpy.random import rand +from numpy.lib import ( + add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov, + delete, diff, digitize, extract, flipud, gradient, hamming, hanning, + i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90, + select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize + ) + +from numpy.compat import long + + +def get_mat(n): + data = np.arange(n) + data = np.add.outer(data, data) + return data + + +class TestRot90(object): + def test_basic(self): + assert_raises(ValueError, rot90, np.ones(4)) + assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2)) + assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2)) + assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1)) + assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(-2,1)) + + a = [[0, 1, 2], + [3, 4, 5]] + b1 = [[2, 5], + [1, 4], + [0, 3]] + b2 = [[5, 4, 3], + [2, 1, 0]] + b3 = [[3, 0], + [4, 1], + [5, 2]] + b4 = [[0, 1, 2], + [3, 4, 5]] + + for k in range(-3, 13, 4): + assert_equal(rot90(a, k=k), b1) + for k in range(-2, 13, 4): + assert_equal(rot90(a, k=k), b2) + for k in range(-1, 13, 4): + assert_equal(rot90(a, k=k), b3) + for k in range(0, 13, 4): + 
assert_equal(rot90(a, k=k), b4) + + assert_equal(rot90(rot90(a, axes=(0,1)), axes=(1,0)), a) + assert_equal(rot90(a, k=1, axes=(1,0)), rot90(a, k=-1, axes=(0,1))) + + def test_axes(self): + a = np.ones((50, 40, 3)) + assert_equal(rot90(a).shape, (40, 50, 3)) + assert_equal(rot90(a, axes=(0,2)), rot90(a, axes=(0,-1))) + assert_equal(rot90(a, axes=(1,2)), rot90(a, axes=(-2,-1))) + + def test_rotation_axes(self): + a = np.arange(8).reshape((2,2,2)) + + a_rot90_01 = [[[2, 3], + [6, 7]], + [[0, 1], + [4, 5]]] + a_rot90_12 = [[[1, 3], + [0, 2]], + [[5, 7], + [4, 6]]] + a_rot90_20 = [[[4, 0], + [6, 2]], + [[5, 1], + [7, 3]]] + a_rot90_10 = [[[4, 5], + [0, 1]], + [[6, 7], + [2, 3]]] + + assert_equal(rot90(a, axes=(0, 1)), a_rot90_01) + assert_equal(rot90(a, axes=(1, 0)), a_rot90_10) + assert_equal(rot90(a, axes=(1, 2)), a_rot90_12) + + for k in range(1,5): + assert_equal(rot90(a, k=k, axes=(2, 0)), + rot90(a_rot90_20, k=k-1, axes=(2, 0))) + + +class TestFlip(object): + + def test_axes(self): + assert_raises(np.AxisError, np.flip, np.ones(4), axis=1) + assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2) + assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3) + assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3)) + + def test_basic_lr(self): + a = get_mat(4) + b = a[:, ::-1] + assert_equal(np.flip(a, 1), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(np.flip(a, 1), b) + + def test_basic_ud(self): + a = get_mat(4) + b = a[::-1, :] + assert_equal(np.flip(a, 0), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(np.flip(a, 0), b) + + def test_3d_swap_axis0(self): + a = np.array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + b = np.array([[[4, 5], + [6, 7]], + [[0, 1], + [2, 3]]]) + + assert_equal(np.flip(a, 0), b) + + def test_3d_swap_axis1(self): + a = np.array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + b = np.array([[[2, 3], + [0, 1]], + [[6, 7], + [4, 5]]]) + + assert_equal(np.flip(a, 1), b) + + def test_3d_swap_axis2(self): + a = np.array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + b = np.array([[[1, 0], + [3, 2]], + [[5, 4], + [7, 6]]]) + + assert_equal(np.flip(a, 2), b) + + def test_4d(self): + a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5) + for i in range(a.ndim): + assert_equal(np.flip(a, i), + np.flipud(a.swapaxes(0, i)).swapaxes(i, 0)) + + def test_default_axis(self): + a = np.array([[1, 2, 3], + [4, 5, 6]]) + b = np.array([[6, 5, 4], + [3, 2, 1]]) + assert_equal(np.flip(a), b) + + def test_multiple_axes(self): + a = np.array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + assert_equal(np.flip(a, axis=()), a) + + b = np.array([[[5, 4], + [7, 6]], + [[1, 0], + [3, 2]]]) + + assert_equal(np.flip(a, axis=(0, 2)), b) + + c = np.array([[[3, 2], + [1, 0]], + [[7, 6], + [5, 4]]]) + + assert_equal(np.flip(a, axis=(1, 2)), c) + + +class TestAny(object): + + def test_basic(self): + y1 = [0, 0, 1, 0] + y2 = [0, 0, 0, 0] + y3 = [1, 0, 1, 0] + assert_(np.any(y1)) + assert_(np.any(y3)) + assert_(not np.any(y2)) + + def test_nd(self): + y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]] + assert_(np.any(y1)) + assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0]) + assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1]) + + +class TestAll(object): + + def test_basic(self): + y1 = [0, 1, 1, 0] + y2 = [0, 0, 0, 0] + y3 = [1, 1, 1, 1] + assert_(not np.all(y1)) + assert_(np.all(y3)) + assert_(not np.all(y2)) + assert_(np.all(~np.array(y2))) + + def test_nd(self): + y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]] + assert_(not 
np.all(y1)) + assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1]) + assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1]) + + +class TestCopy(object): + + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + a_copy = np.copy(a) + assert_array_equal(a, a_copy) + a_copy[0, 0] = 10 + assert_equal(a[0, 0], 1) + assert_equal(a_copy[0, 0], 10) + + def test_order(self): + # It turns out that people rely on np.copy() preserving order by + # default; changing this broke scikit-learn: + # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa + a = np.array([[1, 2], [3, 4]]) + assert_(a.flags.c_contiguous) + assert_(not a.flags.f_contiguous) + a_fort = np.array([[1, 2], [3, 4]], order="F") + assert_(not a_fort.flags.c_contiguous) + assert_(a_fort.flags.f_contiguous) + a_copy = np.copy(a) + assert_(a_copy.flags.c_contiguous) + assert_(not a_copy.flags.f_contiguous) + a_fort_copy = np.copy(a_fort) + assert_(not a_fort_copy.flags.c_contiguous) + assert_(a_fort_copy.flags.f_contiguous) + + +class TestAverage(object): + + def test_basic(self): + y1 = np.array([1, 2, 3]) + assert_(average(y1, axis=0) == 2.) + y2 = np.array([1., 2., 3.]) + assert_(average(y2, axis=0) == 2.) + y3 = [0., 0., 0.] + assert_(average(y3, axis=0) == 0.) + + y4 = np.ones((4, 4)) + y4[0, 1] = 0 + y4[1, 0] = 2 + assert_almost_equal(y4.mean(0), average(y4, 0)) + assert_almost_equal(y4.mean(1), average(y4, 1)) + + y5 = rand(5, 5) + assert_almost_equal(y5.mean(0), average(y5, 0)) + assert_almost_equal(y5.mean(1), average(y5, 1)) + + def test_weights(self): + y = np.arange(10) + w = np.arange(10) + actual = average(y, weights=w) + desired = (np.arange(10) ** 2).sum() * 1. / np.arange(10).sum() + assert_almost_equal(actual, desired) + + y1 = np.array([[1, 2, 3], [4, 5, 6]]) + w0 = [1, 2] + actual = average(y1, weights=w0, axis=0) + desired = np.array([3., 4., 5.]) + assert_almost_equal(actual, desired) + + w1 = [0, 0, 1] + actual = average(y1, weights=w1, axis=1) + desired = np.array([3., 6.]) + assert_almost_equal(actual, desired) + + # This should raise an error. Can we test for that ? + # assert_equal(average(y1, weights=w1), 9./2.) + + # 2D Case + w2 = [[0, 0, 1], [0, 0, 2]] + desired = np.array([3., 6.]) + assert_array_equal(average(y1, weights=w2, axis=1), desired) + assert_equal(average(y1, weights=w2), 5.) + + y3 = rand(5).astype(np.float32) + w3 = rand(5).astype(np.float64) + + assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3)) + + def test_returned(self): + y = np.array([[1, 2, 3], [4, 5, 6]]) + + # No weights + avg, scl = average(y, returned=True) + assert_equal(scl, 6.) 
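+        # scl is the sum of the weights; with no weights that is just the element count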
+
+        avg, scl = average(y, 0, returned=True)
+        assert_array_equal(scl, np.array([2., 2., 2.]))
+
+        avg, scl = average(y, 1, returned=True)
+        assert_array_equal(scl, np.array([3., 3.]))
+
+        # With weights
+        w0 = [1, 2]
+        avg, scl = average(y, weights=w0, axis=0, returned=True)
+        assert_array_equal(scl, np.array([3., 3., 3.]))
+
+        w1 = [1, 2, 3]
+        avg, scl = average(y, weights=w1, axis=1, returned=True)
+        assert_array_equal(scl, np.array([6., 6.]))
+
+        w2 = [[0, 0, 1], [1, 2, 3]]
+        avg, scl = average(y, weights=w2, axis=1, returned=True)
+        assert_array_equal(scl, np.array([1., 6.]))
+
+    def test_subclasses(self):
+        class subclass(np.ndarray):
+            pass
+        a = np.array([[1,2],[3,4]]).view(subclass)
+        w = np.array([[1,2],[3,4]]).view(subclass)
+
+        assert_equal(type(np.average(a)), subclass)
+        assert_equal(type(np.average(a, weights=w)), subclass)
+
+    def test_upcasting(self):
+        types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
+                 ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
+        for at, wt, rt in types:
+            a = np.array([[1,2],[3,4]], dtype=at)
+            w = np.array([[1,2],[3,4]], dtype=wt)
+            assert_equal(np.average(a, weights=w).dtype, np.dtype(rt))
+
+    def test_object_dtype(self):
+        a = np.array([decimal.Decimal(x) for x in range(10)])
+        w = np.array([decimal.Decimal(1) for _ in range(10)])
+        w /= w.sum()
+        assert_almost_equal(a.mean(0), average(a, weights=w))
+
+class TestSelect(object):
+    choices = [np.array([1, 2, 3]),
+               np.array([4, 5, 6]),
+               np.array([7, 8, 9])]
+    conditions = [np.array([False, False, False]),
+                  np.array([False, True, False]),
+                  np.array([False, False, True])]
+
+    def _select(self, cond, values, default=0):
+        output = []
+        for m in range(len(cond)):
+            output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]
+        return output
+
+    def test_basic(self):
+        choices = self.choices
+        conditions = self.conditions
+        assert_array_equal(select(conditions, choices, default=15),
+                           self._select(conditions, choices, default=15))
+
+        assert_equal(len(choices), 3)
+        assert_equal(len(conditions), 3)
+
+    def test_broadcasting(self):
+        conditions = [np.array(True), np.array([False, True, False])]
+        choices = [1, np.arange(12).reshape(4, 3)]
+        assert_array_equal(select(conditions, choices), np.ones((4, 3)))
+        # default can broadcast too:
+        assert_equal(select([True], [0], default=[0]).shape, (1,))
+
+    def test_return_dtype(self):
+        assert_equal(select(self.conditions, self.choices, 1j).dtype,
+                     np.complex_)
+        # But the conditions need to be stronger than the scalar default
+        # if it is scalar.
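+        # (int8 choices against the scalar default 0 keep the result dtype at int8)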
+        choices = [choice.astype(np.int8) for choice in self.choices]
+        assert_equal(select(self.conditions, choices).dtype, np.int8)
+
+        d = np.array([1, 2, 3, np.nan, 5, 7])
+        m = np.isnan(d)
+        assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
+
+    def test_deprecated_empty(self):
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("always")
+            assert_equal(select([], [], 3j), 3j)
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("always")
+            assert_warns(DeprecationWarning, select, [], [])
+            warnings.simplefilter("error")
+            assert_raises(DeprecationWarning, select, [], [])
+
+    def test_non_bool_deprecation(self):
+        choices = self.choices
+        conditions = self.conditions[:]
+        with warnings.catch_warnings():
+            warnings.filterwarnings("always")
+            conditions[0] = conditions[0].astype(np.int_)
+            assert_warns(DeprecationWarning, select, conditions, choices)
+            conditions[0] = conditions[0].astype(np.uint8)
+            assert_warns(DeprecationWarning, select, conditions, choices)
+            warnings.filterwarnings("error")
+            assert_raises(DeprecationWarning, select, conditions, choices)
+
+    def test_many_arguments(self):
+        # This used to be limited by NPY_MAXARGS == 32
+        conditions = [np.array([False])] * 100
+        choices = [np.array([1])] * 100
+        select(conditions, choices)
+
+
+class TestInsert(object):
+
+    def test_basic(self):
+        a = [1, 2, 3]
+        assert_equal(insert(a, 0, 1), [1, 1, 2, 3])
+        assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
+        assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
+        assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3])
+        assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9])
+        assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3])
+        assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9])
+        b = np.array([0, 1], dtype=np.float64)
+        assert_equal(insert(b, 0, b[0]), [0., 0., 1.])
+        assert_equal(insert(b, [], []), b)
+        # Bools will be treated differently in the future:
+        # assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9])
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', FutureWarning)
+            assert_equal(
+                insert(a, np.array([True] * 4), 9), [1, 9, 9, 9, 9, 2, 3])
+            assert_(w[0].category is FutureWarning)
+
+    def test_multidim(self):
+        a = [[1, 1, 1]]
+        r = [[2, 2, 2],
+             [1, 1, 1]]
+        assert_equal(insert(a, 0, [1]), [1, 1, 1, 1])
+        assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)
+        assert_equal(insert(a, 0, 2, axis=0), r)
+        assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])
+
+        a = np.array([[1, 1], [2, 2], [3, 3]])
+        b = np.arange(1, 4).repeat(3).reshape(3, 3)
+        c = np.concatenate(
+            (a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T,
+             a[:, 1:2]), axis=1)
+        assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b)
+        assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)
+        # scalars behave differently, in this case exactly opposite:
+        assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)
+        assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c)
+
+        a = np.arange(4).reshape(2, 2)
+        assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a)
+        assert_equal(insert(a[:1,:], 1, a[1,:], axis=0), a)
+
+        # negative axis value
+        a = np.arange(24).reshape((2, 3, 4))
+        assert_equal(insert(a, 1, a[:,:, 3], axis=-1),
+                     insert(a, 1, a[:,:, 3], axis=2))
+        assert_equal(insert(a, 1, a[:, 2,:], axis=-2),
+                     insert(a, 1, a[:, 2,:], axis=1))
+
+        # invalid axis value
+        assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=3)
+        assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=-4)
+
+    def test_0d(self):
+        # This is an error in the future
+        a = np.array(1)
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', DeprecationWarning)
+            assert_equal(insert(a, [], 2, axis=0), np.array(2))
+            assert_(w[0].category is DeprecationWarning)
+
+    def test_subclass(self):
+        class SubClass(np.ndarray):
+            pass
+        a = np.arange(10).view(SubClass)
+        assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+        assert_(isinstance(np.insert(a, [], []), SubClass))
+        assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass))
+        assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass))
+        assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass))
+        # This is an error in the future:
+        a = np.array(1).view(SubClass)
+        assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+
+    def test_index_array_copied(self):
+        x = np.array([1, 1, 1])
+        np.insert([0, 1, 2], x, [3, 4, 5])
+        assert_equal(x, np.array([1, 1, 1]))
+
+    def test_structured_array(self):
+        a = np.array([(1, 'a'), (2, 'b'), (3, 'c')],
+                     dtype=[('foo', 'i'), ('bar', 'a1')])
+        val = (4, 'd')
+        b = np.insert(a, 0, val)
+        assert_array_equal(b[0], np.array(val, dtype=b.dtype))
+        val = [(4, 'd')] * 2
+        b = np.insert(a, [0, 2], val)
+        assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
+
+
+class TestAmax(object):
+
+    def test_basic(self):
+        a = [3, 4, 5, 10, -3, -5, 6.0]
+        assert_equal(np.amax(a), 10.0)
+        b = [[3, 6.0, 9.0],
+             [4, 10.0, 5.0],
+             [8, 3.0, 2.0]]
+        assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0])
+        assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
+
+
+class TestAmin(object):
+
+    def test_basic(self):
+        a = [3, 4, 5, 10, -3, -5, 6.0]
+        assert_equal(np.amin(a), -5.0)
+        b = [[3, 6.0, 9.0],
+             [4, 10.0, 5.0],
+             [8, 3.0, 2.0]]
+        assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0])
+        assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
+
+
+class TestPtp(object):
+
+    def test_basic(self):
+        a = np.array([3, 4, 5, 10, -3, -5, 6.0])
+        assert_equal(a.ptp(axis=0), 15.0)
+        b = np.array([[3, 6.0, 9.0],
+                      [4, 10.0, 5.0],
+                      [8, 3.0, 2.0]])
+        assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0])
+        assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0])
+
+        assert_equal(b.ptp(axis=0, keepdims=True), [[5.0, 7.0, 7.0]])
+        assert_equal(b.ptp(axis=(0,1), keepdims=True), [[8.0]])
+
+
+class TestCumsum(object):
+
+    def test_basic(self):
+        ba = [1, 2, 10, 11, 6, 5, 4]
+        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+        for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
+                      np.uint32, np.float32, np.float64, np.complex64,
+                      np.complex128]:
+            a = np.array(ba, ctype)
+            a2 = np.array(ba2, ctype)
+
+            tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
+            assert_array_equal(np.cumsum(a, axis=0), tgt)
+
+            tgt = np.array(
+                [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
+            assert_array_equal(np.cumsum(a2, axis=0), tgt)
+
+            tgt = np.array(
+                [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
+            assert_array_equal(np.cumsum(a2, axis=1), tgt)
+
+
+class TestProd(object):
+
+    def test_basic(self):
+        ba = [1, 2, 10, 11, 6, 5, 4]
+        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+        for ctype in [np.int16, np.uint16, np.int32, np.uint32,
+                      np.float32, np.float64, np.complex64, np.complex128]:
+            a = np.array(ba, ctype)
+            a2 = np.array(ba2, ctype)
+            if ctype in ['1', 'b']:
+
assert_raises(ArithmeticError, np.prod, a) + assert_raises(ArithmeticError, np.prod, a2, 1) + else: + assert_equal(a.prod(axis=0), 26400) + assert_array_equal(a2.prod(axis=0), + np.array([50, 36, 84, 180], ctype)) + assert_array_equal(a2.prod(axis=-1), + np.array([24, 1890, 600], ctype)) + + +class TestCumprod(object): + + def test_basic(self): + ba = [1, 2, 10, 11, 6, 5, 4] + ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] + for ctype in [np.int16, np.uint16, np.int32, np.uint32, + np.float32, np.float64, np.complex64, np.complex128]: + a = np.array(ba, ctype) + a2 = np.array(ba2, ctype) + if ctype in ['1', 'b']: + assert_raises(ArithmeticError, np.cumprod, a) + assert_raises(ArithmeticError, np.cumprod, a2, 1) + assert_raises(ArithmeticError, np.cumprod, a) + else: + assert_array_equal(np.cumprod(a, axis=-1), + np.array([1, 2, 20, 220, + 1320, 6600, 26400], ctype)) + assert_array_equal(np.cumprod(a2, axis=0), + np.array([[1, 2, 3, 4], + [5, 12, 21, 36], + [50, 36, 84, 180]], ctype)) + assert_array_equal(np.cumprod(a2, axis=-1), + np.array([[1, 2, 6, 24], + [5, 30, 210, 1890], + [10, 30, 120, 600]], ctype)) + + +class TestDiff(object): + + def test_basic(self): + x = [1, 4, 6, 7, 12] + out = np.array([3, 2, 1, 5]) + out2 = np.array([-1, -1, 4]) + out3 = np.array([0, 5]) + assert_array_equal(diff(x), out) + assert_array_equal(diff(x, n=2), out2) + assert_array_equal(diff(x, n=3), out3) + + x = [1.1, 2.2, 3.0, -0.2, -0.1] + out = np.array([1.1, 0.8, -3.2, 0.1]) + assert_almost_equal(diff(x), out) + + x = [True, True, False, False] + out = np.array([False, True, False]) + out2 = np.array([True, True]) + assert_array_equal(diff(x), out) + assert_array_equal(diff(x, n=2), out2) + + def test_axis(self): + x = np.zeros((10, 20, 30)) + x[:, 1::2, :] = 1 + exp = np.ones((10, 19, 30)) + exp[:, 1::2, :] = -1 + assert_array_equal(diff(x), np.zeros((10, 20, 29))) + assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29))) + assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30))) + assert_array_equal(diff(x, axis=1), exp) + assert_array_equal(diff(x, axis=-2), exp) + assert_raises(np.AxisError, diff, x, axis=3) + assert_raises(np.AxisError, diff, x, axis=-4) + + def test_nd(self): + x = 20 * rand(10, 20, 30) + out1 = x[:, :, 1:] - x[:, :, :-1] + out2 = out1[:, :, 1:] - out1[:, :, :-1] + out3 = x[1:, :, :] - x[:-1, :, :] + out4 = out3[1:, :, :] - out3[:-1, :, :] + assert_array_equal(diff(x), out1) + assert_array_equal(diff(x, n=2), out2) + assert_array_equal(diff(x, axis=0), out3) + assert_array_equal(diff(x, n=2, axis=0), out4) + + def test_n(self): + x = list(range(3)) + assert_raises(ValueError, diff, x, n=-1) + output = [diff(x, n=n) for n in range(1, 5)] + expected = [[1, 1], [0], [], []] + assert_(diff(x, n=0) is x) + for n, (expected, out) in enumerate(zip(expected, output), start=1): + assert_(type(out) is np.ndarray) + assert_array_equal(out, expected) + assert_equal(out.dtype, np.int_) + assert_equal(len(out), max(0, len(x) - n)) + + def test_times(self): + x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + expected = [ + np.array([1, 1], dtype='timedelta64[D]'), + np.array([0], dtype='timedelta64[D]'), + ] + expected.extend([np.array([], dtype='timedelta64[D]')] * 3) + for n, exp in enumerate(expected, start=1): + out = diff(x, n=n) + assert_array_equal(out, exp) + assert_equal(out.dtype, exp.dtype) + + def test_subclass(self): + x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], + mask=[[False, False], [True, False], + [False, True], [True, True], [False, False]]) 
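+        # each pairwise difference is masked wherever either operand was masked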
+ out = diff(x) + assert_array_equal(out.data, [[1], [1], [1], [1], [1]]) + assert_array_equal(out.mask, [[False], [True], + [True], [True], [False]]) + assert_(type(out) is type(x)) + + out3 = diff(x, n=3) + assert_array_equal(out3.data, [[], [], [], [], []]) + assert_array_equal(out3.mask, [[], [], [], [], []]) + assert_(type(out3) is type(x)) + + def test_prepend(self): + x = np.arange(5) + 1 + assert_array_equal(diff(x, prepend=0), np.ones(5)) + assert_array_equal(diff(x, prepend=[0]), np.ones(5)) + assert_array_equal(np.cumsum(np.diff(x, prepend=0)), x) + assert_array_equal(diff(x, prepend=[-1, 0]), np.ones(6)) + + x = np.arange(4).reshape(2, 2) + result = np.diff(x, axis=1, prepend=0) + expected = [[0, 1], [2, 1]] + assert_array_equal(result, expected) + result = np.diff(x, axis=1, prepend=[[0], [0]]) + assert_array_equal(result, expected) + + result = np.diff(x, axis=0, prepend=0) + expected = [[0, 1], [2, 2]] + assert_array_equal(result, expected) + result = np.diff(x, axis=0, prepend=[[0, 0]]) + assert_array_equal(result, expected) + + assert_raises(ValueError, np.diff, x, prepend=np.zeros((3,3))) + + assert_raises(np.AxisError, diff, x, prepend=0, axis=3) + + def test_append(self): + x = np.arange(5) + result = diff(x, append=0) + expected = [1, 1, 1, 1, -4] + assert_array_equal(result, expected) + result = diff(x, append=[0]) + assert_array_equal(result, expected) + result = diff(x, append=[0, 2]) + expected = expected + [2] + assert_array_equal(result, expected) + + x = np.arange(4).reshape(2, 2) + result = np.diff(x, axis=1, append=0) + expected = [[1, -1], [1, -3]] + assert_array_equal(result, expected) + result = np.diff(x, axis=1, append=[[0], [0]]) + assert_array_equal(result, expected) + + result = np.diff(x, axis=0, append=0) + expected = [[2, 2], [-2, -3]] + assert_array_equal(result, expected) + result = np.diff(x, axis=0, append=[[0, 0]]) + assert_array_equal(result, expected) + + assert_raises(ValueError, np.diff, x, append=np.zeros((3,3))) + + assert_raises(np.AxisError, diff, x, append=0, axis=3) + + +class TestDelete(object): + + def setup(self): + self.a = np.arange(5) + self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) + + def _check_inverse_of_slicing(self, indices): + a_del = delete(self.a, indices) + nd_a_del = delete(self.nd_a, indices, axis=1) + msg = 'Delete failed for obj: %r' % indices + # NOTE: The cast should be removed after warning phase for bools + if not isinstance(indices, (slice, int, long, np.integer)): + indices = np.asarray(indices, dtype=np.intp) + indices = indices[(indices >= 0) & (indices < 5)] + assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a, + err_msg=msg) + xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0]) + assert_array_equal(xor, self.nd_a[0,:, 0], err_msg=msg) + + def test_slices(self): + lims = [-6, -2, 0, 1, 2, 4, 5] + steps = [-3, -1, 1, 3] + for start in lims: + for stop in lims: + for step in steps: + s = slice(start, stop, step) + self._check_inverse_of_slicing(s) + + def test_fancy(self): + # Deprecation/FutureWarning tests should be kept after change. 
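+        # a 2-D index array is used like its flattened list of indices; repeats are allowed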
+ self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) + with warnings.catch_warnings(): + warnings.filterwarnings('error', category=DeprecationWarning) + assert_raises(DeprecationWarning, delete, self.a, [100]) + assert_raises(DeprecationWarning, delete, self.a, [-100]) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', category=FutureWarning) + self._check_inverse_of_slicing([0, -1, 2, 2]) + obj = np.array([True, False, False], dtype=bool) + self._check_inverse_of_slicing(obj) + assert_(w[0].category is FutureWarning) + assert_(w[1].category is FutureWarning) + + def test_single(self): + self._check_inverse_of_slicing(0) + self._check_inverse_of_slicing(-4) + + def test_0d(self): + a = np.array(1) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', DeprecationWarning) + assert_equal(delete(a, [], axis=0), a) + assert_(w[0].category is DeprecationWarning) + + def test_subclass(self): + class SubClass(np.ndarray): + pass + a = self.a.view(SubClass) + assert_(isinstance(delete(a, 0), SubClass)) + assert_(isinstance(delete(a, []), SubClass)) + assert_(isinstance(delete(a, [0, 1]), SubClass)) + assert_(isinstance(delete(a, slice(1, 2)), SubClass)) + assert_(isinstance(delete(a, slice(1, -2)), SubClass)) + + def test_array_order_preserve(self): + # See gh-7113 + k = np.arange(10).reshape(2, 5, order='F') + m = delete(k, slice(60, None), axis=1) + + # 'k' is Fortran ordered, and 'm' should have the + # same ordering as 'k' and NOT become C ordered + assert_equal(m.flags.c_contiguous, k.flags.c_contiguous) + assert_equal(m.flags.f_contiguous, k.flags.f_contiguous) + + +class TestGradient(object): + + def test_basic(self): + v = [[1, 1], [3, 4]] + x = np.array(v) + dx = [np.array([[2., 3.], [2., 3.]]), + np.array([[0., 0.], [1., 1.]])] + assert_array_equal(gradient(x), dx) + assert_array_equal(gradient(v), dx) + + def test_args(self): + dx = np.cumsum(np.ones(5)) + dx_uneven = [1., 2., 5., 9., 11.] + f_2d = np.arange(25).reshape(5, 5) + + # distances must be scalars or have size equal to gradient[axis] + gradient(np.arange(5), 3.) 
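+        # a Python scalar, a 0-d array and a 1-d array are all accepted as spacing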
+ gradient(np.arange(5), np.array(3.)) + gradient(np.arange(5), dx) + # dy is set equal to dx because scalar + gradient(f_2d, 1.5) + gradient(f_2d, np.array(1.5)) + + gradient(f_2d, dx_uneven, dx_uneven) + # mix between even and uneven spaces and + # mix between scalar and vector + gradient(f_2d, dx, 2) + + # 2D but axis specified + gradient(f_2d, dx, axis=1) + + # 2d coordinate arguments are not yet allowed + assert_raises_regex(ValueError, '.*scalars or 1d', + gradient, f_2d, np.stack([dx]*2, axis=-1), 1) + + def test_badargs(self): + f_2d = np.arange(25).reshape(5, 5) + x = np.cumsum(np.ones(5)) + + # wrong sizes + assert_raises(ValueError, gradient, f_2d, x, np.ones(2)) + assert_raises(ValueError, gradient, f_2d, 1, np.ones(2)) + assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2)) + # wrong number of arguments + assert_raises(TypeError, gradient, f_2d, x) + assert_raises(TypeError, gradient, f_2d, x, axis=(0,1)) + assert_raises(TypeError, gradient, f_2d, x, x, x) + assert_raises(TypeError, gradient, f_2d, 1, 1, 1) + assert_raises(TypeError, gradient, f_2d, x, x, axis=1) + assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1) + + def test_datetime64(self): + # Make sure gradient() can handle special types like datetime64 + x = np.array( + ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12', + '1910-10-12', '1910-12-12', '1912-12-12'], + dtype='datetime64[D]') + dx = np.array( + [-5, -3, 0, 31, 61, 396, 731], + dtype='timedelta64[D]') + assert_array_equal(gradient(x), dx) + assert_(dx.dtype == np.dtype('timedelta64[D]')) + + def test_masked(self): + # Make sure that gradient supports subclasses like masked arrays + x = np.ma.array([[1, 1], [3, 4]], + mask=[[False, False], [False, False]]) + out = gradient(x)[0] + assert_equal(type(out), type(x)) + # And make sure that the output and input don't have aliased mask + # arrays + assert_(x.mask is not out.mask) + # Also check that edge_order=2 doesn't alter the original mask + x2 = np.ma.arange(5) + x2[2] = np.ma.masked + np.gradient(x2, edge_order=2) + assert_array_equal(x2.mask, [False, False, True, False, False]) + + def test_second_order_accurate(self): + # Testing that the relative numerical error is less that 3% for + # this example problem. This corresponds to second order + # accurate finite differences for all interior and boundary + # points. + x = np.linspace(0, 1, 10) + dx = x[1] - x[0] + y = 2 * x ** 3 + 4 * x ** 2 + 2 * x + analytical = 6 * x ** 2 + 8 * x + 2 + num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1) + assert_(np.all(num_error < 0.03) == True) + + # test with unevenly spaced + np.random.seed(0) + x = np.sort(np.random.random(10)) + y = 2 * x ** 3 + 4 * x ** 2 + 2 * x + analytical = 6 * x ** 2 + 8 * x + 2 + num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1) + assert_(np.all(num_error < 0.03) == True) + + def test_spacing(self): + f = np.array([0, 2., 3., 4., 5., 5.]) + f = np.tile(f, (6,1)) + f.reshape(-1, 1) + x_uneven = np.array([0., 0.5, 1., 3., 5., 7.]) + x_even = np.arange(6.) 
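+        # expected per-axis derivatives, tiled to 2-D, for edge_order 1 and 2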
+ + fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1)) + fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1)) + fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1)) + fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1)) + + # evenly spaced + for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]: + res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order) + res2 = gradient(f, x_even, x_even, + axis=(0,1), edge_order=edge_order) + res3 = gradient(f, x_even, x_even, + axis=None, edge_order=edge_order) + assert_array_equal(res1, res2) + assert_array_equal(res2, res3) + assert_almost_equal(res1[0], exp_res.T) + assert_almost_equal(res1[1], exp_res) + + res1 = gradient(f, 1., axis=0, edge_order=edge_order) + res2 = gradient(f, x_even, axis=0, edge_order=edge_order) + assert_(res1.shape == res2.shape) + assert_almost_equal(res2, exp_res.T) + + res1 = gradient(f, 1., axis=1, edge_order=edge_order) + res2 = gradient(f, x_even, axis=1, edge_order=edge_order) + assert_(res1.shape == res2.shape) + assert_array_equal(res2, exp_res) + + # unevenly spaced + for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]: + res1 = gradient(f, x_uneven, x_uneven, + axis=(0,1), edge_order=edge_order) + res2 = gradient(f, x_uneven, x_uneven, + axis=None, edge_order=edge_order) + assert_array_equal(res1, res2) + assert_almost_equal(res1[0], exp_res.T) + assert_almost_equal(res1[1], exp_res) + + res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order) + assert_almost_equal(res1, exp_res.T) + + res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order) + assert_almost_equal(res1, exp_res) + + # mixed + res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1) + res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1) + assert_array_equal(res1[0], res2[1]) + assert_array_equal(res1[1], res2[0]) + assert_almost_equal(res1[0], fdx_even_ord1.T) + assert_almost_equal(res1[1], fdx_uneven_ord1) + + res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2) + res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2) + assert_array_equal(res1[0], res2[1]) + assert_array_equal(res1[1], res2[0]) + assert_almost_equal(res1[0], fdx_even_ord2.T) + assert_almost_equal(res1[1], fdx_uneven_ord2) + + def test_specific_axes(self): + # Testing that gradient can work on a given axis only + v = [[1, 1], [3, 4]] + x = np.array(v) + dx = [np.array([[2., 3.], [2., 3.]]), + np.array([[0., 0.], [1., 1.]])] + assert_array_equal(gradient(x, axis=0), dx[0]) + assert_array_equal(gradient(x, axis=1), dx[1]) + assert_array_equal(gradient(x, axis=-1), dx[1]) + assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]]) + + # test axis=None which means all axes + assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]]) + # and is the same as no axis keyword given + assert_almost_equal(gradient(x, axis=None), gradient(x)) + + # test vararg order + assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), + [dx[1]/2.0, dx[0]/3.0]) + # test maximal number of varargs + assert_raises(TypeError, gradient, x, 1, 2, axis=1) + + assert_raises(np.AxisError, gradient, x, axis=3) + assert_raises(np.AxisError, gradient, x, axis=-3) + # assert_raises(TypeError, gradient, x, axis=[1,]) + + def test_timedelta64(self): + # Make sure gradient() can handle special types like timedelta64 + x = np.array( + [-5, -3, 10, 12, 61, 321, 300], + dtype='timedelta64[D]') + dx = np.array( + [2, 7, 7, 25, 154, 119, -21], + dtype='timedelta64[D]') + assert_array_equal(gradient(x), dx) + 
assert_(dx.dtype == np.dtype('timedelta64[D]'))
+
+    def test_inexact_dtypes(self):
+        for dt in [np.float16, np.float32, np.float64]:
+            # dtypes should not be promoted in a different way to what diff does
+            x = np.array([1, 2, 3], dtype=dt)
+            assert_equal(gradient(x).dtype, np.diff(x).dtype)
+
+    def test_values(self):
+        # needs at least 2 points for edge_order ==1
+        gradient(np.arange(2), edge_order=1)
+        # needs at least 3 points for edge_order ==2
+        gradient(np.arange(3), edge_order=2)
+
+        assert_raises(ValueError, gradient, np.arange(0), edge_order=1)
+        assert_raises(ValueError, gradient, np.arange(0), edge_order=2)
+        assert_raises(ValueError, gradient, np.arange(1), edge_order=1)
+        assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
+        assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
+
+
+class TestAngle(object):
+
+    def test_basic(self):
+        x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
+             1, 1j, -1, -1j, 1 - 3j, -1 + 3j]
+        y = angle(x)
+        yo = [
+            np.arctan(3.0 / 1.0),
+            np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,
+            -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]
+        z = angle(x, deg=1)
+        zo = np.array(yo) * 180 / np.pi
+        assert_array_almost_equal(y, yo, 11)
+        assert_array_almost_equal(z, zo, 11)
+
+    def test_subclass(self):
+        x = np.ma.array([1 + 3j, 1, np.sqrt(2)/2 * (1 + 1j)])
+        x[1] = np.ma.masked
+        expected = np.ma.array([np.arctan(3.0 / 1.0), 0, np.arctan(1.0)])
+        expected[1] = np.ma.masked
+        actual = angle(x)
+        assert_equal(type(actual), type(expected))
+        assert_equal(actual.mask, expected.mask)
+        assert_equal(actual, expected)
+
+
+class TestTrimZeros(object):
+
+    """
+    Only testing for integer splits.
+
+    """
+
+    def test_basic(self):
+        a = np.array([0, 0, 1, 2, 3, 4, 0])
+        res = trim_zeros(a)
+        assert_array_equal(res, np.array([1, 2, 3, 4]))
+
+    def test_leading_skip(self):
+        a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
+        res = trim_zeros(a)
+        assert_array_equal(res, np.array([1, 0, 2, 3, 4]))
+
+    def test_trailing_skip(self):
+        a = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0])
+        res = trim_zeros(a)
+        assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))
+
+
+class TestExtins(object):
+
+    def test_basic(self):
+        a = np.array([1, 3, 2, 1, 2, 3, 3])
+        b = extract(a > 1, a)
+        assert_array_equal(b, [3, 2, 2, 3, 3])
+
+    def test_place(self):
+        # Make sure that non-np.ndarray objects
+        # raise an error instead of doing nothing
+        assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1])
+
+        a = np.array([1, 4, 3, 2, 5, 8, 7])
+        place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])
+        assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])
+
+        place(a, np.zeros(7), [])
+        assert_array_equal(a, np.arange(1, 8))
+
+        place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9])
+        assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9])
+        assert_raises_regex(ValueError, "Cannot insert from an empty array",
+                            lambda: place(a, [0, 0, 0, 0, 0, 1, 0], []))
+
+        # See Issue #6974
+        a = np.array(['12', '34'])
+        place(a, [0, 1], '9')
+        assert_array_equal(a, ['12', '9'])
+
+    def test_both(self):
+        a = rand(10)
+        mask = a > 0.5
+        ac = a.copy()
+        c = extract(mask, a)
+        place(a, mask, 0)
+        place(a, mask, c)
+        assert_array_equal(a, ac)
+
+
+class TestVectorize(object):
+
+    def test_simple(self):
+        def addsubtract(a, b):
+            if a > b:
+                return a - b
+            else:
+                return a + b
+
+        f = vectorize(addsubtract)
+        r = f([0, 3, 6, 9], [1, 3, 5, 7])
+        assert_array_equal(r, [1, 6, 1, 2])
+
+    def test_scalar(self):
+        def addsubtract(a, b):
+            if a > b:
+                return a - b
+            else:
+                return a + b
+
+        f = vectorize(addsubtract)
+        r = f([0, 3, 6, 9],
5) + assert_array_equal(r, [5, 8, 1, 4]) + + def test_large(self): + x = np.linspace(-3, 2, 10000) + f = vectorize(lambda x: x) + y = f(x) + assert_array_equal(y, x) + + def test_ufunc(self): + import math + f = vectorize(math.cos) + args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) + r1 = f(args) + r2 = np.cos(args) + assert_array_almost_equal(r1, r2) + + def test_keywords(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(args, 2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords_no_func_code(self): + # This needs to test a function that has keywords but + # no func_code attribute, since otherwise vectorize will + # inspect the func_code. + import random + try: + vectorize(random.randrange) # Should succeed + except Exception: + raise AssertionError() + + def test_keywords2_ticket_2100(self): + # Test kwarg support: enhancement ticket 2100 + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(a=args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(b=1, a=args) + assert_array_equal(r1, r2) + r1 = f(args, b=2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords3_ticket_2100(self): + # Test excluded with mixed positional and kwargs: ticket 2100 + def mypolyval(x, p): + _p = list(p) + res = _p.pop(0) + while _p: + res = res * x + _p.pop(0) + return res + + vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) + ans = [3, 6] + assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) + + def test_keywords4_ticket_2100(self): + # Test vectorizing function with no positional args. + @vectorize + def f(**kw): + res = 1.0 + for _k in kw: + res *= kw[_k] + return res + + assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) + + def test_keywords5_ticket_2100(self): + # Test vectorizing function with no kwargs args. + @vectorize + def f(*v): + return np.prod(v) + + assert_array_equal(f([1, 2], [3, 4]), [3, 8]) + + def test_coverage1_ticket_2100(self): + def foo(): + return 1 + + f = vectorize(foo) + assert_array_equal(f(), 1) + + def test_assigning_docstring(self): + def foo(x): + """Original documentation""" + return x + + f = vectorize(foo) + assert_equal(f.__doc__, foo.__doc__) + + doc = "Provided documentation" + f = vectorize(foo, doc=doc) + assert_equal(f.__doc__, doc) + + def test_UnboundMethod_ticket_1156(self): + # Regression test for issue 1156 + class Foo: + b = 2 + + def bar(self, a): + return a ** self.b + + assert_array_equal(vectorize(Foo().bar)(np.arange(9)), + np.arange(9) ** 2) + assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), + np.arange(9) ** 2) + + def test_execution_order_ticket_1487(self): + # Regression test for dependence on execution order: issue 1487 + f1 = vectorize(lambda x: x) + res1a = f1(np.arange(3)) + res1b = f1(np.arange(0.1, 3)) + f2 = vectorize(lambda x: x) + res2b = f2(np.arange(0.1, 3)) + res2a = f2(np.arange(3)) + assert_equal(res1a, res2a) + assert_equal(res1b, res2b) + + def test_string_ticket_1892(self): + # Test vectorization over strings: issue 1892. + f = np.vectorize(lambda x: x) + s = '0123456789' * 10 + assert_equal(s, f(s)) + + def test_cache(self): + # Ensure that vectorized func called exactly once per argument. 
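+        # without the cache the first element would be evaluated twice: once to
+        # infer the output type and once in the main loop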
+ _calls = [0] + + @vectorize + def f(x): + _calls[0] += 1 + return x ** 2 + + f.cache = True + x = np.arange(5) + assert_array_equal(f(x), x * x) + assert_equal(_calls[0], len(x)) + + def test_otypes(self): + f = np.vectorize(lambda x: x) + f.otypes = 'i' + x = np.arange(5) + assert_array_equal(f(x), x) + + def test_parse_gufunc_signature(self): + assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x)(y)->()') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x),(y)->') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('((x))->(x)') + + def test_signature_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract, signature='(),()->()') + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_signature_mean_last(self): + def mean(a): + return a.mean() + + f = vectorize(mean, signature='(n)->()') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [2, 3]) + + def test_signature_center(self): + def center(a): + return a - a.mean() + + f = vectorize(center, signature='(n)->(n)') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [[-1, 1], [-1, 1]]) + + def test_signature_two_outputs(self): + f = vectorize(lambda x: (x, x), signature='()->(),()') + r = f([1, 2, 3]) + assert_(isinstance(r, tuple) and len(r) == 2) + assert_array_equal(r[0], [1, 2, 3]) + assert_array_equal(r[1], [1, 2, 3]) + + def test_signature_outer(self): + f = vectorize(np.outer, signature='(a),(b)->(a,b)') + r = f([1, 2], [1, 2, 3]) + assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) + + r = f([[[1, 2]]], [1, 2, 3]) + assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) + + r = f([[1, 0], [2, 0]], [1, 2, 3]) + assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], + [[2, 4, 6], [0, 0, 0]]]) + + r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) + assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], + [[0, 0, 0], [0, 0, 0]]]) + + def test_signature_computed_size(self): + f = vectorize(lambda x: x[:-1], signature='(n)->(m)') + r = f([1, 2, 3]) + assert_array_equal(r, [1, 2]) + + r = f([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(r, [[1, 2], [2, 3]]) + + def test_signature_excluded(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo, signature='()->()', excluded={'b'}) + assert_array_equal(f([1, 2, 3]), [2, 3, 4]) + assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) + + def test_signature_otypes(self): + f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + + def test_signature_invalid_inputs(self): + f = vectorize(operator.add, signature='(n),(n)->(n)') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f([1, 2]) + with assert_raises_regex( + ValueError, 'does not have enough dimensions'): + f(1, 2) + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2], [1, 2, 3]) + + f = vectorize(operator.add, signature='()->()') + with 
assert_raises_regex(TypeError, 'wrong number of positional'): + f(1, 2) + + def test_signature_invalid_outputs(self): + + f = vectorize(lambda x: x[:-1], signature='(n)->(n)') + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2, 3]) + + f = vectorize(lambda x: x, signature='()->(),()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f(1) + + f = vectorize(lambda x: (x, x), signature='()->()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f([1, 2]) + + def test_size_zero_output(self): + # see issue 5868 + f = np.vectorize(lambda x: x) + x = np.zeros([0, 5], dtype=int) + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f.otypes = 'i' + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='()->()') + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f = np.vectorize(lambda x: x, signature='()->()', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)') + assert_array_equal(f(x.T), x.T) + + f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') + with assert_raises_regex(ValueError, 'new output dimensions'): + f(x) + + +class TestDigitize(object): + + def test_forward(self): + x = np.arange(-6, 5) + bins = np.arange(-5, 5) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(5, -5, -1) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_random(self): + x = rand(10) + bin = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bin) != 0)) + + def test_right_basic(self): + x = [1, 5, 4, 10, 8, 11, 0] + bins = [1, 5, 10] + default_answer = [1, 2, 1, 3, 2, 3, 0] + assert_array_equal(digitize(x, bins), default_answer) + right_answer = [0, 1, 1, 2, 2, 3, 0] + assert_array_equal(digitize(x, bins, True), right_answer) + + def test_right_open(self): + x = np.arange(-6, 5) + bins = np.arange(-6, 4) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(4, -6, -1) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_random(self): + x = rand(10) + bins = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bins, True) != 10)) + + def test_monotonic(self): + x = [-1, 0, 1, 2] + bins = [0, 0, 1] + assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3]) + assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3]) + bins = [1, 1, 0] + assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0]) + assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0]) + bins = [1, 1, 1, 1] + assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4]) + assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4]) + bins = [0, 0, 1, 0] + assert_raises(ValueError, digitize, x, bins) + bins = [1, 1, 0, 1] + assert_raises(ValueError, digitize, x, bins) + + def test_casting_error(self): + x = [1, 2, 3 + 1.j] + bins = [1, 2, 3] + assert_raises(TypeError, digitize, x, bins) + x, bins = bins, x + assert_raises(TypeError, digitize, x, bins) + + def test_return_type(self): + # Functions returning indices should always return base ndarrays + class A(np.ndarray): + pass + a = np.arange(5).view(A) + b = np.arange(1, 3).view(A) + assert_(not isinstance(digitize(b, a, False), A)) + assert_(not isinstance(digitize(b, a, True), A)) + + def 
test_large_integers_increasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x - 1, x + 1]), 1) + + @pytest.mark.xfail( + reason="gh-11022: np.core.multiarray._monoticity loses precision") + def test_large_integers_decreasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x + 1, x - 1]), 1) + + +class TestUnwrap(object): + + def test_simple(self): + # check that unwrap removes jumps greater that 2*pi + assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) + + +class TestFilterwindows(object): + + def test_hanning(self): + # check symmetry + w = hanning(10) + assert_array_almost_equal(w, flipud(w), 7) + # check known value + assert_almost_equal(np.sum(w, axis=0), 4.500, 4) + + def test_hamming(self): + # check symmetry + w = hamming(10) + assert_array_almost_equal(w, flipud(w), 7) + # check known value + assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) + + def test_bartlett(self): + # check symmetry + w = bartlett(10) + assert_array_almost_equal(w, flipud(w), 7) + # check known value + assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) + + def test_blackman(self): + # check symmetry + w = blackman(10) + assert_array_almost_equal(w, flipud(w), 7) + # check known value + assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) + + +class TestTrapz(object): + + def test_simple(self): + x = np.arange(-10, 10, .1) + r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check integral of normal equals 1 + assert_almost_equal(r, 1, 7) + + def test_ndim(self): + x = np.linspace(0, 1, 3) + y = np.linspace(0, 2, 8) + z = np.linspace(0, 3, 13) + + wx = np.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = np.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = np.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None,:, None] + z[None, None,:] + + qx = (q * wx[:, None, None]).sum(axis=0) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) + + # n-d `x` + r = trapz(q, x=x[:, None, None], axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y[None,:, None], axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z[None, None,:], axis=2) + assert_almost_equal(r, qz) + + # 1-d `x` + r = trapz(q, x=x, axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y, axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z, axis=2) + assert_almost_equal(r, qz) + + def test_masked(self): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_almost_equal(trapz(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapz(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapz(y, xm), r) + + +class TestSinc(object): + + def test_simple(self): + assert_(sinc(0) == 1) + w = sinc(np.linspace(-1, 1, 100)) + # check symmetry + assert_array_almost_equal(w, flipud(w), 7) + + def test_array_like(self): + x = [0, 0.5] + y1 = sinc(np.array(x)) + y2 = sinc(list(x)) + y3 = sinc(tuple(x)) + assert_array_equal(y1, y2) + assert_array_equal(y1, y3) + + +class TestUnique(object): + + def test_simple(self): + x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) + assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) + assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) + x 
= ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] + assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) + x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) + assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) + + +class TestCheckFinite(object): + + def test_simple(self): + a = [1, 2, 3] + b = [1, 2, np.inf] + c = [1, 2, np.nan] + np.lib.asarray_chkfinite(a) + assert_raises(ValueError, np.lib.asarray_chkfinite, b) + assert_raises(ValueError, np.lib.asarray_chkfinite, c) + + def test_dtype_order(self): + # Regression test for missing dtype and order arguments + a = [1, 2, 3] + a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64) + assert_(a.dtype == np.float64) + + +class TestCorrCoef(object): + A = np.array( + [[0.15391142, 0.18045767, 0.14197213], + [0.70461506, 0.96474128, 0.27906989], + [0.9297531, 0.32296769, 0.19267156]]) + B = np.array( + [[0.10377691, 0.5417086, 0.49807457], + [0.82872117, 0.77801674, 0.39226705], + [0.9314666, 0.66800209, 0.03538394]]) + res1 = np.array( + [[1., 0.9379533, -0.04931983], + [0.9379533, 1., 0.30007991], + [-0.04931983, 0.30007991, 1.]]) + res2 = np.array( + [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], + [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], + [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], + [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], + [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], + [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) + + def test_non_array(self): + assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), + [[1., -1.], [-1., 1.]]) + + def test_simple(self): + tgt1 = corrcoef(self.A) + assert_almost_equal(tgt1, self.res1) + assert_(np.all(np.abs(tgt1) <= 1.0)) + + tgt2 = corrcoef(self.A, self.B) + assert_almost_equal(tgt2, self.res2) + assert_(np.all(np.abs(tgt2) <= 1.0)) + + def test_ddof(self): + # ddof raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) + sup.filter(DeprecationWarning) + # ddof has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) + assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) + + def test_bias(self): + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) + assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) + sup.filter(DeprecationWarning) + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, bias=1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = corrcoef(x) + tgt = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(res, tgt) + assert_(np.all(np.abs(res) <= 1.0)) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(corrcoef(np.array([])), np.nan) + assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], 
[np.nan, np.nan]])) + + def test_extreme(self): + x = [[1e-100, 1e100], [1e100, 1e-100]] + with np.errstate(all='raise'): + c = corrcoef(x) + assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) + assert_(np.all(np.abs(c) <= 1.0)) + + +class TestCov(object): + x1 = np.array([[0, 2], [1, 1], [2, 0]]).T + res1 = np.array([[1., -1.], [-1., 1.]]) + x2 = np.array([0.0, 1.0, 2.0], ndmin=2) + frequencies = np.array([1, 4, 1]) + x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T + res2 = np.array([[0.4, -0.4], [-0.4, 0.4]]) + unit_frequencies = np.ones(3, dtype=np.integer) + weights = np.array([1.0, 4.0, 1.0]) + res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]]) + unit_weights = np.ones(3) + x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) + + def test_basic(self): + assert_allclose(cov(self.x1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(cov(x), res) + assert_allclose(cov(x, aweights=np.ones(3)), res) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(np.array([])), np.nan) + assert_array_equal(cov(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(cov(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_wrong_ddof(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(self.x1, ddof=5), + np.array([[np.inf, -np.inf], + [-np.inf, np.inf]])) + + def test_1D_rowvar(self): + assert_allclose(cov(self.x3), cov(self.x3, rowvar=0)) + y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501]) + assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=0)) + + def test_1D_variance(self): + assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1)) + + def test_fweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies), + self.res1) + nonint = self.frequencies + 0.5 + assert_raises(TypeError, cov, self.x1, fweights=nonint) + f = np.ones((2, 3), dtype=np.integer) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = np.ones(2, dtype=np.integer) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = -1 * np.ones(3, dtype=np.integer) + assert_raises(ValueError, cov, self.x1, fweights=f) + + def test_aweights(self): + assert_allclose(cov(self.x1, aweights=self.weights), self.res3) + assert_allclose(cov(self.x1, aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1) + w = np.ones((2, 3)) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = np.ones(2) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = -1.0 * np.ones(3) + assert_raises(ValueError, cov, self.x1, aweights=w) + + def test_unit_fweights_and_aweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies, + aweights=self.unit_weights), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies, + aweights=self.unit_weights), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + 
aweights=self.unit_weights), + self.res1) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.weights), + self.res3) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + + +class Test_I0(object): + + def test_simple(self): + assert_almost_equal( + i0(0.5), + np.array(1.0634833707413234)) + + A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549]) + assert_almost_equal( + i0(A), + np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049])) + + B = np.array([[0.827002, 0.99959078], + [0.89694769, 0.39298162], + [0.37954418, 0.05206293], + [0.36465447, 0.72446427], + [0.48164949, 0.50324519]]) + assert_almost_equal( + i0(B), + np.array([[1.17843223, 1.26583466], + [1.21147086, 1.03898290], + [1.03633899, 1.00067775], + [1.03352052, 1.13557954], + [1.05884290, 1.06432317]])) + + +class TestKaiser(object): + + def test_simple(self): + assert_(np.isfinite(kaiser(1, 1.0))) + assert_almost_equal(kaiser(0, 1.0), + np.array([])) + assert_almost_equal(kaiser(2, 1.0), + np.array([0.78984831, 0.78984831])) + assert_almost_equal(kaiser(5, 1.0), + np.array([0.78984831, 0.94503323, 1., + 0.94503323, 0.78984831])) + assert_almost_equal(kaiser(5, 1.56789), + np.array([0.58285404, 0.88409679, 1., + 0.88409679, 0.58285404])) + + def test_int_beta(self): + kaiser(3, 4) + + +class TestMsort(object): + + def test_simple(self): + A = np.array([[0.44567325, 0.79115165, 0.54900530], + [0.36844147, 0.37325583, 0.96098397], + [0.64864341, 0.52929049, 0.39172155]]) + assert_almost_equal( + msort(A), + np.array([[0.36844147, 0.37325583, 0.39172155], + [0.44567325, 0.52929049, 0.54900530], + [0.64864341, 0.79115165, 0.96098397]])) + + +class TestMeshgrid(object): + + def test_simple(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) + assert_array_equal(X, np.array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]])) + assert_array_equal(Y, np.array([[4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7]])) + + def test_single_input(self): + [X] = meshgrid([1, 2, 3, 4]) + assert_array_equal(X, np.array([1, 2, 3, 4])) + + def test_no_input(self): + args = [] + assert_array_equal([], meshgrid(*args)) + assert_array_equal([], meshgrid(*args, copy=False)) + + def test_indexing(self): + x = [1, 2, 3] + y = [4, 5, 6, 7] + [X, Y] = meshgrid(x, y, indexing='ij') + assert_array_equal(X, np.array([[1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3]])) + assert_array_equal(Y, np.array([[4, 5, 6, 7], + [4, 5, 6, 7], + [4, 5, 6, 7]])) + + # Test expected shapes: + z = [8, 9] + assert_(meshgrid(x, y)[0].shape == (4, 3)) + assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) + assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) + assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) + + assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') + + def test_sparse(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) + assert_array_equal(X, np.array([[1, 2, 3]])) + assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + + def test_invalid_arguments(self): + # Test that meshgrid complains about invalid arguments + # Regression test for issue #4755: + # https://github.com/numpy/numpy/issues/4755 + assert_raises(TypeError, meshgrid, + [1, 2, 3], [4, 5, 6, 7], indices='ij') + + def test_return_type(self): + # Test for appropriate dtype in returned arrays. 
+ # Regression test for issue #5297 + # https://github.com/numpy/numpy/issues/5297 + x = np.arange(0, 10, dtype=np.float32) + y = np.arange(10, 20, dtype=np.float64) + + X, Y = np.meshgrid(x,y) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # copy + X, Y = np.meshgrid(x,y, copy=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # sparse + X, Y = np.meshgrid(x,y, sparse=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + def test_writeback(self): + # Issue 8561 + X = np.array([1.1, 2.2]) + Y = np.array([3.3, 4.4]) + x, y = np.meshgrid(X, Y, sparse=False, copy=True) + + x[0, :] = 0 + assert_equal(x[0, :], 0) + assert_equal(x[1, :], X) + + +class TestPiecewise(object): + + def test_simple(self): + # Condition is single bool list + x = piecewise([0, 0], [True, False], [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: single bool list + x = piecewise([0, 0], [[True, False]], [1]) + assert_array_equal(x, [1, 0]) + + # Conditions is single bool array + x = piecewise([0, 0], np.array([True, False]), [1]) + assert_array_equal(x, [1, 0]) + + # Condition is single int array + x = piecewise([0, 0], np.array([1, 0]), [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: int array + x = piecewise([0, 0], [np.array([1, 0])], [1]) + assert_array_equal(x, [1, 0]) + + x = piecewise([0, 0], [[False, True]], [lambda x:-1]) + assert_array_equal(x, [0, -1]) + + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], []) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], [1, 2, 3]) + + def test_two_conditions(self): + x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) + assert_array_equal(x, [3, 4]) + + def test_scalar_domains_three_conditions(self): + x = piecewise(3, [True, False, False], [4, 2, 0]) + assert_equal(x, 4) + + def test_default(self): + # No value specified for x[1], should be 0 + x = piecewise([1, 2], [True, False], [2]) + assert_array_equal(x, [2, 0]) + + # Should set x[1] to 3 + x = piecewise([1, 2], [True, False], [2, 3]) + assert_array_equal(x, [2, 3]) + + def test_0d(self): + x = np.array(3) + y = piecewise(x, x > 3, [4, 0]) + assert_(y.ndim == 0) + assert_(y == 0) + + x = 5 + y = piecewise(x, [True, False], [1, 0]) + assert_(y.ndim == 0) + assert_(y == 1) + + # With 3 ranges (It was failing, before) + y = piecewise(x, [False, False, True], [1, 2, 3]) + assert_array_equal(y, 3) + + def test_0d_comparison(self): + x = 3 + y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. 
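+ # With x == 3 the first condition (x <= 3) holds, so piecewise selects the first value, 4.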
+ assert_equal(y, 4) + + # With 3 ranges (It was failing, before) + x = 4 + y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) + assert_array_equal(y, 2) + + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1]) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) + + def test_0d_0d_condition(self): + x = np.array(3) + c = np.array(x > 3) + y = piecewise(x, [c], [1, 2]) + assert_equal(y, 2) + + def test_multidimensional_extrafunc(self): + x = np.array([[-2.5, -1.5, -0.5], + [0.5, 1.5, 2.5]]) + y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) + assert_array_equal(y, np.array([[-1., -1., -1.], + [3., 3., 1.]])) + + +class TestBincount(object): + + def test_simple(self): + y = np.bincount(np.arange(4)) + assert_array_equal(y, np.ones(4)) + + def test_simple2(self): + y = np.bincount(np.array([1, 5, 2, 4, 1])) + assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) + + def test_simple_weight(self): + x = np.arange(4) + w = np.array([0.2, 0.3, 0.5, 0.1]) + y = np.bincount(x, w) + assert_array_equal(y, w) + + def test_simple_weight2(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) + + def test_with_minlength(self): + x = np.array([0, 1, 0, 1, 1]) + y = np.bincount(x, minlength=3) + assert_array_equal(y, np.array([2, 3, 0])) + x = [] + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([])) + + def test_with_minlength_smaller_than_maxvalue(self): + x = np.array([0, 1, 1, 2, 2, 3, 3]) + y = np.bincount(x, minlength=2) + assert_array_equal(y, np.array([1, 2, 2, 2])) + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([1, 2, 2, 2])) + + def test_with_minlength_and_weights(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w, 8) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) + + def test_empty(self): + x = np.array([], dtype=int) + y = np.bincount(x) + assert_array_equal(x, y) + + def test_empty_with_minlength(self): + x = np.array([], dtype=int) + y = np.bincount(x, minlength=5) + assert_array_equal(y, np.zeros(5, dtype=int)) + + def test_with_incorrect_minlength(self): + x = np.array([], dtype=int) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + x = np.arange(5) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_dtype_reference_leaks(self): + # gh-6805 + intp_refcount = sys.getrefcount(np.dtype(np.intp)) + double_refcount = sys.getrefcount(np.dtype(np.double)) + + for j in range(10): + np.bincount([1, 2, 3]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + for j in range(10): + np.bincount([1, 2, 3], [4, 5, 6]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + +class TestInterp(object): + + def test_exceptions(self): + assert_raises(ValueError, interp, 0, [], []) + 
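+ # xp and fp must be non-empty and of equal length; a zero period is likewise rejected.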
assert_raises(ValueError, interp, 0, [0], [1, 2]) + assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0) + assert_raises(ValueError, interp, 0, [], [], period=360) + assert_raises(ValueError, interp, 0, [0], [1, 2], period=360) + + def test_basic(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.linspace(0, 1, 50) + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_right_left_behavior(self): + # Needs range of sizes to test different code paths. + # size ==1 is special cased, 1 < size < 5 is linear search, and + # size >= 5 goes through local search and possibly binary search. + for size in range(1, 10): + xp = np.arange(size, dtype=np.double) + yp = np.ones(size, dtype=np.double) + incpts = np.array([-1, 0, size - 1, size], dtype=np.double) + decpts = incpts[::-1] + + incres = interp(incpts, xp, yp) + decres = interp(decpts, xp, yp) + inctgt = np.array([1, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0) + decres = interp(decpts, xp, yp, left=0) + inctgt = np.array([0, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, right=2) + decres = interp(decpts, xp, yp, right=2) + inctgt = np.array([1, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0, right=2) + decres = interp(decpts, xp, yp, left=0, right=2) + inctgt = np.array([0, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + def test_scalar_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = .3 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float32(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float64(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.nan + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_non_finite_behavior(self): + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4]) + fp = [1, 2, np.nan, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) + + def test_complex_interp(self): + # test complex interpolation + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j + x0 = 0.3 + y0 = x0 + (1+x0)*1.0j + assert_almost_equal(np.interp(x0, x, y), y0) + # test complex left and right + x0 = -1 + left = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, left=left), left) + x0 = 2.0 + right = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, right=right), right) + # test complex non finite + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2+1j, np.inf, 4] + y = [1, 2+1j, np.inf+0.5j, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), y) + # test complex periodic + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5+1.0j, 10+2j, 3+3j, 4+4j] + y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j, + 3.5+3.5j, 3.75+3.75j] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + def test_zero_dimensional_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + + xp = np.array([0, 2, 4]) + fp = np.array([1, -1, 1]) + + 
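+ # A 0-d array evaluation point should come back as a numpy scalar (np.float64), not a 0-d array.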
actual = np.interp(np.array(1), xp, fp) + assert_equal(actual, 0) + assert_(isinstance(actual, np.float64)) + + actual = np.interp(np.array(4.5), xp, fp, period=4) + assert_equal(actual, 0.5) + assert_(isinstance(actual, np.float64)) + + def test_if_len_x_is_small(self): + xp = np.arange(0, 10, 0.0001) + fp = np.sin(xp) + assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) + + def test_period(self): + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5, 10, 3, 4] + y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + x = np.array(x, order='F').reshape(2, -1) + y = np.array(y, order='C').reshape(2, -1) + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + +def compare_results(res, desired): + for i in range(len(desired)): + assert_array_equal(res[i], desired[i]) + + +class TestPercentile(object): + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, 0), 0.) + assert_equal(np.percentile(x, 100), 3.5) + assert_equal(np.percentile(x, 50), 1.75) + x[1] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.percentile(x, 0), np.nan) + assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan) + assert_(w[0].category is RuntimeWarning) + + def test_api(self): + d = np.ones(5) + np.percentile(d, 5, None, None, False) + np.percentile(d, 5, None, None, False, 'linear') + o = np.ones((1,)) + np.percentile(d, 5, None, o, False, 'linear') + + def test_2D(self): + x = np.array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) + + def test_linear(self): + + # Test defaults + assert_equal(np.percentile(range(10), 50), 4.5) + + # explicitly specify interpolation_method 'linear' (the default) + assert_equal(np.percentile(range(10), 50, + interpolation='linear'), 4.5) + + def test_lower_higher(self): + + # interpolation_method 'lower'/'higher' + assert_equal(np.percentile(range(10), 50, + interpolation='lower'), 4) + assert_equal(np.percentile(range(10), 50, + interpolation='higher'), 5) + + def test_midpoint(self): + assert_equal(np.percentile(range(10), 51, + interpolation='midpoint'), 4.5) + assert_equal(np.percentile(range(11), 51, + interpolation='midpoint'), 5.5) + assert_equal(np.percentile(range(11), 50, + interpolation='midpoint'), 5) + + def test_nearest(self): + assert_equal(np.percentile(range(10), 51, + interpolation='nearest'), 5) + assert_equal(np.percentile(range(10), 49, + interpolation='nearest'), 4) + + def test_sequence(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) + + def test_axis(self): + x = np.arange(12).reshape(3, 4) + + assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) + + r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) + + r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) + + # ensure qth axis is always first as with np.array(old_percentile(..)) + x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + assert_equal(np.percentile(x, (25, 50)).shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), 
axis=2).shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) + assert_equal( + np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), + interpolation="higher").shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75), + interpolation="higher").shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0, + interpolation="higher").shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1, + interpolation="higher").shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2, + interpolation="higher").shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3, + interpolation="higher").shape, (2, 3, 4, 5)) + assert_equal(np.percentile(x, (25, 50, 75), axis=1, + interpolation="higher").shape, (3, 3, 5, 6)) + + def test_scalar_q(self): + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50), 5.5) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + assert_equal(np.percentile(x, 50, axis=0), r0) + assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) + r1 = np.array([1.5, 5.5, 9.5]) + assert_almost_equal(np.percentile(x, 50, axis=1), r1) + assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) + + out = np.empty(1) + assert_equal(np.percentile(x, 50, out=out), 5.5) + assert_equal(out, 5.5) + out = np.empty(4) + assert_equal(np.percentile(x, 50, axis=0, out=out), r0) + assert_equal(out, r0) + out = np.empty(3) + assert_equal(np.percentile(x, 50, axis=1, out=out), r1) + assert_equal(out, r1) + + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) 
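+ # 'lower' interpolation always returns one of the input values, which is presumably why the integer-typed out= arrays below are accepted.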
+ assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + c0 = np.percentile(x, 50, interpolation='lower', axis=0) + assert_equal(c0, r0) + assert_equal(c0.shape, r0.shape) + r1 = np.array([1., 5., 9.]) + c1 = np.percentile(x, 50, interpolation='lower', axis=1) + assert_almost_equal(c1, r1) + assert_equal(c1.shape, r1.shape) + + out = np.empty((), dtype=x.dtype) + c = np.percentile(x, 50, interpolation='lower', out=out) + assert_equal(c, 5) + assert_equal(out, 5) + out = np.empty(4, dtype=x.dtype) + c = np.percentile(x, 50, interpolation='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + out = np.empty(3, dtype=x.dtype) + c = np.percentile(x, 50, interpolation='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_exception(self): + assert_raises(ValueError, np.percentile, [1, 2], 56, + interpolation='foobar') + assert_raises(ValueError, np.percentile, [1], 101) + assert_raises(ValueError, np.percentile, [1], -1) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) + + def test_percentile_list(self): + assert_equal(np.percentile([1, 2, 3], 0), 1) + + def test_percentile_out(self): + x = np.array([1, 2, 3]) + y = np.zeros((3,)) + p = (1, 2, 3) + np.percentile(x, p, out=y) + assert_equal(y, np.percentile(x, p)) + + x = np.array([[1, 2, 3], + [4, 5, 6]]) + + y = np.zeros((3, 3)) + np.percentile(x, p, axis=0, out=y) + assert_equal(y, np.percentile(x, p, axis=0)) + + y = np.zeros((3, 2)) + np.percentile(x, p, axis=1, out=y) + assert_equal(y, np.percentile(x, p, axis=1)) + + x = np.arange(12).reshape(3, 4) + # q.dim > 1, float + r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) + out = np.empty((2, 4)) + assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0) + assert_equal(out, r0) + r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + out = np.empty((2, 3)) + assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) + assert_equal(out, r1) + + # q.dim > 1, int + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + out = np.empty((2, 4), dtype=x.dtype) + c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) + out = np.empty((2, 3), dtype=x.dtype) + c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_percentile_empty_dim(self): + # empty dims are preserved + d = np.arange(11 * 2).reshape(11, 1, 2, 1) + assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) + + assert_array_equal(np.percentile(d, 50, axis=2, + interpolation='midpoint').shape, + (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-2, + interpolation='midpoint').shape, + (11, 1, 1)) + + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, + (2, 1, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, + (2, 11, 2, 1)) + 
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, + (2, 11, 1, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, + (2, 11, 1, 2)) + + def test_percentile_no_overwrite(self): + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50], overwrite_input=False) + assert_equal(a, np.array([2, 3, 4, 1])) + + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50]) + assert_equal(a, np.array([2, 3, 4, 1])) + + def test_no_p_overwrite(self): + p = np.linspace(0., 100., num=5) + np.percentile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5)) + p = np.linspace(0., 100., num=5).tolist() + np.percentile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) + + def test_percentile_overwrite(self): + a = np.array([2, 3, 4, 1]) + b = np.percentile(a, [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + + assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), + np.percentile(x, [25, 60], axis=None)) + assert_equal(np.percentile(x, [25, 60], axis=(0,)), + np.percentile(x, [25, 60], axis=0)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + np.percentile(d[:,:,:, 0].flatten(), 25)) + assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], + np.percentile(d[:,:, 1,:].flatten(), [10, 90])) + assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], + np.percentile(d[:,:, 2,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], + np.percentile(d[2,:,:,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], + np.percentile(d[2, 1,:,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], + np.percentile(d[2,:,:, 1].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], + np.percentile(d[2,:, 2,:].flatten(), 25)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25) + assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25) + assert_raises(np.AxisError, np.percentile, d, axis=4, q=25) + assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25) + # each of these refers to the same axis twice + assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, 
axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), + keepdims=True).shape, (2, 1, 1, 7, 1)) + assert_equal(np.percentile(d, [1, 7], axis=(0, 3), + keepdims=True).shape, (2, 1, 5, 7, 1)) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal(np.percentile(d, 0, 0, interpolation='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal(np.percentile(d, 1, 1, interpolation='nearest', out=o), o) + + o = np.zeros(()) + assert_equal(np.percentile(d, 2, out=o), o) + assert_equal(np.percentile(d, 2, interpolation='nearest', out=o), o) + + def test_out_nan(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal( + np.percentile(d, 0, 0, interpolation='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal( + np.percentile(d, 1, 1, interpolation='nearest', out=o), o) + o = np.zeros(()) + assert_equal(np.percentile(d, 1, out=o), o) + assert_equal( + np.percentile(d, 1, interpolation='nearest', out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3, axis=0), np.nan) + assert_equal(np.percentile(a, [0.3, 0.6], axis=0), + np.array([np.nan] * 2)) + assert_(w[0].category is RuntimeWarning) + assert_(w[1].category is RuntimeWarning) + assert_(w[2].category is RuntimeWarning) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3).ndim, 0) + assert_(w[0].category is RuntimeWarning) + + # axis0 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.percentile(a, 0.3, 0), b) + + # axis0 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], 0) + b[:, 2, 3] = np.nan + b[:, 1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.percentile(a, [0.3, 0.6], 0), b) + + # axis1 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.percentile(a, 0.3, 1), b) + # axis1 not zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1) + b[:, 1, 3] = np.nan + b[:, 1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.percentile(a, [0.3, 0.6], 1), b) + + # axis02 zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2)) + b[1] = np.nan + b[2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', 
'', RuntimeWarning) + assert_equal(np.percentile(a, 0.3, (0, 2)), b) + # axis02 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2)) + b[:, 1] = np.nan + b[:, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) + # axis02 not zerod with nearest interpolation + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2), interpolation='nearest') + b[:, 1] = np.nan + b[:, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.percentile( + a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) + + +class TestQuantile(object): + # most of this is already tested by TestPercentile + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.quantile(x, 0), 0.) + assert_equal(np.quantile(x, 1), 3.5) + assert_equal(np.quantile(x, 0.5), 1.75) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.quantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.quantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) + + +class TestMedian(object): + + def test_basic(self): + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_equal(np.median(a0), 1) + assert_allclose(np.median(a1), 0.5) + assert_allclose(np.median(a2), 2.5) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_equal(np.median(a2, axis=1), [1, 4]) + assert_allclose(np.median(a2, axis=None), 2.5) + + a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) + assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) + a = np.array([0.0463301, 0.0444502, 0.141249]) + assert_equal(a[0], np.median(a)) + a = np.array([0.0444502, 0.141249, 0.0463301]) + assert_equal(a[-1], np.median(a)) + # check array scalar result + assert_equal(np.median(a).ndim, 0) + a[1] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a).ndim, 0) + assert_(w[0].category is RuntimeWarning) + + def test_axis_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: + orig = a.copy() + np.median(a, axis=None) + for ax in range(a.ndim): + np.median(a, axis=ax) + assert_array_equal(a, orig) + + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3), 3.5) + assert_allclose(np.median(a3, axis=None), 3.5) + assert_allclose(np.median(a3.T), 3.5) + + def test_overwrite_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) + assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), + [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) + assert_allclose( + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + 
assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), + [3, 4]) + + a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) + np.random.shuffle(a4.ravel()) + assert_allclose(np.median(a4, axis=None), + np.median(a4.copy(), axis=None, overwrite_input=True)) + assert_allclose(np.median(a4, axis=0), + np.median(a4.copy(), axis=0, overwrite_input=True)) + assert_allclose(np.median(a4, axis=1), + np.median(a4.copy(), axis=1, overwrite_input=True)) + assert_allclose(np.median(a4, axis=2), + np.median(a4.copy(), axis=2, overwrite_input=True)) + + def test_array_like(self): + x = [1, 2, 3] + assert_almost_equal(np.median(x), 2) + x2 = [x] + assert_almost_equal(np.median(x2), 2) + assert_allclose(np.median(x2, axis=0), x) + + def test_subclass(self): + # gh-3846 + class MySubClass(np.ndarray): + + def __new__(cls, input_array, info=None): + obj = np.asarray(input_array).view(cls) + obj.info = info + return obj + + def mean(self, axis=None, dtype=None, out=None): + return -7 + + a = MySubClass([1, 2, 3]) + assert_equal(np.median(a), -7) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_out_nan(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a, axis=0), np.nan) + assert_(w[0].category is RuntimeWarning) + assert_(w[1].category is RuntimeWarning) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a).ndim, 0) + assert_(w[0].category is RuntimeWarning) + + # axis0 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, 0), b) + assert_equal(len(w), 1) + + # axis1 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, 1), b) + assert_equal(len(w), 1) + + # axis02 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) + b[1] = np.nan + b[2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, (0, 2)), b) + assert_equal(len(w), 1) + + def test_empty(self): + # empty arrays + a = np.array([], dtype=float) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + + # multiple dimensions + a = np.array([], dtype=float, ndmin=3) + # no axis + with 
warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.array([], dtype=float, ndmin=2) + assert_equal(np.median(a, axis=0), b) + assert_equal(np.median(a, axis=1), b) + + # axis 2 + b = np.array(np.nan, dtype=float, ndmin=2) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.arange(7.) + assert_(type(np.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.median(o.astype(object))), float) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.median(x, axis=(0, 1)), np.median(o)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.median(x, axis=(0, -1)), np.median(o)) + + assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None)) + assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0)) + assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.median(d, axis=(0, 1, 2))[0], + np.median(d[:,:,:, 0].flatten())) + assert_equal(np.median(d, axis=(0, 1, 3))[1], + np.median(d[:,:, 1,:].flatten())) + assert_equal(np.median(d, axis=(3, 1, -4))[2], + np.median(d[:,:, 2,:].flatten())) + assert_equal(np.median(d, axis=(3, 1, 2))[2], + np.median(d[2,:,:,:].flatten())) + assert_equal(np.median(d, axis=(3, 2))[2, 1], + np.median(d[2, 1,:,:].flatten())) + assert_equal(np.median(d, axis=(1, -2))[2, 1], + np.median(d[2,:,:, 1].flatten())) + assert_equal(np.median(d, axis=(1, 3))[2, 2], + np.median(d[2,:, 2,:].flatten())) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.median, d, axis=-5) + assert_raises(np.AxisError, np.median, d, axis=(0, -5)) + assert_raises(np.AxisError, np.median, d, axis=4) + assert_raises(np.AxisError, np.median, d, axis=(0, 4)) + assert_raises(ValueError, np.median, d, axis=(1, 1)) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.median(d, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.median(d, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + +class TestAdd_newdoc_ufunc(object): + + def test_ufunc_arg(self): + assert_raises(TypeError, add_newdoc_ufunc, 2, "blah") + assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah") + + def test_string_arg(self): + assert_raises(TypeError, add_newdoc_ufunc, np.add, 3) + + +class TestAdd_newdoc(object): + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_add_doc(self): + # test np.add_newdoc + tgt = "Current flat index into the array." 
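+ # The docstring installed by np.add_newdoc should begin with the target text.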
+ assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt) + assert_(len(np.core.ufunc.identity.__doc__) > 300) + assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300) + +class TestSortComplex(object): + + @pytest.mark.parametrize("type_in, type_out", [ + ('l', 'D'), + ('h', 'F'), + ('H', 'F'), + ('b', 'F'), + ('B', 'F'), + ('g', 'G'), + ]) + def test_sort_real(self, type_in, type_out): + # sort_complex() type casting for real input types + a = np.array([5, 3, 6, 2, 1], dtype=type_in) + actual = np.sort_complex(a) + expected = np.sort(a).astype(type_out) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + def test_sort_complex(self): + # sort_complex() handling of complex input + a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D') + expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D') + actual = np.sort_complex(a) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_function_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_function_base.pyc new file mode 100644 index 0000000..89d1ec8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_function_base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_histograms.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_histograms.py new file mode 100644 index 0000000..c96b01d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_histograms.py @@ -0,0 +1,833 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np + +from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose, + assert_array_max_ulp, assert_raises_regex, suppress_warnings, + ) + + +class TestHistogram(object): + + def setup(self): + pass + + def teardown(self): + pass + + def test_simple(self): + n = 100 + v = np.random.rand(n) + (a, b) = histogram(v) + # check if the sum of the bins equals the number of samples + assert_equal(np.sum(a, axis=0), n) + # check that the bin counts are evenly spaced when the data is from + # a linear function + (a, b) = histogram(np.linspace(0, 10, 100)) + assert_array_equal(a, 10) + + def test_one_bin(self): + # Ticket 632 + hist, edges = histogram([1, 2, 3, 4], [1, 2]) + assert_array_equal(hist, [2, ]) + assert_array_equal(edges, [1, 2]) + assert_raises(ValueError, histogram, [1, 2], bins=0) + h, e = histogram([1, 2], bins=1) + assert_equal(h, np.array([2])) + assert_allclose(e, np.array([1., 2.])) + + def test_normed(self): + sup = suppress_warnings() + with sup: + rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*') + # Check that the integral of the density equals 1. 
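+ # normed is deprecated in favor of density, hence exactly one recorded warning is expected.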
+ n = 100 + v = np.random.rand(n) + a, b = histogram(v, normed=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + assert_equal(len(rec), 1) + + sup = suppress_warnings() + with sup: + rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*') + # Check with non-constant bin widths (buggy but backwards + # compatible) + v = np.arange(10) + bins = [0, 1, 5, 9, 10] + a, b = histogram(v, bins, normed=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + assert_equal(len(rec), 1) + + def test_density(self): + # Check that the integral of the density equals 1. + n = 100 + v = np.random.rand(n) + a, b = histogram(v, density=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + + # Check with non-constant bin widths + v = np.arange(10) + bins = [0, 1, 3, 6, 10] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, .1) + assert_equal(np.sum(a * np.diff(b)), 1) + + # Test that passing False works too + a, b = histogram(v, bins, density=False) + assert_array_equal(a, [1, 2, 3, 4]) + + # Variable bin widths are especially useful to deal with + # infinities. + v = np.arange(10) + bins = [0, 1, 3, 6, np.inf] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, [.1, .1, .1, 0.]) + + # Taken from a bug report from N. Becker on the numpy-discussion + # mailing list Aug. 6, 2010. + counts, dmy = np.histogram( + [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) + assert_equal(counts, [.25, 0]) + + def test_outliers(self): + # Check that outliers are not tallied + a = np.arange(10) + .5 + + # Lower outliers + h, b = histogram(a, range=[0, 9]) + assert_equal(h.sum(), 9) + + # Upper outliers + h, b = histogram(a, range=[1, 10]) + assert_equal(h.sum(), 9) + + # Normalization + h, b = histogram(a, range=[1, 9], density=True) + assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15) + + # Weights + w = np.arange(10) + .5 + h, b = histogram(a, range=[1, 9], weights=w, density=True) + assert_equal((h * np.diff(b)).sum(), 1) + + h, b = histogram(a, bins=8, range=[1, 9], weights=w) + assert_equal(h, w[1:-1]) + + def test_arr_weights_mismatch(self): + a = np.arange(10) + .5 + w = np.arange(11) + .5 + with assert_raises_regex(ValueError, "same shape as"): + h, b = histogram(a, range=[1, 9], weights=w, density=True) + + + def test_type(self): + # Check the type of the returned histogram + a = np.arange(10) + .5 + h, b = histogram(a) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, density=True) + assert_(np.issubdtype(h.dtype, np.floating)) + + h, b = histogram(a, weights=np.ones(10, int)) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, weights=np.ones(10, float)) + assert_(np.issubdtype(h.dtype, np.floating)) + + def test_f32_rounding(self): + # gh-4799, check that the rounding of the edges works with float32 + x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32) + y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) + counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) + assert_equal(counts_hist.sum(), 3.)
+ + def test_bool_conversion(self): + # gh-12107 + # Reference integer histogram + a = np.array([1, 1, 0], dtype=np.uint8) + int_hist, int_edges = np.histogram(a) + + # Should raise a warning on booleans + # Ensure that the histograms are equivalent; we need to suppress + # the warnings to get the actual outputs + with suppress_warnings() as sup: + rec = sup.record(RuntimeWarning, 'Converting input from .*') + hist, edges = np.histogram([True, True, False]) + # A warning should be issued + assert_equal(len(rec), 1) + assert_array_equal(hist, int_hist) + assert_array_equal(edges, int_edges) + + def test_weights(self): + v = np.random.rand(100) + w = np.ones(100) * 5 + a, b = histogram(v) + na, nb = histogram(v, density=True) + wa, wb = histogram(v, weights=w) + nwa, nwb = histogram(v, weights=w, density=True) + assert_array_almost_equal(a * 5, wa) + assert_array_almost_equal(na, nwa) + + # Check weights are properly applied. + v = np.linspace(0, 10, 10) + w = np.concatenate((np.zeros(5), np.ones(5))) + wa, wb = histogram(v, bins=np.arange(11), weights=w) + assert_array_almost_equal(wa, w) + + # Check with integer weights + wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) + assert_array_equal(wa, [4, 5, 0, 1]) + wa, wb = histogram( + [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True) + assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) + + # Check weights with non-uniform bin widths + a, b = histogram( + np.arange(9), [0, 1, 3, 6, 10], + weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) + assert_almost_equal(a, [.2, .1, .1, .075]) + + def test_exotic_weights(self): + + # Test the use of weights that are not integers or floats, but e.g. + # complex numbers or object types. + + # Complex weights + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Decimal weights + from decimal import Decimal + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([Decimal(1), Decimal(2), Decimal(3)]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + def test_no_side_effects(self): + # This is a regression test that ensures that values passed to + # ``histogram`` are unchanged.
+ values = np.array([1.3, 2.5, 2.3]) + np.histogram(values, range=[-10, 10], bins=100) + assert_array_almost_equal(values, [1.3, 2.5, 2.3]) + + def test_empty(self): + a, b = histogram([], bins=([0, 1])) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + def test_error_binnum_type (self): + # Tests if right Error is raised if bins argument is float + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, 5) + assert_raises(TypeError, histogram, vals, 2.4) + + def test_finite_range(self): + # Normal ranges should be fine + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, range=[0.25,0.75]) + assert_raises(ValueError, histogram, vals, range=[np.nan,0.75]) + assert_raises(ValueError, histogram, vals, range=[0.25,np.inf]) + + def test_invalid_range(self): + # start of range must be < end of range + vals = np.linspace(0.0, 1.0, num=100) + with assert_raises_regex(ValueError, "max must be larger than"): + np.histogram(vals, range=[0.1, 0.01]) + + def test_bin_edge_cases(self): + # Ensure that floating-point computations correctly place edge cases. + arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) + hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) + mask = hist > 0 + left_edges = edges[:-1][mask] + right_edges = edges[1:][mask] + for x, left, right in zip(arr, left_edges, right_edges): + assert_(x >= left) + assert_(x < right) + + def test_last_bin_inclusive_range(self): + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) + assert_equal(hist[-1], 1) + + def test_bin_array_dims(self): + # gracefully handle bins object > 1 dimension + vals = np.linspace(0.0, 1.0, num=100) + bins = np.array([[0, 0.5], [0.6, 1.0]]) + with assert_raises_regex(ValueError, "must be 1d"): + np.histogram(vals, bins=bins) + + def test_unsigned_monotonicity_check(self): + # Ensures ValueError is raised if bins not increasing monotonically + # when bins contain unsigned values (see #9222) + arr = np.array([2]) + bins = np.array([1, 3, 1], dtype='uint64') + with assert_raises(ValueError): + hist, edges = np.histogram(arr, bins=bins) + + def test_object_array_of_0d(self): + # gh-7864 + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [-np.inf]) + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [np.inf]) + + # these should not crash + np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001]) + np.histogram([np.array(0.5) for i in range(10)] + [.5]) + + def test_some_nan_values(self): + # gh-7503 + one_nan = np.array([0, 1, np.nan]) + all_nan = np.array([np.nan, np.nan]) + + # the internal comparisons with NaN give warnings + sup = suppress_warnings() + sup.filter(RuntimeWarning) + with sup: + # can't infer range with nan + assert_raises(ValueError, histogram, one_nan, bins='auto') + assert_raises(ValueError, histogram, all_nan, bins='auto') + + # explicit range solves the problem + h, b = histogram(one_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 0) # nan is not counted + + # as does an explicit set of bins + h, b = histogram(one_nan, bins=[0, 1]) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins=[0, 1]) + assert_equal(h.sum(), 0) # nan is not counted + + def test_datetime(self): + begin = np.datetime64('2000-01-01', 'D') + offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20]) + bins = np.array([0, 2, 
+        dates = begin + offsets
+        date_bins = begin + bins
+
+        td = np.dtype('timedelta64[D]')
+
+        # Results should be the same for integer offsets or datetime values.
+        # For now, only explicit bins are supported, since linspace does not
+        # work on datetimes or timedeltas
+        d_count, d_edge = histogram(dates, bins=date_bins)
+        t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))
+        i_count, i_edge = histogram(offsets, bins=bins)
+
+        assert_equal(d_count, i_count)
+        assert_equal(t_count, i_count)
+
+        assert_equal((d_edge - begin).astype(int), i_edge)
+        assert_equal(t_edge.astype(int), i_edge)
+
+        assert_equal(d_edge.dtype, dates.dtype)
+        assert_equal(t_edge.dtype, td)
+
+    def do_signed_overflow_bounds(self, dtype):
+        exponent = 8 * np.dtype(dtype).itemsize - 1
+        arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
+        hist, e = histogram(arr, bins=2)
+        assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
+        assert_equal(hist, [1, 1])
+
+    def test_signed_overflow_bounds(self):
+        self.do_signed_overflow_bounds(np.byte)
+        self.do_signed_overflow_bounds(np.short)
+        self.do_signed_overflow_bounds(np.intc)
+        self.do_signed_overflow_bounds(np.int_)
+        self.do_signed_overflow_bounds(np.longlong)
+
+    def do_precision_lower_bound(self, float_small, float_large):
+        eps = np.finfo(float_large).eps
+
+        arr = np.array([1.0], float_small)
+        range = np.array([1.0 + eps, 2.0], float_large)
+
+        # test is looking for behavior when the bounds change between dtypes
+        if range.astype(float_small)[0] != 1:
+            return
+
+        # previously crashed
+        count, x_loc = np.histogram(arr, bins=1, range=range)
+        assert_equal(count, [1])
+
+        # gh-10322 means that the type comes from arr; this may change
+        assert_equal(x_loc.dtype, float_small)
+
+    def do_precision_upper_bound(self, float_small, float_large):
+        eps = np.finfo(float_large).eps
+
+        arr = np.array([1.0], float_small)
+        range = np.array([0.0, 1.0 - eps], float_large)
+
+        # test is looking for behavior when the bounds change between dtypes
+        if range.astype(float_small)[-1] != 1:
+            return
+
+        # previously crashed
+        count, x_loc = np.histogram(arr, bins=1, range=range)
+        assert_equal(count, [1])
+
+        # gh-10322 means that the type comes from arr; this may change
+        assert_equal(x_loc.dtype, float_small)
+
+    def do_precision(self, float_small, float_large):
+        self.do_precision_lower_bound(float_small, float_large)
+        self.do_precision_upper_bound(float_small, float_large)
+
+    def test_precision(self):
+        # not looping results in a useful stack trace upon failure
+        self.do_precision(np.half, np.single)
+        self.do_precision(np.half, np.double)
+        self.do_precision(np.half, np.longdouble)
+        self.do_precision(np.single, np.double)
+        self.do_precision(np.single, np.longdouble)
+        self.do_precision(np.double, np.longdouble)
+
+    def test_histogram_bin_edges(self):
+        hist, e = histogram([1, 2, 3, 4], [1, 2])
+        edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
+        assert_array_equal(edges, e)
+
+        arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
+        hist, e = histogram(arr, bins=30, range=(-0.5, 5))
+        edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
+        assert_array_equal(edges, e)
+
+        hist, e = histogram(arr, bins='auto', range=(0, 1))
+        edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
+        assert_array_equal(edges, e)
+
+
+class TestHistogramOptimBinNums(object):
+    """
+    Provide test coverage for the provided estimators of the optimal number
+    of bins.
+    """
+
+    def test_empty(self):
+        estimator_list = ['fd', 'scott', 'rice', 'sturges',
+                          'doane', 'sqrt', 'auto', 'stone']
+        # check it can deal with empty data
+        for estimator in estimator_list:
+            a, b = histogram([], bins=estimator)
+            assert_array_equal(a, np.array([0]))
+            assert_array_equal(b, np.array([0, 1]))
+
+    def test_simple(self):
+        """
+        Straightforward testing with a mixture of linspace data (for
+        consistency). All test values have been precomputed and the values
+        shouldn't change.
+        """
+        # Some basic sanity checking, with some fixed data.
+        # Checking for the correct number of bins
+        basic_test = {50:   {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
+                             'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
+                      500:  {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
+                             'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
+                      5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
+                             'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}
+
+        for testlen, expectedResults in basic_test.items():
+            # Create some sort of non-uniform data to test with
+            # (2 peak uniform mixture)
+            x1 = np.linspace(-10, -1, testlen // 5 * 2)
+            x2 = np.linspace(1, 10, testlen // 5 * 3)
+            x = np.concatenate((x1, x2))
+            for estimator, numbins in expectedResults.items():
+                a, b = np.histogram(x, estimator)
+                assert_equal(len(a), numbins, err_msg="For the {0} estimator "
+                             "with datasize of {1}".format(estimator, testlen))
+
+    def test_small(self):
+        """
+        Smaller datasets have the potential to cause issues with the data
+        adaptive methods, especially the FD method. All bin numbers have been
+        precalculated.
+        """
+        small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+                         'doane': 1, 'sqrt': 1, 'stone': 1},
+                     2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
+                         'doane': 1, 'sqrt': 2, 'stone': 1},
+                     3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
+                         'doane': 3, 'sqrt': 2, 'stone': 1}}
+
+        for testlen, expectedResults in small_dat.items():
+            testdat = np.arange(testlen)
+            for estimator, expbins in expectedResults.items():
+                a, b = np.histogram(testdat, estimator)
+                assert_equal(len(a), expbins, err_msg="For the {0} estimator "
+                             "with datasize of {1}".format(estimator, testlen))
+
+    def test_incorrect_methods(self):
+        """
+        Check that a ValueError is raised when an unknown string is passed in.
+        """
+        check_list = ['mad', 'freeman', 'histograms', 'IQR']
+        for estimator in check_list:
+            assert_raises(ValueError, histogram, [1, 2, 3], estimator)
+
+    def test_novariance(self):
+        """
+        Check that methods handle no variance in data.
+        Primarily for Scott and FD, as the SD and IQR are both 0 in this case.
+        """
+        novar_dataset = np.ones(100)
+        novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+                            'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}
+
+        for estimator, numbins in novar_resultdict.items():
+            a, b = np.histogram(novar_dataset, estimator)
+            assert_equal(len(a), numbins, err_msg="{0} estimator, "
+                         "No Variance test".format(estimator))
+
+    def test_limited_variance(self):
+        """
+        Check that when the IQR is 0 but variance exists, we return the
+        Sturges value and not the FD value.
+        """
+        lim_var_data = np.ones(1000)
+        lim_var_data[:3] = 0
+        lim_var_data[-4:] = 100
+
+        edges_auto = histogram_bin_edges(lim_var_data, 'auto')
+        assert_equal(edges_auto, np.linspace(0, 100, 12))
+
+        edges_fd = histogram_bin_edges(lim_var_data, 'fd')
+        assert_equal(edges_fd, np.array([0, 100]))
+
+        edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
+        assert_equal(edges_sturges, np.linspace(0, 100, 12))
+
+    def test_outlier(self):
+        """
+        Check the FD, Scott and Doane estimators with outliers.
+
+        The FD estimator computes a smaller bin width, since it is less
+        affected by outliers. Since the range is so (artificially) large,
+        this means more bins, most of which will be empty, but the data of
+        interest usually is unaffected. The Scott estimator is more affected
+        and returns fewer bins, despite most of the variance being in one
+        area of the data. The Doane estimator lies somewhere between the
+        other two.
+        """
+        xcenter = np.linspace(-10, 10, 50)
+        outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
+
+        outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}
+
+        for estimator, numbins in outlier_resultdict.items():
+            a, b = np.histogram(outlier_dataset, estimator)
+            assert_equal(len(a), numbins)
+
+    def test_scott_vs_stone(self):
+        """Verify that Scott's rule and Stone's rule converge for normally
+        distributed data."""
+
+        def nbins_ratio(seed, size):
+            rng = np.random.RandomState(seed)
+            x = rng.normal(loc=0, scale=2, size=size)
+            a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
+            return a / (a + b)
+
+        ll = [[nbins_ratio(seed, size) for size in
+               np.geomspace(start=10, stop=100, num=4).round().astype(int)]
+              for seed in range(256)]
+
+        # the average difference between the two methods decreases as the
+        # dataset size increases.
+        assert_almost_equal(abs(np.mean(ll, axis=0) - 0.5),
+                            [0.1065248,
+                             0.0968844,
+                             0.0331818,
+                             0.0178057],
+                            decimal=3)
+
+    def test_simple_range(self):
+        """
+        Straightforward testing with a mixture of linspace data (for
+        consistency). Adding in a 3rd mixture that will then be
+        completely ignored. All test values have been precomputed and
+        they shouldn't change.
+        """
+        # some basic sanity checking, with some fixed data.
+        # Checking for the correct number of bins
+        basic_test = {
+            50:   {'fd': 8, 'scott': 8, 'rice': 15,
+                   'sturges': 14, 'auto': 14, 'stone': 8},
+            500:  {'fd': 15, 'scott': 16, 'rice': 32,
+                   'sturges': 20, 'auto': 20, 'stone': 80},
+            5000: {'fd': 33, 'scott': 33, 'rice': 69,
+                   'sturges': 27, 'auto': 33, 'stone': 80}
+        }
+
+        for testlen, expectedResults in basic_test.items():
+            # create some sort of non-uniform data to test with
+            # (3 peak uniform mixture)
+            x1 = np.linspace(-10, -1, testlen // 5 * 2)
+            x2 = np.linspace(1, 10, testlen // 5 * 3)
+            x3 = np.linspace(-100, -50, testlen)
+            x = np.hstack((x1, x2, x3))
+            for estimator, numbins in expectedResults.items():
+                a, b = np.histogram(x, estimator, range=(-20, 20))
+                msg = "For the {0} estimator".format(estimator)
+                msg += " with datasize of {0}".format(testlen)
+                assert_equal(len(a), numbins, err_msg=msg)
+
+    def test_simple_weighted(self):
+        """
+        Check that weighted data raises a TypeError.
+        """
+        estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
+        for estimator in estimator_list:
+            assert_raises(TypeError, histogram, [1, 2, 3],
+                          estimator, weights=[1, 2, 3])
+
+
+class TestHistogramdd(object):

+    def test_simple(self):
+        x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
+                      [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
+        H, edges = histogramdd(x, (2, 3, 3),
+                               range=[[-1, 1], [0, 3], [0, 3]])
+        answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
+                           [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
+        assert_array_equal(H, answer)
+
+        # Check normalization
+        ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
+        H, edges = histogramdd(x, bins=ed, density=True)
+        assert_(np.all(H == answer / 12.))
+
+        # Check that H has the correct shape.
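+        # (The density checks here follow histogramdd's normalization: each
+        # cell count is divided by total_count * cell_volume. With the
+        # [-2, 0, 2] x [0, 1, 2, 3] x [0, 1, 2, 3] edges above, each cell has
+        # volume 2*1*1 = 2 and there are 6 samples, hence H == answer / 12;
+        # the (2, 3, 4) grid below has unit cells, hence answer / 6.)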
+        H, edges = histogramdd(x, (2, 3, 4),
+                               range=[[-1, 1], [0, 3], [0, 4]],
+                               density=True)
+        answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
+                           [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
+        assert_array_almost_equal(H, answer / 6., 4)
+        # Check that a sequence of arrays is accepted and H has the correct
+        # shape.
+        z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
+        H, edges = histogramdd(
+            z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
+        answer = np.array([[[0, 0], [0, 0], [0, 0]],
+                           [[0, 1], [0, 0], [1, 0]],
+                           [[0, 1], [0, 0], [0, 0]],
+                           [[0, 0], [0, 0], [0, 0]]])
+        assert_array_equal(H, answer)
+
+        Z = np.zeros((5, 5, 5))
+        Z[list(range(5)), list(range(5)), list(range(5))] = 1.
+        H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
+        assert_array_equal(H, Z)
+
+    def test_shape_3d(self):
+        # All possible permutations for bins of different lengths in 3D.
+        bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
+                (4, 5, 6))
+        r = np.random.rand(10, 3)
+        for b in bins:
+            H, edges = histogramdd(r, b)
+            assert_(H.shape == b)
+
+    def test_shape_4d(self):
+        # All possible permutations for bins of different lengths in 4D.
+        bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
+                (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
+                (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
+                (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
+                (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
+                (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
+
+        r = np.random.rand(10, 4)
+        for b in bins:
+            H, edges = histogramdd(r, b)
+            assert_(H.shape == b)
+
+    def test_weights(self):
+        v = np.random.rand(100, 2)
+        hist, edges = histogramdd(v)
+        n_hist, edges = histogramdd(v, density=True)
+        w_hist, edges = histogramdd(v, weights=np.ones(100))
+        assert_array_equal(w_hist, hist)
+        w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
+        assert_array_equal(w_hist, n_hist)
+        w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
+        assert_array_equal(w_hist, 2 * hist)
+
+    def test_identical_samples(self):
+        x = np.zeros((10, 2), int)
+        hist, edges = histogramdd(x, bins=2)
+        assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))
+
+    def test_empty(self):
+        a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
+        assert_array_max_ulp(a, np.array([[0.]]))
+        a, b = np.histogramdd([[], [], []], bins=2)
+        assert_array_max_ulp(a, np.zeros((2, 2, 2)))
+
+    def test_bins_errors(self):
+        # There are two ways to specify bins. Check for the right errors
+        # when mixing those.
+        x = np.arange(8).reshape(2, 4)
+        assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
+        assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
+        assert_raises(
+            ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
+        assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
+
+    def test_inf_edges(self):
+        # Test that using +/-inf bin edges works. See #1788.
+        with np.errstate(invalid='ignore'):
+            x = np.arange(6).reshape(3, 2)
+            expected = np.array([[1, 0], [0, 1], [0, 1]])
+            h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
+            assert_allclose(h, expected)
+            h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
+            assert_allclose(h, expected)
+            h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
+            assert_allclose(h, expected)
+
+    def test_rightmost_binedge(self):
+        # Test an event very close to the rightmost bin edge; see GitHub
+        # issue #4266.
+        x = [0.9999999995]
+        bins = [[0., 0.5, 1.0]]
+        hist, _ = histogramdd(x, bins=bins)
+        assert_(hist[0] == 0.0)
+        assert_(hist[1] == 1.)
+        x = [1.0]
+        bins = [[0., 0.5, 1.0]]
+        hist, _ = histogramdd(x, bins=bins)
+        assert_(hist[0] == 0.0)
+        assert_(hist[1] == 1.)
+        x = [1.0000000001]
+        bins = [[0., 0.5, 1.0]]
+        hist, _ = histogramdd(x, bins=bins)
+        assert_(hist[0] == 0.0)
+        assert_(hist[1] == 0.0)
+        x = [1.0001]
+        bins = [[0., 0.5, 1.0]]
+        hist, _ = histogramdd(x, bins=bins)
+        assert_(hist[0] == 0.0)
+        assert_(hist[1] == 0.0)
+
+    def test_finite_range(self):
+        vals = np.random.random((100, 3))
+        histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
+        assert_raises(ValueError, histogramdd, vals,
+                      range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
+        assert_raises(ValueError, histogramdd, vals,
+                      range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
+
+    def test_equal_edges(self):
+        """ Test that adjacent entries in an edge array can be equal """
+        x = np.array([0, 1, 2])
+        y = np.array([0, 1, 2])
+        x_edges = np.array([0, 2, 2])
+        y_edges = 1
+        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+        hist_expected = np.array([
+            [2.],
+            [1.],  # x == 2 falls in the final bin
+        ])
+        assert_equal(hist, hist_expected)
+
+    def test_edge_dtype(self):
+        """ Test that if an edge array is input, its type is preserved """
+        x = np.array([0, 10, 20])
+        y = x / 10
+        x_edges = np.array([0, 5, 15, 20])
+        y_edges = x_edges / 10
+        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+        assert_equal(edges[0].dtype, x_edges.dtype)
+        assert_equal(edges[1].dtype, y_edges.dtype)
+
+    def test_large_integers(self):
+        big = 2**60  # Too large to represent with a full precision float
+
+        x = np.array([0], np.int64)
+        x_edges = np.array([-1, +1], np.int64)
+        y = big + x
+        y_edges = big + x_edges
+
+        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+        assert_equal(hist[0, 0], 1)
+
+    def test_density_non_uniform_2d(self):
+        # Defines the following grid:
+        #
+        #    0 2     8
+        #   0+-+-----+
+        #    + |     +
+        #    + |     +
+        #   6+-+-----+
+        #   8+-+-----+
+        x_edges = np.array([0, 2, 8])
+        y_edges = np.array([0, 6, 8])
+        relative_areas = np.array([
+            [3, 9],
+            [1, 3]])
+
+        # ensure the number of points in each region is proportional to its area
+        x = np.array([1] + [1]*3 + [7]*3 + [7]*9)
+        y = np.array([7] + [1]*3 + [7]*3 + [1]*9)
+
+        # sanity check that the above worked as intended
+        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
+        assert_equal(hist, relative_areas)
+
+        # resulting histogram should be uniform, since counts and areas are
+        # proportional
+        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
+        assert_equal(hist, 1 / (8*8))
+
+    def test_density_non_uniform_1d(self):
+        # compare to histogram to show the results are the same
+        v = np.arange(10)
+        bins = np.array([0, 1, 3, 6, 10])
+        hist, edges = histogram(v, bins, density=True)
+        hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
+        assert_equal(hist, hist_dd)
+        assert_equal(edges, edges_dd[0])
+
+    def test_density_via_normed(self):
+        # normed should simply alias to the density argument
+        v = np.arange(10)
+        bins = np.array([0, 1, 3, 6, 10])
+        hist, edges = histogram(v, bins, density=True)
+        hist_dd, edges_dd = histogramdd((v,), (bins,), normed=True)
+        assert_equal(hist, hist_dd)
+        assert_equal(edges, edges_dd[0])
+
+    def test_density_normed_redundancy(self):
+        v = np.arange(10)
+        bins = np.array([0, 1, 3, 6, 10])
+        with assert_raises_regex(TypeError, "Cannot specify both"):
+            hist_dd, edges_dd = 
histogramdd((v,), (bins,), + density=True, + normed=True) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_histograms.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_histograms.pyc new file mode 100644 index 0000000..962296c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_histograms.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_index_tricks.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_index_tricks.py new file mode 100644 index 0000000..3246f68 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_index_tricks.py @@ -0,0 +1,454 @@ +from __future__ import division, absolute_import, print_function + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_raises_regex, + assert_warns + ) +from numpy.lib.index_tricks import ( + mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, + index_exp, ndindex, r_, s_, ix_ + ) + + +class TestRavelUnravelIndex(object): + def test_basic(self): + assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) + + # test backwards compatibility with older dims + # keyword argument; see Issue #10586 + with assert_warns(DeprecationWarning): + # we should achieve the correct result + # AND raise the appropriate warning + # when using older "dims" kw argument + assert_equal(np.unravel_index(indices=2, + dims=(2, 2)), + (1, 0)) + + # test that new shape argument works properly + assert_equal(np.unravel_index(indices=2, + shape=(2, 2)), + (1, 0)) + + # test that an invalid second keyword argument + # is properly handled + with assert_raises(TypeError): + np.unravel_index(indices=2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(254, ims=(17, 94)) + + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) + assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) + assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) + assert_raises(ValueError, np.unravel_index, -1, (2, 2)) + assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) + assert_raises(ValueError, np.unravel_index, 4, (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) + assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) + + assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal( + np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) + + arr = np.array([[3, 6, 6], [4, 5, 1]]) + assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) + assert_equal( + np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) + assert_equal( + np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) + assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), + [12, 13, 13]) + assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) + + assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), + [[3, 6, 6], [4, 5, 1]]) + assert_equal( + np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), + [[3, 6, 6], [4, 5, 1]]) + assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) + + def 
test_big_indices(self): + # ravel_multi_index for big indices (issue #7546) + if np.intp == np.int64: + arr = ([1, 29], [3, 5], [3, 117], [19, 2], + [2379, 1284], [2, 2], [0, 1]) + assert_equal( + np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), + [5627771580, 117259570957]) + + # test overflow checking for too big array (issue #7546) + dummy_arr = ([0],[0]) + half_max = np.iinfo(np.intp).max // 2 + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2)), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max+1, 2)) + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F') + + def test_dtypes(self): + # Test with different data types + for dtype in [np.int16, np.uint16, np.int32, + np.uint32, np.int64, np.uint64]: + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) + shape = (5, 8) + uncoords = 8*coords[0]+coords[1] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*coords[1] + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + dtype=dtype) + shape = (5, 8, 10) + uncoords = 10*(8*coords[0]+coords[1])+coords[2] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*(coords[1]+8*coords[2]) + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + def test_clipmodes(self): + # Test clipmodes + assert_equal( + np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), + np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), + mode=( + 'wrap', 'raise', 'clip', 'raise')), + np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) + assert_raises( + ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) + + def test_writeability(self): + # See gh-7269 + x, y = np.unravel_index([1, 2, 3], (4, 5)) + assert_(x.flags.writeable) + assert_(y.flags.writeable) + + def test_0d(self): + # gh-580 + x = np.unravel_index(0, ()) + assert_equal(x, ()) + + assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) + assert_raises_regex( + ValueError, "out of bounds", np.unravel_index, [1], ()) + + +class TestGrid(object): + def test_basic(self): + a = mgrid[-1:1:10j] + b = mgrid[-1:1:0.1] + assert_(a.shape == (10,)) + assert_(b.shape == (20,)) + assert_(a[0] == -1) + assert_almost_equal(a[-1], 1) + assert_(b[0] == -1) + assert_almost_equal(b[1]-b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0]+19*0.1, 11) + assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) + + def test_linspace_equivalence(self): + y, st = np.linspace(2, 10, retstep=1) + assert_almost_equal(st, 8/49.0) + assert_array_almost_equal(y, mgrid[2:10:50j], 13) + + def test_nd(self): + c = mgrid[-1:1:10j, -2:2:10j] + d = mgrid[-1:1:0.1, -2:2:0.2] + assert_(c.shape == (2, 10, 10)) + assert_(d.shape == (2, 20, 20)) + assert_array_equal(c[0][0, :], -np.ones(10, 'd')) + assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd')) + assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) + assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) + 
assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], + 0.1*np.ones(20, 'd'), 11) + assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], + 0.2*np.ones(20, 'd'), 11) + + def test_sparse(self): + grid_full = mgrid[-1:1:10j, -2:2:10j] + grid_sparse = ogrid[-1:1:10j, -2:2:10j] + + # sparse grids can be made dense by broadcasting + grid_broadcast = np.broadcast_arrays(*grid_sparse) + for f, b in zip(grid_full, grid_broadcast): + assert_equal(f, b) + + @pytest.mark.parametrize("start, stop, step, expected", [ + (None, 10, 10j, (200, 10)), + (-10, 20, None, (1800, 30)), + ]) + def test_mgrid_size_none_handling(self, start, stop, step, expected): + # regression test None value handling for + # start and step values used by mgrid; + # internally, this aims to cover previously + # unexplored code paths in nd_grid() + grid = mgrid[start:stop:step, start:stop:step] + # need a smaller grid to explore one of the + # untested code paths + grid_small = mgrid[start:stop:step] + assert_equal(grid.size, expected[0]) + assert_equal(grid_small.size, expected[1]) + + +class TestConcatenator(object): + def test_1d(self): + assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) + b = np.ones(5) + c = r_[b, 0, 0, b] + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + + def test_mixed_type(self): + g = r_[10.1, 1:10] + assert_(g.dtype == 'f8') + + def test_more_mixed_type(self): + g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] + assert_(g.dtype == 'f8') + + def test_complex_step(self): + # Regression test for #12262 + g = r_[0:36:100j] + assert_(g.shape == (100,)) + + def test_2d(self): + b = np.random.rand(5, 5) + c = np.random.rand(5, 5) + d = r_['1', b, c] # append columns + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b) + assert_array_equal(d[:, 5:], c) + d = r_[b, c] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b) + assert_array_equal(d[5:, :], c) + + def test_0d(self): + assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) + assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) + assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3]) + + +class TestNdenumerate(object): + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(list(ndenumerate(a)), + [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) + + +class TestIndexExpression(object): + def test_regression_1(self): + # ticket #1196 + a = np.arange(2) + assert_equal(a[:-1], a[s_[:-1]]) + assert_equal(a[:-1], a[index_exp[:-1]]) + + def test_simple_1(self): + a = np.random.rand(4, 5, 6) + + assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) + assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) + + +class TestIx_(object): + def test_regression_1(self): + # Test empty inputs create outputs of indexing type, gh-5804 + # Test both lists and arrays + for func in (range, np.arange): + a, = np.ix_(func(0)) + assert_equal(a.dtype, np.intp) + + def test_shape_and_dtype(self): + sizes = (4, 5, 3, 2) + # Test both lists and arrays + for func in (range, np.arange): + arrays = np.ix_(*[func(sz) for sz in sizes]) + for k, (a, sz) in enumerate(zip(arrays, sizes)): + assert_equal(a.shape[k], sz) + assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) + assert_(np.issubdtype(a.dtype, np.integer)) + + def test_bool(self): + bool_a = [True, False, True, True] + int_a, = np.nonzero(bool_a) + assert_equal(np.ix_(bool_a)[0], int_a) + + def test_1d_only(self): + idx2d = [[1, 2, 3], [4, 5, 6]] + assert_raises(ValueError, np.ix_, idx2d) + + def test_repeated_input(self): + length_of_vector 
= 5 + x = np.arange(length_of_vector) + out = ix_(x, x) + assert_equal(out[0].shape, (length_of_vector, 1)) + assert_equal(out[1].shape, (1, length_of_vector)) + # check that input shape is not modified + assert_equal(x.shape, (length_of_vector,)) + + +def test_c_(): + a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] + assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) + + +class TestFillDiagonal(object): + def test_basic(self): + a = np.zeros((3, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + ) + + def test_tall_matrix(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + ) + + def test_tall_matrix_wrap(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5, True) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0]]) + ) + + def test_wide_matrix(self): + a = np.zeros((3, 10), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]) + ) + + def test_operate_4d_array(self): + a = np.zeros((3, 3, 3, 3), int) + fill_diagonal(a, 4) + i = np.array([0, 1, 2]) + assert_equal(np.where(a != 0), (i, i, i, i)) + + def test_low_dim_handling(self): + # raise error with low dimensionality + a = np.zeros(3, int) + with assert_raises_regex(ValueError, "at least 2-d"): + fill_diagonal(a, 5) + + def test_hetero_shape_handling(self): + # raise error with high dimensionality and + # shape mismatch + a = np.zeros((3,3,7,3), int) + with assert_raises_regex(ValueError, "equal length"): + fill_diagonal(a, 2) + + +def test_diag_indices(): + di = diag_indices(4) + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + a[di] = 100 + assert_array_equal( + a, np.array([[100, 2, 3, 4], + [5, 100, 7, 8], + [9, 10, 100, 12], + [13, 14, 15, 100]]) + ) + + # Now, we create indices to manipulate a 3-d array: + d3 = diag_indices(2, 3) + + # And use it to set the diagonal of a zeros array to 1: + a = np.zeros((2, 2, 2), int) + a[d3] = 1 + assert_array_equal( + a, np.array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + ) + + +class TestDiagIndicesFrom(object): + + def test_diag_indices_from(self): + x = np.random.random((4, 4)) + r, c = diag_indices_from(x) + assert_array_equal(r, np.arange(4)) + assert_array_equal(c, np.arange(4)) + + def test_error_small_input(self): + x = np.ones(7) + with assert_raises_regex(ValueError, "at least 2-d"): + diag_indices_from(x) + + def test_error_shape_mismatch(self): + x = np.zeros((3, 3, 2, 3), int) + with assert_raises_regex(ValueError, "equal length"): + diag_indices_from(x) + + +def test_ndindex(): + x = list(ndindex(1, 2, 3)) + expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] + assert_array_equal(x, expected) + + x = list(ndindex((1, 2, 3))) + assert_array_equal(x, expected) + + # Test use of scalars and tuples + x = list(ndindex((3,))) + assert_array_equal(x, list(ndindex(3))) + + # Make sure size argument is optional + x = list(ndindex()) + assert_equal(x, [()]) + + x = list(ndindex(())) + assert_equal(x, [()]) + + # Make sure 0-sized ndindex works correctly + x = list(ndindex(*[0])) + assert_equal(x, []) diff --git 
a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_index_tricks.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_index_tricks.pyc new file mode 100644 index 0000000..2f30b16 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_index_tricks.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_io.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_io.py new file mode 100644 index 0000000..7ef2553 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_io.py @@ -0,0 +1,2501 @@ +from __future__ import division, absolute_import, print_function + +import sys +import gzip +import os +import threading +import time +import warnings +import io +import re +import pytest +from tempfile import NamedTemporaryFile +from io import BytesIO, StringIO +from datetime import datetime +import locale + +import numpy as np +import numpy.ma as ma +from numpy.lib._iotools import ConverterError, ConversionWarning +from numpy.compat import asbytes, bytes, Path +from numpy.ma.testutils import assert_equal +from numpy.testing import ( + assert_warns, assert_, assert_raises_regex, assert_raises, + assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY, + HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, + ) + + +class TextIO(BytesIO): + """Helper IO class. + + Writes encode strings to bytes if needed, reads return bytes. + This makes it easier to emulate files opened in binary mode + without needing to explicitly convert strings to bytes in + setting up the test data. + + """ + def __init__(self, s=""): + BytesIO.__init__(self, asbytes(s)) + + def write(self, s): + BytesIO.write(self, asbytes(s)) + + def writelines(self, lines): + BytesIO.writelines(self, [asbytes(s) for s in lines]) + + +MAJVER, MINVER = sys.version_info[:2] +IS_64BIT = sys.maxsize > 2**32 +try: + import bz2 + HAS_BZ2 = True +except ImportError: + HAS_BZ2 = False +try: + import lzma + HAS_LZMA = True +except ImportError: + HAS_LZMA = False + + +def strptime(s, fmt=None): + """ + This function is available in the datetime module only from Python >= + 2.5. + + """ + if type(s) == bytes: + s = s.decode("latin1") + return datetime(*time.strptime(s, fmt)[:3]) + + +class RoundtripTest(object): + def roundtrip(self, save_func, *args, **kwargs): + """ + save_func : callable + Function used to save arrays to file. + file_on_disk : bool + If true, store the file on disk, instead of in a + string buffer. + save_kwds : dict + Parameters passed to `save_func`. + load_kwds : dict + Parameters passed to `numpy.load`. + args : tuple of arrays + Arrays stored to file. 
+ + """ + save_kwds = kwargs.get('save_kwds', {}) + load_kwds = kwargs.get('load_kwds', {}) + file_on_disk = kwargs.get('file_on_disk', False) + + if file_on_disk: + target_file = NamedTemporaryFile(delete=False) + load_file = target_file.name + else: + target_file = BytesIO() + load_file = target_file + + try: + arr = args + + save_func(target_file, *arr, **save_kwds) + target_file.flush() + target_file.seek(0) + + if sys.platform == 'win32' and not isinstance(target_file, BytesIO): + target_file.close() + + arr_reloaded = np.load(load_file, **load_kwds) + + self.arr = arr + self.arr_reloaded = arr_reloaded + finally: + if not isinstance(target_file, BytesIO): + target_file.close() + # holds an open file descriptor so it can't be deleted on win + if 'arr_reloaded' in locals(): + if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): + os.remove(target_file.name) + + def check_roundtrips(self, a): + self.roundtrip(a) + self.roundtrip(a, file_on_disk=True) + self.roundtrip(np.asfortranarray(a)) + self.roundtrip(np.asfortranarray(a), file_on_disk=True) + if a.shape[0] > 1: + # neither C nor Fortran contiguous for 2D arrays or more + self.roundtrip(np.asfortranarray(a)[1:]) + self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) + + def test_array(self): + a = np.array([], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], int) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) + self.check_roundtrips(a) + + def test_array_object(self): + a = np.array([], object) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], object) + self.check_roundtrips(a) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + self.roundtrip(a) + + @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32") + def test_mmap(self): + a = np.array([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + a = np.asfortranarray([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + self.check_roundtrips(a) + + @pytest.mark.slow + def test_format_2_0(self): + dt = [(("%d" % i) * 100, float) for i in range(500)] + a = np.ones(1000, dtype=dt) + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', UserWarning) + self.check_roundtrips(a) + + +class TestSaveLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(self.arr[0], self.arr_reloaded) + assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) + assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + + +class TestSavezLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + try: + for n, arr in enumerate(self.arr): + reloaded = self.arr_reloaded['arr_%d' % n] + assert_equal(arr, reloaded) + assert_equal(arr.dtype, reloaded.dtype) + assert_equal(arr.flags.fnc, reloaded.flags.fnc) + finally: + # delete tempfile, must be done here on windows + if self.arr_reloaded.fid: + self.arr_reloaded.fid.close() + os.remove(self.arr_reloaded.fid.name) + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.slow + def test_big_arrays(self): + L = (1 
<< 31) + 100000
+        a = np.empty(L, dtype=np.uint8)
+        with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
+            np.savez(tmp, a=a)
+            del a
+            npfile = np.load(tmp)
+            a = npfile['a']  # Should succeed
+            npfile.close()
+            del a  # Avoid pyflakes unused variable warning.
+
+    def test_multiple_arrays(self):
+        a = np.array([[1, 2], [3, 4]], float)
+        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+        self.roundtrip(a, b)
+
+    def test_named_arrays(self):
+        a = np.array([[1, 2], [3, 4]], float)
+        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+        c = BytesIO()
+        np.savez(c, file_a=a, file_b=b)
+        c.seek(0)
+        l = np.load(c)
+        assert_equal(a, l['file_a'])
+        assert_equal(b, l['file_b'])
+
+    def test_BagObj(self):
+        a = np.array([[1, 2], [3, 4]], float)
+        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+        c = BytesIO()
+        np.savez(c, file_a=a, file_b=b)
+        c.seek(0)
+        l = np.load(c)
+        assert_equal(sorted(dir(l.f)), ['file_a', 'file_b'])
+        assert_equal(a, l.f.file_a)
+        assert_equal(b, l.f.file_b)
+
+    def test_savez_filename_clashes(self):
+        # Test that issue #852 is fixed
+        # and that savez works in a multithreaded environment
+
+        def writer(error_list):
+            with temppath(suffix='.npz') as tmp:
+                arr = np.random.randn(500, 500)
+                try:
+                    np.savez(tmp, arr=arr)
+                except OSError as err:
+                    error_list.append(err)
+
+        errors = []
+        threads = [threading.Thread(target=writer, args=(errors,))
+                   for j in range(3)]
+        for t in threads:
+            t.start()
+        for t in threads:
+            t.join()
+
+        if errors:
+            raise AssertionError(errors)
+
+    def test_not_closing_opened_fid(self):
+        # Test that issue #2178 is fixed:
+        # verify that we can seek on a 'loaded' file
+        with temppath(suffix='.npz') as tmp:
+            with open(tmp, 'wb') as fp:
+                np.savez(fp, data='LOVELY LOAD')
+            with open(tmp, 'rb', 10000) as fp:
+                fp.seek(0)
+                assert_(not fp.closed)
+                np.load(fp)['data']
+                # fp must not get closed by .load
+                assert_(not fp.closed)
+                fp.seek(0)
+                assert_(not fp.closed)
+
+    #FIXME: Is this still true?
+    @pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy")
+    def test_closing_fid(self):
+        # Test that issue #1517 (too many opened files) stays fixed.
+        # It might be a "weak" test since it failed to get triggered on
+        # e.g. Debian sid of 2012 Jul 05, but was reported to
+        # trigger the failure on Ubuntu 10.04:
+        # http://projects.scipy.org/numpy/ticket/1517#comment:2
+        with temppath(suffix='.npz') as tmp:
+            np.savez(tmp, data='LOVELY LOAD')
+            # We need to check if the garbage collector can properly close
+            # the numpy npz files returned by np.load when their reference
+            # count goes to zero. Python 3 running in debug mode raises a
+            # ResourceWarning when file closing is left to the garbage
+            # collector, so we catch the warnings. Because ResourceWarning
+            # is unknown in Python < 3.x, we take the easy way out and
+            # catch all warnings.
+            with suppress_warnings() as sup:
+                sup.filter(Warning)  # TODO: specify exact message
+                for i in range(1, 1025):
+                    try:
+                        np.load(tmp)["data"]
+                    except Exception as e:
+                        msg = "Failed to load data from a file: %s" % e
+                        raise AssertionError(msg)
+
+    def test_closing_zipfile_after_load(self):
+        # Check that zipfile owns file and can close it. This needs to
+        # pass a file name to load for the test. On windows, failure will
+        # cause a second error to be raised when the attempt to remove
+        # the open file is made.
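+        # The NpzFile returned by np.load wraps a zipfile; the test grabs the
+        # zipfile's underlying file object before calling close() so it can
+        # assert afterwards that the file was really closed.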
+ prefix = 'numpy_test_closing_zipfile_after_load_' + with temppath(suffix='.npz', prefix=prefix) as tmp: + np.savez(tmp, lab='place holder') + data = np.load(tmp) + fp = data.zip.fp + data.close() + assert_(fp.closed) + + +class TestSaveTxt(object): + def test_array(self): + a = np.array([[1, 2], [3, 4]], float) + fmt = "%.18e" + c = BytesIO() + np.savetxt(c, a, fmt=fmt) + c.seek(0) + assert_equal(c.readlines(), + [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), + asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) + + a = np.array([[1, 2], [3, 4]], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) + + def test_0D_3D(self): + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, np.array(1)) + assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + @pytest.mark.skipif(Path is None, reason="No pathlib.Path") + def test_multifield_view(self): + a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')]) + v = a[['x', 'z']] + with temppath(suffix='.npy') as path: + path = Path(path) + np.save(path, v) + data = np.load(path) + assert_array_equal(data, v) + + def test_delimiter(self): + a = np.array([[1., 2.], [3., 4.]]) + c = BytesIO() + np.savetxt(c, a, delimiter=',', fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) + + def test_format(self): + a = np.array([(1, 2), (3, 4)]) + c = BytesIO() + # Sequence of formats + np.savetxt(c, a, fmt=['%02d', '%3.1f']) + c.seek(0) + assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) + + # A single multiformat string + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Specify delimiter, should be overridden + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Bad fmt, should raise a ValueError + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, a, fmt=99) + + def test_header_footer(self): + # Test the functionality of the header and footer keyword argument. 
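+        # Expected layout: savetxt prefixes each header/footer line with the
+        # `comments` string (default '# '), writing the header before the
+        # data rows and the footer after them; the four cases below check
+        # each combination against the serialized bytes.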
+
+        c = BytesIO()
+        a = np.array([(1, 2), (3, 4)], dtype=int)
+        test_header_footer = 'Test header / footer'
+        # Test the header keyword argument
+        np.savetxt(c, a, fmt='%1d', header=test_header_footer)
+        c.seek(0)
+        assert_equal(c.read(),
+                     asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
+        # Test the footer keyword argument
+        c = BytesIO()
+        np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
+        c.seek(0)
+        assert_equal(c.read(),
+                     asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
+        # Test the commentstr keyword argument used on the header
+        c = BytesIO()
+        commentstr = '% '
+        np.savetxt(c, a, fmt='%1d',
+                   header=test_header_footer, comments=commentstr)
+        c.seek(0)
+        assert_equal(c.read(),
+                     asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
+        # Test the commentstr keyword argument used on the footer
+        c = BytesIO()
+        commentstr = '% '
+        np.savetxt(c, a, fmt='%1d',
+                   footer=test_header_footer, comments=commentstr)
+        c.seek(0)
+        assert_equal(c.read(),
+                     asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
+
+    def test_file_roundtrip(self):
+        with temppath() as name:
+            a = np.array([(1, 2), (3, 4)])
+            np.savetxt(name, a)
+            b = np.loadtxt(name)
+            assert_array_equal(a, b)
+
+    def test_complex_arrays(self):
+        ncols = 2
+        nrows = 2
+        a = np.zeros((ncols, nrows), dtype=np.complex128)
+        re = np.pi
+        im = np.e
+        a[:] = re + 1.0j * im
+
+        # One format only
+        c = BytesIO()
+        np.savetxt(c, a, fmt=' %+.3e')
+        c.seek(0)
+        lines = c.readlines()
+        assert_equal(
+            lines,
+            [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
+             b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
+
+        # One format for each real and imaginary part
+        c = BytesIO()
+        np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
+        c.seek(0)
+        lines = c.readlines()
+        assert_equal(
+            lines,
+            [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
+             b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
+
+        # One format for each complex number
+        c = BytesIO()
+        np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
+        c.seek(0)
+        lines = c.readlines()
+        assert_equal(
+            lines,
+            [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
+             b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
+
+    def test_complex_negative_exponent(self):
+        # Prior to 1.15, some formats generated x+-yj; see gh-7895
+        ncols = 2
+        nrows = 2
+        a = np.zeros((ncols, nrows), dtype=np.complex128)
+        re = np.pi
+        im = np.e
+        a[:] = re - 1.0j * im
+        c = BytesIO()
+        np.savetxt(c, a, fmt='%.3e')
+        c.seek(0)
+        lines = c.readlines()
+        assert_equal(
+            lines,
+            [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
+             b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
+
+    def test_custom_writer(self):
+
+        class CustomWriter(list):
+            def write(self, text):
+                self.extend(text.split(b'\n'))
+
+        w = CustomWriter()
+        a = np.array([(1, 2), (3, 4)])
+        np.savetxt(w, a)
+        b = np.loadtxt(w)
+        assert_array_equal(a, b)
+
+    def test_unicode(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.unicode)
+        with tempdir() as tmpdir:
+            # set encoding as on windows it may not be unicode even on py3
+            np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
+                       encoding='UTF-8')
+
+    def test_unicode_roundtrip(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.unicode)
+        # our gz wrapper supports encoding
+        suffixes = ['', '.gz']
+        # the stdlib modules on Python 2 do not support encoding
+        if MAJVER > 2:
+            if HAS_BZ2:
+                suffixes.append('.bz2')
+            if HAS_LZMA:
+                suffixes.extend(['.xz', '.lzma'])
+        with tempdir() as tmpdir:
+            for suffix in suffixes:
+                np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
+                           fmt=['%s'], encoding='UTF-16-LE')
+                b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
+                               encoding='UTF-16-LE', dtype=np.unicode)
+                assert_array_equal(a, b)
+
+    def test_unicode_bytestream(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.unicode)
+        s = BytesIO()
+        np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+        s.seek(0)
+        assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
+
+    def test_unicode_stringstream(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.unicode)
+        s = StringIO()
+        np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+        s.seek(0)
+        assert_equal(s.read(), utf8 + '\n')
+
+
+class LoadTxtBase(object):
+    def check_compressed(self, fopen, suffixes):
+        # Test that we can load data from a compressed file
+        wanted = np.arange(6).reshape((2, 3))
+        linesep = ('\n', '\r\n', '\r')
+        for sep in linesep:
+            data = '0 1 2' + sep + '3 4 5'
+            for suffix in suffixes:
+                with temppath(suffix=suffix) as name:
+                    with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
+                        f.write(data)
+                    res = self.loadfunc(name, encoding='UTF-32-LE')
+                    assert_array_equal(res, wanted)
+                    with fopen(name, "rt", encoding='UTF-32-LE') as f:
+                        res = self.loadfunc(f)
+                    assert_array_equal(res, wanted)
+
+    # Python 2's .open does not support encoding
+    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
+    def test_compressed_gzip(self):
+        self.check_compressed(gzip.open, ('.gz',))
+
+    @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
+    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
+    def test_compressed_bz2(self):
+        self.check_compressed(bz2.open, ('.bz2',))
+
+    @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
+    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
+    def test_compressed_lzma(self):
+        self.check_compressed(lzma.open, ('.xz', '.lzma'))
+
+    def test_encoding(self):
+        with temppath() as path:
+            with open(path, "wb") as f:
+                f.write('0.\n1.\n2.'.encode("UTF-16"))
+            x = self.loadfunc(path, encoding="UTF-16")
+            assert_array_equal(x, [0., 1., 2.])
+
+    def test_stringload(self):
+        # umlauts
+        nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
+        with temppath() as path:
+            with open(path, "wb") as f:
+                f.write(nonascii.encode("UTF-16"))
+            x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode)
+            assert_array_equal(x, nonascii)
+
+    def test_binary_decode(self):
+        utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
+        v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16')
+        assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
+
+    def test_converters_decode(self):
+        # test converters that decode strings
+        c = TextIO()
+        c.write(b'\xcf\x96')
+        c.seek(0)
+        x = self.loadfunc(c, dtype=np.unicode,
+                          converters={0: lambda x: x.decode('UTF-8')})
+        a = np.array([b'\xcf\x96'.decode('UTF-8')])
+        assert_array_equal(x, a)
+
+    def test_converters_nodecode(self):
+        # test native string converters enabled by setting an encoding
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        with temppath() as path:
+            with io.open(path, 'wt', encoding='UTF-8') as f:
+                f.write(utf8)
+            x = self.loadfunc(path, dtype=np.unicode,
+                              converters={0: lambda x: x + 't'},
+                              encoding='UTF-8')
+            a = np.array([utf8 + 't'])
+            assert_array_equal(x, a)
+
+
+class TestLoadTxt(LoadTxtBase):
+    loadfunc = staticmethod(np.loadtxt)
+
+    def setup(self):
+        # lower chunksize for testing
+        self.orig_chunk = np.lib.npyio._loadtxt_chunksize
+        np.lib.npyio._loadtxt_chunksize = 1
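+    # (Lowering _loadtxt_chunksize to 1 presumably forces loadtxt's chunked
+    # reading path to be exercised even by the tiny inputs in these tests;
+    # teardown restores the original module-level value.)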
+ def teardown(self): + np.lib.npyio._loadtxt_chunksize = self.orig_chunk + + def test_record(self): + c = TextIO() + c.write('1 2\n3 4') + c.seek(0) + x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_array_equal(x, a) + + d = TextIO() + d.write('M 64.0 75.0\nF 25.0 60.0') + d.seek(0) + mydescriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + b = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=mydescriptor) + y = np.loadtxt(d, dtype=mydescriptor) + assert_array_equal(y, b) + + def test_array(self): + c = TextIO() + c.write('1 2\n3 4') + + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([[1, 2], [3, 4]], int) + assert_array_equal(x, a) + + c.seek(0) + x = np.loadtxt(c, dtype=float) + a = np.array([[1, 2], [3, 4]], float) + assert_array_equal(x, a) + + def test_1D(self): + c = TextIO() + c.write('1\n2\n3\n4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('1,2,3,4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',') + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + def test_missing(self): + c = TextIO() + c.write('1,2,3,,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + a = np.array([1, 2, 3, -999, 5], int) + assert_array_equal(x, a) + + def test_converters_with_usecols(self): + c = TextIO() + c.write('1,2,3,,5\n6,7,8,9,10\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + a = np.array([[2, -999], [7, 9]], int) + assert_array_equal(x, a) + + def test_comments_unicode(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=u'#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_byte(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=b'#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_multiple(self): + c = TextIO() + c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=['#', '@', '//']) + a = np.array([[1, 2, 3], [4, 5, 6]], int) + assert_array_equal(x, a) + + def test_comments_multi_chars(self): + c = TextIO() + c.write('/* comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='/*') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + # Check that '/*' is not transformed to ['/', '*'] + c = TextIO() + c.write('*/ comment\n1,2,3,5\n') + c.seek(0) + assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',', + comments='/*') + + def test_skiprows(self): + c = TextIO() + c.write('comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_usecols(self): + a = np.array([[1, 2], [3, 4]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1,)) + assert_array_equal(x, a[:, 1]) + + a = np.array([[1, 2, 3], [3, 4, 5]], float) + c = BytesIO() + np.savetxt(c, 
a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1, 2)) + assert_array_equal(x, a[:, 1:]) + + # Testing with arrays instead of tuples. + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) + assert_array_equal(x, a[:, 1:]) + + # Testing with an integer instead of a sequence + for int_type in [int, np.int8, np.int16, + np.int32, np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]: + to_read = int_type(1) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=to_read) + assert_array_equal(x, a[:, 1]) + + # Testing with some crazy custom integer type + class CrazyInt(object): + def __index__(self): + return 1 + + crazy_int = CrazyInt() + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=crazy_int) + assert_array_equal(x, a[:, 1]) + + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(crazy_int,)) + assert_array_equal(x, a[:, 1]) + + # Checking with dtypes defined converters. + data = '''JOE 70.1 25.3 + BOB 60.5 27.9 + ''' + c = TextIO(data) + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(arr['stid'], [b"JOE", b"BOB"]) + assert_equal(arr['temp'], [25.3, 27.9]) + + # Testing non-ints in usecols + c.seek(0) + bogus_idx = 1.5 + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx), + np.loadtxt, c, usecols=bogus_idx + ) + + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx), + np.loadtxt, c, usecols=[0, bogus_idx, 0] + ) + + def test_fancy_dtype(self): + c = TextIO() + c.write('1,2,3.0\n4,5,6.0\n') + c.seek(0) + dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + x = np.loadtxt(c, dtype=dt, delimiter=',') + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) + assert_array_equal(x, a) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_3d_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + assert_array_equal(x, a) + + def test_str_dtype(self): + # see gh-8033 + c = ["str1", "str2"] + + for dt in (str, np.bytes_): + a = np.array(["str1", "str2"], dtype=dt) + x = np.loadtxt(c, dtype=dt) + assert_array_equal(x, a) + + def test_empty_file(self): + with suppress_warnings() as sup: + sup.filter(message="loadtxt: Empty input file:") + c = TextIO() + x = np.loadtxt(c) + assert_equal(x.shape, (0,)) + x = np.loadtxt(c, dtype=np.int64) + assert_equal(x.shape, (0,)) + assert_(x.dtype == np.int64) + + def test_unused_converter(self): + c = TextIO() + c.writelines(['1 21\n', '3 42\n']) + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_array_equal(data, [21, 42]) + + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_array_equal(data, [33, 66]) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, 
+ converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + def test_uint64_type(self): + tgt = (9223372043271415339, 9223372043271415853) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.uint64) + assert_equal(res, tgt) + + def test_int64_type(self): + tgt = (-9223372036854775807, 9223372036854775807) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.int64) + assert_equal(res, tgt) + + def test_from_float_hex(self): + # IEEE doubles and floats only, otherwise the float32 + # conversion may fail. + tgt = np.logspace(-10, 10, 5).astype(np.float32) + tgt = np.hstack((tgt, -tgt)).astype(float) + inp = '\n'.join(map(float.hex, tgt)) + c = TextIO() + c.write(inp) + for dt in [float, np.float32]: + c.seek(0) + res = np.loadtxt(c, dtype=dt) + assert_equal(res, tgt, err_msg="%s" % dt) + + def test_from_complex(self): + tgt = (complex(1, 1), complex(1, -1)) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, tgt) + + def test_complex_misformatted(self): + # test for backward compatibility + # some complex formats used to generate x+-yj + a = np.zeros((2, 2), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.16e') + c.seek(0) + txt = c.read() + c.seek(0) + # misformat the sign on the imaginary part, gh 7895 + txt_bad = txt.replace(b'e+00-', b'e00+-') + assert_(txt_bad != txt) + c.write(txt_bad) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, a) + + def test_universal_newline(self): + with temppath() as name: + with open(name, 'w') as f: + f.write('1 21\r3 42\r') + data = np.loadtxt(name) + assert_array_equal(data, [[1, 21], [3, 42]]) + + def test_empty_field_after_tab(self): + c = TextIO() + c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t') + c.seek(0) + dt = {'names': ('x', 'y', 'z', 'comment'), + 'formats': (' num rows + c = TextIO() + c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1, max_rows=6) + a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int) + assert_array_equal(x, a) + +class Testfromregex(object): + def test_record(self): + c = TextIO() + c.write('1.312 foo\n1.534 bar\n4.444 qux') + c.seek(0) + + dt = [('num', np.float64), ('val', 'S3')] + x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt) + a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_2(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.int32), ('val', 'S3')] + x = np.fromregex(c, r"(\d+)\s+(...)", dt) + a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_3(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.float64)] + x = np.fromregex(c, r"(\d+)\s+...", dt) + a = np.array([(1312,), (1534,), (4444,)], dtype=dt) + assert_array_equal(x, a) + + def test_record_unicode(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux') + + dt = [('num', np.float64), ('val', 'U4')] + x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8') + a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'), + (4.444, 'qux')], dtype=dt) + 
assert_array_equal(x, a) + + regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE) + x = np.fromregex(path, regexp, dt, encoding='UTF-8') + assert_array_equal(x, a) + + def test_compiled_bytes(self): + regexp = re.compile(b'(\\d)') + c = BytesIO(b'123') + dt = [('num', np.float64)] + a = np.array([1, 2, 3], dtype=dt) + x = np.fromregex(c, regexp, dt) + assert_array_equal(x, a) + +#####-------------------------------------------------------------------------- + + +class TestFromTxt(LoadTxtBase): + loadfunc = staticmethod(np.genfromtxt) + + def test_record(self): + # Test w/ explicit dtype + data = TextIO('1 2\n3 4') + test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) + control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_equal(test, control) + # + data = TextIO('M 64.0 75.0\nF 25.0 60.0') + descriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], + dtype=descriptor) + test = np.ndfromtxt(data, dtype=descriptor) + assert_equal(test, control) + + def test_array(self): + # Test outputting a standard ndarray + data = TextIO('1 2\n3 4') + control = np.array([[1, 2], [3, 4]], dtype=int) + test = np.ndfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data.seek(0) + control = np.array([[1, 2], [3, 4]], dtype=float) + test = np.loadtxt(data, dtype=float) + assert_array_equal(test, control) + + def test_1D(self): + # Test squeezing to 1D + control = np.array([1, 2, 3, 4], int) + # + data = TextIO('1\n2\n3\n4\n') + test = np.ndfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data = TextIO('1,2,3,4\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',') + assert_array_equal(test, control) + + def test_comments(self): + # Test the stripping of comments + control = np.array([1, 2, 3, 5], int) + # Comment on its own line + data = TextIO('# comment\n1,2,3,5\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + # Comment at the end of a line + data = TextIO('1,2,3,5# comment\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + + def test_skiprows(self): + # Test row skipping + control = np.array([1, 2, 3, 5], int) + kwargs = dict(dtype=int, delimiter=',') + # + data = TextIO('comment\n1,2,3,5\n') + test = np.ndfromtxt(data, skip_header=1, **kwargs) + assert_equal(test, control) + # + data = TextIO('# comment\n1,2,3,5\n') + test = np.loadtxt(data, skiprows=1, **kwargs) + assert_equal(test, control) + + def test_skip_footer(self): + data = ["# %i" % i for i in range(1, 6)] + data.append("A, B, C") + data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)]) + data[-1] = "99,99" + kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10) + test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) + ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)], + dtype=[(_, float) for _ in "ABC"]) + assert_equal(test, ctrl) + + def test_skip_footer_with_invalid(self): + with suppress_warnings() as sup: + sup.filter(ConversionWarning) + basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' + # Footer too small to get rid of all invalid values + assert_raises(ValueError, np.genfromtxt, + TextIO(basestr), skip_footer=1) + # except ValueError: + # pass + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + a = np.genfromtxt(TextIO(basestr), 
skip_footer=3) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n' + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) + a = np.genfromtxt( + TextIO(basestr), skip_footer=3, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) + + def test_header(self): + # Test retrieving a header + data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.ndfromtxt(data, dtype=None, names=True) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = {'gender': np.array([b'M', b'F']), + 'age': np.array([64.0, 25.0]), + 'weight': np.array([75.0, 60.0])} + assert_equal(test['gender'], control['gender']) + assert_equal(test['age'], control['age']) + assert_equal(test['weight'], control['weight']) + + def test_auto_dtype(self): + # Test the automatic definition of the output dtype + data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.ndfromtxt(data, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = [np.array([b'A', b'BCD']), + np.array([64, 25]), + np.array([75.0, 60.0]), + np.array([3 + 4j, 5 + 6j]), + np.array([True, False]), ] + assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) + for (i, ctrl) in enumerate(control): + assert_equal(test['f%i' % i], ctrl) + + def test_auto_dtype_uniform(self): + # Tests whether the output dtype can be uniformized + data = TextIO('1 2 3 4\n5 6 7 8\n') + test = np.ndfromtxt(data, dtype=None) + control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + assert_equal(test, control) + + def test_fancy_dtype(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',') + control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_names_overwrite(self): + # Test overwriting the names of the dtype + descriptor = {'names': ('g', 'a', 'w'), + 'formats': ('S1', 'i4', 'f4')} + data = TextIO(b'M 64.0 75.0\nF 25.0 60.0') + names = ('gender', 'age', 'weight') + test = np.ndfromtxt(data, dtype=descriptor, names=names) + descriptor['names'] = names + control = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=descriptor) + assert_equal(test, control) + + def test_commented_header(self): + # Check that names can be retrieved even if the line is commented out. + data = TextIO(""" +#gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + # The # is part of the first name and should be deleted automatically. 
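+        # (With names=True the first non-skipped line supplies the field
+        # names; genfromtxt strips the leading comment marker from it, so
+        # the names below still come out as gender, age and weight.)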
+ with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], + dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) + assert_equal(test, ctrl) + # Ditto, but we should get rid of the first element + data = TextIO(b""" +# gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test, ctrl) + + def test_names_and_comments_none(self): + # Tests case when names is true but comments is None (gh-10780) + data = TextIO('col1 col2\n 1 2\n 3 4') + test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True) + control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)]) + assert_equal(test, control) + + def test_autonames_and_usecols(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = np.array(('aaaa', 45, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_with_usecols(self): + # Test the combination user-defined converters and usecol + data = TextIO('1,2,3,,5\n6,7,8,9,10\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + control = np.array([[2, -999], [7, 9]], int) + assert_equal(test, control) + + def test_converters_with_usecols_and_names(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, + converters={'C': lambda s: 2 * int(s)}) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = np.array(('aaaa', 90, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_cornercases(self): + # Test the conversion to datetime. + converter = { + 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.ndfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + + def test_converters_cornercases2(self): + # Test the conversion to datetime64. 
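+        # Same input as test_converters_cornercases above, but the converter
+        # wraps strptime in np.datetime64, so with dtype=None the column is
+        # inferred as datetime64[us] instead of object.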
+ converter = { + 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.ndfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', 'datetime64[us]'), ('stid', float)]) + assert_equal(test, control) + + def test_unused_converter(self): + # Test whether unused converters are forgotten + data = TextIO("1 21\n 3 42\n") + test = np.ndfromtxt(data, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_equal(test, [21, 42]) + # + data.seek(0) + test = np.ndfromtxt(data, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_equal(test, [33, 66]) + + def test_invalid_converter(self): + strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or + (b'r' not in x.lower() and x.strip() or 0.0)) + strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or + (b'%' not in x.lower() and x.strip() or 0.0)) + s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" + "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" + "D02N03,10/10/2004,R 1,,7,145.55") + kwargs = dict( + converters={2: strip_per, 3: strip_rand}, delimiter=",", + dtype=None) + assert_raises(ConverterError, np.genfromtxt, s, **kwargs) + + def test_tricky_converter_bug1666(self): + # Test some corner cases + s = TextIO('q1,2\nq3,4') + cnv = lambda s: float(s[1:]) + test = np.genfromtxt(s, delimiter=',', converters={0: cnv}) + control = np.array([[1., 2.], [3., 4.]]) + assert_equal(test, control) + + def test_dtype_with_converters(self): + dstr = "2009; 23; 46" + test = np.ndfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: bytes}) + control = np.array([('2009', 23., 46)], + dtype=[('f0', '|S4'), ('f1', float), ('f2', float)]) + assert_equal(test, control) + test = np.ndfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: float}) + control = np.array([2009., 23., 46],) + assert_equal(test, control) + + def test_dtype_with_converters_and_usecols(self): + dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" + dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3} + dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')] + conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} + test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv) + control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp) + assert_equal(test, control) + dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')] + test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0,1,3), names=None, converters=conv) + control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp) + assert_equal(test, control) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + ndtype = [('nest', [('idx', int), ('code', object)])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + def test_userconverters_with_explicit_dtype(self): + # Test user_converters w/ explicit (standard) dtype + 
data = TextIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: bytes}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + def test_utf8_userconverters_with_explicit_dtype(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip') + test = np.genfromtxt(path, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: np.unicode}, + encoding='UTF-8') + control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)], + dtype=[('', '|U11'), ('', float)]) + assert_equal(test, control) + + def test_spacedelimiter(self): + # Test space delimiter + data = TextIO("1 2 3 4 5\n6 7 8 9 10") + test = np.ndfromtxt(data) + control = np.array([[1., 2., 3., 4., 5.], + [6., 7., 8., 9., 10.]]) + assert_equal(test, control) + + def test_integer_delimiter(self): + # Test using an integer for delimiter + data = " 1 2 3\n 4 5 67\n890123 4" + test = np.genfromtxt(TextIO(data), delimiter=3) + control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]]) + assert_equal(test, control) + + def test_missing(self): + data = TextIO('1,2,3,,5\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + control = np.array([1, 2, 3, -999, 5], int) + assert_equal(test, control) + + def test_missing_with_tabs(self): + # Test w/ a delimiter tab + txt = "1\t2\t3\n\t2\t\n1\t\t3" + test = np.genfromtxt(TextIO(txt), delimiter="\t", + usemask=True,) + ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],) + ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool) + assert_equal(test.data, ctrl_d) + assert_equal(test.mask, ctrl_m) + + def test_usecols(self): + # Test the selection of columns + # Select 1 column + control = np.array([[1, 2], [3, 4]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.ndfromtxt(data, dtype=float, usecols=(1,)) + assert_equal(test, control[:, 1]) + # + control = np.array([[1, 2, 3], [3, 4, 5]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.ndfromtxt(data, dtype=float, usecols=(1, 2)) + assert_equal(test, control[:, 1:]) + # Testing with arrays instead of tuples. 
+ data.seek(0) + test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2])) + assert_equal(test, control[:, 1:]) + + def test_usecols_as_css(self): + # Test giving usecols with a comma-separated string + data = "1 2 3\n4 5 6" + test = np.genfromtxt(TextIO(data), + names="a, b, c", usecols="a, c") + ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"]) + assert_equal(test, ctrl) + + def test_usecols_with_structured_dtype(self): + # Test usecols with an explicit structured dtype + data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9") + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + test = np.ndfromtxt( + data, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(test['stid'], [b"JOE", b"BOB"]) + assert_equal(test['temp'], [25.3, 27.9]) + + def test_usecols_with_integer(self): + # Test usecols with an integer + test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0) + assert_equal(test, np.array([1., 4.])) + + def test_usecols_with_named_columns(self): + # Test usecols with named columns + ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) + data = "1 2 3\n4 5 6" + kwargs = dict(names="a, b, c") + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data), + usecols=('a', 'c'), **kwargs) + assert_equal(test, ctrl) + + def test_empty_file(self): + # Test that an empty file raises the proper warning. + with suppress_warnings() as sup: + sup.filter(message="genfromtxt: Empty input file:") + data = TextIO() + test = np.genfromtxt(data) + assert_equal(test, np.array([])) + + def test_fancy_dtype_alt(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.mafromtxt(data, dtype=fancydtype, delimiter=',') + control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.ndfromtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_withmissing(self): + data = TextIO('A,B\n0,1\n2,N/A') + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.mafromtxt(data, dtype=None, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + # + data.seek(0) + test = np.mafromtxt(data, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', float), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_user_missing_values(self): + data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + basekwargs = dict(dtype=None, delimiter=",", names=True,) + mdtype = [('A', int), ('B', float), ('C', complex)] + # + test = np.mafromtxt(TextIO(data), missing_values="N/A", + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], + dtype=mdtype) + assert_equal(test, control) + # + basekwargs['dtype'] = mdtype + test = np.mafromtxt(TextIO(data), + missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], 
+ mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + # + test = np.mafromtxt(TextIO(data), + missing_values={0: -9, 'B': -99, 'C': -999j}, + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + + def test_user_filling_values(self): + # Test with missing and filling values + ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) + data = "N/A, 2, 3\n4, ,???" + kwargs = dict(delimiter=",", + dtype=int, + names="a,b,c", + missing_values={0: "N/A", 'b': " ", 2: "???"}, + filling_values={0: 0, 'b': 0, 2: -999}) + test = np.genfromtxt(TextIO(data), **kwargs) + ctrl = np.array([(0, 2, 3), (4, 0, -999)], + dtype=[(_, int) for _ in "abc"]) + assert_equal(test, ctrl) + # + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) + assert_equal(test, ctrl) + + data2 = "1,2,*,4\n5,*,7,8\n" + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=0) + ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=-1) + ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) + assert_equal(test, ctrl) + + def test_withmissing_float(self): + data = TextIO('A,B\n0,1.5\n2,-999.00') + test = np.mafromtxt(data, dtype=None, delimiter=',', + missing_values='-999.0', names=True,) + control = ma.array([(0, 1.5), (2, -1.)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_with_masked_column_uniform(self): + # Test masked column + data = TextIO('1 2 3\n4 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]]) + assert_equal(test, control) + + def test_with_masked_column_various(self): + # Test masked column + data = TextIO('True 2 3\nFalse 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([(1, 2, 3), (0, 5, 6)], + mask=[(0, 1, 0), (0, 1, 0)], + dtype=[('f0', bool), ('f1', bool), ('f2', int)]) + assert_equal(test, control) + + def test_invalid_raise(self): + # Test invalid raise + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + # + kwargs = dict(delimiter=",", dtype=None, names=True) + # XXX: is there a better way to get the return value of the + # callable in assert_warns ? 
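+        # Workaround: the helper stores genfromtxt's result in its mutable
+        # default argument, so the parsed array is still reachable after
+        # assert_warns returns.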
+ ret = {} + + def f(_ret={}): + _ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs) + assert_warns(ConversionWarning, f, _ret=ret) + mtest = ret['mtest'] + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) + # + mdata.seek(0) + assert_raises(ValueError, np.ndfromtxt, mdata, + delimiter=",", names=True) + + def test_invalid_raise_with_usecols(self): + # Test invalid_raise with usecols + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + kwargs = dict(delimiter=",", dtype=None, names=True, + invalid_raise=False) + # XXX: is there a better way to get the return value of the + # callable in assert_warns ? + ret = {} + + def f(_ret={}): + _ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs) + assert_warns(ConversionWarning, f, _ret=ret) + mtest = ret['mtest'] + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) + # + mdata.seek(0) + mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs) + assert_equal(len(mtest), 50) + control = np.ones(50, dtype=[(_, int) for _ in 'ab']) + control[[10 * _ for _ in range(5)]] = (2, 2) + assert_equal(mtest, control) + + def test_inconsistent_dtype(self): + # Test inconsistent dtype + data = ["1, 1, 1, 1, -1.1"] * 50 + mdata = TextIO("\n".join(data)) + + converters = {4: lambda x: "(%s)" % x} + kwargs = dict(delimiter=",", converters=converters, + dtype=[(_, int) for _ in 'abcde'],) + assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) + + def test_default_field_format(self): + # Test default format + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.ndfromtxt(TextIO(data), + delimiter=",", dtype=None, defaultfmt="f%02i") + ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)], + dtype=[("f00", int), ("f01", int), ("f02", float)]) + assert_equal(mtest, ctrl) + + def test_single_dtype_wo_names(self): + # Test single dtype w/o names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.ndfromtxt(TextIO(data), + delimiter=",", dtype=float, defaultfmt="f%02i") + ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_explicit_names(self): + # Test single dtype w explicit names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.ndfromtxt(TextIO(data), + delimiter=",", dtype=float, names="a, b, c") + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_implicit_names(self): + # Test single dtype w implicit names + data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7" + mtest = np.ndfromtxt(TextIO(data), + delimiter=",", dtype=float, names=True) + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_easy_structured_dtype(self): + # Test easy structured dtype + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.ndfromtxt(TextIO(data), delimiter=",", + dtype=(int, float, float), defaultfmt="f_%02i") + ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)], + dtype=[("f_00", int), ("f_01", float), ("f_02", float)]) + assert_equal(mtest, ctrl) + + def test_autostrip(self): + # Test autostrip + data = "01/01/2003 , 1.3, abcde" + kwargs = dict(delimiter=",", dtype=None) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + mtest = np.ndfromtxt(TextIO(data), **kwargs) + assert_(w[0].category is np.VisibleDeprecationWarning) + ctrl = 
np.array([('01/01/2003 ', 1.3, ' abcde')], + dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) + assert_equal(mtest, ctrl) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs) + assert_(w[0].category is np.VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003', 1.3, 'abcde')], + dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) + assert_equal(mtest, ctrl) + + def test_replace_space(self): + # Test the 'replace_space' option + txt = "A.A, B (B), C:C\n1, 2, 3.14" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_replace_space_known_dtype(self): + # Test the 'replace_space' (and related) options when dtype != None + txt = "A.A, B (B), C:C\n1, 2, 3" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_incomplete_names(self): + # Test w/ incomplete names + data = "A,,C\n0,1,2\n3,4,5" + kwargs = dict(delimiter=",", names=True) + # w/ dtype=None + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, int) for _ in ('A', 'f0', 'C')]) + test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs) + assert_equal(test, ctrl) + # w/ default dtype + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, float) for _ in ('A', 'f0', 'C')]) + test = np.ndfromtxt(TextIO(data), **kwargs) + + def test_names_auto_completion(self): + # Make sure that names are properly completed + data = "1 2 3\n 4 5 6" + test = np.genfromtxt(TextIO(data), + dtype=(int, float, int), names="a") + ctrl = np.array([(1, 2, 3), (4, 5, 6)], + dtype=[('a', int), ('f0', float), ('f1', int)]) + assert_equal(test, ctrl) + + def test_names_with_usecols_bug1636(self): + # Make sure we pick up the right names w/ usecols + data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4" + ctrl_names = ("A", "C", "E") + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=(0, 2, 4), names=True) + assert_equal(test.dtype.names, 
ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=int, delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + + def test_fixed_width_names(self): + # Test fix-width w/ names + data = " A B C\n 0 1 2.3\n 45 67 9." + kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None) + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.ndfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + # + kwargs = dict(delimiter=5, names=True, dtype=None) + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.ndfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_filling_values(self): + # Test missing values + data = b"1, 2, 3\n1, , 5\n0, 6, \n" + kwargs = dict(delimiter=",", dtype=None, filling_values=-999) + ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) + test = np.ndfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_comments_is_none(self): + # Github issue 329 (None was previously being converted to 'None'). + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1], b'testNonetherestofthedata') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1], b' testNonetherestofthedata') + + def test_latin1(self): + latin1 = b'\xf6\xfc\xf6' + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + latin1 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1, 0], b"test1") + assert_equal(test[1, 1], b"testNonethe" + latin1) + assert_equal(test[1, 2], b"test3") + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding='latin1') + assert_equal(test[1, 0], u"test1") + assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1')) + assert_equal(test[1, 2], u"test3") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test['f0'], 0) + assert_equal(test['f1'], b"testNonethe" + latin1) + + def test_binary_decode_autodtype(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_utf8_byte_encoding(self): + utf8 = b"\xcf\x96" + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + utf8 + b",test3\n" + s = norm + enc + norm + with 
warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + ctl = np.array([ + [b'norm1', b'norm2', b'norm3'], + [b'test1', b'testNonethe' + utf8, b'test3'], + [b'norm1', b'norm2', b'norm3']]) + assert_array_equal(test, ctl) + + def test_utf8_file(self): + utf8 = b"\xcf\x96" + with temppath() as path: + with open(path, "wb") as f: + f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + ctl = np.array([ + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"], + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]], + dtype=np.unicode) + assert_array_equal(test, ctl) + + # test a mixed dtype + with open(path, "wb") as f: + f.write(b"0,testNonethe" + utf8) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + assert_equal(test['f0'], 0) + assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) + + def test_utf8_file_nodtype_unicode(self): + # bytes encoding with non-latin1 -> unicode upcast + utf8 = u'\u03d6' + latin1 = u'\xf6\xfc\xf6' + + # skip test if cannot encode utf8 test string with preferred + # encoding. The preferred encoding is assumed to be the default + # encoding of io.open. Will need to change this for PyTest, maybe + # using pytest.mark.xfail(raises=***). + try: + encoding = locale.getpreferredencoding() + utf8.encode(encoding) + except (UnicodeError, ImportError): + pytest.skip('Skipping test_utf8_file_nodtype_unicode, ' + 'unable to encode utf8 in preferred encoding') + + with temppath() as path: + with io.open(path, "wt") as f: + f.write(u"norm1,norm2,norm3\n") + f.write(u"norm1," + latin1 + u",norm3\n") + f.write(u"test1,testNonethe" + utf8 + u",test3\n") + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', + np.VisibleDeprecationWarning) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',') + # Check for warning when encoding not specified. 
+ assert_(w[0].category is np.VisibleDeprecationWarning) + ctl = np.array([ + ["norm1", "norm2", "norm3"], + ["norm1", latin1, "norm3"], + ["test1", "testNonethe" + utf8, "test3"]], + dtype=np.unicode) + assert_array_equal(test, ctl) + + def test_recfromtxt(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.recfromtxt(data, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + + def test_recfromcsv(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) + test = np.recfromcsv(data, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + # + data = TextIO('A,B\n0,1\n2,3') + test = np.recfromcsv(data, missing_values='N/A',) + control = np.array([(0, 1), (2, 3)], + dtype=[('a', int), ('b', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,3') + dtype = [('a', int), ('b', float)] + test = np.recfromcsv(data, missing_values='N/A', dtype=dtype) + control = np.array([(0, 1), (2, 3)], + dtype=dtype) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + #gh-10394 + data = TextIO('color\n"red"\n"blue"') + test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')}) + control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))]) + assert_equal(test.dtype, control.dtype) + assert_equal(test, control) + + def test_max_rows(self): + # Test the `max_rows` keyword argument. + data = '1 2\n3 4\n5 6\n7 8\n9 10\n' + txt = TextIO(data) + a1 = np.genfromtxt(txt, max_rows=3) + a2 = np.genfromtxt(txt) + assert_equal(a1, [[1, 2], [3, 4], [5, 6]]) + assert_equal(a2, [[7, 8], [9, 10]]) + + # max_rows must be at least 1. + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0) + + # An input with several invalid rows. 
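+        # Rows 3 and 6-8 hold a single field, so once two float columns
+        # have been inferred they are invalid; the calls below combine
+        # that with max_rows and invalid_raise.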
+ data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n' + + test = np.genfromtxt(TextIO(data), max_rows=2) + control = np.array([[1., 1.], [2., 2.]]) + assert_equal(test, control) + + # Test keywords conflict + assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1, + max_rows=4) + + # Test with invalid value + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) + + # Test with invalid not raise + with suppress_warnings() as sup: + sup.filter(ConversionWarning) + + test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) + control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) + assert_equal(test, control) + + test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False) + control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) + assert_equal(test, control) + + # Structured array with field names. + data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n' + + # Test with header, names and comments + txt = TextIO(data) + test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True) + control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)], + dtype=[('c', ' should convert to float + # 2**34 = 17179869184 => should convert to int64 + # 2**10 = 1024 => should convert to int (int32 on 32-bit systems, + # int64 on 64-bit systems) + + data = TextIO('73786976294838206464 17179869184 1024') + + test = np.ndfromtxt(data, dtype=None) + + assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) + + assert_(test.dtype['f0'] == float) + assert_(test.dtype['f1'] == np.int64) + assert_(test.dtype['f2'] == np.integer) + + assert_allclose(test['f0'], 73786976294838206464.) + assert_equal(test['f1'], 17179869184) + assert_equal(test['f2'], 1024) + + +@pytest.mark.skipif(Path is None, reason="No pathlib.Path") +class TestPathUsage(object): + # Test that pathlib.Path can be used + def test_loadtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([[1.1, 2], [3, 4]]) + np.savetxt(path, a) + x = np.loadtxt(path) + assert_array_equal(x, a) + + def test_save_load(self): + # Test that pathlib.Path instances can be used with save. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path) + assert_array_equal(data, a) + + def test_save_load_memmap(self): + # Test that pathlib.Path instances can be loaded mem-mapped. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path, mmap_mode='r') + assert_array_equal(data, a) + # close the mem-mapped file + del data + + def test_save_load_memmap_readwrite(self): + # Test that pathlib.Path instances can be written mem-mapped. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + b = np.load(path, mmap_mode='r+') + a[0][0] = 5 + b[0][0] = 5 + del b # closes the file + data = np.load(path) + assert_array_equal(data, a) + + def test_savez_load(self): + # Test that pathlib.Path instances can be used with savez. + with temppath(suffix='.npz') as path: + path = Path(path) + np.savez(path, lab='place holder') + with np.load(path) as data: + assert_array_equal(data['lab'], 'place holder') + + def test_savez_compressed_load(self): + # Test that pathlib.Path instances can be used with savez. 
+ with temppath(suffix='.npz') as path: + path = Path(path) + np.savez_compressed(path, lab='place holder') + data = np.load(path) + assert_array_equal(data['lab'], 'place holder') + data.close() + + def test_genfromtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([(1, 2), (3, 4)]) + np.savetxt(path, a) + data = np.genfromtxt(path) + assert_array_equal(a, data) + + def test_ndfromtxt(self): + # Test outputting a standard ndarray + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write(u'1 2\n3 4') + + control = np.array([[1, 2], [3, 4]], dtype=int) + test = np.ndfromtxt(path, dtype=int) + assert_array_equal(test, control) + + def test_mafromtxt(self): + # From `test_fancy_dtype_alt` above + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write(u'1,2,3.0\n4,5,6.0\n') + + test = np.mafromtxt(path, delimiter=',') + control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)]) + assert_equal(test, control) + + def test_recfromtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write(u'A,B\n0,1\n2,3') + + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.recfromtxt(path, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + def test_recfromcsv(self): + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write(u'A,B\n0,1\n2,3') + + kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) + test = np.recfromcsv(path, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + +def test_gzip_load(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + +def test_gzip_loadtxt(): + # Thanks to another windows brokenness, we can't use + # NamedTemporaryFile: a file created from this function cannot be + # reopened by another open call. 
So we first put the gzipped string + # of the test reference array, write it to a securely opened file, + # which is then read from by the loadtxt function + s = BytesIO() + g = gzip.GzipFile(fileobj=s, mode='w') + g.write(b'1 2 3\n') + g.close() + + s.seek(0) + with temppath(suffix='.gz') as name: + with open(name, 'wb') as f: + f.write(s.read()) + res = np.loadtxt(name) + s.close() + + assert_array_equal(res, [1, 2, 3]) + + +def test_gzip_loadtxt_from_string(): + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + f.write(b'1 2 3\n') + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.loadtxt(f), [1, 2, 3]) + + +def test_npzfile_dict(): + s = BytesIO() + x = np.zeros((3, 3)) + y = np.zeros((3, 3)) + + np.savez(s, x=x, y=y) + s.seek(0) + + z = np.load(s) + + assert_('x' in z) + assert_('y' in z) + assert_('x' in z.keys()) + assert_('y' in z.keys()) + + for f, a in z.items(): + assert_(f in ['x', 'y']) + assert_equal(a.shape, (3, 3)) + + assert_(len(z.items()) == 2) + + for f in z: + assert_(f in ['x', 'y']) + + assert_('x' in z.keys()) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_load_refcount(): + # Check that objects returned by np.load are directly freed based on + # their refcount, rather than needing the gc to collect them. + + f = BytesIO() + np.savez(f, [1, 2, 3]) + f.seek(0) + + with assert_no_gc_cycles(): + np.load(f) + + f.seek(0) + dt = [("a", 'u1', 2), ("b", 'u1', 2)] + with assert_no_gc_cycles(): + x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_io.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_io.pyc new file mode 100644 index 0000000..966e607 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_io.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_mixins.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_mixins.py new file mode 100644 index 0000000..3dd5346 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_mixins.py @@ -0,0 +1,224 @@ +from __future__ import division, absolute_import, print_function + +import numbers +import operator +import sys + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + + +PY2 = sys.version_info.major < 3 + + +# NOTE: This class should be kept as an exact copy of the example from the +# docstring for NDArrayOperatorsMixin. + +class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + def __init__(self, value): + self.value = np.asarray(value) + + # One might also consider adding the built-in list type to this + # list, to support operations like np.add(array_like, list) + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + out = kwargs.get('out', ()) + for x in inputs + out: + # Only support operations with instances of _HANDLED_TYPES. + # Use ArrayLike instead of type(self) for isinstance to + # allow subclasses that don't override __array_ufunc__ to + # handle ArrayLike objects. + if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): + return NotImplemented + + # Defer to the implementation of the ufunc on unwrapped values. 
+ inputs = tuple(x.value if isinstance(x, ArrayLike) else x + for x in inputs) + if out: + kwargs['out'] = tuple( + x.value if isinstance(x, ArrayLike) else x + for x in out) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if type(result) is tuple: + # multiple return values + return tuple(type(self)(x) for x in result) + elif method == 'at': + # no return value + return None + else: + # one return value + return type(self)(result) + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, self.value) + + +def wrap_array_like(result): + if type(result) is tuple: + return tuple(ArrayLike(r) for r in result) + else: + return ArrayLike(result) + + +def _assert_equal_type_and_value(result, expected, err_msg=None): + assert_equal(type(result), type(expected), err_msg=err_msg) + if isinstance(result, tuple): + assert_equal(len(result), len(expected), err_msg=err_msg) + for result_item, expected_item in zip(result, expected): + _assert_equal_type_and_value(result_item, expected_item, err_msg) + else: + assert_equal(result.value, expected.value, err_msg=err_msg) + assert_equal(getattr(result.value, 'dtype', None), + getattr(expected.value, 'dtype', None), err_msg=err_msg) + + +_ALL_BINARY_OPERATORS = [ + operator.lt, + operator.le, + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.floordiv, + # TODO: test div on Python 2, only + operator.mod, + divmod, + pow, + operator.lshift, + operator.rshift, + operator.and_, + operator.xor, + operator.or_, +] + + +class TestNDArrayOperatorsMixin(object): + + def test_array_like_add(self): + + def check(result): + _assert_equal_type_and_value(result, ArrayLike(0)) + + check(ArrayLike(0) + 0) + check(0 + ArrayLike(0)) + + check(ArrayLike(0) + np.array(0)) + check(np.array(0) + ArrayLike(0)) + + check(ArrayLike(np.array(0)) + 0) + check(0 + ArrayLike(np.array(0))) + + check(ArrayLike(np.array(0)) + np.array(0)) + check(np.array(0) + ArrayLike(np.array(0))) + + def test_inplace(self): + array_like = ArrayLike(np.array([0])) + array_like += 1 + _assert_equal_type_and_value(array_like, ArrayLike(np.array([1]))) + + array = np.array([0]) + array += ArrayLike(1) + _assert_equal_type_and_value(array, ArrayLike(np.array([1]))) + + def test_opt_out(self): + + class OptOut(object): + """Object that opts out of __array_ufunc__.""" + __array_ufunc__ = None + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + array_like = ArrayLike(1) + opt_out = OptOut() + + # supported operations + assert_(array_like + opt_out is opt_out) + assert_(opt_out + array_like is opt_out) + + # not supported + with assert_raises(TypeError): + # don't use the Python default, array_like = array_like + opt_out + array_like += opt_out + with assert_raises(TypeError): + array_like - opt_out + with assert_raises(TypeError): + opt_out - array_like + + def test_subclass(self): + + class SubArrayLike(ArrayLike): + """Should take precedence over ArrayLike.""" + + x = ArrayLike(0) + y = SubArrayLike(1) + _assert_equal_type_and_value(x + y, y) + _assert_equal_type_and_value(y + x, y) + + def test_object(self): + x = ArrayLike(0) + obj = object() + with assert_raises(TypeError): + x + obj + with assert_raises(TypeError): + obj + x + with assert_raises(TypeError): + x += obj + + def test_unary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in [operator.neg, + operator.pos, + abs, + operator.invert]: + 
_assert_equal_type_and_value(op(array_like), ArrayLike(op(array))) + + def test_forward_binary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(array, 1)) + actual = op(array_like, 1) + err_msg = 'failed for operator {}'.format(op) + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_reflected_binary_methods(self): + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(2, 1)) + actual = op(2, ArrayLike(1)) + err_msg = 'failed for operator {}'.format(op) + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_matmul(self): + array = np.array([1, 2], dtype=np.float64) + array_like = ArrayLike(array) + expected = ArrayLike(np.float64(5)) + _assert_equal_type_and_value(expected, np.matmul(array_like, array)) + if not PY2: + _assert_equal_type_and_value( + expected, operator.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array, array_like)) + + def test_ufunc_at(self): + array = ArrayLike(np.array([1, 2, 3, 4])) + assert_(np.negative.at(array, np.array([0, 1])) is None) + _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4])) + + def test_ufunc_two_outputs(self): + mantissa, exponent = np.frexp(2 ** -3) + expected = (ArrayLike(mantissa), ArrayLike(exponent)) + _assert_equal_type_and_value( + np.frexp(ArrayLike(2 ** -3)), expected) + _assert_equal_type_and_value( + np.frexp(ArrayLike(np.array(2 ** -3))), expected) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_mixins.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_mixins.pyc new file mode 100644 index 0000000..2c43ca4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_mixins.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_nanfunctions.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_nanfunctions.py new file mode 100644 index 0000000..504372f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_nanfunctions.py @@ -0,0 +1,927 @@ +from __future__ import division, absolute_import, print_function + +import warnings + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_almost_equal, assert_no_warnings, + assert_raises, assert_array_equal, suppress_warnings + ) + + +# Test data +_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], + [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], + [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], + [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) + + +# Rows of _ndat with nans removed +_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), + np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), + np.array([0.1042, -0.5954]), + np.array([0.1610, 0.1859, 0.3146])] + +# Rows of _ndat with nans converted to ones +_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], + [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], + [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], + [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]]) + +# Rows of _ndat with nans converted to zeros +_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], + [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], + [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], + [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) + + +class TestNanFunctions_MinMax(object): + + nanfuncs = [np.nanmin, np.nanmax] + stdfuncs = [np.min, np.max] + + def test_mutation(self): + # 
Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for f in self.nanfuncs: + for axis in [None, 0, 1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + # Check scalars + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(np.nan))) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_masked(self): + mat = np.ma.fix_invalid(_ndat) + msk = mat._mask.copy() + for f in [np.nanmin]: + res = f(mat, axis=1) + tgt = f(_ndat, axis=1) + assert_equal(res, tgt) + assert_equal(mat._mask, msk) + assert_(not np.isinf(mat).any()) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
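+
+    # Behaviour sketch for the checks above (values per the tests in this
+    # class): NaNs are skipped, but an all-NaN slice warns and yields NaN.
+    #   >>> np.nanmax([1.0, np.nan, 3.0])
+    #   3.0
+    #   >>> np.nanmax([np.nan, np.nan])  # RuntimeWarning: All-NaN slice
+    #   nan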
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + # check that rows of nan are dealt with for subclasses (#4628) + mine[1] = np.nan + for f in self.nanfuncs: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(np.isnan(res[1]) and not np.isnan(res[0]) + and not np.isnan(res[2])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine) + assert_(res.shape == ()) + assert_(res != np.nan) + assert_(len(w) == 0) + + def test_object_array(self): + arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object) + assert_equal(np.nanmin(arr), 1.0) + assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0]) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + # assert_equal does not work on object arrays of nan + assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan]) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + +class TestNanFunctions_ArgminArgmax(object): + + nanfuncs = [np.nanargmin, np.nanargmax] + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_result_values(self): + for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): + for row in _ndat: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in") + ind = f(row) + val = row[ind] + # comparing with NaN is tricky as the result + # is always false except for NaN != NaN + assert_(not np.isnan(val)) + assert_(not fcmp(val, row).any()) + assert_(not np.equal(val, row[:ind]).any()) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for f in self.nanfuncs: + for axis in [None, 0, 1]: + assert_raises(ValueError, f, mat, axis=axis) + assert_raises(ValueError, f, np.nan) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + assert_raises(ValueError, f, mat, axis=axis) + for axis in [1]: + res = f(mat, axis=axis) + assert_equal(res, np.zeros(0)) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
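+
+    # Contract sketch: unlike nanmin/nanmax there is no index to return for
+    # an all-NaN slice, so these raise instead of warning (see test_allnans).
+    #   >>> np.nanargmax([1.0, np.nan, 3.0])
+    #   2
+    #   >>> np.nanargmax([np.nan, np.nan])  # raises ValueError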
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + +class TestNanFunctions_IntTypes(object): + + int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8, + np.uint16, np.uint32, np.uint64) + + mat = np.array([127, 39, 93, 87, 46]) + + def integer_arrays(self): + for dtype in self.int_types: + yield self.mat.astype(dtype) + + def test_nanmin(self): + tgt = np.min(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanmin(mat), tgt) + + def test_nanmax(self): + tgt = np.max(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanmax(mat), tgt) + + def test_nanargmin(self): + tgt = np.argmin(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanargmin(mat), tgt) + + def test_nanargmax(self): + tgt = np.argmax(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanargmax(mat), tgt) + + def test_nansum(self): + tgt = np.sum(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nansum(mat), tgt) + + def test_nanprod(self): + tgt = np.prod(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanprod(mat), tgt) + + def test_nancumsum(self): + tgt = np.cumsum(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nancumsum(mat), tgt) + + def test_nancumprod(self): + tgt = np.cumprod(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nancumprod(mat), tgt) + + def test_nanmean(self): + tgt = np.mean(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanmean(mat), tgt) + + def test_nanvar(self): + tgt = np.var(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanvar(mat), tgt) + + tgt = np.var(mat, ddof=1) + for mat in self.integer_arrays(): + assert_equal(np.nanvar(mat, ddof=1), tgt) + + def test_nanstd(self): + tgt = np.std(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanstd(mat), tgt) + + tgt = np.std(self.mat, ddof=1) + for mat in self.integer_arrays(): + assert_equal(np.nanstd(mat, ddof=1), tgt) + + +class SharedNanFunctionsTestsMixin(object): + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_dtype(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(np.ComplexWarning) + tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_char(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(np.ComplexWarning) + tgt = rf(mat, dtype=c, axis=1).dtype.type + res = nf(mat, dtype=c, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=c, axis=None).dtype.type + res = nf(mat, dtype=c, axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt, "res %s, tgt %s" % (res, tgt)) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
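+
+    # Shape sketch for the keepdims checks above: the reduced axis is kept
+    # with length one, so the result still broadcasts against the input.
+    #   >>> np.nansum(np.eye(3), axis=1, keepdims=True).shape
+    #   (3, 1)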
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + array = np.eye(3) + mine = array.view(MyNDArray) + for f in self.nanfuncs: + expected_shape = f(array, axis=0).shape + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array, axis=1).shape + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array).shape + res = f(mine) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + + +class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nansum, np.nanprod] + stdfuncs = [np.sum, np.prod] + + def test_allnans(self): + # Check for FutureWarning + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = np.nansum([np.nan]*3, axis=None) + assert_(res == 0, 'result is not 0') + assert_(len(w) == 0, 'warning raised') + # Check scalar + res = np.nansum(np.nan) + assert_(res == 0, 'result is not 0') + assert_(len(w) == 0, 'warning raised') + # Check there is no warning for not all-nan + np.nansum([0]*3, axis=None) + assert_(len(w) == 0, 'unwanted warning raised') + + def test_empty(self): + for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): + mat = np.zeros((0, 3)) + tgt = [tgt_value]*3 + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = [] + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = tgt_value + res = f(mat, axis=None) + assert_equal(res, tgt) + + +class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nancumsum, np.nancumprod] + stdfuncs = [np.cumsum, np.cumprod] + + def test_allnans(self): + for f, tgt_value in zip(self.nanfuncs, [0, 1]): + # Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input + with assert_no_warnings(): + res = f([np.nan]*3, axis=None) + tgt = tgt_value*np.ones((3)) + assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value)) + # Check scalar + res = f(np.nan) + tgt = tgt_value*np.ones((1)) + assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value)) + # Check there is no warning for not all-nan + f([0]*3, axis=None) + + def test_empty(self): + for f, tgt_value in zip(self.nanfuncs, [0, 1]): + mat = np.zeros((0, 3)) + tgt = tgt_value*np.ones((0, 3)) + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = mat + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = np.zeros((0)) + res = f(mat, axis=None) + assert_equal(res, tgt) + + def test_keepdims(self): + for f, g in zip(self.nanfuncs, self.stdfuncs): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = f(mat, axis=axis, out=None) + res = g(mat, axis=axis, out=None) + assert_(res.ndim == tgt.ndim) + + for f in self.nanfuncs: + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + rs = np.random.RandomState(0) + d[rs.rand(*d.shape) < 0.5] = np.nan + res = f(d, axis=None) + assert_equal(res.shape, (1155,)) + for axis in np.arange(4): + res = f(d, axis=axis) + assert_equal(res.shape, (3, 5, 7, 11)) + + def test_result_values(self): + for axis in (-2, -1, 0, 1, None): + tgt = np.cumprod(_ndat_ones, axis=axis) + res = np.nancumprod(_ndat, axis=axis) + assert_almost_equal(res, tgt) + tgt = np.cumsum(_ndat_zeros,axis=axis) + res = np.nancumsum(_ndat, axis=axis) + assert_almost_equal(res, tgt) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = 
np.eye(3) + for axis in (-2, -1, 0, 1): + tgt = rf(mat, axis=axis) + res = nf(mat, axis=axis, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + +class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nanmean, np.nanvar, np.nanstd] + stdfuncs = [np.mean, np.var, np.std] + + def test_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool_, np.int_, np.object_]: + assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) + + def test_out_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool_, np.int_, np.object_]: + out = np.empty(_ndat.shape[0], dtype=dtype) + assert_raises(TypeError, f, _ndat, axis=1, out=out) + + def test_ddof(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in [0, 1]: + tgt = [rf(d, ddof=ddof) for d in _rdat] + res = nf(_ndat, axis=1, ddof=ddof) + assert_almost_equal(res, tgt) + + def test_ddof_too_big(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + dsize = [len(d) for d in _rdat] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in range(5): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + sup.filter(np.ComplexWarning) + tgt = [ddof >= d for d in dsize] + res = nf(_ndat, axis=1, ddof=ddof) + assert_equal(np.isnan(res), tgt) + if any(tgt): + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 0) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for f in self.nanfuncs: + for axis in [None, 0, 1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + # Check scalar + assert_(np.isnan(f(np.nan))) + assert_(len(w) == 2) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + +class TestNanFunctions_Median(object): + + def test_mutation(self): + # Check that passed array is not modified. 
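+        # (nanmedian may partition/sort internally, but only on a copy
+        # unless the caller passes overwrite_input=True.)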
+ ndat = _ndat.copy() + np.nanmedian(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) + res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanmedian(d, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanmedian(d, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.median(mat, axis=1) + res = np.nanmedian(nan_mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.median(mat, axis=None) + res = np.nanmedian(nan_mat, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_small_large(self): + # test the small and large code paths, current cutoff 400 elements + for s in [5, 20, 51, 200, 1000]: + d = np.random.randn(4, s) + # Randomly set some elements to NaN: + w = np.random.randint(0, d.size, size=d.size // 5) + d.ravel()[w] = np.nan + d[:,0] = 1. # ensure at least one good value + # use normal median without nans to compare + tgt = [] + for x in d: + nonan = np.compress(~np.isnan(x), x) + tgt.append(np.median(nonan, overwrite_input=True)) + + assert_array_equal(np.nanmedian(d, axis=-1), tgt) + + def test_result_values(self): + tgt = [np.median(d) for d in _rdat] + res = np.nanmedian(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for axis in [None, 0, 1]: + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + if axis is None: + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 3) + # Check scalar + assert_(np.isnan(np.nanmedian(np.nan))) + if axis is None: + assert_(len(sup.log) == 2) + else: + assert_(len(sup.log) == 4) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_(np.nanmedian(0.) == 0.) 
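+
+    # Behaviour sketch for the axis handling tested in this class:
+    #   >>> np.nanmedian([[10.0, np.nan, 4.0], [3.0, 2.0, 1.0]], axis=1)
+    #   array([7., 2.])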
+ + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.nanmedian, d, axis=-5) + assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5)) + assert_raises(np.AxisError, np.nanmedian, d, axis=4) + assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4)) + assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) + + def test_float_special(self): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) + assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) + assert_equal(np.nanmedian(a), inf) + + # minimum fill value check + a = np.array([[np.nan, np.nan, inf], + [np.nan, np.nan, inf]]) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) + assert_equal(np.nanmedian(a, axis=1), inf) + + # no mask path + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.nanmedian(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + if inf > 0: + assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.nanmedian(a), 4.5) + else: + assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.nanmedian(a), -2.5) + assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) + + for i in range(0, 10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=1), inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [inf] * j) + + a = np.array([([np.nan] * i) + ([-inf] * j)] * 2) + assert_equal(np.nanmedian(a), -inf) + assert_equal(np.nanmedian(a, axis=1), -inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [-inf] * j) + + +class TestNanFunctions_Percentile(object): + + def test_mutation(self): + # Check that passed array is not modified. 
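+        # (nanpercentile takes the same overwrite_input flag as nanmedian;
+        # the default False must leave the input untouched.)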
+ ndat = _ndat.copy() + np.nanpercentile(ndat, 30) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.percentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + res = np.nanpercentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanpercentile(d, 90, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.percentile(mat, 42, axis=1) + res = np.nanpercentile(nan_mat, 42, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.percentile(mat, 42, axis=None) + res = np.nanpercentile(nan_mat, 42, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_result_values(self): + tgt = [np.percentile(d, 28) for d in _rdat] + res = np.nanpercentile(_ndat, 28, axis=1) + assert_almost_equal(res, tgt) + # Transpose the array to fit the output convention of numpy.percentile + tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat]) + res = np.nanpercentile(_ndat, (28, 98), axis=1) + assert_almost_equal(res, tgt) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for axis in [None, 0, 1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all()) + if axis is None: + assert_(len(w) == 1) + else: + assert_(len(w) == 3) + assert_(issubclass(w[0].category, RuntimeWarning)) + # Check scalar + assert_(np.isnan(np.nanpercentile(np.nan, 60))) + if axis is None: + assert_(len(w) == 2) + else: + assert_(len(w) == 4) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_equal(np.nanpercentile(0., 100), 0.) 
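+        # A 1-D input with a scalar q likewise collapses to a true scalar: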
+ a = np.arange(6) + r = np.nanpercentile(a, 50, axis=0) + assert_equal(r, 2.5) + assert_(np.isscalar(r)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5)) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=4) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, 4)) + assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) + + def test_multiple_percentiles(self): + perc = [50, 100] + mat = np.ones((4, 3)) + nan_mat = np.nan * mat + # For checking consistency in higher dimensional case + large_mat = np.ones((3, 4, 5)) + large_mat[:, 0:2:4, :] = 0 + large_mat[:, :, 3:] *= 2 + for axis in [None, 0, 1]: + for keepdim in [False, True]: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "All-NaN slice encountered") + val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) + nan_val = np.nanpercentile(nan_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val.shape, val.shape) + + val = np.percentile(large_mat, perc, axis=axis, + keepdims=keepdim) + nan_val = np.nanpercentile(large_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val, val) + + megamat = np.ones((3, 4, 5, 6)) + assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6)) + + +class TestNanFunctions_Quantile(object): + # most of this is already tested by TestPercentile + + def test_regression(self): + ar = np.arange(24).reshape(2, 3, 4).astype(float) + ar[0][1] = np.nan + + assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50)) + assert_equal(np.nanquantile(ar, q=0.5, axis=0), + np.nanpercentile(ar, q=50, axis=0)) + assert_equal(np.nanquantile(ar, q=0.5, axis=1), + np.nanpercentile(ar, q=50, axis=1)) + assert_equal(np.nanquantile(ar, q=[0.5], axis=1), + np.nanpercentile(ar, q=[50], axis=1)) + assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1), + np.nanpercentile(ar, q=[25, 50, 75], axis=1)) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.nanquantile(x, 0), 0.) + assert_equal(np.nanquantile(x, 1), 3.5) + assert_equal(np.nanquantile(x, 0.5), 1.75) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.nanquantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.nanquantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_nanfunctions.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_nanfunctions.pyc new file mode 100644 index 0000000..fed510b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_nanfunctions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_packbits.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_packbits.py new file mode 100644 index 0000000..fde5c37 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_packbits.py @@ -0,0 +1,268 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import assert_array_equal, assert_equal, assert_raises + + +def test_packbits(): + # Copied from the docstring. 
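+    # Packing is big-endian per byte: each run of up to eight bits along the
+    # axis is zero-padded on the right, e.g. [1, 0, 1] -> 0b10100000 -> 160.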
+ a = [[[1, 0, 1], [0, 1, 0]], + [[1, 1, 0], [0, 0, 1]]] + for dt in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dt) + b = np.packbits(arr, axis=-1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]])) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_empty(): + shapes = [ + (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0), + (0, 0, 20), (0, 0, 0), + ] + for dt in '?bBhHiIlLqQ': + for shape in shapes: + a = np.empty(shape, dtype=dt) + b = np.packbits(a) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, (0,)) + + +def test_packbits_empty_with_axis(): + # Original shapes and lists of packed shapes for different axes. + shapes = [ + ((0,), [(0,)]), + ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]), + ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]), + ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]), + ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]), + ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]), + ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]), + ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]), + ] + for dt in '?bBhHiIlLqQ': + for in_shape, out_shapes in shapes: + for ax, out_shape in enumerate(out_shapes): + a = np.empty(in_shape, dtype=dt) + b = np.packbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + + +def test_packbits_large(): + # test data large enough for 16 byte vectorization + a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, + 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, + 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, + 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, + 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]) + a = a.repeat(3) + for dtype in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + b = np.packbits(arr, axis=None) + assert_equal(b.dtype, np.uint8) + r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252, + 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255, + 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63, + 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112, + 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1, + 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15, + 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227, + 129, 248, 227, 129, 199, 31, 128] + assert_array_equal(b, r) + # equal for size being multiple of 8 + assert_array_equal(np.unpackbits(b)[:-4], a) + + # check last byte of different remainders (16 byte vectorization) + b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)] + assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, + 198, 196, 192]) + + + arr = arr.reshape(36, 25) + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, 
[[190, 186, 178, 178, 150, 215, 87, 83, 83, 195, + 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105, + 107, 75, 74, 88], + [72, 216, 248, 241, 227, 195, 202, 90, 90, 83, + 83, 119, 127, 109, 73, 64, 208, 244, 189, 45, + 41, 104, 122, 90, 18], + [113, 120, 248, 216, 152, 24, 60, 52, 182, 150, + 150, 150, 146, 210, 210, 246, 255, 255, 223, + 151, 21, 17, 17, 131, 163], + [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, + 92, 78, 110, 39, 181, 149, 220, 222, 218, 218, + 202, 234, 170, 168], + [0, 128, 128, 192, 80, 112, 48, 160, 160, 224, + 240, 208, 144, 128, 160, 224, 240, 208, 144, + 144, 176, 240, 224, 192, 128]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 127, 192, 0], + [ 7, 252, 15, 128], + [240, 0, 28, 0], + [255, 128, 0, 128], + [192, 31, 255, 128], + [142, 63, 0, 0], + [255, 240, 7, 0], + [ 7, 224, 14, 0], + [126, 0, 224, 0], + [255, 255, 199, 0], + [ 56, 28, 126, 0], + [113, 248, 227, 128], + [227, 142, 63, 0], + [ 0, 28, 112, 0], + [ 15, 248, 3, 128], + [ 28, 126, 56, 0], + [ 56, 255, 241, 128], + [240, 7, 224, 0], + [227, 129, 192, 128], + [255, 255, 254, 0], + [126, 0, 224, 0], + [ 3, 241, 248, 0], + [ 0, 255, 241, 128], + [128, 0, 255, 128], + [224, 1, 255, 128], + [248, 252, 126, 0], + [ 0, 7, 3, 128], + [224, 113, 248, 0], + [ 0, 252, 127, 128], + [142, 63, 224, 0], + [224, 14, 63, 0], + [ 7, 3, 128, 0], + [113, 255, 255, 128], + [ 28, 113, 199, 0], + [ 7, 227, 142, 0], + [ 14, 56, 252, 0]]) + + arr = arr.T.copy() + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255, + 56, 113, 227, 0, 15, 28, 56, 240, 227, 255, + 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224, + 7, 113, 28, 7, 14], + [127, 252, 0, 128, 31, 63, 240, 224, 0, 255, + 28, 248, 142, 28, 248, 126, 255, 7, 129, 255, + 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14, + 3, 255, 113, 227, 56], + [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126, + 227, 63, 112, 3, 56, 241, 224, 192, 254, 224, + 248, 241, 255, 255, 126, 3, 248, 127, 224, 63, + 128, 255, 199, 142, 252], + [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0, + 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, + 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 72, 113, 214, 0], + [186, 216, 120, 210, 128], + [178, 248, 248, 210, 128], + [178, 241, 216, 64, 192], + [150, 227, 152, 68, 80], + [215, 195, 24, 5, 112], + [ 87, 202, 60, 5, 48], + [ 83, 90, 52, 1, 160], + [ 83, 90, 182, 72, 160], + [195, 83, 150, 88, 224], + [199, 83, 150, 92, 240], + [206, 119, 150, 92, 208], + [204, 127, 146, 78, 144], + [204, 109, 210, 110, 128], + [140, 73, 210, 39, 160], + [140, 64, 246, 181, 224], + [136, 208, 255, 149, 240], + [136, 244, 255, 220, 208], + [ 8, 189, 223, 222, 144], + [ 40, 45, 151, 218, 144], + [105, 41, 21, 218, 176], + [107, 104, 17, 202, 240], + [ 75, 122, 17, 234, 224], + [ 74, 90, 131, 170, 192], + [ 88, 18, 163, 168, 128]]) + + + # result is the same if input is multiplied with a nonzero value + for dtype in 'bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + rnd = np.random.randint(low=np.iinfo(dtype).min, + high=np.iinfo(dtype).max, size=arr.size, + dtype=dtype) + rnd[rnd == 0] = 1 + arr *= rnd.astype(dtype) + b = np.packbits(arr, axis=-1) + assert_array_equal(np.unpackbits(b)[:-4], a) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_very_large(): + # test some with a larger arrays gh-8637 + # code is 
covered earlier but larger array makes crash on bug more likely + for s in range(950, 1050): + for dt in '?bBhHiIlLqQ': + x = np.ones((200, s), dtype=bool) + np.packbits(x, axis=1) + + +def test_unpackbits(): + # Copied from the docstring. + a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]])) + + +def test_unpackbits_empty(): + a = np.empty((0,), dtype=np.uint8) + b = np.unpackbits(a) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.empty((0,))) + + +def test_unpackbits_empty_with_axis(): + # Lists of packed shapes for different axes and unpacked shapes. + shapes = [ + ([(0,)], (0,)), + ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)), + ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)), + ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)), + ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)), + ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)), + ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)), + ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)), + ] + for in_shapes, out_shape in shapes: + for ax, in_shape in enumerate(in_shapes): + a = np.empty(in_shape, dtype=np.uint8) + b = np.unpackbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + + +def test_unpackbits_large(): + # test all possible numbers via comparison to already tested packbits + d = np.arange(277, dtype=np.uint8) + assert_array_equal(np.packbits(np.unpackbits(d)), d) + assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2]) + d = np.tile(d, (3, 1)) + assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d) + d = d.T.copy() + assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_packbits.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_packbits.pyc new file mode 100644 index 0000000..92747f2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_packbits.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_polynomial.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_polynomial.py new file mode 100644 index 0000000..77414ba --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_polynomial.py @@ -0,0 +1,261 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose + ) + + +class TestPolynomial(object): + def test_poly1d_str_and_repr(self): + p = np.poly1d([1., 2, 3]) + assert_equal(repr(p), 'poly1d([1., 2., 3.])') + assert_equal(str(p), + ' 2\n' + '1 x + 2 x + 3') + + q = np.poly1d([3., 2, 1]) + assert_equal(repr(q), 'poly1d([3., 2., 1.])') + assert_equal(str(q), + ' 2\n' + '3 x + 2 x + 1') + + r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j]) + assert_equal(str(r), + ' 3 2\n' + '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)') + + assert_equal(str(np.poly1d([-3, -2, -1])), + ' 2\n' + '-3 x - 2 x - 1') + + def test_poly1d_resolution(self): + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p(0), 3.0) + assert_equal(p(5), 38.0) + assert_equal(q(0), 1.0) + assert_equal(q(5), 86.0) + + def test_poly1d_math(self): + # here we use some simple coeffs to make 
calculations easier + p = np.poly1d([1., 2, 4]) + q = np.poly1d([4., 2, 1]) + assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) + assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.])) + assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.])) + + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) + assert_equal(p + q, np.poly1d([4., 4., 4.])) + assert_equal(p - q, np.poly1d([-2., 0., 2.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) + assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) + assert_equal(p.deriv(), np.poly1d([2., 2.])) + assert_equal(p.deriv(2), np.poly1d([2.])) + assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])), + (np.poly1d([1., -1.]), np.poly1d([0.]))) + + def test_poly1d_misc(self): + p = np.poly1d([1., 2, 3]) + assert_equal(np.asarray(p), np.array([1., 2., 3.])) + assert_equal(len(p), 2) + assert_equal((p[0], p[1], p[2], p[3]), (3.0, 2.0, 1.0, 0)) + + def test_poly1d_variable_arg(self): + q = np.poly1d([1., 2, 3], variable='y') + assert_equal(str(q), + ' 2\n' + '1 y + 2 y + 3') + q = np.poly1d([1., 2, 3], variable='lambda') + assert_equal(str(q), + ' 2\n' + '1 lambda + 2 lambda + 3') + + def test_poly(self): + assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), + [1, -3, -2, 6]) + + # From matlab docs + A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] + assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) + + # Should produce real output for perfect conjugates + assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) + assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j, + 1-2j, 1.+3.5j, 1-3.5j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j]))) + assert_(np.isrealobj(np.poly([1j, -1j]))) + assert_(np.isrealobj(np.poly([1, -1]))) + + assert_(np.iscomplexobj(np.poly([1j, -1.0000001j]))) + + np.random.seed(42) + a = np.random.randn(100) + 1j*np.random.randn(100) + assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a)))))) + + def test_roots(self): + assert_array_equal(np.roots([1, 0, 0]), [0, 0]) + + def test_str_leading_zeros(self): + p = np.poly1d([4, 3, 2, 1]) + p[3] = 0 + assert_equal(str(p), + " 2\n" + "3 x + 2 x + 1") + + p = np.poly1d([1, 2]) + p[0] = 0 + p[1] = 0 + assert_equal(str(p), " \n0") + + def test_polyfit(self): + c = np.array([3., 2., 1.]) + x = np.linspace(0, 2, 7) + y = np.polyval(c, x) + err = [1, -1, 1, -1, 1, -1, 1] + weights = np.arange(8, 1, -1)**2/7.0 + + # Check exception when too few points for variance estimate. 
Note that + # the estimate requires the number of data points to exceed + # degree + 1 + assert_raises(ValueError, np.polyfit, + [1], [1], deg=0, cov=True) + + # check 1D case + m, cov = np.polyfit(x, y+err, 2, cov=True) + est = [3.8571, 0.2857, 1.619] + assert_almost_equal(est, m, decimal=4) + val0 = [[ 1.4694, -2.9388, 0.8163], + [-2.9388, 6.3673, -2.1224], + [ 0.8163, -2.1224, 1.161 ]] + assert_almost_equal(val0, cov, decimal=4) + + m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) + assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) + val = [[ 4.3964, -5.0052, 0.4878], + [-5.0052, 6.8067, -0.9089], + [ 0.4878, -0.9089, 0.3337]] + assert_almost_equal(val, cov2, decimal=4) + + m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled") + assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4) + val = [[ 0.1473, -0.1677, 0.0163], + [-0.1677, 0.228 , -0.0304], + [ 0.0163, -0.0304, 0.0112]] + assert_almost_equal(val, cov3, decimal=4) + + # check 2D (n,1) case + y = y[:, np.newaxis] + c = c[:, np.newaxis] + assert_almost_equal(c, np.polyfit(x, y, 2)) + # check 2D (n,2) case + yy = np.concatenate((y, y), axis=1) + cc = np.concatenate((c, c), axis=1) + assert_almost_equal(cc, np.polyfit(x, yy, 2)) + + m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) + assert_almost_equal(est, m[:, 0], decimal=4) + assert_almost_equal(est, m[:, 1], decimal=4) + assert_almost_equal(val0, cov[:, :, 0], decimal=4) + assert_almost_equal(val0, cov[:, :, 1], decimal=4) + + # check order 1 (deg=0) case, were the analytic results are simple + np.random.seed(123) + y = np.random.normal(size=(4, 10000)) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True) + # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5. + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # Without scaling, since reduced chi2 is 1, the result should be the same. + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]), + deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.5) + # If we estimate our errors wrong, no change with scaling: + w = np.full(y.shape[0], 1./0.5) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True) + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # But if we do not scale, our estimate for the error in the mean will + # differ. 
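+        # With cov="unscaled" the weights are taken at face value rather than
+        # rescaled by the reduced chi-squared, so the over-confident
+        # w = 1/0.5 below halves the reported error (0.25 instead of 0.5).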
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.25) + + def test_objects(self): + from decimal import Decimal + p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) + p2 = p * Decimal('1.333333333333333') + assert_(p2[1] == Decimal("3.9999999999999990")) + p2 = p.deriv() + assert_(p2[1] == Decimal('8.0')) + p2 = p.integ() + assert_(p2[3] == Decimal("1.333333333333333333333333333")) + assert_(p2[2] == Decimal('1.5')) + assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) + p = np.poly([Decimal(1), Decimal(2)]) + assert_equal(np.poly([Decimal(1), Decimal(2)]), + [1, Decimal(-3), Decimal(2)]) + + def test_complex(self): + p = np.poly1d([3j, 2j, 1j]) + p2 = p.integ() + assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) + p2 = p.deriv() + assert_((p2.coeffs == [6j, 2j]).all()) + + def test_integ_coeffs(self): + p = np.poly1d([3, 2, 1]) + p2 = p.integ(3, k=[9, 7, 6]) + assert_( + (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) + + def test_zero_dims(self): + try: + np.poly(np.zeros((0, 0))) + except ValueError: + pass + + def test_poly_int_overflow(self): + """ + Regression test for gh-5096. + """ + v = np.arange(1, 21) + assert_almost_equal(np.poly(v), np.poly(np.diag(v))) + + def test_poly_eq(self): + p = np.poly1d([1, 2, 3]) + p2 = np.poly1d([1, 2, 4]) + assert_equal(p == None, False) + assert_equal(p != None, True) + assert_equal(p == p, True) + assert_equal(p == p2, False) + assert_equal(p != p2, True) + + def test_polydiv(self): + b = np.poly1d([2, 6, 6, 1]) + a = np.poly1d([-1j, (1+2j), -(2+1j), 1]) + q, r = np.polydiv(b, a) + assert_equal(q.coeffs.dtype, np.complex128) + assert_equal(r.coeffs.dtype, np.complex128) + assert_equal(q*a + r, b) + + def test_poly_coeffs_immutable(self): + """ Coefficients should not be modifiable """ + p = np.poly1d([1, 2, 3]) + + try: + # despite throwing an exception, this used to change state + p.coeffs += 1 + except Exception: + pass + assert_equal(p.coeffs, [1, 2, 3]) + + p.coeffs[2] += 10 + assert_equal(p.coeffs, [1, 2, 3]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_polynomial.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_polynomial.pyc new file mode 100644 index 0000000..a4f7282 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_polynomial.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_recfunctions.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_recfunctions.py new file mode 100644 index 0000000..0696936 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_recfunctions.py @@ -0,0 +1,911 @@ +from __future__ import division, absolute_import, print_function + +import pytest + +import numpy as np +import numpy.ma as ma +from numpy.ma.mrecords import MaskedRecords +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises +from numpy.lib.recfunctions import ( + drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, + find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, + repack_fields, unstructured_to_structured, structured_to_unstructured, + apply_along_fields, require_fields, assign_fields_by_name) +get_names = np.lib.recfunctions.get_names +get_names_flat = np.lib.recfunctions.get_names_flat +zip_descr = np.lib.recfunctions.zip_descr + + +class TestRecFunctions(object): 
+ # Misc tests + + def setup(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array([('A', 1.), ('B', 2.)], + dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_zip_descr(self): + # Test zip_descr + (w, x, y, z) = self.data + + # Std array + test = zip_descr((x, x), flatten=True) + assert_equal(test, + np.dtype([('', int), ('', int)])) + test = zip_descr((x, x), flatten=False) + assert_equal(test, + np.dtype([('', int), ('', int)])) + + # Std & flexible-dtype + test = zip_descr((x, z), flatten=True) + assert_equal(test, + np.dtype([('', int), ('A', '|S3'), ('B', float)])) + test = zip_descr((x, z), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('A', '|S3'), ('B', float)])])) + + # Standard & nested dtype + test = zip_descr((x, w), flatten=True) + assert_equal(test, + np.dtype([('', int), + ('a', int), + ('ba', float), ('bb', int)])) + test = zip_descr((x, w), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('a', int), + ('b', [('ba', float), ('bb', int)])])])) + + def test_drop_fields(self): + # Test drop_fields + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + + # A basic field + test = drop_fields(a, 'a') + control = np.array([((2, 3.0),), ((5, 6.0),)], + dtype=[('b', [('ba', float), ('bb', int)])]) + assert_equal(test, control) + + # Another basic field (but nesting two fields) + test = drop_fields(a, 'b') + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # A nested sub-field + test = drop_fields(a, ['ba', ]) + control = np.array([(1, (3.0,)), (4, (6.0,))], + dtype=[('a', int), ('b', [('bb', int)])]) + assert_equal(test, control) + + # All the nested sub-field from a field: zap that field + test = drop_fields(a, ['ba', 'bb']) + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + test = drop_fields(a, ['a', 'b']) + assert_(test is None) + + def test_rename_fields(self): + # Test rename fields + a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + dtype=[('a', int), + ('b', [('ba', float), ('bb', (float, 2))])]) + test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) + newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] + control = a.view(newdtype) + assert_equal(test.dtype, newdtype) + assert_equal(test, control) + + def test_get_names(self): + # Test get_names + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ('ba', 'bb')))) + + def test_get_names_flat(self): + # Test get_names_flat + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names_flat(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b', 'ba', 'bb')) + + def test_get_fieldstructure(self): + # Test get_fieldstructure + + # No nested fields + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': []}) + + # One 1-nested field + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) + + # 
One 2-nested fields + ndtype = np.dtype([('A', int), + ('B', [('BA', int), + ('BB', [('BBA', int), ('BBB', int)])])]) + test = get_fieldstructure(ndtype) + control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], + 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + assert_equal(test, control) + + def test_find_duplicates(self): + # Test find_duplicates + a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), + (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], + mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), + (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], + dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 2] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='A', return_index=True) + control = [0, 1, 2, 3, 5] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='B', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BA', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BB', return_index=True) + control = [0, 1, 2, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_find_duplicates_ignoremask(self): + # Test the ignoremask option of find_duplicates + ndtype = [('a', int)] + a = ma.array([1, 1, 1, 2, 2, 3, 3], + mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + test = find_duplicates(a, ignoremask=True, return_index=True) + control = [0, 1, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 1, 2, 3, 4, 6] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_repack_fields(self): + dt = np.dtype('u1,f4,i8', align=True) + a = np.zeros(2, dtype=dt) + + assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) + assert_equal(repack_fields(a).itemsize, 13) + assert_equal(repack_fields(repack_fields(dt), align=True), dt) + + # make sure type is preserved + dt = np.dtype((np.record, dt)) + assert_(repack_fields(dt).type is np.record) + + def test_structured_to_unstructured(self): + a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + out = structured_to_unstructured(a) + assert_equal(out, np.zeros((4,5), dtype='f8')) + + b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) + assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ])) + + c = np.arange(20).reshape((4,5)) + out = unstructured_to_structured(c, a.dtype) + want = np.array([( 0, ( 1., 2), [ 3., 4.]), + ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), + (15, (16., 17), [18., 19.])], + dtype=[('a', 'i4'), + ('b', [('f0', 'f4'), ('f1', 'u2')]), + ('c', 'f4', (2,))]) + assert_equal(out, want) + + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + assert_equal(apply_along_fields(np.mean, d), + np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ])) + assert_equal(apply_along_fields(np.mean, d[['x', 'z']]), + np.array([ 3. , 5.5, 9. , 11. 
])) + + # check that for uniform field dtypes we get a view, not a copy: + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(dd.base is d) + assert_(ddd.base is d) + + # test that nested fields with identical names don't break anything + point = np.dtype([('x', int), ('y', int)]) + triangle = np.dtype([('a', point), ('b', point), ('c', point)]) + arr = np.zeros(10, triangle) + res = structured_to_unstructured(arr, dtype=int) + assert_equal(res, np.zeros((10, 6), dtype=int)) + + + def test_field_assignment_by_name(self): + a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + newdt = [('b', 'f4'), ('c', 'u1')] + + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + + b = np.array([(1,2), (3,4)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype)) + + # test nested fields + a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])]) + newdt = [('a', [('c', 'u1')])] + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + b = np.array([((2,),), ((3,),)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype)) + + # test unstructured code path for 0d arrays + a, b = np.array(3), np.array(0) + assign_fields_by_name(b, a) + assert_equal(b[()], 3) + + +class TestRecursiveFillFields(object): + # Test recursive_fill_fields. + def test_simple_flexible(self): + # Test recursive_fill_fields on flexible-array + a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + b = np.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = np.array([(1, 10.), (2, 20.), (0, 0.)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + def test_masked_flexible(self): + # Test recursive_fill_fields on masked flexible-array + a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], + dtype=[('A', int), ('B', float)]) + b = ma.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = ma.array([(1, 10.), (2, 20.), (0, 0.)], + mask=[(0, 1), (1, 0), (0, 0)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + +class TestMergeArrays(object): + # Test merge_arrays + + def setup(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array( + [(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test merge_arrays on a single array. 
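+        # Sketch of the expected promotion: a plain 1-D array gains a single
+        # auto-named field, e.g. merge_arrays(np.array([1, 2])) compares equal
+        # to np.array([(1,), (2,)], dtype=[('f0', int)]), per the control below.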
+ (_, x, _, z) = self.data + + test = merge_arrays(x) + control = np.array([(1,), (2,)], dtype=[('f0', int)]) + assert_equal(test, control) + test = merge_arrays((x,)) + assert_equal(test, control) + + test = merge_arrays(z, flatten=False) + assert_equal(test, z) + test = merge_arrays(z, flatten=True) + assert_equal(test, z) + + def test_solo_w_flatten(self): + # Test merge_arrays on a single array w & w/o flattening + w = self.data[0] + test = merge_arrays(w, flatten=False) + assert_equal(test, w) + + test = merge_arrays(w, flatten=True) + control = np.array([(1, 2, 3.0), (4, 5, 6.0)], + dtype=[('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + def test_standard(self): + # Test standard & standard + # Test merge arrays + (_, x, y, _) = self.data + test = merge_arrays((x, y), usemask=False) + control = np.array([(1, 10), (2, 20), (-1, 30)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + + test = merge_arrays((x, y), usemask=True) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_flatten(self): + # Test standard & flexible + (_, x, _, z) = self.data + test = merge_arrays((x, z), flatten=True) + control = np.array([(1, 'A', 1.), (2, 'B', 2.)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + + test = merge_arrays((x, z), flatten=False) + control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], + dtype=[('f0', int), + ('f1', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + def test_flatten_wflexible(self): + # Test flatten standard & nested + (w, x, _, _) = self.data + test = merge_arrays((x, w), flatten=True) + control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], + dtype=[('f0', int), + ('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + test = merge_arrays((x, w), flatten=False) + controldtype = [('f0', int), + ('f1', [('a', int), + ('b', [('ba', float), ('bb', int)])])] + control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))], + dtype=controldtype) + assert_equal(test, control) + + def test_wmasked_arrays(self): + # Test merge_arrays masked arrays + (_, x, _, _) = self.data + mx = ma.array([1, 2, 3], mask=[1, 0, 0]) + test = merge_arrays((x, mx), usemask=True) + control = ma.array([(1, 1), (2, 2), (-1, 3)], + mask=[(0, 1), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + test = merge_arrays((x, mx), usemask=True, asrecarray=True) + assert_equal(test, control) + assert_(isinstance(test, MaskedRecords)) + + def test_w_singlefield(self): + # Test single field + test = merge_arrays((np.array([1, 2]).view([('a', int)]), + np.array([10., 20., 30.])),) + control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('a', int), ('f1', float)]) + assert_equal(test, control) + + def test_w_shorter_flex(self): + # Test merge_arrays w/ a shorter flexndarray. 
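+        # An illustrative aside (ours): with inputs of unequal length,
+        # merge_arrays pads the shorter one; under usemask=True the padded
+        # slots come back masked, as in test_standard above.
+        demo = merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
+                            usemask=True)
+        assert_equal(demo.mask.tolist(),
+                     [(False, False), (False, False), (True, False)])
+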
+ z = self.data[-1] + + # Fixme, this test looks incomplete and broken + #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + # dtype=[('A', '|S3'), ('B', float), ('C', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes warnings about unused variables + merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + dtype=[('A', '|S3'), ('B', float), ('C', int)]) + + def test_singlerecord(self): + (_, x, y, z) = self.data + test = merge_arrays((x[0], y[0], z[0]), usemask=False) + control = np.array([(1, 10, ('A', 1))], + dtype=[('f0', int), + ('f1', int), + ('f2', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + +class TestAppendFields(object): + # Test append_fields + + def setup(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_append_single(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, 'A', data=[10, 20, 30]) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('A', int)],) + assert_equal(test, control) + + def test_append_double(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) + control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], + mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], + dtype=[('f0', int), ('A', int), ('B', int)],) + assert_equal(test, control) + + def test_append_on_flex(self): + # Test append_fields on flexible type arrays + z = self.data[-1] + test = append_fields(z, 'C', data=[10, 20, 30]) + control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], + mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('C', int)],) + assert_equal(test, control) + + def test_append_on_nested(self): + # Test append_fields on nested fields + w = self.data[0] + test = append_fields(w, 'C', data=[10, 20, 30]) + control = ma.array([(1, (2, 3.0), 10), + (4, (5, 6.0), 20), + (-1, (-1, -1.), 30)], + mask=[( + 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], + dtype=[('a', int), + ('b', [('ba', float), ('bb', int)]), + ('C', int)],) + assert_equal(test, control) + + +class TestStackArrays(object): + # Test stack_arrays + def setup(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test stack_arrays on single arrays + (_, x, _, _) = self.data + test = stack_arrays((x,)) + assert_equal(test, x) + assert_(test is x) + + test = stack_arrays(x) + assert_equal(test, x) + assert_(test is x) + + def test_unnamed_fields(self): + # Tests combinations of arrays w/o named fields + (_, x, y, _) = self.data + + test = stack_arrays((x, x), usemask=False) + control = np.array([1, 2, 1, 2]) + assert_equal(test, control) + + test = stack_arrays((x, y), usemask=False) + control = np.array([1, 2, 10, 20, 30]) + assert_equal(test, control) + + test = stack_arrays((y, x), usemask=False) + control = np.array([10, 20, 30, 1, 2]) + assert_equal(test, control) + + def 
test_unnamed_and_named_fields(self): + # Test combination of arrays w/ & w/o named fields + (_, x, _, z) = self.data + + test = stack_arrays((x, z)) + control = ma.array([(1, -1, -1), (2, -1, -1), + (-1, 'A', 1), (-1, 'B', 2)], + mask=[(0, 1, 1), (0, 1, 1), + (1, 0, 0), (1, 0, 0)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + + def test_matching_named_fields(self): + # Test combination of arrays w/ matching field names + (_, x, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + test = stack_arrays((z, zz)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, zz, x)) + ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] + control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), + ('a', 10., 100., -1), ('b', 20., 200., -1), + ('c', 30., 300., -1), + (-1, -1, -1, 1), (-1, -1, -1, 2)], + dtype=ndtype, + mask=[(0, 0, 1, 1), (0, 0, 1, 1), + (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), + (1, 1, 1, 0), (1, 1, 1, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_defaults(self): + # Test defaults: no exception raised if keys of defaults are not fields. 
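+        # An illustrative aside (ours): `defaults` maps field names to the
+        # fill value used where an input lacked that field; keys that name
+        # no field are silently ignored.
+        demo = stack_arrays((np.array([(1,)], dtype=[('A', int)]),
+                             np.array([(2, 9.)], dtype=[('A', int), ('B', float)])),
+                            defaults={'B': -99., 'nosuchfield': 0})
+        assert_equal(demo.data['B'][0], -99.)
+        assert_(demo.mask['B'][0])
+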
+ (_, _, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} + test = stack_arrays((z, zz), defaults=defaults) + control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_autoconversion(self): + # Tests autoconversion + adtype = [('A', int), ('B', bool), ('C', float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [('A', int), ('B', float), ('C', float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + test = stack_arrays((a, b), autoconvert=True) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + with assert_raises(TypeError): + stack_arrays((a, b), autoconvert=False) + + def test_checktitles(self): + # Test using titles in the field names + adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + test = stack_arrays((a, b)) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_subdtype(self): + z = np.array([ + ('A', 1), ('B', 2) + ], dtype=[('A', '|S3'), ('B', float, (1,))]) + zz = np.array([ + ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) + + res = stack_arrays((z, zz)) + expected = ma.array( + data=[ + (b'A', [1.0], 0), + (b'B', [2.0], 0), + (b'a', [10.0], 100.0), + (b'b', [20.0], 200.0), + (b'c', [30.0], 300.0)], + mask=[ + (False, [False], True), + (False, [False], True), + (False, [False], False), + (False, [False], False), + (False, [False], False) + ], + dtype=zz.dtype + ) + assert_equal(res.dtype, expected.dtype) + assert_equal(res, expected) + assert_equal(res.mask, expected.mask) + + +class TestJoinBy(object): + def setup(self): + self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('c', int)]) + self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('d', int)]) + + def test_inner_join(self): + # Basic test of join_by + a, b = self.a, self.b + + test = join_by('a', a, b, jointype='inner') + control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), + (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), + (9, 59, 69, 109, 104)], + dtype=[('a', int), ('b1', int), ('b2', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_join(self): + a, b = self.a, self.b + + # Fixme, this test is broken + #test = join_by(('a', 'b'), a, b) + #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), + # (7, 57, 107, 102), (8, 58, 108, 103), + # (9, 59, 109, 104)], + # dtype=[('a', int), ('b', int), + # ('c', int), ('d', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes unused variable warnings + join_by(('a', 'b'), a, b) + np.array([(5, 55, 105, 100), (6, 56, 106, 101), + (7, 57, 107, 102), (8, 58, 108, 103), + (9, 59, 109, 104)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + + def test_join_subdtype(self): + # tests the bug in https://stackoverflow.com/q/44769632/102441 + from numpy.lib import recfunctions as rfn + foo = np.array([(1,)], + dtype=[('key', int)]) + bar = np.array([(1, np.array([1,2,3]))], + dtype=[('key', int), ('value', 'uint16', 3)]) + res = join_by('key', foo, bar) + assert_equal(res, bar.view(ma.MaskedArray)) + + def test_outer_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'outer') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (5, 65, -1, 100), (6, 56, 106, -1), + (6, 66, -1, 101), (7, 57, 107, -1), + (7, 67, -1, 102), (8, 58, 108, -1), + (8, 68, -1, 103), (9, 59, 109, -1), + (9, 69, -1, 104), (10, 70, -1, 105), + (11, 71, -1, 106), (12, 72, -1, 107), + (13, 73, -1, 108), (14, 74, -1, 109)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_leftouter_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'leftouter') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (6, 56, 106, -1), (7, 57, 107, -1), + (8, 58, 108, -1), (9, 59, 109, -1)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1)], + 
                            dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
+        assert_equal(test, control)
+
+    def test_different_field_order(self):
+        # gh-8940
+        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+        # this should not give a FutureWarning:
+        j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
+        assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
+
+    def test_duplicate_keys(self):
+        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+        assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
+
+    @pytest.mark.xfail(reason="See comment at gh-9343")
+    def test_same_name_different_dtypes_key(self):
+        a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
+        b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+        expected_dtype = np.dtype([
+            ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
+
+        a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+        b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+        res = join_by('key', a, b)
+
+        assert_equal(res.dtype, expected_dtype)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_regression.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_regression.py
new file mode 100644
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_regression.py
+    def test_loadtxt_fields_subarrays(self):
+        # For ticket #1936
+        if sys.version_info[0] >= 3:
+            from io import StringIO
+        else:
+            from StringIO import StringIO
+
+        dt = [("a", 'u1', 2), ("b", 'u1', 2)]
+        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
+        assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
+
+        dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])]
+        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
+        assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt))
+
+        dt = [("a", 'u1', (2, 2))]
+        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
+        assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))
+
+        dt = [("a", 'u1', (2, 3, 2))]
+        x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt)
+        data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)]
+        assert_equal(x, np.array(data, dtype=dt))
+
+    def test_nansum_with_boolean(self):
+        # gh-2978
+        a = np.zeros(2, dtype=bool)
+        try:
+            np.nansum(a)
+        except Exception:
+            raise AssertionError()
+
+    def test_py3_compat(self):
+        # gh-2561
+        # Test if the oldstyle class test is bypassed in python3
+        class C():
+            """Old-style class in python2, normal class in python3"""
+            pass
+
+        out = open(os.devnull, 'w')
+        try:
+            np.info(C(), output=out)
+        except AttributeError:
+            raise AssertionError()
+        finally:
+            out.close()
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_regression.pyc
new file mode 100644
index 0000000..77e7e81
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_regression.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_shape_base.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_shape_base.py
new file mode 100644
index 0000000..01ea028
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_shape_base.py
@@ -0,0 +1,708 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+import warnings
+import functools
+import sys
+import pytest
+
+from numpy.lib.shape_base import (
+    apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
+    vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
+    put_along_axis
+    )
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
+    )
+
+
+IS_64BIT = sys.maxsize > 2**32
+
+
+def _add_keepdims(func):
+    """ hack in keepdims behavior into a function taking an axis """
+    @functools.wraps(func)
+    def wrapped(a, axis, **kwargs):
+        res = func(a, axis=axis, **kwargs)
+        if axis is None:
+            axis = 0  # res is now a scalar, so we can insert this anywhere
+        return np.expand_dims(res, axis=axis)
+    return wrapped
+
+
+class TestTakeAlongAxis(object):
+    def test_argequivalent(self):
+        """ Test it translates from arg<func> to <func> """
+        from numpy.random import rand
+        a = rand(3, 4, 5)
+
+        funcs = [
+            (np.sort, np.argsort, dict()),
+            (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
+            (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
+            (np.partition, np.argpartition, dict(kth=2)),
+        ]
+
+        for func, argfunc, kwargs in funcs:
+            for axis in list(range(a.ndim)) + [None]:
+                a_func = func(a, axis=axis, **kwargs)
+                ai_func = argfunc(a, axis=axis, **kwargs)
+                assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+    def test_invalid(self):
+        """ Test it errors when indices has too few dimensions """
+        a = np.ones((10, 10))
+        ai = np.ones((10, 2), dtype=np.intp)
+
+        # sanity check
+        take_along_axis(a, ai, axis=1)
+
+        # not enough indices
+        assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+        # bool arrays not allowed
+        assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+        # float arrays not allowed
+        assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+        # invalid axis
+        assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
+
+    def test_empty(self):
+        """ Test everything is ok with empty results, even with inserted dims """
+        a = np.ones((3, 4, 5))
+        ai = np.ones((3, 0, 5), dtype=np.intp)
+
+        actual = take_along_axis(a, ai, axis=1)
+        assert_equal(actual.shape, ai.shape)
+
+    def test_broadcast(self):
+        """ Test that non-indexing dimensions are broadcast in both directions """
+        a = np.ones((3, 4, 1))
+        ai = np.ones((1, 2, 5), dtype=np.intp)
+        actual = take_along_axis(a, ai, axis=1)
+        assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis(object):
+    def test_replace_max(self):
+        a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+        for axis in list(range(a_base.ndim)) + [None]:
+            # we mutate this in the loop
+            a = a_base.copy()
+
+            # replace the max with a small value
+            i_max = _add_keepdims(np.argmax)(a, axis=axis)
+            put_along_axis(a, i_max, -99, axis=axis)
+
+            # find the new minimum, which should sit where the max used to be
+            i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+            assert_equal(i_min, i_max)
+
+    def test_broadcast(self):
+        """ Test that non-indexing dimensions are broadcast in both directions """
+        a = np.ones((3, 4, 1))
+        ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
+        put_along_axis(a, ai, 20, axis=1)
+        assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+
+class TestApplyAlongAxis(object):
+    def test_simple(self):
+        a = np.ones((20, 10), 'd')
+        assert_array_equal(
+            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
+
+    def test_simple101(self):
+        a = np.ones((10, 101), 'd')
+        assert_array_equal(
+            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
+
+    def test_3d(self):
+        a = np.arange(27).reshape((3, 3, 3))
+        assert_array_equal(apply_along_axis(np.sum, 0, a),
+                           [[27, 30, 33], [36, 39, 42], [45, 48, 51]])
+
+    def test_preserve_subclass(self):
+        def double(row):
+            return row * 2
+
+        class MyNDArray(np.ndarray):
+            pass
+
+        m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
+        expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
+
+        result = apply_along_axis(double, 0, m)
+        assert_(isinstance(result, MyNDArray))
+        assert_array_equal(result, expected)
+
+        result = apply_along_axis(double, 1, m)
+        assert_(isinstance(result, MyNDArray))
+        assert_array_equal(result, expected)
+
+    def test_subclass(self):
+        class MinimalSubclass(np.ndarray):
+            data = 1
+
+        def minimal_function(array):
+            return array.data
+
+        a = np.zeros((6, 3)).view(MinimalSubclass)
+
+        
assert_array_equal( + apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) + ) + + def test_scalar_array(self, cls=np.ndarray): + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(np.sum, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + def test_0d_array(self, cls=np.ndarray): + def sum_to_0d(x): + """ Sum x, returning a 0d array of the same class """ + assert_equal(x.ndim, 1) + return np.squeeze(np.sum(x, keepdims=True)) + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(sum_to_0d, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + res = apply_along_axis(sum_to_0d, 1, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls)) + + def test_axis_insertion(self, cls=np.ndarray): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + return (x[::-1] * x[1:,None]).view(cls) + + a2d = np.arange(6*3).reshape((6, 3)) + + # 2d insertion along first axis + actual = apply_along_axis(f1to2, 0, a2d) + expected = np.stack([ + f1to2(a2d[:,i]) for i in range(a2d.shape[1]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 2d insertion along last axis + actual = apply_along_axis(f1to2, 1, a2d) + expected = np.stack([ + f1to2(a2d[i,:]) for i in range(a2d.shape[0]) + ], axis=0).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 3d insertion along middle axis + a3d = np.arange(6*5*3).reshape((6, 5, 3)) + + actual = apply_along_axis(f1to2, 1, a3d) + expected = np.stack([ + np.stack([ + f1to2(a3d[i,:,j]) for i in range(a3d.shape[0]) + ], axis=0) + for j in range(a3d.shape[2]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + def test_subclass_preservation(self): + class MinimalSubclass(np.ndarray): + pass + self.test_scalar_array(MinimalSubclass) + self.test_0d_array(MinimalSubclass) + self.test_axis_insertion(MinimalSubclass) + + def test_axis_insertion_ma(self): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + res = x[::-1] * x[1:,None] + return np.ma.masked_where(res%5==0, res) + a = np.arange(6*3).reshape((6, 3)) + res = apply_along_axis(f1to2, 0, a) + assert_(isinstance(res, np.ma.masked_array)) + assert_equal(res.ndim, 3) + assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask) + assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask) + assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask) + + def test_tuple_func1d(self): + def sample_1d(x): + return x[1], x[0] + res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) + assert_array_equal(res, np.array([[2, 1], [4, 3]])) + + def test_empty(self): + # can't apply_along_axis when there's no chance to call the function + def never_call(x): + assert_(False) # should never be reached + + a = np.empty((0, 0)) + assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) + assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) + + # but it's sometimes ok with some non-zero dimensions + def empty_to_1(x): + assert_(len(x) == 0) + return 1 + + a = np.empty((10, 0)) + actual = np.apply_along_axis(empty_to_1, 1, a) + assert_equal(actual, np.ones(10)) + assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) + + def test_with_iterable_object(self): + # from issue 5248 + d = np.array([ + [{1, 11}, {2, 22}, {3, 33}], + [{4, 
44}, {5, 55}, {6, 66}] + ]) + actual = np.apply_along_axis(lambda a: set.union(*a), 0, d) + expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}]) + + assert_equal(actual, expected) + + # issue 8642 - assert_equal doesn't detect this! + for i in np.ndindex(actual.shape): + assert_equal(type(actual[i]), type(expected[i])) + + +class TestApplyOverAxes(object): + def test_simple(self): + a = np.arange(24).reshape(2, 3, 4) + aoa_a = apply_over_axes(np.sum, a, [0, 2]) + assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) + + +class TestExpandDims(object): + def test_functionality(self): + s = (2, 3, 4, 5) + a = np.empty(s) + for axis in range(-5, 4): + b = expand_dims(a, axis) + assert_(b.shape[axis] == 1) + assert_(np.squeeze(b).shape == s) + + def test_deprecations(self): + # 2017-05-17, 1.13.0 + s = (2, 3, 4, 5) + a = np.empty(s) + with warnings.catch_warnings(): + warnings.simplefilter("always") + assert_warns(DeprecationWarning, expand_dims, a, -6) + assert_warns(DeprecationWarning, expand_dims, a, 5) + + def test_subclasses(self): + a = np.arange(10).reshape((2, 5)) + a = np.ma.array(a, mask=a%3 == 0) + + expanded = np.expand_dims(a, axis=1) + assert_(isinstance(expanded, np.ma.MaskedArray)) + assert_equal(expanded.shape, (2, 1, 5)) + assert_equal(expanded.mask.shape, (2, 1, 5)) + + +class TestArraySplit(object): + def test_integer_0_split(self): + a = np.arange(10) + assert_raises(ValueError, array_split, a, 0) + + def test_integer_split(self): + a = np.arange(10) + res = array_split(a, 1) + desired = [np.arange(10)] + compare_results(res, desired) + + res = array_split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + res = array_split(a, 3) + desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] + compare_results(res, desired) + + res = array_split(a, 4) + desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), + np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 5) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 6) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 7) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 8) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), + np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), + np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 9) + desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), + np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), + np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 10) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 11) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10), np.array([])] + compare_results(res, desired) + + def test_integer_split_2D_rows(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 
3, axis=0) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + # Same thing for manual splits: + res = array_split(a, [0, 1, 2], axis=0) + tgt = [np.zeros((0, 10)), np.array([np.arange(10)]), + np.array([np.arange(10)])] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + def test_integer_split_2D_cols(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=-1) + desired = [np.array([np.arange(4), np.arange(4)]), + np.array([np.arange(4, 7), np.arange(4, 7)]), + np.array([np.arange(7, 10), np.arange(7, 10)])] + compare_results(res, desired) + + def test_integer_split_2D_default(self): + """ This will fail if we change default axis + """ + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + # perhaps should check higher dimensions + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + def test_integer_split_2D_rows_greater_max_int32(self): + a = np.broadcast_to([0], (1 << 32, 2)) + res = array_split(a, 4) + chunk = np.broadcast_to([0], (1 << 30, 2)) + tgt = [chunk] * 4 + for i in range(len(tgt)): + assert_equal(res[i].shape, tgt[i].shape) + + def test_index_split_simple(self): + a = np.arange(10) + indices = [1, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_low_bound(self): + a = np.arange(10) + indices = [0, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_high_bound(self): + a = np.arange(10) + indices = [0, 5, 7, 10, 12] + res = array_split(a, indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10), np.array([]), np.array([])] + compare_results(res, desired) + + +class TestSplit(object): + # The split function is essentially the same as array_split, + # except that it test if splitting will result in an + # equal split. Only test for this case. 
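+
+    # A short illustrative case (ours, not from the numpy suite): array_split
+    # tolerates a remainder where split raises ValueError.
+    def test_unequal_split_sketch(self):
+        a = np.arange(5)
+        compare_results(array_split(a, 2), [np.arange(3), np.arange(3, 5)])
+        assert_raises(ValueError, split, a, 2)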
+ + def test_equal_split(self): + a = np.arange(10) + res = split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + def test_unequal_split(self): + a = np.arange(10) + assert_raises(ValueError, split, a, 3) + + +class TestColumnStack(object): + def test_non_iterable(self): + assert_raises(TypeError, column_stack, 1) + + def test_1D_arrays(self): + # example from docstring + a = np.array((1, 2, 3)) + b = np.array((2, 3, 4)) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_2D_arrays(self): + # same as hstack 2D docstring example + a = np.array([[1], [2], [3]]) + b = np.array([[2], [3], [4]]) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_generator(self): + with assert_warns(FutureWarning): + column_stack((np.arange(3) for _ in range(2))) + + +class TestDstack(object): + def test_non_iterable(self): + assert_raises(TypeError, dstack, 1) + + def test_0D_array(self): + a = np.array(1) + b = np.array(2) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = np.array([1]) + b = np.array([2]) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = np.array([[1], [2]]) + b = np.array([[1], [2]]) + res = dstack([a, b]) + desired = np.array([[[1, 1]], [[2, 2, ]]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = np.array([1, 2]) + b = np.array([1, 2]) + res = dstack([a, b]) + desired = np.array([[[1, 1], [2, 2]]]) + assert_array_equal(res, desired) + + def test_generator(self): + with assert_warns(FutureWarning): + dstack((np.arange(3) for _ in range(2))) + + +# array_split has more comprehensive test of splitting. +# only do simple test on hsplit, vsplit, and dsplit +class TestHsplit(object): + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, hsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + try: + hsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + res = hsplit(a, 2) + desired = [np.array([1, 2]), np.array([3, 4])] + compare_results(res, desired) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = hsplit(a, 2) + desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] + compare_results(res, desired) + + +class TestVsplit(object): + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, vsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, vsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + try: + vsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = vsplit(a, 2) + desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] + compare_results(res, desired) + + +class TestDsplit(object): + # Only testing for integer splits. 
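+    # Illustrative (ours): dsplit always cuts along axis 2, so inputs need
+    # at least three dimensions.
+    def test_axis2_sketch(self):
+        pieces = dsplit(np.ones((2, 2, 4)), 2)
+        assert_equal(pieces[0].shape, (2, 2, 2))
+        assert_equal(pieces[1].shape, (2, 2, 2))
+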
+ def test_non_iterable(self): + assert_raises(ValueError, dsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, dsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + assert_raises(ValueError, dsplit, a, 2) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + try: + dsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_3D_array(self): + a = np.array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]) + res = dsplit(a, 2) + desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), + np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] + compare_results(res, desired) + + +class TestSqueeze(object): + def test_basic(self): + from numpy.random import rand + + a = rand(20, 10, 10, 1, 1) + b = rand(20, 1, 10, 1, 20) + c = rand(1, 1, 20, 10) + assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) + assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) + assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) + + # Squeezing to 0-dim should still give an ndarray + a = [[[1.5]]] + res = np.squeeze(a) + assert_equal(res, 1.5) + assert_equal(res.ndim, 0) + assert_equal(type(res), np.ndarray) + + +class TestKron(object): + def test_return_type(self): + class myarray(np.ndarray): + __array_priority__ = 0.0 + + a = np.ones([2, 2]) + ma = myarray(a.shape, a.dtype, a.data) + assert_equal(type(kron(a, a)), np.ndarray) + assert_equal(type(kron(ma, ma)), myarray) + assert_equal(type(kron(a, ma)), np.ndarray) + assert_equal(type(kron(ma, a)), myarray) + + +class TestTile(object): + def test_basic(self): + a = np.array([0, 1, 2]) + b = [[1, 2], [3, 4]] + assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) + assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) + assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) + assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) + assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], + [1, 2, 1, 2], [3, 4, 3, 4]]) + + def test_tile_one_repetition_on_array_gh4679(self): + a = np.arange(5) + b = tile(a, 1) + b += 2 + assert_equal(a, np.arange(5)) + + def test_empty(self): + a = np.array([[[]]]) + b = np.array([[], []]) + c = tile(b, 2).shape + d = tile(a, (3, 2, 5)).shape + assert_equal(c, (2, 0)) + assert_equal(d, (3, 2, 0)) + + def test_kroncompare(self): + from numpy.random import randint + + reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] + shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] + for s in shape: + b = randint(0, 10, size=s) + for r in reps: + a = np.ones(r, b.dtype) + large = tile(b, r) + klarge = kron(a, b) + assert_equal(large, klarge) + + +class TestMayShareMemory(object): + def test_basic(self): + d = np.ones((50, 60)) + d2 = np.ones((30, 60, 6)) + assert_(np.may_share_memory(d, d)) + assert_(np.may_share_memory(d, d[::-1])) + assert_(np.may_share_memory(d, d[::2])) + assert_(np.may_share_memory(d, d[1:, ::-1])) + + assert_(not np.may_share_memory(d[::-1], d2)) + assert_(not np.may_share_memory(d[::2], d2)) + assert_(not np.may_share_memory(d[1:, ::-1], d2)) + assert_(np.may_share_memory(d2[1:, ::-1], d2)) + + +# Utility +def compare_results(res, desired): + for i in range(len(desired)): + assert_array_equal(res[i], desired[i]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_shape_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_shape_base.pyc new file mode 100644 index 
0000000..9626ad0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_shape_base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_stride_tricks.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_stride_tricks.py new file mode 100644 index 0000000..b2bd7da --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_stride_tricks.py @@ -0,0 +1,445 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.core._rational_tests import rational +from numpy.testing import ( + assert_equal, assert_array_equal, assert_raises, assert_, + assert_raises_regex + ) +from numpy.lib.stride_tricks import ( + as_strided, broadcast_arrays, _broadcast_shape, broadcast_to + ) + +def assert_shapes_correct(input_shapes, expected_shape): + # Broadcast a list of arrays with the given input shapes and check the + # common output shape. + + inarrays = [np.zeros(s) for s in input_shapes] + outarrays = broadcast_arrays(*inarrays) + outshapes = [a.shape for a in outarrays] + expected = [expected_shape] * len(inarrays) + assert_equal(outshapes, expected) + + +def assert_incompatible_shapes_raise(input_shapes): + # Broadcast a list of arrays with the given (incompatible) input shapes + # and check that they raise a ValueError. + + inarrays = [np.zeros(s) for s in input_shapes] + assert_raises(ValueError, broadcast_arrays, *inarrays) + + +def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): + # Broadcast two shapes against each other and check that the data layout + # is the same as if a ufunc did the broadcasting. + + x0 = np.zeros(shape0, dtype=int) + # Note that multiply.reduce's identity element is 1.0, so when shape1==(), + # this gives the desired n==1. + n = int(np.multiply.reduce(shape1)) + x1 = np.arange(n).reshape(shape1) + if transposed: + x0 = x0.T + x1 = x1.T + if flipped: + x0 = x0[::-1] + x1 = x1[::-1] + # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the + # result should be exactly the same as the broadcasted view of x1. + y = x0 + x1 + b0, b1 = broadcast_arrays(x0, x1) + assert_array_equal(y, b1) + + +def test_same(): + x = np.arange(10) + y = np.arange(10) + bx, by = broadcast_arrays(x, y) + assert_array_equal(x, bx) + assert_array_equal(y, by) + +def test_broadcast_kwargs(): + # ensure that a TypeError is appropriately raised when + # np.broadcast_arrays() is called with any keyword + # argument other than 'subok' + x = np.arange(10) + y = np.arange(10) + + with assert_raises_regex(TypeError, + r'broadcast_arrays\(\) got an unexpected keyword*'): + broadcast_arrays(x, y, dtype='float64') + + +def test_one_off(): + x = np.array([[1, 2, 3]]) + y = np.array([[1], [2], [3]]) + bx, by = broadcast_arrays(x, y) + bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + by0 = bx0.T + assert_array_equal(bx0, bx) + assert_array_equal(by0, by) + + +def test_same_input_shapes(): + # Check that the final shape is just the input shape. + + data = [ + (), + (1,), + (3,), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), + ] + for shape in data: + input_shapes = [shape] + # Single input. + assert_shapes_correct(input_shapes, shape) + # Double input. + input_shapes2 = [shape, shape] + assert_shapes_correct(input_shapes2, shape) + # Triple input. 
+ input_shapes3 = [shape, shape, shape] + assert_shapes_correct(input_shapes3, shape) + + +def test_two_compatible_by_ones_input_shapes(): + # Check that two different input shapes of the same length, but some have + # ones, broadcast to the correct shape. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_two_compatible_by_prepending_ones_input_shapes(): + # Check that two different input shapes (of different lengths) broadcast + # to the correct shape. + + data = [ + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_incompatible_shapes_raise_valueerror(): + # Check that a ValueError is raised for incompatible shapes. + + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + ] + for input_shapes in data: + assert_incompatible_shapes_raise(input_shapes) + # Reverse the input shapes since broadcasting should be symmetric. + assert_incompatible_shapes_raise(input_shapes[::-1]) + + +def test_same_as_ufunc(): + # Check that the data layout is the same as if a ufunc did the operation. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], + "Shapes: %s %s" % (input_shapes[0], input_shapes[1])) + # Reverse the input shapes since broadcasting should be symmetric. 
+ assert_same_as_ufunc(input_shapes[1], input_shapes[0]) + # Try them transposed, too. + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True) + # ... and flipped for non-rank-0 inputs in order to test negative + # strides. + if () not in input_shapes: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) + + +def test_broadcast_to_succeeds(): + data = [ + [np.array(0), (0,), np.array(0)], + [np.array(0), (1,), np.zeros(1)], + [np.array(0), (3,), np.zeros(3)], + [np.ones(1), (1,), np.ones(1)], + [np.ones(1), (2,), np.ones(2)], + [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))], + [np.arange(3), (3,), np.arange(3)], + [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)], + [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])], + # test if shape is not a tuple + [np.ones(0), 0, np.ones(0)], + [np.ones(1), 1, np.ones(1)], + [np.ones(1), 2, np.ones(2)], + # these cases with size 0 are strange, but they reproduce the behavior + # of broadcasting with ufuncs (see test_same_as_ufunc above) + [np.ones(1), (0,), np.ones(0)], + [np.ones((1, 2)), (0, 2), np.ones((0, 2))], + [np.ones((2, 1)), (2, 0), np.ones((2, 0))], + ] + for input_array, shape, expected in data: + actual = broadcast_to(input_array, shape) + assert_array_equal(expected, actual) + + +def test_broadcast_to_raises(): + data = [ + [(0,), ()], + [(1,), ()], + [(3,), ()], + [(3,), (1,)], + [(3,), (2,)], + [(3,), (4,)], + [(1, 2), (2, 1)], + [(1, 1), (1,)], + [(1,), -1], + [(1,), (-1,)], + [(1, 2), (-1, 2)], + ] + for orig_shape, target_shape in data: + arr = np.zeros(orig_shape) + assert_raises(ValueError, lambda: broadcast_to(arr, target_shape)) + + +def test_broadcast_shape(): + # broadcast_shape is already exercized indirectly by broadcast_arrays + assert_equal(_broadcast_shape(), ()) + assert_equal(_broadcast_shape([1, 2]), (2,)) + assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1)) + assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,)) + bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32 + assert_raises(ValueError, lambda: _broadcast_shape(*bad_args)) + + +def test_as_strided(): + a = np.array([None]) + a_view = as_strided(a) + expected = np.array([None]) + assert_array_equal(a_view, np.array([None])) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + expected = np.array([1, 3]) + assert_array_equal(a_view, expected) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) + expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + assert_array_equal(a_view, expected) + + # Regression test for gh-5081 + dt = np.dtype([('num', 'i4'), ('obj', 'O')]) + a = np.empty((4,), dtype=dt) + a['num'] = np.arange(1, 5) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + expected_num = [[1, 2, 3, 4]] * 3 + expected_obj = [[None]*4]*3 + assert_equal(a_view.dtype, dt) + assert_array_equal(expected_num, a_view['num']) + assert_array_equal(expected_obj, a_view['obj']) + + # Make sure that void types without fields are kept unchanged + a = np.empty((4,), dtype='V4') + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Make sure that the only type 
that could fail is properly handled + dt = np.dtype({'names': [''], 'formats': ['V4']}) + a = np.empty((4,), dtype=dt) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Custom dtypes should not be lost (gh-9161) + r = [rational(i) for i in range(4)] + a = np.array(r, dtype=rational) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + assert_array_equal([r] * 3, a_view) + +def as_strided_writeable(): + arr = np.ones(10) + view = as_strided(arr, writeable=False) + assert_(not view.flags.writeable) + + # Check that writeable also is fine: + view = as_strided(arr, writeable=True) + assert_(view.flags.writeable) + view[...] = 3 + assert_array_equal(arr, np.full_like(arr, 3)) + + # Test that things do not break down for readonly: + arr.flags.writeable = False + view = as_strided(arr, writeable=False) + view = as_strided(arr, writeable=True) + assert_(not view.flags.writeable) + + +class VerySimpleSubClass(np.ndarray): + def __new__(cls, *args, **kwargs): + kwargs['subok'] = True + return np.array(*args, **kwargs).view(cls) + + +class SimpleSubClass(VerySimpleSubClass): + def __new__(cls, *args, **kwargs): + kwargs['subok'] = True + self = np.array(*args, **kwargs).view(cls) + self.info = 'simple' + return self + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + ' finalized' + + +def test_subclasses(): + # test that subclass is preserved only if subok=True + a = VerySimpleSubClass([1, 2, 3, 4]) + assert_(type(a) is VerySimpleSubClass) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + assert_(type(a_view) is np.ndarray) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is VerySimpleSubClass) + # test that if a subclass has __array_finalize__, it is used + a = SimpleSubClass([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + + # similar tests for broadcast_arrays + b = np.arange(len(a)).reshape(-1, 1) + a_view, b_view = broadcast_arrays(a, b) + assert_(type(a_view) is np.ndarray) + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + a_view, b_view = broadcast_arrays(a, b, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + + # and for broadcast_to + shape = (2, 4) + a_view = broadcast_to(a, shape) + assert_(type(a_view) is np.ndarray) + assert_(a_view.shape == shape) + a_view = broadcast_to(a, shape, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(a_view.shape == shape) + + +def test_writeable(): + # broadcast_to should return a readonly array + original = np.array([1, 2, 3]) + result = broadcast_to(original, (2, 3)) + assert_equal(result.flags.writeable, False) + assert_raises(ValueError, result.__setitem__, slice(None), 0) + + # but the result of broadcast_arrays needs to be writeable (for now), to + # preserve backwards compatibility + for results in [broadcast_arrays(original), + broadcast_arrays(0, original)]: + for result in results: + assert_equal(result.flags.writeable, True) + # keep readonly input readonly + original.flags.writeable = False + _, result = broadcast_arrays(0, original) + assert_equal(result.flags.writeable, False) + + # regression test for 
GH6491 + shape = (2,) + strides = [0] + tricky_array = as_strided(np.array(0), shape, strides) + other = np.zeros((1,)) + first, second = broadcast_arrays(tricky_array, other) + assert_(first.shape == second.shape) + + +def test_reference_types(): + input_array = np.array('a', dtype=object) + expected = np.array(['a'] * 3, dtype=object) + actual = broadcast_to(input_array, (3,)) + assert_array_equal(expected, actual) + + actual, _ = broadcast_arrays(input_array, np.ones(3)) + assert_array_equal(expected, actual) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_stride_tricks.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_stride_tricks.pyc new file mode 100644 index 0000000..da75182 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_stride_tricks.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_twodim_base.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_twodim_base.py new file mode 100644 index 0000000..bf93b4a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_twodim_base.py @@ -0,0 +1,510 @@ +"""Test functions for matrix module + +""" +from __future__ import division, absolute_import, print_function + +from numpy.testing import ( + assert_equal, assert_array_equal, assert_array_max_ulp, + assert_array_almost_equal, assert_raises, + ) + +from numpy import ( + arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d, + tri, mask_indices, triu_indices, triu_indices_from, tril_indices, + tril_indices_from, vander, + ) + +import numpy as np + + +def get_mat(n): + data = arange(n) + data = add.outer(data, data) + return data + + +class TestEye(object): + def test_basic(self): + assert_equal(eye(4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]])) + + assert_equal(eye(4, dtype='f'), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], 'f')) + + assert_equal(eye(3) == 1, + eye(3, dtype=bool)) + + def test_diag(self): + assert_equal(eye(4, k=1), + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, k=-1), + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_2d(self): + assert_equal(eye(4, 3), + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0]])) + + assert_equal(eye(3, 4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_diag2d(self): + assert_equal(eye(3, 4, k=2), + array([[0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, 3, k=-2), + array([[0, 0, 0], + [0, 0, 0], + [1, 0, 0], + [0, 1, 0]])) + + def test_eye_bounds(self): + assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) + assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) + assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) + assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) + assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) + assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) + assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) + + def test_strings(self): + assert_equal(eye(2, 2, dtype='S3'), + [[b'1', b''], [b'', b'1']]) + + def test_bool(self): + assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) + + def test_order(self): + mat_c = eye(4, 3, k=-1) + mat_f = eye(4, 3, k=-1, order='F') + assert_equal(mat_c, mat_f) + assert mat_c.flags.c_contiguous + assert not 
mat_c.flags.f_contiguous + assert not mat_f.flags.c_contiguous + assert mat_f.flags.f_contiguous + + +class TestDiag(object): + def test_vector(self): + vals = (100 * arange(5)).astype('l') + b = zeros((5, 5)) + for k in range(5): + b[k, k] = vals[k] + assert_equal(diag(vals), b) + b = zeros((7, 7)) + c = b.copy() + for k in range(5): + b[k, k + 2] = vals[k] + c[k + 2, k] = vals[k] + assert_equal(diag(vals, k=2), b) + assert_equal(diag(vals, k=-2), c) + + def test_matrix(self, vals=None): + if vals is None: + vals = (100 * get_mat(5) + 1).astype('l') + b = zeros((5,)) + for k in range(5): + b[k] = vals[k, k] + assert_equal(diag(vals), b) + b = b * 0 + for k in range(3): + b[k] = vals[k, k + 2] + assert_equal(diag(vals, 2), b[:3]) + for k in range(3): + b[k] = vals[k + 2, k] + assert_equal(diag(vals, -2), b[:3]) + + def test_fortran_order(self): + vals = array((100 * get_mat(5) + 1), order='F', dtype='l') + self.test_matrix(vals) + + def test_diag_bounds(self): + A = [[1, 2], [3, 4], [5, 6]] + assert_equal(diag(A, k=2), []) + assert_equal(diag(A, k=1), [2]) + assert_equal(diag(A, k=0), [1, 4]) + assert_equal(diag(A, k=-1), [3, 6]) + assert_equal(diag(A, k=-2), [5]) + assert_equal(diag(A, k=-3), []) + + def test_failure(self): + assert_raises(ValueError, diag, [[[1]]]) + + +class TestFliplr(object): + def test_basic(self): + assert_raises(ValueError, fliplr, ones(4)) + a = get_mat(4) + b = a[:, ::-1] + assert_equal(fliplr(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(fliplr(a), b) + + +class TestFlipud(object): + def test_basic(self): + a = get_mat(4) + b = a[::-1, :] + assert_equal(flipud(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(flipud(a), b) + + +class TestHistogram2d(object): + def test_simple(self): + x = array( + [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) + y = array( + [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) + xedges = np.linspace(0, 1, 10) + yedges = np.linspace(0, 1, 10) + H = histogram2d(x, y, (xedges, yedges))[0] + answer = array( + [[0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + assert_array_equal(H.T, answer) + H = histogram2d(x, y, xedges)[0] + assert_array_equal(H.T, answer) + H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) + assert_array_equal(H, eye(10, 10)) + assert_array_equal(xedges, np.linspace(0, 9, 11)) + assert_array_equal(yedges, np.linspace(0, 9, 11)) + + def test_asym(self): + x = array([1, 1, 2, 3, 4, 4, 4, 5]) + y = array([1, 3, 2, 0, 1, 2, 3, 4]) + H, xed, yed = histogram2d( + x, y, (6, 5), range=[[0, 6], [0, 5]], density=True) + answer = array( + [[0., 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + assert_array_almost_equal(H, answer/8., 3) + assert_array_equal(xed, np.linspace(0, 6, 7)) + assert_array_equal(yed, np.linspace(0, 5, 6)) + + def test_density(self): + x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + H, xed, yed = histogram2d( + x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True) + answer = array([[1, 1, .5], + [1, 1, .5], + [.5, .5, .25]])/9. + assert_array_almost_equal(H, answer, 3) + + def test_all_outliers(self): + r = np.random.rand(100) + 1. 
+ 1e6 # histogramdd rounds by decimal=6 + H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) + assert_array_equal(H, 0) + + def test_empty(self): + a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, array([[0.]])) + + a, edge1, edge2 = histogram2d([], [], bins=4) + assert_array_max_ulp(a, np.zeros((4, 4))) + + def test_binparameter_combination(self): + x = array( + [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, + 0.59944483, 1]) + y = array( + [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, + 0.15886423, 1]) + edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1) + H, xe, ye = histogram2d(x, y, (edges, 4)) + answer = array( + [[2., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 1., 0., 0.], + [1., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1])) + H, xe, ye = histogram2d(x, y, (4, edges)) + answer = array( + [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1])) + + +class TestTri(object): + def test_dtype(self): + out = array([[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]) + assert_array_equal(tri(3), out) + assert_array_equal(tri(3, dtype=bool), out.astype(bool)) + + +def test_tril_triu_ndim2(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.ones((2, 2), dtype=dtype) + b = np.tril(a) + c = np.triu(a) + assert_array_equal(b, [[1, 0], [1, 1]]) + assert_array_equal(c, b.T) + # should return the same dtype as the original array + assert_equal(b.dtype, a.dtype) + assert_equal(c.dtype, a.dtype) + + +def test_tril_triu_ndim3(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.array([ + [[1, 1], [1, 1]], + [[1, 1], [1, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_tril_desired = np.array([ + [[1, 0], [1, 1]], + [[1, 0], [1, 0]], + [[1, 0], [0, 0]], + ], dtype=dtype) + a_triu_desired = np.array([ + [[1, 1], [0, 1]], + [[1, 1], [0, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_triu_observed = np.triu(a) + a_tril_observed = np.tril(a) + assert_array_equal(a_triu_observed, a_triu_desired) + assert_array_equal(a_tril_observed, a_tril_desired) + assert_equal(a_triu_observed.dtype, a.dtype) + assert_equal(a_tril_observed.dtype, a.dtype) + + +def test_tril_triu_with_inf(): + # Issue 4859 + arr = np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + out_tril = np.array([[1, 0, 0], + [1, 1, 0], + [np.inf, 1, 1]]) + out_triu = out_tril.T + assert_array_equal(np.triu(arr), out_triu) + assert_array_equal(np.tril(arr), out_tril) + + +def test_tril_triu_dtype(): + # Issue 4916 + # tril and triu should return the same dtype as input + for c in np.typecodes['All']: + if c == 'V': + continue + arr = np.zeros((3, 3), dtype=c) + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + # check special cases + arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], + ['2004-01-01T12:00', '2003-01-03T13:45']], + dtype='datetime64') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + arr = np.zeros((3,3), dtype='f4,f4') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + +def test_mask_indices(): + # 
simple test without offset + iu = mask_indices(3, np.triu) + a = np.arange(9).reshape(3, 3) + assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8])) + # Now with an offset + iu1 = mask_indices(3, np.triu, 1) + assert_array_equal(a[iu1], array([1, 2, 5])) + + +def test_tril_indices(): + # indices without and with offset + il1 = tril_indices(4) + il2 = tril_indices(4, k=2) + il3 = tril_indices(4, m=5) + il4 = tril_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # indexing: + assert_array_equal(a[il1], + array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) + assert_array_equal(b[il3], + array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) + + # And for assigning values: + a[il1] = -1 + assert_array_equal(a, + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]])) + b[il3] = -1 + assert_array_equal(b, + array([[-1, 2, 3, 4, 5], + [-1, -1, 8, 9, 10], + [-1, -1, -1, 14, 15], + [-1, -1, -1, -1, 20]])) + # These cover almost the whole array (two diagonals right of the main one): + a[il2] = -10 + assert_array_equal(a, + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]])) + b[il4] = -10 + assert_array_equal(b, + array([[-10, -10, -10, 4, 5], + [-10, -10, -10, -10, 10], + [-10, -10, -10, -10, -10], + [-10, -10, -10, -10, -10]])) + + +class TestTriuIndices(object): + def test_triu_indices(self): + iu1 = triu_indices(4) + iu2 = triu_indices(4, k=2) + iu3 = triu_indices(4, m=5) + iu4 = triu_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # Both for indexing: + assert_array_equal(a[iu1], + array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) + assert_array_equal(b[iu3], + array([1, 2, 3, 4, 5, 7, 8, 9, + 10, 13, 14, 15, 19, 20])) + + # And for assigning values: + a[iu1] = -1 + assert_array_equal(a, + array([[-1, -1, -1, -1], + [5, -1, -1, -1], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu3] = -1 + assert_array_equal(b, + array([[-1, -1, -1, -1, -1], + [6, -1, -1, -1, -1], + [11, 12, -1, -1, -1], + [16, 17, 18, -1, -1]])) + + # These cover almost the whole array (two diagonals right of the + # main one): + a[iu2] = -10 + assert_array_equal(a, + array([[-1, -1, -10, -10], + [5, -1, -1, -10], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu4] = -10 + assert_array_equal(b, + array([[-1, -1, -10, -10, -10], + [6, -1, -1, -10, -10], + [11, 12, -1, -1, -10], + [16, 17, 18, -1, -1]])) + + +class TestTrilIndicesFrom(object): + def test_exceptions(self): + assert_raises(ValueError, tril_indices_from, np.ones((2,))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) + + +class TestTriuIndicesFrom(object): + def test_exceptions(self): + assert_raises(ValueError, triu_indices_from, np.ones((2,))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) + + +class TestVander(object): + def test_basic(self): + c = np.array([0, 1, -2, 3]) + v = vander(c) + powers = np.array([[0, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + [16, -8, 4, -2, 1], + [81, 27, 9, 3, 1]]) + # Check default value of N: + assert_array_equal(v, powers[:, 1:]) + # Check a range of N values, including 0 and 5 (greater than default) + m = powers.shape[1] + for n in range(6): + v = vander(c, N=n) + assert_array_equal(v, powers[:, m-n:m]) + + def 
test_dtypes(self): + c = array([11, -12, 13], dtype=np.int8) + v = vander(c) + expected = np.array([[121, 11, 1], + [144, -12, 1], + [169, 13, 1]]) + assert_array_equal(v, expected) + + c = array([1.0+1j, 1.0-1j]) + v = vander(c, N=3) + expected = np.array([[2j, 1+1j, 1], + [-2j, 1-1j, 1]]) + # The data is floating point, but the values are small integers, + # so assert_array_equal *should* be safe here (rather than, say, + # assert_array_almost_equal). + assert_array_equal(v, expected) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_twodim_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_twodim_base.pyc new file mode 100644 index 0000000..a8a7939 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_twodim_base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_type_check.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_type_check.py new file mode 100644 index 0000000..2982ca3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_type_check.py @@ -0,0 +1,442 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.compat import long +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises + ) +from numpy.lib.type_check import ( + common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, + nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close + ) + + +def assert_all(x): + assert_(np.all(x), x) + + +class TestCommonType(object): + def test_basic(self): + ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) + af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) + af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) + af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) + acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) + acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) + assert_(common_type(ai32) == np.float64) + assert_(common_type(af16) == np.float16) + assert_(common_type(af32) == np.float32) + assert_(common_type(af64) == np.float64) + assert_(common_type(acs) == np.csingle) + assert_(common_type(acd) == np.cdouble) + + +class TestMintypecode(object): + + def test_default_1(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype), 'd') + assert_equal(mintypecode('f'), 'f') + assert_equal(mintypecode('d'), 'd') + assert_equal(mintypecode('F'), 'F') + assert_equal(mintypecode('D'), 'D') + + def test_default_2(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype+'f'), 'f') + assert_equal(mintypecode(itype+'d'), 'd') + assert_equal(mintypecode(itype+'F'), 'F') + assert_equal(mintypecode(itype+'D'), 'D') + assert_equal(mintypecode('ff'), 'f') + assert_equal(mintypecode('fd'), 'd') + assert_equal(mintypecode('fF'), 'F') + assert_equal(mintypecode('fD'), 'D') + assert_equal(mintypecode('df'), 'd') + assert_equal(mintypecode('dd'), 'd') + #assert_equal(mintypecode('dF',savespace=1),'F') + assert_equal(mintypecode('dF'), 'D') + assert_equal(mintypecode('dD'), 'D') + assert_equal(mintypecode('Ff'), 'F') + #assert_equal(mintypecode('Fd',savespace=1),'F') + assert_equal(mintypecode('Fd'), 'D') + assert_equal(mintypecode('FF'), 'F') + assert_equal(mintypecode('FD'), 'D') + assert_equal(mintypecode('Df'), 'D') + assert_equal(mintypecode('Dd'), 'D') + assert_equal(mintypecode('DF'), 'D') + assert_equal(mintypecode('DD'), 'D') + + def test_default_3(self): + assert_equal(mintypecode('fdF'), 'D') + 
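# 'fdF' mixes 'd' (float64) with 'F' (complex64); no single code in the
+ # default typeset holds both exactly, so mintypecode's special
+ # 'F'-together-with-'d' case promotes the result to 'D' (complex128)
+ 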
#assert_equal(mintypecode('fdF',savespace=1),'F') + assert_equal(mintypecode('fdD'), 'D') + assert_equal(mintypecode('fFD'), 'D') + assert_equal(mintypecode('dFD'), 'D') + + assert_equal(mintypecode('ifd'), 'd') + assert_equal(mintypecode('ifF'), 'F') + assert_equal(mintypecode('ifD'), 'D') + assert_equal(mintypecode('idF'), 'D') + #assert_equal(mintypecode('idF',savespace=1),'F') + assert_equal(mintypecode('idD'), 'D') + + +class TestIsscalar(object): + + def test_basic(self): + assert_(np.isscalar(3)) + assert_(not np.isscalar([3])) + assert_(not np.isscalar((3,))) + assert_(np.isscalar(3j)) + assert_(np.isscalar(long(10))) + assert_(np.isscalar(4.0)) + + +class TestReal(object): + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(y, np.real(y)) + + y = np.array(1) + out = np.real(y) + assert_array_equal(y, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.real(y) + assert_equal(y, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.real, np.real(y)) + + y = np.array(1 + 1j) + out = np.real(y) + assert_array_equal(y.real, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.real(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestImag(object): + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(0, np.imag(y)) + + y = np.array(1) + out = np.imag(y) + assert_array_equal(0, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.imag(y) + assert_equal(0, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.imag, np.imag(y)) + + y = np.array(1 + 1j) + out = np.imag(y) + assert_array_equal(y.imag, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.imag(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestIscomplex(object): + + def test_fail(self): + z = np.array([-1, 0, 1]) + res = iscomplex(z) + assert_(not np.sometrue(res, axis=0)) + + def test_pass(self): + z = np.array([-1j, 1, 0]) + res = iscomplex(z) + assert_array_equal(res, [1, 0, 0]) + + +class TestIsreal(object): + + def test_pass(self): + z = np.array([-1, 0, 1j]) + res = isreal(z) + assert_array_equal(res, [1, 1, 0]) + + def test_fail(self): + z = np.array([-1j, 1, 0]) + res = isreal(z) + assert_array_equal(res, [0, 1, 1]) + + +class TestIscomplexobj(object): + + def test_basic(self): + z = np.array([-1, 0, 1]) + assert_(not iscomplexobj(z)) + z = np.array([-1j, 0, -1]) + assert_(iscomplexobj(z)) + + def test_scalar(self): + assert_(not iscomplexobj(1.0)) + assert_(iscomplexobj(1+0j)) + + def test_list(self): + assert_(iscomplexobj([3, 1+0j, True])) + assert_(not iscomplexobj([3, 1, True])) + + def test_duck(self): + class DummyComplexArray: + @property + def dtype(self): + return np.dtype(complex) + dummy = DummyComplexArray() + assert_(iscomplexobj(dummy)) + + def test_pandas_duck(self): + # This tests a custom np.dtype duck-typed class, such as used by pandas + # (pandas.core.dtypes) + class PdComplex(np.complex128): + pass + class PdDtype(object): + name = 'category' + names = None + type = PdComplex + kind = 'c' + str = ' 1e10) and assert_all(np.isfinite(vals[2])) + assert_equal(type(vals), np.ndarray) + + # perform the same test but in-place + with np.errstate(divide='ignore', invalid='ignore'): + vals = np.array((-1., 0, 1))/0. 
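+ # the division yields [-inf, nan, inf]; with copy=False the
+ # replacement happens in vals' own buffer, so nan_to_num can hand
+ # back vals itself (checked by the identity assert below)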
+ result = nan_to_num(vals, copy=False) + + assert_(result is vals) + assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) + assert_(vals[1] == 0) + assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) + assert_equal(type(vals), np.ndarray) + + def test_array(self): + vals = nan_to_num([1]) + assert_array_equal(vals, np.array([1], int)) + assert_equal(type(vals), np.ndarray) + + def test_integer(self): + vals = nan_to_num(1) + assert_all(vals == 1) + assert_equal(type(vals), np.int_) + + def test_float(self): + vals = nan_to_num(1.0) + assert_all(vals == 1.0) + assert_equal(type(vals), np.float_) + + def test_complex_good(self): + vals = nan_to_num(1+1j) + assert_all(vals == 1+1j) + assert_equal(type(vals), np.complex_) + + def test_complex_bad(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(0+1.j)/0. + vals = nan_to_num(v) + # !! This is actually (unexpectedly) zero + assert_all(np.isfinite(vals)) + assert_equal(type(vals), np.complex_) + + def test_complex_bad2(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(-1+1.j)/0. + vals = nan_to_num(v) + assert_all(np.isfinite(vals)) + assert_equal(type(vals), np.complex_) + # Fixme + #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals)) + # !! This is actually (unexpectedly) positive + # !! inf. Comment out for now, and see if it + # !! changes + #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) + + +class TestRealIfClose(object): + + def test_basic(self): + a = np.random.rand(10) + b = real_if_close(a+1e-15j) + assert_all(isrealobj(b)) + assert_array_equal(a, b) + b = real_if_close(a+1e-7j) + assert_all(iscomplexobj(b)) + b = real_if_close(a+1e-7j, tol=1e-6) + assert_all(isrealobj(b)) + + +class TestArrayConversion(object): + + def test_asfarray(self): + a = asfarray(np.array([1, 2, 3])) + assert_equal(a.__class__, np.ndarray) + assert_(np.issubdtype(a.dtype, np.floating)) + + # previously this would infer dtypes from arrays, unlike every single + # other numpy function + assert_raises(TypeError, + asfarray, np.array([1, 2, 3]), dtype=np.array(1.0)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_type_check.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_type_check.pyc new file mode 100644 index 0000000..8e4f55b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_type_check.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_ufunclike.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_ufunclike.py new file mode 100644 index 0000000..0f06876 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_ufunclike.py @@ -0,0 +1,106 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.core as nx +import numpy.lib.ufunclike as ufl +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_warns, assert_raises +) + + +class TestUfunclike(object): + + def test_isposinf(self): + a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) + out = nx.zeros(a.shape, bool) + tgt = nx.array([True, False, False, False, False, False]) + + res = ufl.isposinf(a) + assert_equal(res, tgt) + res = ufl.isposinf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex) + with assert_raises(TypeError): + ufl.isposinf(a) + + def test_isneginf(self): + a = nx.array([nx.inf, -nx.inf, 
nx.nan, 0.0, 3.0, -3.0]) + out = nx.zeros(a.shape, bool) + tgt = nx.array([False, True, False, False, False, False]) + + res = ufl.isneginf(a) + assert_equal(res, tgt) + res = ufl.isneginf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex) + with assert_raises(TypeError): + ufl.isneginf(a) + + def test_fix(self): + a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) + out = nx.zeros(a.shape, float) + tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) + + res = ufl.fix(a) + assert_equal(res, tgt) + res = ufl.fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(ufl.fix(3.14), 3) + + def test_fix_with_subclass(self): + class MyArray(nx.ndarray): + def __new__(cls, data, metadata=None): + res = nx.array(data, copy=True).view(cls) + res.metadata = metadata + return res + + def __array_wrap__(self, obj, context=None): + if isinstance(obj, MyArray): + obj.metadata = self.metadata + return obj + + def __array_finalize__(self, obj): + self.metadata = getattr(obj, 'metadata', None) + return self + + a = nx.array([1.1, -1.1]) + m = MyArray(a, metadata='foo') + f = ufl.fix(m) + assert_array_equal(f, nx.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + + # check 0d arrays don't decay to scalars + m0d = m[0,...] + m0d.metadata = 'bar' + f0d = ufl.fix(m0d) + assert_(isinstance(f0d, MyArray)) + assert_equal(f0d.metadata, 'bar') + + def test_deprecated(self): + # NumPy 1.13.0, 2017-04-26 + assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2)) + assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2)) + assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2)) + + def test_scalar(self): + x = np.inf + actual = np.isposinf(x) + expected = np.True_ + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + x = -3.4 + actual = np.fix(x) + expected = np.float64(-3.0) + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + out = np.array(0.0) + actual = np.fix(x, out=out) + assert_(actual is out) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_ufunclike.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_ufunclike.pyc new file mode 100644 index 0000000..d2f98b3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_ufunclike.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_utils.py b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_utils.py new file mode 100644 index 0000000..2723f34 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_utils.py @@ -0,0 +1,91 @@ +from __future__ import division, absolute_import, print_function + +import sys +import pytest + +from numpy.core import arange +from numpy.testing import assert_, assert_equal, assert_raises_regex +from numpy.lib import deprecate +import numpy.lib.utils as utils + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +def test_lookfor(): + out = StringIO() + utils.lookfor('eigenvalue', module='numpy', output=out, + import_modules=False) + out = out.getvalue() + assert_('numpy.linalg.eig' in out) + + +@deprecate +def old_func(self, x): + return x + + +@deprecate(message="Rather use new_func2") +def old_func2(self, x): + return x + + +def old_func3(self, x): + return x +new_func3 = 
deprecate(old_func3, old_name="old_func3", new_name="new_func3") + + +def test_deprecate_decorator(): + assert_('deprecated' in old_func.__doc__) + + +def test_deprecate_decorator_message(): + assert_('Rather use new_func2' in old_func2.__doc__) + + +def test_deprecate_fn(): + assert_('old_func3' in new_func3.__doc__) + assert_('new_func3' in new_func3.__doc__) + + +def test_safe_eval_nameconstant(): + # Test if safe_eval supports Python 3.4 _ast.NameConstant + utils.safe_eval('None') + + +class TestByteBounds(object): + + def test_byte_bounds(self): + # pointer difference matches size * itemsize + # due to contiguity + a = arange(12).reshape(3, 4) + low, high = utils.byte_bounds(a) + assert_equal(high - low, a.size * a.itemsize) + + def test_unusual_order_positive_stride(self): + a = arange(12).reshape(3, 4) + b = a.T + low, high = utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_unusual_order_negative_stride(self): + a = arange(12).reshape(3, 4) + b = a.T[::-1] + low, high = utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_strided(self): + a = arange(12) + b = a[::2] + low, high = utils.byte_bounds(b) + # the largest pointer address is lost (even numbers only in the + # stride), and compensate addresses for striding by 2 + assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize) + + +def test_assert_raises_regex_context_manager(): + with assert_raises_regex(ValueError, 'no deprecation warning'): + raise ValueError('no deprecation warning') diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_utils.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_utils.pyc new file mode 100644 index 0000000..a64be06 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/tests/test_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/twodim_base.py b/project/venv/lib/python2.7/site-packages/numpy/lib/twodim_base.py new file mode 100644 index 0000000..5c840b1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/twodim_base.py @@ -0,0 +1,1002 @@ +""" Basic functions for manipulating 2d arrays + +""" +from __future__ import division, absolute_import, print_function + +import functools + +from numpy.core.numeric import ( + absolute, asanyarray, arange, zeros, greater_equal, multiply, ones, + asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal, + nonzero + ) +from numpy.core.overrides import set_module +from numpy.core import overrides +from numpy.core import iinfo, transpose + + +__all__ = [ + 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu', + 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', + 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +i1 = iinfo(int8) +i2 = iinfo(int16) +i4 = iinfo(int32) + + +def _min_int(low, high): + """ get small int that fits the range """ + if high <= i1.max and low >= i1.min: + return int8 + if high <= i2.max and low >= i2.min: + return int16 + if high <= i4.max and low >= i4.min: + return int32 + return int64 + + +def _flip_dispatcher(m): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def fliplr(m): + """ + Flip array in the left/right direction. + + Flip the entries in each row in the left/right direction. + Columns are preserved, but appear in a different order than before. 
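+ 
+ A minimal illustration (values chosen arbitrarily): each row is
+ reversed while the rows themselves keep their order:
+ 
+ >>> np.fliplr([[1, 2], [3, 4]])
+ array([[2, 1],
+ [4, 3]])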
+ + Parameters + ---------- + m : array_like + Input array, must be at least 2-D. + + Returns + ------- + f : ndarray + A view of `m` with the columns reversed. Since a view + is returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + flipud : Flip array in the up/down direction. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to m[:,::-1]. Requires the array to be at least 2-D. + + Examples + -------- + >>> A = np.diag([1.,2.,3.]) + >>> A + array([[ 1., 0., 0.], + [ 0., 2., 0.], + [ 0., 0., 3.]]) + >>> np.fliplr(A) + array([[ 0., 0., 1.], + [ 0., 2., 0.], + [ 3., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(np.fliplr(A) == A[:,::-1,...]) + True + + """ + m = asanyarray(m) + if m.ndim < 2: + raise ValueError("Input must be >= 2-d.") + return m[:, ::-1] + + +@array_function_dispatch(_flip_dispatcher) +def flipud(m): + """ + Flip array in the up/down direction. + + Flip the entries in each column in the up/down direction. + Rows are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array. + + Returns + ------- + out : array_like + A view of `m` with the rows reversed. Since a view is + returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + fliplr : Flip array in the left/right direction. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[::-1,...]``. + Does not require the array to be two-dimensional. + + Examples + -------- + >>> A = np.diag([1.0, 2, 3]) + >>> A + array([[ 1., 0., 0.], + [ 0., 2., 0.], + [ 0., 0., 3.]]) + >>> np.flipud(A) + array([[ 0., 0., 3.], + [ 0., 2., 0.], + [ 1., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(np.flipud(A) == A[::-1,...]) + True + + >>> np.flipud([1,2]) + array([2, 1]) + + """ + m = asanyarray(m) + if m.ndim < 1: + raise ValueError("Input must be >= 1-d.") + return m[::-1, ...] + + +@set_module('numpy') +def eye(N, M=None, k=0, dtype=float, order='C'): + """ + Return a 2-D array with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the output. + M : int, optional + Number of columns in the output. If None, defaults to `N`. + k : int, optional + Index of the diagonal: 0 (the default) refers to the main diagonal, + a positive value refers to an upper diagonal, and a negative value + to a lower diagonal. + dtype : data-type, optional + Data-type of the returned array. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + .. versionadded:: 1.14.0 + + Returns + ------- + I : ndarray of shape (N,M) + An array where all elements are equal to zero, except for the `k`-th + diagonal, whose values are equal to one. + + See Also + -------- + identity : (almost) equivalent function + diag : diagonal 2-D array from a 1-D array specified by the user. + + Examples + -------- + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + >>> np.eye(3, k=1) + array([[ 0., 1., 0.], + [ 0., 0., 1.], + [ 0., 0., 0.]]) + + """ + if M is None: + M = N + m = zeros((N, M), dtype=dtype, order=order) + if k >= M: + return m + if k >= 0: + i = k + else: + i = (-k) * M + m[:M-k].flat[i::M+1] = 1 + return m + + +def _diag_dispatcher(v, k=None): + return (v,) + + +@array_function_dispatch(_diag_dispatcher) +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. 
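+ 
+ A quick sketch of both directions (values chosen for illustration):
+ 
+ >>> np.diag([4, 5])
+ array([[4, 0],
+ [0, 5]])
+ >>> np.diag(np.diag([4, 5]))
+ array([4, 5])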
+ + See the more detailed documentation for ``numpy.diagonal`` if you use this + function to extract a diagonal and wish to write to the resulting array; + whether it returns a copy or a view depends on what version of numpy you + are using. + + Parameters + ---------- + v : array_like + If `v` is a 2-D array, return a copy of its `k`-th diagonal. + If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th + diagonal. + k : int, optional + Diagonal in question. The default is 0. Use `k>0` for diagonals + above the main diagonal, and `k<0` for diagonals below the main + diagonal. + + Returns + ------- + out : ndarray + The extracted diagonal or constructed diagonal array. + + See Also + -------- + diagonal : Return specified diagonals. + diagflat : Create a 2-D array with the flattened input as a diagonal. + trace : Sum along diagonals. + triu : Upper triangle of an array. + tril : Lower triangle of an array. + + Examples + -------- + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(x, k=1) + array([1, 5]) + >>> np.diag(x, k=-1) + array([3, 7]) + + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + v = asanyarray(v) + s = v.shape + if len(s) == 1: + n = s[0]+abs(k) + res = zeros((n, n), v.dtype) + if k >= 0: + i = k + else: + i = (-k) * n + res[:n-k].flat[i::n+1] = v + return res + elif len(s) == 2: + return diagonal(v, k) + else: + raise ValueError("Input must be 1- or 2-d.") + + +@array_function_dispatch(_diag_dispatcher) +def diagflat(v, k=0): + """ + Create a two-dimensional array with the flattened input as a diagonal. + + Parameters + ---------- + v : array_like + Input data, which is flattened and set as the `k`-th + diagonal of the output. + k : int, optional + Diagonal to set; 0, the default, corresponds to the "main" diagonal, + a positive (negative) `k` giving the number of the diagonal above + (below) the main. + + Returns + ------- + out : ndarray + The 2-D output array. + + See Also + -------- + diag : MATLAB work-alike for 1-D and 2-D arrays. + diagonal : Return specified diagonals. + trace : Sum along diagonals. + + Examples + -------- + >>> np.diagflat([[1,2], [3,4]]) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]]) + + >>> np.diagflat([1,2], 1) + array([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]]) + + """ + try: + wrap = v.__array_wrap__ + except AttributeError: + wrap = None + v = asarray(v).ravel() + s = len(v) + n = s + abs(k) + res = zeros((n, n), v.dtype) + if (k >= 0): + i = arange(0, n-k) + fi = i+k+i*n + else: + i = arange(0, n+k) + fi = i+(i-k)*n + res.flat[fi] = v + if not wrap: + return res + return wrap(res) + + +@set_module('numpy') +def tri(N, M=None, k=0, dtype=float): + """ + An array with ones at and below the given diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the array. + M : int, optional + Number of columns in the array. + By default, `M` is taken equal to `N`. + k : int, optional + The sub-diagonal at and below which the array is filled. + `k` = 0 is the main diagonal, while `k` < 0 is below it, + and `k` > 0 is above. The default is 0. + dtype : dtype, optional + Data type of the returned array. The default is float. + + Returns + ------- + tri : ndarray of shape (N, M) + Array with its lower triangle filled with ones and zero elsewhere; + in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise. 
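+ Note that the ``greater_equal.outer`` construction in the code below
+ places the ones where ``j <= i + k``; for the default ``k = 0`` that
+ is on and below the main diagonal.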
+ + Examples + -------- + >>> np.tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + + >>> np.tri(3, 5, -1) + array([[ 0., 0., 0., 0., 0.], + [ 1., 0., 0., 0., 0.], + [ 1., 1., 0., 0., 0.]]) + + """ + if M is None: + M = N + + m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), + arange(-k, M-k, dtype=_min_int(-k, M - k))) + + # Avoid making a copy if the requested type is already bool + m = m.astype(dtype, copy=False) + + return m + + +def _trilu_dispatcher(m, k=None): + return (m,) + + +@array_function_dispatch(_trilu_dispatcher) +def tril(m, k=0): + """ + Lower triangle of an array. + + Return a copy of an array with elements above the `k`-th diagonal zeroed. + + Parameters + ---------- + m : array_like, shape (M, N) + Input array. + k : int, optional + Diagonal above which to zero elements. `k = 0` (the default) is the + main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + tril : ndarray, shape (M, N) + Lower triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + triu : same thing, only for the upper triangle + + Examples + -------- + >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k, dtype=bool) + + return where(mask, m, zeros(1, m.dtype)) + + +@array_function_dispatch(_trilu_dispatcher) +def triu(m, k=0): + """ + Upper triangle of an array. + + Return a copy of a matrix with the elements below the `k`-th diagonal + zeroed. + + Please refer to the documentation for `tril` for further details. + + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k-1, dtype=bool) + + return where(mask, zeros(1, m.dtype), m) + + +def _vander_dispatcher(x, N=None, increasing=None): + return (x,) + + +# Originally borrowed from John Hunter and matplotlib +@array_function_dispatch(_vander_dispatcher) +def vander(x, N=None, increasing=False): + """ + Generate a Vandermonde matrix. + + The columns of the output matrix are powers of the input vector. The + order of the powers is determined by the `increasing` boolean argument. + Specifically, when `increasing` is False, the `i`-th output column is + the input vector raised element-wise to the power of ``N - i - 1``. Such + a matrix with a geometric progression in each row is named for Alexandre- + Theophile Vandermonde. + + Parameters + ---------- + x : array_like + 1-D input array. + N : int, optional + Number of columns in the output. If `N` is not specified, a square + array is returned (``N = len(x)``). + increasing : bool, optional + Order of the powers of the columns. If True, the powers increase + from left to right, if False (the default) they are reversed. + + .. versionadded:: 1.9.0 + + Returns + ------- + out : ndarray + Vandermonde matrix. If `increasing` is False, the first column is + ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is + True, the columns are ``x^0, x^1, ..., x^(N-1)``. 
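+ For instance, ``np.vander([2, 3], 3)`` has rows ``[4, 2, 1]`` and
+ ``[9, 3, 1]``, the decreasing powers of 2 and of 3.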
+ + See Also + -------- + polynomial.polynomial.polyvander + + Examples + -------- + >>> x = np.array([1, 2, 3, 5]) + >>> N = 3 + >>> np.vander(x, N) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> np.column_stack([x**(N-1-i) for i in range(N)]) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> x = np.array([1, 2, 3, 5]) + >>> np.vander(x) + array([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> np.vander(x, increasing=True) + array([[ 1, 1, 1, 1], + [ 1, 2, 4, 8], + [ 1, 3, 9, 27], + [ 1, 5, 25, 125]]) + + The determinant of a square Vandermonde matrix is the product + of the differences between the values of the input vector: + + >>> np.linalg.det(np.vander(x)) + 48.000000000000043 + >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) + 48 + + """ + x = asarray(x) + if x.ndim != 1: + raise ValueError("x must be a one-dimensional array or sequence.") + if N is None: + N = len(x) + + v = empty((len(x), N), dtype=promote_types(x.dtype, int)) + tmp = v[:, ::-1] if not increasing else v + + if N > 0: + tmp[:, 0] = 1 + if N > 1: + tmp[:, 1:] = x[:, None] + multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) + + return v + + +def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None, + weights=None, density=None): + return (x, y, bins, weights) + + +@array_function_dispatch(_histogram2d_dispatcher) +def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, + density=None): + """ + Compute the bi-dimensional histogram of two data samples. + + Parameters + ---------- + x : array_like, shape (N,) + An array containing the x coordinates of the points to be + histogrammed. + y : array_like, shape (N,) + An array containing the y coordinates of the points to be + histogrammed. + bins : int or array_like or [int, int] or [array, array], optional + The bin specification: + + * If int, the number of bins for the two dimensions (nx=ny=bins). + * If array_like, the bin edges for the two dimensions + (x_edges=y_edges=bins). + * If [int, int], the number of bins in each dimension + (nx, ny = bins). + * If [array, array], the bin edges in each dimension + (x_edges, y_edges = bins). + * A combination [int, array] or [array, int], where int + is the number of bins and array is the bin edges. + + range : array_like, shape(2,2), optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range + will be considered outliers and not tallied in the histogram. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_area``. + normed : bool, optional + An alias for the density argument that behaves identically. To avoid + confusion with the broken normed argument to `histogram`, `density` + should be preferred. + weights : array_like, shape(N,), optional + An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. + Weights are normalized to 1 if `normed` is True. If `normed` is + False, the values of the returned histogram are equal to the sum of + the weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray, shape(nx, ny) + The bi-dimensional histogram of samples `x` and `y`. Values in `x` + are histogrammed along the first dimension and values in `y` are + histogrammed along the second dimension. 
+ xedges : ndarray, shape(nx+1,) + The bin edges along the first dimension. + yedges : ndarray, shape(ny+1,) + The bin edges along the second dimension. + + See Also + -------- + histogram : 1D histogram + histogramdd : Multidimensional histogram + + Notes + ----- + When `normed` is True, then the returned histogram is the sample + density, defined such that the sum over bins of the product + ``bin_value * bin_area`` is 1. + + Please note that the histogram does not follow the Cartesian convention + where `x` values are on the abscissa and `y` values on the ordinate + axis. Rather, `x` is histogrammed along the first dimension of the + array (vertical), and `y` along the second dimension of the array + (horizontal). This ensures compatibility with `histogramdd`. + + Examples + -------- + >>> from matplotlib.image import NonUniformImage + >>> import matplotlib.pyplot as plt + + Construct a 2-D histogram with variable bin width. First define the bin + edges: + + >>> xedges = [0, 1, 3, 5] + >>> yedges = [0, 2, 3, 4, 6] + + Next we create a histogram H with random bin content: + + >>> x = np.random.normal(2, 1, 100) + >>> y = np.random.normal(1, 1, 100) + >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) + >>> H = H.T # Let each row list bins with common y range. + + :func:`imshow ` can only display square bins: + + >>> fig = plt.figure(figsize=(7, 3)) + >>> ax = fig.add_subplot(131, title='imshow: square bins') + >>> plt.imshow(H, interpolation='nearest', origin='low', + ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) + + :func:`pcolormesh ` can display actual edges: + + >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', + ... aspect='equal') + >>> X, Y = np.meshgrid(xedges, yedges) + >>> ax.pcolormesh(X, Y, H) + + :class:`NonUniformImage ` can be used to + display actual bin edges with interpolation: + + >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', + ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) + >>> im = NonUniformImage(ax, interpolation='bilinear') + >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 + >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 + >>> im.set_data(xcenters, ycenters, H) + >>> ax.images.append(im) + >>> plt.show() + + """ + from numpy import histogramdd + + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + xedges = yedges = asarray(bins) + bins = [xedges, yedges] + hist, edges = histogramdd([x, y], bins, range, normed, weights, density) + return hist, edges[0], edges[1] + + +@set_module('numpy') +def mask_indices(n, mask_func, k=0): + """ + Return the indices to access (n, n) arrays, given a masking function. + + Assume `mask_func` is a function that, for a square array a of size + ``(n, n)`` with a possible offset argument `k`, when called as + ``mask_func(a, k)`` returns a new array with zeros in certain locations + (functions like `triu` or `tril` do precisely this). Then this function + returns the indices where the non-zero values would be located. + + Parameters + ---------- + n : int + The returned indices will be valid to access arrays of shape (n, n). + mask_func : callable + A function whose call signature is similar to that of `triu`, `tril`. + That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. + `k` is an optional argument to the function. + k : scalar + An optional argument which is passed through to `mask_func`. Functions + like `triu`, `tril` take a second argument that is interpreted as an + offset. 
+ + Returns + ------- + indices : tuple of arrays. + The `n` arrays of indices corresponding to the locations where + ``mask_func(np.ones((n, n)), k)`` is True. + + See Also + -------- + triu, tril, triu_indices, tril_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + These are the indices that would allow you to access the upper triangular + part of any 3x3 array: + + >>> iu = np.mask_indices(3, np.triu) + + For example, if `a` is a 3x3 array: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> a[iu] + array([0, 1, 2, 4, 5, 8]) + + An offset can be passed also to the masking function. This gets us the + indices starting on the first diagonal right of the main one: + + >>> iu1 = np.mask_indices(3, np.triu, 1) + + with which we now extract only three elements: + + >>> a[iu1] + array([1, 2, 5]) + + """ + m = ones((n, n), int) + a = mask_func(m, k) + return nonzero(a != 0) + + +@set_module('numpy') +def tril_indices(n, k=0, m=None): + """ + Return the indices for the lower-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The row dimension of the arrays for which the returned + indices will be valid. + k : int, optional + Diagonal offset (see `tril` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple of arrays + The indices for the triangle. The returned tuple contains two arrays, + each with the indices along one dimension of the array. + + See also + -------- + triu_indices : similar function, for upper-triangular. + mask_indices : generic function accepting an arbitrary mask function. + tril, triu + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + lower triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> il1 = np.tril_indices(4) + >>> il2 = np.tril_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[il1] + array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) + + And for assigning values: + + >>> a[il1] = -1 + >>> a + array([[-1, 1, 2, 3], + [-1, -1, 6, 7], + [-1, -1, -1, 11], + [-1, -1, -1, -1]]) + + These cover almost the whole array (two diagonals right of the main one): + + >>> a[il2] = -10 + >>> a + array([[-10, -10, -10, 3], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]]) + + """ + return nonzero(tri(n, m, k=k, dtype=bool)) + + +def _trilu_indices_form_dispatcher(arr, k=None): + return (arr,) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def tril_indices_from(arr, k=0): + """ + Return the indices for the lower-triangle of arr. + + See `tril_indices` for full details. + + Parameters + ---------- + arr : array_like + The indices will be valid for square arrays whose dimensions are + the same as arr. + k : int, optional + Diagonal offset (see `tril` for details). + + See Also + -------- + tril_indices, tril + + Notes + ----- + .. 
versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) + + +@set_module('numpy') +def triu_indices(n, k=0, m=None): + """ + Return the indices for the upper-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The size of the arrays for which the returned indices will + be valid. + k : int, optional + Diagonal offset (see `triu` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple, shape(2) of ndarrays, shape(`n`) + The indices for the triangle. The returned tuple contains two arrays, + each with the indices along one dimension of the array. Can be used + to slice a ndarray of shape(`n`, `n`). + + See also + -------- + tril_indices : similar function, for lower-triangular. + mask_indices : generic function accepting an arbitrary mask function. + triu, tril + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + upper triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> iu1 = np.triu_indices(4) + >>> iu2 = np.triu_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[iu1] + array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) + + And for assigning values: + + >>> a[iu1] = -1 + >>> a + array([[-1, -1, -1, -1], + [ 4, -1, -1, -1], + [ 8, 9, -1, -1], + [12, 13, 14, -1]]) + + These cover only a small part of the whole array (two diagonals right + of the main one): + + >>> a[iu2] = -10 + >>> a + array([[ -1, -1, -10, -10], + [ 4, -1, -1, -10], + [ 8, 9, -1, -1], + [ 12, 13, 14, -1]]) + + """ + return nonzero(~tri(n, m, k=k-1, dtype=bool)) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def triu_indices_from(arr, k=0): + """ + Return the indices for the upper-triangle of arr. + + See `triu_indices` for full details. + + Parameters + ---------- + arr : ndarray, shape(N, N) + The indices will be valid for square arrays. + k : int, optional + Diagonal offset (see `triu` for details). + + Returns + ------- + triu_indices_from : tuple, shape(2) of ndarray, shape(N) + Indices for the upper-triangle of `arr`. + + See Also + -------- + triu_indices, triu + + Notes + ----- + .. 
versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/twodim_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/twodim_base.pyc new file mode 100644 index 0000000..122f1c2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/twodim_base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/type_check.py b/project/venv/lib/python2.7/site-packages/numpy/lib/type_check.py new file mode 100644 index 0000000..90b1e9a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/type_check.py @@ -0,0 +1,698 @@ +"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py + +""" +from __future__ import division, absolute_import, print_function +import functools +import warnings + +__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', + 'isreal', 'nan_to_num', 'real', 'real_if_close', + 'typename', 'asfarray', 'mintypecode', 'asscalar', + 'common_type'] + +import numpy.core.numeric as _nx +from numpy.core.numeric import asarray, asanyarray, isnan, zeros +from numpy.core.overrides import set_module +from numpy.core import overrides +from .ufunclike import isneginf, isposinf + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' + + +@set_module('numpy') +def mintypecode(typechars, typeset='GDFgdf', default='d'): + """ + Return the character for the minimum-size type to which given types can + be safely cast. + + The returned type character must represent the smallest size dtype such + that an array of the returned type can handle the data from an array of + all types in `typechars` (or if `typechars` is an array, then its + dtype.char). + + Parameters + ---------- + typechars : list of str or array_like + If a list of strings, each string should represent a dtype. + If array_like, the character representation of the array dtype is used. + typeset : str or list of str, optional + The set of characters that the returned character is chosen from. + The default set is 'GDFgdf'. + default : str, optional + The default character, this is returned if none of the characters in + `typechars` matches a character in `typeset`. + + Returns + ------- + typechar : str + The character representing the minimum-size type that was found. + + See Also + -------- + dtype, sctype2char, maximum_sctype + + Examples + -------- + >>> np.mintypecode(['d', 'f', 'S']) + 'd' + >>> x = np.array([1.1, 2-3.j]) + >>> np.mintypecode(x) + 'D' + + >>> np.mintypecode('abceh', default='G') + 'G' + + """ + typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char + for t in typechars] + intersection = [t for t in typecodes if t in typeset] + if not intersection: + return default + if 'F' in intersection and 'd' in intersection: + return 'D' + l = [(_typecodes_by_elsize.index(t), t) for t in intersection] + l.sort() + return l[0][1] + + +def _asfarray_dispatcher(a, dtype=None): + return (a,) + + +@array_function_dispatch(_asfarray_dispatcher) +def asfarray(a, dtype=_nx.float_): + """ + Return an array converted to a float type. + + Parameters + ---------- + a : array_like + The input array. + dtype : str or dtype object, optional + Float type code to coerce input array `a`. If `dtype` is one of the + 'int' dtypes, it is replaced with float64. 
+ + Returns + ------- + out : ndarray + The input `a` as a float ndarray. + + Examples + -------- + >>> np.asfarray([2, 3]) + array([ 2., 3.]) + >>> np.asfarray([2, 3], dtype='float') + array([ 2., 3.]) + >>> np.asfarray([2, 3], dtype='int8') + array([ 2., 3.]) + + """ + if not _nx.issubdtype(dtype, _nx.inexact): + dtype = _nx.float_ + return asarray(a, dtype=dtype) + + +def _real_dispatcher(val): + return (val,) + + +@array_function_dispatch(_real_dispatcher) +def real(val): + """ + Return the real part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The real component of the complex argument. If `val` is real, the type + of `val` is used for the output. If `val` has complex elements, the + returned type is float. + + See Also + -------- + real_if_close, imag, angle + + Examples + -------- + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.real + array([ 1., 3., 5.]) + >>> a.real = 9 + >>> a + array([ 9.+2.j, 9.+4.j, 9.+6.j]) + >>> a.real = np.array([9, 8, 7]) + >>> a + array([ 9.+2.j, 8.+4.j, 7.+6.j]) + >>> np.real(1 + 1j) + 1.0 + + """ + try: + return val.real + except AttributeError: + return asanyarray(val).real + + +def _imag_dispatcher(val): + return (val,) + + +@array_function_dispatch(_imag_dispatcher) +def imag(val): + """ + Return the imaginary part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The imaginary component of the complex argument. If `val` is real, + the type of `val` is used for the output. If `val` has complex + elements, the returned type is float. + + See Also + -------- + real, angle, real_if_close + + Examples + -------- + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.imag + array([ 2., 4., 6.]) + >>> a.imag = np.array([8, 10, 12]) + >>> a + array([ 1. +8.j, 3.+10.j, 5.+12.j]) + >>> np.imag(1 + 1j) + 1.0 + + """ + try: + return val.imag + except AttributeError: + return asanyarray(val).imag + + +def _is_type_dispatcher(x): + return (x,) + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplex(x): + """ + Returns a bool array, where True if input element is complex. + + What is tested is whether the input has a non-zero imaginary part, not if + the input type is complex. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray of bools + Output array. + + See Also + -------- + isreal + iscomplexobj : Return True if x is a complex type or an array of complex + numbers. + + Examples + -------- + >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) + array([ True, False, False, False, False, True]) + + """ + ax = asanyarray(x) + if issubclass(ax.dtype.type, _nx.complexfloating): + return ax.imag != 0 + res = zeros(ax.shape, bool) + return res[()] # convert to scalar if needed + + +@array_function_dispatch(_is_type_dispatcher) +def isreal(x): + """ + Returns a bool array, where True if input element is real. + + If element has complex type with zero complex part, the return value + for that element is True. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray, bool + Boolean array of same shape as `x`. + + See Also + -------- + iscomplex + isrealobj : Return True if x is not a complex type. 
+ + Examples + -------- + >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j]) + array([False, True, True, True, True, False]) + + """ + return imag(x) == 0 + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplexobj(x): + """ + Check for a complex type or an array of complex numbers. + + The type of the input is checked, not the value. Even if the input + has an imaginary part equal to zero, `iscomplexobj` evaluates to True. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + iscomplexobj : bool + The return value, True if `x` is of a complex type or has at least + one complex element. + + See Also + -------- + isrealobj, iscomplex + + Examples + -------- + >>> np.iscomplexobj(1) + False + >>> np.iscomplexobj(1+0j) + True + >>> np.iscomplexobj([3, 1+0j, True]) + True + + """ + try: + dtype = x.dtype + type_ = dtype.type + except AttributeError: + type_ = asarray(x).dtype.type + return issubclass(type_, _nx.complexfloating) + + +@array_function_dispatch(_is_type_dispatcher) +def isrealobj(x): + """ + Return True if x is a not complex type or an array of complex numbers. + + The type of the input is checked, not the value. So even if the input + has an imaginary part equal to zero, `isrealobj` evaluates to False + if the data type is complex. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + y : bool + The return value, False if `x` is of a complex type. + + See Also + -------- + iscomplexobj, isreal + + Examples + -------- + >>> np.isrealobj(1) + True + >>> np.isrealobj(1+0j) + False + >>> np.isrealobj([3, 1+0j, True]) + False + + """ + return not iscomplexobj(x) + +#----------------------------------------------------------------------------- + +def _getmaxmin(t): + from numpy.core import getlimits + f = getlimits.finfo(t) + return f.max, f.min + + +def _nan_to_num_dispatcher(x, copy=None): + return (x,) + + +@array_function_dispatch(_nan_to_num_dispatcher) +def nan_to_num(x, copy=True): + """ + Replace NaN with zero and infinity with large finite numbers. + + If `x` is inexact, NaN is replaced by zero, and infinity and -infinity + replaced by the respectively largest and most negative finite floating + point values representable by ``x.dtype``. + + For complex dtypes, the above is applied to each of the real and + imaginary components of `x` separately. + + If `x` is not inexact, then no replacements are made. + + Parameters + ---------- + x : scalar or array_like + Input data. + copy : bool, optional + Whether to create a copy of `x` (True) or to replace values + in-place (False). The in-place operation only occurs if + casting to an array does not require a copy. + Default is True. + + .. versionadded:: 1.13 + + Returns + ------- + out : ndarray + `x`, with the non-finite values replaced. If `copy` is False, this may + be `x` itself. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity. + isneginf : Shows which elements are negative infinity. + isposinf : Shows which elements are positive infinity. + isnan : Shows which elements are Not a Number (NaN). + isfinite : Shows which elements are finite (not NaN, not infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. 
+ + Examples + -------- + >>> np.nan_to_num(np.inf) + 1.7976931348623157e+308 + >>> np.nan_to_num(-np.inf) + -1.7976931348623157e+308 + >>> np.nan_to_num(np.nan) + 0.0 + >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) + >>> np.nan_to_num(x) + array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, + -1.28000000e+002, 1.28000000e+002]) + >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) + >>> np.nan_to_num(y) + array([ 1.79769313e+308 +0.00000000e+000j, + 0.00000000e+000 +0.00000000e+000j, + 0.00000000e+000 +1.79769313e+308j]) + """ + x = _nx.array(x, subok=True, copy=copy) + xtype = x.dtype.type + + isscalar = (x.ndim == 0) + + if not issubclass(xtype, _nx.inexact): + return x[()] if isscalar else x + + iscomplex = issubclass(xtype, _nx.complexfloating) + + dest = (x.real, x.imag) if iscomplex else (x,) + maxf, minf = _getmaxmin(x.real.dtype) + for d in dest: + _nx.copyto(d, 0.0, where=isnan(d)) + _nx.copyto(d, maxf, where=isposinf(d)) + _nx.copyto(d, minf, where=isneginf(d)) + return x[()] if isscalar else x + +#----------------------------------------------------------------------------- + +def _real_if_close_dispatcher(a, tol=None): + return (a,) + + +@array_function_dispatch(_real_if_close_dispatcher) +def real_if_close(a, tol=100): + """ + If complex input returns a real array if complex parts are close to zero. + + "Close to zero" is defined as `tol` * (machine epsilon of the type for + `a`). + + Parameters + ---------- + a : array_like + Input array. + tol : float + Tolerance in machine epsilons for the complex part of the elements + in the array. + + Returns + ------- + out : ndarray + If `a` is real, the type of `a` is used for the output. If `a` + has complex elements, the returned type is float. + + See Also + -------- + real, imag, angle + + Notes + ----- + Machine epsilon varies from machine to machine and between data types + but Python floats on most platforms have a machine epsilon equal to + 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print + out the machine epsilon for floats. + + Examples + -------- + >>> np.finfo(float).eps + 2.2204460492503131e-16 + + >>> np.real_if_close([2.1 + 4e-14j], tol=1000) + array([ 2.1]) + >>> np.real_if_close([2.1 + 4e-13j], tol=1000) + array([ 2.1 +4.00000000e-13j]) + + """ + a = asanyarray(a) + if not issubclass(a.dtype.type, _nx.complexfloating): + return a + if tol > 1: + from numpy.core import getlimits + f = getlimits.finfo(a.dtype.type) + tol = f.eps * tol + if _nx.all(_nx.absolute(a.imag) < tol): + a = a.real + return a + + +def _asscalar_dispatcher(a): + return (a,) + + +@array_function_dispatch(_asscalar_dispatcher) +def asscalar(a): + """ + Convert an array of size 1 to its scalar equivalent. + + .. deprecated:: 1.16 + + Deprecated, use `numpy.ndarray.item()` instead. + + Parameters + ---------- + a : ndarray + Input array of size 1. + + Returns + ------- + out : scalar + Scalar representation of `a`. The output data type is the same type + returned by the input's `item` method. 
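# The replacement the deprecation note above points to: ndarray.item() returns
# the same Python scalar without the warning.
import numpy as np

a = np.array([24])
print(a.item())  # 24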
+ + Examples + -------- + >>> np.asscalar(np.array([24])) + 24 + + """ + + # 2018-10-10, 1.16 + warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use ' + 'a.item() instead', DeprecationWarning, stacklevel=1) + return a.item() + +#----------------------------------------------------------------------------- + +_namefromtype = {'S1': 'character', + '?': 'bool', + 'b': 'signed char', + 'B': 'unsigned char', + 'h': 'short', + 'H': 'unsigned short', + 'i': 'integer', + 'I': 'unsigned integer', + 'l': 'long integer', + 'L': 'unsigned long integer', + 'q': 'long long integer', + 'Q': 'unsigned long long integer', + 'f': 'single precision', + 'd': 'double precision', + 'g': 'long precision', + 'F': 'complex single precision', + 'D': 'complex double precision', + 'G': 'complex long double precision', + 'S': 'string', + 'U': 'unicode', + 'V': 'void', + 'O': 'object' + } + +@set_module('numpy') +def typename(char): + """ + Return a description for the given data type code. + + Parameters + ---------- + char : str + Data type code. + + Returns + ------- + out : str + Description of the input data type code. + + See Also + -------- + dtype, typecodes + + Examples + -------- + >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', + ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] + >>> for typechar in typechars: + ... print(typechar, ' : ', np.typename(typechar)) + ... + S1 : character + ? : bool + B : unsigned char + D : complex double precision + G : complex long double precision + F : complex single precision + I : unsigned integer + H : unsigned short + L : unsigned long integer + O : object + Q : unsigned long long integer + S : string + U : unicode + V : void + b : signed char + d : double precision + g : long precision + f : single precision + i : integer + h : short + l : long integer + q : long long integer + + """ + return _namefromtype[char] + +#----------------------------------------------------------------------------- + +#determine the "minimum common type" for a group of arrays. +array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble], + [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]] +array_precision = {_nx.half: 0, + _nx.single: 1, + _nx.double: 2, + _nx.longdouble: 3, + _nx.csingle: 1, + _nx.cdouble: 2, + _nx.clongdouble: 3} + + +def _common_type_dispatcher(*arrays): + return arrays + + +@array_function_dispatch(_common_type_dispatcher) +def common_type(*arrays): + """ + Return a scalar type which is common to the input arrays. + + The return type will always be an inexact (i.e. floating point) scalar + type, even if all the arrays are integer arrays. If one of the inputs is + an integer array, the minimum precision type that is returned is a + 64-bit floating point dtype. + + All input arrays except int64 and uint64 can be safely cast to the + returned dtype without loss of information. + + Parameters + ---------- + array1, array2, ... : ndarrays + Input arrays. + + Returns + ------- + out : data type code + Data type code. 
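# A sketch of how the two tables above drive common_type(): the result is
# array_type[row][precision], where the row switches to complex when any input
# is complex, and integer inputs force at least double precision.
import numpy as np

print(np.common_type(np.arange(3, dtype=np.float32)))      # -> numpy.float32
print(np.common_type(np.arange(3)))                        # -> numpy.float64
print(np.common_type(np.array([1j], dtype=np.complex64)))  # -> numpy.complex64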
+
+    See Also
+    --------
+    dtype, mintypecode
+
+    Examples
+    --------
+    >>> np.common_type(np.arange(2, dtype=np.float32))
+    <type 'numpy.float32'>
+    >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
+    <type 'numpy.float64'>
+    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
+    <type 'numpy.complex128'>
+
+    """
+    is_complex = False
+    precision = 0
+    for a in arrays:
+        t = a.dtype.type
+        if iscomplexobj(a):
+            is_complex = True
+        if issubclass(t, _nx.integer):
+            p = 2  # array_precision[_nx.double]
+        else:
+            p = array_precision.get(t, None)
+            if p is None:
+                raise TypeError("can't get common type for non-numeric array")
+        precision = max(precision, p)
+    if is_complex:
+        return array_type[1][precision]
+    else:
+        return array_type[0][precision]
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/type_check.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/type_check.pyc
new file mode 100644
index 0000000..02bb23c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/type_check.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/ufunclike.py b/project/venv/lib/python2.7/site-packages/numpy/lib/ufunclike.py
new file mode 100644
index 0000000..9a9e6f9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/ufunclike.py
@@ -0,0 +1,250 @@
+"""
+Module of functions that are like ufuncs in acting on arrays and optionally
+storing results in an output array.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['fix', 'isneginf', 'isposinf']
+
+import numpy.core.numeric as nx
+from numpy.core.overrides import array_function_dispatch, ENABLE_ARRAY_FUNCTION
+import warnings
+import functools
+
+
+def _deprecate_out_named_y(f):
+    """
+    Allow the out argument to be passed as the name `y` (deprecated)
+
+    In future, this decorator should be removed.
+    """
+    @functools.wraps(f)
+    def func(x, out=None, **kwargs):
+        if 'y' in kwargs:
+            if 'out' in kwargs:
+                raise TypeError(
+                    "{} got multiple values for argument 'out'/'y'"
+                    .format(f.__name__)
+                )
+            out = kwargs.pop('y')
+            # NumPy 1.13.0, 2017-04-26
+            warnings.warn(
+                "The name of the out argument to {} has changed from `y` to "
+                "`out`, to match other ufuncs.".format(f.__name__),
+                DeprecationWarning, stacklevel=3)
+        return f(x, out=out, **kwargs)
+
+    return func
+
+
+def _fix_out_named_y(f):
+    """
+    Allow the out argument to be passed as the name `y` (deprecated)
+
+    This decorator should only be used if _deprecate_out_named_y is used on
+    a corresponding dispatcher function.
+    """
+    @functools.wraps(f)
+    def func(x, out=None, **kwargs):
+        if 'y' in kwargs:
+            # we already did error checking in _deprecate_out_named_y
+            out = kwargs.pop('y')
+        return f(x, out=out, **kwargs)
+
+    return func
+
+
+if not ENABLE_ARRAY_FUNCTION:
+    _fix_out_named_y = _deprecate_out_named_y
+
+
+@_deprecate_out_named_y
+def _dispatcher(x, out=None):
+    return (x, out)
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_out_named_y
+def fix(x, out=None):
+    """
+    Round to nearest integer towards zero.
+
+    Round an array of floats element-wise to nearest integer towards zero.
+    The rounded values are returned as floats.
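# A re-derivation of the two-step trick used in the implementation that
# follows: start from ceil(x), which is correct for negative inputs, then
# overwrite the non-negative entries with floor(x).
import numpy as np

x = np.array([2.1, 2.9, -2.1, -2.9])
res = np.ceil(x)
np.floor(x, out=res, where=np.greater_equal(x, 0))
print(res)  # [ 2.  2. -2. -2.]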
+ + Parameters + ---------- + x : array_like + An array of floats to be rounded + y : ndarray, optional + Output array + + Returns + ------- + out : ndarray of floats + The array of rounded numbers + + See Also + -------- + trunc, floor, ceil + around : Round to given number of decimals + + Examples + -------- + >>> np.fix(3.14) + 3.0 + >>> np.fix(3) + 3.0 + >>> np.fix([2.1, 2.9, -2.1, -2.9]) + array([ 2., 2., -2., -2.]) + + """ + # promote back to an array if flattened + res = nx.asanyarray(nx.ceil(x, out=out)) + res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) + + # when no out argument is passed and no subclasses are involved, flatten + # scalars + if out is None and type(res) is nx.ndarray: + res = res[()] + return res + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +@_fix_out_named_y +def isposinf(x, out=None): + """ + Test element-wise for positive infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + y : array_like, optional + A boolean array with the same shape as `x` to store the result. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a boolean array is returned + with values True where the corresponding element of the input is + positive infinity and values False where the element of the input is + not positive infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as zeros + and ones, if the type is boolean then as False and True. + The return value `out` is then a reference to that array. + + See Also + -------- + isinf, isneginf, isfinite, isnan + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values + + Examples + -------- + >>> np.isposinf(np.PINF) + array(True, dtype=bool) + >>> np.isposinf(np.inf) + array(True, dtype=bool) + >>> np.isposinf(np.NINF) + array(False, dtype=bool) + >>> np.isposinf([-np.inf, 0., np.inf]) + array([False, False, True]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isposinf(x, y) + array([0, 0, 1]) + >>> y + array([0, 0, 1]) + + """ + is_inf = nx.isinf(x) + try: + signbit = ~nx.signbit(x) + except TypeError: + raise TypeError('This operation is not supported for complex values ' + 'because it would be ambiguous.') + else: + return nx.logical_and(is_inf, signbit, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +@_fix_out_named_y +def isneginf(x, out=None): + """ + Test element-wise for negative infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A boolean array with the same shape and type as `x` to store the + result. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a numpy boolean array is + returned with values True where the corresponding element of the + input is negative infinity and values False where the element of + the input is not negative infinity. + + If a second argument is supplied the result is stored there. 
If the + type of that array is a numeric type the result is represented as + zeros and ones, if the type is boolean then as False and True. The + return value `out` is then a reference to that array. + + See Also + -------- + isinf, isposinf, isnan, isfinite + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values. + + Examples + -------- + >>> np.isneginf(np.NINF) + array(True, dtype=bool) + >>> np.isneginf(np.inf) + array(False, dtype=bool) + >>> np.isneginf(np.PINF) + array(False, dtype=bool) + >>> np.isneginf([-np.inf, 0., np.inf]) + array([ True, False, False]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isneginf(x, y) + array([1, 0, 0]) + >>> y + array([1, 0, 0]) + + """ + is_inf = nx.isinf(x) + try: + signbit = nx.signbit(x) + except TypeError: + raise TypeError('This operation is not supported for complex values ' + 'because it would be ambiguous.') + else: + return nx.logical_and(is_inf, signbit, out) diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/ufunclike.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/ufunclike.pyc new file mode 100644 index 0000000..ec72a2d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/ufunclike.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/user_array.py b/project/venv/lib/python2.7/site-packages/numpy/lib/user_array.py new file mode 100644 index 0000000..f1510a7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/lib/user_array.py @@ -0,0 +1,288 @@ +""" +Standard container-class for easy multiple-inheritance. + +Try to inherit from the ndarray instead of using this class as this is not +complete. + +""" +from __future__ import division, absolute_import, print_function + +from numpy.core import ( + array, asarray, absolute, add, subtract, multiply, divide, + remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, + bitwise_xor, invert, less, less_equal, not_equal, equal, greater, + greater_equal, shape, reshape, arange, sin, sqrt, transpose +) +from numpy.compat import long + + +class container(object): + """ + container(data, dtype=None, copy=True) + + Standard container-class for easy multiple-inheritance. 
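# A usage sketch for the container class defined below, importing it from the
# module this hunk adds (the import path holds for NumPy of this vintage):
import numpy as np
from numpy.lib.user_array import container

c = container(np.arange(4))
d = c + 1                # arithmetic is forwarded to the wrapped ndarray...
print(type(d).__name__)  # 'container'  (...and the result is re-wrapped)
print(d.array)           # [1 2 3 4]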
+ + Methods + ------- + copy + tostring + byteswap + astype + + """ + def __init__(self, data, dtype=None, copy=True): + self.array = array(data, dtype, copy=copy) + + def __repr__(self): + if self.ndim > 0: + return self.__class__.__name__ + repr(self.array)[len("array"):] + else: + return self.__class__.__name__ + "(" + repr(self.array) + ")" + + def __array__(self, t=None): + if t: + return self.array.astype(t) + return self.array + + # Array as sequence + def __len__(self): + return len(self.array) + + def __getitem__(self, index): + return self._rc(self.array[index]) + + def __setitem__(self, index, value): + self.array[index] = asarray(value, self.dtype) + + def __abs__(self): + return self._rc(absolute(self.array)) + + def __neg__(self): + return self._rc(-self.array) + + def __add__(self, other): + return self._rc(self.array + asarray(other)) + + __radd__ = __add__ + + def __iadd__(self, other): + add(self.array, other, self.array) + return self + + def __sub__(self, other): + return self._rc(self.array - asarray(other)) + + def __rsub__(self, other): + return self._rc(asarray(other) - self.array) + + def __isub__(self, other): + subtract(self.array, other, self.array) + return self + + def __mul__(self, other): + return self._rc(multiply(self.array, asarray(other))) + + __rmul__ = __mul__ + + def __imul__(self, other): + multiply(self.array, other, self.array) + return self + + def __div__(self, other): + return self._rc(divide(self.array, asarray(other))) + + def __rdiv__(self, other): + return self._rc(divide(asarray(other), self.array)) + + def __idiv__(self, other): + divide(self.array, other, self.array) + return self + + def __mod__(self, other): + return self._rc(remainder(self.array, other)) + + def __rmod__(self, other): + return self._rc(remainder(other, self.array)) + + def __imod__(self, other): + remainder(self.array, other, self.array) + return self + + def __divmod__(self, other): + return (self._rc(divide(self.array, other)), + self._rc(remainder(self.array, other))) + + def __rdivmod__(self, other): + return (self._rc(divide(other, self.array)), + self._rc(remainder(other, self.array))) + + def __pow__(self, other): + return self._rc(power(self.array, asarray(other))) + + def __rpow__(self, other): + return self._rc(power(asarray(other), self.array)) + + def __ipow__(self, other): + power(self.array, other, self.array) + return self + + def __lshift__(self, other): + return self._rc(left_shift(self.array, other)) + + def __rshift__(self, other): + return self._rc(right_shift(self.array, other)) + + def __rlshift__(self, other): + return self._rc(left_shift(other, self.array)) + + def __rrshift__(self, other): + return self._rc(right_shift(other, self.array)) + + def __ilshift__(self, other): + left_shift(self.array, other, self.array) + return self + + def __irshift__(self, other): + right_shift(self.array, other, self.array) + return self + + def __and__(self, other): + return self._rc(bitwise_and(self.array, other)) + + def __rand__(self, other): + return self._rc(bitwise_and(other, self.array)) + + def __iand__(self, other): + bitwise_and(self.array, other, self.array) + return self + + def __xor__(self, other): + return self._rc(bitwise_xor(self.array, other)) + + def __rxor__(self, other): + return self._rc(bitwise_xor(other, self.array)) + + def __ixor__(self, other): + bitwise_xor(self.array, other, self.array) + return self + + def __or__(self, other): + return self._rc(bitwise_or(self.array, other)) + + def __ror__(self, other): + return 
self._rc(bitwise_or(other, self.array)) + + def __ior__(self, other): + bitwise_or(self.array, other, self.array) + return self + + def __pos__(self): + return self._rc(self.array) + + def __invert__(self): + return self._rc(invert(self.array)) + + def _scalarfunc(self, func): + if self.ndim == 0: + return func(self[0]) + else: + raise TypeError( + "only rank-0 arrays can be converted to Python scalars.") + + def __complex__(self): + return self._scalarfunc(complex) + + def __float__(self): + return self._scalarfunc(float) + + def __int__(self): + return self._scalarfunc(int) + + def __long__(self): + return self._scalarfunc(long) + + def __hex__(self): + return self._scalarfunc(hex) + + def __oct__(self): + return self._scalarfunc(oct) + + def __lt__(self, other): + return self._rc(less(self.array, other)) + + def __le__(self, other): + return self._rc(less_equal(self.array, other)) + + def __eq__(self, other): + return self._rc(equal(self.array, other)) + + def __ne__(self, other): + return self._rc(not_equal(self.array, other)) + + def __gt__(self, other): + return self._rc(greater(self.array, other)) + + def __ge__(self, other): + return self._rc(greater_equal(self.array, other)) + + def copy(self): + "" + return self._rc(self.array.copy()) + + def tostring(self): + "" + return self.array.tostring() + + def byteswap(self): + "" + return self._rc(self.array.byteswap()) + + def astype(self, typecode): + "" + return self._rc(self.array.astype(typecode)) + + def _rc(self, a): + if len(shape(a)) == 0: + return a + else: + return self.__class__(a) + + def __array_wrap__(self, *args): + return self.__class__(args[0]) + + def __setattr__(self, attr, value): + if attr == 'array': + object.__setattr__(self, attr, value) + return + try: + self.array.__setattr__(attr, value) + except AttributeError: + object.__setattr__(self, attr, value) + + # Only called after other approaches fail. + def __getattr__(self, attr): + if (attr == 'array'): + return object.__getattribute__(self, attr) + return self.array.__getattribute__(attr) + +############################################################# +# Test of class container +############################################################# +if __name__ == '__main__': + temp = reshape(arange(10000), (100, 100)) + + ua = container(temp) + # new object created begin test + print(dir(ua)) + print(shape(ua), ua.shape) # I have changed Numeric.py + + ua_small = ua[:3, :5] + print(ua_small) + # this did not change ua[0,0], which is not normal behavior + ua_small[0, 0] = 10 + print(ua_small[0, 0], ua[0, 0]) + print(sin(ua_small) / 3. * 6. 
+ sqrt(ua_small ** 2))
+    print(less(ua_small, 103), type(less(ua_small, 103)))
+    print(type(ua_small * reshape(arange(15), shape(ua_small))))
+    print(reshape(ua_small, (5, 3)))
+    print(transpose(ua_small))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/user_array.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/user_array.pyc
new file mode 100644
index 0000000..adc1e39
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/user_array.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/utils.py b/project/venv/lib/python2.7/site-packages/numpy/lib/utils.py
new file mode 100644
index 0000000..84edf40
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/lib/utils.py
@@ -0,0 +1,1159 @@
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import types
+import re
+import warnings
+
+from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
+from numpy.core.overrides import set_module
+from numpy.core import ndarray, ufunc, asarray
+import numpy as np
+
+# getargspec and formatargspec were removed in Python 3.6
+from numpy.compat import getargspec, formatargspec
+
+__all__ = [
+    'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
+    'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
+    'lookfor', 'byte_bounds', 'safe_eval'
+    ]
+
+def get_include():
+    """
+    Return the directory that contains the NumPy \\*.h header files.
+
+    Extension modules that need to compile against NumPy should use this
+    function to locate the appropriate include directory.
+
+    Notes
+    -----
+    When using ``distutils``, for example in ``setup.py``.
+    ::
+
+        import numpy as np
+        ...
+        Extension('extension_name', ...
+        include_dirs=[np.get_include()])
+        ...
+
+    """
+    import numpy
+    if numpy.show_config is None:
+        # running from numpy source directory
+        d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
+    else:
+        # using installed numpy core headers
+        import numpy.core as core
+        d = os.path.join(os.path.dirname(core.__file__), 'include')
+    return d
+
+
+def _set_function_name(func, name):
+    func.__name__ = name
+    return func
+
+
+class _Deprecate(object):
+    """
+    Decorator class to deprecate old functions.
+
+    Refer to `deprecate` for details.
+
+    See Also
+    --------
+    deprecate
+
+    """
+
+    def __init__(self, old_name=None, new_name=None, message=None):
+        self.old_name = old_name
+        self.new_name = new_name
+        self.message = message
+
+    def __call__(self, func, *args, **kwargs):
+        """
+        Decorator call. Refer to ``deprecate``.
+
+        """
+        old_name = self.old_name
+        new_name = self.new_name
+        message = self.message
+
+        if old_name is None:
+            try:
+                old_name = func.__name__
+            except AttributeError:
+                old_name = func.__name__
+        if new_name is None:
+            depdoc = "`%s` is deprecated!" % old_name
+        else:
+            depdoc = "`%s` is deprecated, use `%s` instead!"
% \ + (old_name, new_name) + + if message is not None: + depdoc += "\n" + message + + def newfunc(*args,**kwds): + """`arrayrange` is deprecated, use `arange` instead!""" + warnings.warn(depdoc, DeprecationWarning, stacklevel=2) + return func(*args, **kwds) + + newfunc = _set_function_name(newfunc, old_name) + doc = func.__doc__ + if doc is None: + doc = depdoc + else: + doc = '\n\n'.join([depdoc, doc]) + newfunc.__doc__ = doc + try: + d = func.__dict__ + except AttributeError: + pass + else: + newfunc.__dict__.update(d) + return newfunc + +def deprecate(*args, **kwargs): + """ + Issues a DeprecationWarning, adds warning to `old_name`'s + docstring, rebinds ``old_name.__name__`` and returns the new + function object. + + This function may also be used as a decorator. + + Parameters + ---------- + func : function + The function to be deprecated. + old_name : str, optional + The name of the function to be deprecated. Default is None, in + which case the name of `func` is used. + new_name : str, optional + The new name for the function. Default is None, in which case the + deprecation message is that `old_name` is deprecated. If given, the + deprecation message is that `old_name` is deprecated and `new_name` + should be used instead. + message : str, optional + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + old_func : function + The deprecated function. + + Examples + -------- + Note that ``olduint`` returns a value after printing Deprecation + Warning: + + >>> olduint = np.deprecate(np.uint) + >>> olduint(6) + /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114: + DeprecationWarning: uint32 is deprecated + warnings.warn(str1, DeprecationWarning, stacklevel=2) + 6 + + """ + # Deprecate may be run as a function or as a decorator + # If run as a function, we initialise the decorator class + # and execute its __call__ method. + + if args: + fn = args[0] + args = args[1:] + + return _Deprecate(*args, **kwargs)(fn) + else: + return _Deprecate(*args, **kwargs) + +deprecate_with_doc = lambda msg: _Deprecate(message=msg) + + +#-------------------------------------------- +# Determine if two arrays can share memory +#-------------------------------------------- + +def byte_bounds(a): + """ + Returns pointers to the end-points of an array. + + Parameters + ---------- + a : ndarray + Input array. It must conform to the Python-side of the array + interface. + + Returns + ------- + (low, high) : tuple of 2 integers + The first integer is the first byte of the array, the second + integer is just past the last byte of the array. If `a` is not + contiguous it will not use every byte between the (`low`, `high`) + values. 
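# A sketch of the strided case described above: for a non-contiguous view the
# (low, high) span covers the addressed extent, which exceeds size * itemsize.
import numpy as np

a = np.arange(10, dtype=np.int16)[::3]  # 4 elements, stride of 6 bytes
low, high = np.byte_bounds(a)
print(high - low)           # 20 == (4 - 1) * 6 + 2
print(a.size * a.itemsize)  # 8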
+ + Examples + -------- + >>> I = np.eye(2, dtype='f'); I.dtype + dtype('float32') + >>> low, high = np.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + >>> I = np.eye(2, dtype='G'); I.dtype + dtype('complex192') + >>> low, high = np.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + + """ + ai = a.__array_interface__ + a_data = ai['data'][0] + astrides = ai['strides'] + ashape = ai['shape'] + bytes_a = asarray(a).dtype.itemsize + + a_low = a_high = a_data + if astrides is None: + # contiguous case + a_high += a.size * bytes_a + else: + for shape, stride in zip(ashape, astrides): + if stride < 0: + a_low += (shape-1)*stride + else: + a_high += (shape-1)*stride + a_high += bytes_a + return a_low, a_high + + +#----------------------------------------------------------------------------- +# Function for output and information on the variables used. +#----------------------------------------------------------------------------- + + +def who(vardict=None): + """ + Print the NumPy arrays in the given dictionary. + + If there is no dictionary passed in or `vardict` is None then returns + NumPy arrays in the globals() dictionary (all NumPy arrays in the + namespace). + + Parameters + ---------- + vardict : dict, optional + A dictionary possibly containing ndarrays. Default is globals(). + + Returns + ------- + out : None + Returns 'None'. + + Notes + ----- + Prints out the name, shape, bytes and type of all of the ndarrays + present in `vardict`. + + Examples + -------- + >>> a = np.arange(10) + >>> b = np.ones(20) + >>> np.who() + Name Shape Bytes Type + =========================================================== + a 10 40 int32 + b 20 160 float64 + Upper bound on total bytes = 200 + + >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', + ... 'idx':5} + >>> np.who(d) + Name Shape Bytes Type + =========================================================== + y 3 24 float64 + x 2 16 float64 + Upper bound on total bytes = 40 + + """ + if vardict is None: + frame = sys._getframe().f_back + vardict = frame.f_globals + sta = [] + cache = {} + for name in vardict.keys(): + if isinstance(vardict[name], ndarray): + var = vardict[name] + idv = id(var) + if idv in cache.keys(): + namestr = name + " (%s)" % cache[idv] + original = 0 + else: + cache[idv] = name + namestr = name + original = 1 + shapestr = " x ".join(map(str, var.shape)) + bytestr = str(var.nbytes) + sta.append([namestr, shapestr, bytestr, var.dtype.name, + original]) + + maxname = 0 + maxshape = 0 + maxbyte = 0 + totalbytes = 0 + for k in range(len(sta)): + val = sta[k] + if maxname < len(val[0]): + maxname = len(val[0]) + if maxshape < len(val[1]): + maxshape = len(val[1]) + if maxbyte < len(val[2]): + maxbyte = len(val[2]) + if val[4]: + totalbytes += int(val[2]) + + if len(sta) > 0: + sp1 = max(10, maxname) + sp2 = max(10, maxshape) + sp3 = max(10, maxbyte) + prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ') + print(prval + "\n" + "="*(len(prval)+5) + "\n") + + for k in range(len(sta)): + val = sta[k] + print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4), + val[1], ' '*(sp2-len(val[1])+5), + val[2], ' '*(sp3-len(val[2])+5), + val[3])) + print("\nUpper bound on total bytes = %d" % totalbytes) + return + +#----------------------------------------------------------------------------- + + +# NOTE: pydoc defines a help function which works similarly to this +# except it uses a pager to take over the screen. 
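# who() in action on an explicit dictionary, mirroring the docstring above:
import numpy as np

d = {'x': np.arange(2.0), 'y': np.ones((3, 3)), 'txt': 'not an array'}
np.who(d)  # tabulates x and y only; non-array entries are skipped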
+ +# combine name and arguments and split to multiple lines of width +# characters. End lines on a comma and begin argument list indented with +# the rest of the arguments. +def _split_line(name, arguments, width): + firstwidth = len(name) + k = firstwidth + newstr = name + sepstr = ", " + arglist = arguments.split(sepstr) + for argument in arglist: + if k == firstwidth: + addstr = "" + else: + addstr = sepstr + k = k + len(argument) + len(addstr) + if k > width: + k = firstwidth + 1 + len(argument) + newstr = newstr + ",\n" + " "*(firstwidth+2) + argument + else: + newstr = newstr + addstr + argument + return newstr + +_namedict = None +_dictlist = None + +# Traverse all module directories underneath globals +# to see if something is defined +def _makenamedict(module='numpy'): + module = __import__(module, globals(), locals(), []) + thedict = {module.__name__:module.__dict__} + dictlist = [module.__name__] + totraverse = [module.__dict__] + while True: + if len(totraverse) == 0: + break + thisdict = totraverse.pop(0) + for x in thisdict.keys(): + if isinstance(thisdict[x], types.ModuleType): + modname = thisdict[x].__name__ + if modname not in dictlist: + moddict = thisdict[x].__dict__ + dictlist.append(modname) + totraverse.append(moddict) + thedict[modname] = moddict + return thedict, dictlist + + +def _info(obj, output=sys.stdout): + """Provide information about ndarray obj. + + Parameters + ---------- + obj : ndarray + Must be ndarray, not checked. + output + Where printed output goes. + + Notes + ----- + Copied over from the numarray module prior to its removal. + Adapted somewhat as only numpy is an option now. + + Called by info. + + """ + extra = "" + tic = "" + bp = lambda x: x + cls = getattr(obj, '__class__', type(obj)) + nm = getattr(cls, '__name__', cls) + strides = obj.strides + endian = obj.dtype.byteorder + + print("class: ", nm, file=output) + print("shape: ", obj.shape, file=output) + print("strides: ", strides, file=output) + print("itemsize: ", obj.itemsize, file=output) + print("aligned: ", bp(obj.flags.aligned), file=output) + print("contiguous: ", bp(obj.flags.contiguous), file=output) + print("fortran: ", obj.flags.fortran, file=output) + print( + "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), + file=output + ) + print("byteorder: ", end=' ', file=output) + if endian in ['|', '=']: + print("%s%s%s" % (tic, sys.byteorder, tic), file=output) + byteswap = False + elif endian == '>': + print("%sbig%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "big" + else: + print("%slittle%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "little" + print("byteswap: ", bp(byteswap), file=output) + print("type: %s" % obj.dtype, file=output) + + +@set_module('numpy') +def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): + """ + Get help information for a function, class, or module. + + Parameters + ---------- + object : object or str, optional + Input object or name to get information about. If `object` is a + numpy object, its docstring is given. If it is a string, available + modules are searched for matching objects. If None, information + about `info` itself is returned. + maxwidth : int, optional + Printing width. + output : file like object, optional + File like object that the output is written to, default is + ``stdout``. The object has to be opened in 'w' or 'a' mode. + toplevel : str, optional + Start search at this level. 
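# A standalone check of the wrapping rule implemented by _split_line() above
# (the helper is redefined here so the snippet runs on its own):
def _split_line(name, arguments, width):
    firstwidth = len(name)
    k = firstwidth
    newstr = name
    sepstr = ", "
    for argument in arguments.split(sepstr):
        addstr = "" if k == firstwidth else sepstr
        k = k + len(argument) + len(addstr)
        if k > width:
            k = firstwidth + 1 + len(argument)
            newstr = newstr + ",\n" + " " * (firstwidth + 2) + argument
        else:
            newstr = newstr + addstr + argument
    return newstr

print(_split_line("frobnicate", "(alpha=1, beta=2, gamma=3, delta=4)", 30))
# frobnicate(alpha=1, beta=2,
#             gamma=3, delta=4)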
+ + See Also + -------- + source, lookfor + + Notes + ----- + When used interactively with an object, ``np.info(obj)`` is equivalent + to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython + prompt. + + Examples + -------- + >>> np.info(np.polyval) # doctest: +SKIP + polyval(p, x) + Evaluate the polynomial p at x. + ... + + When using a string for `object` it is possible to get multiple results. + + >>> np.info('fft') # doctest: +SKIP + *** Found in numpy *** + Core FFT routines + ... + *** Found in numpy.fft *** + fft(a, n=None, axis=-1) + ... + *** Repeat reference found in numpy.fft.fftpack *** + *** Total of 3 references found. *** + + """ + global _namedict, _dictlist + # Local import to speed up numpy's import time. + import pydoc + import inspect + + if (hasattr(object, '_ppimport_importer') or + hasattr(object, '_ppimport_module')): + object = object._ppimport_module + elif hasattr(object, '_ppimport_attr'): + object = object._ppimport_attr + + if object is None: + info(info) + elif isinstance(object, ndarray): + _info(object, output=output) + elif isinstance(object, str): + if _namedict is None: + _namedict, _dictlist = _makenamedict(toplevel) + numfound = 0 + objlist = [] + for namestr in _dictlist: + try: + obj = _namedict[namestr][object] + if id(obj) in objlist: + print("\n " + "*** Repeat reference found in %s *** " % namestr, + file=output + ) + else: + objlist.append(id(obj)) + print(" *** Found in %s ***" % namestr, file=output) + info(obj) + print("-"*maxwidth, file=output) + numfound += 1 + except KeyError: + pass + if numfound == 0: + print("Help for %s not found." % object, file=output) + else: + print("\n " + "*** Total of %d references found. ***" % numfound, + file=output + ) + + elif inspect.isfunction(object): + name = object.__name__ + arguments = formatargspec(*getargspec(object)) + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif inspect.isclass(object): + name = object.__name__ + arguments = "()" + try: + if hasattr(object, '__init__'): + arguments = formatargspec( + *getargspec(object.__init__.__func__) + ) + arglist = arguments.split(', ') + if len(arglist) > 1: + arglist[1] = "("+arglist[1] + arguments = ", ".join(arglist[1:]) + except Exception: + pass + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc1 = inspect.getdoc(object) + if doc1 is None: + if hasattr(object, '__init__'): + print(inspect.getdoc(object.__init__), file=output) + else: + print(inspect.getdoc(object), file=output) + + methods = pydoc.allmethods(object) + if methods != []: + print("\n\nMethods:\n", file=output) + for meth in methods: + if meth[0] == '_': + continue + thisobj = getattr(object, meth, None) + if thisobj is not None: + methstr, other = pydoc.splitdoc( + inspect.getdoc(thisobj) or "None" + ) + print(" %s -- %s" % (meth, methstr), file=output) + + elif (sys.version_info[0] < 3 + and isinstance(object, types.InstanceType)): + # check for __call__ method + # types.InstanceType is the type of the instances of oldstyle classes + print("Instance of class: ", object.__class__.__name__, file=output) + print(file=output) + if hasattr(object, '__call__'): + arguments = formatargspec( + *getargspec(object.__call__.__func__) + ) + arglist = arguments.split(', ') + if len(arglist) > 1: 
+ arglist[1] = "("+arglist[1] + arguments = ", ".join(arglist[1:]) + else: + arguments = "()" + + if hasattr(object, 'name'): + name = "%s" % object.name + else: + name = "" + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc = inspect.getdoc(object.__call__) + if doc is not None: + print(inspect.getdoc(object.__call__), file=output) + print(inspect.getdoc(object), file=output) + + else: + print(inspect.getdoc(object), file=output) + + elif inspect.ismethod(object): + name = object.__name__ + arguments = formatargspec( + *getargspec(object.__func__) + ) + arglist = arguments.split(', ') + if len(arglist) > 1: + arglist[1] = "("+arglist[1] + arguments = ", ".join(arglist[1:]) + else: + arguments = "()" + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif hasattr(object, '__doc__'): + print(inspect.getdoc(object), file=output) + + +@set_module('numpy') +def source(object, output=sys.stdout): + """ + Print or write to a file the source code for a NumPy object. + + The source code is only returned for objects written in Python. Many + functions and classes are defined in C and will therefore not return + useful information. + + Parameters + ---------- + object : numpy object + Input object. This can be any object (function, class, module, + ...). + output : file object, optional + If `output` not supplied then source code is printed to screen + (sys.stdout). File object must be created with either write 'w' or + append 'a' modes. + + See Also + -------- + lookfor, info + + Examples + -------- + >>> np.source(np.interp) #doctest: +SKIP + In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py + def interp(x, xp, fp, left=None, right=None): + \"\"\".... (full docstring printed)\"\"\" + if isinstance(x, (float, int, number)): + return compiled_interp([x], xp, fp, left, right).item() + else: + return compiled_interp(x, xp, fp, left, right) + + The source code is only returned for objects written in Python. + + >>> np.source(np.array) #doctest: +SKIP + Not available for this object. + + """ + # Local import to speed up numpy's import time. + import inspect + try: + print("In file: %s\n" % inspect.getsourcefile(object), file=output) + print(inspect.getsource(object), file=output) + except Exception: + print("Not available for this object.", file=output) + + +# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...} +# where kind: "func", "class", "module", "object" +# and index: index in breadth-first namespace traversal +_lookfor_caches = {} + +# regexp whose match indicates that the string may contain a function +# signature +_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I) + + +@set_module('numpy') +def lookfor(what, module=None, import_modules=True, regenerate=False, + output=None): + """ + Do a keyword search on docstrings. + + A list of objects that matched the search is displayed, + sorted by relevance. All given keywords need to be found in the + docstring for it to be returned as a result, but the order does + not matter. + + Parameters + ---------- + what : str + String containing words to look for. + module : str or list, optional + Name of module(s) whose docstrings to go through. 
+ import_modules : bool, optional + Whether to import sub-modules in packages. Default is True. + regenerate : bool, optional + Whether to re-generate the docstring cache. Default is False. + output : file-like, optional + File-like object to write the output to. If omitted, use a pager. + + See Also + -------- + source, info + + Notes + ----- + Relevance is determined only roughly, by checking if the keywords occur + in the function name, at the start of a docstring, etc. + + Examples + -------- + >>> np.lookfor('binary representation') + Search results for 'binary representation' + ------------------------------------------ + numpy.binary_repr + Return the binary representation of the input number as a string. + numpy.core.setup_common.long_double_representation + Given a binary dump as given by GNU od -b, look for long double + numpy.base_repr + Return a string representation of a number in the given base system. + ... + + """ + import pydoc + + # Cache + cache = _lookfor_generate_cache(module, import_modules, regenerate) + + # Search + # XXX: maybe using a real stemming search engine would be better? + found = [] + whats = str(what).lower().split() + if not whats: + return + + for name, (docstring, kind, index) in cache.items(): + if kind in ('module', 'object'): + # don't show modules or objects + continue + ok = True + doc = docstring.lower() + for w in whats: + if w not in doc: + ok = False + break + if ok: + found.append(name) + + # Relevance sort + # XXX: this is full Harrison-Stetson heuristics now, + # XXX: it probably could be improved + + kind_relevance = {'func': 1000, 'class': 1000, + 'module': -1000, 'object': -1000} + + def relevance(name, docstr, kind, index): + r = 0 + # do the keywords occur within the start of the docstring? + first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) + r += sum([200 for w in whats if w in first_doc]) + # do the keywords occur in the function name? + r += sum([30 for w in whats if w in name]) + # is the full name long? + r += -len(name) * 5 + # is the object of bad type? + r += kind_relevance.get(kind, -1000) + # is the object deep in namespace hierarchy? + r += -name.count('.') * 10 + r += max(-index / 100, -100) + return r + + def relevance_value(a): + return relevance(a, *cache[a]) + found.sort(key=relevance_value) + + # Pretty-print + s = "Search results for '%s'" % (' '.join(whats)) + help_text = [s, "-"*len(s)] + for name in found[::-1]: + doc, kind, ix = cache[name] + + doclines = [line.strip() for line in doc.strip().split("\n") + if line.strip()] + + # find a suitable short description + try: + first_doc = doclines[0].strip() + if _function_signature_re.search(first_doc): + first_doc = doclines[1].strip() + except IndexError: + first_doc = "" + help_text.append("%s\n %s" % (name, first_doc)) + + if not found: + help_text.append("Nothing found.") + + # Output + if output is not None: + output.write("\n".join(help_text)) + elif len(help_text) > 10: + pager = pydoc.getpager() + pager("\n".join(help_text)) + else: + print("\n".join(help_text)) + +def _lookfor_generate_cache(module, import_modules, regenerate): + """ + Generate docstring cache for given module. + + Parameters + ---------- + module : str, None, module + Module for which to generate docstring cache + import_modules : bool + Whether to import sub-modules in packages. 
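# A simplified sketch of the relevance() heuristic defined above; the real
# function also weights the object kind and the traversal index.
whats = ['median']

def score(name, first_doc):
    r = 200 * sum(w in first_doc for w in whats)  # keyword near the top
    r += 30 * sum(w in name for w in whats)       # keyword in the name
    r += -len(name) * 5 - name.count('.') * 10    # long, deep names lose
    return r

print(score('numpy.median', 'compute the median along an axis'))   # 160
print(score('numpy.ma.extras.median_helper', 'internal utility'))  # -145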
+ regenerate : bool + Re-generate the docstring cache + + Returns + ------- + cache : dict {obj_full_name: (docstring, kind, index), ...} + Docstring cache for the module, either cached one (regenerate=False) + or newly generated. + + """ + global _lookfor_caches + # Local import to speed up numpy's import time. + import inspect + + if sys.version_info[0] >= 3: + # In Python3 stderr, stdout are text files. + from io import StringIO + else: + from StringIO import StringIO + + if module is None: + module = "numpy" + + if isinstance(module, str): + try: + __import__(module) + except ImportError: + return {} + module = sys.modules[module] + elif isinstance(module, list) or isinstance(module, tuple): + cache = {} + for mod in module: + cache.update(_lookfor_generate_cache(mod, import_modules, + regenerate)) + return cache + + if id(module) in _lookfor_caches and not regenerate: + return _lookfor_caches[id(module)] + + # walk items and collect docstrings + cache = {} + _lookfor_caches[id(module)] = cache + seen = {} + index = 0 + stack = [(module.__name__, module)] + while stack: + name, item = stack.pop(0) + if id(item) in seen: + continue + seen[id(item)] = True + + index += 1 + kind = "object" + + if inspect.ismodule(item): + kind = "module" + try: + _all = item.__all__ + except AttributeError: + _all = None + + # import sub-packages + if import_modules and hasattr(item, '__path__'): + for pth in item.__path__: + for mod_path in os.listdir(pth): + this_py = os.path.join(pth, mod_path) + init_py = os.path.join(pth, mod_path, '__init__.py') + if (os.path.isfile(this_py) and + mod_path.endswith('.py')): + to_import = mod_path[:-3] + elif os.path.isfile(init_py): + to_import = mod_path + else: + continue + if to_import == '__init__': + continue + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + try: + sys.stdout = StringIO() + sys.stderr = StringIO() + __import__("%s.%s" % (name, to_import)) + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + # Catch SystemExit, too + except BaseException: + continue + + for n, v in _getmembers(item): + try: + item_name = getattr(v, '__name__', "%s.%s" % (name, n)) + mod_name = getattr(v, '__module__', None) + except NameError: + # ref. SWIG's global cvars + # NameError: Unknown C global variable + item_name = "%s.%s" % (name, n) + mod_name = None + if '.' not in item_name and mod_name: + item_name = "%s.%s" % (mod_name, item_name) + + if not item_name.startswith(name + '.'): + # don't crawl "foreign" objects + if isinstance(v, ufunc): + # ... 
unless they are ufuncs + pass + else: + continue + elif not (inspect.ismodule(v) or _all is None or n in _all): + continue + stack.append(("%s.%s" % (name, n), v)) + elif inspect.isclass(item): + kind = "class" + for n, v in _getmembers(item): + stack.append(("%s.%s" % (name, n), v)) + elif hasattr(item, "__call__"): + kind = "func" + + try: + doc = inspect.getdoc(item) + except NameError: + # ref SWIG's NameError: Unknown C global variable + doc = None + if doc is not None: + cache[name] = (doc, kind, index) + + return cache + +def _getmembers(item): + import inspect + try: + members = inspect.getmembers(item) + except Exception: + members = [(x, getattr(item, x)) for x in dir(item) + if hasattr(item, x)] + return members + +#----------------------------------------------------------------------------- + +# The following SafeEval class and company are adapted from Michael Spencer's +# ASPN Python Cookbook recipe: https://code.activestate.com/recipes/364469/ +# +# Accordingly it is mostly Copyright 2006 by Michael Spencer. +# The recipe, like most of the other ASPN Python Cookbook recipes was made +# available under the Python license. +# https://en.wikipedia.org/wiki/Python_License + +# It has been modified to: +# * handle unary -/+ +# * support True/False/None +# * raise SyntaxError instead of a custom exception. + +class SafeEval(object): + """ + Object to evaluate constant string expressions. + + This includes strings with lists, dicts and tuples using the abstract + syntax tree created by ``compiler.parse``. + + .. deprecated:: 1.10.0 + + See Also + -------- + safe_eval + + """ + def __init__(self): + # 2014-10-15, 1.10 + warnings.warn("SafeEval is deprecated in 1.10 and will be removed.", + DeprecationWarning, stacklevel=2) + + def visit(self, node): + cls = node.__class__ + meth = getattr(self, 'visit' + cls.__name__, self.default) + return meth(node) + + def default(self, node): + raise SyntaxError("Unsupported source construct: %s" + % node.__class__) + + def visitExpression(self, node): + return self.visit(node.body) + + def visitNum(self, node): + return node.n + + def visitStr(self, node): + return node.s + + def visitBytes(self, node): + return node.s + + def visitDict(self, node,**kw): + return dict([(self.visit(k), self.visit(v)) + for k, v in zip(node.keys, node.values)]) + + def visitTuple(self, node): + return tuple([self.visit(i) for i in node.elts]) + + def visitList(self, node): + return [self.visit(i) for i in node.elts] + + def visitUnaryOp(self, node): + import ast + if isinstance(node.op, ast.UAdd): + return +self.visit(node.operand) + elif isinstance(node.op, ast.USub): + return -self.visit(node.operand) + else: + raise SyntaxError("Unknown unary op: %r" % node.op) + + def visitName(self, node): + if node.id == 'False': + return False + elif node.id == 'True': + return True + elif node.id == 'None': + return None + else: + raise SyntaxError("Unknown name: %s" % node.id) + + def visitNameConstant(self, node): + return node.value + + +def safe_eval(source): + """ + Protected string evaluation. + + Evaluate a string containing a Python literal expression without + allowing the execution of arbitrary non-literal code. + + Parameters + ---------- + source : str + The string to evaluate. + + Returns + ------- + obj : object + The result of evaluating `source`. + + Raises + ------ + SyntaxError + If the code has invalid Python syntax, or if it contains + non-literal code. 
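# As the Raises section above says, anything beyond a literal is rejected; a
# sketch using the ast.literal_eval call that safe_eval wraps (see the body
# that follows):
import ast

print(ast.literal_eval('{"foo": ("bar", 10.0)}'))  # {'foo': ('bar', 10.0)}
try:
    ast.literal_eval('__import__("os")')
except ValueError as err:
    print('rejected:', err)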
+ + Examples + -------- + >>> np.safe_eval('1') + 1 + >>> np.safe_eval('[1, 2, 3]') + [1, 2, 3] + >>> np.safe_eval('{"foo": ("bar", 10.0)}') + {'foo': ('bar', 10.0)} + + >>> np.safe_eval('import os') + Traceback (most recent call last): + ... + SyntaxError: invalid syntax + + >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') + Traceback (most recent call last): + ... + SyntaxError: Unsupported source construct: compiler.ast.CallFunc + + """ + # Local import to speed up numpy's import time. + import ast + + return ast.literal_eval(source) + + +def _median_nancheck(data, result, axis, out): + """ + Utility function to check median result from data for NaN values at the end + and return NaN in that case. Input result can also be a MaskedArray. + + Parameters + ---------- + data : array + Input data to median function + result : Array or MaskedArray + Result of median function + axis : {int, sequence of int, None}, optional + Axis or axes along which the median was computed. + out : ndarray, optional + Output array in which to place the result. + Returns + ------- + median : scalar or ndarray + Median or NaN in axes which contained NaN in the input. + """ + if data.size == 0: + return result + data = np.moveaxis(data, axis, -1) + n = np.isnan(data[..., -1]) + # masked NaN values are ok + if np.ma.isMaskedArray(n): + n = n.filled(False) + if result.ndim == 0: + if n == True: + warnings.warn("Invalid value encountered in median", + RuntimeWarning, stacklevel=3) + if out is not None: + out[...] = data.dtype.type(np.nan) + result = out + else: + result = data.dtype.type(np.nan) + elif np.count_nonzero(n.ravel()) > 0: + warnings.warn("Invalid value encountered in median for" + + " %d results" % np.count_nonzero(n.ravel()), + RuntimeWarning, stacklevel=3) + result[n] = np.nan + return result + +#----------------------------------------------------------------------------- diff --git a/project/venv/lib/python2.7/site-packages/numpy/lib/utils.pyc b/project/venv/lib/python2.7/site-packages/numpy/lib/utils.pyc new file mode 100644 index 0000000..3df8d98 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/lib/utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/linalg/__init__.py new file mode 100644 index 0000000..4b696c8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/linalg/__init__.py @@ -0,0 +1,55 @@ +""" +Core Linear Algebra Tools +========================= + +=============== ========================================================== +Linear algebra basics +========================================================================== +norm Vector or matrix norm +inv Inverse of a square matrix +solve Solve a linear system of equations +det Determinant of a square matrix +slogdet Logarithm of the determinant of a square matrix +lstsq Solve linear least-squares problem +pinv Pseudo-inverse (Moore-Penrose) calculated using a singular + value decomposition +matrix_power Integer power of a square matrix +matrix_rank Calculate matrix rank using an SVD-based method +=============== ========================================================== + +=============== ========================================================== +Eigenvalues and decompositions +========================================================================== +eig Eigenvalues and vectors of a square matrix +eigh Eigenvalues and eigenvectors of a Hermitian matrix +eigvals Eigenvalues of a square matrix 
+eigvalsh Eigenvalues of a Hermitian matrix +qr QR decomposition of a matrix +svd Singular value decomposition of a matrix +cholesky Cholesky decomposition of a matrix +=============== ========================================================== + +=============== ========================================================== +Tensor operations +========================================================================== +tensorsolve Solve a linear tensor equation +tensorinv Calculate an inverse of a tensor +=============== ========================================================== + +=============== ========================================================== +Exceptions +========================================================================== +LinAlgError Indicates a failed linear algebra operation +=============== ========================================================== + +""" +from __future__ import division, absolute_import, print_function + +# To get sub-modules +from .info import __doc__ + +from .linalg import * + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/linalg/__init__.pyc new file mode 100644 index 0000000..dad83e5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/_umath_linalg.so b/project/venv/lib/python2.7/site-packages/numpy/linalg/_umath_linalg.so new file mode 100755 index 0000000..49d7ab6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/_umath_linalg.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/info.py b/project/venv/lib/python2.7/site-packages/numpy/linalg/info.py new file mode 100644 index 0000000..646ecda --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/linalg/info.py @@ -0,0 +1,37 @@ +"""\ +Core Linear Algebra Tools +------------------------- +Linear algebra basics: + +- norm Vector or matrix norm +- inv Inverse of a square matrix +- solve Solve a linear system of equations +- det Determinant of a square matrix +- lstsq Solve linear least-squares problem +- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular + value decomposition +- matrix_power Integer power of a square matrix + +Eigenvalues and decompositions: + +- eig Eigenvalues and vectors of a square matrix +- eigh Eigenvalues and eigenvectors of a Hermitian matrix +- eigvals Eigenvalues of a square matrix +- eigvalsh Eigenvalues of a Hermitian matrix +- qr QR decomposition of a matrix +- svd Singular value decomposition of a matrix +- cholesky Cholesky decomposition of a matrix + +Tensor operations: + +- tensorsolve Solve a linear tensor equation +- tensorinv Calculate an inverse of a tensor + +Exceptions: + +- LinAlgError Indicates a failed linear algebra operation + +""" +from __future__ import division, absolute_import, print_function + +depends = ['core'] diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/info.pyc b/project/venv/lib/python2.7/site-packages/numpy/linalg/info.pyc new file mode 100644 index 0000000..1c45b23 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/lapack_lite.so b/project/venv/lib/python2.7/site-packages/numpy/linalg/lapack_lite.so new file mode 100755 index 
0000000..aa514da Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/lapack_lite.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/linalg.py b/project/venv/lib/python2.7/site-packages/numpy/linalg/linalg.py new file mode 100644 index 0000000..8363d73 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/linalg/linalg.py @@ -0,0 +1,2720 @@ +"""Lite version of scipy.linalg. + +Notes +----- +This module is a lite version of the linalg.py module in SciPy which +contains high-level Python interface to the LAPACK library. The lite +version only accesses the following LAPACK functions: dgesv, zgesv, +dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, +zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. +""" +from __future__ import division, absolute_import, print_function + + +__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', + 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', + 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', + 'LinAlgError', 'multi_dot'] + +import functools +import operator +import warnings + +from numpy.core import ( + array, asarray, zeros, empty, empty_like, intc, single, double, + csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot, + add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite, + finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs, + atleast_2d, intp, asanyarray, object_, matmul, + swapaxes, divide, count_nonzero, isnan +) +from numpy.core.multiarray import normalize_axis_index +from numpy.core.overrides import set_module +from numpy.core import overrides +from numpy.lib.twodim_base import triu, eye +from numpy.linalg import lapack_lite, _umath_linalg + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.linalg') + + +# For Python2/3 compatibility +_N = b'N' +_V = b'V' +_A = b'A' +_S = b'S' +_L = b'L' + +fortran_int = intc + + +@set_module('numpy.linalg') +class LinAlgError(Exception): + """ + Generic Python-exception-derived object raised by linalg functions. + + General purpose exception class, derived from Python's exception.Exception + class, programmatically raised in linalg functions when a Linear + Algebra-related condition would prevent further correct execution of the + function. 
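+ For example, a caller can guard a computation that may receive singular
+ input by catching the exception:
+
+ >>> try:
+ ...     np.linalg.inv(np.zeros((2, 2)))
+ ... except np.linalg.LinAlgError as err:
+ ...     print(err)
+ Singular matrix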
+ + Parameters + ---------- + None + + Examples + -------- + >>> from numpy import linalg as LA + >>> LA.inv(np.zeros((2,2))) + Traceback (most recent call last): + File "", line 1, in + File "...linalg.py", line 350, + in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) + File "...linalg.py", line 249, + in solve + raise LinAlgError('Singular matrix') + numpy.linalg.LinAlgError: Singular matrix + + """ + + +def _determine_error_states(): + errobj = geterrobj() + bufsize = errobj[0] + + with errstate(invalid='call', over='ignore', + divide='ignore', under='ignore'): + invalid_call_errmask = geterrobj()[1] + + return [bufsize, invalid_call_errmask, None] + +# Dealing with errors in _umath_linalg +_linalg_error_extobj = _determine_error_states() +del _determine_error_states + +def _raise_linalgerror_singular(err, flag): + raise LinAlgError("Singular matrix") + +def _raise_linalgerror_nonposdef(err, flag): + raise LinAlgError("Matrix is not positive definite") + +def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): + raise LinAlgError("Eigenvalues did not converge") + +def _raise_linalgerror_svd_nonconvergence(err, flag): + raise LinAlgError("SVD did not converge") + +def _raise_linalgerror_lstsq(err, flag): + raise LinAlgError("SVD did not converge in Linear Least Squares") + +def get_linalg_error_extobj(callback): + extobj = list(_linalg_error_extobj) # make a copy + extobj[2] = callback + return extobj + +def _makearray(a): + new = asarray(a) + wrap = getattr(a, "__array_prepare__", new.__array_wrap__) + return new, wrap + +def isComplexType(t): + return issubclass(t, complexfloating) + +_real_types_map = {single : single, + double : double, + csingle : single, + cdouble : double} + +_complex_types_map = {single : csingle, + double : cdouble, + csingle : csingle, + cdouble : cdouble} + +def _realType(t, default=double): + return _real_types_map.get(t, default) + +def _complexType(t, default=cdouble): + return _complex_types_map.get(t, default) + +def _linalgRealType(t): + """Cast the type t to either double or cdouble.""" + return double + +def _commonType(*arrays): + # in lite version, use higher precision (always double or cdouble) + result_type = single + is_complex = False + for a in arrays: + if issubclass(a.dtype.type, inexact): + if isComplexType(a.dtype.type): + is_complex = True + rt = _realType(a.dtype.type, default=None) + if rt is None: + # unsupported inexact scalar + raise TypeError("array type %s is unsupported in linalg" % + (a.dtype.name,)) + else: + rt = double + if rt is double: + result_type = double + if is_complex: + t = cdouble + result_type = _complex_types_map[result_type] + else: + t = double + return t, result_type + + +# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are). + +_fastCT = fastCopyAndTranspose + +def _to_native_byte_order(*arrays): + ret = [] + for arr in arrays: + if arr.dtype.byteorder not in ('=', '|'): + ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) + else: + ret.append(arr) + if len(ret) == 1: + return ret[0] + else: + return ret + +def _fastCopyAndTranspose(type, *arrays): + cast_arrays = () + for a in arrays: + if a.dtype.type is type: + cast_arrays = cast_arrays + (_fastCT(a),) + else: + cast_arrays = cast_arrays + (_fastCT(a.astype(type)),) + if len(cast_arrays) == 1: + return cast_arrays[0] + else: + return cast_arrays + +def _assertRank2(*arrays): + for a in arrays: + if a.ndim != 2: + raise LinAlgError('%d-dimensional array given. 
Array must be ' + 'two-dimensional' % a.ndim) + +def _assertRankAtLeast2(*arrays): + for a in arrays: + if a.ndim < 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) + +def _assertNdSquareness(*arrays): + for a in arrays: + m, n = a.shape[-2:] + if m != n: + raise LinAlgError('Last 2 dimensions of the array must be square') + +def _assertFinite(*arrays): + for a in arrays: + if not (isfinite(a).all()): + raise LinAlgError("Array must not contain infs or NaNs") + +def _isEmpty2d(arr): + # check size first for efficiency + return arr.size == 0 and product(arr.shape[-2:]) == 0 + +def _assertNoEmpty2d(*arrays): + for a in arrays: + if _isEmpty2d(a): + raise LinAlgError("Arrays cannot be empty") + +def transpose(a): + """ + Transpose each matrix in a stack of matrices. + + Unlike np.transpose, this only swaps the last two axes, rather than all of + them + + Parameters + ---------- + a : (...,M,N) array_like + + Returns + ------- + aT : (...,N,M) ndarray + """ + return swapaxes(a, -1, -2) + +# Linear equations + +def _tensorsolve_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensorsolve_dispatcher) +def tensorsolve(a, b, axes=None): + """ + Solve the tensor equation ``a x = b`` for x. + + It is assumed that all indices of `x` are summed over in the product, + together with the rightmost indices of `a`, as is done in, for example, + ``tensordot(a, x, axes=b.ndim)``. + + Parameters + ---------- + a : array_like + Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals + the shape of that sub-tensor of `a` consisting of the appropriate + number of its rightmost indices, and must be such that + ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be + 'square'). + b : array_like + Right-hand tensor, which can be of any shape. + axes : tuple of ints, optional + Axes in `a` to reorder to the right, before inversion. + If None (default), no reordering is done. + + Returns + ------- + x : ndarray, shape Q + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorinv, numpy.einsum + + Examples + -------- + >>> a = np.eye(2*3*4) + >>> a.shape = (2*3, 4, 2, 3, 4) + >>> b = np.random.randn(2*3, 4) + >>> x = np.linalg.tensorsolve(a, b) + >>> x.shape + (2, 3, 4) + >>> np.allclose(np.tensordot(a, x, axes=3), b) + True + + """ + a, wrap = _makearray(a) + b = asarray(b) + an = a.ndim + + if axes is not None: + allaxes = list(range(0, an)) + for k in axes: + allaxes.remove(k) + allaxes.insert(an, k) + a = a.transpose(allaxes) + + oldshape = a.shape[-(an-b.ndim):] + prod = 1 + for k in oldshape: + prod *= k + + a = a.reshape(-1, prod) + b = b.ravel() + res = wrap(solve(a, b)) + res.shape = oldshape + return res + + +def _solve_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_solve_dispatcher) +def solve(a, b): + """ + Solve a linear matrix equation, or system of linear scalar equations. + + Computes the "exact" solution, `x`, of the well-determined, i.e., full + rank, linear matrix equation `ax = b`. + + Parameters + ---------- + a : (..., M, M) array_like + Coefficient matrix. + b : {(..., M,), (..., M, K)}, array_like + Ordinate or "dependent variable" values. + + Returns + ------- + x : {(..., M,), (..., M, K)} ndarray + Solution to the system a x = b. Returned shape is identical to `b`. + + Raises + ------ + LinAlgError + If `a` is singular or not square. + + Notes + ----- + + .. 
versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The solutions are computed using LAPACK routine _gesv + + `a` must be square and of full-rank, i.e., all rows (or, equivalently, + columns) must be linearly independent; if either is not true, use + `lstsq` for the least-squares best "solution" of the + system/equation. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 22. + + Examples + -------- + Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``: + + >>> a = np.array([[3,1], [1,2]]) + >>> b = np.array([9,8]) + >>> x = np.linalg.solve(a, b) + >>> x + array([ 2., 3.]) + + Check that the solution is correct: + + >>> np.allclose(np.dot(a, x), b) + True + + """ + a, _ = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + b, wrap = _makearray(b) + t, result_t = _commonType(a, b) + + # We use the b = (..., M,) logic, only if the number of extra dimensions + # match exactly + if b.ndim == a.ndim - 1: + gufunc = _umath_linalg.solve1 + else: + gufunc = _umath_linalg.solve + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_singular) + r = gufunc(a, b, signature=signature, extobj=extobj) + + return wrap(r.astype(result_t, copy=False)) + + +def _tensorinv_dispatcher(a, ind=None): + return (a,) + + +@array_function_dispatch(_tensorinv_dispatcher) +def tensorinv(a, ind=2): + """ + Compute the 'inverse' of an N-dimensional array. + + The result is an inverse for `a` relative to the tensordot operation + ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, + ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the + tensordot operation. + + Parameters + ---------- + a : array_like + Tensor to 'invert'. Its shape must be 'square', i. e., + ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. + ind : int, optional + Number of first indices that are involved in the inverse sum. + Must be a positive integer, default is 2. + + Returns + ------- + b : ndarray + `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorsolve + + Examples + -------- + >>> a = np.eye(4*6) + >>> a.shape = (4, 6, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=2) + >>> ainv.shape + (8, 3, 4, 6) + >>> b = np.random.randn(4, 6) + >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) + True + + >>> a = np.eye(4*6) + >>> a.shape = (24, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=1) + >>> ainv.shape + (8, 3, 24) + >>> b = np.random.randn(24) + >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + True + + """ + a = asarray(a) + oldshape = a.shape + prod = 1 + if ind > 0: + invshape = oldshape[ind:] + oldshape[:ind] + for k in oldshape[ind:]: + prod *= k + else: + raise ValueError("Invalid ind argument.") + a = a.reshape(prod, -1) + ia = inv(a) + return ia.reshape(*invshape) + + +# Matrix inversion + +def _unary_dispatcher(a): + return (a,) + + +@array_function_dispatch(_unary_dispatcher) +def inv(a): + """ + Compute the (multiplicative) inverse of a matrix. + + Given a square matrix `a`, return the matrix `ainv` satisfying + ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be inverted. 
+ + Returns + ------- + ainv : (..., M, M) ndarray or matrix + (Multiplicative) inverse of the matrix `a`. + + Raises + ------ + LinAlgError + If `a` is not square or inversion fails. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + Examples + -------- + >>> from numpy.linalg import inv + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> ainv = inv(a) + >>> np.allclose(np.dot(a, ainv), np.eye(2)) + True + >>> np.allclose(np.dot(ainv, a), np.eye(2)) + True + + If a is a matrix object, then the return value is a matrix as well: + + >>> ainv = inv(np.matrix(a)) + >>> ainv + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + + Inverses of several matrices can be computed at once: + + >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) + >>> inv(a) + array([[[-2. , 1. ], + [ 1.5, -0.5]], + [[-5. , 2. ], + [ 3. , -1. ]]]) + + """ + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_singular) + ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) + return wrap(ainv.astype(result_t, copy=False)) + + +def _matrix_power_dispatcher(a, n): + return (a,) + + +@array_function_dispatch(_matrix_power_dispatcher) +def matrix_power(a, n): + """ + Raise a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix + squarings and matrix multiplications. If ``n == 0``, the identity matrix + of the same shape as M is returned. If ``n < 0``, the inverse + is computed and then raised to the ``abs(n)``. + + .. note:: Stacks of object matrices are not currently supported. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be "powered." + n : int + The exponent can be any integer or long integer, positive, + negative, or zero. + + Returns + ------- + a**n : (..., M, M) ndarray or matrix object + The return value is the same shape and type as `M`; + if the exponent is positive or zero then the type of the + elements is the same as those of `M`. If the exponent is + negative the elements are floating-point. + + Raises + ------ + LinAlgError + For matrices that are not square or that (for negative powers) cannot + be inverted numerically. + + Examples + -------- + >>> from numpy.linalg import matrix_power + >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit + >>> matrix_power(i, 3) # should = -i + array([[ 0, -1], + [ 1, 0]]) + >>> matrix_power(i, 0) + array([[1, 0], + [0, 1]]) + >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements + array([[ 0., 1.], + [-1., 0.]]) + + Somewhat more sophisticated example + + >>> q = np.zeros((4, 4)) + >>> q[0:2, 0:2] = -i + >>> q[2:4, 2:4] = i + >>> q # one of the three quaternion units not equal to 1 + array([[ 0., -1., 0., 0.], + [ 1., 0., 0., 0.], + [ 0., 0., 0., 1.], + [ 0., 0., -1., 0.]]) + >>> matrix_power(q, 2) # = -np.eye(4) + array([[-1., 0., 0., 0.], + [ 0., -1., 0., 0.], + [ 0., 0., -1., 0.], + [ 0., 0., 0., -1.]]) + + """ + a = asanyarray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + + try: + n = operator.index(n) + except TypeError: + raise TypeError("exponent must be an integer") + + # Fall back on dot for object arrays. 
Object arrays are not supported by + # the current implementation of matmul using einsum + if a.dtype != object: + fmatmul = matmul + elif a.ndim == 2: + fmatmul = dot + else: + raise NotImplementedError( + "matrix_power not supported for stacks of object arrays") + + if n == 0: + a = empty_like(a) + a[...] = eye(a.shape[-2], dtype=a.dtype) + return a + + elif n < 0: + a = inv(a) + n = abs(n) + + # short-cuts. + if n == 1: + return a + + elif n == 2: + return fmatmul(a, a) + + elif n == 3: + return fmatmul(fmatmul(a, a), a) + + # Use binary decomposition to reduce the number of matrix multiplications. + # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to + # increasing powers of 2, and multiply into the result as needed. + z = result = None + while n > 0: + z = a if z is None else fmatmul(z, z) + n, bit = divmod(n, 2) + if bit: + result = z if result is None else fmatmul(result, z) + + return result + + +# Cholesky decomposition + + +@array_function_dispatch(_unary_dispatcher) +def cholesky(a): + """ + Cholesky decomposition. + + Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, + where `L` is lower-triangular and .H is the conjugate transpose operator + (which is the ordinary transpose if `a` is real-valued). `a` must be + Hermitian (symmetric if real-valued) and positive-definite. Only `L` is + actually returned. + + Parameters + ---------- + a : (..., M, M) array_like + Hermitian (symmetric if all elements are real), positive-definite + input matrix. + + Returns + ------- + L : (..., M, M) array_like + Lower-triangular Cholesky factor of `a`. Returns a + matrix object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the decomposition fails, for example, if `a` is not + positive-definite. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The Cholesky decomposition is often used as a fast way of solving + + .. math:: A \\mathbf{x} = \\mathbf{b} + + (when `A` is both Hermitian/symmetric and positive-definite). + + First, we solve for :math:`\\mathbf{y}` in + + .. math:: L \\mathbf{y} = \\mathbf{b}, + + and then for :math:`\\mathbf{x}` in + + .. math:: L.H \\mathbf{x} = \\mathbf{y}. + + Examples + -------- + >>> A = np.array([[1,-2j],[2j,5]]) + >>> A + array([[ 1.+0.j, 0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> L = np.linalg.cholesky(A) + >>> L + array([[ 1.+0.j, 0.+0.j], + [ 0.+2.j, 1.+0.j]]) + >>> np.dot(L, L.T.conj()) # verify that L * L.H = A + array([[ 1.+0.j, 0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? + >>> np.linalg.cholesky(A) # an ndarray object is returned + array([[ 1.+0.j, 0.+0.j], + [ 0.+2.j, 1.+0.j]]) + >>> # But a matrix object is returned if A is a matrix object + >>> np.linalg.cholesky(np.matrix(A)) + matrix([[ 1.+0.j, 0.+0.j], + [ 0.+2.j, 1.+0.j]]) + + """ + extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef) + gufunc = _umath_linalg.cholesky_lo + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + r = gufunc(a, signature=signature, extobj=extobj) + return wrap(r.astype(result_t, copy=False)) + + +# QR decomposition + +def _qr_dispatcher(a, mode=None): + return (a,) + + +@array_function_dispatch(_qr_dispatcher) +def qr(a, mode='reduced'): + """ + Compute the qr factorization of a matrix.
+ + Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is + upper-triangular. + + Parameters + ---------- + a : array_like, shape (M, N) + Matrix to be factored. + mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional + If K = min(M, N), then + + * 'reduced' : returns q, r with dimensions (M, K), (K, N) (default) + * 'complete' : returns q, r with dimensions (M, M), (M, N) + * 'r' : returns r only with dimensions (K, N) + * 'raw' : returns h, tau with dimensions (N, M), (K,) + * 'full' : alias of 'reduced', deprecated + * 'economic' : returns h from 'raw', deprecated. + + The options 'reduced', 'complete', and 'raw' are new in numpy 1.8, + see the notes for more information. The default is 'reduced', and to + maintain backward compatibility with earlier versions of numpy both + it and the old default 'full' can be omitted. Note that array h + returned in 'raw' mode is transposed for calling Fortran. The + 'economic' mode is deprecated. The modes 'full' and 'economic' may + be passed using only the first letter for backwards compatibility, + but all others must be spelled out. See the Notes for more + explanation. + + + Returns + ------- + q : ndarray of float or complex, optional + A matrix with orthonormal columns. When mode = 'complete' the + result is an orthogonal/unitary matrix depending on whether or not + a is real/complex. The determinant may be either +/- 1 in that + case. + r : ndarray of float or complex, optional + The upper-triangular matrix. + (h, tau) : ndarrays of np.double or np.cdouble, optional + The array h contains the Householder reflectors that generate q + along with r. The tau array contains scaling factors for the + reflectors. In the deprecated 'economic' mode only h is returned. + + Raises + ------ + LinAlgError + If factoring fails. + + Notes + ----- + This is an interface to the LAPACK routines dgeqrf, zgeqrf, + dorgqr, and zungqr. + + For more information on the qr factorization, see for example: + https://en.wikipedia.org/wiki/QR_factorization + + Subclasses of `ndarray` are preserved except for the 'raw' mode. So if + `a` is of type `matrix`, all the return values will be matrices too. + + New 'reduced', 'complete', and 'raw' options for mode were added in + NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In + addition the options 'full' and 'economic' were deprecated. Because + 'full' was the previous default and 'reduced' is the new default, + backward compatibility can be maintained by letting `mode` default. + The 'raw' option was added so that LAPACK routines that can multiply + arrays by q using the Householder reflectors can be used. Note that in + this case the returned arrays are of type np.double or np.cdouble and + the h array is transposed to be FORTRAN compatible. No routines using + the 'raw' return are currently exposed by numpy, but some are available + in lapack_lite and just await the necessary work.
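+
+ As a quick sketch of the shape contract described above (using a random
+ 9x6 input, as in the examples that follow), 'reduced' and 'complete'
+ differ only in the shape of `q`:
+
+ >>> a = np.random.randn(9, 6)
+ >>> q, r = np.linalg.qr(a, mode='reduced')
+ >>> q.shape, r.shape
+ ((9, 6), (6, 6))
+ >>> q, r = np.linalg.qr(a, mode='complete')
+ >>> q.shape, r.shape
+ ((9, 9), (9, 6))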
+ + Examples + -------- + >>> a = np.random.randn(9, 6) + >>> q, r = np.linalg.qr(a) + >>> np.allclose(a, np.dot(q, r)) # a does equal qr + True + >>> r2 = np.linalg.qr(a, mode='r') + >>> r3 = np.linalg.qr(a, mode='economic') + >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full' + True + >>> # But only triu parts are guaranteed equal when mode='economic' + >>> np.allclose(r, np.triu(r3[:6,:6], k=0)) + True + + Example illustrating a common use of `qr`: solving of least squares + problems + + What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for + the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points + and you'll see that it should be y0 = 0, m = 1.) The answer is provided + by solving the over-determined matrix equation ``Ax = b``, where:: + + A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) + x = array([[y0], [m]]) + b = array([[1], [0], [2], [1]]) + + If A = qr such that q is orthonormal (which is always possible via + Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice, + however, we simply use `lstsq`.) + + >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) + >>> A + array([[0, 1], + [1, 1], + [1, 1], + [2, 1]]) + >>> b = np.array([1, 0, 2, 1]) + >>> q, r = LA.qr(A) + >>> p = np.dot(q.T, b) + >>> np.dot(LA.inv(r), p) + array([ 1.1e-16, 1.0e+00]) + + """ + if mode not in ('reduced', 'complete', 'r', 'raw'): + if mode in ('f', 'full'): + # 2013-04-01, 1.8 + msg = "".join(( + "The 'full' option is deprecated in favor of 'reduced'.\n", + "For backward compatibility let mode default.")) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'reduced' + elif mode in ('e', 'economic'): + # 2013-04-01, 1.8 + msg = "The 'economic' option is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'economic' + else: + raise ValueError("Unrecognized mode '%s'" % mode) + + a, wrap = _makearray(a) + _assertRank2(a) + m, n = a.shape + t, result_t = _commonType(a) + a = _fastCopyAndTranspose(t, a) + a = _to_native_byte_order(a) + mn = min(m, n) + tau = zeros((mn,), t) + + if isComplexType(t): + lapack_routine = lapack_lite.zgeqrf + routine_name = 'zgeqrf' + else: + lapack_routine = lapack_lite.dgeqrf + routine_name = 'dgeqrf' + + # calculate optimal size of work data 'work' + lwork = 1 + work = zeros((lwork,), t) + results = lapack_routine(m, n, a, max(1, m), tau, work, -1, 0) + if results['info'] != 0: + raise LinAlgError('%s returns %d' % (routine_name, results['info'])) + + # do qr decomposition + lwork = max(1, n, int(abs(work[0]))) + work = zeros((lwork,), t) + results = lapack_routine(m, n, a, max(1, m), tau, work, lwork, 0) + if results['info'] != 0: + raise LinAlgError('%s returns %d' % (routine_name, results['info'])) + + # handle modes that don't return q + if mode == 'r': + r = _fastCopyAndTranspose(result_t, a[:, :mn]) + return wrap(triu(r)) + + if mode == 'raw': + return a, tau + + if mode == 'economic': + if t != result_t : + a = a.astype(result_t, copy=False) + return wrap(a.T) + + # generate q from a + if mode == 'complete' and m > n: + mc = m + q = empty((m, m), t) + else: + mc = mn + q = empty((n, m), t) + q[:n] = a + + if isComplexType(t): + lapack_routine = lapack_lite.zungqr + routine_name = 'zungqr' + else: + lapack_routine = lapack_lite.dorgqr + routine_name = 'dorgqr' + + # determine optimal lwork + lwork = 1 + work = zeros((lwork,), t) + results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, -1, 0) + if results['info'] != 0: + raise LinAlgError('%s returns %d' % (routine_name, 
results['info'])) + + # compute q + lwork = max(1, n, int(abs(work[0]))) + work = zeros((lwork,), t) + results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, lwork, 0) + if results['info'] != 0: + raise LinAlgError('%s returns %d' % (routine_name, results['info'])) + + q = _fastCopyAndTranspose(result_t, q[:mc]) + r = _fastCopyAndTranspose(result_t, a[:, :mc]) + + return wrap(q), wrap(triu(r)) + + +# Eigenvalues + + +@array_function_dispatch(_unary_dispatcher) +def eigvals(a): + """ + Compute the eigenvalues of a general matrix. + + Main difference between `eigvals` and `eig`: the eigenvectors aren't + returned. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues will be computed. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues, each repeated according to its multiplicity. + They are not necessarily ordered, nor are they necessarily + real for real matrices. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eig : eigenvalues and right eigenvectors of general arrays + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigh : eigenvalues and eigenvectors of real symmetric or complex + Hermitian (conjugate symmetric) arrays. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the _geev LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + Examples + -------- + Illustration, using the fact that the eigenvalues of a diagonal matrix + are its diagonal elements, that multiplying a matrix on the left + by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose + of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, + if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as + ``A``: + + >>> from numpy import linalg as LA + >>> x = np.random.random() + >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) + >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) + (1.0, 1.0, 0.0) + + Now multiply a diagonal matrix by Q on one side and by Q.T on the other: + + >>> D = np.diag((-1,1)) + >>> LA.eigvals(D) + array([-1., 1.]) + >>> A = np.dot(Q, D) + >>> A = np.dot(A, Q.T) + >>> LA.eigvals(A) + array([ 1., -1.]) + + """ + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + _assertFinite(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + signature = 'D->D' if isComplexType(t) else 'd->D' + w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj) + + if not isComplexType(t): + if all(w.imag == 0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + return w.astype(result_t, copy=False) + + +def _eigvalsh_dispatcher(a, UPLO=None): + return (a,) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigvalsh(a, UPLO='L'): + """ + Compute the eigenvalues of a complex Hermitian or real symmetric matrix. + + Main difference from eigh: the eigenvectors are not computed. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues are to be + computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). 
+ Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigvals : eigenvalues of general real or complex arrays. + eig : eigenvalues and right eigenvectors of general real or complex + arrays. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues are computed using LAPACK routines _syevd, _heevd + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> LA.eigvalsh(a) + array([ 0.17157288, 5.82842712]) + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[ 5.+2.j, 9.-2.j], + [ 0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() + >>> # with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[ 5.+0.j, 0.-2.j], + [ 0.+2.j, 2.+0.j]]) + >>> wa = LA.eigvalsh(a) + >>> wb = LA.eigvals(b) + >>> wa; wb + array([ 1., 6.]) + array([ 6.+0.j, 1.+0.j]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + if UPLO == 'L': + gufunc = _umath_linalg.eigvalsh_lo + else: + gufunc = _umath_linalg.eigvalsh_up + + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + t, result_t = _commonType(a) + signature = 'D->d' if isComplexType(t) else 'd->d' + w = gufunc(a, signature=signature, extobj=extobj) + return w.astype(_realType(result_t), copy=False) + +def _convertarray(a): + t, result_t = _commonType(a) + a = _fastCT(a.astype(t)) + return a, t, result_t + + +# Eigenvectors + + +@array_function_dispatch(_unary_dispatcher) +def eig(a): + """ + Compute the eigenvalues and right eigenvectors of a square array. + + Parameters + ---------- + a : (..., M, M) array + Matrices for which the eigenvalues and right eigenvectors will + be computed + + Returns + ------- + w : (..., M) array + The eigenvalues, each repeated according to its multiplicity. + The eigenvalues are not necessarily ordered. The resulting + array will be of complex type, unless the imaginary part is + zero in which case it will be cast to a real type. When `a` + is real the resulting eigenvalues will be real (0 imaginary + part) or occur in conjugate pairs + + v : (..., M, M) array + The normalized (unit "length") eigenvectors, such that the + column ``v[:,i]`` is the eigenvector corresponding to the + eigenvalue ``w[i]``. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvals : eigenvalues of a non-symmetric array. + + eigh : eigenvalues and eigenvectors of a real symmetric or complex + Hermitian (conjugate symmetric) array. + + eigvalsh : eigenvalues of a real symmetric or complex Hermitian + (conjugate symmetric) array. + + Notes + ----- + + .. 
versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the _geev LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + The number `w` is an eigenvalue of `a` if there exists a vector + `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and + `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]`` + for :math:`i \\in \\{0,...,M-1\\}`. + + The array `v` of eigenvectors may not be of maximum rank, that is, some + of the columns may be linearly dependent, although round-off error may + obscure that fact. If the eigenvalues are all different, then theoretically + the eigenvectors are linearly independent. Likewise, the (complex-valued) + matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e., + if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate + transpose of `a`. + + Finally, it is emphasized that `v` consists of the *right* (as in + right-hand side) eigenvectors of `a`. A vector `y` satisfying + ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left* + eigenvector of `a`, and, in general, the left and right eigenvectors + of a matrix are not necessarily the (perhaps conjugate) transposes + of each other. + + References + ---------- + G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, + Academic Press, Inc., 1980, Various pp. + + Examples + -------- + >>> from numpy import linalg as LA + + (Almost) trivial example with real e-values and e-vectors. + + >>> w, v = LA.eig(np.diag((1, 2, 3))) + >>> w; v + array([ 1., 2., 3.]) + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + Real matrix possessing complex e-values and e-vectors; note that the + e-values are complex conjugates of each other. + + >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) + >>> w; v + array([ 1. + 1.j, 1. - 1.j]) + array([[ 0.70710678+0.j , 0.70710678+0.j ], + [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]]) + + Complex-valued matrix with real e-values (but complex-valued e-vectors); + note that a.conj().T = a, i.e., a is Hermitian. + + >>> a = np.array([[1, 1j], [-1j, 1]]) + >>> w, v = LA.eig(a) + >>> w; v + array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0} + array([[ 0.00000000+0.70710678j, 0.70710678+0.j ], + [ 0.70710678+0.j , 0.00000000+0.70710678j]]) + + Be careful about round-off error! + + >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) + >>> # Theor. e-values are 1 +/- 1e-9 + >>> w, v = LA.eig(a) + >>> w; v + array([ 1., 1.]) + array([[ 1., 0.], + [ 0., 1.]]) + + """ + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + _assertFinite(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + signature = 'D->DD' if isComplexType(t) else 'd->DD' + w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj) + + if not isComplexType(t) and all(w.imag == 0.0): + w = w.real + vt = vt.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + vt = vt.astype(result_t, copy=False) + return w.astype(result_t, copy=False), wrap(vt) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigh(a, UPLO='L'): + """ + Return the eigenvalues and eigenvectors of a complex Hermitian + (conjugate symmetric) or a real symmetric matrix. 
+ + Returns two objects, a 1-D array containing the eigenvalues of `a`, and + a 2-D square array or matrix (depending on the input type) of the + corresponding eigenvectors (in columns). + + Parameters + ---------- + a : (..., M, M) array + Hermitian or real symmetric matrices whose eigenvalues and + eigenvectors are to be computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + w : (..., M) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + v : {(..., M, M) ndarray, (..., M, M) matrix} + The column ``v[:, i]`` is the normalized eigenvector corresponding + to the eigenvalue ``w[i]``. Will return a matrix object if `a` is + a matrix object. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eig : eigenvalues and right eigenvectors for non-symmetric arrays. + eigvals : eigenvalues of non-symmetric arrays. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues/eigenvectors are computed using LAPACK routines _syevd, + _heevd + + The eigenvalues of real symmetric or complex Hermitian matrices are + always real. [1]_ The array `v` of (column) eigenvectors is unitary + and `a`, `w`, and `v` satisfy the equations + ``dot(a, v[:, i]) = w[i] * v[:, i]``. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 222. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> a + array([[ 1.+0.j, 0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> w, v = LA.eigh(a) + >>> w; v + array([ 0.17157288, 5.82842712]) + array([[-0.92387953+0.j , -0.38268343+0.j ], + [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) + + >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair + array([2.77555756e-17 + 0.j, 0. 
+ 1.38777878e-16j]) + >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair + array([ 0.+0.j, 0.+0.j]) + + >>> A = np.matrix(a) # what happens if input is a matrix object + >>> A + matrix([[ 1.+0.j, 0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> w, v = LA.eigh(A) + >>> w; v + array([ 0.17157288, 5.82842712]) + matrix([[-0.92387953+0.j , -0.38268343+0.j ], + [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[ 5.+2.j, 9.-2.j], + [ 0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[ 5.+0.j, 0.-2.j], + [ 0.+2.j, 2.+0.j]]) + >>> wa, va = LA.eigh(a) + >>> wb, vb = LA.eig(b) + >>> wa; wb + array([ 1., 6.]) + array([ 6.+0.j, 1.+0.j]) + >>> va; vb + array([[-0.44721360-0.j , -0.89442719+0.j ], + [ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]]) + array([[ 0.89442719+0.j , 0.00000000-0.4472136j], + [ 0.00000000-0.4472136j, 0.89442719+0.j ]]) + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + if UPLO == 'L': + gufunc = _umath_linalg.eigh_lo + else: + gufunc = _umath_linalg.eigh_up + + signature = 'D->dD' if isComplexType(t) else 'd->dd' + w, vt = gufunc(a, signature=signature, extobj=extobj) + w = w.astype(_realType(result_t), copy=False) + vt = vt.astype(result_t, copy=False) + return w, wrap(vt) + + +# Singular value decomposition + +def _svd_dispatcher(a, full_matrices=None, compute_uv=None): + return (a,) + + +@array_function_dispatch(_svd_dispatcher) +def svd(a, full_matrices=True, compute_uv=True): + """ + Singular Value Decomposition. + + When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh + = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D + array of `a`'s singular values. When `a` is higher-dimensional, SVD is + applied in stacked mode as explained below. + + Parameters + ---------- + a : (..., M, N) array_like + A real or complex array with ``a.ndim >= 2``. + full_matrices : bool, optional + If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and + ``(..., N, N)``, respectively. Otherwise, the shapes are + ``(..., M, K)`` and ``(..., K, N)``, respectively, where + ``K = min(M, N)``. + compute_uv : bool, optional + Whether or not to compute `u` and `vh` in addition to `s`. True + by default. + + Returns + ------- + u : { (..., M, M), (..., M, K) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + s : (..., K) array + Vector(s) with the singular values, within each vector sorted in + descending order. The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. + vh : { (..., N, N), (..., K, N) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + + Raises + ------ + LinAlgError + If SVD computation does not converge. + + Notes + ----- + + .. 
versionchanged:: 1.8.0 + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The decomposition is performed using LAPACK routine ``_gesdd``. + + SVD is usually described for the factorization of a 2D matrix :math:`A`. + The higher-dimensional case will be discussed below. In the 2D case, SVD is + written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`, + :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s` + contains the singular values of `a` and `u` and `vh` are unitary. The rows + of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are + the eigenvectors of :math:`A A^H`. In both cases the corresponding + (possibly non-zero) eigenvalues are given by ``s**2``. + + If `a` has more than two dimensions, then broadcasting rules apply, as + explained in :ref:`routines.linalg-broadcasting`. This means that SVD is + working in "stacked" mode: it iterates over all indices of the first + ``a.ndim - 2`` dimensions and for each combination SVD is applied to the + last two indices. The matrix `a` can be reconstructed from the + decomposition with either ``(u * s[..., None, :]) @ vh`` or + ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the + function ``np.matmul`` for python versions below 3.5.) + + If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are + all the return values. + + Examples + -------- + >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) + >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3) + + Reconstruction based on full SVD, 2D case: + + >>> u, s, vh = np.linalg.svd(a, full_matrices=True) + >>> u.shape, s.shape, vh.shape + ((9, 9), (6,), (6, 6)) + >>> np.allclose(a, np.dot(u[:, :6] * s, vh)) + True + >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat[:6, :6] = np.diag(s) + >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) + True + + Reconstruction based on reduced SVD, 2D case: + + >>> u, s, vh = np.linalg.svd(a, full_matrices=False) + >>> u.shape, s.shape, vh.shape + ((9, 6), (6,), (6, 6)) + >>> np.allclose(a, np.dot(u * s, vh)) + True + >>> smat = np.diag(s) + >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) + True + + Reconstruction based on full SVD, 4D case: + + >>> u, s, vh = np.linalg.svd(b, full_matrices=True) + >>> u.shape, s.shape, vh.shape + ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh)) + True + >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh)) + True + + Reconstruction based on reduced SVD, 4D case: + + >>> u, s, vh = np.linalg.svd(b, full_matrices=False) + >>> u.shape, s.shape, vh.shape + ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(u * s[..., None, :], vh)) + True + >>> np.allclose(b, np.matmul(u, s[..., None] * vh)) + True + + """ + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) + + m, n = a.shape[-2:] + if compute_uv: + if full_matrices: + if m < n: + gufunc = _umath_linalg.svd_m_f + else: + gufunc = _umath_linalg.svd_n_f + else: + if m < n: + gufunc = _umath_linalg.svd_m_s + else: + gufunc = _umath_linalg.svd_n_s + + signature = 'D->DdD' if isComplexType(t) else 'd->ddd' + u, s, vh = gufunc(a, signature=signature, extobj=extobj) + u = u.astype(result_t, copy=False) + s = s.astype(_realType(result_t), copy=False) + vh = vh.astype(result_t, copy=False) + return wrap(u), s, wrap(vh) + else: + if m < n: + gufunc = 
_umath_linalg.svd_m + else: + gufunc = _umath_linalg.svd_n + + signature = 'D->d' if isComplexType(t) else 'd->d' + s = gufunc(a, signature=signature, extobj=extobj) + s = s.astype(_realType(result_t), copy=False) + return s + + +def _cond_dispatcher(x, p=None): + return (x,) + + +@array_function_dispatch(_cond_dispatcher) +def cond(x, p=None): + """ + Compute the condition number of a matrix. + + This function is capable of returning the condition number using + one of seven different norms, depending on the value of `p` (see + Parameters below). + + Parameters + ---------- + x : (..., M, N) array_like + The matrix whose condition number is sought. + p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional + Order of the norm: + + ===== ============================ + p norm for matrices + ===== ============================ + None 2-norm, computed directly using the ``SVD`` + 'fro' Frobenius norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + 2 2-norm (largest sing. value) + -2 smallest singular value + ===== ============================ + + inf means the numpy.inf object, and the Frobenius norm is + the root-of-sum-of-squares norm. + + Returns + ------- + c : {float, inf} + The condition number of the matrix. May be infinite. + + See Also + -------- + numpy.linalg.norm + + Notes + ----- + The condition number of `x` is defined as the norm of `x` times the + norm of the inverse of `x` [1]_; the norm can be the usual L2-norm + (root-of-sum-of-squares) or one of a number of other matrix norms. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, + Academic Press, Inc., 1980, pg. 285. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) + >>> a + array([[ 1, 0, -1], + [ 0, 1, 0], + [ 1, 0, 1]]) + >>> LA.cond(a) + 1.4142135623730951 + >>> LA.cond(a, 'fro') + 3.1622776601683795 + >>> LA.cond(a, np.inf) + 2.0 + >>> LA.cond(a, -np.inf) + 1.0 + >>> LA.cond(a, 1) + 2.0 + >>> LA.cond(a, -1) + 1.0 + >>> LA.cond(a, 2) + 1.4142135623730951 + >>> LA.cond(a, -2) + 0.70710678118654746 + >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0)) + 0.70710678118654746 + + """ + x = asarray(x) # in case we have a matrix + _assertNoEmpty2d(x) + if p is None or p == 2 or p == -2: + s = svd(x, compute_uv=False) + with errstate(all='ignore'): + if p == -2: + r = s[..., -1] / s[..., 0] + else: + r = s[..., 0] / s[..., -1] + else: + # Call inv(x) ignoring errors. The result array will + # contain nans in the entries where inversion failed. 
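+ # (Note: these norm-based orders require square matrices, hence the
+ # squareness check below; nan entries from a failed inversion are
+ # converted to inf near the end of this function.)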
+ _assertRankAtLeast2(x) + _assertNdSquareness(x) + t, result_t = _commonType(x) + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(all='ignore'): + invx = _umath_linalg.inv(x, signature=signature) + r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1)) + r = r.astype(result_t, copy=False) + + # Convert nans to infs unless the original array had nan entries + r = asarray(r) + nan_mask = isnan(r) + if nan_mask.any(): + nan_mask &= ~isnan(x).any(axis=(-2, -1)) + if r.ndim > 0: + r[nan_mask] = Inf + elif nan_mask: + r[()] = Inf + + # Convention is to return scalars instead of 0d arrays + if r.ndim == 0: + r = r[()] + + return r + + +def _matrix_rank_dispatcher(M, tol=None, hermitian=None): + return (M,) + + +@array_function_dispatch(_matrix_rank_dispatcher) +def matrix_rank(M, tol=None, hermitian=False): + """ + Return matrix rank of array using SVD method + + Rank of the array is the number of singular values of the array that are + greater than `tol`. + + .. versionchanged:: 1.14 + Can now operate on stacks of matrices + + Parameters + ---------- + M : {(M,), (..., M, N)} array_like + input vector or stack of matrices + tol : (...) array_like, float, optional + threshold below which SVD values are considered zero. If `tol` is + None, and ``S`` is an array with singular values for `M`, and + ``eps`` is the epsilon value for datatype of ``S``, then `tol` is + set to ``S.max() * max(M.shape) * eps``. + + .. versionchanged:: 1.14 + Broadcasted against the stack of matrices + hermitian : bool, optional + If True, `M` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.14 + + Notes + ----- + The default threshold to detect rank deficiency is a test on the magnitude + of the singular values of `M`. By default, we identify singular values less + than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with + the symbols defined above). This is the algorithm MATLAB uses [1]. It also + appears in *Numerical recipes* in the discussion of SVD solutions for linear + least squares [2]. + + This default threshold is designed to detect rank deficiency accounting for + the numerical errors of the SVD computation. Imagine that there is a column + in `M` that is an exact (in floating point) linear combination of other + columns in `M`. Computing the SVD on `M` will not produce a singular value + exactly equal to 0 in general: any difference of the smallest SVD value from + 0 will be caused by numerical imprecision in the calculation of the SVD. + Our threshold for small SVD values takes this numerical imprecision into + account, and the default threshold will detect such numerical rank + deficiency. The threshold may declare a matrix `M` rank deficient even if + the linear combination of some columns of `M` is not exactly equal to + another column of `M` but only numerically very close to another column of + `M`. + + We chose our default threshold because it is in wide use. Other thresholds + are possible. For example, elsewhere in the 2007 edition of *Numerical + recipes* there is an alternative threshold of ``S.max() * + np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe + this threshold as being based on "expected roundoff error" (p 71). + + The thresholds above deal with floating point roundoff error in the + calculation of the SVD. 
However, you may have more information about the + sources of error in `M` that would make you consider other tolerance values + to detect *effective* rank deficiency. The most useful measure of the + tolerance depends on the operations you intend to use on your matrix. For + example, if your data come from uncertain measurements with uncertainties + greater than floating point epsilon, choosing a tolerance near that + uncertainty may be preferable. The tolerance may be absolute if the + uncertainties are absolute rather than relative. + + References + ---------- + .. [1] MATLAB reference documentation, "Rank" + https://www.mathworks.com/help/techdoc/ref/rank.html + .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, + "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, + page 795. + + Examples + -------- + >>> from numpy.linalg import matrix_rank + >>> matrix_rank(np.eye(4)) # Full rank matrix + 4 + >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix + >>> matrix_rank(I) + 3 + >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 + 1 + >>> matrix_rank(np.zeros((4,))) + 0 + """ + M = asarray(M) + if M.ndim < 2: + return int(not all(M==0)) + if hermitian: + S = abs(eigvalsh(M)) + else: + S = svd(M, compute_uv=False) + if tol is None: + tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps + else: + tol = asarray(tol)[..., newaxis] + return count_nonzero(S > tol, axis=-1) + + +# Generalized inverse + +def _pinv_dispatcher(a, rcond=None): + return (a,) + + +@array_function_dispatch(_pinv_dispatcher) +def pinv(a, rcond=1e-15): + """ + Compute the (Moore-Penrose) pseudo-inverse of a matrix. + + Calculate the generalized inverse of a matrix using its + singular-value decomposition (SVD) and including all + *large* singular values. + + .. versionchanged:: 1.14 + Can now operate on stacks of matrices + + Parameters + ---------- + a : (..., M, N) array_like + Matrix or stack of matrices to be pseudo-inverted. + rcond : (...) array_like of float + Cutoff for small singular values. + Singular values smaller (in modulus) than + `rcond` * largest_singular_value (again, in modulus) + are set to zero. Broadcasts against the stack of matrices + + Returns + ------- + B : (..., N, M) ndarray + The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so + is `B`. + + Raises + ------ + LinAlgError + If the SVD computation does not converge. + + Notes + ----- + The pseudo-inverse of a matrix A, denoted :math:`A^+`, is + defined as: "the matrix that 'solves' [the least-squares problem] + :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then + :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. + + It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular + value decomposition of A, then + :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are + orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting + of A's so-called singular values, (followed, typically, by + zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix + consisting of the reciprocals of A's singular values + (again, followed by zeros). [1]_ + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pp. 139-142.
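+
+ As a quick supplementary check, for an invertible square matrix the
+ pseudo-inverse coincides with the ordinary inverse:
+
+ >>> a = np.array([[1., 2.], [3., 4.]])
+ >>> np.allclose(np.linalg.pinv(a), np.linalg.inv(a))
+ True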
+ + Examples + -------- + The following example checks that ``a * a+ * a == a`` and + ``a+ * a * a+ == a+``: + + >>> a = np.random.randn(9, 6) + >>> B = np.linalg.pinv(a) + >>> np.allclose(a, np.dot(a, np.dot(B, a))) + True + >>> np.allclose(B, np.dot(B, np.dot(a, B))) + True + + """ + a, wrap = _makearray(a) + rcond = asarray(rcond) + if _isEmpty2d(a): + m, n = a.shape[-2:] + res = empty(a.shape[:-2] + (n, m), dtype=a.dtype) + return wrap(res) + a = a.conjugate() + u, s, vt = svd(a, full_matrices=False) + + # discard small singular values + cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) + large = s > cutoff + s = divide(1, s, where=large, out=s) + s[~large] = 0 + + res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) + return wrap(res) + + +# Determinant + + +@array_function_dispatch(_unary_dispatcher) +def slogdet(a): + """ + Compute the sign and (natural) logarithm of the determinant of an array. + + If an array has a very small or very large determinant, then a call to + `det` may overflow or underflow. This routine is more robust against such + issues, because it computes the logarithm of the determinant rather than + the determinant itself. + + Parameters + ---------- + a : (..., M, M) array_like + Input array, has to be a square 2-D array. + + Returns + ------- + sign : (...) array_like + A number representing the sign of the determinant. For a real matrix, + this is 1, 0, or -1. For a complex matrix, this is a complex number + with absolute value 1 (i.e., it is on the unit circle), or else 0. + logdet : (...) array_like + The natural log of the absolute value of the determinant. + + If the determinant is zero, then `sign` will be 0 and `logdet` will be + -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``. + + See Also + -------- + det + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + .. versionadded:: 1.6.0 + + The determinant is computed via LU factorization using the LAPACK + routine z/dgetrf. + + + Examples + -------- + The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: + + >>> a = np.array([[1, 2], [3, 4]]) + >>> (sign, logdet) = np.linalg.slogdet(a) + >>> (sign, logdet) + (-1, 0.69314718055994529) + >>> sign * np.exp(logdet) + -2.0 + + Computing log-determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> sign, logdet = np.linalg.slogdet(a) + >>> (sign, logdet) + (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) + >>> sign * np.exp(logdet) + array([-2., -3., -8.]) + + This routine succeeds where ordinary `det` does not: + + >>> np.linalg.det(np.eye(500) * 0.1) + 0.0 + >>> np.linalg.slogdet(np.eye(500) * 0.1) + (1, -1151.2925464970228) + + """ + a = asarray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + t, result_t = _commonType(a) + real_t = _realType(result_t) + signature = 'D->Dd' if isComplexType(t) else 'd->dd' + sign, logdet = _umath_linalg.slogdet(a, signature=signature) + sign = sign.astype(result_t, copy=False) + logdet = logdet.astype(real_t, copy=False) + return sign, logdet + + +@array_function_dispatch(_unary_dispatcher) +def det(a): + """ + Compute the determinant of an array. + + Parameters + ---------- + a : (..., M, M) array_like + Input array to compute determinants for. + + Returns + ------- + det : (...) array_like + Determinant of `a`. 
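+
+ A handy sanity check is the product rule ``det(ab) == det(a) * det(b)``:
+
+ >>> a = np.array([[1., 2.], [3., 4.]])
+ >>> b = 2. * np.eye(2)
+ >>> np.allclose(np.linalg.det(np.dot(a, b)),
+ ... np.linalg.det(a) * np.linalg.det(b))
+ True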
+ + See Also + -------- + slogdet : Another way to represent the determinant, more suitable + for large matrices where underflow/overflow may occur. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The determinant is computed via LU factorization using the LAPACK + routine z/dgetrf. + + Examples + -------- + The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.linalg.det(a) + -2.0 + + Computing determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> np.linalg.det(a) + array([-2., -3., -8.]) + + """ + a = asarray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + r = _umath_linalg.det(a, signature=signature) + r = r.astype(result_t, copy=False) + return r + + +# Linear Least Squares + +def _lstsq_dispatcher(a, b, rcond=None): + return (a, b) + + +@array_function_dispatch(_lstsq_dispatcher) +def lstsq(a, b, rcond="warn"): + """ + Return the least-squares solution to a linear matrix equation. + + Solves the equation `a x = b` by computing a vector `x` that + minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may + be under-, well-, or over- determined (i.e., the number of + linearly independent rows of `a` can be less than, equal to, or + greater than its number of linearly independent columns). If `a` + is square and of full rank, then `x` (but for round-off error) is + the "exact" solution of the equation. + + Parameters + ---------- + a : (M, N) array_like + "Coefficient" matrix. + b : {(M,), (M, K)} array_like + Ordinate or "dependent variable" values. If `b` is two-dimensional, + the least-squares solution is calculated for each of the `K` columns + of `b`. + rcond : float, optional + Cut-off ratio for small singular values of `a`. + For the purposes of rank determination, singular values are treated + as zero if they are smaller than `rcond` times the largest singular + value of `a`. + + .. versionchanged:: 1.14.0 + If not set, a FutureWarning is given. The previous default + of ``-1`` will use the machine precision as `rcond` parameter, + the new default will use the machine precision times `max(M, N)`. + To silence the warning and use the new default, use ``rcond=None``, + to keep using the old behavior, use ``rcond=-1``. + + Returns + ------- + x : {(N,), (N, K)} ndarray + Least-squares solution. If `b` is two-dimensional, + the solutions are in the `K` columns of `x`. + residuals : {(1,), (K,), (0,)} ndarray + Sums of residuals; squared Euclidean 2-norm for each column in + ``b - a*x``. + If the rank of `a` is < N or M <= N, this is an empty array. + If `b` is 1-dimensional, this is a (1,) shape array. + Otherwise the shape is (K,). + rank : int + Rank of matrix `a`. + s : (min(M, N),) ndarray + Singular values of `a`. + + Raises + ------ + LinAlgError + If computation does not converge. + + Notes + ----- + If `b` is a matrix, then all array results are returned as matrices. + + Examples + -------- + Fit a line, ``y = mx + c``, through some noisy data-points: + + >>> x = np.array([0, 1, 2, 3]) + >>> y = np.array([-1, 0.2, 0.9, 2.1]) + + By examining the coefficients, we see that the line should have a + gradient of roughly 1 and cut the y-axis at, more or less, -1. 
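+
+    As a quick cross-check, the same fit can be read off ``np.polyfit``,
+    which solves the identical least-squares problem internally:
+
+    >>> np.allclose(np.polyfit(x, y, 1), [1.0, -0.95])
+    True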
+ + We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` + and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`: + + >>> A = np.vstack([x, np.ones(len(x))]).T + >>> A + array([[ 0., 1.], + [ 1., 1.], + [ 2., 1.], + [ 3., 1.]]) + + >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0] + >>> print(m, c) + 1.0 -0.95 + + Plot the data along with the fitted line: + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'o', label='Original data', markersize=10) + >>> plt.plot(x, m*x + c, 'r', label='Fitted line') + >>> plt.legend() + >>> plt.show() + + """ + a, _ = _makearray(a) + b, wrap = _makearray(b) + is_1d = b.ndim == 1 + if is_1d: + b = b[:, newaxis] + _assertRank2(a, b) + m, n = a.shape[-2:] + m2, n_rhs = b.shape[-2:] + if m != m2: + raise LinAlgError('Incompatible dimensions') + + t, result_t = _commonType(a, b) + # FIXME: real_t is unused + real_t = _linalgRealType(t) + result_real_t = _realType(result_t) + + # Determine default rcond value + if rcond == "warn": + # 2017-08-19, 1.14.0 + warnings.warn("`rcond` parameter will change to the default of " + "machine precision times ``max(M, N)`` where M and N " + "are the input matrix dimensions.\n" + "To use the future default and silence this warning " + "we advise to pass `rcond=None`, to keep using the old, " + "explicitly pass `rcond=-1`.", + FutureWarning, stacklevel=2) + rcond = -1 + if rcond is None: + rcond = finfo(t).eps * max(n, m) + + if m <= n: + gufunc = _umath_linalg.lstsq_m + else: + gufunc = _umath_linalg.lstsq_n + + signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid' + extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq) + if n_rhs == 0: + # lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis + b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype) + x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj) + if m == 0: + x[...] = 0 + if n_rhs == 0: + # remove the item we added + x = x[..., :n_rhs] + resids = resids[..., :n_rhs] + + # remove the axis we added + if is_1d: + x = x.squeeze(axis=-1) + # we probably should squeeze resids too, but we can't + # without breaking compatibility. + + # as documented + if rank != n or m <= n: + resids = array([], result_real_t) + + # coerce output arrays + s = s.astype(result_real_t, copy=False) + resids = resids.astype(result_real_t, copy=False) + x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed + return wrap(x), wrap(resids), rank, s + + +def _multi_svd_norm(x, row_axis, col_axis, op): + """Compute a function of the singular values of the 2-D matrices in `x`. + + This is a private utility function used by numpy.linalg.norm(). + + Parameters + ---------- + x : ndarray + row_axis, col_axis : int + The axes of `x` that hold the 2-D matrices. + op : callable + This should be either numpy.amin or numpy.amax or numpy.sum. + + Returns + ------- + result : float or ndarray + If `x` is 2-D, the return values is a float. + Otherwise, it is an array with ``x.ndim - 2`` dimensions. + The return values are either the minimum or maximum or sum of the + singular values of the matrices, depending on whether `op` + is `numpy.amin` or `numpy.amax` or `numpy.sum`. + + """ + y = moveaxis(x, (row_axis, col_axis), (-2, -1)) + result = op(svd(y, compute_uv=0), axis=-1) + return result + + +def _norm_dispatcher(x, ord=None, axis=None, keepdims=None): + return (x,) + + +@array_function_dispatch(_norm_dispatcher) +def norm(x, ord=None, axis=None, keepdims=False): + """ + Matrix or vector norm. 
+ + This function is able to return one of eight different matrix norms, + or one of an infinite number of vector norms (described below), depending + on the value of the ``ord`` parameter. + + Parameters + ---------- + x : array_like + Input array. If `axis` is None, `x` must be 1-D or 2-D. + ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional + Order of the norm (see table under ``Notes``). inf means numpy's + `inf` object. + axis : {int, 2-tuple of ints, None}, optional + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. + + .. versionadded:: 1.8.0 + + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `x`. + + .. versionadded:: 1.10.0 + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + Notes + ----- + For values of ``ord <= 0``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 0 -- sum(x != 0) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. value) as below + -2 smallest singular value as below + other -- sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The nuclear norm is the sum of the singular values. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.norm(a) + 7.745966692414834 + >>> LA.norm(b) + 7.745966692414834 + >>> LA.norm(b, 'fro') + 7.745966692414834 + >>> LA.norm(a, np.inf) + 4.0 + >>> LA.norm(b, np.inf) + 9.0 + >>> LA.norm(a, -np.inf) + 0.0 + >>> LA.norm(b, -np.inf) + 2.0 + + >>> LA.norm(a, 1) + 20.0 + >>> LA.norm(b, 1) + 7.0 + >>> LA.norm(a, -1) + -4.6566128774142013e-010 + >>> LA.norm(b, -1) + 6.0 + >>> LA.norm(a, 2) + 7.745966692414834 + >>> LA.norm(b, 2) + 7.3484692283495345 + + >>> LA.norm(a, -2) + nan + >>> LA.norm(b, -2) + 1.8570331885190563e-016 + >>> LA.norm(a, 3) + 5.8480354764257312 + >>> LA.norm(a, -3) + nan + + Using the `axis` argument to compute vector norms: + + >>> c = np.array([[ 1, 2, 3], + ... [-1, 1, 4]]) + >>> LA.norm(c, axis=0) + array([ 1.41421356, 2.23606798, 5. 
]) + >>> LA.norm(c, axis=1) + array([ 3.74165739, 4.24264069]) + >>> LA.norm(c, ord=1, axis=1) + array([ 6., 6.]) + + Using the `axis` argument to compute matrix norms: + + >>> m = np.arange(8).reshape(2,2,2) + >>> LA.norm(m, axis=(1,2)) + array([ 3.74165739, 11.22497216]) + >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) + (3.7416573867739413, 11.224972160321824) + + """ + x = asarray(x) + + if not issubclass(x.dtype.type, (inexact, object_)): + x = x.astype(float) + + # Immediately handle some default, simple, fast, and common cases. + if axis is None: + ndim = x.ndim + if ((ord is None) or + (ord in ('f', 'fro') and ndim == 2) or + (ord == 2 and ndim == 1)): + + x = x.ravel(order='K') + if isComplexType(x.dtype.type): + sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) + else: + sqnorm = dot(x, x) + ret = sqrt(sqnorm) + if keepdims: + ret = ret.reshape(ndim*[1]) + return ret + + # Normalize the `axis` argument to a tuple. + nd = x.ndim + if axis is None: + axis = tuple(range(nd)) + elif not isinstance(axis, tuple): + try: + axis = int(axis) + except Exception: + raise TypeError("'axis' must be None, an integer or a tuple of integers") + axis = (axis,) + + if len(axis) == 1: + if ord == Inf: + return abs(x).max(axis=axis, keepdims=keepdims) + elif ord == -Inf: + return abs(x).min(axis=axis, keepdims=keepdims) + elif ord == 0: + # Zero norm + return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims) + elif ord == 1: + # special case for speedup + return add.reduce(abs(x), axis=axis, keepdims=keepdims) + elif ord is None or ord == 2: + # special case for speedup + s = (x.conj() * x).real + return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) + else: + try: + ord + 1 + except TypeError: + raise ValueError("Invalid norm order for vectors.") + absx = abs(x) + absx **= ord + ret = add.reduce(absx, axis=axis, keepdims=keepdims) + ret **= (1 / ord) + return ret + elif len(axis) == 2: + row_axis, col_axis = axis + row_axis = normalize_axis_index(row_axis, nd) + col_axis = normalize_axis_index(col_axis, nd) + if row_axis == col_axis: + raise ValueError('Duplicate axes given.') + if ord == 2: + ret = _multi_svd_norm(x, row_axis, col_axis, amax) + elif ord == -2: + ret = _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) + elif ord == Inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) + elif ord == -1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) + elif ord == -Inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) + elif ord in [None, 'fro', 'f']: + ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) + elif ord == 'nuc': + ret = _multi_svd_norm(x, row_axis, col_axis, sum) + else: + raise ValueError("Invalid norm order for matrices.") + if keepdims: + ret_shape = list(x.shape) + ret_shape[axis[0]] = 1 + ret_shape[axis[1]] = 1 + ret = ret.reshape(ret_shape) + return ret + else: + raise ValueError("Improper number of dimensions to norm.") + + +# multi_dot + +def _multidot_dispatcher(arrays): + return arrays + + +@array_function_dispatch(_multidot_dispatcher) +def multi_dot(arrays): + """ + Compute the dot product of two or more arrays in a single function call, + while automatically selecting the fastest evaluation order. 
+
+    `multi_dot` chains `numpy.dot` and uses optimal parenthesization
+    of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
+    this can speed up the multiplication a lot.
+
+    If the first argument is 1-D it is treated as a row vector.
+    If the last argument is 1-D it is treated as a column vector.
+    The other arguments must be 2-D.
+
+    Think of `multi_dot` as::
+
+        def multi_dot(arrays): return functools.reduce(np.dot, arrays)
+
+
+    Parameters
+    ----------
+    arrays : sequence of array_like
+        If the first argument is 1-D it is treated as row vector.
+        If the last argument is 1-D it is treated as column vector.
+        The other arguments must be 2-D.
+
+    Returns
+    -------
+    output : ndarray
+        Returns the dot product of the supplied arrays.
+
+    See Also
+    --------
+    dot : dot multiplication with two arguments.
+
+    References
+    ----------
+
+    .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
+    .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
+
+    Examples
+    --------
+    `multi_dot` allows you to write::
+
+        >>> from numpy.linalg import multi_dot
+        >>> # Prepare some data
+        >>> A = np.random.random((10000, 100))
+        >>> B = np.random.random((100, 1000))
+        >>> C = np.random.random((1000, 5))
+        >>> D = np.random.random((5, 333))
+        >>> # the actual dot multiplication
+        >>> multi_dot([A, B, C, D])
+
+    instead of::
+
+        >>> np.dot(np.dot(np.dot(A, B), C), D)
+        >>> # or
+        >>> A.dot(B).dot(C).dot(D)
+
+    Notes
+    -----
+    The cost for a matrix multiplication can be calculated with the
+    following function::
+
+        def cost(A, B):
+            return A.shape[0] * A.shape[1] * B.shape[1]
+
+    Let's assume we have three matrices
+    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
+
+    The costs for the two different parenthesizations are as follows::
+
+        cost((AB)C) = 10*100*5 + 10*5*50   = 5000 + 2500   = 7500
+        cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
+
+    """
+    n = len(arrays)
+    # optimization only makes sense for len(arrays) > 2
+    if n < 2:
+        raise ValueError("Expecting at least two arrays.")
+    elif n == 2:
+        return dot(arrays[0], arrays[1])
+
+    arrays = [asanyarray(a) for a in arrays]
+
+    # save original ndim to reshape the result array into the proper form later
+    ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
+    # Explicitly convert vectors to 2D arrays to keep the logic of the internal
+    # _multi_dot_* functions as simple as possible.
+    if arrays[0].ndim == 1:
+        arrays[0] = atleast_2d(arrays[0])
+    if arrays[-1].ndim == 1:
+        arrays[-1] = atleast_2d(arrays[-1]).T
+    _assertRank2(*arrays)
+
+    # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
+    if n == 3:
+        result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
+    else:
+        order = _multi_dot_matrix_chain_order(arrays)
+        result = _multi_dot(arrays, order, 0, n - 1)
+
+    # return proper shape
+    if ndim_first == 1 and ndim_last == 1:
+        return result[0, 0]  # scalar
+    elif ndim_first == 1 or ndim_last == 1:
+        return result.ravel()  # 1-D
+    else:
+        return result
+
+
+def _multi_dot_three(A, B, C):
+    """
+    Find the best order for three arrays and do the multiplication.
+
+    For three arguments `_multi_dot_three` is approximately 15 times faster
+    than `_multi_dot_matrix_chain_order`.
+
+    """
+    a0, a1b0 = A.shape
+    b1c0, c1 = C.shape
+    # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
+    cost1 = a0 * b1c0 * (a1b0 + c1)
+    # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
+    cost2 = a1b0 * c1 * (a0 + b1c0)
+
+    if cost1 < cost2:
+        return dot(dot(A, B), C)
+    else:
+        return dot(A, dot(B, C))
+
+
+def _multi_dot_matrix_chain_order(arrays, return_costs=False):
+    """
+    Return a np.array that encodes the optimal order of multiplications.
+
+    The optimal order array is then used by `_multi_dot()` to do the
+    multiplication.
+
+    Also return the cost matrix if `return_costs` is `True`.
+
+    The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
+    Chapter 15.2, p. 370-378.  Note that Cormen uses 1-based indices.
+
+        cost[i, j] = min([
+            cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
+            for k in range(i, j)])
+
+    """
+    n = len(arrays)
+    # p stores the dimensions of the matrices
+    # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
+    p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
+    # m is a matrix of costs of the subproblems
+    # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
+    m = zeros((n, n), dtype=double)
+    # s is the actual ordering
+    # s[i, j] is the value of k at which we split the product A_i..A_j
+    s = empty((n, n), dtype=intp)
+
+    for l in range(1, n):
+        for i in range(n - l):
+            j = i + l
+            m[i, j] = Inf
+            for k in range(i, j):
+                q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
+                if q < m[i, j]:
+                    m[i, j] = q
+                    s[i, j] = k  # Note that Cormen uses 1-based index
+
+    return (s, m) if return_costs else s
+
+
+def _multi_dot(arrays, order, i, j):
+    """Actually do the multiplication with the given order."""
+    if i == j:
+        return arrays[i]
+    else:
+        return dot(_multi_dot(arrays, order, i, order[i, j]),
+                   _multi_dot(arrays, order, order[i, j] + 1, j))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/linalg.pyc b/project/venv/lib/python2.7/site-packages/numpy/linalg/linalg.pyc
new file mode 100644
index 0000000..6492d0c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/linalg.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/setup.py b/project/venv/lib/python2.7/site-packages/numpy/linalg/setup.py
new file mode 100644
index 0000000..66c07c9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/linalg/setup.py
@@ -0,0 +1,60 @@
+from __future__ import division, print_function
+
+import os
+import sys
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    from numpy.distutils.system_info import get_info
+    config = Configuration('linalg', parent_package, top_path)
+
+    config.add_data_dir('tests')
+
+    # Configure lapack_lite
+
+    src_dir = 'lapack_lite'
+    lapack_lite_src = [
+        os.path.join(src_dir, 'python_xerbla.c'),
+        os.path.join(src_dir, 'f2c_z_lapack.c'),
+        os.path.join(src_dir, 'f2c_c_lapack.c'),
+        os.path.join(src_dir, 'f2c_d_lapack.c'),
+        os.path.join(src_dir, 'f2c_s_lapack.c'),
+        os.path.join(src_dir, 'f2c_lapack.c'),
+        os.path.join(src_dir, 'f2c_blas.c'),
+        os.path.join(src_dir, 'f2c_config.c'),
+        os.path.join(src_dir, 'f2c.c'),
+    ]
+    all_sources = config.paths(lapack_lite_src)
+
+    lapack_info = get_info('lapack_opt', 0)  # and {}
+
+    def get_lapack_lite_sources(ext, build_dir):
+        if not lapack_info:
+            print("### Warning: Using unoptimized lapack 
###") + return all_sources + else: + if sys.platform == 'win32': + print("### Warning: python_xerbla.c is disabled ###") + return [] + return [all_sources[0]] + + config.add_extension( + 'lapack_lite', + sources=['lapack_litemodule.c', get_lapack_lite_sources], + depends=['lapack_lite/f2c.h'], + extra_info=lapack_info, + ) + + # umath_linalg module + config.add_extension( + '_umath_linalg', + sources=['umath_linalg.c.src', get_lapack_lite_sources], + depends=['lapack_lite/f2c.h'], + extra_info=lapack_info, + libraries=['npymath'], + ) + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/linalg/setup.pyc new file mode 100644 index 0000000..842e693 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/__init__.pyc new file mode 100644 index 0000000..6cf0a6a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_build.py b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_build.py new file mode 100644 index 0000000..921390d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_build.py @@ -0,0 +1,55 @@ +from __future__ import division, absolute_import, print_function + +from subprocess import PIPE, Popen +import sys +import re +import pytest + +from numpy.linalg import lapack_lite +from numpy.testing import assert_ + + +class FindDependenciesLdd(object): + + def __init__(self): + self.cmd = ['ldd'] + + try: + p = Popen(self.cmd, stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + except OSError: + raise RuntimeError("command %s cannot be run" % self.cmd) + + def get_dependencies(self, lfile): + p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + if not (p.returncode == 0): + raise RuntimeError("failed dependencies check for %s" % lfile) + + return stdout + + def grep_dependencies(self, lfile, deps): + stdout = self.get_dependencies(lfile) + + rdeps = dict([(dep, re.compile(dep)) for dep in deps]) + founds = [] + for l in stdout.splitlines(): + for k, v in rdeps.items(): + if v.search(l): + founds.append(k) + + return founds + + +class TestF77Mismatch(object): + + @pytest.mark.skipif(not(sys.platform[:5] == 'linux'), + reason="no fortran compiler on non-Linux platform") + def test_lapack(self): + f = FindDependenciesLdd() + deps = f.grep_dependencies(lapack_lite.__file__, + [b'libg2c', b'libgfortran']) + assert_(len(deps) <= 1, + """Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to +cause random crashes and wrong results. 
See numpy INSTALL.txt for more +information.""") diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_build.pyc b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_build.pyc new file mode 100644 index 0000000..a03d12f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_build.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_deprecations.py b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_deprecations.py new file mode 100644 index 0000000..e12755e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_deprecations.py @@ -0,0 +1,22 @@ +"""Test deprecation and future warnings. + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import assert_warns + + +def test_qr_mode_full_future_warning(): + """Check mode='full' FutureWarning. + + In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were + deprecated. The release date will probably be sometime in the summer + of 2013. + + """ + a = np.eye(2) + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_deprecations.pyc b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_deprecations.pyc new file mode 100644 index 0000000..09b6cbd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_deprecations.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_linalg.py b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_linalg.py new file mode 100644 index 0000000..235488c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_linalg.py @@ -0,0 +1,1964 @@ +""" Test functions for linalg module + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import itertools +import traceback +import textwrap +import subprocess +import pytest + +import numpy as np +from numpy import array, single, double, csingle, cdouble, dot, identity, matmul +from numpy import multiply, atleast_2d, inf, asarray +from numpy import linalg +from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError +from numpy.linalg.linalg import _multi_dot_matrix_chain_order +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, + assert_almost_equal, assert_allclose, suppress_warnings, + assert_raises_regex, + ) + + +def consistent_subclass(out, in_): + # For ndarray subclass input, our output should have the same subclass + # (non-ndarray input gets converted to ndarray). 
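+    # E.g. an np.matrix input should produce an np.matrix result, while a
+    # plain list input is expected to come back as a base ndarray.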
+ return type(out) is (type(in_) if isinstance(in_, np.ndarray) + else np.ndarray) + + +old_assert_almost_equal = assert_almost_equal + + +def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw): + if asarray(a).dtype.type in (single, csingle): + decimal = single_decimal + else: + decimal = double_decimal + old_assert_almost_equal(a, b, decimal=decimal, **kw) + + +def get_real_dtype(dtype): + return {single: single, double: double, + csingle: single, cdouble: double}[dtype] + + +def get_complex_dtype(dtype): + return {single: csingle, double: cdouble, + csingle: csingle, cdouble: cdouble}[dtype] + + +def get_rtol(dtype): + # Choose a safe rtol + if dtype in (single, csingle): + return 1e-5 + else: + return 1e-11 + + +# used to categorize tests +all_tags = { + 'square', 'nonsquare', 'hermitian', # mutually exclusive + 'generalized', 'size-0', 'strided' # optional additions +} + + +class LinalgCase(object): + def __init__(self, name, a, b, tags=set()): + """ + A bundle of arguments to be passed to a test case, with an identifying + name, the operands a and b, and a set of tags to filter the tests + """ + assert_(isinstance(name, str)) + self.name = name + self.a = a + self.b = b + self.tags = frozenset(tags) # prevent shared tags + + def check(self, do): + """ + Run the function `do` on this test case, expanding arguments + """ + do(self.a, self.b, tags=self.tags) + + def __repr__(self): + return "" % (self.name,) + + +def apply_tag(tag, cases): + """ + Add the given tag (a string) to each of the cases (a list of LinalgCase + objects) + """ + assert tag in all_tags, "Invalid tag" + for case in cases: + case.tags = case.tags | {tag} + return cases + + +# +# Base test cases +# + +np.random.seed(1234) + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("single", + array([[1., 2.], [3., 4.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("double", + array([[1., 2.], [3., 4.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_2", + array([[1., 2.], [3., 4.]], dtype=double), + array([[2., 1., 4.], [3., 4., 6.]], dtype=double)), + LinalgCase("csingle", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle), + array([2. + 1j, 1. + 2j], dtype=csingle)), + LinalgCase("cdouble", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_2", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)), + LinalgCase("0x0", + np.empty((0, 0), dtype=double), + np.empty((0,), dtype=double), + tags={'size-0'}), + LinalgCase("8x8", + np.random.rand(8, 8), + np.random.rand(8)), + LinalgCase("1x1", + np.random.rand(1, 1), + np.random.rand(1)), + LinalgCase("nonarray", + [[1, 2], [3, 4]], + [2, 1]), +]) + +# non-square test-cases +CASES += apply_tag('nonsquare', [ + LinalgCase("single_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("single_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=single), + array([2., 1., 3.], dtype=single)), + LinalgCase("double_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=double), + array([2., 1., 3.], dtype=double)), + LinalgCase("csingle_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle), + array([2. + 1j, 1. 
+ 2j], dtype=csingle)), + LinalgCase("csingle_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), + LinalgCase("cdouble_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), + LinalgCase("cdouble_nsq_1_2", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("cdouble_nsq_2_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("8x11", + np.random.rand(8, 11), + np.random.rand(8)), + LinalgCase("1x5", + np.random.rand(1, 5), + np.random.rand(1)), + LinalgCase("5x1", + np.random.rand(5, 1), + np.random.rand(5)), + LinalgCase("0x4", + np.random.rand(0, 4), + np.random.rand(0), + tags={'size-0'}), + LinalgCase("4x0", + np.random.rand(4, 0), + np.random.rand(4), + tags={'size-0'}), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hsingle", + array([[1., 2.], [2., 1.]], dtype=single), + None), + LinalgCase("hdouble", + array([[1., 2.], [2., 1.]], dtype=double), + None), + LinalgCase("hcsingle", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle), + None), + LinalgCase("hcdouble", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble), + None), + LinalgCase("hempty", + np.empty((0, 0), dtype=double), + None, + tags={'size-0'}), + LinalgCase("hnonarray", + [[1, 2], [2, 1]], + None), + LinalgCase("matrix_b_only", + array([[1., 2.], [2., 1.]]), + None), + LinalgCase("hmatrix_1x1", + np.random.rand(1, 1), + None), +]) + + +# +# Gufunc test cases +# +def _make_generalized_cases(): + new_cases = [] + + for case in CASES: + if not isinstance(case.a, np.ndarray): + continue + + a = np.array([case.a, 2 * case.a, 3 * case.a]) + if case.b is None: + b = None + else: + b = np.array([case.b, 7 * case.b, 6 * case.b]) + new_case = LinalgCase(case.name + "_tile3", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape) + if case.b is None: + b = None + else: + b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape) + new_case = LinalgCase(case.name + "_tile213", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + return new_cases + + +CASES += _make_generalized_cases() + + +# +# Generate stride combination variations of the above +# +def _stride_comb_iter(x): + """ + Generate cartesian product of strides for all axes + """ + + if not isinstance(x, np.ndarray): + yield x, "nop" + return + + stride_set = [(1,)] * x.ndim + stride_set[-1] = (1, 3, -4) + if x.ndim > 1: + stride_set[-2] = (1, 3, -4) + if x.ndim > 2: + stride_set[-3] = (1, -4) + + for repeats in itertools.product(*tuple(stride_set)): + new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] + slices = tuple([slice(None, None, repeat) for repeat in repeats]) + + # new array with different strides, but same data + xi = np.empty(new_shape, dtype=x.dtype) + xi.view(np.uint32).fill(0xdeadbeef) + xi = xi[slices] + xi[...] 
= x + xi = xi.view(x.__class__) + assert_(np.all(xi == x)) + yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) + + # generate also zero strides if possible + if x.ndim >= 1 and x.shape[-1] == 1: + s = list(x.strides) + s[-1] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0" + if x.ndim >= 2 and x.shape[-2] == 1: + s = list(x.strides) + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_x" + if x.ndim >= 2 and x.shape[:-2] == (1, 1): + s = list(x.strides) + s[-1] = 0 + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_0" + + +def _make_strided_cases(): + new_cases = [] + for case in CASES: + for a, a_label in _stride_comb_iter(case.a): + for b, b_label in _stride_comb_iter(case.b): + new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b, + tags=case.tags | {'strided'}) + new_cases.append(new_case) + return new_cases + + +CASES += _make_strided_cases() + + +# +# Test different routines against the above cases +# +class LinalgTestCase(object): + TEST_CASES = CASES + + def check_cases(self, require=set(), exclude=set()): + """ + Run func on each of the cases with all of the tags in require, and none + of the tags in exclude + """ + for case in self.TEST_CASES: + # filter by require and exclude + if case.tags & require != require: + continue + if case.tags & exclude: + continue + + try: + case.check(self.do) + except Exception: + msg = "In test case: %r\n\n" % case + msg += traceback.format_exc() + raise AssertionError(msg) + + +class LinalgSquareTestCase(LinalgTestCase): + + def test_sq_cases(self): + self.check_cases(require={'square'}, + exclude={'generalized', 'size-0'}) + + def test_empty_sq_cases(self): + self.check_cases(require={'square', 'size-0'}, + exclude={'generalized'}) + + +class LinalgNonsquareTestCase(LinalgTestCase): + + def test_nonsq_cases(self): + self.check_cases(require={'nonsquare'}, + exclude={'generalized', 'size-0'}) + + def test_empty_nonsq_cases(self): + self.check_cases(require={'nonsquare', 'size-0'}, + exclude={'generalized'}) + + +class HermitianTestCase(LinalgTestCase): + + def test_herm_cases(self): + self.check_cases(require={'hermitian'}, + exclude={'generalized', 'size-0'}) + + def test_empty_herm_cases(self): + self.check_cases(require={'hermitian', 'size-0'}, + exclude={'generalized'}) + + +class LinalgGeneralizedSquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_sq_cases(self): + self.check_cases(require={'generalized', 'square'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_sq_cases(self): + self.check_cases(require={'generalized', 'square', 'size-0'}) + + +class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare', 'size-0'}) + + +class HermitianGeneralizedTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian', 'size-0'}, + exclude={'none'}) + + +def dot_generalized(a, b): + a = asarray(a) + if a.ndim >= 3: + if a.ndim == b.ndim: + # matrix x matrix + new_shape = 
a.shape[:-1] + b.shape[-1:] + elif a.ndim == b.ndim + 1: + # matrix x vector + new_shape = a.shape[:-1] + else: + raise ValueError("Not implemented...") + r = np.empty(new_shape, dtype=np.common_type(a, b)) + for c in itertools.product(*map(range, a.shape[:-2])): + r[c] = dot(a[c], b[c]) + return r + else: + return dot(a, b) + + +def identity_like_generalized(a): + a = asarray(a) + if a.ndim >= 3: + r = np.empty(a.shape, dtype=a.dtype) + r[...] = identity(a.shape[-2]) + return r + else: + return identity(a.shape[0]) + + +class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # kept apart from TestSolve for use for testing with matrices. + def do(self, a, b, tags): + x = linalg.solve(a, b) + assert_almost_equal(b, dot_generalized(a, x)) + assert_(consistent_subclass(x, b)) + + +class TestSolve(SolveCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.solve(x, x).dtype, dtype) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + # Test system of 0x0 matrices + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, 0:0, :] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # Test errors for non-square and only b's dimension being 0 + assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) + assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :]) + + # Test broadcasting error + b = np.arange(6).reshape(1, 3, 2) # broadcasting error + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + + # Test zero "single equations" with 0x0 matrices. + b = np.arange(2).reshape(1, 2).view(ArraySubclass) + expected = linalg.solve(a, b)[:, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + b = np.arange(3).reshape(1, 3) + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) + + def test_0_size_k(self): + # test zero multiple equation (K=0) case. + class ArraySubclass(np.ndarray): + pass + a = np.arange(4).reshape(1, 2, 2) + b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, :, 0:0] + result = linalg.solve(a, b[:, :, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # test both zero. 
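+        # (both the matrix dimension N and the number of right-hand sides K
+        # are zero at the same time here)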
+ expected = linalg.solve(a, b)[:, 0:0, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + +class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + a_inv = linalg.inv(a) + assert_almost_equal(dot_generalized(a, a_inv), + identity_like_generalized(a)) + assert_(consistent_subclass(a_inv, a)) + + +class TestInv(InvCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.inv(x).dtype, dtype) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + +class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + ev = linalg.eigvals(a) + evalues, evectors = linalg.eig(a) + assert_almost_equal(ev, evalues) + + +class TestEigvals(EigvalsCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, dtype) + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.complex64) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + evalues, evectors = linalg.eig(a) + assert_allclose(dot_generalized(a, evectors), + np.asarray(evectors) * np.asarray(evalues)[..., None, :], + rtol=get_rtol(evalues.dtype)) + assert_(consistent_subclass(evectors, a)) + + +class TestEig(EigCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, dtype) + assert_equal(v.dtype, dtype) + + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for 
documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, 0) + assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVD(SVDCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + u, s, vh = linalg.svd(x) + assert_equal(u.dtype, dtype) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(vh.dtype, dtype) + s = linalg.svd(x, compute_uv=False) + assert_equal(s.dtype, get_real_dtype(dtype)) + + def test_empty_identity(self): + """ Empty input should put an identity matrix in u or vh """ + x = np.empty((4, 0)) + u, s, vh = linalg.svd(x, compute_uv=True) + assert_equal(u.shape, (4, 4)) + assert_equal(vh.shape, (0, 0)) + assert_equal(u, np.eye(4)) + + x = np.empty((0, 4)) + u, s, vh = linalg.svd(x, compute_uv=True) + assert_equal(u.shape, (0, 0)) + assert_equal(vh.shape, (4, 4)) + assert_equal(vh, np.eye(4)) + + +class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # cond(x, p) for p in (None, 2, -2) + + def do(self, a, b, tags): + c = asarray(a) # a might be a matrix + if 'size-0' in tags: + assert_raises(LinAlgError, linalg.cond, c) + return + + # +-2 norms + s = linalg.svd(c, compute_uv=False) + assert_almost_equal( + linalg.cond(a), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 2), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -2), s[..., -1] / s[..., 0], + single_decimal=5, double_decimal=11) + + # Other norms + cinv = np.linalg.inv(c) + assert_almost_equal( + linalg.cond(a, 1), + abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -1), + abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, np.inf), + abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -np.inf), + abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 'fro'), + np.sqrt((abs(c)**2).sum(-1).sum(-1) + * (abs(cinv)**2).sum(-1).sum(-1)), + single_decimal=5, double_decimal=11) + + +class TestCond(CondCases): + def test_basic_nonsvd(self): + # Smoketest the non-svd norms + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + assert_almost_equal(linalg.cond(A, inf), 4) + assert_almost_equal(linalg.cond(A, -inf), 2/3) + assert_almost_equal(linalg.cond(A, 1), 4) + assert_almost_equal(linalg.cond(A, -1), 0.5) + assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + + def test_singular(self): + # Singular matrices have infinite condition number for + # positive norms, and negative norms shouldn't raise + # exceptions 
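+        # (loosely: cond(A) = ||A|| * ||inv(A)||, and for a singular A the
+        # computed inverse blows up, so any positive norm runs off towards inf)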
+ As = [np.zeros((2, 2)), np.ones((2, 2))] + p_pos = [None, 1, 2, 'fro'] + p_neg = [-1, -2] + for A, p in itertools.product(As, p_pos): + # Inversion may not hit exact infinity, so just check the + # number is large + assert_(linalg.cond(A, p) > 1e15) + for A, p in itertools.product(As, p_neg): + linalg.cond(A, p) + + def test_nan(self): + # nans should be passed through, not converted to infs + ps = [None, 1, -1, 2, -2, 'fro'] + p_pos = [None, 1, 2, 'fro'] + + A = np.ones((2, 2)) + A[0,1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(isinstance(c, np.float_)) + assert_(np.isnan(c)) + + A = np.ones((3, 2, 2)) + A[1,0,1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(np.isnan(c[1])) + if p in p_pos: + assert_(c[0] > 1e15) + assert_(c[2] > 1e15) + else: + assert_(not np.isnan(c[0])) + assert_(not np.isnan(c[2])) + + def test_stacked_singular(self): + # Check behavior when only some of the stacked matrices are + # singular + np.random.seed(1234) + A = np.random.rand(2, 2, 2, 2) + A[0,0] = 0 + A[1,1] = 0 + + for p in (None, 1, 2, 'fro', -1, -2): + c = linalg.cond(A, p) + assert_equal(c[0,0], np.inf) + assert_equal(c[1,1], np.inf) + assert_(np.isfinite(c[0,1])) + assert_(np.isfinite(c[1,0])) + + +class PinvCases(LinalgSquareTestCase, + LinalgNonsquareTestCase, + LinalgGeneralizedSquareTestCase, + LinalgGeneralizedNonsquareTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a) + # `a @ a_ginv == I` does not hold if a is singular + dot = dot_generalized + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinv(PinvCases): + pass + + +class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + d = linalg.det(a) + (s, ld) = linalg.slogdet(a) + if asarray(a).dtype.type in (single, double): + ad = asarray(a).astype(double) + else: + ad = asarray(a).astype(cdouble) + ev = linalg.eigvals(ad) + assert_almost_equal(d, multiply.reduce(ev, axis=-1)) + assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1)) + + s = np.atleast_1d(s) + ld = np.atleast_1d(ld) + m = (s != 0) + assert_almost_equal(np.abs(s[m]), 1) + assert_equal(ld[~m], -inf) + + +class TestDet(DetCases): + def test_zero(self): + assert_equal(linalg.det([[0.0]]), 0.0) + assert_equal(type(linalg.det([[0.0]])), double) + assert_equal(linalg.det([[0.0j]]), 0.0) + assert_equal(type(linalg.det([[0.0j]])), cdouble) + + assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) + assert_equal(type(linalg.slogdet([[0.0]])[0]), double) + assert_equal(type(linalg.slogdet([[0.0]])[1]), double) + assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) + assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) + assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(np.linalg.det(x).dtype, dtype) + ph, s = np.linalg.slogdet(x) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(ph.dtype, dtype) + + def test_0_size(self): + a = np.zeros((0, 0), dtype=np.complex64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.complex64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.complex64) + assert_(res[1].dtype.type is np.float32) + + a = np.zeros((0, 0), dtype=np.float64) + res = linalg.det(a) + assert_equal(res, 1.) 
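+        # (the determinant of a 0x0 matrix is an empty product, i.e. exactly 1)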
+ assert_(res.dtype.type is np.float64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.float64) + assert_(res[1].dtype.type is np.float64) + + +class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): + + def do(self, a, b, tags): + arr = np.asarray(a) + m, n = arr.shape + u, s, vt = linalg.svd(a, 0) + x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) + if m == 0: + assert_((x == 0).all()) + if m <= n: + assert_almost_equal(b, dot(a, x)) + assert_equal(rank, m) + else: + assert_equal(rank, n) + assert_almost_equal(sv, sv.__array_wrap__(s)) + if rank == n and m > n: + expect_resids = ( + np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) + expect_resids = np.asarray(expect_resids) + if np.asarray(b).ndim == 1: + expect_resids.shape = (1,) + assert_equal(residuals.shape, expect_resids.shape) + else: + expect_resids = np.array([]).view(type(x)) + assert_almost_equal(residuals, expect_resids) + assert_(np.issubdtype(residuals.dtype, np.floating)) + assert_(consistent_subclass(x, b)) + assert_(consistent_subclass(residuals, b)) + + +class TestLstsq(LstsqCases): + def test_future_rcond(self): + a = np.array([[0., 1., 0., 1., 2., 0.], + [0., 2., 0., 0., 1., 0.], + [1., 0., 1., 0., 0., 4.], + [0., 0., 0., 2., 3., 0.]]).T + + b = np.array([1, 0, 0, 0, 0, 0]) + with suppress_warnings() as sup: + w = sup.record(FutureWarning, "`rcond` parameter will change") + x, residuals, rank, s = linalg.lstsq(a, b) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + assert_(rank == 3) + # Warning should be raised exactly once (first command) + assert_(len(w) == 1) + + @pytest.mark.parametrize(["m", "n", "n_rhs"], [ + (4, 2, 2), + (0, 4, 1), + (0, 4, 2), + (4, 0, 1), + (4, 0, 2), + (4, 2, 0), + (0, 0, 0) + ]) + def test_empty_a_b(self, m, n, n_rhs): + a = np.arange(m * n).reshape(m, n) + b = np.ones((m, n_rhs)) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + if m == 0: + assert_((x == 0).all()) + assert_equal(x.shape, (n, n_rhs)) + assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,))) + if m > n and n_rhs > 0: + # residuals are exactly the squared norms of b's columns + r = b - np.dot(a, x) + assert_almost_equal(residuals, (r * r).sum(axis=-2)) + assert_equal(rank, min(m, n)) + assert_equal(s.shape, (min(m, n),)) + + def test_incompatible_dims(self): + # use modified version of docstring example + x = np.array([0, 1, 2, 3]) + y = np.array([-1, 0.2, 0.9, 2.1, 3.3]) + A = np.vstack([x, np.ones(len(x))]).T + with assert_raises_regex(LinAlgError, "Incompatible dimensions"): + linalg.lstsq(A, y, rcond=None) + + +@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) +class TestMatrixPower(object): + + rshft_0 = np.eye(4) + rshft_1 = rshft_0[[3, 0, 1, 2]] + rshft_2 = rshft_0[[2, 3, 0, 1]] + rshft_3 = rshft_0[[1, 2, 3, 0]] + rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] + noninv = array([[1, 0], [0, 0]]) + stacked = np.block([[[rshft_0]]]*2) + #FIXME the 'e' dtype might work in future + dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] + + def test_large_power(self, dt): + rshft = self.rshft_1.astype(dt) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), 
self.rshft_3) + + def test_power_is_zero(self, dt): + def tz(M): + mz = matrix_power(M, 0) + assert_equal(mz, identity_like_generalized(M)) + assert_equal(mz.dtype, M.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_one(self, dt): + def tz(mat): + mz = matrix_power(mat, 1) + assert_equal(mz, mat) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_two(self, dt): + def tz(mat): + mz = matrix_power(mat, 2) + mmul = matmul if mat.dtype != object else dot + assert_equal(mz, mmul(mat, mat)) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_minus_one(self, dt): + def tz(mat): + invmat = matrix_power(mat, -1) + mmul = matmul if mat.dtype != object else dot + assert_almost_equal( + mmul(invmat, mat), identity_like_generalized(mat)) + + for mat in self.rshft_all: + if dt not in self.dtnoinv: + tz(mat.astype(dt)) + + def test_exceptions_bad_power(self, dt): + mat = self.rshft_0.astype(dt) + assert_raises(TypeError, matrix_power, mat, 1.5) + assert_raises(TypeError, matrix_power, mat, [1]) + + def test_exceptions_non_square(self, dt): + assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1) + assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1) + assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1) + + def test_exceptions_not_invertible(self, dt): + if dt in self.dtnoinv: + return + mat = self.noninv.astype(dt) + assert_raises(LinAlgError, matrix_power, mat, -1) + + + +class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
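+        # eigvalsh itself already returns its eigenvalues in ascending order,
+        # so only the eig() results below need the explicit sort.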
+ ev = linalg.eigvalsh(a, 'L') + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype)) + + ev2 = linalg.eigvalsh(a, 'U') + assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype)) + + +class TestEigvalsh(object): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w = np.linalg.eigvalsh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") + assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w = np.linalg.eigvalsh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w = np.linalg.eigvalsh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w = np.linalg.eigvalsh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w = np.linalg.eigvalsh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w = np.linalg.eigvalsh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float32) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
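+        # as in the eigvalsh case above, eigh's eigenvalues arrive already
+        # sorted in ascending order; only eig's output needs sorting.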
+ ev, evc = linalg.eigh(a) + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_almost_equal(ev, evalues) + + assert_allclose(dot_generalized(a, evc), + np.asarray(ev)[..., None, :] * np.asarray(evc), + rtol=get_rtol(ev.dtype)) + + ev2, evc2 = linalg.eigh(a, 'U') + assert_almost_equal(ev2, evalues) + + assert_allclose(dot_generalized(a, evc2), + np.asarray(ev2)[..., None, :] * np.asarray(evc2), + rtol=get_rtol(ev.dtype), err_msg=repr(a)) + + +class TestEigh(object): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eigh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + assert_equal(v.dtype, dtype) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigh, x, "lower") + assert_raises(ValueError, np.linalg.eigh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w, v = np.linalg.eigh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w, v = np.linalg.eigh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w, v = np.linalg.eigh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w, v = np.linalg.eigh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w, v = np.linalg.eigh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.float32) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class _TestNormBase(object): + dt = None + dec = None + + +class _TestNormGeneral(_TestNormBase): + + def test_empty(self): + assert_equal(norm([]), 0.0) + assert_equal(norm(array([], dtype=self.dt)), 0.0) + assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) + + def test_vector_return_type(self): + a = np.array([1, 0, 1]) + + exact_types = np.typecodes['AllInteger'] + inexact_types = np.typecodes['AllFloat'] + + all_types = exact_types + inexact_types + + for each_inexact_types in all_types: + at = a.astype(each_inexact_types) + + an = norm(at, -np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 0.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 0.0) + + an = norm(at, 0) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2) + + an = norm(at, 1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + 
assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0)) + + an = norm(at, 4) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0)) + + an = norm(at, np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + def test_vector(self): + a = [1, 2, 3, 4] + b = [-1, -2, -3, -4] + c = [-1, 2, -3, 4] + + def _test(v): + np.testing.assert_almost_equal(norm(v), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, inf), 4.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -inf), 1.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 1), 10.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5), + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 0), 4, + decimal=self.dec) + + for v in (a, b, c,): + _test(v) + + for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), + array(c, dtype=self.dt)): + _test(v) + + def test_axis(self): + # Vector norms. + # Compare the use of `axis` with computing the norm of each row + # or column separately. + A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: + expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] + assert_almost_equal(norm(A, ord=order, axis=0), expected0) + expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])] + assert_almost_equal(norm(A, ord=order, axis=1), expected1) + + # Matrix norms. + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + nd = B.ndim + for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']: + for axis in itertools.combinations(range(-nd, nd), 2): + row_axis, col_axis = axis + if row_axis < 0: + row_axis += nd + if col_axis < 0: + col_axis += nd + if row_axis == col_axis: + assert_raises(ValueError, norm, B, ord=order, axis=axis) + else: + n = norm(B, ord=order, axis=axis) + + # The logic using k_index only works for nd = 3. + # This has to be changed if nd is increased. + k_index = nd - (row_axis + col_axis) + if row_axis < col_axis: + expected = [norm(B[:].take(k, axis=k_index), ord=order) + for k in range(B.shape[k_index])] + else: + expected = [norm(B[:].take(k, axis=k_index).T, ord=order) + for k in range(B.shape[k_index])] + assert_almost_equal(n, expected) + + def test_keepdims(self): + A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + allclose_err = 'order {0}, axis = {1}' + shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' + + # check the order=None, axis=None case + expected = norm(A, ord=None, axis=None) + found = norm(A, ord=None, axis=None, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(None, None)) + expected_shape = (1, 1, 1) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, None, None)) + + # Vector norms. 
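+        # (keepdims=True retains each reduced axis as a length-1 dimension,
+        # so squeezing the result should reproduce the plain norm, and the
+        # shape should match A.shape with the reduced entries set to 1.)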
+ for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: + for k in range(A.ndim): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + # Matrix norms. + for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']: + for k in itertools.permutations(range(A.ndim), 2): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k[0]] = 1 + expected_shape[k[1]] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + +class _TestNorm2D(_TestNormBase): + # Define the part for 2d arrays separately, so we can subclass this + # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg. + array = np.array + + def test_matrix_empty(self): + assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0) + + def test_matrix_return_type(self): + a = self.array([[1, 0, 1], [0, 1, 1]]) + + exact_types = np.typecodes['AllInteger'] + + # float32, complex64, float64, complex128 types are the only types + # allowed by `linalg`, which performs the matrix operations used + # within `norm`. + inexact_types = 'fdFD' + + all_types = exact_types + inexact_types + + for each_inexact_types in all_types: + at = a.astype(each_inexact_types) + + an = norm(at, -np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + an = norm(at, 1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 3.0**(1.0/2.0)) + + an = norm(at, -2) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + an = norm(at, np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 'fro') + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 'nuc') + assert_(issubclass(an.dtype.type, np.floating)) + # Lower bar needed to support low precision floats. + # They end up being off by 1 in the 7th place. 
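+            # (For this matrix the squared singular values are the eigenvalues
+            # of a.dot(a.T) = [[2, 1], [1, 2]], i.e. 3 and 1, so the expected
+            # nuclear norm is sqrt(3) + 1 ~= 2.7320508.)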
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6) + + def test_matrix_2x2(self): + A = self.array([[1, 3], [5, 7]], dtype=self.dt) + assert_almost_equal(norm(A), 84 ** 0.5) + assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 10.0) + assert_almost_equal(norm(A, inf), 12.0) + assert_almost_equal(norm(A, -inf), 4.0) + assert_almost_equal(norm(A, 1), 10.0) + assert_almost_equal(norm(A, -1), 6.0) + assert_almost_equal(norm(A, 2), 9.1231056256176615) + assert_almost_equal(norm(A, -2), 0.87689437438234041) + + assert_raises(ValueError, norm, A, 'nofro') + assert_raises(ValueError, norm, A, -3) + assert_raises(ValueError, norm, A, 0) + + def test_matrix_3x3(self): + # This test has been added because the 2x2 example + # happened to have equal nuclear norm and induced 1-norm. + # The 1/10 scaling factor accommodates the absolute tolerance + # used in assert_almost_equal. + A = (1 / 10) * \ + self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) + assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) + assert_almost_equal(norm(A, inf), 1.1) + assert_almost_equal(norm(A, -inf), 0.6) + assert_almost_equal(norm(A, 1), 1.0) + assert_almost_equal(norm(A, -1), 0.4) + assert_almost_equal(norm(A, 2), 0.88722940323461277) + assert_almost_equal(norm(A, -2), 0.19456584790481812) + + def test_bad_args(self): + # Check that bad arguments raise the appropriate exceptions. + + A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + # Using `axis=` or passing in a 1-D array implies vector + # norms are being computed, so also using `ord='fro'` + # or `ord='nuc'` raises a ValueError. + assert_raises(ValueError, norm, A, 'fro', 0) + assert_raises(ValueError, norm, A, 'nuc', 0) + assert_raises(ValueError, norm, [3, 4], 'fro', None) + assert_raises(ValueError, norm, [3, 4], 'nuc', None) + + # Similarly, norm should raise an exception when ord is any finite + # number other than 1, 2, -1 or -2 when computing matrix norms. + for order in [0, 3]: + assert_raises(ValueError, norm, A, order, None) + assert_raises(ValueError, norm, A, order, (0, 1)) + assert_raises(ValueError, norm, B, order, (1, 2)) + + # Invalid axis + assert_raises(np.AxisError, norm, B, None, 3) + assert_raises(np.AxisError, norm, B, None, (2, 3)) + assert_raises(ValueError, norm, B, None, (0, 1, 2)) + + +class _TestNorm(_TestNorm2D, _TestNormGeneral): + pass + + +class TestNorm_NonSystematic(object): + + def test_longdouble_norm(self): + # Non-regression test: p-norm of longdouble would previously raise + # UnboundLocalError. + x = np.arange(10, dtype=np.longdouble) + old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) + + def test_intmin(self): + # Non-regression test: p-norm of signed integer would previously do + # float cast and abs in the wrong order. 
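+        # (abs(-2**31) overflows int32 back to -2**31, so the cast to float
+        # has to happen before the absolute value is taken.)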
+ x = np.array([-2 ** 31], dtype=np.int32) + old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) + + def test_complex_high_ord(self): + # gh-4156 + d = np.empty((2,), dtype=np.clongdouble) + d[0] = 6 + 7j + d[1] = -6 + 7j + res = 11.615898132184 + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) + d = d.astype(np.complex128) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) + d = d.astype(np.complex64) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) + + +# Separate definitions so we can use them for matrix tests. +class _TestNormDoubleBase(_TestNormBase): + dt = np.double + dec = 12 + + +class _TestNormSingleBase(_TestNormBase): + dt = np.float32 + dec = 6 + + +class _TestNormInt64Base(_TestNormBase): + dt = np.int64 + dec = 12 + + +class TestNormDouble(_TestNorm, _TestNormDoubleBase): + pass + + +class TestNormSingle(_TestNorm, _TestNormSingleBase): + pass + + +class TestNormInt64(_TestNorm, _TestNormInt64Base): + pass + + +class TestMatrixRank(object): + + def test_matrix_rank(self): + # Full rank matrix + assert_equal(4, matrix_rank(np.eye(4))) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(matrix_rank(I), 3) + # All zeros - zero rank + assert_equal(matrix_rank(np.zeros((4, 4))), 0) + # 1 dimension - rank 1 unless all 0 + assert_equal(matrix_rank([1, 0, 0, 0]), 1) + assert_equal(matrix_rank(np.zeros((4,))), 0) + # accepts array-like + assert_equal(matrix_rank([1]), 1) + # greater than 2 dimensions treated as stacked matrices + ms = np.array([I, np.eye(4), np.zeros((4,4))]) + assert_equal(matrix_rank(ms), np.array([3, 4, 0])) + # works on scalar + assert_equal(matrix_rank(1), 1) + + def test_symmetric_rank(self): + assert_equal(4, matrix_rank(np.eye(4), hermitian=True)) + assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True)) + assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True)) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(3, matrix_rank(I, hermitian=True)) + # manually supplied tolerance + I[-1, -1] = 1e-8 + assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8)) + assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8)) + + +def test_reduced_rank(): + # Test matrices with reduced rank + rng = np.random.RandomState(20120714) + for i in range(100): + # Make a rank deficient matrix + X = rng.normal(size=(40, 10)) + X[:, 0] = X[:, 1] + X[:, 2] + # Assert that matrix_rank detected deficiency + assert_equal(matrix_rank(X), 9) + X[:, 3] = X[:, 4] + X[:, 5] + assert_equal(matrix_rank(X), 8) + + +class TestQR(object): + # Define the array class here, so run this on matrices elsewhere. + array = np.array + + def check_qr(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
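+        # (Shape conventions checked below: with k = min(m, n), 'complete'
+        # returns q of shape (m, m) and r of shape (m, n), while 'reduced'
+        # returns q of shape (m, k) and r of shape (k, n).)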
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape + k = min(m, n) + + # mode == 'complete' + q, r = linalg.qr(a, mode='complete') + assert_(q.dtype == a_dtype) + assert_(r.dtype == a_dtype) + assert_(isinstance(q, a_type)) + assert_(isinstance(r, a_type)) + assert_(q.shape == (m, m)) + assert_(r.shape == (m, n)) + assert_almost_equal(dot(q, r), a) + assert_almost_equal(dot(q.T.conj(), q), np.eye(m)) + assert_almost_equal(np.triu(r), r) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape == (m, k)) + assert_(r1.shape == (k, n)) + assert_almost_equal(dot(q1, r1), a) + assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) + assert_almost_equal(np.triu(r1), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + + @pytest.mark.parametrize(["m", "n"], [ + (3, 0), + (0, 3), + (0, 0) + ]) + def test_qr_empty(self, m, n): + k = min(m, n) + a = np.empty((m, n)) + + self.check_qr(a) + + h, tau = np.linalg.qr(a, mode='raw') + assert_equal(h.dtype, np.double) + assert_equal(tau.dtype, np.double) + assert_equal(h.shape, (n, m)) + assert_equal(tau.shape, (k,)) + + def test_mode_raw(self): + # The factorization is not unique and varies between libraries, + # so it is not possible to check against known values. Functional + # testing is a possibility, but awaits the exposure of more + # of the functions in lapack_lite. Consequently, this test is + # very limited in scope. Note that the results are in FORTRAN + # order, hence the h arrays are transposed. + a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) + + # Test double + h, tau = linalg.qr(a, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (2, 3)) + assert_(tau.shape == (2,)) + + h, tau = linalg.qr(a.T, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (3, 2)) + assert_(tau.shape == (2,)) + + def test_mode_all_but_economic(self): + a = self.array([[1, 2], [3, 4]]) + b = self.array([[1, 2], [3, 4], [5, 6]]) + for dt in "fd": + m1 = a.astype(dt) + m2 = b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + for dt in "fd": + m1 = 1 + 1j * a.astype(dt) + m2 = 1 + 1j * b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + +class TestCholesky(object): + # TODO: are there no other tests for cholesky? 
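+    # (test_basic_property builds a = b^H b from a random b, which is
+    # Hermitian and, with probability one, positive definite; that is the
+    # input class for which a Cholesky factor L with a = L L^H exists.)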
+ + def test_basic_property(self): + # Check A = L L^H + shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] + dtypes = (np.float32, np.float64, np.complex64, np.complex128) + + for shape, dtype in itertools.product(shapes, dtypes): + np.random.seed(1) + a = np.random.randn(*shape) + if np.issubdtype(dtype, np.complexfloating): + a = a + 1j*np.random.randn(*shape) + + t = list(range(len(shape))) + t[-2:] = -1, -2 + + a = np.matmul(a.transpose(t).conj(), a) + a = np.asarray(a, dtype=dtype) + + c = np.linalg.cholesky(a) + + b = np.matmul(c, c.transpose(t).conj()) + assert_allclose(b, a, + err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c), + atol=500 * a.shape[0] * np.finfo(dtype).eps) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.float64) + # for documentation purpose: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.complex64) + assert_(isinstance(res, np.ndarray)) + + +def test_byteorder_check(): + # Byte order check should pass for native order + if sys.byteorder == 'little': + native = '<' + else: + native = '>' + + for dtt in (np.float32, np.float64): + arr = np.eye(4, dtype=dtt) + n_arr = arr.newbyteorder(native) + sw_arr = arr.newbyteorder('S').byteswap() + assert_equal(arr.dtype.byteorder, '=') + for routine in (linalg.inv, linalg.det, linalg.pinv): + # Normal call + res = routine(arr) + # Native but not '=' + assert_array_equal(res, routine(n_arr)) + # Swapped + assert_array_equal(res, routine(sw_arr)) + + +def test_generalized_raise_multiloop(): + # It should raise an error even if the error doesn't occur in the + # last iteration of the ufunc inner loop + + invertible = np.array([[1, 2], [3, 4]]) + non_invertible = np.array([[1, 1], [1, 1]]) + + x = np.zeros([4, 4, 2, 2])[1::2] + x[...] = invertible + x[0, 0] = non_invertible + + assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) + + +def test_xerbla_override(): + # Check that our xerbla has been successfully linked in. If it is not, + # the default xerbla routine is called, which prints a message to stdout + # and may, or may not, abort the process depending on the LAPACK package. + + XERBLA_OK = 255 + + try: + pid = os.fork() + except (OSError, AttributeError): + # fork failed, or not running on POSIX + pytest.skip("Not POSIX or fork failed.") + + if pid == 0: + # child; close i/o file handles + os.close(1) + os.close(0) + # Avoid producing core files. + import resource + resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) + # These calls may abort. + try: + np.linalg.lapack_lite.xerbla() + except ValueError: + pass + except Exception: + os._exit(os.EX_CONFIG) + + try: + a = np.array([[1.]]) + np.linalg.lapack_lite.dorgqr( + 1, 1, 1, a, + 0, # <- invalid value + a, a, 0, 0) + except ValueError as e: + if "DORGQR parameter number 5" in str(e): + # success, reuse error code to mark success as + # FORTRAN STOP returns as success. + os._exit(XERBLA_OK) + + # Did not abort, but our xerbla was not linked in. 
+        os._exit(os.EX_CONFIG)
+    else:
+        # parent
+        pid, status = os.wait()
+        if os.WEXITSTATUS(status) != XERBLA_OK:
+            pytest.skip('Numpy xerbla not linked in.')
+
+
+def test_sdot_bug_8577():
+    # Regression test that loading certain other libraries does not
+    # result in wrong results in float32 linear algebra.
+    #
+    # There's a bug gh-8577 on OSX that can trigger this, and perhaps
+    # there are also other situations in which it occurs.
+    #
+    # Do the check in a separate process.
+
+    bad_libs = ['PyQt5.QtWidgets', 'IPython']
+
+    template = textwrap.dedent("""
+    import sys
+    {before}
+    try:
+        import {bad_lib}
+    except ImportError:
+        sys.exit(0)
+    {after}
+    x = np.ones(2, dtype=np.float32)
+    sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
+    """)
+
+    for bad_lib in bad_libs:
+        code = template.format(before="import numpy as np", after="",
+                               bad_lib=bad_lib)
+        subprocess.check_call([sys.executable, "-c", code])
+
+        # Swapped import order
+        code = template.format(after="import numpy as np", before="",
+                               bad_lib=bad_lib)
+        subprocess.check_call([sys.executable, "-c", code])
+
+
+class TestMultiDot(object):
+
+    def test_basic_function_with_three_arguments(self):
+        # multi_dot with three arguments uses a fast hand-coded algorithm to
+        # determine the optimal order. Therefore test it separately.
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+
+        assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
+        assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
+
+    def test_basic_function_with_two_arguments(self):
+        # separate code path with two arguments
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+
+        assert_almost_equal(multi_dot([A, B]), A.dot(B))
+        assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
+
+    def test_basic_function_with_dynamic_programing_optimization(self):
+        # multi_dot with four or more arguments uses the dynamic programming
+        # optimization and therefore deserves a separate test
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+        D = np.random.random((2, 1))
+        assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
+
+    def test_vector_as_first_argument(self):
+        # The first argument can be 1-D
+        A1d = np.random.random(2)  # 1-D
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+        D = np.random.random((2, 2))
+
+        # the result should be 1-D
+        assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
+
+    def test_vector_as_last_argument(self):
+        # The last argument can be 1-D
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+        D1d = np.random.random(2)  # 1-D
+
+        # the result should be 1-D
+        assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
+
+    def test_vector_as_first_and_last_argument(self):
+        # The first and last arguments can be 1-D
+        A1d = np.random.random(2)  # 1-D
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+        D1d = np.random.random(2)  # 1-D
+
+        # the result should be a scalar
+        assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
+
+    def test_dynamic_programming_logic(self):
+        # Test for the dynamic programming part
+        # This test is directly taken from Cormen page 376.
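+        # (The recurrence verified here, with p the list of chain dimensions:
+        # m[i, j] = min over i <= k < j of m[i, k] + m[k+1, j] + p[i-1]*p[k]*p[j],
+        # where s[i, j] records the split k attaining that minimum.)
+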
+ arrays = [np.random.random((30, 35)), + np.random.random((35, 15)), + np.random.random((15, 5)), + np.random.random((5, 10)), + np.random.random((10, 20)), + np.random.random((20, 25))] + m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.], + [0., 0., 2625., 4375., 7125., 10500.], + [0., 0., 0., 750., 2500., 5375.], + [0., 0., 0., 0., 1000., 3500.], + [0., 0., 0., 0., 0., 5000.], + [0., 0., 0., 0., 0., 0.]]) + s_expected = np.array([[0, 1, 1, 3, 3, 3], + [0, 0, 2, 3, 3, 3], + [0, 0, 0, 3, 3, 3], + [0, 0, 0, 0, 4, 5], + [0, 0, 0, 0, 0, 5], + [0, 0, 0, 0, 0, 0]], dtype=int) + s_expected -= 1 # Cormen uses 1-based index, python does not. + + s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True) + + # Only the upper triangular part (without the diagonal) is interesting. + assert_almost_equal(np.triu(s[:-1, 1:]), + np.triu(s_expected[:-1, 1:])) + assert_almost_equal(np.triu(m), np.triu(m_expected)) + + def test_too_few_input_arrays(self): + assert_raises(ValueError, multi_dot, []) + assert_raises(ValueError, multi_dot, [np.random.random((3, 3))]) + + +class TestTensorinv(object): + + @pytest.mark.parametrize("arr, ind", [ + (np.ones((4, 6, 8, 2)), 2), + (np.ones((3, 3, 2)), 1), + ]) + def test_non_square_handling(self, arr, ind): + with assert_raises(LinAlgError): + linalg.tensorinv(arr, ind=ind) + + @pytest.mark.parametrize("shape, ind", [ + # examples from docstring + ((4, 6, 8, 3), 2), + ((24, 8, 3), 1), + ]) + def test_tensorinv_shape(self, shape, ind): + a = np.eye(24) + a.shape = shape + ainv = linalg.tensorinv(a=a, ind=ind) + expected = a.shape[ind:] + a.shape[:ind] + actual = ainv.shape + assert_equal(actual, expected) + + @pytest.mark.parametrize("ind", [ + 0, -2, + ]) + def test_tensorinv_ind_limit(self, ind): + a = np.eye(24) + a.shape = (4, 6, 8, 3) + with assert_raises(ValueError): + linalg.tensorinv(a=a, ind=ind) + + def test_tensorinv_result(self): + # mimic a docstring example + a = np.eye(24) + a.shape = (24, 8, 3) + ainv = linalg.tensorinv(a, ind=1) + b = np.ones(24) + assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_linalg.pyc b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_linalg.pyc new file mode 100644 index 0000000..feed86e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_linalg.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_regression.py b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_regression.py new file mode 100644 index 0000000..bd3a458 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_regression.py @@ -0,0 +1,150 @@ +""" Test functions for linalg module +""" +from __future__ import division, absolute_import, print_function + +import warnings + +import numpy as np +from numpy import linalg, arange, float64, array, dot, transpose +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_array_equal, + assert_array_almost_equal, assert_array_less +) + + +class TestRegression(object): + + def test_eig_build(self): + # Ticket #652 + rva = array([1.03221168e+02 + 0.j, + -1.91843603e+01 + 0.j, + -6.04004526e-01 + 15.84422474j, + -6.04004526e-01 - 15.84422474j, + -1.13692929e+01 + 0.j, + -6.57612485e-01 + 10.41755503j, + -6.57612485e-01 - 10.41755503j, + 1.82126812e+01 + 0.j, + 1.06011014e+01 + 0.j, + 7.80732773e+00 + 0.j, + -7.65390898e-01 + 0.j, + 1.51971555e-15 + 0.j, + 
-1.51308713e-15 + 0.j])
+        a = arange(13 * 13, dtype=float64)
+        a.shape = (13, 13)
+        a = a % 17
+        va, ve = linalg.eig(a)
+        va.sort()
+        rva.sort()
+        assert_array_almost_equal(va, rva)
+
+    def test_eigh_build(self):
+        # Ticket 662.
+        rvals = [68.60568999, 89.57756725, 106.67185574]
+
+        cov = array([[77.70273908, 3.51489954, 15.64602427],
+                     [3.51489954, 88.97013878, -1.07431931],
+                     [15.64602427, -1.07431931, 98.18223512]])
+
+        vals, vecs = linalg.eigh(cov)
+        assert_array_almost_equal(vals, rvals)
+
+    def test_svd_build(self):
+        # Ticket 627.
+        a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
+        m, n = a.shape
+        u, s, vh = linalg.svd(a)
+
+        b = dot(transpose(u[:, n:]), a)
+
+        assert_array_almost_equal(b, np.zeros((2, 2)))
+
+    def test_norm_vector_badarg(self):
+        # Regression for #786: Frobenius norm for vectors raises
+        # TypeError.
+        assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
+
+    def test_lapack_endian(self):
+        # For bug #1482
+        a = array([[5.7998084, -2.1825367],
+                   [-2.1825367, 9.85910595]], dtype='>f8')
+        b = array(a, dtype='<f8')
+
+        ap = linalg.cholesky(a)
+        bp = linalg.cholesky(b)
+        assert_array_equal(ap, bp)
+
+    def test_large_svd_32bit(self):
+        # See gh-4442, 64bit would require very large/slow matrices.
+        x = np.eye(1000, 66)
+        np.linalg.svd(x)
+
+    def test_svd_no_uv(self):
+        # gh-4733
+        for shape in (3, 4), (4, 4), (4, 3):
+            for t in float, complex:
+                a = np.ones(shape, dtype=t)
+                w = linalg.svd(a, compute_uv=False)
+                c = np.count_nonzero(np.absolute(w) > 0.5)
+                assert_equal(c, 1)
+                assert_equal(np.linalg.matrix_rank(a), 1)
+                assert_array_less(1, np.linalg.norm(a, ord=2))
+
+    def test_norm_object_array(self):
+        # gh-7575
+        testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
+
+        norm = linalg.norm(testvector)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        norm = linalg.norm(testvector, ord=1)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype != np.dtype('float64'))
+
+        norm = linalg.norm(testvector, ord=2)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        assert_raises(ValueError, linalg.norm, testvector, ord='fro')
+        assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
+        assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", DeprecationWarning)
+            assert_raises((AttributeError, DeprecationWarning),
+                          linalg.norm, testvector, ord=0)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-1)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-2)
+
+        testmatrix = np.array([[np.array([0, 1]), 0, 0],
+                               [0, 0, 0]], dtype=object)
+
+        norm = linalg.norm(testmatrix)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        norm = linalg.norm(testmatrix, ord='fro')
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
+        assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
+        assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
+
+    def test_lstsq_complex_larger_rhs(self):
+        # gh-9891
+        size = 20
+        n_rhs = 70
+        G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
+        u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
+        b = G.dot(u)
+        # This should work without segmentation fault.
+ u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None) + # check results just in case + assert_array_almost_equal(u_lstsq, u) diff --git a/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_regression.pyc new file mode 100644 index 0000000..46b66e6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/linalg/tests/test_regression.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/ma/__init__.py new file mode 100644 index 0000000..36ceb1f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/__init__.py @@ -0,0 +1,56 @@ +""" +============= +Masked Arrays +============= + +Arrays sometimes contain invalid or missing data. When doing operations +on such arrays, we wish to suppress invalid values, which is the purpose masked +arrays fulfill (an example of typical use is given below). + +For example, examine the following array: + +>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan]) + +When we try to calculate the mean of the data, the result is undetermined: + +>>> np.mean(x) +nan + +The mean is calculated using roughly ``np.sum(x)/len(x)``, but since +any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter +masked arrays: + +>>> m = np.ma.masked_array(x, np.isnan(x)) +>>> m +masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], + mask = [False False False True False False False True], + fill_value=1e+20) + +Here, we construct a masked array that suppress all ``NaN`` values. We +may now proceed to calculate the mean of the other values: + +>>> np.mean(m) +2.6666666666666665 + +.. [1] Not-a-Number, a floating point value that is the result of an + invalid operation. + +.. moduleauthor:: Pierre Gerard-Marchant +.. moduleauthor:: Jarrod Millman + +""" +from __future__ import division, absolute_import, print_function + +from . import core +from .core import * + +from . import extras +from .extras import * + +__all__ = ['core', 'extras'] +__all__ += core.__all__ +__all__ += extras.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/__init__.pyc new file mode 100644 index 0000000..33e22be Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/bench.py b/project/venv/lib/python2.7/site-packages/numpy/ma/bench.py new file mode 100644 index 0000000..a9ba42d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/bench.py @@ -0,0 +1,133 @@ +#! 
/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import division, print_function + +import timeit +import numpy + + +############################################################################### +# Global variables # +############################################################################### + + +# Small arrays +xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3) +ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3) +zs = xs + 1j * ys +m1 = [[True, False, False], [False, False, True]] +m2 = [[True, False, True], [False, False, True]] +nmxs = numpy.ma.array(xs, mask=m1) +nmys = numpy.ma.array(ys, mask=m2) +nmzs = numpy.ma.array(zs, mask=m1) + +# Big arrays +xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) +yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) +zl = xl + 1j * yl +maskx = xl > 0.8 +masky = yl < -0.8 +nmxl = numpy.ma.array(xl, mask=maskx) +nmyl = numpy.ma.array(yl, mask=masky) +nmzl = numpy.ma.array(zl, mask=maskx) + + +############################################################################### +# Functions # +############################################################################### + + +def timer(s, v='', nloop=500, nrep=3): + units = ["s", "ms", "µs", "ns"] + scaling = [1, 1e3, 1e6, 1e9] + print("%s : %-50s : " % (v, s), end=' ') + varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz'] + setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames) + Timer = timeit.Timer(stmt=s, setup=setup) + best = min(Timer.repeat(nrep, nloop)) / nloop + if best > 0.0: + order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3) + else: + order = 3 + print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep, + 3, + best * scaling[order], + units[order])) + + +def compare_functions_1v(func, nloop=500, + xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): + funcname = func.__name__ + print("-"*50) + print("%s on small arrays" % funcname) + module, data = "numpy.ma", "nmxs" + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) + + print("%s on large arrays" % funcname) + module, data = "numpy.ma", "nmxl" + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) + return + +def compare_methods(methodname, args, vars='x', nloop=500, test=True, + xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): + print("-"*50) + print("%s on small arrays" % methodname) + data, ver = "nm%ss" % vars, 'numpy.ma' + timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) + + print("%s on large arrays" % methodname) + data, ver = "nm%sl" % vars, 'numpy.ma' + timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) + return + +def compare_functions_2v(func, nloop=500, test=True, + xs=xs, nmxs=nmxs, + ys=ys, nmys=nmys, + xl=xl, nmxl=nmxl, + yl=yl, nmyl=nmyl): + funcname = func.__name__ + print("-"*50) + print("%s on small arrays" % funcname) + module, data = "numpy.ma", "nmxs,nmys" + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) + + print("%s on large arrays" % funcname) + module, data = "numpy.ma", "nmxl,nmyl" + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) + return + + +if __name__ == '__main__': + compare_functions_1v(numpy.sin) + compare_functions_1v(numpy.log) + compare_functions_1v(numpy.sqrt) + + compare_functions_2v(numpy.multiply) + compare_functions_2v(numpy.divide) + compare_functions_2v(numpy.power) + + compare_methods('ravel', '', nloop=1000) + compare_methods('conjugate', '', 'z', nloop=1000) + 
compare_methods('transpose', '', nloop=1000) + compare_methods('compressed', '', nloop=1000) + compare_methods('__getitem__', '0', nloop=1000) + compare_methods('__getitem__', '(0,0)', nloop=1000) + compare_methods('__getitem__', '[0,-1]', nloop=1000) + compare_methods('__setitem__', '0, 17', nloop=1000, test=False) + compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False) + + print("-"*50) + print("__setitem__ on small arrays") + timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) + + print("-"*50) + print("__setitem__ on large arrays") + timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) + + print("-"*50) + print("where on small arrays") + timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000) + print("-"*50) + print("where on large arrays") + timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100) diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/bench.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/bench.pyc new file mode 100644 index 0000000..f0418ec Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/bench.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/core.py b/project/venv/lib/python2.7/site-packages/numpy/ma/core.py new file mode 100644 index 0000000..96d7207 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/core.py @@ -0,0 +1,8092 @@ +""" +numpy.ma : a package to handle missing or invalid values. + +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. + +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +.. 
moduleauthor:: Pierre Gerard-Marchant + +""" +# pylint: disable-msg=E1002 +from __future__ import division, absolute_import, print_function + +import sys +import operator +import warnings +import textwrap +import re +from functools import reduce + +if sys.version_info[0] >= 3: + import builtins +else: + import __builtin__ as builtins + +import numpy as np +import numpy.core.umath as umath +import numpy.core.numerictypes as ntypes +from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue +from numpy import array as narray +from numpy.lib.function_base import angle +from numpy.compat import ( + getargspec, formatargspec, long, basestring, unicode, bytes + ) +from numpy import expand_dims +from numpy.core.multiarray import normalize_axis_index +from numpy.core.numeric import normalize_axis_tuple +from numpy.core._internal import recursive +from numpy.core.numeric import pickle + + +__all__ = [ + 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', + 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', + 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', + 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', + 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', + 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', + 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', + 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', + 'diff', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', + 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', + 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', + 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', + 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', + 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', + 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', + 'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'log2', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', + 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', + 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_invalid', + 'masked_less', 'masked_less_equal', 'masked_not_equal', + 'masked_object', 'masked_outside', 'masked_print_option', + 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', + 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', + 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', + 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod', + 'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder', + 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', + 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', + 'var', 'where', 'zeros', + ] + +MaskType = np.bool_ +nomask = MaskType(0) + +class MaskedArrayFutureWarning(FutureWarning): + pass + +def _deprecate_argsort_axis(arr): + """ + Adjust the axis passed to argsort, warning if necessary + + Parameters + ---------- + arr + The array which argsort was called on + + np.ma.argsort has a long-term bug where the default of the axis argument + is wrong (gh-8701), which now must be kept for backwards compatibiity. 
+ Thankfully, this only makes a difference when arrays are 2- or more- + dimensional, so we only need a warning then. + """ + if arr.ndim <= 1: + # no warning needed - but switch to -1 anyway, to avoid surprising + # subclasses, which are more likely to implement scalar axes. + return -1 + else: + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + warnings.warn( + "In the future the default for argsort will be axis=-1, not the " + "current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=3) + return None + + +def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. + + """ + if initialdoc is None: + return + if note is None: + return initialdoc + + notesplit = re.split(r'\n\s*?Notes\n\s*?-----', initialdoc) + + notedoc = """\ +Notes + ----- + %s""" % note + + if len(notesplit) > 1: + notedoc = '\n\n ' + notedoc + '\n' + + return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) + + +def get_object_signature(obj): + """ + Get the signature from obj + + """ + try: + sig = formatargspec(*getargspec(obj)) + except TypeError: + sig = '' + return sig + + +############################################################################### +# Exceptions # +############################################################################### + + +class MAError(Exception): + """ + Class for masked array related errors. + + """ + pass + + +class MaskError(MAError): + """ + Class for mask related errors. + + """ + pass + + +############################################################################### +# Filling options # +############################################################################### + + +# b: boolean - c: complex - f: floats - i: integer - O: object - S: string +default_filler = {'b': True, + 'c': 1.e20 + 0.0j, + 'f': 1.e20, + 'i': 999999, + 'O': '?', + 'S': b'N/A', + 'u': 999999, + 'V': b'???', + 'U': u'N/A' + } + +# Add datetime64 and timedelta64 types +for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", + "fs", "as"]: + default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) + default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) + +max_filler = ntypes._minvals +max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]]) +min_filler = ntypes._maxvals +min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]]) +if 'float128' in ntypes.typeDict: + max_filler.update([(np.float128, -np.inf)]) + min_filler.update([(np.float128, +np.inf)]) + + +def _recursive_fill_value(dtype, f): + """ + Recursively produce a fill value for `dtype`, calling f on scalar dtypes + """ + if dtype.names is not None: + vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names) + return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + elif dtype.subdtype: + subtype, shape = dtype.subdtype + subval = _recursive_fill_value(subtype, f) + return np.full(shape, subval) + else: + return f(dtype) + + +def _get_dtype_of(obj): + """ Convert the argument for *_fill_value into a dtype """ + if isinstance(obj, np.dtype): + return obj + elif hasattr(obj, 'dtype'): + return obj.dtype + else: + return np.asanyarray(obj).dtype + + +def default_fill_value(obj): + """ + Return the default fill value for the argument object. 
+ + The default filling value depends on the datatype of the input + array or the type of the input scalar: + + ======== ======== + datatype default + ======== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + ======== ======== + + For structured types, a structured scalar is returned, with each field the + default fill value for its type. + + For subarray types, the fill value is an array of the same size containing + the default scalar fill value. + + Parameters + ---------- + obj : ndarray, dtype or scalar + The array data-type or scalar for which the default fill value + is returned. + + Returns + ------- + fill_value : scalar + The default fill value. + + Examples + -------- + >>> np.ma.default_fill_value(1) + 999999 + >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) + 1e+20 + >>> np.ma.default_fill_value(np.dtype(complex)) + (1e+20+0j) + + """ + def _scalar_fill_value(dtype): + if dtype.kind in 'Mm': + return default_filler.get(dtype.str[1:], '?') + else: + return default_filler.get(dtype.kind, '?') + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def _extremum_fill_value(obj, extremum, extremum_name): + + def _scalar_fill_value(dtype): + try: + return extremum[dtype] + except KeyError: + raise TypeError( + "Unsuitable type {} for calculating {}." + .format(dtype, extremum_name) + ) + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def minimum_fill_value(obj): + """ + Return the maximum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the minimum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The maximum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + maximum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.int32() + >>> ma.minimum_fill_value(a) + 2147483647 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.minimum_fill_value(a) + inf + + """ + return _extremum_fill_value(obj, min_filler, "minimum") + + +def maximum_fill_value(obj): + """ + Return the minimum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the maximum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The minimum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + minimum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.int32() + >>> ma.maximum_fill_value(a) + -2147483648 + + An array of numeric data can also be passed. 
+ + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.maximum_fill_value(a) + -inf + + """ + return _extremum_fill_value(obj, max_filler, "maximum") + + +def _recursive_set_fill_value(fillvalue, dt): + """ + Create a fill value for a structured dtype. + + Parameters + ---------- + fillvalue: scalar or array_like + Scalar or array representing the fill value. If it is of shorter + length than the number of fields in dt, it will be resized. + dt: dtype + The structured dtype for which to create the fill value. + + Returns + ------- + val: tuple + A tuple of values corresponding to the structured fill value. + + """ + fillvalue = np.resize(fillvalue, len(dt.names)) + output_value = [] + for (fval, name) in zip(fillvalue, dt.names): + cdtype = dt[name] + if cdtype.subdtype: + cdtype = cdtype.subdtype[0] + + if cdtype.names is not None: + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + +def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype. + + If fill_value is not None, its value is forced to the given dtype. + + The result is always a 0d array. + + """ + ndtype = np.dtype(ndtype) + if fill_value is None: + fill_value = default_fill_value(ndtype) + elif ndtype.names is not None: + if isinstance(fill_value, (ndarray, np.void)): + try: + fill_value = np.array(fill_value, copy=False, dtype=ndtype) + except ValueError: + err_msg = "Unable to transform %s to dtype %s" + raise ValueError(err_msg % (fill_value, ndtype)) + else: + fill_value = np.asarray(fill_value, dtype=object) + fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), + dtype=ndtype) + else: + if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'): + # Note this check doesn't work if fill_value is not a scalar + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) + else: + # In case we want to convert 1e20 to int. + # Also in case of converting string arrays. + try: + fill_value = np.array(fill_value, copy=False, dtype=ndtype) + except (OverflowError, ValueError): + # Raise TypeError instead of OverflowError or ValueError. + # OverflowError is seldom used, and the real problem here is + # that the passed fill_value is not compatible with the ndtype. + err_msg = "Cannot convert fill_value %s to dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) + return np.array(fill_value) + + +def set_fill_value(a, fill_value): + """ + Set the filling value of a, if a is a masked array. + + This function changes the fill value of the masked array `a` in place. + If `a` is not a masked array, the function returns silently, without + doing anything. + + Parameters + ---------- + a : array_like + Input array. + fill_value : dtype + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of `a`. + + Returns + ------- + None + Nothing returned by this function. + + See Also + -------- + maximum_fill_value : Return the default fill value for a dtype. + MaskedArray.fill_value : Return current fill value. + MaskedArray.set_fill_value : Equivalent method. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> a = ma.masked_where(a < 3, a) + >>> a + masked_array(data = [-- -- -- 3 4], + mask = [ True True True False False], + fill_value=999999) + >>> ma.set_fill_value(a, -999) + >>> a + masked_array(data = [-- -- -- 3 4], + mask = [ True True True False False], + fill_value=-999) + + Nothing happens if `a` is not a masked array. + + >>> a = range(5) + >>> a + [0, 1, 2, 3, 4] + >>> ma.set_fill_value(a, 100) + >>> a + [0, 1, 2, 3, 4] + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> ma.set_fill_value(a, 100) + >>> a + array([0, 1, 2, 3, 4]) + + """ + if isinstance(a, MaskedArray): + a.set_fill_value(fill_value) + return + + +def get_fill_value(a): + """ + Return the filling value of a, if any. Otherwise, returns the + default filling value for that type. + + """ + if isinstance(a, MaskedArray): + result = a.fill_value + else: + result = default_fill_value(a) + return result + + +def common_fill_value(a, b): + """ + Return the common filling value of two masked arrays, if any. + + If ``a.fill_value == b.fill_value``, return the fill value, + otherwise return None. + + Parameters + ---------- + a, b : MaskedArray + The masked arrays for which to compare fill values. + + Returns + ------- + fill_value : scalar or None + The common fill value, or None. + + Examples + -------- + >>> x = np.ma.array([0, 1.], fill_value=3) + >>> y = np.ma.array([0, 1.], fill_value=3) + >>> np.ma.common_fill_value(x, y) + 3.0 + + """ + t1 = get_fill_value(a) + t2 = get_fill_value(b) + if t1 == t2: + return t1 + return None + + +def filled(a, fill_value=None): + """ + Return input as an array with masked data replaced by a fill value. + + If `a` is not a `MaskedArray`, `a` itself is returned. + If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to + ``a.fill_value``. + + Parameters + ---------- + a : MaskedArray or array_like + An input object. + fill_value : scalar, optional + Filling value. Default is None. + + Returns + ------- + a : ndarray + The filled array. + + See Also + -------- + compressed + + Examples + -------- + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x.filled() + array([[999999, 1, 2], + [999999, 4, 5], + [ 6, 7, 8]]) + + """ + if hasattr(a, 'filled'): + return a.filled(fill_value) + elif isinstance(a, ndarray): + # Should we check for contiguity ? and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return np.array(a, 'O') + else: + return np.array(a) + + +def get_masked_subclass(*arrays): + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + + In case of siblings, the first listed takes over. + + """ + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + # Don't return MaskedConstant as result: revert to MaskedArray + if rcls.__name__ == 'MaskedConstant': + return MaskedArray + return rcls + + +def getdata(a, subok=True): + """ + Return the data of a masked array as an ndarray. + + Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, + else return `a` as a ndarray or subclass (depending on `subok`) if not. 
+
+    Parameters
+    ----------
+    a : array_like
+        Input ``MaskedArray``, alternatively an ndarray or a subclass thereof.
+    subok : bool
+        Whether to force the output to be a `pure` ndarray (False) or to
+        return a subclass of ndarray if appropriate (True, default).
+
+    See Also
+    --------
+    getmask : Return the mask of a masked array, or nomask.
+    getmaskarray : Return the mask of a masked array, or full array of False.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
+    >>> a
+    masked_array(data =
+     [[1 --]
+     [3 4]],
+          mask =
+     [[False  True]
+     [False False]],
+          fill_value=999999)
+    >>> ma.getdata(a)
+    array([[1, 2],
+           [3, 4]])
+
+    Equivalently use the ``MaskedArray`` `data` attribute.
+
+    >>> a.data
+    array([[1, 2],
+           [3, 4]])
+
+    """
+    try:
+        data = a._data
+    except AttributeError:
+        data = np.array(a, copy=False, subok=subok)
+    if not subok:
+        return data.view(ndarray)
+    return data
+
+
+get_data = getdata
+
+
+def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
+    """
+    Return input with invalid data masked and replaced by a fill value.
+
+    Invalid data means values of `nan`, `inf`, etc.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, a (subclass of) ndarray.
+    mask : sequence, optional
+        Mask. Must be convertible to an array of booleans with the same
+        shape as `data`. True indicates a masked (i.e. invalid) data.
+    copy : bool, optional
+        Whether to use a copy of `a` (True) or to fix `a` in place (False).
+        Default is True.
+    fill_value : scalar, optional
+        Value used for fixing invalid data. Default is None, in which case
+        the ``a.fill_value`` is used.
+
+    Returns
+    -------
+    b : MaskedArray
+        The input array with invalid entries fixed.
+
+    Notes
+    -----
+    A copy is performed by default.
+
+    Examples
+    --------
+    >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
+    >>> x
+    masked_array(data = [-- -1.0 nan inf],
+                 mask = [ True False False False],
+           fill_value = 1e+20)
+    >>> np.ma.fix_invalid(x)
+    masked_array(data = [-- -1.0 -- --],
+                 mask = [ True False  True  True],
+           fill_value = 1e+20)
+
+    >>> fixed = np.ma.fix_invalid(x)
+    >>> fixed.data
+    array([  1.00000000e+00,  -1.00000000e+00,   1.00000000e+20,
+             1.00000000e+20])
+    >>> x.data
+    array([  1.,  -1.,  NaN,  Inf])
+
+    """
+    a = masked_array(a, copy=copy, mask=mask, subok=True)
+    invalid = np.logical_not(np.isfinite(a._data))
+    if not invalid.any():
+        return a
+    a._mask |= invalid
+    if fill_value is None:
+        fill_value = a.fill_value
+    a._data[invalid] = fill_value
+    return a
+
+
+def is_string_or_list_of_strings(val):
+    return (isinstance(val, basestring) or
+            (isinstance(val, list) and val and
+             builtins.all(isinstance(s, basestring) for s in val)))
+
+###############################################################################
+#                                  Ufuncs                                     #
+###############################################################################
+
+
+ufunc_domain = {}
+ufunc_fills = {}
+
+
+class _DomainCheckInterval(object):
+    """
+    Define a valid interval, so that:
+
+    ``domain_check_interval(a,b)(x) == True`` where
+    ``x < a`` or ``x > b``.
+
+    """
+
+    def __init__(self, a, b):
+        "domain_check_interval(a,b)(x) = true where x < a or x > b"
+        if (a > b):
+            (a, b) = (b, a)
+        self.a = a
+        self.b = b
+
+    def __call__(self, x):
+        "Execute the call behavior."
+        # nans at masked positions cause RuntimeWarnings, even though
+        # they are masked. To avoid this we suppress warnings.
+        with np.errstate(invalid='ignore'):
+            return umath.logical_or(umath.greater(x, self.b),
+                                    umath.less(x, self.a))
+
+
+class _DomainTan(object):
+    """
+    Define a valid interval for the `tan` function, so that:
+
+    ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``
+
+    """
+
+    def __init__(self, eps):
+        "domain_tan(eps) = true where abs(cos(x)) < eps"
+        self.eps = eps
+
+    def __call__(self, x):
+        "Executes the call behavior."
+        with np.errstate(invalid='ignore'):
+            return umath.less(umath.absolute(umath.cos(x)), self.eps)
+
+
+class _DomainSafeDivide(object):
+    """
+    Define a domain for safe division.
+
+    """
+
+    def __init__(self, tolerance=None):
+        self.tolerance = tolerance
+
+    def __call__(self, a, b):
+        # Delay the selection of the tolerance to here in order to reduce numpy
+        # import times. The calculation of these parameters is a substantial
+        # component of numpy's import time.
+        if self.tolerance is None:
+            self.tolerance = np.finfo(float).tiny
+        # don't call ma ufuncs from __array_wrap__ which would fail for scalars
+        a, b = np.asarray(a), np.asarray(b)
+        with np.errstate(invalid='ignore'):
+            return umath.absolute(a) * self.tolerance >= umath.absolute(b)
+
+
+class _DomainGreater(object):
+    """
+    DomainGreater(v)(x) is True where x <= v.
+
+    """
+
+    def __init__(self, critical_value):
+        "DomainGreater(v)(x) = true where x <= v"
+        self.critical_value = critical_value
+
+    def __call__(self, x):
+        "Executes the call behavior."
+        with np.errstate(invalid='ignore'):
+            return umath.less_equal(x, self.critical_value)
+
+
+class _DomainGreaterEqual(object):
+    """
+    DomainGreaterEqual(v)(x) is True where x < v.
+
+    """
+
+    def __init__(self, critical_value):
+        "DomainGreaterEqual(v)(x) = true where x < v"
+        self.critical_value = critical_value
+
+    def __call__(self, x):
+        "Executes the call behavior."
+        with np.errstate(invalid='ignore'):
+            return umath.less(x, self.critical_value)
+
+
+class _MaskedUFunc(object):
+    def __init__(self, ufunc):
+        self.f = ufunc
+        self.__doc__ = ufunc.__doc__
+        self.__name__ = ufunc.__name__
+
+    def __str__(self):
+        return "Masked version of {}".format(self.f)
+
+
+class _MaskedUnaryOperation(_MaskedUFunc):
+    """
+    Defines masked version of unary operations, where invalid values are
+    pre-masked.
+
+    Parameters
+    ----------
+    mufunc : callable
+        The function for which to define a masked version. Made available
+        as ``_MaskedUnaryOperation.f``.
+    fill : scalar, optional
+        Filling value, default is 0.
+    domain : class instance
+        Domain for the function. Should be one of the ``_Domain*``
+        classes. Default is None.
+
+    """
+
+    def __init__(self, mufunc, fill=0, domain=None):
+        super(_MaskedUnaryOperation, self).__init__(mufunc)
+        self.fill = fill
+        self.domain = domain
+        ufunc_domain[mufunc] = domain
+        ufunc_fills[mufunc] = fill
+
+    def __call__(self, a, *args, **kwargs):
+        """
+        Execute the call behavior.
+
+        """
+        d = getdata(a)
+        # Deal with domain
+        if self.domain is not None:
+            # Case 1.1. : Domained function
+            # nans at masked positions cause RuntimeWarnings, even though
+            # they are masked. To avoid this we suppress warnings.
+            with np.errstate(divide='ignore', invalid='ignore'):
+                result = self.f(d, *args, **kwargs)
+            # Make a mask
+            m = ~umath.isfinite(result)
+            m |= self.domain(d)
+            m |= getmask(a)
+        else:
+            # Case 1.2. : Function without a domain
+            # Get the result and the mask
+            with np.errstate(divide='ignore', invalid='ignore'):
+                result = self.f(d, *args, **kwargs)
+            m = getmask(a)
+
+        if not result.ndim:
+            # Case 2.1. : The result is scalar
+            if m:
+                return masked
+            return result
+
+        if m is not nomask:
+            # Case 2.2. The result is an array
+            # We need to fill the invalid data back w/ the input. Now,
+            # that's plain silly: in C, we would just skip the element and
+            # keep the original, but we do have to do it that way in Python
+
+            # In case result has a lower dtype than the inputs (as in
+            # equal)
+            try:
+                np.copyto(result, d, where=m)
+            except TypeError:
+                pass
+        # Transform to a (subclass of) MaskedArray
+        masked_result = result.view(get_masked_subclass(a))
+        masked_result._mask = m
+        masked_result._update_from(a)
+        return masked_result
+
+
+class _MaskedBinaryOperation(_MaskedUFunc):
+    """
+    Define masked version of binary operations, where invalid
+    values are pre-masked.
+
+    Parameters
+    ----------
+    mbfunc : function
+        The function for which to define a masked version. Made available
+        as ``_MaskedBinaryOperation.f``.
+    domain : class instance
+        Default domain for the function. Should be one of the ``_Domain*``
+        classes. Default is None.
+    fillx : scalar, optional
+        Filling value for the first argument, default is 0.
+    filly : scalar, optional
+        Filling value for the second argument, default is 0.
+
+    """
+
+    def __init__(self, mbfunc, fillx=0, filly=0):
+        """
+        abfunc(fillx, filly) must be defined.
+
+        abfunc(x, filly) = x for all x to enable reduce.
+
+        """
+        super(_MaskedBinaryOperation, self).__init__(mbfunc)
+        self.fillx = fillx
+        self.filly = filly
+        ufunc_domain[mbfunc] = None
+        ufunc_fills[mbfunc] = (fillx, filly)
+
+    def __call__(self, a, b, *args, **kwargs):
+        """
+        Execute the call behavior.
+
+        """
+        # Get the data, as ndarray
+        (da, db) = (getdata(a), getdata(b))
+        # Get the result
+        with np.errstate():
+            np.seterr(divide='ignore', invalid='ignore')
+            result = self.f(da, db, *args, **kwargs)
+        # Get the mask for the result
+        (ma, mb) = (getmask(a), getmask(b))
+        if ma is nomask:
+            if mb is nomask:
+                m = nomask
+            else:
+                m = umath.logical_or(getmaskarray(a), mb)
+        elif mb is nomask:
+            m = umath.logical_or(ma, getmaskarray(b))
+        else:
+            m = umath.logical_or(ma, mb)
+
+        # Case 1. : scalar
+        if not result.ndim:
+            if m:
+                return masked
+            return result
+
+        # Case 2. : array
+        # Revert result to da where masked
+        if m is not nomask and m.any():
+            # any errors, just abort; impossible to guarantee masked values
+            try:
+                np.copyto(result, da, casting='unsafe', where=m)
+            except Exception:
+                pass
+
+        # Transforms to a (subclass of) MaskedArray
+        masked_result = result.view(get_masked_subclass(a, b))
+        masked_result._mask = m
+        if isinstance(a, MaskedArray):
+            masked_result._update_from(a)
+        elif isinstance(b, MaskedArray):
+            masked_result._update_from(b)
+        return masked_result
+
+    def reduce(self, target, axis=0, dtype=None):
+        """
+        Reduce `target` along the given `axis`.
+
+        """
+        tclass = get_masked_subclass(target)
+        m = getmask(target)
+        t = filled(target, self.filly)
+        if t.shape == ():
+            t = t.reshape(1)
+            if m is not nomask:
+                m = make_mask(m, copy=1)
+                m.shape = (1,)
+
+        if m is nomask:
+            tr = self.f.reduce(t, axis)
+            mr = nomask
+        else:
+            tr = self.f.reduce(t, axis, dtype=dtype or t.dtype)
+            mr = umath.logical_and.reduce(m, axis)
+
+        if not tr.shape:
+            if mr:
+                return masked
+            else:
+                return tr
+        masked_tr = tr.view(tclass)
+        masked_tr._mask = mr
+        return masked_tr
+
+    def outer(self, a, b):
+        """
+        Return the function applied to the outer product of a and b.
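+
+        A rough sketch of the effect (values here are illustrative and not
+        from the original documentation; only the mask is shown because the
+        exact masked-array repr varies between numpy versions):
+
+        >>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
+        >>> np.ma.multiply.outer(x, x).mask
+        array([[False, False,  True],
+               [False, False,  True],
+               [ True,  True,  True]])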
+
+        """
+        (da, db) = (getdata(a), getdata(b))
+        d = self.f.outer(da, db)
+        ma = getmask(a)
+        mb = getmask(b)
+        if ma is nomask and mb is nomask:
+            m = nomask
+        else:
+            ma = getmaskarray(a)
+            mb = getmaskarray(b)
+            m = umath.logical_or.outer(ma, mb)
+        if (not m.ndim) and m:
+            return masked
+        if m is not nomask:
+            np.copyto(d, da, where=m)
+        if not d.shape:
+            return d
+        masked_d = d.view(get_masked_subclass(a, b))
+        masked_d._mask = m
+        return masked_d
+
+    def accumulate(self, target, axis=0):
+        """Accumulate `target` along `axis` after filling with the `filly`
+        fill value.
+
+        """
+        tclass = get_masked_subclass(target)
+        t = filled(target, self.filly)
+        result = self.f.accumulate(t, axis)
+        masked_result = result.view(tclass)
+        return masked_result
+
+
+
+class _DomainedBinaryOperation(_MaskedUFunc):
+    """
+    Define binary operations that have a domain, like divide.
+
+    They have no reduce, outer or accumulate.
+
+    Parameters
+    ----------
+    mbfunc : function
+        The function for which to define a masked version. Made available
+        as ``_DomainedBinaryOperation.f``.
+    domain : class instance
+        Default domain for the function. Should be one of the ``_Domain*``
+        classes.
+    fillx : scalar, optional
+        Filling value for the first argument, default is 0.
+    filly : scalar, optional
+        Filling value for the second argument, default is 0.
+
+    """
+
+    def __init__(self, dbfunc, domain, fillx=0, filly=0):
+        """abfunc(fillx, filly) must be defined.
+           abfunc(x, filly) = x for all x to enable reduce.
+        """
+        super(_DomainedBinaryOperation, self).__init__(dbfunc)
+        self.domain = domain
+        self.fillx = fillx
+        self.filly = filly
+        ufunc_domain[dbfunc] = domain
+        ufunc_fills[dbfunc] = (fillx, filly)
+
+    def __call__(self, a, b, *args, **kwargs):
+        "Execute the call behavior."
+        # Get the data
+        (da, db) = (getdata(a), getdata(b))
+        # Get the result
+        with np.errstate(divide='ignore', invalid='ignore'):
+            result = self.f(da, db, *args, **kwargs)
+        # Get the mask as a combination of the source masks and invalid
+        m = ~umath.isfinite(result)
+        m |= getmask(a)
+        m |= getmask(b)
+        # Apply the domain
+        domain = ufunc_domain.get(self.f, None)
+        if domain is not None:
+            m |= domain(da, db)
+        # Take care of the scalar case first
+        if (not m.ndim):
+            if m:
+                return masked
+            else:
+                return result
+        # When the mask is True, put back da if possible
+        # any errors, just abort; impossible to guarantee masked values
+        try:
+            np.copyto(result, 0, casting='unsafe', where=m)
+            # avoid using "*" since this may be overlaid
+            masked_da = umath.multiply(m, da)
+            # only add back if it can be cast safely
+            if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
+                result += masked_da
+        except Exception:
+            pass
+
+        # Transforms to a (subclass of) MaskedArray
+        masked_result = result.view(get_masked_subclass(a, b))
+        masked_result._mask = m
+        if isinstance(a, MaskedArray):
+            masked_result._update_from(a)
+        elif isinstance(b, MaskedArray):
+            masked_result._update_from(b)
+        return masked_result
+
+
+# Unary ufuncs
+exp = _MaskedUnaryOperation(umath.exp)
+conjugate = _MaskedUnaryOperation(umath.conjugate)
+sin = _MaskedUnaryOperation(umath.sin)
+cos = _MaskedUnaryOperation(umath.cos)
+tan = _MaskedUnaryOperation(umath.tan)
+arctan = _MaskedUnaryOperation(umath.arctan)
+arcsinh = _MaskedUnaryOperation(umath.arcsinh)
+sinh = _MaskedUnaryOperation(umath.sinh)
+cosh = _MaskedUnaryOperation(umath.cosh)
+tanh = _MaskedUnaryOperation(umath.tanh)
+abs = absolute = _MaskedUnaryOperation(umath.absolute)
+angle = _MaskedUnaryOperation(angle)  # from numpy.lib.function_base
+fabs = _MaskedUnaryOperation(umath.fabs)
+negative = _MaskedUnaryOperation(umath.negative)
+floor = _MaskedUnaryOperation(umath.floor)
+ceil = _MaskedUnaryOperation(umath.ceil)
+around = _MaskedUnaryOperation(np.round_)
+logical_not = _MaskedUnaryOperation(umath.logical_not)
+
+# Domained unary ufuncs
+sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
+                             _DomainGreaterEqual(0.0))
+log = _MaskedUnaryOperation(umath.log, 1.0,
+                            _DomainGreater(0.0))
+log2 = _MaskedUnaryOperation(umath.log2, 1.0,
+                             _DomainGreater(0.0))
+log10 = _MaskedUnaryOperation(umath.log10, 1.0,
+                              _DomainGreater(0.0))
+tan = _MaskedUnaryOperation(umath.tan, 0.0,
+                            _DomainTan(1e-35))
+arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
+                               _DomainCheckInterval(-1.0, 1.0))
+arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
+                               _DomainCheckInterval(-1.0, 1.0))
+arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
+                                _DomainGreaterEqual(1.0))
+arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
+                                _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
+
+# Binary ufuncs
+add = _MaskedBinaryOperation(umath.add)
+subtract = _MaskedBinaryOperation(umath.subtract)
+multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
+arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
+equal = _MaskedBinaryOperation(umath.equal)
+equal.reduce = None
+not_equal = _MaskedBinaryOperation(umath.not_equal)
+not_equal.reduce = None
+less_equal = _MaskedBinaryOperation(umath.less_equal)
+less_equal.reduce = None
+greater_equal = _MaskedBinaryOperation(umath.greater_equal)
+greater_equal.reduce = None
+less = _MaskedBinaryOperation(umath.less)
+less.reduce = None
+greater = _MaskedBinaryOperation(umath.greater)
+greater.reduce = None
+logical_and = _MaskedBinaryOperation(umath.logical_and)
+alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
+logical_or = _MaskedBinaryOperation(umath.logical_or)
+sometrue = logical_or.reduce
+logical_xor = _MaskedBinaryOperation(umath.logical_xor)
+bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
+bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
+bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
+hypot = _MaskedBinaryOperation(umath.hypot)
+
+# Domained binary ufuncs
+divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
+true_divide = _DomainedBinaryOperation(umath.true_divide,
+                                       _DomainSafeDivide(), 0, 1)
+floor_divide = _DomainedBinaryOperation(umath.floor_divide,
+                                        _DomainSafeDivide(), 0, 1)
+remainder = _DomainedBinaryOperation(umath.remainder,
+                                     _DomainSafeDivide(), 0, 1)
+fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
+mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
+
+
+###############################################################################
+#                        Mask creation functions                              #
+###############################################################################
+
+
+def _replace_dtype_fields_recursive(dtype, primitive_dtype):
+    "Private function allowing recursion in _replace_dtype_fields."
+    _recurse = _replace_dtype_fields_recursive
+
+    # Do we have some name fields ?
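+    # (Illustrative note, not part of the original source: for
+    #  dtype([('a', '<f4', (2,)), ('b', '<i4')]) with primitive_dtype
+    #  np.bool_, the recursion below yields
+    #  dtype([('a', '|b1', (2,)), ('b', '|b1')]).)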
+    if dtype.names is not None:
+        descr = []
+        for name in dtype.names:
+            field = dtype.fields[name]
+            if len(field) == 3:
+                # Prepend the title to the name
+                name = (field[-1], name)
+            descr.append((name, _recurse(field[0], primitive_dtype)))
+        new_dtype = np.dtype(descr)
+
+    # Is this some kind of composite a la (float,2)
+    elif dtype.subdtype:
+        descr = list(dtype.subdtype)
+        descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
+        new_dtype = np.dtype(tuple(descr))
+
+    # this is a primitive type, so do a direct replacement
+    else:
+        new_dtype = primitive_dtype
+
+    # preserve identity of dtypes
+    if new_dtype == dtype:
+        new_dtype = dtype
+
+    return new_dtype
+
+
+def _replace_dtype_fields(dtype, primitive_dtype):
+    """
+    Construct a dtype description list from a given dtype.
+
+    Returns a new dtype object, with all fields and subtypes in the given type
+    recursively replaced with `primitive_dtype`.
+
+    Arguments are coerced to dtypes first.
+    """
+    dtype = np.dtype(dtype)
+    primitive_dtype = np.dtype(primitive_dtype)
+    return _replace_dtype_fields_recursive(dtype, primitive_dtype)
+
+
+def make_mask_descr(ndtype):
+    """
+    Construct a dtype description list from a given dtype.
+
+    Returns a new dtype object, with the type of all fields in `ndtype` to a
+    boolean type. Field names are not altered.
+
+    Parameters
+    ----------
+    ndtype : dtype
+        The dtype to convert.
+
+    Returns
+    -------
+    result : dtype
+        A dtype that looks like `ndtype`, the type of all fields is boolean.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> dtype = np.dtype({'names':['foo', 'bar'],
+                          'formats':[np.float32, int]})
+    >>> dtype
+    dtype([('foo', '<f4'), ('bar', '<i4')])
+    >>> ma.make_mask_descr(dtype)
+    dtype([('foo', '|b1'), ('bar', '|b1')])
+    >>> ma.make_mask_descr(np.float32)
+    dtype('bool')
+
+    """
+    return _replace_dtype_fields(ndtype, MaskType)
+
+
+def getmask(a):
+    """
+    Return the mask of a masked array, or nomask.
+
+    Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
+    mask is not `nomask`, else return `nomask`. To guarantee a full array
+    of booleans of the same shape as a, use `getmaskarray`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input `MaskedArray` for which the mask is required.
+
+    See Also
+    --------
+    getdata : Return the data of a masked array as an ndarray.
+    getmaskarray : Return the mask of a masked array, or full array of False.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
+    >>> a
+    masked_array(data =
+     [[1 --]
+     [3 4]],
+          mask =
+     [[False  True]
+     [False False]],
+          fill_value=999999)
+    >>> ma.getmask(a)
+    array([[False,  True],
+           [False, False]])
+
+    Equivalently use the `MaskedArray` `mask` attribute.
+
+    >>> a.mask
+    array([[False,  True],
+           [False, False]])
+
+    Result when mask == `nomask`
+
+    >>> b = ma.masked_array([[1,2],[3,4]])
+    >>> b
+    masked_array(data =
+     [[1 2]
+     [3 4]],
+          mask =
+     False,
+          fill_value=999999)
+    >>> ma.nomask
+    False
+    >>> ma.getmask(b) == ma.nomask
+    True
+    >>> b.mask == ma.nomask
+    True
+
+    """
+    return getattr(a, '_mask', nomask)
+
+
+get_mask = getmask
+
+
+def getmaskarray(arr):
+    """
+    Return the mask of a masked array, or full boolean array of False.
+
+    Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
+    the mask is not `nomask`, else return a full boolean array of False of
+    the same shape as `arr`.
+
+    Parameters
+    ----------
+    arr : array_like
+        Input `MaskedArray` for which the mask is required.
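+
+    Returns
+    -------
+    mask : ndarray
+        The mask of `arr` if `arr` is a `MaskedArray` with a defined mask,
+        otherwise a full boolean array of False of the same shape as `arr`.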
+
+    See Also
+    --------
+    getmask : Return the mask of a masked array, or nomask.
+    getdata : Return the data of a masked array as an ndarray.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
+    >>> a
+    masked_array(data =
+     [[1 --]
+     [3 4]],
+          mask =
+     [[False  True]
+     [False False]],
+          fill_value=999999)
+    >>> ma.getmaskarray(a)
+    array([[False,  True],
+           [False, False]])
+
+    Result when mask == ``nomask``
+
+    >>> b = ma.masked_array([[1,2],[3,4]])
+    >>> b
+    masked_array(data =
+     [[1 2]
+     [3 4]],
+          mask =
+     False,
+          fill_value=999999)
+    >>> ma.getmaskarray(b)
+    array([[False, False],
+           [False, False]])
+
+    """
+    mask = getmask(arr)
+    if mask is nomask:
+        mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
+    return mask
+
+
+def is_mask(m):
+    """
+    Return True if m is a valid, standard mask.
+
+    This function does not check the contents of the input, only that the
+    type is MaskType. In particular, this function returns False if the
+    mask has a flexible dtype.
+
+    Parameters
+    ----------
+    m : array_like
+        Array to test.
+
+    Returns
+    -------
+    result : bool
+        True if `m.dtype.type` is MaskType, False otherwise.
+
+    See Also
+    --------
+    isMaskedArray : Test whether input is an instance of MaskedArray.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
+    >>> m
+    masked_array(data = [-- 1 -- 2 3],
+                 mask = [ True False  True False False],
+           fill_value=999999)
+    >>> ma.is_mask(m)
+    False
+    >>> ma.is_mask(m.mask)
+    True
+
+    Input must be an ndarray (or have similar attributes)
+    for it to be considered a valid mask.
+
+    >>> m = [False, True, False]
+    >>> ma.is_mask(m)
+    False
+    >>> m = np.array([False, True, False])
+    >>> m
+    array([False,  True, False])
+    >>> ma.is_mask(m)
+    True
+
+    Arrays with complex dtypes don't return True.
+
+    >>> dtype = np.dtype({'names':['monty', 'pithon'],
+                          'formats':[bool, bool]})
+    >>> dtype
+    dtype([('monty', '|b1'), ('pithon', '|b1')])
+    >>> m = np.array([(True, False), (False, True), (True, False)],
+                     dtype=dtype)
+    >>> m
+    array([(True, False), (False, True), (True, False)],
+          dtype=[('monty', '|b1'), ('pithon', '|b1')])
+    >>> ma.is_mask(m)
+    False
+
+    """
+    try:
+        return m.dtype.type is MaskType
+    except AttributeError:
+        return False
+
+
+def _shrink_mask(m):
+    """
+    Shrink a mask to nomask if possible
+    """
+    if m.dtype.names is None and not m.any():
+        return nomask
+    else:
+        return m
+
+
+def make_mask(m, copy=False, shrink=True, dtype=MaskType):
+    """
+    Create a boolean mask from an array.
+
+    Return `m` as a boolean mask, creating a copy if necessary or requested.
+    The function can accept any sequence that is convertible to integers,
+    or ``nomask``. Does not require that contents must be 0s and 1s, values
+    of 0 are interpreted as False, everything else as True.
+
+    Parameters
+    ----------
+    m : array_like
+        Potential mask.
+    copy : bool, optional
+        Whether to return a copy of `m` (True) or `m` itself (False).
+    shrink : bool, optional
+        Whether to shrink `m` to ``nomask`` if all its values are False.
+    dtype : dtype, optional
+        Data-type of the output mask. By default, the output mask has a
+        dtype of MaskType (bool). If the dtype is flexible, each field has
+        a boolean dtype. This is ignored when `m` is ``nomask``, in which
+        case ``nomask`` is always returned.
+
+    Returns
+    -------
+    result : ndarray
+        A boolean mask derived from `m`.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> m = [True, False, True, True]
+    >>> ma.make_mask(m)
+    array([ True, False,  True,  True])
+    >>> m = [1, 0, 1, 1]
+    >>> ma.make_mask(m)
+    array([ True, False,  True,  True])
+    >>> m = [1, 0, 2, -3]
+    >>> ma.make_mask(m)
+    array([ True, False,  True,  True])
+
+    Effect of the `shrink` parameter.
+
+    >>> m = np.zeros(4)
+    >>> m
+    array([ 0.,  0.,  0.,  0.])
+    >>> ma.make_mask(m)
+    False
+    >>> ma.make_mask(m, shrink=False)
+    array([False, False, False, False])
+
+    Using a flexible `dtype`.
+
+    >>> m = [1, 0, 1, 1]
+    >>> n = [0, 1, 0, 0]
+    >>> arr = []
+    >>> for man, mouse in zip(m, n):
+    ...     arr.append((man, mouse))
+    >>> arr
+    [(1, 0), (0, 1), (1, 0), (1, 0)]
+    >>> dtype = np.dtype({'names':['man', 'mouse'],
+                          'formats':[int, int]})
+    >>> arr = np.array(arr, dtype=dtype)
+    >>> arr
+    array([(1, 0), (0, 1), (1, 0), (1, 0)],
+          dtype=[('man', '<i4'), ('mouse', '<i4')])
+    >>> ma.make_mask(arr, dtype=dtype)
+    array([(True, False), (False, True), (True, False), (True, False)],
+          dtype=[('man', '|b1'), ('mouse', '|b1')])
+
+    """
+    if m is nomask:
+        return nomask
+
+    # Make sure the input dtype is valid.
+    dtype = make_mask_descr(dtype)
+
+    # legacy boolean special case: "existence of fields implies true"
+    if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
+        return np.ones(m.shape, dtype=dtype)
+
+    # Fill the mask in case there are missing data; turn it into an ndarray.
+    result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
+    # Bas les masques !
+    if shrink:
+        result = _shrink_mask(result)
+    return result
+
+
+def make_mask_none(newshape, dtype=None):
+    """
+    Return a boolean mask of the given shape, filled with False.
+
+    This function returns a boolean ndarray with all entries False, that can
+    be used in common mask manipulations. If a complex dtype is specified, the
+    type of each field is converted to a boolean type.
+
+    Parameters
+    ----------
+    newshape : tuple
+        A tuple indicating the shape of the mask.
+    dtype : {None, dtype}, optional
+        If None, use a MaskType instance. Otherwise, use a new datatype with
+        the same fields as `dtype`, converted to boolean types.
+
+    Returns
+    -------
+    result : ndarray
+        An ndarray of appropriate shape and dtype, filled with False.
+
+    See Also
+    --------
+    make_mask : Create a boolean mask from an array.
+    make_mask_descr : Construct a dtype description list from a given dtype.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> ma.make_mask_none((3,))
+    array([False, False, False])
+
+    Defining a more complex dtype.
+
+    >>> dtype = np.dtype({'names':['foo', 'bar'],
+                          'formats':[np.float32, int]})
+    >>> dtype
+    dtype([('foo', '<f4'), ('bar', '<i4')])
+    >>> ma.make_mask_none((3,), dtype=dtype)
+    array([(False, False), (False, False), (False, False)],
+          dtype=[('foo', '|b1'), ('bar', '|b1')])
+
+    """
+    if dtype is None:
+        result = np.zeros(newshape, dtype=MaskType)
+    else:
+        result = np.zeros(newshape, dtype=make_mask_descr(dtype))
+    return result
+
+
+def mask_or(m1, m2, copy=False, shrink=True):
+    """
+    Combine two masks with the ``logical_or`` operator.
+
+    The result may be a view on `m1` or `m2` if the other is `nomask`
+    (i.e. False).
+
+    Parameters
+    ----------
+    m1, m2 : array_like
+        Input masks.
+    copy : bool, optional
+        If copy is False and one of the inputs is `nomask`, return a view
+        of the other input mask. Defaults to False.
+    shrink : bool, optional
+        Whether to shrink the output to `nomask` if all its values are
+        False. Defaults to True.
+ + Returns + ------- + mask : output mask + The result masks values that are masked in either `m1` or `m2`. + + Raises + ------ + ValueError + If `m1` and `m2` have different flexible dtypes. + + Examples + -------- + >>> m1 = np.ma.make_mask([0, 1, 1, 0]) + >>> m2 = np.ma.make_mask([1, 0, 0, 0]) + >>> np.ma.mask_or(m1, m2) + array([ True, True, True, False]) + + """ + + @recursive + def _recursive_mask_or(self, m1, m2, newmask): + names = m1.dtype.names + for name in names: + current1 = m1[name] + if current1.dtype.names is not None: + self(current1, m2[name], newmask[name]) + else: + umath.logical_or(current1, m2[name], newmask[name]) + return + + if (m1 is nomask) or (m1 is False): + dtype = getattr(m2, 'dtype', MaskType) + return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) + if (m2 is nomask) or (m2 is False): + dtype = getattr(m1, 'dtype', MaskType) + return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) + if m1 is m2 and is_mask(m1): + return m1 + (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) + if (dtype1 != dtype2): + raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) + if dtype1.names is not None: + # Allocate an output mask array with the properly broadcast shape. + newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) + _recursive_mask_or(m1, m2, newmask) + return newmask + return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) + + +def flatten_mask(mask): + """ + Returns a completely flattened version of the mask, where nested fields + are collapsed. + + Parameters + ---------- + mask : array_like + Input array, which will be interpreted as booleans. + + Returns + ------- + flattened_mask : ndarray of bools + The flattened input. + + Examples + -------- + >>> mask = np.array([0, 0, 1]) + >>> flatten_mask(mask) + array([False, False, True]) + + >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + >>> flatten_mask(mask) + array([False, False, False, True]) + + >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) + >>> flatten_mask(mask) + array([False, False, False, False, False, True]) + + """ + + def _flatmask(mask): + "Flatten the mask and returns a (maybe nested) sequence of booleans." + mnames = mask.dtype.names + if mnames is not None: + return [flatten_mask(mask[name]) for name in mnames] + else: + return mask + + def _flatsequence(sequence): + "Generates a flattened version of the sequence." + try: + for element in sequence: + if hasattr(element, '__iter__'): + for f in _flatsequence(element): + yield f + else: + yield element + except TypeError: + yield sequence + + mask = np.asarray(mask) + flattened = _flatsequence(_flatmask(mask)) + return np.array([_ for _ in flattened], dtype=bool) + + +def _check_mask_axis(mask, axis, keepdims=np._NoValue): + "Check whether there are masked values along the given axis" + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if mask is not nomask: + return mask.all(axis=axis, **kwargs) + return nomask + + +############################################################################### +# Masking functions # +############################################################################### + +def masked_where(condition, a, copy=True): + """ + Mask an array where a condition is met. + + Return `a` as an array masked where `condition` is True. + Any masked values of `a` or `condition` are also masked in the output. 
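+
+    Roughly speaking, the result is `a` viewed as a masked array whose mask
+    is the logical OR of `condition` and any mask already attached to `a`;
+    the implementation below combines them with ``mask_or``.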
+ + Parameters + ---------- + condition : array_like + Masking condition. When `condition` tests floating point values for + equality, consider using ``masked_values`` instead. + a : array_like + Array to mask. + copy : bool + If True (default) make a copy of `a` in the result. If False modify + `a` in place and return a view. + + Returns + ------- + result : MaskedArray + The result of masking `a` where `condition` is True. + + See Also + -------- + masked_values : Mask using floating point equality. + masked_equal : Mask where equal to a given value. + masked_not_equal : Mask where `not` equal to a given value. + masked_less_equal : Mask where less than or equal to a given value. + masked_greater_equal : Mask where greater than or equal to a given value. + masked_less : Mask where less than a given value. + masked_greater : Mask where greater than a given value. + masked_inside : Mask inside a given interval. + masked_outside : Mask outside a given interval. + masked_invalid : Mask invalid values (NaNs or infs). + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_where(a <= 2, a) + masked_array(data = [-- -- -- 3], + mask = [ True True True False], + fill_value=999999) + + Mask array `b` conditional on `a`. + + >>> b = ['a', 'b', 'c', 'd'] + >>> ma.masked_where(a == 2, b) + masked_array(data = [a b -- d], + mask = [False False True False], + fill_value=N/A) + + Effect of the `copy` argument. + + >>> c = ma.masked_where(a <= 2, a) + >>> c + masked_array(data = [-- -- -- 3], + mask = [ True True True False], + fill_value=999999) + >>> c[0] = 99 + >>> c + masked_array(data = [99 -- -- 3], + mask = [False True True False], + fill_value=999999) + >>> a + array([0, 1, 2, 3]) + >>> c = ma.masked_where(a <= 2, a, copy=False) + >>> c[0] = 99 + >>> c + masked_array(data = [99 -- -- 3], + mask = [False True True False], + fill_value=999999) + >>> a + array([99, 1, 2, 3]) + + When `condition` or `a` contain masked values. + + >>> a = np.arange(4) + >>> a = ma.masked_where(a == 2, a) + >>> a + masked_array(data = [0 1 -- 3], + mask = [False False True False], + fill_value=999999) + >>> b = np.arange(4) + >>> b = ma.masked_where(b == 0, b) + >>> b + masked_array(data = [-- 1 2 3], + mask = [ True False False False], + fill_value=999999) + >>> ma.masked_where(a == 3, b) + masked_array(data = [-- 1 -- --], + mask = [ True False True True], + fill_value=999999) + + """ + # Make sure that condition is a valid standard-type mask. + cond = make_mask(condition, shrink=False) + a = np.array(a, copy=copy, subok=True) + + (cshape, ashape) = (cond.shape, a.shape) + if cshape and cshape != ashape: + raise IndexError("Inconsistent shape between the condition and the input" + " (got %s and %s)" % (cshape, ashape)) + if hasattr(a, '_mask'): + cond = mask_or(cond, a._mask) + cls = type(a) + else: + cls = MaskedArray + result = a.view(cls) + # Assign to *.mask so that structured masks are handled correctly. + result.mask = _shrink_mask(cond) + return result + + +def masked_greater(x, value, copy=True): + """ + Mask an array where greater than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x > value). + + See Also + -------- + masked_where : Mask where a condition is met. 
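+
+    Notes
+    -----
+    As implemented below, this is equivalent to
+    ``masked_where(greater(x, value), x)``.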
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater(a, 2) + masked_array(data = [0 1 2 --], + mask = [False False False True], + fill_value=999999) + + """ + return masked_where(greater(x, value), x, copy=copy) + + +def masked_greater_equal(x, value, copy=True): + """ + Mask an array where greater than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x >= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater_equal(a, 2) + masked_array(data = [0 1 -- --], + mask = [False False True True], + fill_value=999999) + + """ + return masked_where(greater_equal(x, value), x, copy=copy) + + +def masked_less(x, value, copy=True): + """ + Mask an array where less than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x < value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less(a, 2) + masked_array(data = [-- -- 2 3], + mask = [ True True False False], + fill_value=999999) + + """ + return masked_where(less(x, value), x, copy=copy) + + +def masked_less_equal(x, value, copy=True): + """ + Mask an array where less than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x <= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less_equal(a, 2) + masked_array(data = [-- -- -- 3], + mask = [ True True True False], + fill_value=999999) + + """ + return masked_where(less_equal(x, value), x, copy=copy) + + +def masked_not_equal(x, value, copy=True): + """ + Mask an array where `not` equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x != value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_not_equal(a, 2) + masked_array(data = [-- -- 2 --], + mask = [ True True False True], + fill_value=999999) + + """ + return masked_where(not_equal(x, value), x, copy=copy) + + +def masked_equal(x, value, copy=True): + """ + Mask an array where equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x == value). For floating point arrays, + consider using ``masked_values(x, value)``. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_equal(a, 2) + masked_array(data = [0 1 -- 3], + mask = [False False True False], + fill_value=999999) + + """ + output = masked_where(equal(x, value), x, copy=copy) + output.fill_value = value + return output + + +def masked_inside(x, v1, v2, copy=True): + """ + Mask an array inside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` inside + the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` + can be given in either order. 
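+
+    As implemented below, this is equivalent to
+    ``masked_where((filled(x) >= v1) & (filled(x) <= v2), x)``, after
+    swapping `v1` and `v2` if necessary.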
+
+    See Also
+    --------
+    masked_where : Mask where a condition is met.
+
+    Notes
+    -----
+    The array `x` is prefilled with its filling value.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
+    >>> ma.masked_inside(x, -0.3, 0.3)
+    masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
+                 mask = [False False  True  True False False],
+           fill_value=1e+20)
+
+    The order of `v1` and `v2` doesn't matter.
+
+    >>> ma.masked_inside(x, 0.3, -0.3)
+    masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
+                 mask = [False False  True  True False False],
+           fill_value=1e+20)
+
+    """
+    if v2 < v1:
+        (v1, v2) = (v2, v1)
+    xf = filled(x)
+    condition = (xf >= v1) & (xf <= v2)
+    return masked_where(condition, x, copy=copy)
+
+
+def masked_outside(x, v1, v2, copy=True):
+    """
+    Mask an array outside a given interval.
+
+    Shortcut to ``masked_where``, where `condition` is True for `x` outside
+    the interval [v1,v2] (x < v1)|(x > v2).
+    The boundaries `v1` and `v2` can be given in either order.
+
+    See Also
+    --------
+    masked_where : Mask where a condition is met.
+
+    Notes
+    -----
+    The array `x` is prefilled with its filling value.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
+    >>> ma.masked_outside(x, -0.3, 0.3)
+    masked_array(data = [-- -- 0.01 0.2 -- --],
+                 mask = [ True  True False False  True  True],
+           fill_value=1e+20)
+
+    The order of `v1` and `v2` doesn't matter.
+
+    >>> ma.masked_outside(x, 0.3, -0.3)
+    masked_array(data = [-- -- 0.01 0.2 -- --],
+                 mask = [ True  True False False  True  True],
+           fill_value=1e+20)
+
+    """
+    if v2 < v1:
+        (v1, v2) = (v2, v1)
+    xf = filled(x)
+    condition = (xf < v1) | (xf > v2)
+    return masked_where(condition, x, copy=copy)
+
+
+def masked_object(x, value, copy=True, shrink=True):
+    """
+    Mask the array `x` where the data are exactly equal to value.
+
+    This function is similar to `masked_values`, but only suitable
+    for object arrays: for floating point, use `masked_values` instead.
+
+    Parameters
+    ----------
+    x : array_like
+        Array to mask
+    value : object
+        Comparison value
+    copy : {True, False}, optional
+        Whether to return a copy of `x`.
+    shrink : {True, False}, optional
+        Whether to collapse a mask full of False to nomask
+
+    Returns
+    -------
+    result : MaskedArray
+        The result of masking `x` where equal to `value`.
+
+    See Also
+    --------
+    masked_where : Mask where a condition is met.
+    masked_equal : Mask where equal to a given value (integers).
+    masked_values : Mask using floating point equality.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> food = np.array(['green_eggs', 'ham'], dtype=object)
+    >>> # don't eat spoiled food
+    >>> eat = ma.masked_object(food, 'green_eggs')
+    >>> print(eat)
+    [-- ham]
+    >>> # plain ol' ham is boring
+    >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
+    >>> eat = ma.masked_object(fresh_food, 'green_eggs')
+    >>> print(eat)
+    [cheese ham pineapple]
+
+    Note that `mask` is set to ``nomask`` if possible.
+
+    >>> eat
+    masked_array(data = [cheese ham pineapple],
+                 mask = False,
+           fill_value=?)
+
+    """
+    if isMaskedArray(x):
+        condition = umath.equal(x._data, value)
+        mask = x._mask
+    else:
+        condition = umath.equal(np.asarray(x), value)
+        mask = nomask
+    mask = mask_or(mask, make_mask(condition, shrink=shrink))
+    return masked_array(x, mask=mask, copy=copy, fill_value=value)
+
+
+def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
+    """
+    Mask using floating point equality.
+ + Return a MaskedArray, masked where the data in array `x` are approximately + equal to `value`, determined using `isclose`. The default tolerances for + `masked_values` are the same as those for `isclose`. + + For integer types, exact equality is used, in the same way as + `masked_equal`. + + The fill_value is set to `value` and the mask is set to ``nomask`` if + possible. + + Parameters + ---------- + x : array_like + Array to mask. + value : float + Masking value. + rtol, atol : float, optional + Tolerance parameters passed on to `isclose` + copy : bool, optional + Whether to return a copy of `x`. + shrink : bool, optional + Whether to collapse a mask full of False to ``nomask``. + + Returns + ------- + result : MaskedArray + The result of masking `x` where approximately equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + + Examples + -------- + >>> import numpy.ma as ma + >>> x = np.array([1, 1.1, 2, 1.1, 3]) + >>> ma.masked_values(x, 1.1) + masked_array(data = [1.0 -- 2.0 -- 3.0], + mask = [False True False True False], + fill_value=1.1) + + Note that `mask` is set to ``nomask`` if possible. + + >>> ma.masked_values(x, 1.5) + masked_array(data = [ 1. 1.1 2. 1.1 3. ], + mask = False, + fill_value=1.5) + + For integers, the fill value will be different in general to the + result of ``masked_equal``. + + >>> x = np.arange(5) + >>> x + array([0, 1, 2, 3, 4]) + >>> ma.masked_values(x, 2) + masked_array(data = [0 1 -- 3 4], + mask = [False False True False False], + fill_value=2) + >>> ma.masked_equal(x, 2) + masked_array(data = [0 1 -- 3 4], + mask = [False False True False False], + fill_value=999999) + + """ + xnew = filled(x, value) + if np.issubdtype(xnew.dtype, np.floating): + mask = np.isclose(xnew, value, atol=atol, rtol=rtol) + else: + mask = umath.equal(xnew, value) + ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value) + if shrink: + ret.shrink_mask() + return ret + + +def masked_invalid(a, copy=True): + """ + Mask an array where invalid values occur (NaNs or infs). + + This function is a shortcut to ``masked_where``, with + `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. + Only applies to arrays with a dtype where NaNs or infs make sense + (i.e. floating point types), but accepts any array_like object. + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5, dtype=float) + >>> a[2] = np.NaN + >>> a[3] = np.PINF + >>> a + array([ 0., 1., NaN, Inf, 4.]) + >>> ma.masked_invalid(a) + masked_array(data = [0.0 1.0 -- -- 4.0], + mask = [False False True True False], + fill_value=1e+20) + + """ + a = np.array(a, copy=copy, subok=True) + mask = getattr(a, '_mask', None) + if mask is not None: + condition = ~(np.isfinite(getdata(a))) + if mask is not nomask: + condition |= mask + cls = type(a) + else: + condition = ~(np.isfinite(a)) + cls = MaskedArray + result = a.view(cls) + result._mask = condition + return result + + +############################################################################### +# Printing options # +############################################################################### + + +class _MaskedPrintOption(object): + """ + Handle the string used to represent missing data in a masked array. + + """ + + def __init__(self, display): + """ + Create the masked_print_option object. 
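+
+        (Illustrative note on the singleton defined below: by default
+        ``masked_print_option.display()`` returns ``'--'``, the string
+        printed in place of masked entries; ``set_display`` changes it.)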
+ + """ + self._display = display + self._enabled = True + + def display(self): + """ + Display the string to print for masked values. + + """ + return self._display + + def set_display(self, s): + """ + Set the string to print for masked values. + + """ + self._display = s + + def enabled(self): + """ + Is the use of the display value enabled? + + """ + return self._enabled + + def enable(self, shrink=1): + """ + Set the enabling shrink to `shrink`. + + """ + self._enabled = shrink + + def __str__(self): + return str(self._display) + + __repr__ = __str__ + +# if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. + + Private function allowing for recursion + + """ + names = result.dtype.names + if names is not None: + for name in names: + curdata = result[name] + curmask = mask[name] + _recursive_printoption(curdata, curmask, printopt) + else: + np.copyto(result, printopt, where=mask) + return + +# For better or worse, these end in a newline +_legacy_print_templates = dict( + long_std=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + long_flx=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """), + short_std=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + short_flx=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """) +) + +############################################################################### +# MaskedArray class # +############################################################################### + + +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names is not None: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.copyto(current, fill_value[name], where=mask[name]) + + +def flatten_structured_array(a): + """ + Flatten a structured array. + + The data type of the output is chosen such that it can represent all of the + (nested) fields. + + Parameters + ---------- + a : structured array + + Returns + ------- + output : masked array or ndarray + A flattened masked array if the input is a masked array, otherwise a + standard ndarray. + + Examples + -------- + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + + def flatten_sequence(iterable): + """ + Flattens a compound of nested iterables. 
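+
+        (For illustration: ``list(flatten_sequence([1, (2, (3,))]))``
+        yields ``[1, 2, 3]``.)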
+
+        """
+        for elm in iter(iterable):
+            if hasattr(elm, '__iter__'):
+                for f in flatten_sequence(elm):
+                    yield f
+            else:
+                yield elm
+
+    a = np.asanyarray(a)
+    inishape = a.shape
+    a = a.ravel()
+    if isinstance(a, MaskedArray):
+        out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
+        out = out.view(MaskedArray)
+        out._mask = np.array([tuple(flatten_sequence(d.item()))
+                              for d in getmaskarray(a)])
+    else:
+        out = np.array([tuple(flatten_sequence(d.item())) for d in a])
+    if len(inishape) > 1:
+        newshape = list(out.shape)
+        newshape[0] = inishape
+        out.shape = tuple(flatten_sequence(newshape))
+    return out
+
+
+def _arraymethod(funcname, onmask=True):
+    """
+    Return a class method wrapper around a basic array method.
+
+    Creates a class method which returns a masked array, where the new
+    ``_data`` array is the output of the corresponding basic method called
+    on the original ``_data``.
+
+    If `onmask` is True, the new mask is the output of the method called
+    on the initial mask. Otherwise, the new mask is just a reference
+    to the initial mask.
+
+    Parameters
+    ----------
+    funcname : str
+        Name of the function to apply on data.
+    onmask : bool
+        Whether the mask must be processed also (True) or left
+        alone (False). Default is True. Make available as `_onmask`
+        attribute.
+
+    Returns
+    -------
+    method : instancemethod
+        Class method wrapper of the specified basic array method.
+
+    """
+    def wrapped_method(self, *args, **params):
+        result = getattr(self._data, funcname)(*args, **params)
+        result = result.view(type(self))
+        result._update_from(self)
+        mask = self._mask
+        if not onmask:
+            result.__setmask__(mask)
+        elif mask is not nomask:
+            # __setmask__ makes a copy, which we don't want
+            result._mask = getattr(mask, funcname)(*args, **params)
+        return result
+    methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
+    if methdoc is not None:
+        wrapped_method.__doc__ = methdoc.__doc__
+    wrapped_method.__name__ = funcname
+    return wrapped_method
+
+
+class MaskedIterator(object):
+    """
+    Flat iterator object to iterate over masked arrays.
+
+    A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
+    `x`. It allows iterating over the array as if it were a 1-D array,
+    either in a for-loop or by calling its `next` method.
+
+    Iteration is done in C-contiguous style, with the last index varying the
+    fastest. The iterator can also be indexed using basic slicing or
+    advanced indexing.
+
+    See Also
+    --------
+    MaskedArray.flat : Return a flat iterator over an array.
+    MaskedArray.flatten : Returns a flattened copy of an array.
+
+    Notes
+    -----
+    `MaskedIterator` is not exported by the `ma` module. Instead of
+    instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
+
+    Examples
+    --------
+    >>> x = np.ma.array(np.arange(6).reshape(2, 3))
+    >>> fl = x.flat
+    >>> type(fl)
+    <class 'numpy.ma.core.MaskedIterator'>
+    >>> for item in fl:
+    ...     print(item)
+    ...
+    0
+    1
+    2
+    3
+    4
+    5
+
+    Extracting more than a single element by indexing the `MaskedIterator`
+    returns a masked array:
+
+    >>> fl[2:4]
+    masked_array(data = [2 3],
+                 mask = False,
+           fill_value = 999999)
+
+    """
+
+    def __init__(self, ma):
+        self.ma = ma
+        self.dataiter = ma._data.flat
+
+        if ma._mask is nomask:
+            self.maskiter = None
+        else:
+            self.maskiter = ma._mask.flat
+
+    def __iter__(self):
+        return self
+
+    def __getitem__(self, indx):
+        result = self.dataiter.__getitem__(indx).view(type(self.ma))
+        if self.maskiter is not None:
+            _mask = self.maskiter.__getitem__(indx)
+            if isinstance(_mask, ndarray):
+                # set shape to match that of data; this is needed for matrices
+                _mask.shape = result.shape
+                result._mask = _mask
+            elif isinstance(_mask, np.void):
+                return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
+            elif _mask:  # Just a scalar, masked
+                return masked
+        return result
+
+    # This won't work if ravel makes a copy
+    def __setitem__(self, index, value):
+        self.dataiter[index] = getdata(value)
+        if self.maskiter is not None:
+            self.maskiter[index] = getmaskarray(value)
+
+    def __next__(self):
+        """
+        Return the next value, or raise StopIteration.
+
+        Examples
+        --------
+        >>> x = np.ma.array([3, 2], mask=[0, 1])
+        >>> fl = x.flat
+        >>> fl.next()
+        3
+        >>> fl.next()
+        masked_array(data = --,
+                     mask = True,
+               fill_value = 1e+20)
+        >>> fl.next()
+        Traceback (most recent call last):
+          File "<stdin>", line 1, in <module>
+          File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
+            d = self.dataiter.next()
+        StopIteration
+
+        """
+        d = next(self.dataiter)
+        if self.maskiter is not None:
+            m = next(self.maskiter)
+            if isinstance(m, np.void):
+                return mvoid(d, mask=m, hardmask=self.ma._hardmask)
+            elif m:  # Just a scalar, masked
+                return masked
+        return d
+
+    next = __next__
+
+
+class MaskedArray(ndarray):
+    """
+    An array class with possibly masked values.
+
+    Masked values of True exclude the corresponding element from any
+    computation.
+
+    Construction::
+
+      x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,
+                      ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,
+                      shrink=True, order=None)
+
+    Parameters
+    ----------
+    data : array_like
+        Input data.
+    mask : sequence, optional
+        Mask. Must be convertible to an array of booleans with the same
+        shape as `data`. True indicates a masked (i.e. invalid) data.
+    dtype : dtype, optional
+        Data type of the output.
+        If `dtype` is None, the type of the data argument (``data.dtype``)
+        is used. If `dtype` is not None and different from ``data.dtype``,
+        a copy is performed.
+    copy : bool, optional
+        Whether to copy the input data (True), or to use a reference instead.
+        Default is False.
+    subok : bool, optional
+        Whether to return a subclass of `MaskedArray` if possible (True) or a
+        plain `MaskedArray`. Default is True.
+    ndmin : int, optional
+        Minimum number of dimensions. Default is 0.
+    fill_value : scalar, optional
+        Value used to fill in the masked values when necessary.
+        If None, a default based on the data-type is used.
+    keep_mask : bool, optional
+        Whether to combine `mask` with the mask of the input data, if any
+        (True), or to use only `mask` for the output (False). Default is True.
+    hard_mask : bool, optional
+        Whether to use a hard mask or not. With a hard mask, masked values
+        cannot be unmasked. Default is False.
+    shrink : bool, optional
+        Whether to force compression of an empty mask. Default is True.
+    order : {'C', 'F', 'A'}, optional
+        Specify the order of the array.
If order is 'C', then the array + will be in C-contiguous order (last-index varies the fastest). + If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). + If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. + + """ + + __array_priority__ = 15 + _defaultmask = nomask + _defaulthardmask = False + _baseclass = ndarray + + # Maximum number of elements per axis used when printing an array. The + # 1d case is handled separately because we need more values in this case. + _print_width = 100 + _print_width_1d = 1500 + + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, + subok=True, ndmin=0, fill_value=None, keep_mask=True, + hard_mask=None, shrink=True, order=None, **options): + """ + Create a new masked array from scratch. + + Notes + ----- + A masked array can also be created by taking a .view(MaskedArray). + + """ + # Process data. + _data = np.array(data, dtype=dtype, copy=copy, + order=order, subok=True, ndmin=ndmin) + _baseclass = getattr(data, '_baseclass', type(_data)) + # Check that we're not erasing the mask. + if isinstance(data, MaskedArray) and (data.shape != _data.shape): + copy = True + + # Here, we copy the _view_, so that we can attach new properties to it + # we must never do .view(MaskedConstant), as that would create a new + # instance of np.ma.masked, which make identity comparison fail + if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): + _data = ndarray.view(_data, type(data)) + else: + _data = ndarray.view(_data, cls) + # Backwards compatibility w/ numpy.core.ma. + if hasattr(data, '_mask') and not isinstance(data, ndarray): + _data._mask = data._mask + # FIXME _sharedmask is never used. + _sharedmask = True + # Process mask. + # Type of the mask + mdtype = make_mask_descr(_data.dtype) + + if mask is nomask: + # Case 1. : no mask in input. + # Erase the current mask ? + if not keep_mask: + # With a reduced version + if shrink: + _data._mask = nomask + # With full version + else: + _data._mask = np.zeros(_data.shape, dtype=mdtype) + # Check whether we missed something + elif isinstance(data, (tuple, list)): + try: + # If data is a sequence of masked array + mask = np.array([getmaskarray(m) for m in data], + dtype=mdtype) + except ValueError: + # If data is nested + mask = nomask + # Force shrinking of the mask if needed (and possible) + if (mdtype == MaskType) and mask.any(): + _data._mask = mask + _data._sharedmask = False + else: + _data._sharedmask = not copy + if copy: + _data._mask = _data._mask.copy() + # Reset the shape of the original mask + if getmask(data) is not nomask: + data._mask.shape = data.shape + else: + # Case 2. : With a mask in input. 
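+            # (Illustrative note, not in the original source: e.g.
+            #  MaskedArray([1, 2, 3], mask=True) takes this branch, and the
+            #  scalar True is expanded just below into a full (3,) mask.)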
+ # If mask is boolean, create an array of True or False + if mask is True and mdtype == MaskType: + mask = np.ones(_data.shape, dtype=mdtype) + elif mask is False and mdtype == MaskType: + mask = np.zeros(_data.shape, dtype=mdtype) + else: + # Read the mask with the current mdtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Make sure the mask and the data have the same shape + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = np.resize(mask, _data.shape) + elif nm == nd: + mask = np.reshape(mask, _data.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." + raise MaskError(msg % (nd, nm)) + copy = True + # Set the mask to the new value + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = not copy + else: + if not keep_mask: + _data._mask = mask + _data._sharedmask = not copy + else: + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) + else: + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False + # Update fill_value. + if fill_value is None: + fill_value = getattr(data, '_fill_value', None) + # But don't run the check unless we have something to check. + if fill_value is not None: + _data._fill_value = _check_fill_value(fill_value, _data.dtype) + # Process extra options .. + if hard_mask is None: + _data._hardmask = getattr(data, '_hardmask', False) + else: + _data._hardmask = hard_mask + _data._baseclass = _baseclass + return _data + + + def _update_from(self, obj): + """ + Copies some attributes of obj to self. + + """ + if isinstance(obj, ndarray): + _baseclass = type(obj) + else: + _baseclass = ndarray + # We need to copy the _basedict to avoid backward propagation + _optinfo = {} + _optinfo.update(getattr(obj, '_optinfo', {})) + _optinfo.update(getattr(obj, '_basedict', {})) + if not isinstance(obj, MaskedArray): + _optinfo.update(getattr(obj, '__dict__', {})) + _dict = dict(_fill_value=getattr(obj, '_fill_value', None), + _hardmask=getattr(obj, '_hardmask', False), + _sharedmask=getattr(obj, '_sharedmask', False), + _isfield=getattr(obj, '_isfield', False), + _baseclass=getattr(obj, '_baseclass', _baseclass), + _optinfo=_optinfo, + _basedict=_optinfo) + self.__dict__.update(_dict) + self.__dict__.update(_optinfo) + return + + def __array_finalize__(self, obj): + """ + Finalizes the masked array. + + """ + # Get main attributes. + self._update_from(obj) + + # We have to decide how to initialize self.mask, based on + # obj.mask. This is very difficult. There might be some + # correspondence between the elements in the array we are being + # created from (= obj) and us. Or there might not. This method can + # be called in all kinds of places for all kinds of reasons -- could + # be empty_like, could be slicing, could be a ufunc, could be a view. + # The numpy subclassing interface simply doesn't give us any way + # to know, which means that at best this method will be based on + # guesswork and heuristics. To make things worse, there isn't even any + # clear consensus about what the desired behavior is. 
For instance,
+ # most users think that np.empty_like(marr) -- which goes via this
+ # method -- should return a masked array with an empty mask (see
+ # gh-3404 and linked discussions), but others disagree, and they have
+ # existing code which depends on empty_like returning an array that
+ # matches the input mask.
+ #
+ # Historically our algorithm was: if the template object mask had the
+ # same *number of elements* as us, then we used *its mask object
+ # itself* as our mask, so that writes to us would also write to the
+ # original array. This is horribly broken in multiple ways.
+ #
+ # Now what we do instead is, if the template object mask has the same
+ # number of elements as us, and we do not have the same base pointer
+ # as the template object (b/c views like arr[...] should keep the same
+ # mask), then we make a copy of the template object mask and use
+ # that. This is also horribly broken but somewhat less so. Maybe.
+ if isinstance(obj, ndarray):
+ # XX: This looks like a bug -- shouldn't it check self.dtype
+ # instead?
+ if obj.dtype.names is not None:
+ _mask = getmaskarray(obj)
+ else:
+ _mask = getmask(obj)
+
+ # If self and obj point to exactly the same data, then probably
+ # self is a simple view of obj (e.g., self = obj[...]), so they
+ # should share the same mask. (This isn't 100% reliable, e.g. self
+ # could be the first row of obj, or have strange strides, but as a
+ # heuristic it's not bad.) In all other cases, we make a copy of
+ # the mask, so that future modifications to 'self' do not end up
+ # side-effecting 'obj' as well.
+ if (_mask is not nomask and obj.__array_interface__["data"][0]
+ != self.__array_interface__["data"][0]):
+ # We should make a copy. But we could get here via astype,
+ # in which case the mask might need a new dtype as well
+ # (e.g., changing to or from a structured dtype), and the
+ # order could have changed. So, change the mask type if
+ # needed and use astype instead of copy.
+ if self.dtype == obj.dtype:
+ _mask_dtype = _mask.dtype
+ else:
+ _mask_dtype = make_mask_descr(self.dtype)
+
+ if self.flags.c_contiguous:
+ order = "C"
+ elif self.flags.f_contiguous:
+ order = "F"
+ else:
+ order = "K"
+
+ _mask = _mask.astype(_mask_dtype, order)
+ else:
+ # Take a view so shape changes, etc., do not propagate back.
+ _mask = _mask.view()
+ else:
+ _mask = nomask
+
+ self._mask = _mask
+ # Finalize the mask
+ if self._mask is not nomask:
+ try:
+ self._mask.shape = self.shape
+ except ValueError:
+ self._mask = nomask
+ except (TypeError, AttributeError):
+ # When _mask.shape is not writable (because it's a void)
+ pass
+
+ # Finalize the fill_value
+ if self._fill_value is not None:
+ self._fill_value = _check_fill_value(self._fill_value, self.dtype)
+ elif self.dtype.names is not None:
+ # Finalize the default fill_value for structured arrays
+ self._fill_value = _check_fill_value(None, self.dtype)
+
+ def __array_wrap__(self, obj, context=None):
+ """
+ Special hook for ufuncs.
+
+ Wraps the numpy array and sets the mask according to context.
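+
+ For example (illustrative; the exact repr depends on the numpy
+ version and print options), the masks of all ufunc inputs are
+ combined for the result:
+
+ >>> x = np.ma.array([1., 2., 3.], mask=[0, 1, 0])
+ >>> np.add(x, np.ma.array([10., 20., 30.], mask=[0, 0, 1]))
+ masked_array(data = [11.0 -- --],
+ mask = [False True True],
+ fill_value = 1e+20)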
+ + """ + if obj is self: # for in-place operations + result = obj + else: + result = obj.view(type(self)) + result._update_from(self) + + if context is not None: + result._mask = result._mask.copy() + func, args, out_i = context + # args sometimes contains outputs (gh-10459), which we don't want + input_args = args[:func.nin] + m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + # Get the domain mask + domain = ufunc_domain.get(func, None) + if domain is not None: + # Take the domain, and make sure it's a ndarray + with np.errstate(divide='ignore', invalid='ignore'): + d = filled(domain(*input_args), True) + + if d.any(): + # Fill the result where the domain is wrong + try: + # Binary domain: take the last value + fill_value = ufunc_fills[func][-1] + except TypeError: + # Unary domain: just use this one + fill_value = ufunc_fills[func] + except KeyError: + # Domain not recognized, use fill_value instead + fill_value = self.fill_value + + np.copyto(result, fill_value, where=d) + + # Update the mask + if m is nomask: + m = d + else: + # Don't modify inplace, we risk back-propagation + m = (m | d) + + # Make sure the mask has the proper size + if result is not self and result.shape == () and m: + return masked + else: + result._mask = m + result._sharedmask = False + + return result + + def view(self, dtype=None, type=None, fill_value=None): + """ + Return a view of the MaskedArray data + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + The default, None, results in the view having the same data-type + as `a`. As with ``ndarray.view``, dtype can also be specified as + an ndarray sub-class, which then specifies the type of the + returned object (this is equivalent to setting the ``type`` + parameter). + type : Python type, optional + Type of the returned view, either ndarray or a subclass. The + default None results in type preservation. + + Notes + ----- + + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + If `fill_value` is not specified, but `dtype` is specified (and is not + an ndarray sub-class), the `fill_value` of the MaskedArray will be + reset. If neither `fill_value` nor `dtype` are specified (or if + `dtype` is an ndarray sub-class), then the fill value is preserved. + Finally, if `fill_value` is specified, but `dtype` is not, the fill + value is set to the specified value. + + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a + regular array to a structured array), then the behavior of the view + cannot be predicted just from the superficial appearance of ``a`` (shown + by ``print(a)``). It also depends on exactly how ``a`` is stored in + memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus + defined as a slice or transpose, etc., the view may give different + results. 
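+
+ A small illustrative example of the `fill_value` rules above (exact
+ scalar formatting may vary between numpy versions):
+
+ >>> x = np.ma.array([1., 2., 3.], mask=[0, 1, 0], fill_value=-99.)
+ >>> x.view().fill_value # nothing specified: preserved
+ -99.0
+ >>> x.view(fill_value=42.).fill_value # fill_value specified
+ 42.0
+ >>> x.view(dtype=np.int64).fill_value # dtype specified: reset to default
+ 999999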
+ """ + + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + except TypeError: + output = ndarray.view(self, dtype) + else: + output = ndarray.view(self, dtype, type) + + # also make the mask be a view (so attr changes to the view's + # mask do no affect original object's mask) + # (especially important to avoid affecting np.masked singleton) + if (getmask(output) is not nomask): + output._mask = output._mask.view() + + # Make sure to reset the _fill_value if needed + if getattr(output, '_fill_value', None) is not None: + if fill_value is None: + if dtype is None: + pass # leave _fill_value as is + else: + output._fill_value = None + else: + output.fill_value = fill_value + return output + view.__doc__ = ndarray.view.__doc__ + + def __getitem__(self, indx): + """ + x.__getitem__(y) <==> x[y] + + Return the item described by i, as a masked array. + + """ + # We could directly use ndarray.__getitem__ on self. + # But then we would have to modify __array_finalize__ to prevent the + # mask of being reshaped if it hasn't been set up properly yet + # So it's easier to stick to the current version + dout = self.data[indx] + _mask = self._mask + + def _is_scalar(m): + return not isinstance(m, np.ndarray) + + def _scalar_heuristic(arr, elem): + """ + Return whether `elem` is a scalar result of indexing `arr`, or None + if undecidable without promoting nomask to a full mask + """ + # obviously a scalar + if not isinstance(elem, np.ndarray): + return True + + # object array scalar indexing can return anything + elif arr.dtype.type is np.object_: + if arr.dtype is not elem.dtype: + # elem is an array, but dtypes do not match, so must be + # an element + return True + + # well-behaved subclass that only returns 0d arrays when + # expected - this is not a scalar + elif type(arr).__getitem__ == ndarray.__getitem__: + return False + + return None + + if _mask is not nomask: + # _mask cannot be a subclass, so it tells us whether we should + # expect a scalar. It also cannot be of dtype object. + mout = _mask[indx] + scalar_expected = _is_scalar(mout) + + else: + # attempt to apply the heuristic to avoid constructing a full mask + mout = nomask + scalar_expected = _scalar_heuristic(self.data, dout) + if scalar_expected is None: + # heuristics have failed + # construct a full array, so we can be certain. This is costly. + # we could also fall back on ndarray.__getitem__(self.data, indx) + scalar_expected = _is_scalar(getmaskarray(self)[indx]) + + # Did we extract a single item? + if scalar_expected: + # A record + if isinstance(dout, np.void): + # We should always re-cast to mvoid, otherwise users can + # change masks on rows that already have masked values, but not + # on rows that have no masked values, which is inconsistent. + return mvoid(dout, mask=mout, hardmask=self._hardmask) + + # special case introduced in gh-5962 + elif (self.dtype.type is np.object_ and + isinstance(dout, np.ndarray) and + dout is not masked): + # If masked, turn into a MaskedArray, with everything masked. 
+ if mout: + return MaskedArray(dout, mask=True) + else: + return dout + + # Just a scalar + else: + if mout: + return masked + else: + return dout + else: + # Force dout to MA + dout = dout.view(type(self)) + # Inherit attributes from self + dout._update_from(self) + # Check the fill_value + if is_string_or_list_of_strings(indx): + if self._fill_value is not None: + dout._fill_value = self._fill_value[indx] + + # If we're indexing a multidimensional field in a + # structured array (such as dtype("(2,)i2,(2,)i1")), + # dimensionality goes up (M[field].ndim == M.ndim + + # M.dtype[field].ndim). That's fine for + # M[field] but problematic for M[field].fill_value + # which should have shape () to avoid breaking several + # methods. There is no great way out, so set to + # first element. See issue #6723. + if dout._fill_value.ndim > 0: + if not (dout._fill_value == + dout._fill_value.flat[0]).all(): + warnings.warn( + "Upon accessing multidimensional field " + "{indx:s}, need to keep dimensionality " + "of fill_value at 0. Discarding " + "heterogeneous fill_value and setting " + "all to {fv!s}.".format(indx=indx, + fv=dout._fill_value[0]), + stacklevel=2) + dout._fill_value = dout._fill_value.flat[0] + dout._isfield = True + # Update the mask if needed + if mout is not nomask: + # set shape to match that of data; this is needed for matrices + dout._mask = reshape(mout, dout.shape) + dout._sharedmask = True + # Note: Don't try to check for m.any(), that'll take too long + return dout + + def __setitem__(self, indx, value): + """ + x.__setitem__(i, y) <==> x[i]=y + + Set item described by index. If value is masked, masks those + locations. + + """ + if self is masked: + raise MaskError('Cannot alter the masked element.') + _data = self._data + _mask = self._mask + if isinstance(indx, basestring): + _data[indx] = value + if _mask is nomask: + self._mask = _mask = make_mask_none(self.shape, self.dtype) + _mask[indx] = getmask(value) + return + + _dtype = _data.dtype + + if value is masked: + # The mask wasn't set: create a full version. + if _mask is nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + # Now, set the mask to its value. + if _dtype.names is not None: + _mask[indx] = tuple([True] * len(_dtype.names)) + else: + _mask[indx] = True + return + + # Get the _data part of the new value + dval = getattr(value, '_data', value) + # Get the _mask part of the new value + mval = getmask(value) + if _dtype.names is not None and mval is nomask: + mval = tuple([False] * len(_dtype.names)) + if _mask is nomask: + # Set the data, then the mask + _data[indx] = dval + if mval is not nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + _mask[indx] = mval + elif not self._hardmask: + # Set the data, then the mask + _data[indx] = dval + _mask[indx] = mval + elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): + indx = indx * umath.logical_not(_mask) + _data[indx] = dval + else: + if _dtype.names is not None: + err_msg = "Flexible 'hard' masks are not yet supported." + raise NotImplementedError(err_msg) + mindx = mask_or(_mask[indx], mval, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + np.copyto(dindx, dval, where=~mindx) + elif mindx is nomask: + dindx = dval + _data[indx] = dindx + _mask[indx] = mindx + return + + # Define so that we can overwrite the setter. 
+ @property + def dtype(self): + return super(MaskedArray, self).dtype + + @dtype.setter + def dtype(self, dtype): + super(MaskedArray, type(self)).dtype.__set__(self, dtype) + if self._mask is not nomask: + self._mask = self._mask.view(make_mask_descr(dtype), ndarray) + # Try to reset the shape of the mask (if we don't have a void). + # This raises a ValueError if the dtype change won't work. + try: + self._mask.shape = self.shape + except (AttributeError, TypeError): + pass + + @property + def shape(self): + return super(MaskedArray, self).shape + + @shape.setter + def shape(self, shape): + super(MaskedArray, type(self)).shape.__set__(self, shape) + # Cannot use self._mask, since it may not (yet) exist when a + # masked matrix sets the shape. + if getmask(self) is not nomask: + self._mask.shape = self.shape + + def __setmask__(self, mask, copy=False): + """ + Set the mask. + + """ + idtype = self.dtype + current_mask = self._mask + if mask is masked: + mask = True + + if (current_mask is nomask): + # Make sure the mask is set + # Just don't do anything if there's nothing to do. + if mask is nomask: + return + current_mask = self._mask = make_mask_none(self.shape, idtype) + + if idtype.names is None: + # No named fields. + # Hardmask: don't unmask the data + if self._hardmask: + current_mask |= mask + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool_, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + else: + # Named fields w/ + mdtype = current_mask.dtype + mask = np.array(mask, copy=False) + # Mask is a singleton + if not mask.ndim: + # It's a boolean : make a record + if mask.dtype.kind == 'b': + mask = np.array(tuple([mask.item()] * len(mdtype)), + dtype=mdtype) + # It's a record: make sure the dtype is correct + else: + mask = mask.astype(mdtype) + # Mask is a sequence + else: + # Make sure the new mask is a ndarray with the proper dtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Hardmask: don't unmask the data + if self._hardmask: + for n in idtype.names: + current_mask[n] |= mask[n] + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool_, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + # Reshape if needed + if current_mask.shape: + current_mask.shape = self.shape + return + + _set_mask = __setmask__ + + def _get_mask(self): + """Return the current mask. + + """ + # We could try to force a reshape, but that wouldn't work in some + # cases. + return self._mask + + mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") + + def _get_recordmask(self): + """ + Return the mask of the records. + + A record is masked when all the fields are masked. + + """ + _mask = self._mask.view(ndarray) + if _mask.dtype.names is None: + return _mask + return np.all(flatten_structured_array(_mask), axis=-1) + + def _set_recordmask(self): + """ + Return the mask of the records. + + A record is masked when all the fields are masked. 
+ + """ + raise NotImplementedError("Coming soon: setting the mask per records!") + + recordmask = property(fget=_get_recordmask) + + def harden_mask(self): + """ + Force the mask to hard. + + Whether the mask of a masked array is hard or soft is determined by + its `hardmask` property. `harden_mask` sets `hardmask` to True. + + See Also + -------- + hardmask + + """ + self._hardmask = True + return self + + def soften_mask(self): + """ + Force the mask to soft. + + Whether the mask of a masked array is hard or soft is determined by + its `hardmask` property. `soften_mask` sets `hardmask` to False. + + See Also + -------- + hardmask + + """ + self._hardmask = False + return self + + hardmask = property(fget=lambda self: self._hardmask, + doc="Hardness of the mask") + + def unshare_mask(self): + """ + Copy the mask and set the sharedmask flag to False. + + Whether the mask is shared between masked arrays can be seen from + the `sharedmask` property. `unshare_mask` ensures the mask is not shared. + A copy of the mask is only made if it was shared. + + See Also + -------- + sharedmask + + """ + if self._sharedmask: + self._mask = self._mask.copy() + self._sharedmask = False + return self + + sharedmask = property(fget=lambda self: self._sharedmask, + doc="Share status of the mask (read-only).") + + def shrink_mask(self): + """ + Reduce a mask to nomask when possible. + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) + >>> x.mask + array([[False, False], + [False, False]]) + >>> x.shrink_mask() + >>> x.mask + False + + """ + self._mask = _shrink_mask(self._mask) + return self + + baseclass = property(fget=lambda self: self._baseclass, + doc="Class of the underlying data (read-only).") + + def _get_data(self): + """Return the current data, as a view of the original + underlying data. + + """ + return ndarray.view(self, self._baseclass) + + _data = property(fget=_get_data) + data = property(fget=_get_data) + + def _get_flat(self): + "Return a flat iterator." + return MaskedIterator(self) + + def _set_flat(self, value): + "Set a flattened version of self to value." + y = self.ravel() + y[:] = value + + flat = property(fget=_get_flat, fset=_set_flat, + doc="Flat version of the array.") + + def get_fill_value(self): + """ + Return the filling value of the masked array. + + Returns + ------- + fill_value : scalar + The filling value. + + Examples + -------- + >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: + ... np.ma.array([0, 1], dtype=dt).get_fill_value() + ... + 999999 + 999999 + 1e+20 + (1e+20+0j) + + >>> x = np.ma.array([0, 1.], fill_value=-np.inf) + >>> x.get_fill_value() + -inf + + """ + if self._fill_value is None: + self._fill_value = _check_fill_value(None, self.dtype) + + # Temporary workaround to account for the fact that str and bytes + # scalars cannot be indexed with (), whereas all other numpy + # scalars can. See issues #7259 and #7267. + # The if-block can be removed after #7267 has been fixed. + if isinstance(self._fill_value, ndarray): + return self._fill_value[()] + return self._fill_value + + def set_fill_value(self, value=None): + """ + Set the filling value of the masked array. + + Parameters + ---------- + value : scalar, optional + The new filling value. Default is None, in which case a default + based on the data type is used. + + See Also + -------- + ma.set_fill_value : Equivalent function. 
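+
+ Notes
+ -----
+ Assigning to the ``fill_value`` property (defined just below) calls
+ this method, so ``x.fill_value = np.pi`` is equivalent to
+ ``x.set_fill_value(np.pi)``.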
+
+ Examples
+ --------
+ >>> x = np.ma.array([0, 1.], fill_value=-np.inf)
+ >>> x.fill_value
+ -inf
+ >>> x.set_fill_value(np.pi)
+ >>> x.fill_value
+ 3.1415926535897931
+
+ Reset to default:
+
+ >>> x.set_fill_value()
+ >>> x.fill_value
+ 1e+20
+
+ """
+ target = _check_fill_value(value, self.dtype)
+ _fill_value = self._fill_value
+ if _fill_value is None:
+ # Create the attribute if it was undefined
+ self._fill_value = target
+ else:
+ # Don't overwrite the attribute, just fill it (for propagation)
+ _fill_value[()] = target
+
+ fill_value = property(fget=get_fill_value, fset=set_fill_value,
+ doc="Filling value.")
+
+ def filled(self, fill_value=None):
+ """
+ Return a copy of self, with masked values filled with a given value.
+ **However**, if there are no masked values to fill, self will be
+ returned instead as an ndarray.
+
+ Parameters
+ ----------
+ fill_value : scalar, optional
+ The value to use for invalid entries (None by default).
+ If None, the `fill_value` attribute of the array is used instead.
+
+ Returns
+ -------
+ filled_array : ndarray
+ A copy of ``self`` with invalid entries replaced by *fill_value*
+ (be it the function argument or the attribute of ``self``), or
+ ``self`` itself as an ndarray if there are no invalid entries to
+ be replaced.
+
+ Notes
+ -----
+ The result is **not** a MaskedArray!
+
+ Examples
+ --------
+ >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
+ >>> x.filled()
+ array([1, 2, -999, 4, -999])
+ >>> type(x.filled())
+ <type 'numpy.ndarray'>
+
+ Subclassing is preserved. This means that if, e.g., the data part of
+ the masked array is a recarray, `filled` returns a recarray:
+
+ >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
+ >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
+ >>> m.filled()
+ rec.array([(999999, 2), ( -3, 999999)],
+ dtype=[('f0', '<i8'), ('f1', '<i8')])
+
+ """
+ m = self._mask
+ if m is nomask:
+ return self._data
+
+ if fill_value is None:
+ fill_value = self.fill_value
+ else:
+ fill_value = _check_fill_value(fill_value, self.dtype)
+
+ if self is masked_singleton:
+ return np.asanyarray(fill_value)
+
+ if m.dtype.names is not None:
+ result = self._data.copy('K')
+ _recursive_filled(result, self._mask, fill_value)
+ elif not m.any():
+ return self._data
+ else:
+ result = self._data.copy('K')
+ try:
+ np.copyto(result, fill_value, where=m)
+ except (TypeError, AttributeError):
+ fill_value = narray(fill_value, dtype=object)
+ d = result.astype(object)
+ result = np.choose(m, (d, fill_value))
+ except IndexError:
+ # ok, if scalar
+ if self._data.shape:
+ raise
+ elif m:
+ result = np.array(fill_value, dtype=self.dtype)
+ else:
+ result = self._data
+ return result
+
+ def compressed(self):
+ """
+ Return all the non-masked data as a 1-D array.
+
+ Returns
+ -------
+ data : ndarray
+ A new `ndarray` holding the non-masked data is returned.
+
+ Notes
+ -----
+ The result is **not** a MaskedArray!
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
+ >>> x.compressed()
+ array([0, 1])
+ >>> type(x.compressed())
+ <type 'numpy.ndarray'>
+
+ """
+ data = ndarray.ravel(self._data)
+ if self._mask is not nomask:
+ data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
+ return data
+
+ def compress(self, condition, axis=None, out=None):
+ """
+ Return `a` where condition is ``True``.
+
+ If condition is a `MaskedArray`, missing values are considered
+ as ``False``.
+
+ Parameters
+ ----------
+ condition : var
+ Boolean 1-d array selecting which entries to return. If len(condition)
+ is less than the size of a along the axis, then output is truncated
+ to length of condition array.
+ axis : {None, int}, optional
+ Axis along which the operation must be performed.
+ out : {None, ndarray}, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output but the type will be cast if
+ necessary.
+
+ Returns
+ -------
+ result : MaskedArray
+ A :class:`MaskedArray` object.
+
+ Notes
+ -----
+ Please note the difference with :meth:`compressed` !
+ The output of :meth:`compress` has a mask, the output of
+ :meth:`compressed` does not.
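+
+ For instance (an illustrative example; the repr format varies between
+ numpy versions):
+
+ >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+ >>> a.compressed() # plain ndarray, mask dropped
+ array([1, 3])
+ >>> a.compress([1, 0, 1]) # still a masked array
+ masked_array(data = [1 3],
+ mask = [False False],
+ fill_value = 999999)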
+ + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> print(x) + [[1 -- 3] + [-- 5 --] + [7 -- 9]] + >>> x.compress([1, 0, 1]) + masked_array(data = [1 3], + mask = [False False], + fill_value=999999) + + >>> x.compress([1, 0, 1], axis=1) + masked_array(data = + [[1 3] + [-- --] + [7 9]], + mask = + [[False False] + [ True True] + [False False]], + fill_value=999999) + + """ + # Get the basic components + (_data, _mask) = (self._data, self._mask) + + # Force the condition to a regular ndarray and forget the missing + # values. + condition = np.array(condition, copy=False, subok=False) + + _new = _data.compress(condition, axis=axis, out=out).view(type(self)) + _new._update_from(self) + if _mask is not nomask: + _new._mask = _mask.compress(condition, axis=axis) + return _new + + def _insert_masked_print(self): + """ + Replace masked values with masked_print_option, casting all innermost + dtypes to object. + """ + if masked_print_option.enabled(): + mask = self._mask + if mask is nomask: + res = self._data + else: + # convert to object array to make filled work + data = self._data + # For big arrays, to avoid a costly conversion to the + # object dtype, extract the corners before the conversion. + print_width = (self._print_width if self.ndim > 1 + else self._print_width_1d) + for axis in range(self.ndim): + if data.shape[axis] > print_width: + ind = print_width // 2 + arr = np.split(data, (ind, -ind), axis=axis) + data = np.concatenate((arr[0], arr[2]), axis=axis) + arr = np.split(mask, (ind, -ind), axis=axis) + mask = np.concatenate((arr[0], arr[2]), axis=axis) + + rdtype = _replace_dtype_fields(self.dtype, "O") + res = data.astype(rdtype) + _recursive_printoption(res, mask, masked_print_option) + else: + res = self.filled(self.fill_value) + return res + + def __str__(self): + return str(self._insert_masked_print()) + + if sys.version_info.major < 3: + def __unicode__(self): + return unicode(self._insert_masked_print()) + + def __repr__(self): + """ + Literal string representation. 
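+
+ For example (illustrative; the exact layout follows the non-legacy
+ branch below and varies with print options):
+
+ >>> np.ma.array([1, 2, 3], mask=[0, 1, 0])
+ masked_array(data=[1, --, 3],
+ mask=[False, True, False],
+ fill_value=999999)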
+ + """ + if self._baseclass is np.ndarray: + name = 'array' + else: + name = self._baseclass.__name__ + + + # 2016-11-19: Demoted to legacy format + if np.get_printoptions()['legacy'] == '1.13': + is_long = self.ndim > 1 + parameters = dict( + name=name, + nlen=" " * len(name), + data=str(self), + mask=str(self._mask), + fill=str(self.fill_value), + dtype=str(self.dtype) + ) + is_structured = bool(self.dtype.names) + key = '{}_{}'.format( + 'long' if is_long else 'short', + 'flx' if is_structured else 'std' + ) + return _legacy_print_templates[key] % parameters + + prefix = 'masked_{}('.format(name) + + dtype_needed = ( + not np.core.arrayprint.dtype_is_implied(self.dtype) or + np.all(self.mask) or + self.size == 0 + ) + + # determine which keyword args need to be shown + keys = ['data', 'mask', 'fill_value'] + if dtype_needed: + keys.append('dtype') + + # array has only one row (non-column) + is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) + + # choose what to indent each keyword with + min_indent = 2 + if is_one_row: + # first key on the same line as the type, remaining keys + # aligned by equals + indents = {} + indents[keys[0]] = prefix + for k in keys[1:]: + n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) + indents[k] = ' ' * n + prefix = '' # absorbed into the first indent + else: + # each key on its own line, indented by two spaces + indents = {k: ' ' * min_indent for k in keys} + prefix = prefix + '\n' # first key on the next line + + # format the field values + reprs = {} + reprs['data'] = np.array2string( + self._insert_masked_print(), + separator=", ", + prefix=indents['data'] + 'data=', + suffix=',') + reprs['mask'] = np.array2string( + self._mask, + separator=", ", + prefix=indents['mask'] + 'mask=', + suffix=',') + reprs['fill_value'] = repr(self.fill_value) + if dtype_needed: + reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype) + + # join keys with values and indentations + result = ',\n'.join( + '{}{}={}'.format(indents[k], k, reprs[k]) + for k in keys + ) + return prefix + result + ')' + + def _delegate_binop(self, other): + # This emulates the logic in + # private/binop_override.h:forward_binop_should_defer + if isinstance(other, type(self)): + return False + array_ufunc = getattr(other, "__array_ufunc__", False) + if array_ufunc is False: + other_priority = getattr(other, "__array_priority__", -1000000) + return self.__array_priority__ < other_priority + else: + # If array_ufunc is not None, it will be called inside the ufunc; + # None explicitly tells us to not call the ufunc, i.e., defer. + return array_ufunc is None + + def _comparison(self, other, compare): + """Compare self with other using operator.eq or operator.ne. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + omask = getmask(other) + smask = self.mask + mask = mask_or(smask, omask, copy=True) + + odata = getdata(other) + if mask.dtype.names is not None: + # For possibly masked structured arrays we need to be careful, + # since the standard structured array comparison will use all + # fields, masked or not. 
To avoid masked fields influencing the
+ # outcome, we set all masked fields in self to other, so they'll
+ # count as equal. To prepare, we ensure we have the right shape.
+ broadcast_shape = np.broadcast(self, odata).shape
+ sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
+ sbroadcast._mask = mask
+ sdata = sbroadcast.filled(odata)
+ # Now take care of the mask; the merged mask should have an item
+ # masked if all fields were masked (in one and/or other).
+ mask = (mask == np.ones((), mask.dtype))
+
+ else:
+ # For regular arrays, just use the data as they come.
+ sdata = self.data
+
+ check = compare(sdata, odata)
+
+ if isinstance(check, (np.bool_, bool)):
+ return masked if mask else check
+
+ if mask is not nomask:
+ # Adjust elements that were masked, which should be treated
+ # as equal if masked in both, unequal if masked in one.
+ # Note that this works automatically for structured arrays too.
+ check = np.where(mask, compare(smask, omask), check)
+ if mask.shape != check.shape:
+ # Guarantee consistency of the shape, making a copy since the
+ # mask may need to get written to later.
+ mask = np.broadcast_to(mask, check.shape).copy()
+
+ check = check.view(type(self))
+ check._update_from(self)
+ check._mask = mask
+
+ # Cast fill value to bool_ if needed. If it cannot be cast, the
+ # default boolean fill value is used.
+ if check._fill_value is not None:
+ try:
+ fill = _check_fill_value(check._fill_value, np.bool_)
+ except (TypeError, ValueError):
+ fill = _check_fill_value(None, np.bool_)
+ check._fill_value = fill
+
+ return check
+
+ def __eq__(self, other):
+ """Check whether other equals self elementwise.
+
+ When either of the elements is masked, the result is masked as well,
+ but the underlying boolean data are still set, with self and other
+ considered equal if both are masked, and unequal otherwise.
+
+ For structured arrays, all fields are combined, with masked values
+ ignored. The result is masked if all fields were masked, with self
+ and other considered equal only if both were fully masked.
+ """
+ return self._comparison(other, operator.eq)
+
+ def __ne__(self, other):
+ """Check whether other does not equal self elementwise.
+
+ When either of the elements is masked, the result is masked as well,
+ but the underlying boolean data are still set, with self and other
+ considered equal if both are masked, and unequal otherwise.
+
+ For structured arrays, all fields are combined, with masked values
+ ignored. The result is masked if all fields were masked, with self
+ and other considered equal only if both were fully masked.
+ """
+ return self._comparison(other, operator.ne)
+
+ def __add__(self, other):
+ """
+ Add self to other, and return a new masked array.
+
+ """
+ if self._delegate_binop(other):
+ return NotImplemented
+ return add(self, other)
+
+ def __radd__(self, other):
+ """
+ Add other to self, and return a new masked array.
+
+ """
+ # In analogy with __rsub__ and __rdiv__, use original order:
+ # we get here from `other + self`.
+ return add(other, self)
+
+ def __sub__(self, other):
+ """
+ Subtract other from self, and return a new masked array.
+
+ """
+ if self._delegate_binop(other):
+ return NotImplemented
+ return subtract(self, other)
+
+ def __rsub__(self, other):
+ """
+ Subtract self from other, and return a new masked array.
+
+ """
+ return subtract(other, self)
+
+ def __mul__(self, other):
+ "Multiply self by other, and return a new masked array."
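+ # As with the other binary ops here, defer to `other` when it opts out
+ # of ufunc handling (__array_ufunc__ = None) or declares a higher
+ # __array_priority__ than ours (15); see _delegate_binop above.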
+ if self._delegate_binop(other): + return NotImplemented + return multiply(self, other) + + def __rmul__(self, other): + """ + Multiply other by self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other * self`. + return multiply(other, self) + + def __div__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return divide(self, other) + + def __truediv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return true_divide(self, other) + + def __rtruediv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return true_divide(other, self) + + def __floordiv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return floor_divide(self, other) + + def __rfloordiv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return floor_divide(other, self) + + def __pow__(self, other): + """ + Raise self to the power other, masking the potential NaNs/Infs + + """ + if self._delegate_binop(other): + return NotImplemented + return power(self, other) + + def __rpow__(self, other): + """ + Raise other to the power self, masking the potential NaNs/Infs + + """ + return power(other, self) + + def __iadd__(self, other): + """ + Add other to self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + else: + if m is not nomask: + self._mask += m + self._data.__iadd__(np.where(self._mask, self.dtype.type(0), + getdata(other))) + return self + + def __isub__(self, other): + """ + Subtract other from self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + self._data.__isub__(np.where(self._mask, self.dtype.type(0), + getdata(other))) + return self + + def __imul__(self, other): + """ + Multiply self by other in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + self._data.__imul__(np.where(self._mask, self.dtype.type(1), + getdata(other))) + return self + + def __idiv__(self, other): + """ + Divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.divide] + other_data = np.where(dom_mask, fval, other_data) + self._mask |= new_mask + self._data.__idiv__(np.where(self._mask, self.dtype.type(1), + other_data)) + return self + + def __ifloordiv__(self, other): + """ + Floor divide self by other in-place. 
+ + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.floor_divide] + other_data = np.where(dom_mask, fval, other_data) + self._mask |= new_mask + self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1), + other_data)) + return self + + def __itruediv__(self, other): + """ + True divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.true_divide] + other_data = np.where(dom_mask, fval, other_data) + self._mask |= new_mask + self._data.__itruediv__(np.where(self._mask, self.dtype.type(1), + other_data)) + return self + + def __ipow__(self, other): + """ + Raise self to the power other, in place. + + """ + other_data = getdata(other) + other_mask = getmask(other) + with np.errstate(divide='ignore', invalid='ignore'): + self._data.__ipow__(np.where(self._mask, self.dtype.type(1), + other_data)) + invalid = np.logical_not(np.isfinite(self._data)) + if invalid.any(): + if self._mask is not nomask: + self._mask |= invalid + else: + self._mask = invalid + np.copyto(self._data, self.fill_value, where=invalid) + new_mask = mask_or(other_mask, invalid) + self._mask = mask_or(self._mask, new_mask) + return self + + def __float__(self): + """ + Convert to float. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) + return np.nan + return float(self.item()) + + def __int__(self): + """ + Convert to int. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python int.') + return int(self.item()) + + def __long__(self): + """ + Convert to long. + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python long.') + return long(self.item()) + + + def get_imag(self): + """ + Return the imaginary part of the masked array. + + The returned array is a view on the imaginary part of the `MaskedArray` + whose `get_imag` method is called. + + Parameters + ---------- + None + + Returns + ------- + result : MaskedArray + The imaginary part of the masked array. + + See Also + -------- + get_real, real, imag + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.get_imag() + masked_array(data = [1.0 -- 1.6], + mask = [False True False], + fill_value = 1e+20) + + """ + result = self._data.imag.view(type(self)) + result.__setmask__(self._mask) + return result + + imag = property(fget=get_imag, doc="Imaginary part.") + + def get_real(self): + """ + Return the real part of the masked array. + + The returned array is a view on the real part of the `MaskedArray` + whose `get_real` method is called. + + Parameters + ---------- + None + + Returns + ------- + result : MaskedArray + The real part of the masked array. 
+ + See Also + -------- + get_imag, real, imag + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.get_real() + masked_array(data = [1.0 -- 3.45], + mask = [False True False], + fill_value = 1e+20) + + """ + result = self._data.real.view(type(self)) + result.__setmask__(self._mask) + return result + real = property(fget=get_real, doc="Real part") + + def count(self, axis=None, keepdims=np._NoValue): + """ + Count the non-masked elements of the array along the given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis or axes along which the count is performed. + The default (`axis` = `None`) performs the count over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.10.0 + + If this is a tuple of ints, the count is performed on multiple + axes, instead of a single axis or all the axes as before. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + result : ndarray or scalar + An array with the same shape as the input array, with the specified + axis removed. If the array is a 0-d array, or if `axis` is None, a + scalar is returned. + + See Also + -------- + count_masked : Count masked elements in array or along a given axis. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(6).reshape((2, 3)) + >>> a[1, :] = ma.masked + >>> a + masked_array(data = + [[0 1 2] + [-- -- --]], + mask = + [[False False False] + [ True True True]], + fill_value = 999999) + >>> a.count() + 3 + + When the `axis` keyword is specified an array of appropriate size is + returned. + + >>> a.count(axis=0) + array([1, 1, 1]) + >>> a.count(axis=1) + array([3, 0]) + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + m = self._mask + # special case for matrices (we assume no other subclasses modify + # their dimensions) + if isinstance(self.data, np.matrix): + if m is nomask: + m = np.zeros(self.shape, dtype=np.bool_) + m = m.view(type(self.data)) + + if m is nomask: + # compare to _count_reduce_items in _methods.py + + if self.shape is (): + if axis not in (None, 0): + raise np.AxisError(axis=axis, ndim=self.ndim) + return 1 + elif axis is None: + if kwargs.get('keepdims', False): + return np.array(self.size, dtype=np.intp, ndmin=self.ndim) + return self.size + + axes = normalize_axis_tuple(axis, self.ndim) + items = 1 + for ax in axes: + items *= self.shape[ax] + + if kwargs.get('keepdims', False): + out_dims = list(self.shape) + for a in axes: + out_dims[a] = 1 + else: + out_dims = [d for n, d in enumerate(self.shape) + if n not in axes] + # make sure to return a 0-d array if axis is supplied + return np.full(out_dims, items, dtype=np.intp) + + # take care of the masked singleton + if self is masked: + return 0 + + return (~m).sum(axis=axis, dtype=np.intp, **kwargs) + + def ravel(self, order='C'): + """ + Returns a 1D version of self, as a view. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `a` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. 
+ 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + MaskedArray + Output view is of shape ``(self.size,)`` (or + ``(np.ma.product(self.shape),)``). + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> print(x) + [[1 -- 3] + [-- 5 --] + [7 -- 9]] + >>> print(x.ravel()) + [1 -- 3 -- 5 -- 7 -- 9] + + """ + r = ndarray.ravel(self._data, order=order).view(type(self)) + r._update_from(self) + if self._mask is not nomask: + r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) + else: + r._mask = nomask + return r + + + def reshape(self, *s, **kwargs): + """ + Give a new shape to the array without changing its data. + + Returns a masked array containing the same data, but with a new shape. + The result is a view on the original array; if this is not possible, a + ValueError is raised. + + Parameters + ---------- + shape : int or tuple of ints + The new shape should be compatible with the original shape. If an + integer is supplied, then the result will be a 1-D array of that + length. + order : {'C', 'F'}, optional + Determines whether the array data should be viewed as in C + (row-major) or FORTRAN (column-major) order. + + Returns + ------- + reshaped_array : array + A new view on the array. + + See Also + -------- + reshape : Equivalent function in the masked array module. + numpy.ndarray.reshape : Equivalent method on ndarray object. + numpy.reshape : Equivalent function in the NumPy module. + + Notes + ----- + The reshaping operation cannot guarantee that a copy will not be made, + to modify the shape in place, use ``a.shape = s`` + + Examples + -------- + >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) + >>> print(x) + [[-- 2] + [3 --]] + >>> x = x.reshape((4,1)) + >>> print(x) + [[--] + [2] + [3] + [--]] + + """ + kwargs.update(order=kwargs.get('order', 'C')) + result = self._data.reshape(*s, **kwargs).view(type(self)) + result._update_from(self) + mask = self._mask + if mask is not nomask: + result._mask = mask.reshape(*s, **kwargs) + return result + + def resize(self, newshape, refcheck=True, order=False): + """ + .. warning:: + + This method does nothing, except raise a ValueError exception. A + masked array does not own its data and therefore cannot safely be + resized in place. Use the `numpy.ma.resize` function instead. + + This method is difficult to implement safely and may be deprecated in + future releases of NumPy. + + """ + # Note : the 'order' keyword looks broken, let's just drop it + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) + + def put(self, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + Sets self._data.flat[n] = values[n] for each n in indices. + If `values` is shorter than `indices` then it will repeat. 
+ If `values` has some masked values, the initial mask is updated + in consequence, else the corresponding values are unmasked. + + Parameters + ---------- + indices : 1-D array_like + Target indices, interpreted as integers. + values : array_like + Values to place in self._data copy at target indices. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + 'raise' : raise an error. + 'wrap' : wrap around. + 'clip' : clip to the range. + + Notes + ----- + `values` can be a scalar or length 1 array. + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> print(x) + [[1 -- 3] + [-- 5 --] + [7 -- 9]] + >>> x.put([0,4,8],[10,20,30]) + >>> print(x) + [[10 -- 3] + [-- 20 --] + [7 -- 30]] + + >>> x.put(4,999) + >>> print(x) + [[10 -- 3] + [-- 999 --] + [7 -- 30]] + + """ + # Hard mask: Get rid of the values/indices that fall on masked data + if self._hardmask and self._mask is not nomask: + mask = self._mask[indices] + indices = narray(indices, copy=False) + values = narray(values, copy=False, subok=True) + values.resize(indices.shape) + indices = indices[~mask] + values = values[~mask] + + self._data.put(indices, values, mode=mode) + + # short circuit if neither self nor values are masked + if self._mask is nomask and getmask(values) is nomask: + return + + m = getmaskarray(self) + + if getmask(values) is nomask: + m.put(indices, False, mode=mode) + else: + m.put(indices, values._mask, mode=mode) + m = make_mask(m, copy=False, shrink=True) + self._mask = m + return + + def ids(self): + """ + Return the addresses of the data and mask areas. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) + >>> x.ids() + (166670640, 166659832) + + If the array has no mask, the address of `nomask` is returned. This address + is typically not close to the data in memory: + + >>> x = np.ma.array([1, 2, 3]) + >>> x.ids() + (166691080, 3083169284L) + + """ + if self._mask is nomask: + return (self.ctypes.data, id(nomask)) + return (self.ctypes.data, self._mask.ctypes.data) + + def iscontiguous(self): + """ + Return a boolean indicating whether the data is contiguous. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3]) + >>> x.iscontiguous() + True + + `iscontiguous` returns one of the flags of the masked array: + + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : True + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + UPDATEIFCOPY : False + + """ + return self.flags['CONTIGUOUS'] + + def all(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if all elements evaluate to True. + + The output array is masked where all the values along the given axis + are masked: if the output would have been a scalar and that all the + values are masked, then the output is `masked`. + + Refer to `numpy.all` for full documentation. 
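+
+ Masked entries are filled with True for the computation (note the
+ ``self.filled(True)`` call in the implementation), so they can never
+ cause `all` to return False on their own.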
+ + See Also + -------- + ndarray.all : corresponding function for ndarrays + numpy.all : equivalent function + + Examples + -------- + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + return masked + return d + self.filled(True).all(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def any(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if any of the elements of `a` evaluate to True. + + Masked values are considered as False during computation. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + ndarray.any : corresponding function for ndarrays + numpy.any : equivalent function + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + d = masked + return d + self.filled(False).any(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def nonzero(self): + """ + Return the indices of unmasked elements that are not zero. + + Returns a tuple of arrays, one for each dimension, containing the + indices of the non-zero elements in that dimension. The corresponding + non-zero values can be obtained with:: + + a[a.nonzero()] + + To group the indices by element, rather than dimension, use + instead:: + + np.transpose(a.nonzero()) + + The result of this is always a 2d array, with a row for each non-zero + element. + + Parameters + ---------- + None + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + numpy.nonzero : + Function operating on ndarrays. + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.array(np.eye(3)) + >>> x + masked_array(data = + [[ 1. 0. 0.] + [ 0. 1. 0.] + [ 0. 0. 1.]], + mask = + False, + fill_value=1e+20) + >>> x.nonzero() + (array([0, 1, 2]), array([0, 1, 2])) + + Masked elements are ignored. + + >>> x[1, 1] = ma.masked + >>> x + masked_array(data = + [[1.0 0.0 0.0] + [0.0 -- 0.0] + [0.0 0.0 1.0]], + mask = + [[False False False] + [False True False] + [False False False]], + fill_value=1e+20) + >>> x.nonzero() + (array([0, 2]), array([0, 2])) + + Indices can also be grouped by element. + + >>> np.transpose(x.nonzero()) + array([[0, 0], + [2, 2]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, ma.nonzero(a > 3) + yields the indices of the `a` where the condition is true. 
+ + >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) + >>> a > 3 + masked_array(data = + [[False False False] + [ True True True] + [ True True True]], + mask = + False, + fill_value=999999) + >>> ma.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + The ``nonzero`` method of the condition array can also be called. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return narray(self.filled(0), copy=False).nonzero() + + def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + (this docstring should be overwritten) + """ + #!!!: implement out + test! + m = self._mask + if m is nomask: + result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, + axis2=axis2, out=out) + return result.astype(dtype) + else: + D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) + return D.astype(dtype).filled(0).sum(axis=-1, out=out) + trace.__doc__ = ndarray.trace.__doc__ + + def dot(self, b, out=None, strict=False): + """ + a.dot(b, out=None) + + Masked dot product of two arrays. Note that `out` and `strict` are + located in different positions than in `ma.dot`. In order to + maintain compatibility with the functional version, it is + recommended that the optional arguments be treated as keyword only. + At some point that may be mandatory. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + b : masked_array_like + Inputs array. + out : masked_array, optional + Output argument. This must have the exact kind that would be + returned if it was not used. In particular, it must have the + right type, must be C-contiguous, and its dtype must be the + dtype that would be returned for `ma.dot(a,b)`. This is a + performance feature. Therefore, if these conditions are not + met, an exception is raised, instead of attempting to be + flexible. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) + for the computation. Default is False. Propagating the mask + means that if a masked value appears in a row or column, the + whole row or column is considered masked. + + .. versionadded:: 1.10.2 + + See Also + -------- + numpy.ma.dot : equivalent function + + """ + return dot(self, b, out=out, strict=strict) + + def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the sum of the array elements over the given axis. + + Masked elements are set to 0 internally. + + Refer to `numpy.sum` for full documentation. 
+
+        See Also
+        --------
+        ndarray.sum : corresponding function for ndarrays
+        numpy.sum : equivalent function
+
+        Examples
+        --------
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> print(x)
+        [[1 -- 3]
+         [-- 5 --]
+         [7 -- 9]]
+        >>> print(x.sum())
+        25
+        >>> print(x.sum(axis=1))
+        [4 5 16]
+        >>> print(x.sum(axis=0))
+        [8 5 12]
+        >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
+        <type 'numpy.int64'>
+
+        """
+        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+        _mask = self._mask
+        newmask = _check_mask_axis(_mask, axis, **kwargs)
+        # No explicit output
+        if out is None:
+            result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
+            rndim = getattr(result, 'ndim', 0)
+            if rndim:
+                result = result.view(type(self))
+                result.__setmask__(newmask)
+            elif newmask:
+                result = masked
+            return result
+        # Explicit output
+        result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
+        if isinstance(out, MaskedArray):
+            outmask = getmask(out)
+            if (outmask is nomask):
+                outmask = out._mask = make_mask_none(out.shape)
+            outmask.flat = newmask
+        return out
+
+    def cumsum(self, axis=None, dtype=None, out=None):
+        """
+        Return the cumulative sum of the array elements over the given axis.
+
+        Masked values are set to 0 internally during the computation.
+        However, their position is saved, and the result will be masked at
+        the same locations.
+
+        Refer to `numpy.cumsum` for full documentation.
+
+        Notes
+        -----
+        The mask is lost if `out` is not a valid :class:`MaskedArray`!
+
+        Arithmetic is modular when using integer types, and no error is
+        raised on overflow.
+
+        See Also
+        --------
+        ndarray.cumsum : corresponding function for ndarrays
+        numpy.cumsum : equivalent function
+
+        Examples
+        --------
+        >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
+        >>> print(marr.cumsum())
+        [0 1 3 -- -- -- 9 16 24 33]
+
+        """
+        result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
+        if out is not None:
+            if isinstance(out, MaskedArray):
+                out.__setmask__(self.mask)
+            return out
+        result = result.view(type(self))
+        result.__setmask__(self._mask)
+        return result
+
+    def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+        """
+        Return the product of the array elements over the given axis.
+
+        Masked elements are set to 1 internally for computation.
+
+        Refer to `numpy.prod` for full documentation.
+
+        Notes
+        -----
+        Arithmetic is modular when using integer types, and no error is raised
+        on overflow.
+
+        See Also
+        --------
+        ndarray.prod : corresponding function for ndarrays
+        numpy.prod : equivalent function
+        """
+        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+        _mask = self._mask
+        newmask = _check_mask_axis(_mask, axis, **kwargs)
+        # No explicit output
+        if out is None:
+            result = self.filled(1).prod(axis, dtype=dtype, **kwargs)
+            rndim = getattr(result, 'ndim', 0)
+            if rndim:
+                result = result.view(type(self))
+                result.__setmask__(newmask)
+            elif newmask:
+                result = masked
+            return result
+        # Explicit output
+        result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
+        if isinstance(out, MaskedArray):
+            outmask = getmask(out)
+            if (outmask is nomask):
+                outmask = out._mask = make_mask_none(out.shape)
+            outmask.flat = newmask
+        return out
+    product = prod
+
+    def cumprod(self, axis=None, dtype=None, out=None):
+        """
+        Return the cumulative product of the array elements over the given axis.
+
+        Masked values are set to 1 internally during the computation.
+ However, their position is saved, and the result will be masked at + the same locations. + + Refer to `numpy.cumprod` for full documentation. + + Notes + ----- + The mask is lost if `out` is not a valid MaskedArray ! + + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + See Also + -------- + ndarray.cumprod : corresponding function for ndarrays + numpy.cumprod : equivalent function + """ + result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + result = result.view(type(self)) + result.__setmask__(self._mask) + return result + + def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Returns the average of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + ndarray.mean : corresponding function for ndarrays + numpy.mean : Equivalent function + numpy.ma.average: Weighted average. + + Examples + -------- + >>> a = np.ma.array([1,2,3], mask=[False, False, True]) + >>> a + masked_array(data = [1 2 --], + mask = [False False True], + fill_value = 999999) + >>> a.mean() + 1.5 + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + if self._mask is nomask: + result = super(MaskedArray, self).mean(axis=axis, + dtype=dtype, **kwargs)[()] + else: + dsum = self.sum(axis=axis, dtype=dtype, **kwargs) + cnt = self.count(axis=axis, **kwargs) + if cnt.shape == () and (cnt == 0): + result = masked + else: + result = dsum * 1. / cnt + if out is not None: + out.flat = result + if isinstance(out, MaskedArray): + outmask = getmask(out) + if (outmask is nomask): + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = getmask(result) + return out + return result + + def anom(self, axis=None, dtype=None): + """ + Compute the anomalies (deviations from the arithmetic mean) + along the given axis. + + Returns an array of anomalies, with the same shape as the input and + where the arithmetic mean is computed along the given axis. + + Parameters + ---------- + axis : int, optional + Axis over which the anomalies are taken. + The default is to use the mean of the flattened array as reference. + dtype : dtype, optional + Type to use in computing the variance. For arrays of integer type + the default is float32; for arrays of float types it is the same as + the array type. + + See Also + -------- + mean : Compute the mean of the array. + + Examples + -------- + >>> a = np.ma.array([1,2,3]) + >>> a.anom() + masked_array(data = [-1. 0. 1.], + mask = False, + fill_value = 1e+20) + + """ + m = self.mean(axis, dtype) + if m is masked: + return m + + if not axis: + return (self - m) + else: + return (self - expand_dims(m, axis)) + + def var(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue): + """ + Returns the variance of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.var` for full documentation. 
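+
+        For example (a minimal sketch, values chosen arbitrarily), only the
+        unmasked values enter the computation:
+
+        >>> np.ma.array([1., 2., 3.], mask=[0, 0, 1]).var()
+        0.25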
+
+        See Also
+        --------
+        ndarray.var : corresponding function for ndarrays
+        numpy.var : Equivalent function
+        """
+        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+        # Easy case: nomask, business as usual
+        if self._mask is nomask:
+            ret = super(MaskedArray, self).var(axis=axis, dtype=dtype, out=out,
+                                               ddof=ddof, **kwargs)[()]
+            if out is not None:
+                if isinstance(out, MaskedArray):
+                    out.__setmask__(nomask)
+                return out
+            return ret
+
+        # Some data are masked, yay!
+        cnt = self.count(axis=axis, **kwargs) - ddof
+        danom = self - self.mean(axis, dtype, keepdims=True)
+        if iscomplexobj(self):
+            danom = umath.absolute(danom) ** 2
+        else:
+            danom *= danom
+        dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
+        # Apply the mask if it's not a scalar
+        if dvar.ndim:
+            dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
+            dvar._update_from(self)
+        elif getmask(dvar):
+            # Make sure that masked is returned when the scalar is masked.
+            dvar = masked
+            if out is not None:
+                if isinstance(out, MaskedArray):
+                    out.flat = 0
+                    out.__setmask__(True)
+                elif out.dtype.kind in 'biu':
+                    errmsg = "Masked data information would be lost in one or "\
+                             "more locations."
+                    raise MaskError(errmsg)
+                else:
+                    out.flat = np.nan
+                return out
+        # In case we have an explicit output
+        if out is not None:
+            # Set the data
+            out.flat = dvar
+            # Set the mask if needed
+            if isinstance(out, MaskedArray):
+                out.__setmask__(dvar.mask)
+            return out
+        return dvar
+    var.__doc__ = np.var.__doc__
+
+    def std(self, axis=None, dtype=None, out=None, ddof=0,
+            keepdims=np._NoValue):
+        """
+        Returns the standard deviation of the array elements along given axis.
+
+        Masked entries are ignored.
+
+        Refer to `numpy.std` for full documentation.
+
+        See Also
+        --------
+        ndarray.std : corresponding function for ndarrays
+        numpy.std : Equivalent function
+        """
+        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+        dvar = self.var(axis, dtype, out, ddof, **kwargs)
+        if dvar is not masked:
+            if out is not None:
+                np.power(out, 0.5, out=out, casting='unsafe')
+                return out
+            dvar = sqrt(dvar)
+        return dvar
+
+    def round(self, decimals=0, out=None):
+        """
+        Return each element rounded to the given number of decimals.
+
+        Refer to `numpy.around` for full documentation.
+
+        See Also
+        --------
+        ndarray.around : corresponding function for ndarrays
+        numpy.around : equivalent function
+        """
+        result = self._data.round(decimals=decimals, out=out).view(type(self))
+        if result.ndim > 0:
+            result._mask = self._mask
+            result._update_from(self)
+        elif self._mask:
+            # Return masked when the scalar is masked
+            result = masked
+        # No explicit output: we're done
+        if out is None:
+            return result
+        if isinstance(out, MaskedArray):
+            out.__setmask__(self._mask)
+        return out
+
+    def argsort(self, axis=np._NoValue, kind='quicksort', order=None,
+                endwith=True, fill_value=None):
+        """
+        Return an ndarray of indices that sort the array along the
+        specified axis. Masked values are filled beforehand to
+        `fill_value`.
+
+        Parameters
+        ----------
+        axis : int, optional
+            Axis along which to sort. If None, the default, the flattened array
+            is used.
+
+            .. versionchanged:: 1.13.0
+                Previously, the default was documented to be -1, but that was
+                in error. At some future date, the default will change to -1, as
+                originally intended.
+                Until then, the axis should be given explicitly when
+                ``arr.ndim > 1``, to avoid a FutureWarning.
+        kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+            Sorting algorithm.
+        order : list, optional
+            When `a` is an array with fields defined, this argument specifies
+            which fields to compare first, second, etc. Not all fields need be
+            specified.
+        endwith : {True, False}, optional
+            Whether missing values (if any) should be treated as the largest
+            values (True) or the smallest values (False).
+            When the array contains unmasked values at the same extremes of the
+            datatype, the ordering of these values and the masked values is
+            undefined.
+        fill_value : {var}, optional
+            Value used internally for the masked values.
+            If ``fill_value`` is not None, it supersedes ``endwith``.
+
+        Returns
+        -------
+        index_array : ndarray, int
+            Array of indices that sort `a` along the specified axis.
+            In other words, ``a[index_array]`` yields a sorted `a`.
+
+        See Also
+        --------
+        MaskedArray.sort : Describes sorting algorithms used.
+        lexsort : Indirect stable sort with multiple keys.
+        ndarray.sort : In-place sort.
+
+        Notes
+        -----
+        See `sort` for notes on the different sorting algorithms.
+
+        Examples
+        --------
+        >>> a = np.ma.array([3,2,1], mask=[False, False, True])
+        >>> a
+        masked_array(data = [3 2 --],
+                     mask = [False False  True],
+               fill_value = 999999)
+        >>> a.argsort()
+        array([1, 0, 2])
+
+        """
+
+        # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
+        if axis is np._NoValue:
+            axis = _deprecate_argsort_axis(self)
+
+        if fill_value is None:
+            if endwith:
+                # nan > inf
+                if np.issubdtype(self.dtype, np.floating):
+                    fill_value = np.nan
+                else:
+                    fill_value = minimum_fill_value(self)
+            else:
+                fill_value = maximum_fill_value(self)
+
+        filled = self.filled(fill_value)
+        return filled.argsort(axis=axis, kind=kind, order=order)
+
+    def argmin(self, axis=None, fill_value=None, out=None):
+        """
+        Return array of indices to the minimum values along the given axis.
+
+        Parameters
+        ----------
+        axis : {None, integer}
+            If None, the index is into the flattened array, otherwise along
+            the specified axis
+        fill_value : {var}, optional
+            Value used to fill in the masked values. If None, the output of
+            minimum_fill_value(self._data) is used instead.
+        out : {None, array}, optional
+            Array into which the result can be placed. Its type is preserved
+            and it must be of the right shape to hold the output.
+
+        Returns
+        -------
+        ndarray or scalar
+            With multi-dimensional input, returns a new ndarray of indices to
+            the minimum values along the given axis. Otherwise, returns a
+            scalar index of the minimum value along the given axis.
+
+        Examples
+        --------
+        >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
+        >>> x.shape = (2,2)
+        >>> print(x)
+        [[-- --]
+         [2 3]]
+        >>> print(x.argmin(axis=0, fill_value=-1))
+        [0 0]
+        >>> print(x.argmin(axis=0, fill_value=9))
+        [1 1]
+
+        """
+        if fill_value is None:
+            fill_value = minimum_fill_value(self)
+        d = self.filled(fill_value).view(ndarray)
+        return d.argmin(axis, out=out)
+
+    def argmax(self, axis=None, fill_value=None, out=None):
+        """
+        Returns array of indices of the maximum values along the given axis.
+        Masked values are treated as if they had the value fill_value.
+
+        Parameters
+        ----------
+        axis : {None, integer}
+            If None, the index is into the flattened array, otherwise along
+            the specified axis
+        fill_value : {var}, optional
+            Value used to fill in the masked values. If None, the output of
+            maximum_fill_value(self._data) is used instead.
+        out : {None, array}, optional
+            Array into which the result can be placed. Its type is preserved
+            and it must be of the right shape to hold the output.
+
+        Returns
+        -------
+        index_array : {integer_array}
+
+        Examples
+        --------
+        >>> a = np.arange(6).reshape(2,3)
+        >>> a.argmax()
+        5
+        >>> a.argmax(0)
+        array([1, 1, 1])
+        >>> a.argmax(1)
+        array([2, 2])
+
+        """
+        if fill_value is None:
+            fill_value = maximum_fill_value(self._data)
+        d = self.filled(fill_value).view(ndarray)
+        return d.argmax(axis, out=out)
+
+    def sort(self, axis=-1, kind='quicksort', order=None,
+             endwith=True, fill_value=None):
+        """
+        Sort the array, in-place.
+
+        Parameters
+        ----------
+        a : array_like
+            Array to be sorted.
+        axis : int, optional
+            Axis along which to sort. If None, the array is flattened before
+            sorting. The default is -1, which sorts along the last axis.
+        kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+            Sorting algorithm. Default is 'quicksort'.
+        order : list, optional
+            When `a` is a structured array, this argument specifies which fields
+            to compare first, second, and so on. This list does not need to
+            include all of the fields.
+        endwith : {True, False}, optional
+            Whether missing values (if any) should be treated as the largest
+            values (True) or the smallest values (False).
+            When the array contains unmasked values at the same extremes of the
+            datatype, the ordering of these values and the masked values is
+            undefined.
+        fill_value : {var}, optional
+            Value used internally for the masked values.
+            If ``fill_value`` is not None, it supersedes ``endwith``.
+
+        Returns
+        -------
+        sorted_array : ndarray
+            Array of the same type and shape as `a`.
+
+        See Also
+        --------
+        ndarray.sort : Method to sort an array in-place.
+        argsort : Indirect sort.
+        lexsort : Indirect stable sort on multiple keys.
+        searchsorted : Find elements in a sorted array.
+
+        Notes
+        -----
+        See ``sort`` for notes on the different sorting algorithms.
+
+        Examples
+        --------
+        >>> a = ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
+        >>> # Default
+        >>> a.sort()
+        >>> print(a)
+        [1 3 5 -- --]
+
+        >>> a = ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
+        >>> # Put missing values in the front
+        >>> a.sort(endwith=False)
+        >>> print(a)
+        [-- -- 1 3 5]
+
+        >>> a = ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
+        >>> # fill_value takes over endwith
+        >>> a.sort(endwith=False, fill_value=3)
+        >>> print(a)
+        [1 -- -- 3 5]
+
+        """
+        if self._mask is nomask:
+            ndarray.sort(self, axis=axis, kind=kind, order=order)
+            return
+
+        if self is masked:
+            return
+
+        sidx = self.argsort(axis=axis, kind=kind, order=order,
+                            fill_value=fill_value, endwith=endwith)
+
+        self[...] = np.take_along_axis(self, sidx, axis=axis)
+
+    def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
+        """
+        Return the minimum along a given axis.
+
+        Parameters
+        ----------
+        axis : {None, int}, optional
+            Axis along which to operate. By default, ``axis`` is None and the
+            flattened input is used.
+        out : array_like, optional
+            Alternative output array in which to place the result. Must be of
+            the same shape and buffer length as the expected output.
+        fill_value : {var}, optional
+            Value used to fill in the masked values.
+            If None, use the output of `minimum_fill_value`.
+
+        Returns
+        -------
+        amin : array_like
+            New array holding the result.
+            If ``out`` was specified, ``out`` is returned.
+
+        See Also
+        --------
+        minimum_fill_value
+            Returns the minimum filling value for a given datatype.
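+
+        Examples
+        --------
+        A small illustration (values chosen arbitrarily); the masked entry
+        does not take part in the minimum:
+
+        >>> x = np.ma.array([5, -1, 3], mask=[0, 1, 0])
+        >>> x.min()
+        3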
+ + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = minimum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).min( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(fill_value).min(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if (outmask is nomask): + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + # unique to masked arrays + def mini(self, axis=None): + """ + Return the array minimum along the specified axis. + + .. deprecated:: 1.13.0 + This function is identical to both: + + * ``self.min(keepdims=True, axis=axis).squeeze(axis=axis)`` + * ``np.ma.minimum.reduce(self, axis=axis)`` + + Typically though, ``self.min(axis=axis)`` is sufficient. + + Parameters + ---------- + axis : int, optional + The axis along which to find the minima. Default is None, in which case + the minimum value in the whole array is returned. + + Returns + ------- + min : scalar or MaskedArray + If `axis` is None, the result is a scalar. Otherwise, if `axis` is + given and the array is at least 2-D, the result is a masked array with + dimension one smaller than the array on which `mini` is called. + + Examples + -------- + >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2) + >>> print(x) + [[0 --] + [2 3] + [4 --]] + >>> x.mini() + 0 + >>> x.mini(axis=0) + masked_array(data = [0 3], + mask = [False False], + fill_value = 999999) + >>> print(x.mini(axis=1)) + [0 2 4] + + There is a small difference between `mini` and `min`: + + >>> x[:,1].mini(axis=0) + masked_array(data = --, + mask = True, + fill_value = 999999) + >>> x[:,1].min(axis=0) + masked + """ + + # 2016-04-13, 1.13.0, gh-8764 + warnings.warn( + "`mini` is deprecated; use the `min` method or " + "`np.ma.minimum.reduce instead.", + DeprecationWarning, stacklevel=2) + return minimum.reduce(self, axis) + + def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the maximum along a given axis. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + out : array_like, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + fill_value : {var}, optional + Value used to fill in the masked values. + If None, use the output of maximum_fill_value(). + + Returns + ------- + amax : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + maximum_fill_value + Returns the maximum filling value for a given datatype. 
+ + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = maximum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).max( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(fill_value).max(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if (outmask is nomask): + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): + """ + Return (maximum - minimum) along the given dimension + (i.e. peak-to-peak value). + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to find the peaks. If None (default) the + flattened array is used. + out : {None, array_like}, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + fill_value : {var}, optional + Value used to fill in the masked values. + + Returns + ------- + ptp : ndarray. + A new array holding the result, unless ``out`` was + specified, in which case a reference to ``out`` is returned. + + """ + if out is None: + result = self.max(axis=axis, fill_value=fill_value, + keepdims=keepdims) + result -= self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + return result + out.flat = self.max(axis=axis, out=out, fill_value=fill_value, + keepdims=keepdims) + min_value = self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + np.subtract(out, min_value, out=out, casting='unsafe') + return out + + def partition(self, *args, **kwargs): + warnings.warn("Warning: 'partition' will ignore the 'mask' " + "of the {}.".format(self.__class__.__name__), + stacklevel=2) + return super(MaskedArray, self).partition(*args, **kwargs) + + def argpartition(self, *args, **kwargs): + warnings.warn("Warning: 'argpartition' will ignore the 'mask' " + "of the {}.".format(self.__class__.__name__), + stacklevel=2) + return super(MaskedArray, self).argpartition(*args, **kwargs) + + def take(self, indices, axis=None, out=None, mode='raise'): + """ + """ + (_data, _mask) = (self._data, self._mask) + cls = type(self) + # Make sure the indices are not masked + maskindices = getmask(indices) + if maskindices is not nomask: + indices = indices.filled(0) + # Get the data, promoting scalars to 0d arrays with [...] 
+        # .view works correctly
+        if out is None:
+            out = _data.take(indices, axis=axis, mode=mode)[...].view(cls)
+        else:
+            np.take(_data, indices, axis=axis, mode=mode, out=out)
+        # Get the mask
+        if isinstance(out, MaskedArray):
+            if _mask is nomask:
+                outmask = maskindices
+            else:
+                outmask = _mask.take(indices, axis=axis, mode=mode)
+                outmask |= maskindices
+            out.__setmask__(outmask)
+        # demote 0d arrays back to scalars, for consistency with ndarray.take
+        return out[()]
+
+    # Array methods
+    clip = _arraymethod('clip', onmask=False)
+    copy = _arraymethod('copy')
+    diagonal = _arraymethod('diagonal')
+    flatten = _arraymethod('flatten')
+    repeat = _arraymethod('repeat')
+    squeeze = _arraymethod('squeeze')
+    swapaxes = _arraymethod('swapaxes')
+    T = property(fget=lambda self: self.transpose())
+    transpose = _arraymethod('transpose')
+
+    def tolist(self, fill_value=None):
+        """
+        Return the data portion of the masked array as a hierarchical Python list.
+
+        Data items are converted to the nearest compatible Python type.
+        Masked values are converted to `fill_value`. If `fill_value` is None,
+        the corresponding entries in the output list will be ``None``.
+
+        Parameters
+        ----------
+        fill_value : scalar, optional
+            The value to use for invalid entries. Default is None.
+
+        Returns
+        -------
+        result : list
+            The Python list representation of the masked array.
+
+        Examples
+        --------
+        >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
+        >>> x.tolist()
+        [[1, None, 3], [None, 5, None], [7, None, 9]]
+        >>> x.tolist(-999)
+        [[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
+
+        """
+        _mask = self._mask
+        # No mask ? Just return .data.tolist ?
+        if _mask is nomask:
+            return self._data.tolist()
+        # Explicit fill_value: fill the array and get the list
+        if fill_value is not None:
+            return self.filled(fill_value).tolist()
+        # Structured array.
+        names = self.dtype.names
+        if names:
+            result = self._data.astype([(_, object) for _ in names])
+            for n in names:
+                result[n][_mask[n]] = None
+            return result.tolist()
+        # Standard arrays.
+        if _mask is nomask:
+            return [None]
+        # Set temps to save time when dealing w/ marrays.
+        inishape = self.shape
+        result = np.array(self._data.ravel(), dtype=object)
+        result[_mask.ravel()] = None
+        result.shape = inishape
+        return result.tolist()
+
+    def tostring(self, fill_value=None, order='C'):
+        """
+        This function is a compatibility alias for tobytes. Despite its name it
+        returns bytes not strings.
+        """
+
+        return self.tobytes(fill_value, order=order)
+
+    def tobytes(self, fill_value=None, order='C'):
+        """
+        Return the array data as a string containing the raw bytes in the array.
+
+        The array is filled with a fill value before the string conversion.
+
+        .. versionadded:: 1.9.0
+
+        Parameters
+        ----------
+        fill_value : scalar, optional
+            Value used to fill in the masked values. Default is None, in which
+            case `MaskedArray.fill_value` is used.
+        order : {'C','F','A'}, optional
+            Order of the data item in the copy. Default is 'C'.
+
+            - 'C' -- C order (row major).
+            - 'F' -- Fortran order (column major).
+            - 'A' -- Any, current order of array.
+            - None -- Same as 'A'.
+
+        See Also
+        --------
+        ndarray.tobytes
+        tolist, tofile
+
+        Notes
+        -----
+        As for `ndarray.tobytes`, information about the shape, dtype, etc.,
+        but also about `fill_value`, will be lost.
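+
+        The mask itself is not encoded; only the filled data survives a
+        round-trip, e.g. through ``np.frombuffer`` (a minimal sketch, using
+        the default integer fill value):
+
+        >>> x = np.ma.array([1, 2], mask=[0, 1])
+        >>> np.frombuffer(x.tobytes(), dtype=x.dtype)
+        array([     1, 999999])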
+ + Examples + -------- + >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.tobytes() + '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00' + + """ + return self.filled(fill_value).tobytes(order=order) + + def tofile(self, fid, sep="", format="%s"): + """ + Save a masked array to a file in binary format. + + .. warning:: + This function is not implemented yet. + + Raises + ------ + NotImplementedError + When `tofile` is called. + + """ + raise NotImplementedError("MaskedArray.tofile() not implemented yet.") + + def toflex(self): + """ + Transforms a masked array into a flexible-type array. + + The flexible type array that is returned will have two fields: + + * the ``_data`` field stores the ``_data`` part of the array. + * the ``_mask`` field stores the ``_mask`` part of the array. + + Parameters + ---------- + None + + Returns + ------- + record : ndarray + A new flexible-type `ndarray` with two fields: the first element + containing a value, the second element containing the corresponding + mask boolean. The returned record shape matches self.shape. + + Notes + ----- + A side-effect of transforming a masked array into a flexible `ndarray` is + that meta information (``fill_value``, ...) will be lost. + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> print(x) + [[1 -- 3] + [-- 5 --] + [7 -- 9]] + >>> print(x.toflex()) + [[(1, False) (2, True) (3, False)] + [(4, True) (5, False) (6, True)] + [(7, False) (8, True) (9, False)]] + + """ + # Get the basic dtype. + ddtype = self.dtype + # Make sure we have a mask + _mask = self._mask + if _mask is None: + _mask = make_mask_none(self.shape, ddtype) + # And get its dtype + mdtype = self._mask.dtype + + record = np.ndarray(shape=self.shape, + dtype=[('_data', ddtype), ('_mask', mdtype)]) + record['_data'] = self._data + record['_mask'] = self._mask + return record + torecords = toflex + + # Pickling + def __getstate__(self): + """Return the internal state of the masked array, for pickling + purposes. + + """ + cf = 'CF'[self.flags.fnc] + data_state = super(MaskedArray, self).__reduce__()[2] + return data_state + (getmaskarray(self).tobytes(cf), self._fill_value) + + def __setstate__(self, state): + """Restore the internal state of the masked array, for + pickling purposes. ``state`` is typically the output of the + ``__getstate__`` output, and is a 5-tuple: + + - class name + - a tuple giving the shape of the data + - a typecode for the data + - a binary string for the data + - a binary string for the mask. + + """ + (_, shp, typ, isf, raw, msk, flv) = state + super(MaskedArray, self).__setstate__((shp, typ, isf, raw)) + self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) + self.fill_value = flv + + def __reduce__(self): + """Return a 3-tuple for pickling a MaskedArray. + + """ + return (_mareconstruct, + (self.__class__, self._baseclass, (0,), 'b',), + self.__getstate__()) + + def __deepcopy__(self, memo=None): + from copy import deepcopy + copied = MaskedArray.__new__(type(self), self, copy=True) + if memo is None: + memo = {} + memo[id(self)] = copied + for (k, v) in self.__dict__.items(): + copied.__dict__[k] = deepcopy(v, memo) + return copied + + +def _mareconstruct(subtype, baseclass, baseshape, basetype,): + """Internal function that builds a new MaskedArray from the + information stored in a pickle. 
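+
+    For example (a small sketch), pickling a masked array and loading it
+    back round-trips both the data and the mask through this function:
+
+    >>> import pickle
+    >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+    >>> y = pickle.loads(pickle.dumps(x))
+    >>> bool(y.mask[1])
+    True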
+ + """ + _data = ndarray.__new__(baseclass, baseshape, basetype) + _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) + return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + + +class mvoid(MaskedArray): + """ + Fake a 'void' object to use for masked array with structured dtypes. + """ + + def __new__(self, data, mask=nomask, dtype=None, fill_value=None, + hardmask=False, copy=False, subok=True): + _data = np.array(data, copy=copy, subok=subok, dtype=dtype) + _data = _data.view(self) + _data._hardmask = hardmask + if mask is not nomask: + if isinstance(mask, np.void): + _data._mask = mask + else: + try: + # Mask is already a 0D array + _data._mask = np.void(mask) + except TypeError: + # Transform the mask to a void + mdtype = make_mask_descr(dtype) + _data._mask = np.array(mask, dtype=mdtype)[()] + if fill_value is not None: + _data.fill_value = fill_value + return _data + + def _get_data(self): + # Make sure that the _data part is a np.void + return super(mvoid, self)._data[()] + + _data = property(fget=_get_data) + + def __getitem__(self, indx): + """ + Get the index. + + """ + m = self._mask + if isinstance(m[indx], ndarray): + # Can happen when indx is a multi-dimensional field: + # A = ma.masked_array(data=[([0,1],)], mask=[([True, + # False],)], dtype=[("A", ">i2", (2,))]) + # x = A[0]; y = x["A"]; then y.mask["A"].size==2 + # and we can not say masked/unmasked. + # The result is no longer mvoid! + # See also issue #6724. + return masked_array( + data=self._data[indx], mask=m[indx], + fill_value=self._fill_value[indx], + hard_mask=self._hardmask) + if m is not nomask and m[indx]: + return masked + return self._data[indx] + + def __setitem__(self, indx, value): + self._data[indx] = value + if self._hardmask: + self._mask[indx] |= getattr(value, "_mask", False) + else: + self._mask[indx] = getattr(value, "_mask", False) + + def __str__(self): + m = self._mask + if m is nomask: + return str(self._data) + + rdtype = _replace_dtype_fields(self._data.dtype, "O") + data_arr = super(mvoid, self)._data + res = data_arr.astype(rdtype) + _recursive_printoption(res, self._mask, masked_print_option) + return str(res) + + __repr__ = __str__ + + def __iter__(self): + "Defines an iterator for mvoid" + (_data, _mask) = (self._data, self._mask) + if _mask is nomask: + for d in _data: + yield d + else: + for (d, m) in zip(_data, _mask): + if m: + yield masked + else: + yield d + + def __len__(self): + return self._data.__len__() + + def filled(self, fill_value=None): + """ + Return a copy with masked fields filled with a given value. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries (None by default). + If None, the `fill_value` attribute is used instead. + + Returns + ------- + filled_void + A `np.void` object + + See Also + -------- + MaskedArray.filled + + """ + return asarray(self).filled(fill_value)[()] + + def tolist(self): + """ + Transforms the mvoid object into a tuple. + + Masked fields are replaced by None. 
+ + Returns + ------- + returned_tuple + Tuple of fields + """ + _mask = self._mask + if _mask is nomask: + return self._data.tolist() + result = [] + for (d, m) in zip(self._data, self._mask): + if m: + result.append(None) + else: + # .item() makes sure we return a standard Python object + result.append(d.item()) + return tuple(result) + + +############################################################################## +# Shortcuts # +############################################################################## + + +def isMaskedArray(x): + """ + Test whether input is an instance of MaskedArray. + + This function returns True if `x` is an instance of MaskedArray + and returns False otherwise. Any object is accepted as input. + + Parameters + ---------- + x : object + Object to test. + + Returns + ------- + result : bool + True if `x` is a MaskedArray. + + See Also + -------- + isMA : Alias to isMaskedArray. + isarray : Alias to isMaskedArray. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.eye(3, 3) + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> m = ma.masked_values(a, 0) + >>> m + masked_array(data = + [[1.0 -- --] + [-- 1.0 --] + [-- -- 1.0]], + mask = + [[False True True] + [ True False True] + [ True True False]], + fill_value=0.0) + >>> ma.isMaskedArray(a) + False + >>> ma.isMaskedArray(m) + True + >>> ma.isMaskedArray([0, 1, 2]) + False + + """ + return isinstance(x, MaskedArray) + + +isarray = isMaskedArray +isMA = isMaskedArray # backward compatibility + + +class MaskedConstant(MaskedArray): + # the lone np.ma.masked instance + __singleton = None + + @classmethod + def __has_singleton(cls): + # second case ensures `cls.__singleton` is not just a view on the + # superclass singleton + return cls.__singleton is not None and type(cls.__singleton) is cls + + def __new__(cls): + if not cls.__has_singleton(): + # We define the masked singleton as a float for higher precedence. + # Note that it can be tricky sometimes w/ type comparison + data = np.array(0.) + mask = np.array(True) + + # prevent any modifications + data.flags.writeable = False + mask.flags.writeable = False + + # don't fall back on MaskedArray.__new__(MaskedConstant), since + # that might confuse it - this way, the construction is entirely + # within our control + cls.__singleton = MaskedArray(data, mask=mask).view(cls) + + return cls.__singleton + + def __array_finalize__(self, obj): + if not self.__has_singleton(): + # this handles the `.view` in __new__, which we want to copy across + # properties normally + return super(MaskedConstant, self).__array_finalize__(obj) + elif self is self.__singleton: + # not clear how this can happen, play it safe + pass + else: + # everywhere else, we want to downcast to MaskedArray, to prevent a + # duplicate maskedconstant. + self.__class__ = MaskedArray + MaskedArray.__array_finalize__(self, obj) + + def __array_prepare__(self, obj, context=None): + return self.view(MaskedArray).__array_prepare__(obj, context) + + def __array_wrap__(self, obj, context=None): + return self.view(MaskedArray).__array_wrap__(obj, context) + + def __str__(self): + return str(masked_print_option._display) + + if sys.version_info.major < 3: + def __unicode__(self): + return unicode(masked_print_option._display) + + def __repr__(self): + if self is MaskedConstant.__singleton: + return 'masked' + else: + # it's a subclass, or something is wrong, make it obvious + return object.__repr__(self) + + def __reduce__(self): + """Override of MaskedArray's __reduce__. 
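+
+        Ensures that unpickling returns the unique ``masked`` singleton
+        (a small sketch):
+
+        >>> import pickle
+        >>> pickle.loads(pickle.dumps(np.ma.masked)) is np.ma.masked
+        True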
+ """ + return (self.__class__, ()) + + # inplace operations have no effect. We have to override them to avoid + # trying to modify the readonly data and mask arrays + def __iop__(self, other): + return self + __iadd__ = \ + __isub__ = \ + __imul__ = \ + __ifloordiv__ = \ + __itruediv__ = \ + __ipow__ = \ + __iop__ + del __iop__ # don't leave this around + + def copy(self, *args, **kwargs): + """ Copy is a no-op on the maskedconstant, as it is a scalar """ + # maskedconstant is a scalar, so copy doesn't need to copy. There's + # precedent for this with `np.bool_` scalars. + return self + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + def __setattr__(self, attr, value): + if not self.__has_singleton(): + # allow the singleton to be initialized + return super(MaskedConstant, self).__setattr__(attr, value) + elif self is self.__singleton: + raise AttributeError( + "attributes of {!r} are not writeable".format(self)) + else: + # duplicate instance - we can end up here from __array_finalize__, + # where we set the __class__ attribute + return super(MaskedConstant, self).__setattr__(attr, value) + + +masked = masked_singleton = MaskedConstant() +masked_array = MaskedArray + + +def array(data, dtype=None, copy=False, order=None, + mask=nomask, fill_value=None, keep_mask=True, + hard_mask=False, shrink=True, subok=True, ndmin=0): + """ + Shortcut to MaskedArray. + + The options are in a different order for convenience and backwards + compatibility. + + """ + return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, + subok=subok, keep_mask=keep_mask, + hard_mask=hard_mask, fill_value=fill_value, + ndmin=ndmin, shrink=shrink, order=order) +array.__doc__ = masked_array.__doc__ + + +def is_masked(x): + """ + Determine whether input has masked values. + + Accepts any object as input, but always returns False unless the + input is a MaskedArray containing masked values. + + Parameters + ---------- + x : array_like + Array to check for masked values. + + Returns + ------- + result : bool + True if `x` is a MaskedArray with masked values, False otherwise. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> x + masked_array(data = [-- 1 -- 2 3], + mask = [ True False True False False], + fill_value=999999) + >>> ma.is_masked(x) + True + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) + >>> x + masked_array(data = [0 1 0 2 3], + mask = False, + fill_value=999999) + >>> ma.is_masked(x) + False + + Always returns False if `x` isn't a MaskedArray. + + >>> x = [False, True, False] + >>> ma.is_masked(x) + False + >>> x = 'a string' + >>> ma.is_masked(x) + False + + """ + m = getmask(x) + if m is nomask: + return False + elif m.any(): + return True + return False + + +############################################################################## +# Extrema functions # +############################################################################## + + +class _extrema_operation(_MaskedUFunc): + """ + Generic class for maximum/minimum functions. + + .. note:: + This is the base class for `_maximum_operation` and + `_minimum_operation`. + + """ + def __init__(self, ufunc, compare, fill_value): + super(_extrema_operation, self).__init__(ufunc) + self.compare = compare + self.fill_value_func = fill_value + + def __call__(self, a, b=None): + "Executes the call behavior." + if b is None: + # 2016-04-13, 1.13.0 + warnings.warn( + "Single-argument form of np.ma.{0} is deprecated. 
Use " + "np.ma.{0}.reduce instead.".format(self.__name__), + DeprecationWarning, stacklevel=2) + return self.reduce(a) + return where(self.compare(a, b), a, b) + + def reduce(self, target, axis=np._NoValue): + "Reduce target along the given axis." + target = narray(target, copy=False, subok=True) + m = getmask(target) + + if axis is np._NoValue and target.ndim > 1: + # 2017-05-06, Numpy 1.13.0: warn on axis default + warnings.warn( + "In the future the default for ma.{0}.reduce will be axis=0, " + "not the current None, to match np.{0}.reduce. " + "Explicitly pass 0 or None to silence this warning.".format( + self.__name__ + ), + MaskedArrayFutureWarning, stacklevel=2) + axis = None + + if axis is not np._NoValue: + kwargs = dict(axis=axis) + else: + kwargs = dict() + + if m is nomask: + t = self.f.reduce(target, **kwargs) + else: + target = target.filled( + self.fill_value_func(target)).view(type(target)) + t = self.f.reduce(target, **kwargs) + m = umath.logical_and.reduce(m, **kwargs) + if hasattr(t, '_mask'): + t._mask = m + elif m: + t = masked + return t + + def outer(self, a, b): + "Return the function applied to the outer product of a and b." + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = logical_or.outer(ma, mb) + result = self.f.outer(filled(a), filled(b)) + if not isinstance(result, MaskedArray): + result = result.view(MaskedArray) + result._mask = m + return result + +def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a min method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).min(axis=axis, fill_value=fill_value, + out=out, **kwargs) +min.__doc__ = MaskedArray.min.__doc__ + +def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a max method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).max(axis=axis, fill_value=fill_value, + out=out, **kwargs) +max.__doc__ = MaskedArray.max.__doc__ + + +def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + try: + return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a ptp method or if the method doesn't accept + # a fill_value argument + return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, + out=out, **kwargs) +ptp.__doc__ = MaskedArray.ptp.__doc__ + + +############################################################################## +# Definition of functions from the corresponding methods # +############################################################################## + + +class _frommethod(object): + """ + Define functions from existing MaskedArray methods. + + Parameters + ---------- + methodname : str + Name of the method to transform. 
+ + """ + + def __init__(self, methodname, reversed=False): + self.__name__ = methodname + self.__doc__ = self.getdoc() + self.reversed = reversed + + def getdoc(self): + "Return the doc of the function (from the doc of the method)." + meth = getattr(MaskedArray, self.__name__, None) or\ + getattr(np, self.__name__, None) + signature = self.__name__ + get_object_signature(meth) + if meth is not None: + doc = """ %s\n%s""" % ( + signature, getattr(meth, '__doc__', None)) + return doc + + def __call__(self, a, *args, **params): + if self.reversed: + args = list(args) + a, args[0] = args[0], a + + marr = asanyarray(a) + method_name = self.__name__ + method = getattr(type(marr), method_name, None) + if method is None: + # use the corresponding np function + method = getattr(np, method_name) + + return method(marr, *args, **params) + + +all = _frommethod('all') +anomalies = anom = _frommethod('anom') +any = _frommethod('any') +compress = _frommethod('compress', reversed=True) +cumprod = _frommethod('cumprod') +cumsum = _frommethod('cumsum') +copy = _frommethod('copy') +diagonal = _frommethod('diagonal') +harden_mask = _frommethod('harden_mask') +ids = _frommethod('ids') +maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value) +mean = _frommethod('mean') +minimum = _extrema_operation(umath.minimum, less, minimum_fill_value) +nonzero = _frommethod('nonzero') +prod = _frommethod('prod') +product = _frommethod('prod') +ravel = _frommethod('ravel') +repeat = _frommethod('repeat') +shrink_mask = _frommethod('shrink_mask') +soften_mask = _frommethod('soften_mask') +std = _frommethod('std') +sum = _frommethod('sum') +swapaxes = _frommethod('swapaxes') +#take = _frommethod('take') +trace = _frommethod('trace') +var = _frommethod('var') + +count = _frommethod('count') + +def take(a, indices, axis=None, out=None, mode='raise'): + """ + """ + a = masked_array(a) + return a.take(indices, axis=axis, out=out, mode=mode) + + +def power(a, b, third=None): + """ + Returns element-wise base array raised to power from second array. + + This is the masked array version of `numpy.power`. For details see + `numpy.power`. + + See Also + -------- + numpy.power + + Notes + ----- + The *out* argument to `numpy.power` is not supported, `third` has to be + None. + + """ + if third is not None: + raise MaskError("3-argument power not supported.") + # Get the masks + ma = getmask(a) + mb = getmask(b) + m = mask_or(ma, mb) + # Get the rawdata + fa = getdata(a) + fb = getdata(b) + # Get the type of the result (so that we preserve subclasses) + if isinstance(a, MaskedArray): + basetype = type(a) + else: + basetype = MaskedArray + # Get the result and view it as a (subclass of) MaskedArray + with np.errstate(divide='ignore', invalid='ignore'): + result = np.where(m, fa, umath.power(fa, fb)).view(basetype) + result._update_from(a) + # Find where we're in trouble w/ NaNs and Infs + invalid = np.logical_not(np.isfinite(result.view(ndarray))) + # Add the initial mask + if m is not nomask: + if not (result.ndim): + return masked + result._mask = np.logical_or(m, invalid) + # Fix the invalid parts + if invalid.any(): + if not result.ndim: + return masked + elif result._mask is nomask: + result._mask = invalid + result._data[invalid] = result.fill_value + return result + +argmin = _frommethod('argmin') +argmax = _frommethod('argmax') + +def argsort(a, axis=np._NoValue, kind='quicksort', order=None, endwith=True, fill_value=None): + "Function version of the eponymous method." 
+ a = np.asanyarray(a) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(a) + + if isinstance(a, MaskedArray): + return a.argsort(axis=axis, kind=kind, order=order, + endwith=endwith, fill_value=fill_value) + else: + return a.argsort(axis=axis, kind=kind, order=order) +argsort.__doc__ = MaskedArray.argsort.__doc__ + +def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None): + "Function version of the eponymous method." + a = np.array(a, copy=True, subok=True) + if axis is None: + a = a.flatten() + axis = 0 + + if isinstance(a, MaskedArray): + a.sort(axis=axis, kind=kind, order=order, + endwith=endwith, fill_value=fill_value) + else: + a.sort(axis=axis, kind=kind, order=order) + return a +sort.__doc__ = MaskedArray.sort.__doc__ + + +def compressed(x): + """ + Return all the non-masked data as a 1-D array. + + This function is equivalent to calling the "compressed" method of a + `MaskedArray`, see `MaskedArray.compressed` for details. + + See Also + -------- + MaskedArray.compressed + Equivalent method. + + """ + return asanyarray(x).compressed() + + +def concatenate(arrays, axis=0): + """ + Concatenate a sequence of arrays along the given axis. + + Parameters + ---------- + arrays : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. Default is 0. + + Returns + ------- + result : MaskedArray + The concatenated array with any masked entries preserved. + + See Also + -------- + numpy.concatenate : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(3) + >>> a[1] = ma.masked + >>> b = ma.arange(2, 5) + >>> a + masked_array(data = [0 -- 2], + mask = [False True False], + fill_value = 999999) + >>> b + masked_array(data = [2 3 4], + mask = False, + fill_value = 999999) + >>> ma.concatenate([a, b]) + masked_array(data = [0 -- 2 2 3 4], + mask = [False True False False False False], + fill_value = 999999) + + """ + d = np.concatenate([getdata(a) for a in arrays], axis) + rcls = get_masked_subclass(*arrays) + data = d.view(rcls) + # Check whether one of the arrays has a non-empty mask. + for x in arrays: + if getmask(x) is not nomask: + break + else: + return data + # OK, so we have to concatenate the masks + dm = np.concatenate([getmaskarray(a) for a in arrays], axis) + dm = dm.reshape(d.shape) + + # If we decide to keep a '_shrinkmask' option, we want to check that + # all of them are True, and then check for dm.any() + data._mask = _shrink_mask(dm) + return data + + +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + This function is the equivalent of `numpy.diag` that takes masked + values into account, see `numpy.diag` for details. + + See Also + -------- + numpy.diag : Equivalent function for ndarrays. + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + +def left_shift(a, n): + """ + Shift the bits of an integer to the left. + + This is the masked array version of `numpy.left_shift`, for details + see that function. 
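+
+    For instance (a small sketch), masked entries stay masked after the
+    shift:
+
+    >>> print(np.ma.left_shift(np.ma.array([1, 2, 4], mask=[0, 0, 1]), 1))
+    [2 4 --]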
+
+    See Also
+    --------
+    numpy.left_shift
+
+    """
+    m = getmask(a)
+    if m is nomask:
+        d = umath.left_shift(filled(a), n)
+        return masked_array(d)
+    else:
+        d = umath.left_shift(filled(a, 0), n)
+        return masked_array(d, mask=m)
+
+
+def right_shift(a, n):
+    """
+    Shift the bits of an integer to the right.
+
+    This is the masked array version of `numpy.right_shift`, for details
+    see that function.
+
+    See Also
+    --------
+    numpy.right_shift
+
+    """
+    m = getmask(a)
+    if m is nomask:
+        d = umath.right_shift(filled(a), n)
+        return masked_array(d)
+    else:
+        d = umath.right_shift(filled(a, 0), n)
+        return masked_array(d, mask=m)
+
+
+def put(a, indices, values, mode='raise'):
+    """
+    Set storage-indexed locations to corresponding values.
+
+    This function is equivalent to `MaskedArray.put`, see that method
+    for details.
+
+    See Also
+    --------
+    MaskedArray.put
+
+    """
+    # We can't use 'frommethod', the order of arguments is different
+    try:
+        return a.put(indices, values, mode=mode)
+    except AttributeError:
+        return narray(a, copy=False).put(indices, values, mode=mode)
+
+
+def putmask(a, mask, values):  # , mode='raise'):
+    """
+    Changes elements of an array based on conditional and input values.
+
+    This is the masked array version of `numpy.putmask`, for details see
+    `numpy.putmask`.
+
+    See Also
+    --------
+    numpy.putmask
+
+    Notes
+    -----
+    Using a masked array as `values` will **not** transform a `ndarray` into
+    a `MaskedArray`.
+
+    """
+    # We can't use 'frommethod', the order of arguments is different
+    if not isinstance(a, MaskedArray):
+        a = a.view(MaskedArray)
+    (valdata, valmask) = (getdata(values), getmask(values))
+    if getmask(a) is nomask:
+        if valmask is not nomask:
+            a._sharedmask = True
+            a._mask = make_mask_none(a.shape, a.dtype)
+            np.copyto(a._mask, valmask, where=mask)
+    elif a._hardmask:
+        if valmask is not nomask:
+            m = a._mask.copy()
+            np.copyto(m, valmask, where=mask)
+            a.mask |= m
+    else:
+        if valmask is nomask:
+            valmask = getmaskarray(values)
+        np.copyto(a._mask, valmask, where=mask)
+    np.copyto(a._data, valdata, where=mask)
+    return
+
+
+def transpose(a, axes=None):
+    """
+    Permute the dimensions of an array.
+
+    This function is exactly equivalent to `numpy.transpose`.
+
+    See Also
+    --------
+    numpy.transpose : Equivalent function in top-level NumPy module.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> x = ma.arange(4).reshape((2,2))
+    >>> x[1, 1] = ma.masked
+    >>> x
+    masked_array(data =
+     [[0 1]
+     [2 --]],
+                 mask =
+     [[False False]
+     [False  True]],
+           fill_value = 999999)
+
+    >>> ma.transpose(x)
+    masked_array(data =
+     [[0 2]
+     [1 --]],
+                 mask =
+     [[False False]
+     [False  True]],
+           fill_value = 999999)
+
+    """
+    # We can't use 'frommethod', as 'transpose' doesn't take keywords
+    try:
+        return a.transpose(axes)
+    except AttributeError:
+        return narray(a, copy=False).transpose(axes).view(MaskedArray)
+
+
+def reshape(a, new_shape, order='C'):
+    """
+    Returns an array containing the same data with a new shape.
+
+    Refer to `MaskedArray.reshape` for full documentation.
+
+    See Also
+    --------
+    MaskedArray.reshape : equivalent function
+
+    """
+    # We can't use 'frommethod': it whines about some parameters.
+    try:
+        return a.reshape(new_shape, order=order)
+    except AttributeError:
+        _tmp = narray(a, copy=False).reshape(new_shape, order=order)
+        return _tmp.view(MaskedArray)
+
+
+def resize(x, new_shape):
+    """
+    Return a new masked array with the specified size and shape.
+
+    This is the masked equivalent of the `numpy.resize` function. The new
+    array is filled with repeated copies of `x` (in the order that the
+    data are stored in memory). If `x` is masked, the new array will be
+    masked, and the new mask will be a repetition of the old one.
+
+    See Also
+    --------
+    numpy.resize : Equivalent function in the top level NumPy module.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = ma.array([[1, 2], [3, 4]])
+    >>> a[0, 1] = ma.masked
+    >>> a
+    masked_array(data =
+     [[1 --]
+     [3 4]],
+                 mask =
+     [[False  True]
+     [False False]],
+           fill_value = 999999)
+    >>> np.resize(a, (3, 3))
+    array([[1, 2, 3],
+           [4, 1, 2],
+           [3, 4, 1]])
+    >>> ma.resize(a, (3, 3))
+    masked_array(data =
+     [[1 -- 3]
+     [4 1 --]
+     [3 4 1]],
+                 mask =
+     [[False  True False]
+     [False False  True]
+     [False False False]],
+           fill_value = 999999)
+
+    A MaskedArray is always returned, regardless of the input type.
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> ma.resize(a, (3, 3))
+    masked_array(data =
+     [[1 2 3]
+     [4 1 2]
+     [3 4 1]],
+                 mask =
+     False,
+           fill_value = 999999)
+
+    """
+    # We can't use _frommethods here, as N.resize is notoriously whiny.
+    m = getmask(x)
+    if m is not nomask:
+        m = np.resize(m, new_shape)
+    result = np.resize(x, new_shape).view(get_masked_subclass(x))
+    if result.ndim:
+        result._mask = m
+    return result
+
+
+def rank(obj):
+    """
+    maskedarray version of the numpy function.
+
+    .. note::
+        Deprecated since 1.10.0
+
+    """
+    # 2015-04-12, 1.10.0
+    warnings.warn(
+        "`rank` is deprecated; use the `ndim` function instead. ",
+        np.VisibleDeprecationWarning, stacklevel=2)
+    return np.ndim(getdata(obj))
+
+rank.__doc__ = np.rank.__doc__
+
+
+def ndim(obj):
+    """
+    maskedarray version of the numpy function.
+
+    """
+    return np.ndim(getdata(obj))
+
+ndim.__doc__ = np.ndim.__doc__
+
+
+def shape(obj):
+    "maskedarray version of the numpy function."
+    return np.shape(getdata(obj))
+shape.__doc__ = np.shape.__doc__
+
+
+def size(obj, axis=None):
+    "maskedarray version of the numpy function."
+    return np.size(getdata(obj), axis)
+size.__doc__ = np.size.__doc__
+
+
+##############################################################################
+#                            Extra functions                                 #
+##############################################################################
+
+
+def where(condition, x=_NoValue, y=_NoValue):
+    """
+    Return a masked array with elements from `x` or `y`, depending on condition.
+
+    .. note::
+        When only `condition` is provided, this function is identical to
+        `nonzero`. The rest of this documentation covers only the case where
+        all three arguments are provided.
+
+    Parameters
+    ----------
+    condition : array_like, bool
+        Where True, yield `x`, otherwise yield `y`.
+    x, y : array_like, optional
+        Values from which to choose. `x`, `y` and `condition` need to be
+        broadcastable to some shape.
+
+    Returns
+    -------
+    out : MaskedArray
+        A masked array with `masked` elements where the condition is masked,
+        elements from `x` where `condition` is True, and elements from `y`
+        elsewhere.
+
+    See Also
+    --------
+    numpy.where : Equivalent function in the top-level NumPy module.
+    nonzero : The function that is called when x and y are omitted.
+
+    Examples
+    --------
+    >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
+    ...                                                    [1, 0, 1],
[0, 1, 0]]) + >>> print(x) + [[0.0 -- 2.0] + [-- 4.0 --] + [6.0 -- 8.0]] + >>> print(np.ma.where(x > 5, x, -3.1416)) + [[-3.1416 -- -3.1416] + [-- -3.1416 --] + [6.0 -- 8.0]] + + """ + + # handle the single-argument case + missing = (x is _NoValue, y is _NoValue).count(True) + if missing == 1: + raise ValueError("Must provide both 'x' and 'y' or neither.") + if missing == 2: + return nonzero(condition) + + # we only care if the condition is true - false or masked pick y + cf = filled(condition, False) + xd = getdata(x) + yd = getdata(y) + + # we need the full arrays here for correct final dimensions + cm = getmaskarray(condition) + xm = getmaskarray(x) + ym = getmaskarray(y) + + # deal with the fact that masked.dtype == float64, but we don't actually + # want to treat it as that. + if x is masked and y is not masked: + xd = np.zeros((), dtype=yd.dtype) + xm = np.ones((), dtype=ym.dtype) + elif y is masked and x is not masked: + yd = np.zeros((), dtype=xd.dtype) + ym = np.ones((), dtype=xm.dtype) + + data = np.where(cf, xd, yd) + mask = np.where(cf, xm, ym) + mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) + + # collapse the mask, for backwards compatibility + mask = _shrink_mask(mask) + + return masked_array(data, mask=mask) + + +def choose(indices, choices, out=None, mode='raise'): + """ + Use an index array to construct a new array from a set of choices. + + Given an array of integers and a set of n choice arrays, this method + will create a new array that merges each of the choice arrays. Where a + value in `a` is i, the new array will have the value that choices[i] + contains in the same place. + + Parameters + ---------- + a : ndarray of ints + This array must contain integers in ``[0, n-1]``, where n is the + number of choices. + choices : sequence of arrays + Choice arrays. The index array and all of the choices should be + broadcastable to the same shape. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and `dtype`. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' : raise an error + * 'wrap' : wrap around + * 'clip' : clip to the range + + Returns + ------- + merged_array : array + + See Also + -------- + choose : equivalent function + + Examples + -------- + >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) + >>> a = np.array([2, 1, 0]) + >>> np.ma.choose(a, choice) + masked_array(data = [3 2 1], + mask = False, + fill_value=999999) + + """ + def fmask(x): + "Returns the filled array, or True if masked." + if x is masked: + return True + return filled(x) + + def nmask(x): + "Returns the mask, True if ``masked``, False if ``nomask``." + if x is masked: + return True + return getmask(x) + # Get the indices. + c = filled(indices, 0) + # Get the masks. + masks = [nmask(x) for x in choices] + data = [fmask(x) for x in choices] + # Construct the mask + outputmask = np.choose(c, masks, mode=mode) + outputmask = make_mask(mask_or(outputmask, getmask(indices)), + copy=0, shrink=True) + # Get the choices. + d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(outputmask) + return out + d.__setmask__(outputmask) + return d + + +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. 
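# Editor's sketch of the three-argument `where` described above: a masked
# condition element yields a masked result, whatever `x` and `y` hold
# (illustrative values).
import numpy.ma as ma

cond = ma.array([True, False, True], mask=[0, 0, 1])
print(ma.where(cond, 1, -1))  # [1 -1 --]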
The real and imaginary parts of + complex numbers are rounded separately. Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! + + """ + if out is None: + return np.round_(a, decimals, out) + else: + np.round_(getdata(a), decimals, out) + if hasattr(out, '_mask'): + out._mask = getmask(a) + return out +round = round_ + + +# Needed by dot, so move here from extras.py. It will still be exported +# from extras.py for compatibility. +def mask_rowcols(a, axis=None): + """ + Mask rows and/or columns of a 2D array that contain masked values. + + Mask whole rows and/or columns of a 2D array that contain + masked values. The masking behavior is selected using the + `axis` parameter. + + - If `axis` is None, rows *and* columns are masked. + - If `axis` is 0, only rows are masked. + - If `axis` is 1 or -1, only columns are masked. + + Parameters + ---------- + a : array_like, MaskedArray + The array to mask. If not a MaskedArray instance (or if no array + elements are masked). The result is a MaskedArray with `mask` set + to `nomask` (False). Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. If None, applies to a + flattened version of the array. + + Returns + ------- + a : MaskedArray + A modified version of the input array, masked depending on the value + of the `axis` parameter. + + Raises + ------ + NotImplementedError + If input array `a` is not 2D. + + See Also + -------- + mask_rows : Mask rows of a 2D array that contain masked values. + mask_cols : Mask cols of a 2D array that contain masked values. + masked_where : Mask where a condition is met. + + Notes + ----- + The input array's mask is modified by this function. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array(data = + [[0 0 0] + [0 -- 0] + [0 0 0]], + mask = + [[False False False] + [False True False] + [False False False]], + fill_value=999999) + >>> ma.mask_rowcols(a) + masked_array(data = + [[0 -- 0] + [-- -- --] + [0 -- 0]], + mask = + [[False True False] + [ True True True] + [False True False]], + fill_value=999999) + + """ + a = array(a, subok=False) + if a.ndim != 2: + raise NotImplementedError("mask_rowcols works for 2D arrays only.") + m = getmask(a) + # Nothing is masked: return a + if m is nomask or not m.any(): + return a + maskedval = m.nonzero() + a._mask = a._mask.copy() + if not axis: + a[np.unique(maskedval[0])] = masked + if axis in [None, 1, -1]: + a[:, np.unique(maskedval[1])] = masked + return a + + +# Include masked dot here to avoid import problems in getting it from +# extras.py. Note that it is not included in __all__, but rather exported +# from extras in order to avoid backward compatibility problems. +def dot(a, b, strict=False, out=None): + """ + Return the dot product of two arrays. + + This function is the equivalent of `numpy.dot` that takes masked values + into account. Note that `strict` and `out` are in different position + than in the method version. 
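# Editor's sketch of the `round_` caveat noted above: when `out` is a plain
# ndarray (no mask attribute), the mask of `a` is silently lost
# (illustrative values).
import numpy as np
import numpy.ma as ma

a = ma.array([1.234, 5.678], mask=[0, 1])
print(ma.round_(a, 1))   # [1.2 --] : mask preserved
out = np.empty(2)
ma.round_(a, 1, out)
print(out)               # [1.2 5.7] : mask lost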
In order to maintain compatibility with the + corresponding method, it is recommended that the optional arguments be + treated as keyword only. At some point that may be mandatory. + + .. note:: + Works only with 2-D arrays at the moment. + + + Parameters + ---------- + a, b : masked_array_like + Inputs arrays. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) for + the computation. Default is False. Propagating the mask means that + if a masked value appears in a row or column, the whole row or + column is considered masked. + out : masked_array, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + .. versionadded:: 1.10.2 + + See Also + -------- + numpy.dot : Equivalent function for ndarrays. + + Examples + -------- + >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) + >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) + >>> np.ma.dot(a, b) + masked_array(data = + [[21 26] + [45 64]], + mask = + [[False False] + [False False]], + fill_value = 999999) + >>> np.ma.dot(a, b, strict=True) + masked_array(data = + [[-- --] + [-- 64]], + mask = + [[ True True] + [ True False]], + fill_value = 999999) + + """ + # !!!: Works only with 2D arrays. There should be a way to get it to run + # with higher dimension + if strict and (a.ndim == 2) and (b.ndim == 2): + a = mask_rowcols(a, 0) + b = mask_rowcols(b, 1) + am = ~getmaskarray(a) + bm = ~getmaskarray(b) + + if out is None: + d = np.dot(filled(a, 0), filled(b, 0)) + m = ~np.dot(am, bm) + if d.ndim == 0: + d = np.asarray(d) + r = d.view(get_masked_subclass(a, b)) + r.__setmask__(m) + return r + else: + d = np.dot(filled(a, 0), filled(b, 0), out._data) + if out.mask.shape != d.shape: + out._mask = np.empty(d.shape, MaskType) + np.dot(am, bm, out._mask) + np.logical_not(out._mask, out._mask) + return out + + +def inner(a, b): + """ + Returns the inner product of a and b for arrays of floating point types. + + Like the generic NumPy equivalent the product sum is over the last dimension + of a and b. The first argument is not conjugated. + + """ + fa = filled(a, 0) + fb = filled(b, 0) + if fa.ndim == 0: + fa.shape = (1,) + if fb.ndim == 0: + fb.shape = (1,) + return np.inner(fa, fb).view(MaskedArray) +inner.__doc__ = doc_note(np.inner.__doc__, + "Masked values are replaced by 0.") +innerproduct = inner + + +def outer(a, b): + "maskedarray version of the numpy function." 
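# Editor's sketch of `inner` on masked input: masked values are replaced by
# 0 before the product sum (illustrative values).
import numpy as np
import numpy.ma as ma

a = ma.array([1., 2., 3.], mask=[0, 1, 0])
b = np.array([1., 1., 1.])
print(ma.inner(a, b))  # 4.0 : the masked 2.0 contributes nothing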
+    fa = filled(a, 0).ravel()
+    fb = filled(b, 0).ravel()
+    d = np.outer(fa, fb)
+    ma = getmask(a)
+    mb = getmask(b)
+    if ma is nomask and mb is nomask:
+        return masked_array(d)
+    ma = getmaskarray(a)
+    mb = getmaskarray(b)
+    m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
+    return masked_array(d, mask=m)
+outer.__doc__ = doc_note(np.outer.__doc__,
+                         "Masked values are replaced by 0.")
+outerproduct = outer
+
+
+def _convolve_or_correlate(f, a, v, mode, propagate_mask):
+    """
+    Helper function for ma.correlate and ma.convolve
+    """
+    if propagate_mask:
+        # results which are contributed to by either item in any pair being invalid
+        mask = (
+            f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
+          | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
+        )
+        data = f(getdata(a), getdata(v), mode=mode)
+    else:
+        # results which are not contributed to by any pair of valid elements
+        # (pass `mode` here as well so the mask and data shapes agree)
+        mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode)
+        data = f(filled(a, 0), filled(v, 0), mode=mode)
+
+    return masked_array(data, mask=mask)
+
+
+def correlate(a, v, mode='valid', propagate_mask=True):
+    """
+    Cross-correlation of two 1-dimensional sequences.
+
+    Parameters
+    ----------
+    a, v : array_like
+        Input sequences.
+    mode : {'valid', 'same', 'full'}, optional
+        Refer to the `np.convolve` docstring.  Note that the default
+        is 'valid', unlike `convolve`, which uses 'full'.
+    propagate_mask : bool
+        If True, then a result element is masked if any masked element
+        contributes towards it.
+        If False, then a result element is only masked if no non-masked
+        element contributes towards it.
+
+    Returns
+    -------
+    out : MaskedArray
+        Discrete cross-correlation of `a` and `v`.
+
+    See Also
+    --------
+    numpy.correlate : Equivalent function in the top-level NumPy module.
+    """
+    return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)
+
+
+def convolve(a, v, mode='full', propagate_mask=True):
+    """
+    Returns the discrete, linear convolution of two one-dimensional sequences.
+
+    Parameters
+    ----------
+    a, v : array_like
+        Input sequences.
+    mode : {'valid', 'same', 'full'}, optional
+        Refer to the `np.convolve` docstring.
+    propagate_mask : bool
+        If True, then if any masked element is included in the sum for a result
+        element, then the result is masked.
+        If False, then the result element is only masked if no non-masked cells
+        contribute towards it.
+
+    Returns
+    -------
+    out : MaskedArray
+        Discrete, linear convolution of `a` and `v`.
+
+    See Also
+    --------
+    numpy.convolve : Equivalent function in the top-level NumPy module.
+    """
+    return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)
+
+
+def allequal(a, b, fill_value=True):
+    """
+    Return True if all entries of a and b are equal, using
+    fill_value as a truth value where either or both are masked.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Input arrays to compare.
+    fill_value : bool, optional
+        Whether masked values in a or b are considered equal (True) or not
+        (False).
+
+    Returns
+    -------
+    y : bool
+        Returns True if all entries of the two arrays are equal (with
+        masked entries compared according to `fill_value`), False otherwise.
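# Editor's sketch contrasting the two `propagate_mask` policies described
# above, using `mode='full'` and illustrative values.
import numpy as np
import numpy.ma as ma

a = ma.array([1, 1, 1], mask=[0, 1, 0])
v = np.array([1, 1])
print(ma.convolve(a, v, propagate_mask=True))   # [1 -- -- 1]
print(ma.convolve(a, v, propagate_mask=False))  # [1 1 1 1] : masked cells count as 0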
+ + See Also + -------- + all, any + numpy.ma.allclose + + Examples + -------- + >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data = [10000000000.0 1e-07 --], + mask = [False False True], + fill_value=1e+20) + + >>> b = array([1e10, 1e-7, -42.0]) + >>> b + array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) + >>> ma.allequal(a, b, fill_value=False) + False + >>> ma.allequal(a, b) + True + + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + + +def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This function is equivalent to `allclose` except that masked values + are treated as equal (default) or unequal, depending on the `masked_equal` + argument. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + masked_equal : bool, optional + Whether masked values in `a` and `b` are considered equal (True) or not + (False). They are considered equal by default. + rtol : float, optional + Relative tolerance. The relative difference is equal to ``rtol * b``. + Default is 1e-5. + atol : float, optional + Absolute tolerance. The absolute difference is equal to `atol`. + Default is 1e-8. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any + numpy.allclose : the non-masked `allclose`. + + Notes + ----- + If the following equation is element-wise True, then `allclose` returns + True:: + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of `a` and `b` are equal subject to + given tolerances. + + Examples + -------- + >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data = [10000000000.0 1e-07 --], + mask = [False False True], + fill_value = 1e+20) + >>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) + >>> ma.allclose(a, b) + False + + >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) + >>> ma.allclose(a, b) + True + >>> ma.allclose(a, b, masked_equal=False) + False + + Masked values are not compared directly. + + >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) + >>> ma.allclose(a, b) + True + >>> ma.allclose(a, b, masked_equal=False) + False + + """ + x = masked_array(a, copy=False) + y = masked_array(b, copy=False) + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = np.result_type(y, 1.) + if y.dtype != dtype: + y = masked_array(y, dtype=dtype, copy=False) + + m = mask_or(getmask(x), getmask(y)) + xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) + # If we have some infs, they should fall at the same place. 
+    if not np.all(xinf == filled(np.isinf(y), False)):
+        return False
+    # No infs at all
+    if not np.any(xinf):
+        d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+                   masked_equal)
+        return np.all(d)
+
+    if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
+        return False
+    x = x[~xinf]
+    y = y[~xinf]
+
+    d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+               masked_equal)
+
+    return np.all(d)
+
+
+def asarray(a, dtype=None, order=None):
+    """
+    Convert the input to a masked array of the given data-type.
+
+    No copy is performed if the input is already an `ndarray`. If `a` is
+    a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to a masked array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists, ndarrays and masked arrays.
+    dtype : dtype, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major ('C') or column-major ('FORTRAN') memory
+        representation. Default is 'C'.
+
+    Returns
+    -------
+    out : MaskedArray
+        Masked array interpretation of `a`.
+
+    See Also
+    --------
+    asanyarray : Similar to `asarray`, but conserves subclasses.
+
+    Examples
+    --------
+    >>> x = np.arange(10.).reshape(2, 5)
+    >>> x
+    array([[ 0.,  1.,  2.,  3.,  4.],
+           [ 5.,  6.,  7.,  8.,  9.]])
+    >>> np.ma.asarray(x)
+    masked_array(data =
+     [[ 0.  1.  2.  3.  4.]
+     [ 5.  6.  7.  8.  9.]],
+                 mask =
+     False,
+           fill_value = 1e+20)
+    >>> type(np.ma.asarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    order = order or 'C'
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
+                        subok=False, order=order)
+
+
+def asanyarray(a, dtype=None):
+    """
+    Convert the input to a masked array, conserving subclasses.
+
+    If `a` is a subclass of `MaskedArray`, its class is conserved.
+    No copy is performed if the input is already an `ndarray`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.
+    dtype : dtype, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major ('C') or column-major ('FORTRAN') memory
+        representation. Default is 'C'.
+
+    Returns
+    -------
+    out : MaskedArray
+        MaskedArray interpretation of `a`.
+
+    See Also
+    --------
+    asarray : Similar to `asanyarray`, but does not conserve subclass.
+
+    Examples
+    --------
+    >>> x = np.arange(10.).reshape(2, 5)
+    >>> x
+    array([[ 0.,  1.,  2.,  3.,  4.],
+           [ 5.,  6.,  7.,  8.,  9.]])
+    >>> np.ma.asanyarray(x)
+    masked_array(data =
+     [[ 0.  1.  2.  3.  4.]
+     [ 5.  6.  7.  8.  9.]],
+                 mask =
+     False,
+           fill_value = 1e+20)
+    >>> type(np.ma.asanyarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    # workaround for #8666, to preserve identity. Ideally the bottom line
+    # would handle this for us.
+    if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
+        return a
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
+
+
+##############################################################################
+#                                 Pickling                                   #
+##############################################################################
+
+def _pickle_warn(method):
+    # NumPy 1.15.0, 2017-12-10
+    warnings.warn(
+        "np.ma.{method} is deprecated, use pickle.{method} instead"
+        .format(method=method),
+        DeprecationWarning,
+        stacklevel=3)
+
+
+def dump(a, F):
+    """
+    Pickle a masked array to a file.
+
+    This is a wrapper around ``cPickle.dump``.
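# Editor's sketch of the subclass behavior contrasted above; `Sub` is a
# throwaway illustrative subclass.
import numpy as np

class Sub(np.ma.MaskedArray):
    pass

s = np.arange(3).view(Sub)
print(type(np.ma.asarray(s)))     # <class 'numpy.ma.core.MaskedArray'>
print(type(np.ma.asanyarray(s)))  # <class '__main__.Sub'>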
+
+    Parameters
+    ----------
+    a : MaskedArray
+        The array to be pickled.
+    F : str or file-like object
+        The file to pickle `a` to. If a string, the full path to the file.
+
+    """
+    _pickle_warn('dump')
+    if not hasattr(F, 'readline'):
+        with open(F, 'w') as F:
+            pickle.dump(a, F)
+    else:
+        pickle.dump(a, F)
+
+
+def dumps(a):
+    """
+    Return a string corresponding to the pickling of a masked array.
+
+    This is a wrapper around ``cPickle.dumps``.
+
+    Parameters
+    ----------
+    a : MaskedArray
+        The array for which the string representation of the pickle is
+        returned.
+
+    """
+    _pickle_warn('dumps')
+    return pickle.dumps(a)
+
+
+def load(F):
+    """
+    Wrapper around ``cPickle.load`` which accepts either a file-like object
+    or a filename.
+
+    Parameters
+    ----------
+    F : str or file
+        The file or file name to load.
+
+    See Also
+    --------
+    dump : Pickle an array
+
+    Notes
+    -----
+    This is different from `numpy.load`, which does not use cPickle but loads
+    the NumPy binary .npy format.
+
+    """
+    _pickle_warn('load')
+    if not hasattr(F, 'readline'):
+        with open(F, 'r') as F:
+            return pickle.load(F)
+    else:
+        return pickle.load(F)
+
+
+def loads(strg):
+    """
+    Load a pickle from the current string.
+
+    The result of ``cPickle.loads(strg)`` is returned.
+
+    Parameters
+    ----------
+    strg : str
+        The string to load.
+
+    See Also
+    --------
+    dumps : Return a string corresponding to the pickling of a masked array.
+
+    """
+    _pickle_warn('loads')
+    return pickle.loads(strg)
+
+
+def fromfile(file, dtype=float, count=-1, sep=''):
+    raise NotImplementedError(
+        "fromfile() not yet implemented for a MaskedArray.")
+
+
+def fromflex(fxarray):
+    """
+    Build a masked array from a suitable flexible-type array.
+
+    The input array has to have a data-type with ``_data`` and ``_mask``
+    fields. This type of array is output by `MaskedArray.toflex`.
+
+    Parameters
+    ----------
+    fxarray : ndarray
+        The structured input array, containing ``_data`` and ``_mask``
+        fields. If present, other fields are discarded.
+
+    Returns
+    -------
+    result : MaskedArray
+        The constructed masked array.
+
+    See Also
+    --------
+    MaskedArray.toflex : Build a flexible-type array from a masked array.
+
+    Examples
+    --------
+    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
+    >>> rec = x.toflex()
+    >>> rec
+    array([[(0, False), (1, True), (2, False)],
+           [(3, True), (4, False), (5, True)],
+           [(6, False), (7, True), (8, False)]],
+          dtype=[('_data', '<i4'), ('_mask', '|b1')])
+    >>> x2 = np.ma.fromflex(rec)
+    >>> x2
+    masked_array(data =
+     [[0 -- 2]
+     [-- 4 --]
+     [6 -- 8]],
+                 mask =
+     [[False  True False]
+     [ True False  True]
+     [False  True False]],
+           fill_value = 999999)
+
+    Extra fields can be present in the structured array but are discarded:
+
+    >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
+    >>> rec2 = np.zeros((2, 2), dtype=dt)
+    >>> rec2
+    array([[(0, False, 0.0), (0, False, 0.0)],
+           [(0, False, 0.0), (0, False, 0.0)]],
+          dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')])
+    >>> y = np.ma.fromflex(rec2)
+    >>> y
+    masked_array(data =
+     [[0 0]
+     [0 0]],
+                 mask =
+     [[False False]
+     [False False]],
+           fill_value = 999999)
+
+    """
+    return masked_array(fxarray['_data'], mask=fxarray['_mask'])
+
+
+class _convert2ma(object):
+
+    """
+    Convert functions from numpy to numpy.ma.
+
+    Parameters
+    ----------
+    _methodname : string
+        Name of the method to transform.
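# Editor's sketch of the `toflex`/`fromflex` round trip described above
# (illustrative values).
import numpy as np

x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
rec = x.toflex()             # structured array with '_data' and '_mask' fields
y = np.ma.fromflex(rec)
assert np.ma.allequal(x, y)  # data and mask both survive the round trip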
+ + """ + __doc__ = None + + def __init__(self, funcname, params=None): + self._func = getattr(np, funcname) + self.__doc__ = self.getdoc() + self._extras = params or {} + + def getdoc(self): + "Return the doc of the function (from the doc of the method)." + doc = getattr(self._func, '__doc__', None) + sig = get_object_signature(self._func) + if doc: + # Add the signature of the function at the beginning of the doc + if sig: + sig = "%s%s\n" % (self._func.__name__, sig) + doc = sig + doc + return doc + + def __call__(self, *args, **params): + # Find the common parameters to the call and the definition + _extras = self._extras + common_params = set(params).intersection(_extras) + # Drop the common parameters from the call + for p in common_params: + _extras[p] = params.pop(p) + # Get the result + result = self._func.__call__(*args, **params).view(MaskedArray) + if "fill_value" in common_params: + result.fill_value = _extras.get("fill_value", None) + if "hardmask" in common_params: + result._hardmask = bool(_extras.get("hard_mask", False)) + return result + +arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False)) +clip = np.clip +diff = np.diff +empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False)) +empty_like = _convert2ma('empty_like') +frombuffer = _convert2ma('frombuffer') +fromfunction = _convert2ma('fromfunction') +identity = _convert2ma( + 'identity', params=dict(fill_value=None, hardmask=False)) +indices = np.indices +ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False)) +ones_like = np.ones_like +squeeze = np.squeeze +zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False)) +zeros_like = np.zeros_like + + +def append(a, b, axis=None): + """Append values to the end of an array. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Values are appended to a copy of this array. + b : array_like + These values are appended to a copy of `a`. It must be of the + correct shape (the same shape as `a`, excluding `axis`). If `axis` + is not specified, `b` can be any shape and will be flattened + before use. + axis : int, optional + The axis along which `v` are appended. If `axis` is not given, + both `a` and `b` are flattened before use. + + Returns + ------- + append : MaskedArray + A copy of `a` with `b` appended to `axis`. Note that `append` + does not occur in-place: a new array is allocated and filled. If + `axis` is None, the result is a flattened array. + + See Also + -------- + numpy.append : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_values([1, 2, 3], 2) + >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + >>> print(ma.append(a, b)) + [1 -- 3 4 5 6 -- 8 9] + """ + return concatenate([a, b], axis) diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/core.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/core.pyc new file mode 100644 index 0000000..0009f80 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/core.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/extras.py b/project/venv/lib/python2.7/site-packages/numpy/ma/extras.py new file mode 100644 index 0000000..3be4d36 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/extras.py @@ -0,0 +1,1901 @@ +""" +Masked arrays add-ons. + +A collection of utilities for `numpy.ma`. 
+ +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +from __future__ import division, absolute_import, print_function + +__all__ = [ + 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', + 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', + 'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols', + 'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', + 'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges', + 'hsplit', 'hstack', 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', + 'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_', + 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', + 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', + ] + +import itertools +import warnings + +from . import core as ma +from .core import ( + MaskedArray, MAError, add, array, asarray, concatenate, filled, count, + getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, + nomask, ones, sort, zeros, getdata, get_masked_subclass, dot, + mask_rowcols + ) + +import numpy as np +from numpy import ndarray, array as nxarray +import numpy.core.umath as umath +from numpy.core.multiarray import normalize_axis_index +from numpy.core.numeric import normalize_axis_tuple +from numpy.lib.function_base import _ureduce +from numpy.lib.index_tricks import AxisConcatenator + + +def issequence(seq): + """ + Is seq a sequence (ndarray, list or tuple)? + + """ + return isinstance(seq, (ndarray, tuple, list)) + + +def count_masked(arr, axis=None): + """ + Count the number of masked elements along the given axis. + + Parameters + ---------- + arr : array_like + An array with (possibly) masked elements. + axis : int, optional + Axis along which to count. If None (default), a flattened + version of the array is used. + + Returns + ------- + count : int, ndarray + The total number of masked elements (axis=None) or the number + of masked elements along each slice of the given axis. + + See Also + -------- + MaskedArray.count : Count non-masked elements. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(9).reshape((3,3)) + >>> a = ma.array(a) + >>> a[1, 0] = ma.masked + >>> a[1, 2] = ma.masked + >>> a[2, 1] = ma.masked + >>> a + masked_array(data = + [[0 1 2] + [-- 4 --] + [6 -- 8]], + mask = + [[False False False] + [ True False True] + [False True False]], + fill_value=999999) + >>> ma.count_masked(a) + 3 + + When the `axis` keyword is used an array is returned. + + >>> ma.count_masked(a, axis=0) + array([1, 1, 1]) + >>> ma.count_masked(a, axis=1) + array([0, 2, 1]) + + """ + m = getmaskarray(arr) + return m.sum(axis) + + +def masked_all(shape, dtype=float): + """ + Empty masked array with all elements masked. + + Return an empty masked array of the given shape and dtype, where all the + data are masked. + + Parameters + ---------- + shape : tuple + Shape of the required MaskedArray. + dtype : dtype, optional + Data type of the output. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + See Also + -------- + masked_all_like : Empty masked array modelled on an existing array. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> ma.masked_all((3, 3)) + masked_array(data = + [[-- -- --] + [-- -- --] + [-- -- --]], + mask = + [[ True True True] + [ True True True] + [ True True True]], + fill_value=1e+20) + + The `dtype` parameter defines the underlying data type. + + >>> a = ma.masked_all((3, 3)) + >>> a.dtype + dtype('float64') + >>> a = ma.masked_all((3, 3), dtype=np.int32) + >>> a.dtype + dtype('int32') + + """ + a = masked_array(np.empty(shape, dtype), + mask=np.ones(shape, make_mask_descr(dtype))) + return a + + +def masked_all_like(arr): + """ + Empty masked array with the properties of an existing array. + + Return an empty masked array of the same shape and dtype as + the array `arr`, where all the data are masked. + + Parameters + ---------- + arr : ndarray + An array describing the shape and dtype of the required MaskedArray. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + Raises + ------ + AttributeError + If `arr` doesn't have a shape attribute (i.e. not an ndarray) + + See Also + -------- + masked_all : Empty masked array with all elements masked. + + Examples + -------- + >>> import numpy.ma as ma + >>> arr = np.zeros((2, 3), dtype=np.float32) + >>> arr + array([[ 0., 0., 0.], + [ 0., 0., 0.]], dtype=float32) + >>> ma.masked_all_like(arr) + masked_array(data = + [[-- -- --] + [-- -- --]], + mask = + [[ True True True] + [ True True True]], + fill_value=1e+20) + + The dtype of the masked array matches the dtype of `arr`. + + >>> arr.dtype + dtype('float32') + >>> ma.masked_all_like(arr).dtype + dtype('float32') + + """ + a = np.empty_like(arr).view(MaskedArray) + a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) + return a + + +#####-------------------------------------------------------------------------- +#---- --- Standard functions --- +#####-------------------------------------------------------------------------- +class _fromnxfunction(object): + """ + Defines a wrapper to adapt NumPy functions to masked arrays. + + + An instance of `_fromnxfunction` can be called with the same parameters + as the wrapped NumPy function. The docstring of `newfunc` is adapted from + the wrapped function as well, see `getdoc`. + + This class should not be used directly. Instead, one of its extensions that + provides support for a specific type of input should be used. + + Parameters + ---------- + funcname : str + The name of the function to be adapted. The function should be + in the NumPy namespace (i.e. ``np.funcname``). + + """ + + def __init__(self, funcname): + self.__name__ = funcname + self.__doc__ = self.getdoc() + + def getdoc(self): + """ + Retrieve the docstring and signature from the function. + + The ``__doc__`` attribute of the function is used as the docstring for + the new masked array version of the function. A note on application + of the function to the mask is appended. + + .. warning:: + If the function docstring already contained a Notes section, the + new docstring will have two Notes sections instead of appending a note + to the existing section. + + Parameters + ---------- + None + + """ + npfunc = getattr(np, self.__name__, None) + doc = getattr(npfunc, '__doc__', None) + if doc: + sig = self.__name__ + ma.get_object_signature(npfunc) + locdoc = "Notes\n-----\nThe function is applied to both the _data"\ + " and the _mask, if any." 
+ return '\n'.join((sig, doc, locdoc)) + return + + def __call__(self, *args, **params): + pass + + +class _fromnxfunction_single(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single array + argument followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + if isinstance(x, ndarray): + _d = func(x.__array__(), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + else: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_seq(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single sequence + of arrays followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + _d = func(tuple([np.asarray(a) for a in x]), *args, **params) + _m = func(tuple([getmaskarray(a) for a in x]), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_args(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. The first non-array-like input marks the beginning of the + arguments that are passed verbatim for both the data and mask calls. + Array arguments are processed independently and the results are + returned in a list. If only one array is found, the return value is + just the processed array instead of a list. + """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + arrays = [] + args = list(args) + while len(args) > 0 and issequence(args[0]): + arrays.append(args.pop(0)) + res = [] + for x in arrays: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + res.append(masked_array(_d, mask=_m)) + if len(arrays) == 1: + return res[0] + return res + + +class _fromnxfunction_allargs(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. Similar to `_fromnxfunction_args` except that all args + are converted to arrays even if they are not so already. This makes + it possible to process scalars as 1-D arrays. Only keyword arguments + are passed through verbatim for the data and mask calls. Arrays + arguments are processed independently and the results are returned + in a list. If only one arg is present, the return value is just the + processed array instead of a list. 
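# Editor's sketch of the wrapper classes above at work: the `vstack` built
# from `_fromnxfunction_seq` (defined just below) stacks the data and the
# masks in parallel (illustrative values).
import numpy as np

a = np.ma.array([1, 2], mask=[0, 1])
b = np.ma.array([3, 4], mask=[1, 0])
print(np.ma.vstack((a, b)))
# [[1 --]
#  [-- 4]]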
+ """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + res = [] + for x in args: + _d = func(np.asarray(x), **params) + _m = func(getmaskarray(x), **params) + res.append(masked_array(_d, mask=_m)) + if len(args) == 1: + return res[0] + return res + + +atleast_1d = _fromnxfunction_allargs('atleast_1d') +atleast_2d = _fromnxfunction_allargs('atleast_2d') +atleast_3d = _fromnxfunction_allargs('atleast_3d') + +vstack = row_stack = _fromnxfunction_seq('vstack') +hstack = _fromnxfunction_seq('hstack') +column_stack = _fromnxfunction_seq('column_stack') +dstack = _fromnxfunction_seq('dstack') +stack = _fromnxfunction_seq('stack') + +hsplit = _fromnxfunction_single('hsplit') + +diagflat = _fromnxfunction_single('diagflat') + + +#####-------------------------------------------------------------------------- +#---- +#####-------------------------------------------------------------------------- +def flatten_inplace(seq): + """Flatten a sequence in place.""" + k = 0 + while (k != len(seq)): + while hasattr(seq[k], '__iter__'): + seq[k:(k + 1)] = seq[k] + k += 1 + return seq + + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + (This docstring should be overwritten) + """ + arr = array(arr, copy=False, subok=True) + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + ind = [0] * (nd - 1) + i = np.zeros(nd, 'O') + indlist = list(range(nd)) + indlist.remove(axis) + i[axis] = slice(None, None) + outshape = np.asarray(arr.shape).take(indlist) + i.put(indlist, ind) + j = i.copy() + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + # if res is a number, then we have a smaller output array + asscalar = np.isscalar(res) + if not asscalar: + try: + len(res) + except TypeError: + asscalar = True + # Note: we shouldn't set the dtype of the output from the first result + # so we force the type to object, and build a list of dtypes. 
We'll + # just take the largest, to avoid some downcasting + dtypes = [] + if asscalar: + dtypes.append(np.asarray(res).dtype) + outarr = zeros(outshape, object) + outarr[tuple(ind)] = res + Ntot = np.product(outshape) + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= outshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(ind)] = res + dtypes.append(asarray(res).dtype) + k += 1 + else: + res = array(res, copy=False, subok=True) + j = i.copy() + j[axis] = ([slice(None, None)] * res.ndim) + j.put(indlist, ind) + Ntot = np.product(outshape) + holdshape = outshape + outshape = list(arr.shape) + outshape[axis] = res.shape + dtypes.append(asarray(res).dtype) + outshape = flatten_inplace(outshape) + outarr = zeros(outshape, object) + outarr[tuple(flatten_inplace(j.tolist()))] = res + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= holdshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + j.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(flatten_inplace(j.tolist()))] = res + dtypes.append(asarray(res).dtype) + k += 1 + max_dtypes = np.dtype(np.asarray(dtypes).max()) + if not hasattr(arr, '_mask'): + result = np.asarray(outarr, dtype=max_dtypes) + else: + result = asarray(outarr, dtype=max_dtypes) + result.fill_value = ma.default_fill_value(result) + return result +apply_along_axis.__doc__ = np.apply_along_axis.__doc__ + + +def apply_over_axes(func, a, axes): + """ + (This docstring will be overwritten) + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = ma.expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + +if apply_over_axes.__doc__ is not None: + apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ + :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ + """ + + Examples + -------- + >>> a = ma.arange(24).reshape(2,3,4) + >>> a[:,0,1] = ma.masked + >>> a[:,1,:] = ma.masked + >>> print(a) + [[[0 -- 2 3] + [-- -- -- --] + [8 9 10 11]] + + [[12 -- 14 15] + [-- -- -- --] + [20 21 22 23]]] + >>> print(ma.apply_over_axes(ma.sum, a, [0,2])) + [[[46] + [--] + [124]]] + + Tuple axis arguments to ufuncs are equivalent: + + >>> print(ma.sum(a, axis=(0,2)).reshape((1,-1,1))) + [[[46] + [--] + [124]]] + """ + + +def average(a, axis=None, weights=None, returned=False): + """ + Return the weighted average of array over the given axis. + + Parameters + ---------- + a : array_like + Data to be averaged. + Masked entries are not taken into account in the computation. + axis : int, optional + Axis along which to average `a`. If `None`, averaging is done over + the flattened array. + weights : array_like, optional + The importance that each element has in the computation of the average. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If ``weights=None``, then all data in `a` are assumed to have a + weight equal to one. If `weights` is complex, the imaginary parts + are ignored. 
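# Editor's sketch of the weight/mask interplay described above: weights
# attached to masked entries are zeroed before the weighted sum
# (illustrative values).
import numpy as np

a = np.ma.array([1., 2., 4.], mask=[0, 0, 1])
print(np.ma.average(a, weights=[1, 1, 5]))  # 1.5 : the weight 5 is ignored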
+ returned : bool, optional + Flag indicating whether a tuple ``(result, sum of weights)`` + should be returned as output (True), or just the result (False). + Default is False. + + Returns + ------- + average, [sum_of_weights] : (tuple of) scalar or MaskedArray + The average along the specified axis. When returned is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. The return type is `np.float64` + if `a` is of integer type and floats smaller than `float64`, or the + input data-type, otherwise. If returned, `sum_of_weights` is always + `float64`. + + Examples + -------- + >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) + >>> np.ma.average(a, weights=[3, 1, 0, 0]) + 1.25 + + >>> x = np.ma.arange(6.).reshape(3, 2) + >>> print(x) + [[ 0. 1.] + [ 2. 3.] + [ 4. 5.]] + >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], + ... returned=True) + >>> print(avg) + [2.66666666667 3.66666666667] + + """ + a = asarray(a) + m = getmask(a) + + # inspired by 'average' in numpy/lib/function_base.py + + if weights is None: + avg = a.mean(axis) + scl = avg.dtype.type(a.count(axis)) + else: + wgt = np.asanyarray(weights) + + if issubclass(a.dtype.type, (np.integer, np.bool_)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.ndim != 1: + raise TypeError( + "1D weights expected when shapes of a and weights differ.") + if wgt.shape[0] != a.shape[axis]: + raise ValueError( + "Length of weights not compatible with specified axis.") + + # setup wgt to broadcast along axis + wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) + wgt = wgt.swapaxes(-1, axis) + + if m is not nomask: + wgt = wgt*(~a.mask) + + scl = wgt.sum(axis=axis, dtype=result_dtype) + avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl + + if returned: + if scl.shape != avg.shape: + scl = np.broadcast_to(scl, avg.shape).copy() + return avg, scl + else: + return avg + + +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int, optional + Axis along which the medians are computed. The default (None) is + to compute the median along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True, and the input + is not already an `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + .. 
versionadded:: 1.10.0 + + Returns + ------- + median : ndarray + A new array holding the result is returned unless out is + specified, in which case a reference to out is returned. + Return data-type is `float64` for integers and floats smaller than + `float64`, or the input data-type, otherwise. + + See Also + -------- + mean + + Notes + ----- + Given a vector ``V`` with ``N`` non masked values, the median of ``V`` + is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. + ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` + when ``N`` is even. + + Examples + -------- + >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) + >>> np.ma.median(x) + 1.5 + + >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + >>> np.ma.median(x) + 2.5 + >>> np.ma.median(x, axis=-1, overwrite_input=True) + masked_array(data = [ 2. 5.], + mask = False, + fill_value = 1e+20) + + """ + if not hasattr(a, 'mask'): + m = np.median(getdata(a, subok=True), axis=axis, + out=out, overwrite_input=overwrite_input, + keepdims=keepdims) + if isinstance(m, np.ndarray) and 1 <= m.ndim: + return masked_array(m, copy=False) + else: + return m + + r, k = _ureduce(a, func=_median, axis=axis, out=out, + overwrite_input=overwrite_input) + if keepdims: + return r.reshape(k) + else: + return r + +def _median(a, axis=None, out=None, overwrite_input=False): + # when an unmasked NaN is present return it, so we need to sort the NaN + # values behind the mask + if np.issubdtype(a.dtype, np.inexact): + fill_value = np.inf + else: + fill_value = None + if overwrite_input: + if axis is None: + asorted = a.ravel() + asorted.sort(fill_value=fill_value) + else: + a.sort(axis=axis, fill_value=fill_value) + asorted = a + else: + asorted = sort(a, axis=axis, fill_value=fill_value) + + if axis is None: + axis = 0 + else: + axis = normalize_axis_index(axis, asorted.ndim) + + if asorted.shape[axis] == 0: + # for empty axis integer indices fail so use slicing to get same result + # as median (which is mean of empty slice = nan) + indexer = [slice(None)] * asorted.ndim + indexer[axis] = slice(0, 0) + indexer = tuple(indexer) + return np.ma.mean(asorted[indexer], axis=axis, out=out) + + if asorted.ndim == 1: + counts = count(asorted) + idx, odd = divmod(count(asorted), 2) + mid = asorted[idx + odd - 1:idx + 1] + if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0: + # avoid inf / x = masked + s = mid.sum(out=out) + if not odd: + s = np.true_divide(s, 2., casting='safe', out=out) + s = np.lib.utils._median_nancheck(asorted, s, axis, out) + else: + s = mid.mean(out=out) + + # if result is masked either the input contained enough + # minimum_fill_value so that it would be the median or all values + # masked + if np.ma.is_masked(s) and not np.all(asorted.mask): + return np.ma.minimum_fill_value(asorted) + return s + + counts = count(asorted, axis=axis, keepdims=True) + h = counts // 2 + + # duplicate high if odd number of elements so mean does nothing + odd = counts % 2 == 1 + l = np.where(odd, h, h-1) + + lh = np.concatenate([l,h], axis=axis) + + # get low and high median + low_high = np.take_along_axis(asorted, lh, axis=axis) + + def replace_masked(s): + # Replace masked entries with minimum_full_value unless it all values + # are masked. This is required as the sort order of values equal or + # larger than the fill value is undefined and a valid value placed + # elsewhere, e.g. [4, --, inf]. 
+ if np.ma.is_masked(s): + rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask + s.data[rep] = np.ma.minimum_fill_value(asorted) + s.mask[rep] = False + + replace_masked(low_high) + + if np.issubdtype(asorted.dtype, np.inexact): + # avoid inf / x = masked + s = np.ma.sum(low_high, axis=axis, out=out) + np.true_divide(s.data, 2., casting='unsafe', out=s.data) + + s = np.lib.utils._median_nancheck(asorted, s, axis, out) + else: + s = np.ma.mean(low_high, axis=axis, out=out) + + return s + + +def compress_nd(x, axis=None): + """Suppress slices from multiple dimensions which contain masked values. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked, `x` is interpreted as a MaskedArray with `mask` + set to `nomask`. + axis : tuple of ints or int, optional + Which dimensions to suppress slices from can be configured with this + parameter. + - If axis is a tuple of ints, those are the axes to suppress slices from. + - If axis is an int, then that is the only axis to suppress slices from. + - If axis is None, all axis are selected. + + Returns + ------- + compress_array : ndarray + The compressed array. + """ + x = asarray(x) + m = getmask(x) + # Set axis to tuple of ints + if axis is None: + axis = tuple(range(x.ndim)) + else: + axis = normalize_axis_tuple(axis, x.ndim) + + # Nothing is masked: return x + if m is nomask or not m.any(): + return x._data + # All is masked: return empty + if m.all(): + return nxarray([]) + # Filter elements through boolean indexing + data = x._data + for ax in axis: + axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) + data = data[(slice(None),)*ax + (~m.any(axis=axes),)] + return data + +def compress_rowcols(x, axis=None): + """ + Suppress the rows and/or columns of a 2-D array that contain + masked values. + + The suppression behavior is selected with the `axis` parameter. + + - If axis is None, both rows and columns are suppressed. + - If axis is 0, only rows are suppressed. + - If axis is 1 or -1, only columns are suppressed. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. Default is None. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + Examples + -------- + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x + masked_array(data = + [[-- 1 2] + [-- 4 5] + [6 7 8]], + mask = + [[ True False False] + [ True False False] + [False False False]], + fill_value = 999999) + + >>> np.ma.compress_rowcols(x) + array([[7, 8]]) + >>> np.ma.compress_rowcols(x, 0) + array([[6, 7, 8]]) + >>> np.ma.compress_rowcols(x, 1) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + if asarray(x).ndim != 2: + raise NotImplementedError("compress_rowcols works for 2D arrays only.") + return compress_nd(x, axis=axis) + + +def compress_rows(a): + """ + Suppress whole rows of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see + `extras.compress_rowcols` for details. 
+ + See Also + -------- + extras.compress_rowcols + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_rows works for 2D arrays only.") + return compress_rowcols(a, 0) + +def compress_cols(a): + """ + Suppress whole columns of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see + `extras.compress_rowcols` for details. + + See Also + -------- + extras.compress_rowcols + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_cols works for 2D arrays only.") + return compress_rowcols(a, 1) + +def mask_rows(a, axis=None): + """ + Mask rows of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array(data = + [[0 0 0] + [0 -- 0] + [0 0 0]], + mask = + [[False False False] + [False True False] + [False False False]], + fill_value=999999) + >>> ma.mask_rows(a) + masked_array(data = + [[0 0 0] + [-- -- --] + [0 0 0]], + mask = + [[False False False] + [ True True True] + [False False False]], + fill_value=999999) + + """ + return mask_rowcols(a, 0) + +def mask_cols(a, axis=None): + """ + Mask columns of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array(data = + [[0 0 0] + [0 -- 0] + [0 0 0]], + mask = + [[False False False] + [False True False] + [False False False]], + fill_value=999999) + >>> ma.mask_cols(a) + masked_array(data = + [[0 -- 0] + [0 -- 0] + [0 -- 0]], + mask = + [[False True False] + [False True False] + [False True False]], + fill_value=999999) + + """ + return mask_rowcols(a, 1) + + +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- + +def ediff1d(arr, to_end=None, to_begin=None): + """ + Compute the differences between consecutive elements of an array. + + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account, see `numpy.ediff1d` for details. + + See Also + -------- + numpy.ediff1d : Equivalent function for ndarrays. + + """ + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] + # + if to_begin is not None: + arrays.insert(0, to_begin) + if to_end is not None: + arrays.append(to_end) + # + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) + # + return ed + + +def unique(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). The output array + is always a masked array. See `numpy.unique` for more details. 
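# Editor's sketch of `ediff1d` on masked input: a difference touching a
# masked element is itself masked (illustrative values).
import numpy as np

x = np.ma.array([1, 2, 4, 7], mask=[0, 0, 1, 0])
print(np.ma.ediff1d(x))  # [1 -- --]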
+ + See Also + -------- + numpy.unique : Equivalent function for ndarrays. + + """ + output = np.unique(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) + else: + output = output.view(MaskedArray) + return output + + +def intersect1d(ar1, ar2, assume_unique=False): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See `numpy.intersect1d` for more details. + + See Also + -------- + numpy.intersect1d : Equivalent function for ndarrays. + + Examples + -------- + >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> intersect1d(x, y) + masked_array(data = [1 3 --], + mask = [False False True], + fill_value = 999999) + + """ + if assume_unique: + aux = ma.concatenate((ar1, ar2)) + else: + # Might be faster than unique( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique(ar1), unique(ar2))) + aux.sort() + return aux[:-1][aux[1:] == aux[:-1]] + + +def setxor1d(ar1, ar2, assume_unique=False): + """ + Set exclusive-or of 1-D arrays with unique elements. + + The output is always a masked array. See `numpy.setxor1d` for more details. + + See Also + -------- + numpy.setxor1d : Equivalent function for ndarrays. + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = ma.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + + +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of an array is also present in a second + array. + + The output is always a masked array. See `numpy.in1d` for more details. + + We recommend using :func:`isin` instead of `in1d` for new code. + + See Also + -------- + isin : Version of this function that preserves the shape of ar1. + numpy.in1d : Equivalent function for ndarrays. + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if not assume_unique: + ar1, rev_idx = unique(ar1, return_inverse=True) + ar2 = unique(ar2) + + ar = ma.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = ma.concatenate((bool_ar, [invert])) + indx = order.argsort(kind='mergesort')[:len(ar1)] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] + + +def isin(element, test_elements, assume_unique=False, invert=False): + """ + Calculates `element in test_elements`, broadcasting over + `element` only. + + The output is always a masked array of the same shape as `element`. + See `numpy.isin` for more details. + + See Also + -------- + in1d : Flattened version of this function. + numpy.isin : Equivalent function for ndarrays. + + Notes + ----- + .. versionadded:: 1.13.0 + + """ + element = ma.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + +def union1d(ar1, ar2): + """ + Union of two arrays. 
+ + The output is always a masked array. See `numpy.union1d` for more details. + + See also + -------- + numpy.union1d : Equivalent function for ndarrays. + + """ + return unique(ma.concatenate((ar1, ar2), axis=None)) + + +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Set difference of 1D arrays with unique elements. + + The output is always a masked array. See `numpy.setdiff1d` for more + details. + + See Also + -------- + numpy.setdiff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) + >>> np.ma.setdiff1d(x, [1, 2]) + masked_array(data = [3 --], + mask = [False True], + fill_value = 999999) + + """ + if assume_unique: + ar1 = ma.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] + + +############################################################################### +# Covariance # +############################################################################### + + +def _covhelper(x, y=None, rowvar=True, allow_masked=True): + """ + Private function for the computation of covariance and correlation + coefficients. + + """ + x = ma.array(x, ndmin=2, copy=True, dtype=float) + xmask = ma.getmaskarray(x) + # Quick exit if we can't process masked data + if not allow_masked and xmask.any(): + raise ValueError("Cannot process masked data.") + # + if x.shape[0] == 1: + rowvar = True + # Make sure that rowvar is either 0 or 1 + rowvar = int(bool(rowvar)) + axis = 1 - rowvar + if rowvar: + tup = (slice(None), None) + else: + tup = (None, slice(None)) + # + if y is None: + xnotmask = np.logical_not(xmask).astype(int) + else: + y = array(y, copy=False, ndmin=2, dtype=float) + ymask = ma.getmaskarray(y) + if not allow_masked and ymask.any(): + raise ValueError("Cannot process masked data.") + if xmask.any() or ymask.any(): + if y.shape == x.shape: + # Define some common mask + common_mask = np.logical_or(xmask, ymask) + if common_mask is not nomask: + xmask = x._mask = y._mask = ymask = common_mask + x._sharedmask = False + y._sharedmask = False + x = ma.concatenate((x, y), axis) + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) + x -= x.mean(axis=rowvar)[tup] + return (x, xnotmask, rowvar) + + +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): + """ + Estimate the covariance matrix. + + Except for the handling of missing data this function does the same as + `numpy.cov`. For more details and examples, see `numpy.cov`. + + By default, masked values are recognized as such. If `x` and `y` have the + same shape, a common mask is allocated: if ``x[i,j]`` is masked, then + ``y[i,j]`` will also be masked. + Setting `allow_masked` to False will raise an exception if values are + missing in either of the input arrays. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + form as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. 
+ bias : bool, optional + Default normalization (False) is by ``(N-1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. This keyword can be overridden by + the keyword ``ddof`` in numpy versions >= 1.5. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises a `ValueError` exception when some values are missing. + ddof : {None, int}, optional + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. The default value is ``None``. + + .. versionadded:: 1.5 + + Raises + ------ + ValueError + Raised if some values are missing and `allow_masked` is False. + + See Also + -------- + numpy.cov + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError("ddof must be an integer") + # Set up ddof + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof + result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof + result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + return result + + +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, + ddof=np._NoValue): + """ + Return Pearson product-moment correlation coefficients. + + Except for the handling of missing data this function does the same as + `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises an exception. Because `bias` is deprecated, this + argument needs to be treated as keyword only to avoid a warning. + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + + See Also + -------- + numpy.corrcoef : Equivalent function in top-level NumPy module. + cov : Estimate the covariance matrix. + + Notes + ----- + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. 
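+
+    Examples
+    --------
+    A minimal sketch; the masked pattern of the result follows the pairwise
+    masking rules described above:
+
+    >>> x = np.ma.array([[0., 1., 2.], [2., 1., 0.]],
+    ...                 mask=[[0, 0, 0], [0, 0, 1]])
+    >>> r = np.ma.corrcoef(x)   # 2x2 matrix; the diagonal entries are 1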
+ """ + msg = 'bias and ddof have no effect and are deprecated' + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn(msg, DeprecationWarning, stacklevel=2) + # Get the data + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + # Compute the covariance matrix + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. + c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. + c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + # Check whether we have a scalar + try: + diag = ma.diagonal(c) + except ValueError: + return 1 + # + if xnotmask.all(): + _denom = ma.sqrt(ma.multiply.outer(diag, diag)) + else: + _denom = diagflat(diag) + _denom._sharedmask = False # We know return is always a copy + n = x.shape[1 - rowvar] + if rowvar: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols(vstack((x[i], x[j]))).var(axis=1) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + else: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols( + vstack((x[:, i], x[:, j]))).var(axis=1) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + return c / _denom + +#####-------------------------------------------------------------------------- +#---- --- Concatenation helpers --- +#####-------------------------------------------------------------------------- + +class MAxisConcatenator(AxisConcatenator): + """ + Translate slice objects to concatenation along an axis. + + For documentation on usage, see `mr_class`. + + See Also + -------- + mr_class + + """ + concatenate = staticmethod(concatenate) + + @classmethod + def makemat(cls, arr): + # There used to be a view as np.matrix here, but we may eventually + # deprecate that class. In preparation, we use the unmasked version + # to construct the matrix (with copy=False for backwards compatibility + # with the .view) + data = super(MAxisConcatenator, cls).makemat(arr.data, copy=False) + return array(data, mask=arr.mask) + + def __getitem__(self, key): + # matrix builder syntax, like 'a, b; c, d' + if isinstance(key, str): + raise MAError("Unavailable for masked array.") + + return super(MAxisConcatenator, self).__getitem__(key) + + +class mr_class(MAxisConcatenator): + """ + Translate slice objects to concatenation along the first axis. + + This is the masked array version of `lib.index_tricks.RClass`. + + See Also + -------- + lib.index_tricks.RClass + + Examples + -------- + >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] + array([1, 2, 3, 0, 0, 4, 5, 6]) + + """ + def __init__(self): + MAxisConcatenator.__init__(self, 0) + +mr_ = mr_class() + +#####-------------------------------------------------------------------------- +#---- Find unmasked data --- +#####-------------------------------------------------------------------------- + +def flatnotmasked_edges(a): + """ + Find the indices of the first and last unmasked values. + + Expects a 1-D `MaskedArray`, returns None if all values are masked. + + Parameters + ---------- + a : array_like + Input 1-D `MaskedArray` + + Returns + ------- + edges : ndarray or None + The indices of first and last non-masked value in the array. + Returns None if all values are masked. + + See Also + -------- + flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges, + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 1-D arrays. 
+
+    Examples
+    --------
+    >>> a = np.ma.arange(10)
+    >>> flatnotmasked_edges(a)
+    array([0, 9])
+
+    >>> mask = (a < 3) | (a > 8) | (a == 5)
+    >>> a[mask] = np.ma.masked
+    >>> np.array(a[~a.mask])
+    array([3, 4, 6, 7, 8])
+
+    >>> flatnotmasked_edges(a)
+    array([3, 8])
+
+    >>> a[:] = np.ma.masked
+    >>> print(flatnotmasked_edges(a))
+    None
+
+    """
+    m = getmask(a)
+    if m is nomask or not np.any(m):
+        return np.array([0, a.size - 1])
+    unmasked = np.flatnonzero(~m)
+    if len(unmasked) > 0:
+        return unmasked[[0, -1]]
+    else:
+        return None
+
+
+def notmasked_edges(a, axis=None):
+    """
+    Find the indices of the first and last unmasked values along an axis.
+
+    If all values are masked, return None. Otherwise, return a list
+    of two tuples, corresponding to the indices of the first and last
+    unmasked values respectively.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array.
+    axis : int, optional
+        Axis along which to perform the operation.
+        If None (default), applies to a flattened version of the array.
+
+    Returns
+    -------
+    edges : ndarray or list
+        An array of start and end indexes if there are any masked data in
+        the array. If there are no masked data in the array, `edges` is a
+        list of the first and last index.
+
+    See Also
+    --------
+    flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous,
+    clump_masked, clump_unmasked
+
+    Examples
+    --------
+    >>> a = np.arange(9).reshape((3, 3))
+    >>> m = np.zeros_like(a)
+    >>> m[1:, 1:] = 1
+
+    >>> am = np.ma.array(a, mask=m)
+    >>> np.array(am[~am.mask])
+    array([0, 1, 2, 3, 6])
+
+    >>> np.ma.notmasked_edges(am)
+    array([0, 6])
+
+    """
+    a = asarray(a)
+    if axis is None or a.ndim == 1:
+        return flatnotmasked_edges(a)
+    m = getmaskarray(a)
+    idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
+    return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
+            tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
+
+
+def flatnotmasked_contiguous(a):
+    """
+    Find contiguous unmasked data in a masked array.
+
+    Parameters
+    ----------
+    a : ndarray
+        The input array.
+
+    Returns
+    -------
+    slice_list : list
+        A sorted sequence of `slice` objects (start index, end index).
+
+        .. versionchanged:: 1.15.0
+            Now returns an empty list instead of None for a fully masked array
+
+    See Also
+    --------
+    flatnotmasked_edges, notmasked_contiguous, notmasked_edges,
+    clump_masked, clump_unmasked
+
+    Notes
+    -----
+    The array is flattened before the search, so the returned slices refer
+    to positions in the flattened array.
+
+    Examples
+    --------
+    >>> a = np.ma.arange(10)
+    >>> np.ma.flatnotmasked_contiguous(a)
+    [slice(0, 10, None)]
+
+    >>> mask = (a < 3) | (a > 8) | (a == 5)
+    >>> a[mask] = np.ma.masked
+    >>> np.array(a[~a.mask])
+    array([3, 4, 6, 7, 8])
+
+    >>> np.ma.flatnotmasked_contiguous(a)
+    [slice(3, 5, None), slice(6, 9, None)]
+    >>> a[:] = np.ma.masked
+    >>> np.ma.flatnotmasked_contiguous(a)
+    []
+
+    """
+    m = getmask(a)
+    if m is nomask:
+        return [slice(0, a.size)]
+    i = 0
+    result = []
+    for (k, g) in itertools.groupby(m.ravel()):
+        n = len(list(g))
+        if not k:
+            result.append(slice(i, i + n))
+        i += n
+    return result
+
+def notmasked_contiguous(a, axis=None):
+    """
+    Find contiguous unmasked data in a masked array along the given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array.
+    axis : int, optional
+        Axis along which to perform the operation.
+        If None (default), applies to a flattened version of the array, and this
+        is the same as `flatnotmasked_contiguous`.
+
+    Returns
+    -------
+    endpoints : list
+        A list of slices (start and end indexes) of unmasked indexes
+        in the array.
+
+        If the input is 2-D and `axis` is specified, the result is a list of lists.
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
+    clump_masked, clump_unmasked
+
+    Notes
+    -----
+    Only accepts 2-D arrays at most.
+
+    Examples
+    --------
+    >>> a = np.arange(12).reshape((3, 4))
+    >>> mask = np.zeros_like(a)
+    >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
+    >>> ma = np.ma.array(a, mask=mask)
+    >>> ma
+    masked_array(
+      data=[[0, --, 2, 3],
+            [--, --, --, 7],
+            [8, --, --, 11]],
+      mask=[[False,  True, False, False],
+            [ True,  True,  True, False],
+            [False,  True,  True, False]],
+      fill_value=999999)
+    >>> np.array(ma[~ma.mask])
+    array([ 0,  2,  3,  7,  8, 11])
+
+    >>> np.ma.notmasked_contiguous(ma)
+    [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
+
+    >>> np.ma.notmasked_contiguous(ma, axis=0)
+    [[slice(0, 1, None), slice(2, 3, None)],  # column broken into two segments
+     [],  # fully masked column
+     [slice(0, 1, None)],
+     [slice(0, 3, None)]]
+
+    >>> np.ma.notmasked_contiguous(ma, axis=1)
+    [[slice(0, 1, None), slice(2, 4, None)],  # row broken into two segments
+     [slice(3, 4, None)],
+     [slice(0, 1, None), slice(3, 4, None)]]
+    """
+    a = asarray(a)
+    nd = a.ndim
+    if nd > 2:
+        raise NotImplementedError("Currently limited to at most 2-D arrays.")
+    if axis is None or nd == 1:
+        return flatnotmasked_contiguous(a)
+    #
+    result = []
+    #
+    other = (axis + 1) % 2
+    idx = [0, 0]
+    idx[axis] = slice(None, None)
+    #
+    for i in range(a.shape[other]):
+        idx[other] = i
+        result.append(flatnotmasked_contiguous(a[tuple(idx)]))
+    return result
+
+
+def _ezclump(mask):
+    """
+    Finds the clumps (groups of data with the same values) for a 1D bool array.
+
+    Returns a series of slices.
+    """
+    if mask.ndim > 1:
+        mask = mask.ravel()
+    idx = (mask[1:] ^ mask[:-1]).nonzero()
+    idx = idx[0] + 1
+
+    if mask[0]:
+        if len(idx) == 0:
+            return [slice(0, mask.size)]
+
+        r = [slice(0, idx[0])]
+        r.extend((slice(left, right)
+                  for left, right in zip(idx[1:-1:2], idx[2::2])))
+    else:
+        if len(idx) == 0:
+            return []
+
+        r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
+
+    if mask[-1]:
+        r.append(slice(idx[-1], mask.size))
+    return r
+
+
+def clump_unmasked(a):
+    """
+    Return list of slices corresponding to the unmasked clumps of a 1-D array.
+    (A "clump" is defined as a contiguous region of the array).
+
+    Parameters
+    ----------
+    a : ndarray
+        A one-dimensional masked array.
+
+    Returns
+    -------
+    slices : list of slice
+        The list of slices, one for each continuous region of unmasked
+        elements in `a`.
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
+    notmasked_contiguous, clump_masked
+
+    Examples
+    --------
+    >>> a = np.ma.masked_array(np.arange(10))
+    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+    >>> np.ma.clump_unmasked(a)
+    [slice(3, 6, None), slice(7, 8, None)]
+
+    """
+    mask = getattr(a, '_mask', nomask)
+    if mask is nomask:
+        return [slice(0, a.size)]
+    return _ezclump(~mask)
+
+
+def clump_masked(a):
+    """
+    Returns a list of slices corresponding to the masked clumps of a 1-D array.
+    (A "clump" is defined as a contiguous region of the array).
+
+    Parameters
+    ----------
+    a : ndarray
+        A one-dimensional masked array.
+
+    Returns
+    -------
+    slices : list of slice
+        The list of slices, one for each continuous region of masked elements
+        in `a`.
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
+    notmasked_contiguous, clump_unmasked
+
+    Examples
+    --------
+    >>> a = np.ma.masked_array(np.arange(10))
+    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+    >>> np.ma.clump_masked(a)
+    [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
+
+    """
+    mask = ma.getmask(a)
+    if mask is nomask:
+        return []
+    return _ezclump(mask)
+
+
+###############################################################################
+#                            Polynomial fit                                   #
+###############################################################################
+
+
+def vander(x, n=None):
+    """
+    Masked values in the input array result in rows of zeros.
+
+    """
+    _vander = np.vander(x, n)
+    m = getmask(x)
+    if m is not nomask:
+        _vander[m] = 0
+    return _vander
+
+vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
+    """
+    Any masked values in x are propagated in y, and vice-versa.
+
+    """
+    x = asarray(x)
+    y = asarray(y)
+
+    m = getmask(x)
+    if y.ndim == 1:
+        m = mask_or(m, getmask(y))
+    elif y.ndim == 2:
+        my = getmask(mask_rows(y))
+        if my is not nomask:
+            m = mask_or(m, my[:, 0])
+    else:
+        raise TypeError("Expected a 1D or 2D array for y!")
+
+    if w is not None:
+        w = asarray(w)
+        if w.ndim != 1:
+            raise TypeError("expected a 1-d array for weights")
+        if w.shape[0] != y.shape[0]:
+            raise TypeError("expected w and y to have the same length")
+        m = mask_or(m, getmask(w))
+
+    if m is not nomask:
+        not_m = ~m
+        if w is not None:
+            w = w[not_m]
+        return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
+    else:
+        return np.polyfit(x, y, deg, rcond, full, w, cov)
+
+polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/extras.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/extras.pyc
new file mode 100644
index 0000000..681a3f5
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/extras.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/mrecords.py b/project/venv/lib/python2.7/site-packages/numpy/ma/mrecords.py
new file mode 100644
index 0000000..daf2f87
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/ma/mrecords.py
@@ -0,0 +1,777 @@
+""":mod:`numpy.ma.mrecords`
+
+Defines the equivalent of :class:`numpy.recarray` for masked arrays,
+where fields can be accessed as attributes.
+Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
+and the masking of individual fields.
+
+.. moduleauthor:: Pierre Gerard-Marchant
+
+"""
+from __future__ import division, absolute_import, print_function
+
+# We should make sure that no field is called '_mask', 'mask', '_fieldmask',
+# or whatever restricted keywords. An idea would be to not bother in the
+# first place, and then rename the invalid fields with a trailing
+# underscore. Maybe we could just overload the parser function?
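+#
+# A rough usage sketch (illustrative only; `fromarrays` is the public
+# constructor defined further below):
+#
+#     >>> from numpy.ma.mrecords import fromarrays
+#     >>> rec = fromarrays([[1, 2], [3.0, 4.0]], names='a,b')
+#     >>> rec.a                  # fields are reachable as attributes
+#     >>> rec.a[0] = ma.masked   # and can be masked individually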
+ +import sys +import warnings + +import numpy as np +import numpy.core.numerictypes as ntypes +from numpy.compat import basestring +from numpy import ( + bool_, dtype, ndarray, recarray, array as narray + ) +from numpy.core.records import ( + fromarrays as recfromarrays, fromrecords as recfromrecords + ) + +_byteorderconv = np.core.records._byteorderconv + +import numpy.ma as ma +from numpy.ma import ( + MAError, MaskedArray, masked, nomask, masked_array, getdata, + getmaskarray, filled + ) + +_check_fill_value = ma.core._check_fill_value + + +__all__ = [ + 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords', + 'fromtextfile', 'addfield', + ] + +reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] + + +def _checknames(descr, names=None): + """ + Checks that field names ``descr`` are not reserved keywords. + + If this is the case, a default 'f%i' is substituted. If the argument + `names` is not None, updates the field names to valid names. + + """ + ndescr = len(descr) + default_names = ['f%i' % i for i in range(ndescr)] + if names is None: + new_names = default_names + else: + if isinstance(names, (tuple, list)): + new_names = names + elif isinstance(names, str): + new_names = names.split(',') + else: + raise NameError("illegal input names %s" % repr(names)) + nnames = len(new_names) + if nnames < ndescr: + new_names += default_names[nnames:] + ndescr = [] + for (n, d, t) in zip(new_names, default_names, descr.descr): + if n in reserved_fields: + if t[0] in reserved_fields: + ndescr.append((d, t[1])) + else: + ndescr.append(t) + else: + ndescr.append((n, t[1])) + return np.dtype(ndescr) + + +def _get_fieldmask(self): + mdescr = [(n, '|b1') for n in self.dtype.names] + fdmask = np.empty(self.shape, dtype=mdescr) + fdmask.flat = tuple([False] * len(mdescr)) + return fdmask + + +class MaskedRecords(MaskedArray, object): + """ + + Attributes + ---------- + _data : recarray + Underlying data, as a record array. + _mask : boolean array + Mask of the records. A record is masked when all its fields are + masked. + _fieldmask : boolean recarray + Record array of booleans, setting the mask of each individual field + of each record. + _fill_value : record + Filling values for each field. + + """ + + def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, + formats=None, names=None, titles=None, + byteorder=None, aligned=False, + mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, + copy=False, + **options): + + self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, + strides=strides, formats=formats, names=names, + titles=titles, byteorder=byteorder, + aligned=aligned,) + + mdtype = ma.make_mask_descr(self.dtype) + if mask is nomask or not np.size(mask): + if not keep_mask: + self._mask = tuple([False] * len(mdtype)) + else: + mask = np.array(mask, copy=copy) + if mask.shape != self.shape: + (nd, nm) = (self.size, mask.size) + if nm == 1: + mask = np.resize(mask, self.shape) + elif nm == nd: + mask = np.reshape(mask, self.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." 
+ raise MAError(msg % (nd, nm)) + copy = True + if not keep_mask: + self.__setmask__(mask) + self._sharedmask = True + else: + if mask.dtype == mdtype: + _mask = mask + else: + _mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + self._mask = _mask + return self + + def __array_finalize__(self, obj): + # Make sure we have a _fieldmask by default + _mask = getattr(obj, '_mask', None) + if _mask is None: + objmask = getattr(obj, '_mask', nomask) + _dtype = ndarray.__getattribute__(self, 'dtype') + if objmask is nomask: + _mask = ma.make_mask_none(self.shape, dtype=_dtype) + else: + mdescr = ma.make_mask_descr(_dtype) + _mask = narray([tuple([m] * len(mdescr)) for m in objmask], + dtype=mdescr).view(recarray) + # Update some of the attributes + _dict = self.__dict__ + _dict.update(_mask=_mask) + self._update_from(obj) + if _dict['_baseclass'] == ndarray: + _dict['_baseclass'] = recarray + return + + def _getdata(self): + """ + Returns the data as a recarray. + + """ + return ndarray.view(self, recarray) + + _data = property(fget=_getdata) + + def _getfieldmask(self): + """ + Alias to mask. + + """ + return self._mask + + _fieldmask = property(fget=_getfieldmask) + + def __len__(self): + """ + Returns the length + + """ + # We have more than one record + if self.ndim: + return len(self._data) + # We have only one record: return the nb of fields + return len(self.dtype) + + def __getattribute__(self, attr): + try: + return object.__getattribute__(self, attr) + except AttributeError: + # attr must be a fieldname + pass + fielddict = ndarray.__getattribute__(self, 'dtype').fields + try: + res = fielddict[attr][:2] + except (TypeError, KeyError): + raise AttributeError("record array has no attribute %s" % attr) + # So far, so good + _localdict = ndarray.__getattribute__(self, '__dict__') + _data = ndarray.view(self, _localdict['_baseclass']) + obj = _data.getfield(*res) + if obj.dtype.fields: + raise NotImplementedError("MaskedRecords is currently limited to" + "simple records.") + # Get some special attributes + # Reset the object's mask + hasmasked = False + _mask = _localdict.get('_mask', None) + if _mask is not None: + try: + _mask = _mask[attr] + except IndexError: + # Couldn't find a mask: use the default (nomask) + pass + hasmasked = _mask.view((bool, (len(_mask.dtype) or 1))).any() + if (obj.shape or hasmasked): + obj = obj.view(MaskedArray) + obj._baseclass = ndarray + obj._isfield = True + obj._mask = _mask + # Reset the field values + _fill_value = _localdict.get('_fill_value', None) + if _fill_value is not None: + try: + obj._fill_value = _fill_value[attr] + except ValueError: + obj._fill_value = None + else: + obj = obj.item() + return obj + + def __setattr__(self, attr, val): + """ + Sets the attribute attr to the value val. + + """ + # Should we call __setmask__ first ? + if attr in ['mask', 'fieldmask']: + self.__setmask__(val) + return + # Create a shortcut (so that we don't have to call getattr all the time) + _localdict = object.__getattribute__(self, '__dict__') + # Check whether we're creating a new field + newattr = attr not in _localdict + try: + # Is attr a generic attribute ? 
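+            # If the plain setattr below succeeds for a *field* name, the
+            # freshly created attribute is removed again further down and
+            # the value is routed to the underlying record field instead.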
+ ret = object.__setattr__(self, attr, val) + except Exception: + # Not a generic attribute: exit if it's not a valid field + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = ndarray.__getattribute__(self, '_optinfo') or {} + if not (attr in fielddict or attr in optinfo): + exctype, value = sys.exc_info()[:2] + raise exctype(value) + else: + # Get the list of names + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + # Check the attribute + if attr not in fielddict: + return ret + if newattr: + # We just added this one or this setattr worked on an + # internal attribute. + try: + object.__delattr__(self, attr) + except Exception: + return ret + # Let's try to set the field + try: + res = fielddict[attr][:2] + except (TypeError, KeyError): + raise AttributeError("record array has no attribute %s" % attr) + + if val is masked: + _fill_value = _localdict['_fill_value'] + if _fill_value is not None: + dval = _localdict['_fill_value'][attr] + else: + dval = val + mval = True + else: + dval = filled(val) + mval = getmaskarray(val) + obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) + _localdict['_mask'].__setitem__(attr, mval) + return obj + + def __getitem__(self, indx): + """ + Returns all the fields sharing the same fieldname base. + + The fieldname base is either `_data` or `_mask`. + + """ + _localdict = self.__dict__ + _mask = ndarray.__getattribute__(self, '_mask') + _data = ndarray.view(self, _localdict['_baseclass']) + # We want a field + if isinstance(indx, basestring): + # Make sure _sharedmask is True to propagate back to _fieldmask + # Don't use _set_mask, there are some copies being made that + # break propagation Don't force the mask to nomask, that wreaks + # easy masking + obj = _data[indx].view(MaskedArray) + obj._mask = _mask[indx] + obj._sharedmask = True + fval = _localdict['_fill_value'] + if fval is not None: + obj._fill_value = fval[indx] + # Force to masked if the mask is True + if not obj.ndim and obj._mask: + return masked + return obj + # We want some elements. + # First, the data. + obj = np.array(_data[indx], copy=False).view(mrecarray) + obj._mask = np.array(_mask[indx], copy=False).view(recarray) + return obj + + def __setitem__(self, indx, value): + """ + Sets the given record to value. + + """ + MaskedArray.__setitem__(self, indx, value) + if isinstance(indx, basestring): + self._mask[indx] = ma.getmaskarray(value) + + def __str__(self): + """ + Calculates the string representation. + + """ + if self.size > 1: + mstr = ["(%s)" % ",".join([str(i) for i in s]) + for s in zip(*[getattr(self, f) for f in self.dtype.names])] + return "[%s]" % ", ".join(mstr) + else: + mstr = ["%s" % ",".join([str(i) for i in s]) + for s in zip([getattr(self, f) for f in self.dtype.names])] + return "(%s)" % ", ".join(mstr) + + def __repr__(self): + """ + Calculates the repr representation. + + """ + _names = self.dtype.names + fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) + reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] + reprstr.insert(0, 'masked_records(') + reprstr.extend([fmt % (' fill_value', self.fill_value), + ' )']) + return str("\n".join(reprstr)) + + def view(self, dtype=None, type=None): + """ + Returns a view of the mrecarray. + + """ + # OK, basic copy-paste from MaskedArray.view. + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + # Here again. 
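+        # dtype was given but `type` was not: as in MaskedArray.view, first
+        # check whether `dtype` is actually an ndarray subclass.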
+ elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + # OK, there's the change + except TypeError: + dtype = np.dtype(dtype) + # we need to revert to MaskedArray, but keeping the possibility + # of subclasses (eg, TimeSeriesRecords), so we'll force a type + # set to the first parent + if dtype.fields is None: + basetype = self.__class__.__bases__[0] + output = self.__array__().view(dtype, basetype) + output._update_from(self) + else: + output = ndarray.view(self, dtype) + output._fill_value = None + else: + output = ndarray.view(self, dtype, type) + # Update the mask, just like in MaskedArray.view + if (getattr(output, '_mask', nomask) is not nomask): + mdtype = ma.make_mask_descr(output.dtype) + output._mask = self._mask.view(mdtype, ndarray) + output._mask.shape = output.shape + return output + + def harden_mask(self): + """ + Forces the mask to hard. + + """ + self._hardmask = True + + def soften_mask(self): + """ + Forces the mask to soft + + """ + self._hardmask = False + + def copy(self): + """ + Returns a copy of the masked record. + + """ + copied = self._data.copy().view(type(self)) + copied._mask = self._mask.copy() + return copied + + def tolist(self, fill_value=None): + """ + Return the data portion of the array as a list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to fill_value. If fill_value is None, + the corresponding entries in the output list will be ``None``. + + """ + if fill_value is not None: + return self.filled(fill_value).tolist() + result = narray(self.filled().tolist(), dtype=object) + mask = narray(self._mask.tolist()) + result[mask] = None + return result.tolist() + + def __getstate__(self): + """Return the internal state of the masked array. + + This is for pickling. + + """ + state = (1, + self.shape, + self.dtype, + self.flags.fnc, + self._data.tobytes(), + self._mask.tobytes(), + self._fill_value, + ) + return state + + def __setstate__(self, state): + """ + Restore the internal state of the masked array. + + This is for pickling. ``state`` is typically the output of the + ``__getstate__`` output, and is a 5-tuple: + + - class name + - a tuple giving the shape of the data + - a typecode for the data + - a binary string for the data + - a binary string for the mask. + + """ + (ver, shp, typ, isf, raw, msk, flv) = state + ndarray.__setstate__(self, (shp, typ, isf, raw)) + mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr]) + self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) + self.fill_value = flv + + def __reduce__(self): + """ + Return a 3-tuple for pickling a MaskedArray. + + """ + return (_mrreconstruct, + (self.__class__, self._baseclass, (0,), 'b',), + self.__getstate__()) + +def _mrreconstruct(subtype, baseclass, baseshape, basetype,): + """ + Build a new MaskedArray from the information stored in a pickle. 
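+    Used as the reconstruction callable by ``MaskedRecords.__reduce__``.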
+ + """ + _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) + _mask = ndarray.__new__(ndarray, baseshape, 'b1') + return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + +mrecarray = MaskedRecords + + +############################################################################### +# Constructors # +############################################################################### + + +def fromarrays(arraylist, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, + fill_value=None): + """ + Creates a mrecarray from a (flat) list of masked arrays. + + Parameters + ---------- + arraylist : sequence + A list of (masked) arrays. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None, integer}, optional + Number of records. If None, shape is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + datalist = [getdata(x) for x in arraylist] + masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] + _array = recfromarrays(datalist, + dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, aligned=aligned, + byteorder=byteorder).view(mrecarray) + _array._mask.flat = list(zip(*masklist)) + if fill_value is not None: + _array.fill_value = fill_value + return _array + + +def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None, + fill_value=None, mask=nomask): + """ + Creates a MaskedRecords from a list of records. + + Parameters + ---------- + reclist : sequence + A list of records. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None,int}, optional + Number of records. If None, ``shape`` is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + mask : {nomask, sequence}, optional. + External mask to apply on the data. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + # Grab the initial _fieldmask, if needed: + _mask = getattr(reclist, '_mask', None) + # Get the list of records. 
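+    # ndarray inputs (masked or not) are converted to a plain list of records
+    # first, so that numpy.core.records.fromrecords can consume them uniformly.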
+    if isinstance(reclist, ndarray):
+        # Make sure we don't have some hidden mask
+        if isinstance(reclist, MaskedArray):
+            reclist = reclist.filled().view(ndarray)
+        # Grab the initial dtype, just in case
+        if dtype is None:
+            dtype = reclist.dtype
+        reclist = reclist.tolist()
+    mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
+                          names=names, titles=titles,
+                          aligned=aligned, byteorder=byteorder).view(mrecarray)
+    # Set the fill_value if needed
+    if fill_value is not None:
+        mrec.fill_value = fill_value
+    # Now, let's deal w/ the mask
+    if mask is not nomask:
+        mask = np.array(mask, copy=False)
+        maskrecordlength = len(mask.dtype)
+        if maskrecordlength:
+            mrec._mask.flat = mask
+        elif mask.ndim == 2:
+            mrec._mask.flat = [tuple(m) for m in mask]
+        else:
+            mrec.__setmask__(mask)
+    if _mask is not None:
+        mrec._mask[:] = _mask
+    return mrec
+
+
+def _guessvartypes(arr):
+    """
+    Tries to guess the dtypes of the str_ ndarray `arr`.
+
+    Guesses by testing element-wise conversion. Returns a list of dtypes.
+    The array is first converted to ndarray. If the array is 2D, the test
+    is performed on the first line. An exception is raised if the array is
+    3-D or more.
+
+    """
+    vartypes = []
+    arr = np.asarray(arr)
+    if arr.ndim == 2:
+        arr = arr[0]
+    elif arr.ndim > 2:
+        raise ValueError("The array should be 2D at most!")
+
+    # Start the conversion loop.
+    for f in arr:
+        try:
+            int(f)
+        except (ValueError, TypeError):
+            try:
+                float(f)
+            except (ValueError, TypeError):
+                try:
+                    complex(f)
+                except (ValueError, TypeError):
+                    vartypes.append(arr.dtype)
+                else:
+                    vartypes.append(np.dtype(complex))
+            else:
+                vartypes.append(np.dtype(float))
+        else:
+            vartypes.append(np.dtype(int))
+    return vartypes
+
+
+def openfile(fname):
+    """
+    Opens the file handle of file `fname`.
+
+    """
+    # A file handle
+    if hasattr(fname, 'readline'):
+        return fname
+    # Try to open the file and guess its type
+    try:
+        f = open(fname)
+    except IOError:
+        raise IOError("No such file: '%s'" % fname)
+    if f.readline()[:2] != "\\x":
+        f.seek(0, 0)
+        return f
+    f.close()
+    raise NotImplementedError("Wow, binary file")
+
+
+def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
+                 varnames=None, vartypes=None):
+    """
+    Creates a mrecarray from data stored in the file `fname`.
+
+    Parameters
+    ----------
+    fname : {file name/handle}
+        Handle of an opened file.
+    delimitor : {None, string}, optional
+        Alphanumeric character used to separate columns in the file.
+        If None, any (group of) whitespace string(s) will be used.
+    commentchar : {'#', string}, optional
+        Alphanumeric character used to mark the start of a comment.
+    missingchar : {'', string}, optional
+        String indicating missing data, and used to create the masks.
+    varnames : {None, sequence}, optional
+        Sequence of the variable names. If None, a list will be created from
+        the first non-empty line of the file.
+    vartypes : {None, sequence}, optional
+        Sequence of the variables dtypes. If None, it will be estimated from
+        the first non-commented line.
+
+    Notes
+    -----
+    Ultra simple: the varnames are in the header, one line.
+
+    """
+    # Try to open the file.
+    ftext = openfile(fname)
+
+    # Get the first non-empty line as the varnames
+    while True:
+        line = ftext.readline()
+        firstline = line[:line.find(commentchar)].strip()
+        _varnames = firstline.split(delimitor)
+        if len(_varnames) > 1:
+            break
+    if varnames is None:
+        varnames = _varnames
+
+    # Get the data.
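+    # Each remaining non-comment line becomes one row of string fields; the
+    # dtypes are guessed below whenever `vartypes` was not supplied.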
+    _variables = masked_array([line.strip().split(delimitor) for line in ftext
+                               if line[0] != commentchar and len(line) > 1])
+    (_, nfields) = _variables.shape
+    ftext.close()
+
+    # Try to guess the dtype.
+    if vartypes is None:
+        vartypes = _guessvartypes(_variables[0])
+    else:
+        vartypes = [np.dtype(v) for v in vartypes]
+        if len(vartypes) != nfields:
+            msg = "Attempting to use %i dtypes for %i fields!"
+            msg += " Reverting to default."
+            warnings.warn(msg % (len(vartypes), nfields), stacklevel=2)
+            vartypes = _guessvartypes(_variables[0])
+
+    # Construct the descriptor.
+    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
+    mfillv = [ma.default_fill_value(f) for f in vartypes]
+
+    # Get the data and the mask.
+    # We just need a list of masked_arrays. It's easier to create it like that:
+    _mask = (_variables.T == missingchar)
+    _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
+                 for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
+
+    return fromarrays(_datalist, dtype=mdescr)
+
+
+def addfield(mrecord, newfield, newfieldname=None):
+    """Adds a new field to the masked record array.
+
+    Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
+    is None, the new field name is set to 'fi', where `i` is the number of
+    existing fields.
+
+    """
+    _data = mrecord._data
+    _mask = mrecord._mask
+    if newfieldname is None or newfieldname in reserved_fields:
+        newfieldname = 'f%i' % len(_data.dtype)
+    newfield = ma.array(newfield)
+    # Get the new data.
+    # Create a new empty recarray
+    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
+    newdata = recarray(_data.shape, newdtype)
+    # Add the existing field
+    [newdata.setfield(_data.getfield(*f), *f)
+     for f in _data.dtype.fields.values()]
+    # Add the new field
+    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
+    newdata = newdata.view(MaskedRecords)
+    # Get the new mask
+    # Create a new empty recarray
+    newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
+    newmask = recarray(_data.shape, newmdtype)
+    # Add the old masks
+    [newmask.setfield(_mask.getfield(*f), *f)
+     for f in _mask.dtype.fields.values()]
+    # Add the mask of the new field
+    newmask.setfield(getmaskarray(newfield),
+                     *newmask.dtype.fields[newfieldname])
+    newdata._mask = newmask
+    return newdata
diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/mrecords.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/mrecords.pyc
new file mode 100644
index 0000000..90ca3cd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/mrecords.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/setup.py b/project/venv/lib/python2.7/site-packages/numpy/ma/setup.py
new file mode 100644
index 0000000..d1d6c89
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/ma/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+from __future__ import division, print_function
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('ma', parent_package, top_path)
+    config.add_data_dir('tests')
+    return config
+
+if __name__ == "__main__":
+    from numpy.distutils.core import setup
+    config = configuration(top_path='').todict()
+    setup(**config)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/setup.pyc
new file mode 100644
index 0000000..5a6e490
Binary files /dev/null and
b/project/venv/lib/python2.7/site-packages/numpy/ma/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/__init__.pyc new file mode 100644 index 0000000..4b137f8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_core.py b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_core.py new file mode 100644 index 0000000..e0dbf1b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_core.py @@ -0,0 +1,5205 @@ +# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102 +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +""" +from __future__ import division, absolute_import, print_function + +__author__ = "Pierre GF Gerard-Marchant" + +import sys +import warnings +import operator +import itertools +import textwrap +import pytest + +from functools import reduce + + +import numpy as np +import numpy.ma.core +import numpy.core.fromnumeric as fromnumeric +import numpy.core.umath as umath +from numpy.testing import ( + assert_raises, assert_warns, suppress_warnings + ) +from numpy import ndarray +from numpy.compat import asbytes +from numpy.ma.testutils import ( + assert_, assert_array_equal, assert_equal, assert_almost_equal, + assert_equal_records, fail_if_equal, assert_not_equal, + assert_mask_equal + ) +from numpy.ma.core import ( + MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, + allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2, + arcsin, arctan, argsort, array, asarray, choose, concatenate, + conjugate, cos, cosh, count, default_fill_value, diag, divide, empty, + empty_like, equal, exp, flatten_mask, filled, fix_invalid, + flatten_structured_array, fromflex, getmask, getmaskarray, greater, + greater_equal, identity, inner, isMaskedArray, less, less_equal, log, + log10, make_mask, make_mask_descr, mask_or, masked, masked_array, + masked_equal, masked_greater, masked_greater_equal, masked_inside, + masked_less, masked_less_equal, masked_not_equal, masked_outside, + masked_print_option, masked_values, masked_where, max, maximum, + maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, + mvoid, nomask, not_equal, ones, outer, power, product, put, putmask, + ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt, + subtract, sum, take, tan, tanh, transpose, where, zeros, + ) +from numpy.core.numeric import pickle + +pi = np.pi + + +suppress_copy_mask_on_assignment = suppress_warnings() +suppress_copy_mask_on_assignment.filter( + numpy.ma.core.MaskedArrayFutureWarning, + "setting an item on a masked array which has a shared mask will not copy") + + +# For parametrized numeric testing +num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] +num_ids = [dt_.char for dt_ in num_dts] + + +class TestMaskedArray(object): + # Base test class for MaskedArrays. + + def setup(self): + # Base data definition. + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + a10 = 10. 
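+        # In m1/m2 below, 1 marks a masked position; they are applied to x
+        # and y to build the masked operands xm and ym used throughout.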
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + z = np.array([-.5, 0., .5, .8]) + zm = masked_array(z, mask=[0, 1, 0, 0]) + xf = np.where(m1, 1e+20, x) + xm.set_fill_value(1e+20) + self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + + def test_basicattributes(self): + # Tests some basic array attributes. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a.ndim, 1) + assert_equal(b.ndim, 1) + assert_equal(a.size, 3) + assert_equal(b.size, 3) + assert_equal(a.shape, (3,)) + assert_equal(b.shape, (3,)) + + def test_basic0d(self): + # Checks masking a scalar + x = masked_array(0) + assert_equal(str(x), '0') + x = masked_array(0, mask=True) + assert_equal(str(x), str(masked_print_option)) + x = masked_array(0, mask=False) + assert_equal(str(x), '0') + x = array(0, mask=1) + assert_(x.filled().dtype is x._data.dtype) + + def test_basic1d(self): + # Test of basic array creation and properties in 1 dimension. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_((xm - ym).filled(0).any()) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) + s = x.shape + assert_equal(np.shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.dtype, x.dtype) + assert_equal(zm.dtype, z.dtype) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_array_equal(xm, xf) + assert_array_equal(filled(xm, 1.e20), xf) + assert_array_equal(x, xm) + + def test_basic2d(self): + # Test of basic array creation and properties in 2 dimensions. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm, xf) + assert_equal(filled(xm, 1.e20), xf) + assert_equal(x, xm) + + def test_concatenate_basic(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # basic concatenation + assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) + assert_equal(np.concatenate((x, y)), concatenate((x, y))) + assert_equal(np.concatenate((x, y)), concatenate((xm, y))) + assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) + + def test_concatenate_alongaxis(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # Concatenation along an axis + s = (3, 4) + x.shape = y.shape = xm.shape = ym.shape = s + assert_equal(xm.mask, np.reshape(m1, s)) + assert_equal(ym.mask, np.reshape(m2, s)) + xmym = concatenate((xm, ym), 1) + assert_equal(np.concatenate((x, y), 1), xmym) + assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) + + x = zeros(2) + y = array(ones(2), mask=[False, True]) + z = concatenate((x, y)) + assert_array_equal(z, [0, 0, 1, 1]) + assert_array_equal(z.mask, [False, False, False, True]) + z = concatenate((y, x)) + assert_array_equal(z, [1, 1, 0, 0]) + assert_array_equal(z.mask, [False, True, False, False]) + + def test_concatenate_flexible(self): + # Tests the concatenation on flexible arrays. 
+ data = masked_array(list(zip(np.random.rand(10), + np.arange(10))), + dtype=[('a', float), ('b', int)]) + + test = concatenate([data[:5], data[5:]]) + assert_equal_records(test, data) + + def test_creation_ndmin(self): + # Check the use of ndmin + x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) + assert_equal(x.shape, (1, 3)) + assert_equal(x._data, [[1, 2, 3]]) + assert_equal(x._mask, [[1, 0, 0]]) + + def test_creation_ndmin_from_maskedarray(self): + # Make sure we're not losing the original mask w/ ndmin + x = array([1, 2, 3]) + x[-1] = masked + xx = array(x, ndmin=2, dtype=float) + assert_equal(x.shape, x._mask.shape) + assert_equal(xx.shape, xx._mask.shape) + + def test_creation_maskcreation(self): + # Tests how masks are initialized at the creation of Maskedarrays. + data = arange(24, dtype=float) + data[[3, 6, 15]] = masked + dma_1 = MaskedArray(data) + assert_equal(dma_1.mask, data.mask) + dma_2 = MaskedArray(dma_1) + assert_equal(dma_2.mask, dma_1.mask) + dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) + fail_if_equal(dma_3.mask, dma_1.mask) + + x = array([1, 2, 3], mask=True) + assert_equal(x._mask, [True, True, True]) + x = array([1, 2, 3], mask=False) + assert_equal(x._mask, [False, False, False]) + y = array([1, 2, 3], mask=x._mask, copy=False) + assert_(np.may_share_memory(x.mask, y.mask)) + y = array([1, 2, 3], mask=x._mask, copy=True) + assert_(not np.may_share_memory(x.mask, y.mask)) + + def test_creation_with_list_of_maskedarrays(self): + # Tests creating a masked array from a list of masked arrays. + x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) + + x.mask = nomask + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_(data.mask is nomask) + + def test_creation_from_ndarray_with_padding(self): + x = np.array([('A', 0)], dtype={'names':['f0','f1'], + 'formats':['S4','i8'], + 'offsets':[0,8]}) + array(x) # used to fail due to 'V' padding field in x.dtype.descr + + def test_asarray(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm.fill_value = -9999 + xm._hardmask = True + xmm = asarray(xm) + assert_equal(xmm._data, xm._data) + assert_equal(xmm._mask, xm._mask) + assert_equal(xmm.fill_value, xm.fill_value) + assert_equal(xmm._hardmask, xm._hardmask) + + def test_asarray_default_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m) + assert_(new_m.flags.c_contiguous) + + def test_asarray_enforce_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m, order='C') + assert_(new_m.flags.c_contiguous) + + def test_fix_invalid(self): + # Checks fix_invalid. 
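+        # fix_invalid masks NaN/Inf entries and replaces their data with the
+        # array's fill_value.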
+ with np.errstate(invalid='ignore'): + data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) + data_fixed = fix_invalid(data) + assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) + assert_equal(data_fixed._mask, [1., 0., 1.]) + + def test_maskedelement(self): + # Test of masked element + x = arange(6) + x[1] = masked + assert_(str(masked) == '--') + assert_(x[1] is masked) + assert_equal(filled(x[1], 0), 0) + + def test_set_element_as_object(self): + # Tests setting elements with object + a = empty(1, dtype=object) + x = (1, 2, 3, 4, 5) + a[0] = x + assert_equal(a[0], x) + assert_(a[0] is x) + + import datetime + dt = datetime.datetime.now() + a[0] = dt + assert_(a[0] is dt) + + def test_indexing(self): + # Tests conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? + assert_equal(np.sort(x1), sort(x2, endwith=False)) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_equal(x1[2], x2[2]) + assert_equal(x1[2:5], x2[2:5]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[2] = 9 + x2[2] = 9 + assert_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + assert_equal(x1, x2) + x2[1] = masked + assert_equal(x1, x2) + x2[1:3] = masked + assert_equal(x1, x2) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_equal(3.0, x2.fill_value) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + @suppress_copy_mask_on_assignment + def test_copy(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=1) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_equal(y1._data.__array_interface__, x1.__array_interface__) + assert_(allequal(x1, y1.data)) + assert_equal(y1._mask.__array_interface__, m.__array_interface__) + + y1a = array(y1) + # Default for masked array is not to copy; see gh-10318. 
+ assert_(y1a._data.__array_interface__ == + y1._data.__array_interface__) + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3) + assert_(y2._data.__array_interface__ == x1.__array_interface__) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._data.__array_interface__ != x1.__array_interface__) + #assert_( y2a.mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + #assert_( y2a.mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_equal(concatenate([x4, x4]), y4) + assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = repeat(x4, 2, axis=0) + assert_equal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert_equal(y5, y7) + y8 = x4.repeat(2, 0) + assert_equal(y5, y8) + + y9 = x4.copy() + assert_equal(y9._data, x4._data) + assert_equal(y9._mask, x4._mask) + + x = masked_array([1, 2, 3], mask=[0, 1, 0]) + # Copy is False by default + y = masked_array(x) + assert_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) + y = masked_array(x, copy=True) + assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + + def test_copy_0d(self): + # gh-9430 + x = np.ma.array(43, mask=True) + xc = x.copy() + assert_equal(xc.mask, True) + + def test_copy_on_python_builtins(self): + # Tests copy works on python builtins (issue#8019) + assert_(isMaskedArray(np.ma.copy([1,2,3]))) + assert_(isMaskedArray(np.ma.copy((1,2,3)))) + + def test_copy_immutable(self): + # Tests that the copy method is immutable, GitHub issue #5247 + a = np.ma.array([1, 2, 3]) + b = np.ma.array([4, 5, 6]) + a_copy_method = a.copy + b.copy + assert_equal(a_copy_method(), [1, 2, 3]) + + def test_deepcopy(self): + from copy import deepcopy + a = array([0, 1, 2], mask=[False, True, False]) + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + assert_not_equal(id(a._mask), id(copied._mask)) + + copied[1] = 1 + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + copied.mask[1] = False + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + def test_str_repr(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999)''') + ) + + # arrays with a continuation + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, --, ..., 1997, 1998, 1999], + mask=[False, True, True, ..., False, False, False], + fill_value=999999)''') + ) + + # line-wrapped 1d arrays are correctly aligned + a = np.ma.arange(20) + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 
15, 16, 17, 18, 19],
+                         mask=False,
+                   fill_value=999999)''')
+        )
+
+        # 2d arrays cause wrapping
+        a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8)
+        a[1,1] = np.ma.masked
+        assert_equal(
+            repr(a),
+            textwrap.dedent('''\
+            masked_array(
+              data=[[1, 2, 3],
+                    [4, --, 6]],
+              mask=[[False, False, False],
+                    [False,  True, False]],
+              fill_value=999999,
+              dtype=int8)''')
+        )
+
+        # but not if they're a row vector
+        assert_equal(
+            repr(a[:1]),
+            textwrap.dedent('''\
+            masked_array(data=[[1, 2, 3]],
+                         mask=[[False, False, False]],
+                   fill_value=999999,
+                        dtype=int8)''')
+        )
+
+        # dtype=int is implied, so not shown
+        assert_equal(
+            repr(a.astype(int)),
+            textwrap.dedent('''\
+            masked_array(
+              data=[[1, 2, 3],
+                    [4, --, 6]],
+              mask=[[False, False, False],
+                    [False,  True, False]],
+              fill_value=999999)''')
+        )
+
+    def test_str_repr_legacy(self):
+        oldopts = np.get_printoptions()
+        np.set_printoptions(legacy='1.13')
+        try:
+            a = array([0, 1, 2], mask=[False, True, False])
+            assert_equal(str(a), '[0 -- 2]')
+            assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
+                                  '             mask = [False  True False],\n'
+                                  '       fill_value = 999999)\n')
+
+            a = np.ma.arange(2000)
+            a[1:50] = np.ma.masked
+            assert_equal(
+                repr(a),
+                'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n'
+                '             mask = [False  True  True ..., False False False],\n'
+                '       fill_value = 999999)\n'
+            )
+        finally:
+            np.set_printoptions(**oldopts)
+
+    def test_0d_unicode(self):
+        u = u'caf\xe9'
+        utype = type(u)
+
+        arr_nomask = np.ma.array(u)
+        arr_masked = np.ma.array(u, mask=True)
+
+        assert_equal(utype(arr_nomask), u)
+        assert_equal(utype(arr_masked), u'--')
+
+    def test_pickling(self):
+        # Tests pickling
+        for dtype in (int, float, str, object):
+            a = arange(10).astype(dtype)
+            a.fill_value = 999
+
+            masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1],  # partially masked
+                     True,                             # Fully masked
+                     False)                            # Fully unmasked
+
+            for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+                for mask in masks:
+                    a.mask = mask
+                    a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+                    assert_equal(a_pickled._mask, a._mask)
+                    assert_equal(a_pickled._data, a._data)
+                    if dtype in (object, int):
+                        assert_equal(a_pickled.fill_value, 999)
+                    else:
+                        assert_equal(a_pickled.fill_value, dtype(999))
+                    assert_array_equal(a_pickled.mask, mask)
+
+    def test_pickling_subbaseclass(self):
+        # Test pickling w/ a subclass of ndarray
+        x = np.array([(1.0, 2), (3.0, 4)],
+                     dtype=[('x', float), ('y', int)]).view(np.recarray)
+        a = masked_array(x, mask=[(True, False), (False, True)])
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+            assert_equal(a_pickled._mask, a._mask)
+            assert_equal(a_pickled, a)
+            assert_(isinstance(a_pickled._data, np.recarray))
+
+    def test_pickling_maskedconstant(self):
+        # Test pickling MaskedConstant
+        mc = np.ma.masked
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto))
+            assert_equal(mc_pickled._baseclass, mc._baseclass)
+            assert_equal(mc_pickled._mask, mc._mask)
+            assert_equal(mc_pickled._data, mc._data)
+
+    def test_pickling_wstructured(self):
+        # Tests pickling w/ structured array
+        a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
+                  dtype=[('a', int), ('b', float)])
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+            assert_equal(a_pickled._mask, a._mask)
+            assert_equal(a_pickled, a)
+
+    def test_pickling_keepalignment(self):
+        # Tests pickling w/ F_CONTIGUOUS arrays
+        a = arange(10)
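+        # (reshaping to (5, 2) and transposing below yields an F-contiguous
+        #  array; the round-trip check verifies pickling preserves its layout.)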
+        a.shape = (-1, 2)
+        b = a.T
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            test = pickle.loads(pickle.dumps(b, protocol=proto))
+            assert_equal(test, b)
+
+    def test_single_element_subscript(self):
+        # Tests single element subscripts of MaskedArrays.
+        a = array([1, 3, 2])
+        b = array([1, 3, 2], mask=[1, 0, 1])
+        assert_equal(a[0].shape, ())
+        assert_equal(b[0].shape, ())
+        assert_equal(b[1].shape, ())
+
+    def test_topython(self):
+        # Tests some communication issues with Python.
+        assert_equal(1, int(array(1)))
+        assert_equal(1.0, float(array(1)))
+        assert_equal(1, int(array([[[1]]])))
+        assert_equal(1.0, float(array([[1]])))
+        assert_raises(TypeError, float, array([1, 1]))
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, 'Warning: converting a masked element')
+            assert_(np.isnan(float(array([1], mask=[1]))))
+
+        a = array([1, 2, 3], mask=[1, 0, 0])
+        assert_raises(TypeError, lambda: float(a))
+        assert_equal(float(a[-1]), 3.)
+        assert_(np.isnan(float(a[0])))
+        assert_raises(TypeError, int, a)
+        assert_equal(int(a[-1]), 3)
+        assert_raises(MAError, lambda: int(a[0]))
+
+    def test_oddfeatures_1(self):
+        # Test of other odd features
+        x = arange(20)
+        x = x.reshape(4, 5)
+        x.flat[5] = 12
+        assert_(x[1, 0] == 12)
+        z = x + 10j * x
+        assert_equal(z.real, x)
+        assert_equal(z.imag, 10 * x)
+        assert_equal((z * conjugate(z)).real, 101 * x * x)
+        z.imag[...] = 0.0
+
+        x = arange(10)
+        x[3] = masked
+        assert_(str(x[3]) == str(masked))
+        c = x >= 8
+        assert_(count(where(c, masked, masked)) == 0)
+        assert_(shape(where(c, masked, masked)) == c.shape)
+
+        z = masked_where(c, x)
+        assert_(z.dtype is x.dtype)
+        assert_(z[3] is masked)
+        assert_(z[4] is not masked)
+        assert_(z[7] is not masked)
+        assert_(z[8] is masked)
+        assert_(z[9] is masked)
+        assert_equal(x, z)
+
+    def test_oddfeatures_2(self):
+        # Tests some more features.
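+        # (mainly np.ma.where: a masked entry in the condition should leave
+        #  the corresponding result entry masked, as checked below.)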
+ x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + @suppress_copy_mask_on_assignment + def test_oddfeatures_3(self): + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) + + def test_filled_with_object_dtype(self): + a = np.ma.masked_all(1, dtype='O') + assert_equal(a.filled('x')[0], 'x') + + def test_filled_with_flexible_dtype(self): + # Test filled w/ flexible dtype + flexi = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + flexi[0] = masked + assert_equal(flexi.filled(), + np.array([(default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),)], dtype=flexi.dtype)) + flexi[0] = masked + assert_equal(flexi.filled(1), + np.array([(1, '1', 1.)], dtype=flexi.dtype)) + + def test_filled_with_mvoid(self): + # Test filled w/ mvoid + ndtype = [('a', int), ('b', float)] + a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) + # Filled using default + test = a.filled() + assert_equal(tuple(test), (1, default_fill_value(1.))) + # Explicit fill_value + test = a.filled((-1, -1)) + assert_equal(tuple(test), (1, -1)) + # Using predefined filling values + a.fill_value = (-999, -999) + assert_equal(tuple(a.filled()), (1, -999)) + + def test_filled_with_nested_dtype(self): + # Test filled w/ nested dtype + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + # test if mask gets set correctly (see #6760) + Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))])) + assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), + ('f1', 'i1', (2, 2))], (2, 2))])) + assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), + ('f1', '?', (2, 2))], (2, 2))])) + + def test_filled_with_f_order(self): + # Test filled w/ F-contiguous array + a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), + mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), + order='F') # this is currently ignored + assert_(a.flags['F_CONTIGUOUS']) + assert_(a.filled(0).flags['F_CONTIGUOUS']) + + def test_optinfo_propagation(self): + # Checks that _optinfo dictionary isn't back-propagated + x = array([1, 2, 3, ], dtype=float) + x._optinfo['info'] = '???' + y = x.copy() + assert_equal(y._optinfo['info'], '???') + y._optinfo['info'] = '!!!' 
+ assert_equal(x._optinfo['info'], '???') + + def test_optinfo_forward_propagation(self): + a = array([1,2,2,4]) + a._optinfo["key"] = "value" + assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) + + def test_fancy_printoptions(self): + # Test printing a masked array w/ fancy dtype. + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + # Test 0-d array with multi-dimensional dtype + t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask = (False, [[True, False, True], + [False, False, True]], + False), + dtype = "int, (2,3)float, float") + control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" + assert_equal(str(t_2d0), control) + + def test_flatten_structured_array(self): + # Test flatten_structured_array on arrays + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + def test_void0d(self): + # Test creating a mvoid object + ndtype = [('a', int), ('b', int)] + a = np.array([(1, 2,)], dtype=ndtype)[0] + f = mvoid(a) + assert_(isinstance(f, mvoid)) + + a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] + assert_(isinstance(a, mvoid)) + + a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + f = mvoid(a._data[0], a._mask[0]) + assert_(isinstance(f, 
mvoid))
+
+    def test_mvoid_getitem(self):
+        # Test mvoid.__getitem__
+        ndtype = [('a', int), ('b', int)]
+        a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+                         dtype=ndtype)
+        # w/o mask
+        f = a[0]
+        assert_(isinstance(f, mvoid))
+        assert_equal((f[0], f['a']), (1, 1))
+        assert_equal(f['b'], 2)
+        # w/ mask
+        f = a[1]
+        assert_(isinstance(f, mvoid))
+        assert_(f[0] is masked)
+        assert_(f['a'] is masked)
+        assert_equal(f[1], 4)
+
+        # exotic dtype
+        A = masked_array(data=[([0,1],)],
+                         mask=[([True, False],)],
+                         dtype=[("A", ">i2", (2,))])
+        assert_equal(A[0]["A"], A["A"][0])
+        assert_equal(A[0]["A"], masked_array(data=[0, 1],
+                         mask=[True, False], dtype=">i2"))
+
+    def test_mvoid_iter(self):
+        # Test iteration on __getitem__
+        ndtype = [('a', int), ('b', int)]
+        a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+                         dtype=ndtype)
+        # w/o mask
+        assert_equal(list(a[0]), [1, 2])
+        # w/ mask
+        assert_equal(list(a[1]), [masked, 4])
+
+    def test_mvoid_print(self):
+        # Test printing a mvoid
+        mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
+        assert_equal(str(mx[0]), "(1, 1)")
+        mx['b'][0] = masked
+        ini_display = masked_print_option._display
+        masked_print_option.set_display("-X-")
+        try:
+            assert_equal(str(mx[0]), "(1, -X-)")
+            assert_equal(repr(mx[0]), "(1, -X-)")
+        finally:
+            masked_print_option.set_display(ini_display)
+
+        # also check if there are object datatypes (see gh-7493)
+        mx = array([(1,), (2,)], dtype=[('a', 'O')])
+        assert_equal(str(mx[0]), "(1,)")
+
+    def test_mvoid_multidim_print(self):
+
+        # regression test for gh-6019
+        t_ma = masked_array(data = [([1, 2, 3],)],
+                            mask = [([False, True, False],)],
+                            fill_value = ([999999, 999999, 999999],),
+                            dtype = [('a', '<i8', (3,))])
+        assert_(str(t_ma[0]) == "([1, --, 3],)")
+        assert_(repr(t_ma[0]) == "([1, --, 3],)")
+
+    # [... lost in extraction: the remaining multidimensional print checks,
+    #  the opening of class TestMaskedArrayArithmetic and its setup; its
+    #  test_basic_arithmetic resumes mid-loop below ...]
+
+        if len(s) > 1:
+            assert_equal(np.concatenate((x, y), 1),
+                         concatenate((xm, ym), 1))
+            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
+            assert_equal(np.sum(x, 1), sum(x, 1))
+            assert_equal(np.product(x, 1), product(x, 1))
+
+    def test_binops_d2D(self):
+        # Test binary operations on 2D data
+        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+        test = a * b
+        control = array([[2., 3.], [2., 2.], [3., 3.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b * a
+        control = array([[2., 3.], [4., 5.], [6., 7.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        a = array([[1.], [2.], [3.]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]],
+                  mask=[[0, 0], [0, 0], [0, 1]])
+        test = a * b
+        control = array([[2, 3], [8, 10], [18, 3]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b * a
+        control = array([[2, 3], [8, 10], [18, 7]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+    def test_domained_binops_d2D(self):
+        # Test domained binary operations on 2D data
+        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+        test = a / b
+        control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b / a
+        control = array([[2. / 1., 3.
/ 1.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a / b + control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b / a + control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_noshrinking(self): + # Check that we don't shrink a mask when not wanted + # Binary operations + a = masked_array([1., 2., 3.], mask=[False, False, False], + shrink=False) + b = a + 1 + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a += 1 + assert_equal(a.mask, [0, 0, 0]) + # Domained binary operation + b = a / 1. + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a /= 1. + assert_equal(a.mask, [0, 0, 0]) + + def test_ufunc_nomask(self): + # check the case ufuncs should set the mask to false + m = np.ma.array([1]) + # check we don't get array([False], dtype=bool) + assert_equal(np.true_divide(m, 5).mask.shape, ()) + + def test_noshink_on_creation(self): + # Check that the mask is not shrunk on array creation when not wanted + a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False) + assert_equal(a.mask, [0, 0, 0]) + + def test_mod(self): + # Tests mod + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_equal(mod(x, y), mod(xm, ym)) + test = mod(ym, xm) + assert_equal(test, np.mod(ym, xm)) + assert_equal(test.mask, mask_or(xm.mask, ym.mask)) + test = mod(xm, ym) + assert_equal(test, np.mod(xm, ym)) + assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) + + def test_TakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) + assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) + assert_equal(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y)) + assert_equal(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y)) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_imag_real(self): + # Check complex + xx = array([1 + 10j, 20 + 2j], mask=[1, 0]) + assert_equal(xx.imag, [10, 2]) + assert_equal(xx.imag.filled(), [1e+20, 2]) + assert_equal(xx.imag.dtype, xx._data.imag.dtype) + assert_equal(xx.real, [1, 20]) + assert_equal(xx.real.filled(), [1e+20, 20]) + assert_equal(xx.real.dtype, xx._data.real.dtype) + + def test_methods_with_output(self): + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',) + + for funcname in funclist: + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + # A ndarray as explicit input + output = np.empty(4, dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... 
the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty(4, dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + assert_(output[0] is masked) + + def test_eq_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] == a) + assert_equal(test.data, [[True, False], [False, False]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_ne_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] != a) + assert_equal(test.data, [[False, True], [True, True]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_eq_ne_structured_extra(self): + # ensure simple examples are symmetric and make sense. 
+ # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465 + dt = np.dtype('i4,i4') + for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt), + mvoid((1, 2), mask=(0, 1), dtype=dt), + mvoid((1, 2), mask=(1, 0), dtype=dt), + mvoid((1, 2), mask=(1, 1), dtype=dt)): + ma1 = m1.view(MaskedArray) + r1 = ma1.view('2i4') + for m2 in (np.array((1, 1), dtype=dt), + mvoid((1, 1), dtype=dt), + mvoid((1, 0), mask=(0, 1), dtype=dt), + mvoid((3, 2), mask=(0, 1), dtype=dt)): + ma2 = m2.view(MaskedArray) + r2 = ma2.view('2i4') + eq_expected = (r1 == r2).all() + assert_equal(m1 == m2, eq_expected) + assert_equal(m2 == m1, eq_expected) + assert_equal(ma1 == m2, eq_expected) + assert_equal(m1 == ma2, eq_expected) + assert_equal(ma1 == ma2, eq_expected) + # Also check it is the same if we do it element by element. + el_by_el = [m1[name] == m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).all(), eq_expected) + ne_expected = (r1 != r2).any() + assert_equal(m1 != m2, ne_expected) + assert_equal(m2 != m1, ne_expected) + assert_equal(ma1 != m2, ne_expected) + assert_equal(m1 != ma2, ne_expected) + assert_equal(ma1 != ma2, ne_expected) + el_by_el = [m1[name] != m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) + + @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_eq_for_strings(self, dt, fill): + # Test the equality of structured arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + # test = (a[0] == b) # doesn't work in Python2 + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_ne_for_strings(self, dt, fill): + # Test the equality of structured arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + # test = (a[0] != b) # doesn't work in Python2 + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_eq_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + 
test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + # test = (a[0] == b) # doesn't work in Python2 + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_ne_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + # test = (a[0] != b) # doesn't work in Python2 + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + def test_eq_with_None(self): + # Really, comparisons with None should not be done, but check them + # anyway. Note that pep8 will flag these tests. + # Deprecation is in place for arrays, and when it happens this + # test will fail (and have to be changed accordingly). + + # With partial mask + with suppress_warnings() as sup: + sup.filter(FutureWarning, "Comparison to `None`") + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) + assert_equal(a.data == None, [True, False]) + assert_equal(a != None, array([False, True], mask=[0, 1])) + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) + assert_equal(a != None, [False, True]) + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) + assert_equal(a != None, array([True, False], mask=True)) + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) + + def test_eq_with_scalar(self): + a = array(1) + assert_equal(a == 1, True) + assert_equal(a == 0, False) + assert_equal(a != 1, False) + assert_equal(a != 0, True) + b = array(1, mask=True) + assert_equal(b == 0, masked) + assert_equal(b == 1, masked) + assert_equal(b != 0, masked) + assert_equal(b != 1, masked) + + def test_eq_different_dimensions(self): + m1 = array([1, 1], mask=[0, 1]) + # test comparison with both masked and regular arrays. 
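+        # (m1 has shape (2,) and broadcasts against the (2, 2) operands; its
+        #  mask broadcasts the same way, masking one entry in each row.)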
+ for m2 in (array([[0, 1], [1, 2]]), + np.array([[0, 1], [1, 2]])): + test = (m1 == m2) + assert_equal(test.data, [[False, False], + [True, False]]) + assert_equal(test.mask, [[False, True], + [False, True]]) + + def test_numpyarithmetics(self): + # Check that the mask is not back-propagated when using numpy functions + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + +class TestMaskedArrayAttributes(object): + + def test_keepmask(self): + # Tests the keep mask flag + x = masked_array([1, 2, 3], mask=[1, 0, 0]) + mx = masked_array(x) + assert_equal(mx.mask, x.mask) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) + assert_equal(mx.mask, [0, 1, 0]) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) + assert_equal(mx.mask, [1, 1, 0]) + # We default to true + mx = masked_array(x, mask=[0, 1, 0]) + assert_equal(mx.mask, [1, 1, 0]) + + def test_hardmask(self): + # Test hard_mask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + # We need to copy, to avoid updating d in xh ! + xs = array(d, mask=m, hard_mask=False, copy=True) + xh[[1, 4]] = [10, 40] + xs[[1, 4]] = [10, 40] + assert_equal(xh._data, [0, 10, 2, 3, 4]) + assert_equal(xs._data, [0, 10, 2, 3, 40]) + assert_equal(xs.mask, [0, 0, 0, 1, 0]) + assert_(xh._hardmask) + assert_(not xs._hardmask) + xh[1:4] = [10, 20, 30] + xs[1:4] = [10, 20, 30] + assert_equal(xh._data, [0, 10, 20, 3, 4]) + assert_equal(xs._data, [0, 10, 20, 30, 40]) + assert_equal(xs.mask, nomask) + xh[0] = masked + xs[0] = masked + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, [1, 0, 0, 0, 0]) + xh[:] = 1 + xs[:] = 1 + assert_equal(xh._data, [0, 1, 1, 3, 4]) + assert_equal(xs._data, [1, 1, 1, 1, 1]) + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, nomask) + # Switch to soft mask + xh.soften_mask() + xh[:] = arange(5) + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh.mask, nomask) + # Switch back to hard mask + xh.harden_mask() + xh[xh < 3] = masked + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + xh[filled(xh > 1, False)] = 5 + assert_equal(xh._data, [0, 1, 2, 5, 5]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + + xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) + xh[0] = 0 + assert_equal(xh._data, [[1, 0], [3, 4]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[-1, -1] = 5 + assert_equal(xh._data, [[1, 0], [3, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[filled(xh < 5, False)] = 2 + assert_equal(xh._data, [[1, 2], [2, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + + def test_hardmask_again(self): + # Another test of hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + xh[4:5] = 999 + xh[0:1] = 999 + assert_equal(xh._data, [999, 1, 2, 3, 4]) + + def test_hardmask_oncemore_yay(self): + # OK, yet another test of hardmask + # Make sure that harden_mask/soften_mask//unshare_mask returns self + a = array([1, 2, 3], mask=[1, 0, 0]) + b = a.harden_mask() + assert_equal(a, b) + b[0] = 0 + assert_equal(a, b) + assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) + a = b.soften_mask() + a[0] = 0 + assert_equal(a, b) 
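+        # (soften_mask() likewise returns self, so the assignment to a[0]
+        #  above also unmasked and overwrote element 0 of b.)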
+ assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) + + def test_smallmask(self): + # Checks the behaviour of _smallmask + a = arange(10) + a[1] = masked + a[1] = 1 + assert_equal(a._mask, nomask) + a = arange(10) + a._smallmask = False + a[1] = masked + a[1] = 1 + assert_equal(a._mask, zeros(10)) + + def test_shrink_mask(self): + # Tests .shrink_mask() + a = array([1, 2, 3], mask=[0, 0, 0]) + b = a.shrink_mask() + assert_equal(a, b) + assert_equal(a.mask, nomask) + + # Mask cannot be shrunk on structured types, so is a no-op + a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) + b = a.copy() + a.shrink_mask() + assert_equal(a.mask, b.mask) + + def test_flat(self): + # Test that flat can return all types of items [#4585, #4615] + # test 2-D record array + # ... on structured array w/ masked records + x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], + [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x['a'][0, 1] = masked + x['b'][1, 0] = masked + x['c'][0, 2] = masked + x[-1, -1] = masked + xflat = x.flat + assert_equal(xflat[0], x[0, 0]) + assert_equal(xflat[1], x[0, 1]) + assert_equal(xflat[2], x[0, 2]) + assert_equal(xflat[:3], x[0]) + assert_equal(xflat[3], x[1, 0]) + assert_equal(xflat[4], x[1, 1]) + assert_equal(xflat[5], x[1, 2]) + assert_equal(xflat[3:], x[1]) + assert_equal(xflat[-1], x[-1, -1]) + i = 0 + j = 0 + for xf in xflat: + assert_equal(xf, x[j, i]) + i += 1 + if i >= x.shape[-1]: + i = 0 + j += 1 + + def test_assign_dtype(self): + # check that the mask's dtype is updated when dtype is changed + a = np.zeros(4, dtype='f4,i4') + + m = np.ma.array(a) + m.dtype = np.dtype('f4') + repr(m) # raises? + assert_equal(m.dtype, np.dtype('f4')) + + # check that dtype changes that change shape of mask too much + # are not allowed + def assign(): + m = np.ma.array(a) + m.dtype = np.dtype('f8') + assert_raises(ValueError, assign) + + b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? 
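+        # (a view, unlike the in-place dtype assignment above, may change the
+        #  number of elements, so viewing 'f4,i4' records as plain 'f4' works.)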
+ assert_equal(b.dtype, np.dtype('f4')) + + # check that nomask is preserved + a = np.zeros(4, dtype='f4') + m = np.ma.array(a) + m.dtype = np.dtype('f4,i4') + assert_equal(m.dtype, np.dtype('f4,i4')) + assert_equal(m._mask, np.ma.nomask) + + +class TestFillingValues(object): + + def test_check_on_scalar(self): + # Test _check_fill_value set to valid and invalid values + _check_fill_value = np.ma.core._check_fill_value + + fval = _check_fill_value(0, int) + assert_equal(fval, 0) + fval = _check_fill_value(None, int) + assert_equal(fval, default_fill_value(0)) + + fval = _check_fill_value(0, "|S3") + assert_equal(fval, b"0") + fval = _check_fill_value(None, "|S3") + assert_equal(fval, default_fill_value(b"camelot!")) + assert_raises(TypeError, _check_fill_value, 1e+20, int) + assert_raises(TypeError, _check_fill_value, 'stuff', int) + + def test_check_on_fields(self): + # Tests _check_fill_value with records + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('a', int), ('b', float), ('c', "|S3")] + # A check on a list should return a single record + fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # A check on None should output the defaults + fval = _check_fill_value(None, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [default_fill_value(0), + default_fill_value(0.), + asbytes(default_fill_value("0"))]) + #.....Using a structured type as fill_value should work + fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using a flexible type w/ a different type shouldn't matter + # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured + # types by position + fill_val = np.array((-999, -12345678.9, "???"), + dtype=[("A", int), ("B", float), ("C", "|S3")]) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using an object-array shouldn't matter either + fill_val = np.ndarray(shape=(1,), dtype=object) + fill_val[0] = (-999, -12345678.9, b"???") + fval = _check_fill_value(fill_val, object) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # NOTE: This test was never run properly as "fill_value" rather than + # "fill_val" was assigned. Written properly, it fails. 
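+        # (presumably because np.array((-999, -12345678.9, "???")) collapses
+        #  to a single string dtype, which cannot be matched field-by-field
+        #  against the structured ndtype.)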
+ #fill_val = np.array((-999, -12345678.9, "???")) + #fval = _check_fill_value(fill_val, ndtype) + #assert_(isinstance(fval, ndarray)) + #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + #.....One-field-only flexible type should work as well + ndtype = [("a", int)] + fval = _check_fill_value(-999999999, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), (-999999999,)) + + def test_fillvalue_conversion(self): + # Tests the behavior of fill_value during conversion + # We had a tailored comment to make sure special attributes are + # properly dealt with + a = array([b'3', b'4', b'5']) + a._optinfo.update({'comment':"updated!"}) + + b = array(a, dtype=int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + + b = array(a, dtype=float) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0.)) + + b = a.astype(int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + assert_equal(b._optinfo['comment'], "updated!") + + b = a.astype([('a', '|S3')]) + assert_equal(b['a']._data, a._data) + assert_equal(b['a'].fill_value, a.fill_value) + + def test_default_fill_value(self): + # check all calling conventions + f1 = default_fill_value(1.) + f2 = default_fill_value(np.array(1.)) + f3 = default_fill_value(np.array(1.).dtype) + assert_equal(f1, f2) + assert_equal(f1, f3) + + def test_default_fill_value_structured(self): + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + + f1 = default_fill_value(fields) + f2 = default_fill_value(fields.dtype) + expected = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.)), dtype=fields.dtype) + assert_equal(f1, expected) + assert_equal(f2, expected) + + def test_default_fill_value_void(self): + dt = np.dtype([('v', 'V7')]) + f = default_fill_value(dt) + assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) + + def test_fillvalue(self): + # Yet more fun with the fill_value + data = masked_array([1, 2, 3], fill_value=-999) + series = data[[0, 2, 1]] + assert_equal(series._fill_value, data._fill_value) + + mtype = [('f', float), ('s', '|S3')] + x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) + x.fill_value = 999 + assert_equal(x.fill_value.item(), [999., b'999']) + assert_equal(x['f'].fill_value, 999) + assert_equal(x['s'].fill_value, b'999') + + x.fill_value = (9, '???') + assert_equal(x.fill_value.item(), (9, b'???')) + assert_equal(x['f'].fill_value, 9) + assert_equal(x['s'].fill_value, b'???') + + x = array([1, 2, 3.1]) + x.fill_value = 999 + assert_equal(np.asarray(x.fill_value).dtype, float) + assert_equal(x.fill_value, 999.) 
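+        # (internally the fill value is kept as a 0-d array of the array's
+        #  dtype, which is what the private-attribute check below inspects.)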
+        assert_equal(x._fill_value, np.array(999.))
+
+    def test_subarray_fillvalue(self):
+        # gh-10483   test multi-field index fill value
+        fields = array([(1, 1, 1)],
+                       dtype=[('i', int), ('s', '|S8'), ('f', float)])
+        with suppress_warnings() as sup:
+            sup.filter(FutureWarning, "Numpy has detected")
+            subfields = fields[['i', 'f']]
+            assert_equal(tuple(subfields.fill_value), (999999, 1.e+20))
+            # test comparison does not raise:
+            subfields[1:] == subfields[:-1]
+
+    def test_fillvalue_exotic_dtype(self):
+        # Tests yet more exotic flexible dtypes
+        _check_fill_value = np.ma.core._check_fill_value
+        ndtype = [('i', int), ('s', '|S8'), ('f', float)]
+        control = np.array((default_fill_value(0),
+                            default_fill_value('0'),
+                            default_fill_value(0.),),
+                           dtype=ndtype)
+        assert_equal(_check_fill_value(None, ndtype), control)
+        # The shape shouldn't matter
+        ndtype = [('f0', float, (2, 2))]
+        control = np.array((default_fill_value(0.),),
+                           dtype=[('f0', float)]).astype(ndtype)
+        assert_equal(_check_fill_value(None, ndtype), control)
+        control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
+        assert_equal(_check_fill_value(0, ndtype), control)
+
+        ndtype = np.dtype("int, (2,3)float, float")
+        control = np.array((default_fill_value(0),
+                            default_fill_value(0.),
+                            default_fill_value(0.),),
+                           dtype="int, float, float").astype(ndtype)
+        test = _check_fill_value(None, ndtype)
+        assert_equal(test, control)
+        control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
+        assert_equal(_check_fill_value(0, ndtype), control)
+        # but when indexing, fill value should become scalar not tuple
+        # See issue #6723
+        M = masked_array(control)
+        assert_equal(M["f1"].fill_value.ndim, 0)
+
+    def test_fillvalue_datetime_timedelta(self):
+        # Test default fillvalue for datetime64 and timedelta64 types.
+        # See issue #4476, this would return '?' which would cause errors
+        # elsewhere
+
+        for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
+                         "h", "D", "W", "M", "Y"):
+            control = numpy.datetime64("NaT", timecode)
+            test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
+            np.testing.assert_array_equal(test, control)
+
+    # [... lost in extraction: the remaining TestFillingValues tests and the
+    #  first half of class TestUfuncs; test_no_masked_nan_warnings resumes
+    #  below ...]
+
+    def test_no_masked_nan_warnings(self):
+        # check that a nan in masked position does not
+        # cause ufunc warnings
+        m = np.ma.array([0.5, np.nan], mask=[0, 1])
+        with warnings.catch_warnings():
+            warnings.filterwarnings("error")
+            # test unary and binary ufuncs
+            exp(m)
+            add(m, 1)
+            m > 0
+
+            # test different unary domains
+            sqrt(m)
+            log(m)
+            tan(m)
+            arcsin(m)
+            arccos(m)
+            arccosh(m)
+
+            # test binary domains
+            divide(m, 2)
+
+            # also check that allclose uses ma ufuncs, to avoid warning
+            allclose(m, 0.5)
+
+class TestMaskedArrayInPlaceArithmetics(object):
+    # Test MaskedArray Arithmetics
+
+    def setup(self):
+        x = arange(10)
+        y = arange(10)
+        xm = arange(10)
+        xm[2] = masked
+        self.intdata = (x, y, xm)
+        self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
+        self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        self.othertypes = [np.dtype(_).type for _ in self.othertypes]
+        self.uint8data = (
+            x.astype(np.uint8),
+            y.astype(np.uint8),
+            xm.astype(np.uint8)
+        )
+
+    def test_inplace_addition_scalar(self):
+        # Test of inplace additions
+        (x, y, xm) = self.intdata
+        xm[2] = masked
+        x += 1
+        assert_equal(x, y + 1)
+        xm += 1
+        assert_equal(xm, y + 1)
+
+        (x, _, xm) = self.floatdata
+        id1 = x.data.ctypes.data
+        x += 1.
+        assert_(id1 == x.data.ctypes.data)
+        assert_equal(x, y + 1.)
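+        # (the ctypes.data comparison above shows that += really operated on
+        #  the original buffer in place; the tests that follow repeat this
+        #  with array operands, whose masks are OR-ed into the target's mask.)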
+ + def test_inplace_addition_array(self): + # Test of inplace additions + (x, y, xm) = self.intdata + m = xm.mask + a = arange(10, dtype=np.int16) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar(self): + # Test of inplace subtractions + (x, y, xm) = self.intdata + x -= 1 + assert_equal(x, y - 1) + xm -= 1 + assert_equal(xm, y - 1) + + def test_inplace_subtraction_array(self): + # Test of inplace subtractions + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + x *= 2.0 + assert_equal(x, y * 2) + xm *= 2.0 + assert_equal(xm, y * 2) + + def test_inplace_multiplication_array(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_division_scalar_int(self): + # Test of inplace division + (x, y, xm) = self.intdata + x = arange(10) * 2 + xm = arange(10) * 2 + xm[2] = masked + x //= 2 + assert_equal(x, y) + xm //= 2 + assert_equal(xm, y) + + def test_inplace_division_scalar_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + x /= 2.0 + assert_equal(x, y / 2.0) + xm /= arange(10) + assert_equal(xm, ones((10,))) + + def test_inplace_division_array_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x /= a + xm /= a + assert_equal(x, y / a) + assert_equal(xm, y / a) + assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) + + def test_inplace_division_misc(self): + + x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] + y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] 
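+        # (y contains zeros, so the division mask checked below is
+        #  m1 | m2 | (ym == 0): the masked input positions plus the positions
+        #  where the divisor is zero.)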
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + + z = xm / ym + assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + xm = xm.copy() + xm /= ym + assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + def test_datafriendly_add(self): + # Test keeping data w/ (inplace) addition + x = array([1, 2, 3], mask=[0, 0, 1]) + # Test add w/ scalar + xx = x + 1 + assert_equal(xx.data, [2, 3, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test iadd w/ scalar + x += 1 + assert_equal(x.data, [2, 3, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test add w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x + array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 4, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test iadd w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x += array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 4, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_sub(self): + # Test keeping data w/ (inplace) subtraction + # Test sub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - 1 + assert_equal(xx.data, [0, 1, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test isub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= 1 + assert_equal(x.data, [0, 1, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test sub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 0, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test isub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 0, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_mul(self): + # Test keeping data w/ (inplace) multiplication + # Test mul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * 2 + assert_equal(xx.data, [2, 4, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test imul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= 2 + assert_equal(x.data, [2, 4, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test mul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 40, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test imul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(x.data, [1, 40, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_div(self): + # Test keeping data w/ (inplace) division + # Test div on scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x / 2. + assert_equal(xx.data, [1 / 2., 2 / 2., 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test idiv on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= 2. + assert_equal(x.data, [1 / 2., 2 / 2., 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test div on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x / array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(xx.data, [1., 2. 
/ 20., 3.]) + assert_equal(xx.mask, [1, 0, 1]) + # Test idiv on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(x.data, [1., 2 / 20., 3.]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_pow(self): + # Test keeping data w/ (inplace) power + # Test pow on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x ** 2.5 + assert_equal(xx.data, [1., 2. ** 2.5, 3.]) + assert_equal(xx.mask, [0, 0, 1]) + # Test ipow on scalar + x **= 2.5 + assert_equal(x.data, [1., 2. ** 2.5, 3]) + assert_equal(x.mask, [0, 0, 1]) + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_inplace_addition_scalar_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + xm[2] = masked + x += t(1) + assert_equal(x, y + t(1)) + xm += t(1) + assert_equal(xm, y + t(1)) + + assert_equal(len(w), 0, "Failed on type=%s." % t) + + def test_inplace_addition_array_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + assert_equal(len(w), 0, "Failed on type=%s." % t) + + def test_inplace_subtraction_scalar_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x -= t(1) + assert_equal(x, y - t(1)) + xm -= t(1) + assert_equal(xm, y - t(1)) + + assert_equal(len(w), 0, "Failed on type=%s." % t) + + def test_inplace_subtraction_array_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + assert_equal(len(w), 0, "Failed on type=%s." 
% t) + + def test_inplace_multiplication_scalar_type(self): + # Test of inplace multiplication + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x *= t(2) + assert_equal(x, y * t(2)) + xm *= t(2) + assert_equal(xm, y * t(2)) + + assert_equal(len(w), 0, "Failed on type=%s." % t) + + def test_inplace_multiplication_array_type(self): + # Test of inplace multiplication + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + assert_equal(len(w), 0, "Failed on type=%s." % t) + + def test_inplace_floor_division_scalar_type(self): + # Test of inplace division + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + x //= t(2) + xm //= t(2) + assert_equal(x, y) + assert_equal(xm, y) + + assert_equal(len(w), 0, "Failed on type=%s." % t) + + def test_inplace_floor_division_array_type(self): + # Test of inplace division + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x //= a + xm //= a + assert_equal(x, y // a) + assert_equal(xm, y // a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + + assert_equal(len(w), 0, "Failed on type=%s." % t) + + def test_inplace_division_scalar_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. + try: + x /= t(2) + assert_equal(x, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= t(2) + assert_equal(xm, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, "Failed on type=%s." % t) + else: + assert_equal(len(sup.log), 0, "Failed on type=%s." % t) + + def test_inplace_division_array_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. 
Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. + try: + x /= a + assert_equal(x, y / a) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= a + assert_equal(xm, y / a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, "Failed on type=%s." % t) + else: + assert_equal(len(sup.log), 0, "Failed on type=%s." % t) + + def test_inplace_pow_type(self): + # Test keeping data w/ (inplace) power + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + # Test pow on scalar + x = array([1, 2, 3], mask=[0, 0, 1], dtype=t) + xx = x ** t(2) + xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t) + assert_equal(xx.data, xx_r.data) + assert_equal(xx.mask, xx_r.mask) + # Test ipow on scalar + x **= t(2) + assert_equal(x.data, xx_r.data) + assert_equal(x.mask, xx_r.mask) + + assert_equal(len(w), 0, "Failed on type=%s." % t) + + +class TestMaskedArrayMethods(object): + # Test class for miscellaneous MaskedArrays methods. + def setup(self): + # Base data definition. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_generic_methods(self): + # Tests some MaskedArray methods. 
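+        # (neither `a` nor `m` carries a mask here, so every method should
+        #  simply agree with the same method called on the raw ._data.)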
+ a = array([1, 3, 2]) + assert_equal(a.any(), a._data.any()) + assert_equal(a.all(), a._data.all()) + assert_equal(a.argmax(), a._data.argmax()) + assert_equal(a.argmin(), a._data.argmin()) + assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) + assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) + assert_equal(a.conj(), a._data.conj()) + assert_equal(a.conjugate(), a._data.conjugate()) + + m = array([[1, 2], [3, 4]]) + assert_equal(m.diagonal(), m._data.diagonal()) + assert_equal(a.sum(), a._data.sum()) + assert_equal(a.take([1, 2]), a._data.take([1, 2])) + assert_equal(m.transpose(), m._data.transpose()) + + def test_allclose(self): + # Tests allclose on arrays + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + assert_(allclose(a, b)) + # Test allclose w/ infs + a[0] = np.inf + assert_(not allclose(a, b)) + b[0] = np.inf + assert_(allclose(a, b)) + # Test allclose w/ masked + a = masked_array(a) + a[-1] = masked + assert_(allclose(a, b, masked_equal=True)) + assert_(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + assert_(allclose(a, 0, masked_equal=True)) + + # Test that the function works for MIN_INT integer typed arrays + a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) + assert_(allclose(a, a)) + + def test_allany(self): + # Checks the any/all methods/functions. + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool_) + mx = masked_array(x, mask=m) + mxbig = (mx > 0.5) + mxsmall = (mx < 0.5) + + assert_(not mxbig.all()) + assert_(mxbig.any()) + assert_equal(mxbig.all(0), [False, False, True]) + assert_equal(mxbig.all(1), [False, False, True]) + assert_equal(mxbig.any(0), [False, False, True]) + assert_equal(mxbig.any(1), [True, True, True]) + + assert_(not mxsmall.all()) + assert_(mxsmall.any()) + assert_equal(mxsmall.all(0), [True, True, False]) + assert_equal(mxsmall.all(1), [False, False, False]) + assert_equal(mxsmall.any(0), [True, True, False]) + assert_equal(mxsmall.any(1), [True, True, False]) + + def test_allany_oddities(self): + # Some fun with all and any + store = empty((), dtype=bool) + full = array([1, 2, 3], mask=True) + + assert_(full.all() is masked) + full.all(out=store) + assert_(store) + assert_(store._mask, True) + assert_(store is not masked) + + store = empty((), dtype=bool) + assert_(full.any() is masked) + full.any(out=store) + assert_(not store) + assert_(store._mask, True) + assert_(store is not masked) + + def test_argmax_argmin(self): + # Tests argmin & argmax on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + + assert_equal(mx.argmin(), 35) + assert_equal(mX.argmin(), 35) + assert_equal(m2x.argmin(), 4) + assert_equal(m2X.argmin(), 4) + assert_equal(mx.argmax(), 28) + assert_equal(mX.argmax(), 28) + assert_equal(m2x.argmax(), 31) + assert_equal(m2X.argmax(), 31) + + assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) + assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) + assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) + assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) + + assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) + assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) + assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) + assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) + + def test_clip(self): + # Tests clip on MaskedArrays. 
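+        # clip() is applied to the underlying data only and the mask is
+        # carried over unchanged, e.g. (illustrative sketch):
+        #   >>> mx = array([1., 9., 5.], mask=[0, 1, 0])
+        #   >>> clipped = mx.clip(2, 8)
+        # clipped._data is [2., 8., 5.] and clipped.mask is still
+        # [0, 1, 0].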
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) + mx = array(x, mask=m) + clipped = mx.clip(2, 8) + assert_equal(clipped.mask, mx.mask) + assert_equal(clipped._data, x.clip(2, 8)) + assert_equal(clipped._data, mx._data.clip(2, 8)) + + def test_compress(self): + # test compress + a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) + condition = (a > 1.5) & (a < 3.5) + assert_equal(a.compress(condition), [2., 3.]) + + a[[2, 3]] = masked + b = a.compress(condition) + assert_equal(b._data, [2., 3.]) + assert_equal(b._mask, [0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + condition = (a < 4.) + b = a.compress(condition) + assert_equal(b._data, [1., 2., 3.]) + assert_equal(b._mask, [0, 0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + a = masked_array([[10, 20, 30], [40, 50, 60]], + mask=[[0, 0, 1], [1, 0, 0]]) + b = a.compress(a.ravel() >= 22) + assert_equal(b._data, [30, 40, 50, 60]) + assert_equal(b._mask, [1, 1, 0, 0]) + + x = np.array([3, 1, 2]) + b = a.compress(x >= 2, axis=1) + assert_equal(b._data, [[10, 30], [40, 60]]) + assert_equal(b._mask, [[0, 1], [1, 0]]) + + def test_compressed(self): + # Tests compressed + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + a[0] = masked + b = a.compressed() + assert_equal(b, [2, 3, 4]) + + def test_empty(self): + # Tests empty/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = empty_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = empty(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check empty_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = empty_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view(masked_array) + assert_(np.may_share_memory(a.mask, b.mask)) + + @suppress_copy_mask_on_assignment + def test_put(self): + # Tests put. 
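+        # put() writes through the mask: a target that receives an
+        # unmasked value becomes unmasked, while a masked entry in the
+        # value array masks its target (the raw data is still written
+        # underneath), e.g. (illustrative sketch):
+        #   >>> x = masked_array([1, 2, 3], mask=[1, 0, 0])
+        #   >>> x.put([0, 2], masked_array([9, 9], mask=[0, 1]))
+        # afterwards x[0] == 9 is unmasked and x[2] is masked.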
+ d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_equal(x, [0, 10, 2, -1, 40]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + i = [0, 2, 4, 6] + x.put(i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + put(x, i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + def test_put_nomask(self): + # GitHub issue 6425 + x = zeros(10) + z = array([3., -1.], mask=[False, True]) + + x.put([1, 2], z) + assert_(x[0] is not masked) + assert_equal(x[0], 0) + assert_(x[1] is not masked) + assert_equal(x[1], 3) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_equal(x[3], 0) + + def test_put_hardmask(self): + # Tests put on hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d + 1, mask=m, hard_mask=True, copy=True) + xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) + assert_equal(xh._data, [3, 4, 2, 4, 5]) + + def test_putmask(self): + x = arange(6) + 1 + mx = array(x, mask=[0, 0, 0, 1, 1, 1]) + mask = [0, 0, 1, 0, 0, 1] + # w/o mask, w/o masked values + xx = x.copy() + putmask(xx, mask, 99) + assert_equal(xx, [1, 2, 99, 4, 5, 99]) + # w/ mask, w/o masked values + mxx = mx.copy() + putmask(mxx, mask, 99) + assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) + assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) + # w/o mask, w/ masked values + values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) + xx = x.copy() + putmask(xx, mask, values) + assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) + # w/ mask, w/ masked values + mxx = mx.copy() + putmask(mxx, mask, values) + assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) + # w/ mask, w/ masked values + hardmask + mxx = mx.copy() + mxx.harden_mask() + putmask(mxx, mask, values) + assert_equal(mxx, [1, 2, 30, 4, 5, 60]) + + def test_ravel(self): + # Tests ravel + a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, aravel.shape) + a = array([0, 0], mask=[1, 1]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, a.shape) + # Checks that small_mask is preserved + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) + assert_equal(a.ravel()._mask, [0, 0, 0, 0]) + # Test that the fill_value is preserved + a.fill_value = -99 + a.shape = (2, 2) + ar = a.ravel() + assert_equal(ar._mask, [0, 0, 0, 0]) + assert_equal(ar._data, [1, 2, 3, 4]) + assert_equal(ar.fill_value, -99) + # Test index ordering + assert_equal(a.ravel(order='C'), [1, 2, 3, 4]) + assert_equal(a.ravel(order='F'), [1, 3, 2, 4]) + + def test_reshape(self): + # Tests reshape + x = arange(4) + x[0] = masked + y = x.reshape(2, 2) + assert_equal(y.shape, (2, 2,)) + assert_equal(y._mask.shape, (2, 2,)) + assert_equal(x.shape, (4,)) + assert_equal(x._mask.shape, (4,)) + + def test_sort(self): + # Test sort 
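+        # sort() treats masked values as the largest elements by default
+        # (endwith=True), so they land at the end; endwith=False sends
+        # them to the front instead, e.g. (illustrative sketch):
+        #   >>> s = sort(array([3, 1, 2], mask=[1, 0, 0]))
+        # gives data [1, 2, 3] with only the last entry masked.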
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+        sortedx = sort(x)
+        assert_equal(sortedx._data, [1, 2, 3, 4])
+        assert_equal(sortedx._mask, [0, 0, 0, 1])
+
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [4, 1, 2, 3])
+        assert_equal(sortedx._mask, [1, 0, 0, 0])
+
+        x.sort()
+        assert_equal(x._data, [1, 2, 3, 4])
+        assert_equal(x._mask, [0, 0, 0, 1])
+
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+        x.sort(endwith=False)
+        assert_equal(x._data, [4, 1, 2, 3])
+        assert_equal(x._mask, [1, 0, 0, 0])
+
+        x = [1, 4, 2, 3]
+        sortedx = sort(x)
+        assert_(not isinstance(sortedx, MaskedArray))
+
+        x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
+        x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [1, 2, -2, -1, 0])
+        assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
+
+    def test_stable_sort(self):
+        x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8)
+        expected = array([0, 3, 1, 4, 2, 5])
+        computed = argsort(x, kind='stable')
+        assert_equal(computed, expected)
+
+    def test_argsort_matches_sort(self):
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+        for kwargs in [dict(),
+                       dict(endwith=True),
+                       dict(endwith=False),
+                       dict(fill_value=2),
+                       dict(fill_value=2, endwith=True),
+                       dict(fill_value=2, endwith=False)]:
+            sortedx = sort(x, **kwargs)
+            argsortedx = x[argsort(x, **kwargs)]
+            assert_equal(sortedx._data, argsortedx._data)
+            assert_equal(sortedx._mask, argsortedx._mask)
+
+    def test_sort_2d(self):
+        # Check sort of 2D array.
+        # 2D array w/o mask
+        a = masked_array([[8, 4, 1], [2, 0, 9]])
+        a.sort(0)
+        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
+        a = masked_array([[8, 4, 1], [2, 0, 9]])
+        a.sort(1)
+        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
+        # 2D array w/mask
+        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
+        a.sort(0)
+        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
+        assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
+        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
+        a.sort(1)
+        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
+        assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
+        # 3D
+        a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
+                          [[1, 2, 3], [7, 8, 9], [4, 5, 6]],
+                          [[7, 8, 9], [1, 2, 3], [4, 5, 6]],
+                          [[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
+        a[a % 4 == 0] = masked
+        am = a.copy()
+        an = a.filled(99)
+        am.sort(0)
+        an.sort(0)
+        assert_equal(am, an)
+        am = a.copy()
+        an = a.filled(99)
+        am.sort(1)
+        an.sort(1)
+        assert_equal(am, an)
+        am = a.copy()
+        an = a.filled(99)
+        am.sort(2)
+        an.sort(2)
+        assert_equal(am, an)
+
+    def test_sort_flexible(self):
+        # Test sort on structured dtype.
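+        # Records compare lexicographically, field by field ('A' first,
+        # then 'B'); endwith decides whether a record with a masked
+        # leading field sorts to the back (mask_last) or to the front
+        # (mask_first), which is what the two control arrays below
+        # encode.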
+ a = array( + data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + mask_last = array( + data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + mask_first = array( + data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)], + dtype=[('A', int), ('B', int)]) + + test = sort(a) + assert_equal(test, mask_last) + assert_equal(test.mask, mask_last.mask) + + test = sort(a, endwith=False) + assert_equal(test, mask_first) + assert_equal(test.mask, mask_first.mask) + + # Test sort on dtype with subarray (gh-8069) + # Just check that the sort does not error, structured array subarrays + # are treated as byte strings and that leads to differing behavior + # depending on endianess and `endwith`. + dt = np.dtype([('v', int, 2)]) + a = a.view(dt) + test = sort(a) + test = sort(a, endwith=False) + + def test_argsort(self): + # Test argsort + a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) + assert_equal(np.argsort(a), argsort(a)) + + def test_squeeze(self): + # Check squeeze + data = masked_array([[1, 2, 3]]) + assert_equal(data.squeeze(), [1, 2, 3]) + data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) + assert_equal(data.squeeze(), [1, 2, 3]) + assert_equal(data.squeeze()._mask, [1, 1, 1]) + + # normal ndarrays return a view + arr = np.array([[1]]) + arr_sq = arr.squeeze() + assert_equal(arr_sq, 1) + arr_sq[...] = 2 + assert_equal(arr[0,0], 2) + + # so maskedarrays should too + m_arr = masked_array([[1]], mask=True) + m_arr_sq = m_arr.squeeze() + assert_(m_arr_sq is not np.ma.masked) + assert_equal(m_arr_sq.mask, True) + m_arr_sq[...] = 2 + assert_equal(m_arr[0,0], 2) + + def test_swapaxes(self): + # Tests swapaxes on MaskedArrays. 
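+        # swapaxes() permutes the mask together with the data, so any
+        # slice of the swapped array matches the corresponding slice of
+        # the original, e.g. mX.swapaxes(0, 1)[-1] equals mX[:, -1],
+        # mask included.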
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mX = array(x, mask=m).reshape(6, 6) + mXX = mX.reshape(3, 2, 2, 3) + + mXswapped = mX.swapaxes(0, 1) + assert_equal(mXswapped[-1], mX[:, -1]) + + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_take(self): + # Tests take + x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) + assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) + assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) + assert_equal(x.take([[0, 1], [0, 1]]), + masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) + + # assert_equal crashes when passed np.ma.mask + assert_(x[1] is np.ma.masked) + assert_(x.take(1) is np.ma.masked) + + x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) + assert_equal(x.take([0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + assert_equal(take(x, [0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + + def test_take_masked_indices(self): + # Test take w/ masked indices + a = np.array((40, 18, 37, 9, 22)) + indices = np.arange(3)[None,:] + np.arange(5)[:, None] + mindices = array(indices, mask=(indices >= len(a))) + # No mask + test = take(a, mindices, mode='clip') + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 22], + [22, 22, 22]]) + assert_equal(test, ctrl) + # Masked indices + test = take(a, mindices) + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 40], + [22, 40, 40]]) + ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # Masked input + masked indices + a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) + test = take(a, mindices) + ctrl[0, 1] = ctrl[1, 0] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_tolist(self): + # Tests to list + # ... on 1D + x = array(np.arange(12)) + x[[1, -2]] = masked + xlist = x.tolist() + assert_(xlist[1] is None) + assert_(xlist[-2] is None) + # ... on 2D + x.shape = (3, 4) + xlist = x.tolist() + ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] + assert_equal(xlist[0], [0, None, 2, 3]) + assert_equal(xlist[1], [4, 5, 6, 7]) + assert_equal(xlist[2], [8, 9, None, 11]) + assert_equal(xlist, ctrl) + # ... on structured array w/ masked records + x = array(list(zip([1, 2, 3], + [1.1, 2.2, 3.3], + ['one', 'two', 'thr'])), + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x[-1] = masked + assert_equal(x.tolist(), + [(1, 1.1, b'one'), + (2, 2.2, b'two'), + (None, None, None)]) + # ... on structured array w/ masked fields + a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], + dtype=[('a', int), ('b', int)]) + test = a.tolist() + assert_equal(test, [[1, None], [3, 4]]) + # ... 
on mvoid + a = a[0] + test = a.tolist() + assert_equal(test, [1, None]) + + def test_tolist_specialcase(self): + # Test mvoid.tolist: make sure we return a standard Python object + a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) + # w/o mask: each entry is a np.void whose elements are standard Python + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + # w/ mask: each entry is a ma.void whose elements should be + # standard Python + a.mask[0] = (0, 1) + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + + def test_toflex(self): + # Test the conversion to records + data = arange(10) + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = [('i', int), ('s', '|S3'), ('f', float)] + data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = np.dtype("int, (2,3)float, float") + data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + # Test the reconstruction of a masked_array from a record + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + + def test_arraymethod(self): + # Test a _arraymethod w/ n argument + marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) + control = masked_array([[1], [2], [3], [4], [5]], + mask=[0, 0, 1, 0, 0]) + assert_equal(marray.T, control) + assert_equal(marray.transpose(), control) + + assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) + + def test_arraymethod_0d(self): + # gh-9430 + x = np.ma.array(42, mask=True) + assert_equal(x.T.mask, x.mask) + assert_equal(x.T.data, x.data) + + def test_transpose_view(self): + x = np.ma.array([[1, 2, 3], [4, 5, 6]]) + x[0,1] = np.ma.masked + xt = x.T + + xt[1,0] = 10 + xt[0,1] = np.ma.masked + + assert_equal(x.data, xt.T.data) + assert_equal(x.mask, xt.T.mask) + + def test_diagonal_view(self): + x = np.ma.zeros((3,3)) + x[0,0] = 10 + x[1,1] = np.ma.masked + x[2,2] = 20 + xd = x.diagonal() + x[1,1] = 15 + assert_equal(xd.mask, x.diagonal().mask) + assert_equal(xd.data, x.diagonal().data) + + +class TestMaskedArrayMathMethods(object): + + def setup(self): + # Base data definition. 
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_cumsumprod(self): + # Tests cumsum & cumprod on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXcp = mX.cumsum(0) + assert_equal(mXcp._data, mX.filled(0).cumsum(0)) + mXcp = mX.cumsum(1) + assert_equal(mXcp._data, mX.filled(0).cumsum(1)) + + mXcp = mX.cumprod(0) + assert_equal(mXcp._data, mX.filled(1).cumprod(0)) + mXcp = mX.cumprod(1) + assert_equal(mXcp._data, mX.filled(1).cumprod(1)) + + def test_cumsumprod_with_output(self): + # Tests cumsum/cumprod w/ output + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + for funcname in ('cumsum', 'cumprod'): + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty((3, 4), dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + + def test_ptp(self): + # Tests ptp on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), mx.compressed().ptp()) + rows = np.zeros(n, float) + cols = np.zeros(m, float) + for k in range(m): + cols[k] = mX[:, k].compressed().ptp() + for k in range(n): + rows[k] = mX[k].compressed().ptp() + assert_equal(mX.ptp(0), cols) + assert_equal(mX.ptp(1), rows) + + def test_add_object(self): + x = masked_array(['a', 'b'], mask=[1, 0], dtype=object) + y = x + 'x' + assert_equal(y[1], 'bx') + assert_(y.mask[0]) + + def test_sum_object(self): + # Test sum on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.sum(), 5) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.sum(axis=0), [5, 7, 9]) + + def test_prod_object(self): + # Test prod on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.prod(), 2 * 3) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.prod(axis=0), [4, 10, 18]) + + def test_meananom_object(self): + # Test mean/anom on object dtype + a = masked_array([1, 2, 3], dtype=object) + assert_equal(a.mean(), 2) + assert_equal(a.anom(), [-1, 0, 1]) + + def test_trace(self): + # Tests trace on MaskedArrays. 
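+        # For a masked matrix, trace() sums the unmasked diagonal
+        # entries only; equivalently, the plain trace minus the
+        # contribution of the masked diagonal cells, which is the
+        # identity checked below.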
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_almost_equal(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0)) + assert_equal(np.trace(mX), mX.trace()) + + # gh-5560 + arr = np.arange(2*4*4).reshape(2,4,4) + m_arr = np.ma.masked_array(arr, False) + assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) + + def test_dot(self): + # Tests dot on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + fx = mx.filled(0) + r = mx.dot(mx) + assert_almost_equal(r.filled(0), fx.dot(fx)) + assert_(r.mask is nomask) + + fX = mX.filled(0) + r = mX.dot(mX) + assert_almost_equal(r.filled(0), fX.dot(fX)) + assert_(r.mask[1,3]) + r1 = empty_like(r) + mX.dot(mX, out=r1) + assert_almost_equal(r, r1) + + mYY = mXX.swapaxes(-1, -2) + fXX, fYY = mXX.filled(0), mYY.filled(0) + r = mXX.dot(mYY) + assert_almost_equal(r.filled(0), fXX.dot(fYY)) + r1 = empty_like(r) + mXX.dot(mYY, out=r1) + assert_almost_equal(r, r1) + + def test_dot_shape_mismatch(self): + # regression test + x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) + y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) + z = masked_array([[0,1],[3,3]]) + x.dot(y, out=z) + assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) + assert_almost_equal(z.mask, [[0, 1], [0, 0]]) + + def test_varmean_nomask(self): + # gh-5769 + foo = array([1,2,3,4], dtype='f8') + bar = array([1,2,3,4], dtype='f8') + assert_equal(type(foo.mean()), np.float64) + assert_equal(type(foo.var()), np.float64) + assert((foo.mean() == bar.mean()) is np.bool_(True)) + + # check array type is preserved and out works + foo = array(np.arange(16).reshape((4,4)), dtype='f8') + bar = empty(4, dtype='f4') + assert_equal(type(foo.mean(axis=1)), MaskedArray) + assert_equal(type(foo.var(axis=1)), MaskedArray) + assert_(foo.mean(axis=1, out=bar) is bar) + assert_(foo.var(axis=1, out=bar) is bar) + + def test_varstd(self): + # Tests var & std on MaskedArrays. 
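+        # var()/std() on a masked array are computed over the unmasked
+        # entries only, so with axis=None they must agree with the same
+        # statistic on the 1-D compressed() data, and ddof is forwarded,
+        # e.g. mX.var(ddof=1) matches mX.compressed().var(ddof=1).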
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_almost_equal(mX.std(axis=None, ddof=1), + mX.compressed().std(ddof=1)) + assert_almost_equal(mX.var(axis=None, ddof=1), + mX.compressed().var(ddof=1)) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + @pytest.mark.skipif(sys.platform=='win32' and sys.version_info < (3, 6), + reason='Fails on Python < 3.6 on Windows, gh-9671') + @suppress_copy_mask_on_assignment + def test_varstd_specialcases(self): + # Test a special case for var + nout = np.array(-1, dtype=float) + mout = array(-1, dtype=float) + + x = array(arange(10), mask=True) + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method() is masked) + assert_(method(0) is masked) + assert_(method(-1) is masked) + # Using a masked array as explicit output + method(out=mout) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout) + assert_(np.isnan(nout)) + + x = array(arange(10), mask=True) + x[-1] = 9 + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method(ddof=1) is masked) + assert_(method(0, ddof=1) is masked) + assert_(method(-1, ddof=1) is masked) + # Using a masked array as explicit output + method(out=mout, ddof=1) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout, ddof=1) + assert_(np.isnan(nout)) + + def test_varstd_ddof(self): + a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) + test = a.std(axis=0, ddof=0) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=1) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=2) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [1, 1, 1]) + + def test_diag(self): + # Test diag + x = arange(9).reshape((3, 3)) + x[1, 1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + def test_axis_methods_nomask(self): + # Test the combination nomask & methods w/ axis + a = array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(a.sum(0), [5, 7, 9]) + assert_equal(a.sum(-1), [6, 15]) + assert_equal(a.sum(1), [6, 15]) + + assert_equal(a.prod(0), [4, 10, 18]) + assert_equal(a.prod(-1), [6, 120]) + assert_equal(a.prod(1), [6, 120]) + + assert_equal(a.min(0), [1, 2, 3]) + assert_equal(a.min(-1), [1, 4]) + assert_equal(a.min(1), [1, 4]) + + assert_equal(a.max(0), [4, 5, 6]) + assert_equal(a.max(-1), [3, 6]) + assert_equal(a.max(1), [3, 6]) + + +class TestMaskedArrayMathMethodsComplex(object): + # Test class for miscellaneous MaskedArrays methods. + def setup(self): + # Base data definition. 
+ x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, + 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + +class TestMaskedArrayFunctions(object): + # Test class for miscellaneous functions. + + def setup(self): + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + self.info = (xm, ym) + + def test_masked_where_bool(self): + x = [1, 2] + y = masked_where(False, x) + assert_equal(y, [1, 2]) + assert_equal(y[1], 2) + + def test_masked_equal_wlist(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [0, 0, 1]) + mx = masked_not_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [1, 1, 0]) + + def test_masked_equal_fill_value(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx._mask, [0, 0, 1]) + assert_equal(mx.fill_value, 3) + + def test_masked_where_condition(self): + # Tests masking functions. + x = array([1., 2., 3., 4., 5.]) + x[2] = masked + assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) + assert_equal(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2)) + assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) + assert_equal(masked_where(less_equal(x, 2), x), + masked_less_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5]) + + def test_masked_where_oddities(self): + # Tests some generic features. 
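+        # With a condition that is False everywhere, masked_where() is
+        # a no-op apart from the wrapping: the result compares equal to
+        # the input and nothing is masked, e.g.
+        # masked_where(zeros(3, MaskType), ones(3)) equals ones(3).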
+        atest = ones((10, 10, 10), dtype=float)
+        btest = zeros(atest.shape, MaskType)
+        ctest = masked_where(btest, atest)
+        assert_equal(atest, ctest)
+
+    def test_masked_where_shape_constraint(self):
+        a = arange(10)
+        with assert_raises(IndexError):
+            masked_equal(1, a)
+        test = masked_equal(a, 1)
+        assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
+
+    def test_masked_where_structured(self):
+        # test that masked_where on a structured array sets a structured
+        # mask (see issue #2972)
+        a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
+        am = np.ma.masked_where(a["A"] < 5, a)
+        assert_equal(am.mask.dtype.names, am.dtype.names)
+        assert_equal(am["A"],
+                     np.ma.masked_array(np.zeros(10), np.ones(10)))
+
+    def test_masked_where_mismatch(self):
+        # gh-4520
+        x = np.arange(10)
+        y = np.arange(5)
+        assert_raises(IndexError, np.ma.masked_where, y > 6, x)
+
+    def test_masked_otherfunctions(self):
+        assert_equal(masked_inside(list(range(5)), 1, 3),
+                     [0, 199, 199, 199, 4])
+        assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
+        assert_equal(masked_inside(array(list(range(5)),
+                                         mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+                     [1, 1, 1, 1, 0])
+        assert_equal(masked_outside(array(list(range(5)),
+                                          mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+                     [1, 1, 0, 0, 1])
+        assert_equal(masked_equal(array(list(range(5)),
+                                        mask=[1, 0, 0, 0, 0]), 2).mask,
+                     [1, 0, 1, 0, 0])
+        assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
+                                            mask=[1, 0, 0, 0, 0]), 2).mask,
+                     [1, 0, 1, 0, 1])
+
+    def test_round(self):
+        a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
+                  mask=[0, 1, 0, 0, 0])
+        assert_equal(a.round(), [1., 2., 3., 5., 6.])
+        assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
+        assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
+        b = empty_like(a)
+        a.round(out=b)
+        assert_equal(b, [1., 2., 3., 5., 6.])
+
+        x = array([1., 2., 3., 4., 5.])
+        c = array([1, 1, 1, 0, 0])
+        x[2] = masked
+        z = where(c, x, -x)
+        assert_equal(z, [1., 2., 0., -4., -5])
+        c[0] = masked
+        z = where(c, x, -x)
+        assert_equal(z, [1., 2., 0., -4., -5])
+        assert_(z[0] is masked)
+        assert_(z[1] is not masked)
+        assert_(z[2] is masked)
+
+    def test_round_with_output(self):
+        # Testing round with an explicit output
+
+        xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+        xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+        # A ndarray as explicit input
+        output = np.empty((3, 4), dtype=float)
+        output.fill(-9999)
+        result = np.round(xm, decimals=2, out=output)
+        # ...
the result should be the given output + assert_(result is output) + assert_equal(result, xm.round(decimals=2, out=output)) + + output = empty((3, 4), dtype=float) + result = xm.round(decimals=2, out=output) + assert_(result is output) + + def test_round_with_scalar(self): + # Testing round with scalar/zero dimension input + # GH issue 2244 + a = array(1.1, mask=[False]) + assert_equal(a.round(), 1) + + a = array(1.1, mask=[True]) + assert_(a.round() is masked) + + a = array(1.1, mask=[False]) + output = np.empty(1, dtype=float) + output.fill(-9999) + a.round(out=output) + assert_equal(output, 1) + + a = array(1.1, mask=[False]) + output = array(-9999., mask=[True]) + a.round(out=output) + assert_equal(output[()], 1) + + a = array(1.1, mask=[True]) + output = array(-9999., mask=[False]) + a.round(out=output) + assert_(output[()] is masked) + + def test_identity(self): + a = identity(5) + assert_(isinstance(a, MaskedArray)) + assert_equal(a, np.identity(5)) + + def test_power(self): + x = -1.1 + assert_almost_equal(power(x, 2.), 1.21) + assert_(power(x, masked) is masked) + x = array([-1.1, -1.1, 1.1, 1.1, 0.]) + b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) + y = power(x, b) + assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + b.mask = nomask + y = power(x, b) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + z = x ** b + assert_equal(z._mask, y._mask) + assert_almost_equal(z, y) + assert_almost_equal(z._data, y._data) + x **= b + assert_equal(x._mask, y._mask) + assert_almost_equal(x, y) + assert_almost_equal(x._data, y._data) + + def test_power_with_broadcasting(self): + # Test power w/ broadcasting + a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) + a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) + b1 = np.array([2, 4, 3]) + b2 = np.array([b1, b1]) + b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) + + ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], + mask=[[1, 1, 0], [0, 1, 1]]) + # No broadcasting, base & exp w/ mask + test = a2m ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # No broadcasting, base w/ mask, exp w/o mask + test = a2m ** b2 + assert_equal(test, ctrl) + assert_equal(test.mask, a2m.mask) + # No broadcasting, base w/o mask, exp w/ mask + test = a2 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, b2m.mask) + + ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], + mask=[[0, 1, 0], [0, 1, 0]]) + test = b1 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + test = b2m ** b1 + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_where(self): + # Test the where function + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + + d = where(xm > 2, xm, -9) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + assert_equal(d._mask, xm._mask) + d = where(xm > 2, -9, ym) + assert_equal(d, [5., 0., 3., 2., -1., -9., + -9., -10., -9., 1., 0., -9.]) + assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) + d = where(xm > 2, xm, masked) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + tmp = xm._mask.copy() + tmp[(xm <= 2).filled(True)] = True + assert_equal(d._mask, tmp) + + ixm = xm.astype(int) + d = 
where(ixm > 2, ixm, masked) + assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) + assert_equal(d.dtype, ixm.dtype) + + def test_where_object(self): + a = np.array(None) + b = masked_array(None) + r = b.copy() + assert_equal(np.ma.where(True, a, a), r) + assert_equal(np.ma.where(True, b, b), r) + + def test_where_with_masked_choice(self): + x = arange(10) + x[3] = masked + c = x >= 8 + # Set False to masked + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_equal(x, z) + # Set True to masked + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + + def test_where_with_masked_condition(self): + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + x = arange(1, 6) + x[-1] = masked + y = arange(1, 6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_equal(z, zm) + assert_(getmask(zm) is nomask) + assert_equal(zm, [1, 2, 3, 40, 50]) + z = where(c, masked, 1) + assert_equal(z, [99, 99, 99, 1, 1]) + z = where(c, 1, masked) + assert_equal(z, [99, 1, 1, 99, 99]) + + def test_where_type(self): + # Test the type conservation with where + x = np.arange(4, dtype=np.int32) + y = np.arange(4, dtype=np.float32) * 2.2 + test = where(x > 1.5, y, x).dtype + control = np.find_common_type([np.int32, np.float32], []) + assert_equal(test, control) + + def test_where_broadcast(self): + # Issue 8599 + x = np.arange(9).reshape(3, 3) + y = np.zeros(3) + core = np.where([1, 0, 1], x, y) + ma = where([1, 0, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured(self): + # Issue 8600 + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + y = np.array((10, 20), dtype=dt) + core = np.where([0, 1, 1], x, y) + ma = np.where([0, 1, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured_masked(self): + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + + ma = where([0, 1, 1], x, masked) + expected = masked_where([1, 0, 0], x) + + assert_equal(ma.dtype, expected.dtype) + assert_equal(ma, expected) + assert_equal(ma.mask, expected.mask) + + def test_choose(self): + # Test choose + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + chosen = choose([2, 3, 1, 0], choices) + assert_equal(chosen, array([20, 31, 12, 3])) + chosen = choose([2, 4, 1, 0], choices, mode='clip') + assert_equal(chosen, array([20, 31, 12, 3])) + chosen = choose([2, 4, 1, 0], choices, mode='wrap') + assert_equal(chosen, array([20, 1, 12, 3])) + # Check with some masked indices + indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) + chosen = choose(indices_, choices, mode='wrap') + assert_equal(chosen, array([99, 1, 12, 99])) + assert_equal(chosen.mask, [1, 0, 0, 1]) + # Check with some masked choices + choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 
1, 0] + chosen = choose(indices_, choices, mode='wrap') + assert_equal(chosen, array([20, 31, 12, 3])) + assert_equal(chosen.mask, [1, 0, 0, 1]) + + def test_choose_with_out(self): + # Test choose with an explicit out keyword + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + store = empty(4, dtype=int) + chosen = choose([2, 3, 1, 0], choices, out=store) + assert_equal(store, array([20, 31, 12, 3])) + assert_(store is chosen) + # Check with some masked indices + out + store = empty(4, dtype=int) + indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([99, 31, 12, 99])) + assert_equal(store.mask, [1, 0, 0, 1]) + # Check with some masked choices + out ina ndarray ! + choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + store = empty(4, dtype=int).view(ndarray) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([999999, 31, 12, 999999])) + + def test_reshape(self): + a = arange(10) + a[0] = masked + # Try the default + b = a.reshape((5, 2)) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ arguments as list instead of tuple + b = a.reshape(5, 2) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ order + b = a.reshape((5, 2), order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + # Try w/ order + b = a.reshape(5, 2, order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + + c = np.reshape(a, (2, 5)) + assert_(isinstance(c, MaskedArray)) + assert_equal(c.shape, (2, 5)) + assert_(c[0, 0] is masked) + assert_(c.flags['C']) + + def test_make_mask_descr(self): + # Flexible + ntype = [('a', float), ('b', float)] + test = make_mask_descr(ntype) + assert_equal(test, [('a', bool), ('b', bool)]) + assert_(test is make_mask_descr(test)) + + # Standard w/ shape + ntype = (float, 2) + test = make_mask_descr(ntype) + assert_equal(test, (bool, 2)) + assert_(test is make_mask_descr(test)) + + # Standard standard + ntype = float + test = make_mask_descr(ntype) + assert_equal(test, np.dtype(bool)) + assert_(test is make_mask_descr(test)) + + # Nested + ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] + test = make_mask_descr(ntype) + control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) + assert_equal(test, control) + assert_(test is make_mask_descr(test)) + + # Named+ shape + ntype = [('a', (float, 2))] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([('a', (bool, 2))])) + assert_(test is make_mask_descr(test)) + + # 2 names + ntype = [(('A', 'a'), float)] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([(('A', 'a'), bool)])) + assert_(test is make_mask_descr(test)) + + # nested boolean types should preserve identity + base_type = np.dtype([('a', int, 3)]) + base_mtype = make_mask_descr(base_type) + sub_type = np.dtype([('a', int), ('b', base_mtype)]) + test = make_mask_descr(sub_type) + assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])])) + assert_(test.fields['b'][0] is base_mtype) + + def test_make_mask(self): + # Test make_mask + # w/ a list as an input + mask = [0, 1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a ndarray as an input + mask = np.array([0, 1], dtype=bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a flexible-type ndarray as an input - 
use default + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1, 1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', float), ('b', float)] + bdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + # Ensure this also works for void + mask = np.array((False, True), dtype='?,?')[()] + assert_(isinstance(mask, np.void)) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test, mask) + assert_(test is not mask) + mask = np.array((0, 1), dtype='i4,i4')[()] + test2 = make_mask(mask, dtype=mask.dtype) + assert_equal(test2, test) + # test that nomask is returned when m is nomask. + bools = [True, False] + dtypes = [MaskType, float] + msgformat = 'copy=%s, shrink=%s, dtype=%s' + for cpy, shr, dt in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) + assert_(res is nomask, msgformat % (cpy, shr, dt)) + + def test_mask_or(self): + # Initialize + mtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using another array w / the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w / a different dtype + othertype = [('A', bool), ('B', bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + # Using nested arrays + dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) + + def test_flatten_mask(self): + # Tests flatten mask + # Standard dtype + mask = np.array([0, 0, 1], dtype=bool) + assert_equal(flatten_mask(mask), mask) + # Flexible dtype + mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + data = [(0, (0, 0)), (0, (0, 1))] + mask = np.array(data, dtype=mdtype) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + def test_on_ndarray(self): + # Test functions on ndarrays + a = np.array([1, 2, 3, 4]) + m = array(a, mask=False) + test = anom(a) + assert_equal(test, m.anom()) + test = reshape(a, (2, 2)) + assert_equal(test, m.reshape(2, 2)) + + def test_compress(self): + # Test compress function on ndarray and masked array + # Address Github #2495. 
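+        # np.ma.compress follows the np.compress argument order
+        # (condition, array, axis) and must accept plain ndarrays as
+        # well as masked arrays, which is the case gh-2495 covers, e.g.
+        # np.ma.compress([True, False, True, True], arr, axis=0) keeps
+        # rows 0, 2 and 3.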
+ arr = np.arange(8) + arr.shape = 4, 2 + cond = np.array([True, False, True, True]) + control = arr[[0, 2, 3]] + test = np.ma.compress(cond, arr, axis=0) + assert_equal(test, control) + marr = np.ma.array(arr) + test = np.ma.compress(cond, marr, axis=0) + assert_equal(test, control) + + def test_compressed(self): + # Test ma.compressed function. + # Address gh-4026 + a = np.ma.array([1, 2]) + test = np.ma.compressed(a) + assert_(type(test) is np.ndarray) + + # Test case when input data is ndarray subclass + class A(np.ndarray): + pass + + a = np.ma.array(A(shape=0)) + test = np.ma.compressed(a) + assert_(type(test) is A) + + # Test that compress flattens + test = np.ma.compressed([[1],[2]]) + assert_equal(test.ndim, 1) + test = np.ma.compressed([[[[[1]]]]]) + assert_equal(test.ndim, 1) + + # Test case when input is MaskedArray subclass + class M(MaskedArray): + pass + + test = np.ma.compressed(M(shape=(0,1,2))) + assert_equal(test.ndim, 1) + + # with .compressed() overridden + class M(MaskedArray): + def compressed(self): + return 42 + + test = np.ma.compressed(M(shape=(0,1,2))) + assert_equal(test, 42) + + def test_convolve(self): + a = masked_equal(np.arange(5), 2) + b = np.array([1, 1]) + test = np.ma.convolve(a, b) + assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1)) + + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1)) + + test = np.ma.convolve([1, 1], [1, 1, 1]) + assert_equal(test, masked_equal([1, 2, 2, 1], -1)) + + a = [1, 1] + b = masked_equal([1, -1, -1, 1], -1) + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1)) + test = np.ma.convolve(a, b, propagate_mask=True) + assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) + + +class TestMaskedFields(object): + + def setup(self): + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = ['one', 'two', 'three', 'four', 'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mdtype = [('a', bool), ('b', bool), ('c', bool)] + mask = [0, 1, 0, 0, 1] + base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) + + def test_set_records_masks(self): + base = self.data['base'] + mdtype = self.data['mdtype'] + # Set w/ nomask or masked + base.mask = nomask + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = masked + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ simple boolean + base.mask = False + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = True + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ list + base.mask = [0, 0, 0, 1, 1] + assert_equal_records(base._mask, + np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], + dtype=mdtype)) + + def test_set_record_element(self): + # Check setting an element of a record) + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[0] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 2, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'two', b'three', b'four', b'five']) + + def test_set_record_slice(self): + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[:3] = (pi, pi, 'pi') + + 
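+        # Assigning one record tuple to a slice broadcasts it to every
+        # selected record, casting each element to its field's dtype:
+        # the int field stores int(pi) == 3, the float field stores pi,
+        # and the string field stores b'pi', as the asserts below check.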
assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 3, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'pi', b'pi', b'four', b'five']) + + def test_mask_element(self): + "Check record access" + base = self.data['base'] + base[0] = masked + + for n in ('a', 'b', 'c'): + assert_equal(base[n].mask, [1, 1, 0, 0, 1]) + assert_equal(base[n]._data, base._data[n]) + + def test_getmaskarray(self): + # Test getmaskarray on flexible dtype + ndtype = [('a', int), ('b', float)] + test = empty(3, dtype=ndtype) + assert_equal(getmaskarray(test), + np.array([(0, 0), (0, 0), (0, 0)], + dtype=[('a', '|b1'), ('b', '|b1')])) + test[:] = masked + assert_equal(getmaskarray(test), + np.array([(1, 1), (1, 1), (1, 1)], + dtype=[('a', '|b1'), ('b', '|b1')])) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + # Transform globally to simple dtype + test = a.view(float) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + # Transform globally to dty + test = a.view((float, 2)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + + def test_getitem(self): + ndtype = [('a', float), ('b', float)] + a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) + a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), + dtype=[('a', bool), ('b', bool)]) + + def _test_index(i): + assert_equal(type(a[i]), mvoid) + assert_equal_records(a[i]._data, a._data[i]) + assert_equal_records(a[i]._mask, a._mask[i]) + + assert_equal(type(a[i, ...]), MaskedArray) + assert_equal_records(a[i,...]._data, a._data[i,...]) + assert_equal_records(a[i,...]._mask, a._mask[i,...]) + + _test_index(1) # No mask + _test_index(0) # One element masked + _test_index(-2) # All element masked + + def test_setitem(self): + # Issue 4866: check that one can set individual items in [record][col] + # and [col][record] order + ndtype = np.dtype([('a', float), ('b', int)]) + ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) + ma['a'][1] = 3.0 + assert_equal(ma['a'], np.array([1.0, 3.0])) + ma[1]['a'] = 4.0 + assert_equal(ma['a'], np.array([1.0, 4.0])) + # Issue 2403 + mdtype = np.dtype([('a', bool), ('b', bool)]) + # soft mask + control = np.array([(False, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a[0]['a'] = 2 + assert_equal(a.mask, control) + # hard mask + control = np.array([(True, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a[0]['a'] = 2 + assert_equal(a.mask, control) + + def test_setitem_scalar(self): + # 8510 + mask_0d = np.ma.masked_array(1, mask=True) + arr = np.ma.arange(3) + arr[0] = mask_0d + assert_array_equal(arr.mask, [True, False, False]) + + def test_element_len(self): + # check that len() works for mvoid (Github issue #576) + for rec in self.data['base']: + assert_equal(len(rec), len(self.data['ddtype'])) + + +class TestMaskedObjectArray(object): + + def test_getitem(self): + arr = 
np.ma.array([None, None]) + for dt in [float, object]: + a0 = np.eye(2).astype(dt) + a1 = np.eye(3).astype(dt) + arr[0] = a0 + arr[1] = a1 + + assert_(arr[0] is a0) + assert_(arr[1] is a1) + assert_(isinstance(arr[0,...], MaskedArray)) + assert_(isinstance(arr[1,...], MaskedArray)) + assert_(arr[0,...][()] is a0) + assert_(arr[1,...][()] is a1) + + arr[0] = np.ma.masked + + assert_(arr[1] is a1) + assert_(isinstance(arr[0,...], MaskedArray)) + assert_(isinstance(arr[1,...], MaskedArray)) + assert_equal(arr[0,...].mask, True) + assert_(arr[1,...][()] is a1) + + # gh-5962 - object arrays of arrays do something special + assert_equal(arr[0].data, a0) + assert_equal(arr[0].mask, True) + assert_equal(arr[0,...][()].data, a0) + assert_equal(arr[0,...][()].mask, True) + + def test_nested_ma(self): + + arr = np.ma.array([None, None]) + # set the first object to be an unmasked masked constant. A little fiddly + arr[0,...] = np.array([np.ma.masked], object)[0,...] + + # check the above line did what we were aiming for + assert_(arr.data[0] is np.ma.masked) + + # test that getitem returned the value by identity + assert_(arr[0] is np.ma.masked) + + # now mask the masked value! + arr[0] = np.ma.masked + assert_(arr[0] is np.ma.masked) + + +class TestMaskedView(object): + + def setup(self): + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + self.data = (data, a, controlmask) + + def test_view_to_nothing(self): + (data, a, controlmask) = self.data + test = a.view() + assert_(isinstance(test, MaskedArray)) + assert_equal(test._data, a._data) + assert_equal(test._mask, a._mask) + + def test_view_to_type(self): + (data, a, controlmask) = self.data + test = a.view(np.ndarray) + assert_(not isinstance(test, MaskedArray)) + assert_equal(test, a._data) + assert_equal_records(test, data.view(a.dtype).squeeze()) + + def test_view_to_simple_dtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view(float) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + + def test_view_to_flexible_dtype(self): + (data, a, controlmask) = self.data + + test = a.view([('A', float), ('B', float)]) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a']) + assert_equal(test['B'], a['b']) + + test = a[0].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][0]) + assert_equal(test['B'], a['b'][0]) + + test = a[-1].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][-1]) + assert_equal(test['B'], a['b'][-1]) + + def test_view_to_subdtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + # View on 1 masked element + test = a[0].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[0]) + assert_equal(test.mask, (1, 0)) + # View on 1 unmasked element + test = a[-1].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[-1]) + + def test_view_to_dtype_and_type(self): + (data, a, controlmask) = self.data + + test = a.view((float, 2), 
np.recarray) + assert_equal(test, data) + assert_(isinstance(test, np.recarray)) + assert_(not isinstance(test, MaskedArray)) + + +class TestOptionalArgs(object): + def test_ndarrayfuncs(self): + # test axis arg behaves the same as ndarray (including multiple axes) + + d = np.arange(24.0).reshape((2,3,4)) + m = np.zeros(24, dtype=bool).reshape((2,3,4)) + # mask out last element of last dimension + m[:,:,-1] = True + a = np.ma.array(d, mask=m) + + def testaxis(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test axis arg + assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1)) + assert_equal(ma_f(a, axis=(0,1))[...,:-1], + numpy_f(d[...,:-1], axis=(0,1))) + + def testkeepdims(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test keepdims arg + assert_equal(ma_f(a, keepdims=True).shape, + numpy_f(d, keepdims=True).shape) + assert_equal(ma_f(a, keepdims=False).shape, + numpy_f(d, keepdims=False).shape) + + # test both at once + assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1], + numpy_f(d[...,:-1], axis=1, keepdims=True)) + assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1], + numpy_f(d[...,:-1], axis=(0,1), keepdims=True)) + + for f in ['sum', 'prod', 'mean', 'var', 'std']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + for f in ['min', 'max']: + testaxis(f, a, d) + + d = (np.arange(24).reshape((2,3,4))%2 == 0) + a = np.ma.array(d, mask=m) + for f in ['all', 'any']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + def test_count(self): + # test np.ma.count specially + + d = np.arange(24.0).reshape((2,3,4)) + m = np.zeros(24, dtype=bool).reshape((2,3,4)) + m[:,0,:] = True + a = np.ma.array(d, mask=m) + + assert_equal(count(a), 16) + assert_equal(count(a, axis=1), 2*ones((2,4))) + assert_equal(count(a, axis=(0,1)), 4*ones((4,))) + assert_equal(count(a, keepdims=True), 16*ones((1,1,1))) + assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4))) + assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4))) + assert_equal(count(a, axis=-2), 2*ones((2,4))) + assert_raises(ValueError, count, a, axis=(1,1)) + assert_raises(np.AxisError, count, a, axis=3) + + # check the 'nomask' path + a = np.ma.array(d, mask=nomask) + + assert_equal(count(a), 24) + assert_equal(count(a, axis=1), 3*ones((2,4))) + assert_equal(count(a, axis=(0,1)), 6*ones((4,))) + assert_equal(count(a, keepdims=True), 24*ones((1,1,1))) + assert_equal(np.ndim(count(a, keepdims=True)), 3) + assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4))) + assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4))) + assert_equal(count(a, axis=-2), 3*ones((2,4))) + assert_raises(ValueError, count, a, axis=(1,1)) + assert_raises(np.AxisError, count, a, axis=3) + + # check the 'masked' singleton + assert_equal(count(np.ma.masked), 0) + + # check 0-d arrays do not allow axis > 0 + assert_raises(np.AxisError, count, np.ma.array(1), axis=1) + + +class TestMaskedConstant(object): + def _do_add_test(self, add): + # sanity check + assert_(add(np.ma.masked, 1) is np.ma.masked) + + # now try with a vector + vector = np.array([1, 2, 3]) + result = add(np.ma.masked, vector) + + # lots of things could go wrong here + assert_(result is not np.ma.masked) + assert_(not isinstance(result, np.ma.core.MaskedConstant)) + assert_equal(result.shape, vector.shape) + assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool)) + + def test_ufunc(self): + self._do_add_test(np.add) + + def test_operator(self): + 
self._do_add_test(lambda a, b: a + b) + + def test_ctor(self): + m = np.ma.array(np.ma.masked) + + # most importantly, we do not want to create a new MaskedConstant + # instance + assert_(not isinstance(m, np.ma.core.MaskedConstant)) + assert_(m is not np.ma.masked) + + def test_repr(self): + # copies should not exist, but if they do, it should be obvious that + # something is wrong + assert_equal(repr(np.ma.masked), 'masked') + + # create a new instance in a weird way + masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) + assert_not_equal(repr(masked2), 'masked') + + def test_pickle(self): + from io import BytesIO + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(np.ma.masked, f, protocol=proto) + f.seek(0) + res = pickle.load(f) + assert_(res is np.ma.masked) + + def test_copy(self): + # gh-9328 + # copy is a no-op, like it is with np.True_ + assert_equal( + np.ma.masked.copy() is np.ma.masked, + np.True_.copy() is np.True_) + + def test__copy(self): + import copy + assert_( + copy.copy(np.ma.masked) is np.ma.masked) + + def test_deepcopy(self): + import copy + assert_( + copy.deepcopy(np.ma.masked) is np.ma.masked) + + def test_immutable(self): + orig = np.ma.masked + assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) + assert_raises(ValueError,operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.mask, (), False) + + view = np.ma.masked.view(np.ma.MaskedArray) + assert_raises(ValueError, operator.setitem, view, (), 1) + assert_raises(ValueError, operator.setitem, view.data, (), 1) + assert_raises(ValueError, operator.setitem, view.mask, (), False) + + def test_coercion_int(self): + a_i = np.zeros((), int) + assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) + assert_raises(MaskError, int, np.ma.masked) + + @pytest.mark.skipif(sys.version_info.major == 3, + reason="long doesn't exist in Python 3") + def test_coercion_long(self): + assert_raises(MaskError, long, np.ma.masked) + + def test_coercion_float(self): + a_f = np.zeros((), float) + assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + assert_(np.isnan(a_f[()])) + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_unicode(self): + a_u = np.zeros((), 'U10') + a_u[()] = np.ma.masked + assert_equal(a_u[()], u'--') + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_bytes(self): + a_b = np.zeros((), 'S10') + a_b[()] = np.ma.masked + assert_equal(a_b[()], b'--') + + def test_subclass(self): + # https://github.com/astropy/astropy/issues/6645 + class Sub(type(np.ma.masked)): pass + + a = Sub() + assert_(a is Sub()) + assert_(a is not np.ma.masked) + assert_not_equal(repr(a), 'masked') + + def test_attributes_readonly(self): + assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,)) + assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64) + + +class TestMaskedWhereAliases(object): + + # TODO: Test masked_object, masked_equal, ... 
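+    # Illustrative sketch, not from the upstream suite (assumes only the
+    # public np.ma API): masked_values, exercised below, masks entries
+    # approximately equal to the given value.
+    _demo = np.ma.masked_values([1.0, 1.1, 1.0], 1.1)
+    # _demo.mask -> [False, True, False]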
+
+    def test_masked_values(self):
+        res = masked_values(np.array([-32768.0]), np.int16(-32768))
+        assert_equal(res.mask, [True])
+
+        res = masked_values(np.inf, np.inf)
+        assert_equal(res.mask, True)
+
+        res = np.ma.masked_values(np.inf, -np.inf)
+        assert_equal(res.mask, False)
+
+        res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True)
+        assert_(res.mask is np.ma.nomask)
+
+        res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False)
+        assert_equal(res.mask, [False] * 4)
+
+
+def test_masked_array():
+    a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
+    assert_equal(np.argwhere(a), [[1], [3]])
+
+def test_append_masked_array():
+    a = np.ma.masked_equal([1,2,3], value=2)
+    b = np.ma.masked_equal([4,3,2], value=2)
+
+    result = np.ma.append(a, b)
+    expected_data = [1, 2, 3, 4, 3, 2]
+    expected_mask = [False, True, False, False, False, True]
+    assert_array_equal(result.data, expected_data)
+    assert_array_equal(result.mask, expected_mask)
+
+    a = np.ma.masked_all((2,2))
+    b = np.ma.ones((3,1))
+
+    result = np.ma.append(a, b)
+    expected_data = [1] * 3
+    expected_mask = [True] * 4 + [False] * 3
+    assert_array_equal(result.data[-3:], expected_data)
+    assert_array_equal(result.mask, expected_mask)
+
+    result = np.ma.append(a, b, axis=None)
+    assert_array_equal(result.data[-3:], expected_data)
+    assert_array_equal(result.mask, expected_mask)
+
+
+def test_append_masked_array_along_axis():
+    a = np.ma.masked_equal([1,2,3], value=2)
+    b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
+
+    # When `axis` is specified, `values` must have the correct shape.
+    assert_raises(ValueError, np.ma.append, a, b, axis=0)
+
+    result = np.ma.append(a[np.newaxis,:], b, axis=0)
+    expected = np.ma.arange(1, 10)
+    expected[[1, 6]] = np.ma.masked
+    expected = expected.reshape((3,3))
+    assert_array_equal(result.data, expected.data)
+    assert_array_equal(result.mask, expected.mask)
+
+
+def test_default_fill_value_complex():
+    # regression test for Python 3, where 'unicode' was not defined
+    assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
+
+
+def test_ufunc_with_output():
+    # check that giving an output argument always returns that output.
+    # Regression test for gh-8416.
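+    # Illustrative sketch, not from the upstream suite (assumes only the
+    # public np.add ufunc on masked arrays): the object passed via out=
+    # must be the very object that comes back.
+    demo_out = np.ma.array([0., 0., 0.], mask=[0, 0, 1])
+    demo_res = np.add(np.ma.array([1., 2., 3.]), 1., out=demo_out)
+    assert demo_res is demo_out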
+ x = array([1., 2., 3.], mask=[0, 0, 1]) + y = np.add(x, 1., out=x) + assert_(y is x) + + +def test_ufunc_with_out_varied(): + """ Test that masked arrays are immune to gh-10459 """ + # the mask of the output should not affect the result, however it is passed + a = array([ 1, 2, 3], mask=[1, 0, 0]) + b = array([10, 20, 30], mask=[1, 0, 0]) + out = array([ 0, 0, 0], mask=[0, 0, 1]) + expected = array([11, 22, 33], mask=[1, 0, 0]) + + out_pos = out.copy() + res_pos = np.add(a, b, out_pos) + + out_kw = out.copy() + res_kw = np.add(a, b, out=out_kw) + + out_tup = out.copy() + res_tup = np.add(a, b, out=(out_tup,)) + + assert_equal(res_kw.mask, expected.mask) + assert_equal(res_kw.data, expected.data) + assert_equal(res_tup.mask, expected.mask) + assert_equal(res_tup.data, expected.data) + assert_equal(res_pos.mask, expected.mask) + assert_equal(res_pos.data, expected.data) + + +def test_astype_mask_ordering(): + descr = [('v', int, 3), ('x', [('y', float)])] + x = array([ + [([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))], + [([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr) + x[0]['v'][0] = np.ma.masked + + x_a = x.astype(descr) + assert x_a.dtype.names == np.dtype(descr).names + assert x_a.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a) + + assert_(x is x.astype(x.dtype, copy=False)) + assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray) + + x_f = x.astype(x.dtype, order='F') + assert_(x_f.flags.f_contiguous) + assert_(x_f.mask.flags.f_contiguous) + + # Also test the same indirectly, via np.array + x_a2 = np.array(x, dtype=descr, subok=True) + assert x_a2.dtype.names == np.dtype(descr).names + assert x_a2.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a2) + + assert_(x is np.array(x, dtype=descr, copy=False, subok=True)) + + x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True) + assert_(x_f2.flags.f_contiguous) + assert_(x_f2.mask.flags.f_contiguous) + + +@pytest.mark.parametrize('dt1', num_dts, ids=num_ids) +@pytest.mark.parametrize('dt2', num_dts, ids=num_ids) +@pytest.mark.filterwarnings('ignore::numpy.ComplexWarning') +def test_astype_basic(dt1, dt2): + # See gh-12070 + src = np.ma.array(ones(3, dt1), fill_value=1) + dst = src.astype(dt2) + + assert_(src.fill_value == 1) + assert_(src.dtype == dt1) + assert_(src.fill_value.dtype == dt1) + + assert_(dst.fill_value == 1) + assert_(dst.dtype == dt2) + assert_(dst.fill_value.dtype == dt2) + + assert_equal(src, dst) + + +def test_fieldless_void(): + dt = np.dtype([]) # a void dtype with no fields + x = np.empty(4, dt) + + # these arrays contain no values, so there's little to test - but this + # shouldn't crash + mx = np.ma.array(x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + mx = np.ma.array(x, mask=x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_core.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_core.pyc new file mode 100644 index 0000000..fce5a0d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_core.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_deprecations.py b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_deprecations.py new file mode 100644 index 0000000..72cc29a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_deprecations.py @@ -0,0 +1,70 @@ +"""Test deprecation and future warnings. 
+ +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import assert_warns +from numpy.ma.testutils import assert_equal +from numpy.ma.core import MaskedArrayFutureWarning + +class TestArgsort(object): + """ gh-8701 """ + def _test_base(self, argsort, cls): + arr_0d = np.array(1).view(cls) + argsort(arr_0d) + + arr_1d = np.array([1, 2, 3]).view(cls) + argsort(arr_1d) + + # argsort has a bad default for >1d arrays + arr_2d = np.array([[1, 2], [3, 4]]).view(cls) + result = assert_warns( + np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) + assert_equal(result, argsort(arr_2d, axis=None)) + + # should be no warnings for explicitly specifying it + argsort(arr_2d, axis=None) + argsort(arr_2d, axis=-1) + + def test_function_ndarray(self): + return self._test_base(np.ma.argsort, np.ndarray) + + def test_function_maskedarray(self): + return self._test_base(np.ma.argsort, np.ma.MaskedArray) + + def test_method(self): + return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) + + +class TestMinimumMaximum(object): + def test_minimum(self): + assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2])) + + def test_maximum(self): + assert_warns(DeprecationWarning, np.ma.maximum, np.ma.array([1, 2])) + + def test_axis_default(self): + # NumPy 1.13, 2017-05-06 + + data1d = np.ma.arange(6) + data2d = data1d.reshape(2, 3) + + ma_min = np.ma.minimum.reduce + ma_max = np.ma.maximum.reduce + + # check that the default axis is still None, but warns on 2d arrays + result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + assert_equal(result, ma_max(data2d, axis=None)) + + result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + assert_equal(result, ma_min(data2d, axis=None)) + + # no warnings on 1d, as both new and old defaults are equivalent + result = ma_min(data1d) + assert_equal(result, ma_min(data1d, axis=None)) + assert_equal(result, ma_min(data1d, axis=0)) + + result = ma_max(data1d) + assert_equal(result, ma_max(data1d, axis=None)) + assert_equal(result, ma_max(data1d, axis=0)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_deprecations.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_deprecations.pyc new file mode 100644 index 0000000..26a4d65 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_deprecations.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_extras.py b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_extras.py new file mode 100644 index 0000000..5243cf7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_extras.py @@ -0,0 +1,1678 @@ +# pylint: disable-msg=W0611, W0612, W0511 +"""Tests suite for MaskedArray. 
+Adapted from the original test_ma by Pierre Gerard-Marchant + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import itertools + +import numpy as np +from numpy.testing import ( + assert_warns, suppress_warnings + ) +from numpy.ma.testutils import ( + assert_, assert_array_equal, assert_equal, assert_almost_equal + ) +from numpy.ma.core import ( + array, arange, masked, MaskedArray, masked_array, getmaskarray, shape, + nomask, ones, zeros, count + ) +from numpy.ma.extras import ( + atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef, + median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, + ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, + mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, + notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, + diagflat, stack, vstack + ) + + +class TestGeneric(object): + # + def test_masked_all(self): + # Tests masked_all + # Standard dtype + test = masked_all((2,), dtype=float) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + test = masked_all((2,), dtype=dt) + control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + test = masked_all((2, 2), dtype=dt) + control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], + mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], + dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((1, 1), dtype=dt) + control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) + assert_equal(test, control) + + def test_masked_all_like(self): + # Tests masked_all + # Standard dtype + base = array([1, 2], dtype=float) + test = masked_all_like(base) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + test = masked_all_like(base) + control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + test = masked_all_like(control) + assert_equal(test, control) + + def check_clump(self, f): + for i in range(1, 7): + for j in range(2**i): + k = np.arange(i, dtype=int) + ja = np.full(i, j, dtype=int) + a = masked_array(2**k) + a.mask = (ja & (2**k)) != 0 + s = 0 + for sl in f(a): + s += a.data[sl].sum() + if f == clump_unmasked: + assert_equal(a.compressed().sum(), s) + else: + a.mask = ~a.mask + assert_equal(a.compressed().sum(), s) + + def test_clump_masked(self): + # Test clump_masked + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + # + test = clump_masked(a) + control = [slice(0, 3), slice(6, 7), slice(8, 10)] 
+        assert_equal(test, control)
+
+        self.check_clump(clump_masked)
+
+    def test_clump_unmasked(self):
+        # Test clump_unmasked
+        a = masked_array(np.arange(10))
+        a[[0, 1, 2, 6, 8, 9]] = masked
+        test = clump_unmasked(a)
+        control = [slice(3, 6), slice(7, 8), ]
+        assert_equal(test, control)
+
+        self.check_clump(clump_unmasked)
+
+    def test_flatnotmasked_contiguous(self):
+        # Test flatnotmasked_contiguous
+        a = arange(10)
+        # No mask
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(0, a.size)])
+        # mask of all false
+        a.mask = np.zeros(10, dtype=bool)
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(0, a.size)])
+        # Some mask
+        a[(a < 3) | (a > 8) | (a == 5)] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(3, 5), slice(6, 9)])
+        #
+        a[:] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [])
+
+
+class TestAverage(object):
+    # Several tests of average. Why so many? Good point...
+    def test_testAverage1(self):
+        # Test of average.
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        assert_equal(2.0, average(ott, axis=0))
+        assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
+        result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
+        assert_equal(2.0, result)
+        assert_(wts == 4.0)
+        ott[:] = masked
+        assert_equal(average(ott, axis=0).mask, [True])
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        ott = ott.reshape(2, 2)
+        ott[:, 1] = masked
+        assert_equal(average(ott, axis=0), [2.0, 0.0])
+        assert_equal(average(ott, axis=1).mask[0], [True])
+        assert_equal([2., 0.], average(ott, axis=0))
+        result, wts = average(ott, axis=0, returned=1)
+        assert_equal(wts, [1., 0.])
+
+    def test_testAverage2(self):
+        # More tests of average.
+        w1 = [0, 1, 1, 1, 1, 0]
+        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
+        x = arange(6, dtype=np.float_)
+        assert_equal(average(x, axis=0), 2.5)
+        assert_equal(average(x, axis=0, weights=w1), 2.5)
+        y = array([arange(6, dtype=np.float_), 2.0 * arange(6)])
+        assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)
+        assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.)
+        assert_equal(average(y, axis=1),
+                     [average(x, axis=0), average(x, axis=0) * 2.0])
+        assert_equal(average(y, None, weights=w2), 20. / 6.)
+        assert_equal(average(y, axis=0, weights=w2),
+                     [0., 1., 2., 3., 4., 10.])
+        assert_equal(average(y, axis=1),
+                     [average(x, axis=0), average(x, axis=0) * 2.0])
+        m1 = zeros(6)
+        m2 = [0, 0, 1, 1, 0, 0]
+        m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
+        m4 = ones(6)
+        m5 = [0, 1, 1, 1, 1, 1]
+        assert_equal(average(masked_array(x, m1), axis=0), 2.5)
+        assert_equal(average(masked_array(x, m2), axis=0), 2.5)
+        assert_equal(average(masked_array(x, m4), axis=0).mask, [True])
+        assert_equal(average(masked_array(x, m5), axis=0), 0.0)
+        assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
+        z = masked_array(y, m3)
+        assert_equal(average(z, None), 20. / 6.)
+        assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
+        assert_equal(average(z, axis=1), [2.5, 5.0])
+        assert_equal(average(z, axis=0, weights=w2),
+                     [0., 1., 99., 99., 4.0, 10.0])
+
+    def test_testAverage3(self):
+        # Yet more tests of average!
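+        # Illustrative sketch, not from the upstream suite (public np.ma
+        # API only): masked entries drop out of both the value sum and
+        # the weight sum, so the average covers the unmasked data alone.
+        demo = np.ma.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+        assert np.ma.average(demo) == 2.0  # mean of the unmasked 1., 2., 3.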
+ a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=1) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) + assert_equal(shape(w2), shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[False, False], [True, False]]) + a2da = average(a2d, axis=0) + assert_equal(a2da, [0.5, 3.0]) + a2dma = average(a2dm, axis=0) + assert_equal(a2dma, [1.0, 3.0]) + a2dma = average(a2dm, axis=None) + assert_equal(a2dma, 7. / 3.) + a2dma = average(a2dm, axis=1) + assert_equal(a2dma, [1.5, 4.0]) + + def test_onintegers_with_mask(self): + # Test average on integers with mask + a = average(array([1, 2])) + assert_equal(a, 1.5) + a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) + assert_equal(a, 1.5) + + def test_complex(self): + # Test with complex data. + # (Regression test for https://github.com/numpy/numpy/issues/2684) + mask = np.array([[0, 0, 0, 1, 0], + [0, 1, 0, 0, 0]], dtype=bool) + a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], + [9j, 0+1j, 2+3j, 4+5j, 7+7j]], + mask=mask) + + av = average(a) + expected = np.average(a.compressed()) + assert_almost_equal(av.real, expected.real) + assert_almost_equal(av.imag, expected.imag) + + av0 = average(a, axis=0) + expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j + assert_almost_equal(av0.real, expected0.real) + assert_almost_equal(av0.imag, expected0.imag) + + av1 = average(a, axis=1) + expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j + assert_almost_equal(av1.real, expected1.real) + assert_almost_equal(av1.imag, expected1.imag) + + # Test with the 'weights' argument. + wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], + [1.0, 1.0, 1.0, 1.0, 1.0]]) + wav = average(a, weights=wts) + expected = np.average(a.compressed(), weights=wts[~mask]) + assert_almost_equal(wav.real, expected.real) + assert_almost_equal(wav.imag, expected.imag) + + wav0 = average(a, weights=wts, axis=0) + expected0 = (average(a.real, weights=wts, axis=0) + + average(a.imag, weights=wts, axis=0)*1j) + assert_almost_equal(wav0.real, expected0.real) + assert_almost_equal(wav0.imag, expected0.imag) + + wav1 = average(a, weights=wts, axis=1) + expected1 = (average(a.real, weights=wts, axis=1) + + average(a.imag, weights=wts, axis=1)*1j) + assert_almost_equal(wav1.real, expected1.real) + assert_almost_equal(wav1.imag, expected1.imag) + + +class TestConcatenator(object): + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_1d(self): + # Tests mr_ on 1D arrays. + assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) + b = ones(5) + m = [1, 0, 0, 0, 0] + d = masked_array(b, mask=m) + c = mr_[d, 0, 0, d] + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + assert_array_equal(c.mask, mr_[m, 0, 0, m]) + + def test_2d(self): + # Tests mr_ on 2D arrays. 
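+        # Illustrative sketch, not from the upstream suite (assumes the
+        # mr_ import above): mr_ is the masked counterpart of np.r_, so
+        # concatenation preserves each operand's mask.
+        demo = mr_[np.ma.array([1, 2], mask=[1, 0]), np.ma.array([3], mask=[0])]
+        # demo.mask -> [True, False, False]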
+ a_1 = np.random.rand(5, 5) + a_2 = np.random.rand(5, 5) + m_1 = np.round_(np.random.rand(5, 5), 0) + m_2 = np.round_(np.random.rand(5, 5), 0) + b_1 = masked_array(a_1, mask=m_1) + b_2 = masked_array(a_2, mask=m_2) + # append columns + d = mr_['1', b_1, b_2] + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b_1) + assert_array_equal(d[:, 5:], b_2) + assert_array_equal(d.mask, np.r_['1', m_1, m_2]) + d = mr_[b_1, b_2] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5,:], b_1) + assert_array_equal(d[5:,:], b_2) + assert_array_equal(d.mask, np.r_[m_1, m_2]) + + def test_masked_constant(self): + actual = mr_[np.ma.masked, 1] + assert_equal(actual.mask, [True, False]) + assert_equal(actual.data[1], 1) + + actual = mr_[[1, 2], np.ma.masked] + assert_equal(actual.mask, [False, False, True]) + assert_equal(actual.data[:2], [1, 2]) + + +class TestNotMasked(object): + # Tests notmasked_edges and notmasked_contiguous. + + def test_edges(self): + # Tests unmasked_edges + data = masked_array(np.arange(25).reshape(5, 5), + mask=[[0, 0, 1, 0, 0], + [0, 0, 0, 1, 1], + [1, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [1, 1, 1, 0, 0]],) + test = notmasked_edges(data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, 1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) + # + test = notmasked_edges(data.data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data.data, 0) + assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data.data, -1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) + # + data[-2] = masked + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, -1) + assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) + assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) + + def test_contiguous(self): + # Tests notmasked_contiguous + a = masked_array(np.arange(24).reshape(3, 8), + mask=[[0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 0]]) + tmp = notmasked_contiguous(a, None) + assert_equal(tmp, [ + slice(0, 4, None), + slice(16, 22, None), + slice(23, 24, None) + ]) + + tmp = notmasked_contiguous(a, 0) + assert_equal(tmp, [ + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(2, 3, None)], + [slice(2, 3, None)], + [], + [slice(2, 3, None)] + ]) + # + tmp = notmasked_contiguous(a, 1) + assert_equal(tmp, [ + [slice(0, 4, None)], + [], + [slice(0, 6, None), slice(7, 8, None)] + ]) + + +class TestCompressFunctions(object): + + def test_compress_nd(self): + # Tests compress_nd + x = np.array(list(range(3*4*5))).reshape(3, 4, 5) + m = np.zeros((3,4,5)).astype(bool) + m[1,1,1] = True + x = array(x, mask=m) + + # axis=None + a = compress_nd(x) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + # axis=0 + a = compress_nd(x, 0) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], 
+ [[40, 41, 42, 43, 44], + [45, 46, 47, 48, 49], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + # axis=1 + a = compress_nd(x, 1) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[20, 21, 22, 23, 24], + [30, 31, 32, 33, 34], + [35, 36, 37, 38, 39]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + a2 = compress_nd(x, (1,)) + a3 = compress_nd(x, -2) + a4 = compress_nd(x, (-2,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=2 + a = compress_nd(x, 2) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [25, 27, 28, 29], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (2,)) + a3 = compress_nd(x, -1) + a4 = compress_nd(x, (-1,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 1) + a = compress_nd(x, (0, 1)) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + a2 = compress_nd(x, (0, -2)) + assert_equal(a, a2) + + # axis=(1, 2) + a = compress_nd(x, (1, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (-2, 2)) + a3 = compress_nd(x, (1, -1)) + a4 = compress_nd(x, (-2, -1)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 2) + a = compress_nd(x, (0, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (0, -1)) + assert_equal(a, a2) + + def test_compress_rowcols(self): + # Tests compress_rowcols + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) + assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) + assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[8]]) + assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) + assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_equal(compress_rowcols(x).size, 0) + assert_equal(compress_rowcols(x, 0).size, 0) + assert_equal(compress_rowcols(x, 1).size, 0) + + def test_mask_rowcols(self): + # Tests mask_rowcols. 
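+        # Illustrative sketch, not from the upstream suite (public np.ma
+        # API): with axis=None a single masked cell propagates to its
+        # whole row and its whole column.
+        demo = np.ma.array(np.zeros((2, 2)), mask=[[1, 0], [0, 0]])
+        # np.ma.mask_rowcols(demo).mask -> [[1, 1], [1, 0]]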
+ x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1,).mask, + [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_(mask_rowcols(x).all() is masked) + assert_(mask_rowcols(x, 0).all() is masked) + assert_(mask_rowcols(x, 1).all() is masked) + assert_(mask_rowcols(x).mask.all()) + assert_(mask_rowcols(x, 0).mask.all()) + assert_(mask_rowcols(x, 1).mask.all()) + + def test_dot(self): + # Tests dot product + n = np.arange(1, 7) + # + m = [1, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 0]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 1] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 1], [1, 1]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + assert_equal(c, dot(a, b)) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b) + assert_equal(c.mask, nomask) + c = dot(b, a) + assert_equal(c.mask, nomask) + # + a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 0], [1, 1]]) + c = dot(a, b) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 0], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = 
dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + + def test_dot_returns_maskedarray(self): + # See gh-6611 + a = np.eye(3) + b = array(a) + assert_(type(dot(a, a)) is MaskedArray) + assert_(type(dot(a, b)) is MaskedArray) + assert_(type(dot(b, a)) is MaskedArray) + assert_(type(dot(b, b)) is MaskedArray) + + def test_dot_out(self): + a = array(np.eye(3)) + out = array(np.zeros((3, 3))) + res = dot(a, a, out=out) + assert_(res is out) + assert_equal(a, res) + + +class TestApplyAlongAxis(object): + # Tests 2D functions + def test_3d(self): + a = arange(12.).reshape(2, 2, 3) + + def myfunc(b): + return b[1] + + xa = apply_along_axis(myfunc, 2, a) + assert_equal(xa, [[1, 4], [7, 10]]) + + # Tests kwargs functions + def test_3d_kwargs(self): + a = arange(12).reshape(2, 2, 3) + + def myfunc(b, offset=0): + return b[1+offset] + + xa = apply_along_axis(myfunc, 2, a, offset=1) + assert_equal(xa, [[2, 5], [8, 11]]) + + +class TestApplyOverAxes(object): + # Tests apply_over_axes + def test_basic(self): + a = arange(24).reshape(2, 3, 4) + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[60], [92], [124]]]) + assert_equal(test, ctrl) + a[(a % 2).astype(bool)] = masked + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[28], [44], [60]]]) + assert_equal(test, ctrl) + + +class TestMedian(object): + def test_pytype(self): + r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) + assert_equal(r, np.inf) + + def test_inf(self): + # test that even which computes handles inf / x = masked + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]]), axis=-1) + assert_equal(r, np.inf) + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]]), axis=None) + assert_equal(r, np.inf) + # all masked + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]], mask=True), + axis=-1) + assert_equal(r.mask, True) + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]], mask=True), + axis=None) + assert_equal(r.mask, True) + + def test_non_masked(self): + x = np.arange(9) + assert_equal(np.ma.median(x), 4.) + assert_(type(np.ma.median(x)) is not MaskedArray) + x = range(8) + assert_equal(np.ma.median(x), 3.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + x = 5 + assert_equal(np.ma.median(x), 5.) 
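+        # Illustrative sketch, not from the upstream suite (public np.ma
+        # API): the median is taken over the unmasked entries only.
+        demo = np.ma.array([1., 2., 3., 4.], mask=[0, 0, 1, 1])
+        assert np.ma.median(demo) == 1.5  # median of the unmasked 1., 2.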
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # integer
+        x = np.arange(9 * 8).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(type(np.ma.median(x, axis=1)) is not MaskedArray)
+        # float
+        x = np.arange(9 * 8.).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(type(np.ma.median(x, axis=1)) is not MaskedArray)
+
+    def test_docstring_examples(self):
+        "test the examples given in the docstring of ma.median"
+        x = array(np.arange(8), mask=[0]*4 + [1]*4)
+        assert_equal(np.ma.median(x), 1.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
+        assert_equal(np.ma.median(x), 2.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        ma_x = np.ma.median(x, axis=-1, overwrite_input=True)
+        assert_equal(ma_x, [2., 5.])
+        assert_equal(ma_x.shape, (2,), "shape mismatch")
+        assert_(type(ma_x) is MaskedArray)
+
+    def test_axis_argument_errors(self):
+        msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s"
+        for ndmin in range(5):
+            for mask in [False, True]:
+                x = array(1, ndmin=ndmin, mask=mask)
+
+                # Valid axis values should not raise exception
+                args = itertools.product(range(-ndmin, ndmin), [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except Exception:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+                # Invalid axis values should raise exception
+                args = itertools.product([-(ndmin + 1), ndmin], [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except np.AxisError:
+                        pass
+                    else:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+    def test_masked_0d(self):
+        # Check values
+        x = array(1, mask=False)
+        assert_equal(np.ma.median(x), 1)
+        x = array(1, mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+
+    def test_masked_1d(self):
+        x = array(np.arange(5), mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant)
+        x = array(np.arange(5), mask=False)
+        assert_equal(np.ma.median(x), 2.)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = array(np.arange(5), mask=[0,1,0,0,0])
+        assert_equal(np.ma.median(x), 2.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = array(np.arange(5), mask=[0,1,1,1,1])
+        assert_equal(np.ma.median(x), 0.)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # integer
+        x = array(np.arange(5), mask=[0,1,1,0,0])
+        assert_equal(np.ma.median(x), 3.)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # float
+        x = array(np.arange(5.), mask=[0,1,1,0,0])
+        assert_equal(np.ma.median(x), 3.)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(6), mask=[0,1,1,1,1,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(6.), mask=[0,1,1,1,1,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + + def test_1d_shape_consistency(self): + assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape, + np.ma.median(array([1,2,3],mask=[0,1,0])).shape ) + + def test_2d(self): + # Tests median w/ 2D + (n, p) = (101, 30) + x = masked_array(np.linspace(-1., 1., n),) + x[:10] = x[-10:] = masked + z = masked_array(np.empty((n, p), dtype=float)) + z[:, 0] = x[:] + idx = np.arange(len(x)) + for i in range(1, p): + np.random.shuffle(idx) + z[:, i] = x[idx] + assert_equal(median(z[:, 0]), 0) + assert_equal(median(z), 0) + assert_equal(median(z, axis=0), np.zeros(p)) + assert_equal(median(z.T, axis=1), np.zeros(p)) + + def test_2d_waxis(self): + # Tests median w/ 2D arrays and different axis. + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x), 14.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) + assert_(type(np.ma.median(x, axis=0)) is MaskedArray) + assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) + assert_(type(np.ma.median(x, axis=1)) is MaskedArray) + assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) + + def test_3d(self): + # Tests median w/ 3D + x = np.ma.arange(24).reshape(3, 4, 2) + x[x % 3 == 0] = masked + assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) + x.shape = (4, 3, 2) + assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) + x = np.ma.arange(24).reshape(4, 3, 2) + x[x % 5 == 0] = masked + assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) + + def test_neg_axis(self): + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x, axis=-1), median(x, axis=1)) + + def test_out_1d(self): + # integer float even odd + for v in (30, 30., 31, 31.): + x = masked_array(np.arange(v)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(())) + r = median(x, out=out) + if v == 30: + assert_equal(out, 14.5) + else: + assert_equal(out, 15.) + assert_(r is out) + assert_(type(r) is MaskedArray) + + def test_out(self): + # integer float even odd + for v in (40, 40., 30, 30.): + x = masked_array(np.arange(v).reshape(10, -1)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(10)) + r = median(x, axis=1, out=out) + if v == 30: + e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + else: + e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3, + mask=[True]*3 + [False]*4 + [True]*3) + assert_equal(r, e) + assert_(r is out) + assert_(type(r) is MaskedArray) + + def test_single_non_masked_value_on_axis(self): + data = [[1., 0.], + [0., 3.], + [0., 0.]] + masked_arr = np.ma.masked_equal(data, 0) + expected = [1., 3.] 
+ assert_array_equal(np.ma.median(masked_arr, axis=0), + expected) + + def test_nan(self): + with suppress_warnings() as w: + w.record(RuntimeWarning) + for mask in (False, np.zeros(6, dtype=bool)): + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm.mask = mask + + # scalar result + r = np.ma.median(dm, axis=None) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + r = np.ma.median(dm.ravel(), axis=0) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + + r = np.ma.median(dm, axis=0) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [1, np.nan, 3]) + r = np.ma.median(dm, axis=1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + r = np.ma.median(dm, axis=-1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm[:, 2] = np.ma.masked + assert_array_equal(np.ma.median(dm, axis=None), np.nan) + assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3]) + assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5]) + assert_equal([x.category is RuntimeWarning for x in w.log], + [True]*13) + + def test_out_nan(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.ma.masked_array(np.zeros((4,))) + d = np.ma.masked_array(np.ones((3, 4))) + d[2, 1] = np.nan + d[2, 2] = np.ma.masked + assert_equal(np.ma.median(d, 0, out=o), o) + o = np.ma.masked_array(np.zeros((3,))) + assert_equal(np.ma.median(d, 1, out=o), o) + o = np.ma.masked_array(np.zeros(())) + assert_equal(np.ma.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.ma.masked_array(np.arange(24, dtype=float)) + a[::3] = np.ma.masked + a[2] = np.nan + with suppress_warnings() as w: + w.record(RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_array_equal(np.ma.median(a, axis=0), np.nan) + assert_(w.log[0].category is RuntimeWarning) + assert_(w.log[1].category is RuntimeWarning) + + a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4)) + a.mask = np.arange(a.size) % 2 == 1 + aorig = a.copy() + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + with suppress_warnings() as w: + w.record(RuntimeWarning) + warnings.filterwarnings('always', '', RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(np.isscalar(np.ma.median(a))) + assert_(w.log[0].category is RuntimeWarning) + + # axis0 + b = np.ma.median(aorig, axis=0) + b[2, 3] = np.nan + b[1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.ma.median(a, 0), b) + assert_equal(len(w), 1) + + # axis1 + b = np.ma.median(aorig, axis=1) + b[1, 3] = np.nan + b[1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.ma.median(a, 1), b) + assert_equal(len(w), 1) + + # axis02 + b = np.ma.median(aorig, axis=(0, 2)) + b[1] = np.nan + b[2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.ma.median(a, (0, 2)), b) + assert_equal(len(w), 1) + + def test_ambigous_fill(self): + # 255 is max value, used as filler for sort + a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) + a = np.ma.masked_array(a, mask=a == 3) + assert_array_equal(np.ma.median(a, axis=1), 255) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + 
assert_array_equal(np.ma.median(a), 255) + + def test_special(self): + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) + assert_equal(np.ma.median(a), inf) + + a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_array_equal(np.ma.median(a, axis=1), inf) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), inf) + + # no mask + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=0), inf) + assert_equal(np.ma.median(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + a = np.ma.masked_array(a, mask=np.isnan(a)) + if inf > 0: + assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.ma.median(a), 4.5) + else: + assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.ma.median(a), -2.5) + assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) + + for i in range(0, 10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=1), inf) + assert_equal(np.ma.median(a, axis=0), + ([np.nan] * i) + [inf] * j) + + def test_empty(self): + # empty arrays + a = np.ma.masked_array(np.array([], dtype=float)) + with suppress_warnings() as w: + w.record(RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # multiple dimensions + a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) + # no axis + with suppress_warnings() as w: + w.record(RuntimeWarning) + warnings.filterwarnings('always', '', RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) + assert_equal(np.ma.median(a, axis=0), b) + assert_equal(np.ma.median(a, axis=1), b) + + # axis 2 + b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.ma.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.ma.masked_array(np.arange(7.)) + assert_(type(np.ma.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.ma.median(o.astype(object))), float) + + +class TestCov(object): + + def setup(self): + self.data = array(np.random.rand(12)) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test cov on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test cov 1 1D variable w/missing 
values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.cov(nx), cov(x)) + assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(nx, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + # + try: + cov(x, allow_masked=False) + except ValueError: + pass + # + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), + cov(x, x[::-1], rowvar=False)) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), + cov(x, x[::-1], rowvar=False, bias=True)) + + def test_2d_with_missing(self): + # Test cov on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + valid = np.logical_not(getmaskarray(x)).astype(int) + frac = np.dot(valid, valid.T) + xf = (x - x.mean(1)[:, None]).filled(0) + assert_almost_equal(cov(x), + np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) + assert_almost_equal(cov(x, bias=True), + np.cov(xf, bias=True) * x.shape[1] / frac) + frac = np.dot(valid.T, valid) + xf = (x - x.mean(0)).filled(0) + assert_almost_equal(cov(x, rowvar=False), + (np.cov(xf, rowvar=False) * + (x.shape[0] - 1) / (frac - 1.))) + assert_almost_equal(cov(x, rowvar=False, bias=True), + (np.cov(xf, rowvar=False, bias=True) * + x.shape[0] / frac)) + + +class TestCorrcoef(object): + + def setup(self): + self.data = array(np.random.rand(12)) + self.data2 = array(np.random.rand(12)) + + def test_ddof(self): + # ddof raises DeprecationWarning + x, y = self.data, self.data2 + expected = np.corrcoef(x) + expected2 = np.corrcoef(x, y) + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof has no or negligible effect on the function + assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) + assert_almost_equal(corrcoef(x, ddof=-1), expected) + assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) + assert_almost_equal(corrcoef(x, ddof=3), expected) + assert_almost_equal(corrcoef(x, y, ddof=3), expected2) + + def test_bias(self): + x, y = self.data, self.data2 + expected = np.corrcoef(x) + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, y, True, False) + assert_warns(DeprecationWarning, corrcoef, x, y, True, True) + assert_warns(DeprecationWarning, corrcoef, x, bias=False) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(x, bias=1), expected) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test corrcoef on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + 
assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test corrcoef 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.corrcoef(nx), corrcoef(x)) + assert_almost_equal(np.corrcoef(nx, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + try: + corrcoef(x, allow_masked=False) + except ValueError: + pass + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) + assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), + corrcoef(x, x[::-1], rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], bias=1)) + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], ddof=2)) + + def test_2d_with_missing(self): + # Test corrcoef on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + + test = corrcoef(x) + control = np.corrcoef(x) + assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], + control[:-1, :-1]) + + +class TestPolynomial(object): + # + def test_polyfit(self): + # Tests polyfit + # On ndarrays + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) + # ON 1D maskedarrays + x = x.view(MaskedArray) + x[0] = masked + y = y.view(MaskedArray) + y[0, 0] = y[-1, -1] = masked + # + (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, + full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + w = np.random.rand(10) + 1 + wo = w.copy() + xs = x[1:-1] + ys = y[1:-1] + ws = w[1:-1] + (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w) + (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws) + assert_equal(w, wo) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + def test_polyfit_with_masked_NaNs(self): + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + + x[0] = np.nan + y[-1,-1] = np.nan + x = x.view(MaskedArray) + y = y.view(MaskedArray) + x[0] = masked + y[-1,-1] = masked + + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + 
+            assert_almost_equal(a, a_)
+
+
+class TestArraySetOps(object):
+
+    def test_unique_onlist(self):
+        # Test unique on list
+        data = [1, 1, 1, 2, 2, 3]
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_(isinstance(test[0], MaskedArray))
+        assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
+        assert_equal(test[1], [0, 3, 5])
+        assert_equal(test[2], [0, 0, 0, 1, 1, 2])
+
+    def test_unique_onmaskedarray(self):
+        # Test unique on masked data w/use_mask=True
+        data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+        #
+        data.fill_value = 3
+        data = masked_array(data=[1, 1, 1, 2, 2, 3],
+                            mask=[0, 0, 1, 0, 1, 0], fill_value=3)
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+
+    def test_unique_allmasked(self):
+        # Test all masked
+        data = masked_array([1, 1, 1], mask=True)
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, ], mask=[True]))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0, 0, 0])
+        #
+        # Test masked
+        data = masked
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array(masked))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0])
+
+    def test_ediff1d(self):
+        # Tests ediff1d
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
+        test = ediff1d(x)
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_tobegin(self):
+        # Test ediff1d w/ to_begin
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_begin=masked)
+        control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_begin=[1, 2, 3])
+        control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_toend(self):
+        # Test ediff1d w/ to_end
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_end=masked)
+        control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=[1, 2, 3])
+        control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_tobegin_toend(self):
+        # Test ediff1d w/ to_begin and to_end
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_end=masked, to_begin=masked)
+        control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked)
+        control = array([0, 1, 1, 1, 4, 1, 2, 3],
+                        mask=[1, 1, 0, 0, 1, 0, 0, 0])
+        assert_equal(test, control)
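+        # The filled data and the mask are also checked separately, since
+        # equality on masked arrays ignores the masked positions
+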
assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_ndarray(self): + # Test ediff1d w/ a ndarray + x = np.arange(5) + test = ediff1d(x) + control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) + assert_equal(test, control) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_intersect1d(self): + # Test intersect1d + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d(x, y) + control = array([1, 3, -1], mask=[0, 0, 1]) + assert_equal(test, control) + + def test_setxor1d(self): + # Test setxor1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 2, 3]) + b = array([6, 5, 4]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([], [])) + + def test_isin(self): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. 
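+        # The checks below focus on mask propagation and on agreement with
+        # np.isin applied to the unmasked data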
+ a = np.arange(24).reshape([2, 3, 4]) + mask = np.zeros([2, 3, 4]) + mask[1, 2, 0] = 1 + a = array(a, mask=mask) + b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33], + mask=[0, 1, 0, 1, 0, 1, 0, 1, 0]) + ec = zeros((2, 3, 4), dtype=bool) + ec[0, 0, 0] = True + ec[0, 0, 1] = True + ec[0, 2, 3] = True + c = isin(a, b) + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, ec) + #compare results of np.isin to ma.isin + d = np.isin(a, b[~b.mask]) & ~a.mask + assert_array_equal(c, d) + + def test_in1d(self): + # Test in1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, False, True, True]) + # + assert_array_equal([], in1d([], [])) + + def test_in1d_invert(self): + # Test in1d's invert parameter + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + assert_array_equal([], in1d([], [], invert=True)) + + def test_union1d(self): + # Test union1d + a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = union1d(a, b) + control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) + assert_equal(test, control) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) + y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) + ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) + z = union1d(x, y) + assert_equal(z, ez) + # + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + # Test setdiff1d + a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) + b = array([2, 4, 3, 3, 2, 1, 5]) + test = setdiff1d(a, b) + assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) + # + a = arange(10) + b = arange(8) + assert_equal(setdiff1d(a, b), array([8, 9])) + a = array([], np.uint32, mask=[]) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_char_array(self): + # Test setdiff1d_charray + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + +class TestShapeBase(object): + + def test_atleast_2d(self): + # Test atleast_2d + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = atleast_2d(a) + assert_equal(b.shape, (1, 3)) + assert_equal(b.mask.shape, b.data.shape) + assert_equal(a.shape, (3,)) + assert_equal(a.mask.shape, a.data.shape) + assert_equal(b.mask.shape, b.data.shape) + + def test_shape_scalar(self): + # the atleast and diagflat function should work with scalars + # GitHub issue #3367 + # Additionally, the atleast functions should accept multiple scalars + # correctly + b = atleast_1d(1.0) + assert_equal(b.shape, (1,)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_1d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1,)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_2d(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, 
b.shape) + + b = atleast_2d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_3d(1.0) + assert_equal(b.shape, (1, 1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_3d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + + b = diagflat(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.data.shape) + + +class TestStack(object): + + def test_stack_1d(self): + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = masked_array([9, 8, 7], mask=[1, 0, 0]) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_masks(self): + a = masked_array([0, 1, 2], mask=True) + b = masked_array([9, 8, 7], mask=False) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_nd(self): + # 2D + shp = (3, 2) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) + + # 4D + shp = (3, 2, 4, 5,) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_extras.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_extras.pyc new file mode 100644 index 0000000..39951b0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_extras.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_mrecords.py b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_mrecords.py new file mode 100644 index 0000000..dbbf1c8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_mrecords.py @@ -0,0 +1,495 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for mrecords. 
+ +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.ma as ma +from numpy import recarray +from numpy.ma import masked, nomask +from numpy.testing import temppath +from numpy.core.records import ( + fromrecords as recfromrecords, fromarrays as recfromarrays + ) +from numpy.ma.mrecords import ( + MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, + addfield + ) +from numpy.ma.testutils import ( + assert_, assert_equal, + assert_equal_records, + ) +from numpy.core.numeric import pickle + + +class TestMRecords(object): + + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = [b'one', b'two', b'three', b'four', b'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mask = [0, 1, 0, 0, 1] + base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + + def test_byview(self): + # Test creation by view + base = self.base + mbase = base.view(mrecarray) + assert_equal(mbase.recordmask, base.recordmask) + assert_equal_records(mbase._mask, base._mask) + assert_(isinstance(mbase._data, recarray)) + assert_equal_records(mbase._data, base._data.view(recarray)) + for field in ('a', 'b', 'c'): + assert_equal(base[field], mbase[field]) + assert_equal_records(mbase.view(mrecarray), mbase) + + def test_get(self): + # Tests fields retrieval + base = self.base.copy() + mbase = base.view(mrecarray) + # As fields.......... + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase, field), mbase[field]) + assert_equal(base[field], mbase[field]) + # as elements ....... + mbase_first = mbase[0] + assert_(isinstance(mbase_first, mrecarray)) + assert_equal(mbase_first.dtype, mbase.dtype) + assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) + # Used to be mask, now it's recordmask + assert_equal(mbase_first.recordmask, nomask) + assert_equal(mbase_first._mask.item(), (False, False, False)) + assert_equal(mbase_first['a'], mbase['a'][0]) + mbase_last = mbase[-1] + assert_(isinstance(mbase_last, mrecarray)) + assert_equal(mbase_last.dtype, mbase.dtype) + assert_equal(mbase_last.tolist(), (None, None, None)) + # Used to be mask, now it's recordmask + assert_equal(mbase_last.recordmask, True) + assert_equal(mbase_last._mask.item(), (True, True, True)) + assert_equal(mbase_last['a'], mbase['a'][-1]) + assert_((mbase_last['a'] is masked)) + # as slice .......... + mbase_sl = mbase[:2] + assert_(isinstance(mbase_sl, mrecarray)) + assert_equal(mbase_sl.dtype, mbase.dtype) + # Used to be mask, now it's recordmask + assert_equal(mbase_sl.recordmask, [0, 1]) + assert_equal_records(mbase_sl.mask, + np.array([(False, False, False), + (True, True, True)], + dtype=mbase._mask.dtype)) + assert_equal_records(mbase_sl, base[:2].view(mrecarray)) + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase_sl, field), base[:2][field]) + + def test_set_fields(self): + # Tests setting fields. 
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        mbase = mbase.copy()
+        mbase.fill_value = (999999, 1e20, 'N/A')
+        # Change the data, the mask should be conserved
+        mbase.a._data[:] = 5
+        assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
+        assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
+        # Change the elements, and the mask will follow
+        mbase.a = 1
+        assert_equal(mbase['a']._data, [1]*5)
+        assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
+        # Used to be _mask, now it's recordmask
+        assert_equal(mbase.recordmask, [False]*5)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(0, 0, 0),
+                               (0, 1, 1),
+                               (0, 0, 0),
+                               (0, 0, 0),
+                               (0, 1, 1)],
+                              dtype=bool))
+        # Set a field to mask ........................
+        mbase.c = masked
+        # Used to be mask, and now it's still mask!
+        assert_equal(mbase.c.mask, [1]*5)
+        assert_equal(mbase.c.recordmask, [1]*5)
+        assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
+        assert_equal(ma.getdata(mbase['c']), [b'N/A']*5)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(0, 0, 1),
+                               (0, 1, 1),
+                               (0, 0, 1),
+                               (0, 0, 1),
+                               (0, 1, 1)],
+                              dtype=bool))
+        # Set fields by slices .......................
+        mbase = base.view(mrecarray).copy()
+        mbase.a[3:] = 5
+        assert_equal(mbase.a, [1, 2, 3, 5, 5])
+        assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
+        mbase.b[3:] = masked
+        assert_equal(mbase.b, base['b'])
+        assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
+        # Set fields globally..........................
+        ndtype = [('alpha', '|S1'), ('num', int)]
+        data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
+        rdata = data.view(MaskedRecords)
+        val = ma.array([10, 20, 30], mask=[1, 0, 0])
+
+        rdata['num'] = val
+        assert_equal(rdata.num, val)
+        assert_equal(rdata.num.mask, [1, 0, 0])
+
+    def test_set_fields_mask(self):
+        # Tests setting the mask of a field.
+        base = self.base.copy()
+        # This one already has a mask....
+        mbase = base.view(mrecarray)
+        mbase['a'][-2] = masked
+        assert_equal(mbase.a, [1, 2, 3, 4, 5])
+        assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])
+        # This one has not yet
+        mbase = fromarrays([np.arange(5), np.random.rand(5)],
+                           dtype=[('a', int), ('b', float)])
+        mbase['a'][-2] = masked
+        assert_equal(mbase.a, [0, 1, 2, 3, 4])
+        assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])
+
+    def test_set_mask(self):
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        # Set the mask to True .......................
+        mbase.mask = masked
+        assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
+        assert_equal(mbase['a']._mask, mbase['b']._mask)
+        assert_equal(mbase['a']._mask, mbase['c']._mask)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(1, 1, 1)]*5, dtype=bool))
+        # Delete the mask ............................
+        mbase.mask = nomask
+        assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(0, 0, 0)]*5, dtype=bool))
+
+    def test_set_mask_fromarray(self):
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        # Sets the mask w/ an array
+        mbase.mask = [1, 0, 0, 0, 1]
+        assert_equal(mbase.a.mask, [1, 0, 0, 0, 1])
+        assert_equal(mbase.b.mask, [1, 0, 0, 0, 1])
+        assert_equal(mbase.c.mask, [1, 0, 0, 0, 1])
+        # Yay, once more!
+ mbase.mask = [0, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) + + def test_set_mask_fromfields(self): + mbase = self.base.copy().view(mrecarray) + + nmask = np.array( + [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], + dtype=[('a', bool), ('b', bool), ('c', bool)]) + mbase.mask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + # Reinitialize and redo + mbase.mask = False + mbase.fieldmask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + + def test_set_elements(self): + base = self.base.copy() + # Set an element to mask ..................... + mbase = base.view(mrecarray).copy() + mbase[-2] = masked + assert_equal( + mbase._mask.tolist(), + np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], + dtype=bool)) + # Used to be mask, now it's recordmask! + assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) + # Set slices ................................. + mbase = base.view(mrecarray).copy() + mbase[:2] = (5, 5, 5) + assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'5', b'5', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + + mbase = base.view(mrecarray).copy() + mbase[:2] = masked + assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + + def test_setslices_hardmask(self): + # Tests setting slices w/ hardmask. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + try: + mbase[-2:] = (5, 5, 5) + assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'5', b'five']) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) + assert_equal(mbase.b._mask, mbase.a._mask) + assert_equal(mbase.b._mask, mbase.c._mask) + except NotImplementedError: + # OK, not implemented yet... + pass + except AssertionError: + raise + else: + raise Exception("Flexible hard masks should be supported !") + # Not using a tuple should crash + try: + mbase[-2:] = 3 + except (NotImplementedError, TypeError): + pass + else: + raise TypeError("Should have expected a readable buffer object!") + + def test_hardmask(self): + # Test hardmask + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + assert_(mbase._hardmask) + mbase.mask = nomask + assert_equal_records(mbase._mask, base._mask) + mbase.soften_mask() + assert_(not mbase._hardmask) + mbase.mask = nomask + # So, the mask of a field is no longer set to nomask... 
+        assert_equal_records(mbase._mask,
+                             ma.make_mask_none(base.shape, base.dtype))
+        assert_(ma.make_mask(mbase['b']._mask) is nomask)
+        assert_equal(mbase['a']._mask, mbase['b']._mask)
+
+    def test_pickling(self):
+        # Test pickling
+        base = self.base.copy()
+        mrec = base.view(mrecarray)
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            _ = pickle.dumps(mrec, protocol=proto)
+            mrec_ = pickle.loads(_)
+            assert_equal(mrec_.dtype, mrec.dtype)
+            assert_equal_records(mrec_._data, mrec._data)
+            assert_equal(mrec_._mask, mrec._mask)
+            assert_equal_records(mrec_._mask, mrec._mask)
+
+    def test_filled(self):
+        # Test filling the array
+        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+        _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
+        ddtype = [('a', int), ('b', float), ('c', '|S8')]
+        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+                          fill_value=(99999, 99999., 'N/A'))
+        mrecfilled = mrec.filled()
+        assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))
+        assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),
+                                               dtype=float))
+        assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),
+                                               dtype='|S8'))
+
+    def test_tolist(self):
+        # Test tolist.
+        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+        _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
+        ddtype = [('a', int), ('b', float), ('c', '|S8')]
+        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+                          fill_value=(99999, 99999., 'N/A'))
+
+        assert_equal(mrec.tolist(),
+                     [(1, 1.1, None), (2, 2.2, b'two'),
+                      (None, None, b'three')])
+
+    def test_withnames(self):
+        # Test the creation w/ format and names
+        x = mrecarray(1, formats=float, names='base')
+        x[0]['base'] = 10
+        assert_equal(x['base'][0], 10)
+
+    def test_exotic_formats(self):
+        # Test that 'exotic' formats are processed properly
+        easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
+        easy[0] = masked
+        assert_equal(easy.filled(1).item(), (1, b'1', 1.))
+
+        solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
+
+        if len(s) > 1:
+            assert_(eq(np.concatenate((x, y), 1),
+                       concatenate((xm, ym), 1)))
+            assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
+            assert_(eq(np.sum(x, 1), sum(x, 1)))
+            assert_(eq(np.product(x, 1), product(x, 1)))
+
+    def test_testCI(self):
+        # Test of conversions and indexing
+        x1 = np.array([1, 2, 4, 3])
+        x2 = array(x1, mask=[1, 0, 0, 0])
+        x3 = array(x1, mask=[0, 1, 0, 1])
+        x4 = array(x1)
+        # test conversion to strings
+        str(x2)  # raises?
+        repr(x2)  # raises?
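+        # sort() treats masked entries as fill_value when ordering, so
+        # filling with 0 sends the masked element to the front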
+ assert_(eq(np.sort(x1), sort(x2, fill_value=0))) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_(eq(x1[2], x2[2])) + assert_(eq(x1[2:5], x2[2:5])) + assert_(eq(x1[:], x2[:])) + assert_(eq(x1[1:], x3[1:])) + x1[2] = 9 + x2[2] = 9 + assert_(eq(x1, x2)) + x1[1:3] = 99 + x2[1:3] = 99 + assert_(eq(x1, x2)) + x2[1] = masked + assert_(eq(x1, x2)) + x2[1:3] = masked + assert_(eq(x1, x2)) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_(eq(x1, x2)) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_(eq(3.0, x2.fill_value)) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_testCopySize(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=1) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_(y1._data is not x1) + assert_(allequal(x1, y1._data)) + assert_(y1.mask is m) + + y1a = array(y1, copy=0) + # For copy=False, one might expect that the array would just + # passed on, i.e., that it would be "is" instead of "==". + # See gh-4043 for discussion. + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3, copy=0) + assert_(y2.mask is m3) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2.mask is m3) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a.mask is not m) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + assert_(y2a.mask is not m) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_(eq(concatenate([x4, x4]), y4)) + assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) + y6 = repeat(x4, 2, axis=0) + assert_(eq(y5, y6)) + + def test_testPut(self): + # Test of put + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + m2 = m.copy() + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x.mask is m) + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_(eq(x, [0, 10, 2, -1, 40])) + + x = array(d, mask=m2, copy=True) + x.put([0, 1, 2], [-1, 100, 200]) + assert_(x.mask is not m2) + assert_(x[3] is masked) + assert_(x[4] is masked) + assert_(eq(x, [-1, 100, 200, 0, 0])) + + def test_testPut2(self): + # Test of put + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) + x[2:4] = z + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + y = x[2:4] + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) 
+ y[:] = z + assert_(y[0] is masked) + assert_(y[1] is not masked) + assert_(eq(y, [10, 40])) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + def test_testMaPut(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] + i = np.nonzero(m)[0] + put(ym, i, zm) + assert_(all(take(ym, i, axis=0) == zm)) + + def test_testOddFeatures(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_(eq(z.real, x)) + assert_(eq(z.imag, 10 * x)) + assert_(eq((z * conjugate(z)).real, 101 * x * x)) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_(eq(x, z)) + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_(eq(x, z)) + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + c[0] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) + assert_(eq(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2))) + assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) + assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) + assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) + assert_(eq(masked_inside(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 1, 3).mask, + [1, 1, 1, 1, 0])) + assert_(eq(masked_outside(array(list(range(5)), + mask=[0, 1, 0, 0, 0]), 1, 3).mask, + [1, 1, 0, 0, 1])) + assert_(eq(masked_equal(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 0])) + assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 1])) + assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5])) + atest = ones((10, 10, 10), dtype=np.float32) + btest = zeros(atest.shape, MaskType) + ctest = masked_where(btest, atest) + assert_(eq(atest, ctest)) + z = choose(c, (-x, x)) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + x = arange(6) + x[5] = masked + y = arange(6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_(eq(z, zm)) + assert_(getmask(zm) is nomask) + assert_(eq(zm, [0, 1, 2, 30, 40, 50])) + z = where(c, masked, 1) + 
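+        # eq() compares via ma.allclose, which treats masked slots as equal
+        # to any value, hence the arbitrary 99 placeholders below
+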
assert_(eq(z, [99, 99, 99, 1, 1, 1])) + z = where(c, 1, masked) + assert_(eq(z, [99, 1, 1, 99, 99, 99])) + + def test_testMinMax2(self): + # Test of minimum, maximum. + assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])) + assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])) + x = arange(5) + y = arange(5) - 2 + x[3] = masked + y[0] = masked + assert_(eq(minimum(x, y), where(less(x, y), x, y))) + assert_(eq(maximum(x, y), where(greater(x, y), x, y))) + assert_(minimum.reduce(x) == 0) + assert_(maximum.reduce(x) == 4) + + def test_testTakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))) + assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))) + assert_(eq(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y))) + assert_(eq(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y))) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_testInplace(self): + # Test of inplace operations and rich comparisons + y = arange(10) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x += 1 + assert_(eq(x, y + 1)) + xm += 1 + assert_(eq(x, y + 1)) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x -= 1 + assert_(eq(x, y - 1)) + xm -= 1 + assert_(eq(xm, y - 1)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x *= 2.0 + assert_(eq(x, y * 2)) + xm *= 2.0 + assert_(eq(xm, y * 2)) + + x = arange(10) * 2 + xm = arange(10) + xm[2] = masked + x //= 2 + assert_(eq(x, y)) + xm //= 2 + assert_(eq(x, y)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x /= 2.0 + assert_(eq(x, y / 2.0)) + xm /= arange(10) + assert_(eq(xm, ones((10,)))) + + x = arange(10).astype(np.float32) + xm = arange(10) + xm[2] = masked + x += 1. + assert_(eq(x, y + 1.)) + + def test_testPickle(self): + # Test of pickling + x = arange(12) + x[4:10:2] = masked + x = x.reshape(4, 3) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + s = pickle.dumps(x, protocol=proto) + y = pickle.loads(s) + assert_(eq(x, y)) + + def test_testMasked(self): + # Test of masked element + xx = arange(6) + xx[1] = masked + assert_(str(masked) == '--') + assert_(xx[1] is masked) + assert_equal(filled(xx[1], 0), 0) + + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + assert_(eq(2.0, average(ott, axis=0))) + assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) + assert_(eq(2.0, result)) + assert_(wts == 4.0) + ott[:] = masked + assert_(average(ott, axis=0) is masked) + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_(eq(average(ott, axis=0), [2.0, 0.0])) + assert_(average(ott, axis=1)[0] is masked) + assert_(eq([2., 0.], average(ott, axis=0))) + result, wts = average(ott, axis=0, returned=1) + assert_(eq(wts, [1., 0.])) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6) + assert_(allclose(average(x, axis=0), 2.5)) + assert_(allclose(average(x, axis=0, weights=w1), 2.5)) + y = array([arange(6), 2.0 * arange(6)]) + assert_(allclose(average(y, None), + np.add.reduce(np.arange(6)) * 3. 
/ 12.)) + assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + assert_(allclose(average(y, None, weights=w2), 20. / 6.)) + assert_(allclose(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.])) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) + assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) + assert_(average(masked_array(x, m4), axis=0) is masked) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_(allclose(average(z, None), 20. / 6.)) + assert_(allclose(average(z, axis=0), + [0., 1., 99., 99., 4.0, 7.5])) + assert_(allclose(average(z, axis=1), [2.5, 5.0])) + assert_(allclose(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0])) + + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=1) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) + assert_(shape(w2) == shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[0, 0], [1, 0]]) + a2da = average(a2d, axis=0) + assert_(eq(a2da, [0.5, 3.0])) + a2dma = average(a2dm, axis=0) + assert_(eq(a2dma, [1.0, 3.0])) + a2dma = average(a2dm, axis=None) + assert_(eq(a2dma, 7. 
/ 3.)) + a2dma = average(a2dm, axis=1) + assert_(eq(a2dma, [1.5, 4.0])) + + def test_testToPython(self): + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + assert_raises(ValueError, bool, array([0, 1])) + assert_raises(ValueError, bool, array([0, 0], mask=[0, 1])) + + def test_testScalarArithmetic(self): + xm = array(0, mask=1) + #TODO FIXME: Find out what the following raises a warning in r8247 + with np.errstate(divide='ignore'): + assert_((1 / array(0)).mask) + assert_((1 + xm).mask) + assert_((-xm).mask) + assert_((-xm).mask) + assert_(maximum(xm, xm).mask) + assert_(minimum(xm, xm).mask) + assert_(xm.filled().dtype is xm._data.dtype) + x = array(0, mask=0) + assert_(x.filled() == x._data) + assert_equal(str(xm), str(masked_print_option)) + + def test_testArrayMethods(self): + a = array([1, 3, 2]) + assert_(eq(a.any(), a._data.any())) + assert_(eq(a.all(), a._data.all())) + assert_(eq(a.argmax(), a._data.argmax())) + assert_(eq(a.argmin(), a._data.argmin())) + assert_(eq(a.choose(0, 1, 2, 3, 4), + a._data.choose(0, 1, 2, 3, 4))) + assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) + assert_(eq(a.conj(), a._data.conj())) + assert_(eq(a.conjugate(), a._data.conjugate())) + m = array([[1, 2], [3, 4]]) + assert_(eq(m.diagonal(), m._data.diagonal())) + assert_(eq(a.sum(), a._data.sum())) + assert_(eq(a.take([1, 2]), a._data.take([1, 2]))) + assert_(eq(m.transpose(), m._data.transpose())) + + def test_testArrayAttributes(self): + a = array([1, 3, 2]) + assert_equal(a.ndim, 1) + + def test_testAPI(self): + assert_(not [m for m in dir(np.ndarray) + if m not in dir(MaskedArray) and + not m.startswith('_')]) + + def test_testSingleElementSubscript(self): + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + +class TestUfuncs(object): + def setup(self): + self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) + + def test_testUfuncRegression(self): + f_invalid_ignore = [ + 'sqrt', 'arctanh', 'arcsin', 'arccos', + 'arccosh', 'arctanh', 'log', 'log10', 'divide', + 'true_divide', 'floor_divide', 'remainder', 'fmod'] + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', + 'sin', 'cos', 'tan', + 'arcsin', 'arccos', 'arctan', + 'sinh', 'cosh', 'tanh', + 'arcsinh', + 'arccosh', + 'arctanh', + 'absolute', 'fabs', 'negative', + 'floor', 'ceil', + 'logical_not', + 'add', 'subtract', 'multiply', + 'divide', 'true_divide', 'floor_divide', + 'remainder', 'fmod', 'hypot', 'arctan2', + 'equal', 'not_equal', 'less_equal', 'greater_equal', + 'less', 'greater', + 'logical_and', 'logical_or', 'logical_xor']: + try: + uf = getattr(umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(np.ma, f) + args = self.d[:uf.nin] + with np.errstate(): + if f in f_invalid_ignore: + np.seterr(invalid='ignore') + if f in ['arctanh', 'log', 'log10']: + np.seterr(divide='ignore') + ur = uf(*args) + mr = mf(*args) + assert_(eq(ur.filled(0), mr.filled(0), f)) + assert_(eqmask(ur.mask, mr.mask)) + + def test_reduce(self): + a = self.d[0] + assert_(not alltrue(a, axis=0)) + assert_(sometrue(a, axis=0)) + assert_equal(sum(a[:3], axis=0), 0) + assert_equal(product(a, axis=0), 0) + + def test_minmax(self): + a = arange(1, 13).reshape(3, 4) + amask = masked_where(a < 5, a) + 
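+        # masked_where masks every entry where the condition holds, so the
+        # reductions below skip all values smaller than 5
+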
assert_equal(amask.max(), a.max()) + assert_equal(amask.min(), 5) + assert_((amask.max(0) == a.max(0)).all()) + assert_((amask.min(0) == [5, 6, 7, 8]).all()) + assert_(amask.max(1)[0].mask) + assert_(amask.min(1)[0].mask) + + def test_nonzero(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) + assert_(eq(nonzero(x), [0])) + + +class TestArrayMethods(object): + + def setup(self): + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + self.d = (x, X, XX, m, mx, mX, mXX) + + def test_trace(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_(eq(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0))) + + def test_clip(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + clipped = mx.clip(2, 8) + assert_(eq(clipped.mask, mx.mask)) + assert_(eq(clipped._data, x.clip(2, 8))) + assert_(eq(clipped._data, mx._data.clip(2, 8))) + + def test_ptp(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), mx.compressed().ptp()) + rows = np.zeros(n, np.float_) + cols = np.zeros(m, np.float_) + for k in range(m): + cols[k] = mX[:, k].compressed().ptp() + for k in range(n): + rows[k] = mX[k].compressed().ptp() + assert_(eq(mX.ptp(0), cols)) + assert_(eq(mX.ptp(1), rows)) + + def test_swapaxes(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXswapped = mX.swapaxes(0, 1) + assert_(eq(mXswapped[-1], mX[:, -1])) + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_cumprod(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumprod(0) + assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) + mXcp = mX.cumprod(1) + assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) + + def test_cumsum(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumsum(0) + assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) + mXcp = mX.cumsum(1) + assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) + + def test_varstd(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + assert_(eq(mX.var(axis=None), mX.compressed().var())) + assert_(eq(mX.std(axis=None), mX.compressed().std())) + assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) + assert_(eq(mX.var().shape, X.var().shape)) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + for k in range(6): + assert_(eq(mXvar1[k], mX[k].compressed().var())) + assert_(eq(mXvar0[k], mX[:, k].compressed().var())) + assert_(eq(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std())) + + +def eqmask(m1, m2): + if m1 is nomask: + return m2 is nomask + if m2 is nomask: + return m1 is nomask + return (m1 == m2).all() diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_old_ma.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_old_ma.pyc new file mode 100644 index 0000000..8bac5ff Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_old_ma.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_regression.py b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_regression.py new file mode 100644 index 0000000..54f1bda --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_regression.py @@ -0,0 +1,89 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import ( + assert_, assert_array_equal, assert_allclose, suppress_warnings + ) + + +class TestRegression(object): + def test_masked_array_create(self): + # Ticket #17 + x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], + mask=[0, 0, 0, 1, 1, 1, 0, 0]) + assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) + + def test_masked_array(self): + # Ticket #61 + np.ma.array(1, mask=[1]) + + def test_mem_masked_where(self): + # Ticket #62 + from numpy.ma import masked_where, MaskType + a = np.zeros((1, 1)) + b = np.zeros(a.shape, MaskType) + c = masked_where(b, a) + a-c + + def test_masked_array_multiply(self): + # Ticket #254 + a = np.ma.zeros((4, 1)) + a[2, 0] = np.ma.masked + b = np.zeros((4, 2)) + a*b + b*a + + def test_masked_array_repeat(self): + # Ticket #271 + np.ma.array([1], mask=False).repeat(10) + + def test_masked_array_repr_unicode(self): + # Ticket #1256 + repr(np.ma.array(u"Unicode")) + + def test_atleast_2d(self): + # Ticket #1559 + a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) + b = np.atleast_2d(a) + assert_(a.mask.ndim == 1) + assert_(b.mask.ndim == 2) + + def test_set_fill_value_unicode_py3(self): + # Ticket #2733 + a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0]) + a.fill_value = 'X' + assert_(a.fill_value == 'X') + + def test_var_sets_maskedarray_scalar(self): + # Issue gh-2757 + a = np.ma.array(np.arange(5), mask=True) + mout = np.ma.array(-1, dtype=float) + a.var(out=mout) + assert_(mout._data == 0) + + def test_ddof_corrcoef(self): + # See gh-3336 + x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) + y = np.array([2, 2.5, 3.1, 3, 5]) + # this test can be removed after deprecation. + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + r0 = np.ma.corrcoef(x, y, ddof=0) + r1 = np.ma.corrcoef(x, y, ddof=1) + # ddof should not have an effect (it gets cancelled out) + assert_allclose(r0.data, r1.data) + + def test_mask_not_backmangled(self): + # See gh-10314. Test case taken from gh-3140. + a = np.ma.MaskedArray([1., 2.], mask=[False, False]) + assert_(a.mask.shape == (2,)) + b = np.tile(a, (2, 1)) + # Check that the above no longer changes a.shape to (1, 2) + assert_(a.mask.shape == (2,)) + assert_(b.shape == (2, 2)) + assert_(b.mask.shape == (2, 2)) + + def test_empty_list_on_structured(self): + # See gh-12464. Indexing with empty list should give empty result. 
+ ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4') + assert_array_equal(ma[[]], ma[:0]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_regression.pyc new file mode 100644 index 0000000..c99afb1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_regression.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_subclassing.py b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_subclassing.py new file mode 100644 index 0000000..f8ab52b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_subclassing.py @@ -0,0 +1,351 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import assert_, assert_raises +from numpy.ma.testutils import assert_equal +from numpy.ma.core import ( + array, arange, masked, MaskedArray, masked_array, log, add, hypot, + divide, asarray, asanyarray, nomask + ) +# from numpy.ma.core import ( + +def assert_startswith(a, b): + # produces a better error message than assert_(a.startswith(b)) + assert_equal(a[:len(b)], b) + +class SubArray(np.ndarray): + # Defines a generic np.ndarray subclass, that stores some metadata + # in the dictionary `info`. + def __new__(cls,arr,info={}): + x = np.asanyarray(arr).view(cls) + x.info = info.copy() + return x + + def __array_finalize__(self, obj): + if callable(getattr(super(SubArray, self), + '__array_finalize__', None)): + super(SubArray, self).__array_finalize__(obj) + self.info = getattr(obj, 'info', {}).copy() + return + + def __add__(self, other): + result = super(SubArray, self).__add__(other) + result.info['added'] = result.info.get('added', 0) + 1 + return result + + def __iadd__(self, other): + result = super(SubArray, self).__iadd__(other) + result.info['iadded'] = result.info.get('iadded', 0) + 1 + return result + + +subarray = SubArray + + +class SubMaskedArray(MaskedArray): + """Pure subclass of MaskedArray, keeping some info on subclass.""" + def __new__(cls, info=None, **kwargs): + obj = super(SubMaskedArray, cls).__new__(cls, **kwargs) + obj._optinfo['info'] = info + return obj + + +class MSubArray(SubArray, MaskedArray): + + def __new__(cls, data, info={}, mask=nomask): + subarr = SubArray(data, info) + _data = MaskedArray.__new__(cls, data=subarr, mask=mask) + _data.info = subarr.info + return _data + + def _get_series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + _series = property(fget=_get_series) + +msubarray = MSubArray + + +# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing +# setting to non-class values (and thus np.ma.core.masked_print_option) +# and overrides __array_wrap__, updating the info dict, to check that this +# doesn't get destroyed by MaskedArray._update_from. But this one also needs +# its own iterator... 
+class CSAIterator(object): + """ + Flat iterator object that uses its own setter/getter + (works around ndarray.flat not propagating subclass setters/getters + see https://github.com/numpy/numpy/issues/4564) + roughly following MaskedIterator + """ + def __init__(self, a): + self._original = a + self._dataiter = a.view(np.ndarray).flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + out = self._dataiter.__getitem__(indx) + if not isinstance(out, np.ndarray): + out = out.__array__() + out = out.view(type(self._original)) + return out + + def __setitem__(self, index, value): + self._dataiter[index] = self._original._validate_input(value) + + def __next__(self): + return next(self._dataiter).__array__().view(type(self._original)) + + next = __next__ + + +class ComplicatedSubArray(SubArray): + + def __str__(self): + return 'myprefix {0} mypostfix'.format(self.view(SubArray)) + + def __repr__(self): + # Return a repr that does not start with 'name(' + return '<{0} {1}>'.format(self.__class__.__name__, self) + + def _validate_input(self, value): + if not isinstance(value, ComplicatedSubArray): + raise ValueError("Can only set to MySubArray values") + return value + + def __setitem__(self, item, value): + # validation ensures direct assignment with ndarray or + # masked_print_option will fail + super(ComplicatedSubArray, self).__setitem__( + item, self._validate_input(value)) + + def __getitem__(self, item): + # ensure getter returns our own class also for scalars + value = super(ComplicatedSubArray, self).__getitem__(item) + if not isinstance(value, np.ndarray): # scalar + value = value.__array__().view(ComplicatedSubArray) + return value + + @property + def flat(self): + return CSAIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + def __array_wrap__(self, obj, context=None): + obj = super(ComplicatedSubArray, self).__array_wrap__(obj, context) + if context is not None and context[0] is np.multiply: + obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 + + return obj + + +class TestSubclassing(object): + # Test suite for masked subclasses of ndarray. + + def setup(self): + x = np.arange(5, dtype='float') + mx = msubarray(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_data_subclassing(self): + # Tests whether the subclass is kept. 
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xsub = SubArray(x) + xmsub = masked_array(xsub, mask=m) + assert_(isinstance(xmsub, MaskedArray)) + assert_equal(xmsub._data, xsub) + assert_(isinstance(xmsub._data, SubArray)) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, subarray)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), msubarray)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a msubarray + assert_(isinstance(add(mx, mx), msubarray)) + assert_(isinstance(add(mx, x), msubarray)) + # Result should work + assert_equal(add(mx, x), mx+x) + assert_(isinstance(add(mx, mx)._data, subarray)) + assert_(isinstance(add.outer(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, x), msubarray)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), msubarray)) + assert_(isinstance(divide(mx, x), msubarray)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + + def test_attributepropagation(self): + x = array(arange(5), mask=[0]+[1]*4) + my = masked_array(subarray(x)) + ym = msubarray(x) + # + z = (my+1) + assert_(isinstance(z, MaskedArray)) + assert_(not isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_equal(z._data.info, {}) + # + z = (ym+1) + assert_(isinstance(z, MaskedArray)) + assert_(isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_(z._data.info['added'] > 0) + # Test that inplace methods from data get used (gh-4617) + ym += 1 + assert_(isinstance(ym, MaskedArray)) + assert_(isinstance(ym, MSubArray)) + assert_(isinstance(ym._data, SubArray)) + assert_(ym._data.info['iadded'] > 0) + # + ym._set_mask([1, 0, 0, 0, 1]) + assert_equal(ym._mask, [1, 0, 0, 0, 1]) + ym._series._set_mask([0, 0, 0, 0, 1]) + assert_equal(ym._mask, [0, 0, 0, 0, 1]) + # + xsub = subarray(x, info={'name':'x'}) + mxsub = masked_array(xsub) + assert_(hasattr(mxsub, 'info')) + assert_equal(mxsub.info, xsub.info) + + def test_subclasspreservation(self): + # Checks that masked_array(...,subok=True) preserves the class. 
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xinfo = [(i, j) for (i, j) in zip(x, m)] + xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) + # + mxsub = masked_array(xsub, subok=False) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = asarray(xsub) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = masked_array(xsub, subok=True) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, xsub._mask) + # + mxsub = asanyarray(xsub) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, m) + + def test_subclass_items(self): + """test that getter and setter go via baseclass""" + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + # getter should return a ComplicatedSubArray, even for single item + # first check we wrote ComplicatedSubArray correctly + assert_(isinstance(xcsub[1], ComplicatedSubArray)) + assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) + + # now that it propagates inside the MaskedArray + assert_(isinstance(mxcsub[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) + assert_(mxcsub[0] is masked) + assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) + + # also for flattened version (which goes via MaskedIterator) + assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) + assert_(mxcsub.flat[0] is masked) + assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) + + # setter should only work with ComplicatedSubArray input + # first check we wrote ComplicatedSubArray correctly + assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) + # now that it propagates inside the MaskedArray + assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) + mxcsub[1] = xcsub[4] + mxcsub[1:4] = xcsub[1:4] + # also for flattened version (which goes via MaskedIterator) + assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) + mxcsub.flat[1] = xcsub[4] + mxcsub.flat[1:4] = xcsub[1:4] + + def test_subclass_nomask_items(self): + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub_nomask = masked_array(xcsub) + + assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) + + assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) + + def test_subclass_repr(self): + """test that repr uses the name of the subclass + and 'array' for np.ndarray""" + x = np.arange(5) + mx = masked_array(x, mask=[True, False, True, False, False]) + assert_startswith(repr(mx), 'masked_array') + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_startswith(repr(mxsub), + 'masked_{0}(data=[--, 1, --, 3, 4]'.format(SubArray.__name__)) + + def test_subclass_str(self): + """test str with subclass that has overridden str, setitem""" + # first without override + x = np.arange(5) + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_equal(str(mxsub), '[-- 1 
-- 3 4]')
+
+        xcsub = ComplicatedSubArray(x)
+        assert_raises(ValueError, xcsub.__setitem__, 0,
+                      np.ma.core.masked_print_option)
+        mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
+        assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix')
+
+    def test_pure_subclass_info_preservation(self):
+        # Test that ufuncs and methods conserve extra information consistently;
+        # see gh-7122.
+        arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
+        arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
+        diff1 = np.subtract(arr1, arr2)
+        assert_('info' in diff1._optinfo)
+        assert_(diff1._optinfo['info'] == 'test')
+        diff2 = arr1 - arr2
+        assert_('info' in diff2._optinfo)
+        assert_(diff2._optinfo['info'] == 'test')
diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_subclassing.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_subclassing.pyc
new file mode 100644
index 0000000..ebc4f92
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/tests/test_subclassing.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/testutils.py b/project/venv/lib/python2.7/site-packages/numpy/ma/testutils.py
new file mode 100644
index 0000000..c0deaa9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/ma/testutils.py
@@ -0,0 +1,290 @@
+"""Miscellaneous functions for testing masked arrays and subclasses
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import operator
+
+import numpy as np
+from numpy import ndarray, float_
+import numpy.core.umath as umath
+import numpy.testing
+from numpy.testing import (
+    assert_, assert_allclose, assert_array_almost_equal_nulp,
+    assert_raises, build_err_msg
+    )
+from .core import mask_or, getmask, masked_array, nomask, masked, filled
+
+__all__masked = [
+    'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal',
+    'assert_array_approx_equal', 'assert_array_compare',
+    'assert_array_equal', 'assert_array_less', 'assert_close',
+    'assert_equal', 'assert_equal_records', 'assert_mask_equal',
+    'assert_not_equal', 'fail_if_array_equal',
+    ]
+
+# Include some normal test functions to avoid breaking other projects that
+# have mistakenly included them from this file. SciPy is one. That is
+# unfortunate, as some of these functions are not intended to work with
+# masked arrays. But there was no way to tell before.
+from unittest import TestCase
+__some__from_testing = [
+    'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp',
+    'assert_raises'
+    ]
+
+__all__ = __all__masked + __some__from_testing
+
+
+def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
+    """
+    Returns true if all components of a and b are equal to given tolerances.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal. The relative error rtol should
+    be positive and << 1.0. The absolute error atol comes into play for
+    those elements of b that are very small or zero; it says how small a
+    must be also.
+
+    """
+    m = mask_or(getmask(a), getmask(b))
+    d1 = filled(a)
+    d2 = filled(b)
+    if d1.dtype.char == "O" or d2.dtype.char == "O":
+        return np.equal(d1, d2).ravel()
+    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
+    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
+    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
+    return d.ravel()
+
+
+def almost(a, b, decimal=6, fill_value=True):
+    """
+    Returns True if a and b are equal up to decimal places.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal.
+
+    """
+    m = mask_or(getmask(a), getmask(b))
+    d1 = filled(a)
+    d2 = filled(b)
+    if d1.dtype.char == "O" or d2.dtype.char == "O":
+        return np.equal(d1, d2).ravel()
+    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
+    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
+    d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal)
+    return d.ravel()
+
+
+def _assert_equal_on_sequences(actual, desired, err_msg=''):
+    """
+    Asserts the equality of two non-array sequences.
+
+    """
+    assert_equal(len(actual), len(desired), err_msg)
+    for k in range(len(desired)):
+        assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg))
+    return
+
+
+def assert_equal_records(a, b):
+    """
+    Asserts that two records are equal.
+
+    Pretty crude for now.
+
+    """
+    assert_equal(a.dtype, b.dtype)
+    for f in a.dtype.names:
+        (af, bf) = (operator.getitem(a, f), operator.getitem(b, f))
+        if not (af is masked) and not (bf is masked):
+            assert_equal(operator.getitem(a, f), operator.getitem(b, f))
+    return
+
+
+def assert_equal(actual, desired, err_msg=''):
+    """
+    Asserts that two items are equal.
+
+    """
+    # Case #1: dictionary .....
+    if isinstance(desired, dict):
+        if not isinstance(actual, dict):
+            raise AssertionError(repr(type(actual)))
+        assert_equal(len(actual), len(desired), err_msg)
+        for k, i in desired.items():
+            if k not in actual:
+                raise AssertionError("%s not in %s" % (k, actual))
+            assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg))
+        return
+    # Case #2: lists .....
+    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+        return _assert_equal_on_sequences(actual, desired, err_msg='')
+    if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)):
+        msg = build_err_msg([actual, desired], err_msg,)
+        if not desired == actual:
+            raise AssertionError(msg)
+        return
+    # Case #4. arrays or equivalent
+    if ((actual is masked) and not (desired is masked)) or \
+            ((desired is masked) and not (actual is masked)):
+        msg = build_err_msg([actual, desired],
+                            err_msg, header='', names=('x', 'y'))
+        raise ValueError(msg)
+    actual = np.array(actual, copy=False, subok=True)
+    desired = np.array(desired, copy=False, subok=True)
+    (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype)
+    if actual_dtype.char == "S" and desired_dtype.char == "S":
+        return _assert_equal_on_sequences(actual.tolist(),
+                                          desired.tolist(),
+                                          err_msg='')
+    return assert_array_equal(actual, desired, err_msg)
+
+
+def fail_if_equal(actual, desired, err_msg='',):
+    """
+    Raises an assertion error if two items are equal.
+
+    """
+    if isinstance(desired, dict):
+        if not isinstance(actual, dict):
+            raise AssertionError(repr(type(actual)))
+        fail_if_equal(len(actual), len(desired), err_msg)
+        for k, i in desired.items():
+            if k not in actual:
+                raise AssertionError(repr(k))
+            fail_if_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg))
+        return
+    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+        fail_if_equal(len(actual), len(desired), err_msg)
+        for k in range(len(desired)):
+            fail_if_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg))
+        return
+    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
+        return fail_if_array_equal(actual, desired, err_msg)
+    msg = build_err_msg([actual, desired], err_msg)
+    if not desired != actual:
+        raise AssertionError(msg)
+
+
+assert_not_equal = fail_if_equal
+
+
+def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
+    """
+    Asserts that two items are almost equal.
+
+    The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal).
+
+    """
+    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
+        return assert_array_almost_equal(actual, desired, decimal=decimal,
+                                         err_msg=err_msg, verbose=verbose)
+    msg = build_err_msg([actual, desired],
+                        err_msg=err_msg, verbose=verbose)
+    if not round(abs(desired - actual), decimal) == 0:
+        raise AssertionError(msg)
+
+
+assert_close = assert_almost_equal
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+                         fill_value=True):
+    """
+    Asserts that the comparison between two masked arrays is satisfied.
+
+    The comparison is elementwise.
+
+    """
+    # Allocate a common mask and refill
+    m = mask_or(getmask(x), getmask(y))
+    x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
+    y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
+    if ((x is masked) and not (y is masked)) or \
+            ((y is masked) and not (x is masked)):
+        msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
+                            header=header, names=('x', 'y'))
+        raise ValueError(msg)
+    # OK, now run the basic tests on filled versions
+    return np.testing.assert_array_compare(comparison,
+                                           x.filled(fill_value),
+                                           y.filled(fill_value),
+                                           err_msg=err_msg,
+                                           verbose=verbose, header=header)
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Checks the elementwise equality of two masked arrays.
+
+    """
+    assert_array_compare(operator.__eq__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def fail_if_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Raises an assertion error if two masked arrays are equal elementwise.
+
+    """
+    def compare(x, y):
+        return (not np.alltrue(approx(x, y)))
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to a given number of decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return approx(x, y, rtol=10. ** -decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to a given number of decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return almost(x, y, decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+    """
+    Checks that x is smaller than y elementwise.
+
+    """
+    assert_array_compare(operator.__lt__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not less-ordered')
+
+
+def assert_mask_equal(m1, m2, err_msg=''):
+    """
+    Asserts the equality of two masks.
+
+    """
+    if m1 is nomask:
+        assert_(m2 is nomask)
+    if m2 is nomask:
+        assert_(m1 is nomask)
+    assert_array_equal(m1, m2, err_msg=err_msg)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/testutils.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/testutils.pyc
new file mode 100644
index 0000000..5ff0e04
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/testutils.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/timer_comparison.py b/project/venv/lib/python2.7/site-packages/numpy/ma/timer_comparison.py
new file mode 100644
index 0000000..68104ed
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/ma/timer_comparison.py
@@ -0,0 +1,440 @@
+from __future__ import division, absolute_import, print_function
+
+import timeit
+from functools import reduce
+
+import numpy as np
+from numpy import float_
+import numpy.core.fromnumeric as fromnumeric
+
+from numpy.testing import build_err_msg
+
+# Fixme: this does not look right.
+np.seterr(all='ignore')
+
+pi = np.pi
+
+
+class ModuleTester(object):
+    def __init__(self, module):
+        self.module = module
+        self.allequal = module.allequal
+        self.arange = module.arange
+        self.array = module.array
+        self.concatenate = module.concatenate
+        self.count = module.count
+        self.equal = module.equal
+        self.filled = module.filled
+        self.getmask = module.getmask
+        self.getmaskarray = module.getmaskarray
+        self.id = id
+        self.inner = module.inner
+        self.make_mask = module.make_mask
+        self.masked = module.masked
+        self.masked_array = module.masked_array
+        self.masked_values = module.masked_values
+        self.mask_or = module.mask_or
+        self.nomask = module.nomask
+        self.ones = module.ones
+        self.outer = module.outer
+        self.repeat = module.repeat
+        self.resize = module.resize
+        self.sort = module.sort
+        self.take = module.take
+        self.transpose = module.transpose
+        self.zeros = module.zeros
+        self.MaskType = module.MaskType
+        try:
+            self.umath = module.umath
+        except AttributeError:
+            self.umath = module.core.umath
+        self.testnames = []
+
+    def assert_array_compare(self, comparison, x, y, err_msg='', header='',
+                             fill_value=True):
+        """
+        Assert that a comparison of two masked arrays is satisfied elementwise.
+ + """ + xf = self.filled(x) + yf = self.filled(y) + m = self.mask_or(self.getmask(x), self.getmask(y)) + + x = self.filled(self.masked_array(xf, mask=m), fill_value) + y = self.filled(self.masked_array(yf, mask=m), fill_value) + if (x.dtype.char != "O"): + x = x.astype(float_) + if isinstance(x, np.ndarray) and x.size > 1: + x[np.isnan(x)] = 0 + elif np.isnan(x): + x = 0 + if (y.dtype.char != "O"): + y = y.astype(float_) + if isinstance(y, np.ndarray) and y.size > 1: + y[np.isnan(y)] = 0 + elif np.isnan(y): + y = 0 + try: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + msg = build_err_msg([x, y], + err_msg + + '\n(shapes %s, %s mismatch)' % (x.shape, + y.shape), + header=header, + names=('x', 'y')) + assert cond, msg + val = comparison(x, y) + if m is not self.nomask and fill_value: + val = self.masked_array(val, mask=m) + if isinstance(val, bool): + cond = val + reduced = [0] + else: + reduced = val.ravel() + cond = reduced.all() + reduced = reduced.tolist() + if not cond: + match = 100-100.0*reduced.count(1)/len(reduced) + msg = build_err_msg([x, y], + err_msg + + '\n(mismatch %s%%)' % (match,), + header=header, + names=('x', 'y')) + assert cond, msg + except ValueError: + msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) + raise ValueError(msg) + + def assert_array_equal(self, x, y, err_msg=''): + """ + Checks the elementwise equality of two masked arrays. + + """ + self.assert_array_compare(self.equal, x, y, err_msg=err_msg, + header='Arrays are not equal') + + def test_0(self): + """ + Tests creation + + """ + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + xm = self.masked_array(x, mask=m) + xm[0] + + def test_1(self): + """ + Tests creation + + """ + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = self.masked_array(x, mask=m1) + ym = self.masked_array(y, mask=m2) + xf = np.where(m1, 1.e+20, x) + xm.set_fill_value(1.e+20) + + assert((xm-ym).filled(0).any()) + s = x.shape + assert(xm.size == reduce(lambda x, y:x*y, s)) + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + def test_2(self): + """ + Tests conversions and indexing. + + """ + x1 = np.array([1, 2, 4, 3]) + x2 = self.array(x1, mask=[1, 0, 0, 0]) + x3 = self.array(x1, mask=[0, 1, 0, 1]) + x4 = self.array(x1) + # test conversion to strings, no errors + str(x2) + repr(x2) + # tests of indexing + assert type(x2[1]) is type(x1[1]) + assert x1[1] == x2[1] + x1[2] = 9 + x2[2] = 9 + self.assert_array_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + x2[1] = self.masked + x2[1:3] = self.masked + x2[:] = x1 + x2[1] = self.masked + x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + x1 = np.arange(5)*1.0 + x2 = self.masked_values(x1, 3.0) + x1 = self.array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + # check that no error occurs. 
+ x1[1] + x2[1] + assert x1[1:1].shape == (0,) + # Tests copy-size + n = [0, 0, 1, 0, 0] + m = self.make_mask(n) + m2 = self.make_mask(m) + assert(m is m2) + m3 = self.make_mask(m, copy=1) + assert(m is not m3) + + def test_3(self): + """ + Tests resize/repeat + + """ + x4 = self.arange(4) + x4[2] = self.masked + y4 = self.resize(x4, (8,)) + assert self.allequal(self.concatenate([x4, x4]), y4) + assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) + self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = self.repeat(x4, 2, axis=0) + assert self.allequal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert self.allequal(y5, y7) + y8 = x4.repeat(2, 0) + assert self.allequal(y5, y8) + + def test_4(self): + """ + Test of take, transpose, inner, outer products. + + """ + x = self.arange(24) + y = np.arange(24) + x[5:6] = self.masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) + assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) + assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), + self.inner(x, y)) + assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), + self.outer(x, y)) + y = self.array(['abc', 1, 'def', 2, 3], object) + y[2] = self.masked + t = self.take(y, [0, 3, 4]) + assert t[0] == 'abc' + assert t[1] == 2 + assert t[2] == 3 + + def test_5(self): + """ + Tests inplace w/ scalar + + """ + x = self.arange(10) + y = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x += 1 + assert self.allequal(x, y+1) + xm += 1 + assert self.allequal(xm, y+1) + + x = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x -= 1 + assert self.allequal(x, y-1) + xm -= 1 + assert self.allequal(xm, y-1) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x *= 2.0 + assert self.allequal(x, y*2) + xm *= 2.0 + assert self.allequal(xm, y*2) + + x = self.arange(10)*2 + xm = self.arange(10)*2 + xm[2] = self.masked + x /= 2 + assert self.allequal(x, y) + xm /= 2 + assert self.allequal(xm, y) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x /= 2.0 + assert self.allequal(x, y/2.0) + xm /= self.arange(10) + self.assert_array_equal(xm, self.ones((10,))) + + x = self.arange(10).astype(float_) + xm = self.arange(10) + xm[2] = self.masked + x += 1. + assert self.allequal(x, y + 1.) 
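# --- Editorial aside, not part of the vendored numpy file above: test_5 and
# test_6 time in-place arithmetic on masked arrays. A minimal runnable sketch
# of the behaviour they depend on (assumes only numpy; names are illustrative):
#
#     import numpy as np
#
#     xm = np.ma.arange(5, dtype=float)
#     xm[2] = np.ma.masked      # mask a single slot
#     xm += 10.0                # in-place arithmetic; masked slots stay masked
#     assert xm[2] is np.ma.masked
#     assert list(xm.compressed()) == [10.0, 11.0, 13.0, 14.0]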
+ + def test_6(self): + """ + Tests inplace w/ array + + """ + x = self.arange(10, dtype=float_) + y = self.arange(10) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x += a + xm += a + assert self.allequal(x, y+a) + assert self.allequal(xm, y+a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x -= a + xm -= a + assert self.allequal(x, y-a) + assert self.allequal(xm, y-a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x *= a + xm *= a + assert self.allequal(x, y*a) + assert self.allequal(xm, y*a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x /= a + xm /= a + + def test_7(self): + "Tests ufunc" + d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), + self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', +# 'sin', 'cos', 'tan', +# 'arcsin', 'arccos', 'arctan', +# 'sinh', 'cosh', 'tanh', +# 'arcsinh', +# 'arccosh', +# 'arctanh', +# 'absolute', 'fabs', 'negative', +# # 'nonzero', 'around', +# 'floor', 'ceil', +# # 'sometrue', 'alltrue', +# 'logical_not', +# 'add', 'subtract', 'multiply', +# 'divide', 'true_divide', 'floor_divide', +# 'remainder', 'fmod', 'hypot', 'arctan2', +# 'equal', 'not_equal', 'less_equal', 'greater_equal', +# 'less', 'greater', +# 'logical_and', 'logical_or', 'logical_xor', + ]: + try: + uf = getattr(self.umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(self.module, f) + args = d[:uf.nin] + ur = uf(*args) + mr = mf(*args) + self.assert_array_equal(ur.filled(0), mr.filled(0), f) + self.assert_array_equal(ur._mask, mr._mask) + + def test_99(self): + # test average + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + self.assert_array_equal(2.0, self.average(ott, axis=0)) + self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) + result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) + self.assert_array_equal(2.0, result) + assert(wts == 4.0) + ott[:] = self.masked + assert(self.average(ott, axis=0) is self.masked) + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = self.masked + self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) + assert(self.average(ott, axis=1)[0] is self.masked) + self.assert_array_equal([2., 0.], self.average(ott, axis=0)) + result, wts = self.average(ott, axis=0, returned=1) + self.assert_array_equal(wts, [1., 0.]) + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = self.arange(6) + self.assert_array_equal(self.average(x, axis=0), 2.5) + self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) + y = self.array([self.arange(6), 2.0*self.arange(6)]) + self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) + self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) 
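# Editorial note, not part of the vendored file: np.ma.average drops masked
# entries from both the data and the weights, e.g.
#     ott = np.ma.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
#     np.ma.average(ott)   # -> 2.0, the mean of the unmasked 1., 2. and 3.
# which is the behaviour the axis/weights assertions around here exercise.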
+ self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) + self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + m1 = self.zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = self.ones(6) + m5 = [0, 1, 1, 1, 1, 1] + self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) + self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) + z = self.masked_array(y, m3) + self.assert_array_equal(self.average(z, None), 20./6.) + self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) + self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) + + def test_A(self): + x = self.arange(24) + x[5:6] = self.masked + x = x.reshape(2, 3, 4) + + +if __name__ == '__main__': + setup_base = ("from __main__ import ModuleTester \n" + "import numpy\n" + "tester = ModuleTester(module)\n") + setup_cur = "import numpy.ma.core as module\n" + setup_base + (nrepeat, nloop) = (10, 10) + + if 1: + for i in range(1, 8): + func = 'tester.test_%i()' % i + cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) + cur = np.sort(cur) + print("#%i" % i + 50*'.') + print(eval("ModuleTester.test_%i.__doc__" % i)) + print("core_current : %.3f - %.3f" % (cur[0], cur[1])) diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/timer_comparison.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/timer_comparison.pyc new file mode 100644 index 0000000..eb8255c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/timer_comparison.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/version.py b/project/venv/lib/python2.7/site-packages/numpy/ma/version.py new file mode 100644 index 0000000..a2c5c42 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/ma/version.py @@ -0,0 +1,14 @@ +"""Version number + +""" +from __future__ import division, absolute_import, print_function + +version = '1.00' +release = False + +if not release: + from . import core + from . 
import extras + revision = [core.__revision__.split(':')[-1][:-1].strip(), + extras.__revision__.split(':')[-1][:-1].strip(),] + version += '.dev%04i' % max([int(rev) for rev in revision]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/ma/version.pyc b/project/venv/lib/python2.7/site-packages/numpy/ma/version.pyc new file mode 100644 index 0000000..339507d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/ma/version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matlib.py b/project/venv/lib/python2.7/site-packages/numpy/matlib.py new file mode 100644 index 0000000..004e5f0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/matlib.py @@ -0,0 +1,363 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.matrixlib.defmatrix import matrix, asmatrix +# need * as we're copying the numpy namespace +from numpy import * + +__version__ = np.__version__ + +__all__ = np.__all__[:] # copy numpy namespace +__all__ += ['rand', 'randn', 'repmat'] + +def empty(shape, dtype=None, order='C'): + """Return a new matrix of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty matrix. + dtype : data-type, optional + Desired output data-type. + order : {'C', 'F'}, optional + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + + See Also + -------- + empty_like, zeros + + Notes + ----- + `empty`, unlike `zeros`, does not set the matrix values to zero, + and may therefore be marginally faster. On the other hand, it requires + the user to manually set all the values in the array, and should be + used with caution. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.empty((2, 2)) # filled with random data + matrix([[ 6.76425276e-320, 9.79033856e-307], + [ 7.39337286e-309, 3.22135945e-309]]) #random + >>> np.matlib.empty((2, 2), dtype=int) + matrix([[ 6600475, 0], + [ 6586976, 22740995]]) #random + + """ + return ndarray.__new__(matrix, shape, dtype, order=order) + +def ones(shape, dtype=None, order='C'): + """ + Matrix of ones. + + Return a matrix of given shape and type, filled with ones. + + Parameters + ---------- + shape : {sequence of ints, int} + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is np.float64. + order : {'C', 'F'}, optional + Whether to store matrix in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Matrix of ones of given shape, dtype, and order. + + See Also + -------- + ones : Array of ones. + matlib.zeros : Zero matrix. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> np.matlib.ones((2,3)) + matrix([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> np.matlib.ones(2) + matrix([[ 1., 1.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(1) + return a + +def zeros(shape, dtype=None, order='C'): + """ + Return a matrix of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is float. + order : {'C', 'F'}, optional + Whether to store the result in C- or Fortran-contiguous order, + default is 'C'. 
+ + Returns + ------- + out : matrix + Zero matrix of given shape, dtype, and order. + + See Also + -------- + numpy.zeros : Equivalent array function. + matlib.ones : Return a matrix of ones. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.zeros((2, 3)) + matrix([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> np.matlib.zeros(2) + matrix([[ 0., 0.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(0) + return a + +def identity(n,dtype=None): + """ + Returns the square identity matrix of given size. + + Parameters + ---------- + n : int + Size of the returned identity matrix. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + + Returns + ------- + out : matrix + `n` x `n` matrix with its main diagonal set to one, + and all other elements zero. + + See Also + -------- + numpy.identity : Equivalent array function. + matlib.eye : More general matrix identity function. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.identity(3, dtype=int) + matrix([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + + """ + a = array([1]+n*[0], dtype=dtype) + b = empty((n, n), dtype=dtype) + b.flat = a + return b + +def eye(n,M=None, k=0, dtype=float, order='C'): + """ + Return a matrix with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + n : int + Number of rows in the output. + M : int, optional + Number of columns in the output, defaults to `n`. + k : int, optional + Index of the diagonal: 0 refers to the main diagonal, + a positive value refers to an upper diagonal, + and a negative value to a lower diagonal. + dtype : dtype, optional + Data-type of the returned matrix. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + .. versionadded:: 1.14.0 + + Returns + ------- + I : matrix + A `n` x `M` matrix where all elements are equal to zero, + except for the `k`-th diagonal, whose values are equal to one. + + See Also + -------- + numpy.eye : Equivalent array function. + identity : Square identity matrix. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.eye(3, k=1, dtype=float) + matrix([[ 0., 1., 0.], + [ 0., 0., 1.], + [ 0., 0., 0.]]) + + """ + return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order)) + +def rand(*args): + """ + Return a matrix of random values with given shape. + + Create a matrix of the given shape and propagate it with + random samples from a uniform distribution over ``[0, 1)``. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. + If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + out : ndarray + The matrix of random values with shape given by `\\*args`. 
+ + See Also + -------- + randn, numpy.random.rand + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.rand(2, 3) + matrix([[ 0.68340382, 0.67926887, 0.83271405], + [ 0.00793551, 0.20468222, 0.95253525]]) #random + >>> np.matlib.rand((2, 3)) + matrix([[ 0.84682055, 0.73626594, 0.11308016], + [ 0.85429008, 0.3294825 , 0.89139555]]) #random + + If the first argument is a tuple, other arguments are ignored: + + >>> np.matlib.rand((2, 3), 4) + matrix([[ 0.46898646, 0.15163588, 0.95188261], + [ 0.59208621, 0.09561818, 0.00583606]]) #random + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.rand(*args)) + +def randn(*args): + """ + Return a random matrix with data from the "standard normal" distribution. + + `randn` generates a matrix filled with random floats sampled from a + univariate "normal" (Gaussian) distribution of mean 0 and variance 1. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + Z : matrix of floats + A matrix of floating-point samples drawn from the standard normal + distribution. + + See Also + -------- + rand, random.randn + + Notes + ----- + For random samples from :math:`N(\\mu, \\sigma^2)`, use: + + ``sigma * np.matlib.randn(...) + mu`` + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.randn(1) + matrix([[-0.09542833]]) #random + >>> np.matlib.randn(1, 2, 3) + matrix([[ 0.16198284, 0.0194571 , 0.18312985], + [-0.7509172 , 1.61055 , 0.45298599]]) #random + + Two-by-four matrix of samples from :math:`N(3, 6.25)`: + + >>> 2.5 * np.matlib.randn((2, 4)) + 3 + matrix([[ 4.74085004, 8.89381862, 4.09042411, 4.83721922], + [ 7.52373709, 5.07933944, -2.64043543, 0.45610557]]) #random + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.randn(*args)) + +def repmat(a, m, n): + """ + Repeat a 0-D to 2-D array or matrix MxN times. + + Parameters + ---------- + a : array_like + The array or matrix to be repeated. + m, n : int + The number of times `a` is repeated along the first and second axes. + + Returns + ------- + out : ndarray + The result of repeating `a`. 
+ + Examples + -------- + >>> import numpy.matlib + >>> a0 = np.array(1) + >>> np.matlib.repmat(a0, 2, 3) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> a1 = np.arange(4) + >>> np.matlib.repmat(a1, 2, 2) + array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + + >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) + >>> np.matlib.repmat(a2, 2, 3) + matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5], + [0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5]]) + + """ + a = asanyarray(a) + ndim = a.ndim + if ndim == 0: + origrows, origcols = (1, 1) + elif ndim == 1: + origrows, origcols = (1, a.shape[0]) + else: + origrows, origcols = a.shape + rows = origrows * m + cols = origcols * n + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) + return c.reshape(rows, cols) diff --git a/project/venv/lib/python2.7/site-packages/numpy/matlib.pyc b/project/venv/lib/python2.7/site-packages/numpy/matlib.pyc new file mode 100644 index 0000000..04c923b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/matlib.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/__init__.py new file mode 100644 index 0000000..777e0cd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/__init__.py @@ -0,0 +1,12 @@ +"""Sub-package containing the matrix class and related functions. + +""" +from __future__ import division, absolute_import, print_function + +from .defmatrix import * + +__all__ = defmatrix.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/__init__.pyc new file mode 100644 index 0000000..4202efa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/defmatrix.py b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/defmatrix.py new file mode 100644 index 0000000..93b344c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/defmatrix.py @@ -0,0 +1,1109 @@ +from __future__ import division, absolute_import, print_function + +__all__ = ['matrix', 'bmat', 'mat', 'asmatrix'] + +import sys +import warnings +import ast +import numpy.core.numeric as N +from numpy.core.numeric import concatenate, isscalar +from numpy.core.overrides import set_module +# While not in __all__, matrix_power used to be defined here, so we import +# it for backward compatibility. +from numpy.linalg import matrix_power + + +def _convert_from_string(data): + for char in '[]': + data = data.replace(char, '') + + rows = data.split(';') + newdata = [] + count = 0 + for row in rows: + trow = row.split(',') + newrow = [] + for col in trow: + temp = col.split() + newrow.extend(map(ast.literal_eval, temp)) + if count == 0: + Ncols = len(newrow) + elif len(newrow) != Ncols: + raise ValueError("Rows not the same size.") + count += 1 + newdata.append(newrow) + return newdata + + +@set_module('numpy') +def asmatrix(data, dtype=None): + """ + Interpret the input as a matrix. + + Unlike `matrix`, `asmatrix` does not make a copy if the input is already + a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. + + Parameters + ---------- + data : array_like + Input data. 
+ dtype : data-type + Data-type of the output matrix. + + Returns + ------- + mat : matrix + `data` interpreted as a matrix. + + Examples + -------- + >>> x = np.array([[1, 2], [3, 4]]) + + >>> m = np.asmatrix(x) + + >>> x[0,0] = 5 + + >>> m + matrix([[5, 2], + [3, 4]]) + + """ + return matrix(data, dtype=dtype, copy=False) + + +@set_module('numpy') +class matrix(N.ndarray): + """ + matrix(data, dtype=None, copy=True) + + .. note:: It is no longer recommended to use this class, even for linear + algebra. Instead use regular arrays. The class may be removed + in the future. + + Returns a matrix from an array-like object, or from a string of data. + A matrix is a specialized 2-D array that retains its 2-D nature + through operations. It has certain special operators, such as ``*`` + (matrix multiplication) and ``**`` (matrix power). + + Parameters + ---------- + data : array_like or string + If `data` is a string, it is interpreted as a matrix with commas + or spaces separating columns, and semicolons separating rows. + dtype : data-type + Data-type of the output matrix. + copy : bool + If `data` is already an `ndarray`, then this flag determines + whether the data is copied (the default), or whether a view is + constructed. + + See Also + -------- + array + + Examples + -------- + >>> a = np.matrix('1 2; 3 4') + >>> print(a) + [[1 2] + [3 4]] + + >>> np.matrix([[1, 2], [3, 4]]) + matrix([[1, 2], + [3, 4]]) + + """ + __array_priority__ = 10.0 + def __new__(subtype, data, dtype=None, copy=True): + warnings.warn('the matrix subclass is not the recommended way to ' + 'represent matrices or deal with linear algebra (see ' + 'https://docs.scipy.org/doc/numpy/user/' + 'numpy-for-matlab-users.html). ' + 'Please adjust your code to use regular ndarray.', + PendingDeprecationWarning, stacklevel=2) + if isinstance(data, matrix): + dtype2 = data.dtype + if (dtype is None): + dtype = dtype2 + if (dtype2 == dtype) and (not copy): + return data + return data.astype(dtype) + + if isinstance(data, N.ndarray): + if dtype is None: + intype = data.dtype + else: + intype = N.dtype(dtype) + new = data.view(subtype) + if intype != data.dtype: + return new.astype(intype) + if copy: return new.copy() + else: return new + + if isinstance(data, str): + data = _convert_from_string(data) + + # now convert data to an array + arr = N.array(data, dtype=dtype, copy=copy) + ndim = arr.ndim + shape = arr.shape + if (ndim > 2): + raise ValueError("matrix must be 2-dimensional") + elif ndim == 0: + shape = (1, 1) + elif ndim == 1: + shape = (1, shape[0]) + + order = 'C' + if (ndim == 2) and arr.flags.fortran: + order = 'F' + + if not (order or arr.flags.contiguous): + arr = arr.copy() + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=order) + return ret + + def __array_finalize__(self, obj): + self._getitem = False + if (isinstance(obj, matrix) and obj._getitem): return + ndim = self.ndim + if (ndim == 2): + return + if (ndim > 2): + newshape = tuple([x for x in self.shape if x > 1]) + ndim = len(newshape) + if ndim == 2: + self.shape = newshape + return + elif (ndim > 2): + raise ValueError("shape too large to be a matrix.") + else: + newshape = self.shape + if ndim == 0: + self.shape = (1, 1) + elif ndim == 1: + self.shape = (1, newshape[0]) + return + + def __getitem__(self, index): + self._getitem = True + + try: + out = N.ndarray.__getitem__(self, index) + finally: + self._getitem = False + + if not isinstance(out, N.ndarray): + return out + + if out.ndim == 0: + return out[()] + if out.ndim == 
1: + sh = out.shape[0] + # Determine when we should have a column array + try: + n = len(index) + except Exception: + n = 0 + if n > 1 and isscalar(index[1]): + out.shape = (sh, 1) + else: + out.shape = (1, sh) + return out + + def __mul__(self, other): + if isinstance(other, (N.ndarray, list, tuple)) : + # This promotes 1-D vectors to row vectors + return N.dot(self, asmatrix(other)) + if isscalar(other) or not hasattr(other, '__rmul__') : + return N.dot(self, other) + return NotImplemented + + def __rmul__(self, other): + return N.dot(other, self) + + def __imul__(self, other): + self[:] = self * other + return self + + def __pow__(self, other): + return matrix_power(self, other) + + def __ipow__(self, other): + self[:] = self ** other + return self + + def __rpow__(self, other): + return NotImplemented + + def _align(self, axis): + """A convenience function for operations that need to preserve axis + orientation. + """ + if axis is None: + return self[0, 0] + elif axis==0: + return self + elif axis==1: + return self.transpose() + else: + raise ValueError("unsupported axis") + + def _collapse(self, axis): + """A convenience function for operations that want to collapse + to a scalar like _align, but are using keepdims=True + """ + if axis is None: + return self[0, 0] + else: + return self + + # Necessary because base-class tolist expects dimension + # reduction by x[0] + def tolist(self): + """ + Return the matrix as a (possibly nested) list. + + See `ndarray.tolist` for full documentation. + + See Also + -------- + ndarray.tolist + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.tolist() + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + + """ + return self.__array__().tolist() + + # To preserve orientation of result... + def sum(self, axis=None, dtype=None, out=None): + """ + Returns the sum of the matrix elements, along the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum + + Notes + ----- + This is the same as `ndarray.sum`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix([[1, 2], [4, 3]]) + >>> x.sum() + 10 + >>> x.sum(axis=1) + matrix([[3], + [7]]) + >>> x.sum(axis=1, dtype='float') + matrix([[ 3.], + [ 7.]]) + >>> out = np.zeros((1, 2), dtype='float') + >>> x.sum(axis=1, dtype='float', out=out) + matrix([[ 3.], + [ 7.]]) + + """ + return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) + + + # To update docstring from array to matrix... + def squeeze(self, axis=None): + """ + Return a possibly reshaped matrix. + + Refer to `numpy.squeeze` for more documentation. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Selects a subset of the single-dimensional entries in the shape. + If an axis is selected with shape entry greater than one, + an error is raised. + + Returns + ------- + squeezed : matrix + The matrix, but as a (1, N) matrix if it had shape (N, 1). + + See Also + -------- + numpy.squeeze : related function + + Notes + ----- + If `m` has a single column then that column is returned + as the single row of a matrix. Otherwise `m` is returned. + The returned matrix is always either `m` itself or a view into `m`. + Supplying an axis keyword argument will not affect the returned matrix + but it may cause an error to be raised. 
+ + Examples + -------- + >>> c = np.matrix([[1], [2]]) + >>> c + matrix([[1], + [2]]) + >>> c.squeeze() + matrix([[1, 2]]) + >>> r = c.T + >>> r + matrix([[1, 2]]) + >>> r.squeeze() + matrix([[1, 2]]) + >>> m = np.matrix([[1, 2], [3, 4]]) + >>> m.squeeze() + matrix([[1, 2], + [3, 4]]) + + """ + return N.ndarray.squeeze(self, axis=axis) + + + # To update docstring from array to matrix... + def flatten(self, order='C'): + """ + Return a flattened copy of the matrix. + + All `N` elements of the matrix are placed into a single row. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. 'F' means to + flatten in column-major (Fortran-style) order. 'A' means to + flatten in column-major order if `m` is Fortran *contiguous* in + memory, row-major order otherwise. 'K' means to flatten `m` in + the order the elements occur in memory. The default is 'C'. + + Returns + ------- + y : matrix + A copy of the matrix, flattened to a `(1, N)` matrix where `N` + is the number of elements in the original matrix. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the matrix. + + Examples + -------- + >>> m = np.matrix([[1,2], [3,4]]) + >>> m.flatten() + matrix([[1, 2, 3, 4]]) + >>> m.flatten('F') + matrix([[1, 3, 2, 4]]) + + """ + return N.ndarray.flatten(self, order=order) + + def mean(self, axis=None, dtype=None, out=None): + """ + Returns the average of the matrix elements along the given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean + + Notes + ----- + Same as `ndarray.mean` except that, where that returns an `ndarray`, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.mean() + 5.5 + >>> x.mean(0) + matrix([[ 4., 5., 6., 7.]]) + >>> x.mean(1) + matrix([[ 1.5], + [ 5.5], + [ 9.5]]) + + """ + return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def std(self, axis=None, dtype=None, out=None, ddof=0): + """ + Return the standard deviation of the array elements along the given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std + + Notes + ----- + This is the same as `ndarray.std`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.std() + 3.4520525295346629 + >>> x.std(0) + matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) + >>> x.std(1) + matrix([[ 1.11803399], + [ 1.11803399], + [ 1.11803399]]) + + """ + return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0): + """ + Returns the variance of the matrix elements, along the given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var + + Notes + ----- + This is the same as `ndarray.var`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. 
+ + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.var() + 11.916666666666666 + >>> x.var(0) + matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) + >>> x.var(1) + matrix([[ 1.25], + [ 1.25], + [ 1.25]]) + + """ + return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def prod(self, axis=None, dtype=None, out=None): + """ + Return the product of the array elements over the given axis. + + Refer to `prod` for full documentation. + + See Also + -------- + prod, ndarray.prod + + Notes + ----- + Same as `ndarray.prod`, except, where that returns an `ndarray`, this + returns a `matrix` object instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.prod() + 0 + >>> x.prod(0) + matrix([[ 0, 45, 120, 231]]) + >>> x.prod(1) + matrix([[ 0], + [ 840], + [7920]]) + + """ + return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def any(self, axis=None, out=None): + """ + Test whether any array element along a given axis evaluates to True. + + Refer to `numpy.any` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which logical OR is performed + out : ndarray, optional + Output to existing array instead of creating new one, must have + same shape as expected output + + Returns + ------- + any : bool, ndarray + Returns a single bool if `axis` is ``None``; otherwise, + returns `ndarray` + + """ + return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) + + def all(self, axis=None, out=None): + """ + Test whether all matrix elements along a given axis evaluate to True. + + Parameters + ---------- + See `numpy.all` for complete descriptions + + See Also + -------- + numpy.all + + Notes + ----- + This is the same as `ndarray.all`, but it returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> y = x[0]; y + matrix([[0, 1, 2, 3]]) + >>> (x == y) + matrix([[ True, True, True, True], + [False, False, False, False], + [False, False, False, False]]) + >>> (x == y).all() + False + >>> (x == y).all(0) + matrix([[False, False, False, False]]) + >>> (x == y).all(1) + matrix([[ True], + [False], + [False]]) + + """ + return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) + + def max(self, axis=None, out=None): + """ + Return the maximum value along an axis. + + Parameters + ---------- + See `amax` for complete descriptions + + See Also + -------- + amax, ndarray.max + + Notes + ----- + This is the same as `ndarray.max`, but returns a `matrix` object + where `ndarray.max` would return an ndarray. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.max() + 11 + >>> x.max(0) + matrix([[ 8, 9, 10, 11]]) + >>> x.max(1) + matrix([[ 3], + [ 7], + [11]]) + + """ + return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) + + def argmax(self, axis=None, out=None): + """ + Indexes of the maximum values along an axis. + + Return the indexes of the first occurrences of the maximum values + along the specified axis. If axis is None, the index is for the + flattened matrix. 
+ + Parameters + ---------- + See `numpy.argmax` for complete descriptions + + See Also + -------- + numpy.argmax + + Notes + ----- + This is the same as `ndarray.argmax`, but returns a `matrix` object + where `ndarray.argmax` would return an `ndarray`. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.argmax() + 11 + >>> x.argmax(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmax(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmax(self, axis, out)._align(axis) + + def min(self, axis=None, out=None): + """ + Return the minimum value along an axis. + + Parameters + ---------- + See `amin` for complete descriptions. + + See Also + -------- + amin, ndarray.min + + Notes + ----- + This is the same as `ndarray.min`, but returns a `matrix` object + where `ndarray.min` would return an ndarray. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.min() + -11 + >>> x.min(0) + matrix([[ -8, -9, -10, -11]]) + >>> x.min(1) + matrix([[ -3], + [ -7], + [-11]]) + + """ + return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) + + def argmin(self, axis=None, out=None): + """ + Indexes of the minimum values along an axis. + + Return the indexes of the first occurrences of the minimum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmin` for complete descriptions. + + See Also + -------- + numpy.argmin + + Notes + ----- + This is the same as `ndarray.argmin`, but returns a `matrix` object + where `ndarray.argmin` would return an `ndarray`. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.argmin() + 11 + >>> x.argmin(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmin(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmin(self, axis, out)._align(axis) + + def ptp(self, axis=None, out=None): + """ + Peak-to-peak (maximum - minimum) value along the given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp + + Notes + ----- + Same as `ndarray.ptp`, except, where that would return an `ndarray` object, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.ptp() + 11 + >>> x.ptp(0) + matrix([[8, 8, 8, 8]]) + >>> x.ptp(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.ptp(self, axis, out)._align(axis) + + def getI(self): + """ + Returns the (multiplicative) inverse of invertible `self`. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + If `self` is non-singular, `ret` is such that ``ret * self`` == + ``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return + ``True``. + + Raises + ------ + numpy.linalg.LinAlgError: Singular matrix + If `self` is singular. + + See Also + -------- + linalg.inv + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]'); m + matrix([[1, 2], + [3, 4]]) + >>> m.getI() + matrix([[-2. , 1. 
], + [ 1.5, -0.5]]) + >>> m.getI() * m + matrix([[ 1., 0.], + [ 0., 1.]]) + + """ + M, N = self.shape + if M == N: + from numpy.dual import inv as func + else: + from numpy.dual import pinv as func + return asmatrix(func(self)) + + def getA(self): + """ + Return `self` as an `ndarray` object. + + Equivalent to ``np.asarray(self)``. + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self` as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA() + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + + """ + return self.__array__() + + def getA1(self): + """ + Return `self` as a flattened `ndarray`. + + Equivalent to ``np.asarray(x).ravel()`` + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self`, 1-D, as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA1() + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + """ + return self.__array__().ravel() + + + def ravel(self, order='C'): + """ + Return a flattened matrix. + + Refer to `numpy.ravel` for more documentation. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `m` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + ret : matrix + Return the matrix flattened to shape `(1, N)` where `N` + is the number of elements in the original matrix. + A copy is made only if necessary. + + See Also + -------- + matrix.flatten : returns a similar output matrix but always a copy + matrix.flat : a flat iterator on the array. + numpy.ravel : related function which returns an ndarray + + """ + return N.ndarray.ravel(self, order=order) + + + def getT(self): + """ + Returns the transpose of the matrix. + + Does *not* conjugate! For the complex conjugate transpose, use ``.H``. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + The (non-conjugated) transpose of the matrix. + + See Also + -------- + transpose, getH + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]') + >>> m + matrix([[1, 2], + [3, 4]]) + >>> m.getT() + matrix([[1, 3], + [2, 4]]) + + """ + return self.transpose() + + def getH(self): + """ + Returns the (complex) conjugate transpose of `self`. + + Equivalent to ``np.transpose(self)`` if `self` is real-valued. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + complex conjugate transpose of `self` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))) + >>> z = x - 1j*x; z + matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], + [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], + [ 8. -8.j, 9. 
-9.j, 10.-10.j, 11.-11.j]]) + >>> z.getH() + matrix([[ 0. +0.j, 4. +4.j, 8. +8.j], + [ 1. +1.j, 5. +5.j, 9. +9.j], + [ 2. +2.j, 6. +6.j, 10.+10.j], + [ 3. +3.j, 7. +7.j, 11.+11.j]]) + + """ + if issubclass(self.dtype.type, N.complexfloating): + return self.transpose().conjugate() + else: + return self.transpose() + + T = property(getT, None) + A = property(getA, None) + A1 = property(getA1, None) + H = property(getH, None) + I = property(getI, None) + +def _from_string(str, gdict, ldict): + rows = str.split(';') + rowtup = [] + for row in rows: + trow = row.split(',') + newrow = [] + for x in trow: + newrow.extend(x.split()) + trow = newrow + coltup = [] + for col in trow: + col = col.strip() + try: + thismat = ldict[col] + except KeyError: + try: + thismat = gdict[col] + except KeyError: + raise KeyError("%s not found" % (col,)) + + coltup.append(thismat) + rowtup.append(concatenate(coltup, axis=-1)) + return concatenate(rowtup, axis=0) + + +@set_module('numpy') +def bmat(obj, ldict=None, gdict=None): + """ + Build a matrix object from a string, nested sequence, or array. + + Parameters + ---------- + obj : str or array_like + Input data. If a string, variables in the current scope may be + referenced by name. + ldict : dict, optional + A dictionary that replaces local operands in current frame. + Ignored if `obj` is not a string or `gdict` is `None`. + gdict : dict, optional + A dictionary that replaces global operands in current frame. + Ignored if `obj` is not a string. + + Returns + ------- + out : matrix + Returns a matrix object, which is a specialized 2-D array. + + See Also + -------- + block : + A generalization of this function for N-d arrays, that returns normal + ndarrays. + + Examples + -------- + >>> A = np.mat('1 1; 1 1') + >>> B = np.mat('2 2; 2 2') + >>> C = np.mat('3 4; 5 6') + >>> D = np.mat('7 8; 9 0') + + All the following expressions construct the same block matrix: + + >>> np.bmat([[A, B], [C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat('A,B; C,D') + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + + """ + if isinstance(obj, str): + if gdict is None: + # get previous frame + frame = sys._getframe().f_back + glob_dict = frame.f_globals + loc_dict = frame.f_locals + else: + glob_dict = gdict + loc_dict = ldict + + return matrix(_from_string(obj, glob_dict, loc_dict)) + + if isinstance(obj, (tuple, list)): + # [[A,B],[C,D]] + arr_rows = [] + for row in obj: + if isinstance(row, N.ndarray): # not 2-d + return matrix(concatenate(obj, axis=-1)) + else: + arr_rows.append(concatenate(row, axis=-1)) + return matrix(concatenate(arr_rows, axis=0)) + if isinstance(obj, N.ndarray): + return matrix(obj) + +mat = asmatrix diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/defmatrix.pyc b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/defmatrix.pyc new file mode 100644 index 0000000..11049d1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/defmatrix.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/setup.py b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/setup.py new file mode 100644 index 0000000..d0981d6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/setup.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +def 
configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('matrixlib', parent_package, top_path) + config.add_data_dir('tests') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + config = configuration(top_path='').todict() + setup(**config) diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/setup.pyc new file mode 100644 index 0000000..5eee7e6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/__init__.pyc new file mode 100644 index 0000000..a4e91f9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_defmatrix.py b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_defmatrix.py new file mode 100644 index 0000000..aa6e08d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_defmatrix.py @@ -0,0 +1,460 @@ +from __future__ import division, absolute_import, print_function + +try: + # Accessing collections abstract classes from collections + # has been deprecated since Python 3.3 + import collections.abc as collections_abc +except ImportError: + import collections as collections_abc + +import numpy as np +from numpy import matrix, asmatrix, bmat +from numpy.testing import ( + assert_, assert_equal, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_raises + ) +from numpy.linalg import matrix_power +from numpy.matrixlib import mat + +class TestCtor(object): + def test_basic(self): + A = np.array([[1, 2], [3, 4]]) + mA = matrix(A) + assert_(np.all(mA.A == A)) + + B = bmat("A,A;A,A") + C = bmat([[A, A], [A, A]]) + D = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + assert_(np.all(B.A == D)) + assert_(np.all(C.A == D)) + + E = np.array([[5, 6], [7, 8]]) + AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) + assert_(np.all(bmat([A, E]) == AEresult)) + + vec = np.arange(5) + mvec = matrix(vec) + assert_(mvec.shape == (1, 5)) + + def test_exceptions(self): + # Check for ValueError when called with invalid string data. + assert_raises(ValueError, matrix, "invalid") + + def test_bmat_nondefault_str(self): + A = np.array([[1, 2], [3, 4]]) + B = np.array([[5, 6], [7, 8]]) + Aresult = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + mixresult = np.array([[1, 2, 5, 6], + [3, 4, 7, 8], + [5, 6, 1, 2], + [7, 8, 3, 4]]) + assert_(np.all(bmat("A,A;A,A") == Aresult)) + assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) + assert_( + np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) + assert_(np.all(b2 == mixresult)) + + +class TestProperties(object): + def test_sum(self): + """Test whether matrix.sum(axis=1) preserves orientation. + Fails in NumPy <= 0.9.6.2127. 
+ """ + M = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + sum0 = matrix([8, 12, 4, 6]) + sum1 = matrix([3, 7, 6, 14]).T + sumall = 30 + assert_array_equal(sum0, M.sum(axis=0)) + assert_array_equal(sum1, M.sum(axis=1)) + assert_equal(sumall, M.sum()) + + assert_array_equal(sum0, np.sum(M, axis=0)) + assert_array_equal(sum1, np.sum(M, axis=1)) + assert_equal(sumall, np.sum(M)) + + def test_prod(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.prod(), 720) + assert_equal(x.prod(0), matrix([[4, 10, 18]])) + assert_equal(x.prod(1), matrix([[6], [120]])) + + assert_equal(np.prod(x), 720) + assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) + assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) + + y = matrix([0, 1, 3]) + assert_(y.prod() == 0) + + def test_max(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.max(), 6) + assert_equal(x.max(0), matrix([[4, 5, 6]])) + assert_equal(x.max(1), matrix([[3], [6]])) + + assert_equal(np.max(x), 6) + assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) + assert_equal(np.max(x, axis=1), matrix([[3], [6]])) + + def test_min(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.min(), 1) + assert_equal(x.min(0), matrix([[1, 2, 3]])) + assert_equal(x.min(1), matrix([[1], [4]])) + + assert_equal(np.min(x), 1) + assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) + assert_equal(np.min(x, axis=1), matrix([[1], [4]])) + + def test_ptp(self): + x = np.arange(4).reshape((2, 2)) + assert_(x.ptp() == 3) + assert_(np.all(x.ptp(0) == np.array([2, 2]))) + assert_(np.all(x.ptp(1) == np.array([1, 1]))) + + def test_var(self): + x = np.arange(9).reshape((3, 3)) + mx = x.view(np.matrix) + assert_equal(x.var(ddof=0), mx.var(ddof=0)) + assert_equal(x.var(ddof=1), mx.var(ddof=1)) + + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], + [3., 4.]]) + mA = matrix(A) + assert_(np.allclose(linalg.inv(A), mA.I)) + assert_(np.all(np.array(np.transpose(A) == mA.T))) + assert_(np.all(np.array(np.transpose(A) == mA.H))) + assert_(np.all(A == mA.A)) + + B = A + 2j*A + mB = matrix(B) + assert_(np.allclose(linalg.inv(B), mB.I)) + assert_(np.all(np.array(np.transpose(B) == mB.T))) + assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) + + def test_pinv(self): + x = matrix(np.arange(6).reshape(2, 3)) + xpinv = matrix([[-0.77777778, 0.27777778], + [-0.11111111, 0.11111111], + [ 0.55555556, -0.05555556]]) + assert_almost_equal(x.I, xpinv) + + def test_comparisons(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + mB = matrix(A) + 0.1 + assert_(np.all(mB == A+0.1)) + assert_(np.all(mB == matrix(A+0.1))) + assert_(not np.any(mB == matrix(A-0.1))) + assert_(np.all(mA < mB)) + assert_(np.all(mA <= mB)) + assert_(np.all(mA <= mA)) + assert_(not np.any(mA < mA)) + + assert_(not np.any(mB < mA)) + assert_(np.all(mB >= mA)) + assert_(np.all(mB >= mB)) + assert_(not np.any(mB > mB)) + + assert_(np.all(mA == mA)) + assert_(not np.any(mA == mB)) + assert_(np.all(mB != mA)) + + assert_(not np.all(abs(mA) > 0)) + assert_(np.all(abs(mB > 0))) + + def test_asmatrix(self): + A = np.arange(100).reshape(10, 10) + mA = asmatrix(A) + A[0, 0] = -10 + assert_(A[0, 0] == mA[0, 0]) + + def test_noaxis(self): + A = matrix([[1, 0], [0, 1]]) + assert_(A.sum() == matrix(2)) + assert_(A.mean() == matrix(0.5)) + + def test_repr(self): + A = matrix([[1, 0], [0, 1]]) + assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") + + def test_make_bool_matrix_from_str(self): + A = matrix('True; True; False') + B = 
matrix([[True], [True], [False]]) + assert_array_equal(A, B) + +class TestCasting(object): + def test_basic(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + + mB = mA.copy() + O = np.ones((10, 10), np.float64) * 0.1 + mB = mB + O + assert_(mB.dtype.type == np.float64) + assert_(np.all(mA != mB)) + assert_(np.all(mB == mA+0.1)) + + mC = mA.copy() + O = np.ones((10, 10), np.complex128) + mC = mC * O + assert_(mC.dtype.type == np.complex128) + assert_(np.all(mA != mB)) + + +class TestAlgebra(object): + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], [3., 4.]]) + mA = matrix(A) + + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** i).A, B)) + B = np.dot(B, A) + + Ainv = linalg.inv(A) + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** -i).A, B)) + B = np.dot(B, Ainv) + + assert_(np.allclose((mA * mA).A, np.dot(A, A))) + assert_(np.allclose((mA + mA).A, (A + A))) + assert_(np.allclose((3*mA).A, (3*A))) + + mA2 = matrix(A) + mA2 *= 3 + assert_(np.allclose(mA2.A, 3*A)) + + def test_pow(self): + """Test raising a matrix to an integer power works as expected.""" + m = matrix("1. 2.; 3. 4.") + m2 = m.copy() + m2 **= 2 + mi = m.copy() + mi **= -1 + m4 = m2.copy() + m4 **= 2 + assert_array_almost_equal(m2, m**2) + assert_array_almost_equal(m4, np.dot(m2, m2)) + assert_array_almost_equal(np.dot(mi, m), np.eye(2)) + + def test_scalar_type_pow(self): + m = matrix([[1, 2], [3, 4]]) + for scalar_t in [np.int8, np.uint8]: + two = scalar_t(2) + assert_array_almost_equal(m ** 2, m ** two) + + def test_notimplemented(self): + '''Check that 'not implemented' operations produce a failure.''' + A = matrix([[1., 2.], + [3., 4.]]) + + # __rpow__ + with assert_raises(TypeError): + 1.0**A + + # __mul__ with something not a list, ndarray, tuple, or scalar + with assert_raises(TypeError): + A*object() + + +class TestMatrixReturn(object): + def test_instance_methods(self): + a = matrix([1.0], dtype='f8') + methodargs = { + 'astype': ('intc',), + 'clip': (0.0, 1.0), + 'compress': ([1],), + 'repeat': (1,), + 'reshape': (1,), + 'swapaxes': (0, 0), + 'dot': np.array([1.0]), + } + excluded_methods = [ + 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', + 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', + 'searchsorted', 'setflags', 'setfield', 'sort', + 'partition', 'argpartition', + 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', + 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', + 'prod', 'std', 'ctypes', 'itemset', + ] + for attrib in dir(a): + if attrib.startswith('_') or attrib in excluded_methods: + continue + f = getattr(a, attrib) + if isinstance(f, collections_abc.Callable): + # reset contents of a + a.astype('f8') + a.fill(1.0) + if attrib in methodargs: + args = methodargs[attrib] + else: + args = () + b = f(*args) + assert_(type(b) is matrix, "%s" % attrib) + assert_(type(a.real) is matrix) + assert_(type(a.imag) is matrix) + c, d = matrix([0.0]).nonzero() + assert_(type(c) is np.ndarray) + assert_(type(d) is np.ndarray) + + +class TestIndexing(object): + def test_basic(self): + x = asmatrix(np.zeros((3, 2), float)) + y = np.zeros((3, 1), float) + y[:, 0] = [0.8, 0.2, 0.3] + x[:, 1] = y > 0.5 + assert_equal(x, [[0, 1], [0, 0], [0, 0]]) + + +class TestNewScalarIndexing(object): + a = matrix([[1, 2], [3, 4]]) + + def test_dimesions(self): + a = self.a + x = a[0] + assert_equal(x.ndim, 2) + + def test_array_from_matrix_list(self): + a = self.a + x = np.array([a, a]) + 
assert_equal(x.shape, [2, 2, 2]) + + def test_array_to_list(self): + a = self.a + assert_equal(a.tolist(), [[1, 2], [3, 4]]) + + def test_fancy_indexing(self): + a = self.a + x = a[1, [0, 1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4, 3]])) + x = a[[1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4], [1, 2]])) + x = a[[[1], [0]], [[1, 0], [0, 1]]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[4, 3], [1, 2]])) + + def test_matrix_element(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x[0][0], matrix([[1, 2, 3]])) + assert_equal(x[0][0].shape, (1, 3)) + assert_equal(x[0].shape, (1, 3)) + assert_equal(x[:, 0].shape, (2, 1)) + + x = matrix(0) + assert_equal(x[0, 0], 0) + assert_equal(x[0], 0) + assert_equal(x[:, 0].shape, x.shape) + + def test_scalar_indexing(self): + x = asmatrix(np.zeros((3, 2), float)) + assert_equal(x[0, 0], x[0][0]) + + def test_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0,:], [[1, 0]]) + assert_array_equal(x[1,:], [[0, 1]]) + assert_array_equal(x[:, 0], [[1], [0]]) + assert_array_equal(x[:, 1], [[0], [1]]) + + def test_boolean_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, np.array([True, False])], x[:, 0]) + assert_array_equal(x[np.array([True, False, False]),:], x[0,:]) + + def test_list_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, [1, 0]], x[:, ::-1]) + assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) + + +class TestPower(object): + def test_returntype(self): + a = np.array([[0, 1], [0, 0]]) + assert_(type(matrix_power(a, 2)) is np.ndarray) + a = mat(a) + assert_(type(matrix_power(a, 2)) is matrix) + + def test_list(self): + assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) + + +class TestShape(object): + + a = np.array([[1], [2]]) + m = matrix([[1], [2]]) + + def test_shape(self): + assert_equal(self.a.shape, (2, 1)) + assert_equal(self.m.shape, (2, 1)) + + def test_numpy_ravel(self): + assert_equal(np.ravel(self.a).shape, (2,)) + assert_equal(np.ravel(self.m).shape, (2,)) + + def test_member_ravel(self): + assert_equal(self.a.ravel().shape, (2,)) + assert_equal(self.m.ravel().shape, (1, 2)) + + def test_member_flatten(self): + assert_equal(self.a.flatten().shape, (2,)) + assert_equal(self.m.flatten().shape, (1, 2)) + + def test_numpy_ravel_order(self): + x = np.array([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + + def test_matrix_ravel_order(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) + assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) + + def test_array_memory_sharing(self): + assert_(np.may_share_memory(self.a, self.a.ravel())) + assert_(not np.may_share_memory(self.a, self.a.flatten())) + + def test_matrix_memory_sharing(self): + assert_(np.may_share_memory(self.m, self.m.ravel())) + assert_(not np.may_share_memory(self.m, 
self.m.flatten())) + + def test_expand_dims_matrix(self): + # matrices are always 2d - so expand_dims only makes sense when the + # type is changed away from matrix. + a = np.arange(10).reshape((2, 5)).view(np.matrix) + expanded = np.expand_dims(a, axis=1) + assert_equal(expanded.ndim, 3) + assert_(not isinstance(expanded, np.matrix)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_defmatrix.pyc b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_defmatrix.pyc new file mode 100644 index 0000000..2c2d081 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_defmatrix.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_interaction.py b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_interaction.py new file mode 100644 index 0000000..088ae3c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_interaction.py @@ -0,0 +1,363 @@ +"""Tests of interaction of matrix with other parts of numpy. + +Note that tests with MaskedArray and linalg are done in separate files. +""" +from __future__ import division, absolute_import, print_function + +import pytest + +import textwrap +import warnings + +import numpy as np +from numpy.testing import (assert_, assert_equal, assert_raises, + assert_raises_regex, assert_array_equal, + assert_almost_equal, assert_array_almost_equal) + + +def test_fancy_indexing(): + # The matrix class messes with the shape. While this is always + # weird (getitem is not used, it does not have setitem nor knows + # about fancy indexing), this tests gh-3110 + # 2018-04-29: moved here from core.tests.test_index. + m = np.matrix([[1, 2], [3, 4]]) + + assert_(isinstance(m[[0, 1, 0], :], np.matrix)) + + # gh-3110. Note the transpose currently because matrices do *not* + # support dimension fixing for fancy indexing correctly. + x = np.asmatrix(np.arange(50).reshape(5, 10)) + assert_equal(x[:2, np.array(-1)], x[:2, -1].T) + + +def test_polynomial_mapdomain(): + # test that polynomial preserved matrix subtype. + # 2018-04-29: moved here from polynomial.tests.polyutils. 
+ dom1 = [0, 4] + dom2 = [1, 3] + x = np.matrix([dom1, dom1]) + res = np.polynomial.polyutils.mapdomain(x, dom1, dom2) + assert_(isinstance(res, np.matrix)) + + +def test_sort_matrix_none(): + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.sort(a, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_partition_matrix_none(): + # gh-4301 + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.partition(a, 1, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_dot_scalar_and_matrix_of_objects(): + # Ticket #2469 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.dot(arr, 3), desired) + assert_equal(np.dot(3, arr), desired) + + +def test_inner_scalar_and_matrix(): + # 2018-04-29: moved here from core.tests.test_multiarray + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + arr = np.matrix([[1, 2], [3, 4]], dtype=dt) + desired = np.matrix([[3, 6], [9, 12]], dtype=dt) + assert_equal(np.inner(arr, sca), desired) + assert_equal(np.inner(sca, arr), desired) + + +def test_inner_scalar_and_matrix_of_objects(): + # Ticket #4482 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.inner(arr, 3), desired) + assert_equal(np.inner(3, arr), desired) + + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + # 2018-04-29: moved here from core.tests.test_nditer, given the + # matrix specific shape test. 
+ + # matrix vs ndarray + a = np.matrix([[1, 2], [3, 4]]) + b = np.arange(4).reshape(2, 2).T + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(type(i.operands[2]) is np.matrix) + assert_(type(i.operands[2]) is not np.ndarray) + assert_equal(i.operands[2].shape, (2, 2)) + + # matrix always wants things to be 2D + b = np.arange(4).reshape(1, 2, 2) + assert_raises(RuntimeError, np.nditer, [a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + # but if subtypes are disabled, the result can still work + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_(type(i.operands[2]) is np.ndarray) + assert_(type(i.operands[2]) is not np.matrix) + assert_equal(i.operands[2].shape, (1, 2, 2)) + + +def like_function(): + # 2018-04-29: moved here from core.tests.test_numeric + a = np.matrix([[1, 2], [3, 4]]) + for like_function in np.zeros_like, np.ones_like, np.empty_like: + b = like_function(a) + assert_(type(b) is np.matrix) + + c = like_function(a, subok=False) + assert_(type(c) is not np.matrix) + + +def test_array_astype(): + # 2018-04-29: copied here from core.tests.test_api + # subok=True passes through a matrix + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), np.matrix) + + # subok=False never returns a matrix + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not np.matrix) + + +def test_stack(): + # 2018-04-29: copied here from core.tests.test_shape_base + # check np.matrix cannot be stacked + m = np.matrix([[1, 2], [3, 4]]) + assert_raises_regex(ValueError, 'shape too large to be a matrix', + np.stack, [m, m]) + + +def test_object_scalar_multiply(): + # Tickets #2469 and #4482 + # 2018-04-29: moved here from core.tests.test_ufunc + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.multiply(arr, 3), desired) + assert_equal(np.multiply(3, arr), desired) + + +def test_nanfunctions_matrices(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in [np.nanmin, np.nanmax]: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + # check that rows of nan are dealt with for subclasses (#4628) + mat[1] = np.nan + for f in [np.nanmin, np.nanmax]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) + and not np.isnan(res[2, 0])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat) + assert_(np.isscalar(res)) + assert_(res != np.nan) + assert_(len(w) == 0) + + +def 
test_nanfunctions_matrices_general(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod, + np.nanmean, np.nanvar, np.nanstd): + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + for f in np.nancumsum, np.nancumprod: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3*3)) + + +def test_average_matrix(): + # 2018-04-29: moved here from core.tests.test_function_base. + y = np.matrix(np.random.rand(5, 5)) + assert_array_equal(y.mean(0), np.average(y, 0)) + + a = np.matrix([[1, 2], [3, 4]]) + w = np.matrix([[1, 2], [3, 4]]) + + r = np.average(a, axis=0, weights=w) + assert_equal(type(r), np.matrix) + assert_equal(r, [[2.5, 10.0/3]]) + + +def test_trapz_matrix(): + # Test to make sure matrices give the same answer as ndarrays + # 2018-04-29: moved here from core.tests.test_function_base. + x = np.linspace(0, 5) + y = x * x + r = np.trapz(y, x) + mx = np.matrix(x) + my = np.matrix(y) + mr = np.trapz(my, mx) + assert_almost_equal(mr, r) + + +def test_ediff1d_matrix(): + # 2018-04-29: moved here from core.tests.test_arraysetops. + assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix)) + assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix)) + + +def test_apply_along_axis_matrix(): + # this test is particularly malicious because matrix + # refuses to become 1d + # 2018-04-29: moved here from core.tests.test_shape_base. + def double(row): + return row * 2 + + m = np.matrix([[0, 1], [2, 3]]) + expected = np.matrix([[0, 2], [4, 6]]) + + result = np.apply_along_axis(double, 0, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + result = np.apply_along_axis(double, 1, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + +def test_kron_matrix(): + # 2018-04-29: moved here from core.tests.test_shape_base. + a = np.ones([2, 2]) + m = np.asmatrix(a) + assert_equal(type(np.kron(a, a)), np.ndarray) + assert_equal(type(np.kron(m, m)), np.matrix) + assert_equal(type(np.kron(a, m)), np.matrix) + assert_equal(type(np.kron(m, a)), np.matrix) + + +class TestConcatenatorMatrix(object): + # 2018-04-29: moved here from core.tests.test_index_tricks. + def test_matrix(self): + a = [1, 2] + b = [3, 4] + + ab_r = np.r_['r', a, b] + ab_c = np.r_['c', a, b] + + assert_equal(type(ab_r), np.matrix) + assert_equal(type(ab_c), np.matrix) + + assert_equal(np.array(ab_r), [[1, 2, 3, 4]]) + assert_equal(np.array(ab_c), [[1], [2], [3], [4]]) + + assert_raises(ValueError, lambda: np.r_['rc', a, b]) + + def test_matrix_scalar(self): + r = np.r_['r', [1, 2], 3] + assert_equal(type(r), np.matrix) + assert_equal(np.array(r), [[1, 2, 3]]) + + def test_matrix_builder(self): + a = np.array([1]) + b = np.array([2]) + c = np.array([3]) + d = np.array([4]) + actual = np.r_['a, b; c, d'] + expected = np.bmat([[a, b], [c, d]]) + + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + +def test_array_equal_error_message_matrix(): + # 2018-04-29: moved here from testing.tests.test_utils. 
+ try: + assert_equal(np.array([1, 2]), np.matrix([1, 2])) + except AssertionError as e: + msg = str(e) + msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)") + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + x: array([1, 2]) + y: matrix([[1, 2]])""") + try: + assert_equal(msg, msg_reference) + except AssertionError: + assert_equal(msg2, msg_reference) + else: + raise AssertionError("Did not raise") + + +def test_array_almost_equal_matrix(): + # Matrix slicing keeps things 2-D, while array does not necessarily. + # See gh-8452. + # 2018-04-29: moved here from testing.tests.test_utils. + m1 = np.matrix([[1., 2.]]) + m2 = np.matrix([[1., np.nan]]) + m3 = np.matrix([[1., -np.inf]]) + m4 = np.matrix([[np.nan, np.inf]]) + m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) + for assert_func in assert_array_almost_equal, assert_almost_equal: + for m in m1, m2, m3, m4, m5: + assert_func(m, m) + a = np.array(m) + assert_func(a, m) + assert_func(m, a) diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_interaction.pyc b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_interaction.pyc new file mode 100644 index 0000000..48d54e2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_interaction.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_masked_matrix.py b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_masked_matrix.py new file mode 100644 index 0000000..52fd185 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_masked_matrix.py @@ -0,0 +1,231 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.ma.testutils import (assert_, assert_equal, assert_raises, + assert_array_equal) +from numpy.ma.core import (masked_array, masked_values, masked, allequal, + MaskType, getmask, MaskedArray, nomask, + log, add, hypot, divide) +from numpy.ma.extras import mr_ +from numpy.core.numeric import pickle + + +class MMatrix(MaskedArray, np.matrix,): + + def __new__(cls, data, mask=nomask): + mat = np.matrix(data) + _data = MaskedArray.__new__(cls, data=mat, mask=mask) + return _data + + def __array_finalize__(self, obj): + np.matrix.__array_finalize__(self, obj) + MaskedArray.__array_finalize__(self, obj) + return + + def _get_series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + _series = property(fget=_get_series) + + +class TestMaskedMatrix(object): + def test_matrix_indexing(self): + # Tests conversions and indexing + x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) + x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) + x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) + x4 = masked_array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
+ # tests of indexing + assert_(type(x2[1, 0]) is type(x1[1, 0])) + assert_(x1[1, 0] == x2[1, 0]) + assert_(x2[1, 1] is masked) + assert_equal(x1[0, 2], x2[0, 2]) + assert_equal(x1[0, 1:], x2[0, 1:]) + assert_equal(x1[:, 2], x2[:, 2]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[0, 2] = 9 + x2[0, 2] = 9 + assert_equal(x1, x2) + x1[0, 1:] = 99 + x2[0, 1:] = 99 + assert_equal(x1, x2) + x2[0, 1] = masked + assert_equal(x1, x2) + x2[0, 1:] = masked + assert_equal(x1, x2) + x2[0, :] = x1[0, :] + x2[0, 1] = masked + assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) + x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) + assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) + x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) + assert_(allequal(x4[1], masked_array([1, 2, 3]))) + x1 = np.matrix(np.arange(5) * 1.0) + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), + x2.mask)) + assert_equal(3.0, x2.fill_value) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.matrix)) + + def test_count_mean_with_matrix(self): + m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) + + assert_equal(m.count(axis=0).shape, (1, 2)) + assert_equal(m.count(axis=1).shape, (2, 1)) + + # Make sure broadcasting inside mean and var work + assert_equal(m.mean(axis=0), [[2., 3.]]) + assert_equal(m.mean(axis=1), [[1.5], [3.5]]) + + def test_flat(self): + # Test that flat can return items even for matrices [#4585, #4615] + # test simple access + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + assert_equal(test.flat[1], 2) + assert_equal(test.flat[2], masked) + assert_(np.all(test.flat[0:2] == test[0, 0:2])) + # Test flat on masked_matrices + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # Test setting + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) + testflat[0] = 9 + # test that matrices keep the correct shape (#4615) + a = masked_array(np.matrix(np.eye(2)), mask=0) + b = a.flat + b01 = b[:2] + assert_equal(b01.data, np.array([[1., 0.]])) + assert_equal(b01.mask, np.array([[False, False]])) + + def test_allany_onmatrices(self): + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + X = np.matrix(x) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool_) + mX = masked_array(X, mask=m) + mXbig = (mX > 0.5) + mXsmall = (mX < 0.5) + + assert_(not mXbig.all()) + assert_(mXbig.any()) + assert_equal(mXbig.all(0), np.matrix([False, False, True])) + assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), np.matrix([False, False, True])) + assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) + + assert_(not mXsmall.all()) + assert_(mXsmall.any()) + assert_equal(mXsmall.all(0), 
np.matrix([True, True, False]))
+        assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
+        assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
+        assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
+
+    def test_compressed(self):
+        a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
+        b = a.compressed()
+        assert_equal(b, a)
+        assert_(isinstance(b, np.matrix))
+        a[0, 0] = masked
+        b = a.compressed()
+        assert_equal(b, [[2, 3, 4]])
+
+    def test_ravel(self):
+        a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
+        aravel = a.ravel()
+        assert_equal(aravel.shape, (1, 5))
+        assert_equal(aravel._mask.shape, a.shape)
+
+    def test_view(self):
+        # Test view w/ flexible dtype
+        iterator = list(zip(np.arange(10), np.random.rand(10)))
+        data = np.array(iterator)
+        a = masked_array(iterator, dtype=[('a', float), ('b', float)])
+        a.mask[0] = (1, 0)
+        test = a.view((float, 2), np.matrix)
+        assert_equal(test, data)
+        assert_(isinstance(test, np.matrix))
+        assert_(not isinstance(test, MaskedArray))
+
+
+class TestSubclassing(object):
+    # Test suite for masked subclasses of ndarray.
+
+    def setup(self):
+        x = np.arange(5, dtype='float')
+        mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
+        self.data = (x, mx)
+
+    def test_maskedarray_subclassing(self):
+        # Tests subclassing MaskedArray
+        (x, mx) = self.data
+        assert_(isinstance(mx._data, np.matrix))
+
+    def test_masked_unary_operations(self):
+        # Tests masked_unary_operation
+        (x, mx) = self.data
+        with np.errstate(divide='ignore'):
+            assert_(isinstance(log(mx), MMatrix))
+            assert_equal(log(x), np.log(x))
+
+    def test_masked_binary_operations(self):
+        # Tests masked_binary_operation
+        (x, mx) = self.data
+        # Result should be a MMatrix
+        assert_(isinstance(add(mx, mx), MMatrix))
+        assert_(isinstance(add(mx, x), MMatrix))
+        # Result should work
+        assert_equal(add(mx, x), mx+x)
+        assert_(isinstance(add(mx, mx)._data, np.matrix))
+        assert_(isinstance(add.outer(mx, mx), MMatrix))
+        assert_(isinstance(hypot(mx, mx), MMatrix))
+        assert_(isinstance(hypot(mx, x), MMatrix))
+
+    def test_masked_binary_operations2(self):
+        # Tests domained_masked_binary_operation
+        (x, mx) = self.data
+        xmx = masked_array(mx.data.__array__(), mask=mx.mask)
+        assert_(isinstance(divide(mx, mx), MMatrix))
+        assert_(isinstance(divide(mx, x), MMatrix))
+        assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+class TestConcatenator(object):
+    # Tests for mr_, the equivalent of r_ for masked arrays.
+
+    def test_matrix_builder(self):
+        assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
+
+    def test_matrix(self):
+        # Test consistency with unmasked version.  If we ever deprecate
+        # matrix, this test should either still pass, or both actual and
+        # expected should fail to build. 
+ actual = mr_['r', 1, 2, 3] + expected = np.ma.array(np.r_['r', 1, 2, 3]) + assert_array_equal(actual, expected) + + # outer type is masked array, inner type is matrix + assert_equal(type(actual), type(expected)) + assert_equal(type(actual.data), type(expected.data)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_masked_matrix.pyc b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_masked_matrix.pyc new file mode 100644 index 0000000..8c90a59 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_masked_matrix.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py new file mode 100644 index 0000000..6fc733c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py @@ -0,0 +1,95 @@ +""" Test functions for linalg module using the matrix class.""" +from __future__ import division, absolute_import, print_function + +import numpy as np + +from numpy.linalg.tests.test_linalg import ( + LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase, + _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base, + SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases, + PinvCases, DetCases, LstsqCases) + + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("0x0_matrix", + np.empty((0, 0), dtype=np.double).view(np.matrix), + np.empty((0, 1), dtype=np.double).view(np.matrix), + tags={'size-0'}), + LinalgCase("matrix_b_only", + np.array([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), + LinalgCase("matrix_a_and_b", + np.matrix([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hmatrix_a_and_b", + np.matrix([[1., 2.], [2., 1.]]), + None), +]) +# No need to make generalized or strided cases for matrices. 
+ + +class MatrixTestCase(LinalgTestCase): + TEST_CASES = CASES + + +class TestSolveMatrix(SolveCases, MatrixTestCase): + pass + + +class TestInvMatrix(InvCases, MatrixTestCase): + pass + + +class TestEigvalsMatrix(EigvalsCases, MatrixTestCase): + pass + + +class TestEigMatrix(EigCases, MatrixTestCase): + pass + + +class TestSVDMatrix(SVDCases, MatrixTestCase): + pass + + +class TestCondMatrix(CondCases, MatrixTestCase): + pass + + +class TestPinvMatrix(PinvCases, MatrixTestCase): + pass + + +class TestDetMatrix(DetCases, MatrixTestCase): + pass + + +class TestLstsqMatrix(LstsqCases, MatrixTestCase): + pass + + +class _TestNorm2DMatrix(_TestNorm2D): + array = np.matrix + + +class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase): + pass + + +class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase): + pass + + +class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base): + pass + + +class TestQRMatrix(_TestQR): + array = np.matrix diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_matrix_linalg.pyc b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_matrix_linalg.pyc new file mode 100644 index 0000000..d089943 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_matrix_linalg.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_multiarray.py b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_multiarray.py new file mode 100644 index 0000000..6d84bd4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/matrixlib/tests/test_multiarray.py @@ -0,0 +1,18 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_array_equal + +class TestView(object): + def test_type(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x.view(np.matrix), np.matrix)) + + def test_keywords(self): + x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype='= 2.6. + +""" +from __future__ import division, absolute_import, print_function + +from abc import ABCMeta, abstractmethod, abstractproperty +import numbers + +import numpy as np +from . import polyutils as pu + +__all__ = ['ABCPolyBase'] + +class ABCPolyBase(object): + """An abstract base class for immutable series classes. + + ABCPolyBase provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the + methods listed below. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + coef : array_like + Series coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where + ``P_i`` is the basis polynomials of degree ``i``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is the derived class domain. + window : (2,) array_like, optional + Window, see domain for its use. The default value is the + derived class window. + + Attributes + ---------- + coef : (N,) ndarray + Series coefficients in order of increasing degree. + domain : (2,) ndarray + Domain that is mapped to window. + window : (2,) ndarray + Window that domain is mapped to. 
+ + Class Attributes + ---------------- + maxpower : int + Maximum power allowed, i.e., the largest number ``n`` such that + ``p(x)**n`` is allowed. This is to limit runaway polynomial size. + domain : (2,) ndarray + Default domain of the class. + window : (2,) ndarray + Default window of the class. + + """ + __metaclass__ = ABCMeta + + # Not hashable + __hash__ = None + + # Opt out of numpy ufuncs and Python ops with ndarray subclasses. + __array_ufunc__ = None + + # Limit runaway size. T_n^m has degree n*m + maxpower = 100 + + @abstractproperty + def domain(self): + pass + + @abstractproperty + def window(self): + pass + + @abstractproperty + def nickname(self): + pass + + @abstractproperty + def basis_name(self): + pass + + @abstractmethod + def _add(self): + pass + + @abstractmethod + def _sub(self): + pass + + @abstractmethod + def _mul(self): + pass + + @abstractmethod + def _div(self): + pass + + @abstractmethod + def _pow(self): + pass + + @abstractmethod + def _val(self): + pass + + @abstractmethod + def _int(self): + pass + + @abstractmethod + def _der(self): + pass + + @abstractmethod + def _fit(self): + pass + + @abstractmethod + def _line(self): + pass + + @abstractmethod + def _roots(self): + pass + + @abstractmethod + def _fromroots(self): + pass + + def has_samecoef(self, other): + """Check if coefficients match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``coef`` attribute. + + Returns + ------- + bool : boolean + True if the coefficients are the same, False otherwise. + + """ + if len(self.coef) != len(other.coef): + return False + elif not np.all(self.coef == other.coef): + return False + else: + return True + + def has_samedomain(self, other): + """Check if domains match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``domain`` attribute. + + Returns + ------- + bool : boolean + True if the domains are the same, False otherwise. + + """ + return np.all(self.domain == other.domain) + + def has_samewindow(self, other): + """Check if windows match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``window`` attribute. + + Returns + ------- + bool : boolean + True if the windows are the same, False otherwise. + + """ + return np.all(self.window == other.window) + + def has_sametype(self, other): + """Check if types match. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + other : object + Class instance. + + Returns + ------- + bool : boolean + True if other is same class as self + + """ + return isinstance(other, self.__class__) + + def _get_coefficients(self, other): + """Interpret other as polynomial coefficients. + + The `other` argument is checked to see if it is of the same + class as self with identical domain and window. If so, + return its coefficients, otherwise return `other`. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + other : anything + Object to be checked. + + Returns + ------- + coef + The coefficients of`other` if it is a compatible instance, + of ABCPolyBase, otherwise `other`. + + Raises + ------ + TypeError + When `other` is an incompatible instance of ABCPolyBase. 
+ + """ + if isinstance(other, ABCPolyBase): + if not isinstance(other, self.__class__): + raise TypeError("Polynomial types differ") + elif not np.all(self.domain == other.domain): + raise TypeError("Domains differ") + elif not np.all(self.window == other.window): + raise TypeError("Windows differ") + return other.coef + return other + + def __init__(self, coef, domain=None, window=None): + [coef] = pu.as_series([coef], trim=False) + self.coef = coef + + if domain is not None: + [domain] = pu.as_series([domain], trim=False) + if len(domain) != 2: + raise ValueError("Domain has wrong number of elements.") + self.domain = domain + + if window is not None: + [window] = pu.as_series([window], trim=False) + if len(window) != 2: + raise ValueError("Window has wrong number of elements.") + self.window = window + + def __repr__(self): + format = "%s(%s, domain=%s, window=%s)" + coef = repr(self.coef)[6:-1] + domain = repr(self.domain)[6:-1] + window = repr(self.window)[6:-1] + name = self.__class__.__name__ + return format % (name, coef, domain, window) + + def __str__(self): + format = "%s(%s)" + coef = str(self.coef) + name = self.nickname + return format % (name, coef) + + @classmethod + def _repr_latex_term(cls, i, arg_str, needs_parens): + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis name, or override " + "_repr_latex_term(i, arg_str, needs_parens)") + # since we always add parens, we don't care if the expression needs them + return "{{{basis}}}_{{{i}}}({arg_str})".format( + basis=cls.basis_name, i=i, arg_str=arg_str + ) + + @staticmethod + def _repr_latex_scalar(x): + # TODO: we're stuck with disabling math formatting until we handle + # exponents in this function + return r'\text{{{}}}'.format(x) + + def _repr_latex_(self): + # get the scaled argument string to the basis functions + off, scale = self.mapparms() + if off == 0 and scale == 1: + term = 'x' + needs_parens = False + elif scale == 1: + term = '{} + x'.format( + self._repr_latex_scalar(off) + ) + needs_parens = True + elif off == 0: + term = '{}x'.format( + self._repr_latex_scalar(scale) + ) + needs_parens = True + else: + term = '{} + {}x'.format( + self._repr_latex_scalar(off), + self._repr_latex_scalar(scale) + ) + needs_parens = True + + mute = r"\color{{LightGray}}{{{}}}".format + + parts = [] + for i, c in enumerate(self.coef): + # prevent duplication of + and - signs + if i == 0: + coef_str = '{}'.format(self._repr_latex_scalar(c)) + elif not isinstance(c, numbers.Real): + coef_str = ' + ({})'.format(self._repr_latex_scalar(c)) + elif not np.signbit(c): + coef_str = ' + {}'.format(self._repr_latex_scalar(c)) + else: + coef_str = ' - {}'.format(self._repr_latex_scalar(-c)) + + # produce the string for the term + term_str = self._repr_latex_term(i, term, needs_parens) + if term_str == '1': + part = coef_str + else: + part = r'{}\,{}'.format(coef_str, term_str) + + if c == 0: + part = mute(part) + + parts.append(part) + + if parts: + body = ''.join(parts) + else: + # in case somehow there are no coefficients at all + body = '0' + + return r'$x \mapsto {}$'.format(body) + + + + # Pickle and copy + + def __getstate__(self): + ret = self.__dict__.copy() + ret['coef'] = self.coef.copy() + ret['domain'] = self.domain.copy() + ret['window'] = self.window.copy() + return ret + + def __setstate__(self, dict): + self.__dict__ = dict + + # Call + + def __call__(self, arg): + off, scl = pu.mapparms(self.domain, self.window) + arg = off + scl*arg + return self._val(arg, self.coef) + + 
def __iter__(self): + return iter(self.coef) + + def __len__(self): + return len(self.coef) + + # Numeric properties. + + def __neg__(self): + return self.__class__(-self.coef, self.domain, self.window) + + def __pos__(self): + return self + + def __add__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._add(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __sub__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._sub(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __mul__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._mul(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __div__(self, other): + # this can be removed when python 2 support is dropped. + return self.__floordiv__(other) + + def __truediv__(self, other): + # there is no true divide if the rhs is not a Number, although it + # could return the first n elements of an infinite series. + # It is hard to see where n would come from, though. + if not isinstance(other, numbers.Number) or isinstance(other, bool): + form = "unsupported types for true division: '%s', '%s'" + raise TypeError(form % (type(self), type(other))) + return self.__floordiv__(other) + + def __floordiv__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __mod__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __divmod__(self, other): + othercoef = self._get_coefficients(other) + try: + quo, rem = self._div(self.coef, othercoef) + except ZeroDivisionError as e: + raise e + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window) + rem = self.__class__(rem, self.domain, self.window) + return quo, rem + + def __pow__(self, other): + coef = self._pow(self.coef, other, maxpower=self.maxpower) + res = self.__class__(coef, self.domain, self.window) + return res + + def __radd__(self, other): + try: + coef = self._add(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rsub__(self, other): + try: + coef = self._sub(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rmul__(self, other): + try: + coef = self._mul(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rdiv__(self, other): + # set to __floordiv__ /. + return self.__rfloordiv__(other) + + def __rtruediv__(self, other): + # An instance of ABCPolyBase is not considered a + # Number. 
+ return NotImplemented + + def __rfloordiv__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __rmod__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __rdivmod__(self, other): + try: + quo, rem = self._div(other, self.coef) + except ZeroDivisionError as e: + raise e + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window) + rem = self.__class__(rem, self.domain, self.window) + return quo, rem + + def __eq__(self, other): + res = (isinstance(other, self.__class__) and + np.all(self.domain == other.domain) and + np.all(self.window == other.window) and + (self.coef.shape == other.coef.shape) and + np.all(self.coef == other.coef)) + return res + + def __ne__(self, other): + return not self.__eq__(other) + + # + # Extra methods. + # + + def copy(self): + """Return a copy. + + Returns + ------- + new_series : series + Copy of self. + + """ + return self.__class__(self.coef, self.domain, self.window) + + def degree(self): + """The degree of the series. + + .. versionadded:: 1.5.0 + + Returns + ------- + degree : int + Degree of the series, one less than the number of coefficients. + + """ + return len(self) - 1 + + def cutdeg(self, deg): + """Truncate series to the given degree. + + Reduce the degree of the series to `deg` by discarding the + high order terms. If `deg` is greater than the current degree a + copy of the current series is returned. This can be useful in least + squares where the coefficients of the high degree terms may be very + small. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + deg : non-negative int + The series is reduced to degree `deg` by discarding the high + order terms. The value of `deg` must be a non-negative integer. + + Returns + ------- + new_series : series + New instance of series with reduced degree. + + """ + return self.truncate(deg + 1) + + def trim(self, tol=0): + """Remove trailing coefficients + + Remove trailing coefficients until a coefficient is reached whose + absolute value greater than `tol` or the beginning of the series is + reached. If all the coefficients would be removed the series is set + to ``[0]``. A new series instance is returned with the new + coefficients. The current instance remains unchanged. + + Parameters + ---------- + tol : non-negative number. + All trailing coefficients less than `tol` will be removed. + + Returns + ------- + new_series : series + Contains the new set of coefficients. + + """ + coef = pu.trimcoef(self.coef, tol) + return self.__class__(coef, self.domain, self.window) + + def truncate(self, size): + """Truncate series to length `size`. + + Reduce the series to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. This + can be useful in least squares where the coefficients of the + high degree terms may be very small. + + Parameters + ---------- + size : positive int + The series is reduced to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. + + Returns + ------- + new_series : series + New instance of series with truncated coefficients. 
+ + """ + isize = int(size) + if isize != size or isize < 1: + raise ValueError("size must be a positive integer") + if isize >= len(self.coef): + coef = self.coef + else: + coef = self.coef[:isize] + return self.__class__(coef, self.domain, self.window) + + def convert(self, domain=None, kind=None, window=None): + """Convert series to a different kind and/or domain and/or window. + + Parameters + ---------- + domain : array_like, optional + The domain of the converted series. If the value is None, + the default domain of `kind` is used. + kind : class, optional + The polynomial series type class to which the current instance + should be converted. If kind is None, then the class of the + current instance is used. + window : array_like, optional + The window of the converted series. If the value is None, + the default window of `kind` is used. + + Returns + ------- + new_series : series + The returned class can be of different type than the current + instance and/or have a different domain and/or different + window. + + Notes + ----- + Conversion between domains and class types can result in + numerically ill defined series. + + Examples + -------- + + """ + if kind is None: + kind = self.__class__ + if domain is None: + domain = kind.domain + if window is None: + window = kind.window + return self(kind.identity(domain, window=window)) + + def mapparms(self): + """Return the mapping parameters. + + The returned values define a linear map ``off + scl*x`` that is + applied to the input arguments before the series is evaluated. The + map depends on the ``domain`` and ``window``; if the current + ``domain`` is equal to the ``window`` the resulting map is the + identity. If the coefficients of the series instance are to be + used by themselves outside this class, then the linear function + must be substituted for the ``x`` in the standard representation of + the base polynomials. + + Returns + ------- + off, scl : float or complex + The mapping function is defined by ``off + scl*x``. + + Notes + ----- + If the current domain is the interval ``[l1, r1]`` and the window + is ``[l2, r2]``, then the linear mapping function ``L`` is + defined by the equations:: + + L(l1) = l2 + L(r1) = r2 + + """ + return pu.mapparms(self.domain, self.window) + + def integ(self, m=1, k=[], lbnd=None): + """Integrate. + + Return a series instance that is the definite integral of the + current series. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. + k : array_like + Integration constants. The first constant is applied to the + first integration, the second to the second, and so on. The + list of values must less than or equal to `m` in length and any + missing values are set to zero. + lbnd : Scalar + The lower bound of the definite integral. + + Returns + ------- + new_series : series + A new series representing the integral. The domain is the same + as the domain of the integrated series. + + """ + off, scl = self.mapparms() + if lbnd is None: + lbnd = 0 + else: + lbnd = off + scl*lbnd + coef = self._int(self.coef, m, k, lbnd, 1./scl) + return self.__class__(coef, self.domain, self.window) + + def deriv(self, m=1): + """Differentiate. + + Return a series instance of that is the derivative of the current + series. + + Parameters + ---------- + m : non-negative int + Find the derivative of order `m`. + + Returns + ------- + new_series : series + A new series representing the derivative. The domain is the same + as the domain of the differentiated series. 
+ + """ + off, scl = self.mapparms() + coef = self._der(self.coef, m, scl) + return self.__class__(coef, self.domain, self.window) + + def roots(self): + """Return the roots of the series polynomial. + + Compute the roots for the series. Note that the accuracy of the + roots decrease the further outside the domain they lie. + + Returns + ------- + roots : ndarray + Array containing the roots of the series. + + """ + roots = self._roots(self.coef) + return pu.mapdomain(roots, self.window, self.domain) + + def linspace(self, n=100, domain=None): + """Return x, y values at equally spaced points in domain. + + Returns the x, y values at `n` linearly spaced points across the + domain. Here y is the value of the polynomial at the points x. By + default the domain is the same as that of the series instance. + This method is intended mostly as a plotting aid. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + n : int, optional + Number of point pairs to return. The default value is 100. + domain : {None, array_like}, optional + If not None, the specified domain is used instead of that of + the calling instance. It should be of the form ``[beg,end]``. + The default is None which case the class domain is used. + + Returns + ------- + x, y : ndarray + x is equal to linspace(self.domain[0], self.domain[1], n) and + y is the series evaluated at element of x. + + """ + if domain is None: + domain = self.domain + x = np.linspace(domain[0], domain[1], n) + y = self(x) + return x, y + + @classmethod + def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, + window=None): + """Least squares fit to data. + + Return a series instance that is the least squares fit to the data + `y` sampled at `x`. The domain of the returned instance can be + specified and this will often result in a superior fit with less + chance of ill conditioning. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + domain : {None, [beg, end], []}, optional + Domain to use for the returned series. If ``None``, + then a minimal domain that covers the points `x` is chosen. If + ``[]`` the class domain is used. The default value was the + class domain in NumPy 1.4 and ``None`` in later versions. + The ``[]`` option was added in numpy 1.5.0. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than this relative to the largest singular value will be + ignored. The default value is len(x)*eps, where eps is the + relative precision of the float type, about 2e-16 in most + cases. + full : bool, optional + Switch determining nature of return value. When it is False + (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is + also returned. + w : array_like, shape (M,), optional + Weights. If not None the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. 
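A short sketch of the fit/linspace workflow just described; the sample
data below is invented purely for illustration.

    import numpy as np
    from numpy.polynomial import Chebyshev

    x = np.linspace(0, 10, 50)
    y = np.sin(x)
    f = Chebyshev.fit(x, y, deg=8)    # domain defaults to a tight cover of x
    xs, ys = f.linspace(200)          # 200 point pairs across f.domain, e.g. for plotting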
Ideally the
+            weights are chosen so that the errors of the products
+            ``w[i]*y[i]`` all have the same variance.  The default value is
+            None.
+
+            .. versionadded:: 1.5.0
+        window : {[beg, end]}, optional
+            Window to use for the returned series. The default
+            value is the default class domain.
+
+            .. versionadded:: 1.6.0
+
+        Returns
+        -------
+        new_series : series
+            A series that represents the least squares fit to the data and
+            has the domain and window specified in the call. If the
+            coefficients for the unscaled and unshifted basis polynomials are
+            of interest, do ``new_series.convert().coef``.
+
+        [resid, rank, sv, rcond] : list
+            These values are only returned if `full` = True
+
+            resid -- sum of squared residuals of the least squares fit
+            rank -- the numerical rank of the scaled Vandermonde matrix
+            sv -- singular values of the scaled Vandermonde matrix
+            rcond -- value of `rcond`.
+
+            For more details, see `linalg.lstsq`.
+
+        """
+        if domain is None:
+            domain = pu.getdomain(x)
+        elif type(domain) is list and len(domain) == 0:
+            domain = cls.domain
+
+        if window is None:
+            window = cls.window
+
+        xnew = pu.mapdomain(x, domain, window)
+        res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
+        if full:
+            [coef, status] = res
+            return cls(coef, domain=domain, window=window), status
+        else:
+            coef = res
+            return cls(coef, domain=domain, window=window)
+
+    @classmethod
+    def fromroots(cls, roots, domain=[], window=None):
+        """Return series instance that has the specified roots.
+
+        Returns a series representing the product
+        ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
+        list of roots.
+
+        Parameters
+        ----------
+        roots : array_like
+            List of roots.
+        domain : {[], None, array_like}, optional
+            Domain for the resulting series. If None the domain is the
+            interval from the smallest root to the largest. If [] the
+            domain is the class domain. The default is [].
+        window : {None, array_like}, optional
+            Window for the returned series. If None the class window is
+            used. The default is None.
+
+        Returns
+        -------
+        new_series : series
+            Series with the specified roots.
+
+        """
+        [roots] = pu.as_series([roots], trim=False)
+        if domain is None:
+            domain = pu.getdomain(roots)
+        elif type(domain) is list and len(domain) == 0:
+            domain = cls.domain
+
+        if window is None:
+            window = cls.window
+
+        deg = len(roots)
+        off, scl = pu.mapparms(domain, window)
+        rnew = off + scl*roots
+        coef = cls._fromroots(rnew) / scl**deg
+        return cls(coef, domain=domain, window=window)
+
+    @classmethod
+    def identity(cls, domain=None, window=None):
+        """Identity function.
+
+        If ``p`` is the returned series, then ``p(x) == x`` for all
+        values of x.
+
+        Parameters
+        ----------
+        domain : {None, array_like}, optional
+            If given, the array must be of the form ``[beg, end]``, where
+            ``beg`` and ``end`` are the endpoints of the domain. If None is
+            given then the class domain is used. The default is None.
+        window : {None, array_like}, optional
+            If given, the resulting array must be of the form
+            ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
+            the window. If None is given then the class window is used. The
+            default is None.
+
+        Returns
+        -------
+        new_series : series
+            Series representing the identity.
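For a concrete instance of fromroots(), the cubic with roots -1, 0 and 1
(that is, x**3 - x) has a non-unit leading coefficient in the Chebyshev
basis:

    from numpy.polynomial import Chebyshev

    f = Chebyshev.fromroots([-1, 0, 1])
    print(f.coef)     # -> [ 0.   -0.25  0.    0.25]
    print(f(0.5))     # -> -0.375, since 0.5**3 - 0.5 = -0.375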
+
+        """
+        if domain is None:
+            domain = cls.domain
+        if window is None:
+            window = cls.window
+        off, scl = pu.mapparms(window, domain)
+        coef = cls._line(off, scl)
+        return cls(coef, domain, window)
+
+    @classmethod
+    def basis(cls, deg, domain=None, window=None):
+        """Series basis polynomial of degree `deg`.
+
+        Returns the series representing the basis polynomial of degree `deg`.
+
+        .. versionadded:: 1.7.0
+
+        Parameters
+        ----------
+        deg : int
+            Degree of the basis polynomial for the series. Must be >= 0.
+        domain : {None, array_like}, optional
+            If given, the array must be of the form ``[beg, end]``, where
+            ``beg`` and ``end`` are the endpoints of the domain. If None is
+            given then the class domain is used. The default is None.
+        window : {None, array_like}, optional
+            If given, the resulting array must be of the form
+            ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
+            the window. If None is given then the class window is used. The
+            default is None.
+
+        Returns
+        -------
+        new_series : series
+            A series with the coefficient of the `deg` term set to one and
+            all others zero.
+
+        """
+        if domain is None:
+            domain = cls.domain
+        if window is None:
+            window = cls.window
+        ideg = int(deg)
+
+        if ideg != deg or ideg < 0:
+            raise ValueError("deg must be non-negative integer")
+        return cls([0]*ideg + [1], domain, window)
+
+    @classmethod
+    def cast(cls, series, domain=None, window=None):
+        """Convert series to series of this class.
+
+        The `series` is expected to be an instance of some polynomial
+        series of one of the types supported by the numpy.polynomial
+        module, but could be some other class that supports the convert
+        method.
+
+        .. versionadded:: 1.7.0
+
+        Parameters
+        ----------
+        series : series
+            The series instance to be converted.
+        domain : {None, array_like}, optional
+            If given, the array must be of the form ``[beg, end]``, where
+            ``beg`` and ``end`` are the endpoints of the domain. If None is
+            given then the class domain is used. The default is None.
+        window : {None, array_like}, optional
+            If given, the resulting array must be of the form
+            ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
+            the window. If None is given then the class window is used. The
+            default is None.
+
+        Returns
+        -------
+        new_series : series
+            A series of the same kind as the calling class and equal to
+            `series` when evaluated.
+
+        See Also
+        --------
+        convert : similar instance method
+
+        """
+        if domain is None:
+            domain = cls.domain
+        if window is None:
+            window = cls.window
+        return series.convert(domain, cls, window)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/_polybase.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/_polybase.pyc
new file mode 100644
index 0000000..b75cd6f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/_polybase.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/chebyshev.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/chebyshev.py
new file mode 100644
index 0000000..92cdb18
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/chebyshev.py
@@ -0,0 +1,2200 @@
+"""
+Objects for dealing with Chebyshev series.
+
+This module provides a number of objects (mostly functions) useful for
+dealing with Chebyshev series, including a `Chebyshev` class that
+encapsulates the usual arithmetic operations. 
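A minimal example of the cast() classmethod defined above, re-expressing a
power-series polynomial in the Chebyshev basis:

    from numpy.polynomial import Polynomial, Chebyshev

    p = Polynomial([0, 1, 2, 3])    # 0 + x + 2*x**2 + 3*x**3
    c = Chebyshev.cast(p)
    print(c.coef)                   # -> [ 1.    3.25  1.    0.75]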
(General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `chebdomain` -- Chebyshev series default domain, [-1,1]. +- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates + identically to 0. +- `chebone` -- (Coefficients of the) Chebyshev series that evaluates + identically to 1. +- `chebx` -- (Coefficients of the) Chebyshev series for the identity map, + ``f(x) = x``. + +Arithmetic +---------- +- `chebadd` -- add two Chebyshev series. +- `chebsub` -- subtract one Chebyshev series from another. +- `chebmulx` -- multiply a Chebyshev series in ``P_i(x)`` by ``x``. +- `chebmul` -- multiply two Chebyshev series. +- `chebdiv` -- divide one Chebyshev series by another. +- `chebpow` -- raise a Chebyshev series to a positive integer power. +- `chebval` -- evaluate a Chebyshev series at given points. +- `chebval2d` -- evaluate a 2D Chebyshev series at given points. +- `chebval3d` -- evaluate a 3D Chebyshev series at given points. +- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product. +- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product. + +Calculus +-------- +- `chebder` -- differentiate a Chebyshev series. +- `chebint` -- integrate a Chebyshev series. + +Misc Functions +-------------- +- `chebfromroots` -- create a Chebyshev series with specified roots. +- `chebroots` -- find the roots of a Chebyshev series. +- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials. +- `chebvander2d` -- Vandermonde-like matrix for 2D power series. +- `chebvander3d` -- Vandermonde-like matrix for 3D power series. +- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights. +- `chebweight` -- Chebyshev weight function. +- `chebcompanion` -- symmetrized companion matrix in Chebyshev form. +- `chebfit` -- least-squares fit returning a Chebyshev series. +- `chebpts1` -- Chebyshev points of the first kind. +- `chebpts2` -- Chebyshev points of the second kind. +- `chebtrim` -- trim leading coefficients from a Chebyshev series. +- `chebline` -- Chebyshev series representing given straight line. +- `cheb2poly` -- convert a Chebyshev series to a polynomial. +- `poly2cheb` -- convert a polynomial to a Chebyshev series. +- `chebinterpolate` -- interpolate a function at the Chebyshev points. + +Classes +------- +- `Chebyshev` -- A Chebyshev series class. + +See also +-------- +`numpy.polynomial` + +Notes +----- +The implementations of multiplication, division, integration, and +differentiation use the algebraic identities [1]_: + +.. math :: + T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ + z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. + +where + +.. math :: x = \\frac{z + z^{-1}}{2}. + +These identities allow a Chebyshev series to be expressed as a finite, +symmetric Laurent series. In this module, this sort of Laurent series +is referred to as a "z-series." + +References +---------- +.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev + Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 + (preprint: https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . 
import polyutils as pu
+from ._polybase import ABCPolyBase
+
+__all__ = [
+    'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
+    'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
+    'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
+    'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
+    'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
+    'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
+    'chebgauss', 'chebweight', 'chebinterpolate']
+
+chebtrim = pu.trimcoef
+
+#
+# A collection of functions for manipulating z-series. These are private
+# functions and do minimal error checking.
+#
+
+def _cseries_to_zseries(c):
+    """Convert Chebyshev series to z-series.
+
+    Convert a Chebyshev series to the equivalent z-series. The result is
+    never an empty array. The dtype of the return is the same as that of
+    the input. No checks are run on the arguments as this routine is for
+    internal use.
+
+    Parameters
+    ----------
+    c : 1-D ndarray
+        Chebyshev coefficients, ordered from low to high
+
+    Returns
+    -------
+    zs : 1-D ndarray
+        Odd length symmetric z-series, ordered from low to high.
+
+    """
+    n = c.size
+    zs = np.zeros(2*n-1, dtype=c.dtype)
+    zs[n-1:] = c/2
+    return zs + zs[::-1]
+
+
+def _zseries_to_cseries(zs):
+    """Convert z-series to a Chebyshev series.
+
+    Convert a z-series to the equivalent Chebyshev series. The result is
+    never an empty array. The dtype of the return is the same as that of
+    the input. No checks are run on the arguments as this routine is for
+    internal use.
+
+    Parameters
+    ----------
+    zs : 1-D ndarray
+        Odd length symmetric z-series, ordered from low to high.
+
+    Returns
+    -------
+    c : 1-D ndarray
+        Chebyshev coefficients, ordered from low to high.
+
+    """
+    n = (zs.size + 1)//2
+    c = zs[n-1:].copy()
+    c[1:n] *= 2
+    return c
+
+
+def _zseries_mul(z1, z2):
+    """Multiply two z-series.
+
+    Multiply two z-series to produce a z-series.
+
+    Parameters
+    ----------
+    z1, z2 : 1-D ndarray
+        The arrays must be 1-D but this is not checked.
+
+    Returns
+    -------
+    product : 1-D ndarray
+        The product z-series.
+
+    Notes
+    -----
+    This is simply convolution. If symmetric/anti-symmetric z-series are
+    denoted by S/A then the following rules apply:
+
+    S*S, A*A -> S
+    S*A, A*S -> A
+
+    """
+    return np.convolve(z1, z2)
+
+
+def _zseries_div(z1, z2):
+    """Divide the first z-series by the second.
+
+    Divide `z1` by `z2` and return the quotient and remainder as z-series.
+    Warning: this implementation only applies when both z1 and z2 have the
+    same symmetry, which is sufficient for present purposes.
+
+    Parameters
+    ----------
+    z1, z2 : 1-D ndarray
+        The arrays must be 1-D and have the same symmetry, but this is not
+        checked.
+
+    Returns
+    -------
+
+    (quotient, remainder) : 1-D ndarrays
+        Quotient and remainder as z-series.
+
+    Notes
+    -----
+    This is not the same as polynomial division on account of the desired form
+    of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
+    then the following rules apply:
+
+    S/S -> S,S
+    A/A -> S,A
+
+    The restriction to types of the same symmetry could be fixed but seems like
+    unneeded generality. There is no natural form for the remainder in the case
+    where there is no symmetry. 
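A numerical check of the multiplication rule above: T_1 * T_1 = (T_0 + T_2)/2,
so squaring the series [0, 1] (internally, a convolution of z-series) yields
[0.5, 0, 0.5].

    import numpy.polynomial.chebyshev as C

    print(C.chebmul([0, 1], [0, 1]))    # -> [ 0.5  0.   0.5]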
+ + """ + z1 = z1.copy() + z2 = z2.copy() + len1 = len(z1) + len2 = len(z2) + if len2 == 1: + z1 /= z2 + return z1, z1[:1]*0 + elif len1 < len2: + return z1[:1]*0, z1 + else: + dlen = len1 - len2 + scl = z2[0] + z2 /= scl + quo = np.empty(dlen + 1, dtype=z1.dtype) + i = 0 + j = dlen + while i < j: + r = z1[i] + quo[i] = z1[i] + quo[dlen - i] = r + tmp = r*z2 + z1[i:i+len2] -= tmp + z1[j:j+len2] -= tmp + i += 1 + j -= 1 + r = z1[i] + quo[i] = r + tmp = r*z2 + z1[i:i+len2] -= tmp + quo /= scl + rem = z1[i+1:i-1+len2].copy() + return quo, rem + + +def _zseries_der(zs): + """Differentiate a z-series. + + The derivative is with respect to x, not z. This is achieved using the + chain rule and the value of dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to differentiate. + + Returns + ------- + derivative : z-series + The derivative + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + multiplying the value of zs by two also so that the two cancels in the + division. + + """ + n = len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs *= np.arange(-n, n+1)*2 + d, r = _zseries_div(zs, ns) + return d + + +def _zseries_int(zs): + """Integrate a z-series. + + The integral is with respect to x, not z. This is achieved by a change + of variable using dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to integrate + + Returns + ------- + integral : z-series + The indefinite integral + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + dividing the resulting zs by two. + + """ + n = 1 + len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs = _zseries_mul(zs, ns) + div = np.arange(-n, n+1)*2 + zs[:n] /= div[:n] + zs[n+1:] /= div[n+1:] + zs[n] = 0 + return zs + +# +# Chebyshev series functions +# + + +def poly2cheb(pol): + """ + Convert a polynomial to a Chebyshev series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Chebyshev series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Chebyshev + series. + + See Also + -------- + cheb2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(range(4)) + >>> p + Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> c = p.convert(kind=P.Chebyshev) + >>> c + Chebyshev([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) + >>> P.chebyshev.poly2cheb(range(4)) + array([ 1. , 3.25, 1. , 0.75]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = chebadd(chebmulx(res), pol[i]) + return res + + +def cheb2poly(c): + """ + Convert a Chebyshev series to a polynomial. 
+ + Convert an array representing the coefficients of a Chebyshev series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Chebyshev series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2cheb + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Chebyshev(range(4)) + >>> c + Chebyshev([ 0., 1., 2., 3.], [-1., 1.]) + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([ -2., -8., 4., 12.], [-1., 1.]) + >>> P.chebyshev.cheb2poly(range(4)) + array([ -2., -8., 4., 12.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Chebyshev default domain. +chebdomain = np.array([-1, 1]) + +# Chebyshev coefficients representing zero. +chebzero = np.array([0]) + +# Chebyshev coefficients representing one. +chebone = np.array([1]) + +# Chebyshev coefficients representing the identity x. +chebx = np.array([0, 1]) + + +def chebline(off, scl): + """ + Chebyshev series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Chebyshev series for + ``off + scl*x``. + + See Also + -------- + polyline + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebline(3,2) + array([3, 2]) + >>> C.chebval(-3, C.chebline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def chebfromroots(roots): + """ + Generate a Chebyshev series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Chebyshev form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Chebyshev form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). 
+ + See Also + -------- + polyfromroots, legfromroots, lagfromroots, hermfromroots, + hermefromroots. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.25, 0. , 0.25]) + >>> j = complex(0,1) + >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.5+0.j, 0.0+0.j, 0.5+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [chebline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [chebmul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = chebmul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def chebadd(c1, c2): + """ + Add one Chebyshev series to another. + + Returns the sum of two Chebyshev series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Chebyshev series of their sum. + + See Also + -------- + chebsub, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Chebyshev series + is a Chebyshev series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebadd(c1,c2) + array([ 4., 4., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def chebsub(c1, c2): + """ + Subtract one Chebyshev series from another. + + Returns the difference of two Chebyshev series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their difference. + + See Also + -------- + chebadd, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Chebyshev + series is a Chebyshev series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebsub(c1,c2) + array([-2., 0., 2.]) + >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) + array([ 2., 0., -2.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def chebmulx(c): + """Multiply a Chebyshev series by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. 
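The recurrence behind chebmulx is x*T_n = (T_(n-1) + T_(n+1))/2; for example,
multiplying T_2 by x gives (T_1 + T_3)/2:

    import numpy.polynomial.chebyshev as C

    print(C.chebmulx([0, 0, 1]))    # -> [ 0.   0.5  0.   0.5]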
+
+    Returns
+    -------
+    out : ndarray
+        Array representing the result of the multiplication.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.5.0
+
+    Examples
+    --------
+    >>> from numpy.polynomial import chebyshev as C
+    >>> C.chebmulx([1,2,3])
+    array([ 1. ,  2.5,  1. ,  1.5])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    # The zero series needs special treatment
+    if len(c) == 1 and c[0] == 0:
+        return c
+
+    prd = np.empty(len(c) + 1, dtype=c.dtype)
+    prd[0] = c[0]*0
+    prd[1] = c[0]
+    if len(c) > 1:
+        tmp = c[1:]/2
+        prd[2:] = tmp
+        prd[0:-2] += tmp
+    return prd
+
+
+def chebmul(c1, c2):
+    """
+    Multiply one Chebyshev series by another.
+
+    Returns the product of two Chebyshev series `c1` * `c2`.  The arguments
+    are sequences of coefficients, from lowest order "term" to highest,
+    e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Chebyshev series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Chebyshev series coefficients representing their product.
+
+    See Also
+    --------
+    chebadd, chebsub, chebmulx, chebdiv, chebpow
+
+    Notes
+    -----
+    In general, the (polynomial) product of two C-series results in terms
+    that are not in the Chebyshev polynomial basis set. Thus, to express
+    the product as a C-series, it is typically necessary to "reproject"
+    the product onto said basis set, which typically produces
+    "unintuitive" (but correct) results; see Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import chebyshev as C
+    >>> c1 = (1,2,3)
+    >>> c2 = (3,2,1)
+    >>> C.chebmul(c1,c2) # multiplication requires "reprojection"
+    array([  6.5,  12. ,  12. ,   4. ,   1.5])
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+    z1 = _cseries_to_zseries(c1)
+    z2 = _cseries_to_zseries(c2)
+    prd = _zseries_mul(z1, z2)
+    ret = _zseries_to_cseries(prd)
+    return pu.trimseq(ret)
+
+
+def chebdiv(c1, c2):
+    """
+    Divide one Chebyshev series by another.
+
+    Returns the quotient-with-remainder of two Chebyshev series
+    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
+    order "term" to highest, e.g., [1,2,3] represents the series
+    ``T_0 + 2*T_1 + 3*T_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Chebyshev series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    [quo, rem] : ndarrays
+        Of Chebyshev series coefficients representing the quotient and
+        remainder.
+
+    See Also
+    --------
+    chebadd, chebsub, chebmulx, chebmul, chebpow
+
+    Notes
+    -----
+    In general, the (polynomial) division of one C-series by another
+    results in quotient and remainder terms that are not in the Chebyshev
+    polynomial basis set. Thus, to express these results as C-series, it
+    is typically necessary to "reproject" the results onto said basis
+    set, which typically produces "unintuitive" (but correct) results;
+    see Examples section below. 
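A quick consistency check of chebmul and chebdiv as documented above:
dividing an exact product by one factor recovers the other factor, with a
remainder of zero up to roundoff.

    import numpy.polynomial.chebyshev as C

    c1, c2 = (1, 2, 3), (3, 2, 1)
    quo, rem = C.chebdiv(C.chebmul(c1, c2), c2)
    print(quo)    # -> [ 1.  2.  3.]
    print(rem)    # -> [ 0.] (up to roundoff)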
+ + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not + (array([ 3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> C.chebdiv(c2,c1) # neither "intuitive" + (array([ 0., 2.]), array([-2., -4.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + quo, rem = _zseries_div(z1, z2) + quo = pu.trimseq(_zseries_to_cseries(quo)) + rem = pu.trimseq(_zseries_to_cseries(rem)) + return quo, rem + + +def chebpow(c, pow, maxpower=16): + """Raise a Chebyshev series to a power. + + Returns the Chebyshev series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Chebyshev series of power. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebdiv + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebpow([1, 2, 3, 4], 2) + array([15.5, 22. , 16. , 14. , 12.5, 12. , 8. ]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + zs = _cseries_to_zseries(c) + prd = zs + for i in range(2, power + 1): + prd = np.convolve(prd, zs) + return _zseries_to_cseries(prd) + + +def chebder(c, m=1, scl=1, axis=0): + """ + Differentiate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2`` + while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + + 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Chebyshev series of the derivative. 
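A worked check of chebder(): T_2(x) = 2*x**2 - 1, so its derivative is
4*x = 4*T_1(x), i.e. the coefficient series [0, 4].

    import numpy.polynomial.chebyshev as C

    print(C.chebder([0, 0, 1]))    # -> [ 0.  4.]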
+ + See Also + -------- + chebint + + Notes + ----- + In general, the result of differentiating a C-series needs to be + "reprojected" onto the C-series basis set. Thus, typically, the + result of this function is "unintuitive," albeit correct; see Examples + section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3,4) + >>> C.chebder(c) + array([ 14., 12., 24.]) + >>> C.chebder(c,3) + array([ 96.]) + >>> C.chebder(c,scl=-1) + array([-14., -12., -24.]) + >>> C.chebder(c,2,-1) + array([ 12., 96.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j)*c[j] + c[j - 2] += (j*c[j])/(j - 2) + if n > 1: + der[1] = 4*c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] + represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + C-series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + chebder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? 
Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a`- perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3) + >>> C.chebint(c) + array([ 0.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,3) + array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, + 0.00625 ]) + >>> C.chebint(c, k=3) + array([ 3.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,lbnd=-2) + array([ 8.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,scl=-2) + array([-1., 1., -1., -1.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of integration must be integer") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/4 + for j in range(2, n): + t = c[j]/(2*j + 1) # FIXME: t never used + tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[j - 1] -= c[j]/(2*(j - 1)) + tmp[0] += k[i] - chebval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebval(x, c, tensor=True): + """ + Evaluate a Chebyshev series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. 
In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + chebval2d, chebgrid2d, chebval3d, chebgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + x2 = 2*x + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + c0 = c[-i] - c1 + c1 = tmp + c1*x2 + return c0 + c1*x + + +def chebval2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than 2 the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + chebval, chebgrid2d, chebval3d, chebgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except Exception: + raise ValueError('x, y are incompatible') + + c = chebval(x, c) + c = chebval(y, c, tensor=False) + return c + + +def chebgrid2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b), + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. 
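A small sketch contrasting chebval2d (pairwise points) with chebgrid2d
(Cartesian product); the 2x2 coefficient array below is arbitrary.

    import numpy as np
    import numpy.polynomial.chebyshev as C

    c = np.array([[1., 2.], [3., 4.]])
    x, y = [0., 1.], [0., 1.]
    print(C.chebval2d(x, y, c).shape)    # (2,)   one value per (x[i], y[i]) pair
    print(C.chebgrid2d(x, y, c).shape)   # (2, 2) values on the full x-by-y grid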
+ + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in `c[i,j]`. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + chebval, chebval2d, chebval3d, chebgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + c = chebval(x, c) + c = chebval(y, c) + return c + + +def chebval3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except Exception: + raise ValueError('x, y, z are incompatible') + + c = chebval(x, c) + c = chebval(y, c, tensor=False) + c = chebval(z, c, tensor=False) + return c + + +def chebgrid3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. 
The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`.  If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    chebval, chebval2d, chebgrid2d, chebval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    c = chebval(x, c)
+    c = chebval(y, c)
+    c = chebval(z, c)
+    return c
+
+
+def chebvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = T_i(x),
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Chebyshev polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
+    ``chebval(x, c)`` are the same up to roundoff.  This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Chebyshev series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Chebyshev polynomial.  The dtype will be the same as
+        the converted `x`.
+
+    """
+    ideg = int(deg)
+    if ideg != deg:
+        raise ValueError("deg must be integer")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=0, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    # Use forward recursion to generate the entries.
+    v[0] = x*0 + 1
+    if ideg > 0:
+        x2 = 2*x
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = v[i-1]*x2 - v[i-2]
+    return np.moveaxis(v, 0, -1)
+
+
+def chebvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. 
math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y), + + where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of + `V` index the points `(x, y)` and the last index encodes the degrees of + the Chebyshev polynomials. + + If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + chebvander, chebvander3d. chebval2d, chebval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy = ideg + x, y = np.array((x, y), copy=0) + 0.0 + + vx = chebvander(x, degx) + vy = chebvander(y, degy) + v = vx[..., None]*vy[..., None,:] + return v.reshape(v.shape[:-2] + (-1,)) + + +def chebvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z), + + where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading + indices of `V` index the points `(x, y, z)` and the last index encodes + the degrees of the Chebyshev polynomials. + + If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + chebvander, chebvander3d. chebval2d, chebval3d + + Notes + ----- + + .. 
versionadded:: 1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy, degz = ideg + x, y, z = np.array((x, y, z), copy=0) + 0.0 + + vx = chebvander(x, degx) + vy = chebvander(y, degy) + vz = chebvander(z, degz) + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] + return v.reshape(v.shape[:-3] + (-1,)) + + +def chebfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Chebyshev series to data. + + Return the coefficients of a Chebyshev series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer, + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Chebyshev coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + polyfit, legfit, lagfit, hermfit, hermefit + chebval : Evaluates a Chebyshev series. + chebvander : Vandermonde matrix of Chebyshev series. 
+ chebweight : Chebyshev weight function. + linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Chebyshev series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Chebyshev series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") + if deg.min() < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + if deg.ndim == 0: + lmax = deg + order = lmax + 1 + van = chebvander(x, lmax) + else: + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = chebvander(x, lmax)[:, deg] + + # set up the least squares matrices in transposed form + lhs = van.T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. 
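+ # The design matrix columns were scaled to unit Euclidean norm above + # (scl), so lstsq solves a better-conditioned system; the solution is + # divided by scl on the next line to recover the coefficients of the + # unscaled Chebyshev basis.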
+ c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # Expand c to include non-fitted coefficients which are set to zero + if deg.ndim > 0: + if c.ndim == 2: + cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax + 1, dtype=c.dtype) + cc[deg] = c + c = cc + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning, stacklevel=2) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def chebcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is a Chebyshev basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.array([1.] + [np.sqrt(.5)]*(n-1)) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[0] = np.sqrt(.5) + top[1:] = 1/2 + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + return mat + + +def chebroots(c): + """ + Compute the roots of a Chebyshev series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * T_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + polyroots, legroots, lagroots, hermroots, hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Chebyshev series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as cheb + >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots + array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + m = chebcompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +def chebinterpolate(func, deg, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the Chebyshev series that interpolates `func` at the Chebyshev + points of the first kind in the interval [-1, 1]. 
The interpolating + series tends to a minmax approximation to `func` with increasing `deg` + if the function is continuous in the interval. + + .. versionadded:: 1.14.0 + + Parameters + ---------- + func : function + The function to be approximated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + args : tuple, optional + Extra arguments to be used in the function call. Default is no extra + arguments. + + Returns + ------- + coef : ndarray, shape (deg + 1,) + Chebyshev coefficients of the interpolating series ordered from low to + high. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8) + array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17, + -5.42457905e-02, -2.71387850e-16, 4.51658839e-03, + 2.46716228e-17, -3.79694221e-04, -3.26899002e-16]) + + Notes + ----- + + The Chebyshev polynomials used in the interpolation are orthogonal when + sampled at the Chebyshev points of the first kind. If it is desired to + constrain some of the coefficients, they can simply be set to the desired + value after the interpolation; no new interpolation or fit is needed. This + is especially useful if it is known a priori that some of the coefficients + are zero. For instance, if the function is even then the coefficients of + the terms of odd degree in the result can be set to zero. + + """ + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int") + if deg < 0: + raise ValueError("expected deg >= 0") + + order = deg + 1 + xcheb = chebpts1(order) + yfunc = func(xcheb, *args) + m = chebvander(xcheb, deg) + c = np.dot(m.T, yfunc) + c[0] /= order + c[1:] /= 0.5*order + + return c + + +def chebgauss(deg): + """ + Gauss-Chebyshev quadrature. + + Computes the sample points and weights for Gauss-Chebyshev quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100; higher degrees may + be problematic. For Gauss-Chebyshev there are closed form solutions for + the sample points and weights. If n = `deg`, then + + .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n)) + + .. math:: w_i = \\pi / n + + """ + ideg = int(deg) + if ideg != deg or ideg < 1: + raise ValueError("deg must be a positive integer") + + x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg)) + w = np.ones(ideg)*(np.pi/ideg) + + return x, w + + +def chebweight(x): + """ + The weight function of the Chebyshev polynomials. + + The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of + integration is :math:`[-1, 1]`. The Chebyshev polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + ..
versionadded:: 1.7.0 + + """ + w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x)) + return w + + +def chebpts1(npts): + """ + Chebyshev points of the first kind. + + The Chebyshev points of the first kind are the points ``cos(x)``, + where ``x = [pi*(k + .5)/npts for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the first kind. + + See Also + -------- + chebpts2 + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 1: + raise ValueError("npts must be >= 1") + + x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts) + return np.cos(x) + + +def chebpts2(npts): + """ + Chebyshev points of the second kind. + + The Chebyshev points of the second kind are the points ``cos(x)``, + where ``x = [pi*k/(npts - 1) for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the second kind. + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 2: + raise ValueError("npts must be >= 2") + + x = np.linspace(-np.pi, 0, _npts) + return np.cos(x) + + +# +# Chebyshev series class +# + +class Chebyshev(ABCPolyBase): + """A Chebyshev series class. + + The Chebyshev class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + methods listed below. + + Parameters + ---------- + coef : array_like + Chebyshev coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(chebadd) + _sub = staticmethod(chebsub) + _mul = staticmethod(chebmul) + _div = staticmethod(chebdiv) + _pow = staticmethod(chebpow) + _val = staticmethod(chebval) + _int = staticmethod(chebint) + _der = staticmethod(chebder) + _fit = staticmethod(chebfit) + _line = staticmethod(chebline) + _roots = staticmethod(chebroots) + _fromroots = staticmethod(chebfromroots) + + @classmethod + def interpolate(cls, func, deg, domain=None, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the series that interpolates `func` at the Chebyshev points of + the first kind scaled and shifted to the `domain`. The resulting series + tends to a minmax approximation of `func` when the function is + continuous in the domain. + + .. versionadded:: 1.14.0 + + Parameters + ---------- + func : function + The function to be interpolated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + domain : {None, [beg, end]}, optional + Domain over which `func` is interpolated. The default is None, in + which case the domain is [-1, 1]. + args : tuple, optional + Extra arguments to be used in the function call. Default is no + extra arguments. 
+ + Returns + ------- + polynomial : Chebyshev instance + Interpolating Chebyshev instance. + + Notes + ----- + See `numpy.polynomial.chebyshev.chebinterpolate` for more details. + + """ + if domain is None: + domain = cls.domain + xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args) + coef = chebinterpolate(xfunc, deg) + return cls(coef, domain=domain) + + # Virtual properties + nickname = 'cheb' + domain = np.array(chebdomain) + window = np.array(chebdomain) + basis_name = 'T' diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/chebyshev.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/chebyshev.pyc new file mode 100644 index 0000000..abc48a7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/chebyshev.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite.py new file mode 100644 index 0000000..4905f36 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite.py @@ -0,0 +1,1859 @@ +""" +Objects for dealing with Hermite series. + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite series, including a `Hermite` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `hermdomain` -- Hermite series default domain, [-1,1]. +- `hermzero` -- Hermite series that evaluates identically to 0. +- `hermone` -- Hermite series that evaluates identically to 1. +- `hermx` -- Hermite series for the identity map, ``f(x) = x``. + +Arithmetic +---------- +- `hermadd` -- add two Hermite series. +- `hermsub` -- subtract one Hermite series from another. +- `hermmulx` -- multiply a Hermite series by ``x``. +- `hermmul` -- multiply two Hermite series. +- `hermdiv` -- divide one Hermite series by another. +- `hermpow` -- raise a Hermite series to a positive integer power. +- `hermval` -- evaluate a Hermite series at given points. +- `hermval2d` -- evaluate a 2D Hermite series at given points. +- `hermval3d` -- evaluate a 3D Hermite series at given points. +- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product. +- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product. + +Calculus +-------- +- `hermder` -- differentiate a Hermite series. +- `hermint` -- integrate a Hermite series. + +Misc Functions +-------------- +- `hermfromroots` -- create a Hermite series with specified roots. +- `hermroots` -- find the roots of a Hermite series. +- `hermvander` -- Vandermonde-like matrix for Hermite polynomials. +- `hermvander2d` -- Vandermonde-like matrix for 2D Hermite series. +- `hermvander3d` -- Vandermonde-like matrix for 3D Hermite series. +- `hermgauss` -- Gauss-Hermite quadrature, points and weights. +- `hermweight` -- Hermite weight function. +- `hermcompanion` -- symmetrized companion matrix in Hermite form. +- `hermfit` -- least-squares fit returning a Hermite series. +- `hermtrim` -- trim leading coefficients from a Hermite series. +- `hermline` -- Hermite series of given straight line. +- `herm2poly` -- convert a Hermite series to a polynomial. +- `poly2herm` -- convert a polynomial to a Hermite series. + +Classes +------- +- `Hermite` -- A Hermite series class.
+ +See also +-------- +`numpy.polynomial` + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', + 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', + 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', + 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', + 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] + +hermtrim = pu.trimcoef + + +def poly2herm(pol): + """ + poly2herm(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herm2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import poly2herm + >>> poly2herm(np.arange(4)) + array([ 1. , 2.75 , 0.5 , 0.375]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermadd(hermmulx(res), pol[i]) + return res + + +def herm2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herm + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import herm2poly + >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) + array([ 0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + c[1] *= 2 + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1*(2*(i - 1))) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)*2) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermdomain = np.array([-1, 1]) + +# Hermite coefficients representing zero. +hermzero = np.array([0]) + +# Hermite coefficients representing one. +hermone = np.array([1]) + +# Hermite coefficients representing the identity x. 
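+# Since H_1(x) = 2*x, the identity is x = 0*H_0(x) + (1/2)*H_1(x); hence the coefficients [0, 1/2] below.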
+hermx = np.array([0, 1/2]) + + +def hermline(off, scl): + """ + Hermite series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + polyline, chebline + + Examples + -------- + >>> from numpy.polynomial.hermite import hermline, hermval + >>> hermval(0,hermline(3, 2)) + 3.0 + >>> hermval(1,hermline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl/2]) + else: + return np.array([off]) + + +def hermfromroots(roots): + """ + Generate a Hermite series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Hermite form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Hermite form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + polyfromroots, legfromroots, lagfromroots, chebfromroots, + hermefromroots. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfromroots, hermval + >>> coef = hermfromroots((-1, 0, 1)) + >>> hermval((-1, 0, 1), coef) + array([ 0., 0., 0.]) + >>> coef = hermfromroots((-1j, 1j)) + >>> hermval((-1j, 1j), coef) + array([ 0.+0.j, 0.+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [hermline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [hermmul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = hermmul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def hermadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermsub, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." 
+ + Examples + -------- + >>> from numpy.polynomial.hermite import hermadd + >>> hermadd([1, 2, 3], [1, 2, 3, 4]) + array([ 2., 4., 6., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def hermsub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermadd, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermsub + >>> hermsub([1, 2, 3, 4], [1, 2, 3]) + array([ 0., 0., 0., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def hermmulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + hermadd, hermsub, hermmul, hermdiv, hermpow + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmulx + >>> hermmulx([1, 2, 3]) + array([ 2. , 6.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0]/2 + for i in range(1, len(c)): + prd[i + 1] = c[i]/2 + prd[i - 1] += c[i]*i + return prd + + +def hermmul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermadd, hermsub, hermmulx, hermdiv, hermpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. 
+ + Examples + -------- + >>> from numpy.polynomial.hermite import hermmul + >>> hermmul([1, 2, 3], [0, 1, 2]) + array([ 52., 29., 52., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) + c1 = hermadd(tmp, hermmulx(c1)*2) + return hermadd(c0, hermmulx(c1)*2) + + +def hermdiv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermpow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermdiv + >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 0.])) + >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 2., 2.])) + >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 1., 1.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = hermmul([0]*i + [1], c2) + q = rem[-1]/p[-1] + rem = rem[:-1] - q*p[:-1] + quo[i] = q + return quo, pu.trimseq(rem) + + +def hermpow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. 
+ + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermdiv + + Examples + -------- + >>> from numpy.polynomial.hermite import hermpow + >>> hermpow([1, 2, 3], 2) + array([ 81., 52., 82., 12., 9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = hermmul(prd, c) + return prd + + +def hermder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite series. + + Returns the Hermite series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` + while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If `c` is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermder + >>> hermder([ 1. , 0.5, 0.5, 0.5]) + array([ 1., 2., 3.]) + >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) + array([ 1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = (2*j)*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite series. + + Returns the Hermite series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. 
+ The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermint + >>> hermint([1,2,3]) # integrate once, value 0 at 0. + array([ 1. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) + >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. + array([ 2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 + array([-2. 
, 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) + array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of integration must be integer") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0]/2 + for j in range(1, n): + tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[0] += k[i] - hermval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermval(x, c, tensor=True): + """ + Evaluate an Hermite series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermval2d, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. 
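+ For a three-term series ``[a, b, d]``, for example, the recursion + computes ``(a - 2*d) + (b + 2*x*d)*2*x``, which expands to + ``a*H_0(x) + b*H_1(x) + d*H_2(x)`` because ``H_2(x) = 4*x**2 - 2``.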
+ + Examples + -------- + >>> from numpy.polynomial.hermite import hermval + >>> coef = [1,2,3] + >>> hermval(1, coef) + 11.0 + >>> hermval([[1,2],[3,4]], coef) + array([[ 11., 51.], + [ 115., 203.]]) + + """ + c = np.array(c, ndmin=1, copy=0) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + x2 = x*2 + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1*(2*(nd - 1)) + c1 = tmp + c1*x2 + return c0 + c1*x2 + + +def hermval2d(x, y, c): + """ + Evaluate a 2-D Hermite series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermval, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except Exception: + raise ValueError('x, y are incompatible') + + c = hermval(x, c) + c = hermval(y, c, tensor=False) + return c + + +def hermgrid2d(x, y, c): + """ + Evaluate a 2-D Hermite series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. 
If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermval3d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + c = hermval(x, c) + c = hermval(y, c) + return c + + +def hermval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except Exception: + raise ValueError('x, y, z are incompatible') + + c = hermval(x, c) + c = hermval(y, c, tensor=False) + c = hermval(z, c, tensor=False) + return c + + +def hermgrid3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. 
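+ For example, 1-D ``x``, ``y`` and ``z`` of lengths 2, 3 and 4 together + with a 3-D `c` produce a result of shape ``(2, 3, 4)``.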
+ + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + c = hermval(x, c) + c = hermval(y, c) + c = hermval(z, c) + return c + + +def hermvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = H_i(x), + + where `0 <= i <= deg`. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Hermite polynomial. + + If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the + array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and + ``hermval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Hermite series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Hermite polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermvander + >>> x = np.array([-1, 0, 1]) + >>> hermvander(x, 3) + array([[ 1., -2., 2., 4.], + [ 1., 0., -2., -0.], + [ 1., 2., 2., -4.]]) + + """ + ideg = int(deg) + if ideg != deg: + raise ValueError("deg must be integer") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=0, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x*0 + 1 + if ideg > 0: + x2 = x*2 + v[1] = x2 + for i in range(2, ideg + 1): + v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1))) + return np.moveaxis(v, 0, -1) + + +def hermvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y)`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y), + + where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of + `V` index the points `(x, y)` and the last index encodes the degrees of + the Hermite polynomials. + + If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... 
+ + and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Hermite + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + hermvander, hermvander3d, hermval2d, hermval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy = ideg + x, y = np.array((x, y), copy=0) + 0.0 + + vx = hermvander(x, degx) + vy = hermvander(y, degy) + v = vx[..., None]*vy[..., None,:] + return v.reshape(v.shape[:-2] + (-1,)) + + +def hermvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, + then the pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z), + + where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading + indices of `V` index the points `(x, y, z)` and the last index encodes + the degrees of the Hermite polynomials. + + If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Hermite + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + hermvander, hermvander3d, hermval2d, hermval3d + + Notes + ----- + + ..
versionadded:: 1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy, degz = ideg + x, y, z = np.array((x, y, z), copy=0) + 0.0 + + vx = hermvander(x, degx) + vy = hermvander(y, degy) + vz = hermvander(z, degz) + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] + return v.reshape(v.shape[:-3] + (-1,)) + + +def hermfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Hermite series to data. + + Return the coefficients of a Hermite series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, legfit, lagfit, polyfit, hermefit + hermval : Evaluates a Hermite series. + hermvander : Vandermonde matrix of Hermite series. 
+ hermweight : Hermite weight function + linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Hermite series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Hermite series are probably most useful when the data can be + approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite + weight. In that case the weight ``sqrt(w(x[i])`` should be used + together with data values ``y[i]/sqrt(w(x[i])``. The weight function is + available as `hermweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfit, hermval + >>> x = np.linspace(-10, 10) + >>> err = np.random.randn(len(x))/10 + >>> y = hermval(x, [1, 2, 3]) + err + >>> hermfit(x, y, 2) + array([ 0.97902637, 1.99849131, 3.00006 ]) + + """ + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") + if deg.min() < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + if deg.ndim == 0: + lmax = deg + order = lmax + 1 + van = hermvander(x, lmax) + else: + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = hermvander(x, lmax)[:, deg] + + # set up the least squares matrices in transposed form + lhs = van.T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. 
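+    # lstsq is called on the column-normalized design matrix (each column
+    # of van was divided by its Euclidean norm, kept in scl), which
+    # improves the conditioning of the SVD-based solve; the coefficients
+    # are rescaled back immediately after.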
+ c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # Expand c to include non-fitted coefficients which are set to zero + if deg.ndim > 0: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning, stacklevel=2) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def hermcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Hermite basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-.5*c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.sqrt(.5*np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl*c[:-1]/(2.0*c[-1]) + return mat + + +def hermroots(c): + """ + Compute the roots of a Hermite series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * H_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + polyroots, legroots, lagroots, chebroots, hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Hermite series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermroots, hermfromroots + >>> coef = hermfromroots([-1, 0, 1]) + >>> coef + array([ 0. , 0.25 , 0. , 0.125]) + >>> hermroots(coef) + array([ -1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-.5*c[0]/c[1]]) + + m = hermcompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_n(x, n): + """ + Evaluate a normalized Hermite polynomial. + + Compute the value of the normalized Hermite polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. 
+ Points at which to evaluate the function + n : int + Degree of the normalized Hermite function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + .. versionadded:: 1.10.0 + + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard Hermite functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi))) + + c0 = 0. + c1 = 1./np.sqrt(np.sqrt(np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1*np.sqrt((nd - 1.)/nd) + c1 = tmp + c1*x*np.sqrt(2./nd) + nd = nd - 1.0 + return c0 + c1*x*np.sqrt(2) + + +def hermgauss(deg): + """ + Gauss-Hermite quadrature. + + Computes the sample points and weights for Gauss-Hermite quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`H_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = int(deg) + if ideg != deg or ideg < 1: + raise ValueError("deg must be a non-negative integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1], dtype=np.float64) + m = hermcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_n(x, ideg) + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1/(fm * fm) + + # for Hermite we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= np.sqrt(np.pi) / w.sum() + + return x, w + + +def hermweight(x): + """ + Weight function of the Hermite polynomials. + + The weight function is :math:`\\exp(-x^2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. the Hermite polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = np.exp(-x**2) + return w + + +# +# Hermite series class +# + +class Hermite(ABCPolyBase): + """An Hermite series class. + + The Hermite class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Hermite coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(X) + 3*H_2(x)``. 
+ domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(hermadd) + _sub = staticmethod(hermsub) + _mul = staticmethod(hermmul) + _div = staticmethod(hermdiv) + _pow = staticmethod(hermpow) + _val = staticmethod(hermval) + _int = staticmethod(hermint) + _der = staticmethod(hermder) + _fit = staticmethod(hermfit) + _line = staticmethod(hermline) + _roots = staticmethod(hermroots) + _fromroots = staticmethod(hermfromroots) + + # Virtual properties + nickname = 'herm' + domain = np.array(hermdomain) + window = np.array(hermdomain) + basis_name = 'H' diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite.pyc new file mode 100644 index 0000000..8e6c29d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite_e.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite_e.py new file mode 100644 index 0000000..6cb044a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite_e.py @@ -0,0 +1,1852 @@ +""" +Objects for dealing with Hermite_e series. + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite_e series, including a `HermiteE` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `hermedomain` -- Hermite_e series default domain, [-1,1]. +- `hermezero` -- Hermite_e series that evaluates identically to 0. +- `hermeone` -- Hermite_e series that evaluates identically to 1. +- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``. + +Arithmetic +---------- +- `hermeadd` -- add two Hermite_e series. +- `hermesub` -- subtract one Hermite_e series from another. +- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. +- `hermemul` -- multiply two Hermite_e series. +- `hermediv` -- divide one Hermite_e series by another. +- `hermepow` -- raise a Hermite_e series to a positive integer power. +- `hermeval` -- evaluate a Hermite_e series at given points. +- `hermeval2d` -- evaluate a 2D Hermite_e series at given points. +- `hermeval3d` -- evaluate a 3D Hermite_e series at given points. +- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product. +- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product. + +Calculus +-------- +- `hermeder` -- differentiate a Hermite_e series. +- `hermeint` -- integrate a Hermite_e series. + +Misc Functions +-------------- +- `hermefromroots` -- create a Hermite_e series with specified roots. +- `hermeroots` -- find the roots of a Hermite_e series. +- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials. +- `hermevander2d` -- Vandermonde-like matrix for 2D power series. +- `hermevander3d` -- Vandermonde-like matrix for 3D power series. +- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights. +- `hermeweight` -- Hermite_e weight function. 
+- `hermecompanion` -- symmetrized companion matrix in Hermite_e form. +- `hermefit` -- least-squares fit returning a Hermite_e series. +- `hermetrim` -- trim leading coefficients from a Hermite_e series. +- `hermeline` -- Hermite_e series of given straight line. +- `herme2poly` -- convert a Hermite_e series to a polynomial. +- `poly2herme` -- convert a polynomial to a Hermite_e series. + +Classes +------- +- `HermiteE` -- A Hermite_e series class. + +See also +-------- +`numpy.polynomial` + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', + 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', + 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', + 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', + 'hermegauss', 'hermeweight'] + +hermetrim = pu.trimcoef + + +def poly2herme(pol): + """ + poly2herme(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herme2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import poly2herme + >>> poly2herme(np.arange(4)) + array([ 2., 10., 2., 3.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermeadd(hermemulx(res), pol[i]) + return res + + +def herme2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herme + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. 
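+    The inverse operation is `poly2herme`; applying the two in sequence
+    reproduces the original coefficients up to roundoff.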
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import herme2poly
+    >>> herme2poly([  2.,  10.,   2.,   3.])
+    array([ 0.,  1.,  2.,  3.])
+
+    """
+    from .polynomial import polyadd, polysub, polymulx
+
+    [c] = pu.as_series([c])
+    n = len(c)
+    if n == 1:
+        return c
+    if n == 2:
+        return c
+    else:
+        c0 = c[-2]
+        c1 = c[-1]
+        # i is the current degree of c1
+        for i in range(n - 1, 1, -1):
+            tmp = c0
+            c0 = polysub(c[i - 2], c1*(i - 1))
+            c1 = polyadd(tmp, polymulx(c1))
+        return polyadd(c0, polymulx(c1))
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Hermite
+hermedomain = np.array([-1, 1])
+
+# Hermite coefficients representing zero.
+hermezero = np.array([0])
+
+# Hermite coefficients representing one.
+hermeone = np.array([1])
+
+# Hermite coefficients representing the identity x.
+hermex = np.array([0, 1])
+
+
+def hermeline(off, scl):
+    """
+    Hermite series whose graph is a straight line.
+
+    Parameters
+    ----------
+    off, scl : scalars
+        The specified line is given by ``off + scl*x``.
+
+    Returns
+    -------
+    y : ndarray
+        This module's representation of the Hermite series for
+        ``off + scl*x``.
+
+    See Also
+    --------
+    polyline, chebline
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
+    >>> hermeval(0, hermeline(3, 2))
+    3.0
+    >>> hermeval(1, hermeline(3, 2))
+    5.0
+
+    """
+    if scl != 0:
+        return np.array([off, scl])
+    else:
+        return np.array([off])
+
+
+def hermefromroots(roots):
+    """
+    Generate a HermiteE series with given roots.
+
+    The function returns the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    in HermiteE form, where the `r_n` are the roots specified in `roots`.
+    If a zero has multiplicity n, then it must appear in `roots` n times.
+    For instance, if 2 is a root of multiplicity three and 3 is a root of
+    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+    roots can appear in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
+
+    The coefficient of the last term is not generally 1 for monic
+    polynomials in HermiteE form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of coefficients. If all roots are real then `out` is a
+        real array, if some of the roots are complex, then `out` is complex
+        even if all the coefficients in the result are real (see Examples
+        below).
+
+    See Also
+    --------
+    polyfromroots, legfromroots, lagfromroots, hermfromroots,
+    chebfromroots
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
+    >>> coef = hermefromroots((-1, 0, 1))
+    >>> hermeval((-1, 0, 1), coef)
+    array([ 0.,  0.,  0.])
+    >>> coef = hermefromroots((-1j, 1j))
+    >>> hermeval((-1j, 1j), coef)
+    array([ 0.+0.j,  0.+0.j])
+
+    """
+    if len(roots) == 0:
+        return np.ones(1)
+    else:
+        [roots] = pu.as_series([roots], trim=False)
+        roots.sort()
+        p = [hermeline(-r, 1) for r in roots]
+        n = len(p)
+        while n > 1:
+            m, r = divmod(n, 2)
+            tmp = [hermemul(p[i], p[i+m]) for i in range(m)]
+            if r:
+                tmp[0] = hermemul(tmp[0], p[-1])
+            p = tmp
+            n = m
+        return p[0]
+
+
+def hermeadd(c1, c2):
+    """
+    Add one Hermite series to another.
+
+    Returns the sum of two Hermite series `c1` + `c2`. The arguments
+    are sequences of coefficients ordered from lowest order term to
+    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Array representing the Hermite series of their sum.
+
+    See Also
+    --------
+    hermesub, hermemulx, hermemul, hermediv, hermepow
+
+    Notes
+    -----
+    Unlike multiplication, division, etc., the sum of two Hermite series
+    is a Hermite series (without having to "reproject" the result onto
+    the basis set) so addition, just like that of "standard" polynomials,
+    is simply "component-wise."
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeadd
+    >>> hermeadd([1, 2, 3], [1, 2, 3, 4])
+    array([ 2.,  4.,  6.,  4.])
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+    if len(c1) > len(c2):
+        c1[:c2.size] += c2
+        ret = c1
+    else:
+        c2[:c1.size] += c1
+        ret = c2
+    return pu.trimseq(ret)
+
+
+def hermesub(c1, c2):
+    """
+    Subtract one Hermite series from another.
+
+    Returns the difference of two Hermite series `c1` - `c2`. The
+    sequences of coefficients are from lowest order term to highest, i.e.,
+    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Hermite series coefficients representing their difference.
+
+    See Also
+    --------
+    hermeadd, hermemulx, hermemul, hermediv, hermepow
+
+    Notes
+    -----
+    Unlike multiplication, division, etc., the difference of two Hermite
+    series is a Hermite series (without having to "reproject" the result
+    onto the basis set) so subtraction, just like that of "standard"
+    polynomials, is simply "component-wise."
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermesub
+    >>> hermesub([1, 2, 3, 4], [1, 2, 3])
+    array([ 0.,  0.,  0.,  4.])
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+    if len(c1) > len(c2):
+        c1[:c2.size] -= c2
+        ret = c1
+    else:
+        c2 = -c2
+        c2[:c1.size] += c1
+        ret = c2
+    return pu.trimseq(ret)
+
+
+def hermemulx(c):
+    """Multiply a Hermite series by x.
+
+    Multiply the Hermite series `c` by x, where x is the independent
+    variable.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Array representing the result of the multiplication.
+
+    Notes
+    -----
+    The multiplication uses the recursion relationship for Hermite
+    polynomials in the form
+
+    .. math::
+
+        xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermemulx
+    >>> hermemulx([1, 2, 3])
+    array([ 2.,  7.,  2.,  3.])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    # The zero series needs special treatment
+    if len(c) == 1 and c[0] == 0:
+        return c
+
+    prd = np.empty(len(c) + 1, dtype=c.dtype)
+    prd[0] = c[0]*0
+    prd[1] = c[0]
+    for i in range(1, len(c)):
+        prd[i + 1] = c[i]
+        prd[i - 1] += c[i]*i
+    return prd
+
+
+def hermemul(c1, c2):
+    """
+    Multiply one Hermite series by another.
+
+    Returns the product of two Hermite series `c1` * `c2`. The arguments
+    are sequences of coefficients, from lowest order "term" to highest,
+    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermediv, hermepow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermemul + >>> hermemul([1, 2, 3], [0, 1, 2]) + array([ 14., 15., 28., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermesub(c[-i]*xs, c1*(nd - 1)) + c1 = hermeadd(tmp, hermemulx(c1)) + return hermeadd(c0, hermemulx(c1)) + + +def hermediv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermepow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermediv + >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 0.])) + >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 1., 2.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = hermemul([0]*i + [1], c2) + q = rem[-1]/p[-1] + rem = rem[:-1] - q*p[:-1] + quo[i] = q + return quo, pu.trimseq(rem) + + +def hermepow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. 
This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermediv + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermepow + >>> hermepow([1, 2, 3], 2) + array([ 23., 28., 46., 12., 9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = hermemul(prd, c) + return prd + + +def hermeder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite_e series. + + Returns the series coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` + while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 + is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermeint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeder + >>> hermeder([ 1., 1., 1., 1.]) + array([ 1., 2., 3.]) + >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) + array([ 1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + return c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = j*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite_e series. 
+ + Returns the Hermite_e series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite_e series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermeder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeint + >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. + array([ 1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) + >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. + array([ 2., 1., 1., 1.]) + >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 + array([-1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) + array([ 1.83333333, 0. 
, 0.5 , 0.33333333, 0.25 ]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of integration must be integer") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j]/(j + 1) + tmp[0] += k[i] - hermeval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeval(x, c, tensor=True): + """ + Evaluate an HermiteE series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermeval2d, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. 
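+    For the HermiteE basis this amounts to running the three-term
+    recurrence ``He_{n+1}(x) = x*He_n(x) - n*He_{n-1}(x)`` backwards from
+    the two highest coefficients.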
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeval + >>> coef = [1,2,3] + >>> hermeval(1, coef) + 3.0 + >>> hermeval([[1,2],[3,4]], coef) + array([[ 3., 14.], + [ 31., 54.]]) + + """ + c = np.array(c, ndmin=1, copy=0) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1*(nd - 1) + c1 = tmp + c1*x + return c0 + c1*x + + +def hermeval2d(x, y, c): + """ + Evaluate a 2-D HermiteE series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermeval, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except Exception: + raise ValueError('x, y are incompatible') + + c = hermeval(x, c) + c = hermeval(y, c, tensor=False) + return c + + +def hermegrid2d(x, y, c): + """ + Evaluate a 2-D HermiteE series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. 
If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermeval, hermeval2d, hermeval3d, hermegrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + c = hermeval(x, c) + c = hermeval(y, c) + return c + + +def hermeval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite_e series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermegrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except Exception: + raise ValueError('x, y, z are incompatible') + + c = hermeval(x, c) + c = hermeval(y, c, tensor=False) + c = hermeval(z, c, tensor=False) + return c + + +def hermegrid3d(x, y, z, c): + """ + Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. 
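+    For 1-D `x`, `y`, and `z` this is equivalent to calling `hermeval3d`
+    on the arrays returned by ``np.meshgrid(x, y, z, indexing='ij')``, but
+    without having to build the full grid of points first.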
+ + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermeval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + c = hermeval(x, c) + c = hermeval(y, c) + c = hermeval(z, c) + return c + + +def hermevander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = He_i(x), + + where `0 <= i <= deg`. The leading indices of `V` index the elements of + `x` and the last index is the degree of the HermiteE polynomial. + + If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the + array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and + ``hermeval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of HermiteE series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding HermiteE polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermevander + >>> x = np.array([-1, 0, 1]) + >>> hermevander(x, 3) + array([[ 1., -1., 0., 2.], + [ 1., 0., -1., -0.], + [ 1., 1., 0., -2.]]) + + """ + ideg = int(deg) + if ideg != deg: + raise ValueError("deg must be integer") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=0, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x*0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = (v[i-1]*x - v[i-2]*(i - 1)) + return np.moveaxis(v, 0, -1) + + +def hermevander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y)`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y), + + where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of + `V` index the points `(x, y)` and the last index encodes the degrees of + the HermiteE polynomials. + + If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... 
+
+    and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    hermevander, hermevander3d, hermeval2d, hermeval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy = ideg
+    x, y = np.array((x, y), copy=0) + 0.0
+
+    vx = hermevander(x, degx)
+    vy = hermevander(y, degy)
+    v = vx[..., None]*vy[..., None, :]
+    return v.reshape(v.shape[:-2] + (-1,))
+
+
+def hermevander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the HermiteE polynomials.
+
+    If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermevander, hermevander2d, hermeval2d, hermeval3d
+
+    Notes
+    -----
+
+    ..
versionadded:: 1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy, degz = ideg + x, y, z = np.array((x, y, z), copy=0) + 0.0 + + vx = hermevander(x, degx) + vy = hermevander(y, degy) + vz = hermevander(z, degz) + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] + return v.reshape(v.shape[:-3] + (-1,)) + + +def hermefit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Hermite series to data. + + Return the coefficients of a HermiteE series of degree `deg` that is + the least squares fit to the data values `y` given at points `x`. If + `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D + multiple fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, legfit, polyfit, hermfit, polyfit + hermeval : Evaluates a Hermite series. + hermevander : pseudo Vandermonde matrix of Hermite series. + hermeweight : HermiteE weight function. 
+ linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the HermiteE series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` + are the coefficients to be solved for, and the elements of `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using HermiteE series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE + weight. In that case the weight ``sqrt(w(x[i])`` should be used + together with data values ``y[i]/sqrt(w(x[i])``. The weight function is + available as `hermeweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermefit, hermeval + >>> x = np.linspace(-10, 10) + >>> err = np.random.randn(len(x))/10 + >>> y = hermeval(x, [1, 2, 3]) + err + >>> hermefit(x, y, 2) + array([ 1.01690445, 1.99951418, 2.99948696]) + + """ + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") + if deg.min() < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + if deg.ndim == 0: + lmax = deg + order = lmax + 1 + van = hermevander(x, lmax) + else: + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = hermevander(x, lmax)[:, deg] + + # set up the least squares matrices in transposed form + lhs = van.T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. 
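+    # Same scheme as in the physicists' hermfit above: solve against the
+    # column-normalized design matrix (norms kept in scl) and undo the
+    # scaling on the coefficients right after the solve.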
+ c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # Expand c to include non-fitted coefficients which are set to zero + if deg.ndim > 0: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning, stacklevel=2) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def hermecompanion(c): + """ + Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an HermiteE basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of HermiteE series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.sqrt(np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl*c[:-1]/c[-1] + return mat + + +def hermeroots(c): + """ + Compute the roots of a HermiteE series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * He_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + polyroots, legroots, lagroots, hermroots, chebroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The HermiteE series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots + >>> coef = hermefromroots([-1, 0, 1]) + >>> coef + array([ 0., 2., 0., 1.]) + >>> hermeroots(coef) + array([-1., 0., 1.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + m = hermecompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_e_n(x, n): + """ + Evaluate a normalized HermiteE polynomial. + + Compute the value of the normalized HermiteE polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. 
+ Points at which to evaluate the function + n : int + Degree of the normalized HermiteE function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + .. versionadded:: 1.10.0 + + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard HermiteE functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi))) + + c0 = 0. + c1 = 1./np.sqrt(np.sqrt(2*np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1*np.sqrt((nd - 1.)/nd) + c1 = tmp + c1*x*np.sqrt(1./nd) + nd = nd - 1.0 + return c0 + c1*x + + +def hermegauss(deg): + """ + Gauss-HermiteE quadrature. + + Computes the sample points and weights for Gauss-HermiteE quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2/2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`He_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = int(deg) + if ideg != deg or ideg < 1: + raise ValueError("deg must be a non-negative integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = hermecompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_e_n(x, ideg) + df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_e_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1/(fm * fm) + + # for Hermite_e we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= np.sqrt(2*np.pi) / w.sum() + + return x, w + + +def hermeweight(x): + """Weight function of the Hermite_e polynomials. + + The weight function is :math:`\\exp(-x^2/2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. the HermiteE polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = np.exp(-.5*x**2) + return w + + +# +# HermiteE series class +# + +class HermiteE(ABCPolyBase): + """An HermiteE series class. + + The HermiteE class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + HermiteE coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. 
+ domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(hermeadd) + _sub = staticmethod(hermesub) + _mul = staticmethod(hermemul) + _div = staticmethod(hermediv) + _pow = staticmethod(hermepow) + _val = staticmethod(hermeval) + _int = staticmethod(hermeint) + _der = staticmethod(hermeder) + _fit = staticmethod(hermefit) + _line = staticmethod(hermeline) + _roots = staticmethod(hermeroots) + _fromroots = staticmethod(hermefromroots) + + # Virtual properties + nickname = 'herme' + domain = np.array(hermedomain) + window = np.array(hermedomain) + basis_name = 'He' diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite_e.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite_e.pyc new file mode 100644 index 0000000..1b8c072 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/hermite_e.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/laguerre.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/laguerre.py new file mode 100644 index 0000000..a116d20 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/laguerre.py @@ -0,0 +1,1809 @@ +""" +Objects for dealing with Laguerre series. + +This module provides a number of objects (mostly functions) useful for +dealing with Laguerre series, including a `Laguerre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `lagdomain` -- Laguerre series default domain, [-1,1]. +- `lagzero` -- Laguerre series that evaluates identically to 0. +- `lagone` -- Laguerre series that evaluates identically to 1. +- `lagx` -- Laguerre series for the identity map, ``f(x) = x``. + +Arithmetic +---------- +- `lagadd` -- add two Laguerre series. +- `lagsub` -- subtract one Laguerre series from another. +- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``. +- `lagmul` -- multiply two Laguerre series. +- `lagdiv` -- divide one Laguerre series by another. +- `lagpow` -- raise a Laguerre series to a positive integer power. +- `lagval` -- evaluate a Laguerre series at given points. +- `lagval2d` -- evaluate a 2D Laguerre series at given points. +- `lagval3d` -- evaluate a 3D Laguerre series at given points. +- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product. +- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product. + +Calculus +-------- +- `lagder` -- differentiate a Laguerre series. +- `lagint` -- integrate a Laguerre series. + +Misc Functions +-------------- +- `lagfromroots` -- create a Laguerre series with specified roots. +- `lagroots` -- find the roots of a Laguerre series. +- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials. +- `lagvander2d` -- Vandermonde-like matrix for 2D power series. +- `lagvander3d` -- Vandermonde-like matrix for 3D power series. +- `laggauss` -- Gauss-Laguerre quadrature, points and weights. +- `lagweight` -- Laguerre weight function. +- `lagcompanion` -- symmetrized companion matrix in Laguerre form. 
+- `lagfit` -- least-squares fit returning a Laguerre series. +- `lagtrim` -- trim leading coefficients from a Laguerre series. +- `lagline` -- Laguerre series of given straight line. +- `lag2poly` -- convert a Laguerre series to a polynomial. +- `poly2lag` -- convert a polynomial to a Laguerre series. + +Classes +------- +- `Laguerre` -- A Laguerre series class. + +See also +-------- +`numpy.polynomial` + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', + 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', + 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', + 'laggauss', 'lagweight'] + +lagtrim = pu.trimcoef + + +def poly2lag(pol): + """ + poly2lag(pol) + + Convert a polynomial to a Laguerre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Laguerre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Laguerre + series. + + See Also + -------- + lag2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import poly2lag + >>> poly2lag(np.arange(4)) + array([ 23., -63., 58., -18.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = lagadd(lagmulx(res), pol[i]) + return res + + +def lag2poly(c): + """ + Convert a Laguerre series to a polynomial. + + Convert an array representing the coefficients of a Laguerre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Laguerre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2lag + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. 
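+ For instance (a minimal sketch, assuming the `Laguerre` and + `Polynomial` classes exported by `numpy.polynomial`), + ``Laguerre([23., -63., 58., -18.]).convert(kind=Polynomial)`` produces + a `Polynomial` whose coefficients agree with + ``lag2poly([23., -63., 58., -18.])`` for the default domain and window.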
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lag2poly + >>> lag2poly([ 23., -63., 58., -18.]) + array([ 0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1*(i - 1))/i) + c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i) + return polyadd(c0, polysub(c1, polymulx(c1))) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Laguerre +lagdomain = np.array([0, 1]) + +# Laguerre coefficients representing zero. +lagzero = np.array([0]) + +# Laguerre coefficients representing one. +lagone = np.array([1]) + +# Laguerre coefficients representing the identity x. +lagx = np.array([1, -1]) + + +def lagline(off, scl): + """ + Laguerre series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Laguerre series for + ``off + scl*x``. + + See Also + -------- + polyline, chebline + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagline, lagval + >>> lagval(0,lagline(3, 2)) + 3.0 + >>> lagval(1,lagline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off + scl, -scl]) + else: + return np.array([off]) + + +def lagfromroots(roots): + """ + Generate a Laguerre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Laguerre form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Laguerre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + polyfromroots, legfromroots, chebfromroots, hermfromroots, + hermefromroots. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfromroots, lagval + >>> coef = lagfromroots((-1, 0, 1)) + >>> lagval((-1, 0, 1), coef) + array([ 0., 0., 0.]) + >>> coef = lagfromroots((-1j, 1j)) + >>> lagval((-1j, 1j), coef) + array([ 0.+0.j, 0.+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [lagline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [lagmul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = lagmul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def lagadd(c1, c2): + """ + Add one Laguerre series to another. + + Returns the sum of two Laguerre series `c1` + `c2`. 
The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Laguerre series of their sum. + + See Also + -------- + lagsub, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Laguerre series + is a Laguerre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagadd + >>> lagadd([1, 2, 3], [1, 2, 3, 4]) + array([ 2., 4., 6., 4.]) + + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def lagsub(c1, c2): + """ + Subtract one Laguerre series from another. + + Returns the difference of two Laguerre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their difference. + + See Also + -------- + lagadd, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Laguerre + series is a Laguerre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagsub + >>> lagsub([1, 2, 3, 4], [1, 2, 3]) + array([ 0., 0., 0., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def lagmulx(c): + """Multiply a Laguerre series by x. + + Multiply the Laguerre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + lagadd, lagsub, lagmul, lagdiv, lagpow + + Notes + ----- + The multiplication uses the recursion relationship for Laguerre + polynomials in the form + + .. math:: + + xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmulx + >>> lagmulx([1, 2, 3]) + array([ -1., -1., 11., -9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] + prd[1] = -c[0] + for i in range(1, len(c)): + prd[i + 1] = -c[i]*(i + 1) + prd[i] += c[i]*(2*i + 1) + prd[i - 1] -= c[i]*i + return prd + + +def lagmul(c1, c2): + """ + Multiply one Laguerre series by another. + + Returns the product of two Laguerre series `c1` * `c2`. 
The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their product. + + See Also + -------- + lagadd, lagsub, lagmulx, lagdiv, lagpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Laguerre polynomial basis set. Thus, to express + the product as a Laguerre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmul + >>> lagmul([1, 2, 3], [0, 1, 2]) + array([ 8., -13., 38., -51., 36.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) + c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) + return lagadd(c0, lagsub(c1, lagmulx(c1))) + + +def lagdiv(c1, c2): + """ + Divide one Laguerre series by another. + + Returns the quotient-with-remainder of two Laguerre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Laguerre series coefficients representing the quotient and + remainder. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagpow + + Notes + ----- + In general, the (polynomial) division of one Laguerre series by another + results in quotient and remainder terms that are not in the Laguerre + polynomial basis set. Thus, to express these results as a Laguerre + series, it is necessary to "reproject" the results onto the Laguerre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagdiv + >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 0.])) + >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 1., 1.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = lagmul([0]*i + [1], c2) + q = rem[-1]/p[-1] + rem = rem[:-1] - q*p[:-1] + quo[i] = q + return quo, pu.trimseq(rem) + + +def lagpow(c, pow, maxpower=16): + """Raise a Laguerre series to a power. + + Returns the Laguerre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. 
+ pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Laguerre series of power. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagdiv + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagpow + >>> lagpow([1, 2, 3], 2) + array([ 14., -16., 56., -72., 54.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = lagmul(prd, c) + return prd + + +def lagder(c, m=1, scl=1, axis=0): + """ + Differentiate a Laguerre series. + + Returns the Laguerre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Laguerre series of the derivative. + + See Also + -------- + lagint + + Notes + ----- + In general, the result of differentiating a Laguerre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. 
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagder + >>> lagder([ 1., 1., 1., -3.]) + array([ 1., 2., 3.]) + >>> lagder([ 1., 0., 0., -4., 3.], m=2) + array([ 1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 1, -1): + der[j - 1] = -c[j] + c[j - 1] += c[j] + der[0] = -c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Laguerre series. + + Returns the Laguerre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Laguerre series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + lagder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. 
Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagint + >>> lagint([1,2,3]) + array([ 1., 1., 1., -3.]) + >>> lagint([1,2,3], m=2) + array([ 1., 0., 0., -4., 3.]) + >>> lagint([1,2,3], k=1) + array([ 2., 1., 1., -3.]) + >>> lagint([1,2,3], lbnd=-1) + array([ 11.5, 1. , 1. , -3. ]) + >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) + array([ 11.16666667, -5. , -3. , 2. ]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of integration must be integer") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] + tmp[1] = -c[0] + for j in range(1, n): + tmp[j] += c[j] + tmp[j + 1] = -c[j] + tmp[0] += k[i] - lagval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagval(x, c, tensor=True): + """ + Evaluate a Laguerre series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. 
This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + lagval2d, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval + >>> coef = [1,2,3] + >>> lagval(1, coef) + -0.5 + >>> lagval([[1,2],[3,4]], coef) + array([[-0.5, -4. ], + [-4.5, -2. ]]) + + """ + c = np.array(c, ndmin=1, copy=0) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*((2*nd - 1) - x))/nd + return c0 + c1*(1 - x) + + +def lagval2d(x, y, c): + """ + Evaluate a 2-D Laguerre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array, a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + lagval, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except Exception: + raise ValueError('x, y are incompatible') + + c = lagval(x, c) + c = lagval(y, c, tensor=False) + return c + + +def laggrid2d(x, y, c): + """ + Evaluate a 2-D Laguerre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`.
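+ For example, ``x = [1, 2]`` and ``y = [3, 4]`` evaluate the series at + the four grid points ``(1, 3)``, ``(1, 4)``, ``(2, 3)`` and ``(2, 4)``, + returned as a 2x2 array.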
+ + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in `c[i,j]`. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Laguerre series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + lagval, lagval2d, lagval3d, laggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + c = lagval(x, c) + c = lagval(y, c) + return c + + +def lagval3d(x, y, z, c): + """ + Evaluate a 3-D Laguerre series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial at points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + lagval, lagval2d, laggrid2d, laggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except Exception: + raise ValueError('x, y, z are incompatible') + + c = lagval(x, c) + c = lagval(y, c, tensor=False) + c = lagval(z, c, tensor=False) + return c + + +def laggrid3d(x, y, z, c): + """ + Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars.
In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + lagval, lagval2d, laggrid2d, lagval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + c = lagval(x, c) + c = lagval(y, c) + c = lagval(z, c) + return c + + +def lagvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = L_i(x) + + where `0 <= i <= deg`. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Laguerre polynomial. + + If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the + array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and + ``lagval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Laguerre series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Laguerre polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagvander + >>> x = np.array([0, 1, 2]) + >>> lagvander(x, 3) + array([[ 1. , 1. , 1. , 1. ], + [ 1. , 0. , -0.5 , -0.66666667], + [ 1. , -1. , -1. , -0.33333333]]) + + """ + ideg = int(deg) + if ideg != deg: + raise ValueError("deg must be integer") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=0, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x*0 + 1 + if ideg > 0: + v[1] = 1 - x + for i in range(2, ideg + 1): + v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i + return np.moveaxis(v, 0, -1) + + +def lagvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y)`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y), + + where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. 
The leading indices of + `V` index the points `(x, y)` and the last index encodes the degrees of + the Laguerre polynomials. + + If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Laguerre + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + lagvander, lagvander3d, lagval2d, lagval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy = ideg + x, y = np.array((x, y), copy=0) + 0.0 + + vx = lagvander(x, degx) + vy = lagvander(y, degy) + v = vx[..., None]*vy[..., None,:] + return v.reshape(v.shape[:-2] + (-1,)) + + +def lagvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, + then the pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), + + where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading + indices of `V` index the points `(x, y, z)` and the last index encodes + the degrees of the Laguerre polynomials. + + If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Laguerre + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + lagvander, lagvander2d, lagval2d, lagval3d + + Notes + ----- + + ..
versionadded:: 1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy, degz = ideg + x, y, z = np.array((x, y, z), copy=0) + 0.0 + + vx = lagvander(x, degx) + vy = lagvander(y, degy) + vz = lagvander(z, degz) + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] + return v.reshape(v.shape[:-3] + (-1,)) + + +def lagfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Laguerre series to data. + + Return the coefficients of a Laguerre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Laguerre coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, legfit, polyfit, hermfit, hermefit + lagval : Evaluates a Laguerre series. + lagvander : pseudo Vandermonde matrix of Laguerre series. + lagweight : Laguerre weight function. 
+ linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Laguerre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Laguerre series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre + weight. In that case the weight ``sqrt(w(x[i])`` should be used + together with data values ``y[i]/sqrt(w(x[i])``. The weight function is + available as `lagweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfit, lagval + >>> x = np.linspace(0, 10) + >>> err = np.random.randn(len(x))/10 + >>> y = lagval(x, [1, 2, 3]) + err + >>> lagfit(x, y, 2) + array([ 0.96971004, 2.00193749, 3.00288744]) + + """ + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") + if deg.min() < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + if deg.ndim == 0: + lmax = deg + order = lmax + 1 + van = lagvander(x, lmax) + else: + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = lagvander(x, lmax)[:, deg] + + # set up the least squares matrices in transposed form + lhs = van.T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. 
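+ # The column norms in `scl` (computed above) normalize the design + # matrix before the solve, so `rcond` compares like-scaled singular + # values; the coefficients are rescaled by `1/scl` just after.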
+ c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # Expand c to include non-fitted coefficients which are set to zero + if deg.ndim > 0: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning, stacklevel=2) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def lagcompanion(c): + """ + Return the companion matrix of c. + + The usual companion matrix of the Laguerre polynomials is already + symmetric when `c` is a basis Laguerre polynomial, so no scaling is + applied. + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[1 + c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + top = mat.reshape(-1)[1::n+1] + mid = mat.reshape(-1)[0::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = -np.arange(1, n) + mid[...] = 2.*np.arange(n) + 1. + bot[...] = top + mat[:, -1] += (c[:-1]/c[-1])*n + return mat + + +def lagroots(c): + """ + Compute the roots of a Laguerre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + polyroots, legroots, chebroots, hermroots, hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Laguerre series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagroots, lagfromroots + >>> coef = lagfromroots([0, 1, 2]) + >>> coef + array([ 2., -8., 12., -6.]) + >>> lagroots(coef) + array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([1 + c[0]/c[1]]) + + m = lagcompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +def laggauss(deg): + """ + Gauss-Laguerre quadrature. + + Computes the sample points and weights for Gauss-Laguerre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[0, \\inf]` + with the weight function :math:`f(x) = \\exp(-x)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. 
+ + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100 higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = int(deg) + if ideg != deg or ideg < 1: + raise ValueError("deg must be a non-negative integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = lagcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = lagval(x, c) + df = lagval(x, lagder(c)) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = lagval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1/(fm * df) + + # scale w to get the right value, 1 in this case + w /= w.sum() + + return x, w + + +def lagweight(x): + """Weight function of the Laguerre polynomials. + + The weight function is :math:`exp(-x)` and the interval of integration + is :math:`[0, \\inf]`. The Laguerre polynomials are orthogonal, but not + normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = np.exp(-x) + return w + +# +# Laguerre series class +# + +class Laguerre(ABCPolyBase): + """A Laguerre series class. + + The Laguerre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Laguerre coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(X) + 3*L_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [0, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [0, 1]. + + .. 
versionadded:: 1.6.0
+
+    """
+    # Virtual Functions
+    _add = staticmethod(lagadd)
+    _sub = staticmethod(lagsub)
+    _mul = staticmethod(lagmul)
+    _div = staticmethod(lagdiv)
+    _pow = staticmethod(lagpow)
+    _val = staticmethod(lagval)
+    _int = staticmethod(lagint)
+    _der = staticmethod(lagder)
+    _fit = staticmethod(lagfit)
+    _line = staticmethod(lagline)
+    _roots = staticmethod(lagroots)
+    _fromroots = staticmethod(lagfromroots)
+
+    # Virtual properties
+    nickname = 'lag'
+    domain = np.array(lagdomain)
+    window = np.array(lagdomain)
+    basis_name = 'L'
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/laguerre.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/laguerre.pyc
new file mode 100644
index 0000000..252cf00
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/laguerre.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/legendre.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/legendre.py
new file mode 100644
index 0000000..e9c2459
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/legendre.py
@@ -0,0 +1,1844 @@
+"""
+Legendre Series (:mod:`numpy.polynomial.legendre`)
+==================================================
+
+.. currentmodule:: numpy.polynomial.legendre
+
+This module provides a number of objects (mostly functions) useful for
+dealing with Legendre series, including a `Legendre` class that
+encapsulates the usual arithmetic operations. (General information
+on how this module represents and works with such polynomials is in the
+docstring for its "parent" sub-package, `numpy.polynomial`).
+
+Constants
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   legdomain            Legendre series default domain, [-1,1].
+   legzero              Legendre series that evaluates identically to 0.
+   legone               Legendre series that evaluates identically to 1.
+   legx                 Legendre series for the identity map, ``f(x) = x``.
+
+Arithmetic
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   legadd               add two Legendre series.
+   legsub               subtract one Legendre series from another.
+   legmulx              multiply a Legendre series in ``P_i(x)`` by ``x``.
+   legmul               multiply two Legendre series.
+   legdiv               divide one Legendre series by another.
+   legpow               raise a Legendre series to a positive integer power.
+   legval               evaluate a Legendre series at given points.
+   legval2d             evaluate a 2D Legendre series at given points.
+   legval3d             evaluate a 3D Legendre series at given points.
+   leggrid2d            evaluate a 2D Legendre series on a Cartesian product.
+   leggrid3d            evaluate a 3D Legendre series on a Cartesian product.
+
+Calculus
+--------
+
+.. autosummary::
+   :toctree: generated/
+
+   legder               differentiate a Legendre series.
+   legint               integrate a Legendre series.
+
+Misc Functions
+--------------
+
+.. autosummary::
+   :toctree: generated/
+
+   legfromroots         create a Legendre series with specified roots.
+   legroots             find the roots of a Legendre series.
+   legvander            Vandermonde-like matrix for Legendre polynomials.
+   legvander2d          Vandermonde-like matrix for 2D Legendre series.
+   legvander3d          Vandermonde-like matrix for 3D Legendre series.
+   leggauss             Gauss-Legendre quadrature, points and weights.
+   legweight            Legendre weight function.
+   legcompanion         symmetrized companion matrix in Legendre form.
+   legfit               least-squares fit returning a Legendre series.
+   legtrim              trim leading coefficients from a Legendre series.
+   legline              Legendre series representing given straight line.
+   leg2poly             convert a Legendre series to a polynomial.
+ poly2leg convert a polynomial to a Legendre series. + +Classes +------- + Legendre A Legendre series class. + +See also +-------- +numpy.polynomial.polynomial +numpy.polynomial.chebyshev +numpy.polynomial.laguerre +numpy.polynomial.hermite +numpy.polynomial.hermite_e + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', + 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', + 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', + 'leggauss', 'legweight'] + +legtrim = pu.trimcoef + + +def poly2leg(pol): + """ + Convert a polynomial to a Legendre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Legendre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Legendre + series. + + See Also + -------- + leg2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(np.arange(4)) + >>> p + Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) + >>> c + Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = legadd(legmulx(res), pol[i]) + return res + + +def leg2poly(c): + """ + Convert a Legendre series to a polynomial. + + Convert an array representing the coefficients of a Legendre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Legendre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2leg + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> c = P.Legendre(range(4)) + >>> c + Legendre([ 0., 1., 2., 3.], [-1., 1.]) + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.]) + >>> P.leg2poly(range(4)) + array([-1. , -3.5, 3. 
, 7.5]) + + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1*(i - 1))/i) + c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) + return polyadd(c0, polymulx(c1)) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Legendre +legdomain = np.array([-1, 1]) + +# Legendre coefficients representing zero. +legzero = np.array([0]) + +# Legendre coefficients representing one. +legone = np.array([1]) + +# Legendre coefficients representing the identity x. +legx = np.array([0, 1]) + + +def legline(off, scl): + """ + Legendre series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Legendre series for + ``off + scl*x``. + + See Also + -------- + polyline, chebline + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legline(3,2) + array([3, 2]) + >>> L.legval(-3, L.legline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def legfromroots(roots): + """ + Generate a Legendre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Legendre form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Legendre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + polyfromroots, chebfromroots, lagfromroots, hermfromroots, + hermefromroots. + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.4, 0. , 0.4]) + >>> j = complex(0,1) + >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [legline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [legmul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = legmul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def legadd(c1, c2): + """ + Add one Legendre series to another. + + Returns the sum of two Legendre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. 
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Legendre series of their sum. + + See Also + -------- + legsub, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Legendre series + is a Legendre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legadd(c1,c2) + array([ 4., 4., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def legsub(c1, c2): + """ + Subtract one Legendre series from another. + + Returns the difference of two Legendre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their difference. + + See Also + -------- + legadd, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Legendre + series is a Legendre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legsub(c1,c2) + array([-2., 0., 2.]) + >>> L.legsub(c2,c1) # -C.legsub(c1,c2) + array([ 2., 0., -2.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def legmulx(c): + """Multiply a Legendre series by x. + + Multiply the Legendre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + legadd, legmul, legmul, legdiv, legpow + + Notes + ----- + The multiplication uses the recursion relationship for Legendre + polynomials in the form + + .. math:: + + xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> L.legmulx([1,2,3]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0] + for i in range(1, len(c)): + j = i + 1 + k = i - 1 + s = i + j + prd[j] = (c[i]*j)/s + prd[k] += (c[i]*i)/s + return prd + + +def legmul(c1, c2): + """ + Multiply one Legendre series by another. + + Returns the product of two Legendre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. 
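+
+    As implemented below, the product is built with the same backward
+    recursion used by `legval`, with the scalar operations replaced by the
+    series operations `legadd`, `legsub`, and `legmulx`.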
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Legendre series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Legendre series coefficients representing their product.
+
+    See Also
+    --------
+    legadd, legsub, legmulx, legdiv, legpow
+
+    Notes
+    -----
+    In general, the (polynomial) product of two Legendre series results in
+    terms that are not in the Legendre polynomial basis set. Thus, to
+    express the product as a Legendre series, it is necessary to
+    "reproject" the product onto said basis set, which may produce
+    "unintuitive" (but correct) results; see Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import legendre as L
+    >>> c1 = (1,2,3)
+    >>> c2 = (3,2)
+    >>> L.legmul(c1,c2) # multiplication requires "reprojection"
+    array([  4.33333333,  10.4       ,  11.66666667,   3.6       ])
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+
+    if len(c1) > len(c2):
+        c = c2
+        xs = c1
+    else:
+        c = c1
+        xs = c2
+
+    if len(c) == 1:
+        c0 = c[0]*xs
+        c1 = 0
+    elif len(c) == 2:
+        c0 = c[0]*xs
+        c1 = c[1]*xs
+    else:
+        nd = len(c)
+        c0 = c[-2]*xs
+        c1 = c[-1]*xs
+        for i in range(3, len(c) + 1):
+            tmp = c0
+            nd = nd - 1
+            c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
+            c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
+    return legadd(c0, legmulx(c1))
+
+
+def legdiv(c1, c2):
+    """
+    Divide one Legendre series by another.
+
+    Returns the quotient-with-remainder of two Legendre series
+    `c1` / `c2`. The arguments are sequences of coefficients from lowest
+    order "term" to highest, e.g., [1,2,3] represents the series
+    ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Legendre series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    quo, rem : ndarrays
+        Of Legendre series coefficients representing the quotient and
+        remainder.
+
+    See Also
+    --------
+    legadd, legsub, legmulx, legmul, legpow
+
+    Notes
+    -----
+    In general, the (polynomial) division of one Legendre series by another
+    results in quotient and remainder terms that are not in the Legendre
+    polynomial basis set. Thus, to express these results as a Legendre
+    series, it is necessary to "reproject" the results onto the Legendre
+    basis set, which may produce "unintuitive" (but correct) results; see
+    Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import legendre as L
+    >>> c1 = (1,2,3)
+    >>> c2 = (3,2,1)
+    >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
+    (array([ 3.]), array([-8., -4.]))
+    >>> c2 = (0,1,2,3)
+    >>> L.legdiv(c2,c1) # neither "intuitive"
+    (array([-0.07407407,  1.66666667]), array([-1.03703704, -2.51851852]))
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+    if c2[-1] == 0:
+        raise ZeroDivisionError()
+
+    lc1 = len(c1)
+    lc2 = len(c2)
+    if lc1 < lc2:
+        return c1[:1]*0, c1
+    elif lc2 == 1:
+        return c1/c2[-1], c1[:1]*0
+    else:
+        quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
+        rem = c1
+        for i in range(lc1 - lc2, - 1, -1):
+            p = legmul([0]*i + [1], c2)
+            q = rem[-1]/p[-1]
+            rem = rem[:-1] - q*p[:-1]
+            quo[i] = q
+        return quo, pu.trimseq(rem)
+
+
+def legpow(c, pow, maxpower=16):
+    """Raise a Legendre series to a power.
+
+    Returns the Legendre series `c` raised to the power `pow`. The
+    argument `c` is a sequence of coefficients ordered from low to high,
+    i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Legendre series coefficients ordered from low to
+        high.
+ pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Legendre series of power. + + See Also + -------- + legadd, legsub, legmulx, legmul, legdiv + + Examples + -------- + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = legmul(prd, c) + return prd + + +def legder(c, m=1, scl=1, axis=0): + """ + Differentiate a Legendre series. + + Returns the Legendre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Legendre series of the derivative. + + See Also + -------- + legint + + Notes + ----- + In general, the result of differentiating a Legendre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. 
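+
+    As implemented below, the coefficients are processed from the highest
+    degree downward, using the derivative identity
+    :math:`(2j - 1) P_{j-1}(x) = P'_{j}(x) - P'_{j-2}(x)`.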
+ + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3,4) + >>> L.legder(c) + array([ 6., 9., 20.]) + >>> L.legder(c, 3) + array([ 60.]) + >>> L.legder(c, scl=-1) + array([ -6., -9., -20.]) + >>> L.legder(c, 2,-1) + array([ 9., 60.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j - 1)*c[j] + c[j - 2] += c[j] + if n > 1: + der[1] = 3*c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Legendre series. + + Returns the Legendre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Legendre series coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + legder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. 
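+
+    For example, under the substitution :math:`u = 2x` one has
+    :math:`dx = du/2`, so passing ``scl=0.5`` simply halves the unscaled
+    result (a quick check of this scaling behavior):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> np.allclose(L.legint((1, 2, 3), scl=0.5), 0.5*L.legint((1, 2, 3)))
+    True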
+ + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3) + >>> L.legint(c) + array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) + >>> L.legint(c, 3) + array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, + -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) + >>> L.legint(c, k=3) + array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) + >>> L.legint(c, lbnd=-2) + array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) + >>> L.legint(c, scl=2) + array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of integration must be integer") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + if iaxis != axis: + raise ValueError("The axis must be integer") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/3 + for j in range(2, n): + t = c[j]/(2*j + 1) + tmp[j + 1] = t + tmp[j - 1] -= t + tmp[0] += k[i] - legval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def legval(x, c, tensor=True): + """ + Evaluate a Legendre series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. 
+ tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + legval2d, leggrid2d, legval3d, leggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + + """ + c = np.array(c, ndmin=1, copy=0) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*x*(2*nd - 1))/nd + return c0 + c1*x + + +def legval2d(x, y, c): + """ + Evaluate a 2-D Legendre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Legendre series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + legval, leggrid2d, legval3d, leggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except Exception: + raise ValueError('x, y are incompatible') + + c = legval(x, c) + c = legval(y, c, tensor=False) + return c + + +def leggrid2d(x, y, c): + """ + Evaluate a 2-D Legendre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. 
In either
+    case, either `x` and `y` or their elements must support multiplication
+    and addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape + y.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Legendre series at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    legval, legval2d, legval3d, leggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    c = legval(x, c)
+    c = legval(y, c)
+    return c
+
+
+def legval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars, and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, leggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    try:
+        x, y, z = np.array((x, y, z), copy=0)
+    except Exception:
+        raise ValueError('x, y, z are incompatible')
+
+    c = legval(x, c)
+    c = legval(y, c, tensor=False)
+    c = legval(z, c, tensor=False)
+    return c
+
+
+def leggrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
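+
+    A minimal shape check (hypothetical inputs; the constant series
+    ``[[[1.]]]`` makes every grid value equal to 1):
+
+    >>> from numpy.polynomial import legendre as L
+    >>> x, y, z = [-1, 0, 1], [0, 1], [0.5]
+    >>> L.leggrid3d(x, y, z, [[[1.]]]).shape
+    (3, 2, 1)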
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, legval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    c = legval(x, c)
+    c = legval(y, c)
+    c = legval(z, c)
+    return c
+
+
+def legvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = L_i(x)
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Legendre polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
+    ``legval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Legendre series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Legendre polynomial. The dtype will be the same as
+        the converted `x`.
+
+    """
+    ideg = int(deg)
+    if ideg != deg:
+        raise ValueError("deg must be integer")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=0, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    # Use forward recursion to generate the entries. This is not as accurate
+    # as reverse recursion in this application but it is more efficient.
+    v[0] = x*0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
+    return np.moveaxis(v, 0, -1)
+
+
+def legvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. 
math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the Legendre polynomials.
+
+    If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    legvander, legvander3d, legval2d, legval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy = ideg
+    x, y = np.array((x, y), copy=0) + 0.0
+
+    vx = legvander(x, degx)
+    vy = legvander(y, degy)
+    v = vx[..., None]*vy[..., None,:]
+    return v.reshape(v.shape[:-2] + (-1,))
+
+
+def legvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the Legendre polynomials.
+
+    If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legvander, legvander2d, legval2d, legval3d
+
+    Notes
+    -----
+
+    .. 
versionadded:: 1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy, degz = ideg + x, y, z = np.array((x, y, z), copy=0) + 0.0 + + vx = legvander(x, degx) + vy = legvander(y, degy) + vz = legvander(z, degz) + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] + return v.reshape(v.shape[:-3] + (-1,)) + + +def legfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Legendre series to data. + + Return the coefficients of a Legendre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Legendre coefficients ordered from low to high. If `y` was + 2-D, the coefficients for the data in column k of `y` are in + column `k`. If `deg` is specified as a list, coefficients for + terms not included in the fit are set equal to zero in the + returned `coef`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. 
The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, polyfit, lagfit, hermfit, hermefit + legval : Evaluates a Legendre series. + legvander : Vandermonde matrix of Legendre series. + legweight : Legendre weight function (= 1). + linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Legendre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Legendre series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") + if deg.min() < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + if deg.ndim == 0: + lmax = deg + order = lmax + 1 + van = legvander(x, lmax) + else: + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = legvander(x, lmax)[:, deg] + + # set up the least squares matrices in transposed form + lhs = van.T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. 
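+    # The rank and singular values returned by lstsq describe the
+    # column-scaled Vandermonde matrix lhs.T/scl, which is what the
+    # docstring's diagnostic output refers to.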
+ c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # Expand c to include non-fitted coefficients which are set to zero + if deg.ndim > 0: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning, stacklevel=2) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def legcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Legendre basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = 1./np.sqrt(2*np.arange(n) + 1) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) + return mat + + +def legroots(c): + """ + Compute the roots of a Legendre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + polyroots, chebroots, lagroots, hermroots, hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such values. + Roots with multiplicity greater than 1 will also show larger errors as + the value of the series near such points is relatively insensitive to + errors in the roots. Isolated roots near the origin can be improved by + a few iterations of Newton's method. + + The Legendre series basis polynomials aren't powers of ``x`` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.legendre as leg + >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots + array([-0.85099543, -0.11407192, 0.51506735]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + m = legcompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +def leggauss(deg): + """ + Gauss-Legendre quadrature. + + Computes the sample points and weights for Gauss-Legendre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1`. 
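+
+    For instance, a 3-point rule integrates polynomials through degree 5
+    exactly on [-1, 1]; a quick sanity check using
+    :math:`\\int_{-1}^{1} x^4 dx = 2/5`:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> x, w = L.leggauss(3)
+    >>> np.allclose(np.dot(w, x**4), 0.4)
+    True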
+ + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = int(deg) + if ideg != deg or ideg < 1: + raise ValueError("deg must be a non-negative integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = legcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = legval(x, c) + df = legval(x, legder(c)) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = legval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1/(fm * df) + + # for Legendre we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= 2. / w.sum() + + return x, w + + +def legweight(x): + """ + Weight function of the Legendre polynomials. + + The weight function is :math:`1` and the interval of integration is + :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not + normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = x*0.0 + 1.0 + return w + +# +# Legendre series class +# + +class Legendre(ABCPolyBase): + """A Legendre series class. + + The Legendre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Legendre coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. 
versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(legadd) + _sub = staticmethod(legsub) + _mul = staticmethod(legmul) + _div = staticmethod(legdiv) + _pow = staticmethod(legpow) + _val = staticmethod(legval) + _int = staticmethod(legint) + _der = staticmethod(legder) + _fit = staticmethod(legfit) + _line = staticmethod(legline) + _roots = staticmethod(legroots) + _fromroots = staticmethod(legfromroots) + + # Virtual properties + nickname = 'leg' + domain = np.array(legdomain) + window = np.array(legdomain) + basis_name = 'P' diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/legendre.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/legendre.pyc new file mode 100644 index 0000000..fa92be9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/legendre.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/polynomial.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/polynomial.py new file mode 100644 index 0000000..259cd31 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/polynomial.py @@ -0,0 +1,1665 @@ +""" +Objects for dealing with polynomials. + +This module provides a number of objects (mostly functions) useful for +dealing with polynomials, including a `Polynomial` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with polynomial objects is in +the docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `polydomain` -- Polynomial default domain, [-1,1]. +- `polyzero` -- (Coefficients of the) "zero polynomial." +- `polyone` -- (Coefficients of the) constant polynomial 1. +- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``. + +Arithmetic +---------- +- `polyadd` -- add two polynomials. +- `polysub` -- subtract one polynomial from another. +- `polymulx` -- multiply a polynomial in ``P_i(x)`` by ``x``. +- `polymul` -- multiply two polynomials. +- `polydiv` -- divide one polynomial by another. +- `polypow` -- raise a polynomial to a positive integer power. +- `polyval` -- evaluate a polynomial at given points. +- `polyval2d` -- evaluate a 2D polynomial at given points. +- `polyval3d` -- evaluate a 3D polynomial at given points. +- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product. +- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product. + +Calculus +-------- +- `polyder` -- differentiate a polynomial. +- `polyint` -- integrate a polynomial. + +Misc Functions +-------------- +- `polyfromroots` -- create a polynomial with specified roots. +- `polyroots` -- find the roots of a polynomial. +- `polyvalfromroots` -- evaluate a polynomial at given points from roots. +- `polyvander` -- Vandermonde-like matrix for powers. +- `polyvander2d` -- Vandermonde-like matrix for 2D power series. +- `polyvander3d` -- Vandermonde-like matrix for 3D power series. +- `polycompanion` -- companion matrix in power series form. +- `polyfit` -- least-squares fit returning a polynomial. +- `polytrim` -- trim leading coefficients from a polynomial. +- `polyline` -- polynomial representing given straight line. + +Classes +------- +- `Polynomial` -- polynomial class. 
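+
+A short usage sketch (evaluating ``1 + 2*x + 3*x**2`` at ``x = 2``):
+
+>>> from numpy.polynomial import polynomial as P
+>>> P.polyval(2, [1, 2, 3])
+17.0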
+
+See Also
+--------
+`numpy.polynomial`
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = [
+    'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd',
+    'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval',
+    'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander',
+    'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',
+    'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']
+
+import warnings
+import numpy as np
+import numpy.linalg as la
+from numpy.core.multiarray import normalize_axis_index
+
+from . import polyutils as pu
+from ._polybase import ABCPolyBase
+
+polytrim = pu.trimcoef
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Polynomial default domain.
+polydomain = np.array([-1, 1])
+
+# Polynomial coefficients representing zero.
+polyzero = np.array([0])
+
+# Polynomial coefficients representing one.
+polyone = np.array([1])
+
+# Polynomial coefficients representing the identity x.
+polyx = np.array([0, 1])
+
+#
+# Polynomial series functions
+#
+
+
+def polyline(off, scl):
+    """
+    Returns an array representing a linear polynomial.
+
+    Parameters
+    ----------
+    off, scl : scalars
+        The "y-intercept" and "slope" of the line, respectively.
+
+    Returns
+    -------
+    y : ndarray
+        This module's representation of the linear polynomial ``off +
+        scl*x``.
+
+    See Also
+    --------
+    chebline
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> P.polyline(1,-1)
+    array([ 1, -1])
+    >>> P.polyval(1, P.polyline(1,-1)) # should be 0
+    0.0
+
+    """
+    if scl != 0:
+        return np.array([off, scl])
+    else:
+        return np.array([off])
+
+
+def polyfromroots(roots):
+    """
+    Generate a monic polynomial with given roots.
+
+    Return the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    where the `r_n` are the roots specified in `roots`. If a zero has
+    multiplicity n, then it must appear in `roots` n times. For instance,
+    if 2 is a root of multiplicity three and 3 is a root of multiplicity 2,
+    then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear
+    in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * x + ... + x^n
+
+    The coefficient of the last term is 1 for monic polynomials in this
+    form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of the polynomial's coefficients. If all the roots are
+        real, then `out` is also real, otherwise it is complex. (see
+        Examples below).
+
+    See Also
+    --------
+    chebfromroots, legfromroots, lagfromroots, hermfromroots,
+    hermefromroots
+
+    Notes
+    -----
+    The coefficients are determined by multiplying together linear factors
+    of the form `(x - r_i)`, i.e.
+
+    .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n)
+
+    where ``n == len(roots) - 1``; note that this implies that `1` is always
+    returned for :math:`a_n`.
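+    As a quick check of this (an editor's illustration, not part of the
+    upstream docstring): the roots ``(1, 2)`` multiply out to
+    :math:`p(x) = (x - 1)(x - 2) = 2 - 3x + x^2`, i.e. the coefficient
+    array ``[2., -3., 1.]``.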
+ + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x + array([ 0., -1., 0., 1.]) + >>> j = complex(0,1) + >>> P.polyfromroots((-j,j)) # complex returned, though values are real + array([ 1.+0.j, 0.+0.j, 1.+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [polyline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [polymul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = polymul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def polyadd(c1, c2): + """ + Add one polynomial to another. + + Returns the sum of two polynomials `c1` + `c2`. The arguments are + sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + out : ndarray + The coefficient array representing their sum. + + See Also + -------- + polysub, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> sum = P.polyadd(c1,c2); sum + array([ 4., 4., 4.]) + >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) + 28.0 + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def polysub(c1, c2): + """ + Subtract one polynomial from another. + + Returns the difference of two polynomials `c1` - `c2`. The arguments + are sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of coefficients representing their difference. + + See Also + -------- + polyadd, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polysub(c1,c2) + array([-2., 0., 2.]) + >>> P.polysub(c2,c1) # -P.polysub(c1,c2) + array([ 2., 0., -2.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def polymulx(c): + """Multiply a polynomial by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + polyadd, polysub, polymul, polydiv, polypow + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1:] = c + return prd + + +def polymul(c1, c2): + """ + Multiply one polynomial by another. + + Returns the product of two polynomials `c1` * `c2`. 
The arguments are
+    sequences of coefficients, from lowest order term to highest, e.g.,
+    [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of coefficients representing a polynomial, relative to the
+        "standard" basis, and ordered from lowest order term to highest.
+
+    Returns
+    -------
+    out : ndarray
+        Of the coefficients of their product.
+
+    See Also
+    --------
+    polyadd, polysub, polymulx, polydiv, polypow
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c1 = (1,2,3)
+    >>> c2 = (3,2,1)
+    >>> P.polymul(c1,c2)
+    array([  3.,   8.,  14.,   8.,   3.])
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+    ret = np.convolve(c1, c2)
+    return pu.trimseq(ret)
+
+
+def polydiv(c1, c2):
+    """
+    Divide one polynomial by another.
+
+    Returns the quotient-with-remainder of two polynomials `c1` / `c2`.
+    The arguments are sequences of coefficients, from lowest order term
+    to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of polynomial coefficients ordered from low to high.
+
+    Returns
+    -------
+    [quo, rem] : ndarrays
+        Of coefficient series representing the quotient and remainder.
+
+    See Also
+    --------
+    polyadd, polysub, polymulx, polymul, polypow
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c1 = (1,2,3)
+    >>> c2 = (3,2,1)
+    >>> P.polydiv(c1,c2)
+    (array([ 3.]), array([-8., -4.]))
+    >>> P.polydiv(c2,c1)
+    (array([ 0.33333333]), array([ 2.66666667,  1.33333333]))
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+    if c2[-1] == 0:
+        raise ZeroDivisionError()
+
+    len1 = len(c1)
+    len2 = len(c2)
+    if len2 == 1:
+        return c1/c2[-1], c1[:1]*0
+    elif len1 < len2:
+        return c1[:1]*0, c1
+    else:
+        dlen = len1 - len2
+        scl = c2[-1]
+        c2 = c2[:-1]/scl
+        i = dlen
+        j = len1 - 1
+        while i >= 0:
+            c1[i:j] -= c2*c1[j]
+            i -= 1
+            j -= 1
+        return c1[j+1:]/scl, pu.trimseq(c1[:j+1])
+
+
+def polypow(c, pow, maxpower=None):
+    """Raise a polynomial to a power.
+
+    Returns the polynomial `c` raised to the power `pow`. The argument
+    `c` is a sequence of coefficients ordered from low to high, i.e.,
+    [1,2,3] is the series ``1 + 2*x + 3*x**2``.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of series coefficients ordered from low to
+        high degree.
+    pow : integer
+        Power to which the series will be raised.
+    maxpower : integer, optional
+        Maximum power allowed. This is mainly to limit growth of the series
+        to unmanageable size. The default is None, i.e. no limit.
+
+    Returns
+    -------
+    coef : ndarray
+        Power series of `c` raised to `pow`.
+
+    See Also
+    --------
+    polyadd, polysub, polymulx, polymul, polydiv
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> P.polypow([1,2,3], 2)
+    array([  1.,   4.,  10.,  12.,   9.])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    power = int(pow)
+    if power != pow or power < 0:
+        raise ValueError("Power must be a non-negative integer.")
+    elif maxpower is not None and power > maxpower:
+        raise ValueError("Power is too large")
+    elif power == 0:
+        return np.array([1], dtype=c.dtype)
+    elif power == 1:
+        return c
+    else:
+        # This can be made more efficient by using powers of two
+        # in the usual way.
+        prd = c
+        for i in range(2, power + 1):
+            prd = np.convolve(prd, c)
+        return prd
+
+
+def polyder(c, m=1, scl=1, axis=0):
+    """
+    Differentiate a polynomial.
+
+    Returns the polynomial coefficients `c` differentiated `m` times along
+    `axis`. At each iteration the result is multiplied by `scl` (the
+    scaling factor is for use in a linear change of variable). The
+    argument `c` is an array of coefficients from low to high degree along
+    each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``
+    while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is
+    ``x`` and axis=1 is ``y``.
+
+    Parameters
+    ----------
+    c : array_like
+        Array of polynomial coefficients. If c is multidimensional the
+        different axes correspond to different variables with the degree
+        in each axis given by the corresponding index.
+    m : int, optional
+        Number of derivatives taken, must be non-negative. (Default: 1)
+    scl : scalar, optional
+        Each differentiation is multiplied by `scl`. The end result is
+        multiplication by ``scl**m``. This is for use in a linear change
+        of variable. (Default: 1)
+    axis : int, optional
+        Axis over which the derivative is taken. (Default: 0).
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    der : ndarray
+        Polynomial coefficients of the derivative.
+
+    See Also
+    --------
+    polyint
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3
+    >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2
+    array([  2.,   6.,  12.])
+    >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24
+    array([ 24.])
+    >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2
+    array([ -2.,  -6., -12.])
+    >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x
+    array([  6.,  24.])
+
+    """
+    c = np.array(c, ndmin=1, copy=1)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        # astype fails with NA
+        c = c + 0.0
+    cdt = c.dtype
+    cnt, iaxis = [int(t) for t in [m, axis]]
+
+    if cnt != m:
+        raise ValueError("The order of derivation must be integer")
+    if cnt < 0:
+        raise ValueError("The order of derivation must be non-negative")
+    if iaxis != axis:
+        raise ValueError("The axis must be integer")
+    iaxis = normalize_axis_index(iaxis, c.ndim)
+
+    if cnt == 0:
+        return c
+
+    c = np.moveaxis(c, iaxis, 0)
+    n = len(c)
+    if cnt >= n:
+        c = c[:1]*0
+    else:
+        for i in range(cnt):
+            n = n - 1
+            c *= scl
+            der = np.empty((n,) + c.shape[1:], dtype=cdt)
+            for j in range(n, 0, -1):
+                der[j - 1] = j*c[j]
+            c = der
+    c = np.moveaxis(c, 0, iaxis)
+    return c
+
+
+def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
+    """
+    Integrate a polynomial.
+
+    Returns the polynomial coefficients `c` integrated `m` times from
+    `lbnd` along `axis`. At each iteration the resulting series is
+    **multiplied** by `scl` and an integration constant, `k`, is added.
+    The scaling factor is for use in a linear change of variable. ("Buyer
+    beware": note that, depending on what one is doing, one may want `scl`
+    to be the reciprocal of what one might expect; for more information,
+    see the Notes section below.) The argument `c` is an array of
+    coefficients, from low to high degree along each axis, e.g., [1,2,3]
+    represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]
+    represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is
+    ``y``.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of polynomial coefficients, ordered from low to high.
+    m : int, optional
+        Order of integration, must be non-negative. (Default: 1)
+    k : {[], list, scalar}, optional
+        Integration constant(s). The value of the first integral at zero
+        is the first value in the list, the value of the second integral
+        at zero is the second value, etc.
If ``k == []`` (the default),
+        all constants are set to zero. If ``m == 1``, a single scalar can
+        be given instead of a list.
+    lbnd : scalar, optional
+        The lower bound of the integral. (Default: 0)
+    scl : scalar, optional
+        Following each integration the result is *multiplied* by `scl`
+        before the integration constant is added. (Default: 1)
+    axis : int, optional
+        Axis over which the integral is taken. (Default: 0).
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    S : ndarray
+        Coefficient array of the integral.
+
+    Raises
+    ------
+    ValueError
+        If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
+        ``np.ndim(scl) != 0``.
+
+    See Also
+    --------
+    polyder
+
+    Notes
+    -----
+    Note that the result of each integration is *multiplied* by `scl`. Why
+    is this important to note? Say one is making a linear change of
+    variable :math:`u = ax + b` in an integral relative to `x`. Then
+    :math:`dx = du/a`, so one will need to set `scl` equal to
+    :math:`1/a` - perhaps not what one would have first thought.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = (1,2,3)
+    >>> P.polyint(c) # should return array([0, 1, 1, 1])
+    array([ 0.,  1.,  1.,  1.])
+    >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
+    array([ 0.        ,  0.        ,  0.        ,  0.16666667,  0.08333333,
+            0.05      ])
+    >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
+    array([ 3.,  1.,  1.,  1.])
+    >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
+    array([ 6.,  1.,  1.,  1.])
+    >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
+    array([ 0., -2., -2., -2.])
+
+    """
+    c = np.array(c, ndmin=1, copy=1)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        # astype doesn't preserve mask attribute.
+        c = c + 0.0
+    cdt = c.dtype
+    if not np.iterable(k):
+        k = [k]
+    cnt, iaxis = [int(t) for t in [m, axis]]
+
+    if cnt != m:
+        raise ValueError("The order of integration must be integer")
+    if cnt < 0:
+        raise ValueError("The order of integration must be non-negative")
+    if len(k) > cnt:
+        raise ValueError("Too many integration constants")
+    if np.ndim(lbnd) != 0:
+        raise ValueError("lbnd must be a scalar.")
+    if np.ndim(scl) != 0:
+        raise ValueError("scl must be a scalar.")
+    if iaxis != axis:
+        raise ValueError("The axis must be integer")
+    iaxis = normalize_axis_index(iaxis, c.ndim)
+
+    if cnt == 0:
+        return c
+
+    k = list(k) + [0]*(cnt - len(k))
+    c = np.moveaxis(c, iaxis, 0)
+    for i in range(cnt):
+        n = len(c)
+        c *= scl
+        if n == 1 and np.all(c[0] == 0):
+            c[0] += k[i]
+        else:
+            tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
+            tmp[0] = c[0]*0
+            tmp[1] = c[0]
+            for j in range(1, n):
+                tmp[j + 1] = c[j]/(j + 1)
+            tmp[0] += k[i] - polyval(lbnd, tmp)
+            c = tmp
+    c = np.moveaxis(c, 0, iaxis)
+    return c
+
+
+def polyval(x, c, tensor=True):
+    """
+    Evaluate a polynomial at points x.
+
+    If `c` is of length `n + 1`, this function returns the value
+
+    .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n
+
+    The parameter `x` is converted to an array only if it is a tuple or a
+    list, otherwise it is treated as a scalar. In either case, either `x`
+    or its elements must support multiplication and addition both with
+    themselves and with the elements of `c`.
+
+    If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
+    `c` is multidimensional, then the shape of the result depends on the
+    value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+    x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+    scalars have shape (,).
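+    As a concrete check (an editor's illustration, not part of the
+    upstream docstring): with the scalar ``x = 2`` and ``c = [1, 2, 3]``
+    the returned value is ``1 + 2*2 + 3*2**2 = 17``, which Horner's
+    method computes as ``(3*2 + 2)*2 + 1``.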
+
+    Trailing zeros in the coefficients will be used in the evaluation, so
+    they should be avoided if efficiency is a concern.
+
+    Parameters
+    ----------
+    x : array_like, compatible object
+        If `x` is a list or tuple, it is converted to an ndarray, otherwise
+        it is left unchanged and treated as a scalar. In either case, `x`
+        or its elements must support addition and multiplication with
+        themselves and with the elements of `c`.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree n are contained in c[n]. If `c` is multidimensional the
+        remaining indices enumerate multiple polynomials. In the two
+        dimensional case the coefficients may be thought of as stored in
+        the columns of `c`.
+    tensor : boolean, optional
+        If True, the shape of the coefficient array is extended with ones
+        on the right, one for each dimension of `x`. Scalars have dimension 0
+        for this action. The result is that every column of coefficients in
+        `c` is evaluated for every element of `x`. If False, `x` is broadcast
+        over the columns of `c` for the evaluation. This keyword is useful
+        when `c` is multidimensional. The default value is True.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The shape of the returned array is described above.
+
+    See Also
+    --------
+    polyval2d, polygrid2d, polyval3d, polygrid3d
+
+    Notes
+    -----
+    The evaluation uses Horner's method.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.polynomial import polyval
+    >>> polyval(1, [1,2,3])
+    6.0
+    >>> a = np.arange(4).reshape(2,2)
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> polyval(a, [1,2,3])
+    array([[  1.,   6.],
+           [ 17.,  34.]])
+    >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients
+    >>> coef
+    array([[0, 1],
+           [2, 3]])
+    >>> polyval([1,2], coef, tensor=True)
+    array([[ 2.,  4.],
+           [ 4.,  7.]])
+    >>> polyval([1,2], coef, tensor=False)
+    array([ 2.,  7.])
+
+    """
+    c = np.array(c, ndmin=1, copy=0)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        # astype fails with NA
+        c = c + 0.0
+    if isinstance(x, (tuple, list)):
+        x = np.asarray(x)
+    if isinstance(x, np.ndarray) and tensor:
+        c = c.reshape(c.shape + (1,)*x.ndim)
+
+    c0 = c[-1] + x*0
+    for i in range(2, len(c) + 1):
+        c0 = c[-i] + c0*x
+    return c0
+
+
+def polyvalfromroots(x, r, tensor=True):
+    """
+    Evaluate a polynomial specified by its roots at points x.
+
+    If `r` is of length `N`, this function returns the value
+
+    .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n)
+
+    The parameter `x` is converted to an array only if it is a tuple or a
+    list, otherwise it is treated as a scalar. In either case, either `x`
+    or its elements must support multiplication and addition both with
+    themselves and with the elements of `r`.
+
+    If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r`
+    is multidimensional, then the shape of the result depends on the value of
+    `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape;
+    that is, each polynomial is evaluated at every value of `x`. If `tensor` is
+    ``False``, the shape will be r.shape[1:]; that is, each polynomial is
+    evaluated only for the corresponding broadcast value of `x`. Note that
+    scalars have shape (,).
+
+    .. versionadded:: 1.12
+
+    Parameters
+    ----------
+    x : array_like, compatible object
+        If `x` is a list or tuple, it is converted to an ndarray, otherwise
+        it is left unchanged and treated as a scalar.
In either case, `x`
+        or its elements must support addition and multiplication with
+        themselves and with the elements of `r`.
+    r : array_like
+        Array of roots. If `r` is multidimensional the first index is the
+        root index, while the remaining indices enumerate multiple
+        polynomials. For instance, in the two dimensional case the roots
+        of each polynomial may be thought of as stored in the columns of `r`.
+    tensor : boolean, optional
+        If True, the shape of the roots array is extended with ones on the
+        right, one for each dimension of `x`. Scalars have dimension 0 for this
+        action. The result is that every column of coefficients in `r` is
+        evaluated for every element of `x`. If False, `x` is broadcast over the
+        columns of `r` for the evaluation. This keyword is useful when `r` is
+        multidimensional. The default value is True.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The shape of the returned array is described above.
+
+    See Also
+    --------
+    polyroots, polyfromroots, polyval
+
+    Examples
+    --------
+    >>> from numpy.polynomial.polynomial import polyvalfromroots
+    >>> polyvalfromroots(1, [1,2,3])
+    0.0
+    >>> a = np.arange(4).reshape(2,2)
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> polyvalfromroots(a, [-1, 0, 1])
+    array([[ -0.,   0.],
+           [  6.,  24.]])
+    >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients
+    >>> r # each column of r defines one polynomial
+    array([[-2, -1],
+           [ 0,  1]])
+    >>> b = [-2, 1]
+    >>> polyvalfromroots(b, r, tensor=True)
+    array([[-0.,  3.],
+           [ 3.,  0.]])
+    >>> polyvalfromroots(b, r, tensor=False)
+    array([-0.,  0.])
+    """
+    r = np.array(r, ndmin=1, copy=0)
+    if r.dtype.char in '?bBhHiIlLqQpP':
+        r = r.astype(np.double)
+    if isinstance(x, (tuple, list)):
+        x = np.asarray(x)
+    if isinstance(x, np.ndarray):
+        if tensor:
+            r = r.reshape(r.shape + (1,)*x.ndim)
+        elif x.ndim >= r.ndim:
+            raise ValueError("x.ndim must be < r.ndim when tensor == False")
+    return np.prod(x - r, axis=0)
+
+
+def polyval2d(x, y, c):
+    """
+    Evaluate a 2-D polynomial at points (x, y).
+
+    This function returns the value
+
+    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars and they
+    must have the same shape after conversion. In either case, either `x`
+    and `y` or their elements must support multiplication and addition both
+    with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points `(x, y)`,
+        where `x` and `y` must have the same shape. If `x` or `y` is a list
+        or tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term
+        of multi-degree i,j is contained in `c[i,j]`. If `c` has
+        dimension greater than two the remaining indices enumerate multiple
+        sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional polynomial at points formed with
+        pairs of corresponding values from `x` and `y`.
+
+    See Also
+    --------
+    polyval, polygrid2d, polyval3d, polygrid3d
+
+    Notes
+    -----
+
+    .. 
versionadded:: 1.7.0
+
+    """
+    try:
+        x, y = np.array((x, y), copy=0)
+    except Exception:
+        raise ValueError('x, y are incompatible')
+
+    c = polyval(x, c)
+    c = polyval(y, c, tensor=False)
+    return c
+
+
+def polygrid2d(x, y, c):
+    """
+    Evaluate a 2-D polynomial on the Cartesian product of x and y.
+
+    This function returns the values:
+
+    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j
+
+    where the points `(a, b)` consist of all pairs formed by taking
+    `a` from `x` and `b` from `y`. The resulting points form a grid with
+    `x` in the first dimension and `y` in the second.
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars. In either
+    case, either `x` and `y` or their elements must support multiplication
+    and addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape + y.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j are contained in ``c[i,j]``. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional polynomial at points in the Cartesian
+        product of `x` and `y`.
+
+    See Also
+    --------
+    polyval, polyval2d, polyval3d, polygrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    c = polyval(x, c)
+    c = polyval(y, c)
+    return c
+
+
+def polyval3d(x, y, z, c):
+    """
+    Evaluate a 3-D polynomial at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyval, polyval2d, polygrid2d, polygrid3d
+
+    Notes
+    -----
+
+    .. 
versionadded:: 1.7.0
+
+    """
+    try:
+        x, y, z = np.array((x, y, z), copy=0)
+    except Exception:
+        raise ValueError('x, y, z are incompatible')
+
+    c = polyval(x, c)
+    c = polyval(y, c, tensor=False)
+    c = polyval(z, c, tensor=False)
+    return c
+
+
+def polygrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyval, polyval2d, polygrid2d, polyval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    c = polyval(x, c)
+    c = polyval(y, c)
+    c = polyval(z, c)
+    return c
+
+
+def polyvander(x, deg):
+    """Vandermonde matrix of given degree.
+
+    Returns the Vandermonde matrix of degree `deg` and sample points
+    `x`. The Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = x^i,
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the power of `x`.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
+    ``polyval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of polynomials of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the power of `x`.
+        The dtype will be the same as the converted `x`.
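+        (An editor's illustration, not part of the upstream docstring:
+        ``polyvander([1, 2], 2)`` returns ``[[1., 1., 1.], [1., 2., 4.]]``,
+        i.e. the powers 0 through 2 of each sample point.)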
+
+    See Also
+    --------
+    polyvander2d, polyvander3d
+
+    """
+    ideg = int(deg)
+    if ideg != deg:
+        raise ValueError("deg must be integer")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=0, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x*0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = v[i-1]*x
+    return np.moveaxis(v, 0, -1)
+
+
+def polyvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j,
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the powers of
+    `x` and `y`.
+
+    If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    polyvander, polyvander3d, polyval2d, polyval3d
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy = ideg
+    x, y = np.array((x, y), copy=0) + 0.0
+
+    vx = polyvander(x, degx)
+    vy = polyvander(y, degy)
+    v = vx[..., None]*vy[..., None,:]
+    # einsum bug
+    #v = np.einsum("...i,...j->...ij", vx, vy)
+    return v.reshape(v.shape[:-2] + (-1,))
+
+
+def polyvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the powers of `x`, `y`, and `z`.
+
+    If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D polynomials
+    of the same degrees and sample points.
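+
+    (An editor's illustration, not part of the upstream docstring: with
+    ``deg = [1, 1, 1]`` each point contributes :math:`2 \\cdot 2 \\cdot 2 = 8`
+    columns, ordered :math:`c_{000}, c_{001}, c_{010}, c_{011}, c_{100},
+    c_{101}, c_{110}, c_{111}`.)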
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyvander, polyvander2d, polyval2d, polyval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy, degz = ideg
+    x, y, z = np.array((x, y, z), copy=0) + 0.0
+
+    vx = polyvander(x, degx)
+    vy = polyvander(y, degy)
+    vz = polyvander(z, degz)
+    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
+    # einsum bug
+    #v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz)
+    return v.reshape(v.shape[:-3] + (-1,))
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least-squares fit of a polynomial to data.
+
+    Return the coefficients of a polynomial of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (`M`,)
+        x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
+    y : array_like, shape (`M`,) or (`M`, `K`)
+        y-coordinates of the sample points. Several sets of sample points
+        sharing the same x-coordinates can be (independently) fit with one
+        call to `polyfit` by passing in for `y` a 2-D array that contains
+        one data set per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials. If `deg` is a single integer
+        all terms up to and including the `deg`'th term are included in the
+        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+        degrees of the terms to include may be used instead.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller
+        than `rcond`, relative to the largest singular value, will be
+        ignored. The default value is ``len(x)*eps``, where `eps` is the
+        relative precision of the platform's float type, about 2e-16 in
+        most cases.
+    full : bool, optional
+        Switch determining the nature of the return value. When ``False``
+        (the default) just the coefficients are returned; when ``True``,
+        diagnostic information from the singular value decomposition (used
+        to solve the fit's matrix equation) is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the contribution of each point
+        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
+        weights are chosen so that the errors of the products ``w[i]*y[i]``
+        all have the same variance. The default value is None.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
+        Polynomial coefficients ordered from low to high.
If `y` was 2-D, + the coefficients in column `k` of `coef` represent the polynomial + fit to the data in `y`'s `k`-th column. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Raises + ------ + RankWarning + Raised if the matrix in the least-squares fit is rank deficient. + The warning is only raised if `full` == False. The warnings can + be turned off by: + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, legfit, lagfit, hermfit, hermefit + polyval : Evaluates a polynomial. + polyvander : Vandermonde matrix for powers. + linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the polynomial `p` that minimizes + the sum of the weighted squared errors + + .. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) over-determined matrix equation: + + .. math :: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected (and `full` == ``False``), a `RankWarning` will be raised. + This means that the coefficient values may be poorly determined. + Fitting to a lower order polynomial will usually get rid of the warning + (but may not be what you want, of course; if you have independent + reason(s) for choosing the degree which isn't working, you may have to: + a) reconsider those reasons, and/or b) reconsider the quality of your + data). The `rcond` parameter can also be set to a value smaller than + its default, but the resulting fit may be spurious and have large + contributions from roundoff error. + + Polynomial fits using double precision tend to "fail" at about + (polynomial) degree 20. Fits using Chebyshev or Legendre series are + generally better conditioned, but much can still depend on the + distribution of the sample points and the smoothness of the data. If + the quality of the fit is inadequate, splines may be a good + alternative. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] + >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 
1 + array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) + >>> stats # note the large SSR, explaining the rather poor results + [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, + 0.28853036]), 1.1324274851176597e-014] + + Same thing without the added noise + + >>> y = x**3 - x + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 + array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16, + 1.00000000e+00]) + >>> stats # note the minuscule SSR + [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, + 0.50443316, 0.28853036]), 1.1324274851176597e-014] + + """ + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") + if deg.min() < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + if deg.ndim == 0: + lmax = deg + order = lmax + 1 + van = polyvander(x, lmax) + else: + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = polyvander(x, lmax)[:, deg] + + # set up the least squares matrices in transposed form + lhs = van.T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. + c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # Expand c to include non-fitted coefficients which are set to zero + if deg.ndim == 1: + if c.ndim == 2: + cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax + 1, dtype=c.dtype) + cc[deg] = c + c = cc + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning, stacklevel=2) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def polycompanion(c): + """ + Return the companion matrix of c. + + The companion matrix for power series cannot be made symmetric by + scaling the basis, so this function differs from those for the + orthogonal polynomials. + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + bot = mat.reshape(-1)[n::n+1] + bot[...] 
= 1
+    mat[:, -1] -= c[:-1]/c[-1]
+    return mat
+
+
+def polyroots(c):
+    """
+    Compute the roots of a polynomial.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * x^i.
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of polynomial coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the polynomial. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    chebroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the power series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    Examples
+    --------
+    >>> import numpy.polynomial.polynomial as poly
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1)))
+    array([-1.,  0.,  1.])
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
+    dtype('float64')
+    >>> j = complex(0,1)
+    >>> poly.polyroots(poly.polyfromroots((-j,0,j)))
+    array([  0.00000000e+00+0.j,   0.00000000e+00+1.j,   2.77555756e-17-1.j])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([-c[0]/c[1]])
+
+    m = polycompanion(c)
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+#
+# polynomial class
+#
+
+class Polynomial(ABCPolyBase):
+    """A power series class.
+
+    The Polynomial class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed in the `ABCPolyBase` documentation.
+
+    Parameters
+    ----------
+    coef : array_like
+        Polynomial coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` gives ``1 + 2*x + 3*x**2``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [-1, 1].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [-1, 1].
+
+    .. 
versionadded:: 1.6.0
+
+    """
+    # Virtual Functions
+    _add = staticmethod(polyadd)
+    _sub = staticmethod(polysub)
+    _mul = staticmethod(polymul)
+    _div = staticmethod(polydiv)
+    _pow = staticmethod(polypow)
+    _val = staticmethod(polyval)
+    _int = staticmethod(polyint)
+    _der = staticmethod(polyder)
+    _fit = staticmethod(polyfit)
+    _line = staticmethod(polyline)
+    _roots = staticmethod(polyroots)
+    _fromroots = staticmethod(polyfromroots)
+
+    # Virtual properties
+    nickname = 'poly'
+    domain = np.array(polydomain)
+    window = np.array(polydomain)
+    basis_name = None
+
+    @staticmethod
+    def _repr_latex_term(i, arg_str, needs_parens):
+        if needs_parens:
+            arg_str = r'\left({}\right)'.format(arg_str)
+        if i == 0:
+            return '1'
+        elif i == 1:
+            return arg_str
+        else:
+            return '{}^{{{}}}'.format(arg_str, i)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc
new file mode 100644
index 0000000..24d1b67
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/polyutils.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/polyutils.py
new file mode 100644
index 0000000..c1ed0c9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/polyutils.py
@@ -0,0 +1,412 @@
+"""
+Utility classes and functions for the polynomial modules.
+
+This module provides: error and warning objects; a polynomial base class;
+and some routines used in both the `polynomial` and `chebyshev` modules.
+
+Error objects
+-------------
+
+.. autosummary::
+   :toctree: generated/
+
+   PolyError        base class for this sub-package's errors.
+   PolyDomainError  raised when domains are mismatched.
+
+Warning objects
+---------------
+
+.. autosummary::
+   :toctree: generated/
+
+   RankWarning  raised in least-squares fit for rank-deficient matrix.
+
+Base class
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   PolyBase  Obsolete base class for the polynomial classes. Do not use.
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   as_series  convert list of array_likes into 1-D arrays of common type.
+   trimseq    remove trailing zeros.
+   trimcoef   remove small trailing coefficients.
+   getdomain  return the domain appropriate for a given set of abscissae.
+   mapdomain  maps points between domains.
+   mapparms   parameters of the linear map between domains.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+
+__all__ = [
+    'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq',
+    'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase']
+
+#
+# Warnings and Exceptions
+#
+
+class RankWarning(UserWarning):
+    """Issued by chebfit when the design matrix is rank deficient."""
+    pass
+
+class PolyError(Exception):
+    """Base class for errors in this module."""
+    pass
+
+class PolyDomainError(PolyError):
+    """Issued by the generic Poly class when two domains don't match.
+
+    This is raised when a binary operation is passed Poly objects with
+    different domains.
+
+    """
+    pass
+
+#
+# Base class for all polynomial types
+#
+
+class PolyBase(object):
+    """
+    Base class for all polynomial types.
+
+    Deprecated in numpy 1.9.0, use the abstract
+    ABCPolyBase class instead. Note that the latter
+    requires a number of virtual functions to be
+    implemented.
+ + """ + pass + +# +# Helper functions to convert inputs to 1-D arrays +# +def trimseq(seq): + """Remove small Poly series coefficients. + + Parameters + ---------- + seq : sequence + Sequence of Poly series coefficients. This routine fails for + empty sequences. + + Returns + ------- + series : sequence + Subsequence with trailing zeros removed. If the resulting sequence + would be empty, return the first element. The returned sequence may + or may not be a view. + + Notes + ----- + Do not lose the type info if the sequence contains unknown objects. + + """ + if len(seq) == 0: + return seq + else: + for i in range(len(seq) - 1, -1, -1): + if seq[i] != 0: + break + return seq[:i+1] + + +def as_series(alist, trim=True): + """ + Return argument as a list of 1-d arrays. + + The returned list contains array(s) of dtype double, complex double, or + object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of + size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays + of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array + raises a Value Error if it is not first reshaped into either a 1-d or 2-d + array. + + Parameters + ---------- + alist : array_like + A 1- or 2-d array_like + trim : boolean, optional + When True, trailing zeros are removed from the inputs. + When False, the inputs are passed through intact. + + Returns + ------- + [a1, a2,...] : list of 1-D arrays + A copy of the input data as a list of 1-d arrays. + + Raises + ------ + ValueError + Raised when `as_series` cannot convert its input to 1-d arrays, or at + least one of the resulting arrays is empty. + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> a = np.arange(4) + >>> pu.as_series(a) + [array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])] + >>> b = np.arange(6).reshape((2,3)) + >>> pu.as_series(b) + [array([ 0., 1., 2.]), array([ 3., 4., 5.])] + + >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16))) + [array([ 1.]), array([ 0., 1., 2.]), array([ 0., 1.])] + + >>> pu.as_series([2, [1.1, 0.]]) + [array([ 2.]), array([ 1.1])] + + >>> pu.as_series([2, [1.1, 0.]], trim=False) + [array([ 2.]), array([ 1.1, 0. ])] + + """ + arrays = [np.array(a, ndmin=1, copy=0) for a in alist] + if min([a.size for a in arrays]) == 0: + raise ValueError("Coefficient array is empty") + if any([a.ndim != 1 for a in arrays]): + raise ValueError("Coefficient array is not 1-d") + if trim: + arrays = [trimseq(a) for a in arrays] + + if any([a.dtype == np.dtype(object) for a in arrays]): + ret = [] + for a in arrays: + if a.dtype != np.dtype(object): + tmp = np.empty(len(a), dtype=np.dtype(object)) + tmp[:] = a[:] + ret.append(tmp) + else: + ret.append(a.copy()) + else: + try: + dtype = np.common_type(*arrays) + except Exception: + raise ValueError("Coefficient arrays have no common type") + ret = [np.array(a, copy=1, dtype=dtype) for a in arrays] + return ret + + +def trimcoef(c, tol=0): + """ + Remove "small" "trailing" coefficients from a polynomial. + + "Small" means "small in absolute value" and is controlled by the + parameter `tol`; "trailing" means highest order coefficient(s), e.g., in + ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) + both the 3-rd and 4-th order coefficients would be "trimmed." + + Parameters + ---------- + c : array_like + 1-d array of coefficients, ordered from lowest order to highest. 
+ tol : number, optional + Trailing (i.e., highest order) elements with absolute value less + than or equal to `tol` (default value is zero) are removed. + + Returns + ------- + trimmed : ndarray + 1-d array with trailing zeros removed. If the resulting series + would be empty, a series containing a single zero is returned. + + Raises + ------ + ValueError + If `tol` < 0 + + See Also + -------- + trimseq + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.trimcoef((0,0,3,0,5,0,0)) + array([ 0., 0., 3., 0., 5.]) + >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed + array([ 0.]) + >>> i = complex(0,1) # works for complex + >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) + array([ 0.0003+0.j , 0.0010-0.001j]) + + """ + if tol < 0: + raise ValueError("tol must be non-negative") + + [c] = as_series([c]) + [ind] = np.nonzero(np.abs(c) > tol) + if len(ind) == 0: + return c[:1]*0 + else: + return c[:ind[-1] + 1].copy() + +def getdomain(x): + """ + Return a domain suitable for given abscissae. + + Find a domain suitable for a polynomial or Chebyshev series + defined at the values supplied. + + Parameters + ---------- + x : array_like + 1-d array of abscissae whose domain will be determined. + + Returns + ------- + domain : ndarray + 1-d array containing two values. If the inputs are complex, then + the two returned points are the lower left and upper right corners + of the smallest rectangle (aligned with the axes) in the complex + plane containing the points `x`. If the inputs are real, then the + two points are the ends of the smallest interval containing the + points `x`. + + See Also + -------- + mapparms, mapdomain + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> points = np.arange(4)**2 - 5; points + array([-5, -4, -1, 4]) + >>> pu.getdomain(points) + array([-5., 4.]) + >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle + >>> pu.getdomain(c) + array([-1.-1.j, 1.+1.j]) + + """ + [x] = as_series([x], trim=False) + if x.dtype.char in np.typecodes['Complex']: + rmin, rmax = x.real.min(), x.real.max() + imin, imax = x.imag.min(), x.imag.max() + return np.array((complex(rmin, imin), complex(rmax, imax))) + else: + return np.array((x.min(), x.max())) + +def mapparms(old, new): + """ + Linear map parameters between domains. + + Return the parameters of the linear map ``offset + scale*x`` that maps + `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. + + Parameters + ---------- + old, new : array_like + Domains. Each domain must (successfully) convert to a 1-d array + containing precisely two values. + + Returns + ------- + offset, scale : scalars + The map ``L(x) = offset + scale*x`` maps the first domain to the + second. + + See Also + -------- + getdomain, mapdomain + + Notes + ----- + Also works for complex numbers, and thus can be used to calculate the + parameters required to map any line in the complex plane to any other + line therein. + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.mapparms((-1,1),(-1,1)) + (0.0, 1.0) + >>> pu.mapparms((1,-1),(-1,1)) + (0.0, -1.0) + >>> i = complex(0,1) + >>> pu.mapparms((-i,-1),(1,i)) + ((1+1j), (1+0j)) + + """ + oldlen = old[1] - old[0] + newlen = new[1] - new[0] + off = (old[1]*new[0] - old[0]*new[1])/oldlen + scl = newlen/oldlen + return off, scl + +def mapdomain(x, old, new): + """ + Apply linear map to input points. 
+
+    The linear map ``offset + scale*x`` that maps the domain `old` to
+    the domain `new` is applied to the points `x`.
+
+    Parameters
+    ----------
+    x : array_like
+        Points to be mapped. If `x` is a subtype of ndarray the subtype
+        will be preserved.
+    old, new : array_like
+        The two domains that determine the map. Each must (successfully)
+        convert to 1-d arrays containing precisely two values.
+
+    Returns
+    -------
+    x_out : ndarray
+        Array of points of the same shape as `x`, after application of the
+        linear map between the two domains.
+
+    See Also
+    --------
+    getdomain, mapparms
+
+    Notes
+    -----
+    Effectively, this implements:
+
+    .. math ::
+        x\\_out = new[0] + m(x - old[0])
+
+    where
+
+    .. math ::
+        m = \\frac{new[1]-new[0]}{old[1]-old[0]}
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polyutils as pu
+    >>> old_domain = (-1,1)
+    >>> new_domain = (0,2*np.pi)
+    >>> x = np.linspace(-1,1,6); x
+    array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ])
+    >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out
+    array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825,
+            6.28318531])
+    >>> x - pu.mapdomain(x_out, new_domain, old_domain)
+    array([ 0., 0., 0., 0., 0., 0.])
+
+    Also works for complex numbers (and thus can be used to map any line in
+    the complex plane to any other line therein).
+
+    >>> i = complex(0,1)
+    >>> old = (-1 - i, 1 + i)
+    >>> new = (-1 + i, 1 - i)
+    >>> z = np.linspace(old[0], old[1], 6); z
+    array([-1.0-1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1.0+1.j ])
+    >>> new_z = pu.mapdomain(z, old, new); new_z
+    array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ])
+
+    """
+    x = np.asanyarray(x)
+    off, scl = mapparms(old, new)
+    return off + scl*x
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/polyutils.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/polyutils.pyc
new file mode 100644
index 0000000..870fa3a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/polyutils.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/setup.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/setup.py
new file mode 100644
index 0000000..cb59ee1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/setup.py
@@ -0,0 +1,11 @@
+from __future__ import division, print_function
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('polynomial', parent_package, top_path)
+    config.add_data_dir('tests')
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/setup.pyc
new file mode 100644
index 0000000..866b9a2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/setup.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/__init__.pyc
new file mode 100644
index 0000000..3e6be04
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/__init__.pyc differ
diff --git
a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_chebyshev.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_chebyshev.py new file mode 100644 index 0000000..7fb7492 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_chebyshev.py @@ -0,0 +1,621 @@ +"""Tests for chebyshev module. + +""" +from __future__ import division, absolute_import, print_function + +from functools import reduce + +import numpy as np +import numpy.polynomial.chebyshev as cheb +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + + +def trim(x): + return cheb.chebtrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestPrivate(object): + + def test__cseries_to_zseries(self): + for i in range(5): + inp = np.array([2] + [1]*i, np.double) + tgt = np.array([.5]*i + [2] + [.5]*i, np.double) + res = cheb._cseries_to_zseries(inp) + assert_equal(res, tgt) + + def test__zseries_to_cseries(self): + for i in range(5): + inp = np.array([.5]*i + [2] + [.5]*i, np.double) + tgt = np.array([2] + [1]*i, np.double) + res = cheb._zseries_to_cseries(inp) + assert_equal(res, tgt) + + +class TestConstants(object): + + def test_chebdomain(self): + assert_equal(cheb.chebdomain, [-1, 1]) + + def test_chebzero(self): + assert_equal(cheb.chebzero, [0]) + + def test_chebone(self): + assert_equal(cheb.chebone, [1]) + + def test_chebx(self): + assert_equal(cheb.chebx, [0, 1]) + + +class TestArithmetic(object): + + def test_chebadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = cheb.chebadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebsub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = cheb.chebsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebmulx(self): + assert_equal(cheb.chebmulx([0]), [0]) + assert_equal(cheb.chebmulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [.5, 0, .5] + assert_equal(cheb.chebmulx(ser), tgt) + + def test_chebmul(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(i + j + 1) + tgt[i + j] += .5 + tgt[abs(i - j)] += .5 + res = cheb.chebmul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebdiv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = cheb.chebadd(ci, cj) + quo, rem = cheb.chebdiv(tgt, ci) + res = cheb.chebadd(cheb.chebmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(cheb.chebmul, [c]*j, np.array([1])) + res = cheb.chebpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(object): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 2., 1.5]) + c2d = 
np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_chebval(self): + #check empty input + assert_equal(cheb.chebval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Tlist] + for i in range(10): + msg = "At i=%d" % i + tgt = y[i] + res = cheb.chebval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(cheb.chebval(x, [1]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) + + def test_chebval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = cheb.chebval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_chebval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = cheb.chebval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_chebgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = cheb.chebgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_chebgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = cheb.chebgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(object): + + def test_chebint(self): + # check exceptions + assert_raises(ValueError, cheb.chebint, [0], .5) + assert_raises(ValueError, cheb.chebint, [0], -1) + assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) + assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) + assert_raises(ValueError, cheb.chebint, [0], scl=[0]) + assert_raises(ValueError, cheb.chebint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = cheb.chebint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i]) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(cheb.chebval(-1, chebint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) + res = cheb.cheb2poly(chebint) + 
assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1) + res = cheb.chebint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k]) + res = cheb.chebint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) + res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) + res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T + res = cheb.chebint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c) for c in c2d]) + res = cheb.chebint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) + res = cheb.chebint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(object): + + def test_chebder(self): + # check exceptions + assert_raises(ValueError, cheb.chebder, [0], .5) + assert_raises(ValueError, cheb.chebder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = cheb.chebder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T + res = cheb.chebder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebder(c) for c in c2d]) + res = cheb.chebder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(object): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_chebvander(self): + # check for 1d x + x = np.arange(3) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + def test_chebvander2d(self): + # also tests chebval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = cheb.chebvander2d(x1, x2, [1, 2]) + tgt = cheb.chebval2d(x1, x2, c) + res = np.dot(van, c.flat) + 
assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_chebvander3d(self): + # also tests chebval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) + tgt = cheb.chebval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting(object): + + def test_chebfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, cheb.chebfit, [1], [1], -1) + assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) + assert_raises(TypeError, cheb.chebfit, [], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) + assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, cheb.chebfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = cheb.chebfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + coef3 = cheb.chebfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + # + coef4 = cheb.chebfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # + coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = cheb.chebfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) + assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1]) + # test fitting only even polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = cheb.chebfit(x, y, 4) + assert_almost_equal(cheb.chebval(x, coef1), y) + coef2 = cheb.chebfit(x, y, [0, 2, 4]) + assert_almost_equal(cheb.chebval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestInterpolate(object): + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, cheb.chebinterpolate, self.f, -1) + assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.) 
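+        # [Editor's sketch, not part of the vendored numpy source] As
+        # background for the interpolation tests here: chebinterpolate
+        # samples the function at the deg + 1 Chebyshev points of the first
+        # kind (chebpts1), so the interpolant agrees with the function
+        # exactly at those nodes:
+        #
+        #     >>> import numpy as np
+        #     >>> import numpy.polynomial.chebyshev as cheb
+        #     >>> f = lambda x: x*(x - 1)*(x - 2)
+        #     >>> c = cheb.chebinterpolate(f, 3)
+        #     >>> nodes = cheb.chebpts1(4)
+        #     >>> np.allclose(cheb.chebval(nodes, c), f(nodes))
+        #     True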
+ + def test_dimensions(self): + for deg in range(1, 5): + assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,)) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(-1, 1, 10) + for deg in range(0, 10): + for p in range(0, deg + 1): + c = cheb.chebinterpolate(powx, deg, (p,)) + assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) + + +class TestCompanion(object): + + def test_raises(self): + assert_raises(ValueError, cheb.chebcompanion, []) + assert_raises(ValueError, cheb.chebcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(cheb.chebcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss(object): + + def test_100(self): + x, w = cheb.chebgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = cheb.chebvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.pi + assert_almost_equal(w.sum(), tgt) + + +class TestMisc(object): + + def test_chebfromroots(self): + res = cheb.chebfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = [0]*i + [1] + res = cheb.chebfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebroots(self): + assert_almost_equal(cheb.chebroots([1]), []) + assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = cheb.chebroots(cheb.chebfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, cheb.chebtrim, coef, -1) + + # Test results + assert_equal(cheb.chebtrim(coef), coef[:-1]) + assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) + assert_equal(cheb.chebtrim(coef, 2), [0]) + + def test_chebline(self): + assert_equal(cheb.chebline(3, 4), [3, 4]) + + def test_cheb2poly(self): + for i in range(10): + assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) + + def test_poly2cheb(self): + for i in range(10): + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11)[1:-1] + tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) + res = cheb.chebweight(x) + assert_almost_equal(res, tgt) + + def test_chebpts1(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts1, 1.5) + assert_raises(ValueError, cheb.chebpts1, 0) + + #test points + tgt = [0] + assert_almost_equal(cheb.chebpts1(1), tgt) + tgt = [-0.70710678118654746, 0.70710678118654746] + assert_almost_equal(cheb.chebpts1(2), tgt) + tgt = [-0.86602540378443871, 0, 0.86602540378443871] + assert_almost_equal(cheb.chebpts1(3), tgt) + tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] + assert_almost_equal(cheb.chebpts1(4), tgt) + + def test_chebpts2(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts2, 1.5) + assert_raises(ValueError, cheb.chebpts2, 1) + + #test points + tgt = [-1, 1] + assert_almost_equal(cheb.chebpts2(2), tgt) + tgt = [-1, 0, 1] + assert_almost_equal(cheb.chebpts2(3), tgt) + tgt = [-1, -0.5, .5, 1] + assert_almost_equal(cheb.chebpts2(4), tgt) + tgt = [-1.0, -0.707106781187, 0, 
0.707106781187, 1.0]
+        assert_almost_equal(cheb.chebpts2(5), tgt)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_chebyshev.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_chebyshev.pyc
new file mode 100644
index 0000000..3a235db
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_chebyshev.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_classes.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_classes.py
new file mode 100644
index 0000000..15e24f9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_classes.py
@@ -0,0 +1,642 @@
+"""Test inter-conversion of different polynomial classes.
+
+This tests the convert and cast methods of all the polynomial classes.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import operator as op
+from numbers import Number
+
+import pytest
+import numpy as np
+from numpy.polynomial import (
+    Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)
+from numpy.testing import (
+    assert_almost_equal, assert_raises, assert_equal, assert_,
+    )
+from numpy.compat import long
+
+
+#
+# fixtures
+#
+
+classes = (
+    Polynomial, Legendre, Chebyshev, Laguerre,
+    Hermite, HermiteE
+    )
+classids = tuple(cls.__name__ for cls in classes)
+
+@pytest.fixture(params=classes, ids=classids)
+def Poly(request):
+    return request.param
+
+#
+# helper functions
+#
+random = np.random.random
+
+
+def assert_poly_almost_equal(p1, p2, msg=""):
+    try:
+        assert_(np.all(p1.domain == p2.domain))
+        assert_(np.all(p1.window == p2.window))
+        assert_almost_equal(p1.coef, p2.coef)
+    except AssertionError:
+        msg = "Result: %s\nTarget: %s" % (p1, p2)
+        raise AssertionError(msg)
+
+
+#
+# Test conversion methods that depend on combinations of two classes.
+#
+
+Poly1 = Poly
+Poly2 = Poly
+
+
+def test_conversion(Poly1, Poly2):
+    x = np.linspace(0, 1, 10)
+    coef = random((3,))
+
+    d1 = Poly1.domain + random((2,))*.25
+    w1 = Poly1.window + random((2,))*.25
+    p1 = Poly1(coef, domain=d1, window=w1)
+
+    d2 = Poly2.domain + random((2,))*.25
+    w2 = Poly2.window + random((2,))*.25
+    p2 = p1.convert(kind=Poly2, domain=d2, window=w2)
+
+    assert_almost_equal(p2.domain, d2)
+    assert_almost_equal(p2.window, w2)
+    assert_almost_equal(p2(x), p1(x))
+
+
+def test_cast(Poly1, Poly2):
+    x = np.linspace(0, 1, 10)
+    coef = random((3,))
+
+    d1 = Poly1.domain + random((2,))*.25
+    w1 = Poly1.window + random((2,))*.25
+    p1 = Poly1(coef, domain=d1, window=w1)
+
+    d2 = Poly2.domain + random((2,))*.25
+    w2 = Poly2.window + random((2,))*.25
+    p2 = Poly2.cast(p1, domain=d2, window=w2)
+
+    assert_almost_equal(p2.domain, d2)
+    assert_almost_equal(p2.window, w2)
+    assert_almost_equal(p2(x), p1(x))
+
+
+#
+# test methods that depend on one class
+#
+
+
+def test_identity(Poly):
+    d = Poly.domain + random((2,))*.25
+    w = Poly.window + random((2,))*.25
+    x = np.linspace(d[0], d[1], 11)
+    p = Poly.identity(domain=d, window=w)
+    assert_equal(p.domain, d)
+    assert_equal(p.window, w)
+    assert_almost_equal(p(x), x)
+
+
+def test_basis(Poly):
+    d = Poly.domain + random((2,))*.25
+    w = Poly.window + random((2,))*.25
+    p = Poly.basis(5, domain=d, window=w)
+    assert_equal(p.domain, d)
+    assert_equal(p.window, w)
+    assert_equal(p.coef, [0]*5 + [1])
+
+
+def test_fromroots(Poly):
+    # check that requested roots are zeros of a polynomial
+    # of correct degree, domain, and window.
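+    # [Editor's sketch, not part of the vendored numpy source] fromroots
+    # builds prod(x - r_i), expressed in the class's own basis; cast back to
+    # the power basis it is monic, which the monic check below relies on:
+    #
+    #     >>> from numpy.polynomial import Chebyshev, Polynomial
+    #     >>> p = Chebyshev.fromroots([-1, 0, 1])    # x**3 - x, in T_k terms
+    #     >>> p([-1, 0, 1])
+    #     array([ 0., 0., 0.])
+    #     >>> Polynomial.cast(p).coef                # leading coefficient 1
+    #     array([ 0., -1., 0., 1.])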
+ d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + r = random((5,)) + p1 = Poly.fromroots(r, domain=d, window=w) + assert_equal(p1.degree(), len(r)) + assert_equal(p1.domain, d) + assert_equal(p1.window, w) + assert_almost_equal(p1(r), 0) + + # check that polynomial is monic + pdom = Polynomial.domain + pwin = Polynomial.window + p2 = Polynomial.cast(p1, domain=pdom, window=pwin) + assert_almost_equal(p2.coef[-1], 1) + + +def test_fit(Poly): + + def f(x): + return x*(x - 1)*(x - 2) + x = np.linspace(0, 3) + y = f(x) + + # check default value of domain and window + p = Poly.fit(x, y, 3) + assert_almost_equal(p.domain, [0, 3]) + assert_almost_equal(p(x), y) + assert_equal(p.degree(), 3) + + # check with given domains and window + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.fit(x, y, 3, domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + + # check with class domain default + p = Poly.fit(x, y, 3, []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + p = Poly.fit(x, y, [0, 1, 2, 3], []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + + # check that fit accepts weights. + w = np.zeros_like(x) + z = y + random(y.shape)*.25 + w[::2] = 1 + p1 = Poly.fit(x[::2], z[::2], 3) + p2 = Poly.fit(x, z, 3, w=w) + p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) + assert_almost_equal(p1(x), p2(x)) + assert_almost_equal(p2(x), p3(x)) + + +def test_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(p1 == p1) + assert_(not p1 == p2) + assert_(not p1 == p3) + assert_(not p1 == p4) + + +def test_not_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(not p1 != p1) + assert_(p1 != p2) + assert_(p1 != p3) + assert_(p1 != p4) + + +def test_add(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 + p2 + assert_poly_almost_equal(p2 + p1, p3) + assert_poly_almost_equal(p1 + c2, p3) + assert_poly_almost_equal(c2 + p1, p3) + assert_poly_almost_equal(p1 + tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) + p1, p3) + assert_poly_almost_equal(p1 + np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) + p1, p3) + assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.add, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.add, p1, Polynomial([0])) + + +def test_sub(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 - p2 + assert_poly_almost_equal(p2 - p1, -p3) + assert_poly_almost_equal(p1 - c2, p3) + assert_poly_almost_equal(c2 - p1, -p3) + assert_poly_almost_equal(p1 - tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) - p1, -p3) + 
assert_poly_almost_equal(p1 - np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) - p1, -p3) + assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.sub, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.sub, p1, Polynomial([0])) + + +def test_mul(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 * p2 + assert_poly_almost_equal(p2 * p1, p3) + assert_poly_almost_equal(p1 * c2, p3) + assert_poly_almost_equal(c2 * p1, p3) + assert_poly_almost_equal(p1 * tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) * p1, p3) + assert_poly_almost_equal(p1 * np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) * p1, p3) + assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) + assert_poly_almost_equal(2 * p1, p1 * Poly([2])) + assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mul, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mul, p1, Polynomial([0])) + + +def test_floordiv(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 // p2, p1) + assert_poly_almost_equal(p4 // c2, p1) + assert_poly_almost_equal(c4 // p2, p1) + assert_poly_almost_equal(p4 // tuple(c2), p1) + assert_poly_almost_equal(tuple(c4) // p2, p1) + assert_poly_almost_equal(p4 // np.array(c2), p1) + assert_poly_almost_equal(np.array(c4) // p2, p1) + assert_poly_almost_equal(2 // p2, Poly([0])) + assert_poly_almost_equal(p2 // 2, 0.5*p2) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) + + +def test_truediv(Poly): + # true division is valid only if the denominator is a Number and + # not a python bool. 
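+    # [Editor's note] Dividing by a scalar just rescales the coefficients,
+    # while true division by another polynomial is in general not a
+    # polynomial at all, so only floordiv/divmod are defined between series.
+    # A minimal sketch:
+    #
+    #     >>> from numpy.polynomial import Polynomial
+    #     >>> p = Polynomial([1, 2, 3])
+    #     >>> ((p * 5) / 5).coef                # scalar denominator: fine
+    #     array([ 1., 2., 3.])
+    #     >>> divmod(p * p + 1, p)[1].coef      # polynomial: use divmod
+    #     array([ 1.])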
+ p1 = Poly([1,2,3]) + p2 = p1 * 5 + + for stype in np.ScalarType: + if not issubclass(stype, Number) or issubclass(stype, bool): + continue + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in (int, long, float): + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in [complex]: + s = stype(5, 0) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for s in [tuple(), list(), dict(), bool(), np.array([1])]: + assert_raises(TypeError, op.truediv, p2, s) + assert_raises(TypeError, op.truediv, s, p2) + for ptype in classes: + assert_raises(TypeError, op.truediv, p2, ptype(1)) + + +def test_mod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 % p2, p3) + assert_poly_almost_equal(p4 % c2, p3) + assert_poly_almost_equal(c4 % p2, p3) + assert_poly_almost_equal(p4 % tuple(c2), p3) + assert_poly_almost_equal(tuple(c4) % p2, p3) + assert_poly_almost_equal(p4 % np.array(c2), p3) + assert_poly_almost_equal(np.array(c4) % p2, p3) + assert_poly_almost_equal(2 % p2, Poly([2])) + assert_poly_almost_equal(p2 % 2, Poly([0])) + assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mod, p1, Polynomial([0])) + + +def test_divmod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + quo, rem = divmod(p4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, c2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(c4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, tuple(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(tuple(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, np.array(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(np.array(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p2, 2) + assert_poly_almost_equal(quo, 0.5*p2) + assert_poly_almost_equal(rem, Poly([0])) + quo, rem = divmod(2, p2) + assert_poly_almost_equal(quo, Poly([0])) + assert_poly_almost_equal(rem, Poly([2])) + assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, divmod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, divmod, p1, Polynomial([0])) + + +def test_roots(Poly): + d = Poly.domain * 1.25 + .25 + w = Poly.window + tgt = np.linspace(d[0], d[1], 5) + res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) + assert_almost_equal(res, tgt) + # default domain and window + res = np.sort(Poly.fromroots(tgt).roots()) + assert_almost_equal(res, 
tgt)
+
+
+def test_degree(Poly):
+    p = Poly.basis(5)
+    assert_equal(p.degree(), 5)
+
+
+def test_copy(Poly):
+    p1 = Poly.basis(5)
+    p2 = p1.copy()
+    assert_(p1 == p2)
+    assert_(p1 is not p2)
+    assert_(p1.coef is not p2.coef)
+    assert_(p1.domain is not p2.domain)
+    assert_(p1.window is not p2.window)
+
+
+def test_integ(Poly):
+    P = Polynomial
+    # Check defaults
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+    # Check with k
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+    p1 = P.cast(p0.integ(k=1))
+    p2 = P.cast(p0.integ(2, k=[1, 1]))
+    assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
+    # Check with lbnd
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+    p1 = P.cast(p0.integ(lbnd=1))
+    p2 = P.cast(p0.integ(2, lbnd=1))
+    assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
+    # Check scaling
+    d = 2*Poly.domain
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+
+
+def test_deriv(Poly):
+    # Check that the derivative is the inverse of integration. It is
+    # assumed that the integration has been checked elsewhere.
+    d = Poly.domain + random((2,))*.25
+    w = Poly.window + random((2,))*.25
+    p1 = Poly([1, 2, 3], domain=d, window=w)
+    p2 = p1.integ(2, k=[1, 2])
+    p3 = p1.integ(1, k=[1])
+    assert_almost_equal(p2.deriv(1).coef, p3.coef)
+    assert_almost_equal(p2.deriv(2).coef, p1.coef)
+    # default domain and window
+    p1 = Poly([1, 2, 3])
+    p2 = p1.integ(2, k=[1, 2])
+    p3 = p1.integ(1, k=[1])
+    assert_almost_equal(p2.deriv(1).coef, p3.coef)
+    assert_almost_equal(p2.deriv(2).coef, p1.coef)
+
+
+def test_linspace(Poly):
+    d = Poly.domain + random((2,))*.25
+    w = Poly.window + random((2,))*.25
+    p = Poly([1, 2, 3], domain=d, window=w)
+    # check default domain
+    xtgt = np.linspace(d[0], d[1], 20)
+    ytgt = p(xtgt)
+    xres, yres = p.linspace(20)
+    assert_almost_equal(xres, xtgt)
+    assert_almost_equal(yres, ytgt)
+    # check specified domain
+    xtgt = np.linspace(0, 2, 20)
+    ytgt = p(xtgt)
+    xres, yres = p.linspace(20, domain=[0, 2])
+    assert_almost_equal(xres, xtgt)
+    assert_almost_equal(yres, ytgt)
+
+
+def test_pow(Poly):
+    d = Poly.domain + random((2,))*.25
+    w = Poly.window + random((2,))*.25
+    tgt = Poly([1], domain=d, window=w)
+    tst = Poly([1, 2, 3], domain=d, window=w)
+    for i in range(5):
+        assert_poly_almost_equal(tst**i, tgt)
+        tgt = tgt * tst
+    # default domain and window
+    tgt = Poly([1])
+    tst = Poly([1, 2, 3])
+    for i in range(5):
+        assert_poly_almost_equal(tst**i, tgt)
+        tgt = tgt * tst
+    # check error for invalid powers
+    assert_raises(ValueError, op.pow, tgt, 1.5)
+    assert_raises(ValueError, op.pow, tgt, -1)
+
+
+def test_call(Poly):
+    P = Polynomial
+    d = Poly.domain
+    x = np.linspace(d[0], d[1], 11)
+
+    # Check defaults
+    p = Poly.cast(P([1, 2, 3]))
+    tgt = 1 + x*(2 + 3*x)
+    res = p(x)
+    assert_almost_equal(res, tgt)
+
+
+def test_cutdeg(Poly):
+    p = Poly([1, 2, 3])
+    assert_raises(ValueError, p.cutdeg, .5)
+    assert_raises(ValueError, p.cutdeg, -1)
+    assert_equal(len(p.cutdeg(3)), 3)
+    assert_equal(len(p.cutdeg(2)), 3)
+    assert_equal(len(p.cutdeg(1)), 2)
+    assert_equal(len(p.cutdeg(0)), 1)
+
+
+def test_truncate(Poly):
+    p = Poly([1, 2, 3])
+    assert_raises(ValueError, p.truncate, .5)
+    assert_raises(ValueError, p.truncate, 0)
+
assert_equal(len(p.truncate(4)), 3) + assert_equal(len(p.truncate(3)), 3) + assert_equal(len(p.truncate(2)), 2) + assert_equal(len(p.truncate(1)), 1) + + +def test_trim(Poly): + c = [1, 1e-6, 1e-12, 0] + p = Poly(c) + assert_equal(p.trim().coef, c[:3]) + assert_equal(p.trim(1e-10).coef, c[:2]) + assert_equal(p.trim(1e-5).coef, c[:1]) + + +def test_mapparms(Poly): + # check with defaults. Should be identity. + d = Poly.domain + w = Poly.window + p = Poly([1], domain=d, window=w) + assert_almost_equal([0, 1], p.mapparms()) + # + w = 2*d + 1 + p = Poly([1], domain=d, window=w) + assert_almost_equal([1, 2], p.mapparms()) + + +def test_ufunc_override(Poly): + p = Poly([1, 2, 3]) + x = np.ones(3) + assert_raises(TypeError, np.add, p, x) + assert_raises(TypeError, np.add, x, p) + + + +class TestLatexRepr(object): + """Test the latex repr used by ipython """ + + def as_latex(self, obj): + # right now we ignore the formatting of scalars in our tests, since + # it makes them too verbose. Ideally, the formatting of scalars will + # be fixed such that tests below continue to pass + obj._repr_latex_scalar = lambda x: str(x) + try: + return obj._repr_latex_() + finally: + del obj._repr_latex_scalar + + def test_simple_polynomial(self): + # default input + p = Polynomial([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') + + # translated input + p = Polynomial([1, 2, 3], domain=[-2, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') + + # scaled input + p = Polynomial([1, 2, 3], domain=[-0.5, 0.5]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') + + # affine input + p = Polynomial([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') + + def test_basis_func(self): + p = Chebyshev([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') + # affine input - check no surplus parens are added + p = Chebyshev([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') + + def test_multichar_basis_func(self): + p = HermiteE([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') + + +# +# Test class method that only exists for some classes +# + + +class TestInterpolate(object): + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) + assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) 
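+        # [Editor's sketch, not part of the vendored numpy source] The
+        # classmethod wraps chebinterpolate and maps the Chebyshev nodes into
+        # `domain`, so a smooth function is reproduced to high accuracy on
+        # that interval:
+        #
+        #     >>> import numpy as np
+        #     >>> from numpy.polynomial import Chebyshev
+        #     >>> p = Chebyshev.interpolate(np.sin, 15, domain=[0, 2])
+        #     >>> x = np.linspace(0, 2, 7)
+        #     >>> np.allclose(p(x), np.sin(x))
+        #     True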
+ + def test_dimensions(self): + for deg in range(1, 5): + assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(0, 2, 10) + for deg in range(0, 10): + for t in range(0, deg + 1): + p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) + assert_almost_equal(p(x), powx(x, t), decimal=12) diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_classes.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_classes.pyc new file mode 100644 index 0000000..339a885 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_classes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite.py new file mode 100644 index 0000000..1287ef3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite.py @@ -0,0 +1,557 @@ +"""Tests for hermite module. + +""" +from __future__ import division, absolute_import, print_function + +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite as herm +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +H0 = np.array([1]) +H1 = np.array([0, 2]) +H2 = np.array([-2, 0, 4]) +H3 = np.array([0, -12, 0, 8]) +H4 = np.array([12, 0, -48, 0, 16]) +H5 = np.array([0, 120, 0, -160, 0, 32]) +H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) +H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) +H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) +H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) + +Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] + + +def trim(x): + return herm.hermtrim(x, tol=1e-6) + + +class TestConstants(object): + + def test_hermdomain(self): + assert_equal(herm.hermdomain, [-1, 1]) + + def test_hermzero(self): + assert_equal(herm.hermzero, [0]) + + def test_hermone(self): + assert_equal(herm.hermone, [1]) + + def test_hermx(self): + assert_equal(herm.hermx, [0, .5]) + + +class TestArithmetic(object): + x = np.linspace(-3, 3, 100) + + def test_hermadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herm.hermadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermsub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herm.hermsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermmulx(self): + assert_equal(herm.hermmulx([0]), [0]) + assert_equal(herm.hermmulx([1]), [0, .5]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, .5] + assert_equal(herm.hermmulx(ser), tgt) + + def test_hermmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herm.hermval(self.x, pol1) + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + pol2 = [0]*j + [1] + val2 = herm.hermval(self.x, pol2) + pol3 = herm.hermmul(pol1, pol2) + val3 = herm.hermval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermdiv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + 
[1] + cj = [0]*j + [1] + tgt = herm.hermadd(ci, cj) + quo, rem = herm.hermdiv(tgt, ci) + res = herm.hermadd(herm.hermmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(herm.hermmul, [c]*j, np.array([1])) + res = herm.hermpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(object): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 1., .75]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermval(self): + #check empty input + assert_equal(herm.hermval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Hlist] + for i in range(10): + msg = "At i=%d" % i + tgt = y[i] + res = herm.hermval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herm.hermval(x, [1]).shape, dims) + assert_equal(herm.hermval(x, [1, 0]).shape, dims) + assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) + + def test_hermval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herm.hermval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herm.hermval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herm.hermgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herm.hermgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(object): + + def test_hermint(self): + # check exceptions + assert_raises(ValueError, herm.hermint, [0], .5) + assert_raises(ValueError, herm.hermint, [0], -1) + assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) + assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) + assert_raises(ValueError, herm.hermint, [0], scl=[0]) + assert_raises(ValueError, herm.hermint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herm.hermint([0], m=i, k=k) + assert_almost_equal(res, [0, .5]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i]) + res = herm.herm2poly(hermint) + 
assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herm.hermval(-1, hermint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1) + res = herm.hermint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k]) + res = herm.hermint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) + res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], scl=2) + res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T + res = herm.hermint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c) for c in c2d]) + res = herm.hermint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) + res = herm.hermint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(object): + + def test_hermder(self): + # check exceptions + assert_raises(ValueError, herm.hermder, [0], .5) + assert_raises(ValueError, herm.hermder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herm.hermder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T + res = herm.hermder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermder(c) for c in c2d]) + res = herm.hermder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(object): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermvander(self): + # check for 1d x + x = np.arange(3) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 4)) + for i 
in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + def test_hermvander2d(self): + # also tests hermval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herm.hermvander2d(x1, x2, [1, 2]) + tgt = herm.hermval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermvander3d(self): + # also tests hermval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) + tgt = herm.hermval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting(object): + + def test_hermfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, herm.hermfit, [1], [1], -1) + assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) + assert_raises(TypeError, herm.hermfit, [], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) + assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herm.hermfit, [1], [1], [-1,]) + assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herm.hermfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herm.hermfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + coef3 = herm.hermfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + # + coef4 = herm.hermfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # + coef2d = herm.hermfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herm.hermfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. 
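+        # [Editor's note] For x = [1, 1j, -1, -1j] the plain (unconjugated)
+        # sum of squares vanishes, so a naive column scaling of the
+        # least-squares system could divide by zero; this case appears to
+        # exist to guard that internal scaling:
+        #
+        #     >>> x = [1, 1j, -1, -1j]
+        #     >>> sum(v**2 for v in x)
+        #     0j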
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
+        assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
+        # test fitting only even Hermite polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = herm.hermfit(x, y, 4)
+        assert_almost_equal(herm.hermval(x, coef1), y)
+        coef2 = herm.hermfit(x, y, [0, 2, 4])
+        assert_almost_equal(herm.hermval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+
+class TestCompanion(object):
+
+    def test_raises(self):
+        assert_raises(ValueError, herm.hermcompanion, [])
+        assert_raises(ValueError, herm.hermcompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0]*i + [1]
+            assert_(herm.hermcompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
+
+
+class TestGauss(object):
+
+    def test_100(self):
+        x, w = herm.hermgauss(100)
+
+        # test orthogonality. Note that the results need to be normalized,
+        # otherwise the huge values that can arise from fast growing
+        # functions like Laguerre can be very confusing.
+        v = herm.hermvander(x, 99)
+        vv = np.dot(v.T * w, v)
+        vd = 1/np.sqrt(vv.diagonal())
+        vv = vd[:, None] * vv * vd
+        assert_almost_equal(vv, np.eye(100))
+
+        # check that the integral of 1 is correct
+        tgt = np.sqrt(np.pi)
+        assert_almost_equal(w.sum(), tgt)
+
+
+class TestMisc(object):
+
+    def test_hermfromroots(self):
+        res = herm.hermfromroots([])
+        assert_almost_equal(trim(res), [1])
+        for i in range(1, 5):
+            roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
+            pol = herm.hermfromroots(roots)
+            res = herm.hermval(roots, pol)
+            tgt = 0
+            assert_(len(pol) == i + 1)
+            assert_almost_equal(herm.herm2poly(pol)[-1], 1)
+            assert_almost_equal(res, tgt)
+
+    def test_hermroots(self):
+        assert_almost_equal(herm.hermroots([1]), [])
+        assert_almost_equal(herm.hermroots([1, 1]), [-.5])
+        for i in range(2, 5):
+            tgt = np.linspace(-1, 1, i)
+            res = herm.hermroots(herm.hermfromroots(tgt))
+            assert_almost_equal(trim(res), trim(tgt))
+
+    def test_hermtrim(self):
+        coef = [2, -1, 1, 0]
+
+        # Test exceptions
+        assert_raises(ValueError, herm.hermtrim, coef, -1)
+
+        # Test results
+        assert_equal(herm.hermtrim(coef), coef[:-1])
+        assert_equal(herm.hermtrim(coef, 1), coef[:-3])
+        assert_equal(herm.hermtrim(coef, 2), [0])
+
+    def test_hermline(self):
+        assert_equal(herm.hermline(3, 4), [3, 2])
+
+    def test_herm2poly(self):
+        for i in range(10):
+            assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i])
+
+    def test_poly2herm(self):
+        for i in range(10):
+            assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1])
+
+    def test_weight(self):
+        x = np.linspace(-5, 5, 11)
+        tgt = np.exp(-x**2)
+        res = herm.hermweight(x)
+        assert_almost_equal(res, tgt)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite.pyc
new file mode 100644
index 0000000..767e9c1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite_e.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite_e.py
new file mode 100644
index 0000000..ccb44ad
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite_e.py
@@ -0,0 +1,558 @@
+"""Tests for hermite_e module.
+ +""" +from __future__ import division, absolute_import, print_function + +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite_e as herme +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +He0 = np.array([1]) +He1 = np.array([0, 1]) +He2 = np.array([-1, 0, 1]) +He3 = np.array([0, -3, 0, 1]) +He4 = np.array([3, 0, -6, 0, 1]) +He5 = np.array([0, 15, 0, -10, 0, 1]) +He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) +He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) +He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) +He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) + +Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] + + +def trim(x): + return herme.hermetrim(x, tol=1e-6) + + +class TestConstants(object): + + def test_hermedomain(self): + assert_equal(herme.hermedomain, [-1, 1]) + + def test_hermezero(self): + assert_equal(herme.hermezero, [0]) + + def test_hermeone(self): + assert_equal(herme.hermeone, [1]) + + def test_hermex(self): + assert_equal(herme.hermex, [0, 1]) + + +class TestArithmetic(object): + x = np.linspace(-3, 3, 100) + + def test_hermeadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herme.hermeadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermesub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herme.hermesub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermemulx(self): + assert_equal(herme.hermemulx([0]), [0]) + assert_equal(herme.hermemulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, 1] + assert_equal(herme.hermemulx(ser), tgt) + + def test_hermemul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herme.hermeval(self.x, pol1) + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + pol2 = [0]*j + [1] + val2 = herme.hermeval(self.x, pol2) + pol3 = herme.hermemul(pol1, pol2) + val3 = herme.hermeval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermediv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = herme.hermeadd(ci, cj) + quo, rem = herme.hermediv(tgt, ci) + res = herme.hermeadd(herme.hermemul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermepow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(herme.hermemul, [c]*j, np.array([1])) + res = herme.hermepow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(object): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([4., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermeval(self): + #check empty input + assert_equal(herme.hermeval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Helist] + for i in range(10): + msg = "At i=%d" % i + tgt = y[i] + res = herme.hermeval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) 
+ + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herme.hermeval(x, [1]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) + + def test_hermeval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herme.hermeval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermeval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herme.hermeval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermegrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herme.hermegrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermegrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herme.hermegrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(object): + + def test_hermeint(self): + # check exceptions + assert_raises(ValueError, herme.hermeint, [0], .5) + assert_raises(ValueError, herme.hermeint, [0], -1) + assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) + assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) + assert_raises(ValueError, herme.hermeint, [0], scl=[0]) + assert_raises(ValueError, herme.hermeint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herme.hermeint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i]) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herme.hermeval(-1, hermeint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1) + res = herme.hermeint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = 
pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k]) + res = herme.hermeint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) + res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) + res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T + res = herme.hermeint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c) for c in c2d]) + res = herme.hermeint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) + res = herme.hermeint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(object): + + def test_hermeder(self): + # check exceptions + assert_raises(ValueError, herme.hermeder, [0], .5) + assert_raises(ValueError, herme.hermeder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herme.hermeder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder( + herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T + res = herme.hermeder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeder(c) for c in c2d]) + res = herme.hermeder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(object): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermevander(self): + # check for 1d x + x = np.arange(3) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + def test_hermevander2d(self): + # also tests hermeval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herme.hermevander2d(x1, x2, [1, 2]) + tgt = herme.hermeval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermevander3d(self): + # also tests hermeval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) + tgt = herme.hermeval3d(x1, x2, x3, 
c)
+        res = np.dot(van, c.flat)
+        assert_almost_equal(res, tgt)
+
+        # check shape
+        van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
+        assert_(van.shape == (1, 5, 24))
+
+
+class TestFitting(object):
+
+    def test_hermefit(self):
+        def f(x):
+            return x*(x - 1)*(x - 2)
+
+        def f2(x):
+            return x**4 + x**2 + 1
+
+        # Test exceptions
+        assert_raises(ValueError, herme.hermefit, [1], [1], -1)
+        assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
+        assert_raises(TypeError, herme.hermefit, [], [1], 0)
+        assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
+        assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
+        assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
+        assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
+        assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1])
+        assert_raises(ValueError, herme.hermefit, [1], [1], [-1,])
+        assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6])
+        assert_raises(TypeError, herme.hermefit, [1], [1], [])
+
+        # Test fit
+        x = np.linspace(0, 2)
+        y = f(x)
+        #
+        coef3 = herme.hermefit(x, y, 3)
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(herme.hermeval(x, coef3), y)
+        coef3 = herme.hermefit(x, y, [0, 1, 2, 3])
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(herme.hermeval(x, coef3), y)
+        #
+        coef4 = herme.hermefit(x, y, 4)
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(herme.hermeval(x, coef4), y)
+        coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(herme.hermeval(x, coef4), y)
+        # check things still work if deg is not in strict increasing
+        coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(herme.hermeval(x, coef4), y)
+        #
+        coef2d = herme.hermefit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        y[0::2] = 0
+        wcoef3 = herme.hermefit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex values x points whose square
+        # is zero when summed.
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(herme.hermefit(x, x, 1), [0, 1])
+        assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1])
+        # test fitting only even HermiteE polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = herme.hermefit(x, y, 4)
+        assert_almost_equal(herme.hermeval(x, coef1), y)
+        coef2 = herme.hermefit(x, y, [0, 2, 4])
+        assert_almost_equal(herme.hermeval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+
+class TestCompanion(object):
+
+    def test_raises(self):
+        assert_raises(ValueError, herme.hermecompanion, [])
+        assert_raises(ValueError, herme.hermecompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0]*i + [1]
+            assert_(herme.hermecompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
+
+
+class TestGauss(object):
+
+    def test_100(self):
+        x, w = herme.hermegauss(100)
+
+        # test orthogonality.
Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = herme.hermevander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(2*np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc(object): + + def test_hermefromroots(self): + res = herme.hermefromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = herme.hermefromroots(roots) + res = herme.hermeval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herme.herme2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermeroots(self): + assert_almost_equal(herme.hermeroots([1]), []) + assert_almost_equal(herme.hermeroots([1, 1]), [-1]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herme.hermeroots(herme.hermefromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermetrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herme.hermetrim, coef, -1) + + # Test results + assert_equal(herme.hermetrim(coef), coef[:-1]) + assert_equal(herme.hermetrim(coef, 1), coef[:-3]) + assert_equal(herme.hermetrim(coef, 2), [0]) + + def test_hermeline(self): + assert_equal(herme.hermeline(3, 4), [3, 4]) + + def test_herme2poly(self): + for i in range(10): + assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) + + def test_poly2herme(self): + for i in range(10): + assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-.5*x**2) + res = herme.hermeweight(x) + assert_almost_equal(res, tgt) diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite_e.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite_e.pyc new file mode 100644 index 0000000..a03b515 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_hermite_e.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_laguerre.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_laguerre.py new file mode 100644 index 0000000..3ababec --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_laguerre.py @@ -0,0 +1,539 @@ +"""Tests for laguerre module. 
+ +""" +from __future__ import division, absolute_import, print_function + +from functools import reduce + +import numpy as np +import numpy.polynomial.laguerre as lag +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +L0 = np.array([1])/1 +L1 = np.array([1, -1])/1 +L2 = np.array([2, -4, 1])/2 +L3 = np.array([6, -18, 9, -1])/6 +L4 = np.array([24, -96, 72, -16, 1])/24 +L5 = np.array([120, -600, 600, -200, 25, -1])/120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 + +Llist = [L0, L1, L2, L3, L4, L5, L6] + + +def trim(x): + return lag.lagtrim(x, tol=1e-6) + + +class TestConstants(object): + + def test_lagdomain(self): + assert_equal(lag.lagdomain, [0, 1]) + + def test_lagzero(self): + assert_equal(lag.lagzero, [0]) + + def test_lagone(self): + assert_equal(lag.lagone, [1]) + + def test_lagx(self): + assert_equal(lag.lagx, [1, -1]) + + +class TestArithmetic(object): + x = np.linspace(-3, 3, 100) + + def test_lagadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = lag.lagadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagsub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = lag.lagsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagmulx(self): + assert_equal(lag.lagmulx([0]), [0]) + assert_equal(lag.lagmulx([1]), [1, -1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] + assert_almost_equal(lag.lagmulx(ser), tgt) + + def test_lagmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = lag.lagval(self.x, pol1) + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + pol2 = [0]*j + [1] + val2 = lag.lagval(self.x, pol2) + pol3 = lag.lagmul(pol1, pol2) + val3 = lag.lagval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_lagdiv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = lag.lagadd(ci, cj) + quo, rem = lag.lagdiv(tgt, ci) + res = lag.lagadd(lag.lagmul(quo, ci), rem) + assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(lag.lagmul, [c]*j, np.array([1])) + res = lag.lagpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(object): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([9., -14., 6.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_lagval(self): + #check empty input + assert_equal(lag.lagval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(7): + msg = "At i=%d" % i + tgt = y[i] + res = lag.lagval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(lag.lagval(x, [1]).shape, dims) + assert_equal(lag.lagval(x, [1, 0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0, 
0]).shape, dims) + + def test_lagval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = lag.lagval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_lagval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = lag.lagval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_laggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = lag.laggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_laggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = lag.laggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(object): + + def test_lagint(self): + # check exceptions + assert_raises(ValueError, lag.lagint, [0], .5) + assert_raises(ValueError, lag.lagint, [0], -1) + assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) + assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) + assert_raises(ValueError, lag.lagint, [0], scl=[0]) + assert_raises(ValueError, lag.lagint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = lag.lagint([0], m=i, k=k) + assert_almost_equal(res, [1, -1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i]) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(lag.lagval(-1, lagint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1) + res = lag.lagint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k]) + res = lag.lagint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) + res = lag.lagint(pol, m=j, 
k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], scl=2) + res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T + res = lag.lagint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c) for c in c2d]) + res = lag.lagint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) + res = lag.lagint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(object): + + def test_lagder(self): + # check exceptions + assert_raises(ValueError, lag.lagder, [0], .5) + assert_raises(ValueError, lag.lagder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = lag.lagder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T + res = lag.lagder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagder(c) for c in c2d]) + res = lag.lagder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(object): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_lagvander(self): + # check for 1d x + x = np.arange(3) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + def test_lagvander2d(self): + # also tests lagval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = lag.lagvander2d(x1, x2, [1, 2]) + tgt = lag.lagval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_lagvander3d(self): + # also tests lagval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) + tgt = lag.lagval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting(object): + + def test_lagfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, lag.lagfit, [1], [1], -1) + assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) + assert_raises(TypeError, lag.lagfit, [], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) + 
assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, lag.lagfit, [1], [1], [-1,]) + assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, lag.lagfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = lag.lagfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + coef3 = lag.lagfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + # + coef4 = lag.lagfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + # + coef2d = lag.lagfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = lag.lagfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) + assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) + + +class TestCompanion(object): + + def test_raises(self): + assert_raises(ValueError, lag.lagcompanion, []) + assert_raises(ValueError, lag.lagcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(lag.lagcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) + + +class TestGauss(object): + + def test_100(self): + x, w = lag.laggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
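# Editor's aside (illustrative sketch, not part of the vendored numpy file):
# laggauss returns nodes and weights for Gauss-Laguerre quadrature against
# the weight exp(-x) on [0, inf), so low-order moments give a quick sanity
# check; the vander-based orthogonality test below does the same job for
# all degrees at once.
import numpy as np
import numpy.polynomial.laguerre as lag

xg, wg = lag.laggauss(20)
assert np.isclose(np.dot(wg, xg), 1.0)      # integral of x*exp(-x) = 1! = 1
assert np.isclose(np.dot(wg, xg**3), 6.0)   # integral of x**3*exp(-x) = 3! = 6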
+ v = lag.lagvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 1.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc(object): + + def test_lagfromroots(self): + res = lag.lagfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = lag.lagfromroots(roots) + res = lag.lagval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(lag.lag2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_lagroots(self): + assert_almost_equal(lag.lagroots([1]), []) + assert_almost_equal(lag.lagroots([0, 1]), [1]) + for i in range(2, 5): + tgt = np.linspace(0, 3, i) + res = lag.lagroots(lag.lagfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, lag.lagtrim, coef, -1) + + # Test results + assert_equal(lag.lagtrim(coef), coef[:-1]) + assert_equal(lag.lagtrim(coef, 1), coef[:-3]) + assert_equal(lag.lagtrim(coef, 2), [0]) + + def test_lagline(self): + assert_equal(lag.lagline(3, 4), [7, -4]) + + def test_lag2poly(self): + for i in range(7): + assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) + + def test_poly2lag(self): + for i in range(7): + assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(0, 10, 11) + tgt = np.exp(-x) + res = lag.lagweight(x) + assert_almost_equal(res, tgt) diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_laguerre.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_laguerre.pyc new file mode 100644 index 0000000..60b282a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_laguerre.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_legendre.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_legendre.py new file mode 100644 index 0000000..a23086d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_legendre.py @@ -0,0 +1,558 @@ +"""Tests for legendre module. 
+ +""" +from __future__ import division, absolute_import, print_function + +from functools import reduce + +import numpy as np +import numpy.polynomial.legendre as leg +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +L0 = np.array([1]) +L1 = np.array([0, 1]) +L2 = np.array([-1, 0, 3])/2 +L3 = np.array([0, -3, 0, 5])/2 +L4 = np.array([3, 0, -30, 0, 35])/8 +L5 = np.array([0, 15, 0, -70, 0, 63])/8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 + +Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] + + +def trim(x): + return leg.legtrim(x, tol=1e-6) + + +class TestConstants(object): + + def test_legdomain(self): + assert_equal(leg.legdomain, [-1, 1]) + + def test_legzero(self): + assert_equal(leg.legzero, [0]) + + def test_legone(self): + assert_equal(leg.legone, [1]) + + def test_legx(self): + assert_equal(leg.legx, [0, 1]) + + +class TestArithmetic(object): + x = np.linspace(-1, 1, 100) + + def test_legadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = leg.legadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legsub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = leg.legsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legmulx(self): + assert_equal(leg.legmulx([0]), [0]) + assert_equal(leg.legmulx([1]), [0, 1]) + for i in range(1, 5): + tmp = 2*i + 1 + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] + assert_equal(leg.legmulx(ser), tgt) + + def test_legmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = leg.legval(self.x, pol1) + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + pol2 = [0]*j + [1] + val2 = leg.legval(self.x, pol2) + pol3 = leg.legmul(pol1, pol2) + val3 = leg.legval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_legdiv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = leg.legadd(ci, cj) + quo, rem = leg.legdiv(tgt, ci) + res = leg.legadd(leg.legmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legpow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(leg.legmul, [c]*j, np.array([1])) + res = leg.legpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(object): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2., 2., 2.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_legval(self): + #check empty input + assert_equal(leg.legval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(10): + msg = "At i=%d" % i + tgt = y[i] + res = leg.legval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + 
dims = [2]*i + x = np.zeros(dims) + assert_equal(leg.legval(x, [1]).shape, dims) + assert_equal(leg.legval(x, [1, 0]).shape, dims) + assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) + + def test_legval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = leg.legval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_legval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = leg.legval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_leggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = leg.leggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_leggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = leg.leggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(object): + + def test_legint(self): + # check exceptions + assert_raises(ValueError, leg.legint, [0], .5) + assert_raises(ValueError, leg.legint, [0], -1) + assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) + assert_raises(ValueError, leg.legint, [0], lbnd=[0]) + assert_raises(ValueError, leg.legint, [0], scl=[0]) + assert_raises(ValueError, leg.legint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = leg.legint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i]) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(leg.legval(-1, legint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], scl=2) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1) + res = leg.legint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k]) + res = leg.legint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): 
+ for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) + res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], scl=2) + res = leg.legint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legint(c) for c in c2d.T]).T + res = leg.legint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c) for c in c2d]) + res = leg.legint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) + res = leg.legint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(object): + + def test_legder(self): + # check exceptions + assert_raises(ValueError, leg.legder, [0], .5) + assert_raises(ValueError, leg.legder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = leg.legder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legder(c) for c in c2d.T]).T + res = leg.legder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legder(c) for c in c2d]) + res = leg.legder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(object): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_legvander(self): + # check for 1d x + x = np.arange(3) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + def test_legvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = leg.legvander2d(x1, x2, [1, 2]) + tgt = leg.legval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_legvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) + tgt = leg.legval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting(object): + + def test_legfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, 
leg.legfit, [1], [1], -1) + assert_raises(TypeError, leg.legfit, [[1]], [1], 0) + assert_raises(TypeError, leg.legfit, [], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) + assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, leg.legfit, [1], [1], [-1,]) + assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, leg.legfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = leg.legfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + coef3 = leg.legfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + # + coef4 = leg.legfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # + coef2d = leg.legfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = leg.legfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(leg.legfit(x, x, 1), [0, 1]) + assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = leg.legfit(x, y, 4) + assert_almost_equal(leg.legval(x, coef1), y) + coef2 = leg.legfit(x, y, [0, 2, 4]) + assert_almost_equal(leg.legval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion(object): + + def test_raises(self): + assert_raises(ValueError, leg.legcompanion, []) + assert_raises(ValueError, leg.legcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(leg.legcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(leg.legcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss(object): + + def test_100(self): + x, w = leg.leggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
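# Editor's aside (illustrative sketch, not part of the vendored numpy file):
# leggauss is plain Gauss-Legendre quadrature on [-1, 1] (weight function 1),
# so simple integrals can be checked directly before the full vander-based
# orthogonality test below.
import numpy as np
import numpy.polynomial.legendre as leg

xg, wg = leg.leggauss(10)
assert np.isclose(wg.sum(), 2.0)                # integral of 1 over [-1, 1]
assert np.isclose(np.dot(wg, xg**2), 2.0/3.0)   # integral of x**2 over [-1, 1]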
+ v = leg.legvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 2.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc(object): + + def test_legfromroots(self): + res = leg.legfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = leg.legfromroots(roots) + res = leg.legval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(leg.leg2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_legroots(self): + assert_almost_equal(leg.legroots([1]), []) + assert_almost_equal(leg.legroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = leg.legroots(leg.legfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, leg.legtrim, coef, -1) + + # Test results + assert_equal(leg.legtrim(coef), coef[:-1]) + assert_equal(leg.legtrim(coef, 1), coef[:-3]) + assert_equal(leg.legtrim(coef, 2), [0]) + + def test_legline(self): + assert_equal(leg.legline(3, 4), [3, 4]) + + def test_leg2poly(self): + for i in range(10): + assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) + + def test_poly2leg(self): + for i in range(10): + assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11) + tgt = 1. + res = leg.legweight(x) + assert_almost_equal(res, tgt) diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_legendre.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_legendre.pyc new file mode 100644 index 0000000..71cfa5a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_legendre.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polynomial.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polynomial.py new file mode 100644 index 0000000..0c93be2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polynomial.py @@ -0,0 +1,578 @@ +"""Tests for polynomial module. 
+ +""" +from __future__ import division, absolute_import, print_function + +from functools import reduce + +import numpy as np +import numpy.polynomial.polynomial as poly +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + + +def trim(x): + return poly.polytrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestConstants(object): + + def test_polydomain(self): + assert_equal(poly.polydomain, [-1, 1]) + + def test_polyzero(self): + assert_equal(poly.polyzero, [0]) + + def test_polyone(self): + assert_equal(poly.polyone, [1]) + + def test_polyx(self): + assert_equal(poly.polyx, [0, 1]) + + +class TestArithmetic(object): + + def test_polyadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = poly.polyadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polysub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = poly.polysub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polymulx(self): + assert_equal(poly.polymulx([0]), [0]) + assert_equal(poly.polymulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i + 1) + [1] + assert_equal(poly.polymulx(ser), tgt) + + def test_polymul(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(i + j + 1) + tgt[i + j] += 1 + res = poly.polymul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polydiv(self): + # check zero division + assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) + + # check scalar division + quo, rem = poly.polydiv([2], [2]) + assert_equal((quo, rem), (1, 0)) + quo, rem = poly.polydiv([2, 2], [2]) + assert_equal((quo, rem), ((1, 1), 0)) + + # check rest. 
+ for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1, 2] + cj = [0]*j + [1, 2] + tgt = poly.polyadd(ci, cj) + quo, rem = poly.polydiv(tgt, ci) + res = poly.polyadd(poly.polymul(quo, ci), rem) + assert_equal(res, tgt, err_msg=msg) + + def test_polypow(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + c = np.arange(i + 1) + tgt = reduce(poly.polymul, [c]*j, np.array([1])) + res = poly.polypow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(object): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([1., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = poly.polyval(x, [1., 2., 3.]) + + def test_polyval(self): + #check empty input + assert_equal(poly.polyval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(5): + tgt = y[i] + res = poly.polyval(x, [0]*i + [1]) + assert_almost_equal(res, tgt) + tgt = x*(x**2 - 1) + res = poly.polyval(x, [0, -1, 0, 1]) + assert_almost_equal(res, tgt) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(poly.polyval(x, [1]).shape, dims) + assert_equal(poly.polyval(x, [1, 0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) + + def test_polyvalfromroots(self): + # check exception for broadcasting x values over root array with + # too few dimensions + assert_raises(ValueError, poly.polyvalfromroots, + [1], [1], tensor=False) + + # check empty input + assert_equal(poly.polyvalfromroots([], [1]).size, 0) + assert_(poly.polyvalfromroots([], [1]).shape == (0,)) + + # check empty input + multidimensional roots + assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) + assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) + + # check scalar input + assert_equal(poly.polyvalfromroots(1, 1), 0) + assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) + + # check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(1, 5): + tgt = y[i] + res = poly.polyvalfromroots(x, [0]*i) + assert_almost_equal(res, tgt) + tgt = x*(x - 1)*(x + 1) + res = poly.polyvalfromroots(x, [-1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) + + # check compatibility with factorization + ptest = [15, 2, -16, -2, 1] + r = poly.polyroots(ptest) + x = np.linspace(-1, 1) + assert_almost_equal(poly.polyval(x, ptest), + poly.polyvalfromroots(x, r)) + + # check multidimensional arrays of roots and values + # check tensor=False + rshape = (3, 5) + x = np.arange(-3, 2) + r = np.random.randint(-5, 5, size=rshape) + res = poly.polyvalfromroots(x, r, tensor=False) + tgt = np.empty(r.shape[1:]) + for ii in range(tgt.size): + tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) + assert_equal(res, tgt) + + # check tensor=True + x = np.vstack([x, 2*x]) + res = poly.polyvalfromroots(x, r, tensor=True) + tgt = np.empty(r.shape[1:] + x.shape) + for ii in range(r.shape[1]): + for jj in range(x.shape[0]): + tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) + assert_equal(res, tgt) + + def test_polyval2d(self): + x1, x2, x3 = self.x + 
y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = poly.polyval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_polyval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = poly.polyval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_polygrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = poly.polygrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_polygrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = poly.polygrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(object): + + def test_polyint(self): + # check exceptions + assert_raises(ValueError, poly.polyint, [0], .5) + assert_raises(ValueError, poly.polyint, [0], -1) + assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) + assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) + assert_raises(ValueError, poly.polyint, [0], scl=[0]) + assert_raises(ValueError, poly.polyint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = poly.polyint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + res = poly.polyint(pol, m=1, k=[i]) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + res = poly.polyint(pol, m=1, k=[i], lbnd=-1) + assert_almost_equal(poly.polyval(-1, res), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + res = poly.polyint(pol, m=1, k=[i], scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1) + res = poly.polyint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k]) + res = poly.polyint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) + res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = 
pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], scl=2) + res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T + res = poly.polyint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c) for c in c2d]) + res = poly.polyint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) + res = poly.polyint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(object): + + def test_polyder(self): + # check exceptions + assert_raises(ValueError, poly.polyder, [0], .5) + assert_raises(ValueError, poly.polyder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = poly.polyder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T + res = poly.polyder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyder(c) for c in c2d]) + res = poly.polyder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(object): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_polyvander(self): + # check for 1d x + x = np.arange(3) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + def test_polyvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = poly.polyvander2d(x1, x2, [1, 2]) + tgt = poly.polyval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_polyvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) + tgt = poly.polyval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestCompanion(object): + + def test_raises(self): + assert_raises(ValueError, poly.polycompanion, []) + assert_raises(ValueError, poly.polycompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(poly.polycompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(poly.polycompanion([1, 2])[0, 0] == -.5) + + +class TestMisc(object): + + def test_polyfromroots(self): + res = poly.polyfromroots([]) + 
assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = Tlist[i] + res = poly.polyfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyroots(self): + assert_almost_equal(poly.polyroots([1]), []) + assert_almost_equal(poly.polyroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, poly.polyfit, [1], [1], -1) + assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) + assert_raises(TypeError, poly.polyfit, [], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) + assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, poly.polyfit, [1], [1], [-1,]) + assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, poly.polyfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = poly.polyfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + coef3 = poly.polyfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + # + coef4 = poly.polyfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + # + coef2d = poly.polyfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + yw[0::2] = 0 + wcoef3 = poly.polyfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. 
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
+        assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])
+        # test fitting only even polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = poly.polyfit(x, y, 4)
+        assert_almost_equal(poly.polyval(x, coef1), y)
+        coef2 = poly.polyfit(x, y, [0, 2, 4])
+        assert_almost_equal(poly.polyval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+    def test_polytrim(self):
+        coef = [2, -1, 1, 0]
+
+        # Test exceptions
+        assert_raises(ValueError, poly.polytrim, coef, -1)
+
+        # Test results
+        assert_equal(poly.polytrim(coef), coef[:-1])
+        assert_equal(poly.polytrim(coef, 1), coef[:-3])
+        assert_equal(poly.polytrim(coef, 2), [0])
+
+    def test_polyline(self):
+        assert_equal(poly.polyline(3, 4), [3, 4])
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polynomial.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polynomial.pyc
new file mode 100644
index 0000000..bb581d5
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polynomial.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polyutils.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polyutils.py
new file mode 100644
index 0000000..801c558
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polyutils.py
@@ -0,0 +1,108 @@
+"""Tests for polyutils module.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+import numpy.polynomial.polyutils as pu
+from numpy.testing import (
+    assert_almost_equal, assert_raises, assert_equal, assert_,
+    )
+
+
+class TestMisc(object):
+
+    def test_trimseq(self):
+        for i in range(5):
+            tgt = [1]
+            res = pu.trimseq([1] + [0]*5)
+            assert_equal(res, tgt)
+
+    def test_as_series(self):
+        # check exceptions
+        assert_raises(ValueError, pu.as_series, [[]])
+        assert_raises(ValueError, pu.as_series, [[[1, 2]]])
+        assert_raises(ValueError, pu.as_series, [[1], ['a']])
+        # check common types
+        types = ['i', 'd', 'O']
+        for i in range(len(types)):
+            for j in range(i):
+                ci = np.ones(1, types[i])
+                cj = np.ones(1, types[j])
+                [resi, resj] = pu.as_series([ci, cj])
+                assert_(resi.dtype.char == resj.dtype.char)
+                assert_(resj.dtype.char == types[i])
+
+    def test_trimcoef(self):
+        coef = [2, -1, 1, 0]
+        # Test exceptions
+        assert_raises(ValueError, pu.trimcoef, coef, -1)
+        # Test results
+        assert_equal(pu.trimcoef(coef), coef[:-1])
+        assert_equal(pu.trimcoef(coef, 1), coef[:-3])
+        assert_equal(pu.trimcoef(coef, 2), [0])
+
+
+class TestDomain(object):
+
+    def test_getdomain(self):
+        # test for real values
+        x = [1, 10, 3, -1]
+        tgt = [-1, 10]
+        res = pu.getdomain(x)
+        assert_almost_equal(res, tgt)
+
+        # test for complex values
+        x = [1 + 1j, 1 - 1j, 0, 2]
+        tgt = [-1j, 2 + 1j]
+        res = pu.getdomain(x)
+        assert_almost_equal(res, tgt)
+
+    def test_mapdomain(self):
+        # test for real values
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        tgt = dom2
+        res = pu.mapdomain(dom1, dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test for complex values
+        dom1 = [0 - 1j, 2 + 1j]
+        dom2 = [-2, 2]
+        tgt = dom2
+        x = dom1
+        res = pu.mapdomain(x, dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test for multidimensional arrays
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        tgt = np.array([dom2, dom2])
+        x = np.array([dom1, dom1])
+        res = pu.mapdomain(x, dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test that subtypes are preserved.
+        class MyNDArray(np.ndarray):
+            pass
+
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        x = np.array([dom1, dom1]).view(MyNDArray)
+        res = pu.mapdomain(x, dom1, dom2)
+        assert_(isinstance(res, MyNDArray))
+
+    def test_mapparms(self):
+        # test for real values
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        tgt = [1, .5]
+        res = pu.mapparms(dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test for complex values
+        dom1 = [0 - 1j, 2 + 1j]
+        dom2 = [-2, 2]
+        tgt = [-1 + 1j, 1 - 1j]
+        res = pu.mapparms(dom1, dom2)
+        assert_almost_equal(res, tgt)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polyutils.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polyutils.pyc
new file mode 100644
index 0000000..da863ef
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_polyutils.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_printing.py b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_printing.py
new file mode 100644
index 0000000..3f12364
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_printing.py
@@ -0,0 +1,68 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy.polynomial as poly
+from numpy.testing import assert_equal
+
+
+class TestStr(object):
+    def test_polynomial_str(self):
+        res = str(poly.Polynomial([0, 1]))
+        tgt = 'poly([0. 1.])'
+        assert_equal(res, tgt)
+
+    def test_chebyshev_str(self):
+        res = str(poly.Chebyshev([0, 1]))
+        tgt = 'cheb([0. 1.])'
+        assert_equal(res, tgt)
+
+    def test_legendre_str(self):
+        res = str(poly.Legendre([0, 1]))
+        tgt = 'leg([0. 1.])'
+        assert_equal(res, tgt)
+
+    def test_hermite_str(self):
+        res = str(poly.Hermite([0, 1]))
+        tgt = 'herm([0. 1.])'
+        assert_equal(res, tgt)
+
+    def test_hermiteE_str(self):
+        res = str(poly.HermiteE([0, 1]))
+        tgt = 'herme([0. 1.])'
+        assert_equal(res, tgt)
+
+    def test_laguerre_str(self):
+        res = str(poly.Laguerre([0, 1]))
+        tgt = 'lag([0.
1.])' + assert_equal(res, tgt) + + +class TestRepr(object): + def test_polynomial_str(self): + res = repr(poly.Polynomial([0, 1])) + tgt = 'Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_chebyshev_str(self): + res = repr(poly.Chebyshev([0, 1])) + tgt = 'Chebyshev([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_legendre_repr(self): + res = repr(poly.Legendre([0, 1])) + tgt = 'Legendre([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_hermite_repr(self): + res = repr(poly.Hermite([0, 1])) + tgt = 'Hermite([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_hermiteE_repr(self): + res = repr(poly.HermiteE([0, 1])) + tgt = 'HermiteE([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_laguerre_repr(self): + res = repr(poly.Laguerre([0, 1])) + tgt = 'Laguerre([0., 1.], domain=[0, 1], window=[0, 1])' + assert_equal(res, tgt) diff --git a/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_printing.pyc b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_printing.pyc new file mode 100644 index 0000000..61db0a4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_printing.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/random/__init__.py new file mode 100644 index 0000000..965ab5e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/random/__init__.py @@ -0,0 +1,166 @@ +""" +======================== +Random Number Generation +======================== + +==================== ========================================================= +Utility functions +============================================================================== +random_sample Uniformly distributed floats over ``[0, 1)``. +random Alias for `random_sample`. +bytes Uniformly distributed random bytes. +random_integers Uniformly distributed integers in a given range. +permutation Randomly permute a sequence / generate a random sequence. +shuffle Randomly permute a sequence in place. +seed Seed the random number generator. +choice Random sample from 1-D array. + +==================== ========================================================= + +==================== ========================================================= +Compatibility functions +============================================================================== +rand Uniformly distributed values. +randn Normally distributed values. +ranf Uniformly distributed floating point numbers. +randint Uniformly distributed integers in a given range. +==================== ========================================================= + +==================== ========================================================= +Univariate distributions +============================================================================== +beta Beta distribution over ``[0, 1]``. +binomial Binomial distribution. +chisquare :math:`\\chi^2` distribution. +exponential Exponential distribution. +f F (Fisher-Snedecor) distribution. +gamma Gamma distribution. +geometric Geometric distribution. +gumbel Gumbel distribution. +hypergeometric Hypergeometric distribution. +laplace Laplace distribution. +logistic Logistic distribution. +lognormal Log-normal distribution. +logseries Logarithmic series distribution. +negative_binomial Negative binomial distribution. 
+noncentral_chisquare Non-central chi-square distribution. +noncentral_f Non-central F distribution. +normal Normal / Gaussian distribution. +pareto Pareto distribution. +poisson Poisson distribution. +power Power distribution. +rayleigh Rayleigh distribution. +triangular Triangular distribution. +uniform Uniform distribution. +vonmises Von Mises circular distribution. +wald Wald (inverse Gaussian) distribution. +weibull Weibull distribution. +zipf Zipf's distribution over ranked data. +==================== ========================================================= + +==================== ========================================================= +Multivariate distributions +============================================================================== +dirichlet Multivariate generalization of Beta distribution. +multinomial Multivariate generalization of the binomial distribution. +multivariate_normal Multivariate generalization of the normal distribution. +==================== ========================================================= + +==================== ========================================================= +Standard distributions +============================================================================== +standard_cauchy Standard Cauchy-Lorentz distribution. +standard_exponential Standard exponential distribution. +standard_gamma Standard Gamma distribution. +standard_normal Standard normal distribution. +standard_t Standard Student's t-distribution. +==================== ========================================================= + +==================== ========================================================= +Internal functions +============================================================================== +get_state Get tuple representing internal state of generator. +set_state Set state of generator. +==================== ========================================================= + +""" +from __future__ import division, absolute_import, print_function + +import warnings + +__all__ = [ + 'beta', + 'binomial', + 'bytes', + 'chisquare', + 'choice', + 'dirichlet', + 'exponential', + 'f', + 'gamma', + 'geometric', + 'get_state', + 'gumbel', + 'hypergeometric', + 'laplace', + 'logistic', + 'lognormal', + 'logseries', + 'multinomial', + 'multivariate_normal', + 'negative_binomial', + 'noncentral_chisquare', + 'noncentral_f', + 'normal', + 'pareto', + 'permutation', + 'poisson', + 'power', + 'rand', + 'randint', + 'randn', + 'random_integers', + 'random_sample', + 'rayleigh', + 'seed', + 'set_state', + 'shuffle', + 'standard_cauchy', + 'standard_exponential', + 'standard_gamma', + 'standard_normal', + 'standard_t', + 'triangular', + 'uniform', + 'vonmises', + 'wald', + 'weibull', + 'zipf' +] + +with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message="numpy.ndarray size changed") + from .mtrand import * + +# Some aliases: +ranf = random = sample = random_sample +__all__.extend(['ranf', 'random', 'sample']) + +def __RandomState_ctor(): + """Return a RandomState instance. + + This function exists solely to assist (un)pickling. + + Note that the state of the RandomState returned here is irrelevant, as this function's + entire purpose is to return a newly allocated RandomState whose state pickle can set. + Consequently the RandomState returned by this function is a freshly allocated copy + with a seed=0. 
+ + See https://github.com/numpy/numpy/issues/4763 for a detailed discussion + + """ + return RandomState(seed=0) + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/random/__init__.pyc new file mode 100644 index 0000000..d27d660 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/random/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/info.py b/project/venv/lib/python2.7/site-packages/numpy/random/info.py new file mode 100644 index 0000000..b9fd7f2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/random/info.py @@ -0,0 +1,5 @@ +from __future__ import division, absolute_import, print_function + +from .. import __doc__ + +depends = ['core'] diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/info.pyc b/project/venv/lib/python2.7/site-packages/numpy/random/info.pyc new file mode 100644 index 0000000..42994b9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/random/info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/mtrand.so b/project/venv/lib/python2.7/site-packages/numpy/random/mtrand.so new file mode 100755 index 0000000..ec7d226 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/random/mtrand.so differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/randomkit.h b/project/venv/lib/python2.7/site-packages/numpy/random/randomkit.h new file mode 100644 index 0000000..a24dabe --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/random/randomkit.h @@ -0,0 +1,226 @@ +/* Random kit 1.3 */ + +/* + * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* @(#) $Jeannot: randomkit.h,v 1.24 2005/07/21 22:14:09 js Exp $ */ + +/* + * Typical use: + * + * { + * rk_state state; + * unsigned long seed = 1, random_value; + * + * rk_seed(seed, &state); // Initialize the RNG + * ... 
+ * random_value = rk_random(&state); // Generate random values in [0..RK_MAX]
+ * }
+ *
+ * Instead of rk_seed, you can use rk_randomseed which will get a random seed
+ * from /dev/urandom (or the clock, if /dev/urandom is unavailable):
+ *
+ * {
+ *  rk_state state;
+ *  unsigned long random_value;
+ *
+ *  rk_randomseed(&state); // Initialize the RNG with a random seed
+ *  ...
+ *  random_value = rk_random(&state); // Generate random values in [0..RK_MAX]
+ * }
+ */
+
+/*
+ * Useful macro:
+ *  RK_DEV_RANDOM: the device used for random seeding.
+ *                 defaults to "/dev/urandom"
+ */
+
+#ifndef _RANDOMKIT_
+#define _RANDOMKIT_
+
+#include <stddef.h>
+#include <numpy/npy_common.h>
+
+
+#define RK_STATE_LEN 624
+
+typedef struct rk_state_
+{
+    unsigned long key[RK_STATE_LEN];
+    int pos;
+    int has_gauss; /* !=0: gauss contains a gaussian deviate */
+    double gauss;
+
+    /* The rk_state structure has been extended to store the following
+     * information for the binomial generator. If the input values of n or p
+     * are different than nsave and psave, then the other parameters will be
+     * recomputed. RTK 2005-09-02 */
+
+    int has_binomial; /* !=0: following parameters initialized for
+                              binomial */
+    double psave;
+    long nsave;
+    double r;
+    double q;
+    double fm;
+    long m;
+    double p1;
+    double xm;
+    double xl;
+    double xr;
+    double c;
+    double laml;
+    double lamr;
+    double p2;
+    double p3;
+    double p4;
+
+}
+rk_state;
+
+typedef enum {
+    RK_NOERR = 0, /* no error */
+    RK_ENODEV = 1, /* no RK_DEV_RANDOM device */
+    RK_ERR_MAX = 2
+} rk_error;
+
+/* error strings */
+extern char *rk_strerror[RK_ERR_MAX];
+
+/* Maximum generated random value */
+#define RK_MAX 0xFFFFFFFFUL
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Initialize the RNG state using the given seed.
+ */
+extern void rk_seed(unsigned long seed, rk_state *state);
+
+/*
+ * Initialize the RNG state using a random seed.
+ * Uses /dev/random or, when unavailable, the clock (see randomkit.c).
+ * Returns RK_NOERR when no error occurs.
+ * Returns RK_ENODEV when the use of RK_DEV_RANDOM failed (for example because
+ * there is no such device). In this case, the RNG was initialized using the
+ * clock.
+ */
+extern rk_error rk_randomseed(rk_state *state);
+
+/*
+ * Returns a random unsigned long between 0 and RK_MAX inclusive
+ */
+extern unsigned long rk_random(rk_state *state);
+
+/*
+ * Returns a random long between 0 and LONG_MAX inclusive
+ */
+extern long rk_long(rk_state *state);
+
+/*
+ * Returns a random unsigned long between 0 and ULONG_MAX inclusive
+ */
+extern unsigned long rk_ulong(rk_state *state);
+
+/*
+ * Returns a random unsigned long between 0 and max inclusive.
+ */
+extern unsigned long rk_interval(unsigned long max, rk_state *state);
+
+/*
+ * Fills an array with cnt random npy_uint64 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+extern void rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt,
+                             npy_uint64 *out, rk_state *state);
+
+/*
+ * Fills an array with cnt random npy_uint32 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+extern void rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt,
+                             npy_uint32 *out, rk_state *state);
+
+/*
+ * Fills an array with cnt random npy_uint16 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+extern void rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt,
+                             npy_uint16 *out, rk_state *state);
+
+/*
+ * Fills an array with cnt random npy_uint8 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+extern void rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt,
+                            npy_uint8 *out, rk_state *state);
+
+/*
+ * Fills an array with cnt random npy_bool between off and off + rng
+ * inclusive. It is assumed that npy_bool has the same size as npy_uint8.
+ */
+extern void rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt,
+                           npy_bool *out, rk_state *state);
+
+/*
+ * Returns a random double between 0.0 and 1.0, 1.0 excluded.
+ */
+extern double rk_double(rk_state *state);
+
+/*
+ * fill the buffer with size random bytes
+ */
+extern void rk_fill(void *buffer, size_t size, rk_state *state);
+
+/*
+ * fill the buffer with random bytes from the random device
+ * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is
+ * On Unix, if strong is defined, RK_DEV_RANDOM is used. If not, RK_DEV_URANDOM
+ * is used instead. This parameter has no effect on Windows.
+ * Warning: on most unixes RK_DEV_RANDOM will wait for enough entropy to answer
+ * which can take a very long time on quiet systems.
+ */
+extern rk_error rk_devfill(void *buffer, size_t size, int strong);
+
+/*
+ * fill the buffer using rk_devfill if the random device is available and using
+ * rk_fill if it is not
+ * parameters have the same meaning as rk_fill and rk_devfill
+ * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is
+ */
+extern rk_error rk_altfill(void *buffer, size_t size, int strong,
+                           rk_state *state);
+
+/*
+ * return a random gaussian deviate with variance unity and zero mean.
+ */
+extern double rk_gauss(rk_state *state);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RANDOMKIT_ */
diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/setup.py b/project/venv/lib/python2.7/site-packages/numpy/random/setup.py
new file mode 100644
index 0000000..394a70e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/random/setup.py
@@ -0,0 +1,63 @@
+from __future__ import division, print_function
+
+from os.path import join
+import sys
+from distutils.dep_util import newer
+from distutils.msvccompiler import get_build_version as get_msvc_build_version
+
+def needs_mingw_ftime_workaround():
+    # We need the mingw workaround for _ftime if the msvc runtime version is
+    # 7.1 or above and we build with mingw ...
+    # ... but we can't easily detect compiler version outside distutils command
+    # context, so we will need to detect in randomkit whether we build with gcc
+    msver = get_msvc_build_version()
+    if msver and msver >= 8:
+        return True
+
+    return False
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration, get_mathlibs
+    config = Configuration('random', parent_package, top_path)
+
+    def generate_libraries(ext, build_dir):
+        config_cmd = config.get_config_cmd()
+        libs = get_mathlibs()
+        if sys.platform == 'win32':
+            libs.append('Advapi32')
+        ext.libraries.extend(libs)
+        return None
+
+    # enable unix large file support on 32 bit systems
+    # (64 bit off_t, lseek -> lseek64 etc.)
+ if sys.platform[:3] == "aix": + defs = [('_LARGE_FILES', None)] + else: + defs = [('_FILE_OFFSET_BITS', '64'), + ('_LARGEFILE_SOURCE', '1'), + ('_LARGEFILE64_SOURCE', '1')] + if needs_mingw_ftime_workaround(): + defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None)) + + libs = [] + # Configure mtrand + config.add_extension('mtrand', + sources=[join('mtrand', x) for x in + ['mtrand.c', 'randomkit.c', 'initarray.c', + 'distributions.c']]+[generate_libraries], + libraries=libs, + depends=[join('mtrand', '*.h'), + join('mtrand', '*.pyx'), + join('mtrand', '*.pxi'),], + define_macros=defs, + ) + + config.add_data_files(('.', join('mtrand', 'randomkit.h'))) + config.add_data_dir('tests') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/random/setup.pyc new file mode 100644 index 0000000..b6f61aa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/random/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/random/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/random/tests/__init__.pyc new file mode 100644 index 0000000..014f7d3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/random/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_random.py b/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_random.py new file mode 100644 index 0000000..d4721bc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_random.py @@ -0,0 +1,1653 @@ +from __future__ import division, absolute_import, print_function +import warnings + +import numpy as np +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_warns, + assert_no_warnings, assert_array_equal, assert_array_almost_equal, + suppress_warnings + ) +from numpy import random +import sys + + +class TestSeed(object): + def test_scalar(self): + s = np.random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = np.random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = np.random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState([0]) + assert_equal(s.randint(1000), 973) + s = np.random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, -0.5) + assert_raises(ValueError, np.random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, [-0.5]) + assert_raises(ValueError, np.random.RandomState, [-1]) + assert_raises(ValueError, np.random.RandomState, [4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, np.random.RandomState, np.array([], dtype=np.int64)) + assert_raises(ValueError, 
np.random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + +class TestBinomial(object): + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial(object): + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, np.random.multinomial, 1, p, + float(1)) + + +class TestSetState(object): + def setup(self): + self.seed = 1234567890 + self.prng = random.RandomState(self.seed) + self.state = self.prng.get_state() + + def test_basic(self): + old = self.prng.tomaxint(16) + self.prng.set_state(self.state) + new = self.prng.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.prng.standard_normal(size=3) + self.prng.set_state(self.state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.prng.standard_normal() + state = self.prng.get_state() + old = self.prng.standard_normal(size=3) + self.prng.set_state(state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + old_state = self.state[:-2] + x1 = self.prng.standard_normal(size=16) + self.prng.set_state(old_state) + x2 = self.prng.standard_normal(size=16) + self.prng.set_state(self.state) + x3 = self.prng.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. 
+ self.prng.negative_binomial(0.5, 0.5) + + +class TestRandint(object): + + rfunc = np.random.randint + + # valid integer/boolean types + itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + assert_raises(TypeError, self.rfunc, 1, dtype=float) + + def test_bounds_checking(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd)//2 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + try: + self.rfunc(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + np.random.seed() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + import hashlib + # We use a md5 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. + tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0', + 'int16': '1b7741b80964bb190c50d541dca1cac1', + 'int32': '4dc9fcc2b395577ebb51793e58ed1a05', + 'int64': '17db902806f448331b5a758d7d2ee672', + 'int8': '27dd30c4e08a797063dffac2490b0be6', + 'uint16': '1b7741b80964bb190c50d541dca1cac1', + 'uint32': '4dc9fcc2b395577ebb51793e58ed1a05', + 'uint64': '17db902806f448331b5a758d7d2ee672', + 'uint8': '27dd30c4e08a797063dffac2490b0be6'} + + for dt in self.itype[1:]: + np.random.seed(1234) + + # view as little endian for hash + if sys.byteorder == 'little': + val = self.rfunc(0, 6, size=1000, dtype=dt) + else: + val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + + res = hashlib.md5(val.view(np.int8)).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + np.random.seed(1234) + val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + res = hashlib.md5(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + def test_int64_uint64_corner_case(self): + # When stored in Numpy arrays, `lbnd` is casted + # as np.int64, and `ubnd` is casted as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. 
However,
+        # `ubnd` is too large to be represented in np.float64,
+        # causing it to be rounded down to np.iinfo(np.int64).max,
+        # leading to a ValueError because `lbnd` now equals
+        # the new `ubnd`.
+
+        dt = np.int64
+        tgt = np.iinfo(np.int64).max
+        lbnd = np.int64(np.iinfo(np.int64).max)
+        ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+        # None of these function calls should
+        # generate a ValueError now.
+        actual = np.random.randint(lbnd, ubnd, dtype=dt)
+        assert_equal(actual, tgt)
+
+    def test_respect_dtype_singleton(self):
+        # See gh-7203
+        for dt in self.itype:
+            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            assert_equal(sample.dtype, np.dtype(dt))
+
+        for dt in (bool, int, np.long):
+            lbnd = 0 if dt is bool else np.iinfo(dt).min
+            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+
+            # gh-7284: Ensure that we get Python data types
+            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            assert_(not hasattr(sample, 'dtype'))
+            assert_equal(type(sample), dt)
+
+
+class TestRandomDist(object):
+    # Make sure the random distribution returns the correct value for a
+    # given seed
+
+    def setup(self):
+        self.seed = 1234567890
+
+    def test_rand(self):
+        np.random.seed(self.seed)
+        actual = np.random.rand(3, 2)
+        desired = np.array([[0.61879477158567997, 0.59162362775974664],
+                            [0.88868358904449662, 0.89165480011560816],
+                            [0.4575674820298663, 0.7781880808593471]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_randn(self):
+        np.random.seed(self.seed)
+        actual = np.random.randn(3, 2)
+        desired = np.array([[1.34016345771863121, 1.73759122771936081],
+                            [1.498988344300628, -0.2286433324536169],
+                            [2.031033998682787, 2.17032494605655257]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_randint(self):
+        np.random.seed(self.seed)
+        actual = np.random.randint(-99, 99, size=(3, 2))
+        desired = np.array([[31, 3],
+                            [-52, 41],
+                            [-48, -66]])
+        assert_array_equal(actual, desired)
+
+    def test_random_integers(self):
+        np.random.seed(self.seed)
+        with suppress_warnings() as sup:
+            w = sup.record(DeprecationWarning)
+            actual = np.random.random_integers(-99, 99, size=(3, 2))
+            assert_(len(w) == 1)
+        desired = np.array([[31, 3],
+                            [-52, 41],
+                            [-48, -66]])
+        assert_array_equal(actual, desired)
+
+    def test_random_integers_max_int(self):
+        # Tests whether random_integers can generate the
+        # maximum allowed Python int that can be converted
+        # into a C long. Previous implementations of this
+        # method have thrown an OverflowError when attempting
+        # to generate this integer.
+ with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = np.random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + assert_(len(w) == 1) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random_sample(self): + np.random.seed(self.seed) + actual = np.random.random_sample((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_choice_uniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False, + p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + np.random.seed(self.seed) + actual = np.random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = np.random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(np.random.choice(2, replace=True))) + assert_(np.isscalar(np.random.choice(2, replace=False))) + assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) + assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) + assert_(np.isscalar(np.random.choice([1, 2], replace=True))) + assert_(np.random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, replace=True) is a) + + # Check 0-d array + s = tuple() + assert_(not np.isscalar(np.random.choice(2, s, replace=True))) + assert_(not np.isscalar(np.random.choice(2, s, replace=False))) + assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p))) + assert_(not 
np.isscalar(np.random.choice([1, 2], s, replace=True))) + assert_(np.random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(np.random.choice(6, s, replace=True).shape, s) + assert_equal(np.random.choice(6, s, replace=False).shape, s) + assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(np.random.randint(0, -10, size=0).shape, (0,)) + assert_equal(np.random.randint(10, 10, size=0).shape, (0,)) + assert_equal(np.random.choice(0, size=0).shape, (0,)) + assert_equal(np.random.choice([], size=(0,)).shape, (0,)) + assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, (3, 0, 4)) + assert_raises(ValueError, np.random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, np.random.choice, a, p=p) + + def test_bytes(self): + np.random.seed(self.seed) + actual = np.random.bytes(10) + desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, 1), + ("b", np.int32, 1)])]: + np.random.seed(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + np.random.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + assert_array_equal(actual, desired) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + np.random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + np.random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_beta(self): + np.random.seed(self.seed) + actual = np.random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + np.random.seed(self.seed) + actual = np.random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + def test_chisquare(self): + np.random.seed(self.seed) + actual = np.random.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + 
[72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + np.random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, np.random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, np.random.mtrand.dirichlet, alpha) + + def test_exponential(self): + np.random.seed(self.seed) + actual = np.random.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(np.random.exponential(scale=0), 0) + assert_raises(ValueError, np.random.exponential, scale=-0.) + + def test_f(self): + np.random.seed(self.seed) + actual = np.random.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + np.random.seed(self.seed) + actual = np.random.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(np.random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + np.random.seed(self.seed) + actual = np.random.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_gumbel(self): + np.random.seed(self.seed) + actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(np.random.gumbel(scale=0), 0) + assert_raises(ValueError, np.random.gumbel, scale=-0.) 
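+
+    # A minimal sketch of the convention this class relies on (an
+    # illustrative helper added here, not an upstream test): fixing the
+    # seed makes every subsequent draw reproducible, which is what lets
+    # the tests above pin down exact expected values.
+    def _example_reseed_replays_stream(self):
+        np.random.seed(self.seed)
+        first = np.random.standard_normal(3)
+        np.random.seed(self.seed)  # resetting the state replays the stream
+        second = np.random.standard_normal(3)
+        assert_array_equal(first, second)  # identical draws both times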
+ + def test_hypergeometric(self): + np.random.seed(self.seed) + actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = np.random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = np.random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + np.random.seed(self.seed) + actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(np.random.laplace(scale=0), 0) + assert_raises(ValueError, np.random.laplace, scale=-0.) + + def test_logistic(self): + np.random.seed(self.seed) + actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + np.random.seed(self.seed) + actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(np.random.lognormal(sigma=0), 1) + assert_raises(ValueError, np.random.lognormal, sigma=-0.) 
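+
+    # Illustrative helper (not an upstream test; it assumes numpy draws
+    # lognormal variates as exp() of normal variates, which is also why
+    # test_lognormal_0 above expects exactly 1 when sigma is 0). Under a
+    # shared seed the two formulations should then agree:
+    def _example_lognormal_is_exp_of_normal(self):
+        np.random.seed(self.seed)
+        direct = np.random.lognormal(mean=.5, sigma=2.0, size=5)
+        np.random.seed(self.seed)
+        via_exp = np.exp(np.random.normal(loc=.5, scale=2.0, size=5))
+        assert_array_almost_equal(direct, via_exp, decimal=15)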
+ + def test_logseries(self): + np.random.seed(self.seed) + actual = np.random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + assert_array_equal(actual, desired) + + def test_multinomial(self): + np.random.seed(self.seed) + actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2)) + desired = np.array([[[4, 3, 5, 4, 2, 2], + [5, 2, 8, 2, 2, 1]], + [[3, 4, 3, 6, 0, 4], + [2, 1, 4, 3, 6, 4]], + [[4, 4, 2, 5, 2, 3], + [4, 3, 4, 2, 3, 4]]]) + assert_array_equal(actual, desired) + + def test_multivariate_normal(self): + np.random.seed(self.seed) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = np.random.multivariate_normal(mean, cov, size) + desired = np.array([[[1.463620246718631, 11.73759122771936 ], + [1.622445133300628, 9.771356667546383]], + [[2.154490787682787, 12.170324946056553], + [1.719909438201865, 9.230548443648306]], + [[0.689515026297799, 9.880729819607714], + [-0.023054015651998, 9.201096623542879]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = np.random.multivariate_normal(mean, cov) + desired = np.array([0.895289569463708, 9.17180864067987]) + assert_array_almost_equal(actual, desired, decimal=15) + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + mean = [0, 0] + cov = [[1, 2], [2, 1]] + assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(np.random.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, np.random.multivariate_normal, mean, cov, + check_valid='raise') + + def test_negative_binomial(self): + np.random.seed(self.seed) + actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[848, 841], + [892, 611], + [779, 647]]) + assert_array_equal(actual, desired) + + def test_noncentral_chisquare(self): + np.random.seed(self.seed) + actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[23.91905354498517511, 13.35324692733826346], + [31.22452661329736401, 16.60047399466177254], + [5.03461598262724586, 17.94973089023519464]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[1.47145377828516666, 0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) + + np.random.seed(self.seed) + actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + np.random.seed(self.seed) + actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + np.random.seed(self.seed) + actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, 
-0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(np.random.normal(scale=0), 0) + assert_raises(ValueError, np.random.normal, scale=-0.) + + def test_pareto(self): + np.random.seed(self.seed) + actual = np.random.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + np.random.seed(self.seed) + actual = np.random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, np.random.poisson, lamneg) + assert_raises(ValueError, np.random.poisson, [lamneg]*10) + assert_raises(ValueError, np.random.poisson, lambig) + assert_raises(ValueError, np.random.poisson, [lambig]*10) + + def test_power(self): + np.random.seed(self.seed) + actual = np.random.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + np.random.seed(self.seed) + actual = np.random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(np.random.rayleigh(scale=0), 0) + assert_raises(ValueError, np.random.rayleigh, scale=-0.) + + def test_standard_cauchy(self): + np.random.seed(self.seed) + actual = np.random.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + np.random.seed(self.seed) + actual = np.random.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + np.random.seed(self.seed) + actual = np.random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(np.random.standard_gamma(shape=0), 0) + assert_raises(ValueError, np.random.standard_gamma, shape=-0.) 
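# Editor's note -- an illustrative sketch, not part of the vendored numpy
# file. Every exact-value test in this class follows one pattern: seed the
# legacy RandomState (MT19937), draw, and compare against pinned values.
# Re-seeding replays the identical stream, which is why the "desired"
# arrays can be asserted to 14-15 decimal places. A minimal standalone
# demonstration (the seed is an arbitrary choice, not the suite's
# self.seed):
#
#     >>> import numpy as np
#     >>> np.random.seed(1234567890)       # fix the generator state
#     >>> first = np.random.normal(size=3)
#     >>> np.random.seed(1234567890)       # same seed -> same stream
#     >>> second = np.random.normal(size=3)
#     >>> (first == second).all()          # bit-for-bit identical draws
#     True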
+ + def test_standard_normal(self): + np.random.seed(self.seed) + actual = np.random.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + np.random.seed(self.seed) + actual = np.random.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + np.random.seed(self.seed) + actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + np.random.seed(self.seed) + actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = np.random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + np.random.seed(self.seed) + actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + np.random.seed(self.seed) + r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + np.testing.assert_(np.isfinite(r).all()) + + def test_wald(self): + np.random.seed(self.seed) + actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + np.random.seed(self.seed) + actual = np.random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + np.random.seed(self.seed) + assert_equal(np.random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, np.random.weibull, a=-0.) 
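# Editor's note -- an illustrative sketch, not part of the vendored numpy
# file. The *_0 tests above (test_normal_0, test_rayleigh_0,
# test_standard_gamma_0, test_weibull_0) pin down a boundary convention:
# a scale or shape of exactly 0 is accepted and degenerates the
# distribution to 0, while any negative value -- including negative
# zero -- raises ValueError. Restated outside the test class:
#
#     >>> import numpy as np
#     >>> np.random.weibull(a=0.0)         # zero shape is legal
#     0.0
#     >>> try:
#     ...     np.random.weibull(a=-0.0)    # negative zero is rejected
#     ... except ValueError:
#     ...     print("rejected")
#     rejected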
+ + def test_zipf(self): + np.random.seed(self.seed) + actual = np.random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast(object): + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup(self): + self.seed = 123456789 + + def setSeed(self): + np.random.seed(self.seed) + + # TODO: Include test for randint once it can broadcast + # Can steal the test written in PR #6938 + + def test_uniform(self): + low = [0] + high = [1] + uniform = np.random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.setSeed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = np.random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.setSeed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.setSeed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = np.random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.setSeed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.setSeed() + actual = beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = np.random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = np.random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = np.random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.setSeed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.setSeed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = np.random.f + desired = np.array([0.80038951638264799, + 
0.86768719635363512, + 2.7251095168386801]) + + self.setSeed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.setSeed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = np.random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.setSeed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + self.setSeed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = np.random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.setSeed() + actual = chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = np.random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.setSeed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.setSeed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = np.random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.setSeed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = np.random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.setSeed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, 
bad_kappa) + + self.setSeed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = np.random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.setSeed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = np.random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = np.random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = np.random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.setSeed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.setSeed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = np.random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.setSeed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.setSeed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = np.random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.setSeed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.setSeed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = np.random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.setSeed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + + self.setSeed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = np.random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.setSeed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + 
assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = np.random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.setSeed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + + self.setSeed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = np.random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.setSeed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right) + + self.setSeed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right) + + self.setSeed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3) + + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom = np.random.binomial + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.setSeed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = np.random.negative_binomial + desired = np.array([1, 0, 1]) + + self.setSeed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.setSeed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = np.random.RandomState().poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + bad_lam_two = [max_lam * 2] + poisson = np.random.poisson + desired = np.array([1, 1, 0]) + + 
self.setSeed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = np.random.zipf + desired = np.array([2, 2, 1]) + + self.setSeed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = np.random.geometric + desired = np.array([2, 2, 2]) + + self.setSeed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = np.random.hypergeometric + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = np.random.logseries + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + +class TestThread(object): + # make sure each state produces the same sequence even in threads + def setup(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(np.random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(np.random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] 
= state.normal(size=10000) + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1/6.]*6, size=10000) + self.check_function(gen_random, sz=(10000, 6)) + +# See Issue #4263 +class TestSingleEltArrayInput(object): + def setup(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (np.random.exponential, np.random.standard_gamma, + np.random.chisquare, np.random.standard_t, + np.random.pareto, np.random.weibull, + np.random.power, np.random.rayleigh, + np.random.poisson, np.random.zipf, + np.random.geometric, np.random.logseries) + + probfuncs = (np.random.geometric, np.random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (np.random.uniform, np.random.normal, + np.random.beta, np.random.gamma, + np.random.f, np.random.noncentral_chisquare, + np.random.vonmises, np.random.laplace, + np.random.gumbel, np.random.logistic, + np.random.lognormal, np.random.wald, + np.random.binomial, np.random.negative_binomial) + + probfuncs = (np.random.binomial, np.random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + +# TODO: Uncomment once randint can broadcast arguments +# def test_randint(self): +# itype = [bool, np.int8, np.uint8, np.int16, np.uint16, +# np.int32, np.uint32, np.int64, np.uint64] +# func = np.random.randint +# high = np.array([1]) +# low = np.array([0]) +# +# for dt in itype: +# out = func(low, high, dtype=dt) +# self.assert_equal(out.shape, self.tgtShape) +# +# out = func(low[0], high, dtype=dt) +# self.assert_equal(out.shape, self.tgtShape) +# +# out = func(low, high[0], dtype=dt) +# self.assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [np.random.noncentral_f, np.random.triangular, + np.random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_random.pyc b/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_random.pyc new file mode 100644 index 0000000..b4f88a3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_random.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_regression.py b/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_regression.py new file mode 100644 index 0000000..ca9bbbc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_regression.py @@ -0,0 +1,157 @@ +from __future__ import division, 
absolute_import, print_function + +import sys +from numpy.testing import ( + assert_, assert_array_equal, assert_raises, + ) +from numpy import random +from numpy.compat import long +import numpy as np + + +class TestRegression(object): + + def test_VonMises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + for mu in np.linspace(-7., 7., 5): + r = random.mtrand.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) # Check for 64-bit systems + for arg in args: + assert_(np.random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + np.random.seed(0) + rvsn = np.random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / float(N) + msg = "Frequency was %f, should be > 0.45" % freq + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / float(N) + msg = "Frequency was %f, should be < 0.23" % freq + assert_(freq < 0.23, msg) + + def test_permutation_longs(self): + np.random.seed(1234) + a = np.random.permutation(12) + np.random.seed(1234) + b = np.random.permutation(long(12)) + assert_array_equal(a, b) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + np.random.seed(12345) + shuffled = list(t) + random.shuffle(shuffled) + assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = np.random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + np.random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. + np.random.multivariate_normal([0], [[0]], size=1) + np.random.multivariate_normal([0], [[0]], size=np.int_(1)) + np.random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + np.random.seed(1234567890) + x = np.random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. 
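# Editor's note -- illustrative arithmetic for the tolerance issue cited
# above. The probabilities below are [4, 4, 2] / 10; in float16, 0.4
# rounds to 0.39990234375 and 0.2 to 0.199951171875, so the three values
# sum to 0.999755859375 rather than 1.0. A fixed ~1e-8 tolerance would
# spuriously reject valid float16 inputs, so the check has to scale with
# the dtype's precision (the fix uses roughly sqrt(eps) of the dtype).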
+ np.random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = np.random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, np.random.choice, a, p=probs*0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + np.random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + np.random.seed(1234) + a = np.array([np.arange(1), np.arange(4)]) + + for _ in range(1000): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + class N(np.ndarray): + pass + + np.random.seed(1) + orig = np.arange(3).view(N) + perm = np.random.permutation(orig) + assert_array_equal(perm, np.array([0, 2, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M(object): + a = np.arange(5) + + def __array__(self): + return self.a + + np.random.seed(1) + m = M() + perm = np.random.permutation(m) + assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) + assert_array_equal(m.__array__(), np.arange(5)) diff --git a/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_regression.pyc new file mode 100644 index 0000000..aad551b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/random/tests/test_regression.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/setup.py b/project/venv/lib/python2.7/site-packages/numpy/setup.py new file mode 100644 index 0000000..4ccdaee --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/setup.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +from __future__ import division, print_function + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('numpy', parent_package, top_path) + + config.add_subpackage('compat') + config.add_subpackage('core') + config.add_subpackage('distutils') + config.add_subpackage('doc') + config.add_subpackage('f2py') + config.add_subpackage('fft') + config.add_subpackage('lib') + config.add_subpackage('linalg') + config.add_subpackage('ma') + config.add_subpackage('matrixlib') + config.add_subpackage('polynomial') + config.add_subpackage('random') + config.add_subpackage('testing') + config.add_data_dir('doc') + config.add_data_dir('tests') + config.make_config_py() # installs __config__.py + return config + +if __name__ == '__main__': + print('This is the wrong setup.py file to run') diff --git a/project/venv/lib/python2.7/site-packages/numpy/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/setup.pyc new file mode 100644 index 0000000..8f37d38 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/testing/__init__.py new file mode 100644 index 0000000..a8bd4fc --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/numpy/testing/__init__.py @@ -0,0 +1,22 @@ +"""Common test support for all numpy test scripts. + +This single module should provide all the common functionality for numpy tests +in a single location, so that test scripts can just import it and work right +away. + +""" +from __future__ import division, absolute_import, print_function + +from unittest import TestCase + +from ._private.utils import * +from ._private import decorators as dec +from ._private.nosetester import ( + run_module_suite, NoseTester as Tester + ) + +__all__ = _private.utils.__all__ + ['TestCase', 'run_module_suite'] + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/__init__.pyc new file mode 100644 index 0000000..4cc2133 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/__init__.pyc new file mode 100644 index 0000000..8936086 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/decorators.py b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/decorators.py new file mode 100644 index 0000000..24c4e38 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/decorators.py @@ -0,0 +1,292 @@ +""" +Decorators for labeling and modifying behavior of test objects. + +Decorators that merely return a modified version of the original +function object are straightforward. Decorators that return a new +function object need to use +:: + + nose.tools.make_decorator(original_function)(decorator) + +in returning the decorator, in order to preserve meta-data such as +function name, setup and teardown functions and so on - see +``nose.tools`` for more information. + +""" +from __future__ import division, absolute_import, print_function + +try: + # Accessing collections abstract classes from collections + # has been deprecated since Python 3.3 + import collections.abc as collections_abc +except ImportError: + import collections as collections_abc + +from .utils import SkipTest, assert_warns, HAS_REFCOUNT + +__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated', + 'parametrize', '_needs_refcount',] + + +def slow(t): + """ + Label a test as 'slow'. + + The exact definition of a slow test is obviously both subjective and + hardware-dependent, but in general any individual test that requires more + than a second or two should be labeled as slow (the whole suite consists of + thousands of tests, so even a second is significant). + + Parameters + ---------- + t : callable + The test to label as slow. + + Returns + ------- + t : callable + The decorated test `t`. + + Examples + -------- + The `numpy.testing` module includes ``import decorators as dec``. 
+ A test can be decorated as slow like this:: + + from numpy.testing import * + + @dec.slow + def test_big(self): + print('Big, slow test') + + """ + + t.slow = True + return t + +def setastest(tf=True): + """ + Signals to nose that this function is or is not a test. + + Parameters + ---------- + tf : bool + If True, specifies that the decorated callable is a test. + If False, specifies that the decorated callable is not a test. + Default is True. + + Notes + ----- + This decorator can't use the nose namespace, because it can be + called from a non-test module. See also ``istest`` and ``nottest`` in + ``nose.tools``. + + Examples + -------- + `setastest` can be used in the following way:: + + from numpy.testing import dec + + @dec.setastest(False) + def func_with_test_in_name(arg1, arg2): + pass + + """ + def set_test(t): + t.__test__ = tf + return t + return set_test + +def skipif(skip_condition, msg=None): + """ + Make function raise SkipTest exception if a given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. This is useful for tests that may require costly + imports, to delay the cost until the test suite is actually executed. + + Parameters + ---------- + skip_condition : bool or callable + Flag to determine whether to skip the decorated test. + msg : str, optional + Message to give on raising a SkipTest exception. Default is None. + + Returns + ------- + decorator : function + Decorator which, when applied to a function, causes SkipTest + to be raised when `skip_condition` is True, and the function + to be called normally otherwise. + + Notes + ----- + The decorator itself is decorated with the ``nose.tools.make_decorator`` + function in order to transmit function name, and various other metadata. + + """ + + def skip_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + + # Allow for both boolean or callable skip conditions. + if isinstance(skip_condition, collections_abc.Callable): + skip_val = lambda: skip_condition() + else: + skip_val = lambda: skip_condition + + def get_msg(func,msg=None): + """Skip message with information about function being skipped.""" + if msg is None: + out = 'Test skipped due to test condition' + else: + out = msg + + return "Skipping test: %s: %s" % (func.__name__, out) + + # We need to define *two* skippers because Python doesn't allow both + # return with value and yield inside the same function. + def skipper_func(*args, **kwargs): + """Skipper for normal test functions.""" + if skip_val(): + raise SkipTest(get_msg(f, msg)) + else: + return f(*args, **kwargs) + + def skipper_gen(*args, **kwargs): + """Skipper for test generators.""" + if skip_val(): + raise SkipTest(get_msg(f, msg)) + else: + for x in f(*args, **kwargs): + yield x + + # Choose the right skipper to use when building the actual decorator. + if nose.util.isgenerator(f): + skipper = skipper_gen + else: + skipper = skipper_func + + return nose.tools.make_decorator(f)(skipper) + + return skip_decorator + + +def knownfailureif(fail_condition, msg=None): + """ + Make function raise KnownFailureException exception if given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. This is useful for tests that may require costly + imports, to delay the cost until the test suite is actually executed. 
+ + Parameters + ---------- + fail_condition : bool or callable + Flag to determine whether to mark the decorated test as a known + failure (if True) or not (if False). + msg : str, optional + Message to give on raising a KnownFailureException exception. + Default is None. + + Returns + ------- + decorator : function + Decorator, which, when applied to a function, causes + KnownFailureException to be raised when `fail_condition` is True, + and the function to be called normally otherwise. + + Notes + ----- + The decorator itself is decorated with the ``nose.tools.make_decorator`` + function in order to transmit function name, and various other metadata. + + """ + if msg is None: + msg = 'Test skipped due to known failure' + + # Allow for both boolean or callable known failure conditions. + if isinstance(fail_condition, collections_abc.Callable): + fail_val = lambda: fail_condition() + else: + fail_val = lambda: fail_condition + + def knownfail_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + from .noseclasses import KnownFailureException + + def knownfailer(*args, **kwargs): + if fail_val(): + raise KnownFailureException(msg) + else: + return f(*args, **kwargs) + return nose.tools.make_decorator(f)(knownfailer) + + return knownfail_decorator + +def deprecated(conditional=True): + """ + Filter deprecation warnings while running the test suite. + + This decorator can be used to filter DeprecationWarning's, to avoid + printing them during the test suite run, while checking that the test + actually raises a DeprecationWarning. + + Parameters + ---------- + conditional : bool or callable, optional + Flag to determine whether to mark test as deprecated or not. If the + condition is a callable, it is used at runtime to dynamically make the + decision. Default is True. + + Returns + ------- + decorator : function + The `deprecated` decorator itself. + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + def deprecate_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + + def _deprecated_imp(*args, **kwargs): + # Poor man's replacement for the with statement + with assert_warns(DeprecationWarning): + f(*args, **kwargs) + + if isinstance(conditional, collections_abc.Callable): + cond = conditional() + else: + cond = conditional + if cond: + return nose.tools.make_decorator(f)(_deprecated_imp) + else: + return f + return deprecate_decorator + + +def parametrize(vars, input): + """ + Pytest compatibility class. This implements the simplest level of + pytest.mark.parametrize for use in nose as an aid in making the transition + to pytest. It achieves that by adding a dummy var parameter and ignoring + the doc_func parameter of the base class. It does not support variable + substitution by name, nor does it support nesting or classes. See the + pytest documentation for usage. + + .. 
versionadded:: 1.14.0 + + """ + from .parameterized import parameterized + + return parameterized(input) + +_needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/decorators.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/decorators.pyc new file mode 100644 index 0000000..26ef9ad Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/decorators.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/noseclasses.py b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/noseclasses.py new file mode 100644 index 0000000..e99bbc9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/noseclasses.py @@ -0,0 +1,366 @@ +# These classes implement a doctest runner plugin for nose, a "known failure" +# error class, and a customized TestProgram for NumPy. + +# Because this module imports nose directly, it should not +# be used except by nosetester.py to avoid a general NumPy +# dependency on nose. +from __future__ import division, absolute_import, print_function + +import os +import sys +import doctest +import inspect + +import numpy +import nose +from nose.plugins import doctests as npd +from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin +from nose.plugins.base import Plugin +from nose.util import src +from .nosetester import get_package_name +from .utils import KnownFailureException, KnownFailureTest + + +# Some of the classes in this module begin with 'Numpy' to clearly distinguish +# them from the plethora of very similar names from nose/unittest/doctest + +#----------------------------------------------------------------------------- +# Modified version of the one in the stdlib, that fixes a python bug (doctests +# not found in extension modules, https://bugs.python.org/issue3158) +class NumpyDocTestFinder(doctest.DocTestFinder): + + def _from_module(self, module, object): + """ + Return true if the given object is defined in the given + module. + """ + if module is None: + return True + elif inspect.isfunction(object): + return module.__dict__ is object.__globals__ + elif inspect.isbuiltin(object): + return module.__name__ == object.__module__ + elif inspect.isclass(object): + return module.__name__ == object.__module__ + elif inspect.ismethod(object): + # This one may be a bug in cython that fails to correctly set the + # __module__ attribute of methods, but since the same error is easy + # to make by extension code writers, having this safety in place + # isn't such a bad idea + return module.__name__ == object.__self__.__class__.__module__ + elif inspect.getmodule(object) is not None: + return module is inspect.getmodule(object) + elif hasattr(object, '__module__'): + return module.__name__ == object.__module__ + elif isinstance(object, property): + return True # [XX] no way not be sure. + else: + raise ValueError("object must be a class or function") + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to `tests`. + """ + + doctest.DocTestFinder._find(self, tests, obj, name, module, + source_lines, globs, seen) + + # Below we re-run pieces of the above method with manual modifications, + # because the original code is buggy and fails to correctly identify + # doctests in extension modules. 
+ + # Local shorthands + from inspect import ( + isroutine, isclass, ismodule, isfunction, ismethod + ) + + # Look for tests in a module's contained objects. + if ismodule(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + valname1 = '%s.%s' % (name, valname) + if ( (isroutine(val) or isclass(val)) + and self._from_module(module, val)): + + self._find(tests, val, valname1, module, source_lines, + globs, seen) + + # Look for tests in a class's contained objects. + if isclass(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Special handling for staticmethod/classmethod. + if isinstance(val, staticmethod): + val = getattr(obj, valname) + if isinstance(val, classmethod): + val = getattr(obj, valname).__func__ + + # Recurse to methods, properties, and nested classes. + if ((isfunction(val) or isclass(val) or + ismethod(val) or isinstance(val, property)) and + self._from_module(module, val)): + valname = '%s.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + +# second-chance checker; if the default comparison doesn't +# pass, then see if the expected output string contains flags that +# tell us to ignore the output +class NumpyOutputChecker(doctest.OutputChecker): + def check_output(self, want, got, optionflags): + ret = doctest.OutputChecker.check_output(self, want, got, + optionflags) + if not ret: + if "#random" in want: + return True + + # it would be useful to normalize endianness so that + # bigendian machines don't fail all the tests (and there are + # actually some bigendian examples in the doctests). Let's try + # making them all little endian + got = got.replace("'>", "'<") + want = want.replace("'>", "'<") + + # try to normalize out 32 and 64 bit default int sizes + for sz in [4, 8]: + got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') + 'numpy' + + """ + + fullpath = filepath[:] + pkg_name = [] + while 'site-packages' in filepath or 'dist-packages' in filepath: + filepath, p2 = os.path.split(filepath) + if p2 in ('site-packages', 'dist-packages'): + break + pkg_name.append(p2) + + # if package name determination failed, just default to numpy/scipy + if not pkg_name: + if 'scipy' in fullpath: + return 'scipy' + else: + return 'numpy' + + # otherwise, reverse to get correct order and return + pkg_name.reverse() + + # don't include the outer egg directory + if pkg_name[0].endswith('.egg'): + pkg_name.pop(0) + + return '.'.join(pkg_name) + + +def run_module_suite(file_to_run=None, argv=None): + """ + Run a test module. + + Equivalent to calling ``$ nosetests `` from + the command line + + Parameters + ---------- + file_to_run : str, optional + Path to test module, or None. + By default, run the module from which this function is called. + argv : list of strings + Arguments to be passed to the nose test runner. ``argv[0]`` is + ignored. All command line arguments accepted by ``nosetests`` + will work. If it is the default value None, sys.argv is used. + + .. versionadded:: 1.9.0 + + Examples + -------- + Adding the following:: + + if __name__ == "__main__" : + run_module_suite(argv=sys.argv) + + at the end of a test module will run the tests when that module is + called in the python interpreter. + + Alternatively, calling:: + + >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") # doctest: +SKIP + + from an interpreter will run all the test routine in 'test_matlib.py'. 
+ """ + if file_to_run is None: + f = sys._getframe(1) + file_to_run = f.f_locals.get('__file__', None) + if file_to_run is None: + raise AssertionError + + if argv is None: + argv = sys.argv + [file_to_run] + else: + argv = argv + [file_to_run] + + nose = import_nose() + from .noseclasses import KnownFailurePlugin + nose.run(argv=argv, addplugins=[KnownFailurePlugin()]) + + +class NoseTester(object): + """ + Nose test runner. + + This class is made available as numpy.testing.Tester, and a test function + is typically added to a package's __init__.py like so:: + + from numpy.testing import Tester + test = Tester().test + + Calling this test function finds and runs all tests associated with the + package and all its sub-packages. + + Attributes + ---------- + package_path : str + Full path to the package to test. + package_name : str + Name of the package to test. + + Parameters + ---------- + package : module, str or None, optional + The package to test. If a string, this should be the full path to + the package. If None (default), `package` is set to the module from + which `NoseTester` is initialized. + raise_warnings : None, str or sequence of warnings, optional + This specifies which warnings to configure as 'raise' instead + of being shown once during the test execution. Valid strings are: + + - "develop" : equals ``(Warning,)`` + - "release" : equals ``()``, don't raise on any warnings. + + Default is "release". + depth : int, optional + If `package` is None, then this can be used to initialize from the + module of the caller of (the caller of (...)) the code that + initializes `NoseTester`. Default of 0 means the module of the + immediate caller; higher values are useful for utility routines that + want to initialize `NoseTester` objects on behalf of other code. + + """ + def __init__(self, package=None, raise_warnings="release", depth=0, + check_fpu_mode=False): + # Back-compat: 'None' used to mean either "release" or "develop" + # depending on whether this was a release or develop version of + # numpy. Those semantics were fine for testing numpy, but not so + # helpful for downstream projects like scipy that use + # numpy.testing. (They want to set this based on whether *they* are a + # release or develop version, not whether numpy is.) So we continue to + # accept 'None' for back-compat, but it's now just an alias for the + # default "release". + if raise_warnings is None: + raise_warnings = "release" + + package_name = None + if package is None: + f = sys._getframe(1 + depth) + package_path = f.f_locals.get('__file__', None) + if package_path is None: + raise AssertionError + package_path = os.path.dirname(package_path) + package_name = f.f_locals.get('__name__', None) + elif isinstance(package, type(os)): + package_path = os.path.dirname(package.__file__) + package_name = getattr(package, '__name__', None) + else: + package_path = str(package) + + self.package_path = package_path + + # Find the package name under test; this name is used to limit coverage + # reporting (if enabled). + if package_name is None: + package_name = get_package_name(package_path) + self.package_name = package_name + + # Set to "release" in constructor in maintenance branches. 
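# Editor's note: per the docstring above, "develop" maps to (Warning,)
# (raise on every warning, so new deprecations surface during numpy
# development) while "release" maps to () (raise on none, the default
# kept here for downstream users); None is accepted only as a
# back-compat alias for "release".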
+ self.raise_warnings = raise_warnings + + # Whether to check for FPU mode changes + self.check_fpu_mode = check_fpu_mode + + def _test_argv(self, label, verbose, extra_argv): + ''' Generate argv for nosetest command + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + see ``test`` docstring + verbose : int, optional + Verbosity value for test outputs, in the range 1-10. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + + Returns + ------- + argv : list + command line arguments that will be passed to nose + ''' + argv = [__file__, self.package_path, '-s'] + if label and label != 'full': + if not isinstance(label, basestring): + raise TypeError('Selection label should be a string') + if label == 'fast': + label = 'not slow' + argv += ['-A', label] + argv += ['--verbosity', str(verbose)] + + # When installing with setuptools, and also in some other cases, the + # test_*.py files end up marked +x executable. Nose, by default, does + # not run files marked with +x as they might be scripts. However, in + # our case nose only looks for test_*.py files under the package + # directory, which should be safe. + argv += ['--exe'] + + if extra_argv: + argv += extra_argv + return argv + + def _show_system_info(self): + nose = import_nose() + + import numpy + print("NumPy version %s" % numpy.__version__) + relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous + print("NumPy relaxed strides checking option:", relaxed_strides) + npdir = os.path.dirname(numpy.__file__) + print("NumPy is installed in %s" % npdir) + + if 'scipy' in self.package_name: + import scipy + print("SciPy version %s" % scipy.__version__) + spdir = os.path.dirname(scipy.__file__) + print("SciPy is installed in %s" % spdir) + + pyversion = sys.version.replace('\n', '') + print("Python version %s" % pyversion) + print("nose version %d.%d.%d" % nose.__versioninfo__) + + def _get_custom_doctester(self): + """ Return instantiated plugin for doctests + + Allows subclassing of this class to override doctester + + A return value of None means use the nose builtin doctest plugin + """ + from .noseclasses import NumpyDoctest + return NumpyDoctest() + + def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, timer=False): + """ + Run tests for module using nose. + + This method does the heavy lifting for the `test` method. It takes all + the same arguments, for details see `test`. 
+
+        See Also
+        --------
+        test
+
+        """
+        # fail with nice error message if nose is not present
+        import_nose()
+        # compile argv
+        argv = self._test_argv(label, verbose, extra_argv)
+        # our way of doing coverage
+        if coverage:
+            argv += ['--cover-package=%s' % self.package_name,
+                     '--with-coverage', '--cover-tests', '--cover-erase']
+
+        if timer:
+            if timer is True:
+                argv += ['--with-timer']
+            elif isinstance(timer, int):
+                argv += ['--with-timer', '--timer-top-n', str(timer)]
+
+        # construct list of plugins
+        import nose.plugins.builtin
+        from nose.plugins import EntryPointPluginManager
+        from .noseclasses import (KnownFailurePlugin, Unplugger,
+                                  FPUModeCheckPlugin)
+        plugins = [KnownFailurePlugin()]
+        plugins += [p() for p in nose.plugins.builtin.plugins]
+        if self.check_fpu_mode:
+            plugins += [FPUModeCheckPlugin()]
+            argv += ["--with-fpumodecheckplugin"]
+        try:
+            # External plugins (like nose-timer)
+            entrypoint_manager = EntryPointPluginManager()
+            entrypoint_manager.loadPlugins()
+            plugins += [p for p in entrypoint_manager.plugins]
+        except ImportError:
+            # Relies on pkg_resources, not a hard dependency
+            pass
+
+        # add doctesting if required
+        doctest_argv = '--with-doctest' in argv
+        if doctests == False and doctest_argv:
+            doctests = True
+        plug = self._get_custom_doctester()
+        if plug is None:
+            # use standard doctesting
+            if doctests and not doctest_argv:
+                argv += ['--with-doctest']
+        else:  # custom doctesting
+            if doctest_argv:  # in fact the unplugger would take care of this
+                argv.remove('--with-doctest')
+            plugins += [Unplugger('doctest'), plug]
+            if doctests:
+                argv += ['--with-' + plug.name]
+        return argv, plugins
+
+    def test(self, label='fast', verbose=1, extra_argv=None,
+             doctests=False, coverage=False, raise_warnings=None,
+             timer=False):
+        """
+        Run tests for module using nose.
+
+        Parameters
+        ----------
+        label : {'fast', 'full', '', attribute identifier}, optional
+            Identifies the tests to run. This can be a string to pass to
+            the nosetests executable with the '-A' option, or one of several
+            special values. Special values are:
+
+            * 'fast' - the default - which corresponds to the ``nosetests -A``
+              option of 'not slow'.
+            * 'full' - fast (as above) and slow tests as in the
+              'no -A' option to nosetests - this is the same as ''.
+            * None or '' - run all tests.
+            * attribute_identifier - string passed directly to nosetests as '-A'.
+
+        verbose : int, optional
+            Verbosity value for test outputs, in the range 1-10. Default is 1.
+        extra_argv : list, optional
+            List with any extra arguments to pass to nosetests.
+        doctests : bool, optional
+            If True, run doctests in module. Default is False.
+        coverage : bool, optional
+            If True, report coverage of NumPy code. Default is False.
+            (This requires the ``coverage`` module.)
+        raise_warnings : None, str or sequence of warnings, optional
+            This specifies which warnings to configure as 'raise' instead
+            of being shown once during the test execution. Valid strings are:
+
+            * "develop" : equals ``(Warning,)``
+            * "release" : equals ``()``, do not raise on any warnings.
+        timer : bool or int, optional
+            Timing of individual tests with ``nose-timer`` (which needs to be
+            installed). If True, time tests and report on all of them.
+            If an integer (say ``N``), report timing results for ``N`` slowest
+            tests.
+
+        Returns
+        -------
+        result : object
+            Returns the result of running the tests as a
+            ``nose.result.TextTestResult`` object.
+
+        Notes
+        -----
+        Each NumPy module exposes `test` in its namespace to run all tests for it.
+ For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + Running unit tests for numpy.lib + ... + Ran 976 tests in 3.933s + + OK + + >>> result.errors #doctest: +SKIP + [] + >>> result.knownfail #doctest: +SKIP + [] + """ + + # cap verbosity at 3 because nose becomes *very* verbose beyond that + verbose = min(verbose, 3) + + from . import utils + utils.verbose = verbose + + argv, plugins = self.prepare_test_args( + label, verbose, extra_argv, doctests, coverage, timer) + + if doctests: + print("Running unit tests and doctests for %s" % self.package_name) + else: + print("Running unit tests for %s" % self.package_name) + + self._show_system_info() + + # reset doctest state on every run + import doctest + doctest.master = None + + if raise_warnings is None: + raise_warnings = self.raise_warnings + + _warn_opts = dict(develop=(Warning,), + release=()) + if isinstance(raise_warnings, basestring): + raise_warnings = _warn_opts[raise_warnings] + + with suppress_warnings("location") as sup: + # Reset the warning filters to the default state, + # so that running the tests is more repeatable. + warnings.resetwarnings() + # Set all warnings to 'warn', this is because the default 'once' + # has the bad property of possibly shadowing later warnings. + warnings.filterwarnings('always') + # Force the requested warnings to raise + for warningtype in raise_warnings: + warnings.filterwarnings('error', category=warningtype) + # Filter out annoying import messages. + sup.filter(message='Not importing directory') + sup.filter(message="numpy.dtype size changed") + sup.filter(message="numpy.ufunc size changed") + sup.filter(category=np.ModuleDeprecationWarning) + # Filter out boolean '-' deprecation messages. This allows + # older versions of scipy to test without a flood of messages. + sup.filter(message=".*boolean negative.*") + sup.filter(message=".*boolean subtract.*") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + with warnings.catch_warnings(): + warnings.simplefilter("always") + from ...distutils import cpuinfo + sup.filter(category=UserWarning, module=cpuinfo) + # See #7949: Filter out deprecation warnings due to the -3 flag to + # python 2 + if sys.version_info.major == 2 and sys.py3kwarning: + # This is very specific, so using the fragile module filter + # is fine + import threading + sup.filter(DeprecationWarning, + r"sys\.exc_clear\(\) not supported in 3\.x", + module=threading) + sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__") + sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__") + sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x") + sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x") + sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x") + # Filter out some deprecation warnings inside nose 1.3.7 when run + # on python 3.5b2. See + # https://github.com/nose-devs/nose/issues/929 + # Note: it is hard to filter based on module for sup (lineno could + # be implemented). 
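+            # Clarifying comment (added, not upstream): `sup.filter` accepts a
+            # module *object* (as with `cpuinfo` above) rather than a name
+            # pattern, whereas the stdlib call below can cover all of nose's
+            # submodules at once, because `module=` in
+            # `warnings.filterwarnings` is a regex matched against the start
+            # of the fully-qualified module name.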
+            warnings.filterwarnings("ignore", message=".*getargspec.*",
+                                    category=DeprecationWarning,
+                                    module=r"nose\.")
+
+            from .noseclasses import NumpyTestProgram
+
+            t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
+
+        return t.result
+
+    def bench(self, label='fast', verbose=1, extra_argv=None):
+        """
+        Run benchmarks for module using nose.
+
+        Parameters
+        ----------
+        label : {'fast', 'full', '', attribute identifier}, optional
+            Identifies the benchmarks to run. This can be a string to pass to
+            the nosetests executable with the '-A' option, or one of several
+            special values. Special values are:
+
+            * 'fast' - the default - which corresponds to the ``nosetests -A``
+              option of 'not slow'.
+            * 'full' - fast (as above) and slow benchmarks as in the
+              'no -A' option to nosetests - this is the same as ''.
+            * None or '' - run all tests.
+            * attribute_identifier - string passed directly to nosetests as '-A'.
+
+        verbose : int, optional
+            Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
+        extra_argv : list, optional
+            List with any extra arguments to pass to nosetests.
+
+        Returns
+        -------
+        success : bool
+            Returns True if running the benchmarks works, False if an error
+            occurred.
+
+        Notes
+        -----
+        Benchmarks are like tests, but have names starting with "bench" instead
+        of "test", and can be found under the "benchmarks" sub-directory of the
+        module.
+
+        Each NumPy module exposes `bench` in its namespace to run all benchmarks
+        for it.
+
+        Examples
+        --------
+        >>> success = np.lib.bench() #doctest: +SKIP
+        Running benchmarks for numpy.lib
+        ...
+        using 562341 items:
+        unique:
+        0.11
+        unique1d:
+        0.11
+        ratio: 1.0
+        nUnique: 56230 == 56230
+        ...
+        OK
+
+        >>> success #doctest: +SKIP
+        True
+
+        """
+
+        print("Running benchmarks for %s" % self.package_name)
+        self._show_system_info()
+
+        argv = self._test_argv(label, verbose, extra_argv)
+        argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
+
+        # import nose or make informative error
+        nose = import_nose()
+
+        # get plugin to disable doctests
+        from .noseclasses import Unplugger
+        add_plugins = [Unplugger('doctest')]
+
+        return nose.run(argv=argv, addplugins=add_plugins)
+
+
+def _numpy_tester():
+    if hasattr(np, "__version__") and ".dev0" in np.__version__:
+        mode = "develop"
+    else:
+        mode = "release"
+    return NoseTester(raise_warnings=mode, depth=1,
+                      check_fpu_mode=True)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/nosetester.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/nosetester.pyc
new file mode 100644
index 0000000..d1c0e24
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/nosetester.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/parameterized.py b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/parameterized.py
new file mode 100644
index 0000000..a5fa4fb
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/parameterized.py
@@ -0,0 +1,490 @@
+"""
+tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
+
+Unless stated otherwise in the source files, all code is copyright 2010 David
+Wolever <david@wolever.net>. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    1. Redistributions of source code must retain the above copyright notice,
+       this list of conditions and the following disclaimer.
+
+    2. Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of David Wolever.
+
+"""
+import re
+import sys
+import inspect
+import warnings
+from functools import wraps
+from types import MethodType as MethodType
+from collections import namedtuple
+
+try:
+    from collections import OrderedDict as MaybeOrderedDict
+except ImportError:
+    MaybeOrderedDict = dict
+
+from unittest import TestCase
+
+PY3 = sys.version_info[0] == 3
+PY2 = sys.version_info[0] == 2
+
+
+if PY3:
+    # Python 3 doesn't have an InstanceType, so just use a dummy type.
+    class InstanceType():
+        pass
+    lzip = lambda *a: list(zip(*a))
+    text_type = str
+    string_types = str,
+    bytes_type = bytes
+    def make_method(func, instance, type):
+        if instance is None:
+            return func
+        return MethodType(func, instance)
+else:
+    from types import InstanceType
+    lzip = zip
+    text_type = unicode
+    bytes_type = str
+    string_types = basestring,
+    def make_method(func, instance, type):
+        return MethodType(func, instance, type)
+
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+    """ Represents a single parameter to a test case.
+
+        For example::
+
+            >>> p = param("foo", bar=16)
+            >>> p
+            param("foo", bar=16)
+            >>> p.args
+            ('foo', )
+            >>> p.kwargs
+            {'bar': 16}
+
+        Intended to be used as an argument to ``@parameterized``::
+
+            @parameterized([
+                param("foo", bar=16),
+            ])
+            def test_stuff(foo, bar=16):
+                pass
+        """
+
+    def __new__(cls, *args , **kwargs):
+        return _param.__new__(cls, args, kwargs)
+
+    @classmethod
+    def explicit(cls, args=None, kwargs=None):
+        """ Creates a ``param`` by explicitly specifying ``args`` and
+            ``kwargs``::
+
+                >>> param.explicit([1,2,3])
+                param(*(1, 2, 3))
+                >>> param.explicit(kwargs={"foo": 42})
+                param(*(), **{"foo": 42})
+            """
+        args = args or ()
+        kwargs = kwargs or {}
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_decorator(cls, args):
+        """ Returns an instance of ``param()`` for ``@parameterized`` argument
+            ``args``::
+
+                >>> param.from_decorator((42, ))
+                param(args=(42, ), kwargs={})
+                >>> param.from_decorator("foo")
+                param(args=("foo", ), kwargs={})
+            """
+        if isinstance(args, param):
+            return args
+        elif isinstance(args, string_types):
+            args = (args, )
+        try:
+            return cls(*args)
+        except TypeError as e:
+            if "after * must be" not in str(e):
+                raise
+            raise TypeError(
+                "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
+                %(args, args),
+            )
+
+    def __repr__(self):
+        return "param(*%r, **%r)" %self
+
+
+class QuietOrderedDict(MaybeOrderedDict):
+    """ When OrderedDict is available, use it to make sure that the kwargs in
+        doc strings are consistently ordered. """
+    __str__ = dict.__str__
+    __repr__ = dict.__repr__
+
+
+def parameterized_argument_value_pairs(func, p):
+    """Return tuples of parameterized arguments and their values.
+
+    This is useful if you are writing your own doc_func
+    function and need to know the values for each parameter name::
+
+        >>> def func(a, foo=None, bar=42, **kwargs): pass
+        >>> p = param(1, foo=7, extra=99)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
+
+    If the function's first argument is named ``self`` then it will be
+    ignored::
+
+        >>> def func(self, a): pass
+        >>> p = param(1)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("a", 1)]
+
+    Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
+
+        >>> def func(foo, *args): pass
+        >>> p = param(1)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("foo", 1)]
+        >>> p = param(1, 16)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("foo", 1), ("*args", (16, ))]
+    """
+    argspec = inspect.getargspec(func)
+    arg_offset = 1 if argspec.args[:1] == ["self"] else 0
+
+    named_args = argspec.args[arg_offset:]
+
+    result = lzip(named_args, p.args)
+    named_args = argspec.args[len(result) + arg_offset:]
+    varargs = p.args[len(result):]
+
+    result.extend([
+        (name, p.kwargs.get(name, default))
+        for (name, default)
+        in zip(named_args, argspec.defaults or [])
+    ])
+
+    seen_arg_names = {n for (n, _) in result}
+    keywords = QuietOrderedDict(sorted([
+        (name, p.kwargs[name])
+        for name in p.kwargs
+        if name not in seen_arg_names
+    ]))
+
+    if varargs:
+        result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))
+
+    if keywords:
+        result.append(("**%s" %(argspec.keywords, ), keywords))
+
+    return result
+
+def short_repr(x, n=64):
+    """ A shortened repr of ``x`` which is guaranteed to be ``unicode``::
+
+        >>> short_repr("foo")
+        u"foo"
+        >>> short_repr("123456789", n=4)
+        u"12...89"
+    """
+
+    x_repr = repr(x)
+    if isinstance(x_repr, bytes_type):
+        try:
+            x_repr = text_type(x_repr, "utf-8")
+        except UnicodeDecodeError:
+            x_repr = text_type(x_repr, "latin1")
+    if len(x_repr) > n:
+        x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
+    return x_repr
+
+def default_doc_func(func, num, p):
+    if func.__doc__ is None:
+        return None
+
+    all_args_with_values = parameterized_argument_value_pairs(func, p)
+
+    # Assumes that the function passed is a bound method.
+    descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values]
+
+    # The documentation might be a multiline string, so split it
+    # and just work with the first string, ignoring the period
+    # at the end if there is one.
+    first, nl, rest = func.__doc__.lstrip().partition("\n")
+    suffix = ""
+    if first.endswith("."):
+        suffix = "."
+        first = first[:-1]
+    args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
+    return "".join([first.rstrip(), args, suffix, nl, rest])
+
+def default_name_func(func, num, p):
+    base_name = func.__name__
+    name_suffix = "_%s" %(num, )
+    if len(p.args) > 0 and isinstance(p.args[0], string_types):
+        name_suffix += "_" + parameterized.to_safe_name(p.args[0])
+    return base_name + name_suffix
+
+
+# force nose for numpy purposes.
+_test_runner_override = 'nose'
+_test_runner_guess = False
+_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
+_test_runner_aliases = {
+    "_pytest": "pytest",
+}
+
+def set_test_runner(name):
+    global _test_runner_override
+    if name not in _test_runners:
+        raise TypeError(
+            "Invalid test runner: %r (must be one of: %s)"
+            %(name, ", ".join(_test_runners)),
+        )
+    _test_runner_override = name
+
+def detect_runner():
+    """ Guess which test runner we're using by traversing the stack and looking
+        for the first matching module. This *should* be reasonably safe, as
+        it's done during test discovery where the test runner should be the
+        stack frame immediately outside. """
+    if _test_runner_override is not None:
+        return _test_runner_override
+    global _test_runner_guess
+    if _test_runner_guess is False:
+        stack = inspect.stack()
+        for record in reversed(stack):
+            frame = record[0]
+            module = frame.f_globals.get("__name__").partition(".")[0]
+            if module in _test_runner_aliases:
+                module = _test_runner_aliases[module]
+            if module in _test_runners:
+                _test_runner_guess = module
+                break
+            if record[1].endswith("python2.6/unittest.py"):
+                _test_runner_guess = "unittest"
+                break
+        else:
+            _test_runner_guess = None
+    return _test_runner_guess
+
+class parameterized(object):
+    """ Parameterize a test case::
+
+        class TestInt(object):
+            @parameterized([
+                ("A", 10),
+                ("F", 15),
+                param("10", 42, base=42)
+            ])
+            def test_int(self, input, expected, base=16):
+                actual = int(input, base=base)
+                assert_equal(actual, expected)
+
+        @parameterized([
+            (2, 3, 5),
+            (3, 5, 8),
+        ])
+        def test_add(a, b, expected):
+            assert_equal(a + b, expected)
+        """
+
+    def __init__(self, input, doc_func=None):
+        self.get_input = self.input_as_callable(input)
+        self.doc_func = doc_func or default_doc_func
+
+    def __call__(self, test_func):
+        self.assert_not_in_testcase_subclass()
+
+        @wraps(test_func)
+        def wrapper(test_self=None):
+            test_cls = test_self and type(test_self)
+            if test_self is not None:
+                if issubclass(test_cls, InstanceType):
+                    raise TypeError((
+                        "@parameterized can't be used with old-style classes, but "
+                        "%r has an old-style class. Consider using a new-style "
+                        "class, or '@parameterized.expand' "
+                        "(see http://stackoverflow.com/q/54867/71522 for more "
+                        "information on old-style classes)."
+ ) %(test_self, )) + + original_doc = wrapper.__doc__ + for num, args in enumerate(wrapper.parameterized_input): + p = param.from_decorator(args) + unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p) + try: + wrapper.__doc__ = nose_tuple[0].__doc__ + # Nose uses `getattr(instance, test_func.__name__)` to get + # a method bound to the test instance (as opposed to a + # method bound to the instance of the class created when + # tests were being enumerated). Set a value here to make + # sure nose can get the correct test method. + if test_self is not None: + setattr(test_cls, test_func.__name__, unbound_func) + yield nose_tuple + finally: + if test_self is not None: + delattr(test_cls, test_func.__name__) + wrapper.__doc__ = original_doc + wrapper.parameterized_input = self.get_input() + wrapper.parameterized_func = test_func + test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, ) + return wrapper + + def param_as_nose_tuple(self, test_self, func, num, p): + nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1])) + nose_func.__doc__ = self.doc_func(func, num, p) + # Track the unbound function because we need to setattr the unbound + # function onto the class for nose to work (see comments above), and + # Python 3 doesn't let us pull the function out of a bound method. + unbound_func = nose_func + if test_self is not None: + # Under nose on Py2 we need to return an unbound method to make + # sure that the `self` in the method is properly shared with the + # `self` used in `setUp` and `tearDown`. But only there. Everyone + # else needs a bound method. + func_self = ( + None if PY2 and detect_runner() == "nose" else + test_self + ) + nose_func = make_method(nose_func, func_self, type(test_self)) + return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, ) + + def assert_not_in_testcase_subclass(self): + parent_classes = self._terrible_magic_get_defining_classes() + if any(issubclass(cls, TestCase) for cls in parent_classes): + raise Exception("Warning: '@parameterized' tests won't work " + "inside subclasses of 'TestCase' - use " + "'@parameterized.expand' instead.") + + def _terrible_magic_get_defining_classes(self): + """ Returns the set of parent classes of the class currently being defined. + Will likely only work if called from the ``parameterized`` decorator. + This function is entirely @brandon_rhodes's fault, as he suggested + the implementation: http://stackoverflow.com/a/8793684/71522 + """ + stack = inspect.stack() + if len(stack) <= 4: + return [] + frame = stack[4] + code_context = frame[4] and frame[4][0].strip() + if not (code_context and code_context.startswith("class ")): + return [] + _, _, parents = code_context.partition("(") + parents, _, _ = parents.partition(")") + return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals) + + @classmethod + def input_as_callable(cls, input): + if callable(input): + return lambda: cls.check_input_values(input()) + input_values = cls.check_input_values(input) + return lambda: input_values + + @classmethod + def check_input_values(cls, input_values): + # Explicitly convert non-list inputs to a list so that: + # 1. A helpful exception will be raised if they aren't iterable, and + # 2. 
Generators are unwrapped exactly once (otherwise `nosetests
+        #      --processes=n` has issues; see:
+        #      https://github.com/wolever/nose-parameterized/pull/31)
+        if not isinstance(input_values, list):
+            input_values = list(input_values)
+        return [ param.from_decorator(p) for p in input_values ]
+
+    @classmethod
+    def expand(cls, input, name_func=None, doc_func=None, **legacy):
+        """ A "brute force" method of parameterizing test cases. Creates new
+            test cases and injects them into the namespace that the wrapped
+            function is being defined in. Useful for parameterizing tests in
+            subclasses of 'unittest.TestCase', where Nose test generators don't
+            work.
+
+            >>> @parameterized.expand([("foo", 1, 2)])
+            ... def test_add1(name, input, expected):
+            ...     actual = add1(input)
+            ...     assert_equal(actual, expected)
+            ...
+            >>> locals()
+            ... 'test_add1_foo_0': ...
+            >>>
+            """
+
+        if "testcase_func_name" in legacy:
+            warnings.warn("testcase_func_name= is deprecated; use name_func=",
+                          DeprecationWarning, stacklevel=2)
+            if not name_func:
+                name_func = legacy["testcase_func_name"]
+
+        if "testcase_func_doc" in legacy:
+            warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
+                          DeprecationWarning, stacklevel=2)
+            if not doc_func:
+                doc_func = legacy["testcase_func_doc"]
+
+        doc_func = doc_func or default_doc_func
+        name_func = name_func or default_name_func
+
+        def parameterized_expand_wrapper(f, instance=None):
+            stack = inspect.stack()
+            frame = stack[1]
+            frame_locals = frame[0].f_locals
+
+            parameters = cls.input_as_callable(input)()
+            for num, p in enumerate(parameters):
+                name = name_func(f, num, p)
+                frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+                frame_locals[name].__doc__ = doc_func(f, num, p)
+
+            f.__test__ = False
+        return parameterized_expand_wrapper
+
+    @classmethod
+    def param_as_standalone_func(cls, p, func, name):
+        @wraps(func)
+        def standalone_func(*a):
+            return func(*(a + p.args), **p.kwargs)
+        standalone_func.__name__ = name
+
+        # place_as is used by py.test to determine what source file should be
+        # used for this test.
+        standalone_func.place_as = func
+
+        # Remove __wrapped__ because py.test will try to look at __wrapped__
+        # to determine which parameters should be used with this test case,
+        # and obviously we don't need it to do any parameterization.
+        try:
+            del standalone_func.__wrapped__
+        except AttributeError:
+            pass
+        return standalone_func
+
+    @classmethod
+    def to_safe_name(cls, s):
+        return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/parameterized.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/parameterized.pyc
new file mode 100644
index 0000000..db64233
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/parameterized.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/utils.py b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/utils.py
new file mode 100644
index 0000000..36f5c3c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/utils.py
@@ -0,0 +1,2327 @@
+"""
+Utility functions to facilitate testing.
+ +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import re +import gc +import operator +import warnings +from functools import partial, wraps +import shutil +import contextlib +from tempfile import mkdtemp, mkstemp +from unittest.case import SkipTest +from warnings import WarningMessage +import pprint + +from numpy.core import( + float32, empty, arange, array_repr, ndarray, isnat, array) +from numpy.lib.utils import deprecate + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', + 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', + '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles', + ] + + +class KnownFailureException(Exception): + '''Raise this exception to mark a test as a known failing test.''' + pass + + +KnownFailureTest = KnownFailureException # backwards compat +verbose = 0 + +IS_PYPY = '__pypy__' in sys.modules +HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None + + +def import_nose(): + """ Import nose only when needed. + """ + nose_is_good = True + minimum_nose_version = (1, 0, 0) + try: + import nose + except ImportError: + nose_is_good = False + else: + if nose.__versioninfo__ < minimum_nose_version: + nose_is_good = False + + if not nose_is_good: + msg = ('Need nose >= %d.%d.%d for tests - see ' + 'https://nose.readthedocs.io' % + minimum_nose_version) + raise ImportError(msg) + + return nose + + +def assert_(val, msg=''): + """ + Assert that works in release mode. + Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. + + """ + __tracebackhide__ = True # Hide traceback for py.test + if not val: + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + + +def gisnan(x): + """like isnan, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isnan and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isnan + st = isnan(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isnan not supported for this type") + return st + + +def gisfinite(x): + """like isfinite, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isfinite and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. 
+ + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isfinite, errstate + with errstate(invalid='ignore'): + st = isfinite(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isfinite not supported for this type") + return st + + +def gisinf(x): + """like isinf, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isinf and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isinf, errstate + with errstate(invalid='ignore'): + st = isinf(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isinf not supported for this type") + return st + + +@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. " + "Use numpy.random.rand instead.") +def rand(*args): + """Returns an array of random numbers with the given shape. + + This only uses the standard library, so it is useful for testing purposes. + """ + import random + from numpy.core import zeros, float64 + results = zeros(args, float64) + f = results.flat + for i in range(len(f)): + f[i] = random.random() + return results + + +if os.name == 'nt': + # Code "stolen" from enthought/debug/memusage.py + def GetPerformanceAttributes(object, counter, instance=None, + inum=-1, format=None, machine=None): + # NOTE: Many counters require 2 samples to give accurate results, + # including "% Processor Time" (as by definition, at any instant, a + # thread's CPU usage is either 0 or 100). To read counters like this, + # you should copy this function, but keep the counter open, and call + # CollectQueryData() each time you need to know. + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link) + # My older explanation for this was that the "AddCounter" process forced + # the CPU to 100%, but the above makes more sense :) + import win32pdh + if format is None: + format = win32pdh.PDH_FMT_LONG + path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter)) + hq = win32pdh.OpenQuery() + try: + hc = win32pdh.AddCounter(hq, path) + try: + win32pdh.CollectQueryData(hq) + type, val = win32pdh.GetFormattedCounterValue(hc, format) + return val + finally: + win32pdh.RemoveCounter(hc) + finally: + win32pdh.CloseQuery(hq) + + def memusage(processName="python", instance=0): + # from win32pdhutil, part of the win32all package + import win32pdh + return GetPerformanceAttributes("Process", "Virtual Bytes", + processName, instance, + win32pdh.PDH_FMT_LONG, None) +elif sys.platform[:5] == 'linux': + + def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())): + """ + Return virtual memory size in bytes of the running python. + + """ + try: + f = open(_proc_pid_stat, 'r') + l = f.readline().split(' ') + f.close() + return int(l[22]) + except Exception: + return +else: + def memusage(): + """ + Return memory usage of running python. [Not implemented] + + """ + raise NotImplementedError + + +if sys.platform[:5] == 'linux': + def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()), + _load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. 
+
+        """
+        import time
+        if not _load_time:
+            _load_time.append(time.time())
+        try:
+            f = open(_proc_pid_stat, 'r')
+            l = f.readline().split(' ')
+            f.close()
+            return int(l[13])
+        except Exception:
+            return int(100*(time.time()-_load_time[0]))
+else:
+    # os.getpid is not available on all platforms.
+    # Using time is safe but inaccurate, especially when the process
+    # was suspended or sleeping.
+    def jiffies(_load_time=[]):
+        """
+        Return number of jiffies elapsed.
+
+        Return number of jiffies (1/100ths of a second) that this
+        process has been scheduled in user mode. See man 5 proc.
+
+        """
+        import time
+        if not _load_time:
+            _load_time.append(time.time())
+        return int(100*(time.time()-_load_time[0]))
+
+
+def build_err_msg(arrays, err_msg, header='Items are not equal:',
+                  verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
+    msg = ['\n' + header]
+    if err_msg:
+        if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
+            msg = [msg[0] + ' ' + err_msg]
+        else:
+            msg.append(err_msg)
+    if verbose:
+        for i, a in enumerate(arrays):
+
+            if isinstance(a, ndarray):
+                # precision argument is only needed if the objects are ndarrays
+                r_func = partial(array_repr, precision=precision)
+            else:
+                r_func = repr
+
+            try:
+                r = r_func(a)
+            except Exception as exc:
+                r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
+            if r.count('\n') > 3:
+                r = '\n'.join(r.splitlines()[:3])
+                r += '...'
+            msg.append(' %s: %s' % (names[i], r))
+    return '\n'.join(msg)
+
+
+def assert_equal(actual, desired, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two objects are not equal.
+
+    Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
+    check that all elements of these objects are equal. An exception is raised
+    at the first conflicting values.
+
+    Parameters
+    ----------
+    actual : array_like
+        The object to check.
+    desired : array_like
+        The expected object.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired are not equal.
+
+    Examples
+    --------
+    >>> np.testing.assert_equal([4,5], [4,6])
+    Traceback (most recent call last):
+        ...
+ AssertionError: + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + """ + __tracebackhide__ = True # Hide traceback for py.test + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose) + return + from numpy.core import ndarray, isscalar, signbit + from numpy.lib import iscomplexobj, real, imag + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except (ValueError, TypeError): + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + # Inf/nan/negative zero handling + try: + isdesnan = gisnan(desired) + isactnan = gisnan(actual) + if isdesnan and isactnan: + return # both nan, so equal + + # handle signed zero specially for floats + if desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + isdesnat = isnat(desired) + isactnat = isnat(actual) + dtypes_match = array(desired).dtype.type == array(actual).dtype.type + if isdesnat and isactnat: + # If both are NaT (and have the same dtype -- datetime or + # timedelta) they are considered equal. + if dtypes_match: + return + else: + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + # Explicitly use __eq__ for comparison, gh-2552 + if not (desired == actual): + raise AssertionError(msg) + + except (DeprecationWarning, FutureWarning) as e: + # this handles the case when the two types are not even comparable + if 'elementwise == comparison' in e.args[0]: + raise AssertionError(msg) + else: + raise + + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. 
+ + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + __tracebackhide__ = True # Hide traceback for py.test + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual, msg) + msg.write('DESIRED: \n') + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + + +def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies that the elements of ``actual`` and ``desired`` satisfy. + + ``abs(desired-actual) < 1.5 * 10**(-decimal)`` + + That is a looser test than originally documented, but agrees with what the + actual implementation in `assert_array_almost_equal` did up to rounding + vagaries. An exception is raised at conflicting values. For ndarrays this + delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> import numpy.testing as npt + >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) + >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 10 decimals + ACTUAL: 2.3333333333333 + DESIRED: 2.33333334 + + >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), + ... np.array([1.0,2.33333334]), decimal=9) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 9 decimals + Mismatch: 50% + Max absolute difference: 6.66669964e-09 + Max relative difference: 2.85715698e-09 + x: array([1. , 2.333333333]) + y: array([1. 
, 2.33333334]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import ndarray + from numpy.lib import iscomplexobj, real, imag + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = ('Arrays are not almost equal to %d decimals' % decimal) + return build_err_msg([actual, desired], err_msg, verbose=verbose, + header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) \ + or isinstance(desired, (ndarray, tuple, list)): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(_build_err_msg()) + else: + if not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if abs(desired - actual) >= 1.5 * 10.0**(-decimal): + raise AssertionError(_build_err_msg()) + + +def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, + ... significant=8) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, + ... significant=8) + Traceback (most recent call last): + ... 
+
+    AssertionError:
+    Items are not equal to 8 significant digits:
+     ACTUAL: 1.234567e-21
+     DESIRED: 1.2345672e-21
+
+    the evaluated condition that raises the exception is
+
+    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
+    True
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+
+    (actual, desired) = map(float, (actual, desired))
+    if desired == actual:
+        return
+    # Normalize the numbers to be in range (-10.0,10.0)
+    # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+    with np.errstate(invalid='ignore'):
+        scale = 0.5*(np.abs(desired) + np.abs(actual))
+        scale = np.power(10, np.floor(np.log10(scale)))
+    try:
+        sc_desired = desired/scale
+    except ZeroDivisionError:
+        sc_desired = 0.0
+    try:
+        sc_actual = actual/scale
+    except ZeroDivisionError:
+        sc_actual = 0.0
+    msg = build_err_msg(
+        [actual, desired], err_msg,
+        header='Items are not equal to %d significant digits:' % significant,
+        verbose=verbose)
+    try:
+        # If one of desired/actual is not finite, handle it specially here:
+        # check that both are nan if any is a nan, and test for equality
+        # otherwise
+        if not (gisfinite(desired) and gisfinite(actual)):
+            if gisnan(desired) or gisnan(actual):
+                if not (gisnan(desired) and gisnan(actual)):
+                    raise AssertionError(msg)
+            else:
+                if not desired == actual:
+                    raise AssertionError(msg)
+            return
+    except (TypeError, NotImplementedError):
+        pass
+    if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
+        raise AssertionError(msg)
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
+                         header='', precision=6, equal_nan=True,
+                         equal_inf=True):
+    __tracebackhide__ = True  # Hide traceback for py.test
+    from numpy.core import array, array2string, isnan, inf, bool_, errstate
+
+    x = array(x, copy=False, subok=True)
+    y = array(y, copy=False, subok=True)
+
+    # original array for output formatting
+    ox, oy = x, y
+
+    def isnumber(x):
+        return x.dtype.char in '?bhilqpBHILQPefdgFDG'
+
+    def istime(x):
+        return x.dtype.char in "Mm"
+
+    def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
+        """Handling nan/inf.
+
+        Combine results of running func on x and y, checking that they are True
+        at the same locations.
+
+        """
+        x_id = func(x)
+        y_id = func(y)
+        # We include work-arounds here to handle three types of slightly
+        # pathological ndarray subclasses:
+        # (1) all() on `masked` array scalars can return masked arrays, so we
+        #     use != True
+        # (2) __eq__ on some ndarray subclasses returns Python booleans
+        #     instead of element-wise comparisons, so we cast to bool_() and
+        #     use isinstance(..., bool) checks
+        # (3) subclasses with bare-bones __array_function__ implementations may
+        #     not implement np.all(), so favor using the .all() method
+        # We are not committed to supporting such subclasses, but it's nice to
+        # support them if possible.
+        if bool_(x_id == y_id).all() != True:
+            msg = build_err_msg([x, y],
+                                err_msg + '\nx and y %s location mismatch:'
+                                % (hasval), verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            raise AssertionError(msg)
+        # If there is a scalar, then here we know the array has the same
+        # flag as it everywhere, so we should return the scalar flag.
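+        # Descriptive note (added comment): `x_id` and `y_id` are the boolean
+        # masks returned by `func` (e.g. `isnan`); a scalar operand yields a
+        # 0-d mask (or a plain bool), so the branches below return that single
+        # flag instead of an element-wise mask.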
+        if isinstance(x_id, bool) or x_id.ndim == 0:
+            return bool_(x_id)
+        elif isinstance(y_id, bool) or y_id.ndim == 0:
+            return bool_(y_id)
+        else:
+            return y_id
+
+    try:
+        cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
+        if not cond:
+            msg = build_err_msg([x, y],
+                                err_msg
+                                + '\n(shapes %s, %s mismatch)' % (x.shape,
+                                                                  y.shape),
+                                verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            raise AssertionError(msg)
+
+        flagged = bool_(False)
+        if isnumber(x) and isnumber(y):
+            if equal_nan:
+                flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
+
+            if equal_inf:
+                flagged |= func_assert_same_pos(x, y,
+                                                func=lambda xy: xy == +inf,
+                                                hasval='+inf')
+                flagged |= func_assert_same_pos(x, y,
+                                                func=lambda xy: xy == -inf,
+                                                hasval='-inf')
+
+        elif istime(x) and istime(y):
+            # If one is datetime64 and the other timedelta64 there is no point
+            if equal_nan and x.dtype.type == y.dtype.type:
+                flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
+
+        if flagged.ndim > 0:
+            x, y = x[~flagged], y[~flagged]
+            # Only do the comparison if actual values are left
+            if x.size == 0:
+                return
+        elif flagged:
+            # no sense doing comparison if everything is flagged.
+            return
+
+        val = comparison(x, y)
+
+        if isinstance(val, bool):
+            cond = val
+            reduced = [0]
+        else:
+            reduced = val.ravel()
+            cond = reduced.all()
+            reduced = reduced.tolist()
+
+        # The below comparison is a hack to ensure that fully masked
+        # results, for which val.ravel().all() returns np.ma.masked,
+        # do not trigger a failure (np.ma.masked != True evaluates as
+        # np.ma.masked, which is falsy).
+        if cond != True:
+            mismatch = 100.0 * reduced.count(0) / ox.size
+            remarks = ['Mismatch: {:.3g}%'.format(mismatch)]
+
+            with errstate(invalid='ignore', divide='ignore'):
+                # ignore errors for non-numeric types
+                try:
+                    error = abs(x - y)
+                    max_abs_error = error.max()
+                    remarks.append('Max absolute difference: '
+                                   + array2string(max_abs_error))
+
+                    # note: this definition of relative error matches that one
+                    # used by assert_allclose (found in np.isclose)
+                    max_rel_error = (error / abs(y)).max()
+                    remarks.append('Max relative difference: '
+                                   + array2string(max_rel_error))
+                except TypeError:
+                    pass
+
+            err_msg += '\n' + '\n'.join(remarks)
+            msg = build_err_msg([ox, oy], err_msg,
+                                verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            raise AssertionError(msg)
+    except ValueError:
+        import traceback
+        efmt = traceback.format_exc()
+        header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
+
+        msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
+                            names=('x', 'y'), precision=precision)
+        raise ValueError(msg)
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two array_like objects are not equal.
+
+    Given two array_like objects, check that the shape is equal and all
+    elements of these objects are equal. An exception is raised at
+    shape mismatch or conflicting values. In contrast to the standard usage
+    in numpy, NaNs are compared like numbers, no assertion is raised if
+    both objects have NaNs in the same positions.
+
+    The usual caution for verifying equality with floating point numbers is
+    advised.
+
+    Parameters
+    ----------
+    x : array_like
+        The actual object to check.
+    y : array_like
+        The desired, expected object.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired objects are not equal.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    The first assert does not raise an exception:
+
+    >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
+    ...                               [np.exp(0),2.33333, np.nan])
+
+    Assert fails with numerical imprecision with floats:
+
+    >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
+    ...                               [1, np.sqrt(np.pi)**2, np.nan])
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not equal
+    Mismatch: 33.3%
+    Max absolute difference: 4.4408921e-16
+    Max relative difference: 1.41357986e-16
+    x: array([1.      , 3.141593,      nan])
+    y: array([1.      , 3.141593,      nan])
+
+    Use `assert_allclose` or one of the nulp (number of floating point values)
+    functions for these cases instead:
+
+    >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
+    ...                            [1, np.sqrt(np.pi)**2, np.nan],
+    ...                            rtol=1e-10, atol=0)
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
+                         verbose=verbose, header='Arrays are not equal')
+
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two objects are not equal up to desired
+    precision.
+
+    .. note:: It is recommended to use one of `assert_allclose`,
+              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+              instead of this function for more consistent floating point
+              comparisons.
+
+    The test verifies identical shapes and that the elements of ``actual`` and
+    ``desired`` satisfy.
+
+        ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+    That is a looser test than originally documented, but agrees with what the
+    actual implementation did up to rounding vagaries. An exception is raised
+    at shape mismatch or conflicting values. In contrast to the standard usage
+    in numpy, NaNs are compared like numbers, no assertion is raised if both
+    objects have NaNs in the same positions.
+
+    Parameters
+    ----------
+    x : array_like
+        The actual object to check.
+    y : array_like
+        The desired, expected object.
+    decimal : int, optional
+        Desired precision, default is 6.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired are not equal up to specified precision.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    the first assert does not raise an exception
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
+    ...                                      [1.0,2.333,np.nan])
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+    ...                                      [1.0,2.33339,np.nan], decimal=5)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not almost equal to 5 decimals
+    Mismatch: 33.3%
+    Max absolute difference: 6.e-05
+    Max relative difference: 2.57136612e-05
+    x: array([1.     , 2.33333,     nan])
+    y: array([1.     , 2.33339,     nan])
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+    ...                                      [1.0,2.33333, 5], decimal=5)
+    Traceback (most recent call last):
+        ...
+ AssertionError: + Arrays are not almost equal to 5 decimals + x and y nan location mismatch: + x: array([1. , 2.33333, nan]) + y: array([1. , 2.33333, 5. ]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import number, float_, result_type, array + from numpy.core.numerictypes import issubdtype + from numpy.core.fromnumeric import any as npany + + def compare(x, y): + try: + if npany(gisinf(x)) or npany( gisinf(y)): + xinfid = gisinf(x) + yinfid = gisinf(y) + if not (xinfid == yinfid).all(): + return False + # if one item, x and y is +- inf + if x.size == y.size == 1: + return x == y + x = x[~xinfid] + y = y[~yinfid] + except (TypeError, NotImplementedError): + pass + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = result_type(y, 1.) + y = array(y, dtype=dtype, copy=False, subok=True) + z = abs(x - y) + + if not issubdtype(z.dtype, number): + z = z.astype(float_) # handle object arrays + + return z < 1.5 * 10.0**(-decimal) + + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header=('Arrays are not almost equal to %d decimals' % decimal), + precision=decimal) + + +def assert_array_less(x, y, err_msg='', verbose=True): + """ + Raises an AssertionError if two array_like objects are not ordered by less + than. + + Given two array_like objects, check that the shape is equal and all + elements of the first object are strictly smaller than those of the + second object. An exception is raised at shape mismatch or incorrectly + ordered values. Shape mismatch does not raise if an object has zero + dimension. In contrast to the standard usage in numpy, NaNs are + compared, no assertion is raised if both objects have NaNs in the same + positions. + + + + Parameters + ---------- + x : array_like + The smaller object to check. + y : array_like + The larger object to compare. + err_msg : string + The error message to be printed in case of failure. + verbose : bool + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_array_equal: tests objects for equality + assert_array_almost_equal: test objects for equality up to precision + + + + Examples + -------- + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not less-ordered + Mismatch: 33.3% + Max absolute difference: 1. + Max relative difference: 0.5 + x: array([ 1., 1., nan]) + y: array([ 1., 2., nan]) + + >>> np.testing.assert_array_less([1.0, 4.0], 3) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not less-ordered + Mismatch: 50% + Max absolute difference: 2. + Max relative difference: 0.66666667 + x: array([1., 4.]) + y: array(3) + + >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not less-ordered + (shapes (3,), (1,) mismatch) + x: array([1., 2., 3.]) + y: array([4]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, + verbose=verbose, + header='Arrays are not less-ordered', + equal_inf=False) + + +def runstring(astr, dict): + exec(astr, dict) + + +def assert_string_equal(actual, desired): + """ + Test if two strings are equal. 
+ + If the given strings are equal, `assert_string_equal` does nothing. + If they are not equal, an AssertionError is raised, and the diff + between the strings is shown. + + Parameters + ---------- + actual : str + The string to test for equality against the expected string. + desired : str + The expected string. + + Examples + -------- + >>> np.testing.assert_string_equal('abc', 'abc') + >>> np.testing.assert_string_equal('abc', 'abcd') + Traceback (most recent call last): + File "", line 1, in + ... + AssertionError: Differences in strings: + - abc+ abcd? + + + """ + # delay import of difflib to reduce startup time + __tracebackhide__ = True # Hide traceback for py.test + import difflib + + if not isinstance(actual, str): + raise AssertionError(repr(type(actual))) + if not isinstance(desired, str): + raise AssertionError(repr(type(desired))) + if desired == actual: + return + + diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1))) + diff_list = [] + while diff: + d1 = diff.pop(0) + if d1.startswith(' '): + continue + if d1.startswith('- '): + l = [d1] + d2 = diff.pop(0) + if d2.startswith('? '): + l.append(d2) + d2 = diff.pop(0) + if not d2.startswith('+ '): + raise AssertionError(repr(d2)) + l.append(d2) + if diff: + d3 = diff.pop(0) + if d3.startswith('? '): + l.append(d3) + else: + diff.insert(0, d3) + if d2[2:] == d1[2:]: + continue + diff_list.extend(l) + continue + raise AssertionError(repr(d1)) + if not diff_list: + return + msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip() + if actual != desired: + raise AssertionError(msg) + + +def rundocs(filename=None, raise_on_error=True): + """ + Run doctests found in the given file. + + By default `rundocs` raises an AssertionError on failure. + + Parameters + ---------- + filename : str + The path to the file for which the doctests are run. + raise_on_error : bool + Whether to raise an AssertionError when a doctest fails. Default is + True. + + Notes + ----- + The doctests can be run by the user/developer by adding the ``doctests`` + argument to the ``test()`` call. For example, to run all tests (including + doctests) for `numpy.lib`: + + >>> np.lib.test(doctests=True) # doctest: +SKIP + """ + from numpy.compat import npy_load_module + import doctest + if filename is None: + f = sys._getframe(1) + filename = f.f_globals['__file__'] + name = os.path.splitext(os.path.basename(filename))[0] + m = npy_load_module(name, filename) + + tests = doctest.DocTestFinder().find(m) + runner = doctest.DocTestRunner(verbose=False) + + msg = [] + if raise_on_error: + out = lambda s: msg.append(s) + else: + out = None + + for test in tests: + runner.run(test, out=out) + + if runner.failures > 0 and raise_on_error: + raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) + + +def raises(*args): + """Decorator to check for raised exceptions. + + The decorated test function must raise one of the passed exceptions to + pass. If you want to test many assertions about exceptions in a single + test, you may want to use `assert_raises` instead. + + .. warning:: + This decorator is nose specific, do not use it if you are using a + different test framework. + + Parameters + ---------- + args : exceptions + The test passes if any of the passed exceptions is raised. 
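+# Quick sketch of assert_string_equal in use: it is silent on equality
+# and raises with a difflib-style diff otherwise. The strings here are
+# illustrative.
+import numpy as np
+
+np.testing.assert_string_equal('abc', 'abc')
+try:
+    np.testing.assert_string_equal('abc', 'abcd')
+except AssertionError as exc:
+    print(exc)  # the message shows '- abc' / '+ abcd' diff lines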
+ + Raises + ------ + AssertionError + + Examples + -------- + + Usage:: + + @raises(TypeError, ValueError) + def test_raises_type_error(): + raise TypeError("This test passes") + + @raises(Exception) + def test_that_fails_by_passing(): + pass + + """ + nose = import_nose() + return nose.tools.raises(*args) + +# +# assert_raises and assert_raises_regex are taken from unittest. +# +import unittest + + +class _Dummy(unittest.TestCase): + def nop(self): + pass + +_d = _Dummy('nop') + +def assert_raises(*args, **kwargs): + """ + assert_raises(exception_class, callable, *args, **kwargs) + assert_raises(exception_class) + + Fail unless an exception of class exception_class is thrown + by callable when invoked with arguments args and keyword + arguments kwargs. If a different type of exception is + thrown, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + Alternatively, `assert_raises` can be used as a context manager: + + >>> from numpy.testing import assert_raises + >>> with assert_raises(ZeroDivisionError): + ... 1 / 0 + + is equivalent to + + >>> def div(x, y): + ... return x / y + >>> assert_raises(ZeroDivisionError, div, 1, 0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaises(*args,**kwargs) + + +def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): + """ + assert_raises_regex(exception_class, expected_regexp, callable, *args, + **kwargs) + assert_raises_regex(exception_class, expected_regexp) + + Fail unless an exception of class exception_class and with message that + matches expected_regexp is thrown by callable when invoked with arguments + args and keyword arguments kwargs. + + Alternatively, can be used as a context manager like `assert_raises`. + + Name of this function adheres to Python 3.2+ reference, but should work in + all versions down to 2.6. + + Notes + ----- + .. versionadded:: 1.9.0 + + """ + __tracebackhide__ = True # Hide traceback for py.test + + if sys.version_info.major >= 3: + funcname = _d.assertRaisesRegex + else: + # Only present in Python 2.7, missing from unittest in 2.6 + funcname = _d.assertRaisesRegexp + + return funcname(exception_class, expected_regexp, *args, **kwargs) + + +def decorate_methods(cls, decorator, testmatch=None): + """ + Apply a decorator to all methods in a class matching a regular expression. + + The given decorator is applied to all public methods of `cls` that are + matched by the regular expression `testmatch` + (``testmatch.search(methodname)``). Methods that are private, i.e. start + with an underscore, are ignored. + + Parameters + ---------- + cls : class + Class whose methods to decorate. + decorator : function + Decorator to apply to methods + testmatch : compiled regexp or str, optional + The regular expression. Default value is None, in which case the + nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) + is used. + If `testmatch` is a string, it is compiled to a regular expression + first. 
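+# Sketch of both call styles documented above for assert_raises and
+# assert_raises_regex; the exception class and message pattern are
+# illustrative.
+from numpy.testing import assert_raises, assert_raises_regex
+
+with assert_raises(ZeroDivisionError):
+    1 / 0
+
+def fail(msg):
+    raise ValueError(msg)
+
+assert_raises_regex(ValueError, 'no good', fail, 'this is no good')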
+ + """ + if testmatch is None: + testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + else: + testmatch = re.compile(testmatch) + cls_attr = cls.__dict__ + + # delayed import to reduce startup time + from inspect import isfunction + + methods = [_m for _m in cls_attr.values() if isfunction(_m)] + for function in methods: + try: + if hasattr(function, 'compat_func_name'): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + continue + if testmatch.search(funcname) and not funcname.startswith('_'): + setattr(cls, funcname, decorator(function)) + return + + +def measure(code_str, times=1, label=None): + """ + Return elapsed time for executing code in the namespace of the caller. + + The supplied code string is compiled with the Python builtin ``compile``. + The precision of the timing is 10 milli-seconds. If the code will execute + fast on this timescale, it can be executed many times to get reasonable + timing accuracy. + + Parameters + ---------- + code_str : str + The code to be timed. + times : int, optional + The number of times the code is executed. Default is 1. The code is + only compiled once. + label : str, optional + A label to identify `code_str` with. This is passed into ``compile`` + as the second argument (for run-time error messages). + + Returns + ------- + elapsed : float + Total elapsed time in seconds for executing `code_str` `times` times. + + Examples + -------- + >>> times = 10 + >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times) + >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP + Time for a single execution : 0.005 s + + """ + frame = sys._getframe(1) + locs, globs = frame.f_locals, frame.f_globals + + code = compile(code_str, + 'Test name: %s ' % label, + 'exec') + i = 0 + elapsed = jiffies() + while i < times: + i += 1 + exec(code, globs, locs) + elapsed = jiffies() - elapsed + return 0.01*elapsed + + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. + """ + if not HAS_REFCOUNT: + return True + import numpy as np, gc + + b = np.arange(100*100).reshape(100, 100) + c = b + i = 1 + + gc.disable() + try: + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + assert_(sys.getrefcount(i) >= rc) + finally: + gc.enable() + del d # for pyflakes + + +def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, + err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)``. + It compares the difference between `actual` and `desired` to + ``atol + rtol * abs(desired)``. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + equal_nan : bool, optional. + If True, NaNs will compare equal. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. 
+ + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + def compare(x, y): + return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, + equal_nan=equal_nan) + + actual, desired = np.asanyarray(actual), np.asanyarray(desired) + header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol) + assert_array_compare(compare, actual, desired, err_msg=str(err_msg), + verbose=verbose, header=header, equal_nan=equal_nan) + + +def assert_array_almost_equal_nulp(x, y, nulp=1): + """ + Compare two arrays relatively to their spacing. + + This is a relatively robust method to compare two arrays whose amplitude + is variable. + + Parameters + ---------- + x, y : array_like + Input arrays. + nulp : int, optional + The maximum number of unit in the last place for tolerance (see Notes). + Default is 1. + + Returns + ------- + None + + Raises + ------ + AssertionError + If the spacing between `x` and `y` for one or more elements is larger + than `nulp`. + + See Also + -------- + assert_array_max_ulp : Check that all items of arrays differ in at most + N Units in the Last Place. + spacing : Return the distance between x and the nearest adjacent number. + + Notes + ----- + An assertion is raised if the following condition is not met:: + + abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y))) + + Examples + -------- + >>> x = np.array([1., 1e-10, 1e-20]) + >>> eps = np.finfo(x.dtype).eps + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) + + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) + Traceback (most recent call last): + ... + AssertionError: X and Y are not equal to 1 ULP (max is 2) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ax = np.abs(x) + ay = np.abs(y) + ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) + if not np.all(np.abs(x-y) <= ref): + if np.iscomplexobj(x) or np.iscomplexobj(y): + msg = "X and Y are not equal to %d ULP" % nulp + else: + max_nulp = np.max(nulp_diff(x, y)) + msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) + raise AssertionError(msg) + + +def assert_array_max_ulp(a, b, maxulp=1, dtype=None): + """ + Check that all items of arrays differ in at most N Units in the Last Place. + + Parameters + ---------- + a, b : array_like + Input arrays to be compared. + maxulp : int, optional + The maximum number of units in the last place that elements of `a` and + `b` can differ. Default is 1. + dtype : dtype, optional + Data-type to convert `a` and `b` to if given. Default is None. + + Returns + ------- + ret : ndarray + Array containing number of representable floating point numbers between + items in `a` and `b`. + + Raises + ------ + AssertionError + If one or more elements differ by more than `maxulp`. + + See Also + -------- + assert_array_almost_equal_nulp : Compare two arrays relatively to their + spacing. 
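+# Sketch of the tolerance rule stated above: the check is
+# abs(actual - desired) <= atol + rtol * abs(desired), hence asymmetric
+# in its two arguments. The numbers are illustrative.
+import numpy as np
+
+np.testing.assert_allclose(1.0 + 1e-9, 1.0, rtol=1e-8, atol=0)
+try:
+    np.testing.assert_allclose(1e-12, 0.0, rtol=1e-8, atol=0)
+except AssertionError:
+    pass  # desired == 0 gives zero tolerance; a nonzero atol is needed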
+ + Examples + -------- + >>> a = np.linspace(0., 1., 100) + >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ret = nulp_diff(a, b, dtype) + if not np.all(ret <= maxulp): + raise AssertionError("Arrays are not almost equal up to %g ULP" % + maxulp) + return ret + + +def nulp_diff(x, y, dtype=None): + """For each item in x and y, return the number of representable floating + points between them. + + Parameters + ---------- + x : array_like + first input array + y : array_like + second input array + dtype : dtype, optional + Data-type to convert `x` and `y` to if given. Default is None. + + Returns + ------- + nulp : array_like + number of representable floating point numbers between each item in x + and y. + + Examples + -------- + # By definition, epsilon is the smallest number such as 1 + eps != 1, so + # there should be exactly one ULP between 1 and 1 + eps + >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) + 1.0 + """ + import numpy as np + if dtype: + x = np.array(x, dtype=dtype) + y = np.array(y, dtype=dtype) + else: + x = np.array(x) + y = np.array(y) + + t = np.common_type(x, y) + if np.iscomplexobj(x) or np.iscomplexobj(y): + raise NotImplementedError("_nulp not implemented for complex array") + + x = np.array(x, dtype=t) + y = np.array(y, dtype=t) + + if not x.shape == y.shape: + raise ValueError("x and y do not have the same shape: %s - %s" % + (x.shape, y.shape)) + + def _diff(rx, ry, vdt): + diff = np.array(rx-ry, dtype=vdt) + return np.abs(diff) + + rx = integer_repr(x) + ry = integer_repr(y) + return _diff(rx, ry, t) + + +def _integer_repr(x, vdt, comp): + # Reinterpret binary representation of the float as sign-magnitude: + # take into account two-complement representation + # See also + # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + rx = x.view(vdt) + if not (rx.size == 1): + rx[rx < 0] = comp - rx[rx < 0] + else: + if rx < 0: + rx = comp - rx + + return rx + + +def integer_repr(x): + """Return the signed-magnitude interpretation of the binary representation of + x.""" + import numpy as np + if x.dtype == np.float16: + return _integer_repr(x, np.int16, np.int16(-2**15)) + elif x.dtype == np.float32: + return _integer_repr(x, np.int32, np.int32(-2**31)) + elif x.dtype == np.float64: + return _integer_repr(x, np.int64, np.int64(-2**63)) + else: + raise ValueError("Unsupported dtype %s" % x.dtype) + + +@contextlib.contextmanager +def _assert_warns_context(warning_class, name=None): + __tracebackhide__ = True # Hide traceback for py.test + with suppress_warnings() as sup: + l = sup.record(warning_class) + yield + if not len(l) > 0: + name_str = " when calling %s" % name if name is not None else "" + raise AssertionError("No warning raised" + name_str) + + +def assert_warns(warning_class, *args, **kwargs): + """ + Fail unless the given callable throws the specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught. + + If called with all arguments other than the warning class omitted, may be + used as a context manager: + + with assert_warns(SomeWarning): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. 
versionadded:: 1.4.0 + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. + + """ + if not args: + return _assert_warns_context(warning_class) + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + yield + if len(l) > 0: + name_str = " when calling %s" % name if name is not None else "" + raise AssertionError("Got warnings%s: %s" % (name_str, l)) + + +def assert_no_warnings(*args, **kwargs): + """ + Fail if the given callable produces any warnings. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_warnings(): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. + + """ + if not args: + return _assert_no_warnings_context() + + func = args[0] + args = args[1:] + with _assert_no_warnings_context(name=func.__name__): + return func(*args, **kwargs) + + +def _gen_alignment_data(dtype=float32, type='binary', max_size=24): + """ + generator producing data with different alignment and offsets + to test simd vectorization + + Parameters + ---------- + dtype : dtype + data type to produce + type : string + 'unary': create data for unary operations, creates one input + and output array + 'binary': create data for unary operations, creates two input + and output array + max_size : integer + maximum size of data to produce + + Returns + ------- + if type is 'unary' yields one output, one input array and a message + containing information on the data + if type is 'binary' yields one output array, two input array and a message + containing information on the data + + """ + ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' + bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' + for o in range(3): + for s in range(o + 2, max(o + 3, max_size)): + if type == 'unary': + inp = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') + d = inp() + yield d, d, ufmt % (o, o, s, dtype, 'in place') + yield out[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'out of place') + yield inp()[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'aliased') + yield inp()[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'aliased') + if type == 'binary': + inp1 = lambda: arange(s, dtype=dtype)[o:] + inp2 = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp1(), inp2(), bfmt % \ + (o, o, o, s, dtype, 'out of place') + d = inp1() + yield d, d, inp2(), bfmt % \ + (o, o, o, s, dtype, 'in place1') + d = inp2() + yield d, inp1(), d, bfmt % \ + (o, o, o, s, dtype, 'in place2') + yield out[1:], inp1()[:-1], inp2()[:-1], 
bfmt % \ + (o + 1, o, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'out of place') + yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'aliased') + + +class IgnoreException(Exception): + "Ignoring this exception due to disabled feature" + pass + + +@contextlib.contextmanager +def tempdir(*args, **kwargs): + """Context manager to provide a temporary test folder. + + All arguments are passed as this to the underlying tempfile.mkdtemp + function. + + """ + tmpdir = mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + +@contextlib.contextmanager +def temppath(*args, **kwargs): + """Context manager for temporary files. + + Context manager that returns the path to a closed temporary file. Its + parameters are the same as for tempfile.mkstemp and are passed directly + to that function. The underlying file is removed when the context is + exited, so it should be closed at that time. + + Windows does not allow a temporary file to be opened if it is already + open, so the underlying file must be closed after opening before it + can be opened again. + + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) + + +class clear_and_catch_warnings(warnings.catch_warnings): + """ Context manager that resets warning registry for catching warnings + + Warnings can be slippery, because, whenever a warning is triggered, Python + adds a ``__warningregistry__`` member to the *calling* module. This makes + it impossible to retrigger the warning in this module, whatever you put in + the warnings filters. This context manager accepts a sequence of `modules` + as a keyword argument to its constructor and: + + * stores and removes any ``__warningregistry__`` entries in given `modules` + on entry; + * resets ``__warningregistry__`` to its previous state on exit. + + This makes it possible to trigger any warning afresh inside the context + manager without disturbing the state of warnings outside. + + For compatibility with Python 3.0, please consider all arguments to be + keyword-only. + + Parameters + ---------- + record : bool, optional + Specifies whether warnings should be captured by a custom + implementation of ``warnings.showwarning()`` and be appended to a list + returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + modules : sequence, optional + Sequence of modules for which to reset warnings registry on entry and + restore on exit. To work correctly, all 'ignore' filters should + filter by one of these modules. + + Examples + -------- + >>> import warnings + >>> with np.testing.clear_and_catch_warnings( + ... modules=[np.core.fromnumeric]): + ... warnings.simplefilter('always') + ... warnings.filterwarnings('ignore', module='np.core.fromnumeric') + ... # do something that raises a warning but ignore those in + ... 
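+# Sketch of the two temp-file context managers defined above; the file
+# suffix is arbitrary. Both clean up after themselves even if the body
+# raises.
+import os
+from numpy.testing import tempdir, temppath
+
+with tempdir() as d:
+    assert os.path.isdir(d)
+
+with temppath(suffix='.txt') as p:
+    with open(p, 'w') as f:
+        f.write('scratch')
+assert not os.path.exists(p)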
# np.core.fromnumeric + """ + class_modules = () + + def __init__(self, record=False, modules=()): + self.modules = set(modules).union(self.class_modules) + self._warnreg_copies = {} + super(clear_and_catch_warnings, self).__init__(record=record) + + def __enter__(self): + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod_reg = mod.__warningregistry__ + self._warnreg_copies[mod] = mod_reg.copy() + mod_reg.clear() + return super(clear_and_catch_warnings, self).__enter__() + + def __exit__(self, *exc_info): + super(clear_and_catch_warnings, self).__exit__(*exc_info) + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod.__warningregistry__.clear() + if mod in self._warnreg_copies: + mod.__warningregistry__.update(self._warnreg_copies[mod]) + + +class suppress_warnings(object): + """ + Context manager and decorator doing much the same as + ``warnings.catch_warnings``. + + However, it also provides a filter mechanism to work around + https://bugs.python.org/issue4180. + + This bug causes Python before 3.4 to not reliably show warnings again + after they have been ignored once (even within catch_warnings). It + means that no "ignore" filter can be used easily, since following + tests might need to see the warning. Additionally it allows easier + specificity for testing warnings and can be nested. + + Parameters + ---------- + forwarding_rule : str, optional + One of "always", "once", "module", or "location". Analogous to + the usual warnings module filter mode, it is useful to reduce + noise mostly on the outmost level. Unsuppressed and unrecorded + warnings will be forwarded based on this rule. Defaults to "always". + "location" is equivalent to the warnings "default", match by exact + location the warning warning originated from. + + Notes + ----- + Filters added inside the context manager will be discarded again + when leaving it. Upon entering all filters defined outside a + context will be applied automatically. + + When a recording filter is added, matching warnings are stored in the + ``log`` attribute as well as in the list returned by ``record``. + + If filters are added and the ``module`` keyword is given, the + warning registry of this module will additionally be cleared when + applying it, entering the context, or exiting it. This could cause + warnings to appear a second time after leaving the context if they + were configured to be printed once (default) and were already + printed before the context was entered. + + Nesting this context manager will work as expected when the + forwarding rule is "always" (default). Unfiltered and unrecorded + warnings will be passed out and be matched by the outer level. + On the outmost level they will be printed (or caught by another + warnings context). The forwarding rule argument can modify this + behaviour. + + Like ``catch_warnings`` this context manager is not threadsafe. + + Examples + -------- + + With a context manager:: + + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Some text") + sup.filter(module=np.ma.core) + log = sup.record(FutureWarning, "Does this occur?") + command_giving_warnings() + # The FutureWarning was given once, the filtered warnings were + # ignored. 
All other warnings abide outside settings (may be + # printed/error) + assert_(len(log) == 1) + assert_(len(sup.log) == 1) # also stored in log attribute + + Or as a decorator:: + + sup = np.testing.suppress_warnings() + sup.filter(module=np.ma.core) # module must match exactly + @sup + def some_function(): + # do something which causes a warning in np.ma.core + pass + """ + def __init__(self, forwarding_rule="always"): + self._entered = False + + # Suppressions are either instance or defined inside one with block: + self._suppressions = [] + + if forwarding_rule not in {"always", "module", "once", "location"}: + raise ValueError("unsupported forwarding rule.") + self._forwarding_rule = forwarding_rule + + def _clear_registries(self): + if hasattr(warnings, "_filters_mutated"): + # clearing the registry should not be necessary on new pythons, + # instead the filters should be mutated. + warnings._filters_mutated() + return + # Simply clear the registry, this should normally be harmless, + # note that on new pythons it would be invalidated anyway. + for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings( + "always", category=category, message=message) + else: + module_regex = module.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=category, message=message, + module=module_regex) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, + record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. 
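+# Minimal sketch contrasting filter() and record(); the categories and
+# message patterns are illustrative.
+import warnings
+from numpy.testing import suppress_warnings
+
+with suppress_warnings() as sup:
+    sup.filter(UserWarning, 'noisy')              # matched: ignored
+    log = sup.record(DeprecationWarning, 'old')   # matched: recorded
+    warnings.warn('noisy detail', UserWarning)
+    warnings.warn('old API', DeprecationWarning)
+assert len(log) == 1 and len(sup.log) == 1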
+ """ + return self._filter(category=category, message=message, module=module, + record=True) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings( + "always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=cat, message=mess, + module=module_regex) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning(self, message, category, filename, lineno, + *args, **kwargs): + use_warnmsg = kwargs.pop("use_warnmsg", None) + for cat, _, pattern, mod, rec in ( + self._suppressions + self._tmp_suppressions)[::-1]: + if (issubclass(category, cat) and + pattern.match(message.args[0]) is not None): + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. + elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, + *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, + **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. 
+ """ + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func + + +@contextlib.contextmanager +def _assert_no_gc_cycles_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + + # not meaningful to test if there is no refcounting + if not HAS_REFCOUNT: + return + + assert_(gc.isenabled()) + gc.disable() + gc_debug = gc.get_debug() + try: + for i in range(100): + if gc.collect() == 0: + break + else: + raise RuntimeError( + "Unable to fully collect garbage - perhaps a __del__ method is " + "creating more reference cycles?") + + gc.set_debug(gc.DEBUG_SAVEALL) + yield + # gc.collect returns the number of unreachable objects in cycles that + # were found -- we are checking that no cycles were created in the context + n_objects_in_cycles = gc.collect() + objects_in_cycles = gc.garbage[:] + finally: + del gc.garbage[:] + gc.set_debug(gc_debug) + gc.enable() + + if n_objects_in_cycles: + name_str = " when calling %s" % name if name is not None else "" + raise AssertionError( + "Reference cycles were found{}: {} objects were collected, " + "of which {} are shown below:{}" + .format( + name_str, + n_objects_in_cycles, + len(objects_in_cycles), + ''.join( + "\n {} object with id={}:\n {}".format( + type(o).__name__, + id(o), + pprint.pformat(o).replace('\n', '\n ') + ) for o in objects_in_cycles + ) + ) + ) + + +def assert_no_gc_cycles(*args, **kwargs): + """ + Fail if the given callable produces any reference cycles. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_gc_cycles(): + do_something() + + .. versionadded:: 1.15.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + Nothing. The result is deliberately discarded to ensure that all cycles + are found. + + """ + if not args: + return _assert_no_gc_cycles_context() + + func = args[0] + args = args[1:] + with _assert_no_gc_cycles_context(name=func.__name__): + func(*args, **kwargs) diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/_private/utils.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/utils.pyc new file mode 100644 index 0000000..1d431fc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/_private/utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/decorators.py b/project/venv/lib/python2.7/site-packages/numpy/testing/decorators.py new file mode 100644 index 0000000..bf78be5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/decorators.py @@ -0,0 +1,15 @@ +""" +Back compatibility decorators module. 
It will import the appropriate +set of tools + +""" +from __future__ import division, absolute_import, print_function + +import warnings + +# 2018-04-04, numpy 1.15.0 +warnings.warn("Importing from numpy.testing.decorators is deprecated " + "since numpy 1.15.0, import from numpy.testing instead.", + DeprecationWarning, stacklevel=2) + +from ._private.decorators import * diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/decorators.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/decorators.pyc new file mode 100644 index 0000000..9e095a2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/decorators.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/noseclasses.py b/project/venv/lib/python2.7/site-packages/numpy/testing/noseclasses.py new file mode 100644 index 0000000..5748a9a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/noseclasses.py @@ -0,0 +1,14 @@ +""" +Back compatibility noseclasses module. It will import the appropriate +set of tools +""" +from __future__ import division, absolute_import, print_function + +import warnings + +# 2018-04-04, numpy 1.15.0 +warnings.warn("Importing from numpy.testing.noseclasses is deprecated " + "since 1.15.0, import from numpy.testing instead", + DeprecationWarning, stacklevel=2) + +from ._private.noseclasses import * diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/noseclasses.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/noseclasses.pyc new file mode 100644 index 0000000..9812100 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/noseclasses.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/nosetester.py b/project/venv/lib/python2.7/site-packages/numpy/testing/nosetester.py new file mode 100644 index 0000000..2ac212e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/nosetester.py @@ -0,0 +1,19 @@ +""" +Back compatibility nosetester module. 
It will import the appropriate +set of tools + +""" +from __future__ import division, absolute_import, print_function + +import warnings + +# 2018-04-04, numpy 1.15.0 +warnings.warn("Importing from numpy.testing.nosetester is deprecated " + "since 1.15.0, import from numpy.testing instead.", + DeprecationWarning, stacklevel=2) + +from ._private.nosetester import * + +__all__ = ['get_package_name', 'run_module_suite', 'NoseTester', + '_numpy_tester', 'get_package_name', 'import_nose', + 'suppress_warnings'] diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/nosetester.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/nosetester.pyc new file mode 100644 index 0000000..a189952 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/nosetester.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/print_coercion_tables.py b/project/venv/lib/python2.7/site-packages/numpy/testing/print_coercion_tables.py new file mode 100644 index 0000000..3a359f4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/print_coercion_tables.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python +"""Prints type-coercion tables for the built-in NumPy types + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np + +# Generic object that can be added, but doesn't do anything else +class GenericObject(object): + def __init__(self, v): + self.v = v + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + dtype = np.dtype('O') + +def print_cancast_table(ntypes): + print('X', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + print(row, end=' ') + for col in ntypes: + print(int(np.can_cast(row, col)), end=' ') + print() + +def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False): + print('+', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + if row == 'O': + rowtype = GenericObject + else: + rowtype = np.obj2sctype(row) + + print(row, end=' ') + for col in ntypes: + if col == 'O': + coltype = GenericObject + else: + coltype = np.obj2sctype(col) + try: + if firstarray: + rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) + else: + rowvalue = rowtype(inputfirstvalue) + colvalue = coltype(inputsecondvalue) + if use_promote_types: + char = np.promote_types(rowvalue.dtype, colvalue.dtype).char + else: + value = np.add(rowvalue, colvalue) + if isinstance(value, np.ndarray): + char = value.dtype.char + else: + char = np.dtype(type(value)).char + except ValueError: + char = '!' 
+ except OverflowError: + char = '@' + except TypeError: + char = '#' + print(char, end=' ') + print() + +print("can cast") +print_cancast_table(np.typecodes['All']) +print() +print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'") +print() +print("scalar + scalar") +print_coercion_table(np.typecodes['All'], 0, 0, False) +print() +print("scalar + neg scalar") +print_coercion_table(np.typecodes['All'], 0, -1, False) +print() +print("array + scalar") +print_coercion_table(np.typecodes['All'], 0, 0, True) +print() +print("array + neg scalar") +print_coercion_table(np.typecodes['All'], 0, -1, True) +print() +print("promote_types") +print_coercion_table(np.typecodes['All'], 0, 0, False, True) diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/print_coercion_tables.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/print_coercion_tables.pyc new file mode 100644 index 0000000..fb115e5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/print_coercion_tables.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/setup.py b/project/venv/lib/python2.7/site-packages/numpy/testing/setup.py new file mode 100644 index 0000000..7c3f2fb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/setup.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +from __future__ import division, print_function + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('testing', parent_package, top_path) + + config.add_subpackage('_private') + config.add_data_dir('tests') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(maintainer="NumPy Developers", + maintainer_email="numpy-dev@numpy.org", + description="NumPy test module", + url="https://www.numpy.org", + license="NumPy License (BSD Style)", + configuration=configuration, + ) diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/setup.pyc new file mode 100644 index 0000000..d9a27a6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/__init__.pyc new file mode 100644 index 0000000..afbc288 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_decorators.py b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_decorators.py new file mode 100644 index 0000000..bb3ea1a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_decorators.py @@ -0,0 +1,214 @@ +""" +Test the decorators from ``testing.decorators``. 
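+# The coercion tables printed above are driven by np.can_cast and
+# np.promote_types; a tiny sketch of the same queries on fixed dtypes.
+import numpy as np
+
+print(np.can_cast(np.int32, np.float64))       # True
+print(np.promote_types(np.int32, np.float64))  # float64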
+ +""" +from __future__ import division, absolute_import, print_function + +import warnings +import pytest + +from numpy.testing import ( + assert_, assert_raises, dec, SkipTest, KnownFailureException, + ) + + +try: + import nose # noqa: F401 +except ImportError: + HAVE_NOSE = False +else: + HAVE_NOSE = True + + +@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose") +class TestNoseDecorators(object): + # These tests are run in a class for simplicity while still + # getting a report on each, skipped or success. + + class DidntSkipException(Exception): + pass + + def test_slow(self): + @dec.slow + def slow_func(x, y, z): + pass + + assert_(slow_func.slow) + + def test_setastest(self): + @dec.setastest() + def f_default(a): + pass + + @dec.setastest(True) + def f_istest(a): + pass + + @dec.setastest(False) + def f_isnottest(a): + pass + + assert_(f_default.__test__) + assert_(f_istest.__test__) + assert_(not f_isnottest.__test__) + + def test_skip_functions_hardcoded(self): + @dec.skipif(True) + def f1(x): + raise self.DidntSkipException + + try: + f1('a') + except self.DidntSkipException: + raise Exception('Failed to skip') + except SkipTest().__class__: + pass + + @dec.skipif(False) + def f2(x): + raise self.DidntSkipException + + try: + f2('a') + except self.DidntSkipException: + pass + except SkipTest().__class__: + raise Exception('Skipped when not expected to') + + def test_skip_functions_callable(self): + def skip_tester(): + return skip_flag == 'skip me!' + + @dec.skipif(skip_tester) + def f1(x): + raise self.DidntSkipException + + try: + skip_flag = 'skip me!' + f1('a') + except self.DidntSkipException: + raise Exception('Failed to skip') + except SkipTest().__class__: + pass + + @dec.skipif(skip_tester) + def f2(x): + raise self.DidntSkipException + + try: + skip_flag = 'five is right out!' + f2('a') + except self.DidntSkipException: + pass + except SkipTest().__class__: + raise Exception('Skipped when not expected to') + + def test_skip_generators_hardcoded(self): + @dec.knownfailureif(True, "This test is known to fail") + def g1(x): + for i in range(x): + yield i + + try: + for j in g1(10): + pass + except KnownFailureException().__class__: + pass + else: + raise Exception('Failed to mark as known failure') + + @dec.knownfailureif(False, "This test is NOT known to fail") + def g2(x): + for i in range(x): + yield i + raise self.DidntSkipException('FAIL') + + try: + for j in g2(10): + pass + except KnownFailureException().__class__: + raise Exception('Marked incorrectly as known failure') + except self.DidntSkipException: + pass + + def test_skip_generators_callable(self): + def skip_tester(): + return skip_flag == 'skip me!' + + @dec.knownfailureif(skip_tester, "This test is known to fail") + def g1(x): + for i in range(x): + yield i + + try: + skip_flag = 'skip me!' 
+ for j in g1(10): + pass + except KnownFailureException().__class__: + pass + else: + raise Exception('Failed to mark as known failure') + + @dec.knownfailureif(skip_tester, "This test is NOT known to fail") + def g2(x): + for i in range(x): + yield i + raise self.DidntSkipException('FAIL') + + try: + skip_flag = 'do not skip' + for j in g2(10): + pass + except KnownFailureException().__class__: + raise Exception('Marked incorrectly as known failure') + except self.DidntSkipException: + pass + + def test_deprecated(self): + @dec.deprecated(True) + def non_deprecated_func(): + pass + + @dec.deprecated() + def deprecated_func(): + import warnings + warnings.warn("TEST: deprecated func", DeprecationWarning) + + @dec.deprecated() + def deprecated_func2(): + import warnings + warnings.warn("AHHHH") + raise ValueError + + @dec.deprecated() + def deprecated_func3(): + import warnings + warnings.warn("AHHHH") + + # marked as deprecated, but does not raise DeprecationWarning + assert_raises(AssertionError, non_deprecated_func) + # should be silent + deprecated_func() + with warnings.catch_warnings(record=True): + warnings.simplefilter("always") # do not propagate unrelated warnings + # fails if deprecated decorator just disables test. See #1453. + assert_raises(ValueError, deprecated_func2) + # warning is not a DeprecationWarning + assert_raises(AssertionError, deprecated_func3) + + def test_parametrize(self): + # dec.parametrize assumes that it is being run by nose. Because + # we are running under pytest, we need to explicitly check the + # results. + @dec.parametrize('base, power, expected', + [(1, 1, 1), + (2, 1, 2), + (2, 2, 4)]) + def check_parametrize(base, power, expected): + assert_(base**power == expected) + + count = 0 + for test in check_parametrize(): + test[0](*test[1:]) + count += 1 + assert_(count == 3) diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_decorators.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_decorators.pyc new file mode 100644 index 0000000..3042f3b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_decorators.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_doctesting.py b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_doctesting.py new file mode 100644 index 0000000..b77cd93 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_doctesting.py @@ -0,0 +1,59 @@ +""" Doctests for NumPy-specific nose/doctest modifications + +""" +from __future__ import division, absolute_import, print_function + +#FIXME: None of these tests is run, because 'check' is not a recognized +# testing prefix. + +# try the #random directive on the output line +def check_random_directive(): + ''' + >>> 2+2 + #random: may vary on your system + ''' + +# check the implicit "import numpy as np" +def check_implicit_np(): + ''' + >>> np.array([1,2,3]) + array([1, 2, 3]) + ''' + +# there's some extraneous whitespace around the correct responses +def check_whitespace_enabled(): + ''' + # whitespace after the 3 + >>> 1+2 + 3 + + # whitespace before the 7 + >>> 3+4 + 7 + ''' + +def check_empty_output(): + """ Check that no output does not cause an error. 
+ + This is related to nose bug 445; the numpy plugin changed the + doctest-result-variable default and therefore hit this bug: + http://code.google.com/p/python-nose/issues/detail?id=445 + + >>> a = 10 + """ + +def check_skip(): + """ Check skip directive + + The test below should not run + + >>> 1/0 #doctest: +SKIP + """ + + +if __name__ == '__main__': + # Run tests outside numpy test rig + import nose + from numpy.testing.noseclasses import NumpyDoctest + argv = ['', __file__, '--with-numpydoctest'] + nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()]) diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_doctesting.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_doctesting.pyc new file mode 100644 index 0000000..5eb1580 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_doctesting.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_utils.py b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_utils.py new file mode 100644 index 0000000..c376a38 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_utils.py @@ -0,0 +1,1542 @@ +from __future__ import division, absolute_import, print_function + +import warnings +import sys +import os +import itertools +import textwrap +import pytest +import weakref + +import numpy as np +from numpy.testing import ( + assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_array_less, build_err_msg, raises, + assert_raises, assert_warns, assert_no_warnings, assert_allclose, + assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, + clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_, + tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT + ) + + +class _GenericTest(object): + + def _test_equal(self, a, b): + self._assert_func(a, b) + + def _test_not_equal(self, a, b): + with assert_raises(AssertionError): + self._assert_func(a, b) + + def test_array_rank1_eq(self): + """Test two equal array of rank 1 are found equal.""" + a = np.array([1, 2]) + b = np.array([1, 2]) + + self._test_equal(a, b) + + def test_array_rank1_noteq(self): + """Test two different array of rank 1 are found not equal.""" + a = np.array([1, 2]) + b = np.array([2, 2]) + + self._test_not_equal(a, b) + + def test_array_rank2_eq(self): + """Test two equal array of rank 2 are found equal.""" + a = np.array([[1, 2], [3, 4]]) + b = np.array([[1, 2], [3, 4]]) + + self._test_equal(a, b) + + def test_array_diffshape(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array([1, 2]) + b = np.array([[1, 2], [1, 2]]) + + self._test_not_equal(a, b) + + def test_objarray(self): + """Test object arrays.""" + a = np.array([1, 1], dtype=object) + self._test_equal(a, 1) + + def test_array_likes(self): + self._test_equal([1, 2, 3], (1, 2, 3)) + + +class TestArrayEqual(_GenericTest): + + def setup(self): + self._assert_func = assert_array_equal + + def test_generic_rank1(self): + """Test rank 1 array for all dtypes.""" + def foo(t): + a = np.empty(2, t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_generic_rank3(self): + """Test rank 3 array for all dtypes.""" + def foo(t): + a = np.empty((4, 2, 3), t) + a.fill(1) 
+ b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_nan_array(self): + """Test arrays with nan values in them.""" + a = np.array([1, 2, np.nan]) + b = np.array([1, 2, np.nan]) + + self._test_equal(a, b) + + c = np.array([1, 2, 3]) + self._test_not_equal(c, b) + + def test_string_arrays(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array(['floupi', 'floupa']) + b = np.array(['floupi', 'floupa']) + + self._test_equal(a, b) + + c = np.array(['floupipi', 'floupa']) + + self._test_not_equal(c, b) + + def test_recarrays(self): + """Test record arrays.""" + a = np.empty(2, [('floupi', float), ('floupa', float)]) + a['floupi'] = [1, 2] + a['floupa'] = [1, 2] + b = a.copy() + + self._test_equal(a, b) + + c = np.empty(2, [('floupipi', float), ('floupa', float)]) + c['floupipi'] = a['floupi'].copy() + c['floupa'] = a['floupa'].copy() + + with suppress_warnings() as sup: + l = sup.record(FutureWarning, message="elementwise == ") + self._test_not_equal(c, b) + assert_equal(len(l), 1) + + def test_masked_nan_inf(self): + # Regression test for gh-11121 + a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False]) + b = np.array([3., np.nan, 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False]) + b = np.array([np.inf, 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_that_overrides_eq(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. 
+ class MyArray(np.ndarray): + def __eq__(self, other): + return bool(np.equal(self, other).all()) + + def __ne__(self, other): + return not self == other + + a = np.array([1., 2.]).view(MyArray) + b = np.array([2., 3.]).view(MyArray) + assert_(type(a == a), bool) + assert_(a == a) + assert_(a != b) + self._test_equal(a, a) + self._test_not_equal(a, b) + self._test_not_equal(b, a) + + def test_subclass_that_does_not_implement_npall(self): + class MyArray(np.ndarray): + def __array_function__(self, *args, **kwargs): + return NotImplemented + + a = np.array([1., 2.]).view(MyArray) + b = np.array([2., 3.]).view(MyArray) + if np.core.overrides.ENABLE_ARRAY_FUNCTION: + with assert_raises(TypeError): + np.all(a) + self._test_equal(a, a) + self._test_not_equal(a, b) + self._test_not_equal(b, a) + + +class TestBuildErrorMessage(object): + + def test_build_err_msg_defaults(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, ' + '2.00003, 3.00004])') + assert_equal(a, b) + + def test_build_err_msg_no_verbose(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, verbose=False) + b = '\nItems are not equal: There is a mismatch' + assert_equal(a, b) + + def test_build_err_msg_custom_names(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) + b = ('\nItems are not equal: There is a mismatch\n FOO: array([' + '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, ' + '3.00004])') + assert_equal(a, b) + + def test_build_err_msg_custom_precision(self): + x = np.array([1.000000001, 2.00002, 3.00003]) + y = np.array([1.000000002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, precision=10) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([' + '1.000000002, 2.00003 , 3.00004 ])') + assert_equal(a, b) + + +class TestEqual(TestArrayEqual): + + def setup(self): + self._assert_func = assert_equal + + def test_nan_items(self): + self._assert_func(np.nan, np.nan) + self._assert_func([np.nan], [np.nan]) + self._test_not_equal(np.nan, [np.nan]) + self._test_not_equal(np.nan, 1) + + def test_inf_items(self): + self._assert_func(np.inf, np.inf) + self._assert_func([np.inf], [np.inf]) + self._test_not_equal(np.inf, [np.inf]) + + def test_datetime(self): + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "s") + ) + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "m") + ) + + # gh-10081 + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "s") + ) + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "m") + ) + + def test_nat_items(self): + # not a datetime + nadt_no_unit = np.datetime64("NaT") + nadt_s = np.datetime64("NaT", "s") + nadt_d = np.datetime64("NaT", "ns") + # not a timedelta + natd_no_unit = np.timedelta64("NaT") + natd_s = np.timedelta64("NaT", "s") + natd_d = np.timedelta64("NaT", "ns") + + dts = [nadt_no_unit, nadt_s, nadt_d] + tds = [natd_no_unit, 
natd_s, natd_d] + for a, b in itertools.product(dts, dts): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, tds): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, dts): + self._test_not_equal(a, b) + self._test_not_equal(a, [b]) + self._test_not_equal([a], [b]) + self._test_not_equal([a], np.datetime64("2017-01-01", "s")) + self._test_not_equal([b], np.datetime64("2017-01-01", "s")) + self._test_not_equal([a], np.timedelta64(123, "s")) + self._test_not_equal([b], np.timedelta64(123, "s")) + + def test_non_numeric(self): + self._assert_func('ab', 'ab') + self._test_not_equal('ab', 'abb') + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_negative_zero(self): + self._test_not_equal(np.PZERO, np.NZERO) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + + def test_error_message(self): + with pytest.raises(AssertionError) as exc_info: + self._assert_func(np.array([1, 2]), np.array([[1, 2]])) + msg = str(exc_info.value) + msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)") + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + x: array([1, 2]) + y: array([[1, 2]])""") + + try: + assert_equal(msg, msg_reference) + except AssertionError: + assert_equal(msg2, msg_reference) + + +class TestArrayAlmostEqual(_GenericTest): + + def setup(self): + self._assert_func = assert_array_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. 
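+        # For example, with decimal=0 the effective bound is
+        # abs(x - y) < 1.5, so 1.499999 passes against 0.0 while 1.5 fails.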
+ + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_simple(self): + x = np.array([1234.2222]) + y = np.array([1234.2223]) + + self._assert_func(x, y, decimal=3) + self._assert_func(x, y, decimal=4) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, decimal=5)) + + def test_nan(self): + anan = np.array([np.nan]) + aone = np.array([1]) + ainf = np.array([np.inf]) + self._assert_func(anan, anan) + assert_raises(AssertionError, + lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, + lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, + lambda: self._assert_func(ainf, anan)) + + def test_inf(self): + a = np.array([[1., 2.], [3., 4.]]) + b = a.copy() + a[0, 0] = np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + b[0, 0] = -np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + + def test_subclass(self): + a = np.array([[1., 2.], [3., 4.]]) + b = np.ma.masked_array([[1., 2.], [0., 4.]], + [[False, False], [True, False]]) + self._assert_func(a, b) + self._assert_func(b, a) + self._assert_func(b, b) + + # Test fully masked as well (see gh-11123). + a = np.ma.MaskedArray(3.5, mask=True) + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.masked + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array([1., 2., 3.]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array(1.) + self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super(MyArray, self).__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super(MyArray, self).__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestAlmostEqual(_GenericTest): + + def setup(self): + self._assert_func = assert_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. 
+ + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_nan_item(self): + self._assert_func(np.nan, np.nan) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, np.inf)) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, np.nan)) + + def test_inf_item(self): + self._assert_func(np.inf, np.inf) + self._assert_func(-np.inf, -np.inf) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(-np.inf, np.inf)) + + def test_simple_item(self): + self._test_not_equal(1, 2) + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + z = np.array([complex(1, 2), complex(np.nan, 1)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + self._test_not_equal(x, z) + + def test_error_message(self): + """Check the message is formatted correctly for the decimal value. + Also check the message when input includes inf or nan (gh12200)""" + x = np.array([1.00000000001, 2.00000000002, 3.00003]) + y = np.array([1.00000000002, 2.00000000003, 3.00004]) + + # Test with a different amount of decimal digits + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y, decimal=12) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatch: 100%') + assert_equal(msgs[4], 'Max absolute difference: 1.e-05') + assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') + assert_equal( + msgs[6], + ' x: array([1.00000000001, 2.00000000002, 3.00003 ])') + assert_equal( + msgs[7], + ' y: array([1.00000000002, 2.00000000003, 3.00004 ])') + + # With the default value of decimal digits, only the 3rd element + # differs. Note that we only check for the formatting of the arrays + # themselves. + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatch: 33.3%') + assert_equal(msgs[4], 'Max absolute difference: 1.e-05') + assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') + assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])') + assert_equal(msgs[7], ' y: array([1. , 2. 
, 3.00004])') + + # Check the error message when input includes inf + x = np.array([np.inf, 0]) + y = np.array([np.inf, 1]) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatch: 50%') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 1.') + assert_equal(msgs[6], ' x: array([inf, 0.])') + assert_equal(msgs[7], ' y: array([inf, 1.])') + + # Check the error message when dividing by zero + x = np.array([1, 2]) + y = np.array([0, 0]) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatch: 100%') + assert_equal(msgs[4], 'Max absolute difference: 2') + assert_equal(msgs[5], 'Max relative difference: inf') + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super(MyArray, self).__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super(MyArray, self).__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestApproxEqual(object): + + def setup(self): + self._assert_func = assert_approx_equal + + def test_simple_arrays(self): + x = np.array([1234.22]) + y = np.array([1234.23]) + + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_simple_items(self): + x = 1234.22 + y = 1234.23 + + self._assert_func(x, y, significant=4) + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_nan_array(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_items(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + +class TestArrayAssertLess(object): + + def setup(self): + self._assert_func = assert_array_less + + def test_simple_arrays(self): + x = np.array([1.1, 2.2]) + y = np.array([1.2, 2.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 2.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank2(self): + x = np.array([[1.1, 2.2], [3.3, 4.4]]) + y = np.array([[1.2, 2.3], [3.4, 4.5]]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([[1.0, 2.3], [3.4, 4.5]]) + + assert_raises(AssertionError, 
lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank3(self): + x = np.ones(shape=(2, 2, 2)) + y = np.ones(shape=(2, 2, 2))+1 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y[0, 0, 0] = 0 + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_simple_items(self): + x = 1.1 + y = 2.2 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([2.2, 3.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 3.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_nan_noncompare(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(aone, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_noncompare_array(self): + x = np.array([1.1, 2.2, 3.3]) + anan = np.array(np.nan) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + x = np.array([1.1, 2.2, np.nan]) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + y = np.array([1.0, 2.0, np.nan]) + + self._assert_func(y, x) + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_inf_compare(self): + aone = np.array(1) + ainf = np.array(np.inf) + + self._assert_func(aone, ainf) + self._assert_func(-ainf, aone) + self._assert_func(-ainf, ainf) + assert_raises(AssertionError, lambda: self._assert_func(ainf, aone)) + assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf)) + + def test_inf_compare_array(self): + x = np.array([1.1, 2.2, np.inf]) + ainf = np.array(np.inf) + + assert_raises(AssertionError, lambda: self._assert_func(x, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, x)) + assert_raises(AssertionError, lambda: self._assert_func(x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x)) + self._assert_func(-ainf, x) + + +@pytest.mark.skip(reason="The raises decorator depends on Nose") +class TestRaises(object): + + def setup(self): + class MyException(Exception): + pass + + self.e = MyException + + def raises_exception(self, e): + raise e + + def does_not_raise_exception(self): + pass + + def test_correct_catch(self): + raises(self.e)(self.raises_exception)(self.e) # raises? + + def test_wrong_exception(self): + try: + raises(self.e)(self.raises_exception)(RuntimeError) # raises? + except RuntimeError: + return + else: + raise AssertionError("should have caught RuntimeError") + + def test_catch_no_raise(self): + try: + raises(self.e)(self.does_not_raise_exception)() # raises? 
+        except AssertionError:
+            return
+        else:
+            raise AssertionError("should have raised an AssertionError")
+
+
+class TestWarns(object):
+
+    def test_warn(self):
+        def f():
+            warnings.warn("yo")
+            return 3
+
+        before_filters = sys.modules['warnings'].filters[:]
+        assert_equal(assert_warns(UserWarning, f), 3)
+        after_filters = sys.modules['warnings'].filters
+
+        assert_raises(AssertionError, assert_no_warnings, f)
+        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
+
+        # Check that the warnings state is unchanged
+        assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserve warnings state")
+
+    def test_context_manager(self):
+
+        before_filters = sys.modules['warnings'].filters[:]
+        with assert_warns(UserWarning):
+            warnings.warn("yo")
+        after_filters = sys.modules['warnings'].filters
+
+        def no_warnings():
+            with assert_no_warnings():
+                warnings.warn("yo")
+
+        assert_raises(AssertionError, no_warnings)
+        assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserve warnings state")
+
+    def test_warn_wrong_warning(self):
+        def f():
+            warnings.warn("yo", DeprecationWarning)
+
+        failed = False
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", DeprecationWarning)
+            try:
+                # Should raise a DeprecationWarning
+                assert_warns(UserWarning, f)
+                failed = True
+            except DeprecationWarning:
+                pass
+
+        if failed:
+            raise AssertionError("wrong warning caught by assert_warns")
+
+
+class TestAssertAllclose(object):
+
+    def test_simple(self):
+        x = 1e-3
+        y = 1e-9
+
+        assert_allclose(x, y, atol=1)
+        assert_raises(AssertionError, assert_allclose, x, y)
+
+        a = np.array([x, y, x, y])
+        b = np.array([x, y, x, x])
+
+        assert_allclose(a, b, atol=1)
+        assert_raises(AssertionError, assert_allclose, a, b)
+
+        b[-1] = y * (1 + 1e-8)
+        assert_allclose(a, b)
+        assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)
+
+        assert_allclose(6, 10, rtol=0.5)
+        assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
+
+    def test_min_int(self):
+        a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
+        # Should not raise:
+        assert_allclose(a, a)
+
+    def test_report_fail_percentage(self):
+        a = np.array([1, 1, 1, 1])
+        b = np.array([1, 1, 1, 2])
+
+        with pytest.raises(AssertionError) as exc_info:
+            assert_allclose(a, b)
+        msg = str(exc_info.value)
+        assert_('Mismatch: 25%\nMax absolute difference: 1\n'
+                'Max relative difference: 0.5' in msg)
+
+    def test_equal_nan(self):
+        a = np.array([np.nan])
+        b = np.array([np.nan])
+        # Should not raise:
+        assert_allclose(a, b, equal_nan=True)
+
+    def test_not_equal_nan(self):
+        a = np.array([np.nan])
+        b = np.array([np.nan])
+        assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
+
+    def test_equal_nan_default(self):
+        # Make sure equal_nan default behavior remains unchanged. (All
+        # of these functions use assert_array_compare under the hood.)
+        # None of these should raise.
+        a = np.array([np.nan])
+        b = np.array([np.nan])
+        assert_array_equal(a, b)
+        assert_array_almost_equal(a, b)
+        assert_array_less(a, b)
+        assert_allclose(a, b)
+
+
+class TestArrayAlmostEqualNulp(object):
+
+    def test_float64_pass(self):
+        # The number of units of least precision (ULP).
+        # In this case, use a few places above the lowest level (i.e. nulp=1)
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float64)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        # Addition
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+        # Subtraction
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
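+        # (epsneg is the spacing just below 1.0, so this stays within nulp ULPs)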
+ assert_array_almost_equal_nulp(x, y, nulp) + + def test_float64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float32_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float32_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float16_pass(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float16_fail(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_complex128_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x*eps*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + y = x - x*epsneg*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + def test_complex128_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y*1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x*1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x*eps*nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. 
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+    def test_complex64_pass(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        y = x + x*eps*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+    def test_complex64_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        y = x + x*eps*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+
+class TestULP(object):
+
+    def test_equal(self):
+        x = np.random.randn(10)
+        assert_array_max_ulp(x, x, maxulp=0)
+
+    def test_single(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+        x = np.ones(10).astype(np.float32)
+        x += 0.01 * np.random.randn(10).astype(np.float32)
+        eps = np.finfo(np.float32).eps
+        assert_array_max_ulp(x, x+eps, maxulp=20)
+
+    def test_double(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+        x = np.ones(10).astype(np.float64)
+        x += 0.01 * np.random.randn(10).astype(np.float64)
+        eps = np.finfo(np.float64).eps
+        assert_array_max_ulp(x, x+eps, maxulp=200)
+
+    def test_inf(self):
+        for dt in [np.float32, np.float64]:
+            inf = np.array([np.inf]).astype(dt)
+            big = np.array([np.finfo(dt).max])
+            assert_array_max_ulp(inf, big, maxulp=200)
+
+    def test_nan(self):
+        # Test that nan is 'far' from small, tiny, inf, max and min
+        for dt in [np.float32, np.float64]:
+            if dt == np.float32:
+                maxulp = 1e6
+            else:
+                maxulp = 1e12
+            inf = np.array([np.inf]).astype(dt)
+            nan = np.array([np.nan]).astype(dt)
+            big = np.array([np.finfo(dt).max])
+            tiny = np.array([np.finfo(dt).tiny])
+            zero = np.array([np.PZERO]).astype(dt)
+            nzero = np.array([np.NZERO]).astype(dt)
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, inf,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, big,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, tiny,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, zero,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, nzero,
+                                                       maxulp=maxulp))
+
+
+class TestStringEqual(object):
+    def test_simple(self):
+        assert_string_equal("hello", "hello")
+        
assert_string_equal("hello\nmultiline", "hello\nmultiline") + + with pytest.raises(AssertionError) as exc_info: + assert_string_equal("foo\nbar", "hello\nbar") + msg = str(exc_info.value) + assert_equal(msg, "Differences in strings:\n- foo\n+ hello") + + assert_raises(AssertionError, + lambda: assert_string_equal("foo", "hello")) + + def test_regex(self): + assert_string_equal("a+*b", "a+*b") + + assert_raises(AssertionError, + lambda: assert_string_equal("aaa", "a+b")) + + +def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None): + try: + mod_warns = mod.__warningregistry__ + except AttributeError: + # the lack of a __warningregistry__ + # attribute means that no warning has + # occurred; this can be triggered in + # a parallel test scenario, while in + # a serial test scenario an initial + # warning (and therefore the attribute) + # are always created first + mod_warns = {} + + num_warns = len(mod_warns) + # Python 3.4 appears to clear any pre-existing warnings of the same type, + # when raising warnings inside a catch_warnings block. So, there is a + # warning generated by the tests within the context manager, but no + # previous warnings. + if 'version' in mod_warns: + # Python 3 adds a 'version' entry to the registry, + # do not count it. + num_warns -= 1 + + # Behavior of warnings is Python version dependent. Adjust the + # expected result to compensate. In particular, Python 3.7 does + # not make an entry for ignored warnings. + if sys.version_info[:2] >= (3, 7): + if py37 is not None: + n_in_context = py37 + elif sys.version_info[:2] >= (3, 4): + if py34 is not None: + n_in_context = py34 + assert_equal(num_warns, n_in_context) + +def test_warn_len_equal_call_scenarios(): + # assert_warn_len_equal is called under + # varying circumstances depending on serial + # vs. parallel test scenarios; this test + # simply aims to probe both code paths and + # check that no assertion is uncaught + + # parallel scenario -- no warning issued yet + class mod(object): + pass + + mod_inst = mod() + + assert_warn_len_equal(mod=mod_inst, + n_in_context=0) + + # serial test scenario -- the __warningregistry__ + # attribute should be present + class mod(object): + def __init__(self): + self.__warningregistry__ = {'warning1':1, + 'warning2':2} + + mod_inst = mod() + assert_warn_len_equal(mod=mod_inst, + n_in_context=2) + + +def _get_fresh_mod(): + # Get this module, with warning registry empty + my_mod = sys.modules[__name__] + try: + my_mod.__warningregistry__.clear() + except AttributeError: + # will not have a __warningregistry__ unless warning has been + # raised in the module at some point + pass + return my_mod + + +def test_clear_and_catch_warnings(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + # Without specified modules, don't clear warnings during context + # Python 3.7 catch_warnings doesn't make an entry for 'ignore'. 
+ with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 1, py37=0) + # Confirm that specifying module keeps old warning, does not add new + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 1, py37=0) + # Another warning, no module spec does add to warnings dict, except on + # Python 3.4 (see comments in `assert_warn_len_equal`) + # Python 3.7 catch_warnings doesn't make an entry for 'ignore'. + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 2, py34=1, py37=0) + + +def test_suppress_warnings_module(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. + def warn(arr): + warnings.warn("Some warning 2", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + # Test module based warning suppression: + assert_warn_len_equal(my_mod, 0) + with suppress_warnings() as sup: + sup.record(UserWarning) + # suppress warning from other module (may have .pyc ending), + # if apply_along_axis is moved, had to be changed. + sup.filter(module=np.lib.shape_base) + warnings.warn("Some warning") + warn_other_module() + # Check that the suppression did test the file correctly (this module + # got filtered) + assert_equal(len(sup.log), 1) + assert_equal(sup.log[0].message.args[0], "Some warning") + assert_warn_len_equal(my_mod, 0, py37=0) + sup = suppress_warnings() + # Will have to be changed if apply_along_axis is moved: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules, don't clear warnings during context + # Python 3.7 does not add ignored warnings. + with suppress_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 1, py37=0) + +def test_suppress_warnings_type(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + # Test module based warning suppression: + with suppress_warnings() as sup: + sup.filter(UserWarning) + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + sup = suppress_warnings() + sup.filter(UserWarning) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules, don't clear warnings during context + # Python 3.7 does not add ignored warnings. 
+    with suppress_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 1, py37=0)
+
+
+def test_suppress_warnings_decorate_no_record():
+    sup = suppress_warnings()
+    sup.filter(UserWarning)
+
+    @sup
+    def warn(category):
+        warnings.warn('Some warning', category)
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+        warn(UserWarning)  # should be suppressed
+        warn(RuntimeWarning)
+        assert_equal(len(w), 1)
+
+
+def test_suppress_warnings_record():
+    sup = suppress_warnings()
+    log1 = sup.record()
+
+    with sup:
+        log2 = sup.record(message='Some other warning 2')
+        sup.filter(message='Some warning')
+        warnings.warn('Some warning')
+        warnings.warn('Some other warning')
+        warnings.warn('Some other warning 2')
+
+        assert_equal(len(sup.log), 2)
+        assert_equal(len(log1), 1)
+        assert_equal(len(log2), 1)
+        assert_equal(log2[0].message.args[0], 'Some other warning 2')
+
+    # Do it again, with the same context to see if some warnings survived:
+    with sup:
+        log2 = sup.record(message='Some other warning 2')
+        sup.filter(message='Some warning')
+        warnings.warn('Some warning')
+        warnings.warn('Some other warning')
+        warnings.warn('Some other warning 2')
+
+        assert_equal(len(sup.log), 2)
+        assert_equal(len(log1), 1)
+        assert_equal(len(log2), 1)
+        assert_equal(log2[0].message.args[0], 'Some other warning 2')
+
+    # Test nested:
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings() as sup2:
+            sup2.record(message='Some warning')
+            warnings.warn('Some warning')
+            warnings.warn('Some other warning')
+            assert_equal(len(sup2.log), 1)
+        assert_equal(len(sup.log), 1)
+
+
+def test_suppress_warnings_forwarding():
+    def warn_other_module():
+        # Apply along axis is implemented in python; stacklevel=2 means
+        # we end up inside its module, not ours.
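+        # The warning is thus attributed to numpy's shape_base module, so the
+        # "module"-based forwarding below counts it separately from the
+        # warnings raised directly in this test module.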
+ def warn(arr): + warnings.warn("Some warning", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("always"): + for i in range(2): + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("location"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("module"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("once"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some other warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + +def test_tempdir(): + with tempdir() as tdir: + fpath = os.path.join(tdir, 'tmp') + with open(fpath, 'w'): + pass + assert_(not os.path.isdir(tdir)) + + raised = False + try: + with tempdir() as tdir: + raise ValueError() + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isdir(tdir)) + + +def test_temppath(): + with temppath() as fpath: + with open(fpath, 'w'): + pass + assert_(not os.path.isfile(fpath)) + + raised = False + try: + with temppath() as fpath: + raise ValueError() + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isfile(fpath)) + + +class my_cacw(clear_and_catch_warnings): + + class_modules = (sys.modules[__name__],) + + +def test_clear_and_catch_warnings_inherit(): + # Test can subclass and add default modules + my_mod = _get_fresh_mod() + with my_cacw(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +class TestAssertNoGcCycles(object): + """ Test assert_no_gc_cycles """ + def test_passes(self): + def no_cycle(): + b = [] + b.append([]) + return b + + with assert_no_gc_cycles(): + no_cycle() + + assert_no_gc_cycles(no_cycle) + + def test_asserts(self): + def make_cycle(): + a = [] + a.append(a) + a.append(a) + return a + + with assert_raises(AssertionError): + with assert_no_gc_cycles(): + make_cycle() + + with assert_raises(AssertionError): + assert_no_gc_cycles(make_cycle) + + def test_fails(self): + """ + Test that in cases where the garbage cannot be collected, we raise an + error, instead of hanging forever trying to clear it. + """ + + class ReferenceCycleInDel(object): + """ + An object that not only contains a reference cycle, but creates new + cycles whenever it's garbage-collected and its __del__ runs + """ + make_cycle = True + + def __init__(self): + self.cycle = self + + def __del__(self): + # break the current cycle so that `self` can be freed + self.cycle = None + + if ReferenceCycleInDel.make_cycle: + # but create a new one so that the garbage collector has more + # work to do. + ReferenceCycleInDel() + + try: + w = weakref.ref(ReferenceCycleInDel()) + try: + with assert_raises(RuntimeError): + # this will be unable to get a baseline empty garbage + assert_no_gc_cycles(lambda: None) + except AssertionError: + # the above test is only necessary if the GC actually tried to free + # our object anyway, which python 2.7 does not. 
+ if w() is not None: + pytest.skip("GC does not call __del__ on cyclic objects") + raise + + finally: + # make sure that we stop creating reference cycles + ReferenceCycleInDel.make_cycle = False diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_utils.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_utils.pyc new file mode 100644 index 0000000..e70184c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/tests/test_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/utils.py b/project/venv/lib/python2.7/site-packages/numpy/testing/utils.py new file mode 100644 index 0000000..98f19e3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/testing/utils.py @@ -0,0 +1,29 @@ +""" +Back compatibility utils module. It will import the appropriate +set of tools + +""" +from __future__ import division, absolute_import, print_function + +import warnings + +# 2018-04-04, numpy 1.15.0 +warnings.warn("Importing from numpy.testing.utils is deprecated " + "since 1.15.0, import from numpy.testing instead.", + ImportWarning, stacklevel=2) + +from ._private.utils import * + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', + 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', + '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles' + ] diff --git a/project/venv/lib/python2.7/site-packages/numpy/testing/utils.pyc b/project/venv/lib/python2.7/site-packages/numpy/testing/utils.pyc new file mode 100644 index 0000000..5496bdc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/testing/utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/tests/__init__.pyc new file mode 100644 index 0000000..e19a5c4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_ctypeslib.py b/project/venv/lib/python2.7/site-packages/numpy/tests/test_ctypeslib.py new file mode 100644 index 0000000..521208c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/tests/test_ctypeslib.py @@ -0,0 +1,367 @@ +from __future__ import division, absolute_import, print_function + +import sys +import pytest +import weakref + +import numpy as np +from numpy.ctypeslib import ndpointer, load_library, as_array +from numpy.distutils.misc_util import get_shared_lib_extension +from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal + +try: + import ctypes +except ImportError: + ctypes = None +else: + cdll = None + test_cdll = None + if hasattr(sys, 'gettotalrefcount'): + try: + cdll = 
load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__)
+        except OSError:
+            pass
+        try:
+            test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
+        except OSError:
+            pass
+    if cdll is None:
+        cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
+    if test_cdll is None:
+        test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
+
+    c_forward_pointer = test_cdll.forward_pointer
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available in this python")
+@pytest.mark.skipif(sys.platform == 'cygwin',
+                    reason="Known to fail on cygwin")
+class TestLoadLibrary(object):
+    def test_basic(self):
+        try:
+            # Should succeed
+            load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
+        except ImportError as e:
+            msg = ("ctypes is not available on this python: skipping the test"
+                   " (import error was: %s)" % str(e))
+            print(msg)
+
+    def test_basic2(self):
+        # Regression for #801: load_library with a full library name
+        # (including extension) does not work.
+        try:
+            try:
+                so = get_shared_lib_extension(is_python_ext=True)
+                # Should succeed
+                load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__)
+            except ImportError:
+                print("No distutils available, skipping test.")
+        except ImportError as e:
+            msg = ("ctypes is not available on this python: skipping the test"
+                   " (import error was: %s)" % str(e))
+            print(msg)
+
+
+class TestNdpointer(object):
+    def test_dtype(self):
+        dt = np.intc
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.array([1], dt)))
+        dt = '<i4'
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.array([1], dt)))
+        dt = np.dtype('>i4')
+        p = ndpointer(dtype=dt)
+        p.from_param(np.array([1], dt))
+        assert_raises(TypeError, p.from_param,
+                      np.array([1], dt.newbyteorder('swap')))
+        dtnames = ['x', 'y']
+        dtformats = [np.intc, np.float64]
+        dtdescr = {'names': dtnames, 'formats': dtformats}
+        dt = np.dtype(dtdescr)
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.zeros((10,), dt)))
+        samedt = np.dtype(dtdescr)
+        p = ndpointer(dtype=samedt)
+        assert_(p.from_param(np.zeros((10,), dt)))
+        dt2 = np.dtype(dtdescr, align=True)
+        if dt.itemsize != dt2.itemsize:
+            assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
+        else:
+            assert_(p.from_param(np.zeros((10,), dt2)))
+
+    def test_ndim(self):
+        p = ndpointer(ndim=0)
+        assert_(p.from_param(np.array(1)))
+        assert_raises(TypeError, p.from_param, np.array([1]))
+        p = ndpointer(ndim=1)
+        assert_raises(TypeError, p.from_param, np.array(1))
+        assert_(p.from_param(np.array([1])))
+        p = ndpointer(ndim=2)
+        assert_(p.from_param(np.array([[1]])))
+
+    def test_shape(self):
+        p = ndpointer(shape=(1, 2))
+        assert_(p.from_param(np.array([[1, 2]])))
+        assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
+        p = ndpointer(shape=())
+        assert_(p.from_param(np.array(1)))
+
+    def test_flags(self):
+        x = np.array([[1, 2], [3, 4]], order='F')
+        p = ndpointer(flags='FORTRAN')
+        assert_(p.from_param(x))
+        p = ndpointer(flags='CONTIGUOUS')
+        assert_raises(TypeError, p.from_param, x)
+        p = ndpointer(flags=x.flags.num)
+        assert_(p.from_param(x))
+        assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
+
+    def test_cache(self):
+        assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
+
+        # shapes are normalized
+        assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
+
+        # 1.12 <= v < 1.16 had a bug that made these fail
+        assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
+        assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available on this python installation")
+class TestNdpointerCFunc(object):
+    def test_arguments(self):
+        """ Test that arguments are coerced from arrays """
+        c_forward_pointer.restype = ctypes.c_void_p
+        c_forward_pointer.argtypes = (ndpointer(ndim=2),)
+
+        c_forward_pointer(np.zeros((2, 3)))
+        # too many dimensions
+        assert_raises(
+            ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
+
+    @pytest.mark.parametrize(
+        'dt', [
+            float,
+            np.dtype(dict(
+                formats=['<i4', '<i4'],
+                names=['a', 'b'],
+                offsets=[0, 2],
+                itemsize=6
+            ))
+        ], ids=[
+            'float',
+            'overlapping-fields',
+        ]
+    )
+    def test_return(self, dt):
+        """ Test that return values are coerced to arrays """
+        arr = np.zeros((2, 3), dt)
+        ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
+
+        c_forward_pointer.restype = ptr_type
+        c_forward_pointer.argtypes = (ptr_type,)
+
+        # check that the arrays are equivalent views on the same data
+        arr2 = c_forward_pointer(arr)
+        assert_equal(arr2.dtype, arr.dtype)
+        assert_equal(arr2.shape, arr.shape)
+        assert_equal(
+            arr2.__array_interface__['data'],
+            arr.__array_interface__['data']
+        )
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available in this python")
+class TestAsCtypesType(object):
+    """ Test conversion from dtypes to ctypes types """
+    def test_scalar(self):
+        dt = np.dtype('<u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_le__)
+
+        dt = np.dtype('>u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_be__)
+
+        dt = np.dtype('u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16)
+
+    def test_subarray(self):
+        dt = np.dtype((np.int32, (2, 3)))
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, 2 * (3 * ctypes.c_int32))
+
+    def test_structure(self):
+        dt = np.dtype([
+            ('a', np.uint16),
+            ('b', np.uint32),
+        ])
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Structure))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_structure_aligned(self):
+        dt = np.dtype([
+            ('a', np.uint16),
+            ('b', np.uint32),
+        ], align=True)
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Structure))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('', ctypes.c_char * 2),  # padding
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_union(self):
+        dt = np.dtype(dict(
+            names=['a', 'b'],
+            offsets=[0, 0],
+            formats=[np.uint16, np.uint32]
+        ))
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Union))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_padded_union(self):
+        dt = np.dtype(dict(
+            names=['a', 'b'],
+            offsets=[0, 0],
+            formats=[np.uint16, np.uint32],
+            itemsize=5,
+        ))
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Union))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+            ('', ctypes.c_char * 5),  # padding
+        ])
+
+    def test_overlapping(self):
+        dt = np.dtype(dict(
+            names=['a', 'b'],
+            offsets=[0, 2],
+            formats=[np.uint32, np.uint32]
+        ))
+        assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_ctypeslib.pyc b/project/venv/lib/python2.7/site-packages/numpy/tests/test_ctypeslib.pyc
new file mode 100644
index 0000000..b03965f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/tests/test_ctypeslib.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_matlib.py b/project/venv/lib/python2.7/site-packages/numpy/tests/test_matlib.py
new file mode 100644
index 0000000..38a7e39
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/tests/test_matlib.py
@@ -0,0 +1,68 @@
+from __future__ import division, absolute_import, print_function
+
+# As we are testing matrices, we ignore its PendingDeprecationWarnings
+try:
+    import pytest
+    pytestmark = pytest.mark.filterwarnings(
+        'ignore:the matrix subclass is not:PendingDeprecationWarning')
+except ImportError:
+    pass
+
+import numpy as np
+import numpy.matlib
+from numpy.testing import assert_array_equal, assert_
+
+def test_empty():
+    x = numpy.matlib.empty((2,))
+    assert_(isinstance(x, np.matrix))
+    assert_(x.shape, (1, 
2)) + +def test_ones(): + assert_array_equal(numpy.matlib.ones((2, 3)), + np.matrix([[ 1., 1., 1.], + [ 1., 1., 1.]])) + + assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1., 1.]])) + +def test_zeros(): + assert_array_equal(numpy.matlib.zeros((2, 3)), + np.matrix([[ 0., 0., 0.], + [ 0., 0., 0.]])) + + assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0., 0.]])) + +def test_identity(): + x = numpy.matlib.identity(2, dtype=int) + assert_array_equal(x, np.matrix([[1, 0], [0, 1]])) + +def test_eye(): + xc = numpy.matlib.eye(3, k=1, dtype=int) + assert_array_equal(xc, np.matrix([[ 0, 1, 0], + [ 0, 0, 1], + [ 0, 0, 0]])) + assert xc.flags.c_contiguous + assert not xc.flags.f_contiguous + + xf = numpy.matlib.eye(3, 4, dtype=int, order='F') + assert_array_equal(xf, np.matrix([[ 1, 0, 0, 0], + [ 0, 1, 0, 0], + [ 0, 0, 1, 0]])) + assert not xf.flags.c_contiguous + assert xf.flags.f_contiguous + +def test_rand(): + x = numpy.matlib.rand(3) + # check matrix type, array would have shape (3,) + assert_(x.ndim == 2) + +def test_randn(): + x = np.matlib.randn(3) + # check matrix type, array would have shape (3,) + assert_(x.ndim == 2) + +def test_repmat(): + a1 = np.arange(4) + x = numpy.matlib.repmat(a1, 2, 2) + y = np.array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + assert_array_equal(x, y) diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_matlib.pyc b/project/venv/lib/python2.7/site-packages/numpy/tests/test_matlib.pyc new file mode 100644 index 0000000..9d1ba52 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/tests/test_matlib.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_numpy_version.py b/project/venv/lib/python2.7/site-packages/numpy/tests/test_numpy_version.py new file mode 100644 index 0000000..7fac8fd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/tests/test_numpy_version.py @@ -0,0 +1,19 @@ +from __future__ import division, absolute_import, print_function + +import re + +import numpy as np +from numpy.testing import assert_ + + +def test_valid_numpy_version(): + # Verify that the numpy version is a valid one (no .post suffix or other + # nonsense). See gh-6431 for an issue caused by an invalid version. 
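+    # For example, '1.16.2' and '1.16.2rc1' both match the release pattern
+    # checked below.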
+    version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
+    dev_suffix = r"(\.dev0\+([0-9a-f]{7}|Unknown))"
+    if np.version.release:
+        res = re.match(version_pattern, np.__version__)
+    else:
+        res = re.match(version_pattern + dev_suffix, np.__version__)
+
+    assert_(res is not None, np.__version__)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_numpy_version.pyc b/project/venv/lib/python2.7/site-packages/numpy/tests/test_numpy_version.pyc
new file mode 100644
index 0000000..afeb192
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/tests/test_numpy_version.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_public_api.py b/project/venv/lib/python2.7/site-packages/numpy/tests/test_public_api.py
new file mode 100644
index 0000000..194f8ec
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/tests/test_public_api.py
@@ -0,0 +1,89 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import numpy as np
+import pytest
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+
+def check_dir(module, module_name=None):
+    """Returns a mapping of all objects with the wrong __module__ attribute."""
+    if module_name is None:
+        module_name = module.__name__
+    results = {}
+    for name in dir(module):
+        item = getattr(module, name)
+        if (hasattr(item, '__module__') and hasattr(item, '__name__')
+                and item.__module__ != module_name):
+            results[name] = item.__module__ + '.' + item.__name__
+    return results
+
+
+@pytest.mark.skipif(
+    sys.version_info[0] < 3,
+    reason="NumPy exposes slightly different functions on Python 2")
+def test_numpy_namespace():
+    # None of these objects are publicly documented.
+    undocumented = {
+        'Tester': 'numpy.testing._private.nosetester.NoseTester',
+        '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+        'add_docstring': 'numpy.core._multiarray_umath.add_docstring',
+        'add_newdoc': 'numpy.core.function_base.add_newdoc',
+        'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+        'byte_bounds': 'numpy.lib.utils.byte_bounds',
+        'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays',
+        'deprecate': 'numpy.lib.utils.deprecate',
+        'deprecate_with_doc': 'numpy.lib.utils.<lambda>',
+        'disp': 'numpy.lib.function_base.disp',
+        'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose',
+        'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
+        'get_include': 'numpy.lib.utils.get_include',
+        'int_asbuffer': 'numpy.core._multiarray_umath.int_asbuffer',
+        'mafromtxt': 'numpy.lib.npyio.mafromtxt',
+        'maximum_sctype': 'numpy.core.numerictypes.maximum_sctype',
+        'ndfromtxt': 'numpy.lib.npyio.ndfromtxt',
+        'recfromcsv': 'numpy.lib.npyio.recfromcsv',
+        'recfromtxt': 'numpy.lib.npyio.recfromtxt',
+        'safe_eval': 'numpy.lib.utils.safe_eval',
+        'set_string_function': 'numpy.core.arrayprint.set_string_function',
+        'show_config': 'numpy.__config__.show',
+        'who': 'numpy.lib.utils.who',
+    }
+    # These built-in types are re-exported by numpy.
+ builtins = { + 'bool': 'builtins.bool', + 'complex': 'builtins.complex', + 'float': 'builtins.float', + 'int': 'builtins.int', + 'long': 'builtins.int', + 'object': 'builtins.object', + 'str': 'builtins.str', + 'unicode': 'builtins.str', + } + whitelist = dict(undocumented, **builtins) + bad_results = check_dir(np) + # pytest gives better error messages with the builtin assert than with + # assert_equal + assert bad_results == whitelist + + +def test_numpy_linalg(): + bad_results = check_dir(np.linalg) + assert bad_results == {} + + +def test_numpy_fft(): + bad_results = check_dir(np.fft) + assert bad_results == {} + +@pytest.mark.skipif(ctypes is None, + reason="ctypes not available in this python") +def test_NPY_NO_EXPORT(): + cdll = ctypes.CDLL(np.core._multiarray_tests.__file__) + # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden + f = getattr(cdll, 'test_not_exported', None) + assert f is None, ("'test_not_exported' is mistakenly exported, " + "NPY_NO_EXPORT does not work") diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_public_api.pyc b/project/venv/lib/python2.7/site-packages/numpy/tests/test_public_api.pyc new file mode 100644 index 0000000..c73a586 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/tests/test_public_api.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_reloading.py b/project/venv/lib/python2.7/site-packages/numpy/tests/test_reloading.py new file mode 100644 index 0000000..a073d69 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/tests/test_reloading.py @@ -0,0 +1,38 @@ +from __future__ import division, absolute_import, print_function + +import sys + +from numpy.testing import assert_raises, assert_, assert_equal +from numpy.core.numeric import pickle + +if sys.version_info[:2] >= (3, 4): + from importlib import reload +else: + from imp import reload + +def test_numpy_reloading(): + # gh-7844. Also check that relevant globals retain their identity. 
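+    # If reload(np) re-created these classes, existing warning filters and
+    # isinstance checks against them would silently break.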
+    import numpy as np
+    import numpy._globals
+
+    _NoValue = np._NoValue
+    VisibleDeprecationWarning = np.VisibleDeprecationWarning
+    ModuleDeprecationWarning = np.ModuleDeprecationWarning
+
+    reload(np)
+    assert_(_NoValue is np._NoValue)
+    assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
+    assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
+
+    assert_raises(RuntimeError, reload, numpy._globals)
+    reload(np)
+    assert_(_NoValue is np._NoValue)
+    assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
+    assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
+
+def test_novalue():
+    import numpy as np
+    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+        assert_equal(repr(np._NoValue), '<no value>')
+        assert_(pickle.loads(pickle.dumps(np._NoValue,
+                                          protocol=proto)) is np._NoValue)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_reloading.pyc b/project/venv/lib/python2.7/site-packages/numpy/tests/test_reloading.pyc
new file mode 100644
index 0000000..3c40f7a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/tests/test_reloading.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_scripts.py b/project/venv/lib/python2.7/site-packages/numpy/tests/test_scripts.py
new file mode 100644
index 0000000..e42dc25
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/tests/test_scripts.py
@@ -0,0 +1,49 @@
+""" Test scripts
+
+Test that we can run executable scripts that have been installed with numpy.
+"""
+from __future__ import division, print_function, absolute_import
+
+import sys
+import os
+import pytest
+from os.path import join as pathjoin, isfile, dirname
+import subprocess
+
+import numpy as np
+from numpy.compat.py3k import basestring
+from numpy.testing import assert_, assert_equal
+
+is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
+
+
+def find_f2py_commands():
+    if sys.platform == 'win32':
+        exe_dir = dirname(sys.executable)
+        if exe_dir.endswith('Scripts'):  # virtualenv
+            return [os.path.join(exe_dir, 'f2py')]
+        else:
+            return [os.path.join(exe_dir, "Scripts", 'f2py')]
+    else:
+        # Three scripts are installed in Unix-like systems:
+        # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
+        # if installed with python3.7 the scripts would be named
+        # 'f2py', 'f2py3', and 'f2py3.7'.
+        version = sys.version_info
+        major = str(version.major)
+        minor = str(version.minor)
+        return ['f2py', 'f2py' + major, 'f2py' + major + '.' 
+ minor] + + +@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") +@pytest.mark.xfail(reason="Test is unreliable") +@pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) +def test_f2py(f2py_cmd): + # test that we can run f2py script + stdout = subprocess.check_output([f2py_cmd, '-v']) + assert_equal(stdout.strip(), b'2') + + +def test_pep338(): + stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v']) + assert_equal(stdout.strip(), b'2') diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_scripts.pyc b/project/venv/lib/python2.7/site-packages/numpy/tests/test_scripts.pyc new file mode 100644 index 0000000..55473fa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/tests/test_scripts.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_warnings.py b/project/venv/lib/python2.7/site-packages/numpy/tests/test_warnings.py new file mode 100644 index 0000000..aa6f69f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/tests/test_warnings.py @@ -0,0 +1,78 @@ +""" +Tests which scan for certain occurrences in the code, they may not find +all of these occurrences but should catch almost all. +""" +from __future__ import division, absolute_import, print_function + +import sys +import pytest + +if sys.version_info >= (3, 4): + from pathlib import Path + import ast + import tokenize + import numpy + + class ParseCall(ast.NodeVisitor): + def __init__(self): + self.ls = [] + + def visit_Attribute(self, node): + ast.NodeVisitor.generic_visit(self, node) + self.ls.append(node.attr) + + def visit_Name(self, node): + self.ls.append(node.id) + + + class FindFuncs(ast.NodeVisitor): + def __init__(self, filename): + super().__init__() + self.__filename = filename + + def visit_Call(self, node): + p = ParseCall() + p.visit(node.func) + ast.NodeVisitor.generic_visit(self, node) + + if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': + if node.args[0].s == "ignore": + raise AssertionError( + "ignore filter should not be used; found in " + "{} on line {}".format(self.__filename, node.lineno)) + + if p.ls[-1] == 'warn' and ( + len(p.ls) == 1 or p.ls[-2] == 'warnings'): + + if "testing/tests/test_warnings.py" is self.__filename: + # This file + return + + # See if stacklevel exists: + if len(node.args) == 3: + return + args = {kw.arg for kw in node.keywords} + if "stacklevel" in args: + return + raise AssertionError( + "warnings should have an appropriate stacklevel; found in " + "{} on line {}".format(self.__filename, node.lineno)) + + + @pytest.mark.slow + def test_warning_calls(): + # combined "ignore" and stacklevel error + base = Path(numpy.__file__).parent + + for path in base.rglob("*.py"): + if base / "testing" in path.parents: + continue + if path == base / "__init__.py": + continue + if path == base / "random" / "__init__.py": + continue + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g. 
LANG='C') + with tokenize.open(str(path)) as file: + tree = ast.parse(file.read()) + FindFuncs(path).visit(tree) diff --git a/project/venv/lib/python2.7/site-packages/numpy/tests/test_warnings.pyc b/project/venv/lib/python2.7/site-packages/numpy/tests/test_warnings.pyc new file mode 100644 index 0000000..0aa9f01 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/tests/test_warnings.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/numpy/version.py b/project/venv/lib/python2.7/site-packages/numpy/version.py new file mode 100644 index 0000000..164ddab --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/numpy/version.py @@ -0,0 +1,12 @@ + +# THIS FILE IS GENERATED FROM NUMPY SETUP.PY +# +# To compare versions robustly, use `numpy.lib.NumpyVersion` +short_version = '1.16.2' +version = '1.16.2' +full_version = '1.16.2' +git_revision = '0eeb158ead494e130a25239ac8473a06451b1072' +release = True + +if not release: + version = full_version diff --git a/project/venv/lib/python2.7/site-packages/numpy/version.pyc b/project/venv/lib/python2.7/site-packages/numpy/version.pyc new file mode 100644 index 0000000..72c358a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/INSTALLER b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/LICENSE.txt b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/LICENSE.txt new file mode 100644 index 0000000..737fec5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008-2019 The pip developers (see AUTHORS.txt file) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/METADATA new file mode 100644 index 0000000..a91529c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/METADATA @@ -0,0 +1,75 @@ +Metadata-Version: 2.1 +Name: pip +Version: 19.0.3 +Summary: The PyPA recommended tool for installing Python packages. 
+Home-page: https://pip.pypa.io/
+Author: The pip developers
+Author-email: pypa-dev@groups.google.com
+License: MIT
+Keywords: distutils easy_install egg setuptools wheel virtualenv
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
+
+pip - The Python Package Installer
+==================================
+
+.. image:: https://img.shields.io/pypi/v/pip.svg
+   :target: https://pypi.org/project/pip/
+
+.. image:: https://readthedocs.org/projects/pip/badge/?version=latest
+   :target: https://pip.pypa.io/en/latest
+
+pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes.
+
+Please take a look at our documentation for how to install and use pip:
+
+* `Installation`_
+* `Usage`_
+* `Release notes`_
+
+If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms:
+
+* `Issue tracking`_
+* `Discourse channel`_
+* `User IRC`_
+
+If you want to get involved, head over to GitHub to get the source code and feel free to jump on the developer mailing lists and chat rooms:
+
+* `GitHub page`_
+* `Dev mailing list`_
+* `Dev IRC`_
+
+Code of Conduct
+---------------
+
+Everyone interacting in the pip project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_.
+
+.. _package installer: https://packaging.python.org/en/latest/current/
+.. _Python Package Index: https://pypi.org
+.. _Installation: https://pip.pypa.io/en/stable/installing.html
+.. _Usage: https://pip.pypa.io/en/stable/
+.. _Release notes: https://pip.pypa.io/en/stable/news.html
+.. _GitHub page: https://github.com/pypa/pip
+.. _Issue tracking: https://github.com/pypa/pip/issues
+.. _Discourse channel: https://discuss.python.org/c/packaging
+.. _Dev mailing list: https://groups.google.com/forum/#!forum/pypa-dev
+.. _User IRC: https://webchat.freenode.net/?channels=%23pypa
+.. _Dev IRC: https://webchat.freenode.net/?channels=%23pypa-dev
+..
_PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ + + diff --git a/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/RECORD new file mode 100644 index 0000000..ed2a76d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/RECORD @@ -0,0 +1,620 @@ +../../../bin/pip,sha256=v_IMrzIKqz_oK2daVvgaxk3H8f2qbbw_oiJmOPASl8E,282 +../../../bin/pip2,sha256=v_IMrzIKqz_oK2daVvgaxk3H8f2qbbw_oiJmOPASl8E,282 +../../../bin/pip2.7,sha256=v_IMrzIKqz_oK2daVvgaxk3H8f2qbbw_oiJmOPASl8E,282 +pip-19.0.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip-19.0.3.dist-info/LICENSE.txt,sha256=W6Ifuwlk-TatfRU2LR7W1JMcyMj5_y1NkRkOEJvnRDE,1090 +pip-19.0.3.dist-info/METADATA,sha256=UFkQ3jmvF9jPeZVMc6IScYpjOYGZx-05u0kqWTl8MnY,2892 +pip-19.0.3.dist-info/RECORD,, +pip-19.0.3.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110 +pip-19.0.3.dist-info/entry_points.txt,sha256=S_zfxY25QtQDVY1BiLAmOKSkkI5llzCKPLiYOSEupsY,98 +pip-19.0.3.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip/__init__.py,sha256=_0sfqHzmBOW_4x3-R2jxH4luuX5Ffe32B7BaisaO2XI,23 +pip/__init__.pyc,, +pip/__main__.py,sha256=L3IHqBeasELUHvwy5CT_izVEMhM12tve289qut49DvU,623 +pip/__main__.pyc,, +pip/_internal/__init__.py,sha256=b0jSFCCViGhB1RWni35_NMkH3Y-mbZrV648DGMagDjs,2869 +pip/_internal/__init__.pyc,, +pip/_internal/build_env.py,sha256=M6gja0toc36njmTGewkXMx0A-ZiaG6kL3yIw-N8Eu9M,7439 +pip/_internal/build_env.pyc,, +pip/_internal/cache.py,sha256=WfnnzjrXNi5-i1Ahy6UZWLs2LxSKAY5Sswri3z-pf68,7684 +pip/_internal/cache.pyc,, +pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132 +pip/_internal/cli/__init__.pyc,, +pip/_internal/cli/autocompletion.py,sha256=ptvsMdGjq42pzoY4skABVF43u2xAtLJlXAulPi-A10Y,6083 +pip/_internal/cli/autocompletion.pyc,, +pip/_internal/cli/base_command.py,sha256=YymFGRVq-Z0RcOyV5YzcRDANLeV19Em4XkipwBFqkEU,12725 +pip/_internal/cli/base_command.pyc,, +pip/_internal/cli/cmdoptions.py,sha256=pZQFNv-E7S0k4IYm6MW127FfLO0RP4yKkfyfb3V_x90,23885 +pip/_internal/cli/cmdoptions.pyc,, +pip/_internal/cli/main_parser.py,sha256=ReG-nZ95-7WxZJLY1wrwknPGbECOd-zkUnHiShKr5ZY,3016 +pip/_internal/cli/main_parser.pyc,, +pip/_internal/cli/parser.py,sha256=VZKUKJPbU6I2cHPLDOikin-aCx7OvLcZ3fzYp3xytd8,9378 +pip/_internal/cli/parser.pyc,, +pip/_internal/cli/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156 +pip/_internal/cli/status_codes.pyc,, +pip/_internal/commands/__init__.py,sha256=CQAzhVx9ViPtqLNUvAeqnKj5iWfFEcqMx5RlZWjJ30c,2251 +pip/_internal/commands/__init__.pyc,, +pip/_internal/commands/check.py,sha256=liigNVif0iz2mBfhvsajrLZT5zM5KIvgmKvhAW91EzA,1430 +pip/_internal/commands/check.pyc,, +pip/_internal/commands/completion.py,sha256=hqvCvoxsIHjysiD7olHKTqK2lzE1_lS6LWn69kN5qyI,2929 +pip/_internal/commands/completion.pyc,, +pip/_internal/commands/configuration.py,sha256=265HWuUxPggCNcIeWHA3p-LDDiRVnexwFgwmHGgWOHY,7125 +pip/_internal/commands/configuration.pyc,, +pip/_internal/commands/download.py,sha256=XPe3Kuj9iZfXwOiJq70mYVYNZD5lJCLnGT_C61cOsKw,6623 +pip/_internal/commands/download.pyc,, +pip/_internal/commands/freeze.py,sha256=VvS3G0wrm_9BH3B7Ex5msLL_1UQTtCq5G8dDI63Iemo,3259 +pip/_internal/commands/freeze.pyc,, +pip/_internal/commands/hash.py,sha256=K1JycsD-rpjqrRcL_ijacY9UKmI82pQcLYq4kCM4Pv0,1681 +pip/_internal/commands/hash.pyc,, 
+pip/_internal/commands/help.py,sha256=MwBhPJpW1Dt3GfJV3V8V6kgAy_pXT0jGrZJB1wCTW-E,1090 +pip/_internal/commands/help.pyc,, +pip/_internal/commands/install.py,sha256=OqLybBwThV0IRq0xwnlsENWBB9-hw8Dcv5pUPg5QtKw,22580 +pip/_internal/commands/install.pyc,, +pip/_internal/commands/list.py,sha256=cbJEvxkBlFfSjBalQrbTqb_KFR6eLMo7Mp_JXttPyQI,10150 +pip/_internal/commands/list.pyc,, +pip/_internal/commands/search.py,sha256=sLZ9icKMEEGekHvzRRZMiTd1zCFIZeDptyyU1mQCYzk,4728 +pip/_internal/commands/search.pyc,, +pip/_internal/commands/show.py,sha256=9EVh86vY0NZdlhT-wsuV-zq_MAV6qqV4S1Akn3wkUuw,6289 +pip/_internal/commands/show.pyc,, +pip/_internal/commands/uninstall.py,sha256=h0gfPF5jylDESx_IHgF6bZME7QAEOHzQHdn65GP-jrE,2963 +pip/_internal/commands/uninstall.pyc,, +pip/_internal/commands/wheel.py,sha256=7MNPZqK9WWxZC3TgzvMBH-RPRlOFLpwq927lkzUiUjI,7167 +pip/_internal/commands/wheel.pyc,, +pip/_internal/configuration.py,sha256=KMgG3ufFrUKX_QESi2cMVvFi47tl845Bg1ZkNthlWik,13243 +pip/_internal/configuration.pyc,, +pip/_internal/download.py,sha256=KoQvMd0OfeMUn-Wi_v2e99jxkue_zKkxFBHiiQDS3Z0,34696 +pip/_internal/download.pyc,, +pip/_internal/exceptions.py,sha256=bRSURPPUs2wMBb2TillETj6EBTDgpp4fWp5CcKZe3K0,9145 +pip/_internal/exceptions.pyc,, +pip/_internal/index.py,sha256=sYBuVbKkl11YqinxBIxro8_tx0GQ_5n4gbx9EpS3xN0,37840 +pip/_internal/index.pyc,, +pip/_internal/locations.py,sha256=1JWExRYwqJq6slzprUVq0u2nxTzmGem-8L1CxU0tdVs,6944 +pip/_internal/locations.pyc,, +pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63 +pip/_internal/models/__init__.pyc,, +pip/_internal/models/candidate.py,sha256=avICbDUtLA5zIwX_Xy4z3-Qg6tf_ysZzz30sdFbVnys,1094 +pip/_internal/models/candidate.pyc,, +pip/_internal/models/format_control.py,sha256=p0L8487xgkUrGyvULKCBQcJW0uZbWeP3ZXE_eGBGfe8,2264 +pip/_internal/models/format_control.pyc,, +pip/_internal/models/index.py,sha256=K59A8-hVhBM20Xkahr4dTwP7OjkJyEqXH11UwHFVgqM,1060 +pip/_internal/models/index.pyc,, +pip/_internal/models/link.py,sha256=mQu9rcPjaRGSqsboFLAdgMRT6B6iatiiCoToNHv4zS4,4817 +pip/_internal/models/link.pyc,, +pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/operations/__init__.pyc,, +pip/_internal/operations/check.py,sha256=KND1M5Bh_zMBP9hvvSovZPwolTxH3sWGiHD6hABegs8,5137 +pip/_internal/operations/check.pyc,, +pip/_internal/operations/freeze.py,sha256=To8UFKGiZIOfA87Y1S-7HVn_-cKjRYXJ4X45maMWA-c,9321 +pip/_internal/operations/freeze.pyc,, +pip/_internal/operations/prepare.py,sha256=l2SemS5Z_KYB0PQ7y2E12Yl28-rfegcmSRbqvElsQpI,16740 +pip/_internal/operations/prepare.pyc,, +pip/_internal/pep425tags.py,sha256=t0VKiMvgd1VYcTdJe4H_6_VYeCB6PMDwnqZrE69FYH0,13142 +pip/_internal/pep425tags.pyc,, +pip/_internal/pyproject.py,sha256=nBCBkD95mF2BoYTOjyfS0zccTjYPgIUWM-NB9pOBgbM,6478 +pip/_internal/pyproject.pyc,, +pip/_internal/req/__init__.py,sha256=gneiGyc-U5QXYi2XN0q9QzcQ2TK3R7vwQ4AzTmk9rIk,2343 +pip/_internal/req/__init__.pyc,, +pip/_internal/req/constructors.py,sha256=bMUEADysJNU7rnXK_k4OqpNXGMWFkE6b3JjqFULf0GU,11378 +pip/_internal/req/constructors.pyc,, +pip/_internal/req/req_file.py,sha256=Onq9xqes1R1qptlkAUOhlvGO2JZLvVvOBA7aO72YIfc,13730 +pip/_internal/req/req_file.pyc,, +pip/_internal/req/req_install.py,sha256=BXrpciRx6_Ah1OfeYHQt_8BKUgpRpgRPqOT0LobMsD8,39955 +pip/_internal/req/req_install.pyc,, +pip/_internal/req/req_set.py,sha256=dwaxSEGvtFV4G6uW1dNLbfpV9xbPBBdDbHslR1FN7jc,8064 +pip/_internal/req/req_set.pyc,, 
+pip/_internal/req/req_tracker.py,sha256=aAvF76NrFVc0SmOtj3Ee570i9g5yJbxv0uJsBxumbG8,2905 +pip/_internal/req/req_tracker.pyc,, +pip/_internal/req/req_uninstall.py,sha256=OaIJ6Hdo-LJ27LU2cAPWzHfbl_3iobsGzay9wDlfLpk,21458 +pip/_internal/req/req_uninstall.pyc,, +pip/_internal/resolve.py,sha256=whoi0DJIk0B-j_W6wLkosFwcMKCImanHnpZKeYd-X9U,15226 +pip/_internal/resolve.pyc,, +pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/utils/__init__.pyc,, +pip/_internal/utils/appdirs.py,sha256=d_iHU6K5MlL2Dq82QVtZgFXpyfrrDuDwuCmiI5H9tQ0,9435 +pip/_internal/utils/appdirs.pyc,, +pip/_internal/utils/compat.py,sha256=AEVxz_VJCAVl2HoW0s3H07QXPG_xj_HfMGVws-mB9n8,8565 +pip/_internal/utils/compat.pyc,, +pip/_internal/utils/deprecation.py,sha256=MF43y-bB3N2foClmZUdfBtUsfAOz8Bdck1EzRy5RVe4,3044 +pip/_internal/utils/deprecation.pyc,, +pip/_internal/utils/encoding.py,sha256=jsXgq7MlYmX_fB9yqzC54H2SpPfQbzYfMXrx8PT15R4,1225 +pip/_internal/utils/encoding.pyc,, +pip/_internal/utils/filesystem.py,sha256=ojaIDvOFOtkpKme5se6X2N8ARmQxu8cxvaaI-NFqVtk,990 +pip/_internal/utils/filesystem.pyc,, +pip/_internal/utils/glibc.py,sha256=lxM6vJc-nUhUX3Dc1UOFlNBdjCylo-9Ta6c536uyvSA,3296 +pip/_internal/utils/glibc.pyc,, +pip/_internal/utils/hashes.py,sha256=FMYKr_y6NAalGcjOkN5dgM91vVhm3J-hCAc70SCQPO8,3569 +pip/_internal/utils/hashes.pyc,, +pip/_internal/utils/logging.py,sha256=VjAGhQKvmuN3tUplwamHGVMQfZoBefGI7GtvlQDLW2g,9719 +pip/_internal/utils/logging.pyc,, +pip/_internal/utils/misc.py,sha256=-4KcZiJ8ErnLzOZDYm6bCj-KwB-MbxJZbnTDxqT3eF4,33547 +pip/_internal/utils/misc.pyc,, +pip/_internal/utils/models.py,sha256=DQYZSRhjvSdDTAaJLLCpDtxAn1S_-v_8nlNjv4T2jwY,1042 +pip/_internal/utils/models.pyc,, +pip/_internal/utils/outdated.py,sha256=vnSpakXMU3lFiFxyX3stWzTyu2OnWGG8KA2rdOlcrBY,5974 +pip/_internal/utils/outdated.pyc,, +pip/_internal/utils/packaging.py,sha256=cDVTZVp3eR2MQX45DYlkzGyHP6zcF45ujm5oCAoA230,2785 +pip/_internal/utils/packaging.pyc,, +pip/_internal/utils/setuptools_build.py,sha256=0blfscmNJW_iZ5DcswJeDB_PbtTEjfK9RL1R1WEDW2E,278 +pip/_internal/utils/setuptools_build.pyc,, +pip/_internal/utils/temp_dir.py,sha256=0Xq5ZlOd2OOeHwKM6hGy66gnMGAbyhio7DtjLHd7DFg,5339 +pip/_internal/utils/temp_dir.pyc,, +pip/_internal/utils/typing.py,sha256=ztYtZAcqjCYDwP-WlF6EiAAskAsZBMMXtuqvfgZIlgQ,1139 +pip/_internal/utils/typing.pyc,, +pip/_internal/utils/ui.py,sha256=l4CEswlh8fWvISW4-RUtlXtw2hFvko08OZBYCWBTxSw,14256 +pip/_internal/utils/ui.pyc,, +pip/_internal/vcs/__init__.py,sha256=O1rQ5XoDr4r38kKktwuCL3SNU2h0WGjB-lVHjPuY-pw,17278 +pip/_internal/vcs/__init__.pyc,, +pip/_internal/vcs/bazaar.py,sha256=AqsBYeXjl5Zw8IaoIVI8WStDE6_UqZ1RTfvVH5qZkG4,3670 +pip/_internal/vcs/bazaar.pyc,, +pip/_internal/vcs/git.py,sha256=zO-_jOa7baD_Y6y_zDFQVhYSvc1jgnDEA307y9LATAA,13407 +pip/_internal/vcs/git.pyc,, +pip/_internal/vcs/mercurial.py,sha256=aAxoCGfLjHcxZtN7FSvFL28MwLOUL0dZzUssZ0IU__g,3447 +pip/_internal/vcs/mercurial.pyc,, +pip/_internal/vcs/subversion.py,sha256=hxFLX0Ncdth7dY7excIdFo6UGQrjuZ6KIeIL3jqr-3o,7081 +pip/_internal/vcs/subversion.pyc,, +pip/_internal/wheel.py,sha256=110d-8C4sg_RmZHw-bVAPvHiAiF9TAhZJXo9tvN1PIk,41001 +pip/_internal/wheel.pyc,, +pip/_vendor/__init__.py,sha256=vsMCQHIwFuzqN63uGhBNE0zimx6rlZl3SC-m7YHmjG0,4779 +pip/_vendor/__init__.pyc,, +pip/_vendor/appdirs.py,sha256=BENKsvcA08IpccD9345-rMrg3aXWFA1q6BFEglnHg6I,24547 +pip/_vendor/appdirs.pyc,, +pip/_vendor/cachecontrol/__init__.py,sha256=6cRPchVqkAkeUtYTSW8qCetjSqJo-GxP-n4VMVDbvmc,302 +pip/_vendor/cachecontrol/__init__.pyc,, 
+pip/_vendor/cachecontrol/_cmd.py,sha256=URGE0KrA87QekCG3SGPatlSPT571dZTDjNa-ZXX3pDc,1295 +pip/_vendor/cachecontrol/_cmd.pyc,, +pip/_vendor/cachecontrol/adapter.py,sha256=eBGAtVNRZgtl_Kj5JV54miqL9YND-D0JZPahwY8kFtY,4863 +pip/_vendor/cachecontrol/adapter.pyc,, +pip/_vendor/cachecontrol/cache.py,sha256=1fc4wJP8HYt1ycnJXeEw5pCpeBL2Cqxx6g9Fb0AYDWQ,805 +pip/_vendor/cachecontrol/cache.pyc,, +pip/_vendor/cachecontrol/caches/__init__.py,sha256=-gHNKYvaeD0kOk5M74eOrsSgIKUtC6i6GfbmugGweEo,86 +pip/_vendor/cachecontrol/caches/__init__.pyc,, +pip/_vendor/cachecontrol/caches/file_cache.py,sha256=8vrSzzGcdfEfICago1uSFbkumNJMGLbCdEkXsmUIExw,4177 +pip/_vendor/cachecontrol/caches/file_cache.pyc,, +pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=HxelMpNCo-dYr2fiJDwM3hhhRmxUYtB5tXm1GpAAT4Y,856 +pip/_vendor/cachecontrol/caches/redis_cache.pyc,, +pip/_vendor/cachecontrol/compat.py,sha256=kHNvMRdt6s_Xwqq_9qJmr9ou3wYMOMUMxPPcwNxT8Mc,695 +pip/_vendor/cachecontrol/compat.pyc,, +pip/_vendor/cachecontrol/controller.py,sha256=U7g-YwizQ2O5NRgK_MZreF1ntM4E49C3PuF3od-Vwz4,13698 +pip/_vendor/cachecontrol/controller.pyc,, +pip/_vendor/cachecontrol/filewrapper.py,sha256=vACKO8Llzu_ZWyjV1Fxn1MA4TGU60N5N3GSrAFdAY2Q,2533 +pip/_vendor/cachecontrol/filewrapper.pyc,, +pip/_vendor/cachecontrol/heuristics.py,sha256=BFGHJ3yQcxvZizfo90LLZ04T_Z5XSCXvFotrp7Us0sc,4070 +pip/_vendor/cachecontrol/heuristics.pyc,, +pip/_vendor/cachecontrol/serialize.py,sha256=GebE34fgToyWwAsRPguh8hEPN6CqoG-5hRMXRsjVABQ,6954 +pip/_vendor/cachecontrol/serialize.pyc,, +pip/_vendor/cachecontrol/wrapper.py,sha256=sfr9YHWx-5TwNz1H5rT6QOo8ggII6v3vbEDjQFwR6wc,671 +pip/_vendor/cachecontrol/wrapper.pyc,, +pip/_vendor/certifi/__init__.py,sha256=timLpLv3BNcGLLEz2s6gyA34hOhMb4AIPBz0zxOxna8,52 +pip/_vendor/certifi/__init__.pyc,, +pip/_vendor/certifi/__main__.py,sha256=NaCn6WtWME-zzVWQ2j4zFyl8cY4knDa9CwtHNIeFPhM,53 +pip/_vendor/certifi/__main__.pyc,, +pip/_vendor/certifi/cacert.pem,sha256=zGy4Y1gu9Zy-6CGvg3apdC7kXMm3f1ELolJwNDnBRv0,275834 +pip/_vendor/certifi/core.py,sha256=K_VfM6GwSemTFisUgFWyk__w1m9pCGFRF5zTzO5bGv0,288 +pip/_vendor/certifi/core.pyc,, +pip/_vendor/chardet/__init__.py,sha256=YsP5wQlsHJ2auF1RZJfypiSrCA7_bQiRm3ES_NI76-Y,1559 +pip/_vendor/chardet/__init__.pyc,, +pip/_vendor/chardet/big5freq.py,sha256=D_zK5GyzoVsRes0HkLJziltFQX0bKCLOrFe9_xDvO_8,31254 +pip/_vendor/chardet/big5freq.pyc,, +pip/_vendor/chardet/big5prober.py,sha256=kBxHbdetBpPe7xrlb-e990iot64g_eGSLd32lB7_h3M,1757 +pip/_vendor/chardet/big5prober.pyc,, +pip/_vendor/chardet/chardistribution.py,sha256=3woWS62KrGooKyqz4zQSnjFbJpa6V7g02daAibTwcl8,9411 +pip/_vendor/chardet/chardistribution.pyc,, +pip/_vendor/chardet/charsetgroupprober.py,sha256=6bDu8YIiRuScX4ca9Igb0U69TA2PGXXDej6Cc4_9kO4,3787 +pip/_vendor/chardet/charsetgroupprober.pyc,, +pip/_vendor/chardet/charsetprober.py,sha256=KSmwJErjypyj0bRZmC5F5eM7c8YQgLYIjZXintZNstg,5110 +pip/_vendor/chardet/charsetprober.pyc,, +pip/_vendor/chardet/cli/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +pip/_vendor/chardet/cli/__init__.pyc,, +pip/_vendor/chardet/cli/chardetect.py,sha256=DI8dlV3FBD0c0XA_y3sQ78z754DUv1J8n34RtDjOXNw,2774 +pip/_vendor/chardet/cli/chardetect.pyc,, +pip/_vendor/chardet/codingstatemachine.py,sha256=VYp_6cyyki5sHgXDSZnXW4q1oelHc3cu9AyQTX7uug8,3590 +pip/_vendor/chardet/codingstatemachine.pyc,, +pip/_vendor/chardet/compat.py,sha256=PKTzHkSbtbHDqS9PyujMbX74q1a8mMpeQTDVsQhZMRw,1134 +pip/_vendor/chardet/compat.pyc,, +pip/_vendor/chardet/cp949prober.py,sha256=TZ434QX8zzBsnUvL_8wm4AQVTZ2ZkqEEQL_lNw9f9ow,1855 
+pip/_vendor/chardet/cp949prober.pyc,, +pip/_vendor/chardet/enums.py,sha256=Aimwdb9as1dJKZaFNUH2OhWIVBVd6ZkJJ_WK5sNY8cU,1661 +pip/_vendor/chardet/enums.pyc,, +pip/_vendor/chardet/escprober.py,sha256=kkyqVg1Yw3DIOAMJ2bdlyQgUFQhuHAW8dUGskToNWSc,3950 +pip/_vendor/chardet/escprober.pyc,, +pip/_vendor/chardet/escsm.py,sha256=RuXlgNvTIDarndvllNCk5WZBIpdCxQ0kcd9EAuxUh84,10510 +pip/_vendor/chardet/escsm.pyc,, +pip/_vendor/chardet/eucjpprober.py,sha256=iD8Jdp0ISRjgjiVN7f0e8xGeQJ5GM2oeZ1dA8nbSeUw,3749 +pip/_vendor/chardet/eucjpprober.pyc,, +pip/_vendor/chardet/euckrfreq.py,sha256=-7GdmvgWez4-eO4SuXpa7tBiDi5vRXQ8WvdFAzVaSfo,13546 +pip/_vendor/chardet/euckrfreq.pyc,, +pip/_vendor/chardet/euckrprober.py,sha256=MqFMTQXxW4HbzIpZ9lKDHB3GN8SP4yiHenTmf8g_PxY,1748 +pip/_vendor/chardet/euckrprober.pyc,, +pip/_vendor/chardet/euctwfreq.py,sha256=No1WyduFOgB5VITUA7PLyC5oJRNzRyMbBxaKI1l16MA,31621 +pip/_vendor/chardet/euctwfreq.pyc,, +pip/_vendor/chardet/euctwprober.py,sha256=13p6EP4yRaxqnP4iHtxHOJ6R2zxHq1_m8hTRjzVZ95c,1747 +pip/_vendor/chardet/euctwprober.pyc,, +pip/_vendor/chardet/gb2312freq.py,sha256=JX8lsweKLmnCwmk8UHEQsLgkr_rP_kEbvivC4qPOrlc,20715 +pip/_vendor/chardet/gb2312freq.pyc,, +pip/_vendor/chardet/gb2312prober.py,sha256=gGvIWi9WhDjE-xQXHvNIyrnLvEbMAYgyUSZ65HUfylw,1754 +pip/_vendor/chardet/gb2312prober.pyc,, +pip/_vendor/chardet/hebrewprober.py,sha256=c3SZ-K7hvyzGY6JRAZxJgwJ_sUS9k0WYkvMY00YBYFo,13838 +pip/_vendor/chardet/hebrewprober.pyc,, +pip/_vendor/chardet/jisfreq.py,sha256=vpmJv2Bu0J8gnMVRPHMFefTRvo_ha1mryLig8CBwgOg,25777 +pip/_vendor/chardet/jisfreq.pyc,, +pip/_vendor/chardet/jpcntx.py,sha256=PYlNqRUQT8LM3cT5FmHGP0iiscFlTWED92MALvBungo,19643 +pip/_vendor/chardet/jpcntx.pyc,, +pip/_vendor/chardet/langbulgarianmodel.py,sha256=1HqQS9Pbtnj1xQgxitJMvw8X6kKr5OockNCZWfEQrPE,12839 +pip/_vendor/chardet/langbulgarianmodel.pyc,, +pip/_vendor/chardet/langcyrillicmodel.py,sha256=LODajvsetH87yYDDQKA2CULXUH87tI223dhfjh9Zx9c,17948 +pip/_vendor/chardet/langcyrillicmodel.pyc,, +pip/_vendor/chardet/langgreekmodel.py,sha256=8YAW7bU8YwSJap0kIJSbPMw1BEqzGjWzqcqf0WgUKAA,12688 +pip/_vendor/chardet/langgreekmodel.pyc,, +pip/_vendor/chardet/langhebrewmodel.py,sha256=JSnqmE5E62tDLTPTvLpQsg5gOMO4PbdWRvV7Avkc0HA,11345 +pip/_vendor/chardet/langhebrewmodel.pyc,, +pip/_vendor/chardet/langhungarianmodel.py,sha256=RhapYSG5l0ZaO-VV4Fan5sW0WRGQqhwBM61yx3yxyOA,12592 +pip/_vendor/chardet/langhungarianmodel.pyc,, +pip/_vendor/chardet/langthaimodel.py,sha256=8l0173Gu_W6G8mxmQOTEF4ls2YdE7FxWf3QkSxEGXJQ,11290 +pip/_vendor/chardet/langthaimodel.pyc,, +pip/_vendor/chardet/langturkishmodel.py,sha256=W22eRNJsqI6uWAfwXSKVWWnCerYqrI8dZQTm_M0lRFk,11102 +pip/_vendor/chardet/langturkishmodel.pyc,, +pip/_vendor/chardet/latin1prober.py,sha256=S2IoORhFk39FEFOlSFWtgVybRiP6h7BlLldHVclNkU8,5370 +pip/_vendor/chardet/latin1prober.pyc,, +pip/_vendor/chardet/mbcharsetprober.py,sha256=AR95eFH9vuqSfvLQZN-L5ijea25NOBCoXqw8s5O9xLQ,3413 +pip/_vendor/chardet/mbcharsetprober.pyc,, +pip/_vendor/chardet/mbcsgroupprober.py,sha256=h6TRnnYq2OxG1WdD5JOyxcdVpn7dG0q-vB8nWr5mbh4,2012 +pip/_vendor/chardet/mbcsgroupprober.pyc,, +pip/_vendor/chardet/mbcssm.py,sha256=SY32wVIF3HzcjY3BaEspy9metbNSKxIIB0RKPn7tjpI,25481 +pip/_vendor/chardet/mbcssm.pyc,, +pip/_vendor/chardet/sbcharsetprober.py,sha256=LDSpCldDCFlYwUkGkwD2oFxLlPWIWXT09akH_2PiY74,5657 +pip/_vendor/chardet/sbcharsetprober.pyc,, +pip/_vendor/chardet/sbcsgroupprober.py,sha256=1IprcCB_k1qfmnxGC6MBbxELlKqD3scW6S8YIwdeyXA,3546 +pip/_vendor/chardet/sbcsgroupprober.pyc,, 
+pip/_vendor/chardet/sjisprober.py,sha256=IIt-lZj0WJqK4rmUZzKZP4GJlE8KUEtFYVuY96ek5MQ,3774 +pip/_vendor/chardet/sjisprober.pyc,, +pip/_vendor/chardet/universaldetector.py,sha256=qL0174lSZE442eB21nnktT9_VcAye07laFWUeUrjttY,12485 +pip/_vendor/chardet/universaldetector.pyc,, +pip/_vendor/chardet/utf8prober.py,sha256=IdD8v3zWOsB8OLiyPi-y_fqwipRFxV9Nc1eKBLSuIEw,2766 +pip/_vendor/chardet/utf8prober.pyc,, +pip/_vendor/chardet/version.py,sha256=sp3B08mrDXB-pf3K9fqJ_zeDHOCLC8RrngQyDFap_7g,242 +pip/_vendor/chardet/version.pyc,, +pip/_vendor/colorama/__init__.py,sha256=lJdY6COz9uM_pXwuk9oLr0fp8H8q2RrUqN16GKabvq4,239 +pip/_vendor/colorama/__init__.pyc,, +pip/_vendor/colorama/ansi.py,sha256=Fi0un-QLqRm-v7o_nKiOqyC8PapBJK7DLV_q9LKtTO0,2524 +pip/_vendor/colorama/ansi.pyc,, +pip/_vendor/colorama/ansitowin32.py,sha256=u8QaqdqS_xYSfNkPM1eRJLHz6JMWPodaJaP0mxgHCDc,10462 +pip/_vendor/colorama/ansitowin32.pyc,, +pip/_vendor/colorama/initialise.py,sha256=PprovDNxMTrvoNHFcL2NZjpH2XzDc8BLxLxiErfUl4k,1915 +pip/_vendor/colorama/initialise.pyc,, +pip/_vendor/colorama/win32.py,sha256=bJ8Il9jwaBN5BJ8bmN6FoYZ1QYuMKv2j8fGrXh7TJjw,5404 +pip/_vendor/colorama/win32.pyc,, +pip/_vendor/colorama/winterm.py,sha256=2y_2b7Zsv34feAsP67mLOVc-Bgq51mdYGo571VprlrM,6438 +pip/_vendor/colorama/winterm.pyc,, +pip/_vendor/distlib/__init__.py,sha256=7uthK6m96pTekk8hjlT-MybcwYmmxwP8gEOxXVg1f2s,581 +pip/_vendor/distlib/__init__.pyc,, +pip/_vendor/distlib/_backport/__init__.py,sha256=bqS_dTOH6uW9iGgd0uzfpPjo6vZ4xpPZ7kyfZJ2vNaw,274 +pip/_vendor/distlib/_backport/__init__.pyc,, +pip/_vendor/distlib/_backport/misc.py,sha256=KWecINdbFNOxSOP1fGF680CJnaC6S4fBRgEtaYTw0ig,971 +pip/_vendor/distlib/_backport/misc.pyc,, +pip/_vendor/distlib/_backport/shutil.py,sha256=VW1t3uYqUjWZH7jV-6QiimLhnldoV5uIpH4EuiT1jfw,25647 +pip/_vendor/distlib/_backport/shutil.pyc,, +pip/_vendor/distlib/_backport/sysconfig.cfg,sha256=swZKxq9RY5e9r3PXCrlvQPMsvOdiWZBTHLEbqS8LJLU,2617 +pip/_vendor/distlib/_backport/sysconfig.py,sha256=JdJ9ztRy4Hc-b5-VS74x3nUtdEIVr_OBvMsIb8O2sjc,26964 +pip/_vendor/distlib/_backport/sysconfig.pyc,, +pip/_vendor/distlib/_backport/tarfile.py,sha256=Ihp7rXRcjbIKw8COm9wSePV9ARGXbSF9gGXAMn2Q-KU,92628 +pip/_vendor/distlib/_backport/tarfile.pyc,, +pip/_vendor/distlib/compat.py,sha256=xdNZmqFN5HwF30HjRn5M415pcC2kgXRBXn767xS8v-M,41404 +pip/_vendor/distlib/compat.pyc,, +pip/_vendor/distlib/database.py,sha256=-KJH63AJ7hqjLtGCwOTrionhKr2Vsytdwkjyo8UdEco,51029 +pip/_vendor/distlib/database.pyc,, +pip/_vendor/distlib/index.py,sha256=Dd1kIV06XIdynNpKxHMMRRIKsXuoUsG7QIzntfVtZCI,21073 +pip/_vendor/distlib/index.pyc,, +pip/_vendor/distlib/locators.py,sha256=S9G2IsZp0RnMMbXGrT-gu7892pNpy1XMlUEuUHX3OI8,51828 +pip/_vendor/distlib/locators.pyc,, +pip/_vendor/distlib/manifest.py,sha256=nQEhYmgoreaBZzyFzwYsXxJARu3fo4EkunU163U16iE,14811 +pip/_vendor/distlib/manifest.pyc,, +pip/_vendor/distlib/markers.py,sha256=6Ac3cCfFBERexiESWIOXmg-apIP8l2esafNSX3KMy-8,4387 +pip/_vendor/distlib/markers.pyc,, +pip/_vendor/distlib/metadata.py,sha256=BNCnpRfFVslyZcosr4vnE_YbkRb3TNxXtk7TrDszJdc,40172 +pip/_vendor/distlib/metadata.pyc,, +pip/_vendor/distlib/resources.py,sha256=2FGv0ZHF14KXjLIlL0R991lyQQGcewOS4mJ-5n-JVnc,10766 +pip/_vendor/distlib/resources.pyc,, +pip/_vendor/distlib/scripts.py,sha256=NYqRJ2uuEuJwr_NNLzWH0m_s_YsobDFQb6HqxuQ2Sew,16638 +pip/_vendor/distlib/scripts.pyc,, +pip/_vendor/distlib/t32.exe,sha256=ftub1bsSPUCOnBn-eCtcarKTk0N0CBEP53BumkIxWJE,92672 +pip/_vendor/distlib/t64.exe,sha256=iChOG627LWTHY8-jzSwlo9SYU5a-0JHwQu4AqDz8I68,102400 
+pip/_vendor/distlib/util.py,sha256=gwKL5geJKmtR4GeIUnoMAWjsPPG3tVP_mFxw_Sx-isc,59681 +pip/_vendor/distlib/util.pyc,, +pip/_vendor/distlib/version.py,sha256=_n7F6juvQGAcn769E_SHa7fOcf5ERlEVymJ_EjPRwGw,23391 +pip/_vendor/distlib/version.pyc,, +pip/_vendor/distlib/w32.exe,sha256=NPYPpt7PIjVqABEu1CzabbDyHHkJpuw-_qZq_48H0j0,89088 +pip/_vendor/distlib/w64.exe,sha256=Yb-qr1OQEzL8KRGTk-XHUZDwMSljfQeZnVoTk-K4e7E,99328 +pip/_vendor/distlib/wheel.py,sha256=gV53KDG7BgbxsdeKjnATbP47gTEJRNylcIeE1TFin1o,39880 +pip/_vendor/distlib/wheel.pyc,, +pip/_vendor/distro.py,sha256=dOMrjIXv-3GmEbtP-NJc057Sv19P7ZAdke-v0TBeNio,42455 +pip/_vendor/distro.pyc,, +pip/_vendor/html5lib/__init__.py,sha256=Ztrn7UvF-wIFAgRBBa0ML-Gu5AffH3BPX_INJx4SaBI,1162 +pip/_vendor/html5lib/__init__.pyc,, +pip/_vendor/html5lib/_ihatexml.py,sha256=3LBtJMlzgwM8vpQiU1TvGmEEmNH72sV0yD8yS53y07A,16705 +pip/_vendor/html5lib/_ihatexml.pyc,, +pip/_vendor/html5lib/_inputstream.py,sha256=bPUWcAfJScK4xkjQQaG_HsI2BvEVbFvI0AsodDYPQj0,32552 +pip/_vendor/html5lib/_inputstream.pyc,, +pip/_vendor/html5lib/_tokenizer.py,sha256=YAaOEBD6qc5ISq9Xt9Nif1OFgcybTTfMdwqBkZhpAq4,76580 +pip/_vendor/html5lib/_tokenizer.pyc,, +pip/_vendor/html5lib/_trie/__init__.py,sha256=8VR1bcgD2OpeS2XExpu5yBhP_Q1K-lwKbBKICBPf1kU,289 +pip/_vendor/html5lib/_trie/__init__.pyc,, +pip/_vendor/html5lib/_trie/_base.py,sha256=uJHVhzif9S0MJXgy9F98iEev5evi_rgUk5BmEbUSp8c,930 +pip/_vendor/html5lib/_trie/_base.pyc,, +pip/_vendor/html5lib/_trie/datrie.py,sha256=EQpqSfkZRuTbE-DuhW7xMdVDxdZNZ0CfmnYfHA_3zxM,1178 +pip/_vendor/html5lib/_trie/datrie.pyc,, +pip/_vendor/html5lib/_trie/py.py,sha256=wXmQLrZRf4MyWNyg0m3h81m9InhLR7GJ002mIIZh-8o,1775 +pip/_vendor/html5lib/_trie/py.pyc,, +pip/_vendor/html5lib/_utils.py,sha256=ismpASeqa2jqEPQjHUj8vReAf7yIoKnvLN5fuOw6nv0,4015 +pip/_vendor/html5lib/_utils.pyc,, +pip/_vendor/html5lib/constants.py,sha256=4lmZWLtEPRLnl8NzftOoYTJdo6jpeMtP6dqQC0g_bWQ,83518 +pip/_vendor/html5lib/constants.pyc,, +pip/_vendor/html5lib/filters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/html5lib/filters/__init__.pyc,, +pip/_vendor/html5lib/filters/alphabeticalattributes.py,sha256=lViZc2JMCclXi_5gduvmdzrRxtO5Xo9ONnbHBVCsykU,919 +pip/_vendor/html5lib/filters/alphabeticalattributes.pyc,, +pip/_vendor/html5lib/filters/base.py,sha256=z-IU9ZAYjpsVsqmVt7kuWC63jR11hDMr6CVrvuao8W0,286 +pip/_vendor/html5lib/filters/base.pyc,, +pip/_vendor/html5lib/filters/inject_meta_charset.py,sha256=egDXUEHXmAG9504xz0K6ALDgYkvUrC2q15YUVeNlVQg,2945 +pip/_vendor/html5lib/filters/inject_meta_charset.pyc,, +pip/_vendor/html5lib/filters/lint.py,sha256=jk6q56xY0ojiYfvpdP-OZSm9eTqcAdRqhCoPItemPYA,3643 +pip/_vendor/html5lib/filters/lint.pyc,, +pip/_vendor/html5lib/filters/optionaltags.py,sha256=8lWT75J0aBOHmPgfmqTHSfPpPMp01T84NKu0CRedxcE,10588 +pip/_vendor/html5lib/filters/optionaltags.pyc,, +pip/_vendor/html5lib/filters/sanitizer.py,sha256=4ON02KNjuqda1lCw5_JCUZxb0BzWR5M7ON84dtJ7dm0,26248 +pip/_vendor/html5lib/filters/sanitizer.pyc,, +pip/_vendor/html5lib/filters/whitespace.py,sha256=8eWqZxd4UC4zlFGW6iyY6f-2uuT8pOCSALc3IZt7_t4,1214 +pip/_vendor/html5lib/filters/whitespace.pyc,, +pip/_vendor/html5lib/html5parser.py,sha256=g5g2ezkusHxhi7b23vK_-d6K6BfIJRbqIQmvQ9z4EgI,118963 +pip/_vendor/html5lib/html5parser.pyc,, +pip/_vendor/html5lib/serializer.py,sha256=yfcfBHse2wDs6ojxn-kieJjLT5s1ipilQJ0gL3-rJis,15758 +pip/_vendor/html5lib/serializer.pyc,, +pip/_vendor/html5lib/treeadapters/__init__.py,sha256=A0rY5gXIe4bJOiSGRO_j_tFhngRBO8QZPzPtPw5dFzo,679 
+pip/_vendor/html5lib/treeadapters/__init__.pyc,, +pip/_vendor/html5lib/treeadapters/genshi.py,sha256=CH27pAsDKmu4ZGkAUrwty7u0KauGLCZRLPMzaO3M5vo,1715 +pip/_vendor/html5lib/treeadapters/genshi.pyc,, +pip/_vendor/html5lib/treeadapters/sax.py,sha256=BKS8woQTnKiqeffHsxChUqL4q2ZR_wb5fc9MJ3zQC8s,1776 +pip/_vendor/html5lib/treeadapters/sax.pyc,, +pip/_vendor/html5lib/treebuilders/__init__.py,sha256=AysSJyvPfikCMMsTVvaxwkgDieELD5dfR8FJIAuq7hY,3592 +pip/_vendor/html5lib/treebuilders/__init__.pyc,, +pip/_vendor/html5lib/treebuilders/base.py,sha256=wQGp5yy22TNG8tJ6aREe4UUeTR7A99dEz0BXVaedWb4,14579 +pip/_vendor/html5lib/treebuilders/base.pyc,, +pip/_vendor/html5lib/treebuilders/dom.py,sha256=SY3MsijXyzdNPc8aK5IQsupBoM8J67y56DgNtGvsb9g,8835 +pip/_vendor/html5lib/treebuilders/dom.pyc,, +pip/_vendor/html5lib/treebuilders/etree.py,sha256=aqIBOGj_dFYqBURIcTegGNBhAIJOw5iFDHb4jrkYH-8,12764 +pip/_vendor/html5lib/treebuilders/etree.pyc,, +pip/_vendor/html5lib/treebuilders/etree_lxml.py,sha256=9V0dXxbJYYq-Skgb5-_OL2NkVYpjioEb4CHajo0e9yI,14122 +pip/_vendor/html5lib/treebuilders/etree_lxml.pyc,, +pip/_vendor/html5lib/treewalkers/__init__.py,sha256=yhXxHpjlSqfQyUag3v8-vWjMPriFBU8YRAPNpDgBTn8,5714 +pip/_vendor/html5lib/treewalkers/__init__.pyc,, +pip/_vendor/html5lib/treewalkers/base.py,sha256=ouiOsuSzvI0KgzdWP8PlxIaSNs9falhbiinAEc_UIJY,7476 +pip/_vendor/html5lib/treewalkers/base.pyc,, +pip/_vendor/html5lib/treewalkers/dom.py,sha256=EHyFR8D8lYNnyDU9lx_IKigVJRyecUGua0mOi7HBukc,1413 +pip/_vendor/html5lib/treewalkers/dom.pyc,, +pip/_vendor/html5lib/treewalkers/etree.py,sha256=sz1o6mmE93NQ53qJFDO7HKyDtuwgK-Ay3qSFZPC6u00,4550 +pip/_vendor/html5lib/treewalkers/etree.pyc,, +pip/_vendor/html5lib/treewalkers/etree_lxml.py,sha256=sY6wfRshWTllu6n48TPWpKsQRPp-0CQrT0hj_AdzHSU,6309 +pip/_vendor/html5lib/treewalkers/etree_lxml.pyc,, +pip/_vendor/html5lib/treewalkers/genshi.py,sha256=4D2PECZ5n3ZN3qu3jMl9yY7B81jnQApBQSVlfaIuYbA,2309 +pip/_vendor/html5lib/treewalkers/genshi.pyc,, +pip/_vendor/idna/__init__.py,sha256=9Nt7xpyet3DmOrPUGooDdAwmHZZu1qUAy2EaJ93kGiQ,58 +pip/_vendor/idna/__init__.pyc,, +pip/_vendor/idna/codec.py,sha256=lvYb7yu7PhAqFaAIAdWcwgaWI2UmgseUua-1c0AsG0A,3299 +pip/_vendor/idna/codec.pyc,, +pip/_vendor/idna/compat.py,sha256=R-h29D-6mrnJzbXxymrWUW7iZUvy-26TQwZ0ij57i4U,232 +pip/_vendor/idna/compat.pyc,, +pip/_vendor/idna/core.py,sha256=JDCZZ_PLESqIgEbU8mPyoEufWwoOiIqygA17-QZIe3s,11733 +pip/_vendor/idna/core.pyc,, +pip/_vendor/idna/idnadata.py,sha256=HXaPFw6_YAJ0qppACPu0YLAULtRs3QovRM_CCZHGdY0,40899 +pip/_vendor/idna/idnadata.pyc,, +pip/_vendor/idna/intranges.py,sha256=TY1lpxZIQWEP6tNqjZkFA5hgoMWOj1OBmnUG8ihT87E,1749 +pip/_vendor/idna/intranges.pyc,, +pip/_vendor/idna/package_data.py,sha256=kIzeKKXEouXLR4srqwf9Q3zv-NffKSOz5aSDOJARPB0,21 +pip/_vendor/idna/package_data.pyc,, +pip/_vendor/idna/uts46data.py,sha256=oLyNZ1pBaiBlj9zFzLFRd_P7J8MkRcgDisjExZR_4MY,198292 +pip/_vendor/idna/uts46data.pyc,, +pip/_vendor/ipaddress.py,sha256=2OgbkeAD2rLkcXqbcvof3J5R7lRwjNLoBySyTkBtKnc,79852 +pip/_vendor/ipaddress.pyc,, +pip/_vendor/lockfile/__init__.py,sha256=Tqpz90DwKYfhPsfzVOJl84TL87pdFE5ePNHdXAxs4Tk,9371 +pip/_vendor/lockfile/__init__.pyc,, +pip/_vendor/lockfile/linklockfile.py,sha256=C7OH3H4GdK68u4FQgp8fkP2kO4fyUTSyj3X6blgfobc,2652 +pip/_vendor/lockfile/linklockfile.pyc,, +pip/_vendor/lockfile/mkdirlockfile.py,sha256=e3qgIL-etZMLsS-3ft19iW_8IQ360HNkGOqE3yBKsUw,3096 +pip/_vendor/lockfile/mkdirlockfile.pyc,, +pip/_vendor/lockfile/pidlockfile.py,sha256=ukH9uk6NFuxyVmG5QiWw4iKq3fT7MjqUguX95avYPIY,6090 
+pip/_vendor/lockfile/pidlockfile.pyc,, +pip/_vendor/lockfile/sqlitelockfile.py,sha256=o2TMkMRY0iwn-iL1XMRRIFStMUkS4i3ajceeYNntKFg,5506 +pip/_vendor/lockfile/sqlitelockfile.pyc,, +pip/_vendor/lockfile/symlinklockfile.py,sha256=ABwXXmvTHvCl5viPblShL3PG-gGsLiT1roAMfDRwhi8,2616 +pip/_vendor/lockfile/symlinklockfile.pyc,, +pip/_vendor/msgpack/__init__.py,sha256=y0bk2YbzK6J2e0J_dyreN6nD7yM2IezT6m_tU2h-Mdg,1677 +pip/_vendor/msgpack/__init__.pyc,, +pip/_vendor/msgpack/_version.py,sha256=dN7wVIjbyuQIJ35B2o6gymQNDLPlj_7-uTfgCv7KErM,20 +pip/_vendor/msgpack/_version.pyc,, +pip/_vendor/msgpack/exceptions.py,sha256=lPkAi_u12NlFajDz4FELSHEdfU8hrR3zeTvKX8aQuz4,1056 +pip/_vendor/msgpack/exceptions.pyc,, +pip/_vendor/msgpack/fallback.py,sha256=h0ll8xnq12mI9PuQ9Qd_Ihtt08Sp8L0JqhG9KY8Vyjk,36411 +pip/_vendor/msgpack/fallback.pyc,, +pip/_vendor/packaging/__about__.py,sha256=Wg0-hNgTU2_lBZcGBh5pm1R9yroQ3rv-X0rig8KjA6o,744 +pip/_vendor/packaging/__about__.pyc,, +pip/_vendor/packaging/__init__.py,sha256=6enbp5XgRfjBjsI9-bn00HjHf5TH21PDMOKkJW8xw-w,562 +pip/_vendor/packaging/__init__.pyc,, +pip/_vendor/packaging/_compat.py,sha256=Ugdm-qcneSchW25JrtMIKgUxfEEBcCAz6WrEeXeqz9o,865 +pip/_vendor/packaging/_compat.pyc,, +pip/_vendor/packaging/_structures.py,sha256=pVd90XcXRGwpZRB_qdFuVEibhCHpX_bL5zYr9-N0mc8,1416 +pip/_vendor/packaging/_structures.pyc,, +pip/_vendor/packaging/markers.py,sha256=-QjvJkhSJBxBogO9J_EpPQudHaaLV3rgVYsBDqn-ZLc,8234 +pip/_vendor/packaging/markers.pyc,, +pip/_vendor/packaging/requirements.py,sha256=grcnFU8x7KD230JaFLXtWl3VClLuOmsOy4c-m55tOWs,4700 +pip/_vendor/packaging/requirements.pyc,, +pip/_vendor/packaging/specifiers.py,sha256=0ZzQpcUnvrQ6LjR-mQRLzMr8G6hdRv-mY0VSf_amFtI,27778 +pip/_vendor/packaging/specifiers.pyc,, +pip/_vendor/packaging/utils.py,sha256=VaTC0Ei7zO2xl9ARiWmz2YFLFt89PuuhLbAlXMyAGms,1520 +pip/_vendor/packaging/utils.pyc,, +pip/_vendor/packaging/version.py,sha256=Npdwnb8OHedj_2L86yiUqscujb7w_i5gmSK1PhOAFzg,11978 +pip/_vendor/packaging/version.pyc,, +pip/_vendor/pep517/__init__.py,sha256=nOY747zTld3oTdEetBG6DWxEcZXTeOQk0aHvbR-sa5w,84 +pip/_vendor/pep517/__init__.pyc,, +pip/_vendor/pep517/_in_process.py,sha256=xMY2kLutkjCti5WqTmKOLRRL3o8Ds_k-fObFyuMv1tk,6061 +pip/_vendor/pep517/_in_process.pyc,, +pip/_vendor/pep517/build.py,sha256=-n8PT-ugS1TdqoTUY1vatDQjrLtx48K_-Quu2MuQBiA,2699 +pip/_vendor/pep517/build.pyc,, +pip/_vendor/pep517/check.py,sha256=Lu7nMdYu1JVV58fE3hv-d_avTy5h0yO9LsIzAt82Clk,5885 +pip/_vendor/pep517/check.pyc,, +pip/_vendor/pep517/colorlog.py,sha256=Tk9AuYm_cLF3BKTBoSTJt9bRryn0aFojIQOwbfVUTxQ,4098 +pip/_vendor/pep517/colorlog.pyc,, +pip/_vendor/pep517/compat.py,sha256=4SFG4QN-cNj8ebSa0wV0HUtEEQWwmbok2a0uk1gYEOM,631 +pip/_vendor/pep517/compat.pyc,, +pip/_vendor/pep517/envbuild.py,sha256=9-u4KffexPMEm52rTaIjEOxsCAd2DMByxzv5H566QLw,5763 +pip/_vendor/pep517/envbuild.pyc,, +pip/_vendor/pep517/wrappers.py,sha256=9dZn-q7F5KyQKUJMie2uKwur2FG0CLXz_kLZzkJOhZc,5912 +pip/_vendor/pep517/wrappers.pyc,, +pip/_vendor/pkg_resources/__init__.py,sha256=JGk92Be39-a8sQIltjZF-Dk9ZOIAR0lpCZ9rYrGHfVM,104648 +pip/_vendor/pkg_resources/__init__.pyc,, +pip/_vendor/pkg_resources/py31compat.py,sha256=CRk8fkiPRDLsbi5pZcKsHI__Pbmh_94L8mr9Qy9Ab2U,562 +pip/_vendor/pkg_resources/py31compat.pyc,, +pip/_vendor/progress/__init__.py,sha256=Hv3Y8Hr6RyM34NdZkrZQWMURjS2h5sONRHJSvZXWZgQ,3188 +pip/_vendor/progress/__init__.pyc,, +pip/_vendor/progress/bar.py,sha256=hlkDAEv9pRRiWqR5XL6vIAgMG4u_dBGEW_8klQhBRq0,2942 +pip/_vendor/progress/bar.pyc,, 
+pip/_vendor/progress/counter.py,sha256=XtBuZY4yYmr50E2A_fAzjWhm0IkwaVwxNsNVYDE7nsw,1528 +pip/_vendor/progress/counter.pyc,, +pip/_vendor/progress/helpers.py,sha256=6FsBLh_xUlKiVua-zZIutCjxth-IO8FtyUj6I2tx9fg,2952 +pip/_vendor/progress/helpers.pyc,, +pip/_vendor/progress/spinner.py,sha256=m7bASI2GUbLFG-PbAefdHtrrWWlJLFhhSBbw70gp2TY,1439 +pip/_vendor/progress/spinner.pyc,, +pip/_vendor/pyparsing.py,sha256=jh8A5pZOiogg5mR2riJEb2vlfKQ4grylOcYSmW2SU0s,243692 +pip/_vendor/pyparsing.pyc,, +pip/_vendor/pytoml/__init__.py,sha256=W_SKx36Hsew-Fty36BOpreLm4uF4V_Tgkm_z9rIoOE8,127 +pip/_vendor/pytoml/__init__.pyc,, +pip/_vendor/pytoml/core.py,sha256=9CrLLTs1PdWjEwRnYzt_i4dhHcZvGxs_GsMlYAX3iY4,509 +pip/_vendor/pytoml/core.pyc,, +pip/_vendor/pytoml/parser.py,sha256=2tDXkldqPQJhyadXzL2rGhVbjUyBNeXXhaEfncHl2iQ,10326 +pip/_vendor/pytoml/parser.pyc,, +pip/_vendor/pytoml/test.py,sha256=2nQs4aX3XQEaaQCx6x_OJTS2Hb0_IiTZRqNOeDmLCzo,1021 +pip/_vendor/pytoml/test.pyc,, +pip/_vendor/pytoml/utils.py,sha256=JCLHx77Hu1R3F-bRgiROIiKyCzLwyebnp5P35cRJxWs,1665 +pip/_vendor/pytoml/utils.pyc,, +pip/_vendor/pytoml/writer.py,sha256=WbNNQg3sh_V-s3kt88LkNNbxEq6pPDdhRE-daJzArcI,3198 +pip/_vendor/pytoml/writer.pyc,, +pip/_vendor/requests/__init__.py,sha256=ZI8kbaEzLAxsqex3MmMPr-v24d1RfZbNAOY8fUxg2Xw,4074 +pip/_vendor/requests/__init__.pyc,, +pip/_vendor/requests/__version__.py,sha256=8KG3anaNCi-PEclPPOHJ_cv1udY_L1_njVr84gRZ9HM,436 +pip/_vendor/requests/__version__.pyc,, +pip/_vendor/requests/_internal_utils.py,sha256=Zx3PnEUccyfsB-ie11nZVAW8qClJy0gx1qNME7rgT18,1096 +pip/_vendor/requests/_internal_utils.pyc,, +pip/_vendor/requests/adapters.py,sha256=e-bmKEApNVqFdylxuMJJfiaHdlmS_zhWhIMEzlHvGuc,21548 +pip/_vendor/requests/adapters.pyc,, +pip/_vendor/requests/api.py,sha256=hWZgfD7OriCZFOnpeq0bv2pbXDl8YXfxDwAcU036qDs,6253 +pip/_vendor/requests/api.pyc,, +pip/_vendor/requests/auth.py,sha256=QB2-cSUj1jrvWZfPXttsZpyAacQgtKLVk14vQW9TpSE,10206 +pip/_vendor/requests/auth.pyc,, +pip/_vendor/requests/certs.py,sha256=nXRVq9DtGmv_1AYbwjTu9UrgAcdJv05ZvkNeaoLOZxY,465 +pip/_vendor/requests/certs.pyc,, +pip/_vendor/requests/compat.py,sha256=FZX4Q_EMKiMnhZpZ3g_gOsT-j2ca9ij2gehDx1cwYeo,1941 +pip/_vendor/requests/compat.pyc,, +pip/_vendor/requests/cookies.py,sha256=Y-bKX6TvW3FnYlE6Au0SXtVVWcaNdFvuAwQxw-G0iTI,18430 +pip/_vendor/requests/cookies.pyc,, +pip/_vendor/requests/exceptions.py,sha256=-mLam3TAx80V09EaH3H-ZxR61eAVuLRZ8zgBBSLjK44,3197 +pip/_vendor/requests/exceptions.pyc,, +pip/_vendor/requests/help.py,sha256=SJPVcoXeo7KfK4AxJN5eFVQCjr0im87tU2n7ubLsksU,3578 +pip/_vendor/requests/help.pyc,, +pip/_vendor/requests/hooks.py,sha256=QReGyy0bRcr5rkwCuObNakbYsc7EkiKeBwG4qHekr2Q,757 +pip/_vendor/requests/hooks.pyc,, +pip/_vendor/requests/models.py,sha256=6s-37iAqXVptq8z7U_LoH_pbIPrCQUm_Z8QuIGE29Q0,34275 +pip/_vendor/requests/models.pyc,, +pip/_vendor/requests/packages.py,sha256=njJmVifY4aSctuW3PP5EFRCxjEwMRDO6J_feG2dKWsI,695 +pip/_vendor/requests/packages.pyc,, +pip/_vendor/requests/sessions.py,sha256=DjbCotDW6xSAaBsjbW-L8l4N0UcwmrxVNgSrZgIjGWM,29332 +pip/_vendor/requests/sessions.pyc,, +pip/_vendor/requests/status_codes.py,sha256=XWlcpBjbCtq9sSqpH9_KKxgnLTf9Z__wCWolq21ySlg,4129 +pip/_vendor/requests/status_codes.pyc,, +pip/_vendor/requests/structures.py,sha256=zoP8qly2Jak5e89HwpqjN1z2diztI-_gaqts1raJJBc,2981 +pip/_vendor/requests/structures.pyc,, +pip/_vendor/requests/utils.py,sha256=LtPJ1db6mJff2TJSJWKi7rBpzjPS3mSOrjC9zRhoD3A,30049 +pip/_vendor/requests/utils.pyc,, +pip/_vendor/retrying.py,sha256=k3fflf5_Mm0XcIJYhB7Tj34bqCCPhUDkYbx1NvW2FPE,9972 
+pip/_vendor/retrying.pyc,, +pip/_vendor/six.py,sha256=h9jch2pS86y4R36pKRS3LOYUCVFNIJMRwjZ4fJDtJ44,32452 +pip/_vendor/six.pyc,, +pip/_vendor/urllib3/__init__.py,sha256=EZviRQA_iuL_94EeJHY4JAArRXbRCkAzA0HH9iXZ15s,2722 +pip/_vendor/urllib3/__init__.pyc,, +pip/_vendor/urllib3/_collections.py,sha256=-CAKsDE-WdubAjlBSZLx7b0e7WKenaNGwWvGLDEF1TM,10746 +pip/_vendor/urllib3/_collections.pyc,, +pip/_vendor/urllib3/connection.py,sha256=KLFvknLgllcMkgJ-zUsFjCzOt9P03fDoIpTPz_vqXCw,13839 +pip/_vendor/urllib3/connection.pyc,, +pip/_vendor/urllib3/connectionpool.py,sha256=rgc_3D0VsD5VDxr4KzzA8Plee0Rmerm5WKb71FcxWu8,35097 +pip/_vendor/urllib3/connectionpool.pyc,, +pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/__init__.pyc,, +pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=lhYXvB5_oGKSeurX7za3XhcGyERvNjXRQ3eJp2GmQ3M,717 +pip/_vendor/urllib3/contrib/_appengine_environ.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/_securetransport/__init__.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=x2kLSh-ASZKsun0FxtraBuLVe3oHuth4YW6yZ5Vof-w,17560 +pip/_vendor/urllib3/contrib/_securetransport/bindings.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=Umy5u-3Z957GirdapnicXVOpHaM4xdOZABJuJxfaeJA,12162 +pip/_vendor/urllib3/contrib/_securetransport/low_level.pyc,, +pip/_vendor/urllib3/contrib/appengine.py,sha256=VvDpkc5gf9dTXNxXmyG1mPdON_3DrYG_eW4uOqN98oQ,10938 +pip/_vendor/urllib3/contrib/appengine.pyc,, +pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=5ZpMF7N9B6NEjVU-r-xjDOV_-hkNvsDoNc84J2yqauI,4459 +pip/_vendor/urllib3/contrib/ntlmpool.pyc,, +pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=-kI_9y99Iwybv6Wy8IF8PugVl61BeMBEEqGwrDYNCuc,15823 +pip/_vendor/urllib3/contrib/pyopenssl.pyc,, +pip/_vendor/urllib3/contrib/securetransport.py,sha256=BqXSlChN9_hjCWgyN6JdcgvBUdc37QCCX4u3_8zE_9o,30309 +pip/_vendor/urllib3/contrib/securetransport.pyc,, +pip/_vendor/urllib3/contrib/socks.py,sha256=Iom0snbHkCuZbZ7Sle2Kueha1W0jYAJ0SyCOtePLaio,6391 +pip/_vendor/urllib3/contrib/socks.pyc,, +pip/_vendor/urllib3/exceptions.py,sha256=rFeIfBNKC8KJ61ux-MtJyJlEC9G9ggkmCeF751JwVR4,6604 +pip/_vendor/urllib3/exceptions.pyc,, +pip/_vendor/urllib3/fields.py,sha256=D_TE_SK15YatdbhWDMN0OE3X6UCJn1RTkANINCYOobE,5943 +pip/_vendor/urllib3/fields.pyc,, +pip/_vendor/urllib3/filepost.py,sha256=40CROlpRKVBpFUkD0R6wJf_PpvbcRQRFUu0OOQlFkKM,2436 +pip/_vendor/urllib3/filepost.pyc,, +pip/_vendor/urllib3/packages/__init__.py,sha256=nlChrGzkjCkmhCX9HrF_qHPUgosfsPQkVIJxiiLhk9g,109 +pip/_vendor/urllib3/packages/__init__.pyc,, +pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/backports/__init__.pyc,, +pip/_vendor/urllib3/packages/backports/makefile.py,sha256=so2z9BiNM8kh38Ve5tomQP_mp2_ubEqzdlCpLZKzzCI,1456 +pip/_vendor/urllib3/packages/backports/makefile.pyc,, +pip/_vendor/urllib3/packages/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 +pip/_vendor/urllib3/packages/six.pyc,, +pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py,sha256=WBVbxQBojNAxfZwNavkox3BgJiMA9BJmm-_fwd0jD_o,688 +pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.pyc,, +pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py,sha256=E-9J-kAaUn76WMZ4PpzKUxM4C3yjY7mopOpbPIy3Dso,5700 +pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.pyc,, 
+pip/_vendor/urllib3/poolmanager.py,sha256=csE6Bh6L0FJ3iNOHk2z8KhMT8Eiq976b6pk8I6vrOC8,16853 +pip/_vendor/urllib3/poolmanager.pyc,, +pip/_vendor/urllib3/request.py,sha256=OfelFYzPnxGlU3amEz9uBLjCBOriwgJh4QC_aW9SF3U,5991 +pip/_vendor/urllib3/request.pyc,, +pip/_vendor/urllib3/response.py,sha256=ta1jp4B5PGBWzoAV1s48WLuHCRICQnK7F9m_kyK4Z8g,25609 +pip/_vendor/urllib3/response.pyc,, +pip/_vendor/urllib3/util/__init__.py,sha256=6Ran4oAVIy40Cu_oEPWnNV9bwF5rXx6G1DUZ7oehjPY,1044 +pip/_vendor/urllib3/util/__init__.pyc,, +pip/_vendor/urllib3/util/connection.py,sha256=-AyqcRTuNUHuo5ndtsU0Og_nMyCGATC-kYqOUdBHwIQ,4639 +pip/_vendor/urllib3/util/connection.pyc,, +pip/_vendor/urllib3/util/queue.py,sha256=myTX3JDHntglKQNBf3b6dasHH-uF-W59vzGSQiFdAfI,497 +pip/_vendor/urllib3/util/queue.pyc,, +pip/_vendor/urllib3/util/request.py,sha256=H5_lrHvtwl2U2BbT1UYN9HpruNc1gsNFlz2njQmhPrQ,3705 +pip/_vendor/urllib3/util/request.pyc,, +pip/_vendor/urllib3/util/response.py,sha256=028PNXDZhwBtnm2uXvnAHi_l9_AAGrAMH2Igh2AbgWg,2586 +pip/_vendor/urllib3/util/response.pyc,, +pip/_vendor/urllib3/util/retry.py,sha256=kFQTesNiwPp6ZeQo9VHeUO7b8qA-_l3BnErCAOEPo4Q,15105 +pip/_vendor/urllib3/util/retry.pyc,, +pip/_vendor/urllib3/util/ssl_.py,sha256=4qqBDM82bufhqqEd0b-99sObz95XmEVEXDVi5iAyCeE,13172 +pip/_vendor/urllib3/util/ssl_.pyc,, +pip/_vendor/urllib3/util/timeout.py,sha256=7lHNrgL5YH2cI1j-yZnzV_J8jBlRVdmFhQaNyM1_2b8,9757 +pip/_vendor/urllib3/util/timeout.pyc,, +pip/_vendor/urllib3/util/url.py,sha256=qCY_HHUXvo05wAsEERALgExtlgxLnAHSQ7ce1b-g3SM,6487 +pip/_vendor/urllib3/util/url.pyc,, +pip/_vendor/urllib3/util/wait.py,sha256=p4BZo_Ukp5JF0Dn6jro7cUfqIjnU6WFtuoA6poaV5Jk,5403 +pip/_vendor/urllib3/util/wait.pyc,, +pip/_vendor/webencodings/__init__.py,sha256=qOBJIuPy_4ByYH6W_bNgJF-qYQ2DoU-dKsDu5yRWCXg,10579 +pip/_vendor/webencodings/__init__.pyc,, +pip/_vendor/webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37HY9r3E,8979 +pip/_vendor/webencodings/labels.pyc,, +pip/_vendor/webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305 +pip/_vendor/webencodings/mklabels.pyc,, +pip/_vendor/webencodings/tests.py,sha256=OtGLyjhNY1fvkW1GvLJ_FV9ZoqC9Anyjr7q3kxTbzNs,6563 +pip/_vendor/webencodings/tests.pyc,, +pip/_vendor/webencodings/x_user_defined.py,sha256=yOqWSdmpytGfUgh_Z6JYgDNhoc-BAHyyeeT15Fr42tM,4307 +pip/_vendor/webencodings/x_user_defined.pyc,, diff --git a/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/WHEEL new file mode 100644 index 0000000..c8240f0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/entry_points.txt b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/entry_points.txt new file mode 100644 index 0000000..f5809cb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +pip = pip._internal:main +pip3 = pip._internal:main +pip3.7 = pip._internal:main + diff --git a/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/top_level.txt new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip-19.0.3.dist-info/top_level.txt @@ -0,0 +1 @@ +pip diff 
--git a/project/venv/lib/python2.7/site-packages/pip/__init__.py b/project/venv/lib/python2.7/site-packages/pip/__init__.py
new file mode 100644
index 0000000..f48c1ca
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/__init__.py
@@ -0,0 +1 @@
+__version__ = "19.0.3"
diff --git a/project/venv/lib/python2.7/site-packages/pip/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/__init__.pyc
new file mode 100644
index 0000000..97ee299
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/__main__.py b/project/venv/lib/python2.7/site-packages/pip/__main__.py
new file mode 100644
index 0000000..0c223f8
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/__main__.py
@@ -0,0 +1,19 @@
+from __future__ import absolute_import
+
+import os
+import sys
+
+# If we are running from a wheel, add the wheel to sys.path
+# This allows the usage python pip-*.whl/pip install pip-*.whl
+if __package__ == '':
+    # __file__ is pip-*.whl/pip/__main__.py
+    # first dirname call strips off '/__main__.py', second strips off '/pip'
+    # Resulting path is the name of the wheel itself
+    # Add that to sys.path so we can import pip
+    path = os.path.dirname(os.path.dirname(__file__))
+    sys.path.insert(0, path)
+
+from pip._internal import main as _main  # isort:skip # noqa
+
+if __name__ == '__main__':
+    sys.exit(_main())
diff --git a/project/venv/lib/python2.7/site-packages/pip/__main__.pyc b/project/venv/lib/python2.7/site-packages/pip/__main__.pyc
new file mode 100644
index 0000000..8744eb9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/__main__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_internal/__init__.py
new file mode 100644
index 0000000..276124d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/__init__.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+from __future__ import absolute_import
+
+import locale
+import logging
+import os
+import warnings
+
+import sys
+
+# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks,
+# but if invoked (i.e. imported), it will issue a warning to stderr if socks
+# isn't available. requests unconditionally imports urllib3's socks contrib
+# module, triggering this warning. The warning breaks DEP-8 tests (because of
+# the stderr output) and is just plain annoying in normal usage. I don't want
+# to add socks as yet another dependency for pip, nor do I want to allow-stderr
+# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to
+# be done before the import of pip.vcs.
+from pip._vendor.urllib3.exceptions import DependencyWarning
+warnings.filterwarnings("ignore", category=DependencyWarning)  # noqa
+
+# We want to inject the use of SecureTransport as early as possible so that any
+# references or sessions or what have you are ensured to have it; however, we
+# only want to do this when we're running on macOS and the linked
+# OpenSSL is too old to handle TLSv1.2
+try:
+    import ssl
+except ImportError:
+    pass
+else:
+    # Checks for OpenSSL 1.0.1 on macOS
+    if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f:
+        try:
+            from pip._vendor.urllib3.contrib import securetransport
+        except (ImportError, OSError):
+            pass
+        else:
+            securetransport.inject_into_urllib3()
+
+from pip._internal.cli.autocompletion import autocomplete
+from pip._internal.cli.main_parser import parse_command
+from pip._internal.commands import commands_dict
+from pip._internal.exceptions import PipError
+from pip._internal.utils import deprecation
+from pip._internal.vcs import git, mercurial, subversion, bazaar  # noqa
+from pip._vendor.urllib3.exceptions import InsecureRequestWarning
+
+logger = logging.getLogger(__name__)
+
+# Hide the InsecureRequestWarning from urllib3
+warnings.filterwarnings("ignore", category=InsecureRequestWarning)
+
+
+def main(args=None):
+    if args is None:
+        args = sys.argv[1:]
+
+    # Configure our deprecation warnings to be sent through loggers
+    deprecation.install_warning_logger()
+
+    autocomplete()
+
+    try:
+        cmd_name, cmd_args = parse_command(args)
+    except PipError as exc:
+        sys.stderr.write("ERROR: %s" % exc)
+        sys.stderr.write(os.linesep)
+        sys.exit(1)
+
+    # Needed for locale.getpreferredencoding(False) to work
+    # in pip._internal.utils.encoding.auto_decode
+    try:
+        locale.setlocale(locale.LC_ALL, '')
+    except locale.Error as e:
+        # setlocale can apparently crash if the locale is uninitialized
+        logger.debug("Ignoring error %s when setting locale", e)
+    command = commands_dict[cmd_name](isolated=("--isolated" in cmd_args))
+    return command.main(cmd_args)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/__init__.pyc
new file mode 100644
index 0000000..e009574
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/build_env.py b/project/venv/lib/python2.7/site-packages/pip/_internal/build_env.py
new file mode 100644
index 0000000..d744cc7
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/build_env.py
@@ -0,0 +1,215 @@
+"""Build Environment used for isolation during sdist building
+"""
+
+import logging
+import os
+import sys
+import textwrap
+from collections import OrderedDict
+from distutils.sysconfig import get_python_lib
+from sysconfig import get_paths
+
+from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet
+
+from pip import __file__ as pip_location
+from pip._internal.utils.misc import call_subprocess
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.utils.ui import open_spinner
+
+if MYPY_CHECK_RUNNING:
+    from typing import Tuple, Set, Iterable, Optional, List  # noqa: F401
+    from pip._internal.index import PackageFinder  # noqa: F401
+
+logger = logging.getLogger(__name__)
+
+
+class _Prefix:
+
+    def __init__(self, path):
+        # type: (str) -> None
+        self.path = path
+        self.setup = False
+        self.bin_dir = get_paths(
+            'nt' if os.name == 'nt' else 'posix_prefix',
+            vars={'base': path, 'platbase': path}
+        )['scripts']
+        # Note: prefer distutils' sysconfig to get the
+        # library paths so PyPy is correctly supported.
+        purelib = get_python_lib(plat_specific=False, prefix=path)
+        platlib = get_python_lib(plat_specific=True, prefix=path)
+        if purelib == platlib:
+            self.lib_dirs = [purelib]
+        else:
+            self.lib_dirs = [purelib, platlib]
+
+
+class BuildEnvironment(object):
+    """Creates and manages an isolated environment to install build deps
+    """
+
+    def __init__(self):
+        # type: () -> None
+        self._temp_dir = TempDirectory(kind="build-env")
+        self._temp_dir.create()
+
+        self._prefixes = OrderedDict((
+            (name, _Prefix(os.path.join(self._temp_dir.path, name)))
+            for name in ('normal', 'overlay')
+        ))
+
+        self._bin_dirs = []  # type: List[str]
+        self._lib_dirs = []  # type: List[str]
+        for prefix in reversed(list(self._prefixes.values())):
+            self._bin_dirs.append(prefix.bin_dir)
+            self._lib_dirs.extend(prefix.lib_dirs)
+
+        # Customize site to:
+        # - ensure .pth files are honored
+        # - prevent access to system site packages
+        system_sites = {
+            os.path.normcase(site) for site in (
+                get_python_lib(plat_specific=False),
+                get_python_lib(plat_specific=True),
+            )
+        }
+        self._site_dir = os.path.join(self._temp_dir.path, 'site')
+        if not os.path.exists(self._site_dir):
+            os.mkdir(self._site_dir)
+        with open(os.path.join(self._site_dir, 'sitecustomize.py'), 'w') as fp:
+            fp.write(textwrap.dedent(
+                '''
+                import os, site, sys
+
+                # First, drop system-sites related paths.
+                original_sys_path = sys.path[:]
+                known_paths = set()
+                for path in {system_sites!r}:
+                    site.addsitedir(path, known_paths=known_paths)
+                system_paths = set(
+                    os.path.normcase(path)
+                    for path in sys.path[len(original_sys_path):]
+                )
+                original_sys_path = [
+                    path for path in original_sys_path
+                    if os.path.normcase(path) not in system_paths
+                ]
+                sys.path = original_sys_path
+
+                # Second, add lib directories,
+                # ensuring .pth files are processed.
+ for path in {lib_dirs!r}: + assert not path in sys.path + site.addsitedir(path) + ''' + ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)) + + def __enter__(self): + self._save_env = { + name: os.environ.get(name, None) + for name in ('PATH', 'PYTHONNOUSERSITE', 'PYTHONPATH') + } + + path = self._bin_dirs[:] + old_path = self._save_env['PATH'] + if old_path: + path.extend(old_path.split(os.pathsep)) + + pythonpath = [self._site_dir] + + os.environ.update({ + 'PATH': os.pathsep.join(path), + 'PYTHONNOUSERSITE': '1', + 'PYTHONPATH': os.pathsep.join(pythonpath), + }) + + def __exit__(self, exc_type, exc_val, exc_tb): + for varname, old_value in self._save_env.items(): + if old_value is None: + os.environ.pop(varname, None) + else: + os.environ[varname] = old_value + + def cleanup(self): + # type: () -> None + self._temp_dir.cleanup() + + def check_requirements(self, reqs): + # type: (Iterable[str]) -> Tuple[Set[Tuple[str, str]], Set[str]] + """Return 2 sets: + - conflicting requirements: set of (installed, wanted) reqs tuples + - missing requirements: set of reqs + """ + missing = set() + conflicting = set() + if reqs: + ws = WorkingSet(self._lib_dirs) + for req in reqs: + try: + if ws.find(Requirement.parse(req)) is None: + missing.add(req) + except VersionConflict as e: + conflicting.add((str(e.args[0].as_requirement()), + str(e.args[1]))) + return conflicting, missing + + def install_requirements( + self, + finder, # type: PackageFinder + requirements, # type: Iterable[str] + prefix_as_string, # type: str + message # type: Optional[str] + ): + # type: (...) -> None + prefix = self._prefixes[prefix_as_string] + assert not prefix.setup + prefix.setup = True + if not requirements: + return + args = [ + sys.executable, os.path.dirname(pip_location), 'install', + '--ignore-installed', '--no-user', '--prefix', prefix.path, + '--no-warn-script-location', + ] # type: List[str] + if logger.getEffectiveLevel() <= logging.DEBUG: + args.append('-v') + for format_control in ('no_binary', 'only_binary'): + formats = getattr(finder.format_control, format_control) + args.extend(('--' + format_control.replace('_', '-'), + ','.join(sorted(formats or {':none:'})))) + if finder.index_urls: + args.extend(['-i', finder.index_urls[0]]) + for extra_index in finder.index_urls[1:]: + args.extend(['--extra-index-url', extra_index]) + else: + args.append('--no-index') + for link in finder.find_links: + args.extend(['--find-links', link]) + for _, host, _ in finder.secure_origins: + args.extend(['--trusted-host', host]) + if finder.allow_all_prereleases: + args.append('--pre') + args.append('--') + args.extend(requirements) + with open_spinner(message) as spinner: + call_subprocess(args, show_stdout=False, spinner=spinner) + + +class NoOpBuildEnvironment(BuildEnvironment): + """A no-op drop-in replacement for BuildEnvironment + """ + + def __init__(self): + pass + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + def cleanup(self): + pass + + def install_requirements(self, finder, requirements, prefix, message): + raise NotImplementedError() diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/build_env.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/build_env.pyc new file mode 100644 index 0000000..bb6d22f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/build_env.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cache.py 
b/project/venv/lib/python2.7/site-packages/pip/_internal/cache.py new file mode 100644 index 0000000..eb295c4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/cache.py @@ -0,0 +1,224 @@ +"""Cache Management +""" + +import errno +import hashlib +import logging +import os + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.download import path_to_url +from pip._internal.models.link import Link +from pip._internal.utils.compat import expanduser +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pip._internal.wheel import InvalidWheelFilename, Wheel + +if MYPY_CHECK_RUNNING: + from typing import Optional, Set, List, Any # noqa: F401 + from pip._internal.index import FormatControl # noqa: F401 + +logger = logging.getLogger(__name__) + + +class Cache(object): + """An abstract class - provides cache directories for data from links + + + :param cache_dir: The root of the cache. + :param format_control: An object of FormatControl class to limit + binaries being read from the cache. + :param allowed_formats: which formats of files the cache should store. + ('binary' and 'source' are the only allowed values) + """ + + def __init__(self, cache_dir, format_control, allowed_formats): + # type: (str, FormatControl, Set[str]) -> None + super(Cache, self).__init__() + self.cache_dir = expanduser(cache_dir) if cache_dir else None + self.format_control = format_control + self.allowed_formats = allowed_formats + + _valid_formats = {"source", "binary"} + assert self.allowed_formats.union(_valid_formats) == _valid_formats + + def _get_cache_path_parts(self, link): + # type: (Link) -> List[str] + """Get parts of part that must be os.path.joined with cache_dir + """ + + # We want to generate an url to use as our cache key, we don't want to + # just re-use the URL because it might have other items in the fragment + # and we don't care about those. + key_parts = [link.url_without_fragment] + if link.hash_name is not None and link.hash is not None: + key_parts.append("=".join([link.hash_name, link.hash])) + key_url = "#".join(key_parts) + + # Encode our key url with sha224, we'll use this because it has similar + # security properties to sha256, but with a shorter total output (and + # thus less secure). However the differences don't make a lot of + # difference for our use case here. + hashed = hashlib.sha224(key_url.encode()).hexdigest() + + # We want to nest the directories some to prevent having a ton of top + # level directories where we might run out of sub directories on some + # FS. + parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]] + + return parts + + def _get_candidates(self, link, package_name): + # type: (Link, Optional[str]) -> List[Any] + can_not_cache = ( + not self.cache_dir or + not package_name or + not link + ) + if can_not_cache: + return [] + + canonical_name = canonicalize_name(package_name) + formats = self.format_control.get_allowed_formats( + canonical_name + ) + if not self.allowed_formats.intersection(formats): + return [] + + root = self.get_path_for_link(link) + try: + return os.listdir(root) + except OSError as err: + if err.errno in {errno.ENOENT, errno.ENOTDIR}: + return [] + raise + + def get_path_for_link(self, link): + # type: (Link) -> str + """Return a directory to store cached items in for link. 
+ """ + raise NotImplementedError() + + def get(self, link, package_name): + # type: (Link, Optional[str]) -> Link + """Returns a link to a cached item if it exists, otherwise returns the + passed link. + """ + raise NotImplementedError() + + def _link_for_candidate(self, link, candidate): + # type: (Link, str) -> Link + root = self.get_path_for_link(link) + path = os.path.join(root, candidate) + + return Link(path_to_url(path)) + + def cleanup(self): + # type: () -> None + pass + + +class SimpleWheelCache(Cache): + """A cache of wheels for future installs. + """ + + def __init__(self, cache_dir, format_control): + # type: (str, FormatControl) -> None + super(SimpleWheelCache, self).__init__( + cache_dir, format_control, {"binary"} + ) + + def get_path_for_link(self, link): + # type: (Link) -> str + """Return a directory to store cached wheels for link + + Because there are M wheels for any one sdist, we provide a directory + to cache them in, and then consult that directory when looking up + cache hits. + + We only insert things into the cache if they have plausible version + numbers, so that we don't contaminate the cache with things that were + not unique. E.g. ./package might have dozens of installs done for it + and build a version of 0.0...and if we built and cached a wheel, we'd + end up using the same wheel even if the source has been edited. + + :param link: The link of the sdist for which this will cache wheels. + """ + parts = self._get_cache_path_parts(link) + + # Store wheels within the root cache_dir + return os.path.join(self.cache_dir, "wheels", *parts) + + def get(self, link, package_name): + # type: (Link, Optional[str]) -> Link + candidates = [] + + for wheel_name in self._get_candidates(link, package_name): + try: + wheel = Wheel(wheel_name) + except InvalidWheelFilename: + continue + if not wheel.supported(): + # Built for a different python/arch/etc + continue + candidates.append((wheel.support_index_min(), wheel_name)) + + if not candidates: + return link + + return self._link_for_candidate(link, min(candidates)[1]) + + +class EphemWheelCache(SimpleWheelCache): + """A SimpleWheelCache that creates it's own temporary cache directory + """ + + def __init__(self, format_control): + # type: (FormatControl) -> None + self._temp_dir = TempDirectory(kind="ephem-wheel-cache") + self._temp_dir.create() + + super(EphemWheelCache, self).__init__( + self._temp_dir.path, format_control + ) + + def cleanup(self): + # type: () -> None + self._temp_dir.cleanup() + + +class WheelCache(Cache): + """Wraps EphemWheelCache and SimpleWheelCache into a single Cache + + This Cache allows for gracefully degradation, using the ephem wheel cache + when a certain link is not found in the simple wheel cache first. 
+ """ + + def __init__(self, cache_dir, format_control): + # type: (str, FormatControl) -> None + super(WheelCache, self).__init__( + cache_dir, format_control, {'binary'} + ) + self._wheel_cache = SimpleWheelCache(cache_dir, format_control) + self._ephem_cache = EphemWheelCache(format_control) + + def get_path_for_link(self, link): + # type: (Link) -> str + return self._wheel_cache.get_path_for_link(link) + + def get_ephem_path_for_link(self, link): + # type: (Link) -> str + return self._ephem_cache.get_path_for_link(link) + + def get(self, link, package_name): + # type: (Link, Optional[str]) -> Link + retval = self._wheel_cache.get(link, package_name) + if retval is link: + retval = self._ephem_cache.get(link, package_name) + return retval + + def cleanup(self): + # type: () -> None + self._wheel_cache.cleanup() + self._ephem_cache.cleanup() diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cache.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/cache.pyc new file mode 100644 index 0000000..cd5c730 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/cache.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/__init__.py new file mode 100644 index 0000000..e589bb9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/__init__.py @@ -0,0 +1,4 @@ +"""Subpackage containing all of pip's command line interface related code +""" + +# This file intentionally does not import submodules diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/__init__.pyc new file mode 100644 index 0000000..81b9c84 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/autocompletion.py b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/autocompletion.py new file mode 100644 index 0000000..0a04199 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/autocompletion.py @@ -0,0 +1,152 @@ +"""Logic that powers autocompletion installed by ``pip completion``. +""" + +import optparse +import os +import sys + +from pip._internal.cli.main_parser import create_main_parser +from pip._internal.commands import commands_dict, get_summaries +from pip._internal.utils.misc import get_installed_distributions + + +def autocomplete(): + """Entry Point for completion of main and subcommand options. + """ + # Don't complete if user hasn't sourced bash_completion file. 
+    if 'PIP_AUTO_COMPLETE' not in os.environ:
+        return
+    cwords = os.environ['COMP_WORDS'].split()[1:]
+    cword = int(os.environ['COMP_CWORD'])
+    try:
+        current = cwords[cword - 1]
+    except IndexError:
+        current = ''
+
+    subcommands = [cmd for cmd, summary in get_summaries()]
+    options = []
+    # subcommand
+    try:
+        subcommand_name = [w for w in cwords if w in subcommands][0]
+    except IndexError:
+        subcommand_name = None
+
+    parser = create_main_parser()
+    # subcommand options
+    if subcommand_name:
+        # special case: 'help' subcommand has no options
+        if subcommand_name == 'help':
+            sys.exit(1)
+        # special case: list locally installed dists for show and uninstall
+        should_list_installed = (
+            subcommand_name in ['show', 'uninstall'] and
+            not current.startswith('-')
+        )
+        if should_list_installed:
+            installed = []
+            lc = current.lower()
+            for dist in get_installed_distributions(local_only=True):
+                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
+                    installed.append(dist.key)
+            # if there are no dists installed, fall back to option completion
+            if installed:
+                for dist in installed:
+                    print(dist)
+                sys.exit(1)
+
+        subcommand = commands_dict[subcommand_name]()
+
+        for opt in subcommand.parser.option_list_all:
+            if opt.help != optparse.SUPPRESS_HELP:
+                for opt_str in opt._long_opts + opt._short_opts:
+                    options.append((opt_str, opt.nargs))
+
+        # filter out previously specified options from available options
+        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
+        options = [(x, v) for (x, v) in options if x not in prev_opts]
+        # filter options by current input
+        options = [(k, v) for k, v in options if k.startswith(current)]
+        # get completion type given cwords and available subcommand options
+        completion_type = get_path_completion_type(
+            cwords, cword, subcommand.parser.option_list_all,
+        )
+        # get completion files and directories if ``completion_type`` is
+        # ``<file>``, ``<dir>`` or ``<path>``
+        if completion_type:
+            options = auto_complete_paths(current, completion_type)
+            options = ((opt, 0) for opt in options)
+        for option in options:
+            opt_label = option[0]
+            # append '=' to options which require args
+            if option[1] and option[0][:2] == "--":
+                opt_label += '='
+            print(opt_label)
+    else:
+        # show main parser options only when necessary
+
+        opts = [i.option_list for i in parser.option_groups]
+        opts.append(parser.option_list)
+        opts = (o for it in opts for o in it)
+        if current.startswith('-'):
+            for opt in opts:
+                if opt.help != optparse.SUPPRESS_HELP:
+                    subcommands += opt._long_opts + opt._short_opts
+        else:
+            # get completion type given cwords and all available options
+            completion_type = get_path_completion_type(cwords, cword, opts)
+            if completion_type:
+                subcommands = auto_complete_paths(current, completion_type)
+
+        print(' '.join([x for x in subcommands if x.startswith(current)]))
+    sys.exit(1)
+
+
+def get_path_completion_type(cwords, cword, opts):
+    """Get the type of path completion (``file``, ``dir``, ``path`` or None)
+
+    :param cwords: same as the environmental variable ``COMP_WORDS``
+    :param cword: same as the environmental variable ``COMP_CWORD``
+    :param opts: The available options to check
+    :return: path completion type (``file``, ``dir``, ``path`` or None)
+    """
+    if cword < 2 or not cwords[cword - 2].startswith('-'):
+        return
+    for opt in opts:
+        if opt.help == optparse.SUPPRESS_HELP:
+            continue
+        for o in str(opt).split('/'):
+            if cwords[cword - 2].split('=')[0] == o:
+                if not opt.metavar or any(
+                        x in ('path', 'file', 'dir')
+                        for x in opt.metavar.split('/')):
+                    return opt.metavar
+
+
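+# ---------------------------------------------------------------------------
+# Editor's note (illustration only, not part of the vendored pip source):
+# a minimal sketch of how the completion hook above is exercised. The shell
+# function installed by `pip completion --bash` exports the variables read by
+# autocomplete(); the session below is hypothetical.
+#
+#   $ export PIP_AUTO_COMPLETE=1
+#   $ COMP_WORDS="pip ins" COMP_CWORD=1 \
+#       python -c "from pip._internal.cli.autocompletion import autocomplete; autocomplete()"
+#   install
+#
+# Here cwords == ['ins'] and current == 'ins': no subcommand matches yet, so
+# the else-branch prints every subcommand starting with 'ins' and exits.
+# ---------------------------------------------------------------------------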
+def auto_complete_paths(current, completion_type):
+    """If ``completion_type`` is ``file`` or ``path``, list all regular files
+    and directories starting with ``current``; otherwise only list directories
+    starting with ``current``.
+
+    :param current: The word to be completed
+    :param completion_type: path completion type (``file``, ``path`` or ``dir``)
+    :return: A generator of regular files and/or directories
+    """
+    directory, filename = os.path.split(current)
+    current_path = os.path.abspath(directory)
+    # Don't complete paths if they can't be accessed
+    if not os.access(current_path, os.R_OK):
+        return
+    filename = os.path.normcase(filename)
+    # list all files that start with ``filename``
+    file_list = (x for x in os.listdir(current_path)
+                 if os.path.normcase(x).startswith(filename))
+    for f in file_list:
+        opt = os.path.join(current_path, f)
+        comp_file = os.path.normcase(os.path.join(directory, f))
+        # complete regular files when there is not ``<dir>`` after option
+        # complete directories when there is ``<file>``, ``<path>`` or
+        # ``<dir>`` after option
+        if completion_type != 'dir' and os.path.isfile(opt):
+            yield comp_file
+        elif os.path.isdir(opt):
+            yield os.path.join(comp_file, '')
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/autocompletion.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/autocompletion.pyc
new file mode 100644
index 0000000..134a3f9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/autocompletion.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/base_command.py b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/base_command.py
new file mode 100644
index 0000000..3ceea49
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/base_command.py
@@ -0,0 +1,341 @@
+"""Base Command class, and related routines"""
+from __future__ import absolute_import, print_function
+
+import logging
+import logging.config
+import optparse
+import os
+import platform
+import sys
+import traceback
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.parser import (
+    ConfigOptionParser, UpdatingDefaultsHelpFormatter,
+)
+from pip._internal.cli.status_codes import (
+    ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, UNKNOWN_ERROR,
+    VIRTUALENV_NOT_FOUND,
+)
+from pip._internal.download import PipSession
+from pip._internal.exceptions import (
+    BadCommand, CommandError, InstallationError, PreviousBuildDirError,
+    UninstallationError,
+)
+from pip._internal.index import PackageFinder
+from pip._internal.locations import running_under_virtualenv
+from pip._internal.req.constructors import (
+    install_req_from_editable, install_req_from_line,
+)
+from pip._internal.req.req_file import parse_requirements
+from pip._internal.utils.deprecation import deprecated
+from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging
+from pip._internal.utils.misc import (
+    get_prog, normalize_path, redact_password_from_url,
+)
+from pip._internal.utils.outdated import pip_version_check
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional, List, Tuple, Any  # noqa: F401
+    from optparse import Values  # noqa: F401
+    from pip._internal.cache import WheelCache  # noqa: F401
+    from pip._internal.req.req_set import RequirementSet  # noqa: F401
+
+__all__ = ['Command']
+
+logger = logging.getLogger(__name__)
+
+
+class Command(object):
+    name = None  # type: Optional[str]
+    usage = None  # type: Optional[str]
+    hidden = False  # type: bool
+    ignore_require_venv = False  # type: bool
+
+    def __init__(self, isolated=False):
+        # type: (bool) -> None
+        parser_kw = {
+            'usage': self.usage,
+            'prog': '%s %s' % (get_prog(), self.name),
+
'formatter': UpdatingDefaultsHelpFormatter(), + 'add_help_option': False, + 'name': self.name, + 'description': self.__doc__, + 'isolated': isolated, + } + + self.parser = ConfigOptionParser(**parser_kw) + + # Commands should add options to this option group + optgroup_name = '%s Options' % self.name.capitalize() + self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name) + + # Add the general options + gen_opts = cmdoptions.make_option_group( + cmdoptions.general_group, + self.parser, + ) + self.parser.add_option_group(gen_opts) + + def run(self, options, args): + # type: (Values, List[Any]) -> Any + raise NotImplementedError + + def _build_session(self, options, retries=None, timeout=None): + # type: (Values, Optional[int], Optional[int]) -> PipSession + session = PipSession( + cache=( + normalize_path(os.path.join(options.cache_dir, "http")) + if options.cache_dir else None + ), + retries=retries if retries is not None else options.retries, + insecure_hosts=options.trusted_hosts, + ) + + # Handle custom ca-bundles from the user + if options.cert: + session.verify = options.cert + + # Handle SSL client certificate + if options.client_cert: + session.cert = options.client_cert + + # Handle timeouts + if options.timeout or timeout: + session.timeout = ( + timeout if timeout is not None else options.timeout + ) + + # Handle configured proxies + if options.proxy: + session.proxies = { + "http": options.proxy, + "https": options.proxy, + } + + # Determine if we can prompt the user for authentication or not + session.auth.prompting = not options.no_input + + return session + + def parse_args(self, args): + # type: (List[str]) -> Tuple + # factored out for testability + return self.parser.parse_args(args) + + def main(self, args): + # type: (List[str]) -> int + options, args = self.parse_args(args) + + # Set verbosity so that it can be used elsewhere. + self.verbosity = options.verbose - options.quiet + + level_number = setup_logging( + verbosity=self.verbosity, + no_color=options.no_color, + user_log_file=options.log, + ) + + if sys.version_info[:2] == (3, 4): + deprecated( + "Python 3.4 support has been deprecated. pip 19.1 will be the " + "last one supporting it. Please upgrade your Python as Python " + "3.4 won't be maintained after March 2019 (cf PEP 429).", + replacement=None, + gone_in='19.2', + ) + elif sys.version_info[:2] == (2, 7): + message = ( + "A future version of pip will drop support for Python 2.7." + ) + if platform.python_implementation() == "CPython": + message = ( + "Python 2.7 will reach the end of its life on January " + "1st, 2020. Please upgrade your Python as Python 2.7 " + "won't be maintained after that date. " + ) + message + deprecated(message, replacement=None, gone_in=None) + + # TODO: Try to get these passing down from the command? + # without resorting to os.environ to hold these. + # This also affects isolated builds and it should. + + if options.no_input: + os.environ['PIP_NO_INPUT'] = '1' + + if options.exists_action: + os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action) + + if options.require_venv and not self.ignore_require_venv: + # If a venv is required check if it can really be found + if not running_under_virtualenv(): + logger.critical( + 'Could not find an activated virtualenv (required).' 
+ ) + sys.exit(VIRTUALENV_NOT_FOUND) + + try: + status = self.run(options, args) + # FIXME: all commands should return an exit status + # and when it is done, isinstance is not needed anymore + if isinstance(status, int): + return status + except PreviousBuildDirError as exc: + logger.critical(str(exc)) + logger.debug('Exception information:', exc_info=True) + + return PREVIOUS_BUILD_DIR_ERROR + except (InstallationError, UninstallationError, BadCommand) as exc: + logger.critical(str(exc)) + logger.debug('Exception information:', exc_info=True) + + return ERROR + except CommandError as exc: + logger.critical('ERROR: %s', exc) + logger.debug('Exception information:', exc_info=True) + + return ERROR + except BrokenStdoutLoggingError: + # Bypass our logger and write any remaining messages to stderr + # because stdout no longer works. + print('ERROR: Pipe to stdout was broken', file=sys.stderr) + if level_number <= logging.DEBUG: + traceback.print_exc(file=sys.stderr) + + return ERROR + except KeyboardInterrupt: + logger.critical('Operation cancelled by user') + logger.debug('Exception information:', exc_info=True) + + return ERROR + except BaseException: + logger.critical('Exception:', exc_info=True) + + return UNKNOWN_ERROR + finally: + allow_version_check = ( + # Does this command have the index_group options? + hasattr(options, "no_index") and + # Is this command allowed to perform this check? + not (options.disable_pip_version_check or options.no_index) + ) + # Check if we're using the latest version of pip available + if allow_version_check: + session = self._build_session( + options, + retries=0, + timeout=min(5, options.timeout) + ) + with session: + pip_version_check(session, options) + + # Shutdown the logging module + logging.shutdown() + + return SUCCESS + + +class RequirementCommand(Command): + + @staticmethod + def populate_requirement_set(requirement_set, # type: RequirementSet + args, # type: List[str] + options, # type: Values + finder, # type: PackageFinder + session, # type: PipSession + name, # type: str + wheel_cache # type: Optional[WheelCache] + ): + # type: (...) -> None + """ + Marshal cmd line args into a requirement set. 
+ """ + # NOTE: As a side-effect, options.require_hashes and + # requirement_set.require_hashes may be updated + + for filename in options.constraints: + for req_to_add in parse_requirements( + filename, + constraint=True, finder=finder, options=options, + session=session, wheel_cache=wheel_cache): + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) + + for req in args: + req_to_add = install_req_from_line( + req, None, isolated=options.isolated_mode, + use_pep517=options.use_pep517, + wheel_cache=wheel_cache + ) + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) + + for req in options.editables: + req_to_add = install_req_from_editable( + req, + isolated=options.isolated_mode, + use_pep517=options.use_pep517, + wheel_cache=wheel_cache + ) + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) + + for filename in options.requirements: + for req_to_add in parse_requirements( + filename, + finder=finder, options=options, session=session, + wheel_cache=wheel_cache, + use_pep517=options.use_pep517): + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) + # If --require-hashes was a line in a requirements file, tell + # RequirementSet about it: + requirement_set.require_hashes = options.require_hashes + + if not (args or options.editables or options.requirements): + opts = {'name': name} + if options.find_links: + raise CommandError( + 'You must give at least one requirement to %(name)s ' + '(maybe you meant "pip %(name)s %(links)s"?)' % + dict(opts, links=' '.join(options.find_links))) + else: + raise CommandError( + 'You must give at least one requirement to %(name)s ' + '(see "pip help %(name)s")' % opts) + + def _build_package_finder( + self, + options, # type: Values + session, # type: PipSession + platform=None, # type: Optional[str] + python_versions=None, # type: Optional[List[str]] + abi=None, # type: Optional[str] + implementation=None # type: Optional[str] + ): + # type: (...) -> PackageFinder + """ + Create a package finder appropriate to this requirement command. + """ + index_urls = [options.index_url] + options.extra_index_urls + if options.no_index: + logger.debug( + 'Ignoring indexes: %s', + ','.join(redact_password_from_url(url) for url in index_urls), + ) + index_urls = [] + + return PackageFinder( + find_links=options.find_links, + format_control=options.format_control, + index_urls=index_urls, + trusted_hosts=options.trusted_hosts, + allow_all_prereleases=options.pre, + session=session, + platform=platform, + versions=python_versions, + abi=abi, + implementation=implementation, + prefer_binary=options.prefer_binary, + ) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/base_command.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/base_command.pyc new file mode 100644 index 0000000..a13858a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/base_command.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/cmdoptions.py b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/cmdoptions.py new file mode 100644 index 0000000..5cf5ee9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/cmdoptions.py @@ -0,0 +1,809 @@ +""" +shared options and groups + +The principle here is to define options once, but *not* instantiate them +globally. One reason being that options with action='append' can carry state +between parses. 
pip parses general options twice internally, and shouldn't +pass on state. To be consistent, all options will follow this design. + +""" +from __future__ import absolute_import + +import textwrap +import warnings +from distutils.util import strtobool +from functools import partial +from optparse import SUPPRESS_HELP, Option, OptionGroup + +from pip._internal.exceptions import CommandError +from pip._internal.locations import USER_CACHE_DIR, src_prefix +from pip._internal.models.format_control import FormatControl +from pip._internal.models.index import PyPI +from pip._internal.utils.hashes import STRONG_HASHES +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pip._internal.utils.ui import BAR_TYPES + +if MYPY_CHECK_RUNNING: + from typing import Any, Callable, Dict, List, Optional, Union # noqa: F401 + from optparse import OptionParser, Values # noqa: F401 + from pip._internal.cli.parser import ConfigOptionParser # noqa: F401 + + +def raise_option_error(parser, option, msg): + """ + Raise an option parsing error using parser.error(). + + Args: + parser: an OptionParser instance. + option: an Option instance. + msg: the error text. + """ + msg = '{} error: {}'.format(option, msg) + msg = textwrap.fill(' '.join(msg.split())) + parser.error(msg) + + +def make_option_group(group, parser): + # type: (Dict[str, Any], ConfigOptionParser) -> OptionGroup + """ + Return an OptionGroup object + group -- assumed to be dict with 'name' and 'options' keys + parser -- an optparse Parser + """ + option_group = OptionGroup(parser, group['name']) + for option in group['options']: + option_group.add_option(option()) + return option_group + + +def check_install_build_global(options, check_options=None): + # type: (Values, Optional[Values]) -> None + """Disable wheels if per-setup.py call options are set. + + :param options: The OptionParser options to update. + :param check_options: The options to check, if not supplied defaults to + options. + """ + if check_options is None: + check_options = options + + def getname(n): + return getattr(check_options, n, None) + names = ["build_options", "global_options", "install_options"] + if any(map(getname, names)): + control = options.format_control + control.disallow_binaries() + warnings.warn( + 'Disabling all use of wheels due to the use of --build-options ' + '/ --global-options / --install-options.', stacklevel=2, + ) + + +def check_dist_restriction(options, check_target=False): + # type: (Values, bool) -> None + """Function for determining if custom platform options are allowed. + + :param options: The OptionParser options. + :param check_target: Whether or not to check if --target is being used. + """ + dist_restriction_set = any([ + options.python_version, + options.platform, + options.abi, + options.implementation, + ]) + + binary_only = FormatControl(set(), {':all:'}) + sdist_dependencies_allowed = ( + options.format_control != binary_only and + not options.ignore_dependencies + ) + + # Installations or downloads using dist restrictions must not combine + # source distributions and dist-specific wheels, as they are not + # gauranteed to be locally compatible. + if dist_restriction_set and sdist_dependencies_allowed: + raise CommandError( + "When restricting platform and interpreter constraints using " + "--python-version, --platform, --abi, or --implementation, " + "either --no-deps must be set, or --only-binary=:all: must be " + "set and --no-binary must not be set (or must be set to " + ":none:)." 
+ ) + + if check_target: + if dist_restriction_set and not options.target_dir: + raise CommandError( + "Can not use any platform or abi specific options unless " + "installing via '--target'" + ) + + +########### +# options # +########### + +help_ = partial( + Option, + '-h', '--help', + dest='help', + action='help', + help='Show help.', +) # type: Callable[..., Option] + +isolated_mode = partial( + Option, + "--isolated", + dest="isolated_mode", + action="store_true", + default=False, + help=( + "Run pip in an isolated mode, ignoring environment variables and user " + "configuration." + ), +) # type: Callable[..., Option] + +require_virtualenv = partial( + Option, + # Run only if inside a virtualenv, bail if not. + '--require-virtualenv', '--require-venv', + dest='require_venv', + action='store_true', + default=False, + help=SUPPRESS_HELP +) # type: Callable[..., Option] + +verbose = partial( + Option, + '-v', '--verbose', + dest='verbose', + action='count', + default=0, + help='Give more output. Option is additive, and can be used up to 3 times.' +) # type: Callable[..., Option] + +no_color = partial( + Option, + '--no-color', + dest='no_color', + action='store_true', + default=False, + help="Suppress colored output", +) # type: Callable[..., Option] + +version = partial( + Option, + '-V', '--version', + dest='version', + action='store_true', + help='Show version and exit.', +) # type: Callable[..., Option] + +quiet = partial( + Option, + '-q', '--quiet', + dest='quiet', + action='count', + default=0, + help=( + 'Give less output. Option is additive, and can be used up to 3' + ' times (corresponding to WARNING, ERROR, and CRITICAL logging' + ' levels).' + ), +) # type: Callable[..., Option] + +progress_bar = partial( + Option, + '--progress-bar', + dest='progress_bar', + type='choice', + choices=list(BAR_TYPES.keys()), + default='on', + help=( + 'Specify type of progress to be displayed [' + + '|'.join(BAR_TYPES.keys()) + '] (default: %default)' + ), +) # type: Callable[..., Option] + +log = partial( + Option, + "--log", "--log-file", "--local-log", + dest="log", + metavar="path", + help="Path to a verbose appending log." +) # type: Callable[..., Option] + +no_input = partial( + Option, + # Don't ask for input + '--no-input', + dest='no_input', + action='store_true', + default=False, + help=SUPPRESS_HELP +) # type: Callable[..., Option] + +proxy = partial( + Option, + '--proxy', + dest='proxy', + type='str', + default='', + help="Specify a proxy in the form [user:passwd@]proxy.server:port." 
+) # type: Callable[..., Option] + +retries = partial( + Option, + '--retries', + dest='retries', + type='int', + default=5, + help="Maximum number of retries each connection should attempt " + "(default %default times).", +) # type: Callable[..., Option] + +timeout = partial( + Option, + '--timeout', '--default-timeout', + metavar='sec', + dest='timeout', + type='float', + default=15, + help='Set the socket timeout (default %default seconds).', +) # type: Callable[..., Option] + +skip_requirements_regex = partial( + Option, + # A regex to be used to skip requirements + '--skip-requirements-regex', + dest='skip_requirements_regex', + type='str', + default='', + help=SUPPRESS_HELP, +) # type: Callable[..., Option] + + +def exists_action(): + # type: () -> Option + return Option( + # Option when path already exist + '--exists-action', + dest='exists_action', + type='choice', + choices=['s', 'i', 'w', 'b', 'a'], + default=[], + action='append', + metavar='action', + help="Default action when a path already exists: " + "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort).", + ) + + +cert = partial( + Option, + '--cert', + dest='cert', + type='str', + metavar='path', + help="Path to alternate CA bundle.", +) # type: Callable[..., Option] + +client_cert = partial( + Option, + '--client-cert', + dest='client_cert', + type='str', + default=None, + metavar='path', + help="Path to SSL client certificate, a single file containing the " + "private key and the certificate in PEM format.", +) # type: Callable[..., Option] + +index_url = partial( + Option, + '-i', '--index-url', '--pypi-url', + dest='index_url', + metavar='URL', + default=PyPI.simple_url, + help="Base URL of Python Package Index (default %default). " + "This should point to a repository compliant with PEP 503 " + "(the simple repository API) or a local directory laid out " + "in the same format.", +) # type: Callable[..., Option] + + +def extra_index_url(): + return Option( + '--extra-index-url', + dest='extra_index_urls', + metavar='URL', + action='append', + default=[], + help="Extra URLs of package indexes to use in addition to " + "--index-url. Should follow the same rules as " + "--index-url.", + ) + + +no_index = partial( + Option, + '--no-index', + dest='no_index', + action='store_true', + default=False, + help='Ignore package index (only looking at --find-links URLs instead).', +) # type: Callable[..., Option] + + +def find_links(): + # type: () -> Option + return Option( + '-f', '--find-links', + dest='find_links', + action='append', + default=[], + metavar='url', + help="If a url or path to an html file, then parse for links to " + "archives. If a local path or file:// url that's a directory, " + "then look for archives in the directory listing.", + ) + + +def trusted_host(): + # type: () -> Option + return Option( + "--trusted-host", + dest="trusted_hosts", + action="append", + metavar="HOSTNAME", + default=[], + help="Mark this host as trusted, even though it does not have valid " + "or any HTTPS.", + ) + + +def constraints(): + # type: () -> Option + return Option( + '-c', '--constraint', + dest='constraints', + action='append', + default=[], + metavar='file', + help='Constrain versions using the given constraints file. ' + 'This option can be used multiple times.' + ) + + +def requirements(): + # type: () -> Option + return Option( + '-r', '--requirement', + dest='requirements', + action='append', + default=[], + metavar='file', + help='Install from the given requirements file. ' + 'This option can be used multiple times.' 
+    )
+
+
+def editable():
+    # type: () -> Option
+    return Option(
+        '-e', '--editable',
+        dest='editables',
+        action='append',
+        default=[],
+        metavar='path/url',
+        help=('Install a project in editable mode (i.e. setuptools '
+              '"develop mode") from a local project path or a VCS url.'),
+    )
+
+
+src = partial(
+    Option,
+    '--src', '--source', '--source-dir', '--source-directory',
+    dest='src_dir',
+    metavar='dir',
+    default=src_prefix,
+    help='Directory to check out editable projects into. '
+    'The default in a virtualenv is "<venv path>/src". '
+    'The default for global installs is "<current dir>/src".'
+)  # type: Callable[..., Option]
+
+
+def _get_format_control(values, option):
+    # type: (Values, Option) -> Any
+    """Get a format_control object."""
+    return getattr(values, option.dest)
+
+
+def _handle_no_binary(option, opt_str, value, parser):
+    # type: (Option, str, str, OptionParser) -> None
+    existing = _get_format_control(parser.values, option)
+    FormatControl.handle_mutual_excludes(
+        value, existing.no_binary, existing.only_binary,
+    )
+
+
+def _handle_only_binary(option, opt_str, value, parser):
+    # type: (Option, str, str, OptionParser) -> None
+    existing = _get_format_control(parser.values, option)
+    FormatControl.handle_mutual_excludes(
+        value, existing.only_binary, existing.no_binary,
+    )
+
+
+def no_binary():
+    # type: () -> Option
+    format_control = FormatControl(set(), set())
+    return Option(
+        "--no-binary", dest="format_control", action="callback",
+        callback=_handle_no_binary, type="str",
+        default=format_control,
+        help="Do not use binary packages. Can be supplied multiple times, and "
+             "each time adds to the existing value. Accepts either :all: to "
+             "disable all binary packages, :none: to empty the set, or one or "
+             "more package names with commas between them. Note that some "
+             "packages are tricky to compile and may fail to install when "
+             "this option is used on them.",
+    )
+
+
+def only_binary():
+    # type: () -> Option
+    format_control = FormatControl(set(), set())
+    return Option(
+        "--only-binary", dest="format_control", action="callback",
+        callback=_handle_only_binary, type="str",
+        default=format_control,
+        help="Do not use source packages. Can be supplied multiple times, and "
+             "each time adds to the existing value. Accepts either :all: to "
+             "disable all source packages, :none: to empty the set, or one or "
+             "more package names with commas between them. Packages without "
+             "binary distributions will fail to install when this option is "
+             "used on them.",
+    )
+
+
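+# ---------------------------------------------------------------------------
+# Editor's note (illustration only, not part of the vendored pip source):
+# the module-level names above and below are option *factories*, not Option
+# instances. As the module docstring explains, each command instantiates them
+# per parse so that action='append' options cannot leak state between parses.
+# A hypothetical consumer would do, e.g.:
+#
+#   from pip._internal.cli import cmdoptions
+#   group = cmdoptions.make_option_group(cmdoptions.index_group, parser)
+#   parser.add_option_group(group)   # calls index_url(), no_index(), ...
+# ---------------------------------------------------------------------------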
+platform = partial(
+    Option,
+    '--platform',
+    dest='platform',
+    metavar='platform',
+    default=None,
+    help=("Only use wheels compatible with <platform>. "
+          "Defaults to the platform of the running system."),
+)  # type: Callable[..., Option]
+
+
+python_version = partial(
+    Option,
+    '--python-version',
+    dest='python_version',
+    metavar='python_version',
+    default=None,
+    help=("Only use wheels compatible with Python "
+          "interpreter version <version>. If not specified, then the "
+          "current system interpreter minor version is used. A major "
+          "version (e.g. '2') can be specified to match all "
+          "minor revs of that major version. A minor version "
+          "(e.g. '34') can also be specified."),
+)  # type: Callable[..., Option]
+
+
+implementation = partial(
+    Option,
+    '--implementation',
+    dest='implementation',
+    metavar='implementation',
+    default=None,
+    help=("Only use wheels compatible with Python "
+          "implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
+          " or 'ip'. If not specified, then the current "
+          "interpreter implementation is used. Use 'py' to force "
+          "implementation-agnostic wheels."),
+)  # type: Callable[..., Option]
+
+
+abi = partial(
+    Option,
+    '--abi',
+    dest='abi',
+    metavar='abi',
+    default=None,
+    help=("Only use wheels compatible with Python "
+          "abi <abi>, e.g. 'pypy_41'. If not specified, then the "
+          "current interpreter abi tag is used. Generally "
+          "you will need to specify --implementation, "
+          "--platform, and --python-version when using "
+          "this option."),
+)  # type: Callable[..., Option]
+
+
+def prefer_binary():
+    # type: () -> Option
+    return Option(
+        "--prefer-binary",
+        dest="prefer_binary",
+        action="store_true",
+        default=False,
+        help="Prefer older binary packages over newer source packages."
+    )
+
+
+cache_dir = partial(
+    Option,
+    "--cache-dir",
+    dest="cache_dir",
+    default=USER_CACHE_DIR,
+    metavar="dir",
+    help="Store the cache data in <dir>."
+)  # type: Callable[..., Option]
+
+
+def no_cache_dir_callback(option, opt, value, parser):
+    """
+    Process a value provided for the --no-cache-dir option.
+
+    This is an optparse.Option callback for the --no-cache-dir option.
+    """
+    # The value argument will be None if --no-cache-dir is passed via the
+    # command-line, since the option doesn't accept arguments. However,
+    # the value can be non-None if the option is triggered e.g. by an
+    # environment variable, like PIP_NO_CACHE_DIR=true.
+    if value is not None:
+        # Then parse the string value to get argument error-checking.
+        try:
+            strtobool(value)
+        except ValueError as exc:
+            raise_option_error(parser, option=option, msg=str(exc))
+
+    # Originally, setting PIP_NO_CACHE_DIR to a value that strtobool()
+    # converted to 0 (like "false" or "no") caused cache_dir to be disabled
+    # rather than enabled (logic would say the latter). Thus, we disable
+    # the cache directory not just on values that parse to True, but (for
+    # backwards compatibility reasons) also on values that parse to False.
+    # In other words, always set it to False if the option is provided in
+    # some (valid) form.
+    parser.values.cache_dir = False
+
+
+no_cache = partial(
+    Option,
+    "--no-cache-dir",
+    dest="cache_dir",
+    action="callback",
+    callback=no_cache_dir_callback,
+    help="Disable the cache.",
+)  # type: Callable[..., Option]
+
+no_deps = partial(
+    Option,
+    '--no-deps', '--no-dependencies',
+    dest='ignore_dependencies',
+    action='store_true',
+    default=False,
+    help="Don't install package dependencies.",
+)  # type: Callable[..., Option]
+
+build_dir = partial(
+    Option,
+    '-b', '--build', '--build-dir', '--build-directory',
+    dest='build_dir',
+    metavar='dir',
+    help='Directory to unpack packages into and build in. Note that '
+         'an initial build still takes place in a temporary directory. '
+         'The location of temporary directories can be controlled by setting '
+         'the TMPDIR environment variable (TEMP on Windows) appropriately. '
+         'When passed, build directories are not cleaned in case of failures.'
+)  # type: Callable[..., Option]
+
+ignore_requires_python = partial(
+    Option,
+    '--ignore-requires-python',
+    dest='ignore_requires_python',
+    action='store_true',
+    help='Ignore the Requires-Python information.'
+)  # type: Callable[..., Option]
+
+no_build_isolation = partial(
+    Option,
+    '--no-build-isolation',
+    dest='build_isolation',
+    action='store_false',
+    default=True,
+    help='Disable isolation when building a modern source distribution. 
' + 'Build dependencies specified by PEP 518 must be already installed ' + 'if this option is used.' +) # type: Callable[..., Option] + + +def no_use_pep517_callback(option, opt, value, parser): + """ + Process a value provided for the --no-use-pep517 option. + + This is an optparse.Option callback for the no_use_pep517 option. + """ + # Since --no-use-pep517 doesn't accept arguments, the value argument + # will be None if --no-use-pep517 is passed via the command-line. + # However, the value can be non-None if the option is triggered e.g. + # by an environment variable, for example "PIP_NO_USE_PEP517=true". + if value is not None: + msg = """A value was passed for --no-use-pep517, + probably using either the PIP_NO_USE_PEP517 environment variable + or the "no-use-pep517" config file option. Use an appropriate value + of the PIP_USE_PEP517 environment variable or the "use-pep517" + config file option instead. + """ + raise_option_error(parser, option=option, msg=msg) + + # Otherwise, --no-use-pep517 was passed via the command-line. + parser.values.use_pep517 = False + + +use_pep517 = partial( + Option, + '--use-pep517', + dest='use_pep517', + action='store_true', + default=None, + help='Use PEP 517 for building source distributions ' + '(use --no-use-pep517 to force legacy behaviour).' +) # type: Any + +no_use_pep517 = partial( + Option, + '--no-use-pep517', + dest='use_pep517', + action='callback', + callback=no_use_pep517_callback, + default=None, + help=SUPPRESS_HELP +) # type: Any + +install_options = partial( + Option, + '--install-option', + dest='install_options', + action='append', + metavar='options', + help="Extra arguments to be supplied to the setup.py install " + "command (use like --install-option=\"--install-scripts=/usr/local/" + "bin\"). Use multiple --install-option options to pass multiple " + "options to setup.py install. If you are using an option with a " + "directory path, be sure to use absolute path.", +) # type: Callable[..., Option] + +global_options = partial( + Option, + '--global-option', + dest='global_options', + action='append', + metavar='options', + help="Extra global options to be supplied to the setup.py " + "call before the install command.", +) # type: Callable[..., Option] + +no_clean = partial( + Option, + '--no-clean', + action='store_true', + default=False, + help="Don't clean up build directories." +) # type: Callable[..., Option] + +pre = partial( + Option, + '--pre', + action='store_true', + default=False, + help="Include pre-release and development versions. By default, " + "pip only finds stable versions.", +) # type: Callable[..., Option] + +disable_pip_version_check = partial( + Option, + "--disable-pip-version-check", + dest="disable_pip_version_check", + action="store_true", + default=False, + help="Don't periodically check PyPI to determine whether a new version " + "of pip is available for download. 
Implied with --no-index.", +) # type: Callable[..., Option] + + +# Deprecated, Remove later +always_unzip = partial( + Option, + '-Z', '--always-unzip', + dest='always_unzip', + action='store_true', + help=SUPPRESS_HELP, +) # type: Callable[..., Option] + + +def _merge_hash(option, opt_str, value, parser): + # type: (Option, str, str, OptionParser) -> None + """Given a value spelled "algo:digest", append the digest to a list + pointed to in a dict by the algo name.""" + if not parser.values.hashes: + parser.values.hashes = {} # type: ignore + try: + algo, digest = value.split(':', 1) + except ValueError: + parser.error('Arguments to %s must be a hash name ' + 'followed by a value, like --hash=sha256:abcde...' % + opt_str) + if algo not in STRONG_HASHES: + parser.error('Allowed hash algorithms for %s are %s.' % + (opt_str, ', '.join(STRONG_HASHES))) + parser.values.hashes.setdefault(algo, []).append(digest) + + +hash = partial( + Option, + '--hash', + # Hash values eventually end up in InstallRequirement.hashes due to + # __dict__ copying in process_line(). + dest='hashes', + action='callback', + callback=_merge_hash, + type='string', + help="Verify that the package's archive matches this " + 'hash before installing. Example: --hash=sha256:abcdef...', +) # type: Callable[..., Option] + + +require_hashes = partial( + Option, + '--require-hashes', + dest='require_hashes', + action='store_true', + default=False, + help='Require a hash to check each requirement against, for ' + 'repeatable installs. This option is implied when any package in a ' + 'requirements file has a --hash option.', +) # type: Callable[..., Option] + + +########## +# groups # +########## + +general_group = { + 'name': 'General Options', + 'options': [ + help_, + isolated_mode, + require_virtualenv, + verbose, + version, + quiet, + log, + no_input, + proxy, + retries, + timeout, + skip_requirements_regex, + exists_action, + trusted_host, + cert, + client_cert, + cache_dir, + no_cache, + disable_pip_version_check, + no_color, + ] +} # type: Dict[str, Any] + +index_group = { + 'name': 'Package Index Options', + 'options': [ + index_url, + extra_index_url, + no_index, + find_links, + ] +} # type: Dict[str, Any] diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/cmdoptions.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/cmdoptions.pyc new file mode 100644 index 0000000..8598d2b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/cmdoptions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/main_parser.py b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/main_parser.py new file mode 100644 index 0000000..b17c749 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/main_parser.py @@ -0,0 +1,104 @@ +"""A single place for constructing and exposing the main parser +""" + +import os +import sys + +from pip import __version__ +from pip._internal.cli import cmdoptions +from pip._internal.cli.parser import ( + ConfigOptionParser, UpdatingDefaultsHelpFormatter, +) +from pip._internal.commands import ( + commands_dict, get_similar_commands, get_summaries, +) +from pip._internal.exceptions import CommandError +from pip._internal.utils.misc import get_prog +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Tuple, List # noqa: F401 + + +__all__ = ["create_main_parser", "parse_command"] + + +def create_main_parser(): + # type: () -> 
ConfigOptionParser
+    """Creates and returns the main parser for pip's CLI
+    """
+
+    parser_kw = {
+        'usage': '\n%prog <command> [options]',
+        'add_help_option': False,
+        'formatter': UpdatingDefaultsHelpFormatter(),
+        'name': 'global',
+        'prog': get_prog(),
+    }
+
+    parser = ConfigOptionParser(**parser_kw)
+    parser.disable_interspersed_args()
+
+    pip_pkg_dir = os.path.abspath(os.path.join(
+        os.path.dirname(__file__), "..", "..",
+    ))
+    parser.version = 'pip %s from %s (python %s)' % (
+        __version__, pip_pkg_dir, sys.version[:3],
+    )
+
+    # add the general options
+    gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
+    parser.add_option_group(gen_opts)
+
+    # so the help formatter knows
+    parser.main = True  # type: ignore
+
+    # create command listing for description
+    command_summaries = get_summaries()
+    description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
+    parser.description = '\n'.join(description)
+
+    return parser
+
+
+def parse_command(args):
+    # type: (List[str]) -> Tuple[str, List[str]]
+    parser = create_main_parser()
+
+    # Note: parser calls disable_interspersed_args(), so the result of this
+    # call is to split the initial args into the general options before the
+    # subcommand and everything else.
+    # For example:
+    #  args: ['--timeout=5', 'install', '--user', 'INITools']
+    #  general_options: ['--timeout=5']
+    #  args_else: ['install', '--user', 'INITools']
+    general_options, args_else = parser.parse_args(args)
+
+    # --version
+    if general_options.version:
+        sys.stdout.write(parser.version)  # type: ignore
+        sys.stdout.write(os.linesep)
+        sys.exit()
+
+    # pip || pip help -> print_help()
+    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
+        parser.print_help()
+        sys.exit()
+
+    # the subcommand name
+    cmd_name = args_else[0]
+
+    if cmd_name not in commands_dict:
+        guess = get_similar_commands(cmd_name)
+
+        msg = ['unknown command "%s"' % cmd_name]
+        if guess:
+            msg.append('maybe you meant "%s"' % guess)
+
+        raise CommandError(' - '.join(msg))
+
+    # all the args without the subcommand
+    cmd_args = args[:]
+    cmd_args.remove(cmd_name)
+
+    return cmd_name, cmd_args
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/main_parser.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/main_parser.pyc
new file mode 100644
index 0000000..888d226
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/main_parser.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/parser.py b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/parser.py
new file mode 100644
index 0000000..e1eaac4
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/parser.py
@@ -0,0 +1,261 @@
+"""Base option parser setup"""
+from __future__ import absolute_import
+
+import logging
+import optparse
+import sys
+import textwrap
+from distutils.util import strtobool
+
+from pip._vendor.six import string_types
+
+from pip._internal.cli.status_codes import UNKNOWN_ERROR
+from pip._internal.configuration import Configuration, ConfigurationError
+from pip._internal.utils.compat import get_terminal_size
+
+logger = logging.getLogger(__name__)
+
+
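+# ---------------------------------------------------------------------------
+# Editor's note (illustration only, not part of the vendored pip source):
+# PrettyHelpFormatter below renders an option's strings roughly like this,
+# for a hypothetical value-taking option with short opt '-f', long opt
+# '--format', dest 'format' and no explicit metavar:
+#
+#   _format_option_strings(option, ' <%s>', ', ')  ->  "-f, --format <format>"
+# ---------------------------------------------------------------------------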
get_terminal_size()[0] - 2 + optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs) + + def format_option_strings(self, option): + return self._format_option_strings(option, ' <%s>', ', ') + + def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '): + """ + Return a comma-separated list of option strings and metavars. + + :param option: tuple of (short opt, long opt), e.g: ('-f', '--format') + :param mvarfmt: metavar format string - evaluated as mvarfmt % metavar + :param optsep: separator + """ + opts = [] + + if option._short_opts: + opts.append(option._short_opts[0]) + if option._long_opts: + opts.append(option._long_opts[0]) + if len(opts) > 1: + opts.insert(1, optsep) + + if option.takes_value(): + metavar = option.metavar or option.dest.lower() + opts.append(mvarfmt % metavar.lower()) + + return ''.join(opts) + + def format_heading(self, heading): + if heading == 'Options': + return '' + return heading + ':\n' + + def format_usage(self, usage): + """ + Ensure there is only one newline between usage and the first heading + if there is no description. + """ + msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ") + return msg + + def format_description(self, description): + # leave full control over description to us + if description: + if hasattr(self.parser, 'main'): + label = 'Commands' + else: + label = 'Description' + # some doc strings have initial newlines, some don't + description = description.lstrip('\n') + # some doc strings have final newlines and spaces, some don't + description = description.rstrip() + # dedent, then reindent + description = self.indent_lines(textwrap.dedent(description), " ") + description = '%s:\n%s\n' % (label, description) + return description + else: + return '' + + def format_epilog(self, epilog): + # leave full control over epilog to us + if epilog: + return epilog + else: + return '' + + def indent_lines(self, text, indent): + new_lines = [indent + line for line in text.split('\n')] + return "\n".join(new_lines) + + +class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): + """Custom help formatter for use in ConfigOptionParser. + + This is updates the defaults before expanding them, allowing + them to show up correctly in the help listing. 
+ """ + + def expand_default(self, option): + if self.parser is not None: + self.parser._update_defaults(self.parser.defaults) + return optparse.IndentedHelpFormatter.expand_default(self, option) + + +class CustomOptionParser(optparse.OptionParser): + + def insert_option_group(self, idx, *args, **kwargs): + """Insert an OptionGroup at a given position.""" + group = self.add_option_group(*args, **kwargs) + + self.option_groups.pop() + self.option_groups.insert(idx, group) + + return group + + @property + def option_list_all(self): + """Get a list of all options, including those in option groups.""" + res = self.option_list[:] + for i in self.option_groups: + res.extend(i.option_list) + + return res + + +class ConfigOptionParser(CustomOptionParser): + """Custom option parser which updates its defaults by checking the + configuration files and environmental variables""" + + def __init__(self, *args, **kwargs): + self.name = kwargs.pop('name') + + isolated = kwargs.pop("isolated", False) + self.config = Configuration(isolated) + + assert self.name + optparse.OptionParser.__init__(self, *args, **kwargs) + + def check_default(self, option, key, val): + try: + return option.check_value(key, val) + except optparse.OptionValueError as exc: + print("An error occurred during configuration: %s" % exc) + sys.exit(3) + + def _get_ordered_configuration_items(self): + # Configuration gives keys in an unordered manner. Order them. + override_order = ["global", self.name, ":env:"] + + # Pool the options into different groups + section_items = {name: [] for name in override_order} + for section_key, val in self.config.items(): + # ignore empty values + if not val: + logger.debug( + "Ignoring configuration key '%s' as it's value is empty.", + section_key + ) + continue + + section, key = section_key.split(".", 1) + if section in override_order: + section_items[section].append((key, val)) + + # Yield each group in their override order + for section in override_order: + for key, val in section_items[section]: + yield key, val + + def _update_defaults(self, defaults): + """Updates the given defaults with values from the config files and + the environ. Does a little special handling for certain types of + options (lists).""" + + # Accumulate complex default state. + self.values = optparse.Values(self.defaults) + late_eval = set() + # Then set the options with those values + for key, val in self._get_ordered_configuration_items(): + # '--' because configuration supports only long names + option = self.get_option('--' + key) + + # Ignore options not present in this parser. E.g. non-globals put + # in [global] by users that want them to apply to all applicable + # commands. 
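+            # Illustrative precedence note (values here are hypothetical):
+            # because _get_ordered_configuration_items() yields the "global"
+            # section first and ":env:" last, a `timeout = 60` under [global]
+            # and a PIP_TIMEOUT=10 environment variable both pass through this
+            # loop, and the env value, arriving later, overwrites the earlier
+            # one in `defaults`.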
+            if option is None:
+                continue
+
+            if option.action in ('store_true', 'store_false', 'count'):
+                try:
+                    val = strtobool(val)
+                except ValueError:
+                    error_msg = invalid_config_error_message(
+                        option.action, key, val
+                    )
+                    self.error(error_msg)
+
+            elif option.action == 'append':
+                val = val.split()
+                val = [self.check_default(option, key, v) for v in val]
+            elif option.action == 'callback':
+                late_eval.add(option.dest)
+                opt_str = option.get_opt_string()
+                val = option.convert_value(opt_str, val)
+                # From take_action
+                args = option.callback_args or ()
+                kwargs = option.callback_kwargs or {}
+                option.callback(option, opt_str, val, self, *args, **kwargs)
+            else:
+                val = self.check_default(option, key, val)
+
+            defaults[option.dest] = val
+
+        for key in late_eval:
+            defaults[key] = getattr(self.values, key)
+        self.values = None
+        return defaults
+
+    def get_default_values(self):
+        """Overriding to make updating the defaults after instantiation of
+        the option parser possible, _update_defaults() does the dirty work."""
+        if not self.process_default_values:
+            # Old, pre-Optik 1.5 behaviour.
+            return optparse.Values(self.defaults)
+
+        # Load the configuration, or error out in case of an error
+        try:
+            self.config.load()
+        except ConfigurationError as err:
+            self.exit(UNKNOWN_ERROR, str(err))
+
+        defaults = self._update_defaults(self.defaults.copy())  # ours
+        for option in self._get_all_options():
+            default = defaults.get(option.dest)
+            if isinstance(default, string_types):
+                opt_str = option.get_opt_string()
+                defaults[option.dest] = option.check_value(opt_str, default)
+        return optparse.Values(defaults)
+
+    def error(self, msg):
+        self.print_usage(sys.stderr)
+        self.exit(UNKNOWN_ERROR, "%s\n" % msg)
+
+
+def invalid_config_error_message(action, key, val):
+    """Returns a better error message when invalid configuration option
+    is provided."""
+    if action in ('store_true', 'store_false'):
+        return ("{0} is not a valid value for {1} option, "
+                "please specify a boolean value like yes/no, "
+                "true/false or 1/0 instead.").format(val, key)
+
+    return ("{0} is not a valid value for {1} option, "
+            "please specify a numerical value like 1/0 "
+            "instead.").format(val, key)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/parser.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/parser.pyc
new file mode 100644
index 0000000..ce7c326
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/parser.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/status_codes.py b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/status_codes.py
new file mode 100644
index 0000000..275360a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/status_codes.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+
+SUCCESS = 0
+ERROR = 1
+UNKNOWN_ERROR = 2
+VIRTUALENV_NOT_FOUND = 3
+PREVIOUS_BUILD_DIR_ERROR = 4
+NO_MATCHES_FOUND = 23
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/cli/status_codes.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/status_codes.pyc
new file mode 100644
index 0000000..b71947d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/cli/status_codes.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/__init__.py
new file mode 100644
index 0000000..c7d1da3
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/__init__.py
@@ -0,0 +1,79 @@
+"""
+Package containing all pip commands
+"""
+from __future__ import absolute_import
+
+from pip._internal.commands.completion import CompletionCommand
+from pip._internal.commands.configuration import ConfigurationCommand
+from pip._internal.commands.download import DownloadCommand
+from pip._internal.commands.freeze import FreezeCommand
+from pip._internal.commands.hash import HashCommand
+from pip._internal.commands.help import HelpCommand
+from pip._internal.commands.list import ListCommand
+from pip._internal.commands.check import CheckCommand
+from pip._internal.commands.search import SearchCommand
+from pip._internal.commands.show import ShowCommand
+from pip._internal.commands.install import InstallCommand
+from pip._internal.commands.uninstall import UninstallCommand
+from pip._internal.commands.wheel import WheelCommand
+
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import List, Type  # noqa: F401
+    from pip._internal.cli.base_command import Command  # noqa: F401
+
+commands_order = [
+    InstallCommand,
+    DownloadCommand,
+    UninstallCommand,
+    FreezeCommand,
+    ListCommand,
+    ShowCommand,
+    CheckCommand,
+    ConfigurationCommand,
+    SearchCommand,
+    WheelCommand,
+    HashCommand,
+    CompletionCommand,
+    HelpCommand,
+]  # type: List[Type[Command]]
+
+commands_dict = {c.name: c for c in commands_order}
+
+
+def get_summaries(ordered=True):
+    """Yields sorted (command name, command summary) tuples."""
+
+    if ordered:
+        cmditems = _sort_commands(commands_dict, commands_order)
+    else:
+        cmditems = commands_dict.items()
+
+    for name, command_class in cmditems:
+        yield (name, command_class.summary)
+
+
+def get_similar_commands(name):
+    """Command name auto-correct."""
+    from difflib import get_close_matches
+
+    name = name.lower()
+
+    close_commands = get_close_matches(name, commands_dict.keys())
+
+    if close_commands:
+        return close_commands[0]
+    else:
+        return False
+
+
+def _sort_commands(cmddict, order):
+    def keyfn(key):
+        try:
+            return order.index(key[1])
+        except ValueError:
+            # unordered items should come last
+            return 0xff
+
+    return sorted(cmddict.items(), key=keyfn)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/__init__.pyc
new file mode 100644
index 0000000..d56d5a3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/check.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/check.py
new file mode 100644
index 0000000..801cecc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/check.py
@@ -0,0 +1,41 @@
+import logging
+
+from pip._internal.cli.base_command import Command
+from pip._internal.operations.check import (
+    check_package_set, create_package_set_from_installed,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class CheckCommand(Command):
+    """Verify installed packages have compatible dependencies."""
+    name = 'check'
+    usage = """
+        %prog [options]"""
+    summary = 'Verify installed packages have compatible dependencies.'
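+    # Rough sketch of the flow run() implements below, using the helpers
+    # imported above from pip._internal.operations.check:
+    #
+    #   package_set, _ = create_package_set_from_installed()
+    #   missing, conflicting = check_package_set(package_set)
+    #
+    # `missing` maps a project to its unmet requirements, and `conflicting`
+    # maps it to (dep_name, dep_version, req) triples that get reported.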
+
+    def run(self, options, args):
+        package_set, parsing_probs = create_package_set_from_installed()
+        missing, conflicting = check_package_set(package_set)
+
+        for project_name in missing:
+            version = package_set[project_name].version
+            for dependency in missing[project_name]:
+                logger.info(
+                    "%s %s requires %s, which is not installed.",
+                    project_name, version, dependency[0],
+                )
+
+        for project_name in conflicting:
+            version = package_set[project_name].version
+            for dep_name, dep_version, req in conflicting[project_name]:
+                logger.info(
+                    "%s %s has requirement %s, but you have %s %s.",
+                    project_name, version, req, dep_name, dep_version,
+                )
+
+        if missing or conflicting or parsing_probs:
+            return 1
+        else:
+            logger.info("No broken requirements found.")
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/check.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/check.pyc
new file mode 100644
index 0000000..4a2e6b4
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/check.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/completion.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/completion.py
new file mode 100644
index 0000000..2fcdd39
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/completion.py
@@ -0,0 +1,94 @@
+from __future__ import absolute_import
+
+import sys
+import textwrap
+
+from pip._internal.cli.base_command import Command
+from pip._internal.utils.misc import get_prog
+
+BASE_COMPLETION = """
+# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
+"""
+
+COMPLETION_SCRIPTS = {
+    'bash': """
+        _pip_completion()
+        {
+            COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
+                           COMP_CWORD=$COMP_CWORD \\
+                           PIP_AUTO_COMPLETE=1 $1 ) )
+        }
+        complete -o default -F _pip_completion %(prog)s
+    """,
+    'zsh': """
+        function _pip_completion {
+          local words cword
+          read -Ac words
+          read -cn cword
+          reply=( $( COMP_WORDS="$words[*]" \\
+                     COMP_CWORD=$(( cword-1 )) \\
+                     PIP_AUTO_COMPLETE=1 $words[1] ) )
+        }
+        compctl -K _pip_completion %(prog)s
+    """,
+    'fish': """
+        function __fish_complete_pip
+            set -lx COMP_WORDS (commandline -o) ""
+            set -lx COMP_CWORD ( \\
+                math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
+            )
+            set -lx PIP_AUTO_COMPLETE 1
+            string split \\ -- (eval $COMP_WORDS[1])
+        end
+        complete -fa "(__fish_complete_pip)" -c %(prog)s
+    """,
+}
+
+
+class CompletionCommand(Command):
+    """A helper command to be used for command completion."""
+    name = 'completion'
+    summary = 'A helper command used for command completion.'
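+    # Presumably wired up once by the user's shell, e.g. for bash:
+    #
+    #   pip completion --bash >> ~/.bashrc
+    #
+    # The emitted script then re-invokes pip with PIP_AUTO_COMPLETE=1 and
+    # COMP_WORDS/COMP_CWORD set, as in the templates above.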
+    ignore_require_venv = True
+
+    def __init__(self, *args, **kw):
+        super(CompletionCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(
+            '--bash', '-b',
+            action='store_const',
+            const='bash',
+            dest='shell',
+            help='Emit completion code for bash')
+        cmd_opts.add_option(
+            '--zsh', '-z',
+            action='store_const',
+            const='zsh',
+            dest='shell',
+            help='Emit completion code for zsh')
+        cmd_opts.add_option(
+            '--fish', '-f',
+            action='store_const',
+            const='fish',
+            dest='shell',
+            help='Emit completion code for fish')
+
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def run(self, options, args):
+        """Prints the completion code of the given shell"""
+        shells = COMPLETION_SCRIPTS.keys()
+        shell_options = ['--' + shell for shell in sorted(shells)]
+        if options.shell in shells:
+            script = textwrap.dedent(
+                COMPLETION_SCRIPTS.get(options.shell, '') % {
+                    'prog': get_prog(),
+                }
+            )
+            print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
+        else:
+            sys.stderr.write(
+                'ERROR: You must pass %s\n' % ' or '.join(shell_options)
+            )
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/completion.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/completion.pyc
new file mode 100644
index 0000000..e5830d0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/completion.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/configuration.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/configuration.py
new file mode 100644
index 0000000..826c08d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/configuration.py
@@ -0,0 +1,227 @@
+import logging
+import os
+import subprocess
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.configuration import Configuration, kinds
+from pip._internal.exceptions import PipError
+from pip._internal.locations import venv_config_file
+from pip._internal.utils.misc import get_prog
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigurationCommand(Command):
+    """Manage local and global configuration.
+
+    Subcommands:
+
+    list: List the active configuration (or from the file specified)
+    edit: Edit the configuration file in an editor
+    get: Get the value associated with name
+    set: Set the name=value
+    unset: Unset the value associated with name
+
+    If none of --user, --global and --venv are passed, a virtual
+    environment configuration file is used if one is active and the file
+    exists. Otherwise, all modifications happen to the user file by
+    default.
+    """
+
+    name = 'config'
+    usage = """
+        %prog [<file-option>] list
+        %prog [<file-option>] [--editor <editor-path>] edit
+
+        %prog [<file-option>] get name
+        %prog [<file-option>] set name value
+        %prog [<file-option>] unset name
+    """
+
+    summary = "Manage local and global configuration."
+
+    def __init__(self, *args, **kwargs):
+        super(ConfigurationCommand, self).__init__(*args, **kwargs)
+
+        self.configuration = None
+
+        self.cmd_opts.add_option(
+            '--editor',
+            dest='editor',
+            action='store',
+            default=None,
+            help=(
+                'Editor to use to edit the file. Uses VISUAL or EDITOR '
+                'environment variables if not provided.'
+ ) + ) + + self.cmd_opts.add_option( + '--global', + dest='global_file', + action='store_true', + default=False, + help='Use the system-wide configuration file only' + ) + + self.cmd_opts.add_option( + '--user', + dest='user_file', + action='store_true', + default=False, + help='Use the user configuration file only' + ) + + self.cmd_opts.add_option( + '--venv', + dest='venv_file', + action='store_true', + default=False, + help='Use the virtualenv configuration file only' + ) + + self.parser.insert_option_group(0, self.cmd_opts) + + def run(self, options, args): + handlers = { + "list": self.list_values, + "edit": self.open_in_editor, + "get": self.get_name, + "set": self.set_name_value, + "unset": self.unset_name + } + + # Determine action + if not args or args[0] not in handlers: + logger.error("Need an action ({}) to perform.".format( + ", ".join(sorted(handlers))) + ) + return ERROR + + action = args[0] + + # Determine which configuration files are to be loaded + # Depends on whether the command is modifying. + try: + load_only = self._determine_file( + options, need_value=(action in ["get", "set", "unset", "edit"]) + ) + except PipError as e: + logger.error(e.args[0]) + return ERROR + + # Load a new configuration + self.configuration = Configuration( + isolated=options.isolated_mode, load_only=load_only + ) + self.configuration.load() + + # Error handling happens here, not in the action-handlers. + try: + handlers[action](options, args[1:]) + except PipError as e: + logger.error(e.args[0]) + return ERROR + + return SUCCESS + + def _determine_file(self, options, need_value): + file_options = { + kinds.USER: options.user_file, + kinds.GLOBAL: options.global_file, + kinds.VENV: options.venv_file + } + + if sum(file_options.values()) == 0: + if not need_value: + return None + # Default to user, unless there's a virtualenv file. + elif os.path.exists(venv_config_file): + return kinds.VENV + else: + return kinds.USER + elif sum(file_options.values()) == 1: + # There's probably a better expression for this. + return [key for key in file_options if file_options[key]][0] + + raise PipError( + "Need exactly one file to operate upon " + "(--user, --venv, --global) to perform." + ) + + def list_values(self, options, args): + self._get_n_args(args, "list", n=0) + + for key, value in sorted(self.configuration.items()): + logger.info("%s=%r", key, value) + + def get_name(self, options, args): + key = self._get_n_args(args, "get [name]", n=1) + value = self.configuration.get_value(key) + + logger.info("%s", value) + + def set_name_value(self, options, args): + key, value = self._get_n_args(args, "set [name] [value]", n=2) + self.configuration.set_value(key, value) + + self._save_configuration() + + def unset_name(self, options, args): + key = self._get_n_args(args, "unset [name]", n=1) + self.configuration.unset_value(key) + + self._save_configuration() + + def open_in_editor(self, options, args): + editor = self._determine_editor(options) + + fname = self.configuration.get_file_to_edit() + if fname is None: + raise PipError("Could not determine appropriate file.") + + try: + subprocess.check_call([editor, fname]) + except subprocess.CalledProcessError as e: + raise PipError( + "Editor Subprocess exited with exit code {}" + .format(e.returncode) + ) + + def _get_n_args(self, args, example, n): + """Helper to make sure the command got the right number of arguments + """ + if len(args) != n: + msg = ( + 'Got unexpected number of arguments, expected {}. 
'
+                '(example: "{} config {}")'
+            ).format(n, get_prog(), example)
+            raise PipError(msg)
+
+        if n == 1:
+            return args[0]
+        else:
+            return args
+
+    def _save_configuration(self):
+        # We successfully ran a modifying command. Need to save the
+        # configuration.
+        try:
+            self.configuration.save()
+        except Exception:
+            logger.error(
+                "Unable to save configuration. Please report this as a bug.",
+                exc_info=1
+            )
+            raise PipError("Internal Error.")
+
+    def _determine_editor(self, options):
+        if options.editor is not None:
+            return options.editor
+        elif "VISUAL" in os.environ:
+            return os.environ["VISUAL"]
+        elif "EDITOR" in os.environ:
+            return os.environ["EDITOR"]
+        else:
+            raise PipError("Could not determine editor to use.")
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/configuration.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/configuration.pyc
new file mode 100644
index 0000000..deb362c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/configuration.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/download.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/download.py
new file mode 100644
index 0000000..a57e4bc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/download.py
@@ -0,0 +1,176 @@
+from __future__ import absolute_import
+
+import logging
+import os
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import RequirementCommand
+from pip._internal.operations.prepare import RequirementPreparer
+from pip._internal.req import RequirementSet
+from pip._internal.req.req_tracker import RequirementTracker
+from pip._internal.resolve import Resolver
+from pip._internal.utils.filesystem import check_path_owner
+from pip._internal.utils.misc import ensure_dir, normalize_path
+from pip._internal.utils.temp_dir import TempDirectory
+
+logger = logging.getLogger(__name__)
+
+
+class DownloadCommand(RequirementCommand):
+    """
+    Download packages from:
+
+    - PyPI (and other indexes) using requirement specifiers.
+    - VCS project urls.
+    - Local project directories.
+    - Local or remote source archives.
+
+    pip also supports downloading from "requirements files", which provide
+    an easy way to specify a whole environment to be downloaded.
+    """
+    name = 'download'
+
+    usage = """
+      %prog [options] <requirement specifier> [package-index-options] ...
+      %prog [options] -r <requirements file> [package-index-options] ...
+      %prog [options] <vcs project url> ...
+      %prog [options] <local project path> ...
+      %prog [options] <archive url/path> ..."""
+
+    summary = 'Download packages.'
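+    # Typical invocations (file and directory names are illustrative):
+    #
+    #   pip download requests
+    #   pip download -r requirements.txt -d ./wheelhouse
+    #
+    # Resolution works as for `pip install`, but the fetched artifacts are
+    # only saved under --dest instead of being installed.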
+
+    def __init__(self, *args, **kw):
+        super(DownloadCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(cmdoptions.constraints())
+        cmd_opts.add_option(cmdoptions.requirements())
+        cmd_opts.add_option(cmdoptions.build_dir())
+        cmd_opts.add_option(cmdoptions.no_deps())
+        cmd_opts.add_option(cmdoptions.global_options())
+        cmd_opts.add_option(cmdoptions.no_binary())
+        cmd_opts.add_option(cmdoptions.only_binary())
+        cmd_opts.add_option(cmdoptions.prefer_binary())
+        cmd_opts.add_option(cmdoptions.src())
+        cmd_opts.add_option(cmdoptions.pre())
+        cmd_opts.add_option(cmdoptions.no_clean())
+        cmd_opts.add_option(cmdoptions.require_hashes())
+        cmd_opts.add_option(cmdoptions.progress_bar())
+        cmd_opts.add_option(cmdoptions.no_build_isolation())
+        cmd_opts.add_option(cmdoptions.use_pep517())
+        cmd_opts.add_option(cmdoptions.no_use_pep517())
+
+        cmd_opts.add_option(
+            '-d', '--dest', '--destination-dir', '--destination-directory',
+            dest='download_dir',
+            metavar='dir',
+            default=os.curdir,
+            help=("Download packages into <dir>."),
+        )
+
+        cmd_opts.add_option(cmdoptions.platform())
+        cmd_opts.add_option(cmdoptions.python_version())
+        cmd_opts.add_option(cmdoptions.implementation())
+        cmd_opts.add_option(cmdoptions.abi())
+
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group,
+            self.parser,
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def run(self, options, args):
+        options.ignore_installed = True
+        # editable doesn't really make sense for `pip download`, but the bowels
+        # of the RequirementSet code require that property.
+        options.editables = []
+
+        if options.python_version:
+            python_versions = [options.python_version]
+        else:
+            python_versions = None
+
+        cmdoptions.check_dist_restriction(options)
+
+        options.src_dir = os.path.abspath(options.src_dir)
+        options.download_dir = normalize_path(options.download_dir)
+
+        ensure_dir(options.download_dir)
+
+        with self._build_session(options) as session:
+            finder = self._build_package_finder(
+                options=options,
+                session=session,
+                platform=options.platform,
+                python_versions=python_versions,
+                abi=options.abi,
+                implementation=options.implementation,
+            )
+            build_delete = (not (options.no_clean or options.build_dir))
+            if options.cache_dir and not check_path_owner(options.cache_dir):
+                logger.warning(
+                    "The directory '%s' or its parent directory is not owned "
+                    "by the current user and caching wheels has been "
+                    "disabled. check the permissions and owner of that "
+                    "directory. 
If executing pip with sudo, you may want "
+                    "sudo's -H flag.",
+                    options.cache_dir,
+                )
+                options.cache_dir = None
+
+            with RequirementTracker() as req_tracker, TempDirectory(
+                options.build_dir, delete=build_delete, kind="download"
+            ) as directory:
+
+                requirement_set = RequirementSet(
+                    require_hashes=options.require_hashes,
+                )
+                self.populate_requirement_set(
+                    requirement_set,
+                    args,
+                    options,
+                    finder,
+                    session,
+                    self.name,
+                    None
+                )
+
+                preparer = RequirementPreparer(
+                    build_dir=directory.path,
+                    src_dir=options.src_dir,
+                    download_dir=options.download_dir,
+                    wheel_download_dir=None,
+                    progress_bar=options.progress_bar,
+                    build_isolation=options.build_isolation,
+                    req_tracker=req_tracker,
+                )
+
+                resolver = Resolver(
+                    preparer=preparer,
+                    finder=finder,
+                    session=session,
+                    wheel_cache=None,
+                    use_user_site=False,
+                    upgrade_strategy="to-satisfy-only",
+                    force_reinstall=False,
+                    ignore_dependencies=options.ignore_dependencies,
+                    ignore_requires_python=False,
+                    ignore_installed=True,
+                    isolated=options.isolated_mode,
+                )
+                resolver.resolve(requirement_set)
+
+                downloaded = ' '.join([
+                    req.name for req in requirement_set.successfully_downloaded
+                ])
+                if downloaded:
+                    logger.info('Successfully downloaded %s', downloaded)
+
+                # Clean up
+                if not options.no_clean:
+                    requirement_set.cleanup_files()
+
+        return requirement_set
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/download.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/download.pyc
new file mode 100644
index 0000000..8caecba
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/download.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/freeze.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/freeze.py
new file mode 100644
index 0000000..dc9c53a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/freeze.py
@@ -0,0 +1,96 @@
+from __future__ import absolute_import
+
+import sys
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli.base_command import Command
+from pip._internal.models.format_control import FormatControl
+from pip._internal.operations.freeze import freeze
+from pip._internal.utils.compat import stdlib_pkgs
+
+DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'}
+
+
+class FreezeCommand(Command):
+    """
+    Output installed packages in requirements format.
+
+    Packages are listed in a case-insensitive sorted order.
+    """
+    name = 'freeze'
+    usage = """
+        %prog [options]"""
+    summary = 'Output installed packages in requirements format.'
+    log_streams = ("ext://sys.stderr", "ext://sys.stderr")
+
+    def __init__(self, *args, **kw):
+        super(FreezeCommand, self).__init__(*args, **kw)
+
+        self.cmd_opts.add_option(
+            '-r', '--requirement',
+            dest='requirements',
+            action='append',
+            default=[],
+            metavar='file',
+            help="Use the order in the given requirements file and its "
+                 "comments when generating output. 
This option can be "
+                 "used multiple times.")
+        self.cmd_opts.add_option(
+            '-f', '--find-links',
+            dest='find_links',
+            action='append',
+            default=[],
+            metavar='URL',
+            help='URL for finding packages, which will be added to the '
+                 'output.')
+        self.cmd_opts.add_option(
+            '-l', '--local',
+            dest='local',
+            action='store_true',
+            default=False,
+            help='If in a virtualenv that has global access, do not output '
+                 'globally-installed packages.')
+        self.cmd_opts.add_option(
+            '--user',
+            dest='user',
+            action='store_true',
+            default=False,
+            help='Only output packages installed in user-site.')
+        self.cmd_opts.add_option(
+            '--all',
+            dest='freeze_all',
+            action='store_true',
+            help='Do not skip these packages in the output:'
+                 ' %s' % ', '.join(DEV_PKGS))
+        self.cmd_opts.add_option(
+            '--exclude-editable',
+            dest='exclude_editable',
+            action='store_true',
+            help='Exclude editable package from output.')
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options, args):
+        format_control = FormatControl(set(), set())
+        wheel_cache = WheelCache(options.cache_dir, format_control)
+        skip = set(stdlib_pkgs)
+        if not options.freeze_all:
+            skip.update(DEV_PKGS)
+
+        freeze_kwargs = dict(
+            requirement=options.requirements,
+            find_links=options.find_links,
+            local_only=options.local,
+            user_only=options.user,
+            skip_regex=options.skip_requirements_regex,
+            isolated=options.isolated_mode,
+            wheel_cache=wheel_cache,
+            skip=skip,
+            exclude_editable=options.exclude_editable,
+        )
+
+        try:
+            for line in freeze(**freeze_kwargs):
+                sys.stdout.write(line + '\n')
+        finally:
+            wheel_cache.cleanup()
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/freeze.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/freeze.pyc
new file mode 100644
index 0000000..8b4338f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/freeze.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/hash.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/hash.py
new file mode 100644
index 0000000..423440e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/hash.py
@@ -0,0 +1,57 @@
+from __future__ import absolute_import
+
+import hashlib
+import logging
+import sys
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR
+from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES
+from pip._internal.utils.misc import read_chunks
+
+logger = logging.getLogger(__name__)
+
+
+class HashCommand(Command):
+    """
+    Compute a hash of a local package archive.
+
+    These can be used with --hash in a requirements file to do repeatable
+    installs.
+
+    """
+    name = 'hash'
+    usage = '%prog [options] <file> ...'
+    summary = 'Compute hashes of package archives.'
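+    # Expected output shape (file name and digest are illustrative),
+    # matching the logger.info() format string in run() below:
+    #
+    #   $ pip hash ./pkg-1.0.tar.gz
+    #   ./pkg-1.0.tar.gz:
+    #   --hash=sha256:<digest>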
+    ignore_require_venv = True
+
+    def __init__(self, *args, **kw):
+        super(HashCommand, self).__init__(*args, **kw)
+        self.cmd_opts.add_option(
+            '-a', '--algorithm',
+            dest='algorithm',
+            choices=STRONG_HASHES,
+            action='store',
+            default=FAVORITE_HASH,
+            help='The hash algorithm to use: one of %s' %
+                 ', '.join(STRONG_HASHES))
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options, args):
+        if not args:
+            self.parser.print_usage(sys.stderr)
+            return ERROR
+
+        algorithm = options.algorithm
+        for path in args:
+            logger.info('%s:\n--hash=%s:%s',
+                        path, algorithm, _hash_of_file(path, algorithm))
+
+
+def _hash_of_file(path, algorithm):
+    """Return the hash digest of a file."""
+    with open(path, 'rb') as archive:
+        hash = hashlib.new(algorithm)
+        for chunk in read_chunks(archive):
+            hash.update(chunk)
+    return hash.hexdigest()
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/hash.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/hash.pyc
new file mode 100644
index 0000000..f7ffd25
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/hash.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/help.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/help.py
new file mode 100644
index 0000000..49a81cb
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/help.py
@@ -0,0 +1,37 @@
+from __future__ import absolute_import
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.exceptions import CommandError
+
+
+class HelpCommand(Command):
+    """Show help for commands"""
+    name = 'help'
+    usage = """
+        %prog <command>"""
+    summary = 'Show help for commands.'
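+    # Dispatch sketch: `pip help install` looks InstallCommand up in
+    # commands_dict and prints that command's own parser help; unknown
+    # names fall through to get_similar_commands() for a suggestion.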
+    ignore_require_venv = True
+
+    def run(self, options, args):
+        from pip._internal.commands import commands_dict, get_similar_commands
+
+        try:
+            # 'pip help' with no args is handled by pip.__init__.parseopt()
+            cmd_name = args[0]  # the command we need help for
+        except IndexError:
+            return SUCCESS
+
+        if cmd_name not in commands_dict:
+            guess = get_similar_commands(cmd_name)
+
+            msg = ['unknown command "%s"' % cmd_name]
+            if guess:
+                msg.append('maybe you meant "%s"' % guess)
+
+            raise CommandError(' - '.join(msg))
+
+        command = commands_dict[cmd_name]()
+        command.parser.print_help()
+
+        return SUCCESS
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/help.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/help.pyc
new file mode 100644
index 0000000..8558e9c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/help.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/install.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/install.py
new file mode 100644
index 0000000..1c244d2
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/install.py
@@ -0,0 +1,566 @@
+from __future__ import absolute_import
+
+import errno
+import logging
+import operator
+import os
+import shutil
+from optparse import SUPPRESS_HELP
+
+from pip._vendor import pkg_resources
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import RequirementCommand
+from pip._internal.cli.status_codes import ERROR
+from pip._internal.exceptions import (
+    CommandError, InstallationError, PreviousBuildDirError,
+)
+from pip._internal.locations import distutils_scheme, virtualenv_no_global
+from pip._internal.operations.check import check_install_conflicts
+from pip._internal.operations.prepare import RequirementPreparer
+from pip._internal.req import RequirementSet, install_given_reqs
+from pip._internal.req.req_tracker import RequirementTracker
+from pip._internal.resolve import Resolver
+from pip._internal.utils.filesystem import check_path_owner
+from pip._internal.utils.misc import (
+    ensure_dir, get_installed_version,
+    protect_pip_from_modification_on_windows,
+)
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.wheel import WheelBuilder
+
+logger = logging.getLogger(__name__)
+
+
+class InstallCommand(RequirementCommand):
+    """
+    Install packages from:
+
+    - PyPI (and other indexes) using requirement specifiers.
+    - VCS project urls.
+    - Local project directories.
+    - Local or remote source archives.
+
+    pip also supports installing from "requirements files", which provide
+    an easy way to specify a whole environment to be installed.
+    """
+    name = 'install'
+
+    usage = """
+      %prog [options] <requirement specifier> [package-index-options] ...
+      %prog [options] -r <requirements file> [package-index-options] ...
+      %prog [options] [-e] <vcs project url> ...
+      %prog [options] [-e] <local project path> ...
+      %prog [options] <archive url/path> ..."""
+
+    summary = 'Install packages.'
+
+    def __init__(self, *args, **kw):
+        super(InstallCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(cmdoptions.requirements())
+        cmd_opts.add_option(cmdoptions.constraints())
+        cmd_opts.add_option(cmdoptions.no_deps())
+        cmd_opts.add_option(cmdoptions.pre())
+
+        cmd_opts.add_option(cmdoptions.editable())
+        cmd_opts.add_option(
+            '-t', '--target',
+            dest='target_dir',
+            metavar='dir',
+            default=None,
+            help='Install packages into <dir>. 
'
+                 'By default this will not replace existing files/folders in '
+                 '<dir>. Use --upgrade to replace existing packages in <dir> '
+                 'with new versions.'
+        )
+        cmd_opts.add_option(cmdoptions.platform())
+        cmd_opts.add_option(cmdoptions.python_version())
+        cmd_opts.add_option(cmdoptions.implementation())
+        cmd_opts.add_option(cmdoptions.abi())
+
+        cmd_opts.add_option(
+            '--user',
+            dest='use_user_site',
+            action='store_true',
+            help="Install to the Python user install directory for your "
+                 "platform. Typically ~/.local/, or %APPDATA%\\Python on "
+                 "Windows. (See the Python documentation for site.USER_BASE "
+                 "for full details.)")
+        cmd_opts.add_option(
+            '--no-user',
+            dest='use_user_site',
+            action='store_false',
+            help=SUPPRESS_HELP)
+        cmd_opts.add_option(
+            '--root',
+            dest='root_path',
+            metavar='dir',
+            default=None,
+            help="Install everything relative to this alternate root "
+                 "directory.")
+        cmd_opts.add_option(
+            '--prefix',
+            dest='prefix_path',
+            metavar='dir',
+            default=None,
+            help="Installation prefix where lib, bin and other top-level "
+                 "folders are placed")
+
+        cmd_opts.add_option(cmdoptions.build_dir())
+
+        cmd_opts.add_option(cmdoptions.src())
+
+        cmd_opts.add_option(
+            '-U', '--upgrade',
+            dest='upgrade',
+            action='store_true',
+            help='Upgrade all specified packages to the newest available '
+                 'version. The handling of dependencies depends on the '
+                 'upgrade-strategy used.'
+        )
+
+        cmd_opts.add_option(
+            '--upgrade-strategy',
+            dest='upgrade_strategy',
+            default='only-if-needed',
+            choices=['only-if-needed', 'eager'],
+            help='Determines how dependency upgrading should be handled '
+                 '[default: %default]. '
+                 '"eager" - dependencies are upgraded regardless of '
+                 'whether the currently installed version satisfies the '
+                 'requirements of the upgraded package(s). '
+                 '"only-if-needed" - dependencies are upgraded only when '
+                 'they do not '
+                 'satisfy the requirements of the upgraded package(s).'
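+            # e.g. for `pip install -U requests` (package names illustrative):
+            # "only-if-needed" keeps an already-satisfactory urllib3 as is,
+            # while "eager" would try to upgrade it as well.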
+        )
+
+        cmd_opts.add_option(
+            '--force-reinstall',
+            dest='force_reinstall',
+            action='store_true',
+            help='Reinstall all packages even if they are already '
+                 'up-to-date.')
+
+        cmd_opts.add_option(
+            '-I', '--ignore-installed',
+            dest='ignore_installed',
+            action='store_true',
+            help='Ignore the installed packages (reinstalling instead).')
+
+        cmd_opts.add_option(cmdoptions.ignore_requires_python())
+        cmd_opts.add_option(cmdoptions.no_build_isolation())
+        cmd_opts.add_option(cmdoptions.use_pep517())
+        cmd_opts.add_option(cmdoptions.no_use_pep517())
+
+        cmd_opts.add_option(cmdoptions.install_options())
+        cmd_opts.add_option(cmdoptions.global_options())
+
+        cmd_opts.add_option(
+            "--compile",
+            action="store_true",
+            dest="compile",
+            default=True,
+            help="Compile Python source files to bytecode",
+        )
+
+        cmd_opts.add_option(
+            "--no-compile",
+            action="store_false",
+            dest="compile",
+            help="Do not compile Python source files to bytecode",
+        )
+
+        cmd_opts.add_option(
+            "--no-warn-script-location",
+            action="store_false",
+            dest="warn_script_location",
+            default=True,
+            help="Do not warn when installing scripts outside PATH",
+        )
+        cmd_opts.add_option(
+            "--no-warn-conflicts",
+            action="store_false",
+            dest="warn_about_conflicts",
+            default=True,
+            help="Do not warn about broken dependencies",
+        )
+
+        cmd_opts.add_option(cmdoptions.no_binary())
+        cmd_opts.add_option(cmdoptions.only_binary())
+        cmd_opts.add_option(cmdoptions.prefer_binary())
+        cmd_opts.add_option(cmdoptions.no_clean())
+        cmd_opts.add_option(cmdoptions.require_hashes())
+        cmd_opts.add_option(cmdoptions.progress_bar())
+
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group,
+            self.parser,
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def run(self, options, args):
+        cmdoptions.check_install_build_global(options)
+        upgrade_strategy = "to-satisfy-only"
+        if options.upgrade:
+            upgrade_strategy = options.upgrade_strategy
+
+        if options.build_dir:
+            options.build_dir = os.path.abspath(options.build_dir)
+
+        cmdoptions.check_dist_restriction(options, check_target=True)
+
+        if options.python_version:
+            python_versions = [options.python_version]
+        else:
+            python_versions = None
+
+        options.src_dir = os.path.abspath(options.src_dir)
+        install_options = options.install_options or []
+        if options.use_user_site:
+            if options.prefix_path:
+                raise CommandError(
+                    "Can not combine '--user' and '--prefix' as they imply "
+                    "different installation locations"
+                )
+            if virtualenv_no_global():
+                raise InstallationError(
+                    "Can not perform a '--user' install. User site-packages "
+                    "are not visible in this virtualenv."
+                )
+            install_options.append('--user')
+            install_options.append('--prefix=')
+
+        target_temp_dir = TempDirectory(kind="target")
+        if options.target_dir:
+            options.ignore_installed = True
+            options.target_dir = os.path.abspath(options.target_dir)
+            if (os.path.exists(options.target_dir) and not
+                    os.path.isdir(options.target_dir)):
+                raise CommandError(
+                    "Target path exists but is not a directory, will not "
+                    "continue."
+                )
+
+            # Create a target directory for using with the target option
+            target_temp_dir.create()
+            install_options.append('--home=' + target_temp_dir.path)
+
+        global_options = options.global_options or []
+
+        with self._build_session(options) as session:
+            finder = self._build_package_finder(
+                options=options,
+                session=session,
+                platform=options.platform,
+                python_versions=python_versions,
+                abi=options.abi,
+                implementation=options.implementation,
+            )
+            build_delete = (not (options.no_clean or options.build_dir))
+            wheel_cache = WheelCache(options.cache_dir, options.format_control)
+
+            if options.cache_dir and not check_path_owner(options.cache_dir):
+                logger.warning(
+                    "The directory '%s' or its parent directory is not owned "
+                    "by the current user and caching wheels has been "
+                    "disabled. check the permissions and owner of that "
+                    "directory. If executing pip with sudo, you may want "
+                    "sudo's -H flag.",
+                    options.cache_dir,
+                )
+                options.cache_dir = None
+
+            with RequirementTracker() as req_tracker, TempDirectory(
+                options.build_dir, delete=build_delete, kind="install"
+            ) as directory:
+                requirement_set = RequirementSet(
+                    require_hashes=options.require_hashes,
+                    check_supported_wheels=not options.target_dir,
+                )
+
+                try:
+                    self.populate_requirement_set(
+                        requirement_set, args, options, finder, session,
+                        self.name, wheel_cache
+                    )
+                    preparer = RequirementPreparer(
+                        build_dir=directory.path,
+                        src_dir=options.src_dir,
+                        download_dir=None,
+                        wheel_download_dir=None,
+                        progress_bar=options.progress_bar,
+                        build_isolation=options.build_isolation,
+                        req_tracker=req_tracker,
+                    )
+
+                    resolver = Resolver(
+                        preparer=preparer,
+                        finder=finder,
+                        session=session,
+                        wheel_cache=wheel_cache,
+                        use_user_site=options.use_user_site,
+                        upgrade_strategy=upgrade_strategy,
+                        force_reinstall=options.force_reinstall,
+                        ignore_dependencies=options.ignore_dependencies,
+                        ignore_requires_python=options.ignore_requires_python,
+                        ignore_installed=options.ignore_installed,
+                        isolated=options.isolated_mode,
+                        use_pep517=options.use_pep517
+                    )
+                    resolver.resolve(requirement_set)
+
+                    protect_pip_from_modification_on_windows(
+                        modifying_pip=requirement_set.has_requirement("pip")
+                    )
+
+                    # Consider legacy and PEP517-using requirements separately
+                    legacy_requirements = []
+                    pep517_requirements = []
+                    for req in requirement_set.requirements.values():
+                        if req.use_pep517:
+                            pep517_requirements.append(req)
+                        else:
+                            legacy_requirements.append(req)
+
+                    # We don't build wheels for legacy requirements if we
+                    # don't have wheel installed or we don't have a cache dir
+                    try:
+                        import wheel  # noqa: F401
+                        build_legacy = bool(options.cache_dir)
+                    except ImportError:
+                        build_legacy = False
+
+                    wb = WheelBuilder(
+                        finder, preparer, wheel_cache,
+                        build_options=[], global_options=[],
+                    )
+
+                    # Always build PEP 517 requirements
+                    build_failures = wb.build(
+                        pep517_requirements,
+                        session=session, autobuilding=True
+                    )
+
+                    if build_legacy:
+                        # We don't care about failures building legacy
+                        # requirements, as we'll fall through to a direct
+                        # install for those.
+                        wb.build(
+                            legacy_requirements,
+                            session=session, autobuilding=True
+                        )
+
+                    # If we're using PEP 517, we cannot do a direct install
+                    # so we fail here.
+                    if build_failures:
+                        raise InstallationError(
+                            "Could not build wheels for {} which use"
+                            " PEP 517 and cannot be installed directly".format(
+                                ", ".join(r.name for r in build_failures)))
+
+                    to_install = resolver.get_installation_order(
+                        requirement_set
+                    )
+
+                    # Consistency Checking of the package set we're installing.
+                    should_warn_about_conflicts = (
+                        not options.ignore_dependencies and
+                        options.warn_about_conflicts
+                    )
+                    if should_warn_about_conflicts:
+                        self._warn_about_conflicts(to_install)
+
+                    # Don't warn about script install locations if
+                    # --target has been specified
+                    warn_script_location = options.warn_script_location
+                    if options.target_dir:
+                        warn_script_location = False
+
+                    installed = install_given_reqs(
+                        to_install,
+                        install_options,
+                        global_options,
+                        root=options.root_path,
+                        home=target_temp_dir.path,
+                        prefix=options.prefix_path,
+                        pycompile=options.compile,
+                        warn_script_location=warn_script_location,
+                        use_user_site=options.use_user_site,
+                    )
+
+                    lib_locations = get_lib_location_guesses(
+                        user=options.use_user_site,
+                        home=target_temp_dir.path,
+                        root=options.root_path,
+                        prefix=options.prefix_path,
+                        isolated=options.isolated_mode,
+                    )
+                    working_set = pkg_resources.WorkingSet(lib_locations)
+
+                    reqs = sorted(installed, key=operator.attrgetter('name'))
+                    items = []
+                    for req in reqs:
+                        item = req.name
+                        try:
+                            installed_version = get_installed_version(
+                                req.name, working_set=working_set
+                            )
+                            if installed_version:
+                                item += '-' + installed_version
+                        except Exception:
+                            pass
+                        items.append(item)
+                    installed = ' '.join(items)
+                    if installed:
+                        logger.info('Successfully installed %s', installed)
+                except EnvironmentError as error:
+                    show_traceback = (self.verbosity >= 1)
+
+                    message = create_env_error_message(
+                        error, show_traceback, options.use_user_site,
+                    )
+                    logger.error(message, exc_info=show_traceback)
+
+                    return ERROR
+                except PreviousBuildDirError:
+                    options.no_clean = True
+                    raise
+                finally:
+                    # Clean up
+                    if not options.no_clean:
+                        requirement_set.cleanup_files()
+                        wheel_cache.cleanup()
+
+        if options.target_dir:
+            self._handle_target_dir(
+                options.target_dir, target_temp_dir, options.upgrade
+            )
+        return requirement_set
+
+    def _handle_target_dir(self, target_dir, target_temp_dir, upgrade):
+        ensure_dir(target_dir)
+
+        # Checking both purelib and platlib directories for installed
+        # packages to be moved to target directory
+        lib_dir_list = []
+
+        with target_temp_dir:
+            # Checking both purelib and platlib directories for installed
+            # packages to be moved to target directory
+            scheme = distutils_scheme('', home=target_temp_dir.path)
+            purelib_dir = scheme['purelib']
+            platlib_dir = scheme['platlib']
+            data_dir = scheme['data']
+
+            if os.path.exists(purelib_dir):
+                lib_dir_list.append(purelib_dir)
+            if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
+                lib_dir_list.append(platlib_dir)
+            if os.path.exists(data_dir):
+                lib_dir_list.append(data_dir)
+
+            for lib_dir in lib_dir_list:
+                for item in os.listdir(lib_dir):
+                    if lib_dir == data_dir:
+                        ddir = os.path.join(data_dir, item)
+                        if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
+                            continue
+                    target_item_dir = os.path.join(target_dir, item)
+                    if os.path.exists(target_item_dir):
+                        if not upgrade:
+                            logger.warning(
+                                'Target directory %s already exists. Specify '
+                                '--upgrade to force replacement.',
+                                target_item_dir
+                            )
+                            continue
+                        if os.path.islink(target_item_dir):
+                            logger.warning(
+                                'Target directory %s already exists and is '
+                                'a link. 
Pip will not automatically replace '
+                                'links, please remove if replacement is '
+                                'desired.',
+                                target_item_dir
+                            )
+                            continue
+                        if os.path.isdir(target_item_dir):
+                            shutil.rmtree(target_item_dir)
+                        else:
+                            os.remove(target_item_dir)
+
+                    shutil.move(
+                        os.path.join(lib_dir, item),
+                        target_item_dir
+                    )
+
+    def _warn_about_conflicts(self, to_install):
+        try:
+            package_set, _dep_info = check_install_conflicts(to_install)
+        except Exception:
+            logger.error("Error checking for conflicts.", exc_info=True)
+            return
+        missing, conflicting = _dep_info
+
+        # NOTE: There is some duplication here from pip check
+        for project_name in missing:
+            version = package_set[project_name][0]
+            for dependency in missing[project_name]:
+                logger.critical(
+                    "%s %s requires %s, which is not installed.",
+                    project_name, version, dependency[1],
+                )
+
+        for project_name in conflicting:
+            version = package_set[project_name][0]
+            for dep_name, dep_version, req in conflicting[project_name]:
+                logger.critical(
+                    "%s %s has requirement %s, but you'll have %s %s which is "
+                    "incompatible.",
+                    project_name, version, req, dep_name, dep_version,
+                )
+
+
+def get_lib_location_guesses(*args, **kwargs):
+    scheme = distutils_scheme('', *args, **kwargs)
+    return [scheme['purelib'], scheme['platlib']]
+
+
+def create_env_error_message(error, show_traceback, using_user_site):
+    """Format an error message for an EnvironmentError
+
+    It may occur anytime during the execution of the install command.
+    """
+    parts = []
+
+    # Mention the error if we are not going to show a traceback
+    parts.append("Could not install packages due to an EnvironmentError")
+    if not show_traceback:
+        parts.append(": ")
+        parts.append(str(error))
+    else:
+        parts.append(".")
+
+    # Split the error indication from a helper message (if any)
+    parts[-1] += "\n"
+
+    # Suggest useful actions to the user:
+    # (1) using user site-packages or (2) verifying the permissions
+    if error.errno == errno.EACCES:
+        user_option_part = "Consider using the `--user` option"
+        permissions_part = "Check the permissions"
+
+        if not using_user_site:
+            parts.extend([
+                user_option_part, " or ",
+                permissions_part.lower(),
+            ])
+        else:
+            parts.append(permissions_part)
+        parts.append(".\n")
+
+    return "".join(parts).strip() + "\n"
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/install.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/install.pyc
new file mode 100644
index 0000000..4eaa10d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/install.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/list.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/list.py
new file mode 100644
index 0000000..a640274
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/list.py
@@ -0,0 +1,301 @@
+from __future__ import absolute_import
+
+import json
+import logging
+
+from pip._vendor import six
+from pip._vendor.six.moves import zip_longest
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import Command
+from pip._internal.exceptions import CommandError
+from pip._internal.index import PackageFinder
+from pip._internal.utils.misc import (
+    dist_is_editable, get_installed_distributions,
+)
+from pip._internal.utils.packaging import get_installer
+
+logger = logging.getLogger(__name__)
+
+
+class ListCommand(Command):
+    """
+    List installed packages, including editables.
+
+    Packages are listed in a case-insensitive sorted order.
+    """
+    name = 'list'
+    usage = """
+        %prog [options]"""
+    summary = 'List installed packages.'
+
+    def __init__(self, *args, **kw):
+        super(ListCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(
+            '-o', '--outdated',
+            action='store_true',
+            default=False,
+            help='List outdated packages')
+        cmd_opts.add_option(
+            '-u', '--uptodate',
+            action='store_true',
+            default=False,
+            help='List uptodate packages')
+        cmd_opts.add_option(
+            '-e', '--editable',
+            action='store_true',
+            default=False,
+            help='List editable projects.')
+        cmd_opts.add_option(
+            '-l', '--local',
+            action='store_true',
+            default=False,
+            help=('If in a virtualenv that has global access, do not list '
+                  'globally-installed packages.'),
+        )
+        self.cmd_opts.add_option(
+            '--user',
+            dest='user',
+            action='store_true',
+            default=False,
+            help='Only output packages installed in user-site.')
+
+        cmd_opts.add_option(
+            '--pre',
+            action='store_true',
+            default=False,
+            help=("Include pre-release and development versions. By default, "
+                  "pip only finds stable versions."),
+        )
+
+        cmd_opts.add_option(
+            '--format',
+            action='store',
+            dest='list_format',
+            default="columns",
+            choices=('columns', 'freeze', 'json'),
+            help="Select the output format among: columns (default), freeze, "
+                 "or json",
+        )
+
+        cmd_opts.add_option(
+            '--not-required',
+            action='store_true',
+            dest='not_required',
+            help="List packages that are not dependencies of "
+                 "installed packages.",
+        )
+
+        cmd_opts.add_option(
+            '--exclude-editable',
+            action='store_false',
+            dest='include_editable',
+            help='Exclude editable package from output.',
+        )
+        cmd_opts.add_option(
+            '--include-editable',
+            action='store_true',
+            dest='include_editable',
+            help='Include editable package from output.',
+            default=True,
+        )
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group, self.parser
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def _build_package_finder(self, options, index_urls, session):
+        """
+        Create a package finder appropriate to this list command.
+        """
+        return PackageFinder(
+            find_links=options.find_links,
+            index_urls=index_urls,
+            allow_all_prereleases=options.pre,
+            trusted_hosts=options.trusted_hosts,
+            session=session,
+        )
+
+    def run(self, options, args):
+        if options.outdated and options.uptodate:
+            raise CommandError(
+                "Options --outdated and --uptodate cannot be combined.")
+
+        packages = get_installed_distributions(
+            local_only=options.local,
+            user_only=options.user,
+            editables_only=options.editable,
+            include_editables=options.include_editable,
+        )
+
+        # get_not_required must be called first in order to find and
+        # filter out all dependencies correctly. Otherwise a package
+        # can't be identified as a requirement because some parent
+        # packages could be filtered out before.
+        if options.not_required:
+            packages = self.get_not_required(packages, options)
+
+        if options.outdated:
+            packages = self.get_outdated(packages, options)
+        elif options.uptodate:
+            packages = self.get_uptodate(packages, options)
+
+        self.output_package_listing(packages, options)
+
+    def get_outdated(self, packages, options):
+        return [
+            dist for dist in self.iter_packages_latest_infos(packages, options)
+            if dist.latest_version > dist.parsed_version
+        ]
+
+    def get_uptodate(self, packages, options):
+        return [
+            dist for dist in self.iter_packages_latest_infos(packages, options)
+            if dist.latest_version == dist.parsed_version
+        ]
+
+    def get_not_required(self, packages, options):
+        dep_keys = set()
+        for dist in packages:
+            dep_keys.update(requirement.key for requirement in dist.requires())
+        return {pkg for pkg in packages if pkg.key not in dep_keys}
+
+    def iter_packages_latest_infos(self, packages, options):
+        index_urls = [options.index_url] + options.extra_index_urls
+        if options.no_index:
+            logger.debug('Ignoring indexes: %s', ','.join(index_urls))
+            index_urls = []
+
+        with self._build_session(options) as session:
+            finder = self._build_package_finder(options, index_urls, session)
+
+            for dist in packages:
+                typ = 'unknown'
+                all_candidates = finder.find_all_candidates(dist.key)
+                if not options.pre:
+                    # Remove prereleases
+                    all_candidates = [candidate for candidate in all_candidates
+                                      if not candidate.version.is_prerelease]
+
+                if not all_candidates:
+                    continue
+                best_candidate = max(all_candidates,
+                                     key=finder._candidate_sort_key)
+                remote_version = best_candidate.version
+                if best_candidate.location.is_wheel:
+                    typ = 'wheel'
+                else:
+                    typ = 'sdist'
+                # This is dirty but makes the rest of the code much cleaner
+                dist.latest_version = remote_version
+                dist.latest_filetype = typ
+                yield dist
+
+    def output_package_listing(self, packages, options):
+        packages = sorted(
+            packages,
+            key=lambda dist: dist.project_name.lower(),
+        )
+        if options.list_format == 'columns' and packages:
+            data, header = format_for_columns(packages, options)
+            self.output_package_listing_columns(data, header)
+        elif options.list_format == 'freeze':
+            for dist in packages:
+                if options.verbose >= 1:
+                    logger.info("%s==%s (%s)", dist.project_name,
+                                dist.version, dist.location)
+                else:
+                    logger.info("%s==%s", dist.project_name, dist.version)
+        elif options.list_format == 'json':
+            logger.info(format_for_json(packages, options))
+
+    def output_package_listing_columns(self, data, header):
+        # insert the header first: we need to know the size of column names
+        if len(data) > 0:
+            data.insert(0, header)
+
+        pkg_strings, sizes = tabulate(data)
+
+        # Create and add a separator.
+        if len(data) > 0:
+            pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
+
+        for val in pkg_strings:
+            logger.info(val)
+
+
+def tabulate(vals):
+    # From pfmoore on GitHub:
+    # https://github.com/pypa/pip/issues/3651#issuecomment-216932564
+    assert len(vals) > 0
+
+    sizes = [0] * max(len(x) for x in vals)
+    for row in vals:
+        sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)]
+
+    result = []
+    for row in vals:
+        display = " ".join([str(c).ljust(s) if c is not None else ''
+                            for s, c in zip_longest(sizes, row)])
+        result.append(display)
+
+    return result, sizes
+
+
+def format_for_columns(pkgs, options):
+    """
+    Convert the package data into something usable
+    by output_package_listing_columns.
+    """
+    running_outdated = options.outdated
+    # Adjust the header for the `pip list --outdated` case.
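+    # e.g. `pip list --outdated` is later rendered by tabulate() roughly as
+    # (values illustrative):
+    #
+    #   Package  Version Latest Type
+    #   -------- ------- ------ -----
+    #   requests 2.18.4  2.21.0 wheel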
+    if running_outdated:
+        header = ["Package", "Version", "Latest", "Type"]
+    else:
+        header = ["Package", "Version"]
+
+    data = []
+    if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs):
+        header.append("Location")
+    if options.verbose >= 1:
+        header.append("Installer")
+
+    for proj in pkgs:
+        # if we're working on the 'outdated' list, separate out the
+        # latest_version and type
+        row = [proj.project_name, proj.version]
+
+        if running_outdated:
+            row.append(proj.latest_version)
+            row.append(proj.latest_filetype)
+
+        if options.verbose >= 1 or dist_is_editable(proj):
+            row.append(proj.location)
+        if options.verbose >= 1:
+            row.append(get_installer(proj))
+
+        data.append(row)
+
+    return data, header
+
+
+def format_for_json(packages, options):
+    data = []
+    for dist in packages:
+        info = {
+            'name': dist.project_name,
+            'version': six.text_type(dist.version),
+        }
+        if options.verbose >= 1:
+            info['location'] = dist.location
+            info['installer'] = get_installer(dist)
+        if options.outdated:
+            info['latest_version'] = six.text_type(dist.latest_version)
+            info['latest_filetype'] = dist.latest_filetype
+        data.append(info)
+    return json.dumps(data)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/list.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/list.pyc
new file mode 100644
index 0000000..b1540d7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/list.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/search.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/search.py
new file mode 100644
index 0000000..c157a31
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/search.py
@@ -0,0 +1,135 @@
+from __future__ import absolute_import
+
+import logging
+import sys
+import textwrap
+from collections import OrderedDict
+
+from pip._vendor import pkg_resources
+from pip._vendor.packaging.version import parse as parse_version
+# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is
+# why we ignore the type on this import
+from pip._vendor.six.moves import xmlrpc_client  # type: ignore
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS
+from pip._internal.download import PipXmlrpcTransport
+from pip._internal.exceptions import CommandError
+from pip._internal.models.index import PyPI
+from pip._internal.utils.compat import get_terminal_size
+from pip._internal.utils.logging import indent_log
+
+logger = logging.getLogger(__name__)
+
+
+class SearchCommand(Command):
+    """Search for PyPI packages whose name or summary contains <query>."""
+    name = 'search'
+    usage = """
+      %prog [options] <query>"""
+    summary = 'Search PyPI for packages.'
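+    # Searching only talks to the index, so the require-virtualenv check
+    # is skipped via ignore_require_venv below.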
+ ignore_require_venv = True + + def __init__(self, *args, **kw): + super(SearchCommand, self).__init__(*args, **kw) + self.cmd_opts.add_option( + '-i', '--index', + dest='index', + metavar='URL', + default=PyPI.pypi_url, + help='Base URL of Python Package Index (default %default)') + + self.parser.insert_option_group(0, self.cmd_opts) + + def run(self, options, args): + if not args: + raise CommandError('Missing required argument (search query).') + query = args + pypi_hits = self.search(query, options) + hits = transform_hits(pypi_hits) + + terminal_width = None + if sys.stdout.isatty(): + terminal_width = get_terminal_size()[0] + + print_results(hits, terminal_width=terminal_width) + if pypi_hits: + return SUCCESS + return NO_MATCHES_FOUND + + def search(self, query, options): + index_url = options.index + with self._build_session(options) as session: + transport = PipXmlrpcTransport(index_url, session) + pypi = xmlrpc_client.ServerProxy(index_url, transport) + hits = pypi.search({'name': query, 'summary': query}, 'or') + return hits + + +def transform_hits(hits): + """ + The list from pypi is really a list of versions. We want a list of + packages with the list of versions stored inline. This converts the + list from pypi into one we can use. + """ + packages = OrderedDict() + for hit in hits: + name = hit['name'] + summary = hit['summary'] + version = hit['version'] + + if name not in packages.keys(): + packages[name] = { + 'name': name, + 'summary': summary, + 'versions': [version], + } + else: + packages[name]['versions'].append(version) + + # if this is the highest version, replace summary and score + if version == highest_version(packages[name]['versions']): + packages[name]['summary'] = summary + + return list(packages.values()) + + +def print_results(hits, name_column_width=None, terminal_width=None): + if not hits: + return + if name_column_width is None: + name_column_width = max([ + len(hit['name']) + len(highest_version(hit.get('versions', ['-']))) + for hit in hits + ]) + 4 + + installed_packages = [p.project_name for p in pkg_resources.working_set] + for hit in hits: + name = hit['name'] + summary = hit['summary'] or '' + latest = highest_version(hit.get('versions', ['-'])) + if terminal_width is not None: + target_width = terminal_width - name_column_width - 5 + if target_width > 10: + # wrap and indent summary to fit terminal + summary = textwrap.wrap(summary, target_width) + summary = ('\n' + ' ' * (name_column_width + 3)).join(summary) + + line = '%-*s - %s' % (name_column_width, + '%s (%s)' % (name, latest), summary) + try: + logger.info(line) + if name in installed_packages: + dist = pkg_resources.get_distribution(name) + with indent_log(): + if dist.version == latest: + logger.info('INSTALLED: %s (latest)', dist.version) + else: + logger.info('INSTALLED: %s', dist.version) + logger.info('LATEST: %s', latest) + except UnicodeEncodeError: + pass + + +def highest_version(versions): + return max(versions, key=parse_version) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/search.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/search.pyc new file mode 100644 index 0000000..f649289 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/search.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/show.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/show.py new file mode 100644 index 0000000..f92c9bc --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/show.py
@@ -0,0 +1,168 @@
+from __future__ import absolute_import
+
+import logging
+import os
+from email.parser import FeedParser  # type: ignore
+
+from pip._vendor import pkg_resources
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+
+logger = logging.getLogger(__name__)
+
+
+class ShowCommand(Command):
+    """
+    Show information about one or more installed packages.
+
+    The output is in RFC-compliant mail header format.
+    """
+    name = 'show'
+    usage = """
+      %prog [options] <package> ..."""
+    summary = 'Show information about installed packages.'
+    ignore_require_venv = True
+
+    def __init__(self, *args, **kw):
+        super(ShowCommand, self).__init__(*args, **kw)
+        self.cmd_opts.add_option(
+            '-f', '--files',
+            dest='files',
+            action='store_true',
+            default=False,
+            help='Show the full list of installed files for each package.')
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options, args):
+        if not args:
+            logger.warning('ERROR: Please provide a package name or names.')
+            return ERROR
+        query = args
+
+        results = search_packages_info(query)
+        if not print_results(
+                results, list_files=options.files, verbose=options.verbose):
+            return ERROR
+        return SUCCESS
+
+
+def search_packages_info(query):
+    """
+    Gather details from installed distributions. Print distribution name,
+    version, location, and installed files. Installed files require a
+    pip-generated 'installed-files.txt' in the distribution's '.egg-info'
+    directory.
+    """
+    installed = {}
+    for p in pkg_resources.working_set:
+        installed[canonicalize_name(p.project_name)] = p
+
+    query_names = [canonicalize_name(name) for name in query]
+
+    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
+        package = {
+            'name': dist.project_name,
+            'version': dist.version,
+            'location': dist.location,
+            'requires': [dep.project_name for dep in dist.requires()],
+        }
+        file_list = None
+        metadata = None
+        if isinstance(dist, pkg_resources.DistInfoDistribution):
+            # RECORDs should be part of .dist-info metadata
+            if dist.has_metadata('RECORD'):
+                lines = dist.get_metadata_lines('RECORD')
+                paths = [l.split(',')[0] for l in lines]
+                paths = [os.path.join(dist.location, p) for p in paths]
+                file_list = [os.path.relpath(p, dist.location) for p in paths]
+
+            if dist.has_metadata('METADATA'):
+                metadata = dist.get_metadata('METADATA')
+        else:
+            # Otherwise use pip's log for .egg-info's
+            if dist.has_metadata('installed-files.txt'):
+                paths = dist.get_metadata_lines('installed-files.txt')
+                paths = [os.path.join(dist.egg_info, p) for p in paths]
+                file_list = [os.path.relpath(p, dist.location) for p in paths]
+
+            if dist.has_metadata('PKG-INFO'):
+                metadata = dist.get_metadata('PKG-INFO')
+
+        if dist.has_metadata('entry_points.txt'):
+            entry_points = dist.get_metadata_lines('entry_points.txt')
+            package['entry_points'] = entry_points
+
+        if dist.has_metadata('INSTALLER'):
+            for line in dist.get_metadata_lines('INSTALLER'):
+                if line.strip():
+                    package['installer'] = line.strip()
+                    break
+
+        # @todo: Should pkg_resources.Distribution have a
+        # `get_pkg_info` method?
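+        # METADATA / PKG-INFO is an RFC 822-style header block, so an email
+        # parser can split it into the key/value pairs consumed below.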
+        feed_parser = FeedParser()
+        feed_parser.feed(metadata)
+        pkg_info_dict = feed_parser.close()
+        for key in ('metadata-version', 'summary',
+                    'home-page', 'author', 'author-email', 'license'):
+            package[key] = pkg_info_dict.get(key)
+
+        # It looks like FeedParser cannot deal with repeated headers
+        classifiers = []
+        for line in metadata.splitlines():
+            if line.startswith('Classifier: '):
+                classifiers.append(line[len('Classifier: '):])
+        package['classifiers'] = classifiers
+
+        if file_list:
+            package['files'] = sorted(file_list)
+        yield package
+
+
+def print_results(distributions, list_files=False, verbose=False):
+    """
+    Print the information from the installed distributions found.
+    """
+    results_printed = False
+    for i, dist in enumerate(distributions):
+        results_printed = True
+        if i > 0:
+            logger.info("---")
+
+        name = dist.get('name', '')
+        required_by = [
+            pkg.project_name for pkg in pkg_resources.working_set
+            if name in [required.name for required in pkg.requires()]
+        ]
+
+        logger.info("Name: %s", name)
+        logger.info("Version: %s", dist.get('version', ''))
+        logger.info("Summary: %s", dist.get('summary', ''))
+        logger.info("Home-page: %s", dist.get('home-page', ''))
+        logger.info("Author: %s", dist.get('author', ''))
+        logger.info("Author-email: %s", dist.get('author-email', ''))
+        logger.info("License: %s", dist.get('license', ''))
+        logger.info("Location: %s", dist.get('location', ''))
+        logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
+        logger.info("Required-by: %s", ', '.join(required_by))
+
+        if verbose:
+            logger.info("Metadata-Version: %s",
+                        dist.get('metadata-version', ''))
+            logger.info("Installer: %s", dist.get('installer', ''))
+            logger.info("Classifiers:")
+            for classifier in dist.get('classifiers', []):
+                logger.info("  %s", classifier)
+            logger.info("Entry-points:")
+            for entry in dist.get('entry_points', []):
+                logger.info("  %s", entry.strip())
+        if list_files:
+            logger.info("Files:")
+            for line in dist.get('files', []):
+                logger.info("  %s", line.strip())
+            if "files" not in dist:
+                logger.info("Cannot locate installed-files.txt")
+    return results_printed
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/show.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/show.pyc
new file mode 100644
index 0000000..6db6890
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/show.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/uninstall.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/uninstall.py
new file mode 100644
index 0000000..0cd6f54
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/uninstall.py
@@ -0,0 +1,78 @@
+from __future__ import absolute_import
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.cli.base_command import Command
+from pip._internal.exceptions import InstallationError
+from pip._internal.req import parse_requirements
+from pip._internal.req.constructors import install_req_from_line
+from pip._internal.utils.misc import protect_pip_from_modification_on_windows
+
+
+class UninstallCommand(Command):
+    """
+    Uninstall packages.
+
+    pip is able to uninstall most installed packages. Known exceptions are:
+
+    - Pure distutils packages installed with ``python setup.py install``, which
+      leave behind no metadata to determine what files were installed.
+    - Script wrappers installed by ``python setup.py develop``.
+ """ + name = 'uninstall' + usage = """ + %prog [options] ... + %prog [options] -r ...""" + summary = 'Uninstall packages.' + + def __init__(self, *args, **kw): + super(UninstallCommand, self).__init__(*args, **kw) + self.cmd_opts.add_option( + '-r', '--requirement', + dest='requirements', + action='append', + default=[], + metavar='file', + help='Uninstall all the packages listed in the given requirements ' + 'file. This option can be used multiple times.', + ) + self.cmd_opts.add_option( + '-y', '--yes', + dest='yes', + action='store_true', + help="Don't ask for confirmation of uninstall deletions.") + + self.parser.insert_option_group(0, self.cmd_opts) + + def run(self, options, args): + with self._build_session(options) as session: + reqs_to_uninstall = {} + for name in args: + req = install_req_from_line( + name, isolated=options.isolated_mode, + ) + if req.name: + reqs_to_uninstall[canonicalize_name(req.name)] = req + for filename in options.requirements: + for req in parse_requirements( + filename, + options=options, + session=session): + if req.name: + reqs_to_uninstall[canonicalize_name(req.name)] = req + if not reqs_to_uninstall: + raise InstallationError( + 'You must give at least one requirement to %(name)s (see ' + '"pip help %(name)s")' % dict(name=self.name) + ) + + protect_pip_from_modification_on_windows( + modifying_pip="pip" in reqs_to_uninstall + ) + + for req in reqs_to_uninstall.values(): + uninstall_pathset = req.uninstall( + auto_confirm=options.yes, verbose=self.verbosity > 0, + ) + if uninstall_pathset: + uninstall_pathset.commit() diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/uninstall.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/uninstall.pyc new file mode 100644 index 0000000..4fae6ae Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/uninstall.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/wheel.py b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/wheel.py new file mode 100644 index 0000000..cd72a3d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/wheel.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +import logging +import os + +from pip._internal.cache import WheelCache +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import RequirementCommand +from pip._internal.exceptions import CommandError, PreviousBuildDirError +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req import RequirementSet +from pip._internal.req.req_tracker import RequirementTracker +from pip._internal.resolve import Resolver +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.wheel import WheelBuilder + +logger = logging.getLogger(__name__) + + +class WheelCommand(RequirementCommand): + """ + Build Wheel archives for your requirements and dependencies. + + Wheel is a built-package format, and offers the advantage of not + recompiling your software during every install. For more details, see the + wheel docs: https://wheel.readthedocs.io/en/latest/ + + Requirements: setuptools>=0.8, and wheel. + + 'pip wheel' uses the bdist_wheel setuptools extension from the wheel + package to build individual wheels. + + """ + + name = 'wheel' + usage = """ + %prog [options] ... + %prog [options] -r ... + %prog [options] [-e] ... + %prog [options] [-e] ... 
+      %prog [options] <archive url/path> ..."""
+
+    summary = 'Build wheels from your requirements.'
+
+    def __init__(self, *args, **kw):
+        super(WheelCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(
+            '-w', '--wheel-dir',
+            dest='wheel_dir',
+            metavar='dir',
+            default=os.curdir,
+            help=("Build wheels into <dir>, where the default is the "
+                  "current working directory."),
+        )
+        cmd_opts.add_option(cmdoptions.no_binary())
+        cmd_opts.add_option(cmdoptions.only_binary())
+        cmd_opts.add_option(cmdoptions.prefer_binary())
+        cmd_opts.add_option(
+            '--build-option',
+            dest='build_options',
+            metavar='options',
+            action='append',
+            help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
+        )
+        cmd_opts.add_option(cmdoptions.no_build_isolation())
+        cmd_opts.add_option(cmdoptions.use_pep517())
+        cmd_opts.add_option(cmdoptions.no_use_pep517())
+        cmd_opts.add_option(cmdoptions.constraints())
+        cmd_opts.add_option(cmdoptions.editable())
+        cmd_opts.add_option(cmdoptions.requirements())
+        cmd_opts.add_option(cmdoptions.src())
+        cmd_opts.add_option(cmdoptions.ignore_requires_python())
+        cmd_opts.add_option(cmdoptions.no_deps())
+        cmd_opts.add_option(cmdoptions.build_dir())
+        cmd_opts.add_option(cmdoptions.progress_bar())
+
+        cmd_opts.add_option(
+            '--global-option',
+            dest='global_options',
+            action='append',
+            metavar='options',
+            help="Extra global options to be supplied to the setup.py "
+                 "call before the 'bdist_wheel' command.")
+
+        cmd_opts.add_option(
+            '--pre',
+            action='store_true',
+            default=False,
+            help=("Include pre-release and development versions. By default, "
+                  "pip only finds stable versions."),
+        )
+
+        cmd_opts.add_option(cmdoptions.no_clean())
+        cmd_opts.add_option(cmdoptions.require_hashes())
+
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group,
+            self.parser,
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def run(self, options, args):
+        cmdoptions.check_install_build_global(options)
+
+        index_urls = [options.index_url] + options.extra_index_urls
+        if options.no_index:
+            logger.debug('Ignoring indexes: %s', ','.join(index_urls))
+            index_urls = []
+
+        if options.build_dir:
+            options.build_dir = os.path.abspath(options.build_dir)
+
+        options.src_dir = os.path.abspath(options.src_dir)
+
+        with self._build_session(options) as session:
+            finder = self._build_package_finder(options, session)
+            build_delete = (not (options.no_clean or options.build_dir))
+            wheel_cache = WheelCache(options.cache_dir, options.format_control)
+
+            with RequirementTracker() as req_tracker, TempDirectory(
+                options.build_dir, delete=build_delete, kind="wheel"
+            ) as directory:
+
+                requirement_set = RequirementSet(
+                    require_hashes=options.require_hashes,
+                )
+
+                try:
+                    self.populate_requirement_set(
+                        requirement_set, args, options, finder, session,
+                        self.name, wheel_cache
+                    )
+
+                    preparer = RequirementPreparer(
+                        build_dir=directory.path,
+                        src_dir=options.src_dir,
+                        download_dir=None,
+                        wheel_download_dir=options.wheel_dir,
+                        progress_bar=options.progress_bar,
+                        build_isolation=options.build_isolation,
+                        req_tracker=req_tracker,
+                    )
+
+                    resolver = Resolver(
+                        preparer=preparer,
+                        finder=finder,
+                        session=session,
+                        wheel_cache=wheel_cache,
+                        use_user_site=False,
+                        upgrade_strategy="to-satisfy-only",
+                        force_reinstall=False,
+                        ignore_dependencies=options.ignore_dependencies,
+                        ignore_requires_python=options.ignore_requires_python,
+                        ignore_installed=True,
+                        isolated=options.isolated_mode,
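+                        # use_pep517 selects between the legacy setup.py
+                        # code path and the PEP 517 build-backend path.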
+                        use_pep517=options.use_pep517
+                    )
+                    resolver.resolve(requirement_set)
+
+                    # build wheels
+                    wb = WheelBuilder(
+                        finder, preparer, wheel_cache,
+                        build_options=options.build_options or [],
+                        global_options=options.global_options or [],
+                        no_clean=options.no_clean,
+                    )
+                    build_failures = wb.build(
+                        requirement_set.requirements.values(), session=session,
+                    )
+                    if len(build_failures) != 0:
+                        raise CommandError(
+                            "Failed to build one or more wheels"
+                        )
+                except PreviousBuildDirError:
+                    options.no_clean = True
+                    raise
+                finally:
+                    if not options.no_clean:
+                        requirement_set.cleanup_files()
+                        wheel_cache.cleanup()
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/commands/wheel.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/wheel.pyc
new file mode 100644
index 0000000..59a46ff
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/commands/wheel.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/configuration.py b/project/venv/lib/python2.7/site-packages/pip/_internal/configuration.py
new file mode 100644
index 0000000..fe6df9b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/configuration.py
@@ -0,0 +1,387 @@
+"""Configuration management setup
+
+Some terminology:
+- name
+  As written in config files.
+- value
+  Value associated with a name
+- key
+  Name combined with its section (section.name)
+- variant
+  A single word describing where the configuration key-value pair came from
+"""
+
+import locale
+import logging
+import os
+
+from pip._vendor import six
+from pip._vendor.six.moves import configparser
+
+from pip._internal.exceptions import (
+    ConfigurationError, ConfigurationFileCouldNotBeLoaded,
+)
+from pip._internal.locations import (
+    legacy_config_file, new_config_file, running_under_virtualenv,
+    site_config_files, venv_config_file,
+)
+from pip._internal.utils.misc import ensure_dir, enum
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import (  # noqa: F401
+        Any, Dict, Iterable, List, NewType, Optional, Tuple
+    )
+
+    RawConfigParser = configparser.RawConfigParser  # Shorthand
+    Kind = NewType("Kind", str)
+
+logger = logging.getLogger(__name__)
+
+
+# NOTE: Maybe use the optionx attribute to normalize keynames.
+def _normalize_name(name):
+    # type: (str) -> str
+    """Make a name consistent regardless of source (environment or file)
+    """
+    name = name.lower().replace('_', '-')
+    if name.startswith('--'):
+        name = name[2:]  # only prefer long opts
+    return name
+
+
+def _disassemble_key(name):
+    # type: (str) -> List[str]
+    return name.split(".", 1)
+
+
+# The kinds of configurations there are.
+kinds = enum(
+    USER="user",        # User Specific
+    GLOBAL="global",    # System Wide
+    VENV="venv",        # Virtual Environment Specific
+    ENV="env",          # from PIP_CONFIG_FILE
+    ENV_VAR="env-var",  # from Environment Variables
+)
+
+
+class Configuration(object):
+    """Handles management of configuration.
+
+    Provides an interface to accessing and managing configuration files.
+
+    This class provides an API that takes "section.key-name" style
+    keys and stores the value associated with it as "key-name" under the
+    section "section".
+
+    This allows for a clean interface wherein both the section and the
+    key-name are preserved in an easy-to-manage form in the configuration
+    files.
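+
+    For example, the key "global.index-url" is stored as "index-url"
+    under the "global" section.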
+ """ + + def __init__(self, isolated, load_only=None): + # type: (bool, Kind) -> None + super(Configuration, self).__init__() + + _valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.VENV, None] + if load_only not in _valid_load_only: + raise ConfigurationError( + "Got invalid value for load_only - should be one of {}".format( + ", ".join(map(repr, _valid_load_only[:-1])) + ) + ) + self.isolated = isolated # type: bool + self.load_only = load_only # type: Optional[Kind] + + # The order here determines the override order. + self._override_order = [ + kinds.GLOBAL, kinds.USER, kinds.VENV, kinds.ENV, kinds.ENV_VAR + ] + + self._ignore_env_names = ["version", "help"] + + # Because we keep track of where we got the data from + self._parsers = { + variant: [] for variant in self._override_order + } # type: Dict[Kind, List[Tuple[str, RawConfigParser]]] + self._config = { + variant: {} for variant in self._override_order + } # type: Dict[Kind, Dict[str, Any]] + self._modified_parsers = [] # type: List[Tuple[str, RawConfigParser]] + + def load(self): + # type: () -> None + """Loads configuration from configuration files and environment + """ + self._load_config_files() + if not self.isolated: + self._load_environment_vars() + + def get_file_to_edit(self): + # type: () -> Optional[str] + """Returns the file with highest priority in configuration + """ + assert self.load_only is not None, \ + "Need to be specified a file to be editing" + + try: + return self._get_parser_to_modify()[0] + except IndexError: + return None + + def items(self): + # type: () -> Iterable[Tuple[str, Any]] + """Returns key-value pairs like dict.items() representing the loaded + configuration + """ + return self._dictionary.items() + + def get_value(self, key): + # type: (str) -> Any + """Get a value from the configuration. + """ + try: + return self._dictionary[key] + except KeyError: + raise ConfigurationError("No such key - {}".format(key)) + + def set_value(self, key, value): + # type: (str, Any) -> None + """Modify a value in the configuration. + """ + self._ensure_have_load_only() + + fname, parser = self._get_parser_to_modify() + + if parser is not None: + section, name = _disassemble_key(key) + + # Modify the parser and the configuration + if not parser.has_section(section): + parser.add_section(section) + parser.set(section, name, value) + + self._config[self.load_only][key] = value + self._mark_as_modified(fname, parser) + + def unset_value(self, key): + # type: (str) -> None + """Unset a value in the configuration. + """ + self._ensure_have_load_only() + + if key not in self._config[self.load_only]: + raise ConfigurationError("No such key - {}".format(key)) + + fname, parser = self._get_parser_to_modify() + + if parser is not None: + section, name = _disassemble_key(key) + + # Remove the key in the parser + modified_something = False + if parser.has_section(section): + # Returns whether the option was removed or not + modified_something = parser.remove_option(section, name) + + if modified_something: + # name removed from parser, section may now be empty + section_iter = iter(parser.items(section)) + try: + val = six.next(section_iter) + except StopIteration: + val = None + + if val is None: + parser.remove_section(section) + + self._mark_as_modified(fname, parser) + else: + raise ConfigurationError( + "Fatal Internal error [id=1]. Please report as a bug." + ) + + del self._config[self.load_only][key] + + def save(self): + # type: () -> None + """Save the currentin-memory state. 
+ """ + self._ensure_have_load_only() + + for fname, parser in self._modified_parsers: + logger.info("Writing to %s", fname) + + # Ensure directory exists. + ensure_dir(os.path.dirname(fname)) + + with open(fname, "w") as f: + parser.write(f) # type: ignore + + # + # Private routines + # + + def _ensure_have_load_only(self): + # type: () -> None + if self.load_only is None: + raise ConfigurationError("Needed a specific file to be modifying.") + logger.debug("Will be working with %s variant only", self.load_only) + + @property + def _dictionary(self): + # type: () -> Dict[str, Any] + """A dictionary representing the loaded configuration. + """ + # NOTE: Dictionaries are not populated if not loaded. So, conditionals + # are not needed here. + retval = {} + + for variant in self._override_order: + retval.update(self._config[variant]) + + return retval + + def _load_config_files(self): + # type: () -> None + """Loads configuration from configuration files + """ + config_files = dict(self._iter_config_files()) + if config_files[kinds.ENV][0:1] == [os.devnull]: + logger.debug( + "Skipping loading configuration files due to " + "environment's PIP_CONFIG_FILE being os.devnull" + ) + return + + for variant, files in config_files.items(): + for fname in files: + # If there's specific variant set in `load_only`, load only + # that variant, not the others. + if self.load_only is not None and variant != self.load_only: + logger.debug( + "Skipping file '%s' (variant: %s)", fname, variant + ) + continue + + parser = self._load_file(variant, fname) + + # Keeping track of the parsers used + self._parsers[variant].append((fname, parser)) + + def _load_file(self, variant, fname): + # type: (Kind, str) -> RawConfigParser + logger.debug("For variant '%s', will try loading '%s'", variant, fname) + parser = self._construct_parser(fname) + + for section in parser.sections(): + items = parser.items(section) + self._config[variant].update(self._normalized_keys(section, items)) + + return parser + + def _construct_parser(self, fname): + # type: (str) -> RawConfigParser + parser = configparser.RawConfigParser() + # If there is no such file, don't bother reading it but create the + # parser anyway, to hold the data. + # Doing this is useful when modifying and saving files, where we don't + # need to construct a parser. + if os.path.exists(fname): + try: + parser.read(fname) + except UnicodeDecodeError: + # See https://github.com/pypa/pip/issues/4963 + raise ConfigurationFileCouldNotBeLoaded( + reason="contains invalid {} characters".format( + locale.getpreferredencoding(False) + ), + fname=fname, + ) + except configparser.Error as error: + # See https://github.com/pypa/pip/issues/4893 + raise ConfigurationFileCouldNotBeLoaded(error=error) + return parser + + def _load_environment_vars(self): + # type: () -> None + """Loads configuration from environment variables + """ + self._config[kinds.ENV_VAR].update( + self._normalized_keys(":env:", self._get_environ_vars()) + ) + + def _normalized_keys(self, section, items): + # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any] + """Normalizes items to construct a dictionary with normalized keys. + + This routine is where the names become keys and are made the same + regardless of source - configuration files or environment. + """ + normalized = {} + for name, val in items: + key = section + "." 
+            key = section + "." + _normalize_name(name)
+            normalized[key] = val
+        return normalized
+
+    def _get_environ_vars(self):
+        # type: () -> Iterable[Tuple[str, str]]
+        """Returns a generator with all environment variables with the
+        prefix PIP_"""
+        for key, val in os.environ.items():
+            should_be_yielded = (
+                key.startswith("PIP_") and
+                key[4:].lower() not in self._ignore_env_names
+            )
+            if should_be_yielded:
+                yield key[4:].lower(), val
+
+    # XXX: This is patched in the tests.
+    def _iter_config_files(self):
+        # type: () -> Iterable[Tuple[Kind, List[str]]]
+        """Yields variant and configuration files associated with it.
+
+        This should be treated like items of a dictionary.
+        """
+        # SMELL: Move the conditions out of this function
+
+        # environment variables have the lowest priority
+        config_file = os.environ.get('PIP_CONFIG_FILE', None)
+        if config_file is not None:
+            yield kinds.ENV, [config_file]
+        else:
+            yield kinds.ENV, []
+
+        # at the base we have any global configuration
+        yield kinds.GLOBAL, list(site_config_files)
+
+        # per-user configuration next
+        should_load_user_config = not self.isolated and not (
+            config_file and os.path.exists(config_file)
+        )
+        if should_load_user_config:
+            # The legacy config file is overridden by the new config file
+            yield kinds.USER, [legacy_config_file, new_config_file]
+
+        # finally, virtualenv configuration, which trumps the others
+        if running_under_virtualenv():
+            yield kinds.VENV, [venv_config_file]
+
+    def _get_parser_to_modify(self):
+        # type: () -> Tuple[str, RawConfigParser]
+        # Determine which parser to modify
+        parsers = self._parsers[self.load_only]
+        if not parsers:
+            # This should not happen if everything works correctly.
+            raise ConfigurationError(
+                "Fatal Internal error [id=2]. Please report as a bug."
+            )
+
+        # Use the highest priority parser.
+        return parsers[-1]
+
+    # XXX: This is patched in the tests.
+ def _mark_as_modified(self, fname, parser): + # type: (str, RawConfigParser) -> None + file_parser_tuple = (fname, parser) + if file_parser_tuple not in self._modified_parsers: + self._modified_parsers.append(file_parser_tuple) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/configuration.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/configuration.pyc new file mode 100644 index 0000000..2f8ddd1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/configuration.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/download.py b/project/venv/lib/python2.7/site-packages/pip/_internal/download.py new file mode 100644 index 0000000..2bbe176 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/download.py @@ -0,0 +1,971 @@ +from __future__ import absolute_import + +import cgi +import email.utils +import getpass +import json +import logging +import mimetypes +import os +import platform +import re +import shutil +import sys + +from pip._vendor import requests, six, urllib3 +from pip._vendor.cachecontrol import CacheControlAdapter +from pip._vendor.cachecontrol.caches import FileCache +from pip._vendor.lockfile import LockError +from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter +from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth +from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response +from pip._vendor.requests.structures import CaseInsensitiveDict +from pip._vendor.requests.utils import get_netrc_auth +# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is +# why we ignore the type on this import +from pip._vendor.six.moves import xmlrpc_client # type: ignore +from pip._vendor.six.moves.urllib import parse as urllib_parse +from pip._vendor.six.moves.urllib import request as urllib_request +from pip._vendor.urllib3.util import IS_PYOPENSSL + +import pip +from pip._internal.exceptions import HashMismatch, InstallationError +from pip._internal.locations import write_delete_marker_file +from pip._internal.models.index import PyPI +from pip._internal.utils.encoding import auto_decode +from pip._internal.utils.filesystem import check_path_owner +from pip._internal.utils.glibc import libc_ver +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + ARCHIVE_EXTENSIONS, ask_path_exists, backup_dir, call_subprocess, consume, + display_path, format_size, get_installed_version, rmtree, + split_auth_from_netloc, splitext, unpack_file, +) +from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pip._internal.utils.ui import DownloadProgressProvider +from pip._internal.vcs import vcs + +if MYPY_CHECK_RUNNING: + from typing import ( # noqa: F401 + Optional, Tuple, Dict, IO, Text, Union + ) + from pip._internal.models.link import Link # noqa: F401 + from pip._internal.utils.hashes import Hashes # noqa: F401 + from pip._internal.vcs import AuthInfo # noqa: F401 + +try: + import ssl # noqa +except ImportError: + ssl = None + +HAS_TLS = (ssl is not None) or IS_PYOPENSSL + +__all__ = ['get_file_content', + 'is_url', 'url_to_path', 'path_to_url', + 'is_archive_file', 'unpack_vcs_link', + 'unpack_file_url', 'is_vcs_url', 'is_file_url', + 'unpack_http_url', 'unpack_url'] + + +logger = logging.getLogger(__name__) + + +def user_agent(): + """ + Return a string representing the user 
agent. + """ + data = { + "installer": {"name": "pip", "version": pip.__version__}, + "python": platform.python_version(), + "implementation": { + "name": platform.python_implementation(), + }, + } + + if data["implementation"]["name"] == 'CPython': + data["implementation"]["version"] = platform.python_version() + elif data["implementation"]["name"] == 'PyPy': + if sys.pypy_version_info.releaselevel == 'final': + pypy_version_info = sys.pypy_version_info[:3] + else: + pypy_version_info = sys.pypy_version_info + data["implementation"]["version"] = ".".join( + [str(x) for x in pypy_version_info] + ) + elif data["implementation"]["name"] == 'Jython': + # Complete Guess + data["implementation"]["version"] = platform.python_version() + elif data["implementation"]["name"] == 'IronPython': + # Complete Guess + data["implementation"]["version"] = platform.python_version() + + if sys.platform.startswith("linux"): + from pip._vendor import distro + distro_infos = dict(filter( + lambda x: x[1], + zip(["name", "version", "id"], distro.linux_distribution()), + )) + libc = dict(filter( + lambda x: x[1], + zip(["lib", "version"], libc_ver()), + )) + if libc: + distro_infos["libc"] = libc + if distro_infos: + data["distro"] = distro_infos + + if sys.platform.startswith("darwin") and platform.mac_ver()[0]: + data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]} + + if platform.system(): + data.setdefault("system", {})["name"] = platform.system() + + if platform.release(): + data.setdefault("system", {})["release"] = platform.release() + + if platform.machine(): + data["cpu"] = platform.machine() + + if HAS_TLS: + data["openssl_version"] = ssl.OPENSSL_VERSION + + setuptools_version = get_installed_version("setuptools") + if setuptools_version is not None: + data["setuptools_version"] = setuptools_version + + return "{data[installer][name]}/{data[installer][version]} {json}".format( + data=data, + json=json.dumps(data, separators=(",", ":"), sort_keys=True), + ) + + +class MultiDomainBasicAuth(AuthBase): + + def __init__(self, prompting=True): + # type: (bool) -> None + self.prompting = prompting + self.passwords = {} # type: Dict[str, AuthInfo] + + def __call__(self, req): + parsed = urllib_parse.urlparse(req.url) + + # Split the credentials from the netloc. 
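+        # e.g. "user:pass@example.com" -> ("example.com", ("user", "pass"))
+        # (illustrative host; the tuple shape is what matters here)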
+        netloc, url_user_password = split_auth_from_netloc(parsed.netloc)
+
+        # Set the url of the request to the url without any credentials
+        req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
+
+        # Use any stored credentials that we have for this netloc
+        username, password = self.passwords.get(netloc, (None, None))
+
+        # Use the credentials embedded in the url if we have none stored
+        if username is None:
+            username, password = url_user_password
+
+        # Get creds from netrc if we still don't have them
+        if username is None and password is None:
+            netrc_auth = get_netrc_auth(req.url)
+            username, password = netrc_auth if netrc_auth else (None, None)
+
+        if username or password:
+            # Store the username and password
+            self.passwords[netloc] = (username, password)
+
+            # Send the basic auth with this request
+            req = HTTPBasicAuth(username or "", password or "")(req)
+
+        # Attach a hook to handle 401 responses
+        req.register_hook("response", self.handle_401)
+
+        return req
+
+    def handle_401(self, resp, **kwargs):
+        # We only care about 401 responses; anything else we want to just
+        # pass through the actual response
+        if resp.status_code != 401:
+            return resp
+
+        # We are not able to prompt the user so simply return the response
+        if not self.prompting:
+            return resp
+
+        parsed = urllib_parse.urlparse(resp.url)
+
+        # Prompt the user for a new username and password
+        username = six.moves.input("User for %s: " % parsed.netloc)
+        password = getpass.getpass("Password: ")
+
+        # Store the new username and password to use for future requests
+        if username or password:
+            self.passwords[parsed.netloc] = (username, password)
+
+        # Consume content and release the original connection to allow our new
+        # request to reuse the same one.
+        resp.content
+        resp.raw.release_conn()
+
+        # Add our new username and password to the request
+        req = HTTPBasicAuth(username or "", password or "")(resp.request)
+        req.register_hook("response", self.warn_on_401)
+
+        # Send our new request
+        new_resp = resp.connection.send(req, **kwargs)
+        new_resp.history.append(resp)
+
+        return new_resp
+
+    def warn_on_401(self, resp, **kwargs):
+        # warn user that they provided incorrect credentials
+        if resp.status_code == 401:
+            logger.warning('401 Error, Credentials not correct for %s',
+                           resp.request.url)
+
+
+class LocalFSAdapter(BaseAdapter):
+
+    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
+             proxies=None):
+        pathname = url_to_path(request.url)
+
+        resp = Response()
+        resp.status_code = 200
+        resp.url = request.url
+
+        try:
+            stats = os.stat(pathname)
+        except OSError as exc:
+            resp.status_code = 404
+            resp.raw = exc
+        else:
+            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
+            content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
+            resp.headers = CaseInsensitiveDict({
+                "Content-Type": content_type,
+                "Content-Length": stats.st_size,
+                "Last-Modified": modified,
+            })
+
+            resp.raw = open(pathname, "rb")
+            resp.close = resp.raw.close
+
+        return resp
+
+    def close(self):
+        pass
+
+
+class SafeFileCache(FileCache):
+    """
+    A file based cache which is safe to use even when the target directory may
+    not be accessible or writable.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(SafeFileCache, self).__init__(*args, **kwargs)
+
+        # Check to ensure that the directory containing our cache directory
+        # is owned by the user currently executing pip. If it does not exist
+        # we will check the parent directory until we find one that does exist.
+ # If it is not owned by the user executing pip then we will disable + # the cache and log a warning. + if not check_path_owner(self.directory): + logger.warning( + "The directory '%s' or its parent directory is not owned by " + "the current user and the cache has been disabled. Please " + "check the permissions and owner of that directory. If " + "executing pip with sudo, you may want sudo's -H flag.", + self.directory, + ) + + # Set our directory to None to disable the Cache + self.directory = None + + def get(self, *args, **kwargs): + # If we don't have a directory, then the cache should be a no-op. + if self.directory is None: + return + + try: + return super(SafeFileCache, self).get(*args, **kwargs) + except (LockError, OSError, IOError): + # We intentionally silence this error, if we can't access the cache + # then we can just skip caching and process the request as if + # caching wasn't enabled. + pass + + def set(self, *args, **kwargs): + # If we don't have a directory, then the cache should be a no-op. + if self.directory is None: + return + + try: + return super(SafeFileCache, self).set(*args, **kwargs) + except (LockError, OSError, IOError): + # We intentionally silence this error, if we can't access the cache + # then we can just skip caching and process the request as if + # caching wasn't enabled. + pass + + def delete(self, *args, **kwargs): + # If we don't have a directory, then the cache should be a no-op. + if self.directory is None: + return + + try: + return super(SafeFileCache, self).delete(*args, **kwargs) + except (LockError, OSError, IOError): + # We intentionally silence this error, if we can't access the cache + # then we can just skip caching and process the request as if + # caching wasn't enabled. + pass + + +class InsecureHTTPAdapter(HTTPAdapter): + + def cert_verify(self, conn, url, verify, cert): + conn.cert_reqs = 'CERT_NONE' + conn.ca_certs = None + + +class PipSession(requests.Session): + + timeout = None # type: Optional[int] + + def __init__(self, *args, **kwargs): + retries = kwargs.pop("retries", 0) + cache = kwargs.pop("cache", None) + insecure_hosts = kwargs.pop("insecure_hosts", []) + + super(PipSession, self).__init__(*args, **kwargs) + + # Attach our User Agent to the request + self.headers["User-Agent"] = user_agent() + + # Attach our Authentication handler to the session + self.auth = MultiDomainBasicAuth() + + # Create our urllib3.Retry instance which will allow us to customize + # how we handle retries. + retries = urllib3.Retry( + # Set the total number of retries that a particular request can + # have. + total=retries, + + # A 503 error from PyPI typically means that the Fastly -> Origin + # connection got interrupted in some way. A 503 error in general + # is typically considered a transient error so we'll go ahead and + # retry it. + # A 500 may indicate transient error in Amazon S3 + # A 520 or 527 - may indicate transient error in CloudFlare + status_forcelist=[500, 503, 520, 527], + + # Add a small amount of back off between failed requests in + # order to prevent hammering the service. + backoff_factor=0.25, + ) + + # We want to _only_ cache responses on securely fetched origins. We do + # this because we can't validate the response of an insecurely fetched + # origin, and we don't want someone to be able to poison the cache and + # require manual eviction from the cache to fix it. 
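+        # Consequently, only the "https://" mount below gets the caching
+        # adapter; plain http:// traffic uses the non-caching one.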
+ if cache: + secure_adapter = CacheControlAdapter( + cache=SafeFileCache(cache, use_dir_lock=True), + max_retries=retries, + ) + else: + secure_adapter = HTTPAdapter(max_retries=retries) + + # Our Insecure HTTPAdapter disables HTTPS validation. It does not + # support caching (see above) so we'll use it for all http:// URLs as + # well as any https:// host that we've marked as ignoring TLS errors + # for. + insecure_adapter = InsecureHTTPAdapter(max_retries=retries) + + self.mount("https://", secure_adapter) + self.mount("http://", insecure_adapter) + + # Enable file:// urls + self.mount("file://", LocalFSAdapter()) + + # We want to use a non-validating adapter for any requests which are + # deemed insecure. + for host in insecure_hosts: + self.mount("https://{}/".format(host), insecure_adapter) + + def request(self, method, url, *args, **kwargs): + # Allow setting a default timeout on a session + kwargs.setdefault("timeout", self.timeout) + + # Dispatch the actual request + return super(PipSession, self).request(method, url, *args, **kwargs) + + +def get_file_content(url, comes_from=None, session=None): + # type: (str, Optional[str], Optional[PipSession]) -> Tuple[str, Text] + """Gets the content of a file; it may be a filename, file: URL, or + http: URL. Returns (location, content). Content is unicode. + + :param url: File path or url. + :param comes_from: Origin description of requirements. + :param session: Instance of pip.download.PipSession. + """ + if session is None: + raise TypeError( + "get_file_content() missing 1 required keyword argument: 'session'" + ) + + match = _scheme_re.search(url) + if match: + scheme = match.group(1).lower() + if (scheme == 'file' and comes_from and + comes_from.startswith('http')): + raise InstallationError( + 'Requirements file %s references URL %s, which is local' + % (comes_from, url)) + if scheme == 'file': + path = url.split(':', 1)[1] + path = path.replace('\\', '/') + match = _url_slash_drive_re.match(path) + if match: + path = match.group(1) + ':' + path.split('|', 1)[1] + path = urllib_parse.unquote(path) + if path.startswith('/'): + path = '/' + path.lstrip('/') + url = path + else: + # FIXME: catch some errors + resp = session.get(url) + resp.raise_for_status() + return resp.url, resp.text + try: + with open(url, 'rb') as f: + content = auto_decode(f.read()) + except IOError as exc: + raise InstallationError( + 'Could not open requirements file: %s' % str(exc) + ) + return url, content + + +_scheme_re = re.compile(r'^(http|https|file):', re.I) +_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I) + + +def is_url(name): + # type: (Union[str, Text]) -> bool + """Returns true if the name looks like a URL""" + if ':' not in name: + return False + scheme = name.split(':', 1)[0].lower() + return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes + + +def url_to_path(url): + # type: (str) -> str + """ + Convert a file: URL to a path. + """ + assert url.startswith('file:'), ( + "You can only turn file: urls into filenames (not %r)" % url) + + _, netloc, path, _, _ = urllib_parse.urlsplit(url) + + # if we have a UNC path, prepend UNC share notation + if netloc: + netloc = '\\\\' + netloc + + path = urllib_request.url2pathname(netloc + path) + return path + + +def path_to_url(path): + # type: (Union[str, Text]) -> str + """ + Convert a path to a file: URL. The path will be made absolute and have + quoted path parts. 
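+
+    For example, "/tmp/some file" becomes "file:///tmp/some%20file".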
+ """ + path = os.path.normpath(os.path.abspath(path)) + url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path)) + return url + + +def is_archive_file(name): + # type: (str) -> bool + """Return True if `name` is a considered as an archive file.""" + ext = splitext(name)[1].lower() + if ext in ARCHIVE_EXTENSIONS: + return True + return False + + +def unpack_vcs_link(link, location): + vcs_backend = _get_used_vcs_backend(link) + vcs_backend.unpack(location) + + +def _get_used_vcs_backend(link): + for backend in vcs.backends: + if link.scheme in backend.schemes: + vcs_backend = backend(link.url) + return vcs_backend + + +def is_vcs_url(link): + # type: (Link) -> bool + return bool(_get_used_vcs_backend(link)) + + +def is_file_url(link): + # type: (Link) -> bool + return link.url.lower().startswith('file:') + + +def is_dir_url(link): + # type: (Link) -> bool + """Return whether a file:// Link points to a directory. + + ``link`` must not have any other scheme but file://. Call is_file_url() + first. + + """ + link_path = url_to_path(link.url_without_fragment) + return os.path.isdir(link_path) + + +def _progress_indicator(iterable, *args, **kwargs): + return iterable + + +def _download_url( + resp, # type: Response + link, # type: Link + content_file, # type: IO + hashes, # type: Hashes + progress_bar # type: str +): + # type: (...) -> None + try: + total_length = int(resp.headers['content-length']) + except (ValueError, KeyError, TypeError): + total_length = 0 + + cached_resp = getattr(resp, "from_cache", False) + if logger.getEffectiveLevel() > logging.INFO: + show_progress = False + elif cached_resp: + show_progress = False + elif total_length > (40 * 1000): + show_progress = True + elif not total_length: + show_progress = True + else: + show_progress = False + + show_url = link.show_url + + def resp_read(chunk_size): + try: + # Special case for urllib3. + for chunk in resp.raw.stream( + chunk_size, + # We use decode_content=False here because we don't + # want urllib3 to mess with the raw bytes we get + # from the server. If we decompress inside of + # urllib3 then we cannot verify the checksum + # because the checksum will be of the compressed + # file. This breakage will only occur if the + # server adds a Content-Encoding header, which + # depends on how the server was configured: + # - Some servers will notice that the file isn't a + # compressible file and will leave the file alone + # and with an empty Content-Encoding + # - Some servers will notice that the file is + # already compressed and will leave the file + # alone and will add a Content-Encoding: gzip + # header + # - Some servers won't notice anything at all and + # will take a file that's already been compressed + # and compress it again and set the + # Content-Encoding: gzip header + # + # By setting this not to decode automatically we + # hope to eliminate problems with the second case. + decode_content=False): + yield chunk + except AttributeError: + # Standard file-like object. 
+            while True:
+                chunk = resp.raw.read(chunk_size)
+                if not chunk:
+                    break
+                yield chunk
+
+    def written_chunks(chunks):
+        for chunk in chunks:
+            content_file.write(chunk)
+            yield chunk
+
+    progress_indicator = _progress_indicator
+
+    if link.netloc == PyPI.netloc:
+        url = show_url
+    else:
+        url = link.url_without_fragment
+
+    if show_progress:  # We don't show progress on cached responses
+        progress_indicator = DownloadProgressProvider(progress_bar,
+                                                      max=total_length)
+        if total_length:
+            logger.info("Downloading %s (%s)", url, format_size(total_length))
+        else:
+            logger.info("Downloading %s", url)
+    elif cached_resp:
+        logger.info("Using cached %s", url)
+    else:
+        logger.info("Downloading %s", url)
+
+    logger.debug('Downloading from URL %s', link)
+
+    downloaded_chunks = written_chunks(
+        progress_indicator(
+            resp_read(CONTENT_CHUNK_SIZE),
+            CONTENT_CHUNK_SIZE
+        )
+    )
+    if hashes:
+        hashes.check_against_chunks(downloaded_chunks)
+    else:
+        consume(downloaded_chunks)
+
+
+def _copy_file(filename, location, link):
+    copy = True
+    download_location = os.path.join(location, link.filename)
+    if os.path.exists(download_location):
+        response = ask_path_exists(
+            'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort' %
+            display_path(download_location), ('i', 'w', 'b', 'a'))
+        if response == 'i':
+            copy = False
+        elif response == 'w':
+            logger.warning('Deleting %s', display_path(download_location))
+            os.remove(download_location)
+        elif response == 'b':
+            dest_file = backup_dir(download_location)
+            logger.warning(
+                'Backing up %s to %s',
+                display_path(download_location),
+                display_path(dest_file),
+            )
+            shutil.move(download_location, dest_file)
+        elif response == 'a':
+            sys.exit(-1)
+    if copy:
+        shutil.copy(filename, download_location)
+        logger.info('Saved %s', display_path(download_location))
+
+
+def unpack_http_url(
+    link,  # type: Link
+    location,  # type: str
+    download_dir=None,  # type: Optional[str]
+    session=None,  # type: Optional[PipSession]
+    hashes=None,  # type: Optional[Hashes]
+    progress_bar="on"  # type: str
+):
+    # type: (...) -> None
+    if session is None:
+        raise TypeError(
+            "unpack_http_url() missing 1 required keyword argument: 'session'"
+        )
+
+    with TempDirectory(kind="unpack") as temp_dir:
+        # If a download dir is specified, is the file already downloaded there?
+        already_downloaded_path = None
+        if download_dir:
+            already_downloaded_path = _check_download_dir(link,
+                                                          download_dir,
+                                                          hashes)
+
+        if already_downloaded_path:
+            from_path = already_downloaded_path
+            content_type = mimetypes.guess_type(from_path)[0]
+        else:
+            # let's download to a tmp dir
+            from_path, content_type = _download_http_url(link,
+                                                         session,
+                                                         temp_dir.path,
+                                                         hashes,
+                                                         progress_bar)
+
+        # unpack the archive to the build dir location. even when only
+        # downloading archives, they have to be unpacked to parse dependencies
+        unpack_file(from_path, location, content_type, link)
+
+        # a download dir is specified; let's copy the archive there
+        if download_dir and not already_downloaded_path:
+            _copy_file(from_path, download_dir, link)
+
+        if not already_downloaded_path:
+            os.unlink(from_path)
+
+
+def unpack_file_url(
+    link,  # type: Link
+    location,  # type: str
+    download_dir=None,  # type: Optional[str]
+    hashes=None  # type: Optional[Hashes]
+):
+    # type: (...) -> None
+    """Unpack link into location.
+
+    If download_dir is provided and link points to a file, make a copy
+    of the link file inside download_dir.
+ """ + link_path = url_to_path(link.url_without_fragment) + + # If it's a url to a local directory + if is_dir_url(link): + if os.path.isdir(location): + rmtree(location) + shutil.copytree(link_path, location, symlinks=True) + if download_dir: + logger.info('Link is a directory, ignoring download_dir') + return + + # If --require-hashes is off, `hashes` is either empty, the + # link's embedded hash, or MissingHashes; it is required to + # match. If --require-hashes is on, we are satisfied by any + # hash in `hashes` matching: a URL-based or an option-based + # one; no internet-sourced hash will be in `hashes`. + if hashes: + hashes.check_against_path(link_path) + + # If a download dir is specified, is the file already there and valid? + already_downloaded_path = None + if download_dir: + already_downloaded_path = _check_download_dir(link, + download_dir, + hashes) + + if already_downloaded_path: + from_path = already_downloaded_path + else: + from_path = link_path + + content_type = mimetypes.guess_type(from_path)[0] + + # unpack the archive to the build dir location. even when only downloading + # archives, they have to be unpacked to parse dependencies + unpack_file(from_path, location, content_type, link) + + # a download dir is specified and not already downloaded + if download_dir and not already_downloaded_path: + _copy_file(from_path, download_dir, link) + + +def _copy_dist_from_dir(link_path, location): + """Copy distribution files in `link_path` to `location`. + + Invoked when user requests to install a local directory. E.g.: + + pip install . + pip install ~/dev/git-repos/python-prompt-toolkit + + """ + + # Note: This is currently VERY SLOW if you have a lot of data in the + # directory, because it copies everything with `shutil.copytree`. + # What it should really do is build an sdist and install that. + # See https://github.com/pypa/pip/issues/2195 + + if os.path.isdir(location): + rmtree(location) + + # build an sdist + setup_py = 'setup.py' + sdist_args = [sys.executable] + sdist_args.append('-c') + sdist_args.append(SETUPTOOLS_SHIM % setup_py) + sdist_args.append('sdist') + sdist_args += ['--dist-dir', location] + logger.info('Running setup.py sdist for %s', link_path) + + with indent_log(): + call_subprocess(sdist_args, cwd=link_path, show_stdout=False) + + # unpack sdist into `location` + sdist = os.path.join(location, os.listdir(location)[0]) + logger.info('Unpacking sdist %s into %s', sdist, location) + unpack_file(sdist, location, content_type=None, link=None) + + +class PipXmlrpcTransport(xmlrpc_client.Transport): + """Provide a `xmlrpclib.Transport` implementation via a `PipSession` + object. 
+ """ + + def __init__(self, index_url, session, use_datetime=False): + xmlrpc_client.Transport.__init__(self, use_datetime) + index_parts = urllib_parse.urlparse(index_url) + self._scheme = index_parts.scheme + self._session = session + + def request(self, host, handler, request_body, verbose=False): + parts = (self._scheme, host, handler, None, None, None) + url = urllib_parse.urlunparse(parts) + try: + headers = {'Content-Type': 'text/xml'} + response = self._session.post(url, data=request_body, + headers=headers, stream=True) + response.raise_for_status() + self.verbose = verbose + return self.parse_response(response.raw) + except requests.HTTPError as exc: + logger.critical( + "HTTP error %s while getting %s", + exc.response.status_code, url, + ) + raise + + +def unpack_url( + link, # type: Optional[Link] + location, # type: Optional[str] + download_dir=None, # type: Optional[str] + only_download=False, # type: bool + session=None, # type: Optional[PipSession] + hashes=None, # type: Optional[Hashes] + progress_bar="on" # type: str +): + # type: (...) -> None + """Unpack link. + If link is a VCS link: + if only_download, export into download_dir and ignore location + else unpack into location + for other types of link: + - unpack into location + - if download_dir, copy the file into download_dir + - if only_download, mark location for deletion + + :param hashes: A Hashes object, one of whose embedded hashes must match, + or HashMismatch will be raised. If the Hashes is empty, no matches are + required, and unhashable types of requirements (like VCS ones, which + would ordinarily raise HashUnsupported) are allowed. + """ + # non-editable vcs urls + if is_vcs_url(link): + unpack_vcs_link(link, location) + + # file urls + elif is_file_url(link): + unpack_file_url(link, location, download_dir, hashes=hashes) + + # http urls + else: + if session is None: + session = PipSession() + + unpack_http_url( + link, + location, + download_dir, + session, + hashes=hashes, + progress_bar=progress_bar + ) + if only_download: + write_delete_marker_file(location) + + +def _download_http_url( + link, # type: Link + session, # type: PipSession + temp_dir, # type: str + hashes, # type: Hashes + progress_bar # type: str +): + # type: (...) -> Tuple[str, str] + """Download link url into temp_dir using provided session""" + target_url = link.url.split('#', 1)[0] + try: + resp = session.get( + target_url, + # We use Accept-Encoding: identity here because requests + # defaults to accepting compressed responses. This breaks in + # a variety of ways depending on how the server is configured. + # - Some servers will notice that the file isn't a compressible + # file and will leave the file alone and with an empty + # Content-Encoding + # - Some servers will notice that the file is already + # compressed and will leave the file alone and will add a + # Content-Encoding: gzip header + # - Some servers won't notice anything at all and will take + # a file that's already been compressed and compress it again + # and set the Content-Encoding: gzip header + # By setting this to request only the identity encoding We're + # hoping to eliminate the third case. Hopefully there does not + # exist a server which when given a file will notice it is + # already compressed and that you're not asking for a + # compressed file and will then decompress it before sending + # because if that's the case I don't think it'll ever be + # possible to make this work. 
+ headers={"Accept-Encoding": "identity"}, + stream=True, + ) + resp.raise_for_status() + except requests.HTTPError as exc: + logger.critical( + "HTTP error %s while getting %s", exc.response.status_code, link, + ) + raise + + content_type = resp.headers.get('content-type', '') + filename = link.filename # fallback + # Have a look at the Content-Disposition header for a better guess + content_disposition = resp.headers.get('content-disposition') + if content_disposition: + type, params = cgi.parse_header(content_disposition) + # We use ``or`` here because we don't want to use an "empty" value + # from the filename param. + filename = params.get('filename') or filename + ext = splitext(filename)[1] + if not ext: + ext = mimetypes.guess_extension(content_type) + if ext: + filename += ext + if not ext and link.url != resp.url: + ext = os.path.splitext(resp.url)[1] + if ext: + filename += ext + file_path = os.path.join(temp_dir, filename) + with open(file_path, 'wb') as content_file: + _download_url(resp, link, content_file, hashes, progress_bar) + return file_path, content_type + + +def _check_download_dir(link, download_dir, hashes): + # type: (Link, str, Hashes) -> Optional[str] + """ Check download_dir for previously downloaded file with correct hash + If a correct file is found return its path else None + """ + download_path = os.path.join(download_dir, link.filename) + if os.path.exists(download_path): + # If already downloaded, does its hash match? + logger.info('File was already downloaded %s', download_path) + if hashes: + try: + hashes.check_against_path(download_path) + except HashMismatch: + logger.warning( + 'Previously-downloaded file %s has bad hash. ' + 'Re-downloading.', + download_path + ) + os.unlink(download_path) + return None + return download_path + return None diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/download.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/download.pyc new file mode 100644 index 0000000..3f32949 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/download.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/exceptions.py b/project/venv/lib/python2.7/site-packages/pip/_internal/exceptions.py new file mode 100644 index 0000000..38ceeea --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/exceptions.py @@ -0,0 +1,274 @@ +"""Exceptions used throughout package""" +from __future__ import absolute_import + +from itertools import chain, groupby, repeat + +from pip._vendor.six import iteritems + +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Optional # noqa: F401 + from pip._internal.req.req_install import InstallRequirement # noqa: F401 + + +class PipError(Exception): + """Base pip exception""" + + +class ConfigurationError(PipError): + """General exception in configuration""" + + +class InstallationError(PipError): + """General exception during installation""" + + +class UninstallationError(PipError): + """General exception during uninstallation""" + + +class DistributionNotFound(InstallationError): + """Raised when a distribution cannot be found to satisfy a requirement""" + + +class RequirementsFileParseError(InstallationError): + """Raised when a general error occurs parsing a requirements file line.""" + + +class BestVersionAlreadyInstalled(PipError): + """Raised when the most up-to-date version of a package is already + installed.""" + + +class BadCommand(PipError): + """Raised 
when virtualenv or a command is not found""" + + +class CommandError(PipError): + """Raised when there is an error in command-line arguments""" + + +class PreviousBuildDirError(PipError): + """Raised when there's a previous conflicting build directory""" + + +class InvalidWheelFilename(InstallationError): + """Invalid wheel filename.""" + + +class UnsupportedWheel(InstallationError): + """Unsupported wheel.""" + + +class HashErrors(InstallationError): + """Multiple HashError instances rolled into one for reporting""" + + def __init__(self): + self.errors = [] + + def append(self, error): + self.errors.append(error) + + def __str__(self): + lines = [] + self.errors.sort(key=lambda e: e.order) + for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__): + lines.append(cls.head) + lines.extend(e.body() for e in errors_of_cls) + if lines: + return '\n'.join(lines) + + def __nonzero__(self): + return bool(self.errors) + + def __bool__(self): + return self.__nonzero__() + + +class HashError(InstallationError): + """ + A failure to verify a package against known-good hashes + + :cvar order: An int sorting hash exception classes by difficulty of + recovery (lower being harder), so the user doesn't bother fretting + about unpinned packages when he has deeper issues, like VCS + dependencies, to deal with. Also keeps error reports in a + deterministic order. + :cvar head: A section heading for display above potentially many + exceptions of this kind + :ivar req: The InstallRequirement that triggered this error. This is + pasted on after the exception is instantiated, because it's not + typically available earlier. + + """ + req = None # type: Optional[InstallRequirement] + head = '' + + def body(self): + """Return a summary of me for display under the heading. + + This default implementation simply prints a description of the + triggering requirement. + + :param req: The InstallRequirement that provoked this error, with + populate_link() having already been called + + """ + return ' %s' % self._requirement_name() + + def __str__(self): + return '%s\n%s' % (self.head, self.body()) + + def _requirement_name(self): + """Return a description of the requirement that triggered me. + + This default implementation returns long description of the req, with + line numbers + + """ + return str(self.req) if self.req else 'unknown package' + + +class VcsHashUnsupported(HashError): + """A hash was provided for a version-control-system-based requirement, but + we don't have a method for hashing those.""" + + order = 0 + head = ("Can't verify hashes for these requirements because we don't " + "have a way to hash version control repositories:") + + +class DirectoryUrlHashUnsupported(HashError): + """A hash was provided for a version-control-system-based requirement, but + we don't have a method for hashing those.""" + + order = 1 + head = ("Can't verify hashes for these file:// requirements because they " + "point to directories:") + + +class HashMissing(HashError): + """A hash was needed for a requirement but is absent.""" + + order = 2 + head = ('Hashes are required in --require-hashes mode, but they are ' + 'missing from some requirements. Here is a list of those ' + 'requirements along with the hashes their downloaded archives ' + 'actually had. Add lines like these to your requirements files to ' + 'prevent tampering. 
(If you did not enable --require-hashes '
+            'manually, note that it turns on automatically when any package '
+            'has a hash.)')
+
+    def __init__(self, gotten_hash):
+        """
+        :param gotten_hash: The hash of the (possibly malicious) archive we
+            just downloaded
+        """
+        self.gotten_hash = gotten_hash
+
+    def body(self):
+        # Dodge circular import.
+        from pip._internal.utils.hashes import FAVORITE_HASH
+
+        package = None
+        if self.req:
+            # In the case of URL-based requirements, display the original URL
+            # seen in the requirements file rather than the package name,
+            # so the output can be directly copied into the requirements file.
+            package = (self.req.original_link if self.req.original_link
+                       # In case someone feeds something downright stupid
+                       # to InstallRequirement's constructor.
+                       else getattr(self.req, 'req', None))
+        return '    %s --hash=%s:%s' % (package or 'unknown package',
+                                        FAVORITE_HASH,
+                                        self.gotten_hash)
+
+
+class HashUnpinned(HashError):
+    """A requirement had a hash specified but was not pinned to a specific
+    version."""
+
+    order = 3
+    head = ('In --require-hashes mode, all requirements must have their '
+            'versions pinned with ==. These do not:')
+
+
+class HashMismatch(HashError):
+    """
+    Distribution file hash values don't match.
+
+    :ivar package_name: The name of the package that triggered the hash
+        mismatch. Feel free to write to this after the exception is raised to
+        improve its error message.
+
+    """
+    order = 4
+    head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS '
+            'FILE. If you have updated the package versions, please update '
+            'the hashes. Otherwise, examine the package contents carefully; '
+            'someone may have tampered with them.')
+
+    def __init__(self, allowed, gots):
+        """
+        :param allowed: A dict of algorithm names pointing to lists of allowed
+            hex digests
+        :param gots: A dict of algorithm names pointing to hashes we
+            actually got from the files under suspicion
+        """
+        self.allowed = allowed
+        self.gots = gots
+
+    def body(self):
+        return '    %s:\n%s' % (self._requirement_name(),
+                                self._hash_comparison())
+
+    def _hash_comparison(self):
+        """
+        Return a comparison of actual and expected hash values.
+
+        Example::
+
+            Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
+                        or 123451234512345123451234512345123451234512345
+            Got        bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
+
+        """
+        def hash_then_or(hash_name):
+            # For now, all the decent hashes have 6-char names, so we can get
+            # away with hard-coding space literals.
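The `chain`/`repeat` pattern this helper returns yields the hash name once and then pads every later line, so the expected digests line up under each other. A stand-alone sketch with hypothetical digests:

    from itertools import chain, repeat

    prefix = chain(["sha256"], repeat("    or"))
    digests = ["deadbeef", "cafebabe"]  # hypothetical digests
    for d in digests:
        print("        Expected %s %s" % (next(prefix), d))
    # Prints:
    #         Expected sha256 deadbeef
    #         Expected     or cafebabe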
+ return chain([hash_name], repeat(' or')) + + lines = [] + for hash_name, expecteds in iteritems(self.allowed): + prefix = hash_then_or(hash_name) + lines.extend((' Expected %s %s' % (next(prefix), e)) + for e in expecteds) + lines.append(' Got %s\n' % + self.gots[hash_name].hexdigest()) + prefix = ' or' + return '\n'.join(lines) + + +class UnsupportedPythonVersion(InstallationError): + """Unsupported python version according to Requires-Python package + metadata.""" + + +class ConfigurationFileCouldNotBeLoaded(ConfigurationError): + """When there are errors while loading a configuration file + """ + + def __init__(self, reason="could not be loaded", fname=None, error=None): + super(ConfigurationFileCouldNotBeLoaded, self).__init__(error) + self.reason = reason + self.fname = fname + self.error = error + + def __str__(self): + if self.fname is not None: + message_part = " in {}.".format(self.fname) + else: + assert self.error is not None + message_part = ".\n{}\n".format(self.error.message) + return "Configuration file {}{}".format(self.reason, message_part) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/exceptions.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/exceptions.pyc new file mode 100644 index 0000000..fd2c98c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/exceptions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/index.py b/project/venv/lib/python2.7/site-packages/pip/_internal/index.py new file mode 100644 index 0000000..9eda3a3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/index.py @@ -0,0 +1,990 @@ +"""Routines related to PyPI, indexes""" +from __future__ import absolute_import + +import cgi +import itertools +import logging +import mimetypes +import os +import posixpath +import re +import sys +from collections import namedtuple + +from pip._vendor import html5lib, requests, six +from pip._vendor.distlib.compat import unescape +from pip._vendor.packaging import specifiers +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.packaging.version import parse as parse_version +from pip._vendor.requests.exceptions import RetryError, SSLError +from pip._vendor.six.moves.urllib import parse as urllib_parse +from pip._vendor.six.moves.urllib import request as urllib_request + +from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path +from pip._internal.exceptions import ( + BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename, + UnsupportedWheel, +) +from pip._internal.models.candidate import InstallationCandidate +from pip._internal.models.format_control import FormatControl +from pip._internal.models.index import PyPI +from pip._internal.models.link import Link +from pip._internal.pep425tags import get_supported +from pip._internal.utils.compat import ipaddress +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, WHEEL_EXTENSION, normalize_path, + redact_password_from_url, +) +from pip._internal.utils.packaging import check_requires_python +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pip._internal.wheel import Wheel + +if MYPY_CHECK_RUNNING: + from logging import Logger # noqa: F401 + from typing import ( # noqa: F401 + Tuple, Optional, Any, List, Union, Callable, Set, Sequence, + Iterable, MutableMapping + ) + from pip._vendor.packaging.version import _BaseVersion # noqa: F401 + from 
pip._vendor.requests import Response  # noqa: F401
+    from pip._internal.req import InstallRequirement  # noqa: F401
+    from pip._internal.download import PipSession  # noqa: F401
+
+    SecureOrigin = Tuple[str, str, Optional[str]]
+    BuildTag = Tuple[Any, ...]  # either empty tuple or Tuple[int, str]
+    CandidateSortingKey = Tuple[int, _BaseVersion, BuildTag, Optional[int]]
+
+__all__ = ['FormatControl', 'PackageFinder']
+
+
+SECURE_ORIGINS = [
+    # protocol, hostname, port
+    # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
+    ("https", "*", "*"),
+    ("*", "localhost", "*"),
+    ("*", "127.0.0.0/8", "*"),
+    ("*", "::1/128", "*"),
+    ("file", "*", None),
+    # ssh is always secure.
+    ("ssh", "*", "*"),
+]  # type: List[SecureOrigin]
+
+
+logger = logging.getLogger(__name__)
+
+
+def _match_vcs_scheme(url):
+    # type: (str) -> Optional[str]
+    """Look for VCS schemes in the URL.
+
+    Returns the matched VCS scheme, or None if there's no match.
+    """
+    from pip._internal.vcs import VcsSupport
+    for scheme in VcsSupport.schemes:
+        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
+            return scheme
+    return None
+
+
+def _is_url_like_archive(url):
+    # type: (str) -> bool
+    """Return whether the URL looks like an archive.
+    """
+    filename = Link(url).filename
+    for bad_ext in ARCHIVE_EXTENSIONS:
+        if filename.endswith(bad_ext):
+            return True
+    return False
+
+
+class _NotHTML(Exception):
+    def __init__(self, content_type, request_desc):
+        # type: (str, str) -> None
+        super(_NotHTML, self).__init__(content_type, request_desc)
+        self.content_type = content_type
+        self.request_desc = request_desc
+
+
+def _ensure_html_header(response):
+    # type: (Response) -> None
+    """Check the Content-Type header to ensure the response contains HTML.
+
+    Raises `_NotHTML` if the content type is not text/html.
+    """
+    content_type = response.headers.get("Content-Type", "")
+    if not content_type.lower().startswith("text/html"):
+        raise _NotHTML(content_type, response.request.method)
+
+
+class _NotHTTP(Exception):
+    pass
+
+
+def _ensure_html_response(url, session):
+    # type: (str, PipSession) -> None
+    """Send a HEAD request to the URL, and ensure the response contains HTML.
+
+    Raises `_NotHTTP` if the URL is not available for a HEAD request, or
+    `_NotHTML` if the content type is not text/html.
+    """
+    scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
+    if scheme not in {'http', 'https'}:
+        raise _NotHTTP()
+
+    resp = session.head(url, allow_redirects=True)
+    resp.raise_for_status()
+
+    _ensure_html_header(resp)
+
+
+def _get_html_response(url, session):
+    # type: (str, PipSession) -> Response
+    """Access an HTML page with GET, and return the response.
+
+    This consists of three parts:
+
+    1. If the URL looks suspiciously like an archive, send a HEAD first to
+       check the Content-Type is HTML, to avoid downloading a large file.
+       Raise `_NotHTTP` if the content type cannot be determined, or
+       `_NotHTML` if it is not HTML.
+    2. Actually perform the request. Raise HTTP exceptions on network failures.
+    3. Check the Content-Type header to make sure we got HTML, and raise
+       `_NotHTML` otherwise.
+ """ + if _is_url_like_archive(url): + _ensure_html_response(url, session=session) + + logger.debug('Getting page %s', url) + + resp = session.get( + url, + headers={ + "Accept": "text/html", + # We don't want to blindly returned cached data for + # /simple/, because authors generally expecting that + # twine upload && pip install will function, but if + # they've done a pip install in the last ~10 minutes + # it won't. Thus by setting this to zero we will not + # blindly use any cached data, however the benefit of + # using max-age=0 instead of no-cache, is that we will + # still support conditional requests, so we will still + # minimize traffic sent in cases where the page hasn't + # changed at all, we will just always incur the round + # trip for the conditional GET now instead of only + # once per 10 minutes. + # For more information, please see pypa/pip#5670. + "Cache-Control": "max-age=0", + }, + ) + resp.raise_for_status() + + # The check for archives above only works if the url ends with + # something that looks like an archive. However that is not a + # requirement of an url. Unless we issue a HEAD request on every + # url we cannot know ahead of time for sure if something is HTML + # or not. However we can check after we've downloaded it. + _ensure_html_header(resp) + + return resp + + +def _handle_get_page_fail( + link, # type: Link + reason, # type: Union[str, Exception] + meth=None # type: Optional[Callable[..., None]] +): + # type: (...) -> None + if meth is None: + meth = logger.debug + meth("Could not fetch URL %s: %s - skipping", link, reason) + + +def _get_html_page(link, session=None): + # type: (Link, Optional[PipSession]) -> Optional[HTMLPage] + if session is None: + raise TypeError( + "_get_html_page() missing 1 required keyword argument: 'session'" + ) + + url = link.url.split('#', 1)[0] + + # Check for VCS schemes that do not support lookup as web pages. + vcs_scheme = _match_vcs_scheme(url) + if vcs_scheme: + logger.debug('Cannot look at %s URL %s', vcs_scheme, link) + return None + + # Tack index.html onto file:// URLs that point to directories + scheme, _, path, _, _, _ = urllib_parse.urlparse(url) + if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): + # add trailing slash if not present so urljoin doesn't trim + # final segment + if not url.endswith('/'): + url += '/' + url = urllib_parse.urljoin(url, 'index.html') + logger.debug(' file: URL is directory, getting %s', url) + + try: + resp = _get_html_response(url, session=session) + except _NotHTTP as exc: + logger.debug( + 'Skipping page %s because it looks like an archive, and cannot ' + 'be checked by HEAD.', link, + ) + except _NotHTML as exc: + logger.debug( + 'Skipping page %s because the %s request got Content-Type: %s', + link, exc.request_desc, exc.content_type, + ) + except requests.HTTPError as exc: + _handle_get_page_fail(link, exc) + except RetryError as exc: + _handle_get_page_fail(link, exc) + except SSLError as exc: + reason = "There was a problem confirming the ssl certificate: " + reason += str(exc) + _handle_get_page_fail(link, reason, meth=logger.info) + except requests.ConnectionError as exc: + _handle_get_page_fail(link, "connection error: %s" % exc) + except requests.Timeout: + _handle_get_page_fail(link, "timed out") + else: + return HTMLPage(resp.content, resp.url, resp.headers) + return None + + +class PackageFinder(object): + """This finds packages. 
+ + This is meant to match easy_install's technique for looking for + packages, by reading pages and looking for appropriate links. + """ + + def __init__( + self, + find_links, # type: List[str] + index_urls, # type: List[str] + allow_all_prereleases=False, # type: bool + trusted_hosts=None, # type: Optional[Iterable[str]] + session=None, # type: Optional[PipSession] + format_control=None, # type: Optional[FormatControl] + platform=None, # type: Optional[str] + versions=None, # type: Optional[List[str]] + abi=None, # type: Optional[str] + implementation=None, # type: Optional[str] + prefer_binary=False # type: bool + ): + # type: (...) -> None + """Create a PackageFinder. + + :param format_control: A FormatControl object or None. Used to control + the selection of source packages / binary packages when consulting + the index and links. + :param platform: A string or None. If None, searches for packages + that are supported by the current system. Otherwise, will find + packages that can be built on the platform passed in. These + packages will only be downloaded for distribution: they will + not be built locally. + :param versions: A list of strings or None. This is passed directly + to pep425tags.py in the get_supported() method. + :param abi: A string or None. This is passed directly + to pep425tags.py in the get_supported() method. + :param implementation: A string or None. This is passed directly + to pep425tags.py in the get_supported() method. + """ + if session is None: + raise TypeError( + "PackageFinder() missing 1 required keyword argument: " + "'session'" + ) + + # Build find_links. If an argument starts with ~, it may be + # a local file relative to a home directory. So try normalizing + # it and if it exists, use the normalized version. + # This is deliberately conservative - it might be fine just to + # blindly normalize anything starting with a ~... + self.find_links = [] # type: List[str] + for link in find_links: + if link.startswith('~'): + new_link = normalize_path(link) + if os.path.exists(new_link): + link = new_link + self.find_links.append(link) + + self.index_urls = index_urls + + # These are boring links that have already been logged somehow: + self.logged_links = set() # type: Set[Link] + + self.format_control = format_control or FormatControl(set(), set()) + + # Domains that we won't emit warnings for when not using HTTPS + self.secure_origins = [ + ("*", host, "*") + for host in (trusted_hosts if trusted_hosts else []) + ] # type: List[SecureOrigin] + + # Do we want to allow _all_ pre-releases? + self.allow_all_prereleases = allow_all_prereleases + + # The Session we'll use to make requests + self.session = session + + # The valid tags to check potential found wheel candidates against + self.valid_tags = get_supported( + versions=versions, + platform=platform, + abi=abi, + impl=implementation, + ) + + # Do we prefer old, but valid, binary dist over new source dist + self.prefer_binary = prefer_binary + + # If we don't have TLS enabled, then WARN if anyplace we're looking + # relies on TLS. + if not HAS_TLS: + for link in itertools.chain(self.index_urls, self.find_links): + parsed = urllib_parse.urlparse(link) + if parsed.scheme == "https": + logger.warning( + "pip is configured with locations that require " + "TLS/SSL, however the ssl module in Python is not " + "available." 
+ ) + break + + def get_formatted_locations(self): + # type: () -> str + lines = [] + if self.index_urls and self.index_urls != [PyPI.simple_url]: + lines.append( + "Looking in indexes: {}".format(", ".join( + redact_password_from_url(url) for url in self.index_urls)) + ) + if self.find_links: + lines.append( + "Looking in links: {}".format(", ".join(self.find_links)) + ) + return "\n".join(lines) + + @staticmethod + def _sort_locations(locations, expand_dir=False): + # type: (Sequence[str], bool) -> Tuple[List[str], List[str]] + """ + Sort locations into "files" (archives) and "urls", and return + a pair of lists (files,urls) + """ + files = [] + urls = [] + + # puts the url for the given file path into the appropriate list + def sort_path(path): + url = path_to_url(path) + if mimetypes.guess_type(url, strict=False)[0] == 'text/html': + urls.append(url) + else: + files.append(url) + + for url in locations: + + is_local_path = os.path.exists(url) + is_file_url = url.startswith('file:') + + if is_local_path or is_file_url: + if is_local_path: + path = url + else: + path = url_to_path(url) + if os.path.isdir(path): + if expand_dir: + path = os.path.realpath(path) + for item in os.listdir(path): + sort_path(os.path.join(path, item)) + elif is_file_url: + urls.append(url) + else: + logger.warning( + "Path '{0}' is ignored: " + "it is a directory.".format(path), + ) + elif os.path.isfile(path): + sort_path(path) + else: + logger.warning( + "Url '%s' is ignored: it is neither a file " + "nor a directory.", url, + ) + elif is_url(url): + # Only add url with clear scheme + urls.append(url) + else: + logger.warning( + "Url '%s' is ignored. It is either a non-existing " + "path or lacks a specific scheme.", url, + ) + + return files, urls + + def _candidate_sort_key(self, candidate): + # type: (InstallationCandidate) -> CandidateSortingKey + """ + Function used to generate link sort key for link tuples. + The greater the return value, the more preferred it is. + If not finding wheels, then sorted by version only. + If finding wheels, then the sort order is by version, then: + 1. existing installs + 2. wheels ordered via Wheel.support_index_min(self.valid_tags) + 3. source archives + If prefer_binary was set, then all wheels are sorted above sources. + Note: it was considered to embed this logic into the Link + comparison operators, but then different sdist links + with the same version, would have to be considered equal + """ + support_num = len(self.valid_tags) + build_tag = tuple() # type: BuildTag + binary_preference = 0 + if candidate.location.is_wheel: + # can raise InvalidWheelFilename + wheel = Wheel(candidate.location.filename) + if not wheel.supported(self.valid_tags): + raise UnsupportedWheel( + "%s is not a supported wheel for this platform. It " + "can't be sorted." % wheel.filename + ) + if self.prefer_binary: + binary_preference = 1 + pri = -(wheel.support_index_min(self.valid_tags)) + if wheel.build_tag is not None: + match = re.match(r'^(\d+)(.*)$', wheel.build_tag) + build_tag_groups = match.groups() + build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) + else: # sdist + pri = -(support_num) + return (binary_preference, candidate.version, build_tag, pri) + + def _validate_secure_origin(self, logger, location): + # type: (Logger, Link) -> bool + # Determine if this url used a secure transport mechanism + parsed = urllib_parse.urlparse(str(location)) + origin = (parsed.scheme, parsed.hostname, parsed.port) + + # The protocol to use to see if the protocol matches. 
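Both halves of the check that follows can be tried in isolation: the scheme split keeps only the transport part of e.g. "git+ssh", and the ipaddress module does the network matching. A small stand-alone sketch (Python 3 stdlib ipaddress; pip vendors its own copy for Python 2):

    import ipaddress

    print("git+ssh".rsplit("+", 1)[-1])  # "ssh": only the last scheme is compared

    addr = ipaddress.ip_address(u"127.0.0.1")
    network = ipaddress.ip_network(u"127.0.0.0/8")
    print(addr in network)  # True: loopback matches a built-in secure origin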
# Don't count the repository type as part of the protocol: in
+        # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
+        # the last scheme.)
+        protocol = origin[0].rsplit('+', 1)[-1]
+
+        # Determine if our origin is a secure origin by looking through our
+        # hardcoded list of secure origins, as well as any additional ones
+        # configured on this PackageFinder instance.
+        for secure_origin in (SECURE_ORIGINS + self.secure_origins):
+            if protocol != secure_origin[0] and secure_origin[0] != "*":
+                continue
+
+            try:
+                # We need to do this decode dance to ensure that we have a
+                # unicode object, even on Python 2.x.
+                addr = ipaddress.ip_address(
+                    origin[1]
+                    if (
+                        isinstance(origin[1], six.text_type) or
+                        origin[1] is None
+                    )
+                    else origin[1].decode("utf8")
+                )
+                network = ipaddress.ip_network(
+                    secure_origin[1]
+                    if isinstance(secure_origin[1], six.text_type)
+                    # setting secure_origin[1] to proper Union[bytes, str]
+                    # creates problems in other places
+                    else secure_origin[1].decode("utf8")  # type: ignore
+                )
+            except ValueError:
+                # We don't have both a valid address and a valid network, so
+                # we'll check this origin against hostnames.
+                if (origin[1] and
+                        origin[1].lower() != secure_origin[1].lower() and
+                        secure_origin[1] != "*"):
+                    continue
+            else:
+                # We have a valid address and network, so see if the address
+                # is contained within the network.
+                if addr not in network:
+                    continue
+
+            # Check to see if the port matches
+            if (origin[2] != secure_origin[2] and
+                    secure_origin[2] != "*" and
+                    secure_origin[2] is not None):
+                continue
+
+            # If we've gotten here, then this origin matches the current
+            # secure origin and we should return True
+            return True
+
+        # If we've gotten to this point, then the origin isn't secure and we
+        # will not accept it as a valid location to search. We will however
+        # log a warning that we are ignoring it.
+        logger.warning(
+            "The repository located at %s is not a trusted or secure host and "
+            "is being ignored. If this repository is available via HTTPS we "
+            "recommend you use HTTPS instead, otherwise you may silence "
+            "this warning and allow it anyway with '--trusted-host %s'.",
+            parsed.hostname,
+            parsed.hostname,
+        )
+
+        return False
+
+    def _get_index_urls_locations(self, project_name):
+        # type: (str) -> List[str]
+        """Returns the locations found via self.index_urls
+
+        Checks the url_name on the main (first in the list) index and
+        uses this url_name to produce all locations
+        """
+
+        def mkurl_pypi_url(url):
+            loc = posixpath.join(
+                url,
+                urllib_parse.quote(canonicalize_name(project_name)))
+            # For maximum compatibility with easy_install, ensure the path
+            # ends in a trailing slash. Although this isn't in the spec
+            # (and PyPI can handle it without the slash) some other index
+            # implementations might break if they relied on easy_install's
+            # behavior.
+            if not loc.endswith('/'):
+                loc = loc + '/'
+            return loc
+
+        return [mkurl_pypi_url(url) for url in self.index_urls]
+
+    def find_all_candidates(self, project_name):
+        # type: (str) -> List[Optional[InstallationCandidate]]
+        """Find all available InstallationCandidate for project_name
+
+        This checks index_urls and find_links.
+        All versions found are returned as an InstallationCandidate list.
+
+        See _link_package_versions for details on which files are accepted
+        """
+        index_locations = self._get_index_urls_locations(project_name)
+        index_file_loc, index_url_loc = self._sort_locations(index_locations)
+        fl_file_loc, fl_url_loc = self._sort_locations(
+            self.find_links, expand_dir=True,
+        )
+
+        file_locations = (Link(url) for url in itertools.chain(
+            index_file_loc, fl_file_loc,
+        ))
+
+        # We trust every url that the user has given us whether it was given
+        # via --index-url or --find-links.
+        # We want to filter out anything which does not have a secure origin.
+        url_locations = [
+            link for link in itertools.chain(
+                (Link(url) for url in index_url_loc),
+                (Link(url) for url in fl_url_loc),
+            )
+            if self._validate_secure_origin(logger, link)
+        ]
+
+        logger.debug('%d location(s) to search for versions of %s:',
+                     len(url_locations), project_name)
+
+        for location in url_locations:
+            logger.debug('* %s', location)
+
+        canonical_name = canonicalize_name(project_name)
+        formats = self.format_control.get_allowed_formats(canonical_name)
+        search = Search(project_name, canonical_name, formats)
+        find_links_versions = self._package_versions(
+            # We trust every directly linked archive in find_links
+            (Link(url, '-f') for url in self.find_links),
+            search
+        )
+
+        page_versions = []
+        for page in self._get_pages(url_locations, project_name):
+            logger.debug('Analyzing links from page %s', page.url)
+            with indent_log():
+                page_versions.extend(
+                    self._package_versions(page.iter_links(), search)
+                )
+
+        file_versions = self._package_versions(file_locations, search)
+        if file_versions:
+            file_versions.sort(reverse=True)
+            logger.debug(
+                'Local files found: %s',
+                ', '.join([
+                    url_to_path(candidate.location.url)
+                    for candidate in file_versions
+                ])
+            )
+
+        # This is an intentional priority ordering
+        return file_versions + find_links_versions + page_versions
+
+    def find_requirement(self, req, upgrade):
+        # type: (InstallRequirement, bool) -> Optional[Link]
+        """Try to find a Link matching req
+
+        Expects req, an InstallRequirement and upgrade, a boolean
+        Returns a Link if found,
+        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
+        """
+        all_candidates = self.find_all_candidates(req.name)
+
+        # Filter out anything which doesn't match our specifier
+        compatible_versions = set(
+            req.specifier.filter(
+                # We turn the version object into a str here because otherwise
+                # when we're debundled but setuptools isn't, Python will see
+                # packaging.version.Version and
+                # pkg_resources._vendor.packaging.version.Version as different
+                # types. This way we'll use a str as a common data interchange
+                # format. If we stop using the pkg_resources provided specifier
+                # and start using our own, we can drop the cast to str().
+                [str(c.version) for c in all_candidates],
+                prereleases=(
+                    self.allow_all_prereleases
+                    if self.allow_all_prereleases else None
+                ),
+            )
+        )
+        applicable_candidates = [
+            # Again, converting to str to deal with debundling.
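The specifier filtering above delegates to packaging's SpecifierSet; a minimal stand-alone sketch with hypothetical versions (the import path assumes pip's vendored copy is on hand):

    from pip._vendor.packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=1.0,<2.0")
    versions = ["0.9", "1.0", "1.5", "2.0"]  # hypothetical candidate versions
    print(list(spec.filter(versions)))  # ['1.0', '1.5']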
c for c in all_candidates if str(c.version) in compatible_versions
+        ]
+
+        if applicable_candidates:
+            best_candidate = max(applicable_candidates,
+                                 key=self._candidate_sort_key)
+        else:
+            best_candidate = None
+
+        if req.satisfied_by is not None:
+            installed_version = parse_version(req.satisfied_by.version)
+        else:
+            installed_version = None
+
+        if installed_version is None and best_candidate is None:
+            logger.critical(
+                'Could not find a version that satisfies the requirement %s '
+                '(from versions: %s)',
+                req,
+                ', '.join(
+                    sorted(
+                        {str(c.version) for c in all_candidates},
+                        key=parse_version,
+                    )
+                )
+            )
+
+            raise DistributionNotFound(
+                'No matching distribution found for %s' % req
+            )
+
+        best_installed = False
+        if installed_version and (
+                best_candidate is None or
+                best_candidate.version <= installed_version):
+            best_installed = True
+
+        if not upgrade and installed_version is not None:
+            if best_installed:
+                logger.debug(
+                    'Existing installed version (%s) is most up-to-date and '
+                    'satisfies requirement',
+                    installed_version,
+                )
+            else:
+                logger.debug(
+                    'Existing installed version (%s) satisfies requirement '
+                    '(most up-to-date version is %s)',
+                    installed_version,
+                    best_candidate.version,
+                )
+            return None
+
+        if best_installed:
+            # We have an existing version, and it's the best version
+            logger.debug(
+                'Installed version (%s) is most up-to-date (past versions: '
+                '%s)',
+                installed_version,
+                ', '.join(sorted(compatible_versions, key=parse_version)) or
+                "none",
+            )
+            raise BestVersionAlreadyInstalled
+
+        logger.debug(
+            'Using version %s (newest of versions: %s)',
+            best_candidate.version,
+            ', '.join(sorted(compatible_versions, key=parse_version))
+        )
+        return best_candidate.location
+
+    def _get_pages(self, locations, project_name):
+        # type: (Iterable[Link], str) -> Iterable[HTMLPage]
+        """
+        Yields HTMLPage objects for the given locations, skipping
+        locations that have errors.
+        """
+        seen = set()  # type: Set[Link]
+        for location in locations:
+            if location in seen:
+                continue
+            seen.add(location)
+
+            page = _get_html_page(location, session=self.session)
+            if page is None:
+                continue
+
+            yield page
+
+    _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
+
+    def _sort_links(self, links):
+        # type: (Iterable[Link]) -> List[Link]
+        """
+        Returns elements of links in order, non-egg links first, egg links
+        second, while eliminating duplicates
+        """
+        eggs, no_eggs = [], []
+        seen = set()  # type: Set[Link]
+        for link in links:
+            if link not in seen:
+                seen.add(link)
+                if link.egg_fragment:
+                    eggs.append(link)
+                else:
+                    no_eggs.append(link)
+        return no_eggs + eggs
+
+    def _package_versions(
+        self,
+        links,  # type: Iterable[Link]
+        search  # type: Search
+    ):
+        # type: (...)
-> List[Optional[InstallationCandidate]] + result = [] + for link in self._sort_links(links): + v = self._link_package_versions(link, search) + if v is not None: + result.append(v) + return result + + def _log_skipped_link(self, link, reason): + # type: (Link, str) -> None + if link not in self.logged_links: + logger.debug('Skipping link %s; %s', link, reason) + self.logged_links.add(link) + + def _link_package_versions(self, link, search): + # type: (Link, Search) -> Optional[InstallationCandidate] + """Return an InstallationCandidate or None""" + version = None + if link.egg_fragment: + egg_info = link.egg_fragment + ext = link.ext + else: + egg_info, ext = link.splitext() + if not ext: + self._log_skipped_link(link, 'not a file') + return None + if ext not in SUPPORTED_EXTENSIONS: + self._log_skipped_link( + link, 'unsupported archive format: %s' % ext, + ) + return None + if "binary" not in search.formats and ext == WHEEL_EXTENSION: + self._log_skipped_link( + link, 'No binaries permitted for %s' % search.supplied, + ) + return None + if "macosx10" in link.path and ext == '.zip': + self._log_skipped_link(link, 'macosx10 one') + return None + if ext == WHEEL_EXTENSION: + try: + wheel = Wheel(link.filename) + except InvalidWheelFilename: + self._log_skipped_link(link, 'invalid wheel filename') + return None + if canonicalize_name(wheel.name) != search.canonical: + self._log_skipped_link( + link, 'wrong project name (not %s)' % search.supplied) + return None + + if not wheel.supported(self.valid_tags): + self._log_skipped_link( + link, 'it is not compatible with this Python') + return None + + version = wheel.version + + # This should be up by the search.ok_binary check, but see issue 2700. + if "source" not in search.formats and ext != WHEEL_EXTENSION: + self._log_skipped_link( + link, 'No sources permitted for %s' % search.supplied, + ) + return None + + if not version: + version = _egg_info_matches(egg_info, search.canonical) + if not version: + self._log_skipped_link( + link, 'Missing project version for %s' % search.supplied) + return None + + match = self._py_version_re.search(version) + if match: + version = version[:match.start()] + py_version = match.group(1) + if py_version != sys.version[:3]: + self._log_skipped_link( + link, 'Python version is incorrect') + return None + try: + support_this_python = check_requires_python(link.requires_python) + except specifiers.InvalidSpecifier: + logger.debug("Package %s has an invalid Requires-Python entry: %s", + link.filename, link.requires_python) + support_this_python = True + + if not support_this_python: + logger.debug("The package %s is incompatible with the python " + "version in use. Acceptable python versions are: %s", + link, link.requires_python) + return None + logger.debug('Found link %s, version: %s', link, version) + + return InstallationCandidate(search.supplied, version, link) + + +def _find_name_version_sep(egg_info, canonical_name): + # type: (str, str) -> int + """Find the separator's index based on the package's canonical name. + + `egg_info` must be an egg info string for the given package, and + `canonical_name` must be the package's canonical name. + + This function is needed since the canonicalized name does not necessarily + have the same length as the egg info's name part. An example:: + + >>> egg_info = 'foo__bar-1.0' + >>> canonical_name = 'foo-bar' + >>> _find_name_version_sep(egg_info, canonical_name) + 8 + """ + # Project name and version must be separated by one single dash. 
Find all
+    # occurrences of dashes; if the string in front of it matches the canonical
+    # name, this is the one separating the name and version parts.
+    for i, c in enumerate(egg_info):
+        if c != "-":
+            continue
+        if canonicalize_name(egg_info[:i]) == canonical_name:
+            return i
+    raise ValueError("{} does not match {}".format(egg_info, canonical_name))
+
+
+def _egg_info_matches(egg_info, canonical_name):
+    # type: (str, str) -> Optional[str]
+    """Pull the version part out of a string.
+
+    :param egg_info: The string to parse. E.g. foo-2.1
+    :param canonical_name: The canonicalized name of the package this
+        belongs to.
+    """
+    try:
+        version_start = _find_name_version_sep(egg_info, canonical_name) + 1
+    except ValueError:
+        return None
+    version = egg_info[version_start:]
+    if not version:
+        return None
+    return version
+
+
+def _determine_base_url(document, page_url):
+    """Determine the HTML document's base URL.
+
+    This looks for a ``<base>`` tag in the HTML document. If present, its href
+    attribute denotes the base URL of anchor tags in the document. If there is
+    no such tag (or if it does not have a valid href attribute), the HTML
+    file's URL is used as the base URL.
+
+    :param document: An HTML document representation. The current
+        implementation expects the result of ``html5lib.parse()``.
+    :param page_url: The URL of the HTML document.
+    """
+    for base in document.findall(".//base"):
+        href = base.get("href")
+        if href is not None:
+            return href
+    return page_url
+
+
+def _get_encoding_from_headers(headers):
+    """Determine if we have any encoding information in our headers.
+    """
+    if headers and "Content-Type" in headers:
+        content_type, params = cgi.parse_header(headers["Content-Type"])
+        if "charset" in params:
+            return params['charset']
+    return None
+
+
+_CLEAN_LINK_RE = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
+
+
+def _clean_link(url):
+    # type: (str) -> str
+    """Makes sure a link is fully encoded. That is, if a ' ' shows up in
+    the link, it will be rewritten to %20 (while not over-quoting
+    % or other characters)."""
+    return _CLEAN_LINK_RE.sub(lambda match: '%%%2x' % ord(match.group(0)), url)
+
+
+class HTMLPage(object):
+    """Represents one page, along with its URL"""
+
+    def __init__(self, content, url, headers=None):
+        # type: (bytes, str, MutableMapping[str, str]) -> None
+        self.content = content
+        self.url = url
+        self.headers = headers
+
+    def __str__(self):
+        return redact_password_from_url(self.url)
+
+    def iter_links(self):
+        # type: () -> Iterable[Link]
+        """Yields all links in the page"""
+        document = html5lib.parse(
+            self.content,
+            transport_encoding=_get_encoding_from_headers(self.headers),
+            namespaceHTMLElements=False,
+        )
+        base_url = _determine_base_url(document, self.url)
+        for anchor in document.findall(".//a"):
+            if anchor.get("href"):
+                href = anchor.get("href")
+                url = _clean_link(urllib_parse.urljoin(base_url, href))
+                pyrequire = anchor.get('data-requires-python')
+                pyrequire = unescape(pyrequire) if pyrequire else None
+                yield Link(url, self.url, requires_python=pyrequire)
+
+
+Search = namedtuple('Search', 'supplied canonical formats')
+"""Capture key aspects of a search.
+
+:attribute supplied: The user supplied package.
+:attribute canonical: The canonical package name.
+:attribute formats: The formats allowed for this package. Should be a set
+    with 'binary' or 'source' or both in it.
+""" diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/index.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/index.pyc new file mode 100644 index 0000000..df17aa7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/index.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/locations.py b/project/venv/lib/python2.7/site-packages/pip/_internal/locations.py new file mode 100644 index 0000000..c6e2a3e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/locations.py @@ -0,0 +1,211 @@ +"""Locations where we look for configs, install stuff, etc""" +from __future__ import absolute_import + +import os +import os.path +import platform +import site +import sys +import sysconfig +from distutils import sysconfig as distutils_sysconfig +from distutils.command.install import SCHEME_KEYS # type: ignore + +from pip._internal.utils import appdirs +from pip._internal.utils.compat import WINDOWS, expanduser +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Any, Union, Dict, List, Optional # noqa: F401 + + +# Application Directories +USER_CACHE_DIR = appdirs.user_cache_dir("pip") + + +DELETE_MARKER_MESSAGE = '''\ +This file is placed here by pip to indicate the source was put +here by pip. + +Once this package is successfully installed this source code will be +deleted (unless you remove this file). +''' +PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt' + + +def write_delete_marker_file(directory): + # type: (str) -> None + """ + Write the pip delete marker file into this directory. + """ + filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME) + with open(filepath, 'w') as marker_fp: + marker_fp.write(DELETE_MARKER_MESSAGE) + + +def running_under_virtualenv(): + # type: () -> bool + """ + Return True if we're running inside a virtualenv, False otherwise. + + """ + if hasattr(sys, 'real_prefix'): + return True + elif sys.prefix != getattr(sys, "base_prefix", sys.prefix): + return True + + return False + + +def virtualenv_no_global(): + # type: () -> bool + """ + Return True if in a venv and no system site packages. + """ + # this mirrors the logic in virtualenv.py for locating the + # no-global-site-packages.txt file + site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) + no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt') + if running_under_virtualenv() and os.path.isfile(no_global_file): + return True + else: + return False + + +if running_under_virtualenv(): + src_prefix = os.path.join(sys.prefix, 'src') +else: + # FIXME: keep src in cwd for now (it is not a temporary folder) + try: + src_prefix = os.path.join(os.getcwd(), 'src') + except OSError: + # In case the current working directory has been renamed or deleted + sys.exit( + "The folder you are executing pip from can no longer be found." + ) + +# under macOS + virtualenv sys.prefix is not properly resolved +# it is something like /path/to/python/bin/.. +# Note: using realpath due to tmp dirs on OSX being symlinks +src_prefix = os.path.abspath(src_prefix) + +# FIXME doesn't account for venv linked to global site-packages + +site_packages = sysconfig.get_path("purelib") # type: Optional[str] + +# This is because of a bug in PyPy's sysconfig module, see +# https://bitbucket.org/pypy/pypy/issues/2506/sysconfig-returns-incorrect-paths +# for more information. 
+if platform.python_implementation().lower() == "pypy": + site_packages = distutils_sysconfig.get_python_lib() +try: + # Use getusersitepackages if this is present, as it ensures that the + # value is initialised properly. + user_site = site.getusersitepackages() +except AttributeError: + user_site = site.USER_SITE +user_dir = expanduser('~') +if WINDOWS: + bin_py = os.path.join(sys.prefix, 'Scripts') + bin_user = os.path.join(user_site, 'Scripts') + # buildout uses 'bin' on Windows too? + if not os.path.exists(bin_py): + bin_py = os.path.join(sys.prefix, 'bin') + bin_user = os.path.join(user_site, 'bin') + + config_basename = 'pip.ini' + + legacy_storage_dir = os.path.join(user_dir, 'pip') + legacy_config_file = os.path.join( + legacy_storage_dir, + config_basename, + ) +else: + bin_py = os.path.join(sys.prefix, 'bin') + bin_user = os.path.join(user_site, 'bin') + + config_basename = 'pip.conf' + + legacy_storage_dir = os.path.join(user_dir, '.pip') + legacy_config_file = os.path.join( + legacy_storage_dir, + config_basename, + ) + # Forcing to use /usr/local/bin for standard macOS framework installs + # Also log to ~/Library/Logs/ for use with the Console.app log viewer + if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/': + bin_py = '/usr/local/bin' + +site_config_files = [ + os.path.join(path, config_basename) + for path in appdirs.site_config_dirs('pip') +] + +venv_config_file = os.path.join(sys.prefix, config_basename) +new_config_file = os.path.join(appdirs.user_config_dir("pip"), config_basename) + + +def distutils_scheme(dist_name, user=False, home=None, root=None, + isolated=False, prefix=None): + # type:(str, bool, str, str, bool, str) -> dict + """ + Return a distutils install scheme + """ + from distutils.dist import Distribution + + scheme = {} + + if isolated: + extra_dist_args = {"script_args": ["--no-user-cfg"]} + else: + extra_dist_args = {} + dist_args = {'name': dist_name} # type: Dict[str, Union[str, List[str]]] + dist_args.update(extra_dist_args) + + d = Distribution(dist_args) + # Ignoring, typeshed issue reported python/typeshed/issues/2567 + d.parse_config_files() + # NOTE: Ignoring type since mypy can't find attributes on 'Command' + i = d.get_command_obj('install', create=True) # type: Any + assert i is not None + # NOTE: setting user or home has the side-effect of creating the home dir + # or user base for installations during finalize_options() + # ideally, we'd prefer a scheme class that has no side-effects. + assert not (user and prefix), "user={} prefix={}".format(user, prefix) + i.user = user or i.user + if user: + i.prefix = "" + i.prefix = prefix or i.prefix + i.home = home or i.home + i.root = root or i.root + i.finalize_options() + for key in SCHEME_KEYS: + scheme[key] = getattr(i, 'install_' + key) + + # install_lib specified in setup.cfg should install *everything* + # into there (i.e. it takes precedence over both purelib and + # platlib). 
Note, i.install_lib is *always* set after + # finalize_options(); we only want to override here if the user + # has explicitly requested it hence going back to the config + + # Ignoring, typeshed issue reported python/typeshed/issues/2567 + if 'install_lib' in d.get_option_dict('install'): # type: ignore + scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib)) + + if running_under_virtualenv(): + scheme['headers'] = os.path.join( + sys.prefix, + 'include', + 'site', + 'python' + sys.version[:3], + dist_name, + ) + + if root is not None: + path_no_drive = os.path.splitdrive( + os.path.abspath(scheme["headers"]))[1] + scheme["headers"] = os.path.join( + root, + path_no_drive[1:], + ) + + return scheme diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/locations.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/locations.pyc new file mode 100644 index 0000000..91acb3f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/locations.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_internal/models/__init__.py new file mode 100644 index 0000000..7855226 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/models/__init__.py @@ -0,0 +1,2 @@ +"""A package that contains models that represent entities. +""" diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/models/__init__.pyc new file mode 100644 index 0000000..61779c3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/models/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/candidate.py b/project/venv/lib/python2.7/site-packages/pip/_internal/models/candidate.py new file mode 100644 index 0000000..4475458 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/models/candidate.py @@ -0,0 +1,31 @@ +from pip._vendor.packaging.version import parse as parse_version + +from pip._internal.utils.models import KeyBasedCompareMixin +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from pip._vendor.packaging.version import _BaseVersion # noqa: F401 + from pip._internal.models.link import Link # noqa: F401 + from typing import Any, Union # noqa: F401 + + +class InstallationCandidate(KeyBasedCompareMixin): + """Represents a potential "candidate" for installation. 
+ """ + + def __init__(self, project, version, location): + # type: (Any, str, Link) -> None + self.project = project + self.version = parse_version(version) # type: _BaseVersion + self.location = location + + super(InstallationCandidate, self).__init__( + key=(self.project, self.version, self.location), + defining_class=InstallationCandidate + ) + + def __repr__(self): + # type: () -> str + return "".format( + self.project, self.version, self.location, + ) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/candidate.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/models/candidate.pyc new file mode 100644 index 0000000..07b877e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/models/candidate.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/format_control.py b/project/venv/lib/python2.7/site-packages/pip/_internal/models/format_control.py new file mode 100644 index 0000000..971a391 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/models/format_control.py @@ -0,0 +1,73 @@ +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Optional, Set, FrozenSet # noqa: F401 + + +class FormatControl(object): + """Helper for managing formats from which a package can be installed. + """ + + def __init__(self, no_binary=None, only_binary=None): + # type: (Optional[Set], Optional[Set]) -> None + if no_binary is None: + no_binary = set() + if only_binary is None: + only_binary = set() + + self.no_binary = no_binary + self.only_binary = only_binary + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "{}({}, {})".format( + self.__class__.__name__, + self.no_binary, + self.only_binary + ) + + @staticmethod + def handle_mutual_excludes(value, target, other): + # type: (str, Optional[Set], Optional[Set]) -> None + new = value.split(',') + while ':all:' in new: + other.clear() + target.clear() + target.add(':all:') + del new[:new.index(':all:') + 1] + # Without a none, we want to discard everything as :all: covers it + if ':none:' not in new: + return + for name in new: + if name == ':none:': + target.clear() + continue + name = canonicalize_name(name) + other.discard(name) + target.add(name) + + def get_allowed_formats(self, canonical_name): + # type: (str) -> FrozenSet + result = {"binary", "source"} + if canonical_name in self.only_binary: + result.discard('source') + elif canonical_name in self.no_binary: + result.discard('binary') + elif ':all:' in self.only_binary: + result.discard('source') + elif ':all:' in self.no_binary: + result.discard('binary') + return frozenset(result) + + def disallow_binaries(self): + # type: () -> None + self.handle_mutual_excludes( + ':all:', self.no_binary, self.only_binary, + ) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/format_control.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/models/format_control.pyc new file mode 100644 index 0000000..742562b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/models/format_control.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/index.py b/project/venv/lib/python2.7/site-packages/pip/_internal/models/index.py new file mode 100644 index 0000000..ead1efb --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/pip/_internal/models/index.py @@ -0,0 +1,31 @@ +from pip._vendor.six.moves.urllib import parse as urllib_parse + + +class PackageIndex(object): + """Represents a Package Index and provides easier access to endpoints + """ + + def __init__(self, url, file_storage_domain): + # type: (str, str) -> None + super(PackageIndex, self).__init__() + self.url = url + self.netloc = urllib_parse.urlsplit(url).netloc + self.simple_url = self._url_for_path('simple') + self.pypi_url = self._url_for_path('pypi') + + # This is part of a temporary hack used to block installs of PyPI + # packages which depend on external urls only necessary until PyPI can + # block such packages themselves + self.file_storage_domain = file_storage_domain + + def _url_for_path(self, path): + # type: (str) -> str + return urllib_parse.urljoin(self.url, path) + + +PyPI = PackageIndex( + 'https://pypi.org/', file_storage_domain='files.pythonhosted.org' +) +TestPyPI = PackageIndex( + 'https://test.pypi.org/', file_storage_domain='test-files.pythonhosted.org' +) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/index.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/models/index.pyc new file mode 100644 index 0000000..1d8f9f5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/models/index.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/link.py b/project/venv/lib/python2.7/site-packages/pip/_internal/models/link.py new file mode 100644 index 0000000..ad2f93e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/models/link.py @@ -0,0 +1,163 @@ +import posixpath +import re + +from pip._vendor.six.moves.urllib import parse as urllib_parse + +from pip._internal.download import path_to_url +from pip._internal.utils.misc import ( + WHEEL_EXTENSION, redact_password_from_url, splitext, +) +from pip._internal.utils.models import KeyBasedCompareMixin +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Optional, Tuple, Union, Text # noqa: F401 + from pip._internal.index import HTMLPage # noqa: F401 + + +class Link(KeyBasedCompareMixin): + """Represents a parsed link from a Package Index's simple URL + """ + + def __init__(self, url, comes_from=None, requires_python=None): + # type: (str, Optional[Union[str, HTMLPage]], Optional[str]) -> None + """ + url: + url of the resource pointed to (href of the link) + comes_from: + instance of HTMLPage where the link was found, or string. + requires_python: + String containing the `Requires-Python` metadata field, specified + in PEP 345. This may be specified by a data-requires-python + attribute in the HTML link tag, as described in PEP 503. 
+ """ + + # url can be a UNC windows share + if url.startswith('\\\\'): + url = path_to_url(url) + + self.url = url + self.comes_from = comes_from + self.requires_python = requires_python if requires_python else None + + super(Link, self).__init__( + key=(self.url), + defining_class=Link + ) + + def __str__(self): + if self.requires_python: + rp = ' (requires-python:%s)' % self.requires_python + else: + rp = '' + if self.comes_from: + return '%s (from %s)%s' % (redact_password_from_url(self.url), + self.comes_from, rp) + else: + return redact_password_from_url(str(self.url)) + + def __repr__(self): + return '' % self + + @property + def filename(self): + # type: () -> str + _, netloc, path, _, _ = urllib_parse.urlsplit(self.url) + name = posixpath.basename(path.rstrip('/')) or netloc + name = urllib_parse.unquote(name) + assert name, ('URL %r produced no filename' % self.url) + return name + + @property + def scheme(self): + # type: () -> str + return urllib_parse.urlsplit(self.url)[0] + + @property + def netloc(self): + # type: () -> str + return urllib_parse.urlsplit(self.url)[1] + + @property + def path(self): + # type: () -> str + return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2]) + + def splitext(self): + # type: () -> Tuple[str, str] + return splitext(posixpath.basename(self.path.rstrip('/'))) + + @property + def ext(self): + # type: () -> str + return self.splitext()[1] + + @property + def url_without_fragment(self): + # type: () -> str + scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url) + return urllib_parse.urlunsplit((scheme, netloc, path, query, None)) + + _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') + + @property + def egg_fragment(self): + # type: () -> Optional[str] + match = self._egg_fragment_re.search(self.url) + if not match: + return None + return match.group(1) + + _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') + + @property + def subdirectory_fragment(self): + # type: () -> Optional[str] + match = self._subdirectory_fragment_re.search(self.url) + if not match: + return None + return match.group(1) + + _hash_re = re.compile( + r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' + ) + + @property + def hash(self): + # type: () -> Optional[str] + match = self._hash_re.search(self.url) + if match: + return match.group(2) + return None + + @property + def hash_name(self): + # type: () -> Optional[str] + match = self._hash_re.search(self.url) + if match: + return match.group(1) + return None + + @property + def show_url(self): + # type: () -> Optional[str] + return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) + + @property + def is_wheel(self): + # type: () -> bool + return self.ext == WHEEL_EXTENSION + + @property + def is_artifact(self): + # type: () -> bool + """ + Determines if this points to an actual artifact (e.g. a tarball) or if + it points to an "abstract" thing like a path or a VCS location. 
+ """ + from pip._internal.vcs import vcs + + if self.scheme in vcs.all_schemes: + return False + + return True diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/models/link.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/models/link.pyc new file mode 100644 index 0000000..2c5f987 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/models/link.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/operations/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/operations/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/__init__.pyc new file mode 100644 index 0000000..f474144 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/operations/check.py b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/check.py new file mode 100644 index 0000000..0b56eda --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/check.py @@ -0,0 +1,155 @@ +"""Validation of dependencies of packages +""" + +import logging +from collections import namedtuple + +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.pkg_resources import RequirementParseError + +from pip._internal.operations.prepare import make_abstract_dist +from pip._internal.utils.misc import get_installed_distributions +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +logger = logging.getLogger(__name__) + +if MYPY_CHECK_RUNNING: + from pip._internal.req.req_install import InstallRequirement # noqa: F401 + from typing import ( # noqa: F401 + Any, Callable, Dict, Optional, Set, Tuple, List + ) + + # Shorthands + PackageSet = Dict[str, 'PackageDetails'] + Missing = Tuple[str, Any] + Conflicting = Tuple[str, str, Any] + + MissingDict = Dict[str, List[Missing]] + ConflictingDict = Dict[str, List[Conflicting]] + CheckResult = Tuple[MissingDict, ConflictingDict] + +PackageDetails = namedtuple('PackageDetails', ['version', 'requires']) + + +def create_package_set_from_installed(**kwargs): + # type: (**Any) -> Tuple[PackageSet, bool] + """Converts a list of distributions into a PackageSet. + """ + # Default to using all packages installed on the system + if kwargs == {}: + kwargs = {"local_only": False, "skip": ()} + + package_set = {} + problems = False + for dist in get_installed_distributions(**kwargs): + name = canonicalize_name(dist.project_name) + try: + package_set[name] = PackageDetails(dist.version, dist.requires()) + except RequirementParseError as e: + # Don't crash on broken metadata + logging.warning("Error parsing requirements for %s: %s", name, e) + problems = True + return package_set, problems + + +def check_package_set(package_set, should_ignore=None): + # type: (PackageSet, Optional[Callable[[str], bool]]) -> CheckResult + """Check if a package set is consistent + + If should_ignore is passed, it should be a callable that takes a + package name and returns a boolean. 
+ """ + if should_ignore is None: + def should_ignore(name): + return False + + missing = dict() + conflicting = dict() + + for package_name in package_set: + # Info about dependencies of package_name + missing_deps = set() # type: Set[Missing] + conflicting_deps = set() # type: Set[Conflicting] + + if should_ignore(package_name): + continue + + for req in package_set[package_name].requires: + name = canonicalize_name(req.project_name) # type: str + + # Check if it's missing + if name not in package_set: + missed = True + if req.marker is not None: + missed = req.marker.evaluate() + if missed: + missing_deps.add((name, req)) + continue + + # Check if there's a conflict + version = package_set[name].version # type: str + if not req.specifier.contains(version, prereleases=True): + conflicting_deps.add((name, version, req)) + + if missing_deps: + missing[package_name] = sorted(missing_deps, key=str) + if conflicting_deps: + conflicting[package_name] = sorted(conflicting_deps, key=str) + + return missing, conflicting + + +def check_install_conflicts(to_install): + # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult] + """For checking if the dependency graph would be consistent after \ + installing given requirements + """ + # Start from the current state + package_set, _ = create_package_set_from_installed() + # Install packages + would_be_installed = _simulate_installation_of(to_install, package_set) + + # Only warn about directly-dependent packages; create a whitelist of them + whitelist = _create_whitelist(would_be_installed, package_set) + + return ( + package_set, + check_package_set( + package_set, should_ignore=lambda name: name not in whitelist + ) + ) + + +def _simulate_installation_of(to_install, package_set): + # type: (List[InstallRequirement], PackageSet) -> Set[str] + """Computes the version of packages after installing to_install. 
+ """ + + # Keep track of packages that were installed + installed = set() + + # Modify it as installing requirement_set would (assuming no errors) + for inst_req in to_install: + dist = make_abstract_dist(inst_req).dist() + name = canonicalize_name(dist.key) + package_set[name] = PackageDetails(dist.version, dist.requires()) + + installed.add(name) + + return installed + + +def _create_whitelist(would_be_installed, package_set): + # type: (Set[str], PackageSet) -> Set[str] + packages_affected = set(would_be_installed) + + for package_name in package_set: + if package_name in packages_affected: + continue + + for req in package_set[package_name].requires: + if canonicalize_name(req.name) in packages_affected: + packages_affected.add(package_name) + break + + return packages_affected diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/operations/check.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/check.pyc new file mode 100644 index 0000000..1158c6c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/check.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/operations/freeze.py b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/freeze.py new file mode 100644 index 0000000..388bb73 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/freeze.py @@ -0,0 +1,247 @@ +from __future__ import absolute_import + +import collections +import logging +import os +import re + +from pip._vendor import six +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.pkg_resources import RequirementParseError + +from pip._internal.exceptions import BadCommand, InstallationError +from pip._internal.req.constructors import ( + install_req_from_editable, install_req_from_line, +) +from pip._internal.req.req_file import COMMENT_RE +from pip._internal.utils.misc import ( + dist_is_editable, get_installed_distributions, +) +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import ( # noqa: F401 + Iterator, Optional, List, Container, Set, Dict, Tuple, Iterable, Union + ) + from pip._internal.cache import WheelCache # noqa: F401 + from pip._vendor.pkg_resources import ( # noqa: F401 + Distribution, Requirement + ) + + RequirementInfo = Tuple[Optional[Union[str, Requirement]], bool, List[str]] + + +logger = logging.getLogger(__name__) + + +def freeze( + requirement=None, # type: Optional[List[str]] + find_links=None, # type: Optional[List[str]] + local_only=None, # type: Optional[bool] + user_only=None, # type: Optional[bool] + skip_regex=None, # type: Optional[str] + isolated=False, # type: bool + wheel_cache=None, # type: Optional[WheelCache] + exclude_editable=False, # type: bool + skip=() # type: Container[str] +): + # type: (...) 
-> Iterator[str] + find_links = find_links or [] + skip_match = None + + if skip_regex: + skip_match = re.compile(skip_regex).search + + for link in find_links: + yield '-f %s' % link + installations = {} # type: Dict[str, FrozenRequirement] + for dist in get_installed_distributions(local_only=local_only, + skip=(), + user_only=user_only): + try: + req = FrozenRequirement.from_dist(dist) + except RequirementParseError: + logger.warning( + "Could not parse requirement: %s", + dist.project_name + ) + continue + if exclude_editable and req.editable: + continue + installations[req.name] = req + + if requirement: + # the options that don't get turned into an InstallRequirement + # should only be emitted once, even if the same option is in multiple + # requirements files, so we need to keep track of what has been emitted + # so that we don't emit it again if it's seen again + emitted_options = set() # type: Set[str] + # keep track of which files a requirement is in so that we can + # give an accurate warning if a requirement appears multiple times. + req_files = collections.defaultdict(list) # type: Dict[str, List[str]] + for req_file_path in requirement: + with open(req_file_path) as req_file: + for line in req_file: + if (not line.strip() or + line.strip().startswith('#') or + (skip_match and skip_match(line)) or + line.startswith(( + '-r', '--requirement', + '-Z', '--always-unzip', + '-f', '--find-links', + '-i', '--index-url', + '--pre', + '--trusted-host', + '--process-dependency-links', + '--extra-index-url'))): + line = line.rstrip() + if line not in emitted_options: + emitted_options.add(line) + yield line + continue + + if line.startswith('-e') or line.startswith('--editable'): + if line.startswith('-e'): + line = line[2:].strip() + else: + line = line[len('--editable'):].strip().lstrip('=') + line_req = install_req_from_editable( + line, + isolated=isolated, + wheel_cache=wheel_cache, + ) + else: + line_req = install_req_from_line( + COMMENT_RE.sub('', line).strip(), + isolated=isolated, + wheel_cache=wheel_cache, + ) + + if not line_req.name: + logger.info( + "Skipping line in requirement file [%s] because " + "it's not clear what it would install: %s", + req_file_path, line.strip(), + ) + logger.info( + " (add #egg=PackageName to the URL to avoid" + " this warning)" + ) + elif line_req.name not in installations: + # either it's not installed, or it is installed + # but has been processed already + if not req_files[line_req.name]: + logger.warning( + "Requirement file [%s] contains %s, but " + "package %r is not installed", + req_file_path, + COMMENT_RE.sub('', line).strip(), line_req.name + ) + else: + req_files[line_req.name].append(req_file_path) + else: + yield str(installations[line_req.name]).rstrip() + del installations[line_req.name] + req_files[line_req.name].append(req_file_path) + + # Warn about requirements that were included multiple times (in a + # single requirements file or in different requirements files). 
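
What freeze() ultimately emits for each installed package is a pinned "name==version" line. A minimal standalone sketch of that end result (not pip's own code; it ignores editable installs, VCS checkouts and the requirement-file pairing above, and the skip list is just an illustrative default):

    import pkg_resources

    def naive_freeze(skip=('pip', 'setuptools', 'wheel')):
        # One 'name==version' line per installed distribution, sorted
        # case-insensitively like pip's own output.
        dists = sorted(pkg_resources.working_set,
                       key=lambda d: d.project_name.lower())
        for dist in dists:
            if dist.project_name.lower() not in skip:
                yield '%s==%s' % (dist.project_name, dist.version)

    print('\n'.join(naive_freeze()))
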
+ for name, files in six.iteritems(req_files): + if len(files) > 1: + logger.warning("Requirement %s included multiple times [%s]", + name, ', '.join(sorted(set(files)))) + + yield( + '## The following requirements were added by ' + 'pip freeze:' + ) + for installation in sorted( + installations.values(), key=lambda x: x.name.lower()): + if canonicalize_name(installation.name) not in skip: + yield str(installation).rstrip() + + +def get_requirement_info(dist): + # type: (Distribution) -> RequirementInfo + """ + Compute and return values (req, editable, comments) for use in + FrozenRequirement.from_dist(). + """ + if not dist_is_editable(dist): + return (None, False, []) + + location = os.path.normcase(os.path.abspath(dist.location)) + + from pip._internal.vcs import vcs, RemoteNotFoundError + vc_type = vcs.get_backend_type(location) + + if not vc_type: + req = dist.as_requirement() + logger.debug( + 'No VCS found for editable requirement {!r} in: {!r}', req, + location, + ) + comments = [ + '# Editable install with no version control ({})'.format(req) + ] + return (location, True, comments) + + try: + req = vc_type.get_src_requirement(location, dist.project_name) + except RemoteNotFoundError: + req = dist.as_requirement() + comments = [ + '# Editable {} install with no remote ({})'.format( + vc_type.__name__, req, + ) + ] + return (location, True, comments) + + except BadCommand: + logger.warning( + 'cannot determine version of editable source in %s ' + '(%s command not found in path)', + location, + vc_type.name, + ) + return (None, True, []) + + except InstallationError as exc: + logger.warning( + "Error when trying to get requirement for VCS system %s, " + "falling back to uneditable format", exc + ) + else: + if req is not None: + return (req, True, []) + + logger.warning( + 'Could not determine repository location of %s', location + ) + comments = ['## !! 
Could not determine repository location'] + + return (None, False, comments) + + +class FrozenRequirement(object): + def __init__(self, name, req, editable, comments=()): + # type: (str, Union[str, Requirement], bool, Iterable[str]) -> None + self.name = name + self.req = req + self.editable = editable + self.comments = comments + + @classmethod + def from_dist(cls, dist): + # type: (Distribution) -> FrozenRequirement + req, editable, comments = get_requirement_info(dist) + if req is None: + req = dist.as_requirement() + + return cls(dist.project_name, req, editable, comments=comments) + + def __str__(self): + req = self.req + if self.editable: + req = '-e %s' % req + return '\n'.join(list(self.comments) + [str(req)]) + '\n' diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/operations/freeze.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/freeze.pyc new file mode 100644 index 0000000..3c2725a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/freeze.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/operations/prepare.py b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/prepare.py new file mode 100644 index 0000000..4f31dd5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/prepare.py @@ -0,0 +1,413 @@ +"""Prepares a distribution for installation +""" + +import logging +import os + +from pip._vendor import pkg_resources, requests + +from pip._internal.build_env import BuildEnvironment +from pip._internal.download import ( + is_dir_url, is_file_url, is_vcs_url, unpack_url, url_to_path, +) +from pip._internal.exceptions import ( + DirectoryUrlHashUnsupported, HashUnpinned, InstallationError, + PreviousBuildDirError, VcsHashUnsupported, +) +from pip._internal.utils.compat import expanduser +from pip._internal.utils.hashes import MissingHashes +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import display_path, normalize_path +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pip._internal.vcs import vcs + +if MYPY_CHECK_RUNNING: + from typing import Any, Optional # noqa: F401 + from pip._internal.req.req_install import InstallRequirement # noqa: F401 + from pip._internal.index import PackageFinder # noqa: F401 + from pip._internal.download import PipSession # noqa: F401 + from pip._internal.req.req_tracker import RequirementTracker # noqa: F401 + +logger = logging.getLogger(__name__) + + +def make_abstract_dist(req): + # type: (InstallRequirement) -> DistAbstraction + """Factory to make an abstract dist object. + + Preconditions: Either an editable req with a source_dir, or satisfied_by or + a wheel link, or a non-editable req with a source_dir. + + :return: A concrete DistAbstraction. + """ + if req.editable: + return IsSDist(req) + elif req.link and req.link.is_wheel: + return IsWheel(req) + else: + return IsSDist(req) + + +class DistAbstraction(object): + """Abstracts out the wheel vs non-wheel Resolver.resolve() logic. + + The requirements for anything installable are as follows: + - we must be able to determine the requirement name + (or we can't correctly handle the non-upgrade case). 
+     - we must be able to generate a list of run-time dependencies
+       without installing any additional packages (or we would
+       have to either burn time by doing temporary isolated installs
+       or alternatively violate pips 'don't start installing unless
+       all requirements are available' rule - neither of which are
+       desirable).
+     - for packages with setup requirements, we must also be able
+       to determine their requirements without installing additional
+       packages (for the same reason as run-time dependencies)
+     - we must be able to create a Distribution object exposing the
+       above metadata.
+    """
+
+    def __init__(self, req):
+        # type: (InstallRequirement) -> None
+        self.req = req  # type: InstallRequirement
+
+    def dist(self):
+        # type: () -> Any
+        """Return a setuptools Dist object."""
+        raise NotImplementedError
+
+    def prep_for_dist(self, finder, build_isolation):
+        # type: (PackageFinder, bool) -> Any
+        """Ensure that we can get a Dist for this requirement."""
+        raise NotImplementedError
+
+
+class IsWheel(DistAbstraction):
+
+    def dist(self):
+        # type: () -> pkg_resources.Distribution
+        return list(pkg_resources.find_distributions(
+            self.req.source_dir))[0]
+
+    def prep_for_dist(self, finder, build_isolation):
+        # type: (PackageFinder, bool) -> Any
+        # FIXME:https://github.com/pypa/pip/issues/1112
+        pass
+
+
+class IsSDist(DistAbstraction):
+
+    def dist(self):
+        return self.req.get_dist()
+
+    def prep_for_dist(self, finder, build_isolation):
+        # type: (PackageFinder, bool) -> None
+        # Prepare for building. We need to:
+        #   1. Load pyproject.toml (if it exists)
+        #   2. Set up the build environment
+
+        self.req.load_pyproject_toml()
+        should_isolate = self.req.use_pep517 and build_isolation
+
+        def _raise_conflicts(conflicting_with, conflicting_reqs):
+            raise InstallationError(
+                "Some build dependencies for %s conflict with %s: %s." % (
+                    self.req, conflicting_with, ', '.join(
+                        '%s is incompatible with %s' % (installed, wanted)
+                        for installed, wanted in sorted(conflicting_reqs))))
+
+        if should_isolate:
+            # Isolate in a BuildEnvironment and install the build-time
+            # requirements.
+            self.req.build_env = BuildEnvironment()
+            self.req.build_env.install_requirements(
+                finder, self.req.pyproject_requires, 'overlay',
+                "Installing build dependencies"
+            )
+            conflicting, missing = self.req.build_env.check_requirements(
+                self.req.requirements_to_check
+            )
+            if conflicting:
+                _raise_conflicts("PEP 517/518 supported requirements",
+                                 conflicting)
+            if missing:
+                logger.warning(
+                    "Missing build requirements in pyproject.toml for %s.",
+                    self.req,
+                )
+                logger.warning(
+                    "The project does not specify a build backend, and "
+                    "pip cannot fall back to setuptools without %s.",
+                    " and ".join(map(repr, sorted(missing)))
+                )
+            # Install any extra build dependencies that the backend requests.
+            # This must be done in a second pass, as the pyproject.toml
+            # dependencies must be installed before we can call the backend.
+            with self.req.build_env:
+                # We need to have the env active when calling the hook.
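
Both conflict checks in prep_for_dist() above reduce to asking whether an installed version satisfies a PEP 440 specifier. A rough standalone illustration of that comparison, using the same vendored packaging library pip imports elsewhere (illustrative only; the version and specifier strings here are made up):

    from pip._vendor.packaging.requirements import Requirement

    def conflicts(installed_version, requirement_string):
        # True when the installed version falls outside the requested range.
        req = Requirement(requirement_string)
        return not req.specifier.contains(installed_version, prereleases=True)

    print(conflicts('40.0.0', 'setuptools>=40.8.0'))  # True: would be reported
    print(conflicts('40.8.0', 'setuptools>=40.8.0'))  # False: satisfied
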
+ self.req.spin_message = "Getting requirements to build wheel" + reqs = self.req.pep517_backend.get_requires_for_build_wheel() + conflicting, missing = self.req.build_env.check_requirements(reqs) + if conflicting: + _raise_conflicts("the backend dependencies", conflicting) + self.req.build_env.install_requirements( + finder, missing, 'normal', + "Installing backend dependencies" + ) + + self.req.prepare_metadata() + self.req.assert_source_matches_version() + + +class Installed(DistAbstraction): + + def dist(self): + # type: () -> pkg_resources.Distribution + return self.req.satisfied_by + + def prep_for_dist(self, finder, build_isolation): + # type: (PackageFinder, bool) -> Any + pass + + +class RequirementPreparer(object): + """Prepares a Requirement + """ + + def __init__( + self, + build_dir, # type: str + download_dir, # type: Optional[str] + src_dir, # type: str + wheel_download_dir, # type: Optional[str] + progress_bar, # type: str + build_isolation, # type: bool + req_tracker # type: RequirementTracker + ): + # type: (...) -> None + super(RequirementPreparer, self).__init__() + + self.src_dir = src_dir + self.build_dir = build_dir + self.req_tracker = req_tracker + + # Where still packed archives should be written to. If None, they are + # not saved, and are deleted immediately after unpacking. + self.download_dir = download_dir + + # Where still-packed .whl files should be written to. If None, they are + # written to the download_dir parameter. Separate to download_dir to + # permit only keeping wheel archives for pip wheel. + if wheel_download_dir: + wheel_download_dir = normalize_path(wheel_download_dir) + self.wheel_download_dir = wheel_download_dir + + # NOTE + # download_dir and wheel_download_dir overlap semantically and may + # be combined if we're willing to have non-wheel archives present in + # the wheelhouse output by 'pip wheel'. + + self.progress_bar = progress_bar + + # Is build isolation allowed? + self.build_isolation = build_isolation + + @property + def _download_should_save(self): + # type: () -> bool + # TODO: Modify to reduce indentation needed + if self.download_dir: + self.download_dir = expanduser(self.download_dir) + if os.path.exists(self.download_dir): + return True + else: + logger.critical('Could not find download directory') + raise InstallationError( + "Could not find or access download directory '%s'" + % display_path(self.download_dir)) + return False + + def prepare_linked_requirement( + self, + req, # type: InstallRequirement + session, # type: PipSession + finder, # type: PackageFinder + upgrade_allowed, # type: bool + require_hashes # type: bool + ): + # type: (...) -> DistAbstraction + """Prepare a requirement that would be obtained from req.link + """ + # TODO: Breakup into smaller functions + if req.link and req.link.scheme == 'file': + path = url_to_path(req.link.url) + logger.info('Processing %s', display_path(path)) + else: + logger.info('Collecting %s', req) + + with indent_log(): + # @@ if filesystem packages are not marked + # editable in a req, a non deterministic error + # occurs when the script attempts to unpack the + # build directory + req.ensure_has_source_dir(self.build_dir) + # If a checkout exists, it's unwise to keep going. version + # inconsistencies are logged later, but do not fail the + # installation. 
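
The _download_should_save property defined above boils down to "a download directory was requested and actually exists". A simplified standalone version of the same check (a sketch; pip's own code also routes through its expanduser compat helper and display_path):

    import os

    def download_should_save(download_dir):
        # No directory requested: nothing to save.
        if not download_dir:
            return False
        download_dir = os.path.expanduser(download_dir)
        # Requested and present: archives can be written there.
        if os.path.exists(download_dir):
            return True
        raise RuntimeError('Could not find download directory %r'
                           % download_dir)
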
+            # FIXME: this won't upgrade when there's an existing
+            # package unpacked in `req.source_dir`
+            if os.path.exists(os.path.join(req.source_dir, 'setup.py')):
+                raise PreviousBuildDirError(
+                    "pip can't proceed with requirements '%s' due to a"
+                    " pre-existing build directory (%s). This is "
+                    "likely due to a previous installation that failed"
+                    ". pip is being responsible and not assuming it "
+                    "can delete this. Please delete it and try again."
+                    % (req, req.source_dir)
+                )
+            req.populate_link(finder, upgrade_allowed, require_hashes)
+
+            # We can't hit this spot and have populate_link return None.
+            # req.satisfied_by is None here (because we're
+            # guarded) and upgrade has no impact except when satisfied_by
+            # is not None.
+            # Then inside find_requirement existing_applicable -> False
+            # If no new versions are found, DistributionNotFound is raised,
+            # otherwise a result is guaranteed.
+            assert req.link
+            link = req.link
+
+            # Now that we have the real link, we can tell what kind of
+            # requirements we have and raise some more informative errors
+            # than otherwise. (For example, we can raise VcsHashUnsupported
+            # for a VCS URL rather than HashMissing.)
+            if require_hashes:
+                # We could check these first 2 conditions inside
+                # unpack_url and save repetition of conditions, but then
+                # we would report less-useful error messages for
+                # unhashable requirements, complaining that there's no
+                # hash provided.
+                if is_vcs_url(link):
+                    raise VcsHashUnsupported()
+                elif is_file_url(link) and is_dir_url(link):
+                    raise DirectoryUrlHashUnsupported()
+                if not req.original_link and not req.is_pinned:
+                    # Unpinned packages are asking for trouble when a new
+                    # version is uploaded. This isn't a security check, but
+                    # it saves users a surprising hash mismatch in the
+                    # future.
+                    #
+                    # file:/// URLs aren't pinnable, so don't complain
+                    # about them not being pinned.
+                    raise HashUnpinned()
+
+            hashes = req.hashes(trust_internet=not require_hashes)
+            if require_hashes and not hashes:
+                # Known-good hashes are missing for this requirement, so
+                # shim it with a facade object that will provoke hash
+                # computation and then raise a HashMissing exception
+                # showing the user what the hash should be.
+                hashes = MissingHashes()
+
+            try:
+                download_dir = self.download_dir
+                # We always delete unpacked sdists after pip ran.
+                autodelete_unpacked = True
+                if req.link.is_wheel and self.wheel_download_dir:
+                    # when doing 'pip wheel` we download wheels to a
+                    # dedicated dir.
+                    download_dir = self.wheel_download_dir
+                if req.link.is_wheel:
+                    if download_dir:
+                        # When downloading, we only unpack wheels to get
+                        # metadata.
+                        autodelete_unpacked = True
+                    else:
+                        # When installing a wheel, we use the unpacked
+                        # wheel.
+                        autodelete_unpacked = False
+                unpack_url(
+                    req.link, req.source_dir,
+                    download_dir, autodelete_unpacked,
+                    session=session, hashes=hashes,
+                    progress_bar=self.progress_bar
+                )
+            except requests.HTTPError as exc:
+                logger.critical(
+                    'Could not install requirement %s because of error %s',
+                    req,
+                    exc,
+                )
+                raise InstallationError(
+                    'Could not install requirement %s because of HTTP '
+                    'error %s for URL %s' %
+                    (req, exc, req.link)
+                )
+            abstract_dist = make_abstract_dist(req)
+            with self.req_tracker.track(req):
+                abstract_dist.prep_for_dist(finder, self.build_isolation)
+            if self._download_should_save:
+                # Make a .zip of the source_dir we already created.
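
The hash handling above is what backs pip's --require-hashes mode, where every requirement must be pinned to a known digest. The digest compared against is a plain file hash; a hypothetical helper (not part of pip) shows how such a pin is derived:

    import hashlib

    def sha256_of(path, blocksize=1 << 20):
        # Stream the archive so large wheels are not read into memory at once.
        h = hashlib.sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(blocksize), b''):
                h.update(chunk)
        return h.hexdigest()

The resulting value is what a requirements.txt line pins, e.g. requests==2.21.0 --hash=sha256:<hexdigest>.
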
+ if req.link.scheme in vcs.all_schemes: + req.archive(self.download_dir) + return abstract_dist + + def prepare_editable_requirement( + self, + req, # type: InstallRequirement + require_hashes, # type: bool + use_user_site, # type: bool + finder # type: PackageFinder + ): + # type: (...) -> DistAbstraction + """Prepare an editable requirement + """ + assert req.editable, "cannot prepare a non-editable req as editable" + + logger.info('Obtaining %s', req) + + with indent_log(): + if require_hashes: + raise InstallationError( + 'The editable requirement %s cannot be installed when ' + 'requiring hashes, because there is no single file to ' + 'hash.' % req + ) + req.ensure_has_source_dir(self.src_dir) + req.update_editable(not self._download_should_save) + + abstract_dist = make_abstract_dist(req) + with self.req_tracker.track(req): + abstract_dist.prep_for_dist(finder, self.build_isolation) + + if self._download_should_save: + req.archive(self.download_dir) + req.check_if_exists(use_user_site) + + return abstract_dist + + def prepare_installed_requirement(self, req, require_hashes, skip_reason): + # type: (InstallRequirement, bool, Optional[str]) -> DistAbstraction + """Prepare an already-installed requirement + """ + assert req.satisfied_by, "req should have been satisfied but isn't" + assert skip_reason is not None, ( + "did not get skip reason skipped but req.satisfied_by " + "is set to %r" % (req.satisfied_by,) + ) + logger.info( + 'Requirement %s: %s (%s)', + skip_reason, req, req.satisfied_by.version + ) + with indent_log(): + if require_hashes: + logger.debug( + 'Since it is already installed, we are trusting this ' + 'package without checking its hash. To ensure a ' + 'completely repeatable environment, install into an ' + 'empty virtualenv.' 
+ ) + abstract_dist = Installed(req) + + return abstract_dist diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/operations/prepare.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/prepare.pyc new file mode 100644 index 0000000..86a96e1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/operations/prepare.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/pep425tags.py b/project/venv/lib/python2.7/site-packages/pip/_internal/pep425tags.py new file mode 100644 index 0000000..1e782d1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/pep425tags.py @@ -0,0 +1,381 @@ +"""Generate and work with PEP 425 Compatibility Tags.""" +from __future__ import absolute_import + +import distutils.util +import logging +import platform +import re +import sys +import sysconfig +import warnings +from collections import OrderedDict + +import pip._internal.utils.glibc +from pip._internal.utils.compat import get_extension_suffixes +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import ( # noqa: F401 + Tuple, Callable, List, Optional, Union, Dict + ) + + Pep425Tag = Tuple[str, str, str] + +logger = logging.getLogger(__name__) + +_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)') + + +def get_config_var(var): + # type: (str) -> Optional[str] + try: + return sysconfig.get_config_var(var) + except IOError as e: # Issue #1074 + warnings.warn("{}".format(e), RuntimeWarning) + return None + + +def get_abbr_impl(): + # type: () -> str + """Return abbreviated implementation name.""" + if hasattr(sys, 'pypy_version_info'): + pyimpl = 'pp' + elif sys.platform.startswith('java'): + pyimpl = 'jy' + elif sys.platform == 'cli': + pyimpl = 'ip' + else: + pyimpl = 'cp' + return pyimpl + + +def get_impl_ver(): + # type: () -> str + """Return implementation version.""" + impl_ver = get_config_var("py_version_nodot") + if not impl_ver or get_abbr_impl() == 'pp': + impl_ver = ''.join(map(str, get_impl_version_info())) + return impl_ver + + +def get_impl_version_info(): + # type: () -> Tuple[int, ...] + """Return sys.version_info-like tuple for use in decrementing the minor + version.""" + if get_abbr_impl() == 'pp': + # as per https://github.com/pypa/pip/issues/2882 + # attrs exist only on pypy + return (sys.version_info[0], + sys.pypy_version_info.major, # type: ignore + sys.pypy_version_info.minor) # type: ignore + else: + return sys.version_info[0], sys.version_info[1] + + +def get_impl_tag(): + # type: () -> str + """ + Returns the Tag for this specific implementation. 
+ """ + return "{}{}".format(get_abbr_impl(), get_impl_ver()) + + +def get_flag(var, fallback, expected=True, warn=True): + # type: (str, Callable[..., bool], Union[bool, int], bool) -> bool + """Use a fallback method for determining SOABI flags if the needed config + var is unset or unavailable.""" + val = get_config_var(var) + if val is None: + if warn: + logger.debug("Config variable '%s' is unset, Python ABI tag may " + "be incorrect", var) + return fallback() + return val == expected + + +def get_abi_tag(): + # type: () -> Optional[str] + """Return the ABI tag based on SOABI (if available) or emulate SOABI + (CPython 2, PyPy).""" + soabi = get_config_var('SOABI') + impl = get_abbr_impl() + if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'): + d = '' + m = '' + u = '' + if get_flag('Py_DEBUG', + lambda: hasattr(sys, 'gettotalrefcount'), + warn=(impl == 'cp')): + d = 'd' + if get_flag('WITH_PYMALLOC', + lambda: impl == 'cp', + warn=(impl == 'cp')): + m = 'm' + if get_flag('Py_UNICODE_SIZE', + lambda: sys.maxunicode == 0x10ffff, + expected=4, + warn=(impl == 'cp' and + sys.version_info < (3, 3))) \ + and sys.version_info < (3, 3): + u = 'u' + abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) + elif soabi and soabi.startswith('cpython-'): + abi = 'cp' + soabi.split('-')[1] + elif soabi: + abi = soabi.replace('.', '_').replace('-', '_') + else: + abi = None + return abi + + +def _is_running_32bit(): + # type: () -> bool + return sys.maxsize == 2147483647 + + +def get_platform(): + # type: () -> str + """Return our platform name 'win32', 'linux_x86_64'""" + if sys.platform == 'darwin': + # distutils.util.get_platform() returns the release based on the value + # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may + # be significantly older than the user's current machine. + release, _, machine = platform.mac_ver() + split_ver = release.split('.') + + if machine == "x86_64" and _is_running_32bit(): + machine = "i386" + elif machine == "ppc64" and _is_running_32bit(): + machine = "ppc" + + return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine) + + # XXX remove distutils dependency + result = distutils.util.get_platform().replace('.', '_').replace('-', '_') + if result == "linux_x86_64" and _is_running_32bit(): + # 32 bit Python program (running on a 64 bit Linux): pip should only + # install and run 32 bit compiled extensions in that case. + result = "linux_i686" + + return result + + +def is_manylinux1_compatible(): + # type: () -> bool + # Only Linux, and only x86-64 / i686 + if get_platform() not in {"linux_x86_64", "linux_i686"}: + return False + + # Check for presence of _manylinux module + try: + import _manylinux + return bool(_manylinux.manylinux1_compatible) + except (ImportError, AttributeError): + # Fall through to heuristic check below + pass + + # Check glibc version. CentOS 5 uses glibc 2.5. + return pip._internal.utils.glibc.have_compatible_glibc(2, 5) + + +def is_manylinux2010_compatible(): + # type: () -> bool + # Only Linux, and only x86-64 / i686 + if get_platform() not in {"linux_x86_64", "linux_i686"}: + return False + + # Check for presence of _manylinux module + try: + import _manylinux + return bool(_manylinux.manylinux2010_compatible) + except (ImportError, AttributeError): + # Fall through to heuristic check below + pass + + # Check glibc version. CentOS 6 uses glibc 2.12. 
+ return pip._internal.utils.glibc.have_compatible_glibc(2, 12) + + +def get_darwin_arches(major, minor, machine): + # type: (int, int, str) -> List[str] + """Return a list of supported arches (including group arches) for + the given major, minor and machine architecture of an macOS machine. + """ + arches = [] + + def _supports_arch(major, minor, arch): + # type: (int, int, str) -> bool + # Looking at the application support for macOS versions in the chart + # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears + # our timeline looks roughly like: + # + # 10.0 - Introduces ppc support. + # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64 + # and x86_64 support is CLI only, and cannot be used for GUI + # applications. + # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications. + # 10.6 - Drops support for ppc64 + # 10.7 - Drops support for ppc + # + # Given that we do not know if we're installing a CLI or a GUI + # application, we must be conservative and assume it might be a GUI + # application and behave as if ppc64 and x86_64 support did not occur + # until 10.5. + # + # Note: The above information is taken from the "Application support" + # column in the chart not the "Processor support" since I believe + # that we care about what instruction sets an application can use + # not which processors the OS supports. + if arch == 'ppc': + return (major, minor) <= (10, 5) + if arch == 'ppc64': + return (major, minor) == (10, 5) + if arch == 'i386': + return (major, minor) >= (10, 4) + if arch == 'x86_64': + return (major, minor) >= (10, 5) + if arch in groups: + for garch in groups[arch]: + if _supports_arch(major, minor, garch): + return True + return False + + groups = OrderedDict([ + ("fat", ("i386", "ppc")), + ("intel", ("x86_64", "i386")), + ("fat64", ("x86_64", "ppc64")), + ("fat32", ("x86_64", "i386", "ppc")), + ]) # type: Dict[str, Tuple[str, ...]] + + if _supports_arch(major, minor, machine): + arches.append(machine) + + for garch in groups: + if machine in groups[garch] and _supports_arch(major, minor, garch): + arches.append(garch) + + arches.append('universal') + + return arches + + +def get_all_minor_versions_as_strings(version_info): + # type: (Tuple[int, ...]) -> List[str] + versions = [] + major = version_info[:-1] + # Support all previous minor Python versions. + for minor in range(version_info[-1], -1, -1): + versions.append(''.join(map(str, major + (minor,)))) + return versions + + +def get_supported( + versions=None, # type: Optional[List[str]] + noarch=False, # type: bool + platform=None, # type: Optional[str] + impl=None, # type: Optional[str] + abi=None # type: Optional[str] +): + # type: (...) -> List[Pep425Tag] + """Return a list of supported tags for each version specified in + `versions`. + + :param versions: a list of string versions, of the form ["33", "32"], + or None. The first version will be assumed to support our ABI. + :param platform: specify the exact platform you want valid + tags for, or None. If None, use the local system platform. + :param impl: specify the exact implementation you want valid + tags for, or None. If None, use the local interpreter impl. + :param abi: specify the exact abi you want valid + tags for, or None. If None, use the local interpreter abi. 
+ """ + supported = [] + + # Versions must be given with respect to the preference + if versions is None: + version_info = get_impl_version_info() + versions = get_all_minor_versions_as_strings(version_info) + + impl = impl or get_abbr_impl() + + abis = [] # type: List[str] + + abi = abi or get_abi_tag() + if abi: + abis[0:0] = [abi] + + abi3s = set() + for suffix in get_extension_suffixes(): + if suffix.startswith('.abi'): + abi3s.add(suffix.split('.', 2)[1]) + + abis.extend(sorted(list(abi3s))) + + abis.append('none') + + if not noarch: + arch = platform or get_platform() + arch_prefix, arch_sep, arch_suffix = arch.partition('_') + if arch.startswith('macosx'): + # support macosx-10.6-intel on macosx-10.9-x86_64 + match = _osx_arch_pat.match(arch) + if match: + name, major, minor, actual_arch = match.groups() + tpl = '{}_{}_%i_%s'.format(name, major) + arches = [] + for m in reversed(range(int(minor) + 1)): + for a in get_darwin_arches(int(major), m, actual_arch): + arches.append(tpl % (m, a)) + else: + # arch pattern didn't match (?!) + arches = [arch] + elif arch_prefix == 'manylinux2010': + # manylinux1 wheels run on most manylinux2010 systems with the + # exception of wheels depending on ncurses. PEP 571 states + # manylinux1 wheels should be considered manylinux2010 wheels: + # https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels + arches = [arch, 'manylinux1' + arch_sep + arch_suffix] + elif platform is None: + arches = [] + if is_manylinux2010_compatible(): + arches.append('manylinux2010' + arch_sep + arch_suffix) + if is_manylinux1_compatible(): + arches.append('manylinux1' + arch_sep + arch_suffix) + arches.append(arch) + else: + arches = [arch] + + # Current version, current API (built specifically for our Python): + for abi in abis: + for arch in arches: + supported.append(('%s%s' % (impl, versions[0]), abi, arch)) + + # abi3 modules compatible with older version of Python + for version in versions[1:]: + # abi3 was introduced in Python 3.2 + if version in {'31', '30'}: + break + for abi in abi3s: # empty set if not Python 3 + for arch in arches: + supported.append(("%s%s" % (impl, version), abi, arch)) + + # Has binaries, does not use the Python API: + for arch in arches: + supported.append(('py%s' % (versions[0][0]), 'none', arch)) + + # No abi / arch, but requires our implementation: + supported.append(('%s%s' % (impl, versions[0]), 'none', 'any')) + # Tagged specifically as being cross-version compatible + # (with just the major version specified) + supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) + + # No abi / arch, generic Python + for i, version in enumerate(versions): + supported.append(('py%s' % (version,), 'none', 'any')) + if i == 0: + supported.append(('py%s' % (version[0]), 'none', 'any')) + + return supported + + +implementation_tag = get_impl_tag() diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/pep425tags.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/pep425tags.pyc new file mode 100644 index 0000000..59c3688 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/pep425tags.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/pyproject.py b/project/venv/lib/python2.7/site-packages/pip/_internal/pyproject.py new file mode 100644 index 0000000..8d739a6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/pyproject.py @@ -0,0 +1,171 @@ +from __future__ import absolute_import + +import io +import os 
+import sys + +from pip._vendor import pytoml, six + +from pip._internal.exceptions import InstallationError +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Any, Tuple, Optional, List # noqa: F401 + + +def _is_list_of_str(obj): + # type: (Any) -> bool + return ( + isinstance(obj, list) and + all(isinstance(item, six.string_types) for item in obj) + ) + + +def make_pyproject_path(setup_py_dir): + # type: (str) -> str + path = os.path.join(setup_py_dir, 'pyproject.toml') + + # Python2 __file__ should not be unicode + if six.PY2 and isinstance(path, six.text_type): + path = path.encode(sys.getfilesystemencoding()) + + return path + + +def load_pyproject_toml( + use_pep517, # type: Optional[bool] + pyproject_toml, # type: str + setup_py, # type: str + req_name # type: str +): + # type: (...) -> Optional[Tuple[List[str], str, List[str]]] + """Load the pyproject.toml file. + + Parameters: + use_pep517 - Has the user requested PEP 517 processing? None + means the user hasn't explicitly specified. + pyproject_toml - Location of the project's pyproject.toml file + setup_py - Location of the project's setup.py file + req_name - The name of the requirement we're processing (for + error reporting) + + Returns: + None if we should use the legacy code path, otherwise a tuple + ( + requirements from pyproject.toml, + name of PEP 517 backend, + requirements we should check are installed after setting + up the build environment + ) + """ + has_pyproject = os.path.isfile(pyproject_toml) + has_setup = os.path.isfile(setup_py) + + if has_pyproject: + with io.open(pyproject_toml, encoding="utf-8") as f: + pp_toml = pytoml.load(f) + build_system = pp_toml.get("build-system") + else: + build_system = None + + # The following cases must use PEP 517 + # We check for use_pep517 being non-None and falsey because that means + # the user explicitly requested --no-use-pep517. The value 0 as + # opposed to False can occur when the value is provided via an + # environment variable or config file option (due to the quirk of + # strtobool() returning an integer in pip's configuration code). + if has_pyproject and not has_setup: + if use_pep517 is not None and not use_pep517: + raise InstallationError( + "Disabling PEP 517 processing is invalid: " + "project does not have a setup.py" + ) + use_pep517 = True + elif build_system and "build-backend" in build_system: + if use_pep517 is not None and not use_pep517: + raise InstallationError( + "Disabling PEP 517 processing is invalid: " + "project specifies a build backend of {} " + "in pyproject.toml".format( + build_system["build-backend"] + ) + ) + use_pep517 = True + + # If we haven't worked out whether to use PEP 517 yet, + # and the user hasn't explicitly stated a preference, + # we do so if the project has a pyproject.toml file. + elif use_pep517 is None: + use_pep517 = has_pyproject + + # At this point, we know whether we're going to use PEP 517. + assert use_pep517 is not None + + # If we're using the legacy code path, there is nothing further + # for us to do here. + if not use_pep517: + return None + + if build_system is None: + # Either the user has a pyproject.toml with no build-system + # section, or the user has no pyproject.toml, but has opted in + # explicitly via --use-pep517. 
+ # In the absence of any explicit backend specification, we + # assume the setuptools backend that most closely emulates the + # traditional direct setup.py execution, and require wheel and + # a version of setuptools that supports that backend. + + build_system = { + "requires": ["setuptools>=40.8.0", "wheel"], + "build-backend": "setuptools.build_meta:__legacy__", + } + + # If we're using PEP 517, we have build system information (either + # from pyproject.toml, or defaulted by the code above). + # Note that at this point, we do not know if the user has actually + # specified a backend, though. + assert build_system is not None + + # Ensure that the build-system section in pyproject.toml conforms + # to PEP 518. + error_template = ( + "{package} has a pyproject.toml file that does not comply " + "with PEP 518: {reason}" + ) + + # Specifying the build-system table but not the requires key is invalid + if "requires" not in build_system: + raise InstallationError( + error_template.format(package=req_name, reason=( + "it has a 'build-system' table but not " + "'build-system.requires' which is mandatory in the table" + )) + ) + + # Error out if requires is not a list of strings + requires = build_system["requires"] + if not _is_list_of_str(requires): + raise InstallationError(error_template.format( + package=req_name, + reason="'build-system.requires' is not a list of strings.", + )) + + backend = build_system.get("build-backend") + check = [] # type: List[str] + if backend is None: + # If the user didn't specify a backend, we assume they want to use + # the setuptools backend. But we can't be sure they have included + # a version of setuptools which supplies the backend, or wheel + # (which is needed by the backend) in their requirements. So we + # make a note to check that those requirements are present once + # we have set up the environment. + # This is quite a lot of work to check for a very specific case. But + # the problem is, that case is potentially quite common - projects that + # adopted PEP 518 early for the ability to specify requirements to + # execute setup.py, but never considered needing to mention the build + # tools themselves. The original PEP 518 code had a similar check (but + # implemented in a different way). 
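
For reference, the build-system table this function consumes can be parsed with the same vendored pytoml imported at the top of the module. A small sketch with an illustrative pyproject.toml (the sample content is made up, not taken from this project):

    from pip._vendor import pytoml

    SAMPLE = u'''
    [build-system]
    requires = ["setuptools>=40.8.0", "wheel"]
    build-backend = "setuptools.build_meta"
    '''

    build_system = pytoml.loads(SAMPLE).get('build-system')
    print(build_system['requires'])       # ['setuptools>=40.8.0', 'wheel']
    print(build_system['build-backend'])  # 'setuptools.build_meta'
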
+ backend = "setuptools.build_meta:__legacy__" + check = ["setuptools>=40.8.0", "wheel"] + + return (requires, backend, check) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/pyproject.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/pyproject.pyc new file mode 100644 index 0000000..2ba4250 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/pyproject.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_internal/req/__init__.py new file mode 100644 index 0000000..5e4eb92 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/req/__init__.py @@ -0,0 +1,77 @@ +from __future__ import absolute_import + +import logging + +from .req_install import InstallRequirement +from .req_set import RequirementSet +from .req_file import parse_requirements +from pip._internal.utils.logging import indent_log +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import List, Sequence # noqa: F401 + +__all__ = [ + "RequirementSet", "InstallRequirement", + "parse_requirements", "install_given_reqs", +] + +logger = logging.getLogger(__name__) + + +def install_given_reqs( + to_install, # type: List[InstallRequirement] + install_options, # type: List[str] + global_options=(), # type: Sequence[str] + *args, **kwargs +): + # type: (...) -> List[InstallRequirement] + """ + Install everything in the given list. + + (to be called after having downloaded and unpacked the packages) + """ + + if to_install: + logger.info( + 'Installing collected packages: %s', + ', '.join([req.name for req in to_install]), + ) + + with indent_log(): + for requirement in to_install: + if requirement.conflicts_with: + logger.info( + 'Found existing installation: %s', + requirement.conflicts_with, + ) + with indent_log(): + uninstalled_pathset = requirement.uninstall( + auto_confirm=True + ) + try: + requirement.install( + install_options, + global_options, + *args, + **kwargs + ) + except Exception: + should_rollback = ( + requirement.conflicts_with and + not requirement.install_succeeded + ) + # if install did not succeed, rollback previous uninstall + if should_rollback: + uninstalled_pathset.rollback() + raise + else: + should_commit = ( + requirement.conflicts_with and + requirement.install_succeeded + ) + if should_commit: + uninstalled_pathset.commit() + requirement.remove_temporary_source() + + return to_install diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/req/__init__.pyc new file mode 100644 index 0000000..415adcc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/req/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/constructors.py b/project/venv/lib/python2.7/site-packages/pip/_internal/req/constructors.py new file mode 100644 index 0000000..1eed1dd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/req/constructors.py @@ -0,0 +1,339 @@ +"""Backing implementation for InstallRequirement's various constructors + +The idea here is that these formed a major chunk of InstallRequirement's size +so, moving them and support code dedicated to them outside of that class +helps creates for better understandability for the rest of the code. + +These are meant to be used elsewhere within pip to create instances of +InstallRequirement. 
+""" + +import logging +import os +import re + +from pip._vendor.packaging.markers import Marker +from pip._vendor.packaging.requirements import InvalidRequirement, Requirement +from pip._vendor.packaging.specifiers import Specifier +from pip._vendor.pkg_resources import RequirementParseError, parse_requirements + +from pip._internal.download import ( + is_archive_file, is_url, path_to_url, url_to_path, +) +from pip._internal.exceptions import InstallationError +from pip._internal.models.index import PyPI, TestPyPI +from pip._internal.models.link import Link +from pip._internal.pyproject import make_pyproject_path +from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils.misc import is_installable_dir +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pip._internal.vcs import vcs +from pip._internal.wheel import Wheel + +if MYPY_CHECK_RUNNING: + from typing import ( # noqa: F401 + Optional, Tuple, Set, Any, Union, Text, Dict, + ) + from pip._internal.cache import WheelCache # noqa: F401 + + +__all__ = [ + "install_req_from_editable", "install_req_from_line", + "parse_editable" +] + +logger = logging.getLogger(__name__) +operators = Specifier._operators.keys() + + +def _strip_extras(path): + # type: (str) -> Tuple[str, Optional[str]] + m = re.match(r'^(.+)(\[[^\]]+\])$', path) + extras = None + if m: + path_no_extras = m.group(1) + extras = m.group(2) + else: + path_no_extras = path + + return path_no_extras, extras + + +def parse_editable(editable_req): + # type: (str) -> Tuple[Optional[str], str, Optional[Set[str]]] + """Parses an editable requirement into: + - a requirement name + - an URL + - extras + - editable options + Accepted requirements: + svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir + .[some_extra] + """ + + url = editable_req + + # If a file path is specified with extras, strip off the extras. + url_no_extras, extras = _strip_extras(url) + + if os.path.isdir(url_no_extras): + if not os.path.exists(os.path.join(url_no_extras, 'setup.py')): + msg = ( + 'File "setup.py" not found. 
Directory cannot be installed '
+            'in editable mode: {}'.format(os.path.abspath(url_no_extras))
+        )
+        pyproject_path = make_pyproject_path(url_no_extras)
+        if os.path.isfile(pyproject_path):
+            msg += (
+                '\n(A "pyproject.toml" file was found, but editable '
+                'mode currently requires a setup.py based build.)'
+            )
+        raise InstallationError(msg)
+
+        # Treating it as code that has already been checked out
+        url_no_extras = path_to_url(url_no_extras)
+
+    if url_no_extras.lower().startswith('file:'):
+        package_name = Link(url_no_extras).egg_fragment
+        if extras:
+            return (
+                package_name,
+                url_no_extras,
+                Requirement("placeholder" + extras.lower()).extras,
+            )
+        else:
+            return package_name, url_no_extras, None
+
+    for version_control in vcs:
+        if url.lower().startswith('%s:' % version_control):
+            url = '%s+%s' % (version_control, url)
+            break
+
+    if '+' not in url:
+        raise InstallationError(
+            '%s should either be a path to a local project or a VCS url '
+            'beginning with svn+, git+, hg+, or bzr+' %
+            editable_req
+        )
+
+    vc_type = url.split('+', 1)[0].lower()
+
+    if not vcs.get_backend(vc_type):
+        error_message = 'For --editable=%s only ' % editable_req + \
+            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
+            ' is currently supported'
+        raise InstallationError(error_message)
+
+    package_name = Link(url).egg_fragment
+    if not package_name:
+        raise InstallationError(
+            "Could not detect requirement name for '%s', please specify one "
+            "with #egg=your_package_name" % editable_req
+        )
+    return package_name, url, None
+
+
+def deduce_helpful_msg(req):
+    # type: (str) -> str
+    """Returns a helpful message in case the requirements file does not
+    exist, or cannot be parsed.
+
+    :param req: Requirements file path
+    """
+    msg = ""
+    if os.path.exists(req):
+        msg = " It does exist."
+        # Try to parse and check if it is a requirements file.
+        try:
+            with open(req, 'r') as fp:
+                # parse first line only
+                next(parse_requirements(fp.read()))
+                msg += " The argument you provided " + \
+                    "(%s) appears to be a" % (req) + \
+                    " requirements file. If that is the" + \
+                    " case, use the '-r' flag to install" + \
+                    " the packages specified within it."
+        except RequirementParseError:
+            logger.debug("Cannot parse '%s' as requirements \
+            file" % (req), exc_info=True)
+    else:
+        msg += " File '%s' does not exist." % (req)
+    return msg
+
+
+# ---- The actual constructors follow ----
+
+
+def install_req_from_editable(
+    editable_req,  # type: str
+    comes_from=None,  # type: Optional[str]
+    use_pep517=None,  # type: Optional[bool]
+    isolated=False,  # type: bool
+    options=None,  # type: Optional[Dict[str, Any]]
+    wheel_cache=None,  # type: Optional[WheelCache]
+    constraint=False  # type: bool
+):
+    # type: (...)
-> InstallRequirement + name, url, extras_override = parse_editable(editable_req) + if url.startswith('file:'): + source_dir = url_to_path(url) + else: + source_dir = None + + if name is not None: + try: + req = Requirement(name) + except InvalidRequirement: + raise InstallationError("Invalid requirement: '%s'" % name) + else: + req = None + return InstallRequirement( + req, comes_from, source_dir=source_dir, + editable=True, + link=Link(url), + constraint=constraint, + use_pep517=use_pep517, + isolated=isolated, + options=options if options else {}, + wheel_cache=wheel_cache, + extras=extras_override or (), + ) + + +def install_req_from_line( + name, # type: str + comes_from=None, # type: Optional[Union[str, InstallRequirement]] + use_pep517=None, # type: Optional[bool] + isolated=False, # type: bool + options=None, # type: Optional[Dict[str, Any]] + wheel_cache=None, # type: Optional[WheelCache] + constraint=False # type: bool +): + # type: (...) -> InstallRequirement + """Creates an InstallRequirement from a name, which might be a + requirement, directory containing 'setup.py', filename, or URL. + """ + if is_url(name): + marker_sep = '; ' + else: + marker_sep = ';' + if marker_sep in name: + name, markers_as_string = name.split(marker_sep, 1) + markers_as_string = markers_as_string.strip() + if not markers_as_string: + markers = None + else: + markers = Marker(markers_as_string) + else: + markers = None + name = name.strip() + req_as_string = None + path = os.path.normpath(os.path.abspath(name)) + link = None + extras_as_string = None + + if is_url(name): + link = Link(name) + else: + p, extras_as_string = _strip_extras(path) + looks_like_dir = os.path.isdir(p) and ( + os.path.sep in name or + (os.path.altsep is not None and os.path.altsep in name) or + name.startswith('.') + ) + if looks_like_dir: + if not is_installable_dir(p): + raise InstallationError( + "Directory %r is not installable. Neither 'setup.py' " + "nor 'pyproject.toml' found." % name + ) + link = Link(path_to_url(p)) + elif is_archive_file(p): + if not os.path.isfile(p): + logger.warning( + 'Requirement %r looks like a filename, but the ' + 'file does not exist', + name + ) + link = Link(path_to_url(p)) + + # it's a local file, dir, or url + if link: + # Handle relative file URLs + if link.scheme == 'file' and re.search(r'\.\./', link.url): + link = Link( + path_to_url(os.path.normpath(os.path.abspath(link.path)))) + # wheel file + if link.is_wheel: + wheel = Wheel(link.filename) # can raise InvalidWheelFilename + req_as_string = "%s==%s" % (wheel.name, wheel.version) + else: + # set the req to the egg fragment. when it's not there, this + # will become an 'unnamed' requirement + req_as_string = link.egg_fragment + + # a requirement specifier + else: + req_as_string = name + + if extras_as_string: + extras = Requirement("placeholder" + extras_as_string.lower()).extras + else: + extras = () + if req_as_string is not None: + try: + req = Requirement(req_as_string) + except InvalidRequirement: + if os.path.sep in req_as_string: + add_msg = "It looks like a path." + add_msg += deduce_helpful_msg(req_as_string) + elif ('=' in req_as_string and + not any(op in req_as_string for op in operators)): + add_msg = "= is not a valid operator. Did you mean == ?" 
+            else:
+                add_msg = ""
+            raise InstallationError(
+                "Invalid requirement: '%s'\n%s" % (req_as_string, add_msg)
+            )
+    else:
+        req = None
+
+    return InstallRequirement(
+        req, comes_from, link=link, markers=markers,
+        use_pep517=use_pep517, isolated=isolated,
+        options=options if options else {},
+        wheel_cache=wheel_cache,
+        constraint=constraint,
+        extras=extras,
+    )
+
+
+def install_req_from_req_string(
+    req_string,  # type: str
+    comes_from=None,  # type: Optional[InstallRequirement]
+    isolated=False,  # type: bool
+    wheel_cache=None,  # type: Optional[WheelCache]
+    use_pep517=None  # type: Optional[bool]
+):
+    # type: (...) -> InstallRequirement
+    try:
+        req = Requirement(req_string)
+    except InvalidRequirement:
+        # 'req' is unbound when construction fails, so report the raw string
+        raise InstallationError("Invalid requirement: '%s'" % req_string)
+
+    domains_not_allowed = [
+        PyPI.file_storage_domain,
+        TestPyPI.file_storage_domain,
+    ]
+    if req.url and comes_from.link.netloc in domains_not_allowed:
+        # Explicitly disallow pypi packages that depend on external urls
+        raise InstallationError(
+            "Packages installed from PyPI cannot depend on packages "
+            "which are not also hosted on PyPI.\n"
+            "%s depends on %s " % (comes_from.name, req)
+        )
+
+    return InstallRequirement(
+        req, comes_from, isolated=isolated, wheel_cache=wheel_cache,
+        use_pep517=use_pep517
+    )
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/constructors.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/req/constructors.pyc
new file mode 100644
index 0000000..f58fd2b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/req/constructors.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_file.py b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_file.py
new file mode 100644
index 0000000..726f2f6
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_file.py
@@ -0,0 +1,382 @@
+"""
+Requirements file parsing
+"""
+
+from __future__ import absolute_import
+
+import optparse
+import os
+import re
+import shlex
+import sys
+
+from pip._vendor.six.moves import filterfalse
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+
+from pip._internal.cli import cmdoptions
+from pip._internal.download import get_file_content
+from pip._internal.exceptions import RequirementsFileParseError
+from pip._internal.req.constructors import (
+    install_req_from_editable, install_req_from_line,
+)
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import (  # noqa: F401
+        Iterator, Tuple, Optional, List, Callable, Text
+    )
+    from pip._internal.req import InstallRequirement  # noqa: F401
+    from pip._internal.cache import WheelCache  # noqa: F401
+    from pip._internal.index import PackageFinder  # noqa: F401
+    from pip._internal.download import PipSession  # noqa: F401
+
+    ReqFileLines = Iterator[Tuple[int, Text]]
+
+__all__ = ['parse_requirements']
+
+SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
+COMMENT_RE = re.compile(r'(^|\s)+#.*$')
+
+# Matches environment variable-style values in '${MY_VARIABLE_1}' with the
+# variable name consisting of only uppercase letters, digits or the '_'
+# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1,
+# 2013 Edition.
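+#
+# A minimal illustration of the pattern defined just below (editorial
+# sketch, using a hypothetical variable name):
+#
+#     >>> m = ENV_VAR_RE.search('--index-url ${PIP_INDEX_URL}/simple')
+#     >>> m.group('var'), m.group('name')
+#     ('${PIP_INDEX_URL}', 'PIP_INDEX_URL')
+#
+# Lowercase forms such as '${pip_index_url}' deliberately do not match.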
+ENV_VAR_RE = re.compile(r'(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})')
+
+SUPPORTED_OPTIONS = [
+    cmdoptions.constraints,
+    cmdoptions.editable,
+    cmdoptions.requirements,
+    cmdoptions.no_index,
+    cmdoptions.index_url,
+    cmdoptions.find_links,
+    cmdoptions.extra_index_url,
+    cmdoptions.always_unzip,
+    cmdoptions.no_binary,
+    cmdoptions.only_binary,
+    cmdoptions.pre,
+    cmdoptions.trusted_host,
+    cmdoptions.require_hashes,
+]  # type: List[Callable[..., optparse.Option]]
+
+# options to be passed to requirements
+SUPPORTED_OPTIONS_REQ = [
+    cmdoptions.install_options,
+    cmdoptions.global_options,
+    cmdoptions.hash,
+]  # type: List[Callable[..., optparse.Option]]
+
+# the 'dest' string values
+SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ]
+
+
+def parse_requirements(
+    filename,  # type: str
+    finder=None,  # type: Optional[PackageFinder]
+    comes_from=None,  # type: Optional[str]
+    options=None,  # type: Optional[optparse.Values]
+    session=None,  # type: Optional[PipSession]
+    constraint=False,  # type: bool
+    wheel_cache=None,  # type: Optional[WheelCache]
+    use_pep517=None  # type: Optional[bool]
+):
+    # type: (...) -> Iterator[InstallRequirement]
+    """Parse a requirements file and yield InstallRequirement instances.
+
+    :param filename: Path or url of requirements file.
+    :param finder: Instance of pip.index.PackageFinder.
+    :param comes_from: Origin description of requirements.
+    :param options: cli options.
+    :param session: Instance of pip.download.PipSession.
+    :param constraint: If true, parsing a constraint file rather than a
+        requirements file.
+    :param wheel_cache: Instance of pip.wheel.WheelCache
+    :param use_pep517: Value of the --use-pep517 option.
+    """
+    if session is None:
+        raise TypeError(
+            "parse_requirements() missing 1 required keyword argument: "
+            "'session'"
+        )
+
+    _, content = get_file_content(
+        filename, comes_from=comes_from, session=session
+    )
+
+    lines_enum = preprocess(content, options)
+
+    for line_number, line in lines_enum:
+        req_iter = process_line(line, filename, line_number, finder,
+                                comes_from, options, session, wheel_cache,
+                                use_pep517=use_pep517, constraint=constraint)
+        for req in req_iter:
+            yield req
+
+
+def preprocess(content, options):
+    # type: (Text, Optional[optparse.Values]) -> ReqFileLines
+    """Split, filter, and join lines, and return a line iterator
+
+    :param content: the content of the requirements file
+    :param options: cli options
+    """
+    lines_enum = enumerate(content.splitlines(), start=1)  # type: ReqFileLines
+    lines_enum = join_lines(lines_enum)
+    lines_enum = ignore_comments(lines_enum)
+    lines_enum = skip_regex(lines_enum, options)
+    lines_enum = expand_env_variables(lines_enum)
+    return lines_enum
+
+
+def process_line(
+    line,  # type: Text
+    filename,  # type: str
+    line_number,  # type: int
+    finder=None,  # type: Optional[PackageFinder]
+    comes_from=None,  # type: Optional[str]
+    options=None,  # type: Optional[optparse.Values]
+    session=None,  # type: Optional[PipSession]
+    wheel_cache=None,  # type: Optional[WheelCache]
+    use_pep517=None,  # type: Optional[bool]
+    constraint=False  # type: bool
+):
+    # type: (...) -> Iterator[InstallRequirement]
+    """Process a single requirements line: this can result in
+    creating/yielding requirements, or updating the finder.
+
+    For lines that contain requirements, the only options that have an effect
+    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
+    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
+    ignored.
+
+    For lines that do not contain requirements, the only options that have an
+    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
+    be present, but are ignored. These lines may contain multiple options
+    (although our docs imply only one is supported), and all are parsed and
+    affect the finder.
+
+    :param constraint: If True, parsing a constraints file.
+    :param options: OptionParser options that we may update
+    """
+    parser = build_parser(line)
+    defaults = parser.get_default_values()
+    defaults.index_url = None
+    if finder:
+        defaults.format_control = finder.format_control
+    args_str, options_str = break_args_options(line)
+    # Prior to 2.7.3, shlex cannot deal with unicode entries
+    if sys.version_info < (2, 7, 3):
+        # https://github.com/python/mypy/issues/1174
+        options_str = options_str.encode('utf8')  # type: ignore
+    # https://github.com/python/mypy/issues/1174
+    opts, _ = parser.parse_args(
+        shlex.split(options_str), defaults)  # type: ignore
+
+    # preserve for the nested code path
+    line_comes_from = '%s %s (line %s)' % (
+        '-c' if constraint else '-r', filename, line_number,
+    )
+
+    # yield a line requirement
+    if args_str:
+        isolated = options.isolated_mode if options else False
+        if options:
+            cmdoptions.check_install_build_global(options, opts)
+        # get the options that apply to requirements
+        req_options = {}
+        for dest in SUPPORTED_OPTIONS_REQ_DEST:
+            if dest in opts.__dict__ and opts.__dict__[dest]:
+                req_options[dest] = opts.__dict__[dest]
+        yield install_req_from_line(
+            args_str, line_comes_from, constraint=constraint,
+            use_pep517=use_pep517,
+            isolated=isolated, options=req_options, wheel_cache=wheel_cache
+        )
+
+    # yield an editable requirement
+    elif opts.editables:
+        isolated = options.isolated_mode if options else False
+        yield install_req_from_editable(
+            opts.editables[0], comes_from=line_comes_from,
+            use_pep517=use_pep517,
+            constraint=constraint, isolated=isolated, wheel_cache=wheel_cache
+        )
+
+    # parse a nested requirements file
+    elif opts.requirements or opts.constraints:
+        if opts.requirements:
+            req_path = opts.requirements[0]
+            nested_constraint = False
+        else:
+            req_path = opts.constraints[0]
+            nested_constraint = True
+        # original file is over http
+        if SCHEME_RE.search(filename):
+            # do a url join so relative paths work
+            req_path = urllib_parse.urljoin(filename, req_path)
+        # original file and nested file are paths
+        elif not SCHEME_RE.search(req_path):
+            # do a join so relative paths work
+            req_path = os.path.join(os.path.dirname(filename), req_path)
+        # TODO: Why not use `comes_from='-r {} (line {})'` here as well?
+        parsed_reqs = parse_requirements(
+            req_path, finder, comes_from, options, session,
+            constraint=nested_constraint, wheel_cache=wheel_cache
+        )
+        for req in parsed_reqs:
+            yield req
+
+    # percolate hash-checking option upward
+    elif opts.require_hashes:
+        options.require_hashes = opts.require_hashes
+
+    # set finder options
+    elif finder:
+        if opts.index_url:
+            finder.index_urls = [opts.index_url]
+        if opts.no_index is True:
+            finder.index_urls = []
+        if opts.extra_index_urls:
+            finder.index_urls.extend(opts.extra_index_urls)
+        if opts.find_links:
+            # FIXME: it would be nice to keep track of the source
+            # of the find_links: support a find-links local path
+            # relative to a requirements file.
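+            # Editorial sketch of the block below (hypothetical paths):
+            # given '--find-links ./wheels' inside /home/user/reqs.txt,
+            # './wheels' is first resolved against the requirements file's
+            # directory, so /home/user/wheels is used when that directory
+            # exists, and the literal value './wheels' is kept otherwise.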
+ value = opts.find_links[0] + req_dir = os.path.dirname(os.path.abspath(filename)) + relative_to_reqs_file = os.path.join(req_dir, value) + if os.path.exists(relative_to_reqs_file): + value = relative_to_reqs_file + finder.find_links.append(value) + if opts.pre: + finder.allow_all_prereleases = True + if opts.trusted_hosts: + finder.secure_origins.extend( + ("*", host, "*") for host in opts.trusted_hosts) + + +def break_args_options(line): + # type: (Text) -> Tuple[str, Text] + """Break up the line into an args and options string. We only want to shlex + (and then optparse) the options, not the args. args can contain markers + which are corrupted by shlex. + """ + tokens = line.split(' ') + args = [] + options = tokens[:] + for token in tokens: + if token.startswith('-') or token.startswith('--'): + break + else: + args.append(token) + options.pop(0) + return ' '.join(args), ' '.join(options) # type: ignore + + +def build_parser(line): + # type: (Text) -> optparse.OptionParser + """ + Return a parser for parsing requirement lines + """ + parser = optparse.OptionParser(add_help_option=False) + + option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ + for option_factory in option_factories: + option = option_factory() + parser.add_option(option) + + # By default optparse sys.exits on parsing errors. We want to wrap + # that in our own exception. + def parser_exit(self, msg): + # add offending line + msg = 'Invalid requirement: %s\n%s' % (line, msg) + raise RequirementsFileParseError(msg) + # NOTE: mypy disallows assigning to a method + # https://github.com/python/mypy/issues/2427 + parser.exit = parser_exit # type: ignore + + return parser + + +def join_lines(lines_enum): + # type: (ReqFileLines) -> ReqFileLines + """Joins a line ending in '\' with the previous line (except when following + comments). The joined line takes on the index of the first line. + """ + primary_line_number = None + new_line = [] # type: List[Text] + for line_number, line in lines_enum: + if not line.endswith('\\') or COMMENT_RE.match(line): + if COMMENT_RE.match(line): + # this ensures comments are always matched later + line = ' ' + line + if new_line: + new_line.append(line) + yield primary_line_number, ''.join(new_line) + new_line = [] + else: + yield line_number, line + else: + if not new_line: + primary_line_number = line_number + new_line.append(line.strip('\\')) + + # last line contains \ + if new_line: + yield primary_line_number, ''.join(new_line) + + # TODO: handle space after '\'. + + +def ignore_comments(lines_enum): + # type: (ReqFileLines) -> ReqFileLines + """ + Strips comments and filter empty lines. + """ + for line_number, line in lines_enum: + line = COMMENT_RE.sub('', line) + line = line.strip() + if line: + yield line_number, line + + +def skip_regex(lines_enum, options): + # type: (ReqFileLines, Optional[optparse.Values]) -> ReqFileLines + """ + Skip lines that match '--skip-requirements-regex' pattern + + Note: the regex pattern is only built once + """ + skip_regex = options.skip_requirements_regex if options else None + if skip_regex: + pattern = re.compile(skip_regex) + lines_enum = filterfalse(lambda e: pattern.search(e[1]), lines_enum) + return lines_enum + + +def expand_env_variables(lines_enum): + # type: (ReqFileLines) -> ReqFileLines + """Replace all environment variables that can be retrieved via `os.getenv`. + + The only allowed format for environment variables defined in the + requirement file is `${MY_VARIABLE_1}` to ensure two things: + + 1. 
Strings that contain a `$` aren't accidentally (partially) expanded.
+    2. Ensure consistency across platforms for requirement files.
+
+    These points are the result of a discussion on the `github pull
+    request #3514 <https://github.com/pypa/pip/issues/3514>`_.
+
+    Valid characters in variable names follow the `POSIX standard
+    <http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
+    to uppercase letters, digits and the `_` (underscore).
+    """
+    for line_number, line in lines_enum:
+        for env_var, var_name in ENV_VAR_RE.findall(line):
+            value = os.getenv(var_name)
+            if not value:
+                continue
+
+            line = line.replace(env_var, value)
+
+        yield line_number, line
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_file.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_file.pyc
new file mode 100644
index 0000000..fc454d9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_file.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_install.py b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_install.py
new file mode 100644
index 0000000..a4834b0
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_install.py
@@ -0,0 +1,1021 @@
+from __future__ import absolute_import
+
+import logging
+import os
+import shutil
+import sys
+import sysconfig
+import zipfile
+from distutils.util import change_root
+
+from pip._vendor import pkg_resources, six
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.packaging.version import Version
+from pip._vendor.packaging.version import parse as parse_version
+from pip._vendor.pep517.wrappers import Pep517HookCaller
+
+from pip._internal import wheel
+from pip._internal.build_env import NoOpBuildEnvironment
+from pip._internal.exceptions import InstallationError
+from pip._internal.locations import (
+    PIP_DELETE_MARKER_FILENAME, running_under_virtualenv,
+)
+from pip._internal.models.link import Link
+from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path
+from pip._internal.req.req_uninstall import UninstallPathSet
+from pip._internal.utils.compat import native_str
+from pip._internal.utils.hashes import Hashes
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import (
+    _make_build_dir, ask_path_exists, backup_dir, call_subprocess,
+    display_path, dist_in_site_packages, dist_in_usersite, ensure_dir,
+    get_installed_version, redact_password_from_url, rmtree,
+)
+from pip._internal.utils.packaging import get_metadata
+from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.utils.ui import open_spinner
+from pip._internal.vcs import vcs
+from pip._internal.wheel import move_wheel_files
+
+if MYPY_CHECK_RUNNING:
+    from typing import (  # noqa: F401
+        Optional, Iterable, List, Union, Any, Text, Sequence, Dict
+    )
+    from pip._internal.build_env import BuildEnvironment  # noqa: F401
+    from pip._internal.cache import WheelCache  # noqa: F401
+    from pip._internal.index import PackageFinder  # noqa: F401
+    from pip._vendor.pkg_resources import Distribution  # noqa: F401
+    from pip._vendor.packaging.specifiers import SpecifierSet  # noqa: F401
+    from pip._vendor.packaging.markers import Marker  # noqa: F401
+
+
+logger = logging.getLogger(__name__)
+
+
+class InstallRequirement(object):
+    """
+    Represents something that may
be installed later on, may have information
+    about where to fetch the relevant requirement and also contains logic
+    for installing said requirement.
+    """
+
+    def __init__(
+        self,
+        req,  # type: Optional[Requirement]
+        comes_from,  # type: Optional[Union[str, InstallRequirement]]
+        source_dir=None,  # type: Optional[str]
+        editable=False,  # type: bool
+        link=None,  # type: Optional[Link]
+        update=True,  # type: bool
+        markers=None,  # type: Optional[Marker]
+        use_pep517=None,  # type: Optional[bool]
+        isolated=False,  # type: bool
+        options=None,  # type: Optional[Dict[str, Any]]
+        wheel_cache=None,  # type: Optional[WheelCache]
+        constraint=False,  # type: bool
+        extras=()  # type: Iterable[str]
+    ):
+        # type: (...) -> None
+        assert req is None or isinstance(req, Requirement), req
+        self.req = req
+        self.comes_from = comes_from
+        self.constraint = constraint
+        if source_dir is not None:
+            self.source_dir = os.path.normpath(os.path.abspath(source_dir))
+        else:
+            self.source_dir = None
+        self.editable = editable
+
+        self._wheel_cache = wheel_cache
+        if link is None and req and req.url:
+            # PEP 508 URL requirement
+            link = Link(req.url)
+        self.link = self.original_link = link
+
+        if extras:
+            self.extras = extras
+        elif req:
+            self.extras = {
+                pkg_resources.safe_extra(extra) for extra in req.extras
+            }
+        else:
+            self.extras = set()
+        if markers is None and req:
+            markers = req.marker
+        self.markers = markers
+
+        self._egg_info_path = None  # type: Optional[str]
+        # This holds the pkg_resources.Distribution object if this requirement
+        # is already available:
+        self.satisfied_by = None
+        # This holds the pkg_resources.Distribution object if this requirement
+        # conflicts with another installed distribution:
+        self.conflicts_with = None
+        # Temporary build location
+        self._temp_build_dir = TempDirectory(kind="req-build")
+        # Used to store the global directory where the _temp_build_dir should
+        # have been created. Cf _correct_build_location method.
+        self._ideal_build_dir = None  # type: Optional[str]
+        # True if the editable should be updated:
+        self.update = update
+        # Set to True after successful installation
+        self.install_succeeded = None  # type: Optional[bool]
+        # UninstallPathSet of uninstalled distribution (for possible rollback)
+        self.uninstalled_pathset = None
+        self.options = options if options else {}
+        # Set to True after successful preparation of this requirement
+        self.prepared = False
+        self.is_direct = False
+
+        self.isolated = isolated
+        self.build_env = NoOpBuildEnvironment()  # type: BuildEnvironment
+
+        # For PEP 517, the directory where we request the project metadata
+        # gets stored. We need this to pass to build_wheel, so the backend
+        # can ensure that the wheel matches the metadata (see the PEP for
+        # details).
+        self.metadata_directory = None  # type: Optional[str]
+
+        # The static build requirements (from pyproject.toml)
+        self.pyproject_requires = None  # type: Optional[List[str]]
+
+        # Build requirements that we will check are available
+        self.requirements_to_check = []  # type: List[str]
+
+        # The PEP 517 backend we should use to build the project
+        self.pep517_backend = None  # type: Optional[Pep517HookCaller]
+
+        # Are we using PEP 517 for this requirement?
+        # After pyproject.toml has been loaded, the only valid values are True
+        # and False. Before loading, None is valid (meaning "use the default").
+        # Setting an explicit value before loading pyproject.toml is supported,
+        # but after loading this flag should be treated as read-only.
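+        # For example (editorial note): 'pip install --no-use-pep517 .'
+        # reaches this constructor with use_pep517=False, the default CLI
+        # path passes None, and the load_pyproject_toml() method defined
+        # further down narrows None to True or False based on the
+        # project's pyproject.toml.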
+        self.use_pep517 = use_pep517
+
+    def __str__(self):
+        if self.req:
+            s = str(self.req)
+            if self.link:
+                s += ' from %s' % redact_password_from_url(self.link.url)
+        elif self.link:
+            s = redact_password_from_url(self.link.url)
+        else:
+            s = '<InstallRequirement>'
+        if self.satisfied_by is not None:
+            s += ' in %s' % display_path(self.satisfied_by.location)
+        if self.comes_from:
+            if isinstance(self.comes_from, six.string_types):
+                comes_from = self.comes_from
+            else:
+                comes_from = self.comes_from.from_path()
+            if comes_from:
+                s += ' (from %s)' % comes_from
+        return s
+
+    def __repr__(self):
+        return '<%s object: %s editable=%r>' % (
+            self.__class__.__name__, str(self), self.editable)
+
+    def populate_link(self, finder, upgrade, require_hashes):
+        # type: (PackageFinder, bool, bool) -> None
+        """Ensure that if a link can be found for this requirement, it is
+        found.
+
+        Note that self.link may still be None - if upgrade is False and the
+        requirement is already installed.
+
+        If require_hashes is True, don't use the wheel cache, because cached
+        wheels, always built locally, have different hashes than the files
+        downloaded from the index server and thus throw false hash mismatches.
+        Furthermore, cached wheels at present have non-deterministic contents
+        due to file modification times.
+        """
+        if self.link is None:
+            self.link = finder.find_requirement(self, upgrade)
+        if self._wheel_cache is not None and not require_hashes:
+            old_link = self.link
+            self.link = self._wheel_cache.get(self.link, self.name)
+            if old_link != self.link:
+                logger.debug('Using cached wheel link: %s', self.link)
+
+    # Things that are valid for all kinds of requirements?
+    @property
+    def name(self):
+        # type: () -> Optional[str]
+        if self.req is None:
+            return None
+        return native_str(pkg_resources.safe_name(self.req.name))
+
+    @property
+    def specifier(self):
+        # type: () -> SpecifierSet
+        return self.req.specifier
+
+    @property
+    def is_pinned(self):
+        # type: () -> bool
+        """Return whether I am pinned to an exact version.
+
+        For example, some-package==1.2 is pinned; some-package>1.2 is not.
+        """
+        specifiers = self.specifier
+        return (len(specifiers) == 1 and
+                next(iter(specifiers)).operator in {'==', '==='})
+
+    @property
+    def installed_version(self):
+        return get_installed_version(self.name)
+
+    def match_markers(self, extras_requested=None):
+        # type: (Optional[Iterable[str]]) -> bool
+        if not extras_requested:
+            # Provide an extra to safely evaluate the markers
+            # without matching any extra
+            extras_requested = ('',)
+        if self.markers is not None:
+            return any(
+                self.markers.evaluate({'extra': extra})
+                for extra in extras_requested)
+        else:
+            return True
+
+    @property
+    def has_hash_options(self):
+        # type: () -> bool
+        """Return whether any known-good hashes are specified as options.
+
+        These activate --require-hashes mode; hashes specified as part of a
+        URL do not.
+
+        """
+        return bool(self.options.get('hashes', {}))
+
+    def hashes(self, trust_internet=True):
+        # type: (bool) -> Hashes
+        """Return a hash-comparer that considers my option- and URL-based
+        hashes to be known-good.
+
+        Hashes in URLs--ones embedded in the requirements file, not ones
+        downloaded from an index server--are almost peers with ones from
+        flags. They satisfy --require-hashes (whether it was implicitly or
+        explicitly activated) but do not activate it. md5 and sha224 are not
+        allowed in flags, which should nudge people toward good algos. We
+        always OR all hashes together, even ones from URLs.
+ + :param trust_internet: Whether to trust URL-based (#md5=...) hashes + downloaded from the internet, as by populate_link() + + """ + good_hashes = self.options.get('hashes', {}).copy() + link = self.link if trust_internet else self.original_link + if link and link.hash: + good_hashes.setdefault(link.hash_name, []).append(link.hash) + return Hashes(good_hashes) + + def from_path(self): + # type: () -> Optional[str] + """Format a nice indicator to show where this "comes from" + """ + if self.req is None: + return None + s = str(self.req) + if self.comes_from: + if isinstance(self.comes_from, six.string_types): + comes_from = self.comes_from + else: + comes_from = self.comes_from.from_path() + if comes_from: + s += '->' + comes_from + return s + + def build_location(self, build_dir): + # type: (str) -> Optional[str] + assert build_dir is not None + if self._temp_build_dir.path is not None: + return self._temp_build_dir.path + if self.req is None: + # for requirement via a path to a directory: the name of the + # package is not available yet so we create a temp directory + # Once run_egg_info will have run, we'll be able + # to fix it via _correct_build_location + # Some systems have /tmp as a symlink which confuses custom + # builds (such as numpy). Thus, we ensure that the real path + # is returned. + self._temp_build_dir.create() + self._ideal_build_dir = build_dir + + return self._temp_build_dir.path + if self.editable: + name = self.name.lower() + else: + name = self.name + # FIXME: Is there a better place to create the build_dir? (hg and bzr + # need this) + if not os.path.exists(build_dir): + logger.debug('Creating directory %s', build_dir) + _make_build_dir(build_dir) + return os.path.join(build_dir, name) + + def _correct_build_location(self): + # type: () -> None + """Move self._temp_build_dir to self._ideal_build_dir/self.req.name + + For some requirements (e.g. a path to a directory), the name of the + package is not available until we run egg_info, so the build_location + will return a temporary directory and store the _ideal_build_dir. + + This is only called by self.run_egg_info to fix the temporary build + directory. 
+ """ + if self.source_dir is not None: + return + assert self.req is not None + assert self._temp_build_dir.path + assert (self._ideal_build_dir is not None and + self._ideal_build_dir.path) # type: ignore + old_location = self._temp_build_dir.path + self._temp_build_dir.path = None + + new_location = self.build_location(self._ideal_build_dir) + if os.path.exists(new_location): + raise InstallationError( + 'A package already exists in %s; please remove it to continue' + % display_path(new_location)) + logger.debug( + 'Moving package %s from %s to new location %s', + self, display_path(old_location), display_path(new_location), + ) + shutil.move(old_location, new_location) + self._temp_build_dir.path = new_location + self._ideal_build_dir = None + self.source_dir = os.path.normpath(os.path.abspath(new_location)) + self._egg_info_path = None + + # Correct the metadata directory, if it exists + if self.metadata_directory: + old_meta = self.metadata_directory + rel = os.path.relpath(old_meta, start=old_location) + new_meta = os.path.join(new_location, rel) + new_meta = os.path.normpath(os.path.abspath(new_meta)) + self.metadata_directory = new_meta + + def remove_temporary_source(self): + # type: () -> None + """Remove the source files from this requirement, if they are marked + for deletion""" + if self.source_dir and os.path.exists( + os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)): + logger.debug('Removing source in %s', self.source_dir) + rmtree(self.source_dir) + self.source_dir = None + self._temp_build_dir.cleanup() + self.build_env.cleanup() + + def check_if_exists(self, use_user_site): + # type: (bool) -> bool + """Find an installed distribution that satisfies or conflicts + with this requirement, and set self.satisfied_by or + self.conflicts_with appropriately. + """ + if self.req is None: + return False + try: + # get_distribution() will resolve the entire list of requirements + # anyway, and we've already determined that we need the requirement + # in question, so strip the marker so that we don't try to + # evaluate it. + no_marker = Requirement(str(self.req)) + no_marker.marker = None + self.satisfied_by = pkg_resources.get_distribution(str(no_marker)) + if self.editable and self.satisfied_by: + self.conflicts_with = self.satisfied_by + # when installing editables, nothing pre-existing should ever + # satisfy + self.satisfied_by = None + return True + except pkg_resources.DistributionNotFound: + return False + except pkg_resources.VersionConflict: + existing_dist = pkg_resources.get_distribution( + self.req.name + ) + if use_user_site: + if dist_in_usersite(existing_dist): + self.conflicts_with = existing_dist + elif (running_under_virtualenv() and + dist_in_site_packages(existing_dist)): + raise InstallationError( + "Will not install to the user site because it will " + "lack sys.path precedence to %s in %s" % + (existing_dist.project_name, existing_dist.location) + ) + else: + self.conflicts_with = existing_dist + return True + + # Things valid for wheels + @property + def is_wheel(self): + # type: () -> bool + if not self.link: + return False + return self.link.is_wheel + + def move_wheel_files( + self, + wheeldir, # type: str + root=None, # type: Optional[str] + home=None, # type: Optional[str] + prefix=None, # type: Optional[str] + warn_script_location=True, # type: bool + use_user_site=False, # type: bool + pycompile=True # type: bool + ): + # type: (...) 
-> None + move_wheel_files( + self.name, self.req, wheeldir, + user=use_user_site, + home=home, + root=root, + prefix=prefix, + pycompile=pycompile, + isolated=self.isolated, + warn_script_location=warn_script_location, + ) + + # Things valid for sdists + @property + def setup_py_dir(self): + # type: () -> str + return os.path.join( + self.source_dir, + self.link and self.link.subdirectory_fragment or '') + + @property + def setup_py(self): + # type: () -> str + assert self.source_dir, "No source dir for %s" % self + + setup_py = os.path.join(self.setup_py_dir, 'setup.py') + + # Python2 __file__ should not be unicode + if six.PY2 and isinstance(setup_py, six.text_type): + setup_py = setup_py.encode(sys.getfilesystemencoding()) + + return setup_py + + @property + def pyproject_toml(self): + # type: () -> str + assert self.source_dir, "No source dir for %s" % self + + return make_pyproject_path(self.setup_py_dir) + + def load_pyproject_toml(self): + # type: () -> None + """Load the pyproject.toml file. + + After calling this routine, all of the attributes related to PEP 517 + processing for this requirement have been set. In particular, the + use_pep517 attribute can be used to determine whether we should + follow the PEP 517 or legacy (setup.py) code path. + """ + pep517_data = load_pyproject_toml( + self.use_pep517, + self.pyproject_toml, + self.setup_py, + str(self) + ) + + if pep517_data is None: + self.use_pep517 = False + else: + self.use_pep517 = True + requires, backend, check = pep517_data + self.requirements_to_check = check + self.pyproject_requires = requires + self.pep517_backend = Pep517HookCaller(self.setup_py_dir, backend) + + # Use a custom function to call subprocesses + self.spin_message = "" + + def runner(cmd, cwd=None, extra_environ=None): + with open_spinner(self.spin_message) as spinner: + call_subprocess( + cmd, + cwd=cwd, + extra_environ=extra_environ, + show_stdout=False, + spinner=spinner + ) + self.spin_message = "" + + self.pep517_backend._subprocess_runner = runner + + def prepare_metadata(self): + # type: () -> None + """Ensure that project metadata is available. + + Under PEP 517, call the backend hook to prepare the metadata. + Under legacy processing, call setup.py egg-info. + """ + assert self.source_dir + + with indent_log(): + if self.use_pep517: + self.prepare_pep517_metadata() + else: + self.run_egg_info() + + if not self.req: + if isinstance(parse_version(self.metadata["Version"]), Version): + op = "==" + else: + op = "===" + self.req = Requirement( + "".join([ + self.metadata["Name"], + op, + self.metadata["Version"], + ]) + ) + self._correct_build_location() + else: + metadata_name = canonicalize_name(self.metadata["Name"]) + if canonicalize_name(self.req.name) != metadata_name: + logger.warning( + 'Generating metadata for package %s ' + 'produced metadata for project name %s. Fix your ' + '#egg=%s fragments.', + self.name, metadata_name, self.name + ) + self.req = Requirement(metadata_name) + + def prepare_pep517_metadata(self): + # type: () -> None + assert self.pep517_backend is not None + + metadata_dir = os.path.join( + self.setup_py_dir, + 'pip-wheel-metadata' + ) + ensure_dir(metadata_dir) + + with self.build_env: + # Note that Pep517HookCaller implements a fallback for + # prepare_metadata_for_build_wheel, so we don't have to + # consider the possibility that this hook doesn't exist. 
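+            # Editorial sketch (assuming a setuptools.build_meta backend):
+            # the hook call below behaves roughly like running
+            #     setuptools.build_meta.prepare_metadata_for_build_wheel(
+            #         metadata_dir)
+            # in a subprocess and returns the basename of the '*.dist-info'
+            # directory it created inside metadata_dir.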
+ backend = self.pep517_backend + self.spin_message = "Preparing wheel metadata" + distinfo_dir = backend.prepare_metadata_for_build_wheel( + metadata_dir + ) + + self.metadata_directory = os.path.join(metadata_dir, distinfo_dir) + + def run_egg_info(self): + # type: () -> None + if self.name: + logger.debug( + 'Running setup.py (path:%s) egg_info for package %s', + self.setup_py, self.name, + ) + else: + logger.debug( + 'Running setup.py (path:%s) egg_info for package from %s', + self.setup_py, self.link, + ) + script = SETUPTOOLS_SHIM % self.setup_py + base_cmd = [sys.executable, '-c', script] + if self.isolated: + base_cmd += ["--no-user-cfg"] + egg_info_cmd = base_cmd + ['egg_info'] + # We can't put the .egg-info files at the root, because then the + # source code will be mistaken for an installed egg, causing + # problems + if self.editable: + egg_base_option = [] # type: List[str] + else: + egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info') + ensure_dir(egg_info_dir) + egg_base_option = ['--egg-base', 'pip-egg-info'] + with self.build_env: + call_subprocess( + egg_info_cmd + egg_base_option, + cwd=self.setup_py_dir, + show_stdout=False, + command_desc='python setup.py egg_info') + + @property + def egg_info_path(self): + # type: () -> str + if self._egg_info_path is None: + if self.editable: + base = self.source_dir + else: + base = os.path.join(self.setup_py_dir, 'pip-egg-info') + filenames = os.listdir(base) + if self.editable: + filenames = [] + for root, dirs, files in os.walk(base): + for dir in vcs.dirnames: + if dir in dirs: + dirs.remove(dir) + # Iterate over a copy of ``dirs``, since mutating + # a list while iterating over it can cause trouble. + # (See https://github.com/pypa/pip/pull/462.) + for dir in list(dirs): + # Don't search in anything that looks like a virtualenv + # environment + if ( + os.path.lexists( + os.path.join(root, dir, 'bin', 'python') + ) or + os.path.exists( + os.path.join( + root, dir, 'Scripts', 'Python.exe' + ) + )): + dirs.remove(dir) + # Also don't search through tests + elif dir == 'test' or dir == 'tests': + dirs.remove(dir) + filenames.extend([os.path.join(root, dir) + for dir in dirs]) + filenames = [f for f in filenames if f.endswith('.egg-info')] + + if not filenames: + raise InstallationError( + "Files/directories not found in %s" % base + ) + # if we have more than one match, we pick the toplevel one. This + # can easily be the case if there is a dist folder which contains + # an extracted tarball for testing purposes. 
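+            # Illustration (editorial, hypothetical layout): given matches
+            # ['pkg.egg-info', 'dist/pkg-1.0/pkg.egg-info'], the sort key
+            # below orders paths by separator count, so the shallower
+            # 'pkg.egg-info' sorts first and is the one picked.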
+ if len(filenames) > 1: + filenames.sort( + key=lambda x: x.count(os.path.sep) + + (os.path.altsep and x.count(os.path.altsep) or 0) + ) + self._egg_info_path = os.path.join(base, filenames[0]) + return self._egg_info_path + + @property + def metadata(self): + if not hasattr(self, '_metadata'): + self._metadata = get_metadata(self.get_dist()) + + return self._metadata + + def get_dist(self): + # type: () -> Distribution + """Return a pkg_resources.Distribution for this requirement""" + if self.metadata_directory: + base_dir, distinfo = os.path.split(self.metadata_directory) + metadata = pkg_resources.PathMetadata( + base_dir, self.metadata_directory + ) + dist_name = os.path.splitext(distinfo)[0] + typ = pkg_resources.DistInfoDistribution + else: + egg_info = self.egg_info_path.rstrip(os.path.sep) + base_dir = os.path.dirname(egg_info) + metadata = pkg_resources.PathMetadata(base_dir, egg_info) + dist_name = os.path.splitext(os.path.basename(egg_info))[0] + # https://github.com/python/mypy/issues/1174 + typ = pkg_resources.Distribution # type: ignore + + return typ( + base_dir, + project_name=dist_name, + metadata=metadata, + ) + + def assert_source_matches_version(self): + # type: () -> None + assert self.source_dir + version = self.metadata['version'] + if self.req.specifier and version not in self.req.specifier: + logger.warning( + 'Requested %s, but installing version %s', + self, + version, + ) + else: + logger.debug( + 'Source in %s has version %s, which satisfies requirement %s', + display_path(self.source_dir), + version, + self, + ) + + # For both source distributions and editables + def ensure_has_source_dir(self, parent_dir): + # type: (str) -> str + """Ensure that a source_dir is set. + + This will create a temporary build dir if the name of the requirement + isn't known yet. + + :param parent_dir: The ideal pip parent_dir for the source_dir. + Generally src_dir for editables and build_dir for sdists. + :return: self.source_dir + """ + if self.source_dir is None: + self.source_dir = self.build_location(parent_dir) + return self.source_dir + + # For editable installations + def install_editable( + self, + install_options, # type: List[str] + global_options=(), # type: Sequence[str] + prefix=None # type: Optional[str] + ): + # type: (...) -> None + logger.info('Running setup.py develop for %s', self.name) + + if self.isolated: + global_options = list(global_options) + ["--no-user-cfg"] + + if prefix: + prefix_param = ['--prefix={}'.format(prefix)] + install_options = list(install_options) + prefix_param + + with indent_log(): + # FIXME: should we do --install-headers here too? 
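+            # Editorial sketch of the command assembled below (hypothetical
+            # project): for an editable requirement this effectively runs
+            #     python -c <SETUPTOOLS_SHIM> develop --no-deps
+            # inside self.setup_py_dir, with any global options inserted
+            # before 'develop' and any install options appended after it.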
+ with self.build_env: + call_subprocess( + [ + sys.executable, + '-c', + SETUPTOOLS_SHIM % self.setup_py + ] + + list(global_options) + + ['develop', '--no-deps'] + + list(install_options), + + cwd=self.setup_py_dir, + show_stdout=False, + ) + + self.install_succeeded = True + + def update_editable(self, obtain=True): + # type: (bool) -> None + if not self.link: + logger.debug( + "Cannot update repository at %s; repository location is " + "unknown", + self.source_dir, + ) + return + assert self.editable + assert self.source_dir + if self.link.scheme == 'file': + # Static paths don't get updated + return + assert '+' in self.link.url, "bad url: %r" % self.link.url + if not self.update: + return + vc_type, url = self.link.url.split('+', 1) + backend = vcs.get_backend(vc_type) + if backend: + vcs_backend = backend(self.link.url) + if obtain: + vcs_backend.obtain(self.source_dir) + else: + vcs_backend.export(self.source_dir) + else: + assert 0, ( + 'Unexpected version control type (in %s): %s' + % (self.link, vc_type)) + + # Top-level Actions + def uninstall(self, auto_confirm=False, verbose=False, + use_user_site=False): + # type: (bool, bool, bool) -> Optional[UninstallPathSet] + """ + Uninstall the distribution currently satisfying this requirement. + + Prompts before removing or modifying files unless + ``auto_confirm`` is True. + + Refuses to delete or modify files outside of ``sys.prefix`` - + thus uninstallation within a virtual environment can only + modify that virtual environment, even if the virtualenv is + linked to global site-packages. + + """ + if not self.check_if_exists(use_user_site): + logger.warning("Skipping %s as it is not installed.", self.name) + return None + dist = self.satisfied_by or self.conflicts_with + + uninstalled_pathset = UninstallPathSet.from_dist(dist) + uninstalled_pathset.remove(auto_confirm, verbose) + return uninstalled_pathset + + def _clean_zip_name(self, name, prefix): # only used by archive. + assert name.startswith(prefix + os.path.sep), ( + "name %r doesn't start with prefix %r" % (name, prefix) + ) + name = name[len(prefix) + 1:] + name = name.replace(os.path.sep, '/') + return name + + def _get_archive_name(self, path, parentdir, rootdir): + # type: (str, str, str) -> str + path = os.path.join(parentdir, path) + name = self._clean_zip_name(path, rootdir) + return self.name + '/' + name + + # TODO: Investigate if this should be kept in InstallRequirement + # Seems to be used only when VCS + downloads + def archive(self, build_dir): + # type: (str) -> None + assert self.source_dir + create_archive = True + archive_name = '%s-%s.zip' % (self.name, self.metadata["version"]) + archive_path = os.path.join(build_dir, archive_name) + if os.path.exists(archive_path): + response = ask_path_exists( + 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup, (a)bort ' % + display_path(archive_path), ('i', 'w', 'b', 'a')) + if response == 'i': + create_archive = False + elif response == 'w': + logger.warning('Deleting %s', display_path(archive_path)) + os.remove(archive_path) + elif response == 'b': + dest_file = backup_dir(archive_path) + logger.warning( + 'Backing up %s to %s', + display_path(archive_path), + display_path(dest_file), + ) + shutil.move(archive_path, dest_file) + elif response == 'a': + sys.exit(-1) + if create_archive: + zip = zipfile.ZipFile( + archive_path, 'w', zipfile.ZIP_DEFLATED, + allowZip64=True + ) + dir = os.path.normcase(os.path.abspath(self.setup_py_dir)) + for dirpath, dirnames, filenames in os.walk(dir): + if 'pip-egg-info' in dirnames: + dirnames.remove('pip-egg-info') + for dirname in dirnames: + dir_arcname = self._get_archive_name(dirname, + parentdir=dirpath, + rootdir=dir) + zipdir = zipfile.ZipInfo(dir_arcname + '/') + zipdir.external_attr = 0x1ED << 16 # 0o755 + zip.writestr(zipdir, '') + for filename in filenames: + if filename == PIP_DELETE_MARKER_FILENAME: + continue + file_arcname = self._get_archive_name(filename, + parentdir=dirpath, + rootdir=dir) + filename = os.path.join(dirpath, filename) + zip.write(filename, file_arcname) + zip.close() + logger.info('Saved %s', display_path(archive_path)) + + def install( + self, + install_options, # type: List[str] + global_options=None, # type: Optional[Sequence[str]] + root=None, # type: Optional[str] + home=None, # type: Optional[str] + prefix=None, # type: Optional[str] + warn_script_location=True, # type: bool + use_user_site=False, # type: bool + pycompile=True # type: bool + ): + # type: (...) -> None + global_options = global_options if global_options is not None else [] + if self.editable: + self.install_editable( + install_options, global_options, prefix=prefix, + ) + return + if self.is_wheel: + version = wheel.wheel_version(self.source_dir) + wheel.check_compatibility(version, self.name) + + self.move_wheel_files( + self.source_dir, root=root, prefix=prefix, home=home, + warn_script_location=warn_script_location, + use_user_site=use_user_site, pycompile=pycompile, + ) + self.install_succeeded = True + return + + # Extend the list of global and install options passed on to + # the setup.py call with the ones from the requirements file. + # Options specified in requirements file override those + # specified on the command line, since the last option given + # to setup.py is the one that is used. 
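+        # Editorial example (hypothetical values): with
+        # --global-option="-q" on the command line and a requirements-file
+        # entry 'foo --global-option="--quiet"', the concatenation below
+        # yields ['-q', '--quiet'], so setup.py sees the requirements-file
+        # option last and it wins.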
+ global_options = list(global_options) + \ + self.options.get('global_options', []) + install_options = list(install_options) + \ + self.options.get('install_options', []) + + if self.isolated: + # https://github.com/python/mypy/issues/1174 + global_options = global_options + ["--no-user-cfg"] # type: ignore + + with TempDirectory(kind="record") as temp_dir: + record_filename = os.path.join(temp_dir.path, 'install-record.txt') + install_args = self.get_install_args( + global_options, record_filename, root, prefix, pycompile, + ) + msg = 'Running setup.py install for %s' % (self.name,) + with open_spinner(msg) as spinner: + with indent_log(): + with self.build_env: + call_subprocess( + install_args + install_options, + cwd=self.setup_py_dir, + show_stdout=False, + spinner=spinner, + ) + + if not os.path.exists(record_filename): + logger.debug('Record file %s not found', record_filename) + return + self.install_succeeded = True + + def prepend_root(path): + if root is None or not os.path.isabs(path): + return path + else: + return change_root(root, path) + + with open(record_filename) as f: + for line in f: + directory = os.path.dirname(line) + if directory.endswith('.egg-info'): + egg_info_dir = prepend_root(directory) + break + else: + logger.warning( + 'Could not find .egg-info directory in install record' + ' for %s', + self, + ) + # FIXME: put the record somewhere + # FIXME: should this be an error? + return + new_lines = [] + with open(record_filename) as f: + for line in f: + filename = line.strip() + if os.path.isdir(filename): + filename += os.path.sep + new_lines.append( + os.path.relpath(prepend_root(filename), egg_info_dir) + ) + new_lines.sort() + ensure_dir(egg_info_dir) + inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt') + with open(inst_files_path, 'w') as f: + f.write('\n'.join(new_lines) + '\n') + + def get_install_args( + self, + global_options, # type: Sequence[str] + record_filename, # type: str + root, # type: Optional[str] + prefix, # type: Optional[str] + pycompile # type: bool + ): + # type: (...) 
-> List[str] + install_args = [sys.executable, "-u"] + install_args.append('-c') + install_args.append(SETUPTOOLS_SHIM % self.setup_py) + install_args += list(global_options) + \ + ['install', '--record', record_filename] + install_args += ['--single-version-externally-managed'] + + if root is not None: + install_args += ['--root', root] + if prefix is not None: + install_args += ['--prefix', prefix] + + if pycompile: + install_args += ["--compile"] + else: + install_args += ["--no-compile"] + + if running_under_virtualenv(): + py_ver_str = 'python' + sysconfig.get_python_version() + install_args += ['--install-headers', + os.path.join(sys.prefix, 'include', 'site', + py_ver_str, self.name)] + + return install_args diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_install.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_install.pyc new file mode 100644 index 0000000..2a3db4e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_install.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_set.py b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_set.py new file mode 100644 index 0000000..d1410e9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_set.py @@ -0,0 +1,197 @@ +from __future__ import absolute_import + +import logging +from collections import OrderedDict + +from pip._internal.exceptions import InstallationError +from pip._internal.utils.logging import indent_log +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pip._internal.wheel import Wheel + +if MYPY_CHECK_RUNNING: + from typing import Optional, List, Tuple, Dict, Iterable # noqa: F401 + from pip._internal.req.req_install import InstallRequirement # noqa: F401 + + +logger = logging.getLogger(__name__) + + +class RequirementSet(object): + + def __init__(self, require_hashes=False, check_supported_wheels=True): + # type: (bool, bool) -> None + """Create a RequirementSet. + """ + + self.requirements = OrderedDict() # type: Dict[str, InstallRequirement] # noqa: E501 + self.require_hashes = require_hashes + self.check_supported_wheels = check_supported_wheels + + # Mapping of alias: real_name + self.requirement_aliases = {} # type: Dict[str, str] + self.unnamed_requirements = [] # type: List[InstallRequirement] + self.successfully_downloaded = [] # type: List[InstallRequirement] + self.reqs_to_cleanup = [] # type: List[InstallRequirement] + + def __str__(self): + reqs = [req for req in self.requirements.values() + if not req.comes_from] + reqs.sort(key=lambda req: req.name.lower()) + return ' '.join([str(req.req) for req in reqs]) + + def __repr__(self): + reqs = [req for req in self.requirements.values()] + reqs.sort(key=lambda req: req.name.lower()) + reqs_str = ', '.join([str(req.req) for req in reqs]) + return ('<%s object; %d requirement(s): %s>' + % (self.__class__.__name__, len(reqs), reqs_str)) + + def add_requirement( + self, + install_req, # type: InstallRequirement + parent_req_name=None, # type: Optional[str] + extras_requested=None # type: Optional[Iterable[str]] + ): + # type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]] # noqa: E501 + """Add install_req as a requirement to install. + + :param parent_req_name: The name of the requirement that needed this + added. 
The name is used because when multiple unnamed requirements
+            resolve to the same name, we could otherwise end up with
+            dependency links that point outside the Requirements set.
+            parent_req must already be added. Note that None implies that
+            this is a user-supplied requirement, vs an inferred one.
+        :param extras_requested: an iterable of extras used to evaluate the
+            environment markers.
+        :return: Additional requirements to scan. That is either [] if
+            the requirement is not applicable, or [install_req] if the
+            requirement is applicable and has just been added.
+        """
+        name = install_req.name
+
+        # If the markers do not match, ignore this requirement.
+        if not install_req.match_markers(extras_requested):
+            logger.info(
+                "Ignoring %s: markers '%s' don't match your environment",
+                name, install_req.markers,
+            )
+            return [], None
+
+        # If the wheel is not supported, raise an error.
+        # Should check this after filtering out based on environment markers to
+        # allow specifying different wheels based on the environment/OS, in a
+        # single requirements file.
+        if install_req.link and install_req.link.is_wheel:
+            wheel = Wheel(install_req.link.filename)
+            if self.check_supported_wheels and not wheel.supported():
+                raise InstallationError(
+                    "%s is not a supported wheel on this platform." %
+                    wheel.filename
+                )
+
+        # This next bit is really a sanity check.
+        assert install_req.is_direct == (parent_req_name is None), (
+            "a direct req shouldn't have a parent and also, "
+            "a non-direct req should have a parent"
+        )
+
+        # Unnamed requirements are scanned again and the requirement won't be
+        # added as a dependency until after scanning.
+        if not name:
+            # url or path requirement w/o an egg fragment
+            self.unnamed_requirements.append(install_req)
+            return [install_req], None
+
+        try:
+            existing_req = self.get_requirement(name)
+        except KeyError:
+            existing_req = None
+
+        has_conflicting_requirement = (
+            parent_req_name is None and
+            existing_req and
+            not existing_req.constraint and
+            existing_req.extras == install_req.extras and
+            existing_req.req.specifier != install_req.req.specifier
+        )
+        if has_conflicting_requirement:
+            raise InstallationError(
+                "Double requirement given: %s (already in %s, name=%r)"
+                % (install_req, existing_req, name)
+            )
+
+        # When no existing requirement exists, add the requirement as a
+        # dependency and it will be scanned again after.
+        if not existing_req:
+            self.requirements[name] = install_req
+            # FIXME: what about other normalizations? E.g., _ vs. -?
+            if name.lower() != name:
+                self.requirement_aliases[name.lower()] = name
+            # We'd want to rescan this requirement later
+            return [install_req], install_req
+
+        # Assume there's no need to scan, and that we've already
+        # encountered this for scanning.
+        if install_req.constraint or not existing_req.constraint:
+            return [], existing_req
+
+        does_not_satisfy_constraint = (
+            install_req.link and
+            not (
+                existing_req.link and
+                install_req.link.path == existing_req.link.path
+            )
+        )
+        if does_not_satisfy_constraint:
+            self.reqs_to_cleanup.append(install_req)
+            raise InstallationError(
+                "Could not satisfy constraints for '%s': "
+                "installation from path or url cannot be "
+                "constrained to a version" % name,
+            )
+        # If we're now installing a constraint, mark the existing
+        # object for real installation.
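+        # Editorial example (hypothetical names): if 'pkg==1.0' was added
+        # from a constraints file and a real install of 'pkg[extra]' arrives
+        # later, the lines below promote the stored requirement to a real
+        # install and merge in the newly requested extras.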
+        existing_req.constraint = False
+        existing_req.extras = tuple(sorted(
+            set(existing_req.extras) | set(install_req.extras)
+        ))
+        logger.debug(
+            "Setting %s extras to: %s",
+            existing_req, existing_req.extras,
+        )
+        # Return the existing requirement for addition to the parent and
+        # scanning again.
+        return [existing_req], existing_req
+
+    def has_requirement(self, project_name):
+        # type: (str) -> bool
+        name = project_name.lower()
+        if (name in self.requirements and
+                not self.requirements[name].constraint or
+                name in self.requirement_aliases and
+                not self.requirements[self.requirement_aliases[name]].constraint):
+            return True
+        return False
+
+    @property
+    def has_requirements(self):
+        # type: () -> List[InstallRequirement]
+        return list(req for req in self.requirements.values() if not
+                    req.constraint) or self.unnamed_requirements
+
+    def get_requirement(self, project_name):
+        # type: (str) -> InstallRequirement
+        for name in project_name, project_name.lower():
+            if name in self.requirements:
+                return self.requirements[name]
+            if name in self.requirement_aliases:
+                return self.requirements[self.requirement_aliases[name]]
+        raise KeyError("No project with the name %r" % project_name)
+
+    def cleanup_files(self):
+        # type: () -> None
+        """Clean up files, remove builds."""
+        logger.debug('Cleaning up...')
+        with indent_log():
+            for req in self.reqs_to_cleanup:
+                req.remove_temporary_source()
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_set.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_set.pyc
new file mode 100644
index 0000000..580dc7a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_set.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_tracker.py b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_tracker.py
new file mode 100644
index 0000000..82e084a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_tracker.py
@@ -0,0 +1,88 @@
+from __future__ import absolute_import
+
+import contextlib
+import errno
+import hashlib
+import logging
+import os
+
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Set, Iterator  # noqa: F401
+    from pip._internal.req.req_install import InstallRequirement  # noqa: F401
+    from pip._internal.models.link import Link  # noqa: F401
+
+logger = logging.getLogger(__name__)
+
+
+class RequirementTracker(object):
+
+    def __init__(self):
+        # type: () -> None
+        self._root = os.environ.get('PIP_REQ_TRACKER')
+        if self._root is None:
+            self._temp_dir = TempDirectory(delete=False, kind='req-tracker')
+            self._temp_dir.create()
+            self._root = os.environ['PIP_REQ_TRACKER'] = self._temp_dir.path
+            logger.debug('Created requirements tracker %r', self._root)
+        else:
+            self._temp_dir = None
+            logger.debug('Re-using requirements tracker %r', self._root)
+        self._entries = set()  # type: Set[InstallRequirement]
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.cleanup()
+
+    def _entry_path(self, link):
+        # type: (Link) -> str
+        hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest()
+        return os.path.join(self._root, hashed)
+
+    def add(self, req):
+        # type: (InstallRequirement) -> None
+        link = req.link
+        info = str(req)
+        entry_path = self._entry_path(link)
+        try:
+            with open(entry_path) as fp:
+                # Error, there's already a build in
progress. + raise LookupError('%s is already being built: %s' + % (link, fp.read())) + except IOError as e: + if e.errno != errno.ENOENT: + raise + assert req not in self._entries + with open(entry_path, 'w') as fp: + fp.write(info) + self._entries.add(req) + logger.debug('Added %s to build tracker %r', req, self._root) + + def remove(self, req): + # type: (InstallRequirement) -> None + link = req.link + self._entries.remove(req) + os.unlink(self._entry_path(link)) + logger.debug('Removed %s from build tracker %r', req, self._root) + + def cleanup(self): + # type: () -> None + for req in set(self._entries): + self.remove(req) + remove = self._temp_dir is not None + if remove: + self._temp_dir.cleanup() + logger.debug('%s build tracker %r', + 'Removed' if remove else 'Cleaned', + self._root) + + @contextlib.contextmanager + def track(self, req): + # type: (InstallRequirement) -> Iterator[None] + self.add(req) + yield + self.remove(req) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_tracker.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_tracker.pyc new file mode 100644 index 0000000..41dde23 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_tracker.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_uninstall.py b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_uninstall.py new file mode 100644 index 0000000..c80959e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_uninstall.py @@ -0,0 +1,596 @@ +from __future__ import absolute_import + +import csv +import functools +import logging +import os +import sys +import sysconfig + +from pip._vendor import pkg_resources + +from pip._internal.exceptions import UninstallationError +from pip._internal.locations import bin_py, bin_user +from pip._internal.utils.compat import WINDOWS, cache_from_source, uses_pycache +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + FakeFile, ask, dist_in_usersite, dist_is_local, egg_link_path, is_local, + normalize_path, renames, rmtree, +) +from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory + +logger = logging.getLogger(__name__) + + +def _script_names(dist, script_name, is_gui): + """Create the fully qualified name of the files created by + {console,gui}_scripts for the given ``dist``. + Returns the list of file names + """ + if dist_in_usersite(dist): + bin_dir = bin_user + else: + bin_dir = bin_py + exe_name = os.path.join(bin_dir, script_name) + paths_to_remove = [exe_name] + if WINDOWS: + paths_to_remove.append(exe_name + '.exe') + paths_to_remove.append(exe_name + '.exe.manifest') + if is_gui: + paths_to_remove.append(exe_name + '-script.pyw') + else: + paths_to_remove.append(exe_name + '-script.py') + return paths_to_remove + + +def _unique(fn): + @functools.wraps(fn) + def unique(*args, **kw): + seen = set() + for item in fn(*args, **kw): + if item not in seen: + seen.add(item) + yield item + return unique + + +@_unique +def uninstallation_paths(dist): + """ + Yield all the uninstallation paths for dist based on RECORD-without-.py[co] + + Yield paths to all the files in RECORD. For each .py file in RECORD, add + the .pyc and .pyo in the same directory. + + UninstallPathSet.add() takes care of the __pycache__ .py[co]. 
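+
+    As an illustration (paths hypothetical, not from the source): a RECORD
+    row naming 'pkg/mod.py' yields pkg/mod.py together with pkg/mod.pyc
+    and pkg/mod.pyo from the same directory.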
+ """ + r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD'))) + for row in r: + path = os.path.join(dist.location, row[0]) + yield path + if path.endswith('.py'): + dn, fn = os.path.split(path) + base = fn[:-3] + path = os.path.join(dn, base + '.pyc') + yield path + path = os.path.join(dn, base + '.pyo') + yield path + + +def compact(paths): + """Compact a path set to contain the minimal number of paths + necessary to contain all paths in the set. If /a/path/ and + /a/path/to/a/file.txt are both in the set, leave only the + shorter path.""" + + sep = os.path.sep + short_paths = set() + for path in sorted(paths, key=len): + should_skip = any( + path.startswith(shortpath.rstrip("*")) and + path[len(shortpath.rstrip("*").rstrip(sep))] == sep + for shortpath in short_paths + ) + if not should_skip: + short_paths.add(path) + return short_paths + + +def compress_for_rename(paths): + """Returns a set containing the paths that need to be renamed. + + This set may include directories when the original sequence of paths + included every file on disk. + """ + case_map = dict((os.path.normcase(p), p) for p in paths) + remaining = set(case_map) + unchecked = sorted(set(os.path.split(p)[0] + for p in case_map.values()), key=len) + wildcards = set() + + def norm_join(*a): + return os.path.normcase(os.path.join(*a)) + + for root in unchecked: + if any(os.path.normcase(root).startswith(w) + for w in wildcards): + # This directory has already been handled. + continue + + all_files = set() + all_subdirs = set() + for dirname, subdirs, files in os.walk(root): + all_subdirs.update(norm_join(root, dirname, d) + for d in subdirs) + all_files.update(norm_join(root, dirname, f) + for f in files) + # If all the files we found are in our remaining set of files to + # remove, then remove them from the latter set and add a wildcard + # for the directory. + if not (all_files - remaining): + remaining.difference_update(all_files) + wildcards.add(root + os.sep) + + return set(map(case_map.__getitem__, remaining)) | wildcards + + +def compress_for_output_listing(paths): + """Returns a tuple of 2 sets of which paths to display to user + + The first set contains paths that would be deleted. Files of a package + are not added and the top-level directory of the package has a '*' added + at the end - to signify that all it's contents are removed. + + The second set contains files that would have been skipped in the above + folders. + """ + + will_remove = list(paths) + will_skip = set() + + # Determine folders and files + folders = set() + files = set() + for path in will_remove: + if path.endswith(".pyc"): + continue + if path.endswith("__init__.py") or ".dist-info" in path: + folders.add(os.path.dirname(path)) + files.add(path) + + _normcased_files = set(map(os.path.normcase, files)) + + folders = compact(folders) + + # This walks the tree using os.walk to not miss extra folders + # that might get added. + for folder in folders: + for dirpath, _, dirfiles in os.walk(folder): + for fname in dirfiles: + if fname.endswith(".pyc"): + continue + + file_ = os.path.join(dirpath, fname) + if (os.path.isfile(file_) and + os.path.normcase(file_) not in _normcased_files): + # We are skipping this file. Add it to the set. 
+ will_skip.add(file_) + + will_remove = files | { + os.path.join(folder, "*") for folder in folders + } + + return will_remove, will_skip + + +class StashedUninstallPathSet(object): + """A set of file rename operations to stash files while + tentatively uninstalling them.""" + def __init__(self): + # Mapping from source file root to [Adjacent]TempDirectory + # for files under that directory. + self._save_dirs = {} + # (old path, new path) tuples for each move that may need + # to be undone. + self._moves = [] + + def _get_directory_stash(self, path): + """Stashes a directory. + + Directories are stashed adjacent to their original location if + possible, or else moved/copied into the user's temp dir.""" + + try: + save_dir = AdjacentTempDirectory(path) + save_dir.create() + except OSError: + save_dir = TempDirectory(kind="uninstall") + save_dir.create() + self._save_dirs[os.path.normcase(path)] = save_dir + + return save_dir.path + + def _get_file_stash(self, path): + """Stashes a file. + + If no root has been provided, one will be created for the directory + in the user's temp directory.""" + path = os.path.normcase(path) + head, old_head = os.path.dirname(path), None + save_dir = None + + while head != old_head: + try: + save_dir = self._save_dirs[head] + break + except KeyError: + pass + head, old_head = os.path.dirname(head), head + else: + # Did not find any suitable root + head = os.path.dirname(path) + save_dir = TempDirectory(kind='uninstall') + save_dir.create() + self._save_dirs[head] = save_dir + + relpath = os.path.relpath(path, head) + if relpath and relpath != os.path.curdir: + return os.path.join(save_dir.path, relpath) + return save_dir.path + + def stash(self, path): + """Stashes the directory or file and returns its new location. + """ + if os.path.isdir(path): + new_path = self._get_directory_stash(path) + else: + new_path = self._get_file_stash(path) + + self._moves.append((path, new_path)) + if os.path.isdir(path) and os.path.isdir(new_path): + # If we're moving a directory, we need to + # remove the destination first or else it will be + # moved to inside the existing directory. + # We just created new_path ourselves, so it will + # be removable. + os.rmdir(new_path) + renames(path, new_path) + return new_path + + def commit(self): + """Commits the uninstall by removing stashed files.""" + for _, save_dir in self._save_dirs.items(): + save_dir.cleanup() + self._moves = [] + self._save_dirs = {} + + def rollback(self): + """Undoes the uninstall by moving stashed files back.""" + for p in self._moves: + logging.info("Moving to %s\n from %s", *p) + + for new_path, path in self._moves: + try: + logger.debug('Replacing %s from %s', new_path, path) + if os.path.isfile(new_path): + os.unlink(new_path) + elif os.path.isdir(new_path): + rmtree(new_path) + renames(path, new_path) + except OSError as ex: + logger.error("Failed to restore %s", new_path) + logger.debug("Exception: %s", ex) + + self.commit() + + @property + def can_rollback(self): + return bool(self._moves) + + +class UninstallPathSet(object): + """A set of file paths to be removed in the uninstallation of a + requirement.""" + def __init__(self, dist): + self.paths = set() + self._refuse = set() + self.pth = {} + self.dist = dist + self._moved_paths = StashedUninstallPathSet() + + def _permitted(self, path): + """ + Return True if the given path is one we are permitted to + remove/modify, False otherwise. 
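+        Currently this simply delegates to is_local(path).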
+ + """ + return is_local(path) + + def add(self, path): + head, tail = os.path.split(path) + + # we normalize the head to resolve parent directory symlinks, but not + # the tail, since we only want to uninstall symlinks, not their targets + path = os.path.join(normalize_path(head), os.path.normcase(tail)) + + if not os.path.exists(path): + return + if self._permitted(path): + self.paths.add(path) + else: + self._refuse.add(path) + + # __pycache__ files can show up after 'installed-files.txt' is created, + # due to imports + if os.path.splitext(path)[1] == '.py' and uses_pycache: + self.add(cache_from_source(path)) + + def add_pth(self, pth_file, entry): + pth_file = normalize_path(pth_file) + if self._permitted(pth_file): + if pth_file not in self.pth: + self.pth[pth_file] = UninstallPthEntries(pth_file) + self.pth[pth_file].add(entry) + else: + self._refuse.add(pth_file) + + def remove(self, auto_confirm=False, verbose=False): + """Remove paths in ``self.paths`` with confirmation (unless + ``auto_confirm`` is True).""" + + if not self.paths: + logger.info( + "Can't uninstall '%s'. No files were found to uninstall.", + self.dist.project_name, + ) + return + + dist_name_version = ( + self.dist.project_name + "-" + self.dist.version + ) + logger.info('Uninstalling %s:', dist_name_version) + + with indent_log(): + if auto_confirm or self._allowed_to_proceed(verbose): + moved = self._moved_paths + + for_rename = compress_for_rename(self.paths) + + for path in sorted(compact(for_rename)): + moved.stash(path) + logger.debug('Removing file or directory %s', path) + + for pth in self.pth.values(): + pth.remove() + + logger.info('Successfully uninstalled %s', dist_name_version) + + def _allowed_to_proceed(self, verbose): + """Display which files would be deleted and prompt for confirmation + """ + + def _display(msg, paths): + if not paths: + return + + logger.info(msg) + with indent_log(): + for path in sorted(compact(paths)): + logger.info(path) + + if not verbose: + will_remove, will_skip = compress_for_output_listing(self.paths) + else: + # In verbose mode, display all the files that are going to be + # deleted. + will_remove = list(self.paths) + will_skip = set() + + _display('Would remove:', will_remove) + _display('Would not remove (might be manually added):', will_skip) + _display('Would not remove (outside of prefix):', self._refuse) + if verbose: + _display('Will actually move:', compress_for_rename(self.paths)) + + return ask('Proceed (y/n)? 
', ('y', 'n')) == 'y' + + def rollback(self): + """Rollback the changes previously made by remove().""" + if not self._moved_paths.can_rollback: + logger.error( + "Can't roll back %s; was not uninstalled", + self.dist.project_name, + ) + return False + logger.info('Rolling back uninstall of %s', self.dist.project_name) + self._moved_paths.rollback() + for pth in self.pth.values(): + pth.rollback() + + def commit(self): + """Remove temporary save dir: rollback will no longer be possible.""" + self._moved_paths.commit() + + @classmethod + def from_dist(cls, dist): + dist_path = normalize_path(dist.location) + if not dist_is_local(dist): + logger.info( + "Not uninstalling %s at %s, outside environment %s", + dist.key, + dist_path, + sys.prefix, + ) + return cls(dist) + + if dist_path in {p for p in {sysconfig.get_path("stdlib"), + sysconfig.get_path("platstdlib")} + if p}: + logger.info( + "Not uninstalling %s at %s, as it is in the standard library.", + dist.key, + dist_path, + ) + return cls(dist) + + paths_to_remove = cls(dist) + develop_egg_link = egg_link_path(dist) + develop_egg_link_egg_info = '{}.egg-info'.format( + pkg_resources.to_filename(dist.project_name)) + egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info) + # Special case for distutils installed package + distutils_egg_info = getattr(dist._provider, 'path', None) + + # Uninstall cases order do matter as in the case of 2 installs of the + # same package, pip needs to uninstall the currently detected version + if (egg_info_exists and dist.egg_info.endswith('.egg-info') and + not dist.egg_info.endswith(develop_egg_link_egg_info)): + # if dist.egg_info.endswith(develop_egg_link_egg_info), we + # are in fact in the develop_egg_link case + paths_to_remove.add(dist.egg_info) + if dist.has_metadata('installed-files.txt'): + for installed_file in dist.get_metadata( + 'installed-files.txt').splitlines(): + path = os.path.normpath( + os.path.join(dist.egg_info, installed_file) + ) + paths_to_remove.add(path) + # FIXME: need a test for this elif block + # occurs with --single-version-externally-managed/--record outside + # of pip + elif dist.has_metadata('top_level.txt'): + if dist.has_metadata('namespace_packages.txt'): + namespaces = dist.get_metadata('namespace_packages.txt') + else: + namespaces = [] + for top_level_pkg in [ + p for p + in dist.get_metadata('top_level.txt').splitlines() + if p and p not in namespaces]: + path = os.path.join(dist.location, top_level_pkg) + paths_to_remove.add(path) + paths_to_remove.add(path + '.py') + paths_to_remove.add(path + '.pyc') + paths_to_remove.add(path + '.pyo') + + elif distutils_egg_info: + raise UninstallationError( + "Cannot uninstall {!r}. It is a distutils installed project " + "and thus we cannot accurately determine which files belong " + "to it which would lead to only a partial uninstall.".format( + dist.project_name, + ) + ) + + elif dist.location.endswith('.egg'): + # package installed by easy_install + # We cannot match on dist.egg_name because it can slightly vary + # i.e. 
setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg + paths_to_remove.add(dist.location) + easy_install_egg = os.path.split(dist.location)[1] + easy_install_pth = os.path.join(os.path.dirname(dist.location), + 'easy-install.pth') + paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) + + elif egg_info_exists and dist.egg_info.endswith('.dist-info'): + for path in uninstallation_paths(dist): + paths_to_remove.add(path) + + elif develop_egg_link: + # develop egg + with open(develop_egg_link, 'r') as fh: + link_pointer = os.path.normcase(fh.readline().strip()) + assert (link_pointer == dist.location), ( + 'Egg-link %s does not match installed location of %s ' + '(at %s)' % (link_pointer, dist.project_name, dist.location) + ) + paths_to_remove.add(develop_egg_link) + easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), + 'easy-install.pth') + paths_to_remove.add_pth(easy_install_pth, dist.location) + + else: + logger.debug( + 'Not sure how to uninstall: %s - Check: %s', + dist, dist.location, + ) + + # find distutils scripts= scripts + if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): + for script in dist.metadata_listdir('scripts'): + if dist_in_usersite(dist): + bin_dir = bin_user + else: + bin_dir = bin_py + paths_to_remove.add(os.path.join(bin_dir, script)) + if WINDOWS: + paths_to_remove.add(os.path.join(bin_dir, script) + '.bat') + + # find console_scripts + _scripts_to_remove = [] + console_scripts = dist.get_entry_map(group='console_scripts') + for name in console_scripts.keys(): + _scripts_to_remove.extend(_script_names(dist, name, False)) + # find gui_scripts + gui_scripts = dist.get_entry_map(group='gui_scripts') + for name in gui_scripts.keys(): + _scripts_to_remove.extend(_script_names(dist, name, True)) + + for s in _scripts_to_remove: + paths_to_remove.add(s) + + return paths_to_remove + + +class UninstallPthEntries(object): + def __init__(self, pth_file): + if not os.path.isfile(pth_file): + raise UninstallationError( + "Cannot remove entries from nonexistent file %s" % pth_file + ) + self.file = pth_file + self.entries = set() + self._saved_lines = None + + def add(self, entry): + entry = os.path.normcase(entry) + # On Windows, os.path.normcase converts the entry to use + # backslashes. This is correct for entries that describe absolute + # paths outside of site-packages, but all the others use forward + # slashes. 
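+        # Illustrative examples (hypothetical entries): after normcase on
+        # Windows, 'some\\pkg' carries no drive letter and is rewritten to
+        # 'some/pkg', while 'c:\\abs\\path' keeps its backslashes because
+        # os.path.splitdrive() reports a drive for it.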
+ if WINDOWS and not os.path.splitdrive(entry)[0]: + entry = entry.replace('\\', '/') + self.entries.add(entry) + + def remove(self): + logger.debug('Removing pth entries from %s:', self.file) + with open(self.file, 'rb') as fh: + # windows uses '\r\n' with py3k, but uses '\n' with py2.x + lines = fh.readlines() + self._saved_lines = lines + if any(b'\r\n' in line for line in lines): + endline = '\r\n' + else: + endline = '\n' + # handle missing trailing newline + if lines and not lines[-1].endswith(endline.encode("utf-8")): + lines[-1] = lines[-1] + endline.encode("utf-8") + for entry in self.entries: + try: + logger.debug('Removing entry: %s', entry) + lines.remove((entry + endline).encode("utf-8")) + except ValueError: + pass + with open(self.file, 'wb') as fh: + fh.writelines(lines) + + def rollback(self): + if self._saved_lines is None: + logger.error( + 'Cannot roll back changes to %s, none were made', self.file + ) + return False + logger.debug('Rolling %s back to previous state', self.file) + with open(self.file, 'wb') as fh: + fh.writelines(self._saved_lines) + return True diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_uninstall.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_uninstall.pyc new file mode 100644 index 0000000..670cf7e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/req/req_uninstall.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/resolve.py b/project/venv/lib/python2.7/site-packages/pip/_internal/resolve.py new file mode 100644 index 0000000..33f572f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/resolve.py @@ -0,0 +1,393 @@ +"""Dependency Resolution + +The dependency resolution in pip is performed as follows: + +for top-level requirements: + a. only one spec allowed per project, regardless of conflicts or not. + otherwise a "double requirement" exception is raised + b. they override sub-dependency requirements. +for sub-dependencies + a. "first found, wins" (where the order is breadth first) +""" + +import logging +from collections import defaultdict +from itertools import chain + +from pip._internal.exceptions import ( + BestVersionAlreadyInstalled, DistributionNotFound, HashError, HashErrors, + UnsupportedPythonVersion, +) +from pip._internal.req.constructors import install_req_from_req_string +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import dist_in_usersite, ensure_dir +from pip._internal.utils.packaging import check_dist_requires_python +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Optional, DefaultDict, List, Set # noqa: F401 + from pip._internal.download import PipSession # noqa: F401 + from pip._internal.req.req_install import InstallRequirement # noqa: F401 + from pip._internal.index import PackageFinder # noqa: F401 + from pip._internal.req.req_set import RequirementSet # noqa: F401 + from pip._internal.operations.prepare import ( # noqa: F401 + DistAbstraction, RequirementPreparer + ) + from pip._internal.cache import WheelCache # noqa: F401 + +logger = logging.getLogger(__name__) + + +class Resolver(object): + """Resolves which packages need to be installed/uninstalled to perform \ + the requested operation without breaking the requirements of any package. 
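+
+    The supported upgrade strategies are "eager", "only-if-needed" and
+    "to-satisfy-only"; _is_upgrade_allowed() below maps each strategy to a
+    per-requirement decision.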
+ """ + + _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} + + def __init__( + self, + preparer, # type: RequirementPreparer + session, # type: PipSession + finder, # type: PackageFinder + wheel_cache, # type: Optional[WheelCache] + use_user_site, # type: bool + ignore_dependencies, # type: bool + ignore_installed, # type: bool + ignore_requires_python, # type: bool + force_reinstall, # type: bool + isolated, # type: bool + upgrade_strategy, # type: str + use_pep517=None # type: Optional[bool] + ): + # type: (...) -> None + super(Resolver, self).__init__() + assert upgrade_strategy in self._allowed_strategies + + self.preparer = preparer + self.finder = finder + self.session = session + + # NOTE: This would eventually be replaced with a cache that can give + # information about both sdist and wheels transparently. + self.wheel_cache = wheel_cache + + # This is set in resolve + self.require_hashes = None # type: Optional[bool] + + self.upgrade_strategy = upgrade_strategy + self.force_reinstall = force_reinstall + self.isolated = isolated + self.ignore_dependencies = ignore_dependencies + self.ignore_installed = ignore_installed + self.ignore_requires_python = ignore_requires_python + self.use_user_site = use_user_site + self.use_pep517 = use_pep517 + + self._discovered_dependencies = \ + defaultdict(list) # type: DefaultDict[str, List] + + def resolve(self, requirement_set): + # type: (RequirementSet) -> None + """Resolve what operations need to be done + + As a side-effect of this method, the packages (and their dependencies) + are downloaded, unpacked and prepared for installation. This + preparation is done by ``pip.operations.prepare``. + + Once PyPI has static dependency metadata available, it would be + possible to move the preparation to become a step separated from + dependency resolution. + """ + # make the wheelhouse + if self.preparer.wheel_download_dir: + ensure_dir(self.preparer.wheel_download_dir) + + # If any top-level requirement has a hash specified, enter + # hash-checking mode, which requires hashes from all. + root_reqs = ( + requirement_set.unnamed_requirements + + list(requirement_set.requirements.values()) + ) + self.require_hashes = ( + requirement_set.require_hashes or + any(req.has_hash_options for req in root_reqs) + ) + + # Display where finder is looking for packages + locations = self.finder.get_formatted_locations() + if locations: + logger.info(locations) + + # Actually prepare the files, and collect any exceptions. Most hash + # exceptions cannot be checked ahead of time, because + # req.populate_link() needs to be called before we can make decisions + # based on link type. + discovered_reqs = [] # type: List[InstallRequirement] + hash_errors = HashErrors() + for req in chain(root_reqs, discovered_reqs): + try: + discovered_reqs.extend( + self._resolve_one(requirement_set, req) + ) + except HashError as exc: + exc.req = req + hash_errors.append(exc) + + if hash_errors: + raise hash_errors + + def _is_upgrade_allowed(self, req): + # type: (InstallRequirement) -> bool + if self.upgrade_strategy == "to-satisfy-only": + return False + elif self.upgrade_strategy == "eager": + return True + else: + assert self.upgrade_strategy == "only-if-needed" + return req.is_direct + + def _set_req_to_reinstall(self, req): + # type: (InstallRequirement) -> None + """ + Set a requirement to be installed. + """ + # Don't uninstall the conflict if doing a user install and the + # conflict is not a user install. 
+ if not self.use_user_site or dist_in_usersite(req.satisfied_by): + req.conflicts_with = req.satisfied_by + req.satisfied_by = None + + # XXX: Stop passing requirement_set for options + def _check_skip_installed(self, req_to_install): + # type: (InstallRequirement) -> Optional[str] + """Check if req_to_install should be skipped. + + This will check if the req is installed, and whether we should upgrade + or reinstall it, taking into account all the relevant user options. + + After calling this req_to_install will only have satisfied_by set to + None if the req_to_install is to be upgraded/reinstalled etc. Any + other value will be a dist recording the current thing installed that + satisfies the requirement. + + Note that for vcs urls and the like we can't assess skipping in this + routine - we simply identify that we need to pull the thing down, + then later on it is pulled down and introspected to assess upgrade/ + reinstalls etc. + + :return: A text reason for why it was skipped, or None. + """ + if self.ignore_installed: + return None + + req_to_install.check_if_exists(self.use_user_site) + if not req_to_install.satisfied_by: + return None + + if self.force_reinstall: + self._set_req_to_reinstall(req_to_install) + return None + + if not self._is_upgrade_allowed(req_to_install): + if self.upgrade_strategy == "only-if-needed": + return 'already satisfied, skipping upgrade' + return 'already satisfied' + + # Check for the possibility of an upgrade. For link-based + # requirements we have to pull the tree down and inspect to assess + # the version #, so it's handled way down. + if not req_to_install.link: + try: + self.finder.find_requirement(req_to_install, upgrade=True) + except BestVersionAlreadyInstalled: + # Then the best version is installed. + return 'already up-to-date' + except DistributionNotFound: + # No distribution found, so we squash the error. It will + # be raised later when we re-try later to do the install. + # Why don't we just raise here? + pass + + self._set_req_to_reinstall(req_to_install) + return None + + def _get_abstract_dist_for(self, req): + # type: (InstallRequirement) -> DistAbstraction + """Takes a InstallRequirement and returns a single AbstractDist \ + representing a prepared variant of the same. + """ + assert self.require_hashes is not None, ( + "require_hashes should have been set in Resolver.resolve()" + ) + + if req.editable: + return self.preparer.prepare_editable_requirement( + req, self.require_hashes, self.use_user_site, self.finder, + ) + + # satisfied_by is only evaluated by calling _check_skip_installed, + # so it must be None here. + assert req.satisfied_by is None + skip_reason = self._check_skip_installed(req) + + if req.satisfied_by: + return self.preparer.prepare_installed_requirement( + req, self.require_hashes, skip_reason + ) + + upgrade_allowed = self._is_upgrade_allowed(req) + abstract_dist = self.preparer.prepare_linked_requirement( + req, self.session, self.finder, upgrade_allowed, + self.require_hashes + ) + + # NOTE + # The following portion is for determining if a certain package is + # going to be re-installed/upgraded or not and reporting to the user. + # This should probably get cleaned up in a future refactor. 
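+        # Paraphrasing the should_modify decision further down: an already
+        # satisfied requirement is re-installed when the upgrade strategy is
+        # anything other than "to-satisfy-only", when --force-reinstall or
+        # --ignore-installed is in effect, or when its link uses the
+        # 'file:' scheme.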
+ + # req.req is only avail after unpack for URL + # pkgs repeat check_if_exists to uninstall-on-upgrade + # (#14) + if not self.ignore_installed: + req.check_if_exists(self.use_user_site) + + if req.satisfied_by: + should_modify = ( + self.upgrade_strategy != "to-satisfy-only" or + self.force_reinstall or + self.ignore_installed or + req.link.scheme == 'file' + ) + if should_modify: + self._set_req_to_reinstall(req) + else: + logger.info( + 'Requirement already satisfied (use --upgrade to upgrade):' + ' %s', req, + ) + + return abstract_dist + + def _resolve_one( + self, + requirement_set, # type: RequirementSet + req_to_install # type: InstallRequirement + ): + # type: (...) -> List[InstallRequirement] + """Prepare a single requirements file. + + :return: A list of additional InstallRequirements to also install. + """ + # Tell user what we are doing for this requirement: + # obtain (editable), skipping, processing (local url), collecting + # (remote url or package name) + if req_to_install.constraint or req_to_install.prepared: + return [] + + req_to_install.prepared = True + + # register tmp src for cleanup in case something goes wrong + requirement_set.reqs_to_cleanup.append(req_to_install) + + abstract_dist = self._get_abstract_dist_for(req_to_install) + + # Parse and return dependencies + dist = abstract_dist.dist() + try: + check_dist_requires_python(dist) + except UnsupportedPythonVersion as err: + if self.ignore_requires_python: + logger.warning(err.args[0]) + else: + raise + + more_reqs = [] # type: List[InstallRequirement] + + def add_req(subreq, extras_requested): + sub_install_req = install_req_from_req_string( + str(subreq), + req_to_install, + isolated=self.isolated, + wheel_cache=self.wheel_cache, + use_pep517=self.use_pep517 + ) + parent_req_name = req_to_install.name + to_scan_again, add_to_parent = requirement_set.add_requirement( + sub_install_req, + parent_req_name=parent_req_name, + extras_requested=extras_requested, + ) + if parent_req_name and add_to_parent: + self._discovered_dependencies[parent_req_name].append( + add_to_parent + ) + more_reqs.extend(to_scan_again) + + with indent_log(): + # We add req_to_install before its dependencies, so that we + # can refer to it when adding dependencies. + if not requirement_set.has_requirement(req_to_install.name): + # 'unnamed' requirements will get added here + req_to_install.is_direct = True + requirement_set.add_requirement( + req_to_install, parent_req_name=None, + ) + + if not self.ignore_dependencies: + if req_to_install.extras: + logger.debug( + "Installing extra requirements: %r", + ','.join(req_to_install.extras), + ) + missing_requested = sorted( + set(req_to_install.extras) - set(dist.extras) + ) + for missing in missing_requested: + logger.warning( + '%s does not provide the extra \'%s\'', + dist, missing + ) + + available_requested = sorted( + set(dist.extras) & set(req_to_install.extras) + ) + for subreq in dist.requires(available_requested): + add_req(subreq, extras_requested=available_requested) + + if not req_to_install.editable and not req_to_install.satisfied_by: + # XXX: --no-install leads this to report 'Successfully + # downloaded' for only non-editable reqs, even though we took + # action on them. + requirement_set.successfully_downloaded.append(req_to_install) + + return more_reqs + + def get_installation_order(self, req_set): + # type: (RequirementSet) -> List[InstallRequirement] + """Create the installation order. 
+ + The installation order is topological - requirements are installed + before the requiring thing. We break cycles at an arbitrary point, + and make no other guarantees. + """ + # The current implementation, which we may change at any point + # installs the user specified things in the order given, except when + # dependencies must come earlier to achieve topological order. + order = [] + ordered_reqs = set() # type: Set[InstallRequirement] + + def schedule(req): + if req.satisfied_by or req in ordered_reqs: + return + if req.constraint: + return + ordered_reqs.add(req) + for dep in self._discovered_dependencies[req.name]: + schedule(dep) + order.append(req) + + for install_req in req_set.requirements.values(): + schedule(install_req) + return order diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/resolve.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/resolve.pyc new file mode 100644 index 0000000..ed03df1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/resolve.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/__init__.pyc new file mode 100644 index 0000000..a952ccb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/appdirs.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/appdirs.py new file mode 100644 index 0000000..9af9fa7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/appdirs.py @@ -0,0 +1,270 @@ +""" +This code was taken from https://github.com/ActiveState/appdirs and modified +to suit our purposes. +""" +from __future__ import absolute_import + +import os +import sys + +from pip._vendor.six import PY2, text_type + +from pip._internal.utils.compat import WINDOWS, expanduser +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import ( # noqa: F401 + List, Union + ) + + +def user_cache_dir(appname): + # type: (str) -> str + r""" + Return full path to the user-specific cache dir for this application. + + "appname" is the name of application. + + Typical user cache directories are: + macOS: ~/Library/Caches/ + Unix: ~/.cache/ (XDG default) + Windows: C:\Users\\AppData\Local\\Cache + + On Windows the only suggestion in the MSDN docs is that local settings go + in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the + non-roaming app data dir (the default returned by `user_data_dir`). Apps + typically put cache data somewhere *under* the given dir here. Some + examples: + ...\Mozilla\Firefox\Profiles\\Cache + ...\Acme\SuperApp\Cache\1.0 + + OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. + """ + if WINDOWS: + # Get the base path + path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) + + # When using Python 2, return paths as bytes on Windows like we do on + # other operating systems. See helper function docs for more details. 
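+        # Sketch (value hypothetical): on Python 2 a text path such as
+        # u'C:\\Users\\me\\AppData\\Local' is handed to _win_path_to_bytes()
+        # below and comes back as the equivalent byte string.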
+ if PY2 and isinstance(path, text_type): + path = _win_path_to_bytes(path) + + # Add our app name and Cache directory to it + path = os.path.join(path, appname, "Cache") + elif sys.platform == "darwin": + # Get the base path + path = expanduser("~/Library/Caches") + + # Add our app name to it + path = os.path.join(path, appname) + else: + # Get the base path + path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache")) + + # Add our app name to it + path = os.path.join(path, appname) + + return path + + +def user_data_dir(appname, roaming=False): + # type: (str, bool) -> str + r""" + Return full path to the user-specific data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + + for a discussion of issues. + + Typical user data directories are: + macOS: ~/Library/Application Support/ + if it exists, else ~/.config/ + Unix: ~/.local/share/ # or in + $XDG_DATA_HOME, if defined + Win XP (not roaming): C:\Documents and Settings\\ ... + ...Application Data\ + Win XP (roaming): C:\Documents and Settings\\Local ... + ...Settings\Application Data\ + Win 7 (not roaming): C:\\Users\\AppData\Local\ + Win 7 (roaming): C:\\Users\\AppData\Roaming\ + + For Unix, we follow the XDG spec and support $XDG_DATA_HOME. + That means, by default "~/.local/share/". + """ + if WINDOWS: + const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" + path = os.path.join(os.path.normpath(_get_win_folder(const)), appname) + elif sys.platform == "darwin": + path = os.path.join( + expanduser('~/Library/Application Support/'), + appname, + ) if os.path.isdir(os.path.join( + expanduser('~/Library/Application Support/'), + appname, + ) + ) else os.path.join( + expanduser('~/.config/'), + appname, + ) + else: + path = os.path.join( + os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")), + appname, + ) + + return path + + +def user_config_dir(appname, roaming=True): + # type: (str, bool) -> str + """Return full path to the user-specific config dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "roaming" (boolean, default True) can be set False to not use the + Windows roaming appdata directory. That means that for users on a + Windows network setup for roaming profiles, this user data will be + sync'd on login. See + + for a discussion of issues. + + Typical user data directories are: + macOS: same as user_data_dir + Unix: ~/.config/ + Win *: same as user_data_dir + + For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. + That means, by default "~/.config/". + """ + if WINDOWS: + path = user_data_dir(appname, roaming=roaming) + elif sys.platform == "darwin": + path = user_data_dir(appname) + else: + path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config")) + path = os.path.join(path, appname) + + return path + + +# for the discussion regarding site_config_dirs locations +# see +def site_config_dirs(appname): + # type: (str) -> List[str] + r"""Return a list of potential user-shared config dirs for this application. + + "appname" is the name of application. 
+ + Typical user config directories are: + macOS: /Library/Application Support// + Unix: /etc or $XDG_CONFIG_DIRS[i]// for each value in + $XDG_CONFIG_DIRS + Win XP: C:\Documents and Settings\All Users\Application ... + ...Data\\ + Vista: (Fail! "C:\ProgramData" is a hidden *system* directory + on Vista.) + Win 7: Hidden, but writeable on Win 7: + C:\ProgramData\\ + """ + if WINDOWS: + path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) + pathlist = [os.path.join(path, appname)] + elif sys.platform == 'darwin': + pathlist = [os.path.join('/Library/Application Support', appname)] + else: + # try looking in $XDG_CONFIG_DIRS + xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') + if xdg_config_dirs: + pathlist = [ + os.path.join(expanduser(x), appname) + for x in xdg_config_dirs.split(os.pathsep) + ] + else: + pathlist = [] + + # always look in /etc directly as well + pathlist.append('/etc') + + return pathlist + + +# -- Windows support functions -- + +def _get_win_folder_from_registry(csidl_name): + # type: (str) -> str + """ + This is a fallback technique at best. I'm not sure if using the + registry for this guarantees us the correct answer for all CSIDL_* + names. + """ + import _winreg + + shell_folder_name = { + "CSIDL_APPDATA": "AppData", + "CSIDL_COMMON_APPDATA": "Common AppData", + "CSIDL_LOCAL_APPDATA": "Local AppData", + }[csidl_name] + + key = _winreg.OpenKey( + _winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" + ) + directory, _type = _winreg.QueryValueEx(key, shell_folder_name) + return directory + + +def _get_win_folder_with_ctypes(csidl_name): + # type: (str) -> str + csidl_const = { + "CSIDL_APPDATA": 26, + "CSIDL_COMMON_APPDATA": 35, + "CSIDL_LOCAL_APPDATA": 28, + }[csidl_name] + + buf = ctypes.create_unicode_buffer(1024) + ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) + + # Downgrade to short path name if have highbit chars. See + # . + has_high_char = False + for c in buf: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + buf2 = ctypes.create_unicode_buffer(1024) + if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): + buf = buf2 + + return buf.value + + +if WINDOWS: + try: + import ctypes + _get_win_folder = _get_win_folder_with_ctypes + except ImportError: + _get_win_folder = _get_win_folder_from_registry + + +def _win_path_to_bytes(path): + """Encode Windows paths to bytes. Only used on Python 2. + + Motivation is to be consistent with other operating systems where paths + are also returned as bytes. This avoids problems mixing bytes and Unicode + elsewhere in the codebase. For more details and discussion see + . + + If encoding using ASCII and MBCS fails, return the original Unicode path. 
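+
+    Illustration (hedged): a plain path like u'C:\\bin' encodes on the
+    first 'ASCII' pass, while a path whose characters fit neither encoding
+    falls through the loop and is returned unchanged as text.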
+ """ + for encoding in ('ASCII', 'MBCS'): + try: + return path.encode(encoding) + except (UnicodeEncodeError, LookupError): + pass + return path diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/appdirs.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/appdirs.pyc new file mode 100644 index 0000000..83f324c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/appdirs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/compat.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/compat.py new file mode 100644 index 0000000..2d8b3bf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/compat.py @@ -0,0 +1,264 @@ +"""Stuff that differs in different Python versions and platform +distributions.""" +from __future__ import absolute_import, division + +import codecs +import locale +import logging +import os +import shutil +import sys + +from pip._vendor.six import text_type + +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Tuple, Text # noqa: F401 + +try: + import ipaddress +except ImportError: + try: + from pip._vendor import ipaddress # type: ignore + except ImportError: + import ipaddr as ipaddress # type: ignore + ipaddress.ip_address = ipaddress.IPAddress # type: ignore + ipaddress.ip_network = ipaddress.IPNetwork # type: ignore + + +__all__ = [ + "ipaddress", "uses_pycache", "console_to_str", "native_str", + "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile", "get_terminal_size", + "get_extension_suffixes", +] + + +logger = logging.getLogger(__name__) + +if sys.version_info >= (3, 4): + uses_pycache = True + from importlib.util import cache_from_source +else: + import imp + + try: + cache_from_source = imp.cache_from_source # type: ignore + except AttributeError: + # does not use __pycache__ + cache_from_source = None + + uses_pycache = cache_from_source is not None + + +if sys.version_info >= (3, 5): + backslashreplace_decode = "backslashreplace" +else: + # In version 3.4 and older, backslashreplace exists + # but does not support use for decoding. + # We implement our own replace handler for this + # situation, so that we can consistently use + # backslash replacement for all versions. + def backslashreplace_decode_fn(err): + raw_bytes = (err.object[i] for i in range(err.start, err.end)) + if sys.version_info[0] == 2: + # Python 2 gave us characters - convert to numeric bytes + raw_bytes = (ord(b) for b in raw_bytes) + return u"".join(u"\\x%x" % c for c in raw_bytes), err.end + codecs.register_error( + "backslashreplace_decode", + backslashreplace_decode_fn, + ) + backslashreplace_decode = "backslashreplace_decode" + + +def console_to_str(data): + # type: (bytes) -> Text + """Return a string, safe for output, of subprocess output. + + We assume the data is in the locale preferred encoding. + If it won't decode properly, we warn the user but decode as + best we can. + + We also ensure that the output can be safely written to + standard output without encoding errors. + """ + + # First, get the encoding we assume. This is the preferred + # encoding for the locale, unless that is not found, or + # it is ASCII, in which case assume UTF-8 + encoding = locale.getpreferredencoding() + if (not encoding) or codecs.lookup(encoding).name == "ascii": + encoding = "utf-8" + + # Now try to decode the data - if we fail, warn the user and + # decode with replacement. 
+ try: + decoded_data = data.decode(encoding) + except UnicodeDecodeError: + logger.warning( + "Subprocess output does not appear to be encoded as %s", + encoding, + ) + decoded_data = data.decode(encoding, errors=backslashreplace_decode) + + # Make sure we can print the output, by encoding it to the output + # encoding with replacement of unencodable characters, and then + # decoding again. + # We use stderr's encoding because it's less likely to be + # redirected and if we don't find an encoding we skip this + # step (on the assumption that output is wrapped by something + # that won't fail). + # The double getattr is to deal with the possibility that we're + # being called in a situation where sys.__stderr__ doesn't exist, + # or doesn't have an encoding attribute. Neither of these cases + # should occur in normal pip use, but there's no harm in checking + # in case people use pip in (unsupported) unusual situations. + output_encoding = getattr(getattr(sys, "__stderr__", None), + "encoding", None) + + if output_encoding: + output_encoded = decoded_data.encode( + output_encoding, + errors="backslashreplace" + ) + decoded_data = output_encoded.decode(output_encoding) + + return decoded_data + + +if sys.version_info >= (3,): + def native_str(s, replace=False): + # type: (str, bool) -> str + if isinstance(s, bytes): + return s.decode('utf-8', 'replace' if replace else 'strict') + return s + +else: + def native_str(s, replace=False): + # type: (str, bool) -> str + # Replace is ignored -- unicode to UTF-8 can't fail + if isinstance(s, text_type): + return s.encode('utf-8') + return s + + +def get_path_uid(path): + # type: (str) -> int + """ + Return path's uid. + + Does not follow symlinks: + https://github.com/pypa/pip/pull/935#discussion_r5307003 + + Placed this function in compat due to differences on AIX and + Jython, that should eventually go away. + + :raises OSError: When path is a symlink or can't be read. + """ + if hasattr(os, 'O_NOFOLLOW'): + fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) + file_uid = os.fstat(fd).st_uid + os.close(fd) + else: # AIX and Jython + # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW + if not os.path.islink(path): + # older versions of Jython don't have `os.fstat` + file_uid = os.stat(path).st_uid + else: + # raise OSError for parity with os.O_NOFOLLOW above + raise OSError( + "%s is a symlink; Will not return uid for symlinks" % path + ) + return file_uid + + +if sys.version_info >= (3, 4): + from importlib.machinery import EXTENSION_SUFFIXES + + def get_extension_suffixes(): + return EXTENSION_SUFFIXES +else: + from imp import get_suffixes + + def get_extension_suffixes(): + return [suffix[0] for suffix in get_suffixes()] + + +def expanduser(path): + # type: (str) -> str + """ + Expand ~ and ~user constructions. + + Includes a workaround for https://bugs.python.org/issue14768 + """ + expanded = os.path.expanduser(path) + if path.startswith('~/') and expanded.startswith('//'): + expanded = expanded[1:] + return expanded + + +# packages in the stdlib that may have installation metadata, but should not be +# considered 'installed'. 
this theoretically could be determined based on +# dist.location (py27:`sysconfig.get_paths()['stdlib']`, +# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may +# make this ineffective, so hard-coding +stdlib_pkgs = {"python", "wsgiref", "argparse"} + + +# windows detection, covers cpython and ironpython +WINDOWS = (sys.platform.startswith("win") or + (sys.platform == 'cli' and os.name == 'nt')) + + +def samefile(file1, file2): + # type: (str, str) -> bool + """Provide an alternative for os.path.samefile on Windows/Python2""" + if hasattr(os.path, 'samefile'): + return os.path.samefile(file1, file2) + else: + path1 = os.path.normcase(os.path.abspath(file1)) + path2 = os.path.normcase(os.path.abspath(file2)) + return path1 == path2 + + +if hasattr(shutil, 'get_terminal_size'): + def get_terminal_size(): + # type: () -> Tuple[int, int] + """ + Returns a tuple (x, y) representing the width(x) and the height(y) + in characters of the terminal window. + """ + return tuple(shutil.get_terminal_size()) # type: ignore +else: + def get_terminal_size(): + # type: () -> Tuple[int, int] + """ + Returns a tuple (x, y) representing the width(x) and the height(y) + in characters of the terminal window. + """ + def ioctl_GWINSZ(fd): + try: + import fcntl + import termios + import struct + cr = struct.unpack_from( + 'hh', + fcntl.ioctl(fd, termios.TIOCGWINSZ, '12345678') + ) + except Exception: + return None + if cr == (0, 0): + return None + return cr + cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) + if not cr: + try: + fd = os.open(os.ctermid(), os.O_RDONLY) + cr = ioctl_GWINSZ(fd) + os.close(fd) + except Exception: + pass + if not cr: + cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80)) + return int(cr[1]), int(cr[0]) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/compat.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/compat.pyc new file mode 100644 index 0000000..c81298c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/deprecation.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/deprecation.py new file mode 100644 index 0000000..0beaf74 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/deprecation.py @@ -0,0 +1,90 @@ +""" +A module that implements tooling to enable easy warnings about deprecations. +""" +from __future__ import absolute_import + +import logging +import warnings + +from pip._vendor.packaging.version import parse + +from pip import __version__ as current_version +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Any, Optional # noqa: F401 + + +class PipDeprecationWarning(Warning): + pass + + +_original_showwarning = None # type: Any + + +# Warnings <-> Logging Integration +def _showwarning(message, category, filename, lineno, file=None, line=None): + if file is not None: + if _original_showwarning is not None: + _original_showwarning( + message, category, filename, lineno, file, line, + ) + elif issubclass(category, PipDeprecationWarning): + # We use a specially named logger which will handle all of the + # deprecation messages for pip. 
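+        # Practical note (not in the upstream comment): since the logger
+        # name is fixed, embedding applications can silence or redirect
+        # these messages by configuring "pip._internal.deprecations" via
+        # the standard logging machinery.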
+ logger = logging.getLogger("pip._internal.deprecations") + logger.warning(message) + else: + _original_showwarning( + message, category, filename, lineno, file, line, + ) + + +def install_warning_logger(): + # type: () -> None + # Enable our Deprecation Warnings + warnings.simplefilter("default", PipDeprecationWarning, append=True) + + global _original_showwarning + + if _original_showwarning is None: + _original_showwarning = warnings.showwarning + warnings.showwarning = _showwarning + + +def deprecated(reason, replacement, gone_in, issue=None): + # type: (str, Optional[str], Optional[str], Optional[int]) -> None + """Helper to deprecate existing functionality. + + reason: + Textual reason shown to the user about why this functionality has + been deprecated. + replacement: + Textual suggestion shown to the user about what alternative + functionality they can use. + gone_in: + The version of pip does this functionality should get removed in. + Raises errors if pip's current version is greater than or equal to + this. + issue: + Issue number on the tracker that would serve as a useful place for + users to find related discussion and provide feedback. + + Always pass replacement, gone_in and issue as keyword arguments for clarity + at the call site. + """ + + # Construct a nice message. + # This is purposely eagerly formatted as we want it to appear as if someone + # typed this entire message out. + message = "DEPRECATION: " + reason + if replacement is not None: + message += " A possible replacement is {}.".format(replacement) + if issue is not None: + url = "https://github.com/pypa/pip/issues/" + str(issue) + message += " You can find discussion regarding this at {}.".format(url) + + # Raise as an error if it has to be removed. + if gone_in is not None and parse(current_version) >= parse(gone_in): + raise PipDeprecationWarning(message) + warnings.warn(message, category=PipDeprecationWarning, stacklevel=2) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/deprecation.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/deprecation.pyc new file mode 100644 index 0000000..2c4d4bf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/deprecation.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/encoding.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/encoding.py new file mode 100644 index 0000000..d36defa --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/encoding.py @@ -0,0 +1,39 @@ +import codecs +import locale +import re +import sys + +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import List, Tuple, Text # noqa: F401 + +BOMS = [ + (codecs.BOM_UTF8, 'utf8'), + (codecs.BOM_UTF16, 'utf16'), + (codecs.BOM_UTF16_BE, 'utf16-be'), + (codecs.BOM_UTF16_LE, 'utf16-le'), + (codecs.BOM_UTF32, 'utf32'), + (codecs.BOM_UTF32_BE, 'utf32-be'), + (codecs.BOM_UTF32_LE, 'utf32-le'), +] # type: List[Tuple[bytes, Text]] + +ENCODING_RE = re.compile(br'coding[:=]\s*([-\w.]+)') + + +def auto_decode(data): + # type: (bytes) -> Text + """Check a bytes string for a BOM to correctly detect the encoding + + Fallback to locale.getpreferredencoding(False) like open() on Python3""" + for bom, encoding in BOMS: + if data.startswith(bom): + return data[len(bom):].decode(encoding) + # Lets check the first two lines as in PEP263 + for line in data.split(b'\n')[:2]: + if line[0:1] == b'#' and ENCODING_RE.search(line): + 
encoding = ENCODING_RE.search(line).groups()[0].decode('ascii') + return data.decode(encoding) + return data.decode( + locale.getpreferredencoding(False) or sys.getdefaultencoding(), + ) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/encoding.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/encoding.pyc new file mode 100644 index 0000000..503d93e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/encoding.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/filesystem.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/filesystem.py new file mode 100644 index 0000000..1e6b033 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/filesystem.py @@ -0,0 +1,30 @@ +import os +import os.path + +from pip._internal.utils.compat import get_path_uid + + +def check_path_owner(path): + # type: (str) -> bool + # If we don't have a way to check the effective uid of this process, then + # we'll just assume that we own the directory. + if not hasattr(os, "geteuid"): + return True + + previous = None + while path != previous: + if os.path.lexists(path): + # Check if path is writable by current user. + if os.geteuid() == 0: + # Special handling for root user in order to handle properly + # cases where users use sudo without -H flag. + try: + path_uid = get_path_uid(path) + except OSError: + return False + return path_uid == 0 + else: + return os.access(path, os.W_OK) + else: + previous, path = path, os.path.dirname(path) + return False # assume we don't own the path diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/filesystem.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/filesystem.pyc new file mode 100644 index 0000000..24565ca Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/filesystem.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/glibc.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/glibc.py new file mode 100644 index 0000000..8a51f69 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/glibc.py @@ -0,0 +1,93 @@ +from __future__ import absolute_import + +import ctypes +import re +import warnings + +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Optional, Tuple # noqa: F401 + + +def glibc_version_string(): + # type: () -> Optional[str] + "Returns glibc version string, or None if not using glibc." + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + process_namespace = ctypes.CDLL(None) + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. 
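# A minimal sketch of the dlopen(NULL) trick described above (Linux only):
# ask the already-loaded libc for its version, and treat a missing symbol
# as "not linked against glibc" (e.g. musl-based systems).
import ctypes

libc = ctypes.CDLL(None)                 # handle for the running process
try:
    gnu_get_libc_version = libc.gnu_get_libc_version
    gnu_get_libc_version.restype = ctypes.c_char_p
    print(gnu_get_libc_version())        # e.g. "2.28" on a glibc system
except AttributeError:
    print("not linked against glibc")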
+ return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +# Separated out from have_compatible_glibc for easier unit testing +def check_glibc_version(version_str, required_major, minimum_minor): + # type: (str, int, int) -> bool + # Parse string and check against requested version. + # + # We use a regexp instead of str.split because we want to discard any + # random junk that might come after the minor version -- this might happen + # in patched/forked versions of glibc (e.g. Linaro's version of glibc + # uses version strings like "2.20-2014.11"). See gh-3588. + m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) + if not m: + warnings.warn("Expected glibc version with 2 components major.minor," + " got: %s" % version_str, RuntimeWarning) + return False + return (int(m.group("major")) == required_major and + int(m.group("minor")) >= minimum_minor) + + +def have_compatible_glibc(required_major, minimum_minor): + # type: (int, int) -> bool + version_str = glibc_version_string() # type: Optional[str] + if version_str is None: + return False + return check_glibc_version(version_str, required_major, minimum_minor) + + +# platform.libc_ver regularly returns completely nonsensical glibc +# versions. E.g. on my computer, platform says: +# +# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.7') +# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.9') +# +# But the truth is: +# +# ~$ ldd --version +# ldd (Debian GLIBC 2.22-11) 2.22 +# +# This is unfortunate, because it means that the linehaul data on libc +# versions that was generated by pip 8.1.2 and earlier is useless and +# misleading. Solution: instead of using platform, use our code that actually +# works. +def libc_ver(): + # type: () -> Tuple[str, str] + """Try to determine the glibc version + + Returns a tuple of strings (lib, version) which default to empty strings + in case the lookup fails. 
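# Spot-checking the version parsing described above: the anchored
# major.minor match discards vendor suffixes such as Linaro's (gh-3588).
import re

m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", "2.20-2014.11")
assert (int(m.group("major")), int(m.group("minor"))) == (2, 20)
# so check_glibc_version("2.20-2014.11", 2, 17) reports compatibility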
+ """ + glibc_version = glibc_version_string() + if glibc_version is None: + return ("", "") + else: + return ("glibc", glibc_version) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/glibc.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/glibc.pyc new file mode 100644 index 0000000..1ce3317 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/glibc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/hashes.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/hashes.py new file mode 100644 index 0000000..c6df7a1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/hashes.py @@ -0,0 +1,115 @@ +from __future__ import absolute_import + +import hashlib + +from pip._vendor.six import iteritems, iterkeys, itervalues + +from pip._internal.exceptions import ( + HashMismatch, HashMissing, InstallationError, +) +from pip._internal.utils.misc import read_chunks +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import ( # noqa: F401 + Dict, List, BinaryIO, NoReturn, Iterator + ) + from pip._vendor.six import PY3 + if PY3: + from hashlib import _Hash # noqa: F401 + else: + from hashlib import _hash as _Hash # noqa: F401 + + +# The recommended hash algo of the moment. Change this whenever the state of +# the art changes; it won't hurt backward compatibility. +FAVORITE_HASH = 'sha256' + + +# Names of hashlib algorithms allowed by the --hash option and ``pip hash`` +# Currently, those are the ones at least as collision-resistant as sha256. +STRONG_HASHES = ['sha256', 'sha384', 'sha512'] + + +class Hashes(object): + """A wrapper that builds multiple hashes at once and checks them against + known-good values + + """ + def __init__(self, hashes=None): + # type: (Dict[str, List[str]]) -> None + """ + :param hashes: A dict of algorithm names pointing to lists of allowed + hex digests + """ + self._allowed = {} if hashes is None else hashes + + def check_against_chunks(self, chunks): + # type: (Iterator[bytes]) -> None + """Check good hashes against ones built from iterable of chunks of + data. + + Raise HashMismatch if none match. + + """ + gots = {} + for hash_name in iterkeys(self._allowed): + try: + gots[hash_name] = hashlib.new(hash_name) + except (ValueError, TypeError): + raise InstallationError('Unknown hash name: %s' % hash_name) + + for chunk in chunks: + for hash in itervalues(gots): + hash.update(chunk) + + for hash_name, got in iteritems(gots): + if got.hexdigest() in self._allowed[hash_name]: + return + self._raise(gots) + + def _raise(self, gots): + # type: (Dict[str, _Hash]) -> NoReturn + raise HashMismatch(self._allowed, gots) + + def check_against_file(self, file): + # type: (BinaryIO) -> None + """Check good hashes against a file-like object + + Raise HashMismatch if none match. + + """ + return self.check_against_chunks(read_chunks(file)) + + def check_against_path(self, path): + # type: (str) -> None + with open(path, 'rb') as file: + return self.check_against_file(file) + + def __nonzero__(self): + # type: () -> bool + """Return whether I know any known-good hashes.""" + return bool(self._allowed) + + def __bool__(self): + # type: () -> bool + return self.__nonzero__() + + +class MissingHashes(Hashes): + """A workalike for Hashes used when we're missing a hash for a requirement + + It computes the actual hash of the requirement and raises a HashMissing + exception showing it to the user. 
+ + """ + def __init__(self): + # type: () -> None + """Don't offer the ``hashes`` kwarg.""" + # Pass our favorite hash in to generate a "gotten hash". With the + # empty list, it will never match, so an error will always raise. + super(MissingHashes, self).__init__(hashes={FAVORITE_HASH: []}) + + def _raise(self, gots): + # type: (Dict[str, _Hash]) -> NoReturn + raise HashMissing(gots[FAVORITE_HASH].hexdigest()) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/hashes.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/hashes.pyc new file mode 100644 index 0000000..bd41248 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/hashes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/logging.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/logging.py new file mode 100644 index 0000000..579d696 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/logging.py @@ -0,0 +1,318 @@ +from __future__ import absolute_import + +import contextlib +import errno +import logging +import logging.handlers +import os +import sys + +from pip._vendor.six import PY2 + +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.misc import ensure_dir + +try: + import threading +except ImportError: + import dummy_threading as threading # type: ignore + + +try: + from pip._vendor import colorama +# Lots of different errors can come from this, including SystemError and +# ImportError. +except Exception: + colorama = None + + +_log_state = threading.local() +_log_state.indentation = 0 + + +class BrokenStdoutLoggingError(Exception): + """ + Raised if BrokenPipeError occurs for the stdout stream while logging. + """ + pass + + +# BrokenPipeError does not exist in Python 2 and, in addition, manifests +# differently in Windows and non-Windows. +if WINDOWS: + # In Windows, a broken pipe can show up as EINVAL rather than EPIPE: + # https://bugs.python.org/issue19612 + # https://bugs.python.org/issue30418 + if PY2: + def _is_broken_pipe_error(exc_class, exc): + """See the docstring for non-Windows Python 3 below.""" + return (exc_class is IOError and + exc.errno in (errno.EINVAL, errno.EPIPE)) + else: + # In Windows, a broken pipe IOError became OSError in Python 3. + def _is_broken_pipe_error(exc_class, exc): + """See the docstring for non-Windows Python 3 below.""" + return ((exc_class is BrokenPipeError) or # noqa: F821 + (exc_class is OSError and + exc.errno in (errno.EINVAL, errno.EPIPE))) +elif PY2: + def _is_broken_pipe_error(exc_class, exc): + """See the docstring for non-Windows Python 3 below.""" + return (exc_class is IOError and exc.errno == errno.EPIPE) +else: + # Then we are in the non-Windows Python 3 case. + def _is_broken_pipe_error(exc_class, exc): + """ + Return whether an exception is a broken pipe error. + + Args: + exc_class: an exception class. + exc: an exception instance. + """ + return (exc_class is BrokenPipeError) # noqa: F821 + + +@contextlib.contextmanager +def indent_log(num=2): + """ + A context manager which will cause the log output to be indented for any + log messages emitted inside it. + """ + _log_state.indentation += num + try: + yield + finally: + _log_state.indentation -= num + + +def get_indentation(): + return getattr(_log_state, 'indentation', 0) + + +class IndentingFormatter(logging.Formatter): + def __init__(self, *args, **kwargs): + """ + A logging.Formatter obeying containing indent_log contexts. 
+ + :param add_timestamp: A bool indicating output lines should be prefixed + with their record's timestamp. + """ + self.add_timestamp = kwargs.pop("add_timestamp", False) + super(IndentingFormatter, self).__init__(*args, **kwargs) + + def format(self, record): + """ + Calls the standard formatter, but will indent all of the log messages + by our current indentation level. + """ + formatted = super(IndentingFormatter, self).format(record) + prefix = '' + if self.add_timestamp: + prefix = self.formatTime(record, "%Y-%m-%dT%H:%M:%S ") + prefix += " " * get_indentation() + formatted = "".join([ + prefix + line + for line in formatted.splitlines(True) + ]) + return formatted + + +def _color_wrap(*colors): + def wrapped(inp): + return "".join(list(colors) + [inp, colorama.Style.RESET_ALL]) + return wrapped + + +class ColorizedStreamHandler(logging.StreamHandler): + + # Don't build up a list of colors if we don't have colorama + if colorama: + COLORS = [ + # This needs to be in order from highest logging level to lowest. + (logging.ERROR, _color_wrap(colorama.Fore.RED)), + (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)), + ] + else: + COLORS = [] + + def __init__(self, stream=None, no_color=None): + logging.StreamHandler.__init__(self, stream) + self._no_color = no_color + + if WINDOWS and colorama: + self.stream = colorama.AnsiToWin32(self.stream) + + def _using_stdout(self): + """ + Return whether the handler is using sys.stdout. + """ + if WINDOWS and colorama: + # Then self.stream is an AnsiToWin32 object. + return self.stream.wrapped is sys.stdout + + return self.stream is sys.stdout + + def should_color(self): + # Don't colorize things if we do not have colorama or if told not to + if not colorama or self._no_color: + return False + + real_stream = ( + self.stream if not isinstance(self.stream, colorama.AnsiToWin32) + else self.stream.wrapped + ) + + # If the stream is a tty we should color it + if hasattr(real_stream, "isatty") and real_stream.isatty(): + return True + + # If we have an ANSI term we should color it + if os.environ.get("TERM") == "ANSI": + return True + + # If anything else we should not color it + return False + + def format(self, record): + msg = logging.StreamHandler.format(self, record) + + if self.should_color(): + for level, color in self.COLORS: + if record.levelno >= level: + msg = color(msg) + break + + return msg + + # The logging module says handleError() can be customized. + def handleError(self, record): + exc_class, exc = sys.exc_info()[:2] + # If a broken pipe occurred while calling write() or flush() on the + # stdout stream in logging's Handler.emit(), then raise our special + # exception so we can handle it in main() instead of logging the + # broken pipe error and continuing. + if (exc_class and self._using_stdout() and + _is_broken_pipe_error(exc_class, exc)): + raise BrokenStdoutLoggingError() + + return super(ColorizedStreamHandler, self).handleError(record) + + +class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): + + def _open(self): + ensure_dir(os.path.dirname(self.baseFilename)) + return logging.handlers.RotatingFileHandler._open(self) + + +class MaxLevelFilter(logging.Filter): + + def __init__(self, level): + self.level = level + + def filter(self, record): + return record.levelno < self.level + + +def setup_logging(verbosity, no_color, user_log_file): + """Configures and sets up all of the logging + + Returns the requested logging level, as its integer value. + """ + + # Determine the level to be logging at. 
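# A sketch of the stream-splitting that setup_logging() arranges below:
# records below WARNING go to stdout, WARNING and above to stderr. Logger
# and handler names here are invented for the example.
import logging
import sys

from pip._internal.utils.logging import MaxLevelFilter

out = logging.StreamHandler(sys.stdout)
out.addFilter(MaxLevelFilter(logging.WARNING))   # keep only < WARNING
err = logging.StreamHandler(sys.stderr)
err.setLevel(logging.WARNING)                    # keep only >= WARNING

split = logging.getLogger("split-demo")
split.addHandler(out)
split.addHandler(err)
split.setLevel(logging.DEBUG)
split.info("to stdout")
split.warning("to stderr")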
+ if verbosity >= 1: + level = "DEBUG" + elif verbosity == -1: + level = "WARNING" + elif verbosity == -2: + level = "ERROR" + elif verbosity <= -3: + level = "CRITICAL" + else: + level = "INFO" + + level_number = getattr(logging, level) + + # The "root" logger should match the "console" level *unless* we also need + # to log to a user log file. + include_user_log = user_log_file is not None + if include_user_log: + additional_log_file = user_log_file + root_level = "DEBUG" + else: + additional_log_file = "/dev/null" + root_level = level + + # Disable any logging besides WARNING unless we have DEBUG level logging + # enabled for vendored libraries. + vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG" + + # Shorthands for clarity + log_streams = { + "stdout": "ext://sys.stdout", + "stderr": "ext://sys.stderr", + } + handler_classes = { + "stream": "pip._internal.utils.logging.ColorizedStreamHandler", + "file": "pip._internal.utils.logging.BetterRotatingFileHandler", + } + + logging.config.dictConfig({ + "version": 1, + "disable_existing_loggers": False, + "filters": { + "exclude_warnings": { + "()": "pip._internal.utils.logging.MaxLevelFilter", + "level": logging.WARNING, + }, + }, + "formatters": { + "indent": { + "()": IndentingFormatter, + "format": "%(message)s", + }, + "indent_with_timestamp": { + "()": IndentingFormatter, + "format": "%(message)s", + "add_timestamp": True, + }, + }, + "handlers": { + "console": { + "level": level, + "class": handler_classes["stream"], + "no_color": no_color, + "stream": log_streams["stdout"], + "filters": ["exclude_warnings"], + "formatter": "indent", + }, + "console_errors": { + "level": "WARNING", + "class": handler_classes["stream"], + "no_color": no_color, + "stream": log_streams["stderr"], + "formatter": "indent", + }, + "user_log": { + "level": "DEBUG", + "class": handler_classes["file"], + "filename": additional_log_file, + "delay": True, + "formatter": "indent_with_timestamp", + }, + }, + "root": { + "level": root_level, + "handlers": ["console", "console_errors"] + ( + ["user_log"] if include_user_log else [] + ), + }, + "loggers": { + "pip._vendor": { + "level": vendored_log_level + } + }, + }) + + return level_number diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/logging.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/logging.pyc new file mode 100644 index 0000000..1317b02 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/logging.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/misc.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/misc.py new file mode 100644 index 0000000..84605ee --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/misc.py @@ -0,0 +1,1040 @@ +from __future__ import absolute_import + +import contextlib +import errno +import io +import locale +# we have a submodule named 'logging' which would shadow this if we used the +# regular name: +import logging as std_logging +import os +import posixpath +import re +import shutil +import stat +import subprocess +import sys +import tarfile +import zipfile +from collections import deque + +from pip._vendor import pkg_resources +# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is +# why we ignore the type on this import. 
+from pip._vendor.retrying import retry # type: ignore +from pip._vendor.six import PY2 +from pip._vendor.six.moves import input +from pip._vendor.six.moves.urllib import parse as urllib_parse +from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote + +from pip._internal.exceptions import CommandError, InstallationError +from pip._internal.locations import ( + running_under_virtualenv, site_packages, user_site, virtualenv_no_global, + write_delete_marker_file, +) +from pip._internal.utils.compat import ( + WINDOWS, console_to_str, expanduser, stdlib_pkgs, +) +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if PY2: + from io import BytesIO as StringIO +else: + from io import StringIO + +if MYPY_CHECK_RUNNING: + from typing import ( # noqa: F401 + Optional, Tuple, Iterable, List, Match, Union, Any, Mapping, Text, + AnyStr, Container + ) + from pip._vendor.pkg_resources import Distribution # noqa: F401 + from pip._internal.models.link import Link # noqa: F401 + from pip._internal.utils.ui import SpinnerInterface # noqa: F401 + + +__all__ = ['rmtree', 'display_path', 'backup_dir', + 'ask', 'splitext', + 'format_size', 'is_installable_dir', + 'is_svn_page', 'file_contents', + 'split_leading_dir', 'has_leading_dir', + 'normalize_path', + 'renames', 'get_prog', + 'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess', + 'captured_stdout', 'ensure_dir', + 'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'WHEEL_EXTENSION', + 'get_installed_version', 'remove_auth_from_url'] + + +logger = std_logging.getLogger(__name__) + +WHEEL_EXTENSION = '.whl' +BZ2_EXTENSIONS = ('.tar.bz2', '.tbz') +XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma') +ZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION) +TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar') +ARCHIVE_EXTENSIONS = ( + ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS) +SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS + +try: + import bz2 # noqa + SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS +except ImportError: + logger.debug('bz2 module is not available') + +try: + # Only for Python 3.3+ + import lzma # noqa + SUPPORTED_EXTENSIONS += XZ_EXTENSIONS +except ImportError: + logger.debug('lzma module is not available') + + +def ensure_dir(path): + # type: (AnyStr) -> None + """os.path.makedirs without EEXIST.""" + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +def get_prog(): + # type: () -> str + try: + prog = os.path.basename(sys.argv[0]) + if prog in ('__main__.py', '-c'): + return "%s -m pip" % sys.executable + else: + return prog + except (AttributeError, TypeError, IndexError): + pass + return 'pip' + + +# Retry every half second for up to 3 seconds +@retry(stop_max_delay=3000, wait_fixed=500) +def rmtree(dir, ignore_errors=False): + # type: (str, bool) -> None + shutil.rmtree(dir, ignore_errors=ignore_errors, + onerror=rmtree_errorhandler) + + +def rmtree_errorhandler(func, path, exc_info): + """On Windows, the files in .svn are read-only, so when rmtree() tries to + remove them, an exception is thrown. 
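# For intuition about the retry decorator applied to rmtree() above: it
# re-invokes the call every 500 ms for up to 3 s, which papers over
# transient Windows file locks (antivirus, indexers). A hand-rolled
# equivalent, for illustration only:
import time

def retry_briefly(func, wait=0.5, deadline=3.0):
    start = time.time()
    while True:
        try:
            return func()
        except OSError:
            if time.time() - start >= deadline:
                raise
            time.sleep(wait)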
We catch that here, remove the + read-only attribute, and hopefully continue without problems.""" + # if file type currently read only + if os.stat(path).st_mode & stat.S_IREAD: + # convert to read/write + os.chmod(path, stat.S_IWRITE) + # use the original function to repeat the operation + func(path) + return + else: + raise + + +def display_path(path): + # type: (Union[str, Text]) -> str + """Gives the display value for a given path, making it relative to cwd + if possible.""" + path = os.path.normcase(os.path.abspath(path)) + if sys.version_info[0] == 2: + path = path.decode(sys.getfilesystemencoding(), 'replace') + path = path.encode(sys.getdefaultencoding(), 'replace') + if path.startswith(os.getcwd() + os.path.sep): + path = '.' + path[len(os.getcwd()):] + return path + + +def backup_dir(dir, ext='.bak'): + # type: (str, str) -> str + """Figure out the name of a directory to back up the given dir to + (adding .bak, .bak2, etc)""" + n = 1 + extension = ext + while os.path.exists(dir + extension): + n += 1 + extension = ext + str(n) + return dir + extension + + +def ask_path_exists(message, options): + # type: (str, Iterable[str]) -> str + for action in os.environ.get('PIP_EXISTS_ACTION', '').split(): + if action in options: + return action + return ask(message, options) + + +def ask(message, options): + # type: (str, Iterable[str]) -> str + """Ask the message interactively, with the given possible responses""" + while 1: + if os.environ.get('PIP_NO_INPUT'): + raise Exception( + 'No input was expected ($PIP_NO_INPUT set); question: %s' % + message + ) + response = input(message) + response = response.strip().lower() + if response not in options: + print( + 'Your response (%r) was not one of the expected responses: ' + '%s' % (response, ', '.join(options)) + ) + else: + return response + + +def format_size(bytes): + # type: (float) -> str + if bytes > 1000 * 1000: + return '%.1fMB' % (bytes / 1000.0 / 1000) + elif bytes > 10 * 1000: + return '%ikB' % (bytes / 1000) + elif bytes > 1000: + return '%.1fkB' % (bytes / 1000.0) + else: + return '%ibytes' % bytes + + +def is_installable_dir(path): + # type: (str) -> bool + """Is path is a directory containing setup.py or pyproject.toml? 
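# Spot-checking format_size() above (decimal units, coarse rounding):
from pip._internal.utils.misc import format_size

assert format_size(512) == '512bytes'
assert format_size(2048) == '2.0kB'
assert format_size(125000) == '125kB'
assert format_size(3500000) == '3.5MB'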
+ """ + if not os.path.isdir(path): + return False + setup_py = os.path.join(path, 'setup.py') + if os.path.isfile(setup_py): + return True + pyproject_toml = os.path.join(path, 'pyproject.toml') + if os.path.isfile(pyproject_toml): + return True + return False + + +def is_svn_page(html): + # type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]] + """ + Returns true if the page appears to be the index page of an svn repository + """ + return (re.search(r'[^<]*Revision \d+:', html) and + re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)) + + +def file_contents(filename): + # type: (str) -> Text + with open(filename, 'rb') as fp: + return fp.read().decode('utf-8') + + +def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): + """Yield pieces of data from a file-like object until EOF.""" + while True: + chunk = file.read(size) + if not chunk: + break + yield chunk + + +def split_leading_dir(path): + # type: (Union[str, Text]) -> List[Union[str, Text]] + path = path.lstrip('/').lstrip('\\') + if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or + '\\' not in path): + return path.split('/', 1) + elif '\\' in path: + return path.split('\\', 1) + else: + return [path, ''] + + +def has_leading_dir(paths): + # type: (Iterable[Union[str, Text]]) -> bool + """Returns true if all the paths have the same leading path name + (i.e., everything is in one subdirectory in an archive)""" + common_prefix = None + for path in paths: + prefix, rest = split_leading_dir(path) + if not prefix: + return False + elif common_prefix is None: + common_prefix = prefix + elif prefix != common_prefix: + return False + return True + + +def normalize_path(path, resolve_symlinks=True): + # type: (str, bool) -> str + """ + Convert a path to its canonical, case-normalized, absolute version. + + """ + path = expanduser(path) + if resolve_symlinks: + path = os.path.realpath(path) + else: + path = os.path.abspath(path) + return os.path.normcase(path) + + +def splitext(path): + # type: (str) -> Tuple[str, str] + """Like os.path.splitext, but take off .tar too""" + base, ext = posixpath.splitext(path) + if base.lower().endswith('.tar'): + ext = base[-4:] + ext + base = base[:-4] + return base, ext + + +def renames(old, new): + # type: (str, str) -> None + """Like os.renames(), but handles renaming across devices.""" + # Implementation borrowed from os.renames(). + head, tail = os.path.split(new) + if head and tail and not os.path.exists(head): + os.makedirs(head) + + shutil.move(old, new) + + head, tail = os.path.split(old) + if head and tail: + try: + os.removedirs(head) + except OSError: + pass + + +def is_local(path): + # type: (str) -> bool + """ + Return True if path is within sys.prefix, if we're running in a virtualenv. + + If we're not in a virtualenv, all paths are considered "local." + + """ + if not running_under_virtualenv(): + return True + return normalize_path(path).startswith(normalize_path(sys.prefix)) + + +def dist_is_local(dist): + # type: (Distribution) -> bool + """ + Return True if given Distribution object is installed locally + (i.e. within current virtualenv). + + Always True if we're not in a virtualenv. + + """ + return is_local(dist_location(dist)) + + +def dist_in_usersite(dist): + # type: (Distribution) -> bool + """ + Return True if given Distribution is installed in user site. 
+ """ + norm_path = normalize_path(dist_location(dist)) + return norm_path.startswith(normalize_path(user_site)) + + +def dist_in_site_packages(dist): + # type: (Distribution) -> bool + """ + Return True if given Distribution is installed in + sysconfig.get_python_lib(). + """ + return normalize_path( + dist_location(dist) + ).startswith(normalize_path(site_packages)) + + +def dist_is_editable(dist): + # type: (Distribution) -> bool + """ + Return True if given Distribution is an editable install. + """ + for path_item in sys.path: + egg_link = os.path.join(path_item, dist.project_name + '.egg-link') + if os.path.isfile(egg_link): + return True + return False + + +def get_installed_distributions(local_only=True, + skip=stdlib_pkgs, + include_editables=True, + editables_only=False, + user_only=False): + # type: (bool, Container[str], bool, bool, bool) -> List[Distribution] + """ + Return a list of installed Distribution objects. + + If ``local_only`` is True (default), only return installations + local to the current virtualenv, if in a virtualenv. + + ``skip`` argument is an iterable of lower-case project names to + ignore; defaults to stdlib_pkgs + + If ``include_editables`` is False, don't report editables. + + If ``editables_only`` is True , only report editables. + + If ``user_only`` is True , only report installations in the user + site directory. + + """ + if local_only: + local_test = dist_is_local + else: + def local_test(d): + return True + + if include_editables: + def editable_test(d): + return True + else: + def editable_test(d): + return not dist_is_editable(d) + + if editables_only: + def editables_only_test(d): + return dist_is_editable(d) + else: + def editables_only_test(d): + return True + + if user_only: + user_test = dist_in_usersite + else: + def user_test(d): + return True + + # because of pkg_resources vendoring, mypy cannot find stub in typeshed + return [d for d in pkg_resources.working_set # type: ignore + if local_test(d) and + d.key not in skip and + editable_test(d) and + editables_only_test(d) and + user_test(d) + ] + + +def egg_link_path(dist): + # type: (Distribution) -> Optional[str] + """ + Return the path for the .egg-link file if it exists, otherwise, None. + + There's 3 scenarios: + 1) not in a virtualenv + try to find in site.USER_SITE, then site_packages + 2) in a no-global virtualenv + try to find in site_packages + 3) in a yes-global virtualenv + try to find in site_packages, then site.USER_SITE + (don't look in global location) + + For #1 and #3, there could be odd cases, where there's an egg-link in 2 + locations. + + This method will just return the first one found. + """ + sites = [] + if running_under_virtualenv(): + if virtualenv_no_global(): + sites.append(site_packages) + else: + sites.append(site_packages) + if user_site: + sites.append(user_site) + else: + if user_site: + sites.append(user_site) + sites.append(site_packages) + + for site in sites: + egglink = os.path.join(site, dist.project_name) + '.egg-link' + if os.path.isfile(egglink): + return egglink + return None + + +def dist_location(dist): + # type: (Distribution) -> str + """ + Get the site-packages location of this distribution. Generally + this is dist.location, except in the case of develop-installed + packages, where dist.location is the source code location, and we + want to know where the egg-link file is. 
+ + """ + egg_link = egg_link_path(dist) + if egg_link: + return egg_link + return dist.location + + +def current_umask(): + """Get the current umask which involves having to set it temporarily.""" + mask = os.umask(0) + os.umask(mask) + return mask + + +def unzip_file(filename, location, flatten=True): + # type: (str, str, bool) -> None + """ + Unzip the file (with path `filename`) to the destination `location`. All + files are written based on system defaults and umask (i.e. permissions are + not preserved), except that regular file members with any execute + permissions (user, group, or world) have "chmod +x" applied after being + written. Note that for windows, any execute changes using os.chmod are + no-ops per the python docs. + """ + ensure_dir(location) + zipfp = open(filename, 'rb') + try: + zip = zipfile.ZipFile(zipfp, allowZip64=True) + leading = has_leading_dir(zip.namelist()) and flatten + for info in zip.infolist(): + name = info.filename + fn = name + if leading: + fn = split_leading_dir(name)[1] + fn = os.path.join(location, fn) + dir = os.path.dirname(fn) + if fn.endswith('/') or fn.endswith('\\'): + # A directory + ensure_dir(fn) + else: + ensure_dir(dir) + # Don't use read() to avoid allocating an arbitrarily large + # chunk of memory for the file's content + fp = zip.open(name) + try: + with open(fn, 'wb') as destfp: + shutil.copyfileobj(fp, destfp) + finally: + fp.close() + mode = info.external_attr >> 16 + # if mode and regular file and any execute permissions for + # user/group/world? + if mode and stat.S_ISREG(mode) and mode & 0o111: + # make dest file have execute for user/group/world + # (chmod +x) no-op on windows per python docs + os.chmod(fn, (0o777 - current_umask() | 0o111)) + finally: + zipfp.close() + + +def untar_file(filename, location): + # type: (str, str) -> None + """ + Untar the file (with path `filename`) to the destination `location`. + All files are written based on system defaults and umask (i.e. permissions + are not preserved), except that regular file members with any execute + permissions (user, group, or world) have "chmod +x" applied after being + written. Note that for windows, any execute changes using os.chmod are + no-ops per the python docs. 
+ """ + ensure_dir(location) + if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'): + mode = 'r:gz' + elif filename.lower().endswith(BZ2_EXTENSIONS): + mode = 'r:bz2' + elif filename.lower().endswith(XZ_EXTENSIONS): + mode = 'r:xz' + elif filename.lower().endswith('.tar'): + mode = 'r' + else: + logger.warning( + 'Cannot determine compression type for file %s', filename, + ) + mode = 'r:*' + tar = tarfile.open(filename, mode) + try: + leading = has_leading_dir([ + member.name for member in tar.getmembers() + ]) + for member in tar.getmembers(): + fn = member.name + if leading: + # https://github.com/python/mypy/issues/1174 + fn = split_leading_dir(fn)[1] # type: ignore + path = os.path.join(location, fn) + if member.isdir(): + ensure_dir(path) + elif member.issym(): + try: + # https://github.com/python/typeshed/issues/2673 + tar._extract_member(member, path) # type: ignore + except Exception as exc: + # Some corrupt tar files seem to produce this + # (specifically bad symlinks) + logger.warning( + 'In the tar file %s the member %s is invalid: %s', + filename, member.name, exc, + ) + continue + else: + try: + fp = tar.extractfile(member) + except (KeyError, AttributeError) as exc: + # Some corrupt tar files seem to produce this + # (specifically bad symlinks) + logger.warning( + 'In the tar file %s the member %s is invalid: %s', + filename, member.name, exc, + ) + continue + ensure_dir(os.path.dirname(path)) + with open(path, 'wb') as destfp: + shutil.copyfileobj(fp, destfp) + fp.close() + # Update the timestamp (useful for cython compiled files) + # https://github.com/python/typeshed/issues/2673 + tar.utime(member, path) # type: ignore + # member have any execute permissions for user/group/world? + if member.mode & 0o111: + # make dest file have execute for user/group/world + # no-op on windows per python docs + os.chmod(path, (0o777 - current_umask() | 0o111)) + finally: + tar.close() + + +def unpack_file( + filename, # type: str + location, # type: str + content_type, # type: Optional[str] + link # type: Optional[Link] +): + # type: (...) -> None + filename = os.path.realpath(filename) + if (content_type == 'application/zip' or + filename.lower().endswith(ZIP_EXTENSIONS) or + zipfile.is_zipfile(filename)): + unzip_file( + filename, + location, + flatten=not filename.endswith('.whl') + ) + elif (content_type == 'application/x-gzip' or + tarfile.is_tarfile(filename) or + filename.lower().endswith( + TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)): + untar_file(filename, location) + elif (content_type and content_type.startswith('text/html') and + is_svn_page(file_contents(filename))): + # We don't really care about this + from pip._internal.vcs.subversion import Subversion + Subversion('svn+' + link.url).unpack(location) + else: + # FIXME: handle? + # FIXME: magic signatures? + logger.critical( + 'Cannot unpack file %s (downloaded from %s, content-type: %s); ' + 'cannot detect archive format', + filename, location, content_type, + ) + raise InstallationError( + 'Cannot determine archive format of %s' % location + ) + + +def call_subprocess( + cmd, # type: List[str] + show_stdout=True, # type: bool + cwd=None, # type: Optional[str] + on_returncode='raise', # type: str + extra_ok_returncodes=None, # type: Optional[Iterable[int]] + command_desc=None, # type: Optional[str] + extra_environ=None, # type: Optional[Mapping[str, Any]] + unset_environ=None, # type: Optional[Iterable[str]] + spinner=None # type: Optional[SpinnerInterface] +): + # type: (...) 
-> Optional[Text] + """ + Args: + extra_ok_returncodes: an iterable of integer return codes that are + acceptable, in addition to 0. Defaults to None, which means []. + unset_environ: an iterable of environment variable names to unset + prior to calling subprocess.Popen(). + """ + if extra_ok_returncodes is None: + extra_ok_returncodes = [] + if unset_environ is None: + unset_environ = [] + # This function's handling of subprocess output is confusing and I + # previously broke it terribly, so as penance I will write a long comment + # explaining things. + # + # The obvious thing that affects output is the show_stdout= + # kwarg. show_stdout=True means, let the subprocess write directly to our + # stdout. Even though it is nominally the default, it is almost never used + # inside pip (and should not be used in new code without a very good + # reason); as of 2016-02-22 it is only used in a few places inside the VCS + # wrapper code. Ideally we should get rid of it entirely, because it + # creates a lot of complexity here for a rarely used feature. + # + # Most places in pip set show_stdout=False. What this means is: + # - We connect the child stdout to a pipe, which we read. + # - By default, we hide the output but show a spinner -- unless the + # subprocess exits with an error, in which case we show the output. + # - If the --verbose option was passed (= loglevel is DEBUG), then we show + # the output unconditionally. (But in this case we don't want to show + # the output a second time if it turns out that there was an error.) + # + # stderr is always merged with stdout (even if show_stdout=True). + if show_stdout: + stdout = None + else: + stdout = subprocess.PIPE + if command_desc is None: + cmd_parts = [] + for part in cmd: + if ' ' in part or '\n' in part or '"' in part or "'" in part: + part = '"%s"' % part.replace('"', '\\"') + cmd_parts.append(part) + command_desc = ' '.join(cmd_parts) + logger.debug("Running command %s", command_desc) + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + for name in unset_environ: + env.pop(name, None) + try: + proc = subprocess.Popen( + cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, + stdout=stdout, cwd=cwd, env=env, + ) + proc.stdin.close() + except Exception as exc: + logger.critical( + "Error %s while executing command %s", exc, command_desc, + ) + raise + all_output = [] + if stdout is not None: + while True: + line = console_to_str(proc.stdout.readline()) + if not line: + break + line = line.rstrip() + all_output.append(line + '\n') + if logger.getEffectiveLevel() <= std_logging.DEBUG: + # Show the line immediately + logger.debug(line) + else: + # Update the spinner + if spinner is not None: + spinner.spin() + try: + proc.wait() + finally: + if proc.stdout: + proc.stdout.close() + if spinner is not None: + if proc.returncode: + spinner.finish("error") + else: + spinner.finish("done") + if proc.returncode and proc.returncode not in extra_ok_returncodes: + if on_returncode == 'raise': + if (logger.getEffectiveLevel() > std_logging.DEBUG and + not show_stdout): + logger.info( + 'Complete output from command %s:', command_desc, + ) + logger.info( + ''.join(all_output) + + '\n----------------------------------------' + ) + raise InstallationError( + 'Command "%s" failed with error code %s in %s' + % (command_desc, proc.returncode, cwd)) + elif on_returncode == 'warn': + logger.warning( + 'Command "%s" had error code %s in %s', + command_desc, proc.returncode, cwd, + ) + elif on_returncode == 'ignore': + pass + else: + 
raise ValueError('Invalid value: on_returncode=%s' % + repr(on_returncode)) + if not show_stdout: + return ''.join(all_output) + return None + + +def read_text_file(filename): + # type: (str) -> str + """Return the contents of *filename*. + + Try to decode the file contents with utf-8, the preferred system encoding + (e.g., cp1252 on some Windows machines), and latin1, in that order. + Decoding a byte string with latin1 will never raise an error. In the worst + case, the returned string will contain some garbage characters. + + """ + with open(filename, 'rb') as fp: + data = fp.read() + + encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1'] + for enc in encodings: + try: + # https://github.com/python/mypy/issues/1174 + data = data.decode(enc) # type: ignore + except UnicodeDecodeError: + continue + break + + assert not isinstance(data, bytes) # Latin1 should have worked. + return data + + +def _make_build_dir(build_dir): + os.makedirs(build_dir) + write_delete_marker_file(build_dir) + + +class FakeFile(object): + """Wrap a list of lines in an object with readline() to make + ConfigParser happy.""" + def __init__(self, lines): + self._gen = (l for l in lines) + + def readline(self): + try: + try: + return next(self._gen) + except NameError: + return self._gen.next() + except StopIteration: + return '' + + def __iter__(self): + return self._gen + + +class StreamWrapper(StringIO): + + @classmethod + def from_stream(cls, orig_stream): + cls.orig_stream = orig_stream + return cls() + + # compileall.compile_dir() needs stdout.encoding to print to stdout + @property + def encoding(self): + return self.orig_stream.encoding + + +@contextlib.contextmanager +def captured_output(stream_name): + """Return a context manager used by captured_stdout/stdin/stderr + that temporarily replaces the sys stream *stream_name* with a StringIO. + + Taken from Lib/support/__init__.py in the CPython repo. + """ + orig_stdout = getattr(sys, stream_name) + setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout)) + try: + yield getattr(sys, stream_name) + finally: + setattr(sys, stream_name, orig_stdout) + + +def captured_stdout(): + """Capture the output of sys.stdout: + + with captured_stdout() as stdout: + print('hello') + self.assertEqual(stdout.getvalue(), 'hello\n') + + Taken from Lib/support/__init__.py in the CPython repo. + """ + return captured_output('stdout') + + +def captured_stderr(): + """ + See captured_stdout(). + """ + return captured_output('stderr') + + +class cached_property(object): + """A property that is only computed once per instance and then replaces + itself with an ordinary attribute. Deleting the attribute resets the + property. + + Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175 + """ + + def __init__(self, func): + self.__doc__ = getattr(func, '__doc__') + self.func = func + + def __get__(self, obj, cls): + if obj is None: + # We're being accessed from the class itself, not from an object + return self + value = obj.__dict__[self.func.__name__] = self.func(obj) + return value + + +def get_installed_version(dist_name, working_set=None): + """Get the installed version of dist_name avoiding pkg_resources cache""" + # Create a requirement that we'll look for inside of setuptools. + req = pkg_resources.Requirement.parse(dist_name) + + if working_set is None: + # We want to avoid having this cached, so we need to construct a new + # working set each time. 
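# The cached_property descriptor above, exercised once: the wrapped method
# runs on first access only, after which the instance attribute shadows
# it. The Report class is invented for the example.
from pip._internal.utils.misc import cached_property

class Report(object):
    def __init__(self, rows):
        self.rows = rows

    @cached_property
    def total(self):
        print("computing...")       # printed a single time
        return sum(self.rows)

r = Report([1, 2, 3])
assert r.total == 6                 # computes and caches
assert r.total == 6                 # served from r.__dict__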
+ working_set = pkg_resources.WorkingSet() + + # Get the installed distribution from our working set + dist = working_set.find(req) + + # Check to see if we got an installed distribution or not, if we did + # we want to return it's version. + return dist.version if dist else None + + +def consume(iterator): + """Consume an iterable at C speed.""" + deque(iterator, maxlen=0) + + +# Simulates an enum +def enum(*sequential, **named): + enums = dict(zip(sequential, range(len(sequential))), **named) + reverse = {value: key for key, value in enums.items()} + enums['reverse_mapping'] = reverse + return type('Enum', (), enums) + + +def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): + """ + Return the URL for a VCS requirement. + + Args: + repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+"). + project_name: the (unescaped) project name. + """ + egg_project_name = pkg_resources.to_filename(project_name) + req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name) + if subdir: + req += '&subdirectory={}'.format(subdir) + + return req + + +def split_auth_from_netloc(netloc): + """ + Parse out and remove the auth information from a netloc. + + Returns: (netloc, (username, password)). + """ + if '@' not in netloc: + return netloc, (None, None) + + # Split from the right because that's how urllib.parse.urlsplit() + # behaves if more than one @ is present (which can be checked using + # the password attribute of urlsplit()'s return value). + auth, netloc = netloc.rsplit('@', 1) + if ':' in auth: + # Split from the left because that's how urllib.parse.urlsplit() + # behaves if more than one : is present (which again can be checked + # using the password attribute of the return value) + user_pass = auth.split(':', 1) + else: + user_pass = auth, None + + user_pass = tuple( + None if x is None else urllib_unquote(x) for x in user_pass + ) + + return netloc, user_pass + + +def redact_netloc(netloc): + # type: (str) -> str + """ + Replace the password in a netloc with "****", if it exists. + + For example, "user:pass@example.com" returns "user:****@example.com". + """ + netloc, (user, password) = split_auth_from_netloc(netloc) + if user is None: + return netloc + password = '' if password is None else ':****' + return '{user}{password}@{netloc}'.format(user=urllib_parse.quote(user), + password=password, + netloc=netloc) + + +def _transform_url(url, transform_netloc): + purl = urllib_parse.urlsplit(url) + netloc = transform_netloc(purl.netloc) + # stripped url + url_pieces = ( + purl.scheme, netloc, purl.path, purl.query, purl.fragment + ) + surl = urllib_parse.urlunsplit(url_pieces) + return surl + + +def _get_netloc(netloc): + return split_auth_from_netloc(netloc)[0] + + +def remove_auth_from_url(url): + # type: (str) -> str + # Return a copy of url with 'username:password@' removed. + # username/pass params are passed to subversion through flags + # and are not recognized in the url. + return _transform_url(url, _get_netloc) + + +def redact_password_from_url(url): + # type: (str) -> str + """Replace the password in a given url with ****.""" + return _transform_url(url, redact_netloc) + + +def protect_pip_from_modification_on_windows(modifying_pip): + """Protection of pip.exe from modification on Windows + + On Windows, any operation modifying pip should be run as: + python -m pip ... 
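# Spot-checking the URL credential helpers above; the URL is invented:
from pip._internal.utils.misc import (
    redact_password_from_url, remove_auth_from_url,
)

url = 'https://user:secret@example.com/simple/'
assert redact_password_from_url(url) == 'https://user:****@example.com/simple/'
assert remove_auth_from_url(url) == 'https://example.com/simple/'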
+ """ + pip_names = [ + "pip.exe", + "pip{}.exe".format(sys.version_info[0]), + "pip{}.{}.exe".format(*sys.version_info[:2]) + ] + + # See https://github.com/pypa/pip/issues/1299 for more discussion + should_show_use_python_msg = ( + modifying_pip and + WINDOWS and + os.path.basename(sys.argv[0]) in pip_names + ) + + if should_show_use_python_msg: + new_command = [ + sys.executable, "-m", "pip" + ] + sys.argv[1:] + raise CommandError( + 'To modify pip, please run the following command:\n{}' + .format(" ".join(new_command)) + ) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/misc.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/misc.pyc new file mode 100644 index 0000000..2dd6ca7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/misc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/models.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/models.py new file mode 100644 index 0000000..d5cb80a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/models.py @@ -0,0 +1,40 @@ +"""Utilities for defining models +""" + +import operator + + +class KeyBasedCompareMixin(object): + """Provides comparision capabilities that is based on a key + """ + + def __init__(self, key, defining_class): + self._compare_key = key + self._defining_class = defining_class + + def __hash__(self): + return hash(self._compare_key) + + def __lt__(self, other): + return self._compare(other, operator.__lt__) + + def __le__(self, other): + return self._compare(other, operator.__le__) + + def __gt__(self, other): + return self._compare(other, operator.__gt__) + + def __ge__(self, other): + return self._compare(other, operator.__ge__) + + def __eq__(self, other): + return self._compare(other, operator.__eq__) + + def __ne__(self, other): + return self._compare(other, operator.__ne__) + + def _compare(self, other, method): + if not isinstance(other, self._defining_class): + return NotImplemented + + return method(self._compare_key, other._compare_key) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/models.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/models.pyc new file mode 100644 index 0000000..d0d5121 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/models.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/outdated.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/outdated.py new file mode 100644 index 0000000..37c47a4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/outdated.py @@ -0,0 +1,164 @@ +from __future__ import absolute_import + +import datetime +import json +import logging +import os.path +import sys + +from pip._vendor import lockfile, pkg_resources +from pip._vendor.packaging import version as packaging_version + +from pip._internal.index import PackageFinder +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.filesystem import check_path_owner +from pip._internal.utils.misc import ensure_dir, get_installed_version +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + import optparse # noqa: F401 + from typing import Any, Dict # noqa: F401 + from pip._internal.download import PipSession # noqa: F401 + + +SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" + + +logger = logging.getLogger(__name__) + + +class SelfCheckState(object): + def 
__init__(self, cache_dir): + # type: (str) -> None + self.state = {} # type: Dict[str, Any] + self.statefile_path = None + + # Try to load the existing state + if cache_dir: + self.statefile_path = os.path.join(cache_dir, "selfcheck.json") + try: + with open(self.statefile_path) as statefile: + self.state = json.load(statefile)[sys.prefix] + except (IOError, ValueError, KeyError): + # Explicitly suppressing exceptions, since we don't want to + # error out if the cache file is invalid. + pass + + def save(self, pypi_version, current_time): + # type: (str, datetime.datetime) -> None + # If we do not have a path to cache in, don't bother saving. + if not self.statefile_path: + return + + # Check to make sure that we own the directory + if not check_path_owner(os.path.dirname(self.statefile_path)): + return + + # Now that we've ensured the directory is owned by this user, we'll go + # ahead and make sure that all our directories are created. + ensure_dir(os.path.dirname(self.statefile_path)) + + # Attempt to write out our version check file + with lockfile.LockFile(self.statefile_path): + if os.path.exists(self.statefile_path): + with open(self.statefile_path) as statefile: + state = json.load(statefile) + else: + state = {} + + state[sys.prefix] = { + "last_check": current_time.strftime(SELFCHECK_DATE_FMT), + "pypi_version": pypi_version, + } + + with open(self.statefile_path, "w") as statefile: + json.dump(state, statefile, sort_keys=True, + separators=(",", ":")) + + +def was_installed_by_pip(pkg): + # type: (str) -> bool + """Checks whether pkg was installed by pip + + This is used not to display the upgrade message when pip is in fact + installed by system package manager, such as dnf on Fedora. + """ + try: + dist = pkg_resources.get_distribution(pkg) + return (dist.has_metadata('INSTALLER') and + 'pip' in dist.get_metadata_lines('INSTALLER')) + except pkg_resources.DistributionNotFound: + return False + + +def pip_version_check(session, options): + # type: (PipSession, optparse.Values) -> None + """Check for an update for pip. + + Limit the frequency of checks to once per week. State is stored either in + the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix + of the pip script path. 
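# The once-per-week throttle described above, reduced to its arithmetic;
# the timestamps are invented for the example.
import datetime

SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
last_check = datetime.datetime.strptime(
    "2019-01-01T00:00:00Z", SELFCHECK_DATE_FMT)
now = datetime.datetime(2019, 1, 5)
week = 7 * 24 * 60 * 60
assert (now - last_check).total_seconds() < week   # cached value is reused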
+ """ + installed_version = get_installed_version("pip") + if not installed_version: + return + + pip_version = packaging_version.parse(installed_version) + pypi_version = None + + try: + state = SelfCheckState(cache_dir=options.cache_dir) + + current_time = datetime.datetime.utcnow() + # Determine if we need to refresh the state + if "last_check" in state.state and "pypi_version" in state.state: + last_check = datetime.datetime.strptime( + state.state["last_check"], + SELFCHECK_DATE_FMT + ) + if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60: + pypi_version = state.state["pypi_version"] + + # Refresh the version if we need to or just see if we need to warn + if pypi_version is None: + # Lets use PackageFinder to see what the latest pip version is + finder = PackageFinder( + find_links=options.find_links, + index_urls=[options.index_url] + options.extra_index_urls, + allow_all_prereleases=False, # Explicitly set to False + trusted_hosts=options.trusted_hosts, + session=session, + ) + all_candidates = finder.find_all_candidates("pip") + if not all_candidates: + return + pypi_version = str( + max(all_candidates, key=lambda c: c.version).version + ) + + # save that we've performed a check + state.save(pypi_version, current_time) + + remote_version = packaging_version.parse(pypi_version) + + # Determine if our pypi_version is older + if (pip_version < remote_version and + pip_version.base_version != remote_version.base_version and + was_installed_by_pip('pip')): + # Advise "python -m pip" on Windows to avoid issues + # with overwriting pip.exe. + if WINDOWS: + pip_cmd = "python -m pip" + else: + pip_cmd = "pip" + logger.warning( + "You are using pip version %s, however version %s is " + "available.\nYou should consider upgrading via the " + "'%s install --upgrade pip' command.", + pip_version, pypi_version, pip_cmd + ) + except Exception: + logger.debug( + "There was an error checking the latest version of pip", + exc_info=True, + ) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/outdated.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/outdated.pyc new file mode 100644 index 0000000..9d3eace Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/outdated.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/packaging.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/packaging.py new file mode 100644 index 0000000..7aaf7b5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/packaging.py @@ -0,0 +1,85 @@ +from __future__ import absolute_import + +import logging +import sys +from email.parser import FeedParser + +from pip._vendor import pkg_resources +from pip._vendor.packaging import specifiers, version + +from pip._internal import exceptions +from pip._internal.utils.misc import display_path +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Optional # noqa: F401 + from email.message import Message # noqa: F401 + from pip._vendor.pkg_resources import Distribution # noqa: F401 + + +logger = logging.getLogger(__name__) + + +def check_requires_python(requires_python): + # type: (Optional[str]) -> bool + """ + Check if the python version in use match the `requires_python` specifier. + + Returns `True` if the version of python in use matches the requirement. + Returns `False` if the version of python in use does not matches the + requirement. 
+ + Raises an InvalidSpecifier if `requires_python` have an invalid format. + """ + if requires_python is None: + # The package provides no information + return True + requires_python_specifier = specifiers.SpecifierSet(requires_python) + + # We only use major.minor.micro + python_version = version.parse('.'.join(map(str, sys.version_info[:3]))) + return python_version in requires_python_specifier + + +def get_metadata(dist): + # type: (Distribution) -> Message + if (isinstance(dist, pkg_resources.DistInfoDistribution) and + dist.has_metadata('METADATA')): + metadata = dist.get_metadata('METADATA') + elif dist.has_metadata('PKG-INFO'): + metadata = dist.get_metadata('PKG-INFO') + else: + logger.warning("No metadata found in %s", display_path(dist.location)) + metadata = '' + + feed_parser = FeedParser() + feed_parser.feed(metadata) + return feed_parser.close() + + +def check_dist_requires_python(dist): + pkg_info_dict = get_metadata(dist) + requires_python = pkg_info_dict.get('Requires-Python') + try: + if not check_requires_python(requires_python): + raise exceptions.UnsupportedPythonVersion( + "%s requires Python '%s' but the running Python is %s" % ( + dist.project_name, + requires_python, + '.'.join(map(str, sys.version_info[:3])),) + ) + except specifiers.InvalidSpecifier as e: + logger.warning( + "Package %s has an invalid Requires-Python entry %s - %s", + dist.project_name, requires_python, e, + ) + return + + +def get_installer(dist): + # type: (Distribution) -> str + if dist.has_metadata('INSTALLER'): + for line in dist.get_metadata_lines('INSTALLER'): + if line.strip(): + return line.strip() + return '' diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/packaging.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/packaging.pyc new file mode 100644 index 0000000..2a1e28e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/packaging.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/setuptools_build.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/setuptools_build.py new file mode 100644 index 0000000..03973e9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/setuptools_build.py @@ -0,0 +1,8 @@ +# Shim to wrap setup.py invocation with setuptools +SETUPTOOLS_SHIM = ( + "import setuptools, tokenize;__file__=%r;" + "f=getattr(tokenize, 'open', open)(__file__);" + "code=f.read().replace('\\r\\n', '\\n');" + "f.close();" + "exec(compile(code, __file__, 'exec'))" +) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/setuptools_build.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/setuptools_build.pyc new file mode 100644 index 0000000..ab41d93 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/setuptools_build.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/temp_dir.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/temp_dir.py new file mode 100644 index 0000000..2c81ad5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/temp_dir.py @@ -0,0 +1,155 @@ +from __future__ import absolute_import + +import errno +import itertools +import logging +import os.path +import tempfile + +from pip._internal.utils.misc import rmtree + +logger = logging.getLogger(__name__) + + +class TempDirectory(object): + """Helper class that owns and cleans up a temporary directory. 
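# A sketch of how a shim like SETUPTOOLS_SHIM above is typically executed:
# interpolate the setup.py path, then run the result as a -c one-liner.
# The path below is hypothetical, so the actual call stays commented out.
import subprocess
import sys

from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM

setup_py = '/tmp/somepkg/setup.py'   # hypothetical path
args = [sys.executable, '-c', SETUPTOOLS_SHIM % setup_py, 'egg_info']
# subprocess.check_call(args)        # needs a real setup.py to run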
+ + This class can be used as a context manager or as an OO representation of a + temporary directory. + + Attributes: + path + Location to the created temporary directory or None + delete + Whether the directory should be deleted when exiting + (when used as a contextmanager) + + Methods: + create() + Creates a temporary directory and stores its path in the path + attribute. + cleanup() + Deletes the temporary directory and sets path attribute to None + + When used as a context manager, a temporary directory is created on + entering the context and, if the delete attribute is True, on exiting the + context the created directory is deleted. + """ + + def __init__(self, path=None, delete=None, kind="temp"): + super(TempDirectory, self).__init__() + + if path is None and delete is None: + # If we were not given an explicit directory, and we were not given + # an explicit delete option, then we'll default to deleting. + delete = True + + self.path = path + self.delete = delete + self.kind = kind + + def __repr__(self): + return "<{} {!r}>".format(self.__class__.__name__, self.path) + + def __enter__(self): + self.create() + return self + + def __exit__(self, exc, value, tb): + if self.delete: + self.cleanup() + + def create(self): + """Create a temporary directory and store its path in self.path + """ + if self.path is not None: + logger.debug( + "Skipped creation of temporary directory: {}".format(self.path) + ) + return + # We realpath here because some systems have their default tmpdir + # symlinked to another directory. This tends to confuse build + # scripts, so we canonicalize the path by traversing potential + # symlinks here. + self.path = os.path.realpath( + tempfile.mkdtemp(prefix="pip-{}-".format(self.kind)) + ) + logger.debug("Created temporary directory: {}".format(self.path)) + + def cleanup(self): + """Remove the temporary directory created and reset state + """ + if self.path is not None and os.path.exists(self.path): + rmtree(self.path) + self.path = None + + +class AdjacentTempDirectory(TempDirectory): + """Helper class that creates a temporary directory adjacent to a real one. + + Attributes: + original + The original directory to create a temp directory for. + path + After calling create() or entering, contains the full + path to the temporary directory. + delete + Whether the directory should be deleted when exiting + (when used as a contextmanager) + + """ + # The characters that may be used to name the temp directory + # We always prepend a ~ and then rotate through these until + # a usable name is found. + # pkg_resources raises a different error for .dist-info folder + # with leading '-' and invalid metadata + LEADING_CHARS = "-~.=%0123456789" + + def __init__(self, original, delete=None): + super(AdjacentTempDirectory, self).__init__(delete=delete) + self.original = original.rstrip('/\\') + + @classmethod + def _generate_names(cls, name): + """Generates a series of temporary names. + + The algorithm replaces the leading characters in the name + with ones that are valid filesystem characters, but are not + valid package names (for both Python and pip definitions of + package). 
+ """ + for i in range(1, len(name)): + for candidate in itertools.combinations_with_replacement( + cls.LEADING_CHARS, i - 1): + new_name = '~' + ''.join(candidate) + name[i:] + if new_name != name: + yield new_name + + # If we make it this far, we will have to make a longer name + for i in range(len(cls.LEADING_CHARS)): + for candidate in itertools.combinations_with_replacement( + cls.LEADING_CHARS, i): + new_name = '~' + ''.join(candidate) + name + if new_name != name: + yield new_name + + def create(self): + root, name = os.path.split(self.original) + for candidate in self._generate_names(name): + path = os.path.join(root, candidate) + try: + os.mkdir(path) + except OSError as ex: + # Continue if the name exists already + if ex.errno != errno.EEXIST: + raise + else: + self.path = os.path.realpath(path) + break + + if not self.path: + # Final fallback on the default behavior. + self.path = os.path.realpath( + tempfile.mkdtemp(prefix="pip-{}-".format(self.kind)) + ) + logger.debug("Created temporary directory: {}".format(self.path)) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/temp_dir.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/temp_dir.pyc new file mode 100644 index 0000000..3520bdd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/temp_dir.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/typing.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/typing.py new file mode 100644 index 0000000..e085cdf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/typing.py @@ -0,0 +1,29 @@ +"""For neatly implementing static typing in pip. + +`mypy` - the static type analysis tool we use - uses the `typing` module, which +provides core functionality fundamental to mypy's functioning. + +Generally, `typing` would be imported at runtime and used in that fashion - +it acts as a no-op at runtime and does not have any run-time overhead by +design. + +As it turns out, `typing` is not vendorable - it uses separate sources for +Python 2/Python 3. Thus, this codebase can not expect it to be present. +To work around this, mypy allows the typing import to be behind a False-y +optional to prevent it from running at runtime and type-comments can be used +to remove the need for the types to be accessible directly during runtime. + +This module provides the False-y guard in a nicely named fashion so that a +curious maintainer can reach here to read this. + +In pip, all static-typing related imports should be guarded as follows: + + from pip._internal.utils.typing import MYPY_CHECK_RUNNING + + if MYPY_CHECK_RUNNING: + from typing import ... 
# noqa: F401
+
+Ref: https://github.com/python/mypy/issues/3216
+"""
+
+MYPY_CHECK_RUNNING = False
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/typing.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/typing.pyc
new file mode 100644
index 0000000..da6d9b0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/typing.pyc differ
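# ---------------------------------------------------------------------------
# Editor's note: a minimal illustration of the MYPY_CHECK_RUNNING guard that
# the typing.py docstring above prescribes; `greet` and its annotation are
# invented for the example.
# ---------------------------------------------------------------------------
from pip._internal.utils.typing import MYPY_CHECK_RUNNING

if MYPY_CHECK_RUNNING:
    from typing import List  # noqa: F401  # never imported at runtime


def greet(names):
    # type: (List[str]) -> str
    """The type comment keeps `typing` out of the runtime import graph."""
    return "hello " + ", ".join(names)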
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/ui.py b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/ui.py
new file mode 100644
index 0000000..433675d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/ui.py
@@ -0,0 +1,441 @@
+from __future__ import absolute_import, division
+
+import contextlib
+import itertools
+import logging
+import sys
+import time
+from signal import SIGINT, default_int_handler, signal
+
+from pip._vendor import six
+from pip._vendor.progress.bar import (
+    Bar, ChargingBar, FillingCirclesBar, FillingSquaresBar, IncrementalBar,
+    ShadyBar,
+)
+from pip._vendor.progress.helpers import HIDE_CURSOR, SHOW_CURSOR, WritelnMixin
+from pip._vendor.progress.spinner import Spinner
+
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.logging import get_indentation
+from pip._internal.utils.misc import format_size
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Any, Iterator, IO  # noqa: F401
+
+try:
+    from pip._vendor import colorama
+# Lots of different errors can come from this, including SystemError and
+# ImportError.
+except Exception:
+    colorama = None
+
+logger = logging.getLogger(__name__)
+
+
+def _select_progress_class(preferred, fallback):
+    encoding = getattr(preferred.file, "encoding", None)
+
+    # If we don't know what encoding this file is in, then we'll just assume
+    # that it doesn't support unicode and use the ASCII bar.
+    if not encoding:
+        return fallback
+
+    # Collect all of the possible characters we want to use with the preferred
+    # bar.
+    characters = [
+        getattr(preferred, "empty_fill", six.text_type()),
+        getattr(preferred, "fill", six.text_type()),
+    ]
+    characters += list(getattr(preferred, "phases", []))
+
+    # Try to decode the characters we're using for the bar using the encoding
+    # of the given file, if this works then we'll assume that we can use the
+    # fancier bar and if not we'll fall back to the plaintext bar.
+    try:
+        six.text_type().join(characters).encode(encoding)
+    except UnicodeEncodeError:
+        return fallback
+    else:
+        return preferred
+
+
+_BaseBar = _select_progress_class(IncrementalBar, Bar)  # type: Any
+
+
+class InterruptibleMixin(object):
+    """
+    Helper to ensure that self.finish() gets called on keyboard interrupt.
+
+    This allows downloads to be interrupted without leaving temporary state
+    (like hidden cursors) behind.
+
+    This class is similar to the progress library's existing SigIntMixin
+    helper, but as of version 1.2, that helper has the following problems:
+
+    1. It calls sys.exit().
+    2. It discards the existing SIGINT handler completely.
+    3. It leaves its own handler in place even after an uninterrupted finish,
+       which will have unexpected delayed effects if the user triggers an
+       unrelated keyboard interrupt some time after a progress-displaying
+       download has already completed, for example.
+    """
+
+    def __init__(self, *args, **kwargs):
+        """
+        Save the original SIGINT handler for later.
+        """
+        super(InterruptibleMixin, self).__init__(*args, **kwargs)
+
+        self.original_handler = signal(SIGINT, self.handle_sigint)
+
+        # If signal() returns None, the previous handler was not installed from
+        # Python, and we cannot restore it. This probably should not happen,
+        # but if it does, we must restore something sensible instead, at least.
+        # The least bad option should be Python's default SIGINT handler, which
+        # just raises KeyboardInterrupt.
+        if self.original_handler is None:
+            self.original_handler = default_int_handler
+
+    def finish(self):
+        """
+        Restore the original SIGINT handler after finishing.
+
+        This should happen regardless of whether the progress display finishes
+        normally, or gets interrupted.
+        """
+        super(InterruptibleMixin, self).finish()
+        signal(SIGINT, self.original_handler)
+
+    def handle_sigint(self, signum, frame):
+        """
+        Call self.finish() before delegating to the original SIGINT handler.
+
+        This handler should only be in place while the progress display is
+        active.
+        """
+        self.finish()
+        self.original_handler(signum, frame)
+
+
+class SilentBar(Bar):
+
+    def update(self):
+        pass
+
+
+class BlueEmojiBar(IncrementalBar):
+
+    suffix = "%(percent)d%%"
+    bar_prefix = " "
+    bar_suffix = " "
+    phases = (u"\U0001F539", u"\U0001F537", u"\U0001F535")  # type: Any
+
+
+class DownloadProgressMixin(object):
+
+    def __init__(self, *args, **kwargs):
+        super(DownloadProgressMixin, self).__init__(*args, **kwargs)
+        self.message = (" " * (get_indentation() + 2)) + self.message
+
+    @property
+    def downloaded(self):
+        return format_size(self.index)
+
+    @property
+    def download_speed(self):
+        # Avoid zero division errors...
+        if self.avg == 0.0:
+            return "..."
+        return format_size(1 / self.avg) + "/s"
+
+    @property
+    def pretty_eta(self):
+        if self.eta:
+            return "eta %s" % self.eta_td
+        return ""
+
+    def iter(self, it, n=1):
+        for x in it:
+            yield x
+            self.next(n)
+        self.finish()
+
+
+class WindowsMixin(object):
+
+    def __init__(self, *args, **kwargs):
+        # The Windows terminal does not support the hide/show cursor ANSI codes
+        # even with colorama. So we'll ensure that hide_cursor is False on
+        # Windows.
+        # This call needs to go before the super() call, so that hide_cursor
+        # is set in time. The base progress bar class writes the "hide cursor"
+        # code to the terminal in its init, so if we don't set this soon
+        # enough, we get a "hide" with no corresponding "show"...
+        if WINDOWS and self.hide_cursor:
+            self.hide_cursor = False
+
+        super(WindowsMixin, self).__init__(*args, **kwargs)
+
+        # Check if we are running on Windows and we have the colorama module,
+        # if we do then wrap our file with it.
+        if WINDOWS and colorama:
+            self.file = colorama.AnsiToWin32(self.file)
+            # The progress code expects to be able to call self.file.isatty()
+            # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+            # add it.
+            self.file.isatty = lambda: self.file.wrapped.isatty()
+            # The progress code expects to be able to call self.file.flush()
+            # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+            # add it.
+ self.file.flush = lambda: self.file.wrapped.flush() + + +class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, + DownloadProgressMixin): + + file = sys.stdout + message = "%(percent)d%%" + suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" + +# NOTE: The "type: ignore" comments on the following classes are there to +# work around https://github.com/python/typing/issues/241 + + +class DefaultDownloadProgressBar(BaseDownloadProgressBar, + _BaseBar): + pass + + +class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): # type: ignore + pass + + +class DownloadIncrementalBar(BaseDownloadProgressBar, # type: ignore + IncrementalBar): + pass + + +class DownloadChargingBar(BaseDownloadProgressBar, # type: ignore + ChargingBar): + pass + + +class DownloadShadyBar(BaseDownloadProgressBar, ShadyBar): # type: ignore + pass + + +class DownloadFillingSquaresBar(BaseDownloadProgressBar, # type: ignore + FillingSquaresBar): + pass + + +class DownloadFillingCirclesBar(BaseDownloadProgressBar, # type: ignore + FillingCirclesBar): + pass + + +class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, # type: ignore + BlueEmojiBar): + pass + + +class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin, + DownloadProgressMixin, WritelnMixin, Spinner): + + file = sys.stdout + suffix = "%(downloaded)s %(download_speed)s" + + def next_phase(self): + if not hasattr(self, "_phaser"): + self._phaser = itertools.cycle(self.phases) + return next(self._phaser) + + def update(self): + message = self.message % self + phase = self.next_phase() + suffix = self.suffix % self + line = ''.join([ + message, + " " if message else "", + phase, + " " if suffix else "", + suffix, + ]) + + self.writeln(line) + + +BAR_TYPES = { + "off": (DownloadSilentBar, DownloadSilentBar), + "on": (DefaultDownloadProgressBar, DownloadProgressSpinner), + "ascii": (DownloadIncrementalBar, DownloadProgressSpinner), + "pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner), + "emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner) +} + + +def DownloadProgressProvider(progress_bar, max=None): + if max is None or max == 0: + return BAR_TYPES[progress_bar][1]().iter + else: + return BAR_TYPES[progress_bar][0](max=max).iter + + +################################################################ +# Generic "something is happening" spinners +# +# We don't even try using progress.spinner.Spinner here because it's actually +# simpler to reimplement from scratch than to coerce their code into doing +# what we need. +################################################################ + +@contextlib.contextmanager +def hidden_cursor(file): + # type: (IO) -> Iterator[None] + # The Windows terminal does not support the hide/show cursor ANSI codes, + # even via colorama. So don't even try. + if WINDOWS: + yield + # We don't want to clutter the output with control characters if we're + # writing to a file, or if the user is running with --quiet. 
+ # See https://github.com/pypa/pip/issues/3418 + elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO: + yield + else: + file.write(HIDE_CURSOR) + try: + yield + finally: + file.write(SHOW_CURSOR) + + +class RateLimiter(object): + def __init__(self, min_update_interval_seconds): + # type: (float) -> None + self._min_update_interval_seconds = min_update_interval_seconds + self._last_update = 0 # type: float + + def ready(self): + # type: () -> bool + now = time.time() + delta = now - self._last_update + return delta >= self._min_update_interval_seconds + + def reset(self): + # type: () -> None + self._last_update = time.time() + + +class SpinnerInterface(object): + def spin(self): + # type: () -> None + raise NotImplementedError() + + def finish(self, final_status): + # type: (str) -> None + raise NotImplementedError() + + +class InteractiveSpinner(SpinnerInterface): + def __init__(self, message, file=None, spin_chars="-\\|/", + # Empirically, 8 updates/second looks nice + min_update_interval_seconds=0.125): + self._message = message + if file is None: + file = sys.stdout + self._file = file + self._rate_limiter = RateLimiter(min_update_interval_seconds) + self._finished = False + + self._spin_cycle = itertools.cycle(spin_chars) + + self._file.write(" " * get_indentation() + self._message + " ... ") + self._width = 0 + + def _write(self, status): + assert not self._finished + # Erase what we wrote before by backspacing to the beginning, writing + # spaces to overwrite the old text, and then backspacing again + backup = "\b" * self._width + self._file.write(backup + " " * self._width + backup) + # Now we have a blank slate to add our status + self._file.write(status) + self._width = len(status) + self._file.flush() + self._rate_limiter.reset() + + def spin(self): + # type: () -> None + if self._finished: + return + if not self._rate_limiter.ready(): + return + self._write(next(self._spin_cycle)) + + def finish(self, final_status): + # type: (str) -> None + if self._finished: + return + self._write(final_status) + self._file.write("\n") + self._file.flush() + self._finished = True + + +# Used for dumb terminals, non-interactive installs (no tty), etc. +# We still print updates occasionally (once every 60 seconds by default) to +# act as a keep-alive for systems like Travis-CI that take lack-of-output as +# an indication that a task has frozen. +class NonInteractiveSpinner(SpinnerInterface): + def __init__(self, message, min_update_interval_seconds=60): + # type: (str, float) -> None + self._message = message + self._finished = False + self._rate_limiter = RateLimiter(min_update_interval_seconds) + self._update("started") + + def _update(self, status): + assert not self._finished + self._rate_limiter.reset() + logger.info("%s: %s", self._message, status) + + def spin(self): + # type: () -> None + if self._finished: + return + if not self._rate_limiter.ready(): + return + self._update("still running...") + + def finish(self, final_status): + # type: (str) -> None + if self._finished: + return + self._update("finished with status '%s'" % (final_status,)) + self._finished = True + + +@contextlib.contextmanager +def open_spinner(message): + # type: (str) -> Iterator[SpinnerInterface] + # Interactive spinner goes directly to sys.stdout rather than being routed + # through the logging system, but it acts like it has level INFO, + # i.e. it's only displayed if we're at level INFO or better. 
+ # Non-interactive spinner goes through the logging system, so it is always + # in sync with logging configuration. + if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO: + spinner = InteractiveSpinner(message) # type: SpinnerInterface + else: + spinner = NonInteractiveSpinner(message) + try: + with hidden_cursor(sys.stdout): + yield spinner + except KeyboardInterrupt: + spinner.finish("canceled") + raise + except Exception: + spinner.finish("error") + raise + else: + spinner.finish("done") diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/utils/ui.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/ui.pyc new file mode 100644 index 0000000..bf18dbd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/utils/ui.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/__init__.py new file mode 100644 index 0000000..9cba764 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/__init__.py @@ -0,0 +1,534 @@ +"""Handles all VCS (version control) support""" +from __future__ import absolute_import + +import errno +import logging +import os +import shutil +import sys + +from pip._vendor.six.moves.urllib import parse as urllib_parse + +from pip._internal.exceptions import BadCommand +from pip._internal.utils.misc import ( + display_path, backup_dir, call_subprocess, rmtree, ask_path_exists, +) +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import ( # noqa: F401 + Any, Dict, Iterable, List, Mapping, Optional, Text, Tuple, Type + ) + from pip._internal.utils.ui import SpinnerInterface # noqa: F401 + + AuthInfo = Tuple[Optional[str], Optional[str]] + +__all__ = ['vcs'] + + +logger = logging.getLogger(__name__) + + +class RemoteNotFoundError(Exception): + pass + + +class RevOptions(object): + + """ + Encapsulates a VCS-specific revision to install, along with any VCS + install options. + + Instances of this class should be treated as if immutable. + """ + + def __init__(self, vcs, rev=None, extra_args=None): + # type: (VersionControl, Optional[str], Optional[List[str]]) -> None + """ + Args: + vcs: a VersionControl object. + rev: the name of the revision to install. + extra_args: a list of extra options. + """ + if extra_args is None: + extra_args = [] + + self.extra_args = extra_args + self.rev = rev + self.vcs = vcs + + def __repr__(self): + return '<RevOptions {}: rev={!r}>'.format(self.vcs.name, self.rev) + + @property + def arg_rev(self): + # type: () -> Optional[str] + if self.rev is None: + return self.vcs.default_arg_rev + + return self.rev + + def to_args(self): + # type: () -> List[str] + """ + Return the VCS-specific command arguments. + """ + args = [] # type: List[str] + rev = self.arg_rev + if rev is not None: + args += self.vcs.get_base_rev_args(rev) + args += self.extra_args + + return args + + def to_display(self): + # type: () -> str + if not self.rev: + return '' + + return ' (to revision {})'.format(self.rev) + + def make_new(self, rev): + # type: (str) -> RevOptions + """ + Make a copy of the current instance, but with a new rev. + + Args: + rev: the name of the revision for the new object. 
+ """ + return self.vcs.make_rev_options(rev, extra_args=self.extra_args) + + +class VcsSupport(object): + _registry = {} # type: Dict[str, Type[VersionControl]] + schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] + + def __init__(self): + # type: () -> None + # Register more schemes with urlparse for various version control + # systems + urllib_parse.uses_netloc.extend(self.schemes) + # Python >= 2.7.4, 3.3 doesn't have uses_fragment + if getattr(urllib_parse, 'uses_fragment', None): + urllib_parse.uses_fragment.extend(self.schemes) + super(VcsSupport, self).__init__() + + def __iter__(self): + return self._registry.__iter__() + + @property + def backends(self): + # type: () -> List[Type[VersionControl]] + return list(self._registry.values()) + + @property + def dirnames(self): + # type: () -> List[str] + return [backend.dirname for backend in self.backends] + + @property + def all_schemes(self): + # type: () -> List[str] + schemes = [] # type: List[str] + for backend in self.backends: + schemes.extend(backend.schemes) + return schemes + + def register(self, cls): + # type: (Type[VersionControl]) -> None + if not hasattr(cls, 'name'): + logger.warning('Cannot register VCS %s', cls.__name__) + return + if cls.name not in self._registry: + self._registry[cls.name] = cls + logger.debug('Registered VCS backend: %s', cls.name) + + def unregister(self, cls=None, name=None): + # type: (Optional[Type[VersionControl]], Optional[str]) -> None + if name in self._registry: + del self._registry[name] + elif cls in self._registry.values(): + del self._registry[cls.name] + else: + logger.warning('Cannot unregister because no class or name given') + + def get_backend_type(self, location): + # type: (str) -> Optional[Type[VersionControl]] + """ + Return the type of the version control backend if found at given + location, e.g. vcs.get_backend_type('/path/to/vcs/checkout') + """ + for vc_type in self._registry.values(): + if vc_type.controls_location(location): + logger.debug('Determine that %s uses VCS: %s', + location, vc_type.name) + return vc_type + return None + + def get_backend(self, name): + # type: (str) -> Optional[Type[VersionControl]] + name = name.lower() + if name in self._registry: + return self._registry[name] + return None + + +vcs = VcsSupport() + + +class VersionControl(object): + name = '' + dirname = '' + repo_name = '' + # List of supported schemes for this Version Control + schemes = () # type: Tuple[str, ...] + # Iterable of environment variable names to pass to call_subprocess(). + unset_environ = () # type: Tuple[str, ...] + default_arg_rev = None # type: Optional[str] + + def __init__(self, url=None, *args, **kwargs): + self.url = url + super(VersionControl, self).__init__(*args, **kwargs) + + def get_base_rev_args(self, rev): + """ + Return the base revision arguments for a vcs command. + + Args: + rev: the name of a revision to install. Cannot be None. + """ + raise NotImplementedError + + def make_rev_options(self, rev=None, extra_args=None): + # type: (Optional[str], Optional[List[str]]) -> RevOptions + """ + Return a RevOptions object. + + Args: + rev: the name of a revision to install. + extra_args: a list of extra options. 
+ """ + return RevOptions(self, rev, extra_args=extra_args) + + @classmethod + def _is_local_repository(cls, repo): + # type: (str) -> bool + """ + posix absolute paths start with os.path.sep, + win32 ones start with drive (like c:\\folder) + """ + drive, tail = os.path.splitdrive(repo) + return repo.startswith(os.path.sep) or bool(drive) + + def export(self, location): + """ + Export the repository at the url to the destination location + i.e. only download the files, without vcs informations + """ + raise NotImplementedError + + def get_netloc_and_auth(self, netloc, scheme): + """ + Parse the repository URL's netloc, and return the new netloc to use + along with auth information. + + Args: + netloc: the original repository URL netloc. + scheme: the repository URL's scheme without the vcs prefix. + + This is mainly for the Subversion class to override, so that auth + information can be provided via the --username and --password options + instead of through the URL. For other subclasses like Git without + such an option, auth information must stay in the URL. + + Returns: (netloc, (username, password)). + """ + return netloc, (None, None) + + def get_url_rev_and_auth(self, url): + # type: (str) -> Tuple[str, Optional[str], AuthInfo] + """ + Parse the repository URL to use, and return the URL, revision, + and auth info to use. + + Returns: (url, rev, (username, password)). + """ + scheme, netloc, path, query, frag = urllib_parse.urlsplit(url) + if '+' not in scheme: + raise ValueError( + "Sorry, {!r} is a malformed VCS url. " + "The format is <vcs>+<protocol>://<url>, " + "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url) + ) + # Remove the vcs prefix. + scheme = scheme.split('+', 1)[1] + netloc, user_pass = self.get_netloc_and_auth(netloc, scheme) + rev = None + if '@' in path: + path, rev = path.rsplit('@', 1) + url = urllib_parse.urlunsplit((scheme, netloc, path, query, '')) + return url, rev, user_pass + + def make_rev_args(self, username, password): + """ + Return the RevOptions "extra arguments" to use in obtain(). + """ + return [] + + def get_url_rev_options(self, url): + # type: (str) -> Tuple[str, RevOptions] + """ + Return the URL and RevOptions object to use in obtain() and in + some cases export(), as a tuple (url, rev_options). + """ + url, rev, user_pass = self.get_url_rev_and_auth(url) + username, password = user_pass + extra_args = self.make_rev_args(username, password) + rev_options = self.make_rev_options(rev, extra_args=extra_args) + + return url, rev_options + + def normalize_url(self, url): + # type: (str) -> str + """ + Normalize a URL for comparison by unquoting it and removing any + trailing slash. + """ + return urllib_parse.unquote(url).rstrip('/') + + def compare_urls(self, url1, url2): + # type: (str, str) -> bool + """ + Compare two repo URLs for identity, ignoring incidental differences. + """ + return (self.normalize_url(url1) == self.normalize_url(url2)) + + def fetch_new(self, dest, url, rev_options): + """ + Fetch a revision from a repository, in the case that this is the + first fetch from the repository. + + Args: + dest: the directory to fetch the repository to. + rev_options: a RevOptions object. + """ + raise NotImplementedError + + def switch(self, dest, url, rev_options): + """ + Switch the repo at ``dest`` to point to ``URL``. + + Args: + rev_options: a RevOptions object. + """ + raise NotImplementedError + + def update(self, dest, url, rev_options): + """ + Update an already-existing repo to the given ``rev_options``. 
+ + Args: + rev_options: a RevOptions object. + """ + raise NotImplementedError + + def is_commit_id_equal(self, dest, name): + """ + Return whether the id of the current commit equals the given name. + + Args: + dest: the repository directory. + name: a string name. + """ + raise NotImplementedError + + def obtain(self, dest): + # type: (str) -> None + """ + Install or update in editable mode the package represented by this + VersionControl object. + + Args: + dest: the repository directory in which to install or update. + """ + url, rev_options = self.get_url_rev_options(self.url) + + if not os.path.exists(dest): + self.fetch_new(dest, url, rev_options) + return + + rev_display = rev_options.to_display() + if self.is_repository_directory(dest): + existing_url = self.get_remote_url(dest) + if self.compare_urls(existing_url, url): + logger.debug( + '%s in %s exists, and has correct URL (%s)', + self.repo_name.title(), + display_path(dest), + url, + ) + if not self.is_commit_id_equal(dest, rev_options.rev): + logger.info( + 'Updating %s %s%s', + display_path(dest), + self.repo_name, + rev_display, + ) + self.update(dest, url, rev_options) + else: + logger.info('Skipping because already up-to-date.') + return + + logger.warning( + '%s %s in %s exists with URL %s', + self.name, + self.repo_name, + display_path(dest), + existing_url, + ) + prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', + ('s', 'i', 'w', 'b')) + else: + logger.warning( + 'Directory %s already exists, and is not a %s %s.', + dest, + self.name, + self.repo_name, + ) + # https://github.com/python/mypy/issues/1174 + prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore + ('i', 'w', 'b')) + + logger.warning( + 'The plan is to install the %s repository %s', + self.name, + url, + ) + response = ask_path_exists('What to do? %s' % prompt[0], prompt[1]) + + if response == 'a': + sys.exit(-1) + + if response == 'w': + logger.warning('Deleting %s', display_path(dest)) + rmtree(dest) + self.fetch_new(dest, url, rev_options) + return + + if response == 'b': + dest_dir = backup_dir(dest) + logger.warning( + 'Backing up %s to %s', display_path(dest), dest_dir, + ) + shutil.move(dest, dest_dir) + self.fetch_new(dest, url, rev_options) + return + + # Do nothing if the response is "i". + if response == 's': + logger.info( + 'Switching %s %s to %s%s', + self.repo_name, + display_path(dest), + url, + rev_display, + ) + self.switch(dest, url, rev_options) + + def unpack(self, location): + # type: (str) -> None + """ + Clean up current location and download the url repository + (and vcs infos) into location + """ + if os.path.exists(location): + rmtree(location) + self.obtain(location) + + @classmethod + def get_src_requirement(cls, location, project_name): + """ + Return a string representing the requirement needed to + redownload the files currently present in location, something + like: + {repository_url}@{revision}#egg={project_name}-{version_identifier} + """ + raise NotImplementedError + + @classmethod + def get_remote_url(cls, location): + """ + Return the url used at location + + Raises RemoteNotFoundError if the repository does not have a remote + url configured. + """ + raise NotImplementedError + + @classmethod + def get_revision(cls, location): + """ + Return the current commit id of the files at the given location. 
+ """ + raise NotImplementedError + + @classmethod + def run_command( + cls, + cmd, # type: List[str] + show_stdout=True, # type: bool + cwd=None, # type: Optional[str] + on_returncode='raise', # type: str + extra_ok_returncodes=None, # type: Optional[Iterable[int]] + command_desc=None, # type: Optional[str] + extra_environ=None, # type: Optional[Mapping[str, Any]] + spinner=None # type: Optional[SpinnerInterface] + ): + # type: (...) -> Optional[Text] + """ + Run a VCS subcommand + This is simply a wrapper around call_subprocess that adds the VCS + command name, and checks that the VCS is available + """ + cmd = [cls.name] + cmd + try: + return call_subprocess(cmd, show_stdout, cwd, + on_returncode=on_returncode, + extra_ok_returncodes=extra_ok_returncodes, + command_desc=command_desc, + extra_environ=extra_environ, + unset_environ=cls.unset_environ, + spinner=spinner) + except OSError as e: + # errno.ENOENT = no such file or directory + # In other words, the VCS executable isn't available + if e.errno == errno.ENOENT: + raise BadCommand( + 'Cannot find command %r - do you have ' + '%r installed and in your ' + 'PATH?' % (cls.name, cls.name)) + else: + raise # re-raise exception if a different error occurred + + @classmethod + def is_repository_directory(cls, path): + # type: (str) -> bool + """ + Return whether a directory path is a repository directory. + """ + logger.debug('Checking in %s for %s (%s)...', + path, cls.dirname, cls.name) + return os.path.exists(os.path.join(path, cls.dirname)) + + @classmethod + def controls_location(cls, location): + # type: (str) -> bool + """ + Check if a location is controlled by the vcs. + It is meant to be overridden to implement smarter detection + mechanisms for specific vcs. + + This can do more than is_repository_directory() alone. For example, + the Git override checks that Git is actually available. + """ + return cls.is_repository_directory(location) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/__init__.pyc new file mode 100644 index 0000000..48c80e8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/bazaar.py b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/bazaar.py new file mode 100644 index 0000000..4c6ac79 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/bazaar.py @@ -0,0 +1,114 @@ +from __future__ import absolute_import + +import logging +import os + +from pip._vendor.six.moves.urllib import parse as urllib_parse + +from pip._internal.download import path_to_url +from pip._internal.utils.misc import ( + display_path, make_vcs_requirement_url, rmtree, +) +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.vcs import VersionControl, vcs + +logger = logging.getLogger(__name__) + + +class Bazaar(VersionControl): + name = 'bzr' + dirname = '.bzr' + repo_name = 'branch' + schemes = ( + 'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', + 'bzr+lp', + ) + + def __init__(self, url=None, *args, **kwargs): + super(Bazaar, self).__init__(url, *args, **kwargs) + # This is only needed for python <2.7.5 + # Register lp but do not expose as a scheme to support bzr+lp. 
+ if getattr(urllib_parse, 'uses_fragment', None): + urllib_parse.uses_fragment.extend(['lp']) + + def get_base_rev_args(self, rev): + return ['-r', rev] + + def export(self, location): + """ + Export the Bazaar repository at the url to the destination location + """ + # Remove the location to make sure Bazaar can export it correctly + if os.path.exists(location): + rmtree(location) + + with TempDirectory(kind="export") as temp_dir: + self.unpack(temp_dir.path) + + self.run_command( + ['export', location], + cwd=temp_dir.path, show_stdout=False, + ) + + def fetch_new(self, dest, url, rev_options): + rev_display = rev_options.to_display() + logger.info( + 'Checking out %s%s to %s', + url, + rev_display, + display_path(dest), + ) + cmd_args = ['branch', '-q'] + rev_options.to_args() + [url, dest] + self.run_command(cmd_args) + + def switch(self, dest, url, rev_options): + self.run_command(['switch', url], cwd=dest) + + def update(self, dest, url, rev_options): + cmd_args = ['pull', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + + def get_url_rev_and_auth(self, url): + # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it + url, rev, user_pass = super(Bazaar, self).get_url_rev_and_auth(url) + if url.startswith('ssh://'): + url = 'bzr+' + url + return url, rev, user_pass + + @classmethod + def get_remote_url(cls, location): + urls = cls.run_command(['info'], show_stdout=False, cwd=location) + for line in urls.splitlines(): + line = line.strip() + for x in ('checkout of branch: ', + 'parent branch: '): + if line.startswith(x): + repo = line.split(x)[1] + if cls._is_local_repository(repo): + return path_to_url(repo) + return repo + return None + + @classmethod + def get_revision(cls, location): + revision = cls.run_command( + ['revno'], show_stdout=False, cwd=location, + ) + return revision.splitlines()[-1] + + @classmethod + def get_src_requirement(cls, location, project_name): + repo = cls.get_remote_url(location) + if not repo: + return None + if not repo.lower().startswith('bzr:'): + repo = 'bzr+' + repo + current_rev = cls.get_revision(location) + return make_vcs_requirement_url(repo, current_rev, project_name) + + def is_commit_id_equal(self, dest, name): + """Always assume the versions don't match""" + return False + + +vcs.register(Bazaar) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/bazaar.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/bazaar.pyc new file mode 100644 index 0000000..cf08cae Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/bazaar.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/git.py b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/git.py new file mode 100644 index 0000000..dd2bd61 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/git.py @@ -0,0 +1,369 @@ +from __future__ import absolute_import + +import logging +import os.path +import re + +from pip._vendor.packaging.version import parse as parse_version +from pip._vendor.six.moves.urllib import parse as urllib_parse +from pip._vendor.six.moves.urllib import request as urllib_request + +from pip._internal.exceptions import BadCommand +from pip._internal.utils.compat import samefile +from pip._internal.utils.misc import ( + display_path, make_vcs_requirement_url, redact_password_from_url, +) +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.vcs import RemoteNotFoundError, VersionControl, vcs + 
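# ---------------------------------------------------------------------------
# Editor's note: a small, self-contained sketch of the URL convention that
# VersionControl.get_url_rev_and_auth() implements in vcs/__init__.py above
# ("<vcs>+<protocol>://...@<rev>"). The example URL is invented; this is an
# illustration, not part of the committed file.
# ---------------------------------------------------------------------------
from pip._vendor.six.moves.urllib import parse as urllib_parse


def split_vcs_url(url):
    """Split 'git+https://host/repo.git@v1.0' into (vcs, plain_url, rev)."""
    scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
    if '+' not in scheme:
        raise ValueError("expected a '<vcs>+<protocol>://' style URL")
    vcs_name, plain_scheme = scheme.split('+', 1)
    rev = None
    if '@' in path:
        # Everything after the last '@' in the path names the revision.
        path, rev = path.rsplit('@', 1)
    plain_url = urllib_parse.urlunsplit((plain_scheme, netloc, path, query, ''))
    return vcs_name, plain_url, rev


# split_vcs_url('git+https://example.org/repo.git@v1.0')
# -> ('git', 'https://example.org/repo.git', 'v1.0')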
+urlsplit = urllib_parse.urlsplit
+urlunsplit = urllib_parse.urlunsplit
+
+
+logger = logging.getLogger(__name__)
+
+
+HASH_REGEX = re.compile('[a-fA-F0-9]{40}')
+
+
+def looks_like_hash(sha):
+    return bool(HASH_REGEX.match(sha))
+
+
+class Git(VersionControl):
+    name = 'git'
+    dirname = '.git'
+    repo_name = 'clone'
+    schemes = (
+        'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
+    )
+    # Prevent the user's environment variables from interfering with pip:
+    # https://github.com/pypa/pip/issues/1130
+    unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
+    default_arg_rev = 'HEAD'
+
+    def __init__(self, url=None, *args, **kwargs):
+
+        # Works around an apparent Git bug
+        # (see https://article.gmane.org/gmane.comp.version-control.git/146500)
+        if url:
+            scheme, netloc, path, query, fragment = urlsplit(url)
+            if scheme.endswith('file'):
+                initial_slashes = path[:-len(path.lstrip('/'))]
+                newpath = (
+                    initial_slashes +
+                    urllib_request.url2pathname(path)
+                    .replace('\\', '/').lstrip('/')
+                )
+                url = urlunsplit((scheme, netloc, newpath, query, fragment))
+                after_plus = scheme.find('+') + 1
+                url = scheme[:after_plus] + urlunsplit(
+                    (scheme[after_plus:], netloc, newpath, query, fragment),
+                )
+
+        super(Git, self).__init__(url, *args, **kwargs)
+
+    def get_base_rev_args(self, rev):
+        return [rev]
+
+    def get_git_version(self):
+        VERSION_PFX = 'git version '
+        version = self.run_command(['version'], show_stdout=False)
+        if version.startswith(VERSION_PFX):
+            version = version[len(VERSION_PFX):].split()[0]
+        else:
+            version = ''
+        # get first 3 positions of the git version because
+        # on windows it is x.y.z.windows.t, and this parses as
+        # LegacyVersion which is always smaller than a Version.
+        version = '.'.join(version.split('.')[:3])
+        return parse_version(version)
+
+    def get_current_branch(self, location):
+        """
+        Return the current branch, or None if HEAD isn't at a branch
+        (e.g. detached HEAD).
+        """
+        # git-symbolic-ref exits with empty stdout if "HEAD" is a detached
+        # HEAD rather than a symbolic ref. In addition, the -q causes the
+        # command to exit with status code 1 instead of 128 in this case
+        # and to suppress the message to stderr.
+        args = ['symbolic-ref', '-q', 'HEAD']
+        output = self.run_command(
+            args, extra_ok_returncodes=(1, ), show_stdout=False, cwd=location,
+        )
+        ref = output.strip()
+
+        if ref.startswith('refs/heads/'):
+            return ref[len('refs/heads/'):]
+
+        return None
+
+    def export(self, location):
+        """Export the Git repository at the url to the destination location"""
+        if not location.endswith('/'):
+            location = location + '/'
+
+        with TempDirectory(kind="export") as temp_dir:
+            self.unpack(temp_dir.path)
+            self.run_command(
+                ['checkout-index', '-a', '-f', '--prefix', location],
+                show_stdout=False, cwd=temp_dir.path
+            )
+
+    def get_revision_sha(self, dest, rev):
+        """
+        Return (sha_or_none, is_branch), where sha_or_none is a commit hash
+        if the revision names a remote branch or tag, otherwise None.
+
+        Args:
+          dest: the repository directory.
+          rev: the revision name.
+        """
+        # Pass rev to pre-filter the list.
+        output = self.run_command(['show-ref', rev], cwd=dest,
+                                  show_stdout=False, on_returncode='ignore')
+        refs = {}
+        for line in output.strip().splitlines():
+            try:
+                sha, ref = line.split()
+            except ValueError:
+                # Include the offending line to simplify troubleshooting if
+                # this error ever occurs.
+ raise ValueError('unexpected show-ref line: {!r}'.format(line)) + + refs[ref] = sha + + branch_ref = 'refs/remotes/origin/{}'.format(rev) + tag_ref = 'refs/tags/{}'.format(rev) + + sha = refs.get(branch_ref) + if sha is not None: + return (sha, True) + + sha = refs.get(tag_ref) + + return (sha, False) + + def resolve_revision(self, dest, url, rev_options): + """ + Resolve a revision to a new RevOptions object with the SHA1 of the + branch, tag, or ref if found. + + Args: + rev_options: a RevOptions object. + """ + rev = rev_options.arg_rev + sha, is_branch = self.get_revision_sha(dest, rev) + + if sha is not None: + rev_options = rev_options.make_new(sha) + rev_options.branch_name = rev if is_branch else None + + return rev_options + + # Do not show a warning for the common case of something that has + # the form of a Git commit hash. + if not looks_like_hash(rev): + logger.warning( + "Did not find branch or tag '%s', assuming revision or ref.", + rev, + ) + + if not rev.startswith('refs/'): + return rev_options + + # If it looks like a ref, we have to fetch it explicitly. + self.run_command( + ['fetch', '-q', url] + rev_options.to_args(), + cwd=dest, + ) + # Change the revision to the SHA of the ref we fetched + sha = self.get_revision(dest, rev='FETCH_HEAD') + rev_options = rev_options.make_new(sha) + + return rev_options + + def is_commit_id_equal(self, dest, name): + """ + Return whether the current commit hash equals the given name. + + Args: + dest: the repository directory. + name: a string name. + """ + if not name: + # Then avoid an unnecessary subprocess call. + return False + + return self.get_revision(dest) == name + + def fetch_new(self, dest, url, rev_options): + rev_display = rev_options.to_display() + logger.info( + 'Cloning %s%s to %s', redact_password_from_url(url), + rev_display, display_path(dest), + ) + self.run_command(['clone', '-q', url, dest]) + + if rev_options.rev: + # Then a specific revision was requested. + rev_options = self.resolve_revision(dest, url, rev_options) + branch_name = getattr(rev_options, 'branch_name', None) + if branch_name is None: + # Only do a checkout if the current commit id doesn't match + # the requested revision. + if not self.is_commit_id_equal(dest, rev_options.rev): + cmd_args = ['checkout', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + elif self.get_current_branch(dest) != branch_name: + # Then a specific branch was requested, and that branch + # is not yet checked out. 
+ track_branch = 'origin/{}'.format(branch_name) + cmd_args = [ + 'checkout', '-b', branch_name, '--track', track_branch, + ] + self.run_command(cmd_args, cwd=dest) + + #: repo may contain submodules + self.update_submodules(dest) + + def switch(self, dest, url, rev_options): + self.run_command(['config', 'remote.origin.url', url], cwd=dest) + cmd_args = ['checkout', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + + self.update_submodules(dest) + + def update(self, dest, url, rev_options): + # First fetch changes from the default remote + if self.get_git_version() >= parse_version('1.9.0'): + # fetch tags in addition to everything else + self.run_command(['fetch', '-q', '--tags'], cwd=dest) + else: + self.run_command(['fetch', '-q'], cwd=dest) + # Then reset to wanted revision (maybe even origin/master) + rev_options = self.resolve_revision(dest, url, rev_options) + cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + #: update submodules + self.update_submodules(dest) + + @classmethod + def get_remote_url(cls, location): + """ + Return URL of the first remote encountered. + + Raises RemoteNotFoundError if the repository does not have a remote + url configured. + """ + # We need to pass 1 for extra_ok_returncodes since the command + # exits with return code 1 if there are no matching lines. + stdout = cls.run_command( + ['config', '--get-regexp', r'remote\..*\.url'], + extra_ok_returncodes=(1, ), show_stdout=False, cwd=location, + ) + remotes = stdout.splitlines() + try: + found_remote = remotes[0] + except IndexError: + raise RemoteNotFoundError + + for remote in remotes: + if remote.startswith('remote.origin.url '): + found_remote = remote + break + url = found_remote.split(' ')[1] + return url.strip() + + @classmethod + def get_revision(cls, location, rev=None): + if rev is None: + rev = 'HEAD' + current_rev = cls.run_command( + ['rev-parse', rev], show_stdout=False, cwd=location, + ) + return current_rev.strip() + + @classmethod + def _get_subdirectory(cls, location): + """Return the relative path of setup.py to the git repo root.""" + # find the repo root + git_dir = cls.run_command(['rev-parse', '--git-dir'], + show_stdout=False, cwd=location).strip() + if not os.path.isabs(git_dir): + git_dir = os.path.join(location, git_dir) + root_dir = os.path.join(git_dir, '..') + # find setup.py + orig_location = location + while not os.path.exists(os.path.join(location, 'setup.py')): + last_location = location + location = os.path.dirname(location) + if location == last_location: + # We've traversed up to the root of the filesystem without + # finding setup.py + logger.warning( + "Could not find setup.py for directory %s (tried all " + "parent directories)", + orig_location, + ) + return None + # relative path of setup.py to repo root + if samefile(root_dir, location): + return None + return os.path.relpath(location, root_dir) + + @classmethod + def get_src_requirement(cls, location, project_name): + repo = cls.get_remote_url(location) + if not repo.lower().startswith('git:'): + repo = 'git+' + repo + current_rev = cls.get_revision(location) + subdir = cls._get_subdirectory(location) + req = make_vcs_requirement_url(repo, current_rev, project_name, + subdir=subdir) + + return req + + def get_url_rev_and_auth(self, url): + """ + Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. + That's required because although they use SSH they sometimes don't + work with a ssh:// scheme (e.g. GitHub). 
But we need a scheme for + parsing. Hence we remove it again afterwards and return it as a stub. + """ + if '://' not in url: + assert 'file:' not in url + url = url.replace('git+', 'git+ssh://') + url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) + url = url.replace('ssh://', '') + else: + url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) + + return url, rev, user_pass + + def update_submodules(self, location): + if not os.path.exists(os.path.join(location, '.gitmodules')): + return + self.run_command( + ['submodule', 'update', '--init', '--recursive', '-q'], + cwd=location, + ) + + @classmethod + def controls_location(cls, location): + if super(Git, cls).controls_location(location): + return True + try: + r = cls.run_command(['rev-parse'], + cwd=location, + show_stdout=False, + on_returncode='ignore') + return not r + except BadCommand: + logger.debug("could not determine if %s is under git control " + "because git is not available", location) + return False + + +vcs.register(Git) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/git.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/git.pyc new file mode 100644 index 0000000..2a25bec Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/git.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/mercurial.py b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/mercurial.py new file mode 100644 index 0000000..26e75de --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/mercurial.py @@ -0,0 +1,103 @@ +from __future__ import absolute_import + +import logging +import os + +from pip._vendor.six.moves import configparser + +from pip._internal.download import path_to_url +from pip._internal.utils.misc import display_path, make_vcs_requirement_url +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.vcs import VersionControl, vcs + +logger = logging.getLogger(__name__) + + +class Mercurial(VersionControl): + name = 'hg' + dirname = '.hg' + repo_name = 'clone' + schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http') + + def get_base_rev_args(self, rev): + return [rev] + + def export(self, location): + """Export the Hg repository at the url to the destination location""" + with TempDirectory(kind="export") as temp_dir: + self.unpack(temp_dir.path) + + self.run_command( + ['archive', location], show_stdout=False, cwd=temp_dir.path + ) + + def fetch_new(self, dest, url, rev_options): + rev_display = rev_options.to_display() + logger.info( + 'Cloning hg %s%s to %s', + url, + rev_display, + display_path(dest), + ) + self.run_command(['clone', '--noupdate', '-q', url, dest]) + cmd_args = ['update', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + + def switch(self, dest, url, rev_options): + repo_config = os.path.join(dest, self.dirname, 'hgrc') + config = configparser.SafeConfigParser() + try: + config.read(repo_config) + config.set('paths', 'default', url) + with open(repo_config, 'w') as config_file: + config.write(config_file) + except (OSError, configparser.NoSectionError) as exc: + logger.warning( + 'Could not switch Mercurial repository to %s: %s', url, exc, + ) + else: + cmd_args = ['update', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + + def update(self, dest, url, rev_options): + self.run_command(['pull', '-q'], cwd=dest) + cmd_args = ['update', '-q'] + rev_options.to_args() + 
self.run_command(cmd_args, cwd=dest) + + @classmethod + def get_remote_url(cls, location): + url = cls.run_command( + ['showconfig', 'paths.default'], + show_stdout=False, cwd=location).strip() + if cls._is_local_repository(url): + url = path_to_url(url) + return url.strip() + + @classmethod + def get_revision(cls, location): + current_revision = cls.run_command( + ['parents', '--template={rev}'], + show_stdout=False, cwd=location).strip() + return current_revision + + @classmethod + def get_revision_hash(cls, location): + current_rev_hash = cls.run_command( + ['parents', '--template={node}'], + show_stdout=False, cwd=location).strip() + return current_rev_hash + + @classmethod + def get_src_requirement(cls, location, project_name): + repo = cls.get_remote_url(location) + if not repo.lower().startswith('hg:'): + repo = 'hg+' + repo + current_rev_hash = cls.get_revision_hash(location) + return make_vcs_requirement_url(repo, current_rev_hash, project_name) + + def is_commit_id_equal(self, dest, name): + """Always assume the versions don't match""" + return False + + +vcs.register(Mercurial) diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/mercurial.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/mercurial.pyc new file mode 100644 index 0000000..1174c22 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/mercurial.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/subversion.py b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/subversion.py new file mode 100644 index 0000000..42ac5ac --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/subversion.py @@ -0,0 +1,200 @@ +from __future__ import absolute_import + +import logging +import os +import re + +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + display_path, make_vcs_requirement_url, rmtree, split_auth_from_netloc, +) +from pip._internal.vcs import VersionControl, vcs + +_svn_xml_url_re = re.compile('url="([^"]+)"') +_svn_rev_re = re.compile(r'committed-rev="(\d+)"') +_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') +_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>') + + +logger = logging.getLogger(__name__) + + +class Subversion(VersionControl): + name = 'svn' + dirname = '.svn' + repo_name = 'checkout' + schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn') + + def get_base_rev_args(self, rev): + return ['-r', rev] + + def export(self, location): + """Export the svn repository at the url to the destination location""" + url, rev_options = self.get_url_rev_options(self.url) + + logger.info('Exporting svn repository %s to %s', url, location) + with indent_log(): + if os.path.exists(location): + # Subversion doesn't like to check out over an existing + # directory --force fixes this, but was only added in svn 1.5 + rmtree(location) + cmd_args = ['export'] + rev_options.to_args() + [url, location] + self.run_command(cmd_args, show_stdout=False) + + def fetch_new(self, dest, url, rev_options): + rev_display = rev_options.to_display() + logger.info( + 'Checking out %s%s to %s', + url, + rev_display, + display_path(dest), + ) + cmd_args = ['checkout', '-q'] + rev_options.to_args() + [url, dest] + self.run_command(cmd_args) + + def switch(self, dest, url, rev_options): + cmd_args = ['switch'] + rev_options.to_args() + [url, dest] + self.run_command(cmd_args) + + def update(self, dest, url, rev_options): + cmd_args = ['update'] + 
rev_options.to_args() + [dest]
+        self.run_command(cmd_args)
+
+    @classmethod
+    def get_revision(cls, location):
+        """
+        Return the maximum revision for all files under a given location
+        """
+        # Note: taken from setuptools.command.egg_info
+        revision = 0
+
+        for base, dirs, files in os.walk(location):
+            if cls.dirname not in dirs:
+                dirs[:] = []
+                continue  # no sense walking uncontrolled subdirs
+            dirs.remove(cls.dirname)
+            entries_fn = os.path.join(base, cls.dirname, 'entries')
+            if not os.path.exists(entries_fn):
+                # FIXME: should we warn?
+                continue
+
+            dirurl, localrev = cls._get_svn_url_rev(base)
+
+            if base == location:
+                base = dirurl + '/'  # save the root url
+            elif not dirurl or not dirurl.startswith(base):
+                dirs[:] = []
+                continue  # not part of the same svn tree, skip it
+            revision = max(revision, localrev)
+        return revision
+
+    def get_netloc_and_auth(self, netloc, scheme):
+        """
+        This override allows the auth information to be passed to svn via the
+        --username and --password options instead of via the URL.
+        """
+        if scheme == 'ssh':
+            # The --username and --password options can't be used for
+            # svn+ssh URLs, so keep the auth information in the URL.
+            return super(Subversion, self).get_netloc_and_auth(
+                netloc, scheme)
+
+        return split_auth_from_netloc(netloc)
+
+    def get_url_rev_and_auth(self, url):
+        # Hotfix the URL scheme after removing svn+ from svn+ssh://; re-add it.
+        url, rev, user_pass = super(Subversion, self).get_url_rev_and_auth(url)
+        if url.startswith('ssh://'):
+            url = 'svn+' + url
+        return url, rev, user_pass
+
+    def make_rev_args(self, username, password):
+        extra_args = []
+        if username:
+            extra_args += ['--username', username]
+        if password:
+            extra_args += ['--password', password]
+
+        return extra_args
+
+    @classmethod
+    def get_remote_url(cls, location):
+        # In cases where the source is in a subdirectory, rather than
+        # alongside setup.py, we have to walk up from the location until
+        # we find a real setup.py.
+        orig_location = location
+        while not os.path.exists(os.path.join(location, 'setup.py')):
+            last_location = location
+            location = os.path.dirname(location)
+            if location == last_location:
+                # We've traversed up to the root of the filesystem without
+                # finding setup.py
+                logger.warning(
+                    "Could not find setup.py for directory %s (tried all "
+                    "parent directories)",
+                    orig_location,
+                )
+                return None
+
+        return cls._get_svn_url_rev(location)[0]
+
+    @classmethod
+    def _get_svn_url_rev(cls, location):
+        from pip._internal.exceptions import InstallationError
+
+        entries_path = os.path.join(location, cls.dirname, 'entries')
+        if os.path.exists(entries_path):
+            with open(entries_path) as f:
+                data = f.read()
+        else:  # subversion >= 1.7 does not have the 'entries' file
+            data = ''
+
+        if (data.startswith('8') or
+                data.startswith('9') or
+                data.startswith('10')):
+            data = list(map(str.splitlines, data.split('\n\x0c\n')))
+            del data[0][0]  # get rid of the '8'
+            url = data[0][3]
+            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
+        elif data.startswith('<?xml'):
+            match = _svn_xml_url_re.search(data)
+            if not match:
+                raise ValueError('Badly formatted data: %r' % data)
+            url = match.group(1)  # get repository URL
+            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
+        else:
+            try:
+                # subversion >= 1.7
+                xml = cls.run_command(
+                    ['info', '--xml', location],
+                    show_stdout=False,
+                )
+                url = _svn_info_xml_url_re.search(xml).group(1)
+                revs = [
+                    int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
+                ]
+            except InstallationError:
+                url, revs = None, []
+
+        if revs:
+            rev = max(revs)
+        else:
+            rev = 0
+
+        return url, rev
+
+    @classmethod
+    def get_src_requirement(cls, location, project_name):
+        repo = cls.get_remote_url(location)
+        if repo is None:
+            return None
+        repo = 'svn+' + repo
+        rev = cls.get_revision(location)
+        return make_vcs_requirement_url(repo, rev, project_name)
+
+    def is_commit_id_equal(self, dest, name):
+        """Always assume the versions don't match"""
+        return False
+
+
+vcs.register(Subversion)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/subversion.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/subversion.pyc
new file mode 100644
index 0000000..fc5b09a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/vcs/subversion.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/wheel.py b/project/venv/lib/python2.7/site-packages/pip/_internal/wheel.py
new file mode 100644
index 0000000..67bcc7f
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_internal/wheel.py
@@ -0,0 +1,1095 @@
+"""
+Support for installing and building the "wheel" binary package format.
+"""
+from __future__ import absolute_import
+
+import collections
+import compileall
+import csv
+import hashlib
+import logging
+import os.path
+import re
+import shutil
+import stat
+import sys
+import warnings
+from base64 import urlsafe_b64encode
+from email.parser import Parser
+
+from pip._vendor import pkg_resources
+from pip._vendor.distlib.scripts import ScriptMaker
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.six import StringIO
+
+from pip._internal import pep425tags
+from pip._internal.download import path_to_url, unpack_url
+from pip._internal.exceptions import (
+    InstallationError, InvalidWheelFilename, UnsupportedWheel,
+)
+from pip._internal.locations import (
+    PIP_DELETE_MARKER_FILENAME, distutils_scheme,
+)
+from pip._internal.models.link import Link
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import (
+    call_subprocess, captured_stdout, ensure_dir, read_chunks,
+)
+from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.utils.ui import open_spinner
+
+if MYPY_CHECK_RUNNING:
+    from typing import (  # noqa: F401
+        Dict, List, Optional, Sequence, Mapping, Tuple, IO, Text, Any,
+        Union, Iterable
+    )
+    from pip._vendor.packaging.requirements import Requirement  # noqa: F401
+    from pip._internal.req.req_install import InstallRequirement  # noqa: F401
+    from pip._internal.download import PipSession  # noqa: F401
+    from pip._internal.index import FormatControl, PackageFinder  # noqa: F401
+    from pip._internal.operations.prepare import (  # noqa: F401
+        RequirementPreparer
+    )
+    from pip._internal.cache import WheelCache  # noqa: F401
+    from pip._internal.pep425tags import Pep425Tag  # noqa: F401
+
+    InstalledCSVRow = Tuple[str, ...]
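[Editor's note: illustrative sketch, not part of the committed file.]
The RECORD bookkeeping in this module stores each installed file as a
(path, hash, size) row; the hash format is the PEP 376/427 one that the
rehash() function below produces: urlsafe base64 of the SHA-256 digest with
the trailing '=' padding stripped. A minimal standalone sketch of that
format (record_hash is a hypothetical name, not pip API):

    import hashlib
    from base64 import urlsafe_b64encode

    def record_hash(data):
        # RECORD-style hash: 'sha256=' + urlsafe-b64(digest), '=' stripped
        digest = urlsafe_b64encode(hashlib.sha256(data).digest())
        return 'sha256=' + digest.decode('latin1').rstrip('=')

    # record_hash(b'hello') == 'sha256=LPJNul-wow4m6DsqxbninhsWHlwfp0JecwQzYpOLmCQ'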
+ + +VERSION_COMPATIBLE = (1, 0) + + +logger = logging.getLogger(__name__) + + +def normpath(src, p): + return os.path.relpath(src, p).replace(os.path.sep, '/') + + +def rehash(path, blocksize=1 << 20): + # type: (str, int) -> Tuple[str, str] + """Return (hash, length) for path using hashlib.sha256()""" + h = hashlib.sha256() + length = 0 + with open(path, 'rb') as f: + for block in read_chunks(f, size=blocksize): + length += len(block) + h.update(block) + digest = 'sha256=' + urlsafe_b64encode( + h.digest() + ).decode('latin1').rstrip('=') + # unicode/str python2 issues + return (digest, str(length)) # type: ignore + + +def open_for_csv(name, mode): + # type: (str, Text) -> IO + if sys.version_info[0] < 3: + nl = {} # type: Dict[str, Any] + bin = 'b' + else: + nl = {'newline': ''} # type: Dict[str, Any] + bin = '' + return open(name, mode + bin, **nl) + + +def replace_python_tag(wheelname, new_tag): + # type: (str, str) -> str + """Replace the Python tag in a wheel file name with a new value. + """ + parts = wheelname.split('-') + parts[-3] = new_tag + return '-'.join(parts) + + +def fix_script(path): + # type: (str) -> Optional[bool] + """Replace #!python with #!/path/to/python + Return True if file was changed.""" + # XXX RECORD hashes will need to be updated + if os.path.isfile(path): + with open(path, 'rb') as script: + firstline = script.readline() + if not firstline.startswith(b'#!python'): + return False + exename = sys.executable.encode(sys.getfilesystemencoding()) + firstline = b'#!' + exename + os.linesep.encode("ascii") + rest = script.read() + with open(path, 'wb') as script: + script.write(firstline) + script.write(rest) + return True + return None + + +dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>.+?))?) + \.dist-info$""", re.VERBOSE) + + +def root_is_purelib(name, wheeldir): + # type: (str, str) -> bool + """ + Return True if the extracted wheel in wheeldir should go into purelib. + """ + name_folded = name.replace("-", "_") + for item in os.listdir(wheeldir): + match = dist_info_re.match(item) + if match and match.group('name') == name_folded: + with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel: + for line in wheel: + line = line.lower().rstrip() + if line == "root-is-purelib: true": + return True + return False + + +def get_entrypoints(filename): + # type: (str) -> Tuple[Dict[str, str], Dict[str, str]] + if not os.path.exists(filename): + return {}, {} + + # This is done because you can pass a string to entry_points wrappers which + # means that they may or may not be valid INI files. The attempt here is to + # strip leading and trailing whitespace in order to make them valid INI + # files. + with open(filename) as fp: + data = StringIO() + for line in fp: + data.write(line.strip()) + data.write("\n") + data.seek(0) + + # get the entry points and then the script names + entry_points = pkg_resources.EntryPoint.parse_map(data) + console = entry_points.get('console_scripts', {}) + gui = entry_points.get('gui_scripts', {}) + + def _split_ep(s): + """get the string representation of EntryPoint, remove space and split + on '='""" + return str(s).replace(" ", "").split("=") + + # convert the EntryPoint objects into strings with module:function + console = dict(_split_ep(v) for v in console.values()) + gui = dict(_split_ep(v) for v in gui.values()) + return console, gui + + +def message_about_scripts_not_on_PATH(scripts): + # type: (Sequence[str]) -> Optional[str] + """Determine if any scripts are not on PATH and format a warning. 
+ + Returns a warning message if one or more scripts are not on PATH, + otherwise None. + """ + if not scripts: + return None + + # Group scripts by the path they were installed in + grouped_by_dir = collections.defaultdict(set) # type: Dict[str, set] + for destfile in scripts: + parent_dir = os.path.dirname(destfile) + script_name = os.path.basename(destfile) + grouped_by_dir[parent_dir].add(script_name) + + # We don't want to warn for directories that are on PATH. + not_warn_dirs = [ + os.path.normcase(i).rstrip(os.sep) for i in + os.environ.get("PATH", "").split(os.pathsep) + ] + # If an executable sits with sys.executable, we don't warn for it. + # This covers the case of venv invocations without activating the venv. + not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable))) + warn_for = { + parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items() + if os.path.normcase(parent_dir) not in not_warn_dirs + } + if not warn_for: + return None + + # Format a message + msg_lines = [] + for parent_dir, scripts in warn_for.items(): + scripts = sorted(scripts) + if len(scripts) == 1: + start_text = "script {} is".format(scripts[0]) + else: + start_text = "scripts {} are".format( + ", ".join(scripts[:-1]) + " and " + scripts[-1] + ) + + msg_lines.append( + "The {} installed in '{}' which is not on PATH." + .format(start_text, parent_dir) + ) + + last_line_fmt = ( + "Consider adding {} to PATH or, if you prefer " + "to suppress this warning, use --no-warn-script-location." + ) + if len(msg_lines) == 1: + msg_lines.append(last_line_fmt.format("this directory")) + else: + msg_lines.append(last_line_fmt.format("these directories")) + + # Returns the formatted multiline message + return "\n".join(msg_lines) + + +def sorted_outrows(outrows): + # type: (Iterable[InstalledCSVRow]) -> List[InstalledCSVRow] + """ + Return the given rows of a RECORD file in sorted order. + + Each row is a 3-tuple (path, hash, size) and corresponds to a record of + a RECORD file (see PEP 376 and PEP 427 for details). For the rows + passed to this function, the size can be an integer as an int or string, + or the empty string. + """ + # Normally, there should only be one row per path, in which case the + # second and third elements don't come into play when sorting. + # However, in cases in the wild where a path might happen to occur twice, + # we don't want the sort operation to trigger an error (but still want + # determinism). Since the third element can be an int or string, we + # coerce each element to a string to avoid a TypeError in this case. + # For additional background, see-- + # https://github.com/pypa/pip/issues/5868 + return sorted(outrows, key=lambda row: tuple(str(x) for x in row)) + + +def get_csv_rows_for_installed( + old_csv_rows, # type: Iterable[List[str]] + installed, # type: Dict[str, str] + changed, # type: set + generated, # type: List[str] + lib_dir, # type: str +): + # type: (...) -> List[InstalledCSVRow] + """ + :param installed: A map from archive RECORD path to installation RECORD + path. + """ + installed_rows = [] # type: List[InstalledCSVRow] + for row in old_csv_rows: + if len(row) > 3: + logger.warning( + 'RECORD line has more than three elements: {}'.format(row) + ) + # Make a copy because we are mutating the row. 
+ row = list(row) + old_path = row[0] + new_path = installed.pop(old_path, old_path) + row[0] = new_path + if new_path in changed: + digest, length = rehash(new_path) + row[1] = digest + row[2] = length + installed_rows.append(tuple(row)) + for f in generated: + digest, length = rehash(f) + installed_rows.append((normpath(f, lib_dir), digest, str(length))) + for f in installed: + installed_rows.append((installed[f], '', '')) + return installed_rows + + +def move_wheel_files( + name, # type: str + req, # type: Requirement + wheeldir, # type: str + user=False, # type: bool + home=None, # type: Optional[str] + root=None, # type: Optional[str] + pycompile=True, # type: bool + scheme=None, # type: Optional[Mapping[str, str]] + isolated=False, # type: bool + prefix=None, # type: Optional[str] + warn_script_location=True # type: bool +): + # type: (...) -> None + """Install a wheel""" + # TODO: Investigate and break this up. + # TODO: Look into moving this into a dedicated class for representing an + # installation. + + if not scheme: + scheme = distutils_scheme( + name, user=user, home=home, root=root, isolated=isolated, + prefix=prefix, + ) + + if root_is_purelib(name, wheeldir): + lib_dir = scheme['purelib'] + else: + lib_dir = scheme['platlib'] + + info_dir = [] # type: List[str] + data_dirs = [] + source = wheeldir.rstrip(os.path.sep) + os.path.sep + + # Record details of the files moved + # installed = files copied from the wheel to the destination + # changed = files changed while installing (scripts #! line typically) + # generated = files newly generated during the install (script wrappers) + installed = {} # type: Dict[str, str] + changed = set() + generated = [] # type: List[str] + + # Compile all of the pyc files that we're going to be installing + if pycompile: + with captured_stdout() as stdout: + with warnings.catch_warnings(): + warnings.filterwarnings('ignore') + compileall.compile_dir(source, force=True, quiet=True) + logger.debug(stdout.getvalue()) + + def record_installed(srcfile, destfile, modified=False): + """Map archive RECORD paths to installation RECORD paths.""" + oldpath = normpath(srcfile, wheeldir) + newpath = normpath(destfile, lib_dir) + installed[oldpath] = newpath + if modified: + changed.add(destfile) + + def clobber(source, dest, is_base, fixer=None, filter=None): + ensure_dir(dest) # common for the 'include' path + + for dir, subdirs, files in os.walk(source): + basedir = dir[len(source):].lstrip(os.path.sep) + destdir = os.path.join(dest, basedir) + if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'): + continue + for s in subdirs: + destsubdir = os.path.join(dest, basedir, s) + if is_base and basedir == '' and destsubdir.endswith('.data'): + data_dirs.append(s) + continue + elif (is_base and + s.endswith('.dist-info') and + canonicalize_name(s).startswith( + canonicalize_name(req.name))): + assert not info_dir, ('Multiple .dist-info directories: ' + + destsubdir + ', ' + + ', '.join(info_dir)) + info_dir.append(destsubdir) + for f in files: + # Skip unwanted files + if filter and filter(f): + continue + srcfile = os.path.join(dir, f) + destfile = os.path.join(dest, basedir, f) + # directory creation is lazy and after the file filtering above + # to ensure we don't install empty dirs; empty dirs can't be + # uninstalled. + ensure_dir(destdir) + + # copyfile (called below) truncates the destination if it + # exists and then writes the new contents. 
This is fine in most + # cases, but can cause a segfault if pip has loaded a shared + # object (e.g. from pyopenssl through its vendored urllib3) + # Since the shared object is mmap'd an attempt to call a + # symbol in it will then cause a segfault. Unlinking the file + # allows writing of new contents while allowing the process to + # continue to use the old copy. + if os.path.exists(destfile): + os.unlink(destfile) + + # We use copyfile (not move, copy, or copy2) to be extra sure + # that we are not moving directories over (copyfile fails for + # directories) as well as to ensure that we are not copying + # over any metadata because we want more control over what + # metadata we actually copy over. + shutil.copyfile(srcfile, destfile) + + # Copy over the metadata for the file, currently this only + # includes the atime and mtime. + st = os.stat(srcfile) + if hasattr(os, "utime"): + os.utime(destfile, (st.st_atime, st.st_mtime)) + + # If our file is executable, then make our destination file + # executable. + if os.access(srcfile, os.X_OK): + st = os.stat(srcfile) + permissions = ( + st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH + ) + os.chmod(destfile, permissions) + + changed = False + if fixer: + changed = fixer(destfile) + record_installed(srcfile, destfile, changed) + + clobber(source, lib_dir, True) + + assert info_dir, "%s .dist-info directory not found" % req + + # Get the defined entry points + ep_file = os.path.join(info_dir[0], 'entry_points.txt') + console, gui = get_entrypoints(ep_file) + + def is_entrypoint_wrapper(name): + # EP, EP.exe and EP-script.py are scripts generated for + # entry point EP by setuptools + if name.lower().endswith('.exe'): + matchname = name[:-4] + elif name.lower().endswith('-script.py'): + matchname = name[:-10] + elif name.lower().endswith(".pya"): + matchname = name[:-4] + else: + matchname = name + # Ignore setuptools-generated scripts + return (matchname in console or matchname in gui) + + for datadir in data_dirs: + fixer = None + filter = None + for subdir in os.listdir(os.path.join(wheeldir, datadir)): + fixer = None + if subdir == 'scripts': + fixer = fix_script + filter = is_entrypoint_wrapper + source = os.path.join(wheeldir, datadir, subdir) + dest = scheme[subdir] + clobber(source, dest, False, fixer=fixer, filter=filter) + + maker = ScriptMaker(None, scheme['scripts']) + + # Ensure old scripts are overwritten. + # See https://github.com/pypa/pip/issues/1800 + maker.clobber = True + + # Ensure we don't generate any variants for scripts because this is almost + # never what somebody wants. + # See https://bitbucket.org/pypa/distlib/issue/35/ + maker.variants = {''} + + # This is required because otherwise distlib creates scripts that are not + # executable. + # See https://bitbucket.org/pypa/distlib/issue/32/ + maker.set_mode = True + + # Simplify the script and fix the fact that the default script swallows + # every single stack trace. + # See https://bitbucket.org/pypa/distlib/issue/34/ + # See https://bitbucket.org/pypa/distlib/issue/33/ + def _get_script_text(entry): + if entry.suffix is None: + raise InstallationError( + "Invalid script entry point: %s for req: %s - A callable " + "suffix is required. Cf https://packaging.python.org/en/" + "latest/distributing.html#console-scripts for more " + "information." 
% (entry, req) + ) + return maker.script_template % { + "module": entry.prefix, + "import_name": entry.suffix.split(".")[0], + "func": entry.suffix, + } + # ignore type, because mypy disallows assigning to a method, + # see https://github.com/python/mypy/issues/2427 + maker._get_script_text = _get_script_text # type: ignore + maker.script_template = r"""# -*- coding: utf-8 -*- +import re +import sys + +from %(module)s import %(import_name)s + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(%(func)s()) +""" + + # Special case pip and setuptools to generate versioned wrappers + # + # The issue is that some projects (specifically, pip and setuptools) use + # code in setup.py to create "versioned" entry points - pip2.7 on Python + # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into + # the wheel metadata at build time, and so if the wheel is installed with + # a *different* version of Python the entry points will be wrong. The + # correct fix for this is to enhance the metadata to be able to describe + # such versioned entry points, but that won't happen till Metadata 2.0 is + # available. + # In the meantime, projects using versioned entry points will either have + # incorrect versioned entry points, or they will not be able to distribute + # "universal" wheels (i.e., they will need a wheel per Python version). + # + # Because setuptools and pip are bundled with _ensurepip and virtualenv, + # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we + # override the versioned entry points in the wheel and generate the + # correct ones. This code is purely a short-term measure until Metadata 2.0 + # is available. + # + # To add the level of hack in this section of code, in order to support + # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment + # variable which will control which version scripts get installed. + # + # ENSUREPIP_OPTIONS=altinstall + # - Only pipX.Y and easy_install-X.Y will be generated and installed + # ENSUREPIP_OPTIONS=install + # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note + # that this option is technically if ENSUREPIP_OPTIONS is set and is + # not altinstall + # DEFAULT + # - The default behavior is to install pip, pipX, pipX.Y, easy_install + # and easy_install-X.Y. 
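[Editor's note: illustrative only, not part of the committed file.]
Concretely, on CPython 2.7 with ENSUREPIP_OPTIONS unset, the block below
expands a single console entry point (assuming the usual
'pip = pip._internal:main' target) into three wrapper specs passed to
maker.make(), yielding e.g. on POSIX:

    # 'pip = pip._internal:main'     -> bin/pip
    # 'pip2 = pip._internal:main'    -> bin/pip2    (sys.version[:1] == '2')
    # 'pip2.7 = pip._internal:main'  -> bin/pip2.7  (sys.version[:3] == '2.7')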
+ pip_script = console.pop('pip', None) + if pip_script: + if "ENSUREPIP_OPTIONS" not in os.environ: + spec = 'pip = ' + pip_script + generated.extend(maker.make(spec)) + + if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall": + spec = 'pip%s = %s' % (sys.version[:1], pip_script) + generated.extend(maker.make(spec)) + + spec = 'pip%s = %s' % (sys.version[:3], pip_script) + generated.extend(maker.make(spec)) + # Delete any other versioned pip entry points + pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)] + for k in pip_ep: + del console[k] + easy_install_script = console.pop('easy_install', None) + if easy_install_script: + if "ENSUREPIP_OPTIONS" not in os.environ: + spec = 'easy_install = ' + easy_install_script + generated.extend(maker.make(spec)) + + spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script) + generated.extend(maker.make(spec)) + # Delete any other versioned easy_install entry points + easy_install_ep = [ + k for k in console if re.match(r'easy_install(-\d\.\d)?$', k) + ] + for k in easy_install_ep: + del console[k] + + # Generate the console and GUI entry points specified in the wheel + if len(console) > 0: + generated_console_scripts = maker.make_multiple( + ['%s = %s' % kv for kv in console.items()] + ) + generated.extend(generated_console_scripts) + + if warn_script_location: + msg = message_about_scripts_not_on_PATH(generated_console_scripts) + if msg is not None: + logger.warning(msg) + + if len(gui) > 0: + generated.extend( + maker.make_multiple( + ['%s = %s' % kv for kv in gui.items()], + {'gui': True} + ) + ) + + # Record pip as the installer + installer = os.path.join(info_dir[0], 'INSTALLER') + temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip') + with open(temp_installer, 'wb') as installer_file: + installer_file.write(b'pip\n') + shutil.move(temp_installer, installer) + generated.append(installer) + + # Record details of all files installed + record = os.path.join(info_dir[0], 'RECORD') + temp_record = os.path.join(info_dir[0], 'RECORD.pip') + with open_for_csv(record, 'r') as record_in: + with open_for_csv(temp_record, 'w+') as record_out: + reader = csv.reader(record_in) + outrows = get_csv_rows_for_installed( + reader, installed=installed, changed=changed, + generated=generated, lib_dir=lib_dir, + ) + writer = csv.writer(record_out) + # Sort to simplify testing. + for row in sorted_outrows(outrows): + writer.writerow(row) + shutil.move(temp_record, record) + + +def wheel_version(source_dir): + # type: (Optional[str]) -> Optional[Tuple[int, ...]] + """ + Return the Wheel-Version of an extracted wheel, if possible. + + Otherwise, return None if we couldn't parse / extract it. + """ + try: + dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0] + + wheel_data = dist.get_metadata('WHEEL') + wheel_data = Parser().parsestr(wheel_data) + + version = wheel_data['Wheel-Version'].strip() + version = tuple(map(int, version.split('.'))) + return version + except Exception: + return None + + +def check_compatibility(version, name): + # type: (Optional[Tuple[int, ...]], str) -> None + """ + Raises errors or warns if called with an incompatible Wheel-Version. + + Pip should refuse to install a Wheel-Version that's a major series + ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when + installing a version only minor version ahead (e.g 1.2 > 1.1). 
+
+    version: a 2-tuple representing a Wheel-Version (Major, Minor)
+    name: name of wheel or package to raise exception about
+
+    :raises UnsupportedWheel: when an incompatible Wheel-Version is given
+    """
+    if not version:
+        raise UnsupportedWheel(
+            "%s is in an unsupported or invalid wheel" % name
+        )
+    if version[0] > VERSION_COMPATIBLE[0]:
+        raise UnsupportedWheel(
+            "%s's Wheel-Version (%s) is not compatible with this version "
+            "of pip" % (name, '.'.join(map(str, version)))
+        )
+    elif version > VERSION_COMPATIBLE:
+        logger.warning(
+            'Installing from a newer Wheel-Version (%s)',
+            '.'.join(map(str, version)),
+        )
+
+
+class Wheel(object):
+    """A wheel file"""
+
+    # TODO: Maybe move the class into the models sub-package
+    # TODO: Maybe move the install code into this class
+
+    wheel_file_re = re.compile(
+        r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?))
+        ((-(?P<build>\d[^-]*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
+        \.whl|\.dist-info)$""",
+        re.VERBOSE
+    )
+
+    def __init__(self, filename):
+        # type: (str) -> None
+        """
+        :raises InvalidWheelFilename: when the filename is invalid for a wheel
+        """
+        wheel_info = self.wheel_file_re.match(filename)
+        if not wheel_info:
+            raise InvalidWheelFilename(
+                "%s is not a valid wheel filename." % filename
+            )
+        self.filename = filename
+        self.name = wheel_info.group('name').replace('_', '-')
+        # we'll assume "_" means "-" due to wheel naming scheme
+        # (https://github.com/pypa/pip/issues/1150)
+        self.version = wheel_info.group('ver').replace('_', '-')
+        self.build_tag = wheel_info.group('build')
+        self.pyversions = wheel_info.group('pyver').split('.')
+        self.abis = wheel_info.group('abi').split('.')
+        self.plats = wheel_info.group('plat').split('.')
+
+        # All the tag combinations from this file
+        self.file_tags = {
+            (x, y, z) for x in self.pyversions
+            for y in self.abis for z in self.plats
+        }
+
+    def support_index_min(self, tags=None):
+        # type: (Optional[List[Pep425Tag]]) -> Optional[int]
+        """
+        Return the lowest index that one of the wheel's file_tag combinations
+        achieves in the supported_tags list, e.g. if there are 8 supported
+        tags and one of the file tags is first in the list, then return 0.
+        Returns None if the wheel is not supported.
+        """
+        if tags is None:  # for mock
+            tags = pep425tags.get_supported()
+        indexes = [tags.index(c) for c in self.file_tags if c in tags]
+        return min(indexes) if indexes else None
+
+    def supported(self, tags=None):
+        # type: (Optional[List[Pep425Tag]]) -> bool
+        """Is this wheel supported on this system?"""
+        if tags is None:  # for mock
+            tags = pep425tags.get_supported()
+        return bool(set(tags).intersection(self.file_tags))
+
+
+def _contains_egg_info(
+        s, _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
+    """Determine whether the string looks like an egg_info.
+
+    :param s: The string to parse. E.g. foo-2.1
+    """
+    return bool(_egg_info_re.search(s))
+
+
+def should_use_ephemeral_cache(
+    req,  # type: InstallRequirement
+    format_control,  # type: FormatControl
+    autobuilding,  # type: bool
+    cache_available  # type: bool
+):
+    # type: (...) -> Optional[bool]
+    """
+    Return whether to build an InstallRequirement object using the
+    ephemeral cache.
+
+    :param cache_available: whether a cache directory is available for the
+        autobuilding=True case.
+
+    :return: True or False to build the requirement with ephem_cache=True
+        or False, respectively; or None not to build the requirement.
+ """ + if req.constraint: + return None + if req.is_wheel: + if not autobuilding: + logger.info( + 'Skipping %s, due to already being wheel.', req.name, + ) + return None + if not autobuilding: + return False + + if req.editable or not req.source_dir: + return None + + if req.link and not req.link.is_artifact: + # VCS checkout. Build wheel just for this run. + return True + + if "binary" not in format_control.get_allowed_formats( + canonicalize_name(req.name)): + logger.info( + "Skipping bdist_wheel for %s, due to binaries " + "being disabled for it.", req.name, + ) + return None + + link = req.link + base, ext = link.splitext() + if cache_available and _contains_egg_info(base): + return False + + # Otherwise, build the wheel just for this run using the ephemeral + # cache since we are either in the case of e.g. a local directory, or + # no cache directory is available to use. + return True + + +def format_command( + command_args, # type: List[str] + command_output, # type: str +): + # type: (...) -> str + """ + Format command information for logging. + """ + text = 'Command arguments: {}\n'.format(command_args) + + if not command_output: + text += 'Command output: None' + elif logger.getEffectiveLevel() > logging.DEBUG: + text += 'Command output: [use --verbose to show]' + else: + if not command_output.endswith('\n'): + command_output += '\n' + text += ( + 'Command output:\n{}' + '-----------------------------------------' + ).format(command_output) + + return text + + +def get_legacy_build_wheel_path( + names, # type: List[str] + temp_dir, # type: str + req, # type: InstallRequirement + command_args, # type: List[str] + command_output, # type: str +): + # type: (...) -> Optional[str] + """ + Return the path to the wheel in the temporary build directory. + """ + # Sort for determinism. + names = sorted(names) + if not names: + msg = ( + 'Legacy build of wheel for {!r} created no files.\n' + ).format(req.name) + msg += format_command(command_args, command_output) + logger.warning(msg) + return None + + if len(names) > 1: + msg = ( + 'Legacy build of wheel for {!r} created more than one file.\n' + 'Filenames (choosing first): {}\n' + ).format(req.name, names) + msg += format_command(command_args, command_output) + logger.warning(msg) + + return os.path.join(temp_dir, names[0]) + + +class WheelBuilder(object): + """Build wheels from a RequirementSet.""" + + def __init__( + self, + finder, # type: PackageFinder + preparer, # type: RequirementPreparer + wheel_cache, # type: WheelCache + build_options=None, # type: Optional[List[str]] + global_options=None, # type: Optional[List[str]] + no_clean=False # type: bool + ): + # type: (...) -> None + self.finder = finder + self.preparer = preparer + self.wheel_cache = wheel_cache + + self._wheel_dir = preparer.wheel_download_dir + + self.build_options = build_options or [] + self.global_options = global_options or [] + self.no_clean = no_clean + + def _build_one(self, req, output_dir, python_tag=None): + """Build one wheel. + + :return: The filename of the built wheel, or None if the build failed. 
+ """ + # Install build deps into temporary directory (PEP 518) + with req.build_env: + return self._build_one_inside_env(req, output_dir, + python_tag=python_tag) + + def _build_one_inside_env(self, req, output_dir, python_tag=None): + with TempDirectory(kind="wheel") as temp_dir: + if req.use_pep517: + builder = self._build_one_pep517 + else: + builder = self._build_one_legacy + wheel_path = builder(req, temp_dir.path, python_tag=python_tag) + if wheel_path is not None: + wheel_name = os.path.basename(wheel_path) + dest_path = os.path.join(output_dir, wheel_name) + try: + shutil.move(wheel_path, dest_path) + logger.info('Stored in directory: %s', output_dir) + return dest_path + except Exception: + pass + # Ignore return, we can't do anything else useful. + self._clean_one(req) + return None + + def _base_setup_args(self, req): + # NOTE: Eventually, we'd want to also -S to the flags here, when we're + # isolating. Currently, it breaks Python in virtualenvs, because it + # relies on site.py to find parts of the standard library outside the + # virtualenv. + return [ + sys.executable, '-u', '-c', + SETUPTOOLS_SHIM % req.setup_py + ] + list(self.global_options) + + def _build_one_pep517(self, req, tempd, python_tag=None): + """Build one InstallRequirement using the PEP 517 build process. + + Returns path to wheel if successfully built. Otherwise, returns None. + """ + assert req.metadata_directory is not None + try: + req.spin_message = 'Building wheel for %s (PEP 517)' % (req.name,) + logger.debug('Destination directory: %s', tempd) + wheel_name = req.pep517_backend.build_wheel( + tempd, + metadata_directory=req.metadata_directory + ) + if python_tag: + # General PEP 517 backends don't necessarily support + # a "--python-tag" option, so we rename the wheel + # file directly. + new_name = replace_python_tag(wheel_name, python_tag) + os.rename( + os.path.join(tempd, wheel_name), + os.path.join(tempd, new_name) + ) + # Reassign to simplify the return at the end of function + wheel_name = new_name + except Exception: + logger.error('Failed building wheel for %s', req.name) + return None + return os.path.join(tempd, wheel_name) + + def _build_one_legacy(self, req, tempd, python_tag=None): + """Build one InstallRequirement using the "legacy" build process. + + Returns path to wheel if successfully built. Otherwise, returns None. 
+ """ + base_args = self._base_setup_args(req) + + spin_message = 'Building wheel for %s (setup.py)' % (req.name,) + with open_spinner(spin_message) as spinner: + logger.debug('Destination directory: %s', tempd) + wheel_args = base_args + ['bdist_wheel', '-d', tempd] \ + + self.build_options + + if python_tag is not None: + wheel_args += ["--python-tag", python_tag] + + try: + output = call_subprocess(wheel_args, cwd=req.setup_py_dir, + show_stdout=False, spinner=spinner) + except Exception: + spinner.finish("error") + logger.error('Failed building wheel for %s', req.name) + return None + names = os.listdir(tempd) + wheel_path = get_legacy_build_wheel_path( + names=names, + temp_dir=tempd, + req=req, + command_args=wheel_args, + command_output=output, + ) + return wheel_path + + def _clean_one(self, req): + base_args = self._base_setup_args(req) + + logger.info('Running setup.py clean for %s', req.name) + clean_args = base_args + ['clean', '--all'] + try: + call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False) + return True + except Exception: + logger.error('Failed cleaning build dir for %s', req.name) + return False + + def build( + self, + requirements, # type: Iterable[InstallRequirement] + session, # type: PipSession + autobuilding=False # type: bool + ): + # type: (...) -> List[InstallRequirement] + """Build wheels. + + :param unpack: If True, replace the sdist we built from with the + newly built wheel, in preparation for installation. + :return: True if all the wheels built correctly. + """ + buildset = [] + format_control = self.finder.format_control + # Whether a cache directory is available for autobuilding=True. + cache_available = bool(self._wheel_dir or self.wheel_cache.cache_dir) + + for req in requirements: + ephem_cache = should_use_ephemeral_cache( + req, format_control=format_control, autobuilding=autobuilding, + cache_available=cache_available, + ) + if ephem_cache is None: + continue + + buildset.append((req, ephem_cache)) + + if not buildset: + return [] + + # Is any wheel build not using the ephemeral cache? + if any(not ephem_cache for _, ephem_cache in buildset): + have_directory_for_build = self._wheel_dir or ( + autobuilding and self.wheel_cache.cache_dir + ) + assert have_directory_for_build + + # TODO by @pradyunsg + # Should break up this method into 2 separate methods. + + # Build the wheels. + logger.info( + 'Building wheels for collected packages: %s', + ', '.join([req.name for (req, _) in buildset]), + ) + _cache = self.wheel_cache # shorter name + with indent_log(): + build_success, build_failure = [], [] + for req, ephem in buildset: + python_tag = None + if autobuilding: + python_tag = pep425tags.implementation_tag + if ephem: + output_dir = _cache.get_ephem_path_for_link(req.link) + else: + output_dir = _cache.get_path_for_link(req.link) + try: + ensure_dir(output_dir) + except OSError as e: + logger.warning("Building wheel for %s failed: %s", + req.name, e) + build_failure.append(req) + continue + else: + output_dir = self._wheel_dir + wheel_file = self._build_one( + req, output_dir, + python_tag=python_tag, + ) + if wheel_file: + build_success.append(req) + if autobuilding: + # XXX: This is mildly duplicative with prepare_files, + # but not close enough to pull out to a single common + # method. + # The code below assumes temporary source dirs - + # prevent it doing bad things. 
+                        if req.source_dir and not os.path.exists(os.path.join(
+                                req.source_dir, PIP_DELETE_MARKER_FILENAME)):
+                            raise AssertionError(
+                                "bad source dir - missing marker")
+                        # Delete the source we built the wheel from
+                        req.remove_temporary_source()
+                        # set the build directory again - name is known from
+                        # the work prepare_files did.
+                        req.source_dir = req.build_location(
+                            self.preparer.build_dir
+                        )
+                        # Update the link for this.
+                        req.link = Link(path_to_url(wheel_file))
+                        assert req.link.is_wheel
+                        # extract the wheel into the dir
+                        unpack_url(
+                            req.link, req.source_dir, None, False,
+                            session=session,
+                        )
+                else:
+                    build_failure.append(req)
+
+        # notify success/failure
+        if build_success:
+            logger.info(
+                'Successfully built %s',
+                ' '.join([req.name for req in build_success]),
+            )
+        if build_failure:
+            logger.info(
+                'Failed to build %s',
+                ' '.join([req.name for req in build_failure]),
+            )
+        # Return a list of requirements that failed to build
+        return build_failure
diff --git a/project/venv/lib/python2.7/site-packages/pip/_internal/wheel.pyc b/project/venv/lib/python2.7/site-packages/pip/_internal/wheel.pyc
new file mode 100644
index 0000000..f2c40c1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_internal/wheel.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/__init__.py
new file mode 100644
index 0000000..b919b54
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/__init__.py
@@ -0,0 +1,111 @@
+"""
+pip._vendor is for vendoring dependencies of pip to prevent needing pip to
+depend on something external.
+
+Files inside of pip._vendor should be considered immutable and should only be
+updated to versions from upstream.
+"""
+from __future__ import absolute_import
+
+import glob
+import os.path
+import sys
+
+# Downstream redistributors which have debundled our dependencies should also
+# patch this value to be true. This will trigger the additional patching
+# to cause things like "six" to be available in the pip._vendor namespace.
+DEBUNDLED = False
+
+# By default, look in this directory for a bunch of .whl files which we will
+# add to the beginning of sys.path before attempting to import anything. This
+# is done to support downstream re-distributors like Debian and Fedora who
+# wish to create their own Wheels for our dependencies to aid in debundling.
+WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
+
+
+# Define a small helper function to alias our vendored modules to the real
+# ones if the vendored ones do not exist. The idea for this was taken from
+# https://github.com/kennethreitz/requests/pull/2567.
+def vendored(modulename):
+    vendored_name = "{0}.{1}".format(__name__, modulename)
+
+    try:
+        __import__(vendored_name, globals(), locals(), level=0)
+    except ImportError:
+        try:
+            __import__(modulename, globals(), locals(), level=0)
+        except ImportError:
+            # We can just silently allow import failures to pass here. If we
+            # got to this point it means that ``import pip._vendor.whatever``
+            # failed and so did ``import whatever``. Since we're importing this
+            # upfront in an attempt to alias imports, not erroring here will
+            # just mean we get a regular import error whenever pip *actually*
+            # tries to import one of these modules to use it, which actually
+            # gives us a better error message than we would have otherwise
+            # gotten.
+ pass + else: + sys.modules[vendored_name] = sys.modules[modulename] + base, head = vendored_name.rsplit(".", 1) + setattr(sys.modules[base], head, sys.modules[modulename]) + + +# If we're operating in a debundled setup, then we want to go ahead and trigger +# the aliasing of our vendored libraries as well as looking for wheels to add +# to our sys.path. This will cause all of this code to be a no-op typically +# however downstream redistributors can enable it in a consistent way across +# all platforms. +if DEBUNDLED: + # Actually look inside of WHEEL_DIR to find .whl files and add them to the + # front of our sys.path. + sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path + + # Actually alias all of our vendored dependencies. + vendored("cachecontrol") + vendored("colorama") + vendored("distlib") + vendored("distro") + vendored("html5lib") + vendored("lockfile") + vendored("six") + vendored("six.moves") + vendored("six.moves.urllib") + vendored("six.moves.urllib.parse") + vendored("packaging") + vendored("packaging.version") + vendored("packaging.specifiers") + vendored("pep517") + vendored("pkg_resources") + vendored("progress") + vendored("pytoml") + vendored("retrying") + vendored("requests") + vendored("requests.packages") + vendored("requests.packages.urllib3") + vendored("requests.packages.urllib3._collections") + vendored("requests.packages.urllib3.connection") + vendored("requests.packages.urllib3.connectionpool") + vendored("requests.packages.urllib3.contrib") + vendored("requests.packages.urllib3.contrib.ntlmpool") + vendored("requests.packages.urllib3.contrib.pyopenssl") + vendored("requests.packages.urllib3.exceptions") + vendored("requests.packages.urllib3.fields") + vendored("requests.packages.urllib3.filepost") + vendored("requests.packages.urllib3.packages") + vendored("requests.packages.urllib3.packages.ordered_dict") + vendored("requests.packages.urllib3.packages.six") + vendored("requests.packages.urllib3.packages.ssl_match_hostname") + vendored("requests.packages.urllib3.packages.ssl_match_hostname." + "_implementation") + vendored("requests.packages.urllib3.poolmanager") + vendored("requests.packages.urllib3.request") + vendored("requests.packages.urllib3.response") + vendored("requests.packages.urllib3.util") + vendored("requests.packages.urllib3.util.connection") + vendored("requests.packages.urllib3.util.request") + vendored("requests.packages.urllib3.util.response") + vendored("requests.packages.urllib3.util.retry") + vendored("requests.packages.urllib3.util.ssl_") + vendored("requests.packages.urllib3.util.timeout") + vendored("requests.packages.urllib3.util.url") + vendored("urllib3") diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/__init__.pyc new file mode 100644 index 0000000..8b06073 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/appdirs.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/appdirs.py new file mode 100644 index 0000000..2bd3911 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/appdirs.py @@ -0,0 +1,604 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright (c) 2005-2010 ActiveState Software Inc. +# Copyright (c) 2013 Eddy Petrișor + +"""Utilities for determining application-specific dirs. + +See <http://github.com/ActiveState/appdirs> for details and usage. 
+""" +# Dev Notes: +# - MSDN on where to store app data files: +# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 +# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html +# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html + +__version_info__ = (1, 4, 3) +__version__ = '.'.join(map(str, __version_info__)) + + +import sys +import os + +PY3 = sys.version_info[0] == 3 + +if PY3: + unicode = str + +if sys.platform.startswith('java'): + import platform + os_name = platform.java_ver()[3][0] + if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. + system = 'win32' + elif os_name.startswith('Mac'): # "Mac OS X", etc. + system = 'darwin' + else: # "Linux", "SunOS", "FreeBSD", etc. + # Setting this to "linux2" is not ideal, but only Windows or Mac + # are actually checked for and the rest of the module expects + # *sys.platform* style strings. + system = 'linux2' +else: + system = sys.platform + + + +def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> + for a discussion of issues. + + Typical user data directories are: + Mac OS X: ~/Library/Application Support/<AppName> + Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined + Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName> + Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName> + Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName> + Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName> + + For Unix, we follow the XDG spec and support $XDG_DATA_HOME. + That means, by default "~/.local/share/<AppName>". 
+ """ + if system == "win32": + if appauthor is None: + appauthor = appname + const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" + path = os.path.normpath(_get_win_folder(const)) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + elif system == 'darwin': + path = os.path.expanduser('~/Library/Application Support/') + if appname: + path = os.path.join(path, appname) + else: + path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): + r"""Return full path to the user-shared data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "multipath" is an optional parameter only applicable to *nix + which indicates that the entire list of data dirs should be + returned. By default, the first item from XDG_DATA_DIRS is + returned, or '/usr/local/share/<AppName>', + if XDG_DATA_DIRS is not set + + Typical site data directories are: + Mac OS X: /Library/Application Support/<AppName> + Unix: /usr/local/share/<AppName> or /usr/share/<AppName> + Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> + Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) + Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. + + For Unix, this is using the $XDG_DATA_DIRS[0] default. + + WARNING: Do not use this on Windows. See the Vista-Fail note above for why. + """ + if system == "win32": + if appauthor is None: + appauthor = appname + path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + elif system == 'darwin': + path = os.path.expanduser('/Library/Application Support') + if appname: + path = os.path.join(path, appname) + else: + # XDG default for $XDG_DATA_DIRS + # only first, if multipath is False + path = os.getenv('XDG_DATA_DIRS', + os.pathsep.join(['/usr/local/share', '/usr/share'])) + pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] + if appname: + if version: + appname = os.path.join(appname, version) + pathlist = [os.sep.join([x, appname]) for x in pathlist] + + if multipath: + path = os.pathsep.join(pathlist) + else: + path = pathlist[0] + return path + + if appname and version: + path = os.path.join(path, version) + return path + + +def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific config dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. 
Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> + for a discussion of issues. + + Typical user config directories are: + Mac OS X: same as user_data_dir + Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined + Win *: same as user_data_dir + + For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. + That means, by default "~/.config/<AppName>". + """ + if system in ["win32", "darwin"]: + path = user_data_dir(appname, appauthor, None, roaming) + else: + path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): + r"""Return full path to the user-shared data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "multipath" is an optional parameter only applicable to *nix + which indicates that the entire list of config dirs should be + returned. By default, the first item from XDG_CONFIG_DIRS is + returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set + + Typical site config directories are: + Mac OS X: same as site_data_dir + Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in + $XDG_CONFIG_DIRS + Win *: same as site_data_dir + Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) + + For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False + + WARNING: Do not use this on Windows. See the Vista-Fail note above for why. + """ + if system in ["win32", "darwin"]: + path = site_data_dir(appname, appauthor) + if appname and version: + path = os.path.join(path, version) + else: + # XDG default for $XDG_CONFIG_DIRS + # only first, if multipath is False + path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') + pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] + if appname: + if version: + appname = os.path.join(appname, version) + pathlist = [os.sep.join([x, appname]) for x in pathlist] + + if multipath: + path = os.pathsep.join(pathlist) + else: + path = pathlist[0] + return path + + +def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): + r"""Return full path to the user-specific cache dir for this application. + + "appname" is the name of application. 
+ If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "opinion" (boolean) can be False to disable the appending of + "Cache" to the base app data dir for Windows. See + discussion below. + + Typical user cache directories are: + Mac OS X: ~/Library/Caches/<AppName> + Unix: ~/.cache/<AppName> (XDG default) + Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache + Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache + + On Windows the only suggestion in the MSDN docs is that local settings go in + the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming + app data dir (the default returned by `user_data_dir` above). Apps typically + put cache data somewhere *under* the given dir here. Some examples: + ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache + ...\Acme\SuperApp\Cache\1.0 + OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. + This can be disabled with the `opinion=False` option. + """ + if system == "win32": + if appauthor is None: + appauthor = appname + path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + if opinion: + path = os.path.join(path, "Cache") + elif system == 'darwin': + path = os.path.expanduser('~/Library/Caches') + if appname: + path = os.path.join(path, appname) + else: + path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific state dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> + for a discussion of issues. + + Typical user state directories are: + Mac OS X: same as user_data_dir + Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined + Win *: same as user_data_dir + + For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state> + to extend the XDG spec and support $XDG_STATE_HOME. 
+ + That means, by default "~/.local/state/<AppName>". + """ + if system in ["win32", "darwin"]: + path = user_data_dir(appname, appauthor, None, roaming) + else: + path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): + r"""Return full path to the user-specific log dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "opinion" (boolean) can be False to disable the appending of + "Logs" to the base app data dir for Windows, and "log" to the + base cache dir for Unix. See discussion below. + + Typical user log directories are: + Mac OS X: ~/Library/Logs/<AppName> + Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined + Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs + Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs + + On Windows the only suggestion in the MSDN docs is that local settings + go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in + examples of what some windows apps use for a logs dir.) + + OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` + value for Windows and appends "log" to the user cache dir for Unix. + This can be disabled with the `opinion=False` option. 
+ """ + if system == "darwin": + path = os.path.join( + os.path.expanduser('~/Library/Logs'), + appname) + elif system == "win32": + path = user_data_dir(appname, appauthor, version) + version = False + if opinion: + path = os.path.join(path, "Logs") + else: + path = user_cache_dir(appname, appauthor, version) + version = False + if opinion: + path = os.path.join(path, "log") + if appname and version: + path = os.path.join(path, version) + return path + + +class AppDirs(object): + """Convenience wrapper for getting application dirs.""" + def __init__(self, appname=None, appauthor=None, version=None, + roaming=False, multipath=False): + self.appname = appname + self.appauthor = appauthor + self.version = version + self.roaming = roaming + self.multipath = multipath + + @property + def user_data_dir(self): + return user_data_dir(self.appname, self.appauthor, + version=self.version, roaming=self.roaming) + + @property + def site_data_dir(self): + return site_data_dir(self.appname, self.appauthor, + version=self.version, multipath=self.multipath) + + @property + def user_config_dir(self): + return user_config_dir(self.appname, self.appauthor, + version=self.version, roaming=self.roaming) + + @property + def site_config_dir(self): + return site_config_dir(self.appname, self.appauthor, + version=self.version, multipath=self.multipath) + + @property + def user_cache_dir(self): + return user_cache_dir(self.appname, self.appauthor, + version=self.version) + + @property + def user_state_dir(self): + return user_state_dir(self.appname, self.appauthor, + version=self.version) + + @property + def user_log_dir(self): + return user_log_dir(self.appname, self.appauthor, + version=self.version) + + +#---- internal support stuff + +def _get_win_folder_from_registry(csidl_name): + """This is a fallback technique at best. I'm not sure if using the + registry for this guarantees us the correct answer for all CSIDL_* + names. + """ + if PY3: + import winreg as _winreg + else: + import _winreg + + shell_folder_name = { + "CSIDL_APPDATA": "AppData", + "CSIDL_COMMON_APPDATA": "Common AppData", + "CSIDL_LOCAL_APPDATA": "Local AppData", + }[csidl_name] + + key = _winreg.OpenKey( + _winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" + ) + dir, type = _winreg.QueryValueEx(key, shell_folder_name) + return dir + + +def _get_win_folder_with_pywin32(csidl_name): + from win32com.shell import shellcon, shell + dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) + # Try to make this a unicode path because SHGetFolderPath does + # not return unicode strings when there is unicode data in the + # path. + try: + dir = unicode(dir) + + # Downgrade to short path name if have highbit chars. See + # <http://bugs.activestate.com/show_bug.cgi?id=85099>. + has_high_char = False + for c in dir: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + try: + import win32api + dir = win32api.GetShortPathName(dir) + except ImportError: + pass + except UnicodeError: + pass + return dir + + +def _get_win_folder_with_ctypes(csidl_name): + import ctypes + + csidl_const = { + "CSIDL_APPDATA": 26, + "CSIDL_COMMON_APPDATA": 35, + "CSIDL_LOCAL_APPDATA": 28, + }[csidl_name] + + buf = ctypes.create_unicode_buffer(1024) + ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) + + # Downgrade to short path name if have highbit chars. See + # <http://bugs.activestate.com/show_bug.cgi?id=85099>. 
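+    # Any code point above 0xFF counts as a "high" character here; when one
+    # is present we fall back to the legacy 8.3 short path name below.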
+ has_high_char = False + for c in buf: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + buf2 = ctypes.create_unicode_buffer(1024) + if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): + buf = buf2 + + return buf.value + +def _get_win_folder_with_jna(csidl_name): + import array + from com.sun import jna + from com.sun.jna.platform import win32 + + buf_size = win32.WinDef.MAX_PATH * 2 + buf = array.zeros('c', buf_size) + shell = win32.Shell32.INSTANCE + shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) + dir = jna.Native.toString(buf.tostring()).rstrip("\0") + + # Downgrade to short path name if have highbit chars. See + # <http://bugs.activestate.com/show_bug.cgi?id=85099>. + has_high_char = False + for c in dir: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + buf = array.zeros('c', buf_size) + kernel = win32.Kernel32.INSTANCE + if kernel.GetShortPathName(dir, buf, buf_size): + dir = jna.Native.toString(buf.tostring()).rstrip("\0") + + return dir + +if system == "win32": + try: + from ctypes import windll + _get_win_folder = _get_win_folder_with_ctypes + except ImportError: + try: + import com.sun.jna + _get_win_folder = _get_win_folder_with_jna + except ImportError: + _get_win_folder = _get_win_folder_from_registry + + +#---- self test code + +if __name__ == "__main__": + appname = "MyApp" + appauthor = "MyCompany" + + props = ("user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "site_data_dir", + "site_config_dir") + + print("-- app dirs %s --" % __version__) + + print("-- app dirs (with optional 'version')") + dirs = AppDirs(appname, appauthor, version="1.0") + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (without optional 'version')") + dirs = AppDirs(appname, appauthor) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (without optional 'appauthor')") + dirs = AppDirs(appname) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (with disabled 'appauthor')") + dirs = AppDirs(appname, appauthor=False) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/appdirs.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/appdirs.pyc new file mode 100644 index 0000000..9ab9b75 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/appdirs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.py new file mode 100644 index 0000000..8fdee66 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.py @@ -0,0 +1,11 @@ +"""CacheControl import Interface. + +Make it easy to import from cachecontrol without long namespaces. 
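+
+A minimal usage sketch (requests session assumed; CacheControl is the
+wrapper defined in wrapper.py and re-exported below):
+
+    import requests
+    from pip._vendor.cachecontrol import CacheControl
+
+    sess = CacheControl(requests.Session())
+    resp = sess.get("http://example.com/")  # cached in a DictCache by default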
+""" +__author__ = "Eric Larson" +__email__ = "eric@ionrock.org" +__version__ = "0.12.5" + +from .wrapper import CacheControl +from .adapter import CacheControlAdapter +from .controller import CacheController diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.pyc new file mode 100644 index 0000000..f44fec3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.py new file mode 100644 index 0000000..f1e0ad9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.py @@ -0,0 +1,57 @@ +import logging + +from pip._vendor import requests + +from pip._vendor.cachecontrol.adapter import CacheControlAdapter +from pip._vendor.cachecontrol.cache import DictCache +from pip._vendor.cachecontrol.controller import logger + +from argparse import ArgumentParser + + +def setup_logging(): + logger.setLevel(logging.DEBUG) + handler = logging.StreamHandler() + logger.addHandler(handler) + + +def get_session(): + adapter = CacheControlAdapter( + DictCache(), cache_etags=True, serializer=None, heuristic=None + ) + sess = requests.Session() + sess.mount("http://", adapter) + sess.mount("https://", adapter) + + sess.cache_controller = adapter.controller + return sess + + +def get_args(): + parser = ArgumentParser() + parser.add_argument("url", help="The URL to try and cache") + return parser.parse_args() + + +def main(args=None): + args = get_args() + sess = get_session() + + # Make a request to get a response + resp = sess.get(args.url) + + # Turn on logging + setup_logging() + + # try setting the cache + sess.cache_controller.cache_response(resp.request, resp.raw) + + # Now try to get it + if sess.cache_controller.cached_request(resp.request): + print("Cached!") + else: + print("Not cached :(") + + +if __name__ == "__main__": + main() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.pyc new file mode 100644 index 0000000..8cc2afd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.py new file mode 100644 index 0000000..780eb28 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.py @@ -0,0 +1,133 @@ +import types +import functools +import zlib + +from pip._vendor.requests.adapters import HTTPAdapter + +from .controller import CacheController +from .cache import DictCache +from .filewrapper import CallbackFileWrapper + + +class CacheControlAdapter(HTTPAdapter): + invalidating_methods = {"PUT", "DELETE"} + + def __init__( + self, + cache=None, + cache_etags=True, + controller_class=None, + serializer=None, + heuristic=None, + cacheable_methods=None, + *args, + **kw + ): + super(CacheControlAdapter, self).__init__(*args, **kw) + self.cache = cache or DictCache() + self.heuristic = heuristic + self.cacheable_methods = cacheable_methods or ("GET",) + + controller_factory = controller_class or CacheController + self.controller = controller_factory( + self.cache, 
cache_etags=cache_etags, serializer=serializer + ) + + def send(self, request, cacheable_methods=None, **kw): + """ + Send a request. Use the request information to see if it + exists in the cache and cache the response if we need to and can. + """ + cacheable = cacheable_methods or self.cacheable_methods + if request.method in cacheable: + try: + cached_response = self.controller.cached_request(request) + except zlib.error: + cached_response = None + if cached_response: + return self.build_response(request, cached_response, from_cache=True) + + # check for etags and add headers if appropriate + request.headers.update(self.controller.conditional_headers(request)) + + resp = super(CacheControlAdapter, self).send(request, **kw) + + return resp + + def build_response( + self, request, response, from_cache=False, cacheable_methods=None + ): + """ + Build a response by making a request or using the cache. + + This will end up calling send and returning a potentially + cached response + """ + cacheable = cacheable_methods or self.cacheable_methods + if not from_cache and request.method in cacheable: + # Check for any heuristics that might update headers + # before trying to cache. + if self.heuristic: + response = self.heuristic.apply(response) + + # apply any expiration heuristics + if response.status == 304: + # We must have sent an ETag request. This could mean + # that we've been expired already or that we simply + # have an etag. In either case, we want to try and + # update the cache if that is the case. + cached_response = self.controller.update_cached_response( + request, response + ) + + if cached_response is not response: + from_cache = True + + # We are done with the server response, read a + # possible response body (compliant servers will + # not return one, but we cannot be 100% sure) and + # release the connection back to the pool. + response.read(decode_content=False) + response.release_conn() + + response = cached_response + + # We always cache the 301 responses + elif response.status == 301: + self.controller.cache_response(request, response) + else: + # Wrap the response file with a wrapper that will cache the + # response when the stream has been consumed. + response._fp = CallbackFileWrapper( + response._fp, + functools.partial( + self.controller.cache_response, request, response + ), + ) + if response.chunked: + super_update_chunk_length = response._update_chunk_length + + def _update_chunk_length(self): + super_update_chunk_length() + if self.chunk_left == 0: + self._fp._close() + + response._update_chunk_length = types.MethodType( + _update_chunk_length, response + ) + + resp = super(CacheControlAdapter, self).build_response(request, response) + + # See if we should invalidate the cache. 
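+        # PUT and DELETE are the invalidating methods declared above: if one
+        # of them succeeded, any stored copy of this URL is stale, so drop it.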
+ if request.method in self.invalidating_methods and resp.ok: + cache_url = self.controller.cache_url(request.url) + self.cache.delete(cache_url) + + # Give the request a from_cache attr to let people use it + resp.from_cache = from_cache + + return resp + + def close(self): + self.cache.close() + super(CacheControlAdapter, self).close() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.pyc new file mode 100644 index 0000000..f2e1b52 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.py new file mode 100644 index 0000000..94e0773 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.py @@ -0,0 +1,39 @@ +""" +The cache object API for implementing caches. The default is a thread +safe in-memory dictionary. +""" +from threading import Lock + + +class BaseCache(object): + + def get(self, key): + raise NotImplementedError() + + def set(self, key, value): + raise NotImplementedError() + + def delete(self, key): + raise NotImplementedError() + + def close(self): + pass + + +class DictCache(BaseCache): + + def __init__(self, init_dict=None): + self.lock = Lock() + self.data = init_dict or {} + + def get(self, key): + return self.data.get(key, None) + + def set(self, key, value): + with self.lock: + self.data.update({key: value}) + + def delete(self, key): + with self.lock: + if key in self.data: + self.data.pop(key) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.pyc new file mode 100644 index 0000000..20a11ae Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.py new file mode 100644 index 0000000..0e1658f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.py @@ -0,0 +1,2 @@ +from .file_cache import FileCache # noqa +from .redis_cache import RedisCache # noqa diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.pyc new file mode 100644 index 0000000..2d1074f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py new file mode 100644 index 0000000..1ba0080 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py @@ -0,0 +1,146 @@ +import hashlib +import os +from textwrap import dedent + +from ..cache import BaseCache +from ..controller import CacheController + +try: + FileNotFoundError +except NameError: + # py2.X + FileNotFoundError = (IOError, OSError) + + +def _secure_open_write(filename, fmode): + # We only want to write to this file, so open it in write only mode + flags = os.O_WRONLY + + # 
os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only + # will open *new* files. + # We specify this because we want to ensure that the mode we pass is the + # mode of the file. + flags |= os.O_CREAT | os.O_EXCL + + # Do not follow symlinks to prevent someone from making a symlink that + # we follow and insecurely open a cache file. + if hasattr(os, "O_NOFOLLOW"): + flags |= os.O_NOFOLLOW + + # On Windows we'll mark this file as binary + if hasattr(os, "O_BINARY"): + flags |= os.O_BINARY + + # Before we open our file, we want to delete any existing file that is + # there + try: + os.remove(filename) + except (IOError, OSError): + # The file must not exist already, so we can just skip ahead to opening + pass + + # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a + # race condition happens between the os.remove and this line, that an + # error will be raised. Because we utilize a lockfile this should only + # happen if someone is attempting to attack us. + fd = os.open(filename, flags, fmode) + try: + return os.fdopen(fd, "wb") + + except: + # An error occurred wrapping our FD in a file object + os.close(fd) + raise + + +class FileCache(BaseCache): + + def __init__( + self, + directory, + forever=False, + filemode=0o0600, + dirmode=0o0700, + use_dir_lock=None, + lock_class=None, + ): + + if use_dir_lock is not None and lock_class is not None: + raise ValueError("Cannot use use_dir_lock and lock_class together") + + try: + from pip._vendor.lockfile import LockFile + from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile + except ImportError: + notice = dedent( + """ + NOTE: In order to use the FileCache you must have + lockfile installed. You can install it via pip: + pip install lockfile + """ + ) + raise ImportError(notice) + + else: + if use_dir_lock: + lock_class = MkdirLockFile + + elif lock_class is None: + lock_class = LockFile + + self.directory = directory + self.forever = forever + self.filemode = filemode + self.dirmode = dirmode + self.lock_class = lock_class + + @staticmethod + def encode(x): + return hashlib.sha224(x.encode()).hexdigest() + + def _fn(self, name): + # NOTE: This method should not change as some may depend on it. + # See: https://github.com/ionrock/cachecontrol/issues/63 + hashed = self.encode(name) + parts = list(hashed[:5]) + [hashed] + return os.path.join(self.directory, *parts) + + def get(self, key): + name = self._fn(key) + try: + with open(name, "rb") as fh: + return fh.read() + + except FileNotFoundError: + return None + + def set(self, key, value): + name = self._fn(key) + + # Make sure the directory exists + try: + os.makedirs(os.path.dirname(name), self.dirmode) + except (IOError, OSError): + pass + + with self.lock_class(name) as lock: + # Write our actual file + with _secure_open_write(lock.path, self.filemode) as fh: + fh.write(value) + + def delete(self, key): + name = self._fn(key) + if not self.forever: + try: + os.remove(name) + except FileNotFoundError: + pass + + +def url_to_file_path(url, filecache): + """Return the file cache path based on the URL. + + This does not ensure the file exists! 
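+
+    Illustrative sketch (hypothetical directory name): with
+    FileCache(".web_cache"), a key whose sha224 hex digest starts "abcde"
+    maps to ".web_cache/a/b/c/d/e/<full digest>" -- five single-character
+    directories from the digest prefix, then the digest itself (see _fn).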
+ """ + key = CacheController.cache_url(url) + return filecache._fn(key) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.pyc new file mode 100644 index 0000000..732af04 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py new file mode 100644 index 0000000..ed705ce --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py @@ -0,0 +1,33 @@ +from __future__ import division + +from datetime import datetime +from pip._vendor.cachecontrol.cache import BaseCache + + +class RedisCache(BaseCache): + + def __init__(self, conn): + self.conn = conn + + def get(self, key): + return self.conn.get(key) + + def set(self, key, value, expires=None): + if not expires: + self.conn.set(key, value) + else: + expires = expires - datetime.utcnow() + self.conn.setex(key, int(expires.total_seconds()), value) + + def delete(self, key): + self.conn.delete(key) + + def clear(self): + """Helper for clearing all the keys in a database. Use with + caution!""" + for key in self.conn.keys(): + self.conn.delete(key) + + def close(self): + """Redis uses connection pooling, no need to close the connection.""" + pass diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.pyc new file mode 100644 index 0000000..e07f839 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.py new file mode 100644 index 0000000..33b5aed --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.py @@ -0,0 +1,29 @@ +try: + from urllib.parse import urljoin +except ImportError: + from urlparse import urljoin + + +try: + import cPickle as pickle +except ImportError: + import pickle + + +# Handle the case where the requests module has been patched to not have +# urllib3 bundled as part of its source. 
+try:
+    from pip._vendor.requests.packages.urllib3.response import HTTPResponse
+except ImportError:
+    from pip._vendor.urllib3.response import HTTPResponse
+
+try:
+    from pip._vendor.requests.packages.urllib3.util import is_fp_closed
+except ImportError:
+    from pip._vendor.urllib3.util import is_fp_closed
+
+# Replicate some six behaviour
+try:
+    text_type = unicode
+except NameError:
+    text_type = str
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.pyc
new file mode 100644
index 0000000..fc89804
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py
new file mode 100644
index 0000000..1b2b943
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py
@@ -0,0 +1,367 @@
+"""
+The httplib2 algorithms ported for use with requests.
+"""
+import logging
+import re
+import calendar
+import time
+from email.utils import parsedate_tz
+
+from pip._vendor.requests.structures import CaseInsensitiveDict
+
+from .cache import DictCache
+from .serialize import Serializer
+
+
+logger = logging.getLogger(__name__)
+
+URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
+
+
+def parse_uri(uri):
+    """Parses a URI using the regex given in Appendix B of RFC 3986.
+
+        (scheme, authority, path, query, fragment) = parse_uri(uri)
+    """
+    groups = URI.match(uri).groups()
+    return (groups[1], groups[3], groups[4], groups[6], groups[8])
+
+
+class CacheController(object):
+    """An interface to see if a request should be cached or not.
+    """
+
+    def __init__(
+        self, cache=None, cache_etags=True, serializer=None, status_codes=None
+    ):
+        self.cache = cache or DictCache()
+        self.cache_etags = cache_etags
+        self.serializer = serializer or Serializer()
+        self.cacheable_status_codes = status_codes or (200, 203, 300, 301)
+
+    @classmethod
+    def _urlnorm(cls, uri):
+        """Normalize the URL to create a safe key for the cache"""
+        (scheme, authority, path, query, fragment) = parse_uri(uri)
+        if not scheme or not authority:
+            raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
+
+        scheme = scheme.lower()
+        authority = authority.lower()
+
+        if not path:
+            path = "/"
+
+        # Could do syntax based normalization of the URI before
+        # computing the digest. See Section 6.2.2 of Std 66.
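+        # For example (illustrative): "HTTP://Example.COM/a?b=1#frag"
+        # normalizes to "http://example.com/a?b=1" -- scheme and authority
+        # case-folded, query kept, fragment dropped.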
+ request_uri = query and "?".join([path, query]) or path + defrag_uri = scheme + "://" + authority + request_uri + + return defrag_uri + + @classmethod + def cache_url(cls, uri): + return cls._urlnorm(uri) + + def parse_cache_control(self, headers): + known_directives = { + # https://tools.ietf.org/html/rfc7234#section-5.2 + "max-age": (int, True), + "max-stale": (int, False), + "min-fresh": (int, True), + "no-cache": (None, False), + "no-store": (None, False), + "no-transform": (None, False), + "only-if-cached": (None, False), + "must-revalidate": (None, False), + "public": (None, False), + "private": (None, False), + "proxy-revalidate": (None, False), + "s-maxage": (int, True), + } + + cc_headers = headers.get("cache-control", headers.get("Cache-Control", "")) + + retval = {} + + for cc_directive in cc_headers.split(","): + if not cc_directive.strip(): + continue + + parts = cc_directive.split("=", 1) + directive = parts[0].strip() + + try: + typ, required = known_directives[directive] + except KeyError: + logger.debug("Ignoring unknown cache-control directive: %s", directive) + continue + + if not typ or not required: + retval[directive] = None + if typ: + try: + retval[directive] = typ(parts[1].strip()) + except IndexError: + if required: + logger.debug( + "Missing value for cache-control " "directive: %s", + directive, + ) + except ValueError: + logger.debug( + "Invalid value for cache-control directive " "%s, must be %s", + directive, + typ.__name__, + ) + + return retval + + def cached_request(self, request): + """ + Return a cached response if it exists in the cache, otherwise + return False. + """ + cache_url = self.cache_url(request.url) + logger.debug('Looking up "%s" in the cache', cache_url) + cc = self.parse_cache_control(request.headers) + + # Bail out if the request insists on fresh data + if "no-cache" in cc: + logger.debug('Request header has "no-cache", cache bypassed') + return False + + if "max-age" in cc and cc["max-age"] == 0: + logger.debug('Request header has "max_age" as 0, cache bypassed') + return False + + # Request allows serving from the cache, let's see if we find something + cache_data = self.cache.get(cache_url) + if cache_data is None: + logger.debug("No cache entry available") + return False + + # Check whether it can be deserialized + resp = self.serializer.loads(request, cache_data) + if not resp: + logger.warning("Cache entry deserialization failed, entry ignored") + return False + + # If we have a cached 301, return it immediately. We don't + # need to test our response for other headers b/c it is + # intrinsically "cacheable" as it is Permanent. + # See: + # https://tools.ietf.org/html/rfc7231#section-6.4.2 + # + # Client can try to refresh the value by repeating the request + # with cache busting headers as usual (ie no-cache). + if resp.status == 301: + msg = ( + 'Returning cached "301 Moved Permanently" response ' + "(ignoring date and etag information)" + ) + logger.debug(msg) + return resp + + headers = CaseInsensitiveDict(resp.headers) + if not headers or "date" not in headers: + if "etag" not in headers: + # Without date or etag, the cached response can never be used + # and should be deleted. 
+ logger.debug("Purging cached response: no date or etag") + self.cache.delete(cache_url) + logger.debug("Ignoring cached response: no date") + return False + + now = time.time() + date = calendar.timegm(parsedate_tz(headers["date"])) + current_age = max(0, now - date) + logger.debug("Current age based on date: %i", current_age) + + # TODO: There is an assumption that the result will be a + # urllib3 response object. This may not be best since we + # could probably avoid instantiating or constructing the + # response until we know we need it. + resp_cc = self.parse_cache_control(headers) + + # determine freshness + freshness_lifetime = 0 + + # Check the max-age pragma in the cache control header + if "max-age" in resp_cc: + freshness_lifetime = resp_cc["max-age"] + logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime) + + # If there isn't a max-age, check for an expires header + elif "expires" in headers: + expires = parsedate_tz(headers["expires"]) + if expires is not None: + expire_time = calendar.timegm(expires) - date + freshness_lifetime = max(0, expire_time) + logger.debug("Freshness lifetime from expires: %i", freshness_lifetime) + + # Determine if we are setting freshness limit in the + # request. Note, this overrides what was in the response. + if "max-age" in cc: + freshness_lifetime = cc["max-age"] + logger.debug( + "Freshness lifetime from request max-age: %i", freshness_lifetime + ) + + if "min-fresh" in cc: + min_fresh = cc["min-fresh"] + # adjust our current age by our min fresh + current_age += min_fresh + logger.debug("Adjusted current age from min-fresh: %i", current_age) + + # Return entry if it is fresh enough + if freshness_lifetime > current_age: + logger.debug('The response is "fresh", returning cached response') + logger.debug("%i > %i", freshness_lifetime, current_age) + return resp + + # we're not fresh. If we don't have an Etag, clear it out + if "etag" not in headers: + logger.debug('The cached response is "stale" with no etag, purging') + self.cache.delete(cache_url) + + # return the original handler + return False + + def conditional_headers(self, request): + cache_url = self.cache_url(request.url) + resp = self.serializer.loads(request, self.cache.get(cache_url)) + new_headers = {} + + if resp: + headers = CaseInsensitiveDict(resp.headers) + + if "etag" in headers: + new_headers["If-None-Match"] = headers["ETag"] + + if "last-modified" in headers: + new_headers["If-Modified-Since"] = headers["Last-Modified"] + + return new_headers + + def cache_response(self, request, response, body=None, status_codes=None): + """ + Algorithm for caching requests. + + This assumes a requests Response object. + """ + # From httplib2: Don't cache 206's since we aren't going to + # handle byte range requests + cacheable_status_codes = status_codes or self.cacheable_status_codes + if response.status not in cacheable_status_codes: + logger.debug( + "Status code %s not in %s", response.status, cacheable_status_codes + ) + return + + response_headers = CaseInsensitiveDict(response.headers) + + # If we've been given a body, our response has a Content-Length, that + # Content-Length is valid then we can check to see if the body we've + # been given matches the expected size, and if it doesn't we'll just + # skip trying to cache it. 
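+        # For example, a body of 10 bytes arriving with "Content-Length: 1000"
+        # (say, a truncated download) is rejected here instead of being stored
+        # as if it were complete. (Illustrative numbers, not from the source.)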
+        if (
+            body is not None
+            and "content-length" in response_headers
+            and response_headers["content-length"].isdigit()
+            and int(response_headers["content-length"]) != len(body)
+        ):
+            return
+
+        cc_req = self.parse_cache_control(request.headers)
+        cc = self.parse_cache_control(response_headers)
+
+        cache_url = self.cache_url(request.url)
+        logger.debug('Updating cache with response from "%s"', cache_url)
+
+        # Delete it from the cache if we happen to have it stored there
+        no_store = False
+        if "no-store" in cc:
+            no_store = True
+            logger.debug('Response header has "no-store"')
+        if "no-store" in cc_req:
+            no_store = True
+            logger.debug('Request header has "no-store"')
+        if no_store and self.cache.get(cache_url):
+            logger.debug('Purging existing cache entry to honor "no-store"')
+            self.cache.delete(cache_url)
+        if no_store:
+            return
+
+        # If we've been given an etag, then keep the response
+        if self.cache_etags and "etag" in response_headers:
+            logger.debug("Caching due to etag")
+            self.cache.set(
+                cache_url, self.serializer.dumps(request, response, body=body)
+            )
+
+        # Add to the cache any 301s. We do this before looking at
+        # the Date headers.
+        elif response.status == 301:
+            logger.debug("Caching permanent redirect")
+            self.cache.set(cache_url, self.serializer.dumps(request, response))
+
+        # Add to the cache if the response headers demand it. If there
+        # is no date header then we can't do anything about expiring
+        # the cache.
+        elif "date" in response_headers:
+            # cache when there is a max-age > 0
+            if "max-age" in cc and cc["max-age"] > 0:
+                logger.debug("Caching b/c date exists and max-age > 0")
+                self.cache.set(
+                    cache_url, self.serializer.dumps(request, response, body=body)
+                )
+
+            # If the request can expire, it means we should cache it
+            # in the meantime.
+            elif "expires" in response_headers:
+                if response_headers["expires"]:
+                    logger.debug("Caching b/c of expires header")
+                    self.cache.set(
+                        cache_url, self.serializer.dumps(request, response, body=body)
+                    )
+
+    def update_cached_response(self, request, response):
+        """On a 304 we will get a new set of headers that we want to
+        update our cached value with, assuming we have one.
+
+        This should only ever be called when we've sent an ETag and
+        gotten a 304 as the response.
+        """
+        cache_url = self.cache_url(request.url)
+
+        cached_response = self.serializer.loads(request, self.cache.get(cache_url))
+
+        if not cached_response:
+            # we didn't have a cached response
+            return response
+
+        # Let's update our headers with the headers from the new request:
+        # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
+        #
+        # The server isn't supposed to send headers that would make
+        # the cached body invalid. But... just in case, we'll be sure
+        # to strip out ones we know that might be problematic due to
+        # typical assumptions.
+        excluded_headers = ["content-length"]
+
+        cached_response.headers.update(
+            dict(
+                (k, v)
+                for k, v in response.headers.items()
+                if k.lower() not in excluded_headers
+            )
+        )
+
+        # we want a 200 b/c we have content via the cache
+        cached_response.status = 200
+
+        # update our cache
+        self.cache.set(cache_url, self.serializer.dumps(request, cached_response))
+
+        return cached_response
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.pyc
new file mode 100644
index 0000000..26e1ccd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py
new file mode 100644
index 0000000..30ed4c5
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py
@@ -0,0 +1,80 @@
+from io import BytesIO
+
+
+class CallbackFileWrapper(object):
+    """
+    Small wrapper around a fp object which will tee everything read into a
+    buffer, and when that file is closed it will execute a callback with the
+    contents of that buffer.
+
+    All attributes are proxied to the underlying file object.
+
+    This class uses members with a double underscore (__) leading prefix so as
+    not to accidentally shadow an attribute.
+    """
+
+    def __init__(self, fp, callback):
+        self.__buf = BytesIO()
+        self.__fp = fp
+        self.__callback = callback
+
+    def __getattr__(self, name):
+        # The vagaries of garbage collection mean that self.__fp is
+        # not always set. By using __getattribute__ and the private
+        # name[0] allows looking up the attribute value and raising an
+        # AttributeError when it doesn't exist. This stops things from
+        # infinitely recursing calls to getattr in the case where
+        # self.__fp hasn't been set.
+        #
+        # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
+        fp = self.__getattribute__("_CallbackFileWrapper__fp")
+        return getattr(fp, name)
+
+    def __is_fp_closed(self):
+        try:
+            return self.__fp.fp is None
+
+        except AttributeError:
+            pass
+
+        try:
+            return self.__fp.closed
+
+        except AttributeError:
+            pass
+
+        # We just don't cache it then.
+        # TODO: Add some logging here...
+        return False
+
+    def _close(self):
+        if self.__callback:
+            self.__callback(self.__buf.getvalue())
+
+        # We assign this to None here, because otherwise we can get into
+        # really tricky problems where the CPython interpreter deadlocks
+        # because the callback is holding a reference to something which
+        # has a __del__ method. Setting this to None breaks the cycle
+        # and allows the garbage collector to do its thing normally.
+        self.__callback = None
+
+    def read(self, amt=None):
+        data = self.__fp.read(amt)
+        self.__buf.write(data)
+        if self.__is_fp_closed():
+            self._close()
+
+        return data
+
+    def _safe_read(self, amt):
+        data = self.__fp._safe_read(amt)
+        if amt == 2 and data == b"\r\n":
+            # urllib executes this read to toss the CRLF at the end
+            # of the chunk.
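+            # Return the CRLF without buffering it: it is chunk framing,
+            # not part of the body we want to hand to the cache callback.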
+            return data
+
+        self.__buf.write(data)
+        if self.__is_fp_closed():
+            self._close()
+
+        return data
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.pyc
new file mode 100644
index 0000000..42fc877
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.py
new file mode 100644
index 0000000..6c0e979
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.py
@@ -0,0 +1,135 @@
+import calendar
+import time
+
+from email.utils import formatdate, parsedate, parsedate_tz
+
+from datetime import datetime, timedelta
+
+TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
+
+
+def expire_after(delta, date=None):
+    date = date or datetime.utcnow()
+    return date + delta
+
+
+def datetime_to_header(dt):
+    return formatdate(calendar.timegm(dt.timetuple()))
+
+
+class BaseHeuristic(object):
+
+    def warning(self, response):
+        """
+        Return a valid 1xx warning header value describing the cache
+        adjustments.
+
+        The response is provided to allow warnings like 113
+        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
+        to explicitly say the response is over 24 hours old.
+        """
+        return '110 - "Response is Stale"'
+
+    def update_headers(self, response):
+        """Update the response headers with any new headers.
+
+        NOTE: This SHOULD always include some Warning header to
+              signify that the response was cached by the client, not
+              by way of the provided headers.
+        """
+        return {}
+
+    def apply(self, response):
+        updated_headers = self.update_headers(response)
+
+        if updated_headers:
+            response.headers.update(updated_headers)
+            warning_header_value = self.warning(response)
+            if warning_header_value is not None:
+                response.headers.update({"Warning": warning_header_value})
+
+        return response
+
+
+class OneDayCache(BaseHeuristic):
+    """
+    Cache the response by providing an expires 1 day in the
+    future.
+    """
+
+    def update_headers(self, response):
+        headers = {}
+
+        if "expires" not in response.headers:
+            date = parsedate(response.headers["date"])
+            expires = expire_after(timedelta(days=1), date=datetime(*date[:6]))
+            headers["expires"] = datetime_to_header(expires)
+            headers["cache-control"] = "public"
+        return headers
+
+
+class ExpiresAfter(BaseHeuristic):
+    """
+    Cache **all** requests for a defined time period.
+    """
+
+    def __init__(self, **kw):
+        self.delta = timedelta(**kw)
+
+    def update_headers(self, response):
+        expires = expire_after(self.delta)
+        return {"expires": datetime_to_header(expires), "cache-control": "public"}
+
+    def warning(self, response):
+        tmpl = "110 - Automatically cached for %s. Response might be stale"
+        return tmpl % self.delta
+
+
+class LastModified(BaseHeuristic):
+    """
+    If there is no Expires header already, fall back on Last-Modified
+    using the heuristic from
+    http://tools.ietf.org/html/rfc7234#section-4.2.2
+    to calculate a reasonable value.
+
+    Firefox also does something like this per
+    https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
+    http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
+    Unlike mozilla we limit this to 24 hours.
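+
+    Worked example of the rule implemented below (lifetime =
+    min((date - last_modified) / 10, 24h)): a response whose Date is five
+    days after its Last-Modified gets a heuristic freshness of 12 hours.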
+ """ + cacheable_by_default_statuses = { + 200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501 + } + + def update_headers(self, resp): + headers = resp.headers + + if "expires" in headers: + return {} + + if "cache-control" in headers and headers["cache-control"] != "public": + return {} + + if resp.status not in self.cacheable_by_default_statuses: + return {} + + if "date" not in headers or "last-modified" not in headers: + return {} + + date = calendar.timegm(parsedate_tz(headers["date"])) + last_modified = parsedate(headers["last-modified"]) + if date is None or last_modified is None: + return {} + + now = time.time() + current_age = max(0, now - date) + delta = date - calendar.timegm(last_modified) + freshness_lifetime = max(0, min(delta / 10, 24 * 3600)) + if freshness_lifetime <= current_age: + return {} + + expires = date + freshness_lifetime + return {"expires": time.strftime(TIME_FMT, time.gmtime(expires))} + + def warning(self, resp): + return None diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.pyc new file mode 100644 index 0000000..66b22ac Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.py new file mode 100644 index 0000000..ec43ff2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.py @@ -0,0 +1,186 @@ +import base64 +import io +import json +import zlib + +from pip._vendor import msgpack +from pip._vendor.requests.structures import CaseInsensitiveDict + +from .compat import HTTPResponse, pickle, text_type + + +def _b64_decode_bytes(b): + return base64.b64decode(b.encode("ascii")) + + +def _b64_decode_str(s): + return _b64_decode_bytes(s).decode("utf8") + + +class Serializer(object): + + def dumps(self, request, response, body=None): + response_headers = CaseInsensitiveDict(response.headers) + + if body is None: + body = response.read(decode_content=False) + + # NOTE: 99% sure this is dead code. I'm only leaving it + # here b/c I don't have a test yet to prove + # it. Basically, before using + # `cachecontrol.filewrapper.CallbackFileWrapper`, + # this made an effort to reset the file handle. The + # `CallbackFileWrapper` short circuits this code by + # setting the body as the content is consumed, the + # result being a `body` argument is *always* passed + # into cache_response, and in turn, + # `Serializer.dump`. + response._fp = io.BytesIO(body) + + # NOTE: This is all a bit weird, but it's really important that on + # Python 2.x these objects are unicode and not str, even when + # they contain only ascii. The problem here is that msgpack + # understands the difference between unicode and bytes and we + # have it set to differentiate between them, however Python 2 + # doesn't know the difference. Forcing these to unicode will be + # enough to have msgpack know the difference. 
+ data = { + u"response": { + u"body": body, + u"headers": dict( + (text_type(k), text_type(v)) for k, v in response.headers.items() + ), + u"status": response.status, + u"version": response.version, + u"reason": text_type(response.reason), + u"strict": response.strict, + u"decode_content": response.decode_content, + } + } + + # Construct our vary headers + data[u"vary"] = {} + if u"vary" in response_headers: + varied_headers = response_headers[u"vary"].split(",") + for header in varied_headers: + header = text_type(header).strip() + header_value = request.headers.get(header, None) + if header_value is not None: + header_value = text_type(header_value) + data[u"vary"][header] = header_value + + return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)]) + + def loads(self, request, data): + # Short circuit if we've been given an empty set of data + if not data: + return + + # Determine what version of the serializer the data was serialized + # with + try: + ver, data = data.split(b",", 1) + except ValueError: + ver = b"cc=0" + + # Make sure that our "ver" is actually a version and isn't a false + # positive from a , being in the data stream. + if ver[:3] != b"cc=": + data = ver + data + ver = b"cc=0" + + # Get the version number out of the cc=N + ver = ver.split(b"=", 1)[-1].decode("ascii") + + # Dispatch to the actual load method for the given version + try: + return getattr(self, "_loads_v{}".format(ver))(request, data) + + except AttributeError: + # This is a version we don't have a loads function for, so we'll + # just treat it as a miss and return None + return + + def prepare_response(self, request, cached): + """Verify our vary headers match and construct a real urllib3 + HTTPResponse object. + """ + # Special case the '*' Vary value as it means we cannot actually + # determine if the cached response is suitable for this request. + if "*" in cached.get("vary", {}): + return + + # Ensure that the Vary headers for the cached response match our + # request + for header, value in cached.get("vary", {}).items(): + if request.headers.get(header, None) != value: + return + + body_raw = cached["response"].pop("body") + + headers = CaseInsensitiveDict(data=cached["response"]["headers"]) + if headers.get("transfer-encoding", "") == "chunked": + headers.pop("transfer-encoding") + + cached["response"]["headers"] = headers + + try: + body = io.BytesIO(body_raw) + except TypeError: + # This can happen if cachecontrol serialized to v1 format (pickle) + # using Python 2. A Python 2 str(byte string) will be unpickled as + # a Python 3 str (unicode string), which will cause the above to + # fail with: + # + # TypeError: 'str' does not support the buffer interface + body = io.BytesIO(body_raw.encode("utf8")) + + return HTTPResponse(body=body, preload_content=False, **cached["response"]) + + def _loads_v0(self, request, data): + # The original legacy cache data. This doesn't contain enough + # information to construct everything we need, so we'll treat this as + # a miss. 
+ return + + def _loads_v1(self, request, data): + try: + cached = pickle.loads(data) + except ValueError: + return + + return self.prepare_response(request, cached) + + def _loads_v2(self, request, data): + try: + cached = json.loads(zlib.decompress(data).decode("utf8")) + except (ValueError, zlib.error): + return + + # We need to decode the items that we've base64 encoded + cached["response"]["body"] = _b64_decode_bytes(cached["response"]["body"]) + cached["response"]["headers"] = dict( + (_b64_decode_str(k), _b64_decode_str(v)) + for k, v in cached["response"]["headers"].items() + ) + cached["response"]["reason"] = _b64_decode_str(cached["response"]["reason"]) + cached["vary"] = dict( + (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v) + for k, v in cached["vary"].items() + ) + + return self.prepare_response(request, cached) + + def _loads_v3(self, request, data): + # Due to Python 2 encoding issues, it's impossible to know for sure + # exactly how to load v3 entries, thus we'll treat these as a miss so + # that they get rewritten out as v4 entries. + return + + def _loads_v4(self, request, data): + try: + cached = msgpack.loads(data, encoding="utf-8") + except ValueError: + return + + return self.prepare_response(request, cached) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.pyc new file mode 100644 index 0000000..aed3703 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/wrapper.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/wrapper.py new file mode 100644 index 0000000..265bfc8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/wrapper.py @@ -0,0 +1,29 @@ +from .adapter import CacheControlAdapter +from .cache import DictCache + + +def CacheControl( + sess, + cache=None, + cache_etags=True, + serializer=None, + heuristic=None, + controller_class=None, + adapter_class=None, + cacheable_methods=None, +): + + cache = cache or DictCache() + adapter_class = adapter_class or CacheControlAdapter + adapter = adapter_class( + cache, + cache_etags=cache_etags, + serializer=serializer, + heuristic=heuristic, + controller_class=controller_class, + cacheable_methods=cacheable_methods, + ) + sess.mount("http://", adapter) + sess.mount("https://", adapter) + + return sess diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/wrapper.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/wrapper.pyc new file mode 100644 index 0000000..a93f6b0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/wrapper.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__init__.py new file mode 100644 index 0000000..ef71f3a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__init__.py @@ -0,0 +1,3 @@ +from .core import where + +__version__ = "2018.11.29" diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__init__.pyc new file mode 100644 index 0000000..455e362 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__init__.pyc 
differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__main__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__main__.py new file mode 100644 index 0000000..ae2aff5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__main__.py @@ -0,0 +1,2 @@ +from pip._vendor.certifi import where +print(where()) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__main__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__main__.pyc new file mode 100644 index 0000000..e4abc7d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/__main__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/cacert.pem b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/cacert.pem new file mode 100644 index 0000000..db68797 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/cacert.pem @@ -0,0 +1,4512 @@ + +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Label: "GlobalSign Root CA - R2" +# Serial: 4835703278459682885658125 +# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 +# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe +# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG 
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO +291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Label: "Verisign Class 3 Public Primary Certification Authority - G3" +# Serial: 206684696279472310254277870180966723415 +# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09 +# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6 +# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44 +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946069240 +# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 +# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 +# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 +# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Label: "AddTrust External Root" +# Serial: 1 +# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f +# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68 +# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2 +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 +MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h +bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD +VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. 
+# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Global CA O=GeoTrust Inc. 
+# Label: "GeoTrust Global CA" +# Serial: 144470 +# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5 +# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12 +# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS +tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc. +# Label: "GeoTrust Universal CA" +# Serial: 1 +# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48 +# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79 +# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12 +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy +c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0 +IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV +VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8 +cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT +QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh +F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v +c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w +mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd +VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX +teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ +f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe +Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+ +nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY +MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG +9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX +IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn +ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z +uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN 
+Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja +QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW +koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9 +ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt +DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm +bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Label: "GeoTrust Universal CA 2" +# Serial: 1 +# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7 +# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79 +# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy +c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD +VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1 +c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81 +WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG +FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq +XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL +se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb +KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd +IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73 +y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt +hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc +QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4 +Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV +HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ +KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ +L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr +Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo +ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY +T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz +GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m +1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV +OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH +6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX +QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL 
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority +# Label: "QuoVadis Root CA" +# Serial: 985026699 +# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24 +# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9 +# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73 +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz +MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw +IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR +dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp +li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D +rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ +WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug +F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU +xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC +Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv +dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw +ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl +IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh +c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy +ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI +KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T +KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq +y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p +dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD +VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk 
+fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 +7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R +cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y +mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW +xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK +SnQ2+Q== +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2" +# Serial: 1289 +# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b +# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7 +# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86 +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3" +# Serial: 1478 +# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf +# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85 +# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35 +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV 
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1 +# Subject: O=SECOM Trust.net OU=Security Communication RootCA1 +# Label: "Security Communication Root CA" +# Serial: 0 +# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a +# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7 +# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG 
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- + +# Issuer: CN=Sonera Class2 CA O=Sonera +# Subject: CN=Sonera Class2 CA O=Sonera +# Label: "Sonera Class 2 Root CA" +# Serial: 29 +# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb +# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27 +# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27 +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP +MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx +MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV +BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o +Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt +5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s +3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej +vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu +8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw +DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG +MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil +zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ +3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD +FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 +Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 +ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M +-----END CERTIFICATE----- + +# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Label: "XRamp Global CA Root" +# Serial: 107108908803651509692980124233745014957 +# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 +# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 +# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs 
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Issuer: O=Government Root Certification Authority +# Subject: O=Government Root Certification Authority +# Label: "Taiwan GRCA" +# Serial: 42023070807708724159991140556527066870 +# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e +# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9 +# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3 +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/ +MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow +PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR +IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q +gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy +yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts +F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2 +jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx +ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC +VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK +YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH +EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN +Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud +DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE +MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK 
+UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf +qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK +ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE +JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7 +hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1 +EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm +nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX +udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz +ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe +LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl +pYYsfPQS +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j 
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=Class 2 Primary CA O=Certplus +# Subject: CN=Class 2 Primary CA O=Certplus +# Label: "Certplus Class 2 Primary CA" +# Serial: 177770208045934040241468760488327595043 +# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b +# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb +# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw +PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz 
+cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 +MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz +IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ +ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR +VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL +kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd +EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas +H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 +HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud +DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 +QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu +Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ +AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 +yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR +FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA +ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB +kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. +# Label: "DST Root CA X3" +# Serial: 91299735575339953335919266965803778155 +# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5 +# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13 +# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39 +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b +xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG +# Label: "SwissSign Gold CA - G2" +# Serial: 13492815561806991280 +# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93 +# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61 +# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95 +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF 
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG +# Label: "SwissSign Silver CA - G2" +# Serial: 5700383053117599563 +# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13 +# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb +# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5 +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt +eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c 
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 +xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. +# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. +# Label: "GeoTrust Primary Certification Authority" +# Serial: 32798226551256963324313806436981982369 +# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf +# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96 +# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo +R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx +MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 +AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA +ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 +7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W +kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI +mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ +KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 +6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl +4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K +oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj +UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU +AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only +# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. 
- For authorized use only +# Label: "thawte Primary Root CA" +# Serial: 69529181992039203566298953787712940909 +# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12 +# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81 +# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. 
- For authorized use only +# Label: "VeriSign Class 3 Public Primary Certification Authority - G5" +# Serial: 33037644167568058970164719475676101450 +# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c +# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5 +# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv +MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +# Issuer: CN=SecureTrust CA O=SecureTrust Corporation +# Subject: CN=SecureTrust CA O=SecureTrust Corporation +# Label: "SecureTrust CA" +# Serial: 17199774589125277788362757014266862032 +# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1 +# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11 +# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73 +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg 
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +# Issuer: CN=Secure Global CA O=SecureTrust Corporation +# Subject: CN=Secure Global CA O=SecureTrust Corporation +# Label: "Secure Global CA" +# Serial: 9751836167731051554232119481456978597 +# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de +# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b +# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69 +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- + +# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Label: "Network Solutions Certificate Authority" +# Serial: 116697915152937497490437556386812487904 +# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e +# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce +# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT 
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GA CA" +# Serial: 86718877871133159090080555911823548314 +# MD5 Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93 +# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9 +# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5 +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB +ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- + +# Issuer: CN=Certigna O=Dhimyotis +# Subject: CN=Certigna O=Dhimyotis +# Label: "Certigna" +# Serial: 18364802974209362175 +# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff +# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97 +# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center +# Label: "Deutsche Telekom Root CA 2" +# Serial: 38 +# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08 +# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf +# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3 +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc +MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj +IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB +IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE +RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl +U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 +IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU +ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC +QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr +rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S +NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc +QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH +txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP +BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp +tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa +IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl +6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ +xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc +# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc +# Label: "Cybertrust Global Root" +# Serial: 4835703278459682877484360 +# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 +# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 +# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG +A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh +bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE 
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS +b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 +7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS +J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y +HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP +t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz +FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY +XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw +hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js +MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA +A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj +Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx +XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o +omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc +A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority +# Label: "ePKI Root Certification Authority" +# Serial: 28956088682735189655030529057352760477 +# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3 +# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0 +# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5 +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z 
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G3"
+# Serial: 28809105769928564313984085209975885599
+# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
+# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
+# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G2"
+# Serial: 71758320672825410020661621085256472406
+# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
+# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
+# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G3"
+# Serial: 127614157056681299805556476275995414779
+# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
+# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
+# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G2"
+# Serial: 80682863203381065782177908751794619243
+# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
+# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
+# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Universal Root Certification Authority"
+# Serial: 85209574734084581917763752644031726877
+# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
+# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
+# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
+# Serial: 63143484348153506665311985501458640051
+# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
+# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
+# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G2"
+# Serial: 10000012
+# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
+# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
+# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
+DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
+qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
+uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
+Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
+pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
+5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
+UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
+GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
+5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
+6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
+eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
+B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
+BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov +L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG +SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS +CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen +5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 +IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK +gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL ++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL +vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm +bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk +N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC +Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z +ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Label: "Hongkong Post Root CA 1" +# Serial: 1000 +# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca +# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58 +# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2 +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. 
+# Label: "SecureSign RootCA11" +# Serial: 1 +# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26 +# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3 +# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12 +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr +MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG +A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0 +MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp +Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD +QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz +i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8 +h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV +MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9 +UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni +8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC +h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm +KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ +X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr +QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5 +pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN +QSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: 
CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 6047274297262753887 +# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3 +# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa +# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp 
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
+ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
+AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
+661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
+am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
+ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
+PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
+3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
+SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
+3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
+ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
+StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
+Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
+jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
+-----END CERTIFICATE-----
+
+# Issuer: CN=Izenpe.com O=IZENPE S.A.
+# Subject: CN=Izenpe.com O=IZENPE S.A.
+# Label: "Izenpe.com"
+# Serial: 917563065490389241595536686991402621
+# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
+# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
+# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
+MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
+ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
+VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
+scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
+xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
+LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
+yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
+rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
+BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
+QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
+Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
+QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
+BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
+A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
+laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
+awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
+LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
+VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
+LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
+UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
+QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
+QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Label: "Chambers of Commerce Root - 2008"
+# Serial: 11806822484801597146
+# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7
+# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c
+# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0
+-----BEGIN CERTIFICATE-----
+MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz
+IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz
+MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj
+dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw
+EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp
+MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9
+28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq
+VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q
+DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR
+5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL
+ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a
+Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl
+UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s
++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5
+Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
+ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx
+hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV
+HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1
++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN
+YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t
+L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy
+ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt
+IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV
+HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w
+DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW
+PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF
+5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1
+glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH
+FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2
+pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD
+xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG
+tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq
+jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De
+fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
+OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ
+d0jQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Label: "Global Chambersign Root - 2008" +# Serial: 14541511773111788494 +# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3 +# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c +# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx +MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy +cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG +A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl +BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed +KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 +G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 +zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 +ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG +HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 +Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V +yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e +beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r +6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog +zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW +BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr +ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp +ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk +cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt +YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC +CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow +KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI +hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ +UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz +X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x +fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz +a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd +Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd +SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O +AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso +M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge +v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking 
O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Network CA" +# Serial: 279744 +# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 +# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e +# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA 
OU=Root CA +# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Label: "TWCA Root Certification Authority" +# Serial: 1 +# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 +# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 +# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 +# Label: "Security Communication RootCA2" +# Serial: 0 +# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 +# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 +# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2011" +# Serial: 0 +# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9 +# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d +# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71 +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix +RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p +YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw +NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK +EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl +cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz +dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ +fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns +bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD +75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP +FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV +HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp +5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu +b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA +A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p +6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 +dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys +Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI +l7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Label: "Actalis Authentication Root CA" +# Serial: 6271844772424770508 +# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 +# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac +# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: O=Trustis Limited OU=Trustis FPS Root CA +# Subject: O=Trustis Limited OU=Trustis FPS Root CA +# Label: "Trustis FPS Root CA" +# Serial: 36053640375399034304724988975563710553 +# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d +# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04 +# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL +ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx +MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc +MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ +AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH +iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj +vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA +0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB +OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ +BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E +FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 +GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW +zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 +1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE +f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F +jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN +ZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr 
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv 
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus +# Label: "EE Certification Centre Root CA" +# Serial: 112324828676200291871926431888494945866 +# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f +# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7 +# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76 +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 +czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG +CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy +MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl +ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS +b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy +euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO +bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw 
+WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d +MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE +1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ +zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB +BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF +BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV +v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG +E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW +iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v +GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV 
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. +# Subject: CN=CA Disig Root R2 O=Disig a.s. +# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os 
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 +4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 
Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi +# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi +# Label: "E-Tugra Certification Authority" +# Serial: 7667447206703254355 +# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49 +# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39 +# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY +GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK 
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV 
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL 
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD 
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq 
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ 
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT 
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE 
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 14367148294922964480859022125800977897474 +# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e +# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb +# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ +FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F +uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX 
+kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs +ewv4n4Q= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Label: "Staat der Nederlanden Root CA - G3" +# Serial: 10003001 +# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37 +# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc +# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28 +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX +DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP +cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW +IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX +xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy +KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR +9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az +5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 +6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 +Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP +bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt +BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt +XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd +INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp +LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 +Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp +gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh +/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw 
+0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A +fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq +4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR +1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ +QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM +94B7IWcnMFk= +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Label: "Staat der Nederlanden EV Root CA" +# Serial: 10000013 +# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba +# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb +# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS +v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw 
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj 
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority +# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority +# Label: "CFCA EV ROOT" +# Serial: 407555286 +# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30 +# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83 +# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS +ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy 
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Label: "Certinomis - Root CA" +# Serial: 1 +# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f +# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8 +# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58 +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb +BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz +MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx +FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g +Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 +fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl +LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV +WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF +TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb +5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc +CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri +wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ +wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG +m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 +F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng +WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 +2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ +0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw +F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS +g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj +qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN +h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ +ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V +btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj +Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ +8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW +gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- 
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X1 O=Internet Security Research Group +# Subject: CN=ISRG Root X1 O=Internet Security Research Group +# Label: "ISRG Root X1" +# Serial: 172886928669790476064670243504169061120 +# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e +# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 +# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc 
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj 
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 2 O=Amazon +# Subject: CN=Amazon Root CA 2 O=Amazon +# Label: "Amazon Root CA 2" +# Serial: 143266982885963551818349160658925006970653239 +# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 +# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a +# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE +76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 3 O=Amazon +# Subject: CN=Amazon Root CA 3 O=Amazon +# Label: "Amazon Root CA 3" +# Serial: 143266986699090766294700635381230934788665930 +# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 +# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e +# SHA256 Fingerprint: 
18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 4 O=Amazon +# Subject: CN=Amazon Root CA 4 O=Amazon +# Label: "Amazon Root CA 4" +# Serial: 143266989758080763974105200630763877849284878 +# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd +# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be +# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A. +# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A. 
+# Label: "LuxTrust Global Root 2" +# Serial: 59914338225734147123941058376788110305822489521 +# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c +# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f +# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5 +-----BEGIN CERTIFICATE----- +MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL +BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV +BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw +MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B +LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F +ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem +hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1 +EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn +Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4 +zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ +96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m +j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g +DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+ +8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j +X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH +hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB +KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0 +Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT ++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL +BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9 +BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO +jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9 +loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c +qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+ +2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/ +JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre +zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf +LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+ +x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6 +oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr +-----END CERTIFICATE----- + +# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" +# Serial: 1 +# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 +# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca +# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG 
+EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr +N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ 
+XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-1" +# Serial: 15752444095811006489 +# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 +# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a +# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ +DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-2" +# Serial: 2711694510199101698 +# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 +# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 +# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK +DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa +ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor ECA-1" +# Serial: 9548242946988625984 +# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c +# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd +# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm 
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq 
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 
2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce +MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 +5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GC CA" +# Serial: 44084345621038548146064804565436152554 +# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23 +# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31 +# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw +CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91 +bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg +Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ +BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu +ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS +b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni +eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W +p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T +rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV +57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg +Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- + +# 
Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 146587175971765017618439757810265552097 +# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85 +# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8 +# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH +MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM +QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy +MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl +cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM +f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX +mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7 +zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P +fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc +vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4 +Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp +zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO +Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW +k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+ +DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF +lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW +Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1 +d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z +XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR +gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3 +d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv +J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg +DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM ++SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy +F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9 +SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws +E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 146587176055767053814479386953112547951 +# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b +# SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d +# SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH +MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM +QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy +MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl +cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv +CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg +GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu +XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd +re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu 
+PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1 +mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K +8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj +x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR +nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0 +kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok +twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp +8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT +vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT +z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA +pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb +pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB +R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R +RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk +0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC +5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF +izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn +yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 146587176140553309517047991083707763997 +# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25 +# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5 +# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5 +-----BEGIN CERTIFICATE----- +MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout +736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A +DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk +fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA +njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 146587176229350439916519468929765261721 +# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26 +# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb +# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd +-----BEGIN CERTIFICATE----- +MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu +hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l +xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud 
+DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0 +CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx +sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Global G2 Root O=UniTrust +# Subject: CN=UCA Global G2 Root O=UniTrust +# Label: "UCA Global G2 Root" +# Serial: 124779693093741543919145257850076631279 +# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8 +# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a +# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9 +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH +bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x +CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds +b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr +b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9 +kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm +VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R +VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc +C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj +tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY +D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv +j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl +NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6 +iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP +O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV +ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj +L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl +1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU +b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV +PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj +y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb +EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg +DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI ++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy +YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX +UB+K+wb1whnw0A== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Extended Validation Root O=UniTrust +# Subject: CN=UCA Extended Validation Root O=UniTrust +# Label: "UCA Extended Validation Root" +# Serial: 106100277556486529736699587978573607008 +# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 +# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a +# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF +eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx +MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV +BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog +D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS 
+sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop +O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk +sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi +c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj +VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz +KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ +TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G +sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs +1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD +fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN +l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ +VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 +c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp +4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s +t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj +2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO +vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C +xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx +cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM +fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax +-----END CERTIFICATE----- + +# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Label: "Certigna Root CA" +# Serial: 269714418870597844693661054334862075617 +# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 +# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 +# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw +WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw +MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x +MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD +VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX +BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO +ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M +CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu +I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm +TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh +C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf +ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz +IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT +Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k +JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 +hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB +GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov +L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo +dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr +aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq +hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L 
+6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG +HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 +0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB +lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi +o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 +gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v +faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 +Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh +jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw +3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/core.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/core.py new file mode 100644 index 0000000..2d02ea4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/core.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +certifi.py +~~~~~~~~~~ + +This module returns the installation location of cacert.pem. +""" +import os + + +def where(): + f = os.path.dirname(__file__) + + return os.path.join(f, 'cacert.pem') + + +if __name__ == '__main__': + print(where()) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/core.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/core.pyc new file mode 100644 index 0000000..16fc8b4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/certifi/core.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/__init__.py new file mode 100644 index 0000000..0f9f820 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/__init__.py @@ -0,0 +1,39 @@ +######################## BEGIN LICENSE BLOCK ######################## +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + + +from .compat import PY2, PY3 +from .universaldetector import UniversalDetector +from .version import __version__, VERSION + + +def detect(byte_str): + """ + Detect the encoding of the given byte string. + + :param byte_str: The byte sequence to examine. 
+ :type byte_str: ``bytes`` or ``bytearray`` + """ + if not isinstance(byte_str, bytearray): + if not isinstance(byte_str, bytes): + raise TypeError('Expected object of type bytes or bytearray, got: ' + '{0}'.format(type(byte_str))) + else: + byte_str = bytearray(byte_str) + detector = UniversalDetector() + detector.feed(byte_str) + return detector.close() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/__init__.pyc new file mode 100644 index 0000000..3113b9c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5freq.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5freq.py new file mode 100644 index 0000000..38f3251 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5freq.py @@ -0,0 +1,386 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# Big5 frequency table +# by Taiwan's Mandarin Promotion Council +# <http://www.edu.tw:81/mandr/> +# +# 128 --> 0.42261 +# 256 --> 0.57851 +# 512 --> 0.74851 +# 1024 --> 0.89384 +# 2048 --> 0.97583 +# +# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98 +# Random Distribution Ration = 512/(5401-512)=0.105 +# +# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR + +BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75 + +#Char to FreqOrder table +BIG5_TABLE_SIZE = 5376 + +BIG5_CHAR_TO_FREQ_ORDER = ( + 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16 +3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32 +1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48 + 63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64 +3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80 +4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96 +5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112 + 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128 + 179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144 + 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160 +2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176 +1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192 +3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208 + 706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224 +1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240 +3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256 +2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272 + 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288 +3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304 +1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320 +5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336 + 266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352 +5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368 +1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384 + 32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400 + 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416 +3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432 +3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448 + 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464 +2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480 +2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496 + 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 
757,1162,1879,1616,3459, # 512 + 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528 +3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544 +1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560 +1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576 +1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592 +2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608 + 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624 +4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640 +1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656 +5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672 +2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688 + 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704 + 98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720 + 523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736 + 710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752 +5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768 + 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784 +1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800 + 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816 + 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832 +5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848 +1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864 + 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880 +3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896 +4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912 +3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928 + 279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944 + 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960 +1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976 +4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992 +3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008 +3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024 +2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040 +5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056 +3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072 +5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088 +1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104 +2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120 +1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136 + 78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152 
+1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168 +4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184 +3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200 + 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216 + 165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232 + 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248 +2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264 +5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280 +1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296 +2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312 +1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328 +1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344 +5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360 +5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376 +5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392 +3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408 +4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424 +4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440 +2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456 +5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472 +3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488 + 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504 +5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520 +5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536 +1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552 +2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568 +3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584 +4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600 +5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616 +3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632 +4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648 +1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664 +1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680 +4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696 +1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712 + 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728 +1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744 +1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760 +3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776 + 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792 
+5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808 +2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824 +1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840 +1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856 +5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872 + 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888 +4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904 + 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920 +2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936 + 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952 +1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968 +1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984 + 730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000 +4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016 +4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032 +1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048 +3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064 +5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080 +5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096 +1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112 +2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128 +1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144 +3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160 +2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176 +3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192 +2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208 +4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224 +4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240 +3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256 + 97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272 +3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288 + 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304 +3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320 +4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336 +3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352 +1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368 +5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384 + 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400 +5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416 +1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432 + 
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448 +4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464 +4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480 + 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496 +2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512 +2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528 +3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544 +1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560 +4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576 +2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592 +1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608 +1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624 +2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640 +3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656 +1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672 +5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688 +1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704 +4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720 +1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736 + 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752 +1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768 +4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784 +4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800 +2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816 +1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832 +4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848 + 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864 +5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880 +2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896 +3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912 +4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928 + 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944 +5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960 +5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976 +1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992 +4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008 +4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024 +2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040 +3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056 +3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072 
+2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088 +1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104 +4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120 +3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136 +3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152 +2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168 +4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184 +5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200 +3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216 +2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232 +3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248 +1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264 +2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280 +3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296 +4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312 +2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328 +2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344 +5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360 +1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376 +2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392 +1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408 +3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424 +4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440 +2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456 +3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472 +3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488 +2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504 +4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520 +2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536 +3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552 +4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568 +5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584 +3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600 + 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616 +1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632 +4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648 +1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664 +4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680 +5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696 + 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712 
+5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728 +5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744 +2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760 +3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776 +2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792 +2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808 + 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824 +1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840 +4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856 +3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872 +3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888 + 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904 +2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920 + 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936 +2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952 +4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968 +1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984 +4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000 +1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016 +3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032 + 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048 +3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064 +5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080 +5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096 +3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112 +3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128 +1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144 +2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160 +5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176 +1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192 +1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208 +3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224 + 919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240 +1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256 +4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272 +5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288 +2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304 +3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320 + 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336 +1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 
4352 +2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368 +2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384 +5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400 +5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416 +5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432 +2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448 +2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464 +1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480 +4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496 +3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512 +3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528 +4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544 +4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560 +2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576 +2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592 +5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608 +4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624 +5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640 +4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656 + 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672 + 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688 +1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704 +3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720 +4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736 +1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752 +5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768 +2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784 +2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800 +3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816 +5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832 +1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848 +3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864 +5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880 +1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896 +5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912 +2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928 +3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944 +2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960 +3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976 +3932,1988, 618, 
427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992 +3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008 +4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024 + 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040 +2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056 +4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072 +3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088 +5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104 +1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120 +5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136 + 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152 +1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168 + 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184 +4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200 +1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216 +4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232 +1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248 + 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264 +3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280 +4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296 +5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312 + 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328 +3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344 + 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360 +2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 +) + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5freq.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5freq.pyc new file mode 100644 index 0000000..ff571d4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5freq.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5prober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5prober.py new file mode 100644 index 0000000..98f9970 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5prober.py @@ -0,0 +1,47 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import Big5DistributionAnalysis +from .mbcssm import BIG5_SM_MODEL + + +class Big5Prober(MultiByteCharSetProber): + def __init__(self): + super(Big5Prober, self).__init__() + self.coding_sm = CodingStateMachine(BIG5_SM_MODEL) + self.distribution_analyzer = Big5DistributionAnalysis() + self.reset() + + @property + def charset_name(self): + return "Big5" + + @property + def language(self): + return "Chinese" diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5prober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5prober.pyc new file mode 100644 index 0000000..bc5fd3b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/big5prober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/chardistribution.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/chardistribution.py new file mode 100644 index 0000000..c0395f4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/chardistribution.py @@ -0,0 +1,233 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
+                        EUCTW_TYPICAL_DISTRIBUTION_RATIO)
+from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
+                        EUCKR_TYPICAL_DISTRIBUTION_RATIO)
+from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
+                         GB2312_TYPICAL_DISTRIBUTION_RATIO)
+from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
+                       BIG5_TYPICAL_DISTRIBUTION_RATIO)
+from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
+                      JIS_TYPICAL_DISTRIBUTION_RATIO)
+
+
+class CharDistributionAnalysis(object):
+    ENOUGH_DATA_THRESHOLD = 1024
+    SURE_YES = 0.99
+    SURE_NO = 0.01
+    MINIMUM_DATA_THRESHOLD = 3
+
+    def __init__(self):
+        # Mapping table to get frequency order from char order (get from
+        # GetOrder())
+        self._char_to_freq_order = None
+        self._table_size = None  # Size of above table
+        # This is a constant value which varies from language to language,
+        # used in calculating confidence. See
+        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
+        # for further detail.
+        self.typical_distribution_ratio = None
+        self._done = None
+        self._total_chars = None
+        self._freq_chars = None
+        self.reset()
+
+    def reset(self):
+        """reset analyser, clear any state"""
+        # If this flag is set to True, detection is done and a conclusion
+        # has been made
+        self._done = False
+        self._total_chars = 0  # Total characters encountered
+        # The number of characters whose frequency order is less than 512
+        self._freq_chars = 0
+
+    def feed(self, char, char_len):
+        """feed a character with known length"""
+        if char_len == 2:
+            # we only care about 2-byte characters in our distribution analysis
+            order = self.get_order(char)
+        else:
+            order = -1
+        if order >= 0:
+            self._total_chars += 1
+            # order is valid
+            if order < self._table_size:
+                if 512 > self._char_to_freq_order[order]:
+                    self._freq_chars += 1
+
+    def get_confidence(self):
+        """return confidence based on existing data"""
+        # if we didn't receive any character in our consideration range,
+        # return negative answer
+        if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
+            return self.SURE_NO
+
+        if self._total_chars != self._freq_chars:
+            r = (self._freq_chars / ((self._total_chars - self._freq_chars)
+                 * self.typical_distribution_ratio))
+            if r < self.SURE_YES:
+                return r
+
+        # normalize confidence (we don't want to be 100% sure)
+        return self.SURE_YES
+
+    def got_enough_data(self):
+        # It is not necessary to receive all data to draw conclusion.
+        # For charset detection, certain amount of data is enough
+        return self._total_chars > self.ENOUGH_DATA_THRESHOLD
+
+    def get_order(self, byte_str):
+        # We do not handle characters based on the original encoding string,
+        # but convert this encoding string to a number, here called order.
+        # This allows multiple encodings of a language to share one frequency
+        # table.
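+        # (Editor's annotation, not upstream chardet text: as a worked
+        # example of this "order" idea, the EUC-KR subclass below maps the
+        # byte pair 0xB0 0xA1 to 94 * (0xB0 - 0xB0) + (0xA1 - 0xA1) = 0,
+        # i.e. the first slot of the EUC-KR frequency-order table.)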
+        return -1
+
+
+class EUCTWDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(EUCTWDistributionAnalysis, self).__init__()
+        self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
+        self._table_size = EUCTW_TABLE_SIZE
+        self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for euc-TW encoding, we are interested
+        # first byte range: 0xc4 -- 0xfe
+        # second byte range: 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
+        first_char = byte_str[0]
+        if first_char >= 0xC4:
+            return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
+        else:
+            return -1
+
+
+class EUCKRDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(EUCKRDistributionAnalysis, self).__init__()
+        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
+        self._table_size = EUCKR_TABLE_SIZE
+        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for euc-KR encoding, we are interested
+        # first byte range: 0xb0 -- 0xfe
+        # second byte range: 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
+        first_char = byte_str[0]
+        if first_char >= 0xB0:
+            return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
+        else:
+            return -1
+
+
+class GB2312DistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(GB2312DistributionAnalysis, self).__init__()
+        self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
+        self._table_size = GB2312_TABLE_SIZE
+        self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for GB2312 encoding, we are interested
+        # first byte range: 0xb0 -- 0xfe
+        # second byte range: 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
+        first_char, second_char = byte_str[0], byte_str[1]
+        if (first_char >= 0xB0) and (second_char >= 0xA1):
+            return 94 * (first_char - 0xB0) + second_char - 0xA1
+        else:
+            return -1
+
+
+class Big5DistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(Big5DistributionAnalysis, self).__init__()
+        self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
+        self._table_size = BIG5_TABLE_SIZE
+        self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for big5 encoding, we are interested
+        # first byte range: 0xa4 -- 0xfe
+        # second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
+        first_char, second_char = byte_str[0], byte_str[1]
+        if first_char >= 0xA4:
+            if second_char >= 0xA1:
+                return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
+            else:
+                return 157 * (first_char - 0xA4) + second_char - 0x40
+        else:
+            return -1
+
+
+class SJISDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(SJISDistributionAnalysis, self).__init__()
+        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+        self._table_size = JIS_TABLE_SIZE
+        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for sjis encoding, we are interested
+        # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
+        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
+        # no validation needed here. State machine has done that
+        first_char, second_char = byte_str[0], byte_str[1]
+        if (first_char >= 0x81) and (first_char <= 0x9F):
+            order = 188 * (first_char - 0x81)
+        elif (first_char >= 0xE0) and (first_char <= 0xEF):
+            order = 188 * (first_char - 0xE0 + 31)
+        else:
+            return -1
+        order = order + second_char - 0x40
+        if second_char > 0x7F:
+            order = -1
+        return order
+
+
+class EUCJPDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(EUCJPDistributionAnalysis, self).__init__()
+        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+        self._table_size = JIS_TABLE_SIZE
+        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for euc-JP encoding, we are interested
+        # first byte range: 0xa0 -- 0xfe
+        # second byte range: 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
+        char = byte_str[0]
+        if char >= 0xA0:
+            return 94 * (char - 0xA1) + byte_str[1] - 0xa1
+        else:
+            return -1
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/chardistribution.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/chardistribution.pyc
new file mode 100644
index 0000000..0eb5dd1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/chardistribution.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetgroupprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetgroupprober.py
new file mode 100644
index 0000000..8b3738e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetgroupprober.py
@@ -0,0 +1,106 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .enums import ProbingState +from .charsetprober import CharSetProber + + +class CharSetGroupProber(CharSetProber): + def __init__(self, lang_filter=None): + super(CharSetGroupProber, self).__init__(lang_filter=lang_filter) + self._active_num = 0 + self.probers = [] + self._best_guess_prober = None + + def reset(self): + super(CharSetGroupProber, self).reset() + self._active_num = 0 + for prober in self.probers: + if prober: + prober.reset() + prober.active = True + self._active_num += 1 + self._best_guess_prober = None + + @property + def charset_name(self): + if not self._best_guess_prober: + self.get_confidence() + if not self._best_guess_prober: + return None + return self._best_guess_prober.charset_name + + @property + def language(self): + if not self._best_guess_prober: + self.get_confidence() + if not self._best_guess_prober: + return None + return self._best_guess_prober.language + + def feed(self, byte_str): + for prober in self.probers: + if not prober: + continue + if not prober.active: + continue + state = prober.feed(byte_str) + if not state: + continue + if state == ProbingState.FOUND_IT: + self._best_guess_prober = prober + return self.state + elif state == ProbingState.NOT_ME: + prober.active = False + self._active_num -= 1 + if self._active_num <= 0: + self._state = ProbingState.NOT_ME + return self.state + return self.state + + def get_confidence(self): + state = self.state + if state == ProbingState.FOUND_IT: + return 0.99 + elif state == ProbingState.NOT_ME: + return 0.01 + best_conf = 0.0 + self._best_guess_prober = None + for prober in self.probers: + if not prober: + continue + if not prober.active: + self.logger.debug('%s not active', prober.charset_name) + continue + conf = prober.get_confidence() + self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf) + if best_conf < conf: + best_conf = conf + self._best_guess_prober = prober + if not self._best_guess_prober: + return 0.0 + return best_conf diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetgroupprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetgroupprober.pyc new file mode 100644 index 0000000..b6c18cc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetgroupprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetprober.py new file mode 100644 index 0000000..eac4e59 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetprober.py @@ -0,0 +1,145 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 2001 +# the Initial Developer. All Rights Reserved. 
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################

+import logging
+import re
+
+from .enums import ProbingState
+
+
+class CharSetProber(object):
+
+    SHORTCUT_THRESHOLD = 0.95
+
+    def __init__(self, lang_filter=None):
+        self._state = None
+        self.lang_filter = lang_filter
+        self.logger = logging.getLogger(__name__)
+
+    def reset(self):
+        self._state = ProbingState.DETECTING
+
+    @property
+    def charset_name(self):
+        return None
+
+    def feed(self, buf):
+        pass
+
+    @property
+    def state(self):
+        return self._state
+
+    def get_confidence(self):
+        return 0.0
+
+    @staticmethod
+    def filter_high_byte_only(buf):
+        buf = re.sub(b'([\x00-\x7F])+', b' ', buf)
+        return buf
+
+    @staticmethod
+    def filter_international_words(buf):
+        """
+        We define three types of bytes:
+        alphabet: English letters [a-zA-Z]
+        international: international characters [\x80-\xFF]
+        marker: everything else [^a-zA-Z\x80-\xFF]
+
+        The input buffer can be thought of as a series of words delimited
+        by markers. This function works to filter all words that contain at
+        least one international character. All contiguous sequences of markers
+        are replaced by a single space ASCII character.
+
+        This filter applies to all scripts which do not use English characters.
+        """
+        filtered = bytearray()
+
+        # This regex filters out only words that have at least one
+        # international character. The word may include one marker character at
+        # the end.
+        words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
+                           buf)
+
+        for word in words:
+            filtered.extend(word[:-1])
+
+            # If the last character in the word is a marker, replace it with a
+            # space as markers shouldn't affect our analysis (they are used
+            # similarly across all languages and may thus have similar
+            # frequencies).
+            last_char = word[-1:]
+            if not last_char.isalpha() and last_char < b'\x80':
+                last_char = b' '
+            filtered.extend(last_char)
+
+        return filtered
+
+    @staticmethod
+    def filter_with_english_letters(buf):
+        """
+        Returns a copy of ``buf`` that retains only the sequences of English
+        alphabet and high byte characters that are not between <> characters.
+        Also retains English alphabet and high byte characters immediately
+        before occurrences of >.
+
+        This filter can be applied to all scripts which contain both English
+        characters and extended ASCII characters, but is currently only used by
+        ``Latin1Prober``.
+ """ + filtered = bytearray() + in_tag = False + prev = 0 + + for curr in range(len(buf)): + # Slice here to get bytes instead of an int with Python 3 + buf_char = buf[curr:curr + 1] + # Check if we're coming out of or entering an HTML tag + if buf_char == b'>': + in_tag = False + elif buf_char == b'<': + in_tag = True + + # If current character is not extended-ASCII and not alphabetic... + if buf_char < b'\x80' and not buf_char.isalpha(): + # ...and we're not in a tag + if curr > prev and not in_tag: + # Keep everything after last non-extended-ASCII, + # non-alphabetic character + filtered.extend(buf[prev:curr]) + # Output a space to delimit stretch we kept + filtered.extend(b' ') + prev = curr + 1 + + # If we're not in a tag... + if not in_tag: + # Keep everything after last non-extended-ASCII, non-alphabetic + # character + filtered.extend(buf[prev:]) + + return filtered diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetprober.pyc new file mode 100644 index 0000000..833ac86 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/charsetprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/__init__.py @@ -0,0 +1 @@ + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/__init__.pyc new file mode 100644 index 0000000..58fd01f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/chardetect.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/chardetect.py new file mode 100644 index 0000000..c61136b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/chardetect.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +""" +Script which takes one or more file paths and reports on their detected +encodings + +Example:: + + % chardetect somefile someotherfile + somefile: windows-1252 with confidence 0.5 + someotherfile: ascii with confidence 1.0 + +If no paths are provided, it takes its input from stdin. + +""" + +from __future__ import absolute_import, print_function, unicode_literals + +import argparse +import sys + +from pip._vendor.chardet import __version__ +from pip._vendor.chardet.compat import PY2 +from pip._vendor.chardet.universaldetector import UniversalDetector + + +def description_of(lines, name='stdin'): + """ + Return a string describing the probable encoding of a file or + list of strings. + + :param lines: The lines to get the encoding of. + :type lines: Iterable of bytes + :param name: Name of file or collection of lines + :type name: str + """ + u = UniversalDetector() + for line in lines: + line = bytearray(line) + u.feed(line) + # shortcut out of the loop to save reading further - particularly useful if we read a BOM. 
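+        # (Editor's annotation, not upstream chardet text: UniversalDetector
+        # sets `done` once it has reached a confident result, e.g. after
+        # seeing a BOM or a decisive byte sequence, so the remaining input
+        # lines can be skipped.)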
+ if u.done: + break + u.close() + result = u.result + if PY2: + name = name.decode(sys.getfilesystemencoding(), 'ignore') + if result['encoding']: + return '{0}: {1} with confidence {2}'.format(name, result['encoding'], + result['confidence']) + else: + return '{0}: no result'.format(name) + + +def main(argv=None): + """ + Handles command line arguments and gets things started. + + :param argv: List of arguments, as if specified on the command-line. + If None, ``sys.argv[1:]`` is used instead. + :type argv: list of str + """ + # Get command line arguments + parser = argparse.ArgumentParser( + description="Takes one or more file paths and reports their detected \ + encodings") + parser.add_argument('input', + help='File whose encoding we would like to determine. \ + (default: stdin)', + type=argparse.FileType('rb'), nargs='*', + default=[sys.stdin if PY2 else sys.stdin.buffer]) + parser.add_argument('--version', action='version', + version='%(prog)s {0}'.format(__version__)) + args = parser.parse_args(argv) + + for f in args.input: + if f.isatty(): + print("You are running chardetect interactively. Press " + + "CTRL-D twice at the start of a blank line to signal the " + + "end of your input. If you want help, run chardetect " + + "--help\n", file=sys.stderr) + print(description_of(f, f.name)) + + +if __name__ == '__main__': + main() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/chardetect.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/chardetect.pyc new file mode 100644 index 0000000..cd59da6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cli/chardetect.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/codingstatemachine.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/codingstatemachine.py new file mode 100644 index 0000000..68fba44 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/codingstatemachine.py @@ -0,0 +1,88 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +import logging + +from .enums import MachineState + + +class CodingStateMachine(object): + """ + A state machine to verify a byte sequence for a particular encoding. For + each byte the detector receives, it will feed that byte to every active + state machine available, one byte at a time. 
The state machine changes its
+    state based on its previous state and the byte it receives. There are 3
+    states in a state machine that are of interest to an auto-detector:
+
+    START state: This is the state to start with, or a legal byte sequence
+                 (i.e. a valid code point) for a character has been identified.
+
+    ME state: This indicates that the state machine identified a byte sequence
+              that is specific to the charset it is designed for and that
+              there is no other possible encoding which can contain this byte
+              sequence. This will lead to an immediate positive answer for
+              the detector.
+
+    ERROR state: This indicates the state machine identified an illegal byte
+                 sequence for that encoding. This will lead to an immediate
+                 negative answer for this encoding. Detector will exclude this
+                 encoding from consideration from here on.
+    """
+    def __init__(self, sm):
+        self._model = sm
+        self._curr_byte_pos = 0
+        self._curr_char_len = 0
+        self._curr_state = None
+        self.logger = logging.getLogger(__name__)
+        self.reset()
+
+    def reset(self):
+        self._curr_state = MachineState.START
+
+    def next_state(self, c):
+        # for each byte we get its class
+        # if it is first byte, we also get byte length
+        byte_class = self._model['class_table'][c]
+        if self._curr_state == MachineState.START:
+            self._curr_byte_pos = 0
+            self._curr_char_len = self._model['char_len_table'][byte_class]
+        # from byte's class and state_table, we get its next state
+        curr_state = (self._curr_state * self._model['class_factor']
+                      + byte_class)
+        self._curr_state = self._model['state_table'][curr_state]
+        self._curr_byte_pos += 1
+        return self._curr_state
+
+    def get_current_charlen(self):
+        return self._curr_char_len
+
+    def get_coding_state_machine(self):
+        return self._model['name']
+
+    @property
+    def language(self):
+        return self._model['language']
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/codingstatemachine.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/codingstatemachine.pyc
new file mode 100644
index 0000000..cd4de33
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/codingstatemachine.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/compat.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/compat.py
new file mode 100644
index 0000000..ddd7468
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/compat.py
@@ -0,0 +1,34 @@
+######################## BEGIN LICENSE BLOCK ########################
+# Contributor(s):
+# Dan Blanchard
+# Ian Cordasco
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+import sys
+
+
+if sys.version_info < (3, 0):
+    PY2 = True
+    PY3 = False
+    base_str = (str, unicode)
+    text_type = unicode
+else:
+    PY2 = False
+    PY3 = True
+    base_str = (bytes, str)
+    text_type = str
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/compat.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/compat.pyc
new file mode 100644
index 0000000..d718d82
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/compat.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cp949prober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cp949prober.py
new file mode 100644
index 0000000..efd793a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cp949prober.py
@@ -0,0 +1,49 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import EUCKRDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import CP949_SM_MODEL
+
+
+class CP949Prober(MultiByteCharSetProber):
+    def __init__(self):
+        super(CP949Prober, self).__init__()
+        self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
+        # NOTE: CP949 is a superset of EUC-KR, so the distribution should
+        # not be different.
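+        # (Editor's annotation, not upstream chardet text: only the coding
+        # state machine above differs from the plain EUC-KR prober; the
+        # frequency analysis below is shared between the two encodings.)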
+ self.distribution_analyzer = EUCKRDistributionAnalysis() + self.reset() + + @property + def charset_name(self): + return "CP949" + + @property + def language(self): + return "Korean" diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cp949prober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cp949prober.pyc new file mode 100644 index 0000000..96b441a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/cp949prober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/enums.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/enums.py new file mode 100644 index 0000000..0451207 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/enums.py @@ -0,0 +1,76 @@ +""" +All of the Enums that are used throughout the chardet package. + +:author: Dan Blanchard (dan.blanchard@gmail.com) +""" + + +class InputState(object): + """ + This enum represents the different states a universal detector can be in. + """ + PURE_ASCII = 0 + ESC_ASCII = 1 + HIGH_BYTE = 2 + + +class LanguageFilter(object): + """ + This enum represents the different language filters we can apply to a + ``UniversalDetector``. + """ + CHINESE_SIMPLIFIED = 0x01 + CHINESE_TRADITIONAL = 0x02 + JAPANESE = 0x04 + KOREAN = 0x08 + NON_CJK = 0x10 + ALL = 0x1F + CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL + CJK = CHINESE | JAPANESE | KOREAN + + +class ProbingState(object): + """ + This enum represents the different states a prober can be in. + """ + DETECTING = 0 + FOUND_IT = 1 + NOT_ME = 2 + + +class MachineState(object): + """ + This enum represents the different states a state machine can be in. + """ + START = 0 + ERROR = 1 + ITS_ME = 2 + + +class SequenceLikelihood(object): + """ + This enum represents the likelihood of a character following the previous one. + """ + NEGATIVE = 0 + UNLIKELY = 1 + LIKELY = 2 + POSITIVE = 3 + + @classmethod + def get_num_categories(cls): + """:returns: The number of likelihood categories in the enum.""" + return 4 + + +class CharacterCategory(object): + """ + This enum represents the different categories language models for + ``SingleByteCharsetProber`` put characters into. + + Anything less than CONTROL is considered a letter. + """ + UNDEFINED = 255 + LINE_BREAK = 254 + SYMBOL = 253 + DIGIT = 252 + CONTROL = 251 diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/enums.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/enums.pyc new file mode 100644 index 0000000..4be1b25 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/enums.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escprober.py new file mode 100644 index 0000000..c70493f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escprober.py @@ -0,0 +1,101 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. 
+# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .charsetprober import CharSetProber +from .codingstatemachine import CodingStateMachine +from .enums import LanguageFilter, ProbingState, MachineState +from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL, + ISO2022KR_SM_MODEL) + + +class EscCharSetProber(CharSetProber): + """ + This CharSetProber uses a "code scheme" approach for detecting encodings, + whereby easily recognizable escape or shift sequences are relied on to + identify these encodings. + """ + + def __init__(self, lang_filter=None): + super(EscCharSetProber, self).__init__(lang_filter=lang_filter) + self.coding_sm = [] + if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED: + self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL)) + self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL)) + if self.lang_filter & LanguageFilter.JAPANESE: + self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL)) + if self.lang_filter & LanguageFilter.KOREAN: + self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL)) + self.active_sm_count = None + self._detected_charset = None + self._detected_language = None + self._state = None + self.reset() + + def reset(self): + super(EscCharSetProber, self).reset() + for coding_sm in self.coding_sm: + if not coding_sm: + continue + coding_sm.active = True + coding_sm.reset() + self.active_sm_count = len(self.coding_sm) + self._detected_charset = None + self._detected_language = None + + @property + def charset_name(self): + return self._detected_charset + + @property + def language(self): + return self._detected_language + + def get_confidence(self): + if self._detected_charset: + return 0.99 + else: + return 0.00 + + def feed(self, byte_str): + for c in byte_str: + for coding_sm in self.coding_sm: + if not coding_sm or not coding_sm.active: + continue + coding_state = coding_sm.next_state(c) + if coding_state == MachineState.ERROR: + coding_sm.active = False + self.active_sm_count -= 1 + if self.active_sm_count <= 0: + self._state = ProbingState.NOT_ME + return self.state + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT + self._detected_charset = coding_sm.get_coding_state_machine() + self._detected_language = coding_sm.language + return self.state + + return self.state diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escprober.pyc new file mode 100644 index 0000000..43ebae5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escsm.py 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escsm.py new file mode 100644 index 0000000..0069523 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escsm.py @@ -0,0 +1,246 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .enums import MachineState + +HZ_CLS = ( +1,0,0,0,0,0,0,0, # 00 - 07 +0,0,0,0,0,0,0,0, # 08 - 0f +0,0,0,0,0,0,0,0, # 10 - 17 +0,0,0,1,0,0,0,0, # 18 - 1f +0,0,0,0,0,0,0,0, # 20 - 27 +0,0,0,0,0,0,0,0, # 28 - 2f +0,0,0,0,0,0,0,0, # 30 - 37 +0,0,0,0,0,0,0,0, # 38 - 3f +0,0,0,0,0,0,0,0, # 40 - 47 +0,0,0,0,0,0,0,0, # 48 - 4f +0,0,0,0,0,0,0,0, # 50 - 57 +0,0,0,0,0,0,0,0, # 58 - 5f +0,0,0,0,0,0,0,0, # 60 - 67 +0,0,0,0,0,0,0,0, # 68 - 6f +0,0,0,0,0,0,0,0, # 70 - 77 +0,0,0,4,0,5,2,0, # 78 - 7f +1,1,1,1,1,1,1,1, # 80 - 87 +1,1,1,1,1,1,1,1, # 88 - 8f +1,1,1,1,1,1,1,1, # 90 - 97 +1,1,1,1,1,1,1,1, # 98 - 9f +1,1,1,1,1,1,1,1, # a0 - a7 +1,1,1,1,1,1,1,1, # a8 - af +1,1,1,1,1,1,1,1, # b0 - b7 +1,1,1,1,1,1,1,1, # b8 - bf +1,1,1,1,1,1,1,1, # c0 - c7 +1,1,1,1,1,1,1,1, # c8 - cf +1,1,1,1,1,1,1,1, # d0 - d7 +1,1,1,1,1,1,1,1, # d8 - df +1,1,1,1,1,1,1,1, # e0 - e7 +1,1,1,1,1,1,1,1, # e8 - ef +1,1,1,1,1,1,1,1, # f0 - f7 +1,1,1,1,1,1,1,1, # f8 - ff +) + +HZ_ST = ( +MachineState.START,MachineState.ERROR, 3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f +MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START, 4,MachineState.ERROR,# 10-17 + 5,MachineState.ERROR, 6,MachineState.ERROR, 5, 5, 4,MachineState.ERROR,# 18-1f + 4,MachineState.ERROR, 4, 4, 4,MachineState.ERROR, 4,MachineState.ERROR,# 20-27 + 4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f +) + +HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0) + +HZ_SM_MODEL = {'class_table': HZ_CLS, + 'class_factor': 6, + 'state_table': HZ_ST, + 'char_len_table': HZ_CHAR_LEN_TABLE, + 'name': "HZ-GB-2312", + 'language': 'Chinese'} + +ISO2022CN_CLS = ( +2,0,0,0,0,0,0,0, # 00 - 07 +0,0,0,0,0,0,0,0, # 08 - 0f +0,0,0,0,0,0,0,0, # 10 - 17 +0,0,0,1,0,0,0,0, # 18 - 1f +0,0,0,0,0,0,0,0, # 20 - 27 +0,3,0,0,0,0,0,0, # 28 - 2f 
+0,0,0,0,0,0,0,0, # 30 - 37 +0,0,0,0,0,0,0,0, # 38 - 3f +0,0,0,4,0,0,0,0, # 40 - 47 +0,0,0,0,0,0,0,0, # 48 - 4f +0,0,0,0,0,0,0,0, # 50 - 57 +0,0,0,0,0,0,0,0, # 58 - 5f +0,0,0,0,0,0,0,0, # 60 - 67 +0,0,0,0,0,0,0,0, # 68 - 6f +0,0,0,0,0,0,0,0, # 70 - 77 +0,0,0,0,0,0,0,0, # 78 - 7f +2,2,2,2,2,2,2,2, # 80 - 87 +2,2,2,2,2,2,2,2, # 88 - 8f +2,2,2,2,2,2,2,2, # 90 - 97 +2,2,2,2,2,2,2,2, # 98 - 9f +2,2,2,2,2,2,2,2, # a0 - a7 +2,2,2,2,2,2,2,2, # a8 - af +2,2,2,2,2,2,2,2, # b0 - b7 +2,2,2,2,2,2,2,2, # b8 - bf +2,2,2,2,2,2,2,2, # c0 - c7 +2,2,2,2,2,2,2,2, # c8 - cf +2,2,2,2,2,2,2,2, # d0 - d7 +2,2,2,2,2,2,2,2, # d8 - df +2,2,2,2,2,2,2,2, # e0 - e7 +2,2,2,2,2,2,2,2, # e8 - ef +2,2,2,2,2,2,2,2, # f0 - f7 +2,2,2,2,2,2,2,2, # f8 - ff +) + +ISO2022CN_ST = ( +MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07 +MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f +MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17 +MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,# 18-1f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27 + 5, 6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f +) + +ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0) + +ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS, + 'class_factor': 9, + 'state_table': ISO2022CN_ST, + 'char_len_table': ISO2022CN_CHAR_LEN_TABLE, + 'name': "ISO-2022-CN", + 'language': 'Chinese'} + +ISO2022JP_CLS = ( +2,0,0,0,0,0,0,0, # 00 - 07 +0,0,0,0,0,0,2,2, # 08 - 0f +0,0,0,0,0,0,0,0, # 10 - 17 +0,0,0,1,0,0,0,0, # 18 - 1f +0,0,0,0,7,0,0,0, # 20 - 27 +3,0,0,0,0,0,0,0, # 28 - 2f +0,0,0,0,0,0,0,0, # 30 - 37 +0,0,0,0,0,0,0,0, # 38 - 3f +6,0,4,0,8,0,0,0, # 40 - 47 +0,9,5,0,0,0,0,0, # 48 - 4f +0,0,0,0,0,0,0,0, # 50 - 57 +0,0,0,0,0,0,0,0, # 58 - 5f +0,0,0,0,0,0,0,0, # 60 - 67 +0,0,0,0,0,0,0,0, # 68 - 6f +0,0,0,0,0,0,0,0, # 70 - 77 +0,0,0,0,0,0,0,0, # 78 - 7f +2,2,2,2,2,2,2,2, # 80 - 87 +2,2,2,2,2,2,2,2, # 88 - 8f +2,2,2,2,2,2,2,2, # 90 - 97 +2,2,2,2,2,2,2,2, # 98 - 9f +2,2,2,2,2,2,2,2, # a0 - a7 +2,2,2,2,2,2,2,2, # a8 - af +2,2,2,2,2,2,2,2, # b0 - b7 +2,2,2,2,2,2,2,2, # b8 - bf +2,2,2,2,2,2,2,2, # c0 - c7 +2,2,2,2,2,2,2,2, # c8 - cf +2,2,2,2,2,2,2,2, # d0 - d7 +2,2,2,2,2,2,2,2, # d8 - df +2,2,2,2,2,2,2,2, # e0 - e7 +2,2,2,2,2,2,2,2, # e8 - ef +2,2,2,2,2,2,2,2, # f0 - f7 +2,2,2,2,2,2,2,2, # f8 - ff +) + +ISO2022JP_ST = ( +MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07 +MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f 
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17 +MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f +MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 20-27 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47 +) + +ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + +ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS, + 'class_factor': 10, + 'state_table': ISO2022JP_ST, + 'char_len_table': ISO2022JP_CHAR_LEN_TABLE, + 'name': "ISO-2022-JP", + 'language': 'Japanese'} + +ISO2022KR_CLS = ( +2,0,0,0,0,0,0,0, # 00 - 07 +0,0,0,0,0,0,0,0, # 08 - 0f +0,0,0,0,0,0,0,0, # 10 - 17 +0,0,0,1,0,0,0,0, # 18 - 1f +0,0,0,0,3,0,0,0, # 20 - 27 +0,4,0,0,0,0,0,0, # 28 - 2f +0,0,0,0,0,0,0,0, # 30 - 37 +0,0,0,0,0,0,0,0, # 38 - 3f +0,0,0,5,0,0,0,0, # 40 - 47 +0,0,0,0,0,0,0,0, # 48 - 4f +0,0,0,0,0,0,0,0, # 50 - 57 +0,0,0,0,0,0,0,0, # 58 - 5f +0,0,0,0,0,0,0,0, # 60 - 67 +0,0,0,0,0,0,0,0, # 68 - 6f +0,0,0,0,0,0,0,0, # 70 - 77 +0,0,0,0,0,0,0,0, # 78 - 7f +2,2,2,2,2,2,2,2, # 80 - 87 +2,2,2,2,2,2,2,2, # 88 - 8f +2,2,2,2,2,2,2,2, # 90 - 97 +2,2,2,2,2,2,2,2, # 98 - 9f +2,2,2,2,2,2,2,2, # a0 - a7 +2,2,2,2,2,2,2,2, # a8 - af +2,2,2,2,2,2,2,2, # b0 - b7 +2,2,2,2,2,2,2,2, # b8 - bf +2,2,2,2,2,2,2,2, # c0 - c7 +2,2,2,2,2,2,2,2, # c8 - cf +2,2,2,2,2,2,2,2, # d0 - d7 +2,2,2,2,2,2,2,2, # d8 - df +2,2,2,2,2,2,2,2, # e0 - e7 +2,2,2,2,2,2,2,2, # e8 - ef +2,2,2,2,2,2,2,2, # f0 - f7 +2,2,2,2,2,2,2,2, # f8 - ff +) + +ISO2022KR_ST = ( +MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f +MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 10-17 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27 +) + +ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0) + +ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS, + 'class_factor': 6, + 'state_table': ISO2022KR_ST, + 'char_len_table': ISO2022KR_CHAR_LEN_TABLE, + 'name': "ISO-2022-KR", + 'language': 'Korean'} + + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escsm.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escsm.pyc new file mode 100644 index 0000000..dbc1bb6 Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/escsm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/eucjpprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/eucjpprober.py new file mode 100644 index 0000000..20ce8f7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/eucjpprober.py @@ -0,0 +1,92 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .enums import ProbingState, MachineState +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import EUCJPDistributionAnalysis +from .jpcntx import EUCJPContextAnalysis +from .mbcssm import EUCJP_SM_MODEL + + +class EUCJPProber(MultiByteCharSetProber): + def __init__(self): + super(EUCJPProber, self).__init__() + self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL) + self.distribution_analyzer = EUCJPDistributionAnalysis() + self.context_analyzer = EUCJPContextAnalysis() + self.reset() + + def reset(self): + super(EUCJPProber, self).reset() + self.context_analyzer.reset() + + @property + def charset_name(self): + return "EUC-JP" + + @property + def language(self): + return "Japanese" + + def feed(self, byte_str): + for i in range(len(byte_str)): + # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte + coding_state = self.coding_sm.next_state(byte_str[i]) + if coding_state == MachineState.ERROR: + self.logger.debug('%s %s prober hit error at byte %s', + self.charset_name, self.language, i) + self._state = ProbingState.NOT_ME + break + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT + break + elif coding_state == MachineState.START: + char_len = self.coding_sm.get_current_charlen() + if i == 0: + self._last_char[1] = byte_str[0] + self.context_analyzer.feed(self._last_char, char_len) + self.distribution_analyzer.feed(self._last_char, char_len) + else: + self.context_analyzer.feed(byte_str[i - 1:i + 1], + char_len) + self.distribution_analyzer.feed(byte_str[i - 1:i + 1], + char_len) + + self._last_char[0] = byte_str[-1] + + if self.state == ProbingState.DETECTING: + if (self.context_analyzer.got_enough_data() and + (self.get_confidence() > self.SHORTCUT_THRESHOLD)): + self._state = ProbingState.FOUND_IT + + return self.state + + def get_confidence(self): + context_conf = 
self.context_analyzer.get_confidence()
+        distrib_conf = self.distribution_analyzer.get_confidence()
+        return max(context_conf, distrib_conf)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/eucjpprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/eucjpprober.pyc
new file mode 100644
index 0000000..db3726d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/eucjpprober.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrfreq.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrfreq.py
new file mode 100644
index 0000000..b68078c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrfreq.py
@@ -0,0 +1,195 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# Sampling from about 20M text materials, including literature and computer technology
+
+# 128  --> 0.79
+# 256  --> 0.92
+# 512  --> 0.986
+# 1024 --> 0.99944
+# 2048 --> 0.99999
+#
+# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
+# Random Distribution Ratio = 512 / (2350-512) = 0.279.
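The two ratios above are what the detector compares at run time: the 512 most frequent characters cover about 98.65% of the Korean sample, while a uniformly random byte stream would yield a frequent-to-infrequent ratio of only 512/(2350-512), roughly 0.28. A minimal sketch of how chardet's CharDistributionAnalysis (chardistribution.py, vendored alongside this file) turns those counts into a confidence score; the function name and counter arguments here are illustrative, not part of this module:

def distribution_confidence(freq_chars, total_chars, typical_ratio):
    # Too few characters observed yet: chardet's SURE_NO floor.
    if total_chars <= 0 or freq_chars <= 3:
        return 0.01
    if freq_chars == total_chars:
        # Every decoded character fell in the frequent set: SURE_YES cap.
        return 0.99
    # Observed frequent/infrequent ratio, normalised by the language's
    # typical ratio (EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0 below).
    # float() keeps the division exact under Python 2.7.
    r = float(freq_chars) / ((total_chars - freq_chars) * typical_ratio)
    return min(r, 0.99)

Here freq_chars counts decoded characters whose order in the table below is under 512, and total_chars counts all decoded multibyte characters; the closer the observed ratio comes to the typical one, the higher the confidence.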
+# +# Typical Distribution Ratio + +EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0 + +EUCKR_TABLE_SIZE = 2352 + +# Char to FreqOrder table , +EUCKR_CHAR_TO_FREQ_ORDER = ( + 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87, +1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398, +1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734, + 945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739, + 116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622, + 708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750, +1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856, + 344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205, + 709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779, +1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19, +1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567, +1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797, +1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802, +1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899, + 885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818, +1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409, +1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697, +1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770, +1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723, + 544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416, +1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300, + 119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083, + 893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857, +1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871, + 282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420, +1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885, + 127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889, + 0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893, +1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317, +1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841, +1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910, +1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610, + 269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375, +1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939, + 887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870, + 217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934, +1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888, +1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950, +1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065, +1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002, +1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965, +1966,1284, 126, 464, 490, 835, 136, 672, 529, 
940,1088,1435, 473,1967,1968, 467, + 50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285, + 639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7, + 103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979, +1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985, + 818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994, +1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250, + 423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824, + 532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003, +2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745, + 619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61, + 191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023, +2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032, +2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912, +2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224, + 719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012, + 819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050, +2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681, + 499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414, +1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068, +2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075, +1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850, +2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606, +2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449, +1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452, + 949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112, +2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121, +2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130, + 22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274, + 962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139, +2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721, +1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298, +2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463, +2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747, +2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285, +2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187, +2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10, +2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350, +1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201, +2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972, +2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219, +2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233, +2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242, +2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247, +1301,1474, 469, 
396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178, +1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255, +2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259, +1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262, +2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702, +1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273, + 295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541, +2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117, + 432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187, +2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800, + 808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312, +2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229, +2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315, + 501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484, +2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170, +1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335, + 425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601, +1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395, +2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354, +1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476, +2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035, + 416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498, +2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310, +1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389, +2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504, +1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505, +2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145, +1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624, + 593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700, +2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221, +2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377, + 644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448, + 915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485, +1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705, +1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465, + 291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471, +2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997, +2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486, + 797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494, + 434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771, + 585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323, +2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491, + 95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510, + 161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519, 
+2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532, +2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199, + 704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544, +2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247, +1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441, + 249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562, +2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362, +2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583, +2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465, + 3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431, + 202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151, + 974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596, +2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406, +2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611, +2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619, +1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628, +2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042, + 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256 +) + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrfreq.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrfreq.pyc new file mode 100644 index 0000000..2b26154 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrfreq.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrprober.py new file mode 100644 index 0000000..345a060 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrprober.py @@ -0,0 +1,47 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import EUCKRDistributionAnalysis +from .mbcssm import EUCKR_SM_MODEL + + +class EUCKRProber(MultiByteCharSetProber): + def __init__(self): + super(EUCKRProber, self).__init__() + self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL) + self.distribution_analyzer = EUCKRDistributionAnalysis() + self.reset() + + @property + def charset_name(self): + return "EUC-KR" + + @property + def language(self): + return "Korean" diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrprober.pyc new file mode 100644 index 0000000..d2dfe30 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euckrprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwfreq.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwfreq.py new file mode 100644 index 0000000..ed7a995 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwfreq.py @@ -0,0 +1,387 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# EUCTW frequency table +# Converted from big5 work +# by Taiwan's Mandarin Promotion Council +# <http:#www.edu.tw:81/mandr/> + +# 128 --> 0.42261 +# 256 --> 0.57851 +# 512 --> 0.74851 +# 1024 --> 0.89384 +# 2048 --> 0.97583 +# +# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98 +# Random Distribution Ration = 512/(5401-512)=0.105 +# +# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR + +EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 + +# Char to FreqOrder table , +EUCTW_TABLE_SIZE = 5376 + +EUCTW_CHAR_TO_FREQ_ORDER = ( + 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 +3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 +1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 + 63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790 +3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806 +4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822 +7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838 + 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854 + 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870 + 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886 +2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902 +1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918 +3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934 + 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950 +1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966 +3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982 +2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998 + 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014 +3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030 +1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046 +7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062 + 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078 +7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094 +1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110 + 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126 + 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142 +3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158 +3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174 + 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190 +2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206 +2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222 + 
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238 + 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254 +3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270 +1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286 +1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302 +1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318 +2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334 + 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350 +4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366 +1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382 +7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398 +2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414 + 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430 + 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446 + 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462 + 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478 +7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494 + 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510 +1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526 + 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542 + 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558 +7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574 +1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590 + 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606 +3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622 +4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638 +3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654 + 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670 + 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686 +1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702 +4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718 +3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734 +3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750 +2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766 +7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782 +3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798 +7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814 +1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830 +2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846 +1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862 + 
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878 +1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894 +4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910 +3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926 + 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942 + 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958 + 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974 +2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990 +7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006 +1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022 +2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038 +1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054 +1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070 +7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086 +7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102 +7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118 +3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134 +4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150 +1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166 +7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182 +2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198 +7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214 +3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230 +3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246 +7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262 +2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278 +7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294 + 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310 +4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326 +2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342 +7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358 +3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374 +2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390 +2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406 + 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422 +2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438 +1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454 +1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470 +2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486 +1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502 
+7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518 +7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534 +2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550 +4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566 +1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582 +7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598 + 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614 +4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630 + 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646 +2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662 + 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678 +1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694 +1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710 + 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726 +3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742 +3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758 +1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774 +3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790 +7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806 +7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822 +1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838 +2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854 +1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870 +3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886 +2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902 +3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918 +2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934 +4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950 +4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966 +3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982 + 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998 +3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014 + 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030 +3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046 +3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062 +3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078 +1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094 +7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110 + 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126 +7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142 
+1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158 + 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174 +4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190 +3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206 + 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222 +2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238 +2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254 +3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270 +1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286 +4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302 +2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318 +1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334 +1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350 +2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366 +3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382 +1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398 +7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414 +1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430 +4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446 +1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462 + 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478 +1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494 +3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510 +3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526 +2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542 +1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558 +4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574 + 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590 +7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606 +2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622 +3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638 +4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654 + 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670 +7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686 +7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702 +1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718 +4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734 +3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750 +2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766 +3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782 
+3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798 +2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814 +1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830 +4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846 +3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862 +3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878 +2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894 +4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910 +7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926 +3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942 +2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958 +3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974 +1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990 +2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006 +3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022 +4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038 +2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054 +2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070 +7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086 +1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102 +2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118 +1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134 +3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150 +4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166 +2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182 +3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198 +3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214 +2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230 +4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246 +2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262 +3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278 +4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294 +7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310 +3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326 + 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342 +1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358 +4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374 +1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390 +4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406 +7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422 + 
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438 +7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454 +2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470 +1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486 +1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502 +3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518 + 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534 + 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550 + 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566 +3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582 +2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598 + 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614 +7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630 +1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646 +3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662 +7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678 +1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694 +7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710 +4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726 +1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742 +2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758 +2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774 +4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790 + 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806 + 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822 +3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838 +3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854 +1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870 +2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886 +7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902 +1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918 +1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934 +3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950 + 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966 +1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982 +4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998 +7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014 +2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030 +3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046 + 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 
7062 +1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078 +2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094 +2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110 +7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126 +7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142 +7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158 +2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174 +2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190 +1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206 +4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222 +3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238 +3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254 +4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270 +4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286 +2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302 +2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318 +7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334 +4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350 +7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366 +2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382 +1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398 +3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414 +4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430 +2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446 + 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462 +2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478 +1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494 +2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510 +2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526 +4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542 +7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558 +1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574 +3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590 +7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606 +1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622 +8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638 +2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654 +8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670 +2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686 +2328,3852, 533,4273,3605,2181, 
617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702 +8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718 +8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734 +8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750 + 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766 +8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782 +4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798 +3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814 +8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830 +1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846 +8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862 + 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878 +1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894 + 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910 +4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926 +1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942 +4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958 +1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974 + 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990 +3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006 +4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022 +8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038 + 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054 +3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070 + 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086 +2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102 +) + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwfreq.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwfreq.pyc new file mode 100644 index 0000000..acf1eb0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwfreq.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwprober.py new file mode 100644 index 0000000..35669cc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwprober.py @@ -0,0 +1,46 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. 
+# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import EUCTWDistributionAnalysis +from .mbcssm import EUCTW_SM_MODEL + +class EUCTWProber(MultiByteCharSetProber): + def __init__(self): + super(EUCTWProber, self).__init__() + self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL) + self.distribution_analyzer = EUCTWDistributionAnalysis() + self.reset() + + @property + def charset_name(self): + return "EUC-TW" + + @property + def language(self): + return "Taiwan" diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwprober.pyc new file mode 100644 index 0000000..3853deb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/euctwprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312freq.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312freq.py new file mode 100644 index 0000000..697837b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312freq.py @@ -0,0 +1,283 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# GB2312 most frequently used character table +# +# Char to FreqOrder table , from hz6763 + +# 512 --> 0.79 -- 0.79 +# 1024 --> 0.92 -- 0.13 +# 2048 --> 0.98 -- 0.06 +# 6768 --> 1.00 -- 0.02 +# +# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79 +# Random Distribution Ration = 512 / (3755 - 512) = 0.157 +# +# Typical Distribution Ratio about 25% of Ideal one, still much higher that RDR + +GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9 + +GB2312_TABLE_SIZE = 3760 + +GB2312_CHAR_TO_FREQ_ORDER = ( +1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205, +2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842, +2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409, + 249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670, +1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820, +1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585, + 152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566, +1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575, +2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853, +3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061, + 544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155, +1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406, + 927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816, +2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606, + 360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023, +2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414, +1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513, +3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052, + 198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570, +1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575, + 253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250, +2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506, +1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26, +3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835, +1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686, +2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054, +1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894, + 585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105, +3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403, +3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694, + 252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873, +3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940, + 836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121, +1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 
110,4549,2066, 648, +3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992, +2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233, +1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157, + 755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807, +1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094, +4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258, + 887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478, +3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152, +3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909, + 509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272, +1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221, +2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252, +1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301, +1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254, + 389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070, +3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461, +3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360, +4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124, + 296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535, +3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243, +1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713, +1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071, +4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442, + 215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946, + 814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257, +3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180, +1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427, + 602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781, +1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724, +2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937, + 930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943, + 432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789, + 396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552, +3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246, +4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451, +3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310, + 750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860, +2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297, +2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780, +2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745, + 776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936, +2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032, + 968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657, + 163,2167, 290,1209,1622,3378, 
550, 634,2508,2510, 695,2634,2384,2512,1476,1414, + 220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976, +3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436, +2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254, +2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536, +1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238, + 18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059, +2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741, + 90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447, + 286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601, +1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269, +1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894, + 915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173, + 681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994, +1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956, +2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437, +3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154, +2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240, +2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143, +2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634, +3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472, +1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541, +1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143, +2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312, +1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414, +3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754, +1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424, +1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302, +3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739, + 795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004, +2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484, +1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739, +4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535, +1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641, +1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307, +3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573, +1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533, + 47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965, + 504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99, +1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280, + 160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505, +1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012, +1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039, + 744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 
892,2481,1623,4077, 982, +3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530, +4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392, +3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656, +2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220, +2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766, +1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535, +3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728, +2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338, +1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627, +1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885, + 125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411, +2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671, +2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162, +3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774, +4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524, +3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346, + 180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040, +3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188, +2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280, +1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131, + 259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947, + 774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970, +3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814, +4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557, +2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997, +1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972, +1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369, + 766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376, +1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480, +3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610, + 955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128, + 642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769, +1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207, + 57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392, +1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623, + 193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782, +2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650, + 158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478, +2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773, +2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007, +1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323, +1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598, +2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961, + 819,1541, 142,2284, 
44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302, +1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409, +1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683, +2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191, +2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616, +3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302, +1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774, +4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147, + 571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731, + 845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464, +3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377, +1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315, + 470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557, +3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903, +1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060, +4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261, +1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092, +2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810, +1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708, + 498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658, +1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871, +3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503, + 448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229, +2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112, + 136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504, +1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389, +1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27, +1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542, +3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861, +2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845, +3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700, +3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469, +3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582, + 996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999, +2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274, + 786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020, +2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601, + 12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628, +1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31, + 475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668, + 233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778, +1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169, +3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667, +3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 
63,2076, 314,1881, +1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276, +1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320, +3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751, +2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432, +2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772, +1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843, +3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116, + 451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904, +4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652, +1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664, +2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770, +3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283, +3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626, +1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713, + 768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333, + 391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062, +2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555, + 931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014, +1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510, + 386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015, +1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459, +1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390, +1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238, +1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232, +1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624, + 381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189, + 852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, #last 512 +) + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312freq.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312freq.pyc new file mode 100644 index 0000000..e004bef Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312freq.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312prober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312prober.py new file mode 100644 index 0000000..8446d2d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312prober.py @@ -0,0 +1,46 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import GB2312DistributionAnalysis +from .mbcssm import GB2312_SM_MODEL + +class GB2312Prober(MultiByteCharSetProber): + def __init__(self): + super(GB2312Prober, self).__init__() + self.coding_sm = CodingStateMachine(GB2312_SM_MODEL) + self.distribution_analyzer = GB2312DistributionAnalysis() + self.reset() + + @property + def charset_name(self): + return "GB2312" + + @property + def language(self): + return "Chinese" diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312prober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312prober.pyc new file mode 100644 index 0000000..31ca979 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/gb2312prober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/hebrewprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/hebrewprober.py new file mode 100644 index 0000000..b0e1bf4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/hebrewprober.py @@ -0,0 +1,292 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Shy Shalom +# Portions created by the Initial Developer are Copyright (C) 2005 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .charsetprober import CharSetProber +from .enums import ProbingState + +# This prober doesn't actually recognize a language or a charset. +# It is a helper prober for the use of the Hebrew model probers + +### General ideas of the Hebrew charset recognition ### +# +# Four main charsets exist in Hebrew: +# "ISO-8859-8" - Visual Hebrew +# "windows-1255" - Logical Hebrew +# "ISO-8859-8-I" - Logical Hebrew +# "x-mac-hebrew" - ?? Logical Hebrew ?? 
+#
+# Both "ISO" charsets use a completely identical set of code points, whereas
+# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
+# these code points. windows-1255 defines additional characters in the range
+# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
+# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
+# x-mac-hebrew defines similar additional code points but with a different
+# mapping.
+#
+# As far as an average Hebrew text with no diacritics is concerned, all four
+# charsets are identical with respect to code points, meaning that for the
+# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
+# (including final letters).
+#
+# The dominant difference between these charsets is their directionality.
+# "Visual" directionality means that the text is ordered as if the renderer is
+# not aware of a BIDI rendering algorithm. The renderer sees the text and
+# draws it from left to right. The text itself when ordered naturally is read
+# backwards. A buffer of Visual Hebrew generally looks like this:
+# "[last word of first line spelled backwards] [whole line ordered backwards
+# and spelled backwards] [first word of first line spelled backwards]
+# [end of line] [last word of second line] ... etc' "
+# Adding punctuation marks, numbers and English text to visual text is
+# naturally also "visual", running from left to right.
+#
+# "Logical" directionality means the text is ordered "naturally" according to
+# the order it is read. It is the responsibility of the renderer to display
+# the text from right to left. A BIDI algorithm is used to place general
+# punctuation marks, numbers and English text in the text.
+#
+# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
+# what little evidence I could find, it seems that its general directionality
+# is Logical.
+#
+# To sum up all of the above, the Hebrew probing mechanism knows about two
+# charsets:
+# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
+#    backwards while line order is natural. For charset recognition purposes
+#    the line order is unimportant (In fact, for this implementation, even
+#    word order is unimportant).
+# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
+#
+# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
+# specifically identified.
+# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
+# that contains special punctuation marks or diacritics is displayed with
+# some unconverted characters showing as question marks. This problem might
+# be corrected using another model prober for x-mac-hebrew. Because
+# x-mac-hebrew texts are so rare, though, writing another model prober isn't
+# worth the effort and performance hit.
+#
+#### The Prober ####
+#
+# The prober is divided between two SBCharSetProbers and a HebrewProber,
+# all of which are created, managed, fed data, queried and deleted by the
+# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
+# fact some kind of Hebrew, Logical or Visual. The HebrewProber then makes
+# the final decision by combining final-letter scores with the scores of
+# the two SBCharSetProbers to produce a final answer.
+#
+# The SBCSGroupProber is responsible for stripping the original text of HTML
+# tags, English characters, numbers, low-ASCII punctuation characters, spaces
+# and new lines. 
It reduces any sequence of such characters to a single space. +# The buffer fed to each prober in the SBCS group prober is pure text in +# high-ASCII. +# The two SBCharSetProbers (model probers) share the same language model: +# Win1255Model. +# The first SBCharSetProber uses the model normally as any other +# SBCharSetProber does, to recognize windows-1255, upon which this model was +# built. The second SBCharSetProber is told to make the pair-of-letter +# lookup in the language model backwards. This in practice exactly simulates +# a visual Hebrew model using the windows-1255 logical Hebrew model. +# +# The HebrewProber is not using any language model. All it does is look for +# final-letter evidence suggesting the text is either logical Hebrew or visual +# Hebrew. Disjointed from the model probers, the results of the HebrewProber +# alone are meaningless. HebrewProber always returns 0.00 as confidence +# since it never identifies a charset by itself. Instead, the pointer to the +# HebrewProber is passed to the model probers as a helper "Name Prober". +# When the Group prober receives a positive identification from any prober, +# it asks for the name of the charset identified. If the prober queried is a +# Hebrew model prober, the model prober forwards the call to the +# HebrewProber to make the final decision. In the HebrewProber, the +# decision is made according to the final-letters scores maintained and Both +# model probers scores. The answer is returned in the form of the name of the +# charset identified, either "windows-1255" or "ISO-8859-8". + +class HebrewProber(CharSetProber): + # windows-1255 / ISO-8859-8 code points of interest + FINAL_KAF = 0xea + NORMAL_KAF = 0xeb + FINAL_MEM = 0xed + NORMAL_MEM = 0xee + FINAL_NUN = 0xef + NORMAL_NUN = 0xf0 + FINAL_PE = 0xf3 + NORMAL_PE = 0xf4 + FINAL_TSADI = 0xf5 + NORMAL_TSADI = 0xf6 + + # Minimum Visual vs Logical final letter score difference. + # If the difference is below this, don't rely solely on the final letter score + # distance. + MIN_FINAL_CHAR_DISTANCE = 5 + + # Minimum Visual vs Logical model score difference. + # If the difference is below this, don't rely at all on the model score + # distance. + MIN_MODEL_DISTANCE = 0.01 + + VISUAL_HEBREW_NAME = "ISO-8859-8" + LOGICAL_HEBREW_NAME = "windows-1255" + + def __init__(self): + super(HebrewProber, self).__init__() + self._final_char_logical_score = None + self._final_char_visual_score = None + self._prev = None + self._before_prev = None + self._logical_prober = None + self._visual_prober = None + self.reset() + + def reset(self): + self._final_char_logical_score = 0 + self._final_char_visual_score = 0 + # The two last characters seen in the previous buffer, + # mPrev and mBeforePrev are initialized to space in order to simulate + # a word delimiter at the beginning of the data + self._prev = ' ' + self._before_prev = ' ' + # These probers are owned by the group prober. + + def set_model_probers(self, logicalProber, visualProber): + self._logical_prober = logicalProber + self._visual_prober = visualProber + + def is_final(self, c): + return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN, + self.FINAL_PE, self.FINAL_TSADI] + + def is_non_final(self, c): + # The normal Tsadi is not a good Non-Final letter due to words like + # 'lechotet' (to chat) containing an apostrophe after the tsadi. 
This + # apostrophe is converted to a space in FilterWithoutEnglishLetters + # causing the Non-Final tsadi to appear at an end of a word even + # though this is not the case in the original text. + # The letters Pe and Kaf rarely display a related behavior of not being + # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' + # for example legally end with a Non-Final Pe or Kaf. However, the + # benefit of these letters as Non-Final letters outweighs the damage + # since these words are quite rare. + return c in [self.NORMAL_KAF, self.NORMAL_MEM, + self.NORMAL_NUN, self.NORMAL_PE] + + def feed(self, byte_str): + # Final letter analysis for logical-visual decision. + # Look for evidence that the received buffer is either logical Hebrew + # or visual Hebrew. + # The following cases are checked: + # 1) A word longer than 1 letter, ending with a final letter. This is + # an indication that the text is laid out "naturally" since the + # final letter really appears at the end. +1 for logical score. + # 2) A word longer than 1 letter, ending with a Non-Final letter. In + # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, + # should not end with the Non-Final form of that letter. Exceptions + # to this rule are mentioned above in isNonFinal(). This is an + # indication that the text is laid out backwards. +1 for visual + # score + # 3) A word longer than 1 letter, starting with a final letter. Final + # letters should not appear at the beginning of a word. This is an + # indication that the text is laid out backwards. +1 for visual + # score. + # + # The visual score and logical score are accumulated throughout the + # text and are finally checked against each other in GetCharSetName(). + # No checking for final letters in the middle of words is done since + # that case is not an indication for either Logical or Visual text. + # + # We automatically filter out all 7-bit characters (replace them with + # spaces) so the word boundary detection works properly. [MAP] + + if self.state == ProbingState.NOT_ME: + # Both model probers say it's not them. No reason to continue. + return ProbingState.NOT_ME + + byte_str = self.filter_high_byte_only(byte_str) + + for cur in byte_str: + if cur == ' ': + # We stand on a space - a word just ended + if self._before_prev != ' ': + # next-to-last char was not a space so self._prev is not a + # 1 letter word + if self.is_final(self._prev): + # case (1) [-2:not space][-1:final letter][cur:space] + self._final_char_logical_score += 1 + elif self.is_non_final(self._prev): + # case (2) [-2:not space][-1:Non-Final letter][ + # cur:space] + self._final_char_visual_score += 1 + else: + # Not standing on a space + if ((self._before_prev == ' ') and + (self.is_final(self._prev)) and (cur != ' ')): + # case (3) [-2:space][-1:final letter][cur:not space] + self._final_char_visual_score += 1 + self._before_prev = self._prev + self._prev = cur + + # Forever detecting, till the end or until both model probers return + # ProbingState.NOT_ME (handled above) + return ProbingState.DETECTING + + @property + def charset_name(self): + # Make the decision: is it Logical or Visual? + # If the final letter score distance is dominant enough, rely on it. + finalsub = self._final_char_logical_score - self._final_char_visual_score + if finalsub >= self.MIN_FINAL_CHAR_DISTANCE: + return self.LOGICAL_HEBREW_NAME + if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE: + return self.VISUAL_HEBREW_NAME + + # It's not dominant enough, try to rely on the model scores instead. 
+ modelsub = (self._logical_prober.get_confidence() + - self._visual_prober.get_confidence()) + if modelsub > self.MIN_MODEL_DISTANCE: + return self.LOGICAL_HEBREW_NAME + if modelsub < -self.MIN_MODEL_DISTANCE: + return self.VISUAL_HEBREW_NAME + + # Still no good, back to final letter distance, maybe it'll save the + # day. + if finalsub < 0.0: + return self.VISUAL_HEBREW_NAME + + # (finalsub > 0 - Logical) or (don't know what to do) default to + # Logical. + return self.LOGICAL_HEBREW_NAME + + @property + def language(self): + return 'Hebrew' + + @property + def state(self): + # Remain active as long as any of the model probers are active. + if (self._logical_prober.state == ProbingState.NOT_ME) and \ + (self._visual_prober.state == ProbingState.NOT_ME): + return ProbingState.NOT_ME + return ProbingState.DETECTING diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/hebrewprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/hebrewprober.pyc new file mode 100644 index 0000000..f2e949d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/hebrewprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jisfreq.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jisfreq.py new file mode 100644 index 0000000..83fc082 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jisfreq.py @@ -0,0 +1,325 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# Sampling from about 20M text materials include literature and computer technology +# +# Japanese frequency table, applied to both S-JIS and EUC-JP +# They are sorted in order. 
+ +# 128 --> 0.77094 +# 256 --> 0.85710 +# 512 --> 0.92635 +# 1024 --> 0.97130 +# 2048 --> 0.99431 +# +# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58 +# Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191 +# +# Typical Distribution Ratio, 25% of IDR + +JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0 + +# Char to FreqOrder table , +JIS_TABLE_SIZE = 4368 + +JIS_CHAR_TO_FREQ_ORDER = ( + 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16 +3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32 +1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48 +2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64 +2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80 +5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96 +1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112 +5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128 +5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144 +5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160 +5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176 +5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192 +5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208 +1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224 +1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240 +1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256 +2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272 +3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288 +3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304 + 4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320 + 12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336 +1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352 + 109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368 +5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384 + 271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400 + 32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416 + 43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432 + 280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448 + 54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464 +5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480 +5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496 +5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512 +4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528 +5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544 +5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560 +5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576 +5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592 
+5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608 +5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624 +5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640 +5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656 +5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672 +3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688 +5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704 +5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720 +5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736 +5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752 +5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768 +5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784 +5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800 +5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816 +5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832 +5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848 +5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864 +5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880 +5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896 +5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912 +5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928 +5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944 +5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960 +5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976 +5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992 +5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008 +5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024 +5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040 +5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056 +5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072 +5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088 +5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104 +5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120 +5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136 +5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152 +5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168 +5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184 +5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200 +5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216 +5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232 
+5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248 +5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264 +5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280 +5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296 +6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312 +6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328 +6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344 +6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360 +6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376 +6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392 +6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408 +6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424 +4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440 + 854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456 + 665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472 +1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488 +1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504 + 896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520 +3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536 +3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552 + 804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568 +3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584 +3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600 + 586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616 +2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632 + 277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648 +3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664 +1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680 + 380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696 +1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712 + 850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728 +2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744 +2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760 +2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776 +2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792 +1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808 +1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824 +1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840 +1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856 +2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 
1872 +1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888 +2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904 +1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920 +1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936 +1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952 +1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968 +1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984 +1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000 + 606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016 + 684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032 +1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048 +2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064 +2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080 +2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096 +3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112 +3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128 + 884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144 +3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160 +1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176 + 861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192 +2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208 +1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224 + 576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240 +3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256 +4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272 +2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288 +1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304 +2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320 +1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336 + 385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352 + 178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368 +1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384 +2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400 +2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416 +2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432 +3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448 +1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464 +2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480 + 359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496 + 837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, 
# 2512 + 855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528 +1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544 +2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560 + 633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576 +1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592 +1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608 + 353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624 +1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640 +1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656 +1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672 + 764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688 +2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704 + 278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720 +2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736 +3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752 +2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768 +1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784 +6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800 +1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816 +2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832 +1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848 + 470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864 + 72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880 +3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896 +3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912 +1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928 +1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944 +1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960 +1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976 + 123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992 + 913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008 +2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024 + 900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040 +3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056 +2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072 + 423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088 +1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104 +2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120 + 220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136 +1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, 
# 3152 + 745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168 +4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184 +2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200 +1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216 + 666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232 +1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248 +2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264 + 376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280 +6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296 +1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312 +1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328 +2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344 +3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360 + 914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376 +3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392 +1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408 + 674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424 +1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440 + 199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456 +3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472 + 370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488 +2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504 + 414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520 +4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536 +2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552 +1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568 +1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584 +1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600 + 166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616 +1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632 +3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648 +1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664 +3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680 + 264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696 + 543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712 + 983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728 +2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744 +1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760 + 867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776 +1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 
904,3618,3537, # 3792 + 894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808 +1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824 + 530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840 + 839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856 + 480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872 +1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888 +1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904 +2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920 +4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936 + 227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952 +1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968 + 328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984 +1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000 +3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016 +1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032 +2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048 +2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064 +1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080 +1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096 +2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112 + 455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128 +2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144 +1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160 +1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176 +1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192 +1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208 +3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224 +2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240 +2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256 + 575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272 +3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288 +3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304 +1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320 +2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336 +1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352 +2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512 +) + + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jisfreq.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jisfreq.pyc new file mode 100644 index 0000000..f5bbb98 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jisfreq.pyc differ 
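The table that closes above is chardet's JIS character-to-frequency-order map: each Japanese character is assigned its rank by frequency of occurrence, and the trailing "#last 512" comment marks the band of ranks that count as "frequent". A distribution analyser decodes candidate text, looks each character up in such a table, and derives a confidence from how many land in the frequent band. Below is a minimal sketch of that scoring idea; the names FREQ_RANK_CUTOFF, TYPICAL_RATIO, and distribution_confidence are illustrative assumptions in the style of chardet's distribution analysis, not its exact API.

# Hedged sketch of frequency-order scoring (assumed names and constants).
FREQ_RANK_CUTOFF = 512   # ranks below this count as "frequent" characters
TYPICAL_RATIO = 3.0      # assumed typical frequent/rare ratio for Japanese text

def distribution_confidence(freq_orders):
    """freq_orders: frequency ranks of decoded characters, looked up in a
    char-to-frequency-order table like the one above (-1 = not in table)."""
    total = sum(1 for order in freq_orders if order >= 0)
    freq = sum(1 for order in freq_orders if 0 <= order < FREQ_RANK_CUTOFF)
    if total == 0 or freq == 0:
        return 0.0                  # not enough data to judge
    if freq == total:
        return 0.99                 # every observed character is a frequent one
    # The more characters fall outside the frequent band, the lower the score.
    return min(0.99, float(freq) / ((total - freq) * TYPICAL_RATIO))

Text decoded with the right encoding tends to score near 0.99; a wrong decoding scatters characters across rare ranks and the score collapses.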
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jpcntx.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jpcntx.py new file mode 100644 index 0000000..20044e4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jpcntx.py @@ -0,0 +1,233 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + + +# This is hiragana 2-char sequence table, the number in each cell represents its frequency category +jp2CharContext = ( +(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), +(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), +(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), +(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), +(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), +(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), +(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), +(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), +(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), +(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), +(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), 
+(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), +(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), +(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), +(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), +(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), +(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), +(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), +(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), +(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), +(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), +(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), +(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), +(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), +(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), +(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), +(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), +(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), +(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), +(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), +(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), 
+(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), +(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), +(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), +(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), +(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), +(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), +(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), +(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), +(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), +(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), +(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), +(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), +(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), +(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), +(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), +(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), +(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), +(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), +(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), +(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), 
+(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), +(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), +(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), +(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), +(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), +(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), +(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), +(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), +(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), +(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), +(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), +(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), +(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), +(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), +(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), +(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), +(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), +(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), +(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), +(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), 
+(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), +(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), +(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), +(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), +(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), +(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), +(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), +(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), +(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), +(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), +(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), +(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), +) + +class JapaneseContextAnalysis(object): + NUM_OF_CATEGORY = 6 + DONT_KNOW = -1 + ENOUGH_REL_THRESHOLD = 100 + MAX_REL_THRESHOLD = 1000 + MINIMUM_DATA_THRESHOLD = 4 + + def __init__(self): + self._total_rel = None + self._rel_sample = None + self._need_to_skip_char_num = None + self._last_char_order = None + self._done = None + self.reset() + + def reset(self): + self._total_rel = 0 # total sequence received + # category counters, each integer counts sequence in its category + self._rel_sample = [0] * self.NUM_OF_CATEGORY + # if last byte in current buffer is not the last byte of a character, + # we need to know how many bytes to skip in next buffer + self._need_to_skip_char_num = 0 + self._last_char_order = -1 # The order of previous char + # If this flag is set to True, detection is done and conclusion has + # been made + self._done = False + + def feed(self, byte_str, num_bytes): + if self._done: + return + + # The buffer we got is byte oriented, and a character may span in more than one + # buffers. In case the last one or two byte in last buffer is not + # complete, we record how many byte needed to complete that character + # and skip these bytes here. We can choose to record those bytes as + # well and analyse the character once it is complete, but since a + # character will not make much difference, by simply skipping + # this character will simply our logic and improve performance. 
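+        # NOTE (illustrative comment, not upstream chardet): for example, if a
+        # two-byte character starts at the final byte of this buffer, the loop
+        # below advances i to num_bytes + 1, so _need_to_skip_char_num is set
+        # to 1 and the first byte of the next buffer is skipped, keeping the
+        # scan aligned on character boundaries. Each consecutive pair of
+        # hiragana characters then bumps _rel_sample[jp2CharContext[prev][cur]],
+        # and get_confidence() later returns the fraction of observed pairs
+        # whose frequency category is nonzero.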
+ i = self._need_to_skip_char_num + while i < num_bytes: + order, char_len = self.get_order(byte_str[i:i + 2]) + i += char_len + if i > num_bytes: + self._need_to_skip_char_num = i - num_bytes + self._last_char_order = -1 + else: + if (order != -1) and (self._last_char_order != -1): + self._total_rel += 1 + if self._total_rel > self.MAX_REL_THRESHOLD: + self._done = True + break + self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1 + self._last_char_order = order + + def got_enough_data(self): + return self._total_rel > self.ENOUGH_REL_THRESHOLD + + def get_confidence(self): + # This is just one way to calculate confidence. It works well for me. + if self._total_rel > self.MINIMUM_DATA_THRESHOLD: + return (self._total_rel - self._rel_sample[0]) / self._total_rel + else: + return self.DONT_KNOW + + def get_order(self, byte_str): + return -1, 1 + +class SJISContextAnalysis(JapaneseContextAnalysis): + def __init__(self): + super(SJISContextAnalysis, self).__init__() + self._charset_name = "SHIFT_JIS" + + @property + def charset_name(self): + return self._charset_name + + def get_order(self, byte_str): + if not byte_str: + return -1, 1 + # find out current char's byte length + first_char = byte_str[0] + if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC): + char_len = 2 + if (first_char == 0x87) or (0xFA <= first_char <= 0xFC): + self._charset_name = "CP932" + else: + char_len = 1 + + # return its order if it is hiragana + if len(byte_str) > 1: + second_char = byte_str[1] + if (first_char == 202) and (0x9F <= second_char <= 0xF1): + return second_char - 0x9F, char_len + + return -1, char_len + +class EUCJPContextAnalysis(JapaneseContextAnalysis): + def get_order(self, byte_str): + if not byte_str: + return -1, 1 + # find out current char's byte length + first_char = byte_str[0] + if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): + char_len = 2 + elif first_char == 0x8F: + char_len = 3 + else: + char_len = 1 + + # return its order if it is hiragana + if len(byte_str) > 1: + second_char = byte_str[1] + if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): + return second_char - 0xA1, char_len + + return -1, char_len + + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jpcntx.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jpcntx.pyc new file mode 100644 index 0000000..a453b1f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/jpcntx.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langbulgarianmodel.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langbulgarianmodel.py new file mode 100644 index 0000000..2aa4fb2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langbulgarianmodel.py @@ -0,0 +1,228 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# 255: Control characters that usually does not exist in any text +# 254: Carriage/Return +# 253: symbol (punctuation) that does not belong to word +# 252: 0 - 9 + +# Character Mapping Table: +# this table is modified base on win1251BulgarianCharToOrderMap, so +# only number <64 is sure valid + +Latin5_BulgarianCharToOrderMap = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40 +110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50 +253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60 +116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70 +194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80 +210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90 + 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0 + 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0 + 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0 + 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0 + 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0 + 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0 +) + +win1251BulgarianCharToOrderMap = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40 +110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50 +253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60 +116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70 +206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80 +221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90 + 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0 + 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0 + 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0 + 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0 + 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0 + 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0 +) + +# Model Table: +# total sequences: 100% +# first 512 sequences: 96.9392% +# first 1024 sequences:3.0618% +# rest sequences: 0.2992% +# negative sequences: 0.0020% +BulgarianLangModel = ( +0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2, 
+3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1, +0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0, +0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0, +0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0, +1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0, +0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0, +0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0, 
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3, +2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1, +3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1, +3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2, +1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0, +3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1, +1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0, +2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2, +2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0, +3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2, +1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0, +2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2, +2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0, +3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2, +1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0, +2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2, +2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0, +2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2, +1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0, +2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2, +1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0, +3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2, +1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0, +3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1, +1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0, +2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1, +1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0, +2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2, +1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0, +2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1, +1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0, +3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0, +1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2, +1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1, +2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2, +1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0, +2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2, +1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, +1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1, +0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, +1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2, +1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, +2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1, +1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0, +1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1, +0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, 
+1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1, +0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1, +0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, +2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0, +1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1, +0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0, +0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0, +1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1, +1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0, +1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +) + +Latin5BulgarianModel = { + 'char_to_order_map': Latin5_BulgarianCharToOrderMap, + 'precedence_matrix': BulgarianLangModel, + 'typical_positive_ratio': 0.969392, + 'keep_english_letter': False, + 'charset_name': "ISO-8859-5", + 'language': 'Bulgairan', +} + +Win1251BulgarianModel = { + 'char_to_order_map': win1251BulgarianCharToOrderMap, + 'precedence_matrix': BulgarianLangModel, + 'typical_positive_ratio': 0.969392, + 'keep_english_letter': False, + 'charset_name': "windows-1251", + 'language': 'Bulgarian', +} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langbulgarianmodel.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langbulgarianmodel.pyc new file mode 100644 index 0000000..242ed08 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langbulgarianmodel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langcyrillicmodel.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langcyrillicmodel.py new file mode 100644 index 0000000..e5f9a1f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langcyrillicmodel.py @@ -0,0 +1,333 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# KOI8-R language model +# Character Mapping Table: +KOI8R_char_to_order_map = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 +155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 +253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 + 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 +191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80 +207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90 +223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0 +238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0 + 27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0 + 15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0 + 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0 + 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0 +) + +win1251_char_to_order_map = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 +155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 +253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 + 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 +191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, +207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, +223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, +239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253, + 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, + 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, + 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, + 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, +) + +latin5_char_to_order_map = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 +155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 +253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 + 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 +191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, +207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, +223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, + 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, + 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 
43, + 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, + 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, +239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, +) + +macCyrillic_char_to_order_map = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 +155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 +253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 + 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 + 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, + 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, +191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, +207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, +223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, +239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16, + 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, + 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255, +) + +IBM855_char_to_order_map = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 +155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 +253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 + 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 +191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205, +206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70, + 3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219, +220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229, +230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243, + 8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248, + 43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249, +250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255, +) + +IBM866_char_to_order_map = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 +155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 +253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 + 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 + 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, + 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, + 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, +191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, +207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, +223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, + 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, +239, 
68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, +) + +# Model Table: +# total sequences: 100% +# first 512 sequences: 97.6601% +# first 1024 sequences: 2.3389% +# rest sequences: 0.1237% +# negative sequences: 0.0009% +RussianLangModel = ( +0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2, +3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1, +0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1, +0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, +3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, +2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
+2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, +3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0, +0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1, +1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, +2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1, +1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0, +2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1, +1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0, +3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1, +1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0, +2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2, +1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1, +1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1, +1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0, +2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1, +1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0, +3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2, +1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1, +2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1, +1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0, +2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0, +0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1, +1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0, +1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1, +1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0, +3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1, +2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1, +3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1, +1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1, +1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1, +0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0, +2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1, +1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0, +1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1, +0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1, +1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0, +2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2, +2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1, +1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0, +1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0, +2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0, +1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1, 
+0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0, +2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1, +1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1, +1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0, +0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, +0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1, +0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1, +0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, +1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1, +0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, +1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0, +0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0, +1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1, +0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1, +2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0, +0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, +) + +Koi8rModel = { + 'char_to_order_map': KOI8R_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "KOI8-R", + 'language': 'Russian', +} + +Win1251CyrillicModel = { + 'char_to_order_map': win1251_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "windows-1251", + 'language': 'Russian', +} + +Latin5CyrillicModel = { + 'char_to_order_map': latin5_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "ISO-8859-5", + 'language': 'Russian', +} + +MacCyrillicModel = { + 'char_to_order_map': macCyrillic_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "MacCyrillic", + 'language': 'Russian', +} + +Ibm866Model = { + 'char_to_order_map': IBM866_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "IBM866", + 'language': 'Russian', +} + +Ibm855Model = { + 'char_to_order_map': IBM855_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "IBM855", + 'language': 'Russian', +} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langcyrillicmodel.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langcyrillicmodel.pyc new file mode 100644 index 0000000..4ceb904 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langcyrillicmodel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langgreekmodel.py 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langgreekmodel.py new file mode 100644 index 0000000..5332221 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langgreekmodel.py @@ -0,0 +1,225 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# 255: Control characters that usually does not exist in any text +# 254: Carriage/Return +# 253: symbol (punctuation) that does not belong to word +# 252: 0 - 9 + +# Character Mapping Table: +Latin7_char_to_order_map = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 + 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 +253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 + 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 +253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 +253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0 +110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 + 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 +124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 + 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 +) + +win1253_char_to_order_map = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 + 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 +253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 + 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 +253,233, 
61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 +253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0 +110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 + 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 +124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 + 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 +) + +# Model Table: +# total sequences: 100% +# first 512 sequences: 98.2851% +# first 1024 sequences:1.7001% +# rest sequences: 0.0359% +# negative sequences: 0.0148% +GreekLangModel = ( +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0, +3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, +0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0, +2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0, +0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0, +2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, +0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0, +2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0, +0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0, +2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0, +0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0, +3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0, +3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0, +2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0, +2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
+0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0, +0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0, +0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0, +0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2, +0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0, +0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2, +0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0, +0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2, +0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2, +0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0, +0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2, +0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0, +0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0, +0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, +0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0, +0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0, +0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0, +0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2, +0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0, +0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2, +0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0, +0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2, 
+0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0, +0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2, +0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0, +0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1, +0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0, +0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2, +0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, +0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2, +0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2, +0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0, +0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0, +0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1, +0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, +0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0, +0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0, +0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +) + +Latin7GreekModel = { + 'char_to_order_map': Latin7_char_to_order_map, + 'precedence_matrix': GreekLangModel, + 'typical_positive_ratio': 0.982851, + 'keep_english_letter': False, + 'charset_name': "ISO-8859-7", + 'language': 'Greek', +} + +Win1253GreekModel = { + 'char_to_order_map': win1253_char_to_order_map, + 'precedence_matrix': GreekLangModel, + 'typical_positive_ratio': 0.982851, + 'keep_english_letter': False, + 'charset_name': "windows-1253", + 'language': 'Greek', +} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langgreekmodel.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langgreekmodel.pyc new file mode 100644 index 0000000..9b4e74d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langgreekmodel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhebrewmodel.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhebrewmodel.py new file mode 100644 index 0000000..58f4c87 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhebrewmodel.py @@ -0,0 +1,200 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Simon Montagu +# Portions created by the Initial Developer are Copyright (C) 2005 +# the Initial Developer. All Rights Reserved. 
+# +# Contributor(s): +# Mark Pilgrim - port to Python +# Shy Shalom - original C code +# Shoshannah Forbes - original C code (?) +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# 255: Control characters that usually does not exist in any text +# 254: Carriage/Return +# 253: symbol (punctuation) that does not belong to word +# 252: 0 - 9 + +# Windows-1255 language model +# Character Mapping Table: +WIN1255_CHAR_TO_ORDER_MAP = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40 + 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50 +253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60 + 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70 +124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214, +215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221, + 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227, +106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234, + 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237, +238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250, + 9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23, + 12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253, +) + +# Model Table: +# total sequences: 100% +# first 512 sequences: 98.4004% +# first 1024 sequences: 1.5981% +# rest sequences: 0.087% +# negative sequences: 0.0015% +HEBREW_LANG_MODEL = ( +0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0, +3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2, +1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2, +1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3, +1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2, +1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2, +1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2, +0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2, +0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0, 
+3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2, +1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0, +3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2, +0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1, +0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0, +0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2, +0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2, +0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0, +3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2, +0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2, +0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2, +0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2, +0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1, +0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2, +0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0, +3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2, +0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2, +0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2, +0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0, +1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2, +0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0, +0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, +3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3, +0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0, +0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, +0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0, +0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, 
+0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0, +2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0, +0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2, +0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0, +0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1, +1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1, +0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1, +2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1, +1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1, +2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1, +1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1, +2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0, +0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1, 
+1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1, +0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0, +) + +Win1255HebrewModel = { + 'char_to_order_map': WIN1255_CHAR_TO_ORDER_MAP, + 'precedence_matrix': HEBREW_LANG_MODEL, + 'typical_positive_ratio': 0.984004, + 'keep_english_letter': False, + 'charset_name': "windows-1255", + 'language': 'Hebrew', +} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhebrewmodel.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhebrewmodel.pyc new file mode 100644 index 0000000..f5a0ad2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhebrewmodel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhungarianmodel.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhungarianmodel.py new file mode 100644 index 0000000..bb7c095 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhungarianmodel.py @@ -0,0 +1,225 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# 255: Control characters that usually does not exist in any text +# 254: Carriage/Return +# 253: symbol (punctuation) that does not belong to word +# 252: 0 - 9 + +# Character Mapping Table: +Latin2_HungarianCharToOrderMap = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47, + 46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253, +253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8, + 23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253, +159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174, +175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190, +191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205, + 79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, +221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231, +232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241, + 82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85, +245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253, +) + +win1250HungarianCharToOrderMap = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47, + 46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253, +253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8, + 23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253, +161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176, +177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190, +191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205, + 81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220, +221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231, +232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241, + 84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87, +245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253, +) + +# Model Table: +# total sequences: 100% +# first 512 sequences: 94.7368% +# first 1024 sequences:5.2623% +# rest sequences: 0.8894% +# negative sequences: 0.0009% +HungarianLangModel = ( +0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, +3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2, +3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0, +3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3, +0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, +3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0, 
+3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2, +0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0, +3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, +3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, +3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2, +0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0, +2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1, +0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, +3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0, +1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0, +1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0, 
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0, +1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1, +3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1, +2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1, +2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1, +2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1, +2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0, +2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1, +3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1, +2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1, +2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1, +2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1, +1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1, +1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1, +3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0, +1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1, +1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1, +2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1, +2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0, +2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1, +3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1, +2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1, +1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0, +1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0, +2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1, +2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1, +1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0, +1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1, +2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0, +1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0, +1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0, +2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1, +2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1, +2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1, +1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1, +1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1, +1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0, +0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0, +2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1, +2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1, +1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1, +2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1, +1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0, +1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0, +2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0, +2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1, +2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0, +1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0, 
+2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0, +0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0, +1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0, +0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0, +1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, +0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0, +0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0, +2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0, +0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0, +) + +Latin2HungarianModel = { + 'char_to_order_map': Latin2_HungarianCharToOrderMap, + 'precedence_matrix': HungarianLangModel, + 'typical_positive_ratio': 0.947368, + 'keep_english_letter': True, + 'charset_name': "ISO-8859-2", + 'language': 'Hungarian', +} + +Win1250HungarianModel = { + 'char_to_order_map': win1250HungarianCharToOrderMap, + 'precedence_matrix': HungarianLangModel, + 'typical_positive_ratio': 0.947368, + 'keep_english_letter': True, + 'charset_name': "windows-1250", + 'language': 'Hungarian', +} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhungarianmodel.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhungarianmodel.pyc new file mode 100644 index 0000000..7b568a0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langhungarianmodel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langthaimodel.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langthaimodel.py new file mode 100644 index 0000000..15f94c2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langthaimodel.py @@ -0,0 +1,199 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# 255: Control characters that usually does not exist in any text +# 254: Carriage/Return +# 253: symbol (punctuation) that does not belong to word +# 252: 0 - 9 + +# The following result for thai was collected from a limited sample (1M). 
+ +# Character Mapping Table: +TIS620CharToOrderMap = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40 +188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50 +253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60 + 96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70 +209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222, +223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235, +236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57, + 49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54, + 45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63, + 22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244, + 11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247, + 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253, +) + +# Model Table: +# total sequences: 100% +# first 512 sequences: 92.6386% +# first 1024 sequences:7.3177% +# rest sequences: 1.0230% +# negative sequences: 0.0436% +ThaiLangModel = ( +0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3, +0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2, +3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3, +0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1, +3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2, +3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1, +3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2, +3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1, +3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1, +3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0, +3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1, +2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1, +3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1, +0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0, +3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1, +0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0, +3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2, +1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0, +3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3, +3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0, +1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2, +0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0, +2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3, +0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0, +3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1, +2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0, +3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2, +0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2, +3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, +3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0, +2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2, +3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1, 
+2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1, +3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1, +3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0, +3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1, +3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1, +3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1, +1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2, +0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3, +0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1, +3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0, +3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1, +1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0, +3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1, +3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0, +0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2, +0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, +0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0, +0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0, +1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1, +1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1, +3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1, +0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, +0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0, +0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, +3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0, +3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0, +0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1, +0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0, +0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1, +0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1, +0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0, +0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1, +0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0, +3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0, +0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
+3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0, +0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0, +3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1, +2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1, +0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0, +3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0, +0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, +2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0, +1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0, +1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, +1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0, +1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +) + +TIS620ThaiModel = { + 'char_to_order_map': TIS620CharToOrderMap, + 'precedence_matrix': ThaiLangModel, + 'typical_positive_ratio': 0.926386, + 'keep_english_letter': False, + 'charset_name': "TIS-620", + 'language': 'Thai', +} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langthaimodel.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langthaimodel.pyc new file mode 100644 index 0000000..a9745d9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langthaimodel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langturkishmodel.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langturkishmodel.py new file mode 100644 index 
0000000..a427a45 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langturkishmodel.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# Özgür Baskın - Turkish Language Model +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# 255: Control characters that usually does not exist in any text +# 254: Carriage/Return +# 253: symbol (punctuation) that does not belong to word +# 252: 0 - 9 + +# Character Mapping Table: +Latin5_TurkishCharToOrderMap = ( +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, +255, 23, 37, 47, 39, 29, 52, 36, 45, 53, 60, 16, 49, 20, 46, 42, + 48, 69, 44, 35, 31, 51, 38, 62, 65, 43, 56,255,255,255,255,255, +255, 1, 21, 28, 12, 2, 18, 27, 25, 3, 24, 10, 5, 13, 4, 15, + 26, 64, 7, 8, 9, 14, 32, 57, 58, 11, 22,255,255,255,255,255, +180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165, +164,163,162,161,160,159,101,158,157,156,155,154,153,152,151,106, +150,149,148,147,146,145,144,100,143,142,141,140,139,138,137,136, + 94, 80, 93,135,105,134,133, 63,132,131,130,129,128,127,126,125, +124,104, 73, 99, 79, 85,123, 54,122, 98, 92,121,120, 91,103,119, + 68,118,117, 97,116,115, 50, 90,114,113,112,111, 55, 41, 40, 86, + 89, 70, 59, 78, 71, 82, 88, 33, 77, 66, 84, 83,110, 75, 61, 96, + 30, 67,109, 74, 87,102, 34, 95, 81,108, 76, 72, 17, 6, 19,107, +) + +TurkishLangModel = ( +3,2,3,3,3,1,3,3,3,3,3,3,3,3,2,1,1,3,3,1,3,3,0,3,3,3,3,3,0,3,1,3, +3,2,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1, +3,2,2,3,3,0,3,3,3,3,3,3,3,2,3,1,0,3,3,1,3,3,0,3,3,3,3,3,0,3,0,3, +3,1,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,0,1,0,1, +3,3,2,3,3,0,3,3,3,3,3,3,3,2,3,1,1,3,3,0,3,3,1,2,3,3,3,3,0,3,0,3, +3,1,1,0,0,0,1,0,0,0,0,1,1,0,1,2,1,0,0,0,1,0,0,0,0,2,0,0,0,0,0,1, +3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,1,3,3,2,0,3,2,1,2,2,1,3,3,0,0,0,2, +2,2,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,1,0,0,1, +3,3,3,2,3,3,1,2,3,3,3,3,3,3,3,1,3,2,1,0,3,2,0,1,2,3,3,2,1,0,0,2, +2,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0,0,0, +1,0,1,3,3,1,3,3,3,3,3,3,3,1,2,0,0,2,3,0,2,3,0,0,2,2,2,3,0,3,0,1, +2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,0,3,2,0,2,3,2,3,3,1,0,0,2, +3,2,0,0,1,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,1,1,1,0,2,0,0,1, +3,3,3,2,3,3,2,3,3,3,3,2,3,3,3,0,3,3,0,0,2,1,0,0,2,3,2,2,0,0,0,2, +2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,2,0,0,1, +3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,0,1,3,2,1,1,3,2,3,2,1,0,0,2, +2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, +3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,2,0,2,3,0,0,2,2,2,2,0,0,0,2, +3,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0, +3,3,3,3,3,3,3,2,2,2,2,3,2,3,3,0,3,3,1,1,2,2,0,0,2,2,3,2,0,0,1,3, +0,3,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1, +3,3,3,2,3,3,3,2,1,2,2,3,2,3,3,0,3,2,0,0,1,1,0,1,1,2,1,2,0,0,0,1, +0,3,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0, +3,3,3,2,3,3,2,3,2,2,2,3,3,3,3,1,3,1,1,0,3,2,1,1,3,3,2,3,1,0,0,1, +1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,1, +3,2,2,3,3,0,3,3,3,3,3,3,3,2,2,1,0,3,3,1,3,3,0,1,3,3,2,3,0,3,0,3, +2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, +2,2,2,3,3,0,3,3,3,3,3,3,3,3,3,0,0,3,2,0,3,3,0,3,2,3,3,3,0,3,1,3, +2,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1, +3,3,3,1,2,3,3,1,0,0,1,0,0,3,3,2,3,0,0,2,0,0,2,0,2,0,0,0,2,0,2,0, +0,3,1,0,1,0,0,0,2,2,1,0,1,1,2,1,2,2,2,0,2,1,1,0,0,0,2,0,0,0,0,0, +1,2,1,3,3,0,3,3,3,3,3,2,3,0,0,0,0,2,3,0,2,3,1,0,2,3,1,3,0,3,0,2, +3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,1,3,3,2,2,3,2,2,0,1,2,3,0,1,2,1,0,1,0,0,0,1,0,2,2,0,0,0,1, +1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0, +3,3,3,1,3,3,1,1,3,3,1,1,3,3,1,0,2,1,2,0,2,1,0,0,1,1,2,1,0,0,0,2, +2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,1,0,2,1,3,0,0,2,0,0,3,3,0,3,0,0,1,0,1,2,0,0,1,1,2,2,0,1,0, +0,1,2,1,1,0,1,0,1,1,1,1,1,0,1,1,1,2,2,1,2,0,1,0,0,0,0,0,0,1,0,0, +3,3,3,2,3,2,3,3,0,2,2,2,3,3,3,0,3,0,0,0,2,2,0,1,2,1,1,1,0,0,0,1, +0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0, +3,3,3,3,3,3,2,1,2,2,3,3,3,3,2,0,2,0,0,0,2,2,0,0,2,1,3,3,0,0,1,1, +1,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0, +1,1,2,3,3,0,3,3,3,3,3,3,2,2,0,2,0,2,3,2,3,2,2,2,2,2,2,2,1,3,2,3, +2,0,2,1,2,2,2,2,1,1,2,2,1,2,2,1,2,0,0,2,1,1,0,2,1,0,0,1,0,0,0,1, +2,3,3,1,1,1,0,1,1,1,2,3,2,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0, +0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,2,2,2,3,2,3,2,2,1,3,3,3,0,2,1,2,0,2,1,0,0,1,1,1,1,1,0,0,1, +2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0, +3,3,3,2,3,3,3,3,3,2,3,1,2,3,3,1,2,0,0,0,0,0,0,0,3,2,1,1,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, +3,3,3,2,2,3,3,2,1,1,1,1,1,3,3,0,3,1,0,0,1,1,0,0,3,1,2,1,0,0,0,0, +0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0, +3,3,3,2,2,3,2,2,2,3,2,1,1,3,3,0,3,0,0,0,0,1,0,0,3,1,1,2,0,0,0,1, +1,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +1,1,1,3,3,0,3,3,3,3,3,2,2,2,1,2,0,2,1,2,2,1,1,0,1,2,2,2,2,2,2,2, +0,0,2,1,2,1,2,1,0,1,1,3,1,2,1,1,2,0,0,2,0,1,0,1,0,1,0,0,0,1,0,1, +3,3,3,1,3,3,3,0,1,1,0,2,2,3,1,0,3,0,0,0,1,0,0,0,1,0,0,1,0,1,0,0, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,2,0,0,2,2,1,0,0,1,0,0,3,3,1,3,0,0,1,1,0,2,0,3,0,0,0,2,0,1,1, +0,1,2,0,1,2,2,0,2,2,2,2,1,0,2,1,1,0,2,0,2,1,2,0,0,0,0,0,0,0,0,0, +3,3,3,1,3,2,3,2,0,2,2,2,1,3,2,0,2,1,2,0,1,2,0,0,1,0,2,2,0,0,0,2, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0, +3,3,3,0,3,3,1,1,2,3,1,0,3,2,3,0,3,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0, 
+1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,3,3,0,3,3,2,3,3,2,2,0,0,0,0,1,2,0,1,3,0,0,0,3,1,1,0,3,0,2, +2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,1,2,2,1,0,3,1,1,1,1,3,3,2,3,0,0,1,0,1,2,0,2,2,0,2,2,0,2,1, +0,2,2,1,1,1,1,0,2,1,1,0,1,1,1,1,2,1,2,1,2,0,1,0,1,0,0,0,0,0,0,0, +3,3,3,0,1,1,3,0,0,1,1,0,0,2,2,0,3,0,0,1,1,0,1,0,0,0,0,0,2,0,0,0, +0,3,1,0,1,0,1,0,2,0,0,1,0,1,0,1,1,1,2,1,1,0,2,0,0,0,0,0,0,0,0,0, +3,3,3,0,2,0,2,0,1,1,1,0,0,3,3,0,2,0,0,1,0,0,2,1,1,0,1,0,1,0,1,0, +0,2,0,1,2,0,2,0,2,1,1,0,1,0,2,1,1,0,2,1,1,0,1,0,0,0,1,1,0,0,0,0, +3,2,3,0,1,0,0,0,0,0,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,0,2,0,0,0, +0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,2,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,0,0,2,3,0,0,1,0,1,0,2,3,2,3,0,0,1,3,0,2,1,0,0,0,0,2,0,1,0, +0,2,1,0,0,1,1,0,2,1,0,0,1,0,0,1,1,0,1,1,2,0,1,0,0,0,0,1,0,0,0,0, +3,2,2,0,0,1,1,0,0,0,0,0,0,3,1,1,1,0,0,0,0,0,1,0,0,0,0,0,2,0,1,0, +0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,3,3,0,2,3,2,2,1,2,2,1,1,2,0,1,3,2,2,2,0,0,2,2,0,0,0,1,2,1, +3,0,2,1,1,0,1,1,1,0,1,2,2,2,1,1,2,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0, +0,1,1,2,3,0,3,3,3,2,2,2,2,1,0,1,0,1,0,1,2,2,0,0,2,2,1,3,1,1,2,1, +0,0,1,1,2,0,1,1,0,0,1,2,0,2,1,1,2,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0, +3,3,2,0,0,3,1,0,0,0,0,0,0,3,2,1,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0, +0,2,1,1,0,0,1,0,1,2,0,0,1,1,0,0,2,1,1,1,1,0,2,0,0,0,0,0,0,0,0,0, +3,3,2,0,0,1,0,0,0,0,1,0,0,3,3,2,2,0,0,1,0,0,2,0,1,0,0,0,2,0,1,0, +0,0,1,1,0,0,2,0,2,1,0,0,1,1,2,1,2,0,2,1,2,1,1,1,0,0,1,1,0,0,0,0, +3,3,2,0,0,2,2,0,0,0,1,1,0,2,2,1,3,1,0,1,0,1,2,0,0,0,0,0,1,0,1,0, +0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,2,0,0,0,1,0,0,1,0,0,2,3,1,2,0,0,1,0,0,2,0,0,0,1,0,2,0,2,0, +0,1,1,2,2,1,2,0,2,1,1,0,0,1,1,0,1,1,1,1,2,1,1,0,0,0,0,0,0,0,0,0, +3,3,3,0,2,1,2,1,0,0,1,1,0,3,3,1,2,0,0,1,0,0,2,0,2,0,1,1,2,0,0,0, +0,0,1,1,1,1,2,0,1,1,0,1,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,0,0,0,0,0, +3,3,3,0,2,2,3,2,0,0,1,0,0,2,3,1,0,0,0,0,0,0,2,0,2,0,0,0,2,0,0,0, +0,1,1,0,0,0,1,0,0,1,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0, +3,2,3,0,0,0,0,0,0,0,1,0,0,2,2,2,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0, +0,0,2,1,1,0,1,0,2,1,1,0,0,1,1,2,1,0,2,0,2,0,1,0,0,0,2,0,0,0,0,0, +0,0,0,2,2,0,2,1,1,1,1,2,2,0,0,1,0,1,0,0,1,3,0,0,0,0,1,0,0,2,1,0, +0,0,1,0,1,0,0,0,0,0,2,1,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, +2,0,0,2,3,0,2,3,1,2,2,0,2,0,0,2,0,2,1,1,1,2,1,0,0,1,2,1,1,2,1,0, +1,0,2,0,1,0,1,1,0,0,2,2,1,2,1,1,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,0,2,1,2,0,0,0,1,0,0,3,2,0,1,0,0,1,0,0,2,0,0,0,1,2,1,0,1,0, +0,0,0,0,1,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,2,2,0,2,2,1,1,0,1,1,1,1,1,0,0,1,2,1,1,1,0,1,0,0,0,1,1,1,1, +0,0,2,1,0,1,1,1,0,1,1,2,1,2,1,1,2,0,1,1,2,1,0,2,0,0,0,0,0,0,0,0, +3,2,2,0,0,2,0,0,0,0,0,0,0,2,2,0,2,0,0,1,0,0,2,0,0,0,0,0,2,0,0,0, +0,2,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,3,2,0,2,2,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0, +2,0,1,0,1,0,1,1,0,0,1,2,0,1,0,1,1,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0, +2,2,2,0,1,1,0,0,0,1,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,1,2,0,1,0, +0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,2,1,0,1,1,1,0,0,0,0,1,2,0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, +1,1,2,0,1,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,1, +0,0,1,2,2,0,2,1,2,1,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,0,0,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0, 
+2,2,2,0,0,0,1,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,1,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +) + +Latin5TurkishModel = { + 'char_to_order_map': Latin5_TurkishCharToOrderMap, + 'precedence_matrix': TurkishLangModel, + 'typical_positive_ratio': 0.970290, + 'keep_english_letter': True, + 'charset_name': "ISO-8859-9", + 'language': 'Turkish', +} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langturkishmodel.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langturkishmodel.pyc new file mode 100644 index 0000000..540c3c2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/langturkishmodel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/latin1prober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/latin1prober.py new file mode 100644 index 0000000..7d1e8c2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/latin1prober.py @@ -0,0 +1,145 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 2001 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# Shy Shalom - original C code +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .charsetprober import CharSetProber +from .enums import ProbingState + +FREQ_CAT_NUM = 4 + +UDF = 0 # undefined +OTH = 1 # other +ASC = 2 # ascii capital letter +ASS = 3 # ascii small letter +ACV = 4 # accent capital vowel +ACO = 5 # accent capital other +ASV = 6 # accent small vowel +ASO = 7 # accent small other +CLASS_NUM = 8 # total classes + +Latin1_CharToClass = ( + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F + OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47 + ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F + ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57 + ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F + OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67 + ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F + ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77 + ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F + OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87 + OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F + UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97 + OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF + ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7 + ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF + ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7 + ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF + ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7 + ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF + ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7 + ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF +) + +# 0 : illegal +# 1 : very unlikely +# 2 : normal +# 3 : very likely +Latin1ClassModel = ( +# UDF OTH ASC ASS ACV ACO ASV ASO + 0, 0, 0, 0, 0, 0, 0, 0, # UDF + 0, 3, 3, 3, 3, 3, 3, 3, # OTH + 0, 3, 3, 3, 3, 3, 3, 3, # ASC + 0, 3, 3, 3, 1, 1, 3, 3, # ASS + 0, 3, 3, 3, 1, 2, 1, 2, # ACV + 0, 3, 3, 3, 3, 3, 3, 3, # ACO + 0, 3, 1, 3, 1, 1, 1, 3, # ASV + 0, 3, 1, 3, 1, 1, 3, 3, # ASO +) + + +class Latin1Prober(CharSetProber): + def __init__(self): + super(Latin1Prober, self).__init__() + self._last_char_class = None + self._freq_counter = None + self.reset() + + def reset(self): + self._last_char_class = OTH + self._freq_counter = [0] * FREQ_CAT_NUM + CharSetProber.reset(self) + + @property + def charset_name(self): + return "ISO-8859-1" + + @property + def language(self): + return "" + + def feed(self, byte_str): + byte_str = self.filter_with_english_letters(byte_str) + for c in byte_str: + char_class = Latin1_CharToClass[c] + freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + + char_class] + if freq == 0: + self._state = ProbingState.NOT_ME + break + self._freq_counter[freq] += 1 + self._last_char_class = char_class + + return self.state + + def get_confidence(self): + if self.state == 
ProbingState.NOT_ME: + return 0.01 + + total = sum(self._freq_counter) + if total < 0.01: + confidence = 0.0 + else: + confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0) + / total) + if confidence < 0.0: + confidence = 0.0 + # lower the confidence of latin1 so that other, more accurate + # detectors can take priority. + confidence = confidence * 0.73 + return confidence diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/latin1prober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/latin1prober.pyc new file mode 100644 index 0000000..3078088 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/latin1prober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcharsetprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcharsetprober.py new file mode 100644 index 0000000..6256ecf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcharsetprober.py @@ -0,0 +1,91 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 2001 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# Shy Shalom - original C code +# Proofpoint, Inc. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details.
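Latin1Prober, completed above, buckets every byte into eight classes and scores consecutive class pairs against Latin1ClassModel; the closing multiplication by 0.73 deliberately handicaps Latin-1 so that more specific probers win near-ties. A usage sketch under the same standalone-chardet assumption, with an arbitrary sample:

    # -*- coding: utf-8 -*-
    from chardet.latin1prober import Latin1Prober

    prober = Latin1Prober()
    prober.feed(bytearray(u'déjà vu à côté'.encode('latin-1')))
    print(prober.charset_name, prober.get_confidence())  # ISO-8859-1, damped score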
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .charsetprober import CharSetProber +from .enums import ProbingState, MachineState + + +class MultiByteCharSetProber(CharSetProber): + """ + MultiByteCharSetProber + """ + + def __init__(self, lang_filter=None): + super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter) + self.distribution_analyzer = None + self.coding_sm = None + self._last_char = [0, 0] + + def reset(self): + super(MultiByteCharSetProber, self).reset() + if self.coding_sm: + self.coding_sm.reset() + if self.distribution_analyzer: + self.distribution_analyzer.reset() + self._last_char = [0, 0] + + @property + def charset_name(self): + raise NotImplementedError + + @property + def language(self): + raise NotImplementedError + + def feed(self, byte_str): + for i in range(len(byte_str)): + coding_state = self.coding_sm.next_state(byte_str[i]) + if coding_state == MachineState.ERROR: + self.logger.debug('%s %s prober hit error at byte %s', + self.charset_name, self.language, i) + self._state = ProbingState.NOT_ME + break + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT + break + elif coding_state == MachineState.START: + char_len = self.coding_sm.get_current_charlen() + if i == 0: + self._last_char[1] = byte_str[0] + self.distribution_analyzer.feed(self._last_char, char_len) + else: + self.distribution_analyzer.feed(byte_str[i - 1:i + 1], + char_len) + + self._last_char[0] = byte_str[-1] + + if self.state == ProbingState.DETECTING: + if (self.distribution_analyzer.got_enough_data() and + (self.get_confidence() > self.SHORTCUT_THRESHOLD)): + self._state = ProbingState.FOUND_IT + + return self.state + + def get_confidence(self): + return self.distribution_analyzer.get_confidence() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcharsetprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcharsetprober.pyc new file mode 100644 index 0000000..aab38c9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcharsetprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcsgroupprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcsgroupprober.py new file mode 100644 index 0000000..530abe7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcsgroupprober.py @@ -0,0 +1,54 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 2001 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# Shy Shalom - original C code +# Proofpoint, Inc. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
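MultiByteCharSetProber, completed above, is the shared driver for all multi-byte encodings: a coding state machine validates byte sequences while a distribution analyzer accumulates character-frequency evidence, and each subclass only wires in the matching pair. A sketch using the Big5 subclass, again assuming the standalone chardet package:

    # -*- coding: utf-8 -*-
    from chardet.big5prober import Big5Prober  # a concrete MultiByteCharSetProber

    prober = Big5Prober()
    prober.feed(bytearray(u'繁體中文'.encode('big5')))
    print(prober.charset_name, prober.get_confidence())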
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .charsetgroupprober import CharSetGroupProber +from .utf8prober import UTF8Prober +from .sjisprober import SJISProber +from .eucjpprober import EUCJPProber +from .gb2312prober import GB2312Prober +from .euckrprober import EUCKRProber +from .cp949prober import CP949Prober +from .big5prober import Big5Prober +from .euctwprober import EUCTWProber + + +class MBCSGroupProber(CharSetGroupProber): + def __init__(self, lang_filter=None): + super(MBCSGroupProber, self).__init__(lang_filter=lang_filter) + self.probers = [ + UTF8Prober(), + SJISProber(), + EUCJPProber(), + GB2312Prober(), + EUCKRProber(), + CP949Prober(), + Big5Prober(), + EUCTWProber() + ] + self.reset() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcsgroupprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcsgroupprober.pyc new file mode 100644 index 0000000..c4c7176 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcsgroupprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcssm.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcssm.py new file mode 100644 index 0000000..8360d0f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcssm.py @@ -0,0 +1,572 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
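MBCSGroupProber, completed above, simply races the eight multi-byte probers and reports whichever is most confident. A sketch under the same assumption:

    # -*- coding: utf-8 -*-
    from chardet.mbcsgroupprober import MBCSGroupProber

    group = MBCSGroupProber()
    group.feed(bytearray(u'こんにちは世界'.encode('utf-8')))
    print(group.charset_name, group.get_confidence())  # the UTF-8 prober should lead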
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .enums import MachineState + +# BIG5 + +BIG5_CLS = ( + 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value + 1,1,1,1,1,1,0,0, # 08 - 0f + 1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 1,1,1,1,1,1,1,1, # 30 - 37 + 1,1,1,1,1,1,1,1, # 38 - 3f + 2,2,2,2,2,2,2,2, # 40 - 47 + 2,2,2,2,2,2,2,2, # 48 - 4f + 2,2,2,2,2,2,2,2, # 50 - 57 + 2,2,2,2,2,2,2,2, # 58 - 5f + 2,2,2,2,2,2,2,2, # 60 - 67 + 2,2,2,2,2,2,2,2, # 68 - 6f + 2,2,2,2,2,2,2,2, # 70 - 77 + 2,2,2,2,2,2,2,1, # 78 - 7f + 4,4,4,4,4,4,4,4, # 80 - 87 + 4,4,4,4,4,4,4,4, # 88 - 8f + 4,4,4,4,4,4,4,4, # 90 - 97 + 4,4,4,4,4,4,4,4, # 98 - 9f + 4,3,3,3,3,3,3,3, # a0 - a7 + 3,3,3,3,3,3,3,3, # a8 - af + 3,3,3,3,3,3,3,3, # b0 - b7 + 3,3,3,3,3,3,3,3, # b8 - bf + 3,3,3,3,3,3,3,3, # c0 - c7 + 3,3,3,3,3,3,3,3, # c8 - cf + 3,3,3,3,3,3,3,3, # d0 - d7 + 3,3,3,3,3,3,3,3, # d8 - df + 3,3,3,3,3,3,3,3, # e0 - e7 + 3,3,3,3,3,3,3,3, # e8 - ef + 3,3,3,3,3,3,3,3, # f0 - f7 + 3,3,3,3,3,3,3,0 # f8 - ff +) + +BIG5_ST = ( + MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f + MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17 +) + +BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0) + +BIG5_SM_MODEL = {'class_table': BIG5_CLS, + 'class_factor': 5, + 'state_table': BIG5_ST, + 'char_len_table': BIG5_CHAR_LEN_TABLE, + 'name': 'Big5'} + +# CP949 + +CP949_CLS = ( + 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f + 1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f + 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f + 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f + 1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f + 4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f + 1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f + 5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f + 0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f + 6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f + 6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af + 7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf + 7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf + 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df + 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef + 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff +) + +CP949_ST = ( +#cls= 0 1 2 3 4 5 6 7 8 9 # previous state = + MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME + 
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3 + MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4 + MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5 + MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6 +) + +CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2) + +CP949_SM_MODEL = {'class_table': CP949_CLS, + 'class_factor': 10, + 'state_table': CP949_ST, + 'char_len_table': CP949_CHAR_LEN_TABLE, + 'name': 'CP949'} + +# EUC-JP + +EUCJP_CLS = ( + 4,4,4,4,4,4,4,4, # 00 - 07 + 4,4,4,4,4,4,5,5, # 08 - 0f + 4,4,4,4,4,4,4,4, # 10 - 17 + 4,4,4,5,4,4,4,4, # 18 - 1f + 4,4,4,4,4,4,4,4, # 20 - 27 + 4,4,4,4,4,4,4,4, # 28 - 2f + 4,4,4,4,4,4,4,4, # 30 - 37 + 4,4,4,4,4,4,4,4, # 38 - 3f + 4,4,4,4,4,4,4,4, # 40 - 47 + 4,4,4,4,4,4,4,4, # 48 - 4f + 4,4,4,4,4,4,4,4, # 50 - 57 + 4,4,4,4,4,4,4,4, # 58 - 5f + 4,4,4,4,4,4,4,4, # 60 - 67 + 4,4,4,4,4,4,4,4, # 68 - 6f + 4,4,4,4,4,4,4,4, # 70 - 77 + 4,4,4,4,4,4,4,4, # 78 - 7f + 5,5,5,5,5,5,5,5, # 80 - 87 + 5,5,5,5,5,5,1,3, # 88 - 8f + 5,5,5,5,5,5,5,5, # 90 - 97 + 5,5,5,5,5,5,5,5, # 98 - 9f + 5,2,2,2,2,2,2,2, # a0 - a7 + 2,2,2,2,2,2,2,2, # a8 - af + 2,2,2,2,2,2,2,2, # b0 - b7 + 2,2,2,2,2,2,2,2, # b8 - bf + 2,2,2,2,2,2,2,2, # c0 - c7 + 2,2,2,2,2,2,2,2, # c8 - cf + 2,2,2,2,2,2,2,2, # d0 - d7 + 2,2,2,2,2,2,2,2, # d8 - df + 0,0,0,0,0,0,0,0, # e0 - e7 + 0,0,0,0,0,0,0,0, # e8 - ef + 0,0,0,0,0,0,0,0, # f0 - f7 + 0,0,0,0,0,0,0,5 # f8 - ff +) + +EUCJP_ST = ( + 3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17 + MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f + 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27 +) + +EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0) + +EUCJP_SM_MODEL = {'class_table': EUCJP_CLS, + 'class_factor': 6, + 'state_table': EUCJP_ST, + 'char_len_table': EUCJP_CHAR_LEN_TABLE, + 'name': 'EUC-JP'} + +# EUC-KR + +EUCKR_CLS = ( + 1,1,1,1,1,1,1,1, # 00 - 07 + 1,1,1,1,1,1,0,0, # 08 - 0f + 1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 1,1,1,1,1,1,1,1, # 30 - 37 + 1,1,1,1,1,1,1,1, # 38 - 3f + 1,1,1,1,1,1,1,1, # 40 - 47 + 1,1,1,1,1,1,1,1, # 48 - 4f + 1,1,1,1,1,1,1,1, # 50 - 57 + 1,1,1,1,1,1,1,1, # 58 - 5f + 1,1,1,1,1,1,1,1, # 60 - 67 + 1,1,1,1,1,1,1,1, # 68 - 6f + 1,1,1,1,1,1,1,1, # 70 - 77 + 1,1,1,1,1,1,1,1, # 78 - 7f + 0,0,0,0,0,0,0,0, # 80 - 87 + 0,0,0,0,0,0,0,0, # 88 - 8f + 0,0,0,0,0,0,0,0, # 90 - 97 + 0,0,0,0,0,0,0,0, # 98 - 9f + 0,2,2,2,2,2,2,2, # a0 - a7 + 2,2,2,2,2,3,3,3, # 
a8 - af + 2,2,2,2,2,2,2,2, # b0 - b7 + 2,2,2,2,2,2,2,2, # b8 - bf + 2,2,2,2,2,2,2,2, # c0 - c7 + 2,3,2,2,2,2,2,2, # c8 - cf + 2,2,2,2,2,2,2,2, # d0 - d7 + 2,2,2,2,2,2,2,2, # d8 - df + 2,2,2,2,2,2,2,2, # e0 - e7 + 2,2,2,2,2,2,2,2, # e8 - ef + 2,2,2,2,2,2,2,2, # f0 - f7 + 2,2,2,2,2,2,2,0 # f8 - ff +) + +EUCKR_ST = ( + MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f +) + +EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0) + +EUCKR_SM_MODEL = {'class_table': EUCKR_CLS, + 'class_factor': 4, + 'state_table': EUCKR_ST, + 'char_len_table': EUCKR_CHAR_LEN_TABLE, + 'name': 'EUC-KR'} + +# EUC-TW + +EUCTW_CLS = ( + 2,2,2,2,2,2,2,2, # 00 - 07 + 2,2,2,2,2,2,0,0, # 08 - 0f + 2,2,2,2,2,2,2,2, # 10 - 17 + 2,2,2,0,2,2,2,2, # 18 - 1f + 2,2,2,2,2,2,2,2, # 20 - 27 + 2,2,2,2,2,2,2,2, # 28 - 2f + 2,2,2,2,2,2,2,2, # 30 - 37 + 2,2,2,2,2,2,2,2, # 38 - 3f + 2,2,2,2,2,2,2,2, # 40 - 47 + 2,2,2,2,2,2,2,2, # 48 - 4f + 2,2,2,2,2,2,2,2, # 50 - 57 + 2,2,2,2,2,2,2,2, # 58 - 5f + 2,2,2,2,2,2,2,2, # 60 - 67 + 2,2,2,2,2,2,2,2, # 68 - 6f + 2,2,2,2,2,2,2,2, # 70 - 77 + 2,2,2,2,2,2,2,2, # 78 - 7f + 0,0,0,0,0,0,0,0, # 80 - 87 + 0,0,0,0,0,0,6,0, # 88 - 8f + 0,0,0,0,0,0,0,0, # 90 - 97 + 0,0,0,0,0,0,0,0, # 98 - 9f + 0,3,4,4,4,4,4,4, # a0 - a7 + 5,5,1,1,1,1,1,1, # a8 - af + 1,1,1,1,1,1,1,1, # b0 - b7 + 1,1,1,1,1,1,1,1, # b8 - bf + 1,1,3,1,3,3,3,3, # c0 - c7 + 3,3,3,3,3,3,3,3, # c8 - cf + 3,3,3,3,3,3,3,3, # d0 - d7 + 3,3,3,3,3,3,3,3, # d8 - df + 3,3,3,3,3,3,3,3, # e0 - e7 + 3,3,3,3,3,3,3,3, # e8 - ef + 3,3,3,3,3,3,3,3, # f0 - f7 + 3,3,3,3,3,3,3,0 # f8 - ff +) + +EUCTW_ST = ( + MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17 + MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f + 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27 + MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f +) + +EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3) + +EUCTW_SM_MODEL = {'class_table': EUCTW_CLS, + 'class_factor': 7, + 'state_table': EUCTW_ST, + 'char_len_table': EUCTW_CHAR_LEN_TABLE, + 'name': 'x-euc-tw'} + +# GB2312 + +GB2312_CLS = ( + 1,1,1,1,1,1,1,1, # 00 - 07 + 1,1,1,1,1,1,0,0, # 08 - 0f + 1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 3,3,3,3,3,3,3,3, # 30 - 37 + 3,3,1,1,1,1,1,1, # 38 - 3f + 2,2,2,2,2,2,2,2, # 40 - 47 + 2,2,2,2,2,2,2,2, # 48 - 4f + 2,2,2,2,2,2,2,2, # 50 - 57 + 2,2,2,2,2,2,2,2, # 58 - 5f + 2,2,2,2,2,2,2,2, # 60 - 67 + 2,2,2,2,2,2,2,2, # 68 - 6f + 2,2,2,2,2,2,2,2, # 70 - 77 + 2,2,2,2,2,2,2,4, # 78 - 7f + 5,6,6,6,6,6,6,6, # 80 - 87 + 6,6,6,6,6,6,6,6, # 88 - 8f + 6,6,6,6,6,6,6,6, # 90 - 97 + 6,6,6,6,6,6,6,6, # 98 - 9f + 6,6,6,6,6,6,6,6, # a0 - a7 + 
6,6,6,6,6,6,6,6, # a8 - af + 6,6,6,6,6,6,6,6, # b0 - b7 + 6,6,6,6,6,6,6,6, # b8 - bf + 6,6,6,6,6,6,6,6, # c0 - c7 + 6,6,6,6,6,6,6,6, # c8 - cf + 6,6,6,6,6,6,6,6, # d0 - d7 + 6,6,6,6,6,6,6,6, # d8 - df + 6,6,6,6,6,6,6,6, # e0 - e7 + 6,6,6,6,6,6,6,6, # e8 - ef + 6,6,6,6,6,6,6,6, # f0 - f7 + 6,6,6,6,6,6,6,0 # f8 - ff +) + +GB2312_ST = ( + MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17 + 4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f + MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27 + MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f +) + +# To be accurate, the length of class 6 can be either 2 or 4. +# But it is not necessary to discriminate between the two since +# it is used for frequency analysis only, and we are validating +# each code range there as well. So it is safe to set it to be +# 2 here. +GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2) + +GB2312_SM_MODEL = {'class_table': GB2312_CLS, + 'class_factor': 7, + 'state_table': GB2312_ST, + 'char_len_table': GB2312_CHAR_LEN_TABLE, + 'name': 'GB2312'} + +# Shift_JIS + +SJIS_CLS = ( + 1,1,1,1,1,1,1,1, # 00 - 07 + 1,1,1,1,1,1,0,0, # 08 - 0f + 1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 1,1,1,1,1,1,1,1, # 30 - 37 + 1,1,1,1,1,1,1,1, # 38 - 3f + 2,2,2,2,2,2,2,2, # 40 - 47 + 2,2,2,2,2,2,2,2, # 48 - 4f + 2,2,2,2,2,2,2,2, # 50 - 57 + 2,2,2,2,2,2,2,2, # 58 - 5f + 2,2,2,2,2,2,2,2, # 60 - 67 + 2,2,2,2,2,2,2,2, # 68 - 6f + 2,2,2,2,2,2,2,2, # 70 - 77 + 2,2,2,2,2,2,2,1, # 78 - 7f + 3,3,3,3,3,2,2,3, # 80 - 87 + 3,3,3,3,3,3,3,3, # 88 - 8f + 3,3,3,3,3,3,3,3, # 90 - 97 + 3,3,3,3,3,3,3,3, # 98 - 9f + #0xa0 is illegal in sjis encoding, but some pages does + #contain such byte. We need to be more error forgiven. 
+ 2,2,2,2,2,2,2,2, # a0 - a7 + 2,2,2,2,2,2,2,2, # a8 - af + 2,2,2,2,2,2,2,2, # b0 - b7 + 2,2,2,2,2,2,2,2, # b8 - bf + 2,2,2,2,2,2,2,2, # c0 - c7 + 2,2,2,2,2,2,2,2, # c8 - cf + 2,2,2,2,2,2,2,2, # d0 - d7 + 2,2,2,2,2,2,2,2, # d8 - df + 3,3,3,3,3,3,3,3, # e0 - e7 + 3,3,3,3,3,4,4,4, # e8 - ef + 3,3,3,3,3,3,3,3, # f0 - f7 + 3,3,3,3,3,0,0,0) # f8 - ff + + +SJIS_ST = ( + MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17 +) + +SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0) + +SJIS_SM_MODEL = {'class_table': SJIS_CLS, + 'class_factor': 6, + 'state_table': SJIS_ST, + 'char_len_table': SJIS_CHAR_LEN_TABLE, + 'name': 'Shift_JIS'} + +# UCS2-BE + +UCS2BE_CLS = ( + 0,0,0,0,0,0,0,0, # 00 - 07 + 0,0,1,0,0,2,0,0, # 08 - 0f + 0,0,0,0,0,0,0,0, # 10 - 17 + 0,0,0,3,0,0,0,0, # 18 - 1f + 0,0,0,0,0,0,0,0, # 20 - 27 + 0,3,3,3,3,3,0,0, # 28 - 2f + 0,0,0,0,0,0,0,0, # 30 - 37 + 0,0,0,0,0,0,0,0, # 38 - 3f + 0,0,0,0,0,0,0,0, # 40 - 47 + 0,0,0,0,0,0,0,0, # 48 - 4f + 0,0,0,0,0,0,0,0, # 50 - 57 + 0,0,0,0,0,0,0,0, # 58 - 5f + 0,0,0,0,0,0,0,0, # 60 - 67 + 0,0,0,0,0,0,0,0, # 68 - 6f + 0,0,0,0,0,0,0,0, # 70 - 77 + 0,0,0,0,0,0,0,0, # 78 - 7f + 0,0,0,0,0,0,0,0, # 80 - 87 + 0,0,0,0,0,0,0,0, # 88 - 8f + 0,0,0,0,0,0,0,0, # 90 - 97 + 0,0,0,0,0,0,0,0, # 98 - 9f + 0,0,0,0,0,0,0,0, # a0 - a7 + 0,0,0,0,0,0,0,0, # a8 - af + 0,0,0,0,0,0,0,0, # b0 - b7 + 0,0,0,0,0,0,0,0, # b8 - bf + 0,0,0,0,0,0,0,0, # c0 - c7 + 0,0,0,0,0,0,0,0, # c8 - cf + 0,0,0,0,0,0,0,0, # d0 - d7 + 0,0,0,0,0,0,0,0, # d8 - df + 0,0,0,0,0,0,0,0, # e0 - e7 + 0,0,0,0,0,0,0,0, # e8 - ef + 0,0,0,0,0,0,0,0, # f0 - f7 + 0,0,0,0,0,0,4,5 # f8 - ff +) + +UCS2BE_ST = ( + 5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17 + 6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f + 6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27 + 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f + 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37 +) + +UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2) + +UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS, + 'class_factor': 6, + 'state_table': UCS2BE_ST, + 'char_len_table': UCS2BE_CHAR_LEN_TABLE, + 'name': 'UTF-16BE'} + +# UCS2-LE + +UCS2LE_CLS = ( + 0,0,0,0,0,0,0,0, # 00 - 07 + 0,0,1,0,0,2,0,0, # 08 - 0f + 0,0,0,0,0,0,0,0, # 10 - 17 + 0,0,0,3,0,0,0,0, # 18 - 1f + 0,0,0,0,0,0,0,0, # 20 - 27 + 0,3,3,3,3,3,0,0, # 28 - 2f + 0,0,0,0,0,0,0,0, # 30 - 37 + 0,0,0,0,0,0,0,0, # 38 - 3f + 0,0,0,0,0,0,0,0, # 40 - 47 + 0,0,0,0,0,0,0,0, # 48 - 4f + 0,0,0,0,0,0,0,0, # 50 - 57 + 0,0,0,0,0,0,0,0, # 58 - 5f + 0,0,0,0,0,0,0,0, # 60 - 67 + 0,0,0,0,0,0,0,0, # 68 - 6f + 0,0,0,0,0,0,0,0, # 70 - 77 + 0,0,0,0,0,0,0,0, # 78 - 7f + 0,0,0,0,0,0,0,0, # 80 - 87 + 0,0,0,0,0,0,0,0, # 88 - 8f + 0,0,0,0,0,0,0,0, # 90 - 97 + 0,0,0,0,0,0,0,0, # 98 - 9f + 0,0,0,0,0,0,0,0, # a0 - a7 + 0,0,0,0,0,0,0,0, # a8 - af + 0,0,0,0,0,0,0,0, # b0 - b7 + 0,0,0,0,0,0,0,0, # b8 - bf + 
0,0,0,0,0,0,0,0, # c0 - c7 + 0,0,0,0,0,0,0,0, # c8 - cf + 0,0,0,0,0,0,0,0, # d0 - d7 + 0,0,0,0,0,0,0,0, # d8 - df + 0,0,0,0,0,0,0,0, # e0 - e7 + 0,0,0,0,0,0,0,0, # e8 - ef + 0,0,0,0,0,0,0,0, # f0 - f7 + 0,0,0,0,0,0,4,5 # f8 - ff +) + +UCS2LE_ST = ( + 6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17 + 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f + 7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27 + 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f + 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37 +) + +UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2) + +UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS, + 'class_factor': 6, + 'state_table': UCS2LE_ST, + 'char_len_table': UCS2LE_CHAR_LEN_TABLE, + 'name': 'UTF-16LE'} + +# UTF-8 + +UTF8_CLS = ( + 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value + 1,1,1,1,1,1,0,0, # 08 - 0f + 1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 1,1,1,1,1,1,1,1, # 30 - 37 + 1,1,1,1,1,1,1,1, # 38 - 3f + 1,1,1,1,1,1,1,1, # 40 - 47 + 1,1,1,1,1,1,1,1, # 48 - 4f + 1,1,1,1,1,1,1,1, # 50 - 57 + 1,1,1,1,1,1,1,1, # 58 - 5f + 1,1,1,1,1,1,1,1, # 60 - 67 + 1,1,1,1,1,1,1,1, # 68 - 6f + 1,1,1,1,1,1,1,1, # 70 - 77 + 1,1,1,1,1,1,1,1, # 78 - 7f + 2,2,2,2,3,3,3,3, # 80 - 87 + 4,4,4,4,4,4,4,4, # 88 - 8f + 4,4,4,4,4,4,4,4, # 90 - 97 + 4,4,4,4,4,4,4,4, # 98 - 9f + 5,5,5,5,5,5,5,5, # a0 - a7 + 5,5,5,5,5,5,5,5, # a8 - af + 5,5,5,5,5,5,5,5, # b0 - b7 + 5,5,5,5,5,5,5,5, # b8 - bf + 0,0,6,6,6,6,6,6, # c0 - c7 + 6,6,6,6,6,6,6,6, # c8 - cf + 6,6,6,6,6,6,6,6, # d0 - d7 + 6,6,6,6,6,6,6,6, # d8 - df + 7,8,8,8,8,8,8,8, # e0 - e7 + 8,8,8,8,8,9,8,8, # e8 - ef + 10,11,11,11,11,11,11,11, # f0 - f7 + 12,13,13,13,14,15,0,0 # f8 - ff +) + +UTF8_ST = ( + MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07 + 9, 11, 8, 7, 6, 5, 4, 3,#08-0f + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27 + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f + MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f + MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 
7,MachineState.ERROR,MachineState.ERROR,#50-57 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f + MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f + MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af + MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf + MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf +) + +UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6) + +UTF8_SM_MODEL = {'class_table': UTF8_CLS, + 'class_factor': 16, + 'state_table': UTF8_ST, + 'char_len_table': UTF8_CHAR_LEN_TABLE, + 'name': 'UTF-8'} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcssm.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcssm.pyc new file mode 100644 index 0000000..e02ef09 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/mbcssm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcharsetprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcharsetprober.py new file mode 100644 index 0000000..0adb51d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcharsetprober.py @@ -0,0 +1,132 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 2001 +# the Initial Developer. All Rights Reserved. 
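Every *_SM_MODEL in mbcssm.py above follows one recipe: class_table buckets the 256 byte values, state_table drives transitions between START, ERROR, ITS_ME and intermediate states, and char_len_table records the expected length of the character being parsed. A sketch of stepping the UTF-8 machine by hand, assuming the standalone chardet package:

    # -*- coding: utf-8 -*-
    from chardet.codingstatemachine import CodingStateMachine
    from chardet.enums import MachineState
    from chardet.mbcssm import UTF8_SM_MODEL

    sm = CodingStateMachine(UTF8_SM_MODEL)
    for byte in bytearray(u'é'.encode('utf-8')):  # the two bytes 0xC3 0xA9
        state = sm.next_state(byte)
    print(state == MachineState.START)  # True: a complete, legal character
    print(sm.get_current_charlen())     # 2: it was a two-byte sequence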
+# +# Contributor(s): +# Mark Pilgrim - port to Python +# Shy Shalom - original C code +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .charsetprober import CharSetProber +from .enums import CharacterCategory, ProbingState, SequenceLikelihood + + +class SingleByteCharSetProber(CharSetProber): + SAMPLE_SIZE = 64 + SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2 + POSITIVE_SHORTCUT_THRESHOLD = 0.95 + NEGATIVE_SHORTCUT_THRESHOLD = 0.05 + + def __init__(self, model, reversed=False, name_prober=None): + super(SingleByteCharSetProber, self).__init__() + self._model = model + # TRUE if we need to reverse every pair in the model lookup + self._reversed = reversed + # Optional auxiliary prober for name decision + self._name_prober = name_prober + self._last_order = None + self._seq_counters = None + self._total_seqs = None + self._total_char = None + self._freq_char = None + self.reset() + + def reset(self): + super(SingleByteCharSetProber, self).reset() + # char order of last character + self._last_order = 255 + self._seq_counters = [0] * SequenceLikelihood.get_num_categories() + self._total_seqs = 0 + self._total_char = 0 + # characters that fall in our sampling range + self._freq_char = 0 + + @property + def charset_name(self): + if self._name_prober: + return self._name_prober.charset_name + else: + return self._model['charset_name'] + + @property + def language(self): + if self._name_prober: + return self._name_prober.language + else: + return self._model.get('language') + + def feed(self, byte_str): + if not self._model['keep_english_letter']: + byte_str = self.filter_international_words(byte_str) + if not byte_str: + return self.state + char_to_order_map = self._model['char_to_order_map'] + for i, c in enumerate(byte_str): + # XXX: Order is in range 1-64, so one would think we want 0-63 here, + # but that leads to 27 more test failures than before. + order = char_to_order_map[c] + # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but + # CharacterCategory.SYMBOL is actually 253, so we use CONTROL + # to make it closer to the original intent. The only difference + # is whether or not we count digits and control characters for + # _total_char purposes. 
+ if order < CharacterCategory.CONTROL: + self._total_char += 1 + if order < self.SAMPLE_SIZE: + self._freq_char += 1 + if self._last_order < self.SAMPLE_SIZE: + self._total_seqs += 1 + if not self._reversed: + i = (self._last_order * self.SAMPLE_SIZE) + order + model = self._model['precedence_matrix'][i] + else: # reverse the order of the letters in the lookup + i = (order * self.SAMPLE_SIZE) + self._last_order + model = self._model['precedence_matrix'][i] + self._seq_counters[model] += 1 + self._last_order = order + + charset_name = self._model['charset_name'] + if self.state == ProbingState.DETECTING: + if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD: + confidence = self.get_confidence() + if confidence > self.POSITIVE_SHORTCUT_THRESHOLD: + self.logger.debug('%s confidence = %s, we have a winner', + charset_name, confidence) + self._state = ProbingState.FOUND_IT + elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD: + self.logger.debug('%s confidence = %s, below negative ' + 'shortcut threshold %s', charset_name, + confidence, + self.NEGATIVE_SHORTCUT_THRESHOLD) + self._state = ProbingState.NOT_ME + + return self.state + + def get_confidence(self): + r = 0.01 + if self._total_seqs > 0: + r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) / + self._total_seqs / self._model['typical_positive_ratio']) + r = r * self._freq_char / self._total_char + if r >= 1.0: + r = 0.99 + return r diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcharsetprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcharsetprober.pyc new file mode 100644 index 0000000..6f08aa1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcharsetprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcsgroupprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcsgroupprober.py new file mode 100644 index 0000000..98e95dc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcsgroupprober.py @@ -0,0 +1,73 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 2001 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# Shy Shalom - original C code +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details.
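The get_confidence above scales the share of POSITIVE bigrams by the model's typical_positive_ratio, then by the fraction of characters falling in the frequent sampling range. A worked example with illustrative, made-up counts:

    positive_seqs, total_seqs = 480, 600
    typical_positive_ratio = 0.970290   # the value carried by Latin5TurkishModel
    freq_char, total_char = 580, 640
    r = 1.0 * positive_seqs / total_seqs / typical_positive_ratio
    r = r * freq_char / total_char
    print(round(r, 3))                  # 0.747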
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .charsetgroupprober import CharSetGroupProber +from .sbcharsetprober import SingleByteCharSetProber +from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel, + Latin5CyrillicModel, MacCyrillicModel, + Ibm866Model, Ibm855Model) +from .langgreekmodel import Latin7GreekModel, Win1253GreekModel +from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel +# from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel +from .langthaimodel import TIS620ThaiModel +from .langhebrewmodel import Win1255HebrewModel +from .hebrewprober import HebrewProber +from .langturkishmodel import Latin5TurkishModel + + +class SBCSGroupProber(CharSetGroupProber): + def __init__(self): + super(SBCSGroupProber, self).__init__() + self.probers = [ + SingleByteCharSetProber(Win1251CyrillicModel), + SingleByteCharSetProber(Koi8rModel), + SingleByteCharSetProber(Latin5CyrillicModel), + SingleByteCharSetProber(MacCyrillicModel), + SingleByteCharSetProber(Ibm866Model), + SingleByteCharSetProber(Ibm855Model), + SingleByteCharSetProber(Latin7GreekModel), + SingleByteCharSetProber(Win1253GreekModel), + SingleByteCharSetProber(Latin5BulgarianModel), + SingleByteCharSetProber(Win1251BulgarianModel), + # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250) + # after we retrain model. + # SingleByteCharSetProber(Latin2HungarianModel), + # SingleByteCharSetProber(Win1250HungarianModel), + SingleByteCharSetProber(TIS620ThaiModel), + SingleByteCharSetProber(Latin5TurkishModel), + ] + hebrew_prober = HebrewProber() + logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, + False, hebrew_prober) + visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, True, + hebrew_prober) + hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober) + self.probers.extend([hebrew_prober, logical_hebrew_prober, + visual_hebrew_prober]) + + self.reset() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcsgroupprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcsgroupprober.pyc new file mode 100644 index 0000000..fb4ef93 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sbcsgroupprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sjisprober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sjisprober.py new file mode 100644 index 0000000..9e29623 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sjisprober.py @@ -0,0 +1,92 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
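SBCSGroupProber, completed above, fields one prober per single-byte model, with Hebrew handled by a coordinating HebrewProber that arbitrates between logical and visual byte order. A sketch with KOI8-R input, assuming the standalone chardet package; confidence stays modest on so short a sample:

    # -*- coding: utf-8 -*-
    from chardet.sbcsgroupprober import SBCSGroupProber

    group = SBCSGroupProber()
    group.feed(bytearray(u'Привет, мир! Это пример текста.'.encode('koi8-r')))
    print(group.charset_name, group.get_confidence())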
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import SJISDistributionAnalysis +from .jpcntx import SJISContextAnalysis +from .mbcssm import SJIS_SM_MODEL +from .enums import ProbingState, MachineState + + +class SJISProber(MultiByteCharSetProber): + def __init__(self): + super(SJISProber, self).__init__() + self.coding_sm = CodingStateMachine(SJIS_SM_MODEL) + self.distribution_analyzer = SJISDistributionAnalysis() + self.context_analyzer = SJISContextAnalysis() + self.reset() + + def reset(self): + super(SJISProber, self).reset() + self.context_analyzer.reset() + + @property + def charset_name(self): + return self.context_analyzer.charset_name + + @property + def language(self): + return "Japanese" + + def feed(self, byte_str): + for i in range(len(byte_str)): + coding_state = self.coding_sm.next_state(byte_str[i]) + if coding_state == MachineState.ERROR: + self.logger.debug('%s %s prober hit error at byte %s', + self.charset_name, self.language, i) + self._state = ProbingState.NOT_ME + break + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT + break + elif coding_state == MachineState.START: + char_len = self.coding_sm.get_current_charlen() + if i == 0: + self._last_char[1] = byte_str[0] + self.context_analyzer.feed(self._last_char[2 - char_len:], + char_len) + self.distribution_analyzer.feed(self._last_char, char_len) + else: + self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3 + - char_len], char_len) + self.distribution_analyzer.feed(byte_str[i - 1:i + 1], + char_len) + + self._last_char[0] = byte_str[-1] + + if self.state == ProbingState.DETECTING: + if (self.context_analyzer.got_enough_data() and + (self.get_confidence() > self.SHORTCUT_THRESHOLD)): + self._state = ProbingState.FOUND_IT + + return self.state + + def get_confidence(self): + context_conf = self.context_analyzer.get_confidence() + distrib_conf = self.distribution_analyzer.get_confidence() + return max(context_conf, distrib_conf) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sjisprober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sjisprober.pyc new file mode 100644 index 0000000..1f5924d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/sjisprober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/universaldetector.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/universaldetector.py new file mode 100644 index 0000000..7b4e92d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/universaldetector.py @@ -0,0 +1,286 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. 
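SJISProber, completed above, layers a kana context analyzer on top of the generic state-machine-plus-distribution pair and reports the larger of the two confidences. A sketch under the same standalone-chardet assumption:

    # -*- coding: utf-8 -*-
    from chardet.sjisprober import SJISProber

    prober = SJISProber()
    prober.feed(bytearray(u'日本語のテキスト'.encode('shift_jis')))
    print(prober.charset_name, prober.get_confidence())  # SHIFT_JIS (or CP932)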
+# Portions created by the Initial Developer are Copyright (C) 2001 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# Shy Shalom - original C code +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### +""" +Module containing the UniversalDetector detector class, which is the primary +class a user of ``chardet`` should use. + +:author: Mark Pilgrim (initial port to Python) +:author: Shy Shalom (original C code) +:author: Dan Blanchard (major refactoring for 3.0) +:author: Ian Cordasco +""" + + +import codecs +import logging +import re + +from .charsetgroupprober import CharSetGroupProber +from .enums import InputState, LanguageFilter, ProbingState +from .escprober import EscCharSetProber +from .latin1prober import Latin1Prober +from .mbcsgroupprober import MBCSGroupProber +from .sbcsgroupprober import SBCSGroupProber + + +class UniversalDetector(object): + """ + The ``UniversalDetector`` class underlies the ``chardet.detect`` function + and coordinates all of the different charset probers. + + To get a ``dict`` containing an encoding and its confidence, you can simply + run: + + .. code:: + + u = UniversalDetector() + u.feed(some_bytes) + u.close() + detected = u.result + + """ + + MINIMUM_THRESHOLD = 0.20 + HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]') + ESC_DETECTOR = re.compile(b'(\033|~{)') + WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]') + ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252', + 'iso-8859-2': 'Windows-1250', + 'iso-8859-5': 'Windows-1251', + 'iso-8859-6': 'Windows-1256', + 'iso-8859-7': 'Windows-1253', + 'iso-8859-8': 'Windows-1255', + 'iso-8859-9': 'Windows-1254', + 'iso-8859-13': 'Windows-1257'} + + def __init__(self, lang_filter=LanguageFilter.ALL): + self._esc_charset_prober = None + self._charset_probers = [] + self.result = None + self.done = None + self._got_data = None + self._input_state = None + self._last_char = None + self.lang_filter = lang_filter + self.logger = logging.getLogger(__name__) + self._has_win_bytes = None + self.reset() + + def reset(self): + """ + Reset the UniversalDetector and all of its probers back to their + initial states. This is called by ``__init__``, so you only need to + call this directly in between analyses of different documents. + """ + self.result = {'encoding': None, 'confidence': 0.0, 'language': None} + self.done = False + self._got_data = False + self._has_win_bytes = False + self._input_state = InputState.PURE_ASCII + self._last_char = b'' + if self._esc_charset_prober: + self._esc_charset_prober.reset() + for prober in self._charset_probers: + prober.reset() + + def feed(self, byte_str): + """ + Takes a chunk of a document and feeds it through all of the relevant + charset probers. 
+
+        After calling ``feed``, you can check the value of the ``done``
+        attribute to see if you need to continue feeding the
+        ``UniversalDetector`` more data, or if it has made a prediction
+        (in the ``result`` attribute).
+
+        .. note::
+           You should always call ``close`` when you're done feeding in your
+           document if ``done`` is not already ``True``.
+        """
+        if self.done:
+            return
+
+        if not len(byte_str):
+            return
+
+        if not isinstance(byte_str, bytearray):
+            byte_str = bytearray(byte_str)
+
+        # First check for known BOMs, since these are guaranteed to be correct
+        if not self._got_data:
+            # If the data starts with BOM, we know it is UTF
+            if byte_str.startswith(codecs.BOM_UTF8):
+                # EF BB BF  UTF-8 with BOM
+                self.result = {'encoding': "UTF-8-SIG",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith((codecs.BOM_UTF32_LE,
+                                      codecs.BOM_UTF32_BE)):
+                # FF FE 00 00  UTF-32, little-endian BOM
+                # 00 00 FE FF  UTF-32, big-endian BOM
+                self.result = {'encoding': "UTF-32",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
+                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
+                self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
+                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
+                self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
+                # FF FE  UTF-16, little endian BOM
+                # FE FF  UTF-16, big endian BOM
+                self.result = {'encoding': "UTF-16",
+                               'confidence': 1.0,
+                               'language': ''}
+
+            self._got_data = True
+            if self.result['encoding'] is not None:
+                self.done = True
+                return
+
+        # If none of those matched and we've only seen ASCII so far, check
+        # for high bytes and escape sequences
+        if self._input_state == InputState.PURE_ASCII:
+            if self.HIGH_BYTE_DETECTOR.search(byte_str):
+                self._input_state = InputState.HIGH_BYTE
+            elif self._input_state == InputState.PURE_ASCII and \
+                    self.ESC_DETECTOR.search(self._last_char + byte_str):
+                self._input_state = InputState.ESC_ASCII
+
+        self._last_char = byte_str[-1:]
+
+        # If we've seen escape sequences, use the EscCharSetProber, which
+        # uses a simple state machine to check for known escape sequences in
+        # HZ and ISO-2022 encodings, since those are the only encodings that
+        # use such sequences.
+        if self._input_state == InputState.ESC_ASCII:
+            if not self._esc_charset_prober:
+                self._esc_charset_prober = EscCharSetProber(self.lang_filter)
+            if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
+                self.result = {'encoding':
+                               self._esc_charset_prober.charset_name,
+                               'confidence':
+                               self._esc_charset_prober.get_confidence(),
+                               'language':
+                               self._esc_charset_prober.language}
+                self.done = True
+        # If we've seen high bytes (i.e., those with values greater than 127),
+        # we need to do more complicated checks using all our multi-byte and
+        # single-byte probers that are left. The single-byte probers
+        # use character bigram distributions to determine the encoding, whereas
+        # the multi-byte probers use a combination of character unigram and
+        # bigram distributions.
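For orientation, a minimal sketch of how this class is driven from the outside, using only the feed/close/done/result API defined above (the input filename is hypothetical):

    from pip._vendor.chardet.universaldetector import UniversalDetector

    detector = UniversalDetector()
    with open('unknown.txt', 'rb') as fp:   # hypothetical input file
        for chunk in fp:
            detector.feed(chunk)
            if detector.done:               # a prober already crossed its threshold
                break
    detector.close()                        # finalizes detector.result
    print(detector.result)                  # {'encoding': ..., 'confidence': ..., 'language': ...}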
+        elif self._input_state == InputState.HIGH_BYTE:
+            if not self._charset_probers:
+                self._charset_probers = [MBCSGroupProber(self.lang_filter)]
+                # If we're checking non-CJK encodings, use single-byte prober
+                if self.lang_filter & LanguageFilter.NON_CJK:
+                    self._charset_probers.append(SBCSGroupProber())
+                self._charset_probers.append(Latin1Prober())
+            for prober in self._charset_probers:
+                if prober.feed(byte_str) == ProbingState.FOUND_IT:
+                    self.result = {'encoding': prober.charset_name,
+                                   'confidence': prober.get_confidence(),
+                                   'language': prober.language}
+                    self.done = True
+                    break
+            if self.WIN_BYTE_DETECTOR.search(byte_str):
+                self._has_win_bytes = True
+
+    def close(self):
+        """
+        Stop analyzing the current document and come up with a final
+        prediction.
+
+        :returns:  The ``result`` attribute, a ``dict`` with the keys
+                   `encoding`, `confidence`, and `language`.
+        """
+        # Don't bother with checks if we're already done
+        if self.done:
+            return self.result
+        self.done = True
+
+        if not self._got_data:
+            self.logger.debug('no data received!')
+
+        # Default to ASCII if it is all we've seen so far
+        elif self._input_state == InputState.PURE_ASCII:
+            self.result = {'encoding': 'ascii',
+                           'confidence': 1.0,
+                           'language': ''}
+
+        # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
+        elif self._input_state == InputState.HIGH_BYTE:
+            prober_confidence = None
+            max_prober_confidence = 0.0
+            max_prober = None
+            for prober in self._charset_probers:
+                if not prober:
+                    continue
+                prober_confidence = prober.get_confidence()
+                if prober_confidence > max_prober_confidence:
+                    max_prober_confidence = prober_confidence
+                    max_prober = prober
+            if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
+                charset_name = max_prober.charset_name
+                lower_charset_name = max_prober.charset_name.lower()
+                confidence = max_prober.get_confidence()
+                # Use Windows encoding name instead of ISO-8859 if we saw any
+                # extra Windows-specific bytes
+                if lower_charset_name.startswith('iso-8859'):
+                    if self._has_win_bytes:
+                        charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
+                                                            charset_name)
+                self.result = {'encoding': charset_name,
+                               'confidence': confidence,
+                               'language': max_prober.language}
+
+        # Log all prober confidences if none met MINIMUM_THRESHOLD
+        if self.logger.getEffectiveLevel() == logging.DEBUG:
+            if self.result['encoding'] is None:
+                self.logger.debug('no probers hit minimum threshold')
+                for group_prober in self._charset_probers:
+                    if not group_prober:
+                        continue
+                    if isinstance(group_prober, CharSetGroupProber):
+                        for prober in group_prober.probers:
+                            self.logger.debug('%s %s confidence = %s',
+                                              prober.charset_name,
+                                              prober.language,
+                                              prober.get_confidence())
+                    else:
+                        # not a group prober; log it directly
+                        self.logger.debug('%s %s confidence = %s',
+                                          group_prober.charset_name,
+                                          group_prober.language,
+                                          group_prober.get_confidence())
+        return self.result
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/universaldetector.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/universaldetector.pyc
new file mode 100644
index 0000000..ddcef2d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/universaldetector.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/utf8prober.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/utf8prober.py
new file mode 100644
index 0000000..6c3196c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/utf8prober.py
@@ -0,0 +1,82 @@
+######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .charsetprober import CharSetProber +from .enums import ProbingState, MachineState +from .codingstatemachine import CodingStateMachine +from .mbcssm import UTF8_SM_MODEL + + + +class UTF8Prober(CharSetProber): + ONE_CHAR_PROB = 0.5 + + def __init__(self): + super(UTF8Prober, self).__init__() + self.coding_sm = CodingStateMachine(UTF8_SM_MODEL) + self._num_mb_chars = None + self.reset() + + def reset(self): + super(UTF8Prober, self).reset() + self.coding_sm.reset() + self._num_mb_chars = 0 + + @property + def charset_name(self): + return "utf-8" + + @property + def language(self): + return "" + + def feed(self, byte_str): + for c in byte_str: + coding_state = self.coding_sm.next_state(c) + if coding_state == MachineState.ERROR: + self._state = ProbingState.NOT_ME + break + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT + break + elif coding_state == MachineState.START: + if self.coding_sm.get_current_charlen() >= 2: + self._num_mb_chars += 1 + + if self.state == ProbingState.DETECTING: + if self.get_confidence() > self.SHORTCUT_THRESHOLD: + self._state = ProbingState.FOUND_IT + + return self.state + + def get_confidence(self): + unlike = 0.99 + if self._num_mb_chars < 6: + unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars + return 1.0 - unlike + else: + return unlike diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/utf8prober.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/utf8prober.pyc new file mode 100644 index 0000000..850068f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/utf8prober.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/version.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/version.py new file mode 100644 index 0000000..bb2a34a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/version.py @@ -0,0 +1,9 @@ +""" +This module exists only to simplify retrieving the version number of chardet +from within setup.py and from chardet subpackages. 
+
+:author: Dan Blanchard (dan.blanchard@gmail.com)
+"""
+
+__version__ = "3.0.4"
+VERSION = __version__.split('.')
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/version.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/version.pyc
new file mode 100644
index 0000000..5d882d0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/chardet/version.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.py
new file mode 100644
index 0000000..2a3bf47
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.py
@@ -0,0 +1,6 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+from .initialise import init, deinit, reinit, colorama_text
+from .ansi import Fore, Back, Style, Cursor
+from .ansitowin32 import AnsiToWin32
+
+__version__ = '0.4.1'
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.pyc
new file mode 100644
index 0000000..07a96bf
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py
new file mode 100644
index 0000000..7877658
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py
@@ -0,0 +1,102 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+'''
+This module generates ANSI character codes for printing colors to terminals.
+See: http://en.wikipedia.org/wiki/ANSI_escape_code
+'''
+
+CSI = '\033['
+OSC = '\033]'
+BEL = '\007'
+
+
+def code_to_chars(code):
+    return CSI + str(code) + 'm'
+
+def set_title(title):
+    return OSC + '2;' + title + BEL
+
+def clear_screen(mode=2):
+    return CSI + str(mode) + 'J'
+
+def clear_line(mode=2):
+    return CSI + str(mode) + 'K'
+
+
+class AnsiCodes(object):
+    def __init__(self):
+        # the subclasses declare class attributes which are numbers.
+        # Upon instantiation we define instance attributes, which are the same
+        # as the class attributes but wrapped with the ANSI escape sequence
+        for name in dir(self):
+            if not name.startswith('_'):
+                value = getattr(self, name)
+                setattr(self, name, code_to_chars(value))
+
+
+class AnsiCursor(object):
+    def UP(self, n=1):
+        return CSI + str(n) + 'A'
+    def DOWN(self, n=1):
+        return CSI + str(n) + 'B'
+    def FORWARD(self, n=1):
+        return CSI + str(n) + 'C'
+    def BACK(self, n=1):
+        return CSI + str(n) + 'D'
+    def POS(self, x=1, y=1):
+        return CSI + str(y) + ';' + str(x) + 'H'
+
+
+class AnsiFore(AnsiCodes):
+    BLACK = 30
+    RED = 31
+    GREEN = 32
+    YELLOW = 33
+    BLUE = 34
+    MAGENTA = 35
+    CYAN = 36
+    WHITE = 37
+    RESET = 39
+
+    # These are fairly well supported, but not part of the standard.
+    LIGHTBLACK_EX = 90
+    LIGHTRED_EX = 91
+    LIGHTGREEN_EX = 92
+    LIGHTYELLOW_EX = 93
+    LIGHTBLUE_EX = 94
+    LIGHTMAGENTA_EX = 95
+    LIGHTCYAN_EX = 96
+    LIGHTWHITE_EX = 97
+
+
+class AnsiBack(AnsiCodes):
+    BLACK = 40
+    RED = 41
+    GREEN = 42
+    YELLOW = 43
+    BLUE = 44
+    MAGENTA = 45
+    CYAN = 46
+    WHITE = 47
+    RESET = 49
+
+    # These are fairly well supported, but not part of the standard.
+ LIGHTBLACK_EX = 100 + LIGHTRED_EX = 101 + LIGHTGREEN_EX = 102 + LIGHTYELLOW_EX = 103 + LIGHTBLUE_EX = 104 + LIGHTMAGENTA_EX = 105 + LIGHTCYAN_EX = 106 + LIGHTWHITE_EX = 107 + + +class AnsiStyle(AnsiCodes): + BRIGHT = 1 + DIM = 2 + NORMAL = 22 + RESET_ALL = 0 + +Fore = AnsiFore() +Back = AnsiBack() +Style = AnsiStyle() +Cursor = AnsiCursor() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.pyc new file mode 100644 index 0000000..08c9f81 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py new file mode 100644 index 0000000..359c92b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py @@ -0,0 +1,257 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import re +import sys +import os + +from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style +from .winterm import WinTerm, WinColor, WinStyle +from .win32 import windll, winapi_test + + +winterm = None +if windll is not None: + winterm = WinTerm() + + +class StreamWrapper(object): + ''' + Wraps a stream (such as stdout), acting as a transparent proxy for all + attribute access apart from method 'write()', which is delegated to our + Converter instance. + ''' + def __init__(self, wrapped, converter): + # double-underscore everything to prevent clashes with names of + # attributes on the wrapped stream object. + self.__wrapped = wrapped + self.__convertor = converter + + def __getattr__(self, name): + return getattr(self.__wrapped, name) + + def __enter__(self, *args, **kwargs): + # special method lookup bypasses __getattr__/__getattribute__, see + # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit + # thus, contextlib magic methods are not proxied via __getattr__ + return self.__wrapped.__enter__(*args, **kwargs) + + def __exit__(self, *args, **kwargs): + return self.__wrapped.__exit__(*args, **kwargs) + + def write(self, text): + self.__convertor.write(text) + + def isatty(self): + stream = self.__wrapped + if 'PYCHARM_HOSTED' in os.environ: + if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__): + return True + try: + stream_isatty = stream.isatty + except AttributeError: + return False + else: + return stream_isatty() + + @property + def closed(self): + stream = self.__wrapped + try: + return stream.closed + except AttributeError: + return True + + +class AnsiToWin32(object): + ''' + Implements a 'write()' method which, on Windows, will strip ANSI character + sequences from the text, and if outputting to a tty, will convert them into + win32 function calls. 
+    '''
+    ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?')   # Control Sequence Introducer
+    ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?')        # Operating System Command
+
+    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
+        # The wrapped stream (normally sys.stdout or sys.stderr)
+        self.wrapped = wrapped
+
+        # should we reset colors to defaults after every .write()
+        self.autoreset = autoreset
+
+        # create the proxy wrapping our output stream
+        self.stream = StreamWrapper(wrapped, self)
+
+        on_windows = os.name == 'nt'
+        # We test if the WinAPI works, because even if we are on Windows
+        # we may be using a terminal that doesn't support the WinAPI
+        # (e.g. Cygwin Terminal). In this case it's up to the terminal
+        # to support the ANSI codes.
+        conversion_supported = on_windows and winapi_test()
+
+        # should we strip ANSI sequences from our output?
+        if strip is None:
+            strip = conversion_supported or (not self.stream.closed and not self.stream.isatty())
+        self.strip = strip
+
+        # should we convert ANSI sequences into win32 calls?
+        if convert is None:
+            convert = conversion_supported and not self.stream.closed and self.stream.isatty()
+        self.convert = convert
+
+        # dict of ansi codes to win32 functions and parameters
+        self.win32_calls = self.get_win32_calls()
+
+        # are we wrapping stderr?
+        self.on_stderr = self.wrapped is sys.stderr
+
+    def should_wrap(self):
+        '''
+        True if this class is actually needed. If false, then the output
+        stream will not be affected, nor will win32 calls be issued, so
+        wrapping stdout is not actually required. This will generally be
+        False on non-Windows platforms, unless optional functionality like
+        autoreset has been requested using kwargs to init()
+        '''
+        return self.convert or self.strip or self.autoreset
+
+    def get_win32_calls(self):
+        if self.convert and winterm:
+            return {
+                AnsiStyle.RESET_ALL: (winterm.reset_all, ),
+                AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
+                AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
+                AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
+                AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
+                AnsiFore.RED: (winterm.fore, WinColor.RED),
+                AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
+                AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
+                AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
+                AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
+                AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
+                AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
+                AnsiFore.RESET: (winterm.fore, ),
+                AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
+                AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
+                AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
+                AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
+                AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
+                AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
+                AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
+                AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
+                AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
+                AnsiBack.RED: (winterm.back, WinColor.RED),
+                AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
+                AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
+                AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
+                AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
+                AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
+                AnsiBack.WHITE: (winterm.back, WinColor.GREY),
+                AnsiBack.RESET: (winterm.back, ),
+                AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
+
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), + AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), + AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), + AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), + AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), + AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), + AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), + } + return dict() + + def write(self, text): + if self.strip or self.convert: + self.write_and_convert(text) + else: + self.wrapped.write(text) + self.wrapped.flush() + if self.autoreset: + self.reset_all() + + + def reset_all(self): + if self.convert: + self.call_win32('m', (0,)) + elif not self.strip and not self.stream.closed: + self.wrapped.write(Style.RESET_ALL) + + + def write_and_convert(self, text): + ''' + Write the given text to our wrapped stream, stripping any ANSI + sequences from the text, and optionally converting them into win32 + calls. + ''' + cursor = 0 + text = self.convert_osc(text) + for match in self.ANSI_CSI_RE.finditer(text): + start, end = match.span() + self.write_plain_text(text, cursor, start) + self.convert_ansi(*match.groups()) + cursor = end + self.write_plain_text(text, cursor, len(text)) + + + def write_plain_text(self, text, start, end): + if start < end: + self.wrapped.write(text[start:end]) + self.wrapped.flush() + + + def convert_ansi(self, paramstring, command): + if self.convert: + params = self.extract_params(command, paramstring) + self.call_win32(command, params) + + + def extract_params(self, command, paramstring): + if command in 'Hf': + params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) + while len(params) < 2: + # defaults: + params = params + (1,) + else: + params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) + if len(params) == 0: + # defaults: + if command in 'JKm': + params = (0,) + elif command in 'ABCD': + params = (1,) + + return params + + + def call_win32(self, command, params): + if command == 'm': + for param in params: + if param in self.win32_calls: + func_args = self.win32_calls[param] + func = func_args[0] + args = func_args[1:] + kwargs = dict(on_stderr=self.on_stderr) + func(*args, **kwargs) + elif command in 'J': + winterm.erase_screen(params[0], on_stderr=self.on_stderr) + elif command in 'K': + winterm.erase_line(params[0], on_stderr=self.on_stderr) + elif command in 'Hf': # cursor position - absolute + winterm.set_cursor_position(params, on_stderr=self.on_stderr) + elif command in 'ABCD': # cursor position - relative + n = params[0] + # A - up, B - down, C - forward, D - back + x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] + winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) + + + def convert_osc(self, text): + for match in self.ANSI_OSC_RE.finditer(text): + start, end = match.span() + text = text[:start] + text[end:] + paramstring, command = match.groups() + if command in '\x07': # \x07 = BEL + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) + return text diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.pyc new file mode 100644 index 0000000..5e5d3d7 Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.py new file mode 100644 index 0000000..430d066 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.py @@ -0,0 +1,80 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import atexit +import contextlib +import sys + +from .ansitowin32 import AnsiToWin32 + + +orig_stdout = None +orig_stderr = None + +wrapped_stdout = None +wrapped_stderr = None + +atexit_done = False + + +def reset_all(): + if AnsiToWin32 is not None: # Issue #74: objects might become None at exit + AnsiToWin32(orig_stdout).reset_all() + + +def init(autoreset=False, convert=None, strip=None, wrap=True): + + if not wrap and any([autoreset, convert, strip]): + raise ValueError('wrap=False conflicts with any other arg=True') + + global wrapped_stdout, wrapped_stderr + global orig_stdout, orig_stderr + + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + if sys.stdout is None: + wrapped_stdout = None + else: + sys.stdout = wrapped_stdout = \ + wrap_stream(orig_stdout, convert, strip, autoreset, wrap) + if sys.stderr is None: + wrapped_stderr = None + else: + sys.stderr = wrapped_stderr = \ + wrap_stream(orig_stderr, convert, strip, autoreset, wrap) + + global atexit_done + if not atexit_done: + atexit.register(reset_all) + atexit_done = True + + +def deinit(): + if orig_stdout is not None: + sys.stdout = orig_stdout + if orig_stderr is not None: + sys.stderr = orig_stderr + + +@contextlib.contextmanager +def colorama_text(*args, **kwargs): + init(*args, **kwargs) + try: + yield + finally: + deinit() + + +def reinit(): + if wrapped_stdout is not None: + sys.stdout = wrapped_stdout + if wrapped_stderr is not None: + sys.stderr = wrapped_stderr + + +def wrap_stream(stream, convert, strip, autoreset, wrap): + if wrap: + wrapper = AnsiToWin32(stream, + convert=convert, strip=strip, autoreset=autoreset) + if wrapper.should_wrap(): + stream = wrapper.stream + return stream diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.pyc new file mode 100644 index 0000000..4916d91 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py new file mode 100644 index 0000000..c2d8360 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py @@ -0,0 +1,152 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
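Before the raw Win32 bindings, a minimal sketch of how the package is meant to be used through the public names exported by the colorama __init__.py above; ansitowin32.py and the bindings below do the per-platform work (colors chosen arbitrarily):

    from pip._vendor.colorama import init, deinit, Fore, Back, Style

    init()    # wraps sys.stdout/sys.stderr; on Windows, ANSI codes become Win32 calls
    print(Fore.RED + 'alarm' + Style.RESET_ALL)
    print(Back.GREEN + Style.BRIGHT + 'highlight' + Style.RESET_ALL)
    deinit()  # restores the original streams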
+ +# from winbase.h +STDOUT = -11 +STDERR = -12 + +try: + import ctypes + from ctypes import LibraryLoader + windll = LibraryLoader(ctypes.WinDLL) + from ctypes import wintypes +except (AttributeError, ImportError): + windll = None + SetConsoleTextAttribute = lambda *_: None + winapi_test = lambda *_: None +else: + from ctypes import byref, Structure, c_char, POINTER + + COORD = wintypes._COORD + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + """struct in wincon.h.""" + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + def __str__(self): + return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( + self.dwSize.Y, self.dwSize.X + , self.dwCursorPosition.Y, self.dwCursorPosition.X + , self.wAttributes + , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right + , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X + ) + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + + _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + POINTER(CONSOLE_SCREEN_BUFFER_INFO), + ] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute + _SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + ] + _SetConsoleTextAttribute.restype = wintypes.BOOL + + _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition + _SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + COORD, + ] + _SetConsoleCursorPosition.restype = wintypes.BOOL + + _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA + _FillConsoleOutputCharacterA.argtypes = [ + wintypes.HANDLE, + c_char, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputCharacterA.restype = wintypes.BOOL + + _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute + _FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputAttribute.restype = wintypes.BOOL + + _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW + _SetConsoleTitleW.argtypes = [ + wintypes.LPCWSTR + ] + _SetConsoleTitleW.restype = wintypes.BOOL + + def _winapi_test(handle): + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return bool(success) + + def winapi_test(): + return any(_winapi_test(h) for h in + (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) + + def GetConsoleScreenBufferInfo(stream_id=STDOUT): + handle = _GetStdHandle(stream_id) + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return csbi + + def SetConsoleTextAttribute(stream_id, attrs): + handle = _GetStdHandle(stream_id) + return _SetConsoleTextAttribute(handle, attrs) + + def SetConsoleCursorPosition(stream_id, position, adjust=True): + position = COORD(*position) + # If the position is out of range, do nothing. + if position.Y <= 0 or position.X <= 0: + return + # Adjust for Windows' SetConsoleCursorPosition: + # 1. being 0-based, while ANSI is 1-based. + # 2. expecting (x,y), while ANSI uses (y,x). 
+ adjusted_position = COORD(position.Y - 1, position.X - 1) + if adjust: + # Adjust for viewport's scroll position + sr = GetConsoleScreenBufferInfo(STDOUT).srWindow + adjusted_position.Y += sr.Top + adjusted_position.X += sr.Left + # Resume normal processing + handle = _GetStdHandle(stream_id) + return _SetConsoleCursorPosition(handle, adjusted_position) + + def FillConsoleOutputCharacter(stream_id, char, length, start): + handle = _GetStdHandle(stream_id) + char = c_char(char.encode()) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + success = _FillConsoleOutputCharacterA( + handle, char, length, start, byref(num_written)) + return num_written.value + + def FillConsoleOutputAttribute(stream_id, attr, length, start): + ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' + handle = _GetStdHandle(stream_id) + attribute = wintypes.WORD(attr) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + return _FillConsoleOutputAttribute( + handle, attribute, length, start, byref(num_written)) + + def SetConsoleTitle(title): + return _SetConsoleTitleW(title) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.pyc new file mode 100644 index 0000000..0aa575b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.py new file mode 100644 index 0000000..0fdb4ec --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.py @@ -0,0 +1,169 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from . import win32 + + +# from wincon.h +class WinColor(object): + BLACK = 0 + BLUE = 1 + GREEN = 2 + CYAN = 3 + RED = 4 + MAGENTA = 5 + YELLOW = 6 + GREY = 7 + +# from wincon.h +class WinStyle(object): + NORMAL = 0x00 # dim text, dim background + BRIGHT = 0x08 # bright text, dim background + BRIGHT_BACKGROUND = 0x80 # dim text, bright background + +class WinTerm(object): + + def __init__(self): + self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes + self.set_attrs(self._default) + self._default_fore = self._fore + self._default_back = self._back + self._default_style = self._style + # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. + # So that LIGHT_EX colors and BRIGHT style do not clobber each other, + # we track them separately, since LIGHT_EX is overwritten by Fore/Back + # and BRIGHT is overwritten by Style codes. 
+ self._light = 0 + + def get_attrs(self): + return self._fore + self._back * 16 + (self._style | self._light) + + def set_attrs(self, value): + self._fore = value & 7 + self._back = (value >> 4) & 7 + self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) + + def reset_all(self, on_stderr=None): + self.set_attrs(self._default) + self.set_console(attrs=self._default) + self._light = 0 + + def fore(self, fore=None, light=False, on_stderr=False): + if fore is None: + fore = self._default_fore + self._fore = fore + # Emulate LIGHT_EX with BRIGHT Style + if light: + self._light |= WinStyle.BRIGHT + else: + self._light &= ~WinStyle.BRIGHT + self.set_console(on_stderr=on_stderr) + + def back(self, back=None, light=False, on_stderr=False): + if back is None: + back = self._default_back + self._back = back + # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style + if light: + self._light |= WinStyle.BRIGHT_BACKGROUND + else: + self._light &= ~WinStyle.BRIGHT_BACKGROUND + self.set_console(on_stderr=on_stderr) + + def style(self, style=None, on_stderr=False): + if style is None: + style = self._default_style + self._style = style + self.set_console(on_stderr=on_stderr) + + def set_console(self, attrs=None, on_stderr=False): + if attrs is None: + attrs = self.get_attrs() + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleTextAttribute(handle, attrs) + + def get_position(self, handle): + position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition + # Because Windows coordinates are 0-based, + # and win32.SetConsoleCursorPosition expects 1-based. + position.X += 1 + position.Y += 1 + return position + + def set_cursor_position(self, position=None, on_stderr=False): + if position is None: + # I'm not currently tracking the position, so there is no default. + # position = self.get_position() + return + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleCursorPosition(handle, position) + + def cursor_adjust(self, x, y, on_stderr=False): + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + position = self.get_position(handle) + adjusted_position = (position.Y + y, position.X + x) + win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) + + def erase_screen(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the screen. + # 1 should clear from the cursor to the beginning of the screen. 
+        # 2 should clear the entire screen, and move cursor to (1,1)
+        handle = win32.STDOUT
+        if on_stderr:
+            handle = win32.STDERR
+        csbi = win32.GetConsoleScreenBufferInfo(handle)
+        # get the number of character cells in the current buffer
+        cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
+        # get number of character cells before current cursor position
+        cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
+        if mode == 0:
+            from_coord = csbi.dwCursorPosition
+            cells_to_erase = cells_in_screen - cells_before_cursor
+        elif mode == 1:
+            from_coord = win32.COORD(0, 0)
+            cells_to_erase = cells_before_cursor
+        elif mode == 2:
+            from_coord = win32.COORD(0, 0)
+            cells_to_erase = cells_in_screen
+        else:
+            # invalid mode
+            return
+        # fill the entire screen with blanks
+        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+        # now set the buffer's attributes accordingly
+        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+        if mode == 2:
+            # put the cursor where needed
+            win32.SetConsoleCursorPosition(handle, (1, 1))
+
+    def erase_line(self, mode=0, on_stderr=False):
+        # 0 should clear from the cursor to the end of the line.
+        # 1 should clear from the cursor to the beginning of the line.
+        # 2 should clear the entire line.
+        handle = win32.STDOUT
+        if on_stderr:
+            handle = win32.STDERR
+        csbi = win32.GetConsoleScreenBufferInfo(handle)
+        if mode == 0:
+            from_coord = csbi.dwCursorPosition
+            cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
+        elif mode == 1:
+            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
+            cells_to_erase = csbi.dwCursorPosition.X
+        elif mode == 2:
+            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
+            cells_to_erase = csbi.dwSize.X
+        else:
+            # invalid mode
+            return
+        # fill the entire line with blanks
+        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+        # now set the buffer's attributes accordingly
+        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+
+    def set_title(self, title):
+        win32.SetConsoleTitle(title)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.pyc
new file mode 100644
index 0000000..f3d0425
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py
new file mode 100644
index 0000000..a786b4d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import logging
+
+__version__ = '0.2.8'
+
+class DistlibException(Exception):
+    pass
+
+try:
+    from logging import NullHandler
+except ImportError: # pragma: no cover
+    class NullHandler(logging.Handler):
+        def handle(self, record): pass
+        def emit(self, record): pass
+        def createLock(self): self.lock = None
+
+logger = logging.getLogger(__name__)
+logger.addHandler(NullHandler())
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyc
new file mode 100644
index 0000000..1bdd7ef
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.py
new file mode 100644
index 0000000..f7dbf4c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.py
@@ -0,0 +1,6 @@
+"""Modules copied from Python 3 standard libraries, for internal use only.
+
+Individual classes and functions are found in distlib._backport.misc. Intended
+usage is to always import things missing from 3.1 from that module: the
+built-in/stdlib objects will be used if found.
+"""
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.pyc
new file mode 100644
index 0000000..b87fbb9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.py
new file mode 100644
index 0000000..cfb318d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+# +"""Backports for individual classes and functions.""" + +import os +import sys + +__all__ = ['cache_from_source', 'callable', 'fsencode'] + + +try: + from imp import cache_from_source +except ImportError: + def cache_from_source(py_file, debug=__debug__): + ext = debug and 'c' or 'o' + return py_file + ext + + +try: + callable = callable +except NameError: + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode +except AttributeError: + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, str): + return filename.encode(sys.getfilesystemencoding()) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyc new file mode 100644 index 0000000..9b7b691 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py new file mode 100644 index 0000000..159e49e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py @@ -0,0 +1,761 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. + +""" + +import os +import sys +import stat +from os.path import abspath +import fnmatch +import collections +import errno +from . import tarfile + +try: + import bz2 + _BZ2_SUPPORTED = True +except ImportError: + _BZ2_SUPPORTED = False + +try: + from pwd import getpwnam +except ImportError: + getpwnam = None + +try: + from grp import getgrnam +except ImportError: + getgrnam = None + +__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", + "copytree", "move", "rmtree", "Error", "SpecialFileError", + "ExecError", "make_archive", "get_archive_formats", + "register_archive_format", "unregister_archive_format", + "get_unpack_formats", "register_unpack_format", + "unregister_unpack_format", "unpack_archive", "ignore_patterns"] + +class Error(EnvironmentError): + pass + +class SpecialFileError(EnvironmentError): + """Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)""" + +class ExecError(EnvironmentError): + """Raised when a command could not be executed""" + +class ReadError(EnvironmentError): + """Raised when an archive cannot be read""" + +class RegistryError(Exception): + """Raised when a registry operation with the archiving + and unpacking registries fails""" + + +try: + WindowsError +except NameError: + WindowsError = None + +def copyfileobj(fsrc, fdst, length=16*1024): + """copy data from file-like object fsrc to file-like object fdst""" + while 1: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + +def _samefile(src, dst): + # Macintosh, Unix. + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. 
+ return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + +def copyfile(src, dst): + """Copy data from src to dst""" + if _samefile(src, dst): + raise Error("`%s` and `%s` are the same file" % (src, dst)) + + for fn in [src, dst]: + try: + st = os.stat(fn) + except OSError: + # File most likely does not exist + pass + else: + # XXX What about other special files? (sockets, devices...) + if stat.S_ISFIFO(st.st_mode): + raise SpecialFileError("`%s` is a named pipe" % fn) + + with open(src, 'rb') as fsrc: + with open(dst, 'wb') as fdst: + copyfileobj(fsrc, fdst) + +def copymode(src, dst): + """Copy mode bits from src to dst""" + if hasattr(os, 'chmod'): + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + os.chmod(dst, mode) + +def copystat(src, dst): + """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + if hasattr(os, 'utime'): + os.utime(dst, (st.st_atime, st.st_mtime)) + if hasattr(os, 'chmod'): + os.chmod(dst, mode) + if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): + try: + os.chflags(dst, st.st_flags) + except OSError as why: + if (not hasattr(errno, 'EOPNOTSUPP') or + why.errno != errno.EOPNOTSUPP): + raise + +def copy(src, dst): + """Copy data and mode bits ("cp src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copymode(src, dst) + +def copy2(src, dst): + """Copy data and all stat info ("cp -p src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copystat(src, dst) + +def ignore_patterns(*patterns): + """Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files""" + def _ignore_patterns(path, names): + ignored_names = [] + for pattern in patterns: + ignored_names.extend(fnmatch.filter(names, pattern)) + return set(ignored_names) + return _ignore_patterns + +def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, + ignore_dangling_symlinks=False): + """Recursively copy a directory tree. + + The destination directory must not already exist. + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. 
It will be called with the source path and the
+    destination path as arguments. By default, copy2() is used, but any
+    function that supports the same signature (like copy()) can be used.
+
+    """
+    names = os.listdir(src)
+    if ignore is not None:
+        ignored_names = ignore(src, names)
+    else:
+        ignored_names = set()
+
+    os.makedirs(dst)
+    errors = []
+    for name in names:
+        if name in ignored_names:
+            continue
+        srcname = os.path.join(src, name)
+        dstname = os.path.join(dst, name)
+        try:
+            if os.path.islink(srcname):
+                linkto = os.readlink(srcname)
+                if symlinks:
+                    os.symlink(linkto, dstname)
+                else:
+                    # ignore dangling symlink if the flag is on
+                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
+                        continue
+                    # otherwise let the copy occur; copy2 will raise an error
+                    copy_function(srcname, dstname)
+            elif os.path.isdir(srcname):
+                copytree(srcname, dstname, symlinks, ignore, copy_function)
+            else:
+                # Will raise a SpecialFileError for unsupported file types
+                copy_function(srcname, dstname)
+        # catch the Error from the recursive copytree so that we can
+        # continue with other files
+        except Error as err:
+            errors.extend(err.args[0])
+        except EnvironmentError as why:
+            errors.append((srcname, dstname, str(why)))
+    try:
+        copystat(src, dst)
+    except OSError as why:
+        if WindowsError is not None and isinstance(why, WindowsError):
+            # Copying file access times may fail on Windows
+            pass
+        else:
+            # record a single (src, dst, error) tuple for this failure
+            errors.append((src, dst, str(why)))
+    if errors:
+        raise Error(errors)
+
+def rmtree(path, ignore_errors=False, onerror=None):
+    """Recursively delete a directory tree.
+
+    If ignore_errors is set, errors are ignored; otherwise, if onerror
+    is set, it is called to handle the error with arguments (func,
+    path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
+    path is the argument to that function that caused it to fail; and
+    exc_info is a tuple returned by sys.exc_info(). If ignore_errors
+    is false and onerror is None, an exception is raised.
+
+    """
+    if ignore_errors:
+        def onerror(*args):
+            pass
+    elif onerror is None:
+        def onerror(*args):
+            raise
+    try:
+        if os.path.islink(path):
+            # symlinks to directories are forbidden, see bug #1669
+            raise OSError("Cannot call rmtree on a symbolic link")
+    except OSError:
+        onerror(os.path.islink, path, sys.exc_info())
+        # can't continue even if onerror hook returns
+        return
+    names = []
+    try:
+        names = os.listdir(path)
+    except os.error:
+        onerror(os.listdir, path, sys.exc_info())
+    for name in names:
+        fullname = os.path.join(path, name)
+        try:
+            mode = os.lstat(fullname).st_mode
+        except os.error:
+            mode = 0
+        if stat.S_ISDIR(mode):
+            rmtree(fullname, ignore_errors, onerror)
+        else:
+            try:
+                os.remove(fullname)
+            except os.error:
+                onerror(os.remove, fullname, sys.exc_info())
+    try:
+        os.rmdir(path)
+    except os.error:
+        onerror(os.rmdir, path, sys.exc_info())
+
+
+def _basename(path):
+    # A basename() variant which first strips the trailing slash, if present.
+    # Thus we always get the last component of the path, even for directories.
+    return os.path.basename(path.rstrip(os.path.sep))
+
+def move(src, dst):
+    """Recursively move a file or directory to another location. This is
+    similar to the Unix "mv" command.
+
+    If the destination is a directory or a symlink to a directory, the source
+    is moved inside the directory. The destination path must not already
+    exist.
+
+    If the destination already exists but is not a directory, it may be
+    overwritten depending on os.rename() semantics.
+ + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + """ + real_dst = dst + if os.path.isdir(dst): + if _samefile(src, dst): + # We might be on a case insensitive filesystem, + # perform the rename anyway. + os.rename(src, dst) + return + + real_dst = os.path.join(dst, _basename(src)) + if os.path.exists(real_dst): + raise Error("Destination path '%s' already exists" % real_dst) + try: + os.rename(src, real_dst) + except OSError: + if os.path.isdir(src): + if _destinsrc(src, dst): + raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) + copytree(src, real_dst, symlinks=True) + rmtree(src) + else: + copy2(src, real_dst) + os.unlink(src) + +def _destinsrc(src, dst): + src = abspath(src) + dst = abspath(dst) + if not src.endswith(os.path.sep): + src += os.path.sep + if not dst.endswith(os.path.sep): + dst += os.path.sep + return dst.startswith(src) + +def _get_gid(name): + """Returns a gid, given a group name.""" + if getgrnam is None or name is None: + return None + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if getpwnam is None or name is None: + return None + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, + owner=None, group=None, logger=None): + """Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", or ".bz2"). + + Returns the output filename. 
+    """
+    tar_compression = {'gzip': 'gz', None: ''}
+    compress_ext = {'gzip': '.gz'}
+
+    if _BZ2_SUPPORTED:
+        tar_compression['bzip2'] = 'bz2'
+        compress_ext['bzip2'] = '.bz2'
+
+    # flags for compression program, each element of list will be an argument
+    if compress is not None and compress not in compress_ext:
+        raise ValueError("bad value for 'compress', or compression format not "
+                         "supported : {0}".format(compress))
+
+    archive_name = base_name + '.tar' + compress_ext.get(compress, '')
+    archive_dir = os.path.dirname(archive_name)
+
+    if not os.path.exists(archive_dir):
+        if logger is not None:
+            logger.info("creating %s", archive_dir)
+        if not dry_run:
+            os.makedirs(archive_dir)
+
+    # creating the tarball
+    if logger is not None:
+        logger.info('Creating tar archive')
+
+    uid = _get_uid(owner)
+    gid = _get_gid(group)
+
+    def _set_uid_gid(tarinfo):
+        if gid is not None:
+            tarinfo.gid = gid
+            tarinfo.gname = group
+        if uid is not None:
+            tarinfo.uid = uid
+            tarinfo.uname = owner
+        return tarinfo
+
+    if not dry_run:
+        tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
+        try:
+            tar.add(base_dir, filter=_set_uid_gid)
+        finally:
+            tar.close()
+
+    return archive_name
+
+def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
+    # XXX see if we want to keep an external call here
+    if verbose:
+        zipoptions = "-r"
+    else:
+        zipoptions = "-rq"
+    from distutils.errors import DistutilsExecError
+    from distutils.spawn import spawn
+    try:
+        spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
+    except DistutilsExecError:
+        # XXX really should distinguish between "couldn't find
+        # external 'zip' command" and "zip failed".
+        raise ExecError("unable to create zip file '%s': "
+                        "could neither import the 'zipfile' module nor "
+                        "find a standalone zip utility" % zip_filename)
+
+def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
+    """Create a zip file from all the files under 'base_dir'.
+
+    The output zip file will be named 'base_name' + ".zip". Uses either the
+    "zipfile" Python module (if available) or the InfoZIP "zip" utility
+    (if installed and found on the default search path). If neither tool is
+    available, raises ExecError. Returns the name of the output zip
+    file.
+    """
+    zip_filename = base_name + ".zip"
+    archive_dir = os.path.dirname(base_name)
+
+    if not os.path.exists(archive_dir):
+        if logger is not None:
+            logger.info("creating %s", archive_dir)
+        if not dry_run:
+            os.makedirs(archive_dir)
+
+    # If zipfile module is not available, try spawning an external 'zip'
+    # command.
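These private helpers sit behind the public make_archive/unpack_archive pair defined further down in this file; a minimal usage sketch against this backport (the paths are illustrative):

    from pip._vendor.distlib._backport import shutil as backport_shutil

    # pack ./project into project.zip, then unpack it into ./restored
    archive = backport_shutil.make_archive('project', 'zip',
                                           root_dir='.', base_dir='project')
    backport_shutil.unpack_archive(archive, extract_dir='restored')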
+ try: + import zipfile + except ImportError: + zipfile = None + + if zipfile is None: + _call_external_zip(base_dir, zip_filename, verbose, dry_run) + else: + if logger is not None: + logger.info("creating '%s' and adding '%s' to it", + zip_filename, base_dir) + + if not dry_run: + zip = zipfile.ZipFile(zip_filename, "w", + compression=zipfile.ZIP_DEFLATED) + + for dirpath, dirnames, filenames in os.walk(base_dir): + for name in filenames: + path = os.path.normpath(os.path.join(dirpath, name)) + if os.path.isfile(path): + zip.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + zip.close() + + return zip_filename + +_ARCHIVE_FORMATS = { + 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), + 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), + 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), + 'zip': (_make_zipfile, [], "ZIP file"), + } + +if _BZ2_SUPPORTED: + _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], + "bzip2'ed tar-file") + +def get_archive_formats(): + """Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + """ + formats = [(name, registry[2]) for name, registry in + _ARCHIVE_FORMATS.items()] + formats.sort() + return formats + +def register_archive_format(name, function, extra_args=None, description=''): + """Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. + """ + if extra_args is None: + extra_args = [] + if not isinstance(function, collections.Callable): + raise TypeError('The %s object is not callable' % function) + if not isinstance(extra_args, (tuple, list)): + raise TypeError('extra_args needs to be a sequence') + for element in extra_args: + if not isinstance(element, (tuple, list)) or len(element) !=2: + raise TypeError('extra_args elements are : (arg_name, value)') + + _ARCHIVE_FORMATS[name] = (function, extra_args, description) + +def unregister_archive_format(name): + del _ARCHIVE_FORMATS[name] + +def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, + dry_run=0, owner=None, group=None, logger=None): + """Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "bztar" + or "gztar". + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. 
+ """ + save_cwd = os.getcwd() + if root_dir is not None: + if logger is not None: + logger.debug("changing into '%s'", root_dir) + base_name = os.path.abspath(base_name) + if not dry_run: + os.chdir(root_dir) + + if base_dir is None: + base_dir = os.curdir + + kwargs = {'dry_run': dry_run, 'logger': logger} + + try: + format_info = _ARCHIVE_FORMATS[format] + except KeyError: + raise ValueError("unknown archive format '%s'" % format) + + func = format_info[0] + for arg, val in format_info[1]: + kwargs[arg] = val + + if format != 'zip': + kwargs['owner'] = owner + kwargs['group'] = group + + try: + filename = func(base_name, base_dir, **kwargs) + finally: + if root_dir is not None: + if logger is not None: + logger.debug("changing back to '%s'", save_cwd) + os.chdir(save_cwd) + + return filename + + +def get_unpack_formats(): + """Returns a list of supported formats for unpacking. + + Each element of the returned sequence is a tuple + (name, extensions, description) + """ + formats = [(name, info[0], info[3]) for name, info in + _UNPACK_FORMATS.items()] + formats.sort() + return formats + +def _check_unpack_options(extensions, function, extra_args): + """Checks what gets registered as an unpacker.""" + # first make sure no other unpacker is registered for this extension + existing_extensions = {} + for name, info in _UNPACK_FORMATS.items(): + for ext in info[0]: + existing_extensions[ext] = name + + for extension in extensions: + if extension in existing_extensions: + msg = '%s is already registered for "%s"' + raise RegistryError(msg % (extension, + existing_extensions[extension])) + + if not isinstance(function, collections.Callable): + raise TypeError('The registered function must be a callable') + + +def register_unpack_format(name, extensions, function, extra_args=None, + description=''): + """Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + """ + if extra_args is None: + extra_args = [] + _check_unpack_options(extensions, function, extra_args) + _UNPACK_FORMATS[name] = extensions, function, extra_args, description + +def unregister_unpack_format(name): + """Removes the pack format from the registry.""" + del _UNPACK_FORMATS[name] + +def _ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def _unpack_zipfile(filename, extract_dir): + """Unpack zip `filename` to `extract_dir` + """ + try: + import zipfile + except ImportError: + raise ReadError('zlib not supported, cannot unpack this archive.') + + if not zipfile.is_zipfile(filename): + raise ReadError("%s is not a zip file" % filename) + + zip = zipfile.ZipFile(filename) + try: + for info in zip.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name: + continue + + target = os.path.join(extract_dir, *name.split('/')) + if not target: + continue + + _ensure_directory(target) + if not name.endswith('/'): + # file + data = zip.read(info.filename) + f = open(target, 'wb') + try: + f.write(data) + finally: + f.close() + del data + finally: + zip.close() + +def _unpack_tarfile(filename, extract_dir): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + """ + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir) + finally: + tarobj.close() + +_UNPACK_FORMATS = { + 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), + 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), + 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") + } + +if _BZ2_SUPPORTED: + _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], + "bzip2'ed tar-file") + +def _find_unpack_format(filename): + for name, info in _UNPACK_FORMATS.items(): + for extension in info[0]: + if filename.endswith(extension): + return name + return None + +def unpack_archive(filename, extract_dir=None, format=None): + """Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", or "gztar". Or any + other registered format. If not provided, unpack_archive will use the + filename extension and see if an unpacker was registered for that + extension. + + In case none is found, a ValueError is raised. + """ + if extract_dir is None: + extract_dir = os.getcwd() + + if format is not None: + try: + format_info = _UNPACK_FORMATS[format] + except KeyError: + raise ValueError("Unknown unpack format '{0}'".format(format)) + + func = format_info[1] + func(filename, extract_dir, **dict(format_info[2])) + else: + # we need to look at the registered unpackers supported extensions + format = _find_unpack_format(filename) + if format is None: + raise ReadError("Unknown archive format '{0}'".format(filename)) + + func = _UNPACK_FORMATS[format][1] + kwargs = dict(_UNPACK_FORMATS[format][2]) + func(filename, extract_dir, **kwargs) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyc new file mode 100644 index 0000000..bcd7bb4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg new file mode 100644 index 0000000..1746bd0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg @@ -0,0 +1,84 @@ +[posix_prefix] +# Configuration directories. Some of these come straight out of the +# configure script. They are for implementing the other variables, not to +# be used directly in [resource_locations]. 
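+#
+# Editorial illustration (not part of the upstream file): with base=/usr
+# and py_version_short=2.7, the 'stdlib' entry below expands to
+# /usr/lib/python2.7; the {...} placeholders are filled in from
+# sysconfig's configuration variables.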
+confdir = /etc +datadir = /usr/share +libdir = /usr/lib +statedir = /var +# User resource directory +local = ~/.local/{distribution.name} + +stdlib = {base}/lib/python{py_version_short} +platstdlib = {platbase}/lib/python{py_version_short} +purelib = {base}/lib/python{py_version_short}/site-packages +platlib = {platbase}/lib/python{py_version_short}/site-packages +include = {base}/include/python{py_version_short}{abiflags} +platinclude = {platbase}/include/python{py_version_short}{abiflags} +data = {base} + +[posix_home] +stdlib = {base}/lib/python +platstdlib = {base}/lib/python +purelib = {base}/lib/python +platlib = {base}/lib/python +include = {base}/include/python +platinclude = {base}/include/python +scripts = {base}/bin +data = {base} + +[nt] +stdlib = {base}/Lib +platstdlib = {base}/Lib +purelib = {base}/Lib/site-packages +platlib = {base}/Lib/site-packages +include = {base}/Include +platinclude = {base}/Include +scripts = {base}/Scripts +data = {base} + +[os2] +stdlib = {base}/Lib +platstdlib = {base}/Lib +purelib = {base}/Lib/site-packages +platlib = {base}/Lib/site-packages +include = {base}/Include +platinclude = {base}/Include +scripts = {base}/Scripts +data = {base} + +[os2_home] +stdlib = {userbase}/lib/python{py_version_short} +platstdlib = {userbase}/lib/python{py_version_short} +purelib = {userbase}/lib/python{py_version_short}/site-packages +platlib = {userbase}/lib/python{py_version_short}/site-packages +include = {userbase}/include/python{py_version_short} +scripts = {userbase}/bin +data = {userbase} + +[nt_user] +stdlib = {userbase}/Python{py_version_nodot} +platstdlib = {userbase}/Python{py_version_nodot} +purelib = {userbase}/Python{py_version_nodot}/site-packages +platlib = {userbase}/Python{py_version_nodot}/site-packages +include = {userbase}/Python{py_version_nodot}/Include +scripts = {userbase}/Scripts +data = {userbase} + +[posix_user] +stdlib = {userbase}/lib/python{py_version_short} +platstdlib = {userbase}/lib/python{py_version_short} +purelib = {userbase}/lib/python{py_version_short}/site-packages +platlib = {userbase}/lib/python{py_version_short}/site-packages +include = {userbase}/include/python{py_version_short} +scripts = {userbase}/bin +data = {userbase} + +[osx_framework_user] +stdlib = {userbase}/lib/python +platstdlib = {userbase}/lib/python +purelib = {userbase}/lib/python/site-packages +platlib = {userbase}/lib/python/site-packages +include = {userbase}/include +scripts = {userbase}/bin +data = {userbase} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py new file mode 100644 index 0000000..1df3aba --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py @@ -0,0 +1,788 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +"""Access to Python's configuration information.""" + +import codecs +import os +import re +import sys +from os.path import pardir, realpath +try: + import configparser +except ImportError: + import ConfigParser as configparser + + +__all__ = [ + 'get_config_h_filename', + 'get_config_var', + 'get_config_vars', + 'get_makefile_filename', + 'get_path', + 'get_path_names', + 'get_paths', + 'get_platform', + 'get_python_version', + 'get_scheme_names', + 'parse_config_h', +] + + +def _safe_realpath(path): + try: + return realpath(path) + except OSError: + return path + + +if sys.executable: + _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) +else: + # sys.executable can be empty if argv[0] has been changed and Python is + # unable to retrieve the real program name + _PROJECT_BASE = _safe_realpath(os.getcwd()) + +if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) +# PC/VS7.1 +if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) +# PC/AMD64 +if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) + + +def is_python_build(): + for fn in ("Setup.dist", "Setup.local"): + if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): + return True + return False + +_PYTHON_BUILD = is_python_build() + +_cfg_read = False + +def _ensure_cfg_read(): + global _cfg_read + if not _cfg_read: + from ..resources import finder + backport_package = __name__.rsplit('.', 1)[0] + _finder = finder(backport_package) + _cfgfile = _finder.find('sysconfig.cfg') + assert _cfgfile, 'sysconfig.cfg exists' + with _cfgfile.as_stream() as s: + _SCHEMES.readfp(s) + if _PYTHON_BUILD: + for scheme in ('posix_prefix', 'posix_home'): + _SCHEMES.set(scheme, 'include', '{srcdir}/Include') + _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') + + _cfg_read = True + + +_SCHEMES = configparser.RawConfigParser() +_VAR_REPL = re.compile(r'\{([^{]*?)\}') + +def _expand_globals(config): + _ensure_cfg_read() + if config.has_section('globals'): + globals = config.items('globals') + else: + globals = tuple() + + sections = config.sections() + for section in sections: + if section == 'globals': + continue + for option, value in globals: + if config.has_option(section, option): + continue + config.set(section, option, value) + config.remove_section('globals') + + # now expanding local variables defined in the cfg file + # + for section in config.sections(): + variables = dict(config.items(section)) + + def _replacer(matchobj): + name = matchobj.group(1) + if name in variables: + return variables[name] + return matchobj.group(0) + + for option, value in config.items(section): + config.set(section, option, _VAR_REPL.sub(_replacer, value)) + +#_expand_globals(_SCHEMES) + + # FIXME don't rely on sys.version here, its format is an implementation detail + # of CPython, use sys.version_info or sys.hexversion +_PY_VERSION = sys.version.split()[0] +_PY_VERSION_SHORT = sys.version[:3] +_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] +_PREFIX = os.path.normpath(sys.prefix) +_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) +_CONFIG_VARS = None +_USER_BASE = None + + +def _subst_vars(path, local_vars): + """In the string `path`, replace tokens like {some.thing} with the + corresponding value from the map `local_vars`. 
+ + If there is no corresponding value, leave the token unchanged. + """ + def _replacer(matchobj): + name = matchobj.group(1) + if name in local_vars: + return local_vars[name] + elif name in os.environ: + return os.environ[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, path) + + +def _extend_dict(target_dict, other_dict): + target_keys = target_dict.keys() + for key, value in other_dict.items(): + if key in target_keys: + continue + target_dict[key] = value + + +def _expand_vars(scheme, vars): + res = {} + if vars is None: + vars = {} + _extend_dict(vars, get_config_vars()) + + for key, value in _SCHEMES.items(scheme): + if os.name in ('posix', 'nt'): + value = os.path.expanduser(value) + res[key] = os.path.normpath(_subst_vars(value, vars)) + return res + + +def format_value(value, vars): + def _replacer(matchobj): + name = matchobj.group(1) + if name in vars: + return vars[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, value) + + +def _get_default_scheme(): + if os.name == 'posix': + # the default scheme for posix is posix_prefix + return 'posix_prefix' + return os.name + + +def _getuserbase(): + env_base = os.environ.get("PYTHONUSERBASE", None) + + def joinuser(*args): + return os.path.expanduser(os.path.join(*args)) + + # what about 'os2emx', 'riscos' ? + if os.name == "nt": + base = os.environ.get("APPDATA") or "~" + if env_base: + return env_base + else: + return joinuser(base, "Python") + + if sys.platform == "darwin": + framework = get_config_var("PYTHONFRAMEWORK") + if framework: + if env_base: + return env_base + else: + return joinuser("~", "Library", framework, "%d.%d" % + sys.version_info[:2]) + + if env_base: + return env_base + else: + return joinuser("~", ".local") + + +def _parse_makefile(filename, vars=None): + """Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + """ + # Regexes needed for parsing Makefile (and similar syntaxes, + # like old-style Setup files). + _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") + _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") + _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") + + if vars is None: + vars = {} + done = {} + notdone = {} + + with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: + lines = f.readlines() + + for line in lines: + if line.startswith('#') or line.strip() == '': + continue + m = _variable_rx.match(line) + if m: + n, v = m.group(1, 2) + v = v.strip() + # `$$' is a literal `$' in make + tmpv = v.replace('$$', '') + + if "$" in tmpv: + notdone[n] = v + else: + try: + v = int(v) + except ValueError: + # insert literal `$' + done[n] = v.replace('$$', '$') + else: + done[n] = v + + # do variable interpolation here + variables = list(notdone.keys()) + + # Variables with a 'PY_' prefix in the makefile. These need to + # be made available without that prefix through sysconfig. + # Special care is needed to ensure that variable expansion works, even + # if the expansion uses the name without a prefix. 
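+    # Editorial illustration: a Makefile may define PY_CFLAGS in terms of
+    # $(CFLAGS) while consumers expect plain CFLAGS; the loop below
+    # resolves such references and also mirrors PY_-prefixed results to
+    # their unprefixed names.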
+ renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') + + while len(variables) > 0: + for name in tuple(variables): + value = notdone[name] + m = _findvar1_rx.search(value) or _findvar2_rx.search(value) + if m is not None: + n = m.group(1) + found = True + if n in done: + item = str(done[n]) + elif n in notdone: + # get it on a subsequent round + found = False + elif n in os.environ: + # do it like make: fall back to environment + item = os.environ[n] + + elif n in renamed_variables: + if (name.startswith('PY_') and + name[3:] in renamed_variables): + item = "" + + elif 'PY_' + n in notdone: + found = False + + else: + item = str(done['PY_' + n]) + + else: + done[n] = item = "" + + if found: + after = value[m.end():] + value = value[:m.start()] + item + after + if "$" in after: + notdone[name] = value + else: + try: + value = int(value) + except ValueError: + done[name] = value.strip() + else: + done[name] = value + variables.remove(name) + + if (name.startswith('PY_') and + name[3:] in renamed_variables): + + name = name[3:] + if name not in done: + done[name] = value + + else: + # bogus variable reference (e.g. "prefix=$/opt/python"); + # just drop it since we can't deal + done[name] = value + variables.remove(name) + + # strip spurious spaces + for k, v in done.items(): + if isinstance(v, str): + done[k] = v.strip() + + # save the results in the global dictionary + vars.update(done) + return vars + + +def get_makefile_filename(): + """Return the path of the Makefile.""" + if _PYTHON_BUILD: + return os.path.join(_PROJECT_BASE, "Makefile") + if hasattr(sys, 'abiflags'): + config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) + else: + config_dir_name = 'config' + return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') + + +def _init_posix(vars): + """Initialize the module as appropriate for POSIX systems.""" + # load the installed Makefile: + makefile = get_makefile_filename() + try: + _parse_makefile(makefile, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % makefile + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # load the installed pyconfig.h: + config_h = get_config_h_filename() + try: + with open(config_h) as f: + parse_config_h(f, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % config_h + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # On AIX, there are wrong paths to the linker scripts in the Makefile + # -- these paths are relative to the Python source, but when installed + # the scripts are in another directory. + if _PYTHON_BUILD: + vars['LDSHARED'] = vars['BLDSHARED'] + + +def _init_non_posix(vars): + """Initialize the module as appropriate for NT""" + # set basic install directories + vars['LIBDEST'] = get_path('stdlib') + vars['BINLIBDEST'] = get_path('platstdlib') + vars['INCLUDEPY'] = get_path('include') + vars['SO'] = '.pyd' + vars['EXE'] = '.exe' + vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT + vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) + +# +# public APIs +# + + +def parse_config_h(fp, vars=None): + """Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. 
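+
+    Editorial sketch (not from upstream): a line of the form
+    "#define VERSION 3" yields vars['VERSION'] == 3 (integer-looking
+    values are converted), while "/* #undef FOO */" yields
+    vars['FOO'] == 0.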
+ """ + if vars is None: + vars = {} + define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") + undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") + + while True: + line = fp.readline() + if not line: + break + m = define_rx.match(line) + if m: + n, v = m.group(1, 2) + try: + v = int(v) + except ValueError: + pass + vars[n] = v + else: + m = undef_rx.match(line) + if m: + vars[m.group(1)] = 0 + return vars + + +def get_config_h_filename(): + """Return the path of pyconfig.h.""" + if _PYTHON_BUILD: + if os.name == "nt": + inc_dir = os.path.join(_PROJECT_BASE, "PC") + else: + inc_dir = _PROJECT_BASE + else: + inc_dir = get_path('platinclude') + return os.path.join(inc_dir, 'pyconfig.h') + + +def get_scheme_names(): + """Return a tuple containing the schemes names.""" + return tuple(sorted(_SCHEMES.sections())) + + +def get_path_names(): + """Return a tuple containing the paths names.""" + # xxx see if we want a static list + return _SCHEMES.options('posix_prefix') + + +def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): + """Return a mapping containing an install scheme. + + ``scheme`` is the install scheme name. If not provided, it will + return the default scheme for the current platform. + """ + _ensure_cfg_read() + if expand: + return _expand_vars(scheme, vars) + else: + return dict(_SCHEMES.items(scheme)) + + +def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): + """Return a path corresponding to the scheme. + + ``scheme`` is the install scheme name. + """ + return get_paths(scheme, vars, expand)[name] + + +def get_config_vars(*args): + """With no arguments, return a dictionary of all configuration + variables relevant for the current platform. + + On Unix, this means every variable defined in Python's installed Makefile; + On Windows and Mac OS it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + """ + global _CONFIG_VARS + if _CONFIG_VARS is None: + _CONFIG_VARS = {} + # Normalized versions of prefix and exec_prefix are handy to have; + # in fact, these are the standard versions used most places in the + # distutils2 module. + _CONFIG_VARS['prefix'] = _PREFIX + _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX + _CONFIG_VARS['py_version'] = _PY_VERSION + _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT + _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] + _CONFIG_VARS['base'] = _PREFIX + _CONFIG_VARS['platbase'] = _EXEC_PREFIX + _CONFIG_VARS['projectbase'] = _PROJECT_BASE + try: + _CONFIG_VARS['abiflags'] = sys.abiflags + except AttributeError: + # sys.abiflags may not be defined on all platforms. + _CONFIG_VARS['abiflags'] = '' + + if os.name in ('nt', 'os2'): + _init_non_posix(_CONFIG_VARS) + if os.name == 'posix': + _init_posix(_CONFIG_VARS) + # Setting 'userbase' is done below the call to the + # init function to enable using 'get_config_var' in + # the init-function. + if sys.version >= '2.6': + _CONFIG_VARS['userbase'] = _getuserbase() + + if 'srcdir' not in _CONFIG_VARS: + _CONFIG_VARS['srcdir'] = _PROJECT_BASE + else: + _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) + + # Convert srcdir into an absolute path if it appears necessary. + # Normally it is relative to the build directory. However, during + # testing, for example, we might be running a non-installed python + # from a different directory. 
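+        # Editorial illustration: if the Makefile recorded srcdir='.'
+        # while the build python is run from elsewhere, the block below
+        # rewrites srcdir to an absolute path under _PROJECT_BASE.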
+ if _PYTHON_BUILD and os.name == "posix": + base = _PROJECT_BASE + try: + cwd = os.getcwd() + except OSError: + cwd = None + if (not os.path.isabs(_CONFIG_VARS['srcdir']) and + base != cwd): + # srcdir is relative and we are not in the same directory + # as the executable. Assume executable is in the build + # directory and make srcdir absolute. + srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) + _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) + + if sys.platform == 'darwin': + kernel_version = os.uname()[2] # Kernel version (8.4.3) + major_version = int(kernel_version.split('.')[0]) + + if major_version < 8: + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + flags = _CONFIG_VARS[key] + flags = re.sub(r'-arch\s+\w+\s', ' ', flags) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _CONFIG_VARS[key] = flags + else: + # Allow the user to override the architecture flags using + # an environment variable. + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub(r'-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _CONFIG_VARS[key] = flags + + # If we're on OSX 10.5 or later and the user tries to + # compiles an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. + # + # The major usecase for this is users using a Python.org + # binary installer on OSX 10.6: that installer uses + # the 10.4u SDK, but that SDK is not installed by default + # when you install Xcode. + # + CFLAGS = _CONFIG_VARS.get('CFLAGS', '') + m = re.search(r'-isysroot\s+(\S+)', CFLAGS) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags) + _CONFIG_VARS[key] = flags + + if args: + vals = [] + for name in args: + vals.append(_CONFIG_VARS.get(name)) + return vals + else: + return _CONFIG_VARS + + +def get_config_var(name): + """Return the value of a single variable using the dictionary returned by + 'get_config_vars()'. + + Equivalent to get_config_vars().get(name) + """ + return get_config_vars().get(name) + + +def get_platform(): + """Return a string that identifies the current platform. + + This is used mainly to distinguish platform-specific build directories and + platform-specific built distributions. Typically includes the OS name + and version and the architecture (as supplied by 'os.uname()'), + although the exact information included depends on the OS; eg. for IRIX + the architecture isn't particularly important (IRIX only runs on SGI + hardware), but for Linux the kernel version isn't particularly + important. + + Examples of returned values: + linux-i586 + linux-alpha (?) 
+ solaris-2.6-sun4u + irix-5.3 + irix64-6.2 + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win-ia64 (64bit Windows on Itanium) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + """ + if os.name == 'nt': + # sniff sys.version for architecture. + prefix = " bit (" + i = sys.version.find(prefix) + if i == -1: + return sys.platform + j = sys.version.find(")", i) + look = sys.version[i+len(prefix):j].lower() + if look == 'amd64': + return 'win-amd64' + if look == 'itanium': + return 'win-ia64' + return sys.platform + + if os.name != "posix" or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha, + # Mac OS is M68k or PPC, etc. + return sys.platform + + # Try to distinguish various flavours of Unix + osname, host, release, version, machine = os.uname() + + # Convert the OS name to lowercase, remove '/' characters + # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_') + machine = machine.replace('/', '-') + + if osname[:5] == "linux": + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return "%s-%s" % (osname, machine) + elif osname[:5] == "sunos": + if release[0] >= "5": # SunOS 5 == Solaris 2 + osname = "solaris" + release = "%d.%s" % (int(release[0]) - 3, release[2:]) + # fall through to standard osname-release-machine representation + elif osname[:4] == "irix": # could be "irix64"! + return "%s-%s" % (osname, release) + elif osname[:3] == "aix": + return "%s-%s.%s" % (osname, version, release) + elif osname[:6] == "cygwin": + osname = "cygwin" + rel_re = re.compile(r'[\d.]+') + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == "darwin": + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. + cfgvars = get_config_vars() + macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') + + if True: + # Always calculate the release of the running machine, + # needed to determine if we can build fat binaries or not. + + macrelease = macver + # Get the system version. Reading this plist is a documented + # way to get the system version (see the documentation for + # the Gestalt Manager) + try: + f = open('/System/Library/CoreServices/SystemVersion.plist') + except IOError: + # We're on a plain darwin box, fall back to the default + # behaviour. + pass + else: + try: + m = re.search(r'<key>ProductUserVisibleVersion</key>\s*' + r'<string>(.*?)</string>', f.read()) + finally: + f.close() + if m is not None: + macrelease = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + + if not macver: + macver = macrelease + + if macver: + release = macver + osname = "macosx" + + if ((macrelease + '.') >= '10.4.' and + '-arch' in get_config_vars().get('CFLAGS', '').strip()): + # The universal build will build fat binaries, but not on + # systems before 10.4 + # + # Try to detect 4-way universal builds, those have machine-type + # 'universal' instead of 'fat'. 
+ + machine = 'fat' + cflags = get_config_vars().get('CFLAGS') + + archs = re.findall(r'-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxsize >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxsize >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return "%s-%s-%s" % (osname, release, machine) + + +def get_python_version(): + return _PY_VERSION_SHORT + + +def _print_dict(title, data): + for index, (key, value) in enumerate(sorted(data.items())): + if index == 0: + print('%s: ' % (title)) + print('\t%s = "%s"' % (key, value)) + + +def _main(): + """Display all information sysconfig detains.""" + print('Platform: "%s"' % get_platform()) + print('Python version: "%s"' % get_python_version()) + print('Current installation scheme: "%s"' % _get_default_scheme()) + print() + _print_dict('Paths', get_paths()) + print() + _print_dict('Variables', get_config_vars()) + + +if __name__ == '__main__': + _main() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyc new file mode 100644 index 0000000..378751e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py new file mode 100644 index 0000000..d66d856 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py @@ -0,0 +1,2607 @@ +#------------------------------------------------------------------- +# tarfile.py +#------------------------------------------------------------------- +# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de> +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +from __future__ import print_function + +"""Read from and write to tar format archives. +""" + +__version__ = "$Revision$" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" +__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." + +#--------- +# Imports +#--------- +import sys +import os +import stat +import errno +import time +import struct +import copy +import re + +try: + import grp, pwd +except ImportError: + grp = pwd = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +symlink_exception = (AttributeError, NotImplementedError) +try: + # WindowsError (1314) will be raised if the caller does not hold the + # SeCreateSymbolicLinkPrivilege privilege + symlink_exception += (WindowsError,) +except NameError: + pass + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] + +if sys.version_info[0] < 3: + import __builtin__ as builtins +else: + import builtins + +_open = builtins.open # Since 'open' is TarFile.open + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # POSIX.1-2001 (pax) format +DEFAULT_FORMAT = GNU_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. 
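+# (Editorial note: the pax "hdrcharset" keyword declares the encoding used
+# for these string-valued fields; the numeric fields listed next are parsed
+# with the Python types given below, independent of that charset.)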
+PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. +PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# Bits used in the mode field, values in octal. +#--------------------------------------------------------- +S_IFLNK = 0o120000 # symbolic link +S_IFREG = 0o100000 # regular file +S_IFBLK = 0o060000 # block device +S_IFDIR = 0o040000 # directory +S_IFCHR = 0o020000 # character device +S_IFIFO = 0o010000 # fifo + +TSUID = 0o4000 # set UID on execution +TSGID = 0o2000 # set GID on execution +TSVTX = 0o1000 # reserved + +TUREAD = 0o400 # read by owner +TUWRITE = 0o200 # write by owner +TUEXEC = 0o100 # execute/search by owner +TGREAD = 0o040 # read by group +TGWRITE = 0o020 # write by group +TGEXEC = 0o010 # execute/search by group +TOREAD = 0o004 # read by other +TOWRITE = 0o002 # write by other +TOEXEC = 0o001 # execute/search by other + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name in ("nt", "ce"): + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. + """ + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] != chr(0o200): + try: + n = int(nts(s, "ascii", "strict") or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + else: + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += ord(s[i + 1]) + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. + """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 byte indicates this particular + # encoding, the following digits-1 bytes are a big-endian + # representation. This allows values up to (256**(digits-1))-1. + if 0 <= n < 8 ** (digits - 1): + s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL + else: + if format != GNU_FORMAT or n >= 256 ** (digits - 1): + raise ValueError("overflow in number field") + + if n < 0: + # XXX We mimic GNU tar's behaviour with negative numbers, + # this could raise OverflowError. + n = struct.unpack("L", struct.pack("l", n))[0] + + s = bytearray() + for i in range(digits - 1): + s.insert(0, n & 0o377) + n >>= 8 + s.insert(0, 0o200) + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. 
So we calculate two checksums, unsigned and + signed. + """ + unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) + signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) + return unsigned_chksum, signed_chksum + +def copyfileobj(src, dst, length=None): + """Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + """ + if length == 0: + return + if length is None: + while True: + buf = src.read(16*1024) + if not buf: + break + dst.write(buf) + return + + BUFSIZE = 16 * 1024 + blocks, remainder = divmod(length, BUFSIZE) + for b in range(blocks): + buf = src.read(BUFSIZE) + if len(buf) < BUFSIZE: + raise IOError("end of file reached") + dst.write(buf) + + if remainder != 0: + buf = src.read(remainder) + if len(buf) < remainder: + raise IOError("end of file reached") + dst.write(buf) + return + +filemode_table = ( + ((S_IFLNK, "l"), + (S_IFREG, "-"), + (S_IFBLK, "b"), + (S_IFDIR, "d"), + (S_IFCHR, "c"), + (S_IFIFO, "p")), + + ((TUREAD, "r"),), + ((TUWRITE, "w"),), + ((TUEXEC|TSUID, "s"), + (TSUID, "S"), + (TUEXEC, "x")), + + ((TGREAD, "r"),), + ((TGWRITE, "w"),), + ((TGEXEC|TSGID, "s"), + (TSGID, "S"), + (TGEXEC, "x")), + + ((TOREAD, "r"),), + ((TOWRITE, "w"),), + ((TOEXEC|TSVTX, "t"), + (TSVTX, "T"), + (TOEXEC, "x")) +) + +def filemode(mode): + """Convert a file's mode to a string of the form + -rwxrwxrwx. + Used by TarFile.list() + """ + perm = [] + for table in filemode_table: + for bit, char in table: + if mode & bit == bit: + perm.append(char) + break + else: + perm.append("-") + return "".join(perm) + +class TarError(Exception): + """Base exception.""" + pass +class ExtractError(TarError): + """General exception for extract errors.""" + pass +class ReadError(TarError): + """Exception for unreadable tar archives.""" + pass +class CompressionError(TarError): + """Exception for unavailable compression methods.""" + pass +class StreamError(TarError): + """Exception for unsupported operations on stream-like TarFiles.""" + pass +class HeaderError(TarError): + """Base exception for header errors.""" + pass +class EmptyHeaderError(HeaderError): + """Exception for empty headers.""" + pass +class TruncatedHeaderError(HeaderError): + """Exception for truncated headers.""" + pass +class EOFHeaderError(HeaderError): + """Exception for end of file headers.""" + pass +class InvalidHeaderError(HeaderError): + """Exception for invalid headers.""" + pass +class SubsequentHeaderError(HeaderError): + """Exception for missing and invalid extended headers.""" + pass + +#--------------------------- +# internal stream interface +#--------------------------- +class _LowLevelFile(object): + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, + "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode, 0o666) + + def close(self): + os.close(self.fd) + + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) + +class _Stream(object): + """Class that serves as an adapter between TarFile and + a stream-like object. The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. Use of gzip or bzip2 compression is possible. 
+ A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. + + _Stream is intended to be used only internally. + """ + + def __init__(self, name, mode, comptype, fileobj, bufsize): + """Construct a _Stream object. + """ + self._extfileobj = True + if fileobj is None: + fileobj = _LowLevelFile(name, mode) + self._extfileobj = False + + if comptype == '*': + # Enable transparent compression detection for the + # stream interface + fileobj = _StreamProxy(fileobj) + comptype = fileobj.getcomptype() + + self.name = name or "" + self.mode = mode + self.comptype = comptype + self.fileobj = fileobj + self.bufsize = bufsize + self.buf = b"" + self.pos = 0 + self.closed = False + + try: + if comptype == "gz": + try: + import zlib + except ImportError: + raise CompressionError("zlib module is not available") + self.zlib = zlib + self.crc = zlib.crc32(b"") + if mode == "r": + self._init_read_gz() + else: + self._init_write_gz() + + if comptype == "bz2": + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + if mode == "r": + self.dbuf = b"" + self.cmp = bz2.BZ2Decompressor() + else: + self.cmp = bz2.BZ2Compressor() + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + def __del__(self): + if hasattr(self, "closed") and not self.closed: + self.close() + + def _init_write_gz(self): + """Initialize for writing with gzip compression. + """ + self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, + -self.zlib.MAX_WBITS, + self.zlib.DEF_MEM_LEVEL, + 0) + timestamp = struct.pack("<L", int(time.time())) + self.__write(b"\037\213\010\010" + timestamp + b"\002\377") + if self.name.endswith(".gz"): + self.name = self.name[:-3] + # RFC1952 says we must use ISO-8859-1 for the FNAME field. + self.__write(self.name.encode("iso-8859-1", "replace") + NUL) + + def write(self, s): + """Write string s to the stream. + """ + if self.comptype == "gz": + self.crc = self.zlib.crc32(s, self.crc) + self.pos += len(s) + if self.comptype != "tar": + s = self.cmp.compress(s) + self.__write(s) + + def __write(self, s): + """Write string s to the stream if a whole new block + is ready to be written. + """ + self.buf += s + while len(self.buf) > self.bufsize: + self.fileobj.write(self.buf[:self.bufsize]) + self.buf = self.buf[self.bufsize:] + + def close(self): + """Close the _Stream object. No operation should be + done on it afterwards. + """ + if self.closed: + return + + if self.mode == "w" and self.comptype != "tar": + self.buf += self.cmp.flush() + + if self.mode == "w" and self.buf: + self.fileobj.write(self.buf) + self.buf = b"" + if self.comptype == "gz": + # The native zlib crc is an unsigned 32-bit integer, but + # the Python wrapper implicitly casts that to a signed C + # long. So, on a 32-bit box self.crc may "look negative", + # while the same crc on a 64-bit box may "look positive". + # To avoid irksome warnings from the `struct` module, force + # it to look positive on all boxes. + self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff)) + self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF)) + + if not self._extfileobj: + self.fileobj.close() + + self.closed = True + + def _init_read_gz(self): + """Initialize for reading a gzip compressed fileobj. 
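+
+        Editorial note: a gzip member begins with a 10-byte header
+        (magic 0x1f 0x8b, method 0x08 = deflate, one flag byte, then
+        mtime, XFL and OS); flag bits 4, 8, 16 and 2 mark the optional
+        EXTRA, NAME, COMMENT and header-CRC fields skipped below.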
+ """ + self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) + self.dbuf = b"" + + # taken from gzip.GzipFile with some alterations + if self.__read(2) != b"\037\213": + raise ReadError("not a gzip file") + if self.__read(1) != b"\010": + raise CompressionError("unsupported compression method") + + flag = ord(self.__read(1)) + self.__read(6) + + if flag & 4: + xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) + self.read(xlen) + if flag & 8: + while True: + s = self.__read(1) + if not s or s == NUL: + break + if flag & 16: + while True: + s = self.__read(1) + if not s or s == NUL: + break + if flag & 2: + self.__read(2) + + def tell(self): + """Return the stream's file pointer position. + """ + return self.pos + + def seek(self, pos=0): + """Set the stream's file pointer to pos. Negative seeking + is forbidden. + """ + if pos - self.pos >= 0: + blocks, remainder = divmod(pos - self.pos, self.bufsize) + for i in range(blocks): + self.read(self.bufsize) + self.read(remainder) + else: + raise StreamError("seeking backwards is not allowed") + return self.pos + + def read(self, size=None): + """Return the next size number of bytes from the stream. + If size is not defined, return all bytes of the stream + up to EOF. + """ + if size is None: + t = [] + while True: + buf = self._read(self.bufsize) + if not buf: + break + t.append(buf) + buf = "".join(t) + else: + buf = self._read(size) + self.pos += len(buf) + return buf + + def _read(self, size): + """Return size bytes from the stream. + """ + if self.comptype == "tar": + return self.__read(size) + + c = len(self.dbuf) + while c < size: + buf = self.__read(self.bufsize) + if not buf: + break + try: + buf = self.cmp.decompress(buf) + except IOError: + raise ReadError("invalid compressed data") + self.dbuf += buf + c += len(buf) + buf = self.dbuf[:size] + self.dbuf = self.dbuf[size:] + return buf + + def __read(self, size): + """Return size bytes from stream. If internal buffer is empty, + read another block from the stream. + """ + c = len(self.buf) + while c < size: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + self.buf += buf + c += len(buf) + buf = self.buf[:size] + self.buf = self.buf[size:] + return buf +# class _Stream + +class _StreamProxy(object): + """Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + """ + + def __init__(self, fileobj): + self.fileobj = fileobj + self.buf = self.fileobj.read(BLOCKSIZE) + + def read(self, size): + self.read = self.fileobj.read + return self.buf + + def getcomptype(self): + if self.buf.startswith(b"\037\213\010"): + return "gz" + if self.buf.startswith(b"BZh91"): + return "bz2" + return "tar" + + def close(self): + self.fileobj.close() +# class StreamProxy + +class _BZ2Proxy(object): + """Small proxy class that enables external file object + support for "r:bz2" and "w:bz2" modes. This is actually + a workaround for a limitation in bz2 module's BZ2File + class which (unlike gzip.GzipFile) has no support for + a file object argument. 
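+
+    Illustrative sketch (editorial; mirrors how TarFile.bz2open uses
+    this class, file name hypothetical):
+
+        proxy = _BZ2Proxy(open("backup.tar.bz2", "rb"), mode="r")
+        block = proxy.read(10240)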
+ """ + + blocksize = 16 * 1024 + + def __init__(self, fileobj, mode): + self.fileobj = fileobj + self.mode = mode + self.name = getattr(self.fileobj, "name", None) + self.init() + + def init(self): + import bz2 + self.pos = 0 + if self.mode == "r": + self.bz2obj = bz2.BZ2Decompressor() + self.fileobj.seek(0) + self.buf = b"" + else: + self.bz2obj = bz2.BZ2Compressor() + + def read(self, size): + x = len(self.buf) + while x < size: + raw = self.fileobj.read(self.blocksize) + if not raw: + break + data = self.bz2obj.decompress(raw) + self.buf += data + x += len(data) + + buf = self.buf[:size] + self.buf = self.buf[size:] + self.pos += len(buf) + return buf + + def seek(self, pos): + if pos < self.pos: + self.init() + self.read(pos - self.pos) + + def tell(self): + return self.pos + + def write(self, data): + self.pos += len(data) + raw = self.bz2obj.compress(data) + self.fileobj.write(raw) + + def close(self): + if self.mode == "w": + raw = self.bz2obj.flush() + self.fileobj.write(raw) +# class _BZ2Proxy + +#------------------------ +# Extraction file object +#------------------------ +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + """ + + def __init__(self, fileobj, offset, size, blockinfo=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.position = 0 + + if blockinfo is None: + blockinfo = [(0, size)] + + # Construct a map with data and zero blocks. + self.map_index = 0 + self.map = [] + lastpos = 0 + realpos = self.offset + for offset, size in blockinfo: + if offset > lastpos: + self.map.append((False, lastpos, offset, None)) + self.map.append((True, offset, offset + size, realpos)) + realpos += size + lastpos = offset + size + if lastpos < self.size: + self.map.append((False, lastpos, self.size, None)) + + def seekable(self): + if not hasattr(self.fileobj, "seekable"): + # XXX gzip.GzipFile and bz2.BZ2File + return True + return self.fileobj.seekable() + + def tell(self): + """Return the current file position. + """ + return self.position + + def seek(self, position): + """Seek to a position in the file. + """ + self.position = position + + def read(self, size=None): + """Read data from the file. + """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + buf += self.fileobj.read(length) + else: + buf += NUL * length + size -= length + self.position += length + return buf +#class _FileInFile + + +class ExFileObject(object): + """File-like object for reading an archive member. + Is returned by TarFile.extractfile(). + """ + blocksize = 1024 + + def __init__(self, tarfile, tarinfo): + self.fileobj = _FileInFile(tarfile.fileobj, + tarinfo.offset_data, + tarinfo.size, + tarinfo.sparse) + self.name = tarinfo.name + self.mode = "r" + self.closed = False + self.size = tarinfo.size + + self.position = 0 + self.buffer = b"" + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return self.fileobj.seekable() + + def read(self, size=None): + """Read at most size bytes from the file. 
If size is not + present or None, read all data until EOF is reached. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + buf = b"" + if self.buffer: + if size is None: + buf = self.buffer + self.buffer = b"" + else: + buf = self.buffer[:size] + self.buffer = self.buffer[size:] + + if size is None: + buf += self.fileobj.read() + else: + buf += self.fileobj.read(size - len(buf)) + + self.position += len(buf) + return buf + + # XXX TextIOWrapper uses the read1() method. + read1 = read + + def readline(self, size=-1): + """Read one entire line from the file. If size is present + and non-negative, return a string with at most that + size, which may be an incomplete line. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + while True: + buf = self.fileobj.read(self.blocksize) + self.buffer += buf + if not buf or b"\n" in buf: + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + pos = len(self.buffer) + break + + if size != -1: + pos = min(size, pos) + + buf = self.buffer[:pos] + self.buffer = self.buffer[pos:] + self.position += len(buf) + return buf + + def readlines(self): + """Return a list with all remaining lines. + """ + result = [] + while True: + line = self.readline() + if not line: break + result.append(line) + return result + + def tell(self): + """Return the current file position. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + return self.position + + def seek(self, pos, whence=os.SEEK_SET): + """Seek to a position in the file. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + if whence == os.SEEK_SET: + self.position = min(max(pos, 0), self.size) + elif whence == os.SEEK_CUR: + if pos < 0: + self.position = max(self.position + pos, 0) + else: + self.position = min(self.position + pos, self.size) + elif whence == os.SEEK_END: + self.position = max(min(self.size + pos, self.size), 0) + else: + raise ValueError("Invalid argument") + + self.buffer = b"" + self.fileobj.seek(self.position) + + def close(self): + """Close the file object. + """ + self.closed = True + + def __iter__(self): + """Get an iterator over the file's lines. + """ + while True: + line = self.readline() + if not line: + break + yield line +#class ExFileObject + +#------------------ +# Exported Classes +#------------------ +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", + "chksum", "type", "linkname", "uname", "gname", + "devmajor", "devminor", + "offset", "offset_data", "pax_headers", "sparse", + "tarfile", "_sparse_structs", "_link_target") + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. 
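+
+        Illustrative sketch (editorial, values hypothetical):
+
+            info = TarInfo("data/report.txt")
+            info.size = 1024
+            info.mtime = 1234567890
+            # info.tobuf() then returns the member's 512-byte header block(s)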
+ """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + # In pax headers the "name" and "linkname" field are called + # "path" and "linkpath". + def _getpath(self): + return self.name + def _setpath(self, name): + self.name = name + path = property(_getpath, _setpath) + + def _getlinkpath(self): + return self.linkname + def _setlinkpath(self, linkname): + self.linkname = linkname + linkpath = property(_getlinkpath, _setlinkpath) + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. + """ + info = { + "name": self.name, + "mode": self.mode & 0o7777, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. + """ + info = self.get_info() + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"]) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"]) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"]) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. + """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"]) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"]) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. 
+ for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. + try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + if name in pax_headers: + # The pax header has priority. Avoid overflow. + info[name] = 0 + continue + + val = info[name] + if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): + pax_headers[name] = str(val) + info[name] = 0 + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. + """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") + + def _posix_split_name(self, name): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + prefix = name[:LENGTH_PREFIX + 1] + while prefix and prefix[-1] != "/": + prefix = prefix[:-1] + + name = name[len(prefix):] + prefix = prefix[:-1] + + if not prefix or len(name) > LENGTH_NAME: + raise ValueError("name is too long") + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. + """ + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + info.get("type", REGTYPE), + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + itn(info.get("devmajor", 0), 8, format), + itn(info.get("devminor", 0), 8, format), + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. 
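# The GNU flavour built above takes the other route: an over-long name is
# emitted as a separate "././@LongLink" pseudo-member that precedes the real
# header. Sketched again with the stdlib tarfile, which behaves the same:
import tarfile
buf = tarfile.TarInfo("y" * 120).tobuf(tarfile.GNU_FORMAT)
assert buf.startswith(b"././@LongLink")  # GNUTYPE_LONGNAME block comes first
assert len(buf) == 3 * 512               # long header + padded name + header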
+ return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ + cls._create_payload(name) + + @classmethod + def _create_pax_generic_header(cls, pax_headers, type, encoding): + """Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + """ + # Check if one of the fields contains surrogate characters and thereby + # forces hdrcharset=BINARY, see _proc_pax() for more information. + binary = False + for keyword, value in pax_headers.items(): + try: + value.encode("utf8", "strict") + except UnicodeEncodeError: + binary = True + break + + records = b"" + if binary: + # Put the hdrcharset field at the beginning of the header. + records += b"21 hdrcharset=BINARY\n" + + for keyword, value in pax_headers.items(): + keyword = keyword.encode("utf8") + if binary: + # Try to restore the original byte representation of `value'. + # Needless to say, that the encoding must match the string. + value = value.encode(encoding, "surrogateescape") + else: + value = value.encode("utf8") + + l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' + n = p = 0 + while True: + n = l + len(str(p)) + if n == p: + break + p = n + records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" + + # We use a hardcoded "././@PaxHeader" name like star does + # instead of the one that POSIX recommends. + info = {} + info["name"] = "././@PaxHeader" + info["type"] = type + info["size"] = len(records) + info["magic"] = POSIX_MAGIC + + # Create pax header + record blocks. + return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ + cls._create_payload(records) + + @classmethod + def frombuf(cls, buf, encoding, errors): + """Construct a TarInfo object from a 512 byte bytes object. + """ + if len(buf) == 0: + raise EmptyHeaderError("empty header") + if len(buf) != BLOCKSIZE: + raise TruncatedHeaderError("truncated header") + if buf.count(NUL) == BLOCKSIZE: + raise EOFHeaderError("end of file header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise InvalidHeaderError("bad checksum") + + obj = cls() + obj.name = nts(buf[0:100], encoding, errors) + obj.mode = nti(buf[100:108]) + obj.uid = nti(buf[108:116]) + obj.gid = nti(buf[116:124]) + obj.size = nti(buf[124:136]) + obj.mtime = nti(buf[136:148]) + obj.chksum = chksum + obj.type = buf[156:157] + obj.linkname = nts(buf[157:257], encoding, errors) + obj.uname = nts(buf[265:297], encoding, errors) + obj.gname = nts(buf[297:329], encoding, errors) + obj.devmajor = nti(buf[329:337]) + obj.devminor = nti(buf[337:345]) + prefix = nts(buf[345:500], encoding, errors) + + # Old V7 tar format represents a directory as a regular + # file with a trailing slash. + if obj.type == AREGTYPE and obj.name.endswith("/"): + obj.type = DIRTYPE + + # The old GNU sparse format occupies some of the unused + # space in the buffer for up to 4 sparse structures. + # Save the them for later processing in _proc_sparse(). + if obj.type == GNUTYPE_SPARSE: + pos = 386 + structs = [] + for i in range(4): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[482]) + origsize = nti(buf[483:495]) + obj._sparse_structs = (structs, isextended, origsize) + + # Remove redundant slashes from directories. + if obj.isdir(): + obj.name = obj.name.rstrip("/") + + # Reconstruct a ustar longname. 
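# The ustar route, by contrast, splits a long path across the prefix and
# name header fields; the reconstruction step just below glues them back
# together. Round-trip sketch (stdlib tarfile, same header layout):
import tarfile
name = "/".join(["d" * 20] * 7)          # 146 chars, too long for one field
buf = tarfile.TarInfo(name).tobuf(tarfile.USTAR_FORMAT)
back = tarfile.TarInfo.frombuf(buf, "utf-8", "surrogateescape")
assert back.name == name                 # prefix + "/" + name restored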
+ if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. + """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. + self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. 
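# _proc_pax() below distinguishes global headers (XGLTYPE, which apply to
# every following member) from per-member extended headers. The effect is
# visible through TarFile.pax_headers; a stdlib sketch with an in-memory
# archive:
import io
import tarfile
raw = io.BytesIO()
with tarfile.open(fileobj=raw, mode="w", format=tarfile.PAX_FORMAT,
                  pax_headers={"comment": "hello"}) as tf:
    tf.addfile(tarfile.TarInfo("empty"))         # zero-length member
raw.seek(0)
with tarfile.open(fileobj=raw) as tf:
    assert tf.pax_headers.get("comment") == "hello"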
+ buf = tarfile.fileobj.read(self._block(self.size)) + + # A pax header stores supplemental information for either + # the following file (extended) or all following files + # (global). + if self.type == XGLTYPE: + pax_headers = tarfile.pax_headers + else: + pax_headers = tarfile.pax_headers.copy() + + # Check if the pax header contains a hdrcharset field. This tells us + # the encoding of the path, linkpath, uname and gname fields. Normally, + # these fields are UTF-8 encoded but since POSIX.1-2008 tar + # implementations are allowed to store them as raw binary strings if + # the translation to UTF-8 fails. + match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) + if match is not None: + pax_headers["hdrcharset"] = match.group(1).decode("utf8") + + # For the time being, we don't care about anything other than "BINARY". + # The only other value that is currently allowed by the standard is + # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. + hdrcharset = pax_headers.get("hdrcharset") + if hdrcharset == "BINARY": + encoding = tarfile.encoding + else: + encoding = "utf8" + + # Parse pax header information. A record looks like that: + # "%d %s=%s\n" % (length, keyword, value). length is the size + # of the complete record including the length field itself and + # the newline. keyword and value are both UTF-8 encoded strings. + regex = re.compile(br"(\d+) ([^=]+)=") + pos = 0 + while True: + match = regex.match(buf, pos) + if not match: + break + + length, keyword = match.groups() + length = int(length) + value = buf[match.end(2) + 1:match.start(1) + length - 1] + + # Normally, we could just use "utf8" as the encoding and "strict" + # as the error handler, but we better not take the risk. For + # example, GNU tar <= 1.23 is known to store filenames it cannot + # translate to UTF-8 as raw strings (unfortunately without a + # hdrcharset=BINARY header). + # We first try the strict standard encoding, and if that fails we + # fall back on the user's encoding and error handler. + keyword = self._decode_pax_field(keyword, "utf8", "utf8", + tarfile.errors) + if keyword in PAX_NAME_FIELDS: + value = self._decode_pax_field(value, encoding, tarfile.encoding, + tarfile.errors) + else: + value = self._decode_pax_field(value, "utf8", "utf8", + tarfile.errors) + + pax_headers[keyword] = value + pos += length + + # Fetch the next header. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Process GNU sparse information. + if "GNU.sparse.map" in pax_headers: + # GNU extended sparse format version 0.1. + self._proc_gnusparse_01(next, pax_headers) + + elif "GNU.sparse.size" in pax_headers: + # GNU extended sparse format version 0.0. + self._proc_gnusparse_00(next, pax_headers, buf) + + elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": + # GNU extended sparse format version 1.0. + self._proc_gnusparse_10(next, pax_headers, tarfile) + + if self.type in (XHDTYPE, SOLARIS_XHDTYPE): + # Patch the TarInfo object with the extended header info. + next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) + next.offset = self.offset + + if "size" in pax_headers: + # If the extended header replaces the size field, + # we need to recalculate the offset where the next + # header starts. 
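# The records parsed above are self-describing: the leading number counts
# the whole "<len> <keyword>=<value>\n" line, including its own digits,
# which is why _create_pax_generic_header() iterates to a fixed point. The
# same trick in isolation:
def pax_record(keyword, value):
    body = " %s=%s\n" % (keyword, value)
    n = p = 0
    while True:
        n = len(str(p)) + len(body)      # the length depends on its own width
        if n == p:
            break
        p = n
    return "%d%s" % (n, body)

assert pax_record("size", "1234") == "13 size=1234\n"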
+ offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, pax_headers, buf): + """Process a GNU tar extended sparse header, version 0.0. + """ + offsets = [] + for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): + offsets.append(int(match.group(1))) + numbytes = [] + for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): + numbytes.append(int(match.group(1))) + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. + """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. + """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + return self.type in REGULAR_TYPES + def isfile(self): + return self.isreg() + def isdir(self): + return self.type == DIRTYPE + def issym(self): + return self.type == SYMTYPE + def islnk(self): + return self.type == LNKTYPE + def ischr(self): + return self.type == CHRTYPE + def isblk(self): + return self.type == BLKTYPE + def isfifo(self): + return self.type == FIFOTYPE + def issparse(self): + return self.sparse is not None + def isdev(self): + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. + + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). 
If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The default ExFileObject class to use. + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): + """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + self.mode = mode + self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. + self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if name is None and hasattr(fileobj, "name"): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. + while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) + + if self.mode in "aw": + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. 
The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + """Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. + + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. + for comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + if fileobj is not None: + fileobj.seek(saved_pos) + continue + raise ReadError("file could not be opened successfully") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. + if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + stream = _Stream(name, filemode, comptype, fileobj, bufsize) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in "aw": + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. 
+ """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + try: + import gzip + gzip.GzipFile + except (ImportError, AttributeError): + raise CompressionError("gzip module is not available") + + extfileobj = fileobj is not None + try: + fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) + t = cls.taropen(name, mode, fileobj, **kwargs) + except IOError: + if not extfileobj and fileobj is not None: + fileobj.close() + if fileobj is None: + raise + raise ReadError("not a gzip file") + except: + if not extfileobj and fileobj is not None: + fileobj.close() + raise + t._extfileobj = extfileobj + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'.") + + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + + if fileobj is not None: + fileobj = _BZ2Proxy(fileobj, mode) + else: + fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (IOError, EOFError): + fileobj.close() + raise ReadError("not a bzip2 file") + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open" # bzip2 compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + if self.mode in "aw": + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + + if not self._extfileobj: + self.fileobj.close() + self.closed = True + + def getmember(self, name): + """Return a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object for either the file `name' or the file + object `fileobj' (using os.fstat on its file descriptor). You can + modify some of the TarInfo's attributes before you add it using + addfile(). If given, `arcname' specifies an alternative name for the + file in the archive. 
+ """ + self._check("aw") + + # When fileobj is given, replace name by + # fileobj's real name. + if fileobj is not None: + name = fileobj.name + + # Building the name of the member in the archive. + # Backward slashes are converted to forward slashes, + # Absolute paths are turned to relative paths. + if arcname is None: + arcname = name + drv, arcname = os.path.splitdrive(arcname) + arcname = arcname.replace(os.sep, "/") + arcname = arcname.lstrip("/") + + # Now, fill the TarInfo object with + # information specific for the file. + tarinfo = self.tarinfo() + tarinfo.tarfile = self + + # Use os.stat or os.lstat, depending on platform + # and if symlinks shall be resolved. + if fileobj is None: + if hasattr(os, "lstat") and not self.dereference: + statres = os.lstat(name) + else: + statres = os.stat(name) + else: + statres = os.fstat(fileobj.fileno()) + linkname = "" + + stmd = statres.st_mode + if stat.S_ISREG(stmd): + inode = (statres.st_ino, statres.st_dev) + if not self.dereference and statres.st_nlink > 1 and \ + inode in self.inodes and arcname != self.inodes[inode]: + # Is it a hardlink to an already + # archived file? + type = LNKTYPE + linkname = self.inodes[inode] + else: + # The inode is added only if its valid. + # For win32 it is always 0. + type = REGTYPE + if inode[0]: + self.inodes[inode] = arcname + elif stat.S_ISDIR(stmd): + type = DIRTYPE + elif stat.S_ISFIFO(stmd): + type = FIFOTYPE + elif stat.S_ISLNK(stmd): + type = SYMTYPE + linkname = os.readlink(name) + elif stat.S_ISCHR(stmd): + type = CHRTYPE + elif stat.S_ISBLK(stmd): + type = BLKTYPE + else: + return None + + # Fill the TarInfo object with all + # information we can get. + tarinfo.name = arcname + tarinfo.mode = stmd + tarinfo.uid = statres.st_uid + tarinfo.gid = statres.st_gid + if type == REGTYPE: + tarinfo.size = statres.st_size + else: + tarinfo.size = 0 + tarinfo.mtime = statres.st_mtime + tarinfo.type = type + tarinfo.linkname = linkname + if pwd: + try: + tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] + except KeyError: + pass + if grp: + try: + tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] + except KeyError: + pass + + if type in (CHRTYPE, BLKTYPE): + if hasattr(os, "major") and hasattr(os, "minor"): + tarinfo.devmajor = os.major(statres.st_rdev) + tarinfo.devminor = os.minor(statres.st_rdev) + return tarinfo + + def list(self, verbose=True): + """Print a table of contents to sys.stdout. If `verbose' is False, only + the names of the members are printed. If it is True, an `ls -l'-like + output is produced. + """ + self._check() + + for tarinfo in self: + if verbose: + print(filemode(tarinfo.mode), end=' ') + print("%s/%s" % (tarinfo.uname or tarinfo.uid, + tarinfo.gname or tarinfo.gid), end=' ') + if tarinfo.ischr() or tarinfo.isblk(): + print("%10s" % ("%d,%d" \ + % (tarinfo.devmajor, tarinfo.devminor)), end=' ') + else: + print("%10d" % tarinfo.size, end=' ') + print("%d-%02d-%02d %02d:%02d:%02d" \ + % time.localtime(tarinfo.mtime)[:6], end=' ') + + print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') + + if verbose: + if tarinfo.issym(): + print("->", tarinfo.linkname, end=' ') + if tarinfo.islnk(): + print("link to", tarinfo.linkname, end=' ') + print() + + def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): + """Add the file `name' to the archive. `name' may be any type of file + (directory, fifo, symbolic link, etc.). If given, `arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. 
This can be avoided by + setting `recursive' to False. `exclude' is a function that should + return True for each filename to be excluded. `filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + """ + self._check("aw") + + if arcname is None: + arcname = name + + # Exclude pathnames. + if exclude is not None: + import warnings + warnings.warn("use the filter argument instead", + DeprecationWarning, 2) + if exclude(name): + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Skip if somebody tries to archive the archive... + if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. + tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + f = bltn_open(name, "rb") + self.addfile(tarinfo, f) + f.close() + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in os.listdir(name): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, exclude, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, tarinfo.size bytes are read from it and added to the archive. + You can create TarInfo objects using gettarinfo(). + On Windows platforms, `fileobj' should always be opened with mode + 'rb' to avoid irritation about the file size. + """ + self._check("aw") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + + # If there's data to follow, append it. + if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def extractall(self, path=".", members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). + """ + directories = [] + + if members is None: + members = self + + for tarinfo in members: + if tarinfo.isdir(): + # Extract directories with a safe mode. + directories.append(tarinfo) + tarinfo = copy.copy(tarinfo) + tarinfo.mode = 0o700 + # Do not set_attrs directories, as we will do that further down + self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name) + directories.reverse() + + # Set correct owner, mtime and filemode on directories. 
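# Typical use of the extraction API above. Directory metadata is applied
# only after all members are written, deepest directories first, so that a
# restrictive directory mode cannot block extraction of its own contents.
# "example.tar" is a placeholder name; the call is stdlib-compatible:
import tarfile
with tarfile.open("example.tar") as tf:
    tf.extractall(path="out")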
+ for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extract(self, member, path="", set_attrs=True): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + # Prepare the link target for makelink(). + if tarinfo.islnk(): + tarinfo._link_target = os.path.join(path, tarinfo.linkname) + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs) + except EnvironmentError as e: + if self.errorlevel > 0: + raise + else: + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extractfile(self, member): + """Extract a member from the archive as a file object. `member' may be + a filename or a TarInfo object. If `member' is a regular file, a + file-like object is returned. If `member' is a link, a file-like + object is constructed from the link's target. If `member' is none of + the above, None is returned. + The file-like object is read-only and provides the following + methods: read(), readline(), readlines(), seek() and tell() + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg(): + return self.fileobject(self, tarinfo) + + elif tarinfo.type not in SUPPORTED_TYPES: + # If a member's type is unknown, it is treated as a + # regular file. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. + raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. + return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True): + """Extract the TarInfo object tarinfo to a physical + file called targetpath. + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. 
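# extractfile(), defined above, hands back a read-only file-like object, or
# None for members that carry no data (directories, devices, FIFOs).
# Sketch, again with a placeholder archive name:
import tarfile
with tarfile.open("example.tar") as tf:
    for member in tf.getmembers():
        f = tf.extractfile(member)
        if f is not None:                # skip data-less members
            first_line = f.readline()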
+ os.makedirs(upperdirs) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink(tarinfo, targetpath) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. + + def makedir(self, tarinfo, targetpath): + """Make a directory called targetpath. + """ + try: + # Use a safe mode for the directory, the real mode is set + # later in _extract_member(). + os.mkdir(targetpath, 0o700) + except EnvironmentError as e: + if e.errno != errno.EEXIST: + raise + + def makefile(self, tarinfo, targetpath): + """Make a file called targetpath. + """ + source = self.fileobj + source.seek(tarinfo.offset_data) + target = bltn_open(targetpath, "wb") + if tarinfo.sparse is not None: + for offset, size in tarinfo.sparse: + target.seek(offset) + copyfileobj(source, target, size) + else: + copyfileobj(source, target, tarinfo.size) + target.seek(tarinfo.size) + target.truncate() + target.close() + + def makeunknown(self, tarinfo, targetpath): + """Make a file from a TarInfo object with an unknown type + at targetpath. + """ + self.makefile(tarinfo, targetpath) + self._dbg(1, "tarfile: Unknown file type %r, " \ + "extracted as regular file." % tarinfo.type) + + def makefifo(self, tarinfo, targetpath): + """Make a fifo called targetpath. + """ + if hasattr(os, "mkfifo"): + os.mkfifo(targetpath) + else: + raise ExtractError("fifo not supported by system") + + def makedev(self, tarinfo, targetpath): + """Make a character or block device called targetpath. + """ + if not hasattr(os, "mknod") or not hasattr(os, "makedev"): + raise ExtractError("special devices not supported by system") + + mode = tarinfo.mode + if tarinfo.isblk(): + mode |= stat.S_IFBLK + else: + mode |= stat.S_IFCHR + + os.mknod(targetpath, mode, + os.makedev(tarinfo.devmajor, tarinfo.devminor)) + + def makelink(self, tarinfo, targetpath): + """Make a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + """ + try: + # For systems that support symbolic and hard links. + if tarinfo.issym(): + os.symlink(tarinfo.linkname, targetpath) + else: + # See extract(). 
+ if os.path.exists(tarinfo._link_target): + os.link(tarinfo._link_target, targetpath) + else: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except symlink_exception: + if tarinfo.issym(): + linkpath = os.path.join(os.path.dirname(tarinfo.name), + tarinfo.linkname) + else: + linkpath = tarinfo.linkname + else: + try: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except KeyError: + raise ExtractError("unable to resolve link inside archive") + + def chown(self, tarinfo, targetpath): + """Set owner of targetpath according to tarinfo. + """ + if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: + # We have to be root to do so. + try: + g = grp.getgrnam(tarinfo.gname)[2] + except KeyError: + g = tarinfo.gid + try: + u = pwd.getpwnam(tarinfo.uname)[2] + except KeyError: + u = tarinfo.uid + try: + if tarinfo.issym() and hasattr(os, "lchown"): + os.lchown(targetpath, u, g) + else: + if sys.platform != "os2emx": + os.chown(targetpath, u, g) + except EnvironmentError as e: + raise ExtractError("could not change owner") + + def chmod(self, tarinfo, targetpath): + """Set file permissions of targetpath according to tarinfo. + """ + if hasattr(os, 'chmod'): + try: + os.chmod(targetpath, tarinfo.mode) + except EnvironmentError as e: + raise ExtractError("could not change mode") + + def utime(self, tarinfo, targetpath): + """Set modification time of targetpath according to tarinfo. + """ + if not hasattr(os, 'utime'): + return + try: + os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) + except EnvironmentError as e: + raise ExtractError("could not change modification time") + + #-------------------------------------------------------------------------- + def next(self): + """Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + """ + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + # Read the next block. + self.fileobj.seek(self.offset) + tarinfo = None + while True: + try: + tarinfo = self.tarinfo.fromtarfile(self) + except EOFHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + except InvalidHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + elif self.offset == 0: + raise ReadError(str(e)) + except EmptyHeaderError: + if self.offset == 0: + raise ReadError("empty file") + except TruncatedHeaderError as e: + if self.offset == 0: + raise ReadError(str(e)) + except SubsequentHeaderError as e: + raise ReadError(str(e)) + break + + if tarinfo is not None: + self.members.append(tarinfo) + else: + self._loaded = True + + return tarinfo + + #-------------------------------------------------------------------------- + # Little helper methods: + + def _getmember(self, name, tarinfo=None, normalize=False): + """Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + """ + # Ensure that all members have been loaded. + members = self.getmembers() + + # Limit the member search list up to tarinfo. 
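# The reversed() scan just below is what gives getmember() its documented
# "last occurrence wins" behaviour: re-adding a name effectively replaces
# the earlier member. In-memory demonstration (stdlib tarfile):
import io
import tarfile
raw = io.BytesIO()
with tarfile.open(fileobj=raw, mode="w") as tf:
    for payload in (b"old", b"new"):
        info = tarfile.TarInfo("a.txt")
        info.size = len(payload)
        tf.addfile(info, io.BytesIO(payload))
raw.seek(0)
with tarfile.open(fileobj=raw) as tf:
    assert tf.extractfile(tf.getmember("a.txt")).read() == b"new"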
+ if tarinfo is not None: + members = members[:members.index(tarinfo)] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + def _load(self): + """Read through the entire archive file and look for readable + members. + """ + while True: + tarinfo = self.next() + if tarinfo is None: + break + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + """ + if self.closed: + raise IOError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise IOError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + return iter(self.members) + else: + return TarIter(self) + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. + """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True +# class TarFile + +class TarIter(object): + """Iterator Class. + + for tarinfo in TarFile(...): + suite... + """ + + def __init__(self, tarfile): + """Construct a TarIter object. + """ + self.tarfile = tarfile + self.index = 0 + def __iter__(self): + """Return iterator object. + """ + return self + + def __next__(self): + """Return the next item using TarFile's next() method. + When all members have been read, set TarFile as _loaded. + """ + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will cause TarIter to stop prematurely. + if not self.tarfile._loaded: + tarinfo = self.tarfile.next() + if not tarinfo: + self.tarfile._loaded = True + raise StopIteration + else: + try: + tarinfo = self.tarfile.members[self.index] + except IndexError: + raise StopIteration + self.index += 1 + return tarinfo + + next = __next__ # for Python 2.x + +#-------------------- +# exported functions +#-------------------- +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. 
+ """ + try: + t = open(name) + t.close() + return True + except TarError: + return False + +bltn_open = open +open = TarFile.open diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyc new file mode 100644 index 0000000..7f9a6fe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py new file mode 100644 index 0000000..ff328c8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py @@ -0,0 +1,1120 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import absolute_import + +import os +import re +import sys + +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +if sys.version_info[0] < 3: # pragma: no cover + from StringIO import StringIO + string_types = basestring, + text_type = unicode + from types import FileType as file_type + import __builtin__ as builtins + import ConfigParser as configparser + from ._backport import shutil + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit + from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, + pathname2url, ContentTooShortError, splittype) + + def quote(s): + if isinstance(s, unicode): + s = s.encode('utf-8') + return _quote(s) + + import urllib2 + from urllib2 import (Request, urlopen, URLError, HTTPError, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPHandler, HTTPRedirectHandler, + build_opener) + if ssl: + from urllib2 import HTTPSHandler + import httplib + import xmlrpclib + import Queue as queue + from HTMLParser import HTMLParser + import htmlentitydefs + raw_input = raw_input + from itertools import ifilter as filter + from itertools import ifilterfalse as filterfalse + + _userprog = None + def splituser(host): + """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + global _userprog + if _userprog is None: + import re + _userprog = re.compile('^(.*)@(.*)$') + + match = _userprog.match(host) + if match: return match.group(1, 2) + return None, host + +else: # pragma: no cover + from io import StringIO + string_types = str, + text_type = str + from io import TextIOWrapper as file_type + import builtins + import configparser + import shutil + from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, + unquote, urlsplit, urlunsplit, splittype) + from urllib.request import (urlopen, urlretrieve, Request, url2pathname, + pathname2url, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPHandler, HTTPRedirectHandler, + build_opener) + if ssl: + from urllib.request import HTTPSHandler + from urllib.error import HTTPError, URLError, ContentTooShortError + import http.client as httplib + import urllib.request as urllib2 + import xmlrpc.client as xmlrpclib + import queue + from html.parser import HTMLParser + import html.entities as htmlentitydefs + raw_input = input + from itertools import filterfalse + filter = filter + +try: + from ssl import match_hostname, CertificateError +except ImportError: # pragma: no cover + class CertificateError(ValueError): + pass + + + def _dnsname_match(dn, hostname, max_wildcards=1): + 
"""Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + parts = dn.split('.') + leftmost, remainder = parts[0], parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survey of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. 
+ if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + + +try: + from types import SimpleNamespace as Container +except ImportError: # pragma: no cover + class Container(object): + """ + A generic container for when multiple values need to be returned + """ + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +try: + from shutil import which +except ImportError: # pragma: no cover + # Implementation from Python 3.3 + def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + """ + # Check that a given file can be accessed with the correct mode. + # Additionally check that `file` is not a directory, as on Windows + # directories pass the os.access check. + def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) + and not os.path.isdir(fn)) + + # If we're given a path with a directory part, look it up directly rather + # than referring to PATH directories. This includes checking relative to the + # current directory, e.g. ./script + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + # The current directory takes precedence on Windows. + if not os.curdir in path: + path.insert(0, os.curdir) + + # PATHEXT is necessary to check on Windows. + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + # See if the given file matches any of the expected path extensions. + # This will allow us to short circuit when given "python.exe". + # If it does match, only test that one, otherwise we have to try + # others. + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + # On other platforms you don't have things like PATHEXT to tell you + # what file suffixes are executable, so just pass on cmd as-is. 
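# The which() fallback completed just below mirrors shutil.which() from
# Python 3.3: it honours PATHEXT on Windows and rejects directories that
# would otherwise pass the os.access() check. Usage sketch:
print(which("python"))                       # e.g. "/usr/bin/python", or None
print(which("python", path="/no/such/dir"))  # -> None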
+ files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if not normdir in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None + + +# ZipFile is a context manager in 2.7, but not in 2.6 + +from zipfile import ZipFile as BaseZipFile + +if hasattr(BaseZipFile, '__enter__'): # pragma: no cover + ZipFile = BaseZipFile +else: # pragma: no cover + from zipfile import ZipExtFile as BaseZipExtFile + + class ZipExtFile(BaseZipExtFile): + def __init__(self, base): + self.__dict__.update(base.__dict__) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + class ZipFile(BaseZipFile): + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + def open(self, *args, **kwargs): + base = BaseZipFile.open(self, *args, **kwargs) + return ZipExtFile(base) + +try: + from platform import python_implementation +except ImportError: # pragma: no cover + def python_implementation(): + """Return a string identifying the Python implementation.""" + if 'PyPy' in sys.version: + return 'PyPy' + if os.name == 'java': + return 'Jython' + if sys.version.startswith('IronPython'): + return 'IronPython' + return 'CPython' + +try: + import sysconfig +except ImportError: # pragma: no cover + from ._backport import sysconfig + +try: + callable = callable +except NameError: # pragma: no cover + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode + fsdecode = os.fsdecode +except AttributeError: # pragma: no cover + # Issue #99: on some systems (e.g. containerised), + # sys.getfilesystemencoding() returns None, and we need a real value, + # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and + # sys.getfilesystemencoding(): the return value is "the user’s preference + # according to the result of nl_langinfo(CODESET), or None if the + # nl_langinfo(CODESET) failed." + _fsencoding = sys.getfilesystemencoding() or 'utf-8' + if _fsencoding == 'mbcs': + _fserrors = 'strict' + else: + _fserrors = 'surrogateescape' + + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, text_type): + return filename.encode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + + def fsdecode(filename): + if isinstance(filename, text_type): + return filename + elif isinstance(filename, bytes): + return filename.decode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + +try: + from tokenize import detect_encoding +except ImportError: # pragma: no cover + from codecs import BOM_UTF8, lookup + import re + + cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") + + def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. 
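+        # e.g. 'UTF_8' normalises to 'utf-8', and 'Latin-1' to 'iso-8859-1'
+        # (per the checks below).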
+ enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + + def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + """ + try: + filename = readline.__self__.name + except AttributeError: + filename = None + bom_found = False + encoding = None + default = 'utf-8' + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' + + def find_cookie(line): + try: + # Decode as UTF-8. Either the line is an encoding declaration, + # in which case it should be pure ASCII, or it must be UTF-8 + # per default encoding. + line_string = line.decode('utf-8') + except UnicodeDecodeError: + msg = "invalid or missing encoding declaration" + if filename is not None: + msg = '{} for {!r}'.format(msg, filename) + raise SyntaxError(msg) + + matches = cookie_re.findall(line_string) + if not matches: + return None + encoding = _get_normal_name(matches[0]) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + if filename is None: + msg = "unknown encoding: " + encoding + else: + msg = "unknown encoding for {!r}: {}".format(filename, + encoding) + raise SyntaxError(msg) + + if bom_found: + if codec.name != 'utf-8': + # This behaviour mimics the Python interpreter + if filename is None: + msg = 'encoding problem: utf-8' + else: + msg = 'encoding problem for {!r}: utf-8'.format(filename) + raise SyntaxError(msg) + encoding += '-sig' + return encoding + + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not first: + return default, [] + + encoding = find_cookie(first) + if encoding: + return encoding, [first] + + second = read_or_stop() + if not second: + return default, [first] + + encoding = find_cookie(second) + if encoding: + return encoding, [first, second] + + return default, [first, second] + +# For converting & <-> & etc. 
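+# (i.e. between literal characters and their HTML entity references, such as
+# '&' and '&amp;'). For example, escape('a < b') returns 'a &lt; b', and
+# unescape('a &lt; b') returns 'a < b'.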
+try: + from html import escape +except ImportError: + from cgi import escape +if sys.version_info[:2] < (3, 4): + unescape = HTMLParser().unescape +else: + from html import unescape + +try: + from collections import ChainMap +except ImportError: # pragma: no cover + from collections import MutableMapping + + try: + from reprlib import recursive_repr as _recursive_repr + except ImportError: + def _recursive_repr(fillvalue='...'): + ''' + Decorator to make a repr function return fillvalue for a recursive + call + ''' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + return wrapper + + return decorating_function + + class ChainMap(MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self): # like Django's Context.push() + 'New ChainMap with a new dict followed by all previous maps.' + return self.__class__({}, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' 
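+            # For example, with cm = ChainMap({'a': 1}, {'a': 2, 'b': 3}),
+            # cm['a'] == 1 (the first map wins) while cm.parents['a'] == 2.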
+ return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + +try: + from importlib.util import cache_from_source # Python >= 3.4 +except ImportError: # pragma: no cover + try: + from imp import cache_from_source + except ImportError: # pragma: no cover + def cache_from_source(path, debug_override=None): + assert path.endswith('.py') + if debug_override is None: + debug_override = __debug__ + if debug_override: + suffix = 'c' + else: + suffix = 'o' + return path + suffix + +try: + from collections import OrderedDict +except ImportError: # pragma: no cover +## {{{ http://code.activestate.com/recipes/576693/ (r9) +# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. +# Passes Python2.7's test suite and incorporates all the latest updates. + try: + from thread import get_ident as _get_ident + except ImportError: + from dummy_thread import get_ident as _get_ident + + try: + from _abcoll import KeysView, ValuesView, ItemsView + except ImportError: + pass + + + class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as for regular dictionaries. + + # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. Signature is the same as for + regular dictionaries, but keyword arguments are not recommended + because their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link which goes at the end of the linked + # list, and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which is + # then removed by updating the links in the predecessor and successor nodes. 
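+            # The circular list's sentinel root means the unlink below needs
+            # no special cases for the first or last element.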
+ dict_delitem(self, key) + link_prev, link_next, key = self.__map.pop(key) + link_prev[1] = link_next + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + root = self.__root + curr = root[0] + while curr is not root: + yield curr[2] + curr = curr[0] + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + except AttributeError: + pass + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root[0] + link_prev = link[0] + link_prev[1] = root + root[0] = link_prev + else: + link = root[1] + link_next = link[1] + root[1] = link_next + link_next[0] = root + key = link[2] + del self.__map[key] + value = dict.pop(self, key) + return key, value + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) items in od' + for k in self: + yield (k, self[k]) + + def update(*args, **kwds): + '''od.update(E, **F) -> None. Update od from dict/iterable E and F. + + If E is a dict instance, does: for k in E: od[k] = E[k] + If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] + Or if E is an iterable of items, does: for k, v in E: od[k] = v + In either case, this is followed by: for k, v in F.items(): od[k] = v + + ''' + if len(args) > 2: + raise TypeError('update() takes at most 2 positional ' + 'arguments (%d given)' % (len(args),)) + elif not args: + raise TypeError('update() takes at least 1 argument (0 given)') + self = args[0] + # Make progressively weaker assumptions about "other" + other = () + if len(args) == 2: + other = args[1] + if isinstance(other, dict): + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. 
+ + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running=None): + 'od.__repr__() <==> repr(od)' + if not _repr_running: _repr_running = {} + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) + +try: + from logging.config import BaseConfigurator, valid_ident +except ImportError: # pragma: no cover + IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) + + + def valid_ident(s): + m = IDENTIFIER.match(s) + if not m: + raise ValueError('Not a valid Python identifier: %r' % s) + return True + + + # The ConvertingXXX classes are wrappers around standard Python containers, + # and they serve to convert any suitable values in the container. The + # conversion converts base dicts, lists and tuples to their wrapped + # equivalents, whereas strings which match a conversion format are converted + # appropriately. + # + # Each wrapper should have a configurator attribute holding the actual + # configurator to use for conversion. 
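+    # For example, given BaseConfigurator({'handler': 'ext://sys.stderr'}),
+    # reading config['handler'] returns the sys.stderr object itself: the
+    # 'ext://' prefix routes the string through ext_convert() and resolve().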
+ + class ConvertingDict(dict): + """A converting dictionary wrapper.""" + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def get(self, key, default=None): + value = dict.get(self, key, default) + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def pop(self, key, default=None): + value = dict.pop(self, key, default) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + class ConvertingList(list): + """A converting list wrapper.""" + def __getitem__(self, key): + value = list.__getitem__(self, key) + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def pop(self, idx=-1): + value = list.pop(self, idx) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + return result + + class ConvertingTuple(tuple): + """A converting tuple wrapper.""" + def __getitem__(self, key): + value = tuple.__getitem__(self, key) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + class BaseConfigurator(object): + """ + The configurator base class which defines some useful defaults. + """ + + CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$') + + WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') + DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') + INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') + DIGIT_PATTERN = re.compile(r'^\d+$') + + value_converters = { + 'ext' : 'ext_convert', + 'cfg' : 'cfg_convert', + } + + # We might want to use a different one, e.g. importlib + importer = staticmethod(__import__) + + def __init__(self, config): + self.config = ConvertingDict(config) + self.config.configurator = self + + def resolve(self, s): + """ + Resolve strings to objects using standard import and attribute + syntax. + """ + name = s.split('.') + used = name.pop(0) + try: + found = self.importer(used) + for frag in name: + used += '.' 
+ frag + try: + found = getattr(found, frag) + except AttributeError: + self.importer(used) + found = getattr(found, frag) + return found + except ImportError: + e, tb = sys.exc_info()[1:] + v = ValueError('Cannot resolve %r: %s' % (s, e)) + v.__cause__, v.__traceback__ = e, tb + raise v + + def ext_convert(self, value): + """Default converter for the ext:// protocol.""" + return self.resolve(value) + + def cfg_convert(self, value): + """Default converter for the cfg:// protocol.""" + rest = value + m = self.WORD_PATTERN.match(rest) + if m is None: + raise ValueError("Unable to convert %r" % value) + else: + rest = rest[m.end():] + d = self.config[m.groups()[0]] + #print d, rest + while rest: + m = self.DOT_PATTERN.match(rest) + if m: + d = d[m.groups()[0]] + else: + m = self.INDEX_PATTERN.match(rest) + if m: + idx = m.groups()[0] + if not self.DIGIT_PATTERN.match(idx): + d = d[idx] + else: + try: + n = int(idx) # try as number first (most likely) + d = d[n] + except TypeError: + d = d[idx] + if m: + rest = rest[m.end():] + else: + raise ValueError('Unable to convert ' + '%r at %r' % (value, rest)) + #rest should be empty + return d + + def convert(self, value): + """ + Convert values to an appropriate type. dicts, lists and tuples are + replaced by their converting alternatives. Strings are checked to + see if they have a conversion format and are converted if they do. + """ + if not isinstance(value, ConvertingDict) and isinstance(value, dict): + value = ConvertingDict(value) + value.configurator = self + elif not isinstance(value, ConvertingList) and isinstance(value, list): + value = ConvertingList(value) + value.configurator = self + elif not isinstance(value, ConvertingTuple) and\ + isinstance(value, tuple): + value = ConvertingTuple(value) + value.configurator = self + elif isinstance(value, string_types): + m = self.CONVERT_PATTERN.match(value) + if m: + d = m.groupdict() + prefix = d['prefix'] + converter = self.value_converters.get(prefix, None) + if converter: + suffix = d['suffix'] + converter = getattr(self, converter) + value = converter(suffix) + return value + + def configure_custom(self, config): + """Configure an object with a user-supplied factory.""" + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) + result = c(**kwargs) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def as_tuple(self, value): + """Utility function which converts lists to tuples.""" + if isinstance(value, list): + value = tuple(value) + return value diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyc new file mode 100644 index 0000000..6d57700 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py new file mode 100644 index 0000000..b13cdac --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py @@ -0,0 +1,1339 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2017 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +"""PEP 376 implementation.""" + +from __future__ import unicode_literals + +import base64 +import codecs +import contextlib +import hashlib +import logging +import os +import posixpath +import sys +import zipimport + +from . import DistlibException, resources +from .compat import StringIO +from .version import get_scheme, UnsupportedVersionError +from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, + LEGACY_METADATA_FILENAME) +from .util import (parse_requirement, cached_property, parse_name_and_version, + read_exports, write_exports, CSVReader, CSVWriter) + + +__all__ = ['Distribution', 'BaseInstalledDistribution', + 'InstalledDistribution', 'EggInfoDistribution', + 'DistributionPath'] + + +logger = logging.getLogger(__name__) + +EXPORTS_FILENAME = 'pydist-exports.json' +COMMANDS_FILENAME = 'pydist-commands.json' + +DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', + 'RESOURCES', EXPORTS_FILENAME, 'SHARED') + +DISTINFO_EXT = '.dist-info' + + +class _Cache(object): + """ + A simple cache mapping names and .dist-info paths to distributions + """ + def __init__(self): + """ + Initialise an instance. There is normally one for each DistributionPath. + """ + self.name = {} + self.path = {} + self.generated = False + + def clear(self): + """ + Clear the cache, setting it to its initial state. + """ + self.name.clear() + self.path.clear() + self.generated = False + + def add(self, dist): + """ + Add a distribution to the cache. + :param dist: The distribution to add. + """ + if dist.path not in self.path: + self.path[dist.path] = dist + self.name.setdefault(dist.key, []).append(dist) + + +class DistributionPath(object): + """ + Represents a set of distributions installed on a path (typically sys.path). + """ + def __init__(self, path=None, include_egg=False): + """ + Create an instance from a path, optionally including legacy (distutils/ + setuptools/distribute) distributions. + :param path: The path to use, as a list of directories. If not specified, + sys.path is used. + :param include_egg: If True, this instance will look for and return legacy + distributions as well as those based on PEP 376. + """ + if path is None: + path = sys.path + self.path = path + self._include_dist = True + self._include_egg = include_egg + + self._cache = _Cache() + self._cache_egg = _Cache() + self._cache_enabled = True + self._scheme = get_scheme('default') + + def _get_cache_enabled(self): + return self._cache_enabled + + def _set_cache_enabled(self, value): + self._cache_enabled = value + + cache_enabled = property(_get_cache_enabled, _set_cache_enabled) + + def clear_cache(self): + """ + Clears the internal cache. + """ + self._cache.clear() + self._cache_egg.clear() + + + def _yield_distributions(self): + """ + Yield .dist-info and/or .egg(-info) distributions. + """ + # We need to check if we've seen some resources already, because on + # some Linux systems (e.g. some Debian/Ubuntu variants) there are + # symlinks which alias other files in the environment. 
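+        # Tracking resource paths in `seen` ensures each .dist-info or
+        # .egg(-info) entry is yielded at most once, even when it is
+        # reachable via more than one sys.path entry.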
+ seen = set() + for path in self.path: + finder = resources.finder_for_path(path) + if finder is None: + continue + r = finder.find('') + if not r or not r.is_container: + continue + rset = sorted(r.resources) + for entry in rset: + r = finder.find(entry) + if not r or r.path in seen: + continue + if self._include_dist and entry.endswith(DISTINFO_EXT): + possible_filenames = [METADATA_FILENAME, + WHEEL_METADATA_FILENAME, + LEGACY_METADATA_FILENAME] + for metadata_filename in possible_filenames: + metadata_path = posixpath.join(entry, metadata_filename) + pydist = finder.find(metadata_path) + if pydist: + break + else: + continue + + with contextlib.closing(pydist.as_stream()) as stream: + metadata = Metadata(fileobj=stream, scheme='legacy') + logger.debug('Found %s', r.path) + seen.add(r.path) + yield new_dist_class(r.path, metadata=metadata, + env=self) + elif self._include_egg and entry.endswith(('.egg-info', + '.egg')): + logger.debug('Found %s', r.path) + seen.add(r.path) + yield old_dist_class(r.path, self) + + def _generate_cache(self): + """ + Scan the path for distributions and populate the cache with + those that are found. + """ + gen_dist = not self._cache.generated + gen_egg = self._include_egg and not self._cache_egg.generated + if gen_dist or gen_egg: + for dist in self._yield_distributions(): + if isinstance(dist, InstalledDistribution): + self._cache.add(dist) + else: + self._cache_egg.add(dist) + + if gen_dist: + self._cache.generated = True + if gen_egg: + self._cache_egg.generated = True + + @classmethod + def distinfo_dirname(cls, name, version): + """ + The *name* and *version* parameters are converted into their + filename-escaped form, i.e. any ``'-'`` characters are replaced + with ``'_'`` other than the one in ``'dist-info'`` and the one + separating the name from the version number. + + :parameter name: is converted to a standard distribution name by replacing + any runs of non- alphanumeric characters with a single + ``'-'``. + :type name: string + :parameter version: is converted to a standard version string. Spaces + become dots, and all other non-alphanumeric characters + (except dots) become dashes, with runs of multiple + dashes condensed to a single dash. + :type version: string + :returns: directory name + :rtype: string""" + name = name.replace('-', '_') + return '-'.join([name, version]) + DISTINFO_EXT + + def get_distributions(self): + """ + Provides an iterator that looks for distributions and returns + :class:`InstalledDistribution` or + :class:`EggInfoDistribution` instances for each one of them. + + :rtype: iterator of :class:`InstalledDistribution` and + :class:`EggInfoDistribution` instances + """ + if not self._cache_enabled: + for dist in self._yield_distributions(): + yield dist + else: + self._generate_cache() + + for dist in self._cache.path.values(): + yield dist + + if self._include_egg: + for dist in self._cache_egg.path.values(): + yield dist + + def get_distribution(self, name): + """ + Looks for a named distribution on the path. + + This function only returns the first result found, as no more than one + value is expected. If nothing is found, ``None`` is returned. 
+ + :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` + or ``None`` + """ + result = None + name = name.lower() + if not self._cache_enabled: + for dist in self._yield_distributions(): + if dist.key == name: + result = dist + break + else: + self._generate_cache() + + if name in self._cache.name: + result = self._cache.name[name][0] + elif self._include_egg and name in self._cache_egg.name: + result = self._cache_egg.name[name][0] + return result + + def provides_distribution(self, name, version=None): + """ + Iterates over all distributions to find which distributions provide *name*. + If a *version* is provided, it will be used to filter the results. + + This function only returns the first result found, since no more than + one values are expected. If the directory is not found, returns ``None``. + + :parameter version: a version specifier that indicates the version + required, conforming to the format in ``PEP-345`` + + :type name: string + :type version: string + """ + matcher = None + if version is not None: + try: + matcher = self._scheme.matcher('%s (%s)' % (name, version)) + except ValueError: + raise DistlibException('invalid name or version: %r, %r' % + (name, version)) + + for dist in self.get_distributions(): + # We hit a problem on Travis where enum34 was installed and doesn't + # have a provides attribute ... + if not hasattr(dist, 'provides'): + logger.debug('No "provides": %s', dist) + else: + provided = dist.provides + + for p in provided: + p_name, p_ver = parse_name_and_version(p) + if matcher is None: + if p_name == name: + yield dist + break + else: + if p_name == name and matcher.match(p_ver): + yield dist + break + + def get_file_path(self, name, relative_path): + """ + Return the path to a resource file. + """ + dist = self.get_distribution(name) + if dist is None: + raise LookupError('no distribution named %r found' % name) + return dist.get_resource_path(relative_path) + + def get_exported_entries(self, category, name=None): + """ + Return all of the exported entries in a particular category. + + :param category: The category to search for entries. + :param name: If specified, only entries with that name are returned. + """ + for dist in self.get_distributions(): + r = dist.exports + if category in r: + d = r[category] + if name is not None: + if name in d: + yield d[name] + else: + for v in d.values(): + yield v + + +class Distribution(object): + """ + A base class for distributions, whether installed or from indexes. + Either way, it must have some metadata, so that's all that's needed + for construction. + """ + + build_time_dependency = False + """ + Set to True if it's known to be only a build-time dependency (i.e. + not needed after installation). + """ + + requested = False + """A boolean that indicates whether the ``REQUESTED`` metadata file is + present (in other words, whether the package was installed by user + request or it was installed as a dependency).""" + + def __init__(self, metadata): + """ + Initialise an instance. + :param metadata: The instance of :class:`Metadata` describing this + distribution. 
+ """ + self.metadata = metadata + self.name = metadata.name + self.key = self.name.lower() # for case-insensitive comparisons + self.version = metadata.version + self.locator = None + self.digest = None + self.extras = None # additional features requested + self.context = None # environment marker overrides + self.download_urls = set() + self.digests = {} + + @property + def source_url(self): + """ + The source archive download URL for this distribution. + """ + return self.metadata.source_url + + download_url = source_url # Backward compatibility + + @property + def name_and_version(self): + """ + A utility property which displays the name and version in parentheses. + """ + return '%s (%s)' % (self.name, self.version) + + @property + def provides(self): + """ + A set of distribution names and versions provided by this distribution. + :return: A set of "name (version)" strings. + """ + plist = self.metadata.provides + s = '%s (%s)' % (self.name, self.version) + if s not in plist: + plist.append(s) + return plist + + def _get_requirements(self, req_attr): + md = self.metadata + logger.debug('Getting requirements from metadata %r', md.todict()) + reqts = getattr(md, req_attr) + return set(md.get_requirements(reqts, extras=self.extras, + env=self.context)) + + @property + def run_requires(self): + return self._get_requirements('run_requires') + + @property + def meta_requires(self): + return self._get_requirements('meta_requires') + + @property + def build_requires(self): + return self._get_requirements('build_requires') + + @property + def test_requires(self): + return self._get_requirements('test_requires') + + @property + def dev_requires(self): + return self._get_requirements('dev_requires') + + def matches_requirement(self, req): + """ + Say if this instance matches (fulfills) a requirement. + :param req: The requirement to match. + :rtype req: str + :return: True if it matches, else False. + """ + # Requirement may contain extras - parse to lose those + # from what's passed to the matcher + r = parse_requirement(req) + scheme = get_scheme(self.metadata.scheme) + try: + matcher = scheme.matcher(r.requirement) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + result = False + for p in self.provides: + p_name, p_ver = parse_name_and_version(p) + if p_name != name: + continue + try: + result = matcher.match(p_ver) + break + except UnsupportedVersionError: + pass + return result + + def __repr__(self): + """ + Return a textual representation of this instance, + """ + if self.source_url: + suffix = ' [%s]' % self.source_url + else: + suffix = '' + return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix) + + def __eq__(self, other): + """ + See if this distribution is the same as another. + :param other: The distribution to compare with. To be equal to one + another. distributions must have the same type, name, + version and source_url. + :return: True if it is the same, else False. + """ + if type(other) is not type(self): + result = False + else: + result = (self.name == other.name and + self.version == other.version and + self.source_url == other.source_url) + return result + + def __hash__(self): + """ + Compute hash in a way which matches the equality test. 
+        """
+        return hash(self.name) + hash(self.version) + hash(self.source_url)
+
+
+class BaseInstalledDistribution(Distribution):
+    """
+    This is the base class for installed distributions (whether PEP 376 or
+    legacy).
+    """
+
+    hasher = None
+
+    def __init__(self, metadata, path, env=None):
+        """
+        Initialise an instance.
+        :param metadata: An instance of :class:`Metadata` which describes the
+                         distribution. This will normally have been initialised
+                         from a metadata file in the ``path``.
+        :param path: The path of the ``.dist-info`` or ``.egg-info``
+                     directory for the distribution.
+        :param env: This is normally the :class:`DistributionPath`
+                    instance where this distribution was found.
+        """
+        super(BaseInstalledDistribution, self).__init__(metadata)
+        self.path = path
+        self.dist_path = env
+
+    def get_hash(self, data, hasher=None):
+        """
+        Get the hash of some data, using a particular hash algorithm, if
+        specified.
+
+        :param data: The data to be hashed.
+        :type data: bytes
+        :param hasher: The name of a hash implementation, supported by hashlib,
+                       or ``None``. Examples of valid values are ``'sha1'``,
+                       ``'sha224'``, ``'sha384'``, ``'sha256'``, ``'md5'`` and
+                       ``'sha512'``. If no hasher is specified, the ``hasher``
+                       attribute of the :class:`InstalledDistribution` instance
+                       is used. If the hasher is determined to be ``None``, MD5
+                       is used as the hashing algorithm.
+        :returns: The hash of the data. If a hasher was explicitly specified,
+                  the returned hash will be prefixed with the specified hasher
+                  followed by '='.
+        :rtype: str
+        """
+        if hasher is None:
+            hasher = self.hasher
+        if hasher is None:
+            hasher = hashlib.md5
+            prefix = ''
+        else:
+            hasher = getattr(hashlib, hasher)
+            prefix = '%s=' % self.hasher
+        digest = hasher(data).digest()
+        digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
+        return '%s%s' % (prefix, digest)
+
+
+class InstalledDistribution(BaseInstalledDistribution):
+    """
+    Created with the *path* of the ``.dist-info`` directory provided to the
+    constructor. It reads the metadata contained in ``pydist.json`` when it is
+    instantiated, or uses a passed-in Metadata instance (useful for when
+    dry-run mode is being used).
+ """ + + hasher = 'sha256' + + def __init__(self, path, metadata=None, env=None): + self.modules = [] + self.finder = finder = resources.finder_for_path(path) + if finder is None: + raise ValueError('finder unavailable for %s' % path) + if env and env._cache_enabled and path in env._cache.path: + metadata = env._cache.path[path].metadata + elif metadata is None: + r = finder.find(METADATA_FILENAME) + # Temporary - for Wheel 0.23 support + if r is None: + r = finder.find(WHEEL_METADATA_FILENAME) + # Temporary - for legacy support + if r is None: + r = finder.find('METADATA') + if r is None: + raise ValueError('no %s found in %s' % (METADATA_FILENAME, + path)) + with contextlib.closing(r.as_stream()) as stream: + metadata = Metadata(fileobj=stream, scheme='legacy') + + super(InstalledDistribution, self).__init__(metadata, path, env) + + if env and env._cache_enabled: + env._cache.add(self) + + r = finder.find('REQUESTED') + self.requested = r is not None + p = os.path.join(path, 'top_level.txt') + if os.path.exists(p): + with open(p, 'rb') as f: + data = f.read() + self.modules = data.splitlines() + + def __repr__(self): + return '<InstalledDistribution %r %s at %r>' % ( + self.name, self.version, self.path) + + def __str__(self): + return "%s %s" % (self.name, self.version) + + def _get_records(self): + """ + Get the list of installed files for the distribution + :return: A list of tuples of path, hash and size. Note that hash and + size might be ``None`` for some entries. The path is exactly + as stored in the file (which is as in PEP 376). + """ + results = [] + r = self.get_distinfo_resource('RECORD') + with contextlib.closing(r.as_stream()) as stream: + with CSVReader(stream=stream) as record_reader: + # Base location is parent dir of .dist-info dir + #base_location = os.path.dirname(self.path) + #base_location = os.path.abspath(base_location) + for row in record_reader: + missing = [None for i in range(len(row), 3)] + path, checksum, size = row + missing + #if not os.path.isabs(path): + # path = path.replace('/', os.sep) + # path = os.path.join(base_location, path) + results.append((path, checksum, size)) + return results + + @cached_property + def exports(self): + """ + Return the information exported by this distribution. + :return: A dictionary of exports, mapping an export category to a dict + of :class:`ExportEntry` instances describing the individual + export entries, and keyed by name. + """ + result = {} + r = self.get_distinfo_resource(EXPORTS_FILENAME) + if r: + result = self.read_exports() + return result + + def read_exports(self): + """ + Read exports data from a file in .ini format. + + :return: A dictionary of exports, mapping an export category to a list + of :class:`ExportEntry` instances describing the individual + export entries. + """ + result = {} + r = self.get_distinfo_resource(EXPORTS_FILENAME) + if r: + with contextlib.closing(r.as_stream()) as stream: + result = read_exports(stream) + return result + + def write_exports(self, exports): + """ + Write a dictionary of exports to a file in .ini format. + :param exports: A dictionary of exports, mapping an export category to + a list of :class:`ExportEntry` instances describing the + individual export entries. + """ + rf = self.get_distinfo_file(EXPORTS_FILENAME) + with open(rf, 'w') as f: + write_exports(exports, f) + + def get_resource_path(self, relative_path): + """ + NOTE: This API may change in the future. + + Return the absolute path to a resource file with the given relative + path. 
+ + :param relative_path: The path, relative to .dist-info, of the resource + of interest. + :return: The absolute path where the resource is to be found. + """ + r = self.get_distinfo_resource('RESOURCES') + with contextlib.closing(r.as_stream()) as stream: + with CSVReader(stream=stream) as resources_reader: + for relative, destination in resources_reader: + if relative == relative_path: + return destination + raise KeyError('no resource file with relative path %r ' + 'is installed' % relative_path) + + def list_installed_files(self): + """ + Iterates over the ``RECORD`` entries and returns a tuple + ``(path, hash, size)`` for each line. + + :returns: iterator of (path, hash, size) + """ + for result in self._get_records(): + yield result + + def write_installed_files(self, paths, prefix, dry_run=False): + """ + Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any + existing ``RECORD`` file is silently overwritten. + + prefix is used to determine when to write absolute paths. + """ + prefix = os.path.join(prefix, '') + base = os.path.dirname(self.path) + base_under_prefix = base.startswith(prefix) + base = os.path.join(base, '') + record_path = self.get_distinfo_file('RECORD') + logger.info('creating %s', record_path) + if dry_run: + return None + with CSVWriter(record_path) as writer: + for path in paths: + if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): + # do not put size and hash, as in PEP-376 + hash_value = size = '' + else: + size = '%d' % os.path.getsize(path) + with open(path, 'rb') as fp: + hash_value = self.get_hash(fp.read()) + if path.startswith(base) or (base_under_prefix and + path.startswith(prefix)): + path = os.path.relpath(path, base) + writer.writerow((path, hash_value, size)) + + # add the RECORD file itself + if record_path.startswith(base): + record_path = os.path.relpath(record_path, base) + writer.writerow((record_path, '', '')) + return record_path + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. + """ + mismatches = [] + base = os.path.dirname(self.path) + record_path = self.get_distinfo_file('RECORD') + for path, hash_value, size in self.list_installed_files(): + if not os.path.isabs(path): + path = os.path.join(base, path) + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + elif os.path.isfile(path): + actual_size = str(os.path.getsize(path)) + if size and actual_size != size: + mismatches.append((path, 'size', size, actual_size)) + elif hash_value: + if '=' in hash_value: + hasher = hash_value.split('=', 1)[0] + else: + hasher = None + + with open(path, 'rb') as f: + actual_hash = self.get_hash(f.read(), hasher) + if actual_hash != hash_value: + mismatches.append((path, 'hash', hash_value, actual_hash)) + return mismatches + + @cached_property + def shared_locations(self): + """ + A dictionary of shared locations whose keys are in the set 'prefix', + 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. + The corresponding value is the absolute path of that category for + this distribution, and takes into account any paths selected by the + user at installation time (e.g. 
via command-line arguments). In the + case of the 'namespace' key, this would be a list of absolute paths + for the roots of namespace packages in this distribution. + + The first time this property is accessed, the relevant information is + read from the SHARED file in the .dist-info directory. + """ + result = {} + shared_path = os.path.join(self.path, 'SHARED') + if os.path.isfile(shared_path): + with codecs.open(shared_path, 'r', encoding='utf-8') as f: + lines = f.read().splitlines() + for line in lines: + key, value = line.split('=', 1) + if key == 'namespace': + result.setdefault(key, []).append(value) + else: + result[key] = value + return result + + def write_shared_locations(self, paths, dry_run=False): + """ + Write shared location information to the SHARED file in .dist-info. + :param paths: A dictionary as described in the documentation for + :meth:`shared_locations`. + :param dry_run: If True, the action is logged but no file is actually + written. + :return: The path of the file written to. + """ + shared_path = os.path.join(self.path, 'SHARED') + logger.info('creating %s', shared_path) + if dry_run: + return None + lines = [] + for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): + path = paths[key] + if os.path.isdir(paths[key]): + lines.append('%s=%s' % (key, path)) + for ns in paths.get('namespace', ()): + lines.append('namespace=%s' % ns) + + with codecs.open(shared_path, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + return shared_path + + def get_distinfo_resource(self, path): + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + finder = resources.finder_for_path(self.path) + if finder is None: + raise DistlibException('Unable to get a finder for %s' % self.path) + return finder.find(path) + + def get_distinfo_file(self, path): + """ + Returns a path located under the ``.dist-info`` directory. Returns a + string representing the path. + + :parameter path: a ``'/'``-separated path relative to the + ``.dist-info`` directory or an absolute path; + If *path* is an absolute path and doesn't start + with the ``.dist-info`` directory path, + a :class:`DistlibException` is raised + :type path: str + :rtype: str + """ + # Check if it is an absolute path # XXX use relpath, add tests + if path.find(os.sep) >= 0: + # it's an absolute path? + distinfo_dirname, path = path.split(os.sep)[-2:] + if distinfo_dirname != self.path.split(os.sep)[-1]: + raise DistlibException( + 'dist-info file %r does not belong to the %r %s ' + 'distribution' % (path, self.name, self.version)) + + # The file must be relative + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + + return os.path.join(self.path, path) + + def list_distinfo_files(self): + """ + Iterates over the ``RECORD`` entries and returns paths for each line if + the path is pointing to a file located in the ``.dist-info`` directory + or one of its subdirectories. 
+ + :returns: iterator of paths + """ + base = os.path.dirname(self.path) + for path, checksum, size in self._get_records(): + # XXX add separator or use real relpath algo + if not os.path.isabs(path): + path = os.path.join(base, path) + if path.startswith(self.path): + yield path + + def __eq__(self, other): + return (isinstance(other, InstalledDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + + +class EggInfoDistribution(BaseInstalledDistribution): + """Created with the *path* of the ``.egg-info`` directory or file provided + to the constructor. It reads the metadata contained in the file itself, or + if the given path happens to be a directory, the metadata is read from the + file ``PKG-INFO`` under that directory.""" + + requested = True # as we have no way of knowing, assume it was + shared_locations = {} + + def __init__(self, path, env=None): + def set_name_and_version(s, n, v): + s.name = n + s.key = n.lower() # for case-insensitive comparisons + s.version = v + + self.path = path + self.dist_path = env + if env and env._cache_enabled and path in env._cache_egg.path: + metadata = env._cache_egg.path[path].metadata + set_name_and_version(self, metadata.name, metadata.version) + else: + metadata = self._get_metadata(path) + + # Need to be set before caching + set_name_and_version(self, metadata.name, metadata.version) + + if env and env._cache_enabled: + env._cache_egg.add(self) + super(EggInfoDistribution, self).__init__(metadata, path, env) + + def _get_metadata(self, path): + requires = None + + def parse_requires_data(data): + """Create a list of dependencies from a requires.txt file. + + *data*: the contents of a setuptools-produced requires.txt file. + """ + reqs = [] + lines = data.splitlines() + for line in lines: + line = line.strip() + if line.startswith('['): + logger.warning('Unexpected line: quitting requirement scan: %r', + line) + break + r = parse_requirement(line) + if not r: + logger.warning('Not recognised as a requirement: %r', line) + continue + if r.extras: + logger.warning('extra requirements in requires.txt are ' + 'not supported') + if not r.constraints: + reqs.append(r.name) + else: + cons = ', '.join('%s%s' % c for c in r.constraints) + reqs.append('%s (%s)' % (r.name, cons)) + return reqs + + def parse_requires_path(req_path): + """Create a list of dependencies from a requires.txt file. + + *req_path*: the path to a setuptools-produced requires.txt file. 
+ """ + + reqs = [] + try: + with codecs.open(req_path, 'r', 'utf-8') as fp: + reqs = parse_requires_data(fp.read()) + except IOError: + pass + return reqs + + tl_path = tl_data = None + if path.endswith('.egg'): + if os.path.isdir(path): + p = os.path.join(path, 'EGG-INFO') + meta_path = os.path.join(p, 'PKG-INFO') + metadata = Metadata(path=meta_path, scheme='legacy') + req_path = os.path.join(p, 'requires.txt') + tl_path = os.path.join(p, 'top_level.txt') + requires = parse_requires_path(req_path) + else: + # FIXME handle the case where zipfile is not available + zipf = zipimport.zipimporter(path) + fileobj = StringIO( + zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) + metadata = Metadata(fileobj=fileobj, scheme='legacy') + try: + data = zipf.get_data('EGG-INFO/requires.txt') + tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8') + requires = parse_requires_data(data.decode('utf-8')) + except IOError: + requires = None + elif path.endswith('.egg-info'): + if os.path.isdir(path): + req_path = os.path.join(path, 'requires.txt') + requires = parse_requires_path(req_path) + path = os.path.join(path, 'PKG-INFO') + tl_path = os.path.join(path, 'top_level.txt') + metadata = Metadata(path=path, scheme='legacy') + else: + raise DistlibException('path must end with .egg-info or .egg, ' + 'got %r' % path) + + if requires: + metadata.add_requirements(requires) + # look for top-level modules in top_level.txt, if present + if tl_data is None: + if tl_path is not None and os.path.exists(tl_path): + with open(tl_path, 'rb') as f: + tl_data = f.read().decode('utf-8') + if not tl_data: + tl_data = [] + else: + tl_data = tl_data.splitlines() + self.modules = tl_data + return metadata + + def __repr__(self): + return '<EggInfoDistribution %r %s at %r>' % ( + self.name, self.version, self.path) + + def __str__(self): + return "%s %s" % (self.name, self.version) + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. + """ + mismatches = [] + record_path = os.path.join(self.path, 'installed-files.txt') + if os.path.exists(record_path): + for path, _, _ in self.list_installed_files(): + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + return mismatches + + def list_installed_files(self): + """ + Iterates over the ``installed-files.txt`` entries and returns a tuple + ``(path, hash, size)`` for each line. 
+ + :returns: a list of (path, hash, size) + """ + + def _md5(path): + f = open(path, 'rb') + try: + content = f.read() + finally: + f.close() + return hashlib.md5(content).hexdigest() + + def _size(path): + return os.stat(path).st_size + + record_path = os.path.join(self.path, 'installed-files.txt') + result = [] + if os.path.exists(record_path): + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + p = os.path.normpath(os.path.join(self.path, line)) + # "./" is present as a marker between installed files + # and installation metadata files + if not os.path.exists(p): + logger.warning('Non-existent file: %s', p) + if p.endswith(('.pyc', '.pyo')): + continue + #otherwise fall through and fail + if not os.path.isdir(p): + result.append((p, _md5(p), _size(p))) + result.append((record_path, None, None)) + return result + + def list_distinfo_files(self, absolute=False): + """ + Iterates over the ``installed-files.txt`` entries and returns paths for + each line if the path is pointing to a file located in the + ``.egg-info`` directory or one of its subdirectories. + + :parameter absolute: If *absolute* is ``True``, each returned path is + transformed into a local absolute path. Otherwise the + raw value from ``installed-files.txt`` is returned. + :type absolute: boolean + :returns: iterator of paths + """ + record_path = os.path.join(self.path, 'installed-files.txt') + if os.path.exists(record_path): + skip = True + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + if line == './': + skip = False + continue + if not skip: + p = os.path.normpath(os.path.join(self.path, line)) + if p.startswith(self.path): + if absolute: + yield p + else: + yield line + + def __eq__(self, other): + return (isinstance(other, EggInfoDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + +new_dist_class = InstalledDistribution +old_dist_class = EggInfoDistribution + + +class DependencyGraph(object): + """ + Represents a dependency graph between distributions. + + The dependency relationships are stored in an ``adjacency_list`` that maps + distributions to a list of ``(other, label)`` tuples where ``other`` + is a distribution and the edge is labeled with ``label`` (i.e. the version + specifier, if such was provided). Also, for more efficient traversal, for + every distribution ``x``, a list of predecessors is kept in + ``reverse_list[x]``. An edge from distribution ``a`` to + distribution ``b`` means that ``a`` depends on ``b``. If any missing + dependencies are found, they are stored in ``missing``, which is a + dictionary that maps distributions to a list of requirements that were not + provided by any other distributions. + """ + + def __init__(self): + self.adjacency_list = {} + self.reverse_list = {} + self.missing = {} + + def add_distribution(self, distribution): + """Add the *distribution* to the graph. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + """ + self.adjacency_list[distribution] = [] + self.reverse_list[distribution] = [] + #self.missing[distribution] = [] + + def add_edge(self, x, y, label=None): + """Add an edge from distribution *x* to distribution *y* with the given + *label*. 
+ + :type x: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type y: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type label: ``str`` or ``None`` + """ + self.adjacency_list[x].append((y, label)) + # multiple edges are allowed, so be careful + if x not in self.reverse_list[y]: + self.reverse_list[y].append(x) + + def add_missing(self, distribution, requirement): + """ + Add a missing *requirement* for the given *distribution*. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + :type requirement: ``str`` + """ + logger.debug('%s missing %r', distribution, requirement) + self.missing.setdefault(distribution, []).append(requirement) + + def _repr_dist(self, dist): + return '%s %s' % (dist.name, dist.version) + + def repr_node(self, dist, level=1): + """Prints only a subgraph""" + output = [self._repr_dist(dist)] + for other, label in self.adjacency_list[dist]: + dist = self._repr_dist(other) + if label is not None: + dist = '%s [%s]' % (dist, label) + output.append(' ' * level + str(dist)) + suboutput = self.repr_node(other, level + 1) + subs = suboutput.split('\n') + output.extend(subs[1:]) + return '\n'.join(output) + + def to_dot(self, f, skip_disconnected=True): + """Writes a DOT output for the graph to the provided file *f*. + + If *skip_disconnected* is set to ``True``, then all distributions + that are not dependent on any other distribution are skipped. + + :type f: has to support ``file``-like operations + :type skip_disconnected: ``bool`` + """ + disconnected = [] + + f.write("digraph dependencies {\n") + for dist, adjs in self.adjacency_list.items(): + if len(adjs) == 0 and not skip_disconnected: + disconnected.append(dist) + for other, label in adjs: + if not label is None: + f.write('"%s" -> "%s" [label="%s"]\n' % + (dist.name, other.name, label)) + else: + f.write('"%s" -> "%s"\n' % (dist.name, other.name)) + if not skip_disconnected and len(disconnected) > 0: + f.write('subgraph disconnected {\n') + f.write('label = "Disconnected"\n') + f.write('bgcolor = red\n') + + for dist in disconnected: + f.write('"%s"' % dist.name) + f.write('\n') + f.write('}\n') + f.write('}\n') + + def topological_sort(self): + """ + Perform a topological sort of the graph. + :return: A tuple, the first element of which is a topologically sorted + list of distributions, and the second element of which is a + list of distributions that cannot be sorted because they have + circular dependencies and so form a cycle. + """ + result = [] + # Make a shallow copy of the adjacency list + alist = {} + for k, v in self.adjacency_list.items(): + alist[k] = v[:] + while True: + # See what we can remove in this run + to_remove = [] + for k, v in list(alist.items())[:]: + if not v: + to_remove.append(k) + del alist[k] + if not to_remove: + # What's left in alist (if anything) is a cycle. 
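+                # every remaining node still has at least one unsatisfied
+                # dependency, so no further nodes can be emitted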
+ break + # Remove from the adjacency list of others + for k, v in alist.items(): + alist[k] = [(d, r) for d, r in v if d not in to_remove] + logger.debug('Moving to result: %s', + ['%s (%s)' % (d.name, d.version) for d in to_remove]) + result.extend(to_remove) + return result, list(alist.keys()) + + def __repr__(self): + """Representation of the graph""" + output = [] + for dist, adjs in self.adjacency_list.items(): + output.append(self.repr_node(dist)) + return '\n'.join(output) + + +def make_graph(dists, scheme='default'): + """Makes a dependency graph from the given distributions. + + :parameter dists: a list of distributions + :type dists: list of :class:`distutils2.database.InstalledDistribution` and + :class:`distutils2.database.EggInfoDistribution` instances + :rtype: a :class:`DependencyGraph` instance + """ + scheme = get_scheme(scheme) + graph = DependencyGraph() + provided = {} # maps names to lists of (version, dist) tuples + + # first, build the graph and find out what's provided + for dist in dists: + graph.add_distribution(dist) + + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + provided.setdefault(name, []).append((version, dist)) + + # now make the edges + for dist in dists: + requires = (dist.run_requires | dist.meta_requires | + dist.build_requires | dist.dev_requires) + for req in requires: + try: + matcher = scheme.matcher(req) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + matched = False + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + graph.add_edge(dist, provider, req) + matched = True + break + if not matched: + graph.add_missing(dist, req) + return graph + + +def get_dependent_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + dependent on *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + dep = [dist] # dependent distributions + todo = graph.reverse_list[dist] # list of nodes we should inspect + + while todo: + d = todo.pop() + dep.append(d) + for succ in graph.reverse_list[d]: + if succ not in dep: + todo.append(succ) + + dep.pop(0) # remove dist from dep, was there to prevent infinite loops + return dep + + +def get_required_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + required by *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + req = [] # required distributions + todo = graph.adjacency_list[dist] # list of nodes we should inspect + + while todo: + d = todo.pop()[0] + req.append(d) + for pred in graph.adjacency_list[d]: + if pred not in req: + todo.append(pred) + + return req + + +def make_dist(name, version, **kwargs): + """ + A convenience method for making a dist given just a name and version. 
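+
+    For illustration (the name and version are arbitrary)::
+
+        dist = make_dist('foo', '1.0', summary='An example distribution')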
+ """ + summary = kwargs.pop('summary', 'Placeholder for summary') + md = Metadata(**kwargs) + md.name = name + md.version = version + md.summary = summary or 'Placeholder for summary' + return Distribution(md) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyc new file mode 100644 index 0000000..41edc49 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.py new file mode 100644 index 0000000..2406be2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.py @@ -0,0 +1,516 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import hashlib +import logging +import os +import shutil +import subprocess +import tempfile +try: + from threading import Thread +except ImportError: + from dummy_threading import Thread + +from . import DistlibException +from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr, + urlparse, build_opener, string_types) +from .util import cached_property, zip_dir, ServerProxy + +logger = logging.getLogger(__name__) + +DEFAULT_INDEX = 'https://pypi.python.org/pypi' +DEFAULT_REALM = 'pypi' + +class PackageIndex(object): + """ + This class represents a package index compatible with PyPI, the Python + Package Index. + """ + + boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$' + + def __init__(self, url=None): + """ + Initialise an instance. + + :param url: The URL of the index. If not specified, the URL for PyPI is + used. + """ + self.url = url or DEFAULT_INDEX + self.read_configuration() + scheme, netloc, path, params, query, frag = urlparse(self.url) + if params or query or frag or scheme not in ('http', 'https'): + raise DistlibException('invalid repository: %s' % self.url) + self.password_handler = None + self.ssl_verifier = None + self.gpg = None + self.gpg_home = None + with open(os.devnull, 'w') as sink: + # Use gpg by default rather than gpg2, as gpg2 insists on + # prompting for passwords + for s in ('gpg', 'gpg2'): + try: + rc = subprocess.check_call([s, '--version'], stdout=sink, + stderr=sink) + if rc == 0: + self.gpg = s + break + except OSError: + pass + + def _get_pypirc_command(self): + """ + Get the distutils command for interacting with PyPI configurations. + :return: the command. + """ + from distutils.core import Distribution + from distutils.config import PyPIRCCommand + d = Distribution() + return PyPIRCCommand(d) + + def read_configuration(self): + """ + Read the PyPI access configuration as supported by distutils, getting + PyPI to do the actual work. This populates ``username``, ``password``, + ``realm`` and ``url`` attributes from the configuration. + """ + # get distutils to do the work + c = self._get_pypirc_command() + c.repository = self.url + cfg = c._read_pypirc() + self.username = cfg.get('username') + self.password = cfg.get('password') + self.realm = cfg.get('realm', 'pypi') + self.url = cfg.get('repository', self.url) + + def save_configuration(self): + """ + Save the PyPI access configuration. You must have set ``username`` and + ``password`` attributes before calling this method. + + Again, distutils is used to do the actual work. 
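+
+        An illustrative sequence (the credentials are placeholders)::
+
+            index = PackageIndex()
+            index.username = 'user'
+            index.password = 'secret'
+            index.save_configuration()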
+        """
+        self.check_credentials()
+        # get distutils to do the work
+        c = self._get_pypirc_command()
+        c._store_pypirc(self.username, self.password)
+
+    def check_credentials(self):
+        """
+        Check that ``username`` and ``password`` have been set, and raise an
+        exception if not.
+        """
+        if self.username is None or self.password is None:
+            raise DistlibException('username and password must be set')
+        pm = HTTPPasswordMgr()
+        _, netloc, _, _, _, _ = urlparse(self.url)
+        pm.add_password(self.realm, netloc, self.username, self.password)
+        self.password_handler = HTTPBasicAuthHandler(pm)
+
+    def register(self, metadata):
+        """
+        Register a distribution on PyPI, using the provided metadata.
+
+        :param metadata: A :class:`Metadata` instance defining at least a name
+                         and version number for the distribution to be
+                         registered.
+        :return: The HTTP response received from PyPI upon submission of the
+                 request.
+        """
+        self.check_credentials()
+        metadata.validate()
+        d = metadata.todict()
+        d[':action'] = 'verify'
+        request = self.encode_request(d.items(), [])
+        response = self.send_request(request)
+        d[':action'] = 'submit'
+        request = self.encode_request(d.items(), [])
+        return self.send_request(request)
+
+    def _reader(self, name, stream, outbuf):
+        """
+        Thread runner for reading lines from a subprocess into a buffer.
+
+        :param name: The logical name of the stream (used for logging only).
+        :param stream: The stream to read from. This will typically be a pipe
+                       connected to the output stream of a subprocess.
+        :param outbuf: The list to append the read lines to.
+        """
+        while True:
+            s = stream.readline()
+            if not s:
+                break
+            s = s.decode('utf-8').rstrip()
+            outbuf.append(s)
+            logger.debug('%s: %s' % (name, s))
+        stream.close()
+
+    def get_sign_command(self, filename, signer, sign_password,
+                         keystore=None):
+        """
+        Return a suitable command for signing a file.
+
+        :param filename: The pathname to the file to be signed.
+        :param signer: The identifier of the signer of the file.
+        :param sign_password: The passphrase for the signer's
+                              private key used for signing.
+        :param keystore: The path to a directory which contains the keys
+                         used in signing. If not specified, the
+                         instance's ``gpg_home`` attribute is used instead.
+        :return: The signing command as a list suitable to be
+                 passed to :class:`subprocess.Popen`.
+        """
+        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
+        if keystore is None:
+            keystore = self.gpg_home
+        if keystore:
+            cmd.extend(['--homedir', keystore])
+        if sign_password is not None:
+            cmd.extend(['--batch', '--passphrase-fd', '0'])
+        td = tempfile.mkdtemp()
+        sf = os.path.join(td, os.path.basename(filename) + '.asc')
+        cmd.extend(['--detach-sign', '--armor', '--local-user',
+                    signer, '--output', sf, filename])
+        logger.debug('invoking: %s', ' '.join(cmd))
+        return cmd, sf
+
+    def run_command(self, cmd, input_data=None):
+        """
+        Run a command in a child process, passing it any input data specified.
+
+        :param cmd: The command to run.
+        :param input_data: If specified, this must be a byte string containing
+                           data to be sent to the child process.
+        :return: A tuple consisting of the subprocess' exit code, a list of
+                 lines read from the subprocess' ``stdout``, and a list of
+                 lines read from the subprocess' ``stderr``.
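+
+        For example (illustrative only)::
+
+            rc, stdout, stderr = index.run_command(['gpg', '--version'])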
+ """ + kwargs = { + 'stdout': subprocess.PIPE, + 'stderr': subprocess.PIPE, + } + if input_data is not None: + kwargs['stdin'] = subprocess.PIPE + stdout = [] + stderr = [] + p = subprocess.Popen(cmd, **kwargs) + # We don't use communicate() here because we may need to + # get clever with interacting with the command + t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) + t1.start() + t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) + t2.start() + if input_data is not None: + p.stdin.write(input_data) + p.stdin.close() + + p.wait() + t1.join() + t2.join() + return p.returncode, stdout, stderr + + def sign_file(self, filename, signer, sign_password, keystore=None): + """ + Sign a file. + + :param filename: The pathname to the file to be signed. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The absolute pathname of the file where the signature is + stored. + """ + cmd, sig_file = self.get_sign_command(filename, signer, sign_password, + keystore) + rc, stdout, stderr = self.run_command(cmd, + sign_password.encode('utf-8')) + if rc != 0: + raise DistlibException('sign command failed with error ' + 'code %s' % rc) + return sig_file + + def upload_file(self, metadata, filename, signer=None, sign_password=None, + filetype='sdist', pyversion='source', keystore=None): + """ + Upload a release file to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the file to be uploaded. + :param filename: The pathname of the file to be uploaded. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param filetype: The type of the file being uploaded. This is the + distutils command which produced that file, e.g. + ``sdist`` or ``bdist_wheel``. + :param pyversion: The version of Python which the release relates + to. For code compatible with any Python, this would + be ``source``, otherwise it would be e.g. ``3.2``. + :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The HTTP response received from PyPI upon submission of the + request. 
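+
+        A sketch of a typical call (the path is hypothetical)::
+
+            response = index.upload_file(metadata, 'dist/foo-1.0.tar.gz',
+                                         filetype='sdist',
+                                         pyversion='source')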
+ """ + self.check_credentials() + if not os.path.exists(filename): + raise DistlibException('not found: %s' % filename) + metadata.validate() + d = metadata.todict() + sig_file = None + if signer: + if not self.gpg: + logger.warning('no signing program available - not signed') + else: + sig_file = self.sign_file(filename, signer, sign_password, + keystore) + with open(filename, 'rb') as f: + file_data = f.read() + md5_digest = hashlib.md5(file_data).hexdigest() + sha256_digest = hashlib.sha256(file_data).hexdigest() + d.update({ + ':action': 'file_upload', + 'protocol_version': '1', + 'filetype': filetype, + 'pyversion': pyversion, + 'md5_digest': md5_digest, + 'sha256_digest': sha256_digest, + }) + files = [('content', os.path.basename(filename), file_data)] + if sig_file: + with open(sig_file, 'rb') as f: + sig_data = f.read() + files.append(('gpg_signature', os.path.basename(sig_file), + sig_data)) + shutil.rmtree(os.path.dirname(sig_file)) + request = self.encode_request(d.items(), files) + return self.send_request(request) + + def upload_documentation(self, metadata, doc_dir): + """ + Upload documentation to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the documentation to be + uploaded. + :param doc_dir: The pathname of the directory which contains the + documentation. This should be the directory that + contains the ``index.html`` for the documentation. + :return: The HTTP response received from PyPI upon submission of the + request. + """ + self.check_credentials() + if not os.path.isdir(doc_dir): + raise DistlibException('not a directory: %r' % doc_dir) + fn = os.path.join(doc_dir, 'index.html') + if not os.path.exists(fn): + raise DistlibException('not found: %r' % fn) + metadata.validate() + name, version = metadata.name, metadata.version + zip_data = zip_dir(doc_dir).getvalue() + fields = [(':action', 'doc_upload'), + ('name', name), ('version', version)] + files = [('content', name, zip_data)] + request = self.encode_request(fields, files) + return self.send_request(request) + + def get_verify_command(self, signature_filename, data_filename, + keystore=None): + """ + Return a suitable command for verifying a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: The verifying command as a list suitable to be + passed to :class:`subprocess.Popen`. + """ + cmd = [self.gpg, '--status-fd', '2', '--no-tty'] + if keystore is None: + keystore = self.gpg_home + if keystore: + cmd.extend(['--homedir', keystore]) + cmd.extend(['--verify', signature_filename, data_filename]) + logger.debug('invoking: %s', ' '.join(cmd)) + return cmd + + def verify_signature(self, signature_filename, data_filename, + keystore=None): + """ + Verify a signature for a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: True if the signature was verified, else False. 
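+
+        Illustrative usage (the file names are hypothetical)::
+
+            if index.verify_signature('foo-1.0.tar.gz.asc',
+                                      'foo-1.0.tar.gz'):
+                print('signature OK')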
+ """ + if not self.gpg: + raise DistlibException('verification unavailable because gpg ' + 'unavailable') + cmd = self.get_verify_command(signature_filename, data_filename, + keystore) + rc, stdout, stderr = self.run_command(cmd) + if rc not in (0, 1): + raise DistlibException('verify command failed with error ' + 'code %s' % rc) + return rc == 0 + + def download_file(self, url, destfile, digest=None, reporthook=None): + """ + This is a convenience method for downloading a file from an URL. + Normally, this will be a file from the index, though currently + no check is made for this (i.e. a file can be downloaded from + anywhere). + + The method is just like the :func:`urlretrieve` function in the + standard library, except that it allows digest computation to be + done during download and checking that the downloaded data + matched any expected value. + + :param url: The URL of the file to be downloaded (assumed to be + available via an HTTP GET request). + :param destfile: The pathname where the downloaded file is to be + saved. + :param digest: If specified, this must be a (hasher, value) + tuple, where hasher is the algorithm used (e.g. + ``'md5'``) and ``value`` is the expected value. + :param reporthook: The same as for :func:`urlretrieve` in the + standard library. + """ + if digest is None: + digester = None + logger.debug('No digest specified') + else: + if isinstance(digest, (list, tuple)): + hasher, digest = digest + else: + hasher = 'md5' + digester = getattr(hashlib, hasher)() + logger.debug('Digest specified: %s' % digest) + # The following code is equivalent to urlretrieve. + # We need to do it this way so that we can compute the + # digest of the file as we go. + with open(destfile, 'wb') as dfp: + # addinfourl is not a context manager on 2.x + # so we have to use try/finally + sfp = self.send_request(Request(url)) + try: + headers = sfp.info() + blocksize = 8192 + size = -1 + read = 0 + blocknum = 0 + if "content-length" in headers: + size = int(headers["Content-Length"]) + if reporthook: + reporthook(blocknum, blocksize, size) + while True: + block = sfp.read(blocksize) + if not block: + break + read += len(block) + dfp.write(block) + if digester: + digester.update(block) + blocknum += 1 + if reporthook: + reporthook(blocknum, blocksize, size) + finally: + sfp.close() + + # check that we got the whole file, if we can + if size >= 0 and read < size: + raise DistlibException( + 'retrieval incomplete: got only %d out of %d bytes' + % (read, size)) + # if we have a digest, it must match. + if digester: + actual = digester.hexdigest() + if digest != actual: + raise DistlibException('%s digest mismatch for %s: expected ' + '%s, got %s' % (hasher, destfile, + digest, actual)) + logger.debug('Digest verified: %s', digest) + + def send_request(self, req): + """ + Send a standard library :class:`Request` to PyPI and return its + response. + + :param req: The request to send. + :return: The HTTP response from PyPI (a standard library HTTPResponse). + """ + handlers = [] + if self.password_handler: + handlers.append(self.password_handler) + if self.ssl_verifier: + handlers.append(self.ssl_verifier) + opener = build_opener(*handlers) + return opener.open(req) + + def encode_request(self, fields, files): + """ + Encode fields and files for posting to an HTTP server. + + :param fields: The fields to send as a list of (fieldname, value) + tuples. + :param files: The files to send as a list of (fieldname, filename, + file_bytes) tuple. 
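+        :return: A :class:`Request` instance populated with a
+                 ``multipart/form-data`` body and matching headers.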
+ """ + # Adapted from packaging, which in turn was adapted from + # http://code.activestate.com/recipes/146306 + + parts = [] + boundary = self.boundary + for k, values in fields: + if not isinstance(values, (list, tuple)): + values = [values] + + for v in values: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"' % + k).encode('utf-8'), + b'', + v.encode('utf-8'))) + for key, filename, value in files: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"; filename="%s"' % + (key, filename)).encode('utf-8'), + b'', + value)) + + parts.extend((b'--' + boundary + b'--', b'')) + + body = b'\r\n'.join(parts) + ct = b'multipart/form-data; boundary=' + boundary + headers = { + 'Content-type': ct, + 'Content-length': str(len(body)) + } + return Request(self.url, body, headers) + + def search(self, terms, operator=None): + if isinstance(terms, string_types): + terms = {'name': terms} + rpc_proxy = ServerProxy(self.url, timeout=3.0) + try: + return rpc_proxy.search(terms, operator or 'and') + finally: + rpc_proxy('close')() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyc new file mode 100644 index 0000000..a118a1f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py new file mode 100644 index 0000000..5c655c3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py @@ -0,0 +1,1295 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2015 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# + +import gzip +from io import BytesIO +import json +import logging +import os +import posixpath +import re +try: + import threading +except ImportError: # pragma: no cover + import dummy_threading as threading +import zlib + +from . import DistlibException +from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, + queue, quote, unescape, string_types, build_opener, + HTTPRedirectHandler as BaseRedirectHandler, text_type, + Request, HTTPError, URLError) +from .database import Distribution, DistributionPath, make_dist +from .metadata import Metadata, MetadataInvalidError +from .util import (cached_property, parse_credentials, ensure_slash, + split_filename, get_project_data, parse_requirement, + parse_name_and_version, ServerProxy, normalize_name) +from .version import get_scheme, UnsupportedVersionError +from .wheel import Wheel, is_compatible + +logger = logging.getLogger(__name__) + +HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)') +CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) +HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') +DEFAULT_INDEX = 'https://pypi.python.org/pypi' + +def get_all_distribution_names(url=None): + """ + Return all distribution names known by an index. + :param url: The URL of the index. + :return: A list of all known distribution names. + """ + if url is None: + url = DEFAULT_INDEX + client = ServerProxy(url, timeout=3.0) + try: + return client.list_packages() + finally: + client('close')() + +class RedirectHandler(BaseRedirectHandler): + """ + A class to work around a bug in some Python 3.2.x releases. 
+ """ + # There's a bug in the base version for some 3.2.x + # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header + # returns e.g. /abc, it bails because it says the scheme '' + # is bogus, when actually it should use the request's + # URL for the scheme. See Python issue #13696. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. + newurl = None + for key in ('location', 'uri'): + if key in headers: + newurl = headers[key] + break + if newurl is None: # pragma: no cover + return + urlparts = urlparse(newurl) + if urlparts.scheme == '': + newurl = urljoin(req.get_full_url(), newurl) + if hasattr(headers, 'replace_header'): + headers.replace_header(key, newurl) + else: + headers[key] = newurl + return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, + headers) + + http_error_301 = http_error_303 = http_error_307 = http_error_302 + +class Locator(object): + """ + A base class for locators - things that locate distributions. + """ + source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') + binary_extensions = ('.egg', '.exe', '.whl') + excluded_extensions = ('.pdf',) + + # A list of tags indicating which wheels you want to match. The default + # value of None matches against the tags compatible with the running + # Python. If you want to match other values, set wheel_tags on a locator + # instance to a list of tuples (pyver, abi, arch) which you want to match. + wheel_tags = None + + downloadable_extensions = source_extensions + ('.whl',) + + def __init__(self, scheme='default'): + """ + Initialise an instance. + :param scheme: Because locators look for most recent versions, they + need to know the version scheme to use. This specifies + the current PEP-recommended scheme - use ``'legacy'`` + if you need to support existing distributions on PyPI. + """ + self._cache = {} + self.scheme = scheme + # Because of bugs in some of the handlers on some of the platforms, + # we use our own opener rather than just using urlopen. + self.opener = build_opener(RedirectHandler()) + # If get_project() is called from locate(), the matcher instance + # is set from the requirement passed to locate(). See issue #18 for + # why this can be useful to know. + self.matcher = None + self.errors = queue.Queue() + + def get_errors(self): + """ + Return any errors which have occurred. + """ + result = [] + while not self.errors.empty(): # pragma: no cover + try: + e = self.errors.get(False) + result.append(e) + except self.errors.Empty: + continue + self.errors.task_done() + return result + + def clear_errors(self): + """ + Clear any errors which may have been logged. + """ + # Just get the errors and throw them away + self.get_errors() + + def clear_cache(self): + self._cache.clear() + + def _get_scheme(self): + return self._scheme + + def _set_scheme(self, value): + self._scheme = value + + scheme = property(_get_scheme, _set_scheme) + + def _get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This should be implemented in subclasses. + + If called from a locate() request, self.matcher will be set to a + matcher for the requirement to satisfy, otherwise it will be None. + """ + raise NotImplementedError('Please implement in the subclass') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. 
+ """ + raise NotImplementedError('Please implement in the subclass') + + def get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This calls _get_project to do all the work, and just implements a caching layer on top. + """ + if self._cache is None: # pragma: no cover + result = self._get_project(name) + elif name in self._cache: + result = self._cache[name] + else: + self.clear_errors() + result = self._get_project(name) + self._cache[name] = result + return result + + def score_url(self, url): + """ + Give an url a score which can be used to choose preferred URLs + for a given project release. + """ + t = urlparse(url) + basename = posixpath.basename(t.path) + compatible = True + is_wheel = basename.endswith('.whl') + is_downloadable = basename.endswith(self.downloadable_extensions) + if is_wheel: + compatible = is_compatible(Wheel(basename), self.wheel_tags) + return (t.scheme == 'https', 'pypi.python.org' in t.netloc, + is_downloadable, is_wheel, compatible, basename) + + def prefer_url(self, url1, url2): + """ + Choose one of two URLs where both are candidates for distribution + archives for the same version of a distribution (for example, + .tar.gz vs. zip). + + The current implementation favours https:// URLs over http://, archives + from PyPI over those from other locations, wheel compatibility (if a + wheel) and then the archive name. + """ + result = url2 + if url1: + s1 = self.score_url(url1) + s2 = self.score_url(url2) + if s1 > s2: + result = url1 + if result != url2: + logger.debug('Not replacing %r with %r', url1, url2) + else: + logger.debug('Replacing %r with %r', url1, url2) + return result + + def split_filename(self, filename, project_name): + """ + Attempt to split a filename in project name, version and Python version. + """ + return split_filename(filename, project_name) + + def convert_url_to_download_info(self, url, project_name): + """ + See if a URL is a candidate for a download URL for a project (the URL + has typically been scraped from an HTML page). + + If it is, a dictionary is returned with keys "name", "version", + "filename" and "url"; otherwise, None is returned. 
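+
+        For example, a URL ending in ``foo-1.0.tar.gz`` might (illustratively)
+        produce::
+
+            {'name': 'foo', 'version': '1.0',
+             'filename': 'foo-1.0.tar.gz',
+             'url': 'https://example.com/foo-1.0.tar.gz'}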
+ """ + def same_project(name1, name2): + return normalize_name(name1) == normalize_name(name2) + + result = None + scheme, netloc, path, params, query, frag = urlparse(url) + if frag.lower().startswith('egg='): # pragma: no cover + logger.debug('%s: version hint in fragment: %r', + project_name, frag) + m = HASHER_HASH.match(frag) + if m: + algo, digest = m.groups() + else: + algo, digest = None, None + origpath = path + if path and path[-1] == '/': # pragma: no cover + path = path[:-1] + if path.endswith('.whl'): + try: + wheel = Wheel(path) + if not is_compatible(wheel, self.wheel_tags): + logger.debug('Wheel not compatible: %s', path) + else: + if project_name is None: + include = True + else: + include = same_project(wheel.name, project_name) + if include: + result = { + 'name': wheel.name, + 'version': wheel.version, + 'filename': wheel.filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + 'python-version': ', '.join( + ['.'.join(list(v[2:])) for v in wheel.pyver]), + } + except Exception as e: # pragma: no cover + logger.warning('invalid path for wheel: %s', path) + elif not path.endswith(self.downloadable_extensions): # pragma: no cover + logger.debug('Not downloadable: %s', path) + else: # downloadable extension + path = filename = posixpath.basename(path) + for ext in self.downloadable_extensions: + if path.endswith(ext): + path = path[:-len(ext)] + t = self.split_filename(path, project_name) + if not t: # pragma: no cover + logger.debug('No match for project/version: %s', path) + else: + name, version, pyver = t + if not project_name or same_project(project_name, name): + result = { + 'name': name, + 'version': version, + 'filename': filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + #'packagetype': 'sdist', + } + if pyver: # pragma: no cover + result['python-version'] = pyver + break + if result and algo: + result['%s_digest' % algo] = digest + return result + + def _get_digest(self, info): + """ + Get a digest from a dictionary by looking at keys of the form + 'algo_digest'. + + Returns a 2-tuple (algo, digest) if found, else None. Currently + looks only for SHA256, then MD5. + """ + result = None + for algo in ('sha256', 'md5'): + key = '%s_digest' % algo + if key in info: + result = (algo, info[key]) + break + return result + + def _update_version_data(self, result, info): + """ + Update a result dictionary (the final result from _get_project) with a + dictionary for a specific version, which typically holds information + gleaned from a filename or URL for an archive for the distribution. + """ + name = info.pop('name') + version = info.pop('version') + if version in result: + dist = result[version] + md = dist.metadata + else: + dist = make_dist(name, version, scheme=self.scheme) + md = dist.metadata + dist.digest = digest = self._get_digest(info) + url = info['url'] + result['digests'][url] = digest + if md.source_url != info['url']: + md.source_url = self.prefer_url(md.source_url, url) + result['urls'].setdefault(version, set()).add(url) + dist.locator = self + result[version] = dist + + def locate(self, requirement, prereleases=False): + """ + Find the most recent distribution which matches the given + requirement. + + :param requirement: A requirement of the form 'foo (1.0)' or perhaps + 'foo (>= 1.0, < 2.0, != 1.3)' + :param prereleases: If ``True``, allow pre-release versions + to be located. Otherwise, pre-release versions + are not returned. 
+ :return: A :class:`Distribution` instance, or ``None`` if no such + distribution could be located. + """ + result = None + r = parse_requirement(requirement) + if r is None: # pragma: no cover + raise DistlibException('Not a valid requirement: %r' % requirement) + scheme = get_scheme(self.scheme) + self.matcher = matcher = scheme.matcher(r.requirement) + logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) + versions = self.get_project(r.name) + if len(versions) > 2: # urls and digests keys are present + # sometimes, versions are invalid + slist = [] + vcls = matcher.version_class + for k in versions: + if k in ('urls', 'digests'): + continue + try: + if not matcher.match(k): + logger.debug('%s did not match %r', matcher, k) + else: + if prereleases or not vcls(k).is_prerelease: + slist.append(k) + else: + logger.debug('skipping pre-release ' + 'version %s of %s', k, matcher.name) + except Exception: # pragma: no cover + logger.warning('error matching %s with %r', matcher, k) + pass # slist.append(k) + if len(slist) > 1: + slist = sorted(slist, key=scheme.key) + if slist: + logger.debug('sorted list: %s', slist) + version = slist[-1] + result = versions[version] + if result: + if r.extras: + result.extras = r.extras + result.download_urls = versions.get('urls', {}).get(version, set()) + d = {} + sd = versions.get('digests', {}) + for url in result.download_urls: + if url in sd: # pragma: no cover + d[url] = sd[url] + result.digests = d + self.matcher = None + return result + + +class PyPIRPCLocator(Locator): + """ + This locator uses XML-RPC to locate distributions. It therefore + cannot be used with simple mirrors (that only mirror file content). + """ + def __init__(self, url, **kwargs): + """ + Initialise an instance. + + :param url: The URL to use for XML-RPC. + :param kwargs: Passed to the superclass constructor. + """ + super(PyPIRPCLocator, self).__init__(**kwargs) + self.base_url = url + self.client = ServerProxy(url, timeout=3.0) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + return set(self.client.list_packages()) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + versions = self.client.package_releases(name, True) + for v in versions: + urls = self.client.release_urls(name, v) + data = self.client.release_data(name, v) + metadata = Metadata(scheme=self.scheme) + metadata.name = data['name'] + metadata.version = data['version'] + metadata.license = data.get('license') + metadata.keywords = data.get('keywords', []) + metadata.summary = data.get('summary') + dist = Distribution(metadata) + if urls: + info = urls[0] + metadata.source_url = info['url'] + dist.digest = self._get_digest(info) + dist.locator = self + result[v] = dist + for info in urls: + url = info['url'] + digest = self._get_digest(info) + result['urls'].setdefault(v, set()).add(url) + result['digests'][url] = digest + return result + +class PyPIJSONLocator(Locator): + """ + This locator uses PyPI's JSON interface. It's very limited in functionality + and probably not worth using. + """ + def __init__(self, url, **kwargs): + super(PyPIJSONLocator, self).__init__(**kwargs) + self.base_url = ensure_slash(url) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. 
+ """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + url = urljoin(self.base_url, '%s/json' % quote(name)) + try: + resp = self.opener.open(url) + data = resp.read().decode() # for now + d = json.loads(data) + md = Metadata(scheme=self.scheme) + data = d['info'] + md.name = data['name'] + md.version = data['version'] + md.license = data.get('license') + md.keywords = data.get('keywords', []) + md.summary = data.get('summary') + dist = Distribution(md) + dist.locator = self + urls = d['urls'] + result[md.version] = dist + for info in d['urls']: + url = info['url'] + dist.download_urls.add(url) + dist.digests[url] = self._get_digest(info) + result['urls'].setdefault(md.version, set()).add(url) + result['digests'][url] = self._get_digest(info) + # Now get other releases + for version, infos in d['releases'].items(): + if version == md.version: + continue # already done + omd = Metadata(scheme=self.scheme) + omd.name = md.name + omd.version = version + odist = Distribution(omd) + odist.locator = self + result[version] = odist + for info in infos: + url = info['url'] + odist.download_urls.add(url) + odist.digests[url] = self._get_digest(info) + result['urls'].setdefault(version, set()).add(url) + result['digests'][url] = self._get_digest(info) +# for info in urls: +# md.source_url = info['url'] +# dist.digest = self._get_digest(info) +# dist.locator = self +# for info in urls: +# url = info['url'] +# result['urls'].setdefault(md.version, set()).add(url) +# result['digests'][url] = self._get_digest(info) + except Exception as e: + self.errors.put(text_type(e)) + logger.exception('JSON fetch failed: %s', e) + return result + + +class Page(object): + """ + This class represents a scraped HTML page. + """ + # The following slightly hairy-looking regex just looks for the contents of + # an anchor link, which has an attribute "href" either immediately preceded + # or immediately followed by a "rel" attribute. The attribute values can be + # declared with double quotes, single quotes or no quotes - which leads to + # the length of the expression. + _href = re.compile(""" +(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)? +href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*)) +(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))? +""", re.I | re.S | re.X) + _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S) + + def __init__(self, data, url): + """ + Initialise an instance with the Unicode page contents and the URL they + came from. + """ + self.data = data + self.base_url = self.url = url + m = self._base.search(self.data) + if m: + self.base_url = m.group(1) + + _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) + + @cached_property + def links(self): + """ + Return the URLs of all the links on a page together with information + about their "rel" attribute, for determining which ones to treat as + downloads and which ones to queue for further scraping. + """ + def clean(url): + "Tidy up an URL." 
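+            # split the URL, percent-encode its path, and reassemble it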
+            scheme, netloc, path, params, query, frag = urlparse(url)
+            return urlunparse((scheme, netloc, quote(path),
+                               params, query, frag))
+
+        result = set()
+        for match in self._href.finditer(self.data):
+            d = match.groupdict('')
+            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
+                   d['rel4'] or d['rel5'] or d['rel6'])
+            url = d['url1'] or d['url2'] or d['url3']
+            url = urljoin(self.base_url, url)
+            url = unescape(url)
+            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
+            result.add((url, rel))
+        # We sort the result, hoping to bring the most recent versions
+        # to the front
+        result = sorted(result, key=lambda t: t[0], reverse=True)
+        return result
+
+
+class SimpleScrapingLocator(Locator):
+    """
+    A locator which scrapes HTML pages to locate downloads for a distribution.
+    This runs multiple threads to do the I/O; performance is at least as good
+    as pip's PackageFinder, which works in an analogous fashion.
+    """
+
+    # These are used to deal with various Content-Encoding schemes.
+    decoders = {
+        'deflate': zlib.decompress,
+        # decompress the bytes actually passed in (b), not a stray name
+        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
+        'none': lambda b: b,
+    }
+
+    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
+        """
+        Initialise an instance.
+        :param url: The root URL to use for scraping.
+        :param timeout: The timeout, in seconds, to be applied to requests.
+                        This defaults to ``None`` (no timeout specified).
+        :param num_workers: The number of worker threads you want to do I/O.
+                            This defaults to 10.
+        :param kwargs: Passed to the superclass.
+        """
+        super(SimpleScrapingLocator, self).__init__(**kwargs)
+        self.base_url = ensure_slash(url)
+        self.timeout = timeout
+        self._page_cache = {}
+        self._seen = set()
+        self._to_fetch = queue.Queue()
+        self._bad_hosts = set()
+        self.skip_externals = False
+        self.num_workers = num_workers
+        self._lock = threading.RLock()
+        # See issue #45: we need to be resilient when the locator is used
+        # in a thread, e.g. with concurrent.futures. We can't use self._lock
+        # as it is for coordinating our internal threads - the ones created
+        # in _prepare_threads.
+        self._gplock = threading.RLock()
+        self.platform_check = False  # See issue #112
+
+    def _prepare_threads(self):
+        """
+        Threads are created only when get_project is called, and terminate
+        before it returns. They are there primarily to parallelise I/O (i.e.
+        fetching web pages).
+        """
+        self._threads = []
+        for i in range(self.num_workers):
+            t = threading.Thread(target=self._fetch)
+            t.setDaemon(True)
+            t.start()
+            self._threads.append(t)
+
+    def _wait_threads(self):
+        """
+        Tell all the threads to terminate (by sending a sentinel value) and
+        wait for them to do so.
+        """
+        # Note that you need two loops, since you can't say which
+        # thread will get each sentinel
+        for t in self._threads:
+            self._to_fetch.put(None)    # sentinel
+        for t in self._threads:
+            t.join()
+        self._threads = []
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        with self._gplock:
+            self.result = result
+            self.project_name = name
+            url = urljoin(self.base_url, '%s/' % quote(name))
+            self._seen.clear()
+            self._page_cache.clear()
+            self._prepare_threads()
+            try:
+                logger.debug('Queueing %s', url)
+                self._to_fetch.put(url)
+                self._to_fetch.join()
+            finally:
+                self._wait_threads()
+            del self.result
+        return result
+
+    platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
+                                    r'win(32|_amd64)|macosx_?\d+)\b', re.I)
+
+    def _is_platform_dependent(self, url):
+        """
+        Does an URL refer to a platform-specific download?
+ """ + return self.platform_dependent.search(url) + + def _process_download(self, url): + """ + See if an URL is a suitable download for a project. + + If it is, register information in the result dictionary (for + _get_project) about the specific version it's for. + + Note that the return value isn't actually used other than as a boolean + value. + """ + if self.platform_check and self._is_platform_dependent(url): + info = None + else: + info = self.convert_url_to_download_info(url, self.project_name) + logger.debug('process_download: %s -> %s', url, info) + if info: + with self._lock: # needed because self.result is shared + self._update_version_data(self.result, info) + return info + + def _should_queue(self, link, referrer, rel): + """ + Determine whether a link URL from a referring page and with a + particular "rel" attribute should be queued for scraping. + """ + scheme, netloc, path, _, _, _ = urlparse(link) + if path.endswith(self.source_extensions + self.binary_extensions + + self.excluded_extensions): + result = False + elif self.skip_externals and not link.startswith(self.base_url): + result = False + elif not referrer.startswith(self.base_url): + result = False + elif rel not in ('homepage', 'download'): + result = False + elif scheme not in ('http', 'https', 'ftp'): + result = False + elif self._is_platform_dependent(link): + result = False + else: + host = netloc.split(':', 1)[0] + if host.lower() == 'localhost': + result = False + else: + result = True + logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, + referrer, result) + return result + + def _fetch(self): + """ + Get a URL to fetch from the work queue, get the HTML page, examine its + links for download candidates and candidates for further scraping. + + This is a handy method to run in a thread. + """ + while True: + url = self._to_fetch.get() + try: + if url: + page = self.get_page(url) + if page is None: # e.g. after an error + continue + for link, rel in page.links: + if link not in self._seen: + try: + self._seen.add(link) + if (not self._process_download(link) and + self._should_queue(link, url, rel)): + logger.debug('Queueing %s from %s', link, url) + self._to_fetch.put(link) + except MetadataInvalidError: # e.g. invalid versions + pass + except Exception as e: # pragma: no cover + self.errors.put(text_type(e)) + finally: + # always do this, to avoid hangs :-) + self._to_fetch.task_done() + if not url: + #logger.debug('Sentinel seen, quitting.') + break + + def get_page(self, url): + """ + Get the HTML for an URL, possibly from an in-memory cache. + + XXX TODO Note: this cache is never actually cleared. It's assumed that + the data won't get stale over the lifetime of a locator instance (not + necessarily true for the default_locator). 
+ """ + # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api + scheme, netloc, path, _, _, _ = urlparse(url) + if scheme == 'file' and os.path.isdir(url2pathname(path)): + url = urljoin(ensure_slash(url), 'index.html') + + if url in self._page_cache: + result = self._page_cache[url] + logger.debug('Returning %s from cache: %s', url, result) + else: + host = netloc.split(':', 1)[0] + result = None + if host in self._bad_hosts: + logger.debug('Skipping %s due to bad host %s', url, host) + else: + req = Request(url, headers={'Accept-encoding': 'identity'}) + try: + logger.debug('Fetching %s', url) + resp = self.opener.open(req, timeout=self.timeout) + logger.debug('Fetched %s', url) + headers = resp.info() + content_type = headers.get('Content-Type', '') + if HTML_CONTENT_TYPE.match(content_type): + final_url = resp.geturl() + data = resp.read() + encoding = headers.get('Content-Encoding') + if encoding: + decoder = self.decoders[encoding] # fail if not found + data = decoder(data) + encoding = 'utf-8' + m = CHARSET.search(content_type) + if m: + encoding = m.group(1) + try: + data = data.decode(encoding) + except UnicodeError: # pragma: no cover + data = data.decode('latin-1') # fallback + result = Page(data, final_url) + self._page_cache[final_url] = result + except HTTPError as e: + if e.code != 404: + logger.exception('Fetch failed: %s: %s', url, e) + except URLError as e: # pragma: no cover + logger.exception('Fetch failed: %s: %s', url, e) + with self._lock: + self._bad_hosts.add(host) + except Exception as e: # pragma: no cover + logger.exception('Fetch failed: %s: %s', url, e) + finally: + self._page_cache[url] = result # even if None (failure) + return result + + _distname_re = re.compile('<a href=[^>]*>([^<]+)<') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + page = self.get_page(self.base_url) + if not page: + raise DistlibException('Unable to get %s' % self.base_url) + for match in self._distname_re.finditer(page.data): + result.add(match.group(1)) + return result + +class DirectoryLocator(Locator): + """ + This class locates distributions in a directory tree. + """ + + def __init__(self, path, **kwargs): + """ + Initialise an instance. + :param path: The root of the directory tree to search. + :param kwargs: Passed to the superclass constructor, + except for: + * recursive - if True (the default), subdirectories are + recursed into. If False, only the top-level directory + is searched, + """ + self.recursive = kwargs.pop('recursive', True) + super(DirectoryLocator, self).__init__(**kwargs) + path = os.path.abspath(path) + if not os.path.isdir(path): # pragma: no cover + raise DistlibException('Not a directory: %r' % path) + self.base_dir = path + + def should_include(self, filename, parent): + """ + Should a filename be considered as a candidate for a distribution + archive? As well as the filename, the directory which contains it + is provided, though not used by the current implementation. 
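+
+        Subclasses can override this to narrow the candidates; an
+        illustrative override that accepts only wheels::
+
+            def should_include(self, filename, parent):
+                return filename.endswith('.whl')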
+ """ + return filename.endswith(self.downloadable_extensions) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, name) + if info: + self._update_version_data(result, info) + if not self.recursive: + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, None) + if info: + result.add(info['name']) + if not self.recursive: + break + return result + +class JSONLocator(Locator): + """ + This locator uses special extended metadata (not available on PyPI) and is + the basis of performant dependency resolution in distlib. Other locators + require archive downloads before dependencies can be determined! As you + might imagine, that can be slow. + """ + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + data = get_project_data(name) + if data: + for info in data.get('files', []): + if info['ptype'] != 'sdist' or info['pyversion'] != 'source': + continue + # We don't store summary in project metadata as it makes + # the data bigger for no benefit during dependency + # resolution + dist = make_dist(data['name'], info['version'], + summary=data.get('summary', + 'Placeholder for summary'), + scheme=self.scheme) + md = dist.metadata + md.source_url = info['url'] + # TODO SHA256 digest + if 'digest' in info and info['digest']: + dist.digest = ('md5', info['digest']) + md.dependencies = info.get('requirements', {}) + dist.exports = info.get('exports', {}) + result[dist.version] = dist + result['urls'].setdefault(dist.version, set()).add(info['url']) + return result + +class DistPathLocator(Locator): + """ + This locator finds installed distributions in a path. It can be useful for + adding to an :class:`AggregatingLocator`. + """ + def __init__(self, distpath, **kwargs): + """ + Initialise an instance. + + :param distpath: A :class:`DistributionPath` instance to search. + """ + super(DistPathLocator, self).__init__(**kwargs) + assert isinstance(distpath, DistributionPath) + self.distpath = distpath + + def _get_project(self, name): + dist = self.distpath.get_distribution(name) + if dist is None: + result = {'urls': {}, 'digests': {}} + else: + result = { + dist.version: dist, + 'urls': {dist.version: set([dist.source_url])}, + 'digests': {dist.version: set([None])} + } + return result + + +class AggregatingLocator(Locator): + """ + This class allows you to chain and/or merge a list of locators. + """ + def __init__(self, *locators, **kwargs): + """ + Initialise an instance. + + :param locators: The list of locators to search. + :param kwargs: Passed to the superclass constructor, + except for: + * merge - if False (the default), the first successful + search from any of the locators is returned. If True, + the results from all locators are merged (this can be + slow). 
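+
+        For instance (mirroring ``default_locator`` defined later in this
+        module)::
+
+            locator = AggregatingLocator(
+                JSONLocator(),
+                SimpleScrapingLocator('https://pypi.python.org/simple/',
+                                      timeout=3.0),
+                scheme='legacy')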
+ """ + self.merge = kwargs.pop('merge', False) + self.locators = locators + super(AggregatingLocator, self).__init__(**kwargs) + + def clear_cache(self): + super(AggregatingLocator, self).clear_cache() + for locator in self.locators: + locator.clear_cache() + + def _set_scheme(self, value): + self._scheme = value + for locator in self.locators: + locator.scheme = value + + scheme = property(Locator.scheme.fget, _set_scheme) + + def _get_project(self, name): + result = {} + for locator in self.locators: + d = locator.get_project(name) + if d: + if self.merge: + files = result.get('urls', {}) + digests = result.get('digests', {}) + # next line could overwrite result['urls'], result['digests'] + result.update(d) + df = result.get('urls') + if files and df: + for k, v in files.items(): + if k in df: + df[k] |= v + else: + df[k] = v + dd = result.get('digests') + if digests and dd: + dd.update(digests) + else: + # See issue #18. If any dists are found and we're looking + # for specific constraints, we only return something if + # a match is found. For example, if a DirectoryLocator + # returns just foo (1.0) while we're looking for + # foo (>= 2.0), we'll pretend there was nothing there so + # that subsequent locators can be queried. Otherwise we + # would just return foo (1.0) which would then lead to a + # failure to find foo (>= 2.0), because other locators + # weren't searched. Note that this only matters when + # merge=False. + if self.matcher is None: + found = True + else: + found = False + for k in d: + if self.matcher.match(k): + found = True + break + if found: + result = d + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for locator in self.locators: + try: + result |= locator.get_distribution_names() + except NotImplementedError: + pass + return result + + +# We use a legacy scheme simply because most of the dists on PyPI use legacy +# versions which don't conform to PEP 426 / PEP 440. +default_locator = AggregatingLocator( + JSONLocator(), + SimpleScrapingLocator('https://pypi.python.org/simple/', + timeout=3.0), + scheme='legacy') + +locate = default_locator.locate + +NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*' + r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$') + +class DependencyFinder(object): + """ + Locate dependencies for distributions. + """ + + def __init__(self, locator=None): + """ + Initialise an instance, using the specified locator + to locate distributions. + """ + self.locator = locator or default_locator + self.scheme = get_scheme(self.locator.scheme) + + def add_distribution(self, dist): + """ + Add a distribution to the finder. This will update internal information + about who provides what. + :param dist: The distribution to add. + """ + logger.debug('adding distribution %s', dist) + name = dist.key + self.dists_by_name[name] = dist + self.dists[(name, dist.version)] = dist + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + self.provided.setdefault(name, set()).add((version, dist)) + + def remove_distribution(self, dist): + """ + Remove a distribution from the finder. This will update internal + information about who provides what. + :param dist: The distribution to remove. 
+ """ + logger.debug('removing distribution %s', dist) + name = dist.key + del self.dists_by_name[name] + del self.dists[(name, dist.version)] + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Remove from provided: %s, %s, %s', name, version, dist) + s = self.provided[name] + s.remove((version, dist)) + if not s: + del self.provided[name] + + def get_matcher(self, reqt): + """ + Get a version matcher for a requirement. + :param reqt: The requirement + :type reqt: str + :return: A version matcher (an instance of + :class:`distlib.version.Matcher`). + """ + try: + matcher = self.scheme.matcher(reqt) + except UnsupportedVersionError: # pragma: no cover + # XXX compat-mode if cannot read the version + name = reqt.split()[0] + matcher = self.scheme.matcher(name) + return matcher + + def find_providers(self, reqt): + """ + Find the distributions which can fulfill a requirement. + + :param reqt: The requirement. + :type reqt: str + :return: A set of distribution which can fulfill the requirement. + """ + matcher = self.get_matcher(reqt) + name = matcher.key # case-insensitive + result = set() + provided = self.provided + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + result.add(provider) + break + return result + + def try_to_replace(self, provider, other, problems): + """ + Attempt to replace one provider with another. This is typically used + when resolving dependencies from multiple sources, e.g. A requires + (B >= 1.0) while C requires (B >= 1.1). + + For successful replacement, ``provider`` must meet all the requirements + which ``other`` fulfills. + + :param provider: The provider we are trying to replace with. + :param other: The provider we're trying to replace. + :param problems: If False is returned, this will contain what + problems prevented replacement. This is currently + a tuple of the literal string 'cantreplace', + ``provider``, ``other`` and the set of requirements + that ``provider`` couldn't fulfill. + :return: True if we can replace ``other`` with ``provider``, else + False. + """ + rlist = self.reqts[other] + unmatched = set() + for s in rlist: + matcher = self.get_matcher(s) + if not matcher.match(provider.version): + unmatched.add(s) + if unmatched: + # can't replace other with provider + problems.add(('cantreplace', provider, other, + frozenset(unmatched))) + result = False + else: + # can replace other with provider + self.remove_distribution(other) + del self.reqts[other] + for s in rlist: + self.reqts.setdefault(provider, set()).add(s) + self.add_distribution(provider) + result = True + return result + + def find(self, requirement, meta_extras=None, prereleases=False): + """ + Find a distribution and all distributions it depends on. + + :param requirement: The requirement specifying the distribution to + find, or a Distribution instance. + :param meta_extras: A list of meta extras such as :test:, :build: and + so on. + :param prereleases: If ``True``, allow pre-release versions to be + returned - otherwise, don't return prereleases + unless they're all that's available. + + Return a set of :class:`Distribution` instances and a set of + problems. 
+ + The distributions returned should be such that they have the + :attr:`required` attribute set to ``True`` if they were + from the ``requirement`` passed to ``find()``, and they have the + :attr:`build_time_dependency` attribute set to ``True`` unless they + are post-installation dependencies of the ``requirement``. + + The problems should be a tuple consisting of the string + ``'unsatisfied'`` and the requirement which couldn't be satisfied + by any distribution known to the locator. + """ + + self.provided = {} + self.dists = {} + self.dists_by_name = {} + self.reqts = {} + + meta_extras = set(meta_extras or []) + if ':*:' in meta_extras: + meta_extras.remove(':*:') + # :meta: and :run: are implicitly included + meta_extras |= set([':test:', ':build:', ':dev:']) + + if isinstance(requirement, Distribution): + dist = odist = requirement + logger.debug('passed %s as requirement', odist) + else: + dist = odist = self.locator.locate(requirement, + prereleases=prereleases) + if dist is None: + raise DistlibException('Unable to locate %r' % requirement) + logger.debug('located %s', odist) + dist.requested = True + problems = set() + todo = set([dist]) + install_dists = set([odist]) + while todo: + dist = todo.pop() + name = dist.key # case-insensitive + if name not in self.dists_by_name: + self.add_distribution(dist) + else: + #import pdb; pdb.set_trace() + other = self.dists_by_name[name] + if other != dist: + self.try_to_replace(dist, other, problems) + + ireqts = dist.run_requires | dist.meta_requires + sreqts = dist.build_requires + ereqts = set() + if meta_extras and dist in install_dists: + for key in ('test', 'build', 'dev'): + e = ':%s:' % key + if e in meta_extras: + ereqts |= getattr(dist, '%s_requires' % key) + all_reqts = ireqts | sreqts | ereqts + for r in all_reqts: + providers = self.find_providers(r) + if not providers: + logger.debug('No providers found for %r', r) + provider = self.locator.locate(r, prereleases=prereleases) + # If no provider is found and we didn't consider + # prereleases, consider them now. 
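
For orientation, a minimal sketch of driving the locator machinery above, assuming the standalone distlib package is importable (the vendored copy under pip._vendor exposes the same API) and that the index behind default_locator is reachable; the requirement string is illustrative:

    from distlib.locators import locate, DependencyFinder

    # locate() is bound to default_locator (a JSONLocator chained with a
    # SimpleScrapingLocator) and returns a Distribution or None
    dist = locate('requests (>= 2.0)')
    if dist is not None:
        print(dist.name_and_version)

    # find() resolves the full dependency closure; problems is a set of
    # tuples such as ('unsatisfied', requirement)
    finder = DependencyFinder()
    dists, problems = finder.find('requests (>= 2.0)')
    for d in dists:
        suffix = ' (build-time only)' if d.build_time_dependency else ''
        print(d.name_and_version + suffix)
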
+ if provider is None and not prereleases: + provider = self.locator.locate(r, prereleases=True) + if provider is None: + logger.debug('Cannot satisfy %r', r) + problems.add(('unsatisfied', r)) + else: + n, v = provider.key, provider.version + if (n, v) not in self.dists: + todo.add(provider) + providers.add(provider) + if r in ireqts and dist in install_dists: + install_dists.add(provider) + logger.debug('Adding %s to install_dists', + provider.name_and_version) + for p in providers: + name = p.key + if name not in self.dists_by_name: + self.reqts.setdefault(p, set()).add(r) + else: + other = self.dists_by_name[name] + if other != p: + # see if other can be replaced by p + self.try_to_replace(p, other, problems) + + dists = set(self.dists.values()) + for dist in dists: + dist.build_time_dependency = dist not in install_dists + if dist.build_time_dependency: + logger.debug('%s is a build-time dependency only.', + dist.name_and_version) + logger.debug('find done for %s', odist) + return dists, problems diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyc new file mode 100644 index 0000000..2c48a1f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.py new file mode 100644 index 0000000..ca0fe44 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.py @@ -0,0 +1,393 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Class representing the list of files in a distribution. + +Equivalent to distutils.filelist, but fixes some problems. +""" +import fnmatch +import logging +import os +import re +import sys + +from . import DistlibException +from .compat import fsdecode +from .util import convert_path + + +__all__ = ['Manifest'] + +logger = logging.getLogger(__name__) + +# a \ followed by some spaces + EOL +_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M) +_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) + +# +# Due to the different results returned by fnmatch.translate, we need +# to do slightly different processing for Python 2.7 and 3.2 ... this needed +# to be brought in for Python 3.6 onwards. +# +_PYTHON_VERSION = sys.version_info[:2] + +class Manifest(object): + """A list of files built by on exploring the filesystem and filtered by + applying various patterns to what we find there. + """ + + def __init__(self, base=None): + """ + Initialise an instance. + + :param base: The base directory to explore under. + """ + self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) + self.prefix = self.base + os.sep + self.allfiles = None + self.files = set() + + # + # Public API + # + + def findall(self): + """Find all files under the base and set ``allfiles`` to the absolute + pathnames of files found. + """ + from stat import S_ISREG, S_ISDIR, S_ISLNK + + self.allfiles = allfiles = [] + root = self.base + stack = [root] + pop = stack.pop + push = stack.append + + while stack: + root = pop() + names = os.listdir(root) + + for name in names: + fullname = os.path.join(root, name) + + # Avoid excess stat calls -- just one will do, thank you! 
+ stat = os.stat(fullname) + mode = stat.st_mode + if S_ISREG(mode): + allfiles.append(fsdecode(fullname)) + elif S_ISDIR(mode) and not S_ISLNK(mode): + push(fullname) + + def add(self, item): + """ + Add a file to the manifest. + + :param item: The pathname to add. This can be relative to the base. + """ + if not item.startswith(self.prefix): + item = os.path.join(self.base, item) + self.files.add(os.path.normpath(item)) + + def add_many(self, items): + """ + Add a list of files to the manifest. + + :param items: The pathnames to add. These can be relative to the base. + """ + for item in items: + self.add(item) + + def sorted(self, wantdirs=False): + """ + Return sorted files in directory order + """ + + def add_dir(dirs, d): + dirs.add(d) + logger.debug('add_dir added %s', d) + if d != self.base: + parent, _ = os.path.split(d) + assert parent not in ('', '/') + add_dir(dirs, parent) + + result = set(self.files) # make a copy! + if wantdirs: + dirs = set() + for f in result: + add_dir(dirs, os.path.dirname(f)) + result |= dirs + return [os.path.join(*path_tuple) for path_tuple in + sorted(os.path.split(path) for path in result)] + + def clear(self): + """Clear all collected files.""" + self.files = set() + self.allfiles = [] + + def process_directive(self, directive): + """ + Process a directive which either adds some files from ``allfiles`` to + ``files``, or removes some files from ``files``. + + :param directive: The directive to process. This should be in a format + compatible with distutils ``MANIFEST.in`` files: + + http://docs.python.org/distutils/sourcedist.html#commands + """ + # Parse the line: split it up, make sure the right number of words + # is there, and return the relevant words. 'action' is always + # defined: it's the first word of the line. Which of the other + # three are defined depends on the action; it'll be either + # patterns, (dir and patterns), or (dirpattern). + action, patterns, thedir, dirpattern = self._parse_directive(directive) + + # OK, now we know that the action is valid and we have the + # right number of words on the line for that action -- so we + # can proceed with minimal error-checking. 
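
A minimal sketch of the directive actions dispatched just below, assuming the standalone distlib package; the base directory and patterns are hypothetical:

    from distlib.manifest import Manifest

    m = Manifest('/path/to/project')           # hypothetical source tree
    m.process_directive('include README.rst')  # anchored at the base
    m.process_directive('recursive-include docs *.txt')
    m.process_directive('global-exclude *.py[co]')
    m.process_directive('prune build')
    print(m.sorted())                          # the files that survived
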
+ if action == 'include': + for pattern in patterns: + if not self._include_pattern(pattern, anchor=True): + logger.warning('no files found matching %r', pattern) + + elif action == 'exclude': + for pattern in patterns: + found = self._exclude_pattern(pattern, anchor=True) + #if not found: + # logger.warning('no previously-included files ' + # 'found matching %r', pattern) + + elif action == 'global-include': + for pattern in patterns: + if not self._include_pattern(pattern, anchor=False): + logger.warning('no files found matching %r ' + 'anywhere in distribution', pattern) + + elif action == 'global-exclude': + for pattern in patterns: + found = self._exclude_pattern(pattern, anchor=False) + #if not found: + # logger.warning('no previously-included files ' + # 'matching %r found anywhere in ' + # 'distribution', pattern) + + elif action == 'recursive-include': + for pattern in patterns: + if not self._include_pattern(pattern, prefix=thedir): + logger.warning('no files found matching %r ' + 'under directory %r', pattern, thedir) + + elif action == 'recursive-exclude': + for pattern in patterns: + found = self._exclude_pattern(pattern, prefix=thedir) + #if not found: + # logger.warning('no previously-included files ' + # 'matching %r found under directory %r', + # pattern, thedir) + + elif action == 'graft': + if not self._include_pattern(None, prefix=dirpattern): + logger.warning('no directories found matching %r', + dirpattern) + + elif action == 'prune': + if not self._exclude_pattern(None, prefix=dirpattern): + logger.warning('no previously-included directories found ' + 'matching %r', dirpattern) + else: # pragma: no cover + # This should never happen, as it should be caught in + # _parse_template_line + raise DistlibException( + 'invalid action %r' % action) + + # + # Private API + # + + def _parse_directive(self, directive): + """ + Validate a directive. + :param directive: The directive to validate. + :return: A tuple of action, patterns, thedir, dir_patterns + """ + words = directive.split() + if len(words) == 1 and words[0] not in ('include', 'exclude', + 'global-include', + 'global-exclude', + 'recursive-include', + 'recursive-exclude', + 'graft', 'prune'): + # no action given, let's use the default 'include' + words.insert(0, 'include') + + action = words[0] + patterns = thedir = dir_pattern = None + + if action in ('include', 'exclude', + 'global-include', 'global-exclude'): + if len(words) < 2: + raise DistlibException( + '%r expects <pattern1> <pattern2> ...' % action) + + patterns = [convert_path(word) for word in words[1:]] + + elif action in ('recursive-include', 'recursive-exclude'): + if len(words) < 3: + raise DistlibException( + '%r expects <dir> <pattern1> <pattern2> ...' % action) + + thedir = convert_path(words[1]) + patterns = [convert_path(word) for word in words[2:]] + + elif action in ('graft', 'prune'): + if len(words) != 2: + raise DistlibException( + '%r expects a single <dir_pattern>' % action) + + dir_pattern = convert_path(words[1]) + + else: + raise DistlibException('unknown action %r' % action) + + return action, patterns, thedir, dir_pattern + + def _include_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Select strings (presumably filenames) from 'self.files' that + match 'pattern', a Unix-style wildcard (glob) pattern. + + Patterns are not quite the same as implemented by the 'fnmatch' + module: '*' and '?' 
match non-special characters, where "special" + is platform-dependent: slash on Unix; colon, slash, and backslash on + DOS/Windows; and colon on Mac OS. + + If 'anchor' is true (the default), then the pattern match is more + stringent: "*.py" will match "foo.py" but not "foo/bar.py". If + 'anchor' is false, both of these will match. + + If 'prefix' is supplied, then only filenames starting with 'prefix' + (itself a pattern) and ending with 'pattern', with anything in between + them, will match. 'anchor' is ignored in this case. + + If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and + 'pattern' is assumed to be either a string containing a regex or a + regex object -- no translation is done, the regex is just compiled + and used as-is. + + Selected strings will be added to self.files. + + Return True if files are found. + """ + # XXX docstring lying about what the special chars are? + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + + # delayed loading of allfiles list + if self.allfiles is None: + self.findall() + + for name in self.allfiles: + if pattern_re.search(name): + self.files.add(name) + found = True + return found + + def _exclude_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Remove strings (presumably filenames) from 'files' that match + 'pattern'. + + Other parameters are the same as for 'include_pattern()', above. + The list 'self.files' is modified in place. Return True if files are + found. + + This API is public to allow e.g. exclusion of SCM subdirs, e.g. when + packaging source distributions + """ + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + for f in list(self.files): + if pattern_re.search(f): + self.files.remove(f) + found = True + return found + + def _translate_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Translate a shell-like wildcard pattern to a compiled regular + expression. + + Return the compiled regex. If 'is_regex' true, + then 'pattern' is directly compiled to a regex (if it's a string) + or just returned as-is (assumes it's a regex object). 
+ """ + if is_regex: + if isinstance(pattern, str): + return re.compile(pattern) + else: + return pattern + + if _PYTHON_VERSION > (3, 2): + # ditch start and end characters + start, _, end = self._glob_to_re('_').partition('_') + + if pattern: + pattern_re = self._glob_to_re(pattern) + if _PYTHON_VERSION > (3, 2): + assert pattern_re.startswith(start) and pattern_re.endswith(end) + else: + pattern_re = '' + + base = re.escape(os.path.join(self.base, '')) + if prefix is not None: + # ditch end of pattern character + if _PYTHON_VERSION <= (3, 2): + empty_pattern = self._glob_to_re('') + prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] + else: + prefix_re = self._glob_to_re(prefix) + assert prefix_re.startswith(start) and prefix_re.endswith(end) + prefix_re = prefix_re[len(start): len(prefix_re) - len(end)] + sep = os.sep + if os.sep == '\\': + sep = r'\\' + if _PYTHON_VERSION <= (3, 2): + pattern_re = '^' + base + sep.join((prefix_re, + '.*' + pattern_re)) + else: + pattern_re = pattern_re[len(start): len(pattern_re) - len(end)] + pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep, + pattern_re, end) + else: # no prefix -- respect anchor flag + if anchor: + if _PYTHON_VERSION <= (3, 2): + pattern_re = '^' + base + pattern_re + else: + pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):]) + + return re.compile(pattern_re) + + def _glob_to_re(self, pattern): + """Translate a shell-like glob pattern to a regular expression. + + Return a string containing the regex. Differs from + 'fnmatch.translate()' in that '*' does not match "special characters" + (which are platform-specific). + """ + pattern_re = fnmatch.translate(pattern) + + # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which + # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, + # and by extension they shouldn't match such "special characters" under + # any OS. So change all non-escaped dots in the RE to match any + # character except the special characters (currently: just os.sep). + sep = os.sep + if os.sep == '\\': + # we're using a regex to manipulate a regex, so we need + # to escape the backslash twice + sep = r'\\\\' + escaped = r'\1[^%s]' % sep + pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re) + return pattern_re diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyc new file mode 100644 index 0000000..d85dd4d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/markers.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/markers.py new file mode 100644 index 0000000..ee1f3e2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/markers.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Parser for the environment markers micro-language defined in PEP 508. +""" + +# Note: In PEP 345, the micro-language was Python compatible, so the ast +# module could be used to parse it. However, PEP 508 introduced operators such +# as ~= and === which aren't in Python, necessitating a different approach. 
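
A minimal sketch of the interpret() entry point defined below; results depend on the running interpreter, and the explicit context passed in the second call is illustrative:

    from distlib.markers import interpret

    # evaluated against a default context built from the running interpreter
    print(interpret('python_version >= "2.7" and os_name == "posix"'))

    # an execution_context mapping overrides the defaults
    print(interpret('python_version < "3.0"', {'python_version': '2.7'}))
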
+ +import os +import sys +import platform +import re + +from .compat import python_implementation, urlparse, string_types +from .util import in_venv, parse_marker + +__all__ = ['interpret'] + +def _is_literal(o): + if not isinstance(o, string_types) or not o: + return False + return o[0] in '\'"' + +class Evaluator(object): + """ + This class is used to evaluate marker expessions. + """ + + operations = { + '==': lambda x, y: x == y, + '===': lambda x, y: x == y, + '~=': lambda x, y: x == y or x > y, + '!=': lambda x, y: x != y, + '<': lambda x, y: x < y, + '<=': lambda x, y: x == y or x < y, + '>': lambda x, y: x > y, + '>=': lambda x, y: x == y or x > y, + 'and': lambda x, y: x and y, + 'or': lambda x, y: x or y, + 'in': lambda x, y: x in y, + 'not in': lambda x, y: x not in y, + } + + def evaluate(self, expr, context): + """ + Evaluate a marker expression returned by the :func:`parse_requirement` + function in the specified context. + """ + if isinstance(expr, string_types): + if expr[0] in '\'"': + result = expr[1:-1] + else: + if expr not in context: + raise SyntaxError('unknown variable: %s' % expr) + result = context[expr] + else: + assert isinstance(expr, dict) + op = expr['op'] + if op not in self.operations: + raise NotImplementedError('op not implemented: %s' % op) + elhs = expr['lhs'] + erhs = expr['rhs'] + if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): + raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) + + lhs = self.evaluate(elhs, context) + rhs = self.evaluate(erhs, context) + result = self.operations[op](lhs, rhs) + return result + +def default_context(): + def format_full_version(info): + version = '%s.%s.%s' % (info.major, info.minor, info.micro) + kind = info.releaselevel + if kind != 'final': + version += kind[0] + str(info.serial) + return version + + if hasattr(sys, 'implementation'): + implementation_version = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + else: + implementation_version = '0' + implementation_name = '' + + result = { + 'implementation_name': implementation_name, + 'implementation_version': implementation_version, + 'os_name': os.name, + 'platform_machine': platform.machine(), + 'platform_python_implementation': platform.python_implementation(), + 'platform_release': platform.release(), + 'platform_system': platform.system(), + 'platform_version': platform.version(), + 'platform_in_venv': str(in_venv()), + 'python_full_version': platform.python_version(), + 'python_version': platform.python_version()[:3], + 'sys_platform': sys.platform, + } + return result + +DEFAULT_CONTEXT = default_context() +del default_context + +evaluator = Evaluator() + +def interpret(marker, execution_context=None): + """ + Interpret a marker and return a result depending on environment. + + :param marker: The marker to interpret. + :type marker: str + :param execution_context: The context used for name lookup. 
+ :type execution_context: mapping + """ + try: + expr, rest = parse_marker(marker) + except Exception as e: + raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e)) + if rest and rest[0] != '#': + raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest)) + context = dict(DEFAULT_CONTEXT) + if execution_context: + context.update(execution_context) + return evaluator.evaluate(expr, context) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyc new file mode 100644 index 0000000..7cdb3ff Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/markers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py new file mode 100644 index 0000000..77eed7f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py @@ -0,0 +1,1094 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Implementation of the Metadata for Python packages PEPs. + +Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). +""" +from __future__ import unicode_literals + +import codecs +from email import message_from_file +import json +import logging +import re + + +from . import DistlibException, __version__ +from .compat import StringIO, string_types, text_type +from .markers import interpret +from .util import extract_by_key, get_extras +from .version import get_scheme, PEP440_VERSION_RE + +logger = logging.getLogger(__name__) + + +class MetadataMissingError(DistlibException): + """A required metadata is missing""" + + +class MetadataConflictError(DistlibException): + """Attempt to read or write metadata fields that are conflictual.""" + + +class MetadataUnrecognizedVersionError(DistlibException): + """Unknown metadata version number.""" + + +class MetadataInvalidError(DistlibException): + """A metadata value is invalid""" + +# public API of this module +__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] + +# Encoding used for the PKG-INFO files +PKG_INFO_ENCODING = 'utf-8' + +# preferred version. 
Hopefully will be changed +# to 1.2 once PEP 345 is supported everywhere +PKG_INFO_PREFERRED_VERSION = '1.1' + +_LINE_PREFIX_1_2 = re.compile('\n \\|') +_LINE_PREFIX_PRE_1_2 = re.compile('\n ') +_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License') + +_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License', 'Classifier', 'Download-URL', 'Obsoletes', + 'Provides', 'Requires') + +_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', + 'Download-URL') + +_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External') + +_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', + 'Obsoletes-Dist', 'Requires-External', 'Maintainer', + 'Maintainer-email', 'Project-URL') + +_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External', 'Private-Version', + 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', + 'Provides-Extra') + +_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', + 'Setup-Requires-Dist', 'Extension') + +# See issue #106: Sometimes 'Requires' occurs wrongly in the metadata. 
Include +# it in the tuple literal below to allow it (for now) +_566_FIELDS = _426_FIELDS + ('Description-Content-Type', 'Requires') + +_566_MARKERS = ('Description-Content-Type',) + +_ALL_FIELDS = set() +_ALL_FIELDS.update(_241_FIELDS) +_ALL_FIELDS.update(_314_FIELDS) +_ALL_FIELDS.update(_345_FIELDS) +_ALL_FIELDS.update(_426_FIELDS) +_ALL_FIELDS.update(_566_FIELDS) + +EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') + + +def _version2fieldlist(version): + if version == '1.0': + return _241_FIELDS + elif version == '1.1': + return _314_FIELDS + elif version == '1.2': + return _345_FIELDS + elif version in ('1.3', '2.1'): + return _345_FIELDS + _566_FIELDS + elif version == '2.0': + return _426_FIELDS + raise MetadataUnrecognizedVersionError(version) + + +def _best_version(fields): + """Detect the best version depending on the fields used.""" + def _has_marker(keys, markers): + for marker in markers: + if marker in keys: + return True + return False + + keys = [] + for key, value in fields.items(): + if value in ([], 'UNKNOWN', None): + continue + keys.append(key) + + possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1'] + + # first let's try to see if a field is not part of one of the version + for key in keys: + if key not in _241_FIELDS and '1.0' in possible_versions: + possible_versions.remove('1.0') + logger.debug('Removed 1.0 due to %s', key) + if key not in _314_FIELDS and '1.1' in possible_versions: + possible_versions.remove('1.1') + logger.debug('Removed 1.1 due to %s', key) + if key not in _345_FIELDS and '1.2' in possible_versions: + possible_versions.remove('1.2') + logger.debug('Removed 1.2 due to %s', key) + if key not in _566_FIELDS and '1.3' in possible_versions: + possible_versions.remove('1.3') + logger.debug('Removed 1.3 due to %s', key) + if key not in _566_FIELDS and '2.1' in possible_versions: + if key != 'Description': # In 2.1, description allowed after headers + possible_versions.remove('2.1') + logger.debug('Removed 2.1 due to %s', key) + if key not in _426_FIELDS and '2.0' in possible_versions: + possible_versions.remove('2.0') + logger.debug('Removed 2.0 due to %s', key) + + # possible_version contains qualified versions + if len(possible_versions) == 1: + return possible_versions[0] # found ! 
+ elif len(possible_versions) == 0: + logger.debug('Out of options - unknown metadata set: %s', fields) + raise MetadataConflictError('Unknown metadata set') + + # let's see if one unique marker is found + is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) + is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) + is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS) + is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) + if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1: + raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields') + + # we have the choice, 1.0, or 1.2, or 2.0 + # - 1.0 has a broken Summary field but works with all tools + # - 1.1 is to avoid + # - 1.2 fixes Summary but has little adoption + # - 2.0 adds more features and is very new + if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0: + # we couldn't find any specific marker + if PKG_INFO_PREFERRED_VERSION in possible_versions: + return PKG_INFO_PREFERRED_VERSION + if is_1_1: + return '1.1' + if is_1_2: + return '1.2' + if is_2_1: + return '2.1' + + return '2.0' + +_ATTR2FIELD = { + 'metadata_version': 'Metadata-Version', + 'name': 'Name', + 'version': 'Version', + 'platform': 'Platform', + 'supported_platform': 'Supported-Platform', + 'summary': 'Summary', + 'description': 'Description', + 'keywords': 'Keywords', + 'home_page': 'Home-page', + 'author': 'Author', + 'author_email': 'Author-email', + 'maintainer': 'Maintainer', + 'maintainer_email': 'Maintainer-email', + 'license': 'License', + 'classifier': 'Classifier', + 'download_url': 'Download-URL', + 'obsoletes_dist': 'Obsoletes-Dist', + 'provides_dist': 'Provides-Dist', + 'requires_dist': 'Requires-Dist', + 'setup_requires_dist': 'Setup-Requires-Dist', + 'requires_python': 'Requires-Python', + 'requires_external': 'Requires-External', + 'requires': 'Requires', + 'provides': 'Provides', + 'obsoletes': 'Obsoletes', + 'project_url': 'Project-URL', + 'private_version': 'Private-Version', + 'obsoleted_by': 'Obsoleted-By', + 'extension': 'Extension', + 'provides_extra': 'Provides-Extra', +} + +_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') +_VERSIONS_FIELDS = ('Requires-Python',) +_VERSION_FIELDS = ('Version',) +_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', + 'Requires', 'Provides', 'Obsoletes-Dist', + 'Provides-Dist', 'Requires-Dist', 'Requires-External', + 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', + 'Provides-Extra', 'Extension') +_LISTTUPLEFIELDS = ('Project-URL',) + +_ELEMENTSFIELD = ('Keywords',) + +_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') + +_MISSING = object() + +_FILESAFE = re.compile('[^A-Za-z0-9.]+') + + +def _get_name_and_version(name, version, for_filename=False): + """Return the distribution name with version. + + If for_filename is true, return a filename-escaped form.""" + if for_filename: + # For both name and version any runs of non-alphanumeric or '.' + # characters are replaced with a single '-'. Additionally any + # spaces in the version string become '.' + name = _FILESAFE.sub('-', name) + version = _FILESAFE.sub('-', version.replace(' ', '.')) + return '%s-%s' % (name, version) + + +class LegacyMetadata(object): + """The legacy metadata of a release. + + Supports versions 1.0, 1.1 and 1.2 (auto-detected). 
You can + instantiate the class with one of these arguments (or none): + - *path*, the path to a metadata file + - *fileobj* give a file-like object with metadata as content + - *mapping* is a dict-like object + - *scheme* is a version scheme name + """ + # TODO document the mapping API and UNKNOWN default key + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._fields = {} + self.requires_files = [] + self._dependencies = None + self.scheme = scheme + if path is not None: + self.read(path) + elif fileobj is not None: + self.read_file(fileobj) + elif mapping is not None: + self.update(mapping) + self.set_metadata_version() + + def set_metadata_version(self): + self._fields['Metadata-Version'] = _best_version(self._fields) + + def _write_field(self, fileobj, name, value): + fileobj.write('%s: %s\n' % (name, value)) + + def __getitem__(self, name): + return self.get(name) + + def __setitem__(self, name, value): + return self.set(name, value) + + def __delitem__(self, name): + field_name = self._convert_name(name) + try: + del self._fields[field_name] + except KeyError: + raise KeyError(name) + + def __contains__(self, name): + return (name in self._fields or + self._convert_name(name) in self._fields) + + def _convert_name(self, name): + if name in _ALL_FIELDS: + return name + name = name.replace('-', '_').lower() + return _ATTR2FIELD.get(name, name) + + def _default_value(self, name): + if name in _LISTFIELDS or name in _ELEMENTSFIELD: + return [] + return 'UNKNOWN' + + def _remove_line_prefix(self, value): + if self.metadata_version in ('1.0', '1.1'): + return _LINE_PREFIX_PRE_1_2.sub('\n', value) + else: + return _LINE_PREFIX_1_2.sub('\n', value) + + def __getattr__(self, name): + if name in _ATTR2FIELD: + return self[name] + raise AttributeError(name) + + # + # Public API + # + +# dependencies = property(_get_dependencies, _set_dependencies) + + def get_fullname(self, filesafe=False): + """Return the distribution name with version. 
+ + If filesafe is true, return a filename-escaped form.""" + return _get_name_and_version(self['Name'], self['Version'], filesafe) + + def is_field(self, name): + """return True if name is a valid metadata key""" + name = self._convert_name(name) + return name in _ALL_FIELDS + + def is_multi_field(self, name): + name = self._convert_name(name) + return name in _LISTFIELDS + + def read(self, filepath): + """Read the metadata values from a file path.""" + fp = codecs.open(filepath, 'r', encoding='utf-8') + try: + self.read_file(fp) + finally: + fp.close() + + def read_file(self, fileob): + """Read the metadata values from a file object.""" + msg = message_from_file(fileob) + self._fields['Metadata-Version'] = msg['metadata-version'] + + # When reading, get all the fields we can + for field in _ALL_FIELDS: + if field not in msg: + continue + if field in _LISTFIELDS: + # we can have multiple lines + values = msg.get_all(field) + if field in _LISTTUPLEFIELDS and values is not None: + values = [tuple(value.split(',')) for value in values] + self.set(field, values) + else: + # single line + value = msg[field] + if value is not None and value != 'UNKNOWN': + self.set(field, value) + # logger.debug('Attempting to set metadata for %s', self) + # self.set_metadata_version() + + def write(self, filepath, skip_unknown=False): + """Write the metadata fields to filepath.""" + fp = codecs.open(filepath, 'w', encoding='utf-8') + try: + self.write_file(fp, skip_unknown) + finally: + fp.close() + + def write_file(self, fileobject, skip_unknown=False): + """Write the PKG-INFO format data to a file object.""" + self.set_metadata_version() + + for field in _version2fieldlist(self['Metadata-Version']): + values = self.get(field) + if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): + continue + if field in _ELEMENTSFIELD: + self._write_field(fileobject, field, ','.join(values)) + continue + if field not in _LISTFIELDS: + if field == 'Description': + if self.metadata_version in ('1.0', '1.1'): + values = values.replace('\n', '\n ') + else: + values = values.replace('\n', '\n |') + values = [values] + + if field in _LISTTUPLEFIELDS: + values = [','.join(value) for value in values] + + for value in values: + self._write_field(fileobject, field, value) + + def update(self, other=None, **kwargs): + """Set metadata values from the given iterable `other` and kwargs. + + Behavior is like `dict.update`: If `other` has a ``keys`` method, + they are looped over and ``self[key]`` is assigned ``other[key]``. + Else, ``other`` is an iterable of ``(key, value)`` iterables. + + Keys that don't match a metadata field or that have an empty value are + dropped. 
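
A minimal sketch of the LegacyMetadata mapping API above; the field values are illustrative, and the mapping keys use the attribute style, since anything else is dropped as described:

    from distlib.metadata import LegacyMetadata

    md = LegacyMetadata(mapping={'name': 'example',
                                 'version': '1.0',
                                 'summary': 'An example distribution'})
    md['Requires-Dist'] = ['requests (>=2.0)']
    missing, warnings = md.check()     # e.g. reports 'Home-page', 'Author'
    print(md['Metadata-Version'])      # inferred from the fields in use
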
+ """ + def _set(key, value): + if key in _ATTR2FIELD and value: + self.set(self._convert_name(key), value) + + if not other: + # other is None or empty container + pass + elif hasattr(other, 'keys'): + for k in other.keys(): + _set(k, other[k]) + else: + for k, v in other: + _set(k, v) + + if kwargs: + for k, v in kwargs.items(): + _set(k, v) + + def set(self, name, value): + """Control then set a metadata field.""" + name = self._convert_name(name) + + if ((name in _ELEMENTSFIELD or name == 'Platform') and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [v.strip() for v in value.split(',')] + else: + value = [] + elif (name in _LISTFIELDS and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [value] + else: + value = [] + + if logger.isEnabledFor(logging.WARNING): + project_name = self['Name'] + + scheme = get_scheme(self.scheme) + if name in _PREDICATE_FIELDS and value is not None: + for v in value: + # check that the values are valid + if not scheme.is_valid_matcher(v.split(';')[0]): + logger.warning( + "'%s': '%s' is not valid (field '%s')", + project_name, v, name) + # FIXME this rejects UNKNOWN, is that right? + elif name in _VERSIONS_FIELDS and value is not None: + if not scheme.is_valid_constraint_list(value): + logger.warning("'%s': '%s' is not a valid version (field '%s')", + project_name, value, name) + elif name in _VERSION_FIELDS and value is not None: + if not scheme.is_valid_version(value): + logger.warning("'%s': '%s' is not a valid version (field '%s')", + project_name, value, name) + + if name in _UNICODEFIELDS: + if name == 'Description': + value = self._remove_line_prefix(value) + + self._fields[name] = value + + def get(self, name, default=_MISSING): + """Get a metadata field.""" + name = self._convert_name(name) + if name not in self._fields: + if default is _MISSING: + default = self._default_value(name) + return default + if name in _UNICODEFIELDS: + value = self._fields[name] + return value + elif name in _LISTFIELDS: + value = self._fields[name] + if value is None: + return [] + res = [] + for val in value: + if name not in _LISTTUPLEFIELDS: + res.append(val) + else: + # That's for Project-URL + res.append((val[0], val[1])) + return res + + elif name in _ELEMENTSFIELD: + value = self._fields[name] + if isinstance(value, string_types): + return value.split(',') + return self._fields[name] + + def check(self, strict=False): + """Check if the metadata is compliant. 
If strict is True then raise if + no Name or Version are provided""" + self.set_metadata_version() + + # XXX should check the versions (if the file was loaded) + missing, warnings = [], [] + + for attr in ('Name', 'Version'): # required by PEP 345 + if attr not in self: + missing.append(attr) + + if strict and missing != []: + msg = 'missing required metadata: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + + for attr in ('Home-page', 'Author'): + if attr not in self: + missing.append(attr) + + # checking metadata 1.2 (XXX needs to check 1.1, 1.0) + if self['Metadata-Version'] != '1.2': + return missing, warnings + + scheme = get_scheme(self.scheme) + + def are_valid_constraints(value): + for v in value: + if not scheme.is_valid_matcher(v.split(';')[0]): + return False + return True + + for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), + (_VERSIONS_FIELDS, + scheme.is_valid_constraint_list), + (_VERSION_FIELDS, + scheme.is_valid_version)): + for field in fields: + value = self.get(field, None) + if value is not None and not controller(value): + warnings.append("Wrong value for '%s': %s" % (field, value)) + + return missing, warnings + + def todict(self, skip_missing=False): + """Return fields as a dict. + + Field names will be converted to use the underscore-lowercase style + instead of hyphen-mixed case (i.e. home_page instead of Home-page). + """ + self.set_metadata_version() + + mapping_1_0 = ( + ('metadata_version', 'Metadata-Version'), + ('name', 'Name'), + ('version', 'Version'), + ('summary', 'Summary'), + ('home_page', 'Home-page'), + ('author', 'Author'), + ('author_email', 'Author-email'), + ('license', 'License'), + ('description', 'Description'), + ('keywords', 'Keywords'), + ('platform', 'Platform'), + ('classifiers', 'Classifier'), + ('download_url', 'Download-URL'), + ) + + data = {} + for key, field_name in mapping_1_0: + if not skip_missing or field_name in self._fields: + data[key] = self[field_name] + + if self['Metadata-Version'] == '1.2': + mapping_1_2 = ( + ('requires_dist', 'Requires-Dist'), + ('requires_python', 'Requires-Python'), + ('requires_external', 'Requires-External'), + ('provides_dist', 'Provides-Dist'), + ('obsoletes_dist', 'Obsoletes-Dist'), + ('project_url', 'Project-URL'), + ('maintainer', 'Maintainer'), + ('maintainer_email', 'Maintainer-email'), + ) + for key, field_name in mapping_1_2: + if not skip_missing or field_name in self._fields: + if key != 'project_url': + data[key] = self[field_name] + else: + data[key] = [','.join(u) for u in self[field_name]] + + elif self['Metadata-Version'] == '1.1': + mapping_1_1 = ( + ('provides', 'Provides'), + ('requires', 'Requires'), + ('obsoletes', 'Obsoletes'), + ) + for key, field_name in mapping_1_1: + if not skip_missing or field_name in self._fields: + data[key] = self[field_name] + + return data + + def add_requirements(self, requirements): + if self['Metadata-Version'] == '1.1': + # we can't have 1.1 metadata *and* Setuptools requires + for field in ('Obsoletes', 'Requires', 'Provides'): + if field in self: + del self[field] + self['Requires-Dist'] += requirements + + # Mapping API + # TODO could add iter* variants + + def keys(self): + return list(_version2fieldlist(self['Metadata-Version'])) + + def __iter__(self): + for key in self.keys(): + yield key + + def values(self): + return [self[key] for key in self.keys()] + + def items(self): + return [(key, self[key]) for key in self.keys()] + + def __repr__(self): + return '<%s %s %s>' % (self.__class__.__name__, 
self.name, + self.version) + + +METADATA_FILENAME = 'pydist.json' +WHEEL_METADATA_FILENAME = 'metadata.json' +LEGACY_METADATA_FILENAME = 'METADATA' + + +class Metadata(object): + """ + The metadata of a release. This implementation uses 2.0 (JSON) + metadata where possible. If not possible, it wraps a LegacyMetadata + instance which handles the key-value metadata format. + """ + + METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$') + + NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) + + VERSION_MATCHER = PEP440_VERSION_RE + + SUMMARY_MATCHER = re.compile('.{1,2047}') + + METADATA_VERSION = '2.0' + + GENERATOR = 'distlib (%s)' % __version__ + + MANDATORY_KEYS = { + 'name': (), + 'version': (), + 'summary': ('legacy',), + } + + INDEX_KEYS = ('name version license summary description author ' + 'author_email keywords platform home_page classifiers ' + 'download_url') + + DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires ' + 'dev_requires provides meta_requires obsoleted_by ' + 'supports_environments') + + SYNTAX_VALIDATORS = { + 'metadata_version': (METADATA_VERSION_MATCHER, ()), + 'name': (NAME_MATCHER, ('legacy',)), + 'version': (VERSION_MATCHER, ('legacy',)), + 'summary': (SUMMARY_MATCHER, ('legacy',)), + } + + __slots__ = ('_legacy', '_data', 'scheme') + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._legacy = None + self._data = None + self.scheme = scheme + #import pdb; pdb.set_trace() + if mapping is not None: + try: + self._validate_mapping(mapping, scheme) + self._data = mapping + except MetadataUnrecognizedVersionError: + self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme) + self.validate() + else: + data = None + if path: + with open(path, 'rb') as f: + data = f.read() + elif fileobj: + data = fileobj.read() + if data is None: + # Initialised with no args - to be added + self._data = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + else: + if not isinstance(data, text_type): + data = data.decode('utf-8') + try: + self._data = json.loads(data) + self._validate_mapping(self._data, scheme) + except ValueError: + # Note: MetadataUnrecognizedVersionError does not + # inherit from ValueError (it's a DistlibException, + # which should not inherit from ValueError). 
+ # The ValueError comes from the json.load - if that + # succeeds and we get a validation error, we want + # that to propagate + self._legacy = LegacyMetadata(fileobj=StringIO(data), + scheme=scheme) + self.validate() + + common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) + + none_list = (None, list) + none_dict = (None, dict) + + mapped_keys = { + 'run_requires': ('Requires-Dist', list), + 'build_requires': ('Setup-Requires-Dist', list), + 'dev_requires': none_list, + 'test_requires': none_list, + 'meta_requires': none_list, + 'extras': ('Provides-Extra', list), + 'modules': none_list, + 'namespaces': none_list, + 'exports': none_dict, + 'commands': none_dict, + 'classifiers': ('Classifier', list), + 'source_url': ('Download-URL', None), + 'metadata_version': ('Metadata-Version', None), + } + + del none_list, none_dict + + def __getattribute__(self, key): + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, maker = mapped[key] + if self._legacy: + if lk is None: + result = None if maker is None else maker() + else: + result = self._legacy.get(lk) + else: + value = None if maker is None else maker() + if key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + result = self._data.get(key, value) + else: + # special cases for PEP 459 + sentinel = object() + result = sentinel + d = self._data.get('extensions') + if d: + if key == 'commands': + result = d.get('python.commands', value) + elif key == 'classifiers': + d = d.get('python.details') + if d: + result = d.get(key, value) + else: + d = d.get('python.exports') + if not d: + d = self._data.get('python.exports') + if d: + result = d.get(key, value) + if result is sentinel: + result = value + elif key not in common: + result = object.__getattribute__(self, key) + elif self._legacy: + result = self._legacy.get(key) + else: + result = self._data.get(key) + return result + + def _validate_value(self, key, value, scheme=None): + if key in self.SYNTAX_VALIDATORS: + pattern, exclusions = self.SYNTAX_VALIDATORS[key] + if (scheme or self.scheme) not in exclusions: + m = pattern.match(value) + if not m: + raise MetadataInvalidError("'%s' is an invalid value for " + "the '%s' property" % (value, + key)) + + def __setattr__(self, key, value): + self._validate_value(key, value) + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, _ = mapped[key] + if self._legacy: + if lk is None: + raise NotImplementedError + self._legacy[lk] = value + elif key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + self._data[key] = value + else: + # special cases for PEP 459 + d = self._data.setdefault('extensions', {}) + if key == 'commands': + d['python.commands'] = value + elif key == 'classifiers': + d = d.setdefault('python.details', {}) + d[key] = value + else: + d = d.setdefault('python.exports', {}) + d[key] = value + elif key not in common: + object.__setattr__(self, key, value) + else: + if key == 'keywords': + if isinstance(value, string_types): + value = value.strip() + if value: + value = value.split() + else: + value = [] + if self._legacy: + self._legacy[key] = value + else: + self._data[key] = value + + @property + def name_and_version(self): + return _get_name_and_version(self.name, self.version, True) + + @property + def provides(self): + if self._legacy: + result = self._legacy['Provides-Dist'] + else: + result = 
self._data.setdefault('provides', []) + s = '%s (%s)' % (self.name, self.version) + if s not in result: + result.append(s) + return result + + @provides.setter + def provides(self, value): + if self._legacy: + self._legacy['Provides-Dist'] = value + else: + self._data['provides'] = value + + def get_requirements(self, reqts, extras=None, env=None): + """ + Base method to get dependencies, given a set of extras + to satisfy and an optional environment context. + :param reqts: A list of sometimes-wanted dependencies, + perhaps dependent on extras and environment. + :param extras: A list of optional components being requested. + :param env: An optional environment for marker evaluation. + """ + if self._legacy: + result = reqts + else: + result = [] + extras = get_extras(extras or [], self.extras) + for d in reqts: + if 'extra' not in d and 'environment' not in d: + # unconditional + include = True + else: + if 'extra' not in d: + # Not extra-dependent - only environment-dependent + include = True + else: + include = d.get('extra') in extras + if include: + # Not excluded because of extras, check environment + marker = d.get('environment') + if marker: + include = interpret(marker, env) + if include: + result.extend(d['requires']) + for key in ('build', 'dev', 'test'): + e = ':%s:' % key + if e in extras: + extras.remove(e) + # A recursive call, but it should terminate since 'test' + # has been removed from the extras + reqts = self._data.get('%s_requires' % key, []) + result.extend(self.get_requirements(reqts, extras=extras, + env=env)) + return result + + @property + def dictionary(self): + if self._legacy: + return self._from_legacy() + return self._data + + @property + def dependencies(self): + if self._legacy: + raise NotImplementedError + else: + return extract_by_key(self._data, self.DEPENDENCY_KEYS) + + @dependencies.setter + def dependencies(self, value): + if self._legacy: + raise NotImplementedError + else: + self._data.update(value) + + def _validate_mapping(self, mapping, scheme): + if mapping.get('metadata_version') != self.METADATA_VERSION: + raise MetadataUnrecognizedVersionError() + missing = [] + for key, exclusions in self.MANDATORY_KEYS.items(): + if key not in mapping: + if scheme not in exclusions: + missing.append(key) + if missing: + msg = 'Missing metadata items: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + for k, v in mapping.items(): + self._validate_value(k, v, scheme) + + def validate(self): + if self._legacy: + missing, warnings = self._legacy.check(True) + if missing or warnings: + logger.warning('Metadata: missing: %s, warnings: %s', + missing, warnings) + else: + self._validate_mapping(self._data, self.scheme) + + def todict(self): + if self._legacy: + return self._legacy.todict(True) + else: + result = extract_by_key(self._data, self.INDEX_KEYS) + return result + + def _from_legacy(self): + assert self._legacy and not self._data + result = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + lmd = self._legacy.todict(True) # skip missing ones + for k in ('name', 'version', 'license', 'summary', 'description', + 'classifier'): + if k in lmd: + if k == 'classifier': + nk = 'classifiers' + else: + nk = k + result[nk] = lmd[k] + kw = lmd.get('Keywords', []) + if kw == ['']: + kw = [] + result['keywords'] = kw + keys = (('requires_dist', 'run_requires'), + ('setup_requires_dist', 'build_requires')) + for ok, nk in keys: + if ok in lmd and lmd[ok]: + result[nk] = [{'requires': lmd[ok]}] + result['provides'] = 
self.provides + author = {} + maintainer = {} + return result + + LEGACY_MAPPING = { + 'name': 'Name', + 'version': 'Version', + 'license': 'License', + 'summary': 'Summary', + 'description': 'Description', + 'classifiers': 'Classifier', + } + + def _to_legacy(self): + def process_entries(entries): + reqts = set() + for e in entries: + extra = e.get('extra') + env = e.get('environment') + rlist = e['requires'] + for r in rlist: + if not env and not extra: + reqts.add(r) + else: + marker = '' + if extra: + marker = 'extra == "%s"' % extra + if env: + if marker: + marker = '(%s) and %s' % (env, marker) + else: + marker = env + reqts.add(';'.join((r, marker))) + return reqts + + assert self._data and not self._legacy + result = LegacyMetadata() + nmd = self._data + for nk, ok in self.LEGACY_MAPPING.items(): + if nk in nmd: + result[ok] = nmd[nk] + r1 = process_entries(self.run_requires + self.meta_requires) + r2 = process_entries(self.build_requires + self.dev_requires) + if self.extras: + result['Provides-Extra'] = sorted(self.extras) + result['Requires-Dist'] = sorted(r1) + result['Setup-Requires-Dist'] = sorted(r2) + # TODO: other fields such as contacts + return result + + def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True): + if [path, fileobj].count(None) != 1: + raise ValueError('Exactly one of path and fileobj is needed') + self.validate() + if legacy: + if self._legacy: + legacy_md = self._legacy + else: + legacy_md = self._to_legacy() + if path: + legacy_md.write(path, skip_unknown=skip_unknown) + else: + legacy_md.write_file(fileobj, skip_unknown=skip_unknown) + else: + if self._legacy: + d = self._from_legacy() + else: + d = self._data + if fileobj: + json.dump(d, fileobj, ensure_ascii=True, indent=2, + sort_keys=True) + else: + with codecs.open(path, 'w', 'utf-8') as f: + json.dump(d, f, ensure_ascii=True, indent=2, + sort_keys=True) + + def add_requirements(self, requirements): + if self._legacy: + self._legacy.add_requirements(requirements) + else: + run_requires = self._data.setdefault('run_requires', []) + always = None + for entry in run_requires: + if 'environment' not in entry and 'extra' not in entry: + always = entry + break + if always is None: + always = { 'requires': requirements } + run_requires.insert(0, always) + else: + rset = set(always['requires']) | set(requirements) + always['requires'] = sorted(rset) + + def __repr__(self): + name = self.name or '(no name)' + version = self.version or 'no version' + return '<%s %s %s (%s)>' % (self.__class__.__name__, + self.metadata_version, name, version) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyc new file mode 100644 index 0000000..ecd9074 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.py new file mode 100644 index 0000000..1884016 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.py @@ -0,0 +1,355 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +from __future__ import unicode_literals + +import bisect +import io +import logging +import os +import pkgutil +import shutil +import sys +import types +import zipimport + +from . import DistlibException +from .util import cached_property, get_cache_base, path_to_cache_dir, Cache + +logger = logging.getLogger(__name__) + + +cache = None # created when needed + + +class ResourceCache(Cache): + def __init__(self, base=None): + if base is None: + # Use native string to avoid issues on 2.x: see Python #20140. + base = os.path.join(get_cache_base(), str('resource-cache')) + super(ResourceCache, self).__init__(base) + + def is_stale(self, resource, path): + """ + Is the cache stale for the given resource? + + :param resource: The :class:`Resource` being cached. + :param path: The path of the resource in the cache. + :return: True if the cache is stale. + """ + # Cache invalidation is a hard problem :-) + return True + + def get(self, resource): + """ + Get a resource into the cache, + + :param resource: A :class:`Resource` instance. + :return: The pathname of the resource in the cache. + """ + prefix, path = resource.finder.get_cache_info(resource) + if prefix is None: + result = path + else: + result = os.path.join(self.base, self.prefix_to_dir(prefix), path) + dirname = os.path.dirname(result) + if not os.path.isdir(dirname): + os.makedirs(dirname) + if not os.path.exists(result): + stale = True + else: + stale = self.is_stale(resource, path) + if stale: + # write the bytes of the resource to the cache location + with open(result, 'wb') as f: + f.write(resource.bytes) + return result + + +class ResourceBase(object): + def __init__(self, finder, name): + self.finder = finder + self.name = name + + +class Resource(ResourceBase): + """ + A class representing an in-package resource, such as a data file. This is + not normally instantiated by user code, but rather by a + :class:`ResourceFinder` which manages the resource. + """ + is_container = False # Backwards compatibility + + def as_stream(self): + """ + Get the resource as a stream. + + This is not a property to make it obvious that it returns a new stream + each time. + """ + return self.finder.get_stream(self) + + @cached_property + def file_path(self): + global cache + if cache is None: + cache = ResourceCache() + return cache.get(self) + + @cached_property + def bytes(self): + return self.finder.get_bytes(self) + + @cached_property + def size(self): + return self.finder.get_size(self) + + +class ResourceContainer(ResourceBase): + is_container = True # Backwards compatibility + + @cached_property + def resources(self): + return self.finder.get_resources(self) + + +class ResourceFinder(object): + """ + Resource finder for file system resources. 
+ """ + + if sys.platform.startswith('java'): + skipped_extensions = ('.pyc', '.pyo', '.class') + else: + skipped_extensions = ('.pyc', '.pyo') + + def __init__(self, module): + self.module = module + self.loader = getattr(module, '__loader__', None) + self.base = os.path.dirname(getattr(module, '__file__', '')) + + def _adjust_path(self, path): + return os.path.realpath(path) + + def _make_path(self, resource_name): + # Issue #50: need to preserve type of path on Python 2.x + # like os.path._get_sep + if isinstance(resource_name, bytes): # should only happen on 2.x + sep = b'/' + else: + sep = '/' + parts = resource_name.split(sep) + parts.insert(0, self.base) + result = os.path.join(*parts) + return self._adjust_path(result) + + def _find(self, path): + return os.path.exists(path) + + def get_cache_info(self, resource): + return None, resource.path + + def find(self, resource_name): + path = self._make_path(resource_name) + if not self._find(path): + result = None + else: + if self._is_directory(path): + result = ResourceContainer(self, resource_name) + else: + result = Resource(self, resource_name) + result.path = path + return result + + def get_stream(self, resource): + return open(resource.path, 'rb') + + def get_bytes(self, resource): + with open(resource.path, 'rb') as f: + return f.read() + + def get_size(self, resource): + return os.path.getsize(resource.path) + + def get_resources(self, resource): + def allowed(f): + return (f != '__pycache__' and not + f.endswith(self.skipped_extensions)) + return set([f for f in os.listdir(resource.path) if allowed(f)]) + + def is_container(self, resource): + return self._is_directory(resource.path) + + _is_directory = staticmethod(os.path.isdir) + + def iterator(self, resource_name): + resource = self.find(resource_name) + if resource is not None: + todo = [resource] + while todo: + resource = todo.pop(0) + yield resource + if resource.is_container: + rname = resource.name + for name in resource.resources: + if not rname: + new_name = name + else: + new_name = '/'.join([rname, name]) + child = self.find(new_name) + if child.is_container: + todo.append(child) + else: + yield child + + +class ZipResourceFinder(ResourceFinder): + """ + Resource finder for resources in .zip files. 
+ """ + def __init__(self, module): + super(ZipResourceFinder, self).__init__(module) + archive = self.loader.archive + self.prefix_len = 1 + len(archive) + # PyPy doesn't have a _files attr on zipimporter, and you can't set one + if hasattr(self.loader, '_files'): + self._files = self.loader._files + else: + self._files = zipimport._zip_directory_cache[archive] + self.index = sorted(self._files) + + def _adjust_path(self, path): + return path + + def _find(self, path): + path = path[self.prefix_len:] + if path in self._files: + result = True + else: + if path and path[-1] != os.sep: + path = path + os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + if not result: + logger.debug('_find failed: %r %r', path, self.loader.prefix) + else: + logger.debug('_find worked: %r %r', path, self.loader.prefix) + return result + + def get_cache_info(self, resource): + prefix = self.loader.archive + path = resource.path[1 + len(prefix):] + return prefix, path + + def get_bytes(self, resource): + return self.loader.get_data(resource.path) + + def get_stream(self, resource): + return io.BytesIO(self.get_bytes(resource)) + + def get_size(self, resource): + path = resource.path[self.prefix_len:] + return self._files[path][3] + + def get_resources(self, resource): + path = resource.path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + plen = len(path) + result = set() + i = bisect.bisect(self.index, path) + while i < len(self.index): + if not self.index[i].startswith(path): + break + s = self.index[i][plen:] + result.add(s.split(os.sep, 1)[0]) # only immediate children + i += 1 + return result + + def _is_directory(self, path): + path = path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + return result + +_finder_registry = { + type(None): ResourceFinder, + zipimport.zipimporter: ZipResourceFinder +} + +try: + # In Python 3.6, _frozen_importlib -> _frozen_importlib_external + try: + import _frozen_importlib_external as _fi + except ImportError: + import _frozen_importlib as _fi + _finder_registry[_fi.SourceFileLoader] = ResourceFinder + _finder_registry[_fi.FileFinder] = ResourceFinder + del _fi +except (ImportError, AttributeError): + pass + + +def register_finder(loader, finder_maker): + _finder_registry[type(loader)] = finder_maker + +_finder_cache = {} + + +def finder(package): + """ + Return a resource finder for a package. + :param package: The name of the package. + :return: A :class:`ResourceFinder` instance for the package. + """ + if package in _finder_cache: + result = _finder_cache[package] + else: + if package not in sys.modules: + __import__(package) + module = sys.modules[package] + path = getattr(module, '__path__', None) + if path is None: + raise DistlibException('You cannot get a finder for a module, ' + 'only for a package') + loader = getattr(module, '__loader__', None) + finder_maker = _finder_registry.get(type(loader)) + if finder_maker is None: + raise DistlibException('Unable to locate finder for %r' % package) + result = finder_maker(module) + _finder_cache[package] = result + return result + + +_dummy_module = types.ModuleType(str('__dummy__')) + + +def finder_for_path(path): + """ + Return a resource finder for a path, which should represent a container. + + :param path: The path. 
+ :return: A :class:`ResourceFinder` instance for the path. + """ + result = None + # calls any path hooks, gets importer into cache + pkgutil.get_importer(path) + loader = sys.path_importer_cache.get(path) + finder = _finder_registry.get(type(loader)) + if finder: + module = _dummy_module + module.__file__ = os.path.join(path, '') + module.__loader__ = loader + result = finder(module) + return result diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyc new file mode 100644 index 0000000..881514e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py new file mode 100644 index 0000000..8e22cb9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py @@ -0,0 +1,417 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2015 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from io import BytesIO +import logging +import os +import re +import struct +import sys + +from .compat import sysconfig, detect_encoding, ZipFile +from .resources import finder +from .util import (FileOperator, get_export_entry, convert_path, + get_executable, in_venv) + +logger = logging.getLogger(__name__) + +_DEFAULT_MANIFEST = ''' +<?xml version="1.0" encoding="UTF-8" standalone="yes"?> +<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> + <assemblyIdentity version="1.0.0.0" + processorArchitecture="X86" + name="%s" + type="win32"/> + + <!-- Identify the application security requirements. --> + <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> + <security> + <requestedPrivileges> + <requestedExecutionLevel level="asInvoker" uiAccess="false"/> + </requestedPrivileges> + </security> + </trustInfo> +</assembly>'''.strip() + +# check if Python is called on the first line with this expression +FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') +SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*- +if __name__ == '__main__': + import sys, re + + def _resolve(module, func): + __import__(module) + mod = sys.modules[module] + parts = func.split('.') + result = getattr(mod, parts.pop(0)) + for p in parts: + result = getattr(result, p) + return result + + try: + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + + func = _resolve('%(module)s', '%(func)s') + rc = func() # None interpreted as 0 + except Exception as e: # only supporting Python >= 2.6 + sys.stderr.write('%%s\n' %% e) + rc = 1 + sys.exit(rc) +''' + + +def _enquote_executable(executable): + if ' ' in executable: + # make sure we quote only the executable in case of env + # for example /usr/bin/env "/dir with spaces/bin/jython" + # instead of "/usr/bin/env /dir with spaces/bin/jython" + # otherwise whole + if executable.startswith('/usr/bin/env '): + env, _executable = executable.split(' ', 1) + if ' ' in _executable and not _executable.startswith('"'): + executable = '%s "%s"' % (env, _executable) + else: + if not executable.startswith('"'): + executable = '"%s"' % executable + return executable + + +class ScriptMaker(object): + """ + A class to copy or create scripts from source scripts or callable + specifications. 
+ """ + script_template = SCRIPT_TEMPLATE + + executable = None # for shebangs + + def __init__(self, source_dir, target_dir, add_launchers=True, + dry_run=False, fileop=None): + self.source_dir = source_dir + self.target_dir = target_dir + self.add_launchers = add_launchers + self.force = False + self.clobber = False + # It only makes sense to set mode bits on POSIX. + self.set_mode = (os.name == 'posix') or (os.name == 'java' and + os._name == 'posix') + self.variants = set(('', 'X.Y')) + self._fileop = fileop or FileOperator(dry_run) + + self._is_nt = os.name == 'nt' or ( + os.name == 'java' and os._name == 'nt') + + def _get_alternate_executable(self, executable, options): + if options.get('gui', False) and self._is_nt: # pragma: no cover + dn, fn = os.path.split(executable) + fn = fn.replace('python', 'pythonw') + executable = os.path.join(dn, fn) + return executable + + if sys.platform.startswith('java'): # pragma: no cover + def _is_shell(self, executable): + """ + Determine if the specified executable is a script + (contains a #! line) + """ + try: + with open(executable) as fp: + return fp.read(2) == '#!' + except (OSError, IOError): + logger.warning('Failed to open %s', executable) + return False + + def _fix_jython_executable(self, executable): + if self._is_shell(executable): + # Workaround for Jython is not needed on Linux systems. + import java + + if java.lang.System.getProperty('os.name') == 'Linux': + return executable + elif executable.lower().endswith('jython.exe'): + # Use wrapper exe for Jython on Windows + return executable + return '/usr/bin/env %s' % executable + + def _build_shebang(self, executable, post_interp): + """ + Build a shebang line. In the simple case (on Windows, or a shebang line + which is not too long or contains spaces) use a simple formulation for + the shebang. Otherwise, use /bin/sh as the executable, with a contrived + shebang which allows the script to run either under Python or sh, using + suitable quoting. Thanks to Harald Nordgren for his input. + + See also: http://www.in-ulm.de/~mascheck/various/shebang/#length + https://hg.mozilla.org/mozilla-central/file/tip/mach + """ + if os.name != 'posix': + simple_shebang = True + else: + # Add 3 for '#!' prefix and newline suffix. + shebang_length = len(executable) + len(post_interp) + 3 + if sys.platform == 'darwin': + max_shebang_length = 512 + else: + max_shebang_length = 127 + simple_shebang = ((b' ' not in executable) and + (shebang_length <= max_shebang_length)) + + if simple_shebang: + result = b'#!' 
+ executable + post_interp + b'\n' + else: + result = b'#!/bin/sh\n' + result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' + result += b"' '''" + return result + + def _get_shebang(self, encoding, post_interp=b'', options=None): + enquote = True + if self.executable: + executable = self.executable + enquote = False # assume this will be taken care of + elif not sysconfig.is_python_build(): + executable = get_executable() + elif in_venv(): # pragma: no cover + executable = os.path.join(sysconfig.get_path('scripts'), + 'python%s' % sysconfig.get_config_var('EXE')) + else: # pragma: no cover + executable = os.path.join( + sysconfig.get_config_var('BINDIR'), + 'python%s%s' % (sysconfig.get_config_var('VERSION'), + sysconfig.get_config_var('EXE'))) + if options: + executable = self._get_alternate_executable(executable, options) + + if sys.platform.startswith('java'): # pragma: no cover + executable = self._fix_jython_executable(executable) + # Normalise case for Windows + executable = os.path.normcase(executable) + # If the user didn't specify an executable, it may be necessary to + # cater for executable paths with spaces (not uncommon on Windows) + if enquote: + executable = _enquote_executable(executable) + # Issue #51: don't use fsencode, since we later try to + # check that the shebang is decodable using utf-8. + executable = executable.encode('utf-8') + # in case of IronPython, play safe and enable frames support + if (sys.platform == 'cli' and '-X:Frames' not in post_interp + and '-X:FullFrames' not in post_interp): # pragma: no cover + post_interp += b' -X:Frames' + shebang = self._build_shebang(executable, post_interp) + # Python parser starts to read a script using UTF-8 until + # it gets a #coding:xxx cookie. The shebang has to be the + # first line of a file, the #coding:xxx cookie cannot be + # written before. So the shebang has to be decodable from + # UTF-8. + try: + shebang.decode('utf-8') + except UnicodeDecodeError: # pragma: no cover + raise ValueError( + 'The shebang (%r) is not decodable from utf-8' % shebang) + # If the script is encoded to a custom encoding (use a + # #coding:xxx cookie), the shebang has to be decodable from + # the script encoding too. 
+ if encoding != 'utf-8': + try: + shebang.decode(encoding) + except UnicodeDecodeError: # pragma: no cover + raise ValueError( + 'The shebang (%r) is not decodable ' + 'from the script encoding (%r)' % (shebang, encoding)) + return shebang + + def _get_script_text(self, entry): + return self.script_template % dict(module=entry.prefix, + func=entry.suffix) + + manifest = _DEFAULT_MANIFEST + + def get_manifest(self, exename): + base = os.path.basename(exename) + return self.manifest % base + + def _write_script(self, names, shebang, script_bytes, filenames, ext): + use_launcher = self.add_launchers and self._is_nt + linesep = os.linesep.encode('utf-8') + if not shebang.endswith(linesep): + shebang += linesep + if not use_launcher: + script_bytes = shebang + script_bytes + else: # pragma: no cover + if ext == 'py': + launcher = self._get_launcher('t') + else: + launcher = self._get_launcher('w') + stream = BytesIO() + with ZipFile(stream, 'w') as zf: + zf.writestr('__main__.py', script_bytes) + zip_data = stream.getvalue() + script_bytes = launcher + shebang + zip_data + for name in names: + outname = os.path.join(self.target_dir, name) + if use_launcher: # pragma: no cover + n, e = os.path.splitext(outname) + if e.startswith('.py'): + outname = n + outname = '%s.exe' % outname + try: + self._fileop.write_binary_file(outname, script_bytes) + except Exception: + # Failed writing an executable - it might be in use. + logger.warning('Failed to write executable - trying to ' + 'use .deleteme logic') + dfname = '%s.deleteme' % outname + if os.path.exists(dfname): + os.remove(dfname) # Not allowed to fail here + os.rename(outname, dfname) # nor here + self._fileop.write_binary_file(outname, script_bytes) + logger.debug('Able to replace executable using ' + '.deleteme logic') + try: + os.remove(dfname) + except Exception: + pass # still in use - ignore error + else: + if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover + outname = '%s.%s' % (outname, ext) + if os.path.exists(outname) and not self.clobber: + logger.warning('Skipping existing file %s', outname) + continue + self._fileop.write_binary_file(outname, script_bytes) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + + def _make_script(self, entry, filenames, options=None): + post_interp = b'' + if options: + args = options.get('interpreter_args', []) + if args: + args = ' %s' % ' '.join(args) + post_interp = args.encode('utf-8') + shebang = self._get_shebang('utf-8', post_interp, options=options) + script = self._get_script_text(entry).encode('utf-8') + name = entry.name + scriptnames = set() + if '' in self.variants: + scriptnames.add(name) + if 'X' in self.variants: + scriptnames.add('%s%s' % (name, sys.version[0])) + if 'X.Y' in self.variants: + scriptnames.add('%s-%s' % (name, sys.version[:3])) + if options and options.get('gui', False): + ext = 'pyw' + else: + ext = 'py' + self._write_script(scriptnames, shebang, script, filenames, ext) + + def _copy_script(self, script, filenames): + adjust = False + script = os.path.join(self.source_dir, convert_path(script)) + outname = os.path.join(self.target_dir, os.path.basename(script)) + if not self.force and not self._fileop.newer(script, outname): + logger.debug('not copying %s (up-to-date)', script) + return + + # Always open the file, but ignore failures in dry-run mode -- + # that way, we'll get accurate feedback if we can read the + # script. 
+ try: + f = open(script, 'rb') + except IOError: # pragma: no cover + if not self.dry_run: + raise + f = None + else: + first_line = f.readline() + if not first_line: # pragma: no cover + logger.warning('%s: %s is an empty file (skipping)', + self.get_command_name(), script) + return + + match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) + if match: + adjust = True + post_interp = match.group(1) or b'' + + if not adjust: + if f: + f.close() + self._fileop.copy_file(script, outname) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + else: + logger.info('copying and adjusting %s -> %s', script, + self.target_dir) + if not self._fileop.dry_run: + encoding, lines = detect_encoding(f.readline) + f.seek(0) + shebang = self._get_shebang(encoding, post_interp) + if b'pythonw' in first_line: # pragma: no cover + ext = 'pyw' + else: + ext = 'py' + n = os.path.basename(outname) + self._write_script([n], shebang, f.read(), filenames, ext) + if f: + f.close() + + @property + def dry_run(self): + return self._fileop.dry_run + + @dry_run.setter + def dry_run(self, value): + self._fileop.dry_run = value + + if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover + # Executable launcher support. + # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ + + def _get_launcher(self, kind): + if struct.calcsize('P') == 8: # 64-bit + bits = '64' + else: + bits = '32' + name = '%s%s.exe' % (kind, bits) + # Issue 31: don't hardcode an absolute package name, but + # determine it relative to the current package + distlib_package = __name__.rsplit('.', 1)[0] + result = finder(distlib_package).find(name).bytes + return result + + # Public API follows + + def make(self, specification, options=None): + """ + Make a script. + + :param specification: The specification, which is either a valid export + entry specification (to make a script from a + callable) or a filename (to make a script by + copying from a source location). + :param options: A dictionary of options controlling script generation. + :return: A list of all absolute pathnames written to. + """ + filenames = [] + entry = get_export_entry(specification) + if entry is None: + self._copy_script(specification, filenames) + else: + self._make_script(entry, filenames, options=options) + return filenames + + def make_multiple(self, specifications, options=None): + """ + Take a list of specifications and make scripts from them, + :param specifications: A list of specifications. 
+ :return: A list of all absolute pathnames written to, + """ + filenames = [] + for specification in specifications: + filenames.extend(self.make(specification, options)) + return filenames diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyc new file mode 100644 index 0000000..7aa09af Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t32.exe b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t32.exe new file mode 100644 index 0000000..a09d926 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t32.exe differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t64.exe b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t64.exe new file mode 100644 index 0000000..9da9b40 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t64.exe differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.py new file mode 100644 index 0000000..9d4bfd3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.py @@ -0,0 +1,1756 @@ +# +# Copyright (C) 2012-2017 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import codecs +from collections import deque +import contextlib +import csv +from glob import iglob as std_iglob +import io +import json +import logging +import os +import py_compile +import re +import socket +try: + import ssl +except ImportError: # pragma: no cover + ssl = None +import subprocess +import sys +import tarfile +import tempfile +import textwrap + +try: + import threading +except ImportError: # pragma: no cover + import dummy_threading as threading +import time + +from . import DistlibException +from .compat import (string_types, text_type, shutil, raw_input, StringIO, + cache_from_source, urlopen, urljoin, httplib, xmlrpclib, + splittype, HTTPHandler, BaseConfigurator, valid_ident, + Container, configparser, URLError, ZipFile, fsdecode, + unquote, urlparse) + +logger = logging.getLogger(__name__) + +# +# Requirement parsing code as per PEP 508 +# + +IDENTIFIER = re.compile(r'^([\w\.-]+)\s*') +VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*') +COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*') +MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*') +OR = re.compile(r'^or\b\s*') +AND = re.compile(r'^and\b\s*') +NON_SPACE = re.compile(r'(\S+)\s*') +STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)') + + +def parse_marker(marker_string): + """ + Parse a marker string and return a dictionary containing a marker expression. + + The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in + the expression grammar, or strings. A string contained in quotes is to be + interpreted as a literal string, and a string not contained in quotes is a + variable (such as os_name). 
+ """ + def marker_var(remaining): + # either identifier, or literal string + m = IDENTIFIER.match(remaining) + if m: + result = m.groups()[0] + remaining = remaining[m.end():] + elif not remaining: + raise SyntaxError('unexpected end of input') + else: + q = remaining[0] + if q not in '\'"': + raise SyntaxError('invalid expression: %s' % remaining) + oq = '\'"'.replace(q, '') + remaining = remaining[1:] + parts = [q] + while remaining: + # either a string chunk, or oq, or q to terminate + if remaining[0] == q: + break + elif remaining[0] == oq: + parts.append(oq) + remaining = remaining[1:] + else: + m = STRING_CHUNK.match(remaining) + if not m: + raise SyntaxError('error in string literal: %s' % remaining) + parts.append(m.groups()[0]) + remaining = remaining[m.end():] + else: + s = ''.join(parts) + raise SyntaxError('unterminated string: %s' % s) + parts.append(q) + result = ''.join(parts) + remaining = remaining[1:].lstrip() # skip past closing quote + return result, remaining + + def marker_expr(remaining): + if remaining and remaining[0] == '(': + result, remaining = marker(remaining[1:].lstrip()) + if remaining[0] != ')': + raise SyntaxError('unterminated parenthesis: %s' % remaining) + remaining = remaining[1:].lstrip() + else: + lhs, remaining = marker_var(remaining) + while remaining: + m = MARKER_OP.match(remaining) + if not m: + break + op = m.groups()[0] + remaining = remaining[m.end():] + rhs, remaining = marker_var(remaining) + lhs = {'op': op, 'lhs': lhs, 'rhs': rhs} + result = lhs + return result, remaining + + def marker_and(remaining): + lhs, remaining = marker_expr(remaining) + while remaining: + m = AND.match(remaining) + if not m: + break + remaining = remaining[m.end():] + rhs, remaining = marker_expr(remaining) + lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs} + return lhs, remaining + + def marker(remaining): + lhs, remaining = marker_and(remaining) + while remaining: + m = OR.match(remaining) + if not m: + break + remaining = remaining[m.end():] + rhs, remaining = marker_and(remaining) + lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs} + return lhs, remaining + + return marker(marker_string) + + +def parse_requirement(req): + """ + Parse a requirement passed in as a string. Return a Container + whose attributes contain the various parts of the requirement. + """ + remaining = req.strip() + if not remaining or remaining.startswith('#'): + return None + m = IDENTIFIER.match(remaining) + if not m: + raise SyntaxError('name expected: %s' % remaining) + distname = m.groups()[0] + remaining = remaining[m.end():] + extras = mark_expr = versions = uri = None + if remaining and remaining[0] == '[': + i = remaining.find(']', 1) + if i < 0: + raise SyntaxError('unterminated extra: %s' % remaining) + s = remaining[1:i] + remaining = remaining[i + 1:].lstrip() + extras = [] + while s: + m = IDENTIFIER.match(s) + if not m: + raise SyntaxError('malformed extra: %s' % s) + extras.append(m.groups()[0]) + s = s[m.end():] + if not s: + break + if s[0] != ',': + raise SyntaxError('comma expected in extras: %s' % s) + s = s[1:].lstrip() + if not extras: + extras = None + if remaining: + if remaining[0] == '@': + # it's a URI + remaining = remaining[1:].lstrip() + m = NON_SPACE.match(remaining) + if not m: + raise SyntaxError('invalid URI: %s' % remaining) + uri = m.groups()[0] + t = urlparse(uri) + # there are issues with Python and URL parsing, so this test + # is a bit crude. See bpo-20271, bpo-23505. 
Python doesn't + # always parse invalid URLs correctly - it should raise + # exceptions for malformed URLs + if not (t.scheme and t.netloc): + raise SyntaxError('Invalid URL: %s' % uri) + remaining = remaining[m.end():].lstrip() + else: + + def get_versions(ver_remaining): + """ + Return a list of operator, version tuples if any are + specified, else None. + """ + m = COMPARE_OP.match(ver_remaining) + versions = None + if m: + versions = [] + while True: + op = m.groups()[0] + ver_remaining = ver_remaining[m.end():] + m = VERSION_IDENTIFIER.match(ver_remaining) + if not m: + raise SyntaxError('invalid version: %s' % ver_remaining) + v = m.groups()[0] + versions.append((op, v)) + ver_remaining = ver_remaining[m.end():] + if not ver_remaining or ver_remaining[0] != ',': + break + ver_remaining = ver_remaining[1:].lstrip() + m = COMPARE_OP.match(ver_remaining) + if not m: + raise SyntaxError('invalid constraint: %s' % ver_remaining) + if not versions: + versions = None + return versions, ver_remaining + + if remaining[0] != '(': + versions, remaining = get_versions(remaining) + else: + i = remaining.find(')', 1) + if i < 0: + raise SyntaxError('unterminated parenthesis: %s' % remaining) + s = remaining[1:i] + remaining = remaining[i + 1:].lstrip() + # As a special diversion from PEP 508, allow a version number + # a.b.c in parentheses as a synonym for ~= a.b.c (because this + # is allowed in earlier PEPs) + if COMPARE_OP.match(s): + versions, _ = get_versions(s) + else: + m = VERSION_IDENTIFIER.match(s) + if not m: + raise SyntaxError('invalid constraint: %s' % s) + v = m.groups()[0] + s = s[m.end():].lstrip() + if s: + raise SyntaxError('invalid constraint: %s' % s) + versions = [('~=', v)] + + if remaining: + if remaining[0] != ';': + raise SyntaxError('invalid requirement: %s' % remaining) + remaining = remaining[1:].lstrip() + + mark_expr, remaining = parse_marker(remaining) + + if remaining and remaining[0] != '#': + raise SyntaxError('unexpected trailing data: %s' % remaining) + + if not versions: + rs = distname + else: + rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions])) + return Container(name=distname, extras=extras, constraints=versions, + marker=mark_expr, url=uri, requirement=rs) + + +def get_resources_dests(resources_root, rules): + """Find destinations for resources files""" + + def get_rel_path(root, path): + # normalizes and returns a lstripped-/-separated path + root = root.replace(os.path.sep, '/') + path = path.replace(os.path.sep, '/') + assert path.startswith(root) + return path[len(root):].lstrip('/') + + destinations = {} + for base, suffix, dest in rules: + prefix = os.path.join(resources_root, base) + for abs_base in iglob(prefix): + abs_glob = os.path.join(abs_base, suffix) + for abs_path in iglob(abs_glob): + resource_file = get_rel_path(resources_root, abs_path) + if dest is None: # remove the entry if it was here + destinations.pop(resource_file, None) + else: + rel_path = get_rel_path(abs_base, abs_path) + rel_dest = dest.replace(os.path.sep, '/').rstrip('/') + destinations[resource_file] = rel_dest + '/' + rel_path + return destinations + + +def in_venv(): + if hasattr(sys, 'real_prefix'): + # virtualenv venvs + result = True + else: + # PEP 405 venvs + result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix) + return result + + +def get_executable(): +# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as +# changes to the stub launcher mean that sys.executable always points +# to the stub on OS X +# if sys.platform == 
'darwin' and ('__PYVENV_LAUNCHER__' +# in os.environ): +# result = os.environ['__PYVENV_LAUNCHER__'] +# else: +# result = sys.executable +# return result + result = os.path.normcase(sys.executable) + if not isinstance(result, text_type): + result = fsdecode(result) + return result + + +def proceed(prompt, allowed_chars, error_prompt=None, default=None): + p = prompt + while True: + s = raw_input(p) + p = prompt + if not s and default: + s = default + if s: + c = s[0].lower() + if c in allowed_chars: + break + if error_prompt: + p = '%c: %s\n%s' % (c, error_prompt, prompt) + return c + + +def extract_by_key(d, keys): + if isinstance(keys, string_types): + keys = keys.split() + result = {} + for key in keys: + if key in d: + result[key] = d[key] + return result + +def read_exports(stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + # Try to load as JSON, falling back on legacy format + data = stream.read() + stream = StringIO(data) + try: + jdata = json.load(stream) + result = jdata['extensions']['python.exports']['exports'] + for group, entries in result.items(): + for k, v in entries.items(): + s = '%s = %s' % (k, v) + entry = get_export_entry(s) + assert entry is not None + entries[k] = entry + return result + except Exception: + stream.seek(0, 0) + + def read_stream(cp, stream): + if hasattr(cp, 'read_file'): + cp.read_file(stream) + else: + cp.readfp(stream) + + cp = configparser.ConfigParser() + try: + read_stream(cp, stream) + except configparser.MissingSectionHeaderError: + stream.close() + data = textwrap.dedent(data) + stream = StringIO(data) + read_stream(cp, stream) + + result = {} + for key in cp.sections(): + result[key] = entries = {} + for name, value in cp.items(key): + s = '%s = %s' % (name, value) + entry = get_export_entry(s) + assert entry is not None + #entry.dist = self + entries[name] = entry + return result + + +def write_exports(exports, stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getwriter('utf-8')(stream) + cp = configparser.ConfigParser() + for k, v in exports.items(): + # TODO check k, v for valid values + cp.add_section(k) + for entry in v.values(): + if entry.suffix is None: + s = entry.prefix + else: + s = '%s:%s' % (entry.prefix, entry.suffix) + if entry.flags: + s = '%s [%s]' % (s, ', '.join(entry.flags)) + cp.set(k, entry.name, s) + cp.write(stream) + + +@contextlib.contextmanager +def tempdir(): + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + +@contextlib.contextmanager +def chdir(d): + cwd = os.getcwd() + try: + os.chdir(d) + yield + finally: + os.chdir(cwd) + + +@contextlib.contextmanager +def socket_timeout(seconds=15): + cto = socket.getdefaulttimeout() + try: + socket.setdefaulttimeout(seconds) + yield + finally: + socket.setdefaulttimeout(cto) + + +class cached_property(object): + def __init__(self, func): + self.func = func + #for attr in ('__name__', '__module__', '__doc__'): + # setattr(self, attr, getattr(func, attr, None)) + + def __get__(self, obj, cls=None): + if obj is None: + return self + value = self.func(obj) + object.__setattr__(obj, self.func.__name__, value) + #obj.__dict__[self.func.__name__] = value = self.func(obj) + return value + +def convert_path(pathname): + """Return 'pathname' as a name that will work on the native filesystem. + + The path is split on '/' and put back together again using the current + directory separator. 
Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. + """ + if os.sep == '/': + return pathname + if not pathname: + return pathname + if pathname[0] == '/': + raise ValueError("path '%s' cannot be absolute" % pathname) + if pathname[-1] == '/': + raise ValueError("path '%s' cannot end with '/'" % pathname) + + paths = pathname.split('/') + while os.curdir in paths: + paths.remove(os.curdir) + if not paths: + return os.curdir + return os.path.join(*paths) + + +class FileOperator(object): + def __init__(self, dry_run=False): + self.dry_run = dry_run + self.ensured = set() + self._init_record() + + def _init_record(self): + self.record = False + self.files_written = set() + self.dirs_created = set() + + def record_as_written(self, path): + if self.record: + self.files_written.add(path) + + def newer(self, source, target): + """Tell if the target is newer than the source. + + Returns true if 'source' exists and is more recently modified than + 'target', or if 'source' exists and 'target' doesn't. + + Returns false if both exist and 'target' is the same age or younger + than 'source'. Raise PackagingFileError if 'source' does not exist. + + Note that this test is not very accurate: files created in the same + second will have the same "age". + """ + if not os.path.exists(source): + raise DistlibException("file '%r' does not exist" % + os.path.abspath(source)) + if not os.path.exists(target): + return True + + return os.stat(source).st_mtime > os.stat(target).st_mtime + + def copy_file(self, infile, outfile, check=True): + """Copy a file respecting dry-run and force flags. + """ + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying %s to %s', infile, outfile) + if not self.dry_run: + msg = None + if check: + if os.path.islink(outfile): + msg = '%s is a symlink' % outfile + elif os.path.exists(outfile) and not os.path.isfile(outfile): + msg = '%s is a non-regular file' % outfile + if msg: + raise ValueError(msg + ' which would be overwritten') + shutil.copyfile(infile, outfile) + self.record_as_written(outfile) + + def copy_stream(self, instream, outfile, encoding=None): + assert not os.path.isdir(outfile) + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying stream %s to %s', instream, outfile) + if not self.dry_run: + if encoding is None: + outstream = open(outfile, 'wb') + else: + outstream = codecs.open(outfile, 'w', encoding=encoding) + try: + shutil.copyfileobj(instream, outstream) + finally: + outstream.close() + self.record_as_written(outfile) + + def write_binary_file(self, path, data): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + if os.path.exists(path): + os.remove(path) + with open(path, 'wb') as f: + f.write(data) + self.record_as_written(path) + + def write_text_file(self, path, data, encoding): + self.write_binary_file(path, data.encode(encoding)) + + def set_mode(self, bits, mask, files): + if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'): + # Set the executable bits (owner, group, and world) on + # all the files specified. 
+ for f in files: + if self.dry_run: + logger.info("changing mode of %s", f) + else: + mode = (os.stat(f).st_mode | bits) & mask + logger.info("changing mode of %s to %o", f, mode) + os.chmod(f, mode) + + set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) + + def ensure_dir(self, path): + path = os.path.abspath(path) + if path not in self.ensured and not os.path.exists(path): + self.ensured.add(path) + d, f = os.path.split(path) + self.ensure_dir(d) + logger.info('Creating %s' % path) + if not self.dry_run: + os.mkdir(path) + if self.record: + self.dirs_created.add(path) + + def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False): + dpath = cache_from_source(path, not optimize) + logger.info('Byte-compiling %s to %s', path, dpath) + if not self.dry_run: + if force or self.newer(path, dpath): + if not prefix: + diagpath = None + else: + assert path.startswith(prefix) + diagpath = path[len(prefix):] + compile_kwargs = {} + if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'): + compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH + py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error + self.record_as_written(dpath) + return dpath + + def ensure_removed(self, path): + if os.path.exists(path): + if os.path.isdir(path) and not os.path.islink(path): + logger.debug('Removing directory tree at %s', path) + if not self.dry_run: + shutil.rmtree(path) + if self.record: + if path in self.dirs_created: + self.dirs_created.remove(path) + else: + if os.path.islink(path): + s = 'link' + else: + s = 'file' + logger.debug('Removing %s %s', s, path) + if not self.dry_run: + os.remove(path) + if self.record: + if path in self.files_written: + self.files_written.remove(path) + + def is_writable(self, path): + result = False + while not result: + if os.path.exists(path): + result = os.access(path, os.W_OK) + break + parent = os.path.dirname(path) + if parent == path: + break + path = parent + return result + + def commit(self): + """ + Commit recorded changes, turn off recording, return + changes. 
+ """ + assert self.record + result = self.files_written, self.dirs_created + self._init_record() + return result + + def rollback(self): + if not self.dry_run: + for f in list(self.files_written): + if os.path.exists(f): + os.remove(f) + # dirs should all be empty now, except perhaps for + # __pycache__ subdirs + # reverse so that subdirs appear before their parents + dirs = sorted(self.dirs_created, reverse=True) + for d in dirs: + flist = os.listdir(d) + if flist: + assert flist == ['__pycache__'] + sd = os.path.join(d, flist[0]) + os.rmdir(sd) + os.rmdir(d) # should fail if non-empty + self._init_record() + +def resolve(module_name, dotted_path): + if module_name in sys.modules: + mod = sys.modules[module_name] + else: + mod = __import__(module_name) + if dotted_path is None: + result = mod + else: + parts = dotted_path.split('.') + result = getattr(mod, parts.pop(0)) + for p in parts: + result = getattr(result, p) + return result + + +class ExportEntry(object): + def __init__(self, name, prefix, suffix, flags): + self.name = name + self.prefix = prefix + self.suffix = suffix + self.flags = flags + + @cached_property + def value(self): + return resolve(self.prefix, self.suffix) + + def __repr__(self): # pragma: no cover + return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, + self.suffix, self.flags) + + def __eq__(self, other): + if not isinstance(other, ExportEntry): + result = False + else: + result = (self.name == other.name and + self.prefix == other.prefix and + self.suffix == other.suffix and + self.flags == other.flags) + return result + + __hash__ = object.__hash__ + + +ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+) + \s*=\s*(?P<callable>(\w+)([:\.]\w+)*) + \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? + ''', re.VERBOSE) + +def get_export_entry(specification): + m = ENTRY_RE.search(specification) + if not m: + result = None + if '[' in specification or ']' in specification: + raise DistlibException("Invalid specification " + "'%s'" % specification) + else: + d = m.groupdict() + name = d['name'] + path = d['callable'] + colons = path.count(':') + if colons == 0: + prefix, suffix = path, None + else: + if colons != 1: + raise DistlibException("Invalid specification " + "'%s'" % specification) + prefix, suffix = path.split(':') + flags = d['flags'] + if flags is None: + if '[' in specification or ']' in specification: + raise DistlibException("Invalid specification " + "'%s'" % specification) + flags = [] + else: + flags = [f.strip() for f in flags.split(',')] + result = ExportEntry(name, prefix, suffix, flags) + return result + + +def get_cache_base(suffix=None): + """ + Return the default base location for distlib caches. If the directory does + not exist, it is created. Use the suffix provided for the base directory, + and default to '.distlib' if it isn't provided. + + On Windows, if LOCALAPPDATA is defined in the environment, then it is + assumed to be a directory, and will be the parent directory of the result. + On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home + directory - using os.expanduser('~') - will be the parent directory of + the result. + + The result is just the directory '.distlib' in the parent directory as + determined above, or with the name specified with ``suffix``. 
+ """ + if suffix is None: + suffix = '.distlib' + if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: + result = os.path.expandvars('$localappdata') + else: + # Assume posix, or old Windows + result = os.path.expanduser('~') + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if os.path.isdir(result): + usable = os.access(result, os.W_OK) + if not usable: + logger.warning('Directory exists but is not writable: %s', result) + else: + try: + os.makedirs(result) + usable = True + except OSError: + logger.warning('Unable to create %s', result, exc_info=True) + usable = False + if not usable: + result = tempfile.mkdtemp() + logger.warning('Default location unusable, using %s', result) + return os.path.join(result, suffix) + + +def path_to_cache_dir(path): + """ + Convert an absolute path to a directory name for use in a cache. + + The algorithm used is: + + #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. + #. Any occurrence of ``os.sep`` is replaced with ``'--'``. + #. ``'.cache'`` is appended. + """ + d, p = os.path.splitdrive(os.path.abspath(path)) + if d: + d = d.replace(':', '---') + p = p.replace(os.sep, '--') + return d + p + '.cache' + + +def ensure_slash(s): + if not s.endswith('/'): + return s + '/' + return s + + +def parse_credentials(netloc): + username = password = None + if '@' in netloc: + prefix, netloc = netloc.split('@', 1) + if ':' not in prefix: + username = prefix + else: + username, password = prefix.split(':', 1) + return username, password, netloc + + +def get_process_umask(): + result = os.umask(0o22) + os.umask(result) + return result + +def is_string_sequence(seq): + result = True + i = None + for i, s in enumerate(seq): + if not isinstance(s, string_types): + result = False + break + assert i is not None + return result + +PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' + '([a-z0-9_.+-]+)', re.I) +PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') + + +def split_filename(filename, project_name=None): + """ + Extract name, version, python version from a filename (no extension) + + Return name, version, pyver or None + """ + result = None + pyver = None + filename = unquote(filename).replace(' ', '-') + m = PYTHON_VERSION.search(filename) + if m: + pyver = m.group(1) + filename = filename[:m.start()] + if project_name and len(filename) > len(project_name) + 1: + m = re.match(re.escape(project_name) + r'\b', filename) + if m: + n = m.end() + result = filename[:n], filename[n + 1:], pyver + if result is None: + m = PROJECT_NAME_AND_VERSION.match(filename) + if m: + result = m.group(1), m.group(3), pyver + return result + +# Allow spaces in name because of legacy dists like "Twisted Core" +NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*' + r'\(\s*(?P<ver>[^\s)]+)\)$') + +def parse_name_and_version(p): + """ + A utility method used to get name and version from a string. + + From e.g. a Provides-Dist value. + + :param p: A value in a form 'foo (1.0)' + :return: The name and version as a tuple. 
+ """ + m = NAME_VERSION_RE.match(p) + if not m: + raise DistlibException('Ill-formed name/version string: \'%s\'' % p) + d = m.groupdict() + return d['name'].strip().lower(), d['ver'] + +def get_extras(requested, available): + result = set() + requested = set(requested or []) + available = set(available or []) + if '*' in requested: + requested.remove('*') + result |= available + for r in requested: + if r == '-': + result.add(r) + elif r.startswith('-'): + unwanted = r[1:] + if unwanted not in available: + logger.warning('undeclared extra: %s' % unwanted) + if unwanted in result: + result.remove(unwanted) + else: + if r not in available: + logger.warning('undeclared extra: %s' % r) + result.add(r) + return result +# +# Extended metadata functionality +# + +def _get_external_data(url): + result = {} + try: + # urlopen might fail if it runs into redirections, + # because of Python issue #13696. Fixed in locators + # using a custom redirect handler. + resp = urlopen(url) + headers = resp.info() + ct = headers.get('Content-Type') + if not ct.startswith('application/json'): + logger.debug('Unexpected response for JSON request: %s', ct) + else: + reader = codecs.getreader('utf-8')(resp) + #data = reader.read().decode('utf-8') + #result = json.loads(data) + result = json.load(reader) + except Exception as e: + logger.exception('Failed to get external data for %s: %s', url, e) + return result + +_external_data_base_url = 'https://www.red-dove.com/pypi/projects/' + +def get_project_data(name): + url = '%s/%s/project.json' % (name[0].upper(), name) + url = urljoin(_external_data_base_url, url) + result = _get_external_data(url) + return result + +def get_package_data(name, version): + url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) + url = urljoin(_external_data_base_url, url) + return _get_external_data(url) + + +class Cache(object): + """ + A class implementing a cache for resources that need to live in the file system + e.g. shared libraries. This class was moved from resources to here because it + could be used by other modules, e.g. the wheel module. + """ + + def __init__(self, base): + """ + Initialise an instance. + + :param base: The base directory where the cache should be located. + """ + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if not os.path.isdir(base): # pragma: no cover + os.makedirs(base) + if (os.stat(base).st_mode & 0o77) != 0: + logger.warning('Directory \'%s\' is not private', base) + self.base = os.path.abspath(os.path.normpath(base)) + + def prefix_to_dir(self, prefix): + """ + Converts a resource prefix to a directory name in the cache. + """ + return path_to_cache_dir(prefix) + + def clear(self): + """ + Clear the cache. + """ + not_removed = [] + for fn in os.listdir(self.base): + fn = os.path.join(self.base, fn) + try: + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + except Exception: + not_removed.append(fn) + return not_removed + + +class EventMixin(object): + """ + A very simple publish/subscribe system. + """ + def __init__(self): + self._subscribers = {} + + def add(self, event, subscriber, append=True): + """ + Add a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be added (and called when the + event is published). + :param append: Whether to append or prepend the subscriber to an + existing subscriber list for the event. 
+ """ + subs = self._subscribers + if event not in subs: + subs[event] = deque([subscriber]) + else: + sq = subs[event] + if append: + sq.append(subscriber) + else: + sq.appendleft(subscriber) + + def remove(self, event, subscriber): + """ + Remove a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be removed. + """ + subs = self._subscribers + if event not in subs: + raise ValueError('No subscribers: %r' % event) + subs[event].remove(subscriber) + + def get_subscribers(self, event): + """ + Return an iterator for the subscribers for an event. + :param event: The event to return subscribers for. + """ + return iter(self._subscribers.get(event, ())) + + def publish(self, event, *args, **kwargs): + """ + Publish a event and return a list of values returned by its + subscribers. + + :param event: The event to publish. + :param args: The positional arguments to pass to the event's + subscribers. + :param kwargs: The keyword arguments to pass to the event's + subscribers. + """ + result = [] + for subscriber in self.get_subscribers(event): + try: + value = subscriber(event, *args, **kwargs) + except Exception: + logger.exception('Exception during event publication') + value = None + result.append(value) + logger.debug('publish %s: args = %s, kwargs = %s, result = %s', + event, args, kwargs, result) + return result + +# +# Simple sequencing +# +class Sequencer(object): + def __init__(self): + self._preds = {} + self._succs = {} + self._nodes = set() # nodes with no preds/succs + + def add_node(self, node): + self._nodes.add(node) + + def remove_node(self, node, edges=False): + if node in self._nodes: + self._nodes.remove(node) + if edges: + for p in set(self._preds.get(node, ())): + self.remove(p, node) + for s in set(self._succs.get(node, ())): + self.remove(node, s) + # Remove empties + for k, v in list(self._preds.items()): + if not v: + del self._preds[k] + for k, v in list(self._succs.items()): + if not v: + del self._succs[k] + + def add(self, pred, succ): + assert pred != succ + self._preds.setdefault(succ, set()).add(pred) + self._succs.setdefault(pred, set()).add(succ) + + def remove(self, pred, succ): + assert pred != succ + try: + preds = self._preds[succ] + succs = self._succs[pred] + except KeyError: # pragma: no cover + raise ValueError('%r not a successor of anything' % succ) + try: + preds.remove(pred) + succs.remove(succ) + except KeyError: # pragma: no cover + raise ValueError('%r not a successor of %r' % (succ, pred)) + + def is_step(self, step): + return (step in self._preds or step in self._succs or + step in self._nodes) + + def get_steps(self, final): + if not self.is_step(final): + raise ValueError('Unknown: %r' % final) + result = [] + todo = [] + seen = set() + todo.append(final) + while todo: + step = todo.pop(0) + if step in seen: + # if a step was already seen, + # move it to the end (so it will appear earlier + # when reversed on return) ... 
but not for the + # final step, as that would be confusing for + # users + if step != final: + result.remove(step) + result.append(step) + else: + seen.add(step) + result.append(step) + preds = self._preds.get(step, ()) + todo.extend(preds) + return reversed(result) + + @property + def strong_connections(self): + #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + index_counter = [0] + stack = [] + lowlinks = {} + index = {} + result = [] + + graph = self._succs + + def strongconnect(node): + # set the depth index for this node to the smallest unused index + index[node] = index_counter[0] + lowlinks[node] = index_counter[0] + index_counter[0] += 1 + stack.append(node) + + # Consider successors + try: + successors = graph[node] + except Exception: + successors = [] + for successor in successors: + if successor not in lowlinks: + # Successor has not yet been visited + strongconnect(successor) + lowlinks[node] = min(lowlinks[node],lowlinks[successor]) + elif successor in stack: + # the successor is in the stack and hence in the current + # strongly connected component (SCC) + lowlinks[node] = min(lowlinks[node],index[successor]) + + # If `node` is a root node, pop the stack and generate an SCC + if lowlinks[node] == index[node]: + connected_component = [] + + while True: + successor = stack.pop() + connected_component.append(successor) + if successor == node: break + component = tuple(connected_component) + # storing the result + result.append(component) + + for node in graph: + if node not in lowlinks: + strongconnect(node) + + return result + + @property + def dot(self): + result = ['digraph G {'] + for succ in self._preds: + preds = self._preds[succ] + for pred in preds: + result.append(' %s -> %s;' % (pred, succ)) + for node in self._nodes: + result.append(' %s;' % node) + result.append('}') + return '\n'.join(result) + +# +# Unarchiving functionality for zip, tar, tgz, tbz, whl +# + +ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', + '.tgz', '.tbz', '.whl') + +def unarchive(archive_filename, dest_dir, format=None, check=True): + + def check_path(path): + if not isinstance(path, text_type): + path = path.decode('utf-8') + p = os.path.abspath(os.path.join(dest_dir, path)) + if not p.startswith(dest_dir) or p[plen] != os.sep: + raise ValueError('path outside destination: %r' % p) + + dest_dir = os.path.abspath(dest_dir) + plen = len(dest_dir) + archive = None + if format is None: + if archive_filename.endswith(('.zip', '.whl')): + format = 'zip' + elif archive_filename.endswith(('.tar.gz', '.tgz')): + format = 'tgz' + mode = 'r:gz' + elif archive_filename.endswith(('.tar.bz2', '.tbz')): + format = 'tbz' + mode = 'r:bz2' + elif archive_filename.endswith('.tar'): + format = 'tar' + mode = 'r' + else: # pragma: no cover + raise ValueError('Unknown format for %r' % archive_filename) + try: + if format == 'zip': + archive = ZipFile(archive_filename, 'r') + if check: + names = archive.namelist() + for name in names: + check_path(name) + else: + archive = tarfile.open(archive_filename, mode) + if check: + names = archive.getnames() + for name in names: + check_path(name) + if format != 'zip' and sys.version_info[0] < 3: + # See Python issue 17153. If the dest path contains Unicode, + # tarfile extraction fails on Python 2.x if a member path name + # contains non-ASCII characters - it leads to an implicit + # bytes -> unicode conversion using ASCII to decode. 
+ for tarinfo in archive.getmembers(): + if not isinstance(tarinfo.name, text_type): + tarinfo.name = tarinfo.name.decode('utf-8') + archive.extractall(dest_dir) + + finally: + if archive: + archive.close() + + +def zip_dir(directory): + """zip a directory tree into a BytesIO object""" + result = io.BytesIO() + dlen = len(directory) + with ZipFile(result, "w") as zf: + for root, dirs, files in os.walk(directory): + for name in files: + full = os.path.join(root, name) + rel = root[dlen:] + dest = os.path.join(rel, name) + zf.write(full, dest) + return result + +# +# Simple progress bar +# + +UNITS = ('', 'K', 'M', 'G','T','P') + + +class Progress(object): + unknown = 'UNKNOWN' + + def __init__(self, minval=0, maxval=100): + assert maxval is None or maxval >= minval + self.min = self.cur = minval + self.max = maxval + self.started = None + self.elapsed = 0 + self.done = False + + def update(self, curval): + assert self.min <= curval + assert self.max is None or curval <= self.max + self.cur = curval + now = time.time() + if self.started is None: + self.started = now + else: + self.elapsed = now - self.started + + def increment(self, incr): + assert incr >= 0 + self.update(self.cur + incr) + + def start(self): + self.update(self.min) + return self + + def stop(self): + if self.max is not None: + self.update(self.max) + self.done = True + + @property + def maximum(self): + return self.unknown if self.max is None else self.max + + @property + def percentage(self): + if self.done: + result = '100 %' + elif self.max is None: + result = ' ?? %' + else: + v = 100.0 * (self.cur - self.min) / (self.max - self.min) + result = '%3d %%' % v + return result + + def format_duration(self, duration): + if (duration <= 0) and self.max is None or self.cur == self.min: + result = '??:??:??' 
+ #elif duration < 1: + # result = '--:--:--' + else: + result = time.strftime('%H:%M:%S', time.gmtime(duration)) + return result + + @property + def ETA(self): + if self.done: + prefix = 'Done' + t = self.elapsed + #import pdb; pdb.set_trace() + else: + prefix = 'ETA ' + if self.max is None: + t = -1 + elif self.elapsed == 0 or (self.cur == self.min): + t = 0 + else: + #import pdb; pdb.set_trace() + t = float(self.max - self.min) + t /= self.cur - self.min + t = (t - 1) * self.elapsed + return '%s: %s' % (prefix, self.format_duration(t)) + + @property + def speed(self): + if self.elapsed == 0: + result = 0.0 + else: + result = (self.cur - self.min) / self.elapsed + for unit in UNITS: + if result < 1000: + break + result /= 1000.0 + return '%d %sB/s' % (result, unit) + +# +# Glob functionality +# + +RICH_GLOB = re.compile(r'\{([^}]*)\}') +_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') +_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') + + +def iglob(path_glob): + """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" + if _CHECK_RECURSIVE_GLOB.search(path_glob): + msg = """invalid glob %r: recursive glob "**" must be used alone""" + raise ValueError(msg % path_glob) + if _CHECK_MISMATCH_SET.search(path_glob): + msg = """invalid glob %r: mismatching set marker '{' or '}'""" + raise ValueError(msg % path_glob) + return _iglob(path_glob) + + +def _iglob(path_glob): + rich_path_glob = RICH_GLOB.split(path_glob, 1) + if len(rich_path_glob) > 1: + assert len(rich_path_glob) == 3, rich_path_glob + prefix, set, suffix = rich_path_glob + for item in set.split(','): + for path in _iglob(''.join((prefix, item, suffix))): + yield path + else: + if '**' not in path_glob: + for item in std_iglob(path_glob): + yield item + else: + prefix, radical = path_glob.split('**', 1) + if prefix == '': + prefix = '.' 
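+ # Illustrative calls (comment-only sketch): iglob() supports brace
+ # sets and a standalone '**' segment, e.g.
+ #
+ #     list(iglob('src/**/*.py'))            # recursive walk of src
+ #     list(iglob('docs/{api,guide}/*.rst')) # expands the {...} set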
+ if radical == '': + radical = '*' + else: + # we support both + radical = radical.lstrip('/') + radical = radical.lstrip('\\') + for path, dir, files in os.walk(prefix): + path = os.path.normpath(path) + for fn in _iglob(os.path.join(path, radical)): + yield fn + +if ssl: + from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, + CertificateError) + + +# +# HTTPSConnection which verifies certificates/matches domains +# + + class HTTPSConnection(httplib.HTTPSConnection): + ca_certs = None # set this to the path to the certs file (.pem) + check_domain = True # only used if ca_certs is not None + + # noinspection PyPropertyAccess + def connect(self): + sock = socket.create_connection((self.host, self.port), self.timeout) + if getattr(self, '_tunnel_host', False): + self.sock = sock + self._tunnel() + + if not hasattr(ssl, 'SSLContext'): + # For 2.x + if self.ca_certs: + cert_reqs = ssl.CERT_REQUIRED + else: + cert_reqs = ssl.CERT_NONE + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, + cert_reqs=cert_reqs, + ssl_version=ssl.PROTOCOL_SSLv23, + ca_certs=self.ca_certs) + else: # pragma: no cover + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.options |= ssl.OP_NO_SSLv2 + if self.cert_file: + context.load_cert_chain(self.cert_file, self.key_file) + kwargs = {} + if self.ca_certs: + context.verify_mode = ssl.CERT_REQUIRED + context.load_verify_locations(cafile=self.ca_certs) + if getattr(ssl, 'HAS_SNI', False): + kwargs['server_hostname'] = self.host + self.sock = context.wrap_socket(sock, **kwargs) + if self.ca_certs and self.check_domain: + try: + match_hostname(self.sock.getpeercert(), self.host) + logger.debug('Host verified: %s', self.host) + except CertificateError: # pragma: no cover + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + + class HTTPSHandler(BaseHTTPSHandler): + def __init__(self, ca_certs, check_domain=True): + BaseHTTPSHandler.__init__(self) + self.ca_certs = ca_certs + self.check_domain = check_domain + + def _conn_maker(self, *args, **kwargs): + """ + This is called to create a connection instance. Normally you'd + pass a connection class to do_open, but it doesn't actually check for + a class, and just expects a callable. As long as we behave just as a + constructor would have, we should be OK. If it ever changes so that + we *must* pass a class, we'll create an UnsafeHTTPSConnection class + which just sets check_domain to False in the class definition, and + choose which one to pass to do_open. + """ + result = HTTPSConnection(*args, **kwargs) + if self.ca_certs: + result.ca_certs = self.ca_certs + result.check_domain = self.check_domain + return result + + def https_open(self, req): + try: + return self.do_open(self._conn_maker, req) + except URLError as e: + if 'certificate verify failed' in str(e.reason): + raise CertificateError('Unable to verify server certificate ' + 'for %s' % req.host) + else: + raise + + # + # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- + # Middle proxy using HTTP listens on port 443, or an index mistakenly serves + # HTML containing a http://xyz link when it should be https://xyz), + # you can use the following handler class, which does not allow HTTP traffic. + # + # It works by inheriting from HTTPHandler - so build_opener won't add a + # handler for HTTP itself. 
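+ #
+ # Illustrative wiring (a sketch only; build_opener is the stdlib
+ # urllib2/urllib.request helper, not something defined in this module):
+ #
+ #     opener = build_opener(HTTPSHandler(ca_certs='/path/to/ca.pem'))
+ #     opener.open('https://example.com/')  # verified and domain-checked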
+ # + class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): + def http_open(self, req): + raise URLError('Unexpected HTTP request on what should be a secure ' + 'connection: %s' % req) + +# +# XML-RPC with timeouts +# + +_ver_info = sys.version_info[:2] + +if _ver_info == (2, 6): + class HTTP(httplib.HTTP): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + + if ssl: + class HTTPS(httplib.HTTPS): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + +class Transport(xmlrpclib.Transport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.Transport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, x509 = self.get_host_info(host) + if _ver_info == (2, 6): + result = HTTP(h, timeout=self.timeout) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPConnection(h) + result = self._connection[1] + return result + +if ssl: + class SafeTransport(xmlrpclib.SafeTransport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.SafeTransport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, kwargs = self.get_host_info(host) + if not kwargs: + kwargs = {} + kwargs['timeout'] = self.timeout + if _ver_info == (2, 6): + result = HTTPS(host, None, **kwargs) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPSConnection(h, None, + **kwargs) + result = self._connection[1] + return result + + +class ServerProxy(xmlrpclib.ServerProxy): + def __init__(self, uri, **kwargs): + self.timeout = timeout = kwargs.pop('timeout', None) + # The above classes only come into play if a timeout + # is specified + if timeout is not None: + scheme, _ = splittype(uri) + use_datetime = kwargs.get('use_datetime', 0) + if scheme == 'https': + tcls = SafeTransport + else: + tcls = Transport + kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) + self.transport = t + xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) + +# +# CSV functionality. This is provided because on 2.x, the csv module can't +# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. +# + +def _csv_open(fn, mode, **kwargs): + if sys.version_info[0] < 3: + mode += 'b' + else: + kwargs['newline'] = '' + # Python 3 determines encoding from locale. 
Force 'utf-8' + # file encoding to match other forced utf-8 encoding + kwargs['encoding'] = 'utf-8' + return open(fn, mode, **kwargs) + + +class CSVBase(object): + defaults = { + 'delimiter': str(','), # The strs are used because we need native + 'quotechar': str('"'), # str in the csv API (2.x won't take + 'lineterminator': str('\n') # Unicode) + } + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.stream.close() + + +class CSVReader(CSVBase): + def __init__(self, **kwargs): + if 'stream' in kwargs: + stream = kwargs['stream'] + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + self.stream = stream + else: + self.stream = _csv_open(kwargs['path'], 'r') + self.reader = csv.reader(self.stream, **self.defaults) + + def __iter__(self): + return self + + def next(self): + result = next(self.reader) + if sys.version_info[0] < 3: + for i, item in enumerate(result): + if not isinstance(item, text_type): + result[i] = item.decode('utf-8') + return result + + __next__ = next + +class CSVWriter(CSVBase): + def __init__(self, fn, **kwargs): + self.stream = _csv_open(fn, 'w') + self.writer = csv.writer(self.stream, **self.defaults) + + def writerow(self, row): + if sys.version_info[0] < 3: + r = [] + for item in row: + if isinstance(item, text_type): + item = item.encode('utf-8') + r.append(item) + row = r + self.writer.writerow(row) + +# +# Configurator functionality +# + +class Configurator(BaseConfigurator): + + value_converters = dict(BaseConfigurator.value_converters) + value_converters['inc'] = 'inc_convert' + + def __init__(self, config, base=None): + super(Configurator, self).__init__(config) + self.base = base or os.getcwd() + + def configure_custom(self, config): + def convert(o): + if isinstance(o, (list, tuple)): + result = type(o)([convert(i) for i in o]) + elif isinstance(o, dict): + if '()' in o: + result = self.configure_custom(o) + else: + result = {} + for k in o: + result[k] = convert(o[k]) + else: + result = self.convert(o) + return result + + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + args = config.pop('[]', ()) + if args: + args = tuple([convert(o) for o in args]) + items = [(k, convert(config[k])) for k in config if valid_ident(k)] + kwargs = dict(items) + result = c(*args, **kwargs) + if props: + for n, v in props.items(): + setattr(result, n, convert(v)) + return result + + def __getitem__(self, key): + result = self.config[key] + if isinstance(result, dict) and '()' in result: + self.config[key] = result = self.configure_custom(result) + return result + + def inc_convert(self, value): + """Default converter for the inc:// protocol.""" + if not os.path.isabs(value): + value = os.path.join(self.base, value) + with codecs.open(value, 'r', encoding='utf-8') as f: + result = json.load(f) + return result + + +class SubprocessMixin(object): + """ + Mixin for running subprocesses and capturing their output + """ + def __init__(self, verbose=False, progress=None): + self.verbose = verbose + self.progress = progress + + def reader(self, stream, context): + """ + Read lines from a subprocess' output stream and either pass to a progress + callable (if specified) or write progress information to sys.stderr. 
+ """ + progress = self.progress + verbose = self.verbose + while True: + s = stream.readline() + if not s: + break + if progress is not None: + progress(s, context) + else: + if not verbose: + sys.stderr.write('.') + else: + sys.stderr.write(s.decode('utf-8')) + sys.stderr.flush() + stream.close() + + def run_command(self, cmd, **kwargs): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, **kwargs) + t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) + t1.start() + t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) + t2.start() + p.wait() + t1.join() + t2.join() + if self.progress is not None: + self.progress('done.', 'main') + elif self.verbose: + sys.stderr.write('done.\n') + return p + + +def normalize_name(name): + """Normalize a python package name a la PEP 503""" + # https://www.python.org/dev/peps/pep-0503/#normalized-names + return re.sub('[-_.]+', '-', name).lower() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyc new file mode 100644 index 0000000..2cc6237 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.py new file mode 100644 index 0000000..3eebe18 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.py @@ -0,0 +1,736 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2017 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Implementation of a flexible versioning scheme providing support for PEP-440, +setuptools-compatible and semantic versioning. 
+""" + +import logging +import re + +from .compat import string_types +from .util import parse_requirement + +__all__ = ['NormalizedVersion', 'NormalizedMatcher', + 'LegacyVersion', 'LegacyMatcher', + 'SemanticVersion', 'SemanticMatcher', + 'UnsupportedVersionError', 'get_scheme'] + +logger = logging.getLogger(__name__) + + +class UnsupportedVersionError(ValueError): + """This is an unsupported version.""" + pass + + +class Version(object): + def __init__(self, s): + self._string = s = s.strip() + self._parts = parts = self.parse(s) + assert isinstance(parts, tuple) + assert len(parts) > 0 + + def parse(self, s): + raise NotImplementedError('please implement in a subclass') + + def _check_compatible(self, other): + if type(self) != type(other): + raise TypeError('cannot compare %r and %r' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + self._check_compatible(other) + return self._parts < other._parts + + def __gt__(self, other): + return not (self.__lt__(other) or self.__eq__(other)) + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self._parts) + + def __repr__(self): + return "%s('%s')" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + @property + def is_prerelease(self): + raise NotImplementedError('Please implement in subclasses.') + + +class Matcher(object): + version_class = None + + # value is either a callable or the name of a method + _operators = { + '<': lambda v, c, p: v < c, + '>': lambda v, c, p: v > c, + '<=': lambda v, c, p: v == c or v < c, + '>=': lambda v, c, p: v == c or v > c, + '==': lambda v, c, p: v == c, + '===': lambda v, c, p: v == c, + # by default, compatible => >=. + '~=': lambda v, c, p: v == c or v > c, + '!=': lambda v, c, p: v != c, + } + + # this is a method only to support alternative implementations + # via overriding + def parse_requirement(self, s): + return parse_requirement(s) + + def __init__(self, s): + if self.version_class is None: + raise ValueError('Please specify a version class') + self._string = s = s.strip() + r = self.parse_requirement(s) + if not r: + raise ValueError('Not valid: %r' % s) + self.name = r.name + self.key = self.name.lower() # for case-insensitive comparisons + clist = [] + if r.constraints: + # import pdb; pdb.set_trace() + for op, s in r.constraints: + if s.endswith('.*'): + if op not in ('==', '!='): + raise ValueError('\'.*\' not allowed for ' + '%r constraints' % op) + # Could be a partial version (e.g. for '2.*') which + # won't parse as a version, so keep it as a string + vn, prefix = s[:-2], True + # Just to check that vn is a valid version + self.version_class(vn) + else: + # Should parse as a version, so we can create an + # instance for the comparison + vn, prefix = self.version_class(s), False + clist.append((op, vn, prefix)) + self._parts = tuple(clist) + + def match(self, version): + """ + Check if the provided version matches the constraints. + + :param version: The version to match against this instance. + :type version: String or :class:`Version` instance. 
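+
+ A hedged illustration using a subclass defined later in this module:
+ ``NormalizedMatcher('foo (>= 1.0, < 2.0)').match('1.4')`` checks
+ ``'1.4'`` against each ``(operator, constraint, prefix)`` part and
+ returns True only if all of them hold.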
+ """ + if isinstance(version, string_types): + version = self.version_class(version) + for operator, constraint, prefix in self._parts: + f = self._operators.get(operator) + if isinstance(f, string_types): + f = getattr(self, f) + if not f: + msg = ('%r not implemented ' + 'for %s' % (operator, self.__class__.__name__)) + raise NotImplementedError(msg) + if not f(version, constraint, prefix): + return False + return True + + @property + def exact_version(self): + result = None + if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='): + result = self._parts[0][1] + return result + + def _check_compatible(self, other): + if type(self) != type(other) or self.name != other.name: + raise TypeError('cannot compare %s and %s' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self.key == other.key and self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self.key) + hash(self._parts) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + +PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?' + r'(\.(post)(\d+))?(\.(dev)(\d+))?' + r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$') + + +def _pep_440_key(s): + s = s.strip() + m = PEP440_VERSION_RE.match(s) + if not m: + raise UnsupportedVersionError('Not a valid version: %s' % s) + groups = m.groups() + nums = tuple(int(v) for v in groups[1].split('.')) + while len(nums) > 1 and nums[-1] == 0: + nums = nums[:-1] + + if not groups[0]: + epoch = 0 + else: + epoch = int(groups[0]) + pre = groups[4:6] + post = groups[7:9] + dev = groups[10:12] + local = groups[13] + if pre == (None, None): + pre = () + else: + pre = pre[0], int(pre[1]) + if post == (None, None): + post = () + else: + post = post[0], int(post[1]) + if dev == (None, None): + dev = () + else: + dev = dev[0], int(dev[1]) + if local is None: + local = () + else: + parts = [] + for part in local.split('.'): + # to ensure that numeric compares as > lexicographic, avoid + # comparing them directly, but encode a tuple which ensures + # correct sorting + if part.isdigit(): + part = (1, int(part)) + else: + part = (0, part) + parts.append(part) + local = tuple(parts) + if not pre: + # either before pre-release, or final release and after + if not post and dev: + # before pre-release + pre = ('a', -1) # to sort before a0 + else: + pre = ('z',) # to sort after all pre-releases + # now look at the state of post and dev. + if not post: + post = ('_',) # sort before 'a' + if not dev: + dev = ('final',) + + #print('%s -> %s' % (s, m.groups())) + return epoch, nums, pre, post, dev, local + + +_normalized_key = _pep_440_key + + +class NormalizedVersion(Version): + """A rational version. + + Good: + 1.2 # equivalent to "1.2.0" + 1.2.0 + 1.2a1 + 1.2.3a2 + 1.2.3b1 + 1.2.3c1 + 1.2.3.4 + TODO: fill this out + + Bad: + 1 # minimum two numbers + 1.2a # release level must have a release serial + 1.2.3b + """ + def parse(self, s): + result = _normalized_key(s) + # _normalized_key loses trailing zeroes in the release + # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0 + # However, PEP 440 prefix matching needs it: for example, + # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0). 
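+ # For example (comment-only sketch): the key computed here orders
+ # '1.0a1' < '1.0' < '1.0.post1', because pre-releases sort via
+ # ('a', 1) before the final-release marker ('z',), and post releases
+ # sort after the ('_',) placeholder.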
+ m = PEP440_VERSION_RE.match(s) # must succeed + groups = m.groups() + self._release_clause = tuple(int(v) for v in groups[1].split('.')) + return result + + PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) + + @property + def is_prerelease(self): + return any(t[0] in self.PREREL_TAGS for t in self._parts if t) + + +def _match_prefix(x, y): + x = str(x) + y = str(y) + if x == y: + return True + if not x.startswith(y): + return False + n = len(y) + return x[n] == '.' + + +class NormalizedMatcher(Matcher): + version_class = NormalizedVersion + + # value is either a callable or the name of a method + _operators = { + '~=': '_match_compatible', + '<': '_match_lt', + '>': '_match_gt', + '<=': '_match_le', + '>=': '_match_ge', + '==': '_match_eq', + '===': '_match_arbitrary', + '!=': '_match_ne', + } + + def _adjust_local(self, version, constraint, prefix): + if prefix: + strip_local = '+' not in constraint and version._parts[-1] + else: + # both constraint and version are + # NormalizedVersion instances. + # If constraint does not have a local component, + # ensure the version doesn't, either. + strip_local = not constraint._parts[-1] and version._parts[-1] + if strip_local: + s = version._string.split('+', 1)[0] + version = self.version_class(s) + return version, constraint + + def _match_lt(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version >= constraint: + return False + release_clause = constraint._release_clause + pfx = '.'.join([str(i) for i in release_clause]) + return not _match_prefix(version, pfx) + + def _match_gt(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version <= constraint: + return False + release_clause = constraint._release_clause + pfx = '.'.join([str(i) for i in release_clause]) + return not _match_prefix(version, pfx) + + def _match_le(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + return version <= constraint + + def _match_ge(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + return version >= constraint + + def _match_eq(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if not prefix: + result = (version == constraint) + else: + result = _match_prefix(version, constraint) + return result + + def _match_arbitrary(self, version, constraint, prefix): + return str(version) == str(constraint) + + def _match_ne(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if not prefix: + result = (version != constraint) + else: + result = not _match_prefix(version, constraint) + return result + + def _match_compatible(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version == constraint: + return True + if version < constraint: + return False +# if not prefix: +# return True + release_clause = constraint._release_clause + if len(release_clause) > 1: + release_clause = release_clause[:-1] + pfx = '.'.join([str(i) for i in release_clause]) + return _match_prefix(version, pfx) + +_REPLACEMENTS = ( + (re.compile('[.+-]$'), ''), # remove trailing puncts + (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start + (re.compile('^[.-]'), ''), # remove leading puncts + (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses + 
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
+ (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading r(ev)
+ (re.compile('[.]{2,}'), '.'), # multiple runs of '.'
+ (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
+ (re.compile(r'\b(pre-alpha|prealpha)\b'),
+ 'pre.alpha'), # standardise
+ (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
+)
+
+_SUFFIX_REPLACEMENTS = (
+ (re.compile('^[:~._+-]+'), ''), # remove leading puncts
+ (re.compile('[,*")([\\]]'), ''), # remove unwanted chars
+ (re.compile('[~:+_ -]'), '.'), # replace illegal chars
+ (re.compile('[.]{2,}'), '.'), # multiple runs of '.'
+ (re.compile(r'\.$'), ''), # trailing '.'
+)
+
+_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
+
+
+def _suggest_semantic_version(s):
+ """
+ Try to suggest a semantic form for a version for which
+ _suggest_normalized_version couldn't come up with anything.
+ """
+ result = s.strip().lower()
+ for pat, repl in _REPLACEMENTS:
+ result = pat.sub(repl, result)
+ if not result:
+ result = '0.0.0'
+
+ # Now look for numeric prefix, and separate it out from
+ # the rest.
+ #import pdb; pdb.set_trace()
+ m = _NUMERIC_PREFIX.match(result)
+ if not m:
+ prefix = '0.0.0'
+ suffix = result
+ else:
+ prefix = m.groups()[0].split('.')
+ prefix = [int(i) for i in prefix]
+ while len(prefix) < 3:
+ prefix.append(0)
+ if len(prefix) == 3:
+ suffix = result[m.end():]
+ else:
+ suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
+ prefix = prefix[:3]
+ prefix = '.'.join([str(i) for i in prefix])
+ suffix = suffix.strip()
+ if suffix:
+ #import pdb; pdb.set_trace()
+ # massage the suffix.
+ for pat, repl in _SUFFIX_REPLACEMENTS:
+ suffix = pat.sub(repl, suffix)
+
+ if not suffix:
+ result = prefix
+ else:
+ sep = '-' if 'dev' in suffix else '+'
+ result = prefix + sep + suffix
+ if not is_semver(result):
+ result = None
+ return result
+
+
+def _suggest_normalized_version(s):
+ """Suggest a normalized version close to the given version string.
+
+ If you have a version string that isn't rational (i.e. NormalizedVersion
+ doesn't like it) then you might be able to get an equivalent (or close)
+ rational version from this function.
+
+ This does a number of simple normalizations to the given string, based
+ on observation of versions currently in use on PyPI. Given a dump of
+ those versions during PyCon 2009, 4287 of them:
+ - 2312 (53.93%) match NormalizedVersion without change
+ with the automatic suggestion
+ - 3474 (81.04%) match when using this suggestion method
+
+ @param s {str} An irrational version string.
+ @returns A rational version string, or None if one couldn't be determined.
+ """ + try: + _normalized_key(s) + return s # already rational + except UnsupportedVersionError: + pass + + rs = s.lower() + + # part of this could use maketrans + for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), + ('beta', 'b'), ('rc', 'c'), ('-final', ''), + ('-pre', 'c'), + ('-release', ''), ('.release', ''), ('-stable', ''), + ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), + ('final', '')): + rs = rs.replace(orig, repl) + + # if something ends with dev or pre, we add a 0 + rs = re.sub(r"pre$", r"pre0", rs) + rs = re.sub(r"dev$", r"dev0", rs) + + # if we have something like "b-2" or "a.2" at the end of the + # version, that is probably beta, alpha, etc + # let's remove the dash or dot + rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) + + # 1.0-dev-r371 -> 1.0.dev371 + # 0.1-dev-r79 -> 0.1.dev79 + rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) + + # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 + rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) + + # Clean: v0.3, v1.0 + if rs.startswith('v'): + rs = rs[1:] + + # Clean leading '0's on numbers. + #TODO: unintended side-effect on, e.g., "2003.05.09" + # PyPI stats: 77 (~2%) better + rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) + + # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers + # zero. + # PyPI stats: 245 (7.56%) better + rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) + + # the 'dev-rNNN' tag is a dev tag + rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) + + # clean the - when used as a pre delimiter + rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) + + # a terminal "dev" or "devel" can be changed into ".dev0" + rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) + + # a terminal "dev" can be changed into ".dev0" + rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) + + # a terminal "final" or "stable" can be removed + rs = re.sub(r"(final|stable)$", "", rs) + + # The 'r' and the '-' tags are post release tags + # 0.4a1.r10 -> 0.4a1.post10 + # 0.9.33-17222 -> 0.9.33.post17222 + # 0.9.33-r17222 -> 0.9.33.post17222 + rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) + + # Clean 'r' instead of 'dev' usage: + # 0.9.33+r17222 -> 0.9.33.dev17222 + # 1.0dev123 -> 1.0.dev123 + # 1.0.git123 -> 1.0.dev123 + # 1.0.bzr123 -> 1.0.dev123 + # 0.1a0dev.123 -> 0.1a0.dev123 + # PyPI stats: ~150 (~4%) better + rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) + + # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: + # 0.2.pre1 -> 0.2c1 + # 0.2-c1 -> 0.2c1 + # 1.0preview123 -> 1.0c123 + # PyPI stats: ~21 (0.62%) better + rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) + + # Tcl/Tk uses "px" for their post release markers + rs = re.sub(r"p(\d+)$", r".post\1", rs) + + try: + _normalized_key(rs) + except UnsupportedVersionError: + rs = None + return rs + +# +# Legacy version processing (distribute-compatible) +# + +_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) +_VERSION_REPLACE = { + 'pre': 'c', + 'preview': 'c', + '-': 'final-', + 'rc': 'c', + 'dev': '@', + '': None, + '.': None, +} + + +def _legacy_key(s): + def get_parts(s): + result = [] + for p in _VERSION_PART.split(s.lower()): + p = _VERSION_REPLACE.get(p, p) + if p: + if '0' <= p[:1] <= '9': + p = p.zfill(8) + else: + p = '*' + p + result.append(p) + result.append('*final') + return result + + result = [] + for p in get_parts(s): + if p.startswith('*'): + if p < '*final': + while result and result[-1] == '*final-': + result.pop() + while result and result[-1] == '00000000': + result.pop() + result.append(p) + return 
tuple(result)
+
+
+class LegacyVersion(Version):
+ def parse(self, s):
+ return _legacy_key(s)
+
+ @property
+ def is_prerelease(self):
+ result = False
+ for x in self._parts:
+ if (isinstance(x, string_types) and x.startswith('*') and
+ x < '*final'):
+ result = True
+ break
+ return result
+
+
+class LegacyMatcher(Matcher):
+ version_class = LegacyVersion
+
+ _operators = dict(Matcher._operators)
+ _operators['~='] = '_match_compatible'
+
+ numeric_re = re.compile(r'^(\d+(\.\d+)*)')
+
+ def _match_compatible(self, version, constraint, prefix):
+ if version < constraint:
+ return False
+ m = self.numeric_re.match(str(constraint))
+ if not m:
+ logger.warning('Cannot compute compatible match for version %s '
+ ' and constraint %s', version, constraint)
+ return True
+ s = m.groups()[0]
+ if '.' in s:
+ s = s.rsplit('.', 1)[0]
+ return _match_prefix(version, s)
+
+#
+# Semantic versioning
+#
+
+_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
+ r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
+ r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
+
+
+def is_semver(s):
+ return _SEMVER_RE.match(s)
+
+
+def _semantic_key(s):
+ def make_tuple(s, absent):
+ if s is None:
+ result = (absent,)
+ else:
+ parts = s[1:].split('.')
+ # We can't compare ints and strings on Python 3, so fudge it
+ # by zero-filling numeric values to simulate a numeric comparison
+ result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
+ return result
+
+ m = is_semver(s)
+ if not m:
+ raise UnsupportedVersionError(s)
+ groups = m.groups()
+ major, minor, patch = [int(i) for i in groups[:3]]
+ # choose the '|' and '*' so that versions sort correctly
+ pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
+ return (major, minor, patch), pre, build
+
+
+class SemanticVersion(Version):
+ def parse(self, s):
+ return _semantic_key(s)
+
+ @property
+ def is_prerelease(self):
+ return self._parts[1][0] != '|'
+
+
+class SemanticMatcher(Matcher):
+ version_class = SemanticVersion
+
+
+class VersionScheme(object):
+ def __init__(self, key, matcher, suggester=None):
+ self.key = key
+ self.matcher = matcher
+ self.suggester = suggester
+
+ def is_valid_version(self, s):
+ try:
+ self.matcher.version_class(s)
+ result = True
+ except UnsupportedVersionError:
+ result = False
+ return result
+
+ def is_valid_matcher(self, s):
+ try:
+ self.matcher(s)
+ result = True
+ except UnsupportedVersionError:
+ result = False
+ return result
+
+ def is_valid_constraint_list(self, s):
+ """
+ Used for processing some metadata fields
+ """
+ return self.is_valid_matcher('dummy_name (%s)' % s)
+
+ def suggest(self, s):
+ if self.suggester is None:
+ result = None
+ else:
+ result = self.suggester(s)
+ return result
+
+_SCHEMES = {
+ 'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
+ _suggest_normalized_version),
+ # suggest() calls self.suggester(s) with a single argument, so the
+ # identity suggester must take exactly one parameter
+ 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
+ 'semantic': VersionScheme(_semantic_key, SemanticMatcher,
+ _suggest_semantic_version),
+}
+
+_SCHEMES['default'] = _SCHEMES['normalized']
+
+
+def get_scheme(name):
+ if name not in _SCHEMES:
+ raise ValueError('unknown scheme name: %r' % name)
+ return _SCHEMES[name]
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyc
new file mode 100644
index 0000000..d6af3ce
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.pyc differ
diff --git
a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w32.exe b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w32.exe new file mode 100644 index 0000000..732215a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w32.exe differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w64.exe b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w64.exe new file mode 100644 index 0000000..c41bd0a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w64.exe differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py new file mode 100644 index 0000000..b04bfae --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py @@ -0,0 +1,988 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import unicode_literals + +import base64 +import codecs +import datetime +import distutils.util +from email import message_from_file +import hashlib +import imp +import json +import logging +import os +import posixpath +import re +import shutil +import sys +import tempfile +import zipfile + +from . import __version__, DistlibException +from .compat import sysconfig, ZipFile, fsdecode, text_type, filter +from .database import InstalledDistribution +from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME +from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, + cached_property, get_cache_base, read_exports, tempdir) +from .version import NormalizedVersion, UnsupportedVersionError + +logger = logging.getLogger(__name__) + +cache = None # created when needed + +if hasattr(sys, 'pypy_version_info'): # pragma: no cover + IMP_PREFIX = 'pp' +elif sys.platform.startswith('java'): # pragma: no cover + IMP_PREFIX = 'jy' +elif sys.platform == 'cli': # pragma: no cover + IMP_PREFIX = 'ip' +else: + IMP_PREFIX = 'cp' + +VER_SUFFIX = sysconfig.get_config_var('py_version_nodot') +if not VER_SUFFIX: # pragma: no cover + VER_SUFFIX = '%s%s' % sys.version_info[:2] +PYVER = 'py' + VER_SUFFIX +IMPVER = IMP_PREFIX + VER_SUFFIX + +ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_') + +ABI = sysconfig.get_config_var('SOABI') +if ABI and ABI.startswith('cpython-'): + ABI = ABI.replace('cpython-', 'cp') +else: + def _derive_abi(): + parts = ['cp', VER_SUFFIX] + if sysconfig.get_config_var('Py_DEBUG'): + parts.append('d') + if sysconfig.get_config_var('WITH_PYMALLOC'): + parts.append('m') + if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4: + parts.append('u') + return ''.join(parts) + ABI = _derive_abi() + del _derive_abi + +FILENAME_RE = re.compile(r''' +(?P<nm>[^-]+) +-(?P<vn>\d+[^-]*) +(-(?P<bn>\d+[^-]*))? 
+-(?P<py>\w+\d+(\.\w+\d+)*) +-(?P<bi>\w+) +-(?P<ar>\w+(\.\w+)*) +\.whl$ +''', re.IGNORECASE | re.VERBOSE) + +NAME_VERSION_RE = re.compile(r''' +(?P<nm>[^-]+) +-(?P<vn>\d+[^-]*) +(-(?P<bn>\d+[^-]*))?$ +''', re.IGNORECASE | re.VERBOSE) + +SHEBANG_RE = re.compile(br'\s*#![^\r\n]*') +SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$') +SHEBANG_PYTHON = b'#!python' +SHEBANG_PYTHONW = b'#!pythonw' + +if os.sep == '/': + to_posix = lambda o: o +else: + to_posix = lambda o: o.replace(os.sep, '/') + + +class Mounter(object): + def __init__(self): + self.impure_wheels = {} + self.libs = {} + + def add(self, pathname, extensions): + self.impure_wheels[pathname] = extensions + self.libs.update(extensions) + + def remove(self, pathname): + extensions = self.impure_wheels.pop(pathname) + for k, v in extensions: + if k in self.libs: + del self.libs[k] + + def find_module(self, fullname, path=None): + if fullname in self.libs: + result = self + else: + result = None + return result + + def load_module(self, fullname): + if fullname in sys.modules: + result = sys.modules[fullname] + else: + if fullname not in self.libs: + raise ImportError('unable to find extension for %s' % fullname) + result = imp.load_dynamic(fullname, self.libs[fullname]) + result.__loader__ = self + parts = fullname.rsplit('.', 1) + if len(parts) > 1: + result.__package__ = parts[0] + return result + +_hook = Mounter() + + +class Wheel(object): + """ + Class to build and install from Wheel files (PEP 427). + """ + + wheel_version = (1, 1) + hash_kind = 'sha256' + + def __init__(self, filename=None, sign=False, verify=False): + """ + Initialise an instance using a (valid) filename. + """ + self.sign = sign + self.should_verify = verify + self.buildver = '' + self.pyver = [PYVER] + self.abi = ['none'] + self.arch = ['any'] + self.dirname = os.getcwd() + if filename is None: + self.name = 'dummy' + self.version = '0.1' + self._filename = self.filename + else: + m = NAME_VERSION_RE.match(filename) + if m: + info = m.groupdict('') + self.name = info['nm'] + # Reinstate the local version separator + self.version = info['vn'].replace('_', '-') + self.buildver = info['bn'] + self._filename = self.filename + else: + dirname, filename = os.path.split(filename) + m = FILENAME_RE.match(filename) + if not m: + raise DistlibException('Invalid name or ' + 'filename: %r' % filename) + if dirname: + self.dirname = os.path.abspath(dirname) + self._filename = filename + info = m.groupdict('') + self.name = info['nm'] + self.version = info['vn'] + self.buildver = info['bn'] + self.pyver = info['py'].split('.') + self.abi = info['bi'].split('.') + self.arch = info['ar'].split('.') + + @property + def filename(self): + """ + Build and return a filename from the various components. 
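+
+ For instance, the defaults set in ``__init__`` (name ``dummy``,
+ version ``0.1``, pure-Python tags) produce something like
+ ``dummy-0.1-py27-none-any.whl``, with the ``py27`` part depending
+ on the running interpreter.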
+ """ + if self.buildver: + buildver = '-' + self.buildver + else: + buildver = '' + pyver = '.'.join(self.pyver) + abi = '.'.join(self.abi) + arch = '.'.join(self.arch) + # replace - with _ as a local version separator + version = self.version.replace('-', '_') + return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, + pyver, abi, arch) + + @property + def exists(self): + path = os.path.join(self.dirname, self.filename) + return os.path.isfile(path) + + @property + def tags(self): + for pyver in self.pyver: + for abi in self.abi: + for arch in self.arch: + yield pyver, abi, arch + + @cached_property + def metadata(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + wrapper = codecs.getreader('utf-8') + with ZipFile(pathname, 'r') as zf: + wheel_metadata = self.get_wheel_metadata(zf) + wv = wheel_metadata['Wheel-Version'].split('.', 1) + file_version = tuple([int(i) for i in wv]) + if file_version < (1, 1): + fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, 'METADATA'] + else: + fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME] + result = None + for fn in fns: + try: + metadata_filename = posixpath.join(info_dir, fn) + with zf.open(metadata_filename) as bf: + wf = wrapper(bf) + result = Metadata(fileobj=wf) + if result: + break + except KeyError: + pass + if not result: + raise ValueError('Invalid wheel, because metadata is ' + 'missing: looked in %s' % ', '.join(fns)) + return result + + def get_wheel_metadata(self, zf): + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + metadata_filename = posixpath.join(info_dir, 'WHEEL') + with zf.open(metadata_filename) as bf: + wf = codecs.getreader('utf-8')(bf) + message = message_from_file(wf) + return dict(message) + + @cached_property + def info(self): + pathname = os.path.join(self.dirname, self.filename) + with ZipFile(pathname, 'r') as zf: + result = self.get_wheel_metadata(zf) + return result + + def process_shebang(self, data): + m = SHEBANG_RE.match(data) + if m: + end = m.end() + shebang, data_after_shebang = data[:end], data[end:] + # Preserve any arguments after the interpreter + if b'pythonw' in shebang.lower(): + shebang_python = SHEBANG_PYTHONW + else: + shebang_python = SHEBANG_PYTHON + m = SHEBANG_DETAIL_RE.match(shebang) + if m: + args = b' ' + m.groups()[-1] + else: + args = b'' + shebang = shebang_python + args + data = shebang + data_after_shebang + else: + cr = data.find(b'\r') + lf = data.find(b'\n') + if cr < 0 or cr > lf: + term = b'\n' + else: + if data[cr:cr + 2] == b'\r\n': + term = b'\r\n' + else: + term = b'\r' + data = SHEBANG_PYTHON + term + data + return data + + def get_hash(self, data, hash_kind=None): + if hash_kind is None: + hash_kind = self.hash_kind + try: + hasher = getattr(hashlib, hash_kind) + except AttributeError: + raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) + result = hasher(data).digest() + result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') + return hash_kind, result + + def write_record(self, records, record_path, base): + records = list(records) # make a copy for sorting + p = to_posix(os.path.relpath(record_path, base)) + records.append((p, '', '')) + records.sort() + with CSVWriter(record_path) as writer: + for row in records: + writer.writerow(row) + + def write_records(self, info, libdir, archive_paths): + records = [] + distinfo, info_dir = info + hasher = getattr(hashlib, self.hash_kind) + for ap, p in 
archive_paths: + with open(p, 'rb') as f: + data = f.read() + digest = '%s=%s' % self.get_hash(data) + size = os.path.getsize(p) + records.append((ap, digest, size)) + + p = os.path.join(distinfo, 'RECORD') + self.write_record(records, p, libdir) + ap = to_posix(os.path.join(info_dir, 'RECORD')) + archive_paths.append((ap, p)) + + def build_zip(self, pathname, archive_paths): + with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: + for ap, p in archive_paths: + logger.debug('Wrote %s to %s in wheel', p, ap) + zf.write(p, ap) + + def build(self, paths, tags=None, wheel_version=None): + """ + Build a wheel from files in specified paths, and use any specified tags + when determining the name of the wheel. + """ + if tags is None: + tags = {} + + libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] + if libkey == 'platlib': + is_pure = 'false' + default_pyver = [IMPVER] + default_abi = [ABI] + default_arch = [ARCH] + else: + is_pure = 'true' + default_pyver = [PYVER] + default_abi = ['none'] + default_arch = ['any'] + + self.pyver = tags.get('pyver', default_pyver) + self.abi = tags.get('abi', default_abi) + self.arch = tags.get('arch', default_arch) + + libdir = paths[libkey] + + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + archive_paths = [] + + # First, stuff which is not in site-packages + for key in ('data', 'headers', 'scripts'): + if key not in paths: + continue + path = paths[key] + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + for fn in files: + p = fsdecode(os.path.join(root, fn)) + rp = os.path.relpath(p, path) + ap = to_posix(os.path.join(data_dir, key, rp)) + archive_paths.append((ap, p)) + if key == 'scripts' and not p.endswith('.exe'): + with open(p, 'rb') as f: + data = f.read() + data = self.process_shebang(data) + with open(p, 'wb') as f: + f.write(data) + + # Now, stuff which is in site-packages, other than the + # distinfo stuff. + path = libdir + distinfo = None + for root, dirs, files in os.walk(path): + if root == path: + # At the top level only, save distinfo for later + # and skip it for now + for i, dn in enumerate(dirs): + dn = fsdecode(dn) + if dn.endswith('.dist-info'): + distinfo = os.path.join(root, dn) + del dirs[i] + break + assert distinfo, '.dist-info directory expected, not found' + + for fn in files: + # comment out next suite to leave .pyc files in + if fsdecode(fn).endswith(('.pyc', '.pyo')): + continue + p = os.path.join(root, fn) + rp = to_posix(os.path.relpath(p, path)) + archive_paths.append((rp, p)) + + # Now distinfo. Assumed to be flat, i.e. os.listdir is enough. + files = os.listdir(distinfo) + for fn in files: + if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'): + p = fsdecode(os.path.join(distinfo, fn)) + ap = to_posix(os.path.join(info_dir, fn)) + archive_paths.append((ap, p)) + + wheel_metadata = [ + 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version), + 'Generator: distlib %s' % __version__, + 'Root-Is-Purelib: %s' % is_pure, + ] + for pyver, abi, arch in self.tags: + wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch)) + p = os.path.join(distinfo, 'WHEEL') + with open(p, 'w') as f: + f.write('\n'.join(wheel_metadata)) + ap = to_posix(os.path.join(info_dir, 'WHEEL')) + archive_paths.append((ap, p)) + + # Now, at last, RECORD. + # Paths in here are archive paths - nothing else makes sense. 
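+        # Illustrative driver code (comment-only sketch; the paths dict
+        # uses the sysconfig-style keys handled above, and the lib dir
+        # must already contain a *.dist-info directory):
+        #
+        #     w = Wheel('mypkg-1.0')              # name-version form
+        #     w.build({'purelib': 'build/lib'})   # writes a pure wheel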
+ self.write_records((distinfo, info_dir), libdir, archive_paths)
+ # Now, ready to build the zip file
+ pathname = os.path.join(self.dirname, self.filename)
+ self.build_zip(pathname, archive_paths)
+ return pathname
+
+ def install(self, paths, maker, **kwargs):
+ """
+ Install a wheel to the specified paths. If kwarg ``warner`` is
+ specified, it should be a callable, which will be called with two
+ tuples indicating the wheel version of this software and the wheel
+ version in the file, if there is a discrepancy in the versions.
+ This can be used to issue any warnings or raise any exceptions.
+ If kwarg ``lib_only`` is True, only the purelib/platlib files are
+ installed, and the headers, scripts, data and dist-info metadata are
+ not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
+ bytecode will try to use file-hash based invalidation (PEP-552) on
+ supported interpreter versions (CPython 3.7+).
+
+ The return value is a :class:`InstalledDistribution` instance unless
+ ``lib_only`` is True, in which case the return value is ``None``.
+ """
+
+ dry_run = maker.dry_run
+ warner = kwargs.get('warner')
+ lib_only = kwargs.get('lib_only', False)
+ bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
+
+ pathname = os.path.join(self.dirname, self.filename)
+ name_ver = '%s-%s' % (self.name, self.version)
+ data_dir = '%s.data' % name_ver
+ info_dir = '%s.dist-info' % name_ver
+
+ metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
+ wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+ record_name = posixpath.join(info_dir, 'RECORD')
+
+ wrapper = codecs.getreader('utf-8')
+
+ with ZipFile(pathname, 'r') as zf:
+ with zf.open(wheel_metadata_name) as bwf:
+ wf = wrapper(bwf)
+ message = message_from_file(wf)
+ wv = message['Wheel-Version'].split('.', 1)
+ file_version = tuple([int(i) for i in wv])
+ if (file_version != self.wheel_version) and warner:
+ warner(self.wheel_version, file_version)
+
+ if message['Root-Is-Purelib'] == 'true':
+ libdir = paths['purelib']
+ else:
+ libdir = paths['platlib']
+
+ records = {}
+ with zf.open(record_name) as bf:
+ with CSVReader(stream=bf) as reader:
+ for row in reader:
+ p = row[0]
+ records[p] = row
+
+ data_pfx = posixpath.join(data_dir, '')
+ info_pfx = posixpath.join(info_dir, '')
+ script_pfx = posixpath.join(data_dir, 'scripts', '')
+
+ # make a new instance rather than a copy of maker's,
+ # as we mutate it
+ fileop = FileOperator(dry_run=dry_run)
+ fileop.record = True # so we can rollback if needed
+
+ bc = not sys.dont_write_bytecode # Double negatives. Lovely!
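+ # Hedged usage sketch (comment only): maker is expected to behave
+ # like distlib's ScriptMaker - source_dir/target_dir attributes and
+ # a make() method - e.g.
+ #
+ #     from pip._vendor.distlib.scripts import ScriptMaker
+ #     wheel = Wheel('mypkg-1.0-py2.py3-none-any.whl')
+ #     dist = wheel.install(paths, ScriptMaker(None, None))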
+
+ outfiles = [] # for RECORD writing
+
+ # for script copying/shebang processing
+ workdir = tempfile.mkdtemp()
+ # set target dir later
+ # we default add_launchers to False, as the
+ # Python Launcher should be used instead
+ maker.source_dir = workdir
+ maker.target_dir = None
+ try:
+ for zinfo in zf.infolist():
+ arcname = zinfo.filename
+ if isinstance(arcname, text_type):
+ u_arcname = arcname
+ else:
+ u_arcname = arcname.decode('utf-8')
+ # The signature file won't be in RECORD,
+ # and we don't currently do anything with it
+ if u_arcname.endswith('/RECORD.jws'):
+ continue
+ row = records[u_arcname]
+ if row[2] and str(zinfo.file_size) != row[2]:
+ raise DistlibException('size mismatch for '
+ '%s' % u_arcname)
+ if row[1]:
+ kind, value = row[1].split('=', 1)
+ with zf.open(arcname) as bf:
+ data = bf.read()
+ _, digest = self.get_hash(data, kind)
+ if digest != value:
+ raise DistlibException('digest mismatch for '
+ '%s' % arcname)
+
+ if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
+ logger.debug('lib_only: skipping %s', u_arcname)
+ continue
+ is_script = (u_arcname.startswith(script_pfx)
+ and not u_arcname.endswith('.exe'))
+
+ if u_arcname.startswith(data_pfx):
+ _, where, rp = u_arcname.split('/', 2)
+ outfile = os.path.join(paths[where], convert_path(rp))
+ else:
+ # meant for site-packages.
+ if u_arcname in (wheel_metadata_name, record_name):
+ continue
+ outfile = os.path.join(libdir, convert_path(u_arcname))
+ if not is_script:
+ with zf.open(arcname) as bf:
+ fileop.copy_stream(bf, outfile)
+ outfiles.append(outfile)
+ # Double check the digest of the written file
+ if not dry_run and row[1]:
+ with open(outfile, 'rb') as bf:
+ data = bf.read()
+ _, newdigest = self.get_hash(data, kind)
+ if newdigest != digest:
+ raise DistlibException('digest mismatch '
+ 'on write for '
+ '%s' % outfile)
+ if bc and outfile.endswith('.py'):
+ try:
+ pyc = fileop.byte_compile(outfile,
+ hashed_invalidation=bc_hashed_invalidation)
+ outfiles.append(pyc)
+ except Exception:
+ # Don't give up if byte-compilation fails,
+ # but log it and perhaps warn the user
+ logger.warning('Byte-compilation failed',
+ exc_info=True)
+ else:
+ fn = os.path.basename(convert_path(arcname))
+ workname = os.path.join(workdir, fn)
+ with zf.open(arcname) as bf:
+ fileop.copy_stream(bf, workname)
+
+ dn, fn = os.path.split(outfile)
+ maker.target_dir = dn
+ filenames = maker.make(fn)
+ fileop.set_executable_mode(filenames)
+ outfiles.extend(filenames)
+
+ if lib_only:
+ logger.debug('lib_only: returning None')
+ dist = None
+ else:
+ # Generate scripts
+
+ # Try to get pydist.json so we can see if there are
+ # any commands to generate. If this fails (e.g. because
+ # of a legacy wheel), log a warning but don't give up.
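+ # Note (comment-only): the two branches below track the wheel
+ # spec's evolution - Wheel-Version 1.0 files carry
+ # entry_points.txt, while newer metadata exposes
+ # 'python.commands' via pydist.json.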
+ commands = None + file_version = self.info['Wheel-Version'] + if file_version == '1.0': + # Use legacy info + ep = posixpath.join(info_dir, 'entry_points.txt') + try: + with zf.open(ep) as bwf: + epdata = read_exports(bwf) + commands = {} + for key in ('console', 'gui'): + k = '%s_scripts' % key + if k in epdata: + commands['wrap_%s' % key] = d = {} + for v in epdata[k].values(): + s = '%s:%s' % (v.prefix, v.suffix) + if v.flags: + s += ' %s' % v.flags + d[v.name] = s + except Exception: + logger.warning('Unable to read legacy script ' + 'metadata, so cannot generate ' + 'scripts') + else: + try: + with zf.open(metadata_name) as bwf: + wf = wrapper(bwf) + commands = json.load(wf).get('extensions') + if commands: + commands = commands.get('python.commands') + except Exception: + logger.warning('Unable to read JSON metadata, so ' + 'cannot generate scripts') + if commands: + console_scripts = commands.get('wrap_console', {}) + gui_scripts = commands.get('wrap_gui', {}) + if console_scripts or gui_scripts: + script_dir = paths.get('scripts', '') + if not os.path.isdir(script_dir): + raise ValueError('Valid script path not ' + 'specified') + maker.target_dir = script_dir + for k, v in console_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script) + fileop.set_executable_mode(filenames) + + if gui_scripts: + options = {'gui': True } + for k, v in gui_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script, options) + fileop.set_executable_mode(filenames) + + p = os.path.join(libdir, info_dir) + dist = InstalledDistribution(p) + + # Write SHARED + paths = dict(paths) # don't change passed in dict + del paths['purelib'] + del paths['platlib'] + paths['lib'] = libdir + p = dist.write_shared_locations(paths, dry_run) + if p: + outfiles.append(p) + + # Write RECORD + dist.write_installed_files(outfiles, paths['prefix'], + dry_run) + return dist + except Exception: # pragma: no cover + logger.exception('installation failed.') + fileop.rollback() + raise + finally: + shutil.rmtree(workdir) + + def _get_dylib_cache(self): + global cache + if cache is None: + # Use native string to avoid issues on 2.x: see Python #20140. + base = os.path.join(get_cache_base(), str('dylib-cache'), + sys.version[:3]) + cache = Cache(base) + return cache + + def _get_extensions(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + arcname = posixpath.join(info_dir, 'EXTENSIONS') + wrapper = codecs.getreader('utf-8') + result = [] + with ZipFile(pathname, 'r') as zf: + try: + with zf.open(arcname) as bf: + wf = wrapper(bf) + extensions = json.load(wf) + cache = self._get_dylib_cache() + prefix = cache.prefix_to_dir(pathname) + cache_base = os.path.join(cache.base, prefix) + if not os.path.isdir(cache_base): + os.makedirs(cache_base) + for name, relpath in extensions.items(): + dest = os.path.join(cache_base, convert_path(relpath)) + if not os.path.exists(dest): + extract = True + else: + file_time = os.stat(dest).st_mtime + file_time = datetime.datetime.fromtimestamp(file_time) + info = zf.getinfo(relpath) + wheel_time = datetime.datetime(*info.date_time) + extract = wheel_time > file_time + if extract: + zf.extract(relpath, cache_base) + result.append((name, dest)) + except KeyError: + pass + return result + + def is_compatible(self): + """ + Determine if a wheel is compatible with the running system. 
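+
+ A hedged illustration: ``Wheel('foo-1.0-py2.py3-none-any.whl')``
+ is compatible on both major Python versions, because the running
+ interpreter's major-version tag (``py2`` or ``py3``, with
+ ``none``/``any`` ABI and architecture) is generated by
+ :func:`compatible_tags` and appears in the wheel's tag sets.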
+ """
+ return is_compatible(self)
+
+ def is_mountable(self):
+ """
+ Determine if a wheel is asserted as mountable by its metadata.
+ """
+ return True # for now - metadata details TBD
+
+ def mount(self, append=False):
+ pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+ if not self.is_compatible():
+ msg = 'Wheel %s not compatible with this Python.' % pathname
+ raise DistlibException(msg)
+ if not self.is_mountable():
+ msg = 'Wheel %s is marked as not mountable.' % pathname
+ raise DistlibException(msg)
+ if pathname in sys.path:
+ logger.debug('%s already in path', pathname)
+ else:
+ if append:
+ sys.path.append(pathname)
+ else:
+ sys.path.insert(0, pathname)
+ extensions = self._get_extensions()
+ if extensions:
+ if _hook not in sys.meta_path:
+ sys.meta_path.append(_hook)
+ _hook.add(pathname, extensions)
+
+ def unmount(self):
+ pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+ if pathname not in sys.path:
+ logger.debug('%s not in path', pathname)
+ else:
+ sys.path.remove(pathname)
+ if pathname in _hook.impure_wheels:
+ _hook.remove(pathname)
+ if not _hook.impure_wheels:
+ if _hook in sys.meta_path:
+ sys.meta_path.remove(_hook)
+
+ def verify(self):
+ pathname = os.path.join(self.dirname, self.filename)
+ name_ver = '%s-%s' % (self.name, self.version)
+ data_dir = '%s.data' % name_ver
+ info_dir = '%s.dist-info' % name_ver
+
+ metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
+ wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+ record_name = posixpath.join(info_dir, 'RECORD')
+
+ wrapper = codecs.getreader('utf-8')
+
+ with ZipFile(pathname, 'r') as zf:
+ with zf.open(wheel_metadata_name) as bwf:
+ wf = wrapper(bwf)
+ message = message_from_file(wf)
+ wv = message['Wheel-Version'].split('.', 1)
+ file_version = tuple([int(i) for i in wv])
+ # TODO version verification
+
+ records = {}
+ with zf.open(record_name) as bf:
+ with CSVReader(stream=bf) as reader:
+ for row in reader:
+ p = row[0]
+ records[p] = row
+
+ for zinfo in zf.infolist():
+ arcname = zinfo.filename
+ if isinstance(arcname, text_type):
+ u_arcname = arcname
+ else:
+ u_arcname = arcname.decode('utf-8')
+ if '..' in u_arcname:
+ raise DistlibException('invalid entry in '
+ 'wheel: %r' % u_arcname)
+
+ # The signature file won't be in RECORD,
+ # and we don't currently do anything with it
+ if u_arcname.endswith('/RECORD.jws'):
+ continue
+ row = records[u_arcname]
+ if row[2] and str(zinfo.file_size) != row[2]:
+ raise DistlibException('size mismatch for '
+ '%s' % u_arcname)
+ if row[1]:
+ kind, value = row[1].split('=', 1)
+ with zf.open(arcname) as bf:
+ data = bf.read()
+ _, digest = self.get_hash(data, kind)
+ if digest != value:
+ raise DistlibException('digest mismatch for '
+ '%s' % arcname)
+
+ def update(self, modifier, dest_dir=None, **kwargs):
+ """
+ Update the contents of a wheel in a generic way. The modifier should
+ be a callable which expects a dictionary argument: its keys are
+ archive-entry paths, and its values are absolute filesystem paths
+ where the contents of the corresponding archive entries can be found. The
+ modifier is free to change the contents of the files pointed to, add
+ new entries and remove entries, before returning. This method will
+ extract the entire contents of the wheel to a temporary location, call
+ the modifier, and then use the passed (and possibly updated)
+ dictionary to write a new wheel.
If ``dest_dir`` is specified, the new + wheel is written there -- otherwise, the original wheel is overwritten. + + The modifier should return True if it updated the wheel, else False. + This method returns the same value the modifier returns. + """ + + def get_version(path_map, info_dir): + version = path = None + key = '%s/%s' % (info_dir, METADATA_FILENAME) + if key not in path_map: + key = '%s/PKG-INFO' % info_dir + if key in path_map: + path = path_map[key] + version = Metadata(path=path).version + return version, path + + def update_version(version, path): + updated = None + try: + v = NormalizedVersion(version) + i = version.find('-') + if i < 0: + updated = '%s+1' % version + else: + parts = [int(s) for s in version[i + 1:].split('.')] + parts[-1] += 1 + updated = '%s+%s' % (version[:i], + '.'.join(str(i) for i in parts)) + except UnsupportedVersionError: + logger.debug('Cannot update non-compliant (PEP-440) ' + 'version %r', version) + if updated: + md = Metadata(path=path) + md.version = updated + legacy = not path.endswith(METADATA_FILENAME) + md.write(path=path, legacy=legacy) + logger.debug('Version updated from %r to %r', version, + updated) + + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + record_name = posixpath.join(info_dir, 'RECORD') + with tempdir() as workdir: + with ZipFile(pathname, 'r') as zf: + path_map = {} + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + if u_arcname == record_name: + continue + if '..' in u_arcname: + raise DistlibException('invalid entry in ' + 'wheel: %r' % u_arcname) + zf.extract(zinfo, workdir) + path = os.path.join(workdir, convert_path(u_arcname)) + path_map[u_arcname] = path + + # Remember the version. + original_version, _ = get_version(path_map, info_dir) + # Files extracted. Call the modifier. + modified = modifier(path_map, **kwargs) + if modified: + # Something changed - need to build a new wheel. + current_version, path = get_version(path_map, info_dir) + if current_version and (current_version == original_version): + # Add or update local version to signify changes. + update_version(current_version, path) + # Decide where the new wheel goes. + if dest_dir is None: + fd, newpath = tempfile.mkstemp(suffix='.whl', + prefix='wheel-update-', + dir=workdir) + os.close(fd) + else: + if not os.path.isdir(dest_dir): + raise DistlibException('Not a directory: %r' % dest_dir) + newpath = os.path.join(dest_dir, self.filename) + archive_paths = list(path_map.items()) + distinfo = os.path.join(workdir, info_dir) + info = distinfo, info_dir + self.write_records(info, workdir, archive_paths) + self.build_zip(newpath, archive_paths) + if dest_dir is None: + shutil.copyfile(newpath, pathname) + return modified + +def compatible_tags(): + """ + Return (pyver, abi, arch) tuples compatible with this Python. 
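+
+    For example, on a 64-bit Linux CPython 2.7 this would include tags such
+    as ('cp27', 'none', 'linux_x86_64'), ('py27', 'none', 'any') and
+    ('py2', 'none', 'any') -- the exact set depends on the interpreter.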
+ """ + versions = [VER_SUFFIX] + major = VER_SUFFIX[0] + for minor in range(sys.version_info[1] - 1, - 1, -1): + versions.append(''.join([major, str(minor)])) + + abis = [] + for suffix, _, _ in imp.get_suffixes(): + if suffix.startswith('.abi'): + abis.append(suffix.split('.', 2)[1]) + abis.sort() + if ABI != 'none': + abis.insert(0, ABI) + abis.append('none') + result = [] + + arches = [ARCH] + if sys.platform == 'darwin': + m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH) + if m: + name, major, minor, arch = m.groups() + minor = int(minor) + matches = [arch] + if arch in ('i386', 'ppc'): + matches.append('fat') + if arch in ('i386', 'ppc', 'x86_64'): + matches.append('fat3') + if arch in ('ppc64', 'x86_64'): + matches.append('fat64') + if arch in ('i386', 'x86_64'): + matches.append('intel') + if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'): + matches.append('universal') + while minor >= 0: + for match in matches: + s = '%s_%s_%s_%s' % (name, major, minor, match) + if s != ARCH: # already there + arches.append(s) + minor -= 1 + + # Most specific - our Python version, ABI and arch + for abi in abis: + for arch in arches: + result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) + + # where no ABI / arch dependency, but IMP_PREFIX dependency + for i, version in enumerate(versions): + result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) + if i == 0: + result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) + + # no IMP_PREFIX, ABI or arch dependency + for i, version in enumerate(versions): + result.append((''.join(('py', version)), 'none', 'any')) + if i == 0: + result.append((''.join(('py', version[0])), 'none', 'any')) + return set(result) + + +COMPATIBLE_TAGS = compatible_tags() + +del compatible_tags + + +def is_compatible(wheel, tags=None): + if not isinstance(wheel, Wheel): + wheel = Wheel(wheel) # assume it's a filename + result = False + if tags is None: + tags = COMPATIBLE_TAGS + for ver, abi, arch in tags: + if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: + result = True + break + return result diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyc new file mode 100644 index 0000000..a9513a0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distro.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/distro.py new file mode 100644 index 0000000..aa4defc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/distro.py @@ -0,0 +1,1197 @@ +# Copyright 2015,2016,2017 Nir Cohen +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The ``distro`` package (``distro`` stands for Linux Distribution) provides +information about the Linux distribution it runs on, such as a reliable +machine-readable distro ID, or version information. 
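+
+A minimal usage sketch (the outputs are illustrative and vary by platform)::
+
+    import distro
+
+    distro.id()                  # e.g. 'ubuntu'
+    distro.version(pretty=True)  # e.g. '16.04 (xenial)'
+    distro.info()                # consolidated dict: id, version, codename, ...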
+ +It is a renewed alternative implementation for Python's original +:py:func:`platform.linux_distribution` function, but it provides much more +functionality. An alternative implementation became necessary because Python +3.5 deprecated this function, and Python 3.7 is expected to remove it +altogether. Its predecessor function :py:func:`platform.dist` was already +deprecated since Python 2.6 and is also expected to be removed in Python 3.7. +Still, there are many cases in which access to OS distribution information +is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for +more information. +""" + +import os +import re +import sys +import json +import shlex +import logging +import argparse +import subprocess + + +_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc') +_OS_RELEASE_BASENAME = 'os-release' + +#: Translation table for normalizing the "ID" attribute defined in os-release +#: files, for use by the :func:`distro.id` method. +#: +#: * Key: Value as defined in the os-release file, translated to lower case, +#: with blanks translated to underscores. +#: +#: * Value: Normalized value. +NORMALIZED_OS_ID = {} + +#: Translation table for normalizing the "Distributor ID" attribute returned by +#: the lsb_release command, for use by the :func:`distro.id` method. +#: +#: * Key: Value as returned by the lsb_release command, translated to lower +#: case, with blanks translated to underscores. +#: +#: * Value: Normalized value. +NORMALIZED_LSB_ID = { + 'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux + 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation + 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server +} + +#: Translation table for normalizing the distro ID derived from the file name +#: of distro release files, for use by the :func:`distro.id` method. +#: +#: * Key: Value as derived from the file name of a distro release file, +#: translated to lower case, with blanks translated to underscores. +#: +#: * Value: Normalized value. +NORMALIZED_DISTRO_ID = { + 'redhat': 'rhel', # RHEL 6.x, 7.x +} + +# Pattern for content of distro release file (reversed) +_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( + r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)') + +# Pattern for base file name of distro release file +_DISTRO_RELEASE_BASENAME_PATTERN = re.compile( + r'(\w+)[-_](release|version)$') + +# Base file names to be ignored when searching for distro release file +_DISTRO_RELEASE_IGNORE_BASENAMES = ( + 'debian_version', + 'lsb-release', + 'oem-release', + _OS_RELEASE_BASENAME, + 'system-release' +) + + +def linux_distribution(full_distribution_name=True): + """ + Return information about the current OS distribution as a tuple + ``(id_name, version, codename)`` with items as follows: + + * ``id_name``: If *full_distribution_name* is false, the result of + :func:`distro.id`. Otherwise, the result of :func:`distro.name`. + + * ``version``: The result of :func:`distro.version`. + + * ``codename``: The result of :func:`distro.codename`. + + The interface of this function is compatible with the original + :py:func:`platform.linux_distribution` function, supporting a subset of + its parameters. + + The data it returns may not exactly be the same, because it uses more data + sources than the original function, and that may lead to different data if + the OS distribution is not consistent across multiple data sources it + provides (there are indeed such distributions ...). 
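+
+    For example, on a hypothetical Ubuntu system::
+
+        >>> distro.linux_distribution()
+        ('Ubuntu', '16.04', 'xenial')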
+ + Another reason for differences is the fact that the :func:`distro.id` + method normalizes the distro ID string to a reliable machine-readable value + for a number of popular OS distributions. + """ + return _distro.linux_distribution(full_distribution_name) + + +def id(): + """ + Return the distro ID of the current distribution, as a + machine-readable string. + + For a number of OS distributions, the returned distro ID value is + *reliable*, in the sense that it is documented and that it does not change + across releases of the distribution. + + This package maintains the following reliable distro ID values: + + ============== ========================================= + Distro ID Distribution + ============== ========================================= + "ubuntu" Ubuntu + "debian" Debian + "rhel" RedHat Enterprise Linux + "centos" CentOS + "fedora" Fedora + "sles" SUSE Linux Enterprise Server + "opensuse" openSUSE + "amazon" Amazon Linux + "arch" Arch Linux + "cloudlinux" CloudLinux OS + "exherbo" Exherbo Linux + "gentoo" GenToo Linux + "ibm_powerkvm" IBM PowerKVM + "kvmibm" KVM for IBM z Systems + "linuxmint" Linux Mint + "mageia" Mageia + "mandriva" Mandriva Linux + "parallels" Parallels + "pidora" Pidora + "raspbian" Raspbian + "oracle" Oracle Linux (and Oracle Enterprise Linux) + "scientific" Scientific Linux + "slackware" Slackware + "xenserver" XenServer + "openbsd" OpenBSD + "netbsd" NetBSD + "freebsd" FreeBSD + ============== ========================================= + + If you have a need to get distros for reliable IDs added into this set, + or if you find that the :func:`distro.id` function returns a different + distro ID for one of the listed distros, please create an issue in the + `distro issue tracker`_. + + **Lookup hierarchy and transformations:** + + First, the ID is obtained from the following sources, in the specified + order. The first available and non-empty value is used: + + * the value of the "ID" attribute of the os-release file, + + * the value of the "Distributor ID" attribute returned by the lsb_release + command, + + * the first part of the file name of the distro release file, + + The so determined ID value then passes the following transformations, + before it is returned by this method: + + * it is translated to lower case, + + * blanks (which should not be there anyway) are translated to underscores, + + * a normalization of the ID is performed, based upon + `normalization tables`_. The purpose of this normalization is to ensure + that the ID is as reliable as possible, even across incompatible changes + in the OS distributions. A common reason for an incompatible change is + the addition of an os-release file, or the addition of the lsb_release + command, with ID values that differ from what was previously determined + from the distro release file name. + """ + return _distro.id() + + +def name(pretty=False): + """ + Return the name of the current OS distribution, as a human-readable + string. + + If *pretty* is false, the name is returned without version or codename. + (e.g. "CentOS Linux") + + If *pretty* is true, the version and codename are appended. + (e.g. "CentOS Linux 7.1.1503 (Core)") + + **Lookup hierarchy:** + + The name is obtained from the following sources, in the specified order. 
+ The first available and non-empty value is used: + + * If *pretty* is false: + + - the value of the "NAME" attribute of the os-release file, + + - the value of the "Distributor ID" attribute returned by the lsb_release + command, + + - the value of the "<name>" field of the distro release file. + + * If *pretty* is true: + + - the value of the "PRETTY_NAME" attribute of the os-release file, + + - the value of the "Description" attribute returned by the lsb_release + command, + + - the value of the "<name>" field of the distro release file, appended + with the value of the pretty version ("<version_id>" and "<codename>" + fields) of the distro release file, if available. + """ + return _distro.name(pretty) + + +def version(pretty=False, best=False): + """ + Return the version of the current OS distribution, as a human-readable + string. + + If *pretty* is false, the version is returned without codename (e.g. + "7.0"). + + If *pretty* is true, the codename in parenthesis is appended, if the + codename is non-empty (e.g. "7.0 (Maipo)"). + + Some distributions provide version numbers with different precisions in + the different sources of distribution information. Examining the different + sources in a fixed priority order does not always yield the most precise + version (e.g. for Debian 8.2, or CentOS 7.1). + + The *best* parameter can be used to control the approach for the returned + version: + + If *best* is false, the first non-empty version number in priority order of + the examined sources is returned. + + If *best* is true, the most precise version number out of all examined + sources is returned. + + **Lookup hierarchy:** + + In all cases, the version number is obtained from the following sources. + If *best* is false, this order represents the priority order: + + * the value of the "VERSION_ID" attribute of the os-release file, + * the value of the "Release" attribute returned by the lsb_release + command, + * the version number parsed from the "<version_id>" field of the first line + of the distro release file, + * the version number parsed from the "PRETTY_NAME" attribute of the + os-release file, if it follows the format of the distro release files. + * the version number parsed from the "Description" attribute returned by + the lsb_release command, if it follows the format of the distro release + files. + """ + return _distro.version(pretty, best) + + +def version_parts(best=False): + """ + Return the version of the current OS distribution as a tuple + ``(major, minor, build_number)`` with items as follows: + + * ``major``: The result of :func:`distro.major_version`. + + * ``minor``: The result of :func:`distro.minor_version`. + + * ``build_number``: The result of :func:`distro.build_number`. + + For a description of the *best* parameter, see the :func:`distro.version` + method. + """ + return _distro.version_parts(best) + + +def major_version(best=False): + """ + Return the major version of the current OS distribution, as a string, + if provided. + Otherwise, the empty string is returned. The major version is the first + part of the dot-separated version string. + + For a description of the *best* parameter, see the :func:`distro.version` + method. + """ + return _distro.major_version(best) + + +def minor_version(best=False): + """ + Return the minor version of the current OS distribution, as a string, + if provided. + Otherwise, the empty string is returned. The minor version is the second + part of the dot-separated version string. 
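+    For example, for a version string of '7.1.1503' the minor version is '1'.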
+ + For a description of the *best* parameter, see the :func:`distro.version` + method. + """ + return _distro.minor_version(best) + + +def build_number(best=False): + """ + Return the build number of the current OS distribution, as a string, + if provided. + Otherwise, the empty string is returned. The build number is the third part + of the dot-separated version string. + + For a description of the *best* parameter, see the :func:`distro.version` + method. + """ + return _distro.build_number(best) + + +def like(): + """ + Return a space-separated list of distro IDs of distributions that are + closely related to the current OS distribution in regards to packaging + and programming interfaces, for example distributions the current + distribution is a derivative from. + + **Lookup hierarchy:** + + This information item is only provided by the os-release file. + For details, see the description of the "ID_LIKE" attribute in the + `os-release man page + <http://www.freedesktop.org/software/systemd/man/os-release.html>`_. + """ + return _distro.like() + + +def codename(): + """ + Return the codename for the release of the current OS distribution, + as a string. + + If the distribution does not have a codename, an empty string is returned. + + Note that the returned codename is not always really a codename. For + example, openSUSE returns "x86_64". This function does not handle such + cases in any special way and just returns the string it finds, if any. + + **Lookup hierarchy:** + + * the codename within the "VERSION" attribute of the os-release file, if + provided, + + * the value of the "Codename" attribute returned by the lsb_release + command, + + * the value of the "<codename>" field of the distro release file. + """ + return _distro.codename() + + +def info(pretty=False, best=False): + """ + Return certain machine-readable information items about the current OS + distribution in a dictionary, as shown in the following example: + + .. sourcecode:: python + + { + 'id': 'rhel', + 'version': '7.0', + 'version_parts': { + 'major': '7', + 'minor': '0', + 'build_number': '' + }, + 'like': 'fedora', + 'codename': 'Maipo' + } + + The dictionary structure and keys are always the same, regardless of which + information items are available in the underlying data sources. The values + for the various keys are as follows: + + * ``id``: The result of :func:`distro.id`. + + * ``version``: The result of :func:`distro.version`. + + * ``version_parts -> major``: The result of :func:`distro.major_version`. + + * ``version_parts -> minor``: The result of :func:`distro.minor_version`. + + * ``version_parts -> build_number``: The result of + :func:`distro.build_number`. + + * ``like``: The result of :func:`distro.like`. + + * ``codename``: The result of :func:`distro.codename`. + + For a description of the *pretty* and *best* parameters, see the + :func:`distro.version` method. + """ + return _distro.info(pretty, best) + + +def os_release_info(): + """ + Return a dictionary containing key-value pairs for the information items + from the os-release file data source of the current OS distribution. + + See `os-release file`_ for details about these information items. + """ + return _distro.os_release_info() + + +def lsb_release_info(): + """ + Return a dictionary containing key-value pairs for the information items + from the lsb_release command data source of the current OS distribution. + + See `lsb_release command output`_ for details about these information + items. 
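+
+    The keys are the attribute names from the lsb_release output, lower-cased
+    and with blanks replaced by underscores (e.g. ``distributor_id``,
+    ``release``, ``codename``).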
+    """
+    return _distro.lsb_release_info()
+
+
+def distro_release_info():
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the distro release file data source of the current OS distribution.
+
+    See `distro release file`_ for details about these information items.
+    """
+    return _distro.distro_release_info()
+
+
+def uname_info():
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the uname command data source of the current OS distribution.
+    """
+    return _distro.uname_info()
+
+
+def os_release_attr(attribute):
+    """
+    Return a single named information item from the os-release file data source
+    of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+
+    See `os-release file`_ for details about these information items.
+    """
+    return _distro.os_release_attr(attribute)
+
+
+def lsb_release_attr(attribute):
+    """
+    Return a single named information item from the lsb_release command output
+    data source of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+
+    See `lsb_release command output`_ for details about these information
+    items.
+    """
+    return _distro.lsb_release_attr(attribute)
+
+
+def distro_release_attr(attribute):
+    """
+    Return a single named information item from the distro release file
+    data source of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+
+    See `distro release file`_ for details about these information items.
+    """
+    return _distro.distro_release_attr(attribute)
+
+
+def uname_attr(attribute):
+    """
+    Return a single named information item from the uname command output
+    data source of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+    """
+    return _distro.uname_attr(attribute)
+
+
+class cached_property(object):
+    """A version of @property which caches the value. On access, it calls the
+    underlying function and sets the value in `__dict__` so future accesses
+    will not re-call the property.
+    """
+    def __init__(self, f):
+        self._fname = f.__name__
+        self._f = f
+
+    def __get__(self, obj, owner):
+        assert obj is not None, 'call {} on an instance'.format(self._fname)
+        ret = obj.__dict__[self._fname] = self._f(obj)
+        return ret
+
+
+class LinuxDistribution(object):
+    """
+    Provides information about an OS distribution.
+
+    This package creates a private module-global instance of this class with
+    default initialization arguments, that is used by the
+    `consolidated accessor functions`_ and `single source accessor functions`_.
+    By using default initialization arguments, that module-global instance
+    returns data about the current OS distribution (i.e. the distro this
+    package runs on).
+
+    Normally, it is not necessary to create additional instances of this class.
+    However, in situations where control is needed over the exact data sources
+    that are used, instances of this class can be created with a specific
+    distro release file, or a specific os-release file, or without invoking the
+    lsb_release command.
+    """
+
+    def __init__(self,
+                 include_lsb=True,
+                 os_release_file='',
+                 distro_release_file='',
+                 include_uname=True):
+        """
+        The initialization method of this class gathers information from the
+        available data sources, and stores that in private instance attributes.
+        Subsequent access to the information items uses these private instance
+        attributes, so that the data sources are read only once.
+
+        Parameters:
+
+        * ``include_lsb`` (bool): Controls whether the
+          `lsb_release command output`_ is included as a data source.
+
+          If the lsb_release command is not available in the program execution
+          path, the data source for the lsb_release command will be empty.
+
+        * ``os_release_file`` (string): The path name of the
+          `os-release file`_ that is to be used as a data source.
+
+          An empty string (the default) will cause the default path name to
+          be used (see `os-release file`_ for details).
+
+          If the specified or defaulted os-release file does not exist, the
+          data source for the os-release file will be empty.
+
+        * ``distro_release_file`` (string): The path name of the
+          `distro release file`_ that is to be used as a data source.
+
+          An empty string (the default) will cause a default search algorithm
+          to be used (see `distro release file`_ for details).
+
+          If the specified distro release file does not exist, or if no default
+          distro release file can be found, the data source for the distro
+          release file will be empty.
+
+        * ``include_uname`` (bool): Controls whether uname command output is
+          included as a data source. If the uname command is not available in
+          the program execution path the data source for the uname command will
+          be empty.
+
+        Public instance attributes:
+
+        * ``os_release_file`` (string): The path name of the
+          `os-release file`_ that is actually used as a data source. The
+          empty string if no os-release file is used as a data source.
+
+        * ``distro_release_file`` (string): The path name of the
+          `distro release file`_ that is actually used as a data source. The
+          empty string if no distro release file is used as a data source.
+
+        * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
+          This controls whether the lsb information will be loaded.
+
+        * ``include_uname`` (bool): The result of the ``include_uname``
+          parameter. This controls whether the uname information will
+          be loaded.
+
+        Raises:
+
+        * :py:exc:`IOError`: Some I/O issue with an os-release file or distro
+          release file.
+
+        * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
+          some issue (other than not being available in the program execution
+          path).
+
+        * :py:exc:`UnicodeError`: A data source has unexpected characters or
+          uses an unexpected encoding.
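+
+        A hypothetical example of pinning the data sources::
+
+            ld = LinuxDistribution(include_lsb=False,
+                                   os_release_file='/etc/os-release')
+            ld.id()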
+ """ + self.os_release_file = os_release_file or \ + os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME) + self.distro_release_file = distro_release_file or '' # updated later + self.include_lsb = include_lsb + self.include_uname = include_uname + + def __repr__(self): + """Return repr of all info + """ + return \ + "LinuxDistribution(" \ + "os_release_file={self.os_release_file!r}, " \ + "distro_release_file={self.distro_release_file!r}, " \ + "include_lsb={self.include_lsb!r}, " \ + "include_uname={self.include_uname!r}, " \ + "_os_release_info={self._os_release_info!r}, " \ + "_lsb_release_info={self._lsb_release_info!r}, " \ + "_distro_release_info={self._distro_release_info!r}, " \ + "_uname_info={self._uname_info!r})".format( + self=self) + + def linux_distribution(self, full_distribution_name=True): + """ + Return information about the OS distribution that is compatible + with Python's :func:`platform.linux_distribution`, supporting a subset + of its parameters. + + For details, see :func:`distro.linux_distribution`. + """ + return ( + self.name() if full_distribution_name else self.id(), + self.version(), + self.codename() + ) + + def id(self): + """Return the distro ID of the OS distribution, as a string. + + For details, see :func:`distro.id`. + """ + def normalize(distro_id, table): + distro_id = distro_id.lower().replace(' ', '_') + return table.get(distro_id, distro_id) + + distro_id = self.os_release_attr('id') + if distro_id: + return normalize(distro_id, NORMALIZED_OS_ID) + + distro_id = self.lsb_release_attr('distributor_id') + if distro_id: + return normalize(distro_id, NORMALIZED_LSB_ID) + + distro_id = self.distro_release_attr('id') + if distro_id: + return normalize(distro_id, NORMALIZED_DISTRO_ID) + + distro_id = self.uname_attr('id') + if distro_id: + return normalize(distro_id, NORMALIZED_DISTRO_ID) + + return '' + + def name(self, pretty=False): + """ + Return the name of the OS distribution, as a string. + + For details, see :func:`distro.name`. + """ + name = self.os_release_attr('name') \ + or self.lsb_release_attr('distributor_id') \ + or self.distro_release_attr('name') \ + or self.uname_attr('name') + if pretty: + name = self.os_release_attr('pretty_name') \ + or self.lsb_release_attr('description') + if not name: + name = self.distro_release_attr('name') \ + or self.uname_attr('name') + version = self.version(pretty=True) + if version: + name = name + ' ' + version + return name or '' + + def version(self, pretty=False, best=False): + """ + Return the version of the OS distribution, as a string. + + For details, see :func:`distro.version`. + """ + versions = [ + self.os_release_attr('version_id'), + self.lsb_release_attr('release'), + self.distro_release_attr('version_id'), + self._parse_distro_release_content( + self.os_release_attr('pretty_name')).get('version_id', ''), + self._parse_distro_release_content( + self.lsb_release_attr('description')).get('version_id', ''), + self.uname_attr('release') + ] + version = '' + if best: + # This algorithm uses the last version in priority order that has + # the best precision. If the versions are not in conflict, that + # does not matter; otherwise, using the last one instead of the + # first one might be considered a surprise. 
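+            # For example, given candidate versions '7.0' and '7.1.1503',
+            # '7.1.1503' wins because it has more dot-separated parts.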
+            for v in versions:
+                if v.count(".") > version.count(".") or version == '':
+                    version = v
+        else:
+            for v in versions:
+                if v != '':
+                    version = v
+                    break
+        if pretty and version and self.codename():
+            version = u'{0} ({1})'.format(version, self.codename())
+        return version
+
+    def version_parts(self, best=False):
+        """
+        Return the version of the OS distribution, as a tuple of version
+        numbers.
+
+        For details, see :func:`distro.version_parts`.
+        """
+        version_str = self.version(best=best)
+        if version_str:
+            version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
+            matches = version_regex.match(version_str)
+            if matches:
+                major, minor, build_number = matches.groups()
+                return major, minor or '', build_number or ''
+        return '', '', ''
+
+    def major_version(self, best=False):
+        """
+        Return the major version number of the current distribution.
+
+        For details, see :func:`distro.major_version`.
+        """
+        return self.version_parts(best)[0]
+
+    def minor_version(self, best=False):
+        """
+        Return the minor version number of the current distribution.
+
+        For details, see :func:`distro.minor_version`.
+        """
+        return self.version_parts(best)[1]
+
+    def build_number(self, best=False):
+        """
+        Return the build number of the current distribution.
+
+        For details, see :func:`distro.build_number`.
+        """
+        return self.version_parts(best)[2]
+
+    def like(self):
+        """
+        Return the IDs of distributions that are like the OS distribution.
+
+        For details, see :func:`distro.like`.
+        """
+        return self.os_release_attr('id_like') or ''
+
+    def codename(self):
+        """
+        Return the codename of the OS distribution.
+
+        For details, see :func:`distro.codename`.
+        """
+        return self.os_release_attr('codename') \
+            or self.lsb_release_attr('codename') \
+            or self.distro_release_attr('codename') \
+            or ''
+
+    def info(self, pretty=False, best=False):
+        """
+        Return certain machine-readable information about the OS
+        distribution.
+
+        For details, see :func:`distro.info`.
+        """
+        return dict(
+            id=self.id(),
+            version=self.version(pretty, best),
+            version_parts=dict(
+                major=self.major_version(best),
+                minor=self.minor_version(best),
+                build_number=self.build_number(best)
+            ),
+            like=self.like(),
+            codename=self.codename(),
+        )
+
+    def os_release_info(self):
+        """
+        Return a dictionary containing key-value pairs for the information
+        items from the os-release file data source of the OS distribution.
+
+        For details, see :func:`distro.os_release_info`.
+        """
+        return self._os_release_info
+
+    def lsb_release_info(self):
+        """
+        Return a dictionary containing key-value pairs for the information
+        items from the lsb_release command data source of the OS
+        distribution.
+
+        For details, see :func:`distro.lsb_release_info`.
+        """
+        return self._lsb_release_info
+
+    def distro_release_info(self):
+        """
+        Return a dictionary containing key-value pairs for the information
+        items from the distro release file data source of the OS
+        distribution.
+
+        For details, see :func:`distro.distro_release_info`.
+        """
+        return self._distro_release_info
+
+    def uname_info(self):
+        """
+        Return a dictionary containing key-value pairs for the information
+        items from the uname command data source of the OS distribution.
+        """
+        return self._uname_info
+
+    def os_release_attr(self, attribute):
+        """
+        Return a single named information item from the os-release file data
+        source of the OS distribution.
+
+        For details, see :func:`distro.os_release_attr`.
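+
+        For example, ``os_release_attr('pretty_name')`` returns the value of
+        the ``PRETTY_NAME`` attribute parsed from the os-release file, or the
+        empty string if the attribute is not present.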
+        """
+        return self._os_release_info.get(attribute, '')
+
+    def lsb_release_attr(self, attribute):
+        """
+        Return a single named information item from the lsb_release command
+        output data source of the OS distribution.
+
+        For details, see :func:`distro.lsb_release_attr`.
+        """
+        return self._lsb_release_info.get(attribute, '')
+
+    def distro_release_attr(self, attribute):
+        """
+        Return a single named information item from the distro release file
+        data source of the OS distribution.
+
+        For details, see :func:`distro.distro_release_attr`.
+        """
+        return self._distro_release_info.get(attribute, '')
+
+    def uname_attr(self, attribute):
+        """
+        Return a single named information item from the uname command
+        output data source of the OS distribution.
+
+        For details, see :func:`distro.uname_attr`.
+        """
+        return self._uname_info.get(attribute, '')
+
+    @cached_property
+    def _os_release_info(self):
+        """
+        Get the information items from the specified os-release file.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        if os.path.isfile(self.os_release_file):
+            with open(self.os_release_file) as release_file:
+                return self._parse_os_release_content(release_file)
+        return {}
+
+    @staticmethod
+    def _parse_os_release_content(lines):
+        """
+        Parse the lines of an os-release file.
+
+        Parameters:
+
+        * lines: Iterable through the lines in the os-release file.
+                 Each line must be a unicode string or a UTF-8 encoded byte
+                 string.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        props = {}
+        lexer = shlex.shlex(lines, posix=True)
+        lexer.whitespace_split = True
+
+        # The shlex module defines its `wordchars` variable using literals,
+        # making it dependent on the encoding of the Python source file.
+        # In Python 2.6 and 2.7, the shlex source file is encoded in
+        # 'iso-8859-1', and the `wordchars` variable is defined as a byte
+        # string. This causes a UnicodeDecodeError to be raised when the
+        # parsed content is a unicode object. The following fix resolves that
+        # (... but it should be fixed in shlex...):
+        if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
+            lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
+
+        tokens = list(lexer)
+        for token in tokens:
+            # At this point, all shell-like parsing has been done (i.e.
+            # comments processed, quotes and backslash escape sequences
+            # processed, multi-line values assembled, trailing newlines
+            # stripped, etc.), so the tokens are now either:
+            # * variable assignments: var=value
+            # * commands or their arguments (not allowed in os-release)
+            if '=' in token:
+                k, v = token.split('=', 1)
+                if isinstance(v, bytes):
+                    v = v.decode('utf-8')
+                props[k.lower()] = v
+                if k == 'VERSION':
+                    # this handles cases in which the codename is in
+                    # the `(CODENAME)` (rhel, centos, fedora) format
+                    # or in the `, CODENAME` format (Ubuntu).
+                    codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
+                    if codename:
+                        codename = codename.group()
+                        codename = codename.strip('()')
+                        codename = codename.strip(',')
+                        codename = codename.strip()
+                        # codename appears within parentheses.
+                        props['codename'] = codename
+                    else:
+                        props['codename'] = ''
+            else:
+                # Ignore any tokens that are not variable assignments
+                pass
+        return props
+
+    @cached_property
+    def _lsb_release_info(self):
+        """
+        Get the information items from the lsb_release command output.
+
+        Returns:
+            A dictionary containing all information items.
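+
+            The dictionary is empty if ``include_lsb`` is false or if the
+            lsb_release command is not available.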
+ """ + if not self.include_lsb: + return {} + with open(os.devnull, 'w') as devnull: + try: + cmd = ('lsb_release', '-a') + stdout = subprocess.check_output(cmd, stderr=devnull) + except OSError: # Command not found + return {} + content = stdout.decode(sys.getfilesystemencoding()).splitlines() + return self._parse_lsb_release_content(content) + + @staticmethod + def _parse_lsb_release_content(lines): + """ + Parse the output of the lsb_release command. + + Parameters: + + * lines: Iterable through the lines of the lsb_release output. + Each line must be a unicode string or a UTF-8 encoded byte + string. + + Returns: + A dictionary containing all information items. + """ + props = {} + for line in lines: + kv = line.strip('\n').split(':', 1) + if len(kv) != 2: + # Ignore lines without colon. + continue + k, v = kv + props.update({k.replace(' ', '_').lower(): v.strip()}) + return props + + @cached_property + def _uname_info(self): + with open(os.devnull, 'w') as devnull: + try: + cmd = ('uname', '-rs') + stdout = subprocess.check_output(cmd, stderr=devnull) + except OSError: + return {} + content = stdout.decode(sys.getfilesystemencoding()).splitlines() + return self._parse_uname_content(content) + + @staticmethod + def _parse_uname_content(lines): + props = {} + match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip()) + if match: + name, version = match.groups() + + # This is to prevent the Linux kernel version from + # appearing as the 'best' version on otherwise + # identifiable distributions. + if name == 'Linux': + return {} + props['id'] = name.lower() + props['name'] = name + props['release'] = version + return props + + @cached_property + def _distro_release_info(self): + """ + Get the information items from the specified distro release file. + + Returns: + A dictionary containing all information items. + """ + if self.distro_release_file: + # If it was specified, we use it and parse what we can, even if + # its file name or content does not match the expected pattern. + distro_info = self._parse_distro_release_file( + self.distro_release_file) + basename = os.path.basename(self.distro_release_file) + # The file name pattern for user-specified distro release files + # is somewhat more tolerant (compared to when searching for the + # file), because we want to use what was specified as best as + # possible. + match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) + if match: + distro_info['id'] = match.group(1) + return distro_info + else: + try: + basenames = os.listdir(_UNIXCONFDIR) + # We sort for repeatability in cases where there are multiple + # distro specific files; e.g. CentOS, Oracle, Enterprise all + # containing `redhat-release` on top of their own. + basenames.sort() + except OSError: + # This may occur when /etc is not readable but we can't be + # sure about the *-release files. Check common entries of + # /etc for information. If they turn out to not be there the + # error is handled in `_parse_distro_release_file()`. 
+ basenames = ['SuSE-release', + 'arch-release', + 'base-release', + 'centos-release', + 'fedora-release', + 'gentoo-release', + 'mageia-release', + 'mandrake-release', + 'mandriva-release', + 'mandrivalinux-release', + 'manjaro-release', + 'oracle-release', + 'redhat-release', + 'sl-release', + 'slackware-version'] + for basename in basenames: + if basename in _DISTRO_RELEASE_IGNORE_BASENAMES: + continue + match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) + if match: + filepath = os.path.join(_UNIXCONFDIR, basename) + distro_info = self._parse_distro_release_file(filepath) + if 'name' in distro_info: + # The name is always present if the pattern matches + self.distro_release_file = filepath + distro_info['id'] = match.group(1) + return distro_info + return {} + + def _parse_distro_release_file(self, filepath): + """ + Parse a distro release file. + + Parameters: + + * filepath: Path name of the distro release file. + + Returns: + A dictionary containing all information items. + """ + try: + with open(filepath) as fp: + # Only parse the first line. For instance, on SLES there + # are multiple lines. We don't want them... + return self._parse_distro_release_content(fp.readline()) + except (OSError, IOError): + # Ignore not being able to read a specific, seemingly version + # related file. + # See https://github.com/nir0s/distro/issues/162 + return {} + + @staticmethod + def _parse_distro_release_content(line): + """ + Parse a line from a distro release file. + + Parameters: + * line: Line from the distro release file. Must be a unicode string + or a UTF-8 encoded byte string. + + Returns: + A dictionary containing all information items. + """ + if isinstance(line, bytes): + line = line.decode('utf-8') + matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match( + line.strip()[::-1]) + distro_info = {} + if matches: + # regexp ensures non-None + distro_info['name'] = matches.group(3)[::-1] + if matches.group(2): + distro_info['version_id'] = matches.group(2)[::-1] + if matches.group(1): + distro_info['codename'] = matches.group(1)[::-1] + elif line: + distro_info['name'] = line.strip() + return distro_info + + +_distro = LinuxDistribution() + + +def main(): + logger = logging.getLogger(__name__) + logger.setLevel(logging.DEBUG) + logger.addHandler(logging.StreamHandler(sys.stdout)) + + parser = argparse.ArgumentParser(description="OS distro info tool") + parser.add_argument( + '--json', + '-j', + help="Output in machine readable format", + action="store_true") + args = parser.parse_args() + + if args.json: + logger.info(json.dumps(info(), indent=4, sort_keys=True)) + else: + logger.info('Name: %s', name(pretty=True)) + distribution_version = version(pretty=True) + logger.info('Version: %s', distribution_version) + distribution_codename = codename() + logger.info('Codename: %s', distribution_codename) + + +if __name__ == '__main__': + main() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/distro.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/distro.pyc new file mode 100644 index 0000000..015e880 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/distro.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.py new file mode 100644 index 0000000..0491234 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.py @@ -0,0 +1,35 @@ +""" +HTML parsing library based on the `WHATWG HTML 
specification +<https://whatwg.org/html>`_. The parser is designed to be compatible with +existing HTML found in the wild and implements well-defined error recovery that +is largely compatible with modern desktop web browsers. + +Example usage:: + + from pip._vendor import html5lib + with open("my_document.html", "rb") as f: + tree = html5lib.parse(f) + +For convenience, this module re-exports the following names: + +* :func:`~.html5parser.parse` +* :func:`~.html5parser.parseFragment` +* :class:`~.html5parser.HTMLParser` +* :func:`~.treebuilders.getTreeBuilder` +* :func:`~.treewalkers.getTreeWalker` +* :func:`~.serializer.serialize` +""" + +from __future__ import absolute_import, division, unicode_literals + +from .html5parser import HTMLParser, parse, parseFragment +from .treebuilders import getTreeBuilder +from .treewalkers import getTreeWalker +from .serializer import serialize + +__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", + "getTreeWalker", "serialize"] + +# this has to be at the top level, see how setup.py parses this +#: Distribution version number. +__version__ = "1.0.1" diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.pyc new file mode 100644 index 0000000..b0d7613 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.py new file mode 100644 index 0000000..4c77717 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.py @@ -0,0 +1,288 @@ +from __future__ import absolute_import, division, unicode_literals + +import re +import warnings + +from .constants import DataLossWarning + +baseChar = """ +[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | +[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | +[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | +[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | +[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | +[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | +[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | +[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | +[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | +[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | +[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | +[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | +[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | +[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | +[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | +[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | +[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | +[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | +[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | +[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | +[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | +[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | +[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | +[#x0BA8-#x0BAA] | 
[#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | +[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | +[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | +[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | +[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | +[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | +[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | +#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | +#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | +#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | +[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | +[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | +#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | +[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | +[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | +[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | +[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | +[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | +#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | +[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | +[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | +[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | +[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]""" + +ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]""" + +combiningCharacter = """ +[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | +[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | +[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | +[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | +#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | +[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | +[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | +#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | +[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | +[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | +#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | +[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | +[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | +[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | +[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | +[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | +#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | +[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | +#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | +[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | +[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | +#x3099 | #x309A""" + +digit = """ +[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | +[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | +[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | +[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]""" + +extender = """ +#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | +#[#x3031-#x3035] 
| [#x309D-#x309E] | [#x30FC-#x30FE]""" + +letter = " | ".join([baseChar, ideographic]) + +# Without the +name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter, + extender]) +nameFirst = " | ".join([letter, "_"]) + +reChar = re.compile(r"#x([\d|A-F]{4,4})") +reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]") + + +def charStringToList(chars): + charRanges = [item.strip() for item in chars.split(" | ")] + rv = [] + for item in charRanges: + foundMatch = False + for regexp in (reChar, reCharRange): + match = regexp.match(item) + if match is not None: + rv.append([hexToInt(item) for item in match.groups()]) + if len(rv[-1]) == 1: + rv[-1] = rv[-1] * 2 + foundMatch = True + break + if not foundMatch: + assert len(item) == 1 + + rv.append([ord(item)] * 2) + rv = normaliseCharList(rv) + return rv + + +def normaliseCharList(charList): + charList = sorted(charList) + for item in charList: + assert item[1] >= item[0] + rv = [] + i = 0 + while i < len(charList): + j = 1 + rv.append(charList[i]) + while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1: + rv[-1][1] = charList[i + j][1] + j += 1 + i += j + return rv + +# We don't really support characters above the BMP :( +max_unicode = int("FFFF", 16) + + +def missingRanges(charList): + rv = [] + if charList[0] != 0: + rv.append([0, charList[0][0] - 1]) + for i, item in enumerate(charList[:-1]): + rv.append([item[1] + 1, charList[i + 1][0] - 1]) + if charList[-1][1] != max_unicode: + rv.append([charList[-1][1] + 1, max_unicode]) + return rv + + +def listToRegexpStr(charList): + rv = [] + for item in charList: + if item[0] == item[1]: + rv.append(escapeRegexp(chr(item[0]))) + else: + rv.append(escapeRegexp(chr(item[0])) + "-" + + escapeRegexp(chr(item[1]))) + return "[%s]" % "".join(rv) + + +def hexToInt(hex_str): + return int(hex_str, 16) + + +def escapeRegexp(string): + specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}", + "[", "]", "|", "(", ")", "-") + for char in specialCharacters: + string = string.replace(char, "\\" + char) + + return string + +# output from the above +nonXmlNameBMPRegexp = 
re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa + +nonXmlNameFirstBMPRegexp = 
re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa + +# Simpler things +nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]") + + +class InfosetFilter(object): + replacementRegexp = re.compile(r"U[\dA-F]{5,5}") + + def __init__(self, + dropXmlnsLocalName=False, + dropXmlnsAttrNs=False, + preventDoubleDashComments=False, + preventDashAtCommentEnd=False, + replaceFormFeedCharacters=True, + preventSingleQuotePubid=False): + + self.dropXmlnsLocalName = dropXmlnsLocalName + self.dropXmlnsAttrNs = dropXmlnsAttrNs + + self.preventDoubleDashComments = preventDoubleDashComments + self.preventDashAtCommentEnd = preventDashAtCommentEnd + + self.replaceFormFeedCharacters = replaceFormFeedCharacters + + self.preventSingleQuotePubid = preventSingleQuotePubid + + self.replaceCache = {} + + def coerceAttribute(self, name, namespace=None): + if self.dropXmlnsLocalName and name.startswith("xmlns:"): + warnings.warn("Attributes cannot begin with xmlns", DataLossWarning) + return None + elif (self.dropXmlnsAttrNs and + namespace == "http://www.w3.org/2000/xmlns/"): + warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning) + return None + else: + return self.toXmlName(name) + + def coerceElement(self, name): + return self.toXmlName(name) + + def coerceComment(self, data): + if self.preventDoubleDashComments: + while "--" in data: + warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning) + data = data.replace("--", "- -") + if data.endswith("-"): + warnings.warn("Comments cannot end in 
a dash", DataLossWarning) + data += " " + return data + + def coerceCharacters(self, data): + if self.replaceFormFeedCharacters: + for _ in range(data.count("\x0C")): + warnings.warn("Text cannot contain U+000C", DataLossWarning) + data = data.replace("\x0C", " ") + # Other non-xml characters + return data + + def coercePubid(self, data): + dataOutput = data + for char in nonPubidCharRegexp.findall(data): + warnings.warn("Coercing non-XML pubid", DataLossWarning) + replacement = self.getReplacementCharacter(char) + dataOutput = dataOutput.replace(char, replacement) + if self.preventSingleQuotePubid and dataOutput.find("'") >= 0: + warnings.warn("Pubid cannot contain single quote", DataLossWarning) + dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'")) + return dataOutput + + def toXmlName(self, name): + nameFirst = name[0] + nameRest = name[1:] + m = nonXmlNameFirstBMPRegexp.match(nameFirst) + if m: + warnings.warn("Coercing non-XML name", DataLossWarning) + nameFirstOutput = self.getReplacementCharacter(nameFirst) + else: + nameFirstOutput = nameFirst + + nameRestOutput = nameRest + replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest)) + for char in replaceChars: + warnings.warn("Coercing non-XML name", DataLossWarning) + replacement = self.getReplacementCharacter(char) + nameRestOutput = nameRestOutput.replace(char, replacement) + return nameFirstOutput + nameRestOutput + + def getReplacementCharacter(self, char): + if char in self.replaceCache: + replacement = self.replaceCache[char] + else: + replacement = self.escapeChar(char) + return replacement + + def fromXmlName(self, name): + for item in set(self.replacementRegexp.findall(name)): + name = name.replace(item, self.unescapeChar(item)) + return name + + def escapeChar(self, char): + replacement = "U%05X" % ord(char) + self.replaceCache[char] = replacement + return replacement + + def unescapeChar(self, charcode): + return chr(int(charcode[1:], 16)) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyc new file mode 100644 index 0000000..49131be Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_ihatexml.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.py new file mode 100644 index 0000000..a65e55f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.py @@ -0,0 +1,923 @@ +from __future__ import absolute_import, division, unicode_literals + +from pip._vendor.six import text_type, binary_type +from pip._vendor.six.moves import http_client, urllib + +import codecs +import re + +from pip._vendor import webencodings + +from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase +from .constants import _ReparseException +from . 
import _utils + +from io import StringIO + +try: + from io import BytesIO +except ImportError: + BytesIO = StringIO + +# Non-unicode versions of constants for use in the pre-parser +spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters]) +asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters]) +asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase]) +spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"]) + + +invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa + +if _utils.supports_lone_surrogates: + # Use one extra step of indirection and create surrogates with + # eval. Not using this indirection would introduce an illegal + # unicode literal on platforms not supporting such lone + # surrogates. + assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1 + invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] + + eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used + "]") +else: + invalid_unicode_re = re.compile(invalid_unicode_no_surrogate) + +non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, + 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, + 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, + 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, + 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, + 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, + 0x10FFFE, 0x10FFFF]) + +ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]") + +# Cache for charsUntil() +charsUntilRegEx = {} + + +class BufferedStream(object): + """Buffering for streams that do not have buffering of their own + + The buffer is implemented as a list of chunks on the assumption that + joining many strings will be slow since it is O(n**2) + """ + + def __init__(self, stream): + self.stream = stream + self.buffer = [] + self.position = [-1, 0] # chunk number, offset + + def tell(self): + pos = 0 + for chunk in self.buffer[:self.position[0]]: + pos += len(chunk) + pos += self.position[1] + return pos + + def seek(self, pos): + assert pos <= self._bufferedBytes() + offset = pos + i = 0 + while len(self.buffer[i]) < offset: + offset -= len(self.buffer[i]) + i += 1 + self.position = [i, offset] + + def read(self, bytes): + if not self.buffer: + return self._readStream(bytes) + elif (self.position[0] == len(self.buffer) and + self.position[1] == len(self.buffer[-1])): + return self._readStream(bytes) + else: + return self._readFromBuffer(bytes) + + def _bufferedBytes(self): + return sum([len(item) for item in self.buffer]) + + def _readStream(self, bytes): + data = self.stream.read(bytes) + self.buffer.append(data) + self.position[0] += 1 + self.position[1] = len(data) + return data + + def _readFromBuffer(self, bytes): + remainingBytes = bytes + rv = [] + bufferIndex = self.position[0] + bufferOffset = self.position[1] + while bufferIndex < len(self.buffer) and remainingBytes != 0: + assert remainingBytes > 0 + bufferedData = self.buffer[bufferIndex] + + if remainingBytes <= len(bufferedData) - bufferOffset: + bytesToRead = remainingBytes + 
self.position = [bufferIndex, bufferOffset + bytesToRead] + else: + bytesToRead = len(bufferedData) - bufferOffset + self.position = [bufferIndex, len(bufferedData)] + bufferIndex += 1 + rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead]) + remainingBytes -= bytesToRead + + bufferOffset = 0 + + if remainingBytes: + rv.append(self._readStream(remainingBytes)) + + return b"".join(rv) + + +def HTMLInputStream(source, **kwargs): + # Work around Python bug #20007: read(0) closes the connection. + # http://bugs.python.org/issue20007 + if (isinstance(source, http_client.HTTPResponse) or + # Also check for addinfourl wrapping HTTPResponse + (isinstance(source, urllib.response.addbase) and + isinstance(source.fp, http_client.HTTPResponse))): + isUnicode = False + elif hasattr(source, "read"): + isUnicode = isinstance(source.read(0), text_type) + else: + isUnicode = isinstance(source, text_type) + + if isUnicode: + encodings = [x for x in kwargs if x.endswith("_encoding")] + if encodings: + raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings) + + return HTMLUnicodeInputStream(source, **kwargs) + else: + return HTMLBinaryInputStream(source, **kwargs) + + +class HTMLUnicodeInputStream(object): + """Provides a unicode stream of characters to the HTMLTokenizer. + + This class takes care of character encoding and removing or replacing + incorrect byte-sequences and also provides column and line tracking. + + """ + + _defaultChunkSize = 10240 + + def __init__(self, source): + """Initialises the HTMLInputStream. + + HTMLInputStream(source, [encoding]) -> Normalized stream from source + for use by html5lib. + + source can be either a file-object, local filename or a string. + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + + """ + + if not _utils.supports_lone_surrogates: + # Such platforms will have already checked for such + # surrogate errors, so no need to do this checking. + self.reportCharacterErrors = None + elif len("\U0010FFFF") == 1: + self.reportCharacterErrors = self.characterErrorsUCS4 + else: + self.reportCharacterErrors = self.characterErrorsUCS2 + + # List of where new lines occur + self.newLines = [0] + + self.charEncoding = (lookupEncoding("utf-8"), "certain") + self.dataStream = self.openStream(source) + + self.reset() + + def reset(self): + self.chunk = "" + self.chunkSize = 0 + self.chunkOffset = 0 + self.errors = [] + + # number of (complete) lines in previous chunks + self.prevNumLines = 0 + # number of columns in the last line of the previous chunk + self.prevNumCols = 0 + + # Deal with CR LF and surrogates split over chunk boundaries + self._bufferedCharacter = None + + def openStream(self, source): + """Produces a file object from source. + + source can be either a file object, local filename or a string. 
+ + """ + # Already a file object + if hasattr(source, 'read'): + stream = source + else: + stream = StringIO(source) + + return stream + + def _position(self, offset): + chunk = self.chunk + nLines = chunk.count('\n', 0, offset) + positionLine = self.prevNumLines + nLines + lastLinePos = chunk.rfind('\n', 0, offset) + if lastLinePos == -1: + positionColumn = self.prevNumCols + offset + else: + positionColumn = offset - (lastLinePos + 1) + return (positionLine, positionColumn) + + def position(self): + """Returns (line, col) of the current position in the stream.""" + line, col = self._position(self.chunkOffset) + return (line + 1, col) + + def char(self): + """ Read one character from the stream or queue if available. Return + EOF when EOF is reached. + """ + # Read a new chunk from the input stream if necessary + if self.chunkOffset >= self.chunkSize: + if not self.readChunk(): + return EOF + + chunkOffset = self.chunkOffset + char = self.chunk[chunkOffset] + self.chunkOffset = chunkOffset + 1 + + return char + + def readChunk(self, chunkSize=None): + if chunkSize is None: + chunkSize = self._defaultChunkSize + + self.prevNumLines, self.prevNumCols = self._position(self.chunkSize) + + self.chunk = "" + self.chunkSize = 0 + self.chunkOffset = 0 + + data = self.dataStream.read(chunkSize) + + # Deal with CR LF and surrogates broken across chunks + if self._bufferedCharacter: + data = self._bufferedCharacter + data + self._bufferedCharacter = None + elif not data: + # We have no more data, bye-bye stream + return False + + if len(data) > 1: + lastv = ord(data[-1]) + if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF: + self._bufferedCharacter = data[-1] + data = data[:-1] + + if self.reportCharacterErrors: + self.reportCharacterErrors(data) + + # Replace invalid characters + data = data.replace("\r\n", "\n") + data = data.replace("\r", "\n") + + self.chunk = data + self.chunkSize = len(data) + + return True + + def characterErrorsUCS4(self, data): + for _ in range(len(invalid_unicode_re.findall(data))): + self.errors.append("invalid-codepoint") + + def characterErrorsUCS2(self, data): + # Someone picked the wrong compile option + # You lose + skip = False + for match in invalid_unicode_re.finditer(data): + if skip: + continue + codepoint = ord(match.group()) + pos = match.start() + # Pretty sure there should be endianness issues here + if _utils.isSurrogatePair(data[pos:pos + 2]): + # We have a surrogate pair! + char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2]) + if char_val in non_bmp_invalid_codepoints: + self.errors.append("invalid-codepoint") + skip = True + elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and + pos == len(data) - 1): + self.errors.append("invalid-codepoint") + else: + skip = False + self.errors.append("invalid-codepoint") + + def charsUntil(self, characters, opposite=False): + """ Returns a string of characters from the stream up to but not + including any character in 'characters' or EOF. 'characters' must be + a container that supports the 'in' method and iteration over its + characters. 
+ """ + + # Use a cache of regexps to find the required characters + try: + chars = charsUntilRegEx[(characters, opposite)] + except KeyError: + if __debug__: + for c in characters: + assert(ord(c) < 128) + regex = "".join(["\\x%02x" % ord(c) for c in characters]) + if not opposite: + regex = "^%s" % regex + chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex) + + rv = [] + + while True: + # Find the longest matching prefix + m = chars.match(self.chunk, self.chunkOffset) + if m is None: + # If nothing matched, and it wasn't because we ran out of chunk, + # then stop + if self.chunkOffset != self.chunkSize: + break + else: + end = m.end() + # If not the whole chunk matched, return everything + # up to the part that didn't match + if end != self.chunkSize: + rv.append(self.chunk[self.chunkOffset:end]) + self.chunkOffset = end + break + # If the whole remainder of the chunk matched, + # use it all and read the next chunk + rv.append(self.chunk[self.chunkOffset:]) + if not self.readChunk(): + # Reached EOF + break + + r = "".join(rv) + return r + + def unget(self, char): + # Only one character is allowed to be ungotten at once - it must + # be consumed again before any further call to unget + if char is not None: + if self.chunkOffset == 0: + # unget is called quite rarely, so it's a good idea to do + # more work here if it saves a bit of work in the frequently + # called char and charsUntil. + # So, just prepend the ungotten character onto the current + # chunk: + self.chunk = char + self.chunk + self.chunkSize += 1 + else: + self.chunkOffset -= 1 + assert self.chunk[self.chunkOffset] == char + + +class HTMLBinaryInputStream(HTMLUnicodeInputStream): + """Provides a unicode stream of characters to the HTMLTokenizer. + + This class takes care of character encoding and removing or replacing + incorrect byte-sequences and also provides column and line tracking. + + """ + + def __init__(self, source, override_encoding=None, transport_encoding=None, + same_origin_parent_encoding=None, likely_encoding=None, + default_encoding="windows-1252", useChardet=True): + """Initialises the HTMLInputStream. + + HTMLInputStream(source, [encoding]) -> Normalized stream from source + for use by html5lib. + + source can be either a file-object, local filename or a string. + + The optional encoding parameter must be a string that indicates + the encoding. 
If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + + """ + # Raw Stream - for unicode objects this will encode to utf-8 and set + # self.charEncoding as appropriate + self.rawStream = self.openStream(source) + + HTMLUnicodeInputStream.__init__(self, self.rawStream) + + # Encoding Information + # Number of bytes to use when looking for a meta element with + # encoding information + self.numBytesMeta = 1024 + # Number of bytes to use when using detecting encoding using chardet + self.numBytesChardet = 100 + # Things from args + self.override_encoding = override_encoding + self.transport_encoding = transport_encoding + self.same_origin_parent_encoding = same_origin_parent_encoding + self.likely_encoding = likely_encoding + self.default_encoding = default_encoding + + # Determine encoding + self.charEncoding = self.determineEncoding(useChardet) + assert self.charEncoding[0] is not None + + # Call superclass + self.reset() + + def reset(self): + self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace') + HTMLUnicodeInputStream.reset(self) + + def openStream(self, source): + """Produces a file object from source. + + source can be either a file object, local filename or a string. + + """ + # Already a file object + if hasattr(source, 'read'): + stream = source + else: + stream = BytesIO(source) + + try: + stream.seek(stream.tell()) + except: # pylint:disable=bare-except + stream = BufferedStream(stream) + + return stream + + def determineEncoding(self, chardet=True): + # BOMs take precedence over everything + # This will also read past the BOM if present + charEncoding = self.detectBOM(), "certain" + if charEncoding[0] is not None: + return charEncoding + + # If we've been overriden, we've been overriden + charEncoding = lookupEncoding(self.override_encoding), "certain" + if charEncoding[0] is not None: + return charEncoding + + # Now check the transport layer + charEncoding = lookupEncoding(self.transport_encoding), "certain" + if charEncoding[0] is not None: + return charEncoding + + # Look for meta elements with encoding information + charEncoding = self.detectEncodingMeta(), "tentative" + if charEncoding[0] is not None: + return charEncoding + + # Parent document encoding + charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative" + if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"): + return charEncoding + + # "likely" encoding + charEncoding = lookupEncoding(self.likely_encoding), "tentative" + if charEncoding[0] is not None: + return charEncoding + + # Guess with chardet, if available + if chardet: + try: + from pip._vendor.chardet.universaldetector import UniversalDetector + except ImportError: + pass + else: + buffers = [] + detector = UniversalDetector() + while not detector.done: + buffer = self.rawStream.read(self.numBytesChardet) + assert isinstance(buffer, bytes) + if not buffer: + break + buffers.append(buffer) + detector.feed(buffer) + detector.close() + encoding = lookupEncoding(detector.result['encoding']) + self.rawStream.seek(0) + if encoding is not None: + return encoding, "tentative" + + # Try the default encoding + charEncoding = lookupEncoding(self.default_encoding), "tentative" + if charEncoding[0] is not None: + return charEncoding + + # Fallback to html5lib's default if even that hasn't worked + return lookupEncoding("windows-1252"), "tentative" + + def changeEncoding(self, newEncoding): + assert self.charEncoding[1] != 
"certain" + newEncoding = lookupEncoding(newEncoding) + if newEncoding is None: + return + if newEncoding.name in ("utf-16be", "utf-16le"): + newEncoding = lookupEncoding("utf-8") + assert newEncoding is not None + elif newEncoding == self.charEncoding[0]: + self.charEncoding = (self.charEncoding[0], "certain") + else: + self.rawStream.seek(0) + self.charEncoding = (newEncoding, "certain") + self.reset() + raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding)) + + def detectBOM(self): + """Attempts to detect at BOM at the start of the stream. If + an encoding can be determined from the BOM return the name of the + encoding otherwise return None""" + bomDict = { + codecs.BOM_UTF8: 'utf-8', + codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be', + codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be' + } + + # Go to beginning of file and read in 4 bytes + string = self.rawStream.read(4) + assert isinstance(string, bytes) + + # Try detecting the BOM using bytes from the string + encoding = bomDict.get(string[:3]) # UTF-8 + seek = 3 + if not encoding: + # Need to detect UTF-32 before UTF-16 + encoding = bomDict.get(string) # UTF-32 + seek = 4 + if not encoding: + encoding = bomDict.get(string[:2]) # UTF-16 + seek = 2 + + # Set the read position past the BOM if one was found, otherwise + # set it to the start of the stream + if encoding: + self.rawStream.seek(seek) + return lookupEncoding(encoding) + else: + self.rawStream.seek(0) + return None + + def detectEncodingMeta(self): + """Report the encoding declared by the meta element + """ + buffer = self.rawStream.read(self.numBytesMeta) + assert isinstance(buffer, bytes) + parser = EncodingParser(buffer) + self.rawStream.seek(0) + encoding = parser.getEncoding() + + if encoding is not None and encoding.name in ("utf-16be", "utf-16le"): + encoding = lookupEncoding("utf-8") + + return encoding + + +class EncodingBytes(bytes): + """String-like object with an associated position and various extra methods + If the position is ever greater than the string length then an exception is + raised""" + def __new__(self, value): + assert isinstance(value, bytes) + return bytes.__new__(self, value.lower()) + + def __init__(self, value): + # pylint:disable=unused-argument + self._position = -1 + + def __iter__(self): + return self + + def __next__(self): + p = self._position = self._position + 1 + if p >= len(self): + raise StopIteration + elif p < 0: + raise TypeError + return self[p:p + 1] + + def next(self): + # Py2 compat + return self.__next__() + + def previous(self): + p = self._position + if p >= len(self): + raise StopIteration + elif p < 0: + raise TypeError + self._position = p = p - 1 + return self[p:p + 1] + + def setPosition(self, position): + if self._position >= len(self): + raise StopIteration + self._position = position + + def getPosition(self): + if self._position >= len(self): + raise StopIteration + if self._position >= 0: + return self._position + else: + return None + + position = property(getPosition, setPosition) + + def getCurrentByte(self): + return self[self.position:self.position + 1] + + currentByte = property(getCurrentByte) + + def skip(self, chars=spaceCharactersBytes): + """Skip past a list of characters""" + p = self.position # use property for the error-checking + while p < len(self): + c = self[p:p + 1] + if c not in chars: + self._position = p + return c + p += 1 + self._position = p + return None + + def skipUntil(self, chars): + p = self.position + while p 
< len(self): + c = self[p:p + 1] + if c in chars: + self._position = p + return c + p += 1 + self._position = p + return None + + def matchBytes(self, bytes): + """Look for a sequence of bytes at the start of a string. If the bytes + are found return True and advance the position to the byte after the + match. Otherwise return False and leave the position alone""" + p = self.position + data = self[p:p + len(bytes)] + rv = data.startswith(bytes) + if rv: + self.position += len(bytes) + return rv + + def jumpTo(self, bytes): + """Look for the next sequence of bytes matching a given sequence. If + a match is found advance the position to the last byte of the match""" + newPosition = self[self.position:].find(bytes) + if newPosition > -1: + # XXX: This is ugly, but I can't see a nicer way to fix this. + if self._position == -1: + self._position = 0 + self._position += (newPosition + len(bytes) - 1) + return True + else: + raise StopIteration + + +class EncodingParser(object): + """Mini parser for detecting character encoding from meta elements""" + + def __init__(self, data): + """string - the data to work on for encoding detection""" + self.data = EncodingBytes(data) + self.encoding = None + + def getEncoding(self): + methodDispatch = ( + (b"<!--", self.handleComment), + (b"<meta", self.handleMeta), + (b"</", self.handlePossibleEndTag), + (b"<!", self.handleOther), + (b"<?", self.handleOther), + (b"<", self.handlePossibleStartTag)) + for _ in self.data: + keepParsing = True + for key, method in methodDispatch: + if self.data.matchBytes(key): + try: + keepParsing = method() + break + except StopIteration: + keepParsing = False + break + if not keepParsing: + break + + return self.encoding + + def handleComment(self): + """Skip over comments""" + return self.data.jumpTo(b"-->") + + def handleMeta(self): + if self.data.currentByte not in spaceCharactersBytes: + # if we have <meta not followed by a space so just keep going + return True + # We have a valid meta element we want to search for attributes + hasPragma = False + pendingEncoding = None + while True: + # Try to find the next attribute after the current position + attr = self.getAttribute() + if attr is None: + return True + else: + if attr[0] == b"http-equiv": + hasPragma = attr[1] == b"content-type" + if hasPragma and pendingEncoding is not None: + self.encoding = pendingEncoding + return False + elif attr[0] == b"charset": + tentativeEncoding = attr[1] + codec = lookupEncoding(tentativeEncoding) + if codec is not None: + self.encoding = codec + return False + elif attr[0] == b"content": + contentParser = ContentAttrParser(EncodingBytes(attr[1])) + tentativeEncoding = contentParser.parse() + if tentativeEncoding is not None: + codec = lookupEncoding(tentativeEncoding) + if codec is not None: + if hasPragma: + self.encoding = codec + return False + else: + pendingEncoding = codec + + def handlePossibleStartTag(self): + return self.handlePossibleTag(False) + + def handlePossibleEndTag(self): + next(self.data) + return self.handlePossibleTag(True) + + def handlePossibleTag(self, endTag): + data = self.data + if data.currentByte not in asciiLettersBytes: + # If the next byte is not an ascii letter either ignore this + # fragment (possible start tag case) or treat it according to + # handleOther + if endTag: + data.previous() + self.handleOther() + return True + + c = data.skipUntil(spacesAngleBrackets) + if c == b"<": + # return to the first step in the overall "two step" algorithm + # reprocessing the < byte + data.previous() + else: + # 
Read all attributes + attr = self.getAttribute() + while attr is not None: + attr = self.getAttribute() + return True + + def handleOther(self): + return self.data.jumpTo(b">") + + def getAttribute(self): + """Return a name,value pair for the next attribute in the stream, + if one is found, or None""" + data = self.data + # Step 1 (skip chars) + c = data.skip(spaceCharactersBytes | frozenset([b"/"])) + assert c is None or len(c) == 1 + # Step 2 + if c in (b">", None): + return None + # Step 3 + attrName = [] + attrValue = [] + # Step 4 attribute name + while True: + if c == b"=" and attrName: + break + elif c in spaceCharactersBytes: + # Step 6! + c = data.skip() + break + elif c in (b"/", b">"): + return b"".join(attrName), b"" + elif c in asciiUppercaseBytes: + attrName.append(c.lower()) + elif c is None: + return None + else: + attrName.append(c) + # Step 5 + c = next(data) + # Step 7 + if c != b"=": + data.previous() + return b"".join(attrName), b"" + # Step 8 + next(data) + # Step 9 + c = data.skip() + # Step 10 + if c in (b"'", b'"'): + # 10.1 + quoteChar = c + while True: + # 10.2 + c = next(data) + # 10.3 + if c == quoteChar: + next(data) + return b"".join(attrName), b"".join(attrValue) + # 10.4 + elif c in asciiUppercaseBytes: + attrValue.append(c.lower()) + # 10.5 + else: + attrValue.append(c) + elif c == b">": + return b"".join(attrName), b"" + elif c in asciiUppercaseBytes: + attrValue.append(c.lower()) + elif c is None: + return None + else: + attrValue.append(c) + # Step 11 + while True: + c = next(data) + if c in spacesAngleBrackets: + return b"".join(attrName), b"".join(attrValue) + elif c in asciiUppercaseBytes: + attrValue.append(c.lower()) + elif c is None: + return None + else: + attrValue.append(c) + + +class ContentAttrParser(object): + def __init__(self, data): + assert isinstance(data, bytes) + self.data = data + + def parse(self): + try: + # Check if the attr name is charset + # otherwise return + self.data.jumpTo(b"charset") + self.data.position += 1 + self.data.skip() + if not self.data.currentByte == b"=": + # If there is no = sign keep looking for attrs + return None + self.data.position += 1 + self.data.skip() + # Look for an encoding between matching quote marks + if self.data.currentByte in (b'"', b"'"): + quoteMark = self.data.currentByte + self.data.position += 1 + oldPosition = self.data.position + if self.data.jumpTo(quoteMark): + return self.data[oldPosition:self.data.position] + else: + return None + else: + # Unquoted value + oldPosition = self.data.position + try: + self.data.skipUntil(spaceCharactersBytes) + return self.data[oldPosition:self.data.position] + except StopIteration: + # Return the whole remaining value + return self.data[oldPosition:] + except StopIteration: + return None + + +def lookupEncoding(encoding): + """Return the python codec name corresponding to an encoding or None if the + string doesn't correspond to a valid encoding.""" + if isinstance(encoding, binary_type): + try: + encoding = encoding.decode("ascii") + except UnicodeDecodeError: + return None + + if encoding is not None: + try: + return webencodings.lookup(encoding) + except AttributeError: + return None + else: + return None diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyc new file mode 100644 index 0000000..ddd0452 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.py new file mode 100644 index 0000000..178f6e7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.py @@ -0,0 +1,1721 @@ +from __future__ import absolute_import, division, unicode_literals + +from pip._vendor.six import unichr as chr + +from collections import deque + +from .constants import spaceCharacters +from .constants import entities +from .constants import asciiLetters, asciiUpper2Lower +from .constants import digits, hexDigits, EOF +from .constants import tokenTypes, tagTokenTypes +from .constants import replacementCharacters + +from ._inputstream import HTMLInputStream + +from ._trie import Trie + +entitiesTrie = Trie(entities) + + +class HTMLTokenizer(object): + """ This class takes care of tokenizing HTML. + + * self.currentToken + Holds the token that is currently being processed. + + * self.state + Holds a reference to the method to be invoked... XXX + + * self.stream + Points to HTMLInputStream object. + """ + + def __init__(self, stream, parser=None, **kwargs): + + self.stream = HTMLInputStream(stream, **kwargs) + self.parser = parser + + # Setup the initial tokenizer state + self.escapeFlag = False + self.lastFourChars = [] + self.state = self.dataState + self.escape = False + + # The current token being created + self.currentToken = None + super(HTMLTokenizer, self).__init__() + + def __iter__(self): + """ This is where the magic happens. + + We do our usually processing through the states and when we have a token + to return we yield the token which pauses processing until the next token + is requested. + """ + self.tokenQueue = deque([]) + # Start processing. When EOF is reached self.state will return False + # instead of True and the loop will terminate. + while self.state(): + while self.stream.errors: + yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} + while self.tokenQueue: + yield self.tokenQueue.popleft() + + def consumeNumberEntity(self, isHex): + """This function returns either U+FFFD or the character based on the + decimal or hexadecimal representation. It also discards ";" if present. + If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. + """ + + allowed = digits + radix = 10 + if isHex: + allowed = hexDigits + radix = 16 + + charStack = [] + + # Consume all the characters that are in range while making sure we + # don't hit an EOF. + c = self.stream.char() + while c in allowed and c is not EOF: + charStack.append(c) + c = self.stream.char() + + # Convert the set of characters consumed to an int. + charAsInt = int("".join(charStack), radix) + + # Certain characters get replaced with others + if charAsInt in replacementCharacters: + char = replacementCharacters[charAsInt] + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "illegal-codepoint-for-numeric-entity", + "datavars": {"charAsInt": charAsInt}}) + elif ((0xD800 <= charAsInt <= 0xDFFF) or + (charAsInt > 0x10FFFF)): + char = "\uFFFD" + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "illegal-codepoint-for-numeric-entity", + "datavars": {"charAsInt": charAsInt}}) + else: + # Should speed up this check somehow (e.g. 
move the set to a constant) + if ((0x0001 <= charAsInt <= 0x0008) or + (0x000E <= charAsInt <= 0x001F) or + (0x007F <= charAsInt <= 0x009F) or + (0xFDD0 <= charAsInt <= 0xFDEF) or + charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, + 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, + 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, + 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, + 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, + 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, + 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, + 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, + 0xFFFFF, 0x10FFFE, 0x10FFFF])): + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": + "illegal-codepoint-for-numeric-entity", + "datavars": {"charAsInt": charAsInt}}) + try: + # Try/except needed as UCS-2 Python builds' unichar only works + # within the BMP. + char = chr(charAsInt) + except ValueError: + v = charAsInt - 0x10000 + char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) + + # Discard the ; if present. Otherwise, put it back on the queue and + # invoke parseError on parser. + if c != ";": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "numeric-entity-without-semicolon"}) + self.stream.unget(c) + + return char + + def consumeEntity(self, allowedChar=None, fromAttribute=False): + # Initialise to the default output for when no entity is matched + output = "&" + + charStack = [self.stream.char()] + if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or + (allowedChar is not None and allowedChar == charStack[0])): + self.stream.unget(charStack[0]) + + elif charStack[0] == "#": + # Read the next character to see if it's hex or decimal + hex = False + charStack.append(self.stream.char()) + if charStack[-1] in ("x", "X"): + hex = True + charStack.append(self.stream.char()) + + # charStack[-1] should be the first digit + if (hex and charStack[-1] in hexDigits) \ + or (not hex and charStack[-1] in digits): + # At least one digit found, so consume the whole number + self.stream.unget(charStack[-1]) + output = self.consumeNumberEntity(hex) + else: + # No digits found + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "expected-numeric-entity"}) + self.stream.unget(charStack.pop()) + output = "&" + "".join(charStack) + + else: + # At this point in the process might have named entity. Entities + # are stored in the global variable "entities". + # + # Consume characters and compare to these to a substring of the + # entity names in the list until the substring no longer matches. + while (charStack[-1] is not EOF): + if not entitiesTrie.has_keys_with_prefix("".join(charStack)): + break + charStack.append(self.stream.char()) + + # At this point we have a string that starts with some characters + # that may match an entity + # Try to find the longest entity the string will match to take care + # of ¬i for instance. 
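# --- Illustrative sketch, not part of the vendored file: the Trie lookup
# used just below resolves the longest entity name that the consumed
# characters can match, which is how "&noti" falls back to the legacy
# "not" entity while "&notin;" matches in full. The two-entry dict is a
# hypothetical stand-in for html5lib's full entities table.
sample_entities = {"not": "\u00ac", "notin;": "\u2209"}

def longest_prefix(chars):
    # Scan from the longest candidate down, returning the first entity
    # name that `chars` starts with, mirroring Trie.longest_prefix.
    for end in range(len(chars), 0, -1):
        if chars[:end] in sample_entities:
            return chars[:end]
    raise KeyError(chars)

print(longest_prefix("noti"))    # "not"    -> "&noti" is treated as "&not" + "i"
print(longest_prefix("notin;"))  # "notin;" -> the full entity wins when present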
+ try: + entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) + entityLength = len(entityName) + except KeyError: + entityName = None + + if entityName is not None: + if entityName[-1] != ";": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "named-entity-without-semicolon"}) + if (entityName[-1] != ";" and fromAttribute and + (charStack[entityLength] in asciiLetters or + charStack[entityLength] in digits or + charStack[entityLength] == "=")): + self.stream.unget(charStack.pop()) + output = "&" + "".join(charStack) + else: + output = entities[entityName] + self.stream.unget(charStack.pop()) + output += "".join(charStack[entityLength:]) + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-named-entity"}) + self.stream.unget(charStack.pop()) + output = "&" + "".join(charStack) + + if fromAttribute: + self.currentToken["data"][-1][1] += output + else: + if output in spaceCharacters: + tokenType = "SpaceCharacters" + else: + tokenType = "Characters" + self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) + + def processEntityInAttribute(self, allowedChar): + """This method replaces the need for "entityInAttributeValueState". + """ + self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) + + def emitCurrentToken(self): + """This method is a generic handler for emitting the tags. It also sets + the state to "data" because that's what's needed after a token has been + emitted. + """ + token = self.currentToken + # Add token to the queue to be yielded + if (token["type"] in tagTokenTypes): + token["name"] = token["name"].translate(asciiUpper2Lower) + if token["type"] == tokenTypes["EndTag"]: + if token["data"]: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "attributes-in-end-tag"}) + if token["selfClosing"]: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "self-closing-flag-on-end-tag"}) + self.tokenQueue.append(token) + self.state = self.dataState + + # Below are the various tokenizer states worked out. + def dataState(self): + data = self.stream.char() + if data == "&": + self.state = self.entityDataState + elif data == "<": + self.state = self.tagOpenState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\u0000"}) + elif data is EOF: + # Tokenization ends. + return False + elif data in spaceCharacters: + # Directly after emitting a token you switch back to the "data + # state". At that point spaceCharacters are important so they are + # emitted separately. + self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": + data + self.stream.charsUntil(spaceCharacters, True)}) + # No need to update lastFourChars here, since the first space will + # have already been appended to lastFourChars and will have broken + # any <!-- or --> sequences + else: + chars = self.stream.charsUntil(("&", "<", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def entityDataState(self): + self.consumeEntity() + self.state = self.dataState + return True + + def rcdataState(self): + data = self.stream.char() + if data == "&": + self.state = self.characterReferenceInRcdata + elif data == "<": + self.state = self.rcdataLessThanSignState + elif data == EOF: + # Tokenization ends. 
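# --- Illustrative sketch, not part of the vendored file: the driver
# pattern behind HTMLTokenizer.__iter__ above. Each state is a bound
# method that consumes input, may queue tokens, and returns False only
# when tokenization ends, which is what "# Tokenization ends." marks here.
class ToyTokenizer(object):
    def __init__(self, text):
        self.chars = iter(text)
        self.queue = []
        self.state = self.dataState

    def dataState(self):
        c = next(self.chars, None)
        if c is None:
            return False                      # EOF: stop the driver loop
        self.queue.append(("Characters", c))  # a real state would branch here
        return True

    def __iter__(self):
        while self.state():                   # run one state step at a time
            while self.queue:
                yield self.queue.pop(0)

# list(ToyTokenizer("hi")) == [("Characters", "h"), ("Characters", "i")]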
+ return False + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data in spaceCharacters: + # Directly after emitting a token you switch back to the "data + # state". At that point spaceCharacters are important so they are + # emitted separately. + self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": + data + self.stream.charsUntil(spaceCharacters, True)}) + # No need to update lastFourChars here, since the first space will + # have already been appended to lastFourChars and will have broken + # any <!-- or --> sequences + else: + chars = self.stream.charsUntil(("&", "<", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def characterReferenceInRcdata(self): + self.consumeEntity() + self.state = self.rcdataState + return True + + def rawtextState(self): + data = self.stream.char() + if data == "<": + self.state = self.rawtextLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data == EOF: + # Tokenization ends. + return False + else: + chars = self.stream.charsUntil(("<", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def scriptDataState(self): + data = self.stream.char() + if data == "<": + self.state = self.scriptDataLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data == EOF: + # Tokenization ends. + return False + else: + chars = self.stream.charsUntil(("<", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def plaintextState(self): + data = self.stream.char() + if data == EOF: + # Tokenization ends. + return False + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + self.stream.charsUntil("\u0000")}) + return True + + def tagOpenState(self): + data = self.stream.char() + if data == "!": + self.state = self.markupDeclarationOpenState + elif data == "/": + self.state = self.closeTagOpenState + elif data in asciiLetters: + self.currentToken = {"type": tokenTypes["StartTag"], + "name": data, "data": [], + "selfClosing": False, + "selfClosingAcknowledged": False} + self.state = self.tagNameState + elif data == ">": + # XXX In theory it could be something besides a tag name. But + # do we really care? + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-tag-name-but-got-right-bracket"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) + self.state = self.dataState + elif data == "?": + # XXX In theory it could be something besides a tag name. But + # do we really care? 
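# --- Illustrative sketch, not part of the vendored file: driving the real
# tokenizer over the "<>" edge case handled just below. The import paths
# assume html5lib >= 1.0 installed normally, not the pip._vendor copy this
# diff adds.
from html5lib._tokenizer import HTMLTokenizer
from html5lib.constants import tokenTypes

names = {value: key for key, value in tokenTypes.items()}
for token in HTMLTokenizer("<>"):
    print(names[token["type"]], token.get("data"))
# Expected: ParseError "expected-tag-name-but-got-right-bracket",
# then Characters "<>".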
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-tag-name-but-got-question-mark"}) + self.stream.unget(data) + self.state = self.bogusCommentState + else: + # XXX + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-tag-name"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.dataState + return True + + def closeTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + self.currentToken = {"type": tokenTypes["EndTag"], "name": data, + "data": [], "selfClosing": False} + self.state = self.tagNameState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-closing-tag-but-got-right-bracket"}) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-closing-tag-but-got-eof"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.state = self.dataState + else: + # XXX data can be _'_... + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-closing-tag-but-got-char", + "datavars": {"data": data}}) + self.stream.unget(data) + self.state = self.bogusCommentState + return True + + def tagNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeAttributeNameState + elif data == ">": + self.emitCurrentToken() + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-tag-name"}) + self.state = self.dataState + elif data == "/": + self.state = self.selfClosingStartTagState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["name"] += "\uFFFD" + else: + self.currentToken["name"] += data + # (Don't use charsUntil here, because tag names are + # very short and it's faster to not do anything fancy) + return True + + def rcdataLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.temporaryBuffer = "" + self.state = self.rcdataEndTagOpenState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.rcdataState + return True + + def rcdataEndTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + self.temporaryBuffer += data + self.state = self.rcdataEndTagNameState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.stream.unget(data) + self.state = self.rcdataState + return True + + def rcdataEndTagNameState(self): + appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() + data = self.stream.char() + if data in spaceCharacters and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.beforeAttributeNameState + elif data == "/" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.selfClosingStartTagState + elif data == ">" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.emitCurrentToken() + self.state = self.dataState + elif data in asciiLetters: + self.temporaryBuffer += data + else: + self.tokenQueue.append({"type": 
tokenTypes["Characters"], + "data": "</" + self.temporaryBuffer}) + self.stream.unget(data) + self.state = self.rcdataState + return True + + def rawtextLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.temporaryBuffer = "" + self.state = self.rawtextEndTagOpenState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.rawtextState + return True + + def rawtextEndTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + self.temporaryBuffer += data + self.state = self.rawtextEndTagNameState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.stream.unget(data) + self.state = self.rawtextState + return True + + def rawtextEndTagNameState(self): + appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() + data = self.stream.char() + if data in spaceCharacters and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.beforeAttributeNameState + elif data == "/" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.selfClosingStartTagState + elif data == ">" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.emitCurrentToken() + self.state = self.dataState + elif data in asciiLetters: + self.temporaryBuffer += data + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "</" + self.temporaryBuffer}) + self.stream.unget(data) + self.state = self.rawtextState + return True + + def scriptDataLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.temporaryBuffer = "" + self.state = self.scriptDataEndTagOpenState + elif data == "!": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"}) + self.state = self.scriptDataEscapeStartState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEndTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + self.temporaryBuffer += data + self.state = self.scriptDataEndTagNameState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEndTagNameState(self): + appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() + data = self.stream.char() + if data in spaceCharacters and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.beforeAttributeNameState + elif data == "/" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.selfClosingStartTagState + elif data == ">" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.emitCurrentToken() + self.state = self.dataState + elif data in asciiLetters: + self.temporaryBuffer += data + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "</" + 
self.temporaryBuffer}) + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEscapeStartState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataEscapeStartDashState + else: + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEscapeStartDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataEscapedDashDashState + else: + self.stream.unget(data) + self.state = self.scriptDataState + return True + + def scriptDataEscapedState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataEscapedDashState + elif data == "<": + self.state = self.scriptDataEscapedLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data == EOF: + self.state = self.dataState + else: + chars = self.stream.charsUntil(("<", "-", "\u0000")) + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": + data + chars}) + return True + + def scriptDataEscapedDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataEscapedDashDashState + elif data == "<": + self.state = self.scriptDataEscapedLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + self.state = self.scriptDataEscapedState + elif data == EOF: + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.state = self.scriptDataEscapedState + return True + + def scriptDataEscapedDashDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + elif data == "<": + self.state = self.scriptDataEscapedLessThanSignState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) + self.state = self.scriptDataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + self.state = self.scriptDataEscapedState + elif data == EOF: + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.state = self.scriptDataEscapedState + return True + + def scriptDataEscapedLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.temporaryBuffer = "" + self.state = self.scriptDataEscapedEndTagOpenState + elif data in asciiLetters: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data}) + self.temporaryBuffer = data + self.state = self.scriptDataDoubleEscapeStartState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.stream.unget(data) + self.state = self.scriptDataEscapedState + return True + + def scriptDataEscapedEndTagOpenState(self): + data = self.stream.char() + if data in asciiLetters: + 
self.temporaryBuffer = data + self.state = self.scriptDataEscapedEndTagNameState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) + self.stream.unget(data) + self.state = self.scriptDataEscapedState + return True + + def scriptDataEscapedEndTagNameState(self): + appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() + data = self.stream.char() + if data in spaceCharacters and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.beforeAttributeNameState + elif data == "/" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.state = self.selfClosingStartTagState + elif data == ">" and appropriate: + self.currentToken = {"type": tokenTypes["EndTag"], + "name": self.temporaryBuffer, + "data": [], "selfClosing": False} + self.emitCurrentToken() + self.state = self.dataState + elif data in asciiLetters: + self.temporaryBuffer += data + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "</" + self.temporaryBuffer}) + self.stream.unget(data) + self.state = self.scriptDataEscapedState + return True + + def scriptDataDoubleEscapeStartState(self): + data = self.stream.char() + if data in (spaceCharacters | frozenset(("/", ">"))): + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + if self.temporaryBuffer.lower() == "script": + self.state = self.scriptDataDoubleEscapedState + else: + self.state = self.scriptDataEscapedState + elif data in asciiLetters: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.temporaryBuffer += data + else: + self.stream.unget(data) + self.state = self.scriptDataEscapedState + return True + + def scriptDataDoubleEscapedState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataDoubleEscapedDashState + elif data == "<": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.state = self.scriptDataDoubleEscapedLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + elif data == EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-script-in-script"}) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + return True + + def scriptDataDoubleEscapedDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + self.state = self.scriptDataDoubleEscapedDashDashState + elif data == "<": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.state = self.scriptDataDoubleEscapedLessThanSignState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + self.state = self.scriptDataDoubleEscapedState + elif data == EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-script-in-script"}) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": 
data}) + self.state = self.scriptDataDoubleEscapedState + return True + + def scriptDataDoubleEscapedDashDashState(self): + data = self.stream.char() + if data == "-": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) + elif data == "<": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) + self.state = self.scriptDataDoubleEscapedLessThanSignState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) + self.state = self.scriptDataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": "\uFFFD"}) + self.state = self.scriptDataDoubleEscapedState + elif data == EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-script-in-script"}) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.state = self.scriptDataDoubleEscapedState + return True + + def scriptDataDoubleEscapedLessThanSignState(self): + data = self.stream.char() + if data == "/": + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"}) + self.temporaryBuffer = "" + self.state = self.scriptDataDoubleEscapeEndState + else: + self.stream.unget(data) + self.state = self.scriptDataDoubleEscapedState + return True + + def scriptDataDoubleEscapeEndState(self): + data = self.stream.char() + if data in (spaceCharacters | frozenset(("/", ">"))): + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + if self.temporaryBuffer.lower() == "script": + self.state = self.scriptDataEscapedState + else: + self.state = self.scriptDataDoubleEscapedState + elif data in asciiLetters: + self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) + self.temporaryBuffer += data + else: + self.stream.unget(data) + self.state = self.scriptDataDoubleEscapedState + return True + + def beforeAttributeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.stream.charsUntil(spaceCharacters, True) + elif data in asciiLetters: + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + elif data == ">": + self.emitCurrentToken() + elif data == "/": + self.state = self.selfClosingStartTagState + elif data in ("'", '"', "=", "<"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "invalid-character-in-attribute-name"}) + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"].append(["\uFFFD", ""]) + self.state = self.attributeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-attribute-name-but-got-eof"}) + self.state = self.dataState + else: + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + return True + + def attributeNameState(self): + data = self.stream.char() + leavingThisState = True + emitToken = False + if data == "=": + self.state = self.beforeAttributeValueState + elif data in asciiLetters: + self.currentToken["data"][-1][0] += data +\ + self.stream.charsUntil(asciiLetters, True) + leavingThisState = False + elif data == ">": + # XXX If we emit here the attributes are converted to a dict + # without being checked and when the code below runs we error + # 
because data is a dict not a list + emitToken = True + elif data in spaceCharacters: + self.state = self.afterAttributeNameState + elif data == "/": + self.state = self.selfClosingStartTagState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][0] += "\uFFFD" + leavingThisState = False + elif data in ("'", '"', "<"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": + "invalid-character-in-attribute-name"}) + self.currentToken["data"][-1][0] += data + leavingThisState = False + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "eof-in-attribute-name"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][0] += data + leavingThisState = False + + if leavingThisState: + # Attributes are not dropped at this stage. That happens when the + # start tag token is emitted so values can still be safely appended + # to attributes, but we do want to report the parse error in time. + self.currentToken["data"][-1][0] = ( + self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) + for name, _ in self.currentToken["data"][:-1]: + if self.currentToken["data"][-1][0] == name: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "duplicate-attribute"}) + break + # XXX Fix for above XXX + if emitToken: + self.emitCurrentToken() + return True + + def afterAttributeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.stream.charsUntil(spaceCharacters, True) + elif data == "=": + self.state = self.beforeAttributeValueState + elif data == ">": + self.emitCurrentToken() + elif data in asciiLetters: + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + elif data == "/": + self.state = self.selfClosingStartTagState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"].append(["\uFFFD", ""]) + self.state = self.attributeNameState + elif data in ("'", '"', "<"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "invalid-character-after-attribute-name"}) + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-end-of-tag-but-got-eof"}) + self.state = self.dataState + else: + self.currentToken["data"].append([data, ""]) + self.state = self.attributeNameState + return True + + def beforeAttributeValueState(self): + data = self.stream.char() + if data in spaceCharacters: + self.stream.charsUntil(spaceCharacters, True) + elif data == "\"": + self.state = self.attributeValueDoubleQuotedState + elif data == "&": + self.state = self.attributeValueUnQuotedState + self.stream.unget(data) + elif data == "'": + self.state = self.attributeValueSingleQuotedState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-attribute-value-but-got-right-bracket"}) + self.emitCurrentToken() + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][1] += "\uFFFD" + self.state = self.attributeValueUnQuotedState + elif data in ("=", "<", "`"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "equals-in-unquoted-attribute-value"}) + self.currentToken["data"][-1][1] += data + self.state = 
self.attributeValueUnQuotedState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-attribute-value-but-got-eof"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][1] += data + self.state = self.attributeValueUnQuotedState + return True + + def attributeValueDoubleQuotedState(self): + data = self.stream.char() + if data == "\"": + self.state = self.afterAttributeValueState + elif data == "&": + self.processEntityInAttribute('"') + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][1] += "\uFFFD" + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-attribute-value-double-quote"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][1] += data +\ + self.stream.charsUntil(("\"", "&", "\u0000")) + return True + + def attributeValueSingleQuotedState(self): + data = self.stream.char() + if data == "'": + self.state = self.afterAttributeValueState + elif data == "&": + self.processEntityInAttribute("'") + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][1] += "\uFFFD" + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-attribute-value-single-quote"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][1] += data +\ + self.stream.charsUntil(("'", "&", "\u0000")) + return True + + def attributeValueUnQuotedState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeAttributeNameState + elif data == "&": + self.processEntityInAttribute(">") + elif data == ">": + self.emitCurrentToken() + elif data in ('"', "'", "=", "<", "`"): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-character-in-unquoted-attribute-value"}) + self.currentToken["data"][-1][1] += data + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"][-1][1] += "\uFFFD" + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-attribute-value-no-quotes"}) + self.state = self.dataState + else: + self.currentToken["data"][-1][1] += data + self.stream.charsUntil( + frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters) + return True + + def afterAttributeValueState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeAttributeNameState + elif data == ">": + self.emitCurrentToken() + elif data == "/": + self.state = self.selfClosingStartTagState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-EOF-after-attribute-value"}) + self.stream.unget(data) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-character-after-attribute-value"}) + self.stream.unget(data) + self.state = self.beforeAttributeNameState + return True + + def selfClosingStartTagState(self): + data = self.stream.char() + if data == ">": + self.currentToken["selfClosing"] = True + self.emitCurrentToken() + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": + "unexpected-EOF-after-solidus-in-tag"}) + self.stream.unget(data) + self.state = self.dataState + else: + 
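+            # Editor's annotation: any other character after "/" is an
+            # error; it is pushed back onto the stream and reprocessed as
+            # the start of a new attribute name.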
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-character-after-solidus-in-tag"}) + self.stream.unget(data) + self.state = self.beforeAttributeNameState + return True + + def bogusCommentState(self): + # Make a new comment token and give it as value all the characters + # until the first > or EOF (charsUntil checks for EOF automatically) + # and emit it. + data = self.stream.charsUntil(">") + data = data.replace("\u0000", "\uFFFD") + self.tokenQueue.append( + {"type": tokenTypes["Comment"], "data": data}) + + # Eat the character directly after the bogus comment which is either a + # ">" or an EOF. + self.stream.char() + self.state = self.dataState + return True + + def markupDeclarationOpenState(self): + charStack = [self.stream.char()] + if charStack[-1] == "-": + charStack.append(self.stream.char()) + if charStack[-1] == "-": + self.currentToken = {"type": tokenTypes["Comment"], "data": ""} + self.state = self.commentStartState + return True + elif charStack[-1] in ('d', 'D'): + matched = True + for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), + ('y', 'Y'), ('p', 'P'), ('e', 'E')): + charStack.append(self.stream.char()) + if charStack[-1] not in expected: + matched = False + break + if matched: + self.currentToken = {"type": tokenTypes["Doctype"], + "name": "", + "publicId": None, "systemId": None, + "correct": True} + self.state = self.doctypeState + return True + elif (charStack[-1] == "[" and + self.parser is not None and + self.parser.tree.openElements and + self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace): + matched = True + for expected in ["C", "D", "A", "T", "A", "["]: + charStack.append(self.stream.char()) + if charStack[-1] != expected: + matched = False + break + if matched: + self.state = self.cdataSectionState + return True + + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-dashes-or-doctype"}) + + while charStack: + self.stream.unget(charStack.pop()) + self.state = self.bogusCommentState + return True + + def commentStartState(self): + data = self.stream.char() + if data == "-": + self.state = self.commentStartDashState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "incorrect-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += data + self.state = self.commentState + return True + + def commentStartDashState(self): + data = self.stream.char() + if data == "-": + self.state = self.commentEndState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "-\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "incorrect-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += "-" + data + self.state = self.commentState + return True + + def 
commentState(self): + data = self.stream.char() + if data == "-": + self.state = self.commentEndDashState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "\uFFFD" + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "eof-in-comment"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += data + \ + self.stream.charsUntil(("-", "\u0000")) + return True + + def commentEndDashState(self): + data = self.stream.char() + if data == "-": + self.state = self.commentEndState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "-\uFFFD" + self.state = self.commentState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment-end-dash"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += "-" + data + self.state = self.commentState + return True + + def commentEndState(self): + data = self.stream.char() + if data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "--\uFFFD" + self.state = self.commentState + elif data == "!": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-bang-after-double-dash-in-comment"}) + self.state = self.commentEndBangState + elif data == "-": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-dash-after-double-dash-in-comment"}) + self.currentToken["data"] += data + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment-double-dash"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + # XXX + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-comment"}) + self.currentToken["data"] += "--" + data + self.state = self.commentState + return True + + def commentEndBangState(self): + data = self.stream.char() + if data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == "-": + self.currentToken["data"] += "--!" + self.state = self.commentEndDashState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["data"] += "--!\uFFFD" + self.state = self.commentState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-comment-end-bang-state"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["data"] += "--!" 
+ data + self.state = self.commentState + return True + + def doctypeState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeDoctypeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-doctype-name-but-got-eof"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "need-space-after-doctype"}) + self.stream.unget(data) + self.state = self.beforeDoctypeNameState + return True + + def beforeDoctypeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-doctype-name-but-got-right-bracket"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["name"] = "\uFFFD" + self.state = self.doctypeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-doctype-name-but-got-eof"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["name"] = data + self.state = self.doctypeNameState + return True + + def doctypeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) + self.state = self.afterDoctypeNameState + elif data == ">": + self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["name"] += "\uFFFD" + self.state = self.doctypeNameState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype-name"}) + self.currentToken["correct"] = False + self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["name"] += data + return True + + def afterDoctypeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.currentToken["correct"] = False + self.stream.unget(data) + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + if data in ("p", "P"): + matched = True + for expected in (("u", "U"), ("b", "B"), ("l", "L"), + ("i", "I"), ("c", "C")): + data = self.stream.char() + if data not in expected: + matched = False + break + if matched: + self.state = self.afterDoctypePublicKeywordState + return True + elif data in ("s", "S"): + matched = True + for expected in (("y", "Y"), ("s", "S"), ("t", "T"), + ("e", "E"), ("m", "M")): + data = self.stream.char() + if data not in expected: + matched = False + break + if matched: + self.state = self.afterDoctypeSystemKeywordState + return True + + # All the characters read before the current 'data' will be + # [a-zA-Z], 
so they're garbage in the bogus doctype and can be + # discarded; only the latest character might be '>' or EOF + # and needs to be ungetted + self.stream.unget(data) + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "expected-space-or-right-bracket-in-doctype", "datavars": + {"data": data}}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + + return True + + def afterDoctypePublicKeywordState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeDoctypePublicIdentifierState + elif data in ("'", '"'): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.stream.unget(data) + self.state = self.beforeDoctypePublicIdentifierState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.stream.unget(data) + self.state = self.beforeDoctypePublicIdentifierState + return True + + def beforeDoctypePublicIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == "\"": + self.currentToken["publicId"] = "" + self.state = self.doctypePublicIdentifierDoubleQuotedState + elif data == "'": + self.currentToken["publicId"] = "" + self.state = self.doctypePublicIdentifierSingleQuotedState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + return True + + def doctypePublicIdentifierDoubleQuotedState(self): + data = self.stream.char() + if data == "\"": + self.state = self.afterDoctypePublicIdentifierState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["publicId"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["publicId"] += data + return True + + def doctypePublicIdentifierSingleQuotedState(self): + data = self.stream.char() + if data == "'": + self.state = self.afterDoctypePublicIdentifierState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["publicId"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": 
tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["publicId"] += data + return True + + def afterDoctypePublicIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.betweenDoctypePublicAndSystemIdentifiersState + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == '"': + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierDoubleQuotedState + elif data == "'": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierSingleQuotedState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + return True + + def betweenDoctypePublicAndSystemIdentifiersState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data == '"': + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierDoubleQuotedState + elif data == "'": + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierSingleQuotedState + elif data == EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + return True + + def afterDoctypeSystemKeywordState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.beforeDoctypeSystemIdentifierState + elif data in ("'", '"'): + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.stream.unget(data) + self.state = self.beforeDoctypeSystemIdentifierState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.stream.unget(data) + self.state = self.beforeDoctypeSystemIdentifierState + return True + + def beforeDoctypeSystemIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == "\"": + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierDoubleQuotedState + elif data == "'": + self.currentToken["systemId"] = "" + self.state = self.doctypeSystemIdentifierSingleQuotedState + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.currentToken["correct"] = False + self.state = self.bogusDoctypeState + return True + + def doctypeSystemIdentifierDoubleQuotedState(self): + data = self.stream.char() + if data == "\"": + self.state = self.afterDoctypeSystemIdentifierState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["systemId"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["systemId"] += data + return True + + def doctypeSystemIdentifierSingleQuotedState(self): + data = self.stream.char() + if data == "'": + self.state = self.afterDoctypeSystemIdentifierState + elif data == "\u0000": + self.tokenQueue.append({"type": tokenTypes["ParseError"], + "data": "invalid-codepoint"}) + self.currentToken["systemId"] += "\uFFFD" + elif data == ">": + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-end-of-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.currentToken["systemId"] += data + return True + + def afterDoctypeSystemIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "eof-in-doctype"}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": + "unexpected-char-in-doctype"}) + self.state = self.bogusDoctypeState + return True + + def bogusDoctypeState(self): + data = self.stream.char() + if data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + elif data is EOF: + # XXX EMIT + self.stream.unget(data) + self.tokenQueue.append(self.currentToken) + self.state = self.dataState + else: + pass + return True + + def cdataSectionState(self): + data = [] + while True: + data.append(self.stream.charsUntil("]")) + data.append(self.stream.charsUntil(">")) + char = self.stream.char() + if char == EOF: + break + else: + assert char == ">" + if data[-1][-2:] == "]]": + data[-1] = data[-1][:-2] + break + else: + data.append(char) + + data = "".join(data) # pylint:disable=redefined-variable-type + # Deal with null here rather than in the parser + nullCount = data.count("\u0000") + if nullCount > 0: + for _ in range(nullCount): + self.tokenQueue.append({"type": tokenTypes["ParseError"], 
+ "data": "invalid-codepoint"}) + data = data.replace("\u0000", "\uFFFD") + if data: + self.tokenQueue.append({"type": tokenTypes["Characters"], + "data": data}) + self.state = self.dataState + return True diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyc new file mode 100644 index 0000000..0a2c719 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.py new file mode 100644 index 0000000..a5ba4bf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.py @@ -0,0 +1,14 @@ +from __future__ import absolute_import, division, unicode_literals + +from .py import Trie as PyTrie + +Trie = PyTrie + +# pylint:disable=wrong-import-position +try: + from .datrie import Trie as DATrie +except ImportError: + pass +else: + Trie = DATrie +# pylint:enable=wrong-import-position diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.pyc new file mode 100644 index 0000000..8a07bfb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.py new file mode 100644 index 0000000..a1158bb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.py @@ -0,0 +1,37 @@ +from __future__ import absolute_import, division, unicode_literals + +from collections import Mapping + + +class Trie(Mapping): + """Abstract base class for tries""" + + def keys(self, prefix=None): + # pylint:disable=arguments-differ + keys = super(Trie, self).keys() + + if prefix is None: + return set(keys) + + return {x for x in keys if x.startswith(prefix)} + + def has_keys_with_prefix(self, prefix): + for key in self.keys(): + if key.startswith(prefix): + return True + + return False + + def longest_prefix(self, prefix): + if prefix in self: + return prefix + + for i in range(1, len(prefix) + 1): + if prefix[:-i] in self: + return prefix[:-i] + + raise KeyError(prefix) + + def longest_prefix_item(self, prefix): + lprefix = self.longest_prefix(prefix) + return (lprefix, self[lprefix]) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pyc new file mode 100644 index 0000000..b8e445d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/_base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/datrie.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/datrie.py new file mode 100644 index 0000000..e2e5f86 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/datrie.py @@ -0,0 +1,44 @@ +from __future__ import absolute_import, division, unicode_literals + +from datrie import Trie as DATrie +from pip._vendor.six import text_type + +from ._base import Trie as ABCTrie + + +class Trie(ABCTrie): + def __init__(self, data): + chars = set() + for key in 
data.keys(): + if not isinstance(key, text_type): + raise TypeError("All keys must be strings") + for char in key: + chars.add(char) + + self._data = DATrie("".join(chars)) + for key, value in data.items(): + self._data[key] = value + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + raise NotImplementedError() + + def __getitem__(self, key): + return self._data[key] + + def keys(self, prefix=None): + return self._data.keys(prefix) + + def has_keys_with_prefix(self, prefix): + return self._data.has_keys_with_prefix(prefix) + + def longest_prefix(self, prefix): + return self._data.longest_prefix(prefix) + + def longest_prefix_item(self, prefix): + return self._data.longest_prefix_item(prefix) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/datrie.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/datrie.pyc new file mode 100644 index 0000000..e5fc26a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/datrie.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.py new file mode 100644 index 0000000..c178b21 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.py @@ -0,0 +1,67 @@ +from __future__ import absolute_import, division, unicode_literals +from pip._vendor.six import text_type + +from bisect import bisect_left + +from ._base import Trie as ABCTrie + + +class Trie(ABCTrie): + def __init__(self, data): + if not all(isinstance(x, text_type) for x in data.keys()): + raise TypeError("All keys must be strings") + + self._data = data + self._keys = sorted(data.keys()) + self._cachestr = "" + self._cachepoints = (0, len(data)) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + def keys(self, prefix=None): + if prefix is None or prefix == "" or not self._keys: + return set(self._keys) + + if prefix.startswith(self._cachestr): + lo, hi = self._cachepoints + start = i = bisect_left(self._keys, prefix, lo, hi) + else: + start = i = bisect_left(self._keys, prefix) + + keys = set() + if start == len(self._keys): + return keys + + while self._keys[i].startswith(prefix): + keys.add(self._keys[i]) + i += 1 + + self._cachestr = prefix + self._cachepoints = (start, i) + + return keys + + def has_keys_with_prefix(self, prefix): + if prefix in self._data: + return True + + if prefix.startswith(self._cachestr): + lo, hi = self._cachepoints + i = bisect_left(self._keys, prefix, lo, hi) + else: + i = bisect_left(self._keys, prefix) + + if i == len(self._keys): + return False + + return self._keys[i].startswith(prefix) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyc new file mode 100644 index 0000000..14acccd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.py new file mode 100644 index 0000000..0703afb --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.py @@ -0,0 +1,124 @@ +from __future__ import absolute_import, division, unicode_literals + +from types import ModuleType + +from pip._vendor.six import text_type + +try: + import xml.etree.cElementTree as default_etree +except ImportError: + import xml.etree.ElementTree as default_etree + + +__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair", + "surrogatePairToCodepoint", "moduleFactoryFactory", + "supports_lone_surrogates"] + + +# Platforms not supporting lone surrogates (\uD800-\uDFFF) should be +# caught by the below test. In general this would be any platform +# using UTF-16 as its encoding of unicode strings, such as +# Jython. This is because UTF-16 itself is based on the use of such +# surrogates, and there is no mechanism to further escape such +# escapes. +try: + _x = eval('"\\uD800"') # pylint:disable=eval-used + if not isinstance(_x, text_type): + # We need this with u"" because of http://bugs.jython.org/issue2039 + _x = eval('u"\\uD800"') # pylint:disable=eval-used + assert isinstance(_x, text_type) +except: # pylint:disable=bare-except + supports_lone_surrogates = False +else: + supports_lone_surrogates = True + + +class MethodDispatcher(dict): + """Dict with 2 special properties: + + On initiation, keys that are lists, sets or tuples are converted to + multiple keys so accessing any one of the items in the original + list-like object returns the matching value + + md = MethodDispatcher({("foo", "bar"):"baz"}) + md["foo"] == "baz" + + A default value which can be set through the default attribute. + """ + + def __init__(self, items=()): + # Using _dictEntries instead of directly assigning to self is about + # twice as fast. Please do careful performance testing before changing + # anything here. + _dictEntries = [] + for name, value in items: + if isinstance(name, (list, tuple, frozenset, set)): + for item in name: + _dictEntries.append((item, value)) + else: + _dictEntries.append((name, value)) + dict.__init__(self, _dictEntries) + assert len(self) == len(_dictEntries) + self.default = None + + def __getitem__(self, key): + return dict.get(self, key, self.default) + + +# Some utility functions to deal with weirdness around UCS2 vs UCS4 +# python builds + +def isSurrogatePair(data): + return (len(data) == 2 and + ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and + ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF) + + +def surrogatePairToCodepoint(data): + char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 + + (ord(data[1]) - 0xDC00)) + return char_val + +# Module Factory Factory (no, this isn't Java, I know) +# Here to stop this being duplicated all over the place. 
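+# Editor's usage sketch (not part of the upstream file): a caller wraps a
+# factory that returns a dict of objects for a base module and gets back a
+# caching constructor, e.g. as html5lib's etree treebuilder does:
+#
+#   getETreeModule = moduleFactoryFactory(getETreeBuilder)
+#   mod = getETreeModule(default_etree)  # built once, cached for reuse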
+ + +def moduleFactoryFactory(factory): + moduleCache = {} + + def moduleFactory(baseModule, *args, **kwargs): + if isinstance(ModuleType.__name__, type("")): + name = "_%s_factory" % baseModule.__name__ + else: + name = b"_%s_factory" % baseModule.__name__ + + kwargs_tuple = tuple(kwargs.items()) + + try: + return moduleCache[name][args][kwargs_tuple] + except KeyError: + mod = ModuleType(name) + objs = factory(baseModule, *args, **kwargs) + mod.__dict__.update(objs) + if "name" not in moduleCache: + moduleCache[name] = {} + if "args" not in moduleCache[name]: + moduleCache[name][args] = {} + if "kwargs" not in moduleCache[name][args]: + moduleCache[name][args][kwargs_tuple] = {} + moduleCache[name][args][kwargs_tuple] = mod + return mod + + return moduleFactory + + +def memoize(func): + cache = {} + + def wrapped(*args, **kwargs): + key = (tuple(args), tuple(kwargs.items())) + if key not in cache: + cache[key] = func(*args, **kwargs) + return cache[key] + + return wrapped diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pyc new file mode 100644 index 0000000..eab6ea6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.py new file mode 100644 index 0000000..1ff8041 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.py @@ -0,0 +1,2947 @@ +from __future__ import absolute_import, division, unicode_literals + +import string + +EOF = None + +E = { + "null-character": + "Null character in input stream, replaced with U+FFFD.", + "invalid-codepoint": + "Invalid codepoint in stream.", + "incorrectly-placed-solidus": + "Solidus (/) incorrectly placed in tag.", + "incorrect-cr-newline-entity": + "Incorrect CR newline entity, replaced with LF.", + "illegal-windows-1252-entity": + "Entity used with illegal number (windows-1252 reference).", + "cant-convert-numeric-entity": + "Numeric entity couldn't be converted to character " + "(codepoint U+%(charAsInt)08x).", + "illegal-codepoint-for-numeric-entity": + "Numeric entity represents an illegal codepoint: " + "U+%(charAsInt)08x.", + "numeric-entity-without-semicolon": + "Numeric entity didn't end with ';'.", + "expected-numeric-entity-but-got-eof": + "Numeric entity expected. Got end of file instead.", + "expected-numeric-entity": + "Numeric entity expected but none found.", + "named-entity-without-semicolon": + "Named entity didn't end with ';'.", + "expected-named-entity": + "Named entity expected. Got none.", + "attributes-in-end-tag": + "End tag contains unexpected attributes.", + 'self-closing-flag-on-end-tag': + "End tag contains unexpected self-closing flag.", + "expected-tag-name-but-got-right-bracket": + "Expected tag name. Got '>' instead.", + "expected-tag-name-but-got-question-mark": + "Expected tag name. Got '?' instead. (HTML doesn't " + "support processing instructions.)", + "expected-tag-name": + "Expected tag name. Got something else instead", + "expected-closing-tag-but-got-right-bracket": + "Expected closing tag. Got '>' instead. Ignoring '</>'.", + "expected-closing-tag-but-got-eof": + "Expected closing tag. Unexpected end of file.", + "expected-closing-tag-but-got-char": + "Expected closing tag. 
Unexpected character '%(data)s' found.", + "eof-in-tag-name": + "Unexpected end of file in the tag name.", + "expected-attribute-name-but-got-eof": + "Unexpected end of file. Expected attribute name instead.", + "eof-in-attribute-name": + "Unexpected end of file in attribute name.", + "invalid-character-in-attribute-name": + "Invalid character in attribute name", + "duplicate-attribute": + "Dropped duplicate attribute on tag.", + "expected-end-of-tag-name-but-got-eof": + "Unexpected end of file. Expected = or end of tag.", + "expected-attribute-value-but-got-eof": + "Unexpected end of file. Expected attribute value.", + "expected-attribute-value-but-got-right-bracket": + "Expected attribute value. Got '>' instead.", + 'equals-in-unquoted-attribute-value': + "Unexpected = in unquoted attribute", + 'unexpected-character-in-unquoted-attribute-value': + "Unexpected character in unquoted attribute", + "invalid-character-after-attribute-name": + "Unexpected character after attribute name.", + "unexpected-character-after-attribute-value": + "Unexpected character after attribute value.", + "eof-in-attribute-value-double-quote": + "Unexpected end of file in attribute value (\").", + "eof-in-attribute-value-single-quote": + "Unexpected end of file in attribute value (').", + "eof-in-attribute-value-no-quotes": + "Unexpected end of file in attribute value.", + "unexpected-EOF-after-solidus-in-tag": + "Unexpected end of file in tag. Expected >", + "unexpected-character-after-solidus-in-tag": + "Unexpected character after / in tag. Expected >", + "expected-dashes-or-doctype": + "Expected '--' or 'DOCTYPE'. Not found.", + "unexpected-bang-after-double-dash-in-comment": + "Unexpected ! after -- in comment", + "unexpected-space-after-double-dash-in-comment": + "Unexpected space after -- in comment", + "incorrect-comment": + "Incorrect comment.", + "eof-in-comment": + "Unexpected end of file in comment.", + "eof-in-comment-end-dash": + "Unexpected end of file in comment (-)", + "unexpected-dash-after-double-dash-in-comment": + "Unexpected '-' after '--' found in comment.", + "eof-in-comment-double-dash": + "Unexpected end of file in comment (--).", + "eof-in-comment-end-space-state": + "Unexpected end of file in comment.", + "eof-in-comment-end-bang-state": + "Unexpected end of file in comment.", + "unexpected-char-in-comment": + "Unexpected character in comment found.", + "need-space-after-doctype": + "No space after literal string 'DOCTYPE'.", + "expected-doctype-name-but-got-right-bracket": + "Unexpected > character. Expected DOCTYPE name.", + "expected-doctype-name-but-got-eof": + "Unexpected end of file. Expected DOCTYPE name.", + "eof-in-doctype-name": + "Unexpected end of file in DOCTYPE name.", + "eof-in-doctype": + "Unexpected end of file in DOCTYPE.", + "expected-space-or-right-bracket-in-doctype": + "Expected space or '>'. Got '%(data)s'", + "unexpected-end-of-doctype": + "Unexpected end of DOCTYPE.", + "unexpected-char-in-doctype": + "Unexpected character in DOCTYPE.", + "eof-in-innerhtml": + "XXX innerHTML EOF", + "unexpected-doctype": + "Unexpected DOCTYPE. Ignored.", + "non-html-root": + "html needs to be the first start tag.", + "expected-doctype-but-got-eof": + "Unexpected End of file. Expected DOCTYPE.", + "unknown-doctype": + "Erroneous DOCTYPE.", + "expected-doctype-but-got-chars": + "Unexpected non-space characters. Expected DOCTYPE.", + "expected-doctype-but-got-start-tag": + "Unexpected start tag (%(name)s). 
Expected DOCTYPE.", + "expected-doctype-but-got-end-tag": + "Unexpected end tag (%(name)s). Expected DOCTYPE.", + "end-tag-after-implied-root": + "Unexpected end tag (%(name)s) after the (implied) root element.", + "expected-named-closing-tag-but-got-eof": + "Unexpected end of file. Expected end tag (%(name)s).", + "two-heads-are-not-better-than-one": + "Unexpected start tag head in existing head. Ignored.", + "unexpected-end-tag": + "Unexpected end tag (%(name)s). Ignored.", + "unexpected-start-tag-out-of-my-head": + "Unexpected start tag (%(name)s) that can be in head. Moved.", + "unexpected-start-tag": + "Unexpected start tag (%(name)s).", + "missing-end-tag": + "Missing end tag (%(name)s).", + "missing-end-tags": + "Missing end tags (%(name)s).", + "unexpected-start-tag-implies-end-tag": + "Unexpected start tag (%(startName)s) " + "implies end tag (%(endName)s).", + "unexpected-start-tag-treated-as": + "Unexpected start tag (%(originalName)s). Treated as %(newName)s.", + "deprecated-tag": + "Unexpected start tag %(name)s. Don't use it!", + "unexpected-start-tag-ignored": + "Unexpected start tag %(name)s. Ignored.", + "expected-one-end-tag-but-got-another": + "Unexpected end tag (%(gotName)s). " + "Missing end tag (%(expectedName)s).", + "end-tag-too-early": + "End tag (%(name)s) seen too early. Expected other end tag.", + "end-tag-too-early-named": + "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).", + "end-tag-too-early-ignored": + "End tag (%(name)s) seen too early. Ignored.", + "adoption-agency-1.1": + "End tag (%(name)s) violates step 1, " + "paragraph 1 of the adoption agency algorithm.", + "adoption-agency-1.2": + "End tag (%(name)s) violates step 1, " + "paragraph 2 of the adoption agency algorithm.", + "adoption-agency-1.3": + "End tag (%(name)s) violates step 1, " + "paragraph 3 of the adoption agency algorithm.", + "adoption-agency-4.4": + "End tag (%(name)s) violates step 4, " + "paragraph 4 of the adoption agency algorithm.", + "unexpected-end-tag-treated-as": + "Unexpected end tag (%(originalName)s). Treated as %(newName)s.", + "no-end-tag": + "This element (%(name)s) has no end tag.", + "unexpected-implied-end-tag-in-table": + "Unexpected implied end tag (%(name)s) in the table phase.", + "unexpected-implied-end-tag-in-table-body": + "Unexpected implied end tag (%(name)s) in the table body phase.", + "unexpected-char-implies-table-voodoo": + "Unexpected non-space characters in " + "table context caused voodoo mode.", + "unexpected-hidden-input-in-table": + "Unexpected input with type hidden in table context.", + "unexpected-form-in-table": + "Unexpected form in table context.", + "unexpected-start-tag-implies-table-voodoo": + "Unexpected start tag (%(name)s) in " + "table context caused voodoo mode.", + "unexpected-end-tag-implies-table-voodoo": + "Unexpected end tag (%(name)s) in " + "table context caused voodoo mode.", + "unexpected-cell-in-table-body": + "Unexpected table cell start tag (%(name)s) " + "in the table body phase.", + "unexpected-cell-end-tag": + "Got table cell end tag (%(name)s) " + "while required end tags are missing.", + "unexpected-end-tag-in-table-body": + "Unexpected end tag (%(name)s) in the table body phase. Ignored.", + "unexpected-implied-end-tag-in-table-row": + "Unexpected implied end tag (%(name)s) in the table row phase.", + "unexpected-end-tag-in-table-row": + "Unexpected end tag (%(name)s) in the table row phase. 
Ignored.", + "unexpected-select-in-select": + "Unexpected select start tag in the select phase " + "treated as select end tag.", + "unexpected-input-in-select": + "Unexpected input start tag in the select phase.", + "unexpected-start-tag-in-select": + "Unexpected start tag token (%(name)s in the select phase. " + "Ignored.", + "unexpected-end-tag-in-select": + "Unexpected end tag (%(name)s) in the select phase. Ignored.", + "unexpected-table-element-start-tag-in-select-in-table": + "Unexpected table element start tag (%(name)s) in the select in table phase.", + "unexpected-table-element-end-tag-in-select-in-table": + "Unexpected table element end tag (%(name)s) in the select in table phase.", + "unexpected-char-after-body": + "Unexpected non-space characters in the after body phase.", + "unexpected-start-tag-after-body": + "Unexpected start tag token (%(name)s)" + " in the after body phase.", + "unexpected-end-tag-after-body": + "Unexpected end tag token (%(name)s)" + " in the after body phase.", + "unexpected-char-in-frameset": + "Unexpected characters in the frameset phase. Characters ignored.", + "unexpected-start-tag-in-frameset": + "Unexpected start tag token (%(name)s)" + " in the frameset phase. Ignored.", + "unexpected-frameset-in-frameset-innerhtml": + "Unexpected end tag token (frameset) " + "in the frameset phase (innerHTML).", + "unexpected-end-tag-in-frameset": + "Unexpected end tag token (%(name)s)" + " in the frameset phase. Ignored.", + "unexpected-char-after-frameset": + "Unexpected non-space characters in the " + "after frameset phase. Ignored.", + "unexpected-start-tag-after-frameset": + "Unexpected start tag (%(name)s)" + " in the after frameset phase. Ignored.", + "unexpected-end-tag-after-frameset": + "Unexpected end tag (%(name)s)" + " in the after frameset phase. Ignored.", + "unexpected-end-tag-after-body-innerhtml": + "Unexpected end tag after body(innerHtml)", + "expected-eof-but-got-char": + "Unexpected non-space characters. Expected end of file.", + "expected-eof-but-got-start-tag": + "Unexpected start tag (%(name)s)" + ". Expected end of file.", + "expected-eof-but-got-end-tag": + "Unexpected end tag (%(name)s)" + ". Expected end of file.", + "eof-in-table": + "Unexpected end of file. Expected table content.", + "eof-in-select": + "Unexpected end of file. Expected select content.", + "eof-in-frameset": + "Unexpected end of file. Expected frameset content.", + "eof-in-script-in-script": + "Unexpected end of file. Expected script content.", + "eof-in-foreign-lands": + "Unexpected end of file. Expected foreign content", + "non-void-element-with-trailing-solidus": + "Trailing solidus not allowed on element %(name)s", + "unexpected-html-element-in-foreign-content": + "Element %(name)s not allowed in a non-html context", + "unexpected-end-tag-before-html": + "Unexpected end tag (%(name)s) before html.", + "unexpected-inhead-noscript-tag": + "Element %(name)s not allowed in a inhead-noscript context", + "eof-in-head-noscript": + "Unexpected end of file. Expected inhead-noscript content", + "char-in-head-noscript": + "Unexpected non-space character. 
Expected inhead-noscript content", + "XXX-undefined-error": + "Undefined error (this sucks and should be fixed)", +} + +namespaces = { + "html": "http://www.w3.org/1999/xhtml", + "mathml": "http://www.w3.org/1998/Math/MathML", + "svg": "http://www.w3.org/2000/svg", + "xlink": "http://www.w3.org/1999/xlink", + "xml": "http://www.w3.org/XML/1998/namespace", + "xmlns": "http://www.w3.org/2000/xmlns/" +} + +scopingElements = frozenset([ + (namespaces["html"], "applet"), + (namespaces["html"], "caption"), + (namespaces["html"], "html"), + (namespaces["html"], "marquee"), + (namespaces["html"], "object"), + (namespaces["html"], "table"), + (namespaces["html"], "td"), + (namespaces["html"], "th"), + (namespaces["mathml"], "mi"), + (namespaces["mathml"], "mo"), + (namespaces["mathml"], "mn"), + (namespaces["mathml"], "ms"), + (namespaces["mathml"], "mtext"), + (namespaces["mathml"], "annotation-xml"), + (namespaces["svg"], "foreignObject"), + (namespaces["svg"], "desc"), + (namespaces["svg"], "title"), +]) + +formattingElements = frozenset([ + (namespaces["html"], "a"), + (namespaces["html"], "b"), + (namespaces["html"], "big"), + (namespaces["html"], "code"), + (namespaces["html"], "em"), + (namespaces["html"], "font"), + (namespaces["html"], "i"), + (namespaces["html"], "nobr"), + (namespaces["html"], "s"), + (namespaces["html"], "small"), + (namespaces["html"], "strike"), + (namespaces["html"], "strong"), + (namespaces["html"], "tt"), + (namespaces["html"], "u") +]) + +specialElements = frozenset([ + (namespaces["html"], "address"), + (namespaces["html"], "applet"), + (namespaces["html"], "area"), + (namespaces["html"], "article"), + (namespaces["html"], "aside"), + (namespaces["html"], "base"), + (namespaces["html"], "basefont"), + (namespaces["html"], "bgsound"), + (namespaces["html"], "blockquote"), + (namespaces["html"], "body"), + (namespaces["html"], "br"), + (namespaces["html"], "button"), + (namespaces["html"], "caption"), + (namespaces["html"], "center"), + (namespaces["html"], "col"), + (namespaces["html"], "colgroup"), + (namespaces["html"], "command"), + (namespaces["html"], "dd"), + (namespaces["html"], "details"), + (namespaces["html"], "dir"), + (namespaces["html"], "div"), + (namespaces["html"], "dl"), + (namespaces["html"], "dt"), + (namespaces["html"], "embed"), + (namespaces["html"], "fieldset"), + (namespaces["html"], "figure"), + (namespaces["html"], "footer"), + (namespaces["html"], "form"), + (namespaces["html"], "frame"), + (namespaces["html"], "frameset"), + (namespaces["html"], "h1"), + (namespaces["html"], "h2"), + (namespaces["html"], "h3"), + (namespaces["html"], "h4"), + (namespaces["html"], "h5"), + (namespaces["html"], "h6"), + (namespaces["html"], "head"), + (namespaces["html"], "header"), + (namespaces["html"], "hr"), + (namespaces["html"], "html"), + (namespaces["html"], "iframe"), + # Note that image is commented out in the spec as "this isn't an + # element that can end up on the stack, so it doesn't matter," + (namespaces["html"], "image"), + (namespaces["html"], "img"), + (namespaces["html"], "input"), + (namespaces["html"], "isindex"), + (namespaces["html"], "li"), + (namespaces["html"], "link"), + (namespaces["html"], "listing"), + (namespaces["html"], "marquee"), + (namespaces["html"], "menu"), + (namespaces["html"], "meta"), + (namespaces["html"], "nav"), + (namespaces["html"], "noembed"), + (namespaces["html"], "noframes"), + (namespaces["html"], "noscript"), + (namespaces["html"], "object"), + (namespaces["html"], "ol"), + (namespaces["html"], "p"), 
+ (namespaces["html"], "param"), + (namespaces["html"], "plaintext"), + (namespaces["html"], "pre"), + (namespaces["html"], "script"), + (namespaces["html"], "section"), + (namespaces["html"], "select"), + (namespaces["html"], "style"), + (namespaces["html"], "table"), + (namespaces["html"], "tbody"), + (namespaces["html"], "td"), + (namespaces["html"], "textarea"), + (namespaces["html"], "tfoot"), + (namespaces["html"], "th"), + (namespaces["html"], "thead"), + (namespaces["html"], "title"), + (namespaces["html"], "tr"), + (namespaces["html"], "ul"), + (namespaces["html"], "wbr"), + (namespaces["html"], "xmp"), + (namespaces["svg"], "foreignObject") +]) + +htmlIntegrationPointElements = frozenset([ + (namespaces["mathml"], "annotation-xml"), + (namespaces["svg"], "foreignObject"), + (namespaces["svg"], "desc"), + (namespaces["svg"], "title") +]) + +mathmlTextIntegrationPointElements = frozenset([ + (namespaces["mathml"], "mi"), + (namespaces["mathml"], "mo"), + (namespaces["mathml"], "mn"), + (namespaces["mathml"], "ms"), + (namespaces["mathml"], "mtext") +]) + +adjustSVGAttributes = { + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan" +} + +adjustMathMLAttributes = {"definitionurl": "definitionURL"} + +adjustForeignAttributes = { + "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), + "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), + "xlink:href": ("xlink", "href", namespaces["xlink"]), + "xlink:role": ("xlink", "role", 
namespaces["xlink"]), + "xlink:show": ("xlink", "show", namespaces["xlink"]), + "xlink:title": ("xlink", "title", namespaces["xlink"]), + "xlink:type": ("xlink", "type", namespaces["xlink"]), + "xml:base": ("xml", "base", namespaces["xml"]), + "xml:lang": ("xml", "lang", namespaces["xml"]), + "xml:space": ("xml", "space", namespaces["xml"]), + "xmlns": (None, "xmlns", namespaces["xmlns"]), + "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) +} + +unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in + adjustForeignAttributes.items()]) + +spaceCharacters = frozenset([ + "\t", + "\n", + "\u000C", + " ", + "\r" +]) + +tableInsertModeElements = frozenset([ + "table", + "tbody", + "tfoot", + "thead", + "tr" +]) + +asciiLowercase = frozenset(string.ascii_lowercase) +asciiUppercase = frozenset(string.ascii_uppercase) +asciiLetters = frozenset(string.ascii_letters) +digits = frozenset(string.digits) +hexDigits = frozenset(string.hexdigits) + +asciiUpper2Lower = dict([(ord(c), ord(c.lower())) + for c in string.ascii_uppercase]) + +# Heading elements need to be ordered +headingElements = ( + "h1", + "h2", + "h3", + "h4", + "h5", + "h6" +) + +voidElements = frozenset([ + "base", + "command", + "event-source", + "link", + "meta", + "hr", + "br", + "img", + "embed", + "param", + "area", + "col", + "input", + "source", + "track" +]) + +cdataElements = frozenset(['title', 'textarea']) + +rcdataElements = frozenset([ + 'style', + 'script', + 'xmp', + 'iframe', + 'noembed', + 'noframes', + 'noscript' +]) + +booleanAttributes = { + "": frozenset(["irrelevant", "itemscope"]), + "style": frozenset(["scoped"]), + "img": frozenset(["ismap"]), + "audio": frozenset(["autoplay", "controls"]), + "video": frozenset(["autoplay", "controls"]), + "script": frozenset(["defer", "async"]), + "details": frozenset(["open"]), + "datagrid": frozenset(["multiple", "disabled"]), + "command": frozenset(["hidden", "disabled", "checked", "default"]), + "hr": frozenset(["noshade"]), + "menu": frozenset(["autosubmit"]), + "fieldset": frozenset(["disabled", "readonly"]), + "option": frozenset(["disabled", "readonly", "selected"]), + "optgroup": frozenset(["disabled", "readonly"]), + "button": frozenset(["disabled", "autofocus"]), + "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]), + "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]), + "output": frozenset(["disabled", "readonly"]), + "iframe": frozenset(["seamless"]), +} + +# entitiesWindows1252 has to be _ordered_ and needs to have an index. It +# therefore can't be a frozenset. 
+entitiesWindows1252 = ( + 8364, # 0x80 0x20AC EURO SIGN + 65533, # 0x81 UNDEFINED + 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK + 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK + 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK + 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS + 8224, # 0x86 0x2020 DAGGER + 8225, # 0x87 0x2021 DOUBLE DAGGER + 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT + 8240, # 0x89 0x2030 PER MILLE SIGN + 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON + 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK + 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE + 65533, # 0x8D UNDEFINED + 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON + 65533, # 0x8F UNDEFINED + 65533, # 0x90 UNDEFINED + 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK + 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK + 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK + 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK + 8226, # 0x95 0x2022 BULLET + 8211, # 0x96 0x2013 EN DASH + 8212, # 0x97 0x2014 EM DASH + 732, # 0x98 0x02DC SMALL TILDE + 8482, # 0x99 0x2122 TRADE MARK SIGN + 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON + 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE + 65533, # 0x9D UNDEFINED + 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON + 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS +) + +xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;']) + +entities = { + "AElig": "\xc6", + "AElig;": "\xc6", + "AMP": "&", + "AMP;": "&", + "Aacute": "\xc1", + "Aacute;": "\xc1", + "Abreve;": "\u0102", + "Acirc": "\xc2", + "Acirc;": "\xc2", + "Acy;": "\u0410", + "Afr;": "\U0001d504", + "Agrave": "\xc0", + "Agrave;": "\xc0", + "Alpha;": "\u0391", + "Amacr;": "\u0100", + "And;": "\u2a53", + "Aogon;": "\u0104", + "Aopf;": "\U0001d538", + "ApplyFunction;": "\u2061", + "Aring": "\xc5", + "Aring;": "\xc5", + "Ascr;": "\U0001d49c", + "Assign;": "\u2254", + "Atilde": "\xc3", + "Atilde;": "\xc3", + "Auml": "\xc4", + "Auml;": "\xc4", + "Backslash;": "\u2216", + "Barv;": "\u2ae7", + "Barwed;": "\u2306", + "Bcy;": "\u0411", + "Because;": "\u2235", + "Bernoullis;": "\u212c", + "Beta;": "\u0392", + "Bfr;": "\U0001d505", + "Bopf;": "\U0001d539", + "Breve;": "\u02d8", + "Bscr;": "\u212c", + "Bumpeq;": "\u224e", + "CHcy;": "\u0427", + "COPY": "\xa9", + "COPY;": "\xa9", + "Cacute;": "\u0106", + "Cap;": "\u22d2", + "CapitalDifferentialD;": "\u2145", + "Cayleys;": "\u212d", + "Ccaron;": "\u010c", + "Ccedil": "\xc7", + "Ccedil;": "\xc7", + "Ccirc;": "\u0108", + "Cconint;": "\u2230", + "Cdot;": "\u010a", + "Cedilla;": "\xb8", + "CenterDot;": "\xb7", + "Cfr;": "\u212d", + "Chi;": "\u03a7", + "CircleDot;": "\u2299", + "CircleMinus;": "\u2296", + "CirclePlus;": "\u2295", + "CircleTimes;": "\u2297", + "ClockwiseContourIntegral;": "\u2232", + "CloseCurlyDoubleQuote;": "\u201d", + "CloseCurlyQuote;": "\u2019", + "Colon;": "\u2237", + "Colone;": "\u2a74", + "Congruent;": "\u2261", + "Conint;": "\u222f", + "ContourIntegral;": "\u222e", + "Copf;": "\u2102", + "Coproduct;": "\u2210", + "CounterClockwiseContourIntegral;": "\u2233", + "Cross;": "\u2a2f", + "Cscr;": "\U0001d49e", + "Cup;": "\u22d3", + "CupCap;": "\u224d", + "DD;": "\u2145", + "DDotrahd;": "\u2911", + "DJcy;": "\u0402", + "DScy;": "\u0405", + "DZcy;": "\u040f", + "Dagger;": "\u2021", + "Darr;": "\u21a1", + "Dashv;": "\u2ae4", + "Dcaron;": "\u010e", + "Dcy;": "\u0414", + "Del;": "\u2207", + "Delta;": "\u0394", + "Dfr;": "\U0001d507", + "DiacriticalAcute;": "\xb4", + 
"DiacriticalDot;": "\u02d9", + "DiacriticalDoubleAcute;": "\u02dd", + "DiacriticalGrave;": "`", + "DiacriticalTilde;": "\u02dc", + "Diamond;": "\u22c4", + "DifferentialD;": "\u2146", + "Dopf;": "\U0001d53b", + "Dot;": "\xa8", + "DotDot;": "\u20dc", + "DotEqual;": "\u2250", + "DoubleContourIntegral;": "\u222f", + "DoubleDot;": "\xa8", + "DoubleDownArrow;": "\u21d3", + "DoubleLeftArrow;": "\u21d0", + "DoubleLeftRightArrow;": "\u21d4", + "DoubleLeftTee;": "\u2ae4", + "DoubleLongLeftArrow;": "\u27f8", + "DoubleLongLeftRightArrow;": "\u27fa", + "DoubleLongRightArrow;": "\u27f9", + "DoubleRightArrow;": "\u21d2", + "DoubleRightTee;": "\u22a8", + "DoubleUpArrow;": "\u21d1", + "DoubleUpDownArrow;": "\u21d5", + "DoubleVerticalBar;": "\u2225", + "DownArrow;": "\u2193", + "DownArrowBar;": "\u2913", + "DownArrowUpArrow;": "\u21f5", + "DownBreve;": "\u0311", + "DownLeftRightVector;": "\u2950", + "DownLeftTeeVector;": "\u295e", + "DownLeftVector;": "\u21bd", + "DownLeftVectorBar;": "\u2956", + "DownRightTeeVector;": "\u295f", + "DownRightVector;": "\u21c1", + "DownRightVectorBar;": "\u2957", + "DownTee;": "\u22a4", + "DownTeeArrow;": "\u21a7", + "Downarrow;": "\u21d3", + "Dscr;": "\U0001d49f", + "Dstrok;": "\u0110", + "ENG;": "\u014a", + "ETH": "\xd0", + "ETH;": "\xd0", + "Eacute": "\xc9", + "Eacute;": "\xc9", + "Ecaron;": "\u011a", + "Ecirc": "\xca", + "Ecirc;": "\xca", + "Ecy;": "\u042d", + "Edot;": "\u0116", + "Efr;": "\U0001d508", + "Egrave": "\xc8", + "Egrave;": "\xc8", + "Element;": "\u2208", + "Emacr;": "\u0112", + "EmptySmallSquare;": "\u25fb", + "EmptyVerySmallSquare;": "\u25ab", + "Eogon;": "\u0118", + "Eopf;": "\U0001d53c", + "Epsilon;": "\u0395", + "Equal;": "\u2a75", + "EqualTilde;": "\u2242", + "Equilibrium;": "\u21cc", + "Escr;": "\u2130", + "Esim;": "\u2a73", + "Eta;": "\u0397", + "Euml": "\xcb", + "Euml;": "\xcb", + "Exists;": "\u2203", + "ExponentialE;": "\u2147", + "Fcy;": "\u0424", + "Ffr;": "\U0001d509", + "FilledSmallSquare;": "\u25fc", + "FilledVerySmallSquare;": "\u25aa", + "Fopf;": "\U0001d53d", + "ForAll;": "\u2200", + "Fouriertrf;": "\u2131", + "Fscr;": "\u2131", + "GJcy;": "\u0403", + "GT": ">", + "GT;": ">", + "Gamma;": "\u0393", + "Gammad;": "\u03dc", + "Gbreve;": "\u011e", + "Gcedil;": "\u0122", + "Gcirc;": "\u011c", + "Gcy;": "\u0413", + "Gdot;": "\u0120", + "Gfr;": "\U0001d50a", + "Gg;": "\u22d9", + "Gopf;": "\U0001d53e", + "GreaterEqual;": "\u2265", + "GreaterEqualLess;": "\u22db", + "GreaterFullEqual;": "\u2267", + "GreaterGreater;": "\u2aa2", + "GreaterLess;": "\u2277", + "GreaterSlantEqual;": "\u2a7e", + "GreaterTilde;": "\u2273", + "Gscr;": "\U0001d4a2", + "Gt;": "\u226b", + "HARDcy;": "\u042a", + "Hacek;": "\u02c7", + "Hat;": "^", + "Hcirc;": "\u0124", + "Hfr;": "\u210c", + "HilbertSpace;": "\u210b", + "Hopf;": "\u210d", + "HorizontalLine;": "\u2500", + "Hscr;": "\u210b", + "Hstrok;": "\u0126", + "HumpDownHump;": "\u224e", + "HumpEqual;": "\u224f", + "IEcy;": "\u0415", + "IJlig;": "\u0132", + "IOcy;": "\u0401", + "Iacute": "\xcd", + "Iacute;": "\xcd", + "Icirc": "\xce", + "Icirc;": "\xce", + "Icy;": "\u0418", + "Idot;": "\u0130", + "Ifr;": "\u2111", + "Igrave": "\xcc", + "Igrave;": "\xcc", + "Im;": "\u2111", + "Imacr;": "\u012a", + "ImaginaryI;": "\u2148", + "Implies;": "\u21d2", + "Int;": "\u222c", + "Integral;": "\u222b", + "Intersection;": "\u22c2", + "InvisibleComma;": "\u2063", + "InvisibleTimes;": "\u2062", + "Iogon;": "\u012e", + "Iopf;": "\U0001d540", + "Iota;": "\u0399", + "Iscr;": "\u2110", + "Itilde;": "\u0128", + "Iukcy;": "\u0406", + "Iuml": "\xcf", + 
"Iuml;": "\xcf", + "Jcirc;": "\u0134", + "Jcy;": "\u0419", + "Jfr;": "\U0001d50d", + "Jopf;": "\U0001d541", + "Jscr;": "\U0001d4a5", + "Jsercy;": "\u0408", + "Jukcy;": "\u0404", + "KHcy;": "\u0425", + "KJcy;": "\u040c", + "Kappa;": "\u039a", + "Kcedil;": "\u0136", + "Kcy;": "\u041a", + "Kfr;": "\U0001d50e", + "Kopf;": "\U0001d542", + "Kscr;": "\U0001d4a6", + "LJcy;": "\u0409", + "LT": "<", + "LT;": "<", + "Lacute;": "\u0139", + "Lambda;": "\u039b", + "Lang;": "\u27ea", + "Laplacetrf;": "\u2112", + "Larr;": "\u219e", + "Lcaron;": "\u013d", + "Lcedil;": "\u013b", + "Lcy;": "\u041b", + "LeftAngleBracket;": "\u27e8", + "LeftArrow;": "\u2190", + "LeftArrowBar;": "\u21e4", + "LeftArrowRightArrow;": "\u21c6", + "LeftCeiling;": "\u2308", + "LeftDoubleBracket;": "\u27e6", + "LeftDownTeeVector;": "\u2961", + "LeftDownVector;": "\u21c3", + "LeftDownVectorBar;": "\u2959", + "LeftFloor;": "\u230a", + "LeftRightArrow;": "\u2194", + "LeftRightVector;": "\u294e", + "LeftTee;": "\u22a3", + "LeftTeeArrow;": "\u21a4", + "LeftTeeVector;": "\u295a", + "LeftTriangle;": "\u22b2", + "LeftTriangleBar;": "\u29cf", + "LeftTriangleEqual;": "\u22b4", + "LeftUpDownVector;": "\u2951", + "LeftUpTeeVector;": "\u2960", + "LeftUpVector;": "\u21bf", + "LeftUpVectorBar;": "\u2958", + "LeftVector;": "\u21bc", + "LeftVectorBar;": "\u2952", + "Leftarrow;": "\u21d0", + "Leftrightarrow;": "\u21d4", + "LessEqualGreater;": "\u22da", + "LessFullEqual;": "\u2266", + "LessGreater;": "\u2276", + "LessLess;": "\u2aa1", + "LessSlantEqual;": "\u2a7d", + "LessTilde;": "\u2272", + "Lfr;": "\U0001d50f", + "Ll;": "\u22d8", + "Lleftarrow;": "\u21da", + "Lmidot;": "\u013f", + "LongLeftArrow;": "\u27f5", + "LongLeftRightArrow;": "\u27f7", + "LongRightArrow;": "\u27f6", + "Longleftarrow;": "\u27f8", + "Longleftrightarrow;": "\u27fa", + "Longrightarrow;": "\u27f9", + "Lopf;": "\U0001d543", + "LowerLeftArrow;": "\u2199", + "LowerRightArrow;": "\u2198", + "Lscr;": "\u2112", + "Lsh;": "\u21b0", + "Lstrok;": "\u0141", + "Lt;": "\u226a", + "Map;": "\u2905", + "Mcy;": "\u041c", + "MediumSpace;": "\u205f", + "Mellintrf;": "\u2133", + "Mfr;": "\U0001d510", + "MinusPlus;": "\u2213", + "Mopf;": "\U0001d544", + "Mscr;": "\u2133", + "Mu;": "\u039c", + "NJcy;": "\u040a", + "Nacute;": "\u0143", + "Ncaron;": "\u0147", + "Ncedil;": "\u0145", + "Ncy;": "\u041d", + "NegativeMediumSpace;": "\u200b", + "NegativeThickSpace;": "\u200b", + "NegativeThinSpace;": "\u200b", + "NegativeVeryThinSpace;": "\u200b", + "NestedGreaterGreater;": "\u226b", + "NestedLessLess;": "\u226a", + "NewLine;": "\n", + "Nfr;": "\U0001d511", + "NoBreak;": "\u2060", + "NonBreakingSpace;": "\xa0", + "Nopf;": "\u2115", + "Not;": "\u2aec", + "NotCongruent;": "\u2262", + "NotCupCap;": "\u226d", + "NotDoubleVerticalBar;": "\u2226", + "NotElement;": "\u2209", + "NotEqual;": "\u2260", + "NotEqualTilde;": "\u2242\u0338", + "NotExists;": "\u2204", + "NotGreater;": "\u226f", + "NotGreaterEqual;": "\u2271", + "NotGreaterFullEqual;": "\u2267\u0338", + "NotGreaterGreater;": "\u226b\u0338", + "NotGreaterLess;": "\u2279", + "NotGreaterSlantEqual;": "\u2a7e\u0338", + "NotGreaterTilde;": "\u2275", + "NotHumpDownHump;": "\u224e\u0338", + "NotHumpEqual;": "\u224f\u0338", + "NotLeftTriangle;": "\u22ea", + "NotLeftTriangleBar;": "\u29cf\u0338", + "NotLeftTriangleEqual;": "\u22ec", + "NotLess;": "\u226e", + "NotLessEqual;": "\u2270", + "NotLessGreater;": "\u2278", + "NotLessLess;": "\u226a\u0338", + "NotLessSlantEqual;": "\u2a7d\u0338", + "NotLessTilde;": "\u2274", + "NotNestedGreaterGreater;": "\u2aa2\u0338", + 
"NotNestedLessLess;": "\u2aa1\u0338", + "NotPrecedes;": "\u2280", + "NotPrecedesEqual;": "\u2aaf\u0338", + "NotPrecedesSlantEqual;": "\u22e0", + "NotReverseElement;": "\u220c", + "NotRightTriangle;": "\u22eb", + "NotRightTriangleBar;": "\u29d0\u0338", + "NotRightTriangleEqual;": "\u22ed", + "NotSquareSubset;": "\u228f\u0338", + "NotSquareSubsetEqual;": "\u22e2", + "NotSquareSuperset;": "\u2290\u0338", + "NotSquareSupersetEqual;": "\u22e3", + "NotSubset;": "\u2282\u20d2", + "NotSubsetEqual;": "\u2288", + "NotSucceeds;": "\u2281", + "NotSucceedsEqual;": "\u2ab0\u0338", + "NotSucceedsSlantEqual;": "\u22e1", + "NotSucceedsTilde;": "\u227f\u0338", + "NotSuperset;": "\u2283\u20d2", + "NotSupersetEqual;": "\u2289", + "NotTilde;": "\u2241", + "NotTildeEqual;": "\u2244", + "NotTildeFullEqual;": "\u2247", + "NotTildeTilde;": "\u2249", + "NotVerticalBar;": "\u2224", + "Nscr;": "\U0001d4a9", + "Ntilde": "\xd1", + "Ntilde;": "\xd1", + "Nu;": "\u039d", + "OElig;": "\u0152", + "Oacute": "\xd3", + "Oacute;": "\xd3", + "Ocirc": "\xd4", + "Ocirc;": "\xd4", + "Ocy;": "\u041e", + "Odblac;": "\u0150", + "Ofr;": "\U0001d512", + "Ograve": "\xd2", + "Ograve;": "\xd2", + "Omacr;": "\u014c", + "Omega;": "\u03a9", + "Omicron;": "\u039f", + "Oopf;": "\U0001d546", + "OpenCurlyDoubleQuote;": "\u201c", + "OpenCurlyQuote;": "\u2018", + "Or;": "\u2a54", + "Oscr;": "\U0001d4aa", + "Oslash": "\xd8", + "Oslash;": "\xd8", + "Otilde": "\xd5", + "Otilde;": "\xd5", + "Otimes;": "\u2a37", + "Ouml": "\xd6", + "Ouml;": "\xd6", + "OverBar;": "\u203e", + "OverBrace;": "\u23de", + "OverBracket;": "\u23b4", + "OverParenthesis;": "\u23dc", + "PartialD;": "\u2202", + "Pcy;": "\u041f", + "Pfr;": "\U0001d513", + "Phi;": "\u03a6", + "Pi;": "\u03a0", + "PlusMinus;": "\xb1", + "Poincareplane;": "\u210c", + "Popf;": "\u2119", + "Pr;": "\u2abb", + "Precedes;": "\u227a", + "PrecedesEqual;": "\u2aaf", + "PrecedesSlantEqual;": "\u227c", + "PrecedesTilde;": "\u227e", + "Prime;": "\u2033", + "Product;": "\u220f", + "Proportion;": "\u2237", + "Proportional;": "\u221d", + "Pscr;": "\U0001d4ab", + "Psi;": "\u03a8", + "QUOT": "\"", + "QUOT;": "\"", + "Qfr;": "\U0001d514", + "Qopf;": "\u211a", + "Qscr;": "\U0001d4ac", + "RBarr;": "\u2910", + "REG": "\xae", + "REG;": "\xae", + "Racute;": "\u0154", + "Rang;": "\u27eb", + "Rarr;": "\u21a0", + "Rarrtl;": "\u2916", + "Rcaron;": "\u0158", + "Rcedil;": "\u0156", + "Rcy;": "\u0420", + "Re;": "\u211c", + "ReverseElement;": "\u220b", + "ReverseEquilibrium;": "\u21cb", + "ReverseUpEquilibrium;": "\u296f", + "Rfr;": "\u211c", + "Rho;": "\u03a1", + "RightAngleBracket;": "\u27e9", + "RightArrow;": "\u2192", + "RightArrowBar;": "\u21e5", + "RightArrowLeftArrow;": "\u21c4", + "RightCeiling;": "\u2309", + "RightDoubleBracket;": "\u27e7", + "RightDownTeeVector;": "\u295d", + "RightDownVector;": "\u21c2", + "RightDownVectorBar;": "\u2955", + "RightFloor;": "\u230b", + "RightTee;": "\u22a2", + "RightTeeArrow;": "\u21a6", + "RightTeeVector;": "\u295b", + "RightTriangle;": "\u22b3", + "RightTriangleBar;": "\u29d0", + "RightTriangleEqual;": "\u22b5", + "RightUpDownVector;": "\u294f", + "RightUpTeeVector;": "\u295c", + "RightUpVector;": "\u21be", + "RightUpVectorBar;": "\u2954", + "RightVector;": "\u21c0", + "RightVectorBar;": "\u2953", + "Rightarrow;": "\u21d2", + "Ropf;": "\u211d", + "RoundImplies;": "\u2970", + "Rrightarrow;": "\u21db", + "Rscr;": "\u211b", + "Rsh;": "\u21b1", + "RuleDelayed;": "\u29f4", + "SHCHcy;": "\u0429", + "SHcy;": "\u0428", + "SOFTcy;": "\u042c", + "Sacute;": "\u015a", + "Sc;": "\u2abc", + "Scaron;": 
"\u0160", + "Scedil;": "\u015e", + "Scirc;": "\u015c", + "Scy;": "\u0421", + "Sfr;": "\U0001d516", + "ShortDownArrow;": "\u2193", + "ShortLeftArrow;": "\u2190", + "ShortRightArrow;": "\u2192", + "ShortUpArrow;": "\u2191", + "Sigma;": "\u03a3", + "SmallCircle;": "\u2218", + "Sopf;": "\U0001d54a", + "Sqrt;": "\u221a", + "Square;": "\u25a1", + "SquareIntersection;": "\u2293", + "SquareSubset;": "\u228f", + "SquareSubsetEqual;": "\u2291", + "SquareSuperset;": "\u2290", + "SquareSupersetEqual;": "\u2292", + "SquareUnion;": "\u2294", + "Sscr;": "\U0001d4ae", + "Star;": "\u22c6", + "Sub;": "\u22d0", + "Subset;": "\u22d0", + "SubsetEqual;": "\u2286", + "Succeeds;": "\u227b", + "SucceedsEqual;": "\u2ab0", + "SucceedsSlantEqual;": "\u227d", + "SucceedsTilde;": "\u227f", + "SuchThat;": "\u220b", + "Sum;": "\u2211", + "Sup;": "\u22d1", + "Superset;": "\u2283", + "SupersetEqual;": "\u2287", + "Supset;": "\u22d1", + "THORN": "\xde", + "THORN;": "\xde", + "TRADE;": "\u2122", + "TSHcy;": "\u040b", + "TScy;": "\u0426", + "Tab;": "\t", + "Tau;": "\u03a4", + "Tcaron;": "\u0164", + "Tcedil;": "\u0162", + "Tcy;": "\u0422", + "Tfr;": "\U0001d517", + "Therefore;": "\u2234", + "Theta;": "\u0398", + "ThickSpace;": "\u205f\u200a", + "ThinSpace;": "\u2009", + "Tilde;": "\u223c", + "TildeEqual;": "\u2243", + "TildeFullEqual;": "\u2245", + "TildeTilde;": "\u2248", + "Topf;": "\U0001d54b", + "TripleDot;": "\u20db", + "Tscr;": "\U0001d4af", + "Tstrok;": "\u0166", + "Uacute": "\xda", + "Uacute;": "\xda", + "Uarr;": "\u219f", + "Uarrocir;": "\u2949", + "Ubrcy;": "\u040e", + "Ubreve;": "\u016c", + "Ucirc": "\xdb", + "Ucirc;": "\xdb", + "Ucy;": "\u0423", + "Udblac;": "\u0170", + "Ufr;": "\U0001d518", + "Ugrave": "\xd9", + "Ugrave;": "\xd9", + "Umacr;": "\u016a", + "UnderBar;": "_", + "UnderBrace;": "\u23df", + "UnderBracket;": "\u23b5", + "UnderParenthesis;": "\u23dd", + "Union;": "\u22c3", + "UnionPlus;": "\u228e", + "Uogon;": "\u0172", + "Uopf;": "\U0001d54c", + "UpArrow;": "\u2191", + "UpArrowBar;": "\u2912", + "UpArrowDownArrow;": "\u21c5", + "UpDownArrow;": "\u2195", + "UpEquilibrium;": "\u296e", + "UpTee;": "\u22a5", + "UpTeeArrow;": "\u21a5", + "Uparrow;": "\u21d1", + "Updownarrow;": "\u21d5", + "UpperLeftArrow;": "\u2196", + "UpperRightArrow;": "\u2197", + "Upsi;": "\u03d2", + "Upsilon;": "\u03a5", + "Uring;": "\u016e", + "Uscr;": "\U0001d4b0", + "Utilde;": "\u0168", + "Uuml": "\xdc", + "Uuml;": "\xdc", + "VDash;": "\u22ab", + "Vbar;": "\u2aeb", + "Vcy;": "\u0412", + "Vdash;": "\u22a9", + "Vdashl;": "\u2ae6", + "Vee;": "\u22c1", + "Verbar;": "\u2016", + "Vert;": "\u2016", + "VerticalBar;": "\u2223", + "VerticalLine;": "|", + "VerticalSeparator;": "\u2758", + "VerticalTilde;": "\u2240", + "VeryThinSpace;": "\u200a", + "Vfr;": "\U0001d519", + "Vopf;": "\U0001d54d", + "Vscr;": "\U0001d4b1", + "Vvdash;": "\u22aa", + "Wcirc;": "\u0174", + "Wedge;": "\u22c0", + "Wfr;": "\U0001d51a", + "Wopf;": "\U0001d54e", + "Wscr;": "\U0001d4b2", + "Xfr;": "\U0001d51b", + "Xi;": "\u039e", + "Xopf;": "\U0001d54f", + "Xscr;": "\U0001d4b3", + "YAcy;": "\u042f", + "YIcy;": "\u0407", + "YUcy;": "\u042e", + "Yacute": "\xdd", + "Yacute;": "\xdd", + "Ycirc;": "\u0176", + "Ycy;": "\u042b", + "Yfr;": "\U0001d51c", + "Yopf;": "\U0001d550", + "Yscr;": "\U0001d4b4", + "Yuml;": "\u0178", + "ZHcy;": "\u0416", + "Zacute;": "\u0179", + "Zcaron;": "\u017d", + "Zcy;": "\u0417", + "Zdot;": "\u017b", + "ZeroWidthSpace;": "\u200b", + "Zeta;": "\u0396", + "Zfr;": "\u2128", + "Zopf;": "\u2124", + "Zscr;": "\U0001d4b5", + "aacute": "\xe1", + "aacute;": "\xe1", 
+ "abreve;": "\u0103", + "ac;": "\u223e", + "acE;": "\u223e\u0333", + "acd;": "\u223f", + "acirc": "\xe2", + "acirc;": "\xe2", + "acute": "\xb4", + "acute;": "\xb4", + "acy;": "\u0430", + "aelig": "\xe6", + "aelig;": "\xe6", + "af;": "\u2061", + "afr;": "\U0001d51e", + "agrave": "\xe0", + "agrave;": "\xe0", + "alefsym;": "\u2135", + "aleph;": "\u2135", + "alpha;": "\u03b1", + "amacr;": "\u0101", + "amalg;": "\u2a3f", + "amp": "&", + "amp;": "&", + "and;": "\u2227", + "andand;": "\u2a55", + "andd;": "\u2a5c", + "andslope;": "\u2a58", + "andv;": "\u2a5a", + "ang;": "\u2220", + "ange;": "\u29a4", + "angle;": "\u2220", + "angmsd;": "\u2221", + "angmsdaa;": "\u29a8", + "angmsdab;": "\u29a9", + "angmsdac;": "\u29aa", + "angmsdad;": "\u29ab", + "angmsdae;": "\u29ac", + "angmsdaf;": "\u29ad", + "angmsdag;": "\u29ae", + "angmsdah;": "\u29af", + "angrt;": "\u221f", + "angrtvb;": "\u22be", + "angrtvbd;": "\u299d", + "angsph;": "\u2222", + "angst;": "\xc5", + "angzarr;": "\u237c", + "aogon;": "\u0105", + "aopf;": "\U0001d552", + "ap;": "\u2248", + "apE;": "\u2a70", + "apacir;": "\u2a6f", + "ape;": "\u224a", + "apid;": "\u224b", + "apos;": "'", + "approx;": "\u2248", + "approxeq;": "\u224a", + "aring": "\xe5", + "aring;": "\xe5", + "ascr;": "\U0001d4b6", + "ast;": "*", + "asymp;": "\u2248", + "asympeq;": "\u224d", + "atilde": "\xe3", + "atilde;": "\xe3", + "auml": "\xe4", + "auml;": "\xe4", + "awconint;": "\u2233", + "awint;": "\u2a11", + "bNot;": "\u2aed", + "backcong;": "\u224c", + "backepsilon;": "\u03f6", + "backprime;": "\u2035", + "backsim;": "\u223d", + "backsimeq;": "\u22cd", + "barvee;": "\u22bd", + "barwed;": "\u2305", + "barwedge;": "\u2305", + "bbrk;": "\u23b5", + "bbrktbrk;": "\u23b6", + "bcong;": "\u224c", + "bcy;": "\u0431", + "bdquo;": "\u201e", + "becaus;": "\u2235", + "because;": "\u2235", + "bemptyv;": "\u29b0", + "bepsi;": "\u03f6", + "bernou;": "\u212c", + "beta;": "\u03b2", + "beth;": "\u2136", + "between;": "\u226c", + "bfr;": "\U0001d51f", + "bigcap;": "\u22c2", + "bigcirc;": "\u25ef", + "bigcup;": "\u22c3", + "bigodot;": "\u2a00", + "bigoplus;": "\u2a01", + "bigotimes;": "\u2a02", + "bigsqcup;": "\u2a06", + "bigstar;": "\u2605", + "bigtriangledown;": "\u25bd", + "bigtriangleup;": "\u25b3", + "biguplus;": "\u2a04", + "bigvee;": "\u22c1", + "bigwedge;": "\u22c0", + "bkarow;": "\u290d", + "blacklozenge;": "\u29eb", + "blacksquare;": "\u25aa", + "blacktriangle;": "\u25b4", + "blacktriangledown;": "\u25be", + "blacktriangleleft;": "\u25c2", + "blacktriangleright;": "\u25b8", + "blank;": "\u2423", + "blk12;": "\u2592", + "blk14;": "\u2591", + "blk34;": "\u2593", + "block;": "\u2588", + "bne;": "=\u20e5", + "bnequiv;": "\u2261\u20e5", + "bnot;": "\u2310", + "bopf;": "\U0001d553", + "bot;": "\u22a5", + "bottom;": "\u22a5", + "bowtie;": "\u22c8", + "boxDL;": "\u2557", + "boxDR;": "\u2554", + "boxDl;": "\u2556", + "boxDr;": "\u2553", + "boxH;": "\u2550", + "boxHD;": "\u2566", + "boxHU;": "\u2569", + "boxHd;": "\u2564", + "boxHu;": "\u2567", + "boxUL;": "\u255d", + "boxUR;": "\u255a", + "boxUl;": "\u255c", + "boxUr;": "\u2559", + "boxV;": "\u2551", + "boxVH;": "\u256c", + "boxVL;": "\u2563", + "boxVR;": "\u2560", + "boxVh;": "\u256b", + "boxVl;": "\u2562", + "boxVr;": "\u255f", + "boxbox;": "\u29c9", + "boxdL;": "\u2555", + "boxdR;": "\u2552", + "boxdl;": "\u2510", + "boxdr;": "\u250c", + "boxh;": "\u2500", + "boxhD;": "\u2565", + "boxhU;": "\u2568", + "boxhd;": "\u252c", + "boxhu;": "\u2534", + "boxminus;": "\u229f", + "boxplus;": "\u229e", + "boxtimes;": "\u22a0", + "boxuL;": "\u255b", 
+ "boxuR;": "\u2558", + "boxul;": "\u2518", + "boxur;": "\u2514", + "boxv;": "\u2502", + "boxvH;": "\u256a", + "boxvL;": "\u2561", + "boxvR;": "\u255e", + "boxvh;": "\u253c", + "boxvl;": "\u2524", + "boxvr;": "\u251c", + "bprime;": "\u2035", + "breve;": "\u02d8", + "brvbar": "\xa6", + "brvbar;": "\xa6", + "bscr;": "\U0001d4b7", + "bsemi;": "\u204f", + "bsim;": "\u223d", + "bsime;": "\u22cd", + "bsol;": "\\", + "bsolb;": "\u29c5", + "bsolhsub;": "\u27c8", + "bull;": "\u2022", + "bullet;": "\u2022", + "bump;": "\u224e", + "bumpE;": "\u2aae", + "bumpe;": "\u224f", + "bumpeq;": "\u224f", + "cacute;": "\u0107", + "cap;": "\u2229", + "capand;": "\u2a44", + "capbrcup;": "\u2a49", + "capcap;": "\u2a4b", + "capcup;": "\u2a47", + "capdot;": "\u2a40", + "caps;": "\u2229\ufe00", + "caret;": "\u2041", + "caron;": "\u02c7", + "ccaps;": "\u2a4d", + "ccaron;": "\u010d", + "ccedil": "\xe7", + "ccedil;": "\xe7", + "ccirc;": "\u0109", + "ccups;": "\u2a4c", + "ccupssm;": "\u2a50", + "cdot;": "\u010b", + "cedil": "\xb8", + "cedil;": "\xb8", + "cemptyv;": "\u29b2", + "cent": "\xa2", + "cent;": "\xa2", + "centerdot;": "\xb7", + "cfr;": "\U0001d520", + "chcy;": "\u0447", + "check;": "\u2713", + "checkmark;": "\u2713", + "chi;": "\u03c7", + "cir;": "\u25cb", + "cirE;": "\u29c3", + "circ;": "\u02c6", + "circeq;": "\u2257", + "circlearrowleft;": "\u21ba", + "circlearrowright;": "\u21bb", + "circledR;": "\xae", + "circledS;": "\u24c8", + "circledast;": "\u229b", + "circledcirc;": "\u229a", + "circleddash;": "\u229d", + "cire;": "\u2257", + "cirfnint;": "\u2a10", + "cirmid;": "\u2aef", + "cirscir;": "\u29c2", + "clubs;": "\u2663", + "clubsuit;": "\u2663", + "colon;": ":", + "colone;": "\u2254", + "coloneq;": "\u2254", + "comma;": ",", + "commat;": "@", + "comp;": "\u2201", + "compfn;": "\u2218", + "complement;": "\u2201", + "complexes;": "\u2102", + "cong;": "\u2245", + "congdot;": "\u2a6d", + "conint;": "\u222e", + "copf;": "\U0001d554", + "coprod;": "\u2210", + "copy": "\xa9", + "copy;": "\xa9", + "copysr;": "\u2117", + "crarr;": "\u21b5", + "cross;": "\u2717", + "cscr;": "\U0001d4b8", + "csub;": "\u2acf", + "csube;": "\u2ad1", + "csup;": "\u2ad0", + "csupe;": "\u2ad2", + "ctdot;": "\u22ef", + "cudarrl;": "\u2938", + "cudarrr;": "\u2935", + "cuepr;": "\u22de", + "cuesc;": "\u22df", + "cularr;": "\u21b6", + "cularrp;": "\u293d", + "cup;": "\u222a", + "cupbrcap;": "\u2a48", + "cupcap;": "\u2a46", + "cupcup;": "\u2a4a", + "cupdot;": "\u228d", + "cupor;": "\u2a45", + "cups;": "\u222a\ufe00", + "curarr;": "\u21b7", + "curarrm;": "\u293c", + "curlyeqprec;": "\u22de", + "curlyeqsucc;": "\u22df", + "curlyvee;": "\u22ce", + "curlywedge;": "\u22cf", + "curren": "\xa4", + "curren;": "\xa4", + "curvearrowleft;": "\u21b6", + "curvearrowright;": "\u21b7", + "cuvee;": "\u22ce", + "cuwed;": "\u22cf", + "cwconint;": "\u2232", + "cwint;": "\u2231", + "cylcty;": "\u232d", + "dArr;": "\u21d3", + "dHar;": "\u2965", + "dagger;": "\u2020", + "daleth;": "\u2138", + "darr;": "\u2193", + "dash;": "\u2010", + "dashv;": "\u22a3", + "dbkarow;": "\u290f", + "dblac;": "\u02dd", + "dcaron;": "\u010f", + "dcy;": "\u0434", + "dd;": "\u2146", + "ddagger;": "\u2021", + "ddarr;": "\u21ca", + "ddotseq;": "\u2a77", + "deg": "\xb0", + "deg;": "\xb0", + "delta;": "\u03b4", + "demptyv;": "\u29b1", + "dfisht;": "\u297f", + "dfr;": "\U0001d521", + "dharl;": "\u21c3", + "dharr;": "\u21c2", + "diam;": "\u22c4", + "diamond;": "\u22c4", + "diamondsuit;": "\u2666", + "diams;": "\u2666", + "die;": "\xa8", + "digamma;": "\u03dd", + "disin;": "\u22f2", + "div;": 
"\xf7", + "divide": "\xf7", + "divide;": "\xf7", + "divideontimes;": "\u22c7", + "divonx;": "\u22c7", + "djcy;": "\u0452", + "dlcorn;": "\u231e", + "dlcrop;": "\u230d", + "dollar;": "$", + "dopf;": "\U0001d555", + "dot;": "\u02d9", + "doteq;": "\u2250", + "doteqdot;": "\u2251", + "dotminus;": "\u2238", + "dotplus;": "\u2214", + "dotsquare;": "\u22a1", + "doublebarwedge;": "\u2306", + "downarrow;": "\u2193", + "downdownarrows;": "\u21ca", + "downharpoonleft;": "\u21c3", + "downharpoonright;": "\u21c2", + "drbkarow;": "\u2910", + "drcorn;": "\u231f", + "drcrop;": "\u230c", + "dscr;": "\U0001d4b9", + "dscy;": "\u0455", + "dsol;": "\u29f6", + "dstrok;": "\u0111", + "dtdot;": "\u22f1", + "dtri;": "\u25bf", + "dtrif;": "\u25be", + "duarr;": "\u21f5", + "duhar;": "\u296f", + "dwangle;": "\u29a6", + "dzcy;": "\u045f", + "dzigrarr;": "\u27ff", + "eDDot;": "\u2a77", + "eDot;": "\u2251", + "eacute": "\xe9", + "eacute;": "\xe9", + "easter;": "\u2a6e", + "ecaron;": "\u011b", + "ecir;": "\u2256", + "ecirc": "\xea", + "ecirc;": "\xea", + "ecolon;": "\u2255", + "ecy;": "\u044d", + "edot;": "\u0117", + "ee;": "\u2147", + "efDot;": "\u2252", + "efr;": "\U0001d522", + "eg;": "\u2a9a", + "egrave": "\xe8", + "egrave;": "\xe8", + "egs;": "\u2a96", + "egsdot;": "\u2a98", + "el;": "\u2a99", + "elinters;": "\u23e7", + "ell;": "\u2113", + "els;": "\u2a95", + "elsdot;": "\u2a97", + "emacr;": "\u0113", + "empty;": "\u2205", + "emptyset;": "\u2205", + "emptyv;": "\u2205", + "emsp13;": "\u2004", + "emsp14;": "\u2005", + "emsp;": "\u2003", + "eng;": "\u014b", + "ensp;": "\u2002", + "eogon;": "\u0119", + "eopf;": "\U0001d556", + "epar;": "\u22d5", + "eparsl;": "\u29e3", + "eplus;": "\u2a71", + "epsi;": "\u03b5", + "epsilon;": "\u03b5", + "epsiv;": "\u03f5", + "eqcirc;": "\u2256", + "eqcolon;": "\u2255", + "eqsim;": "\u2242", + "eqslantgtr;": "\u2a96", + "eqslantless;": "\u2a95", + "equals;": "=", + "equest;": "\u225f", + "equiv;": "\u2261", + "equivDD;": "\u2a78", + "eqvparsl;": "\u29e5", + "erDot;": "\u2253", + "erarr;": "\u2971", + "escr;": "\u212f", + "esdot;": "\u2250", + "esim;": "\u2242", + "eta;": "\u03b7", + "eth": "\xf0", + "eth;": "\xf0", + "euml": "\xeb", + "euml;": "\xeb", + "euro;": "\u20ac", + "excl;": "!", + "exist;": "\u2203", + "expectation;": "\u2130", + "exponentiale;": "\u2147", + "fallingdotseq;": "\u2252", + "fcy;": "\u0444", + "female;": "\u2640", + "ffilig;": "\ufb03", + "fflig;": "\ufb00", + "ffllig;": "\ufb04", + "ffr;": "\U0001d523", + "filig;": "\ufb01", + "fjlig;": "fj", + "flat;": "\u266d", + "fllig;": "\ufb02", + "fltns;": "\u25b1", + "fnof;": "\u0192", + "fopf;": "\U0001d557", + "forall;": "\u2200", + "fork;": "\u22d4", + "forkv;": "\u2ad9", + "fpartint;": "\u2a0d", + "frac12": "\xbd", + "frac12;": "\xbd", + "frac13;": "\u2153", + "frac14": "\xbc", + "frac14;": "\xbc", + "frac15;": "\u2155", + "frac16;": "\u2159", + "frac18;": "\u215b", + "frac23;": "\u2154", + "frac25;": "\u2156", + "frac34": "\xbe", + "frac34;": "\xbe", + "frac35;": "\u2157", + "frac38;": "\u215c", + "frac45;": "\u2158", + "frac56;": "\u215a", + "frac58;": "\u215d", + "frac78;": "\u215e", + "frasl;": "\u2044", + "frown;": "\u2322", + "fscr;": "\U0001d4bb", + "gE;": "\u2267", + "gEl;": "\u2a8c", + "gacute;": "\u01f5", + "gamma;": "\u03b3", + "gammad;": "\u03dd", + "gap;": "\u2a86", + "gbreve;": "\u011f", + "gcirc;": "\u011d", + "gcy;": "\u0433", + "gdot;": "\u0121", + "ge;": "\u2265", + "gel;": "\u22db", + "geq;": "\u2265", + "geqq;": "\u2267", + "geqslant;": "\u2a7e", + "ges;": "\u2a7e", + "gescc;": "\u2aa9", + 
"gesdot;": "\u2a80", + "gesdoto;": "\u2a82", + "gesdotol;": "\u2a84", + "gesl;": "\u22db\ufe00", + "gesles;": "\u2a94", + "gfr;": "\U0001d524", + "gg;": "\u226b", + "ggg;": "\u22d9", + "gimel;": "\u2137", + "gjcy;": "\u0453", + "gl;": "\u2277", + "glE;": "\u2a92", + "gla;": "\u2aa5", + "glj;": "\u2aa4", + "gnE;": "\u2269", + "gnap;": "\u2a8a", + "gnapprox;": "\u2a8a", + "gne;": "\u2a88", + "gneq;": "\u2a88", + "gneqq;": "\u2269", + "gnsim;": "\u22e7", + "gopf;": "\U0001d558", + "grave;": "`", + "gscr;": "\u210a", + "gsim;": "\u2273", + "gsime;": "\u2a8e", + "gsiml;": "\u2a90", + "gt": ">", + "gt;": ">", + "gtcc;": "\u2aa7", + "gtcir;": "\u2a7a", + "gtdot;": "\u22d7", + "gtlPar;": "\u2995", + "gtquest;": "\u2a7c", + "gtrapprox;": "\u2a86", + "gtrarr;": "\u2978", + "gtrdot;": "\u22d7", + "gtreqless;": "\u22db", + "gtreqqless;": "\u2a8c", + "gtrless;": "\u2277", + "gtrsim;": "\u2273", + "gvertneqq;": "\u2269\ufe00", + "gvnE;": "\u2269\ufe00", + "hArr;": "\u21d4", + "hairsp;": "\u200a", + "half;": "\xbd", + "hamilt;": "\u210b", + "hardcy;": "\u044a", + "harr;": "\u2194", + "harrcir;": "\u2948", + "harrw;": "\u21ad", + "hbar;": "\u210f", + "hcirc;": "\u0125", + "hearts;": "\u2665", + "heartsuit;": "\u2665", + "hellip;": "\u2026", + "hercon;": "\u22b9", + "hfr;": "\U0001d525", + "hksearow;": "\u2925", + "hkswarow;": "\u2926", + "hoarr;": "\u21ff", + "homtht;": "\u223b", + "hookleftarrow;": "\u21a9", + "hookrightarrow;": "\u21aa", + "hopf;": "\U0001d559", + "horbar;": "\u2015", + "hscr;": "\U0001d4bd", + "hslash;": "\u210f", + "hstrok;": "\u0127", + "hybull;": "\u2043", + "hyphen;": "\u2010", + "iacute": "\xed", + "iacute;": "\xed", + "ic;": "\u2063", + "icirc": "\xee", + "icirc;": "\xee", + "icy;": "\u0438", + "iecy;": "\u0435", + "iexcl": "\xa1", + "iexcl;": "\xa1", + "iff;": "\u21d4", + "ifr;": "\U0001d526", + "igrave": "\xec", + "igrave;": "\xec", + "ii;": "\u2148", + "iiiint;": "\u2a0c", + "iiint;": "\u222d", + "iinfin;": "\u29dc", + "iiota;": "\u2129", + "ijlig;": "\u0133", + "imacr;": "\u012b", + "image;": "\u2111", + "imagline;": "\u2110", + "imagpart;": "\u2111", + "imath;": "\u0131", + "imof;": "\u22b7", + "imped;": "\u01b5", + "in;": "\u2208", + "incare;": "\u2105", + "infin;": "\u221e", + "infintie;": "\u29dd", + "inodot;": "\u0131", + "int;": "\u222b", + "intcal;": "\u22ba", + "integers;": "\u2124", + "intercal;": "\u22ba", + "intlarhk;": "\u2a17", + "intprod;": "\u2a3c", + "iocy;": "\u0451", + "iogon;": "\u012f", + "iopf;": "\U0001d55a", + "iota;": "\u03b9", + "iprod;": "\u2a3c", + "iquest": "\xbf", + "iquest;": "\xbf", + "iscr;": "\U0001d4be", + "isin;": "\u2208", + "isinE;": "\u22f9", + "isindot;": "\u22f5", + "isins;": "\u22f4", + "isinsv;": "\u22f3", + "isinv;": "\u2208", + "it;": "\u2062", + "itilde;": "\u0129", + "iukcy;": "\u0456", + "iuml": "\xef", + "iuml;": "\xef", + "jcirc;": "\u0135", + "jcy;": "\u0439", + "jfr;": "\U0001d527", + "jmath;": "\u0237", + "jopf;": "\U0001d55b", + "jscr;": "\U0001d4bf", + "jsercy;": "\u0458", + "jukcy;": "\u0454", + "kappa;": "\u03ba", + "kappav;": "\u03f0", + "kcedil;": "\u0137", + "kcy;": "\u043a", + "kfr;": "\U0001d528", + "kgreen;": "\u0138", + "khcy;": "\u0445", + "kjcy;": "\u045c", + "kopf;": "\U0001d55c", + "kscr;": "\U0001d4c0", + "lAarr;": "\u21da", + "lArr;": "\u21d0", + "lAtail;": "\u291b", + "lBarr;": "\u290e", + "lE;": "\u2266", + "lEg;": "\u2a8b", + "lHar;": "\u2962", + "lacute;": "\u013a", + "laemptyv;": "\u29b4", + "lagran;": "\u2112", + "lambda;": "\u03bb", + "lang;": "\u27e8", + "langd;": "\u2991", + "langle;": "\u27e8", + 
"lap;": "\u2a85", + "laquo": "\xab", + "laquo;": "\xab", + "larr;": "\u2190", + "larrb;": "\u21e4", + "larrbfs;": "\u291f", + "larrfs;": "\u291d", + "larrhk;": "\u21a9", + "larrlp;": "\u21ab", + "larrpl;": "\u2939", + "larrsim;": "\u2973", + "larrtl;": "\u21a2", + "lat;": "\u2aab", + "latail;": "\u2919", + "late;": "\u2aad", + "lates;": "\u2aad\ufe00", + "lbarr;": "\u290c", + "lbbrk;": "\u2772", + "lbrace;": "{", + "lbrack;": "[", + "lbrke;": "\u298b", + "lbrksld;": "\u298f", + "lbrkslu;": "\u298d", + "lcaron;": "\u013e", + "lcedil;": "\u013c", + "lceil;": "\u2308", + "lcub;": "{", + "lcy;": "\u043b", + "ldca;": "\u2936", + "ldquo;": "\u201c", + "ldquor;": "\u201e", + "ldrdhar;": "\u2967", + "ldrushar;": "\u294b", + "ldsh;": "\u21b2", + "le;": "\u2264", + "leftarrow;": "\u2190", + "leftarrowtail;": "\u21a2", + "leftharpoondown;": "\u21bd", + "leftharpoonup;": "\u21bc", + "leftleftarrows;": "\u21c7", + "leftrightarrow;": "\u2194", + "leftrightarrows;": "\u21c6", + "leftrightharpoons;": "\u21cb", + "leftrightsquigarrow;": "\u21ad", + "leftthreetimes;": "\u22cb", + "leg;": "\u22da", + "leq;": "\u2264", + "leqq;": "\u2266", + "leqslant;": "\u2a7d", + "les;": "\u2a7d", + "lescc;": "\u2aa8", + "lesdot;": "\u2a7f", + "lesdoto;": "\u2a81", + "lesdotor;": "\u2a83", + "lesg;": "\u22da\ufe00", + "lesges;": "\u2a93", + "lessapprox;": "\u2a85", + "lessdot;": "\u22d6", + "lesseqgtr;": "\u22da", + "lesseqqgtr;": "\u2a8b", + "lessgtr;": "\u2276", + "lesssim;": "\u2272", + "lfisht;": "\u297c", + "lfloor;": "\u230a", + "lfr;": "\U0001d529", + "lg;": "\u2276", + "lgE;": "\u2a91", + "lhard;": "\u21bd", + "lharu;": "\u21bc", + "lharul;": "\u296a", + "lhblk;": "\u2584", + "ljcy;": "\u0459", + "ll;": "\u226a", + "llarr;": "\u21c7", + "llcorner;": "\u231e", + "llhard;": "\u296b", + "lltri;": "\u25fa", + "lmidot;": "\u0140", + "lmoust;": "\u23b0", + "lmoustache;": "\u23b0", + "lnE;": "\u2268", + "lnap;": "\u2a89", + "lnapprox;": "\u2a89", + "lne;": "\u2a87", + "lneq;": "\u2a87", + "lneqq;": "\u2268", + "lnsim;": "\u22e6", + "loang;": "\u27ec", + "loarr;": "\u21fd", + "lobrk;": "\u27e6", + "longleftarrow;": "\u27f5", + "longleftrightarrow;": "\u27f7", + "longmapsto;": "\u27fc", + "longrightarrow;": "\u27f6", + "looparrowleft;": "\u21ab", + "looparrowright;": "\u21ac", + "lopar;": "\u2985", + "lopf;": "\U0001d55d", + "loplus;": "\u2a2d", + "lotimes;": "\u2a34", + "lowast;": "\u2217", + "lowbar;": "_", + "loz;": "\u25ca", + "lozenge;": "\u25ca", + "lozf;": "\u29eb", + "lpar;": "(", + "lparlt;": "\u2993", + "lrarr;": "\u21c6", + "lrcorner;": "\u231f", + "lrhar;": "\u21cb", + "lrhard;": "\u296d", + "lrm;": "\u200e", + "lrtri;": "\u22bf", + "lsaquo;": "\u2039", + "lscr;": "\U0001d4c1", + "lsh;": "\u21b0", + "lsim;": "\u2272", + "lsime;": "\u2a8d", + "lsimg;": "\u2a8f", + "lsqb;": "[", + "lsquo;": "\u2018", + "lsquor;": "\u201a", + "lstrok;": "\u0142", + "lt": "<", + "lt;": "<", + "ltcc;": "\u2aa6", + "ltcir;": "\u2a79", + "ltdot;": "\u22d6", + "lthree;": "\u22cb", + "ltimes;": "\u22c9", + "ltlarr;": "\u2976", + "ltquest;": "\u2a7b", + "ltrPar;": "\u2996", + "ltri;": "\u25c3", + "ltrie;": "\u22b4", + "ltrif;": "\u25c2", + "lurdshar;": "\u294a", + "luruhar;": "\u2966", + "lvertneqq;": "\u2268\ufe00", + "lvnE;": "\u2268\ufe00", + "mDDot;": "\u223a", + "macr": "\xaf", + "macr;": "\xaf", + "male;": "\u2642", + "malt;": "\u2720", + "maltese;": "\u2720", + "map;": "\u21a6", + "mapsto;": "\u21a6", + "mapstodown;": "\u21a7", + "mapstoleft;": "\u21a4", + "mapstoup;": "\u21a5", + "marker;": "\u25ae", + "mcomma;": "\u2a29", + 
"mcy;": "\u043c", + "mdash;": "\u2014", + "measuredangle;": "\u2221", + "mfr;": "\U0001d52a", + "mho;": "\u2127", + "micro": "\xb5", + "micro;": "\xb5", + "mid;": "\u2223", + "midast;": "*", + "midcir;": "\u2af0", + "middot": "\xb7", + "middot;": "\xb7", + "minus;": "\u2212", + "minusb;": "\u229f", + "minusd;": "\u2238", + "minusdu;": "\u2a2a", + "mlcp;": "\u2adb", + "mldr;": "\u2026", + "mnplus;": "\u2213", + "models;": "\u22a7", + "mopf;": "\U0001d55e", + "mp;": "\u2213", + "mscr;": "\U0001d4c2", + "mstpos;": "\u223e", + "mu;": "\u03bc", + "multimap;": "\u22b8", + "mumap;": "\u22b8", + "nGg;": "\u22d9\u0338", + "nGt;": "\u226b\u20d2", + "nGtv;": "\u226b\u0338", + "nLeftarrow;": "\u21cd", + "nLeftrightarrow;": "\u21ce", + "nLl;": "\u22d8\u0338", + "nLt;": "\u226a\u20d2", + "nLtv;": "\u226a\u0338", + "nRightarrow;": "\u21cf", + "nVDash;": "\u22af", + "nVdash;": "\u22ae", + "nabla;": "\u2207", + "nacute;": "\u0144", + "nang;": "\u2220\u20d2", + "nap;": "\u2249", + "napE;": "\u2a70\u0338", + "napid;": "\u224b\u0338", + "napos;": "\u0149", + "napprox;": "\u2249", + "natur;": "\u266e", + "natural;": "\u266e", + "naturals;": "\u2115", + "nbsp": "\xa0", + "nbsp;": "\xa0", + "nbump;": "\u224e\u0338", + "nbumpe;": "\u224f\u0338", + "ncap;": "\u2a43", + "ncaron;": "\u0148", + "ncedil;": "\u0146", + "ncong;": "\u2247", + "ncongdot;": "\u2a6d\u0338", + "ncup;": "\u2a42", + "ncy;": "\u043d", + "ndash;": "\u2013", + "ne;": "\u2260", + "neArr;": "\u21d7", + "nearhk;": "\u2924", + "nearr;": "\u2197", + "nearrow;": "\u2197", + "nedot;": "\u2250\u0338", + "nequiv;": "\u2262", + "nesear;": "\u2928", + "nesim;": "\u2242\u0338", + "nexist;": "\u2204", + "nexists;": "\u2204", + "nfr;": "\U0001d52b", + "ngE;": "\u2267\u0338", + "nge;": "\u2271", + "ngeq;": "\u2271", + "ngeqq;": "\u2267\u0338", + "ngeqslant;": "\u2a7e\u0338", + "nges;": "\u2a7e\u0338", + "ngsim;": "\u2275", + "ngt;": "\u226f", + "ngtr;": "\u226f", + "nhArr;": "\u21ce", + "nharr;": "\u21ae", + "nhpar;": "\u2af2", + "ni;": "\u220b", + "nis;": "\u22fc", + "nisd;": "\u22fa", + "niv;": "\u220b", + "njcy;": "\u045a", + "nlArr;": "\u21cd", + "nlE;": "\u2266\u0338", + "nlarr;": "\u219a", + "nldr;": "\u2025", + "nle;": "\u2270", + "nleftarrow;": "\u219a", + "nleftrightarrow;": "\u21ae", + "nleq;": "\u2270", + "nleqq;": "\u2266\u0338", + "nleqslant;": "\u2a7d\u0338", + "nles;": "\u2a7d\u0338", + "nless;": "\u226e", + "nlsim;": "\u2274", + "nlt;": "\u226e", + "nltri;": "\u22ea", + "nltrie;": "\u22ec", + "nmid;": "\u2224", + "nopf;": "\U0001d55f", + "not": "\xac", + "not;": "\xac", + "notin;": "\u2209", + "notinE;": "\u22f9\u0338", + "notindot;": "\u22f5\u0338", + "notinva;": "\u2209", + "notinvb;": "\u22f7", + "notinvc;": "\u22f6", + "notni;": "\u220c", + "notniva;": "\u220c", + "notnivb;": "\u22fe", + "notnivc;": "\u22fd", + "npar;": "\u2226", + "nparallel;": "\u2226", + "nparsl;": "\u2afd\u20e5", + "npart;": "\u2202\u0338", + "npolint;": "\u2a14", + "npr;": "\u2280", + "nprcue;": "\u22e0", + "npre;": "\u2aaf\u0338", + "nprec;": "\u2280", + "npreceq;": "\u2aaf\u0338", + "nrArr;": "\u21cf", + "nrarr;": "\u219b", + "nrarrc;": "\u2933\u0338", + "nrarrw;": "\u219d\u0338", + "nrightarrow;": "\u219b", + "nrtri;": "\u22eb", + "nrtrie;": "\u22ed", + "nsc;": "\u2281", + "nsccue;": "\u22e1", + "nsce;": "\u2ab0\u0338", + "nscr;": "\U0001d4c3", + "nshortmid;": "\u2224", + "nshortparallel;": "\u2226", + "nsim;": "\u2241", + "nsime;": "\u2244", + "nsimeq;": "\u2244", + "nsmid;": "\u2224", + "nspar;": "\u2226", + "nsqsube;": "\u22e2", + "nsqsupe;": "\u22e3", + "nsub;": 
"\u2284", + "nsubE;": "\u2ac5\u0338", + "nsube;": "\u2288", + "nsubset;": "\u2282\u20d2", + "nsubseteq;": "\u2288", + "nsubseteqq;": "\u2ac5\u0338", + "nsucc;": "\u2281", + "nsucceq;": "\u2ab0\u0338", + "nsup;": "\u2285", + "nsupE;": "\u2ac6\u0338", + "nsupe;": "\u2289", + "nsupset;": "\u2283\u20d2", + "nsupseteq;": "\u2289", + "nsupseteqq;": "\u2ac6\u0338", + "ntgl;": "\u2279", + "ntilde": "\xf1", + "ntilde;": "\xf1", + "ntlg;": "\u2278", + "ntriangleleft;": "\u22ea", + "ntrianglelefteq;": "\u22ec", + "ntriangleright;": "\u22eb", + "ntrianglerighteq;": "\u22ed", + "nu;": "\u03bd", + "num;": "#", + "numero;": "\u2116", + "numsp;": "\u2007", + "nvDash;": "\u22ad", + "nvHarr;": "\u2904", + "nvap;": "\u224d\u20d2", + "nvdash;": "\u22ac", + "nvge;": "\u2265\u20d2", + "nvgt;": ">\u20d2", + "nvinfin;": "\u29de", + "nvlArr;": "\u2902", + "nvle;": "\u2264\u20d2", + "nvlt;": "<\u20d2", + "nvltrie;": "\u22b4\u20d2", + "nvrArr;": "\u2903", + "nvrtrie;": "\u22b5\u20d2", + "nvsim;": "\u223c\u20d2", + "nwArr;": "\u21d6", + "nwarhk;": "\u2923", + "nwarr;": "\u2196", + "nwarrow;": "\u2196", + "nwnear;": "\u2927", + "oS;": "\u24c8", + "oacute": "\xf3", + "oacute;": "\xf3", + "oast;": "\u229b", + "ocir;": "\u229a", + "ocirc": "\xf4", + "ocirc;": "\xf4", + "ocy;": "\u043e", + "odash;": "\u229d", + "odblac;": "\u0151", + "odiv;": "\u2a38", + "odot;": "\u2299", + "odsold;": "\u29bc", + "oelig;": "\u0153", + "ofcir;": "\u29bf", + "ofr;": "\U0001d52c", + "ogon;": "\u02db", + "ograve": "\xf2", + "ograve;": "\xf2", + "ogt;": "\u29c1", + "ohbar;": "\u29b5", + "ohm;": "\u03a9", + "oint;": "\u222e", + "olarr;": "\u21ba", + "olcir;": "\u29be", + "olcross;": "\u29bb", + "oline;": "\u203e", + "olt;": "\u29c0", + "omacr;": "\u014d", + "omega;": "\u03c9", + "omicron;": "\u03bf", + "omid;": "\u29b6", + "ominus;": "\u2296", + "oopf;": "\U0001d560", + "opar;": "\u29b7", + "operp;": "\u29b9", + "oplus;": "\u2295", + "or;": "\u2228", + "orarr;": "\u21bb", + "ord;": "\u2a5d", + "order;": "\u2134", + "orderof;": "\u2134", + "ordf": "\xaa", + "ordf;": "\xaa", + "ordm": "\xba", + "ordm;": "\xba", + "origof;": "\u22b6", + "oror;": "\u2a56", + "orslope;": "\u2a57", + "orv;": "\u2a5b", + "oscr;": "\u2134", + "oslash": "\xf8", + "oslash;": "\xf8", + "osol;": "\u2298", + "otilde": "\xf5", + "otilde;": "\xf5", + "otimes;": "\u2297", + "otimesas;": "\u2a36", + "ouml": "\xf6", + "ouml;": "\xf6", + "ovbar;": "\u233d", + "par;": "\u2225", + "para": "\xb6", + "para;": "\xb6", + "parallel;": "\u2225", + "parsim;": "\u2af3", + "parsl;": "\u2afd", + "part;": "\u2202", + "pcy;": "\u043f", + "percnt;": "%", + "period;": ".", + "permil;": "\u2030", + "perp;": "\u22a5", + "pertenk;": "\u2031", + "pfr;": "\U0001d52d", + "phi;": "\u03c6", + "phiv;": "\u03d5", + "phmmat;": "\u2133", + "phone;": "\u260e", + "pi;": "\u03c0", + "pitchfork;": "\u22d4", + "piv;": "\u03d6", + "planck;": "\u210f", + "planckh;": "\u210e", + "plankv;": "\u210f", + "plus;": "+", + "plusacir;": "\u2a23", + "plusb;": "\u229e", + "pluscir;": "\u2a22", + "plusdo;": "\u2214", + "plusdu;": "\u2a25", + "pluse;": "\u2a72", + "plusmn": "\xb1", + "plusmn;": "\xb1", + "plussim;": "\u2a26", + "plustwo;": "\u2a27", + "pm;": "\xb1", + "pointint;": "\u2a15", + "popf;": "\U0001d561", + "pound": "\xa3", + "pound;": "\xa3", + "pr;": "\u227a", + "prE;": "\u2ab3", + "prap;": "\u2ab7", + "prcue;": "\u227c", + "pre;": "\u2aaf", + "prec;": "\u227a", + "precapprox;": "\u2ab7", + "preccurlyeq;": "\u227c", + "preceq;": "\u2aaf", + "precnapprox;": "\u2ab9", + "precneqq;": "\u2ab5", + "precnsim;": 
"\u22e8", + "precsim;": "\u227e", + "prime;": "\u2032", + "primes;": "\u2119", + "prnE;": "\u2ab5", + "prnap;": "\u2ab9", + "prnsim;": "\u22e8", + "prod;": "\u220f", + "profalar;": "\u232e", + "profline;": "\u2312", + "profsurf;": "\u2313", + "prop;": "\u221d", + "propto;": "\u221d", + "prsim;": "\u227e", + "prurel;": "\u22b0", + "pscr;": "\U0001d4c5", + "psi;": "\u03c8", + "puncsp;": "\u2008", + "qfr;": "\U0001d52e", + "qint;": "\u2a0c", + "qopf;": "\U0001d562", + "qprime;": "\u2057", + "qscr;": "\U0001d4c6", + "quaternions;": "\u210d", + "quatint;": "\u2a16", + "quest;": "?", + "questeq;": "\u225f", + "quot": "\"", + "quot;": "\"", + "rAarr;": "\u21db", + "rArr;": "\u21d2", + "rAtail;": "\u291c", + "rBarr;": "\u290f", + "rHar;": "\u2964", + "race;": "\u223d\u0331", + "racute;": "\u0155", + "radic;": "\u221a", + "raemptyv;": "\u29b3", + "rang;": "\u27e9", + "rangd;": "\u2992", + "range;": "\u29a5", + "rangle;": "\u27e9", + "raquo": "\xbb", + "raquo;": "\xbb", + "rarr;": "\u2192", + "rarrap;": "\u2975", + "rarrb;": "\u21e5", + "rarrbfs;": "\u2920", + "rarrc;": "\u2933", + "rarrfs;": "\u291e", + "rarrhk;": "\u21aa", + "rarrlp;": "\u21ac", + "rarrpl;": "\u2945", + "rarrsim;": "\u2974", + "rarrtl;": "\u21a3", + "rarrw;": "\u219d", + "ratail;": "\u291a", + "ratio;": "\u2236", + "rationals;": "\u211a", + "rbarr;": "\u290d", + "rbbrk;": "\u2773", + "rbrace;": "}", + "rbrack;": "]", + "rbrke;": "\u298c", + "rbrksld;": "\u298e", + "rbrkslu;": "\u2990", + "rcaron;": "\u0159", + "rcedil;": "\u0157", + "rceil;": "\u2309", + "rcub;": "}", + "rcy;": "\u0440", + "rdca;": "\u2937", + "rdldhar;": "\u2969", + "rdquo;": "\u201d", + "rdquor;": "\u201d", + "rdsh;": "\u21b3", + "real;": "\u211c", + "realine;": "\u211b", + "realpart;": "\u211c", + "reals;": "\u211d", + "rect;": "\u25ad", + "reg": "\xae", + "reg;": "\xae", + "rfisht;": "\u297d", + "rfloor;": "\u230b", + "rfr;": "\U0001d52f", + "rhard;": "\u21c1", + "rharu;": "\u21c0", + "rharul;": "\u296c", + "rho;": "\u03c1", + "rhov;": "\u03f1", + "rightarrow;": "\u2192", + "rightarrowtail;": "\u21a3", + "rightharpoondown;": "\u21c1", + "rightharpoonup;": "\u21c0", + "rightleftarrows;": "\u21c4", + "rightleftharpoons;": "\u21cc", + "rightrightarrows;": "\u21c9", + "rightsquigarrow;": "\u219d", + "rightthreetimes;": "\u22cc", + "ring;": "\u02da", + "risingdotseq;": "\u2253", + "rlarr;": "\u21c4", + "rlhar;": "\u21cc", + "rlm;": "\u200f", + "rmoust;": "\u23b1", + "rmoustache;": "\u23b1", + "rnmid;": "\u2aee", + "roang;": "\u27ed", + "roarr;": "\u21fe", + "robrk;": "\u27e7", + "ropar;": "\u2986", + "ropf;": "\U0001d563", + "roplus;": "\u2a2e", + "rotimes;": "\u2a35", + "rpar;": ")", + "rpargt;": "\u2994", + "rppolint;": "\u2a12", + "rrarr;": "\u21c9", + "rsaquo;": "\u203a", + "rscr;": "\U0001d4c7", + "rsh;": "\u21b1", + "rsqb;": "]", + "rsquo;": "\u2019", + "rsquor;": "\u2019", + "rthree;": "\u22cc", + "rtimes;": "\u22ca", + "rtri;": "\u25b9", + "rtrie;": "\u22b5", + "rtrif;": "\u25b8", + "rtriltri;": "\u29ce", + "ruluhar;": "\u2968", + "rx;": "\u211e", + "sacute;": "\u015b", + "sbquo;": "\u201a", + "sc;": "\u227b", + "scE;": "\u2ab4", + "scap;": "\u2ab8", + "scaron;": "\u0161", + "sccue;": "\u227d", + "sce;": "\u2ab0", + "scedil;": "\u015f", + "scirc;": "\u015d", + "scnE;": "\u2ab6", + "scnap;": "\u2aba", + "scnsim;": "\u22e9", + "scpolint;": "\u2a13", + "scsim;": "\u227f", + "scy;": "\u0441", + "sdot;": "\u22c5", + "sdotb;": "\u22a1", + "sdote;": "\u2a66", + "seArr;": "\u21d8", + "searhk;": "\u2925", + "searr;": "\u2198", + "searrow;": "\u2198", + "sect": 
"\xa7", + "sect;": "\xa7", + "semi;": ";", + "seswar;": "\u2929", + "setminus;": "\u2216", + "setmn;": "\u2216", + "sext;": "\u2736", + "sfr;": "\U0001d530", + "sfrown;": "\u2322", + "sharp;": "\u266f", + "shchcy;": "\u0449", + "shcy;": "\u0448", + "shortmid;": "\u2223", + "shortparallel;": "\u2225", + "shy": "\xad", + "shy;": "\xad", + "sigma;": "\u03c3", + "sigmaf;": "\u03c2", + "sigmav;": "\u03c2", + "sim;": "\u223c", + "simdot;": "\u2a6a", + "sime;": "\u2243", + "simeq;": "\u2243", + "simg;": "\u2a9e", + "simgE;": "\u2aa0", + "siml;": "\u2a9d", + "simlE;": "\u2a9f", + "simne;": "\u2246", + "simplus;": "\u2a24", + "simrarr;": "\u2972", + "slarr;": "\u2190", + "smallsetminus;": "\u2216", + "smashp;": "\u2a33", + "smeparsl;": "\u29e4", + "smid;": "\u2223", + "smile;": "\u2323", + "smt;": "\u2aaa", + "smte;": "\u2aac", + "smtes;": "\u2aac\ufe00", + "softcy;": "\u044c", + "sol;": "/", + "solb;": "\u29c4", + "solbar;": "\u233f", + "sopf;": "\U0001d564", + "spades;": "\u2660", + "spadesuit;": "\u2660", + "spar;": "\u2225", + "sqcap;": "\u2293", + "sqcaps;": "\u2293\ufe00", + "sqcup;": "\u2294", + "sqcups;": "\u2294\ufe00", + "sqsub;": "\u228f", + "sqsube;": "\u2291", + "sqsubset;": "\u228f", + "sqsubseteq;": "\u2291", + "sqsup;": "\u2290", + "sqsupe;": "\u2292", + "sqsupset;": "\u2290", + "sqsupseteq;": "\u2292", + "squ;": "\u25a1", + "square;": "\u25a1", + "squarf;": "\u25aa", + "squf;": "\u25aa", + "srarr;": "\u2192", + "sscr;": "\U0001d4c8", + "ssetmn;": "\u2216", + "ssmile;": "\u2323", + "sstarf;": "\u22c6", + "star;": "\u2606", + "starf;": "\u2605", + "straightepsilon;": "\u03f5", + "straightphi;": "\u03d5", + "strns;": "\xaf", + "sub;": "\u2282", + "subE;": "\u2ac5", + "subdot;": "\u2abd", + "sube;": "\u2286", + "subedot;": "\u2ac3", + "submult;": "\u2ac1", + "subnE;": "\u2acb", + "subne;": "\u228a", + "subplus;": "\u2abf", + "subrarr;": "\u2979", + "subset;": "\u2282", + "subseteq;": "\u2286", + "subseteqq;": "\u2ac5", + "subsetneq;": "\u228a", + "subsetneqq;": "\u2acb", + "subsim;": "\u2ac7", + "subsub;": "\u2ad5", + "subsup;": "\u2ad3", + "succ;": "\u227b", + "succapprox;": "\u2ab8", + "succcurlyeq;": "\u227d", + "succeq;": "\u2ab0", + "succnapprox;": "\u2aba", + "succneqq;": "\u2ab6", + "succnsim;": "\u22e9", + "succsim;": "\u227f", + "sum;": "\u2211", + "sung;": "\u266a", + "sup1": "\xb9", + "sup1;": "\xb9", + "sup2": "\xb2", + "sup2;": "\xb2", + "sup3": "\xb3", + "sup3;": "\xb3", + "sup;": "\u2283", + "supE;": "\u2ac6", + "supdot;": "\u2abe", + "supdsub;": "\u2ad8", + "supe;": "\u2287", + "supedot;": "\u2ac4", + "suphsol;": "\u27c9", + "suphsub;": "\u2ad7", + "suplarr;": "\u297b", + "supmult;": "\u2ac2", + "supnE;": "\u2acc", + "supne;": "\u228b", + "supplus;": "\u2ac0", + "supset;": "\u2283", + "supseteq;": "\u2287", + "supseteqq;": "\u2ac6", + "supsetneq;": "\u228b", + "supsetneqq;": "\u2acc", + "supsim;": "\u2ac8", + "supsub;": "\u2ad4", + "supsup;": "\u2ad6", + "swArr;": "\u21d9", + "swarhk;": "\u2926", + "swarr;": "\u2199", + "swarrow;": "\u2199", + "swnwar;": "\u292a", + "szlig": "\xdf", + "szlig;": "\xdf", + "target;": "\u2316", + "tau;": "\u03c4", + "tbrk;": "\u23b4", + "tcaron;": "\u0165", + "tcedil;": "\u0163", + "tcy;": "\u0442", + "tdot;": "\u20db", + "telrec;": "\u2315", + "tfr;": "\U0001d531", + "there4;": "\u2234", + "therefore;": "\u2234", + "theta;": "\u03b8", + "thetasym;": "\u03d1", + "thetav;": "\u03d1", + "thickapprox;": "\u2248", + "thicksim;": "\u223c", + "thinsp;": "\u2009", + "thkap;": "\u2248", + "thksim;": "\u223c", + "thorn": "\xfe", + "thorn;": "\xfe", 
+ "tilde;": "\u02dc", + "times": "\xd7", + "times;": "\xd7", + "timesb;": "\u22a0", + "timesbar;": "\u2a31", + "timesd;": "\u2a30", + "tint;": "\u222d", + "toea;": "\u2928", + "top;": "\u22a4", + "topbot;": "\u2336", + "topcir;": "\u2af1", + "topf;": "\U0001d565", + "topfork;": "\u2ada", + "tosa;": "\u2929", + "tprime;": "\u2034", + "trade;": "\u2122", + "triangle;": "\u25b5", + "triangledown;": "\u25bf", + "triangleleft;": "\u25c3", + "trianglelefteq;": "\u22b4", + "triangleq;": "\u225c", + "triangleright;": "\u25b9", + "trianglerighteq;": "\u22b5", + "tridot;": "\u25ec", + "trie;": "\u225c", + "triminus;": "\u2a3a", + "triplus;": "\u2a39", + "trisb;": "\u29cd", + "tritime;": "\u2a3b", + "trpezium;": "\u23e2", + "tscr;": "\U0001d4c9", + "tscy;": "\u0446", + "tshcy;": "\u045b", + "tstrok;": "\u0167", + "twixt;": "\u226c", + "twoheadleftarrow;": "\u219e", + "twoheadrightarrow;": "\u21a0", + "uArr;": "\u21d1", + "uHar;": "\u2963", + "uacute": "\xfa", + "uacute;": "\xfa", + "uarr;": "\u2191", + "ubrcy;": "\u045e", + "ubreve;": "\u016d", + "ucirc": "\xfb", + "ucirc;": "\xfb", + "ucy;": "\u0443", + "udarr;": "\u21c5", + "udblac;": "\u0171", + "udhar;": "\u296e", + "ufisht;": "\u297e", + "ufr;": "\U0001d532", + "ugrave": "\xf9", + "ugrave;": "\xf9", + "uharl;": "\u21bf", + "uharr;": "\u21be", + "uhblk;": "\u2580", + "ulcorn;": "\u231c", + "ulcorner;": "\u231c", + "ulcrop;": "\u230f", + "ultri;": "\u25f8", + "umacr;": "\u016b", + "uml": "\xa8", + "uml;": "\xa8", + "uogon;": "\u0173", + "uopf;": "\U0001d566", + "uparrow;": "\u2191", + "updownarrow;": "\u2195", + "upharpoonleft;": "\u21bf", + "upharpoonright;": "\u21be", + "uplus;": "\u228e", + "upsi;": "\u03c5", + "upsih;": "\u03d2", + "upsilon;": "\u03c5", + "upuparrows;": "\u21c8", + "urcorn;": "\u231d", + "urcorner;": "\u231d", + "urcrop;": "\u230e", + "uring;": "\u016f", + "urtri;": "\u25f9", + "uscr;": "\U0001d4ca", + "utdot;": "\u22f0", + "utilde;": "\u0169", + "utri;": "\u25b5", + "utrif;": "\u25b4", + "uuarr;": "\u21c8", + "uuml": "\xfc", + "uuml;": "\xfc", + "uwangle;": "\u29a7", + "vArr;": "\u21d5", + "vBar;": "\u2ae8", + "vBarv;": "\u2ae9", + "vDash;": "\u22a8", + "vangrt;": "\u299c", + "varepsilon;": "\u03f5", + "varkappa;": "\u03f0", + "varnothing;": "\u2205", + "varphi;": "\u03d5", + "varpi;": "\u03d6", + "varpropto;": "\u221d", + "varr;": "\u2195", + "varrho;": "\u03f1", + "varsigma;": "\u03c2", + "varsubsetneq;": "\u228a\ufe00", + "varsubsetneqq;": "\u2acb\ufe00", + "varsupsetneq;": "\u228b\ufe00", + "varsupsetneqq;": "\u2acc\ufe00", + "vartheta;": "\u03d1", + "vartriangleleft;": "\u22b2", + "vartriangleright;": "\u22b3", + "vcy;": "\u0432", + "vdash;": "\u22a2", + "vee;": "\u2228", + "veebar;": "\u22bb", + "veeeq;": "\u225a", + "vellip;": "\u22ee", + "verbar;": "|", + "vert;": "|", + "vfr;": "\U0001d533", + "vltri;": "\u22b2", + "vnsub;": "\u2282\u20d2", + "vnsup;": "\u2283\u20d2", + "vopf;": "\U0001d567", + "vprop;": "\u221d", + "vrtri;": "\u22b3", + "vscr;": "\U0001d4cb", + "vsubnE;": "\u2acb\ufe00", + "vsubne;": "\u228a\ufe00", + "vsupnE;": "\u2acc\ufe00", + "vsupne;": "\u228b\ufe00", + "vzigzag;": "\u299a", + "wcirc;": "\u0175", + "wedbar;": "\u2a5f", + "wedge;": "\u2227", + "wedgeq;": "\u2259", + "weierp;": "\u2118", + "wfr;": "\U0001d534", + "wopf;": "\U0001d568", + "wp;": "\u2118", + "wr;": "\u2240", + "wreath;": "\u2240", + "wscr;": "\U0001d4cc", + "xcap;": "\u22c2", + "xcirc;": "\u25ef", + "xcup;": "\u22c3", + "xdtri;": "\u25bd", + "xfr;": "\U0001d535", + "xhArr;": "\u27fa", + "xharr;": "\u27f7", + "xi;": "\u03be", + 
"xlArr;": "\u27f8", + "xlarr;": "\u27f5", + "xmap;": "\u27fc", + "xnis;": "\u22fb", + "xodot;": "\u2a00", + "xopf;": "\U0001d569", + "xoplus;": "\u2a01", + "xotime;": "\u2a02", + "xrArr;": "\u27f9", + "xrarr;": "\u27f6", + "xscr;": "\U0001d4cd", + "xsqcup;": "\u2a06", + "xuplus;": "\u2a04", + "xutri;": "\u25b3", + "xvee;": "\u22c1", + "xwedge;": "\u22c0", + "yacute": "\xfd", + "yacute;": "\xfd", + "yacy;": "\u044f", + "ycirc;": "\u0177", + "ycy;": "\u044b", + "yen": "\xa5", + "yen;": "\xa5", + "yfr;": "\U0001d536", + "yicy;": "\u0457", + "yopf;": "\U0001d56a", + "yscr;": "\U0001d4ce", + "yucy;": "\u044e", + "yuml": "\xff", + "yuml;": "\xff", + "zacute;": "\u017a", + "zcaron;": "\u017e", + "zcy;": "\u0437", + "zdot;": "\u017c", + "zeetrf;": "\u2128", + "zeta;": "\u03b6", + "zfr;": "\U0001d537", + "zhcy;": "\u0436", + "zigrarr;": "\u21dd", + "zopf;": "\U0001d56b", + "zscr;": "\U0001d4cf", + "zwj;": "\u200d", + "zwnj;": "\u200c", +} + +replacementCharacters = { + 0x0: "\uFFFD", + 0x0d: "\u000D", + 0x80: "\u20AC", + 0x81: "\u0081", + 0x82: "\u201A", + 0x83: "\u0192", + 0x84: "\u201E", + 0x85: "\u2026", + 0x86: "\u2020", + 0x87: "\u2021", + 0x88: "\u02C6", + 0x89: "\u2030", + 0x8A: "\u0160", + 0x8B: "\u2039", + 0x8C: "\u0152", + 0x8D: "\u008D", + 0x8E: "\u017D", + 0x8F: "\u008F", + 0x90: "\u0090", + 0x91: "\u2018", + 0x92: "\u2019", + 0x93: "\u201C", + 0x94: "\u201D", + 0x95: "\u2022", + 0x96: "\u2013", + 0x97: "\u2014", + 0x98: "\u02DC", + 0x99: "\u2122", + 0x9A: "\u0161", + 0x9B: "\u203A", + 0x9C: "\u0153", + 0x9D: "\u009D", + 0x9E: "\u017E", + 0x9F: "\u0178", +} + +tokenTypes = { + "Doctype": 0, + "Characters": 1, + "SpaceCharacters": 2, + "StartTag": 3, + "EndTag": 4, + "EmptyTag": 5, + "Comment": 6, + "ParseError": 7 +} + +tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"], + tokenTypes["EmptyTag"]]) + + +prefixes = dict([(v, k) for k, v in namespaces.items()]) +prefixes["http://www.w3.org/1998/Math/MathML"] = "math" + + +class DataLossWarning(UserWarning): + """Raised when the current tree is unable to represent the input data""" + pass + + +class _ReparseException(Exception): + pass diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.pyc new file mode 100644 index 0000000..8af45aa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.pyc new file mode 100644 index 0000000..abaedf4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py new file mode 100644 index 0000000..5ba926e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py @@ -0,0 +1,29 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import base + +from collections import OrderedDict + + +def _attr_key(attr): + """Return an appropriate key for an attribute for sorting + + Attributes have a namespace that can be either ``None`` or a string. We + can't compare the two because they're different types, so we convert + ``None`` to an empty string first. + + """ + return (attr[0][0] or ''), attr[0][1] + + +class Filter(base.Filter): + """Alphabetizes attributes for elements""" + def __iter__(self): + for token in base.Filter.__iter__(self): + if token["type"] in ("StartTag", "EmptyTag"): + attrs = OrderedDict() + for name, value in sorted(token["data"].items(), + key=_attr_key): + attrs[name] = value + token["data"] = attrs + yield token diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.pyc new file mode 100644 index 0000000..357fdc7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.py new file mode 100644 index 0000000..c7dbaed --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import, division, unicode_literals + + +class Filter(object): + def __init__(self, source): + self.source = source + + def __iter__(self): + return iter(self.source) + + def __getattr__(self, name): + return getattr(self.source, name) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyc new file mode 100644 index 0000000..0ce5272 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py new file mode 100644 index 0000000..aefb5c8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py @@ -0,0 +1,73 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import base + + +class Filter(base.Filter): + """Injects ``<meta charset=ENCODING>`` tag into head of document""" + def __init__(self, source, encoding): + """Creates a Filter + + :arg source: the source token stream + + :arg encoding: the encoding to set + + """ + base.Filter.__init__(self, source) + self.encoding = encoding + + def __iter__(self): + state = "pre_head" + meta_found = (self.encoding is None) + pending = [] + + for token in base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag": + if token["name"].lower() == "head": + state = "in_head" + + elif type == "EmptyTag": + if token["name"].lower() == "meta": + # replace charset with actual encoding + has_http_equiv_content_type = False + for (namespace, name), value in token["data"].items(): + if namespace is not None: + continue + elif name.lower() == 'charset': + token["data"][(namespace, name)] = self.encoding + meta_found = True + break + elif name == 'http-equiv' and value.lower() == 'content-type': + has_http_equiv_content_type = True + else: + if has_http_equiv_content_type and (None, "content") in token["data"]: + token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding + meta_found = True + + elif token["name"].lower() == "head" and not meta_found: + # insert meta into empty head + yield {"type": "StartTag", "name": "head", + "data": token["data"]} + yield {"type": "EmptyTag", "name": "meta", + "data": {(None, "charset"): self.encoding}} + yield {"type": "EndTag", "name": "head"} + meta_found = True + continue + + elif type == "EndTag": + if token["name"].lower() == "head" and pending: + # insert meta into head (if necessary) and flush pending queue + yield pending.pop(0) + if not meta_found: + yield {"type": "EmptyTag", "name": "meta", + "data": {(None, "charset"): self.encoding}} + while pending: + yield pending.pop(0) + meta_found = True + state = "post_head" + + if state == "in_head": + pending.append(token) + else: + yield token diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.pyc new file mode 100644 index 0000000..a4c926f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py new file mode 100644 index 0000000..fcc07ee --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py @@ -0,0 +1,93 @@ +from __future__ import absolute_import, division, unicode_literals + +from pip._vendor.six import text_type + +from . import base +from ..constants import namespaces, voidElements + +from ..constants import spaceCharacters +spaceCharacters = "".join(spaceCharacters) + + +class Filter(base.Filter): + """Lints the token stream for errors + + If it finds any errors, it'll raise an ``AssertionError``. 
+ + """ + def __init__(self, source, require_matching_tags=True): + """Creates a Filter + + :arg source: the source token stream + + :arg require_matching_tags: whether or not to require matching tags + + """ + super(Filter, self).__init__(source) + self.require_matching_tags = require_matching_tags + + def __iter__(self): + open_elements = [] + for token in base.Filter.__iter__(self): + type = token["type"] + if type in ("StartTag", "EmptyTag"): + namespace = token["namespace"] + name = token["name"] + assert namespace is None or isinstance(namespace, text_type) + assert namespace != "" + assert isinstance(name, text_type) + assert name != "" + assert isinstance(token["data"], dict) + if (not namespace or namespace == namespaces["html"]) and name in voidElements: + assert type == "EmptyTag" + else: + assert type == "StartTag" + if type == "StartTag" and self.require_matching_tags: + open_elements.append((namespace, name)) + for (namespace, name), value in token["data"].items(): + assert namespace is None or isinstance(namespace, text_type) + assert namespace != "" + assert isinstance(name, text_type) + assert name != "" + assert isinstance(value, text_type) + + elif type == "EndTag": + namespace = token["namespace"] + name = token["name"] + assert namespace is None or isinstance(namespace, text_type) + assert namespace != "" + assert isinstance(name, text_type) + assert name != "" + if (not namespace or namespace == namespaces["html"]) and name in voidElements: + assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name} + elif self.require_matching_tags: + start = open_elements.pop() + assert start == (namespace, name) + + elif type == "Comment": + data = token["data"] + assert isinstance(data, text_type) + + elif type in ("Characters", "SpaceCharacters"): + data = token["data"] + assert isinstance(data, text_type) + assert data != "" + if type == "SpaceCharacters": + assert data.strip(spaceCharacters) == "" + + elif type == "Doctype": + name = token["name"] + assert name is None or isinstance(name, text_type) + assert token["publicId"] is None or isinstance(name, text_type) + assert token["systemId"] is None or isinstance(name, text_type) + + elif type == "Entity": + assert isinstance(token["name"], text_type) + + elif type == "SerializerError": + assert isinstance(token["data"], text_type) + + else: + assert False, "Unknown token type: %(type)s" % {"type": type} + + yield token diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyc new file mode 100644 index 0000000..7af0d51 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py new file mode 100644 index 0000000..4a86501 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py @@ -0,0 +1,207 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import base + + +class Filter(base.Filter): + """Removes optional tags from the token stream""" + def slider(self): + previous1 = previous2 = None + for token in self.source: + if previous1 is not None: + yield previous2, previous1, token + previous2 = previous1 + previous1 = token + if previous1 is not None: + yield previous2, previous1, None + + def __iter__(self): + for previous, token, next in self.slider(): + type = token["type"] + if type == "StartTag": + if (token["data"] or + not self.is_optional_start(token["name"], previous, next)): + yield token + elif type == "EndTag": + if not self.is_optional_end(token["name"], next): + yield token + else: + yield token + + def is_optional_start(self, tagname, previous, next): + type = next and next["type"] or None + if tagname in 'html': + # An html element's start tag may be omitted if the first thing + # inside the html element is not a space character or a comment. + return type not in ("Comment", "SpaceCharacters") + elif tagname == 'head': + # A head element's start tag may be omitted if the first thing + # inside the head element is an element. + # XXX: we also omit the start tag if the head element is empty + if type in ("StartTag", "EmptyTag"): + return True + elif type == "EndTag": + return next["name"] == "head" + elif tagname == 'body': + # A body element's start tag may be omitted if the first thing + # inside the body element is not a space character or a comment, + # except if the first thing inside the body element is a script + # or style element and the node immediately preceding the body + # element is a head element whose end tag has been omitted. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we do not look at the preceding event, so we never omit + # the body element's start tag if it's followed by a script or + # a style element. + return next["name"] not in ('script', 'style') + else: + return True + elif tagname == 'colgroup': + # A colgroup element's start tag may be omitted if the first thing + # inside the colgroup element is a col element, and if the element + # is not immediately preceded by another colgroup element whose + # end tag has been omitted. + if type in ("StartTag", "EmptyTag"): + # XXX: we do not look at the preceding event, so instead we never + # omit the colgroup element's end tag when it is immediately + # followed by another colgroup element. See is_optional_end. + return next["name"] == "col" + else: + return False + elif tagname == 'tbody': + # A tbody element's start tag may be omitted if the first thing + # inside the tbody element is a tr element, and if the element is + # not immediately preceded by a tbody, thead, or tfoot element + # whose end tag has been omitted. + if type == "StartTag": + # omit the thead and tfoot elements' end tag when they are + # immediately followed by a tbody element. See is_optional_end. + if previous and previous['type'] == 'EndTag' and \ + previous['name'] in ('tbody', 'thead', 'tfoot'): + return False + return next["name"] == 'tr' + else: + return False + return False + + def is_optional_end(self, tagname, next): + type = next and next["type"] or None + if tagname in ('html', 'head', 'body'): + # An html element's end tag may be omitted if the html element + # is not immediately followed by a space character or a comment. 
+ return type not in ("Comment", "SpaceCharacters") + elif tagname in ('li', 'optgroup', 'tr'): + # A li element's end tag may be omitted if the li element is + # immediately followed by another li element or if there is + # no more content in the parent element. + # An optgroup element's end tag may be omitted if the optgroup + # element is immediately followed by another optgroup element, + # or if there is no more content in the parent element. + # A tr element's end tag may be omitted if the tr element is + # immediately followed by another tr element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] == tagname + else: + return type == "EndTag" or type is None + elif tagname in ('dt', 'dd'): + # A dt element's end tag may be omitted if the dt element is + # immediately followed by another dt element or a dd element. + # A dd element's end tag may be omitted if the dd element is + # immediately followed by another dd element or a dt element, + # or if there is no more content in the parent element. + if type == "StartTag": + return next["name"] in ('dt', 'dd') + elif tagname == 'dd': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'p': + # A p element's end tag may be omitted if the p element is + # immediately followed by an address, article, aside, + # blockquote, datagrid, dialog, dir, div, dl, fieldset, + # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu, + # nav, ol, p, pre, section, table, or ul, element, or if + # there is no more content in the parent element. + if type in ("StartTag", "EmptyTag"): + return next["name"] in ('address', 'article', 'aside', + 'blockquote', 'datagrid', 'dialog', + 'dir', 'div', 'dl', 'fieldset', 'footer', + 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', + 'header', 'hr', 'menu', 'nav', 'ol', + 'p', 'pre', 'section', 'table', 'ul') + else: + return type == "EndTag" or type is None + elif tagname == 'option': + # An option element's end tag may be omitted if the option + # element is immediately followed by another option element, + # or if it is immediately followed by an <code>optgroup</code> + # element, or if there is no more content in the parent + # element. + if type == "StartTag": + return next["name"] in ('option', 'optgroup') + else: + return type == "EndTag" or type is None + elif tagname in ('rt', 'rp'): + # An rt element's end tag may be omitted if the rt element is + # immediately followed by an rt or rp element, or if there is + # no more content in the parent element. + # An rp element's end tag may be omitted if the rp element is + # immediately followed by an rt or rp element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] in ('rt', 'rp') + else: + return type == "EndTag" or type is None + elif tagname == 'colgroup': + # A colgroup element's end tag may be omitted if the colgroup + # element is not immediately followed by a space character or + # a comment. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we also look for an immediately following colgroup + # element. See is_optional_start. + return next["name"] != 'colgroup' + else: + return True + elif tagname in ('thead', 'tbody'): + # A thead element's end tag may be omitted if the thead element + # is immediately followed by a tbody or tfoot element. 
+ # A tbody element's end tag may be omitted if the tbody element + # is immediately followed by a tbody or tfoot element, or if + # there is no more content in the parent element. + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. + # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] in ['tbody', 'tfoot'] + elif tagname == 'tbody': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'tfoot': + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. + # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] == 'tbody' + else: + return type == "EndTag" or type is None + elif tagname in ('td', 'th'): + # A td element's end tag may be omitted if the td element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. + # A th element's end tag may be omitted if the th element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] in ('td', 'th') + else: + return type == "EndTag" or type is None + return False diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyc new file mode 100644 index 0000000..2d33d2b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py new file mode 100644 index 0000000..af8e77b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py @@ -0,0 +1,896 @@ +from __future__ import absolute_import, division, unicode_literals + +import re +from xml.sax.saxutils import escape, unescape + +from pip._vendor.six.moves import urllib_parse as urlparse + +from . 
import base +from ..constants import namespaces, prefixes + +__all__ = ["Filter"] + + +allowed_elements = frozenset(( + (namespaces['html'], 'a'), + (namespaces['html'], 'abbr'), + (namespaces['html'], 'acronym'), + (namespaces['html'], 'address'), + (namespaces['html'], 'area'), + (namespaces['html'], 'article'), + (namespaces['html'], 'aside'), + (namespaces['html'], 'audio'), + (namespaces['html'], 'b'), + (namespaces['html'], 'big'), + (namespaces['html'], 'blockquote'), + (namespaces['html'], 'br'), + (namespaces['html'], 'button'), + (namespaces['html'], 'canvas'), + (namespaces['html'], 'caption'), + (namespaces['html'], 'center'), + (namespaces['html'], 'cite'), + (namespaces['html'], 'code'), + (namespaces['html'], 'col'), + (namespaces['html'], 'colgroup'), + (namespaces['html'], 'command'), + (namespaces['html'], 'datagrid'), + (namespaces['html'], 'datalist'), + (namespaces['html'], 'dd'), + (namespaces['html'], 'del'), + (namespaces['html'], 'details'), + (namespaces['html'], 'dfn'), + (namespaces['html'], 'dialog'), + (namespaces['html'], 'dir'), + (namespaces['html'], 'div'), + (namespaces['html'], 'dl'), + (namespaces['html'], 'dt'), + (namespaces['html'], 'em'), + (namespaces['html'], 'event-source'), + (namespaces['html'], 'fieldset'), + (namespaces['html'], 'figcaption'), + (namespaces['html'], 'figure'), + (namespaces['html'], 'footer'), + (namespaces['html'], 'font'), + (namespaces['html'], 'form'), + (namespaces['html'], 'header'), + (namespaces['html'], 'h1'), + (namespaces['html'], 'h2'), + (namespaces['html'], 'h3'), + (namespaces['html'], 'h4'), + (namespaces['html'], 'h5'), + (namespaces['html'], 'h6'), + (namespaces['html'], 'hr'), + (namespaces['html'], 'i'), + (namespaces['html'], 'img'), + (namespaces['html'], 'input'), + (namespaces['html'], 'ins'), + (namespaces['html'], 'keygen'), + (namespaces['html'], 'kbd'), + (namespaces['html'], 'label'), + (namespaces['html'], 'legend'), + (namespaces['html'], 'li'), + (namespaces['html'], 'm'), + (namespaces['html'], 'map'), + (namespaces['html'], 'menu'), + (namespaces['html'], 'meter'), + (namespaces['html'], 'multicol'), + (namespaces['html'], 'nav'), + (namespaces['html'], 'nextid'), + (namespaces['html'], 'ol'), + (namespaces['html'], 'output'), + (namespaces['html'], 'optgroup'), + (namespaces['html'], 'option'), + (namespaces['html'], 'p'), + (namespaces['html'], 'pre'), + (namespaces['html'], 'progress'), + (namespaces['html'], 'q'), + (namespaces['html'], 's'), + (namespaces['html'], 'samp'), + (namespaces['html'], 'section'), + (namespaces['html'], 'select'), + (namespaces['html'], 'small'), + (namespaces['html'], 'sound'), + (namespaces['html'], 'source'), + (namespaces['html'], 'spacer'), + (namespaces['html'], 'span'), + (namespaces['html'], 'strike'), + (namespaces['html'], 'strong'), + (namespaces['html'], 'sub'), + (namespaces['html'], 'sup'), + (namespaces['html'], 'table'), + (namespaces['html'], 'tbody'), + (namespaces['html'], 'td'), + (namespaces['html'], 'textarea'), + (namespaces['html'], 'time'), + (namespaces['html'], 'tfoot'), + (namespaces['html'], 'th'), + (namespaces['html'], 'thead'), + (namespaces['html'], 'tr'), + (namespaces['html'], 'tt'), + (namespaces['html'], 'u'), + (namespaces['html'], 'ul'), + (namespaces['html'], 'var'), + (namespaces['html'], 'video'), + (namespaces['mathml'], 'maction'), + (namespaces['mathml'], 'math'), + (namespaces['mathml'], 'merror'), + (namespaces['mathml'], 'mfrac'), + (namespaces['mathml'], 'mi'), + (namespaces['mathml'], 'mmultiscripts'), + 
(namespaces['mathml'], 'mn'), + (namespaces['mathml'], 'mo'), + (namespaces['mathml'], 'mover'), + (namespaces['mathml'], 'mpadded'), + (namespaces['mathml'], 'mphantom'), + (namespaces['mathml'], 'mprescripts'), + (namespaces['mathml'], 'mroot'), + (namespaces['mathml'], 'mrow'), + (namespaces['mathml'], 'mspace'), + (namespaces['mathml'], 'msqrt'), + (namespaces['mathml'], 'mstyle'), + (namespaces['mathml'], 'msub'), + (namespaces['mathml'], 'msubsup'), + (namespaces['mathml'], 'msup'), + (namespaces['mathml'], 'mtable'), + (namespaces['mathml'], 'mtd'), + (namespaces['mathml'], 'mtext'), + (namespaces['mathml'], 'mtr'), + (namespaces['mathml'], 'munder'), + (namespaces['mathml'], 'munderover'), + (namespaces['mathml'], 'none'), + (namespaces['svg'], 'a'), + (namespaces['svg'], 'animate'), + (namespaces['svg'], 'animateColor'), + (namespaces['svg'], 'animateMotion'), + (namespaces['svg'], 'animateTransform'), + (namespaces['svg'], 'clipPath'), + (namespaces['svg'], 'circle'), + (namespaces['svg'], 'defs'), + (namespaces['svg'], 'desc'), + (namespaces['svg'], 'ellipse'), + (namespaces['svg'], 'font-face'), + (namespaces['svg'], 'font-face-name'), + (namespaces['svg'], 'font-face-src'), + (namespaces['svg'], 'g'), + (namespaces['svg'], 'glyph'), + (namespaces['svg'], 'hkern'), + (namespaces['svg'], 'linearGradient'), + (namespaces['svg'], 'line'), + (namespaces['svg'], 'marker'), + (namespaces['svg'], 'metadata'), + (namespaces['svg'], 'missing-glyph'), + (namespaces['svg'], 'mpath'), + (namespaces['svg'], 'path'), + (namespaces['svg'], 'polygon'), + (namespaces['svg'], 'polyline'), + (namespaces['svg'], 'radialGradient'), + (namespaces['svg'], 'rect'), + (namespaces['svg'], 'set'), + (namespaces['svg'], 'stop'), + (namespaces['svg'], 'svg'), + (namespaces['svg'], 'switch'), + (namespaces['svg'], 'text'), + (namespaces['svg'], 'title'), + (namespaces['svg'], 'tspan'), + (namespaces['svg'], 'use'), +)) + +allowed_attributes = frozenset(( + # HTML attributes + (None, 'abbr'), + (None, 'accept'), + (None, 'accept-charset'), + (None, 'accesskey'), + (None, 'action'), + (None, 'align'), + (None, 'alt'), + (None, 'autocomplete'), + (None, 'autofocus'), + (None, 'axis'), + (None, 'background'), + (None, 'balance'), + (None, 'bgcolor'), + (None, 'bgproperties'), + (None, 'border'), + (None, 'bordercolor'), + (None, 'bordercolordark'), + (None, 'bordercolorlight'), + (None, 'bottompadding'), + (None, 'cellpadding'), + (None, 'cellspacing'), + (None, 'ch'), + (None, 'challenge'), + (None, 'char'), + (None, 'charoff'), + (None, 'choff'), + (None, 'charset'), + (None, 'checked'), + (None, 'cite'), + (None, 'class'), + (None, 'clear'), + (None, 'color'), + (None, 'cols'), + (None, 'colspan'), + (None, 'compact'), + (None, 'contenteditable'), + (None, 'controls'), + (None, 'coords'), + (None, 'data'), + (None, 'datafld'), + (None, 'datapagesize'), + (None, 'datasrc'), + (None, 'datetime'), + (None, 'default'), + (None, 'delay'), + (None, 'dir'), + (None, 'disabled'), + (None, 'draggable'), + (None, 'dynsrc'), + (None, 'enctype'), + (None, 'end'), + (None, 'face'), + (None, 'for'), + (None, 'form'), + (None, 'frame'), + (None, 'galleryimg'), + (None, 'gutter'), + (None, 'headers'), + (None, 'height'), + (None, 'hidefocus'), + (None, 'hidden'), + (None, 'high'), + (None, 'href'), + (None, 'hreflang'), + (None, 'hspace'), + (None, 'icon'), + (None, 'id'), + (None, 'inputmode'), + (None, 'ismap'), + (None, 'keytype'), + (None, 'label'), + (None, 'leftspacing'), + (None, 'lang'), + (None, 'list'), + (None, 
'longdesc'), + (None, 'loop'), + (None, 'loopcount'), + (None, 'loopend'), + (None, 'loopstart'), + (None, 'low'), + (None, 'lowsrc'), + (None, 'max'), + (None, 'maxlength'), + (None, 'media'), + (None, 'method'), + (None, 'min'), + (None, 'multiple'), + (None, 'name'), + (None, 'nohref'), + (None, 'noshade'), + (None, 'nowrap'), + (None, 'open'), + (None, 'optimum'), + (None, 'pattern'), + (None, 'ping'), + (None, 'point-size'), + (None, 'poster'), + (None, 'pqg'), + (None, 'preload'), + (None, 'prompt'), + (None, 'radiogroup'), + (None, 'readonly'), + (None, 'rel'), + (None, 'repeat-max'), + (None, 'repeat-min'), + (None, 'replace'), + (None, 'required'), + (None, 'rev'), + (None, 'rightspacing'), + (None, 'rows'), + (None, 'rowspan'), + (None, 'rules'), + (None, 'scope'), + (None, 'selected'), + (None, 'shape'), + (None, 'size'), + (None, 'span'), + (None, 'src'), + (None, 'start'), + (None, 'step'), + (None, 'style'), + (None, 'summary'), + (None, 'suppress'), + (None, 'tabindex'), + (None, 'target'), + (None, 'template'), + (None, 'title'), + (None, 'toppadding'), + (None, 'type'), + (None, 'unselectable'), + (None, 'usemap'), + (None, 'urn'), + (None, 'valign'), + (None, 'value'), + (None, 'variable'), + (None, 'volume'), + (None, 'vspace'), + (None, 'vrml'), + (None, 'width'), + (None, 'wrap'), + (namespaces['xml'], 'lang'), + # MathML attributes + (None, 'actiontype'), + (None, 'align'), + (None, 'columnalign'), + (None, 'columnalign'), + (None, 'columnalign'), + (None, 'columnlines'), + (None, 'columnspacing'), + (None, 'columnspan'), + (None, 'depth'), + (None, 'display'), + (None, 'displaystyle'), + (None, 'equalcolumns'), + (None, 'equalrows'), + (None, 'fence'), + (None, 'fontstyle'), + (None, 'fontweight'), + (None, 'frame'), + (None, 'height'), + (None, 'linethickness'), + (None, 'lspace'), + (None, 'mathbackground'), + (None, 'mathcolor'), + (None, 'mathvariant'), + (None, 'mathvariant'), + (None, 'maxsize'), + (None, 'minsize'), + (None, 'other'), + (None, 'rowalign'), + (None, 'rowalign'), + (None, 'rowalign'), + (None, 'rowlines'), + (None, 'rowspacing'), + (None, 'rowspan'), + (None, 'rspace'), + (None, 'scriptlevel'), + (None, 'selection'), + (None, 'separator'), + (None, 'stretchy'), + (None, 'width'), + (None, 'width'), + (namespaces['xlink'], 'href'), + (namespaces['xlink'], 'show'), + (namespaces['xlink'], 'type'), + # SVG attributes + (None, 'accent-height'), + (None, 'accumulate'), + (None, 'additive'), + (None, 'alphabetic'), + (None, 'arabic-form'), + (None, 'ascent'), + (None, 'attributeName'), + (None, 'attributeType'), + (None, 'baseProfile'), + (None, 'bbox'), + (None, 'begin'), + (None, 'by'), + (None, 'calcMode'), + (None, 'cap-height'), + (None, 'class'), + (None, 'clip-path'), + (None, 'color'), + (None, 'color-rendering'), + (None, 'content'), + (None, 'cx'), + (None, 'cy'), + (None, 'd'), + (None, 'dx'), + (None, 'dy'), + (None, 'descent'), + (None, 'display'), + (None, 'dur'), + (None, 'end'), + (None, 'fill'), + (None, 'fill-opacity'), + (None, 'fill-rule'), + (None, 'font-family'), + (None, 'font-size'), + (None, 'font-stretch'), + (None, 'font-style'), + (None, 'font-variant'), + (None, 'font-weight'), + (None, 'from'), + (None, 'fx'), + (None, 'fy'), + (None, 'g1'), + (None, 'g2'), + (None, 'glyph-name'), + (None, 'gradientUnits'), + (None, 'hanging'), + (None, 'height'), + (None, 'horiz-adv-x'), + (None, 'horiz-origin-x'), + (None, 'id'), + (None, 'ideographic'), + (None, 'k'), + (None, 'keyPoints'), + (None, 'keySplines'), + (None, 'keyTimes'), 
+ (None, 'lang'), + (None, 'marker-end'), + (None, 'marker-mid'), + (None, 'marker-start'), + (None, 'markerHeight'), + (None, 'markerUnits'), + (None, 'markerWidth'), + (None, 'mathematical'), + (None, 'max'), + (None, 'min'), + (None, 'name'), + (None, 'offset'), + (None, 'opacity'), + (None, 'orient'), + (None, 'origin'), + (None, 'overline-position'), + (None, 'overline-thickness'), + (None, 'panose-1'), + (None, 'path'), + (None, 'pathLength'), + (None, 'points'), + (None, 'preserveAspectRatio'), + (None, 'r'), + (None, 'refX'), + (None, 'refY'), + (None, 'repeatCount'), + (None, 'repeatDur'), + (None, 'requiredExtensions'), + (None, 'requiredFeatures'), + (None, 'restart'), + (None, 'rotate'), + (None, 'rx'), + (None, 'ry'), + (None, 'slope'), + (None, 'stemh'), + (None, 'stemv'), + (None, 'stop-color'), + (None, 'stop-opacity'), + (None, 'strikethrough-position'), + (None, 'strikethrough-thickness'), + (None, 'stroke'), + (None, 'stroke-dasharray'), + (None, 'stroke-dashoffset'), + (None, 'stroke-linecap'), + (None, 'stroke-linejoin'), + (None, 'stroke-miterlimit'), + (None, 'stroke-opacity'), + (None, 'stroke-width'), + (None, 'systemLanguage'), + (None, 'target'), + (None, 'text-anchor'), + (None, 'to'), + (None, 'transform'), + (None, 'type'), + (None, 'u1'), + (None, 'u2'), + (None, 'underline-position'), + (None, 'underline-thickness'), + (None, 'unicode'), + (None, 'unicode-range'), + (None, 'units-per-em'), + (None, 'values'), + (None, 'version'), + (None, 'viewBox'), + (None, 'visibility'), + (None, 'width'), + (None, 'widths'), + (None, 'x'), + (None, 'x-height'), + (None, 'x1'), + (None, 'x2'), + (namespaces['xlink'], 'actuate'), + (namespaces['xlink'], 'arcrole'), + (namespaces['xlink'], 'href'), + (namespaces['xlink'], 'role'), + (namespaces['xlink'], 'show'), + (namespaces['xlink'], 'title'), + (namespaces['xlink'], 'type'), + (namespaces['xml'], 'base'), + (namespaces['xml'], 'lang'), + (namespaces['xml'], 'space'), + (None, 'y'), + (None, 'y1'), + (None, 'y2'), + (None, 'zoomAndPan'), +)) + +attr_val_is_uri = frozenset(( + (None, 'href'), + (None, 'src'), + (None, 'cite'), + (None, 'action'), + (None, 'longdesc'), + (None, 'poster'), + (None, 'background'), + (None, 'datasrc'), + (None, 'dynsrc'), + (None, 'lowsrc'), + (None, 'ping'), + (namespaces['xlink'], 'href'), + (namespaces['xml'], 'base'), +)) + +svg_attr_val_allows_ref = frozenset(( + (None, 'clip-path'), + (None, 'color-profile'), + (None, 'cursor'), + (None, 'fill'), + (None, 'filter'), + (None, 'marker'), + (None, 'marker-start'), + (None, 'marker-mid'), + (None, 'marker-end'), + (None, 'mask'), + (None, 'stroke'), +)) + +svg_allow_local_href = frozenset(( + (None, 'altGlyph'), + (None, 'animate'), + (None, 'animateColor'), + (None, 'animateMotion'), + (None, 'animateTransform'), + (None, 'cursor'), + (None, 'feImage'), + (None, 'filter'), + (None, 'linearGradient'), + (None, 'pattern'), + (None, 'radialGradient'), + (None, 'textpath'), + (None, 'tref'), + (None, 'set'), + (None, 'use') +)) + +allowed_css_properties = frozenset(( + 'azimuth', + 'background-color', + 'border-bottom-color', + 'border-collapse', + 'border-color', + 'border-left-color', + 'border-right-color', + 'border-top-color', + 'clear', + 'color', + 'cursor', + 'direction', + 'display', + 'elevation', + 'float', + 'font', + 'font-family', + 'font-size', + 'font-style', + 'font-variant', + 'font-weight', + 'height', + 'letter-spacing', + 'line-height', + 'overflow', + 'pause', + 'pause-after', + 'pause-before', + 'pitch', + 'pitch-range', 
+ 'richness', + 'speak', + 'speak-header', + 'speak-numeral', + 'speak-punctuation', + 'speech-rate', + 'stress', + 'text-align', + 'text-decoration', + 'text-indent', + 'unicode-bidi', + 'vertical-align', + 'voice-family', + 'volume', + 'white-space', + 'width', +)) + +allowed_css_keywords = frozenset(( + 'auto', + 'aqua', + 'black', + 'block', + 'blue', + 'bold', + 'both', + 'bottom', + 'brown', + 'center', + 'collapse', + 'dashed', + 'dotted', + 'fuchsia', + 'gray', + 'green', + '!important', + 'italic', + 'left', + 'lime', + 'maroon', + 'medium', + 'none', + 'navy', + 'normal', + 'nowrap', + 'olive', + 'pointer', + 'purple', + 'red', + 'right', + 'solid', + 'silver', + 'teal', + 'top', + 'transparent', + 'underline', + 'white', + 'yellow', +)) + +allowed_svg_properties = frozenset(( + 'fill', + 'fill-opacity', + 'fill-rule', + 'stroke', + 'stroke-width', + 'stroke-linecap', + 'stroke-linejoin', + 'stroke-opacity', +)) + +allowed_protocols = frozenset(( + 'ed2k', + 'ftp', + 'http', + 'https', + 'irc', + 'mailto', + 'news', + 'gopher', + 'nntp', + 'telnet', + 'webcal', + 'xmpp', + 'callto', + 'feed', + 'urn', + 'aim', + 'rsync', + 'tag', + 'ssh', + 'sftp', + 'rtsp', + 'afs', + 'data', +)) + +allowed_content_types = frozenset(( + 'image/png', + 'image/jpeg', + 'image/gif', + 'image/webp', + 'image/bmp', + 'text/plain', +)) + + +data_content_type = re.compile(r''' + ^ + # Match a content type <application>/<type> + (?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+) + # Match any character set and encoding + (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?) + |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?) + # Assume the rest is data + ,.* + $ + ''', + re.VERBOSE) + + +class Filter(base.Filter): + """Sanitizes token stream of XHTML+MathML+SVG and of inline style attributes""" + def __init__(self, + source, + allowed_elements=allowed_elements, + allowed_attributes=allowed_attributes, + allowed_css_properties=allowed_css_properties, + allowed_css_keywords=allowed_css_keywords, + allowed_svg_properties=allowed_svg_properties, + allowed_protocols=allowed_protocols, + allowed_content_types=allowed_content_types, + attr_val_is_uri=attr_val_is_uri, + svg_attr_val_allows_ref=svg_attr_val_allows_ref, + svg_allow_local_href=svg_allow_local_href): + """Creates a Filter + + :arg allowed_elements: set of elements to allow--everything else will + be escaped + + :arg allowed_attributes: set of attributes to allow in + elements--everything else will be stripped + + :arg allowed_css_properties: set of CSS properties to allow--everything + else will be stripped + + :arg allowed_css_keywords: set of CSS keywords to allow--everything + else will be stripped + + :arg allowed_svg_properties: set of SVG properties to allow--everything + else will be removed + + :arg allowed_protocols: set of allowed protocols for URIs + + :arg allowed_content_types: set of allowed content types for ``data`` URIs. 
+ + :arg attr_val_is_uri: set of attributes that have URI values--values + that have a scheme not listed in ``allowed_protocols`` are removed + + :arg svg_attr_val_allows_ref: set of SVG attributes that can have + references + + :arg svg_allow_local_href: set of SVG elements that can have local + hrefs--these are removed + + """ + super(Filter, self).__init__(source) + self.allowed_elements = allowed_elements + self.allowed_attributes = allowed_attributes + self.allowed_css_properties = allowed_css_properties + self.allowed_css_keywords = allowed_css_keywords + self.allowed_svg_properties = allowed_svg_properties + self.allowed_protocols = allowed_protocols + self.allowed_content_types = allowed_content_types + self.attr_val_is_uri = attr_val_is_uri + self.svg_attr_val_allows_ref = svg_attr_val_allows_ref + self.svg_allow_local_href = svg_allow_local_href + + def __iter__(self): + for token in base.Filter.__iter__(self): + token = self.sanitize_token(token) + if token: + yield token + + # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and + # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes + # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and + # ALLOWED_CSS_KEYWORDS, are allowed through. attributes in ATTR_VAL_IS_URI + # are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are + # allowed. + # + # sanitize_html('<script> do_nasty_stuff() </script>') + # => <script> do_nasty_stuff() </script> + # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>') + # => <a>Click here for $100</a> + def sanitize_token(self, token): + + # accommodate filters which use token_type differently + token_type = token["type"] + if token_type in ("StartTag", "EndTag", "EmptyTag"): + name = token["name"] + namespace = token["namespace"] + if ((namespace, name) in self.allowed_elements or + (namespace is None and + (namespaces["html"], name) in self.allowed_elements)): + return self.allowed_token(token) + else: + return self.disallowed_token(token) + elif token_type == "Comment": + pass + else: + return token + + def allowed_token(self, token): + if "data" in token: + attrs = token["data"] + attr_names = set(attrs.keys()) + + # Remove forbidden attributes + for to_remove in (attr_names - self.allowed_attributes): + del token["data"][to_remove] + attr_names.remove(to_remove) + + # Remove attributes with disallowed URL values + for attr in (attr_names & self.attr_val_is_uri): + assert attr in attrs + # I don't have a clue where this regexp comes from or why it matches those + # characters, nor why we call unescape. I just know it's always been here. + # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all + # this will do is remove *more* than it otherwise would. 
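+                # (Concretely: an attribute value like
+                # "JAVA\x00SCRIPT:alert(1)" collapses to
+                # "javascript:alert(1)" at this point, so the
+                # allowed_protocols check below still rejects schemes that
+                # were padded with control characters or whitespace.)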
+ val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\\s]+", '', + unescape(attrs[attr])).lower() + # remove replacement characters from unescaped characters + val_unescaped = val_unescaped.replace("\ufffd", "") + try: + uri = urlparse.urlparse(val_unescaped) + except ValueError: + uri = None + del attrs[attr] + if uri and uri.scheme: + if uri.scheme not in self.allowed_protocols: + del attrs[attr] + if uri.scheme == 'data': + m = data_content_type.match(uri.path) + if not m: + del attrs[attr] + elif m.group('content_type') not in self.allowed_content_types: + del attrs[attr] + + for attr in self.svg_attr_val_allows_ref: + if attr in attrs: + attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', + ' ', + unescape(attrs[attr])) + if (token["name"] in self.svg_allow_local_href and + (namespaces['xlink'], 'href') in attrs and re.search(r'^\s*[^#\s].*', + attrs[(namespaces['xlink'], 'href')])): + del attrs[(namespaces['xlink'], 'href')] + if (None, 'style') in attrs: + attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')]) + token["data"] = attrs + return token + + def disallowed_token(self, token): + token_type = token["type"] + if token_type == "EndTag": + token["data"] = "</%s>" % token["name"] + elif token["data"]: + assert token_type in ("StartTag", "EmptyTag") + attrs = [] + for (ns, name), v in token["data"].items(): + attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v))) + token["data"] = "<%s%s>" % (token["name"], ''.join(attrs)) + else: + token["data"] = "<%s>" % token["name"] + if token.get("selfClosing"): + token["data"] = token["data"][:-1] + "/>" + + token["type"] = "Characters" + + del token["name"] + return token + + def sanitize_css(self, style): + # disallow urls + style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) + + # gauntlet + if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): + return '' + if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): + return '' + + clean = [] + for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style): + if not value: + continue + if prop.lower() in self.allowed_css_properties: + clean.append(prop + ': ' + value + ';') + elif prop.split('-')[0].lower() in ['background', 'border', 'margin', + 'padding']: + for keyword in value.split(): + if keyword not in self.allowed_css_keywords and \ + not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa + break + else: + clean.append(prop + ': ' + value + ';') + elif prop.lower() in self.allowed_svg_properties: + clean.append(prop + ': ' + value + ';') + + return ' '.join(clean) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyc new file mode 100644 index 0000000..632746f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py new file mode 100644 index 0000000..0d12584 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import, division, unicode_literals + +import re + +from . 
import base +from ..constants import rcdataElements, spaceCharacters +spaceCharacters = "".join(spaceCharacters) + +SPACES_REGEX = re.compile("[%s]+" % spaceCharacters) + + +class Filter(base.Filter): + """Collapses whitespace except in pre, textarea, and script elements""" + spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) + + def __iter__(self): + preserve = 0 + for token in base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag" \ + and (preserve or token["name"] in self.spacePreserveElements): + preserve += 1 + + elif type == "EndTag" and preserve: + preserve -= 1 + + elif not preserve and type == "SpaceCharacters" and token["data"]: + # Test on token["data"] above to not introduce spaces where there were not + token["data"] = " " + + elif not preserve and type == "Characters": + token["data"] = collapse_spaces(token["data"]) + + yield token + + +def collapse_spaces(text): + return SPACES_REGEX.sub(' ', text) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.pyc new file mode 100644 index 0000000..e3c3e8d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py new file mode 100644 index 0000000..ae41a13 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py @@ -0,0 +1,2791 @@ +from __future__ import absolute_import, division, unicode_literals +from pip._vendor.six import with_metaclass, viewkeys + +import types +from collections import OrderedDict + +from . import _inputstream +from . import _tokenizer + +from . import treebuilders +from .treebuilders.base import Marker + +from . 
import _utils
+from .constants import (
+    spaceCharacters, asciiUpper2Lower,
+    specialElements, headingElements, cdataElements, rcdataElements,
+    tokenTypes, tagTokenTypes,
+    namespaces,
+    htmlIntegrationPointElements, mathmlTextIntegrationPointElements,
+    adjustForeignAttributes as adjustForeignAttributesMap,
+    adjustMathMLAttributes, adjustSVGAttributes,
+    E,
+    _ReparseException
+)
+
+
+def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs):
+    """Parse an HTML document as a string or file-like object into a tree
+
+    :arg doc: the document to parse as a string or file-like object
+
+    :arg treebuilder: the treebuilder to use when parsing
+
+    :arg namespaceHTMLElements: whether or not to namespace HTML elements
+
+    :returns: parsed tree
+
+    Example:
+
+    >>> from html5lib.html5parser import parse
+    >>> parse('<html><body><p>This is a doc</p></body></html>')
+    <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>
+
+    """
+    tb = treebuilders.getTreeBuilder(treebuilder)
+    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
+    return p.parse(doc, **kwargs)
+
+
+def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs):
+    """Parse an HTML fragment as a string or file-like object into a tree
+
+    :arg doc: the fragment to parse as a string or file-like object
+
+    :arg container: the container context to parse the fragment in
+
+    :arg treebuilder: the treebuilder to use when parsing
+
+    :arg namespaceHTMLElements: whether or not to namespace HTML elements
+
+    :returns: parsed tree
+
+    Example:
+
+    >>> from html5lib.html5parser import parseFragment
+    >>> parseFragment('<b>this is a fragment</b>')
+    <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>
+
+    """
+    tb = treebuilders.getTreeBuilder(treebuilder)
+    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
+    return p.parseFragment(doc, container=container, **kwargs)
+
+
+def method_decorator_metaclass(function):
+    class Decorated(type):
+        def __new__(meta, classname, bases, classDict):
+            for attributeName, attribute in classDict.items():
+                if isinstance(attribute, types.FunctionType):
+                    attribute = function(attribute)
+
+                classDict[attributeName] = attribute
+            return type.__new__(meta, classname, bases, classDict)
+    return Decorated
+
+
+class HTMLParser(object):
+    """HTML parser
+
+    Generates a tree structure from a stream of (possibly malformed) HTML.
+
+    """
+
+    def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False):
+        """
+        :arg tree: a treebuilder class controlling the type of tree that will be
+            returned. 
Built in treebuilders can be accessed through + html5lib.treebuilders.getTreeBuilder(treeType) + + :arg strict: raise an exception when a parse error is encountered + + :arg namespaceHTMLElements: whether or not to namespace HTML elements + + :arg debug: whether or not to enable debug mode which logs things + + Example: + + >>> from html5lib.html5parser import HTMLParser + >>> parser = HTMLParser() # generates parser with etree builder + >>> parser = HTMLParser('lxml', strict=True) # generates parser with lxml builder which is strict + + """ + + # Raise an exception on the first error encountered + self.strict = strict + + if tree is None: + tree = treebuilders.getTreeBuilder("etree") + self.tree = tree(namespaceHTMLElements) + self.errors = [] + + self.phases = dict([(name, cls(self, self.tree)) for name, cls in + getPhases(debug).items()]) + + def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs): + + self.innerHTMLMode = innerHTML + self.container = container + self.scripting = scripting + self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs) + self.reset() + + try: + self.mainLoop() + except _ReparseException: + self.reset() + self.mainLoop() + + def reset(self): + self.tree.reset() + self.firstStartTag = False + self.errors = [] + self.log = [] # only used with debug mode + # "quirks" / "limited quirks" / "no quirks" + self.compatMode = "no quirks" + + if self.innerHTMLMode: + self.innerHTML = self.container.lower() + + if self.innerHTML in cdataElements: + self.tokenizer.state = self.tokenizer.rcdataState + elif self.innerHTML in rcdataElements: + self.tokenizer.state = self.tokenizer.rawtextState + elif self.innerHTML == 'plaintext': + self.tokenizer.state = self.tokenizer.plaintextState + else: + # state already is data state + # self.tokenizer.state = self.tokenizer.dataState + pass + self.phase = self.phases["beforeHtml"] + self.phase.insertHtmlElement() + self.resetInsertionMode() + else: + self.innerHTML = False # pylint:disable=redefined-variable-type + self.phase = self.phases["initial"] + + self.lastPhase = None + + self.beforeRCDataPhase = None + + self.framesetOK = True + + @property + def documentEncoding(self): + """Name of the character encoding that was used to decode the input stream, or + :obj:`None` if that is not determined yet + + """ + if not hasattr(self, 'tokenizer'): + return None + return self.tokenizer.stream.charEncoding[0].name + + def isHTMLIntegrationPoint(self, element): + if (element.name == "annotation-xml" and + element.namespace == namespaces["mathml"]): + return ("encoding" in element.attributes and + element.attributes["encoding"].translate( + asciiUpper2Lower) in + ("text/html", "application/xhtml+xml")) + else: + return (element.namespace, element.name) in htmlIntegrationPointElements + + def isMathMLTextIntegrationPoint(self, element): + return (element.namespace, element.name) in mathmlTextIntegrationPointElements + + def mainLoop(self): + CharactersToken = tokenTypes["Characters"] + SpaceCharactersToken = tokenTypes["SpaceCharacters"] + StartTagToken = tokenTypes["StartTag"] + EndTagToken = tokenTypes["EndTag"] + CommentToken = tokenTypes["Comment"] + DoctypeToken = tokenTypes["Doctype"] + ParseErrorToken = tokenTypes["ParseError"] + + for token in self.normalizedTokens(): + prev_token = None + new_token = token + while new_token is not None: + prev_token = new_token + currentNode = self.tree.openElements[-1] if self.tree.openElements else None + currentNodeNamespace = currentNode.namespace 
if currentNode else None + currentNodeName = currentNode.name if currentNode else None + + type = new_token["type"] + + if type == ParseErrorToken: + self.parseError(new_token["data"], new_token.get("datavars", {})) + new_token = None + else: + if (len(self.tree.openElements) == 0 or + currentNodeNamespace == self.tree.defaultNamespace or + (self.isMathMLTextIntegrationPoint(currentNode) and + ((type == StartTagToken and + token["name"] not in frozenset(["mglyph", "malignmark"])) or + type in (CharactersToken, SpaceCharactersToken))) or + (currentNodeNamespace == namespaces["mathml"] and + currentNodeName == "annotation-xml" and + type == StartTagToken and + token["name"] == "svg") or + (self.isHTMLIntegrationPoint(currentNode) and + type in (StartTagToken, CharactersToken, SpaceCharactersToken))): + phase = self.phase + else: + phase = self.phases["inForeignContent"] + + if type == CharactersToken: + new_token = phase.processCharacters(new_token) + elif type == SpaceCharactersToken: + new_token = phase.processSpaceCharacters(new_token) + elif type == StartTagToken: + new_token = phase.processStartTag(new_token) + elif type == EndTagToken: + new_token = phase.processEndTag(new_token) + elif type == CommentToken: + new_token = phase.processComment(new_token) + elif type == DoctypeToken: + new_token = phase.processDoctype(new_token) + + if (type == StartTagToken and prev_token["selfClosing"] and + not prev_token["selfClosingAcknowledged"]): + self.parseError("non-void-element-with-trailing-solidus", + {"name": prev_token["name"]}) + + # When the loop finishes it's EOF + reprocess = True + phases = [] + while reprocess: + phases.append(self.phase) + reprocess = self.phase.processEOF() + if reprocess: + assert self.phase not in phases + + def normalizedTokens(self): + for token in self.tokenizer: + yield self.normalizeToken(token) + + def parse(self, stream, *args, **kwargs): + """Parse a HTML document into a well-formed tree + + :arg stream: a file-like object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element). + + :arg scripting: treat noscript elements as if JavaScript was turned on + + :returns: parsed tree + + Example: + + >>> from html5lib.html5parser import HTMLParser + >>> parser = HTMLParser() + >>> parser.parse('<html><body><p>This is a doc</p></body></html>') + <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0> + + """ + self._parse(stream, False, None, *args, **kwargs) + return self.tree.getDocument() + + def parseFragment(self, stream, *args, **kwargs): + """Parse a HTML fragment into a well-formed tree fragment + + :arg container: name of the element we're setting the innerHTML + property if set to None, default to 'div' + + :arg stream: a file-like object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. 
If specified, that encoding will be used,
+        regardless of any BOM or later declaration (such as in a meta
+        element)
+
+        :arg scripting: treat noscript elements as if JavaScript was turned on
+
+        :returns: parsed tree
+
+        Example:
+
+        >>> from html5lib.html5parser import HTMLParser
+        >>> parser = HTMLParser()
+        >>> parser.parseFragment('<b>this is a fragment</b>')
+        <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>
+
+        """
+        self._parse(stream, True, *args, **kwargs)
+        return self.tree.getFragment()
+
+    def parseError(self, errorcode="XXX-undefined-error", datavars=None):
+        # XXX The idea is to make errorcode mandatory.
+        if datavars is None:
+            datavars = {}
+        self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
+        if self.strict:
+            raise ParseError(E[errorcode] % datavars)
+
+    def normalizeToken(self, token):
+        # HTML5 specific normalizations to the token stream
+        if token["type"] == tokenTypes["StartTag"]:
+            raw = token["data"]
+            token["data"] = OrderedDict(raw)
+            if len(raw) > len(token["data"]):
+                # we had some duplicated attribute, fix so first wins
+                token["data"].update(raw[::-1])
+
+        return token
+
+    def adjustMathMLAttributes(self, token):
+        adjust_attributes(token, adjustMathMLAttributes)
+
+    def adjustSVGAttributes(self, token):
+        adjust_attributes(token, adjustSVGAttributes)
+
+    def adjustForeignAttributes(self, token):
+        adjust_attributes(token, adjustForeignAttributesMap)
+
+    def reparseTokenNormal(self, token):
+        # pylint:disable=unused-argument
+        self.parser.phase()
+
+    def resetInsertionMode(self):
+        # The name of this method is mostly historical. (It's also used in the
+        # specification.)
+        last = False
+        newModes = {
+            "select": "inSelect",
+            "td": "inCell",
+            "th": "inCell",
+            "tr": "inRow",
+            "tbody": "inTableBody",
+            "thead": "inTableBody",
+            "tfoot": "inTableBody",
+            "caption": "inCaption",
+            "colgroup": "inColumnGroup",
+            "table": "inTable",
+            "head": "inBody",
+            "body": "inBody",
+            "frameset": "inFrameset",
+            "html": "beforeHead"
+        }
+        for node in self.tree.openElements[::-1]:
+            nodeName = node.name
+            new_phase = None
+            if node == self.tree.openElements[0]:
+                assert self.innerHTML
+                last = True
+                nodeName = self.innerHTML
+            # Check for conditions that should only happen in the innerHTML
+            # case
+            if nodeName in ("select", "colgroup", "head", "html"):
+                assert self.innerHTML
+
+            if not last and node.namespace != self.tree.defaultNamespace:
+                continue
+
+            if nodeName in newModes:
+                new_phase = self.phases[newModes[nodeName]]
+                break
+            elif last:
+                new_phase = self.phases["inBody"]
+                break
+
+        self.phase = new_phase
+
+    def parseRCDataRawtext(self, token, contentType):
+        # Generic RCDATA/RAWTEXT Parsing algorithm
+        assert contentType in ("RAWTEXT", "RCDATA")
+
+        self.tree.insertElement(token)
+
+        if contentType == "RAWTEXT":
+            self.tokenizer.state = self.tokenizer.rawtextState
+        else:
+            self.tokenizer.state = self.tokenizer.rcdataState
+
+        self.originalPhase = self.phase
+
+        self.phase = self.phases["text"]
+
+
+@_utils.memoize
+def getPhases(debug):
+    def log(function):
+        """Logger that records which phase processes each token"""
+        type_names = dict((value, key) for key, value in
+                          tokenTypes.items())
+
+        def wrapped(self, *args, **kwargs):
+            if function.__name__.startswith("process") and len(args) > 0:
+                token = args[0]
+                try:
+                    info = {"type": type_names[token['type']]}
+                except:
+                    raise
+                if token['type'] in tagTokenTypes:
+                    info["name"] = token['name']
+
+                self.parser.log.append((self.parser.tokenizer.state.__name__,
+ 
                                        self.parser.phase.__class__.__name__,
+                                        self.__class__.__name__,
+                                        function.__name__,
+                                        info))
+                return function(self, *args, **kwargs)
+            else:
+                return function(self, *args, **kwargs)
+        return wrapped
+
+    def getMetaclass(use_metaclass, metaclass_func):
+        if use_metaclass:
+            return method_decorator_metaclass(metaclass_func)
+        else:
+            return type
+
+    # pylint:disable=unused-argument
+    class Phase(with_metaclass(getMetaclass(debug, log))):
+        """Base class for helper object that implements each phase of processing
+        """
+
+        def __init__(self, parser, tree):
+            self.parser = parser
+            self.tree = tree
+
+        def processEOF(self):
+            raise NotImplementedError
+
+        def processComment(self, token):
+            # For most phases the following is correct. Where it's not it will be
+            # overridden.
+            self.tree.insertComment(token, self.tree.openElements[-1])
+
+        def processDoctype(self, token):
+            self.parser.parseError("unexpected-doctype")
+
+        def processCharacters(self, token):
+            self.tree.insertText(token["data"])
+
+        def processSpaceCharacters(self, token):
+            self.tree.insertText(token["data"])
+
+        def processStartTag(self, token):
+            return self.startTagHandler[token["name"]](token)
+
+        def startTagHtml(self, token):
+            if not self.parser.firstStartTag and token["name"] == "html":
+                self.parser.parseError("non-html-root")
+            # XXX Need a check here to see if the first start tag token emitted is
+            # this token... If it's not, invoke self.parser.parseError().
+            for attr, value in token["data"].items():
+                if attr not in self.tree.openElements[0].attributes:
+                    self.tree.openElements[0].attributes[attr] = value
+            self.parser.firstStartTag = False
+
+        def processEndTag(self, token):
+            return self.endTagHandler[token["name"]](token)
+
+    class InitialPhase(Phase):
+        def processSpaceCharacters(self, token):
+            pass
+
+        def processComment(self, token):
+            self.tree.insertComment(token, self.tree.document)
+
+        def processDoctype(self, token):
+            name = token["name"]
+            publicId = token["publicId"]
+            systemId = token["systemId"]
+            correct = token["correct"]
+
+            if (name != "html" or publicId is not None or
+                    systemId is not None and systemId != "about:legacy-compat"):
+                self.parser.parseError("unknown-doctype")
+
+            if publicId is None:
+                publicId = ""
+
+            self.tree.insertDoctype(token)
+
+            if publicId != "":
+                publicId = publicId.translate(asciiUpper2Lower)
+
+            if (not correct or token["name"] != "html" or
+                    publicId.startswith(
+                        ("+//silmaril//dtd html pro v0r11 19970101//",
+                         "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
+                         "-//as//dtd html 3.0 aswedit + extensions//",
+                         "-//ietf//dtd html 2.0 level 1//",
+                         "-//ietf//dtd html 2.0 level 2//",
+                         "-//ietf//dtd html 2.0 strict level 1//",
+                         "-//ietf//dtd html 2.0 strict level 2//",
+                         "-//ietf//dtd html 2.0 strict//",
+                         "-//ietf//dtd html 2.0//",
+                         "-//ietf//dtd html 2.1e//",
+                         "-//ietf//dtd html 3.0//",
+                         "-//ietf//dtd html 3.2 final//",
+                         "-//ietf//dtd html 3.2//",
+                         "-//ietf//dtd html 3//",
+                         "-//ietf//dtd html level 0//",
+                         "-//ietf//dtd html level 1//",
+                         "-//ietf//dtd html level 2//",
+                         "-//ietf//dtd html level 3//",
+                         "-//ietf//dtd html strict level 0//",
+                         "-//ietf//dtd html strict level 1//",
+                         "-//ietf//dtd html strict level 2//",
+                         "-//ietf//dtd html strict level 3//",
+                         "-//ietf//dtd html strict//",
+                         "-//ietf//dtd html//",
+                         "-//metrius//dtd metrius presentational//",
+                         "-//microsoft//dtd internet explorer 2.0 html strict//",
+                         "-//microsoft//dtd internet explorer 2.0 html//",
+                         "-//microsoft//dtd internet explorer 2.0 tables//",
+                         "-//microsoft//dtd internet explorer 3.0 html strict//",
+                         "-//microsoft//dtd internet explorer 3.0 html//",
+                         "-//microsoft//dtd internet explorer 3.0 tables//",
+                         "-//netscape comm. corp.//dtd html//",
+                         "-//netscape comm. corp.//dtd strict html//",
+                         "-//o'reilly and associates//dtd html 2.0//",
+                         "-//o'reilly and associates//dtd html extended 1.0//",
+                         "-//o'reilly and associates//dtd html extended relaxed 1.0//",
+                         "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
+                         "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
+                         "-//spyglass//dtd html 2.0 extended//",
+                         "-//sq//dtd html 2.0 hotmetal + extensions//",
+                         "-//sun microsystems corp.//dtd hotjava html//",
+                         "-//sun microsystems corp.//dtd hotjava strict html//",
+                         "-//w3c//dtd html 3 1995-03-24//",
+                         "-//w3c//dtd html 3.2 draft//",
+                         "-//w3c//dtd html 3.2 final//",
+                         "-//w3c//dtd html 3.2//",
+                         "-//w3c//dtd html 3.2s draft//",
+                         "-//w3c//dtd html 4.0 frameset//",
+                         "-//w3c//dtd html 4.0 transitional//",
+                         "-//w3c//dtd html experimental 19960712//",
+                         "-//w3c//dtd html experimental 970421//",
+                         "-//w3c//dtd w3 html//",
+                         "-//w3o//dtd w3 html 3.0//",
+                         "-//webtechs//dtd mozilla html 2.0//",
+                         "-//webtechs//dtd mozilla html//")) or
+                    publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
+                                 "-/w3c/dtd html 4.0 transitional/en",
+                                 "html") or
+                    publicId.startswith(
+                        ("-//w3c//dtd html 4.01 frameset//",
+                         "-//w3c//dtd html 4.01 transitional//")) and
+                    systemId is None or
+                    systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
+                self.parser.compatMode = "quirks"
+            elif (publicId.startswith(
+                    ("-//w3c//dtd xhtml 1.0 frameset//",
+                     "-//w3c//dtd xhtml 1.0 transitional//")) or
+                    publicId.startswith(
+                        ("-//w3c//dtd html 4.01 frameset//",
+                         "-//w3c//dtd html 4.01 transitional//")) and
+                    systemId is not None):
+                self.parser.compatMode = "limited quirks"
+
+            self.parser.phase = self.parser.phases["beforeHtml"]
+
+        def anythingElse(self):
+            self.parser.compatMode = "quirks"
+            self.parser.phase = self.parser.phases["beforeHtml"]
+
+        def processCharacters(self, token):
+            self.parser.parseError("expected-doctype-but-got-chars")
+            self.anythingElse()
+            return token
+
+        def processStartTag(self, token):
+            self.parser.parseError("expected-doctype-but-got-start-tag",
+                                   {"name": token["name"]})
+            self.anythingElse()
+            return token
+
+        def processEndTag(self, token):
+            self.parser.parseError("expected-doctype-but-got-end-tag",
+                                   {"name": token["name"]})
+            self.anythingElse()
+            return token
+
+        def processEOF(self):
+            self.parser.parseError("expected-doctype-but-got-eof")
+            self.anythingElse()
+            return True
+
+    class BeforeHtmlPhase(Phase):
+        # helper methods
+        def insertHtmlElement(self):
+            self.tree.insertRoot(impliedTagToken("html", "StartTag"))
+            self.parser.phase = self.parser.phases["beforeHead"]
+
+        # other
+        def processEOF(self):
+            self.insertHtmlElement()
+            return True
+
+        def processComment(self, token):
+            self.tree.insertComment(token, self.tree.document)
+
+        def processSpaceCharacters(self, token):
+            pass
+
+        def processCharacters(self, token):
+            self.insertHtmlElement()
+            return token
+
+        def processStartTag(self, token):
+            if token["name"] == "html":
+                self.parser.firstStartTag = True
+            self.insertHtmlElement()
+            return token
+
+        def processEndTag(self, token):
+            if token["name"] not in ("head", "body", "html", "br"):
+                self.parser.parseError("unexpected-end-tag-before-html",
+                                       {"name": token["name"]})
+            else:
+                self.insertHtmlElement()
+                return token
+
+    class BeforeHeadPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("head", self.startTagHead)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                (("head", "body", "html", "br"), self.endTagImplyHead)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            self.startTagHead(impliedTagToken("head", "StartTag"))
+            return True
+
+        def processSpaceCharacters(self, token):
+            pass
+
+        def processCharacters(self, token):
+            self.startTagHead(impliedTagToken("head", "StartTag"))
+            return token
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagHead(self, token):
+            self.tree.insertElement(token)
+            self.tree.headPointer = self.tree.openElements[-1]
+            self.parser.phase = self.parser.phases["inHead"]
+
+        def startTagOther(self, token):
+            self.startTagHead(impliedTagToken("head", "StartTag"))
+            return token
+
+        def endTagImplyHead(self, token):
+            self.startTagHead(impliedTagToken("head", "StartTag"))
+            return token
+
+        def endTagOther(self, token):
+            self.parser.parseError("end-tag-after-implied-root",
+                                   {"name": token["name"]})
+
+    class InHeadPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("title", self.startTagTitle),
+                (("noframes", "style"), self.startTagNoFramesStyle),
+                ("noscript", self.startTagNoscript),
+                ("script", self.startTagScript),
+                (("base", "basefont", "bgsound", "command", "link"),
+                 self.startTagBaseLinkCommand),
+                ("meta", self.startTagMeta),
+                ("head", self.startTagHead)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("head", self.endTagHead),
+                (("br", "html", "body"), self.endTagHtmlBodyBr)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # the real thing
+        def processEOF(self):
+            self.anythingElse()
+            return True
+
+        def processCharacters(self, token):
+            self.anythingElse()
+            return token
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagHead(self, token):
+            self.parser.parseError("two-heads-are-not-better-than-one")
+
+        def startTagBaseLinkCommand(self, token):
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+
+        def startTagMeta(self, token):
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+
+            attributes = token["data"]
+            if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
+                if "charset" in attributes:
+                    self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
+                elif ("content" in attributes and
+                      "http-equiv" in attributes and
+                      attributes["http-equiv"].lower() == "content-type"):
+                    # Encoding it as UTF-8 here is a hack, as really we should pass
+                    # the abstract Unicode string, and just use the
+                    # ContentAttrParser on that, but using UTF-8 allows all chars
+                    # to be encoded and as a ASCII-superset works.
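Every phase above routes tokens through _utils.MethodDispatcher tables: lists of (tag-name-or-names, handler) pairs with a default fallback. The following is a minimal sketch of that dispatch pattern, my own simplified stand-in rather than html5lib's actual _utils code:

    # Sketch only: a simplified stand-in for html5lib's _utils.MethodDispatcher.
    class SimpleDispatcher(dict):
        def __init__(self, items):
            dict.__init__(self)
            for names, handler in items:
                # A key may be a single tag name or a tuple of names.
                if isinstance(names, str):
                    names = (names,)
                for name in names:
                    self[name] = handler
            self.default = None

        def __missing__(self, key):
            # Unknown tag names fall back to the phase's *Other handler.
            return self.default

    dispatch = SimpleDispatcher([("head", lambda t: "startTagHead"),
                                 (("body", "html", "br"), lambda t: "implied")])
    dispatch.default = lambda t: "other"
    print(dispatch["head"](None), dispatch["div"](None))  # startTagHead other

This is why processStartTag can simply do self.startTagHandler[token["name"]](token): lookup never raises, it just falls through to the default.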
+                    data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
+                    parser = _inputstream.ContentAttrParser(data)
+                    codec = parser.parse()
+                    self.parser.tokenizer.stream.changeEncoding(codec)
+
+        def startTagTitle(self, token):
+            self.parser.parseRCDataRawtext(token, "RCDATA")
+
+        def startTagNoFramesStyle(self, token):
+            # Need to decide whether to implement the scripting-disabled case
+            self.parser.parseRCDataRawtext(token, "RAWTEXT")
+
+        def startTagNoscript(self, token):
+            if self.parser.scripting:
+                self.parser.parseRCDataRawtext(token, "RAWTEXT")
+            else:
+                self.tree.insertElement(token)
+                self.parser.phase = self.parser.phases["inHeadNoscript"]
+
+        def startTagScript(self, token):
+            self.tree.insertElement(token)
+            self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
+            self.parser.originalPhase = self.parser.phase
+            self.parser.phase = self.parser.phases["text"]
+
+        def startTagOther(self, token):
+            self.anythingElse()
+            return token
+
+        def endTagHead(self, token):
+            node = self.parser.tree.openElements.pop()
+            assert node.name == "head", "Expected head got %s" % node.name
+            self.parser.phase = self.parser.phases["afterHead"]
+
+        def endTagHtmlBodyBr(self, token):
+            self.anythingElse()
+            return token
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def anythingElse(self):
+            self.endTagHead(impliedTagToken("head"))
+
+    class InHeadNoscriptPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand),
+                (("head", "noscript"), self.startTagHeadNoscript),
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("noscript", self.endTagNoscript),
+                ("br", self.endTagBr),
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            self.parser.parseError("eof-in-head-noscript")
+            self.anythingElse()
+            return True
+
+        def processComment(self, token):
+            return self.parser.phases["inHead"].processComment(token)
+
+        def processCharacters(self, token):
+            self.parser.parseError("char-in-head-noscript")
+            self.anythingElse()
+            return token
+
+        def processSpaceCharacters(self, token):
+            return self.parser.phases["inHead"].processSpaceCharacters(token)
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagBaseLinkCommand(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagHeadNoscript(self, token):
+            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
+
+        def startTagOther(self, token):
+            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
+            self.anythingElse()
+            return token
+
+        def endTagNoscript(self, token):
+            node = self.parser.tree.openElements.pop()
+            assert node.name == "noscript", "Expected noscript got %s" % node.name
+            self.parser.phase = self.parser.phases["inHead"]
+
+        def endTagBr(self, token):
+            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
+            self.anythingElse()
+            return token
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def anythingElse(self):
+            # Caller must raise parse error first!
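startTagMeta above is what makes in-page encoding declarations work: while the input stream's encoding is still "tentative", a meta charset (or http-equiv Content-Type) can switch it mid-parse via changeEncoding. A small usage sketch against html5lib's public API; the byte payload is an illustrative assumption, not taken from this project:

    import html5lib

    # Bytes with no BOM and no transport-level encoding: the stream starts
    # tentative, and the <meta charset> token triggers the re-encoding path
    # shown in startTagMeta above.
    raw = b'<html><head><meta charset="iso-8859-7"></head><body>text</body></html>'
    doc = html5lib.parse(raw)  # returns an ElementTree element by default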
+            self.endTagNoscript(impliedTagToken("noscript"))
+
+    class AfterHeadPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("body", self.startTagBody),
+                ("frameset", self.startTagFrameset),
+                (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
+                  "style", "title"),
+                 self.startTagFromHead),
+                ("head", self.startTagHead)
+            ])
+            self.startTagHandler.default = self.startTagOther
+            self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
+                                                           self.endTagHtmlBodyBr)])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            self.anythingElse()
+            return True
+
+        def processCharacters(self, token):
+            self.anythingElse()
+            return token
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagBody(self, token):
+            self.parser.framesetOK = False
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inBody"]
+
+        def startTagFrameset(self, token):
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inFrameset"]
+
+        def startTagFromHead(self, token):
+            self.parser.parseError("unexpected-start-tag-out-of-my-head",
+                                   {"name": token["name"]})
+            self.tree.openElements.append(self.tree.headPointer)
+            self.parser.phases["inHead"].processStartTag(token)
+            for node in self.tree.openElements[::-1]:
+                if node.name == "head":
+                    self.tree.openElements.remove(node)
+                    break
+
+        def startTagHead(self, token):
+            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
+
+        def startTagOther(self, token):
+            self.anythingElse()
+            return token
+
+        def endTagHtmlBodyBr(self, token):
+            self.anythingElse()
+            return token
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def anythingElse(self):
+            self.tree.insertElement(impliedTagToken("body", "StartTag"))
+            self.parser.phase = self.parser.phases["inBody"]
+            self.parser.framesetOK = True
+
+    class InBodyPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
+        # the really-really-really-very crazy mode
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            # Set this to the default handler
+            self.processSpaceCharacters = self.processSpaceCharactersNonPre
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("base", "basefont", "bgsound", "command", "link", "meta",
+                  "script", "style", "title"),
+                 self.startTagProcessInHead),
+                ("body", self.startTagBody),
+                ("frameset", self.startTagFrameset),
+                (("address", "article", "aside", "blockquote", "center", "details",
+                  "dir", "div", "dl", "fieldset", "figcaption", "figure",
+                  "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
+                  "section", "summary", "ul"),
+                 self.startTagCloseP),
+                (headingElements, self.startTagHeading),
+                (("pre", "listing"), self.startTagPreListing),
+                ("form", self.startTagForm),
+                (("li", "dd", "dt"), self.startTagListItem),
+                ("plaintext", self.startTagPlaintext),
+                ("a", self.startTagA),
+                (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
+                  "strong", "tt", "u"), self.startTagFormatting),
+                ("nobr", self.startTagNobr),
+                ("button", self.startTagButton),
+                (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
+                ("xmp", self.startTagXmp),
+                ("table", self.startTagTable),
+                (("area", "br", "embed", "img", "keygen", "wbr"),
+                 self.startTagVoidFormatting),
+                (("param", "source", "track"), self.startTagParamSource),
+                ("input", self.startTagInput),
+                ("hr", self.startTagHr),
+                ("image", self.startTagImage),
+                ("isindex", self.startTagIsIndex),
+                ("textarea", self.startTagTextarea),
+                ("iframe", self.startTagIFrame),
+                ("noscript", self.startTagNoscript),
+                (("noembed", "noframes"), self.startTagRawtext),
+                ("select", self.startTagSelect),
+                (("rp", "rt"), self.startTagRpRt),
+                (("option", "optgroup"), self.startTagOpt),
+                (("math"), self.startTagMath),
+                (("svg"), self.startTagSvg),
+                (("caption", "col", "colgroup", "frame", "head",
+                  "tbody", "td", "tfoot", "th", "thead",
+                  "tr"), self.startTagMisplaced)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("body", self.endTagBody),
+                ("html", self.endTagHtml),
+                (("address", "article", "aside", "blockquote", "button", "center",
+                  "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
+                  "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
+                  "section", "summary", "ul"), self.endTagBlock),
+                ("form", self.endTagForm),
+                ("p", self.endTagP),
+                (("dd", "dt", "li"), self.endTagListItem),
+                (headingElements, self.endTagHeading),
+                (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
+                  "strike", "strong", "tt", "u"), self.endTagFormatting),
+                (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
+                ("br", self.endTagBr),
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def isMatchingFormattingElement(self, node1, node2):
+            return (node1.name == node2.name and
+                    node1.namespace == node2.namespace and
+                    node1.attributes == node2.attributes)
+
+        # helper
+        def addFormattingElement(self, token):
+            self.tree.insertElement(token)
+            element = self.tree.openElements[-1]
+
+            matchingElements = []
+            for node in self.tree.activeFormattingElements[::-1]:
+                if node is Marker:
+                    break
+                elif self.isMatchingFormattingElement(node, element):
+                    matchingElements.append(node)
+
+            assert len(matchingElements) <= 3
+            if len(matchingElements) == 3:
+                self.tree.activeFormattingElements.remove(matchingElements[-1])
+            self.tree.activeFormattingElements.append(element)
+
+        # the real deal
+        def processEOF(self):
+            allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
+                                          "tfoot", "th", "thead", "tr", "body",
+                                          "html"))
+            for node in self.tree.openElements[::-1]:
+                if node.name not in allowed_elements:
+                    self.parser.parseError("expected-closing-tag-but-got-eof")
+                    break
+            # Stop parsing
+
+        def processSpaceCharactersDropNewline(self, token):
+            # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
+            # want to drop leading newlines
+            data = token["data"]
+            self.processSpaceCharacters = self.processSpaceCharactersNonPre
+            if (data.startswith("\n") and
+                self.tree.openElements[-1].name in ("pre", "listing", "textarea") and
+                    not self.tree.openElements[-1].hasContent()):
+                data = data[1:]
+            if data:
+                self.tree.reconstructActiveFormattingElements()
+                self.tree.insertText(data)
+
+        def processCharacters(self, token):
+            if token["data"] == "\u0000":
+                # The tokenizer should always emit null on its own
+                return
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertText(token["data"])
+            # This must be bad for performance
+            if (self.parser.framesetOK and
+                any([char not in spaceCharacters
+                     for char in token["data"]])):
+                self.parser.framesetOK = False
+
+        def processSpaceCharactersNonPre(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertText(token["data"])
+
+        def startTagProcessInHead(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagBody(self, token):
+            self.parser.parseError("unexpected-start-tag", {"name": "body"})
+            if (len(self.tree.openElements) == 1 or
+                    self.tree.openElements[1].name != "body"):
+                assert self.parser.innerHTML
+            else:
+                self.parser.framesetOK = False
+                for attr, value in token["data"].items():
+                    if attr not in self.tree.openElements[1].attributes:
+                        self.tree.openElements[1].attributes[attr] = value
+
+        def startTagFrameset(self, token):
+            self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
+            if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
+                assert self.parser.innerHTML
+            elif not self.parser.framesetOK:
+                pass
+            else:
+                if self.tree.openElements[1].parent:
+                    self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
+                while self.tree.openElements[-1].name != "html":
+                    self.tree.openElements.pop()
+                self.tree.insertElement(token)
+                self.parser.phase = self.parser.phases["inFrameset"]
+
+        def startTagCloseP(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.insertElement(token)
+
+        def startTagPreListing(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.insertElement(token)
+            self.parser.framesetOK = False
+            self.processSpaceCharacters = self.processSpaceCharactersDropNewline
+
+        def startTagForm(self, token):
+            if self.tree.formPointer:
+                self.parser.parseError("unexpected-start-tag", {"name": "form"})
+            else:
+                if self.tree.elementInScope("p", variant="button"):
+                    self.endTagP(impliedTagToken("p"))
+                self.tree.insertElement(token)
+                self.tree.formPointer = self.tree.openElements[-1]
+
+        def startTagListItem(self, token):
+            self.parser.framesetOK = False
+
+            stopNamesMap = {"li": ["li"],
+                            "dt": ["dt", "dd"],
+                            "dd": ["dt", "dd"]}
+            stopNames = stopNamesMap[token["name"]]
+            for node in reversed(self.tree.openElements):
+                if node.name in stopNames:
+                    self.parser.phase.processEndTag(
+                        impliedTagToken(node.name, "EndTag"))
+                    break
+                if (node.nameTuple in specialElements and
+                        node.name not in ("address", "div", "p")):
+                    break
+
+            if self.tree.elementInScope("p", variant="button"):
+                self.parser.phase.processEndTag(
+                    impliedTagToken("p", "EndTag"))
+
+            self.tree.insertElement(token)
+
+        def startTagPlaintext(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.insertElement(token)
+            self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
+
+        def startTagHeading(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            if self.tree.openElements[-1].name in headingElements:
+                self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
+                self.tree.openElements.pop()
+            self.tree.insertElement(token)
+
+        def startTagA(self, token):
+            afeAElement = self.tree.elementInActiveFormattingElements("a")
+            if afeAElement:
+                self.parser.parseError("unexpected-start-tag-implies-end-tag",
+                                       {"startName": "a", "endName": "a"})
+                self.endTagFormatting(impliedTagToken("a"))
+                if afeAElement in self.tree.openElements:
+                    self.tree.openElements.remove(afeAElement)
+                if afeAElement in self.tree.activeFormattingElements:
+                    self.tree.activeFormattingElements.remove(afeAElement)
+            self.tree.reconstructActiveFormattingElements()
+            self.addFormattingElement(token)
+
+        def startTagFormatting(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.addFormattingElement(token)
+
+        def startTagNobr(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            if self.tree.elementInScope("nobr"):
+                self.parser.parseError("unexpected-start-tag-implies-end-tag",
+                                       {"startName": "nobr", "endName": "nobr"})
+                self.processEndTag(impliedTagToken("nobr"))
+                # XXX Need tests that trigger the following
+                self.tree.reconstructActiveFormattingElements()
+            self.addFormattingElement(token)
+
+        def startTagButton(self, token):
+            if self.tree.elementInScope("button"):
+                self.parser.parseError("unexpected-start-tag-implies-end-tag",
+                                       {"startName": "button", "endName": "button"})
+                self.processEndTag(impliedTagToken("button"))
+                return token
+            else:
+                self.tree.reconstructActiveFormattingElements()
+                self.tree.insertElement(token)
+                self.parser.framesetOK = False
+
+        def startTagAppletMarqueeObject(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertElement(token)
+            self.tree.activeFormattingElements.append(Marker)
+            self.parser.framesetOK = False
+
+        def startTagXmp(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.reconstructActiveFormattingElements()
+            self.parser.framesetOK = False
+            self.parser.parseRCDataRawtext(token, "RAWTEXT")
+
+        def startTagTable(self, token):
+            if self.parser.compatMode != "quirks":
+                if self.tree.elementInScope("p", variant="button"):
+                    self.processEndTag(impliedTagToken("p"))
+            self.tree.insertElement(token)
+            self.parser.framesetOK = False
+            self.parser.phase = self.parser.phases["inTable"]
+
+        def startTagVoidFormatting(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+            self.parser.framesetOK = False
+
+        def startTagInput(self, token):
+            framesetOK = self.parser.framesetOK
+            self.startTagVoidFormatting(token)
+            if ("type" in token["data"] and
+                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
+                # input type=hidden doesn't change framesetOK
+                self.parser.framesetOK = framesetOK
+
+        def startTagParamSource(self, token):
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+
+        def startTagHr(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+            self.parser.framesetOK = False
+
+        def startTagImage(self, token):
+            # No really...
+            self.parser.parseError("unexpected-start-tag-treated-as",
+                                   {"originalName": "image", "newName": "img"})
+            self.processStartTag(impliedTagToken("img", "StartTag",
+                                                 attributes=token["data"],
+                                                 selfClosing=token["selfClosing"]))
+
+        def startTagIsIndex(self, token):
+            self.parser.parseError("deprecated-tag", {"name": "isindex"})
+            if self.tree.formPointer:
+                return
+            form_attrs = {}
+            if "action" in token["data"]:
+                form_attrs["action"] = token["data"]["action"]
+            self.processStartTag(impliedTagToken("form", "StartTag",
+                                                 attributes=form_attrs))
+            self.processStartTag(impliedTagToken("hr", "StartTag"))
+            self.processStartTag(impliedTagToken("label", "StartTag"))
+            # XXX Localization ...
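startTagImage above quietly retags <image> as <img>, a spec-mandated compatibility fix. The effect is easy to observe through html5lib's top-level API; a minimal sketch (namespaceHTMLElements=False just keeps the element names unprefixed):

    import html5lib

    doc = html5lib.parse('<image src="photo.png">', namespaceHTMLElements=False)
    print(doc.find(".//img") is not None)   # True: <image> became <img>
    print(doc.find(".//image") is None)     # True: no <image> element survives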
+ if "prompt" in token["data"]: + prompt = token["data"]["prompt"] + else: + prompt = "This is a searchable index. Enter search keywords: " + self.processCharacters( + {"type": tokenTypes["Characters"], "data": prompt}) + attributes = token["data"].copy() + if "action" in attributes: + del attributes["action"] + if "prompt" in attributes: + del attributes["prompt"] + attributes["name"] = "isindex" + self.processStartTag(impliedTagToken("input", "StartTag", + attributes=attributes, + selfClosing=token["selfClosing"])) + self.processEndTag(impliedTagToken("label")) + self.processStartTag(impliedTagToken("hr", "StartTag")) + self.processEndTag(impliedTagToken("form")) + + def startTagTextarea(self, token): + self.tree.insertElement(token) + self.parser.tokenizer.state = self.parser.tokenizer.rcdataState + self.processSpaceCharacters = self.processSpaceCharactersDropNewline + self.parser.framesetOK = False + + def startTagIFrame(self, token): + self.parser.framesetOK = False + self.startTagRawtext(token) + + def startTagNoscript(self, token): + if self.parser.scripting: + self.startTagRawtext(token) + else: + self.startTagOther(token) + + def startTagRawtext(self, token): + """iframe, noembed noframes, noscript(if scripting enabled)""" + self.parser.parseRCDataRawtext(token, "RAWTEXT") + + def startTagOpt(self, token): + if self.tree.openElements[-1].name == "option": + self.parser.phase.processEndTag(impliedTagToken("option")) + self.tree.reconstructActiveFormattingElements() + self.parser.tree.insertElement(token) + + def startTagSelect(self, token): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(token) + self.parser.framesetOK = False + if self.parser.phase in (self.parser.phases["inTable"], + self.parser.phases["inCaption"], + self.parser.phases["inColumnGroup"], + self.parser.phases["inTableBody"], + self.parser.phases["inRow"], + self.parser.phases["inCell"]): + self.parser.phase = self.parser.phases["inSelectInTable"] + else: + self.parser.phase = self.parser.phases["inSelect"] + + def startTagRpRt(self, token): + if self.tree.elementInScope("ruby"): + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != "ruby": + self.parser.parseError() + self.tree.insertElement(token) + + def startTagMath(self, token): + self.tree.reconstructActiveFormattingElements() + self.parser.adjustMathMLAttributes(token) + self.parser.adjustForeignAttributes(token) + token["namespace"] = namespaces["mathml"] + self.tree.insertElement(token) + # Need to get the parse error right for the case where the token + # has a namespace not equal to the xmlns attribute + if token["selfClosing"]: + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def startTagSvg(self, token): + self.tree.reconstructActiveFormattingElements() + self.parser.adjustSVGAttributes(token) + self.parser.adjustForeignAttributes(token) + token["namespace"] = namespaces["svg"] + self.tree.insertElement(token) + # Need to get the parse error right for the case where the token + # has a namespace not equal to the xmlns attribute + if token["selfClosing"]: + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def startTagMisplaced(self, token): + """ Elements that should be children of other elements that have a + different insertion mode; here they are ignored + "caption", "col", "colgroup", "frame", "frameset", "head", + "option", "optgroup", "tbody", "td", "tfoot", "th", "thead", + "tr", "noscript" + """ + 
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]}) + + def startTagOther(self, token): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(token) + + def endTagP(self, token): + if not self.tree.elementInScope("p", variant="button"): + self.startTagCloseP(impliedTagToken("p", "StartTag")) + self.parser.parseError("unexpected-end-tag", {"name": "p"}) + self.endTagP(impliedTagToken("p", "EndTag")) + else: + self.tree.generateImpliedEndTags("p") + if self.tree.openElements[-1].name != "p": + self.parser.parseError("unexpected-end-tag", {"name": "p"}) + node = self.tree.openElements.pop() + while node.name != "p": + node = self.tree.openElements.pop() + + def endTagBody(self, token): + if not self.tree.elementInScope("body"): + self.parser.parseError() + return + elif self.tree.openElements[-1].name != "body": + for node in self.tree.openElements[2:]: + if node.name not in frozenset(("dd", "dt", "li", "optgroup", + "option", "p", "rp", "rt", + "tbody", "td", "tfoot", + "th", "thead", "tr", "body", + "html")): + # Not sure this is the correct name for the parse error + self.parser.parseError( + "expected-one-end-tag-but-got-another", + {"gotName": "body", "expectedName": node.name}) + break + self.parser.phase = self.parser.phases["afterBody"] + + def endTagHtml(self, token): + # We repeat the test for the body end tag token being ignored here + if self.tree.elementInScope("body"): + self.endTagBody(impliedTagToken("body")) + return token + + def endTagBlock(self, token): + # Put us back in the right whitespace handling mode + if token["name"] == "pre": + self.processSpaceCharacters = self.processSpaceCharactersNonPre + inScope = self.tree.elementInScope(token["name"]) + if inScope: + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError("end-tag-too-early", {"name": token["name"]}) + if inScope: + node = self.tree.openElements.pop() + while node.name != token["name"]: + node = self.tree.openElements.pop() + + def endTagForm(self, token): + node = self.tree.formPointer + self.tree.formPointer = None + if node is None or not self.tree.elementInScope(node): + self.parser.parseError("unexpected-end-tag", + {"name": "form"}) + else: + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1] != node: + self.parser.parseError("end-tag-too-early-ignored", + {"name": "form"}) + self.tree.openElements.remove(node) + + def endTagListItem(self, token): + if token["name"] == "li": + variant = "list" + else: + variant = None + if not self.tree.elementInScope(token["name"], variant=variant): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + else: + self.tree.generateImpliedEndTags(exclude=token["name"]) + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError( + "end-tag-too-early", + {"name": token["name"]}) + node = self.tree.openElements.pop() + while node.name != token["name"]: + node = self.tree.openElements.pop() + + def endTagHeading(self, token): + for item in headingElements: + if self.tree.elementInScope(item): + self.tree.generateImpliedEndTags() + break + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError("end-tag-too-early", {"name": token["name"]}) + + for item in headingElements: + if self.tree.elementInScope(item): + item = self.tree.openElements.pop() + while item.name not in headingElements: + item = self.tree.openElements.pop() + break + + def endTagFormatting(self, token): + """The much-feared 
adoption agency algorithm""" + # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867 + # XXX Better parseError messages appreciated. + + # Step 1 + outerLoopCounter = 0 + + # Step 2 + while outerLoopCounter < 8: + + # Step 3 + outerLoopCounter += 1 + + # Step 4: + + # Let the formatting element be the last element in + # the list of active formatting elements that: + # - is between the end of the list and the last scope + # marker in the list, if any, or the start of the list + # otherwise, and + # - has the same tag name as the token. + formattingElement = self.tree.elementInActiveFormattingElements( + token["name"]) + if (not formattingElement or + (formattingElement in self.tree.openElements and + not self.tree.elementInScope(formattingElement.name))): + # If there is no such node, then abort these steps + # and instead act as described in the "any other + # end tag" entry below. + self.endTagOther(token) + return + + # Otherwise, if there is such a node, but that node is + # not in the stack of open elements, then this is a + # parse error; remove the element from the list, and + # abort these steps. + elif formattingElement not in self.tree.openElements: + self.parser.parseError("adoption-agency-1.2", {"name": token["name"]}) + self.tree.activeFormattingElements.remove(formattingElement) + return + + # Otherwise, if there is such a node, and that node is + # also in the stack of open elements, but the element + # is not in scope, then this is a parse error; ignore + # the token, and abort these steps. + elif not self.tree.elementInScope(formattingElement.name): + self.parser.parseError("adoption-agency-4.4", {"name": token["name"]}) + return + + # Otherwise, there is a formatting element and that + # element is in the stack and is in scope. If the + # element is not the current node, this is a parse + # error. In any case, proceed with the algorithm as + # written in the following steps. + else: + if formattingElement != self.tree.openElements[-1]: + self.parser.parseError("adoption-agency-1.3", {"name": token["name"]}) + + # Step 5: + + # Let the furthest block be the topmost node in the + # stack of open elements that is lower in the stack + # than the formatting element, and is an element in + # the special category. There might not be one. + afeIndex = self.tree.openElements.index(formattingElement) + furthestBlock = None + for element in self.tree.openElements[afeIndex:]: + if element.nameTuple in specialElements: + furthestBlock = element + break + + # Step 6: + + # If there is no furthest block, then the UA must + # first pop all the nodes from the bottom of the stack + # of open elements, from the current node up to and + # including the formatting element, then remove the + # formatting element from the list of active + # formatting elements, and finally abort these steps. + if furthestBlock is None: + element = self.tree.openElements.pop() + while element != formattingElement: + element = self.tree.openElements.pop() + self.tree.activeFormattingElements.remove(element) + return + + # Step 7 + commonAncestor = self.tree.openElements[afeIndex - 1] + + # Step 8: + # The bookmark is supposed to help us identify where to reinsert + # nodes in step 15. We have to ensure that we reinsert nodes after + # the node before the active formatting element. 
Note the bookmark + # can move in step 9.7 + bookmark = self.tree.activeFormattingElements.index(formattingElement) + + # Step 9 + lastNode = node = furthestBlock + innerLoopCounter = 0 + + index = self.tree.openElements.index(node) + while innerLoopCounter < 3: + innerLoopCounter += 1 + # Node is element before node in open elements + index -= 1 + node = self.tree.openElements[index] + if node not in self.tree.activeFormattingElements: + self.tree.openElements.remove(node) + continue + # Step 9.6 + if node == formattingElement: + break + # Step 9.7 + if lastNode == furthestBlock: + bookmark = self.tree.activeFormattingElements.index(node) + 1 + # Step 9.8 + clone = node.cloneNode() + # Replace node with clone + self.tree.activeFormattingElements[ + self.tree.activeFormattingElements.index(node)] = clone + self.tree.openElements[ + self.tree.openElements.index(node)] = clone + node = clone + # Step 9.9 + # Remove lastNode from its parents, if any + if lastNode.parent: + lastNode.parent.removeChild(lastNode) + node.appendChild(lastNode) + # Step 9.10 + lastNode = node + + # Step 10 + # Foster parent lastNode if commonAncestor is a + # table, tbody, tfoot, thead, or tr we need to foster + # parent the lastNode + if lastNode.parent: + lastNode.parent.removeChild(lastNode) + + if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")): + parent, insertBefore = self.tree.getTableMisnestedNodePosition() + parent.insertBefore(lastNode, insertBefore) + else: + commonAncestor.appendChild(lastNode) + + # Step 11 + clone = formattingElement.cloneNode() + + # Step 12 + furthestBlock.reparentChildren(clone) + + # Step 13 + furthestBlock.appendChild(clone) + + # Step 14 + self.tree.activeFormattingElements.remove(formattingElement) + self.tree.activeFormattingElements.insert(bookmark, clone) + + # Step 15 + self.tree.openElements.remove(formattingElement) + self.tree.openElements.insert( + self.tree.openElements.index(furthestBlock) + 1, clone) + + def endTagAppletMarqueeObject(self, token): + if self.tree.elementInScope(token["name"]): + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError("end-tag-too-early", {"name": token["name"]}) + + if self.tree.elementInScope(token["name"]): + element = self.tree.openElements.pop() + while element.name != token["name"]: + element = self.tree.openElements.pop() + self.tree.clearActiveFormattingElements() + + def endTagBr(self, token): + self.parser.parseError("unexpected-end-tag-treated-as", + {"originalName": "br", "newName": "br element"}) + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(impliedTagToken("br", "StartTag")) + self.tree.openElements.pop() + + def endTagOther(self, token): + for node in self.tree.openElements[::-1]: + if node.name == token["name"]: + self.tree.generateImpliedEndTags(exclude=token["name"]) + if self.tree.openElements[-1].name != token["name"]: + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + while self.tree.openElements.pop() != node: + pass + break + else: + if node.nameTuple in specialElements: + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + break + + class TextPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = _utils.MethodDispatcher([]) + self.startTagHandler.default = self.startTagOther + self.endTagHandler = _utils.MethodDispatcher([ + ("script", self.endTagScript)]) + self.endTagHandler.default = 
self.endTagOther + + def processCharacters(self, token): + self.tree.insertText(token["data"]) + + def processEOF(self): + self.parser.parseError("expected-named-closing-tag-but-got-eof", + {"name": self.tree.openElements[-1].name}) + self.tree.openElements.pop() + self.parser.phase = self.parser.originalPhase + return True + + def startTagOther(self, token): + assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name'] + + def endTagScript(self, token): + node = self.tree.openElements.pop() + assert node.name == "script" + self.parser.phase = self.parser.originalPhase + # The rest of this method is all stuff that only happens if + # document.write works + + def endTagOther(self, token): + self.tree.openElements.pop() + self.parser.phase = self.parser.originalPhase + + class InTablePhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-table + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("caption", self.startTagCaption), + ("colgroup", self.startTagColgroup), + ("col", self.startTagCol), + (("tbody", "tfoot", "thead"), self.startTagRowGroup), + (("td", "th", "tr"), self.startTagImplyTbody), + ("table", self.startTagTable), + (("style", "script"), self.startTagStyleScript), + ("input", self.startTagInput), + ("form", self.startTagForm) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = _utils.MethodDispatcher([ + ("table", self.endTagTable), + (("body", "caption", "col", "colgroup", "html", "tbody", "td", + "tfoot", "th", "thead", "tr"), self.endTagIgnore) + ]) + self.endTagHandler.default = self.endTagOther + + # helper methods + def clearStackToTableContext(self): + # "clear the stack back to a table context" + while self.tree.openElements[-1].name not in ("table", "html"): + # self.parser.parseError("unexpected-implied-end-tag-in-table", + # {"name": self.tree.openElements[-1].name}) + self.tree.openElements.pop() + # When the current node is <html> it's an innerHTML case + + # processing methods + def processEOF(self): + if self.tree.openElements[-1].name != "html": + self.parser.parseError("eof-in-table") + else: + assert self.parser.innerHTML + # Stop parsing + + def processSpaceCharacters(self, token): + originalPhase = self.parser.phase + self.parser.phase = self.parser.phases["inTableText"] + self.parser.phase.originalPhase = originalPhase + self.parser.phase.processSpaceCharacters(token) + + def processCharacters(self, token): + originalPhase = self.parser.phase + self.parser.phase = self.parser.phases["inTableText"] + self.parser.phase.originalPhase = originalPhase + self.parser.phase.processCharacters(token) + + def insertText(self, token): + # If we get here there must be at least one non-whitespace character + # Do the table magic! 
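For a concrete sense of what endTagFormatting plus reconstructActiveFormattingElements produce, here is a sketch that feeds the classic misnested-formatting example through html5lib and serialises the result with ElementTree (namespaceHTMLElements=False keeps the tag names plain):

    import html5lib
    import xml.etree.ElementTree as ET

    doc = html5lib.parse("<p>1<b>2<i>3</b>4</i>5</p>", namespaceHTMLElements=False)
    print(ET.tostring(doc.find(".//p"), method="html").decode())
    # Expected per the HTML spec: <p>1<b>2<i>3</i></b><i>4</i>5</p>

The stray </b> is resolved by endTagFormatting, and the still-open <i> is cloned when formatting elements are reconstructed for the "4", which is why <i> appears twice in the output.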
+            self.tree.insertFromTable = True
+            self.parser.phases["inBody"].processCharacters(token)
+            self.tree.insertFromTable = False
+
+        def startTagCaption(self, token):
+            self.clearStackToTableContext()
+            self.tree.activeFormattingElements.append(Marker)
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inCaption"]
+
+        def startTagColgroup(self, token):
+            self.clearStackToTableContext()
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inColumnGroup"]
+
+        def startTagCol(self, token):
+            self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
+            return token
+
+        def startTagRowGroup(self, token):
+            self.clearStackToTableContext()
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inTableBody"]
+
+        def startTagImplyTbody(self, token):
+            self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
+            return token
+
+        def startTagTable(self, token):
+            self.parser.parseError("unexpected-start-tag-implies-end-tag",
+                                   {"startName": "table", "endName": "table"})
+            self.parser.phase.processEndTag(impliedTagToken("table"))
+            if not self.parser.innerHTML:
+                return token
+
+        def startTagStyleScript(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagInput(self, token):
+            if ("type" in token["data"] and
+                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
+                self.parser.parseError("unexpected-hidden-input-in-table")
+                self.tree.insertElement(token)
+                # XXX associate with form
+                self.tree.openElements.pop()
+            else:
+                self.startTagOther(token)
+
+        def startTagForm(self, token):
+            self.parser.parseError("unexpected-form-in-table")
+            if self.tree.formPointer is None:
+                self.tree.insertElement(token)
+                self.tree.formPointer = self.tree.openElements[-1]
+                self.tree.openElements.pop()
+
+        def startTagOther(self, token):
+            self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
+            # Do the table magic!
+            self.tree.insertFromTable = True
+            self.parser.phases["inBody"].processStartTag(token)
+            self.tree.insertFromTable = False
+
+        def endTagTable(self, token):
+            if self.tree.elementInScope("table", variant="table"):
+                self.tree.generateImpliedEndTags()
+                if self.tree.openElements[-1].name != "table":
+                    self.parser.parseError("end-tag-too-early-named",
+                                           {"gotName": "table",
+                                            "expectedName": self.tree.openElements[-1].name})
+                while self.tree.openElements[-1].name != "table":
+                    self.tree.openElements.pop()
+                self.tree.openElements.pop()
+                self.parser.resetInsertionMode()
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
+            # Do the table magic!
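The insertFromTable flag set above is the "table magic": content that may not legally sit directly inside <table> is foster-parented in front of the table instead. A quick sketch of the observable behaviour:

    import html5lib
    import xml.etree.ElementTree as ET

    doc = html5lib.parse("<table>oops<tr><td>cell</td></tr></table>",
                         namespaceHTMLElements=False)
    print(ET.tostring(doc.find(".//body"), method="html").decode())
    # The stray "oops" is hoisted out before the table:
    # <body>oops<table><tbody><tr><td>cell</td></tr></tbody></table></body>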
+            self.tree.insertFromTable = True
+            self.parser.phases["inBody"].processEndTag(token)
+            self.tree.insertFromTable = False
+
+    class InTableTextPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.originalPhase = None
+            self.characterTokens = []
+
+        def flushCharacters(self):
+            data = "".join([item["data"] for item in self.characterTokens])
+            if any([item not in spaceCharacters for item in data]):
+                token = {"type": tokenTypes["Characters"], "data": data}
+                self.parser.phases["inTable"].insertText(token)
+            elif data:
+                self.tree.insertText(data)
+            self.characterTokens = []
+
+        def processComment(self, token):
+            self.flushCharacters()
+            self.parser.phase = self.originalPhase
+            return token
+
+        def processEOF(self):
+            self.flushCharacters()
+            self.parser.phase = self.originalPhase
+            return True
+
+        def processCharacters(self, token):
+            if token["data"] == "\u0000":
+                return
+            self.characterTokens.append(token)
+
+        def processSpaceCharacters(self, token):
+            # pretty sure we should never reach here
+            self.characterTokens.append(token)
+            # assert False
+
+        def processStartTag(self, token):
+            self.flushCharacters()
+            self.parser.phase = self.originalPhase
+            return token
+
+        def processEndTag(self, token):
+            self.flushCharacters()
+            self.parser.phase = self.originalPhase
+            return token
+
+    class InCaptionPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
+                  "thead", "tr"), self.startTagTableElement)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("caption", self.endTagCaption),
+                ("table", self.endTagTable),
+                (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
+                  "thead", "tr"), self.endTagIgnore)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def ignoreEndTagCaption(self):
+            return not self.tree.elementInScope("caption", variant="table")
+
+        def processEOF(self):
+            self.parser.phases["inBody"].processEOF()
+
+        def processCharacters(self, token):
+            return self.parser.phases["inBody"].processCharacters(token)
+
+        def startTagTableElement(self, token):
+            self.parser.parseError()
+            # XXX Have to duplicate logic here to find out if the tag is ignored
+            ignoreEndTag = self.ignoreEndTagCaption()
+            self.parser.phase.processEndTag(impliedTagToken("caption"))
+            if not ignoreEndTag:
+                return token
+
+        def startTagOther(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def endTagCaption(self, token):
+            if not self.ignoreEndTagCaption():
+                # AT this code is quite similar to endTagTable in "InTable"
+                self.tree.generateImpliedEndTags()
+                if self.tree.openElements[-1].name != "caption":
+                    self.parser.parseError("expected-one-end-tag-but-got-another",
+                                           {"gotName": "caption",
+                                            "expectedName": self.tree.openElements[-1].name})
+                while self.tree.openElements[-1].name != "caption":
+                    self.tree.openElements.pop()
+                self.tree.openElements.pop()
+                self.tree.clearActiveFormattingElements()
+                self.parser.phase = self.parser.phases["inTable"]
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagTable(self, token):
+            self.parser.parseError()
+            ignoreEndTag = self.ignoreEndTagCaption()
+            self.parser.phase.processEndTag(impliedTagToken("caption"))
+            if not ignoreEndTag:
+                return token
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def endTagOther(self, token):
+            return self.parser.phases["inBody"].processEndTag(token)
+
+    class InColumnGroupPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-column
+
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("col", self.startTagCol)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("colgroup", self.endTagColgroup),
+                ("col", self.endTagCol)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def ignoreEndTagColgroup(self):
+            return self.tree.openElements[-1].name == "html"
+
+        def processEOF(self):
+            if self.tree.openElements[-1].name == "html":
+                assert self.parser.innerHTML
+                return
+            else:
+                ignoreEndTag = self.ignoreEndTagColgroup()
+                self.endTagColgroup(impliedTagToken("colgroup"))
+                if not ignoreEndTag:
+                    return True
+
+        def processCharacters(self, token):
+            ignoreEndTag = self.ignoreEndTagColgroup()
+            self.endTagColgroup(impliedTagToken("colgroup"))
+            if not ignoreEndTag:
+                return token
+
+        def startTagCol(self, token):
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+
+        def startTagOther(self, token):
+            ignoreEndTag = self.ignoreEndTagColgroup()
+            self.endTagColgroup(impliedTagToken("colgroup"))
+            if not ignoreEndTag:
+                return token
+
+        def endTagColgroup(self, token):
+            if self.ignoreEndTagColgroup():
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+            else:
+                self.tree.openElements.pop()
+                self.parser.phase = self.parser.phases["inTable"]
+
+        def endTagCol(self, token):
+            self.parser.parseError("no-end-tag", {"name": "col"})
+
+        def endTagOther(self, token):
+            ignoreEndTag = self.ignoreEndTagColgroup()
+            self.endTagColgroup(impliedTagToken("colgroup"))
+            if not ignoreEndTag:
+                return token
+
+    class InTableBodyPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("tr", self.startTagTr),
+                (("td", "th"), self.startTagTableCell),
+                (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
+                 self.startTagTableOther)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
+                ("table", self.endTagTable),
+                (("body", "caption", "col", "colgroup", "html", "td", "th",
+                  "tr"), self.endTagIgnore)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # helper methods
+        def clearStackToTableBodyContext(self):
+            while self.tree.openElements[-1].name not in ("tbody", "tfoot",
+                                                          "thead", "html"):
+                # self.parser.parseError("unexpected-implied-end-tag-in-table",
+                #                        {"name": self.tree.openElements[-1].name})
+                self.tree.openElements.pop()
+            if self.tree.openElements[-1].name == "html":
+                assert self.parser.innerHTML
+
+        # the rest
+        def processEOF(self):
+            self.parser.phases["inTable"].processEOF()
+
+        def processSpaceCharacters(self, token):
+            return self.parser.phases["inTable"].processSpaceCharacters(token)
+
+        def processCharacters(self, token):
+            return self.parser.phases["inTable"].processCharacters(token)
+
+        def startTagTr(self, token):
+            self.clearStackToTableBodyContext()
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inRow"]
+
+        def startTagTableCell(self, token):
+            self.parser.parseError("unexpected-cell-in-table-body",
+                                   {"name": token["name"]})
+            self.startTagTr(impliedTagToken("tr", "StartTag"))
+            return token
+
+        def startTagTableOther(self, token):
+            # XXX AT Any ideas on how to share this with endTagTable?
+            if (self.tree.elementInScope("tbody", variant="table") or
+                    self.tree.elementInScope("thead", variant="table") or
+                    self.tree.elementInScope("tfoot", variant="table")):
+                self.clearStackToTableBodyContext()
+                self.endTagTableRowGroup(
+                    impliedTagToken(self.tree.openElements[-1].name))
+                return token
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def startTagOther(self, token):
+            return self.parser.phases["inTable"].processStartTag(token)
+
+        def endTagTableRowGroup(self, token):
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.clearStackToTableBodyContext()
+                self.tree.openElements.pop()
+                self.parser.phase = self.parser.phases["inTable"]
+            else:
+                self.parser.parseError("unexpected-end-tag-in-table-body",
+                                       {"name": token["name"]})
+
+        def endTagTable(self, token):
+            if (self.tree.elementInScope("tbody", variant="table") or
+                    self.tree.elementInScope("thead", variant="table") or
+                    self.tree.elementInScope("tfoot", variant="table")):
+                self.clearStackToTableBodyContext()
+                self.endTagTableRowGroup(
+                    impliedTagToken(self.tree.openElements[-1].name))
+                return token
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag-in-table-body",
+                                   {"name": token["name"]})
+
+        def endTagOther(self, token):
+            return self.parser.phases["inTable"].processEndTag(token)
+
+    class InRowPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-row
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("td", "th"), self.startTagTableCell),
+                (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
+                  "tr"), self.startTagTableOther)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("tr", self.endTagTr),
+                ("table", self.endTagTable),
+                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
+                (("body", "caption", "col", "colgroup", "html", "td", "th"),
+                 self.endTagIgnore)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # helper methods (XXX unify this with other table helper methods)
+        def clearStackToTableRowContext(self):
+            while self.tree.openElements[-1].name not in ("tr", "html"):
+                self.parser.parseError("unexpected-implied-end-tag-in-table-row",
+                                       {"name": self.tree.openElements[-1].name})
+                self.tree.openElements.pop()
+
+        def ignoreEndTagTr(self):
+            return not self.tree.elementInScope("tr", variant="table")
+
+        # the rest
+        def processEOF(self):
+            self.parser.phases["inTable"].processEOF()
+
+        def processSpaceCharacters(self, token):
+            return self.parser.phases["inTable"].processSpaceCharacters(token)
+
+        def processCharacters(self, token):
+            return self.parser.phases["inTable"].processCharacters(token)
+
+        def startTagTableCell(self, token):
+            self.clearStackToTableRowContext()
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inCell"]
+            self.tree.activeFormattingElements.append(Marker)
+
+        def startTagTableOther(self, token):
+            ignoreEndTag = self.ignoreEndTagTr()
+            self.endTagTr(impliedTagToken("tr"))
+            # XXX how are we sure it's always ignored in the innerHTML case?
+            if not ignoreEndTag:
+                return token
+
+        def startTagOther(self, token):
+            return self.parser.phases["inTable"].processStartTag(token)
+
+        def endTagTr(self, token):
+            if not self.ignoreEndTagTr():
+                self.clearStackToTableRowContext()
+                self.tree.openElements.pop()
+                self.parser.phase = self.parser.phases["inTableBody"]
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagTable(self, token):
+            ignoreEndTag = self.ignoreEndTagTr()
+            self.endTagTr(impliedTagToken("tr"))
+            # Reprocess the current tag if the tr end tag was not ignored
+            # XXX how are we sure it's always ignored in the innerHTML case?
+            if not ignoreEndTag:
+                return token
+
+        def endTagTableRowGroup(self, token):
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.endTagTr(impliedTagToken("tr"))
+                return token
+            else:
+                self.parser.parseError()
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag-in-table-row",
+                                   {"name": token["name"]})
+
+        def endTagOther(self, token):
+            return self.parser.phases["inTable"].processEndTag(token)
+
+    class InCellPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
+                  "thead", "tr"), self.startTagTableOther)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                (("td", "th"), self.endTagTableCell),
+                (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
+                (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # helper
+        def closeCell(self):
+            if self.tree.elementInScope("td", variant="table"):
+                self.endTagTableCell(impliedTagToken("td"))
+            elif self.tree.elementInScope("th", variant="table"):
+                self.endTagTableCell(impliedTagToken("th"))
+
+        # the rest
+        def processEOF(self):
+            self.parser.phases["inBody"].processEOF()
+
+        def processCharacters(self, token):
+            return self.parser.phases["inBody"].processCharacters(token)
+
+        def startTagTableOther(self, token):
+            if (self.tree.elementInScope("td", variant="table") or
+                    self.tree.elementInScope("th", variant="table")):
+                self.closeCell()
+                return token
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def startTagOther(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def endTagTableCell(self, token):
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.tree.generateImpliedEndTags(token["name"])
+                if self.tree.openElements[-1].name != token["name"]:
+                    self.parser.parseError("unexpected-cell-end-tag",
+                                           {"name": token["name"]})
+                    while True:
+                        node = self.tree.openElements.pop()
+                        if node.name == token["name"]:
+                            break
+                else:
+                    self.tree.openElements.pop()
+                self.tree.clearActiveFormattingElements()
+                self.parser.phase = self.parser.phases["inRow"]
+            else:
+                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def endTagImply(self, token):
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.closeCell()
+                return token
+            else:
+                # sometimes innerHTML case
+                self.parser.parseError()
+
+        def endTagOther(self, token):
+            return self.parser.phases["inBody"].processEndTag(token)
+
+    class InSelectPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("option", self.startTagOption),
+                ("optgroup", self.startTagOptgroup),
+                ("select", self.startTagSelect),
+                (("input", "keygen", "textarea"), self.startTagInput),
+                ("script", self.startTagScript)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("option", self.endTagOption),
+                ("optgroup", self.endTagOptgroup),
+                ("select", self.endTagSelect)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-select
+        def processEOF(self):
+            if self.tree.openElements[-1].name != "html":
+                self.parser.parseError("eof-in-select")
+            else:
+                assert self.parser.innerHTML
+
+        def processCharacters(self, token):
+            if token["data"] == "\u0000":
+                return
+            self.tree.insertText(token["data"])
+
+        def startTagOption(self, token):
+            # We need to imply </option> if <option> is the current node.
+            if self.tree.openElements[-1].name == "option":
+                self.tree.openElements.pop()
+            self.tree.insertElement(token)
+
+        def startTagOptgroup(self, token):
+            if self.tree.openElements[-1].name == "option":
+                self.tree.openElements.pop()
+            if self.tree.openElements[-1].name == "optgroup":
+                self.tree.openElements.pop()
+            self.tree.insertElement(token)
+
+        def startTagSelect(self, token):
+            self.parser.parseError("unexpected-select-in-select")
+            self.endTagSelect(impliedTagToken("select"))
+
+        def startTagInput(self, token):
+            self.parser.parseError("unexpected-input-in-select")
+            if self.tree.elementInScope("select", variant="select"):
+                self.endTagSelect(impliedTagToken("select"))
+                return token
+            else:
+                assert self.parser.innerHTML
+
+        def startTagScript(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagOther(self, token):
+            self.parser.parseError("unexpected-start-tag-in-select",
+                                   {"name": token["name"]})
+
+        def endTagOption(self, token):
+            if self.tree.openElements[-1].name == "option":
+                self.tree.openElements.pop()
+            else:
+                self.parser.parseError("unexpected-end-tag-in-select",
+                                       {"name": "option"})
+
+        def endTagOptgroup(self, token):
+            # </optgroup> implicitly closes <option>
+            if (self.tree.openElements[-1].name == "option" and
+                    self.tree.openElements[-2].name == "optgroup"):
+                self.tree.openElements.pop()
+            # It also closes </optgroup>
+            if self.tree.openElements[-1].name == "optgroup":
+                self.tree.openElements.pop()
+            # But nothing else
+            else:
+                self.parser.parseError("unexpected-end-tag-in-select",
+                                       {"name": "optgroup"})
+
+        def endTagSelect(self, token):
+            if self.tree.elementInScope("select", variant="select"):
+                node = self.tree.openElements.pop()
+                while node.name != "select":
+                    node = self.tree.openElements.pop()
+                self.parser.resetInsertionMode()
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag-in-select",
+                                   {"name": token["name"]})
+
+    class InSelectInTablePhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
+                 self.startTagTable)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
+                 self.endTagTable)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            self.parser.phases["inSelect"].processEOF()
+
+        def processCharacters(self, token):
+            return self.parser.phases["inSelect"].processCharacters(token)
+
+        def startTagTable(self, token):
+            self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
+            self.endTagOther(impliedTagToken("select"))
+            return token
+
+        def startTagOther(self, token):
+            return self.parser.phases["inSelect"].processStartTag(token)
+
+        def endTagTable(self, token):
+            self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.endTagOther(impliedTagToken("select"))
+                return token
+
+        def endTagOther(self, token):
+            return self.parser.phases["inSelect"].processEndTag(token)
+
+    class InForeignContentPhase(Phase):
+        breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
+                                      "center", "code", "dd", "div", "dl", "dt",
+                                      "em", "embed", "h1", "h2", "h3",
+                                      "h4", "h5", "h6", "head", "hr", "i", "img",
+                                      "li", "listing", "menu", "meta", "nobr",
+                                      "ol", "p", "pre", "ruby", "s", "small",
+                                      "span", "strong", "strike", "sub", "sup",
+                                      "table", "tt", "u", "ul", "var"])
+
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+        def adjustSVGTagNames(self, token):
+            replacements = {"altglyph": "altGlyph",
+                            "altglyphdef": "altGlyphDef",
+                            "altglyphitem": "altGlyphItem",
+                            "animatecolor": "animateColor",
+                            "animatemotion": "animateMotion",
+                            "animatetransform": "animateTransform",
+                            "clippath": "clipPath",
+                            "feblend": "feBlend",
+                            "fecolormatrix": "feColorMatrix",
+                            "fecomponenttransfer": "feComponentTransfer",
+                            "fecomposite": "feComposite",
+                            "feconvolvematrix": "feConvolveMatrix",
+                            "fediffuselighting": "feDiffuseLighting",
+                            "fedisplacementmap": "feDisplacementMap",
+                            "fedistantlight": "feDistantLight",
+                            "feflood": "feFlood",
+                            "fefunca": "feFuncA",
+                            "fefuncb": "feFuncB",
+                            "fefuncg": "feFuncG",
+                            "fefuncr": "feFuncR",
+                            "fegaussianblur": "feGaussianBlur",
+                            "feimage": "feImage",
+                            "femerge": "feMerge",
+                            "femergenode": "feMergeNode",
+                            "femorphology": "feMorphology",
+                            "feoffset": "feOffset",
+                            "fepointlight": "fePointLight",
+                            "fespecularlighting": "feSpecularLighting",
+                            "fespotlight": "feSpotLight",
+                            "fetile": "feTile",
+                            "feturbulence": "feTurbulence",
+                            "foreignobject": "foreignObject",
+                            "glyphref": "glyphRef",
+                            "lineargradient": "linearGradient",
+                            "radialgradient": "radialGradient",
+                            "textpath": "textPath"}
+
+            if token["name"] in replacements:
+                token["name"] = replacements[token["name"]]
+
+        def processCharacters(self, token):
+            if token["data"] == "\u0000":
+                token["data"] = "\uFFFD"
+            elif (self.parser.framesetOK and
+                  any(char not in spaceCharacters for char in token["data"])):
+                self.parser.framesetOK = False
+            Phase.processCharacters(self, token)
+
+        def processStartTag(self, token):
+            currentNode = self.tree.openElements[-1]
+            if (token["name"] in self.breakoutElements or
+                (token["name"] == "font" and
+                 set(token["data"].keys()) & set(["color", "face", "size"]))):
+                self.parser.parseError("unexpected-html-element-in-foreign-content",
+                                       {"name": token["name"]})
+                while (self.tree.openElements[-1].namespace !=
+                       self.tree.defaultNamespace and
+                       not
self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and + not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])): + self.tree.openElements.pop() + return token + + else: + if currentNode.namespace == namespaces["mathml"]: + self.parser.adjustMathMLAttributes(token) + elif currentNode.namespace == namespaces["svg"]: + self.adjustSVGTagNames(token) + self.parser.adjustSVGAttributes(token) + self.parser.adjustForeignAttributes(token) + token["namespace"] = currentNode.namespace + self.tree.insertElement(token) + if token["selfClosing"]: + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def processEndTag(self, token): + nodeIndex = len(self.tree.openElements) - 1 + node = self.tree.openElements[-1] + if node.name.translate(asciiUpper2Lower) != token["name"]: + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + while True: + if node.name.translate(asciiUpper2Lower) == token["name"]: + # XXX this isn't in the spec but it seems necessary + if self.parser.phase == self.parser.phases["inTableText"]: + self.parser.phase.flushCharacters() + self.parser.phase = self.parser.phase.originalPhase + while self.tree.openElements.pop() != node: + assert self.tree.openElements + new_token = None + break + nodeIndex -= 1 + + node = self.tree.openElements[nodeIndex] + if node.namespace != self.tree.defaultNamespace: + continue + else: + new_token = self.parser.phase.processEndTag(token) + break + return new_token + + class AfterBodyPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + # Stop parsing + pass + + def processComment(self, token): + # This is needed because data is to be appended to the <html> element + # here and not to whatever is currently open. 
+ self.tree.insertComment(token, self.tree.openElements[0]) + + def processCharacters(self, token): + self.parser.parseError("unexpected-char-after-body") + self.parser.phase = self.parser.phases["inBody"] + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("unexpected-start-tag-after-body", + {"name": token["name"]}) + self.parser.phase = self.parser.phases["inBody"] + return token + + def endTagHtml(self, name): + if self.parser.innerHTML: + self.parser.parseError("unexpected-end-tag-after-body-innerhtml") + else: + self.parser.phase = self.parser.phases["afterAfterBody"] + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag-after-body", + {"name": token["name"]}) + self.parser.phase = self.parser.phases["inBody"] + return token + + class InFramesetPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("frameset", self.startTagFrameset), + ("frame", self.startTagFrame), + ("noframes", self.startTagNoframes) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = _utils.MethodDispatcher([ + ("frameset", self.endTagFrameset) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + if self.tree.openElements[-1].name != "html": + self.parser.parseError("eof-in-frameset") + else: + assert self.parser.innerHTML + + def processCharacters(self, token): + self.parser.parseError("unexpected-char-in-frameset") + + def startTagFrameset(self, token): + self.tree.insertElement(token) + + def startTagFrame(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + + def startTagNoframes(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("unexpected-start-tag-in-frameset", + {"name": token["name"]}) + + def endTagFrameset(self, token): + if self.tree.openElements[-1].name == "html": + # innerHTML case + self.parser.parseError("unexpected-frameset-in-frameset-innerhtml") + else: + self.tree.openElements.pop() + if (not self.parser.innerHTML and + self.tree.openElements[-1].name != "frameset"): + # If we're not in innerHTML mode and the current node is not a + # "frameset" element (anymore) then switch. 
+ self.parser.phase = self.parser.phases["afterFrameset"] + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag-in-frameset", + {"name": token["name"]}) + + class AfterFramesetPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#after3 + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("noframes", self.startTagNoframes) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = _utils.MethodDispatcher([ + ("html", self.endTagHtml) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + # Stop parsing + pass + + def processCharacters(self, token): + self.parser.parseError("unexpected-char-after-frameset") + + def startTagNoframes(self, token): + return self.parser.phases["inHead"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("unexpected-start-tag-after-frameset", + {"name": token["name"]}) + + def endTagHtml(self, token): + self.parser.phase = self.parser.phases["afterAfterFrameset"] + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag-after-frameset", + {"name": token["name"]}) + + class AfterAfterBodyPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml) + ]) + self.startTagHandler.default = self.startTagOther + + def processEOF(self): + pass + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processSpaceCharacters(self, token): + return self.parser.phases["inBody"].processSpaceCharacters(token) + + def processCharacters(self, token): + self.parser.parseError("expected-eof-but-got-char") + self.parser.phase = self.parser.phases["inBody"] + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("expected-eof-but-got-start-tag", + {"name": token["name"]}) + self.parser.phase = self.parser.phases["inBody"] + return token + + def processEndTag(self, token): + self.parser.parseError("expected-eof-but-got-end-tag", + {"name": token["name"]}) + self.parser.phase = self.parser.phases["inBody"] + return token + + class AfterAfterFramesetPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("noframes", self.startTagNoFrames) + ]) + self.startTagHandler.default = self.startTagOther + + def processEOF(self): + pass + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processSpaceCharacters(self, token): + return self.parser.phases["inBody"].processSpaceCharacters(token) + + def processCharacters(self, token): + self.parser.parseError("expected-eof-but-got-char") + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagNoFrames(self, token): + return self.parser.phases["inHead"].processStartTag(token) + + def startTagOther(self, token): + self.parser.parseError("expected-eof-but-got-start-tag", + {"name": token["name"]}) + + def processEndTag(self, token): + self.parser.parseError("expected-eof-but-got-end-tag", + {"name": token["name"]}) + # pylint:enable=unused-argument + + return { + "initial": InitialPhase, + "beforeHtml": BeforeHtmlPhase, + 
"beforeHead": BeforeHeadPhase, + "inHead": InHeadPhase, + "inHeadNoscript": InHeadNoscriptPhase, + "afterHead": AfterHeadPhase, + "inBody": InBodyPhase, + "text": TextPhase, + "inTable": InTablePhase, + "inTableText": InTableTextPhase, + "inCaption": InCaptionPhase, + "inColumnGroup": InColumnGroupPhase, + "inTableBody": InTableBodyPhase, + "inRow": InRowPhase, + "inCell": InCellPhase, + "inSelect": InSelectPhase, + "inSelectInTable": InSelectInTablePhase, + "inForeignContent": InForeignContentPhase, + "afterBody": AfterBodyPhase, + "inFrameset": InFramesetPhase, + "afterFrameset": AfterFramesetPhase, + "afterAfterBody": AfterAfterBodyPhase, + "afterAfterFrameset": AfterAfterFramesetPhase, + # XXX after after frameset + } + + +def adjust_attributes(token, replacements): + needs_adjustment = viewkeys(token['data']) & viewkeys(replacements) + if needs_adjustment: + token['data'] = OrderedDict((replacements.get(k, k), v) + for k, v in token['data'].items()) + + +def impliedTagToken(name, type="EndTag", attributes=None, + selfClosing=False): + if attributes is None: + attributes = {} + return {"type": tokenTypes[type], "name": name, "data": attributes, + "selfClosing": selfClosing} + + +class ParseError(Exception): + """Error in parsed document""" + pass diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyc new file mode 100644 index 0000000..39c3168 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.py new file mode 100644 index 0000000..53f4d44 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.py @@ -0,0 +1,409 @@ +from __future__ import absolute_import, division, unicode_literals +from pip._vendor.six import text_type + +import re + +from codecs import register_error, xmlcharrefreplace_errors + +from .constants import voidElements, booleanAttributes, spaceCharacters +from .constants import rcdataElements, entities, xmlEntities +from . import treewalkers, _utils +from xml.sax.saxutils import escape + +_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`" +_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]") +_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars + + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n" + "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15" + "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000" + "\u2001\u2002\u2003\u2004\u2005\u2006\u2007" + "\u2008\u2009\u200a\u2028\u2029\u202f\u205f" + "\u3000]") + + +_encode_entity_map = {} +_is_ucs4 = len("\U0010FFFF") == 1 +for k, v in list(entities.items()): + # skip multi-character entities + if ((_is_ucs4 and len(v) > 1) or + (not _is_ucs4 and len(v) > 2)): + continue + if v != "&": + if len(v) == 2: + v = _utils.surrogatePairToCodepoint(v) + else: + v = ord(v) + if v not in _encode_entity_map or k.islower(): + # prefer < over < and similarly for &, >, etc. 
+ _encode_entity_map[v] = k + + +def htmlentityreplace_errors(exc): + if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): + res = [] + codepoints = [] + skip = False + for i, c in enumerate(exc.object[exc.start:exc.end]): + if skip: + skip = False + continue + index = i + exc.start + if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]): + codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2]) + skip = True + else: + codepoint = ord(c) + codepoints.append(codepoint) + for cp in codepoints: + e = _encode_entity_map.get(cp) + if e: + res.append("&") + res.append(e) + if not e.endswith(";"): + res.append(";") + else: + res.append("&#x%s;" % (hex(cp)[2:])) + return ("".join(res), exc.end) + else: + return xmlcharrefreplace_errors(exc) + + +register_error("htmlentityreplace", htmlentityreplace_errors) + + +def serialize(input, tree="etree", encoding=None, **serializer_opts): + """Serializes the input token stream using the specified treewalker + + :arg input: the token stream to serialize + + :arg tree: the treewalker to use + + :arg encoding: the encoding to use + + :arg serializer_opts: any options to pass to the + :py:class:`html5lib.serializer.HTMLSerializer` that gets created + + :returns: the tree serialized as a string + + Example: + + >>> from html5lib.html5parser import parse + >>> from html5lib.serializer import serialize + >>> token_stream = parse('<html><body><p>Hi!</p></body></html>') + >>> serialize(token_stream, omit_optional_tags=False) + '<html><head></head><body><p>Hi!</p></body></html>' + + """ + # XXX: Should we cache this? + walker = treewalkers.getTreeWalker(tree) + s = HTMLSerializer(**serializer_opts) + return s.render(walker(input), encoding) + + +class HTMLSerializer(object): + + # attribute quoting options + quote_attr_values = "legacy" # be secure by default + quote_char = '"' + use_best_quote_char = True + + # tag syntax options + omit_optional_tags = True + minimize_boolean_attributes = True + use_trailing_solidus = False + space_before_trailing_solidus = True + + # escaping options + escape_lt_in_attrs = False + escape_rcdata = False + resolve_entities = True + + # miscellaneous options + alphabetical_attributes = False + inject_meta_charset = True + strip_whitespace = False + sanitize = False + + options = ("quote_attr_values", "quote_char", "use_best_quote_char", + "omit_optional_tags", "minimize_boolean_attributes", + "use_trailing_solidus", "space_before_trailing_solidus", + "escape_lt_in_attrs", "escape_rcdata", "resolve_entities", + "alphabetical_attributes", "inject_meta_charset", + "strip_whitespace", "sanitize") + + def __init__(self, **kwargs): + """Initialize HTMLSerializer + + :arg inject_meta_charset: Whether or not to inject the meta charset. + + Defaults to ``True``. + + :arg quote_attr_values: Whether to quote attribute values that don't + require quoting per legacy browser behavior (``"legacy"``), when + required by the standard (``"spec"``), or always (``"always"``). + + Defaults to ``"legacy"``. + + :arg quote_char: Use given quote character for attribute quoting. + + Defaults to ``"`` which will use double quotes unless attribute + value contains a double quote, in which case single quotes are + used. + + :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute + values. + + Defaults to ``False``. + + :arg escape_rcdata: Whether to escape characters that need to be + escaped within normal elements within rcdata elements such as + style. + + Defaults to ``False``. 
+
+        :arg resolve_entities: Whether to resolve named character entities that
+            appear in the source tree. The XML predefined entities &lt; &gt;
+            &amp; &quot; &apos; are unaffected by this setting.
+
+            Defaults to ``True``.
+
+        :arg strip_whitespace: Whether to remove semantically meaningless
+            whitespace. (This compresses all whitespace to a single space
+            except within ``pre``.)
+
+            Defaults to ``False``.
+
+        :arg minimize_boolean_attributes: Shortens boolean attributes to give
+            just the attribute value, for example::
+
+              <input disabled="disabled">
+
+            becomes::
+
+              <input disabled>
+
+            Defaults to ``True``.
+
+        :arg use_trailing_solidus: Includes a close-tag slash at the end of the
+            start tag of void elements (empty elements whose end tag is
+            forbidden). E.g. ``<hr/>``.
+
+            Defaults to ``False``.
+
+        :arg space_before_trailing_solidus: Places a space immediately before
+            the closing slash in a tag using a trailing solidus. E.g.
+            ``<hr />``. Requires ``use_trailing_solidus=True``.
+
+            Defaults to ``True``.
+
+        :arg sanitize: Strip all unsafe or unknown constructs from output.
+            See :py:class:`html5lib.filters.sanitizer.Filter`.
+
+            Defaults to ``False``.
+
+        :arg omit_optional_tags: Omit start/end tags that are optional.
+
+            Defaults to ``True``.
+
+        :arg alphabetical_attributes: Reorder attributes to be in alphabetical order.
+
+            Defaults to ``False``.
+
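+        Example (a minimal usage sketch; the option values and the exact
+        output string shown are illustrative and assume the default etree
+        tree builder):
+
+        >>> from html5lib import parse, getTreeWalker
+        >>> from html5lib.serializer import HTMLSerializer
+        >>> tree = parse('<html><body><p class=x>Hi!</p></body></html>')
+        >>> walker = getTreeWalker('etree')
+        >>> s = HTMLSerializer(quote_attr_values='always',
+        ...                    omit_optional_tags=False)
+        >>> s.render(walker(tree))
+        '<html><head></head><body><p class="x">Hi!</p></body></html>'
+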
+        """
+        unexpected_args = frozenset(kwargs) - frozenset(self.options)
+        if len(unexpected_args) > 0:
+            raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
+        if 'quote_char' in kwargs:
+            self.use_best_quote_char = False
+        for attr in self.options:
+            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
+        self.errors = []
+        self.strict = False
+
+    def encode(self, string):
+        assert(isinstance(string, text_type))
+        if self.encoding:
+            return string.encode(self.encoding, "htmlentityreplace")
+        else:
+            return string
+
+    def encodeStrict(self, string):
+        assert(isinstance(string, text_type))
+        if self.encoding:
+            return string.encode(self.encoding, "strict")
+        else:
+            return string
+
+    def serialize(self, treewalker, encoding=None):
+        # pylint:disable=too-many-nested-blocks
+        self.encoding = encoding
+        in_cdata = False
+        self.errors = []
+
+        if encoding and self.inject_meta_charset:
+            from .filters.inject_meta_charset import Filter
+            treewalker = Filter(treewalker, encoding)
+        # Alphabetical attributes is here under the assumption that none of
+        # the later filters add or change order of attributes; it needs to be
+        # before the sanitizer so escaped elements come out correctly
+        if self.alphabetical_attributes:
+            from .filters.alphabeticalattributes import Filter
+            treewalker = Filter(treewalker)
+        # WhitespaceFilter should be used before OptionalTagFilter
+        # for maximum efficiency of this latter filter
+        if self.strip_whitespace:
+            from .filters.whitespace import Filter
+            treewalker = Filter(treewalker)
+        if self.sanitize:
+            from .filters.sanitizer import Filter
+            treewalker = Filter(treewalker)
+        if self.omit_optional_tags:
+            from .filters.optionaltags import Filter
+            treewalker = Filter(treewalker)
+
+        for token in treewalker:
+            type = token["type"]
+            if type == "Doctype":
+                doctype = "<!DOCTYPE %s" % token["name"]
+
+                if token["publicId"]:
+                    doctype += ' PUBLIC "%s"' % token["publicId"]
+                elif token["systemId"]:
+                    doctype += " SYSTEM"
+                if token["systemId"]:
+                    if token["systemId"].find('"') >= 0:
+                        if token["systemId"].find("'") >= 0:
+                            self.serializeError("System identifier contains both single and double quote characters")
+                        quote_char = "'"
+                    else:
+                        quote_char = '"'
+                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
+
+                doctype += ">"
+                yield self.encodeStrict(doctype)
+
+            elif type in ("Characters", "SpaceCharacters"):
+                if type == "SpaceCharacters" or in_cdata:
+                    if in_cdata and token["data"].find("</") >= 0:
+                        self.serializeError("Unexpected </ in CDATA")
+                    yield self.encode(token["data"])
+                else:
+                    yield self.encode(escape(token["data"]))
+
+            elif type in ("StartTag", "EmptyTag"):
+                name = token["name"]
+                yield self.encodeStrict("<%s" % name)
+                if name in rcdataElements and not self.escape_rcdata:
+                    in_cdata = True
+                elif in_cdata:
+                    self.serializeError("Unexpected child element of a CDATA element")
+                for (_, attr_name), attr_value in token["data"].items():
+                    # TODO: Add namespace support here
+                    k = attr_name
+                    v = attr_value
+                    yield self.encodeStrict(' ')
+
+                    yield self.encodeStrict(k)
+                    if not self.minimize_boolean_attributes or \
+                        (k not in booleanAttributes.get(name, tuple()) and
+                         k not in booleanAttributes.get("", tuple())):
+                        yield self.encodeStrict("=")
+                        if self.quote_attr_values == "always" or len(v) == 0:
+                            quote_attr = True
+                        elif self.quote_attr_values == "spec":
+                            quote_attr = _quoteAttributeSpec.search(v) is not None
+                        elif self.quote_attr_values == "legacy":
+                            quote_attr = _quoteAttributeLegacy.search(v) is not None
+                        else:
+                            raise ValueError("quote_attr_values must be one of: "
+                                             "'always', 'spec', or 'legacy'")
+                        v = v.replace("&", "&amp;")
+                        if self.escape_lt_in_attrs:
+                            v = v.replace("<", "&lt;")
+                        if quote_attr:
+                            quote_char = self.quote_char
+                            if self.use_best_quote_char:
+                                if "'" in v and '"' not in v:
+                                    quote_char = '"'
+                                elif '"' in v and "'" not in v:
+                                    quote_char = "'"
+                            if quote_char == "'":
+                                v = v.replace("'", "&#39;")
+                            else:
+                                v = v.replace('"', "&quot;")
+                            yield self.encodeStrict(quote_char)
+                            yield self.encode(v)
+                            yield self.encodeStrict(quote_char)
+                        else:
+                            yield self.encode(v)
+                if name in voidElements and self.use_trailing_solidus:
+                    if self.space_before_trailing_solidus:
+                        yield self.encodeStrict(" /")
+                    else:
+                        yield self.encodeStrict("/")
+                yield self.encode(">")
+
+            elif type == "EndTag":
+                name = token["name"]
+                if name in rcdataElements:
+                    in_cdata = False
+                elif in_cdata:
+                    self.serializeError("Unexpected child element of a CDATA element")
+                yield self.encodeStrict("</%s>" % name)
+
+            elif type == "Comment":
+                data = token["data"]
+                if data.find("--") >= 0:
+                    self.serializeError("Comment contains --")
+                yield self.encodeStrict("<!--%s-->" % token["data"])
+
+            elif type == "Entity":
+                name = token["name"]
+                key = name + ";"
+                if key not in entities:
+                    self.serializeError("Entity %s not recognized" % name)
+                if self.resolve_entities and key not in xmlEntities:
+                    data = entities[key]
+                else:
+                    data = "&%s;" % name
+                yield self.encodeStrict(data)
+
+            else:
+                self.serializeError(token["data"])
+
+    def render(self, treewalker, encoding=None):
+        """Serializes the stream from the treewalker into a string
+
+        :arg treewalker: the treewalker to serialize
+
+        :arg encoding: the string encoding to use
+
+        :returns: the serialized tree
+
+        Example:
+
+        >>> from html5lib import parse, getTreeWalker
+        >>> from html5lib.serializer import HTMLSerializer
+        >>> token_stream = parse('<html><body>Hi!</body></html>')
+        >>> walker = getTreeWalker('etree')
+        >>> serializer = HTMLSerializer(omit_optional_tags=False)
+        >>> serializer.render(walker(token_stream))
+        '<html><head></head><body>Hi!</body></html>'
+
+        """
+        if encoding:
+            return b"".join(list(self.serialize(treewalker, encoding)))
b"".join(list(self.serialize(treewalker, encoding))) + else: + return "".join(list(self.serialize(treewalker))) + + def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): + # XXX The idea is to make data mandatory. + self.errors.append(data) + if self.strict: + raise SerializeError + + +class SerializeError(Exception): + """Error in serialized tree""" + pass diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyc new file mode 100644 index 0000000..108a1ba Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py new file mode 100644 index 0000000..7ef5959 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py @@ -0,0 +1,30 @@ +"""Tree adapters let you convert from one tree structure to another + +Example: + +.. code-block:: python + + from pip._vendor import html5lib + from pip._vendor.html5lib.treeadapters import genshi + + doc = '<html><body>Hi!</body></html>' + treebuilder = html5lib.getTreeBuilder('etree') + parser = html5lib.HTMLParser(tree=treebuilder) + tree = parser.parse(doc) + TreeWalker = html5lib.getTreeWalker('etree') + + genshi_tree = genshi.to_genshi(TreeWalker(tree)) + +""" +from __future__ import absolute_import, division, unicode_literals + +from . import sax + +__all__ = ["sax"] + +try: + from . import genshi # noqa +except ImportError: + pass +else: + __all__.append("genshi") diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.pyc new file mode 100644 index 0000000..57b99fd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py new file mode 100644 index 0000000..61d5fb6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py @@ -0,0 +1,54 @@ +from __future__ import absolute_import, division, unicode_literals + +from genshi.core import QName, Attrs +from genshi.core import START, END, TEXT, COMMENT, DOCTYPE + + +def to_genshi(walker): + """Convert a tree to a genshi tree + + :arg walker: the treewalker to use to walk the tree to convert it + + :returns: generator of genshi nodes + + """ + text = [] + for token in walker: + type = token["type"] + if type in ("Characters", "SpaceCharacters"): + text.append(token["data"]) + elif text: + yield TEXT, "".join(text), (None, -1, -1) + text = [] + + if type in ("StartTag", "EmptyTag"): + if token["namespace"]: + name = "{%s}%s" % (token["namespace"], token["name"]) + else: + name = token["name"] + attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value) + for attr, value in token["data"].items()]) + yield (START, (QName(name), attrs), (None, -1, -1)) + if type == "EmptyTag": + type = "EndTag" + + if type == "EndTag": + if token["namespace"]: + name = "{%s}%s" % (token["namespace"], token["name"]) + else: + name = token["name"] + + yield END, QName(name), (None, -1, 
+    """
+    text = []
+    for token in walker:
+        type = token["type"]
+        if type in ("Characters", "SpaceCharacters"):
+            text.append(token["data"])
+        elif text:
+            yield TEXT, "".join(text), (None, -1, -1)
+            text = []
+
+        if type in ("StartTag", "EmptyTag"):
+            if token["namespace"]:
+                name = "{%s}%s" % (token["namespace"], token["name"])
+            else:
+                name = token["name"]
+            attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
+                           for attr, value in token["data"].items()])
+            yield (START, (QName(name), attrs), (None, -1, -1))
+            if type == "EmptyTag":
+                type = "EndTag"
+
+        if type == "EndTag":
+            if token["namespace"]:
+                name = "{%s}%s" % (token["namespace"], token["name"])
+            else:
+                name = token["name"]
+
+            yield END, QName(name), (None, -1, -1)
+
+        elif type == "Comment":
+            yield COMMENT, token["data"], (None, -1, -1)
+
+        elif type == "Doctype":
+            yield DOCTYPE, (token["name"], token["publicId"],
+                            token["systemId"]), (None, -1, -1)
+
+        else:
+            pass  # FIXME: What to do?
+
+    if text:
+        yield TEXT, "".join(text), (None, -1, -1)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.pyc
new file mode 100644
index 0000000..a66e95b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
new file mode 100644
index 0000000..f4ccea5
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
@@ -0,0 +1,50 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from xml.sax.xmlreader import AttributesNSImpl
+
+from ..constants import adjustForeignAttributes, unadjustForeignAttributes
+
+prefix_mapping = {}
+for prefix, localName, namespace in adjustForeignAttributes.values():
+    if prefix is not None:
+        prefix_mapping[prefix] = namespace
+
+
+def to_sax(walker, handler):
+    """Call a SAX-like content handler with the events generated by the
+    treewalker
+
+    :arg walker: the treewalker to use to walk the tree being converted
+
+    :arg handler: SAX handler to use
+
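+    Example (a minimal sketch; the stdlib no-op
+    ``xml.sax.handler.ContentHandler`` stands in for a real handler):
+
+    >>> from xml.sax.handler import ContentHandler
+    >>> from html5lib import parse, getTreeWalker
+    >>> from html5lib.treeadapters.sax import to_sax
+    >>> tree = parse('<html><body>Hi!</body></html>')
+    >>> to_sax(getTreeWalker('etree')(tree), ContentHandler())
+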
+    """
+    handler.startDocument()
+    for prefix, namespace in prefix_mapping.items():
+        handler.startPrefixMapping(prefix, namespace)
+
+    for token in walker:
+        type = token["type"]
+        if type == "Doctype":
+            continue
+        elif type in ("StartTag", "EmptyTag"):
+            attrs = AttributesNSImpl(token["data"],
+                                     unadjustForeignAttributes)
+            handler.startElementNS((token["namespace"], token["name"]),
+                                   token["name"],
+                                   attrs)
+            if type == "EmptyTag":
+                handler.endElementNS((token["namespace"], token["name"]),
+                                     token["name"])
+        elif type == "EndTag":
+            handler.endElementNS((token["namespace"], token["name"]),
+                                 token["name"])
+        elif type in ("Characters", "SpaceCharacters"):
+            handler.characters(token["data"])
+        elif type == "Comment":
+            pass
+        else:
+            assert False, "Unknown token type"
+
+    for prefix, namespace in prefix_mapping.items():
+        handler.endPrefixMapping(prefix)
+    handler.endDocument()
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.pyc
new file mode 100644
index 0000000..1e721d8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
new file mode 100644
index 0000000..d44447e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
@@ -0,0 +1,88 @@
+"""A collection of modules for building different kinds of trees from HTML
+documents.
+
+To create a treebuilder for a new type of tree, you need to
+implement several things:
+
+1. A set of classes for various types of elements: Document, Doctype, Comment,
+   Element. These must implement the interface of ``base.treebuilders.Node``
+   (although comment nodes have a different signature for their constructor,
+   see ``treebuilders.etree.Comment``). Textual content may also be implemented
+   as another node type, or not, as your tree implementation requires.
+
+2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits
+   from ``treebuilders.base.TreeBuilder``. This has 4 required attributes:
+
+   * ``documentClass`` - the class to use for the bottommost node of a document
+   * ``elementClass`` - the class to use for HTML Elements
+   * ``commentClass`` - the class to use for comments
+   * ``doctypeClass`` - the class to use for doctypes
+
+   It also has one required method:
+
+   * ``getDocument`` - Returns the root node of the complete document tree
+
+3. If you wish to run the unit tests, you must also create a ``testSerializer``
+   method on your treebuilder which accepts a node and returns a string
+   containing Node and its children serialized according to the format used in
+   the unittests
+
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+from .._utils import default_etree
+
+treeBuilderCache = {}
+
+
+def getTreeBuilder(treeType, implementation=None, **kwargs):
+    """Get a TreeBuilder class for various types of trees with built-in support
+
+    :arg treeType: the name of the tree type required (case-insensitive). Supported
+        values are:
+
+        * "dom" - A generic builder for DOM implementations, defaulting to an
+          xml.dom.minidom based implementation.
+        * "etree" - A generic builder for tree implementations exposing an
+          ElementTree-like interface, defaulting to xml.etree.cElementTree if
+          available and xml.etree.ElementTree if not.
+        * "lxml" - An etree-based builder for lxml.etree, handling limitations
+          of lxml's implementation.
+
+    :arg implementation: (Currently applies to the "etree" and "dom" tree
+        types). A module implementing the tree type e.g. xml.etree.ElementTree
+        or xml.etree.cElementTree.
+
+    :arg kwargs: Any additional options to pass to the TreeBuilder when
+        creating it.
+
+    Example:
+
+    >>> from html5lib.treebuilders import getTreeBuilder
+    >>> builder = getTreeBuilder('etree')
+
+    """
+
+    treeType = treeType.lower()
+    if treeType not in treeBuilderCache:
+        if treeType == "dom":
+            from . import dom
+            # Come up with a sane default (pref. from the stdlib)
+            if implementation is None:
+                from xml.dom import minidom
+                implementation = minidom
+            # NEVER cache here, caching is done in the dom submodule
+            return dom.getDomModule(implementation, **kwargs).TreeBuilder
+        elif treeType == "lxml":
+            from . import etree_lxml
+            treeBuilderCache[treeType] = etree_lxml.TreeBuilder
+        elif treeType == "etree":
+            from . import etree
+            if implementation is None:
+                implementation = default_etree
+            # NEVER cache here, caching is done in the etree submodule
+            return etree.getETreeModule(implementation, **kwargs).TreeBuilder
+        else:
+            raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
+    return treeBuilderCache.get(treeType)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.pyc
new file mode 100644
index 0000000..927a29e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.py
new file mode 100644
index 0000000..73973db
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.py
@@ -0,0 +1,417 @@
+from __future__ import absolute_import, division, unicode_literals
+from pip._vendor.six import text_type
+
+from ..constants import scopingElements, tableInsertModeElements, namespaces
+
+# The scope markers are inserted when entering object elements,
+# marquees, table cells, and table captions, and are used to prevent formatting
+# from "leaking" into tables, object elements, and marquees.
+Marker = None
+
+listElementsMap = {
+    None: (frozenset(scopingElements), False),
+    "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
+    "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
+                                              (namespaces["html"], "ul")])), False),
+    "table": (frozenset([(namespaces["html"], "html"),
+                         (namespaces["html"], "table")]), False),
+    "select": (frozenset([(namespaces["html"], "optgroup"),
+                          (namespaces["html"], "option")]), True)
+}
+
+
+class Node(object):
+    """Represents an item in the tree"""
+    def __init__(self, name):
+        """Creates a Node
+
+        :arg name: The tag name associated with the node
+
+        """
+        # The tag name associated with the node
+        self.name = name
+        # The parent of the current node (or None for the document node)
+        self.parent = None
+        # The value of the current node (applies to text nodes and comments)
+        self.value = None
+        # A dict holding name -> value pairs for attributes of the node
+        self.attributes = {}
+        # A list of child nodes of the current node. This must include all
+        # elements but not necessarily other node types.
+        self.childNodes = []
+        # A list of miscellaneous flags that can be set on the node.
+        self._flags = []
+
+    def __str__(self):
+        attributesStr = " ".join(["%s=\"%s\"" % (name, value)
+                                  for name, value in
+                                  self.attributes.items()])
+        if attributesStr:
+            return "<%s %s>" % (self.name, attributesStr)
+        else:
+            return "<%s>" % (self.name)
+
+    def __repr__(self):
+        return "<%s>" % (self.name)
+
+    def appendChild(self, node):
+        """Insert node as a child of the current node
+
+        :arg node: the node to insert
+
+        """
+        raise NotImplementedError
+
+    def insertText(self, data, insertBefore=None):
+        """Insert data as text in the current node, positioned before the
+        start of node insertBefore or to the end of the node's text.
+ + :arg data: the data to insert + + :arg insertBefore: True if you want to insert the text before the node + and False if you want to insert it after the node + + """ + raise NotImplementedError + + def insertBefore(self, node, refNode): + """Insert node as a child of the current node, before refNode in the + list of child nodes. Raises ValueError if refNode is not a child of + the current node + + :arg node: the node to insert + + :arg refNode: the child node to insert the node before + + """ + raise NotImplementedError + + def removeChild(self, node): + """Remove node from the children of the current node + + :arg node: the child node to remove + + """ + raise NotImplementedError + + def reparentChildren(self, newParent): + """Move all the children of the current node to newParent. + This is needed so that trees that don't store text as nodes move the + text in the correct way + + :arg newParent: the node to move all this node's children to + + """ + # XXX - should this method be made more general? + for child in self.childNodes: + newParent.appendChild(child) + self.childNodes = [] + + def cloneNode(self): + """Return a shallow copy of the current node i.e. a node with the same + name and attributes but with no parent or child nodes + """ + raise NotImplementedError + + def hasContent(self): + """Return true if the node has children or text, false otherwise + """ + raise NotImplementedError + + +class ActiveFormattingElements(list): + def append(self, node): + equalCount = 0 + if node != Marker: + for element in self[::-1]: + if element == Marker: + break + if self.nodesEqual(element, node): + equalCount += 1 + if equalCount == 3: + self.remove(element) + break + list.append(self, node) + + def nodesEqual(self, node1, node2): + if not node1.nameTuple == node2.nameTuple: + return False + + if not node1.attributes == node2.attributes: + return False + + return True + + +class TreeBuilder(object): + """Base treebuilder implementation + + * documentClass - the class to use for the bottommost node of a document + * elementClass - the class to use for HTML Elements + * commentClass - the class to use for comments + * doctypeClass - the class to use for doctypes + + """ + # pylint:disable=not-callable + + # Document class + documentClass = None + + # The class to use for creating a node + elementClass = None + + # The class to use for creating comments + commentClass = None + + # The class to use for creating doctypes + doctypeClass = None + + # Fragment class + fragmentClass = None + + def __init__(self, namespaceHTMLElements): + """Create a TreeBuilder + + :arg namespaceHTMLElements: whether or not to namespace HTML elements + + """ + if namespaceHTMLElements: + self.defaultNamespace = "http://www.w3.org/1999/xhtml" + else: + self.defaultNamespace = None + self.reset() + + def reset(self): + self.openElements = [] + self.activeFormattingElements = ActiveFormattingElements() + + # XXX - rename these to headElement, formElement + self.headPointer = None + self.formPointer = None + + self.insertFromTable = False + + self.document = self.documentClass() + + def elementInScope(self, target, variant=None): + + # If we pass a node in we match that. 
if we pass a string + # match any node with that name + exactNode = hasattr(target, "nameTuple") + if not exactNode: + if isinstance(target, text_type): + target = (namespaces["html"], target) + assert isinstance(target, tuple) + + listElements, invert = listElementsMap[variant] + + for node in reversed(self.openElements): + if exactNode and node == target: + return True + elif not exactNode and node.nameTuple == target: + return True + elif (invert ^ (node.nameTuple in listElements)): + return False + + assert False # We should never reach this point + + def reconstructActiveFormattingElements(self): + # Within this algorithm the order of steps described in the + # specification is not quite the same as the order of steps in the + # code. It should still do the same though. + + # Step 1: stop the algorithm when there's nothing to do. + if not self.activeFormattingElements: + return + + # Step 2 and step 3: we start with the last element. So i is -1. + i = len(self.activeFormattingElements) - 1 + entry = self.activeFormattingElements[i] + if entry == Marker or entry in self.openElements: + return + + # Step 6 + while entry != Marker and entry not in self.openElements: + if i == 0: + # This will be reset to 0 below + i = -1 + break + i -= 1 + # Step 5: let entry be one earlier in the list. + entry = self.activeFormattingElements[i] + + while True: + # Step 7 + i += 1 + + # Step 8 + entry = self.activeFormattingElements[i] + clone = entry.cloneNode() # Mainly to get a new copy of the attributes + + # Step 9 + element = self.insertElement({"type": "StartTag", + "name": clone.name, + "namespace": clone.namespace, + "data": clone.attributes}) + + # Step 10 + self.activeFormattingElements[i] = element + + # Step 11 + if element == self.activeFormattingElements[-1]: + break + + def clearActiveFormattingElements(self): + entry = self.activeFormattingElements.pop() + while self.activeFormattingElements and entry != Marker: + entry = self.activeFormattingElements.pop() + + def elementInActiveFormattingElements(self, name): + """Check if an element exists between the end of the active + formatting elements and the last marker. If it does, return it, else + return false""" + + for item in self.activeFormattingElements[::-1]: + # Check for Marker first because if it's a Marker it doesn't have a + # name attribute. 
+ if item == Marker: + break + elif item.name == name: + return item + return False + + def insertRoot(self, token): + element = self.createElement(token) + self.openElements.append(element) + self.document.appendChild(element) + + def insertDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + + doctype = self.doctypeClass(name, publicId, systemId) + self.document.appendChild(doctype) + + def insertComment(self, token, parent=None): + if parent is None: + parent = self.openElements[-1] + parent.appendChild(self.commentClass(token["data"])) + + def createElement(self, token): + """Create an element but don't insert it anywhere""" + name = token["name"] + namespace = token.get("namespace", self.defaultNamespace) + element = self.elementClass(name, namespace) + element.attributes = token["data"] + return element + + def _getInsertFromTable(self): + return self._insertFromTable + + def _setInsertFromTable(self, value): + """Switch the function used to insert an element from the + normal one to the misnested table one and back again""" + self._insertFromTable = value + if value: + self.insertElement = self.insertElementTable + else: + self.insertElement = self.insertElementNormal + + insertFromTable = property(_getInsertFromTable, _setInsertFromTable) + + def insertElementNormal(self, token): + name = token["name"] + assert isinstance(name, text_type), "Element %s not unicode" % name + namespace = token.get("namespace", self.defaultNamespace) + element = self.elementClass(name, namespace) + element.attributes = token["data"] + self.openElements[-1].appendChild(element) + self.openElements.append(element) + return element + + def insertElementTable(self, token): + """Create an element and insert it into the tree""" + element = self.createElement(token) + if self.openElements[-1].name not in tableInsertModeElements: + return self.insertElementNormal(token) + else: + # We should be in the InTable mode. This means we want to do + # special magic element rearranging + parent, insertBefore = self.getTableMisnestedNodePosition() + if insertBefore is None: + parent.appendChild(element) + else: + parent.insertBefore(element, insertBefore) + self.openElements.append(element) + return element + + def insertText(self, data, parent=None): + """Insert text data.""" + if parent is None: + parent = self.openElements[-1] + + if (not self.insertFromTable or (self.insertFromTable and + self.openElements[-1].name + not in tableInsertModeElements)): + parent.insertText(data) + else: + # We should be in the InTable mode. 
This means we want to do + # special magic element rearranging + parent, insertBefore = self.getTableMisnestedNodePosition() + parent.insertText(data, insertBefore) + + def getTableMisnestedNodePosition(self): + """Get the foster parent element, and sibling to insert before + (or None) when inserting a misnested table node""" + # The foster parent element is the one which comes before the most + # recently opened table element + # XXX - this is really inelegant + lastTable = None + fosterParent = None + insertBefore = None + for elm in self.openElements[::-1]: + if elm.name == "table": + lastTable = elm + break + if lastTable: + # XXX - we should really check that this parent is actually a + # node here + if lastTable.parent: + fosterParent = lastTable.parent + insertBefore = lastTable + else: + fosterParent = self.openElements[ + self.openElements.index(lastTable) - 1] + else: + fosterParent = self.openElements[0] + return fosterParent, insertBefore + + def generateImpliedEndTags(self, exclude=None): + name = self.openElements[-1].name + # XXX td, th and tr are not actually needed + if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and + name != exclude): + self.openElements.pop() + # XXX This is not entirely what the specification says. We should + # investigate it more closely. + self.generateImpliedEndTags(exclude) + + def getDocument(self): + """Return the final tree""" + return self.document + + def getFragment(self): + """Return the final fragment""" + # assert self.innerHTML + fragment = self.fragmentClass() + self.openElements[0].reparentChildren(fragment) + return fragment + + def testSerializer(self, node): + """Serialize the subtree of node in the format required by unit tests + + :arg node: the node from which to start serializing + + """ + raise NotImplementedError diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyc new file mode 100644 index 0000000..5eb194b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.py new file mode 100644 index 0000000..dcfac22 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.py @@ -0,0 +1,236 @@ +from __future__ import absolute_import, division, unicode_literals + + +from collections import MutableMapping +from xml.dom import minidom, Node +import weakref + +from . import base +from .. 
import constants +from ..constants import namespaces +from .._utils import moduleFactoryFactory + + +def getDomBuilder(DomImplementation): + Dom = DomImplementation + + class AttrList(MutableMapping): + def __init__(self, element): + self.element = element + + def __iter__(self): + return iter(self.element.attributes.keys()) + + def __setitem__(self, name, value): + if isinstance(name, tuple): + raise NotImplementedError + else: + attr = self.element.ownerDocument.createAttribute(name) + attr.value = value + self.element.attributes[name] = attr + + def __len__(self): + return len(self.element.attributes) + + def items(self): + return list(self.element.attributes.items()) + + def values(self): + return list(self.element.attributes.values()) + + def __getitem__(self, name): + if isinstance(name, tuple): + raise NotImplementedError + else: + return self.element.attributes[name].value + + def __delitem__(self, name): + if isinstance(name, tuple): + raise NotImplementedError + else: + del self.element.attributes[name] + + class NodeBuilder(base.Node): + def __init__(self, element): + base.Node.__init__(self, element.nodeName) + self.element = element + + namespace = property(lambda self: hasattr(self.element, "namespaceURI") and + self.element.namespaceURI or None) + + def appendChild(self, node): + node.parent = self + self.element.appendChild(node.element) + + def insertText(self, data, insertBefore=None): + text = self.element.ownerDocument.createTextNode(data) + if insertBefore: + self.element.insertBefore(text, insertBefore.element) + else: + self.element.appendChild(text) + + def insertBefore(self, node, refNode): + self.element.insertBefore(node.element, refNode.element) + node.parent = self + + def removeChild(self, node): + if node.element.parentNode == self.element: + self.element.removeChild(node.element) + node.parent = None + + def reparentChildren(self, newParent): + while self.element.hasChildNodes(): + child = self.element.firstChild + self.element.removeChild(child) + newParent.element.appendChild(child) + self.childNodes = [] + + def getAttributes(self): + return AttrList(self.element) + + def setAttributes(self, attributes): + if attributes: + for name, value in list(attributes.items()): + if isinstance(name, tuple): + if name[0] is not None: + qualifiedName = (name[0] + ":" + name[1]) + else: + qualifiedName = name[1] + self.element.setAttributeNS(name[2], qualifiedName, + value) + else: + self.element.setAttribute( + name, value) + attributes = property(getAttributes, setAttributes) + + def cloneNode(self): + return NodeBuilder(self.element.cloneNode(False)) + + def hasContent(self): + return self.element.hasChildNodes() + + def getNameTuple(self): + if self.namespace is None: + return namespaces["html"], self.name + else: + return self.namespace, self.name + + nameTuple = property(getNameTuple) + + class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable + def documentClass(self): + self.dom = Dom.getDOMImplementation().createDocument(None, None, None) + return weakref.proxy(self) + + def insertDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + + domimpl = Dom.getDOMImplementation() + doctype = domimpl.createDocumentType(name, publicId, systemId) + self.document.appendChild(NodeBuilder(doctype)) + if Dom == minidom: + doctype.ownerDocument = self.dom + + def elementClass(self, name, namespace=None): + if namespace is None and self.defaultNamespace is None: + node = self.dom.createElement(name) + else: + 
node = self.dom.createElementNS(namespace, name) + + return NodeBuilder(node) + + def commentClass(self, data): + return NodeBuilder(self.dom.createComment(data)) + + def fragmentClass(self): + return NodeBuilder(self.dom.createDocumentFragment()) + + def appendChild(self, node): + self.dom.appendChild(node.element) + + def testSerializer(self, element): + return testSerializer(element) + + def getDocument(self): + return self.dom + + def getFragment(self): + return base.TreeBuilder.getFragment(self).element + + def insertText(self, data, parent=None): + data = data + if parent != self: + base.TreeBuilder.insertText(self, data, parent) + else: + # HACK: allow text nodes as children of the document node + if hasattr(self.dom, '_child_node_types'): + # pylint:disable=protected-access + if Node.TEXT_NODE not in self.dom._child_node_types: + self.dom._child_node_types = list(self.dom._child_node_types) + self.dom._child_node_types.append(Node.TEXT_NODE) + self.dom.appendChild(self.dom.createTextNode(data)) + + implementation = DomImplementation + name = None + + def testSerializer(element): + element.normalize() + rv = [] + + def serializeElement(element, indent=0): + if element.nodeType == Node.DOCUMENT_TYPE_NODE: + if element.name: + if element.publicId or element.systemId: + publicId = element.publicId or "" + systemId = element.systemId or "" + rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" % + (' ' * indent, element.name, publicId, systemId)) + else: + rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name)) + else: + rv.append("|%s<!DOCTYPE >" % (' ' * indent,)) + elif element.nodeType == Node.DOCUMENT_NODE: + rv.append("#document") + elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE: + rv.append("#document-fragment") + elif element.nodeType == Node.COMMENT_NODE: + rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue)) + elif element.nodeType == Node.TEXT_NODE: + rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue)) + else: + if (hasattr(element, "namespaceURI") and + element.namespaceURI is not None): + name = "%s %s" % (constants.prefixes[element.namespaceURI], + element.nodeName) + else: + name = element.nodeName + rv.append("|%s<%s>" % (' ' * indent, name)) + if element.hasAttributes(): + attributes = [] + for i in range(len(element.attributes)): + attr = element.attributes.item(i) + name = attr.nodeName + value = attr.value + ns = attr.namespaceURI + if ns: + name = "%s %s" % (constants.prefixes[ns], attr.localName) + else: + name = attr.nodeName + attributes.append((name, value)) + + for name, value in sorted(attributes): + rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) + indent += 2 + for child in element.childNodes: + serializeElement(child, indent) + serializeElement(element, 0) + + return "\n".join(rv) + + return locals() + + +# The actual means to get a module! 
+getDomModule = moduleFactoryFactory(getDomBuilder) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyc new file mode 100644 index 0000000..a687b29 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.py new file mode 100644 index 0000000..0dedf44 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.py @@ -0,0 +1,340 @@ +from __future__ import absolute_import, division, unicode_literals +# pylint:disable=protected-access + +from pip._vendor.six import text_type + +import re + +from . import base +from .. import _ihatexml +from .. import constants +from ..constants import namespaces +from .._utils import moduleFactoryFactory + +tag_regexp = re.compile("{([^}]*)}(.*)") + + +def getETreeBuilder(ElementTreeImplementation, fullTree=False): + ElementTree = ElementTreeImplementation + ElementTreeCommentType = ElementTree.Comment("asd").tag + + class Element(base.Node): + def __init__(self, name, namespace=None): + self._name = name + self._namespace = namespace + self._element = ElementTree.Element(self._getETreeTag(name, + namespace)) + if namespace is None: + self.nameTuple = namespaces["html"], self._name + else: + self.nameTuple = self._namespace, self._name + self.parent = None + self._childNodes = [] + self._flags = [] + + def _getETreeTag(self, name, namespace): + if namespace is None: + etree_tag = name + else: + etree_tag = "{%s}%s" % (namespace, name) + return etree_tag + + def _setName(self, name): + self._name = name + self._element.tag = self._getETreeTag(self._name, self._namespace) + + def _getName(self): + return self._name + + name = property(_getName, _setName) + + def _setNamespace(self, namespace): + self._namespace = namespace + self._element.tag = self._getETreeTag(self._name, self._namespace) + + def _getNamespace(self): + return self._namespace + + namespace = property(_getNamespace, _setNamespace) + + def _getAttributes(self): + return self._element.attrib + + def _setAttributes(self, attributes): + # Delete existing attributes first + # XXX - there may be a better way to do this... 
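+ # (removing keys one at a time clears the element's existing attrib
+ # mapping in place before the new attributes are applied)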
+ for key in list(self._element.attrib.keys()): + del self._element.attrib[key] + for key, value in attributes.items(): + if isinstance(key, tuple): + name = "{%s}%s" % (key[2], key[1]) + else: + name = key + self._element.set(name, value) + + attributes = property(_getAttributes, _setAttributes) + + def _getChildNodes(self): + return self._childNodes + + def _setChildNodes(self, value): + del self._element[:] + self._childNodes = [] + for element in value: + self.insertChild(element) + + childNodes = property(_getChildNodes, _setChildNodes) + + def hasContent(self): + """Return true if the node has children or text""" + return bool(self._element.text or len(self._element)) + + def appendChild(self, node): + self._childNodes.append(node) + self._element.append(node._element) + node.parent = self + + def insertBefore(self, node, refNode): + index = list(self._element).index(refNode._element) + self._element.insert(index, node._element) + node.parent = self + + def removeChild(self, node): + self._childNodes.remove(node) + self._element.remove(node._element) + node.parent = None + + def insertText(self, data, insertBefore=None): + if not(len(self._element)): + if not self._element.text: + self._element.text = "" + self._element.text += data + elif insertBefore is None: + # Insert the text as the tail of the last child element + if not self._element[-1].tail: + self._element[-1].tail = "" + self._element[-1].tail += data + else: + # Insert the text before the specified node + children = list(self._element) + index = children.index(insertBefore._element) + if index > 0: + if not self._element[index - 1].tail: + self._element[index - 1].tail = "" + self._element[index - 1].tail += data + else: + if not self._element.text: + self._element.text = "" + self._element.text += data + + def cloneNode(self): + element = type(self)(self.name, self.namespace) + for name, value in self.attributes.items(): + element.attributes[name] = value + return element + + def reparentChildren(self, newParent): + if newParent.childNodes: + newParent.childNodes[-1]._element.tail += self._element.text + else: + if not newParent._element.text: + newParent._element.text = "" + if self._element.text is not None: + newParent._element.text += self._element.text + self._element.text = "" + base.Node.reparentChildren(self, newParent) + + class Comment(Element): + def __init__(self, data): + # Use the superclass constructor to set all properties on the + # wrapper element + self._element = ElementTree.Comment(data) + self.parent = None + self._childNodes = [] + self._flags = [] + + def _getData(self): + return self._element.text + + def _setData(self, value): + self._element.text = value + + data = property(_getData, _setData) + + class DocumentType(Element): + def __init__(self, name, publicId, systemId): + Element.__init__(self, "<!DOCTYPE>") + self._element.text = name + self.publicId = publicId + self.systemId = systemId + + def _getPublicId(self): + return self._element.get("publicId", "") + + def _setPublicId(self, value): + if value is not None: + self._element.set("publicId", value) + + publicId = property(_getPublicId, _setPublicId) + + def _getSystemId(self): + return self._element.get("systemId", "") + + def _setSystemId(self, value): + if value is not None: + self._element.set("systemId", value) + + systemId = property(_getSystemId, _setSystemId) + + class Document(Element): + def __init__(self): + Element.__init__(self, "DOCUMENT_ROOT") + + class DocumentFragment(Element): + def __init__(self): + 
Element.__init__(self, "DOCUMENT_FRAGMENT") + + def testSerializer(element): + rv = [] + + def serializeElement(element, indent=0): + if not(hasattr(element, "tag")): + element = element.getroot() + if element.tag == "<!DOCTYPE>": + if element.get("publicId") or element.get("systemId"): + publicId = element.get("publicId") or "" + systemId = element.get("systemId") or "" + rv.append("""<!DOCTYPE %s "%s" "%s">""" % + (element.text, publicId, systemId)) + else: + rv.append("<!DOCTYPE %s>" % (element.text,)) + elif element.tag == "DOCUMENT_ROOT": + rv.append("#document") + if element.text is not None: + rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) + if element.tail is not None: + raise TypeError("Document node cannot have tail") + if hasattr(element, "attrib") and len(element.attrib): + raise TypeError("Document node cannot have attributes") + elif element.tag == ElementTreeCommentType: + rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) + else: + assert isinstance(element.tag, text_type), \ + "Expected unicode, got %s, %s" % (type(element.tag), element.tag) + nsmatch = tag_regexp.match(element.tag) + + if nsmatch is None: + name = element.tag + else: + ns, name = nsmatch.groups() + prefix = constants.prefixes[ns] + name = "%s %s" % (prefix, name) + rv.append("|%s<%s>" % (' ' * indent, name)) + + if hasattr(element, "attrib"): + attributes = [] + for name, value in element.attrib.items(): + nsmatch = tag_regexp.match(name) + if nsmatch is not None: + ns, name = nsmatch.groups() + prefix = constants.prefixes[ns] + attr_string = "%s %s" % (prefix, name) + else: + attr_string = name + attributes.append((attr_string, value)) + + for name, value in sorted(attributes): + rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) + if element.text: + rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) + indent += 2 + for child in element: + serializeElement(child, indent) + if element.tail: + rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) + serializeElement(element, 0) + + return "\n".join(rv) + + def tostring(element): # pylint:disable=unused-variable + """Serialize an element and its child nodes to a string""" + rv = [] + filter = _ihatexml.InfosetFilter() + + def serializeElement(element): + if isinstance(element, ElementTree.ElementTree): + element = element.getroot() + + if element.tag == "<!DOCTYPE>": + if element.get("publicId") or element.get("systemId"): + publicId = element.get("publicId") or "" + systemId = element.get("systemId") or "" + rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" % + (element.text, publicId, systemId)) + else: + rv.append("<!DOCTYPE %s>" % (element.text,)) + elif element.tag == "DOCUMENT_ROOT": + if element.text is not None: + rv.append(element.text) + if element.tail is not None: + raise TypeError("Document node cannot have tail") + if hasattr(element, "attrib") and len(element.attrib): + raise TypeError("Document node cannot have attributes") + + for child in element: + serializeElement(child) + + elif element.tag == ElementTreeCommentType: + rv.append("<!--%s-->" % (element.text,)) + else: + # This is assumed to be an ordinary element + if not element.attrib: + rv.append("<%s>" % (filter.fromXmlName(element.tag),)) + else: + attr = " ".join(["%s=\"%s\"" % ( + filter.fromXmlName(name), value) + for name, value in element.attrib.items()]) + rv.append("<%s %s>" % (element.tag, attr)) + if element.text: + rv.append(element.text) + + for child in element: + serializeElement(child) + + rv.append("</%s>" % (element.tag,)) + + 
if element.tail: + rv.append(element.tail) + + serializeElement(element) + + return "".join(rv) + + class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable + documentClass = Document + doctypeClass = DocumentType + elementClass = Element + commentClass = Comment + fragmentClass = DocumentFragment + implementation = ElementTreeImplementation + + def testSerializer(self, element): + return testSerializer(element) + + def getDocument(self): + if fullTree: + return self.document._element + else: + if self.defaultNamespace is not None: + return self.document._element.find( + "{%s}html" % self.defaultNamespace) + else: + return self.document._element.find("html") + + def getFragment(self): + return base.TreeBuilder.getFragment(self)._element + + return locals() + + +getETreeModule = moduleFactoryFactory(getETreeBuilder) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyc new file mode 100644 index 0000000..d4de0c8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py new file mode 100644 index 0000000..ca12a99 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py @@ -0,0 +1,366 @@ +"""Module for supporting the lxml.etree library. The idea here is to use as much +of the native library as possible, without using fragile hacks like custom element +names that break between releases. The downside of this is that we cannot represent +all possible trees; specifically the following are known to cause problems: + +Text or comments as siblings of the root element +Doctypes with no name + +When any of these things occur, we emit a DataLossWarning +""" + +from __future__ import absolute_import, division, unicode_literals +# pylint:disable=protected-access + +import warnings +import re +import sys + +from . import base +from ..constants import DataLossWarning +from .. import constants +from . import etree as etree_builders +from ..
import _ihatexml + +import lxml.etree as etree + + +fullTree = True +tag_regexp = re.compile("{([^}]*)}(.*)") + +comment_type = etree.Comment("asd").tag + + +class DocumentType(object): + def __init__(self, name, publicId, systemId): + self.name = name + self.publicId = publicId + self.systemId = systemId + + +class Document(object): + def __init__(self): + self._elementTree = None + self._childNodes = [] + + def appendChild(self, element): + self._elementTree.getroot().addnext(element._element) + + def _getChildNodes(self): + return self._childNodes + + childNodes = property(_getChildNodes) + + +def testSerializer(element): + rv = [] + infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) + + def serializeElement(element, indent=0): + if not hasattr(element, "tag"): + if hasattr(element, "getroot"): + # Full tree case + rv.append("#document") + if element.docinfo.internalDTD: + if not (element.docinfo.public_id or + element.docinfo.system_url): + dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name + else: + dtd_str = """<!DOCTYPE %s "%s" "%s">""" % ( + element.docinfo.root_name, + element.docinfo.public_id, + element.docinfo.system_url) + rv.append("|%s%s" % (' ' * (indent + 2), dtd_str)) + next_element = element.getroot() + while next_element.getprevious() is not None: + next_element = next_element.getprevious() + while next_element is not None: + serializeElement(next_element, indent + 2) + next_element = next_element.getnext() + elif isinstance(element, str) or isinstance(element, bytes): + # Text in a fragment + assert isinstance(element, str) or sys.version_info[0] == 2 + rv.append("|%s\"%s\"" % (' ' * indent, element)) + else: + # Fragment case + rv.append("#document-fragment") + for next_element in element: + serializeElement(next_element, indent + 2) + elif element.tag == comment_type: + rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) + if hasattr(element, "tail") and element.tail: + rv.append("|%s\"%s\"" % (' ' * indent, element.tail)) + else: + assert isinstance(element, etree._Element) + nsmatch = etree_builders.tag_regexp.match(element.tag) + if nsmatch is not None: + ns = nsmatch.group(1) + tag = nsmatch.group(2) + prefix = constants.prefixes[ns] + rv.append("|%s<%s %s>" % (' ' * indent, prefix, + infosetFilter.fromXmlName(tag))) + else: + rv.append("|%s<%s>" % (' ' * indent, + infosetFilter.fromXmlName(element.tag))) + + if hasattr(element, "attrib"): + attributes = [] + for name, value in element.attrib.items(): + nsmatch = tag_regexp.match(name) + if nsmatch is not None: + ns, name = nsmatch.groups() + name = infosetFilter.fromXmlName(name) + prefix = constants.prefixes[ns] + attr_string = "%s %s" % (prefix, name) + else: + attr_string = infosetFilter.fromXmlName(name) + attributes.append((attr_string, value)) + + for name, value in sorted(attributes): + rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) + + if element.text: + rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) + indent += 2 + for child in element: + serializeElement(child, indent) + if hasattr(element, "tail") and element.tail: + rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) + serializeElement(element, 0) + + return "\n".join(rv) + + +def tostring(element): + """Serialize an element and its child nodes to a string""" + rv = [] + + def serializeElement(element): + if not hasattr(element, "tag"): + if element.docinfo.internalDTD: + if element.docinfo.doctype: + dtd_str = element.docinfo.doctype + else: + dtd_str = "<!DOCTYPE %s>" % 
element.docinfo.root_name + rv.append(dtd_str) + serializeElement(element.getroot()) + + elif element.tag == comment_type: + rv.append("<!--%s-->" % (element.text,)) + + else: + # This is assumed to be an ordinary element + if not element.attrib: + rv.append("<%s>" % (element.tag,)) + else: + attr = " ".join(["%s=\"%s\"" % (name, value) + for name, value in element.attrib.items()]) + rv.append("<%s %s>" % (element.tag, attr)) + if element.text: + rv.append(element.text) + + for child in element: + serializeElement(child) + + rv.append("</%s>" % (element.tag,)) + + if hasattr(element, "tail") and element.tail: + rv.append(element.tail) + + serializeElement(element) + + return "".join(rv) + + +class TreeBuilder(base.TreeBuilder): + documentClass = Document + doctypeClass = DocumentType + elementClass = None + commentClass = None + fragmentClass = Document + implementation = etree + + def __init__(self, namespaceHTMLElements, fullTree=False): + builder = etree_builders.getETreeModule(etree, fullTree=fullTree) + infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) + self.namespaceHTMLElements = namespaceHTMLElements + + class Attributes(dict): + def __init__(self, element, value=None): + if value is None: + value = {} + self._element = element + dict.__init__(self, value) # pylint:disable=non-parent-init-called + for key, value in self.items(): + if isinstance(key, tuple): + name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) + else: + name = infosetFilter.coerceAttribute(key) + self._element._element.attrib[name] = value + + def __setitem__(self, key, value): + dict.__setitem__(self, key, value) + if isinstance(key, tuple): + name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) + else: + name = infosetFilter.coerceAttribute(key) + self._element._element.attrib[name] = value + + class Element(builder.Element): + def __init__(self, name, namespace): + name = infosetFilter.coerceElement(name) + builder.Element.__init__(self, name, namespace=namespace) + self._attributes = Attributes(self) + + def _setName(self, name): + self._name = infosetFilter.coerceElement(name) + self._element.tag = self._getETreeTag( + self._name, self._namespace) + + def _getName(self): + return infosetFilter.fromXmlName(self._name) + + name = property(_getName, _setName) + + def _getAttributes(self): + return self._attributes + + def _setAttributes(self, attributes): + self._attributes = Attributes(self, attributes) + + attributes = property(_getAttributes, _setAttributes) + + def insertText(self, data, insertBefore=None): + data = infosetFilter.coerceCharacters(data) + builder.Element.insertText(self, data, insertBefore) + + def appendChild(self, child): + builder.Element.appendChild(self, child) + + class Comment(builder.Comment): + def __init__(self, data): + data = infosetFilter.coerceComment(data) + builder.Comment.__init__(self, data) + + def _setData(self, data): + data = infosetFilter.coerceComment(data) + self._element.text = data + + def _getData(self): + return self._element.text + + data = property(_getData, _setData) + + self.elementClass = Element + self.commentClass = Comment + # self.fragmentClass = builder.DocumentFragment + base.TreeBuilder.__init__(self, namespaceHTMLElements) + + def reset(self): + base.TreeBuilder.reset(self) + self.insertComment = self.insertCommentInitial + self.initial_comments = [] + self.doctype = None + + def testSerializer(self, element): + return testSerializer(element) + + def getDocument(self): + if 
fullTree: + return self.document._elementTree + else: + return self.document._elementTree.getroot() + + def getFragment(self): + fragment = [] + element = self.openElements[0]._element + if element.text: + fragment.append(element.text) + fragment.extend(list(element)) + if element.tail: + fragment.append(element.tail) + return fragment + + def insertDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + + if not name: + warnings.warn("lxml cannot represent empty doctype", DataLossWarning) + self.doctype = None + else: + coercedName = self.infosetFilter.coerceElement(name) + if coercedName != name: + warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning) + + doctype = self.doctypeClass(coercedName, publicId, systemId) + self.doctype = doctype + + def insertCommentInitial(self, data, parent=None): + assert parent is None or parent is self.document + assert self.document._elementTree is None + self.initial_comments.append(data) + + def insertCommentMain(self, data, parent=None): + if (parent == self.document and + self.document._elementTree.getroot()[-1].tag == comment_type): + warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning) + super(TreeBuilder, self).insertComment(data, parent) + + def insertRoot(self, token): + # Because of the way libxml2 works, it doesn't seem to be possible to + # alter information like the doctype after the tree has been parsed. + # Therefore we need to use the built-in parser to create our initial + # tree, after which we can add elements like normal + docStr = "" + if self.doctype: + assert self.doctype.name + docStr += "<!DOCTYPE %s" % self.doctype.name + if (self.doctype.publicId is not None or + self.doctype.systemId is not None): + docStr += (' PUBLIC "%s" ' % + (self.infosetFilter.coercePubid(self.doctype.publicId or ""))) + if self.doctype.systemId: + sysid = self.doctype.systemId + if sysid.find("'") >= 0 and sysid.find('"') >= 0: + warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning) + sysid = sysid.replace("'", 'U00027') + if sysid.find("'") >= 0: + docStr += '"%s"' % sysid + else: + docStr += "'%s'" % sysid + else: + docStr += "''" + docStr += ">" + if self.doctype.name != token["name"]: + warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning) + docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>" + root = etree.fromstring(docStr) + + # Append the initial comments: + for comment_token in self.initial_comments: + comment = self.commentClass(comment_token["data"]) + root.addprevious(comment._element) + + # Create the root document and add the ElementTree to it + self.document = self.documentClass() + self.document._elementTree = root.getroottree() + + # Give the root element the right name + name = token["name"] + namespace = token.get("namespace", self.defaultNamespace) + if namespace is None: + etree_tag = name + else: + etree_tag = "{%s}%s" % (namespace, name) + root.tag = etree_tag + + # Add the root element to the internal child/open data structures + root_element = self.elementClass(name, namespace) + root_element._element = root + self.document._childNodes.append(root_element) + self.openElements.append(root_element) + + # Reset to the default insert comment function + self.insertComment = self.insertCommentMain diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyc 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyc new file mode 100644 index 0000000..ff771a2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py new file mode 100644 index 0000000..9bec207 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py @@ -0,0 +1,154 @@ +"""A collection of modules for iterating through different kinds of +tree, generating tokens identical to those produced by the tokenizer +module. + +To create a tree walker for a new type of tree, you need to +implement a tree walker object (called TreeWalker by convention) that +implements a 'serialize' method taking a tree as sole argument and +returning an iterator generating tokens. +""" + +from __future__ import absolute_import, division, unicode_literals + +from .. import constants +from .._utils import default_etree + +__all__ = ["getTreeWalker", "pprint"] + +treeWalkerCache = {} + + +def getTreeWalker(treeType, implementation=None, **kwargs): + """Get a TreeWalker class for various types of tree with built-in support + + :arg str treeType: the name of the tree type required (case-insensitive). + Supported values are: + + * "dom": The xml.dom.minidom DOM implementation + * "etree": A generic walker for tree implementations exposing an + elementtree-like interface (known to work with ElementTree, + cElementTree and lxml.etree). + * "lxml": Optimized walker for lxml.etree + * "genshi": a Genshi stream + + :arg implementation: A module implementing the tree type e.g. + xml.etree.ElementTree or cElementTree (Currently applies to the "etree" + tree type only). + + :arg kwargs: keyword arguments passed to the etree walker--for other + walkers, this has no effect + + :returns: a TreeWalker class + + """ + + treeType = treeType.lower() + if treeType not in treeWalkerCache: + if treeType == "dom": + from . import dom + treeWalkerCache[treeType] = dom.TreeWalker + elif treeType == "genshi": + from . import genshi + treeWalkerCache[treeType] = genshi.TreeWalker + elif treeType == "lxml": + from . import etree_lxml + treeWalkerCache[treeType] = etree_lxml.TreeWalker + elif treeType == "etree": + from . import etree + if implementation is None: + implementation = default_etree + # XXX: NEVER cache here, caching is done in the etree submodule + return etree.getETreeModule(implementation, **kwargs).TreeWalker + return treeWalkerCache.get(treeType) + + +def concatenateCharacterTokens(tokens): + pendingCharacters = [] + for token in tokens: + type = token["type"] + if type in ("Characters", "SpaceCharacters"): + pendingCharacters.append(token["data"]) + else: + if pendingCharacters: + yield {"type": "Characters", "data": "".join(pendingCharacters)} + pendingCharacters = [] + yield token + if pendingCharacters: + yield {"type": "Characters", "data": "".join(pendingCharacters)} + + +def pprint(walker): + """Pretty printer for tree walkers + + Takes a TreeWalker instance and pretty prints the output of walking the tree.
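+
+ For example, a minimal sketch (assuming the parent html5lib package is
+ importable):
+
+ >>> import html5lib
+ >>> dom = html5lib.parse("<p>hello</p>", treebuilder="dom")
+ >>> walker = html5lib.getTreeWalker("dom")
+ >>> print(pprint(walker(dom)))  # doctest: +SKIP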
+ + :arg walker: a TreeWalker instance + + """ + output = [] + indent = 0 + for token in concatenateCharacterTokens(walker): + type = token["type"] + if type in ("StartTag", "EmptyTag"): + # tag name + if token["namespace"] and token["namespace"] != constants.namespaces["html"]: + if token["namespace"] in constants.prefixes: + ns = constants.prefixes[token["namespace"]] + else: + ns = token["namespace"] + name = "%s %s" % (ns, token["name"]) + else: + name = token["name"] + output.append("%s<%s>" % (" " * indent, name)) + indent += 2 + # attributes (sorted for consistent ordering) + attrs = token["data"] + for (namespace, localname), value in sorted(attrs.items()): + if namespace: + if namespace in constants.prefixes: + ns = constants.prefixes[namespace] + else: + ns = namespace + name = "%s %s" % (ns, localname) + else: + name = localname + output.append("%s%s=\"%s\"" % (" " * indent, name, value)) + # self-closing + if type == "EmptyTag": + indent -= 2 + + elif type == "EndTag": + indent -= 2 + + elif type == "Comment": + output.append("%s<!-- %s -->" % (" " * indent, token["data"])) + + elif type == "Doctype": + if token["name"]: + if token["publicId"]: + output.append("""%s<!DOCTYPE %s "%s" "%s">""" % + (" " * indent, + token["name"], + token["publicId"], + token["systemId"] if token["systemId"] else "")) + elif token["systemId"]: + output.append("""%s<!DOCTYPE %s "" "%s">""" % + (" " * indent, + token["name"], + token["systemId"])) + else: + output.append("%s<!DOCTYPE %s>" % (" " * indent, + token["name"])) + else: + output.append("%s<!DOCTYPE >" % (" " * indent,)) + + elif type == "Characters": + output.append("%s\"%s\"" % (" " * indent, token["data"])) + + elif type == "SpaceCharacters": + assert False, "concatenateCharacterTokens should have got rid of all Space tokens" + + else: + raise ValueError("Unknown token type, %s" % type) + + return "\n".join(output) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.pyc new file mode 100644 index 0000000..fe7d856 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.py new file mode 100644 index 0000000..80c474c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.py @@ -0,0 +1,252 @@ +from __future__ import absolute_import, division, unicode_literals + +from xml.dom import Node +from ..constants import namespaces, voidElements, spaceCharacters + +__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN", + "TreeWalker", "NonRecursiveTreeWalker"] + +DOCUMENT = Node.DOCUMENT_NODE +DOCTYPE = Node.DOCUMENT_TYPE_NODE +TEXT = Node.TEXT_NODE +ELEMENT = Node.ELEMENT_NODE +COMMENT = Node.COMMENT_NODE +ENTITY = Node.ENTITY_NODE +UNKNOWN = "<#UNKNOWN#>" + +spaceCharacters = "".join(spaceCharacters) + + +class TreeWalker(object): + """Walks a tree yielding tokens + + Tokens are dicts that all have a ``type`` field specifying the type of the + token. 
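+
+ For example, a start-tag token emitted while walking an HTML tree might
+ look like this (a sketch):
+
+ {"type": "StartTag", "name": "p",
+ "namespace": "http://www.w3.org/1999/xhtml", "data": {}}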
+ + """ + def __init__(self, tree): + """Creates a TreeWalker + + :arg tree: the tree to walk + + """ + self.tree = tree + + def __iter__(self): + raise NotImplementedError + + def error(self, msg): + """Generates an error token with the given message + + :arg msg: the error message + + :returns: SerializeError token + + """ + return {"type": "SerializeError", "data": msg} + + def emptyTag(self, namespace, name, attrs, hasChildren=False): + """Generates an EmptyTag token + + :arg namespace: the namespace of the token--can be ``None`` + + :arg name: the name of the element + + :arg attrs: the attributes of the element as a dict + + :arg hasChildren: whether or not to yield a SerializationError because + this tag shouldn't have children + + :returns: EmptyTag token + + """ + yield {"type": "EmptyTag", "name": name, + "namespace": namespace, + "data": attrs} + if hasChildren: + yield self.error("Void element has children") + + def startTag(self, namespace, name, attrs): + """Generates a StartTag token + + :arg namespace: the namespace of the token--can be ``None`` + + :arg name: the name of the element + + :arg attrs: the attributes of the element as a dict + + :returns: StartTag token + + """ + return {"type": "StartTag", + "name": name, + "namespace": namespace, + "data": attrs} + + def endTag(self, namespace, name): + """Generates an EndTag token + + :arg namespace: the namespace of the token--can be ``None`` + + :arg name: the name of the element + + :returns: EndTag token + + """ + return {"type": "EndTag", + "name": name, + "namespace": namespace} + + def text(self, data): + """Generates SpaceCharacters and Characters tokens + + Depending on what's in the data, this generates one or more + ``SpaceCharacters`` and ``Characters`` tokens. + + For example: + + >>> from html5lib.treewalkers.base import TreeWalker + >>> # Give it an empty tree just so it instantiates + >>> walker = TreeWalker([]) + >>> list(walker.text('')) + [] + >>> list(walker.text(' ')) + [{u'data': ' ', u'type': u'SpaceCharacters'}] + >>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE + [{u'data': ' ', u'type': u'SpaceCharacters'}, + {u'data': u'abc', u'type': u'Characters'}, + {u'data': u' ', u'type': u'SpaceCharacters'}] + + :arg data: the text data + + :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens + + """ + data = data + middle = data.lstrip(spaceCharacters) + left = data[:len(data) - len(middle)] + if left: + yield {"type": "SpaceCharacters", "data": left} + data = middle + middle = data.rstrip(spaceCharacters) + right = data[len(middle):] + if middle: + yield {"type": "Characters", "data": middle} + if right: + yield {"type": "SpaceCharacters", "data": right} + + def comment(self, data): + """Generates a Comment token + + :arg data: the comment + + :returns: Comment token + + """ + return {"type": "Comment", "data": data} + + def doctype(self, name, publicId=None, systemId=None): + """Generates a Doctype token + + :arg name: + + :arg publicId: + + :arg systemId: + + :returns: the Doctype token + + """ + return {"type": "Doctype", + "name": name, + "publicId": publicId, + "systemId": systemId} + + def entity(self, name): + """Generates an Entity token + + :arg name: the entity name + + :returns: an Entity token + + """ + return {"type": "Entity", "name": name} + + def unknown(self, nodeType): + """Handles unknown node types""" + return self.error("Unknown node type: " + nodeType) + + +class NonRecursiveTreeWalker(TreeWalker): + def getNodeDetails(self, node): + raise 
NotImplementedError + + def getFirstChild(self, node): + raise NotImplementedError + + def getNextSibling(self, node): + raise NotImplementedError + + def getParentNode(self, node): + raise NotImplementedError + + def __iter__(self): + currentNode = self.tree + while currentNode is not None: + details = self.getNodeDetails(currentNode) + type, details = details[0], details[1:] + hasChildren = False + + if type == DOCTYPE: + yield self.doctype(*details) + + elif type == TEXT: + for token in self.text(*details): + yield token + + elif type == ELEMENT: + namespace, name, attributes, hasChildren = details + if (not namespace or namespace == namespaces["html"]) and name in voidElements: + for token in self.emptyTag(namespace, name, attributes, + hasChildren): + yield token + hasChildren = False + else: + yield self.startTag(namespace, name, attributes) + + elif type == COMMENT: + yield self.comment(details[0]) + + elif type == ENTITY: + yield self.entity(details[0]) + + elif type == DOCUMENT: + hasChildren = True + + else: + yield self.unknown(details[0]) + + if hasChildren: + firstChild = self.getFirstChild(currentNode) + else: + firstChild = None + + if firstChild is not None: + currentNode = firstChild + else: + while currentNode is not None: + details = self.getNodeDetails(currentNode) + type, details = details[0], details[1:] + if type == ELEMENT: + namespace, name, attributes, hasChildren = details + if (namespace and namespace != namespaces["html"]) or name not in voidElements: + yield self.endTag(namespace, name) + if self.tree is currentNode: + currentNode = None + break + nextSibling = self.getNextSibling(currentNode) + if nextSibling is not None: + currentNode = nextSibling + break + else: + currentNode = self.getParentNode(currentNode) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyc new file mode 100644 index 0000000..278e2d1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.py new file mode 100644 index 0000000..b0c89b0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.py @@ -0,0 +1,43 @@ +from __future__ import absolute_import, division, unicode_literals + +from xml.dom import Node + +from . 
import base + + +class TreeWalker(base.NonRecursiveTreeWalker): + def getNodeDetails(self, node): + if node.nodeType == Node.DOCUMENT_TYPE_NODE: + return base.DOCTYPE, node.name, node.publicId, node.systemId + + elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): + return base.TEXT, node.nodeValue + + elif node.nodeType == Node.ELEMENT_NODE: + attrs = {} + for attr in list(node.attributes.keys()): + attr = node.getAttributeNode(attr) + if attr.namespaceURI: + attrs[(attr.namespaceURI, attr.localName)] = attr.value + else: + attrs[(None, attr.name)] = attr.value + return (base.ELEMENT, node.namespaceURI, node.nodeName, + attrs, node.hasChildNodes()) + + elif node.nodeType == Node.COMMENT_NODE: + return base.COMMENT, node.nodeValue + + elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): + return (base.DOCUMENT,) + + else: + return base.UNKNOWN, node.nodeType + + def getFirstChild(self, node): + return node.firstChild + + def getNextSibling(self, node): + return node.nextSibling + + def getParentNode(self, node): + return node.parentNode diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.pyc new file mode 100644 index 0000000..d3a2482 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.py new file mode 100644 index 0000000..95fc0c1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.py @@ -0,0 +1,130 @@ +from __future__ import absolute_import, division, unicode_literals + +from collections import OrderedDict +import re + +from pip._vendor.six import string_types + +from . import base +from .._utils import moduleFactoryFactory + +tag_regexp = re.compile("{([^}]*)}(.*)") + + +def getETreeBuilder(ElementTreeImplementation): + ElementTree = ElementTreeImplementation + ElementTreeCommentType = ElementTree.Comment("asd").tag + + class TreeWalker(base.NonRecursiveTreeWalker): # pylint:disable=unused-variable + """Given the particular ElementTree representation, this implementation, + to avoid using recursion, returns "nodes" as tuples with the following + content: + + 1. The current element + + 2. The index of the element relative to its parent + + 3. A stack of ancestor elements + + 4. 
A flag "text", "tail" or None to indicate if the current node is a + text node; either the text or tail of the current element (1) + """ + def getNodeDetails(self, node): + if isinstance(node, tuple): # It might be the root Element + elt, _, _, flag = node + if flag in ("text", "tail"): + return base.TEXT, getattr(elt, flag) + else: + node = elt + + if not(hasattr(node, "tag")): + node = node.getroot() + + if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"): + return (base.DOCUMENT,) + + elif node.tag == "<!DOCTYPE>": + return (base.DOCTYPE, node.text, + node.get("publicId"), node.get("systemId")) + + elif node.tag == ElementTreeCommentType: + return base.COMMENT, node.text + + else: + assert isinstance(node.tag, string_types), type(node.tag) + # This is assumed to be an ordinary element + match = tag_regexp.match(node.tag) + if match: + namespace, tag = match.groups() + else: + namespace = None + tag = node.tag + attrs = OrderedDict() + for name, value in list(node.attrib.items()): + match = tag_regexp.match(name) + if match: + attrs[(match.group(1), match.group(2))] = value + else: + attrs[(None, name)] = value + return (base.ELEMENT, namespace, tag, + attrs, len(node) or node.text) + + def getFirstChild(self, node): + if isinstance(node, tuple): + element, key, parents, flag = node + else: + element, key, parents, flag = node, None, [], None + + if flag in ("text", "tail"): + return None + else: + if element.text: + return element, key, parents, "text" + elif len(element): + parents.append(element) + return element[0], 0, parents, None + else: + return None + + def getNextSibling(self, node): + if isinstance(node, tuple): + element, key, parents, flag = node + else: + return None + + if flag == "text": + if len(element): + parents.append(element) + return element[0], 0, parents, None + else: + return None + else: + if element.tail and flag != "tail": + return element, key, parents, "tail" + elif key < len(parents[-1]) - 1: + return parents[-1][key + 1], key + 1, parents, None + else: + return None + + def getParentNode(self, node): + if isinstance(node, tuple): + element, key, parents, flag = node + else: + return None + + if flag == "text": + if not parents: + return element + else: + return element, key, parents, None + else: + parent = parents.pop() + if not parents: + return parent + else: + assert list(parents[-1]).count(parent) == 1 + return parent, list(parents[-1]).index(parent), parents, None + + return locals() + +getETreeModule = moduleFactoryFactory(getETreeBuilder) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyc new file mode 100644 index 0000000..c49364e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py new file mode 100644 index 0000000..e81ddf3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py @@ -0,0 +1,213 @@ +from __future__ import absolute_import, division, unicode_literals +from pip._vendor.six import text_type + +from lxml import etree +from ..treebuilders.etree import tag_regexp + +from . import base + +from .. 
import _ihatexml + + +def ensure_str(s): + if s is None: + return None + elif isinstance(s, text_type): + return s + else: + return s.decode("ascii", "strict") + + +class Root(object): + def __init__(self, et): + self.elementtree = et + self.children = [] + + try: + if et.docinfo.internalDTD: + self.children.append(Doctype(self, + ensure_str(et.docinfo.root_name), + ensure_str(et.docinfo.public_id), + ensure_str(et.docinfo.system_url))) + except AttributeError: + pass + + try: + node = et.getroot() + except AttributeError: + node = et + + while node.getprevious() is not None: + node = node.getprevious() + while node is not None: + self.children.append(node) + node = node.getnext() + + self.text = None + self.tail = None + + def __getitem__(self, key): + return self.children[key] + + def getnext(self): + return None + + def __len__(self): + return 1 + + +class Doctype(object): + def __init__(self, root_node, name, public_id, system_id): + self.root_node = root_node + self.name = name + self.public_id = public_id + self.system_id = system_id + + self.text = None + self.tail = None + + def getnext(self): + return self.root_node.children[1] + + +class FragmentRoot(Root): + def __init__(self, children): + self.children = [FragmentWrapper(self, child) for child in children] + self.text = self.tail = None + + def getnext(self): + return None + + +class FragmentWrapper(object): + def __init__(self, fragment_root, obj): + self.root_node = fragment_root + self.obj = obj + if hasattr(self.obj, 'text'): + self.text = ensure_str(self.obj.text) + else: + self.text = None + if hasattr(self.obj, 'tail'): + self.tail = ensure_str(self.obj.tail) + else: + self.tail = None + + def __getattr__(self, name): + return getattr(self.obj, name) + + def getnext(self): + siblings = self.root_node.children + idx = siblings.index(self) + if idx < len(siblings) - 1: + return siblings[idx + 1] + else: + return None + + def __getitem__(self, key): + return self.obj[key] + + def __bool__(self): + return bool(self.obj) + + def getparent(self): + return None + + def __str__(self): + return str(self.obj) + + def __unicode__(self): + return str(self.obj) + + def __len__(self): + return len(self.obj) + + +class TreeWalker(base.NonRecursiveTreeWalker): + def __init__(self, tree): + # pylint:disable=redefined-variable-type + if isinstance(tree, list): + self.fragmentChildren = set(tree) + tree = FragmentRoot(tree) + else: + self.fragmentChildren = set() + tree = Root(tree) + base.NonRecursiveTreeWalker.__init__(self, tree) + self.filter = _ihatexml.InfosetFilter() + + def getNodeDetails(self, node): + if isinstance(node, tuple): # Text node + node, key = node + assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key + return base.TEXT, ensure_str(getattr(node, key)) + + elif isinstance(node, Root): + return (base.DOCUMENT,) + + elif isinstance(node, Doctype): + return base.DOCTYPE, node.name, node.public_id, node.system_id + + elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"): + return base.TEXT, ensure_str(node.obj) + + elif node.tag == etree.Comment: + return base.COMMENT, ensure_str(node.text) + + elif node.tag == etree.Entity: + return base.ENTITY, ensure_str(node.text)[1:-1] # strip &; + + else: + # This is assumed to be an ordinary element + match = tag_regexp.match(ensure_str(node.tag)) + if match: + namespace, tag = match.groups() + else: + namespace = None + tag = ensure_str(node.tag) + attrs = {} + for name, value in list(node.attrib.items()): + name = ensure_str(name) + value = 
ensure_str(value) + match = tag_regexp.match(name) + if match: + attrs[(match.group(1), match.group(2))] = value + else: + attrs[(None, name)] = value + return (base.ELEMENT, namespace, self.filter.fromXmlName(tag), + attrs, len(node) > 0 or node.text) + + def getFirstChild(self, node): + assert not isinstance(node, tuple), "Text nodes have no children" + + assert len(node) or node.text, "Node has no children" + if node.text: + return (node, "text") + else: + return node[0] + + def getNextSibling(self, node): + if isinstance(node, tuple): # Text node + node, key = node + assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key + if key == "text": + # XXX: we cannot use a "bool(node) and node[0] or None" construct here + # because node[0] might evaluate to False if it has no child element + if len(node): + return node[0] + else: + return None + else: # tail + return node.getnext() + + return (node, "tail") if node.tail else node.getnext() + + def getParentNode(self, node): + if isinstance(node, tuple): # Text node + node, key = node + assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key + if key == "text": + return node + # else: fallback to "normal" processing + elif node in self.fragmentChildren: + return None + + return node.getparent() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyc new file mode 100644 index 0000000..2fc4e66 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py new file mode 100644 index 0000000..7483be2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py @@ -0,0 +1,69 @@ +from __future__ import absolute_import, division, unicode_literals + +from genshi.core import QName +from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT +from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT + +from . import base + +from ..constants import voidElements, namespaces + + +class TreeWalker(base.TreeWalker): + def __iter__(self): + # Buffer the events so we can pass in the following one + previous = None + for event in self.tree: + if previous is not None: + for token in self.tokens(previous, event): + yield token + previous = event + + # Don't forget the final event! 
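+ # It is flushed with next=None, since no event follows it.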
+ if previous is not None: + for token in self.tokens(previous, None): + yield token + + def tokens(self, event, next): + kind, data, _ = event + if kind == START: + tag, attribs = data + name = tag.localname + namespace = tag.namespace + converted_attribs = {} + for k, v in attribs: + if isinstance(k, QName): + converted_attribs[(k.namespace, k.localname)] = v + else: + converted_attribs[(None, k)] = v + + if namespace == namespaces["html"] and name in voidElements: + for token in self.emptyTag(namespace, name, converted_attribs, + not next or next[0] != END or + next[1] != tag): + yield token + else: + yield self.startTag(namespace, name, converted_attribs) + + elif kind == END: + name = data.localname + namespace = data.namespace + if namespace != namespaces["html"] or name not in voidElements: + yield self.endTag(namespace, name) + + elif kind == COMMENT: + yield self.comment(data) + + elif kind == TEXT: + for token in self.text(data): + yield token + + elif kind == DOCTYPE: + yield self.doctype(*data) + + elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, + START_CDATA, END_CDATA, PI): + pass + + else: + yield self.unknown(kind) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.pyc new file mode 100644 index 0000000..a7859a6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/__init__.py new file mode 100644 index 0000000..847bf93 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/__init__.py @@ -0,0 +1,2 @@ +from .package_data import __version__ +from .core import * diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/__init__.pyc new file mode 100644 index 0000000..9eb1355 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/codec.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/codec.py new file mode 100644 index 0000000..98c65ea --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/codec.py @@ -0,0 +1,118 @@ +from .core import encode, decode, alabel, ulabel, IDNAError +import codecs +import re + +_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]') + +class Codec(codecs.Codec): + + def encode(self, data, errors='strict'): + + if errors != 'strict': + raise IDNAError("Unsupported error handling \"{0}\"".format(errors)) + + if not data: + return "", 0 + + return encode(data), len(data) + + def decode(self, data, errors='strict'): + + if errors != 'strict': + raise IDNAError("Unsupported error handling \"{0}\"".format(errors)) + + if not data: + return u"", 0 + + return decode(data), len(data) + +class IncrementalEncoder(codecs.BufferedIncrementalEncoder): + def _buffer_encode(self, data, errors, final): + if errors != 'strict': + raise IDNAError("Unsupported error handling \"{0}\"".format(errors)) + + if not data: + return ("", 0) + + labels = _unicode_dots_re.split(data) + trailing_dot = u'' + if labels: + if not labels[-1]: + trailing_dot = '.' 
+ del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = '.' + + result = [] + size = 0 + for label in labels: + result.append(alabel(label)) + if size: + size += 1 + size += len(label) + + # Join with U+002E + result = ".".join(result) + trailing_dot + size += len(trailing_dot) + return (result, size) + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def _buffer_decode(self, data, errors, final): + if errors != 'strict': + raise IDNAError("Unsupported error handling \"{0}\"".format(errors)) + + if not data: + return (u"", 0) + + # IDNA allows decoding to operate on Unicode strings, too. + if isinstance(data, unicode): + labels = _unicode_dots_re.split(data) + else: + # Must be ASCII string + data = str(data) + unicode(data, "ascii") + labels = data.split(".") + + trailing_dot = u'' + if labels: + if not labels[-1]: + trailing_dot = u'.' + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = u'.' + + result = [] + size = 0 + for label in labels: + result.append(ulabel(label)) + if size: + size += 1 + size += len(label) + + result = u".".join(result) + trailing_dot + size += len(trailing_dot) + return (result, size) + + +class StreamWriter(Codec, codecs.StreamWriter): + pass + +class StreamReader(Codec, codecs.StreamReader): + pass + +def getregentry(): + return codecs.CodecInfo( + name='idna', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/codec.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/codec.pyc new file mode 100644 index 0000000..60a7d38 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/codec.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/compat.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/compat.py new file mode 100644 index 0000000..4d47f33 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/compat.py @@ -0,0 +1,12 @@ +from .core import * +from .codec import * + +def ToASCII(label): + return encode(label) + +def ToUnicode(label): + return decode(label) + +def nameprep(s): + raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol") + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/compat.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/compat.pyc new file mode 100644 index 0000000..4038aee Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/core.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/core.py new file mode 100644 index 0000000..104624a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/core.py @@ -0,0 +1,396 @@ +from . 
import idnadata +import bisect +import unicodedata +import re +import sys +from .intranges import intranges_contain + +_virama_combining_class = 9 +_alabel_prefix = b'xn--' +_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]') + +if sys.version_info[0] == 3: + unicode = str + unichr = chr + +class IDNAError(UnicodeError): + """ Base exception for all IDNA-encoding related problems """ + pass + + +class IDNABidiError(IDNAError): + """ Exception when bidirectional requirements are not satisfied """ + pass + + +class InvalidCodepoint(IDNAError): + """ Exception when a disallowed or unallocated codepoint is used """ + pass + + +class InvalidCodepointContext(IDNAError): + """ Exception when the codepoint is not valid in the context it is used """ + pass + + +def _combining_class(cp): + v = unicodedata.combining(unichr(cp)) + if v == 0: + if not unicodedata.name(unichr(cp)): + raise ValueError("Unknown character in unicodedata") + return v + +def _is_script(cp, script): + return intranges_contain(ord(cp), idnadata.scripts[script]) + +def _punycode(s): + return s.encode('punycode') + +def _unot(s): + return 'U+{0:04X}'.format(s) + + +def valid_label_length(label): + + if len(label) > 63: + return False + return True + + +def valid_string_length(label, trailing_dot): + + if len(label) > (254 if trailing_dot else 253): + return False + return True + + +def check_bidi(label, check_ltr=False): + + # Bidi rules should only be applied if string contains RTL characters + bidi_label = False + for (idx, cp) in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + if direction == '': + # String likely comes from a newer version of Unicode + raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx)) + if direction in ['R', 'AL', 'AN']: + bidi_label = True + if not bidi_label and not check_ltr: + return True + + # Bidi rule 1 + direction = unicodedata.bidirectional(label[0]) + if direction in ['R', 'AL']: + rtl = True + elif direction == 'L': + rtl = False + else: + raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label))) + + valid_ending = False + number_type = False + for (idx, cp) in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + + if rtl: + # Bidi rule 2 + if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']: + raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx)) + # Bidi rule 3 + if direction in ['R', 'AL', 'EN', 'AN']: + valid_ending = True + elif direction != 'NSM': + valid_ending = False + # Bidi rule 4 + if direction in ['AN', 'EN']: + if not number_type: + number_type = direction + else: + if number_type != direction: + raise IDNABidiError('Can not mix numeral types in a right-to-left label') + else: + # Bidi rule 5 + if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']: + raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx)) + # Bidi rule 6 + if direction in ['L', 'EN']: + valid_ending = True + elif direction != 'NSM': + valid_ending = False + + if not valid_ending: + raise IDNABidiError('Label ends with illegal codepoint directionality') + + return True + + +def check_initial_combiner(label): + + if unicodedata.category(label[0])[0] == 'M': + raise IDNAError('Label begins with an illegal combining character') + return True + + +def check_hyphen_ok(label): + + if label[2:4] == '--': + raise IDNAError('Label has 
disallowed hyphens in 3rd and 4th position') + if label[0] == '-' or label[-1] == '-': + raise IDNAError('Label must not start or end with a hyphen') + return True + + +def check_nfc(label): + + if unicodedata.normalize('NFC', label) != label: + raise IDNAError('Label must be in Normalization Form C') + + +def valid_contextj(label, pos): + + cp_value = ord(label[pos]) + + if cp_value == 0x200c: + + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + + ok = False + for i in range(pos-1, -1, -1): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord('T'): + continue + if joining_type in [ord('L'), ord('D')]: + ok = True + break + + if not ok: + return False + + ok = False + for i in range(pos+1, len(label)): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord('T'): + continue + if joining_type in [ord('R'), ord('D')]: + ok = True + break + return ok + + if cp_value == 0x200d: + + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + return False + + else: + + return False + + +def valid_contexto(label, pos, exception=False): + + cp_value = ord(label[pos]) + + if cp_value == 0x00b7: + if 0 < pos < len(label)-1: + if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c: + return True + return False + + elif cp_value == 0x0375: + if pos < len(label)-1 and len(label) > 1: + return _is_script(label[pos + 1], 'Greek') + return False + + elif cp_value == 0x05f3 or cp_value == 0x05f4: + if pos > 0: + return _is_script(label[pos - 1], 'Hebrew') + return False + + elif cp_value == 0x30fb: + for cp in label: + if cp == u'\u30fb': + continue + if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'): + return True + return False + + elif 0x660 <= cp_value <= 0x669: + for cp in label: + if 0x6f0 <= ord(cp) <= 0x06f9: + return False + return True + + elif 0x6f0 <= cp_value <= 0x6f9: + for cp in label: + if 0x660 <= ord(cp) <= 0x0669: + return False + return True + + +def check_label(label): + + if isinstance(label, (bytes, bytearray)): + label = label.decode('utf-8') + if len(label) == 0: + raise IDNAError('Empty Label') + + check_nfc(label) + check_hyphen_ok(label) + check_initial_combiner(label) + + for (pos, cp) in enumerate(label): + cp_value = ord(cp) + if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']): + continue + elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']): + try: + if not valid_contextj(label, pos): + raise InvalidCodepointContext('Joiner {0} not allowed at position {1} in {2}'.format( + _unot(cp_value), pos+1, repr(label))) + except ValueError: + raise IDNAError('Unknown codepoint adjacent to joiner {0} at position {1} in {2}'.format( + _unot(cp_value), pos+1, repr(label))) + elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']): + if not valid_contexto(label, pos): + raise InvalidCodepointContext('Codepoint {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label))) + else: + raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label))) + + check_bidi(label) + + +def alabel(label): + + try: + label = label.encode('ascii') + ulabel(label) + if not valid_label_length(label): + raise IDNAError('Label too long') + return label + except UnicodeEncodeError: + pass + + if not label: + raise IDNAError('No Input') + + label = unicode(label) + check_label(label) + 
label = _punycode(label) + label = _alabel_prefix + label + + if not valid_label_length(label): + raise IDNAError('Label too long') + + return label + + +def ulabel(label): + + if not isinstance(label, (bytes, bytearray)): + try: + label = label.encode('ascii') + except UnicodeEncodeError: + check_label(label) + return label + + label = label.lower() + if label.startswith(_alabel_prefix): + label = label[len(_alabel_prefix):] + else: + check_label(label) + return label.decode('ascii') + + label = label.decode('punycode') + check_label(label) + return label + + +def uts46_remap(domain, std3_rules=True, transitional=False): + """Re-map the characters in the string according to UTS46 processing.""" + from .uts46data import uts46data + output = u"" + try: + for pos, char in enumerate(domain): + code_point = ord(char) + uts46row = uts46data[code_point if code_point < 256 else + bisect.bisect_left(uts46data, (code_point, "Z")) - 1] + status = uts46row[1] + replacement = uts46row[2] if len(uts46row) == 3 else None + if (status == "V" or + (status == "D" and not transitional) or + (status == "3" and not std3_rules and replacement is None)): + output += char + elif replacement is not None and (status == "M" or + (status == "3" and not std3_rules) or + (status == "D" and transitional)): + output += replacement + elif status != "I": + raise IndexError() + return unicodedata.normalize("NFC", output) + except IndexError: + raise InvalidCodepoint( + "Codepoint {0} not allowed at position {1} in {2}".format( + _unot(code_point), pos + 1, repr(domain))) + + +def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False): + + if isinstance(s, (bytes, bytearray)): + s = s.decode("ascii") + if uts46: + s = uts46_remap(s, std3_rules, transitional) + trailing_dot = False + result = [] + if strict: + labels = s.split('.') + else: + labels = _unicode_dots_re.split(s) + if not labels or labels == ['']: + raise IDNAError('Empty domain') + if labels[-1] == '': + del labels[-1] + trailing_dot = True + for label in labels: + s = alabel(label) + if s: + result.append(s) + else: + raise IDNAError('Empty label') + if trailing_dot: + result.append(b'') + s = b'.'.join(result) + if not valid_string_length(s, trailing_dot): + raise IDNAError('Domain too long') + return s + + +def decode(s, strict=False, uts46=False, std3_rules=False): + + if isinstance(s, (bytes, bytearray)): + s = s.decode("ascii") + if uts46: + s = uts46_remap(s, std3_rules, False) + trailing_dot = False + result = [] + if not strict: + labels = _unicode_dots_re.split(s) + else: + labels = s.split(u'.') + if not labels or labels == ['']: + raise IDNAError('Empty domain') + if not labels[-1]: + del labels[-1] + trailing_dot = True + for label in labels: + s = ulabel(label) + if s: + result.append(s) + else: + raise IDNAError('Empty label') + if trailing_dot: + result.append(u'') + return u'.'.join(result) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/core.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/core.pyc new file mode 100644 index 0000000..ecc8af8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/core.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/idnadata.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/idnadata.py new file mode 100644 index 0000000..a80c959 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/idnadata.py @@ -0,0 +1,1979 @@ +# This file is automatically generated by 
tools/idna-data + +__version__ = "11.0.0" +scripts = { + 'Greek': ( + 0x37000000374, + 0x37500000378, + 0x37a0000037e, + 0x37f00000380, + 0x38400000385, + 0x38600000387, + 0x3880000038b, + 0x38c0000038d, + 0x38e000003a2, + 0x3a3000003e2, + 0x3f000000400, + 0x1d2600001d2b, + 0x1d5d00001d62, + 0x1d6600001d6b, + 0x1dbf00001dc0, + 0x1f0000001f16, + 0x1f1800001f1e, + 0x1f2000001f46, + 0x1f4800001f4e, + 0x1f5000001f58, + 0x1f5900001f5a, + 0x1f5b00001f5c, + 0x1f5d00001f5e, + 0x1f5f00001f7e, + 0x1f8000001fb5, + 0x1fb600001fc5, + 0x1fc600001fd4, + 0x1fd600001fdc, + 0x1fdd00001ff0, + 0x1ff200001ff5, + 0x1ff600001fff, + 0x212600002127, + 0xab650000ab66, + 0x101400001018f, + 0x101a0000101a1, + 0x1d2000001d246, + ), + 'Han': ( + 0x2e8000002e9a, + 0x2e9b00002ef4, + 0x2f0000002fd6, + 0x300500003006, + 0x300700003008, + 0x30210000302a, + 0x30380000303c, + 0x340000004db6, + 0x4e0000009ff0, + 0xf9000000fa6e, + 0xfa700000fada, + 0x200000002a6d7, + 0x2a7000002b735, + 0x2b7400002b81e, + 0x2b8200002cea2, + 0x2ceb00002ebe1, + 0x2f8000002fa1e, + ), + 'Hebrew': ( + 0x591000005c8, + 0x5d0000005eb, + 0x5ef000005f5, + 0xfb1d0000fb37, + 0xfb380000fb3d, + 0xfb3e0000fb3f, + 0xfb400000fb42, + 0xfb430000fb45, + 0xfb460000fb50, + ), + 'Hiragana': ( + 0x304100003097, + 0x309d000030a0, + 0x1b0010001b11f, + 0x1f2000001f201, + ), + 'Katakana': ( + 0x30a1000030fb, + 0x30fd00003100, + 0x31f000003200, + 0x32d0000032ff, + 0x330000003358, + 0xff660000ff70, + 0xff710000ff9e, + 0x1b0000001b001, + ), +} +joining_types = { + 0x600: 85, + 0x601: 85, + 0x602: 85, + 0x603: 85, + 0x604: 85, + 0x605: 85, + 0x608: 85, + 0x60b: 85, + 0x620: 68, + 0x621: 85, + 0x622: 82, + 0x623: 82, + 0x624: 82, + 0x625: 82, + 0x626: 68, + 0x627: 82, + 0x628: 68, + 0x629: 82, + 0x62a: 68, + 0x62b: 68, + 0x62c: 68, + 0x62d: 68, + 0x62e: 68, + 0x62f: 82, + 0x630: 82, + 0x631: 82, + 0x632: 82, + 0x633: 68, + 0x634: 68, + 0x635: 68, + 0x636: 68, + 0x637: 68, + 0x638: 68, + 0x639: 68, + 0x63a: 68, + 0x63b: 68, + 0x63c: 68, + 0x63d: 68, + 0x63e: 68, + 0x63f: 68, + 0x640: 67, + 0x641: 68, + 0x642: 68, + 0x643: 68, + 0x644: 68, + 0x645: 68, + 0x646: 68, + 0x647: 68, + 0x648: 82, + 0x649: 68, + 0x64a: 68, + 0x66e: 68, + 0x66f: 68, + 0x671: 82, + 0x672: 82, + 0x673: 82, + 0x674: 85, + 0x675: 82, + 0x676: 82, + 0x677: 82, + 0x678: 68, + 0x679: 68, + 0x67a: 68, + 0x67b: 68, + 0x67c: 68, + 0x67d: 68, + 0x67e: 68, + 0x67f: 68, + 0x680: 68, + 0x681: 68, + 0x682: 68, + 0x683: 68, + 0x684: 68, + 0x685: 68, + 0x686: 68, + 0x687: 68, + 0x688: 82, + 0x689: 82, + 0x68a: 82, + 0x68b: 82, + 0x68c: 82, + 0x68d: 82, + 0x68e: 82, + 0x68f: 82, + 0x690: 82, + 0x691: 82, + 0x692: 82, + 0x693: 82, + 0x694: 82, + 0x695: 82, + 0x696: 82, + 0x697: 82, + 0x698: 82, + 0x699: 82, + 0x69a: 68, + 0x69b: 68, + 0x69c: 68, + 0x69d: 68, + 0x69e: 68, + 0x69f: 68, + 0x6a0: 68, + 0x6a1: 68, + 0x6a2: 68, + 0x6a3: 68, + 0x6a4: 68, + 0x6a5: 68, + 0x6a6: 68, + 0x6a7: 68, + 0x6a8: 68, + 0x6a9: 68, + 0x6aa: 68, + 0x6ab: 68, + 0x6ac: 68, + 0x6ad: 68, + 0x6ae: 68, + 0x6af: 68, + 0x6b0: 68, + 0x6b1: 68, + 0x6b2: 68, + 0x6b3: 68, + 0x6b4: 68, + 0x6b5: 68, + 0x6b6: 68, + 0x6b7: 68, + 0x6b8: 68, + 0x6b9: 68, + 0x6ba: 68, + 0x6bb: 68, + 0x6bc: 68, + 0x6bd: 68, + 0x6be: 68, + 0x6bf: 68, + 0x6c0: 82, + 0x6c1: 68, + 0x6c2: 68, + 0x6c3: 82, + 0x6c4: 82, + 0x6c5: 82, + 0x6c6: 82, + 0x6c7: 82, + 0x6c8: 82, + 0x6c9: 82, + 0x6ca: 82, + 0x6cb: 82, + 0x6cc: 68, + 0x6cd: 82, + 0x6ce: 68, + 0x6cf: 82, + 0x6d0: 68, + 0x6d1: 68, + 0x6d2: 82, + 0x6d3: 82, + 0x6d5: 82, + 0x6dd: 85, + 0x6ee: 82, + 0x6ef: 82, + 0x6fa: 68, + 0x6fb: 
68, + 0x6fc: 68, + 0x6ff: 68, + 0x70f: 84, + 0x710: 82, + 0x712: 68, + 0x713: 68, + 0x714: 68, + 0x715: 82, + 0x716: 82, + 0x717: 82, + 0x718: 82, + 0x719: 82, + 0x71a: 68, + 0x71b: 68, + 0x71c: 68, + 0x71d: 68, + 0x71e: 82, + 0x71f: 68, + 0x720: 68, + 0x721: 68, + 0x722: 68, + 0x723: 68, + 0x724: 68, + 0x725: 68, + 0x726: 68, + 0x727: 68, + 0x728: 82, + 0x729: 68, + 0x72a: 82, + 0x72b: 68, + 0x72c: 82, + 0x72d: 68, + 0x72e: 68, + 0x72f: 82, + 0x74d: 82, + 0x74e: 68, + 0x74f: 68, + 0x750: 68, + 0x751: 68, + 0x752: 68, + 0x753: 68, + 0x754: 68, + 0x755: 68, + 0x756: 68, + 0x757: 68, + 0x758: 68, + 0x759: 82, + 0x75a: 82, + 0x75b: 82, + 0x75c: 68, + 0x75d: 68, + 0x75e: 68, + 0x75f: 68, + 0x760: 68, + 0x761: 68, + 0x762: 68, + 0x763: 68, + 0x764: 68, + 0x765: 68, + 0x766: 68, + 0x767: 68, + 0x768: 68, + 0x769: 68, + 0x76a: 68, + 0x76b: 82, + 0x76c: 82, + 0x76d: 68, + 0x76e: 68, + 0x76f: 68, + 0x770: 68, + 0x771: 82, + 0x772: 68, + 0x773: 82, + 0x774: 82, + 0x775: 68, + 0x776: 68, + 0x777: 68, + 0x778: 82, + 0x779: 82, + 0x77a: 68, + 0x77b: 68, + 0x77c: 68, + 0x77d: 68, + 0x77e: 68, + 0x77f: 68, + 0x7ca: 68, + 0x7cb: 68, + 0x7cc: 68, + 0x7cd: 68, + 0x7ce: 68, + 0x7cf: 68, + 0x7d0: 68, + 0x7d1: 68, + 0x7d2: 68, + 0x7d3: 68, + 0x7d4: 68, + 0x7d5: 68, + 0x7d6: 68, + 0x7d7: 68, + 0x7d8: 68, + 0x7d9: 68, + 0x7da: 68, + 0x7db: 68, + 0x7dc: 68, + 0x7dd: 68, + 0x7de: 68, + 0x7df: 68, + 0x7e0: 68, + 0x7e1: 68, + 0x7e2: 68, + 0x7e3: 68, + 0x7e4: 68, + 0x7e5: 68, + 0x7e6: 68, + 0x7e7: 68, + 0x7e8: 68, + 0x7e9: 68, + 0x7ea: 68, + 0x7fa: 67, + 0x840: 82, + 0x841: 68, + 0x842: 68, + 0x843: 68, + 0x844: 68, + 0x845: 68, + 0x846: 82, + 0x847: 82, + 0x848: 68, + 0x849: 82, + 0x84a: 68, + 0x84b: 68, + 0x84c: 68, + 0x84d: 68, + 0x84e: 68, + 0x84f: 68, + 0x850: 68, + 0x851: 68, + 0x852: 68, + 0x853: 68, + 0x854: 82, + 0x855: 68, + 0x856: 85, + 0x857: 85, + 0x858: 85, + 0x860: 68, + 0x861: 85, + 0x862: 68, + 0x863: 68, + 0x864: 68, + 0x865: 68, + 0x866: 85, + 0x867: 82, + 0x868: 68, + 0x869: 82, + 0x86a: 82, + 0x8a0: 68, + 0x8a1: 68, + 0x8a2: 68, + 0x8a3: 68, + 0x8a4: 68, + 0x8a5: 68, + 0x8a6: 68, + 0x8a7: 68, + 0x8a8: 68, + 0x8a9: 68, + 0x8aa: 82, + 0x8ab: 82, + 0x8ac: 82, + 0x8ad: 85, + 0x8ae: 82, + 0x8af: 68, + 0x8b0: 68, + 0x8b1: 82, + 0x8b2: 82, + 0x8b3: 68, + 0x8b4: 68, + 0x8b6: 68, + 0x8b7: 68, + 0x8b8: 68, + 0x8b9: 82, + 0x8ba: 68, + 0x8bb: 68, + 0x8bc: 68, + 0x8bd: 68, + 0x8e2: 85, + 0x1806: 85, + 0x1807: 68, + 0x180a: 67, + 0x180e: 85, + 0x1820: 68, + 0x1821: 68, + 0x1822: 68, + 0x1823: 68, + 0x1824: 68, + 0x1825: 68, + 0x1826: 68, + 0x1827: 68, + 0x1828: 68, + 0x1829: 68, + 0x182a: 68, + 0x182b: 68, + 0x182c: 68, + 0x182d: 68, + 0x182e: 68, + 0x182f: 68, + 0x1830: 68, + 0x1831: 68, + 0x1832: 68, + 0x1833: 68, + 0x1834: 68, + 0x1835: 68, + 0x1836: 68, + 0x1837: 68, + 0x1838: 68, + 0x1839: 68, + 0x183a: 68, + 0x183b: 68, + 0x183c: 68, + 0x183d: 68, + 0x183e: 68, + 0x183f: 68, + 0x1840: 68, + 0x1841: 68, + 0x1842: 68, + 0x1843: 68, + 0x1844: 68, + 0x1845: 68, + 0x1846: 68, + 0x1847: 68, + 0x1848: 68, + 0x1849: 68, + 0x184a: 68, + 0x184b: 68, + 0x184c: 68, + 0x184d: 68, + 0x184e: 68, + 0x184f: 68, + 0x1850: 68, + 0x1851: 68, + 0x1852: 68, + 0x1853: 68, + 0x1854: 68, + 0x1855: 68, + 0x1856: 68, + 0x1857: 68, + 0x1858: 68, + 0x1859: 68, + 0x185a: 68, + 0x185b: 68, + 0x185c: 68, + 0x185d: 68, + 0x185e: 68, + 0x185f: 68, + 0x1860: 68, + 0x1861: 68, + 0x1862: 68, + 0x1863: 68, + 0x1864: 68, + 0x1865: 68, + 0x1866: 68, + 0x1867: 68, + 0x1868: 68, + 0x1869: 68, + 0x186a: 68, + 0x186b: 68, + 0x186c: 68, + 0x186d: 
68, + 0x186e: 68, + 0x186f: 68, + 0x1870: 68, + 0x1871: 68, + 0x1872: 68, + 0x1873: 68, + 0x1874: 68, + 0x1875: 68, + 0x1876: 68, + 0x1877: 68, + 0x1878: 68, + 0x1880: 85, + 0x1881: 85, + 0x1882: 85, + 0x1883: 85, + 0x1884: 85, + 0x1885: 84, + 0x1886: 84, + 0x1887: 68, + 0x1888: 68, + 0x1889: 68, + 0x188a: 68, + 0x188b: 68, + 0x188c: 68, + 0x188d: 68, + 0x188e: 68, + 0x188f: 68, + 0x1890: 68, + 0x1891: 68, + 0x1892: 68, + 0x1893: 68, + 0x1894: 68, + 0x1895: 68, + 0x1896: 68, + 0x1897: 68, + 0x1898: 68, + 0x1899: 68, + 0x189a: 68, + 0x189b: 68, + 0x189c: 68, + 0x189d: 68, + 0x189e: 68, + 0x189f: 68, + 0x18a0: 68, + 0x18a1: 68, + 0x18a2: 68, + 0x18a3: 68, + 0x18a4: 68, + 0x18a5: 68, + 0x18a6: 68, + 0x18a7: 68, + 0x18a8: 68, + 0x18aa: 68, + 0x200c: 85, + 0x200d: 67, + 0x202f: 85, + 0x2066: 85, + 0x2067: 85, + 0x2068: 85, + 0x2069: 85, + 0xa840: 68, + 0xa841: 68, + 0xa842: 68, + 0xa843: 68, + 0xa844: 68, + 0xa845: 68, + 0xa846: 68, + 0xa847: 68, + 0xa848: 68, + 0xa849: 68, + 0xa84a: 68, + 0xa84b: 68, + 0xa84c: 68, + 0xa84d: 68, + 0xa84e: 68, + 0xa84f: 68, + 0xa850: 68, + 0xa851: 68, + 0xa852: 68, + 0xa853: 68, + 0xa854: 68, + 0xa855: 68, + 0xa856: 68, + 0xa857: 68, + 0xa858: 68, + 0xa859: 68, + 0xa85a: 68, + 0xa85b: 68, + 0xa85c: 68, + 0xa85d: 68, + 0xa85e: 68, + 0xa85f: 68, + 0xa860: 68, + 0xa861: 68, + 0xa862: 68, + 0xa863: 68, + 0xa864: 68, + 0xa865: 68, + 0xa866: 68, + 0xa867: 68, + 0xa868: 68, + 0xa869: 68, + 0xa86a: 68, + 0xa86b: 68, + 0xa86c: 68, + 0xa86d: 68, + 0xa86e: 68, + 0xa86f: 68, + 0xa870: 68, + 0xa871: 68, + 0xa872: 76, + 0xa873: 85, + 0x10ac0: 68, + 0x10ac1: 68, + 0x10ac2: 68, + 0x10ac3: 68, + 0x10ac4: 68, + 0x10ac5: 82, + 0x10ac6: 85, + 0x10ac7: 82, + 0x10ac8: 85, + 0x10ac9: 82, + 0x10aca: 82, + 0x10acb: 85, + 0x10acc: 85, + 0x10acd: 76, + 0x10ace: 82, + 0x10acf: 82, + 0x10ad0: 82, + 0x10ad1: 82, + 0x10ad2: 82, + 0x10ad3: 68, + 0x10ad4: 68, + 0x10ad5: 68, + 0x10ad6: 68, + 0x10ad7: 76, + 0x10ad8: 68, + 0x10ad9: 68, + 0x10ada: 68, + 0x10adb: 68, + 0x10adc: 68, + 0x10add: 82, + 0x10ade: 68, + 0x10adf: 68, + 0x10ae0: 68, + 0x10ae1: 82, + 0x10ae2: 85, + 0x10ae3: 85, + 0x10ae4: 82, + 0x10aeb: 68, + 0x10aec: 68, + 0x10aed: 68, + 0x10aee: 68, + 0x10aef: 82, + 0x10b80: 68, + 0x10b81: 82, + 0x10b82: 68, + 0x10b83: 82, + 0x10b84: 82, + 0x10b85: 82, + 0x10b86: 68, + 0x10b87: 68, + 0x10b88: 68, + 0x10b89: 82, + 0x10b8a: 68, + 0x10b8b: 68, + 0x10b8c: 82, + 0x10b8d: 68, + 0x10b8e: 82, + 0x10b8f: 82, + 0x10b90: 68, + 0x10b91: 82, + 0x10ba9: 82, + 0x10baa: 82, + 0x10bab: 82, + 0x10bac: 82, + 0x10bad: 68, + 0x10bae: 68, + 0x10baf: 85, + 0x10d00: 76, + 0x10d01: 68, + 0x10d02: 68, + 0x10d03: 68, + 0x10d04: 68, + 0x10d05: 68, + 0x10d06: 68, + 0x10d07: 68, + 0x10d08: 68, + 0x10d09: 68, + 0x10d0a: 68, + 0x10d0b: 68, + 0x10d0c: 68, + 0x10d0d: 68, + 0x10d0e: 68, + 0x10d0f: 68, + 0x10d10: 68, + 0x10d11: 68, + 0x10d12: 68, + 0x10d13: 68, + 0x10d14: 68, + 0x10d15: 68, + 0x10d16: 68, + 0x10d17: 68, + 0x10d18: 68, + 0x10d19: 68, + 0x10d1a: 68, + 0x10d1b: 68, + 0x10d1c: 68, + 0x10d1d: 68, + 0x10d1e: 68, + 0x10d1f: 68, + 0x10d20: 68, + 0x10d21: 68, + 0x10d22: 82, + 0x10d23: 68, + 0x10f30: 68, + 0x10f31: 68, + 0x10f32: 68, + 0x10f33: 82, + 0x10f34: 68, + 0x10f35: 68, + 0x10f36: 68, + 0x10f37: 68, + 0x10f38: 68, + 0x10f39: 68, + 0x10f3a: 68, + 0x10f3b: 68, + 0x10f3c: 68, + 0x10f3d: 68, + 0x10f3e: 68, + 0x10f3f: 68, + 0x10f40: 68, + 0x10f41: 68, + 0x10f42: 68, + 0x10f43: 68, + 0x10f44: 68, + 0x10f45: 85, + 0x10f51: 68, + 0x10f52: 68, + 0x10f53: 68, + 0x10f54: 82, + 0x110bd: 85, + 0x110cd: 85, + 0x1e900: 68, + 
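# NOTE (editorial annotation, not part of the vendored file): the integer values in
+ # joining_types are ord() codes for Unicode joining types, matched against ord('T'),
+ # ord('L'), ord('D') and ord('R') in core.valid_contextj: 67 = 'C' (join-causing),
+ # 68 = 'D' (dual-joining), 76 = 'L' (left-joining), 82 = 'R' (right-joining),
+ # 84 = 'T' (transparent), 85 = 'U' (non-joining).
+ # e.g. joining_types[0x628] == 68 == ord('D'): U+0628 ARABIC LETTER BEH joins on both sides.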
0x1e901: 68, + 0x1e902: 68, + 0x1e903: 68, + 0x1e904: 68, + 0x1e905: 68, + 0x1e906: 68, + 0x1e907: 68, + 0x1e908: 68, + 0x1e909: 68, + 0x1e90a: 68, + 0x1e90b: 68, + 0x1e90c: 68, + 0x1e90d: 68, + 0x1e90e: 68, + 0x1e90f: 68, + 0x1e910: 68, + 0x1e911: 68, + 0x1e912: 68, + 0x1e913: 68, + 0x1e914: 68, + 0x1e915: 68, + 0x1e916: 68, + 0x1e917: 68, + 0x1e918: 68, + 0x1e919: 68, + 0x1e91a: 68, + 0x1e91b: 68, + 0x1e91c: 68, + 0x1e91d: 68, + 0x1e91e: 68, + 0x1e91f: 68, + 0x1e920: 68, + 0x1e921: 68, + 0x1e922: 68, + 0x1e923: 68, + 0x1e924: 68, + 0x1e925: 68, + 0x1e926: 68, + 0x1e927: 68, + 0x1e928: 68, + 0x1e929: 68, + 0x1e92a: 68, + 0x1e92b: 68, + 0x1e92c: 68, + 0x1e92d: 68, + 0x1e92e: 68, + 0x1e92f: 68, + 0x1e930: 68, + 0x1e931: 68, + 0x1e932: 68, + 0x1e933: 68, + 0x1e934: 68, + 0x1e935: 68, + 0x1e936: 68, + 0x1e937: 68, + 0x1e938: 68, + 0x1e939: 68, + 0x1e93a: 68, + 0x1e93b: 68, + 0x1e93c: 68, + 0x1e93d: 68, + 0x1e93e: 68, + 0x1e93f: 68, + 0x1e940: 68, + 0x1e941: 68, + 0x1e942: 68, + 0x1e943: 68, +} +codepoint_classes = { + 'PVALID': ( + 0x2d0000002e, + 0x300000003a, + 0x610000007b, + 0xdf000000f7, + 0xf800000100, + 0x10100000102, + 0x10300000104, + 0x10500000106, + 0x10700000108, + 0x1090000010a, + 0x10b0000010c, + 0x10d0000010e, + 0x10f00000110, + 0x11100000112, + 0x11300000114, + 0x11500000116, + 0x11700000118, + 0x1190000011a, + 0x11b0000011c, + 0x11d0000011e, + 0x11f00000120, + 0x12100000122, + 0x12300000124, + 0x12500000126, + 0x12700000128, + 0x1290000012a, + 0x12b0000012c, + 0x12d0000012e, + 0x12f00000130, + 0x13100000132, + 0x13500000136, + 0x13700000139, + 0x13a0000013b, + 0x13c0000013d, + 0x13e0000013f, + 0x14200000143, + 0x14400000145, + 0x14600000147, + 0x14800000149, + 0x14b0000014c, + 0x14d0000014e, + 0x14f00000150, + 0x15100000152, + 0x15300000154, + 0x15500000156, + 0x15700000158, + 0x1590000015a, + 0x15b0000015c, + 0x15d0000015e, + 0x15f00000160, + 0x16100000162, + 0x16300000164, + 0x16500000166, + 0x16700000168, + 0x1690000016a, + 0x16b0000016c, + 0x16d0000016e, + 0x16f00000170, + 0x17100000172, + 0x17300000174, + 0x17500000176, + 0x17700000178, + 0x17a0000017b, + 0x17c0000017d, + 0x17e0000017f, + 0x18000000181, + 0x18300000184, + 0x18500000186, + 0x18800000189, + 0x18c0000018e, + 0x19200000193, + 0x19500000196, + 0x1990000019c, + 0x19e0000019f, + 0x1a1000001a2, + 0x1a3000001a4, + 0x1a5000001a6, + 0x1a8000001a9, + 0x1aa000001ac, + 0x1ad000001ae, + 0x1b0000001b1, + 0x1b4000001b5, + 0x1b6000001b7, + 0x1b9000001bc, + 0x1bd000001c4, + 0x1ce000001cf, + 0x1d0000001d1, + 0x1d2000001d3, + 0x1d4000001d5, + 0x1d6000001d7, + 0x1d8000001d9, + 0x1da000001db, + 0x1dc000001de, + 0x1df000001e0, + 0x1e1000001e2, + 0x1e3000001e4, + 0x1e5000001e6, + 0x1e7000001e8, + 0x1e9000001ea, + 0x1eb000001ec, + 0x1ed000001ee, + 0x1ef000001f1, + 0x1f5000001f6, + 0x1f9000001fa, + 0x1fb000001fc, + 0x1fd000001fe, + 0x1ff00000200, + 0x20100000202, + 0x20300000204, + 0x20500000206, + 0x20700000208, + 0x2090000020a, + 0x20b0000020c, + 0x20d0000020e, + 0x20f00000210, + 0x21100000212, + 0x21300000214, + 0x21500000216, + 0x21700000218, + 0x2190000021a, + 0x21b0000021c, + 0x21d0000021e, + 0x21f00000220, + 0x22100000222, + 0x22300000224, + 0x22500000226, + 0x22700000228, + 0x2290000022a, + 0x22b0000022c, + 0x22d0000022e, + 0x22f00000230, + 0x23100000232, + 0x2330000023a, + 0x23c0000023d, + 0x23f00000241, + 0x24200000243, + 0x24700000248, + 0x2490000024a, + 0x24b0000024c, + 0x24d0000024e, + 0x24f000002b0, + 0x2b9000002c2, + 0x2c6000002d2, + 0x2ec000002ed, + 0x2ee000002ef, + 0x30000000340, + 0x34200000343, + 0x3460000034f, 
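+ # NOTE (editorial annotation, not part of the vendored file): each entry packs a
+ # half-open codepoint range into one integer, (start << 32) | end, the same encoding
+ # as _encode_range() in intranges.py; core.check_label tests membership with
+ # intranges_contain(). e.g. 0x610000007b == (0x61 << 32) | 0x7b, so 'a'..'z' are PVALID.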
+ 0x35000000370, + 0x37100000372, + 0x37300000374, + 0x37700000378, + 0x37b0000037e, + 0x39000000391, + 0x3ac000003cf, + 0x3d7000003d8, + 0x3d9000003da, + 0x3db000003dc, + 0x3dd000003de, + 0x3df000003e0, + 0x3e1000003e2, + 0x3e3000003e4, + 0x3e5000003e6, + 0x3e7000003e8, + 0x3e9000003ea, + 0x3eb000003ec, + 0x3ed000003ee, + 0x3ef000003f0, + 0x3f3000003f4, + 0x3f8000003f9, + 0x3fb000003fd, + 0x43000000460, + 0x46100000462, + 0x46300000464, + 0x46500000466, + 0x46700000468, + 0x4690000046a, + 0x46b0000046c, + 0x46d0000046e, + 0x46f00000470, + 0x47100000472, + 0x47300000474, + 0x47500000476, + 0x47700000478, + 0x4790000047a, + 0x47b0000047c, + 0x47d0000047e, + 0x47f00000480, + 0x48100000482, + 0x48300000488, + 0x48b0000048c, + 0x48d0000048e, + 0x48f00000490, + 0x49100000492, + 0x49300000494, + 0x49500000496, + 0x49700000498, + 0x4990000049a, + 0x49b0000049c, + 0x49d0000049e, + 0x49f000004a0, + 0x4a1000004a2, + 0x4a3000004a4, + 0x4a5000004a6, + 0x4a7000004a8, + 0x4a9000004aa, + 0x4ab000004ac, + 0x4ad000004ae, + 0x4af000004b0, + 0x4b1000004b2, + 0x4b3000004b4, + 0x4b5000004b6, + 0x4b7000004b8, + 0x4b9000004ba, + 0x4bb000004bc, + 0x4bd000004be, + 0x4bf000004c0, + 0x4c2000004c3, + 0x4c4000004c5, + 0x4c6000004c7, + 0x4c8000004c9, + 0x4ca000004cb, + 0x4cc000004cd, + 0x4ce000004d0, + 0x4d1000004d2, + 0x4d3000004d4, + 0x4d5000004d6, + 0x4d7000004d8, + 0x4d9000004da, + 0x4db000004dc, + 0x4dd000004de, + 0x4df000004e0, + 0x4e1000004e2, + 0x4e3000004e4, + 0x4e5000004e6, + 0x4e7000004e8, + 0x4e9000004ea, + 0x4eb000004ec, + 0x4ed000004ee, + 0x4ef000004f0, + 0x4f1000004f2, + 0x4f3000004f4, + 0x4f5000004f6, + 0x4f7000004f8, + 0x4f9000004fa, + 0x4fb000004fc, + 0x4fd000004fe, + 0x4ff00000500, + 0x50100000502, + 0x50300000504, + 0x50500000506, + 0x50700000508, + 0x5090000050a, + 0x50b0000050c, + 0x50d0000050e, + 0x50f00000510, + 0x51100000512, + 0x51300000514, + 0x51500000516, + 0x51700000518, + 0x5190000051a, + 0x51b0000051c, + 0x51d0000051e, + 0x51f00000520, + 0x52100000522, + 0x52300000524, + 0x52500000526, + 0x52700000528, + 0x5290000052a, + 0x52b0000052c, + 0x52d0000052e, + 0x52f00000530, + 0x5590000055a, + 0x56000000587, + 0x58800000589, + 0x591000005be, + 0x5bf000005c0, + 0x5c1000005c3, + 0x5c4000005c6, + 0x5c7000005c8, + 0x5d0000005eb, + 0x5ef000005f3, + 0x6100000061b, + 0x62000000640, + 0x64100000660, + 0x66e00000675, + 0x679000006d4, + 0x6d5000006dd, + 0x6df000006e9, + 0x6ea000006f0, + 0x6fa00000700, + 0x7100000074b, + 0x74d000007b2, + 0x7c0000007f6, + 0x7fd000007fe, + 0x8000000082e, + 0x8400000085c, + 0x8600000086b, + 0x8a0000008b5, + 0x8b6000008be, + 0x8d3000008e2, + 0x8e300000958, + 0x96000000964, + 0x96600000970, + 0x97100000984, + 0x9850000098d, + 0x98f00000991, + 0x993000009a9, + 0x9aa000009b1, + 0x9b2000009b3, + 0x9b6000009ba, + 0x9bc000009c5, + 0x9c7000009c9, + 0x9cb000009cf, + 0x9d7000009d8, + 0x9e0000009e4, + 0x9e6000009f2, + 0x9fc000009fd, + 0x9fe000009ff, + 0xa0100000a04, + 0xa0500000a0b, + 0xa0f00000a11, + 0xa1300000a29, + 0xa2a00000a31, + 0xa3200000a33, + 0xa3500000a36, + 0xa3800000a3a, + 0xa3c00000a3d, + 0xa3e00000a43, + 0xa4700000a49, + 0xa4b00000a4e, + 0xa5100000a52, + 0xa5c00000a5d, + 0xa6600000a76, + 0xa8100000a84, + 0xa8500000a8e, + 0xa8f00000a92, + 0xa9300000aa9, + 0xaaa00000ab1, + 0xab200000ab4, + 0xab500000aba, + 0xabc00000ac6, + 0xac700000aca, + 0xacb00000ace, + 0xad000000ad1, + 0xae000000ae4, + 0xae600000af0, + 0xaf900000b00, + 0xb0100000b04, + 0xb0500000b0d, + 0xb0f00000b11, + 0xb1300000b29, + 0xb2a00000b31, + 0xb3200000b34, + 0xb3500000b3a, + 0xb3c00000b45, + 0xb4700000b49, + 
0xb4b00000b4e, + 0xb5600000b58, + 0xb5f00000b64, + 0xb6600000b70, + 0xb7100000b72, + 0xb8200000b84, + 0xb8500000b8b, + 0xb8e00000b91, + 0xb9200000b96, + 0xb9900000b9b, + 0xb9c00000b9d, + 0xb9e00000ba0, + 0xba300000ba5, + 0xba800000bab, + 0xbae00000bba, + 0xbbe00000bc3, + 0xbc600000bc9, + 0xbca00000bce, + 0xbd000000bd1, + 0xbd700000bd8, + 0xbe600000bf0, + 0xc0000000c0d, + 0xc0e00000c11, + 0xc1200000c29, + 0xc2a00000c3a, + 0xc3d00000c45, + 0xc4600000c49, + 0xc4a00000c4e, + 0xc5500000c57, + 0xc5800000c5b, + 0xc6000000c64, + 0xc6600000c70, + 0xc8000000c84, + 0xc8500000c8d, + 0xc8e00000c91, + 0xc9200000ca9, + 0xcaa00000cb4, + 0xcb500000cba, + 0xcbc00000cc5, + 0xcc600000cc9, + 0xcca00000cce, + 0xcd500000cd7, + 0xcde00000cdf, + 0xce000000ce4, + 0xce600000cf0, + 0xcf100000cf3, + 0xd0000000d04, + 0xd0500000d0d, + 0xd0e00000d11, + 0xd1200000d45, + 0xd4600000d49, + 0xd4a00000d4f, + 0xd5400000d58, + 0xd5f00000d64, + 0xd6600000d70, + 0xd7a00000d80, + 0xd8200000d84, + 0xd8500000d97, + 0xd9a00000db2, + 0xdb300000dbc, + 0xdbd00000dbe, + 0xdc000000dc7, + 0xdca00000dcb, + 0xdcf00000dd5, + 0xdd600000dd7, + 0xdd800000de0, + 0xde600000df0, + 0xdf200000df4, + 0xe0100000e33, + 0xe3400000e3b, + 0xe4000000e4f, + 0xe5000000e5a, + 0xe8100000e83, + 0xe8400000e85, + 0xe8700000e89, + 0xe8a00000e8b, + 0xe8d00000e8e, + 0xe9400000e98, + 0xe9900000ea0, + 0xea100000ea4, + 0xea500000ea6, + 0xea700000ea8, + 0xeaa00000eac, + 0xead00000eb3, + 0xeb400000eba, + 0xebb00000ebe, + 0xec000000ec5, + 0xec600000ec7, + 0xec800000ece, + 0xed000000eda, + 0xede00000ee0, + 0xf0000000f01, + 0xf0b00000f0c, + 0xf1800000f1a, + 0xf2000000f2a, + 0xf3500000f36, + 0xf3700000f38, + 0xf3900000f3a, + 0xf3e00000f43, + 0xf4400000f48, + 0xf4900000f4d, + 0xf4e00000f52, + 0xf5300000f57, + 0xf5800000f5c, + 0xf5d00000f69, + 0xf6a00000f6d, + 0xf7100000f73, + 0xf7400000f75, + 0xf7a00000f81, + 0xf8200000f85, + 0xf8600000f93, + 0xf9400000f98, + 0xf9900000f9d, + 0xf9e00000fa2, + 0xfa300000fa7, + 0xfa800000fac, + 0xfad00000fb9, + 0xfba00000fbd, + 0xfc600000fc7, + 0x10000000104a, + 0x10500000109e, + 0x10d0000010fb, + 0x10fd00001100, + 0x120000001249, + 0x124a0000124e, + 0x125000001257, + 0x125800001259, + 0x125a0000125e, + 0x126000001289, + 0x128a0000128e, + 0x1290000012b1, + 0x12b2000012b6, + 0x12b8000012bf, + 0x12c0000012c1, + 0x12c2000012c6, + 0x12c8000012d7, + 0x12d800001311, + 0x131200001316, + 0x13180000135b, + 0x135d00001360, + 0x138000001390, + 0x13a0000013f6, + 0x14010000166d, + 0x166f00001680, + 0x16810000169b, + 0x16a0000016eb, + 0x16f1000016f9, + 0x17000000170d, + 0x170e00001715, + 0x172000001735, + 0x174000001754, + 0x17600000176d, + 0x176e00001771, + 0x177200001774, + 0x1780000017b4, + 0x17b6000017d4, + 0x17d7000017d8, + 0x17dc000017de, + 0x17e0000017ea, + 0x18100000181a, + 0x182000001879, + 0x1880000018ab, + 0x18b0000018f6, + 0x19000000191f, + 0x19200000192c, + 0x19300000193c, + 0x19460000196e, + 0x197000001975, + 0x1980000019ac, + 0x19b0000019ca, + 0x19d0000019da, + 0x1a0000001a1c, + 0x1a2000001a5f, + 0x1a6000001a7d, + 0x1a7f00001a8a, + 0x1a9000001a9a, + 0x1aa700001aa8, + 0x1ab000001abe, + 0x1b0000001b4c, + 0x1b5000001b5a, + 0x1b6b00001b74, + 0x1b8000001bf4, + 0x1c0000001c38, + 0x1c4000001c4a, + 0x1c4d00001c7e, + 0x1cd000001cd3, + 0x1cd400001cfa, + 0x1d0000001d2c, + 0x1d2f00001d30, + 0x1d3b00001d3c, + 0x1d4e00001d4f, + 0x1d6b00001d78, + 0x1d7900001d9b, + 0x1dc000001dfa, + 0x1dfb00001e00, + 0x1e0100001e02, + 0x1e0300001e04, + 0x1e0500001e06, + 0x1e0700001e08, + 0x1e0900001e0a, + 0x1e0b00001e0c, + 0x1e0d00001e0e, + 0x1e0f00001e10, + 0x1e1100001e12, + 
0x1e1300001e14, + 0x1e1500001e16, + 0x1e1700001e18, + 0x1e1900001e1a, + 0x1e1b00001e1c, + 0x1e1d00001e1e, + 0x1e1f00001e20, + 0x1e2100001e22, + 0x1e2300001e24, + 0x1e2500001e26, + 0x1e2700001e28, + 0x1e2900001e2a, + 0x1e2b00001e2c, + 0x1e2d00001e2e, + 0x1e2f00001e30, + 0x1e3100001e32, + 0x1e3300001e34, + 0x1e3500001e36, + 0x1e3700001e38, + 0x1e3900001e3a, + 0x1e3b00001e3c, + 0x1e3d00001e3e, + 0x1e3f00001e40, + 0x1e4100001e42, + 0x1e4300001e44, + 0x1e4500001e46, + 0x1e4700001e48, + 0x1e4900001e4a, + 0x1e4b00001e4c, + 0x1e4d00001e4e, + 0x1e4f00001e50, + 0x1e5100001e52, + 0x1e5300001e54, + 0x1e5500001e56, + 0x1e5700001e58, + 0x1e5900001e5a, + 0x1e5b00001e5c, + 0x1e5d00001e5e, + 0x1e5f00001e60, + 0x1e6100001e62, + 0x1e6300001e64, + 0x1e6500001e66, + 0x1e6700001e68, + 0x1e6900001e6a, + 0x1e6b00001e6c, + 0x1e6d00001e6e, + 0x1e6f00001e70, + 0x1e7100001e72, + 0x1e7300001e74, + 0x1e7500001e76, + 0x1e7700001e78, + 0x1e7900001e7a, + 0x1e7b00001e7c, + 0x1e7d00001e7e, + 0x1e7f00001e80, + 0x1e8100001e82, + 0x1e8300001e84, + 0x1e8500001e86, + 0x1e8700001e88, + 0x1e8900001e8a, + 0x1e8b00001e8c, + 0x1e8d00001e8e, + 0x1e8f00001e90, + 0x1e9100001e92, + 0x1e9300001e94, + 0x1e9500001e9a, + 0x1e9c00001e9e, + 0x1e9f00001ea0, + 0x1ea100001ea2, + 0x1ea300001ea4, + 0x1ea500001ea6, + 0x1ea700001ea8, + 0x1ea900001eaa, + 0x1eab00001eac, + 0x1ead00001eae, + 0x1eaf00001eb0, + 0x1eb100001eb2, + 0x1eb300001eb4, + 0x1eb500001eb6, + 0x1eb700001eb8, + 0x1eb900001eba, + 0x1ebb00001ebc, + 0x1ebd00001ebe, + 0x1ebf00001ec0, + 0x1ec100001ec2, + 0x1ec300001ec4, + 0x1ec500001ec6, + 0x1ec700001ec8, + 0x1ec900001eca, + 0x1ecb00001ecc, + 0x1ecd00001ece, + 0x1ecf00001ed0, + 0x1ed100001ed2, + 0x1ed300001ed4, + 0x1ed500001ed6, + 0x1ed700001ed8, + 0x1ed900001eda, + 0x1edb00001edc, + 0x1edd00001ede, + 0x1edf00001ee0, + 0x1ee100001ee2, + 0x1ee300001ee4, + 0x1ee500001ee6, + 0x1ee700001ee8, + 0x1ee900001eea, + 0x1eeb00001eec, + 0x1eed00001eee, + 0x1eef00001ef0, + 0x1ef100001ef2, + 0x1ef300001ef4, + 0x1ef500001ef6, + 0x1ef700001ef8, + 0x1ef900001efa, + 0x1efb00001efc, + 0x1efd00001efe, + 0x1eff00001f08, + 0x1f1000001f16, + 0x1f2000001f28, + 0x1f3000001f38, + 0x1f4000001f46, + 0x1f5000001f58, + 0x1f6000001f68, + 0x1f7000001f71, + 0x1f7200001f73, + 0x1f7400001f75, + 0x1f7600001f77, + 0x1f7800001f79, + 0x1f7a00001f7b, + 0x1f7c00001f7d, + 0x1fb000001fb2, + 0x1fb600001fb7, + 0x1fc600001fc7, + 0x1fd000001fd3, + 0x1fd600001fd8, + 0x1fe000001fe3, + 0x1fe400001fe8, + 0x1ff600001ff7, + 0x214e0000214f, + 0x218400002185, + 0x2c3000002c5f, + 0x2c6100002c62, + 0x2c6500002c67, + 0x2c6800002c69, + 0x2c6a00002c6b, + 0x2c6c00002c6d, + 0x2c7100002c72, + 0x2c7300002c75, + 0x2c7600002c7c, + 0x2c8100002c82, + 0x2c8300002c84, + 0x2c8500002c86, + 0x2c8700002c88, + 0x2c8900002c8a, + 0x2c8b00002c8c, + 0x2c8d00002c8e, + 0x2c8f00002c90, + 0x2c9100002c92, + 0x2c9300002c94, + 0x2c9500002c96, + 0x2c9700002c98, + 0x2c9900002c9a, + 0x2c9b00002c9c, + 0x2c9d00002c9e, + 0x2c9f00002ca0, + 0x2ca100002ca2, + 0x2ca300002ca4, + 0x2ca500002ca6, + 0x2ca700002ca8, + 0x2ca900002caa, + 0x2cab00002cac, + 0x2cad00002cae, + 0x2caf00002cb0, + 0x2cb100002cb2, + 0x2cb300002cb4, + 0x2cb500002cb6, + 0x2cb700002cb8, + 0x2cb900002cba, + 0x2cbb00002cbc, + 0x2cbd00002cbe, + 0x2cbf00002cc0, + 0x2cc100002cc2, + 0x2cc300002cc4, + 0x2cc500002cc6, + 0x2cc700002cc8, + 0x2cc900002cca, + 0x2ccb00002ccc, + 0x2ccd00002cce, + 0x2ccf00002cd0, + 0x2cd100002cd2, + 0x2cd300002cd4, + 0x2cd500002cd6, + 0x2cd700002cd8, + 0x2cd900002cda, + 0x2cdb00002cdc, + 0x2cdd00002cde, + 0x2cdf00002ce0, + 0x2ce100002ce2, + 
0x2ce300002ce5, + 0x2cec00002ced, + 0x2cee00002cf2, + 0x2cf300002cf4, + 0x2d0000002d26, + 0x2d2700002d28, + 0x2d2d00002d2e, + 0x2d3000002d68, + 0x2d7f00002d97, + 0x2da000002da7, + 0x2da800002daf, + 0x2db000002db7, + 0x2db800002dbf, + 0x2dc000002dc7, + 0x2dc800002dcf, + 0x2dd000002dd7, + 0x2dd800002ddf, + 0x2de000002e00, + 0x2e2f00002e30, + 0x300500003008, + 0x302a0000302e, + 0x303c0000303d, + 0x304100003097, + 0x30990000309b, + 0x309d0000309f, + 0x30a1000030fb, + 0x30fc000030ff, + 0x310500003130, + 0x31a0000031bb, + 0x31f000003200, + 0x340000004db6, + 0x4e0000009ff0, + 0xa0000000a48d, + 0xa4d00000a4fe, + 0xa5000000a60d, + 0xa6100000a62c, + 0xa6410000a642, + 0xa6430000a644, + 0xa6450000a646, + 0xa6470000a648, + 0xa6490000a64a, + 0xa64b0000a64c, + 0xa64d0000a64e, + 0xa64f0000a650, + 0xa6510000a652, + 0xa6530000a654, + 0xa6550000a656, + 0xa6570000a658, + 0xa6590000a65a, + 0xa65b0000a65c, + 0xa65d0000a65e, + 0xa65f0000a660, + 0xa6610000a662, + 0xa6630000a664, + 0xa6650000a666, + 0xa6670000a668, + 0xa6690000a66a, + 0xa66b0000a66c, + 0xa66d0000a670, + 0xa6740000a67e, + 0xa67f0000a680, + 0xa6810000a682, + 0xa6830000a684, + 0xa6850000a686, + 0xa6870000a688, + 0xa6890000a68a, + 0xa68b0000a68c, + 0xa68d0000a68e, + 0xa68f0000a690, + 0xa6910000a692, + 0xa6930000a694, + 0xa6950000a696, + 0xa6970000a698, + 0xa6990000a69a, + 0xa69b0000a69c, + 0xa69e0000a6e6, + 0xa6f00000a6f2, + 0xa7170000a720, + 0xa7230000a724, + 0xa7250000a726, + 0xa7270000a728, + 0xa7290000a72a, + 0xa72b0000a72c, + 0xa72d0000a72e, + 0xa72f0000a732, + 0xa7330000a734, + 0xa7350000a736, + 0xa7370000a738, + 0xa7390000a73a, + 0xa73b0000a73c, + 0xa73d0000a73e, + 0xa73f0000a740, + 0xa7410000a742, + 0xa7430000a744, + 0xa7450000a746, + 0xa7470000a748, + 0xa7490000a74a, + 0xa74b0000a74c, + 0xa74d0000a74e, + 0xa74f0000a750, + 0xa7510000a752, + 0xa7530000a754, + 0xa7550000a756, + 0xa7570000a758, + 0xa7590000a75a, + 0xa75b0000a75c, + 0xa75d0000a75e, + 0xa75f0000a760, + 0xa7610000a762, + 0xa7630000a764, + 0xa7650000a766, + 0xa7670000a768, + 0xa7690000a76a, + 0xa76b0000a76c, + 0xa76d0000a76e, + 0xa76f0000a770, + 0xa7710000a779, + 0xa77a0000a77b, + 0xa77c0000a77d, + 0xa77f0000a780, + 0xa7810000a782, + 0xa7830000a784, + 0xa7850000a786, + 0xa7870000a789, + 0xa78c0000a78d, + 0xa78e0000a790, + 0xa7910000a792, + 0xa7930000a796, + 0xa7970000a798, + 0xa7990000a79a, + 0xa79b0000a79c, + 0xa79d0000a79e, + 0xa79f0000a7a0, + 0xa7a10000a7a2, + 0xa7a30000a7a4, + 0xa7a50000a7a6, + 0xa7a70000a7a8, + 0xa7a90000a7aa, + 0xa7af0000a7b0, + 0xa7b50000a7b6, + 0xa7b70000a7b8, + 0xa7b90000a7ba, + 0xa7f70000a7f8, + 0xa7fa0000a828, + 0xa8400000a874, + 0xa8800000a8c6, + 0xa8d00000a8da, + 0xa8e00000a8f8, + 0xa8fb0000a8fc, + 0xa8fd0000a92e, + 0xa9300000a954, + 0xa9800000a9c1, + 0xa9cf0000a9da, + 0xa9e00000a9ff, + 0xaa000000aa37, + 0xaa400000aa4e, + 0xaa500000aa5a, + 0xaa600000aa77, + 0xaa7a0000aac3, + 0xaadb0000aade, + 0xaae00000aaf0, + 0xaaf20000aaf7, + 0xab010000ab07, + 0xab090000ab0f, + 0xab110000ab17, + 0xab200000ab27, + 0xab280000ab2f, + 0xab300000ab5b, + 0xab600000ab66, + 0xabc00000abeb, + 0xabec0000abee, + 0xabf00000abfa, + 0xac000000d7a4, + 0xfa0e0000fa10, + 0xfa110000fa12, + 0xfa130000fa15, + 0xfa1f0000fa20, + 0xfa210000fa22, + 0xfa230000fa25, + 0xfa270000fa2a, + 0xfb1e0000fb1f, + 0xfe200000fe30, + 0xfe730000fe74, + 0x100000001000c, + 0x1000d00010027, + 0x100280001003b, + 0x1003c0001003e, + 0x1003f0001004e, + 0x100500001005e, + 0x10080000100fb, + 0x101fd000101fe, + 0x102800001029d, + 0x102a0000102d1, + 0x102e0000102e1, + 0x1030000010320, + 0x1032d00010341, + 
0x103420001034a, + 0x103500001037b, + 0x103800001039e, + 0x103a0000103c4, + 0x103c8000103d0, + 0x104280001049e, + 0x104a0000104aa, + 0x104d8000104fc, + 0x1050000010528, + 0x1053000010564, + 0x1060000010737, + 0x1074000010756, + 0x1076000010768, + 0x1080000010806, + 0x1080800010809, + 0x1080a00010836, + 0x1083700010839, + 0x1083c0001083d, + 0x1083f00010856, + 0x1086000010877, + 0x108800001089f, + 0x108e0000108f3, + 0x108f4000108f6, + 0x1090000010916, + 0x109200001093a, + 0x10980000109b8, + 0x109be000109c0, + 0x10a0000010a04, + 0x10a0500010a07, + 0x10a0c00010a14, + 0x10a1500010a18, + 0x10a1900010a36, + 0x10a3800010a3b, + 0x10a3f00010a40, + 0x10a6000010a7d, + 0x10a8000010a9d, + 0x10ac000010ac8, + 0x10ac900010ae7, + 0x10b0000010b36, + 0x10b4000010b56, + 0x10b6000010b73, + 0x10b8000010b92, + 0x10c0000010c49, + 0x10cc000010cf3, + 0x10d0000010d28, + 0x10d3000010d3a, + 0x10f0000010f1d, + 0x10f2700010f28, + 0x10f3000010f51, + 0x1100000011047, + 0x1106600011070, + 0x1107f000110bb, + 0x110d0000110e9, + 0x110f0000110fa, + 0x1110000011135, + 0x1113600011140, + 0x1114400011147, + 0x1115000011174, + 0x1117600011177, + 0x11180000111c5, + 0x111c9000111cd, + 0x111d0000111db, + 0x111dc000111dd, + 0x1120000011212, + 0x1121300011238, + 0x1123e0001123f, + 0x1128000011287, + 0x1128800011289, + 0x1128a0001128e, + 0x1128f0001129e, + 0x1129f000112a9, + 0x112b0000112eb, + 0x112f0000112fa, + 0x1130000011304, + 0x113050001130d, + 0x1130f00011311, + 0x1131300011329, + 0x1132a00011331, + 0x1133200011334, + 0x113350001133a, + 0x1133b00011345, + 0x1134700011349, + 0x1134b0001134e, + 0x1135000011351, + 0x1135700011358, + 0x1135d00011364, + 0x113660001136d, + 0x1137000011375, + 0x114000001144b, + 0x114500001145a, + 0x1145e0001145f, + 0x11480000114c6, + 0x114c7000114c8, + 0x114d0000114da, + 0x11580000115b6, + 0x115b8000115c1, + 0x115d8000115de, + 0x1160000011641, + 0x1164400011645, + 0x116500001165a, + 0x11680000116b8, + 0x116c0000116ca, + 0x117000001171b, + 0x1171d0001172c, + 0x117300001173a, + 0x118000001183b, + 0x118c0000118ea, + 0x118ff00011900, + 0x11a0000011a3f, + 0x11a4700011a48, + 0x11a5000011a84, + 0x11a8600011a9a, + 0x11a9d00011a9e, + 0x11ac000011af9, + 0x11c0000011c09, + 0x11c0a00011c37, + 0x11c3800011c41, + 0x11c5000011c5a, + 0x11c7200011c90, + 0x11c9200011ca8, + 0x11ca900011cb7, + 0x11d0000011d07, + 0x11d0800011d0a, + 0x11d0b00011d37, + 0x11d3a00011d3b, + 0x11d3c00011d3e, + 0x11d3f00011d48, + 0x11d5000011d5a, + 0x11d6000011d66, + 0x11d6700011d69, + 0x11d6a00011d8f, + 0x11d9000011d92, + 0x11d9300011d99, + 0x11da000011daa, + 0x11ee000011ef7, + 0x120000001239a, + 0x1248000012544, + 0x130000001342f, + 0x1440000014647, + 0x1680000016a39, + 0x16a4000016a5f, + 0x16a6000016a6a, + 0x16ad000016aee, + 0x16af000016af5, + 0x16b0000016b37, + 0x16b4000016b44, + 0x16b5000016b5a, + 0x16b6300016b78, + 0x16b7d00016b90, + 0x16e6000016e80, + 0x16f0000016f45, + 0x16f5000016f7f, + 0x16f8f00016fa0, + 0x16fe000016fe2, + 0x17000000187f2, + 0x1880000018af3, + 0x1b0000001b11f, + 0x1b1700001b2fc, + 0x1bc000001bc6b, + 0x1bc700001bc7d, + 0x1bc800001bc89, + 0x1bc900001bc9a, + 0x1bc9d0001bc9f, + 0x1da000001da37, + 0x1da3b0001da6d, + 0x1da750001da76, + 0x1da840001da85, + 0x1da9b0001daa0, + 0x1daa10001dab0, + 0x1e0000001e007, + 0x1e0080001e019, + 0x1e01b0001e022, + 0x1e0230001e025, + 0x1e0260001e02b, + 0x1e8000001e8c5, + 0x1e8d00001e8d7, + 0x1e9220001e94b, + 0x1e9500001e95a, + 0x200000002a6d7, + 0x2a7000002b735, + 0x2b7400002b81e, + 0x2b8200002cea2, + 0x2ceb00002ebe1, + ), + 'CONTEXTJ': ( + 0x200c0000200e, + ), + 'CONTEXTO': ( + 0xb7000000b8, + 
0x37500000376, + 0x5f3000005f5, + 0x6600000066a, + 0x6f0000006fa, + 0x30fb000030fc, + ), +} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/idnadata.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/idnadata.pyc new file mode 100644 index 0000000..a00ead0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/idnadata.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/intranges.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/intranges.py new file mode 100644 index 0000000..fa8a735 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/intranges.py @@ -0,0 +1,53 @@ +""" +Given a list of integers, made up of (hopefully) a small number of long runs +of consecutive integers, compute a representation of the form +((start1, end1), (start2, end2) ...). Then answer the question "was x present +in the original list?" in time O(log(# runs)). +""" + +import bisect + +def intranges_from_list(list_): + """Represent a list of integers as a sequence of ranges: + ((start_0, end_0), (start_1, end_1), ...), such that the original + integers are exactly those x such that start_i <= x < end_i for some i. + + Ranges are encoded as single integers (start << 32 | end), not as tuples. + """ + + sorted_list = sorted(list_) + ranges = [] + last_write = -1 + for i in range(len(sorted_list)): + if i+1 < len(sorted_list): + if sorted_list[i] == sorted_list[i+1]-1: + continue + current_range = sorted_list[last_write+1:i+1] + ranges.append(_encode_range(current_range[0], current_range[-1] + 1)) + last_write = i + + return tuple(ranges) + +def _encode_range(start, end): + return (start << 32) | end + +def _decode_range(r): + return (r >> 32), (r & ((1 << 32) - 1)) + + +def intranges_contain(int_, ranges): + """Determine if `int_` falls into one of the ranges in `ranges`.""" + tuple_ = _encode_range(int_, 0) + pos = bisect.bisect_left(ranges, tuple_) + # we could be immediately ahead of a tuple (start, end) + # with start < int_ <= end + if pos > 0: + left, right = _decode_range(ranges[pos-1]) + if left <= int_ < right: + return True + # or we could be immediately behind a tuple (int_, end) + if pos < len(ranges): + left, _ = _decode_range(ranges[pos]) + if left == int_: + return True + return False diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/intranges.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/intranges.pyc new file mode 100644 index 0000000..ebfb1b0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/intranges.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/package_data.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/package_data.py new file mode 100644 index 0000000..257e898 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/package_data.py @@ -0,0 +1,2 @@ +__version__ = '2.8' + diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/package_data.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/package_data.pyc new file mode 100644 index 0000000..46c2a26 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/package_data.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/uts46data.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/uts46data.py new file mode 100644 index 0000000..a68ed4c --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/uts46data.py @@ -0,0 +1,8205 @@ +# This file is automatically generated by tools/idna-data +# vim: set fileencoding=utf-8 : + +"""IDNA Mapping Table from UTS46.""" + + +__version__ = "11.0.0" +def _seg_0(): + return [ + (0x0, '3'), + (0x1, '3'), + (0x2, '3'), + (0x3, '3'), + (0x4, '3'), + (0x5, '3'), + (0x6, '3'), + (0x7, '3'), + (0x8, '3'), + (0x9, '3'), + (0xA, '3'), + (0xB, '3'), + (0xC, '3'), + (0xD, '3'), + (0xE, '3'), + (0xF, '3'), + (0x10, '3'), + (0x11, '3'), + (0x12, '3'), + (0x13, '3'), + (0x14, '3'), + (0x15, '3'), + (0x16, '3'), + (0x17, '3'), + (0x18, '3'), + (0x19, '3'), + (0x1A, '3'), + (0x1B, '3'), + (0x1C, '3'), + (0x1D, '3'), + (0x1E, '3'), + (0x1F, '3'), + (0x20, '3'), + (0x21, '3'), + (0x22, '3'), + (0x23, '3'), + (0x24, '3'), + (0x25, '3'), + (0x26, '3'), + (0x27, '3'), + (0x28, '3'), + (0x29, '3'), + (0x2A, '3'), + (0x2B, '3'), + (0x2C, '3'), + (0x2D, 'V'), + (0x2E, 'V'), + (0x2F, '3'), + (0x30, 'V'), + (0x31, 'V'), + (0x32, 'V'), + (0x33, 'V'), + (0x34, 'V'), + (0x35, 'V'), + (0x36, 'V'), + (0x37, 'V'), + (0x38, 'V'), + (0x39, 'V'), + (0x3A, '3'), + (0x3B, '3'), + (0x3C, '3'), + (0x3D, '3'), + (0x3E, '3'), + (0x3F, '3'), + (0x40, '3'), + (0x41, 'M', u'a'), + (0x42, 'M', u'b'), + (0x43, 'M', u'c'), + (0x44, 'M', u'd'), + (0x45, 'M', u'e'), + (0x46, 'M', u'f'), + (0x47, 'M', u'g'), + (0x48, 'M', u'h'), + (0x49, 'M', u'i'), + (0x4A, 'M', u'j'), + (0x4B, 'M', u'k'), + (0x4C, 'M', u'l'), + (0x4D, 'M', u'm'), + (0x4E, 'M', u'n'), + (0x4F, 'M', u'o'), + (0x50, 'M', u'p'), + (0x51, 'M', u'q'), + (0x52, 'M', u'r'), + (0x53, 'M', u's'), + (0x54, 'M', u't'), + (0x55, 'M', u'u'), + (0x56, 'M', u'v'), + (0x57, 'M', u'w'), + (0x58, 'M', u'x'), + (0x59, 'M', u'y'), + (0x5A, 'M', u'z'), + (0x5B, '3'), + (0x5C, '3'), + (0x5D, '3'), + (0x5E, '3'), + (0x5F, '3'), + (0x60, '3'), + (0x61, 'V'), + (0x62, 'V'), + (0x63, 'V'), + ] + +def _seg_1(): + return [ + (0x64, 'V'), + (0x65, 'V'), + (0x66, 'V'), + (0x67, 'V'), + (0x68, 'V'), + (0x69, 'V'), + (0x6A, 'V'), + (0x6B, 'V'), + (0x6C, 'V'), + (0x6D, 'V'), + (0x6E, 'V'), + (0x6F, 'V'), + (0x70, 'V'), + (0x71, 'V'), + (0x72, 'V'), + (0x73, 'V'), + (0x74, 'V'), + (0x75, 'V'), + (0x76, 'V'), + (0x77, 'V'), + (0x78, 'V'), + (0x79, 'V'), + (0x7A, 'V'), + (0x7B, '3'), + (0x7C, '3'), + (0x7D, '3'), + (0x7E, '3'), + (0x7F, '3'), + (0x80, 'X'), + (0x81, 'X'), + (0x82, 'X'), + (0x83, 'X'), + (0x84, 'X'), + (0x85, 'X'), + (0x86, 'X'), + (0x87, 'X'), + (0x88, 'X'), + (0x89, 'X'), + (0x8A, 'X'), + (0x8B, 'X'), + (0x8C, 'X'), + (0x8D, 'X'), + (0x8E, 'X'), + (0x8F, 'X'), + (0x90, 'X'), + (0x91, 'X'), + (0x92, 'X'), + (0x93, 'X'), + (0x94, 'X'), + (0x95, 'X'), + (0x96, 'X'), + (0x97, 'X'), + (0x98, 'X'), + (0x99, 'X'), + (0x9A, 'X'), + (0x9B, 'X'), + (0x9C, 'X'), + (0x9D, 'X'), + (0x9E, 'X'), + (0x9F, 'X'), + (0xA0, '3', u' '), + (0xA1, 'V'), + (0xA2, 'V'), + (0xA3, 'V'), + (0xA4, 'V'), + (0xA5, 'V'), + (0xA6, 'V'), + (0xA7, 'V'), + (0xA8, '3', u' ̈'), + (0xA9, 'V'), + (0xAA, 'M', u'a'), + (0xAB, 'V'), + (0xAC, 'V'), + (0xAD, 'I'), + (0xAE, 'V'), + (0xAF, '3', u' ̄'), + (0xB0, 'V'), + (0xB1, 'V'), + (0xB2, 'M', u'2'), + (0xB3, 'M', u'3'), + (0xB4, '3', u' ́'), + (0xB5, 'M', u'μ'), + (0xB6, 'V'), + (0xB7, 'V'), + (0xB8, '3', u' ̧'), + (0xB9, 'M', u'1'), + (0xBA, 'M', u'o'), + (0xBB, 'V'), + (0xBC, 'M', u'1⁄4'), + (0xBD, 'M', u'1⁄2'), + (0xBE, 'M', u'3⁄4'), + (0xBF, 'V'), + (0xC0, 'M', u'à'), + (0xC1, 'M', u'á'), + (0xC2, 'M', u'â'), + (0xC3, 'M', u'ã'), + (0xC4, 'M', u'ä'), + (0xC5, 'M', u'å'), + 
(0xC6, 'M', u'æ'), + (0xC7, 'M', u'ç'), + ] + +def _seg_2(): + return [ + (0xC8, 'M', u'è'), + (0xC9, 'M', u'é'), + (0xCA, 'M', u'ê'), + (0xCB, 'M', u'ë'), + (0xCC, 'M', u'ì'), + (0xCD, 'M', u'í'), + (0xCE, 'M', u'î'), + (0xCF, 'M', u'ï'), + (0xD0, 'M', u'ð'), + (0xD1, 'M', u'ñ'), + (0xD2, 'M', u'ò'), + (0xD3, 'M', u'ó'), + (0xD4, 'M', u'ô'), + (0xD5, 'M', u'õ'), + (0xD6, 'M', u'ö'), + (0xD7, 'V'), + (0xD8, 'M', u'ø'), + (0xD9, 'M', u'ù'), + (0xDA, 'M', u'ú'), + (0xDB, 'M', u'û'), + (0xDC, 'M', u'ü'), + (0xDD, 'M', u'ý'), + (0xDE, 'M', u'þ'), + (0xDF, 'D', u'ss'), + (0xE0, 'V'), + (0xE1, 'V'), + (0xE2, 'V'), + (0xE3, 'V'), + (0xE4, 'V'), + (0xE5, 'V'), + (0xE6, 'V'), + (0xE7, 'V'), + (0xE8, 'V'), + (0xE9, 'V'), + (0xEA, 'V'), + (0xEB, 'V'), + (0xEC, 'V'), + (0xED, 'V'), + (0xEE, 'V'), + (0xEF, 'V'), + (0xF0, 'V'), + (0xF1, 'V'), + (0xF2, 'V'), + (0xF3, 'V'), + (0xF4, 'V'), + (0xF5, 'V'), + (0xF6, 'V'), + (0xF7, 'V'), + (0xF8, 'V'), + (0xF9, 'V'), + (0xFA, 'V'), + (0xFB, 'V'), + (0xFC, 'V'), + (0xFD, 'V'), + (0xFE, 'V'), + (0xFF, 'V'), + (0x100, 'M', u'ā'), + (0x101, 'V'), + (0x102, 'M', u'ă'), + (0x103, 'V'), + (0x104, 'M', u'ą'), + (0x105, 'V'), + (0x106, 'M', u'ć'), + (0x107, 'V'), + (0x108, 'M', u'ĉ'), + (0x109, 'V'), + (0x10A, 'M', u'ċ'), + (0x10B, 'V'), + (0x10C, 'M', u'č'), + (0x10D, 'V'), + (0x10E, 'M', u'ď'), + (0x10F, 'V'), + (0x110, 'M', u'đ'), + (0x111, 'V'), + (0x112, 'M', u'ē'), + (0x113, 'V'), + (0x114, 'M', u'ĕ'), + (0x115, 'V'), + (0x116, 'M', u'ė'), + (0x117, 'V'), + (0x118, 'M', u'ę'), + (0x119, 'V'), + (0x11A, 'M', u'ě'), + (0x11B, 'V'), + (0x11C, 'M', u'ĝ'), + (0x11D, 'V'), + (0x11E, 'M', u'ğ'), + (0x11F, 'V'), + (0x120, 'M', u'ġ'), + (0x121, 'V'), + (0x122, 'M', u'ģ'), + (0x123, 'V'), + (0x124, 'M', u'ĥ'), + (0x125, 'V'), + (0x126, 'M', u'ħ'), + (0x127, 'V'), + (0x128, 'M', u'ĩ'), + (0x129, 'V'), + (0x12A, 'M', u'ī'), + (0x12B, 'V'), + ] + +def _seg_3(): + return [ + (0x12C, 'M', u'ĭ'), + (0x12D, 'V'), + (0x12E, 'M', u'į'), + (0x12F, 'V'), + (0x130, 'M', u'i̇'), + (0x131, 'V'), + (0x132, 'M', u'ij'), + (0x134, 'M', u'ĵ'), + (0x135, 'V'), + (0x136, 'M', u'ķ'), + (0x137, 'V'), + (0x139, 'M', u'ĺ'), + (0x13A, 'V'), + (0x13B, 'M', u'ļ'), + (0x13C, 'V'), + (0x13D, 'M', u'ľ'), + (0x13E, 'V'), + (0x13F, 'M', u'l·'), + (0x141, 'M', u'ł'), + (0x142, 'V'), + (0x143, 'M', u'ń'), + (0x144, 'V'), + (0x145, 'M', u'ņ'), + (0x146, 'V'), + (0x147, 'M', u'ň'), + (0x148, 'V'), + (0x149, 'M', u'ʼn'), + (0x14A, 'M', u'ŋ'), + (0x14B, 'V'), + (0x14C, 'M', u'ō'), + (0x14D, 'V'), + (0x14E, 'M', u'ŏ'), + (0x14F, 'V'), + (0x150, 'M', u'ő'), + (0x151, 'V'), + (0x152, 'M', u'œ'), + (0x153, 'V'), + (0x154, 'M', u'ŕ'), + (0x155, 'V'), + (0x156, 'M', u'ŗ'), + (0x157, 'V'), + (0x158, 'M', u'ř'), + (0x159, 'V'), + (0x15A, 'M', u'ś'), + (0x15B, 'V'), + (0x15C, 'M', u'ŝ'), + (0x15D, 'V'), + (0x15E, 'M', u'ş'), + (0x15F, 'V'), + (0x160, 'M', u'š'), + (0x161, 'V'), + (0x162, 'M', u'ţ'), + (0x163, 'V'), + (0x164, 'M', u'ť'), + (0x165, 'V'), + (0x166, 'M', u'ŧ'), + (0x167, 'V'), + (0x168, 'M', u'ũ'), + (0x169, 'V'), + (0x16A, 'M', u'ū'), + (0x16B, 'V'), + (0x16C, 'M', u'ŭ'), + (0x16D, 'V'), + (0x16E, 'M', u'ů'), + (0x16F, 'V'), + (0x170, 'M', u'ű'), + (0x171, 'V'), + (0x172, 'M', u'ų'), + (0x173, 'V'), + (0x174, 'M', u'ŵ'), + (0x175, 'V'), + (0x176, 'M', u'ŷ'), + (0x177, 'V'), + (0x178, 'M', u'ÿ'), + (0x179, 'M', u'ź'), + (0x17A, 'V'), + (0x17B, 'M', u'ż'), + (0x17C, 'V'), + (0x17D, 'M', u'ž'), + (0x17E, 'V'), + (0x17F, 'M', u's'), + (0x180, 'V'), + (0x181, 'M', u'ɓ'), + (0x182, 'M', u'ƃ'), + (0x183, 'V'), + 
(0x184, 'M', u'ƅ'), + (0x185, 'V'), + (0x186, 'M', u'ɔ'), + (0x187, 'M', u'ƈ'), + (0x188, 'V'), + (0x189, 'M', u'ɖ'), + (0x18A, 'M', u'ɗ'), + (0x18B, 'M', u'ƌ'), + (0x18C, 'V'), + (0x18E, 'M', u'ǝ'), + (0x18F, 'M', u'ə'), + (0x190, 'M', u'ɛ'), + (0x191, 'M', u'ƒ'), + (0x192, 'V'), + (0x193, 'M', u'ɠ'), + ] + +def _seg_4(): + return [ + (0x194, 'M', u'ɣ'), + (0x195, 'V'), + (0x196, 'M', u'ɩ'), + (0x197, 'M', u'ɨ'), + (0x198, 'M', u'ƙ'), + (0x199, 'V'), + (0x19C, 'M', u'ɯ'), + (0x19D, 'M', u'ɲ'), + (0x19E, 'V'), + (0x19F, 'M', u'ɵ'), + (0x1A0, 'M', u'ơ'), + (0x1A1, 'V'), + (0x1A2, 'M', u'ƣ'), + (0x1A3, 'V'), + (0x1A4, 'M', u'ƥ'), + (0x1A5, 'V'), + (0x1A6, 'M', u'ʀ'), + (0x1A7, 'M', u'ƨ'), + (0x1A8, 'V'), + (0x1A9, 'M', u'ʃ'), + (0x1AA, 'V'), + (0x1AC, 'M', u'ƭ'), + (0x1AD, 'V'), + (0x1AE, 'M', u'ʈ'), + (0x1AF, 'M', u'ư'), + (0x1B0, 'V'), + (0x1B1, 'M', u'ʊ'), + (0x1B2, 'M', u'ʋ'), + (0x1B3, 'M', u'ƴ'), + (0x1B4, 'V'), + (0x1B5, 'M', u'ƶ'), + (0x1B6, 'V'), + (0x1B7, 'M', u'ʒ'), + (0x1B8, 'M', u'ƹ'), + (0x1B9, 'V'), + (0x1BC, 'M', u'ƽ'), + (0x1BD, 'V'), + (0x1C4, 'M', u'dž'), + (0x1C7, 'M', u'lj'), + (0x1CA, 'M', u'nj'), + (0x1CD, 'M', u'ǎ'), + (0x1CE, 'V'), + (0x1CF, 'M', u'ǐ'), + (0x1D0, 'V'), + (0x1D1, 'M', u'ǒ'), + (0x1D2, 'V'), + (0x1D3, 'M', u'ǔ'), + (0x1D4, 'V'), + (0x1D5, 'M', u'ǖ'), + (0x1D6, 'V'), + (0x1D7, 'M', u'ǘ'), + (0x1D8, 'V'), + (0x1D9, 'M', u'ǚ'), + (0x1DA, 'V'), + (0x1DB, 'M', u'ǜ'), + (0x1DC, 'V'), + (0x1DE, 'M', u'ǟ'), + (0x1DF, 'V'), + (0x1E0, 'M', u'ǡ'), + (0x1E1, 'V'), + (0x1E2, 'M', u'ǣ'), + (0x1E3, 'V'), + (0x1E4, 'M', u'ǥ'), + (0x1E5, 'V'), + (0x1E6, 'M', u'ǧ'), + (0x1E7, 'V'), + (0x1E8, 'M', u'ǩ'), + (0x1E9, 'V'), + (0x1EA, 'M', u'ǫ'), + (0x1EB, 'V'), + (0x1EC, 'M', u'ǭ'), + (0x1ED, 'V'), + (0x1EE, 'M', u'ǯ'), + (0x1EF, 'V'), + (0x1F1, 'M', u'dz'), + (0x1F4, 'M', u'ǵ'), + (0x1F5, 'V'), + (0x1F6, 'M', u'ƕ'), + (0x1F7, 'M', u'ƿ'), + (0x1F8, 'M', u'ǹ'), + (0x1F9, 'V'), + (0x1FA, 'M', u'ǻ'), + (0x1FB, 'V'), + (0x1FC, 'M', u'ǽ'), + (0x1FD, 'V'), + (0x1FE, 'M', u'ǿ'), + (0x1FF, 'V'), + (0x200, 'M', u'ȁ'), + (0x201, 'V'), + (0x202, 'M', u'ȃ'), + (0x203, 'V'), + (0x204, 'M', u'ȅ'), + (0x205, 'V'), + (0x206, 'M', u'ȇ'), + (0x207, 'V'), + (0x208, 'M', u'ȉ'), + (0x209, 'V'), + (0x20A, 'M', u'ȋ'), + (0x20B, 'V'), + (0x20C, 'M', u'ȍ'), + ] + +def _seg_5(): + return [ + (0x20D, 'V'), + (0x20E, 'M', u'ȏ'), + (0x20F, 'V'), + (0x210, 'M', u'ȑ'), + (0x211, 'V'), + (0x212, 'M', u'ȓ'), + (0x213, 'V'), + (0x214, 'M', u'ȕ'), + (0x215, 'V'), + (0x216, 'M', u'ȗ'), + (0x217, 'V'), + (0x218, 'M', u'ș'), + (0x219, 'V'), + (0x21A, 'M', u'ț'), + (0x21B, 'V'), + (0x21C, 'M', u'ȝ'), + (0x21D, 'V'), + (0x21E, 'M', u'ȟ'), + (0x21F, 'V'), + (0x220, 'M', u'ƞ'), + (0x221, 'V'), + (0x222, 'M', u'ȣ'), + (0x223, 'V'), + (0x224, 'M', u'ȥ'), + (0x225, 'V'), + (0x226, 'M', u'ȧ'), + (0x227, 'V'), + (0x228, 'M', u'ȩ'), + (0x229, 'V'), + (0x22A, 'M', u'ȫ'), + (0x22B, 'V'), + (0x22C, 'M', u'ȭ'), + (0x22D, 'V'), + (0x22E, 'M', u'ȯ'), + (0x22F, 'V'), + (0x230, 'M', u'ȱ'), + (0x231, 'V'), + (0x232, 'M', u'ȳ'), + (0x233, 'V'), + (0x23A, 'M', u'ⱥ'), + (0x23B, 'M', u'ȼ'), + (0x23C, 'V'), + (0x23D, 'M', u'ƚ'), + (0x23E, 'M', u'ⱦ'), + (0x23F, 'V'), + (0x241, 'M', u'ɂ'), + (0x242, 'V'), + (0x243, 'M', u'ƀ'), + (0x244, 'M', u'ʉ'), + (0x245, 'M', u'ʌ'), + (0x246, 'M', u'ɇ'), + (0x247, 'V'), + (0x248, 'M', u'ɉ'), + (0x249, 'V'), + (0x24A, 'M', u'ɋ'), + (0x24B, 'V'), + (0x24C, 'M', u'ɍ'), + (0x24D, 'V'), + (0x24E, 'M', u'ɏ'), + (0x24F, 'V'), + (0x2B0, 'M', u'h'), + (0x2B1, 'M', u'ɦ'), + (0x2B2, 'M', u'j'), + (0x2B3, 'M', 
[... the hunk continues with the auto-generated UTS #46 mapping table from the idna package vendored under project/venv: segments _seg_6() through _seg_40(), thousands of (start_codepoint, status[, mapping]) tuples spanning Greek, Cyrillic, Armenian, Hebrew, Arabic, Indic and Southeast Asian scripts, Tibetan, Georgian, Cherokee, phonetic extensions, Latin/Greek extended forms, punctuation and symbol compatibility forms, enclosed alphanumerics, CJK radicals, Hangul compatibility jamo, squared Katakana units, and CJK compatibility ideographs ...]
u'嶺'), + (0xF9AC, 'M', u'怜'), + (0xF9AD, 'M', u'玲'), + (0xF9AE, 'M', u'瑩'), + (0xF9AF, 'M', u'羚'), + (0xF9B0, 'M', u'聆'), + (0xF9B1, 'M', u'鈴'), + (0xF9B2, 'M', u'零'), + (0xF9B3, 'M', u'靈'), + (0xF9B4, 'M', u'領'), + (0xF9B5, 'M', u'例'), + (0xF9B6, 'M', u'禮'), + (0xF9B7, 'M', u'醴'), + (0xF9B8, 'M', u'隸'), + (0xF9B9, 'M', u'惡'), + (0xF9BA, 'M', u'了'), + (0xF9BB, 'M', u'僚'), + (0xF9BC, 'M', u'寮'), + (0xF9BD, 'M', u'尿'), + (0xF9BE, 'M', u'料'), + (0xF9BF, 'M', u'樂'), + (0xF9C0, 'M', u'燎'), + (0xF9C1, 'M', u'療'), + (0xF9C2, 'M', u'蓼'), + (0xF9C3, 'M', u'遼'), + (0xF9C4, 'M', u'龍'), + (0xF9C5, 'M', u'暈'), + (0xF9C6, 'M', u'阮'), + (0xF9C7, 'M', u'劉'), + (0xF9C8, 'M', u'杻'), + (0xF9C9, 'M', u'柳'), + (0xF9CA, 'M', u'流'), + (0xF9CB, 'M', u'溜'), + (0xF9CC, 'M', u'琉'), + (0xF9CD, 'M', u'留'), + (0xF9CE, 'M', u'硫'), + (0xF9CF, 'M', u'紐'), + (0xF9D0, 'M', u'類'), + (0xF9D1, 'M', u'六'), + (0xF9D2, 'M', u'戮'), + (0xF9D3, 'M', u'陸'), + (0xF9D4, 'M', u'倫'), + (0xF9D5, 'M', u'崙'), + (0xF9D6, 'M', u'淪'), + (0xF9D7, 'M', u'輪'), + (0xF9D8, 'M', u'律'), + (0xF9D9, 'M', u'慄'), + (0xF9DA, 'M', u'栗'), + (0xF9DB, 'M', u'率'), + (0xF9DC, 'M', u'隆'), + (0xF9DD, 'M', u'利'), + ] + +def _seg_41(): + return [ + (0xF9DE, 'M', u'吏'), + (0xF9DF, 'M', u'履'), + (0xF9E0, 'M', u'易'), + (0xF9E1, 'M', u'李'), + (0xF9E2, 'M', u'梨'), + (0xF9E3, 'M', u'泥'), + (0xF9E4, 'M', u'理'), + (0xF9E5, 'M', u'痢'), + (0xF9E6, 'M', u'罹'), + (0xF9E7, 'M', u'裏'), + (0xF9E8, 'M', u'裡'), + (0xF9E9, 'M', u'里'), + (0xF9EA, 'M', u'離'), + (0xF9EB, 'M', u'匿'), + (0xF9EC, 'M', u'溺'), + (0xF9ED, 'M', u'吝'), + (0xF9EE, 'M', u'燐'), + (0xF9EF, 'M', u'璘'), + (0xF9F0, 'M', u'藺'), + (0xF9F1, 'M', u'隣'), + (0xF9F2, 'M', u'鱗'), + (0xF9F3, 'M', u'麟'), + (0xF9F4, 'M', u'林'), + (0xF9F5, 'M', u'淋'), + (0xF9F6, 'M', u'臨'), + (0xF9F7, 'M', u'立'), + (0xF9F8, 'M', u'笠'), + (0xF9F9, 'M', u'粒'), + (0xF9FA, 'M', u'狀'), + (0xF9FB, 'M', u'炙'), + (0xF9FC, 'M', u'識'), + (0xF9FD, 'M', u'什'), + (0xF9FE, 'M', u'茶'), + (0xF9FF, 'M', u'刺'), + (0xFA00, 'M', u'切'), + (0xFA01, 'M', u'度'), + (0xFA02, 'M', u'拓'), + (0xFA03, 'M', u'糖'), + (0xFA04, 'M', u'宅'), + (0xFA05, 'M', u'洞'), + (0xFA06, 'M', u'暴'), + (0xFA07, 'M', u'輻'), + (0xFA08, 'M', u'行'), + (0xFA09, 'M', u'降'), + (0xFA0A, 'M', u'見'), + (0xFA0B, 'M', u'廓'), + (0xFA0C, 'M', u'兀'), + (0xFA0D, 'M', u'嗀'), + (0xFA0E, 'V'), + (0xFA10, 'M', u'塚'), + (0xFA11, 'V'), + (0xFA12, 'M', u'晴'), + (0xFA13, 'V'), + (0xFA15, 'M', u'凞'), + (0xFA16, 'M', u'猪'), + (0xFA17, 'M', u'益'), + (0xFA18, 'M', u'礼'), + (0xFA19, 'M', u'神'), + (0xFA1A, 'M', u'祥'), + (0xFA1B, 'M', u'福'), + (0xFA1C, 'M', u'靖'), + (0xFA1D, 'M', u'精'), + (0xFA1E, 'M', u'羽'), + (0xFA1F, 'V'), + (0xFA20, 'M', u'蘒'), + (0xFA21, 'V'), + (0xFA22, 'M', u'諸'), + (0xFA23, 'V'), + (0xFA25, 'M', u'逸'), + (0xFA26, 'M', u'都'), + (0xFA27, 'V'), + (0xFA2A, 'M', u'飯'), + (0xFA2B, 'M', u'飼'), + (0xFA2C, 'M', u'館'), + (0xFA2D, 'M', u'鶴'), + (0xFA2E, 'M', u'郞'), + (0xFA2F, 'M', u'隷'), + (0xFA30, 'M', u'侮'), + (0xFA31, 'M', u'僧'), + (0xFA32, 'M', u'免'), + (0xFA33, 'M', u'勉'), + (0xFA34, 'M', u'勤'), + (0xFA35, 'M', u'卑'), + (0xFA36, 'M', u'喝'), + (0xFA37, 'M', u'嘆'), + (0xFA38, 'M', u'器'), + (0xFA39, 'M', u'塀'), + (0xFA3A, 'M', u'墨'), + (0xFA3B, 'M', u'層'), + (0xFA3C, 'M', u'屮'), + (0xFA3D, 'M', u'悔'), + (0xFA3E, 'M', u'慨'), + (0xFA3F, 'M', u'憎'), + (0xFA40, 'M', u'懲'), + (0xFA41, 'M', u'敏'), + (0xFA42, 'M', u'既'), + (0xFA43, 'M', u'暑'), + (0xFA44, 'M', u'梅'), + (0xFA45, 'M', u'海'), + (0xFA46, 'M', u'渚'), + ] + +def _seg_42(): + return [ + (0xFA47, 'M', u'漢'), + (0xFA48, 'M', u'煮'), + (0xFA49, 'M', u'爫'), + 
(0xFA4A, 'M', u'琢'), + (0xFA4B, 'M', u'碑'), + (0xFA4C, 'M', u'社'), + (0xFA4D, 'M', u'祉'), + (0xFA4E, 'M', u'祈'), + (0xFA4F, 'M', u'祐'), + (0xFA50, 'M', u'祖'), + (0xFA51, 'M', u'祝'), + (0xFA52, 'M', u'禍'), + (0xFA53, 'M', u'禎'), + (0xFA54, 'M', u'穀'), + (0xFA55, 'M', u'突'), + (0xFA56, 'M', u'節'), + (0xFA57, 'M', u'練'), + (0xFA58, 'M', u'縉'), + (0xFA59, 'M', u'繁'), + (0xFA5A, 'M', u'署'), + (0xFA5B, 'M', u'者'), + (0xFA5C, 'M', u'臭'), + (0xFA5D, 'M', u'艹'), + (0xFA5F, 'M', u'著'), + (0xFA60, 'M', u'褐'), + (0xFA61, 'M', u'視'), + (0xFA62, 'M', u'謁'), + (0xFA63, 'M', u'謹'), + (0xFA64, 'M', u'賓'), + (0xFA65, 'M', u'贈'), + (0xFA66, 'M', u'辶'), + (0xFA67, 'M', u'逸'), + (0xFA68, 'M', u'難'), + (0xFA69, 'M', u'響'), + (0xFA6A, 'M', u'頻'), + (0xFA6B, 'M', u'恵'), + (0xFA6C, 'M', u'𤋮'), + (0xFA6D, 'M', u'舘'), + (0xFA6E, 'X'), + (0xFA70, 'M', u'並'), + (0xFA71, 'M', u'况'), + (0xFA72, 'M', u'全'), + (0xFA73, 'M', u'侀'), + (0xFA74, 'M', u'充'), + (0xFA75, 'M', u'冀'), + (0xFA76, 'M', u'勇'), + (0xFA77, 'M', u'勺'), + (0xFA78, 'M', u'喝'), + (0xFA79, 'M', u'啕'), + (0xFA7A, 'M', u'喙'), + (0xFA7B, 'M', u'嗢'), + (0xFA7C, 'M', u'塚'), + (0xFA7D, 'M', u'墳'), + (0xFA7E, 'M', u'奄'), + (0xFA7F, 'M', u'奔'), + (0xFA80, 'M', u'婢'), + (0xFA81, 'M', u'嬨'), + (0xFA82, 'M', u'廒'), + (0xFA83, 'M', u'廙'), + (0xFA84, 'M', u'彩'), + (0xFA85, 'M', u'徭'), + (0xFA86, 'M', u'惘'), + (0xFA87, 'M', u'慎'), + (0xFA88, 'M', u'愈'), + (0xFA89, 'M', u'憎'), + (0xFA8A, 'M', u'慠'), + (0xFA8B, 'M', u'懲'), + (0xFA8C, 'M', u'戴'), + (0xFA8D, 'M', u'揄'), + (0xFA8E, 'M', u'搜'), + (0xFA8F, 'M', u'摒'), + (0xFA90, 'M', u'敖'), + (0xFA91, 'M', u'晴'), + (0xFA92, 'M', u'朗'), + (0xFA93, 'M', u'望'), + (0xFA94, 'M', u'杖'), + (0xFA95, 'M', u'歹'), + (0xFA96, 'M', u'殺'), + (0xFA97, 'M', u'流'), + (0xFA98, 'M', u'滛'), + (0xFA99, 'M', u'滋'), + (0xFA9A, 'M', u'漢'), + (0xFA9B, 'M', u'瀞'), + (0xFA9C, 'M', u'煮'), + (0xFA9D, 'M', u'瞧'), + (0xFA9E, 'M', u'爵'), + (0xFA9F, 'M', u'犯'), + (0xFAA0, 'M', u'猪'), + (0xFAA1, 'M', u'瑱'), + (0xFAA2, 'M', u'甆'), + (0xFAA3, 'M', u'画'), + (0xFAA4, 'M', u'瘝'), + (0xFAA5, 'M', u'瘟'), + (0xFAA6, 'M', u'益'), + (0xFAA7, 'M', u'盛'), + (0xFAA8, 'M', u'直'), + (0xFAA9, 'M', u'睊'), + (0xFAAA, 'M', u'着'), + (0xFAAB, 'M', u'磌'), + (0xFAAC, 'M', u'窱'), + ] + +def _seg_43(): + return [ + (0xFAAD, 'M', u'節'), + (0xFAAE, 'M', u'类'), + (0xFAAF, 'M', u'絛'), + (0xFAB0, 'M', u'練'), + (0xFAB1, 'M', u'缾'), + (0xFAB2, 'M', u'者'), + (0xFAB3, 'M', u'荒'), + (0xFAB4, 'M', u'華'), + (0xFAB5, 'M', u'蝹'), + (0xFAB6, 'M', u'襁'), + (0xFAB7, 'M', u'覆'), + (0xFAB8, 'M', u'視'), + (0xFAB9, 'M', u'調'), + (0xFABA, 'M', u'諸'), + (0xFABB, 'M', u'請'), + (0xFABC, 'M', u'謁'), + (0xFABD, 'M', u'諾'), + (0xFABE, 'M', u'諭'), + (0xFABF, 'M', u'謹'), + (0xFAC0, 'M', u'變'), + (0xFAC1, 'M', u'贈'), + (0xFAC2, 'M', u'輸'), + (0xFAC3, 'M', u'遲'), + (0xFAC4, 'M', u'醙'), + (0xFAC5, 'M', u'鉶'), + (0xFAC6, 'M', u'陼'), + (0xFAC7, 'M', u'難'), + (0xFAC8, 'M', u'靖'), + (0xFAC9, 'M', u'韛'), + (0xFACA, 'M', u'響'), + (0xFACB, 'M', u'頋'), + (0xFACC, 'M', u'頻'), + (0xFACD, 'M', u'鬒'), + (0xFACE, 'M', u'龜'), + (0xFACF, 'M', u'𢡊'), + (0xFAD0, 'M', u'𢡄'), + (0xFAD1, 'M', u'𣏕'), + (0xFAD2, 'M', u'㮝'), + (0xFAD3, 'M', u'䀘'), + (0xFAD4, 'M', u'䀹'), + (0xFAD5, 'M', u'𥉉'), + (0xFAD6, 'M', u'𥳐'), + (0xFAD7, 'M', u'𧻓'), + (0xFAD8, 'M', u'齃'), + (0xFAD9, 'M', u'龎'), + (0xFADA, 'X'), + (0xFB00, 'M', u'ff'), + (0xFB01, 'M', u'fi'), + (0xFB02, 'M', u'fl'), + (0xFB03, 'M', u'ffi'), + (0xFB04, 'M', u'ffl'), + (0xFB05, 'M', u'st'), + (0xFB07, 'X'), + (0xFB13, 'M', u'մն'), + (0xFB14, 'M', u'մե'), + (0xFB15, 'M', u'մի'), + (0xFB16, 
'M', u'վն'), + (0xFB17, 'M', u'մխ'), + (0xFB18, 'X'), + (0xFB1D, 'M', u'יִ'), + (0xFB1E, 'V'), + (0xFB1F, 'M', u'ײַ'), + (0xFB20, 'M', u'ע'), + (0xFB21, 'M', u'א'), + (0xFB22, 'M', u'ד'), + (0xFB23, 'M', u'ה'), + (0xFB24, 'M', u'כ'), + (0xFB25, 'M', u'ל'), + (0xFB26, 'M', u'ם'), + (0xFB27, 'M', u'ר'), + (0xFB28, 'M', u'ת'), + (0xFB29, '3', u'+'), + (0xFB2A, 'M', u'שׁ'), + (0xFB2B, 'M', u'שׂ'), + (0xFB2C, 'M', u'שּׁ'), + (0xFB2D, 'M', u'שּׂ'), + (0xFB2E, 'M', u'אַ'), + (0xFB2F, 'M', u'אָ'), + (0xFB30, 'M', u'אּ'), + (0xFB31, 'M', u'בּ'), + (0xFB32, 'M', u'גּ'), + (0xFB33, 'M', u'דּ'), + (0xFB34, 'M', u'הּ'), + (0xFB35, 'M', u'וּ'), + (0xFB36, 'M', u'זּ'), + (0xFB37, 'X'), + (0xFB38, 'M', u'טּ'), + (0xFB39, 'M', u'יּ'), + (0xFB3A, 'M', u'ךּ'), + (0xFB3B, 'M', u'כּ'), + (0xFB3C, 'M', u'לּ'), + (0xFB3D, 'X'), + (0xFB3E, 'M', u'מּ'), + (0xFB3F, 'X'), + (0xFB40, 'M', u'נּ'), + (0xFB41, 'M', u'סּ'), + (0xFB42, 'X'), + (0xFB43, 'M', u'ףּ'), + (0xFB44, 'M', u'פּ'), + (0xFB45, 'X'), + ] + +def _seg_44(): + return [ + (0xFB46, 'M', u'צּ'), + (0xFB47, 'M', u'קּ'), + (0xFB48, 'M', u'רּ'), + (0xFB49, 'M', u'שּ'), + (0xFB4A, 'M', u'תּ'), + (0xFB4B, 'M', u'וֹ'), + (0xFB4C, 'M', u'בֿ'), + (0xFB4D, 'M', u'כֿ'), + (0xFB4E, 'M', u'פֿ'), + (0xFB4F, 'M', u'אל'), + (0xFB50, 'M', u'ٱ'), + (0xFB52, 'M', u'ٻ'), + (0xFB56, 'M', u'پ'), + (0xFB5A, 'M', u'ڀ'), + (0xFB5E, 'M', u'ٺ'), + (0xFB62, 'M', u'ٿ'), + (0xFB66, 'M', u'ٹ'), + (0xFB6A, 'M', u'ڤ'), + (0xFB6E, 'M', u'ڦ'), + (0xFB72, 'M', u'ڄ'), + (0xFB76, 'M', u'ڃ'), + (0xFB7A, 'M', u'چ'), + (0xFB7E, 'M', u'ڇ'), + (0xFB82, 'M', u'ڍ'), + (0xFB84, 'M', u'ڌ'), + (0xFB86, 'M', u'ڎ'), + (0xFB88, 'M', u'ڈ'), + (0xFB8A, 'M', u'ژ'), + (0xFB8C, 'M', u'ڑ'), + (0xFB8E, 'M', u'ک'), + (0xFB92, 'M', u'گ'), + (0xFB96, 'M', u'ڳ'), + (0xFB9A, 'M', u'ڱ'), + (0xFB9E, 'M', u'ں'), + (0xFBA0, 'M', u'ڻ'), + (0xFBA4, 'M', u'ۀ'), + (0xFBA6, 'M', u'ہ'), + (0xFBAA, 'M', u'ھ'), + (0xFBAE, 'M', u'ے'), + (0xFBB0, 'M', u'ۓ'), + (0xFBB2, 'V'), + (0xFBC2, 'X'), + (0xFBD3, 'M', u'ڭ'), + (0xFBD7, 'M', u'ۇ'), + (0xFBD9, 'M', u'ۆ'), + (0xFBDB, 'M', u'ۈ'), + (0xFBDD, 'M', u'ۇٴ'), + (0xFBDE, 'M', u'ۋ'), + (0xFBE0, 'M', u'ۅ'), + (0xFBE2, 'M', u'ۉ'), + (0xFBE4, 'M', u'ې'), + (0xFBE8, 'M', u'ى'), + (0xFBEA, 'M', u'ئا'), + (0xFBEC, 'M', u'ئە'), + (0xFBEE, 'M', u'ئو'), + (0xFBF0, 'M', u'ئۇ'), + (0xFBF2, 'M', u'ئۆ'), + (0xFBF4, 'M', u'ئۈ'), + (0xFBF6, 'M', u'ئې'), + (0xFBF9, 'M', u'ئى'), + (0xFBFC, 'M', u'ی'), + (0xFC00, 'M', u'ئج'), + (0xFC01, 'M', u'ئح'), + (0xFC02, 'M', u'ئم'), + (0xFC03, 'M', u'ئى'), + (0xFC04, 'M', u'ئي'), + (0xFC05, 'M', u'بج'), + (0xFC06, 'M', u'بح'), + (0xFC07, 'M', u'بخ'), + (0xFC08, 'M', u'بم'), + (0xFC09, 'M', u'بى'), + (0xFC0A, 'M', u'بي'), + (0xFC0B, 'M', u'تج'), + (0xFC0C, 'M', u'تح'), + (0xFC0D, 'M', u'تخ'), + (0xFC0E, 'M', u'تم'), + (0xFC0F, 'M', u'تى'), + (0xFC10, 'M', u'تي'), + (0xFC11, 'M', u'ثج'), + (0xFC12, 'M', u'ثم'), + (0xFC13, 'M', u'ثى'), + (0xFC14, 'M', u'ثي'), + (0xFC15, 'M', u'جح'), + (0xFC16, 'M', u'جم'), + (0xFC17, 'M', u'حج'), + (0xFC18, 'M', u'حم'), + (0xFC19, 'M', u'خج'), + (0xFC1A, 'M', u'خح'), + (0xFC1B, 'M', u'خم'), + (0xFC1C, 'M', u'سج'), + (0xFC1D, 'M', u'سح'), + (0xFC1E, 'M', u'سخ'), + (0xFC1F, 'M', u'سم'), + (0xFC20, 'M', u'صح'), + (0xFC21, 'M', u'صم'), + (0xFC22, 'M', u'ضج'), + (0xFC23, 'M', u'ضح'), + (0xFC24, 'M', u'ضخ'), + (0xFC25, 'M', u'ضم'), + (0xFC26, 'M', u'طح'), + ] + +def _seg_45(): + return [ + (0xFC27, 'M', u'طم'), + (0xFC28, 'M', u'ظم'), + (0xFC29, 'M', u'عج'), + (0xFC2A, 'M', u'عم'), + (0xFC2B, 'M', u'غج'), + (0xFC2C, 'M', u'غم'), + (0xFC2D, 
'M', u'فج'), + (0xFC2E, 'M', u'فح'), + (0xFC2F, 'M', u'فخ'), + (0xFC30, 'M', u'فم'), + (0xFC31, 'M', u'فى'), + (0xFC32, 'M', u'في'), + (0xFC33, 'M', u'قح'), + (0xFC34, 'M', u'قم'), + (0xFC35, 'M', u'قى'), + (0xFC36, 'M', u'قي'), + (0xFC37, 'M', u'كا'), + (0xFC38, 'M', u'كج'), + (0xFC39, 'M', u'كح'), + (0xFC3A, 'M', u'كخ'), + (0xFC3B, 'M', u'كل'), + (0xFC3C, 'M', u'كم'), + (0xFC3D, 'M', u'كى'), + (0xFC3E, 'M', u'كي'), + (0xFC3F, 'M', u'لج'), + (0xFC40, 'M', u'لح'), + (0xFC41, 'M', u'لخ'), + (0xFC42, 'M', u'لم'), + (0xFC43, 'M', u'لى'), + (0xFC44, 'M', u'لي'), + (0xFC45, 'M', u'مج'), + (0xFC46, 'M', u'مح'), + (0xFC47, 'M', u'مخ'), + (0xFC48, 'M', u'مم'), + (0xFC49, 'M', u'مى'), + (0xFC4A, 'M', u'مي'), + (0xFC4B, 'M', u'نج'), + (0xFC4C, 'M', u'نح'), + (0xFC4D, 'M', u'نخ'), + (0xFC4E, 'M', u'نم'), + (0xFC4F, 'M', u'نى'), + (0xFC50, 'M', u'ني'), + (0xFC51, 'M', u'هج'), + (0xFC52, 'M', u'هم'), + (0xFC53, 'M', u'هى'), + (0xFC54, 'M', u'هي'), + (0xFC55, 'M', u'يج'), + (0xFC56, 'M', u'يح'), + (0xFC57, 'M', u'يخ'), + (0xFC58, 'M', u'يم'), + (0xFC59, 'M', u'يى'), + (0xFC5A, 'M', u'يي'), + (0xFC5B, 'M', u'ذٰ'), + (0xFC5C, 'M', u'رٰ'), + (0xFC5D, 'M', u'ىٰ'), + (0xFC5E, '3', u' ٌّ'), + (0xFC5F, '3', u' ٍّ'), + (0xFC60, '3', u' َّ'), + (0xFC61, '3', u' ُّ'), + (0xFC62, '3', u' ِّ'), + (0xFC63, '3', u' ّٰ'), + (0xFC64, 'M', u'ئر'), + (0xFC65, 'M', u'ئز'), + (0xFC66, 'M', u'ئم'), + (0xFC67, 'M', u'ئن'), + (0xFC68, 'M', u'ئى'), + (0xFC69, 'M', u'ئي'), + (0xFC6A, 'M', u'بر'), + (0xFC6B, 'M', u'بز'), + (0xFC6C, 'M', u'بم'), + (0xFC6D, 'M', u'بن'), + (0xFC6E, 'M', u'بى'), + (0xFC6F, 'M', u'بي'), + (0xFC70, 'M', u'تر'), + (0xFC71, 'M', u'تز'), + (0xFC72, 'M', u'تم'), + (0xFC73, 'M', u'تن'), + (0xFC74, 'M', u'تى'), + (0xFC75, 'M', u'تي'), + (0xFC76, 'M', u'ثر'), + (0xFC77, 'M', u'ثز'), + (0xFC78, 'M', u'ثم'), + (0xFC79, 'M', u'ثن'), + (0xFC7A, 'M', u'ثى'), + (0xFC7B, 'M', u'ثي'), + (0xFC7C, 'M', u'فى'), + (0xFC7D, 'M', u'في'), + (0xFC7E, 'M', u'قى'), + (0xFC7F, 'M', u'قي'), + (0xFC80, 'M', u'كا'), + (0xFC81, 'M', u'كل'), + (0xFC82, 'M', u'كم'), + (0xFC83, 'M', u'كى'), + (0xFC84, 'M', u'كي'), + (0xFC85, 'M', u'لم'), + (0xFC86, 'M', u'لى'), + (0xFC87, 'M', u'لي'), + (0xFC88, 'M', u'ما'), + (0xFC89, 'M', u'مم'), + (0xFC8A, 'M', u'نر'), + ] + +def _seg_46(): + return [ + (0xFC8B, 'M', u'نز'), + (0xFC8C, 'M', u'نم'), + (0xFC8D, 'M', u'نن'), + (0xFC8E, 'M', u'نى'), + (0xFC8F, 'M', u'ني'), + (0xFC90, 'M', u'ىٰ'), + (0xFC91, 'M', u'ير'), + (0xFC92, 'M', u'يز'), + (0xFC93, 'M', u'يم'), + (0xFC94, 'M', u'ين'), + (0xFC95, 'M', u'يى'), + (0xFC96, 'M', u'يي'), + (0xFC97, 'M', u'ئج'), + (0xFC98, 'M', u'ئح'), + (0xFC99, 'M', u'ئخ'), + (0xFC9A, 'M', u'ئم'), + (0xFC9B, 'M', u'ئه'), + (0xFC9C, 'M', u'بج'), + (0xFC9D, 'M', u'بح'), + (0xFC9E, 'M', u'بخ'), + (0xFC9F, 'M', u'بم'), + (0xFCA0, 'M', u'به'), + (0xFCA1, 'M', u'تج'), + (0xFCA2, 'M', u'تح'), + (0xFCA3, 'M', u'تخ'), + (0xFCA4, 'M', u'تم'), + (0xFCA5, 'M', u'ته'), + (0xFCA6, 'M', u'ثم'), + (0xFCA7, 'M', u'جح'), + (0xFCA8, 'M', u'جم'), + (0xFCA9, 'M', u'حج'), + (0xFCAA, 'M', u'حم'), + (0xFCAB, 'M', u'خج'), + (0xFCAC, 'M', u'خم'), + (0xFCAD, 'M', u'سج'), + (0xFCAE, 'M', u'سح'), + (0xFCAF, 'M', u'سخ'), + (0xFCB0, 'M', u'سم'), + (0xFCB1, 'M', u'صح'), + (0xFCB2, 'M', u'صخ'), + (0xFCB3, 'M', u'صم'), + (0xFCB4, 'M', u'ضج'), + (0xFCB5, 'M', u'ضح'), + (0xFCB6, 'M', u'ضخ'), + (0xFCB7, 'M', u'ضم'), + (0xFCB8, 'M', u'طح'), + (0xFCB9, 'M', u'ظم'), + (0xFCBA, 'M', u'عج'), + (0xFCBB, 'M', u'عم'), + (0xFCBC, 'M', u'غج'), + (0xFCBD, 'M', u'غم'), + (0xFCBE, 'M', u'فج'), + (0xFCBF, 'M', 
u'فح'), + (0xFCC0, 'M', u'فخ'), + (0xFCC1, 'M', u'فم'), + (0xFCC2, 'M', u'قح'), + (0xFCC3, 'M', u'قم'), + (0xFCC4, 'M', u'كج'), + (0xFCC5, 'M', u'كح'), + (0xFCC6, 'M', u'كخ'), + (0xFCC7, 'M', u'كل'), + (0xFCC8, 'M', u'كم'), + (0xFCC9, 'M', u'لج'), + (0xFCCA, 'M', u'لح'), + (0xFCCB, 'M', u'لخ'), + (0xFCCC, 'M', u'لم'), + (0xFCCD, 'M', u'له'), + (0xFCCE, 'M', u'مج'), + (0xFCCF, 'M', u'مح'), + (0xFCD0, 'M', u'مخ'), + (0xFCD1, 'M', u'مم'), + (0xFCD2, 'M', u'نج'), + (0xFCD3, 'M', u'نح'), + (0xFCD4, 'M', u'نخ'), + (0xFCD5, 'M', u'نم'), + (0xFCD6, 'M', u'نه'), + (0xFCD7, 'M', u'هج'), + (0xFCD8, 'M', u'هم'), + (0xFCD9, 'M', u'هٰ'), + (0xFCDA, 'M', u'يج'), + (0xFCDB, 'M', u'يح'), + (0xFCDC, 'M', u'يخ'), + (0xFCDD, 'M', u'يم'), + (0xFCDE, 'M', u'يه'), + (0xFCDF, 'M', u'ئم'), + (0xFCE0, 'M', u'ئه'), + (0xFCE1, 'M', u'بم'), + (0xFCE2, 'M', u'به'), + (0xFCE3, 'M', u'تم'), + (0xFCE4, 'M', u'ته'), + (0xFCE5, 'M', u'ثم'), + (0xFCE6, 'M', u'ثه'), + (0xFCE7, 'M', u'سم'), + (0xFCE8, 'M', u'سه'), + (0xFCE9, 'M', u'شم'), + (0xFCEA, 'M', u'شه'), + (0xFCEB, 'M', u'كل'), + (0xFCEC, 'M', u'كم'), + (0xFCED, 'M', u'لم'), + (0xFCEE, 'M', u'نم'), + ] + +def _seg_47(): + return [ + (0xFCEF, 'M', u'نه'), + (0xFCF0, 'M', u'يم'), + (0xFCF1, 'M', u'يه'), + (0xFCF2, 'M', u'ـَّ'), + (0xFCF3, 'M', u'ـُّ'), + (0xFCF4, 'M', u'ـِّ'), + (0xFCF5, 'M', u'طى'), + (0xFCF6, 'M', u'طي'), + (0xFCF7, 'M', u'عى'), + (0xFCF8, 'M', u'عي'), + (0xFCF9, 'M', u'غى'), + (0xFCFA, 'M', u'غي'), + (0xFCFB, 'M', u'سى'), + (0xFCFC, 'M', u'سي'), + (0xFCFD, 'M', u'شى'), + (0xFCFE, 'M', u'شي'), + (0xFCFF, 'M', u'حى'), + (0xFD00, 'M', u'حي'), + (0xFD01, 'M', u'جى'), + (0xFD02, 'M', u'جي'), + (0xFD03, 'M', u'خى'), + (0xFD04, 'M', u'خي'), + (0xFD05, 'M', u'صى'), + (0xFD06, 'M', u'صي'), + (0xFD07, 'M', u'ضى'), + (0xFD08, 'M', u'ضي'), + (0xFD09, 'M', u'شج'), + (0xFD0A, 'M', u'شح'), + (0xFD0B, 'M', u'شخ'), + (0xFD0C, 'M', u'شم'), + (0xFD0D, 'M', u'شر'), + (0xFD0E, 'M', u'سر'), + (0xFD0F, 'M', u'صر'), + (0xFD10, 'M', u'ضر'), + (0xFD11, 'M', u'طى'), + (0xFD12, 'M', u'طي'), + (0xFD13, 'M', u'عى'), + (0xFD14, 'M', u'عي'), + (0xFD15, 'M', u'غى'), + (0xFD16, 'M', u'غي'), + (0xFD17, 'M', u'سى'), + (0xFD18, 'M', u'سي'), + (0xFD19, 'M', u'شى'), + (0xFD1A, 'M', u'شي'), + (0xFD1B, 'M', u'حى'), + (0xFD1C, 'M', u'حي'), + (0xFD1D, 'M', u'جى'), + (0xFD1E, 'M', u'جي'), + (0xFD1F, 'M', u'خى'), + (0xFD20, 'M', u'خي'), + (0xFD21, 'M', u'صى'), + (0xFD22, 'M', u'صي'), + (0xFD23, 'M', u'ضى'), + (0xFD24, 'M', u'ضي'), + (0xFD25, 'M', u'شج'), + (0xFD26, 'M', u'شح'), + (0xFD27, 'M', u'شخ'), + (0xFD28, 'M', u'شم'), + (0xFD29, 'M', u'شر'), + (0xFD2A, 'M', u'سر'), + (0xFD2B, 'M', u'صر'), + (0xFD2C, 'M', u'ضر'), + (0xFD2D, 'M', u'شج'), + (0xFD2E, 'M', u'شح'), + (0xFD2F, 'M', u'شخ'), + (0xFD30, 'M', u'شم'), + (0xFD31, 'M', u'سه'), + (0xFD32, 'M', u'شه'), + (0xFD33, 'M', u'طم'), + (0xFD34, 'M', u'سج'), + (0xFD35, 'M', u'سح'), + (0xFD36, 'M', u'سخ'), + (0xFD37, 'M', u'شج'), + (0xFD38, 'M', u'شح'), + (0xFD39, 'M', u'شخ'), + (0xFD3A, 'M', u'طم'), + (0xFD3B, 'M', u'ظم'), + (0xFD3C, 'M', u'اً'), + (0xFD3E, 'V'), + (0xFD40, 'X'), + (0xFD50, 'M', u'تجم'), + (0xFD51, 'M', u'تحج'), + (0xFD53, 'M', u'تحم'), + (0xFD54, 'M', u'تخم'), + (0xFD55, 'M', u'تمج'), + (0xFD56, 'M', u'تمح'), + (0xFD57, 'M', u'تمخ'), + (0xFD58, 'M', u'جمح'), + (0xFD5A, 'M', u'حمي'), + (0xFD5B, 'M', u'حمى'), + (0xFD5C, 'M', u'سحج'), + (0xFD5D, 'M', u'سجح'), + (0xFD5E, 'M', u'سجى'), + (0xFD5F, 'M', u'سمح'), + (0xFD61, 'M', u'سمج'), + (0xFD62, 'M', u'سمم'), + (0xFD64, 'M', u'صحح'), + (0xFD66, 'M', u'صمم'), + (0xFD67, 'M', u'شحم'), + 
(0xFD69, 'M', u'شجي'), + ] + +def _seg_48(): + return [ + (0xFD6A, 'M', u'شمخ'), + (0xFD6C, 'M', u'شمم'), + (0xFD6E, 'M', u'ضحى'), + (0xFD6F, 'M', u'ضخم'), + (0xFD71, 'M', u'طمح'), + (0xFD73, 'M', u'طمم'), + (0xFD74, 'M', u'طمي'), + (0xFD75, 'M', u'عجم'), + (0xFD76, 'M', u'عمم'), + (0xFD78, 'M', u'عمى'), + (0xFD79, 'M', u'غمم'), + (0xFD7A, 'M', u'غمي'), + (0xFD7B, 'M', u'غمى'), + (0xFD7C, 'M', u'فخم'), + (0xFD7E, 'M', u'قمح'), + (0xFD7F, 'M', u'قمم'), + (0xFD80, 'M', u'لحم'), + (0xFD81, 'M', u'لحي'), + (0xFD82, 'M', u'لحى'), + (0xFD83, 'M', u'لجج'), + (0xFD85, 'M', u'لخم'), + (0xFD87, 'M', u'لمح'), + (0xFD89, 'M', u'محج'), + (0xFD8A, 'M', u'محم'), + (0xFD8B, 'M', u'محي'), + (0xFD8C, 'M', u'مجح'), + (0xFD8D, 'M', u'مجم'), + (0xFD8E, 'M', u'مخج'), + (0xFD8F, 'M', u'مخم'), + (0xFD90, 'X'), + (0xFD92, 'M', u'مجخ'), + (0xFD93, 'M', u'همج'), + (0xFD94, 'M', u'همم'), + (0xFD95, 'M', u'نحم'), + (0xFD96, 'M', u'نحى'), + (0xFD97, 'M', u'نجم'), + (0xFD99, 'M', u'نجى'), + (0xFD9A, 'M', u'نمي'), + (0xFD9B, 'M', u'نمى'), + (0xFD9C, 'M', u'يمم'), + (0xFD9E, 'M', u'بخي'), + (0xFD9F, 'M', u'تجي'), + (0xFDA0, 'M', u'تجى'), + (0xFDA1, 'M', u'تخي'), + (0xFDA2, 'M', u'تخى'), + (0xFDA3, 'M', u'تمي'), + (0xFDA4, 'M', u'تمى'), + (0xFDA5, 'M', u'جمي'), + (0xFDA6, 'M', u'جحى'), + (0xFDA7, 'M', u'جمى'), + (0xFDA8, 'M', u'سخى'), + (0xFDA9, 'M', u'صحي'), + (0xFDAA, 'M', u'شحي'), + (0xFDAB, 'M', u'ضحي'), + (0xFDAC, 'M', u'لجي'), + (0xFDAD, 'M', u'لمي'), + (0xFDAE, 'M', u'يحي'), + (0xFDAF, 'M', u'يجي'), + (0xFDB0, 'M', u'يمي'), + (0xFDB1, 'M', u'ممي'), + (0xFDB2, 'M', u'قمي'), + (0xFDB3, 'M', u'نحي'), + (0xFDB4, 'M', u'قمح'), + (0xFDB5, 'M', u'لحم'), + (0xFDB6, 'M', u'عمي'), + (0xFDB7, 'M', u'كمي'), + (0xFDB8, 'M', u'نجح'), + (0xFDB9, 'M', u'مخي'), + (0xFDBA, 'M', u'لجم'), + (0xFDBB, 'M', u'كمم'), + (0xFDBC, 'M', u'لجم'), + (0xFDBD, 'M', u'نجح'), + (0xFDBE, 'M', u'جحي'), + (0xFDBF, 'M', u'حجي'), + (0xFDC0, 'M', u'مجي'), + (0xFDC1, 'M', u'فمي'), + (0xFDC2, 'M', u'بحي'), + (0xFDC3, 'M', u'كمم'), + (0xFDC4, 'M', u'عجم'), + (0xFDC5, 'M', u'صمم'), + (0xFDC6, 'M', u'سخي'), + (0xFDC7, 'M', u'نجي'), + (0xFDC8, 'X'), + (0xFDF0, 'M', u'صلے'), + (0xFDF1, 'M', u'قلے'), + (0xFDF2, 'M', u'الله'), + (0xFDF3, 'M', u'اكبر'), + (0xFDF4, 'M', u'محمد'), + (0xFDF5, 'M', u'صلعم'), + (0xFDF6, 'M', u'رسول'), + (0xFDF7, 'M', u'عليه'), + (0xFDF8, 'M', u'وسلم'), + (0xFDF9, 'M', u'صلى'), + (0xFDFA, '3', u'صلى الله عليه وسلم'), + (0xFDFB, '3', u'جل جلاله'), + (0xFDFC, 'M', u'ریال'), + (0xFDFD, 'V'), + (0xFDFE, 'X'), + (0xFE00, 'I'), + (0xFE10, '3', u','), + ] + +def _seg_49(): + return [ + (0xFE11, 'M', u'、'), + (0xFE12, 'X'), + (0xFE13, '3', u':'), + (0xFE14, '3', u';'), + (0xFE15, '3', u'!'), + (0xFE16, '3', u'?'), + (0xFE17, 'M', u'〖'), + (0xFE18, 'M', u'〗'), + (0xFE19, 'X'), + (0xFE20, 'V'), + (0xFE30, 'X'), + (0xFE31, 'M', u'—'), + (0xFE32, 'M', u'–'), + (0xFE33, '3', u'_'), + (0xFE35, '3', u'('), + (0xFE36, '3', u')'), + (0xFE37, '3', u'{'), + (0xFE38, '3', u'}'), + (0xFE39, 'M', u'〔'), + (0xFE3A, 'M', u'〕'), + (0xFE3B, 'M', u'【'), + (0xFE3C, 'M', u'】'), + (0xFE3D, 'M', u'《'), + (0xFE3E, 'M', u'》'), + (0xFE3F, 'M', u'〈'), + (0xFE40, 'M', u'〉'), + (0xFE41, 'M', u'「'), + (0xFE42, 'M', u'」'), + (0xFE43, 'M', u'『'), + (0xFE44, 'M', u'』'), + (0xFE45, 'V'), + (0xFE47, '3', u'['), + (0xFE48, '3', u']'), + (0xFE49, '3', u' ̅'), + (0xFE4D, '3', u'_'), + (0xFE50, '3', u','), + (0xFE51, 'M', u'、'), + (0xFE52, 'X'), + (0xFE54, '3', u';'), + (0xFE55, '3', u':'), + (0xFE56, '3', u'?'), + (0xFE57, '3', u'!'), + (0xFE58, 'M', u'—'), + (0xFE59, '3', u'('), + 
(0xFE5A, '3', u')'), + (0xFE5B, '3', u'{'), + (0xFE5C, '3', u'}'), + (0xFE5D, 'M', u'〔'), + (0xFE5E, 'M', u'〕'), + (0xFE5F, '3', u'#'), + (0xFE60, '3', u'&'), + (0xFE61, '3', u'*'), + (0xFE62, '3', u'+'), + (0xFE63, 'M', u'-'), + (0xFE64, '3', u'<'), + (0xFE65, '3', u'>'), + (0xFE66, '3', u'='), + (0xFE67, 'X'), + (0xFE68, '3', u'\\'), + (0xFE69, '3', u'$'), + (0xFE6A, '3', u'%'), + (0xFE6B, '3', u'@'), + (0xFE6C, 'X'), + (0xFE70, '3', u' ً'), + (0xFE71, 'M', u'ـً'), + (0xFE72, '3', u' ٌ'), + (0xFE73, 'V'), + (0xFE74, '3', u' ٍ'), + (0xFE75, 'X'), + (0xFE76, '3', u' َ'), + (0xFE77, 'M', u'ـَ'), + (0xFE78, '3', u' ُ'), + (0xFE79, 'M', u'ـُ'), + (0xFE7A, '3', u' ِ'), + (0xFE7B, 'M', u'ـِ'), + (0xFE7C, '3', u' ّ'), + (0xFE7D, 'M', u'ـّ'), + (0xFE7E, '3', u' ْ'), + (0xFE7F, 'M', u'ـْ'), + (0xFE80, 'M', u'ء'), + (0xFE81, 'M', u'آ'), + (0xFE83, 'M', u'أ'), + (0xFE85, 'M', u'ؤ'), + (0xFE87, 'M', u'إ'), + (0xFE89, 'M', u'ئ'), + (0xFE8D, 'M', u'ا'), + (0xFE8F, 'M', u'ب'), + (0xFE93, 'M', u'ة'), + (0xFE95, 'M', u'ت'), + (0xFE99, 'M', u'ث'), + (0xFE9D, 'M', u'ج'), + (0xFEA1, 'M', u'ح'), + (0xFEA5, 'M', u'خ'), + (0xFEA9, 'M', u'د'), + (0xFEAB, 'M', u'ذ'), + (0xFEAD, 'M', u'ر'), + (0xFEAF, 'M', u'ز'), + (0xFEB1, 'M', u'س'), + (0xFEB5, 'M', u'ش'), + (0xFEB9, 'M', u'ص'), + ] + +def _seg_50(): + return [ + (0xFEBD, 'M', u'ض'), + (0xFEC1, 'M', u'ط'), + (0xFEC5, 'M', u'ظ'), + (0xFEC9, 'M', u'ع'), + (0xFECD, 'M', u'غ'), + (0xFED1, 'M', u'ف'), + (0xFED5, 'M', u'ق'), + (0xFED9, 'M', u'ك'), + (0xFEDD, 'M', u'ل'), + (0xFEE1, 'M', u'م'), + (0xFEE5, 'M', u'ن'), + (0xFEE9, 'M', u'ه'), + (0xFEED, 'M', u'و'), + (0xFEEF, 'M', u'ى'), + (0xFEF1, 'M', u'ي'), + (0xFEF5, 'M', u'لآ'), + (0xFEF7, 'M', u'لأ'), + (0xFEF9, 'M', u'لإ'), + (0xFEFB, 'M', u'لا'), + (0xFEFD, 'X'), + (0xFEFF, 'I'), + (0xFF00, 'X'), + (0xFF01, '3', u'!'), + (0xFF02, '3', u'"'), + (0xFF03, '3', u'#'), + (0xFF04, '3', u'$'), + (0xFF05, '3', u'%'), + (0xFF06, '3', u'&'), + (0xFF07, '3', u'\''), + (0xFF08, '3', u'('), + (0xFF09, '3', u')'), + (0xFF0A, '3', u'*'), + (0xFF0B, '3', u'+'), + (0xFF0C, '3', u','), + (0xFF0D, 'M', u'-'), + (0xFF0E, 'M', u'.'), + (0xFF0F, '3', u'/'), + (0xFF10, 'M', u'0'), + (0xFF11, 'M', u'1'), + (0xFF12, 'M', u'2'), + (0xFF13, 'M', u'3'), + (0xFF14, 'M', u'4'), + (0xFF15, 'M', u'5'), + (0xFF16, 'M', u'6'), + (0xFF17, 'M', u'7'), + (0xFF18, 'M', u'8'), + (0xFF19, 'M', u'9'), + (0xFF1A, '3', u':'), + (0xFF1B, '3', u';'), + (0xFF1C, '3', u'<'), + (0xFF1D, '3', u'='), + (0xFF1E, '3', u'>'), + (0xFF1F, '3', u'?'), + (0xFF20, '3', u'@'), + (0xFF21, 'M', u'a'), + (0xFF22, 'M', u'b'), + (0xFF23, 'M', u'c'), + (0xFF24, 'M', u'd'), + (0xFF25, 'M', u'e'), + (0xFF26, 'M', u'f'), + (0xFF27, 'M', u'g'), + (0xFF28, 'M', u'h'), + (0xFF29, 'M', u'i'), + (0xFF2A, 'M', u'j'), + (0xFF2B, 'M', u'k'), + (0xFF2C, 'M', u'l'), + (0xFF2D, 'M', u'm'), + (0xFF2E, 'M', u'n'), + (0xFF2F, 'M', u'o'), + (0xFF30, 'M', u'p'), + (0xFF31, 'M', u'q'), + (0xFF32, 'M', u'r'), + (0xFF33, 'M', u's'), + (0xFF34, 'M', u't'), + (0xFF35, 'M', u'u'), + (0xFF36, 'M', u'v'), + (0xFF37, 'M', u'w'), + (0xFF38, 'M', u'x'), + (0xFF39, 'M', u'y'), + (0xFF3A, 'M', u'z'), + (0xFF3B, '3', u'['), + (0xFF3C, '3', u'\\'), + (0xFF3D, '3', u']'), + (0xFF3E, '3', u'^'), + (0xFF3F, '3', u'_'), + (0xFF40, '3', u'`'), + (0xFF41, 'M', u'a'), + (0xFF42, 'M', u'b'), + (0xFF43, 'M', u'c'), + (0xFF44, 'M', u'd'), + (0xFF45, 'M', u'e'), + (0xFF46, 'M', u'f'), + (0xFF47, 'M', u'g'), + (0xFF48, 'M', u'h'), + (0xFF49, 'M', u'i'), + (0xFF4A, 'M', u'j'), + (0xFF4B, 'M', u'k'), + (0xFF4C, 'M', u'l'), + 
(0xFF4D, 'M', u'm'), + (0xFF4E, 'M', u'n'), + ] + +def _seg_51(): + return [ + (0xFF4F, 'M', u'o'), + (0xFF50, 'M', u'p'), + (0xFF51, 'M', u'q'), + (0xFF52, 'M', u'r'), + (0xFF53, 'M', u's'), + (0xFF54, 'M', u't'), + (0xFF55, 'M', u'u'), + (0xFF56, 'M', u'v'), + (0xFF57, 'M', u'w'), + (0xFF58, 'M', u'x'), + (0xFF59, 'M', u'y'), + (0xFF5A, 'M', u'z'), + (0xFF5B, '3', u'{'), + (0xFF5C, '3', u'|'), + (0xFF5D, '3', u'}'), + (0xFF5E, '3', u'~'), + (0xFF5F, 'M', u'⦅'), + (0xFF60, 'M', u'⦆'), + (0xFF61, 'M', u'.'), + (0xFF62, 'M', u'「'), + (0xFF63, 'M', u'」'), + (0xFF64, 'M', u'、'), + (0xFF65, 'M', u'・'), + (0xFF66, 'M', u'ヲ'), + (0xFF67, 'M', u'ァ'), + (0xFF68, 'M', u'ィ'), + (0xFF69, 'M', u'ゥ'), + (0xFF6A, 'M', u'ェ'), + (0xFF6B, 'M', u'ォ'), + (0xFF6C, 'M', u'ャ'), + (0xFF6D, 'M', u'ュ'), + (0xFF6E, 'M', u'ョ'), + (0xFF6F, 'M', u'ッ'), + (0xFF70, 'M', u'ー'), + (0xFF71, 'M', u'ア'), + (0xFF72, 'M', u'イ'), + (0xFF73, 'M', u'ウ'), + (0xFF74, 'M', u'エ'), + (0xFF75, 'M', u'オ'), + (0xFF76, 'M', u'カ'), + (0xFF77, 'M', u'キ'), + (0xFF78, 'M', u'ク'), + (0xFF79, 'M', u'ケ'), + (0xFF7A, 'M', u'コ'), + (0xFF7B, 'M', u'サ'), + (0xFF7C, 'M', u'シ'), + (0xFF7D, 'M', u'ス'), + (0xFF7E, 'M', u'セ'), + (0xFF7F, 'M', u'ソ'), + (0xFF80, 'M', u'タ'), + (0xFF81, 'M', u'チ'), + (0xFF82, 'M', u'ツ'), + (0xFF83, 'M', u'テ'), + (0xFF84, 'M', u'ト'), + (0xFF85, 'M', u'ナ'), + (0xFF86, 'M', u'ニ'), + (0xFF87, 'M', u'ヌ'), + (0xFF88, 'M', u'ネ'), + (0xFF89, 'M', u'ノ'), + (0xFF8A, 'M', u'ハ'), + (0xFF8B, 'M', u'ヒ'), + (0xFF8C, 'M', u'フ'), + (0xFF8D, 'M', u'ヘ'), + (0xFF8E, 'M', u'ホ'), + (0xFF8F, 'M', u'マ'), + (0xFF90, 'M', u'ミ'), + (0xFF91, 'M', u'ム'), + (0xFF92, 'M', u'メ'), + (0xFF93, 'M', u'モ'), + (0xFF94, 'M', u'ヤ'), + (0xFF95, 'M', u'ユ'), + (0xFF96, 'M', u'ヨ'), + (0xFF97, 'M', u'ラ'), + (0xFF98, 'M', u'リ'), + (0xFF99, 'M', u'ル'), + (0xFF9A, 'M', u'レ'), + (0xFF9B, 'M', u'ロ'), + (0xFF9C, 'M', u'ワ'), + (0xFF9D, 'M', u'ン'), + (0xFF9E, 'M', u'゙'), + (0xFF9F, 'M', u'゚'), + (0xFFA0, 'X'), + (0xFFA1, 'M', u'ᄀ'), + (0xFFA2, 'M', u'ᄁ'), + (0xFFA3, 'M', u'ᆪ'), + (0xFFA4, 'M', u'ᄂ'), + (0xFFA5, 'M', u'ᆬ'), + (0xFFA6, 'M', u'ᆭ'), + (0xFFA7, 'M', u'ᄃ'), + (0xFFA8, 'M', u'ᄄ'), + (0xFFA9, 'M', u'ᄅ'), + (0xFFAA, 'M', u'ᆰ'), + (0xFFAB, 'M', u'ᆱ'), + (0xFFAC, 'M', u'ᆲ'), + (0xFFAD, 'M', u'ᆳ'), + (0xFFAE, 'M', u'ᆴ'), + (0xFFAF, 'M', u'ᆵ'), + (0xFFB0, 'M', u'ᄚ'), + (0xFFB1, 'M', u'ᄆ'), + (0xFFB2, 'M', u'ᄇ'), + ] + +def _seg_52(): + return [ + (0xFFB3, 'M', u'ᄈ'), + (0xFFB4, 'M', u'ᄡ'), + (0xFFB5, 'M', u'ᄉ'), + (0xFFB6, 'M', u'ᄊ'), + (0xFFB7, 'M', u'ᄋ'), + (0xFFB8, 'M', u'ᄌ'), + (0xFFB9, 'M', u'ᄍ'), + (0xFFBA, 'M', u'ᄎ'), + (0xFFBB, 'M', u'ᄏ'), + (0xFFBC, 'M', u'ᄐ'), + (0xFFBD, 'M', u'ᄑ'), + (0xFFBE, 'M', u'ᄒ'), + (0xFFBF, 'X'), + (0xFFC2, 'M', u'ᅡ'), + (0xFFC3, 'M', u'ᅢ'), + (0xFFC4, 'M', u'ᅣ'), + (0xFFC5, 'M', u'ᅤ'), + (0xFFC6, 'M', u'ᅥ'), + (0xFFC7, 'M', u'ᅦ'), + (0xFFC8, 'X'), + (0xFFCA, 'M', u'ᅧ'), + (0xFFCB, 'M', u'ᅨ'), + (0xFFCC, 'M', u'ᅩ'), + (0xFFCD, 'M', u'ᅪ'), + (0xFFCE, 'M', u'ᅫ'), + (0xFFCF, 'M', u'ᅬ'), + (0xFFD0, 'X'), + (0xFFD2, 'M', u'ᅭ'), + (0xFFD3, 'M', u'ᅮ'), + (0xFFD4, 'M', u'ᅯ'), + (0xFFD5, 'M', u'ᅰ'), + (0xFFD6, 'M', u'ᅱ'), + (0xFFD7, 'M', u'ᅲ'), + (0xFFD8, 'X'), + (0xFFDA, 'M', u'ᅳ'), + (0xFFDB, 'M', u'ᅴ'), + (0xFFDC, 'M', u'ᅵ'), + (0xFFDD, 'X'), + (0xFFE0, 'M', u'¢'), + (0xFFE1, 'M', u'£'), + (0xFFE2, 'M', u'¬'), + (0xFFE3, '3', u' ̄'), + (0xFFE4, 'M', u'¦'), + (0xFFE5, 'M', u'¥'), + (0xFFE6, 'M', u'₩'), + (0xFFE7, 'X'), + (0xFFE8, 'M', u'│'), + (0xFFE9, 'M', u'←'), + (0xFFEA, 'M', u'↑'), + (0xFFEB, 'M', u'→'), + (0xFFEC, 'M', u'↓'), + (0xFFED, 
'M', u'■'), + (0xFFEE, 'M', u'○'), + (0xFFEF, 'X'), + (0x10000, 'V'), + (0x1000C, 'X'), + (0x1000D, 'V'), + (0x10027, 'X'), + (0x10028, 'V'), + (0x1003B, 'X'), + (0x1003C, 'V'), + (0x1003E, 'X'), + (0x1003F, 'V'), + (0x1004E, 'X'), + (0x10050, 'V'), + (0x1005E, 'X'), + (0x10080, 'V'), + (0x100FB, 'X'), + (0x10100, 'V'), + (0x10103, 'X'), + (0x10107, 'V'), + (0x10134, 'X'), + (0x10137, 'V'), + (0x1018F, 'X'), + (0x10190, 'V'), + (0x1019C, 'X'), + (0x101A0, 'V'), + (0x101A1, 'X'), + (0x101D0, 'V'), + (0x101FE, 'X'), + (0x10280, 'V'), + (0x1029D, 'X'), + (0x102A0, 'V'), + (0x102D1, 'X'), + (0x102E0, 'V'), + (0x102FC, 'X'), + (0x10300, 'V'), + (0x10324, 'X'), + (0x1032D, 'V'), + (0x1034B, 'X'), + (0x10350, 'V'), + (0x1037B, 'X'), + (0x10380, 'V'), + (0x1039E, 'X'), + (0x1039F, 'V'), + (0x103C4, 'X'), + (0x103C8, 'V'), + (0x103D6, 'X'), + (0x10400, 'M', u'𐐨'), + (0x10401, 'M', u'𐐩'), + ] + +def _seg_53(): + return [ + (0x10402, 'M', u'𐐪'), + (0x10403, 'M', u'𐐫'), + (0x10404, 'M', u'𐐬'), + (0x10405, 'M', u'𐐭'), + (0x10406, 'M', u'𐐮'), + (0x10407, 'M', u'𐐯'), + (0x10408, 'M', u'𐐰'), + (0x10409, 'M', u'𐐱'), + (0x1040A, 'M', u'𐐲'), + (0x1040B, 'M', u'𐐳'), + (0x1040C, 'M', u'𐐴'), + (0x1040D, 'M', u'𐐵'), + (0x1040E, 'M', u'𐐶'), + (0x1040F, 'M', u'𐐷'), + (0x10410, 'M', u'𐐸'), + (0x10411, 'M', u'𐐹'), + (0x10412, 'M', u'𐐺'), + (0x10413, 'M', u'𐐻'), + (0x10414, 'M', u'𐐼'), + (0x10415, 'M', u'𐐽'), + (0x10416, 'M', u'𐐾'), + (0x10417, 'M', u'𐐿'), + (0x10418, 'M', u'𐑀'), + (0x10419, 'M', u'𐑁'), + (0x1041A, 'M', u'𐑂'), + (0x1041B, 'M', u'𐑃'), + (0x1041C, 'M', u'𐑄'), + (0x1041D, 'M', u'𐑅'), + (0x1041E, 'M', u'𐑆'), + (0x1041F, 'M', u'𐑇'), + (0x10420, 'M', u'𐑈'), + (0x10421, 'M', u'𐑉'), + (0x10422, 'M', u'𐑊'), + (0x10423, 'M', u'𐑋'), + (0x10424, 'M', u'𐑌'), + (0x10425, 'M', u'𐑍'), + (0x10426, 'M', u'𐑎'), + (0x10427, 'M', u'𐑏'), + (0x10428, 'V'), + (0x1049E, 'X'), + (0x104A0, 'V'), + (0x104AA, 'X'), + (0x104B0, 'M', u'𐓘'), + (0x104B1, 'M', u'𐓙'), + (0x104B2, 'M', u'𐓚'), + (0x104B3, 'M', u'𐓛'), + (0x104B4, 'M', u'𐓜'), + (0x104B5, 'M', u'𐓝'), + (0x104B6, 'M', u'𐓞'), + (0x104B7, 'M', u'𐓟'), + (0x104B8, 'M', u'𐓠'), + (0x104B9, 'M', u'𐓡'), + (0x104BA, 'M', u'𐓢'), + (0x104BB, 'M', u'𐓣'), + (0x104BC, 'M', u'𐓤'), + (0x104BD, 'M', u'𐓥'), + (0x104BE, 'M', u'𐓦'), + (0x104BF, 'M', u'𐓧'), + (0x104C0, 'M', u'𐓨'), + (0x104C1, 'M', u'𐓩'), + (0x104C2, 'M', u'𐓪'), + (0x104C3, 'M', u'𐓫'), + (0x104C4, 'M', u'𐓬'), + (0x104C5, 'M', u'𐓭'), + (0x104C6, 'M', u'𐓮'), + (0x104C7, 'M', u'𐓯'), + (0x104C8, 'M', u'𐓰'), + (0x104C9, 'M', u'𐓱'), + (0x104CA, 'M', u'𐓲'), + (0x104CB, 'M', u'𐓳'), + (0x104CC, 'M', u'𐓴'), + (0x104CD, 'M', u'𐓵'), + (0x104CE, 'M', u'𐓶'), + (0x104CF, 'M', u'𐓷'), + (0x104D0, 'M', u'𐓸'), + (0x104D1, 'M', u'𐓹'), + (0x104D2, 'M', u'𐓺'), + (0x104D3, 'M', u'𐓻'), + (0x104D4, 'X'), + (0x104D8, 'V'), + (0x104FC, 'X'), + (0x10500, 'V'), + (0x10528, 'X'), + (0x10530, 'V'), + (0x10564, 'X'), + (0x1056F, 'V'), + (0x10570, 'X'), + (0x10600, 'V'), + (0x10737, 'X'), + (0x10740, 'V'), + (0x10756, 'X'), + (0x10760, 'V'), + (0x10768, 'X'), + (0x10800, 'V'), + (0x10806, 'X'), + (0x10808, 'V'), + (0x10809, 'X'), + (0x1080A, 'V'), + (0x10836, 'X'), + (0x10837, 'V'), + ] + +def _seg_54(): + return [ + (0x10839, 'X'), + (0x1083C, 'V'), + (0x1083D, 'X'), + (0x1083F, 'V'), + (0x10856, 'X'), + (0x10857, 'V'), + (0x1089F, 'X'), + (0x108A7, 'V'), + (0x108B0, 'X'), + (0x108E0, 'V'), + (0x108F3, 'X'), + (0x108F4, 'V'), + (0x108F6, 'X'), + (0x108FB, 'V'), + (0x1091C, 'X'), + (0x1091F, 'V'), + (0x1093A, 'X'), + (0x1093F, 'V'), + (0x10940, 'X'), + 
(0x10980, 'V'), + (0x109B8, 'X'), + (0x109BC, 'V'), + (0x109D0, 'X'), + (0x109D2, 'V'), + (0x10A04, 'X'), + (0x10A05, 'V'), + (0x10A07, 'X'), + (0x10A0C, 'V'), + (0x10A14, 'X'), + (0x10A15, 'V'), + (0x10A18, 'X'), + (0x10A19, 'V'), + (0x10A36, 'X'), + (0x10A38, 'V'), + (0x10A3B, 'X'), + (0x10A3F, 'V'), + (0x10A49, 'X'), + (0x10A50, 'V'), + (0x10A59, 'X'), + (0x10A60, 'V'), + (0x10AA0, 'X'), + (0x10AC0, 'V'), + (0x10AE7, 'X'), + (0x10AEB, 'V'), + (0x10AF7, 'X'), + (0x10B00, 'V'), + (0x10B36, 'X'), + (0x10B39, 'V'), + (0x10B56, 'X'), + (0x10B58, 'V'), + (0x10B73, 'X'), + (0x10B78, 'V'), + (0x10B92, 'X'), + (0x10B99, 'V'), + (0x10B9D, 'X'), + (0x10BA9, 'V'), + (0x10BB0, 'X'), + (0x10C00, 'V'), + (0x10C49, 'X'), + (0x10C80, 'M', u'𐳀'), + (0x10C81, 'M', u'𐳁'), + (0x10C82, 'M', u'𐳂'), + (0x10C83, 'M', u'𐳃'), + (0x10C84, 'M', u'𐳄'), + (0x10C85, 'M', u'𐳅'), + (0x10C86, 'M', u'𐳆'), + (0x10C87, 'M', u'𐳇'), + (0x10C88, 'M', u'𐳈'), + (0x10C89, 'M', u'𐳉'), + (0x10C8A, 'M', u'𐳊'), + (0x10C8B, 'M', u'𐳋'), + (0x10C8C, 'M', u'𐳌'), + (0x10C8D, 'M', u'𐳍'), + (0x10C8E, 'M', u'𐳎'), + (0x10C8F, 'M', u'𐳏'), + (0x10C90, 'M', u'𐳐'), + (0x10C91, 'M', u'𐳑'), + (0x10C92, 'M', u'𐳒'), + (0x10C93, 'M', u'𐳓'), + (0x10C94, 'M', u'𐳔'), + (0x10C95, 'M', u'𐳕'), + (0x10C96, 'M', u'𐳖'), + (0x10C97, 'M', u'𐳗'), + (0x10C98, 'M', u'𐳘'), + (0x10C99, 'M', u'𐳙'), + (0x10C9A, 'M', u'𐳚'), + (0x10C9B, 'M', u'𐳛'), + (0x10C9C, 'M', u'𐳜'), + (0x10C9D, 'M', u'𐳝'), + (0x10C9E, 'M', u'𐳞'), + (0x10C9F, 'M', u'𐳟'), + (0x10CA0, 'M', u'𐳠'), + (0x10CA1, 'M', u'𐳡'), + (0x10CA2, 'M', u'𐳢'), + (0x10CA3, 'M', u'𐳣'), + (0x10CA4, 'M', u'𐳤'), + (0x10CA5, 'M', u'𐳥'), + (0x10CA6, 'M', u'𐳦'), + (0x10CA7, 'M', u'𐳧'), + (0x10CA8, 'M', u'𐳨'), + ] + +def _seg_55(): + return [ + (0x10CA9, 'M', u'𐳩'), + (0x10CAA, 'M', u'𐳪'), + (0x10CAB, 'M', u'𐳫'), + (0x10CAC, 'M', u'𐳬'), + (0x10CAD, 'M', u'𐳭'), + (0x10CAE, 'M', u'𐳮'), + (0x10CAF, 'M', u'𐳯'), + (0x10CB0, 'M', u'𐳰'), + (0x10CB1, 'M', u'𐳱'), + (0x10CB2, 'M', u'𐳲'), + (0x10CB3, 'X'), + (0x10CC0, 'V'), + (0x10CF3, 'X'), + (0x10CFA, 'V'), + (0x10D28, 'X'), + (0x10D30, 'V'), + (0x10D3A, 'X'), + (0x10E60, 'V'), + (0x10E7F, 'X'), + (0x10F00, 'V'), + (0x10F28, 'X'), + (0x10F30, 'V'), + (0x10F5A, 'X'), + (0x11000, 'V'), + (0x1104E, 'X'), + (0x11052, 'V'), + (0x11070, 'X'), + (0x1107F, 'V'), + (0x110BD, 'X'), + (0x110BE, 'V'), + (0x110C2, 'X'), + (0x110D0, 'V'), + (0x110E9, 'X'), + (0x110F0, 'V'), + (0x110FA, 'X'), + (0x11100, 'V'), + (0x11135, 'X'), + (0x11136, 'V'), + (0x11147, 'X'), + (0x11150, 'V'), + (0x11177, 'X'), + (0x11180, 'V'), + (0x111CE, 'X'), + (0x111D0, 'V'), + (0x111E0, 'X'), + (0x111E1, 'V'), + (0x111F5, 'X'), + (0x11200, 'V'), + (0x11212, 'X'), + (0x11213, 'V'), + (0x1123F, 'X'), + (0x11280, 'V'), + (0x11287, 'X'), + (0x11288, 'V'), + (0x11289, 'X'), + (0x1128A, 'V'), + (0x1128E, 'X'), + (0x1128F, 'V'), + (0x1129E, 'X'), + (0x1129F, 'V'), + (0x112AA, 'X'), + (0x112B0, 'V'), + (0x112EB, 'X'), + (0x112F0, 'V'), + (0x112FA, 'X'), + (0x11300, 'V'), + (0x11304, 'X'), + (0x11305, 'V'), + (0x1130D, 'X'), + (0x1130F, 'V'), + (0x11311, 'X'), + (0x11313, 'V'), + (0x11329, 'X'), + (0x1132A, 'V'), + (0x11331, 'X'), + (0x11332, 'V'), + (0x11334, 'X'), + (0x11335, 'V'), + (0x1133A, 'X'), + (0x1133B, 'V'), + (0x11345, 'X'), + (0x11347, 'V'), + (0x11349, 'X'), + (0x1134B, 'V'), + (0x1134E, 'X'), + (0x11350, 'V'), + (0x11351, 'X'), + (0x11357, 'V'), + (0x11358, 'X'), + (0x1135D, 'V'), + (0x11364, 'X'), + (0x11366, 'V'), + (0x1136D, 'X'), + (0x11370, 'V'), + (0x11375, 'X'), + (0x11400, 'V'), + (0x1145A, 'X'), + (0x1145B, 
'V'), + (0x1145C, 'X'), + (0x1145D, 'V'), + ] + +def _seg_56(): + return [ + (0x1145F, 'X'), + (0x11480, 'V'), + (0x114C8, 'X'), + (0x114D0, 'V'), + (0x114DA, 'X'), + (0x11580, 'V'), + (0x115B6, 'X'), + (0x115B8, 'V'), + (0x115DE, 'X'), + (0x11600, 'V'), + (0x11645, 'X'), + (0x11650, 'V'), + (0x1165A, 'X'), + (0x11660, 'V'), + (0x1166D, 'X'), + (0x11680, 'V'), + (0x116B8, 'X'), + (0x116C0, 'V'), + (0x116CA, 'X'), + (0x11700, 'V'), + (0x1171B, 'X'), + (0x1171D, 'V'), + (0x1172C, 'X'), + (0x11730, 'V'), + (0x11740, 'X'), + (0x11800, 'V'), + (0x1183C, 'X'), + (0x118A0, 'M', u'𑣀'), + (0x118A1, 'M', u'𑣁'), + (0x118A2, 'M', u'𑣂'), + (0x118A3, 'M', u'𑣃'), + (0x118A4, 'M', u'𑣄'), + (0x118A5, 'M', u'𑣅'), + (0x118A6, 'M', u'𑣆'), + (0x118A7, 'M', u'𑣇'), + (0x118A8, 'M', u'𑣈'), + (0x118A9, 'M', u'𑣉'), + (0x118AA, 'M', u'𑣊'), + (0x118AB, 'M', u'𑣋'), + (0x118AC, 'M', u'𑣌'), + (0x118AD, 'M', u'𑣍'), + (0x118AE, 'M', u'𑣎'), + (0x118AF, 'M', u'𑣏'), + (0x118B0, 'M', u'𑣐'), + (0x118B1, 'M', u'𑣑'), + (0x118B2, 'M', u'𑣒'), + (0x118B3, 'M', u'𑣓'), + (0x118B4, 'M', u'𑣔'), + (0x118B5, 'M', u'𑣕'), + (0x118B6, 'M', u'𑣖'), + (0x118B7, 'M', u'𑣗'), + (0x118B8, 'M', u'𑣘'), + (0x118B9, 'M', u'𑣙'), + (0x118BA, 'M', u'𑣚'), + (0x118BB, 'M', u'𑣛'), + (0x118BC, 'M', u'𑣜'), + (0x118BD, 'M', u'𑣝'), + (0x118BE, 'M', u'𑣞'), + (0x118BF, 'M', u'𑣟'), + (0x118C0, 'V'), + (0x118F3, 'X'), + (0x118FF, 'V'), + (0x11900, 'X'), + (0x11A00, 'V'), + (0x11A48, 'X'), + (0x11A50, 'V'), + (0x11A84, 'X'), + (0x11A86, 'V'), + (0x11AA3, 'X'), + (0x11AC0, 'V'), + (0x11AF9, 'X'), + (0x11C00, 'V'), + (0x11C09, 'X'), + (0x11C0A, 'V'), + (0x11C37, 'X'), + (0x11C38, 'V'), + (0x11C46, 'X'), + (0x11C50, 'V'), + (0x11C6D, 'X'), + (0x11C70, 'V'), + (0x11C90, 'X'), + (0x11C92, 'V'), + (0x11CA8, 'X'), + (0x11CA9, 'V'), + (0x11CB7, 'X'), + (0x11D00, 'V'), + (0x11D07, 'X'), + (0x11D08, 'V'), + (0x11D0A, 'X'), + (0x11D0B, 'V'), + (0x11D37, 'X'), + (0x11D3A, 'V'), + (0x11D3B, 'X'), + (0x11D3C, 'V'), + (0x11D3E, 'X'), + (0x11D3F, 'V'), + (0x11D48, 'X'), + (0x11D50, 'V'), + (0x11D5A, 'X'), + (0x11D60, 'V'), + ] + +def _seg_57(): + return [ + (0x11D66, 'X'), + (0x11D67, 'V'), + (0x11D69, 'X'), + (0x11D6A, 'V'), + (0x11D8F, 'X'), + (0x11D90, 'V'), + (0x11D92, 'X'), + (0x11D93, 'V'), + (0x11D99, 'X'), + (0x11DA0, 'V'), + (0x11DAA, 'X'), + (0x11EE0, 'V'), + (0x11EF9, 'X'), + (0x12000, 'V'), + (0x1239A, 'X'), + (0x12400, 'V'), + (0x1246F, 'X'), + (0x12470, 'V'), + (0x12475, 'X'), + (0x12480, 'V'), + (0x12544, 'X'), + (0x13000, 'V'), + (0x1342F, 'X'), + (0x14400, 'V'), + (0x14647, 'X'), + (0x16800, 'V'), + (0x16A39, 'X'), + (0x16A40, 'V'), + (0x16A5F, 'X'), + (0x16A60, 'V'), + (0x16A6A, 'X'), + (0x16A6E, 'V'), + (0x16A70, 'X'), + (0x16AD0, 'V'), + (0x16AEE, 'X'), + (0x16AF0, 'V'), + (0x16AF6, 'X'), + (0x16B00, 'V'), + (0x16B46, 'X'), + (0x16B50, 'V'), + (0x16B5A, 'X'), + (0x16B5B, 'V'), + (0x16B62, 'X'), + (0x16B63, 'V'), + (0x16B78, 'X'), + (0x16B7D, 'V'), + (0x16B90, 'X'), + (0x16E60, 'V'), + (0x16E9B, 'X'), + (0x16F00, 'V'), + (0x16F45, 'X'), + (0x16F50, 'V'), + (0x16F7F, 'X'), + (0x16F8F, 'V'), + (0x16FA0, 'X'), + (0x16FE0, 'V'), + (0x16FE2, 'X'), + (0x17000, 'V'), + (0x187F2, 'X'), + (0x18800, 'V'), + (0x18AF3, 'X'), + (0x1B000, 'V'), + (0x1B11F, 'X'), + (0x1B170, 'V'), + (0x1B2FC, 'X'), + (0x1BC00, 'V'), + (0x1BC6B, 'X'), + (0x1BC70, 'V'), + (0x1BC7D, 'X'), + (0x1BC80, 'V'), + (0x1BC89, 'X'), + (0x1BC90, 'V'), + (0x1BC9A, 'X'), + (0x1BC9C, 'V'), + (0x1BCA0, 'I'), + (0x1BCA4, 'X'), + (0x1D000, 'V'), + (0x1D0F6, 'X'), + (0x1D100, 'V'), + (0x1D127, 'X'), + (0x1D129, 
'V'), + (0x1D15E, 'M', u'𝅗𝅥'), + (0x1D15F, 'M', u'𝅘𝅥'), + (0x1D160, 'M', u'𝅘𝅥𝅮'), + (0x1D161, 'M', u'𝅘𝅥𝅯'), + (0x1D162, 'M', u'𝅘𝅥𝅰'), + (0x1D163, 'M', u'𝅘𝅥𝅱'), + (0x1D164, 'M', u'𝅘𝅥𝅲'), + (0x1D165, 'V'), + (0x1D173, 'X'), + (0x1D17B, 'V'), + (0x1D1BB, 'M', u'𝆹𝅥'), + (0x1D1BC, 'M', u'𝆺𝅥'), + (0x1D1BD, 'M', u'𝆹𝅥𝅮'), + (0x1D1BE, 'M', u'𝆺𝅥𝅮'), + (0x1D1BF, 'M', u'𝆹𝅥𝅯'), + (0x1D1C0, 'M', u'𝆺𝅥𝅯'), + (0x1D1C1, 'V'), + (0x1D1E9, 'X'), + (0x1D200, 'V'), + ] + +def _seg_58(): + return [ + (0x1D246, 'X'), + (0x1D2E0, 'V'), + (0x1D2F4, 'X'), + (0x1D300, 'V'), + (0x1D357, 'X'), + (0x1D360, 'V'), + (0x1D379, 'X'), + (0x1D400, 'M', u'a'), + (0x1D401, 'M', u'b'), + (0x1D402, 'M', u'c'), + (0x1D403, 'M', u'd'), + (0x1D404, 'M', u'e'), + (0x1D405, 'M', u'f'), + (0x1D406, 'M', u'g'), + (0x1D407, 'M', u'h'), + (0x1D408, 'M', u'i'), + (0x1D409, 'M', u'j'), + (0x1D40A, 'M', u'k'), + (0x1D40B, 'M', u'l'), + (0x1D40C, 'M', u'm'), + (0x1D40D, 'M', u'n'), + (0x1D40E, 'M', u'o'), + (0x1D40F, 'M', u'p'), + (0x1D410, 'M', u'q'), + (0x1D411, 'M', u'r'), + (0x1D412, 'M', u's'), + (0x1D413, 'M', u't'), + (0x1D414, 'M', u'u'), + (0x1D415, 'M', u'v'), + (0x1D416, 'M', u'w'), + (0x1D417, 'M', u'x'), + (0x1D418, 'M', u'y'), + (0x1D419, 'M', u'z'), + (0x1D41A, 'M', u'a'), + (0x1D41B, 'M', u'b'), + (0x1D41C, 'M', u'c'), + (0x1D41D, 'M', u'd'), + (0x1D41E, 'M', u'e'), + (0x1D41F, 'M', u'f'), + (0x1D420, 'M', u'g'), + (0x1D421, 'M', u'h'), + (0x1D422, 'M', u'i'), + (0x1D423, 'M', u'j'), + (0x1D424, 'M', u'k'), + (0x1D425, 'M', u'l'), + (0x1D426, 'M', u'm'), + (0x1D427, 'M', u'n'), + (0x1D428, 'M', u'o'), + (0x1D429, 'M', u'p'), + (0x1D42A, 'M', u'q'), + (0x1D42B, 'M', u'r'), + (0x1D42C, 'M', u's'), + (0x1D42D, 'M', u't'), + (0x1D42E, 'M', u'u'), + (0x1D42F, 'M', u'v'), + (0x1D430, 'M', u'w'), + (0x1D431, 'M', u'x'), + (0x1D432, 'M', u'y'), + (0x1D433, 'M', u'z'), + (0x1D434, 'M', u'a'), + (0x1D435, 'M', u'b'), + (0x1D436, 'M', u'c'), + (0x1D437, 'M', u'd'), + (0x1D438, 'M', u'e'), + (0x1D439, 'M', u'f'), + (0x1D43A, 'M', u'g'), + (0x1D43B, 'M', u'h'), + (0x1D43C, 'M', u'i'), + (0x1D43D, 'M', u'j'), + (0x1D43E, 'M', u'k'), + (0x1D43F, 'M', u'l'), + (0x1D440, 'M', u'm'), + (0x1D441, 'M', u'n'), + (0x1D442, 'M', u'o'), + (0x1D443, 'M', u'p'), + (0x1D444, 'M', u'q'), + (0x1D445, 'M', u'r'), + (0x1D446, 'M', u's'), + (0x1D447, 'M', u't'), + (0x1D448, 'M', u'u'), + (0x1D449, 'M', u'v'), + (0x1D44A, 'M', u'w'), + (0x1D44B, 'M', u'x'), + (0x1D44C, 'M', u'y'), + (0x1D44D, 'M', u'z'), + (0x1D44E, 'M', u'a'), + (0x1D44F, 'M', u'b'), + (0x1D450, 'M', u'c'), + (0x1D451, 'M', u'd'), + (0x1D452, 'M', u'e'), + (0x1D453, 'M', u'f'), + (0x1D454, 'M', u'g'), + (0x1D455, 'X'), + (0x1D456, 'M', u'i'), + (0x1D457, 'M', u'j'), + (0x1D458, 'M', u'k'), + (0x1D459, 'M', u'l'), + (0x1D45A, 'M', u'm'), + (0x1D45B, 'M', u'n'), + (0x1D45C, 'M', u'o'), + ] + +def _seg_59(): + return [ + (0x1D45D, 'M', u'p'), + (0x1D45E, 'M', u'q'), + (0x1D45F, 'M', u'r'), + (0x1D460, 'M', u's'), + (0x1D461, 'M', u't'), + (0x1D462, 'M', u'u'), + (0x1D463, 'M', u'v'), + (0x1D464, 'M', u'w'), + (0x1D465, 'M', u'x'), + (0x1D466, 'M', u'y'), + (0x1D467, 'M', u'z'), + (0x1D468, 'M', u'a'), + (0x1D469, 'M', u'b'), + (0x1D46A, 'M', u'c'), + (0x1D46B, 'M', u'd'), + (0x1D46C, 'M', u'e'), + (0x1D46D, 'M', u'f'), + (0x1D46E, 'M', u'g'), + (0x1D46F, 'M', u'h'), + (0x1D470, 'M', u'i'), + (0x1D471, 'M', u'j'), + (0x1D472, 'M', u'k'), + (0x1D473, 'M', u'l'), + (0x1D474, 'M', u'm'), + (0x1D475, 'M', u'n'), + (0x1D476, 'M', u'o'), + (0x1D477, 'M', u'p'), + (0x1D478, 'M', u'q'), + (0x1D479, 'M', 
u'r'), + (0x1D47A, 'M', u's'), + (0x1D47B, 'M', u't'), + (0x1D47C, 'M', u'u'), + (0x1D47D, 'M', u'v'), + (0x1D47E, 'M', u'w'), + (0x1D47F, 'M', u'x'), + (0x1D480, 'M', u'y'), + (0x1D481, 'M', u'z'), + (0x1D482, 'M', u'a'), + (0x1D483, 'M', u'b'), + (0x1D484, 'M', u'c'), + (0x1D485, 'M', u'd'), + (0x1D486, 'M', u'e'), + (0x1D487, 'M', u'f'), + (0x1D488, 'M', u'g'), + (0x1D489, 'M', u'h'), + (0x1D48A, 'M', u'i'), + (0x1D48B, 'M', u'j'), + (0x1D48C, 'M', u'k'), + (0x1D48D, 'M', u'l'), + (0x1D48E, 'M', u'm'), + (0x1D48F, 'M', u'n'), + (0x1D490, 'M', u'o'), + (0x1D491, 'M', u'p'), + (0x1D492, 'M', u'q'), + (0x1D493, 'M', u'r'), + (0x1D494, 'M', u's'), + (0x1D495, 'M', u't'), + (0x1D496, 'M', u'u'), + (0x1D497, 'M', u'v'), + (0x1D498, 'M', u'w'), + (0x1D499, 'M', u'x'), + (0x1D49A, 'M', u'y'), + (0x1D49B, 'M', u'z'), + (0x1D49C, 'M', u'a'), + (0x1D49D, 'X'), + (0x1D49E, 'M', u'c'), + (0x1D49F, 'M', u'd'), + (0x1D4A0, 'X'), + (0x1D4A2, 'M', u'g'), + (0x1D4A3, 'X'), + (0x1D4A5, 'M', u'j'), + (0x1D4A6, 'M', u'k'), + (0x1D4A7, 'X'), + (0x1D4A9, 'M', u'n'), + (0x1D4AA, 'M', u'o'), + (0x1D4AB, 'M', u'p'), + (0x1D4AC, 'M', u'q'), + (0x1D4AD, 'X'), + (0x1D4AE, 'M', u's'), + (0x1D4AF, 'M', u't'), + (0x1D4B0, 'M', u'u'), + (0x1D4B1, 'M', u'v'), + (0x1D4B2, 'M', u'w'), + (0x1D4B3, 'M', u'x'), + (0x1D4B4, 'M', u'y'), + (0x1D4B5, 'M', u'z'), + (0x1D4B6, 'M', u'a'), + (0x1D4B7, 'M', u'b'), + (0x1D4B8, 'M', u'c'), + (0x1D4B9, 'M', u'd'), + (0x1D4BA, 'X'), + (0x1D4BB, 'M', u'f'), + (0x1D4BC, 'X'), + (0x1D4BD, 'M', u'h'), + (0x1D4BE, 'M', u'i'), + (0x1D4BF, 'M', u'j'), + (0x1D4C0, 'M', u'k'), + (0x1D4C1, 'M', u'l'), + (0x1D4C2, 'M', u'm'), + (0x1D4C3, 'M', u'n'), + ] + +def _seg_60(): + return [ + (0x1D4C4, 'X'), + (0x1D4C5, 'M', u'p'), + (0x1D4C6, 'M', u'q'), + (0x1D4C7, 'M', u'r'), + (0x1D4C8, 'M', u's'), + (0x1D4C9, 'M', u't'), + (0x1D4CA, 'M', u'u'), + (0x1D4CB, 'M', u'v'), + (0x1D4CC, 'M', u'w'), + (0x1D4CD, 'M', u'x'), + (0x1D4CE, 'M', u'y'), + (0x1D4CF, 'M', u'z'), + (0x1D4D0, 'M', u'a'), + (0x1D4D1, 'M', u'b'), + (0x1D4D2, 'M', u'c'), + (0x1D4D3, 'M', u'd'), + (0x1D4D4, 'M', u'e'), + (0x1D4D5, 'M', u'f'), + (0x1D4D6, 'M', u'g'), + (0x1D4D7, 'M', u'h'), + (0x1D4D8, 'M', u'i'), + (0x1D4D9, 'M', u'j'), + (0x1D4DA, 'M', u'k'), + (0x1D4DB, 'M', u'l'), + (0x1D4DC, 'M', u'm'), + (0x1D4DD, 'M', u'n'), + (0x1D4DE, 'M', u'o'), + (0x1D4DF, 'M', u'p'), + (0x1D4E0, 'M', u'q'), + (0x1D4E1, 'M', u'r'), + (0x1D4E2, 'M', u's'), + (0x1D4E3, 'M', u't'), + (0x1D4E4, 'M', u'u'), + (0x1D4E5, 'M', u'v'), + (0x1D4E6, 'M', u'w'), + (0x1D4E7, 'M', u'x'), + (0x1D4E8, 'M', u'y'), + (0x1D4E9, 'M', u'z'), + (0x1D4EA, 'M', u'a'), + (0x1D4EB, 'M', u'b'), + (0x1D4EC, 'M', u'c'), + (0x1D4ED, 'M', u'd'), + (0x1D4EE, 'M', u'e'), + (0x1D4EF, 'M', u'f'), + (0x1D4F0, 'M', u'g'), + (0x1D4F1, 'M', u'h'), + (0x1D4F2, 'M', u'i'), + (0x1D4F3, 'M', u'j'), + (0x1D4F4, 'M', u'k'), + (0x1D4F5, 'M', u'l'), + (0x1D4F6, 'M', u'm'), + (0x1D4F7, 'M', u'n'), + (0x1D4F8, 'M', u'o'), + (0x1D4F9, 'M', u'p'), + (0x1D4FA, 'M', u'q'), + (0x1D4FB, 'M', u'r'), + (0x1D4FC, 'M', u's'), + (0x1D4FD, 'M', u't'), + (0x1D4FE, 'M', u'u'), + (0x1D4FF, 'M', u'v'), + (0x1D500, 'M', u'w'), + (0x1D501, 'M', u'x'), + (0x1D502, 'M', u'y'), + (0x1D503, 'M', u'z'), + (0x1D504, 'M', u'a'), + (0x1D505, 'M', u'b'), + (0x1D506, 'X'), + (0x1D507, 'M', u'd'), + (0x1D508, 'M', u'e'), + (0x1D509, 'M', u'f'), + (0x1D50A, 'M', u'g'), + (0x1D50B, 'X'), + (0x1D50D, 'M', u'j'), + (0x1D50E, 'M', u'k'), + (0x1D50F, 'M', u'l'), + (0x1D510, 'M', u'm'), + (0x1D511, 'M', u'n'), + (0x1D512, 'M', 
u'o'), + (0x1D513, 'M', u'p'), + (0x1D514, 'M', u'q'), + (0x1D515, 'X'), + (0x1D516, 'M', u's'), + (0x1D517, 'M', u't'), + (0x1D518, 'M', u'u'), + (0x1D519, 'M', u'v'), + (0x1D51A, 'M', u'w'), + (0x1D51B, 'M', u'x'), + (0x1D51C, 'M', u'y'), + (0x1D51D, 'X'), + (0x1D51E, 'M', u'a'), + (0x1D51F, 'M', u'b'), + (0x1D520, 'M', u'c'), + (0x1D521, 'M', u'd'), + (0x1D522, 'M', u'e'), + (0x1D523, 'M', u'f'), + (0x1D524, 'M', u'g'), + (0x1D525, 'M', u'h'), + (0x1D526, 'M', u'i'), + (0x1D527, 'M', u'j'), + (0x1D528, 'M', u'k'), + ] + +def _seg_61(): + return [ + (0x1D529, 'M', u'l'), + (0x1D52A, 'M', u'm'), + (0x1D52B, 'M', u'n'), + (0x1D52C, 'M', u'o'), + (0x1D52D, 'M', u'p'), + (0x1D52E, 'M', u'q'), + (0x1D52F, 'M', u'r'), + (0x1D530, 'M', u's'), + (0x1D531, 'M', u't'), + (0x1D532, 'M', u'u'), + (0x1D533, 'M', u'v'), + (0x1D534, 'M', u'w'), + (0x1D535, 'M', u'x'), + (0x1D536, 'M', u'y'), + (0x1D537, 'M', u'z'), + (0x1D538, 'M', u'a'), + (0x1D539, 'M', u'b'), + (0x1D53A, 'X'), + (0x1D53B, 'M', u'd'), + (0x1D53C, 'M', u'e'), + (0x1D53D, 'M', u'f'), + (0x1D53E, 'M', u'g'), + (0x1D53F, 'X'), + (0x1D540, 'M', u'i'), + (0x1D541, 'M', u'j'), + (0x1D542, 'M', u'k'), + (0x1D543, 'M', u'l'), + (0x1D544, 'M', u'm'), + (0x1D545, 'X'), + (0x1D546, 'M', u'o'), + (0x1D547, 'X'), + (0x1D54A, 'M', u's'), + (0x1D54B, 'M', u't'), + (0x1D54C, 'M', u'u'), + (0x1D54D, 'M', u'v'), + (0x1D54E, 'M', u'w'), + (0x1D54F, 'M', u'x'), + (0x1D550, 'M', u'y'), + (0x1D551, 'X'), + (0x1D552, 'M', u'a'), + (0x1D553, 'M', u'b'), + (0x1D554, 'M', u'c'), + (0x1D555, 'M', u'd'), + (0x1D556, 'M', u'e'), + (0x1D557, 'M', u'f'), + (0x1D558, 'M', u'g'), + (0x1D559, 'M', u'h'), + (0x1D55A, 'M', u'i'), + (0x1D55B, 'M', u'j'), + (0x1D55C, 'M', u'k'), + (0x1D55D, 'M', u'l'), + (0x1D55E, 'M', u'm'), + (0x1D55F, 'M', u'n'), + (0x1D560, 'M', u'o'), + (0x1D561, 'M', u'p'), + (0x1D562, 'M', u'q'), + (0x1D563, 'M', u'r'), + (0x1D564, 'M', u's'), + (0x1D565, 'M', u't'), + (0x1D566, 'M', u'u'), + (0x1D567, 'M', u'v'), + (0x1D568, 'M', u'w'), + (0x1D569, 'M', u'x'), + (0x1D56A, 'M', u'y'), + (0x1D56B, 'M', u'z'), + (0x1D56C, 'M', u'a'), + (0x1D56D, 'M', u'b'), + (0x1D56E, 'M', u'c'), + (0x1D56F, 'M', u'd'), + (0x1D570, 'M', u'e'), + (0x1D571, 'M', u'f'), + (0x1D572, 'M', u'g'), + (0x1D573, 'M', u'h'), + (0x1D574, 'M', u'i'), + (0x1D575, 'M', u'j'), + (0x1D576, 'M', u'k'), + (0x1D577, 'M', u'l'), + (0x1D578, 'M', u'm'), + (0x1D579, 'M', u'n'), + (0x1D57A, 'M', u'o'), + (0x1D57B, 'M', u'p'), + (0x1D57C, 'M', u'q'), + (0x1D57D, 'M', u'r'), + (0x1D57E, 'M', u's'), + (0x1D57F, 'M', u't'), + (0x1D580, 'M', u'u'), + (0x1D581, 'M', u'v'), + (0x1D582, 'M', u'w'), + (0x1D583, 'M', u'x'), + (0x1D584, 'M', u'y'), + (0x1D585, 'M', u'z'), + (0x1D586, 'M', u'a'), + (0x1D587, 'M', u'b'), + (0x1D588, 'M', u'c'), + (0x1D589, 'M', u'd'), + (0x1D58A, 'M', u'e'), + (0x1D58B, 'M', u'f'), + (0x1D58C, 'M', u'g'), + (0x1D58D, 'M', u'h'), + (0x1D58E, 'M', u'i'), + ] + +def _seg_62(): + return [ + (0x1D58F, 'M', u'j'), + (0x1D590, 'M', u'k'), + (0x1D591, 'M', u'l'), + (0x1D592, 'M', u'm'), + (0x1D593, 'M', u'n'), + (0x1D594, 'M', u'o'), + (0x1D595, 'M', u'p'), + (0x1D596, 'M', u'q'), + (0x1D597, 'M', u'r'), + (0x1D598, 'M', u's'), + (0x1D599, 'M', u't'), + (0x1D59A, 'M', u'u'), + (0x1D59B, 'M', u'v'), + (0x1D59C, 'M', u'w'), + (0x1D59D, 'M', u'x'), + (0x1D59E, 'M', u'y'), + (0x1D59F, 'M', u'z'), + (0x1D5A0, 'M', u'a'), + (0x1D5A1, 'M', u'b'), + (0x1D5A2, 'M', u'c'), + (0x1D5A3, 'M', u'd'), + (0x1D5A4, 'M', u'e'), + (0x1D5A5, 'M', u'f'), + (0x1D5A6, 'M', u'g'), + (0x1D5A7, 'M', 
u'h'), + (0x1D5A8, 'M', u'i'), + (0x1D5A9, 'M', u'j'), + (0x1D5AA, 'M', u'k'), + (0x1D5AB, 'M', u'l'), + (0x1D5AC, 'M', u'm'), + (0x1D5AD, 'M', u'n'), + (0x1D5AE, 'M', u'o'), + (0x1D5AF, 'M', u'p'), + (0x1D5B0, 'M', u'q'), + (0x1D5B1, 'M', u'r'), + (0x1D5B2, 'M', u's'), + (0x1D5B3, 'M', u't'), + (0x1D5B4, 'M', u'u'), + (0x1D5B5, 'M', u'v'), + (0x1D5B6, 'M', u'w'), + (0x1D5B7, 'M', u'x'), + (0x1D5B8, 'M', u'y'), + (0x1D5B9, 'M', u'z'), + (0x1D5BA, 'M', u'a'), + (0x1D5BB, 'M', u'b'), + (0x1D5BC, 'M', u'c'), + (0x1D5BD, 'M', u'd'), + (0x1D5BE, 'M', u'e'), + (0x1D5BF, 'M', u'f'), + (0x1D5C0, 'M', u'g'), + (0x1D5C1, 'M', u'h'), + (0x1D5C2, 'M', u'i'), + (0x1D5C3, 'M', u'j'), + (0x1D5C4, 'M', u'k'), + (0x1D5C5, 'M', u'l'), + (0x1D5C6, 'M', u'm'), + (0x1D5C7, 'M', u'n'), + (0x1D5C8, 'M', u'o'), + (0x1D5C9, 'M', u'p'), + (0x1D5CA, 'M', u'q'), + (0x1D5CB, 'M', u'r'), + (0x1D5CC, 'M', u's'), + (0x1D5CD, 'M', u't'), + (0x1D5CE, 'M', u'u'), + (0x1D5CF, 'M', u'v'), + (0x1D5D0, 'M', u'w'), + (0x1D5D1, 'M', u'x'), + (0x1D5D2, 'M', u'y'), + (0x1D5D3, 'M', u'z'), + (0x1D5D4, 'M', u'a'), + (0x1D5D5, 'M', u'b'), + (0x1D5D6, 'M', u'c'), + (0x1D5D7, 'M', u'd'), + (0x1D5D8, 'M', u'e'), + (0x1D5D9, 'M', u'f'), + (0x1D5DA, 'M', u'g'), + (0x1D5DB, 'M', u'h'), + (0x1D5DC, 'M', u'i'), + (0x1D5DD, 'M', u'j'), + (0x1D5DE, 'M', u'k'), + (0x1D5DF, 'M', u'l'), + (0x1D5E0, 'M', u'm'), + (0x1D5E1, 'M', u'n'), + (0x1D5E2, 'M', u'o'), + (0x1D5E3, 'M', u'p'), + (0x1D5E4, 'M', u'q'), + (0x1D5E5, 'M', u'r'), + (0x1D5E6, 'M', u's'), + (0x1D5E7, 'M', u't'), + (0x1D5E8, 'M', u'u'), + (0x1D5E9, 'M', u'v'), + (0x1D5EA, 'M', u'w'), + (0x1D5EB, 'M', u'x'), + (0x1D5EC, 'M', u'y'), + (0x1D5ED, 'M', u'z'), + (0x1D5EE, 'M', u'a'), + (0x1D5EF, 'M', u'b'), + (0x1D5F0, 'M', u'c'), + (0x1D5F1, 'M', u'd'), + (0x1D5F2, 'M', u'e'), + ] + +def _seg_63(): + return [ + (0x1D5F3, 'M', u'f'), + (0x1D5F4, 'M', u'g'), + (0x1D5F5, 'M', u'h'), + (0x1D5F6, 'M', u'i'), + (0x1D5F7, 'M', u'j'), + (0x1D5F8, 'M', u'k'), + (0x1D5F9, 'M', u'l'), + (0x1D5FA, 'M', u'm'), + (0x1D5FB, 'M', u'n'), + (0x1D5FC, 'M', u'o'), + (0x1D5FD, 'M', u'p'), + (0x1D5FE, 'M', u'q'), + (0x1D5FF, 'M', u'r'), + (0x1D600, 'M', u's'), + (0x1D601, 'M', u't'), + (0x1D602, 'M', u'u'), + (0x1D603, 'M', u'v'), + (0x1D604, 'M', u'w'), + (0x1D605, 'M', u'x'), + (0x1D606, 'M', u'y'), + (0x1D607, 'M', u'z'), + (0x1D608, 'M', u'a'), + (0x1D609, 'M', u'b'), + (0x1D60A, 'M', u'c'), + (0x1D60B, 'M', u'd'), + (0x1D60C, 'M', u'e'), + (0x1D60D, 'M', u'f'), + (0x1D60E, 'M', u'g'), + (0x1D60F, 'M', u'h'), + (0x1D610, 'M', u'i'), + (0x1D611, 'M', u'j'), + (0x1D612, 'M', u'k'), + (0x1D613, 'M', u'l'), + (0x1D614, 'M', u'm'), + (0x1D615, 'M', u'n'), + (0x1D616, 'M', u'o'), + (0x1D617, 'M', u'p'), + (0x1D618, 'M', u'q'), + (0x1D619, 'M', u'r'), + (0x1D61A, 'M', u's'), + (0x1D61B, 'M', u't'), + (0x1D61C, 'M', u'u'), + (0x1D61D, 'M', u'v'), + (0x1D61E, 'M', u'w'), + (0x1D61F, 'M', u'x'), + (0x1D620, 'M', u'y'), + (0x1D621, 'M', u'z'), + (0x1D622, 'M', u'a'), + (0x1D623, 'M', u'b'), + (0x1D624, 'M', u'c'), + (0x1D625, 'M', u'd'), + (0x1D626, 'M', u'e'), + (0x1D627, 'M', u'f'), + (0x1D628, 'M', u'g'), + (0x1D629, 'M', u'h'), + (0x1D62A, 'M', u'i'), + (0x1D62B, 'M', u'j'), + (0x1D62C, 'M', u'k'), + (0x1D62D, 'M', u'l'), + (0x1D62E, 'M', u'm'), + (0x1D62F, 'M', u'n'), + (0x1D630, 'M', u'o'), + (0x1D631, 'M', u'p'), + (0x1D632, 'M', u'q'), + (0x1D633, 'M', u'r'), + (0x1D634, 'M', u's'), + (0x1D635, 'M', u't'), + (0x1D636, 'M', u'u'), + (0x1D637, 'M', u'v'), + (0x1D638, 'M', u'w'), + (0x1D639, 'M', u'x'), + 
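For orientation, here is a minimal, self-contained sketch of how a sorted
table of (codepoint, status[, mapping]) rows like the one elided above is
typically consulted: binary-search for the last row whose start codepoint is
at or below the character, then act on its status. This is an illustration
under stated assumptions, not the idna library's actual API; TABLE,
uts46_row and map_char are hypothetical names, and the three rows shown are
a toy excerpt.

import bisect

TABLE = [                 # hypothetical three-row excerpt of such a table
    (0x0041, 'M', u'a'),  # 'A' maps to 'a' (real table has one row per letter)
    (0x0061, 'V'),        # 'a' and following are valid as-is
    (0x007B, 'X'),        # '{' and following are disallowed
]

def uts46_row(char):
    """Return the row governing char: the last row with start <= char.

    Assumes ord(char) >= TABLE[0][0]; the real table's first row starts
    at U+0000, so every codepoint is covered there.
    """
    return TABLE[bisect.bisect_left(TABLE, (ord(char) + 1,)) - 1]

def map_char(char):
    row = uts46_row(char)
    if row[1] == 'V':   # valid: keep the character unchanged
        return char
    if row[1] == 'M':   # mapped: substitute the replacement string
        return row[2]
    raise ValueError('disallowed code point: %r' % char)

print(''.join(map_char(c) for c in u'Aa'))  # -> 'aa'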
(0x1D63A, 'M', u'y'), + (0x1D63B, 'M', u'z'), + (0x1D63C, 'M', u'a'), + (0x1D63D, 'M', u'b'), + (0x1D63E, 'M', u'c'), + (0x1D63F, 'M', u'd'), + (0x1D640, 'M', u'e'), + (0x1D641, 'M', u'f'), + (0x1D642, 'M', u'g'), + (0x1D643, 'M', u'h'), + (0x1D644, 'M', u'i'), + (0x1D645, 'M', u'j'), + (0x1D646, 'M', u'k'), + (0x1D647, 'M', u'l'), + (0x1D648, 'M', u'm'), + (0x1D649, 'M', u'n'), + (0x1D64A, 'M', u'o'), + (0x1D64B, 'M', u'p'), + (0x1D64C, 'M', u'q'), + (0x1D64D, 'M', u'r'), + (0x1D64E, 'M', u's'), + (0x1D64F, 'M', u't'), + (0x1D650, 'M', u'u'), + (0x1D651, 'M', u'v'), + (0x1D652, 'M', u'w'), + (0x1D653, 'M', u'x'), + (0x1D654, 'M', u'y'), + (0x1D655, 'M', u'z'), + (0x1D656, 'M', u'a'), + ] + +def _seg_64(): + return [ + (0x1D657, 'M', u'b'), + (0x1D658, 'M', u'c'), + (0x1D659, 'M', u'd'), + (0x1D65A, 'M', u'e'), + (0x1D65B, 'M', u'f'), + (0x1D65C, 'M', u'g'), + (0x1D65D, 'M', u'h'), + (0x1D65E, 'M', u'i'), + (0x1D65F, 'M', u'j'), + (0x1D660, 'M', u'k'), + (0x1D661, 'M', u'l'), + (0x1D662, 'M', u'm'), + (0x1D663, 'M', u'n'), + (0x1D664, 'M', u'o'), + (0x1D665, 'M', u'p'), + (0x1D666, 'M', u'q'), + (0x1D667, 'M', u'r'), + (0x1D668, 'M', u's'), + (0x1D669, 'M', u't'), + (0x1D66A, 'M', u'u'), + (0x1D66B, 'M', u'v'), + (0x1D66C, 'M', u'w'), + (0x1D66D, 'M', u'x'), + (0x1D66E, 'M', u'y'), + (0x1D66F, 'M', u'z'), + (0x1D670, 'M', u'a'), + (0x1D671, 'M', u'b'), + (0x1D672, 'M', u'c'), + (0x1D673, 'M', u'd'), + (0x1D674, 'M', u'e'), + (0x1D675, 'M', u'f'), + (0x1D676, 'M', u'g'), + (0x1D677, 'M', u'h'), + (0x1D678, 'M', u'i'), + (0x1D679, 'M', u'j'), + (0x1D67A, 'M', u'k'), + (0x1D67B, 'M', u'l'), + (0x1D67C, 'M', u'm'), + (0x1D67D, 'M', u'n'), + (0x1D67E, 'M', u'o'), + (0x1D67F, 'M', u'p'), + (0x1D680, 'M', u'q'), + (0x1D681, 'M', u'r'), + (0x1D682, 'M', u's'), + (0x1D683, 'M', u't'), + (0x1D684, 'M', u'u'), + (0x1D685, 'M', u'v'), + (0x1D686, 'M', u'w'), + (0x1D687, 'M', u'x'), + (0x1D688, 'M', u'y'), + (0x1D689, 'M', u'z'), + (0x1D68A, 'M', u'a'), + (0x1D68B, 'M', u'b'), + (0x1D68C, 'M', u'c'), + (0x1D68D, 'M', u'd'), + (0x1D68E, 'M', u'e'), + (0x1D68F, 'M', u'f'), + (0x1D690, 'M', u'g'), + (0x1D691, 'M', u'h'), + (0x1D692, 'M', u'i'), + (0x1D693, 'M', u'j'), + (0x1D694, 'M', u'k'), + (0x1D695, 'M', u'l'), + (0x1D696, 'M', u'm'), + (0x1D697, 'M', u'n'), + (0x1D698, 'M', u'o'), + (0x1D699, 'M', u'p'), + (0x1D69A, 'M', u'q'), + (0x1D69B, 'M', u'r'), + (0x1D69C, 'M', u's'), + (0x1D69D, 'M', u't'), + (0x1D69E, 'M', u'u'), + (0x1D69F, 'M', u'v'), + (0x1D6A0, 'M', u'w'), + (0x1D6A1, 'M', u'x'), + (0x1D6A2, 'M', u'y'), + (0x1D6A3, 'M', u'z'), + (0x1D6A4, 'M', u'ı'), + (0x1D6A5, 'M', u'ȷ'), + (0x1D6A6, 'X'), + (0x1D6A8, 'M', u'α'), + (0x1D6A9, 'M', u'β'), + (0x1D6AA, 'M', u'γ'), + (0x1D6AB, 'M', u'δ'), + (0x1D6AC, 'M', u'ε'), + (0x1D6AD, 'M', u'ζ'), + (0x1D6AE, 'M', u'η'), + (0x1D6AF, 'M', u'θ'), + (0x1D6B0, 'M', u'ι'), + (0x1D6B1, 'M', u'κ'), + (0x1D6B2, 'M', u'λ'), + (0x1D6B3, 'M', u'μ'), + (0x1D6B4, 'M', u'ν'), + (0x1D6B5, 'M', u'ξ'), + (0x1D6B6, 'M', u'ο'), + (0x1D6B7, 'M', u'π'), + (0x1D6B8, 'M', u'ρ'), + (0x1D6B9, 'M', u'θ'), + (0x1D6BA, 'M', u'σ'), + (0x1D6BB, 'M', u'τ'), + ] + +def _seg_65(): + return [ + (0x1D6BC, 'M', u'υ'), + (0x1D6BD, 'M', u'φ'), + (0x1D6BE, 'M', u'χ'), + (0x1D6BF, 'M', u'ψ'), + (0x1D6C0, 'M', u'ω'), + (0x1D6C1, 'M', u'∇'), + (0x1D6C2, 'M', u'α'), + (0x1D6C3, 'M', u'β'), + (0x1D6C4, 'M', u'γ'), + (0x1D6C5, 'M', u'δ'), + (0x1D6C6, 'M', u'ε'), + (0x1D6C7, 'M', u'ζ'), + (0x1D6C8, 'M', u'η'), + (0x1D6C9, 'M', u'θ'), + (0x1D6CA, 'M', u'ι'), + (0x1D6CB, 'M', u'κ'), + (0x1D6CC, 'M', 
u'λ'), + (0x1D6CD, 'M', u'μ'), + (0x1D6CE, 'M', u'ν'), + (0x1D6CF, 'M', u'ξ'), + (0x1D6D0, 'M', u'ο'), + (0x1D6D1, 'M', u'π'), + (0x1D6D2, 'M', u'ρ'), + (0x1D6D3, 'M', u'σ'), + (0x1D6D5, 'M', u'τ'), + (0x1D6D6, 'M', u'υ'), + (0x1D6D7, 'M', u'φ'), + (0x1D6D8, 'M', u'χ'), + (0x1D6D9, 'M', u'ψ'), + (0x1D6DA, 'M', u'ω'), + (0x1D6DB, 'M', u'∂'), + (0x1D6DC, 'M', u'ε'), + (0x1D6DD, 'M', u'θ'), + (0x1D6DE, 'M', u'κ'), + (0x1D6DF, 'M', u'φ'), + (0x1D6E0, 'M', u'ρ'), + (0x1D6E1, 'M', u'π'), + (0x1D6E2, 'M', u'α'), + (0x1D6E3, 'M', u'β'), + (0x1D6E4, 'M', u'γ'), + (0x1D6E5, 'M', u'δ'), + (0x1D6E6, 'M', u'ε'), + (0x1D6E7, 'M', u'ζ'), + (0x1D6E8, 'M', u'η'), + (0x1D6E9, 'M', u'θ'), + (0x1D6EA, 'M', u'ι'), + (0x1D6EB, 'M', u'κ'), + (0x1D6EC, 'M', u'λ'), + (0x1D6ED, 'M', u'μ'), + (0x1D6EE, 'M', u'ν'), + (0x1D6EF, 'M', u'ξ'), + (0x1D6F0, 'M', u'ο'), + (0x1D6F1, 'M', u'π'), + (0x1D6F2, 'M', u'ρ'), + (0x1D6F3, 'M', u'θ'), + (0x1D6F4, 'M', u'σ'), + (0x1D6F5, 'M', u'τ'), + (0x1D6F6, 'M', u'υ'), + (0x1D6F7, 'M', u'φ'), + (0x1D6F8, 'M', u'χ'), + (0x1D6F9, 'M', u'ψ'), + (0x1D6FA, 'M', u'ω'), + (0x1D6FB, 'M', u'∇'), + (0x1D6FC, 'M', u'α'), + (0x1D6FD, 'M', u'β'), + (0x1D6FE, 'M', u'γ'), + (0x1D6FF, 'M', u'δ'), + (0x1D700, 'M', u'ε'), + (0x1D701, 'M', u'ζ'), + (0x1D702, 'M', u'η'), + (0x1D703, 'M', u'θ'), + (0x1D704, 'M', u'ι'), + (0x1D705, 'M', u'κ'), + (0x1D706, 'M', u'λ'), + (0x1D707, 'M', u'μ'), + (0x1D708, 'M', u'ν'), + (0x1D709, 'M', u'ξ'), + (0x1D70A, 'M', u'ο'), + (0x1D70B, 'M', u'π'), + (0x1D70C, 'M', u'ρ'), + (0x1D70D, 'M', u'σ'), + (0x1D70F, 'M', u'τ'), + (0x1D710, 'M', u'υ'), + (0x1D711, 'M', u'φ'), + (0x1D712, 'M', u'χ'), + (0x1D713, 'M', u'ψ'), + (0x1D714, 'M', u'ω'), + (0x1D715, 'M', u'∂'), + (0x1D716, 'M', u'ε'), + (0x1D717, 'M', u'θ'), + (0x1D718, 'M', u'κ'), + (0x1D719, 'M', u'φ'), + (0x1D71A, 'M', u'ρ'), + (0x1D71B, 'M', u'π'), + (0x1D71C, 'M', u'α'), + (0x1D71D, 'M', u'β'), + (0x1D71E, 'M', u'γ'), + (0x1D71F, 'M', u'δ'), + (0x1D720, 'M', u'ε'), + (0x1D721, 'M', u'ζ'), + ] + +def _seg_66(): + return [ + (0x1D722, 'M', u'η'), + (0x1D723, 'M', u'θ'), + (0x1D724, 'M', u'ι'), + (0x1D725, 'M', u'κ'), + (0x1D726, 'M', u'λ'), + (0x1D727, 'M', u'μ'), + (0x1D728, 'M', u'ν'), + (0x1D729, 'M', u'ξ'), + (0x1D72A, 'M', u'ο'), + (0x1D72B, 'M', u'π'), + (0x1D72C, 'M', u'ρ'), + (0x1D72D, 'M', u'θ'), + (0x1D72E, 'M', u'σ'), + (0x1D72F, 'M', u'τ'), + (0x1D730, 'M', u'υ'), + (0x1D731, 'M', u'φ'), + (0x1D732, 'M', u'χ'), + (0x1D733, 'M', u'ψ'), + (0x1D734, 'M', u'ω'), + (0x1D735, 'M', u'∇'), + (0x1D736, 'M', u'α'), + (0x1D737, 'M', u'β'), + (0x1D738, 'M', u'γ'), + (0x1D739, 'M', u'δ'), + (0x1D73A, 'M', u'ε'), + (0x1D73B, 'M', u'ζ'), + (0x1D73C, 'M', u'η'), + (0x1D73D, 'M', u'θ'), + (0x1D73E, 'M', u'ι'), + (0x1D73F, 'M', u'κ'), + (0x1D740, 'M', u'λ'), + (0x1D741, 'M', u'μ'), + (0x1D742, 'M', u'ν'), + (0x1D743, 'M', u'ξ'), + (0x1D744, 'M', u'ο'), + (0x1D745, 'M', u'π'), + (0x1D746, 'M', u'ρ'), + (0x1D747, 'M', u'σ'), + (0x1D749, 'M', u'τ'), + (0x1D74A, 'M', u'υ'), + (0x1D74B, 'M', u'φ'), + (0x1D74C, 'M', u'χ'), + (0x1D74D, 'M', u'ψ'), + (0x1D74E, 'M', u'ω'), + (0x1D74F, 'M', u'∂'), + (0x1D750, 'M', u'ε'), + (0x1D751, 'M', u'θ'), + (0x1D752, 'M', u'κ'), + (0x1D753, 'M', u'φ'), + (0x1D754, 'M', u'ρ'), + (0x1D755, 'M', u'π'), + (0x1D756, 'M', u'α'), + (0x1D757, 'M', u'β'), + (0x1D758, 'M', u'γ'), + (0x1D759, 'M', u'δ'), + (0x1D75A, 'M', u'ε'), + (0x1D75B, 'M', u'ζ'), + (0x1D75C, 'M', u'η'), + (0x1D75D, 'M', u'θ'), + (0x1D75E, 'M', u'ι'), + (0x1D75F, 'M', u'κ'), + (0x1D760, 'M', u'λ'), + (0x1D761, 'M', u'μ'), + 
(0x1D762, 'M', u'ν'), + (0x1D763, 'M', u'ξ'), + (0x1D764, 'M', u'ο'), + (0x1D765, 'M', u'π'), + (0x1D766, 'M', u'ρ'), + (0x1D767, 'M', u'θ'), + (0x1D768, 'M', u'σ'), + (0x1D769, 'M', u'τ'), + (0x1D76A, 'M', u'υ'), + (0x1D76B, 'M', u'φ'), + (0x1D76C, 'M', u'χ'), + (0x1D76D, 'M', u'ψ'), + (0x1D76E, 'M', u'ω'), + (0x1D76F, 'M', u'∇'), + (0x1D770, 'M', u'α'), + (0x1D771, 'M', u'β'), + (0x1D772, 'M', u'γ'), + (0x1D773, 'M', u'δ'), + (0x1D774, 'M', u'ε'), + (0x1D775, 'M', u'ζ'), + (0x1D776, 'M', u'η'), + (0x1D777, 'M', u'θ'), + (0x1D778, 'M', u'ι'), + (0x1D779, 'M', u'κ'), + (0x1D77A, 'M', u'λ'), + (0x1D77B, 'M', u'μ'), + (0x1D77C, 'M', u'ν'), + (0x1D77D, 'M', u'ξ'), + (0x1D77E, 'M', u'ο'), + (0x1D77F, 'M', u'π'), + (0x1D780, 'M', u'ρ'), + (0x1D781, 'M', u'σ'), + (0x1D783, 'M', u'τ'), + (0x1D784, 'M', u'υ'), + (0x1D785, 'M', u'φ'), + (0x1D786, 'M', u'χ'), + (0x1D787, 'M', u'ψ'), + ] + +def _seg_67(): + return [ + (0x1D788, 'M', u'ω'), + (0x1D789, 'M', u'∂'), + (0x1D78A, 'M', u'ε'), + (0x1D78B, 'M', u'θ'), + (0x1D78C, 'M', u'κ'), + (0x1D78D, 'M', u'φ'), + (0x1D78E, 'M', u'ρ'), + (0x1D78F, 'M', u'π'), + (0x1D790, 'M', u'α'), + (0x1D791, 'M', u'β'), + (0x1D792, 'M', u'γ'), + (0x1D793, 'M', u'δ'), + (0x1D794, 'M', u'ε'), + (0x1D795, 'M', u'ζ'), + (0x1D796, 'M', u'η'), + (0x1D797, 'M', u'θ'), + (0x1D798, 'M', u'ι'), + (0x1D799, 'M', u'κ'), + (0x1D79A, 'M', u'λ'), + (0x1D79B, 'M', u'μ'), + (0x1D79C, 'M', u'ν'), + (0x1D79D, 'M', u'ξ'), + (0x1D79E, 'M', u'ο'), + (0x1D79F, 'M', u'π'), + (0x1D7A0, 'M', u'ρ'), + (0x1D7A1, 'M', u'θ'), + (0x1D7A2, 'M', u'σ'), + (0x1D7A3, 'M', u'τ'), + (0x1D7A4, 'M', u'υ'), + (0x1D7A5, 'M', u'φ'), + (0x1D7A6, 'M', u'χ'), + (0x1D7A7, 'M', u'ψ'), + (0x1D7A8, 'M', u'ω'), + (0x1D7A9, 'M', u'∇'), + (0x1D7AA, 'M', u'α'), + (0x1D7AB, 'M', u'β'), + (0x1D7AC, 'M', u'γ'), + (0x1D7AD, 'M', u'δ'), + (0x1D7AE, 'M', u'ε'), + (0x1D7AF, 'M', u'ζ'), + (0x1D7B0, 'M', u'η'), + (0x1D7B1, 'M', u'θ'), + (0x1D7B2, 'M', u'ι'), + (0x1D7B3, 'M', u'κ'), + (0x1D7B4, 'M', u'λ'), + (0x1D7B5, 'M', u'μ'), + (0x1D7B6, 'M', u'ν'), + (0x1D7B7, 'M', u'ξ'), + (0x1D7B8, 'M', u'ο'), + (0x1D7B9, 'M', u'π'), + (0x1D7BA, 'M', u'ρ'), + (0x1D7BB, 'M', u'σ'), + (0x1D7BD, 'M', u'τ'), + (0x1D7BE, 'M', u'υ'), + (0x1D7BF, 'M', u'φ'), + (0x1D7C0, 'M', u'χ'), + (0x1D7C1, 'M', u'ψ'), + (0x1D7C2, 'M', u'ω'), + (0x1D7C3, 'M', u'∂'), + (0x1D7C4, 'M', u'ε'), + (0x1D7C5, 'M', u'θ'), + (0x1D7C6, 'M', u'κ'), + (0x1D7C7, 'M', u'φ'), + (0x1D7C8, 'M', u'ρ'), + (0x1D7C9, 'M', u'π'), + (0x1D7CA, 'M', u'ϝ'), + (0x1D7CC, 'X'), + (0x1D7CE, 'M', u'0'), + (0x1D7CF, 'M', u'1'), + (0x1D7D0, 'M', u'2'), + (0x1D7D1, 'M', u'3'), + (0x1D7D2, 'M', u'4'), + (0x1D7D3, 'M', u'5'), + (0x1D7D4, 'M', u'6'), + (0x1D7D5, 'M', u'7'), + (0x1D7D6, 'M', u'8'), + (0x1D7D7, 'M', u'9'), + (0x1D7D8, 'M', u'0'), + (0x1D7D9, 'M', u'1'), + (0x1D7DA, 'M', u'2'), + (0x1D7DB, 'M', u'3'), + (0x1D7DC, 'M', u'4'), + (0x1D7DD, 'M', u'5'), + (0x1D7DE, 'M', u'6'), + (0x1D7DF, 'M', u'7'), + (0x1D7E0, 'M', u'8'), + (0x1D7E1, 'M', u'9'), + (0x1D7E2, 'M', u'0'), + (0x1D7E3, 'M', u'1'), + (0x1D7E4, 'M', u'2'), + (0x1D7E5, 'M', u'3'), + (0x1D7E6, 'M', u'4'), + (0x1D7E7, 'M', u'5'), + (0x1D7E8, 'M', u'6'), + (0x1D7E9, 'M', u'7'), + (0x1D7EA, 'M', u'8'), + (0x1D7EB, 'M', u'9'), + (0x1D7EC, 'M', u'0'), + (0x1D7ED, 'M', u'1'), + (0x1D7EE, 'M', u'2'), + ] + +def _seg_68(): + return [ + (0x1D7EF, 'M', u'3'), + (0x1D7F0, 'M', u'4'), + (0x1D7F1, 'M', u'5'), + (0x1D7F2, 'M', u'6'), + (0x1D7F3, 'M', u'7'), + (0x1D7F4, 'M', u'8'), + (0x1D7F5, 'M', u'9'), + (0x1D7F6, 'M', u'0'), + (0x1D7F7, 'M', 
u'1'), + (0x1D7F8, 'M', u'2'), + (0x1D7F9, 'M', u'3'), + (0x1D7FA, 'M', u'4'), + (0x1D7FB, 'M', u'5'), + (0x1D7FC, 'M', u'6'), + (0x1D7FD, 'M', u'7'), + (0x1D7FE, 'M', u'8'), + (0x1D7FF, 'M', u'9'), + (0x1D800, 'V'), + (0x1DA8C, 'X'), + (0x1DA9B, 'V'), + (0x1DAA0, 'X'), + (0x1DAA1, 'V'), + (0x1DAB0, 'X'), + (0x1E000, 'V'), + (0x1E007, 'X'), + (0x1E008, 'V'), + (0x1E019, 'X'), + (0x1E01B, 'V'), + (0x1E022, 'X'), + (0x1E023, 'V'), + (0x1E025, 'X'), + (0x1E026, 'V'), + (0x1E02B, 'X'), + (0x1E800, 'V'), + (0x1E8C5, 'X'), + (0x1E8C7, 'V'), + (0x1E8D7, 'X'), + (0x1E900, 'M', u'𞤢'), + (0x1E901, 'M', u'𞤣'), + (0x1E902, 'M', u'𞤤'), + (0x1E903, 'M', u'𞤥'), + (0x1E904, 'M', u'𞤦'), + (0x1E905, 'M', u'𞤧'), + (0x1E906, 'M', u'𞤨'), + (0x1E907, 'M', u'𞤩'), + (0x1E908, 'M', u'𞤪'), + (0x1E909, 'M', u'𞤫'), + (0x1E90A, 'M', u'𞤬'), + (0x1E90B, 'M', u'𞤭'), + (0x1E90C, 'M', u'𞤮'), + (0x1E90D, 'M', u'𞤯'), + (0x1E90E, 'M', u'𞤰'), + (0x1E90F, 'M', u'𞤱'), + (0x1E910, 'M', u'𞤲'), + (0x1E911, 'M', u'𞤳'), + (0x1E912, 'M', u'𞤴'), + (0x1E913, 'M', u'𞤵'), + (0x1E914, 'M', u'𞤶'), + (0x1E915, 'M', u'𞤷'), + (0x1E916, 'M', u'𞤸'), + (0x1E917, 'M', u'𞤹'), + (0x1E918, 'M', u'𞤺'), + (0x1E919, 'M', u'𞤻'), + (0x1E91A, 'M', u'𞤼'), + (0x1E91B, 'M', u'𞤽'), + (0x1E91C, 'M', u'𞤾'), + (0x1E91D, 'M', u'𞤿'), + (0x1E91E, 'M', u'𞥀'), + (0x1E91F, 'M', u'𞥁'), + (0x1E920, 'M', u'𞥂'), + (0x1E921, 'M', u'𞥃'), + (0x1E922, 'V'), + (0x1E94B, 'X'), + (0x1E950, 'V'), + (0x1E95A, 'X'), + (0x1E95E, 'V'), + (0x1E960, 'X'), + (0x1EC71, 'V'), + (0x1ECB5, 'X'), + (0x1EE00, 'M', u'ا'), + (0x1EE01, 'M', u'ب'), + (0x1EE02, 'M', u'ج'), + (0x1EE03, 'M', u'د'), + (0x1EE04, 'X'), + (0x1EE05, 'M', u'و'), + (0x1EE06, 'M', u'ز'), + (0x1EE07, 'M', u'ح'), + (0x1EE08, 'M', u'ط'), + (0x1EE09, 'M', u'ي'), + (0x1EE0A, 'M', u'ك'), + (0x1EE0B, 'M', u'ل'), + (0x1EE0C, 'M', u'م'), + (0x1EE0D, 'M', u'ن'), + (0x1EE0E, 'M', u'س'), + (0x1EE0F, 'M', u'ع'), + (0x1EE10, 'M', u'ف'), + (0x1EE11, 'M', u'ص'), + (0x1EE12, 'M', u'ق'), + (0x1EE13, 'M', u'ر'), + (0x1EE14, 'M', u'ش'), + ] + +def _seg_69(): + return [ + (0x1EE15, 'M', u'ت'), + (0x1EE16, 'M', u'ث'), + (0x1EE17, 'M', u'خ'), + (0x1EE18, 'M', u'ذ'), + (0x1EE19, 'M', u'ض'), + (0x1EE1A, 'M', u'ظ'), + (0x1EE1B, 'M', u'غ'), + (0x1EE1C, 'M', u'ٮ'), + (0x1EE1D, 'M', u'ں'), + (0x1EE1E, 'M', u'ڡ'), + (0x1EE1F, 'M', u'ٯ'), + (0x1EE20, 'X'), + (0x1EE21, 'M', u'ب'), + (0x1EE22, 'M', u'ج'), + (0x1EE23, 'X'), + (0x1EE24, 'M', u'ه'), + (0x1EE25, 'X'), + (0x1EE27, 'M', u'ح'), + (0x1EE28, 'X'), + (0x1EE29, 'M', u'ي'), + (0x1EE2A, 'M', u'ك'), + (0x1EE2B, 'M', u'ل'), + (0x1EE2C, 'M', u'م'), + (0x1EE2D, 'M', u'ن'), + (0x1EE2E, 'M', u'س'), + (0x1EE2F, 'M', u'ع'), + (0x1EE30, 'M', u'ف'), + (0x1EE31, 'M', u'ص'), + (0x1EE32, 'M', u'ق'), + (0x1EE33, 'X'), + (0x1EE34, 'M', u'ش'), + (0x1EE35, 'M', u'ت'), + (0x1EE36, 'M', u'ث'), + (0x1EE37, 'M', u'خ'), + (0x1EE38, 'X'), + (0x1EE39, 'M', u'ض'), + (0x1EE3A, 'X'), + (0x1EE3B, 'M', u'غ'), + (0x1EE3C, 'X'), + (0x1EE42, 'M', u'ج'), + (0x1EE43, 'X'), + (0x1EE47, 'M', u'ح'), + (0x1EE48, 'X'), + (0x1EE49, 'M', u'ي'), + (0x1EE4A, 'X'), + (0x1EE4B, 'M', u'ل'), + (0x1EE4C, 'X'), + (0x1EE4D, 'M', u'ن'), + (0x1EE4E, 'M', u'س'), + (0x1EE4F, 'M', u'ع'), + (0x1EE50, 'X'), + (0x1EE51, 'M', u'ص'), + (0x1EE52, 'M', u'ق'), + (0x1EE53, 'X'), + (0x1EE54, 'M', u'ش'), + (0x1EE55, 'X'), + (0x1EE57, 'M', u'خ'), + (0x1EE58, 'X'), + (0x1EE59, 'M', u'ض'), + (0x1EE5A, 'X'), + (0x1EE5B, 'M', u'غ'), + (0x1EE5C, 'X'), + (0x1EE5D, 'M', u'ں'), + (0x1EE5E, 'X'), + (0x1EE5F, 'M', u'ٯ'), + (0x1EE60, 'X'), + (0x1EE61, 'M', u'ب'), + (0x1EE62, 'M', 
u'ج'), + (0x1EE63, 'X'), + (0x1EE64, 'M', u'ه'), + (0x1EE65, 'X'), + (0x1EE67, 'M', u'ح'), + (0x1EE68, 'M', u'ط'), + (0x1EE69, 'M', u'ي'), + (0x1EE6A, 'M', u'ك'), + (0x1EE6B, 'X'), + (0x1EE6C, 'M', u'م'), + (0x1EE6D, 'M', u'ن'), + (0x1EE6E, 'M', u'س'), + (0x1EE6F, 'M', u'ع'), + (0x1EE70, 'M', u'ف'), + (0x1EE71, 'M', u'ص'), + (0x1EE72, 'M', u'ق'), + (0x1EE73, 'X'), + (0x1EE74, 'M', u'ش'), + (0x1EE75, 'M', u'ت'), + (0x1EE76, 'M', u'ث'), + (0x1EE77, 'M', u'خ'), + (0x1EE78, 'X'), + (0x1EE79, 'M', u'ض'), + (0x1EE7A, 'M', u'ظ'), + (0x1EE7B, 'M', u'غ'), + (0x1EE7C, 'M', u'ٮ'), + (0x1EE7D, 'X'), + (0x1EE7E, 'M', u'ڡ'), + (0x1EE7F, 'X'), + (0x1EE80, 'M', u'ا'), + (0x1EE81, 'M', u'ب'), + (0x1EE82, 'M', u'ج'), + (0x1EE83, 'M', u'د'), + ] + +def _seg_70(): + return [ + (0x1EE84, 'M', u'ه'), + (0x1EE85, 'M', u'و'), + (0x1EE86, 'M', u'ز'), + (0x1EE87, 'M', u'ح'), + (0x1EE88, 'M', u'ط'), + (0x1EE89, 'M', u'ي'), + (0x1EE8A, 'X'), + (0x1EE8B, 'M', u'ل'), + (0x1EE8C, 'M', u'م'), + (0x1EE8D, 'M', u'ن'), + (0x1EE8E, 'M', u'س'), + (0x1EE8F, 'M', u'ع'), + (0x1EE90, 'M', u'ف'), + (0x1EE91, 'M', u'ص'), + (0x1EE92, 'M', u'ق'), + (0x1EE93, 'M', u'ر'), + (0x1EE94, 'M', u'ش'), + (0x1EE95, 'M', u'ت'), + (0x1EE96, 'M', u'ث'), + (0x1EE97, 'M', u'خ'), + (0x1EE98, 'M', u'ذ'), + (0x1EE99, 'M', u'ض'), + (0x1EE9A, 'M', u'ظ'), + (0x1EE9B, 'M', u'غ'), + (0x1EE9C, 'X'), + (0x1EEA1, 'M', u'ب'), + (0x1EEA2, 'M', u'ج'), + (0x1EEA3, 'M', u'د'), + (0x1EEA4, 'X'), + (0x1EEA5, 'M', u'و'), + (0x1EEA6, 'M', u'ز'), + (0x1EEA7, 'M', u'ح'), + (0x1EEA8, 'M', u'ط'), + (0x1EEA9, 'M', u'ي'), + (0x1EEAA, 'X'), + (0x1EEAB, 'M', u'ل'), + (0x1EEAC, 'M', u'م'), + (0x1EEAD, 'M', u'ن'), + (0x1EEAE, 'M', u'س'), + (0x1EEAF, 'M', u'ع'), + (0x1EEB0, 'M', u'ف'), + (0x1EEB1, 'M', u'ص'), + (0x1EEB2, 'M', u'ق'), + (0x1EEB3, 'M', u'ر'), + (0x1EEB4, 'M', u'ش'), + (0x1EEB5, 'M', u'ت'), + (0x1EEB6, 'M', u'ث'), + (0x1EEB7, 'M', u'خ'), + (0x1EEB8, 'M', u'ذ'), + (0x1EEB9, 'M', u'ض'), + (0x1EEBA, 'M', u'ظ'), + (0x1EEBB, 'M', u'غ'), + (0x1EEBC, 'X'), + (0x1EEF0, 'V'), + (0x1EEF2, 'X'), + (0x1F000, 'V'), + (0x1F02C, 'X'), + (0x1F030, 'V'), + (0x1F094, 'X'), + (0x1F0A0, 'V'), + (0x1F0AF, 'X'), + (0x1F0B1, 'V'), + (0x1F0C0, 'X'), + (0x1F0C1, 'V'), + (0x1F0D0, 'X'), + (0x1F0D1, 'V'), + (0x1F0F6, 'X'), + (0x1F101, '3', u'0,'), + (0x1F102, '3', u'1,'), + (0x1F103, '3', u'2,'), + (0x1F104, '3', u'3,'), + (0x1F105, '3', u'4,'), + (0x1F106, '3', u'5,'), + (0x1F107, '3', u'6,'), + (0x1F108, '3', u'7,'), + (0x1F109, '3', u'8,'), + (0x1F10A, '3', u'9,'), + (0x1F10B, 'V'), + (0x1F10D, 'X'), + (0x1F110, '3', u'(a)'), + (0x1F111, '3', u'(b)'), + (0x1F112, '3', u'(c)'), + (0x1F113, '3', u'(d)'), + (0x1F114, '3', u'(e)'), + (0x1F115, '3', u'(f)'), + (0x1F116, '3', u'(g)'), + (0x1F117, '3', u'(h)'), + (0x1F118, '3', u'(i)'), + (0x1F119, '3', u'(j)'), + (0x1F11A, '3', u'(k)'), + (0x1F11B, '3', u'(l)'), + (0x1F11C, '3', u'(m)'), + (0x1F11D, '3', u'(n)'), + (0x1F11E, '3', u'(o)'), + (0x1F11F, '3', u'(p)'), + (0x1F120, '3', u'(q)'), + (0x1F121, '3', u'(r)'), + (0x1F122, '3', u'(s)'), + (0x1F123, '3', u'(t)'), + (0x1F124, '3', u'(u)'), + ] + +def _seg_71(): + return [ + (0x1F125, '3', u'(v)'), + (0x1F126, '3', u'(w)'), + (0x1F127, '3', u'(x)'), + (0x1F128, '3', u'(y)'), + (0x1F129, '3', u'(z)'), + (0x1F12A, 'M', u'〔s〕'), + (0x1F12B, 'M', u'c'), + (0x1F12C, 'M', u'r'), + (0x1F12D, 'M', u'cd'), + (0x1F12E, 'M', u'wz'), + (0x1F12F, 'V'), + (0x1F130, 'M', u'a'), + (0x1F131, 'M', u'b'), + (0x1F132, 'M', u'c'), + (0x1F133, 'M', u'd'), + (0x1F134, 'M', u'e'), + (0x1F135, 'M', u'f'), + (0x1F136, 
'M', u'g'), + (0x1F137, 'M', u'h'), + (0x1F138, 'M', u'i'), + (0x1F139, 'M', u'j'), + (0x1F13A, 'M', u'k'), + (0x1F13B, 'M', u'l'), + (0x1F13C, 'M', u'm'), + (0x1F13D, 'M', u'n'), + (0x1F13E, 'M', u'o'), + (0x1F13F, 'M', u'p'), + (0x1F140, 'M', u'q'), + (0x1F141, 'M', u'r'), + (0x1F142, 'M', u's'), + (0x1F143, 'M', u't'), + (0x1F144, 'M', u'u'), + (0x1F145, 'M', u'v'), + (0x1F146, 'M', u'w'), + (0x1F147, 'M', u'x'), + (0x1F148, 'M', u'y'), + (0x1F149, 'M', u'z'), + (0x1F14A, 'M', u'hv'), + (0x1F14B, 'M', u'mv'), + (0x1F14C, 'M', u'sd'), + (0x1F14D, 'M', u'ss'), + (0x1F14E, 'M', u'ppv'), + (0x1F14F, 'M', u'wc'), + (0x1F150, 'V'), + (0x1F16A, 'M', u'mc'), + (0x1F16B, 'M', u'md'), + (0x1F16C, 'X'), + (0x1F170, 'V'), + (0x1F190, 'M', u'dj'), + (0x1F191, 'V'), + (0x1F1AD, 'X'), + (0x1F1E6, 'V'), + (0x1F200, 'M', u'ほか'), + (0x1F201, 'M', u'ココ'), + (0x1F202, 'M', u'サ'), + (0x1F203, 'X'), + (0x1F210, 'M', u'手'), + (0x1F211, 'M', u'字'), + (0x1F212, 'M', u'双'), + (0x1F213, 'M', u'デ'), + (0x1F214, 'M', u'二'), + (0x1F215, 'M', u'多'), + (0x1F216, 'M', u'解'), + (0x1F217, 'M', u'天'), + (0x1F218, 'M', u'交'), + (0x1F219, 'M', u'映'), + (0x1F21A, 'M', u'無'), + (0x1F21B, 'M', u'料'), + (0x1F21C, 'M', u'前'), + (0x1F21D, 'M', u'後'), + (0x1F21E, 'M', u'再'), + (0x1F21F, 'M', u'新'), + (0x1F220, 'M', u'初'), + (0x1F221, 'M', u'終'), + (0x1F222, 'M', u'生'), + (0x1F223, 'M', u'販'), + (0x1F224, 'M', u'声'), + (0x1F225, 'M', u'吹'), + (0x1F226, 'M', u'演'), + (0x1F227, 'M', u'投'), + (0x1F228, 'M', u'捕'), + (0x1F229, 'M', u'一'), + (0x1F22A, 'M', u'三'), + (0x1F22B, 'M', u'遊'), + (0x1F22C, 'M', u'左'), + (0x1F22D, 'M', u'中'), + (0x1F22E, 'M', u'右'), + (0x1F22F, 'M', u'指'), + (0x1F230, 'M', u'走'), + (0x1F231, 'M', u'打'), + (0x1F232, 'M', u'禁'), + (0x1F233, 'M', u'空'), + (0x1F234, 'M', u'合'), + (0x1F235, 'M', u'満'), + (0x1F236, 'M', u'有'), + (0x1F237, 'M', u'月'), + (0x1F238, 'M', u'申'), + (0x1F239, 'M', u'割'), + (0x1F23A, 'M', u'営'), + (0x1F23B, 'M', u'配'), + ] + +def _seg_72(): + return [ + (0x1F23C, 'X'), + (0x1F240, 'M', u'〔本〕'), + (0x1F241, 'M', u'〔三〕'), + (0x1F242, 'M', u'〔二〕'), + (0x1F243, 'M', u'〔安〕'), + (0x1F244, 'M', u'〔点〕'), + (0x1F245, 'M', u'〔打〕'), + (0x1F246, 'M', u'〔盗〕'), + (0x1F247, 'M', u'〔勝〕'), + (0x1F248, 'M', u'〔敗〕'), + (0x1F249, 'X'), + (0x1F250, 'M', u'得'), + (0x1F251, 'M', u'可'), + (0x1F252, 'X'), + (0x1F260, 'V'), + (0x1F266, 'X'), + (0x1F300, 'V'), + (0x1F6D5, 'X'), + (0x1F6E0, 'V'), + (0x1F6ED, 'X'), + (0x1F6F0, 'V'), + (0x1F6FA, 'X'), + (0x1F700, 'V'), + (0x1F774, 'X'), + (0x1F780, 'V'), + (0x1F7D9, 'X'), + (0x1F800, 'V'), + (0x1F80C, 'X'), + (0x1F810, 'V'), + (0x1F848, 'X'), + (0x1F850, 'V'), + (0x1F85A, 'X'), + (0x1F860, 'V'), + (0x1F888, 'X'), + (0x1F890, 'V'), + (0x1F8AE, 'X'), + (0x1F900, 'V'), + (0x1F90C, 'X'), + (0x1F910, 'V'), + (0x1F93F, 'X'), + (0x1F940, 'V'), + (0x1F971, 'X'), + (0x1F973, 'V'), + (0x1F977, 'X'), + (0x1F97A, 'V'), + (0x1F97B, 'X'), + (0x1F97C, 'V'), + (0x1F9A3, 'X'), + (0x1F9B0, 'V'), + (0x1F9BA, 'X'), + (0x1F9C0, 'V'), + (0x1F9C3, 'X'), + (0x1F9D0, 'V'), + (0x1FA00, 'X'), + (0x1FA60, 'V'), + (0x1FA6E, 'X'), + (0x20000, 'V'), + (0x2A6D7, 'X'), + (0x2A700, 'V'), + (0x2B735, 'X'), + (0x2B740, 'V'), + (0x2B81E, 'X'), + (0x2B820, 'V'), + (0x2CEA2, 'X'), + (0x2CEB0, 'V'), + (0x2EBE1, 'X'), + (0x2F800, 'M', u'丽'), + (0x2F801, 'M', u'丸'), + (0x2F802, 'M', u'乁'), + (0x2F803, 'M', u'𠄢'), + (0x2F804, 'M', u'你'), + (0x2F805, 'M', u'侮'), + (0x2F806, 'M', u'侻'), + (0x2F807, 'M', u'倂'), + (0x2F808, 'M', u'偺'), + (0x2F809, 'M', u'備'), + (0x2F80A, 'M', u'僧'), + (0x2F80B, 'M', u'像'), + (0x2F80C, 
'M', u'㒞'), + (0x2F80D, 'M', u'𠘺'), + (0x2F80E, 'M', u'免'), + (0x2F80F, 'M', u'兔'), + (0x2F810, 'M', u'兤'), + (0x2F811, 'M', u'具'), + (0x2F812, 'M', u'𠔜'), + (0x2F813, 'M', u'㒹'), + (0x2F814, 'M', u'內'), + (0x2F815, 'M', u'再'), + (0x2F816, 'M', u'𠕋'), + (0x2F817, 'M', u'冗'), + (0x2F818, 'M', u'冤'), + (0x2F819, 'M', u'仌'), + (0x2F81A, 'M', u'冬'), + (0x2F81B, 'M', u'况'), + (0x2F81C, 'M', u'𩇟'), + (0x2F81D, 'M', u'凵'), + (0x2F81E, 'M', u'刃'), + (0x2F81F, 'M', u'㓟'), + (0x2F820, 'M', u'刻'), + (0x2F821, 'M', u'剆'), + ] + +def _seg_73(): + return [ + (0x2F822, 'M', u'割'), + (0x2F823, 'M', u'剷'), + (0x2F824, 'M', u'㔕'), + (0x2F825, 'M', u'勇'), + (0x2F826, 'M', u'勉'), + (0x2F827, 'M', u'勤'), + (0x2F828, 'M', u'勺'), + (0x2F829, 'M', u'包'), + (0x2F82A, 'M', u'匆'), + (0x2F82B, 'M', u'北'), + (0x2F82C, 'M', u'卉'), + (0x2F82D, 'M', u'卑'), + (0x2F82E, 'M', u'博'), + (0x2F82F, 'M', u'即'), + (0x2F830, 'M', u'卽'), + (0x2F831, 'M', u'卿'), + (0x2F834, 'M', u'𠨬'), + (0x2F835, 'M', u'灰'), + (0x2F836, 'M', u'及'), + (0x2F837, 'M', u'叟'), + (0x2F838, 'M', u'𠭣'), + (0x2F839, 'M', u'叫'), + (0x2F83A, 'M', u'叱'), + (0x2F83B, 'M', u'吆'), + (0x2F83C, 'M', u'咞'), + (0x2F83D, 'M', u'吸'), + (0x2F83E, 'M', u'呈'), + (0x2F83F, 'M', u'周'), + (0x2F840, 'M', u'咢'), + (0x2F841, 'M', u'哶'), + (0x2F842, 'M', u'唐'), + (0x2F843, 'M', u'啓'), + (0x2F844, 'M', u'啣'), + (0x2F845, 'M', u'善'), + (0x2F847, 'M', u'喙'), + (0x2F848, 'M', u'喫'), + (0x2F849, 'M', u'喳'), + (0x2F84A, 'M', u'嗂'), + (0x2F84B, 'M', u'圖'), + (0x2F84C, 'M', u'嘆'), + (0x2F84D, 'M', u'圗'), + (0x2F84E, 'M', u'噑'), + (0x2F84F, 'M', u'噴'), + (0x2F850, 'M', u'切'), + (0x2F851, 'M', u'壮'), + (0x2F852, 'M', u'城'), + (0x2F853, 'M', u'埴'), + (0x2F854, 'M', u'堍'), + (0x2F855, 'M', u'型'), + (0x2F856, 'M', u'堲'), + (0x2F857, 'M', u'報'), + (0x2F858, 'M', u'墬'), + (0x2F859, 'M', u'𡓤'), + (0x2F85A, 'M', u'売'), + (0x2F85B, 'M', u'壷'), + (0x2F85C, 'M', u'夆'), + (0x2F85D, 'M', u'多'), + (0x2F85E, 'M', u'夢'), + (0x2F85F, 'M', u'奢'), + (0x2F860, 'M', u'𡚨'), + (0x2F861, 'M', u'𡛪'), + (0x2F862, 'M', u'姬'), + (0x2F863, 'M', u'娛'), + (0x2F864, 'M', u'娧'), + (0x2F865, 'M', u'姘'), + (0x2F866, 'M', u'婦'), + (0x2F867, 'M', u'㛮'), + (0x2F868, 'X'), + (0x2F869, 'M', u'嬈'), + (0x2F86A, 'M', u'嬾'), + (0x2F86C, 'M', u'𡧈'), + (0x2F86D, 'M', u'寃'), + (0x2F86E, 'M', u'寘'), + (0x2F86F, 'M', u'寧'), + (0x2F870, 'M', u'寳'), + (0x2F871, 'M', u'𡬘'), + (0x2F872, 'M', u'寿'), + (0x2F873, 'M', u'将'), + (0x2F874, 'X'), + (0x2F875, 'M', u'尢'), + (0x2F876, 'M', u'㞁'), + (0x2F877, 'M', u'屠'), + (0x2F878, 'M', u'屮'), + (0x2F879, 'M', u'峀'), + (0x2F87A, 'M', u'岍'), + (0x2F87B, 'M', u'𡷤'), + (0x2F87C, 'M', u'嵃'), + (0x2F87D, 'M', u'𡷦'), + (0x2F87E, 'M', u'嵮'), + (0x2F87F, 'M', u'嵫'), + (0x2F880, 'M', u'嵼'), + (0x2F881, 'M', u'巡'), + (0x2F882, 'M', u'巢'), + (0x2F883, 'M', u'㠯'), + (0x2F884, 'M', u'巽'), + (0x2F885, 'M', u'帨'), + (0x2F886, 'M', u'帽'), + (0x2F887, 'M', u'幩'), + (0x2F888, 'M', u'㡢'), + (0x2F889, 'M', u'𢆃'), + ] + +def _seg_74(): + return [ + (0x2F88A, 'M', u'㡼'), + (0x2F88B, 'M', u'庰'), + (0x2F88C, 'M', u'庳'), + (0x2F88D, 'M', u'庶'), + (0x2F88E, 'M', u'廊'), + (0x2F88F, 'M', u'𪎒'), + (0x2F890, 'M', u'廾'), + (0x2F891, 'M', u'𢌱'), + (0x2F893, 'M', u'舁'), + (0x2F894, 'M', u'弢'), + (0x2F896, 'M', u'㣇'), + (0x2F897, 'M', u'𣊸'), + (0x2F898, 'M', u'𦇚'), + (0x2F899, 'M', u'形'), + (0x2F89A, 'M', u'彫'), + (0x2F89B, 'M', u'㣣'), + (0x2F89C, 'M', u'徚'), + (0x2F89D, 'M', u'忍'), + (0x2F89E, 'M', u'志'), + (0x2F89F, 'M', u'忹'), + (0x2F8A0, 'M', u'悁'), + (0x2F8A1, 'M', u'㤺'), + (0x2F8A2, 'M', u'㤜'), + (0x2F8A3, 'M', u'悔'), + 
(0x2F8A4, 'M', u'𢛔'), + (0x2F8A5, 'M', u'惇'), + (0x2F8A6, 'M', u'慈'), + (0x2F8A7, 'M', u'慌'), + (0x2F8A8, 'M', u'慎'), + (0x2F8A9, 'M', u'慌'), + (0x2F8AA, 'M', u'慺'), + (0x2F8AB, 'M', u'憎'), + (0x2F8AC, 'M', u'憲'), + (0x2F8AD, 'M', u'憤'), + (0x2F8AE, 'M', u'憯'), + (0x2F8AF, 'M', u'懞'), + (0x2F8B0, 'M', u'懲'), + (0x2F8B1, 'M', u'懶'), + (0x2F8B2, 'M', u'成'), + (0x2F8B3, 'M', u'戛'), + (0x2F8B4, 'M', u'扝'), + (0x2F8B5, 'M', u'抱'), + (0x2F8B6, 'M', u'拔'), + (0x2F8B7, 'M', u'捐'), + (0x2F8B8, 'M', u'𢬌'), + (0x2F8B9, 'M', u'挽'), + (0x2F8BA, 'M', u'拼'), + (0x2F8BB, 'M', u'捨'), + (0x2F8BC, 'M', u'掃'), + (0x2F8BD, 'M', u'揤'), + (0x2F8BE, 'M', u'𢯱'), + (0x2F8BF, 'M', u'搢'), + (0x2F8C0, 'M', u'揅'), + (0x2F8C1, 'M', u'掩'), + (0x2F8C2, 'M', u'㨮'), + (0x2F8C3, 'M', u'摩'), + (0x2F8C4, 'M', u'摾'), + (0x2F8C5, 'M', u'撝'), + (0x2F8C6, 'M', u'摷'), + (0x2F8C7, 'M', u'㩬'), + (0x2F8C8, 'M', u'敏'), + (0x2F8C9, 'M', u'敬'), + (0x2F8CA, 'M', u'𣀊'), + (0x2F8CB, 'M', u'旣'), + (0x2F8CC, 'M', u'書'), + (0x2F8CD, 'M', u'晉'), + (0x2F8CE, 'M', u'㬙'), + (0x2F8CF, 'M', u'暑'), + (0x2F8D0, 'M', u'㬈'), + (0x2F8D1, 'M', u'㫤'), + (0x2F8D2, 'M', u'冒'), + (0x2F8D3, 'M', u'冕'), + (0x2F8D4, 'M', u'最'), + (0x2F8D5, 'M', u'暜'), + (0x2F8D6, 'M', u'肭'), + (0x2F8D7, 'M', u'䏙'), + (0x2F8D8, 'M', u'朗'), + (0x2F8D9, 'M', u'望'), + (0x2F8DA, 'M', u'朡'), + (0x2F8DB, 'M', u'杞'), + (0x2F8DC, 'M', u'杓'), + (0x2F8DD, 'M', u'𣏃'), + (0x2F8DE, 'M', u'㭉'), + (0x2F8DF, 'M', u'柺'), + (0x2F8E0, 'M', u'枅'), + (0x2F8E1, 'M', u'桒'), + (0x2F8E2, 'M', u'梅'), + (0x2F8E3, 'M', u'𣑭'), + (0x2F8E4, 'M', u'梎'), + (0x2F8E5, 'M', u'栟'), + (0x2F8E6, 'M', u'椔'), + (0x2F8E7, 'M', u'㮝'), + (0x2F8E8, 'M', u'楂'), + (0x2F8E9, 'M', u'榣'), + (0x2F8EA, 'M', u'槪'), + (0x2F8EB, 'M', u'檨'), + (0x2F8EC, 'M', u'𣚣'), + (0x2F8ED, 'M', u'櫛'), + (0x2F8EE, 'M', u'㰘'), + (0x2F8EF, 'M', u'次'), + ] + +def _seg_75(): + return [ + (0x2F8F0, 'M', u'𣢧'), + (0x2F8F1, 'M', u'歔'), + (0x2F8F2, 'M', u'㱎'), + (0x2F8F3, 'M', u'歲'), + (0x2F8F4, 'M', u'殟'), + (0x2F8F5, 'M', u'殺'), + (0x2F8F6, 'M', u'殻'), + (0x2F8F7, 'M', u'𣪍'), + (0x2F8F8, 'M', u'𡴋'), + (0x2F8F9, 'M', u'𣫺'), + (0x2F8FA, 'M', u'汎'), + (0x2F8FB, 'M', u'𣲼'), + (0x2F8FC, 'M', u'沿'), + (0x2F8FD, 'M', u'泍'), + (0x2F8FE, 'M', u'汧'), + (0x2F8FF, 'M', u'洖'), + (0x2F900, 'M', u'派'), + (0x2F901, 'M', u'海'), + (0x2F902, 'M', u'流'), + (0x2F903, 'M', u'浩'), + (0x2F904, 'M', u'浸'), + (0x2F905, 'M', u'涅'), + (0x2F906, 'M', u'𣴞'), + (0x2F907, 'M', u'洴'), + (0x2F908, 'M', u'港'), + (0x2F909, 'M', u'湮'), + (0x2F90A, 'M', u'㴳'), + (0x2F90B, 'M', u'滋'), + (0x2F90C, 'M', u'滇'), + (0x2F90D, 'M', u'𣻑'), + (0x2F90E, 'M', u'淹'), + (0x2F90F, 'M', u'潮'), + (0x2F910, 'M', u'𣽞'), + (0x2F911, 'M', u'𣾎'), + (0x2F912, 'M', u'濆'), + (0x2F913, 'M', u'瀹'), + (0x2F914, 'M', u'瀞'), + (0x2F915, 'M', u'瀛'), + (0x2F916, 'M', u'㶖'), + (0x2F917, 'M', u'灊'), + (0x2F918, 'M', u'災'), + (0x2F919, 'M', u'灷'), + (0x2F91A, 'M', u'炭'), + (0x2F91B, 'M', u'𠔥'), + (0x2F91C, 'M', u'煅'), + (0x2F91D, 'M', u'𤉣'), + (0x2F91E, 'M', u'熜'), + (0x2F91F, 'X'), + (0x2F920, 'M', u'爨'), + (0x2F921, 'M', u'爵'), + (0x2F922, 'M', u'牐'), + (0x2F923, 'M', u'𤘈'), + (0x2F924, 'M', u'犀'), + (0x2F925, 'M', u'犕'), + (0x2F926, 'M', u'𤜵'), + (0x2F927, 'M', u'𤠔'), + (0x2F928, 'M', u'獺'), + (0x2F929, 'M', u'王'), + (0x2F92A, 'M', u'㺬'), + (0x2F92B, 'M', u'玥'), + (0x2F92C, 'M', u'㺸'), + (0x2F92E, 'M', u'瑇'), + (0x2F92F, 'M', u'瑜'), + (0x2F930, 'M', u'瑱'), + (0x2F931, 'M', u'璅'), + (0x2F932, 'M', u'瓊'), + (0x2F933, 'M', u'㼛'), + (0x2F934, 'M', u'甤'), + (0x2F935, 'M', u'𤰶'), + (0x2F936, 'M', u'甾'), + (0x2F937, 'M', u'𤲒'), + 
(0x2F938, 'M', u'異'), + (0x2F939, 'M', u'𢆟'), + (0x2F93A, 'M', u'瘐'), + (0x2F93B, 'M', u'𤾡'), + (0x2F93C, 'M', u'𤾸'), + (0x2F93D, 'M', u'𥁄'), + (0x2F93E, 'M', u'㿼'), + (0x2F93F, 'M', u'䀈'), + (0x2F940, 'M', u'直'), + (0x2F941, 'M', u'𥃳'), + (0x2F942, 'M', u'𥃲'), + (0x2F943, 'M', u'𥄙'), + (0x2F944, 'M', u'𥄳'), + (0x2F945, 'M', u'眞'), + (0x2F946, 'M', u'真'), + (0x2F948, 'M', u'睊'), + (0x2F949, 'M', u'䀹'), + (0x2F94A, 'M', u'瞋'), + (0x2F94B, 'M', u'䁆'), + (0x2F94C, 'M', u'䂖'), + (0x2F94D, 'M', u'𥐝'), + (0x2F94E, 'M', u'硎'), + (0x2F94F, 'M', u'碌'), + (0x2F950, 'M', u'磌'), + (0x2F951, 'M', u'䃣'), + (0x2F952, 'M', u'𥘦'), + (0x2F953, 'M', u'祖'), + (0x2F954, 'M', u'𥚚'), + (0x2F955, 'M', u'𥛅'), + ] + +def _seg_76(): + return [ + (0x2F956, 'M', u'福'), + (0x2F957, 'M', u'秫'), + (0x2F958, 'M', u'䄯'), + (0x2F959, 'M', u'穀'), + (0x2F95A, 'M', u'穊'), + (0x2F95B, 'M', u'穏'), + (0x2F95C, 'M', u'𥥼'), + (0x2F95D, 'M', u'𥪧'), + (0x2F95F, 'X'), + (0x2F960, 'M', u'䈂'), + (0x2F961, 'M', u'𥮫'), + (0x2F962, 'M', u'篆'), + (0x2F963, 'M', u'築'), + (0x2F964, 'M', u'䈧'), + (0x2F965, 'M', u'𥲀'), + (0x2F966, 'M', u'糒'), + (0x2F967, 'M', u'䊠'), + (0x2F968, 'M', u'糨'), + (0x2F969, 'M', u'糣'), + (0x2F96A, 'M', u'紀'), + (0x2F96B, 'M', u'𥾆'), + (0x2F96C, 'M', u'絣'), + (0x2F96D, 'M', u'䌁'), + (0x2F96E, 'M', u'緇'), + (0x2F96F, 'M', u'縂'), + (0x2F970, 'M', u'繅'), + (0x2F971, 'M', u'䌴'), + (0x2F972, 'M', u'𦈨'), + (0x2F973, 'M', u'𦉇'), + (0x2F974, 'M', u'䍙'), + (0x2F975, 'M', u'𦋙'), + (0x2F976, 'M', u'罺'), + (0x2F977, 'M', u'𦌾'), + (0x2F978, 'M', u'羕'), + (0x2F979, 'M', u'翺'), + (0x2F97A, 'M', u'者'), + (0x2F97B, 'M', u'𦓚'), + (0x2F97C, 'M', u'𦔣'), + (0x2F97D, 'M', u'聠'), + (0x2F97E, 'M', u'𦖨'), + (0x2F97F, 'M', u'聰'), + (0x2F980, 'M', u'𣍟'), + (0x2F981, 'M', u'䏕'), + (0x2F982, 'M', u'育'), + (0x2F983, 'M', u'脃'), + (0x2F984, 'M', u'䐋'), + (0x2F985, 'M', u'脾'), + (0x2F986, 'M', u'媵'), + (0x2F987, 'M', u'𦞧'), + (0x2F988, 'M', u'𦞵'), + (0x2F989, 'M', u'𣎓'), + (0x2F98A, 'M', u'𣎜'), + (0x2F98B, 'M', u'舁'), + (0x2F98C, 'M', u'舄'), + (0x2F98D, 'M', u'辞'), + (0x2F98E, 'M', u'䑫'), + (0x2F98F, 'M', u'芑'), + (0x2F990, 'M', u'芋'), + (0x2F991, 'M', u'芝'), + (0x2F992, 'M', u'劳'), + (0x2F993, 'M', u'花'), + (0x2F994, 'M', u'芳'), + (0x2F995, 'M', u'芽'), + (0x2F996, 'M', u'苦'), + (0x2F997, 'M', u'𦬼'), + (0x2F998, 'M', u'若'), + (0x2F999, 'M', u'茝'), + (0x2F99A, 'M', u'荣'), + (0x2F99B, 'M', u'莭'), + (0x2F99C, 'M', u'茣'), + (0x2F99D, 'M', u'莽'), + (0x2F99E, 'M', u'菧'), + (0x2F99F, 'M', u'著'), + (0x2F9A0, 'M', u'荓'), + (0x2F9A1, 'M', u'菊'), + (0x2F9A2, 'M', u'菌'), + (0x2F9A3, 'M', u'菜'), + (0x2F9A4, 'M', u'𦰶'), + (0x2F9A5, 'M', u'𦵫'), + (0x2F9A6, 'M', u'𦳕'), + (0x2F9A7, 'M', u'䔫'), + (0x2F9A8, 'M', u'蓱'), + (0x2F9A9, 'M', u'蓳'), + (0x2F9AA, 'M', u'蔖'), + (0x2F9AB, 'M', u'𧏊'), + (0x2F9AC, 'M', u'蕤'), + (0x2F9AD, 'M', u'𦼬'), + (0x2F9AE, 'M', u'䕝'), + (0x2F9AF, 'M', u'䕡'), + (0x2F9B0, 'M', u'𦾱'), + (0x2F9B1, 'M', u'𧃒'), + (0x2F9B2, 'M', u'䕫'), + (0x2F9B3, 'M', u'虐'), + (0x2F9B4, 'M', u'虜'), + (0x2F9B5, 'M', u'虧'), + (0x2F9B6, 'M', u'虩'), + (0x2F9B7, 'M', u'蚩'), + (0x2F9B8, 'M', u'蚈'), + (0x2F9B9, 'M', u'蜎'), + (0x2F9BA, 'M', u'蛢'), + ] + +def _seg_77(): + return [ + (0x2F9BB, 'M', u'蝹'), + (0x2F9BC, 'M', u'蜨'), + (0x2F9BD, 'M', u'蝫'), + (0x2F9BE, 'M', u'螆'), + (0x2F9BF, 'X'), + (0x2F9C0, 'M', u'蟡'), + (0x2F9C1, 'M', u'蠁'), + (0x2F9C2, 'M', u'䗹'), + (0x2F9C3, 'M', u'衠'), + (0x2F9C4, 'M', u'衣'), + (0x2F9C5, 'M', u'𧙧'), + (0x2F9C6, 'M', u'裗'), + (0x2F9C7, 'M', u'裞'), + (0x2F9C8, 'M', u'䘵'), + (0x2F9C9, 'M', u'裺'), + (0x2F9CA, 'M', u'㒻'), + (0x2F9CB, 'M', 
u'𧢮'), + (0x2F9CC, 'M', u'𧥦'), + (0x2F9CD, 'M', u'䚾'), + (0x2F9CE, 'M', u'䛇'), + (0x2F9CF, 'M', u'誠'), + (0x2F9D0, 'M', u'諭'), + (0x2F9D1, 'M', u'變'), + (0x2F9D2, 'M', u'豕'), + (0x2F9D3, 'M', u'𧲨'), + (0x2F9D4, 'M', u'貫'), + (0x2F9D5, 'M', u'賁'), + (0x2F9D6, 'M', u'贛'), + (0x2F9D7, 'M', u'起'), + (0x2F9D8, 'M', u'𧼯'), + (0x2F9D9, 'M', u'𠠄'), + (0x2F9DA, 'M', u'跋'), + (0x2F9DB, 'M', u'趼'), + (0x2F9DC, 'M', u'跰'), + (0x2F9DD, 'M', u'𠣞'), + (0x2F9DE, 'M', u'軔'), + (0x2F9DF, 'M', u'輸'), + (0x2F9E0, 'M', u'𨗒'), + (0x2F9E1, 'M', u'𨗭'), + (0x2F9E2, 'M', u'邔'), + (0x2F9E3, 'M', u'郱'), + (0x2F9E4, 'M', u'鄑'), + (0x2F9E5, 'M', u'𨜮'), + (0x2F9E6, 'M', u'鄛'), + (0x2F9E7, 'M', u'鈸'), + (0x2F9E8, 'M', u'鋗'), + (0x2F9E9, 'M', u'鋘'), + (0x2F9EA, 'M', u'鉼'), + (0x2F9EB, 'M', u'鏹'), + (0x2F9EC, 'M', u'鐕'), + (0x2F9ED, 'M', u'𨯺'), + (0x2F9EE, 'M', u'開'), + (0x2F9EF, 'M', u'䦕'), + (0x2F9F0, 'M', u'閷'), + (0x2F9F1, 'M', u'𨵷'), + (0x2F9F2, 'M', u'䧦'), + (0x2F9F3, 'M', u'雃'), + (0x2F9F4, 'M', u'嶲'), + (0x2F9F5, 'M', u'霣'), + (0x2F9F6, 'M', u'𩅅'), + (0x2F9F7, 'M', u'𩈚'), + (0x2F9F8, 'M', u'䩮'), + (0x2F9F9, 'M', u'䩶'), + (0x2F9FA, 'M', u'韠'), + (0x2F9FB, 'M', u'𩐊'), + (0x2F9FC, 'M', u'䪲'), + (0x2F9FD, 'M', u'𩒖'), + (0x2F9FE, 'M', u'頋'), + (0x2FA00, 'M', u'頩'), + (0x2FA01, 'M', u'𩖶'), + (0x2FA02, 'M', u'飢'), + (0x2FA03, 'M', u'䬳'), + (0x2FA04, 'M', u'餩'), + (0x2FA05, 'M', u'馧'), + (0x2FA06, 'M', u'駂'), + (0x2FA07, 'M', u'駾'), + (0x2FA08, 'M', u'䯎'), + (0x2FA09, 'M', u'𩬰'), + (0x2FA0A, 'M', u'鬒'), + (0x2FA0B, 'M', u'鱀'), + (0x2FA0C, 'M', u'鳽'), + (0x2FA0D, 'M', u'䳎'), + (0x2FA0E, 'M', u'䳭'), + (0x2FA0F, 'M', u'鵧'), + (0x2FA10, 'M', u'𪃎'), + (0x2FA11, 'M', u'䳸'), + (0x2FA12, 'M', u'𪄅'), + (0x2FA13, 'M', u'𪈎'), + (0x2FA14, 'M', u'𪊑'), + (0x2FA15, 'M', u'麻'), + (0x2FA16, 'M', u'䵖'), + (0x2FA17, 'M', u'黹'), + (0x2FA18, 'M', u'黾'), + (0x2FA19, 'M', u'鼅'), + (0x2FA1A, 'M', u'鼏'), + (0x2FA1B, 'M', u'鼖'), + (0x2FA1C, 'M', u'鼻'), + (0x2FA1D, 'M', u'𪘀'), + (0x2FA1E, 'X'), + (0xE0100, 'I'), + ] + +def _seg_78(): + return [ + (0xE01F0, 'X'), + ] + +uts46data = tuple( + _seg_0() + + _seg_1() + + _seg_2() + + _seg_3() + + _seg_4() + + _seg_5() + + _seg_6() + + _seg_7() + + _seg_8() + + _seg_9() + + _seg_10() + + _seg_11() + + _seg_12() + + _seg_13() + + _seg_14() + + _seg_15() + + _seg_16() + + _seg_17() + + _seg_18() + + _seg_19() + + _seg_20() + + _seg_21() + + _seg_22() + + _seg_23() + + _seg_24() + + _seg_25() + + _seg_26() + + _seg_27() + + _seg_28() + + _seg_29() + + _seg_30() + + _seg_31() + + _seg_32() + + _seg_33() + + _seg_34() + + _seg_35() + + _seg_36() + + _seg_37() + + _seg_38() + + _seg_39() + + _seg_40() + + _seg_41() + + _seg_42() + + _seg_43() + + _seg_44() + + _seg_45() + + _seg_46() + + _seg_47() + + _seg_48() + + _seg_49() + + _seg_50() + + _seg_51() + + _seg_52() + + _seg_53() + + _seg_54() + + _seg_55() + + _seg_56() + + _seg_57() + + _seg_58() + + _seg_59() + + _seg_60() + + _seg_61() + + _seg_62() + + _seg_63() + + _seg_64() + + _seg_65() + + _seg_66() + + _seg_67() + + _seg_68() + + _seg_69() + + _seg_70() + + _seg_71() + + _seg_72() + + _seg_73() + + _seg_74() + + _seg_75() + + _seg_76() + + _seg_77() + + _seg_78() +) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/uts46data.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/uts46data.pyc new file mode 100644 index 0000000..0f4a737 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/idna/uts46data.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.py 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.py new file mode 100644 index 0000000..f2d0766 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.py @@ -0,0 +1,2419 @@ +# Copyright 2007 Google Inc. +# Licensed to PSF under a Contributor Agreement. + +"""A fast, lightweight IPv4/IPv6 manipulation library in Python. + +This library is used to create/poke/manipulate IPv4 and IPv6 addresses +and networks. + +""" + +from __future__ import unicode_literals + + +import itertools +import struct + +__version__ = '1.0.22' + +# Compatibility functions +_compat_int_types = (int,) +try: + _compat_int_types = (int, long) +except NameError: + pass +try: + _compat_str = unicode +except NameError: + _compat_str = str + assert bytes != str +if b'\0'[0] == 0: # Python 3 semantics + def _compat_bytes_to_byte_vals(byt): + return byt +else: + def _compat_bytes_to_byte_vals(byt): + return [struct.unpack(b'!B', b)[0] for b in byt] +try: + _compat_int_from_byte_vals = int.from_bytes +except AttributeError: + def _compat_int_from_byte_vals(bytvals, endianess): + assert endianess == 'big' + res = 0 + for bv in bytvals: + assert isinstance(bv, _compat_int_types) + res = (res << 8) + bv + return res + + +def _compat_to_bytes(intval, length, endianess): + assert isinstance(intval, _compat_int_types) + assert endianess == 'big' + if length == 4: + if intval < 0 or intval >= 2 ** 32: + raise struct.error("integer out of range for 'I' format code") + return struct.pack(b'!I', intval) + elif length == 16: + if intval < 0 or intval >= 2 ** 128: + raise struct.error("integer out of range for 'QQ' format code") + return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff) + else: + raise NotImplementedError() + + +if hasattr(int, 'bit_length'): + # Not int.bit_length , since that won't work in 2.7 where long exists + def _compat_bit_length(i): + return i.bit_length() +else: + def _compat_bit_length(i): + for res in itertools.count(): + if i >> res == 0: + return res + + +def _compat_range(start, end, step=1): + assert step > 0 + i = start + while i < end: + yield i + i += step + + +class _TotalOrderingMixin(object): + __slots__ = () + + # Helper that derives the other comparison operations from + # __lt__ and __eq__ + # We avoid functools.total_ordering because it doesn't handle + # NotImplemented correctly yet (http://bugs.python.org/issue10042) + def __eq__(self, other): + raise NotImplementedError + + def __ne__(self, other): + equal = self.__eq__(other) + if equal is NotImplemented: + return NotImplemented + return not equal + + def __lt__(self, other): + raise NotImplementedError + + def __le__(self, other): + less = self.__lt__(other) + if less is NotImplemented or not less: + return self.__eq__(other) + return less + + def __gt__(self, other): + less = self.__lt__(other) + if less is NotImplemented: + return NotImplemented + equal = self.__eq__(other) + if equal is NotImplemented: + return NotImplemented + return not (less or equal) + + def __ge__(self, other): + less = self.__lt__(other) + if less is NotImplemented: + return NotImplemented + return not less + + +IPV4LENGTH = 32 +IPV6LENGTH = 128 + + +class AddressValueError(ValueError): + """A Value Error related to the address.""" + + +class NetmaskValueError(ValueError): + """A Value Error related to the netmask.""" + + +def ip_address(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. 
Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Address or IPv6Address object. + + Raises: + ValueError: if the *address* passed isn't either a v4 or a v6 + address + + """ + try: + return IPv4Address(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Address(address) + except (AddressValueError, NetmaskValueError): + pass + + if isinstance(address, bytes): + raise AddressValueError( + '%r does not appear to be an IPv4 or IPv6 address. ' + 'Did you pass in a bytes (str in Python 2) instead of' + ' a unicode object?' % address) + + raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % + address) + + +def ip_network(address, strict=True): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP network. Either IPv4 or + IPv6 networks may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Network or IPv6Network object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. Or if the network has host bits set. + + """ + try: + return IPv4Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Network(address, strict) + except (AddressValueError, NetmaskValueError): + pass + + if isinstance(address, bytes): + raise AddressValueError( + '%r does not appear to be an IPv4 or IPv6 network. ' + 'Did you pass in a bytes (str in Python 2) instead of' + ' a unicode object?' % address) + + raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % + address) + + +def ip_interface(address): + """Take an IP string/int and return an object of the correct type. + + Args: + address: A string or integer, the IP address. Either IPv4 or + IPv6 addresses may be supplied; integers less than 2**32 will + be considered to be IPv4 by default. + + Returns: + An IPv4Interface or IPv6Interface object. + + Raises: + ValueError: if the string passed isn't either a v4 or a v6 + address. + + Notes: + The IPv?Interface classes describe an Address on a particular + Network, so they're basically a combination of both the Address + and Network classes. + + """ + try: + return IPv4Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + try: + return IPv6Interface(address) + except (AddressValueError, NetmaskValueError): + pass + + raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' % + address) + + +def v4_int_to_packed(address): + """Represent an address as 4 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv4 IP address. + + Returns: + The integer address packed as 4 bytes in network (big-endian) order. + + Raises: + ValueError: If the integer is negative or too large to be an + IPv4 IP address. + + """ + try: + return _compat_to_bytes(address, 4, 'big') + except (struct.error, OverflowError): + raise ValueError("Address negative or too large for IPv4") + + +def v6_int_to_packed(address): + """Represent an address as 16 packed bytes in network (big-endian) order. + + Args: + address: An integer representation of an IPv6 IP address. + + Returns: + The integer address packed as 16 bytes in network (big-endian) order. 
+ + """ + try: + return _compat_to_bytes(address, 16, 'big') + except (struct.error, OverflowError): + raise ValueError("Address negative or too large for IPv6") + + +def _split_optional_netmask(address): + """Helper to split the netmask and raise AddressValueError if needed""" + addr = _compat_str(address).split('/') + if len(addr) > 2: + raise AddressValueError("Only one '/' permitted in %r" % address) + return addr + + +def _find_address_range(addresses): + """Find a sequence of sorted deduplicated IPv#Address. + + Args: + addresses: a list of IPv#Address objects. + + Yields: + A tuple containing the first and last IP addresses in the sequence. + + """ + it = iter(addresses) + first = last = next(it) + for ip in it: + if ip._ip != last._ip + 1: + yield first, last + first = ip + last = ip + yield first, last + + +def _count_righthand_zero_bits(number, bits): + """Count the number of zero bits on the right hand side. + + Args: + number: an integer. + bits: maximum number of bits to count. + + Returns: + The number of zero bits on the right hand side of the number. + + """ + if number == 0: + return bits + return min(bits, _compat_bit_length(~number & (number - 1))) + + +def summarize_address_range(first, last): + """Summarize a network range given the first and last IP addresses. + + Example: + >>> list(summarize_address_range(IPv4Address('192.0.2.0'), + ... IPv4Address('192.0.2.130'))) + ... #doctest: +NORMALIZE_WHITESPACE + [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), + IPv4Network('192.0.2.130/32')] + + Args: + first: the first IPv4Address or IPv6Address in the range. + last: the last IPv4Address or IPv6Address in the range. + + Returns: + An iterator of the summarized IPv(4|6) network objects. + + Raise: + TypeError: + If the first and last objects are not IP addresses. + If the first and last objects are not the same version. + ValueError: + If the last object is not greater than the first. + If the version of the first address is not 4 or 6. + + """ + if (not (isinstance(first, _BaseAddress) and + isinstance(last, _BaseAddress))): + raise TypeError('first and last must be IP addresses, not networks') + if first.version != last.version: + raise TypeError("%s and %s are not of the same version" % ( + first, last)) + if first > last: + raise ValueError('last IP address must be greater than first') + + if first.version == 4: + ip = IPv4Network + elif first.version == 6: + ip = IPv6Network + else: + raise ValueError('unknown IP version') + + ip_bits = first._max_prefixlen + first_int = first._ip + last_int = last._ip + while first_int <= last_int: + nbits = min(_count_righthand_zero_bits(first_int, ip_bits), + _compat_bit_length(last_int - first_int + 1) - 1) + net = ip((first_int, ip_bits - nbits)) + yield net + first_int += 1 << nbits + if first_int - 1 == ip._ALL_ONES: + break + + +def _collapse_addresses_internal(addresses): + """Loops through the addresses, collapsing concurrent netblocks. + + Example: + + ip1 = IPv4Network('192.0.2.0/26') + ip2 = IPv4Network('192.0.2.64/26') + ip3 = IPv4Network('192.0.2.128/26') + ip4 = IPv4Network('192.0.2.192/26') + + _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> + [IPv4Network('192.0.2.0/24')] + + This shouldn't be called directly; it is called via + collapse_addresses([]). + + Args: + addresses: A list of IPv4Network's or IPv6Network's + + Returns: + A list of IPv4Network's or IPv6Network's depending on what we were + passed. 
+ + """ + # First merge + to_merge = list(addresses) + subnets = {} + while to_merge: + net = to_merge.pop() + supernet = net.supernet() + existing = subnets.get(supernet) + if existing is None: + subnets[supernet] = net + elif existing != net: + # Merge consecutive subnets + del subnets[supernet] + to_merge.append(supernet) + # Then iterate over resulting networks, skipping subsumed subnets + last = None + for net in sorted(subnets.values()): + if last is not None: + # Since they are sorted, + # last.network_address <= net.network_address is a given. + if last.broadcast_address >= net.broadcast_address: + continue + yield net + last = net + + +def collapse_addresses(addresses): + """Collapse a list of IP objects. + + Example: + collapse_addresses([IPv4Network('192.0.2.0/25'), + IPv4Network('192.0.2.128/25')]) -> + [IPv4Network('192.0.2.0/24')] + + Args: + addresses: An iterator of IPv4Network or IPv6Network objects. + + Returns: + An iterator of the collapsed IPv(4|6)Network objects. + + Raises: + TypeError: If passed a list of mixed version objects. + + """ + addrs = [] + ips = [] + nets = [] + + # split IP addresses and networks + for ip in addresses: + if isinstance(ip, _BaseAddress): + if ips and ips[-1]._version != ip._version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + ips.append(ip) + elif ip._prefixlen == ip._max_prefixlen: + if ips and ips[-1]._version != ip._version: + raise TypeError("%s and %s are not of the same version" % ( + ip, ips[-1])) + try: + ips.append(ip.ip) + except AttributeError: + ips.append(ip.network_address) + else: + if nets and nets[-1]._version != ip._version: + raise TypeError("%s and %s are not of the same version" % ( + ip, nets[-1])) + nets.append(ip) + + # sort and dedup + ips = sorted(set(ips)) + + # find consecutive address ranges in the sorted sequence and summarize them + if ips: + for first, last in _find_address_range(ips): + addrs.extend(summarize_address_range(first, last)) + + return _collapse_addresses_internal(addrs + nets) + + +def get_mixed_type_key(obj): + """Return a key suitable for sorting between networks and addresses. + + Address and Network objects are not sortable by default; they're + fundamentally different so the expression + + IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') + + doesn't make any sense. There are some times however, where you may wish + to have ipaddress sort these for you anyway. If you need to do this, you + can use this function as the key= argument to sorted(). + + Args: + obj: either a Network or Address object. + Returns: + appropriate key. 
+ + """ + if isinstance(obj, _BaseNetwork): + return obj._get_networks_key() + elif isinstance(obj, _BaseAddress): + return obj._get_address_key() + return NotImplemented + + +class _IPAddressBase(_TotalOrderingMixin): + + """The mother class.""" + + __slots__ = () + + @property + def exploded(self): + """Return the longhand version of the IP address as a string.""" + return self._explode_shorthand_ip_string() + + @property + def compressed(self): + """Return the shorthand version of the IP address as a string.""" + return _compat_str(self) + + @property + def reverse_pointer(self): + """The name of the reverse DNS pointer for the IP address, e.g.: + >>> ipaddress.ip_address("127.0.0.1").reverse_pointer + '1.0.0.127.in-addr.arpa' + >>> ipaddress.ip_address("2001:db8::1").reverse_pointer + '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' + + """ + return self._reverse_pointer() + + @property + def version(self): + msg = '%200s has no version specified' % (type(self),) + raise NotImplementedError(msg) + + def _check_int_address(self, address): + if address < 0: + msg = "%d (< 0) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self._version)) + if address > self._ALL_ONES: + msg = "%d (>= 2**%d) is not permitted as an IPv%d address" + raise AddressValueError(msg % (address, self._max_prefixlen, + self._version)) + + def _check_packed_address(self, address, expected_len): + address_len = len(address) + if address_len != expected_len: + msg = ( + '%r (len %d != %d) is not permitted as an IPv%d address. ' + 'Did you pass in a bytes (str in Python 2) instead of' + ' a unicode object?') + raise AddressValueError(msg % (address, address_len, + expected_len, self._version)) + + @classmethod + def _ip_int_from_prefix(cls, prefixlen): + """Turn the prefix length into a bitwise netmask + + Args: + prefixlen: An integer, the prefix length. + + Returns: + An integer. + + """ + return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) + + @classmethod + def _prefix_from_ip_int(cls, ip_int): + """Return prefix length from the bitwise netmask. + + Args: + ip_int: An integer, the netmask in expanded bitwise format + + Returns: + An integer, the prefix length. + + Raises: + ValueError: If the input intermingles zeroes & ones + """ + trailing_zeroes = _count_righthand_zero_bits(ip_int, + cls._max_prefixlen) + prefixlen = cls._max_prefixlen - trailing_zeroes + leading_ones = ip_int >> trailing_zeroes + all_ones = (1 << prefixlen) - 1 + if leading_ones != all_ones: + byteslen = cls._max_prefixlen // 8 + details = _compat_to_bytes(ip_int, byteslen, 'big') + msg = 'Netmask pattern %r mixes zeroes & ones' + raise ValueError(msg % details) + return prefixlen + + @classmethod + def _report_invalid_netmask(cls, netmask_str): + msg = '%r is not a valid netmask' % netmask_str + raise NetmaskValueError(msg) + + @classmethod + def _prefix_from_prefix_string(cls, prefixlen_str): + """Return prefix length from a numeric string + + Args: + prefixlen_str: The string to be converted + + Returns: + An integer, the prefix length. 
+ + Raises: + NetmaskValueError: If the input is not a valid netmask + """ + # int allows a leading +/- as well as surrounding whitespace, + # so we ensure that isn't the case + if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): + cls._report_invalid_netmask(prefixlen_str) + try: + prefixlen = int(prefixlen_str) + except ValueError: + cls._report_invalid_netmask(prefixlen_str) + if not (0 <= prefixlen <= cls._max_prefixlen): + cls._report_invalid_netmask(prefixlen_str) + return prefixlen + + @classmethod + def _prefix_from_ip_string(cls, ip_str): + """Turn a netmask/hostmask string into a prefix length + + Args: + ip_str: The netmask/hostmask to be converted + + Returns: + An integer, the prefix length. + + Raises: + NetmaskValueError: If the input is not a valid netmask/hostmask + """ + # Parse the netmask/hostmask like an IP address. + try: + ip_int = cls._ip_int_from_string(ip_str) + except AddressValueError: + cls._report_invalid_netmask(ip_str) + + # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). + # Note that the two ambiguous cases (all-ones and all-zeroes) are + # treated as netmasks. + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + pass + + # Invert the bits, and try matching a /0+1+/ hostmask instead. + ip_int ^= cls._ALL_ONES + try: + return cls._prefix_from_ip_int(ip_int) + except ValueError: + cls._report_invalid_netmask(ip_str) + + def __reduce__(self): + return self.__class__, (_compat_str(self),) + + +class _BaseAddress(_IPAddressBase): + + """A generic IP object. + + This IP class contains the version independent methods which are + used by single IP addresses. + """ + + __slots__ = () + + def __int__(self): + return self._ip + + def __eq__(self, other): + try: + return (self._ip == other._ip and + self._version == other._version) + except AttributeError: + return NotImplemented + + def __lt__(self, other): + if not isinstance(other, _IPAddressBase): + return NotImplemented + if not isinstance(other, _BaseAddress): + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + if self._version != other._version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self._ip != other._ip: + return self._ip < other._ip + return False + + # Shorthand for Integer addition and subtraction. This is not + # meant to ever support addition/subtraction of addresses. + def __add__(self, other): + if not isinstance(other, _compat_int_types): + return NotImplemented + return self.__class__(int(self) + other) + + def __sub__(self, other): + if not isinstance(other, _compat_int_types): + return NotImplemented + return self.__class__(int(self) - other) + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) + + def __str__(self): + return _compat_str(self._string_from_ip_int(self._ip)) + + def __hash__(self): + return hash(hex(int(self._ip))) + + def _get_address_key(self): + return (self._version, self) + + def __reduce__(self): + return self.__class__, (self._ip,) + + +class _BaseNetwork(_IPAddressBase): + + """A generic IP network object. + + This IP class contains the version independent methods which are + used by networks. + + """ + def __init__(self, address): + self._cache = {} + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, _compat_str(self)) + + def __str__(self): + return '%s/%d' % (self.network_address, self.prefixlen) + + def hosts(self): + """Generate Iterator over usable hosts in a network. 
+ + This is like __iter__ except it doesn't return the network + or broadcast addresses. + + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network + 1, broadcast): + yield self._address_class(x) + + def __iter__(self): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network, broadcast + 1): + yield self._address_class(x) + + def __getitem__(self, n): + network = int(self.network_address) + broadcast = int(self.broadcast_address) + if n >= 0: + if network + n > broadcast: + raise IndexError('address out of range') + return self._address_class(network + n) + else: + n += 1 + if broadcast + n < network: + raise IndexError('address out of range') + return self._address_class(broadcast + n) + + def __lt__(self, other): + if not isinstance(other, _IPAddressBase): + return NotImplemented + if not isinstance(other, _BaseNetwork): + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + if self._version != other._version: + raise TypeError('%s and %s are not of the same version' % ( + self, other)) + if self.network_address != other.network_address: + return self.network_address < other.network_address + if self.netmask != other.netmask: + return self.netmask < other.netmask + return False + + def __eq__(self, other): + try: + return (self._version == other._version and + self.network_address == other.network_address and + int(self.netmask) == int(other.netmask)) + except AttributeError: + return NotImplemented + + def __hash__(self): + return hash(int(self.network_address) ^ int(self.netmask)) + + def __contains__(self, other): + # always false if one is v4 and the other is v6. + if self._version != other._version: + return False + # dealing with another network. + if isinstance(other, _BaseNetwork): + return False + # dealing with another address + else: + # address + return (int(self.network_address) <= int(other._ip) <= + int(self.broadcast_address)) + + def overlaps(self, other): + """Tell if self is partly contained in other.""" + return self.network_address in other or ( + self.broadcast_address in other or ( + other.network_address in self or ( + other.broadcast_address in self))) + + @property + def broadcast_address(self): + x = self._cache.get('broadcast_address') + if x is None: + x = self._address_class(int(self.network_address) | + int(self.hostmask)) + self._cache['broadcast_address'] = x + return x + + @property + def hostmask(self): + x = self._cache.get('hostmask') + if x is None: + x = self._address_class(int(self.netmask) ^ self._ALL_ONES) + self._cache['hostmask'] = x + return x + + @property + def with_prefixlen(self): + return '%s/%d' % (self.network_address, self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self.network_address, self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self.network_address, self.hostmask) + + @property + def num_addresses(self): + """Number of hosts in the current subnet.""" + return int(self.broadcast_address) - int(self.network_address) + 1 + + @property + def _address_class(self): + # Returning bare address objects (rather than interfaces) allows for + # more consistent behaviour across the network address, broadcast + # address and individual host addresses. 
+ msg = '%200s has no associated address class' % (type(self),) + raise NotImplementedError(msg) + + @property + def prefixlen(self): + return self._prefixlen + + def address_exclude(self, other): + """Remove an address from a larger block. + + For example: + + addr1 = ip_network('192.0.2.0/28') + addr2 = ip_network('192.0.2.1/32') + list(addr1.address_exclude(addr2)) = + [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), + IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] + + or IPv6: + + addr1 = ip_network('2001:db8::1/32') + addr2 = ip_network('2001:db8::1/128') + list(addr1.address_exclude(addr2)) = + [ip_network('2001:db8::1/128'), + ip_network('2001:db8::2/127'), + ip_network('2001:db8::4/126'), + ip_network('2001:db8::8/125'), + ... + ip_network('2001:db8:8000::/33')] + + Args: + other: An IPv4Network or IPv6Network object of the same type. + + Returns: + An iterator of the IPv(4|6)Network objects which is self + minus other. + + Raises: + TypeError: If self and other are of differing address + versions, or if other is not a network object. + ValueError: If other is not completely contained by self. + + """ + if not self._version == other._version: + raise TypeError("%s and %s are not of the same version" % ( + self, other)) + + if not isinstance(other, _BaseNetwork): + raise TypeError("%s is not a network object" % other) + + if not other.subnet_of(self): + raise ValueError('%s not contained in %s' % (other, self)) + if other == self: + return + + # Make sure we're comparing the network of other. + other = other.__class__('%s/%s' % (other.network_address, + other.prefixlen)) + + s1, s2 = self.subnets() + while s1 != other and s2 != other: + if other.subnet_of(s1): + yield s2 + s1, s2 = s1.subnets() + elif other.subnet_of(s2): + yield s1 + s1, s2 = s2.subnets() + else: + # If we got here, there's a bug somewhere. + raise AssertionError('Error performing exclusion: ' + 's1: %s s2: %s other: %s' % + (s1, s2, other)) + if s1 == other: + yield s2 + elif s2 == other: + yield s1 + else: + # If we got here, there's a bug somewhere. + raise AssertionError('Error performing exclusion: ' + 's1: %s s2: %s other: %s' % + (s1, s2, other)) + + def compare_networks(self, other): + """Compare two IP objects. + + This is only concerned about the comparison of the integer + representation of the network addresses. This means that the + host bits aren't considered at all in this method. If you want + to compare host bits, you can easily enough do a + 'HostA._ip < HostB._ip' + + Args: + other: An IP object. + + Returns: + If the IP versions of self and other are the same, returns: + + -1 if self < other: + eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') + IPv6Network('2001:db8::1000/124') < + IPv6Network('2001:db8::2000/124') + 0 if self == other + eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') + IPv6Network('2001:db8::1000/124') == + IPv6Network('2001:db8::1000/124') + 1 if self > other + eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') + IPv6Network('2001:db8::2000/124') > + IPv6Network('2001:db8::1000/124') + + Raises: + TypeError if the IP versions are different. + + """ + # does this need to raise a ValueError? 
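+        # (TypeError is consistent with what __lt__ raises above for
+        # operands of mixed versions.)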
+ if self._version != other._version: + raise TypeError('%s and %s are not of the same type' % ( + self, other)) + # self._version == other._version below here: + if self.network_address < other.network_address: + return -1 + if self.network_address > other.network_address: + return 1 + # self.network_address == other.network_address below here: + if self.netmask < other.netmask: + return -1 + if self.netmask > other.netmask: + return 1 + return 0 + + def _get_networks_key(self): + """Network-only key function. + + Returns an object that identifies this address' network and + netmask. This function is a suitable "key" argument for sorted() + and list.sort(). + + """ + return (self._version, self.network_address, self.netmask) + + def subnets(self, prefixlen_diff=1, new_prefix=None): + """The subnets which join to make the current subnet. + + In the case that self contains only one IP + (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 + for IPv6), yield an iterator with just ourself. + + Args: + prefixlen_diff: An integer, the amount the prefix length + should be increased by. This should not be set if + new_prefix is also set. + new_prefix: The desired new prefix length. This must be a + larger number (smaller prefix) than the existing prefix. + This should not be set if prefixlen_diff is also set. + + Returns: + An iterator of IPv(4|6) objects. + + Raises: + ValueError: The prefixlen_diff is too small or too large. + OR + prefixlen_diff and new_prefix are both set or new_prefix + is a smaller number than the current prefix (smaller + number means a larger network) + + """ + if self._prefixlen == self._max_prefixlen: + yield self + return + + if new_prefix is not None: + if new_prefix < self._prefixlen: + raise ValueError('new prefix must be longer') + if prefixlen_diff != 1: + raise ValueError('cannot set prefixlen_diff and new_prefix') + prefixlen_diff = new_prefix - self._prefixlen + + if prefixlen_diff < 0: + raise ValueError('prefix length diff must be > 0') + new_prefixlen = self._prefixlen + prefixlen_diff + + if new_prefixlen > self._max_prefixlen: + raise ValueError( + 'prefix length diff %d is invalid for netblock %s' % ( + new_prefixlen, self)) + + start = int(self.network_address) + end = int(self.broadcast_address) + 1 + step = (int(self.hostmask) + 1) >> prefixlen_diff + for new_addr in _compat_range(start, end, step): + current = self.__class__((new_addr, new_prefixlen)) + yield current + + def supernet(self, prefixlen_diff=1, new_prefix=None): + """The supernet containing the current network. + + Args: + prefixlen_diff: An integer, the amount the prefix length of + the network should be decreased by. For example, given a + /24 network and a prefixlen_diff of 3, a supernet with a + /21 netmask is returned. + + Returns: + An IPv4 network object. + + Raises: + ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have + a negative prefix length. 
+
+            OR
+            If prefixlen_diff and new_prefix are both set or new_prefix is a
+            larger number than the current prefix (larger number means a
+            smaller network)
+
+        """
+        if self._prefixlen == 0:
+            return self
+
+        if new_prefix is not None:
+            if new_prefix > self._prefixlen:
+                raise ValueError('new prefix must be shorter')
+            if prefixlen_diff != 1:
+                raise ValueError('cannot set prefixlen_diff and new_prefix')
+            prefixlen_diff = self._prefixlen - new_prefix
+
+        new_prefixlen = self.prefixlen - prefixlen_diff
+        if new_prefixlen < 0:
+            raise ValueError(
+                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
+                (self.prefixlen, prefixlen_diff))
+        return self.__class__((
+            int(self.network_address) & (int(self.netmask) << prefixlen_diff),
+            new_prefixlen))
+
+    @property
+    def is_multicast(self):
+        """Test if the address is reserved for multicast use.
+
+        Returns:
+            A boolean, True if the address is a multicast address.
+            See RFC 2373 2.7 for details.
+
+        """
+        return (self.network_address.is_multicast and
+                self.broadcast_address.is_multicast)
+
+    @staticmethod
+    def _is_subnet_of(a, b):
+        try:
+            # Always false if one is v4 and the other is v6.
+            if a._version != b._version:
+                raise TypeError("%s and %s are not of the same version" %
+                                (a, b))
+            return (b.network_address <= a.network_address and
+                    b.broadcast_address >= a.broadcast_address)
+        except AttributeError:
+            raise TypeError("Unable to test subnet containment "
+                            "between %s and %s" % (a, b))
+
+    def subnet_of(self, other):
+        """Return True if this network is a subnet of other."""
+        return self._is_subnet_of(self, other)
+
+    def supernet_of(self, other):
+        """Return True if this network is a supernet of other."""
+        return self._is_subnet_of(other, self)
+
+    @property
+    def is_reserved(self):
+        """Test if the address is otherwise IETF reserved.
+
+        Returns:
+            A boolean, True if the address is within one of the
+            reserved IPv4/IPv6 network ranges.
+
+        """
+        return (self.network_address.is_reserved and
+                self.broadcast_address.is_reserved)
+
+    @property
+    def is_link_local(self):
+        """Test if the address is reserved for link-local.
+
+        Returns:
+            A boolean, True if the address is reserved per RFC 4291.
+
+        """
+        return (self.network_address.is_link_local and
+                self.broadcast_address.is_link_local)
+
+    @property
+    def is_private(self):
+        """Test if this address is allocated for private networks.
+
+        Returns:
+            A boolean, True if the address is reserved per
+            iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+        """
+        return (self.network_address.is_private and
+                self.broadcast_address.is_private)
+
+    @property
+    def is_global(self):
+        """Test if this address is allocated for public networks.
+
+        Returns:
+            A boolean, True if the address is not reserved per
+            iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+        """
+        return not self.is_private
+
+    @property
+    def is_unspecified(self):
+        """Test if the address is unspecified.
+
+        Returns:
+            A boolean, True if this is the unspecified address as defined in
+            RFC 2373 2.5.2.
+
+        """
+        return (self.network_address.is_unspecified and
+                self.broadcast_address.is_unspecified)
+
+    @property
+    def is_loopback(self):
+        """Test if the address is a loopback address.
+
+        Returns:
+            A boolean, True if the address is a loopback address as defined in
+            RFC 2373 2.5.3.
+
+        """
+        return (self.network_address.is_loopback and
+                self.broadcast_address.is_loopback)
+
+
+class _BaseV4(object):
+
+    """Base IPv4 object.
+ + The following methods are used by IPv4 objects in both single IP + addresses and networks. + + """ + + __slots__ = () + _version = 4 + # Equivalent to 255.255.255.255 or 32 bits of 1's. + _ALL_ONES = (2 ** IPV4LENGTH) - 1 + _DECIMAL_DIGITS = frozenset('0123456789') + + # the valid octets for host and netmasks. only useful for IPv4. + _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) + + _max_prefixlen = IPV4LENGTH + # There are only a handful of valid v4 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + def _explode_shorthand_ip_string(self): + return _compat_str(self) + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. "255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, _compat_int_types): + prefixlen = arg + else: + try: + # Check for a netmask in prefix length form + prefixlen = cls._prefix_from_prefix_string(arg) + except NetmaskValueError: + # Check for a netmask or hostmask in dotted-quad form. + # This may raise NetmaskValueError. + prefixlen = cls._prefix_from_ip_string(arg) + netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn the given IP string into an integer for comparison. + + Args: + ip_str: A string, the IP ip_str. + + Returns: + The IP ip_str as an integer. + + Raises: + AddressValueError: if ip_str isn't a valid IPv4 Address. + + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + + octets = ip_str.split('.') + if len(octets) != 4: + raise AddressValueError("Expected 4 octets in %r" % ip_str) + + try: + return _compat_int_from_byte_vals( + map(cls._parse_octet, octets), 'big') + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + + @classmethod + def _parse_octet(cls, octet_str): + """Convert a decimal octet into an integer. + + Args: + octet_str: A string, the number to parse. + + Returns: + The octet as an integer. + + Raises: + ValueError: if the octet isn't strictly a decimal from [0..255]. + + """ + if not octet_str: + raise ValueError("Empty octet not permitted") + # Whitelist the characters, since int() allows a lot of bizarre stuff. + if not cls._DECIMAL_DIGITS.issuperset(octet_str): + msg = "Only decimal digits permitted in %r" + raise ValueError(msg % octet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(octet_str) > 3: + msg = "At most 3 characters permitted in %r" + raise ValueError(msg % octet_str) + # Convert to integer (we know digits are legal) + octet_int = int(octet_str, 10) + # Any octets that look like they *might* be written in octal, + # and which don't look exactly the same in both octal and + # decimal are rejected as ambiguous + if octet_int > 7 and octet_str[0] == '0': + msg = "Ambiguous (octal/decimal) value in %r not permitted" + raise ValueError(msg % octet_str) + if octet_int > 255: + raise ValueError("Octet %d (> 255) not permitted" % octet_int) + return octet_int + + @classmethod + def _string_from_ip_int(cls, ip_int): + """Turns a 32-bit integer into dotted decimal notation. + + Args: + ip_int: An integer, the IP address. 
+ + Returns: + The IP address as a string in dotted decimal notation. + + """ + return '.'.join(_compat_str(struct.unpack(b'!B', b)[0] + if isinstance(b, bytes) + else b) + for b in _compat_to_bytes(ip_int, 4, 'big')) + + def _is_hostmask(self, ip_str): + """Test if the IP string is a hostmask (rather than a netmask). + + Args: + ip_str: A string, the potential hostmask. + + Returns: + A boolean, True if the IP string is a hostmask. + + """ + bits = ip_str.split('.') + try: + parts = [x for x in map(int, bits) if x in self._valid_mask_octets] + except ValueError: + return False + if len(parts) != len(bits): + return False + if parts[0] < parts[-1]: + return True + return False + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv4 address. + + This implements the method described in RFC1035 3.5. + + """ + reverse_octets = _compat_str(self).split('.')[::-1] + return '.'.join(reverse_octets) + '.in-addr.arpa' + + @property + def max_prefixlen(self): + return self._max_prefixlen + + @property + def version(self): + return self._version + + +class IPv4Address(_BaseV4, _BaseAddress): + + """Represent and manipulate single IPv4 Addresses.""" + + __slots__ = ('_ip', '__weakref__') + + def __init__(self, address): + + """ + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv4Address('192.0.2.1') == IPv4Address(3221225985). + or, more generally + IPv4Address(int(IPv4Address('192.0.2.1'))) == + IPv4Address('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + + """ + # Efficient constructor from integer. + if isinstance(address, _compat_int_types): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 4) + bvs = _compat_bytes_to_byte_vals(address) + self._ip = _compat_int_from_byte_vals(bvs, 'big') + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = _compat_str(address) + if '/' in addr_str: + raise AddressValueError("Unexpected '/' in %r" % address) + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v4_int_to_packed(self._ip) + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within the + reserved IPv4 Network range. + + """ + return self in self._constants._reserved_network + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv4-special-registry. + + """ + return any(self in net for net in self._constants._private_networks) + + @property + def is_global(self): + return ( + self not in self._constants._public_network and + not self.is_private) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is multicast. + See RFC 3171 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 5735 3. 
+
+        """
+        return self == self._constants._unspecified_address
+
+    @property
+    def is_loopback(self):
+        """Test if the address is a loopback address.
+
+        Returns:
+            A boolean, True if the address is a loopback per RFC 3330.
+
+        """
+        return self in self._constants._loopback_network
+
+    @property
+    def is_link_local(self):
+        """Test if the address is reserved for link-local.
+
+        Returns:
+            A boolean, True if the address is link-local per RFC 3927.
+
+        """
+        return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+
+    def __init__(self, address):
+        if isinstance(address, (bytes, _compat_int_types)):
+            IPv4Address.__init__(self, address)
+            self.network = IPv4Network(self._ip)
+            self._prefixlen = self._max_prefixlen
+            return
+
+        if isinstance(address, tuple):
+            IPv4Address.__init__(self, address[0])
+            if len(address) > 1:
+                self._prefixlen = int(address[1])
+            else:
+                self._prefixlen = self._max_prefixlen
+
+            self.network = IPv4Network(address, strict=False)
+            self.netmask = self.network.netmask
+            self.hostmask = self.network.hostmask
+            return
+
+        addr = _split_optional_netmask(address)
+        IPv4Address.__init__(self, addr[0])
+
+        self.network = IPv4Network(address, strict=False)
+        self._prefixlen = self.network._prefixlen
+
+        self.netmask = self.network.netmask
+        self.hostmask = self.network.hostmask
+
+    def __str__(self):
+        return '%s/%d' % (self._string_from_ip_int(self._ip),
+                          self.network.prefixlen)
+
+    def __eq__(self, other):
+        address_equal = IPv4Address.__eq__(self, other)
+        if not address_equal or address_equal is NotImplemented:
+            return address_equal
+        try:
+            return self.network == other.network
+        except AttributeError:
+            # An interface with an associated network is NOT the
+            # same as an unassociated address. That's why the hash
+            # takes the extra info into account.
+            return False
+
+    def __lt__(self, other):
+        address_less = IPv4Address.__lt__(self, other)
+        if address_less is NotImplemented:
+            return NotImplemented
+        try:
+            return (self.network < other.network or
+                    self.network == other.network and address_less)
+        except AttributeError:
+            # We *do* allow addresses and interfaces to be sorted. The
+            # unassociated address is considered less than all interfaces.
+            return False
+
+    def __hash__(self):
+        return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+    __reduce__ = _IPAddressBase.__reduce__
+
+    @property
+    def ip(self):
+        return IPv4Address(self._ip)
+
+    @property
+    def with_prefixlen(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self._prefixlen)
+
+    @property
+    def with_netmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.netmask)
+
+    @property
+    def with_hostmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+    """This class represents and manipulates 32-bit IPv4 network addresses.
+
+    Attributes: [examples for IPv4Network('192.0.2.0/27')]
+        .network_address: IPv4Address('192.0.2.0')
+        .hostmask: IPv4Address('0.0.0.31')
+        .broadcast_address: IPv4Address('192.0.2.31')
+        .netmask: IPv4Address('255.255.255.224')
+        .prefixlen: 27
+
+    """
+    # Class to use when creating address objects
+    _address_class = IPv4Address
+
+    def __init__(self, address, strict=True):
+
+        """Instantiate a new IPv4 network object.
+
+        Args:
+            address: A string or integer representing the IP [& network].
+              '192.0.2.0/24'
+              '192.0.2.0/255.255.255.0'
+              '192.0.0.2/0.0.0.255'
+              are all functionally the same in IPv4.
Similarly, + '192.0.2.1' + '192.0.2.1/255.255.255.255' + '192.0.2.1/32' + are also functionally equivalent. That is to say, failing to + provide a subnetmask will create an object with a mask of /32. + + If the mask (portion after the / in the argument) is given in + dotted quad form, it is treated as a netmask if it starts with a + non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it + starts with a zero field (e.g. 0.255.255.255 == /8), with the + single exception of an all-zero mask which is treated as a + netmask == /0. If no mask is given, a default of /32 is used. + + Additionally, an integer can be passed, so + IPv4Network('192.0.2.1') == IPv4Network(3221225985) + or, more generally + IPv4Interface(int(IPv4Interface('192.0.2.1'))) == + IPv4Interface('192.0.2.1') + + Raises: + AddressValueError: If ipaddress isn't a valid IPv4 address. + NetmaskValueError: If the netmask isn't valid for + an IPv4 address. + ValueError: If strict is True and a network address is not + supplied. + + """ + _BaseNetwork.__init__(self, address) + + # Constructing from a packed address or integer + if isinstance(address, (_compat_int_types, bytes)): + self.network_address = IPv4Address(address) + self.netmask, self._prefixlen = self._make_netmask( + self._max_prefixlen) + # fixme: address/network test here. + return + + if isinstance(address, tuple): + if len(address) > 1: + arg = address[1] + else: + # We weren't given an address[1] + arg = self._max_prefixlen + self.network_address = IPv4Address(address[0]) + self.netmask, self._prefixlen = self._make_netmask(arg) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError('%s has host bits set' % self) + else: + self.network_address = IPv4Address(packed & + int(self.netmask)) + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. + addr = _split_optional_netmask(address) + self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) + + if len(addr) == 2: + arg = addr[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + + if strict: + if (IPv4Address(int(self.network_address) & int(self.netmask)) != + self.network_address): + raise ValueError('%s has host bits set' % self) + self.network_address = IPv4Address(int(self.network_address) & + int(self.netmask)) + + if self._prefixlen == (self._max_prefixlen - 1): + self.hosts = self.__iter__ + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, True if the address is not reserved per + iana-ipv4-special-registry. 
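+            Note that 100.64.0.0/10 (the RFC 6598 shared address space) is
+            special-cased below: it is neither private nor global.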
+ + """ + return (not (self.network_address in IPv4Network('100.64.0.0/10') and + self.broadcast_address in IPv4Network('100.64.0.0/10')) and + not self.is_private) + + +class _IPv4Constants(object): + + _linklocal_network = IPv4Network('169.254.0.0/16') + + _loopback_network = IPv4Network('127.0.0.0/8') + + _multicast_network = IPv4Network('224.0.0.0/4') + + _public_network = IPv4Network('100.64.0.0/10') + + _private_networks = [ + IPv4Network('0.0.0.0/8'), + IPv4Network('10.0.0.0/8'), + IPv4Network('127.0.0.0/8'), + IPv4Network('169.254.0.0/16'), + IPv4Network('172.16.0.0/12'), + IPv4Network('192.0.0.0/29'), + IPv4Network('192.0.0.170/31'), + IPv4Network('192.0.2.0/24'), + IPv4Network('192.168.0.0/16'), + IPv4Network('198.18.0.0/15'), + IPv4Network('198.51.100.0/24'), + IPv4Network('203.0.113.0/24'), + IPv4Network('240.0.0.0/4'), + IPv4Network('255.255.255.255/32'), + ] + + _reserved_network = IPv4Network('240.0.0.0/4') + + _unspecified_address = IPv4Address('0.0.0.0') + + +IPv4Address._constants = _IPv4Constants + + +class _BaseV6(object): + + """Base IPv6 object. + + The following methods are used by IPv6 objects in both single IP + addresses and networks. + + """ + + __slots__ = () + _version = 6 + _ALL_ONES = (2 ** IPV6LENGTH) - 1 + _HEXTET_COUNT = 8 + _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') + _max_prefixlen = IPV6LENGTH + + # There are only a bunch of valid v6 netmasks, so we cache them all + # when constructed (see _make_netmask()). + _netmask_cache = {} + + @classmethod + def _make_netmask(cls, arg): + """Make a (netmask, prefix_len) tuple from the given argument. + + Argument can be: + - an integer (the prefix length) + - a string representing the prefix length (e.g. "24") + - a string representing the prefix netmask (e.g. "255.255.255.0") + """ + if arg not in cls._netmask_cache: + if isinstance(arg, _compat_int_types): + prefixlen = arg + else: + prefixlen = cls._prefix_from_prefix_string(arg) + netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) + cls._netmask_cache[arg] = netmask, prefixlen + return cls._netmask_cache[arg] + + @classmethod + def _ip_int_from_string(cls, ip_str): + """Turn an IPv6 ip_str into an integer. + + Args: + ip_str: A string, the IPv6 ip_str. + + Returns: + An int, the IPv6 address + + Raises: + AddressValueError: if ip_str isn't a valid IPv6 Address. + + """ + if not ip_str: + raise AddressValueError('Address cannot be empty') + + parts = ip_str.split(':') + + # An IPv6 address needs at least 2 colons (3 parts). + _min_parts = 3 + if len(parts) < _min_parts: + msg = "At least %d parts expected in %r" % (_min_parts, ip_str) + raise AddressValueError(msg) + + # If the address has an IPv4-style suffix, convert it to hexadecimal. + if '.' in parts[-1]: + try: + ipv4_int = IPv4Address(parts.pop())._ip + except AddressValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) + parts.append('%x' % (ipv4_int & 0xFFFF)) + + # An IPv6 address can't have more than 8 colons (9 parts). + # The extra colon comes from using the "::" notation for a single + # leading or trailing zero part. + _max_parts = cls._HEXTET_COUNT + 1 + if len(parts) > _max_parts: + msg = "At most %d colons permitted in %r" % ( + _max_parts - 1, ip_str) + raise AddressValueError(msg) + + # Disregarding the endpoints, find '::' with nothing in between. + # This indicates that a run of zeroes has been skipped. 
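+        # For example, '2001:db8::1'.split(':') gives
+        # ['2001', 'db8', '', '1']; the empty string at index 2 marks where
+        # the collapsed run of zero hextets belongs.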
+ skip_index = None + for i in _compat_range(1, len(parts) - 1): + if not parts[i]: + if skip_index is not None: + # Can't have more than one '::' + msg = "At most one '::' permitted in %r" % ip_str + raise AddressValueError(msg) + skip_index = i + + # parts_hi is the number of parts to copy from above/before the '::' + # parts_lo is the number of parts to copy from below/after the '::' + if skip_index is not None: + # If we found a '::', then check if it also covers the endpoints. + parts_hi = skip_index + parts_lo = len(parts) - skip_index - 1 + if not parts[0]: + parts_hi -= 1 + if parts_hi: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + parts_lo -= 1 + if parts_lo: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) + if parts_skipped < 1: + msg = "Expected at most %d other parts with '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) + else: + # Otherwise, allocate the entire address to parts_hi. The + # endpoints could still be empty, but _parse_hextet() will check + # for that. + if len(parts) != cls._HEXTET_COUNT: + msg = "Exactly %d parts expected without '::' in %r" + raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) + if not parts[0]: + msg = "Leading ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # ^: requires ^:: + if not parts[-1]: + msg = "Trailing ':' only permitted as part of '::' in %r" + raise AddressValueError(msg % ip_str) # :$ requires ::$ + parts_hi = len(parts) + parts_lo = 0 + parts_skipped = 0 + + try: + # Now, parse the hextets into a 128-bit integer. + ip_int = 0 + for i in range(parts_hi): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + ip_int <<= 16 * parts_skipped + for i in range(-parts_lo, 0): + ip_int <<= 16 + ip_int |= cls._parse_hextet(parts[i]) + return ip_int + except ValueError as exc: + raise AddressValueError("%s in %r" % (exc, ip_str)) + + @classmethod + def _parse_hextet(cls, hextet_str): + """Convert an IPv6 hextet string into an integer. + + Args: + hextet_str: A string, the number to parse. + + Returns: + The hextet as an integer. + + Raises: + ValueError: if the input isn't strictly a hex number from + [0..FFFF]. + + """ + # Whitelist the characters, since int() allows a lot of bizarre stuff. + if not cls._HEX_DIGITS.issuperset(hextet_str): + raise ValueError("Only hex digits permitted in %r" % hextet_str) + # We do the length check second, since the invalid character error + # is likely to be more informative for the user + if len(hextet_str) > 4: + msg = "At most 4 characters permitted in %r" + raise ValueError(msg % hextet_str) + # Length check means we can skip checking the integer value + return int(hextet_str, 16) + + @classmethod + def _compress_hextets(cls, hextets): + """Compresses a list of hextets. + + Compresses a list of strings, replacing the longest continuous + sequence of "0" in the list with "" and adding empty strings at + the beginning or at the end of the string such that subsequently + calling ":".join(hextets) will produce the compressed version of + the IPv6 address. + + Args: + hextets: A list of strings, the hextets to compress. + + Returns: + A list of strings. 
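+            For example, ['2001', 'db8', '0', '0', '0', '0', '0', '1']
+            compresses to ['2001', 'db8', '', '1'], which ':'.join turns
+            into '2001:db8::1'.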
+ + """ + best_doublecolon_start = -1 + best_doublecolon_len = 0 + doublecolon_start = -1 + doublecolon_len = 0 + for index, hextet in enumerate(hextets): + if hextet == '0': + doublecolon_len += 1 + if doublecolon_start == -1: + # Start of a sequence of zeros. + doublecolon_start = index + if doublecolon_len > best_doublecolon_len: + # This is the longest sequence of zeros so far. + best_doublecolon_len = doublecolon_len + best_doublecolon_start = doublecolon_start + else: + doublecolon_len = 0 + doublecolon_start = -1 + + if best_doublecolon_len > 1: + best_doublecolon_end = (best_doublecolon_start + + best_doublecolon_len) + # For zeros at the end of the address. + if best_doublecolon_end == len(hextets): + hextets += [''] + hextets[best_doublecolon_start:best_doublecolon_end] = [''] + # For zeros at the beginning of the address. + if best_doublecolon_start == 0: + hextets = [''] + hextets + + return hextets + + @classmethod + def _string_from_ip_int(cls, ip_int=None): + """Turns a 128-bit integer into hexadecimal notation. + + Args: + ip_int: An integer, the IP address. + + Returns: + A string, the hexadecimal representation of the address. + + Raises: + ValueError: The address is bigger than 128 bits of all ones. + + """ + if ip_int is None: + ip_int = int(cls._ip) + + if ip_int > cls._ALL_ONES: + raise ValueError('IPv6 address is too large') + + hex_str = '%032x' % ip_int + hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] + + hextets = cls._compress_hextets(hextets) + return ':'.join(hextets) + + def _explode_shorthand_ip_string(self): + """Expand a shortened IPv6 address. + + Args: + ip_str: A string, the IPv6 address. + + Returns: + A string, the expanded IPv6 address. + + """ + if isinstance(self, IPv6Network): + ip_str = _compat_str(self.network_address) + elif isinstance(self, IPv6Interface): + ip_str = _compat_str(self.ip) + else: + ip_str = _compat_str(self) + + ip_int = self._ip_int_from_string(ip_str) + hex_str = '%032x' % ip_int + parts = [hex_str[x:x + 4] for x in range(0, 32, 4)] + if isinstance(self, (_BaseNetwork, IPv6Interface)): + return '%s/%d' % (':'.join(parts), self._prefixlen) + return ':'.join(parts) + + def _reverse_pointer(self): + """Return the reverse DNS pointer name for the IPv6 address. + + This implements the method described in RFC3596 2.5. + + """ + reverse_chars = self.exploded[::-1].replace(':', '') + return '.'.join(reverse_chars) + '.ip6.arpa' + + @property + def max_prefixlen(self): + return self._max_prefixlen + + @property + def version(self): + return self._version + + +class IPv6Address(_BaseV6, _BaseAddress): + + """Represent and manipulate single IPv6 Addresses.""" + + __slots__ = ('_ip', '__weakref__') + + def __init__(self, address): + """Instantiate a new IPv6 address object. + + Args: + address: A string or integer representing the IP + + Additionally, an integer can be passed, so + IPv6Address('2001:db8::') == + IPv6Address(42540766411282592856903984951653826560) + or, more generally + IPv6Address(int(IPv6Address('2001:db8::'))) == + IPv6Address('2001:db8::') + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + + """ + # Efficient constructor from integer. 
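+        # e.g. IPv6Address(1) == IPv6Address('::1'); _check_int_address
+        # (called below) rejects values that don't fit in 128 bits.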
+ if isinstance(address, _compat_int_types): + self._check_int_address(address) + self._ip = address + return + + # Constructing from a packed address + if isinstance(address, bytes): + self._check_packed_address(address, 16) + bvs = _compat_bytes_to_byte_vals(address) + self._ip = _compat_int_from_byte_vals(bvs, 'big') + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP string. + addr_str = _compat_str(address) + if '/' in addr_str: + raise AddressValueError("Unexpected '/' in %r" % address) + self._ip = self._ip_int_from_string(addr_str) + + @property + def packed(self): + """The binary representation of this address.""" + return v6_int_to_packed(self._ip) + + @property + def is_multicast(self): + """Test if the address is reserved for multicast use. + + Returns: + A boolean, True if the address is a multicast address. + See RFC 2373 2.7 for details. + + """ + return self in self._constants._multicast_network + + @property + def is_reserved(self): + """Test if the address is otherwise IETF reserved. + + Returns: + A boolean, True if the address is within one of the + reserved IPv6 Network ranges. + + """ + return any(self in x for x in self._constants._reserved_networks) + + @property + def is_link_local(self): + """Test if the address is reserved for link-local. + + Returns: + A boolean, True if the address is reserved per RFC 4291. + + """ + return self in self._constants._linklocal_network + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. + + """ + return self in self._constants._sitelocal_network + + @property + def is_private(self): + """Test if this address is allocated for private networks. + + Returns: + A boolean, True if the address is reserved per + iana-ipv6-special-registry. + + """ + return any(self in net for net in self._constants._private_networks) + + @property + def is_global(self): + """Test if this address is allocated for public networks. + + Returns: + A boolean, true if the address is not reserved per + iana-ipv6-special-registry. + + """ + return not self.is_private + + @property + def is_unspecified(self): + """Test if the address is unspecified. + + Returns: + A boolean, True if this is the unspecified address as defined in + RFC 2373 2.5.2. + + """ + return self._ip == 0 + + @property + def is_loopback(self): + """Test if the address is a loopback address. + + Returns: + A boolean, True if the address is a loopback address as defined in + RFC 2373 2.5.3. + + """ + return self._ip == 1 + + @property + def ipv4_mapped(self): + """Return the IPv4 mapped address. + + Returns: + If the IPv6 address is a v4 mapped address, return the + IPv4 mapped address. Return None otherwise. + + """ + if (self._ip >> 32) != 0xFFFF: + return None + return IPv4Address(self._ip & 0xFFFFFFFF) + + @property + def teredo(self): + """Tuple of embedded teredo IPs. 
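+
+        For example, for 2001:0:4136:e378:8000:63bf:3fff:fdd2 the embedded
+        server (bits 32..63) is 65.54.227.120, and the client is the bitwise
+        inverse of the low 32 bits, 192.0.2.45.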
+ + Returns: + Tuple of the (server, client) IPs or None if the address + doesn't appear to be a teredo address (doesn't start with + 2001::/32) + + """ + if (self._ip >> 96) != 0x20010000: + return None + return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), + IPv4Address(~self._ip & 0xFFFFFFFF)) + + @property + def sixtofour(self): + """Return the IPv4 6to4 embedded address. + + Returns: + The IPv4 6to4-embedded address if present or None if the + address doesn't appear to contain a 6to4 embedded address. + + """ + if (self._ip >> 112) != 0x2002: + return None + return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) + + +class IPv6Interface(IPv6Address): + + def __init__(self, address): + if isinstance(address, (bytes, _compat_int_types)): + IPv6Address.__init__(self, address) + self.network = IPv6Network(self._ip) + self._prefixlen = self._max_prefixlen + return + if isinstance(address, tuple): + IPv6Address.__init__(self, address[0]) + if len(address) > 1: + self._prefixlen = int(address[1]) + else: + self._prefixlen = self._max_prefixlen + self.network = IPv6Network(address, strict=False) + self.netmask = self.network.netmask + self.hostmask = self.network.hostmask + return + + addr = _split_optional_netmask(address) + IPv6Address.__init__(self, addr[0]) + self.network = IPv6Network(address, strict=False) + self.netmask = self.network.netmask + self._prefixlen = self.network._prefixlen + self.hostmask = self.network.hostmask + + def __str__(self): + return '%s/%d' % (self._string_from_ip_int(self._ip), + self.network.prefixlen) + + def __eq__(self, other): + address_equal = IPv6Address.__eq__(self, other) + if not address_equal or address_equal is NotImplemented: + return address_equal + try: + return self.network == other.network + except AttributeError: + # An interface with an associated network is NOT the + # same as an unassociated address. That's why the hash + # takes the extra info into account. + return False + + def __lt__(self, other): + address_less = IPv6Address.__lt__(self, other) + if address_less is NotImplemented: + return NotImplemented + try: + return (self.network < other.network or + self.network == other.network and address_less) + except AttributeError: + # We *do* allow addresses and interfaces to be sorted. The + # unassociated address is considered less than all interfaces. + return False + + def __hash__(self): + return self._ip ^ self._prefixlen ^ int(self.network.network_address) + + __reduce__ = _IPAddressBase.__reduce__ + + @property + def ip(self): + return IPv6Address(self._ip) + + @property + def with_prefixlen(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self._prefixlen) + + @property + def with_netmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.netmask) + + @property + def with_hostmask(self): + return '%s/%s' % (self._string_from_ip_int(self._ip), + self.hostmask) + + @property + def is_unspecified(self): + return self._ip == 0 and self.network.is_unspecified + + @property + def is_loopback(self): + return self._ip == 1 and self.network.is_loopback + + +class IPv6Network(_BaseV6, _BaseNetwork): + + """This class represents and manipulates 128-bit IPv6 networks. 
+ + Attributes: [examples for IPv6('2001:db8::1000/124')] + .network_address: IPv6Address('2001:db8::1000') + .hostmask: IPv6Address('::f') + .broadcast_address: IPv6Address('2001:db8::100f') + .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') + .prefixlen: 124 + + """ + + # Class to use when creating address objects + _address_class = IPv6Address + + def __init__(self, address, strict=True): + """Instantiate a new IPv6 Network object. + + Args: + address: A string or integer representing the IPv6 network or the + IP and prefix/netmask. + '2001:db8::/128' + '2001:db8:0000:0000:0000:0000:0000:0000/128' + '2001:db8::' + are all functionally the same in IPv6. That is to say, + failing to provide a subnetmask will create an object with + a mask of /128. + + Additionally, an integer can be passed, so + IPv6Network('2001:db8::') == + IPv6Network(42540766411282592856903984951653826560) + or, more generally + IPv6Network(int(IPv6Network('2001:db8::'))) == + IPv6Network('2001:db8::') + + strict: A boolean. If true, ensure that we have been passed + A true network address, eg, 2001:db8::1000/124 and not an + IP address on a network, eg, 2001:db8::1/124. + + Raises: + AddressValueError: If address isn't a valid IPv6 address. + NetmaskValueError: If the netmask isn't valid for + an IPv6 address. + ValueError: If strict was True and a network address was not + supplied. + + """ + _BaseNetwork.__init__(self, address) + + # Efficient constructor from integer or packed address + if isinstance(address, (bytes, _compat_int_types)): + self.network_address = IPv6Address(address) + self.netmask, self._prefixlen = self._make_netmask( + self._max_prefixlen) + return + + if isinstance(address, tuple): + if len(address) > 1: + arg = address[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + self.network_address = IPv6Address(address[0]) + packed = int(self.network_address) + if packed & int(self.netmask) != packed: + if strict: + raise ValueError('%s has host bits set' % self) + else: + self.network_address = IPv6Address(packed & + int(self.netmask)) + return + + # Assume input argument to be string or any object representation + # which converts into a formatted IP prefix string. + addr = _split_optional_netmask(address) + + self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) + + if len(addr) == 2: + arg = addr[1] + else: + arg = self._max_prefixlen + self.netmask, self._prefixlen = self._make_netmask(arg) + + if strict: + if (IPv6Address(int(self.network_address) & int(self.netmask)) != + self.network_address): + raise ValueError('%s has host bits set' % self) + self.network_address = IPv6Address(int(self.network_address) & + int(self.netmask)) + + if self._prefixlen == (self._max_prefixlen - 1): + self.hosts = self.__iter__ + + def hosts(self): + """Generate Iterator over usable hosts in a network. + + This is like __iter__ except it doesn't return the + Subnet-Router anycast address. + + """ + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in _compat_range(network + 1, broadcast + 1): + yield self._address_class(x) + + @property + def is_site_local(self): + """Test if the address is reserved for site-local. + + Note that the site-local address space has been deprecated by RFC 3879. + Use is_private to test if this address is in the space of unique local + addresses as defined by RFC 4193. + + Returns: + A boolean, True if the address is reserved per RFC 3513 2.5.6. 
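+            (i.e. the network lies entirely within the deprecated
+            fec0::/10 range).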
+ + """ + return (self.network_address.is_site_local and + self.broadcast_address.is_site_local) + + +class _IPv6Constants(object): + + _linklocal_network = IPv6Network('fe80::/10') + + _multicast_network = IPv6Network('ff00::/8') + + _private_networks = [ + IPv6Network('::1/128'), + IPv6Network('::/128'), + IPv6Network('::ffff:0:0/96'), + IPv6Network('100::/64'), + IPv6Network('2001::/23'), + IPv6Network('2001:2::/48'), + IPv6Network('2001:db8::/32'), + IPv6Network('2001:10::/28'), + IPv6Network('fc00::/7'), + IPv6Network('fe80::/10'), + ] + + _reserved_networks = [ + IPv6Network('::/8'), IPv6Network('100::/8'), + IPv6Network('200::/7'), IPv6Network('400::/6'), + IPv6Network('800::/5'), IPv6Network('1000::/4'), + IPv6Network('4000::/3'), IPv6Network('6000::/3'), + IPv6Network('8000::/3'), IPv6Network('A000::/3'), + IPv6Network('C000::/3'), IPv6Network('E000::/4'), + IPv6Network('F000::/5'), IPv6Network('F800::/6'), + IPv6Network('FE00::/9'), + ] + + _sitelocal_network = IPv6Network('fec0::/10') + + +IPv6Address._constants = _IPv6Constants diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyc new file mode 100644 index 0000000..3736532 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py new file mode 100644 index 0000000..a6f44a5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py @@ -0,0 +1,347 @@ +# -*- coding: utf-8 -*- + +""" +lockfile.py - Platform-independent advisory file locks. + +Requires Python 2.5 unless you apply 2.4.diff +Locking is done on a per-thread basis instead of a per-process basis. + +Usage: + +>>> lock = LockFile('somefile') +>>> try: +... lock.acquire() +... except AlreadyLocked: +... print 'somefile', 'is locked already.' +... except LockFailed: +... print 'somefile', 'can\\'t be locked.' +... else: +... print 'got lock' +got lock +>>> print lock.is_locked() +True +>>> lock.release() + +>>> lock = LockFile('somefile') +>>> print lock.is_locked() +False +>>> with lock: +... print lock.is_locked() +True +>>> print lock.is_locked() +False + +>>> lock = LockFile('somefile') +>>> # It is okay to lock twice from the same thread... +>>> with lock: +... lock.acquire() +... +>>> # Though no counter is kept, so you can't unlock multiple times... +>>> print lock.is_locked() +False + +Exceptions: + + Error - base class for other exceptions + LockError - base class for all locking exceptions + AlreadyLocked - Another thread or process already holds the lock + LockFailed - Lock failed for some other reason + UnlockError - base class for all unlocking exceptions + AlreadyUnlocked - File was not locked. + NotMyLock - File was locked but not by the current thread/process +""" + +from __future__ import absolute_import + +import functools +import os +import socket +import threading +import warnings + +# Work with PEP8 and non-PEP8 versions of threading module. 
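+# (Python 2.5 and earlier only provide the camelCase spellings, e.g.
+# threading.currentThread; alias them so the PEP8 names used below work.)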
+if not hasattr(threading, "current_thread"):
+    threading.current_thread = threading.currentThread
+if not hasattr(threading.Thread, "get_name"):
+    threading.Thread.get_name = threading.Thread.getName
+
+__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
+           'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
+           'LinkFileLock', 'MkdirFileLock', 'SQLiteFileLock',
+           'LockBase', 'locked']
+
+
+class Error(Exception):
+    """
+    Base class for other exceptions.
+
+    >>> try:
+    ...   raise Error
+    ... except Exception:
+    ...   pass
+    """
+    pass
+
+
+class LockError(Error):
+    """
+    Base class for error arising from attempts to acquire the lock.
+
+    >>> try:
+    ...   raise LockError
+    ... except Error:
+    ...   pass
+    """
+    pass
+
+
+class LockTimeout(LockError):
+    """Raised when lock creation fails within a user-defined period of time.
+
+    >>> try:
+    ...   raise LockTimeout
+    ... except LockError:
+    ...   pass
+    """
+    pass
+
+
+class AlreadyLocked(LockError):
+    """Some other thread/process is locking the file.
+
+    >>> try:
+    ...   raise AlreadyLocked
+    ... except LockError:
+    ...   pass
+    """
+    pass
+
+
+class LockFailed(LockError):
+    """Lock file creation failed for some other reason.
+
+    >>> try:
+    ...   raise LockFailed
+    ... except LockError:
+    ...   pass
+    """
+    pass
+
+
+class UnlockError(Error):
+    """
+    Base class for errors arising from attempts to release the lock.
+
+    >>> try:
+    ...   raise UnlockError
+    ... except Error:
+    ...   pass
+    """
+    pass
+
+
+class NotLocked(UnlockError):
+    """Raised when an attempt is made to unlock an unlocked file.
+
+    >>> try:
+    ...   raise NotLocked
+    ... except UnlockError:
+    ...   pass
+    """
+    pass
+
+
+class NotMyLock(UnlockError):
+    """Raised when an attempt is made to unlock a file someone else locked.
+
+    >>> try:
+    ...   raise NotMyLock
+    ... except UnlockError:
+    ...   pass
+    """
+    pass
+
+
+class _SharedBase(object):
+    def __init__(self, path):
+        self.path = path
+
+    def acquire(self, timeout=None):
+        """
+        Acquire the lock.
+
+        * If timeout is omitted (or None), wait forever trying to lock the
+          file.
+
+        * If timeout > 0, try to acquire the lock for that many seconds. If
+          the lock period expires and the file is still locked, raise
+          LockTimeout.
+
+        * If timeout <= 0, raise AlreadyLocked immediately if the file is
+          already locked.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def release(self):
+        """
+        Release the lock.
+
+        If the file is not locked, raise NotLocked.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def __enter__(self):
+        """
+        Context manager support.
+        """
+        self.acquire()
+        return self
+
+    def __exit__(self, *_exc):
+        """
+        Context manager support.
+        """
+        self.release()
+
+    def __repr__(self):
+        return "<%s: %r>" % (self.__class__.__name__, self.path)
+
+
+class LockBase(_SharedBase):
+    """Base class for platform-specific lock classes."""
+    def __init__(self, path, threaded=True, timeout=None):
+        """
+        >>> lock = LockBase('somefile')
+        >>> lock = LockBase('somefile', threaded=False)
+        """
+        super(LockBase, self).__init__(path)
+        self.lock_file = os.path.abspath(path) + ".lock"
+        self.hostname = socket.gethostname()
+        self.pid = os.getpid()
+        if threaded:
+            t = threading.current_thread()
+            # Thread objects in Python 2.4 and earlier do not have ident
+            # attrs. Worm around that.
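+            # hash(t) is a stable per-thread stand-in when ident is missing.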
+            ident = getattr(t, "ident", hash(t))
+            self.tname = "-%x" % (ident & 0xffffffff)
+        else:
+            self.tname = ""
+        dirname = os.path.dirname(self.lock_file)
+
+        # unique name is mostly about the current process, but must
+        # also contain the path -- otherwise, two adjacent locked
+        # files conflict (one file gets locked, creating lock-file and
+        # unique file, the other one gets locked, creating lock-file
+        # and overwriting the already existing lock-file, then one
+        # gets unlocked, deleting both lock-file and unique file,
+        # finally the last lock errors out upon releasing.
+        self.unique_name = os.path.join(dirname,
+                                        "%s%s.%s%s" % (self.hostname,
+                                                       self.tname,
+                                                       self.pid,
+                                                       hash(self.path)))
+        self.timeout = timeout
+
+    def is_locked(self):
+        """
+        Tell whether or not the file is locked.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def i_am_locking(self):
+        """
+        Return True if this object is locking the file.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def break_lock(self):
+        """
+        Remove a lock. Useful if a locking thread failed to unlock.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def __repr__(self):
+        return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name,
+                                   self.path)
+
+
+def _fl_helper(cls, mod, *args, **kwds):
+    warnings.warn("Import from %s module instead of lockfile package" % mod,
+                  DeprecationWarning, stacklevel=2)
+    # This is a bit funky, but it's only for awhile. The way the unit tests
+    # are constructed this function winds up as an unbound method, so it
+    # actually takes three args, not two. We want to toss out self.
+    if not isinstance(args[0], str):
+        # We are testing, avoid the first arg
+        args = args[1:]
+    if len(args) == 1 and not kwds:
+        kwds["threaded"] = True
+    return cls(*args, **kwds)
+
+
+def LinkFileLock(*args, **kwds):
+    """Factory function provided for backwards compatibility.
+
+    Do not use in new code. Instead, import LinkLockFile from the
+    lockfile.linklockfile module.
+    """
+    from . import linklockfile
+    return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile",
+                      *args, **kwds)
+
+
+def MkdirFileLock(*args, **kwds):
+    """Factory function provided for backwards compatibility.
+
+    Do not use in new code. Instead, import MkdirLockFile from the
+    lockfile.mkdirlockfile module.
+    """
+    from . import mkdirlockfile
+    return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile",
+                      *args, **kwds)
+
+
+def SQLiteFileLock(*args, **kwds):
+    """Factory function provided for backwards compatibility.
+
+    Do not use in new code. Instead, import SQLiteLockFile from the
+    lockfile.sqlitelockfile module.
+    """
+    from . import sqlitelockfile
+    return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
+                      *args, **kwds)
+
+
+def locked(path, timeout=None):
+    """Decorator which enables locks for decorated function.
+
+    Arguments:
+     - path: path for lockfile.
+     - timeout (optional): Timeout for acquiring lock.
+
+    Usage:
+        @locked('/var/run/myname', timeout=0)
+        def myname(...):
+            ...
+    """
+    def decor(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            lock = FileLock(path, timeout=timeout)
+            lock.acquire()
+            try:
+                return func(*args, **kwargs)
+            finally:
+                lock.release()
+        return wrapper
+    return decor
+
+
+if hasattr(os, "link"):
+    from . import linklockfile as _llf
+    LockFile = _llf.LinkLockFile
+else:
+    from .
import mkdirlockfile as _mlf + LockFile = _mlf.MkdirLockFile + +FileLock = LockFile diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.pyc new file mode 100644 index 0000000..778c91a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/linklockfile.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/linklockfile.py new file mode 100644 index 0000000..2ca9be0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/linklockfile.py @@ -0,0 +1,73 @@ +from __future__ import absolute_import + +import time +import os + +from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, + AlreadyLocked) + + +class LinkLockFile(LockBase): + """Lock access to a file using atomic property of link(2). + + >>> lock = LinkLockFile('somefile') + >>> lock = LinkLockFile('somefile', threaded=False) + """ + + def acquire(self, timeout=None): + try: + open(self.unique_name, "wb").close() + except IOError: + raise LockFailed("failed to create %s" % self.unique_name) + + timeout = timeout if timeout is not None else self.timeout + end_time = time.time() + if timeout is not None and timeout > 0: + end_time += timeout + + while True: + # Try and create a hard link to it. + try: + os.link(self.unique_name, self.lock_file) + except OSError: + # Link creation failed. Maybe we've double-locked? + nlinks = os.stat(self.unique_name).st_nlink + if nlinks == 2: + # The original link plus the one I created == 2. We're + # good to go. + return + else: + # Otherwise the lock creation failed. + if timeout is not None and time.time() > end_time: + os.unlink(self.unique_name) + if timeout > 0: + raise LockTimeout("Timeout waiting to acquire" + " lock for %s" % + self.path) + else: + raise AlreadyLocked("%s is already locked" % + self.path) + time.sleep(timeout is not None and timeout / 10 or 0.1) + else: + # Link creation succeeded. We're good to go. + return + + def release(self): + if not self.is_locked(): + raise NotLocked("%s is not locked" % self.path) + elif not os.path.exists(self.unique_name): + raise NotMyLock("%s is locked, but not by me" % self.path) + os.unlink(self.unique_name) + os.unlink(self.lock_file) + + def is_locked(self): + return os.path.exists(self.lock_file) + + def i_am_locking(self): + return (self.is_locked() and + os.path.exists(self.unique_name) and + os.stat(self.unique_name).st_nlink == 2) + + def break_lock(self): + if os.path.exists(self.lock_file): + os.unlink(self.lock_file) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/linklockfile.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/linklockfile.pyc new file mode 100644 index 0000000..ed9cd29 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/linklockfile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.py new file mode 100644 index 0000000..05a8c96 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.py @@ -0,0 +1,84 @@ +from __future__ import absolute_import, division + +import time +import os +import sys +import errno + +from . 
import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, + AlreadyLocked) + + +class MkdirLockFile(LockBase): + """Lock file by creating a directory.""" + def __init__(self, path, threaded=True, timeout=None): + """ + >>> lock = MkdirLockFile('somefile') + >>> lock = MkdirLockFile('somefile', threaded=False) + """ + LockBase.__init__(self, path, threaded, timeout) + # Lock file itself is a directory. Place the unique file name into + # it. + self.unique_name = os.path.join(self.lock_file, + "%s.%s%s" % (self.hostname, + self.tname, + self.pid)) + + def acquire(self, timeout=None): + timeout = timeout if timeout is not None else self.timeout + end_time = time.time() + if timeout is not None and timeout > 0: + end_time += timeout + + if timeout is None: + wait = 0.1 + else: + wait = max(0, timeout / 10) + + while True: + try: + os.mkdir(self.lock_file) + except OSError: + err = sys.exc_info()[1] + if err.errno == errno.EEXIST: + # Already locked. + if os.path.exists(self.unique_name): + # Already locked by me. + return + if timeout is not None and time.time() > end_time: + if timeout > 0: + raise LockTimeout("Timeout waiting to acquire" + " lock for %s" % + self.path) + else: + # Someone else has the lock. + raise AlreadyLocked("%s is already locked" % + self.path) + time.sleep(wait) + else: + # Couldn't create the lock for some other reason + raise LockFailed("failed to create %s" % self.lock_file) + else: + open(self.unique_name, "wb").close() + return + + def release(self): + if not self.is_locked(): + raise NotLocked("%s is not locked" % self.path) + elif not os.path.exists(self.unique_name): + raise NotMyLock("%s is locked, but not by me" % self.path) + os.unlink(self.unique_name) + os.rmdir(self.lock_file) + + def is_locked(self): + return os.path.exists(self.lock_file) + + def i_am_locking(self): + return (self.is_locked() and + os.path.exists(self.unique_name)) + + def break_lock(self): + if os.path.exists(self.lock_file): + for name in os.listdir(self.lock_file): + os.unlink(os.path.join(self.lock_file, name)) + os.rmdir(self.lock_file) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.pyc new file mode 100644 index 0000000..28f754c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.py new file mode 100644 index 0000000..069e85b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.py @@ -0,0 +1,190 @@ +# -*- coding: utf-8 -*- + +# pidlockfile.py +# +# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au> +# +# This is free software: you may copy, modify, and/or distribute this work +# under the terms of the Python Software Foundation License, version 2 or +# later as published by the Python Software Foundation. +# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. + +""" Lockfile behaviour implemented via Unix PID files. + """ + +from __future__ import absolute_import + +import errno +import os +import time + +from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock, + LockTimeout) + + +class PIDLockFile(LockBase): + """ Lockfile implemented as a Unix PID file. + + The lock file is a normal file named by the attribute `path`. 
+ A lock's PID file contains a single line of text, containing + the process ID (PID) of the process that acquired the lock. + + >>> lock = PIDLockFile('somefile') + >>> lock = PIDLockFile('somefile') + """ + + def __init__(self, path, threaded=False, timeout=None): + # pid lockfiles don't support threaded operation, so always force + # False as the threaded arg. + LockBase.__init__(self, path, False, timeout) + self.unique_name = self.path + + def read_pid(self): + """ Get the PID from the lock file. + """ + return read_pid_from_pidfile(self.path) + + def is_locked(self): + """ Test if the lock is currently held. + + The lock is held if the PID file for this lock exists. + + """ + return os.path.exists(self.path) + + def i_am_locking(self): + """ Test if the lock is held by the current process. + + Returns ``True`` if the current process ID matches the + number stored in the PID file. + """ + return self.is_locked() and os.getpid() == self.read_pid() + + def acquire(self, timeout=None): + """ Acquire the lock. + + Creates the PID file for this lock, or raises an error if + the lock could not be acquired. + """ + + timeout = timeout if timeout is not None else self.timeout + end_time = time.time() + if timeout is not None and timeout > 0: + end_time += timeout + + while True: + try: + write_pid_to_pidfile(self.path) + except OSError as exc: + if exc.errno == errno.EEXIST: + # The lock creation failed. Maybe sleep a bit. + if time.time() > end_time: + if timeout is not None and timeout > 0: + raise LockTimeout("Timeout waiting to acquire" + " lock for %s" % + self.path) + else: + raise AlreadyLocked("%s is already locked" % + self.path) + time.sleep(timeout is not None and timeout / 10 or 0.1) + else: + raise LockFailed("failed to create %s" % self.path) + else: + return + + def release(self): + """ Release the lock. + + Removes the PID file to release the lock, or raises an + error if the current process does not hold the lock. + + """ + if not self.is_locked(): + raise NotLocked("%s is not locked" % self.path) + if not self.i_am_locking(): + raise NotMyLock("%s is locked, but not by me" % self.path) + remove_existing_pidfile(self.path) + + def break_lock(self): + """ Break an existing lock. + + Removes the PID file if it already exists, otherwise does + nothing. + + """ + remove_existing_pidfile(self.path) + + +def read_pid_from_pidfile(pidfile_path): + """ Read the PID recorded in the named PID file. + + Read and return the numeric PID recorded as text in the named + PID file. If the PID file cannot be read, or if the content is + not a valid PID, return ``None``. + + """ + pid = None + try: + pidfile = open(pidfile_path, 'r') + except IOError: + pass + else: + # According to the FHS 2.3 section on PID files in /var/run: + # + # The file must consist of the process identifier in + # ASCII-encoded decimal, followed by a newline character. + # + # Programs that read PID files should be somewhat flexible + # in what they accept; i.e., they should ignore extra + # whitespace, leading zeroes, absence of the trailing + # newline, or additional lines in the PID file. + + line = pidfile.readline().strip() + try: + pid = int(line) + except ValueError: + pass + pidfile.close() + + return pid + + +def write_pid_to_pidfile(pidfile_path): + """ Write the PID in the named PID file. + + Get the numeric process ID (“PID”) of the current process + and write it to the named file as a line of text. 
+ + """ + open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY) + open_mode = 0o644 + pidfile_fd = os.open(pidfile_path, open_flags, open_mode) + pidfile = os.fdopen(pidfile_fd, 'w') + + # According to the FHS 2.3 section on PID files in /var/run: + # + # The file must consist of the process identifier in + # ASCII-encoded decimal, followed by a newline character. For + # example, if crond was process number 25, /var/run/crond.pid + # would contain three characters: two, five, and newline. + + pid = os.getpid() + pidfile.write("%s\n" % pid) + pidfile.close() + + +def remove_existing_pidfile(pidfile_path): + """ Remove the named PID file if it exists. + + Removing a PID file that doesn't already exist puts us in the + desired state, so we ignore the condition if the file does not + exist. + + """ + try: + os.remove(pidfile_path) + except OSError as exc: + if exc.errno == errno.ENOENT: + pass + else: + raise diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.pyc new file mode 100644 index 0000000..6b0f05e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py new file mode 100644 index 0000000..f997e24 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py @@ -0,0 +1,156 @@ +from __future__ import absolute_import, division + +import time +import os + +try: + unicode +except NameError: + unicode = str + +from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked + + +class SQLiteLockFile(LockBase): + "Demonstrate SQL-based locking." + + testdb = None + + def __init__(self, path, threaded=True, timeout=None): + """ + >>> lock = SQLiteLockFile('somefile') + >>> lock = SQLiteLockFile('somefile', threaded=False) + """ + LockBase.__init__(self, path, threaded, timeout) + self.lock_file = unicode(self.lock_file) + self.unique_name = unicode(self.unique_name) + + if SQLiteLockFile.testdb is None: + import tempfile + _fd, testdb = tempfile.mkstemp() + os.close(_fd) + os.unlink(testdb) + del _fd, tempfile + SQLiteLockFile.testdb = testdb + + import sqlite3 + self.connection = sqlite3.connect(SQLiteLockFile.testdb) + + c = self.connection.cursor() + try: + c.execute("create table locks" + "(" + " lock_file varchar(32)," + " unique_name varchar(32)" + ")") + except sqlite3.OperationalError: + pass + else: + self.connection.commit() + import atexit + atexit.register(os.unlink, SQLiteLockFile.testdb) + + def acquire(self, timeout=None): + timeout = timeout if timeout is not None else self.timeout + end_time = time.time() + if timeout is not None and timeout > 0: + end_time += timeout + + if timeout is None: + wait = 0.1 + elif timeout <= 0: + wait = 0 + else: + wait = timeout / 10 + + cursor = self.connection.cursor() + + while True: + if not self.is_locked(): + # Not locked. Try to lock it. + cursor.execute("insert into locks" + " (lock_file, unique_name)" + " values" + " (?, ?)", + (self.lock_file, self.unique_name)) + self.connection.commit() + + # Check to see if we are the only lock holder. + cursor.execute("select * from locks" + " where unique_name = ?", + (self.unique_name,)) + rows = cursor.fetchall() + if len(rows) > 1: + # Nope. Someone else got there. Remove our lock. 
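pidlockfile.py is complete at this point; a minimal sketch of its acquire/release cycle, assuming the vendored module is importable and using an illustrative /tmp path:

from pip._vendor.lockfile.pidlockfile import PIDLockFile

lock = PIDLockFile("/tmp/demo.pid", timeout=0)  # path is illustrative
lock.acquire()                 # writes our PID plus a newline to /tmp/demo.pid
assert lock.is_locked()
assert lock.i_am_locking()     # os.getpid() matches read_pid()
print(lock.read_pid())
lock.release()                 # unlinks the PID file

With timeout=0, a second process hitting acquire() while the file exists raises AlreadyLocked immediately instead of polling.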
+ cursor.execute("delete from locks" + " where unique_name = ?", + (self.unique_name,)) + self.connection.commit() + else: + # Yup. We're done, so go home. + return + else: + # Check to see if we are the only lock holder. + cursor.execute("select * from locks" + " where unique_name = ?", + (self.unique_name,)) + rows = cursor.fetchall() + if len(rows) == 1: + # We're the locker, so go home. + return + + # Maybe we should wait a bit longer. + if timeout is not None and time.time() > end_time: + if timeout > 0: + # No more waiting. + raise LockTimeout("Timeout waiting to acquire" + " lock for %s" % + self.path) + else: + # Someone else has the lock and we are impatient.. + raise AlreadyLocked("%s is already locked" % self.path) + + # Well, okay. We'll give it a bit longer. + time.sleep(wait) + + def release(self): + if not self.is_locked(): + raise NotLocked("%s is not locked" % self.path) + if not self.i_am_locking(): + raise NotMyLock("%s is locked, but not by me (by %s)" % + (self.unique_name, self._who_is_locking())) + cursor = self.connection.cursor() + cursor.execute("delete from locks" + " where unique_name = ?", + (self.unique_name,)) + self.connection.commit() + + def _who_is_locking(self): + cursor = self.connection.cursor() + cursor.execute("select unique_name from locks" + " where lock_file = ?", + (self.lock_file,)) + return cursor.fetchone()[0] + + def is_locked(self): + cursor = self.connection.cursor() + cursor.execute("select * from locks" + " where lock_file = ?", + (self.lock_file,)) + rows = cursor.fetchall() + return not not rows + + def i_am_locking(self): + cursor = self.connection.cursor() + cursor.execute("select * from locks" + " where lock_file = ?" + " and unique_name = ?", + (self.lock_file, self.unique_name)) + return not not cursor.fetchall() + + def break_lock(self): + cursor = self.connection.cursor() + cursor.execute("delete from locks" + " where lock_file = ?", + (self.lock_file,)) + self.connection.commit() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.pyc new file mode 100644 index 0000000..ee99a8b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/symlinklockfile.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/symlinklockfile.py new file mode 100644 index 0000000..23b41f5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/symlinklockfile.py @@ -0,0 +1,70 @@ +from __future__ import absolute_import + +import os +import time + +from . import (LockBase, NotLocked, NotMyLock, LockTimeout, + AlreadyLocked) + + +class SymlinkLockFile(LockBase): + """Lock access to a file using symlink(2).""" + + def __init__(self, path, threaded=True, timeout=None): + # super(SymlinkLockFile).__init(...) + LockBase.__init__(self, path, threaded, timeout) + # split it back! + self.unique_name = os.path.split(self.unique_name)[1] + + def acquire(self, timeout=None): + # Hopefully unnecessary for symlink. + # try: + # open(self.unique_name, "wb").close() + # except IOError: + # raise LockFailed("failed to create %s" % self.unique_name) + timeout = timeout if timeout is not None else self.timeout + end_time = time.time() + if timeout is not None and timeout > 0: + end_time += timeout + + while True: + # Try and create a symbolic link to it. 
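sqlitelockfile.py, which closes just above, parks every lock as a row in a single throwaway temp database created once per process; a sketch under the same vendored-import assumption:

from pip._vendor.lockfile.sqlitelockfile import SQLiteLockFile

lock = SQLiteLockFile("/tmp/demo.txt", timeout=0)  # path is illustrative
lock.acquire()               # inserts (lock_file, unique_name) into the locks table
assert lock.i_am_locking()   # our row is the only one for this lock_file
lock.release()               # deletes the row; the temp db is unlinked at exit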
+ try: + os.symlink(self.unique_name, self.lock_file) + except OSError: + # Link creation failed. Maybe we've double-locked? + if self.i_am_locking(): + # Linked to out unique name. Proceed. + return + else: + # Otherwise the lock creation failed. + if timeout is not None and time.time() > end_time: + if timeout > 0: + raise LockTimeout("Timeout waiting to acquire" + " lock for %s" % + self.path) + else: + raise AlreadyLocked("%s is already locked" % + self.path) + time.sleep(timeout / 10 if timeout is not None else 0.1) + else: + # Link creation succeeded. We're good to go. + return + + def release(self): + if not self.is_locked(): + raise NotLocked("%s is not locked" % self.path) + elif not self.i_am_locking(): + raise NotMyLock("%s is locked, but not by me" % self.path) + os.unlink(self.lock_file) + + def is_locked(self): + return os.path.islink(self.lock_file) + + def i_am_locking(self): + return (os.path.islink(self.lock_file) + and os.readlink(self.lock_file) == self.unique_name) + + def break_lock(self): + if os.path.islink(self.lock_file): # exists && link + os.unlink(self.lock_file) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/symlinklockfile.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/symlinklockfile.pyc new file mode 100644 index 0000000..b5714de Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/lockfile/symlinklockfile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/__init__.py new file mode 100644 index 0000000..2afca5a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/__init__.py @@ -0,0 +1,66 @@ +# coding: utf-8 +from pip._vendor.msgpack._version import version +from pip._vendor.msgpack.exceptions import * + +from collections import namedtuple + + +class ExtType(namedtuple('ExtType', 'code data')): + """ExtType represents ext type in msgpack.""" + def __new__(cls, code, data): + if not isinstance(code, int): + raise TypeError("code must be int") + if not isinstance(data, bytes): + raise TypeError("data must be bytes") + if not 0 <= code <= 127: + raise ValueError("code must be 0~127") + return super(ExtType, cls).__new__(cls, code, data) + + +import os +if os.environ.get('MSGPACK_PUREPYTHON'): + from pip._vendor.msgpack.fallback import Packer, unpackb, Unpacker +else: + try: + from pip._vendor.msgpack._packer import Packer + from pip._vendor.msgpack._unpacker import unpackb, Unpacker + except ImportError: + from pip._vendor.msgpack.fallback import Packer, unpackb, Unpacker + + +def pack(o, stream, **kwargs): + """ + Pack object `o` and write it to `stream` + + See :class:`Packer` for options. + """ + packer = Packer(**kwargs) + stream.write(packer.pack(o)) + + +def packb(o, **kwargs): + """ + Pack object `o` and return packed bytes + + See :class:`Packer` for options. + """ + return Packer(**kwargs).pack(o) + + +def unpack(stream, **kwargs): + """ + Unpack an object from `stream`. + + Raises `ExtraData` when `stream` contains extra bytes. + See :class:`Unpacker` for options. + """ + data = stream.read() + return unpackb(data, **kwargs) + + +# alias for compatibility to simplejson/marshal/pickle. 
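With symlinklockfile.py finished above, note that the lock owner is encoded in the link target itself, so the ownership check is just a readlink comparison; a POSIX-only sketch (vendored import path assumed):

import os
from pip._vendor.lockfile.symlinklockfile import SymlinkLockFile

lock = SymlinkLockFile("/tmp/demo.txt", timeout=0)  # path is illustrative
lock.acquire()    # atomically creates the lock symlink pointing at our unique name
print(os.readlink(lock.lock_file) == lock.unique_name)  # True: we hold it
lock.release()    # removes the symlink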
+load = unpack +loads = unpackb + +dump = pack +dumps = packb diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/__init__.pyc new file mode 100644 index 0000000..44a03c5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/_version.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/_version.py new file mode 100644 index 0000000..d28f0de --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/_version.py @@ -0,0 +1 @@ +version = (0, 5, 6) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/_version.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/_version.pyc new file mode 100644 index 0000000..8373fef Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/_version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/exceptions.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/exceptions.py new file mode 100644 index 0000000..9766881 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/exceptions.py @@ -0,0 +1,41 @@ +class UnpackException(Exception): + """Deprecated. Use Exception instead to catch all exception during unpacking.""" + + +class BufferFull(UnpackException): + pass + + +class OutOfData(UnpackException): + pass + + +class UnpackValueError(UnpackException, ValueError): + """Deprecated. Use ValueError instead.""" + + +class ExtraData(UnpackValueError): + def __init__(self, unpacked, extra): + self.unpacked = unpacked + self.extra = extra + + def __str__(self): + return "unpack(b) received extra data." + + +class PackException(Exception): + """Deprecated. Use Exception instead to catch all exception during packing.""" + + +class PackValueError(PackException, ValueError): + """PackValueError is raised when type of input data is supported but it's value is unsupported. + + Deprecated. Use ValueError instead. + """ + + +class PackOverflowError(PackValueError, OverflowError): + """PackOverflowError is raised when integer value is out of range of msgpack support [-2**31, 2**32). + + Deprecated. Use ValueError instead. + """ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/exceptions.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/exceptions.pyc new file mode 100644 index 0000000..e8a97f7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/exceptions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/fallback.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/fallback.py new file mode 100644 index 0000000..9418421 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/fallback.py @@ -0,0 +1,977 @@ +"""Fallback pure Python implementation of msgpack""" + +import sys +import struct +import warnings + +if sys.version_info[0] == 3: + PY3 = True + int_types = int + Unicode = str + xrange = range + def dict_iteritems(d): + return d.items() +else: + PY3 = False + int_types = (int, long) + Unicode = unicode + def dict_iteritems(d): + return d.iteritems() + + +if hasattr(sys, 'pypy_version_info'): + # cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own + # StringBuilder is fastest. 
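msgpack/__init__.py is now complete, with packb/unpackb plus the loads/dumps aliases; a minimal round-trip sketch against the vendored package:

from pip._vendor import msgpack

payload = {"name": "demo", "codes": [1, 2, 3]}
blob = msgpack.packb(payload, use_bin_type=True)   # bytes on the wire
restored = msgpack.unpackb(blob, raw=False)        # decode msgpack raw to str
assert restored == payload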
+ from __pypy__ import newlist_hint + try: + from __pypy__.builders import BytesBuilder as StringBuilder + except ImportError: + from __pypy__.builders import StringBuilder + USING_STRINGBUILDER = True + class StringIO(object): + def __init__(self, s=b''): + if s: + self.builder = StringBuilder(len(s)) + self.builder.append(s) + else: + self.builder = StringBuilder() + def write(self, s): + if isinstance(s, memoryview): + s = s.tobytes() + elif isinstance(s, bytearray): + s = bytes(s) + self.builder.append(s) + def getvalue(self): + return self.builder.build() +else: + USING_STRINGBUILDER = False + from io import BytesIO as StringIO + newlist_hint = lambda size: [] + + +from pip._vendor.msgpack.exceptions import ( + BufferFull, + OutOfData, + UnpackValueError, + PackValueError, + PackOverflowError, + ExtraData) + +from pip._vendor.msgpack import ExtType + + +EX_SKIP = 0 +EX_CONSTRUCT = 1 +EX_READ_ARRAY_HEADER = 2 +EX_READ_MAP_HEADER = 3 + +TYPE_IMMEDIATE = 0 +TYPE_ARRAY = 1 +TYPE_MAP = 2 +TYPE_RAW = 3 +TYPE_BIN = 4 +TYPE_EXT = 5 + +DEFAULT_RECURSE_LIMIT = 511 + + +def _check_type_strict(obj, t, type=type, tuple=tuple): + if type(t) is tuple: + return type(obj) in t + else: + return type(obj) is t + + +def _get_data_from_buffer(obj): + try: + view = memoryview(obj) + except TypeError: + # try to use legacy buffer protocol if 2.7, otherwise re-raise + if not PY3: + view = memoryview(buffer(obj)) + warnings.warn("using old buffer interface to unpack %s; " + "this leads to unpacking errors if slicing is used and " + "will be removed in a future version" % type(obj), + RuntimeWarning) + else: + raise + if view.itemsize != 1: + raise ValueError("cannot unpack from multi-byte object") + return view + + +def unpack(stream, **kwargs): + warnings.warn( + "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", + PendingDeprecationWarning) + data = stream.read() + return unpackb(data, **kwargs) + + +def unpackb(packed, **kwargs): + """ + Unpack an object from `packed`. + + Raises `ExtraData` when `packed` contains extra bytes. + See :class:`Unpacker` for options. + """ + unpacker = Unpacker(None, **kwargs) + unpacker.feed(packed) + try: + ret = unpacker._unpack() + except OutOfData: + raise UnpackValueError("Data is not enough.") + if unpacker._got_extradata(): + raise ExtraData(ret, unpacker._get_extradata()) + return ret + + +class Unpacker(object): + """Streaming unpacker. + + arguments: + + :param file_like: + File-like object having `.read(n)` method. + If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable. + + :param int read_size: + Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`) + + :param bool use_list: + If true, unpack msgpack array to Python list. + Otherwise, unpack to Python tuple. (default: True) + + :param bool raw: + If true, unpack msgpack raw to Python bytes (default). + Otherwise, unpack to Python str (or unicode on Python 2) by decoding + with UTF-8 encoding (recommended). + Currently, the default is true, but it will be changed to false in + near future. So you must specify it explicitly for keeping backward + compatibility. + + *encoding* option which is deprecated overrides this option. + + :param callable object_hook: + When specified, it should be callable. + Unpacker calls it with a dict argument after unpacking msgpack map. + (See also simplejson) + + :param callable object_pairs_hook: + When specified, it should be callable. 
+ Unpacker calls it with a list of key-value pairs after unpacking msgpack map. + (See also simplejson) + + :param str encoding: + Encoding used for decoding msgpack raw. + If it is None (default), msgpack raw is deserialized to Python bytes. + + :param str unicode_errors: + (deprecated) Used for decoding msgpack raw with *encoding*. + (default: `'strict'`) + + :param int max_buffer_size: + Limits size of data waiting unpacked. 0 means system's INT_MAX (default). + Raises `BufferFull` exception when it is insufficient. + You should set this parameter when unpacking data from untrusted source. + + :param int max_str_len: + Limits max length of str. (default: 2**31-1) + + :param int max_bin_len: + Limits max length of bin. (default: 2**31-1) + + :param int max_array_len: + Limits max length of array. (default: 2**31-1) + + :param int max_map_len: + Limits max length of map. (default: 2**31-1) + + + example of streaming deserialize from file-like object:: + + unpacker = Unpacker(file_like, raw=False) + for o in unpacker: + process(o) + + example of streaming deserialize from socket:: + + unpacker = Unpacker(raw=False) + while True: + buf = sock.recv(1024**2) + if not buf: + break + unpacker.feed(buf) + for o in unpacker: + process(o) + """ + + def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, + object_hook=None, object_pairs_hook=None, list_hook=None, + encoding=None, unicode_errors=None, max_buffer_size=0, + ext_hook=ExtType, + max_str_len=2147483647, # 2**32-1 + max_bin_len=2147483647, + max_array_len=2147483647, + max_map_len=2147483647, + max_ext_len=2147483647): + + if encoding is not None: + warnings.warn( + "encoding is deprecated, Use raw=False instead.", + PendingDeprecationWarning) + + if unicode_errors is None: + unicode_errors = 'strict' + + if file_like is None: + self._feeding = True + else: + if not callable(file_like.read): + raise TypeError("`file_like.read` must be callable") + self.file_like = file_like + self._feeding = False + + #: array of bytes fed. + self._buffer = bytearray() + # Some very old pythons don't support `struct.unpack_from()` with a + # `bytearray`. So we wrap it in a `buffer()` there. + if sys.version_info < (2, 7, 6): + self._buffer_view = buffer(self._buffer) + else: + self._buffer_view = self._buffer + #: Which position we currently reads + self._buff_i = 0 + + # When Unpacker is used as an iterable, between the calls to next(), + # the buffer is not "consumed" completely, for efficiency sake. + # Instead, it is done sloppily. To make sure we raise BufferFull at + # the correct moments, we have to keep track of how sloppy we were. + # Furthermore, when the buffer is incomplete (that is: in the case + # we raise an OutOfData) we need to rollback the buffer to the correct + # state, which _buf_checkpoint records. 
+ self._buf_checkpoint = 0 + + self._max_buffer_size = max_buffer_size or 2**31-1 + if read_size > self._max_buffer_size: + raise ValueError("read_size must be smaller than max_buffer_size") + self._read_size = read_size or min(self._max_buffer_size, 16*1024) + self._raw = bool(raw) + self._encoding = encoding + self._unicode_errors = unicode_errors + self._use_list = use_list + self._list_hook = list_hook + self._object_hook = object_hook + self._object_pairs_hook = object_pairs_hook + self._ext_hook = ext_hook + self._max_str_len = max_str_len + self._max_bin_len = max_bin_len + self._max_array_len = max_array_len + self._max_map_len = max_map_len + self._max_ext_len = max_ext_len + self._stream_offset = 0 + + if list_hook is not None and not callable(list_hook): + raise TypeError('`list_hook` is not callable') + if object_hook is not None and not callable(object_hook): + raise TypeError('`object_hook` is not callable') + if object_pairs_hook is not None and not callable(object_pairs_hook): + raise TypeError('`object_pairs_hook` is not callable') + if object_hook is not None and object_pairs_hook is not None: + raise TypeError("object_pairs_hook and object_hook are mutually " + "exclusive") + if not callable(ext_hook): + raise TypeError("`ext_hook` is not callable") + + def feed(self, next_bytes): + assert self._feeding + view = _get_data_from_buffer(next_bytes) + if (len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size): + raise BufferFull + + # Strip buffer before checkpoint before reading file. + if self._buf_checkpoint > 0: + del self._buffer[:self._buf_checkpoint] + self._buff_i -= self._buf_checkpoint + self._buf_checkpoint = 0 + + self._buffer += view + + def _consume(self): + """ Gets rid of the used parts of the buffer. """ + self._stream_offset += self._buff_i - self._buf_checkpoint + self._buf_checkpoint = self._buff_i + + def _got_extradata(self): + return self._buff_i < len(self._buffer) + + def _get_extradata(self): + return self._buffer[self._buff_i:] + + def read_bytes(self, n): + return self._read(n) + + def _read(self, n): + # (int) -> bytearray + self._reserve(n) + i = self._buff_i + self._buff_i = i+n + return self._buffer[i:i+n] + + def _reserve(self, n): + remain_bytes = len(self._buffer) - self._buff_i - n + + # Fast path: buffer has n bytes already + if remain_bytes >= 0: + return + + if self._feeding: + self._buff_i = self._buf_checkpoint + raise OutOfData + + # Strip buffer before checkpoint before reading file. 
+ if self._buf_checkpoint > 0: + del self._buffer[:self._buf_checkpoint] + self._buff_i -= self._buf_checkpoint + self._buf_checkpoint = 0 + + # Read from file + remain_bytes = -remain_bytes + while remain_bytes > 0: + to_read_bytes = max(self._read_size, remain_bytes) + read_data = self.file_like.read(to_read_bytes) + if not read_data: + break + assert isinstance(read_data, bytes) + self._buffer += read_data + remain_bytes -= len(read_data) + + if len(self._buffer) < n + self._buff_i: + self._buff_i = 0 # rollback + raise OutOfData + + def _read_header(self, execute=EX_CONSTRUCT): + typ = TYPE_IMMEDIATE + n = 0 + obj = None + self._reserve(1) + b = self._buffer[self._buff_i] + self._buff_i += 1 + if b & 0b10000000 == 0: + obj = b + elif b & 0b11100000 == 0b11100000: + obj = -1 - (b ^ 0xff) + elif b & 0b11100000 == 0b10100000: + n = b & 0b00011111 + typ = TYPE_RAW + if n > self._max_str_len: + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) + elif b & 0b11110000 == 0b10010000: + n = b & 0b00001111 + typ = TYPE_ARRAY + if n > self._max_array_len: + raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + elif b & 0b11110000 == 0b10000000: + n = b & 0b00001111 + typ = TYPE_MAP + if n > self._max_map_len: + raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + elif b == 0xc0: + obj = None + elif b == 0xc2: + obj = False + elif b == 0xc3: + obj = True + elif b == 0xc4: + typ = TYPE_BIN + self._reserve(1) + n = self._buffer[self._buff_i] + self._buff_i += 1 + if n > self._max_bin_len: + raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + obj = self._read(n) + elif b == 0xc5: + typ = TYPE_BIN + self._reserve(2) + n = struct.unpack_from(">H", self._buffer_view, self._buff_i)[0] + self._buff_i += 2 + if n > self._max_bin_len: + raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + obj = self._read(n) + elif b == 0xc6: + typ = TYPE_BIN + self._reserve(4) + n = struct.unpack_from(">I", self._buffer_view, self._buff_i)[0] + self._buff_i += 4 + if n > self._max_bin_len: + raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + obj = self._read(n) + elif b == 0xc7: # ext 8 + typ = TYPE_EXT + self._reserve(2) + L, n = struct.unpack_from('Bb', self._buffer_view, self._buff_i) + self._buff_i += 2 + if L > self._max_ext_len: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + obj = self._read(L) + elif b == 0xc8: # ext 16 + typ = TYPE_EXT + self._reserve(3) + L, n = struct.unpack_from('>Hb', self._buffer_view, self._buff_i) + self._buff_i += 3 + if L > self._max_ext_len: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + obj = self._read(L) + elif b == 0xc9: # ext 32 + typ = TYPE_EXT + self._reserve(5) + L, n = struct.unpack_from('>Ib', self._buffer_view, self._buff_i) + self._buff_i += 5 + if L > self._max_ext_len: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + obj = self._read(L) + elif b == 0xca: + self._reserve(4) + obj = struct.unpack_from(">f", self._buffer_view, self._buff_i)[0] + self._buff_i += 4 + elif b == 0xcb: + self._reserve(8) + obj = struct.unpack_from(">d", self._buffer_view, self._buff_i)[0] + self._buff_i += 8 + elif b == 0xcc: + self._reserve(1) + obj = self._buffer[self._buff_i] + self._buff_i += 1 + elif b == 0xcd: + self._reserve(2) + obj = struct.unpack_from(">H", self._buffer_view, self._buff_i)[0] + self._buff_i += 
2 + elif b == 0xce: + self._reserve(4) + obj = struct.unpack_from(">I", self._buffer_view, self._buff_i)[0] + self._buff_i += 4 + elif b == 0xcf: + self._reserve(8) + obj = struct.unpack_from(">Q", self._buffer_view, self._buff_i)[0] + self._buff_i += 8 + elif b == 0xd0: + self._reserve(1) + obj = struct.unpack_from("b", self._buffer_view, self._buff_i)[0] + self._buff_i += 1 + elif b == 0xd1: + self._reserve(2) + obj = struct.unpack_from(">h", self._buffer_view, self._buff_i)[0] + self._buff_i += 2 + elif b == 0xd2: + self._reserve(4) + obj = struct.unpack_from(">i", self._buffer_view, self._buff_i)[0] + self._buff_i += 4 + elif b == 0xd3: + self._reserve(8) + obj = struct.unpack_from(">q", self._buffer_view, self._buff_i)[0] + self._buff_i += 8 + elif b == 0xd4: # fixext 1 + typ = TYPE_EXT + if self._max_ext_len < 1: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) + self._reserve(2) + n, obj = struct.unpack_from("b1s", self._buffer_view, self._buff_i) + self._buff_i += 2 + elif b == 0xd5: # fixext 2 + typ = TYPE_EXT + if self._max_ext_len < 2: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) + self._reserve(3) + n, obj = struct.unpack_from("b2s", self._buffer_view, self._buff_i) + self._buff_i += 3 + elif b == 0xd6: # fixext 4 + typ = TYPE_EXT + if self._max_ext_len < 4: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) + self._reserve(5) + n, obj = struct.unpack_from("b4s", self._buffer_view, self._buff_i) + self._buff_i += 5 + elif b == 0xd7: # fixext 8 + typ = TYPE_EXT + if self._max_ext_len < 8: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) + self._reserve(9) + n, obj = struct.unpack_from("b8s", self._buffer_view, self._buff_i) + self._buff_i += 9 + elif b == 0xd8: # fixext 16 + typ = TYPE_EXT + if self._max_ext_len < 16: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) + self._reserve(17) + n, obj = struct.unpack_from("b16s", self._buffer_view, self._buff_i) + self._buff_i += 17 + elif b == 0xd9: + typ = TYPE_RAW + self._reserve(1) + n = self._buffer[self._buff_i] + self._buff_i += 1 + if n > self._max_str_len: + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) + elif b == 0xda: + typ = TYPE_RAW + self._reserve(2) + n, = struct.unpack_from(">H", self._buffer_view, self._buff_i) + self._buff_i += 2 + if n > self._max_str_len: + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) + elif b == 0xdb: + typ = TYPE_RAW + self._reserve(4) + n, = struct.unpack_from(">I", self._buffer_view, self._buff_i) + self._buff_i += 4 + if n > self._max_str_len: + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) + elif b == 0xdc: + typ = TYPE_ARRAY + self._reserve(2) + n, = struct.unpack_from(">H", self._buffer_view, self._buff_i) + self._buff_i += 2 + if n > self._max_array_len: + raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + elif b == 0xdd: + typ = TYPE_ARRAY + self._reserve(4) + n, = struct.unpack_from(">I", self._buffer_view, self._buff_i) + self._buff_i += 4 + if n > self._max_array_len: + raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + elif b == 0xde: + self._reserve(2) + n, = struct.unpack_from(">H", self._buffer_view, self._buff_i) + self._buff_i += 2 + if n > self._max_map_len: + raise UnpackValueError("%s exceeds max_map_len(%s)", 
n, self._max_map_len) + typ = TYPE_MAP + elif b == 0xdf: + self._reserve(4) + n, = struct.unpack_from(">I", self._buffer_view, self._buff_i) + self._buff_i += 4 + if n > self._max_map_len: + raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + typ = TYPE_MAP + else: + raise UnpackValueError("Unknown header: 0x%x" % b) + return typ, n, obj + + def _unpack(self, execute=EX_CONSTRUCT): + typ, n, obj = self._read_header(execute) + + if execute == EX_READ_ARRAY_HEADER: + if typ != TYPE_ARRAY: + raise UnpackValueError("Expected array") + return n + if execute == EX_READ_MAP_HEADER: + if typ != TYPE_MAP: + raise UnpackValueError("Expected map") + return n + # TODO should we eliminate the recursion? + if typ == TYPE_ARRAY: + if execute == EX_SKIP: + for i in xrange(n): + # TODO check whether we need to call `list_hook` + self._unpack(EX_SKIP) + return + ret = newlist_hint(n) + for i in xrange(n): + ret.append(self._unpack(EX_CONSTRUCT)) + if self._list_hook is not None: + ret = self._list_hook(ret) + # TODO is the interaction between `list_hook` and `use_list` ok? + return ret if self._use_list else tuple(ret) + if typ == TYPE_MAP: + if execute == EX_SKIP: + for i in xrange(n): + # TODO check whether we need to call hooks + self._unpack(EX_SKIP) + self._unpack(EX_SKIP) + return + if self._object_pairs_hook is not None: + ret = self._object_pairs_hook( + (self._unpack(EX_CONSTRUCT), + self._unpack(EX_CONSTRUCT)) + for _ in xrange(n)) + else: + ret = {} + for _ in xrange(n): + key = self._unpack(EX_CONSTRUCT) + ret[key] = self._unpack(EX_CONSTRUCT) + if self._object_hook is not None: + ret = self._object_hook(ret) + return ret + if execute == EX_SKIP: + return + if typ == TYPE_RAW: + if self._encoding is not None: + obj = obj.decode(self._encoding, self._unicode_errors) + elif self._raw: + obj = bytes(obj) + else: + obj = obj.decode('utf_8') + return obj + if typ == TYPE_EXT: + return self._ext_hook(n, bytes(obj)) + if typ == TYPE_BIN: + return bytes(obj) + assert typ == TYPE_IMMEDIATE + return obj + + def __iter__(self): + return self + + def __next__(self): + try: + ret = self._unpack(EX_CONSTRUCT) + self._consume() + return ret + except OutOfData: + self._consume() + raise StopIteration + + next = __next__ + + def skip(self, write_bytes=None): + self._unpack(EX_SKIP) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) + write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + self._consume() + + def unpack(self, write_bytes=None): + ret = self._unpack(EX_CONSTRUCT) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) + write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + self._consume() + return ret + + def read_array_header(self, write_bytes=None): + ret = self._unpack(EX_READ_ARRAY_HEADER) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) + write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + self._consume() + return ret + + def read_map_header(self, write_bytes=None): + ret = self._unpack(EX_READ_MAP_HEADER) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. 
Use `.tell()` instead.", DeprecationWarning) + write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + self._consume() + return ret + + def tell(self): + return self._stream_offset + + +class Packer(object): + """ + MessagePack Packer + + usage: + + packer = Packer() + astream.write(packer.pack(a)) + astream.write(packer.pack(b)) + + Packer's constructor has some keyword arguments: + + :param callable default: + Convert user type to builtin type that Packer supports. + See also simplejson's document. + + :param bool use_single_float: + Use single precision float type for float. (default: False) + + :param bool autoreset: + Reset buffer after each pack and return its content as `bytes`. (default: True). + If set this to false, use `bytes()` to get content and `.reset()` to clear buffer. + + :param bool use_bin_type: + Use bin type introduced in msgpack spec 2.0 for bytes. + It also enables str8 type for unicode. + + :param bool strict_types: + If set to true, types will be checked to be exact. Derived classes + from serializeable types will not be serialized and will be + treated as unsupported type and forwarded to default. + Additionally tuples will not be serialized as lists. + This is useful when trying to implement accurate serialization + for python types. + + :param str encoding: + (deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8') + + :param str unicode_errors: + Error handler for encoding unicode. (default: 'strict') + """ + def __init__(self, default=None, encoding=None, unicode_errors=None, + use_single_float=False, autoreset=True, use_bin_type=False, + strict_types=False): + if encoding is None: + encoding = 'utf_8' + else: + warnings.warn( + "encoding is deprecated, Use raw=False instead.", + PendingDeprecationWarning) + + if unicode_errors is None: + unicode_errors = 'strict' + + self._strict_types = strict_types + self._use_float = use_single_float + self._autoreset = autoreset + self._use_bin_type = use_bin_type + self._encoding = encoding + self._unicode_errors = unicode_errors + self._buffer = StringIO() + if default is not None: + if not callable(default): + raise TypeError("default must be callable") + self._default = default + + def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, + check=isinstance, check_type_strict=_check_type_strict): + default_used = False + if self._strict_types: + check = check_type_strict + list_types = list + else: + list_types = (list, tuple) + while True: + if nest_limit < 0: + raise PackValueError("recursion limit exceeded") + if obj is None: + return self._buffer.write(b"\xc0") + if check(obj, bool): + if obj: + return self._buffer.write(b"\xc3") + return self._buffer.write(b"\xc2") + if check(obj, int_types): + if 0 <= obj < 0x80: + return self._buffer.write(struct.pack("B", obj)) + if -0x20 <= obj < 0: + return self._buffer.write(struct.pack("b", obj)) + if 0x80 <= obj <= 0xff: + return self._buffer.write(struct.pack("BB", 0xcc, obj)) + if -0x80 <= obj < 0: + return self._buffer.write(struct.pack(">Bb", 0xd0, obj)) + if 0xff < obj <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xcd, obj)) + if -0x8000 <= obj < -0x80: + return self._buffer.write(struct.pack(">Bh", 0xd1, obj)) + if 0xffff < obj <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xce, obj)) + if -0x80000000 <= obj < -0x8000: + return self._buffer.write(struct.pack(">Bi", 0xd2, obj)) + if 0xffffffff < obj <= 0xffffffffffffffff: + return self._buffer.write(struct.pack(">BQ", 0xcf, obj)) + if -0x8000000000000000 <= obj < 
-0x80000000: + return self._buffer.write(struct.pack(">Bq", 0xd3, obj)) + if not default_used and self._default is not None: + obj = self._default(obj) + default_used = True + continue + raise PackOverflowError("Integer value out of range") + if check(obj, (bytes, bytearray)): + n = len(obj) + if n >= 2**32: + raise PackValueError("%s is too large" % type(obj).__name__) + self._pack_bin_header(n) + return self._buffer.write(obj) + if check(obj, Unicode): + if self._encoding is None: + raise TypeError( + "Can't encode unicode string: " + "no encoding is specified") + obj = obj.encode(self._encoding, self._unicode_errors) + n = len(obj) + if n >= 2**32: + raise PackValueError("String is too large") + self._pack_raw_header(n) + return self._buffer.write(obj) + if check(obj, memoryview): + n = len(obj) * obj.itemsize + if n >= 2**32: + raise PackValueError("Memoryview is too large") + self._pack_bin_header(n) + return self._buffer.write(obj) + if check(obj, float): + if self._use_float: + return self._buffer.write(struct.pack(">Bf", 0xca, obj)) + return self._buffer.write(struct.pack(">Bd", 0xcb, obj)) + if check(obj, ExtType): + code = obj.code + data = obj.data + assert isinstance(code, int) + assert isinstance(data, bytes) + L = len(data) + if L == 1: + self._buffer.write(b'\xd4') + elif L == 2: + self._buffer.write(b'\xd5') + elif L == 4: + self._buffer.write(b'\xd6') + elif L == 8: + self._buffer.write(b'\xd7') + elif L == 16: + self._buffer.write(b'\xd8') + elif L <= 0xff: + self._buffer.write(struct.pack(">BB", 0xc7, L)) + elif L <= 0xffff: + self._buffer.write(struct.pack(">BH", 0xc8, L)) + else: + self._buffer.write(struct.pack(">BI", 0xc9, L)) + self._buffer.write(struct.pack("b", code)) + self._buffer.write(data) + return + if check(obj, list_types): + n = len(obj) + self._pack_array_header(n) + for i in xrange(n): + self._pack(obj[i], nest_limit - 1) + return + if check(obj, dict): + return self._pack_map_pairs(len(obj), dict_iteritems(obj), + nest_limit - 1) + if not default_used and self._default is not None: + obj = self._default(obj) + default_used = 1 + continue + raise TypeError("Cannot serialize %r" % (obj, )) + + def pack(self, obj): + try: + self._pack(obj) + except: + self._buffer = StringIO() # force reset + raise + ret = self._buffer.getvalue() + if self._autoreset: + self._buffer = StringIO() + elif USING_STRINGBUILDER: + self._buffer = StringIO(ret) + return ret + + def pack_map_pairs(self, pairs): + self._pack_map_pairs(len(pairs), pairs) + ret = self._buffer.getvalue() + if self._autoreset: + self._buffer = StringIO() + elif USING_STRINGBUILDER: + self._buffer = StringIO(ret) + return ret + + def pack_array_header(self, n): + if n >= 2**32: + raise PackValueError + self._pack_array_header(n) + ret = self._buffer.getvalue() + if self._autoreset: + self._buffer = StringIO() + elif USING_STRINGBUILDER: + self._buffer = StringIO(ret) + return ret + + def pack_map_header(self, n): + if n >= 2**32: + raise PackValueError + self._pack_map_header(n) + ret = self._buffer.getvalue() + if self._autoreset: + self._buffer = StringIO() + elif USING_STRINGBUILDER: + self._buffer = StringIO(ret) + return ret + + def pack_ext_type(self, typecode, data): + if not isinstance(typecode, int): + raise TypeError("typecode must have int type.") + if not 0 <= typecode <= 127: + raise ValueError("typecode should be 0-127") + if not isinstance(data, bytes): + raise TypeError("data must have bytes type") + L = len(data) + if L > 0xffffffff: + raise PackValueError("Too large data") + if L == 1: 
+ self._buffer.write(b'\xd4') + elif L == 2: + self._buffer.write(b'\xd5') + elif L == 4: + self._buffer.write(b'\xd6') + elif L == 8: + self._buffer.write(b'\xd7') + elif L == 16: + self._buffer.write(b'\xd8') + elif L <= 0xff: + self._buffer.write(b'\xc7' + struct.pack('B', L)) + elif L <= 0xffff: + self._buffer.write(b'\xc8' + struct.pack('>H', L)) + else: + self._buffer.write(b'\xc9' + struct.pack('>I', L)) + self._buffer.write(struct.pack('B', typecode)) + self._buffer.write(data) + + def _pack_array_header(self, n): + if n <= 0x0f: + return self._buffer.write(struct.pack('B', 0x90 + n)) + if n <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xdc, n)) + if n <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xdd, n)) + raise PackValueError("Array is too large") + + def _pack_map_header(self, n): + if n <= 0x0f: + return self._buffer.write(struct.pack('B', 0x80 + n)) + if n <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xde, n)) + if n <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xdf, n)) + raise PackValueError("Dict is too large") + + def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT): + self._pack_map_header(n) + for (k, v) in pairs: + self._pack(k, nest_limit - 1) + self._pack(v, nest_limit - 1) + + def _pack_raw_header(self, n): + if n <= 0x1f: + self._buffer.write(struct.pack('B', 0xa0 + n)) + elif self._use_bin_type and n <= 0xff: + self._buffer.write(struct.pack('>BB', 0xd9, n)) + elif n <= 0xffff: + self._buffer.write(struct.pack(">BH", 0xda, n)) + elif n <= 0xffffffff: + self._buffer.write(struct.pack(">BI", 0xdb, n)) + else: + raise PackValueError('Raw is too large') + + def _pack_bin_header(self, n): + if not self._use_bin_type: + return self._pack_raw_header(n) + elif n <= 0xff: + return self._buffer.write(struct.pack('>BB', 0xc4, n)) + elif n <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xc5, n)) + elif n <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xc6, n)) + else: + raise PackValueError('Bin is too large') + + def bytes(self): + return self._buffer.getvalue() + + def reset(self): + self._buffer = StringIO() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/fallback.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/fallback.pyc new file mode 100644 index 0000000..a73971c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/msgpack/fallback.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.py new file mode 100644 index 0000000..7481c9e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.py @@ -0,0 +1,27 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
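The Packer that closes above buffers into a StringIO and, because autoreset defaults to true, returns the packed bytes from each pack() call; a streaming sketch, including an ExtType payload (vendored paths assumed):

import io
from pip._vendor import msgpack
from pip._vendor.msgpack.fallback import Packer

stream = io.BytesIO()
packer = Packer(use_bin_type=True)
stream.write(packer.pack({"take": 1}))                   # one message per pack()
stream.write(packer.pack(msgpack.ExtType(42, b"\x00")))  # application ext type
print(len(stream.getvalue()))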
+from __future__ import absolute_import, division, print_function + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "19.0" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD or Apache License, Version 2.0" +__copyright__ = "Copyright 2014-2019 %s" % __author__ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.pyc new file mode 100644 index 0000000..44b2ce6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__init__.py new file mode 100644 index 0000000..a0cf67d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__init__.py @@ -0,0 +1,26 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +from .__about__ import ( + __author__, + __copyright__, + __email__, + __license__, + __summary__, + __title__, + __uri__, + __version__, +) + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__init__.pyc new file mode 100644 index 0000000..54132ac Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.py new file mode 100644 index 0000000..25da473 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.py @@ -0,0 +1,31 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import sys + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +# flake8: noqa + +if PY3: + string_types = (str,) +else: + string_types = (basestring,) + + +def with_metaclass(meta, *bases): + """ + Create a base class with a metaclass. + """ + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
+ class metaclass(meta): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + + return type.__new__(metaclass, "temporary_class", (), {}) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pyc new file mode 100644 index 0000000..437a12d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.py new file mode 100644 index 0000000..68dcca6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.py @@ -0,0 +1,68 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + + +class Infinity(object): + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = Infinity() + + +class NegativeInfinity(object): + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +NegativeInfinity = NegativeInfinity() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyc new file mode 100644 index 0000000..d590349 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/markers.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/markers.py new file mode 100644 index 0000000..5482476 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/markers.py @@ -0,0 +1,296 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
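_compat.py's with_metaclass, included in full above, builds a temporary metaclass so a single class statement works under both Python 2 and 3; a sketch:

from pip._vendor.packaging._compat import with_metaclass

class Meta(type):
    def __new__(mcls, name, bases, d):
        d["tagged"] = True
        return type.__new__(mcls, name, bases, d)

class Base(with_metaclass(Meta, object)):
    pass

print(Base.tagged)  # True under either interpreter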
+from __future__ import absolute_import, division, print_function + +import operator +import os +import platform +import sys + +from pip._vendor.pyparsing import ParseException, ParseResults, stringStart, stringEnd +from pip._vendor.pyparsing import ZeroOrMore, Group, Forward, QuotedString +from pip._vendor.pyparsing import Literal as L # noqa + +from ._compat import string_types +from .specifiers import Specifier, InvalidSpecifier + + +__all__ = [ + "InvalidMarker", + "UndefinedComparison", + "UndefinedEnvironmentName", + "Marker", + "default_environment", +] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. + """ + + +class Node(object): + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + def __repr__(self): + return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) + + def serialize(self): + raise NotImplementedError + + +class Variable(Node): + def serialize(self): + return str(self) + + +class Value(Node): + def serialize(self): + return '"{0}"'.format(self) + + +class Op(Node): + def serialize(self): + return str(self) + + +VARIABLE = ( + L("implementation_version") + | L("platform_python_implementation") + | L("implementation_name") + | L("python_full_version") + | L("platform_release") + | L("platform_version") + | L("platform_machine") + | L("platform_system") + | L("python_version") + | L("sys_platform") + | L("os_name") + | L("os.name") + | L("sys.platform") # PEP-345 + | L("platform.version") # PEP-345 + | L("platform.machine") # PEP-345 + | L("platform.python_implementation") # PEP-345 + | L("python_implementation") # PEP-345 + | L("extra") # undocumented setuptools legacy +) +ALIASES = { + "os.name": "os_name", + "sys.platform": "sys_platform", + "platform.version": "platform_version", + "platform.machine": "platform_machine", + "platform.python_implementation": "platform_python_implementation", + "python_implementation": "platform_python_implementation", +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) + +VERSION_CMP = ( + L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") +) + +MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) + +MARKER_VALUE = QuotedString("'") | QuotedString('"') +MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) + +BOOLOP = L("and") | L("or") + +MARKER_VAR = VARIABLE | MARKER_VALUE + +MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) +MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) + +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() + +MARKER_EXPR = Forward() +MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) +MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) + +MARKER = stringStart + MARKER_EXPR + stringEnd + + +def _coerce_parse_result(results): + if isinstance(results, ParseResults): + return [_coerce_parse_result(i) for i in results] + else: + return results + + +def _format_marker(marker, first=True): + assert isinstance(marker, (list, tuple, string_types)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. 
In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. + if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs, op, rhs): + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs) + + oper = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison( + "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) + ) + + return oper(lhs, rhs) + + +_undefined = object() + + +def _get_env(environment, name): + value = environment.get(name, _undefined) + + if value is _undefined: + raise UndefinedEnvironmentName( + "{0!r} does not exist in evaluation environment.".format(name) + ) + + return value + + +def _evaluate_markers(markers, environment): + groups = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, string_types)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + lhs_value = _get_env(environment, lhs.value) + rhs_value = rhs.value + else: + lhs_value = lhs.value + rhs_value = _get_env(environment, rhs.value) + + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info): + version = "{0.major}.{0.minor}.{0.micro}".format(info) + kind = info.releaselevel + if kind != "final": + version += kind[0] + str(info.serial) + return version + + +def default_environment(): + if hasattr(sys, "implementation"): + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + else: + iver = "0" + implementation_name = "" + + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": platform.python_version()[:3], + "sys_platform": sys.platform, + } + + +class Marker(object): + def __init__(self, marker): + try: + self._markers = _coerce_parse_result(MARKER.parseString(marker)) + except ParseException as e: + err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( + marker, marker[e.loc : e.loc + 8] + ) + raise InvalidMarker(err_str) + + def __str__(self): + return _format_marker(self._markers) + + def __repr__(self): + return "<Marker({0!r})>".format(str(self)) + + def evaluate(self, environment=None): + """Evaluate a marker. + + Return the boolean from evaluating the given marker against the + environment. 
environment is an optional argument to override all or
+        part of the determined environment.
+
+        The environment is determined from the current Python process.
+        """
+        current_environment = default_environment()
+        if environment is not None:
+            current_environment.update(environment)
+
+        return _evaluate_markers(self._markers, current_environment)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyc
new file mode 100644
index 0000000..c58e9d0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/markers.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.py
new file mode 100644
index 0000000..dbc5f11
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.py
@@ -0,0 +1,138 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from pip._vendor.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from pip._vendor.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from pip._vendor.pyparsing import Literal as L  # noqa
+from pip._vendor.six.moves.urllib import parse as urlparse
+
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+
+class InvalidRequirement(ValueError):
+    """
+    An invalid requirement was found; users should refer to PEP 508.
+ """ + + +ALPHANUM = Word(string.ascii_letters + string.digits) + +LBRACKET = L("[").suppress() +RBRACKET = L("]").suppress() +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() +COMMA = L(",").suppress() +SEMICOLON = L(";").suppress() +AT = L("@").suppress() + +PUNCTUATION = Word("-_.") +IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) +IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) + +NAME = IDENTIFIER("name") +EXTRA = IDENTIFIER + +URI = Regex(r"[^ ]+")("url") +URL = AT + URI + +EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) +EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") + +VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) +VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) + +VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY +VERSION_MANY = Combine( + VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False +)("_raw_spec") +_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") + +VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") +VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) + +MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") +MARKER_EXPR.setParseAction( + lambda s, l, t: Marker(s[t._original_start : t._original_end]) +) +MARKER_SEPARATOR = SEMICOLON +MARKER = MARKER_SEPARATOR + MARKER_EXPR + +VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) +URL_AND_MARKER = URL + Optional(MARKER) + +NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) + +REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd +# pyparsing isn't thread safe during initialization, so we do it eagerly, see +# issue #104 +REQUIREMENT.parseString("x[]") + + +class Requirement(object): + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? 
+ + def __init__(self, requirement_string): + try: + req = REQUIREMENT.parseString(requirement_string) + except ParseException as e: + raise InvalidRequirement( + 'Parse error at "{0!r}": {1}'.format( + requirement_string[e.loc : e.loc + 8], e.msg + ) + ) + + self.name = req.name + if req.url: + parsed_url = urlparse.urlparse(req.url) + if parsed_url.scheme == "file": + if urlparse.urlunparse(parsed_url) != req.url: + raise InvalidRequirement("Invalid URL given") + elif not (parsed_url.scheme and parsed_url.netloc) or ( + not parsed_url.scheme and not parsed_url.netloc + ): + raise InvalidRequirement("Invalid URL: {0}".format(req.url)) + self.url = req.url + else: + self.url = None + self.extras = set(req.extras.asList() if req.extras else []) + self.specifier = SpecifierSet(req.specifier) + self.marker = req.marker if req.marker else None + + def __str__(self): + parts = [self.name] + + if self.extras: + parts.append("[{0}]".format(",".join(sorted(self.extras)))) + + if self.specifier: + parts.append(str(self.specifier)) + + if self.url: + parts.append("@ {0}".format(self.url)) + if self.marker: + parts.append(" ") + + if self.marker: + parts.append("; {0}".format(self.marker)) + + return "".join(parts) + + def __repr__(self): + return "<Requirement({0!r})>".format(str(self)) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyc new file mode 100644 index 0000000..2a02fc1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/requirements.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.py new file mode 100644 index 0000000..743576a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.py @@ -0,0 +1,749 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import abc +import functools +import itertools +import re + +from ._compat import string_types, with_metaclass +from .version import Version, LegacyVersion, parse + + +class InvalidSpecifier(ValueError): + """ + An invalid specifier was found, users should refer to PEP 440. + """ + + +class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): + @abc.abstractmethod + def __str__(self): + """ + Returns the str representation of this Specifier like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self): + """ + Returns a hash value for this Specifier like object. + """ + + @abc.abstractmethod + def __eq__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are equal. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are not equal. + """ + + @abc.abstractproperty + def prereleases(self): + """ + Returns whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @prereleases.setter + def prereleases(self, value): + """ + Sets whether or not pre-releases as a whole are allowed by this + specifier. 
+ """ + + @abc.abstractmethod + def contains(self, item, prereleases=None): + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter(self, iterable, prereleases=None): + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators = {} + + def __init__(self, spec="", prereleases=None): + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) + + self._spec = (match.group("operator").strip(), match.group("version").strip()) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre) + + def __str__(self): + return "{0}{1}".format(*self._spec) + + def __hash__(self): + return hash(self._spec) + + def __eq__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec == other._spec + + def __ne__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + + def _get_operator(self, op): + return getattr(self, "_compare_{0}".format(self._operators[op])) + + def _coerce_version(self, version): + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self): + return self._spec[0] + + @property + def version(self): + return self._spec[1] + + @property + def prereleases(self): + return self._prereleases + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version or LegacyVersion, this allows us to have + # a shortcut for ``"2.0" in Specifier(">=2") + item = self._coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + return self._get_operator(self.operator)(item, self.version) + + def filter(self, iterable, prereleases=None): + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = self._coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later incase nothing + # else matches this specifier. 
+ if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +class LegacySpecifier(_IndividualSpecifier): + + _regex_str = r""" + (?P<operator>(==|!=|<=|>=|<|>)) + \s* + (?P<version> + [^,;\s)]* # Since this is a "legacy" specifier, and the version + # string can be just about anything, we match everything + # except for whitespace, a semi-colon for marker support, + # a closing paren since versions can be enclosed in + # them, and a comma since it's a version separator. + ) + """ + + _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + } + + def _coerce_version(self, version): + if not isinstance(version, LegacyVersion): + version = LegacyVersion(str(version)) + return version + + def _compare_equal(self, prospective, spec): + return prospective == self._coerce_version(spec) + + def _compare_not_equal(self, prospective, spec): + return prospective != self._coerce_version(spec) + + def _compare_less_than_equal(self, prospective, spec): + return prospective <= self._coerce_version(spec) + + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= self._coerce_version(spec) + + def _compare_less_than(self, prospective, spec): + return prospective < self._coerce_version(spec) + + def _compare_greater_than(self, prospective, spec): + return prospective > self._coerce_version(spec) + + +def _require_version_compare(fn): + @functools.wraps(fn) + def wrapped(self, prospective, spec): + if not isinstance(prospective, Version): + return False + return fn(self, prospective, spec) + + return wrapped + + +class Specifier(_IndividualSpecifier): + + _regex_str = r""" + (?P<operator>(~=|==|!=|<=|>=|<|>|===)) + (?P<version> + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s]* # We just match everything, except for whitespace + # since we are only testing for strict identity. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + + # You cannot use a wild card and a dev or local version + # together so group them with a | and make them optional. + (?: + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + | + \.\* # Wild card syntax of .* + )? 
+ ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?<!==|!=|~=) # We have special cases for these + # operators so we want to make sure they + # don't match here. + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + ) + """ + + _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "~=": "compatible", + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + @_require_version_compare + def _compare_compatible(self, prospective, spec): + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore post and dev releases and we want to treat the pre-release as + # it's own separate segment. + prefix = ".".join( + list( + itertools.takewhile( + lambda x: (not x.startswith("post") and not x.startswith("dev")), + _version_split(spec), + ) + )[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + @_require_version_compare + def _compare_equal(self, prospective, spec): + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + prospective = Version(prospective.public) + # Split the spec out by dots, and pretend that there is an implicit + # dot in between a release segment and a pre-release segment. + spec = _version_split(spec[:-2]) # Remove the trailing .* + + # Split the prospective version out by dots, and pretend that there + # is an implicit dot in between a release segment and a pre-release + # segment. + prospective = _version_split(str(prospective)) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + prospective = prospective[: len(spec)] + + # Pad out our two sides with zeros so that they both equal the same + # length. + spec, prospective = _pad_version(spec, prospective) + else: + # Convert our spec string into a Version + spec = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. 
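+            # (e.g. "==1.0" matches "1.0+anything", while "==1.0+local"
+            # matches only that exact local version)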
+ if not spec.local: + prospective = Version(prospective.public) + + return prospective == spec + + @_require_version_compare + def _compare_not_equal(self, prospective, spec): + return not self._compare_equal(prospective, spec) + + @_require_version_compare + def _compare_less_than_equal(self, prospective, spec): + return prospective <= Version(spec) + + @_require_version_compare + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= Version(spec) + + @_require_version_compare + def _compare_less_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is technically greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective, spec): + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self): + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. 
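+            # (so Specifier(">=1.0a1").prereleases is True, while
+            # Specifier(">=1.0").prereleases is False)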
+ if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if parse(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version): + result = [] + for item in version.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _pad_version(left, right): + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) + + # Insert our padding + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) + + return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) + + +class SpecifierSet(BaseSpecifier): + def __init__(self, specifiers="", prereleases=None): + # Split on , to break each indidivual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. + specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Parsed each individual specifier, attempting first to make it a + # Specifier and falling back to a LegacySpecifier. + parsed = set() + for specifier in specifiers: + try: + parsed.add(Specifier(specifier)) + except InvalidSpecifier: + parsed.add(LegacySpecifier(specifier)) + + # Turn our parsed specifiers into a frozen set and save them for later. + self._specs = frozenset(parsed) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "<SpecifierSet({0!r}{1})>".format(str(self), pre) + + def __str__(self): + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self): + return hash(self._specs) + + def __and__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." 
+ ) + + return specifier + + def __eq__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __ne__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + + def __len__(self): + return len(self._specs) + + def __iter__(self): + return iter(self._specs) + + @property + def prereleases(self): + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all(s.contains(item, prereleases=prereleases) for s in self._specs) + + def filter(self, iterable, prereleases=None): + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iterable + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases, and which will filter out LegacyVersion in general. 
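+        # (e.g. SpecifierSet("").filter(["1.0", "2.0a1"]) keeps only "1.0",
+        # but SpecifierSet("").filter(["2.0a1"]) keeps "2.0a1", since no
+        # final release is available)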
+ else: + filtered = [] + found_prereleases = [] + + for item in iterable: + # Ensure that we some kind of Version class for this item. + if not isinstance(item, (LegacyVersion, Version)): + parsed_version = parse(item) + else: + parsed_version = item + + # Filter out any item which is parsed as a LegacyVersion + if isinstance(parsed_version, LegacyVersion): + continue + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return found_prereleases + + return filtered diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyc new file mode 100644 index 0000000..37c7979 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/utils.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/utils.py new file mode 100644 index 0000000..8841878 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/utils.py @@ -0,0 +1,57 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import re + +from .version import InvalidVersion, Version + + +_canonicalize_regex = re.compile(r"[-_.]+") + + +def canonicalize_name(name): + # This is taken from PEP 503. + return _canonicalize_regex.sub("-", name).lower() + + +def canonicalize_version(version): + """ + This is very similar to Version.__str__, but has one subtle differences + with the way it handles the release segment. 
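+
+    For example, "1.0.0" canonicalizes to "1" and "1.0.0.post0" to
+    "1.post0", because trailing zero components of the release segment
+    are stripped.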
+ """ + + try: + version = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + + parts = [] + + # Epoch + if version.epoch != 0: + parts.append("{0}!".format(version.epoch)) + + # Release segment + # NB: This strips trailing '.0's to normalize + parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release))) + + # Pre-release + if version.pre is not None: + parts.append("".join(str(x) for x in version.pre)) + + # Post-release + if version.post is not None: + parts.append(".post{0}".format(version.post)) + + # Development release + if version.dev is not None: + parts.append(".dev{0}".format(version.dev)) + + # Local version segment + if version.local is not None: + parts.append("+{0}".format(version.local)) + + return "".join(parts) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/utils.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/utils.pyc new file mode 100644 index 0000000..0ded898 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/version.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/version.py new file mode 100644 index 0000000..95157a1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/version.py @@ -0,0 +1,420 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import collections +import itertools +import re + +from ._structures import Infinity + + +__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] + + +_Version = collections.namedtuple( + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. 
+ """ + + +class _BaseVersion(object): + def __hash__(self): + return hash(self._key) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) + + def _compare(self, other, method): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return method(self._key, other._key) + + +class LegacyVersion(_BaseVersion): + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + def __str__(self): + return self._version + + def __repr__(self): + return "<LegacyVersion({0})>".format(repr(str(self))) + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def epoch(self): + return -1 + + @property + def release(self): + return None + + @property + def pre(self): + return None + + @property + def post(self): + return None + + @property + def dev(self): + return None + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + def is_postrelease(self): + return False + + @property + def is_devrelease(self): + return False + + +_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) + +_legacy_version_replacement_map = { + "pre": "c", + "preview": "c", + "-": "final-", + "rc": "c", + "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + parts = tuple(parts) + + return epoch, parts + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P<epoch>[0-9]+)!)? # epoch + (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment + (?P<pre> # pre-release + [-_\.]? + (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) + [-_\.]? + (?P<pre_n>[0-9]+)? + )? + (?P<post> # post release + (?:-(?P<post_n1>[0-9]+)) + | + (?: + [-_\.]? + (?P<post_l>post|rev|r) + [-_\.]? + (?P<post_n2>[0-9]+)? + ) + )? + (?P<dev> # dev release + [-_\.]? + (?P<dev_l>dev) + [-_\.]? + (?P<dev_n>[0-9]+)? + )? 
+ ) + (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version +""" + + +class Version(_BaseVersion): + + _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) + + def __init__(self, version): + # Validate the version and parse it into pieces + match = self._regex.search(version) + if not match: + raise InvalidVersion("Invalid version: '{0}'".format(version)) + + # Store the parsed out pieces of the version + self._version = _Version( + epoch=int(match.group("epoch")) if match.group("epoch") else 0, + release=tuple(int(i) for i in match.group("release").split(".")), + pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")), + post=_parse_letter_version( + match.group("post_l"), match.group("post_n1") or match.group("post_n2") + ), + dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")), + local=_parse_local_version(match.group("local")), + ) + + # Generate a key which will be used for sorting + self._key = _cmpkey( + self._version.epoch, + self._version.release, + self._version.pre, + self._version.post, + self._version.dev, + self._version.local, + ) + + def __repr__(self): + return "<Version({0})>".format(repr(str(self))) + + def __str__(self): + parts = [] + + # Epoch + if self.epoch != 0: + parts.append("{0}!".format(self.epoch)) + + # Release segment + parts.append(".".join(str(x) for x in self.release)) + + # Pre-release + if self.pre is not None: + parts.append("".join(str(x) for x in self.pre)) + + # Post-release + if self.post is not None: + parts.append(".post{0}".format(self.post)) + + # Development release + if self.dev is not None: + parts.append(".dev{0}".format(self.dev)) + + # Local version segment + if self.local is not None: + parts.append("+{0}".format(self.local)) + + return "".join(parts) + + @property + def epoch(self): + return self._version.epoch + + @property + def release(self): + return self._version.release + + @property + def pre(self): + return self._version.pre + + @property + def post(self): + return self._version.post[1] if self._version.post else None + + @property + def dev(self): + return self._version.dev[1] if self._version.dev else None + + @property + def local(self): + if self._version.local: + return ".".join(str(x) for x in self._version.local) + else: + return None + + @property + def public(self): + return str(self).split("+", 1)[0] + + @property + def base_version(self): + parts = [] + + # Epoch + if self.epoch != 0: + parts.append("{0}!".format(self.epoch)) + + # Release segment + parts.append(".".join(str(x) for x in self.release)) + + return "".join(parts) + + @property + def is_prerelease(self): + return self.dev is not None or self.pre is not None + + @property + def is_postrelease(self): + return self.post is not None + + @property + def is_devrelease(self): + return self.dev is not None + + +def _parse_letter_version(letter, number): + if letter: + # We consider there to be an implicit 0 in a pre-release if there is + # not a numeral associated with it. + if number is None: + number = 0 + + # We normalize any letters to their lower case form + letter = letter.lower() + + # We consider some words to be alternate spellings of other words and + # in those cases we want to normalize the spellings to our preferred + # spelling. 
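+        # (e.g. "alpha1", "a1" and "a-1" all normalize to the ("a", 1)
+        # pre-release segment)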
+ if letter == "alpha": + letter = "a" + elif letter == "beta": + letter = "b" + elif letter in ["c", "pre", "preview"]: + letter = "rc" + elif letter in ["rev", "r"]: + letter = "post" + + return letter, int(number) + if not letter and number: + # We assume if we are given a number, but we are not given a letter + # then this is using the implicit post release syntax (e.g. 1.0-1) + letter = "post" + + return letter, int(number) + + +_local_version_separators = re.compile(r"[\._-]") + + +def _parse_local_version(local): + """ + Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). + """ + if local is not None: + return tuple( + part.lower() if not part.isdigit() else int(part) + for part in _local_version_separators.split(local) + ) + + +def _cmpkey(epoch, release, pre, post, dev, local): + # When we compare a release version, we want to compare it with all of the + # trailing zeros removed. So we'll use a reverse the list, drop all the now + # leading zeros until we come to something non zero, then take the rest + # re-reverse it back into the correct order and make it a tuple and use + # that for our sorting key. + release = tuple( + reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) + ) + + # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. + # We'll do this by abusing the pre segment, but we _only_ want to do this + # if there is not a pre or a post segment. If we have one of those then + # the normal sorting rules will handle this case correctly. + if pre is None and post is None and dev is not None: + pre = -Infinity + # Versions without a pre-release (except as noted above) should sort after + # those with one. + elif pre is None: + pre = Infinity + + # Versions without a post segment should sort before those with one. + if post is None: + post = -Infinity + + # Versions without a development segment should sort after those with one. + if dev is None: + dev = Infinity + + if local is None: + # Versions without a local segment should sort before those with one. + local = -Infinity + else: + # Versions with a local segment need that segment parsed to implement + # the sorting rules in PEP440. 
+ # - Alpha numeric segments sort before numeric segments + # - Alpha numeric segments sort lexicographically + # - Numeric segments sort numerically + # - Shorter versions sort before longer versions when the prefixes + # match exactly + local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local) + + return epoch, release, pre, post, dev, local diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyc new file mode 100644 index 0000000..b98941a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/packaging/version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/__init__.py new file mode 100644 index 0000000..9c1a098 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/__init__.py @@ -0,0 +1,4 @@ +"""Wrappers to build Python packages using PEP 517 hooks +""" + +__version__ = '0.5.0' diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/__init__.pyc new file mode 100644 index 0000000..71b3b60 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py new file mode 100644 index 0000000..d6524b6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py @@ -0,0 +1,207 @@ +"""This is invoked in a subprocess to call the build backend hooks. + +It expects: +- Command line args: hook_name, control_dir +- Environment variable: PEP517_BUILD_BACKEND=entry.point:spec +- control_dir/input.json: + - {"kwargs": {...}} + +Results: +- control_dir/output.json + - {"return_val": ...} +""" +from glob import glob +from importlib import import_module +import os +from os.path import join as pjoin +import re +import shutil +import sys + +# This is run as a script, not a module, so it can't do a relative import +import compat + + +class BackendUnavailable(Exception): + """Raised if we cannot import the backend""" + + +def _build_backend(): + """Find and load the build backend""" + ep = os.environ['PEP517_BUILD_BACKEND'] + mod_path, _, obj_path = ep.partition(':') + try: + obj = import_module(mod_path) + except ImportError: + raise BackendUnavailable + if obj_path: + for path_part in obj_path.split('.'): + obj = getattr(obj, path_part) + return obj + + +def get_requires_for_build_wheel(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_wheel + except AttributeError: + return [] + else: + return hook(config_settings) + + +def prepare_metadata_for_build_wheel(metadata_directory, config_settings): + """Invoke optional prepare_metadata_for_build_wheel + + Implements a fallback by building a wheel if the hook isn't defined. 
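+
+    (The fallback builds a full wheel and extracts its .dist-info
+    directory; see _get_wheel_metadata_from_wheel below.)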
+ """ + backend = _build_backend() + try: + hook = backend.prepare_metadata_for_build_wheel + except AttributeError: + return _get_wheel_metadata_from_wheel(backend, metadata_directory, + config_settings) + else: + return hook(metadata_directory, config_settings) + + +WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' + + +def _dist_info_files(whl_zip): + """Identify the .dist-info folder inside a wheel ZipFile.""" + res = [] + for path in whl_zip.namelist(): + m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) + if m: + res.append(path) + if res: + return res + raise Exception("No .dist-info folder found in wheel") + + +def _get_wheel_metadata_from_wheel( + backend, metadata_directory, config_settings): + """Build a wheel and extract the metadata from it. + + Fallback for when the build backend does not + define the 'get_wheel_metadata' hook. + """ + from zipfile import ZipFile + whl_basename = backend.build_wheel(metadata_directory, config_settings) + with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): + pass # Touch marker file + + whl_file = os.path.join(metadata_directory, whl_basename) + with ZipFile(whl_file) as zipf: + dist_info = _dist_info_files(zipf) + zipf.extractall(path=metadata_directory, members=dist_info) + return dist_info[0].split('/')[0] + + +def _find_already_built_wheel(metadata_directory): + """Check for a wheel already built during the get_wheel_metadata hook. + """ + if not metadata_directory: + return None + metadata_parent = os.path.dirname(metadata_directory) + if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): + return None + + whl_files = glob(os.path.join(metadata_parent, '*.whl')) + if not whl_files: + print('Found wheel built marker, but no .whl files') + return None + if len(whl_files) > 1: + print('Found multiple .whl files; unspecified behaviour. ' + 'Will call build_wheel.') + return None + + # Exactly one .whl file + return whl_files[0] + + +def build_wheel(wheel_directory, config_settings, metadata_directory=None): + """Invoke the mandatory build_wheel hook. + + If a wheel was already built in the + prepare_metadata_for_build_wheel fallback, this + will copy it rather than rebuilding the wheel. + """ + prebuilt_whl = _find_already_built_wheel(metadata_directory) + if prebuilt_whl: + shutil.copy2(prebuilt_whl, wheel_directory) + return os.path.basename(prebuilt_whl) + + return _build_backend().build_wheel(wheel_directory, config_settings, + metadata_directory) + + +def get_requires_for_build_sdist(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. 
+ """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_sdist + except AttributeError: + return [] + else: + return hook(config_settings) + + +class _DummyException(Exception): + """Nothing should ever raise this exception""" + + +class GotUnsupportedOperation(Exception): + """For internal use when backend raises UnsupportedOperation""" + + +def build_sdist(sdist_directory, config_settings): + """Invoke the mandatory build_sdist hook.""" + backend = _build_backend() + try: + return backend.build_sdist(sdist_directory, config_settings) + except getattr(backend, 'UnsupportedOperation', _DummyException): + raise GotUnsupportedOperation + + +HOOK_NAMES = { + 'get_requires_for_build_wheel', + 'prepare_metadata_for_build_wheel', + 'build_wheel', + 'get_requires_for_build_sdist', + 'build_sdist', +} + + +def main(): + if len(sys.argv) < 3: + sys.exit("Needs args: hook_name, control_dir") + hook_name = sys.argv[1] + control_dir = sys.argv[2] + if hook_name not in HOOK_NAMES: + sys.exit("Unknown hook: %s" % hook_name) + hook = globals()[hook_name] + + hook_input = compat.read_json(pjoin(control_dir, 'input.json')) + + json_out = {'unsupported': False, 'return_val': None} + try: + json_out['return_val'] = hook(**hook_input['kwargs']) + except BackendUnavailable: + json_out['no_backend'] = True + except GotUnsupportedOperation: + json_out['unsupported'] = True + + compat.write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) + + +if __name__ == '__main__': + main() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.pyc new file mode 100644 index 0000000..df5c36b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/build.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/build.py new file mode 100644 index 0000000..ac6c949 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/build.py @@ -0,0 +1,108 @@ +"""Build a project using PEP 517 hooks. +""" +import argparse +import logging +import os +import contextlib +from pip._vendor import pytoml +import shutil +import errno +import tempfile + +from .envbuild import BuildEnvironment +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def tempdir(): + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + + +def _do_build(hooks, env, dist, dest): + get_requires_name = 'get_requires_for_build_{dist}'.format(**locals()) + get_requires = getattr(hooks, get_requires_name) + reqs = get_requires({}) + log.info('Got build requires: %s', reqs) + + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + + with tempdir() as td: + log.info('Trying to build %s in %s', dist, td) + build_name = 'build_{dist}'.format(**locals()) + build = getattr(hooks, build_name) + filename = build(td, {}) + source = os.path.join(td, filename) + shutil.move(source, os.path.join(dest, os.path.basename(filename))) + + +def mkdir_p(*args, **kwargs): + """Like `mkdir`, but does not raise an exception if the + directory already exists. 
+ """ + try: + return os.mkdir(*args, **kwargs) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + +def build(source_dir, dist, dest=None): + pyproject = os.path.join(source_dir, 'pyproject.toml') + dest = os.path.join(source_dir, dest or 'dist') + mkdir_p(dest) + + with open(pyproject) as f: + pyproject_data = pytoml.load(f) + # Ensure the mandatory data can be loaded + buildsys = pyproject_data['build-system'] + requires = buildsys['requires'] + backend = buildsys['build-backend'] + + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + _do_build(hooks, env, dist, dest) + + +parser = argparse.ArgumentParser() +parser.add_argument( + 'source_dir', + help="A directory containing pyproject.toml", +) +parser.add_argument( + '--binary', '-b', + action='store_true', + default=False, +) +parser.add_argument( + '--source', '-s', + action='store_true', + default=False, +) +parser.add_argument( + '--out-dir', '-o', + help="Destination in which to save the builds relative to source dir", +) + + +def main(args): + # determine which dists to build + dists = list(filter(None, ( + 'sdist' if args.source or not args.binary else None, + 'wheel' if args.binary or not args.source else None, + ))) + + for dist in dists: + build(args.source_dir, dist, args.out_dir) + + +if __name__ == '__main__': + main(parser.parse_args()) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/build.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/build.pyc new file mode 100644 index 0000000..ccf728d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/build.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/check.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/check.py new file mode 100644 index 0000000..f4cdc6b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/check.py @@ -0,0 +1,202 @@ +"""Check a project and backend by attempting to build using PEP 517 hooks. 
+""" +import argparse +import logging +import os +from os.path import isfile, join as pjoin +from pip._vendor.pytoml import TomlError, load as toml_load +import shutil +from subprocess import CalledProcessError +import sys +import tarfile +from tempfile import mkdtemp +import zipfile + +from .colorlog import enable_colourful_output +from .envbuild import BuildEnvironment +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + + +def check_build_sdist(hooks, build_sys_requires): + with BuildEnvironment() as env: + try: + env.pip_install(build_sys_requires) + log.info('Installed static build dependencies') + except CalledProcessError: + log.error('Failed to install static build dependencies') + return False + + try: + reqs = hooks.get_requires_for_build_sdist({}) + log.info('Got build requires: %s', reqs) + except Exception: + log.error('Failure in get_requires_for_build_sdist', exc_info=True) + return False + + try: + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + except CalledProcessError: + log.error('Failed to install dynamic build dependencies') + return False + + td = mkdtemp() + log.info('Trying to build sdist in %s', td) + try: + try: + filename = hooks.build_sdist(td, {}) + log.info('build_sdist returned %r', filename) + except Exception: + log.info('Failure in build_sdist', exc_info=True) + return False + + if not filename.endswith('.tar.gz'): + log.error( + "Filename %s doesn't have .tar.gz extension", filename) + return False + + path = pjoin(td, filename) + if isfile(path): + log.info("Output file %s exists", path) + else: + log.error("Output file %s does not exist", path) + return False + + if tarfile.is_tarfile(path): + log.info("Output file is a tar file") + else: + log.error("Output file is not a tar file") + return False + + finally: + shutil.rmtree(td) + + return True + + +def check_build_wheel(hooks, build_sys_requires): + with BuildEnvironment() as env: + try: + env.pip_install(build_sys_requires) + log.info('Installed static build dependencies') + except CalledProcessError: + log.error('Failed to install static build dependencies') + return False + + try: + reqs = hooks.get_requires_for_build_wheel({}) + log.info('Got build requires: %s', reqs) + except Exception: + log.error('Failure in get_requires_for_build_sdist', exc_info=True) + return False + + try: + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + except CalledProcessError: + log.error('Failed to install dynamic build dependencies') + return False + + td = mkdtemp() + log.info('Trying to build wheel in %s', td) + try: + try: + filename = hooks.build_wheel(td, {}) + log.info('build_wheel returned %r', filename) + except Exception: + log.info('Failure in build_wheel', exc_info=True) + return False + + if not filename.endswith('.whl'): + log.error("Filename %s doesn't have .whl extension", filename) + return False + + path = pjoin(td, filename) + if isfile(path): + log.info("Output file %s exists", path) + else: + log.error("Output file %s does not exist", path) + return False + + if zipfile.is_zipfile(path): + log.info("Output file is a zip file") + else: + log.error("Output file is not a zip file") + return False + + finally: + shutil.rmtree(td) + + return True + + +def check(source_dir): + pyproject = pjoin(source_dir, 'pyproject.toml') + if isfile(pyproject): + log.info('Found pyproject.toml') + else: + log.error('Missing pyproject.toml') + return False + + try: + with open(pyproject) as f: + pyproject_data = toml_load(f) + # Ensure 
the mandatory data can be loaded + buildsys = pyproject_data['build-system'] + requires = buildsys['requires'] + backend = buildsys['build-backend'] + log.info('Loaded pyproject.toml') + except (TomlError, KeyError): + log.error("Invalid pyproject.toml", exc_info=True) + return False + + hooks = Pep517HookCaller(source_dir, backend) + + sdist_ok = check_build_sdist(hooks, requires) + wheel_ok = check_build_wheel(hooks, requires) + + if not sdist_ok: + log.warning('Sdist checks failed; scroll up to see') + if not wheel_ok: + log.warning('Wheel checks failed') + + return sdist_ok + + +def main(argv=None): + ap = argparse.ArgumentParser() + ap.add_argument( + 'source_dir', + help="A directory containing pyproject.toml") + args = ap.parse_args(argv) + + enable_colourful_output() + + ok = check(args.source_dir) + + if ok: + print(ansi('Checks passed', 'green')) + else: + print(ansi('Checks failed', 'red')) + sys.exit(1) + + +ansi_codes = { + 'reset': '\x1b[0m', + 'bold': '\x1b[1m', + 'red': '\x1b[31m', + 'green': '\x1b[32m', +} + + +def ansi(s, attr): + if os.name != 'nt' and sys.stdout.isatty(): + return ansi_codes[attr] + str(s) + ansi_codes['reset'] + else: + return str(s) + + +if __name__ == '__main__': + main() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/check.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/check.pyc new file mode 100644 index 0000000..80f74f3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/check.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/colorlog.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/colorlog.py new file mode 100644 index 0000000..69c8a59 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/colorlog.py @@ -0,0 +1,115 @@ +"""Nicer log formatting with colours. + +Code copied from Tornado, Apache licensed. +""" +# Copyright 2012 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import sys + +try: + import curses +except ImportError: + curses = None + + +def _stderr_supports_color(): + color = False + if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): + try: + curses.setupterm() + if curses.tigetnum("colors") > 0: + color = True + except Exception: + pass + return color + + +class LogFormatter(logging.Formatter): + """Log formatter with colour support + """ + DEFAULT_COLORS = { + logging.INFO: 2, # Green + logging.WARNING: 3, # Yellow + logging.ERROR: 1, # Red + logging.CRITICAL: 1, + } + + def __init__(self, color=True, datefmt=None): + r""" + :arg bool color: Enables color support. + :arg string fmt: Log message format. + It will be applied to the attributes dict of log records. The + text between ``%(color)s`` and ``%(end_color)s`` will be colored + depending on the level if color support is on. + :arg dict colors: color mappings from logging level to terminal color + code + :arg string datefmt: Datetime format. 
+ Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. + .. versionchanged:: 3.2 + Added ``fmt`` and ``datefmt`` arguments. + """ + logging.Formatter.__init__(self, datefmt=datefmt) + self._colors = {} + if color and _stderr_supports_color(): + # The curses module has some str/bytes confusion in + # python3. Until version 3.2.3, most methods return + # bytes, but only accept strings. In addition, we want to + # output these strings with the logging module, which + # works with unicode strings. The explicit calls to + # unicode() below are harmless in python2 but will do the + # right conversion in python 3. + fg_color = (curses.tigetstr("setaf") or + curses.tigetstr("setf") or "") + if (3, 0) < sys.version_info < (3, 2, 3): + fg_color = str(fg_color, "ascii") + + for levelno, code in self.DEFAULT_COLORS.items(): + self._colors[levelno] = str( + curses.tparm(fg_color, code), "ascii") + self._normal = str(curses.tigetstr("sgr0"), "ascii") + + scr = curses.initscr() + self.termwidth = scr.getmaxyx()[1] + curses.endwin() + else: + self._normal = '' + # Default width is usually 80, but too wide is + # worse than too narrow + self.termwidth = 70 + + def formatMessage(self, record): + mlen = len(record.message) + right_text = '{initial}-{name}'.format(initial=record.levelname[0], + name=record.name) + if mlen + len(right_text) < self.termwidth: + space = ' ' * (self.termwidth - (mlen + len(right_text))) + else: + space = ' ' + + if record.levelno in self._colors: + start_color = self._colors[record.levelno] + end_color = self._normal + else: + start_color = end_color = '' + + return record.message + space + start_color + right_text + end_color + + +def enable_colourful_output(level=logging.INFO): + handler = logging.StreamHandler() + handler.setFormatter(LogFormatter()) + logging.root.addHandler(handler) + logging.root.setLevel(level) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/colorlog.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/colorlog.pyc new file mode 100644 index 0000000..777a7ef Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/colorlog.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/compat.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/compat.py new file mode 100644 index 0000000..01c66fc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/compat.py @@ -0,0 +1,23 @@ +"""Handle reading and writing JSON in UTF-8, on Python 3 and 2.""" +import json +import sys + +if sys.version_info[0] >= 3: + # Python 3 + def write_json(obj, path, **kwargs): + with open(path, 'w', encoding='utf-8') as f: + json.dump(obj, f, **kwargs) + + def read_json(path): + with open(path, 'r', encoding='utf-8') as f: + return json.load(f) + +else: + # Python 2 + def write_json(obj, path, **kwargs): + with open(path, 'wb') as f: + json.dump(obj, f, encoding='utf-8', **kwargs) + + def read_json(path): + with open(path, 'rb') as f: + return json.load(f) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/compat.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/compat.pyc new file mode 100644 index 0000000..203f8c9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/envbuild.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/envbuild.py new file mode 100644 index 
0000000..f7ac5f4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/envbuild.py @@ -0,0 +1,158 @@ +"""Build wheels/sdists by installing build deps to a temporary environment. +""" + +import os +import logging +from pip._vendor import pytoml +import shutil +from subprocess import check_call +import sys +from sysconfig import get_paths +from tempfile import mkdtemp + +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + + +def _load_pyproject(source_dir): + with open(os.path.join(source_dir, 'pyproject.toml')) as f: + pyproject_data = pytoml.load(f) + buildsys = pyproject_data['build-system'] + return buildsys['requires'], buildsys['build-backend'] + + +class BuildEnvironment(object): + """Context manager to install build deps in a simple temporary environment + + Based on code I wrote for pip, which is MIT licensed. + """ + # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file) + # + # Permission is hereby granted, free of charge, to any person obtaining + # a copy of this software and associated documentation files (the + # "Software"), to deal in the Software without restriction, including + # without limitation the rights to use, copy, modify, merge, publish, + # distribute, sublicense, and/or sell copies of the Software, and to + # permit persons to whom the Software is furnished to do so, subject to + # the following conditions: + # + # The above copyright notice and this permission notice shall be + # included in all copies or substantial portions of the Software. + # + # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
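As a point of reference for _load_pyproject() above: it reads the PEP 518 [build-system] table. A pyproject.toml with illustrative values would look like:

    [build-system]
    requires = ["setuptools >= 40.8.0", "wheel"]
    build-backend = "setuptools.build_meta"

and _load_pyproject() would return (["setuptools >= 40.8.0", "wheel"], "setuptools.build_meta") for it.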
+ + path = None + + def __init__(self, cleanup=True): + self._cleanup = cleanup + + def __enter__(self): + self.path = mkdtemp(prefix='pep517-build-env-') + log.info('Temporary build environment: %s', self.path) + + self.save_path = os.environ.get('PATH', None) + self.save_pythonpath = os.environ.get('PYTHONPATH', None) + + install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' + install_dirs = get_paths(install_scheme, vars={ + 'base': self.path, + 'platbase': self.path, + }) + + scripts = install_dirs['scripts'] + if self.save_path: + os.environ['PATH'] = scripts + os.pathsep + self.save_path + else: + os.environ['PATH'] = scripts + os.pathsep + os.defpath + + if install_dirs['purelib'] == install_dirs['platlib']: + lib_dirs = install_dirs['purelib'] + else: + lib_dirs = install_dirs['purelib'] + os.pathsep + \ + install_dirs['platlib'] + if self.save_pythonpath: + os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ + self.save_pythonpath + else: + os.environ['PYTHONPATH'] = lib_dirs + + return self + + def pip_install(self, reqs): + """Install dependencies into this env by calling pip in a subprocess""" + if not reqs: + return + log.info('Calling pip to install %s', reqs) + check_call([ + sys.executable, '-m', 'pip', 'install', '--ignore-installed', + '--prefix', self.path] + list(reqs)) + + def __exit__(self, exc_type, exc_val, exc_tb): + needs_cleanup = ( + self._cleanup and + self.path is not None and + os.path.isdir(self.path) + ) + if needs_cleanup: + shutil.rmtree(self.path) + + if self.save_path is None: + os.environ.pop('PATH', None) + else: + os.environ['PATH'] = self.save_path + + if self.save_pythonpath is None: + os.environ.pop('PYTHONPATH', None) + else: + os.environ['PYTHONPATH'] = self.save_pythonpath + + +def build_wheel(source_dir, wheel_dir, config_settings=None): + """Build a wheel from a source directory using PEP 517 hooks. + + :param str source_dir: Source directory containing pyproject.toml + :param str wheel_dir: Target directory to create wheel in + :param dict config_settings: Options to pass to build backend + + This is a blocking function which will run pip in a subprocess to install + build requirements. + """ + if config_settings is None: + config_settings = {} + requires, backend = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + reqs = hooks.get_requires_for_build_wheel(config_settings) + env.pip_install(reqs) + return hooks.build_wheel(wheel_dir, config_settings) + + +def build_sdist(source_dir, sdist_dir, config_settings=None): + """Build an sdist from a source directory using PEP 517 hooks. + + :param str source_dir: Source directory containing pyproject.toml + :param str sdist_dir: Target directory to place sdist in + :param dict config_settings: Options to pass to build backend + + This is a blocking function which will run pip in a subprocess to install + build requirements. 
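A sketch of calling the two module-level helpers, build_wheel() above and build_sdist() here (the paths are hypothetical):

    from pip._vendor.pep517.envbuild import build_wheel, build_sdist

    whl_name = build_wheel('path/to/project', 'dist')     # returns the wheel's filename
    sdist_name = build_sdist('path/to/project', 'dist')   # returns the sdist's filename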
+ """ + if config_settings is None: + config_settings = {} + requires, backend = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + reqs = hooks.get_requires_for_build_sdist(config_settings) + env.pip_install(reqs) + return hooks.build_sdist(sdist_dir, config_settings) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/envbuild.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/envbuild.pyc new file mode 100644 index 0000000..934c18b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/envbuild.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.py new file mode 100644 index 0000000..b14b899 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.py @@ -0,0 +1,163 @@ +from contextlib import contextmanager +import os +from os.path import dirname, abspath, join as pjoin +import shutil +from subprocess import check_call +import sys +from tempfile import mkdtemp + +from . import compat + +_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py') + + +@contextmanager +def tempdir(): + td = mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + + +class BackendUnavailable(Exception): + """Will be raised if the backend cannot be imported in the hook process.""" + + +class UnsupportedOperation(Exception): + """May be raised by build_sdist if the backend indicates that it can't.""" + + +def default_subprocess_runner(cmd, cwd=None, extra_environ=None): + """The default method of calling the wrapper subprocess.""" + env = os.environ.copy() + if extra_environ: + env.update(extra_environ) + + check_call(cmd, cwd=cwd, env=env) + + +class Pep517HookCaller(object): + """A wrapper around a source directory to be built with a PEP 517 backend. + + source_dir : The path to the source directory, containing pyproject.toml. + backend : The build backend spec, as per PEP 517, from pyproject.toml. + """ + def __init__(self, source_dir, build_backend): + self.source_dir = abspath(source_dir) + self.build_backend = build_backend + self._subprocess_runner = default_subprocess_runner + + # TODO: Is this over-engineered? Maybe frontends only need to + # set this when creating the wrapper, not on every call. + @contextmanager + def subprocess_runner(self, runner): + prev = self._subprocess_runner + self._subprocess_runner = runner + yield + self._subprocess_runner = prev + + def get_requires_for_build_wheel(self, config_settings=None): + """Identify packages required for building a wheel + + Returns a list of dependency specifications, e.g.: + ["wheel >= 0.25", "setuptools"] + + This does not include requirements specified in pyproject.toml. + It returns the result of calling the equivalently named hook in a + subprocess. + """ + return self._call_hook('get_requires_for_build_wheel', { + 'config_settings': config_settings + }) + + def prepare_metadata_for_build_wheel( + self, metadata_directory, config_settings=None): + """Prepare a *.dist-info folder with metadata for this project. + + Returns the name of the newly created folder. + + If the build backend defines a hook with this name, it will be called + in a subprocess. If not, the backend will be asked to build a wheel, + and the dist-info extracted from that. 
+ """ + return self._call_hook('prepare_metadata_for_build_wheel', { + 'metadata_directory': abspath(metadata_directory), + 'config_settings': config_settings, + }) + + def build_wheel( + self, wheel_directory, config_settings=None, + metadata_directory=None): + """Build a wheel from this project. + + Returns the name of the newly created file. + + In general, this will call the 'build_wheel' hook in the backend. + However, if that was previously called by + 'prepare_metadata_for_build_wheel', and the same metadata_directory is + used, the previously built wheel will be copied to wheel_directory. + """ + if metadata_directory is not None: + metadata_directory = abspath(metadata_directory) + return self._call_hook('build_wheel', { + 'wheel_directory': abspath(wheel_directory), + 'config_settings': config_settings, + 'metadata_directory': metadata_directory, + }) + + def get_requires_for_build_sdist(self, config_settings=None): + """Identify packages required for building a wheel + + Returns a list of dependency specifications, e.g.: + ["setuptools >= 26"] + + This does not include requirements specified in pyproject.toml. + It returns the result of calling the equivalently named hook in a + subprocess. + """ + return self._call_hook('get_requires_for_build_sdist', { + 'config_settings': config_settings + }) + + def build_sdist(self, sdist_directory, config_settings=None): + """Build an sdist from this project. + + Returns the name of the newly created file. + + This calls the 'build_sdist' backend hook in a subprocess. + """ + return self._call_hook('build_sdist', { + 'sdist_directory': abspath(sdist_directory), + 'config_settings': config_settings, + }) + + def _call_hook(self, hook_name, kwargs): + # On Python 2, pytoml returns Unicode values (which is correct) but the + # environment passed to check_call needs to contain string values. We + # convert here by encoding using ASCII (the backend can only contain + # letters, digits and _, . and : characters, and will be used as a + # Python identifier, so non-ASCII content is wrong on Python 2 in + # any case). + if sys.version_info[0] == 2: + build_backend = self.build_backend.encode('ASCII') + else: + build_backend = self.build_backend + + with tempdir() as td: + compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'), + indent=2) + + # Run the hook in a subprocess + self._subprocess_runner( + [sys.executable, _in_proc_script, hook_name, td], + cwd=self.source_dir, + extra_environ={'PEP517_BUILD_BACKEND': build_backend} + ) + + data = compat.read_json(pjoin(td, 'output.json')) + if data.get('unsupported'): + raise UnsupportedOperation + if data.get('no_backend'): + raise BackendUnavailable + return data['return_val'] diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.pyc new file mode 100644 index 0000000..39a0d18 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py new file mode 100644 index 0000000..9c4fd8e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py @@ -0,0 +1,3171 @@ +# coding: utf-8 +""" +Package resource API +-------------------- + +A resource is a logical file contained within a package, or a logical +subdirectory thereof. 
The package resource API expects resource names +to have their path parts separated with ``/``, *not* whatever the local +path separator is. Do not use os.path operations to manipulate resource +names being passed into the API. + +The package resource API is designed to work with normal filesystem packages, +.egg files, and unpacked .egg files. It can also work in a limited way with +.zip files and with custom PEP 302 loaders that support the ``get_data()`` +method. +""" + +from __future__ import absolute_import + +import sys +import os +import io +import time +import re +import types +import zipfile +import zipimport +import warnings +import stat +import functools +import pkgutil +import operator +import platform +import collections +import plistlib +import email.parser +import errno +import tempfile +import textwrap +import itertools +import inspect +from pkgutil import get_importer + +try: + import _imp +except ImportError: + # Python 3.2 compatibility + import imp as _imp + +try: + FileExistsError +except NameError: + FileExistsError = OSError + +from pip._vendor import six +from pip._vendor.six.moves import urllib, map, filter + +# capture these to bypass sandboxing +from os import utime +try: + from os import mkdir, rename, unlink + WRITE_SUPPORT = True +except ImportError: + # no write support, probably under GAE + WRITE_SUPPORT = False + +from os import open as os_open +from os.path import isdir, split + +try: + import importlib.machinery as importlib_machinery + # access attribute to force import under delayed import mechanisms. + importlib_machinery.__name__ +except ImportError: + importlib_machinery = None + +from . import py31compat +from pip._vendor import appdirs +from pip._vendor import packaging +__import__('pip._vendor.packaging.version') +__import__('pip._vendor.packaging.specifiers') +__import__('pip._vendor.packaging.requirements') +__import__('pip._vendor.packaging.markers') + + +__metaclass__ = type + + +if (3, 0) < sys.version_info < (3, 4): + raise RuntimeError("Python 3.4 or later is required") + +if six.PY2: + # Those builtin exceptions are only defined in Python 3 + PermissionError = None + NotADirectoryError = None + +# declare some globals that will be defined later to +# satisfy the linters. +require = None +working_set = None +add_activation_listener = None +resources_stream = None +cleanup_resources = None +resource_dir = None +resource_stream = None +set_extraction_path = None +resource_isdir = None +resource_string = None +iter_entry_points = None +resource_listdir = None +resource_filename = None +resource_exists = None +_distribution_finders = None +_namespace_handlers = None +_namespace_packages = None + + +class PEP440Warning(RuntimeWarning): + """ + Used when there is an issue with a version or specifier not complying with + PEP 440. 
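For instance (version strings illustrative), the parse_version() helper defined just below returns a PEP 440 Version where it can, and falls back to LegacyVersion otherwise:

    parse_version('1.4.2')        # -> Version('1.4.2')
    parse_version('2.0-alpha@3')  # not valid PEP 440 -> LegacyVersion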
+ """ + + +def parse_version(v): + try: + return packaging.version.Version(v) + except packaging.version.InvalidVersion: + return packaging.version.LegacyVersion(v) + + +_state_vars = {} + + +def _declare_state(vartype, **kw): + globals().update(kw) + _state_vars.update(dict.fromkeys(kw, vartype)) + + +def __getstate__(): + state = {} + g = globals() + for k, v in _state_vars.items(): + state[k] = g['_sget_' + v](g[k]) + return state + + +def __setstate__(state): + g = globals() + for k, v in state.items(): + g['_sset_' + _state_vars[k]](k, g[k], v) + return state + + +def _sget_dict(val): + return val.copy() + + +def _sset_dict(key, ob, state): + ob.clear() + ob.update(state) + + +def _sget_object(val): + return val.__getstate__() + + +def _sset_object(key, ob, state): + ob.__setstate__(state) + + +_sget_none = _sset_none = lambda *args: None + + +def get_supported_platform(): + """Return this platform's maximum compatible version. + + distutils.util.get_platform() normally reports the minimum version + of Mac OS X that would be required to *use* extensions produced by + distutils. But what we want when checking compatibility is to know the + version of Mac OS X that we are *running*. To allow usage of packages that + explicitly require a newer version of Mac OS X, we must also know the + current version of the OS. + + If this condition occurs for any other platform with a version in its + platform strings, this function should be extended accordingly. + """ + plat = get_build_platform() + m = macosVersionString.match(plat) + if m is not None and sys.platform == "darwin": + try: + plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) + except ValueError: + # not Mac OS X + pass + return plat + + +__all__ = [ + # Basic resource access and distribution/entry point discovery + 'require', 'run_script', 'get_provider', 'get_distribution', + 'load_entry_point', 'get_entry_map', 'get_entry_info', + 'iter_entry_points', + 'resource_string', 'resource_stream', 'resource_filename', + 'resource_listdir', 'resource_exists', 'resource_isdir', + + # Environmental control + 'declare_namespace', 'working_set', 'add_activation_listener', + 'find_distributions', 'set_extraction_path', 'cleanup_resources', + 'get_default_cache', + + # Primary implementation classes + 'Environment', 'WorkingSet', 'ResourceManager', + 'Distribution', 'Requirement', 'EntryPoint', + + # Exceptions + 'ResolutionError', 'VersionConflict', 'DistributionNotFound', + 'UnknownExtra', 'ExtractionError', + + # Warnings + 'PEP440Warning', + + # Parsing functions and string utilities + 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', + 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', + 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', + + # filesystem utilities + 'ensure_directory', 'normalize_path', + + # Distribution "precedence" constants + 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', + + # "Provider" interfaces, implementations, and registration/lookup APIs + 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', + 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', + 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', + 'register_finder', 'register_namespace_handler', 'register_loader_type', + 'fixup_namespace_packages', 'get_importer', + + # Warnings + 'PkgResourcesDeprecationWarning', + + # Deprecated/backward compatibility only + 'run_main', 'AvailableDistributions', +] + + +class 
ResolutionError(Exception): + """Abstract base for dependency resolution errors""" + + def __repr__(self): + return self.__class__.__name__ + repr(self.args) + + +class VersionConflict(ResolutionError): + """ + An already-installed version conflicts with the requested version. + + Should be initialized with the installed Distribution and the requested + Requirement. + """ + + _template = "{self.dist} is installed but {self.req} is required" + + @property + def dist(self): + return self.args[0] + + @property + def req(self): + return self.args[1] + + def report(self): + return self._template.format(**locals()) + + def with_context(self, required_by): + """ + If required_by is non-empty, return a version of self that is a + ContextualVersionConflict. + """ + if not required_by: + return self + args = self.args + (required_by,) + return ContextualVersionConflict(*args) + + +class ContextualVersionConflict(VersionConflict): + """ + A VersionConflict that accepts a third parameter, the set of the + requirements that required the installed Distribution. + """ + + _template = VersionConflict._template + ' by {self.required_by}' + + @property + def required_by(self): + return self.args[2] + + +class DistributionNotFound(ResolutionError): + """A requested distribution was not found""" + + _template = ("The '{self.req}' distribution was not found " + "and is required by {self.requirers_str}") + + @property + def req(self): + return self.args[0] + + @property + def requirers(self): + return self.args[1] + + @property + def requirers_str(self): + if not self.requirers: + return 'the application' + return ', '.join(self.requirers) + + def report(self): + return self._template.format(**locals()) + + def __str__(self): + return self.report() + + +class UnknownExtra(ResolutionError): + """Distribution doesn't have an "extra feature" of the given name""" + + +_provider_factories = {} + +PY_MAJOR = sys.version[:3] +EGG_DIST = 3 +BINARY_DIST = 2 +SOURCE_DIST = 1 +CHECKOUT_DIST = 0 +DEVELOP_DIST = -1 + + +def register_loader_type(loader_type, provider_factory): + """Register `provider_factory` to make providers for `loader_type` + + `loader_type` is the type or class of a PEP 302 ``module.__loader__``, + and `provider_factory` is a function that, passed a *module* object, + returns an ``IResourceProvider`` for that module. 
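An illustrative registration (MyLoader and the factory are hypothetical; DefaultProvider._register() later in this file performs the real equivalent for the standard filesystem loaders):

    class MyLoader(object):              # hypothetical PEP 302 loader type
        pass

    def my_provider_factory(module):
        return DefaultProvider(module)   # DefaultProvider is defined below

    register_loader_type(MyLoader, my_provider_factory)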
+ """ + _provider_factories[loader_type] = provider_factory + + +def get_provider(moduleOrReq): + """Return an IResourceProvider for the named module or requirement""" + if isinstance(moduleOrReq, Requirement): + return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] + try: + module = sys.modules[moduleOrReq] + except KeyError: + __import__(moduleOrReq) + module = sys.modules[moduleOrReq] + loader = getattr(module, '__loader__', None) + return _find_adapter(_provider_factories, loader)(module) + + +def _macosx_vers(_cache=[]): + if not _cache: + version = platform.mac_ver()[0] + # fallback for MacPorts + if version == '': + plist = '/System/Library/CoreServices/SystemVersion.plist' + if os.path.exists(plist): + if hasattr(plistlib, 'readPlist'): + plist_content = plistlib.readPlist(plist) + if 'ProductVersion' in plist_content: + version = plist_content['ProductVersion'] + + _cache.append(version.split('.')) + return _cache[0] + + +def _macosx_arch(machine): + return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) + + +def get_build_platform(): + """Return this platform's string for platform-specific distributions + + XXX Currently this is the same as ``distutils.util.get_platform()``, but it + needs some hacks for Linux and Mac OS X. + """ + from sysconfig import get_platform + + plat = get_platform() + if sys.platform == "darwin" and not plat.startswith('macosx-'): + try: + version = _macosx_vers() + machine = os.uname()[4].replace(" ", "_") + return "macosx-%d.%d-%s" % ( + int(version[0]), int(version[1]), + _macosx_arch(machine), + ) + except ValueError: + # if someone is running a non-Mac darwin system, this will fall + # through to the default implementation + pass + return plat + + +macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") +darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") +# XXX backward compat +get_platform = get_build_platform + + +def compatible_platforms(provided, required): + """Can code for the `provided` platform run on the `required` platform? + + Returns true if either platform is ``None``, or the platforms are equal. + + XXX Needs compatibility checks for Linux and other unixy OSes. + """ + if provided is None or required is None or provided == required: + # easy case + return True + + # Mac OS X special cases + reqMac = macosVersionString.match(required) + if reqMac: + provMac = macosVersionString.match(provided) + + # is this a Mac package? + if not provMac: + # this is backwards compatibility for packages built before + # setuptools 0.6. All packages built after this point will + # use the new macosx designation. + provDarwin = darwinVersionString.match(provided) + if provDarwin: + dversion = int(provDarwin.group(1)) + macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) + if dversion == 7 and macosversion >= "10.3" or \ + dversion == 8 and macosversion >= "10.4": + return True + # egg isn't macosx or legacy darwin + return False + + # are they the same major version and machine type? + if provMac.group(1) != reqMac.group(1) or \ + provMac.group(3) != reqMac.group(3): + return False + + # is the required OS major update >= the provided one? 
+ if int(provMac.group(2)) > int(reqMac.group(2)): + return False + + return True + + # XXX Linux and other platforms' special cases should go here + return False + + +def run_script(dist_spec, script_name): + """Locate distribution `dist_spec` and run its `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + require(dist_spec)[0].run_script(script_name, ns) + + +# backward compatibility +run_main = run_script + + +def get_distribution(dist): + """Return a current distribution object for a Requirement or string""" + if isinstance(dist, six.string_types): + dist = Requirement.parse(dist) + if isinstance(dist, Requirement): + dist = get_provider(dist) + if not isinstance(dist, Distribution): + raise TypeError("Expected string, Requirement, or Distribution", dist) + return dist + + +def load_entry_point(dist, group, name): + """Return `name` entry point of `group` for `dist` or raise ImportError""" + return get_distribution(dist).load_entry_point(group, name) + + +def get_entry_map(dist, group=None): + """Return the entry point map for `group`, or the full entry map""" + return get_distribution(dist).get_entry_map(group) + + +def get_entry_info(dist, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return get_distribution(dist).get_entry_info(group, name) + + +class IMetadataProvider: + def has_metadata(name): + """Does the package's distribution contain the named metadata?""" + + def get_metadata(name): + """The named metadata resource as a string""" + + def get_metadata_lines(name): + """Yield named metadata resource as list of non-blank non-comment lines + + Leading and trailing whitespace is stripped from each line, and lines + with ``#`` as the first non-blank character are omitted.""" + + def metadata_isdir(name): + """Is the named metadata a directory? (like ``os.path.isdir()``)""" + + def metadata_listdir(name): + """List of metadata names in the directory (like ``os.listdir()``)""" + + def run_script(script_name, namespace): + """Execute the named script in the supplied namespace dictionary""" + + +class IResourceProvider(IMetadataProvider): + """An object that provides access to package resources""" + + def get_resource_filename(manager, resource_name): + """Return a true filesystem path for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_stream(manager, resource_name): + """Return a readable file-like object for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_string(manager, resource_name): + """Return a string containing the contents of `resource_name` + + `manager` must be an ``IResourceManager``""" + + def has_resource(resource_name): + """Does the package contain the named resource?""" + + def resource_isdir(resource_name): + """Is the named resource a directory? (like ``os.path.isdir()``)""" + + def resource_listdir(resource_name): + """List of resource names in the directory (like ``os.listdir()``)""" + + +class WorkingSet: + """A collection of active distributions on sys.path (or a similar list)""" + + def __init__(self, entries=None): + """Create working set from list of path entries (default=sys.path)""" + self.entries = [] + self.entry_keys = {} + self.by_key = {} + self.callbacks = [] + + if entries is None: + entries = sys.path + + for entry in entries: + self.add_entry(entry) + + @classmethod + def _build_master(cls): + """ + Prepare the master working set. 
+ """ + ws = cls() + try: + from __main__ import __requires__ + except ImportError: + # The main program does not list any requirements + return ws + + # ensure the requirements are met + try: + ws.require(__requires__) + except VersionConflict: + return cls._build_from_requirements(__requires__) + + return ws + + @classmethod + def _build_from_requirements(cls, req_spec): + """ + Build a working set from a requirement spec. Rewrites sys.path. + """ + # try it without defaults already on sys.path + # by starting with an empty path + ws = cls([]) + reqs = parse_requirements(req_spec) + dists = ws.resolve(reqs, Environment()) + for dist in dists: + ws.add(dist) + + # add any missing entries from sys.path + for entry in sys.path: + if entry not in ws.entries: + ws.add_entry(entry) + + # then copy back to sys.path + sys.path[:] = ws.entries + return ws + + def add_entry(self, entry): + """Add a path item to ``.entries``, finding any distributions on it + + ``find_distributions(entry, True)`` is used to find distributions + corresponding to the path entry, and they are added. `entry` is + always appended to ``.entries``, even if it is already present. + (This is because ``sys.path`` can contain the same value more than + once, and the ``.entries`` of the ``sys.path`` WorkingSet should always + equal ``sys.path``.) + """ + self.entry_keys.setdefault(entry, []) + self.entries.append(entry) + for dist in find_distributions(entry, True): + self.add(dist, entry, False) + + def __contains__(self, dist): + """True if `dist` is the active distribution for its project""" + return self.by_key.get(dist.key) == dist + + def find(self, req): + """Find a distribution matching requirement `req` + + If there is an active distribution for the requested project, this + returns it as long as it meets the version requirement specified by + `req`. But, if there is an active distribution for the project and it + does *not* meet the `req` requirement, ``VersionConflict`` is raised. + If there is no active distribution for the requested project, ``None`` + is returned. + """ + dist = self.by_key.get(req.key) + if dist is not None and dist not in req: + # XXX add more info + raise VersionConflict(dist, req) + return dist + + def iter_entry_points(self, group, name=None): + """Yield entry point objects from `group` matching `name` + + If `name` is None, yields all entry points in `group` from all + distributions in the working set, otherwise only ones matching + both `group` and `name` are yielded (in distribution order). + """ + return ( + entry + for dist in self + for entry in dist.get_entry_map(group).values() + if name is None or name == entry.name + ) + + def run_script(self, requires, script_name): + """Locate distribution for `requires` and run `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + self.require(requires)[0].run_script(script_name, ns) + + def __iter__(self): + """Yield distributions for non-duplicate projects in the working set + + The yield order is the order in which the items' path entries were + added to the working set. 
+ """ + seen = {} + for item in self.entries: + if item not in self.entry_keys: + # workaround a cache issue + continue + + for key in self.entry_keys[item]: + if key not in seen: + seen[key] = 1 + yield self.by_key[key] + + def add(self, dist, entry=None, insert=True, replace=False): + """Add `dist` to working set, associated with `entry` + + If `entry` is unspecified, it defaults to the ``.location`` of `dist`. + On exit from this routine, `entry` is added to the end of the working + set's ``.entries`` (if it wasn't already present). + + `dist` is only added to the working set if it's for a project that + doesn't already have a distribution in the set, unless `replace=True`. + If it's added, any callbacks registered with the ``subscribe()`` method + will be called. + """ + if insert: + dist.insert_on(self.entries, entry, replace=replace) + + if entry is None: + entry = dist.location + keys = self.entry_keys.setdefault(entry, []) + keys2 = self.entry_keys.setdefault(dist.location, []) + if not replace and dist.key in self.by_key: + # ignore hidden distros + return + + self.by_key[dist.key] = dist + if dist.key not in keys: + keys.append(dist.key) + if dist.key not in keys2: + keys2.append(dist.key) + self._added_new(dist) + + def resolve(self, requirements, env=None, installer=None, + replace_conflicting=False, extras=None): + """List all distributions needed to (recursively) meet `requirements` + + `requirements` must be a sequence of ``Requirement`` objects. `env`, + if supplied, should be an ``Environment`` instance. If + not supplied, it defaults to all distributions available within any + entry or distribution in the working set. `installer`, if supplied, + will be invoked with each requirement that cannot be met by an + already-installed distribution; it should return a ``Distribution`` or + ``None``. + + Unless `replace_conflicting=True`, raises a VersionConflict exception + if + any requirements are found on the path that have the correct name but + the wrong version. Otherwise, if an `installer` is supplied it will be + invoked to obtain the correct version of the requirement and activate + it. + + `extras` is a list of the extras to be used with these requirements. + This is important because extra requirements may look like `my_req; + extra = "my_extra"`, which would otherwise be interpreted as a purely + optional requirement. Instead, we want to be able to assert that these + requirements are truly required. + """ + + # set up the stack + requirements = list(requirements)[::-1] + # set of processed requirements + processed = {} + # key -> dist + best = {} + to_activate = [] + + req_extras = _ReqExtras() + + # Mapping of requirement to set of distributions that required it; + # useful for reporting info about conflicts. 
+ required_by = collections.defaultdict(set) + + while requirements: + # process dependencies breadth-first + req = requirements.pop(0) + if req in processed: + # Ignore cyclic or redundant dependencies + continue + + if not req_extras.markers_pass(req, extras): + continue + + dist = best.get(req.key) + if dist is None: + # Find the best distribution and add it to the map + dist = self.by_key.get(req.key) + if dist is None or (dist not in req and replace_conflicting): + ws = self + if env is None: + if dist is None: + env = Environment(self.entries) + else: + # Use an empty environment and workingset to avoid + # any further conflicts with the conflicting + # distribution + env = Environment([]) + ws = WorkingSet([]) + dist = best[req.key] = env.best_match( + req, ws, installer, + replace_conflicting=replace_conflicting + ) + if dist is None: + requirers = required_by.get(req, None) + raise DistributionNotFound(req, requirers) + to_activate.append(dist) + if dist not in req: + # Oops, the "best" so far conflicts with a dependency + dependent_req = required_by[req] + raise VersionConflict(dist, req).with_context(dependent_req) + + # push the new requirements onto the stack + new_requirements = dist.requires(req.extras)[::-1] + requirements.extend(new_requirements) + + # Register the new requirements needed by req + for new_requirement in new_requirements: + required_by[new_requirement].add(req.project_name) + req_extras[new_requirement] = req.extras + + processed[req] = True + + # return list of distros to activate + return to_activate + + def find_plugins( + self, plugin_env, full_env=None, installer=None, fallback=True): + """Find all activatable distributions in `plugin_env` + + Example usage:: + + distributions, errors = working_set.find_plugins( + Environment(plugin_dirlist) + ) + # add plugins+libs to sys.path + map(working_set.add, distributions) + # display errors + print('Could not load', errors) + + The `plugin_env` should be an ``Environment`` instance that contains + only distributions that are in the project's "plugin directory" or + directories. The `full_env`, if supplied, should be an ``Environment`` + contains all currently-available distributions. If `full_env` is not + supplied, one is created automatically from the ``WorkingSet`` this + method is called on, which will typically mean that every directory on + ``sys.path`` will be scanned for distributions. + + `installer` is a standard installer callback as used by the + ``resolve()`` method. The `fallback` flag indicates whether we should + attempt to resolve older versions of a plugin if the newest version + cannot be resolved. + + This method returns a 2-tuple: (`distributions`, `error_info`), where + `distributions` is a list of the distributions found in `plugin_env` + that were loadable, along with any other distributions that are needed + to resolve their dependencies. `error_info` is a dictionary mapping + unloadable plugin distributions to an exception instance describing the + error that occurred. Usually this will be a ``DistributionNotFound`` or + ``VersionConflict`` instance. 
+ """ + + plugin_projects = list(plugin_env) + # scan project names in alphabetic order + plugin_projects.sort() + + error_info = {} + distributions = {} + + if full_env is None: + env = Environment(self.entries) + env += plugin_env + else: + env = full_env + plugin_env + + shadow_set = self.__class__([]) + # put all our entries in shadow_set + list(map(shadow_set.add, self)) + + for project_name in plugin_projects: + + for dist in plugin_env[project_name]: + + req = [dist.as_requirement()] + + try: + resolvees = shadow_set.resolve(req, env, installer) + + except ResolutionError as v: + # save error info + error_info[dist] = v + if fallback: + # try the next older version of project + continue + else: + # give up on this project, keep going + break + + else: + list(map(shadow_set.add, resolvees)) + distributions.update(dict.fromkeys(resolvees)) + + # success, no need to try any more versions of this project + break + + distributions = list(distributions) + distributions.sort() + + return distributions, error_info + + def require(self, *requirements): + """Ensure that distributions matching `requirements` are activated + + `requirements` must be a string or a (possibly-nested) sequence + thereof, specifying the distributions and versions required. The + return value is a sequence of the distributions that needed to be + activated to fulfill the requirements; all relevant distributions are + included, even if they were already activated in this working set. + """ + needed = self.resolve(parse_requirements(requirements)) + + for dist in needed: + self.add(dist) + + return needed + + def subscribe(self, callback, existing=True): + """Invoke `callback` for all distributions + + If `existing=True` (default), + call on all existing ones, as well. + """ + if callback in self.callbacks: + return + self.callbacks.append(callback) + if not existing: + return + for dist in self: + callback(dist) + + def _added_new(self, dist): + for callback in self.callbacks: + callback(dist) + + def __getstate__(self): + return ( + self.entries[:], self.entry_keys.copy(), self.by_key.copy(), + self.callbacks[:] + ) + + def __setstate__(self, e_k_b_c): + entries, keys, by_key, callbacks = e_k_b_c + self.entries = entries[:] + self.entry_keys = keys.copy() + self.by_key = by_key.copy() + self.callbacks = callbacks[:] + + +class _ReqExtras(dict): + """ + Map each requirement to the extras that demanded it. + """ + + def markers_pass(self, req, extras=None): + """ + Evaluate markers for req against each extra that + demanded it. + + Return False if the req has a marker and fails + evaluation. Otherwise, return True. + """ + extra_evals = ( + req.marker.evaluate({'extra': extra}) + for extra in self.get(req, ()) + (extras or (None,)) + ) + return not req.marker or any(extra_evals) + + +class Environment: + """Searchable snapshot of distributions on a search path""" + + def __init__( + self, search_path=None, platform=get_supported_platform(), + python=PY_MAJOR): + """Snapshot distributions available on a search path + + Any distributions found on `search_path` are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. + + `platform` is an optional string specifying the name of the platform + that platform-specific distributions must be compatible with. If + unspecified, it defaults to the current platform. `python` is an + optional string naming the desired version of Python (e.g. ``'3.6'``); + it defaults to the current version. 
+ + You may explicitly set `platform` (and/or `python`) to ``None`` if you + wish to map *all* distributions, not just those compatible with the + running platform or Python version. + """ + self._distmap = {} + self.platform = platform + self.python = python + self.scan(search_path) + + def can_add(self, dist): + """Is distribution `dist` acceptable for this environment? + + The distribution must match the platform and python version + requirements specified when this environment was created, or False + is returned. + """ + py_compat = ( + self.python is None + or dist.py_version is None + or dist.py_version == self.python + ) + return py_compat and compatible_platforms(dist.platform, self.platform) + + def remove(self, dist): + """Remove `dist` from the environment""" + self._distmap[dist.key].remove(dist) + + def scan(self, search_path=None): + """Scan `search_path` for distributions usable in this environment + + Any distributions found are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. Only distributions conforming to + the platform/python version defined at initialization are added. + """ + if search_path is None: + search_path = sys.path + + for item in search_path: + for dist in find_distributions(item): + self.add(dist) + + def __getitem__(self, project_name): + """Return a newest-to-oldest list of distributions for `project_name` + + Uses case-insensitive `project_name` comparison, assuming all the + project's distributions use their project's name converted to all + lowercase as their key. + + """ + distribution_key = project_name.lower() + return self._distmap.get(distribution_key, []) + + def add(self, dist): + """Add `dist` if we ``can_add()`` it and it has not already been added + """ + if self.can_add(dist) and dist.has_version(): + dists = self._distmap.setdefault(dist.key, []) + if dist not in dists: + dists.append(dist) + dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) + + def best_match( + self, req, working_set, installer=None, replace_conflicting=False): + """Find distribution best matching `req` and usable on `working_set` + + This calls the ``find(req)`` method of the `working_set` to see if a + suitable distribution is already active. (This may raise + ``VersionConflict`` if an unsuitable version of the project is already + active in the specified `working_set`.) If a suitable distribution + isn't active, this method returns the newest distribution in the + environment that meets the ``Requirement`` in `req`. If no suitable + distribution is found, and `installer` is supplied, then the result of + calling the environment's ``obtain(req, installer)`` method will be + returned. + """ + try: + dist = working_set.find(req) + except VersionConflict: + if not replace_conflicting: + raise + dist = None + if dist is not None: + return dist + for dist in self[req.key]: + if dist in req: + return dist + # try to download/install + return self.obtain(req, installer) + + def obtain(self, requirement, installer=None): + """Obtain a distribution matching `requirement` (e.g. via download) + + Obtain a distro that matches requirement (e.g. via download). In the + base ``Environment`` class, this routine just returns + ``installer(requirement)``, unless `installer` is None, in which case + None is returned instead. 
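A hypothetical subclass override, anticipating the note that follows (my_fetch stands in for any custom retrieval logic):

    class MyEnvironment(Environment):
        def obtain(self, requirement, installer=None):
            dist = my_fetch(requirement)   # hypothetical downloader
            return dist or Environment.obtain(self, requirement, installer)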
This method is a hook that allows subclasses + to attempt other ways of obtaining a distribution before falling back + to the `installer` argument.""" + if installer is not None: + return installer(requirement) + + def __iter__(self): + """Yield the unique project names of the available distributions""" + for key in self._distmap.keys(): + if self[key]: + yield key + + def __iadd__(self, other): + """In-place addition of a distribution or environment""" + if isinstance(other, Distribution): + self.add(other) + elif isinstance(other, Environment): + for project in other: + for dist in other[project]: + self.add(dist) + else: + raise TypeError("Can't add %r to environment" % (other,)) + return self + + def __add__(self, other): + """Add an environment or distribution to an environment""" + new = self.__class__([], platform=None, python=None) + for env in self, other: + new += env + return new + + +# XXX backward compatibility +AvailableDistributions = Environment + + +class ExtractionError(RuntimeError): + """An error occurred extracting a resource + + The following attributes are available from instances of this exception: + + manager + The resource manager that raised this exception + + cache_path + The base directory for resource extraction + + original_error + The exception instance that caused extraction to fail + """ + + +class ResourceManager: + """Manage resource extraction and packages""" + extraction_path = None + + def __init__(self): + self.cached_files = {} + + def resource_exists(self, package_or_requirement, resource_name): + """Does the named resource exist?""" + return get_provider(package_or_requirement).has_resource(resource_name) + + def resource_isdir(self, package_or_requirement, resource_name): + """Is the named resource an existing directory?""" + return get_provider(package_or_requirement).resource_isdir( + resource_name + ) + + def resource_filename(self, package_or_requirement, resource_name): + """Return a true filesystem path for specified resource""" + return get_provider(package_or_requirement).get_resource_filename( + self, resource_name + ) + + def resource_stream(self, package_or_requirement, resource_name): + """Return a readable file-like object for specified resource""" + return get_provider(package_or_requirement).get_resource_stream( + self, resource_name + ) + + def resource_string(self, package_or_requirement, resource_name): + """Return specified resource as a string""" + return get_provider(package_or_requirement).get_resource_string( + self, resource_name + ) + + def resource_listdir(self, package_or_requirement, resource_name): + """List the contents of the named resource directory""" + return get_provider(package_or_requirement).resource_listdir( + resource_name + ) + + def extraction_error(self): + """Give an error message for problems extracting file(s)""" + + old_exc = sys.exc_info()[1] + cache_path = self.extraction_path or get_default_cache() + + tmpl = textwrap.dedent(""" + Can't extract file(s) to egg cache + + The following error occurred while trying to extract file(s) + to the Python egg cache: + + {old_exc} + + The Python egg cache directory is currently set to: + + {cache_path} + + Perhaps your account does not have write access to this directory? + You can change the cache directory by setting the PYTHON_EGG_CACHE + environment variable to point to an accessible directory. 
+ """).lstrip() + err = ExtractionError(tmpl.format(**locals())) + err.manager = self + err.cache_path = cache_path + err.original_error = old_exc + raise err + + def get_cache_path(self, archive_name, names=()): + """Return absolute location in cache for `archive_name` and `names` + + The parent directory of the resulting path will be created if it does + not already exist. `archive_name` should be the base filename of the + enclosing egg (which may not be the name of the enclosing zipfile!), + including its ".egg" extension. `names`, if provided, should be a + sequence of path name parts "under" the egg's extraction location. + + This method should only be called by resource providers that need to + obtain an extraction location, and only for names they intend to + extract, as it tracks the generated names for possible cleanup later. + """ + extract_path = self.extraction_path or get_default_cache() + target_path = os.path.join(extract_path, archive_name + '-tmp', *names) + try: + _bypass_ensure_directory(target_path) + except Exception: + self.extraction_error() + + self._warn_unsafe_extraction_path(extract_path) + + self.cached_files[target_path] = 1 + return target_path + + @staticmethod + def _warn_unsafe_extraction_path(path): + """ + If the default extraction path is overridden and set to an insecure + location, such as /tmp, it opens up an opportunity for an attacker to + replace an extracted file with an unauthorized payload. Warn the user + if a known insecure location is used. + + See Distribute #375 for more details. + """ + if os.name == 'nt' and not path.startswith(os.environ['windir']): + # On Windows, permissions are generally restrictive by default + # and temp directories are not writable by other users, so + # bypass the warning. + return + mode = os.stat(path).st_mode + if mode & stat.S_IWOTH or mode & stat.S_IWGRP: + msg = ( + "%s is writable by group/others and vulnerable to attack " + "when " + "used with get_resource_filename. Consider a more secure " + "location (set with .set_extraction_path or the " + "PYTHON_EGG_CACHE environment variable)." % path + ) + warnings.warn(msg, UserWarning) + + def postprocess(self, tempname, filename): + """Perform any platform-specific postprocessing of `tempname` + + This is where Mac header rewrites should be done; other platforms don't + have anything special they should do. + + Resource providers should call this method ONLY after successfully + extracting a compressed resource. They must NOT call it on resources + that are already in the filesystem. + + `tempname` is the current (temporary) name of the file, and `filename` + is the name it will be renamed to by the caller after this routine + returns. + """ + + if os.name == 'posix': + # Make the resource executable + mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 + os.chmod(tempname, mode) + + def set_extraction_path(self, path): + """Set the base path where resources will be extracted to, if needed. + + If you do not call this routine before any extractions take place, the + path defaults to the return value of ``get_default_cache()``. (Which + is based on the ``PYTHON_EGG_CACHE`` environment variable, with various + platform-specific fallbacks. See that routine's documentation for more + details.) + + Resources are extracted to subdirectories of this path based upon + information given by the ``IResourceProvider``. You may set this to a + temporary directory, but then you must call ``cleanup_resources()`` to + delete the extracted files when done. 
There is no guarantee that + ``cleanup_resources()`` will be able to remove all extracted files. + + (Note: you may not change the extraction path for a given resource + manager once resources have been extracted, unless you first call + ``cleanup_resources()``.) + """ + if self.cached_files: + raise ValueError( + "Can't change extraction path, files already extracted" + ) + + self.extraction_path = path + + def cleanup_resources(self, force=False): + """ + Delete all extracted resource files and directories, returning a list + of the file and directory names that could not be successfully removed. + This function does not have any concurrency protection, so it should + generally only be called when the extraction path is a temporary + directory exclusive to a single process. This method is not + automatically called; you must call it explicitly or register it as an + ``atexit`` function if you wish to ensure cleanup of a temporary + directory used for extractions. + """ + # XXX + + +def get_default_cache(): + """ + Return the ``PYTHON_EGG_CACHE`` environment variable + or a platform-relevant user cache dir for an app + named "Python-Eggs". + """ + return ( + os.environ.get('PYTHON_EGG_CACHE') + or appdirs.user_cache_dir(appname='Python-Eggs') + ) + + +def safe_name(name): + """Convert an arbitrary string to a standard distribution name + + Any runs of non-alphanumeric/. characters are replaced with a single '-'. + """ + return re.sub('[^A-Za-z0-9.]+', '-', name) + + +def safe_version(version): + """ + Convert an arbitrary string to a standard version string + """ + try: + # normalize the version + return str(packaging.version.Version(version)) + except packaging.version.InvalidVersion: + version = version.replace(' ', '.') + return re.sub('[^A-Za-z0-9.]+', '-', version) + + +def safe_extra(extra): + """Convert an arbitrary string to a standard 'extra' name + + Any runs of non-alphanumeric characters are replaced with a single '_', + and the result is always lowercased. + """ + return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() + + +def to_filename(name): + """Convert a project or version name to its filename-escaped form + + Any '-' characters are currently replaced with '_'. + """ + return name.replace('-', '_') + + +def invalid_marker(text): + """ + Validate text as a PEP 508 environment marker; return an exception + if invalid or False otherwise. + """ + try: + evaluate_marker(text) + except SyntaxError as e: + e.filename = None + e.lineno = None + return e + return False + + +def evaluate_marker(text, extra=None): + """ + Evaluate a PEP 508 environment marker. + Return a boolean indicating the marker result in this environment. + Raise SyntaxError if marker is invalid. + + This implementation uses the 'pyparsing' module. 
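Illustrative calls (the marker strings are made up; results depend on the running interpreter):

    evaluate_marker('python_version >= "2.7"')   # True or False per interpreter
    invalid_marker('not a valid marker !!')      # returns a SyntaxError instance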
+ """ + try: + marker = packaging.markers.Marker(text) + return marker.evaluate() + except packaging.markers.InvalidMarker as e: + raise SyntaxError(e) + + +class NullProvider: + """Try to implement resources and metadata for arbitrary PEP 302 loaders""" + + egg_name = None + egg_info = None + loader = None + + def __init__(self, module): + self.loader = getattr(module, '__loader__', None) + self.module_path = os.path.dirname(getattr(module, '__file__', '')) + + def get_resource_filename(self, manager, resource_name): + return self._fn(self.module_path, resource_name) + + def get_resource_stream(self, manager, resource_name): + return io.BytesIO(self.get_resource_string(manager, resource_name)) + + def get_resource_string(self, manager, resource_name): + return self._get(self._fn(self.module_path, resource_name)) + + def has_resource(self, resource_name): + return self._has(self._fn(self.module_path, resource_name)) + + def has_metadata(self, name): + return self.egg_info and self._has(self._fn(self.egg_info, name)) + + def get_metadata(self, name): + if not self.egg_info: + return "" + value = self._get(self._fn(self.egg_info, name)) + return value.decode('utf-8') if six.PY3 else value + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + def resource_isdir(self, resource_name): + return self._isdir(self._fn(self.module_path, resource_name)) + + def metadata_isdir(self, name): + return self.egg_info and self._isdir(self._fn(self.egg_info, name)) + + def resource_listdir(self, resource_name): + return self._listdir(self._fn(self.module_path, resource_name)) + + def metadata_listdir(self, name): + if self.egg_info: + return self._listdir(self._fn(self.egg_info, name)) + return [] + + def run_script(self, script_name, namespace): + script = 'scripts/' + script_name + if not self.has_metadata(script): + raise ResolutionError( + "Script {script!r} not found in metadata at {self.egg_info!r}" + .format(**locals()), + ) + script_text = self.get_metadata(script).replace('\r\n', '\n') + script_text = script_text.replace('\r', '\n') + script_filename = self._fn(self.egg_info, script) + namespace['__file__'] = script_filename + if os.path.exists(script_filename): + source = open(script_filename).read() + code = compile(source, script_filename, 'exec') + exec(code, namespace, namespace) + else: + from linecache import cache + cache[script_filename] = ( + len(script_text), 0, script_text.split('\n'), script_filename + ) + script_code = compile(script_text, script_filename, 'exec') + exec(script_code, namespace, namespace) + + def _has(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _isdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _listdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _fn(self, base, resource_name): + if resource_name: + return os.path.join(base, *resource_name.split('/')) + return base + + def _get(self, path): + if hasattr(self.loader, 'get_data'): + return self.loader.get_data(path) + raise NotImplementedError( + "Can't perform this operation for loaders without 'get_data()'" + ) + + +register_loader_type(object, NullProvider) + + +class EggProvider(NullProvider): + """Provider based on a virtual filesystem""" + + def __init__(self, module): + NullProvider.__init__(self, module) + self._setup_prefix() + + def _setup_prefix(self): + # we 
assume here that our metadata may be nested inside a "basket" + # of multiple eggs; that's why we use module_path instead of .archive + path = self.module_path + old = None + while path != old: + if _is_egg_path(path): + self.egg_name = os.path.basename(path) + self.egg_info = os.path.join(path, 'EGG-INFO') + self.egg_root = path + break + old = path + path, base = os.path.split(path) + + +class DefaultProvider(EggProvider): + """Provides access to package resources in the filesystem""" + + def _has(self, path): + return os.path.exists(path) + + def _isdir(self, path): + return os.path.isdir(path) + + def _listdir(self, path): + return os.listdir(path) + + def get_resource_stream(self, manager, resource_name): + return open(self._fn(self.module_path, resource_name), 'rb') + + def _get(self, path): + with open(path, 'rb') as stream: + return stream.read() + + @classmethod + def _register(cls): + loader_names = 'SourceFileLoader', 'SourcelessFileLoader', + for name in loader_names: + loader_cls = getattr(importlib_machinery, name, type(None)) + register_loader_type(loader_cls, cls) + + +DefaultProvider._register() + + +class EmptyProvider(NullProvider): + """Provider that returns nothing for all requests""" + + module_path = None + + _isdir = _has = lambda self, path: False + + def _get(self, path): + return '' + + def _listdir(self, path): + return [] + + def __init__(self): + pass + + +empty_provider = EmptyProvider() + + +class ZipManifests(dict): + """ + zip manifest builder + """ + + @classmethod + def build(cls, path): + """ + Build a dictionary similar to the zipimport directory + caches, except instead of tuples, store ZipInfo objects. + + Use a platform-specific path separator (os.sep) for the path keys + for compatibility with pypy on Windows. + """ + with zipfile.ZipFile(path) as zfile: + items = ( + ( + name.replace('/', os.sep), + zfile.getinfo(name), + ) + for name in zfile.namelist() + ) + return dict(items) + + load = build + + +class MemoizedZipManifests(ZipManifests): + """ + Memoized zipfile manifests. + """ + manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') + + def load(self, path): + """ + Load a manifest at path or return a suitable manifest already loaded. + """ + path = os.path.normpath(path) + mtime = os.stat(path).st_mtime + + if path not in self or self[path].mtime != mtime: + manifest = self.build(path) + self[path] = self.manifest_mod(manifest, mtime) + + return self[path].manifest + + +class ZipProvider(EggProvider): + """Resource support for zips and eggs""" + + eagers = None + _zip_manifests = MemoizedZipManifests() + + def __init__(self, module): + EggProvider.__init__(self, module) + self.zip_pre = self.loader.archive + os.sep + + def _zipinfo_name(self, fspath): + # Convert a virtual filename (full path to file) into a zipfile subpath + # usable with the zipimport directory cache for our target archive + fspath = fspath.rstrip(os.sep) + if fspath == self.loader.archive: + return '' + if fspath.startswith(self.zip_pre): + return fspath[len(self.zip_pre):] + raise AssertionError( + "%s is not a subpath of %s" % (fspath, self.zip_pre) + ) + + def _parts(self, zip_path): + # Convert a zipfile subpath into an egg-relative path part list. 
+ # pseudo-fs path + fspath = self.zip_pre + zip_path + if fspath.startswith(self.egg_root + os.sep): + return fspath[len(self.egg_root) + 1:].split(os.sep) + raise AssertionError( + "%s is not a subpath of %s" % (fspath, self.egg_root) + ) + + @property + def zipinfo(self): + return self._zip_manifests.load(self.loader.archive) + + def get_resource_filename(self, manager, resource_name): + if not self.egg_name: + raise NotImplementedError( + "resource_filename() only supported for .egg, not .zip" + ) + # no need to lock for extraction, since we use temp names + zip_path = self._resource_to_zip(resource_name) + eagers = self._get_eager_resources() + if '/'.join(self._parts(zip_path)) in eagers: + for name in eagers: + self._extract_resource(manager, self._eager_to_zip(name)) + return self._extract_resource(manager, zip_path) + + @staticmethod + def _get_date_and_size(zip_stat): + size = zip_stat.file_size + # ymdhms+wday, yday, dst + date_time = zip_stat.date_time + (0, 0, -1) + # 1980 offset already done + timestamp = time.mktime(date_time) + return timestamp, size + + def _extract_resource(self, manager, zip_path): + + if zip_path in self._index(): + for name in self._index()[zip_path]: + last = self._extract_resource( + manager, os.path.join(zip_path, name) + ) + # return the extracted directory name + return os.path.dirname(last) + + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + + if not WRITE_SUPPORT: + raise IOError('"os.rename" and "os.unlink" are not supported ' + 'on this platform') + try: + + real_path = manager.get_cache_path( + self.egg_name, self._parts(zip_path) + ) + + if self._is_current(real_path, zip_path): + return real_path + + outf, tmpnam = _mkstemp( + ".$extract", + dir=os.path.dirname(real_path), + ) + os.write(outf, self.loader.get_data(zip_path)) + os.close(outf) + utime(tmpnam, (timestamp, timestamp)) + manager.postprocess(tmpnam, real_path) + + try: + rename(tmpnam, real_path) + + except os.error: + if os.path.isfile(real_path): + if self._is_current(real_path, zip_path): + # the file became current since it was checked above, + # so proceed. 
+ return real_path + # Windows, del old file and retry + elif os.name == 'nt': + unlink(real_path) + rename(tmpnam, real_path) + return real_path + raise + + except os.error: + # report a user-friendly error + manager.extraction_error() + + return real_path + + def _is_current(self, file_path, zip_path): + """ + Return True if the file_path is current for this zip_path + """ + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + if not os.path.isfile(file_path): + return False + stat = os.stat(file_path) + if stat.st_size != size or stat.st_mtime != timestamp: + return False + # check that the contents match + zip_contents = self.loader.get_data(zip_path) + with open(file_path, 'rb') as f: + file_contents = f.read() + return zip_contents == file_contents + + def _get_eager_resources(self): + if self.eagers is None: + eagers = [] + for name in ('native_libs.txt', 'eager_resources.txt'): + if self.has_metadata(name): + eagers.extend(self.get_metadata_lines(name)) + self.eagers = eagers + return self.eagers + + def _index(self): + try: + return self._dirindex + except AttributeError: + ind = {} + for path in self.zipinfo: + parts = path.split(os.sep) + while parts: + parent = os.sep.join(parts[:-1]) + if parent in ind: + ind[parent].append(parts[-1]) + break + else: + ind[parent] = [parts.pop()] + self._dirindex = ind + return ind + + def _has(self, fspath): + zip_path = self._zipinfo_name(fspath) + return zip_path in self.zipinfo or zip_path in self._index() + + def _isdir(self, fspath): + return self._zipinfo_name(fspath) in self._index() + + def _listdir(self, fspath): + return list(self._index().get(self._zipinfo_name(fspath), ())) + + def _eager_to_zip(self, resource_name): + return self._zipinfo_name(self._fn(self.egg_root, resource_name)) + + def _resource_to_zip(self, resource_name): + return self._zipinfo_name(self._fn(self.module_path, resource_name)) + + +register_loader_type(zipimport.zipimporter, ZipProvider) + + +class FileMetadata(EmptyProvider): + """Metadata handler for standalone PKG-INFO files + + Usage:: + + metadata = FileMetadata("/path/to/PKG-INFO") + + This provider rejects all data and metadata requests except for PKG-INFO, + which is treated as existing, and will be the contents of the file at + the provided location. 
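+
+    A short sketch of the intended round trip (the path is illustrative)::
+
+        metadata = FileMetadata("/path/to/PKG-INFO")
+        if metadata.has_metadata('PKG-INFO'):
+            pkg_info = metadata.get_metadata('PKG-INFO')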
+ """ + + def __init__(self, path): + self.path = path + + def has_metadata(self, name): + return name == 'PKG-INFO' and os.path.isfile(self.path) + + def get_metadata(self, name): + if name != 'PKG-INFO': + raise KeyError("No metadata except PKG-INFO is available") + + with io.open(self.path, encoding='utf-8', errors="replace") as f: + metadata = f.read() + self._warn_on_replacement(metadata) + return metadata + + def _warn_on_replacement(self, metadata): + # Python 2.7 compat for: replacement_char = '�' + replacement_char = b'\xef\xbf\xbd'.decode('utf-8') + if replacement_char in metadata: + tmpl = "{self.path} could not be properly decoded in UTF-8" + msg = tmpl.format(**locals()) + warnings.warn(msg) + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + +class PathMetadata(DefaultProvider): + """Metadata provider for egg directories + + Usage:: + + # Development eggs: + + egg_info = "/path/to/PackageName.egg-info" + base_dir = os.path.dirname(egg_info) + metadata = PathMetadata(base_dir, egg_info) + dist_name = os.path.splitext(os.path.basename(egg_info))[0] + dist = Distribution(basedir, project_name=dist_name, metadata=metadata) + + # Unpacked egg directories: + + egg_path = "/path/to/PackageName-ver-pyver-etc.egg" + metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) + dist = Distribution.from_filename(egg_path, metadata=metadata) + """ + + def __init__(self, path, egg_info): + self.module_path = path + self.egg_info = egg_info + + +class EggMetadata(ZipProvider): + """Metadata provider for .egg files""" + + def __init__(self, importer): + """Create a metadata provider from a zipimporter""" + + self.zip_pre = importer.archive + os.sep + self.loader = importer + if importer.prefix: + self.module_path = os.path.join(importer.archive, importer.prefix) + else: + self.module_path = importer.archive + self._setup_prefix() + + +_declare_state('dict', _distribution_finders={}) + + +def register_finder(importer_type, distribution_finder): + """Register `distribution_finder` to find distributions in sys.path items + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `distribution_finder` is a callable that, passed a path + item and the importer instance, yields ``Distribution`` instances found on + that path item. See ``pkg_resources.find_on_path`` for an example.""" + _distribution_finders[importer_type] = distribution_finder + + +def find_distributions(path_item, only=False): + """Yield distributions accessible via `path_item`""" + importer = get_importer(path_item) + finder = _find_adapter(_distribution_finders, importer) + return finder(importer, path_item, only) + + +def find_eggs_in_zip(importer, path_item, only=False): + """ + Find eggs in zip files; possibly multiple nested eggs. 
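+
+    Typically reached indirectly through ``find_distributions()``; a direct
+    call would look like this sketch (the egg path is hypothetical)::
+
+        importer = zipimport.zipimporter('/tmp/example-1.0-py2.7.egg')
+        dists = list(find_eggs_in_zip(importer, '/tmp/example-1.0-py2.7.egg'))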
+ """ + if importer.archive.endswith('.whl'): + # wheels are not supported with this finder + # they don't have PKG-INFO metadata, and won't ever contain eggs + return + metadata = EggMetadata(importer) + if metadata.has_metadata('PKG-INFO'): + yield Distribution.from_filename(path_item, metadata=metadata) + if only: + # don't yield nested distros + return + for subitem in metadata.resource_listdir('/'): + if _is_egg_path(subitem): + subpath = os.path.join(path_item, subitem) + dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) + for dist in dists: + yield dist + elif subitem.lower().endswith('.dist-info'): + subpath = os.path.join(path_item, subitem) + submeta = EggMetadata(zipimport.zipimporter(subpath)) + submeta.egg_info = subpath + yield Distribution.from_location(path_item, subitem, submeta) + + +register_finder(zipimport.zipimporter, find_eggs_in_zip) + + +def find_nothing(importer, path_item, only=False): + return () + + +register_finder(object, find_nothing) + + +def _by_version_descending(names): + """ + Given a list of filenames, return them in descending order + by version number. + + >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' + >>> _by_version_descending(names) + ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] + >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' + >>> _by_version_descending(names) + ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] + >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' + >>> _by_version_descending(names) + ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] + """ + def _by_version(name): + """ + Parse each component of the filename + """ + name, ext = os.path.splitext(name) + parts = itertools.chain(name.split('-'), [ext]) + return [packaging.version.parse(part) for part in parts] + + return sorted(names, key=_by_version, reverse=True) + + +def find_on_path(importer, path_item, only=False): + """Yield distributions accessible on a sys.path directory""" + path_item = _normalize_cached(path_item) + + if _is_unpacked_egg(path_item): + yield Distribution.from_filename( + path_item, metadata=PathMetadata( + path_item, os.path.join(path_item, 'EGG-INFO') + ) + ) + return + + entries = safe_listdir(path_item) + + # for performance, before sorting by version, + # screen entries for only those that will yield + # distributions + filtered = ( + entry + for entry in entries + if dist_factory(path_item, entry, only) + ) + + # scan for .egg and .egg-info in directory + path_item_entries = _by_version_descending(filtered) + for entry in path_item_entries: + fullpath = os.path.join(path_item, entry) + factory = dist_factory(path_item, entry, only) + for dist in factory(fullpath): + yield dist + + +def dist_factory(path_item, entry, only): + """ + Return a dist_factory for a path_item and entry + """ + lower = entry.lower() + is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info'))) + return ( + distributions_from_metadata + if is_meta else + find_distributions + if not only and _is_egg_path(entry) else + resolve_egg_link + if not only and lower.endswith('.egg-link') else + NoDists() + ) + + +class NoDists: + """ + >>> bool(NoDists()) + False + + >>> list(NoDists()('anything')) + [] + """ + def __bool__(self): + return False + if six.PY2: + __nonzero__ = __bool__ + + def __call__(self, fullpath): + return iter(()) + + +def safe_listdir(path): + """ + Attempt to list contents of path, but suppress some exceptions. 
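+
+    For example, a missing directory yields an empty tuple instead of an
+    error (illustrative)::
+
+        >>> safe_listdir('/no/such/directory')
+        ()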
+ """ + try: + return os.listdir(path) + except (PermissionError, NotADirectoryError): + pass + except OSError as e: + # Ignore the directory if does not exist, not a directory or + # permission denied + ignorable = ( + e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT) + # Python 2 on Windows needs to be handled this way :( + or getattr(e, "winerror", None) == 267 + ) + if not ignorable: + raise + return () + + +def distributions_from_metadata(path): + root = os.path.dirname(path) + if os.path.isdir(path): + if len(os.listdir(path)) == 0: + # empty metadata dir; skip + return + metadata = PathMetadata(root, path) + else: + metadata = FileMetadata(path) + entry = os.path.basename(path) + yield Distribution.from_location( + root, entry, metadata, precedence=DEVELOP_DIST, + ) + + +def non_empty_lines(path): + """ + Yield non-empty lines from file at path + """ + with open(path) as f: + for line in f: + line = line.strip() + if line: + yield line + + +def resolve_egg_link(path): + """ + Given a path to an .egg-link, resolve distributions + present in the referenced path. + """ + referenced_paths = non_empty_lines(path) + resolved_paths = ( + os.path.join(os.path.dirname(path), ref) + for ref in referenced_paths + ) + dist_groups = map(find_distributions, resolved_paths) + return next(dist_groups, ()) + + +register_finder(pkgutil.ImpImporter, find_on_path) + +if hasattr(importlib_machinery, 'FileFinder'): + register_finder(importlib_machinery.FileFinder, find_on_path) + +_declare_state('dict', _namespace_handlers={}) +_declare_state('dict', _namespace_packages={}) + + +def register_namespace_handler(importer_type, namespace_handler): + """Register `namespace_handler` to declare namespace packages + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `namespace_handler` is a callable like this:: + + def namespace_handler(importer, path_entry, moduleName, module): + # return a path_entry to use for child packages + + Namespace handlers are only called if the importer object has already + agreed that it can handle the relevant path item, and they should only + return a subpath if the module __path__ does not already contain an + equivalent subpath. For an example namespace handler, see + ``pkg_resources.file_ns_handler``. 
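+
+    Registration itself is a single call; for instance, mirroring the
+    registrations made later in this module::
+
+        register_namespace_handler(zipimport.zipimporter, file_ns_handler)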
+ """ + _namespace_handlers[importer_type] = namespace_handler + + +def _handle_ns(packageName, path_item): + """Ensure that named package includes a subpath of path_item (if needed)""" + + importer = get_importer(path_item) + if importer is None: + return None + + # capture warnings due to #1111 + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + loader = importer.find_module(packageName) + + if loader is None: + return None + module = sys.modules.get(packageName) + if module is None: + module = sys.modules[packageName] = types.ModuleType(packageName) + module.__path__ = [] + _set_parent_ns(packageName) + elif not hasattr(module, '__path__'): + raise TypeError("Not a package:", packageName) + handler = _find_adapter(_namespace_handlers, importer) + subpath = handler(importer, path_item, packageName, module) + if subpath is not None: + path = module.__path__ + path.append(subpath) + loader.load_module(packageName) + _rebuild_mod_path(path, packageName, module) + return subpath + + +def _rebuild_mod_path(orig_path, package_name, module): + """ + Rebuild module.__path__ ensuring that all entries are ordered + corresponding to their sys.path order + """ + sys_path = [_normalize_cached(p) for p in sys.path] + + def safe_sys_path_index(entry): + """ + Workaround for #520 and #513. + """ + try: + return sys_path.index(entry) + except ValueError: + return float('inf') + + def position_in_sys_path(path): + """ + Return the ordinal of the path based on its position in sys.path + """ + path_parts = path.split(os.sep) + module_parts = package_name.count('.') + 1 + parts = path_parts[:-module_parts] + return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) + + new_path = sorted(orig_path, key=position_in_sys_path) + new_path = [_normalize_cached(p) for p in new_path] + + if isinstance(module.__path__, list): + module.__path__[:] = new_path + else: + module.__path__ = new_path + + +def declare_namespace(packageName): + """Declare that package 'packageName' is a namespace package""" + + _imp.acquire_lock() + try: + if packageName in _namespace_packages: + return + + path = sys.path + parent, _, _ = packageName.rpartition('.') + + if parent: + declare_namespace(parent) + if parent not in _namespace_packages: + __import__(parent) + try: + path = sys.modules[parent].__path__ + except AttributeError: + raise TypeError("Not a package:", parent) + + # Track what packages are namespaces, so when new path items are added, + # they can be updated + _namespace_packages.setdefault(parent or None, []).append(packageName) + _namespace_packages.setdefault(packageName, []) + + for path_item in path: + # Ensure all the parent's path items are reflected in the child, + # if they apply + _handle_ns(packageName, path_item) + + finally: + _imp.release_lock() + + +def fixup_namespace_packages(path_item, parent=None): + """Ensure that previously-declared namespace packages include path_item""" + _imp.acquire_lock() + try: + for package in _namespace_packages.get(parent, ()): + subpath = _handle_ns(package, path_item) + if subpath: + fixup_namespace_packages(subpath, package) + finally: + _imp.release_lock() + + +def file_ns_handler(importer, path_item, packageName, module): + """Compute an ns-package subpath for a filesystem or zipfile importer""" + + subpath = os.path.join(path_item, packageName.split('.')[-1]) + normalized = _normalize_cached(subpath) + for item in module.__path__: + if _normalize_cached(item) == normalized: + break + else: + # Only return the path if it's not already there + 
return subpath + + +register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) +register_namespace_handler(zipimport.zipimporter, file_ns_handler) + +if hasattr(importlib_machinery, 'FileFinder'): + register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) + + +def null_ns_handler(importer, path_item, packageName, module): + return None + + +register_namespace_handler(object, null_ns_handler) + + +def normalize_path(filename): + """Normalize a file/dir name for comparison purposes""" + return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename)))) + + +def _cygwin_patch(filename): # pragma: nocover + """ + Contrary to POSIX 2008, on Cygwin, getcwd (3) contains + symlink components. Using + os.path.abspath() works around this limitation. A fix in os.getcwd() + would probably better, in Cygwin even more so, except + that this seems to be by design... + """ + return os.path.abspath(filename) if sys.platform == 'cygwin' else filename + + +def _normalize_cached(filename, _cache={}): + try: + return _cache[filename] + except KeyError: + _cache[filename] = result = normalize_path(filename) + return result + + +def _is_egg_path(path): + """ + Determine if given path appears to be an egg. + """ + return path.lower().endswith('.egg') + + +def _is_unpacked_egg(path): + """ + Determine if given path appears to be an unpacked egg. + """ + return ( + _is_egg_path(path) and + os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) + ) + + +def _set_parent_ns(packageName): + parts = packageName.split('.') + name = parts.pop() + if parts: + parent = '.'.join(parts) + setattr(sys.modules[parent], name, sys.modules[packageName]) + + +def yield_lines(strs): + """Yield non-empty/non-comment lines of a string or sequence""" + if isinstance(strs, six.string_types): + for s in strs.splitlines(): + s = s.strip() + # skip blank lines/comments + if s and not s.startswith('#'): + yield s + else: + for ss in strs: + for s in yield_lines(ss): + yield s + + +MODULE = re.compile(r"\w+(\.\w+)*$").match +EGG_NAME = re.compile( + r""" + (?P<name>[^-]+) ( + -(?P<ver>[^-]+) ( + -py(?P<pyver>[^-]+) ( + -(?P<plat>.+) + )? + )? + )? + """, + re.VERBOSE | re.IGNORECASE, +).match + + +class EntryPoint: + """Object representing an advertised importable object""" + + def __init__(self, name, module_name, attrs=(), extras=(), dist=None): + if not MODULE(module_name): + raise ValueError("Invalid module name", module_name) + self.name = name + self.module_name = module_name + self.attrs = tuple(attrs) + self.extras = tuple(extras) + self.dist = dist + + def __str__(self): + s = "%s = %s" % (self.name, self.module_name) + if self.attrs: + s += ':' + '.'.join(self.attrs) + if self.extras: + s += ' [%s]' % ','.join(self.extras) + return s + + def __repr__(self): + return "EntryPoint.parse(%r)" % str(self) + + def load(self, require=True, *args, **kwargs): + """ + Require packages for this EntryPoint, then resolve it. + """ + if not require or args or kwargs: + warnings.warn( + "Parameters to load are deprecated. Call .resolve and " + ".require separately.", + PkgResourcesDeprecationWarning, + stacklevel=2, + ) + if require: + self.require(*args, **kwargs) + return self.resolve() + + def resolve(self): + """ + Resolve the entry point from its module and attrs. 
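+
+        For an entry point parsed from ``'main = pkg.cli:app.run'`` (a
+        hypothetical spec), this is roughly equivalent to::
+
+            module = __import__('pkg.cli', fromlist=['__name__'], level=0)
+            target = functools.reduce(getattr, ('app', 'run'), module)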
+ """ + module = __import__(self.module_name, fromlist=['__name__'], level=0) + try: + return functools.reduce(getattr, self.attrs, module) + except AttributeError as exc: + raise ImportError(str(exc)) + + def require(self, env=None, installer=None): + if self.extras and not self.dist: + raise UnknownExtra("Can't require() without a distribution", self) + + # Get the requirements for this entry point with all its extras and + # then resolve them. We have to pass `extras` along when resolving so + # that the working set knows what extras we want. Otherwise, for + # dist-info distributions, the working set will assume that the + # requirements for that extra are purely optional and skip over them. + reqs = self.dist.requires(self.extras) + items = working_set.resolve(reqs, env, installer, extras=self.extras) + list(map(working_set.add, items)) + + pattern = re.compile( + r'\s*' + r'(?P<name>.+?)\s*' + r'=\s*' + r'(?P<module>[\w.]+)\s*' + r'(:\s*(?P<attr>[\w.]+))?\s*' + r'(?P<extras>\[.*\])?\s*$' + ) + + @classmethod + def parse(cls, src, dist=None): + """Parse a single entry point from string `src` + + Entry point syntax follows the form:: + + name = some.module:some.attr [extra1, extra2] + + The entry name and module name are required, but the ``:attrs`` and + ``[extras]`` parts are optional + """ + m = cls.pattern.match(src) + if not m: + msg = "EntryPoint must be in 'name=module:attrs [extras]' format" + raise ValueError(msg, src) + res = m.groupdict() + extras = cls._parse_extras(res['extras']) + attrs = res['attr'].split('.') if res['attr'] else () + return cls(res['name'], res['module'], attrs, extras, dist) + + @classmethod + def _parse_extras(cls, extras_spec): + if not extras_spec: + return () + req = Requirement.parse('x' + extras_spec) + if req.specs: + raise ValueError() + return req.extras + + @classmethod + def parse_group(cls, group, lines, dist=None): + """Parse an entry point group""" + if not MODULE(group): + raise ValueError("Invalid group name", group) + this = {} + for line in yield_lines(lines): + ep = cls.parse(line, dist) + if ep.name in this: + raise ValueError("Duplicate entry point", group, ep.name) + this[ep.name] = ep + return this + + @classmethod + def parse_map(cls, data, dist=None): + """Parse a map of entry point groups""" + if isinstance(data, dict): + data = data.items() + else: + data = split_sections(data) + maps = {} + for group, lines in data: + if group is None: + if not lines: + continue + raise ValueError("Entry points must be listed in groups") + group = group.strip() + if group in maps: + raise ValueError("Duplicate group name", group) + maps[group] = cls.parse_group(group, lines, dist) + return maps + + +def _remove_md5_fragment(location): + if not location: + return '' + parsed = urllib.parse.urlparse(location) + if parsed[-1].startswith('md5='): + return urllib.parse.urlunparse(parsed[:-1] + ('',)) + return location + + +def _version_from_file(lines): + """ + Given an iterable of lines from a Metadata file, return + the value of the Version field, if present, or None otherwise. 
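+
+    For example (illustrative)::
+
+        >>> _version_from_file(['Name: demo', 'Version: 1.0'])
+        '1.0'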
+ """ + def is_version_line(line): + return line.lower().startswith('version:') + version_lines = filter(is_version_line, lines) + line = next(iter(version_lines), '') + _, _, value = line.partition(':') + return safe_version(value.strip()) or None + + +class Distribution: + """Wrap an actual or potential sys.path entry w/metadata""" + PKG_INFO = 'PKG-INFO' + + def __init__( + self, location=None, metadata=None, project_name=None, + version=None, py_version=PY_MAJOR, platform=None, + precedence=EGG_DIST): + self.project_name = safe_name(project_name or 'Unknown') + if version is not None: + self._version = safe_version(version) + self.py_version = py_version + self.platform = platform + self.location = location + self.precedence = precedence + self._provider = metadata or empty_provider + + @classmethod + def from_location(cls, location, basename, metadata=None, **kw): + project_name, version, py_version, platform = [None] * 4 + basename, ext = os.path.splitext(basename) + if ext.lower() in _distributionImpl: + cls = _distributionImpl[ext.lower()] + + match = EGG_NAME(basename) + if match: + project_name, version, py_version, platform = match.group( + 'name', 'ver', 'pyver', 'plat' + ) + return cls( + location, metadata, project_name=project_name, version=version, + py_version=py_version, platform=platform, **kw + )._reload_version() + + def _reload_version(self): + return self + + @property + def hashcmp(self): + return ( + self.parsed_version, + self.precedence, + self.key, + _remove_md5_fragment(self.location), + self.py_version or '', + self.platform or '', + ) + + def __hash__(self): + return hash(self.hashcmp) + + def __lt__(self, other): + return self.hashcmp < other.hashcmp + + def __le__(self, other): + return self.hashcmp <= other.hashcmp + + def __gt__(self, other): + return self.hashcmp > other.hashcmp + + def __ge__(self, other): + return self.hashcmp >= other.hashcmp + + def __eq__(self, other): + if not isinstance(other, self.__class__): + # It's not a Distribution, so they are not equal + return False + return self.hashcmp == other.hashcmp + + def __ne__(self, other): + return not self == other + + # These properties have to be lazy so that we don't have to load any + # metadata until/unless it's actually needed. (i.e., some distributions + # may not know their name or version without loading PKG-INFO) + + @property + def key(self): + try: + return self._key + except AttributeError: + self._key = key = self.project_name.lower() + return key + + @property + def parsed_version(self): + if not hasattr(self, "_parsed_version"): + self._parsed_version = parse_version(self.version) + + return self._parsed_version + + def _warn_legacy_version(self): + LV = packaging.version.LegacyVersion + is_legacy = isinstance(self._parsed_version, LV) + if not is_legacy: + return + + # While an empty version is technically a legacy version and + # is not a valid PEP 440 version, it's also unlikely to + # actually come from someone and instead it is more likely that + # it comes from setuptools attempting to parse a filename and + # including it in the list. So for that we'll gate this warning + # on if the version is anything at all or not. + if not self.version: + return + + tmpl = textwrap.dedent(""" + '{project_name} ({version})' is being parsed as a legacy, + non PEP 440, + version. You may find odd behavior and sort order. + In particular it will be sorted as less than 0.0. It + is recommended to migrate to PEP 440 compatible + versions. 
+ """).strip().replace('\n', ' ') + + warnings.warn(tmpl.format(**vars(self)), PEP440Warning) + + @property + def version(self): + try: + return self._version + except AttributeError: + version = _version_from_file(self._get_metadata(self.PKG_INFO)) + if version is None: + tmpl = "Missing 'Version:' header and/or %s file" + raise ValueError(tmpl % self.PKG_INFO, self) + return version + + @property + def _dep_map(self): + """ + A map of extra to its list of (direct) requirements + for this distribution, including the null extra. + """ + try: + return self.__dep_map + except AttributeError: + self.__dep_map = self._filter_extras(self._build_dep_map()) + return self.__dep_map + + @staticmethod + def _filter_extras(dm): + """ + Given a mapping of extras to dependencies, strip off + environment markers and filter out any dependencies + not matching the markers. + """ + for extra in list(filter(None, dm)): + new_extra = extra + reqs = dm.pop(extra) + new_extra, _, marker = extra.partition(':') + fails_marker = marker and ( + invalid_marker(marker) + or not evaluate_marker(marker) + ) + if fails_marker: + reqs = [] + new_extra = safe_extra(new_extra) or None + + dm.setdefault(new_extra, []).extend(reqs) + return dm + + def _build_dep_map(self): + dm = {} + for name in 'requires.txt', 'depends.txt': + for extra, reqs in split_sections(self._get_metadata(name)): + dm.setdefault(extra, []).extend(parse_requirements(reqs)) + return dm + + def requires(self, extras=()): + """List of Requirements needed for this distro if `extras` are used""" + dm = self._dep_map + deps = [] + deps.extend(dm.get(None, ())) + for ext in extras: + try: + deps.extend(dm[safe_extra(ext)]) + except KeyError: + raise UnknownExtra( + "%s has no such extra feature %r" % (self, ext) + ) + return deps + + def _get_metadata(self, name): + if self.has_metadata(name): + for line in self.get_metadata_lines(name): + yield line + + def activate(self, path=None, replace=False): + """Ensure distribution is importable on `path` (default=sys.path)""" + if path is None: + path = sys.path + self.insert_on(path, replace=replace) + if path is sys.path: + fixup_namespace_packages(self.location) + for pkg in self._get_metadata('namespace_packages.txt'): + if pkg in sys.modules: + declare_namespace(pkg) + + def egg_name(self): + """Return what this distribution's standard .egg filename should be""" + filename = "%s-%s-py%s" % ( + to_filename(self.project_name), to_filename(self.version), + self.py_version or PY_MAJOR + ) + + if self.platform: + filename += '-' + self.platform + return filename + + def __repr__(self): + if self.location: + return "%s (%s)" % (self, self.location) + else: + return str(self) + + def __str__(self): + try: + version = getattr(self, 'version', None) + except ValueError: + version = None + version = version or "[unknown version]" + return "%s %s" % (self.project_name, version) + + def __getattr__(self, attr): + """Delegate all unrecognized public attributes to .metadata provider""" + if attr.startswith('_'): + raise AttributeError(attr) + return getattr(self._provider, attr) + + def __dir__(self): + return list( + set(super(Distribution, self).__dir__()) + | set( + attr for attr in self._provider.__dir__() + if not attr.startswith('_') + ) + ) + + if not hasattr(object, '__dir__'): + # python 2.7 not supported + del __dir__ + + @classmethod + def from_filename(cls, filename, metadata=None, **kw): + return cls.from_location( + _normalize_cached(filename), os.path.basename(filename), metadata, + **kw + ) + + def 
as_requirement(self): + """Return a ``Requirement`` that matches this distribution exactly""" + if isinstance(self.parsed_version, packaging.version.Version): + spec = "%s==%s" % (self.project_name, self.parsed_version) + else: + spec = "%s===%s" % (self.project_name, self.parsed_version) + + return Requirement.parse(spec) + + def load_entry_point(self, group, name): + """Return the `name` entry point of `group` or raise ImportError""" + ep = self.get_entry_info(group, name) + if ep is None: + raise ImportError("Entry point %r not found" % ((group, name),)) + return ep.load() + + def get_entry_map(self, group=None): + """Return the entry point map for `group`, or the full entry map""" + try: + ep_map = self._ep_map + except AttributeError: + ep_map = self._ep_map = EntryPoint.parse_map( + self._get_metadata('entry_points.txt'), self + ) + if group is not None: + return ep_map.get(group, {}) + return ep_map + + def get_entry_info(self, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return self.get_entry_map(group).get(name) + + def insert_on(self, path, loc=None, replace=False): + """Ensure self.location is on path + + If replace=False (default): + - If location is already in path anywhere, do nothing. + - Else: + - If it's an egg and its parent directory is on path, + insert just ahead of the parent. + - Else: add to the end of path. + If replace=True: + - If location is already on path anywhere (not eggs) + or higher priority than its parent (eggs) + do nothing. + - Else: + - If it's an egg and its parent directory is on path, + insert just ahead of the parent, + removing any lower-priority entries. + - Else: add it to the front of path. + """ + + loc = loc or self.location + if not loc: + return + + nloc = _normalize_cached(loc) + bdir = os.path.dirname(nloc) + npath = [(p and _normalize_cached(p) or p) for p in path] + + for p, item in enumerate(npath): + if item == nloc: + if replace: + break + else: + # don't modify path (even removing duplicates) if + # found and not replace + return + elif item == bdir and self.precedence == EGG_DIST: + # if it's an .egg, give it precedence over its directory + # UNLESS it's already been added to sys.path and replace=False + if (not replace) and nloc in npath[p:]: + return + if path is sys.path: + self.check_version_conflict() + path.insert(p, loc) + npath.insert(p, nloc) + break + else: + if path is sys.path: + self.check_version_conflict() + if replace: + path.insert(0, loc) + else: + path.append(loc) + return + + # p is the spot where we found or inserted loc; now remove duplicates + while True: + try: + np = npath.index(nloc, p + 1) + except ValueError: + break + else: + del npath[np], path[np] + # ha! 
+ p = np + + return + + def check_version_conflict(self): + if self.key == 'setuptools': + # ignore the inevitable setuptools self-conflicts :( + return + + nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) + loc = normalize_path(self.location) + for modname in self._get_metadata('top_level.txt'): + if (modname not in sys.modules or modname in nsp + or modname in _namespace_packages): + continue + if modname in ('pkg_resources', 'setuptools', 'site'): + continue + fn = getattr(sys.modules[modname], '__file__', None) + if fn and (normalize_path(fn).startswith(loc) or + fn.startswith(self.location)): + continue + issue_warning( + "Module %s was already imported from %s, but %s is being added" + " to sys.path" % (modname, fn, self.location), + ) + + def has_version(self): + try: + self.version + except ValueError: + issue_warning("Unbuilt egg for " + repr(self)) + return False + return True + + def clone(self, **kw): + """Copy this distribution, substituting in any changed keyword args""" + names = 'project_name version py_version platform location precedence' + for attr in names.split(): + kw.setdefault(attr, getattr(self, attr, None)) + kw.setdefault('metadata', self._provider) + return self.__class__(**kw) + + @property + def extras(self): + return [dep for dep in self._dep_map if dep] + + +class EggInfoDistribution(Distribution): + def _reload_version(self): + """ + Packages installed by distutils (e.g. numpy or scipy), + which uses an old safe_version, and so + their version numbers can get mangled when + converted to filenames (e.g., 1.11.0.dev0+2329eae to + 1.11.0.dev0_2329eae). These distributions will not be + parsed properly + downstream by Distribution and safe_version, so + take an extra step and try to get the version number from + the metadata file itself instead of the filename. + """ + md_version = _version_from_file(self._get_metadata(self.PKG_INFO)) + if md_version: + self._version = md_version + return self + + +class DistInfoDistribution(Distribution): + """ + Wrap an actual or potential sys.path entry + w/metadata, .dist-info style. 
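+
+    Construction follows the same pattern as the other distribution
+    classes; ``from_filename`` dispatches here for ``.dist-info`` paths
+    (the path below is hypothetical)::
+
+        dist = Distribution.from_filename('/site/demo-1.0.dist-info')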
+ """ + PKG_INFO = 'METADATA' + EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") + + @property + def _parsed_pkg_info(self): + """Parse and cache metadata""" + try: + return self._pkg_info + except AttributeError: + metadata = self.get_metadata(self.PKG_INFO) + self._pkg_info = email.parser.Parser().parsestr(metadata) + return self._pkg_info + + @property + def _dep_map(self): + try: + return self.__dep_map + except AttributeError: + self.__dep_map = self._compute_dependencies() + return self.__dep_map + + def _compute_dependencies(self): + """Recompute this distribution's dependencies.""" + dm = self.__dep_map = {None: []} + + reqs = [] + # Including any condition expressions + for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: + reqs.extend(parse_requirements(req)) + + def reqs_for_extra(extra): + for req in reqs: + if not req.marker or req.marker.evaluate({'extra': extra}): + yield req + + common = frozenset(reqs_for_extra(None)) + dm[None].extend(common) + + for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: + s_extra = safe_extra(extra.strip()) + dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) + + return dm + + +_distributionImpl = { + '.egg': Distribution, + '.egg-info': EggInfoDistribution, + '.dist-info': DistInfoDistribution, +} + + +def issue_warning(*args, **kw): + level = 1 + g = globals() + try: + # find the first stack frame that is *not* code in + # the pkg_resources module, to use for the warning + while sys._getframe(level).f_globals is g: + level += 1 + except ValueError: + pass + warnings.warn(stacklevel=level + 1, *args, **kw) + + +class RequirementParseError(ValueError): + def __str__(self): + return ' '.join(self.args) + + +def parse_requirements(strs): + """Yield ``Requirement`` objects for each specification in `strs` + + `strs` must be a string, or a (possibly-nested) iterable thereof. + """ + # create a steppable iterator, so we can handle \-continuations + lines = iter(yield_lines(strs)) + + for line in lines: + # Drop comments -- a hash without a space may be in a URL. + if ' #' in line: + line = line[:line.find(' #')] + # If there is a line continuation, drop it, and append the next line. + if line.endswith('\\'): + line = line[:-2].strip() + try: + line += next(lines) + except StopIteration: + return + yield Requirement(line) + + +class Requirement(packaging.requirements.Requirement): + def __init__(self, requirement_string): + """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" + try: + super(Requirement, self).__init__(requirement_string) + except packaging.requirements.InvalidRequirement as e: + raise RequirementParseError(str(e)) + self.unsafe_name = self.name + project_name = safe_name(self.name) + self.project_name, self.key = project_name, project_name.lower() + self.specs = [ + (spec.operator, spec.version) for spec in self.specifier] + self.extras = tuple(map(safe_extra, self.extras)) + self.hashCmp = ( + self.key, + self.specifier, + frozenset(self.extras), + str(self.marker) if self.marker else None, + ) + self.__hash = hash(self.hashCmp) + + def __eq__(self, other): + return ( + isinstance(other, Requirement) and + self.hashCmp == other.hashCmp + ) + + def __ne__(self, other): + return not self == other + + def __contains__(self, item): + if isinstance(item, Distribution): + if item.key != self.key: + return False + + item = item.version + + # Allow prereleases always in order to match the previous behavior of + # this method. 
In the future this should be smarter and follow PEP 440 + # more accurately. + return self.specifier.contains(item, prereleases=True) + + def __hash__(self): + return self.__hash + + def __repr__(self): + return "Requirement.parse(%r)" % str(self) + + @staticmethod + def parse(s): + req, = parse_requirements(s) + return req + + +def _always_object(classes): + """ + Ensure object appears in the mro even + for old-style classes. + """ + if object not in classes: + return classes + (object,) + return classes + + +def _find_adapter(registry, ob): + """Return an adapter factory for `ob` from `registry`""" + types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) + for t in types: + if t in registry: + return registry[t] + + +def ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + py31compat.makedirs(dirname, exist_ok=True) + + +def _bypass_ensure_directory(path): + """Sandbox-bypassing version of ensure_directory()""" + if not WRITE_SUPPORT: + raise IOError('"os.mkdir" not supported on this platform.') + dirname, filename = split(path) + if dirname and filename and not isdir(dirname): + _bypass_ensure_directory(dirname) + try: + mkdir(dirname, 0o755) + except FileExistsError: + pass + + +def split_sections(s): + """Split a string or iterable thereof into (section, content) pairs + + Each ``section`` is a stripped version of the section header ("[section]") + and each ``content`` is a list of stripped lines excluding blank lines and + comment-only lines. If there are any such lines before the first section + header, they're returned in a first ``section`` of ``None``. + """ + section = None + content = [] + for line in yield_lines(s): + if line.startswith("["): + if line.endswith("]"): + if section or content: + yield section, content + section = line[1:-1].strip() + content = [] + else: + raise ValueError("Invalid section heading", line) + else: + content.append(line) + + # wrap up last segment + yield section, content + + +def _mkstemp(*args, **kw): + old_open = os.open + try: + # temporarily bypass sandboxing + os.open = os_open + return tempfile.mkstemp(*args, **kw) + finally: + # and then put it back + os.open = old_open + + +# Silence the PEP440Warning by default, so that end users don't get hit by it +# randomly just because they use pkg_resources. We want to append the rule +# because we want earlier uses of filterwarnings to take precedence over this +# one. +warnings.filterwarnings("ignore", category=PEP440Warning, append=True) + + +# from jaraco.functools 1.3 +def _call_aside(f, *args, **kwargs): + f(*args, **kwargs) + return f + + +@_call_aside +def _initialize(g=globals()): + "Set up global resource manager (deliberately not state-saved)" + manager = ResourceManager() + g['_manager'] = manager + g.update( + (name, getattr(manager, name)) + for name in dir(manager) + if not name.startswith('_') + ) + + +@_call_aside +def _initialize_master_working_set(): + """ + Prepare the master working set and make the ``require()`` + API available. + + This function has explicit effects on the global state + of pkg_resources. It is intended to be invoked once at + the initialization of this module. + + Invocation by other packages is unsupported and done + at their own risk. 
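+
+    Once it has run, the module-level convenience API is in place; for
+    example (illustrative)::
+
+        import pkg_resources
+        pkg_resources.require('setuptools')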
+ """ + working_set = WorkingSet._build_master() + _declare_state('object', working_set=working_set) + + require = working_set.require + iter_entry_points = working_set.iter_entry_points + add_activation_listener = working_set.subscribe + run_script = working_set.run_script + # backward compatibility + run_main = run_script + # Activate all distributions already on sys.path with replace=False and + # ensure that all distributions added to the working set in the future + # (e.g. by calling ``require()``) will get activated as well, + # with higher priority (replace=True). + tuple( + dist.activate(replace=False) + for dist in working_set + ) + add_activation_listener( + lambda dist: dist.activate(replace=True), + existing=False, + ) + working_set.entries = [] + # match order + list(map(working_set.add_entry, sys.path)) + globals().update(locals()) + +class PkgResourcesDeprecationWarning(Warning): + """ + Base class for warning about deprecations in ``pkg_resources`` + + This class is not derived from ``DeprecationWarning``, and as such is + visible by default. + """ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyc new file mode 100644 index 0000000..202823c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/py31compat.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/py31compat.py new file mode 100644 index 0000000..a2d3007 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/py31compat.py @@ -0,0 +1,23 @@ +import os +import errno +import sys + +from pip._vendor import six + + +def _makedirs_31(path, exist_ok=False): + try: + os.makedirs(path) + except OSError as exc: + if not exist_ok or exc.errno != errno.EEXIST: + raise + + +# rely on compatibility behavior until mode considerations +# and exists_ok considerations are disentangled. +# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663 +needs_makedirs = ( + six.PY2 or + (3, 4) <= sys.version_info < (3, 4, 1) +) +makedirs = _makedirs_31 if needs_makedirs else os.makedirs diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/py31compat.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/py31compat.pyc new file mode 100644 index 0000000..e80464b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/py31compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/__init__.py new file mode 100644 index 0000000..a41f65d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/__init__.py @@ -0,0 +1,127 @@ +# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from __future__ import division + +from collections import deque +from datetime import timedelta +from math import ceil +from sys import stderr +from time import time + + +__version__ = '1.4' + + +class Infinite(object): + file = stderr + sma_window = 10 # Simple Moving Average window + + def __init__(self, *args, **kwargs): + self.index = 0 + self.start_ts = time() + self.avg = 0 + self._ts = self.start_ts + self._xput = deque(maxlen=self.sma_window) + for key, val in kwargs.items(): + setattr(self, key, val) + + def __getitem__(self, key): + if key.startswith('_'): + return None + return getattr(self, key, None) + + @property + def elapsed(self): + return int(time() - self.start_ts) + + @property + def elapsed_td(self): + return timedelta(seconds=self.elapsed) + + def update_avg(self, n, dt): + if n > 0: + self._xput.append(dt / n) + self.avg = sum(self._xput) / len(self._xput) + + def update(self): + pass + + def start(self): + pass + + def finish(self): + pass + + def next(self, n=1): + now = time() + dt = now - self._ts + self.update_avg(n, dt) + self._ts = now + self.index = self.index + n + self.update() + + def iter(self, it): + try: + for x in it: + yield x + self.next() + finally: + self.finish() + + +class Progress(Infinite): + def __init__(self, *args, **kwargs): + super(Progress, self).__init__(*args, **kwargs) + self.max = kwargs.get('max', 100) + + @property + def eta(self): + return int(ceil(self.avg * self.remaining)) + + @property + def eta_td(self): + return timedelta(seconds=self.eta) + + @property + def percent(self): + return self.progress * 100 + + @property + def progress(self): + return min(1, self.index / self.max) + + @property + def remaining(self): + return max(self.max - self.index, 0) + + def start(self): + self.update() + + def goto(self, index): + incr = index - self.index + self.next(incr) + + def iter(self, it): + try: + self.max = len(it) + except TypeError: + pass + + try: + for x in it: + yield x + self.next() + finally: + self.finish() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/__init__.pyc new file mode 100644 index 0000000..3367216 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/bar.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/bar.py new file mode 100644 index 0000000..025e61c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/bar.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from __future__ import unicode_literals + +import sys + +from . import Progress +from .helpers import WritelnMixin + + +class Bar(WritelnMixin, Progress): + width = 32 + message = '' + suffix = '%(index)d/%(max)d' + bar_prefix = ' |' + bar_suffix = '| ' + empty_fill = ' ' + fill = '#' + hide_cursor = True + + def update(self): + filled_length = int(self.width * self.progress) + empty_length = self.width - filled_length + + message = self.message % self + bar = self.fill * filled_length + empty = self.empty_fill * empty_length + suffix = self.suffix % self + line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix, + suffix]) + self.writeln(line) + + +class ChargingBar(Bar): + suffix = '%(percent)d%%' + bar_prefix = ' ' + bar_suffix = ' ' + empty_fill = '∙' + fill = '█' + + +class FillingSquaresBar(ChargingBar): + empty_fill = '▢' + fill = '▣' + + +class FillingCirclesBar(ChargingBar): + empty_fill = '◯' + fill = '◉' + + +class IncrementalBar(Bar): + if sys.platform.startswith('win'): + phases = (u' ', u'▌', u'█') + else: + phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█') + + def update(self): + nphases = len(self.phases) + filled_len = self.width * self.progress + nfull = int(filled_len) # Number of full chars + phase = int((filled_len - nfull) * nphases) # Phase of last char + nempty = self.width - nfull # Number of empty chars + + message = self.message % self + bar = self.phases[-1] * nfull + current = self.phases[phase] if phase > 0 else '' + empty = self.empty_fill * max(0, nempty - len(current)) + suffix = self.suffix % self + line = ''.join([message, self.bar_prefix, bar, current, empty, + self.bar_suffix, suffix]) + self.writeln(line) + + +class PixelBar(IncrementalBar): + phases = ('⡀', '⡄', '⡆', '⡇', '⣇', '⣧', '⣷', '⣿') + + +class ShadyBar(IncrementalBar): + phases = (' ', '░', '▒', '▓', '█') diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/bar.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/bar.pyc new file mode 100644 index 0000000..85ae217 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/bar.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/counter.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/counter.py new file mode 100644 index 0000000..6b45a1e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/counter.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from __future__ import unicode_literals +from . import Infinite, Progress +from .helpers import WriteMixin + + +class Counter(WriteMixin, Infinite): + message = '' + hide_cursor = True + + def update(self): + self.write(str(self.index)) + + +class Countdown(WriteMixin, Progress): + hide_cursor = True + + def update(self): + self.write(str(self.remaining)) + + +class Stack(WriteMixin, Progress): + phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█') + hide_cursor = True + + def update(self): + nphases = len(self.phases) + i = min(nphases - 1, int(self.progress * nphases)) + self.write(self.phases[i]) + + +class Pie(Stack): + phases = ('○', '◔', '◑', '◕', '●') diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/counter.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/counter.pyc new file mode 100644 index 0000000..5584033 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/counter.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/helpers.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/helpers.py new file mode 100644 index 0000000..0cde44e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/helpers.py @@ -0,0 +1,91 @@ +# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
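+
+# The mixins below supply the terminal I/O for the progress classes; a
+# minimal usage sketch (``Counter`` is defined in ``progress.counter``):
+#
+#     from pip._vendor.progress.counter import Counter
+#     counter = Counter('Processed ')
+#     for _ in range(100):
+#         counter.next()
+#     counter.finish()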
+ +from __future__ import print_function + + +HIDE_CURSOR = '\x1b[?25l' +SHOW_CURSOR = '\x1b[?25h' + + +class WriteMixin(object): + hide_cursor = False + + def __init__(self, message=None, **kwargs): + super(WriteMixin, self).__init__(**kwargs) + self._width = 0 + if message: + self.message = message + + if self.file and self.file.isatty(): + if self.hide_cursor: + print(HIDE_CURSOR, end='', file=self.file) + print(self.message, end='', file=self.file) + self.file.flush() + + def write(self, s): + if self.file and self.file.isatty(): + b = '\b' * self._width + c = s.ljust(self._width) + print(b + c, end='', file=self.file) + self._width = max(self._width, len(s)) + self.file.flush() + + def finish(self): + if self.file and self.file.isatty() and self.hide_cursor: + print(SHOW_CURSOR, end='', file=self.file) + + +class WritelnMixin(object): + hide_cursor = False + + def __init__(self, message=None, **kwargs): + super(WritelnMixin, self).__init__(**kwargs) + if message: + self.message = message + + if self.file and self.file.isatty() and self.hide_cursor: + print(HIDE_CURSOR, end='', file=self.file) + + def clearln(self): + if self.file and self.file.isatty(): + print('\r\x1b[K', end='', file=self.file) + + def writeln(self, line): + if self.file and self.file.isatty(): + self.clearln() + print(line, end='', file=self.file) + self.file.flush() + + def finish(self): + if self.file and self.file.isatty(): + print(file=self.file) + if self.hide_cursor: + print(SHOW_CURSOR, end='', file=self.file) + + +from signal import signal, SIGINT +from sys import exit + + +class SigIntMixin(object): + """Registers a signal handler that calls finish on SIGINT""" + + def __init__(self, *args, **kwargs): + super(SigIntMixin, self).__init__(*args, **kwargs) + signal(SIGINT, self._sigint_handler) + + def _sigint_handler(self, signum, frame): + self.finish() + exit(0) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/helpers.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/helpers.pyc new file mode 100644 index 0000000..8f9025f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/helpers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/spinner.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/spinner.py new file mode 100644 index 0000000..464c7b2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/spinner.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +from __future__ import unicode_literals +from . 
import Infinite +from .helpers import WriteMixin + + +class Spinner(WriteMixin, Infinite): + message = '' + phases = ('-', '\\', '|', '/') + hide_cursor = True + + def update(self): + i = self.index % len(self.phases) + self.write(self.phases[i]) + + +class PieSpinner(Spinner): + phases = ['◷', '◶', '◵', '◴'] + + +class MoonSpinner(Spinner): + phases = ['◑', '◒', '◐', '◓'] + + +class LineSpinner(Spinner): + phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻'] + +class PixelSpinner(Spinner): + phases = ['⣾','⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽'] diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/spinner.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/spinner.pyc new file mode 100644 index 0000000..b410a4f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/progress/spinner.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pyparsing.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pyparsing.py new file mode 100644 index 0000000..bea4d9c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pyparsing.py @@ -0,0 +1,6452 @@ +#-*- coding: utf-8 -*- +# module pyparsing.py +# +# Copyright (c) 2003-2019 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +__doc__ = \ +""" +pyparsing module - Classes and methods to define and execute parsing grammars +============================================================================= + +The pyparsing module is an alternative approach to creating and +executing simple grammars, vs. the traditional lex/yacc approach, or the +use of regular expressions. With pyparsing, you don't need to learn +a new syntax for defining grammars or matching expressions - the parsing +module provides a library of classes that you use to construct the +grammar directly in Python. + +Here is a program to parse "Hello, World!" (or any greeting of the form +``"<salutation>, <addressee>!"``), built up using :class:`Word`, +:class:`Literal`, and :class:`And` elements +(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions, +and the strings are auto-converted to :class:`Literal` expressions):: + + from pip._vendor.pyparsing import Word, alphas + + # define grammar of a greeting + greet = Word(alphas) + "," + Word(alphas) + "!" + + hello = "Hello, World!" + print (hello, "->", greet.parseString(hello)) + +The program outputs the following:: + + Hello, World! 
-> ['Hello', ',', 'World', '!'] + +The Python representation of the grammar is quite readable, owing to the +self-explanatory class names, and the use of '+', '|' and '^' operators. + +The :class:`ParseResults` object returned from +:class:`ParserElement.parseString` can be +accessed as a nested list, a dictionary, or an object with named +attributes. + +The pyparsing module handles some of the problems that are typically +vexing when writing text parsers: + + - extra or missing whitespace (the above program will also handle + "Hello,World!", "Hello , World !", etc.) + - quoted strings + - embedded comments + + +Getting Started - +----------------- +Visit the classes :class:`ParserElement` and :class:`ParseResults` to +see the base classes that most other pyparsing +classes inherit from. Use the docstrings for examples of how to: + + - construct literal match expressions from :class:`Literal` and + :class:`CaselessLiteral` classes + - construct character word-group expressions using the :class:`Word` + class + - see how to create repetitive expressions using :class:`ZeroOrMore` + and :class:`OneOrMore` classes + - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`, + and :class:`'&'<Each>` operators to combine simple expressions into + more complex ones + - associate names with your parsed results using + :class:`ParserElement.setResultsName` + - find some helpful expression short-cuts like :class:`delimitedList` + and :class:`oneOf` + - find more useful common expressions in the :class:`pyparsing_common` + namespace class +""" + +__version__ = "2.3.1" +__versionTime__ = "09 Jan 2019 23:26 UTC" +__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" + +import string +from weakref import ref as wkref +import copy +import sys +import warnings +import re +import sre_constants +import collections +import pprint +import traceback +import types +from datetime import datetime + +try: + # Python 3 + from itertools import filterfalse +except ImportError: + from itertools import ifilterfalse as filterfalse + +try: + from _thread import RLock +except ImportError: + from threading import RLock + +try: + # Python 3 + from collections.abc import Iterable + from collections.abc import MutableMapping +except ImportError: + # Python 2.7 + from collections import Iterable + from collections import MutableMapping + +try: + from collections import OrderedDict as _OrderedDict +except ImportError: + try: + from ordereddict import OrderedDict as _OrderedDict + except ImportError: + _OrderedDict = None + +try: + from types import SimpleNamespace +except ImportError: + class SimpleNamespace: pass + + +#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) + +__all__ = [ +'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', +'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', +'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', +'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', +'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', +'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', +'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', +'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', +'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 
'cppStyleComment', 'dblQuotedString', +'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', +'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', +'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', +'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', +'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', +'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', +'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', +'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', +'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', +] + +system_version = tuple(sys.version_info)[:3] +PY_3 = system_version[0] == 3 +if PY_3: + _MAX_INT = sys.maxsize + basestring = str + unichr = chr + unicode = str + _ustr = str + + # build list of single arg builtins, that can be used as parse actions + singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] + +else: + _MAX_INT = sys.maxint + range = xrange + + def _ustr(obj): + """Drop-in replacement for str(obj) that tries to be Unicode + friendly. It first tries str(obj). If that fails with + a UnicodeEncodeError, then it tries unicode(obj). It then + < returns the unicode object | encodes it with the default + encoding | ... >. + """ + if isinstance(obj,unicode): + return obj + + try: + # If this works, then _ustr(obj) has the same behaviour as str(obj), so + # it won't break any existing code. + return str(obj) + + except UnicodeEncodeError: + # Else encode it + ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') + xmlcharref = Regex(r'&#\d+;') + xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) + return xmlcharref.transformString(ret) + + # build list of single arg builtins, tolerant of Python version, that can be used as parse actions + singleArgBuiltins = [] + import __builtin__ + for fname in "sum len sorted reversed list tuple set any all min max".split(): + try: + singleArgBuiltins.append(getattr(__builtin__,fname)) + except AttributeError: + continue + +_generatorType = type((y for y in range(1))) + +def _xml_escape(data): + """Escape &, <, >, ", ', etc. 
in a string of data.""" + + # ampersand must be replaced first + from_symbols = '&><"\'' + to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) + for from_,to_ in zip(from_symbols, to_symbols): + data = data.replace(from_, to_) + return data + +alphas = string.ascii_uppercase + string.ascii_lowercase +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +_bslash = chr(92) +printables = "".join(c for c in string.printable if c not in string.whitespace) + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( self, pstr, loc=0, msg=None, elem=None ): + self.loc = loc + if msg is None: + self.msg = pstr + self.pstr = "" + else: + self.msg = msg + self.pstr = pstr + self.parserElement = elem + self.args = (pstr, loc, msg) + + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) + + def __getattr__( self, aname ): + """supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + """ + if( aname == "lineno" ): + return lineno( self.loc, self.pstr ) + elif( aname in ("col", "column") ): + return col( self.loc, self.pstr ) + elif( aname == "line" ): + return line( self.loc, self.pstr ) + else: + raise AttributeError(aname) + + def __str__( self ): + return "%s (at char %d), (line:%d, col:%d)" % \ + ( self.msg, self.loc, self.lineno, self.column ) + def __repr__( self ): + return _ustr(self) + def markInputline( self, markerString = ">!<" ): + """Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. + """ + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = "".join((line_str[:line_column], + markerString, line_str[line_column:])) + return line_str.strip() + def __dir__(self): + return "lineno col line".split() + dir(type(self)) + +class ParseException(ParseBaseException): + """ + Exception thrown when parse expressions don't match class; + supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + + Example:: + + try: + Word(nums).setName("integer").parseString("ABC") + except ParseException as pe: + print(pe) + print("column: {}".format(pe.col)) + + prints:: + + Expected integer (at char 0), (line:1, col:1) + column: 1 + + """ + + @staticmethod + def explain(exc, depth=16): + """ + Method to take an exception and translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. 
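
        For instance (an illustrative sketch; requires Python 3, since the
        method walks ``exc.__traceback__``)::

            try:
                Word(nums).setName("integer").parseString("ABC")
            except ParseException as pe:
                print(ParseException.explain(pe))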
+ + Parameters: + + - exc - exception raised during parsing (need not be a ParseException, in support + of Python exceptions that might be raised in a parse action) + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `setName` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + explain() is only supported under Python 3. + """ + import inspect + + if depth is None: + depth = sys.getrecursionlimit() + ret = [] + if isinstance(exc, ParseBaseException): + ret.append(exc.line) + ret.append(' ' * (exc.col - 1) + '^') + ret.append("{0}: {1}".format(type(exc).__name__, exc)) + + if depth > 0: + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen = set() + for i, ff in enumerate(callers[-depth:]): + frm = ff.frame + + f_self = frm.f_locals.get('self', None) + if isinstance(f_self, ParserElement): + if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'): + continue + if f_self in seen: + continue + seen.add(f_self) + + self_type = type(f_self) + ret.append("{0}.{1} - {2}".format(self_type.__module__, + self_type.__name__, + f_self)) + elif f_self is not None: + self_type = type(f_self) + ret.append("{0}.{1}".format(self_type.__module__, + self_type.__name__)) + else: + code = frm.f_code + if code.co_name in ('wrapper', '<module>'): + continue + + ret.append("{0}".format(code.co_name)) + + depth -= 1 + if not depth: + break + + return '\n'.join(ret) + + +class ParseFatalException(ParseBaseException): + """user-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately""" + pass + +class ParseSyntaxException(ParseFatalException): + """just like :class:`ParseFatalException`, but thrown internally + when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates + that parsing is to stop immediately because an unbacktrackable + syntax error has been found. + """ + pass + +#~ class ReparseException(ParseBaseException): + #~ """Experimental class - parse actions can raise this exception to cause + #~ pyparsing to reparse the input string: + #~ - with a modified input string, and/or + #~ - with a modified start location + #~ Set the values of the ReparseException in the constructor, and raise the + #~ exception in a parse action to cause pyparsing to use the new string/location. + #~ Setting the values as None causes no change to be made. 
+ #~ """ + #~ def __init_( self, newstring, restartLoc ): + #~ self.newParseText = newstring + #~ self.reparseLoc = restartLoc + +class RecursiveGrammarException(Exception): + """exception thrown by :class:`ParserElement.validate` if the + grammar could be improperly recursive + """ + def __init__( self, parseElementList ): + self.parseElementTrace = parseElementList + + def __str__( self ): + return "RecursiveGrammarException: %s" % self.parseElementTrace + +class _ParseResultsWithOffset(object): + def __init__(self,p1,p2): + self.tup = (p1,p2) + def __getitem__(self,i): + return self.tup[i] + def __repr__(self): + return repr(self.tup[0]) + def setOffset(self,i): + self.tup = (self.tup[0],i) + +class ParseResults(object): + """Structured parse results, to provide multiple means of access to + the parsed data: + + - as a list (``len(results)``) + - by list index (``results[0], results[1]``, etc.) + - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`) + + Example:: + + integer = Word(nums) + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + # equivalent form: + # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + # parseString returns a ParseResults object + result = date_str.parseString("1999/12/31") + + def test(s, fn=repr): + print("%s -> %s" % (s, fn(eval(s)))) + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + + prints:: + + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: 31 + - month: 12 + - year: 1999 + """ + def __new__(cls, toklist=None, name=None, asList=True, modal=True ): + if isinstance(toklist, cls): + return toklist + retobj = object.__new__(cls) + retobj.__doinit = True + return retobj + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): + if self.__doinit: + self.__doinit = False + self.__name = None + self.__parent = None + self.__accumNames = {} + self.__asList = asList + self.__modal = modal + if toklist is None: + toklist = [] + if isinstance(toklist, list): + self.__toklist = toklist[:] + elif isinstance(toklist, _generatorType): + self.__toklist = list(toklist) + else: + self.__toklist = [toklist] + self.__tokdict = dict() + + if name is not None and name: + if not modal: + self.__accumNames[name] = 0 + if isinstance(name,int): + name = _ustr(name) # will always return a str, but use _ustr for consistency + self.__name = name + if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): + if isinstance(toklist,basestring): + toklist = [ toklist ] + if asList: + if isinstance(toklist,ParseResults): + self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0) + else: + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) + self[name].__name = name + else: + try: + self[name] = toklist[0] + except (KeyError,TypeError,IndexError): + self[name] = toklist + + def __getitem__( self, i ): + if isinstance( i, (int,slice) ): + return self.__toklist[i] + else: + if i not in self.__accumNames: + return 
self.__tokdict[i][-1][0] + else: + return ParseResults([ v[0] for v in self.__tokdict[i] ]) + + def __setitem__( self, k, v, isinstance=isinstance ): + if isinstance(v,_ParseResultsWithOffset): + self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] + sub = v[0] + elif isinstance(k,(int,slice)): + self.__toklist[k] = v + sub = v + else: + self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] + sub = v + if isinstance(sub,ParseResults): + sub.__parent = wkref(self) + + def __delitem__( self, i ): + if isinstance(i,(int,slice)): + mylen = len( self.__toklist ) + del self.__toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i+1) + # get removed indices + removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for name,occurrences in self.__tokdict.items(): + for j in removed: + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) + else: + del self.__tokdict[i] + + def __contains__( self, k ): + return k in self.__tokdict + + def __len__( self ): return len( self.__toklist ) + def __bool__(self): return ( not not self.__toklist ) + __nonzero__ = __bool__ + def __iter__( self ): return iter( self.__toklist ) + def __reversed__( self ): return iter( self.__toklist[::-1] ) + def _iterkeys( self ): + if hasattr(self.__tokdict, "iterkeys"): + return self.__tokdict.iterkeys() + else: + return iter(self.__tokdict) + + def _itervalues( self ): + return (self[k] for k in self._iterkeys()) + + def _iteritems( self ): + return ((k, self[k]) for k in self._iterkeys()) + + if PY_3: + keys = _iterkeys + """Returns an iterator of all named result keys.""" + + values = _itervalues + """Returns an iterator of all named result values.""" + + items = _iteritems + """Returns an iterator of all named result key-value tuples.""" + + else: + iterkeys = _iterkeys + """Returns an iterator of all named result keys (Python 2.x only).""" + + itervalues = _itervalues + """Returns an iterator of all named result values (Python 2.x only).""" + + iteritems = _iteritems + """Returns an iterator of all named result key-value tuples (Python 2.x only).""" + + def keys( self ): + """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" + return list(self.iterkeys()) + + def values( self ): + """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" + return list(self.itervalues()) + + def items( self ): + """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" + return list(self.iteritems()) + + def haskeys( self ): + """Since keys() returns an iterator, this method is helpful in bypassing + code that looks for the existence of any defined results names.""" + return bool(self.__tokdict) + + def pop( self, *args, **kwargs): + """ + Removes and returns item at specified index (default= ``last``). + Supports both ``list`` and ``dict`` semantics for ``pop()``. If + passed no argument or an integer argument, it will use ``list`` + semantics and pop tokens from the list of parsed tokens. If passed + a non-integer argument (most likely a string), it will use ``dict`` + semantics and pop the corresponding value from any defined results + names. A second default return value argument is supported, just as in + ``dict.pop()``. 
+ + Example:: + + def remove_first(tokens): + tokens.pop(0) + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + OneOrMore(Word(nums)) + print(patt.parseString("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.addParseAction(remove_LABEL) + print(patt.parseString("AAB 123 321").dump()) + + prints:: + + ['AAB', '123', '321'] + - LABEL: AAB + + ['AAB', '123', '321'] + """ + if not args: + args = [-1] + for k,v in kwargs.items(): + if k == 'default': + args = (args[0], v) + else: + raise TypeError("pop() got an unexpected keyword argument '%s'" % k) + if (isinstance(args[0], int) or + len(args) == 1 or + args[0] in self): + index = args[0] + ret = self[index] + del self[index] + return ret + else: + defaultvalue = args[1] + return defaultvalue + + def get(self, key, defaultValue=None): + """ + Returns named result matching the given key, or if there is no + such name, then returns the given ``defaultValue`` or ``None`` if no + ``defaultValue`` is specified. + + Similar to ``dict.get()``. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ + if key in self: + return self[key] + else: + return defaultValue + + def insert( self, index, insStr ): + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to ``list.insert()``. + + Example:: + + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] + """ + self.__toklist.insert(index, insStr) + # fixup indices in token dictionary + for name,occurrences in self.__tokdict.items(): + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) + + def append( self, item ): + """ + Add single element to end of ParseResults list of elements. + + Example:: + + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] + """ + self.__toklist.append(item) + + def extend( self, itemseq ): + """ + Add sequence of elements to end of ParseResults list of elements. 
+ + Example:: + + patt = OneOrMore(Word(alphas)) + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ + if isinstance(itemseq, ParseResults): + self += itemseq + else: + self.__toklist.extend(itemseq) + + def clear( self ): + """ + Clear all elements and results names. + """ + del self.__toklist[:] + self.__tokdict.clear() + + def __getattr__( self, name ): + try: + return self[name] + except KeyError: + return "" + + if name in self.__tokdict: + if name not in self.__accumNames: + return self.__tokdict[name][-1][0] + else: + return ParseResults([ v[0] for v in self.__tokdict[name] ]) + else: + return "" + + def __add__( self, other ): + ret = self.copy() + ret += other + return ret + + def __iadd__( self, other ): + if other.__tokdict: + offset = len(self.__toklist) + addoffset = lambda a: offset if a<0 else a+offset + otheritems = other.__tokdict.items() + otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) + for (k,vlist) in otheritems for v in vlist] + for k,v in otherdictitems: + self[k] = v + if isinstance(v[0],ParseResults): + v[0].__parent = wkref(self) + + self.__toklist += other.__toklist + self.__accumNames.update( other.__accumNames ) + return self + + def __radd__(self, other): + if isinstance(other,int) and other == 0: + # useful for merging many ParseResults using sum() builtin + return self.copy() + else: + # this may raise a TypeError - so be it + return other + self + + def __repr__( self ): + return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) + + def __str__( self ): + return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' + + def _asStringList( self, sep='' ): + out = [] + for item in self.__toklist: + if out and sep: + out.append(sep) + if isinstance( item, ParseResults ): + out += item._asStringList() + else: + out.append( _ustr(item) ) + return out + + def asList( self ): + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. + + Example:: + + patt = OneOrMore(Word(alphas)) + result = patt.parseString("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] + + # Use asList() to create an actual list + result_list = result.asList() + print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] + """ + return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] + + def asDict( self ): + """ + Returns the named parse results as a nested dictionary. 
+ + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.asDict() + print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ + if PY_3: + item_fn = self.items + else: + item_fn = self.iteritems + + def toItem(obj): + if isinstance(obj, ParseResults): + if obj.haskeys(): + return obj.asDict() + else: + return [toItem(v) for v in obj] + else: + return obj + + return dict((k,toItem(v)) for k,v in item_fn()) + + def copy( self ): + """ + Returns a new copy of a :class:`ParseResults` object. + """ + ret = ParseResults( self.__toklist ) + ret.__tokdict = dict(self.__tokdict.items()) + ret.__parent = self.__parent + ret.__accumNames.update( self.__accumNames ) + ret.__name = self.__name + return ret + + def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): + """ + (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. + """ + nl = "\n" + out = [] + namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() + for v in vlist) + nextLevelIndent = indent + " " + + # collapse out indents if formatting is not desired + if not formatted: + indent = "" + nextLevelIndent = "" + nl = "" + + selfTag = None + if doctag is not None: + selfTag = doctag + else: + if self.__name: + selfTag = self.__name + + if not selfTag: + if namedItemsOnly: + return "" + else: + selfTag = "ITEM" + + out += [ nl, indent, "<", selfTag, ">" ] + + for i,res in enumerate(self.__toklist): + if isinstance(res,ParseResults): + if i in namedItems: + out += [ res.asXML(namedItems[i], + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + out += [ res.asXML(None, + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + # individual token, see if there is a name for it + resTag = None + if i in namedItems: + resTag = namedItems[i] + if not resTag: + if namedItemsOnly: + continue + else: + resTag = "ITEM" + xmlBodyText = _xml_escape(_ustr(res)) + out += [ nl, nextLevelIndent, "<", resTag, ">", + xmlBodyText, + "</", resTag, ">" ] + + out += [ nl, indent, "</", selfTag, ">" ] + return "".join(out) + + def __lookup(self,sub): + for k,vlist in self.__tokdict.items(): + for v,loc in vlist: + if sub is v: + return k + return None + + def getName(self): + r""" + Returns the results name for this token expression. Useful when several + different expressions might match at a particular location. 
+ + Example:: + + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = OneOrMore(user_data) + + result = user_info.parseString("22 111-22-3333 #221B") + for item in result: + print(item.getName(), ':', item[0]) + + prints:: + + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ + if self.__name: + return self.__name + elif self.__parent: + par = self.__parent() + if par: + return par.__lookup(self) + else: + return None + elif (len(self) == 1 and + len(self.__tokdict) == 1 and + next(iter(self.__tokdict.values()))[0][1] in (0,-1)): + return next(iter(self.__tokdict.keys())) + else: + return None + + def dump(self, indent='', depth=0, full=True): + """ + Diagnostic method for listing out the contents of + a :class:`ParseResults`. Accepts an optional ``indent`` argument so + that this string can be embedded in a nested display of other data. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(result.dump()) + + prints:: + + ['12', '/', '31', '/', '1999'] + - day: 1999 + - month: 31 + - year: 12 + """ + out = [] + NL = '\n' + out.append( indent+_ustr(self.asList()) ) + if full: + if self.haskeys(): + items = sorted((str(k), v) for k,v in self.items()) + for k,v in items: + if out: + out.append(NL) + out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) + if isinstance(v,ParseResults): + if v: + out.append( v.dump(indent,depth+1) ) + else: + out.append(_ustr(v)) + else: + out.append(repr(v)) + elif any(isinstance(vv,ParseResults) for vv in self): + v = self + for i,vv in enumerate(v): + if isinstance(vv,ParseResults): + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) + else: + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) + + return "".join(out) + + def pprint(self, *args, **kwargs): + """ + Pretty-printer for parsed results as a list, using the + `pprint <https://docs.python.org/3/library/pprint.html>`_ module. + Accepts additional positional or keyword args as defined for + `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ . 
+ + Example:: + + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(delimitedList(term))) + result = func.parseString("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + + prints:: + + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ + pprint.pprint(self.asList(), *args, **kwargs) + + # add support for pickle protocol + def __getstate__(self): + return ( self.__toklist, + ( self.__tokdict.copy(), + self.__parent is not None and self.__parent() or None, + self.__accumNames, + self.__name ) ) + + def __setstate__(self,state): + self.__toklist = state[0] + (self.__tokdict, + par, + inAccumNames, + self.__name) = state[1] + self.__accumNames = {} + self.__accumNames.update(inAccumNames) + if par is not None: + self.__parent = wkref(par) + else: + self.__parent = None + + def __getnewargs__(self): + return self.__toklist, self.__name, self.__asList, self.__modal + + def __dir__(self): + return (dir(type(self)) + list(self.keys())) + +MutableMapping.register(ParseResults) + +def col (loc,strg): + """Returns current column within a string, counting newlines as line separators. + The first column is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See + :class:`ParserElement.parseString` for more + information on parsing strings containing ``<TAB>`` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + """ + s = strg + return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) + +def lineno(loc,strg): + """Returns current line number within a string, counting newlines as line separators. + The first line is number 1. + + Note - the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`ParserElement.parseString` + for more information on parsing strings containing ``<TAB>`` s, and + suggested methods to maintain a consistent view of the parsed string, the + parse location, and line and column positions within the parsed string. + """ + return strg.count("\n",0,loc) + 1 + +def line( loc, strg ): + """Returns the line of text containing loc within a string, counting newlines as line separators. 
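
    A small worked illustration (``line``, ``col`` and ``lineno`` all take the
    same ``(loc, strg)`` arguments)::

        s = "abc\ndef"
        line(5, s)    # -> 'def'
        col(5, s)     # -> 2 (loc 5 is the second column of its line)
        lineno(5, s)  # -> 2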
+ """ + lastCR = strg.rfind("\n", 0, loc) + nextCR = strg.find("\n", loc) + if nextCR >= 0: + return strg[lastCR+1:nextCR] + else: + return strg[lastCR+1:] + +def _defaultStartDebugAction( instring, loc, expr ): + print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))) + +def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): + print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) + +def _defaultExceptionDebugAction( instring, loc, expr, exc ): + print ("Exception raised:" + _ustr(exc)) + +def nullDebugAction(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + pass + +# Only works on Python 3.x - nonlocal is toxic to Python 2 installs +#~ 'decorator to trim function calls to match the arity of the target' +#~ def _trim_arity(func, maxargs=3): + #~ if func in singleArgBuiltins: + #~ return lambda s,l,t: func(t) + #~ limit = 0 + #~ foundArity = False + #~ def wrapper(*args): + #~ nonlocal limit,foundArity + #~ while 1: + #~ try: + #~ ret = func(*args[limit:]) + #~ foundArity = True + #~ return ret + #~ except TypeError: + #~ if limit == maxargs or foundArity: + #~ raise + #~ limit += 1 + #~ continue + #~ return wrapper + +# this version is Python 2.x-3.x cross-compatible +'decorator to trim function calls to match the arity of the target' +def _trim_arity(func, maxargs=2): + if func in singleArgBuiltins: + return lambda s,l,t: func(t) + limit = [0] + foundArity = [False] + + # traceback return data structure changed in Py3.5 - normalize back to plain tuples + if system_version[:2] >= (3,5): + def extract_stack(limit=0): + # special handling for Python 3.5.0 - extra deep call stack by 1 + offset = -3 if system_version == (3,5,0) else -2 + frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] + return [frame_summary[:2]] + def extract_tb(tb, limit=0): + frames = traceback.extract_tb(tb, limit=limit) + frame_summary = frames[-1] + return [frame_summary[:2]] + else: + extract_stack = traceback.extract_stack + extract_tb = traceback.extract_tb + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + LINE_DIFF = 6 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
+ this_line = extract_stack(limit=2)[-1] + pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) + + def wrapper(*args): + while 1: + try: + ret = func(*args[limit[0]:]) + foundArity[0] = True + return ret + except TypeError: + # re-raise TypeErrors if they did not come from our arity testing + if foundArity[0]: + raise + else: + try: + tb = sys.exc_info()[-1] + if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: + raise + finally: + del tb + + if limit[0] <= maxargs: + limit[0] += 1 + continue + raise + + # copy func name to wrapper for sensible debug output + func_name = "<parse action>" + try: + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + wrapper.__name__ = func_name + + return wrapper + +class ParserElement(object): + """Abstract base level parser element class.""" + DEFAULT_WHITE_CHARS = " \n\t\r" + verbose_stacktrace = False + + @staticmethod + def setDefaultWhitespaceChars( chars ): + r""" + Overrides the default whitespace chars + + Example:: + + # default whitespace chars are space, <TAB> and newline + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.setDefaultWhitespaceChars(" \t") + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + @staticmethod + def inlineLiteralsUsing(cls): + """ + Set class to be used for inclusion of string literals into a parser. + + Example:: + + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inlineLiteralsUsing(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] + """ + ParserElement._literalStringClass = cls + + def __init__( self, savelist=False ): + self.parseAction = list() + self.failAction = None + #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall + self.strRepr = None + self.resultsName = None + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + self.copyDefaultWhiteChars = True + self.mayReturnEmpty = False # used when checking for left-recursion + self.keepTabs = False + self.ignoreExprs = list() + self.debug = False + self.streamlined = False + self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index + self.errmsg = "" + self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) + self.debugActions = ( None, None, None ) #custom debug actions + self.re = None + self.callPreparse = True # used to avoid redundant calls to preParse + self.callDuringTry = False + + def copy( self ): + """ + Make a copy of this :class:`ParserElement`. Useful for defining + different parse actions for the same parsing pattern, using copies of + the original parse element. 
+ + Example:: + + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") + integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + + print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) + + prints:: + + [5120, 100, 655360, 268435456] + + Equivalent form of ``expr.copy()`` is just ``expr()``:: + + integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + """ + cpy = copy.copy( self ) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS + return cpy + + def setName( self, name ): + """ + Define name for this expression, makes debugging and exception messages clearer. + + Example:: + + Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) + Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + """ + self.name = name + self.errmsg = "Expected " + self.name + if hasattr(self,"exception"): + self.exception.msg = self.errmsg + return self + + def setResultsName( self, name, listAllMatches=False ): + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. + NOTE: this returns a *copy* of the original :class:`ParserElement` object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + ``expr("name")`` in place of ``expr.setResultsName("name")`` + - see :class:`__call__`. + + Example:: + + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + listAllMatches=True + newself.resultsName = name + newself.modalResults = not listAllMatches + return newself + + def setBreak(self,breakFlag = True): + """Method to invoke the Python pdb debugger when this element is + about to be parsed. Set ``breakFlag`` to True to enable, False to + disable. + """ + if breakFlag: + _parseMethod = self._parse + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + pdb.set_trace() + return _parseMethod( instring, loc, doActions, callPreParse ) + breaker._originalParseMethod = _parseMethod + self._parse = breaker + else: + if hasattr(self._parse,"_originalParseMethod"): + self._parse = self._parse._originalParseMethod + return self + + def setParseAction( self, *fns, **kwargs ): + """ + Define one or more actions to perform when successfully matching parse element definition. + Parse action fn is a callable method with 0-3 arguments, called as ``fn(s,loc,toks)`` , + ``fn(loc,toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object + + If the functions in fns modify the tokens, they can return them as the return + value from fn, and the modified list of tokens will replace the original. + Otherwise, fn does not need to return any value. 
+ + Optional keyword arguments: + - callDuringTry = (default= ``False`` ) indicate if parse action should be run during lookaheads and alternate testing + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`parseString for more + information on parsing strings containing ``<TAB>`` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + + Example:: + + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + # use parse action to convert to ints at parse time + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + date_str = integer + '/' + integer + '/' + integer + + # note that integer fields are now ints, not strings + date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] + """ + self.parseAction = list(map(_trim_arity, list(fns))) + self.callDuringTry = kwargs.get("callDuringTry", False) + return self + + def addParseAction( self, *fns, **kwargs ): + """ + Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`. + + See examples in :class:`copy`. + """ + self.parseAction += list(map(_trim_arity, list(fns))) + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) + return self + + def addCondition(self, *fns, **kwargs): + """Add a boolean predicate function to expression's list of parse actions. See + :class:`setParseAction` for function call signatures. Unlike ``setParseAction``, + functions passed to ``addCondition`` need to return boolean success/fail of the condition. + + Optional keyword arguments: + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException + + Example:: + + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) + """ + msg = kwargs.get("message", "failed user-defined condition") + exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException + for fn in fns: + fn = _trim_arity(fn) + def pa(s,l,t): + if not bool(fn(s,l,t)): + raise exc_type(s,l,msg) + self.parseAction.append(pa) + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) + return self + + def setFailAction( self, fn ): + """Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + ``fn(s,loc,expr,err)`` where: + - s = string being parsed + - loc = location where expression match was attempted and failed + - expr = the parse expression that failed + - err = the exception thrown + The function returns no value. 
It may throw :class:`ParseFatalException` + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def _skipIgnorables( self, instring, loc ): + exprsFound = True + while exprsFound: + exprsFound = False + for e in self.ignoreExprs: + try: + while 1: + loc,dummy = e._parse( instring, loc ) + exprsFound = True + except ParseException: + pass + return loc + + def preParse( self, instring, loc ): + if self.ignoreExprs: + loc = self._skipIgnorables( instring, loc ) + + if self.skipWhitespace: + wt = self.whiteChars + instrlen = len(instring) + while loc < instrlen and instring[loc] in wt: + loc += 1 + + return loc + + def parseImpl( self, instring, loc, doActions=True ): + return loc, [] + + def postParse( self, instring, loc, tokenlist ): + return tokenlist + + #~ @profile + def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): + debugging = ( self.debug ) #and doActions ) + + if debugging or self.failAction: + #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) + if (self.debugActions[0] ): + self.debugActions[0]( instring, loc, self ) + if callPreParse and self.callPreparse: + preloc = self.preParse( instring, loc ) + else: + preloc = loc + tokensStart = preloc + try: + try: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + except IndexError: + raise ParseException( instring, len(instring), self.errmsg, self ) + except ParseBaseException as err: + #~ print ("Exception raised:", err) + if self.debugActions[2]: + self.debugActions[2]( instring, tokensStart, self, err ) + if self.failAction: + self.failAction( instring, tokensStart, self, err ) + raise + else: + if callPreParse and self.callPreparse: + preloc = self.preParse( instring, loc ) + else: + preloc = loc + tokensStart = preloc + if self.mayIndexError or preloc >= len(instring): + try: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + except IndexError: + raise ParseException( instring, len(instring), self.errmsg, self ) + else: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + + tokens = self.postParse( instring, loc, tokens ) + + retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) + if self.parseAction and (doActions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + try: + tokens = fn( instring, tokensStart, retTokens ) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + exc.__cause__ = parse_action_exc + raise exc + + if tokens is not None and tokens is not retTokens: + retTokens = ParseResults( tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), + modal=self.modalResults ) + except ParseBaseException as err: + #~ print "Exception raised in user parse action:", err + if (self.debugActions[2] ): + self.debugActions[2]( instring, tokensStart, self, err ) + raise + else: + for fn in self.parseAction: + try: + tokens = fn( instring, tokensStart, retTokens ) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + exc.__cause__ = parse_action_exc + raise exc + + if tokens is not None and tokens is not retTokens: + retTokens = ParseResults( tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), + modal=self.modalResults ) + if debugging: + #~ print ("Matched",self,"->",retTokens.asList()) + if (self.debugActions[1] ): + self.debugActions[1]( 
instring, tokensStart, loc, self, retTokens ) + + return loc, retTokens + + def tryParse( self, instring, loc ): + try: + return self._parse( instring, loc, doActions=False )[0] + except ParseFatalException: + raise ParseException( instring, loc, self.errmsg, self) + + def canParseNext(self, instring, loc): + try: + self.tryParse(instring, loc) + except (ParseException, IndexError): + return False + else: + return True + + class _UnboundedCache(object): + def __init__(self): + cache = {} + self.not_in_cache = not_in_cache = object() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + if _OrderedDict is not None: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = _OrderedDict() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(cache) > size: + try: + cache.popitem(False) + except KeyError: + pass + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + else: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = {} + key_fifo = collections.deque([], size) + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(key_fifo) > size: + cache.pop(key_fifo.popleft(), None) + key_fifo.append(key) + + def clear(self): + cache.clear() + key_fifo.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + # argument cache for optimizing repeated calls when backtracking through recursive expressions + packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail + packrat_cache_lock = RLock() + packrat_cache_stats = [0, 0] + + # this method gets repeatedly called during backtracking with the same arguments - + # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression + def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): + HIT, MISS = 0, 1 + lookup = (self, instring, loc, callPreParse, doActions) + with ParserElement.packrat_cache_lock: + cache = ParserElement.packrat_cache + value = cache.get(lookup) + if value is cache.not_in_cache: + ParserElement.packrat_cache_stats[MISS] += 1 + try: + value = self._parseNoCache(instring, loc, doActions, callPreParse) + except ParseBaseException as pe: + # cache a copy of the exception, without the traceback + cache.set(lookup, pe.__class__(*pe.args)) + raise + else: + cache.set(lookup, (value[0], value[1].copy())) + return value + else: + ParserElement.packrat_cache_stats[HIT] += 1 + if isinstance(value, Exception): + raise value + return (value[0], value[1].copy()) + + _parse = _parseNoCache + + @staticmethod + def resetCache(): + ParserElement.packrat_cache.clear() + 
ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+    _packratEnabled = False
+    @staticmethod
+    def enablePackrat(cache_size_limit=128):
+        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+        Repeated parse attempts at the same string location (which happens
+        often in many complex grammars) can immediately return a cached value,
+        instead of re-executing parsing/validating code. Memoizing is done for
+        both valid results and parsing exceptions.
+
+        Parameters:
+
+        - cache_size_limit - (default= ``128``) - if an integer value is provided
+          will limit the size of the packrat cache; if None is passed, then
+          the cache size will be unbounded; if 0 is passed, the cache will
+          be effectively disabled.
+
+        This speedup may break existing programs that use parse actions that
+        have side-effects. For this reason, packrat parsing is disabled when
+        you first import pyparsing. To activate the packrat feature, your
+        program must call the class method :class:`ParserElement.enablePackrat`.
+        For best results, call ``enablePackrat()`` immediately after
+        importing pyparsing.
+
+        Example::
+
+            from pip._vendor import pyparsing
+            pyparsing.ParserElement.enablePackrat()
+        """
+        if not ParserElement._packratEnabled:
+            ParserElement._packratEnabled = True
+            if cache_size_limit is None:
+                ParserElement.packrat_cache = ParserElement._UnboundedCache()
+            else:
+                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+            ParserElement._parse = ParserElement._parseCache
+
+    def parseString( self, instring, parseAll=False ):
+        """
+        Execute the parse expression with the given string.
+        This is the main interface to the client code, once the complete
+        expression has been built.
+
+        If you want the grammar to require that the entire input string be
+        successfully parsed, then set ``parseAll`` to True (equivalent to ending
+        the grammar with ``StringEnd()``).
+
+        Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
+        in order to report proper column numbers in parse actions.
+        If the input string contains tabs and
+        the grammar uses parse actions that use the ``loc`` argument to index into the
+        string being parsed, you can ensure you have a consistent view of the input
+        string by:
+
+        - calling ``parseWithTabs`` on your grammar before calling ``parseString``
+          (see :class:`parseWithTabs`)
+        - defining your parse action using the full ``(s,loc,toks)`` signature, and
+          referencing the input string using the parse action's ``s`` argument
+        - explicitly expanding the tabs in your input string before calling
+          ``parseString``
+
+        Example::
+
+            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
+            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
+        """
+        ParserElement.resetCache()
+        if not self.streamlined:
+            self.streamline()
+            #~ self.saveAsList = True
+        for e in self.ignoreExprs:
+            e.streamline()
+        if not self.keepTabs:
+            instring = instring.expandtabs()
+        try:
+            loc, tokens = self._parse( instring, 0 )
+            if parseAll:
+                loc = self.preParse( instring, loc )
+                se = Empty() + StringEnd()
+                se._parse( instring, loc )
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+        else:
+            return tokens
+
+    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+        """
+        Scan the input string for expression matches.
Each match will return the + matching tokens, start location, and end location. May be called with optional + ``maxMatches`` argument, to clip scanning after 'n' matches are found. If + ``overlap`` is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See :class:`parseString` for more information on parsing + strings with embedded tabs. + + Example:: + + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens,start,end in Word(alphas).scanString(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = _ustr(instring).expandtabs() + instrlen = len(instring) + loc = 0 + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + try: + while loc <= instrlen and matches < maxMatches: + try: + preloc = preparseFn( instring, loc ) + nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) + except ParseException: + loc = preloc+1 + else: + if nextLoc > loc: + matches += 1 + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn( instring, loc ) + if nextloc > loc: + loc = nextLoc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc+1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def transformString( self, instring ): + """ + Extension to :class:`scanString`, to modify matching text with modified tokens that may + be returned from a parse action. To use ``transformString``, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking ``transformString()`` on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. ``transformString()`` returns the resulting transformed string. + + Example:: + + wd = Word(alphas) + wd.setParseAction(lambda toks: toks[0].title()) + + print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) + + prints:: + + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ + out = [] + lastE = 0 + # force preservation of <TAB>s, to minimize unwanted transformation of string, and to + # keep string locs straight between transformString and scanString + self.keepTabs = True + try: + for t,s,e in self.scanString( instring ): + out.append( instring[lastE:s] ) + if t: + if isinstance(t,ParseResults): + out += t.asList() + elif isinstance(t,list): + out += t + else: + out.append(t) + lastE = e + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join(map(_ustr,_flatten(out))) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def searchString( self, instring, maxMatches=_MAX_INT ): + """ + Another extension to :class:`scanString`, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + ``maxMatches`` argument, to clip searching after 'n' matches are found. 
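+        The matches are returned as a single :class:`ParseResults`, whose
+        elements are the tokens from each individual match.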
+ + Example:: + + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) + + prints:: + + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + try: + return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): + """ + Generator method to split a string using the given expression as a separator. + May be called with optional ``maxsplit`` argument, to limit the number of splits; + and the optional ``includeSeparators`` argument (default= ``False``), if the separating + matching text should be included in the split results. + + Example:: + + punc = oneOf(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + + prints:: + + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + splits = 0 + last = 0 + for t,s,e in self.scanString(instring, maxMatches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other ): + """ + Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement + converts them to :class:`Literal`s by default. + + Example:: + + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print (hello, "->", greet.parseString(hello)) + + prints:: + + Hello, World! 
-> ['Hello', ',', 'World', '!']
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, other ] )
+
+    def __radd__(self, other ):
+        """
+        Implementation of + operator when left operand is not a :class:`ParserElement`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other + self
+
+    def __sub__(self, other):
+        """
+        Implementation of - operator, returns :class:`And` with error stop
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return self + And._ErrorStop() + other
+
+    def __rsub__(self, other ):
+        """
+        Implementation of - operator when left operand is not a :class:`ParserElement`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other - self
+
+    def __mul__(self,other):
+        """
+        Implementation of * operator, allows use of ``expr * 3`` in place of
+        ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
+        tuple, similar to ``{min,max}`` multipliers in regular expressions. Tuples
+        may also include ``None`` as in:
+         - ``expr*(n,None)`` or ``expr*(n,)`` is equivalent
+           to ``expr*n + ZeroOrMore(expr)``
+           (read as "at least n instances of ``expr``")
+         - ``expr*(None,n)`` is equivalent to ``expr*(0,n)``
+           (read as "0 to n instances of ``expr``")
+         - ``expr*(None,None)`` is equivalent to ``ZeroOrMore(expr)``
+         - ``expr*(1,None)`` is equivalent to ``OneOrMore(expr)``
+
+        Note that ``expr*(None,n)`` does not raise an exception if
+        more than n exprs exist in the input stream; that is,
+        ``expr*(None,n)`` does not enforce a maximum number of expr
+        occurrences.
If this behavior is desired, then write + ``expr*(None,n) + ~expr`` + """ + if isinstance(other,int): + minElements, optElements = other,0 + elif isinstance(other,tuple): + other = (other + (None, None))[:2] + if other[0] is None: + other = (0, other[1]) + if isinstance(other[0],int) and other[1] is None: + if other[0] == 0: + return ZeroOrMore(self) + if other[0] == 1: + return OneOrMore(self) + else: + return self*other[0] + ZeroOrMore(self) + elif isinstance(other[0],int) and isinstance(other[1],int): + minElements, optElements = other + optElements -= minElements + else: + raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) + else: + raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) + + if minElements < 0: + raise ValueError("cannot multiply ParserElement by negative value") + if optElements < 0: + raise ValueError("second tuple value must be greater or equal to first tuple value") + if minElements == optElements == 0: + raise ValueError("cannot multiply ParserElement by 0 or (0,0)") + + if (optElements): + def makeOptionalList(n): + if n>1: + return Optional(self + makeOptionalList(n-1)) + else: + return Optional(self) + if minElements: + if minElements == 1: + ret = self + makeOptionalList(optElements) + else: + ret = And([self]*minElements) + makeOptionalList(optElements) + else: + ret = makeOptionalList(optElements) + else: + if minElements == 1: + ret = self + else: + ret = And([self]*minElements) + return ret + + def __rmul__(self, other): + return self.__mul__(other) + + def __or__(self, other ): + """ + Implementation of | operator - returns :class:`MatchFirst` + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return MatchFirst( [ self, other ] ) + + def __ror__(self, other ): + """ + Implementation of | operator when left operand is not a :class:`ParserElement` + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other | self + + def __xor__(self, other ): + """ + Implementation of ^ operator - returns :class:`Or` + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return Or( [ self, other ] ) + + def __rxor__(self, other ): + """ + Implementation of ^ operator when left operand is not a :class:`ParserElement` + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other ^ self + + def __and__(self, other ): + """ + Implementation of & operator - returns :class:`Each` + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, 
stacklevel=2) + return None + return Each( [ self, other ] ) + + def __rand__(self, other ): + """ + Implementation of & operator when left operand is not a :class:`ParserElement` + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other & self + + def __invert__( self ): + """ + Implementation of ~ operator - returns :class:`NotAny` + """ + return NotAny( self ) + + def __call__(self, name=None): + """ + Shortcut for :class:`setResultsName`, with ``listAllMatches=False``. + + If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be + passed as ``True``. + + If ``name` is omitted, same as calling :class:`copy`. + + Example:: + + # these are equivalent + userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") + userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + """ + if name is not None: + return self.setResultsName(name) + else: + return self.copy() + + def suppress( self ): + """ + Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from + cluttering up returned output. + """ + return Suppress( self ) + + def leaveWhitespace( self ): + """ + Disables the skipping of whitespace before matching the characters in the + :class:`ParserElement`'s defined pattern. This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars. + """ + self.skipWhitespace = False + return self + + def setWhitespaceChars( self, chars ): + """ + Overrides the default whitespace chars + """ + self.skipWhitespace = True + self.whiteChars = chars + self.copyDefaultWhiteChars = False + return self + + def parseWithTabs( self ): + """ + Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string. + Must be called before ``parseString`` when the input grammar contains elements that + match ``<TAB>`` characters. + """ + self.keepTabs = True + return self + + def ignore( self, other ): + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + + Example:: + + patt = OneOrMore(Word(alphas)) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] + + patt.ignore(cStyleComment) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] + """ + if isinstance(other, basestring): + other = Suppress(other) + + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + self.ignoreExprs.append(other) + else: + self.ignoreExprs.append( Suppress( other.copy() ) ) + return self + + def setDebugActions( self, startAction, successAction, exceptionAction ): + """ + Enable display of debugging messages while doing pattern matching. + """ + self.debugActions = (startAction or _defaultStartDebugAction, + successAction or _defaultSuccessDebugAction, + exceptionAction or _defaultExceptionDebugAction) + self.debug = True + return self + + def setDebug( self, flag=True ): + """ + Enable display of debugging messages while doing pattern matching. + Set ``flag`` to True to enable, False to disable. 
+ + Example:: + + wd = Word(alphas).setName("alphaword") + integer = Word(nums).setName("numword") + term = wd | integer + + # turn on debugging for wd + wd.setDebug() + + OneOrMore(term).parseString("abc 123 xyz 890") + + prints:: + + Match alphaword at loc 0(1,1) + Matched alphaword -> ['abc'] + Match alphaword at loc 3(1,4) + Exception raised:Expected alphaword (at char 4), (line:1, col:5) + Match alphaword at loc 7(1,8) + Matched alphaword -> ['xyz'] + Match alphaword at loc 11(1,12) + Exception raised:Expected alphaword (at char 12), (line:1, col:13) + Match alphaword at loc 15(1,16) + Exception raised:Expected alphaword (at char 15), (line:1, col:16) + + The output shown is that produced by the default debug actions - custom debug actions can be + specified using :class:`setDebugActions`. Prior to attempting + to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"`` + is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` + message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression, + which makes debugging and exception messages easier to understand - for instance, the default + name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``. + """ + if flag: + self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) + else: + self.debug = False + return self + + def __str__( self ): + return self.name + + def __repr__( self ): + return _ustr(self) + + def streamline( self ): + self.streamlined = True + self.strRepr = None + return self + + def checkRecursion( self, parseElementList ): + pass + + def validate( self, validateTrace=[] ): + """ + Check defined expressions for valid structure, check for infinite recursive definitions. + """ + self.checkRecursion( [] ) + + def parseFile( self, file_or_filename, parseAll=False ): + """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. + """ + try: + file_contents = file_or_filename.read() + except AttributeError: + with open(file_or_filename, "r") as f: + file_contents = f.read() + try: + return self.parseString(file_contents, parseAll) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def __eq__(self,other): + if isinstance(other, ParserElement): + return self is other or vars(self) == vars(other) + elif isinstance(other, basestring): + return self.matches(other) + else: + return super(ParserElement,self)==other + + def __ne__(self,other): + return not (self == other) + + def __hash__(self): + return hash(id(self)) + + def __req__(self,other): + return self == other + + def __rne__(self,other): + return not (self == other) + + def matches(self, testString, parseAll=True): + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. 
+ + Parameters: + - testString - to test against this expression for a match + - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests + + Example:: + + expr = Word(nums) + assert expr.matches("100") + """ + try: + self.parseString(_ustr(testString), parseAll=parseAll) + return True + except ParseBaseException: + return False + + def runTests(self, tests, parseAll=True, comment='#', + fullDump=True, printResults=True, failureTests=False, postParse=None): + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. + + Parameters: + - tests - a list of separate test strings, or a multiline string of test strings + - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests + - comment - (default= ``'#'``) - expression for indicating embedded comments in the test + string; pass None to disable comment filtering + - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline; + if False, only dump nested list + - printResults - (default= ``True``) prints test output to stdout + - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing + - postParse - (default= ``None``) optional callback for successful parse results; called as + `fn(test_string, parse_results)` and returns a string to be added to the test output + + Returns: a (success, results) tuple, where success indicates that all tests succeeded + (or failed if ``failureTests`` is True), and the results contain a list of lines of each + test's output + + Example:: + + number_expr = pyparsing_common.number.copy() + + result = number_expr.runTests(''' + # unsigned integer + 100 + # negative integer + -100 + # float with scientific notation + 6.02e23 + # integer with scientific notation + 1e-12 + ''') + print("Success" if result[0] else "Failed!") + + result = number_expr.runTests(''' + # stray character + 100Z + # missing leading digit before '.' + -.100 + # too many '.' + 3.14.159 + ''', failureTests=True) + print("Success" if result[0] else "Failed!") + + prints:: + + # unsigned integer + 100 + [100] + + # negative integer + -100 + [-100] + + # float with scientific notation + 6.02e23 + [6.02e+23] + + # integer with scientific notation + 1e-12 + [1e-12] + + Success + + # stray character + 100Z + ^ + FAIL: Expected end of text (at char 3), (line:1, col:4) + + # missing leading digit before '.' + -.100 + ^ + FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) + + # too many '.' + 3.14.159 + ^ + FAIL: Expected end of text (at char 4), (line:1, col:5) + + Success + + Each test string must be on a single line. If you want to test a string that spans multiple + lines, create a test like this:: + + expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") + + (Note that this is a raw string literal, you must include the leading 'r'.) 
+ """ + if isinstance(tests, basestring): + tests = list(map(str.strip, tests.rstrip().splitlines())) + if isinstance(comment, basestring): + comment = Literal(comment) + allResults = [] + comments = [] + success = True + for t in tests: + if comment is not None and comment.matches(t, False) or comments and not t: + comments.append(t) + continue + if not t: + continue + out = ['\n'.join(comments), t] + comments = [] + try: + # convert newline marks to actual newlines, and strip leading BOM if present + t = t.replace(r'\n','\n').lstrip('\ufeff') + result = self.parseString(t, parseAll=parseAll) + out.append(result.dump(full=fullDump)) + success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + out.append(str(pp_value)) + except Exception as e: + out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) + except ParseBaseException as pe: + fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" + if '\n' in t: + out.append(line(pe.loc, t)) + out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) + else: + out.append(' '*pe.loc + '^' + fatal) + out.append("FAIL: " + str(pe)) + success = success and failureTests + result = pe + except Exception as exc: + out.append("FAIL-EXCEPTION: " + str(exc)) + success = success and failureTests + result = exc + + if printResults: + if fullDump: + out.append('') + print('\n'.join(out)) + + allResults.append((t, result)) + + return success, allResults + + +class Token(ParserElement): + """Abstract :class:`ParserElement` subclass, for defining atomic + matching patterns. + """ + def __init__( self ): + super(Token,self).__init__( savelist=False ) + + +class Empty(Token): + """An empty token, will always match. + """ + def __init__( self ): + super(Empty,self).__init__() + self.name = "Empty" + self.mayReturnEmpty = True + self.mayIndexError = False + + +class NoMatch(Token): + """A token that will never match. + """ + def __init__( self ): + super(NoMatch,self).__init__() + self.name = "NoMatch" + self.mayReturnEmpty = True + self.mayIndexError = False + self.errmsg = "Unmatchable token" + + def parseImpl( self, instring, loc, doActions=True ): + raise ParseException(instring, loc, self.errmsg, self) + + +class Literal(Token): + """Token to exactly match a specified string. + + Example:: + + Literal('blah').parseString('blah') # -> ['blah'] + Literal('blah').parseString('blahfooblah') # -> ['blah'] + Literal('blah').parseString('bla') # -> Exception: Expected "blah" + + For case-insensitive matching, use :class:`CaselessLiteral`. + + For keyword matching (force word break before and after the matched string), + use :class:`Keyword` or :class:`CaselessKeyword`. 
+ """ + def __init__( self, matchString ): + super(Literal,self).__init__() + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Literal; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.__class__ = Empty + self.name = '"%s"' % _ustr(self.match) + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + + # Performance tuning: this routine gets called a *lot* + # if this is a single character match string and the first character matches, + # short-circuit as quickly as possible, and avoid calling startswith + #~ @profile + def parseImpl( self, instring, loc, doActions=True ): + if (instring[loc] == self.firstMatchChar and + (self.matchLen==1 or instring.startswith(self.match,loc)) ): + return loc+self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) +_L = Literal +ParserElement._literalStringClass = Literal + +class Keyword(Token): + """Token to exactly match a specified string as a keyword, that is, + it must be immediately followed by a non-keyword character. Compare + with :class:`Literal`: + + - ``Literal("if")`` will match the leading ``'if'`` in + ``'ifAndOnlyIf'``. + - ``Keyword("if")`` will not; it will only match the leading + ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` + + Accepts two optional constructor arguments in addition to the + keyword string: + + - ``identChars`` is a string of characters that would be valid + identifier characters, defaulting to all alphanumerics + "_" and + "$" + - ``caseless`` allows case-insensitive matching, default is ``False``. + + Example:: + + Keyword("start").parseString("start") # -> ['start'] + Keyword("start").parseString("starting") # -> Exception + + For case-insensitive matching, use :class:`CaselessKeyword`. 
+ """ + DEFAULT_KEYWORD_CHARS = alphanums+"_$" + + def __init__( self, matchString, identChars=None, caseless=False ): + super(Keyword,self).__init__() + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Keyword; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.name = '"%s"' % self.match + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = matchString.upper() + identChars = identChars.upper() + self.identChars = set(identChars) + + def parseImpl( self, instring, loc, doActions=True ): + if self.caseless: + if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and + (loc == 0 or instring[loc-1].upper() not in self.identChars) ): + return loc+self.matchLen, self.match + else: + if (instring[loc] == self.firstMatchChar and + (self.matchLen==1 or instring.startswith(self.match,loc)) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and + (loc == 0 or instring[loc-1] not in self.identChars) ): + return loc+self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + + def copy(self): + c = super(Keyword,self).copy() + c.identChars = Keyword.DEFAULT_KEYWORD_CHARS + return c + + @staticmethod + def setDefaultKeywordChars( chars ): + """Overrides the default Keyword chars + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + +class CaselessLiteral(Literal): + """Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + + OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for :class:`CaselessKeyword`.) + """ + def __init__( self, matchString ): + super(CaselessLiteral,self).__init__( matchString.upper() ) + # Preserve the defining literal. + self.returnString = matchString + self.name = "'%s'" % self.returnString + self.errmsg = "Expected " + self.name + + def parseImpl( self, instring, loc, doActions=True ): + if instring[ loc:loc+self.matchLen ].upper() == self.match: + return loc+self.matchLen, self.returnString + raise ParseException(instring, loc, self.errmsg, self) + +class CaselessKeyword(Keyword): + """ + Caseless version of :class:`Keyword`. + + Example:: + + OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] + + (Contrast with example for :class:`CaselessLiteral`.) + """ + def __init__( self, matchString, identChars=None ): + super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) + +class CloseMatch(Token): + """A variation on :class:`Literal` which matches "close" matches, + that is, strings with at most 'n' mismatching characters. 
+ :class:`CloseMatch` takes parameters: + + - ``match_string`` - string to be matched + - ``maxMismatches`` - (``default=1``) maximum number of + mismatches allowed to count as a match + + The results from a successful parse will contain the matched text + from the input string and the following named results: + + - ``mismatches`` - a list of the positions within the + match_string where mismatches were found + - ``original`` - the original match_string used to compare + against the input string + + If ``mismatches`` is an empty list, then the match was an exact + match. + + Example:: + + patt = CloseMatch("ATCATCGAATGGA") + patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) + patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) + + # exact match + patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) + + # close match allowing up to 2 mismatches + patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) + patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) + """ + def __init__(self, match_string, maxMismatches=1): + super(CloseMatch,self).__init__() + self.name = match_string + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) + self.mayIndexError = False + self.mayReturnEmpty = False + + def parseImpl( self, instring, loc, doActions=True ): + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): + src,mat = s_m + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = match_stringloc + 1 + results = ParseResults([instring[start:loc]]) + results['original'] = self.match_string + results['mismatches'] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + +class Word(Token): + """Token for matching words composed of allowed character sets. + Defined with string containing all allowed initial characters, an + optional string containing allowed body characters (if omitted, + defaults to the initial character set), and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. An optional ``excludeChars`` parameter can + list characters that might be found in the input ``bodyChars`` + string; useful to define a word of all printables except for one or + two characters, for instance. + + :class:`srange` is useful for defining custom character set strings + for defining ``Word`` expressions, using range notation from + regular expression character sets. + + A common mistake is to use :class:`Word` to match a specific literal + string, as in ``Word("Address")``. Remember that :class:`Word` + uses the string argument to define *sets* of matchable characters. + This expression would match "Add", "AAA", "dAred", or any other word + made up of the characters 'A', 'd', 'r', 'e', and 's'. 
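+    (For example, ``Word("Address").parseString("dAred")`` succeeds and
+    returns ``['dAred']``.)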
To match an + exact literal string, use :class:`Literal` or :class:`Keyword`. + + pyparsing includes helper strings for building Words: + + - :class:`alphas` + - :class:`nums` + - :class:`alphanums` + - :class:`hexnums` + - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 + - accented, tilded, umlauted, etc.) + - :class:`punc8bit` (non-alphabetic characters in ASCII range + 128-255 - currency, symbols, superscripts, diacriticals, etc.) + - :class:`printables` (any non-whitespace character) + + Example:: + + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capital_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums+'-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, excludeChars=",") + """ + def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): + super(Word,self).__init__() + if excludeChars: + initChars = ''.join(c for c in initChars if c not in excludeChars) + if bodyChars: + bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) + self.initCharsOrig = initChars + self.initChars = set(initChars) + if bodyChars : + self.bodyCharsOrig = bodyChars + self.bodyChars = set(bodyChars) + else: + self.bodyCharsOrig = initChars + self.bodyChars = set(initChars) + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.asKeyword = asKeyword + + if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): + if self.bodyCharsOrig == self.initCharsOrig: + self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) + elif len(self.initCharsOrig) == 1: + self.reString = "%s[%s]*" % \ + (re.escape(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) + else: + self.reString = "[%s][%s]*" % \ + (_escapeRegexRangeChars(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) + if self.asKeyword: + self.reString = r"\b"+self.reString+r"\b" + try: + self.re = re.compile( self.reString ) + except Exception: + self.re = None + + def parseImpl( self, instring, loc, doActions=True ): + if self.re: + result = self.re.match(instring,loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + + if not(instring[ loc ] in self.initChars): + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + instrlen = len(instring) + bodychars = self.bodyChars + maxloc = start + self.maxLen + maxloc = min( maxloc, instrlen ) + while loc < maxloc and instring[loc] in bodychars: + loc += 1 + + throwException = False + if loc - start < self.minLen: + throwException = True + if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: + throwException = True + if self.asKeyword: + if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in 
bodychars): + throwException = True + + if throwException: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + def __str__( self ): + try: + return super(Word,self).__str__() + except Exception: + pass + + + if self.strRepr is None: + + def charsAsStr(s): + if len(s)>4: + return s[:4]+"..." + else: + return s + + if ( self.initCharsOrig != self.bodyCharsOrig ): + self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) + else: + self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) + + return self.strRepr + + +class Char(Word): + """A short-cut class for defining ``Word(characters, exact=1)``, + when defining a match of any single character in a string of + characters. + """ + def __init__(self, charset): + super(Char, self).__init__(charset, exact=1) + self.reString = "[%s]" % _escapeRegexRangeChars(self.initCharsOrig) + self.re = re.compile( self.reString ) + + +class Regex(Token): + r"""Token for matching strings that match a given regular + expression. Defined with string specifying the regular expression in + a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_. + If the given regex contains named groups (defined using ``(?P<name>...)``), + these will be preserved as named parse results. + + Example:: + + realnum = Regex(r"[+-]?\d+\.\d*") + date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') + # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + """ + compiledREtype = type(re.compile("[A-Z]")) + def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False): + """The parameters ``pattern`` and ``flags`` are passed + to the ``re.compile()`` function as-is. See the Python + `re module <https://docs.python.org/3/library/re.html>`_ module for an + explanation of the acceptable patterns and flags. 
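+
+        Example::
+
+            # an illustrative sketch (assumes ``re`` has been imported):
+            # match identifiers, ignoring case
+            ident = Regex(r"[a-z_][a-z0-9_]*", flags=re.IGNORECASE)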
+ """ + super(Regex,self).__init__() + + if isinstance(pattern, basestring): + if not pattern: + warnings.warn("null string passed to Regex; use Empty() instead", + SyntaxWarning, stacklevel=2) + + self.pattern = pattern + self.flags = flags + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + except sre_constants.error: + warnings.warn("invalid pattern (%s) passed to Regex" % pattern, + SyntaxWarning, stacklevel=2) + raise + + elif isinstance(pattern, Regex.compiledREtype): + self.re = pattern + self.pattern = \ + self.reString = str(pattern) + self.flags = flags + + else: + raise ValueError("Regex may only be constructed with a string or a compiled RE object") + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = True + self.asGroupList = asGroupList + self.asMatch = asMatch + + def parseImpl( self, instring, loc, doActions=True ): + result = self.re.match(instring,loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + if self.asMatch: + ret = result + elif self.asGroupList: + ret = result.groups() + else: + ret = ParseResults(result.group()) + d = result.groupdict() + if d: + for k, v in d.items(): + ret[k] = v + return loc,ret + + def __str__( self ): + try: + return super(Regex,self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "Re:(%s)" % repr(self.pattern) + + return self.strRepr + + def sub(self, repl): + """ + Return Regex with an attached parse action to transform the parsed + result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. + + Example:: + + make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") + print(make_html.transformString("h1:main title:")) + # prints "<h1>main title</h1>" + """ + if self.asGroupList: + warnings.warn("cannot use sub() with Regex(asGroupList=True)", + SyntaxWarning, stacklevel=2) + raise SyntaxError() + + if self.asMatch and callable(repl): + warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", + SyntaxWarning, stacklevel=2) + raise SyntaxError() + + if self.asMatch: + def pa(tokens): + return tokens[0].expand(repl) + else: + def pa(tokens): + return self.re.sub(repl, tokens[0]) + return self.addParseAction(pa) + +class QuotedString(Token): + r""" + Token for matching strings that are delimited by quoting characters. + + Defined with the following parameters: + + - quoteChar - string of one or more characters defining the + quote delimiting string + - escChar - character to escape quotes, typically backslash + (default= ``None`` ) + - escQuote - special quote sequence to escape an embedded quote + string (such as SQL's ``""`` to escape an embedded ``"``) + (default= ``None`` ) + - multiline - boolean indicating whether quotes can span + multiple lines (default= ``False`` ) + - unquoteResults - boolean indicating whether the matched text + should be unquoted (default= ``True`` ) + - endQuoteChar - string of one or more characters defining the + end of the quote delimited string (default= ``None`` => same as + quoteChar) + - convertWhitespaceEscapes - convert escaped whitespace + (``'\t'``, ``'\n'``, etc.) 
to actual whitespace + (default= ``True`` ) + + Example:: + + qs = QuotedString('"') + print(qs.searchString('lsjdf "This is the quote" sldjf')) + complex_qs = QuotedString('{{', endQuoteChar='}}') + print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) + sql_qs = QuotedString('"', escQuote='""') + print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + + prints:: + + [['This is the quote']] + [['This is the "quote"']] + [['This is the quote with "embedded" quotes']] + """ + def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): + super(QuotedString,self).__init__() + + # remove white space from quote chars - wont work anyway + quoteChar = quoteChar.strip() + if not quoteChar: + warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) + raise SyntaxError() + + if endQuoteChar is None: + endQuoteChar = quoteChar + else: + endQuoteChar = endQuoteChar.strip() + if not endQuoteChar: + warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) + raise SyntaxError() + + self.quoteChar = quoteChar + self.quoteCharLen = len(quoteChar) + self.firstQuoteChar = quoteChar[0] + self.endQuoteChar = endQuoteChar + self.endQuoteCharLen = len(endQuoteChar) + self.escChar = escChar + self.escQuote = escQuote + self.unquoteResults = unquoteResults + self.convertWhitespaceEscapes = convertWhitespaceEscapes + + if multiline: + self.flags = re.MULTILINE | re.DOTALL + self.pattern = r'%s(?:[^%s%s]' % \ + ( re.escape(self.quoteChar), + _escapeRegexRangeChars(self.endQuoteChar[0]), + (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) + else: + self.flags = 0 + self.pattern = r'%s(?:[^%s\n\r%s]' % \ + ( re.escape(self.quoteChar), + _escapeRegexRangeChars(self.endQuoteChar[0]), + (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) + if len(self.endQuoteChar) > 1: + self.pattern += ( + '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), + _escapeRegexRangeChars(self.endQuoteChar[i])) + for i in range(len(self.endQuoteChar)-1,0,-1)) + ')' + ) + if escQuote: + self.pattern += (r'|(?:%s)' % re.escape(escQuote)) + if escChar: + self.pattern += (r'|(?:%s.)' % re.escape(escChar)) + self.escCharReplacePattern = re.escape(self.escChar)+"(.)" + self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + except sre_constants.error: + warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, + SyntaxWarning, stacklevel=2) + raise + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.group() + + if self.unquoteResults: + + # strip off quotes + ret = ret[self.quoteCharLen:-self.endQuoteCharLen] + + if isinstance(ret,basestring): + # replace escaped whitespace + if '\\' in ret and self.convertWhitespaceEscapes: + ws_map = { + r'\t' : '\t', + r'\n' : '\n', + r'\f' : '\f', + r'\r' : '\r', + } + for wslit,wschar in ws_map.items(): + ret = ret.replace(wslit, wschar) + + # replace escaped characters + if self.escChar: + ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) + + # 
replace escaped quotes + if self.escQuote: + ret = ret.replace(self.escQuote, self.endQuoteChar) + + return loc, ret + + def __str__( self ): + try: + return super(QuotedString,self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) + + return self.strRepr + + +class CharsNotIn(Token): + """Token for matching words composed of characters *not* in a given + set (will include whitespace in matched characters if not listed in + the provided exclusion set - see example). Defined with string + containing all disallowed characters, and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. + + Example:: + + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) + + prints:: + + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + def __init__( self, notChars, min=1, max=0, exact=0 ): + super(CharsNotIn,self).__init__() + self.skipWhitespace = False + self.notChars = notChars + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use " + + "Optional(CharsNotIn()) if zero-length char group is permitted") + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = ( self.minLen == 0 ) + self.mayIndexError = False + + def parseImpl( self, instring, loc, doActions=True ): + if instring[loc] in self.notChars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + notchars = self.notChars + maxlen = min( start+self.maxLen, len(instring) ) + while loc < maxlen and \ + (instring[loc] not in notchars): + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + def __str__( self ): + try: + return super(CharsNotIn, self).__str__() + except Exception: + pass + + if self.strRepr is None: + if len(self.notChars) > 4: + self.strRepr = "!W:(%s...)" % self.notChars[:4] + else: + self.strRepr = "!W:(%s)" % self.notChars + + return self.strRepr + +class White(Token): + """Special matching class for matching whitespace. Normally, + whitespace is ignored by pyparsing grammars. This class is included + when some whitespace structures are significant. Define with + a string containing the whitespace characters to be matched; default + is ``" \\t\\r\\n"``. Also takes optional ``min``, + ``max``, and ``exact`` arguments, as defined for the + :class:`Word` class. 
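+
+    Example::
+
+        # a sketch: require a literal tab between two fields,
+        # suppressing the tab itself from the results
+        row = Word(alphas) + White("\t").suppress() + Word(alphas)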
+ """ + whiteStrs = { + ' ' : '<SP>', + '\t': '<TAB>', + '\n': '<LF>', + '\r': '<CR>', + '\f': '<FF>', + 'u\00A0': '<NBSP>', + 'u\1680': '<OGHAM_SPACE_MARK>', + 'u\180E': '<MONGOLIAN_VOWEL_SEPARATOR>', + 'u\2000': '<EN_QUAD>', + 'u\2001': '<EM_QUAD>', + 'u\2002': '<EN_SPACE>', + 'u\2003': '<EM_SPACE>', + 'u\2004': '<THREE-PER-EM_SPACE>', + 'u\2005': '<FOUR-PER-EM_SPACE>', + 'u\2006': '<SIX-PER-EM_SPACE>', + 'u\2007': '<FIGURE_SPACE>', + 'u\2008': '<PUNCTUATION_SPACE>', + 'u\2009': '<THIN_SPACE>', + 'u\200A': '<HAIR_SPACE>', + 'u\200B': '<ZERO_WIDTH_SPACE>', + 'u\202F': '<NNBSP>', + 'u\205F': '<MMSP>', + 'u\3000': '<IDEOGRAPHIC_SPACE>', + } + def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): + super(White,self).__init__() + self.matchWhite = ws + self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) + #~ self.leaveWhitespace() + self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) + self.mayReturnEmpty = True + self.errmsg = "Expected " + self.name + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + def parseImpl( self, instring, loc, doActions=True ): + if not(instring[ loc ] in self.matchWhite): + raise ParseException(instring, loc, self.errmsg, self) + start = loc + loc += 1 + maxloc = start + self.maxLen + maxloc = min( maxloc, len(instring) ) + while loc < maxloc and instring[loc] in self.matchWhite: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class _PositionToken(Token): + def __init__( self ): + super(_PositionToken,self).__init__() + self.name=self.__class__.__name__ + self.mayReturnEmpty = True + self.mayIndexError = False + +class GoToColumn(_PositionToken): + """Token to advance to a specific column of input text; useful for + tabular report scraping. 
+ """ + def __init__( self, colno ): + super(GoToColumn,self).__init__() + self.col = colno + + def preParse( self, instring, loc ): + if col(loc,instring) != self.col: + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables( instring, loc ) + while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : + loc += 1 + return loc + + def parseImpl( self, instring, loc, doActions=True ): + thiscol = col( loc, instring ) + if thiscol > self.col: + raise ParseException( instring, loc, "Text not in expected column", self ) + newloc = loc + self.col - thiscol + ret = instring[ loc: newloc ] + return newloc, ret + + +class LineStart(_PositionToken): + """Matches if current position is at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (LineStart() + 'AAA' + restOfLine).searchString(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + def __init__( self ): + super(LineStart,self).__init__() + self.errmsg = "Expected start of line" + + def parseImpl( self, instring, loc, doActions=True ): + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) + +class LineEnd(_PositionToken): + """Matches if current position is at the end of a line within the + parse string + """ + def __init__( self ): + super(LineEnd,self).__init__() + self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) + self.errmsg = "Expected end of line" + + def parseImpl( self, instring, loc, doActions=True ): + if loc<len(instring): + if instring[loc] == "\n": + return loc+1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc+1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + +class StringStart(_PositionToken): + """Matches if current position is at the beginning of the parse + string + """ + def __init__( self ): + super(StringStart,self).__init__() + self.errmsg = "Expected start of text" + + def parseImpl( self, instring, loc, doActions=True ): + if loc != 0: + # see if entire string up to here is just whitespace and ignoreables + if loc != self.preParse( instring, 0 ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + +class StringEnd(_PositionToken): + """Matches if current position is at the end of the parse string + """ + def __init__( self ): + super(StringEnd,self).__init__() + self.errmsg = "Expected end of text" + + def parseImpl( self, instring, loc, doActions=True ): + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc+1, [] + elif loc > len(instring): + return loc, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + +class WordStart(_PositionToken): + """Matches if the current position is at the beginning of a Word, + and is not preceded by any character in a given set of + ``wordChars`` (default= ``printables``). To emulate the + ``\b`` behavior of regular expressions, use + ``WordStart(alphanums)``. ``WordStart`` will also match at + the beginning of the string being parsed, or at the beginning of + a line. 
+ """ + def __init__(self, wordChars = printables): + super(WordStart,self).__init__() + self.wordChars = set(wordChars) + self.errmsg = "Not at the start of a word" + + def parseImpl(self, instring, loc, doActions=True ): + if loc != 0: + if (instring[loc-1] in self.wordChars or + instring[loc] not in self.wordChars): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + +class WordEnd(_PositionToken): + """Matches if the current position is at the end of a Word, and is + not followed by any character in a given set of ``wordChars`` + (default= ``printables``). To emulate the ``\b`` behavior of + regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` + will also match at the end of the string being parsed, or at the end + of a line. + """ + def __init__(self, wordChars = printables): + super(WordEnd,self).__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.errmsg = "Not at the end of a word" + + def parseImpl(self, instring, loc, doActions=True ): + instrlen = len(instring) + if instrlen>0 and loc<instrlen: + if (instring[loc] in self.wordChars or + instring[loc-1] not in self.wordChars): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class ParseExpression(ParserElement): + """Abstract subclass of ParserElement, for combining and + post-processing parsed tokens. + """ + def __init__( self, exprs, savelist = False ): + super(ParseExpression,self).__init__(savelist) + if isinstance( exprs, _generatorType ): + exprs = list(exprs) + + if isinstance( exprs, basestring ): + self.exprs = [ ParserElement._literalStringClass( exprs ) ] + elif isinstance( exprs, Iterable ): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if all(isinstance(expr, basestring) for expr in exprs): + exprs = map(ParserElement._literalStringClass, exprs) + self.exprs = list(exprs) + else: + try: + self.exprs = list( exprs ) + except TypeError: + self.exprs = [ exprs ] + self.callPreparse = False + + def __getitem__( self, i ): + return self.exprs[i] + + def append( self, other ): + self.exprs.append( other ) + self.strRepr = None + return self + + def leaveWhitespace( self ): + """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on + all contained expressions.""" + self.skipWhitespace = False + self.exprs = [ e.copy() for e in self.exprs ] + for e in self.exprs: + e.leaveWhitespace() + return self + + def ignore( self, other ): + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + super( ParseExpression, self).ignore( other ) + for e in self.exprs: + e.ignore( self.ignoreExprs[-1] ) + else: + super( ParseExpression, self).ignore( other ) + for e in self.exprs: + e.ignore( self.ignoreExprs[-1] ) + return self + + def __str__( self ): + try: + return super(ParseExpression,self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) + return self.strRepr + + def streamline( self ): + super(ParseExpression,self).streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for Or's and MatchFirst's) + if ( len(self.exprs) == 2 ): + other = self.exprs[0] + if ( isinstance( other, self.__class__ ) and + not(other.parseAction) and + other.resultsName is None and + not other.debug ): + self.exprs = 
other.exprs[:] + [ self.exprs[1] ] + self.strRepr = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if ( isinstance( other, self.__class__ ) and + not(other.parseAction) and + other.resultsName is None and + not other.debug ): + self.exprs = self.exprs[:-1] + other.exprs[:] + self.strRepr = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = "Expected " + _ustr(self) + + return self + + def setResultsName( self, name, listAllMatches=False ): + ret = super(ParseExpression,self).setResultsName(name,listAllMatches) + return ret + + def validate( self, validateTrace=[] ): + tmp = validateTrace[:]+[self] + for e in self.exprs: + e.validate(tmp) + self.checkRecursion( [] ) + + def copy(self): + ret = super(ParseExpression,self).copy() + ret.exprs = [e.copy() for e in self.exprs] + return ret + +class And(ParseExpression): + """ + Requires all given :class:`ParseExpression` s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the ``'+'`` operator. + May also be constructed using the ``'-'`` operator, which will + suppress backtracking. + + Example:: + + integer = Word(nums) + name_expr = OneOrMore(Word(alphas)) + + expr = And([integer("id"),name_expr("name"),integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs): + super(And._ErrorStop,self).__init__(*args, **kwargs) + self.name = '-' + self.leaveWhitespace() + + def __init__( self, exprs, savelist = True ): + super(And,self).__init__(exprs, savelist) + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + self.setWhitespaceChars( self.exprs[0].whiteChars ) + self.skipWhitespace = self.exprs[0].skipWhitespace + self.callPreparse = True + + def streamline(self): + super(And, self).streamline() + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl( self, instring, loc, doActions=True ): + # pass False as last arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) + errorStop = False + for e in self.exprs[1:]: + if isinstance(e, And._ErrorStop): + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse( instring, loc, doActions ) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException(instring, len(instring), self.errmsg, self) + else: + loc, exprtokens = e._parse( instring, loc, doActions ) + if exprtokens or exprtokens.haskeys(): + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + return self.append( other ) #And( [ self, other ] ) + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + if not e.mayReturnEmpty: + break + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + +class Or(ParseExpression): + """Requires 
that at least one :class:`ParseExpression` is found. If + two expressions match, the expression that matches the longest + string will be used. May be constructed using the ``'^'`` + operator. + + Example:: + + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums)) + print(number.searchString("123 3.1416 789")) + + prints:: + + [['123'], ['3.1416'], ['789']] + """ + def __init__( self, exprs, savelist = False ): + super(Or,self).__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self): + super(Or, self).streamline() + self.saveAsList = any(e.saveAsList for e in self.exprs) + return self + + def parseImpl( self, instring, loc, doActions=True ): + maxExcLoc = -1 + maxException = None + matches = [] + for e in self.exprs: + try: + loc2 = e.tryParse( instring, loc ) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException(instring,len(instring),e.errmsg,self) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + matches.sort(key=lambda x: -x[0]) + for _,e in matches: + try: + return e._parse( instring, loc, doActions ) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException(instring, loc, "no defined alternatives to match", self) + + + def __ixor__(self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + return self.append( other ) #Or( [ self, other ] ) + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + + +class MatchFirst(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + two expressions match, the first one listed is the one that will + match. May be constructed using the ``'|'`` operator. + + Example:: + + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) + print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] + """ + def __init__( self, exprs, savelist = False ): + super(MatchFirst,self).__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + # self.saveAsList = any(e.saveAsList for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self): + super(MatchFirst, self).streamline() + self.saveAsList = any(e.saveAsList for e in self.exprs) + return self + + def parseImpl( self, instring, loc, doActions=True ): + maxExcLoc = -1 + maxException = None + for e in self.exprs: + try: + ret = e._parse( instring, loc, doActions ) + return ret + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException(instring,len(instring),e.errmsg,self) + maxExcLoc = len(instring) + + # only got here if no expression matched, raise exception for match that made it the furthest + else: + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ior__(self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + return self.append( other ) #MatchFirst( [ self, other ] ) + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + + +class Each(ParseExpression): + """Requires all given :class:`ParseExpression` s to be found, but in + any order. Expressions may be separated by whitespace. + + May be constructed using the ``'&'`` operator. 
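+
+    A minimal sketch of the ``'&'`` form (names here are illustrative)::
+
+        # both parts are required, but may appear in either order
+        pair = Word(alphas) & Word(nums)
+        pair.parseString("abc 42") # -> ['abc', '42']
+        pair.parseString("42 abc") # -> ['42', 'abc']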
+ + Example:: + + color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) + + shape_spec.runTests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + + prints:: + + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 + """ + def __init__( self, exprs, savelist = True ): + super(Each,self).__init__(exprs, savelist) + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = True + self.initExprGroups = True + self.saveAsList = True + + def streamline(self): + super(Each, self).streamline() + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl( self, instring, loc, doActions=True ): + if self.initExprGroups: + self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) + opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] + opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)] + self.optionals = opt1 + opt2 + self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] + self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] + self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] + self.required += self.multirequired + self.initExprGroups = False + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + matchOrder = [] + + keepMatching = True + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired + failed = [] + for e in tmpExprs: + try: + tmpLoc = e.tryParse( instring, tmpLoc ) + except ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e),e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + if tmpReqd: + missing = ", ".join(_ustr(e) for e in tmpReqd) + raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) + + # add any unmatched Optionals, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] + + resultlist = [] + for e in matchOrder: + loc,results = e._parse(instring,loc,doActions) + resultlist.append(results) + + finalResults = sum(resultlist, 
ParseResults([])) + return loc, finalResults + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + + +class ParseElementEnhance(ParserElement): + """Abstract subclass of :class:`ParserElement`, for combining and + post-processing parsed tokens. + """ + def __init__( self, expr, savelist=False ): + super(ParseElementEnhance,self).__init__(savelist) + if isinstance( expr, basestring ): + if issubclass(ParserElement._literalStringClass, Token): + expr = ParserElement._literalStringClass(expr) + else: + expr = ParserElement._literalStringClass(Literal(expr)) + self.expr = expr + self.strRepr = None + if expr is not None: + self.mayIndexError = expr.mayIndexError + self.mayReturnEmpty = expr.mayReturnEmpty + self.setWhitespaceChars( expr.whiteChars ) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def parseImpl( self, instring, loc, doActions=True ): + if self.expr is not None: + return self.expr._parse( instring, loc, doActions, callPreParse=False ) + else: + raise ParseException("",loc,self.errmsg,self) + + def leaveWhitespace( self ): + self.skipWhitespace = False + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.leaveWhitespace() + return self + + def ignore( self, other ): + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + super( ParseElementEnhance, self).ignore( other ) + if self.expr is not None: + self.expr.ignore( self.ignoreExprs[-1] ) + else: + super( ParseElementEnhance, self).ignore( other ) + if self.expr is not None: + self.expr.ignore( self.ignoreExprs[-1] ) + return self + + def streamline( self ): + super(ParseElementEnhance,self).streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def checkRecursion( self, parseElementList ): + if self in parseElementList: + raise RecursiveGrammarException( parseElementList+[self] ) + subRecCheckList = parseElementList[:] + [ self ] + if self.expr is not None: + self.expr.checkRecursion( subRecCheckList ) + + def validate( self, validateTrace=[] ): + tmp = validateTrace[:]+[self] + if self.expr is not None: + self.expr.validate(tmp) + self.checkRecursion( [] ) + + def __str__( self ): + try: + return super(ParseElementEnhance,self).__str__() + except Exception: + pass + + if self.strRepr is None and self.expr is not None: + self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) + return self.strRepr + + +class FollowedBy(ParseElementEnhance): + """Lookahead matching of the given parse expression. + ``FollowedBy`` does *not* advance the parsing position within + the input string, it only verifies that the specified parse + expression matches at the current position. ``FollowedBy`` + always returns a null token list. If any results names are defined + in the lookahead expression, those *will* be returned for access by + name. 
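+
+    A minimal sketch (illustrative)::
+
+        # match a word only when a '(' follows it
+        func_name = Word(alphas) + FollowedBy("(")
+        func_name.parseString("sin(x)") # -> ['sin']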
+ + Example:: + + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + + OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() + + prints:: + + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ + def __init__( self, expr ): + super(FollowedBy,self).__init__(expr) + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + _, ret = self.expr._parse(instring, loc, doActions=doActions) + del ret[:] + return loc, ret + + +class PrecededBy(ParseElementEnhance): + """Lookbehind matching of the given parse expression. + ``PrecededBy`` does not advance the parsing position within the + input string, it only verifies that the specified parse expression + matches prior to the current position. ``PrecededBy`` always + returns a null token list, but if a results name is defined on the + given expression, it is returned. + + Parameters: + + - expr - expression that must match prior to the current parse + location + - retreat - (default= ``None``) - (int) maximum number of characters + to lookbehind prior to the current parse location + + If the lookbehind expression is a string, Literal, Keyword, or + a Word or CharsNotIn with a specified exact or maximum length, then + the retreat parameter is not required. Otherwise, retreat must be + specified to give a maximum number of characters to look back from + the current parse position for a lookbehind match. + + Example:: + + # VB-style variable names with type prefixes + int_var = PrecededBy("#") + pyparsing_common.identifier + str_var = PrecededBy("$") + pyparsing_common.identifier + + """ + def __init__(self, expr, retreat=None): + super(PrecededBy, self).__init__(expr) + self.expr = self.expr().leaveWhitespace() + self.mayReturnEmpty = True + self.mayIndexError = False + self.exact = False + if isinstance(expr, str): + retreat = len(expr) + self.exact = True + elif isinstance(expr, (Literal, Keyword)): + retreat = expr.matchLen + self.exact = True + elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: + retreat = expr.maxLen + self.exact = True + elif isinstance(expr, _PositionToken): + retreat = 0 + self.exact = True + self.retreat = retreat + self.errmsg = "not preceded by " + str(expr) + self.skipWhitespace = False + + def parseImpl(self, instring, loc=0, doActions=True): + if self.exact: + if loc < self.retreat: + raise ParseException(instring, loc, self.errmsg) + start = loc - self.retreat + _, ret = self.expr._parse(instring, start) + else: + # retreat specified a maximum lookbehind window, iterate + test_expr = self.expr + StringEnd() + instring_slice = instring[:loc] + last_expr = ParseException(instring, loc, self.errmsg) + for offset in range(1, min(loc, self.retreat+1)): + try: + _, ret = test_expr._parse(instring_slice, loc-offset) + except ParseBaseException as pbe: + last_expr = pbe + else: + break + else: + raise last_expr + # return empty list of tokens, but preserve any defined results names + del ret[:] + return loc, ret + + +class NotAny(ParseElementEnhance): + """Lookahead to disallow matching with the given parse expression. + ``NotAny`` does *not* advance the parsing position within the + input string, it only verifies that the specified parse expression + does *not* match at the current position. 
Also, ``NotAny`` does + *not* skip over leading whitespace. ``NotAny`` always returns + a null token list. May be constructed using the '~' operator. + + Example:: + + AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) + + # take care not to mistake keywords for identifiers + ident = ~(AND | OR | NOT) + Word(alphas) + boolean_term = Optional(NOT) + ident + + # very crude boolean expression - to support parenthesis groups and + # operation hierarchy, use infixNotation + boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) + + # integers that are followed by "." are actually floats + integer = Word(nums) + ~Char(".") + """ + def __init__( self, expr ): + super(NotAny,self).__init__(expr) + #~ self.leaveWhitespace() + self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs + self.mayReturnEmpty = True + self.errmsg = "Found unwanted token, "+_ustr(self.expr) + + def parseImpl( self, instring, loc, doActions=True ): + if self.expr.canParseNext(instring, loc): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "~{" + _ustr(self.expr) + "}" + + return self.strRepr + +class _MultipleMatch(ParseElementEnhance): + def __init__( self, expr, stopOn=None): + super(_MultipleMatch, self).__init__(expr) + self.saveAsList = True + ender = stopOn + if isinstance(ender, basestring): + ender = ParserElement._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None + + def parseImpl( self, instring, loc, doActions=True ): + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = self.not_ender.tryParse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) + try: + hasIgnoreExprs = (not not self.ignoreExprs) + while 1: + if check_ender: + try_not_ender(instring, loc) + if hasIgnoreExprs: + preloc = self_skip_ignorables( instring, loc ) + else: + preloc = loc + loc, tmptokens = self_expr_parse( instring, preloc, doActions ) + if tmptokens or tmptokens.haskeys(): + tokens += tmptokens + except (ParseException,IndexError): + pass + + return loc, tokens + +class OneOrMore(_MultipleMatch): + """Repetition of one or more of the given expression. + + Parameters: + - expr - expression that must match one or more times + - stopOn - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) + + text = "shape: SQUARE posn: upper left color: BLACK" + OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + + # use stopOn attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parseString(text).pprint() + """ + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + _ustr(self.expr) + "}..." + + return self.strRepr + +class ZeroOrMore(_MultipleMatch): + """Optional repetition of zero or more of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - stopOn - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example: similar to :class:`OneOrMore` + """ + def __init__( self, expr, stopOn=None): + super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + try: + return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) + except (ParseException,IndexError): + return loc, [] + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "[" + _ustr(self.expr) + "]..." + + return self.strRepr + +class _NullToken(object): + def __bool__(self): + return False + __nonzero__ = __bool__ + def __str__(self): + return "" + +_optionalNotMatched = _NullToken() +class Optional(ParseElementEnhance): + """Optional matching of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - default (optional) - value to be returned if the optional expression is not found. + + Example:: + + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) + zip.runTests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + + prints:: + + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) + """ + def __init__( self, expr, default=_optionalNotMatched ): + super(Optional,self).__init__( expr, savelist=False ) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + try: + loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) + except (ParseException,IndexError): + if self.defaultValue is not _optionalNotMatched: + if self.expr.resultsName: + tokens = ParseResults([ self.defaultValue ]) + tokens[self.expr.resultsName] = self.defaultValue + else: + tokens = [ self.defaultValue ] + else: + tokens = [] + return loc, tokens + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "[" + _ustr(self.expr) + "]" + + return self.strRepr + +class SkipTo(ParseElementEnhance): + """Token for skipping over all undefined text until the matched + expression is found. 
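+
+    A minimal sketch (illustrative)::
+
+        # capture everything up to, but not including, the end of the line
+        rest_of_line = SkipTo(LineEnd())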
+ + Parameters: + - expr - target expression marking the end of the data to be skipped + - include - (default= ``False``) if True, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element list). + - ignore - (default= ``None``) used to define grammars (typically quoted strings and + comments) that might contain false matches to the target expression + - failOn - (default= ``None``) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, + the SkipTo is not a match + + Example:: + + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quotedString) + string_data.setParseAction(tokenMap(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.searchString(report): + print tkt.dump() + + prints:: + + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: 6 + - desc: Intermittent system crash + - issue_num: 101 + - sev: Critical + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: 14 + - desc: Spelling error on Login ('log|n') + - issue_num: 94 + - sev: Cosmetic + ['79', 'Minor', 'System slow when running too many reports', '47'] + - days_open: 47 + - desc: System slow when running too many reports + - issue_num: 79 + - sev: Minor + """ + def __init__( self, other, include=False, ignore=None, failOn=None ): + super( SkipTo, self ).__init__( other ) + self.ignoreExpr = ignore + self.mayReturnEmpty = True + self.mayIndexError = False + self.includeMatch = include + self.saveAsList = False + if isinstance(failOn, basestring): + self.failOn = ParserElement._literalStringClass(failOn) + else: + self.failOn = failOn + self.errmsg = "No match found for "+_ustr(self.expr) + + def parseImpl( self, instring, loc, doActions=True ): + startloc = loc + instrlen = len(instring) + expr = self.expr + expr_parse = self.expr._parse + self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None + self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if self_ignoreExpr_tryParse is not None: + # advance past ignore expressions + while 1: + try: + tmploc = self_ignoreExpr_tryParse(instring, tmploc) + except ParseBaseException: + break + + try: + expr_parse(instring, tmploc, doActions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # 
build up return values + loc = tmploc + skiptext = instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) + skipresult += mat + + return loc, skipresult + +class Forward(ParseElementEnhance): + """Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the ``Forward`` + variable using the '<<' operator. + + Note: take care when assigning to ``Forward`` not to overlook + precedence of operators. + + Specifically, '|' has a lower precedence than '<<', so that:: + + fwdExpr << a | b | c + + will actually be evaluated as:: + + (fwdExpr << a) | b | c + + thereby leaving b and c out as parseable alternatives. It is recommended that you + explicitly group the values inserted into the ``Forward``:: + + fwdExpr << (a | b | c) + + Converting to use the '<<=' operator instead will avoid this problem. + + See :class:`ParseResults.pprint` for an example of a recursive + parser created using ``Forward``. + """ + def __init__( self, other=None ): + super(Forward,self).__init__( other, savelist=False ) + + def __lshift__( self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass(other) + self.expr = other + self.strRepr = None + self.mayIndexError = self.expr.mayIndexError + self.mayReturnEmpty = self.expr.mayReturnEmpty + self.setWhitespaceChars( self.expr.whiteChars ) + self.skipWhitespace = self.expr.skipWhitespace + self.saveAsList = self.expr.saveAsList + self.ignoreExprs.extend(self.expr.ignoreExprs) + return self + + def __ilshift__(self, other): + return self << other + + def leaveWhitespace( self ): + self.skipWhitespace = False + return self + + def streamline( self ): + if not self.streamlined: + self.streamlined = True + if self.expr is not None: + self.expr.streamline() + return self + + def validate( self, validateTrace=[] ): + if self not in validateTrace: + tmp = validateTrace[:]+[self] + if self.expr is not None: + self.expr.validate(tmp) + self.checkRecursion([]) + + def __str__( self ): + if hasattr(self,"name"): + return self.name + return self.__class__.__name__ + ": ..." + + # stubbed out for now - creates awful memory and perf issues + self._revertClass = self.__class__ + self.__class__ = _ForwardNoRecurse + try: + if self.expr is not None: + retString = _ustr(self.expr) + else: + retString = "None" + finally: + self.__class__ = self._revertClass + return self.__class__.__name__ + ": " + retString + + def copy(self): + if self.expr is not None: + return super(Forward,self).copy() + else: + ret = Forward() + ret <<= self + return ret + +class _ForwardNoRecurse(Forward): + def __str__( self ): + return "..." + +class TokenConverter(ParseElementEnhance): + """ + Abstract subclass of :class:`ParseExpression`, for converting parsed results. + """ + def __init__( self, expr, savelist=False ): + super(TokenConverter,self).__init__( expr )#, savelist ) + self.saveAsList = False + +class Combine(TokenConverter): + """Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the + input string; this can be disabled by specifying + ``'adjacent=False'`` in the constructor. + + Example:: + + real = Word(nums) + '.' + Word(nums) + print(real.parseString('3.1416')) # -> ['3', '.', '1416'] + # will also erroneously match the following + print(real.parseString('3. 
1416')) # -> ['3', '.', '1416']
+
+        real = Combine(Word(nums) + '.' + Word(nums))
+        print(real.parseString('3.1416')) # -> ['3.1416']
+        # no match when there are internal spaces
+        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+    """
+    def __init__( self, expr, joinString="", adjacent=True ):
+        super(Combine,self).__init__( expr )
+        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+        if adjacent:
+            self.leaveWhitespace()
+        self.adjacent = adjacent
+        self.skipWhitespace = True
+        self.joinString = joinString
+        self.callPreparse = True
+
+    def ignore( self, other ):
+        if self.adjacent:
+            ParserElement.ignore(self, other)
+        else:
+            super( Combine, self).ignore( other )
+        return self
+
+    def postParse( self, instring, loc, tokenlist ):
+        retToks = tokenlist.copy()
+        del retToks[:]
+        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+        if self.resultsName and retToks.haskeys():
+            return [ retToks ]
+        else:
+            return retToks
+
+class Group(TokenConverter):
+    """Converter to return the matched tokens as a list - useful for
+    returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
+
+    Example::
+
+        ident = Word(alphas)
+        num = Word(nums)
+        term = ident | num
+        func = ident + Optional(delimitedList(term))
+        print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
+
+        func = ident + Group(Optional(delimitedList(term)))
+        print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
+    """
+    def __init__( self, expr ):
+        super(Group,self).__init__( expr )
+        self.saveAsList = expr.saveAsList
+
+    def postParse( self, instring, loc, tokenlist ):
+        return [ tokenlist ]
+
+class Dict(TokenConverter):
+    """Converter to return a repetitive expression as a list, but also
+    as a dictionary. Each element can also be referenced using the first
+    token in the expression as its key. Useful for tabular report
+    scraping when the first column can be used as an item key.
+
+    Example::
+
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+        # print attributes as plain groups
+        print(OneOrMore(attr_expr).parseString(text).dump())
+
+        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+        print(result.dump())
+
+        # access named fields as dict entries, or output as dict
+        print(result['shape'])
+        print(result.asDict())
+
+    prints::
+
+        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+
+    See more examples at :class:`ParseResults` of accessing fields by results name.
+ """ + def __init__( self, expr ): + super(Dict,self).__init__( expr ) + self.saveAsList = True + + def postParse( self, instring, loc, tokenlist ): + for i,tok in enumerate(tokenlist): + if len(tok) == 0: + continue + ikey = tok[0] + if isinstance(ikey,int): + ikey = _ustr(tok[0]).strip() + if len(tok)==1: + tokenlist[ikey] = _ParseResultsWithOffset("",i) + elif len(tok)==2 and not isinstance(tok[1],ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) + else: + dictvalue = tok.copy() #ParseResults(i) + del dictvalue[0] + if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) + else: + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) + + if self.resultsName: + return [ tokenlist ] + else: + return tokenlist + + +class Suppress(TokenConverter): + """Converter for ignoring the results of a parsed expression. + + Example:: + + source = "a, b, c,d" + wd = Word(alphas) + wd_list1 = wd + ZeroOrMore(',' + wd) + print(wd_list1.parseString(source)) + + # often, delimiters that are useful during parsing are just in the + # way afterward - use Suppress to keep them out of the parsed output + wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) + print(wd_list2.parseString(source)) + + prints:: + + ['a', ',', 'b', ',', 'c', ',', 'd'] + ['a', 'b', 'c', 'd'] + + (See also :class:`delimitedList`.) + """ + def postParse( self, instring, loc, tokenlist ): + return [] + + def suppress( self ): + return self + + +class OnlyOnce(object): + """Wrapper for parse actions, to ensure they are only called once. + """ + def __init__(self, methodCall): + self.callable = _trim_arity(methodCall) + self.called = False + def __call__(self,s,l,t): + if not self.called: + results = self.callable(s,l,t) + self.called = True + return results + raise ParseException(s,l,"") + def reset(self): + self.called = False + +def traceParseAction(f): + """Decorator for debugging parse actions. + + When the parse action is called, this decorator will print + ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. + When the parse action completes, the decorator will print + ``"<<"`` followed by the returned value, or any exception that the parse action raised. + + Example:: + + wd = Word(alphas) + + @traceParseAction + def remove_duplicate_chars(tokens): + return ''.join(sorted(set(''.join(tokens)))) + + wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) + print(wds.parseString("slkdjs sld sldd sdlf sdljf")) + + prints:: + + >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) + <<leaving remove_duplicate_chars (ret: 'dfjkls') + ['dfjkls'] + """ + f = _trim_arity(f) + def z(*paArgs): + thisFunc = f.__name__ + s,l,t = paArgs[-3:] + if len(paArgs)>3: + thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc + sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) + try: + ret = f(*paArgs) + except Exception as exc: + sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) + raise + sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) + return ret + try: + z.__name__ = f.__name__ + except AttributeError: + pass + return z + +# +# global helpers +# +def delimitedList( expr, delim=",", combine=False ): + """Helper to define a delimited list of expressions - the delimiter + defaults to ','. 
By default, the list elements and delimiters can + have intervening whitespace, and comments, but this can be + overridden by passing ``combine=True`` in the constructor. If + ``combine`` is set to ``True``, the matching tokens are + returned as a single token string, with the delimiters included; + otherwise, the matching tokens are returned as a list of tokens, + with the delimiters suppressed. + + Example:: + + delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] + delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] + """ + dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." + if combine: + return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName) + else: + return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) + +def countedArray( expr, intExpr=None ): + """Helper to define a counted list of expressions. + + This helper defines a pattern of the form:: + + integer expr expr expr... + + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the + leading count token is suppressed. + + If ``intExpr`` is specified, it should be a pyparsing expression + that produces an integer value. + + Example:: + + countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] + + # in this parser, the leading integer value is given in binary, + # '10' indicating that 2 values are in the array + binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) + countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] + """ + arrayExpr = Forward() + def countFieldParseAction(s,l,t): + n = t[0] + arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) + return [] + if intExpr is None: + intExpr = Word(nums).setParseAction(lambda t:int(t[0])) + else: + intExpr = intExpr.copy() + intExpr.setName("arrayLen") + intExpr.addParseAction(countFieldParseAction, callDuringTry=True) + return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') + +def _flatten(L): + ret = [] + for i in L: + if isinstance(i,list): + ret.extend(_flatten(i)) + else: + ret.append(i) + return ret + +def matchPreviousLiteral(expr): + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = matchPreviousLiteral(first) + matchExpr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches a previous literal, will also match the leading + ``"1:1"`` in ``"1:10"``. If this is not desired, use + :class:`matchPreviousExpr`. Do *not* use with packrat parsing + enabled. + """ + rep = Forward() + def copyTokenToRepeater(s,l,t): + if t: + if len(t) == 1: + rep << t[0] + else: + # flatten t tokens + tflat = _flatten(t.asList()) + rep << And(Literal(tt) for tt in tflat) + else: + rep << Empty() + expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) + return rep + +def matchPreviousExpr(expr): + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = matchPreviousExpr(first) + matchExpr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. 
Because this + matches by expressions, will *not* match the leading ``"1:1"`` + in ``"1:10"``; the expressions are evaluated first, and then + compared, so ``"1"`` is compared with ``"10"``. Do *not* use + with packrat parsing enabled. + """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + def copyTokenToRepeater(s,l,t): + matchTokens = _flatten(t.asList()) + def mustMatchTheseTokens(s,l,t): + theseTokens = _flatten(t.asList()) + if theseTokens != matchTokens: + raise ParseException("",0,"") + rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) + expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) + return rep + +def _escapeRegexRangeChars(s): + #~ escape these chars: ^-] + for c in r"\^-]": + s = s.replace(c,_bslash+c) + s = s.replace("\n",r"\n") + s = s.replace("\t",r"\t") + return _ustr(s) + +def oneOf( strs, caseless=False, useRegex=True ): + """Helper to quickly define a set of alternative Literals, and makes + sure to do longest-first testing when there is a conflict, + regardless of the input order, but returns + a :class:`MatchFirst` for best performance. + + Parameters: + + - strs - a string of space-delimited literals, or a collection of + string literals + - caseless - (default= ``False``) - treat all literals as + caseless + - useRegex - (default= ``True``) - as an optimization, will + generate a Regex object; otherwise, will generate + a :class:`MatchFirst` object (if ``caseless=True``, or if + creating a :class:`Regex` raises an exception) + + Example:: + + comp_oper = oneOf("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) + + prints:: + + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + if caseless: + isequal = ( lambda a,b: a.upper() == b.upper() ) + masks = ( lambda a,b: b.upper().startswith(a.upper()) ) + parseElementClass = CaselessLiteral + else: + isequal = ( lambda a,b: a == b ) + masks = ( lambda a,b: b.startswith(a) ) + parseElementClass = Literal + + symbols = [] + if isinstance(strs,basestring): + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + warnings.warn("Invalid argument to oneOf, expected string or iterable", + SyntaxWarning, stacklevel=2) + if not symbols: + return NoMatch() + + i = 0 + while i < len(symbols)-1: + cur = symbols[i] + for j,other in enumerate(symbols[i+1:]): + if ( isequal(other, cur) ): + del symbols[i+j+1] + break + elif ( masks(cur, other) ): + del symbols[i+j+1] + symbols.insert(i,other) + cur = other + break + else: + i += 1 + + if not caseless and useRegex: + #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) + try: + if len(symbols)==len("".join(symbols)): + return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) + else: + return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) + except Exception: + warnings.warn("Exception creating Regex for oneOf, building MatchFirst", + SyntaxWarning, stacklevel=2) + + + # last resort, just use MatchFirst + return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) + +def dictOf( key, value ): + """Helper to easily and clearly define a dictionary by specifying + the respective patterns for the key and value. 
Takes care of + defining the :class:`Dict`, :class:`ZeroOrMore`, and + :class:`Group` tokens in the proper order. The key pattern + can include delimiting markers or punctuation, as long as they are + suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the :class:`Dict` results + can include named token fields. + + Example:: + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + print(OneOrMore(attr_expr).parseString(text).dump()) + + attr_label = label + attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) + + # similar to Dict, but simpler call format + result = dictOf(attr_label, attr_value).parseString(text) + print(result.dump()) + print(result['shape']) + print(result.shape) # object attribute access works too + print(result.asDict()) + + prints:: + + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + SQUARE + {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} + """ + return Dict(OneOrMore(Group(key + value))) + +def originalTextFor(expr, asString=True): + """Helper to return the original, untokenized text for a given + expression. Useful to restore the parsed fields of an HTML start + tag into the raw tag text itself, or to revert separate tokens with + intervening whitespace back to the original matching input text. By + default, returns astring containing the original parsed text. + + If the optional ``asString`` argument is passed as + ``False``, then the return value is + a :class:`ParseResults` containing any results names that + were originally matched, and a single token containing the original + matched text from the input string. So if the expression passed to + :class:`originalTextFor` contains expressions with defined + results names, you must set ``asString`` to ``False`` if you + want to preserve those results name values. + + Example:: + + src = "this is test <b> bold <i>text</i> </b> normal text " + for tag in ("b","i"): + opener,closer = makeHTMLTags(tag) + patt = originalTextFor(opener + SkipTo(closer) + closer) + print(patt.searchString(src)[0]) + + prints:: + + ['<b> bold <i>text</i> </b>'] + ['<i>text</i>'] + """ + locMarker = Empty().setParseAction(lambda s,loc,t: loc) + endlocMarker = locMarker.copy() + endlocMarker.callPreparse = False + matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") + if asString: + extractText = lambda s,l,t: s[t._original_start:t._original_end] + else: + def extractText(s,l,t): + t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] + matchExpr.setParseAction(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs + return matchExpr + +def ungroup(expr): + """Helper to undo pyparsing's default grouping of And expressions, + even if all but one are non-empty. + """ + return TokenConverter(expr).setParseAction(lambda t:t[0]) + +def locatedExpr(expr): + """Helper to decorate a returned token with its starting and ending + locations in the input string. 
+ + This helper adds the following results names: + + - locn_start = location where matched expression begins + - locn_end = location where matched expression ends + - value = the actual parsed results + + Be careful if the input text contains ``<TAB>`` characters, you + may want to call :class:`ParserElement.parseWithTabs` + + Example:: + + wd = Word(alphas) + for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): + print(match) + + prints:: + + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] + """ + locator = Empty().setParseAction(lambda s,l,t: l) + return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) + + +# convenience constants for positional expressions +empty = Empty().setName("empty") +lineStart = LineStart().setName("lineStart") +lineEnd = LineEnd().setName("lineEnd") +stringStart = StringStart().setName("stringStart") +stringEnd = StringEnd().setName("stringEnd") + +_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) +_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) +_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) +_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) +_charRange = Group(_singleChar + Suppress("-") + _singleChar) +_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" + +def srange(s): + r"""Helper to easily define string ranges for use in Word + construction. Borrows syntax from regexp '[]' string range + definitions:: + + srange("[0-9]") -> "0123456789" + srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" + srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" + + The input string must be enclosed in []'s, and the returned string + is the expanded character set joined into a single string. The + values enclosed in the []'s may be: + + - a single character + - an escaped character with a leading backslash (such as ``\-`` + or ``\]``) + - an escaped hex character with a leading ``'\x'`` + (``\x21``, which is a ``'!'`` character) (``\0x##`` + is also supported for backwards compatibility) + - an escaped octal character with a leading ``'\0'`` + (``\041``, which is a ``'!'`` character) + - a range of any of the above, separated by a dash (``'a-z'``, + etc.) + - any combination of the above (``'aeiouy'``, + ``'a-zA-Z0-9_$'``, etc.) + """ + _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) + try: + return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) + except Exception: + return "" + +def matchOnlyAtCol(n): + """Helper method for defining parse actions that require matching at + a specific column in the input text. + """ + def verifyCol(strg,locn,toks): + if col(locn,strg) != n: + raise ParseException(strg,locn,"matched token not at column %d" % n) + return verifyCol + +def replaceWith(replStr): + """Helper method for common parse actions that simply return + a literal value. Especially useful when used with + :class:`transformString<ParserElement.transformString>` (). 
+ + Example:: + + num = Word(nums).setParseAction(lambda toks: int(toks[0])) + na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) + term = na | num + + OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] + """ + return lambda s,l,t: [replStr] + +def removeQuotes(s,l,t): + """Helper parse action for removing quotation marks from parsed + quoted strings. + + Example:: + + # by default, quotation marks are included in parsed results + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] + + # use removeQuotes to strip quotation marks from parsed results + quotedString.setParseAction(removeQuotes) + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] + """ + return t[0][1:-1] + +def tokenMap(func, *args): + """Helper to define a parse action by mapping a function to all + elements of a ParseResults list. If any additional args are passed, + they are forwarded to the given function as additional arguments + after the token, as in + ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``, + which will convert the parsed data to an integer using base 16. + + Example (compare the last to example in :class:`ParserElement.transformString`:: + + hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) + hex_ints.runTests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).setParseAction(tokenMap(str.upper)) + OneOrMore(upperword).runTests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).setParseAction(tokenMap(str.title)) + OneOrMore(wd).setParseAction(' '.join).runTests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + + prints:: + + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + def pa(s,l,t): + return [func(tokn, *args) for tokn in t] + + try: + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + pa.__name__ = func_name + + return pa + +upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) +"""(Deprecated) Helper parse action to convert tokens to upper case. +Deprecated in favor of :class:`pyparsing_common.upcaseTokens`""" + +downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) +"""(Deprecated) Helper parse action to convert tokens to lower case. 
+Deprecated in favor of :class:`pyparsing_common.downcaseTokens`""" + +def _makeTags(tagStr, xml): + """Internal helper to construct opening and closing tag expressions, given a tag name""" + if isinstance(tagStr,basestring): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas,alphanums+"_-:") + if (xml): + tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) + openTag = Suppress("<") + tagStr("tag") + \ + Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ + Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") + else: + printablesLessRAbrack = "".join(c for c in printables if c not in ">") + tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) + openTag = Suppress("<") + tagStr("tag") + \ + Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ + Optional( Suppress("=") + tagAttrValue ) ))) + \ + Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") + closeTag = Combine(_L("</") + tagStr + ">") + + openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) + closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) + openTag.tag = resname + closeTag.tag = resname + return openTag, closeTag + +def makeHTMLTags(tagStr): + """Helper to construct opening and closing tag expressions for HTML, + given a tag name. Matches tags in either upper or lower case, + attributes with namespaces and with quoted or unquoted values. + + Example:: + + text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' + # makeHTMLTags returns pyparsing expressions for the opening and + # closing tags as a 2-tuple + a,a_end = makeHTMLTags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.searchString(text): + # attributes in the <A> tag (like "href" shown here) are + # also accessible as named results + print(link.link_text, '->', link.href) + + prints:: + + pyparsing -> https://github.com/pyparsing/pyparsing/wiki + """ + return _makeTags( tagStr, False ) + +def makeXMLTags(tagStr): + """Helper to construct opening and closing tag expressions for XML, + given a tag name. Matches tags only in the given upper/lower case. + + Example: similar to :class:`makeHTMLTags` + """ + return _makeTags( tagStr, True ) + +def withAttribute(*args,**attrDict): + """Helper to create a validating parse action to be used with start + tags created with :class:`makeXMLTags` or + :class:`makeHTMLTags`. Use ``withAttribute`` to qualify + a starting tag with a required attribute value, to avoid false + matches on common tags such as ``<TD>`` or ``<DIV>``. + + Call ``withAttribute`` with a series of attribute names and + values. Specify the list of filter attributes names and values as: + + - keyword arguments, as in ``(align="right")``, or + - as an explicit dict with ``**`` operator, when an attribute + name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` + - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align","right"))`` + + For attribute names with a namespace prefix, you must use the second + form. Attribute names are matched insensitive to upper/lower case. 
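+
+    A minimal sketch (illustrative)::
+
+        td, td_end = makeHTMLTags("td")
+        # accept only <td> tags whose align attribute equals "right"
+        right_td = td.copy().setParseAction(withAttribute(align="right"))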
+ + If just testing for ``class`` (with or without a namespace), use + :class:`withClass`. + + To verify that the attribute exists, but without specifying a value, + pass ``withAttribute.ANY_VALUE`` as the value. + + Example:: + + html = ''' + <div> + Some text + <div type="grid">1 4 0 1 0</div> + <div type="graph">1,3 2,3 1,1</div> + <div>this has no type</div> + </div> + + ''' + div,div_end = makeHTMLTags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().setParseAction(withAttribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attrDict.items() + attrs = [(k,v) for k,v in attrs] + def pa(s,l,tokens): + for attrName,attrValue in attrs: + if attrName not in tokens: + raise ParseException(s,l,"no matching attribute " + attrName) + if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % + (attrName, tokens[attrName], attrValue)) + return pa +withAttribute.ANY_VALUE = object() + +def withClass(classname, namespace=''): + """Simplified version of :class:`withAttribute` when + matching on a div class - made difficult because ``class`` is + a reserved word in Python. + + Example:: + + html = ''' + <div> + Some text + <div class="grid">1 4 0 1 0</div> + <div class="graph">1,3 2,3 1,1</div> + <div>this <div> has no class</div> + </div> + + ''' + div,div_end = makeHTMLTags("div") + div_grid = div().setParseAction(withClass("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "%s:class" % namespace if namespace else "class" + return withAttribute(**{classattr : classname}) + +opAssoc = SimpleNamespace() +opAssoc.LEFT = object() +opAssoc.RIGHT = object() + +def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): + """Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary + or binary, left- or right-associative. Parse actions can also be + attached to operator expressions. The generated parser will also + recognize the use of parentheses to override operator precedences + (see example below). + + Note: if you define a deep operator list, you may see performance + issues when using infixNotation. See + :class:`ParserElement.enablePackrat` for a mechanism to potentially + improve your parser performance. 
+
+    Parameters:
+    - baseExpr - expression representing the most basic element for the
+      nested expression
+    - opList - list of tuples, one for each operator precedence level
+      in the expression grammar; each tuple is of the form ``(opExpr,
+      numTerms, rightLeftAssoc, parseAction)``, where:
+
+      - opExpr is the pyparsing expression for the operator; may also
+        be a string, which will be converted to a Literal; if numTerms
+        is 3, opExpr is a tuple of two expressions, for the two
+        operators separating the 3 terms
+      - numTerms is the number of terms for this operator (must be 1,
+        2, or 3)
+      - rightLeftAssoc indicates whether the operator is right
+        or left associative, using the pyparsing-defined constants
+        ``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
+      - parseAction is the parse action to be associated with
+        expressions matching this operator expression (the parse action
+        tuple member may be omitted); if the parse action is passed
+        a tuple or list of functions, this is equivalent to calling
+        ``setParseAction(*fn)``
+        (:class:`ParserElement.setParseAction`)
+    - lpar - expression for matching left-parentheses
+      (default= ``Suppress('(')``)
+    - rpar - expression for matching right-parentheses
+      (default= ``Suppress(')')``)
+
+    Example::
+
+        # simple example of four-function arithmetic with ints and
+        # variable names
+        integer = pyparsing_common.signed_integer
+        varname = pyparsing_common.identifier
+
+        arith_expr = infixNotation(integer | varname,
+            [
+            ('-', 1, opAssoc.RIGHT),
+            (oneOf('* /'), 2, opAssoc.LEFT),
+            (oneOf('+ -'), 2, opAssoc.LEFT),
+            ])
+
+        arith_expr.runTests('''
+            5+3*6
+            (5+3)*6
+            -2--11
+            ''', fullDump=False)
+
+    prints::
+
+        5+3*6
+        [[5, '+', [3, '*', 6]]]
+
+        (5+3)*6
+        [[[5, '+', 3], '*', 6]]
+
+        -2--11
+        [[['-', 2], '-', ['-', 11]]]
+    """
+    # captive version of FollowedBy that does not do parse actions or capture results names
+    class _FB(FollowedBy):
+        def parseImpl(self, instring, loc, doActions=True):
+            self.expr.tryParse(instring, loc)
+            return loc, []
+
+    ret = Forward()
+    lastExpr = baseExpr | ( lpar + ret + rpar )
+    for i,operDef in enumerate(opList):
+        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
+        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
+        if arity == 3:
+            if opExpr is None or len(opExpr) != 2:
+                raise ValueError(
+                    "if numterms=3, opExpr must be a tuple or list of two expressions")
+            opExpr1, opExpr2 = opExpr
+        thisExpr = Forward().setName(termName)
+        if rightLeftAssoc == opAssoc.LEFT:
+            if arity == 1:
+                matchExpr = _FB(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
+                else:
+                    matchExpr = _FB(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
+            elif arity == 3:
+                matchExpr = _FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
+                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
+            else:
+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+        elif rightLeftAssoc == opAssoc.RIGHT:
+            if arity == 1:
+                # try to avoid LR with this extra test
+                if not isinstance(opExpr, Optional):
+                    opExpr = Optional(opExpr)
+                matchExpr = _FB(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
+                else:
+                    matchExpr = _FB(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
+            elif arity == 3:
+                
matchExpr = _FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ + Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + else: + raise ValueError("operator must indicate right or left associativity") + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.setParseAction(*pa) + else: + matchExpr.setParseAction(pa) + thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) + lastExpr = thisExpr + ret <<= lastExpr + return ret + +operatorPrecedence = infixNotation +"""(Deprecated) Former name of :class:`infixNotation`, will be +dropped in a future release.""" + +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") +quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") + +def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): + """Helper method for defining nested lists enclosed in opening and + closing delimiters ("(" and ")" are the default). + + Parameters: + - opener - opening character for a nested list + (default= ``"("``); can also be a pyparsing expression + - closer - closing character for a nested list + (default= ``")"``); can also be a pyparsing expression + - content - expression for items within the nested lists + (default= ``None``) + - ignoreExpr - expression for ignoring opening and closing + delimiters (default= :class:`quotedString`) + + If an expression is not provided for the content argument, the + nested expression will capture all whitespace-delimited content + between delimiters as a list of separate values. + + Use the ``ignoreExpr`` argument to define expressions that may + contain opening or closing characters that should not be treated as + opening or closing characters for nesting, such as quotedString or + a comment expression. Specify multiple expressions using an + :class:`Or` or :class:`MatchFirst`. The default is + :class:`quotedString`, but if no expressions are to be ignored, then + pass ``None`` for this argument. 
+
+    Example::
+
+        data_type = oneOf("void int short long char float double")
+        decl_data_type = Combine(data_type + Optional(Word('*')))
+        ident = Word(alphas+'_', alphanums+'_')
+        number = pyparsing_common.number
+        arg = Group(decl_data_type + ident)
+        LPAR,RPAR = map(Suppress, "()")
+
+        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
+
+        c_function = (decl_data_type("type") +
+                      ident("name") +
+                      LPAR + Optional(delimitedList(arg), [])("args") + RPAR +
+                      code_body("body"))
+        c_function.ignore(cStyleComment)
+
+        source_code = '''
+            int is_odd(int x) {
+                return (x%2);
+            }
+
+            int dec_to_hex(char hchar) {
+                if (hchar >= '0' && hchar <= '9') {
+                    return (ord(hchar)-ord('0'));
+                } else {
+                    return (10+ord(hchar)-ord('A'));
+                }
+            }
+        '''
+        for func in c_function.searchString(source_code):
+            print("%(name)s (%(type)s) args: %(args)s" % func)
+
+
+    prints::
+
+        is_odd (int) args: [['int', 'x']]
+        dec_to_hex (int) args: [['char', 'hchar']]
+    """
+    if opener == closer:
+        raise ValueError("opening and closing strings cannot be the same")
+    if content is None:
+        if isinstance(opener,basestring) and isinstance(closer,basestring):
+            if len(opener) == 1 and len(closer)==1:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr +
+                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
+                                ).setParseAction(lambda t:t[0].strip()))
+            else:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr +
+                                    ~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+        else:
+            raise ValueError("opening and closing arguments must be strings if no content expression is given")
+    ret = Forward()
+    if ignoreExpr is not None:
+        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
+    else:
+        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
+    ret.setName('nested %s%s expression' % (opener,closer))
+    return ret
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True):
+    """Helper method for defining space-delimited indentation blocks,
+    such as those used to define block statements in Python source code.
+
+    Parameters:
+
+    - blockStatementExpr - expression defining syntax of statement that
+      is repeated within the indented block
+    - indentStack - list created by caller to manage indentation stack
+      (multiple statementWithIndentedBlock expressions within a single
+      grammar should share a common indentStack)
+    - indent - boolean indicating whether block must be indented beyond
+      the current level; set to False for a block of left-most
+      statements (default= ``True``)
+
+    A valid block must contain at least one ``blockStatement``.
+ + Example:: + + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group( funcDecl + func_body ) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << ( funcDef | assignment | identifier ) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + + prints:: + + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + def checkPeerIndent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseFatalException(s,l,"illegal nesting") + raise ParseException(s,l,"not a peer entry") + + def checkSubIndent(s,l,t): + curCol = col(l,s) + if curCol > indentStack[-1]: + indentStack.append( curCol ) + else: + raise ParseException(s,l,"not a subentry") + + def checkUnindent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): + raise ParseException(s,l,"not an unindent") + indentStack.pop() + + NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) + INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') + PEER = Empty().setParseAction(checkPeerIndent).setName('') + UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') + if indent: + smExpr = Group( Optional(NL) + + #~ FollowedBy(blockStatementExpr) + + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) + else: + smExpr = Group( Optional(NL) + + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.setName('indented block') + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") +def replaceHTMLEntity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + +# it's easy to get these comment structures wrong - they're very common, so may as well make them available +cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") +"Comment of the form ``/* ... */``" + +htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") +"Comment of the form ``<!-- ... -->``" + +restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") +dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") +"Comment of the form ``// ... 
(to end of line)``" + +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" + +javaStyleComment = cppStyleComment +"Same as :class:`cppStyleComment`" + +pythonStyleComment = Regex(r"#.*").setName("Python style comment") +"Comment of the form ``# ... (to end of line)``" + +_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + + Optional( Word(" \t") + + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") +commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +"""(Deprecated) Predefined expression of 1 or more printable words or +quoted strings, separated by commas. + +This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`. +""" + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """Here are some common low-level expressions that may be useful in + jump-starting parser development: + + - numeric forms (:class:`integers<integer>`, :class:`reals<real>`, + :class:`scientific notation<sci_real>`) + - common :class:`programming identifiers<identifier>` + - network addresses (:class:`MAC<mac_address>`, + :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`) + - ISO8601 :class:`dates<iso8601_date>` and + :class:`datetime<iso8601_datetime>` + - :class:`UUID<uuid>` + - :class:`comma-separated list<comma_separated_list>` + + Parse actions: + + - :class:`convertToInteger` + - :class:`convertToFloat` + - :class:`convertToDate` + - :class:`convertToDatetime` + - :class:`stripHTMLTags` + - :class:`upcaseTokens` + - :class:`downcaseTokens` + + Example:: + + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + + prints:: + + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convertToInteger = tokenMap(int) + """ + Parse action for converting parsed integers to Python int + """ + + convertToFloat = tokenMap(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).setName("integer").setParseAction(convertToInteger) + """expression that parses an unsigned integer, 
returns an int"""
+
+    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
+    """expression that parses a hexadecimal integer, returns an int"""
+
+    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
+    """expression that parses an integer with optional leading sign, returns an int"""
+
+    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
+    """fractional expression of an integer divided by an integer, returns a float"""
+    fraction.addParseAction(lambda t: t[0]/t[-1])
+
+    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
+    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+    mixed_integer.addParseAction(sum)
+
+    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
+    """expression that parses a floating point number and returns a float"""
+
+    sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
+    """expression that parses a floating point number with optional
+    scientific notation and returns a float"""
+
+    # streamlining this expression makes the docs nicer-looking
+    number = (sci_real | real | signed_integer).streamline()
+    """any numeric expression, returns the corresponding Python type"""
+
+    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
+    """any int or real number, returned as float"""
+
+    identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
+    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+
+    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
+    "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
+
+    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
+    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
+    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
+    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
+    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
+    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
+    "IPv6 address (long, short, or mixed form)"
+
+    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
+    "MAC address xx:xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convertToDate(fmt="%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) + + Example:: + + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + + prints:: + + [datetime.date(1999, 12, 31)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt).date() + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + @staticmethod + def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): + """Helper to create a parse action for converting parsed + datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) + + Example:: + + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + + prints:: + + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") + "ISO8601 date (``yyyy-mm-dd``)" + + iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") + "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" + + uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") + "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" + + _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() + @staticmethod + def stripHTMLTags(s, l, tokens): + """Parse action to remove HTML tags from web page HTML source + + Example:: + + # strip HTML links from normal text + text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' + td,td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + print(table_text.parseString(text).body) + + Prints:: + + More info at the pyparsing wiki page + """ + return pyparsing_common._html_stripper.transformString(tokens[0]) + + _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + + Optional( White(" \t") ) ) ).streamline().setName("commaItem") + comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) + """Parse action to convert tokens to upper case.""" + + downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) + """Parse action to convert tokens to lower case.""" + + +class _lazyclassproperty(object): + def __init__(self, fn): + self.fn = fn + self.__doc__ = fn.__doc__ + self.__name__ = fn.__name__ + + def __get__(self, obj, cls): + if cls is None: + cls = type(obj) + if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, 
'_intern', []) for superclass in cls.__mro__[1:]): + cls._intern = {} + attrname = self.fn.__name__ + if attrname not in cls._intern: + cls._intern[attrname] = self.fn(cls) + return cls._intern[attrname] + + +class unicode_set(object): + """ + A set of Unicode characters, for language-specific strings for + ``alphas``, ``nums``, ``alphanums``, and ``printables``. + A unicode_set is defined by a list of ranges in the Unicode character + set, in a class attribute ``_ranges``, such as:: + + _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] + + A unicode set can also be defined using multiple inheritance of other unicode sets:: + + class CJK(Chinese, Japanese, Korean): + pass + """ + _ranges = [] + + @classmethod + def _get_chars_for_ranges(cls): + ret = [] + for cc in cls.__mro__: + if cc is unicode_set: + break + for rr in cc._ranges: + ret.extend(range(rr[0], rr[-1]+1)) + return [unichr(c) for c in sorted(set(ret))] + + @_lazyclassproperty + def printables(cls): + "all non-whitespace characters in this range" + return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges())) + + @_lazyclassproperty + def alphas(cls): + "all alphabetic characters in this range" + return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges())) + + @_lazyclassproperty + def nums(cls): + "all numeric digit characters in this range" + return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges())) + + @_lazyclassproperty + def alphanums(cls): + "all alphanumeric characters in this range" + return cls.alphas + cls.nums + + +class pyparsing_unicode(unicode_set): + """ + A namespace class for defining common language unicode_sets. + """ + _ranges = [(32, sys.maxunicode)] + + class Latin1(unicode_set): + "Unicode set for Latin-1 Unicode Character Range" + _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] + + class LatinA(unicode_set): + "Unicode set for Latin-A Unicode Character Range" + _ranges = [(0x0100, 0x017f),] + + class LatinB(unicode_set): + "Unicode set for Latin-B Unicode Character Range" + _ranges = [(0x0180, 0x024f),] + + class Greek(unicode_set): + "Unicode set for Greek Unicode Character Ranges" + _ranges = [ + (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d), + (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4), + (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe), + ] + + class Cyrillic(unicode_set): + "Unicode set for Cyrillic Unicode Character Range" + _ranges = [(0x0400, 0x04ff)] + + class Chinese(unicode_set): + "Unicode set for Chinese Unicode Character Range" + _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f), ] + + class Japanese(unicode_set): + "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" + _ranges = [ ] + + class Kanji(unicode_set): + "Unicode set for Kanji Unicode Character Range" + _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f), ] + + class Hiragana(unicode_set): + "Unicode set for Hiragana Unicode Character Range" + _ranges = [(0x3040, 0x309f), ] + + class Katakana(unicode_set): + "Unicode set for Katakana Unicode Character Range" + _ranges = [(0x30a0, 0x30ff), ] + + class Korean(unicode_set): + "Unicode set for Korean Unicode Character Range" + _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f), ] + + class CJK(Chinese, Japanese, Korean): + "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" 
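+        # No _ranges of its own: unicode_set._get_chars_for_ranges() walks
+        # cls.__mro__, so CJK unions the ranges of Chinese, Japanese, and
+        # Korean purely through multiple inheritance.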
+ pass + + class Thai(unicode_set): + "Unicode set for Thai Unicode Character Range" + _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b), ] + + class Arabic(unicode_set): + "Unicode set for Arabic Unicode Character Range" + _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f), ] + + class Hebrew(unicode_set): + "Unicode set for Hebrew Unicode Character Range" + _ranges = [(0x0590, 0x05ff), ] + + class Devanagari(unicode_set): + "Unicode set for Devanagari Unicode Character Range" + _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)] + +pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges + + pyparsing_unicode.Japanese.Hiragana._ranges + + pyparsing_unicode.Japanese.Katakana._ranges) + +# define ranges in language character sets +if PY_3: + setattr(pyparsing_unicode, "العربية", pyparsing_unicode.Arabic) + setattr(pyparsing_unicode, "中文", pyparsing_unicode.Chinese) + setattr(pyparsing_unicode, "кириллица", pyparsing_unicode.Cyrillic) + setattr(pyparsing_unicode, "Ελληνικά", pyparsing_unicode.Greek) + setattr(pyparsing_unicode, "עִברִית", pyparsing_unicode.Hebrew) + setattr(pyparsing_unicode, "日本語", pyparsing_unicode.Japanese) + setattr(pyparsing_unicode.Japanese, "漢字", pyparsing_unicode.Japanese.Kanji) + setattr(pyparsing_unicode.Japanese, "カタカナ", pyparsing_unicode.Japanese.Katakana) + setattr(pyparsing_unicode.Japanese, "ひらがな", pyparsing_unicode.Japanese.Hiragana) + setattr(pyparsing_unicode, "한국어", pyparsing_unicode.Korean) + setattr(pyparsing_unicode, "ไทย", pyparsing_unicode.Thai) + setattr(pyparsing_unicode, "देवनागरी", pyparsing_unicode.Devanagari) + + +if __name__ == "__main__": + + selectToken = CaselessLiteral("select") + fromToken = CaselessLiteral("from") + + ident = Word(alphas, alphanums + "_$") + + columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + columnNameList = Group(delimitedList(columnName)).setName("columns") + columnSpec = ('*' | columnNameList) + + tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + tableNameList = Group(delimitedList(tableName)).setName("tables") + + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") + + # demo runTests method, including embedded comments in test string + simpleSQL.runTests(""" + # '*' as column list and dotted table name + select * from SYS.XYZZY + + # caseless match on "SELECT", and casts back to "select" + SELECT * from XYZZY, ABC + + # list of column names, and mixed case SELECT keyword + Select AA,BB,CC from Sys.dual + + # multiple tables + Select A, B, C from Sys.dual, Table2 + + # invalid SELECT keyword - should fail + Xelect A, B, C from Sys.dual + + # incomplete command - should fail + Select + + # invalid column name - should fail + Select ^^^ frox Sys.dual + + """) + + pyparsing_common.number.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + # any int or real number, returned as float + pyparsing_common.fnumber.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + pyparsing_common.hex_integer.runTests(""" + 100 + FF + """) + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(""" + 12345678-1234-5678-1234-567812345678 + """) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyc new file mode 100644 index 0000000..0568956 Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/pyparsing.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/__init__.py new file mode 100644 index 0000000..8ed060f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/__init__.py @@ -0,0 +1,4 @@ +from .core import TomlError +from .parser import load, loads +from .test import translate_to_test +from .writer import dump, dumps \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/__init__.pyc new file mode 100644 index 0000000..67e49d5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/core.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/core.py new file mode 100644 index 0000000..c182734 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/core.py @@ -0,0 +1,13 @@ +class TomlError(RuntimeError): + def __init__(self, message, line, col, filename): + RuntimeError.__init__(self, message, line, col, filename) + self.message = message + self.line = line + self.col = col + self.filename = filename + + def __str__(self): + return '{}({}, {}): {}'.format(self.filename, self.line, self.col, self.message) + + def __repr__(self): + return 'TomlError({!r}, {!r}, {!r}, {!r})'.format(self.message, self.line, self.col, self.filename) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/core.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/core.pyc new file mode 100644 index 0000000..feb7f0c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/core.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/parser.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/parser.py new file mode 100644 index 0000000..3493aa6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/parser.py @@ -0,0 +1,341 @@ +import string, re, sys, datetime +from .core import TomlError +from .utils import rfc3339_re, parse_rfc3339_re + +if sys.version_info[0] == 2: + _chr = unichr +else: + _chr = chr + +def load(fin, translate=lambda t, x, v: v, object_pairs_hook=dict): + return loads(fin.read(), translate=translate, object_pairs_hook=object_pairs_hook, filename=getattr(fin, 'name', repr(fin))) + +def loads(s, filename='<string>', translate=lambda t, x, v: v, object_pairs_hook=dict): + if isinstance(s, bytes): + s = s.decode('utf-8') + + s = s.replace('\r\n', '\n') + + root = object_pairs_hook() + tables = object_pairs_hook() + scope = root + + src = _Source(s, filename=filename) + ast = _p_toml(src, object_pairs_hook=object_pairs_hook) + + def error(msg): + raise TomlError(msg, pos[0], pos[1], filename) + + def process_value(v, object_pairs_hook): + kind, text, value, pos = v + if kind == 'str' and value.startswith('\n'): + value = value[1:] + if kind == 'array': + if value and any(k != value[0][0] for k, t, v, p in value[1:]): + error('array-type-mismatch') + value = [process_value(item, object_pairs_hook=object_pairs_hook) for item in value] + elif kind == 'table': + value = object_pairs_hook([(k, process_value(value[k], object_pairs_hook=object_pairs_hook)) for k in value]) + return translate(kind, text, value) + + 
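+    # Walk the parsed top-level statements: 'kv' entries land in the current
+    # scope, while [table] and [[table_array]] headers switch the scope; the
+    # table skeleton is collected separately and folded into the root mapping
+    # by merge_tables() below.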
+    for kind, value, pos in ast:
+        if kind == 'kv':
+            k, v = value
+            if k in scope:
+                error('duplicate_keys. Key "{0}" was used more than once.'.format(k))
+            scope[k] = process_value(v, object_pairs_hook=object_pairs_hook)
+        else:
+            is_table_array = (kind == 'table_array')
+            cur = tables
+            for name in value[:-1]:
+                if isinstance(cur.get(name), list):
+                    d, cur = cur[name][-1]
+                else:
+                    d, cur = cur.setdefault(name, (None, object_pairs_hook()))
+
+            scope = object_pairs_hook()
+            name = value[-1]
+            if name not in cur:
+                if is_table_array:
+                    cur[name] = [(scope, object_pairs_hook())]
+                else:
+                    cur[name] = (scope, object_pairs_hook())
+            elif isinstance(cur[name], list):
+                if not is_table_array:
+                    error('table_type_mismatch')
+                cur[name].append((scope, object_pairs_hook()))
+            else:
+                if is_table_array:
+                    error('table_type_mismatch')
+                old_scope, next_table = cur[name]
+                if old_scope is not None:
+                    error('duplicate_tables')
+                cur[name] = (scope, next_table)
+
+    def merge_tables(scope, tables):
+        if scope is None:
+            scope = object_pairs_hook()
+        for k in tables:
+            if k in scope:
+                error('key_table_conflict')
+            v = tables[k]
+            if isinstance(v, list):
+                scope[k] = [merge_tables(sc, tbl) for sc, tbl in v]
+            else:
+                scope[k] = merge_tables(v[0], v[1])
+        return scope
+
+    return merge_tables(root, tables)
+
+class _Source:
+    def __init__(self, s, filename=None):
+        self.s = s
+        self._pos = (1, 1)
+        self._last = None
+        self._filename = filename
+        self.backtrack_stack = []
+
+    def last(self):
+        return self._last
+
+    def pos(self):
+        return self._pos
+
+    def fail(self):
+        return self._expect(None)
+
+    def consume_dot(self):
+        if self.s:
+            self._last = self.s[0]
+            self.s = self.s[1:]
+            self._advance(self._last)
+            return self._last
+        return None
+
+    def expect_dot(self):
+        return self._expect(self.consume_dot())
+
+    def consume_eof(self):
+        if not self.s:
+            self._last = ''
+            return True
+        return False
+
+    def expect_eof(self):
+        return self._expect(self.consume_eof())
+
+    def consume(self, s):
+        if self.s.startswith(s):
+            self.s = self.s[len(s):]
+            self._last = s
+            self._advance(s)
+            return True
+        return False
+
+    def expect(self, s):
+        return self._expect(self.consume(s))
+
+    def consume_re(self, re):
+        m = re.match(self.s)
+        if m:
+            self.s = self.s[len(m.group(0)):]
+            self._last = m
+            self._advance(m.group(0))
+            return m
+        return None
+
+    def expect_re(self, re):
+        return self._expect(self.consume_re(re))
+
+    def __enter__(self):
+        self.backtrack_stack.append((self.s, self._pos))
+
+    def __exit__(self, type, value, traceback):
+        if type is None:
+            self.backtrack_stack.pop()
+        else:
+            self.s, self._pos = self.backtrack_stack.pop()
+        return type == TomlError
+
+    def commit(self):
+        self.backtrack_stack[-1] = (self.s, self._pos)
+
+    def _expect(self, r):
+        if not r:
+            raise TomlError('msg', self._pos[0], self._pos[1], self._filename)
+        return r
+
+    def _advance(self, s):
+        suffix_pos = s.rfind('\n')
+        if suffix_pos == -1:
+            self._pos = (self._pos[0], self._pos[1] + len(s))
+        else:
+            self._pos = (self._pos[0] + s.count('\n'), len(s) - suffix_pos)
+
+_ews_re = re.compile(r'(?:[ \t]|#[^\n]*\n|#[^\n]*\Z|\n)*')
+def _p_ews(s):
+    s.expect_re(_ews_re)
+
+_ws_re = re.compile(r'[ \t]*')
+def _p_ws(s):
+    s.expect_re(_ws_re)
+
+_escapes = { 'b': '\b', 'n': '\n', 'r': '\r', 't': '\t', '"': '"',
+    '\\': '\\', 'f': '\f' }
+
+_basicstr_re = re.compile(r'[^"\\\000-\037]*')
+_short_uni_re = re.compile(r'u([0-9a-fA-F]{4})')
+_long_uni_re = re.compile(r'U([0-9a-fA-F]{8})')
+_escapes_re = re.compile(r'[btnfr\"\\]')
+_newline_esc_re = 
re.compile('\n[ \t\n]*') +def _p_basicstr_content(s, content=_basicstr_re): + res = [] + while True: + res.append(s.expect_re(content).group(0)) + if not s.consume('\\'): + break + if s.consume_re(_newline_esc_re): + pass + elif s.consume_re(_short_uni_re) or s.consume_re(_long_uni_re): + v = int(s.last().group(1), 16) + if 0xd800 <= v < 0xe000: + s.fail() + res.append(_chr(v)) + else: + s.expect_re(_escapes_re) + res.append(_escapes[s.last().group(0)]) + return ''.join(res) + +_key_re = re.compile(r'[0-9a-zA-Z-_]+') +def _p_key(s): + with s: + s.expect('"') + r = _p_basicstr_content(s, _basicstr_re) + s.expect('"') + return r + if s.consume('\''): + if s.consume('\'\''): + r = s.expect_re(_litstr_ml_re).group(0) + s.expect('\'\'\'') + else: + r = s.expect_re(_litstr_re).group(0) + s.expect('\'') + return r + return s.expect_re(_key_re).group(0) + +_float_re = re.compile(r'[+-]?(?:0|[1-9](?:_?\d)*)(?:\.\d(?:_?\d)*)?(?:[eE][+-]?(?:\d(?:_?\d)*))?') + +_basicstr_ml_re = re.compile(r'(?:""?(?!")|[^"\\\000-\011\013-\037])*') +_litstr_re = re.compile(r"[^'\000\010\012-\037]*") +_litstr_ml_re = re.compile(r"(?:(?:|'|'')(?:[^'\000-\010\013-\037]))*") +def _p_value(s, object_pairs_hook): + pos = s.pos() + + if s.consume('true'): + return 'bool', s.last(), True, pos + if s.consume('false'): + return 'bool', s.last(), False, pos + + if s.consume('"'): + if s.consume('""'): + r = _p_basicstr_content(s, _basicstr_ml_re) + s.expect('"""') + else: + r = _p_basicstr_content(s, _basicstr_re) + s.expect('"') + return 'str', r, r, pos + + if s.consume('\''): + if s.consume('\'\''): + r = s.expect_re(_litstr_ml_re).group(0) + s.expect('\'\'\'') + else: + r = s.expect_re(_litstr_re).group(0) + s.expect('\'') + return 'str', r, r, pos + + if s.consume_re(rfc3339_re): + m = s.last() + return 'datetime', m.group(0), parse_rfc3339_re(m), pos + + if s.consume_re(_float_re): + m = s.last().group(0) + r = m.replace('_','') + if '.' 
in m or 'e' in m or 'E' in m: + return 'float', m, float(r), pos + else: + return 'int', m, int(r, 10), pos + + if s.consume('['): + items = [] + with s: + while True: + _p_ews(s) + items.append(_p_value(s, object_pairs_hook=object_pairs_hook)) + s.commit() + _p_ews(s) + s.expect(',') + s.commit() + _p_ews(s) + s.expect(']') + return 'array', None, items, pos + + if s.consume('{'): + _p_ws(s) + items = object_pairs_hook() + if not s.consume('}'): + k = _p_key(s) + _p_ws(s) + s.expect('=') + _p_ws(s) + items[k] = _p_value(s, object_pairs_hook=object_pairs_hook) + _p_ws(s) + while s.consume(','): + _p_ws(s) + k = _p_key(s) + _p_ws(s) + s.expect('=') + _p_ws(s) + items[k] = _p_value(s, object_pairs_hook=object_pairs_hook) + _p_ws(s) + s.expect('}') + return 'table', None, items, pos + + s.fail() + +def _p_stmt(s, object_pairs_hook): + pos = s.pos() + if s.consume( '['): + is_array = s.consume('[') + _p_ws(s) + keys = [_p_key(s)] + _p_ws(s) + while s.consume('.'): + _p_ws(s) + keys.append(_p_key(s)) + _p_ws(s) + s.expect(']') + if is_array: + s.expect(']') + return 'table_array' if is_array else 'table', keys, pos + + key = _p_key(s) + _p_ws(s) + s.expect('=') + _p_ws(s) + value = _p_value(s, object_pairs_hook=object_pairs_hook) + return 'kv', (key, value), pos + +_stmtsep_re = re.compile(r'(?:[ \t]*(?:#[^\n]*)?\n)+[ \t]*') +def _p_toml(s, object_pairs_hook): + stmts = [] + _p_ews(s) + with s: + stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook)) + while True: + s.commit() + s.expect_re(_stmtsep_re) + stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook)) + _p_ews(s) + s.expect_eof() + return stmts diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/parser.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/parser.pyc new file mode 100644 index 0000000..d44feeb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/parser.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/test.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/test.py new file mode 100644 index 0000000..ec8abfc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/test.py @@ -0,0 +1,30 @@ +import datetime +from .utils import format_rfc3339 + +try: + _string_types = (str, unicode) + _int_types = (int, long) +except NameError: + _string_types = str + _int_types = int + +def translate_to_test(v): + if isinstance(v, dict): + return { k: translate_to_test(v) for k, v in v.items() } + if isinstance(v, list): + a = [translate_to_test(x) for x in v] + if v and isinstance(v[0], dict): + return a + else: + return {'type': 'array', 'value': a} + if isinstance(v, datetime.datetime): + return {'type': 'datetime', 'value': format_rfc3339(v)} + if isinstance(v, bool): + return {'type': 'bool', 'value': 'true' if v else 'false'} + if isinstance(v, _int_types): + return {'type': 'integer', 'value': str(v)} + if isinstance(v, float): + return {'type': 'float', 'value': '{:.17}'.format(v)} + if isinstance(v, _string_types): + return {'type': 'string', 'value': v} + raise RuntimeError('unexpected value: {!r}'.format(v)) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/test.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/test.pyc new file mode 100644 index 0000000..57b1063 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/test.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/utils.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/utils.py new file mode 100644 index 0000000..636a680 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/utils.py @@ -0,0 +1,67 @@ +import datetime +import re + +rfc3339_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?(?:Z|([+-]\d{2}):(\d{2}))') + +def parse_rfc3339(v): + m = rfc3339_re.match(v) + if not m or m.group(0) != v: + return None + return parse_rfc3339_re(m) + +def parse_rfc3339_re(m): + r = map(int, m.groups()[:6]) + if m.group(7): + micro = float(m.group(7)) + else: + micro = 0 + + if m.group(8): + g = int(m.group(8), 10) * 60 + int(m.group(9), 10) + tz = _TimeZone(datetime.timedelta(0, g * 60)) + else: + tz = _TimeZone(datetime.timedelta(0, 0)) + + y, m, d, H, M, S = r + return datetime.datetime(y, m, d, H, M, S, int(micro * 1000000), tz) + + +def format_rfc3339(v): + offs = v.utcoffset() + offs = int(offs.total_seconds()) // 60 if offs is not None else 0 + + if offs == 0: + suffix = 'Z' + else: + if offs > 0: + suffix = '+' + else: + suffix = '-' + offs = -offs + suffix = '{0}{1:02}:{2:02}'.format(suffix, offs // 60, offs % 60) + + if v.microsecond: + return v.strftime('%Y-%m-%dT%H:%M:%S.%f') + suffix + else: + return v.strftime('%Y-%m-%dT%H:%M:%S') + suffix + +class _TimeZone(datetime.tzinfo): + def __init__(self, offset): + self._offset = offset + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return None + + def tzname(self, dt): + m = self._offset.total_seconds() // 60 + if m < 0: + res = '-' + m = -m + else: + res = '+' + h = m // 60 + m = m - h * 60 + return '{}{:.02}{:.02}'.format(res, h, m) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/utils.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/utils.pyc new file mode 100644 index 0000000..6d1117e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/writer.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/writer.py new file mode 100644 index 0000000..73b5089 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/writer.py @@ -0,0 +1,106 @@ +from __future__ import unicode_literals +import io, datetime, math, string, sys + +from .utils import format_rfc3339 + +if sys.version_info[0] == 3: + long = int + unicode = str + + +def dumps(obj, sort_keys=False): + fout = io.StringIO() + dump(obj, fout, sort_keys=sort_keys) + return fout.getvalue() + + +_escapes = {'\n': 'n', '\r': 'r', '\\': '\\', '\t': 't', '\b': 'b', '\f': 'f', '"': '"'} + + +def _escape_string(s): + res = [] + start = 0 + + def flush(): + if start != i: + res.append(s[start:i]) + return i + 1 + + i = 0 + while i < len(s): + c = s[i] + if c in '"\\\n\r\t\b\f': + start = flush() + res.append('\\' + _escapes[c]) + elif ord(c) < 0x20: + start = flush() + res.append('\\u%04x' % ord(c)) + i += 1 + + flush() + return '"' + ''.join(res) + '"' + + +_key_chars = string.digits + string.ascii_letters + '-_' +def _escape_id(s): + if any(c not in _key_chars for c in s): + return _escape_string(s) + return s + + +def _format_value(v): + if isinstance(v, bool): + return 'true' if v else 'false' + if isinstance(v, int) or isinstance(v, long): + return unicode(v) + if isinstance(v, float): + if math.isnan(v) or math.isinf(v): + raise ValueError("{0} is not a valid 
TOML value".format(v)) + else: + return repr(v) + elif isinstance(v, unicode) or isinstance(v, bytes): + return _escape_string(v) + elif isinstance(v, datetime.datetime): + return format_rfc3339(v) + elif isinstance(v, list): + return '[{0}]'.format(', '.join(_format_value(obj) for obj in v)) + elif isinstance(v, dict): + return '{{{0}}}'.format(', '.join('{} = {}'.format(_escape_id(k), _format_value(obj)) for k, obj in v.items())) + else: + raise RuntimeError(v) + + +def dump(obj, fout, sort_keys=False): + tables = [((), obj, False)] + + while tables: + name, table, is_array = tables.pop() + if name: + section_name = '.'.join(_escape_id(c) for c in name) + if is_array: + fout.write('[[{0}]]\n'.format(section_name)) + else: + fout.write('[{0}]\n'.format(section_name)) + + table_keys = sorted(table.keys()) if sort_keys else table.keys() + new_tables = [] + has_kv = False + for k in table_keys: + v = table[k] + if isinstance(v, dict): + new_tables.append((name + (k,), v, False)) + elif isinstance(v, list) and v and all(isinstance(o, dict) for o in v): + new_tables.extend((name + (k,), d, True) for d in v) + elif v is None: + # based on mojombo's comment: https://github.com/toml-lang/toml/issues/146#issuecomment-25019344 + fout.write( + '#{} = null # To use: uncomment and replace null with value\n'.format(_escape_id(k))) + has_kv = True + else: + fout.write('{0} = {1}\n'.format(_escape_id(k), _format_value(v))) + has_kv = True + + tables.extend(reversed(new_tables)) + + if (name or has_kv) and tables: + fout.write('\n') diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/writer.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/writer.pyc new file mode 100644 index 0000000..19a79a1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/pytoml/writer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__init__.py new file mode 100644 index 0000000..80c4ce1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__init__.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- + +# __ +# /__) _ _ _ _ _/ _ +# / ( (- (/ (/ (- _) / _) +# / + +""" +Requests HTTP Library +~~~~~~~~~~~~~~~~~~~~~ + +Requests is an HTTP library, written in Python, for human beings. Basic GET +usage: + + >>> import requests + >>> r = requests.get('https://www.python.org') + >>> r.status_code + 200 + >>> 'Python is a programming language' in r.content + True + +... or POST: + + >>> payload = dict(key1='value1', key2='value2') + >>> r = requests.post('https://httpbin.org/post', data=payload) + >>> print(r.text) + { + ... + "form": { + "key2": "value2", + "key1": "value1" + }, + ... + } + +The other HTTP methods are supported - see `requests.api`. Full documentation +is at <http://python-requests.org>. + +:copyright: (c) 2017 by Kenneth Reitz. +:license: Apache 2.0, see LICENSE for more details. +""" + +from pip._vendor import urllib3 +from pip._vendor import chardet +import warnings +from .exceptions import RequestsDependencyWarning + + +def check_compatibility(urllib3_version, chardet_version): + urllib3_version = urllib3_version.split('.') + assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. + + # Sometimes, urllib3 only reports its version as 16.1. + if len(urllib3_version) == 2: + urllib3_version.append('0') + + # Check urllib3 for compatibility. 
+ major, minor, patch = urllib3_version # noqa: F811 + major, minor, patch = int(major), int(minor), int(patch) + # urllib3 >= 1.21.1, <= 1.24 + assert major == 1 + assert minor >= 21 + assert minor <= 24 + + # Check chardet for compatibility. + major, minor, patch = chardet_version.split('.')[:3] + major, minor, patch = int(major), int(minor), int(patch) + # chardet >= 3.0.2, < 3.1.0 + assert major == 3 + assert minor < 1 + assert patch >= 2 + + +def _check_cryptography(cryptography_version): + # cryptography < 1.3.4 + try: + cryptography_version = list(map(int, cryptography_version.split('.'))) + except ValueError: + return + + if cryptography_version < [1, 3, 4]: + warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) + warnings.warn(warning, RequestsDependencyWarning) + +# Check imported dependencies for compatibility. +try: + check_compatibility(urllib3.__version__, chardet.__version__) +except (AssertionError, ValueError): + warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported " + "version!".format(urllib3.__version__, chardet.__version__), + RequestsDependencyWarning) + +# Attempt to enable urllib3's SNI support, if possible +from pip._internal.utils.compat import WINDOWS +if not WINDOWS: + try: + from pip._vendor.urllib3.contrib import pyopenssl + pyopenssl.inject_into_urllib3() + + # Check cryptography version + from cryptography import __version__ as cryptography_version + _check_cryptography(cryptography_version) + except ImportError: + pass + +# urllib3's DependencyWarnings should be silenced. +from pip._vendor.urllib3.exceptions import DependencyWarning +warnings.simplefilter('ignore', DependencyWarning) + +from .__version__ import __title__, __description__, __url__, __version__ +from .__version__ import __build__, __author__, __author_email__, __license__ +from .__version__ import __copyright__, __cake__ + +from . import utils +from . import packages +from .models import Request, Response, PreparedRequest +from .api import request, get, head, post, patch, put, delete, options +from .sessions import session, Session +from .status_codes import codes +from .exceptions import ( + RequestException, Timeout, URLRequired, + TooManyRedirects, HTTPError, ConnectionError, + FileModeWarning, ConnectTimeout, ReadTimeout +) + +# Set default logging handler to avoid "No handler found" warnings. +import logging +from logging import NullHandler + +logging.getLogger(__name__).addHandler(NullHandler()) + +# FileModeWarnings go off per the default. +warnings.simplefilter('default', FileModeWarning, append=True) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pyc new file mode 100644 index 0000000..e6e4619 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__version__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__version__.py new file mode 100644 index 0000000..f5b5d03 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__version__.py @@ -0,0 +1,14 @@ +# .-. .-. .-. . . .-. .-. .-. .-. +# |( |- |.| | | |- `-. | `-. +# ' ' `-' `-`.`-' `-' `-' ' `-' + +__title__ = 'requests' +__description__ = 'Python HTTP for Humans.' 
+__url__ = 'http://python-requests.org' +__version__ = '2.21.0' +__build__ = 0x022100 +__author__ = 'Kenneth Reitz' +__author_email__ = 'me@kennethreitz.org' +__license__ = 'Apache 2.0' +__copyright__ = 'Copyright 2018 Kenneth Reitz' +__cake__ = u'\u2728 \U0001f370 \u2728' diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__version__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__version__.pyc new file mode 100644 index 0000000..15ec548 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/__version__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.py new file mode 100644 index 0000000..759d9a5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + +""" +requests._internal_utils +~~~~~~~~~~~~~~ + +Provides utility functions that are consumed internally by Requests +which depend on extremely few external helpers (such as compat) +""" + +from .compat import is_py2, builtin_str, str + + +def to_native_string(string, encoding='ascii'): + """Given a string object, regardless of type, returns a representation of + that string in the native string type, encoding and decoding where + necessary. This assumes ASCII unless told otherwise. + """ + if isinstance(string, builtin_str): + out = string + else: + if is_py2: + out = string.encode(encoding) + else: + out = string.decode(encoding) + + return out + + +def unicode_is_ascii(u_string): + """Determine if unicode string only contains ASCII characters. + + :param str u_string: unicode string to check. Must be unicode + and not Python 2 `str`. + :rtype: bool + """ + assert isinstance(u_string, str) + try: + u_string.encode('ascii') + return True + except UnicodeEncodeError: + return False diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.pyc new file mode 100644 index 0000000..6cbe79f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/_internal_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.py new file mode 100644 index 0000000..c30e7c9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.py @@ -0,0 +1,533 @@ +# -*- coding: utf-8 -*- + +""" +requests.adapters +~~~~~~~~~~~~~~~~~ + +This module contains the transport adapters that Requests uses to define +and maintain connections. 
+""" + +import os.path +import socket + +from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url +from pip._vendor.urllib3.response import HTTPResponse +from pip._vendor.urllib3.util import parse_url +from pip._vendor.urllib3.util import Timeout as TimeoutSauce +from pip._vendor.urllib3.util.retry import Retry +from pip._vendor.urllib3.exceptions import ClosedPoolError +from pip._vendor.urllib3.exceptions import ConnectTimeoutError +from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError +from pip._vendor.urllib3.exceptions import MaxRetryError +from pip._vendor.urllib3.exceptions import NewConnectionError +from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError +from pip._vendor.urllib3.exceptions import ProtocolError +from pip._vendor.urllib3.exceptions import ReadTimeoutError +from pip._vendor.urllib3.exceptions import SSLError as _SSLError +from pip._vendor.urllib3.exceptions import ResponseError +from pip._vendor.urllib3.exceptions import LocationValueError + +from .models import Response +from .compat import urlparse, basestring +from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, + get_encoding_from_headers, prepend_scheme_if_needed, + get_auth_from_url, urldefragauth, select_proxy) +from .structures import CaseInsensitiveDict +from .cookies import extract_cookies_to_jar +from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, + ProxyError, RetryError, InvalidSchema, InvalidProxyURL, + InvalidURL) +from .auth import _basic_auth_str + +try: + from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager +except ImportError: + def SOCKSProxyManager(*args, **kwargs): + raise InvalidSchema("Missing dependencies for SOCKS support.") + +DEFAULT_POOLBLOCK = False +DEFAULT_POOLSIZE = 10 +DEFAULT_RETRIES = 0 +DEFAULT_POOL_TIMEOUT = None + + +class BaseAdapter(object): + """The Base Transport Adapter""" + + def __init__(self): + super(BaseAdapter, self).__init__() + + def send(self, request, stream=False, timeout=None, verify=True, + cert=None, proxies=None): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) <timeouts>` tuple. + :type timeout: float or tuple + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + """ + raise NotImplementedError + + def close(self): + """Cleans up adapter specific items.""" + raise NotImplementedError + + +class HTTPAdapter(BaseAdapter): + """The built-in HTTP Adapter for urllib3. + + Provides a general-case interface for Requests sessions to contact HTTP and + HTTPS urls by implementing the Transport Adapter interface. This class will + usually be created by the :class:`Session <Session>` class under the + covers. + + :param pool_connections: The number of urllib3 connection pools to cache. + :param pool_maxsize: The maximum number of connections to save in the pool. + :param max_retries: The maximum number of retries each connection + should attempt. 
Note, this applies only to failed DNS lookups, socket + connections and connection timeouts, never to requests where data has + made it to the server. By default, Requests does not retry failed + connections. If you need granular control over the conditions under + which we retry a request, import urllib3's ``Retry`` class and pass + that instead. + :param pool_block: Whether the connection pool should block for connections. + + Usage:: + + >>> import requests + >>> s = requests.Session() + >>> a = requests.adapters.HTTPAdapter(max_retries=3) + >>> s.mount('http://', a) + """ + __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', + '_pool_block'] + + def __init__(self, pool_connections=DEFAULT_POOLSIZE, + pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, + pool_block=DEFAULT_POOLBLOCK): + if max_retries == DEFAULT_RETRIES: + self.max_retries = Retry(0, read=False) + else: + self.max_retries = Retry.from_int(max_retries) + self.config = {} + self.proxy_manager = {} + + super(HTTPAdapter, self).__init__() + + self._pool_connections = pool_connections + self._pool_maxsize = pool_maxsize + self._pool_block = pool_block + + self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) + + def __getstate__(self): + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + # Can't handle by adding 'proxy_manager' to self.__attrs__ because + # self.poolmanager uses a lambda function, which isn't pickleable. + self.proxy_manager = {} + self.config = {} + + for attr, value in state.items(): + setattr(self, attr, value) + + self.init_poolmanager(self._pool_connections, self._pool_maxsize, + block=self._pool_block) + + def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): + """Initializes a urllib3 PoolManager. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. + + :param connections: The number of urllib3 connection pools to cache. + :param maxsize: The maximum number of connections to save in the pool. + :param block: Block when no free connections are available. + :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. + """ + # save these values for pickling + self._pool_connections = connections + self._pool_maxsize = maxsize + self._pool_block = block + + self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, + block=block, strict=True, **pool_kwargs) + + def proxy_manager_for(self, proxy, **proxy_kwargs): + """Return urllib3 ProxyManager for the given proxy. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. + + :param proxy: The proxy to return a urllib3 ProxyManager for. + :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. 
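As the `max_retries` docstring above suggests, granular retry control comes from passing urllib3's `Retry` object instead of an integer. A sketch, assuming the stand-alone packages (the vendored copy would import from `pip._vendor.urllib3.util.retry`):

# Mounting an adapter with a urllib3 Retry policy, per the docstring above.
import requests
from urllib3.util.retry import Retry

retries = Retry(
    total=3,
    backoff_factor=0.5,                  # sleep 0.5s, 1s, 2s between attempts
    status_forcelist=[502, 503, 504],    # also retry on these response codes
)

session = requests.Session()
session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))
response = session.get('https://httpbin.org/get')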
+ :returns: ProxyManager + :rtype: urllib3.ProxyManager + """ + if proxy in self.proxy_manager: + manager = self.proxy_manager[proxy] + elif proxy.lower().startswith('socks'): + username, password = get_auth_from_url(proxy) + manager = self.proxy_manager[proxy] = SOCKSProxyManager( + proxy, + username=username, + password=password, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs + ) + else: + proxy_headers = self.proxy_headers(proxy) + manager = self.proxy_manager[proxy] = proxy_from_url( + proxy, + proxy_headers=proxy_headers, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs) + + return manager + + def cert_verify(self, conn, url, verify, cert): + """Verify a SSL certificate. This method should not be called from user + code, and is only exposed for use when subclassing the + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. + + :param conn: The urllib3 connection object associated with the cert. + :param url: The requested URL. + :param verify: Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: The SSL certificate to verify. + """ + if url.lower().startswith('https') and verify: + + cert_loc = None + + # Allow self-specified cert location. + if verify is not True: + cert_loc = verify + + if not cert_loc: + cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) + + if not cert_loc or not os.path.exists(cert_loc): + raise IOError("Could not find a suitable TLS CA certificate bundle, " + "invalid path: {}".format(cert_loc)) + + conn.cert_reqs = 'CERT_REQUIRED' + + if not os.path.isdir(cert_loc): + conn.ca_certs = cert_loc + else: + conn.ca_cert_dir = cert_loc + else: + conn.cert_reqs = 'CERT_NONE' + conn.ca_certs = None + conn.ca_cert_dir = None + + if cert: + if not isinstance(cert, basestring): + conn.cert_file = cert[0] + conn.key_file = cert[1] + else: + conn.cert_file = cert + conn.key_file = None + if conn.cert_file and not os.path.exists(conn.cert_file): + raise IOError("Could not find the TLS certificate file, " + "invalid path: {}".format(conn.cert_file)) + if conn.key_file and not os.path.exists(conn.key_file): + raise IOError("Could not find the TLS key file, " + "invalid path: {}".format(conn.key_file)) + + def build_response(self, req, resp): + """Builds a :class:`Response <requests.Response>` object from a urllib3 + response. This should not be called from user code, and is only exposed + for use when subclassing the + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` + + :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response. + :param resp: The urllib3 response object. + :rtype: requests.Response + """ + response = Response() + + # Fallback to None if there's no status_code, for whatever reason. + response.status_code = getattr(resp, 'status', None) + + # Make headers case-insensitive. + response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) + + # Set encoding. + response.encoding = get_encoding_from_headers(response.headers) + response.raw = resp + response.reason = response.raw.reason + + if isinstance(req.url, bytes): + response.url = req.url.decode('utf-8') + else: + response.url = req.url + + # Add new cookies from the server. + extract_cookies_to_jar(response.cookies, req, resp) + + # Give the Response some context. 
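From the caller's side, the `verify`/`cert` arguments that `cert_verify` above interprets look like this; the host and file paths below are placeholders, not real endpoints:

# The verify/cert shapes handled by cert_verify above (placeholder paths).
import requests

# verify=True (default) uses the bundled certifi CA store; a string
# points at a custom CA bundle file or directory instead.
requests.get('https://internal.example.com', verify='/etc/ssl/my-ca-bundle.pem')

# A client certificate: a single .pem path, or a (cert, key) pair.
requests.get('https://internal.example.com',
             cert=('/etc/ssl/client.crt', '/etc/ssl/client.key'))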
+ response.request = req + response.connection = self + + return response + + def get_connection(self, url, proxies=None): + """Returns a urllib3 connection for the given URL. This should not be + called from user code, and is only exposed for use when subclassing the + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. + + :param url: The URL to connect to. + :param proxies: (optional) A Requests-style dictionary of proxies used on this request. + :rtype: urllib3.ConnectionPool + """ + proxy = select_proxy(url, proxies) + + if proxy: + proxy = prepend_scheme_if_needed(proxy, 'http') + proxy_url = parse_url(proxy) + if not proxy_url.host: + raise InvalidProxyURL("Please check proxy URL. It is malformed" + " and could be missing the host.") + proxy_manager = self.proxy_manager_for(proxy) + conn = proxy_manager.connection_from_url(url) + else: + # Only scheme should be lower case + parsed = urlparse(url) + url = parsed.geturl() + conn = self.poolmanager.connection_from_url(url) + + return conn + + def close(self): + """Disposes of any internal state. + + Currently, this closes the PoolManager and any active ProxyManager, + which closes any pooled connections. + """ + self.poolmanager.clear() + for proxy in self.proxy_manager.values(): + proxy.clear() + + def request_url(self, request, proxies): + """Obtain the url to use when making the final request. + + If the message is being sent through a HTTP proxy, the full URL has to + be used. Otherwise, we should only use the path portion of the URL. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. + + :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. + :rtype: str + """ + proxy = select_proxy(request.url, proxies) + scheme = urlparse(request.url).scheme + + is_proxied_http_request = (proxy and scheme != 'https') + using_socks_proxy = False + if proxy: + proxy_scheme = urlparse(proxy).scheme.lower() + using_socks_proxy = proxy_scheme.startswith('socks') + + url = request.path_url + if is_proxied_http_request and not using_socks_proxy: + url = urldefragauth(request.url) + + return url + + def add_headers(self, request, **kwargs): + """Add any headers needed by the connection. As of v2.0 this does + nothing by default, but is left for overriding by users that subclass + the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. + + :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to. + :param kwargs: The keyword arguments from the call to send(). + """ + pass + + def proxy_headers(self, proxy): + """Returns a dictionary of the headers to add to any request sent + through a proxy. This works with urllib3 magic to ensure that they are + correctly sent to the proxy, rather than in a tunnelled request if + CONNECT is being used. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. + + :param proxy: The url of the proxy being used for this request. 
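A proxies mapping as consumed by `select_proxy` and `proxy_headers` above: credentials embedded in the proxy URL become a `Proxy-Authorization` header, and `socks` schemes are routed through `SOCKSProxyManager` instead. The proxy host is a placeholder:

# Proxies dictionary, per request_url/proxy_headers above (placeholder host).
import requests

proxies = {
    'http': 'http://user:pass@proxy.example.com:3128',
    'https': 'http://user:pass@proxy.example.com:3128',
    # SOCKS schemes go through SOCKSProxyManager instead:
    # 'https': 'socks5://proxy.example.com:1080',
}
requests.get('http://httpbin.org/ip', proxies=proxies)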
+ :rtype: dict + """ + headers = {} + username, password = get_auth_from_url(proxy) + + if username: + headers['Proxy-Authorization'] = _basic_auth_str(username, + password) + + return headers + + def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) <timeouts>` tuple. + :type timeout: float or tuple or urllib3 Timeout object + :param verify: (optional) Either a boolean, in which case it controls whether + we verify the server's TLS certificate, or a string, in which case it + must be a path to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + :rtype: requests.Response + """ + + try: + conn = self.get_connection(request.url, proxies) + except LocationValueError as e: + raise InvalidURL(e, request=request) + + self.cert_verify(conn, request.url, verify, cert) + url = self.request_url(request, proxies) + self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) + + chunked = not (request.body is None or 'Content-Length' in request.headers) + + if isinstance(timeout, tuple): + try: + connect, read = timeout + timeout = TimeoutSauce(connect=connect, read=read) + except ValueError as e: + # this may raise a string formatting error. + err = ("Invalid timeout {}. Pass a (connect, read) " + "timeout tuple, or a single float to set " + "both timeouts to the same value".format(timeout)) + raise ValueError(err) + elif isinstance(timeout, TimeoutSauce): + pass + else: + timeout = TimeoutSauce(connect=timeout, read=timeout) + + try: + if not chunked: + resp = conn.urlopen( + method=request.method, + url=url, + body=request.body, + headers=request.headers, + redirect=False, + assert_same_host=False, + preload_content=False, + decode_content=False, + retries=self.max_retries, + timeout=timeout + ) + + # Send the request. + else: + if hasattr(conn, 'proxy_pool'): + conn = conn.proxy_pool + + low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) + + try: + low_conn.putrequest(request.method, + url, + skip_accept_encoding=True) + + for header, value in request.headers.items(): + low_conn.putheader(header, value) + + low_conn.endheaders() + + for i in request.body: + low_conn.send(hex(len(i))[2:].encode('utf-8')) + low_conn.send(b'\r\n') + low_conn.send(i) + low_conn.send(b'\r\n') + low_conn.send(b'0\r\n\r\n') + + # Receive the response from the server + try: + # For Python 2.7, use buffering of HTTP responses + r = low_conn.getresponse(buffering=True) + except TypeError: + # For compatibility with Python 3.3+ + r = low_conn.getresponse() + + resp = HTTPResponse.from_httplib( + r, + pool=conn, + connection=low_conn, + preload_content=False, + decode_content=False + ) + except: + # If we hit any problems here, clean up the connection. + # Then, reraise so that we can handle the actual exception. 
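The timeout shapes accepted by `HTTPAdapter.send` above: a single float applies to both the connect and read phases, while a 2-tuple splits them, as a short sketch shows:

# Timeout handling in send() above: float vs (connect, read) tuple.
import requests

requests.get('https://httpbin.org/get', timeout=5)           # 5s connect and read
requests.get('https://httpbin.org/get', timeout=(3.05, 27))  # (connect, read)

try:
    requests.get('https://httpbin.org/delay/10', timeout=(3.05, 2))
except requests.exceptions.ReadTimeout:
    print('connected, but the server was too slow to respond')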
+ low_conn.close() + raise + + except (ProtocolError, socket.error) as err: + raise ConnectionError(err, request=request) + + except MaxRetryError as e: + if isinstance(e.reason, ConnectTimeoutError): + # TODO: Remove this in 3.0.0: see #2811 + if not isinstance(e.reason, NewConnectionError): + raise ConnectTimeout(e, request=request) + + if isinstance(e.reason, ResponseError): + raise RetryError(e, request=request) + + if isinstance(e.reason, _ProxyError): + raise ProxyError(e, request=request) + + if isinstance(e.reason, _SSLError): + # This branch is for urllib3 v1.22 and later. + raise SSLError(e, request=request) + + raise ConnectionError(e, request=request) + + except ClosedPoolError as e: + raise ConnectionError(e, request=request) + + except _ProxyError as e: + raise ProxyError(e) + + except (_SSLError, _HTTPError) as e: + if isinstance(e, _SSLError): + # This branch is for urllib3 versions earlier than v1.22 + raise SSLError(e, request=request) + elif isinstance(e, ReadTimeoutError): + raise ReadTimeout(e, request=request) + else: + raise + + return self.build_response(request, resp) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyc new file mode 100644 index 0000000..0771e67 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/api.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/api.py new file mode 100644 index 0000000..abada96 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/api.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- + +""" +requests.api +~~~~~~~~~~~~ + +This module implements the Requests API. + +:copyright: (c) 2012 by Kenneth Reitz. +:license: Apache2, see LICENSE for more details. +""" + +from . import sessions + + +def request(method, url, **kwargs): + """Constructs and sends a :class:`Request <Request>`. + + :param method: method for the new :class:`Request` object. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the body of the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. + :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. + ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` + or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string + defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers + to add for the file. + :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How many seconds to wait for the server to send data + before giving up, as a float, or a :ref:`(connect timeout, read + timeout) <timeouts>` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Boolean. 
Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. + :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. + :param stream: (optional) if ``False``, the response content will be immediately downloaded. + :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. + :return: :class:`Response <Response>` object + :rtype: requests.Response + + Usage:: + + >>> import requests + >>> req = requests.request('GET', 'https://httpbin.org/get') + <Response [200]> + """ + + # By using the 'with' statement we are sure the session is closed, thus we + # avoid leaving sockets open which can trigger a ResourceWarning in some + # cases, and look like a memory leak in others. + with sessions.Session() as session: + return session.request(method=method, url=url, **kwargs) + + +def get(url, params=None, **kwargs): + r"""Sends a GET request. + + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response <Response>` object + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', True) + return request('get', url, params=params, **kwargs) + + +def options(url, **kwargs): + r"""Sends an OPTIONS request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response <Response>` object + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', True) + return request('options', url, **kwargs) + + +def head(url, **kwargs): + r"""Sends a HEAD request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response <Response>` object + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', False) + return request('head', url, **kwargs) + + +def post(url, data=None, json=None, **kwargs): + r"""Sends a POST request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json data to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response <Response>` object + :rtype: requests.Response + """ + + return request('post', url, data=data, json=json, **kwargs) + + +def put(url, data=None, **kwargs): + r"""Sends a PUT request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json data to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response <Response>` object + :rtype: requests.Response + """ + + return request('put', url, data=data, **kwargs) + + +def patch(url, data=None, **kwargs): + r"""Sends a PATCH request. + + :param url: URL for the new :class:`Request` object. 
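The module-level helpers defined in `requests/api.py` above are thin wrappers that open a short-lived `Session` per call, as the `with sessions.Session()` block shows. Typical usage, against the same httpbin.org host the docstrings use:

# One-shot calls through the api.py helpers above.
import requests

r = requests.get('https://httpbin.org/get', params={'q': 'test'})
print(r.status_code, r.url)      # 200 https://httpbin.org/get?q=test

r = requests.post('https://httpbin.org/post', json={'key': 'value'})
print(r.json()['json'])          # {'key': 'value'}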
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json data to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response <Response>` object + :rtype: requests.Response + """ + + return request('patch', url, data=data, **kwargs) + + +def delete(url, **kwargs): + r"""Sends a DELETE request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response <Response>` object + :rtype: requests.Response + """ + + return request('delete', url, **kwargs) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/api.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/api.pyc new file mode 100644 index 0000000..5fddac2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/api.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/auth.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/auth.py new file mode 100644 index 0000000..bdde51c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/auth.py @@ -0,0 +1,305 @@ +# -*- coding: utf-8 -*- + +""" +requests.auth +~~~~~~~~~~~~~ + +This module contains the authentication handlers for Requests. +""" + +import os +import re +import time +import hashlib +import threading +import warnings + +from base64 import b64encode + +from .compat import urlparse, str, basestring +from .cookies import extract_cookies_to_jar +from ._internal_utils import to_native_string +from .utils import parse_dict_header + +CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' +CONTENT_TYPE_MULTI_PART = 'multipart/form-data' + + +def _basic_auth_str(username, password): + """Returns a Basic Auth string.""" + + # "I want us to put a big-ol' comment on top of it that + # says that this behaviour is dumb but we need to preserve + # it because people are relying on it." + # - Lukasa + # + # These are here solely to maintain backwards compatibility + # for things like ints. This will be removed in 3.0.0. + if not isinstance(username, basestring): + warnings.warn( + "Non-string usernames will no longer be supported in Requests " + "3.0.0. Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(username), + category=DeprecationWarning, + ) + username = str(username) + + if not isinstance(password, basestring): + warnings.warn( + "Non-string passwords will no longer be supported in Requests " + "3.0.0. 
Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(password), + category=DeprecationWarning, + ) + password = str(password) + # -- End Removal -- + + if isinstance(username, str): + username = username.encode('latin1') + + if isinstance(password, str): + password = password.encode('latin1') + + authstr = 'Basic ' + to_native_string( + b64encode(b':'.join((username, password))).strip() + ) + + return authstr + + +class AuthBase(object): + """Base class that all auth implementations derive from""" + + def __call__(self, r): + raise NotImplementedError('Auth hooks must be callable.') + + +class HTTPBasicAuth(AuthBase): + """Attaches HTTP Basic Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + + def __eq__(self, other): + return all([ + self.username == getattr(other, 'username', None), + self.password == getattr(other, 'password', None) + ]) + + def __ne__(self, other): + return not self == other + + def __call__(self, r): + r.headers['Authorization'] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPProxyAuth(HTTPBasicAuth): + """Attaches HTTP Proxy Authentication to a given Request object.""" + + def __call__(self, r): + r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPDigestAuth(AuthBase): + """Attaches HTTP Digest Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + # Keep state in per-thread local storage + self._thread_local = threading.local() + + def init_per_thread_state(self): + # Ensure state is initialized just once per-thread + if not hasattr(self._thread_local, 'init'): + self._thread_local.init = True + self._thread_local.last_nonce = '' + self._thread_local.nonce_count = 0 + self._thread_local.chal = {} + self._thread_local.pos = None + self._thread_local.num_401_calls = None + + def build_digest_header(self, method, url): + """ + :rtype: str + """ + + realm = self._thread_local.chal['realm'] + nonce = self._thread_local.chal['nonce'] + qop = self._thread_local.chal.get('qop') + algorithm = self._thread_local.chal.get('algorithm') + opaque = self._thread_local.chal.get('opaque') + hash_utf8 = None + + if algorithm is None: + _algorithm = 'MD5' + else: + _algorithm = algorithm.upper() + # lambdas assume digest modules are imported at the top level + if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': + def md5_utf8(x): + if isinstance(x, str): + x = x.encode('utf-8') + return hashlib.md5(x).hexdigest() + hash_utf8 = md5_utf8 + elif _algorithm == 'SHA': + def sha_utf8(x): + if isinstance(x, str): + x = x.encode('utf-8') + return hashlib.sha1(x).hexdigest() + hash_utf8 = sha_utf8 + elif _algorithm == 'SHA-256': + def sha256_utf8(x): + if isinstance(x, str): + x = x.encode('utf-8') + return hashlib.sha256(x).hexdigest() + hash_utf8 = sha256_utf8 + elif _algorithm == 'SHA-512': + def sha512_utf8(x): + if isinstance(x, str): + x = x.encode('utf-8') + return hashlib.sha512(x).hexdigest() + hash_utf8 = sha512_utf8 + + KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) + + if hash_utf8 is None: + return None + + # XXX not implemented yet + entdig = None + p_parsed = urlparse(url) + #: path is request-uri defined in RFC 2616 which should not be empty + path = p_parsed.path or "/" + if p_parsed.query: + path += '?' 
+ p_parsed.query + + A1 = '%s:%s:%s' % (self.username, realm, self.password) + A2 = '%s:%s' % (method, path) + + HA1 = hash_utf8(A1) + HA2 = hash_utf8(A2) + + if nonce == self._thread_local.last_nonce: + self._thread_local.nonce_count += 1 + else: + self._thread_local.nonce_count = 1 + ncvalue = '%08x' % self._thread_local.nonce_count + s = str(self._thread_local.nonce_count).encode('utf-8') + s += nonce.encode('utf-8') + s += time.ctime().encode('utf-8') + s += os.urandom(8) + + cnonce = (hashlib.sha1(s).hexdigest()[:16]) + if _algorithm == 'MD5-SESS': + HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) + + if not qop: + respdig = KD(HA1, "%s:%s" % (nonce, HA2)) + elif qop == 'auth' or 'auth' in qop.split(','): + noncebit = "%s:%s:%s:%s:%s" % ( + nonce, ncvalue, cnonce, 'auth', HA2 + ) + respdig = KD(HA1, noncebit) + else: + # XXX handle auth-int. + return None + + self._thread_local.last_nonce = nonce + + # XXX should the partial digests be encoded too? + base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ + 'response="%s"' % (self.username, realm, nonce, path, respdig) + if opaque: + base += ', opaque="%s"' % opaque + if algorithm: + base += ', algorithm="%s"' % algorithm + if entdig: + base += ', digest="%s"' % entdig + if qop: + base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) + + return 'Digest %s' % (base) + + def handle_redirect(self, r, **kwargs): + """Reset num_401_calls counter on redirects.""" + if r.is_redirect: + self._thread_local.num_401_calls = 1 + + def handle_401(self, r, **kwargs): + """ + Takes the given response and tries digest-auth, if needed. + + :rtype: requests.Response + """ + + # If response is not 4xx, do not auth + # See https://github.com/requests/requests/issues/3772 + if not 400 <= r.status_code < 500: + self._thread_local.num_401_calls = 1 + return r + + if self._thread_local.pos is not None: + # Rewind the file position indicator of the body to where + # it was to resend the request. + r.request.body.seek(self._thread_local.pos) + s_auth = r.headers.get('www-authenticate', '') + + if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: + + self._thread_local.num_401_calls += 1 + pat = re.compile(r'digest ', flags=re.IGNORECASE) + self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) + + # Consume content and release the original connection + # to allow our new request to reuse the same one. + r.content + r.close() + prep = r.request.copy() + extract_cookies_to_jar(prep._cookies, r.request, r.raw) + prep.prepare_cookies(prep._cookies) + + prep.headers['Authorization'] = self.build_digest_header( + prep.method, prep.url) + _r = r.connection.send(prep, **kwargs) + _r.history.append(r) + _r.request = prep + + return _r + + self._thread_local.num_401_calls = 1 + return r + + def __call__(self, r): + # Initialize per-thread state, if needed + self.init_per_thread_state() + # If we have a saved nonce, skip the 401 + if self._thread_local.last_nonce: + r.headers['Authorization'] = self.build_digest_header(r.method, r.url) + try: + self._thread_local.pos = r.body.tell() + except AttributeError: + # In the case of HTTPDigestAuth being reused and the body of + # the previous request was a file-like object, pos has the + # file position of the previous body. Ensure it's set to + # None. 
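Attaching the auth handlers implemented above: a plain `(user, pass)` tuple is shorthand for `HTTPBasicAuth`, while `HTTPDigestAuth` performs the 401/challenge round trip that `handle_401` above drives. A sketch against httpbin's test endpoints:

# Using the auth classes above; httpbin provides matching test endpoints.
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth

requests.get('https://httpbin.org/basic-auth/user/pass',
             auth=HTTPBasicAuth('user', 'pass'))   # same as auth=('user', 'pass')

requests.get('https://httpbin.org/digest-auth/auth/user/pass',
             auth=HTTPDigestAuth('user', 'pass'))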
+ self._thread_local.pos = None + r.register_hook('response', self.handle_401) + r.register_hook('response', self.handle_redirect) + self._thread_local.num_401_calls = 1 + + return r + + def __eq__(self, other): + return all([ + self.username == getattr(other, 'username', None), + self.password == getattr(other, 'password', None) + ]) + + def __ne__(self, other): + return not self == other diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyc new file mode 100644 index 0000000..04d38e0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/auth.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/certs.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/certs.py new file mode 100644 index 0000000..06a594e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/certs.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +requests.certs +~~~~~~~~~~~~~~ + +This module returns the preferred default CA certificate bundle. There is +only one — the one from the certifi package. + +If you are packaging Requests, e.g., for a Linux distribution or a managed +environment, you can change the definition of where() to return a separately +packaged CA bundle. +""" +from pip._vendor.certifi import where + +if __name__ == '__main__': + print(where()) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/certs.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/certs.pyc new file mode 100644 index 0000000..e6bbad0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/certs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/compat.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/compat.py new file mode 100644 index 0000000..6a86893 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/compat.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- + +""" +requests.compat +~~~~~~~~~~~~~~~ + +This module handles import compatibility issues between Python 2 and +Python 3. +""" + +from pip._vendor import chardet + +import sys + +# ------- +# Pythons +# ------- + +# Syntax sugar. +_ver = sys.version_info + +#: Python 2.x? +is_py2 = (_ver[0] == 2) + +#: Python 3.x? +is_py3 = (_ver[0] == 3) + +# Note: We've patched out simplejson support in pip because it prevents +# upgrading simplejson on Windows. +# try: +# import simplejson as json +# except (ImportError, SyntaxError): +# # simplejson does not support Python 3.2, it throws a SyntaxError +# # because of u'...' Unicode literals. 
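`certs.where()` above simply re-exports certifi's bundle location, which is handy when pointing other tools at the same CA store; the printed path below is only an example:

# Locating the CA bundle via the certs module above.
from requests import certs

print(certs.where())   # e.g. .../site-packages/certifi/cacert.pem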
+import json + +# --------- +# Specifics +# --------- + +if is_py2: + from urllib import ( + quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, + proxy_bypass, proxy_bypass_environment, getproxies_environment) + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag + from urllib2 import parse_http_list + import cookielib + from Cookie import Morsel + from StringIO import StringIO + from collections import Callable, Mapping, MutableMapping, OrderedDict + + + builtin_str = str + bytes = str + str = unicode + basestring = basestring + numeric_types = (int, long, float) + integer_types = (int, long) + +elif is_py3: + from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag + from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment + from http import cookiejar as cookielib + from http.cookies import Morsel + from io import StringIO + from collections import OrderedDict + from collections.abc import Callable, Mapping, MutableMapping + + builtin_str = str + str = str + bytes = bytes + basestring = (str, bytes) + numeric_types = (int, float) + integer_types = (int,) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/compat.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/compat.pyc new file mode 100644 index 0000000..cd461fb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/cookies.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/cookies.py new file mode 100644 index 0000000..56fccd9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/cookies.py @@ -0,0 +1,549 @@ +# -*- coding: utf-8 -*- + +""" +requests.cookies +~~~~~~~~~~~~~~~~ + +Compatibility code to be able to use `cookielib.CookieJar` with requests. + +requests.utils imports from here, so be careful with imports. +""" + +import copy +import time +import calendar + +from ._internal_utils import to_native_string +from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping + +try: + import threading +except ImportError: + import dummy_threading as threading + + +class MockRequest(object): + """Wraps a `requests.Request` to mimic a `urllib2.Request`. + + The code in `cookielib.CookieJar` expects this interface in order to correctly + manage cookie policies, i.e., determine whether a cookie can be set, given the + domains of the request and the cookie. + + The original request object is read-only. The client is responsible for collecting + the new headers via `get_new_headers()` and interpreting them appropriately. You + probably want `get_cookie_header`, defined below. 
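The `compat` module above exposes version-neutral names so the rest of the package never chooses between `urllib` and `urllib.parse` itself. A brief sketch of the re-exported helpers:

# Version-neutral imports from the compat module above.
from requests.compat import urlparse, urljoin, quote

base = 'https://example.com/a/'
print(urljoin(base, '../b?q=1'))   # https://example.com/b?q=1
print(quote('a b/c'))              # a%20b/c ('/' is safe by default)
print(urlparse(base).netloc)       # example.com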
+ """ + + def __init__(self, request): + self._r = request + self._new_headers = {} + self.type = urlparse(self._r.url).scheme + + def get_type(self): + return self.type + + def get_host(self): + return urlparse(self._r.url).netloc + + def get_origin_req_host(self): + return self.get_host() + + def get_full_url(self): + # Only return the response's URL if the user hadn't set the Host + # header + if not self._r.headers.get('Host'): + return self._r.url + # If they did set it, retrieve it and reconstruct the expected domain + host = to_native_string(self._r.headers['Host'], encoding='utf-8') + parsed = urlparse(self._r.url) + # Reconstruct the URL as we expect it + return urlunparse([ + parsed.scheme, host, parsed.path, parsed.params, parsed.query, + parsed.fragment + ]) + + def is_unverifiable(self): + return True + + def has_header(self, name): + return name in self._r.headers or name in self._new_headers + + def get_header(self, name, default=None): + return self._r.headers.get(name, self._new_headers.get(name, default)) + + def add_header(self, key, val): + """cookielib has no legitimate use for this method; add it back if you find one.""" + raise NotImplementedError("Cookie headers should be added with add_unredirected_header()") + + def add_unredirected_header(self, name, value): + self._new_headers[name] = value + + def get_new_headers(self): + return self._new_headers + + @property + def unverifiable(self): + return self.is_unverifiable() + + @property + def origin_req_host(self): + return self.get_origin_req_host() + + @property + def host(self): + return self.get_host() + + +class MockResponse(object): + """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. + + ...what? Basically, expose the parsed HTTP headers from the server response + the way `cookielib` expects to see them. + """ + + def __init__(self, headers): + """Make a MockResponse for `cookielib` to read. + + :param headers: a httplib.HTTPMessage or analogous carrying the headers + """ + self._headers = headers + + def info(self): + return self._headers + + def getheaders(self, name): + self._headers.getheaders(name) + + +def extract_cookies_to_jar(jar, request, response): + """Extract the cookies from the response into a CookieJar. + + :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) + :param request: our own requests.Request object + :param response: urllib3.HTTPResponse object + """ + if not (hasattr(response, '_original_response') and + response._original_response): + return + # the _original_response field is the wrapped httplib.HTTPResponse object, + req = MockRequest(request) + # pull out the HTTPMessage with the headers and put it in the mock: + res = MockResponse(response._original_response.msg) + jar.extract_cookies(res, req) + + +def get_cookie_header(jar, request): + """ + Produce an appropriate Cookie header string to be sent with `request`, or None. + + :rtype: str + """ + r = MockRequest(request) + jar.add_cookie_header(r) + return r.get_new_headers().get('Cookie') + + +def remove_cookie_by_name(cookiejar, name, domain=None, path=None): + """Unsets a cookie by name, by default over all domains and paths. + + Wraps CookieJar.clear(), is O(n). 
+ """ + clearables = [] + for cookie in cookiejar: + if cookie.name != name: + continue + if domain is not None and domain != cookie.domain: + continue + if path is not None and path != cookie.path: + continue + clearables.append((cookie.domain, cookie.path, cookie.name)) + + for domain, path, name in clearables: + cookiejar.clear(domain, path, name) + + +class CookieConflictError(RuntimeError): + """There are two cookies that meet the criteria specified in the cookie jar. + Use .get and .set and include domain and path args in order to be more specific. + """ + + +class RequestsCookieJar(cookielib.CookieJar, MutableMapping): + """Compatibility class; is a cookielib.CookieJar, but exposes a dict + interface. + + This is the CookieJar we create by default for requests and sessions that + don't specify one, since some clients may expect response.cookies and + session.cookies to support dict operations. + + Requests does not use the dict interface internally; it's just for + compatibility with external client code. All requests code should work + out of the box with externally provided instances of ``CookieJar``, e.g. + ``LWPCookieJar`` and ``FileCookieJar``. + + Unlike a regular CookieJar, this class is pickleable. + + .. warning:: dictionary operations that are normally O(1) may be O(n). + """ + + def get(self, name, default=None, domain=None, path=None): + """Dict-like get() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + + .. warning:: operation is O(n), not O(1). + """ + try: + return self._find_no_duplicates(name, domain, path) + except KeyError: + return default + + def set(self, name, value, **kwargs): + """Dict-like set() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + """ + # support client code that unsets cookies by assignment of a None value: + if value is None: + remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) + return + + if isinstance(value, Morsel): + c = morsel_to_cookie(value) + else: + c = create_cookie(name, value, **kwargs) + self.set_cookie(c) + return c + + def iterkeys(self): + """Dict-like iterkeys() that returns an iterator of names of cookies + from the jar. + + .. seealso:: itervalues() and iteritems(). + """ + for cookie in iter(self): + yield cookie.name + + def keys(self): + """Dict-like keys() that returns a list of names of cookies from the + jar. + + .. seealso:: values() and items(). + """ + return list(self.iterkeys()) + + def itervalues(self): + """Dict-like itervalues() that returns an iterator of values of cookies + from the jar. + + .. seealso:: iterkeys() and iteritems(). + """ + for cookie in iter(self): + yield cookie.value + + def values(self): + """Dict-like values() that returns a list of values of cookies from the + jar. + + .. seealso:: keys() and items(). + """ + return list(self.itervalues()) + + def iteritems(self): + """Dict-like iteritems() that returns an iterator of name-value tuples + from the jar. + + .. seealso:: iterkeys() and itervalues(). + """ + for cookie in iter(self): + yield cookie.name, cookie.value + + def items(self): + """Dict-like items() that returns a list of name-value tuples from the + jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a + vanilla python dict of key value pairs. + + .. seealso:: keys() and values(). 
+ """ + return list(self.iteritems()) + + def list_domains(self): + """Utility method to list all the domains in the jar.""" + domains = [] + for cookie in iter(self): + if cookie.domain not in domains: + domains.append(cookie.domain) + return domains + + def list_paths(self): + """Utility method to list all the paths in the jar.""" + paths = [] + for cookie in iter(self): + if cookie.path not in paths: + paths.append(cookie.path) + return paths + + def multiple_domains(self): + """Returns True if there are multiple domains in the jar. + Returns False otherwise. + + :rtype: bool + """ + domains = [] + for cookie in iter(self): + if cookie.domain is not None and cookie.domain in domains: + return True + domains.append(cookie.domain) + return False # there is only one domain in jar + + def get_dict(self, domain=None, path=None): + """Takes as an argument an optional domain and path and returns a plain + old Python dict of name-value pairs of cookies that meet the + requirements. + + :rtype: dict + """ + dictionary = {} + for cookie in iter(self): + if ( + (domain is None or cookie.domain == domain) and + (path is None or cookie.path == path) + ): + dictionary[cookie.name] = cookie.value + return dictionary + + def __contains__(self, name): + try: + return super(RequestsCookieJar, self).__contains__(name) + except CookieConflictError: + return True + + def __getitem__(self, name): + """Dict-like __getitem__() for compatibility with client code. Throws + exception if there are more than one cookie with name. In that case, + use the more explicit get() method instead. + + .. warning:: operation is O(n), not O(1). + """ + return self._find_no_duplicates(name) + + def __setitem__(self, name, value): + """Dict-like __setitem__ for compatibility with client code. Throws + exception if there is already a cookie of that name in the jar. In that + case, use the more explicit set() method instead. + """ + self.set(name, value) + + def __delitem__(self, name): + """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s + ``remove_cookie_by_name()``. + """ + remove_cookie_by_name(self, name) + + def set_cookie(self, cookie, *args, **kwargs): + if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): + cookie.value = cookie.value.replace('\\"', '') + return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) + + def update(self, other): + """Updates this jar with cookies from another CookieJar or dict-like""" + if isinstance(other, cookielib.CookieJar): + for cookie in other: + self.set_cookie(copy.copy(cookie)) + else: + super(RequestsCookieJar, self).update(other) + + def _find(self, name, domain=None, path=None): + """Requests uses this method internally to get cookie values. + + If there are conflicting cookies, _find arbitrarily chooses one. + See _find_no_duplicates if you want an exception thrown if there are + conflicting cookies. + + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :return: cookie.value + """ + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + return cookie.value + + raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) + + def _find_no_duplicates(self, name, domain=None, path=None): + """Both ``__get_item__`` and ``get`` call this function: it's never + used elsewhere in Requests. 
+ + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :raises KeyError: if cookie is not found + :raises CookieConflictError: if there are multiple cookies + that match name and optionally domain and path + :return: cookie.value + """ + toReturn = None + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + if toReturn is not None: # if there are multiple cookies that meet passed in criteria + raise CookieConflictError('There are multiple cookies with name, %r' % (name)) + toReturn = cookie.value # we will eventually return this as long as no cookie conflict + + if toReturn: + return toReturn + raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) + + def __getstate__(self): + """Unlike a normal CookieJar, this class is pickleable.""" + state = self.__dict__.copy() + # remove the unpickleable RLock object + state.pop('_cookies_lock') + return state + + def __setstate__(self, state): + """Unlike a normal CookieJar, this class is pickleable.""" + self.__dict__.update(state) + if '_cookies_lock' not in self.__dict__: + self._cookies_lock = threading.RLock() + + def copy(self): + """Return a copy of this RequestsCookieJar.""" + new_cj = RequestsCookieJar() + new_cj.set_policy(self.get_policy()) + new_cj.update(self) + return new_cj + + def get_policy(self): + """Return the CookiePolicy instance used.""" + return self._policy + + +def _copy_cookie_jar(jar): + if jar is None: + return None + + if hasattr(jar, 'copy'): + # We're dealing with an instance of RequestsCookieJar + return jar.copy() + # We're dealing with a generic CookieJar instance + new_jar = copy.copy(jar) + new_jar.clear() + for cookie in jar: + new_jar.set_cookie(copy.copy(cookie)) + return new_jar + + +def create_cookie(name, value, **kwargs): + """Make a cookie from underspecified parameters. + + By default, the pair of `name` and `value` will be set for the domain '' + and sent on every request (this is sometimes called a "supercookie"). 
+ """ + result = { + 'version': 0, + 'name': name, + 'value': value, + 'port': None, + 'domain': '', + 'path': '/', + 'secure': False, + 'expires': None, + 'discard': True, + 'comment': None, + 'comment_url': None, + 'rest': {'HttpOnly': None}, + 'rfc2109': False, + } + + badargs = set(kwargs) - set(result) + if badargs: + err = 'create_cookie() got unexpected keyword arguments: %s' + raise TypeError(err % list(badargs)) + + result.update(kwargs) + result['port_specified'] = bool(result['port']) + result['domain_specified'] = bool(result['domain']) + result['domain_initial_dot'] = result['domain'].startswith('.') + result['path_specified'] = bool(result['path']) + + return cookielib.Cookie(**result) + + +def morsel_to_cookie(morsel): + """Convert a Morsel object into a Cookie containing the one k/v pair.""" + + expires = None + if morsel['max-age']: + try: + expires = int(time.time() + int(morsel['max-age'])) + except ValueError: + raise TypeError('max-age: %s must be integer' % morsel['max-age']) + elif morsel['expires']: + time_template = '%a, %d-%b-%Y %H:%M:%S GMT' + expires = calendar.timegm( + time.strptime(morsel['expires'], time_template) + ) + return create_cookie( + comment=morsel['comment'], + comment_url=bool(morsel['comment']), + discard=False, + domain=morsel['domain'], + expires=expires, + name=morsel.key, + path=morsel['path'], + port=None, + rest={'HttpOnly': morsel['httponly']}, + rfc2109=False, + secure=bool(morsel['secure']), + value=morsel.value, + version=morsel['version'] or 0, + ) + + +def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): + """Returns a CookieJar from a key/value dictionary. + + :param cookie_dict: Dict of key/values to insert into CookieJar. + :param cookiejar: (optional) A cookiejar to add the cookies to. + :param overwrite: (optional) If False, will not replace cookies + already in the jar with new ones. + :rtype: CookieJar + """ + if cookiejar is None: + cookiejar = RequestsCookieJar() + + if cookie_dict is not None: + names_from_jar = [cookie.name for cookie in cookiejar] + for name in cookie_dict: + if overwrite or (name not in names_from_jar): + cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) + + return cookiejar + + +def merge_cookies(cookiejar, cookies): + """Add cookies to cookiejar and returns a merged CookieJar. + + :param cookiejar: CookieJar object to add the cookies to. + :param cookies: Dictionary or CookieJar object to be added. 
+ :rtype: CookieJar + """ + if not isinstance(cookiejar, cookielib.CookieJar): + raise ValueError('You can only merge into CookieJar') + + if isinstance(cookies, dict): + cookiejar = cookiejar_from_dict( + cookies, cookiejar=cookiejar, overwrite=False) + elif isinstance(cookies, cookielib.CookieJar): + try: + cookiejar.update(cookies) + except AttributeError: + for cookie_in_jar in cookies: + cookiejar.set_cookie(cookie_in_jar) + + return cookiejar diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyc new file mode 100644 index 0000000..6533967 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/cookies.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.py new file mode 100644 index 0000000..a91e1fd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- + +""" +requests.exceptions +~~~~~~~~~~~~~~~~~~~ + +This module contains the set of Requests' exceptions. +""" +from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError + + +class RequestException(IOError): + """There was an ambiguous exception that occurred while handling your + request. + """ + + def __init__(self, *args, **kwargs): + """Initialize RequestException with `request` and `response` objects.""" + response = kwargs.pop('response', None) + self.response = response + self.request = kwargs.pop('request', None) + if (response is not None and not self.request and + hasattr(response, 'request')): + self.request = self.response.request + super(RequestException, self).__init__(*args, **kwargs) + + +class HTTPError(RequestException): + """An HTTP error occurred.""" + + +class ConnectionError(RequestException): + """A Connection error occurred.""" + + +class ProxyError(ConnectionError): + """A proxy error occurred.""" + + +class SSLError(ConnectionError): + """An SSL error occurred.""" + + +class Timeout(RequestException): + """The request timed out. + + Catching this error will catch both + :exc:`~requests.exceptions.ConnectTimeout` and + :exc:`~requests.exceptions.ReadTimeout` errors. + """ + + +class ConnectTimeout(ConnectionError, Timeout): + """The request timed out while trying to connect to the remote server. + + Requests that produced this error are safe to retry. + """ + + +class ReadTimeout(Timeout): + """The server did not send any data in the allotted amount of time.""" + + +class URLRequired(RequestException): + """A valid URL is required to make a request.""" + + +class TooManyRedirects(RequestException): + """Too many redirects.""" + + +class MissingSchema(RequestException, ValueError): + """The URL schema (e.g. 
http or https) is missing.""" + + +class InvalidSchema(RequestException, ValueError): + """See defaults.py for valid schemas.""" + + +class InvalidURL(RequestException, ValueError): + """The URL provided was somehow invalid.""" + + +class InvalidHeader(RequestException, ValueError): + """The header value provided was somehow invalid.""" + + +class InvalidProxyURL(InvalidURL): + """The proxy URL provided is invalid.""" + + +class ChunkedEncodingError(RequestException): + """The server declared chunked encoding but sent an invalid chunk.""" + + +class ContentDecodingError(RequestException, BaseHTTPError): + """Failed to decode response content""" + + +class StreamConsumedError(RequestException, TypeError): + """The content for this response was already consumed""" + + +class RetryError(RequestException): + """Custom retries logic failed""" + + +class UnrewindableBodyError(RequestException): + """Requests encountered an error when trying to rewind a body""" + +# Warnings + + +class RequestsWarning(Warning): + """Base warning for Requests.""" + pass + + +class FileModeWarning(RequestsWarning, DeprecationWarning): + """A file was opened in text mode, but Requests determined its binary length.""" + pass + + +class RequestsDependencyWarning(RequestsWarning): + """An imported dependency doesn't match the expected version range.""" + pass diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyc new file mode 100644 index 0000000..25605af Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/help.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/help.py new file mode 100644 index 0000000..3c3072b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/help.py @@ -0,0 +1,119 @@ +"""Module containing bug report helper(s).""" +from __future__ import print_function + +import json +import platform +import sys +import ssl + +from pip._vendor import idna +from pip._vendor import urllib3 +from pip._vendor import chardet + +from . import __version__ as requests_version + +try: + from pip._vendor.urllib3.contrib import pyopenssl +except ImportError: + pyopenssl = None + OpenSSL = None + cryptography = None +else: + import OpenSSL + import cryptography + + +def _implementation(): + """Return a dict with the Python implementation and version. + + Provide both the name and the version of the Python implementation + currently running. For example, on CPython 2.7.5 it will return + {'name': 'CPython', 'version': '2.7.5'}. + + This function works best on CPython and PyPy: in particular, it probably + doesn't work for Jython or IronPython. Future investigation should be done + to work out the correct shape of the code for those platforms. 
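Everything in `exceptions.py` above derives from `RequestException` (itself an `IOError`), so one broad `except` clause can backstop the finer-grained handlers. A sketch of the usual layering:

# Layered handling of the exception hierarchy defined above.
import requests

try:
    r = requests.get('https://httpbin.org/status/503', timeout=3)
    r.raise_for_status()                      # turn 4xx/5xx into HTTPError
except requests.exceptions.HTTPError as e:
    print('bad status:', e.response.status_code)
except requests.exceptions.ConnectTimeout:
    print('connect timed out; safe to retry')
except requests.exceptions.RequestException as e:
    print('request failed:', e)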
+ """ + implementation = platform.python_implementation() + + if implementation == 'CPython': + implementation_version = platform.python_version() + elif implementation == 'PyPy': + implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, + sys.pypy_version_info.minor, + sys.pypy_version_info.micro) + if sys.pypy_version_info.releaselevel != 'final': + implementation_version = ''.join([ + implementation_version, sys.pypy_version_info.releaselevel + ]) + elif implementation == 'Jython': + implementation_version = platform.python_version() # Complete Guess + elif implementation == 'IronPython': + implementation_version = platform.python_version() # Complete Guess + else: + implementation_version = 'Unknown' + + return {'name': implementation, 'version': implementation_version} + + +def info(): + """Generate information for a bug report.""" + try: + platform_info = { + 'system': platform.system(), + 'release': platform.release(), + } + except IOError: + platform_info = { + 'system': 'Unknown', + 'release': 'Unknown', + } + + implementation_info = _implementation() + urllib3_info = {'version': urllib3.__version__} + chardet_info = {'version': chardet.__version__} + + pyopenssl_info = { + 'version': None, + 'openssl_version': '', + } + if OpenSSL: + pyopenssl_info = { + 'version': OpenSSL.__version__, + 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, + } + cryptography_info = { + 'version': getattr(cryptography, '__version__', ''), + } + idna_info = { + 'version': getattr(idna, '__version__', ''), + } + + system_ssl = ssl.OPENSSL_VERSION_NUMBER + system_ssl_info = { + 'version': '%x' % system_ssl if system_ssl is not None else '' + } + + return { + 'platform': platform_info, + 'implementation': implementation_info, + 'system_ssl': system_ssl_info, + 'using_pyopenssl': pyopenssl is not None, + 'pyOpenSSL': pyopenssl_info, + 'urllib3': urllib3_info, + 'chardet': chardet_info, + 'cryptography': cryptography_info, + 'idna': idna_info, + 'requests': { + 'version': requests_version, + }, + } + + +def main(): + """Pretty-print the bug information as JSON.""" + print(json.dumps(info(), sort_keys=True, indent=2)) + + +if __name__ == '__main__': + main() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/help.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/help.pyc new file mode 100644 index 0000000..b1d85fa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/help.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/hooks.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/hooks.py new file mode 100644 index 0000000..7a51f21 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/hooks.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +""" +requests.hooks +~~~~~~~~~~~~~~ + +This module provides the capabilities for the Requests hooks system. + +Available hooks: + +``response``: + The response generated from a Request. 
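+
+A response hook is any callable taking the hook data; returning a value
+replaces the data, returning ``None`` leaves it unchanged. Illustrative
+sketch (``log_url`` is a made-up example, not part of this module)::
+
+    def log_url(response, *args, **kwargs):
+        print(response.url)  # inspect only; returning None keeps the response
+
+    requests.get('https://example.org', hooks={'response': log_url})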
+""" +HOOKS = ['response'] + + +def default_hooks(): + return {event: [] for event in HOOKS} + +# TODO: response is the only one + + +def dispatch_hook(key, hooks, hook_data, **kwargs): + """Dispatches a hook dictionary on a given piece of data.""" + hooks = hooks or {} + hooks = hooks.get(key) + if hooks: + if hasattr(hooks, '__call__'): + hooks = [hooks] + for hook in hooks: + _hook_data = hook(hook_data, **kwargs) + if _hook_data is not None: + hook_data = _hook_data + return hook_data diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pyc new file mode 100644 index 0000000..d37b644 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/hooks.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/models.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/models.py new file mode 100644 index 0000000..0839957 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/models.py @@ -0,0 +1,953 @@ +# -*- coding: utf-8 -*- + +""" +requests.models +~~~~~~~~~~~~~~~ + +This module contains the primary objects that power Requests. +""" + +import datetime +import sys + +# Import encoding now, to avoid implicit import later. +# Implicit import within threads may cause LookupError when standard library is in a ZIP, +# such as in Embedded Python. See https://github.com/requests/requests/issues/3578. +import encodings.idna + +from pip._vendor.urllib3.fields import RequestField +from pip._vendor.urllib3.filepost import encode_multipart_formdata +from pip._vendor.urllib3.util import parse_url +from pip._vendor.urllib3.exceptions import ( + DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) + +from io import UnsupportedOperation +from .hooks import default_hooks +from .structures import CaseInsensitiveDict + +from .auth import HTTPBasicAuth +from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar +from .exceptions import ( + HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, + ContentDecodingError, ConnectionError, StreamConsumedError) +from ._internal_utils import to_native_string, unicode_is_ascii +from .utils import ( + guess_filename, get_auth_from_url, requote_uri, + stream_decode_response_unicode, to_key_val_list, parse_header_links, + iter_slices, guess_json_utf, super_len, check_header_validity) +from .compat import ( + Callable, Mapping, + cookielib, urlunparse, urlsplit, urlencode, str, bytes, + is_py2, chardet, builtin_str, basestring) +from .compat import json as complexjson +from .status_codes import codes + +#: The set of HTTP status codes that indicate an automatically +#: processable redirect. +REDIRECT_STATI = ( + codes.moved, # 301 + codes.found, # 302 + codes.other, # 303 + codes.temporary_redirect, # 307 + codes.permanent_redirect, # 308 +) + +DEFAULT_REDIRECT_LIMIT = 30 +CONTENT_CHUNK_SIZE = 10 * 1024 +ITER_CHUNK_SIZE = 512 + + +class RequestEncodingMixin(object): + @property + def path_url(self): + """Build the path URL to use.""" + + url = [] + + p = urlsplit(self.url) + + path = p.path + if not path: + path = '/' + + url.append(path) + + query = p.query + if query: + url.append('?') + url.append(query) + + return ''.join(url) + + @staticmethod + def _encode_params(data): + """Encode parameters in a piece of data. + + Will successfully encode parameters when passed as a dict or a list of + 2-tuples. 
Order is retained if data is a list of 2-tuples but arbitrary
+        if parameters are supplied as a dict.
+        """
+
+        if isinstance(data, (str, bytes)):
+            return data
+        elif hasattr(data, 'read'):
+            return data
+        elif hasattr(data, '__iter__'):
+            result = []
+            for k, vs in to_key_val_list(data):
+                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
+                    vs = [vs]
+                for v in vs:
+                    if v is not None:
+                        result.append(
+                            (k.encode('utf-8') if isinstance(k, str) else k,
+                             v.encode('utf-8') if isinstance(v, str) else v))
+            return urlencode(result, doseq=True)
+        else:
+            return data
+
+    @staticmethod
+    def _encode_files(files, data):
+        """Build the body for a multipart/form-data request.
+
+        Will successfully encode files when passed as a dict or a list of
+        tuples. Order is retained if data is a list of tuples but arbitrary
+        if parameters are supplied as a dict.
+        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
+        or 4-tuples (filename, fileobj, content_type, custom_headers).
+        """
+        if not files:
+            raise ValueError("Files must be provided.")
+        elif isinstance(data, basestring):
+            raise ValueError("Data must not be a string.")
+
+        new_fields = []
+        fields = to_key_val_list(data or {})
+        files = to_key_val_list(files or {})
+
+        for field, val in fields:
+            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
+                val = [val]
+            for v in val:
+                if v is not None:
+                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
+                    if not isinstance(v, bytes):
+                        v = str(v)
+
+                    new_fields.append(
+                        (field.decode('utf-8') if isinstance(field, bytes) else field,
+                         v.encode('utf-8') if isinstance(v, str) else v))
+
+        for (k, v) in files:
+            # support for explicit filename
+            ft = None
+            fh = None
+            if isinstance(v, (tuple, list)):
+                if len(v) == 2:
+                    fn, fp = v
+                elif len(v) == 3:
+                    fn, fp, ft = v
+                else:
+                    fn, fp, ft, fh = v
+            else:
+                fn = guess_filename(v) or k
+                fp = v
+
+            if isinstance(fp, (str, bytes, bytearray)):
+                fdata = fp
+            elif hasattr(fp, 'read'):
+                fdata = fp.read()
+            elif fp is None:
+                continue
+            else:
+                fdata = fp
+
+            rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
+            rf.make_multipart(content_type=ft)
+            new_fields.append(rf)
+
+        body, content_type = encode_multipart_formdata(new_fields)
+
+        return body, content_type
+
+
+class RequestHooksMixin(object):
+    def register_hook(self, event, hook):
+        """Properly register a hook."""
+
+        if event not in self.hooks:
+            raise ValueError('Unsupported event specified, with event name "%s"' % (event))
+
+        if isinstance(hook, Callable):
+            self.hooks[event].append(hook)
+        elif hasattr(hook, '__iter__'):
+            self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
+
+    def deregister_hook(self, event, hook):
+        """Deregister a previously registered hook.
+        Returns True if the hook existed, False if not.
+        """
+
+        try:
+            self.hooks[event].remove(hook)
+            return True
+        except ValueError:
+            return False
+
+
+class Request(RequestHooksMixin):
+    """A user-created :class:`Request <Request>` object.
+
+    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
+
+    :param method: HTTP method to use.
+    :param url: URL to send.
+    :param headers: dictionary of headers to send.
+    :param files: dictionary of {filename: fileobject} files to multipart upload.
+    :param data: the body to attach to the request. If a dictionary or
+        list of tuples ``[(key, value)]`` is provided, form-encoding will
+        take place.
+ :param json: json for the body to attach to the request (if files or data is not specified). + :param params: URL parameters to append to the URL. If a dictionary or + list of tuples ``[(key, value)]`` is provided, form-encoding will + take place. + :param auth: Auth handler or (user, pass) tuple. + :param cookies: dictionary or CookieJar of cookies to attach to this request. + :param hooks: dictionary of callback hooks, for internal usage. + + Usage:: + + >>> import requests + >>> req = requests.Request('GET', 'https://httpbin.org/get') + >>> req.prepare() + <PreparedRequest [GET]> + """ + + def __init__(self, + method=None, url=None, headers=None, files=None, data=None, + params=None, auth=None, cookies=None, hooks=None, json=None): + + # Default empty dicts for dict params. + data = [] if data is None else data + files = [] if files is None else files + headers = {} if headers is None else headers + params = {} if params is None else params + hooks = {} if hooks is None else hooks + + self.hooks = default_hooks() + for (k, v) in list(hooks.items()): + self.register_hook(event=k, hook=v) + + self.method = method + self.url = url + self.headers = headers + self.files = files + self.data = data + self.json = json + self.params = params + self.auth = auth + self.cookies = cookies + + def __repr__(self): + return '<Request [%s]>' % (self.method) + + def prepare(self): + """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" + p = PreparedRequest() + p.prepare( + method=self.method, + url=self.url, + headers=self.headers, + files=self.files, + data=self.data, + json=self.json, + params=self.params, + auth=self.auth, + cookies=self.cookies, + hooks=self.hooks, + ) + return p + + +class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): + """The fully mutable :class:`PreparedRequest <PreparedRequest>` object, + containing the exact bytes that will be sent to the server. + + Generated from either a :class:`Request <Request>` object or manually. + + Usage:: + + >>> import requests + >>> req = requests.Request('GET', 'https://httpbin.org/get') + >>> r = req.prepare() + <PreparedRequest [GET]> + + >>> s = requests.Session() + >>> s.send(r) + <Response [200]> + """ + + def __init__(self): + #: HTTP verb to send to the server. + self.method = None + #: HTTP URL to send the request to. + self.url = None + #: dictionary of HTTP headers. + self.headers = None + # The `CookieJar` used to create the Cookie header will be stored here + # after prepare_cookies is called + self._cookies = None + #: request body to send to the server. + self.body = None + #: dictionary of callback hooks, for internal usage. + self.hooks = default_hooks() + #: integer denoting starting position of a readable file-like body. + self._body_position = None + + def prepare(self, + method=None, url=None, headers=None, files=None, data=None, + params=None, auth=None, cookies=None, hooks=None, json=None): + """Prepares the entire request with the given parameters.""" + + self.prepare_method(method) + self.prepare_url(url, params) + self.prepare_headers(headers) + self.prepare_cookies(cookies) + self.prepare_body(data, files, json) + self.prepare_auth(auth, url) + + # Note that prepare_auth must be last to enable authentication schemes + # such as OAuth to work on a fully prepared request. + + # This MUST go after prepare_auth. 
Authenticators could add a hook + self.prepare_hooks(hooks) + + def __repr__(self): + return '<PreparedRequest [%s]>' % (self.method) + + def copy(self): + p = PreparedRequest() + p.method = self.method + p.url = self.url + p.headers = self.headers.copy() if self.headers is not None else None + p._cookies = _copy_cookie_jar(self._cookies) + p.body = self.body + p.hooks = self.hooks + p._body_position = self._body_position + return p + + def prepare_method(self, method): + """Prepares the given HTTP method.""" + self.method = method + if self.method is not None: + self.method = to_native_string(self.method.upper()) + + @staticmethod + def _get_idna_encoded_host(host): + from pip._vendor import idna + + try: + host = idna.encode(host, uts46=True).decode('utf-8') + except idna.IDNAError: + raise UnicodeError + return host + + def prepare_url(self, url, params): + """Prepares the given HTTP URL.""" + #: Accept objects that have string representations. + #: We're unable to blindly call unicode/str functions + #: as this will include the bytestring indicator (b'') + #: on python 3.x. + #: https://github.com/requests/requests/pull/2238 + if isinstance(url, bytes): + url = url.decode('utf8') + else: + url = unicode(url) if is_py2 else str(url) + + # Remove leading whitespaces from url + url = url.lstrip() + + # Don't do any URL preparation for non-HTTP schemes like `mailto`, + # `data` etc to work around exceptions from `url_parse`, which + # handles RFC 3986 only. + if ':' in url and not url.lower().startswith('http'): + self.url = url + return + + # Support for unicode domain names and paths. + try: + scheme, auth, host, port, path, query, fragment = parse_url(url) + except LocationParseError as e: + raise InvalidURL(*e.args) + + if not scheme: + error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?") + error = error.format(to_native_string(url, 'utf8')) + + raise MissingSchema(error) + + if not host: + raise InvalidURL("Invalid URL %r: No host supplied" % url) + + # In general, we want to try IDNA encoding the hostname if the string contains + # non-ASCII characters. This allows users to automatically get the correct IDNA + # behaviour. For strings containing only ASCII characters, we need to also verify + # it doesn't start with a wildcard (*), before allowing the unencoded hostname. + if not unicode_is_ascii(host): + try: + host = self._get_idna_encoded_host(host) + except UnicodeError: + raise InvalidURL('URL has an invalid label.') + elif host.startswith(u'*'): + raise InvalidURL('URL has an invalid label.') + + # Carefully reconstruct the network location + netloc = auth or '' + if netloc: + netloc += '@' + netloc += host + if port: + netloc += ':' + str(port) + + # Bare domains aren't valid URLs. 
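+        # (For example, 'http://example.com' parses with an empty path, which
+        # is normalized to 'http://example.com/' here.)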
+ if not path: + path = '/' + + if is_py2: + if isinstance(scheme, str): + scheme = scheme.encode('utf-8') + if isinstance(netloc, str): + netloc = netloc.encode('utf-8') + if isinstance(path, str): + path = path.encode('utf-8') + if isinstance(query, str): + query = query.encode('utf-8') + if isinstance(fragment, str): + fragment = fragment.encode('utf-8') + + if isinstance(params, (str, bytes)): + params = to_native_string(params) + + enc_params = self._encode_params(params) + if enc_params: + if query: + query = '%s&%s' % (query, enc_params) + else: + query = enc_params + + url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) + self.url = url + + def prepare_headers(self, headers): + """Prepares the given HTTP headers.""" + + self.headers = CaseInsensitiveDict() + if headers: + for header in headers.items(): + # Raise exception on invalid header value. + check_header_validity(header) + name, value = header + self.headers[to_native_string(name)] = value + + def prepare_body(self, data, files, json=None): + """Prepares the given HTTP body data.""" + + # Check if file, fo, generator, iterator. + # If not, run through normal process. + + # Nottin' on you. + body = None + content_type = None + + if not data and json is not None: + # urllib3 requires a bytes-like body. Python 2's json.dumps + # provides this natively, but Python 3 gives a Unicode string. + content_type = 'application/json' + body = complexjson.dumps(json) + if not isinstance(body, bytes): + body = body.encode('utf-8') + + is_stream = all([ + hasattr(data, '__iter__'), + not isinstance(data, (basestring, list, tuple, Mapping)) + ]) + + try: + length = super_len(data) + except (TypeError, AttributeError, UnsupportedOperation): + length = None + + if is_stream: + body = data + + if getattr(body, 'tell', None) is not None: + # Record the current file position before reading. + # This will allow us to rewind a file in the event + # of a redirect. + try: + self._body_position = body.tell() + except (IOError, OSError): + # This differentiates from None, allowing us to catch + # a failed `tell()` later when trying to rewind the body + self._body_position = object() + + if files: + raise NotImplementedError('Streamed bodies and files are mutually exclusive.') + + if length: + self.headers['Content-Length'] = builtin_str(length) + else: + self.headers['Transfer-Encoding'] = 'chunked' + else: + # Multi-part file uploads. + if files: + (body, content_type) = self._encode_files(files, data) + else: + if data: + body = self._encode_params(data) + if isinstance(data, basestring) or hasattr(data, 'read'): + content_type = None + else: + content_type = 'application/x-www-form-urlencoded' + + self.prepare_content_length(body) + + # Add content-type if it wasn't explicitly provided. + if content_type and ('content-type' not in self.headers): + self.headers['Content-Type'] = content_type + + self.body = body + + def prepare_content_length(self, body): + """Prepare Content-Length header based on request method and body""" + if body is not None: + length = super_len(body) + if length: + # If length exists, set it. Otherwise, we fallback + # to Transfer-Encoding: chunked. + self.headers['Content-Length'] = builtin_str(length) + elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None: + # Set Content-Length to 0 for methods that can have a body + # but don't provide one. (i.e. 
not GET or HEAD) + self.headers['Content-Length'] = '0' + + def prepare_auth(self, auth, url=''): + """Prepares the given HTTP auth data.""" + + # If no Auth is explicitly provided, extract it from the URL first. + if auth is None: + url_auth = get_auth_from_url(self.url) + auth = url_auth if any(url_auth) else None + + if auth: + if isinstance(auth, tuple) and len(auth) == 2: + # special-case basic HTTP auth + auth = HTTPBasicAuth(*auth) + + # Allow auth to make its changes. + r = auth(self) + + # Update self to reflect the auth changes. + self.__dict__.update(r.__dict__) + + # Recompute Content-Length + self.prepare_content_length(self.body) + + def prepare_cookies(self, cookies): + """Prepares the given HTTP cookie data. + + This function eventually generates a ``Cookie`` header from the + given cookies using cookielib. Due to cookielib's design, the header + will not be regenerated if it already exists, meaning this function + can only be called once for the life of the + :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls + to ``prepare_cookies`` will have no actual effect, unless the "Cookie" + header is removed beforehand. + """ + if isinstance(cookies, cookielib.CookieJar): + self._cookies = cookies + else: + self._cookies = cookiejar_from_dict(cookies) + + cookie_header = get_cookie_header(self._cookies, self) + if cookie_header is not None: + self.headers['Cookie'] = cookie_header + + def prepare_hooks(self, hooks): + """Prepares the given hooks.""" + # hooks can be passed as None to the prepare method and to this + # method. To prevent iterating over None, simply use an empty list + # if hooks is False-y + hooks = hooks or [] + for event in hooks: + self.register_hook(event, hooks[event]) + + +class Response(object): + """The :class:`Response <Response>` object, which contains a + server's response to an HTTP request. + """ + + __attrs__ = [ + '_content', 'status_code', 'headers', 'url', 'history', + 'encoding', 'reason', 'cookies', 'elapsed', 'request' + ] + + def __init__(self): + self._content = False + self._content_consumed = False + self._next = None + + #: Integer Code of responded HTTP Status, e.g. 404 or 200. + self.status_code = None + + #: Case-insensitive Dictionary of Response Headers. + #: For example, ``headers['content-encoding']`` will return the + #: value of a ``'Content-Encoding'`` response header. + self.headers = CaseInsensitiveDict() + + #: File-like object representation of response (for advanced usage). + #: Use of ``raw`` requires that ``stream=True`` be set on the request. + # This requirement does not apply for use internally to Requests. + self.raw = None + + #: Final URL location of Response. + self.url = None + + #: Encoding to decode with when accessing r.text. + self.encoding = None + + #: A list of :class:`Response <Response>` objects from + #: the history of the Request. Any redirect responses will end + #: up here. The list is sorted from the oldest to the most recent request. + self.history = [] + + #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". + self.reason = None + + #: A CookieJar of Cookies the server sent back. + self.cookies = cookiejar_from_dict({}) + + #: The amount of time elapsed between sending the request + #: and the arrival of the response (as a timedelta). + #: This property specifically measures the time taken between sending + #: the first byte of the request and finishing parsing the headers. 
It
+        #: is therefore unaffected by consuming the response content or the
+        #: value of the ``stream`` keyword argument.
+        self.elapsed = datetime.timedelta(0)
+
+        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
+        #: is a response.
+        self.request = None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    def __getstate__(self):
+        # Consume everything; accessing the content attribute makes
+        # sure the content has been fully read.
+        if not self._content_consumed:
+            self.content
+
+        return {attr: getattr(self, attr, None) for attr in self.__attrs__}
+
+    def __setstate__(self, state):
+        for name, value in state.items():
+            setattr(self, name, value)
+
+        # pickled objects do not have .raw
+        setattr(self, '_content_consumed', True)
+        setattr(self, 'raw', None)
+
+    def __repr__(self):
+        return '<Response [%s]>' % (self.status_code)
+
+    def __bool__(self):
+        """Returns True if :attr:`status_code` is less than 400.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        return self.ok
+
+    def __nonzero__(self):
+        """Returns True if :attr:`status_code` is less than 400.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        return self.ok
+
+    def __iter__(self):
+        """Allows you to use a response as an iterator."""
+        return self.iter_content(128)
+
+    @property
+    def ok(self):
+        """Returns True if :attr:`status_code` is less than 400, False if not.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        try:
+            self.raise_for_status()
+        except HTTPError:
+            return False
+        return True
+
+    @property
+    def is_redirect(self):
+        """True if this Response is a well-formed HTTP redirect that could have
+        been processed automatically (by :meth:`Session.resolve_redirects`).
+        """
+        return ('location' in self.headers and self.status_code in REDIRECT_STATI)
+
+    @property
+    def is_permanent_redirect(self):
+        """True if this Response is one of the permanent versions of redirect."""
+        return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
+
+    @property
+    def next(self):
+        """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
+        return self._next
+
+    @property
+    def apparent_encoding(self):
+        """The apparent encoding, provided by the chardet library."""
+        return chardet.detect(self.content)['encoding']
+
+    def iter_content(self, chunk_size=1, decode_unicode=False):
+        """Iterates over the response data. When stream=True is set on the
+        request, this avoids reading the content at once into memory for
+        large responses. The chunk size is the number of bytes it should
+        read into memory. This is not necessarily the length of each item
+        returned as decoding can take place.
+
+        chunk_size must be of type int or None. A value of None will
+        function differently depending on the value of `stream`.
+ stream=True will read data as it arrives in whatever size the + chunks are received. If stream=False, data is returned as + a single chunk. + + If decode_unicode is True, content will be decoded using the best + available encoding based on the response. + """ + + def generate(): + # Special case for urllib3. + if hasattr(self.raw, 'stream'): + try: + for chunk in self.raw.stream(chunk_size, decode_content=True): + yield chunk + except ProtocolError as e: + raise ChunkedEncodingError(e) + except DecodeError as e: + raise ContentDecodingError(e) + except ReadTimeoutError as e: + raise ConnectionError(e) + else: + # Standard file-like object. + while True: + chunk = self.raw.read(chunk_size) + if not chunk: + break + yield chunk + + self._content_consumed = True + + if self._content_consumed and isinstance(self._content, bool): + raise StreamConsumedError() + elif chunk_size is not None and not isinstance(chunk_size, int): + raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size)) + # simulate reading small chunks of the content + reused_chunks = iter_slices(self._content, chunk_size) + + stream_chunks = generate() + + chunks = reused_chunks if self._content_consumed else stream_chunks + + if decode_unicode: + chunks = stream_decode_response_unicode(chunks, self) + + return chunks + + def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): + """Iterates over the response data, one line at a time. When + stream=True is set on the request, this avoids reading the + content at once into memory for large responses. + + .. note:: This method is not reentrant safe. + """ + + pending = None + + for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): + + if pending is not None: + chunk = pending + chunk + + if delimiter: + lines = chunk.split(delimiter) + else: + lines = chunk.splitlines() + + if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: + pending = lines.pop() + else: + pending = None + + for line in lines: + yield line + + if pending is not None: + yield pending + + @property + def content(self): + """Content of the response, in bytes.""" + + if self._content is False: + # Read the contents. + if self._content_consumed: + raise RuntimeError( + 'The content for this response was already consumed') + + if self.status_code == 0 or self.raw is None: + self._content = None + else: + self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b'' + + self._content_consumed = True + # don't need to release the connection; that's been handled by urllib3 + # since we exhausted the data. + return self._content + + @property + def text(self): + """Content of the response, in unicode. + + If Response.encoding is None, encoding will be guessed using + ``chardet``. + + The encoding of the response content is determined based solely on HTTP + headers, following RFC 2616 to the letter. If you can take advantage of + non-HTTP knowledge to make a better guess at the encoding, you should + set ``r.encoding`` appropriately before accessing this property. + """ + + # Try charset from content-type + content = None + encoding = self.encoding + + if not self.content: + return str('') + + # Fallback to auto-detected encoding. + if self.encoding is None: + encoding = self.apparent_encoding + + # Decode unicode from given encoding. 
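+        # (errors='replace' substitutes U+FFFD for undecodable byte sequences,
+        # so str() below only raises for an unknown or None encoding name,
+        # which the except clause then handles.)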
+ try: + content = str(self.content, encoding, errors='replace') + except (LookupError, TypeError): + # A LookupError is raised if the encoding was not found which could + # indicate a misspelling or similar mistake. + # + # A TypeError can be raised if encoding is None + # + # So we try blindly encoding. + content = str(self.content, errors='replace') + + return content + + def json(self, **kwargs): + r"""Returns the json-encoded content of a response, if any. + + :param \*\*kwargs: Optional arguments that ``json.loads`` takes. + :raises ValueError: If the response body does not contain valid json. + """ + + if not self.encoding and self.content and len(self.content) > 3: + # No encoding set. JSON RFC 4627 section 3 states we should expect + # UTF-8, -16 or -32. Detect which one to use; If the detection or + # decoding fails, fall back to `self.text` (using chardet to make + # a best guess). + encoding = guess_json_utf(self.content) + if encoding is not None: + try: + return complexjson.loads( + self.content.decode(encoding), **kwargs + ) + except UnicodeDecodeError: + # Wrong UTF codec detected; usually because it's not UTF-8 + # but some other 8-bit codec. This is an RFC violation, + # and the server didn't bother to tell us what codec *was* + # used. + pass + return complexjson.loads(self.text, **kwargs) + + @property + def links(self): + """Returns the parsed header links of the response, if any.""" + + header = self.headers.get('link') + + # l = MultiDict() + l = {} + + if header: + links = parse_header_links(header) + + for link in links: + key = link.get('rel') or link.get('url') + l[key] = link + + return l + + def raise_for_status(self): + """Raises stored :class:`HTTPError`, if one occurred.""" + + http_error_msg = '' + if isinstance(self.reason, bytes): + # We attempt to decode utf-8 first because some servers + # choose to localize their reason strings. If the string + # isn't utf-8, we fall back to iso-8859-1 for all other + # encodings. (See PR #3538) + try: + reason = self.reason.decode('utf-8') + except UnicodeDecodeError: + reason = self.reason.decode('iso-8859-1') + else: + reason = self.reason + + if 400 <= self.status_code < 500: + http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) + + elif 500 <= self.status_code < 600: + http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) + + if http_error_msg: + raise HTTPError(http_error_msg, response=self) + + def close(self): + """Releases the connection back to the pool. Once this method has been + called the underlying ``raw`` object must not be accessed again. 
+ + *Note: Should not normally need to be called explicitly.* + """ + if not self._content_consumed: + self.raw.close() + + release_conn = getattr(self.raw, 'release_conn', None) + if release_conn is not None: + release_conn() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/models.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/models.pyc new file mode 100644 index 0000000..289ae1f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/models.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages.py new file mode 100644 index 0000000..9582fa7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages.py @@ -0,0 +1,16 @@ +import sys + +# This code exists for backwards compatibility reasons. +# I don't like it either. Just look the other way. :) + +for package in ('urllib3', 'idna', 'chardet'): + vendored_package = "pip._vendor." + package + locals()[package] = __import__(vendored_package) + # This traversal is apparently necessary such that the identities are + # preserved (requests.packages.urllib3.* is urllib3.*) + for mod in list(sys.modules): + if mod == vendored_package or mod.startswith(vendored_package + '.'): + unprefixed_mod = mod[len("pip._vendor."):] + sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod] + +# Kinda cool, though, right? diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages.pyc new file mode 100644 index 0000000..97120da Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/sessions.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/sessions.py new file mode 100644 index 0000000..d73d700 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/sessions.py @@ -0,0 +1,770 @@ +# -*- coding: utf-8 -*- + +""" +requests.session +~~~~~~~~~~~~~~~~ + +This module provides a Session object to manage and persist settings across +requests (cookies, auth, proxies). +""" +import os +import sys +import time +from datetime import timedelta + +from .auth import _basic_auth_str +from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse, Mapping +from .cookies import ( + cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) +from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT +from .hooks import default_hooks, dispatch_hook +from ._internal_utils import to_native_string +from .utils import to_key_val_list, default_headers, DEFAULT_PORTS +from .exceptions import ( + TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) + +from .structures import CaseInsensitiveDict +from .adapters import HTTPAdapter + +from .utils import ( + requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, + get_auth_from_url, rewind_body +) + +from .status_codes import codes + +# formerly defined here, reexposed here for backward compatibility +from .models import REDIRECT_STATI + +# Preferred clock, based on which one is more accurate on a given system. 
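+# (Illustrative rationale: time.time() on Windows historically ticks in
+# ~16 ms steps, so a performance counter is preferred there; on other
+# platforms time.time() is precise enough for measuring elapsed time.)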
+if sys.platform == 'win32':
+    try:  # Python 3.4+
+        preferred_clock = time.perf_counter
+    except AttributeError:  # Earlier than Python 3.
+        preferred_clock = time.clock
+else:
+    preferred_clock = time.time
+
+
+def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
+    """Determines the appropriate setting for a given request, taking into
+    account the explicit setting on that request, and the setting in the
+    session. If both settings are dictionaries, they will be merged together
+    using `dict_class`.
+    """
+
+    if session_setting is None:
+        return request_setting
+
+    if request_setting is None:
+        return session_setting
+
+    # Bypass if not a dictionary (e.g. verify)
+    if not (
+        isinstance(session_setting, Mapping) and
+        isinstance(request_setting, Mapping)
+    ):
+        return request_setting
+
+    merged_setting = dict_class(to_key_val_list(session_setting))
+    merged_setting.update(to_key_val_list(request_setting))
+
+    # Remove keys that are set to None. Extract keys first to avoid altering
+    # the dictionary during iteration.
+    none_keys = [k for (k, v) in merged_setting.items() if v is None]
+    for key in none_keys:
+        del merged_setting[key]
+
+    return merged_setting
+
+
+def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
+    """Properly merges both requests and session hooks.
+
+    This is necessary because when request_hooks == {'response': []}, the
+    merge breaks Session hooks entirely.
+    """
+    if session_hooks is None or session_hooks.get('response') == []:
+        return request_hooks
+
+    if request_hooks is None or request_hooks.get('response') == []:
+        return session_hooks
+
+    return merge_setting(request_hooks, session_hooks, dict_class)
+
+
+class SessionRedirectMixin(object):
+
+    def get_redirect_target(self, resp):
+        """Receives a Response. Returns a redirect URI or ``None``"""
+        # Due to the nature of how requests processes redirects this method will
+        # be called at least once upon the original response and at least twice
+        # on each subsequent redirect response (if any).
+        # If a custom mixin is used to handle this logic, it may be advantageous
+        # to cache the redirect location onto the response object as a private
+        # attribute.
+        if resp.is_redirect:
+            location = resp.headers['location']
+            # Currently the underlying http module on py3 decodes headers
+            # in latin1, but empirical evidence suggests that latin1 is very
+            # rarely used with non-ASCII characters in HTTP headers.
+            # A UTF-8 encoded header is more likely than a latin1 one, and
+            # decoding as latin1 mishandles UTF-8 encoded location headers.
+            # To solve this, we re-encode the location in latin1.
+            if is_py3:
+                location = location.encode('latin1')
+            return to_native_string(location, 'utf8')
+        return None
+
+    def should_strip_auth(self, old_url, new_url):
+        """Decide whether the Authorization header should be removed when redirecting"""
+        old_parsed = urlparse(old_url)
+        new_parsed = urlparse(new_url)
+        if old_parsed.hostname != new_parsed.hostname:
+            return True
+        # Special case: allow http -> https redirect when using the standard
+        # ports. This isn't specified by RFC 7235, but is kept to avoid
+        # breaking backwards compatibility with older versions of requests
+        # that allowed any redirects on the same host.
+        if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
+                and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
+            return False
+
+        # Handle default port usage corresponding to scheme.
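+        # (e.g. redirecting 'http://example.com:80/' to 'http://example.com/'
+        # keeps the Authorization header, since both use the scheme's
+        # default port.)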
+ changed_port = old_parsed.port != new_parsed.port + changed_scheme = old_parsed.scheme != new_parsed.scheme + default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) + if (not changed_scheme and old_parsed.port in default_port + and new_parsed.port in default_port): + return False + + # Standard case: root URI must match + return changed_port or changed_scheme + + def resolve_redirects(self, resp, req, stream=False, timeout=None, + verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): + """Receives a Response. Returns a generator of Responses or Requests.""" + + hist = [] # keep track of history + + url = self.get_redirect_target(resp) + previous_fragment = urlparse(req.url).fragment + while url: + prepared_request = req.copy() + + # Update history and keep track of redirects. + # resp.history must ignore the original request in this loop + hist.append(resp) + resp.history = hist[1:] + + try: + resp.content # Consume socket so it can be released + except (ChunkedEncodingError, ContentDecodingError, RuntimeError): + resp.raw.read(decode_content=False) + + if len(resp.history) >= self.max_redirects: + raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp) + + # Release the connection back into the pool. + resp.close() + + # Handle redirection without scheme (see: RFC 1808 Section 4) + if url.startswith('//'): + parsed_rurl = urlparse(resp.url) + url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url) + + # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) + parsed = urlparse(url) + if parsed.fragment == '' and previous_fragment: + parsed = parsed._replace(fragment=previous_fragment) + elif parsed.fragment: + previous_fragment = parsed.fragment + url = parsed.geturl() + + # Facilitate relative 'location' headers, as allowed by RFC 7231. + # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') + # Compliant with RFC3986, we percent encode the url. + if not parsed.netloc: + url = urljoin(resp.url, requote_uri(url)) + else: + url = requote_uri(url) + + prepared_request.url = to_native_string(url) + + self.rebuild_method(prepared_request, resp) + + # https://github.com/requests/requests/issues/1084 + if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): + # https://github.com/requests/requests/issues/3490 + purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') + for header in purged_headers: + prepared_request.headers.pop(header, None) + prepared_request.body = None + + headers = prepared_request.headers + try: + del headers['Cookie'] + except KeyError: + pass + + # Extract any cookies sent on the response to the cookiejar + # in the new request. Because we've mutated our copied prepared + # request, use the old one that we haven't yet touched. + extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) + merge_cookies(prepared_request._cookies, self.cookies) + prepared_request.prepare_cookies(prepared_request._cookies) + + # Rebuild auth and proxy information. + proxies = self.rebuild_proxies(prepared_request, proxies) + self.rebuild_auth(prepared_request, resp) + + # A failed tell() sets `_body_position` to `object()`. This non-None + # value ensures `rewindable` will be True, allowing us to raise an + # UnrewindableBodyError, instead of hanging the connection. 
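+            # (Illustrative: a body streamed from a pipe or socket has no
+            # usable tell(); _body_position is then the object() sentinel set
+            # in prepare_body, and rewind_body() raises rather than resending
+            # a truncated body.)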
+ rewindable = ( + prepared_request._body_position is not None and + ('Content-Length' in headers or 'Transfer-Encoding' in headers) + ) + + # Attempt to rewind consumed file-like object. + if rewindable: + rewind_body(prepared_request) + + # Override the original request. + req = prepared_request + + if yield_requests: + yield req + else: + + resp = self.send( + req, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + allow_redirects=False, + **adapter_kwargs + ) + + extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) + + # extract redirect url, if any, for the next loop + url = self.get_redirect_target(resp) + yield resp + + def rebuild_auth(self, prepared_request, response): + """When being redirected we may want to strip authentication from the + request to avoid leaking credentials. This method intelligently removes + and reapplies authentication where possible to avoid credential loss. + """ + headers = prepared_request.headers + url = prepared_request.url + + if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): + # If we get redirected to a new host, we should strip out any + # authentication headers. + del headers['Authorization'] + + # .netrc might have more auth for us on our new host. + new_auth = get_netrc_auth(url) if self.trust_env else None + if new_auth is not None: + prepared_request.prepare_auth(new_auth) + + return + + def rebuild_proxies(self, prepared_request, proxies): + """This method re-evaluates the proxy configuration by considering the + environment variables. If we are redirected to a URL covered by + NO_PROXY, we strip the proxy configuration. Otherwise, we set missing + proxy keys for this URL (in case they were stripped by a previous + redirect). + + This method also replaces the Proxy-Authorization header where + necessary. + + :rtype: dict + """ + proxies = proxies if proxies is not None else {} + headers = prepared_request.headers + url = prepared_request.url + scheme = urlparse(url).scheme + new_proxies = proxies.copy() + no_proxy = proxies.get('no_proxy') + + bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) + if self.trust_env and not bypass_proxy: + environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) + + proxy = environ_proxies.get(scheme, environ_proxies.get('all')) + + if proxy: + new_proxies.setdefault(scheme, proxy) + + if 'Proxy-Authorization' in headers: + del headers['Proxy-Authorization'] + + try: + username, password = get_auth_from_url(new_proxies[scheme]) + except KeyError: + username, password = None, None + + if username and password: + headers['Proxy-Authorization'] = _basic_auth_str(username, password) + + return new_proxies + + def rebuild_method(self, prepared_request, response): + """When being redirected we may want to change the method of the request + based on certain specs or browser behavior. + """ + method = prepared_request.method + + # https://tools.ietf.org/html/rfc7231#section-6.4.4 + if response.status_code == codes.see_other and method != 'HEAD': + method = 'GET' + + # Do what the browsers do, despite standards... + # First, turn 302s into GETs. + if response.status_code == codes.found and method != 'HEAD': + method = 'GET' + + # Second, if a POST is responded to with a 301, turn it into a GET. + # This bizarre behaviour is explained in Issue 1704. 
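+        # (e.g. a POST answered with '301 Moved Permanently' is reissued as a
+        # GET to the new location, mirroring browser behaviour.)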
+ if response.status_code == codes.moved and method == 'POST': + method = 'GET' + + prepared_request.method = method + + +class Session(SessionRedirectMixin): + """A Requests session. + + Provides cookie persistence, connection-pooling, and configuration. + + Basic Usage:: + + >>> import requests + >>> s = requests.Session() + >>> s.get('https://httpbin.org/get') + <Response [200]> + + Or as a context manager:: + + >>> with requests.Session() as s: + >>> s.get('https://httpbin.org/get') + <Response [200]> + """ + + __attrs__ = [ + 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', + 'cert', 'prefetch', 'adapters', 'stream', 'trust_env', + 'max_redirects', + ] + + def __init__(self): + + #: A case-insensitive dictionary of headers to be sent on each + #: :class:`Request <Request>` sent from this + #: :class:`Session <Session>`. + self.headers = default_headers() + + #: Default Authentication tuple or object to attach to + #: :class:`Request <Request>`. + self.auth = None + + #: Dictionary mapping protocol or protocol and host to the URL of the proxy + #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to + #: be used on each :class:`Request <Request>`. + self.proxies = {} + + #: Event-handling hooks. + self.hooks = default_hooks() + + #: Dictionary of querystring data to attach to each + #: :class:`Request <Request>`. The dictionary values may be lists for + #: representing multivalued query parameters. + self.params = {} + + #: Stream response content default. + self.stream = False + + #: SSL Verification default. + self.verify = True + + #: SSL client certificate default, if String, path to ssl client + #: cert file (.pem). If Tuple, ('cert', 'key') pair. + self.cert = None + + #: Maximum number of redirects allowed. If the request exceeds this + #: limit, a :class:`TooManyRedirects` exception is raised. + #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is + #: 30. + self.max_redirects = DEFAULT_REDIRECT_LIMIT + + #: Trust environment settings for proxy configuration, default + #: authentication and similar. + self.trust_env = True + + #: A CookieJar containing all currently outstanding cookies set on this + #: session. By default it is a + #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but + #: may be any other ``cookielib.CookieJar`` compatible object. + self.cookies = cookiejar_from_dict({}) + + # Default connection adapters. + self.adapters = OrderedDict() + self.mount('https://', HTTPAdapter()) + self.mount('http://', HTTPAdapter()) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def prepare_request(self, request): + """Constructs a :class:`PreparedRequest <PreparedRequest>` for + transmission and returns it. The :class:`PreparedRequest` has settings + merged from the :class:`Request <Request>` instance and those of the + :class:`Session`. + + :param request: :class:`Request` instance to prepare with this + session's settings. + :rtype: requests.PreparedRequest + """ + cookies = request.cookies or {} + + # Bootstrap CookieJar. + if not isinstance(cookies, cookielib.CookieJar): + cookies = cookiejar_from_dict(cookies) + + # Merge with session cookies + merged_cookies = merge_cookies( + merge_cookies(RequestsCookieJar(), self.cookies), cookies) + + # Set environment's basic authentication if not explicitly set. 
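+        # (e.g. with trust_env set, a ~/.netrc entry such as
+        # "machine example.com login user password secret" is picked up by
+        # get_netrc_auth() when neither the request nor the session set auth.)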
+        auth = request.auth
+        if self.trust_env and not auth and not self.auth:
+            auth = get_netrc_auth(request.url)
+
+        p = PreparedRequest()
+        p.prepare(
+            method=request.method.upper(),
+            url=request.url,
+            files=request.files,
+            data=request.data,
+            json=request.json,
+            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
+            params=merge_setting(request.params, self.params),
+            auth=merge_setting(auth, self.auth),
+            cookies=merged_cookies,
+            hooks=merge_hooks(request.hooks, self.hooks),
+        )
+        return p
+
+    def request(self, method, url,
+            params=None, data=None, headers=None, cookies=None, files=None,
+            auth=None, timeout=None, allow_redirects=True, proxies=None,
+            hooks=None, stream=None, verify=None, cert=None, json=None):
+        """Constructs a :class:`Request <Request>`, prepares it and sends it.
+        Returns :class:`Response <Response>` object.
+
+        :param method: method for the new :class:`Request` object.
+        :param url: URL for the new :class:`Request` object.
+        :param params: (optional) Dictionary or bytes to be sent in the query
+            string for the :class:`Request`.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param json: (optional) json to send in the body of the
+            :class:`Request`.
+        :param headers: (optional) Dictionary of HTTP Headers to send with the
+            :class:`Request`.
+        :param cookies: (optional) Dict or CookieJar object to send with the
+            :class:`Request`.
+        :param files: (optional) Dictionary of ``'filename': file-like-objects``
+            for multipart encoding upload.
+        :param auth: (optional) Auth tuple or callable to enable
+            Basic/Digest/Custom HTTP Auth.
+        :param timeout: (optional) How long to wait for the server to send
+            data before giving up, as a float, or a :ref:`(connect timeout,
+            read timeout) <timeouts>` tuple.
+        :type timeout: float or tuple
+        :param allow_redirects: (optional) Set to True by default.
+        :type allow_redirects: bool
+        :param proxies: (optional) Dictionary mapping protocol or protocol and
+            hostname to the URL of the proxy.
+        :param stream: (optional) whether to immediately download the response
+            content. Defaults to ``False``.
+        :param verify: (optional) Either a boolean, in which case it controls whether we verify
+            the server's TLS certificate, or a string, in which case it must be a path
+            to a CA bundle to use. Defaults to ``True``.
+        :param cert: (optional) if String, path to ssl client cert file (.pem).
+            If Tuple, ('cert', 'key') pair.
+        :rtype: requests.Response
+        """
+        # Create the Request.
+        req = Request(
+            method=method.upper(),
+            url=url,
+            headers=headers,
+            files=files,
+            data=data or {},
+            json=json,
+            params=params or {},
+            auth=auth,
+            cookies=cookies,
+            hooks=hooks,
+        )
+        prep = self.prepare_request(req)
+
+        proxies = proxies or {}
+
+        settings = self.merge_environment_settings(
+            prep.url, proxies, stream, verify, cert
+        )
+
+        # Send the request.
+        send_kwargs = {
+            'timeout': timeout,
+            'allow_redirects': allow_redirects,
+        }
+        send_kwargs.update(settings)
+        resp = self.send(prep, **send_kwargs)
+
+        return resp
+
+    def get(self, url, **kwargs):
+        r"""Sends a GET request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        kwargs.setdefault('allow_redirects', True)
+        return self.request('GET', url, **kwargs)
+
+    def options(self, url, **kwargs):
+        r"""Sends an OPTIONS request.
Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', True) + return self.request('OPTIONS', url, **kwargs) + + def head(self, url, **kwargs): + r"""Sends a HEAD request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', False) + return self.request('HEAD', url, **kwargs) + + def post(self, url, data=None, json=None, **kwargs): + r"""Sends a POST request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request('POST', url, data=data, json=json, **kwargs) + + def put(self, url, data=None, **kwargs): + r"""Sends a PUT request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request('PUT', url, data=data, **kwargs) + + def patch(self, url, data=None, **kwargs): + r"""Sends a PATCH request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request('PATCH', url, data=data, **kwargs) + + def delete(self, url, **kwargs): + r"""Sends a DELETE request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request('DELETE', url, **kwargs) + + def send(self, request, **kwargs): + """Send a given PreparedRequest. + + :rtype: requests.Response + """ + # Set defaults that the hooks can utilize to ensure they always have + # the correct parameters to reproduce the previous request. + kwargs.setdefault('stream', self.stream) + kwargs.setdefault('verify', self.verify) + kwargs.setdefault('cert', self.cert) + kwargs.setdefault('proxies', self.proxies) + + # It's possible that users might accidentally send a Request object. + # Guard against that specific failure case. 
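+        # (i.e. session.send(Request('GET', url)) is rejected below; call
+        # session.prepare_request(req) or req.prepare() first.)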
+ if isinstance(request, Request): + raise ValueError('You can only send PreparedRequests.') + + # Set up variables needed for resolve_redirects and dispatching of hooks + allow_redirects = kwargs.pop('allow_redirects', True) + stream = kwargs.get('stream') + hooks = request.hooks + + # Get the appropriate adapter to use + adapter = self.get_adapter(url=request.url) + + # Start time (approximately) of the request + start = preferred_clock() + + # Send the request + r = adapter.send(request, **kwargs) + + # Total elapsed time of the request (approximately) + elapsed = preferred_clock() - start + r.elapsed = timedelta(seconds=elapsed) + + # Response manipulation hooks + r = dispatch_hook('response', hooks, r, **kwargs) + + # Persist cookies + if r.history: + + # If the hooks create history then we want those cookies too + for resp in r.history: + extract_cookies_to_jar(self.cookies, resp.request, resp.raw) + + extract_cookies_to_jar(self.cookies, request, r.raw) + + # Redirect resolving generator. + gen = self.resolve_redirects(r, request, **kwargs) + + # Resolve redirects if allowed. + history = [resp for resp in gen] if allow_redirects else [] + + # Shuffle things around if there's history. + if history: + # Insert the first (original) request at the start + history.insert(0, r) + # Get the last request made + r = history.pop() + r.history = history + + # If redirects aren't being followed, store the response on the Request for Response.next(). + if not allow_redirects: + try: + r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) + except StopIteration: + pass + + if not stream: + r.content + + return r + + def merge_environment_settings(self, url, proxies, stream, verify, cert): + """ + Check the environment and merge it with some settings. + + :rtype: dict + """ + # Gather clues from the surrounding environment. + if self.trust_env: + # Set environment's proxies. + no_proxy = proxies.get('no_proxy') if proxies is not None else None + env_proxies = get_environ_proxies(url, no_proxy=no_proxy) + for (k, v) in env_proxies.items(): + proxies.setdefault(k, v) + + # Look for requests environment configuration and be compatible + # with cURL. + if verify is True or verify is None: + verify = (os.environ.get('REQUESTS_CA_BUNDLE') or + os.environ.get('CURL_CA_BUNDLE')) + + # Merge all the kwargs. + proxies = merge_setting(proxies, self.proxies) + stream = merge_setting(stream, self.stream) + verify = merge_setting(verify, self.verify) + cert = merge_setting(cert, self.cert) + + return {'verify': verify, 'proxies': proxies, 'stream': stream, + 'cert': cert} + + def get_adapter(self, url): + """ + Returns the appropriate connection adapter for the given URL. + + :rtype: requests.adapters.BaseAdapter + """ + for (prefix, adapter) in self.adapters.items(): + + if url.lower().startswith(prefix.lower()): + return adapter + + # Nothing matches :-/ + raise InvalidSchema("No connection adapters were found for '%s'" % url) + + def close(self): + """Closes all adapters and as such the session""" + for v in self.adapters.values(): + v.close() + + def mount(self, prefix, adapter): + """Registers a connection adapter to a prefix. + + Adapters are sorted in descending order by prefix length. 
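+
+        Illustrative usage (``MyAdapter`` is a stand-in for any
+        ``BaseAdapter`` subclass)::
+
+            s = requests.Session()
+            s.mount('https://internal.example/', MyAdapter())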
+ """ + self.adapters[prefix] = adapter + keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] + + for key in keys_to_move: + self.adapters[key] = self.adapters.pop(key) + + def __getstate__(self): + state = {attr: getattr(self, attr, None) for attr in self.__attrs__} + return state + + def __setstate__(self, state): + for attr, value in state.items(): + setattr(self, attr, value) + + +def session(): + """ + Returns a :class:`Session` for context-management. + + .. deprecated:: 1.0.0 + + This method has been deprecated since version 1.0.0 and is only kept for + backwards compatibility. New code should use :class:`~requests.sessions.Session` + to create a session. This may be removed at a future date. + + :rtype: Session + """ + return Session() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyc new file mode 100644 index 0000000..4ea50eb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/sessions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.py new file mode 100644 index 0000000..813e8c4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- + +r""" +The ``codes`` object defines a mapping from common names for HTTP statuses +to their numerical codes, accessible either as attributes or as dictionary +items. + +>>> requests.codes['temporary_redirect'] +307 +>>> requests.codes.teapot +418 +>>> requests.codes['\o/'] +200 + +Some codes have multiple names, and both upper- and lower-case versions of +the names are allowed. For example, ``codes.ok``, ``codes.OK``, and +``codes.okay`` all correspond to the HTTP status code 200. +""" + +from .structures import LookupDict + +_codes = { + + # Informational. + 100: ('continue',), + 101: ('switching_protocols',), + 102: ('processing',), + 103: ('checkpoint',), + 122: ('uri_too_long', 'request_uri_too_long'), + 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), + 201: ('created',), + 202: ('accepted',), + 203: ('non_authoritative_info', 'non_authoritative_information'), + 204: ('no_content',), + 205: ('reset_content', 'reset'), + 206: ('partial_content', 'partial'), + 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), + 208: ('already_reported',), + 226: ('im_used',), + + # Redirection. + 300: ('multiple_choices',), + 301: ('moved_permanently', 'moved', '\\o-'), + 302: ('found',), + 303: ('see_other', 'other'), + 304: ('not_modified',), + 305: ('use_proxy',), + 306: ('switch_proxy',), + 307: ('temporary_redirect', 'temporary_moved', 'temporary'), + 308: ('permanent_redirect', + 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0 + + # Client Error. 
+ 400: ('bad_request', 'bad'), + 401: ('unauthorized',), + 402: ('payment_required', 'payment'), + 403: ('forbidden',), + 404: ('not_found', '-o-'), + 405: ('method_not_allowed', 'not_allowed'), + 406: ('not_acceptable',), + 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), + 408: ('request_timeout', 'timeout'), + 409: ('conflict',), + 410: ('gone',), + 411: ('length_required',), + 412: ('precondition_failed', 'precondition'), + 413: ('request_entity_too_large',), + 414: ('request_uri_too_large',), + 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), + 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), + 417: ('expectation_failed',), + 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), + 421: ('misdirected_request',), + 422: ('unprocessable_entity', 'unprocessable'), + 423: ('locked',), + 424: ('failed_dependency', 'dependency'), + 425: ('unordered_collection', 'unordered'), + 426: ('upgrade_required', 'upgrade'), + 428: ('precondition_required', 'precondition'), + 429: ('too_many_requests', 'too_many'), + 431: ('header_fields_too_large', 'fields_too_large'), + 444: ('no_response', 'none'), + 449: ('retry_with', 'retry'), + 450: ('blocked_by_windows_parental_controls', 'parental_controls'), + 451: ('unavailable_for_legal_reasons', 'legal_reasons'), + 499: ('client_closed_request',), + + # Server Error. + 500: ('internal_server_error', 'server_error', '/o\\', '✗'), + 501: ('not_implemented',), + 502: ('bad_gateway',), + 503: ('service_unavailable', 'unavailable'), + 504: ('gateway_timeout',), + 505: ('http_version_not_supported', 'http_version'), + 506: ('variant_also_negotiates',), + 507: ('insufficient_storage',), + 509: ('bandwidth_limit_exceeded', 'bandwidth'), + 510: ('not_extended',), + 511: ('network_authentication_required', 'network_auth', 'network_authentication'), +} + +codes = LookupDict(name='status_codes') + +def _init(): + for code, titles in _codes.items(): + for title in titles: + setattr(codes, title, code) + if not title.startswith(('\\', '/')): + setattr(codes, title.upper(), code) + + def doc(code): + names = ', '.join('``%s``' % n for n in _codes[code]) + return '* %d: %s' % (code, names) + + global __doc__ + __doc__ = (__doc__ + '\n' + + '\n'.join(doc(code) for code in sorted(_codes)) + if __doc__ is not None else None) + +_init() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.pyc new file mode 100644 index 0000000..9e79901 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/structures.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/structures.py new file mode 100644 index 0000000..da930e2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/structures.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- + +""" +requests.structures +~~~~~~~~~~~~~~~~~~~ + +Data structures that power Requests. +""" + +from .compat import OrderedDict, Mapping, MutableMapping + + +class CaseInsensitiveDict(MutableMapping): + """A case-insensitive ``dict``-like object. + + Implements all methods and operations of + ``MutableMapping`` as well as dict's ``copy``. Also + provides ``lower_items``. + + All keys are expected to be strings. 
The structure remembers the + case of the last key to be set, and ``iter(instance)``, + ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` + will contain case-sensitive keys. However, querying and contains + testing is case insensitive:: + + cid = CaseInsensitiveDict() + cid['Accept'] = 'application/json' + cid['aCCEPT'] == 'application/json' # True + list(cid) == ['Accept'] # True + + For example, ``headers['content-encoding']`` will return the + value of a ``'Content-Encoding'`` response header, regardless + of how the header name was originally stored. + + If the constructor, ``.update``, or equality comparison + operations are given keys that have equal ``.lower()``s, the + behavior is undefined. + """ + + def __init__(self, data=None, **kwargs): + self._store = OrderedDict() + if data is None: + data = {} + self.update(data, **kwargs) + + def __setitem__(self, key, value): + # Use the lowercased key for lookups, but store the actual + # key alongside the value. + self._store[key.lower()] = (key, value) + + def __getitem__(self, key): + return self._store[key.lower()][1] + + def __delitem__(self, key): + del self._store[key.lower()] + + def __iter__(self): + return (casedkey for casedkey, mappedvalue in self._store.values()) + + def __len__(self): + return len(self._store) + + def lower_items(self): + """Like iteritems(), but with all lowercase keys.""" + return ( + (lowerkey, keyval[1]) + for (lowerkey, keyval) + in self._store.items() + ) + + def __eq__(self, other): + if isinstance(other, Mapping): + other = CaseInsensitiveDict(other) + else: + return NotImplemented + # Compare insensitively + return dict(self.lower_items()) == dict(other.lower_items()) + + # Copy is required + def copy(self): + return CaseInsensitiveDict(self._store.values()) + + def __repr__(self): + return str(dict(self.items())) + + +class LookupDict(dict): + """Dictionary lookup object.""" + + def __init__(self, name=None): + self.name = name + super(LookupDict, self).__init__() + + def __repr__(self): + return '<lookup \'%s\'>' % (self.name) + + def __getitem__(self, key): + # We allow fall-through here, so values default to None + + return self.__dict__.get(key, None) + + def get(self, key, default=None): + return self.__dict__.get(key, default) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyc new file mode 100644 index 0000000..3a81745 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/structures.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py new file mode 100644 index 0000000..8170a8d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py @@ -0,0 +1,977 @@ +# -*- coding: utf-8 -*- + +""" +requests.utils +~~~~~~~~~~~~~~ + +This module provides utility functions that are used within Requests +that are also useful for external consumption. +""" + +import codecs +import contextlib +import io +import os +import re +import socket +import struct +import sys +import tempfile +import warnings +import zipfile + +from .__version__ import __version__ +from . 
import certs +# to_native_string is unused here, but imported here for backwards compatibility +from ._internal_utils import to_native_string +from .compat import parse_http_list as _parse_list_header +from .compat import ( + quote, urlparse, bytes, str, OrderedDict, unquote, getproxies, + proxy_bypass, urlunparse, basestring, integer_types, is_py3, + proxy_bypass_environment, getproxies_environment, Mapping) +from .cookies import cookiejar_from_dict +from .structures import CaseInsensitiveDict +from .exceptions import ( + InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError) + +NETRC_FILES = ('.netrc', '_netrc') + +DEFAULT_CA_BUNDLE_PATH = certs.where() + +DEFAULT_PORTS = {'http': 80, 'https': 443} + + +if sys.platform == 'win32': + # provide a proxy_bypass version on Windows without DNS lookups + + def proxy_bypass_registry(host): + try: + if is_py3: + import winreg + else: + import _winreg as winreg + except ImportError: + return False + + try: + internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, + r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it + proxyEnable = int(winreg.QueryValueEx(internetSettings, + 'ProxyEnable')[0]) + # ProxyOverride is almost always a string + proxyOverride = winreg.QueryValueEx(internetSettings, + 'ProxyOverride')[0] + except OSError: + return False + if not proxyEnable or not proxyOverride: + return False + + # make a check value list from the registry entry: replace the + # '<local>' string by the localhost entry and the corresponding + # canonical entry. + proxyOverride = proxyOverride.split(';') + # now check if we match one of the registry values. + for test in proxyOverride: + if test == '<local>': + if '.' not in host: + return True + test = test.replace(".", r"\.") # mask dots + test = test.replace("*", r".*") # change glob sequence + test = test.replace("?", r".") # change glob char + if re.match(test, host, re.I): + return True + return False + + def proxy_bypass(host): # noqa + """Return True, if the host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or the registry. + """ + if getproxies_environment(): + return proxy_bypass_environment(host) + else: + return proxy_bypass_registry(host) + + +def dict_to_sequence(d): + """Returns an internal sequence dictionary update.""" + + if hasattr(d, 'items'): + d = d.items() + + return d + + +def super_len(o): + total_length = None + current_position = 0 + + if hasattr(o, '__len__'): + total_length = len(o) + + elif hasattr(o, 'len'): + total_length = o.len + + elif hasattr(o, 'fileno'): + try: + fileno = o.fileno() + except io.UnsupportedOperation: + pass + else: + total_length = os.fstat(fileno).st_size + + # Having used fstat to determine the file length, we need to + # confirm that this file was opened up in binary mode. + if 'b' not in o.mode: + warnings.warn(( + "Requests has determined the content-length for this " + "request using the binary size of the file: however, the " + "file has been opened in text mode (i.e. without the 'b' " + "flag in the mode). This may lead to an incorrect " + "content-length. In Requests 3.0, support will be removed " + "for files in text mode."), + FileModeWarning + ) + + if hasattr(o, 'tell'): + try: + current_position = o.tell() + except (OSError, IOError): + # This can happen in some weird situations, such as when the file + # is actually a special file descriptor like stdin. 
In this + # instance, we don't know what the length is, so set it to zero and + # let requests chunk it instead. + if total_length is not None: + current_position = total_length + else: + if hasattr(o, 'seek') and total_length is None: + # StringIO and BytesIO have seek but no useable fileno + try: + # seek to end of file + o.seek(0, 2) + total_length = o.tell() + + # seek back to current position to support + # partially read file-like objects + o.seek(current_position or 0) + except (OSError, IOError): + total_length = 0 + + if total_length is None: + total_length = 0 + + return max(0, total_length - current_position) + + +def get_netrc_auth(url, raise_errors=False): + """Returns the Requests tuple auth for a given url from netrc.""" + + try: + from netrc import netrc, NetrcParseError + + netrc_path = None + + for f in NETRC_FILES: + try: + loc = os.path.expanduser('~/{}'.format(f)) + except KeyError: + # os.path.expanduser can fail when $HOME is undefined and + # getpwuid fails. See https://bugs.python.org/issue20164 & + # https://github.com/requests/requests/issues/1846 + return + + if os.path.exists(loc): + netrc_path = loc + break + + # Abort early if there isn't one. + if netrc_path is None: + return + + ri = urlparse(url) + + # Strip port numbers from netloc. This weird `if...encode`` dance is + # used for Python 3.2, which doesn't support unicode literals. + splitstr = b':' + if isinstance(url, str): + splitstr = splitstr.decode('ascii') + host = ri.netloc.split(splitstr)[0] + + try: + _netrc = netrc(netrc_path).authenticators(host) + if _netrc: + # Return with login / password + login_i = (0 if _netrc[0] else 1) + return (_netrc[login_i], _netrc[2]) + except (NetrcParseError, IOError): + # If there was a parsing error or a permissions issue reading the file, + # we'll just skip netrc auth unless explicitly asked to raise errors. + if raise_errors: + raise + + # AppEngine hackiness. + except (ImportError, AttributeError): + pass + + +def guess_filename(obj): + """Tries to guess the filename of the given object.""" + name = getattr(obj, 'name', None) + if (name and isinstance(name, basestring) and name[0] != '<' and + name[-1] != '>'): + return os.path.basename(name) + + +def extract_zipped_paths(path): + """Replace nonexistent paths that look like they refer to a member of a zip + archive with the location of an extracted copy of the target, or else + just return the provided path unchanged. + """ + if os.path.exists(path): + # this is already a valid path, no need to do anything further + return path + + # find the first valid part of the provided path and treat that as a zip archive + # assume the rest of the path is the name of a member in the archive + archive, member = os.path.split(path) + while archive and not os.path.exists(archive): + archive, prefix = os.path.split(archive) + member = '/'.join([prefix, member]) + + if not zipfile.is_zipfile(archive): + return path + + zip_file = zipfile.ZipFile(archive) + if member not in zip_file.namelist(): + return path + + # we have a valid zip archive and a valid member of that archive + tmp = tempfile.gettempdir() + extracted_path = os.path.join(tmp, *member.split('/')) + if not os.path.exists(extracted_path): + extracted_path = zip_file.extract(member, path=tmp) + + return extracted_path + + +def from_key_val_list(value): + """Take an object and test to see if it can be represented as a + dictionary. 
Unless it can not be represented as such, return an + OrderedDict, e.g., + + :: + + >>> from_key_val_list([('key', 'val')]) + OrderedDict([('key', 'val')]) + >>> from_key_val_list('string') + ValueError: cannot encode objects that are not 2-tuples + >>> from_key_val_list({'key': 'val'}) + OrderedDict([('key', 'val')]) + + :rtype: OrderedDict + """ + if value is None: + return None + + if isinstance(value, (str, bytes, bool, int)): + raise ValueError('cannot encode objects that are not 2-tuples') + + return OrderedDict(value) + + +def to_key_val_list(value): + """Take an object and test to see if it can be represented as a + dictionary. If it can be, return a list of tuples, e.g., + + :: + + >>> to_key_val_list([('key', 'val')]) + [('key', 'val')] + >>> to_key_val_list({'key': 'val'}) + [('key', 'val')] + >>> to_key_val_list('string') + ValueError: cannot encode objects that are not 2-tuples. + + :rtype: list + """ + if value is None: + return None + + if isinstance(value, (str, bytes, bool, int)): + raise ValueError('cannot encode objects that are not 2-tuples') + + if isinstance(value, Mapping): + value = value.items() + + return list(value) + + +# From mitsuhiko/werkzeug (used with permission). +def parse_list_header(value): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Quotes are removed automatically after parsing. + + It basically works like :func:`parse_set_header` just that items + may appear multiple times and case sensitivity is preserved. + + The return value is a standard :class:`list`: + + >>> parse_list_header('token, "quoted value"') + ['token', 'quoted value'] + + To create a header from the :class:`list` again, use the + :func:`dump_header` function. + + :param value: a string with a list header. + :return: :class:`list` + :rtype: list + """ + result = [] + for item in _parse_list_header(value): + if item[:1] == item[-1:] == '"': + item = unquote_header_value(item[1:-1]) + result.append(item) + return result + + +# From mitsuhiko/werkzeug (used with permission). +def parse_dict_header(value): + """Parse lists of key, value pairs as described by RFC 2068 Section 2 and + convert them into a python dict: + + >>> d = parse_dict_header('foo="is a fish", bar="as well"') + >>> type(d) is dict + True + >>> sorted(d.items()) + [('bar', 'as well'), ('foo', 'is a fish')] + + If there is no value for a key it will be `None`: + + >>> parse_dict_header('key_without_value') + {'key_without_value': None} + + To create a header from the :class:`dict` again, use the + :func:`dump_header` function. + + :param value: a string with a dict header. + :return: :class:`dict` + :rtype: dict + """ + result = {} + for item in _parse_list_header(value): + if '=' not in item: + result[item] = None + continue + name, value = item.split('=', 1) + if value[:1] == value[-1:] == '"': + value = unquote_header_value(value[1:-1]) + result[name] = value + return result + + +# From mitsuhiko/werkzeug (used with permission). +def unquote_header_value(value, is_filename=False): + r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). + This does not use the real unquoting but what browsers are actually + using for quoting. + + :param value: the header value to unquote. 
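+ :param is_filename: if True, values that look like UNC paths (leading ``\\``)
+ are returned without backslash unescaping.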
+ :rtype: str + """ + if value and value[0] == value[-1] == '"': + # this is not the real unquoting, but fixing this so that the + # RFC is met will result in bugs with internet explorer and + # probably some other browsers as well. IE for example is + # uploading files with "C:\foo\bar.txt" as filename + value = value[1:-1] + + # if this is a filename and the starting characters look like + # a UNC path, then just return the value without quotes. Using the + # replace sequence below on a UNC path has the effect of turning + # the leading double slash into a single slash and then + # _fix_ie_filename() doesn't work correctly. See #458. + if not is_filename or value[:2] != '\\\\': + return value.replace('\\\\', '\\').replace('\\"', '"') + return value + + +def dict_from_cookiejar(cj): + """Returns a key/value dictionary from a CookieJar. + + :param cj: CookieJar object to extract cookies from. + :rtype: dict + """ + + cookie_dict = {} + + for cookie in cj: + cookie_dict[cookie.name] = cookie.value + + return cookie_dict + + +def add_dict_to_cookiejar(cj, cookie_dict): + """Returns a CookieJar from a key/value dictionary. + + :param cj: CookieJar to insert cookies into. + :param cookie_dict: Dict of key/values to insert into CookieJar. + :rtype: CookieJar + """ + + return cookiejar_from_dict(cookie_dict, cj) + + +def get_encodings_from_content(content): + """Returns encodings from given content string. + + :param content: bytestring to extract encodings from. + """ + warnings.warn(( + 'In requests 3.0, get_encodings_from_content will be removed. For ' + 'more information, please see the discussion on issue #2266. (This' + ' warning should only appear once.)'), + DeprecationWarning) + + charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I) + pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I) + xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') + + return (charset_re.findall(content) + + pragma_re.findall(content) + + xml_re.findall(content)) + + +def _parse_content_type_header(header): + """Returns content type and parameters from given header + + :param header: string + :return: tuple containing content type and dictionary of + parameters + """ + + tokens = header.split(';') + content_type, params = tokens[0].strip(), tokens[1:] + params_dict = {} + items_to_strip = "\"' " + + for param in params: + param = param.strip() + if param: + key, value = param, True + index_of_equals = param.find("=") + if index_of_equals != -1: + key = param[:index_of_equals].strip(items_to_strip) + value = param[index_of_equals + 1:].strip(items_to_strip) + params_dict[key.lower()] = value + return content_type, params_dict + + +def get_encoding_from_headers(headers): + """Returns encodings from given HTTP Header Dict. + + :param headers: dictionary to extract encoding from. 
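+
+ Editor's illustration:
+
+ >>> get_encoding_from_headers({'content-type': 'text/html; charset=utf-8'})
+ 'utf-8'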
+ :rtype: str + """ + + content_type = headers.get('content-type') + + if not content_type: + return None + + content_type, params = _parse_content_type_header(content_type) + + if 'charset' in params: + return params['charset'].strip("'\"") + + if 'text' in content_type: + return 'ISO-8859-1' + + +def stream_decode_response_unicode(iterator, r): + """Stream decodes a iterator.""" + + if r.encoding is None: + for item in iterator: + yield item + return + + decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') + for chunk in iterator: + rv = decoder.decode(chunk) + if rv: + yield rv + rv = decoder.decode(b'', final=True) + if rv: + yield rv + + +def iter_slices(string, slice_length): + """Iterate over slices of a string.""" + pos = 0 + if slice_length is None or slice_length <= 0: + slice_length = len(string) + while pos < len(string): + yield string[pos:pos + slice_length] + pos += slice_length + + +def get_unicode_from_response(r): + """Returns the requested content back in unicode. + + :param r: Response object to get unicode content from. + + Tried: + + 1. charset from content-type + 2. fall back and replace all unicode characters + + :rtype: str + """ + warnings.warn(( + 'In requests 3.0, get_unicode_from_response will be removed. For ' + 'more information, please see the discussion on issue #2266. (This' + ' warning should only appear once.)'), + DeprecationWarning) + + tried_encodings = [] + + # Try charset from content-type + encoding = get_encoding_from_headers(r.headers) + + if encoding: + try: + return str(r.content, encoding) + except UnicodeError: + tried_encodings.append(encoding) + + # Fall back: + try: + return str(r.content, encoding, errors='replace') + except TypeError: + return r.content + + +# The unreserved URI characters (RFC 3986) +UNRESERVED_SET = frozenset( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") + + +def unquote_unreserved(uri): + """Un-escape any percent-escape sequences in a URI that are unreserved + characters. This leaves all reserved, illegal and non-ASCII bytes encoded. + + :rtype: str + """ + parts = uri.split('%') + for i in range(1, len(parts)): + h = parts[i][0:2] + if len(h) == 2 and h.isalnum(): + try: + c = chr(int(h, 16)) + except ValueError: + raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) + + if c in UNRESERVED_SET: + parts[i] = c + parts[i][2:] + else: + parts[i] = '%' + parts[i] + else: + parts[i] = '%' + parts[i] + return ''.join(parts) + + +def requote_uri(uri): + """Re-quote the given URI. + + This function passes the given URI through an unquote/quote cycle to + ensure that it is fully and consistently quoted. + + :rtype: str + """ + safe_with_percent = "!#$%&'()*+,/:;=?@[]~" + safe_without_percent = "!#$&'()*+,/:;=?@[]~" + try: + # Unquote only the unreserved characters + # Then quote only illegal characters (do not quote reserved, + # unreserved, or '%') + return quote(unquote_unreserved(uri), safe=safe_with_percent) + except InvalidURL: + # We couldn't unquote the given URI, so let's try quoting it, but + # there may be unquoted '%'s in the URI. We need to make sure they're + # properly quoted so they do not cause issues elsewhere. 
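+ # ('%' is deliberately absent from this safe set, so stray percent signs become %25)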
+ return quote(uri, safe=safe_without_percent) + + +def address_in_network(ip, net): + """This function allows you to check if an IP belongs to a network subnet + + Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 + returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 + + :rtype: bool + """ + ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] + netaddr, bits = net.split('/') + netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] + network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask + return (ipaddr & netmask) == (network & netmask) + + +def dotted_netmask(mask): + """Converts mask from /xx format to xxx.xxx.xxx.xxx + + Example: if mask is 24 function returns 255.255.255.0 + + :rtype: str + """ + bits = 0xffffffff ^ (1 << 32 - mask) - 1 + return socket.inet_ntoa(struct.pack('>I', bits)) + + +def is_ipv4_address(string_ip): + """ + :rtype: bool + """ + try: + socket.inet_aton(string_ip) + except socket.error: + return False + return True + + +def is_valid_cidr(string_network): + """ + Very simple check of the cidr format in no_proxy variable. + + :rtype: bool + """ + if string_network.count('/') == 1: + try: + mask = int(string_network.split('/')[1]) + except ValueError: + return False + + if mask < 1 or mask > 32: + return False + + try: + socket.inet_aton(string_network.split('/')[0]) + except socket.error: + return False + else: + return False + return True + + +@contextlib.contextmanager +def set_environ(env_name, value): + """Set the environment variable 'env_name' to 'value' + + Save previous value, yield, and then restore the previous value stored in + the environment variable 'env_name'. + + If 'value' is None, do nothing""" + value_changed = value is not None + if value_changed: + old_value = os.environ.get(env_name) + os.environ[env_name] = value + try: + yield + finally: + if value_changed: + if old_value is None: + del os.environ[env_name] + else: + os.environ[env_name] = old_value + + +def should_bypass_proxies(url, no_proxy): + """ + Returns whether we should bypass proxies or not. + + :rtype: bool + """ + # Prioritize lowercase environment variables over uppercase + # to keep a consistent behaviour with other http projects (curl, wget). + get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) + + # First check whether no_proxy is defined. If it is, check that the URL + # we're getting isn't in the no_proxy list. + no_proxy_arg = no_proxy + if no_proxy is None: + no_proxy = get_proxy('no_proxy') + parsed = urlparse(url) + + if parsed.hostname is None: + # URLs don't always have hostnames, e.g. file:/// urls. + return True + + if no_proxy: + # We need to check whether we match here. We need to see if we match + # the end of the hostname, both with and without the port. + no_proxy = ( + host for host in no_proxy.replace(' ', '').split(',') if host + ) + + if is_ipv4_address(parsed.hostname): + for proxy_ip in no_proxy: + if is_valid_cidr(proxy_ip): + if address_in_network(parsed.hostname, proxy_ip): + return True + elif parsed.hostname == proxy_ip: + # If no_proxy ip was defined in plain IP notation instead of cidr notation & + # matches the IP of the index + return True + else: + host_with_port = parsed.hostname + if parsed.port: + host_with_port += ':{}'.format(parsed.port) + + for host in no_proxy: + if parsed.hostname.endswith(host) or host_with_port.endswith(host): + # The URL does match something in no_proxy, so we don't want + # to apply the proxies on this URL. 
+ return True + + with set_environ('no_proxy', no_proxy_arg): + # parsed.hostname can be `None` in cases such as a file URI. + try: + bypass = proxy_bypass(parsed.hostname) + except (TypeError, socket.gaierror): + bypass = False + + if bypass: + return True + + return False + + +def get_environ_proxies(url, no_proxy=None): + """ + Return a dict of environment proxies. + + :rtype: dict + """ + if should_bypass_proxies(url, no_proxy=no_proxy): + return {} + else: + return getproxies() + + +def select_proxy(url, proxies): + """Select a proxy for the url, if applicable. + + :param url: The url being for the request + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + """ + proxies = proxies or {} + urlparts = urlparse(url) + if urlparts.hostname is None: + return proxies.get(urlparts.scheme, proxies.get('all')) + + proxy_keys = [ + urlparts.scheme + '://' + urlparts.hostname, + urlparts.scheme, + 'all://' + urlparts.hostname, + 'all', + ] + proxy = None + for proxy_key in proxy_keys: + if proxy_key in proxies: + proxy = proxies[proxy_key] + break + + return proxy + + +def default_user_agent(name="python-requests"): + """ + Return a string representing the default user agent. + + :rtype: str + """ + return '%s/%s' % (name, __version__) + + +def default_headers(): + """ + :rtype: requests.structures.CaseInsensitiveDict + """ + return CaseInsensitiveDict({ + 'User-Agent': default_user_agent(), + 'Accept-Encoding': ', '.join(('gzip', 'deflate')), + 'Accept': '*/*', + 'Connection': 'keep-alive', + }) + + +def parse_header_links(value): + """Return a list of parsed link headers proxies. + + i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg" + + :rtype: list + """ + + links = [] + + replace_chars = ' \'"' + + value = value.strip(replace_chars) + if not value: + return links + + for val in re.split(', *<', value): + try: + url, params = val.split(';', 1) + except ValueError: + url, params = val, '' + + link = {'url': url.strip('<> \'"')} + + for param in params.split(';'): + try: + key, value = param.split('=') + except ValueError: + break + + link[key.strip(replace_chars)] = value.strip(replace_chars) + + links.append(link) + + return links + + +# Null bytes; no need to recreate these on each call to guess_json_utf +_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 +_null2 = _null * 2 +_null3 = _null * 3 + + +def guess_json_utf(data): + """ + :rtype: str + """ + # JSON always starts with two ASCII characters, so detection is as + # easy as counting the nulls and from their location and count + # determine the encoding. Also detect a BOM, if present. 
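+ # (e.g. b'{"' starts 7B 22 in UTF-8, 7B 00 22 00 in UTF-16-LE, 7B 00 00 00 in UTF-32-LE)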
+ sample = data[:4] + if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): + return 'utf-32' # BOM included + if sample[:3] == codecs.BOM_UTF8: + return 'utf-8-sig' # BOM included, MS style (discouraged) + if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): + return 'utf-16' # BOM included + nullcount = sample.count(_null) + if nullcount == 0: + return 'utf-8' + if nullcount == 2: + if sample[::2] == _null2: # 1st and 3rd are null + return 'utf-16-be' + if sample[1::2] == _null2: # 2nd and 4th are null + return 'utf-16-le' + # Did not detect 2 valid UTF-16 ascii-range characters + if nullcount == 3: + if sample[:3] == _null3: + return 'utf-32-be' + if sample[1:] == _null3: + return 'utf-32-le' + # Did not detect a valid UTF-32 ascii-range character + return None + + +def prepend_scheme_if_needed(url, new_scheme): + """Given a URL that may or may not have a scheme, prepend the given scheme. + Does not replace a present scheme with the one provided as an argument. + + :rtype: str + """ + scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) + + # urlparse is a finicky beast, and sometimes decides that there isn't a + # netloc present. Assume that it's being over-cautious, and switch netloc + # and path if urlparse decided there was no netloc. + if not netloc: + netloc, path = path, netloc + + return urlunparse((scheme, netloc, path, params, query, fragment)) + + +def get_auth_from_url(url): + """Given a url with authentication components, extract them into a tuple of + username,password. + + :rtype: (str,str) + """ + parsed = urlparse(url) + + try: + auth = (unquote(parsed.username), unquote(parsed.password)) + except (AttributeError, TypeError): + auth = ('', '') + + return auth + + +# Moved outside of function to avoid recompile every call +_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') +_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') + + +def check_header_validity(header): + """Verifies that header value is a string which doesn't contain + leading whitespace or return characters. This prevents unintended + header injection. + + :param header: tuple, in the format (name, value). + """ + name, value = header + + if isinstance(value, bytes): + pat = _CLEAN_HEADER_REGEX_BYTE + else: + pat = _CLEAN_HEADER_REGEX_STR + try: + if not pat.match(value): + raise InvalidHeader("Invalid return character or leading space in header: %s" % name) + except TypeError: + raise InvalidHeader("Value for header {%s: %s} must be of type str or " + "bytes, not %s" % (name, value, type(value))) + + +def urldefragauth(url): + """ + Given a url remove the fragment and the authentication part. + + :rtype: str + """ + scheme, netloc, path, params, query, fragment = urlparse(url) + + # see func:`prepend_scheme_if_needed` + if not netloc: + netloc, path = path, netloc + + netloc = netloc.rsplit('@', 1)[-1] + + return urlunparse((scheme, netloc, path, params, query, '')) + + +def rewind_body(prepared_request): + """Move file pointer back to its recorded starting position + so it can be read again on redirect. 
+ """ + body_seek = getattr(prepared_request.body, 'seek', None) + if body_seek is not None and isinstance(prepared_request._body_position, integer_types): + try: + body_seek(prepared_request._body_position) + except (IOError, OSError): + raise UnrewindableBodyError("An error occurred when rewinding request " + "body for redirect.") + else: + raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyc new file mode 100644 index 0000000..0e265de Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/retrying.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/retrying.py new file mode 100644 index 0000000..6d1e627 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/retrying.py @@ -0,0 +1,267 @@ +## Copyright 2013-2014 Ray Holder +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. + +import random +from pip._vendor import six +import sys +import time +import traceback + + +# sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint... 
+MAX_WAIT = 1073741823 + + +def retry(*dargs, **dkw): + """ + Decorator function that instantiates the Retrying object + @param *dargs: positional arguments passed to Retrying object + @param **dkw: keyword arguments passed to the Retrying object + """ + # support both @retry and @retry() as valid syntax + if len(dargs) == 1 and callable(dargs[0]): + def wrap_simple(f): + + @six.wraps(f) + def wrapped_f(*args, **kw): + return Retrying().call(f, *args, **kw) + + return wrapped_f + + return wrap_simple(dargs[0]) + + else: + def wrap(f): + + @six.wraps(f) + def wrapped_f(*args, **kw): + return Retrying(*dargs, **dkw).call(f, *args, **kw) + + return wrapped_f + + return wrap + + +class Retrying(object): + + def __init__(self, + stop=None, wait=None, + stop_max_attempt_number=None, + stop_max_delay=None, + wait_fixed=None, + wait_random_min=None, wait_random_max=None, + wait_incrementing_start=None, wait_incrementing_increment=None, + wait_exponential_multiplier=None, wait_exponential_max=None, + retry_on_exception=None, + retry_on_result=None, + wrap_exception=False, + stop_func=None, + wait_func=None, + wait_jitter_max=None): + + self._stop_max_attempt_number = 5 if stop_max_attempt_number is None else stop_max_attempt_number + self._stop_max_delay = 100 if stop_max_delay is None else stop_max_delay + self._wait_fixed = 1000 if wait_fixed is None else wait_fixed + self._wait_random_min = 0 if wait_random_min is None else wait_random_min + self._wait_random_max = 1000 if wait_random_max is None else wait_random_max + self._wait_incrementing_start = 0 if wait_incrementing_start is None else wait_incrementing_start + self._wait_incrementing_increment = 100 if wait_incrementing_increment is None else wait_incrementing_increment + self._wait_exponential_multiplier = 1 if wait_exponential_multiplier is None else wait_exponential_multiplier + self._wait_exponential_max = MAX_WAIT if wait_exponential_max is None else wait_exponential_max + self._wait_jitter_max = 0 if wait_jitter_max is None else wait_jitter_max + + # TODO add chaining of stop behaviors + # stop behavior + stop_funcs = [] + if stop_max_attempt_number is not None: + stop_funcs.append(self.stop_after_attempt) + + if stop_max_delay is not None: + stop_funcs.append(self.stop_after_delay) + + if stop_func is not None: + self.stop = stop_func + + elif stop is None: + self.stop = lambda attempts, delay: any(f(attempts, delay) for f in stop_funcs) + + else: + self.stop = getattr(self, stop) + + # TODO add chaining of wait behaviors + # wait behavior + wait_funcs = [lambda *args, **kwargs: 0] + if wait_fixed is not None: + wait_funcs.append(self.fixed_sleep) + + if wait_random_min is not None or wait_random_max is not None: + wait_funcs.append(self.random_sleep) + + if wait_incrementing_start is not None or wait_incrementing_increment is not None: + wait_funcs.append(self.incrementing_sleep) + + if wait_exponential_multiplier is not None or wait_exponential_max is not None: + wait_funcs.append(self.exponential_sleep) + + if wait_func is not None: + self.wait = wait_func + + elif wait is None: + self.wait = lambda attempts, delay: max(f(attempts, delay) for f in wait_funcs) + + else: + self.wait = getattr(self, wait) + + # retry on exception filter + if retry_on_exception is None: + self._retry_on_exception = self.always_reject + else: + self._retry_on_exception = retry_on_exception + + # TODO simplify retrying by Exception types + # retry on result filter + if retry_on_result is None: + self._retry_on_result = self.never_reject + else: + 
self._retry_on_result = retry_on_result + + self._wrap_exception = wrap_exception + + def stop_after_attempt(self, previous_attempt_number, delay_since_first_attempt_ms): + """Stop after the previous attempt >= stop_max_attempt_number.""" + return previous_attempt_number >= self._stop_max_attempt_number + + def stop_after_delay(self, previous_attempt_number, delay_since_first_attempt_ms): + """Stop after the time from the first attempt >= stop_max_delay.""" + return delay_since_first_attempt_ms >= self._stop_max_delay + + def no_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): + """Don't sleep at all before retrying.""" + return 0 + + def fixed_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): + """Sleep a fixed amount of time between each retry.""" + return self._wait_fixed + + def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): + """Sleep a random amount of time between wait_random_min and wait_random_max""" + return random.randint(self._wait_random_min, self._wait_random_max) + + def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): + """ + Sleep an incremental amount of time after each attempt, starting at + wait_incrementing_start and incrementing by wait_incrementing_increment + """ + result = self._wait_incrementing_start + (self._wait_incrementing_increment * (previous_attempt_number - 1)) + if result < 0: + result = 0 + return result + + def exponential_sleep(self, previous_attempt_number, delay_since_first_attempt_ms): + exp = 2 ** previous_attempt_number + result = self._wait_exponential_multiplier * exp + if result > self._wait_exponential_max: + result = self._wait_exponential_max + if result < 0: + result = 0 + return result + + def never_reject(self, result): + return False + + def always_reject(self, result): + return True + + def should_reject(self, attempt): + reject = False + if attempt.has_exception: + reject |= self._retry_on_exception(attempt.value[1]) + else: + reject |= self._retry_on_result(attempt.value) + + return reject + + def call(self, fn, *args, **kwargs): + start_time = int(round(time.time() * 1000)) + attempt_number = 1 + while True: + try: + attempt = Attempt(fn(*args, **kwargs), attempt_number, False) + except: + tb = sys.exc_info() + attempt = Attempt(tb, attempt_number, True) + + if not self.should_reject(attempt): + return attempt.get(self._wrap_exception) + + delay_since_first_attempt_ms = int(round(time.time() * 1000)) - start_time + if self.stop(attempt_number, delay_since_first_attempt_ms): + if not self._wrap_exception and attempt.has_exception: + # get() on an attempt with an exception should cause it to be raised, but raise just in case + raise attempt.get() + else: + raise RetryError(attempt) + else: + sleep = self.wait(attempt_number, delay_since_first_attempt_ms) + if self._wait_jitter_max: + jitter = random.random() * self._wait_jitter_max + sleep = sleep + max(0, jitter) + time.sleep(sleep / 1000.0) + + attempt_number += 1 + + +class Attempt(object): + """ + An Attempt encapsulates a call to a target function that may end as a + normal return value from the function or an Exception depending on what + occurred during the execution. + """ + + def __init__(self, value, attempt_number, has_exception): + self.value = value + self.attempt_number = attempt_number + self.has_exception = has_exception + + def get(self, wrap_exception=False): + """ + Return the return value of this Attempt instance or raise an Exception. 
+ If wrap_exception is true, this Attempt is wrapped inside of a + RetryError before being raised. + """ + if self.has_exception: + if wrap_exception: + raise RetryError(self) + else: + six.reraise(self.value[0], self.value[1], self.value[2]) + else: + return self.value + + def __repr__(self): + if self.has_exception: + return "Attempts: {0}, Error:\n{1}".format(self.attempt_number, "".join(traceback.format_tb(self.value[2]))) + else: + return "Attempts: {0}, Value: {1}".format(self.attempt_number, self.value) + + +class RetryError(Exception): + """ + A RetryError encapsulates the last Attempt instance right before giving up. + """ + + def __init__(self, last_attempt): + self.last_attempt = last_attempt + + def __str__(self): + return "RetryError[{0}]".format(self.last_attempt) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/retrying.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/retrying.pyc new file mode 100644 index 0000000..76704e1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/retrying.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/six.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/six.py new file mode 100644 index 0000000..89b2188 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/six.py @@ -0,0 +1,952 @@ +# Copyright (c) 2010-2018 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Utilities for writing code that runs on Python 2 and 3""" + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson <benjamin@python.org>" +__version__ = "1.12.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
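+ # Probe: len() must fit in Py_ssize_t, so a fake 2**31-length object
+ # raises OverflowError on 32-bit builds.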
+ class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + 
MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + 
"moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + 
+class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + 
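+    # On Python 2, subclassing six.Iterator supplies a 2.x-style next() that
+    # delegates to the 3.x-style __next__() defined by the subclass, e.g.:
+    #
+    #     class Counter(six.Iterator):
+    #         def __init__(self):
+    #             self.i = 0
+    #         def __next__(self):
+    #             self.i += 1
+    #             return self.i
+    #
+    # On Python 3 (the if-branch above), six.Iterator is simply object.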
callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, 
_locs_""") + + exec_("""def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
+ class metaclass(type): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + if hasattr(cls, '__qualname__'): + orig_vars['__qualname__'] = cls.__qualname__ + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def ensure_binary(s, encoding='utf-8', errors='strict'): + """Coerce **s** to six.binary_type. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> encoded to `bytes` + - `bytes` -> `bytes` + """ + if isinstance(s, text_type): + return s.encode(encoding, errors) + elif isinstance(s, binary_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + +def ensure_str(s, encoding='utf-8', errors='strict'): + """Coerce *s* to `str`. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if not isinstance(s, (text_type, binary_type)): + raise TypeError("not expecting type '%s'" % type(s)) + if PY2 and isinstance(s, text_type): + s = s.encode(encoding, errors) + elif PY3 and isinstance(s, binary_type): + s = s.decode(encoding, errors) + return s + + +def ensure_text(s, encoding='utf-8', errors='strict'): + """Coerce *s* to six.text_type. + + For Python 2: + - `unicode` -> `unicode` + - `str` -> `unicode` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if isinstance(s, binary_type): + return s.decode(encoding, errors) + elif isinstance(s, text_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. 
Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/six.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/six.pyc new file mode 100644 index 0000000..3f81327 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/six.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/__init__.py new file mode 100644 index 0000000..148a9c3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/__init__.py @@ -0,0 +1,92 @@ +""" +urllib3 - Thread-safe connection pooling and re-using. +""" + +from __future__ import absolute_import +import warnings + +from .connectionpool import ( + HTTPConnectionPool, + HTTPSConnectionPool, + connection_from_url +) + +from . import exceptions +from .filepost import encode_multipart_formdata +from .poolmanager import PoolManager, ProxyManager, proxy_from_url +from .response import HTTPResponse +from .util.request import make_headers +from .util.url import get_host +from .util.timeout import Timeout +from .util.retry import Retry + + +# Set default logging handler to avoid "No handler found" warnings. +import logging +from logging import NullHandler + +__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' +__license__ = 'MIT' +__version__ = '1.24.1' + +__all__ = ( + 'HTTPConnectionPool', + 'HTTPSConnectionPool', + 'PoolManager', + 'ProxyManager', + 'HTTPResponse', + 'Retry', + 'Timeout', + 'add_stderr_logger', + 'connection_from_url', + 'disable_warnings', + 'encode_multipart_formdata', + 'get_host', + 'make_headers', + 'proxy_from_url', +) + +logging.getLogger(__name__).addHandler(NullHandler()) + + +def add_stderr_logger(level=logging.DEBUG): + """ + Helper for quickly adding a StreamHandler to the logger. Useful for + debugging. + + Returns the handler after adding it. + """ + # This method needs to be in this __init__.py to get the __name__ correct + # even if urllib3 is vendored within another package. + logger = logging.getLogger(__name__) + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) + logger.addHandler(handler) + logger.setLevel(level) + logger.debug('Added a stderr logging handler to logger: %s', __name__) + return handler + + +# ... Clean up. +del NullHandler + + +# All warning filters *must* be appended unless you're really certain that they +# shouldn't be: otherwise, it's very hard for users to use most Python +# mechanisms to silence them. +# SecurityWarning's always go off by default. +warnings.simplefilter('always', exceptions.SecurityWarning, append=True) +# SubjectAltNameWarning's should go off once per host +warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True) +# InsecurePlatformWarning's don't vary between requests, so we keep it default. +warnings.simplefilter('default', exceptions.InsecurePlatformWarning, + append=True) +# SNIMissingWarnings should go off only once. 
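+# (The 'default' action prints only the first occurrence of a warning for
+# each location where it is raised.)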
+warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True) + + +def disable_warnings(category=exceptions.HTTPWarning): + """ + Helper for quickly disabling all urllib3 warnings. + """ + warnings.simplefilter('ignore', category) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/__init__.pyc new file mode 100644 index 0000000..7e908e8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/_collections.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/_collections.py new file mode 100644 index 0000000..34f2381 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/_collections.py @@ -0,0 +1,329 @@ +from __future__ import absolute_import +try: + from collections.abc import Mapping, MutableMapping +except ImportError: + from collections import Mapping, MutableMapping +try: + from threading import RLock +except ImportError: # Platform-specific: No threads available + class RLock: + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_value, traceback): + pass + + +from collections import OrderedDict +from .exceptions import InvalidHeader +from .packages.six import iterkeys, itervalues, PY3 + + +__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] + + +_Null = object() + + +class RecentlyUsedContainer(MutableMapping): + """ + Provides a thread-safe dict-like container which maintains up to + ``maxsize`` keys while throwing away the least-recently-used keys beyond + ``maxsize``. + + :param maxsize: + Maximum number of recent elements to retain. + + :param dispose_func: + Every time an item is evicted from the container, + ``dispose_func(value)`` is called. Callback which will get called + """ + + ContainerCls = OrderedDict + + def __init__(self, maxsize=10, dispose_func=None): + self._maxsize = maxsize + self.dispose_func = dispose_func + + self._container = self.ContainerCls() + self.lock = RLock() + + def __getitem__(self, key): + # Re-insert the item, moving it to the end of the eviction line. + with self.lock: + item = self._container.pop(key) + self._container[key] = item + return item + + def __setitem__(self, key, value): + evicted_value = _Null + with self.lock: + # Possibly evict the existing value of 'key' + evicted_value = self._container.get(key, _Null) + self._container[key] = value + + # If we didn't evict an existing value, we might have to evict the + # least recently used item from the beginning of the container. 
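+            # (OrderedDict keeps insertion order, so popitem(last=False) drops
+            # the entry used least recently; __getitem__ re-inserts keys on
+            # access to keep them at the fresh end of the ordering.)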
+ if len(self._container) > self._maxsize: + _key, evicted_value = self._container.popitem(last=False) + + if self.dispose_func and evicted_value is not _Null: + self.dispose_func(evicted_value) + + def __delitem__(self, key): + with self.lock: + value = self._container.pop(key) + + if self.dispose_func: + self.dispose_func(value) + + def __len__(self): + with self.lock: + return len(self._container) + + def __iter__(self): + raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') + + def clear(self): + with self.lock: + # Copy pointers to all values, then wipe the mapping + values = list(itervalues(self._container)) + self._container.clear() + + if self.dispose_func: + for value in values: + self.dispose_func(value) + + def keys(self): + with self.lock: + return list(iterkeys(self._container)) + + +class HTTPHeaderDict(MutableMapping): + """ + :param headers: + An iterable of field-value pairs. Must not contain multiple field names + when compared case-insensitively. + + :param kwargs: + Additional field-value pairs to pass in to ``dict.update``. + + A ``dict`` like container for storing HTTP Headers. + + Field names are stored and compared case-insensitively in compliance with + RFC 7230. Iteration provides the first case-sensitive key seen for each + case-insensitive pair. + + Using ``__setitem__`` syntax overwrites fields that compare equal + case-insensitively in order to maintain ``dict``'s api. For fields that + compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` + in a loop. + + If multiple fields that are equal case-insensitively are passed to the + constructor or ``.update``, the behavior is undefined and some will be + lost. + + >>> headers = HTTPHeaderDict() + >>> headers.add('Set-Cookie', 'foo=bar') + >>> headers.add('set-cookie', 'baz=quxx') + >>> headers['content-length'] = '7' + >>> headers['SET-cookie'] + 'foo=bar, baz=quxx' + >>> headers['Content-Length'] + '7' + """ + + def __init__(self, headers=None, **kwargs): + super(HTTPHeaderDict, self).__init__() + self._container = OrderedDict() + if headers is not None: + if isinstance(headers, HTTPHeaderDict): + self._copy_from(headers) + else: + self.extend(headers) + if kwargs: + self.extend(kwargs) + + def __setitem__(self, key, val): + self._container[key.lower()] = [key, val] + return self._container[key.lower()] + + def __getitem__(self, key): + val = self._container[key.lower()] + return ', '.join(val[1:]) + + def __delitem__(self, key): + del self._container[key.lower()] + + def __contains__(self, key): + return key.lower() in self._container + + def __eq__(self, other): + if not isinstance(other, Mapping) and not hasattr(other, 'keys'): + return False + if not isinstance(other, type(self)): + other = type(self)(other) + return (dict((k.lower(), v) for k, v in self.itermerged()) == + dict((k.lower(), v) for k, v in other.itermerged())) + + def __ne__(self, other): + return not self.__eq__(other) + + if not PY3: # Python 2 + iterkeys = MutableMapping.iterkeys + itervalues = MutableMapping.itervalues + + __marker = object() + + def __len__(self): + return len(self._container) + + def __iter__(self): + # Only provide the originally cased names + for vals in self._container.values(): + yield vals[0] + + def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. 
+ ''' + # Using the MutableMapping function directly fails due to the private marker. + # Using ordinary dict.pop would expose the internal structures. + # So let's reinvent the wheel. + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def discard(self, key): + try: + del self[key] + except KeyError: + pass + + def add(self, key, val): + """Adds a (name, value) pair, doesn't overwrite the value if it already + exists. + + >>> headers = HTTPHeaderDict(foo='bar') + >>> headers.add('Foo', 'baz') + >>> headers['foo'] + 'bar, baz' + """ + key_lower = key.lower() + new_vals = [key, val] + # Keep the common case aka no item present as fast as possible + vals = self._container.setdefault(key_lower, new_vals) + if new_vals is not vals: + vals.append(val) + + def extend(self, *args, **kwargs): + """Generic import function for any type of header-like object. + Adapted version of MutableMapping.update in order to insert items + with self.add instead of self.__setitem__ + """ + if len(args) > 1: + raise TypeError("extend() takes at most 1 positional " + "arguments ({0} given)".format(len(args))) + other = args[0] if len(args) >= 1 else () + + if isinstance(other, HTTPHeaderDict): + for key, val in other.iteritems(): + self.add(key, val) + elif isinstance(other, Mapping): + for key in other: + self.add(key, other[key]) + elif hasattr(other, "keys"): + for key in other.keys(): + self.add(key, other[key]) + else: + for key, value in other: + self.add(key, value) + + for key, value in kwargs.items(): + self.add(key, value) + + def getlist(self, key, default=__marker): + """Returns a list of all the values for the named field. Returns an + empty list if the key doesn't exist.""" + try: + vals = self._container[key.lower()] + except KeyError: + if default is self.__marker: + return [] + return default + else: + return vals[1:] + + # Backwards compatibility for httplib + getheaders = getlist + getallmatchingheaders = getlist + iget = getlist + + # Backwards compatibility for http.cookiejar + get_all = getlist + + def __repr__(self): + return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) + + def _copy_from(self, other): + for key in other: + val = other.getlist(key) + if isinstance(val, list): + # Don't need to convert tuples + val = list(val) + self._container[key.lower()] = [key] + val + + def copy(self): + clone = type(self)() + clone._copy_from(self) + return clone + + def iteritems(self): + """Iterate over all header lines, including duplicate ones.""" + for key in self: + vals = self._container[key.lower()] + for val in vals[1:]: + yield vals[0], val + + def itermerged(self): + """Iterate over all headers, merging duplicate ones together.""" + for key in self: + val = self._container[key.lower()] + yield val[0], ', '.join(val[1:]) + + def items(self): + return list(self.iteritems()) + + @classmethod + def from_httplib(cls, message): # Python 2 + """Read headers from a Python 2 httplib message object.""" + # python2.7 does not expose a proper API for exporting multiheaders + # efficiently. This function re-reads raw lines from the message + # object and extracts the multiheaders properly. + obs_fold_continued_leaders = (' ', '\t') + headers = [] + + for line in message.headers: + if line.startswith(obs_fold_continued_leaders): + if not headers: + # We received a header line that starts with OWS as described + # in RFC-7230 S3.2.4. 
This indicates a multiline header, but + # there exists no previous header to which we can attach it. + raise InvalidHeader( + 'Header continuation with no previous header: %s' % line + ) + else: + key, value = headers[-1] + headers[-1] = (key, value + ' ' + line.strip()) + continue + + key, value = line.split(':', 1) + headers.append((key, value.strip())) + + return cls(headers) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/_collections.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/_collections.pyc new file mode 100644 index 0000000..ff91b55 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/_collections.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connection.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connection.py new file mode 100644 index 0000000..02b3665 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connection.py @@ -0,0 +1,391 @@ +from __future__ import absolute_import +import datetime +import logging +import os +import socket +from socket import error as SocketError, timeout as SocketTimeout +import warnings +from .packages import six +from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection +from .packages.six.moves.http_client import HTTPException # noqa: F401 + +try: # Compiled with SSL? + import ssl + BaseSSLError = ssl.SSLError +except (ImportError, AttributeError): # Platform-specific: No SSL. + ssl = None + + class BaseSSLError(BaseException): + pass + + +try: # Python 3: + # Not a no-op, we're adding this to the namespace so it can be imported. + ConnectionError = ConnectionError +except NameError: # Python 2: + class ConnectionError(Exception): + pass + + +from .exceptions import ( + NewConnectionError, + ConnectTimeoutError, + SubjectAltNameWarning, + SystemTimeWarning, +) +from .packages.ssl_match_hostname import match_hostname, CertificateError + +from .util.ssl_ import ( + resolve_cert_reqs, + resolve_ssl_version, + assert_fingerprint, + create_urllib3_context, + ssl_wrap_socket +) + + +from .util import connection + +from ._collections import HTTPHeaderDict + +log = logging.getLogger(__name__) + +port_by_scheme = { + 'http': 80, + 'https': 443, +} + +# When updating RECENT_DATE, move it to within two years of the current date, +# and not less than 6 months ago. +# Example: if Today is 2018-01-01, then RECENT_DATE should be any date on or +# after 2016-01-01 (today - 2 years) AND before 2017-07-01 (today - 6 months) +RECENT_DATE = datetime.date(2017, 6, 30) + + +class DummyConnection(object): + """Used to detect a failed ConnectionCls import.""" + pass + + +class HTTPConnection(_HTTPConnection, object): + """ + Based on httplib.HTTPConnection but provides an extra constructor + backwards-compatibility layer between older and newer Pythons. + + Additional keyword parameters are used to configure attributes of the connection. + Accepted parameters include: + + - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` + - ``source_address``: Set the source address for the current connection. + - ``socket_options``: Set specific options on the underlying socket. If not specified, then + defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling + Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. 
+ + For example, if you wish to enable TCP Keep Alive in addition to the defaults, + you might pass:: + + HTTPConnection.default_socket_options + [ + (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), + ] + + Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). + """ + + default_port = port_by_scheme['http'] + + #: Disable Nagle's algorithm by default. + #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` + default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] + + #: Whether this connection verifies the host's certificate. + is_verified = False + + def __init__(self, *args, **kw): + if six.PY3: # Python 3 + kw.pop('strict', None) + + # Pre-set source_address. + self.source_address = kw.get('source_address') + + #: The socket options provided by the user. If no options are + #: provided, we use the default options. + self.socket_options = kw.pop('socket_options', self.default_socket_options) + + _HTTPConnection.__init__(self, *args, **kw) + + @property + def host(self): + """ + Getter method to remove any trailing dots that indicate the hostname is an FQDN. + + In general, SSL certificates don't include the trailing dot indicating a + fully-qualified domain name, and thus, they don't validate properly when + checked against a domain name that includes the dot. In addition, some + servers may not expect to receive the trailing dot when provided. + + However, the hostname with trailing dot is critical to DNS resolution; doing a + lookup with the trailing dot will properly only resolve the appropriate FQDN, + whereas a lookup without a trailing dot will search the system's search domain + list. Thus, it's important to keep the original host around for use only in + those cases where it's appropriate (i.e., when doing DNS lookup to establish the + actual TCP connection across which we're going to send HTTP requests). + """ + return self._dns_host.rstrip('.') + + @host.setter + def host(self, value): + """ + Setter for the `host` property. + + We assume that only urllib3 uses the _dns_host attribute; httplib itself + only uses `host`, and it seems reasonable that other libraries follow suit. + """ + self._dns_host = value + + def _new_conn(self): + """ Establish a socket connection and set nodelay settings on it. + + :return: New socket connection. + """ + extra_kw = {} + if self.source_address: + extra_kw['source_address'] = self.source_address + + if self.socket_options: + extra_kw['socket_options'] = self.socket_options + + try: + conn = connection.create_connection( + (self._dns_host, self.port), self.timeout, **extra_kw) + + except SocketTimeout as e: + raise ConnectTimeoutError( + self, "Connection to %s timed out. (connect timeout=%s)" % + (self.host, self.timeout)) + + except SocketError as e: + raise NewConnectionError( + self, "Failed to establish a new connection: %s" % e) + + return conn + + def _prepare_conn(self, conn): + self.sock = conn + if self._tunnel_host: + # TODO: Fix tunnel so it doesn't depend on self.sock state. 
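+            # httplib's _tunnel() sends a CONNECT request to the proxy over
+            # the raw socket attached above, before anything else is sent.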
+ self._tunnel() + # Mark this connection as not reusable + self.auto_open = 0 + + def connect(self): + conn = self._new_conn() + self._prepare_conn(conn) + + def request_chunked(self, method, url, body=None, headers=None): + """ + Alternative to the common request method, which sends the + body with chunked encoding and not as one block + """ + headers = HTTPHeaderDict(headers if headers is not None else {}) + skip_accept_encoding = 'accept-encoding' in headers + skip_host = 'host' in headers + self.putrequest( + method, + url, + skip_accept_encoding=skip_accept_encoding, + skip_host=skip_host + ) + for header, value in headers.items(): + self.putheader(header, value) + if 'transfer-encoding' not in headers: + self.putheader('Transfer-Encoding', 'chunked') + self.endheaders() + + if body is not None: + stringish_types = six.string_types + (bytes,) + if isinstance(body, stringish_types): + body = (body,) + for chunk in body: + if not chunk: + continue + if not isinstance(chunk, bytes): + chunk = chunk.encode('utf8') + len_str = hex(len(chunk))[2:] + self.send(len_str.encode('utf-8')) + self.send(b'\r\n') + self.send(chunk) + self.send(b'\r\n') + + # After the if clause, to always have a closed body + self.send(b'0\r\n\r\n') + + +class HTTPSConnection(HTTPConnection): + default_port = port_by_scheme['https'] + + ssl_version = None + + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + ssl_context=None, server_hostname=None, **kw): + + HTTPConnection.__init__(self, host, port, strict=strict, + timeout=timeout, **kw) + + self.key_file = key_file + self.cert_file = cert_file + self.ssl_context = ssl_context + self.server_hostname = server_hostname + + # Required property for Google AppEngine 1.9.0 which otherwise causes + # HTTPS requests to go out as HTTP. (See Issue #356) + self._protocol = 'https' + + def connect(self): + conn = self._new_conn() + self._prepare_conn(conn) + + if self.ssl_context is None: + self.ssl_context = create_urllib3_context( + ssl_version=resolve_ssl_version(None), + cert_reqs=resolve_cert_reqs(None), + ) + + self.sock = ssl_wrap_socket( + sock=conn, + keyfile=self.key_file, + certfile=self.cert_file, + ssl_context=self.ssl_context, + server_hostname=self.server_hostname + ) + + +class VerifiedHTTPSConnection(HTTPSConnection): + """ + Based on httplib.HTTPSConnection but wraps the socket with + SSL certification. + """ + cert_reqs = None + ca_certs = None + ca_cert_dir = None + ssl_version = None + assert_fingerprint = None + + def set_cert(self, key_file=None, cert_file=None, + cert_reqs=None, ca_certs=None, + assert_hostname=None, assert_fingerprint=None, + ca_cert_dir=None): + """ + This method should only be called once, before the connection is used. + """ + # If cert_reqs is not provided, we can try to guess. If the user gave + # us a cert database, we assume they want to use it: otherwise, if + # they gave us an SSL Context object we should use whatever is set for + # it. 
+ if cert_reqs is None: + if ca_certs or ca_cert_dir: + cert_reqs = 'CERT_REQUIRED' + elif self.ssl_context is not None: + cert_reqs = self.ssl_context.verify_mode + + self.key_file = key_file + self.cert_file = cert_file + self.cert_reqs = cert_reqs + self.assert_hostname = assert_hostname + self.assert_fingerprint = assert_fingerprint + self.ca_certs = ca_certs and os.path.expanduser(ca_certs) + self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) + + def connect(self): + # Add certificate verification + conn = self._new_conn() + hostname = self.host + + if self._tunnel_host: + self.sock = conn + # Calls self._set_hostport(), so self.host is + # self._tunnel_host below. + self._tunnel() + # Mark this connection as not reusable + self.auto_open = 0 + + # Override the host with the one we're requesting data from. + hostname = self._tunnel_host + + server_hostname = hostname + if self.server_hostname is not None: + server_hostname = self.server_hostname + + is_time_off = datetime.date.today() < RECENT_DATE + if is_time_off: + warnings.warn(( + 'System time is way off (before {0}). This will probably ' + 'lead to SSL verification errors').format(RECENT_DATE), + SystemTimeWarning + ) + + # Wrap socket using verification with the root certs in + # trusted_root_certs + if self.ssl_context is None: + self.ssl_context = create_urllib3_context( + ssl_version=resolve_ssl_version(self.ssl_version), + cert_reqs=resolve_cert_reqs(self.cert_reqs), + ) + + context = self.ssl_context + context.verify_mode = resolve_cert_reqs(self.cert_reqs) + self.sock = ssl_wrap_socket( + sock=conn, + keyfile=self.key_file, + certfile=self.cert_file, + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + server_hostname=server_hostname, + ssl_context=context) + + if self.assert_fingerprint: + assert_fingerprint(self.sock.getpeercert(binary_form=True), + self.assert_fingerprint) + elif context.verify_mode != ssl.CERT_NONE \ + and not getattr(context, 'check_hostname', False) \ + and self.assert_hostname is not False: + # While urllib3 attempts to always turn off hostname matching from + # the TLS library, this cannot always be done. So we check whether + # the TLS Library still thinks it's matching hostnames. + cert = self.sock.getpeercert() + if not cert.get('subjectAltName', ()): + warnings.warn(( + 'Certificate for {0} has no `subjectAltName`, falling back to check for a ' + '`commonName` for now. This feature is being removed by major browsers and ' + 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 ' + 'for details.)'.format(hostname)), + SubjectAltNameWarning + ) + _match_hostname(cert, self.assert_hostname or server_hostname) + + self.is_verified = ( + context.verify_mode == ssl.CERT_REQUIRED or + self.assert_fingerprint is not None + ) + + +def _match_hostname(cert, asserted_hostname): + try: + match_hostname(cert, asserted_hostname) + except CertificateError as e: + log.error( + 'Certificate did not match expected hostname: %s. ' + 'Certificate: %s', asserted_hostname, cert + ) + # Add cert to exception and reraise so client code can inspect + # the cert when catching the exception, if they want to + e._peer_cert = cert + raise + + +if ssl: + # Make a copy for testing. 
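+    # The non-verifying class stays importable as UnverifiedHTTPSConnection,
+    # while the public HTTPSConnection name is rebound to the verifying one.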
+ UnverifiedHTTPSConnection = HTTPSConnection + HTTPSConnection = VerifiedHTTPSConnection +else: + HTTPSConnection = DummyConnection diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connection.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connection.pyc new file mode 100644 index 0000000..0faf4b8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connection.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.py new file mode 100644 index 0000000..f7a8f19 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.py @@ -0,0 +1,896 @@ +from __future__ import absolute_import +import errno +import logging +import sys +import warnings + +from socket import error as SocketError, timeout as SocketTimeout +import socket + + +from .exceptions import ( + ClosedPoolError, + ProtocolError, + EmptyPoolError, + HeaderParsingError, + HostChangedError, + LocationValueError, + MaxRetryError, + ProxyError, + ReadTimeoutError, + SSLError, + TimeoutError, + InsecureRequestWarning, + NewConnectionError, +) +from .packages.ssl_match_hostname import CertificateError +from .packages import six +from .packages.six.moves import queue +from .connection import ( + port_by_scheme, + DummyConnection, + HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, + HTTPException, BaseSSLError, +) +from .request import RequestMethods +from .response import HTTPResponse + +from .util.connection import is_connection_dropped +from .util.request import set_file_position +from .util.response import assert_header_parsing +from .util.retry import Retry +from .util.timeout import Timeout +from .util.url import get_host, Url, NORMALIZABLE_SCHEMES +from .util.queue import LifoQueue + + +xrange = six.moves.xrange + +log = logging.getLogger(__name__) + +_Default = object() + + +# Pool objects +class ConnectionPool(object): + """ + Base class for all connection pools, such as + :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. + """ + + scheme = None + QueueCls = LifoQueue + + def __init__(self, host, port=None): + if not host: + raise LocationValueError("No host specified.") + + self.host = _ipv6_host(host, self.scheme) + self._proxy_host = host.lower() + self.port = port + + def __str__(self): + return '%s(host=%r, port=%r)' % (type(self).__name__, + self.host, self.port) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + # Return False to re-raise any potential exceptions + return False + + def close(self): + """ + Close all pooled connections and disable the pool. + """ + pass + + +# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 +_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} + + +class HTTPConnectionPool(ConnectionPool, RequestMethods): + """ + Thread-safe connection pool for one host. + + :param host: + Host used for this HTTP Connection (e.g. "localhost"), passed into + :class:`httplib.HTTPConnection`. + + :param port: + Port used for this HTTP Connection (None is equivalent to 80), passed + into :class:`httplib.HTTPConnection`. + + :param strict: + Causes BadStatusLine to be raised if the status line can't be parsed + as a valid HTTP/1.0 or 1.1 status line, passed into + :class:`httplib.HTTPConnection`. + + .. note:: + Only works in Python 2. 
This parameter is ignored in Python 3. + + :param timeout: + Socket timeout in seconds for each individual connection. This can + be a float or integer, which sets the timeout for the HTTP request, + or an instance of :class:`urllib3.util.Timeout` which gives you more + fine-grained control over request timeouts. After the constructor has + been parsed, this is always a `urllib3.util.Timeout` object. + + :param maxsize: + Number of connections to save that can be reused. More than 1 is useful + in multithreaded situations. If ``block`` is set to False, more + connections will be created but they will not be saved once they've + been used. + + :param block: + If set to True, no more than ``maxsize`` connections will be used at + a time. When no free connections are available, the call will block + until a connection has been released. This is a useful side effect for + particular multithreaded situations where one does not want to use more + than maxsize connections per host to prevent flooding. + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + + :param retries: + Retry configuration to use by default with requests in this pool. + + :param _proxy: + Parsed proxy URL, should not be used directly, instead, see + :class:`urllib3.connectionpool.ProxyManager`" + + :param _proxy_headers: + A dictionary with proxy headers, should not be used directly, + instead, see :class:`urllib3.connectionpool.ProxyManager`" + + :param \\**conn_kw: + Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, + :class:`urllib3.connection.HTTPSConnection` instances. + """ + + scheme = 'http' + ConnectionCls = HTTPConnection + ResponseCls = HTTPResponse + + def __init__(self, host, port=None, strict=False, + timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, + headers=None, retries=None, + _proxy=None, _proxy_headers=None, + **conn_kw): + ConnectionPool.__init__(self, host, port) + RequestMethods.__init__(self, headers) + + self.strict = strict + + if not isinstance(timeout, Timeout): + timeout = Timeout.from_float(timeout) + + if retries is None: + retries = Retry.DEFAULT + + self.timeout = timeout + self.retries = retries + + self.pool = self.QueueCls(maxsize) + self.block = block + + self.proxy = _proxy + self.proxy_headers = _proxy_headers or {} + + # Fill the queue up so that doing get() on it will block properly + for _ in xrange(maxsize): + self.pool.put(None) + + # These are mostly for testing and debugging purposes. + self.num_connections = 0 + self.num_requests = 0 + self.conn_kw = conn_kw + + if self.proxy: + # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. + # We cannot know if the user has added default socket options, so we cannot replace the + # list. + self.conn_kw.setdefault('socket_options', []) + + def _new_conn(self): + """ + Return a fresh :class:`HTTPConnection`. + """ + self.num_connections += 1 + log.debug("Starting new HTTP connection (%d): %s:%s", + self.num_connections, self.host, self.port or "80") + + conn = self.ConnectionCls(host=self.host, port=self.port, + timeout=self.timeout.connect_timeout, + strict=self.strict, **self.conn_kw) + return conn + + def _get_conn(self, timeout=None): + """ + Get a connection. Will return a pooled connection if one is available. + + If no connections are available and :prop:`.block` is ``False``, then a + fresh connection is returned. 
+ + :param timeout: + Seconds to wait before giving up and raising + :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and + :prop:`.block` is ``True``. + """ + conn = None + try: + conn = self.pool.get(block=self.block, timeout=timeout) + + except AttributeError: # self.pool is None + raise ClosedPoolError(self, "Pool is closed.") + + except queue.Empty: + if self.block: + raise EmptyPoolError(self, + "Pool reached maximum size and no more " + "connections are allowed.") + pass # Oh well, we'll create a new connection then + + # If this is a persistent connection, check if it got disconnected + if conn and is_connection_dropped(conn): + log.debug("Resetting dropped connection: %s", self.host) + conn.close() + if getattr(conn, 'auto_open', 1) == 0: + # This is a proxied connection that has been mutated by + # httplib._tunnel() and cannot be reused (since it would + # attempt to bypass the proxy) + conn = None + + return conn or self._new_conn() + + def _put_conn(self, conn): + """ + Put a connection back into the pool. + + :param conn: + Connection object for the current host and port as returned by + :meth:`._new_conn` or :meth:`._get_conn`. + + If the pool is already full, the connection is closed and discarded + because we exceeded maxsize. If connections are discarded frequently, + then maxsize should be increased. + + If the pool is closed, then the connection will be closed and discarded. + """ + try: + self.pool.put(conn, block=False) + return # Everything is dandy, done. + except AttributeError: + # self.pool is None. + pass + except queue.Full: + # This should never happen if self.block == True + log.warning( + "Connection pool is full, discarding connection: %s", + self.host) + + # Connection never got put back into the pool, close it. + if conn: + conn.close() + + def _validate_conn(self, conn): + """ + Called right before a request is made, after the socket is created. + """ + pass + + def _prepare_proxy(self, conn): + # Nothing to do for HTTP connections. + pass + + def _get_timeout(self, timeout): + """ Helper that always returns a :class:`urllib3.util.Timeout` """ + if timeout is _Default: + return self.timeout.clone() + + if isinstance(timeout, Timeout): + return timeout.clone() + else: + # User passed us an int/float. This is for backwards compatibility, + # can be removed later + return Timeout.from_float(timeout) + + def _raise_timeout(self, err, url, timeout_value): + """Is the error actually a timeout? Will raise a ReadTimeout or pass""" + + if isinstance(err, SocketTimeout): + raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + + # See the above comment about EAGAIN in Python 3. In Python 2 we have + # to specifically catch it and throw the timeout error + if hasattr(err, 'errno') and err.errno in _blocking_errnos: + raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + + # Catch possible read timeouts thrown as SSL errors. If not the + # case, rethrow the original. We need to do this because of: + # http://bugs.python.org/issue10272 + if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python < 2.7.4 + raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + + def _make_request(self, conn, method, url, timeout=_Default, chunked=False, + **httplib_request_kw): + """ + Perform a request on a given urllib connection object taken from our + pool. 
+ + :param conn: + a connection from one of our connection pools + + :param timeout: + Socket timeout in seconds for the request. This can be a + float or integer, which will set the same timeout value for + the socket connect and the socket read, or an instance of + :class:`urllib3.util.Timeout`, which gives you more fine-grained + control over your timeouts. + """ + self.num_requests += 1 + + timeout_obj = self._get_timeout(timeout) + timeout_obj.start_connect() + conn.timeout = timeout_obj.connect_timeout + + # Trigger any extra validation we need to do. + try: + self._validate_conn(conn) + except (SocketTimeout, BaseSSLError) as e: + # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. + self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) + raise + + # conn.request() calls httplib.*.request, not the method in + # urllib3.request. It also calls makefile (recv) on the socket. + if chunked: + conn.request_chunked(method, url, **httplib_request_kw) + else: + conn.request(method, url, **httplib_request_kw) + + # Reset the timeout for the recv() on the socket + read_timeout = timeout_obj.read_timeout + + # App Engine doesn't have a sock attr + if getattr(conn, 'sock', None): + # In Python 3 socket.py will catch EAGAIN and return None when you + # try and read into the file pointer created by http.client, which + # instead raises a BadStatusLine exception. Instead of catching + # the exception and assuming all BadStatusLine exceptions are read + # timeouts, check for a zero timeout before making the request. + if read_timeout == 0: + raise ReadTimeoutError( + self, url, "Read timed out. (read timeout=%s)" % read_timeout) + if read_timeout is Timeout.DEFAULT_TIMEOUT: + conn.sock.settimeout(socket.getdefaulttimeout()) + else: # None or a value + conn.sock.settimeout(read_timeout) + + # Receive the response from the server + try: + try: # Python 2.7, use buffering of HTTP responses + httplib_response = conn.getresponse(buffering=True) + except TypeError: # Python 3 + try: + httplib_response = conn.getresponse() + except Exception as e: + # Remove the TypeError from the exception chain in Python 3; + # otherwise it looks like a programming error was the cause. + six.raise_from(e, None) + except (SocketTimeout, BaseSSLError, SocketError) as e: + self._raise_timeout(err=e, url=url, timeout_value=read_timeout) + raise + + # AppEngine doesn't have a version attr. + http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') + log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port, + method, url, http_version, httplib_response.status, + httplib_response.length) + + try: + assert_header_parsing(httplib_response.msg) + except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 + log.warning( + 'Failed to parse headers (url=%s): %s', + self._absolute_url(url), hpe, exc_info=True) + + return httplib_response + + def _absolute_url(self, path): + return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url + + def close(self): + """ + Close all pooled connections and disable the pool. + """ + if self.pool is None: + return + # Disable access to the pool + old_pool, self.pool = self.pool, None + + try: + while True: + conn = old_pool.get(block=False) + if conn: + conn.close() + + except queue.Empty: + pass # Done. + + def is_same_host(self, url): + """ + Check if the given ``url`` is a member of the same host as this + connection pool. 
+ """ + if url.startswith('/'): + return True + + # TODO: Add optional support for socket.gethostbyname checking. + scheme, host, port = get_host(url) + + host = _ipv6_host(host, self.scheme) + + # Use explicit default port for comparison when none is given + if self.port and not port: + port = port_by_scheme.get(scheme) + elif not self.port and port == port_by_scheme.get(scheme): + port = None + + return (scheme, host, port) == (self.scheme, self.host, self.port) + + def urlopen(self, method, url, body=None, headers=None, retries=None, + redirect=True, assert_same_host=True, timeout=_Default, + pool_timeout=None, release_conn=None, chunked=False, + body_pos=None, **response_kw): + """ + Get a connection from the pool and perform an HTTP request. This is the + lowest level call for making a request, so you'll need to specify all + the raw details. + + .. note:: + + More commonly, it's appropriate to use a convenience method provided + by :class:`.RequestMethods`, such as :meth:`request`. + + .. note:: + + `release_conn` will only behave as expected if + `preload_content=False` because we want to make + `preload_content=False` the default behaviour someday soon without + breaking backwards compatibility. + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param body: + Data to send in the request body (useful for creating + POST requests, see HTTPConnectionPool.post_url for + more convenience). + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + + :param retries: + Configure the number of retries to allow before raising a + :class:`~urllib3.exceptions.MaxRetryError` exception. + + Pass ``None`` to retry until you receive a response. Pass a + :class:`~urllib3.util.retry.Retry` object for fine-grained control + over different types of retries. + Pass an integer number to retry connection errors that many times, + but no other types of errors. Pass zero to never retry. + + If ``False``, then retries are disabled and any exception is raised + immediately. Also, instead of raising a MaxRetryError on redirects, + the redirect response will be returned. + + :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. + + :param redirect: + If True, automatically handle redirects (status codes 301, 302, + 303, 307, 308). Each redirect counts as a retry. Disabling retries + will disable redirect, too. + + :param assert_same_host: + If ``True``, will make sure that the host of the pool requests is + consistent else will raise HostChangedError. When False, you can + use the pool on an HTTP proxy and request foreign hosts. + + :param timeout: + If specified, overrides the default timeout for this one + request. It may be a float (in seconds) or an instance of + :class:`urllib3.util.Timeout`. + + :param pool_timeout: + If set and the pool is set to block=True, then this method will + block for ``pool_timeout`` seconds and raise EmptyPoolError if no + connection is available within the time period. + + :param release_conn: + If False, then the urlopen call will not release the connection + back into the pool once a response is received (but will release if + you read the entire contents of the response such as when + `preload_content=True`). This is useful if you're not preloading + the response's content immediately. 
You will need to call + ``r.release_conn()`` on the response ``r`` to return the connection + back into the pool. If None, it takes the value of + ``response_kw.get('preload_content', True)``. + + :param chunked: + If True, urllib3 will send the body using chunked transfer + encoding. Otherwise, urllib3 will send the body using the standard + content-length form. Defaults to False. + + :param int body_pos: + Position to seek to in file-like body in the event of a retry or + redirect. Typically this won't need to be set because urllib3 will + auto-populate the value when needed. + + :param \\**response_kw: + Additional parameters are passed to + :meth:`urllib3.response.HTTPResponse.from_httplib` + """ + if headers is None: + headers = self.headers + + if not isinstance(retries, Retry): + retries = Retry.from_int(retries, redirect=redirect, default=self.retries) + + if release_conn is None: + release_conn = response_kw.get('preload_content', True) + + # Check host + if assert_same_host and not self.is_same_host(url): + raise HostChangedError(self, url, retries) + + conn = None + + # Track whether `conn` needs to be released before + # returning/raising/recursing. Update this variable if necessary, and + # leave `release_conn` constant throughout the function. That way, if + # the function recurses, the original value of `release_conn` will be + # passed down into the recursive call, and its value will be respected. + # + # See issue #651 [1] for details. + # + # [1] <https://github.com/shazow/urllib3/issues/651> + release_this_conn = release_conn + + # Merge the proxy headers. Only do this in HTTP. We have to copy the + # headers dict so we can safely change it without those changes being + # reflected in anyone else's copy. + if self.scheme == 'http': + headers = headers.copy() + headers.update(self.proxy_headers) + + # Must keep the exception bound to a separate variable or else Python 3 + # complains about UnboundLocalError. + err = None + + # Keep track of whether we cleanly exited the except block. This + # ensures we do proper cleanup in finally. + clean_exit = False + + # Rewind body position, if needed. Record current position + # for future rewinds in the event of a redirect/retry. + body_pos = set_file_position(body, body_pos) + + try: + # Request a connection from the queue. + timeout_obj = self._get_timeout(timeout) + conn = self._get_conn(timeout=pool_timeout) + + conn.timeout = timeout_obj.connect_timeout + + is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) + if is_new_proxy_conn: + self._prepare_proxy(conn) + + # Make the request on the httplib connection object. + httplib_response = self._make_request(conn, method, url, + timeout=timeout_obj, + body=body, headers=headers, + chunked=chunked) + + # If we're going to release the connection in ``finally:``, then + # the response doesn't need to know about the connection. Otherwise + # it will also try to release it and we'll have a double-release + # mess. + response_conn = conn if not release_conn else None + + # Pass method to Response for length checking + response_kw['request_method'] = method + + # Import httplib's response into our own wrapper object + response = self.ResponseCls.from_httplib(httplib_response, + pool=self, + connection=response_conn, + retries=retries, + **response_kw) + + # Everything went great! + clean_exit = True + + except queue.Empty: + # Timed out by queue. 
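+            # Surface the queue's timeout as a urllib3-level error so callers
+            # only ever see library exceptions, never the queue internals.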
+ raise EmptyPoolError(self, "No pool connections are available.") + + except (TimeoutError, HTTPException, SocketError, ProtocolError, + BaseSSLError, SSLError, CertificateError) as e: + # Discard the connection for these exceptions. It will be + # replaced during the next _get_conn() call. + clean_exit = False + if isinstance(e, (BaseSSLError, CertificateError)): + e = SSLError(e) + elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: + e = ProxyError('Cannot connect to proxy.', e) + elif isinstance(e, (SocketError, HTTPException)): + e = ProtocolError('Connection aborted.', e) + + retries = retries.increment(method, url, error=e, _pool=self, + _stacktrace=sys.exc_info()[2]) + retries.sleep() + + # Keep track of the error for the retry warning. + err = e + + finally: + if not clean_exit: + # We hit some kind of exception, handled or otherwise. We need + # to throw the connection away unless explicitly told not to. + # Close the connection, set the variable to None, and make sure + # we put the None back in the pool to avoid leaking it. + conn = conn and conn.close() + release_this_conn = True + + if release_this_conn: + # Put the connection back to be reused. If the connection is + # expired then it will be None, which will get replaced with a + # fresh connection during _get_conn. + self._put_conn(conn) + + if not conn: + # Try again + log.warning("Retrying (%r) after connection " + "broken by '%r': %s", retries, err, url) + return self.urlopen(method, url, body, headers, retries, + redirect, assert_same_host, + timeout=timeout, pool_timeout=pool_timeout, + release_conn=release_conn, body_pos=body_pos, + **response_kw) + + def drain_and_release_conn(response): + try: + # discard any remaining response body, the connection will be + # released back to the pool once the entire response is read + response.read() + except (TimeoutError, HTTPException, SocketError, ProtocolError, + BaseSSLError, SSLError) as e: + pass + + # Handle redirect? + redirect_location = redirect and response.get_redirect_location() + if redirect_location: + if response.status == 303: + method = 'GET' + + try: + retries = retries.increment(method, url, response=response, _pool=self) + except MaxRetryError: + if retries.raise_on_redirect: + # Drain and release the connection for this response, since + # we're not returning it to be released manually. + drain_and_release_conn(response) + raise + return response + + # drain and return the connection to the pool before recursing + drain_and_release_conn(response) + + retries.sleep_for_retry(response) + log.debug("Redirecting %s -> %s", url, redirect_location) + return self.urlopen( + method, redirect_location, body, headers, + retries=retries, redirect=redirect, + assert_same_host=assert_same_host, + timeout=timeout, pool_timeout=pool_timeout, + release_conn=release_conn, body_pos=body_pos, + **response_kw) + + # Check if we should retry the HTTP response. + has_retry_after = bool(response.getheader('Retry-After')) + if retries.is_retry(method, response.status, has_retry_after): + try: + retries = retries.increment(method, url, response=response, _pool=self) + except MaxRetryError: + if retries.raise_on_status: + # Drain and release the connection for this response, since + # we're not returning it to be released manually. 
+ drain_and_release_conn(response) + raise + return response + + # drain and return the connection to the pool before recursing + drain_and_release_conn(response) + + retries.sleep(response) + log.debug("Retry: %s", url) + return self.urlopen( + method, url, body, headers, + retries=retries, redirect=redirect, + assert_same_host=assert_same_host, + timeout=timeout, pool_timeout=pool_timeout, + release_conn=release_conn, + body_pos=body_pos, **response_kw) + + return response + + +class HTTPSConnectionPool(HTTPConnectionPool): + """ + Same as :class:`.HTTPConnectionPool`, but HTTPS. + + When Python is compiled with the :mod:`ssl` module, then + :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates, + instead of :class:`.HTTPSConnection`. + + :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, + ``assert_hostname`` and ``host`` in this order to verify connections. + If ``assert_hostname`` is False, no verification is done. + + The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, + ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is + available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade + the connection socket into an SSL socket. + """ + + scheme = 'https' + ConnectionCls = HTTPSConnection + + def __init__(self, host, port=None, + strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, + block=False, headers=None, retries=None, + _proxy=None, _proxy_headers=None, + key_file=None, cert_file=None, cert_reqs=None, + ca_certs=None, ssl_version=None, + assert_hostname=None, assert_fingerprint=None, + ca_cert_dir=None, **conn_kw): + + HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, + block, headers, retries, _proxy, _proxy_headers, + **conn_kw) + + if ca_certs and cert_reqs is None: + cert_reqs = 'CERT_REQUIRED' + + self.key_file = key_file + self.cert_file = cert_file + self.cert_reqs = cert_reqs + self.ca_certs = ca_certs + self.ca_cert_dir = ca_cert_dir + self.ssl_version = ssl_version + self.assert_hostname = assert_hostname + self.assert_fingerprint = assert_fingerprint + + def _prepare_conn(self, conn): + """ + Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` + and establish the tunnel if proxy is used. + """ + + if isinstance(conn, VerifiedHTTPSConnection): + conn.set_cert(key_file=self.key_file, + cert_file=self.cert_file, + cert_reqs=self.cert_reqs, + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + assert_hostname=self.assert_hostname, + assert_fingerprint=self.assert_fingerprint) + conn.ssl_version = self.ssl_version + return conn + + def _prepare_proxy(self, conn): + """ + Establish tunnel connection early, because otherwise httplib + would improperly set Host: header to proxy's IP:port. + """ + conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) + conn.connect() + + def _new_conn(self): + """ + Return a fresh :class:`httplib.HTTPSConnection`. 
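+
+        The connection comes back unconnected; the actual TCP/TLS handshake
+        is deferred until ``_validate_conn`` runs at request time.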
+ """ + self.num_connections += 1 + log.debug("Starting new HTTPS connection (%d): %s:%s", + self.num_connections, self.host, self.port or "443") + + if not self.ConnectionCls or self.ConnectionCls is DummyConnection: + raise SSLError("Can't connect to HTTPS URL because the SSL " + "module is not available.") + + actual_host = self.host + actual_port = self.port + if self.proxy is not None: + actual_host = self.proxy.host + actual_port = self.proxy.port + + conn = self.ConnectionCls(host=actual_host, port=actual_port, + timeout=self.timeout.connect_timeout, + strict=self.strict, **self.conn_kw) + + return self._prepare_conn(conn) + + def _validate_conn(self, conn): + """ + Called right before a request is made, after the socket is created. + """ + super(HTTPSConnectionPool, self)._validate_conn(conn) + + # Force connect early to allow us to validate the connection. + if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` + conn.connect() + + if not conn.is_verified: + warnings.warn(( + 'Unverified HTTPS request is being made. ' + 'Adding certificate verification is strongly advised. See: ' + 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' + '#ssl-warnings'), + InsecureRequestWarning) + + +def connection_from_url(url, **kw): + """ + Given a url, return an :class:`.ConnectionPool` instance of its host. + + This is a shortcut for not having to parse out the scheme, host, and port + of the url before creating an :class:`.ConnectionPool` instance. + + :param url: + Absolute URL string that must include the scheme. Port is optional. + + :param \\**kw: + Passes additional parameters to the constructor of the appropriate + :class:`.ConnectionPool`. Useful for specifying things like + timeout, maxsize, headers, etc. + + Example:: + + >>> conn = connection_from_url('http://google.com/') + >>> r = conn.request('GET', '/') + """ + scheme, host, port = get_host(url) + port = port or port_by_scheme.get(scheme, 80) + if scheme == 'https': + return HTTPSConnectionPool(host, port=port, **kw) + else: + return HTTPConnectionPool(host, port=port, **kw) + + +def _ipv6_host(host, scheme): + """ + Process IPv6 address literals + """ + + # httplib doesn't like it when we include brackets in IPv6 addresses + # Specifically, if we include brackets but also pass the port then + # httplib crazily doubles up the square brackets on the Host header. + # Instead, we need to make sure we never pass ``None`` as the port. + # However, for backward compatibility reasons we can't actually + # *assert* that. 
See http://bugs.python.org/issue28539
+    #
+    # Also, if an IPv6 address literal has a zone identifier, the
+    # percent sign might be URI-encoded; convert it back into ASCII
+    if host.startswith('[') and host.endswith(']'):
+        host = host.replace('%25', '%').strip('[]')
+    if scheme in NORMALIZABLE_SCHEMES:
+        host = host.lower()
+    return host
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.pyc
new file mode 100644
index 0000000..1377d43
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/__init__.pyc
new file mode 100644
index 0000000..89eaf47
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
new file mode 100644
index 0000000..f3e0094
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
@@ -0,0 +1,30 @@
+"""
+This module provides means to detect the App Engine environment.
+"""
+
+import os
+
+
+def is_appengine():
+    return (is_local_appengine() or
+            is_prod_appengine() or
+            is_prod_appengine_mvms())
+
+
+def is_appengine_sandbox():
+    return is_appengine() and not is_prod_appengine_mvms()
+
+
+def is_local_appengine():
+    return ('APPENGINE_RUNTIME' in os.environ and
+            'Development/' in os.environ['SERVER_SOFTWARE'])
+
+
+def is_prod_appengine():
+    return ('APPENGINE_RUNTIME' in os.environ and
+            'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
+            not is_prod_appengine_mvms())
+
+
+def is_prod_appengine_mvms():
+    return os.environ.get('GAE_VM', False) == 'true'
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.pyc
new file mode 100644
index 0000000..afa1640
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.pyc
new file mode 100644
index 0000000..edb477e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 0000000..bcf41c0
--- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py @@ -0,0 +1,593 @@ +""" +This module uses ctypes to bind a whole bunch of functions and constants from +SecureTransport. The goal here is to provide the low-level API to +SecureTransport. These are essentially the C-level functions and constants, and +they're pretty gross to work with. + +This code is a bastardised version of the code found in Will Bond's oscrypto +library. An enormous debt is owed to him for blazing this trail for us. For +that reason, this code should be considered to be covered both by urllib3's +license and by oscrypto's: + + Copyright (c) 2015-2016 Will Bond <will@wbond.net> + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +""" +from __future__ import absolute_import + +import platform +from ctypes.util import find_library +from ctypes import ( + c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long, + c_bool +) +from ctypes import CDLL, POINTER, CFUNCTYPE + + +security_path = find_library('Security') +if not security_path: + raise ImportError('The library Security could not be found') + + +core_foundation_path = find_library('CoreFoundation') +if not core_foundation_path: + raise ImportError('The library CoreFoundation could not be found') + + +version = platform.mac_ver()[0] +version_info = tuple(map(int, version.split('.'))) +if version_info < (10, 8): + raise OSError( + 'Only OS X 10.8 and newer are supported, not %s.%s' % ( + version_info[0], version_info[1] + ) + ) + +Security = CDLL(security_path, use_errno=True) +CoreFoundation = CDLL(core_foundation_path, use_errno=True) + +Boolean = c_bool +CFIndex = c_long +CFStringEncoding = c_uint32 +CFData = c_void_p +CFString = c_void_p +CFArray = c_void_p +CFMutableArray = c_void_p +CFDictionary = c_void_p +CFError = c_void_p +CFType = c_void_p +CFTypeID = c_ulong + +CFTypeRef = POINTER(CFType) +CFAllocatorRef = c_void_p + +OSStatus = c_int32 + +CFDataRef = POINTER(CFData) +CFStringRef = POINTER(CFString) +CFArrayRef = POINTER(CFArray) +CFMutableArrayRef = POINTER(CFMutableArray) +CFDictionaryRef = POINTER(CFDictionary) +CFArrayCallBacks = c_void_p +CFDictionaryKeyCallBacks = c_void_p +CFDictionaryValueCallBacks = c_void_p + +SecCertificateRef = POINTER(c_void_p) +SecExternalFormat = c_uint32 +SecExternalItemType = c_uint32 +SecIdentityRef = POINTER(c_void_p) +SecItemImportExportFlags = c_uint32 +SecItemImportExportKeyParameters = c_void_p +SecKeychainRef = POINTER(c_void_p) +SSLProtocol = 
c_uint32 +SSLCipherSuite = c_uint32 +SSLContextRef = POINTER(c_void_p) +SecTrustRef = POINTER(c_void_p) +SSLConnectionRef = c_uint32 +SecTrustResultType = c_uint32 +SecTrustOptionFlags = c_uint32 +SSLProtocolSide = c_uint32 +SSLConnectionType = c_uint32 +SSLSessionOption = c_uint32 + + +try: + Security.SecItemImport.argtypes = [ + CFDataRef, + CFStringRef, + POINTER(SecExternalFormat), + POINTER(SecExternalItemType), + SecItemImportExportFlags, + POINTER(SecItemImportExportKeyParameters), + SecKeychainRef, + POINTER(CFArrayRef), + ] + Security.SecItemImport.restype = OSStatus + + Security.SecCertificateGetTypeID.argtypes = [] + Security.SecCertificateGetTypeID.restype = CFTypeID + + Security.SecIdentityGetTypeID.argtypes = [] + Security.SecIdentityGetTypeID.restype = CFTypeID + + Security.SecKeyGetTypeID.argtypes = [] + Security.SecKeyGetTypeID.restype = CFTypeID + + Security.SecCertificateCreateWithData.argtypes = [ + CFAllocatorRef, + CFDataRef + ] + Security.SecCertificateCreateWithData.restype = SecCertificateRef + + Security.SecCertificateCopyData.argtypes = [ + SecCertificateRef + ] + Security.SecCertificateCopyData.restype = CFDataRef + + Security.SecCopyErrorMessageString.argtypes = [ + OSStatus, + c_void_p + ] + Security.SecCopyErrorMessageString.restype = CFStringRef + + Security.SecIdentityCreateWithCertificate.argtypes = [ + CFTypeRef, + SecCertificateRef, + POINTER(SecIdentityRef) + ] + Security.SecIdentityCreateWithCertificate.restype = OSStatus + + Security.SecKeychainCreate.argtypes = [ + c_char_p, + c_uint32, + c_void_p, + Boolean, + c_void_p, + POINTER(SecKeychainRef) + ] + Security.SecKeychainCreate.restype = OSStatus + + Security.SecKeychainDelete.argtypes = [ + SecKeychainRef + ] + Security.SecKeychainDelete.restype = OSStatus + + Security.SecPKCS12Import.argtypes = [ + CFDataRef, + CFDictionaryRef, + POINTER(CFArrayRef) + ] + Security.SecPKCS12Import.restype = OSStatus + + SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) + SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)) + + Security.SSLSetIOFuncs.argtypes = [ + SSLContextRef, + SSLReadFunc, + SSLWriteFunc + ] + Security.SSLSetIOFuncs.restype = OSStatus + + Security.SSLSetPeerID.argtypes = [ + SSLContextRef, + c_char_p, + c_size_t + ] + Security.SSLSetPeerID.restype = OSStatus + + Security.SSLSetCertificate.argtypes = [ + SSLContextRef, + CFArrayRef + ] + Security.SSLSetCertificate.restype = OSStatus + + Security.SSLSetCertificateAuthorities.argtypes = [ + SSLContextRef, + CFTypeRef, + Boolean + ] + Security.SSLSetCertificateAuthorities.restype = OSStatus + + Security.SSLSetConnection.argtypes = [ + SSLContextRef, + SSLConnectionRef + ] + Security.SSLSetConnection.restype = OSStatus + + Security.SSLSetPeerDomainName.argtypes = [ + SSLContextRef, + c_char_p, + c_size_t + ] + Security.SSLSetPeerDomainName.restype = OSStatus + + Security.SSLHandshake.argtypes = [ + SSLContextRef + ] + Security.SSLHandshake.restype = OSStatus + + Security.SSLRead.argtypes = [ + SSLContextRef, + c_char_p, + c_size_t, + POINTER(c_size_t) + ] + Security.SSLRead.restype = OSStatus + + Security.SSLWrite.argtypes = [ + SSLContextRef, + c_char_p, + c_size_t, + POINTER(c_size_t) + ] + Security.SSLWrite.restype = OSStatus + + Security.SSLClose.argtypes = [ + SSLContextRef + ] + Security.SSLClose.restype = OSStatus + + Security.SSLGetNumberSupportedCiphers.argtypes = [ + SSLContextRef, + POINTER(c_size_t) + ] + Security.SSLGetNumberSupportedCiphers.restype = OSStatus + + 
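+    # The cipher-suite calls follow SecureTransport's two-call pattern: a
+    # *GetNumber* function reports the count, then the matching getter
+    # fills a caller-allocated array of that size.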
Security.SSLGetSupportedCiphers.argtypes = [
+        SSLContextRef,
+        POINTER(SSLCipherSuite),
+        POINTER(c_size_t)
+    ]
+    Security.SSLGetSupportedCiphers.restype = OSStatus
+
+    Security.SSLSetEnabledCiphers.argtypes = [
+        SSLContextRef,
+        POINTER(SSLCipherSuite),
+        c_size_t
+    ]
+    Security.SSLSetEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetNumberEnabledCiphers.argtypes = [
+        SSLContextRef,
+        POINTER(c_size_t)
+    ]
+    Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetEnabledCiphers.argtypes = [
+        SSLContextRef,
+        POINTER(SSLCipherSuite),
+        POINTER(c_size_t)
+    ]
+    Security.SSLGetEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetNegotiatedCipher.argtypes = [
+        SSLContextRef,
+        POINTER(SSLCipherSuite)
+    ]
+    Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+    Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+        SSLContextRef,
+        POINTER(SSLProtocol)
+    ]
+    Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+    Security.SSLCopyPeerTrust.argtypes = [
+        SSLContextRef,
+        POINTER(SecTrustRef)
+    ]
+    Security.SSLCopyPeerTrust.restype = OSStatus
+
+    Security.SecTrustSetAnchorCertificates.argtypes = [
+        SecTrustRef,
+        CFArrayRef
+    ]
+    Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
+        SecTrustRef,
+        Boolean
+    ]
+    Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+    Security.SecTrustEvaluate.argtypes = [
+        SecTrustRef,
+        POINTER(SecTrustResultType)
+    ]
+    Security.SecTrustEvaluate.restype = OSStatus
+
+    Security.SecTrustGetCertificateCount.argtypes = [
+        SecTrustRef
+    ]
+    Security.SecTrustGetCertificateCount.restype = CFIndex
+
+    Security.SecTrustGetCertificateAtIndex.argtypes = [
+        SecTrustRef,
+        CFIndex
+    ]
+    Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+    Security.SSLCreateContext.argtypes = [
+        CFAllocatorRef,
+        SSLProtocolSide,
+        SSLConnectionType
+    ]
+    Security.SSLCreateContext.restype = SSLContextRef
+
+    Security.SSLSetSessionOption.argtypes = [
+        SSLContextRef,
+        SSLSessionOption,
+        Boolean
+    ]
+    Security.SSLSetSessionOption.restype = OSStatus
+
+    Security.SSLSetProtocolVersionMin.argtypes = [
+        SSLContextRef,
+        SSLProtocol
+    ]
+    Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+    Security.SSLSetProtocolVersionMax.argtypes = [
+        SSLContextRef,
+        SSLProtocol
+    ]
+    Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+    Security.SecCopyErrorMessageString.argtypes = [
+        OSStatus,
+        c_void_p
+    ]
+    Security.SecCopyErrorMessageString.restype = CFStringRef
+
+    Security.SSLReadFunc = SSLReadFunc
+    Security.SSLWriteFunc = SSLWriteFunc
+    Security.SSLContextRef = SSLContextRef
+    Security.SSLProtocol = SSLProtocol
+    Security.SSLCipherSuite = SSLCipherSuite
+    Security.SecIdentityRef = SecIdentityRef
+    Security.SecKeychainRef = SecKeychainRef
+    Security.SecTrustRef = SecTrustRef
+    Security.SecTrustResultType = SecTrustResultType
+    Security.SecExternalFormat = SecExternalFormat
+    Security.OSStatus = OSStatus
+
+    Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+        Security, 'kSecImportExportPassphrase'
+    )
+    Security.kSecImportItemIdentity = CFStringRef.in_dll(
+        Security, 'kSecImportItemIdentity'
+    )
+
+    # CoreFoundation time!
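+    # As above, declaring argtypes/restype up front lets ctypes marshal
+    # pointer-sized values correctly instead of assuming c_int.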
+ CoreFoundation.CFRetain.argtypes = [ + CFTypeRef + ] + CoreFoundation.CFRetain.restype = CFTypeRef + + CoreFoundation.CFRelease.argtypes = [ + CFTypeRef + ] + CoreFoundation.CFRelease.restype = None + + CoreFoundation.CFGetTypeID.argtypes = [ + CFTypeRef + ] + CoreFoundation.CFGetTypeID.restype = CFTypeID + + CoreFoundation.CFStringCreateWithCString.argtypes = [ + CFAllocatorRef, + c_char_p, + CFStringEncoding + ] + CoreFoundation.CFStringCreateWithCString.restype = CFStringRef + + CoreFoundation.CFStringGetCStringPtr.argtypes = [ + CFStringRef, + CFStringEncoding + ] + CoreFoundation.CFStringGetCStringPtr.restype = c_char_p + + CoreFoundation.CFStringGetCString.argtypes = [ + CFStringRef, + c_char_p, + CFIndex, + CFStringEncoding + ] + CoreFoundation.CFStringGetCString.restype = c_bool + + CoreFoundation.CFDataCreate.argtypes = [ + CFAllocatorRef, + c_char_p, + CFIndex + ] + CoreFoundation.CFDataCreate.restype = CFDataRef + + CoreFoundation.CFDataGetLength.argtypes = [ + CFDataRef + ] + CoreFoundation.CFDataGetLength.restype = CFIndex + + CoreFoundation.CFDataGetBytePtr.argtypes = [ + CFDataRef + ] + CoreFoundation.CFDataGetBytePtr.restype = c_void_p + + CoreFoundation.CFDictionaryCreate.argtypes = [ + CFAllocatorRef, + POINTER(CFTypeRef), + POINTER(CFTypeRef), + CFIndex, + CFDictionaryKeyCallBacks, + CFDictionaryValueCallBacks + ] + CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef + + CoreFoundation.CFDictionaryGetValue.argtypes = [ + CFDictionaryRef, + CFTypeRef + ] + CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef + + CoreFoundation.CFArrayCreate.argtypes = [ + CFAllocatorRef, + POINTER(CFTypeRef), + CFIndex, + CFArrayCallBacks, + ] + CoreFoundation.CFArrayCreate.restype = CFArrayRef + + CoreFoundation.CFArrayCreateMutable.argtypes = [ + CFAllocatorRef, + CFIndex, + CFArrayCallBacks + ] + CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef + + CoreFoundation.CFArrayAppendValue.argtypes = [ + CFMutableArrayRef, + c_void_p + ] + CoreFoundation.CFArrayAppendValue.restype = None + + CoreFoundation.CFArrayGetCount.argtypes = [ + CFArrayRef + ] + CoreFoundation.CFArrayGetCount.restype = CFIndex + + CoreFoundation.CFArrayGetValueAtIndex.argtypes = [ + CFArrayRef, + CFIndex + ] + CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p + + CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( + CoreFoundation, 'kCFAllocatorDefault' + ) + CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks') + CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( + CoreFoundation, 'kCFTypeDictionaryKeyCallBacks' + ) + CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( + CoreFoundation, 'kCFTypeDictionaryValueCallBacks' + ) + + CoreFoundation.CFTypeRef = CFTypeRef + CoreFoundation.CFArrayRef = CFArrayRef + CoreFoundation.CFStringRef = CFStringRef + CoreFoundation.CFDictionaryRef = CFDictionaryRef + +except (AttributeError): + raise ImportError('Error initializing ctypes') + + +class CFConst(object): + """ + A class object that acts as essentially a namespace for CoreFoundation + constants. + """ + kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) + + +class SecurityConst(object): + """ + A class object that acts as essentially a namespace for Security constants. 
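+
+    The values mirror the constants in Apple's SecureTransport headers.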
+ """ + kSSLSessionOptionBreakOnServerAuth = 0 + + kSSLProtocol2 = 1 + kSSLProtocol3 = 2 + kTLSProtocol1 = 4 + kTLSProtocol11 = 7 + kTLSProtocol12 = 8 + + kSSLClientSide = 1 + kSSLStreamType = 0 + + kSecFormatPEMSequence = 10 + + kSecTrustResultInvalid = 0 + kSecTrustResultProceed = 1 + # This gap is present on purpose: this was kSecTrustResultConfirm, which + # is deprecated. + kSecTrustResultDeny = 3 + kSecTrustResultUnspecified = 4 + kSecTrustResultRecoverableTrustFailure = 5 + kSecTrustResultFatalTrustFailure = 6 + kSecTrustResultOtherError = 7 + + errSSLProtocol = -9800 + errSSLWouldBlock = -9803 + errSSLClosedGraceful = -9805 + errSSLClosedNoNotify = -9816 + errSSLClosedAbort = -9806 + + errSSLXCertChainInvalid = -9807 + errSSLCrypto = -9809 + errSSLInternal = -9810 + errSSLCertExpired = -9814 + errSSLCertNotYetValid = -9815 + errSSLUnknownRootCert = -9812 + errSSLNoRootCert = -9813 + errSSLHostNameMismatch = -9843 + errSSLPeerHandshakeFail = -9824 + errSSLPeerUserCancelled = -9839 + errSSLWeakPeerEphemeralDHKey = -9850 + errSSLServerAuthCompleted = -9841 + errSSLRecordOverflow = -9847 + + errSecVerifyFailed = -67808 + errSecNoTrustSettings = -25263 + errSecItemNotFound = -25300 + errSecInvalidTrustSettings = -25262 + + # Cipher suites. We only pick the ones our default cipher string allows. + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F + TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3 + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F + TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2 + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 + TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A + TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 + TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038 + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 + TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040 + TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 + TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032 + TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D + TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C + TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D + TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C + TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 + TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F + TLS_AES_128_GCM_SHA256 = 0x1301 + TLS_AES_256_GCM_SHA384 = 0x1302 + TLS_CHACHA20_POLY1305_SHA256 = 0x1303 diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.pyc new file mode 100644 index 0000000..2d1b517 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py new file mode 100644 index 0000000..b13cd9e --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py @@ -0,0 +1,346 @@ +""" +Low-level helpers for the SecureTransport bindings. + +These are Python functions that are not directly related to the high-level APIs +but are necessary to get them to work. They include a whole bunch of low-level +CoreFoundation messing about and memory management. The concerns in this module +are almost entirely about trying to avoid memory leaks and providing +appropriate and useful assistance to the higher-level code. +""" +import base64 +import ctypes +import itertools +import re +import os +import ssl +import tempfile + +from .bindings import Security, CoreFoundation, CFConst + + +# This regular expression is used to grab PEM data out of a PEM bundle. +_PEM_CERTS_RE = re.compile( + b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL +) + + +def _cf_data_from_bytes(bytestring): + """ + Given a bytestring, create a CFData object from it. This CFData object must + be CFReleased by the caller. + """ + return CoreFoundation.CFDataCreate( + CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) + ) + + +def _cf_dictionary_from_tuples(tuples): + """ + Given a list of Python tuples, create an associated CFDictionary. + """ + dictionary_size = len(tuples) + + # We need to get the dictionary keys and values out in the same order. + keys = (t[0] for t in tuples) + values = (t[1] for t in tuples) + cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys) + cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values) + + return CoreFoundation.CFDictionaryCreate( + CoreFoundation.kCFAllocatorDefault, + cf_keys, + cf_values, + dictionary_size, + CoreFoundation.kCFTypeDictionaryKeyCallBacks, + CoreFoundation.kCFTypeDictionaryValueCallBacks, + ) + + +def _cf_string_to_unicode(value): + """ + Creates a Unicode string from a CFString object. Used entirely for error + reporting. + + Yes, it annoys me quite a lot that this function is this complex. + """ + value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p)) + + string = CoreFoundation.CFStringGetCStringPtr( + value_as_void_p, + CFConst.kCFStringEncodingUTF8 + ) + if string is None: + buffer = ctypes.create_string_buffer(1024) + result = CoreFoundation.CFStringGetCString( + value_as_void_p, + buffer, + 1024, + CFConst.kCFStringEncodingUTF8 + ) + if not result: + raise OSError('Error copying C string from CFStringRef') + string = buffer.value + if string is not None: + string = string.decode('utf-8') + return string + + +def _assert_no_error(error, exception_class=None): + """ + Checks the return code and throws an exception if there is an error to + report + """ + if error == 0: + return + + cf_error_string = Security.SecCopyErrorMessageString(error, None) + output = _cf_string_to_unicode(cf_error_string) + CoreFoundation.CFRelease(cf_error_string) + + if output is None or output == u'': + output = u'OSStatus %s' % error + + if exception_class is None: + exception_class = ssl.SSLError + + raise exception_class(output) + + +def _cert_array_from_pem(pem_bundle): + """ + Given a bundle of certs in PEM format, turns them into a CFArray of certs + that can be used to validate a cert chain. + """ + # Normalize the PEM bundle's line endings. 
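+    # (The regex above only matches b"\n", so an un-normalized CRLF bundle
+    # would otherwise yield no certificates at all.)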
+    pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
+
+    der_certs = [
+        base64.b64decode(match.group(1))
+        for match in _PEM_CERTS_RE.finditer(pem_bundle)
+    ]
+    if not der_certs:
+        raise ssl.SSLError("No root certificates specified")
+
+    cert_array = CoreFoundation.CFArrayCreateMutable(
+        CoreFoundation.kCFAllocatorDefault,
+        0,
+        ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
+    )
+    if not cert_array:
+        raise ssl.SSLError("Unable to allocate memory!")
+
+    try:
+        for der_bytes in der_certs:
+            certdata = _cf_data_from_bytes(der_bytes)
+            if not certdata:
+                raise ssl.SSLError("Unable to allocate memory!")
+            cert = Security.SecCertificateCreateWithData(
+                CoreFoundation.kCFAllocatorDefault, certdata
+            )
+            CoreFoundation.CFRelease(certdata)
+            if not cert:
+                raise ssl.SSLError("Unable to build cert object!")
+
+            CoreFoundation.CFArrayAppendValue(cert_array, cert)
+            CoreFoundation.CFRelease(cert)
+    except Exception:
+        # We need to free the array before the exception bubbles further.
+        # We only want to do that if an error occurs: otherwise, the caller
+        # should free.
+        CoreFoundation.CFRelease(cert_array)
+        raise
+
+    return cert_array
+
+
+def _is_cert(item):
+    """
+    Returns True if a given CFTypeRef is a certificate.
+    """
+    expected = Security.SecCertificateGetTypeID()
+    return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+    """
+    Returns True if a given CFTypeRef is an identity.
+    """
+    expected = Security.SecIdentityGetTypeID()
+    return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+    """
+    This function creates a temporary Mac keychain that we can use to work with
+    credentials. This keychain uses a one-time password and a temporary file to
+    store the data. We expect to have one keychain per socket. The returned
+    SecKeychainRef must be freed by the caller, including calling
+    SecKeychainDelete.
+
+    Returns a tuple of the SecKeychainRef and the path to the temporary
+    directory that contains it.
+    """
+    # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+    # means we cannot use mkstemp to use a generic temporary file. Instead,
+    # we're going to create a temporary directory and a filename to use there.
+    # This filename will be 8 random bytes expanded into hexadecimal. We also
+    # need some random bytes to password-protect the keychain we're creating,
+    # so we ask for 40 random bytes.
+    random_bytes = os.urandom(40)
+    filename = base64.b16encode(random_bytes[:8]).decode('utf-8')
+    password = base64.b16encode(random_bytes[8:])  # Must be valid UTF-8
+    tempdirectory = tempfile.mkdtemp()
+
+    keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
+
+    # We now want to create the keychain itself.
+    keychain = Security.SecKeychainRef()
+    status = Security.SecKeychainCreate(
+        keychain_path,
+        len(password),
+        password,
+        False,
+        None,
+        ctypes.byref(keychain)
+    )
+    _assert_no_error(status)
+
+    # Having created the keychain, we want to pass it off to the caller.
+    return keychain, tempdirectory
+
+
+def _load_items_from_file(keychain, path):
+    """
+    Given a single file, loads all the trust objects from it into arrays and
+    the keychain.
+    Returns a tuple of lists: the first list is a list of identities, the
+    second a list of certs.
+ """ + certificates = [] + identities = [] + result_array = None + + with open(path, 'rb') as f: + raw_filedata = f.read() + + try: + filedata = CoreFoundation.CFDataCreate( + CoreFoundation.kCFAllocatorDefault, + raw_filedata, + len(raw_filedata) + ) + result_array = CoreFoundation.CFArrayRef() + result = Security.SecItemImport( + filedata, # cert data + None, # Filename, leaving it out for now + None, # What the type of the file is, we don't care + None, # what's in the file, we don't care + 0, # import flags + None, # key params, can include passphrase in the future + keychain, # The keychain to insert into + ctypes.byref(result_array) # Results + ) + _assert_no_error(result) + + # A CFArray is not very useful to us as an intermediary + # representation, so we are going to extract the objects we want + # and then free the array. We don't need to keep hold of keys: the + # keychain already has them! + result_count = CoreFoundation.CFArrayGetCount(result_array) + for index in range(result_count): + item = CoreFoundation.CFArrayGetValueAtIndex( + result_array, index + ) + item = ctypes.cast(item, CoreFoundation.CFTypeRef) + + if _is_cert(item): + CoreFoundation.CFRetain(item) + certificates.append(item) + elif _is_identity(item): + CoreFoundation.CFRetain(item) + identities.append(item) + finally: + if result_array: + CoreFoundation.CFRelease(result_array) + + CoreFoundation.CFRelease(filedata) + + return (identities, certificates) + + +def _load_client_cert_chain(keychain, *paths): + """ + Load certificates and maybe keys from a number of files. Has the end goal + of returning a CFArray containing one SecIdentityRef, and then zero or more + SecCertificateRef objects, suitable for use as a client certificate trust + chain. + """ + # Ok, the strategy. + # + # This relies on knowing that macOS will not give you a SecIdentityRef + # unless you have imported a key into a keychain. This is a somewhat + # artificial limitation of macOS (for example, it doesn't necessarily + # affect iOS), but there is nothing inside Security.framework that lets you + # get a SecIdentityRef without having a key in a keychain. + # + # So the policy here is we take all the files and iterate them in order. + # Each one will use SecItemImport to have one or more objects loaded from + # it. We will also point at a keychain that macOS can use to work with the + # private key. + # + # Once we have all the objects, we'll check what we actually have. If we + # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise, + # we'll take the first certificate (which we assume to be our leaf) and + # ask the keychain to give us a SecIdentityRef with that cert's associated + # key. + # + # We'll then return a CFArray containing the trust chain: one + # SecIdentityRef and then zero-or-more SecCertificateRef objects. The + # responsibility for freeing this CFArray will be with the caller. This + # CFArray must remain alive for the entire connection, so in practice it + # will be stored with a single SSLSocket, along with the reference to the + # keychain. + certificates = [] + identities = [] + + # Filter out bad paths. + paths = (path for path in paths if path) + + try: + for file_path in paths: + new_identities, new_certs = _load_items_from_file( + keychain, file_path + ) + identities.extend(new_identities) + certificates.extend(new_certs) + + # Ok, we have everything. The question is: do we have an identity? If + # not, we want to grab one from the first cert we have. 
+ if not identities: + new_identity = Security.SecIdentityRef() + status = Security.SecIdentityCreateWithCertificate( + keychain, + certificates[0], + ctypes.byref(new_identity) + ) + _assert_no_error(status) + identities.append(new_identity) + + # We now want to release the original certificate, as we no longer + # need it. + CoreFoundation.CFRelease(certificates.pop(0)) + + # We now need to build a new CFArray that holds the trust chain. + trust_chain = CoreFoundation.CFArrayCreateMutable( + CoreFoundation.kCFAllocatorDefault, + 0, + ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), + ) + for item in itertools.chain(identities, certificates): + # ArrayAppendValue does a CFRetain on the item. That's fine, + # because the finally block will release our other refs to them. + CoreFoundation.CFArrayAppendValue(trust_chain, item) + + return trust_chain + finally: + for obj in itertools.chain(identities, certificates): + CoreFoundation.CFRelease(obj) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.pyc new file mode 100644 index 0000000..9dfddf9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/appengine.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/appengine.py new file mode 100644 index 0000000..9b42952 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/appengine.py @@ -0,0 +1,289 @@ +""" +This module provides a pool manager that uses Google App Engine's +`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_. + +Example usage:: + + from pip._vendor.urllib3 import PoolManager + from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox + + if is_appengine_sandbox(): + # AppEngineManager uses AppEngine's URLFetch API behind the scenes + http = AppEngineManager() + else: + # PoolManager uses a socket-level API behind the scenes + http = PoolManager() + + r = http.request('GET', 'https://google.com/') + +There are `limitations <https://cloud.google.com/appengine/docs/python/\ +urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be +the best choice for your application. There are three options for using +urllib3 on Google App Engine: + +1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is + cost-effective in many circumstances as long as your usage is within the + limitations. +2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets. + Sockets also have `limitations and restrictions + <https://cloud.google.com/appengine/docs/python/sockets/\ + #limitations-and-restrictions>`_ and have a lower free quota than URLFetch. + To use sockets, be sure to specify the following in your ``app.yaml``:: + + env_variables: + GAE_USE_SOCKETS_HTTPLIB : 'true' + +3. If you are using `App Engine Flexible +<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard +:class:`PoolManager` without any configuration or special environment variables. 
+""" + +from __future__ import absolute_import +import io +import logging +import warnings +from ..packages.six.moves.urllib.parse import urljoin + +from ..exceptions import ( + HTTPError, + HTTPWarning, + MaxRetryError, + ProtocolError, + TimeoutError, + SSLError +) + +from ..request import RequestMethods +from ..response import HTTPResponse +from ..util.timeout import Timeout +from ..util.retry import Retry +from . import _appengine_environ + +try: + from google.appengine.api import urlfetch +except ImportError: + urlfetch = None + + +log = logging.getLogger(__name__) + + +class AppEnginePlatformWarning(HTTPWarning): + pass + + +class AppEnginePlatformError(HTTPError): + pass + + +class AppEngineManager(RequestMethods): + """ + Connection manager for Google App Engine sandbox applications. + + This manager uses the URLFetch service directly instead of using the + emulated httplib, and is subject to URLFetch limitations as described in + the App Engine documentation `here + <https://cloud.google.com/appengine/docs/python/urlfetch>`_. + + Notably it will raise an :class:`AppEnginePlatformError` if: + * URLFetch is not available. + * If you attempt to use this on App Engine Flexible, as full socket + support is available. + * If a request size is more than 10 megabytes. + * If a response size is more than 32 megabtyes. + * If you use an unsupported request method such as OPTIONS. + + Beyond those cases, it will raise normal urllib3 errors. + """ + + def __init__(self, headers=None, retries=None, validate_certificate=True, + urlfetch_retries=True): + if not urlfetch: + raise AppEnginePlatformError( + "URLFetch is not available in this environment.") + + if is_prod_appengine_mvms(): + raise AppEnginePlatformError( + "Use normal urllib3.PoolManager instead of AppEngineManager" + "on Managed VMs, as using URLFetch is not necessary in " + "this environment.") + + warnings.warn( + "urllib3 is using URLFetch on Google App Engine sandbox instead " + "of sockets. 
To use sockets directly instead of URLFetch see " + "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.", + AppEnginePlatformWarning) + + RequestMethods.__init__(self, headers) + self.validate_certificate = validate_certificate + self.urlfetch_retries = urlfetch_retries + + self.retries = retries or Retry.DEFAULT + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + # Return False to re-raise any potential exceptions + return False + + def urlopen(self, method, url, body=None, headers=None, + retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, + **response_kw): + + retries = self._get_retries(retries, redirect) + + try: + follow_redirects = ( + redirect and + retries.redirect != 0 and + retries.total) + response = urlfetch.fetch( + url, + payload=body, + method=method, + headers=headers or {}, + allow_truncated=False, + follow_redirects=self.urlfetch_retries and follow_redirects, + deadline=self._get_absolute_timeout(timeout), + validate_certificate=self.validate_certificate, + ) + except urlfetch.DeadlineExceededError as e: + raise TimeoutError(self, e) + + except urlfetch.InvalidURLError as e: + if 'too large' in str(e): + raise AppEnginePlatformError( + "URLFetch request too large, URLFetch only " + "supports requests up to 10mb in size.", e) + raise ProtocolError(e) + + except urlfetch.DownloadError as e: + if 'Too many redirects' in str(e): + raise MaxRetryError(self, url, reason=e) + raise ProtocolError(e) + + except urlfetch.ResponseTooLargeError as e: + raise AppEnginePlatformError( + "URLFetch response too large, URLFetch only supports" + "responses up to 32mb in size.", e) + + except urlfetch.SSLCertificateError as e: + raise SSLError(e) + + except urlfetch.InvalidMethodError as e: + raise AppEnginePlatformError( + "URLFetch does not support method: %s" % method, e) + + http_response = self._urlfetch_response_to_http_response( + response, retries=retries, **response_kw) + + # Handle redirect? + redirect_location = redirect and http_response.get_redirect_location() + if redirect_location: + # Check for redirect response + if (self.urlfetch_retries and retries.raise_on_redirect): + raise MaxRetryError(self, url, "too many redirects") + else: + if http_response.status == 303: + method = 'GET' + + try: + retries = retries.increment(method, url, response=http_response, _pool=self) + except MaxRetryError: + if retries.raise_on_redirect: + raise MaxRetryError(self, url, "too many redirects") + return http_response + + retries.sleep_for_retry(http_response) + log.debug("Redirecting %s -> %s", url, redirect_location) + redirect_url = urljoin(url, redirect_location) + return self.urlopen( + method, redirect_url, body, headers, + retries=retries, redirect=redirect, + timeout=timeout, **response_kw) + + # Check if we should retry the HTTP response. + has_retry_after = bool(http_response.getheader('Retry-After')) + if retries.is_retry(method, http_response.status, has_retry_after): + retries = retries.increment( + method, url, response=http_response, _pool=self) + log.debug("Retry: %s", url) + retries.sleep(http_response) + return self.urlopen( + method, url, + body=body, headers=headers, + retries=retries, redirect=redirect, + timeout=timeout, **response_kw) + + return http_response + + def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): + + if is_prod_appengine(): + # Production GAE handles deflate encoding automatically, but does + # not remove the encoding header. 
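+            # Dropping the stale header stops HTTPResponse from attempting
+            # a second deflate decode of an already-decoded body.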
+ content_encoding = urlfetch_resp.headers.get('content-encoding') + + if content_encoding == 'deflate': + del urlfetch_resp.headers['content-encoding'] + + transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') + # We have a full response's content, + # so let's make sure we don't report ourselves as chunked data. + if transfer_encoding == 'chunked': + encodings = transfer_encoding.split(",") + encodings.remove('chunked') + urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) + + original_response = HTTPResponse( + # In order for decoding to work, we must present the content as + # a file-like object. + body=io.BytesIO(urlfetch_resp.content), + msg=urlfetch_resp.header_msg, + headers=urlfetch_resp.headers, + status=urlfetch_resp.status_code, + **response_kw + ) + + return HTTPResponse( + body=io.BytesIO(urlfetch_resp.content), + headers=urlfetch_resp.headers, + status=urlfetch_resp.status_code, + original_response=original_response, + **response_kw + ) + + def _get_absolute_timeout(self, timeout): + if timeout is Timeout.DEFAULT_TIMEOUT: + return None # Defer to URLFetch's default. + if isinstance(timeout, Timeout): + if timeout._read is not None or timeout._connect is not None: + warnings.warn( + "URLFetch does not support granular timeout settings, " + "reverting to total or default URLFetch timeout.", + AppEnginePlatformWarning) + return timeout.total + return timeout + + def _get_retries(self, retries, redirect): + if not isinstance(retries, Retry): + retries = Retry.from_int( + retries, redirect=redirect, default=self.retries) + + if retries.connect or retries.read or retries.redirect: + warnings.warn( + "URLFetch only supports total retries and does not " + "recognize connect, read, or redirect retry parameters.", + AppEnginePlatformWarning) + + return retries + + +# Alias methods from _appengine_environ to maintain public API interface. + +is_appengine = _appengine_environ.is_appengine +is_appengine_sandbox = _appengine_environ.is_appengine_sandbox +is_local_appengine = _appengine_environ.is_local_appengine +is_prod_appengine = _appengine_environ.is_prod_appengine +is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/appengine.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/appengine.pyc new file mode 100644 index 0000000..af9167f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/appengine.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py new file mode 100644 index 0000000..8ea127c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py @@ -0,0 +1,111 @@ +""" +NTLM authenticating pool, contributed by erikcederstran + +Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 +""" +from __future__ import absolute_import + +from logging import getLogger +from ntlm import ntlm + +from .. import HTTPSConnectionPool +from ..packages.six.moves.http_client import HTTPSConnection + + +log = getLogger(__name__) + + +class NTLMConnectionPool(HTTPSConnectionPool): + """ + Implements an NTLM authentication version of an urllib3 connection pool + """ + + scheme = 'https' + + def __init__(self, user, pw, authurl, *args, **kwargs): + """ + authurl is a random URL on the server that is protected by NTLM. 
+ user is the Windows user, probably in the DOMAIN\\username format. + pw is the password for the user. + """ + super(NTLMConnectionPool, self).__init__(*args, **kwargs) + self.authurl = authurl + self.rawuser = user + user_parts = user.split('\\', 1) + self.domain = user_parts[0].upper() + self.user = user_parts[1] + self.pw = pw + + def _new_conn(self): + # Performs the NTLM handshake that secures the connection. The socket + # must be kept open while requests are performed. + self.num_connections += 1 + log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s', + self.num_connections, self.host, self.authurl) + + headers = {'Connection': 'Keep-Alive'} + req_header = 'Authorization' + resp_header = 'www-authenticate' + + conn = HTTPSConnection(host=self.host, port=self.port) + + # Send negotiation message + headers[req_header] = ( + 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) + log.debug('Request headers: %s', headers) + conn.request('GET', self.authurl, None, headers) + res = conn.getresponse() + reshdr = dict(res.getheaders()) + log.debug('Response status: %s %s', res.status, res.reason) + log.debug('Response headers: %s', reshdr) + log.debug('Response data: %s [...]', res.read(100)) + + # Remove the reference to the socket, so that it can not be closed by + # the response object (we want to keep the socket open) + res.fp = None + + # Server should respond with a challenge message + auth_header_values = reshdr[resp_header].split(', ') + auth_header_value = None + for s in auth_header_values: + if s[:5] == 'NTLM ': + auth_header_value = s[5:] + if auth_header_value is None: + raise Exception('Unexpected %s response header: %s' % + (resp_header, reshdr[resp_header])) + + # Send authentication message + ServerChallenge, NegotiateFlags = \ + ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) + auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, + self.user, + self.domain, + self.pw, + NegotiateFlags) + headers[req_header] = 'NTLM %s' % auth_msg + log.debug('Request headers: %s', headers) + conn.request('GET', self.authurl, None, headers) + res = conn.getresponse() + log.debug('Response status: %s %s', res.status, res.reason) + log.debug('Response headers: %s', dict(res.getheaders())) + log.debug('Response data: %s [...]', res.read()[:100]) + if res.status != 200: + if res.status == 401: + raise Exception('Server rejected request: wrong ' + 'username or password') + raise Exception('Wrong server response: %s %s' % + (res.status, res.reason)) + + res.fp = None + log.debug('Connection established') + return conn + + def urlopen(self, method, url, body=None, headers=None, retries=3, + redirect=True, assert_same_host=True): + if headers is None: + headers = {} + headers['Connection'] = 'Keep-Alive' + return super(NTLMConnectionPool, self).urlopen(method, url, body, + headers, retries, + redirect, + assert_same_host) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.pyc new file mode 100644 index 0000000..b7b1025 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py new file mode 100644 index 0000000..363667c --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py @@ -0,0 +1,466 @@ +""" +SSL with SNI_-support for Python 2. Follow these instructions if you would +like to verify SSL certificates in Python 2. Note, the default libraries do +*not* do certificate checking; you need to do additional work to validate +certificates yourself. + +This needs the following packages installed: + +* pyOpenSSL (tested with 16.0.0) +* cryptography (minimum 1.3.4, from pyopenssl) +* idna (minimum 2.0, from cryptography) + +However, pyopenssl depends on cryptography, which depends on idna, so while we +use all three directly here we end up having relatively few packages required. + +You can install them with the following command: + + pip install pyopenssl cryptography idna + +To activate certificate checking, call +:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code +before you begin making HTTP requests. This can be done in a ``sitecustomize`` +module, or at any other time before your application begins using ``urllib3``, +like this:: + + try: + import urllib3.contrib.pyopenssl + urllib3.contrib.pyopenssl.inject_into_urllib3() + except ImportError: + pass + +Now you can use :mod:`urllib3` as you normally would, and it will support SNI +when the required modules are installed. + +Activating this module also has the positive side effect of disabling SSL/TLS +compression in Python 2 (see `CRIME attack`_). + +If you want to configure the default list of supported cipher suites, you can +set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. + +.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication +.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) +""" +from __future__ import absolute_import + +import OpenSSL.SSL +from cryptography import x509 +from cryptography.hazmat.backends.openssl import backend as openssl_backend +from cryptography.hazmat.backends.openssl.x509 import _Certificate +try: + from cryptography.x509 import UnsupportedExtension +except ImportError: + # UnsupportedExtension is gone in cryptography >= 2.1.0 + class UnsupportedExtension(Exception): + pass + +from socket import timeout, error as SocketError +from io import BytesIO + +try: # Platform-specific: Python 2 + from socket import _fileobject +except ImportError: # Platform-specific: Python 3 + _fileobject = None + from ..packages.backports.makefile import backport_makefile + +import logging +import ssl +from ..packages import six +import sys + +from .. import util + +__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] + +# SNI always works. +HAS_SNI = True + +# Map from urllib3 to PyOpenSSL compatible parameter-values. 
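+# (Illustrative: ssl.PROTOCOL_TLSv1 on the stdlib side is translated to
+# OpenSSL.SSL.TLSv1_METHOD when a PyOpenSSLContext is constructed below.)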
+_openssl_versions = { + ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, + ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, +} + +if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): + _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD + +if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): + _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD + +try: + _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) +except AttributeError: + pass + +_stdlib_to_openssl_verify = { + ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, + ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, + ssl.CERT_REQUIRED: + OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, +} +_openssl_to_stdlib_verify = dict( + (v, k) for k, v in _stdlib_to_openssl_verify.items() +) + +# OpenSSL will only write 16K at a time +SSL_WRITE_BLOCKSIZE = 16384 + +orig_util_HAS_SNI = util.HAS_SNI +orig_util_SSLContext = util.ssl_.SSLContext + + +log = logging.getLogger(__name__) + + +def inject_into_urllib3(): + 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' + + _validate_dependencies_met() + + util.ssl_.SSLContext = PyOpenSSLContext + util.HAS_SNI = HAS_SNI + util.ssl_.HAS_SNI = HAS_SNI + util.IS_PYOPENSSL = True + util.ssl_.IS_PYOPENSSL = True + + +def extract_from_urllib3(): + 'Undo monkey-patching by :func:`inject_into_urllib3`.' + + util.ssl_.SSLContext = orig_util_SSLContext + util.HAS_SNI = orig_util_HAS_SNI + util.ssl_.HAS_SNI = orig_util_HAS_SNI + util.IS_PYOPENSSL = False + util.ssl_.IS_PYOPENSSL = False + + +def _validate_dependencies_met(): + """ + Verifies that PyOpenSSL's package-level dependencies have been met. + Throws `ImportError` if they are not met. + """ + # Method added in `cryptography==1.1`; not available in older versions + from cryptography.x509.extensions import Extensions + if getattr(Extensions, "get_extension_for_class", None) is None: + raise ImportError("'cryptography' module missing required functionality. " + "Try upgrading to v1.3.4 or newer.") + + # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 + # attribute is only present on those versions. + from OpenSSL.crypto import X509 + x509 = X509() + if getattr(x509, "_x509", None) is None: + raise ImportError("'pyOpenSSL' module missing required functionality. " + "Try upgrading to v0.14 or newer.") + + +def _dnsname_to_stdlib(name): + """ + Converts a dNSName SubjectAlternativeName field to the form used by the + standard library on the given Python version. + + Cryptography produces a dNSName as a unicode string that was idna-decoded + from ASCII bytes. We need to idna-encode that string to get it back, and + then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib + uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). + + If the name cannot be idna-encoded then we return None signalling that + the name given should be skipped. + """ + def idna_encode(name): + """ + Borrowed wholesale from the Python Cryptography Project. It turns out + that we can't just safely call `idna.encode`: it can explode for + wildcard names. This avoids that problem. 
+ """ + from pip._vendor import idna + + try: + for prefix in [u'*.', u'.']: + if name.startswith(prefix): + name = name[len(prefix):] + return prefix.encode('ascii') + idna.encode(name) + return idna.encode(name) + except idna.core.IDNAError: + return None + + name = idna_encode(name) + if name is None: + return None + elif sys.version_info >= (3, 0): + name = name.decode('utf-8') + return name + + +def get_subj_alt_name(peer_cert): + """ + Given an PyOpenSSL certificate, provides all the subject alternative names. + """ + # Pass the cert to cryptography, which has much better APIs for this. + if hasattr(peer_cert, "to_cryptography"): + cert = peer_cert.to_cryptography() + else: + # This is technically using private APIs, but should work across all + # relevant versions before PyOpenSSL got a proper API for this. + cert = _Certificate(openssl_backend, peer_cert._x509) + + # We want to find the SAN extension. Ask Cryptography to locate it (it's + # faster than looping in Python) + try: + ext = cert.extensions.get_extension_for_class( + x509.SubjectAlternativeName + ).value + except x509.ExtensionNotFound: + # No such extension, return the empty list. + return [] + except (x509.DuplicateExtension, UnsupportedExtension, + x509.UnsupportedGeneralNameType, UnicodeError) as e: + # A problem has been found with the quality of the certificate. Assume + # no SAN field is present. + log.warning( + "A problem was encountered with the certificate that prevented " + "urllib3 from finding the SubjectAlternativeName field. This can " + "affect certificate validation. The error was %s", + e, + ) + return [] + + # We want to return dNSName and iPAddress fields. We need to cast the IPs + # back to strings because the match_hostname function wants them as + # strings. + # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8 + # decoded. This is pretty frustrating, but that's what the standard library + # does with certificates, and so we need to attempt to do the same. + # We also want to skip over names which cannot be idna encoded. + names = [ + ('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName)) + if name is not None + ] + names.extend( + ('IP Address', str(name)) + for name in ext.get_values_for_type(x509.IPAddress) + ) + + return names + + +class WrappedSocket(object): + '''API-compatibility wrapper for Python OpenSSL's Connection-class. + + Note: _makefile_refs, _drop() and _reuse() are needed for the garbage + collector of pypy. 
+ ''' + + def __init__(self, connection, socket, suppress_ragged_eofs=True): + self.connection = connection + self.socket = socket + self.suppress_ragged_eofs = suppress_ragged_eofs + self._makefile_refs = 0 + self._closed = False + + def fileno(self): + return self.socket.fileno() + + # Copy-pasted from Python 3.5 source code + def _decref_socketios(self): + if self._makefile_refs > 0: + self._makefile_refs -= 1 + if self._closed: + self.close() + + def recv(self, *args, **kwargs): + try: + data = self.connection.recv(*args, **kwargs) + except OpenSSL.SSL.SysCallError as e: + if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): + return b'' + else: + raise SocketError(str(e)) + except OpenSSL.SSL.ZeroReturnError as e: + if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: + return b'' + else: + raise + except OpenSSL.SSL.WantReadError: + if not util.wait_for_read(self.socket, self.socket.gettimeout()): + raise timeout('The read operation timed out') + else: + return self.recv(*args, **kwargs) + else: + return data + + def recv_into(self, *args, **kwargs): + try: + return self.connection.recv_into(*args, **kwargs) + except OpenSSL.SSL.SysCallError as e: + if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): + return 0 + else: + raise SocketError(str(e)) + except OpenSSL.SSL.ZeroReturnError as e: + if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: + return 0 + else: + raise + except OpenSSL.SSL.WantReadError: + if not util.wait_for_read(self.socket, self.socket.gettimeout()): + raise timeout('The read operation timed out') + else: + return self.recv_into(*args, **kwargs) + + def settimeout(self, timeout): + return self.socket.settimeout(timeout) + + def _send_until_done(self, data): + while True: + try: + return self.connection.send(data) + except OpenSSL.SSL.WantWriteError: + if not util.wait_for_write(self.socket, self.socket.gettimeout()): + raise timeout() + continue + except OpenSSL.SSL.SysCallError as e: + raise SocketError(str(e)) + + def sendall(self, data): + total_sent = 0 + while total_sent < len(data): + sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) + total_sent += sent + + def shutdown(self): + # FIXME rethrow compatible exceptions should we ever use this + self.connection.shutdown() + + def close(self): + if self._makefile_refs < 1: + try: + self._closed = True + return self.connection.close() + except OpenSSL.SSL.Error: + return + else: + self._makefile_refs -= 1 + + def getpeercert(self, binary_form=False): + x509 = self.connection.get_peer_certificate() + + if not x509: + return x509 + + if binary_form: + return OpenSSL.crypto.dump_certificate( + OpenSSL.crypto.FILETYPE_ASN1, + x509) + + return { + 'subject': ( + (('commonName', x509.get_subject().CN),), + ), + 'subjectAltName': get_subj_alt_name(x509) + } + + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + + +if _fileobject: # Platform-specific: Python 2 + def makefile(self, mode, bufsize=-1): + self._makefile_refs += 1 + return _fileobject(self, mode, bufsize, close=True) +else: # Platform-specific: Python 3 + makefile = backport_makefile + +WrappedSocket.makefile = makefile + + +class PyOpenSSLContext(object): + """ + I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible + for translating the interface of the standard library ``SSLContext`` object + to calls into PyOpenSSL. 
+ """ + def __init__(self, protocol): + self.protocol = _openssl_versions[protocol] + self._ctx = OpenSSL.SSL.Context(self.protocol) + self._options = 0 + self.check_hostname = False + + @property + def options(self): + return self._options + + @options.setter + def options(self, value): + self._options = value + self._ctx.set_options(value) + + @property + def verify_mode(self): + return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()] + + @verify_mode.setter + def verify_mode(self, value): + self._ctx.set_verify( + _stdlib_to_openssl_verify[value], + _verify_callback + ) + + def set_default_verify_paths(self): + self._ctx.set_default_verify_paths() + + def set_ciphers(self, ciphers): + if isinstance(ciphers, six.text_type): + ciphers = ciphers.encode('utf-8') + self._ctx.set_cipher_list(ciphers) + + def load_verify_locations(self, cafile=None, capath=None, cadata=None): + if cafile is not None: + cafile = cafile.encode('utf-8') + if capath is not None: + capath = capath.encode('utf-8') + self._ctx.load_verify_locations(cafile, capath) + if cadata is not None: + self._ctx.load_verify_locations(BytesIO(cadata)) + + def load_cert_chain(self, certfile, keyfile=None, password=None): + self._ctx.use_certificate_chain_file(certfile) + if password is not None: + self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password) + self._ctx.use_privatekey_file(keyfile or certfile) + + def wrap_socket(self, sock, server_side=False, + do_handshake_on_connect=True, suppress_ragged_eofs=True, + server_hostname=None): + cnx = OpenSSL.SSL.Connection(self._ctx, sock) + + if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3 + server_hostname = server_hostname.encode('utf-8') + + if server_hostname is not None: + cnx.set_tlsext_host_name(server_hostname) + + cnx.set_connect_state() + + while True: + try: + cnx.do_handshake() + except OpenSSL.SSL.WantReadError: + if not util.wait_for_read(sock, sock.gettimeout()): + raise timeout('select timed out') + continue + except OpenSSL.SSL.Error as e: + raise ssl.SSLError('bad handshake: %r' % e) + break + + return WrappedSocket(cnx, sock) + + +def _verify_callback(cnx, x509, err_no, err_depth, return_code): + return err_no == 0 diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.pyc new file mode 100644 index 0000000..f9478e8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.py new file mode 100644 index 0000000..77cb59e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.py @@ -0,0 +1,804 @@ +""" +SecureTranport support for urllib3 via ctypes. + +This makes platform-native TLS available to urllib3 users on macOS without the +use of a compiler. This is an important feature because the Python Package +Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL +that ships with macOS is not capable of doing TLSv1.2. The only way to resolve +this is to give macOS users an alternative solution to the problem, and that +solution is to use SecureTransport. + +We use ctypes here because this solution must not require a compiler. 
That's +because pip is not allowed to require a compiler either. + +This is not intended to be a seriously long-term solution to this problem. +The hope is that PEP 543 will eventually solve this issue for us, at which +point we can retire this contrib module. But in the short term, we need to +solve the impending tire fire that is Python on Mac without this kind of +contrib module. So...here we are. + +To use this module, simply import and inject it:: + + import urllib3.contrib.securetransport + urllib3.contrib.securetransport.inject_into_urllib3() + +Happy TLSing! +""" +from __future__ import absolute_import + +import contextlib +import ctypes +import errno +import os.path +import shutil +import socket +import ssl +import threading +import weakref + +from .. import util +from ._securetransport.bindings import ( + Security, SecurityConst, CoreFoundation +) +from ._securetransport.low_level import ( + _assert_no_error, _cert_array_from_pem, _temporary_keychain, + _load_client_cert_chain +) + +try: # Platform-specific: Python 2 + from socket import _fileobject +except ImportError: # Platform-specific: Python 3 + _fileobject = None + from ..packages.backports.makefile import backport_makefile + +__all__ = ['inject_into_urllib3', 'extract_from_urllib3'] + +# SNI always works +HAS_SNI = True + +orig_util_HAS_SNI = util.HAS_SNI +orig_util_SSLContext = util.ssl_.SSLContext + +# This dictionary is used by the read callback to obtain a handle to the +# calling wrapped socket. This is a pretty silly approach, but for now it'll +# do. I feel like I should be able to smuggle a handle to the wrapped socket +# directly in the SSLConnectionRef, but for now this approach will work I +# guess. +# +# We need to lock around this structure for inserts, but we don't do it for +# reads/writes in the callbacks. The reasoning here goes as follows: +# +# 1. It is not possible to call into the callbacks before the dictionary is +# populated, so once in the callback the id must be in the dictionary. +# 2. The callbacks don't mutate the dictionary, they only read from it, and +# so cannot conflict with any of the insertions. +# +# This is good: if we had to lock in the callbacks we'd drastically slow down +# the performance of this code. +_connection_refs = weakref.WeakValueDictionary() +_connection_ref_lock = threading.Lock() + +# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over +# for no better reason than we need *a* limit, and this one is right there. +SSL_WRITE_BLOCKSIZE = 16384 + +# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to +# individual cipher suites. We need to do this because this is how +# SecureTransport wants them. 
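+# (Illustrative correspondence: the OpenSSL cipher name
+# "ECDHE-RSA-AES256-GCM-SHA384" maps to the IANA-style constant
+# TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 listed below.)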
+CIPHER_SUITES = [ + SecurityConst.TLS_AES_256_GCM_SHA384, + SecurityConst.TLS_CHACHA20_POLY1305_SHA256, + SecurityConst.TLS_AES_128_GCM_SHA256, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, + SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, + SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA, +] + +# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of +# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. +_protocol_to_min_max = { + ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), +} + +if hasattr(ssl, "PROTOCOL_SSLv2"): + _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( + SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2 + ) +if hasattr(ssl, "PROTOCOL_SSLv3"): + _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( + SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3 + ) +if hasattr(ssl, "PROTOCOL_TLSv1"): + _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( + SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1 + ) +if hasattr(ssl, "PROTOCOL_TLSv1_1"): + _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( + SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11 + ) +if hasattr(ssl, "PROTOCOL_TLSv1_2"): + _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( + SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12 + ) +if hasattr(ssl, "PROTOCOL_TLS"): + _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23] + + +def inject_into_urllib3(): + """ + Monkey-patch urllib3 with SecureTransport-backed SSL-support. + """ + util.ssl_.SSLContext = SecureTransportContext + util.HAS_SNI = HAS_SNI + util.ssl_.HAS_SNI = HAS_SNI + util.IS_SECURETRANSPORT = True + util.ssl_.IS_SECURETRANSPORT = True + + +def extract_from_urllib3(): + """ + Undo monkey-patching by :func:`inject_into_urllib3`. + """ + util.ssl_.SSLContext = orig_util_SSLContext + util.HAS_SNI = orig_util_HAS_SNI + util.ssl_.HAS_SNI = orig_util_HAS_SNI + util.IS_SECURETRANSPORT = False + util.ssl_.IS_SECURETRANSPORT = False + + +def _read_callback(connection_id, data_buffer, data_length_pointer): + """ + SecureTransport read callback. 
This is called by ST to request that data + be returned from the socket. + """ + wrapped_socket = None + try: + wrapped_socket = _connection_refs.get(connection_id) + if wrapped_socket is None: + return SecurityConst.errSSLInternal + base_socket = wrapped_socket.socket + + requested_length = data_length_pointer[0] + + timeout = wrapped_socket.gettimeout() + error = None + read_count = 0 + + try: + while read_count < requested_length: + if timeout is None or timeout >= 0: + if not util.wait_for_read(base_socket, timeout): + raise socket.error(errno.EAGAIN, 'timed out') + + remaining = requested_length - read_count + buffer = (ctypes.c_char * remaining).from_address( + data_buffer + read_count + ) + chunk_size = base_socket.recv_into(buffer, remaining) + read_count += chunk_size + if not chunk_size: + if not read_count: + return SecurityConst.errSSLClosedGraceful + break + except (socket.error) as e: + error = e.errno + + if error is not None and error != errno.EAGAIN: + data_length_pointer[0] = read_count + if error == errno.ECONNRESET or error == errno.EPIPE: + return SecurityConst.errSSLClosedAbort + raise + + data_length_pointer[0] = read_count + + if read_count != requested_length: + return SecurityConst.errSSLWouldBlock + + return 0 + except Exception as e: + if wrapped_socket is not None: + wrapped_socket._exception = e + return SecurityConst.errSSLInternal + + +def _write_callback(connection_id, data_buffer, data_length_pointer): + """ + SecureTransport write callback. This is called by ST to request that data + actually be sent on the network. + """ + wrapped_socket = None + try: + wrapped_socket = _connection_refs.get(connection_id) + if wrapped_socket is None: + return SecurityConst.errSSLInternal + base_socket = wrapped_socket.socket + + bytes_to_write = data_length_pointer[0] + data = ctypes.string_at(data_buffer, bytes_to_write) + + timeout = wrapped_socket.gettimeout() + error = None + sent = 0 + + try: + while sent < bytes_to_write: + if timeout is None or timeout >= 0: + if not util.wait_for_write(base_socket, timeout): + raise socket.error(errno.EAGAIN, 'timed out') + chunk_sent = base_socket.send(data) + sent += chunk_sent + + # This has some needless copying here, but I'm not sure there's + # much value in optimising this data path. + data = data[chunk_sent:] + except (socket.error) as e: + error = e.errno + + if error is not None and error != errno.EAGAIN: + data_length_pointer[0] = sent + if error == errno.ECONNRESET or error == errno.EPIPE: + return SecurityConst.errSSLClosedAbort + raise + + data_length_pointer[0] = sent + + if sent != bytes_to_write: + return SecurityConst.errSSLWouldBlock + + return 0 + except Exception as e: + if wrapped_socket is not None: + wrapped_socket._exception = e + return SecurityConst.errSSLInternal + + +# We need to keep these two objects references alive: if they get GC'd while +# in use then SecureTransport could attempt to call a function that is in freed +# memory. That would be...uh...bad. Yeah, that's the word. Bad. +_read_callback_pointer = Security.SSLReadFunc(_read_callback) +_write_callback_pointer = Security.SSLWriteFunc(_write_callback) + + +class WrappedSocket(object): + """ + API-compatibility wrapper for Python's OpenSSL wrapped socket object. + + Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage + collector of PyPy. 
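+
+    A hypothetical sketch of how instances come to exist (urllib3 normally
+    does this internally; the host and port are illustrative)::
+
+        import socket, ssl
+        raw = socket.create_connection(('example.com', 443))
+        ctx = SecureTransportContext(ssl.PROTOCOL_SSLv23)
+        tls = ctx.wrap_socket(raw, server_hostname='example.com')
+        tls.sendall(b'GET / HTTP/1.1\r\nHost: example.com\r\n\r\n')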
+ """ + def __init__(self, socket): + self.socket = socket + self.context = None + self._makefile_refs = 0 + self._closed = False + self._exception = None + self._keychain = None + self._keychain_dir = None + self._client_cert_chain = None + + # We save off the previously-configured timeout and then set it to + # zero. This is done because we use select and friends to handle the + # timeouts, but if we leave the timeout set on the lower socket then + # Python will "kindly" call select on that socket again for us. Avoid + # that by forcing the timeout to zero. + self._timeout = self.socket.gettimeout() + self.socket.settimeout(0) + + @contextlib.contextmanager + def _raise_on_error(self): + """ + A context manager that can be used to wrap calls that do I/O from + SecureTransport. If any of the I/O callbacks hit an exception, this + context manager will correctly propagate the exception after the fact. + This avoids silently swallowing those exceptions. + + It also correctly forces the socket closed. + """ + self._exception = None + + # We explicitly don't catch around this yield because in the unlikely + # event that an exception was hit in the block we don't want to swallow + # it. + yield + if self._exception is not None: + exception, self._exception = self._exception, None + self.close() + raise exception + + def _set_ciphers(self): + """ + Sets up the allowed ciphers. By default this matches the set in + util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done + custom and doesn't allow changing at this time, mostly because parsing + OpenSSL cipher strings is going to be a freaking nightmare. + """ + ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES) + result = Security.SSLSetEnabledCiphers( + self.context, ciphers, len(CIPHER_SUITES) + ) + _assert_no_error(result) + + def _custom_validate(self, verify, trust_bundle): + """ + Called when we have set custom validation. We do this in two cases: + first, when cert validation is entirely disabled; and second, when + using a custom trust DB. + """ + # If we disabled cert validation, just say: cool. + if not verify: + return + + # We want data in memory, so load it up. + if os.path.isfile(trust_bundle): + with open(trust_bundle, 'rb') as f: + trust_bundle = f.read() + + cert_array = None + trust = Security.SecTrustRef() + + try: + # Get a CFArray that contains the certs we want. + cert_array = _cert_array_from_pem(trust_bundle) + + # Ok, now the hard part. We want to get the SecTrustRef that ST has + # created for this connection, shove our CAs into it, tell ST to + # ignore everything else it knows, and then ask if it can build a + # chain. This is a buuuunch of code. + result = Security.SSLCopyPeerTrust( + self.context, ctypes.byref(trust) + ) + _assert_no_error(result) + if not trust: + raise ssl.SSLError("Failed to copy trust reference") + + result = Security.SecTrustSetAnchorCertificates(trust, cert_array) + _assert_no_error(result) + + result = Security.SecTrustSetAnchorCertificatesOnly(trust, True) + _assert_no_error(result) + + trust_result = Security.SecTrustResultType() + result = Security.SecTrustEvaluate( + trust, ctypes.byref(trust_result) + ) + _assert_no_error(result) + finally: + if trust: + CoreFoundation.CFRelease(trust) + + if cert_array is not None: + CoreFoundation.CFRelease(cert_array) + + # Ok, now we can look at what the result was. 
+ successes = ( + SecurityConst.kSecTrustResultUnspecified, + SecurityConst.kSecTrustResultProceed + ) + if trust_result.value not in successes: + raise ssl.SSLError( + "certificate verify failed, error code: %d" % + trust_result.value + ) + + def handshake(self, + server_hostname, + verify, + trust_bundle, + min_version, + max_version, + client_cert, + client_key, + client_key_passphrase): + """ + Actually performs the TLS handshake. This is run automatically by + wrapped socket, and shouldn't be needed in user code. + """ + # First, we do the initial bits of connection setup. We need to create + # a context, set its I/O funcs, and set the connection reference. + self.context = Security.SSLCreateContext( + None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType + ) + result = Security.SSLSetIOFuncs( + self.context, _read_callback_pointer, _write_callback_pointer + ) + _assert_no_error(result) + + # Here we need to compute the handle to use. We do this by taking the + # id of self modulo 2**31 - 1. If this is already in the dictionary, we + # just keep incrementing by one until we find a free space. + with _connection_ref_lock: + handle = id(self) % 2147483647 + while handle in _connection_refs: + handle = (handle + 1) % 2147483647 + _connection_refs[handle] = self + + result = Security.SSLSetConnection(self.context, handle) + _assert_no_error(result) + + # If we have a server hostname, we should set that too. + if server_hostname: + if not isinstance(server_hostname, bytes): + server_hostname = server_hostname.encode('utf-8') + + result = Security.SSLSetPeerDomainName( + self.context, server_hostname, len(server_hostname) + ) + _assert_no_error(result) + + # Setup the ciphers. + self._set_ciphers() + + # Set the minimum and maximum TLS versions. + result = Security.SSLSetProtocolVersionMin(self.context, min_version) + _assert_no_error(result) + result = Security.SSLSetProtocolVersionMax(self.context, max_version) + _assert_no_error(result) + + # If there's a trust DB, we need to use it. We do that by telling + # SecureTransport to break on server auth. We also do that if we don't + # want to validate the certs at all: we just won't actually do any + # authing in that case. + if not verify or trust_bundle is not None: + result = Security.SSLSetSessionOption( + self.context, + SecurityConst.kSSLSessionOptionBreakOnServerAuth, + True + ) + _assert_no_error(result) + + # If there's a client cert, we need to use it. + if client_cert: + self._keychain, self._keychain_dir = _temporary_keychain() + self._client_cert_chain = _load_client_cert_chain( + self._keychain, client_cert, client_key + ) + result = Security.SSLSetCertificate( + self.context, self._client_cert_chain + ) + _assert_no_error(result) + + while True: + with self._raise_on_error(): + result = Security.SSLHandshake(self.context) + + if result == SecurityConst.errSSLWouldBlock: + raise socket.timeout("handshake timed out") + elif result == SecurityConst.errSSLServerAuthCompleted: + self._custom_validate(verify, trust_bundle) + continue + else: + _assert_no_error(result) + break + + def fileno(self): + return self.socket.fileno() + + # Copy-pasted from Python 3.5 source code + def _decref_socketios(self): + if self._makefile_refs > 0: + self._makefile_refs -= 1 + if self._closed: + self.close() + + def recv(self, bufsiz): + buffer = ctypes.create_string_buffer(bufsiz) + bytes_read = self.recv_into(buffer, bufsiz) + data = buffer[:bytes_read] + return data + + def recv_into(self, buffer, nbytes=None): + # Read short on EOF. 
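+        # (Illustrative note: ``buffer`` may be any writable object that
+        # supports the buffer protocol, e.g. ``bytearray(8192)``; recv()
+        # above simply allocates a ctypes buffer and delegates here.)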
+ if self._closed: + return 0 + + if nbytes is None: + nbytes = len(buffer) + + buffer = (ctypes.c_char * nbytes).from_buffer(buffer) + processed_bytes = ctypes.c_size_t(0) + + with self._raise_on_error(): + result = Security.SSLRead( + self.context, buffer, nbytes, ctypes.byref(processed_bytes) + ) + + # There are some result codes that we want to treat as "not always + # errors". Specifically, those are errSSLWouldBlock, + # errSSLClosedGraceful, and errSSLClosedNoNotify. + if (result == SecurityConst.errSSLWouldBlock): + # If we didn't process any bytes, then this was just a time out. + # However, we can get errSSLWouldBlock in situations when we *did* + # read some data, and in those cases we should just read "short" + # and return. + if processed_bytes.value == 0: + # Timed out, no data read. + raise socket.timeout("recv timed out") + elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify): + # The remote peer has closed this connection. We should do so as + # well. Note that we don't actually return here because in + # principle this could actually be fired along with return data. + # It's unlikely though. + self.close() + else: + _assert_no_error(result) + + # Ok, we read and probably succeeded. We should return whatever data + # was actually read. + return processed_bytes.value + + def settimeout(self, timeout): + self._timeout = timeout + + def gettimeout(self): + return self._timeout + + def send(self, data): + processed_bytes = ctypes.c_size_t(0) + + with self._raise_on_error(): + result = Security.SSLWrite( + self.context, data, len(data), ctypes.byref(processed_bytes) + ) + + if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0: + # Timed out + raise socket.timeout("send timed out") + else: + _assert_no_error(result) + + # We sent, and probably succeeded. Tell them how much we sent. + return processed_bytes.value + + def sendall(self, data): + total_sent = 0 + while total_sent < len(data): + sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) + total_sent += sent + + def shutdown(self): + with self._raise_on_error(): + Security.SSLClose(self.context) + + def close(self): + # TODO: should I do clean shutdown here? Do I have to? + if self._makefile_refs < 1: + self._closed = True + if self.context: + CoreFoundation.CFRelease(self.context) + self.context = None + if self._client_cert_chain: + CoreFoundation.CFRelease(self._client_cert_chain) + self._client_cert_chain = None + if self._keychain: + Security.SecKeychainDelete(self._keychain) + CoreFoundation.CFRelease(self._keychain) + shutil.rmtree(self._keychain_dir) + self._keychain = self._keychain_dir = None + return self.socket.close() + else: + self._makefile_refs -= 1 + + def getpeercert(self, binary_form=False): + # Urgh, annoying. + # + # Here's how we do this: + # + # 1. Call SSLCopyPeerTrust to get hold of the trust object for this + # connection. + # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf. + # 3. To get the CN, call SecCertificateCopyCommonName and process that + # string so that it's of the appropriate type. + # 4. To get the SAN, we need to do something a bit more complex: + # a. Call SecCertificateCopyValues to get the data, requesting + # kSecOIDSubjectAltName. + # b. Mess about with this dictionary to try to get the SANs out. + # + # This is gross. Really gross. It's going to be a few hundred LoC extra + # just to repeat something that SecureTransport can *already do*. 
So my + # operating assumption at this time is that what we want to do is + # instead to just flag to urllib3 that it shouldn't do its own hostname + # validation when using SecureTransport. + if not binary_form: + raise ValueError( + "SecureTransport only supports dumping binary certs" + ) + trust = Security.SecTrustRef() + certdata = None + der_bytes = None + + try: + # Grab the trust store. + result = Security.SSLCopyPeerTrust( + self.context, ctypes.byref(trust) + ) + _assert_no_error(result) + if not trust: + # Probably we haven't done the handshake yet. No biggie. + return None + + cert_count = Security.SecTrustGetCertificateCount(trust) + if not cert_count: + # Also a case that might happen if we haven't handshaked. + # Handshook? Handshaken? + return None + + leaf = Security.SecTrustGetCertificateAtIndex(trust, 0) + assert leaf + + # Ok, now we want the DER bytes. + certdata = Security.SecCertificateCopyData(leaf) + assert certdata + + data_length = CoreFoundation.CFDataGetLength(certdata) + data_buffer = CoreFoundation.CFDataGetBytePtr(certdata) + der_bytes = ctypes.string_at(data_buffer, data_length) + finally: + if certdata: + CoreFoundation.CFRelease(certdata) + if trust: + CoreFoundation.CFRelease(trust) + + return der_bytes + + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + + +if _fileobject: # Platform-specific: Python 2 + def makefile(self, mode, bufsize=-1): + self._makefile_refs += 1 + return _fileobject(self, mode, bufsize, close=True) +else: # Platform-specific: Python 3 + def makefile(self, mode="r", buffering=None, *args, **kwargs): + # We disable buffering with SecureTransport because it conflicts with + # the buffering that ST does internally (see issue #1153 for more). + buffering = 0 + return backport_makefile(self, mode, buffering, *args, **kwargs) + +WrappedSocket.makefile = makefile + + +class SecureTransportContext(object): + """ + I am a wrapper class for the SecureTransport library, to translate the + interface of the standard library ``SSLContext`` object to calls into + SecureTransport. + """ + def __init__(self, protocol): + self._min_version, self._max_version = _protocol_to_min_max[protocol] + self._options = 0 + self._verify = False + self._trust_bundle = None + self._client_cert = None + self._client_key = None + self._client_key_passphrase = None + + @property + def check_hostname(self): + """ + SecureTransport cannot have its hostname checking disabled. For more, + see the comment on getpeercert() in this file. + """ + return True + + @check_hostname.setter + def check_hostname(self, value): + """ + SecureTransport cannot have its hostname checking disabled. For more, + see the comment on getpeercert() in this file. + """ + pass + + @property + def options(self): + # TODO: Well, crap. + # + # So this is the bit of the code that is the most likely to cause us + # trouble. Essentially we need to enumerate all of the SSL options that + # users might want to use and try to see if we can sensibly translate + # them, or whether we should just ignore them. + return self._options + + @options.setter + def options(self, value): + # TODO: Update in line with above. 
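+        # (For now the value is only recorded; it is not translated into
+        # any SecureTransport setting.)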
+ self._options = value + + @property + def verify_mode(self): + return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE + + @verify_mode.setter + def verify_mode(self, value): + self._verify = True if value == ssl.CERT_REQUIRED else False + + def set_default_verify_paths(self): + # So, this has to do something a bit weird. Specifically, what it does + # is nothing. + # + # This means that, if we had previously had load_verify_locations + # called, this does not undo that. We need to do that because it turns + # out that the rest of the urllib3 code will attempt to load the + # default verify paths if it hasn't been told about any paths, even if + # the context itself was sometime earlier. We resolve that by just + # ignoring it. + pass + + def load_default_certs(self): + return self.set_default_verify_paths() + + def set_ciphers(self, ciphers): + # For now, we just require the default cipher string. + if ciphers != util.ssl_.DEFAULT_CIPHERS: + raise ValueError( + "SecureTransport doesn't support custom cipher strings" + ) + + def load_verify_locations(self, cafile=None, capath=None, cadata=None): + # OK, we only really support cadata and cafile. + if capath is not None: + raise ValueError( + "SecureTransport does not support cert directories" + ) + + self._trust_bundle = cafile or cadata + + def load_cert_chain(self, certfile, keyfile=None, password=None): + self._client_cert = certfile + self._client_key = keyfile + self._client_cert_passphrase = password + + def wrap_socket(self, sock, server_side=False, + do_handshake_on_connect=True, suppress_ragged_eofs=True, + server_hostname=None): + # So, what do we do here? Firstly, we assert some properties. This is a + # stripped down shim, so there is some functionality we don't support. + # See PEP 543 for the real deal. + assert not server_side + assert do_handshake_on_connect + assert suppress_ragged_eofs + + # Ok, we're good to go. Now we want to create the wrapped socket object + # and store it in the appropriate place. + wrapped_socket = WrappedSocket(sock) + + # Now we can handshake + wrapped_socket.handshake( + server_hostname, self._verify, self._trust_bundle, + self._min_version, self._max_version, self._client_cert, + self._client_key, self._client_key_passphrase + ) + return wrapped_socket diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.pyc new file mode 100644 index 0000000..52e904a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/socks.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/socks.py new file mode 100644 index 0000000..811e312 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/socks.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +""" +This module contains provisional support for SOCKS proxies from within +urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and +SOCKS5. To enable its functionality, either install PySocks or install this +module with the ``socks`` extra. + +The SOCKS implementation supports the full range of urllib3 features. 
It also +supports the following SOCKS features: + +- SOCKS4 +- SOCKS4a +- SOCKS5 +- Usernames and passwords for the SOCKS proxy + +Known Limitations: + +- Currently PySocks does not support contacting remote websites via literal + IPv6 addresses. Any such connection attempt will fail. You must use a domain + name. +- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any + such connection attempt will fail. +""" +from __future__ import absolute_import + +try: + import socks +except ImportError: + import warnings + from ..exceptions import DependencyWarning + + warnings.warn(( + 'SOCKS support in urllib3 requires the installation of optional ' + 'dependencies: specifically, PySocks. For more information, see ' + 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' + ), + DependencyWarning + ) + raise + +from socket import error as SocketError, timeout as SocketTimeout + +from ..connection import ( + HTTPConnection, HTTPSConnection +) +from ..connectionpool import ( + HTTPConnectionPool, HTTPSConnectionPool +) +from ..exceptions import ConnectTimeoutError, NewConnectionError +from ..poolmanager import PoolManager +from ..util.url import parse_url + +try: + import ssl +except ImportError: + ssl = None + + +class SOCKSConnection(HTTPConnection): + """ + A plain-text HTTP connection that connects via a SOCKS proxy. + """ + def __init__(self, *args, **kwargs): + self._socks_options = kwargs.pop('_socks_options') + super(SOCKSConnection, self).__init__(*args, **kwargs) + + def _new_conn(self): + """ + Establish a new connection via the SOCKS proxy. + """ + extra_kw = {} + if self.source_address: + extra_kw['source_address'] = self.source_address + + if self.socket_options: + extra_kw['socket_options'] = self.socket_options + + try: + conn = socks.create_connection( + (self.host, self.port), + proxy_type=self._socks_options['socks_version'], + proxy_addr=self._socks_options['proxy_host'], + proxy_port=self._socks_options['proxy_port'], + proxy_username=self._socks_options['username'], + proxy_password=self._socks_options['password'], + proxy_rdns=self._socks_options['rdns'], + timeout=self.timeout, + **extra_kw + ) + + except SocketTimeout as e: + raise ConnectTimeoutError( + self, "Connection to %s timed out. (connect timeout=%s)" % + (self.host, self.timeout)) + + except socks.ProxyError as e: + # This is fragile as hell, but it seems to be the only way to raise + # useful errors here. + if e.socket_err: + error = e.socket_err + if isinstance(error, SocketTimeout): + raise ConnectTimeoutError( + self, + "Connection to %s timed out. (connect timeout=%s)" % + (self.host, self.timeout) + ) + else: + raise NewConnectionError( + self, + "Failed to establish a new connection: %s" % error + ) + else: + raise NewConnectionError( + self, + "Failed to establish a new connection: %s" % e + ) + + except SocketError as e: # Defensive: PySocks should catch all these. + raise NewConnectionError( + self, "Failed to establish a new connection: %s" % e) + + return conn + + +# We don't need to duplicate the Verified/Unverified distinction from +# urllib3/connection.py here because the HTTPSConnection will already have been +# correctly set to either the Verified or Unverified form by that module. This +# means the SOCKSHTTPSConnection will automatically be the correct type. 
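+#
+# A hypothetical usage sketch (the proxy address and target URL below are
+# purely illustrative)::
+#
+#     proxies = SOCKSProxyManager('socks5h://127.0.0.1:9050/')
+#     resp = proxies.request('GET', 'http://example.com/')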
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): + pass + + +class SOCKSHTTPConnectionPool(HTTPConnectionPool): + ConnectionCls = SOCKSConnection + + +class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): + ConnectionCls = SOCKSHTTPSConnection + + +class SOCKSProxyManager(PoolManager): + """ + A version of the urllib3 ProxyManager that routes connections via the + defined SOCKS proxy. + """ + pool_classes_by_scheme = { + 'http': SOCKSHTTPConnectionPool, + 'https': SOCKSHTTPSConnectionPool, + } + + def __init__(self, proxy_url, username=None, password=None, + num_pools=10, headers=None, **connection_pool_kw): + parsed = parse_url(proxy_url) + + if username is None and password is None and parsed.auth is not None: + split = parsed.auth.split(':') + if len(split) == 2: + username, password = split + if parsed.scheme == 'socks5': + socks_version = socks.PROXY_TYPE_SOCKS5 + rdns = False + elif parsed.scheme == 'socks5h': + socks_version = socks.PROXY_TYPE_SOCKS5 + rdns = True + elif parsed.scheme == 'socks4': + socks_version = socks.PROXY_TYPE_SOCKS4 + rdns = False + elif parsed.scheme == 'socks4a': + socks_version = socks.PROXY_TYPE_SOCKS4 + rdns = True + else: + raise ValueError( + "Unable to determine SOCKS version from %s" % proxy_url + ) + + self.proxy_url = proxy_url + + socks_options = { + 'socks_version': socks_version, + 'proxy_host': parsed.host, + 'proxy_port': parsed.port, + 'username': username, + 'password': password, + 'rdns': rdns + } + connection_pool_kw['_socks_options'] = socks_options + + super(SOCKSProxyManager, self).__init__( + num_pools, headers, **connection_pool_kw + ) + + self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/socks.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/socks.pyc new file mode 100644 index 0000000..7c63546 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/contrib/socks.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/exceptions.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/exceptions.py new file mode 100644 index 0000000..7bbaa98 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/exceptions.py @@ -0,0 +1,246 @@ +from __future__ import absolute_import +from .packages.six.moves.http_client import ( + IncompleteRead as httplib_IncompleteRead +) +# Base Exceptions + + +class HTTPError(Exception): + "Base exception used by this module." + pass + + +class HTTPWarning(Warning): + "Base warning used by this module." + pass + + +class PoolError(HTTPError): + "Base exception for errors caused within a pool." + def __init__(self, pool, message): + self.pool = pool + HTTPError.__init__(self, "%s: %s" % (pool, message)) + + def __reduce__(self): + # For pickling purposes. + return self.__class__, (None, None) + + +class RequestError(PoolError): + "Base exception for PoolErrors that have associated URLs." + def __init__(self, pool, url, message): + self.url = url + PoolError.__init__(self, pool, message) + + def __reduce__(self): + # For pickling purposes. + return self.__class__, (None, self.url, None) + + +class SSLError(HTTPError): + "Raised when SSL certificate fails in an HTTPS connection." + pass + + +class ProxyError(HTTPError): + "Raised when the connection to a proxy fails." + pass + + +class DecodeError(HTTPError): + "Raised when automatic decoding based on Content-Type fails." 
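+    # (e.g. a body that announces ``Content-Encoding: gzip`` but does not
+    # decompress as valid gzip data.)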
+ pass + + +class ProtocolError(HTTPError): + "Raised when something unexpected happens mid-request/response." + pass + + +#: Renamed to ProtocolError but aliased for backwards compatibility. +ConnectionError = ProtocolError + + +# Leaf Exceptions + +class MaxRetryError(RequestError): + """Raised when the maximum number of retries is exceeded. + + :param pool: The connection pool + :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` + :param string url: The requested Url + :param exceptions.Exception reason: The underlying error + + """ + + def __init__(self, pool, url, reason=None): + self.reason = reason + + message = "Max retries exceeded with url: %s (Caused by %r)" % ( + url, reason) + + RequestError.__init__(self, pool, url, message) + + +class HostChangedError(RequestError): + "Raised when an existing pool gets a request for a foreign host." + + def __init__(self, pool, url, retries=3): + message = "Tried to open a foreign host with url: %s" % url + RequestError.__init__(self, pool, url, message) + self.retries = retries + + +class TimeoutStateError(HTTPError): + """ Raised when passing an invalid state to a timeout """ + pass + + +class TimeoutError(HTTPError): + """ Raised when a socket timeout error occurs. + + Catching this error will catch both :exc:`ReadTimeoutErrors + <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`. + """ + pass + + +class ReadTimeoutError(TimeoutError, RequestError): + "Raised when a socket timeout occurs while receiving data from a server" + pass + + +# This timeout error does not have a URL attached and needs to inherit from the +# base HTTPError +class ConnectTimeoutError(TimeoutError): + "Raised when a socket timeout occurs while connecting to a server" + pass + + +class NewConnectionError(ConnectTimeoutError, PoolError): + "Raised when we fail to establish a new connection. Usually ECONNREFUSED." + pass + + +class EmptyPoolError(PoolError): + "Raised when a pool runs out of connections and no more are allowed." + pass + + +class ClosedPoolError(PoolError): + "Raised when a request enters a pool after the pool has been closed." + pass + + +class LocationValueError(ValueError, HTTPError): + "Raised when there is something wrong with a given URL input." + pass + + +class LocationParseError(LocationValueError): + "Raised when get_host or similar fails to parse the URL input." + + def __init__(self, location): + message = "Failed to parse: %s" % location + HTTPError.__init__(self, message) + + self.location = location + + +class ResponseError(HTTPError): + "Used as a container for an error reason supplied in a MaxRetryError." + GENERIC_ERROR = 'too many error responses' + SPECIFIC_ERROR = 'too many {status_code} error responses' + + +class SecurityWarning(HTTPWarning): + "Warned when performing security reducing actions" + pass + + +class SubjectAltNameWarning(SecurityWarning): + "Warned when connecting to a host with a certificate missing a SAN." + pass + + +class InsecureRequestWarning(SecurityWarning): + "Warned when making an unverified HTTPS request." + pass + + +class SystemTimeWarning(SecurityWarning): + "Warned when system time is suspected to be wrong" + pass + + +class InsecurePlatformWarning(SecurityWarning): + "Warned when certain SSL configuration is not available on a platform." + pass + + +class SNIMissingWarning(HTTPWarning): + "Warned when making a HTTPS request without SNI available." 
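+    # (Typically emitted on very old Python 2 setups, where the stdlib ssl
+    # module cannot send a server_hostname during the handshake.)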
+ pass + + +class DependencyWarning(HTTPWarning): + """ + Warned when an attempt is made to import a module with missing optional + dependencies. + """ + pass + + +class ResponseNotChunked(ProtocolError, ValueError): + "Response needs to be chunked in order to read it as chunks." + pass + + +class BodyNotHttplibCompatible(HTTPError): + """ + Body should be httplib.HTTPResponse like (have an fp attribute which + returns raw chunks) for read_chunked(). + """ + pass + + +class IncompleteRead(HTTPError, httplib_IncompleteRead): + """ + Response length doesn't match expected Content-Length + + Subclass of http_client.IncompleteRead to allow int value + for `partial` to avoid creating large objects on streamed + reads. + """ + def __init__(self, partial, expected): + super(IncompleteRead, self).__init__(partial, expected) + + def __repr__(self): + return ('IncompleteRead(%i bytes read, ' + '%i more expected)' % (self.partial, self.expected)) + + +class InvalidHeader(HTTPError): + "The header provided was somehow invalid." + pass + + +class ProxySchemeUnknown(AssertionError, ValueError): + "ProxyManager does not support the supplied scheme" + # TODO(t-8ch): Stop inheriting from AssertionError in v2.0. + + def __init__(self, scheme): + message = "Not supported proxy scheme %s" % scheme + super(ProxySchemeUnknown, self).__init__(message) + + +class HeaderParsingError(HTTPError): + "Raised by assert_header_parsing, but we convert it to a log.warning statement." + def __init__(self, defects, unparsed_data): + message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) + super(HeaderParsingError, self).__init__(message) + + +class UnrewindableBodyError(HTTPError): + "urllib3 encountered an error when trying to rewind a body" + pass diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/exceptions.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/exceptions.pyc new file mode 100644 index 0000000..340385c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/exceptions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/fields.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/fields.py new file mode 100644 index 0000000..37fe64a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/fields.py @@ -0,0 +1,178 @@ +from __future__ import absolute_import +import email.utils +import mimetypes + +from .packages import six + + +def guess_content_type(filename, default='application/octet-stream'): + """ + Guess the "Content-Type" of a file. + + :param filename: + The filename to guess the "Content-Type" of using :mod:`mimetypes`. + :param default: + If no "Content-Type" can be guessed, default to `default`. + """ + if filename: + return mimetypes.guess_type(filename)[0] or default + return default + + +def format_header_param(name, value): + """ + Helper function to format and quote a single header parameter. + + Particularly useful for header parameters which might contain + non-ASCII values, like file names. This follows RFC 2231, as + suggested by RFC 2388 Section 4.4. + + :param name: + The name of the parameter, a string expected to be ASCII only. + :param value: + The value of the parameter, provided as a unicode string. 
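+
+    For example (illustrative), a filename of ``u'resumé.txt'`` cannot be
+    emitted as a plain quoted string, so it is rendered in RFC 2231 form,
+    roughly ``filename*=utf-8''resum%C3%A9.txt``.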
+ """ + if not any(ch in value for ch in '"\\\r\n'): + result = '%s="%s"' % (name, value) + try: + result.encode('ascii') + except (UnicodeEncodeError, UnicodeDecodeError): + pass + else: + return result + if not six.PY3 and isinstance(value, six.text_type): # Python 2: + value = value.encode('utf-8') + value = email.utils.encode_rfc2231(value, 'utf-8') + value = '%s*=%s' % (name, value) + return value + + +class RequestField(object): + """ + A data container for request body parameters. + + :param name: + The name of this request field. + :param data: + The data/value body. + :param filename: + An optional filename of the request field. + :param headers: + An optional dict-like object of headers to initially use for the field. + """ + def __init__(self, name, data, filename=None, headers=None): + self._name = name + self._filename = filename + self.data = data + self.headers = {} + if headers: + self.headers = dict(headers) + + @classmethod + def from_tuples(cls, fieldname, value): + """ + A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. + + Supports constructing :class:`~urllib3.fields.RequestField` from + parameter of key/value strings AND key/filetuple. A filetuple is a + (filename, data, MIME type) tuple where the MIME type is optional. + For example:: + + 'foo': 'bar', + 'fakefile': ('foofile.txt', 'contents of foofile'), + 'realfile': ('barfile.txt', open('realfile').read()), + 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), + 'nonamefile': 'contents of nonamefile field', + + Field names and filenames must be unicode. + """ + if isinstance(value, tuple): + if len(value) == 3: + filename, data, content_type = value + else: + filename, data = value + content_type = guess_content_type(filename) + else: + filename = None + content_type = None + data = value + + request_param = cls(fieldname, data, filename=filename) + request_param.make_multipart(content_type=content_type) + + return request_param + + def _render_part(self, name, value): + """ + Overridable helper function to format a single header parameter. + + :param name: + The name of the parameter, a string expected to be ASCII only. + :param value: + The value of the parameter, provided as a unicode string. + """ + return format_header_param(name, value) + + def _render_parts(self, header_parts): + """ + Helper function to format and quote a single header. + + Useful for single headers that are composed of multiple items. E.g., + 'Content-Disposition' fields. + + :param header_parts: + A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format + as `k1="v1"; k2="v2"; ...`. + """ + parts = [] + iterable = header_parts + if isinstance(header_parts, dict): + iterable = header_parts.items() + + for name, value in iterable: + if value is not None: + parts.append(self._render_part(name, value)) + + return '; '.join(parts) + + def render_headers(self): + """ + Renders the headers for this request field. 
+ """ + lines = [] + + sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location'] + for sort_key in sort_keys: + if self.headers.get(sort_key, False): + lines.append('%s: %s' % (sort_key, self.headers[sort_key])) + + for header_name, header_value in self.headers.items(): + if header_name not in sort_keys: + if header_value: + lines.append('%s: %s' % (header_name, header_value)) + + lines.append('\r\n') + return '\r\n'.join(lines) + + def make_multipart(self, content_disposition=None, content_type=None, + content_location=None): + """ + Makes this request field into a multipart request field. + + This method overrides "Content-Disposition", "Content-Type" and + "Content-Location" headers to the request parameter. + + :param content_type: + The 'Content-Type' of the request body. + :param content_location: + The 'Content-Location' of the request body. + + """ + self.headers['Content-Disposition'] = content_disposition or 'form-data' + self.headers['Content-Disposition'] += '; '.join([ + '', self._render_parts( + (('name', self._name), ('filename', self._filename)) + ) + ]) + self.headers['Content-Type'] = content_type + self.headers['Content-Location'] = content_location diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/fields.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/fields.pyc new file mode 100644 index 0000000..9c28f60 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/fields.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/filepost.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/filepost.py new file mode 100644 index 0000000..78f1e19 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/filepost.py @@ -0,0 +1,98 @@ +from __future__ import absolute_import +import binascii +import codecs +import os + +from io import BytesIO + +from .packages import six +from .packages.six import b +from .fields import RequestField + +writer = codecs.lookup('utf-8')[3] + + +def choose_boundary(): + """ + Our embarrassingly-simple replacement for mimetools.choose_boundary. + """ + boundary = binascii.hexlify(os.urandom(16)) + if six.PY3: + boundary = boundary.decode('ascii') + return boundary + + +def iter_field_objects(fields): + """ + Iterate over fields. + + Supports list of (k, v) tuples and dicts, and lists of + :class:`~urllib3.fields.RequestField`. + + """ + if isinstance(fields, dict): + i = six.iteritems(fields) + else: + i = iter(fields) + + for field in i: + if isinstance(field, RequestField): + yield field + else: + yield RequestField.from_tuples(*field) + + +def iter_fields(fields): + """ + .. deprecated:: 1.6 + + Iterate over fields. + + The addition of :class:`~urllib3.fields.RequestField` makes this function + obsolete. Instead, use :func:`iter_field_objects`, which returns + :class:`~urllib3.fields.RequestField` objects. + + Supports list of (k, v) tuples and dicts. + """ + if isinstance(fields, dict): + return ((k, v) for k, v in six.iteritems(fields)) + + return ((k, v) for k, v in fields) + + +def encode_multipart_formdata(fields, boundary=None): + """ + Encode a dictionary of ``fields`` using the multipart/form-data MIME format. + + :param fields: + Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). + + :param boundary: + If not specified, then a random boundary will be generated using + :func:`urllib3.filepost.choose_boundary`. 
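+
+    A sketch of typical usage (field name, value and boundary are
+    illustrative)::
+
+        body, content_type = encode_multipart_formdata(
+            {'field': 'value'}, boundary='xXxXx')
+        # content_type -> 'multipart/form-data; boundary=xXxXx'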
+ """ + body = BytesIO() + if boundary is None: + boundary = choose_boundary() + + for field in iter_field_objects(fields): + body.write(b('--%s\r\n' % (boundary))) + + writer(body).write(field.render_headers()) + data = field.data + + if isinstance(data, int): + data = str(data) # Backwards compatibility + + if isinstance(data, six.text_type): + writer(body).write(data) + else: + body.write(data) + + body.write(b'\r\n') + + body.write(b('--%s--\r\n' % (boundary))) + + content_type = str('multipart/form-data; boundary=%s' % boundary) + + return body.getvalue(), content_type diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/filepost.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/filepost.pyc new file mode 100644 index 0000000..5483c4b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/filepost.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/__init__.py new file mode 100644 index 0000000..170e974 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/__init__.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import + +from . import ssl_match_hostname + +__all__ = ('ssl_match_hostname', ) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/__init__.pyc new file mode 100644 index 0000000..18dd237 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/__init__.pyc new file mode 100644 index 0000000..0a329e9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py new file mode 100644 index 0000000..740db37 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +""" +backports.makefile +~~~~~~~~~~~~~~~~~~ + +Backports the Python 3 ``socket.makefile`` method for use with anything that +wants to create a "fake" socket object. +""" +import io + +from socket import SocketIO + + +def backport_makefile(self, mode="r", buffering=None, encoding=None, + errors=None, newline=None): + """ + Backport of ``socket.makefile`` from Python 3.5. 
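+
+    Intended to be grafted onto a socket-like class as its ``makefile``
+    method rather than called directly; a sketch (the class name is
+    illustrative)::
+
+        WrappedSocket.makefile = backport_makefile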
+ """ + if not set(mode) <= {"r", "w", "b"}: + raise ValueError( + "invalid mode %r (only r, w, b allowed)" % (mode,) + ) + writing = "w" in mode + reading = "r" in mode or not writing + assert reading or writing + binary = "b" in mode + rawmode = "" + if reading: + rawmode += "r" + if writing: + rawmode += "w" + raw = SocketIO(self, rawmode) + self._makefile_refs += 1 + if buffering is None: + buffering = -1 + if buffering < 0: + buffering = io.DEFAULT_BUFFER_SIZE + if buffering == 0: + if not binary: + raise ValueError("unbuffered streams must be binary") + return raw + if reading and writing: + buffer = io.BufferedRWPair(raw, raw, buffering) + elif reading: + buffer = io.BufferedReader(raw, buffering) + else: + assert writing + buffer = io.BufferedWriter(raw, buffering) + if binary: + return buffer + text = io.TextIOWrapper(buffer, encoding, errors, newline) + text.mode = mode + return text diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/makefile.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/makefile.pyc new file mode 100644 index 0000000..c64045c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/backports/makefile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/six.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/six.py new file mode 100644 index 0000000..190c023 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/six.py @@ -0,0 +1,868 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2015 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson <benjamin@python.org>" +__version__ = "1.10.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. 
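+        # MAXSIZE is therefore fixed at 2**31 - 1, and the
+        # sizeof(Py_ssize_t) probe used for other platforms below
+        # is skipped.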
+ MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + 
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + 
MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", 
"urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = 
operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + 
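+        # Exception chaining ("raise ... from ...") is unavailable on
+        # these interpreters, so from_value is deliberately dropped.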
raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. 
+# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/six.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/six.pyc new file mode 100644 index 0000000..41758bf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/six.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py new file mode 100644 index 0000000..d6594eb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py @@ -0,0 +1,19 @@ +import sys + +try: + # Our match_hostname function is the same as 3.5's, so we only want to + # import the match_hostname function if it's at least that good. + if sys.version_info < (3, 5): + raise ImportError("Fallback to vendored code") + + from ssl import CertificateError, match_hostname +except ImportError: + try: + # Backport of the function from a pypi module + from backports.ssl_match_hostname import CertificateError, match_hostname + except ImportError: + # Our vendored copy + from ._implementation import CertificateError, match_hostname + +# Not needed, but documenting what we provide. +__all__ = ('CertificateError', 'match_hostname') diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.pyc new file mode 100644 index 0000000..c6ae3c3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py new file mode 100644 index 0000000..970cf65 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py @@ -0,0 +1,156 @@ +"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" + +# Note: This file is under the PSF license as the code comes from the python +# stdlib. 
http://docs.python.org/3/license.html + +import re +import sys + +# ipaddress has been backported to 2.6+ in pypi. If it is installed on the +# system, use it to handle IPAddress ServerAltnames (this was added in +# python-3.5) otherwise only do DNS matching. This allows +# backports.ssl_match_hostname to continue to be used in Python 2.7. +try: + from pip._vendor import ipaddress +except ImportError: + ipaddress = None + +__version__ = '3.5.0.1' + + +class CertificateError(ValueError): + pass + + +def _dnsname_match(dn, hostname, max_wildcards=1): + """Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + # Ported from python3-syntax: + # leftmost, *remainder = dn.split(r'.') + parts = dn.split(r'.') + leftmost = parts[0] + remainder = parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survey of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + +def _to_unicode(obj): + if isinstance(obj, str) and sys.version_info < (3,): + obj = unicode(obj, encoding='ascii', errors='strict') + return obj + +def _ipaddress_match(ipname, host_ip): + """Exact matching of IP addresses. + + RFC 6125 explicitly doesn't define an algorithm for this + (section 1.7.2 - "Out of Scope"). + """ + # OpenSSL may add a trailing newline to a subjectAltName's IP address + # Divergence from upstream: ipaddress can't handle byte str + ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) + return ip == host_ip + + +def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. 
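+
+    A sketch with an illustrative certificate dict::
+
+        cert = {'subjectAltName': (('DNS', '*.example.com'),)}
+        match_hostname(cert, 'www.example.com')   # returns None
+        match_hostname(cert, 'example.org')       # raises CertificateError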
+ """ + if not cert: + raise ValueError("empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED") + try: + # Divergence from upstream: ipaddress can't handle byte str + host_ip = ipaddress.ip_address(_to_unicode(hostname)) + except ValueError: + # Not an IP address (common case) + host_ip = None + except UnicodeError: + # Divergence from upstream: Have to deal with ipaddress not taking + # byte strings. addresses should be all ascii, so we consider it not + # an ipaddress in this case + host_ip = None + except AttributeError: + # Divergence from upstream: Make ipaddress library optional + if ipaddress is None: + host_ip = None + else: + raise + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if host_ip is None and _dnsname_match(value, hostname): + return + dnsnames.append(value) + elif key == 'IP Address': + if host_ip is not None and _ipaddress_match(value, host_ip): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. + if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.pyc new file mode 100644 index 0000000..6cfcd1d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/poolmanager.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/poolmanager.py new file mode 100644 index 0000000..fe5491c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/poolmanager.py @@ -0,0 +1,450 @@ +from __future__ import absolute_import +import collections +import functools +import logging + +from ._collections import RecentlyUsedContainer +from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool +from .connectionpool import port_by_scheme +from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown +from .packages.six.moves.urllib.parse import urljoin +from .request import RequestMethods +from .util.url import parse_url +from .util.retry import Retry + + +__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] + + +log = logging.getLogger(__name__) + +SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', + 'ssl_version', 'ca_cert_dir', 'ssl_context') + +# All known keyword arguments that could be provided to the pool manager, its +# pools, or the underlying connections. This is used to construct a pool key. 
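+# Each entry below is the corresponding keyword argument name prefixed
+# with 'key_' (see _default_key_normalizer, which performs the renaming).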
+_key_fields = ( + 'key_scheme', # str + 'key_host', # str + 'key_port', # int + 'key_timeout', # int or float or Timeout + 'key_retries', # int or Retry + 'key_strict', # bool + 'key_block', # bool + 'key_source_address', # str + 'key_key_file', # str + 'key_cert_file', # str + 'key_cert_reqs', # str + 'key_ca_certs', # str + 'key_ssl_version', # str + 'key_ca_cert_dir', # str + 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext + 'key_maxsize', # int + 'key_headers', # dict + 'key__proxy', # parsed proxy url + 'key__proxy_headers', # dict + 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples + 'key__socks_options', # dict + 'key_assert_hostname', # bool or string + 'key_assert_fingerprint', # str + 'key_server_hostname', #str +) + +#: The namedtuple class used to construct keys for the connection pool. +#: All custom key schemes should include the fields in this key at a minimum. +PoolKey = collections.namedtuple('PoolKey', _key_fields) + + +def _default_key_normalizer(key_class, request_context): + """ + Create a pool key out of a request context dictionary. + + According to RFC 3986, both the scheme and host are case-insensitive. + Therefore, this function normalizes both before constructing the pool + key for an HTTPS request. If you wish to change this behaviour, provide + alternate callables to ``key_fn_by_scheme``. + + :param key_class: + The class to use when constructing the key. This should be a namedtuple + with the ``scheme`` and ``host`` keys at a minimum. + :type key_class: namedtuple + :param request_context: + A dictionary-like object that contain the context for a request. + :type request_context: dict + + :return: A namedtuple that can be used as a connection pool key. + :rtype: PoolKey + """ + # Since we mutate the dictionary, make a copy first + context = request_context.copy() + context['scheme'] = context['scheme'].lower() + context['host'] = context['host'].lower() + + # These are both dictionaries and need to be transformed into frozensets + for key in ('headers', '_proxy_headers', '_socks_options'): + if key in context and context[key] is not None: + context[key] = frozenset(context[key].items()) + + # The socket_options key may be a list and needs to be transformed into a + # tuple. + socket_opts = context.get('socket_options') + if socket_opts is not None: + context['socket_options'] = tuple(socket_opts) + + # Map the kwargs to the names in the namedtuple - this is necessary since + # namedtuples can't have fields starting with '_'. + for key in list(context.keys()): + context['key_' + key] = context.pop(key) + + # Default to ``None`` for keys missing from the context + for field in key_class._fields: + if field not in context: + context[field] = None + + return key_class(**context) + + +#: A dictionary that maps a scheme to a callable that creates a pool key. +#: This can be used to alter the way pool keys are constructed, if desired. +#: Each PoolManager makes a copy of this dictionary so they can be configured +#: globally here, or individually on the instance. +key_fn_by_scheme = { + 'http': functools.partial(_default_key_normalizer, PoolKey), + 'https': functools.partial(_default_key_normalizer, PoolKey), +} + +pool_classes_by_scheme = { + 'http': HTTPConnectionPool, + 'https': HTTPSConnectionPool, +} + + +class PoolManager(RequestMethods): + """ + Allows for arbitrary requests while transparently keeping track of + necessary connection pools for you. 
+ + :param num_pools: + Number of connection pools to cache before discarding the least + recently used pool. + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + + :param \\**connection_pool_kw: + Additional parameters are used to create fresh + :class:`urllib3.connectionpool.ConnectionPool` instances. + + Example:: + + >>> manager = PoolManager(num_pools=2) + >>> r = manager.request('GET', 'http://google.com/') + >>> r = manager.request('GET', 'http://google.com/mail') + >>> r = manager.request('GET', 'http://yahoo.com/') + >>> len(manager.pools) + 2 + + """ + + proxy = None + + def __init__(self, num_pools=10, headers=None, **connection_pool_kw): + RequestMethods.__init__(self, headers) + self.connection_pool_kw = connection_pool_kw + self.pools = RecentlyUsedContainer(num_pools, + dispose_func=lambda p: p.close()) + + # Locally set the pool classes and keys so other PoolManagers can + # override them. + self.pool_classes_by_scheme = pool_classes_by_scheme + self.key_fn_by_scheme = key_fn_by_scheme.copy() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.clear() + # Return False to re-raise any potential exceptions + return False + + def _new_pool(self, scheme, host, port, request_context=None): + """ + Create a new :class:`ConnectionPool` based on host, port, scheme, and + any additional pool keyword arguments. + + If ``request_context`` is provided, it is provided as keyword arguments + to the pool class used. This method is used to actually create the + connection pools handed out by :meth:`connection_from_url` and + companion methods. It is intended to be overridden for customization. + """ + pool_cls = self.pool_classes_by_scheme[scheme] + if request_context is None: + request_context = self.connection_pool_kw.copy() + + # Although the context has everything necessary to create the pool, + # this function has historically only used the scheme, host, and port + # in the positional args. When an API change is acceptable these can + # be removed. + for key in ('scheme', 'host', 'port'): + request_context.pop(key, None) + + if scheme == 'http': + for kw in SSL_KEYWORDS: + request_context.pop(kw, None) + + return pool_cls(host, port, **request_context) + + def clear(self): + """ + Empty our store of pools and direct them all to close. + + This will not affect in-flight connections, but they will not be + re-used after completion. + """ + self.pools.clear() + + def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): + """ + Get a :class:`ConnectionPool` based on the host, port, and scheme. + + If ``port`` isn't given, it will be derived from the ``scheme`` using + ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is + provided, it is merged with the instance's ``connection_pool_kw`` + variable and used to create the new connection pool, if one is + needed. + """ + + if not host: + raise LocationValueError("No host specified.") + + request_context = self._merge_pool_kwargs(pool_kwargs) + request_context['scheme'] = scheme or 'http' + if not port: + port = port_by_scheme.get(request_context['scheme'].lower(), 80) + request_context['port'] = port + request_context['host'] = host + + return self.connection_from_context(request_context) + + def connection_from_context(self, request_context): + """ + Get a :class:`ConnectionPool` based on the request context. 
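+
+        A sketch with an illustrative context (note the requirement
+        below)::
+
+            pool = manager.connection_from_context(
+                {'scheme': 'http', 'host': 'example.com', 'port': 80})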
+ + ``request_context`` must at least contain the ``scheme`` key and its + value must be a key in ``key_fn_by_scheme`` instance variable. + """ + scheme = request_context['scheme'].lower() + pool_key_constructor = self.key_fn_by_scheme[scheme] + pool_key = pool_key_constructor(request_context) + + return self.connection_from_pool_key(pool_key, request_context=request_context) + + def connection_from_pool_key(self, pool_key, request_context=None): + """ + Get a :class:`ConnectionPool` based on the provided pool key. + + ``pool_key`` should be a namedtuple that only contains immutable + objects. At a minimum it must have the ``scheme``, ``host``, and + ``port`` fields. + """ + with self.pools.lock: + # If the scheme, host, or port doesn't match existing open + # connections, open a new ConnectionPool. + pool = self.pools.get(pool_key) + if pool: + return pool + + # Make a fresh ConnectionPool of the desired type + scheme = request_context['scheme'] + host = request_context['host'] + port = request_context['port'] + pool = self._new_pool(scheme, host, port, request_context=request_context) + self.pools[pool_key] = pool + + return pool + + def connection_from_url(self, url, pool_kwargs=None): + """ + Similar to :func:`urllib3.connectionpool.connection_from_url`. + + If ``pool_kwargs`` is not provided and a new pool needs to be + constructed, ``self.connection_pool_kw`` is used to initialize + the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` + is provided, it is used instead. Note that if a new pool does not + need to be created for the request, the provided ``pool_kwargs`` are + not used. + """ + u = parse_url(url) + return self.connection_from_host(u.host, port=u.port, scheme=u.scheme, + pool_kwargs=pool_kwargs) + + def _merge_pool_kwargs(self, override): + """ + Merge a dictionary of override values for self.connection_pool_kw. + + This does not modify self.connection_pool_kw and returns a new dict. + Any keys in the override dictionary with a value of ``None`` are + removed from the merged dictionary. + """ + base_pool_kwargs = self.connection_pool_kw.copy() + if override: + for key, value in override.items(): + if value is None: + try: + del base_pool_kwargs[key] + except KeyError: + pass + else: + base_pool_kwargs[key] = value + return base_pool_kwargs + + def urlopen(self, method, url, redirect=True, **kw): + """ + Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` + with custom cross-host redirect logic and only sends the request-uri + portion of the ``url``. + + The given ``url`` parameter must be absolute, such that an appropriate + :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. + """ + u = parse_url(url) + conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) + + kw['assert_same_host'] = False + kw['redirect'] = False + + if 'headers' not in kw: + kw['headers'] = self.headers.copy() + + if self.proxy is not None and u.scheme == "http": + response = conn.urlopen(method, url, **kw) + else: + response = conn.urlopen(method, u.request_uri, **kw) + + redirect_location = redirect and response.get_redirect_location() + if not redirect_location: + return response + + # Support relative URLs for redirecting. 
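+        # (e.g. a "Location: /login" response header is resolved against
+        # the original absolute URL before re-dispatching)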
+ redirect_location = urljoin(url, redirect_location) + + # RFC 7231, Section 6.4.4 + if response.status == 303: + method = 'GET' + + retries = kw.get('retries') + if not isinstance(retries, Retry): + retries = Retry.from_int(retries, redirect=redirect) + + # Strip headers marked as unsafe to forward to the redirected location. + # Check remove_headers_on_redirect to avoid a potential network call within + # conn.is_same_host() which may use socket.gethostbyname() in the future. + if (retries.remove_headers_on_redirect + and not conn.is_same_host(redirect_location)): + for header in retries.remove_headers_on_redirect: + kw['headers'].pop(header, None) + + try: + retries = retries.increment(method, url, response=response, _pool=conn) + except MaxRetryError: + if retries.raise_on_redirect: + raise + return response + + kw['retries'] = retries + kw['redirect'] = redirect + + log.info("Redirecting %s -> %s", url, redirect_location) + return self.urlopen(method, redirect_location, **kw) + + +class ProxyManager(PoolManager): + """ + Behaves just like :class:`PoolManager`, but sends all requests through + the defined proxy, using the CONNECT method for HTTPS URLs. + + :param proxy_url: + The URL of the proxy to be used. + + :param proxy_headers: + A dictionary containing headers that will be sent to the proxy. In case + of HTTP they are being sent with each request, while in the + HTTPS/CONNECT case they are sent only once. Could be used for proxy + authentication. + + Example: + >>> proxy = urllib3.ProxyManager('http://localhost:3128/') + >>> r1 = proxy.request('GET', 'http://google.com/') + >>> r2 = proxy.request('GET', 'http://httpbin.org/') + >>> len(proxy.pools) + 1 + >>> r3 = proxy.request('GET', 'https://httpbin.org/') + >>> r4 = proxy.request('GET', 'https://twitter.com/') + >>> len(proxy.pools) + 3 + + """ + + def __init__(self, proxy_url, num_pools=10, headers=None, + proxy_headers=None, **connection_pool_kw): + + if isinstance(proxy_url, HTTPConnectionPool): + proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, + proxy_url.port) + proxy = parse_url(proxy_url) + if not proxy.port: + port = port_by_scheme.get(proxy.scheme, 80) + proxy = proxy._replace(port=port) + + if proxy.scheme not in ("http", "https"): + raise ProxySchemeUnknown(proxy.scheme) + + self.proxy = proxy + self.proxy_headers = proxy_headers or {} + + connection_pool_kw['_proxy'] = self.proxy + connection_pool_kw['_proxy_headers'] = self.proxy_headers + + super(ProxyManager, self).__init__( + num_pools, headers, **connection_pool_kw) + + def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): + if scheme == "https": + return super(ProxyManager, self).connection_from_host( + host, port, scheme, pool_kwargs=pool_kwargs) + + return super(ProxyManager, self).connection_from_host( + self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs) + + def _set_proxy_headers(self, url, headers=None): + """ + Sets headers needed by proxies: specifically, the Accept and Host + headers. Only sets headers not provided by the user. + """ + headers_ = {'Accept': '*/*'} + + netloc = parse_url(url).netloc + if netloc: + headers_['Host'] = netloc + + if headers: + headers_.update(headers) + return headers_ + + def urlopen(self, method, url, redirect=True, **kw): + "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." + u = parse_url(url) + + if u.scheme == "http": + # For proxied HTTPS requests, httplib sets the necessary headers + # on the CONNECT to the proxy. 
For HTTP, we'll definitely + # need to set 'Host' at the very least. + headers = kw.get('headers', self.headers) + kw['headers'] = self._set_proxy_headers(url, headers) + + return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) + + +def proxy_from_url(url, **kw): + return ProxyManager(proxy_url=url, **kw) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/poolmanager.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/poolmanager.pyc new file mode 100644 index 0000000..0730aa2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/poolmanager.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/request.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/request.py new file mode 100644 index 0000000..8f2f44b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/request.py @@ -0,0 +1,150 @@ +from __future__ import absolute_import + +from .filepost import encode_multipart_formdata +from .packages.six.moves.urllib.parse import urlencode + + +__all__ = ['RequestMethods'] + + +class RequestMethods(object): + """ + Convenience mixin for classes who implement a :meth:`urlopen` method, such + as :class:`~urllib3.connectionpool.HTTPConnectionPool` and + :class:`~urllib3.poolmanager.PoolManager`. + + Provides behavior for making common types of HTTP request methods and + decides which type of request field encoding to use. + + Specifically, + + :meth:`.request_encode_url` is for sending requests whose fields are + encoded in the URL (such as GET, HEAD, DELETE). + + :meth:`.request_encode_body` is for sending requests whose fields are + encoded in the *body* of the request using multipart or www-form-urlencoded + (such as for POST, PUT, PATCH). + + :meth:`.request` is for making any kind of request, it will look up the + appropriate encoding format and use one of the above two methods to make + the request. + + Initializer parameters: + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + """ + + _encode_url_methods = {'DELETE', 'GET', 'HEAD', 'OPTIONS'} + + def __init__(self, headers=None): + self.headers = headers or {} + + def urlopen(self, method, url, body=None, headers=None, + encode_multipart=True, multipart_boundary=None, + **kw): # Abstract + raise NotImplementedError("Classes extending RequestMethods must implement " + "their own ``urlopen`` method.") + + def request(self, method, url, fields=None, headers=None, **urlopen_kw): + """ + Make a request using :meth:`urlopen` with the appropriate encoding of + ``fields`` based on the ``method`` used. + + This is a convenience method that requires the least amount of manual + effort. It can be used in most situations, while still having the + option to drop down to more specific methods when necessary, such as + :meth:`request_encode_url`, :meth:`request_encode_body`, + or even the lowest level :meth:`urlopen`. + """ + method = method.upper() + + urlopen_kw['request_url'] = url + + if method in self._encode_url_methods: + return self.request_encode_url(method, url, fields=fields, + headers=headers, + **urlopen_kw) + else: + return self.request_encode_body(method, url, fields=fields, + headers=headers, + **urlopen_kw) + + def request_encode_url(self, method, url, fields=None, headers=None, + **urlopen_kw): + """ + Make a request using :meth:`urlopen` with the ``fields`` encoded in + the url. 
This is useful for request methods like GET, HEAD, DELETE, etc. + """ + if headers is None: + headers = self.headers + + extra_kw = {'headers': headers} + extra_kw.update(urlopen_kw) + + if fields: + url += '?' + urlencode(fields) + + return self.urlopen(method, url, **extra_kw) + + def request_encode_body(self, method, url, fields=None, headers=None, + encode_multipart=True, multipart_boundary=None, + **urlopen_kw): + """ + Make a request using :meth:`urlopen` with the ``fields`` encoded in + the body. This is useful for request methods like POST, PUT, PATCH, etc. + + When ``encode_multipart=True`` (default), then + :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode + the payload with the appropriate content type. Otherwise + :meth:`urllib.urlencode` is used with the + 'application/x-www-form-urlencoded' content type. + + Multipart encoding must be used when posting files, and it's reasonably + safe to use it in other times too. However, it may break request + signing, such as with OAuth. + + Supports an optional ``fields`` parameter of key/value strings AND + key/filetuple. A filetuple is a (filename, data, MIME type) tuple where + the MIME type is optional. For example:: + + fields = { + 'foo': 'bar', + 'fakefile': ('foofile.txt', 'contents of foofile'), + 'realfile': ('barfile.txt', open('realfile').read()), + 'typedfile': ('bazfile.bin', open('bazfile').read(), + 'image/jpeg'), + 'nonamefile': 'contents of nonamefile field', + } + + When uploading a file, providing a filename (the first parameter of the + tuple) is optional but recommended to best mimic behavior of browsers. + + Note that if ``headers`` are supplied, the 'Content-Type' header will + be overwritten because it depends on the dynamic random boundary string + which is used to compose the body of the request. The random boundary + string can be explicitly set with the ``multipart_boundary`` parameter. 
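+
+        A minimal usage sketch, with ``http`` standing in for an already
+        constructed :class:`~urllib3.poolmanager.PoolManager`::
+
+            r = http.request_encode_body(
+                'POST', 'http://httpbin.org/post',
+                fields={'foo': 'bar'})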
+ """ + if headers is None: + headers = self.headers + + extra_kw = {'headers': {}} + + if fields: + if 'body' in urlopen_kw: + raise TypeError( + "request got values for both 'fields' and 'body', can only specify one.") + + if encode_multipart: + body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) + else: + body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' + + extra_kw['body'] = body + extra_kw['headers'] = {'Content-Type': content_type} + + extra_kw['headers'].update(headers) + extra_kw.update(urlopen_kw) + + return self.urlopen(method, url, **extra_kw) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/request.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/request.pyc new file mode 100644 index 0000000..0cad2bb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/request.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/response.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/response.py new file mode 100644 index 0000000..c112690 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/response.py @@ -0,0 +1,705 @@ +from __future__ import absolute_import +from contextlib import contextmanager +import zlib +import io +import logging +from socket import timeout as SocketTimeout +from socket import error as SocketError + +from ._collections import HTTPHeaderDict +from .exceptions import ( + BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError, + ResponseNotChunked, IncompleteRead, InvalidHeader +) +from .packages.six import string_types as basestring, PY3 +from .packages.six.moves import http_client as httplib +from .connection import HTTPException, BaseSSLError +from .util.response import is_fp_closed, is_response_to_head + +log = logging.getLogger(__name__) + + +class DeflateDecoder(object): + + def __init__(self): + self._first_try = True + self._data = b'' + self._obj = zlib.decompressobj() + + def __getattr__(self, name): + return getattr(self._obj, name) + + def decompress(self, data): + if not data: + return data + + if not self._first_try: + return self._obj.decompress(data) + + self._data += data + try: + decompressed = self._obj.decompress(data) + if decompressed: + self._first_try = False + self._data = None + return decompressed + except zlib.error: + self._first_try = False + self._obj = zlib.decompressobj(-zlib.MAX_WBITS) + try: + return self.decompress(self._data) + finally: + self._data = None + + +class GzipDecoderState(object): + + FIRST_MEMBER = 0 + OTHER_MEMBERS = 1 + SWALLOW_DATA = 2 + + +class GzipDecoder(object): + + def __init__(self): + self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) + self._state = GzipDecoderState.FIRST_MEMBER + + def __getattr__(self, name): + return getattr(self._obj, name) + + def decompress(self, data): + ret = bytearray() + if self._state == GzipDecoderState.SWALLOW_DATA or not data: + return bytes(ret) + while True: + try: + ret += self._obj.decompress(data) + except zlib.error: + previous_state = self._state + # Ignore data after the first error + self._state = GzipDecoderState.SWALLOW_DATA + if previous_state == GzipDecoderState.OTHER_MEMBERS: + # Allow trailing garbage acceptable in other gzip clients + return bytes(ret) + raise + data = self._obj.unused_data + if not data: + return bytes(ret) + self._state = GzipDecoderState.OTHER_MEMBERS + self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) + + +class 
MultiDecoder(object): + """ + From RFC7231: + If one or more encodings have been applied to a representation, the + sender that applied the encodings MUST generate a Content-Encoding + header field that lists the content codings in the order in which + they were applied. + """ + + def __init__(self, modes): + self._decoders = [_get_decoder(m.strip()) for m in modes.split(',')] + + def flush(self): + return self._decoders[0].flush() + + def decompress(self, data): + for d in reversed(self._decoders): + data = d.decompress(data) + return data + + +def _get_decoder(mode): + if ',' in mode: + return MultiDecoder(mode) + + if mode == 'gzip': + return GzipDecoder() + + return DeflateDecoder() + + +class HTTPResponse(io.IOBase): + """ + HTTP Response container. + + Backwards-compatible to httplib's HTTPResponse but the response ``body`` is + loaded and decoded on-demand when the ``data`` property is accessed. This + class is also compatible with the Python standard library's :mod:`io` + module, and can hence be treated as a readable object in the context of that + framework. + + Extra parameters for behaviour not present in httplib.HTTPResponse: + + :param preload_content: + If True, the response's body will be preloaded during construction. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param original_response: + When this HTTPResponse wrapper is generated from an httplib.HTTPResponse + object, it's convenient to include the original for debug purposes. It's + otherwise unused. + + :param retries: + The retries contains the last :class:`~urllib3.util.retry.Retry` that + was used during the request. + + :param enforce_content_length: + Enforce content length checking. Body returned by server must match + value of Content-Length header, if present. Otherwise, raise error. + """ + + CONTENT_DECODERS = ['gzip', 'deflate'] + REDIRECT_STATUSES = [301, 302, 303, 307, 308] + + def __init__(self, body='', headers=None, status=0, version=0, reason=None, + strict=0, preload_content=True, decode_content=True, + original_response=None, pool=None, connection=None, msg=None, + retries=None, enforce_content_length=False, + request_method=None, request_url=None): + + if isinstance(headers, HTTPHeaderDict): + self.headers = headers + else: + self.headers = HTTPHeaderDict(headers) + self.status = status + self.version = version + self.reason = reason + self.strict = strict + self.decode_content = decode_content + self.retries = retries + self.enforce_content_length = enforce_content_length + + self._decoder = None + self._body = None + self._fp = None + self._original_response = original_response + self._fp_bytes_read = 0 + self.msg = msg + self._request_url = request_url + + if body and isinstance(body, (basestring, bytes)): + self._body = body + + self._pool = pool + self._connection = connection + + if hasattr(body, 'read'): + self._fp = body + + # Are we using the chunked-style of transfer encoding? + self.chunked = False + self.chunk_left = None + tr_enc = self.headers.get('transfer-encoding', '').lower() + # Don't incur the penalty of creating a list and then discarding it + encodings = (enc.strip() for enc in tr_enc.split(",")) + if "chunked" in encodings: + self.chunked = True + + # Determine length of response + self.length_remaining = self._init_length(request_method) + + # If requested, preload the body. 
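+        # (i.e. eagerly read the whole body now, so the ``data`` property
+        # can later be served from memory.)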
+ if preload_content and not self._body: + self._body = self.read(decode_content=decode_content) + + def get_redirect_location(self): + """ + Should we redirect and where to? + + :returns: Truthy redirect location string if we got a redirect status + code and valid location. ``None`` if redirect status and no + location. ``False`` if not a redirect status code. + """ + if self.status in self.REDIRECT_STATUSES: + return self.headers.get('location') + + return False + + def release_conn(self): + if not self._pool or not self._connection: + return + + self._pool._put_conn(self._connection) + self._connection = None + + @property + def data(self): + # For backwords-compat with earlier urllib3 0.4 and earlier. + if self._body: + return self._body + + if self._fp: + return self.read(cache_content=True) + + @property + def connection(self): + return self._connection + + def isclosed(self): + return is_fp_closed(self._fp) + + def tell(self): + """ + Obtain the number of bytes pulled over the wire so far. May differ from + the amount of content returned by :meth:``HTTPResponse.read`` if bytes + are encoded on the wire (e.g, compressed). + """ + return self._fp_bytes_read + + def _init_length(self, request_method): + """ + Set initial length value for Response content if available. + """ + length = self.headers.get('content-length') + + if length is not None: + if self.chunked: + # This Response will fail with an IncompleteRead if it can't be + # received as chunked. This method falls back to attempt reading + # the response before raising an exception. + log.warning("Received response with both Content-Length and " + "Transfer-Encoding set. This is expressly forbidden " + "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " + "attempting to process response as Transfer-Encoding: " + "chunked.") + return None + + try: + # RFC 7230 section 3.3.2 specifies multiple content lengths can + # be sent in a single Content-Length header + # (e.g. Content-Length: 42, 42). This line ensures the values + # are all valid ints and that as long as the `set` length is 1, + # all values are the same. Otherwise, the header is invalid. + lengths = set([int(val) for val in length.split(',')]) + if len(lengths) > 1: + raise InvalidHeader("Content-Length contained multiple " + "unmatching values (%s)" % length) + length = lengths.pop() + except ValueError: + length = None + else: + if length < 0: + length = None + + # Convert status to int for comparison + # In some cases, httplib returns a status of "_UNKNOWN" + try: + status = int(self.status) + except ValueError: + status = 0 + + # Check for responses that shouldn't include a body + if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD': + length = 0 + + return length + + def _init_decoder(self): + """ + Set-up the _decoder attribute if necessary. + """ + # Note: content-encoding value should be case-insensitive, per RFC 7230 + # Section 3.2 + content_encoding = self.headers.get('content-encoding', '').lower() + if self._decoder is None: + if content_encoding in self.CONTENT_DECODERS: + self._decoder = _get_decoder(content_encoding) + elif ',' in content_encoding: + encodings = [e.strip() for e in content_encoding.split(',') if e.strip() in self.CONTENT_DECODERS] + if len(encodings): + self._decoder = _get_decoder(content_encoding) + + def _decode(self, data, decode_content, flush_decoder): + """ + Decode the data passed in and potentially flush the decoder. 
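+
+        ``flush_decoder`` should be true for the final chunk so that any
+        bytes still buffered inside the decompressor are emitted.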
+ """ + try: + if decode_content and self._decoder: + data = self._decoder.decompress(data) + except (IOError, zlib.error) as e: + content_encoding = self.headers.get('content-encoding', '').lower() + raise DecodeError( + "Received response with content-encoding: %s, but " + "failed to decode it." % content_encoding, e) + + if flush_decoder and decode_content: + data += self._flush_decoder() + + return data + + def _flush_decoder(self): + """ + Flushes the decoder. Should only be called if the decoder is actually + being used. + """ + if self._decoder: + buf = self._decoder.decompress(b'') + return buf + self._decoder.flush() + + return b'' + + @contextmanager + def _error_catcher(self): + """ + Catch low-level python exceptions, instead re-raising urllib3 + variants, so that low-level exceptions are not leaked in the + high-level api. + + On exit, release the connection back to the pool. + """ + clean_exit = False + + try: + try: + yield + + except SocketTimeout: + # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but + # there is yet no clean way to get at it from this context. + raise ReadTimeoutError(self._pool, None, 'Read timed out.') + + except BaseSSLError as e: + # FIXME: Is there a better way to differentiate between SSLErrors? + if 'read operation timed out' not in str(e): # Defensive: + # This shouldn't happen but just in case we're missing an edge + # case, let's avoid swallowing SSL errors. + raise + + raise ReadTimeoutError(self._pool, None, 'Read timed out.') + + except (HTTPException, SocketError) as e: + # This includes IncompleteRead. + raise ProtocolError('Connection broken: %r' % e, e) + + # If no exception is thrown, we should avoid cleaning up + # unnecessarily. + clean_exit = True + finally: + # If we didn't terminate cleanly, we need to throw away our + # connection. + if not clean_exit: + # The response may not be closed but we're not going to use it + # anymore so close it now to ensure that the connection is + # released back to the pool. + if self._original_response: + self._original_response.close() + + # Closing the response may not actually be sufficient to close + # everything, so if we have a hold of the connection close that + # too. + if self._connection: + self._connection.close() + + # If we hold the original response but it's closed now, we should + # return the connection back to the pool. + if self._original_response and self._original_response.isclosed(): + self.release_conn() + + def read(self, amt=None, decode_content=None, cache_content=False): + """ + Similar to :meth:`httplib.HTTPResponse.read`, but with two additional + parameters: ``decode_content`` and ``cache_content``. + + :param amt: + How much of the content to read. If specified, caching is skipped + because it doesn't make sense to cache partial content as the full + response. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param cache_content: + If True, will save the returned data such that the same result is + returned despite of the state of the underlying file object. This + is useful if you want the ``.data`` property to continue working + after having ``.read()`` the file object. (Overridden if ``amt`` is + set.) 
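+
+        A small sketch of incremental reads (``resp`` is an instance
+        created with ``preload_content=False``; ``handle`` is a
+        placeholder for the caller's own processing)::
+
+            while True:
+                chunk = resp.read(1024)
+                if not chunk:
+                    break
+                handle(chunk)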
+ """ + self._init_decoder() + if decode_content is None: + decode_content = self.decode_content + + if self._fp is None: + return + + flush_decoder = False + data = None + + with self._error_catcher(): + if amt is None: + # cStringIO doesn't like amt=None + data = self._fp.read() + flush_decoder = True + else: + cache_content = False + data = self._fp.read(amt) + if amt != 0 and not data: # Platform-specific: Buggy versions of Python. + # Close the connection when no data is returned + # + # This is redundant to what httplib/http.client _should_ + # already do. However, versions of python released before + # December 15, 2012 (http://bugs.python.org/issue16298) do + # not properly close the connection in all cases. There is + # no harm in redundantly calling close. + self._fp.close() + flush_decoder = True + if self.enforce_content_length and self.length_remaining not in (0, None): + # This is an edge case that httplib failed to cover due + # to concerns of backward compatibility. We're + # addressing it here to make sure IncompleteRead is + # raised during streaming, so all calls with incorrect + # Content-Length are caught. + raise IncompleteRead(self._fp_bytes_read, self.length_remaining) + + if data: + self._fp_bytes_read += len(data) + if self.length_remaining is not None: + self.length_remaining -= len(data) + + data = self._decode(data, decode_content, flush_decoder) + + if cache_content: + self._body = data + + return data + + def stream(self, amt=2**16, decode_content=None): + """ + A generator wrapper for the read() method. A call will block until + ``amt`` bytes have been read from the connection or until the + connection is closed. + + :param amt: + How much of the content to read. The generator will return up to + much data per iteration, but may return less. This is particularly + likely when using compressed data. However, the empty string will + never be returned. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + """ + if self.chunked and self.supports_chunked_reads(): + for line in self.read_chunked(amt, decode_content=decode_content): + yield line + else: + while not is_fp_closed(self._fp): + data = self.read(amt=amt, decode_content=decode_content) + + if data: + yield data + + @classmethod + def from_httplib(ResponseCls, r, **response_kw): + """ + Given an :class:`httplib.HTTPResponse` instance ``r``, return a + corresponding :class:`urllib3.response.HTTPResponse` object. + + Remaining parameters are passed to the HTTPResponse constructor, along + with ``original_response=r``. 
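+
+        Typical use, with ``r`` obtained from an ``httplib`` connection::
+
+            resp = HTTPResponse.from_httplib(r, preload_content=False)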
+ """ + headers = r.msg + + if not isinstance(headers, HTTPHeaderDict): + if PY3: # Python 3 + headers = HTTPHeaderDict(headers.items()) + else: # Python 2 + headers = HTTPHeaderDict.from_httplib(headers) + + # HTTPResponse objects in Python 3 don't have a .strict attribute + strict = getattr(r, 'strict', 0) + resp = ResponseCls(body=r, + headers=headers, + status=r.status, + version=r.version, + reason=r.reason, + strict=strict, + original_response=r, + **response_kw) + return resp + + # Backwards-compatibility methods for httplib.HTTPResponse + def getheaders(self): + return self.headers + + def getheader(self, name, default=None): + return self.headers.get(name, default) + + # Backwards compatibility for http.cookiejar + def info(self): + return self.headers + + # Overrides from io.IOBase + def close(self): + if not self.closed: + self._fp.close() + + if self._connection: + self._connection.close() + + @property + def closed(self): + if self._fp is None: + return True + elif hasattr(self._fp, 'isclosed'): + return self._fp.isclosed() + elif hasattr(self._fp, 'closed'): + return self._fp.closed + else: + return True + + def fileno(self): + if self._fp is None: + raise IOError("HTTPResponse has no file to get a fileno from") + elif hasattr(self._fp, "fileno"): + return self._fp.fileno() + else: + raise IOError("The file-like object this HTTPResponse is wrapped " + "around has no file descriptor") + + def flush(self): + if self._fp is not None and hasattr(self._fp, 'flush'): + return self._fp.flush() + + def readable(self): + # This method is required for `io` module compatibility. + return True + + def readinto(self, b): + # This method is required for `io` module compatibility. + temp = self.read(len(b)) + if len(temp) == 0: + return 0 + else: + b[:len(temp)] = temp + return len(temp) + + def supports_chunked_reads(self): + """ + Checks if the underlying file-like object looks like a + httplib.HTTPResponse object. We do this by testing for the fp + attribute. If it is present we assume it returns raw chunks as + processed by read_chunked(). + """ + return hasattr(self._fp, 'fp') + + def _update_chunk_length(self): + # First, we'll figure out length of a chunk and then + # we'll try to read it from socket. + if self.chunk_left is not None: + return + line = self._fp.fp.readline() + line = line.split(b';', 1)[0] + try: + self.chunk_left = int(line, 16) + except ValueError: + # Invalid chunked protocol response, abort. + self.close() + raise httplib.IncompleteRead(line) + + def _handle_chunk(self, amt): + returned_chunk = None + if amt is None: + chunk = self._fp._safe_read(self.chunk_left) + returned_chunk = chunk + self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. + self.chunk_left = None + elif amt < self.chunk_left: + value = self._fp._safe_read(amt) + self.chunk_left = self.chunk_left - amt + returned_chunk = value + elif amt == self.chunk_left: + value = self._fp._safe_read(amt) + self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. + self.chunk_left = None + returned_chunk = value + else: # amt > self.chunk_left + returned_chunk = self._fp._safe_read(self.chunk_left) + self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. + self.chunk_left = None + return returned_chunk + + def read_chunked(self, amt=None, decode_content=None): + """ + Similar to :meth:`HTTPResponse.read`, but with an additional + parameter: ``decode_content``. + + :param amt: + How much of the content to read. 
If specified, caching is skipped + because it doesn't make sense to cache partial content as the full + response. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + """ + self._init_decoder() + # FIXME: Rewrite this method and make it a class with a better structured logic. + if not self.chunked: + raise ResponseNotChunked( + "Response is not chunked. " + "Header 'transfer-encoding: chunked' is missing.") + if not self.supports_chunked_reads(): + raise BodyNotHttplibCompatible( + "Body should be httplib.HTTPResponse like. " + "It should have have an fp attribute which returns raw chunks.") + + with self._error_catcher(): + # Don't bother reading the body of a HEAD request. + if self._original_response and is_response_to_head(self._original_response): + self._original_response.close() + return + + # If a response is already read and closed + # then return immediately. + if self._fp.fp is None: + return + + while True: + self._update_chunk_length() + if self.chunk_left == 0: + break + chunk = self._handle_chunk(amt) + decoded = self._decode(chunk, decode_content=decode_content, + flush_decoder=False) + if decoded: + yield decoded + + if decode_content: + # On CPython and PyPy, we should never need to flush the + # decoder. However, on Jython we *might* need to, so + # lets defensively do it anyway. + decoded = self._flush_decoder() + if decoded: # Platform-specific: Jython. + yield decoded + + # Chunk content ends with \r\n: discard it. + while True: + line = self._fp.fp.readline() + if not line: + # Some sites may not end with '\r\n'. + break + if line == b'\r\n': + break + + # We read everything; close the "file". + if self._original_response: + self._original_response.close() + + def geturl(self): + """ + Returns the URL that was the source of this response. + If the request that generated this response redirected, this method + will return the final redirect location. + """ + if self.retries is not None and len(self.retries.history): + return self.retries.history[-1].redirect_location + else: + return self._request_url diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/response.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/response.pyc new file mode 100644 index 0000000..0e8ade8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/response.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/__init__.py new file mode 100644 index 0000000..2f2770b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/__init__.py @@ -0,0 +1,54 @@ +from __future__ import absolute_import +# For backwards compatibility, provide imports that used to be here. 
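+# e.g. ``from urllib3.util import Timeout, Retry`` keeps working even
+# though the implementations now live in the submodules imported below.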
+from .connection import is_connection_dropped +from .request import make_headers +from .response import is_fp_closed +from .ssl_ import ( + SSLContext, + HAS_SNI, + IS_PYOPENSSL, + IS_SECURETRANSPORT, + assert_fingerprint, + resolve_cert_reqs, + resolve_ssl_version, + ssl_wrap_socket, +) +from .timeout import ( + current_time, + Timeout, +) + +from .retry import Retry +from .url import ( + get_host, + parse_url, + split_first, + Url, +) +from .wait import ( + wait_for_read, + wait_for_write +) + +__all__ = ( + 'HAS_SNI', + 'IS_PYOPENSSL', + 'IS_SECURETRANSPORT', + 'SSLContext', + 'Retry', + 'Timeout', + 'Url', + 'assert_fingerprint', + 'current_time', + 'is_connection_dropped', + 'is_fp_closed', + 'get_host', + 'parse_url', + 'make_headers', + 'resolve_cert_reqs', + 'resolve_ssl_version', + 'split_first', + 'ssl_wrap_socket', + 'wait_for_read', + 'wait_for_write' +) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/__init__.pyc new file mode 100644 index 0000000..b848289 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/connection.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/connection.py new file mode 100644 index 0000000..5ad70b2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/connection.py @@ -0,0 +1,134 @@ +from __future__ import absolute_import +import socket +from .wait import NoWayToWaitForSocketError, wait_for_read +from ..contrib import _appengine_environ + + +def is_connection_dropped(conn): # Platform-specific + """ + Returns True if the connection is dropped and should be closed. + + :param conn: + :class:`httplib.HTTPConnection` object. + + Note: For platforms like AppEngine, this will always return ``False`` to + let the platform handle connection recycling transparently for us. + """ + sock = getattr(conn, 'sock', False) + if sock is False: # Platform-specific: AppEngine + return False + if sock is None: # Connection already closed (such as by httplib). + return True + try: + # Returns True if readable, which here means it's been dropped + return wait_for_read(sock, timeout=0.0) + except NoWayToWaitForSocketError: # Platform-specific: AppEngine + return False + + +# This function is copied from socket.py in the Python 2.7 standard +# library test suite. Added to its signature is only `socket_options`. +# One additional modification is that we avoid binding to IPv6 servers +# discovered in DNS if the system doesn't have IPv6 functionality. +def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, socket_options=None): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + An host of '' or port 0 tells the OS to use the default. 
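+
+    For example, ``create_connection(('example.com', 80), timeout=10)``
+    returns a connected socket, or raises the last ``socket.error`` seen
+    if every resolved address fails.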
+ """ + + host, port = address + if host.startswith('['): + host = host.strip('[]') + err = None + + # Using the value from allowed_gai_family() in the context of getaddrinfo lets + # us select whether to work with IPv4 DNS records, IPv6 records, or both. + # The original create_connection function always returns all records. + family = allowed_gai_family() + + for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket.socket(af, socktype, proto) + + # If provided, set socket level options before connecting. + _set_socket_options(sock, socket_options) + + if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + return sock + + except socket.error as e: + err = e + if sock is not None: + sock.close() + sock = None + + if err is not None: + raise err + + raise socket.error("getaddrinfo returns an empty list") + + +def _set_socket_options(sock, options): + if options is None: + return + + for opt in options: + sock.setsockopt(*opt) + + +def allowed_gai_family(): + """This function is designed to work in the context of + getaddrinfo, where family=socket.AF_UNSPEC is the default and + will perform a DNS search for both IPv6 and IPv4 records.""" + + family = socket.AF_INET + if HAS_IPV6: + family = socket.AF_UNSPEC + return family + + +def _has_ipv6(host): + """ Returns True if the system can bind an IPv6 address. """ + sock = None + has_ipv6 = False + + # App Engine doesn't support IPV6 sockets and actually has a quota on the + # number of sockets that can be used, so just early out here instead of + # creating a socket needlessly. + # See https://github.com/urllib3/urllib3/issues/1446 + if _appengine_environ.is_appengine_sandbox(): + return False + + if socket.has_ipv6: + # has_ipv6 returns true if cPython was compiled with IPv6 support. + # It does not tell us if the system has IPv6 support enabled. To + # determine that we must bind to an IPv6 address. + # https://github.com/shazow/urllib3/pull/611 + # https://bugs.python.org/issue658327 + try: + sock = socket.socket(socket.AF_INET6) + sock.bind((host, 0)) + has_ipv6 = True + except Exception: + pass + + if sock: + sock.close() + return has_ipv6 + + +HAS_IPV6 = _has_ipv6('::1') diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/connection.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/connection.pyc new file mode 100644 index 0000000..ec50afb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/connection.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/queue.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/queue.py new file mode 100644 index 0000000..d3d379a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/queue.py @@ -0,0 +1,21 @@ +import collections +from ..packages import six +from ..packages.six.moves import queue + +if six.PY2: + # Queue is imported for side effects on MS Windows. See issue #229. 
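+    # The alias is deliberately never used; only the side effect of the
+    # import matters, hence the ``noqa`` marker below.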
+ import Queue as _unused_module_Queue # noqa: F401 + + +class LifoQueue(queue.Queue): + def _init(self, _): + self.queue = collections.deque() + + def _qsize(self, len=len): + return len(self.queue) + + def _put(self, item): + self.queue.append(item) + + def _get(self): + return self.queue.pop() diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/queue.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/queue.pyc new file mode 100644 index 0000000..a7953b3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/queue.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/request.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/request.py new file mode 100644 index 0000000..3ddfcd5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/request.py @@ -0,0 +1,118 @@ +from __future__ import absolute_import +from base64 import b64encode + +from ..packages.six import b, integer_types +from ..exceptions import UnrewindableBodyError + +ACCEPT_ENCODING = 'gzip,deflate' +_FAILEDTELL = object() + + +def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, + basic_auth=None, proxy_basic_auth=None, disable_cache=None): + """ + Shortcuts for generating request headers. + + :param keep_alive: + If ``True``, adds 'connection: keep-alive' header. + + :param accept_encoding: + Can be a boolean, list, or string. + ``True`` translates to 'gzip,deflate'. + List will get joined by comma. + String will be used as provided. + + :param user_agent: + String representing the user-agent you want, such as + "python-urllib3/0.6" + + :param basic_auth: + Colon-separated username:password string for 'authorization: basic ...' + auth header. + + :param proxy_basic_auth: + Colon-separated username:password string for 'proxy-authorization: basic ...' + auth header. + + :param disable_cache: + If ``True``, adds 'cache-control: no-cache' header. + + Example:: + + >>> make_headers(keep_alive=True, user_agent="Batman/1.0") + {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} + >>> make_headers(accept_encoding=True) + {'accept-encoding': 'gzip,deflate'} + """ + headers = {} + if accept_encoding: + if isinstance(accept_encoding, str): + pass + elif isinstance(accept_encoding, list): + accept_encoding = ','.join(accept_encoding) + else: + accept_encoding = ACCEPT_ENCODING + headers['accept-encoding'] = accept_encoding + + if user_agent: + headers['user-agent'] = user_agent + + if keep_alive: + headers['connection'] = 'keep-alive' + + if basic_auth: + headers['authorization'] = 'Basic ' + \ + b64encode(b(basic_auth)).decode('utf-8') + + if proxy_basic_auth: + headers['proxy-authorization'] = 'Basic ' + \ + b64encode(b(proxy_basic_auth)).decode('utf-8') + + if disable_cache: + headers['cache-control'] = 'no-cache' + + return headers + + +def set_file_position(body, pos): + """ + If a position is provided, move file to that point. + Otherwise, we'll attempt to record a position for future use. + """ + if pos is not None: + rewind_body(body, pos) + elif getattr(body, 'tell', None) is not None: + try: + pos = body.tell() + except (IOError, OSError): + # This differentiates from None, allowing us to catch + # a failed `tell()` later when trying to rewind the body. + pos = _FAILEDTELL + + return pos + + +def rewind_body(body, body_pos): + """ + Attempt to rewind body to a certain position. + Primarily used for request redirects and retries. 
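+
+    A sketch of the intended pairing, where ``body`` is any seekable
+    file-like object::
+
+        pos = set_file_position(body, None)  # before the first attempt
+        rewind_body(body, pos)               # before each retry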
+ + :param body: + File-like object that supports seek. + + :param int pos: + Position to seek to in file. + """ + body_seek = getattr(body, 'seek', None) + if body_seek is not None and isinstance(body_pos, integer_types): + try: + body_seek(body_pos) + except (IOError, OSError): + raise UnrewindableBodyError("An error occurred when rewinding request " + "body for redirect/retry.") + elif body_pos is _FAILEDTELL: + raise UnrewindableBodyError("Unable to record file position for rewinding " + "request body during a redirect/retry.") + else: + raise ValueError("body_pos must be of type integer, " + "instead it was %s." % type(body_pos)) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/request.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/request.pyc new file mode 100644 index 0000000..958f7af Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/request.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/response.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/response.py new file mode 100644 index 0000000..3d54864 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/response.py @@ -0,0 +1,87 @@ +from __future__ import absolute_import +from ..packages.six.moves import http_client as httplib + +from ..exceptions import HeaderParsingError + + +def is_fp_closed(obj): + """ + Checks whether a given file-like object is closed. + + :param obj: + The file-like object to check. + """ + + try: + # Check `isclosed()` first, in case Python3 doesn't set `closed`. + # GH Issue #928 + return obj.isclosed() + except AttributeError: + pass + + try: + # Check via the official file-like-object way. + return obj.closed + except AttributeError: + pass + + try: + # Check if the object is a container for another file-like object that + # gets released on exhaustion (e.g. HTTPResponse). + return obj.fp is None + except AttributeError: + pass + + raise ValueError("Unable to determine whether fp is closed.") + + +def assert_header_parsing(headers): + """ + Asserts whether all headers have been successfully parsed. + Extracts encountered errors from the result of parsing headers. + + Only works on Python 3. + + :param headers: Headers to verify. + :type headers: `httplib.HTTPMessage`. + + :raises urllib3.exceptions.HeaderParsingError: + If parsing errors are found. + """ + + # This will fail silently if we pass in the wrong kind of parameter. + # To make debugging easier add an explicit check. + if not isinstance(headers, httplib.HTTPMessage): + raise TypeError('expected httplib.Message, got {0}.'.format( + type(headers))) + + defects = getattr(headers, 'defects', None) + get_payload = getattr(headers, 'get_payload', None) + + unparsed_data = None + if get_payload: + # get_payload is actually email.message.Message.get_payload; + # we're only interested in the result if it's not a multipart message + if not headers.is_multipart(): + payload = get_payload() + + if isinstance(payload, (bytes, str)): + unparsed_data = payload + + if defects or unparsed_data: + raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) + + +def is_response_to_head(response): + """ + Checks whether the request of a response has been a HEAD-request. + Handles the quirks of AppEngine. + + :param conn: + :type conn: :class:`httplib.HTTPResponse` + """ + # FIXME: Can we do this somehow without accessing private httplib _method? 
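+    # ``_method`` is a string under CPython but an int under AppEngine,
+    # which the check below accounts for.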
+ method = response._method + if isinstance(method, int): # Platform-specific: Appengine + return method == 3 + return method.upper() == 'HEAD' diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/response.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/response.pyc new file mode 100644 index 0000000..9936e88 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/response.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/retry.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/retry.py new file mode 100644 index 0000000..e7d0abd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/retry.py @@ -0,0 +1,411 @@ +from __future__ import absolute_import +import time +import logging +from collections import namedtuple +from itertools import takewhile +import email +import re + +from ..exceptions import ( + ConnectTimeoutError, + MaxRetryError, + ProtocolError, + ReadTimeoutError, + ResponseError, + InvalidHeader, +) +from ..packages import six + + +log = logging.getLogger(__name__) + + +# Data structure for representing the metadata of requests that result in a retry. +RequestHistory = namedtuple('RequestHistory', ["method", "url", "error", + "status", "redirect_location"]) + + +class Retry(object): + """ Retry configuration. + + Each retry attempt will create a new Retry object with updated values, so + they can be safely reused. + + Retries can be defined as a default for a pool:: + + retries = Retry(connect=5, read=2, redirect=5) + http = PoolManager(retries=retries) + response = http.request('GET', 'http://example.com/') + + Or per-request (which overrides the default for the pool):: + + response = http.request('GET', 'http://example.com/', retries=Retry(10)) + + Retries can be disabled by passing ``False``:: + + response = http.request('GET', 'http://example.com/', retries=False) + + Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless + retries are disabled, in which case the causing exception will be raised. + + :param int total: + Total number of retries to allow. Takes precedence over other counts. + + Set to ``None`` to remove this constraint and fall back on other + counts. It's a good idea to set this to some sensibly-high value to + account for unexpected edge cases and avoid infinite retry loops. + + Set to ``0`` to fail on the first retry. + + Set to ``False`` to disable and imply ``raise_on_redirect=False``. + + :param int connect: + How many connection-related errors to retry on. + + These are errors raised before the request is sent to the remote server, + which we assume has not triggered the server to process the request. + + Set to ``0`` to fail on the first retry of this type. + + :param int read: + How many times to retry on read errors. + + These errors are raised after the request was sent to the server, so the + request may have side-effects. + + Set to ``0`` to fail on the first retry of this type. + + :param int redirect: + How many redirects to perform. Limit this to avoid infinite redirect + loops. + + A redirect is a HTTP response with a status code 301, 302, 303, 307 or + 308. + + Set to ``0`` to fail on the first retry of this type. + + Set to ``False`` to disable and imply ``raise_on_redirect=False``. + + :param int status: + How many times to retry on bad status codes. + + These are retries made on responses, where status code matches + ``status_forcelist``. 
+ + Set to ``0`` to fail on the first retry of this type. + + :param iterable method_whitelist: + Set of uppercased HTTP method verbs that we should retry on. + + By default, we only retry on methods which are considered to be + idempotent (multiple requests with the same parameters end with the + same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`. + + Set to a ``False`` value to retry on any verb. + + :param iterable status_forcelist: + A set of integer HTTP status codes that we should force a retry on. + A retry is initiated if the request method is in ``method_whitelist`` + and the response status code is in ``status_forcelist``. + + By default, this is disabled with ``None``. + + :param float backoff_factor: + A backoff factor to apply between attempts after the second try + (most errors are resolved immediately by a second try without a + delay). urllib3 will sleep for:: + + {backoff factor} * (2 ** ({number of total retries} - 1)) + + seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep + for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer + than :attr:`Retry.BACKOFF_MAX`. + + By default, backoff is disabled (set to 0). + + :param bool raise_on_redirect: Whether, if the number of redirects is + exhausted, to raise a MaxRetryError, or to return a response with a + response code in the 3xx range. + + :param bool raise_on_status: Similar meaning to ``raise_on_redirect``: + whether we should raise an exception, or return a response, + if status falls in ``status_forcelist`` range and retries have + been exhausted. + + :param tuple history: The history of the request encountered during + each call to :meth:`~Retry.increment`. The list is in the order + the requests occurred. Each list item is of class :class:`RequestHistory`. + + :param bool respect_retry_after_header: + Whether to respect Retry-After header on status codes defined as + :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not. + + :param iterable remove_headers_on_redirect: + Sequence of headers to remove from the request when a response + indicating a redirect is returned before firing off the redirected + request. + """ + + DEFAULT_METHOD_WHITELIST = frozenset([ + 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']) + + RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) + + DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization']) + + #: Maximum backoff time. 
+ BACKOFF_MAX = 120 + + def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, + method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, + backoff_factor=0, raise_on_redirect=True, raise_on_status=True, + history=None, respect_retry_after_header=True, + remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST): + + self.total = total + self.connect = connect + self.read = read + self.status = status + + if redirect is False or total is False: + redirect = 0 + raise_on_redirect = False + + self.redirect = redirect + self.status_forcelist = status_forcelist or set() + self.method_whitelist = method_whitelist + self.backoff_factor = backoff_factor + self.raise_on_redirect = raise_on_redirect + self.raise_on_status = raise_on_status + self.history = history or tuple() + self.respect_retry_after_header = respect_retry_after_header + self.remove_headers_on_redirect = remove_headers_on_redirect + + def new(self, **kw): + params = dict( + total=self.total, + connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, + method_whitelist=self.method_whitelist, + status_forcelist=self.status_forcelist, + backoff_factor=self.backoff_factor, + raise_on_redirect=self.raise_on_redirect, + raise_on_status=self.raise_on_status, + history=self.history, + remove_headers_on_redirect=self.remove_headers_on_redirect + ) + params.update(kw) + return type(self)(**params) + + @classmethod + def from_int(cls, retries, redirect=True, default=None): + """ Backwards-compatibility for the old retries format.""" + if retries is None: + retries = default if default is not None else cls.DEFAULT + + if isinstance(retries, Retry): + return retries + + redirect = bool(redirect) and None + new_retries = cls(retries, redirect=redirect) + log.debug("Converted retries value: %r -> %r", retries, new_retries) + return new_retries + + def get_backoff_time(self): + """ Formula for computing the current backoff + + :rtype: float + """ + # We want to consider only the last consecutive errors sequence (Ignore redirects). + consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None, + reversed(self.history)))) + if consecutive_errors_len <= 1: + return 0 + + backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1)) + return min(self.BACKOFF_MAX, backoff_value) + + def parse_retry_after(self, retry_after): + # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 + if re.match(r"^\s*[0-9]+\s*$", retry_after): + seconds = int(retry_after) + else: + retry_date_tuple = email.utils.parsedate(retry_after) + if retry_date_tuple is None: + raise InvalidHeader("Invalid Retry-After header: %s" % retry_after) + retry_date = time.mktime(retry_date_tuple) + seconds = retry_date - time.time() + + if seconds < 0: + seconds = 0 + + return seconds + + def get_retry_after(self, response): + """ Get the value of Retry-After in seconds. """ + + retry_after = response.getheader("Retry-After") + + if retry_after is None: + return None + + return self.parse_retry_after(retry_after) + + def sleep_for_retry(self, response=None): + retry_after = self.get_retry_after(response) + if retry_after: + time.sleep(retry_after) + return True + + return False + + def _sleep_backoff(self): + backoff = self.get_backoff_time() + if backoff <= 0: + return + time.sleep(backoff) + + def sleep(self, response=None): + """ Sleep between retry attempts. + + This method will respect a server's ``Retry-After`` response header + and sleep the duration of the time requested. 
If that is not present, it + will use an exponential backoff. By default, the backoff factor is 0 and + this method will return immediately. + """ + + if response: + slept = self.sleep_for_retry(response) + if slept: + return + + self._sleep_backoff() + + def _is_connection_error(self, err): + """ Errors when we're fairly sure that the server did not receive the + request, so it should be safe to retry. + """ + return isinstance(err, ConnectTimeoutError) + + def _is_read_error(self, err): + """ Errors that occur after the request has been started, so we should + assume that the server began processing it. + """ + return isinstance(err, (ReadTimeoutError, ProtocolError)) + + def _is_method_retryable(self, method): + """ Checks if a given HTTP method should be retried upon, depending if + it is included on the method whitelist. + """ + if self.method_whitelist and method.upper() not in self.method_whitelist: + return False + + return True + + def is_retry(self, method, status_code, has_retry_after=False): + """ Is this method/status code retryable? (Based on whitelists and control + variables such as the number of total retries to allow, whether to + respect the Retry-After header, whether this header is present, and + whether the returned status code is on the list of status codes to + be retried upon on the presence of the aforementioned header) + """ + if not self._is_method_retryable(method): + return False + + if self.status_forcelist and status_code in self.status_forcelist: + return True + + return (self.total and self.respect_retry_after_header and + has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES)) + + def is_exhausted(self): + """ Are we out of retries? """ + retry_counts = (self.total, self.connect, self.read, self.redirect, self.status) + retry_counts = list(filter(None, retry_counts)) + if not retry_counts: + return False + + return min(retry_counts) < 0 + + def increment(self, method=None, url=None, response=None, error=None, + _pool=None, _stacktrace=None): + """ Return a new Retry object with incremented retry counters. + + :param response: A response object, or None, if the server did not + return a response. + :type response: :class:`~urllib3.response.HTTPResponse` + :param Exception error: An error encountered during the request, or + None if the response was received successfully. + + :return: A new ``Retry`` object. + """ + if self.total is False and error: + # Disabled, indicate to re-raise the error. + raise six.reraise(type(error), error, _stacktrace) + + total = self.total + if total is not None: + total -= 1 + + connect = self.connect + read = self.read + redirect = self.redirect + status_count = self.status + cause = 'unknown' + status = None + redirect_location = None + + if error and self._is_connection_error(error): + # Connect retry? + if connect is False: + raise six.reraise(type(error), error, _stacktrace) + elif connect is not None: + connect -= 1 + + elif error and self._is_read_error(error): + # Read retry? + if read is False or not self._is_method_retryable(method): + raise six.reraise(type(error), error, _stacktrace) + elif read is not None: + read -= 1 + + elif response and response.get_redirect_location(): + # Redirect retry? 
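+            # ``redirect=False`` was normalised to 0 (with
+            # ``raise_on_redirect=False``) in __init__, so exhaustion here
+            # surfaces as MaxRetryError, which the caller may swallow.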
+ if redirect is not None: + redirect -= 1 + cause = 'too many redirects' + redirect_location = response.get_redirect_location() + status = response.status + + else: + # Incrementing because of a server error like a 500 in + # status_forcelist and a the given method is in the whitelist + cause = ResponseError.GENERIC_ERROR + if response and response.status: + if status_count is not None: + status_count -= 1 + cause = ResponseError.SPECIFIC_ERROR.format( + status_code=response.status) + status = response.status + + history = self.history + (RequestHistory(method, url, error, status, redirect_location),) + + new_retry = self.new( + total=total, + connect=connect, read=read, redirect=redirect, status=status_count, + history=history) + + if new_retry.is_exhausted(): + raise MaxRetryError(_pool, url, error or ResponseError(cause)) + + log.debug("Incremented Retry for (url='%s'): %r", url, new_retry) + + return new_retry + + def __repr__(self): + return ('{cls.__name__}(total={self.total}, connect={self.connect}, ' + 'read={self.read}, redirect={self.redirect}, status={self.status})').format( + cls=type(self), self=self) + + +# For backwards compatibility (equivalent to pre-v1.9): +Retry.DEFAULT = Retry(3) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/retry.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/retry.pyc new file mode 100644 index 0000000..e558997 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/retry.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/ssl_.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/ssl_.py new file mode 100644 index 0000000..dfc553f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/ssl_.py @@ -0,0 +1,381 @@ +from __future__ import absolute_import +import errno +import warnings +import hmac +import socket + +from binascii import hexlify, unhexlify +from hashlib import md5, sha1, sha256 + +from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning +from ..packages import six + + +SSLContext = None +HAS_SNI = False +IS_PYOPENSSL = False +IS_SECURETRANSPORT = False + +# Maps the length of a digest to a possible hash function producing this digest +HASHFUNC_MAP = { + 32: md5, + 40: sha1, + 64: sha256, +} + + +def _const_compare_digest_backport(a, b): + """ + Compare two digests of equal length in constant time. + + The digests must be of type str/bytes. + Returns True if the digests match, and False otherwise. + """ + result = abs(len(a) - len(b)) + for l, r in zip(bytearray(a), bytearray(b)): + result |= l ^ r + return result == 0 + + +_const_compare_digest = getattr(hmac, 'compare_digest', + _const_compare_digest_backport) + + +try: # Test for SSL features + import ssl + from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 + from ssl import HAS_SNI # Has SNI? +except ImportError: + pass + + +try: + from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION +except ImportError: + OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 + OP_NO_COMPRESSION = 0x20000 + + +# Python 2.7 doesn't have inet_pton on non-Linux so we fallback on inet_aton in +# those cases. This means that we can only detect IPv4 addresses in this case. +if hasattr(socket, 'inet_pton'): + inet_pton = socket.inet_pton +else: + # Maybe we can use ipaddress if the user has urllib3[secure]? 
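+    # (pip ships the ``ipaddress`` backport in its vendored packages, so
+    # this import usually succeeds on Python 2 as well.)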
+ try: + from pip._vendor import ipaddress + + def inet_pton(_, host): + if isinstance(host, bytes): + host = host.decode('ascii') + return ipaddress.ip_address(host) + + except ImportError: # Platform-specific: Non-Linux + def inet_pton(_, host): + return socket.inet_aton(host) + + +# A secure default. +# Sources for more information on TLS ciphers: +# +# - https://wiki.mozilla.org/Security/Server_Side_TLS +# - https://www.ssllabs.com/projects/best-practices/index.html +# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ +# +# The general intent is: +# - Prefer TLS 1.3 cipher suites +# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), +# - prefer ECDHE over DHE for better performance, +# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and +# security, +# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, +# - disable NULL authentication, MD5 MACs and DSS for security reasons. +DEFAULT_CIPHERS = ':'.join([ + 'TLS13-AES-256-GCM-SHA384', + 'TLS13-CHACHA20-POLY1305-SHA256', + 'TLS13-AES-128-GCM-SHA256', + 'ECDH+AESGCM', + 'ECDH+CHACHA20', + 'DH+AESGCM', + 'DH+CHACHA20', + 'ECDH+AES256', + 'DH+AES256', + 'ECDH+AES128', + 'DH+AES', + 'RSA+AESGCM', + 'RSA+AES', + '!aNULL', + '!eNULL', + '!MD5', +]) + +try: + from ssl import SSLContext # Modern SSL? +except ImportError: + import sys + + class SSLContext(object): # Platform-specific: Python 2 + def __init__(self, protocol_version): + self.protocol = protocol_version + # Use default values from a real SSLContext + self.check_hostname = False + self.verify_mode = ssl.CERT_NONE + self.ca_certs = None + self.options = 0 + self.certfile = None + self.keyfile = None + self.ciphers = None + + def load_cert_chain(self, certfile, keyfile): + self.certfile = certfile + self.keyfile = keyfile + + def load_verify_locations(self, cafile=None, capath=None): + self.ca_certs = cafile + + if capath is not None: + raise SSLError("CA directories not supported in older Pythons") + + def set_ciphers(self, cipher_suite): + self.ciphers = cipher_suite + + def wrap_socket(self, socket, server_hostname=None, server_side=False): + warnings.warn( + 'A true SSLContext object is not available. This prevents ' + 'urllib3 from configuring SSL appropriately and may cause ' + 'certain SSL connections to fail. You can upgrade to a newer ' + 'version of Python to solve this. For more information, see ' + 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' + '#ssl-warnings', + InsecurePlatformWarning + ) + kwargs = { + 'keyfile': self.keyfile, + 'certfile': self.certfile, + 'ca_certs': self.ca_certs, + 'cert_reqs': self.verify_mode, + 'ssl_version': self.protocol, + 'server_side': server_side, + } + return wrap_socket(socket, ciphers=self.ciphers, **kwargs) + + +def assert_fingerprint(cert, fingerprint): + """ + Checks if given fingerprint matches the supplied certificate. + + :param cert: + Certificate as bytes object. + :param fingerprint: + Fingerprint as string of hexdigits, can be interspersed by colons. + """ + + fingerprint = fingerprint.replace(':', '').lower() + digest_length = len(fingerprint) + hashfunc = HASHFUNC_MAP.get(digest_length) + if not hashfunc: + raise SSLError( + 'Fingerprint of invalid length: {0}'.format(fingerprint)) + + # We need encode() here for py32; works on py2 and p33. 
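+    # Illustrative call (hypothetical, truncated digest): a pin of 64 hex
+    # digits selects sha256 from HASHFUNC_MAP above, e.g.
+    #     assert_fingerprint(cert_der, '9f:86:d0:81:88:4c:...')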
+ fingerprint_bytes = unhexlify(fingerprint.encode()) + + cert_digest = hashfunc(cert).digest() + + if not _const_compare_digest(cert_digest, fingerprint_bytes): + raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' + .format(fingerprint, hexlify(cert_digest))) + + +def resolve_cert_reqs(candidate): + """ + Resolves the argument to a numeric constant, which can be passed to + the wrap_socket function/method from the ssl module. + Defaults to :data:`ssl.CERT_NONE`. + If given a string it is assumed to be the name of the constant in the + :mod:`ssl` module or its abbreviation. + (So you can specify `REQUIRED` instead of `CERT_REQUIRED`. + If it's neither `None` nor a string we assume it is already the numeric + constant which can directly be passed to wrap_socket. + """ + if candidate is None: + return CERT_NONE + + if isinstance(candidate, str): + res = getattr(ssl, candidate, None) + if res is None: + res = getattr(ssl, 'CERT_' + candidate) + return res + + return candidate + + +def resolve_ssl_version(candidate): + """ + like resolve_cert_reqs + """ + if candidate is None: + return PROTOCOL_SSLv23 + + if isinstance(candidate, str): + res = getattr(ssl, candidate, None) + if res is None: + res = getattr(ssl, 'PROTOCOL_' + candidate) + return res + + return candidate + + +def create_urllib3_context(ssl_version=None, cert_reqs=None, + options=None, ciphers=None): + """All arguments have the same meaning as ``ssl_wrap_socket``. + + By default, this function does a lot of the same work that + ``ssl.create_default_context`` does on Python 3.4+. It: + + - Disables SSLv2, SSLv3, and compression + - Sets a restricted set of server ciphers + + If you wish to enable SSLv3, you can do:: + + from pip._vendor.urllib3.util import ssl_ + context = ssl_.create_urllib3_context() + context.options &= ~ssl_.OP_NO_SSLv3 + + You can do the same to enable compression (substituting ``COMPRESSION`` + for ``SSLv3`` in the last line above). + + :param ssl_version: + The desired protocol version to use. This will default to + PROTOCOL_SSLv23 which will negotiate the highest protocol that both + the server and your installation of OpenSSL support. + :param cert_reqs: + Whether to require the certificate verification. This defaults to + ``ssl.CERT_REQUIRED``. + :param options: + Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, + ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. + :param ciphers: + Which cipher suites to allow the server to select. + :returns: + Constructed SSLContext object with specified options + :rtype: SSLContext + """ + context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) + + context.set_ciphers(ciphers or DEFAULT_CIPHERS) + + # Setting the default here, as we may have no ssl module on import + cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs + + if options is None: + options = 0 + # SSLv2 is easily broken and is considered harmful and dangerous + options |= OP_NO_SSLv2 + # SSLv3 has several problems and is now dangerous + options |= OP_NO_SSLv3 + # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ + # (issue #309) + options |= OP_NO_COMPRESSION + + context.options |= options + + context.verify_mode = cert_reqs + if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 + # We do our own verification, including fingerprints and alternative + # hostnames. 
So disable it here + context.check_hostname = False + return context + + +def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, + ca_certs=None, server_hostname=None, + ssl_version=None, ciphers=None, ssl_context=None, + ca_cert_dir=None): + """ + All arguments except for server_hostname, ssl_context, and ca_cert_dir have + the same meaning as they do when using :func:`ssl.wrap_socket`. + + :param server_hostname: + When SNI is supported, the expected hostname of the certificate + :param ssl_context: + A pre-made :class:`SSLContext` object. If none is provided, one will + be created using :func:`create_urllib3_context`. + :param ciphers: + A string of ciphers we wish the client to support. + :param ca_cert_dir: + A directory containing CA certificates in multiple separate files, as + supported by OpenSSL's -CApath flag or the capath argument to + SSLContext.load_verify_locations(). + """ + context = ssl_context + if context is None: + # Note: This branch of code and all the variables in it are no longer + # used by urllib3 itself. We should consider deprecating and removing + # this code. + context = create_urllib3_context(ssl_version, cert_reqs, + ciphers=ciphers) + + if ca_certs or ca_cert_dir: + try: + context.load_verify_locations(ca_certs, ca_cert_dir) + except IOError as e: # Platform-specific: Python 2.7 + raise SSLError(e) + # Py33 raises FileNotFoundError which subclasses OSError + # These are not equivalent unless we check the errno attribute + except OSError as e: # Platform-specific: Python 3.3 and beyond + if e.errno == errno.ENOENT: + raise SSLError(e) + raise + elif getattr(context, 'load_default_certs', None) is not None: + # try to load OS default certs; works well on Windows (require Python3.4+) + context.load_default_certs() + + if certfile: + context.load_cert_chain(certfile, keyfile) + + # If we detect server_hostname is an IP address then the SNI + # extension should not be used according to RFC3546 Section 3.1 + # We shouldn't warn the user if SNI isn't available but we would + # not be using SNI anyways due to IP address for server_hostname. + if ((server_hostname is not None and not is_ipaddress(server_hostname)) + or IS_SECURETRANSPORT): + if HAS_SNI and server_hostname is not None: + return context.wrap_socket(sock, server_hostname=server_hostname) + + warnings.warn( + 'An HTTPS request has been made, but the SNI (Server Name ' + 'Indication) extension to TLS is not available on this platform. ' + 'This may cause the server to present an incorrect TLS ' + 'certificate, which can cause validation failures. You can upgrade to ' + 'a newer version of Python to solve this. For more information, see ' + 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' + '#ssl-warnings', + SNIMissingWarning + ) + + return context.wrap_socket(sock) + + +def is_ipaddress(hostname): + """Detects whether the hostname given is an IP address. + + :param str hostname: Hostname to examine. + :return: True if the hostname is an IP address, False otherwise. + """ + if six.PY3 and isinstance(hostname, bytes): + # IDN A-label bytes are ASCII compatible. 
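+        # A-labels are the ASCII 'xn--' Punycode form, so this decode
+        # cannot fail for well-formed input.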
+ hostname = hostname.decode('ascii') + + families = [socket.AF_INET] + if hasattr(socket, 'AF_INET6'): + families.append(socket.AF_INET6) + + for af in families: + try: + inet_pton(af, hostname) + except (socket.error, ValueError, OSError): + pass + else: + return True + return False diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/ssl_.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/ssl_.pyc new file mode 100644 index 0000000..f34480c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/ssl_.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/timeout.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/timeout.py new file mode 100644 index 0000000..cec817e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/timeout.py @@ -0,0 +1,242 @@ +from __future__ import absolute_import +# The default socket timeout, used by httplib to indicate that no timeout was +# specified by the user +from socket import _GLOBAL_DEFAULT_TIMEOUT +import time + +from ..exceptions import TimeoutStateError + +# A sentinel value to indicate that no timeout was specified by the user in +# urllib3 +_Default = object() + + +# Use time.monotonic if available. +current_time = getattr(time, "monotonic", time.time) + + +class Timeout(object): + """ Timeout configuration. + + Timeouts can be defined as a default for a pool:: + + timeout = Timeout(connect=2.0, read=7.0) + http = PoolManager(timeout=timeout) + response = http.request('GET', 'http://example.com/') + + Or per-request (which overrides the default for the pool):: + + response = http.request('GET', 'http://example.com/', timeout=Timeout(10)) + + Timeouts can be disabled by setting all the parameters to ``None``:: + + no_timeout = Timeout(connect=None, read=None) + response = http.request('GET', 'http://example.com/, timeout=no_timeout) + + + :param total: + This combines the connect and read timeouts into one; the read timeout + will be set to the time leftover from the connect attempt. In the + event that both a connect timeout and a total are specified, or a read + timeout and a total are specified, the shorter timeout will be applied. + + Defaults to None. + + :type total: integer, float, or None + + :param connect: + The maximum amount of time to wait for a connection attempt to a server + to succeed. Omitting the parameter will default the connect timeout to + the system default, probably `the global default timeout in socket.py + <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. + None will set an infinite timeout for connection attempts. + + :type connect: integer, float, or None + + :param read: + The maximum amount of time to wait between consecutive + read operations for a response from the server. Omitting + the parameter will default the read timeout to the system + default, probably `the global default timeout in socket.py + <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. + None will set an infinite timeout. + + :type read: integer, float, or None + + .. note:: + + Many factors can affect the total amount of time for urllib3 to return + an HTTP response. + + For example, Python's DNS resolver does not obey the timeout specified + on the socket. Other factors that can affect total request time include + high CPU load, high swap, the program running at a low priority level, + or other behaviors. 
+ + In addition, the read and total timeouts only measure the time between + read operations on the socket connecting the client and the server, + not the total amount of time for the request to return a complete + response. For most requests, the timeout is raised because the server + has not sent the first byte in the specified time. This is not always + the case; if a server streams one byte every fifteen seconds, a timeout + of 20 seconds will not trigger, even though the request will take + several minutes to complete. + + If your goal is to cut off any request after a set amount of wall clock + time, consider having a second "watcher" thread to cut off a slow + request. + """ + + #: A sentinel object representing the default timeout value + DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT + + def __init__(self, total=None, connect=_Default, read=_Default): + self._connect = self._validate_timeout(connect, 'connect') + self._read = self._validate_timeout(read, 'read') + self.total = self._validate_timeout(total, 'total') + self._start_connect = None + + def __str__(self): + return '%s(connect=%r, read=%r, total=%r)' % ( + type(self).__name__, self._connect, self._read, self.total) + + @classmethod + def _validate_timeout(cls, value, name): + """ Check that a timeout attribute is valid. + + :param value: The timeout value to validate + :param name: The name of the timeout attribute to validate. This is + used to specify in error messages. + :return: The validated and casted version of the given value. + :raises ValueError: If it is a numeric value less than or equal to + zero, or the type is not an integer, float, or None. + """ + if value is _Default: + return cls.DEFAULT_TIMEOUT + + if value is None or value is cls.DEFAULT_TIMEOUT: + return value + + if isinstance(value, bool): + raise ValueError("Timeout cannot be a boolean value. It must " + "be an int, float or None.") + try: + float(value) + except (TypeError, ValueError): + raise ValueError("Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value)) + + try: + if value <= 0: + raise ValueError("Attempted to set %s timeout to %s, but the " + "timeout cannot be set to a value less " + "than or equal to 0." % (name, value)) + except TypeError: # Python 3 + raise ValueError("Timeout value %s was %s, but it must be an " + "int, float or None." % (name, value)) + + return value + + @classmethod + def from_float(cls, timeout): + """ Create a new Timeout from a legacy timeout value. + + The timeout value used by httplib.py sets the same timeout on the + connect(), and recv() socket requests. This creates a :class:`Timeout` + object that sets the individual timeouts to the ``timeout`` value + passed to this function. + + :param timeout: The legacy timeout value. + :type timeout: integer, float, sentinel default object, or None + :return: Timeout object + :rtype: :class:`Timeout` + """ + return Timeout(read=timeout, connect=timeout) + + def clone(self): + """ Create a copy of the timeout object + + Timeout properties are stored per-pool but each request needs a fresh + Timeout object to ensure each one has its own start/stop configured. + + :return: a copy of the timeout object + :rtype: :class:`Timeout` + """ + # We can't use copy.deepcopy because that will also create a new object + # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to + # detect the user default. 
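+        # Rebuilding through the constructor keeps the sentinel's identity,
+        # which the `is cls.DEFAULT_TIMEOUT` checks in this class rely on.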
+ return Timeout(connect=self._connect, read=self._read, + total=self.total) + + def start_connect(self): + """ Start the timeout clock, used during a connect() attempt + + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to start a timer that has been started already. + """ + if self._start_connect is not None: + raise TimeoutStateError("Timeout timer has already been started.") + self._start_connect = current_time() + return self._start_connect + + def get_connect_duration(self): + """ Gets the time elapsed since the call to :meth:`start_connect`. + + :return: Elapsed time. + :rtype: float + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to get duration for a timer that hasn't been started. + """ + if self._start_connect is None: + raise TimeoutStateError("Can't get connect duration for timer " + "that has not started.") + return current_time() - self._start_connect + + @property + def connect_timeout(self): + """ Get the value to use when setting a connection timeout. + + This will be a positive float or integer, the value None + (never timeout), or the default system timeout. + + :return: Connect timeout. + :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None + """ + if self.total is None: + return self._connect + + if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: + return self.total + + return min(self._connect, self.total) + + @property + def read_timeout(self): + """ Get the value for the read timeout. + + This assumes some time has elapsed in the connection timeout and + computes the read timeout appropriately. + + If self.total is set, the read timeout is dependent on the amount of + time taken by the connect timeout. If the connection time has not been + established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be + raised. + + :return: Value to use for the read timeout. + :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None + :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` + has not yet been called on this object. + """ + if (self.total is not None and + self.total is not self.DEFAULT_TIMEOUT and + self._read is not None and + self._read is not self.DEFAULT_TIMEOUT): + # In case the connect timeout has not yet been established. + if self._start_connect is None: + return self._read + return max(0, min(self.total - self.get_connect_duration(), + self._read)) + elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: + return max(0, self.total - self.get_connect_duration()) + else: + return self._read diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/timeout.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/timeout.pyc new file mode 100644 index 0000000..419235c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/timeout.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/url.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/url.py new file mode 100644 index 0000000..6b6f996 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/url.py @@ -0,0 +1,230 @@ +from __future__ import absolute_import +from collections import namedtuple + +from ..exceptions import LocationParseError + + +url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] + +# We only want to normalize urls with an HTTP(S) scheme. +# urllib3 infers URLs without a scheme (None) to be http. 
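+# Illustrative effect: Url(scheme='HTTP', host='Example.COM', path='/Path')
+# normalises to scheme 'http' and host 'example.com', while the path keeps
+# its case.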
+NORMALIZABLE_SCHEMES = ('http', 'https', None) + + +class Url(namedtuple('Url', url_attrs)): + """ + Datastructure for representing an HTTP URL. Used as a return value for + :func:`parse_url`. Both the scheme and host are normalized as they are + both case-insensitive according to RFC 3986. + """ + __slots__ = () + + def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, + query=None, fragment=None): + if path and not path.startswith('/'): + path = '/' + path + if scheme: + scheme = scheme.lower() + if host and scheme in NORMALIZABLE_SCHEMES: + host = host.lower() + return super(Url, cls).__new__(cls, scheme, auth, host, port, path, + query, fragment) + + @property + def hostname(self): + """For backwards-compatibility with urlparse. We're nice like that.""" + return self.host + + @property + def request_uri(self): + """Absolute path including the query string.""" + uri = self.path or '/' + + if self.query is not None: + uri += '?' + self.query + + return uri + + @property + def netloc(self): + """Network location including host and port""" + if self.port: + return '%s:%d' % (self.host, self.port) + return self.host + + @property + def url(self): + """ + Convert self into a url + + This function should more or less round-trip with :func:`.parse_url`. The + returned url may not be exactly the same as the url inputted to + :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls + with a blank port will have : removed). + + Example: :: + + >>> U = parse_url('http://google.com/mail/') + >>> U.url + 'http://google.com/mail/' + >>> Url('http', 'username:password', 'host.com', 80, + ... '/path', 'query', 'fragment').url + 'http://username:password@host.com:80/path?query#fragment' + """ + scheme, auth, host, port, path, query, fragment = self + url = '' + + # We use "is not None" we want things to happen with empty strings (or 0 port) + if scheme is not None: + url += scheme + '://' + if auth is not None: + url += auth + '@' + if host is not None: + url += host + if port is not None: + url += ':' + str(port) + if path is not None: + url += path + if query is not None: + url += '?' + query + if fragment is not None: + url += '#' + fragment + + return url + + def __str__(self): + return self.url + + +def split_first(s, delims): + """ + Given a string and an iterable of delimiters, split on the first found + delimiter. Return two split parts and the matched delimiter. + + If not found, then the first part is the full input string. + + Example:: + + >>> split_first('foo/bar?baz', '?/=') + ('foo', 'bar?baz', '/') + >>> split_first('foo/bar?baz', '123') + ('foo/bar?baz', '', None) + + Scales linearly with number of delims. Not ideal for large number of delims. + """ + min_idx = None + min_delim = None + for d in delims: + idx = s.find(d) + if idx < 0: + continue + + if min_idx is None or idx < min_idx: + min_idx = idx + min_delim = d + + if min_idx is None or min_idx < 0: + return s, '', None + + return s[:min_idx], s[min_idx + 1:], min_delim + + +def parse_url(url): + """ + Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is + performed to parse incomplete urls. Fields not provided will be None. + + Partly backwards-compatible with :mod:`urlparse`. + + Example:: + + >>> parse_url('http://google.com/mail/') + Url(scheme='http', host='google.com', port=None, path='/mail/', ...) + >>> parse_url('google.com:80') + Url(scheme=None, host='google.com', port=80, path=None, ...) 
+ >>> parse_url('/foo?bar') + Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) + """ + + # While this code has overlap with stdlib's urlparse, it is much + # simplified for our needs and less annoying. + # Additionally, this implementations does silly things to be optimal + # on CPython. + + if not url: + # Empty + return Url() + + scheme = None + auth = None + host = None + port = None + path = None + fragment = None + query = None + + # Scheme + if '://' in url: + scheme, url = url.split('://', 1) + + # Find the earliest Authority Terminator + # (http://tools.ietf.org/html/rfc3986#section-3.2) + url, path_, delim = split_first(url, ['/', '?', '#']) + + if delim: + # Reassemble the path + path = delim + path_ + + # Auth + if '@' in url: + # Last '@' denotes end of auth part + auth, url = url.rsplit('@', 1) + + # IPv6 + if url and url[0] == '[': + host, url = url.split(']', 1) + host += ']' + + # Port + if ':' in url: + _host, port = url.split(':', 1) + + if not host: + host = _host + + if port: + # If given, ports must be integers. No whitespace, no plus or + # minus prefixes, no non-integer digits such as ^2 (superscript). + if not port.isdigit(): + raise LocationParseError(url) + try: + port = int(port) + except ValueError: + raise LocationParseError(url) + else: + # Blank ports are cool, too. (rfc3986#section-3.2.3) + port = None + + elif not host and url: + host = url + + if not path: + return Url(scheme, auth, host, port, path, query, fragment) + + # Fragment + if '#' in path: + path, fragment = path.split('#', 1) + + # Query + if '?' in path: + path, query = path.split('?', 1) + + return Url(scheme, auth, host, port, path, query, fragment) + + +def get_host(url): + """ + Deprecated. Use :func:`parse_url` instead. + """ + p = parse_url(url) + return p.scheme or 'http', p.hostname, p.port diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/url.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/url.pyc new file mode 100644 index 0000000..4f6e667 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/url.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/wait.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/wait.py new file mode 100644 index 0000000..4db71ba --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/wait.py @@ -0,0 +1,150 @@ +import errno +from functools import partial +import select +import sys +try: + from time import monotonic +except ImportError: + from time import time as monotonic + +__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"] + + +class NoWayToWaitForSocketError(Exception): + pass + + +# How should we wait on sockets? +# +# There are two types of APIs you can use for waiting on sockets: the fancy +# modern stateful APIs like epoll/kqueue, and the older stateless APIs like +# select/poll. The stateful APIs are more efficient when you have a lots of +# sockets to keep track of, because you can set them up once and then use them +# lots of times. But we only ever want to wait on a single socket at a time +# and don't want to keep track of state, so the stateless APIs are actually +# more efficient. So we want to use select() or poll(). +# +# Now, how do we choose between select() and poll()? On traditional Unixes, +# select() has a strange calling convention that makes it slow, or fail +# altogether, for high-numbered file descriptors. 
The point of poll() is to fix +# that, so on Unixes, we prefer poll(). +# +# On Windows, there is no poll() (or at least Python doesn't provide a wrapper +# for it), but that's OK, because on Windows, select() doesn't have this +# strange calling convention; plain select() works fine. +# +# So: on Windows we use select(), and everywhere else we use poll(). We also +# fall back to select() in case poll() is somehow broken or missing. + +if sys.version_info >= (3, 5): + # Modern Python, that retries syscalls by default + def _retry_on_intr(fn, timeout): + return fn(timeout) +else: + # Old and broken Pythons. + def _retry_on_intr(fn, timeout): + if timeout is None: + deadline = float("inf") + else: + deadline = monotonic() + timeout + + while True: + try: + return fn(timeout) + # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7 + except (OSError, select.error) as e: + # 'e.args[0]' incantation works for both OSError and select.error + if e.args[0] != errno.EINTR: + raise + else: + timeout = deadline - monotonic() + if timeout < 0: + timeout = 0 + if timeout == float("inf"): + timeout = None + continue + + +def select_wait_for_socket(sock, read=False, write=False, timeout=None): + if not read and not write: + raise RuntimeError("must specify at least one of read=True, write=True") + rcheck = [] + wcheck = [] + if read: + rcheck.append(sock) + if write: + wcheck.append(sock) + # When doing a non-blocking connect, most systems signal success by + # marking the socket writable. Windows, though, signals success by marked + # it as "exceptional". We paper over the difference by checking the write + # sockets for both conditions. (The stdlib selectors module does the same + # thing.) + fn = partial(select.select, rcheck, wcheck, wcheck) + rready, wready, xready = _retry_on_intr(fn, timeout) + return bool(rready or wready or xready) + + +def poll_wait_for_socket(sock, read=False, write=False, timeout=None): + if not read and not write: + raise RuntimeError("must specify at least one of read=True, write=True") + mask = 0 + if read: + mask |= select.POLLIN + if write: + mask |= select.POLLOUT + poll_obj = select.poll() + poll_obj.register(sock, mask) + + # For some reason, poll() takes timeout in milliseconds + def do_poll(t): + if t is not None: + t *= 1000 + return poll_obj.poll(t) + + return bool(_retry_on_intr(do_poll, timeout)) + + +def null_wait_for_socket(*args, **kwargs): + raise NoWayToWaitForSocketError("no select-equivalent available") + + +def _have_working_poll(): + # Apparently some systems have a select.poll that fails as soon as you try + # to use it, either due to strange configuration or broken monkeypatching + # from libraries like eventlet/greenlet. + try: + poll_obj = select.poll() + _retry_on_intr(poll_obj.poll, 0) + except (AttributeError, OSError): + return False + else: + return True + + +def wait_for_socket(*args, **kwargs): + # We delay choosing which implementation to use until the first time we're + # called. We could do it at import time, but then we might make the wrong + # decision if someone goes wild with monkeypatching select.poll after + # we're imported. + global wait_for_socket + if _have_working_poll(): + wait_for_socket = poll_wait_for_socket + elif hasattr(select, "select"): + wait_for_socket = select_wait_for_socket + else: # Platform-specific: Appengine. + wait_for_socket = null_wait_for_socket + return wait_for_socket(*args, **kwargs) + + +def wait_for_read(sock, timeout=None): + """ Waits for reading to be available on a given socket. 
+    Returns True if the socket is readable, or False if the timeout expired.
+    """
+    return wait_for_socket(sock, read=True, timeout=timeout)
+
+
+def wait_for_write(sock, timeout=None):
+    """ Waits for writing to be available on a given socket.
+    Returns True if the socket is writable, or False if the timeout expired.
+    """
+    return wait_for_socket(sock, write=True, timeout=timeout)
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/wait.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/wait.pyc
new file mode 100644
index 0000000..ea678e9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/util/wait.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.py
new file mode 100644
index 0000000..d21d697
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.py
@@ -0,0 +1,342 @@
+# coding: utf-8
+"""
+
+    webencodings
+    ~~~~~~~~~~~~
+
+    This is a Python implementation of the `WHATWG Encoding standard
+    <http://encoding.spec.whatwg.org/>`. See README for details.
+
+    :copyright: Copyright 2012 by Simon Sapin
+    :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+import codecs
+
+from .labels import LABELS
+
+
+VERSION = '0.5.1'
+
+
+# Some names in Encoding are not valid Python aliases. Remap these.
+PYTHON_NAMES = {
+    'iso-8859-8-i': 'iso-8859-8',
+    'x-mac-cyrillic': 'mac-cyrillic',
+    'macintosh': 'mac-roman',
+    'windows-874': 'cp874'}
+
+CACHE = {}
+
+
+def ascii_lower(string):
+    r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z.
+
+    :param string: A Unicode string.
+    :returns: A new Unicode string.
+
+    This is used for `ASCII case-insensitive
+    <http://encoding.spec.whatwg.org/#ascii-case-insensitive>`_
+    matching of encoding labels.
+    The same matching is also used, among other things,
+    for `CSS keywords <http://dev.w3.org/csswg/css-values/#keywords>`_.
+
+    This is different from the :meth:`~py:str.lower` method of Unicode
+    strings, which also affects non-ASCII characters,
+    sometimes mapping them into the ASCII range:
+
+        >>> keyword = u'Bac\N{KELVIN SIGN}ground'
+        >>> assert keyword.lower() == u'background'
+        >>> assert ascii_lower(keyword) != keyword.lower()
+        >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground'
+
+    """
+    # This turns out to be faster than unicode.translate()
+    return string.encode('utf8').lower().decode('utf8')
+
+
+def lookup(label):
+    """
+    Look for an encoding by its label.
+    This is the spec’s `get an encoding
+    <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm.
+    Supported labels are listed there.
+
+    :param label: A string.
+    :returns:
+        An :class:`Encoding` object, or :obj:`None` for an unknown label.
+
+    """
+    # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020.
+    label = ascii_lower(label.strip('\t\n\f\r '))
+    name = LABELS.get(label)
+    if name is None:
+        return None
+    encoding = CACHE.get(name)
+    if encoding is None:
+        if name == 'x-user-defined':
+            from .x_user_defined import codec_info
+        else:
+            python_name = PYTHON_NAMES.get(name, name)
+            # Any python_name value that gets to here should be valid.
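+            # e.g. the spec label 'latin1' resolves to the canonical name
+            # 'windows-1252', for which a Python codec of that name exists.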
+            codec_info = codecs.lookup(python_name)
+        encoding = Encoding(name, codec_info)
+        CACHE[name] = encoding
+    return encoding
+
+
+def _get_encoding(encoding_or_label):
+    """
+    Accept either an encoding object or label.
+
+    :param encoding_or_label: An :class:`Encoding` object or a label string.
+    :returns: An :class:`Encoding` object.
+    :raises: :exc:`~exceptions.LookupError` for an unknown label.
+
+    """
+    if hasattr(encoding_or_label, 'codec_info'):
+        return encoding_or_label
+
+    encoding = lookup(encoding_or_label)
+    if encoding is None:
+        raise LookupError('Unknown encoding label: %r' % encoding_or_label)
+    return encoding
+
+
+class Encoding(object):
+    """Represents a character encoding, such as UTF-8,
+    that can be used for decoding or encoding.
+
+    .. attribute:: name
+
+        Canonical name of the encoding
+
+    .. attribute:: codec_info
+
+        The actual implementation of the encoding,
+        a stdlib :class:`~codecs.CodecInfo` object.
+        See :func:`codecs.register`.
+
+    """
+    def __init__(self, name, codec_info):
+        self.name = name
+        self.codec_info = codec_info
+
+    def __repr__(self):
+        return '<Encoding %s>' % self.name
+
+
+#: The UTF-8 encoding. Should be used for new content and formats.
+UTF8 = lookup('utf-8')
+
+_UTF16LE = lookup('utf-16le')
+_UTF16BE = lookup('utf-16be')
+
+
+def decode(input, fallback_encoding, errors='replace'):
+    """
+    Decode a single string.
+
+    :param input: A byte string.
+    :param fallback_encoding:
+        An :class:`Encoding` object or a label string.
+        The encoding to use if :obj:`input` does not have a BOM.
+    :param errors: Type of error handling. See :func:`codecs.register`.
+    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+    :return:
+        A ``(output, encoding)`` tuple of a Unicode string
+        and an :obj:`Encoding`.
+
+    """
+    # Fail early if `encoding` is an invalid label.
+    fallback_encoding = _get_encoding(fallback_encoding)
+    bom_encoding, input = _detect_bom(input)
+    encoding = bom_encoding or fallback_encoding
+    return encoding.codec_info.decode(input, errors)[0], encoding
+
+
+def _detect_bom(input):
+    """Return (bom_encoding, input), with any BOM removed from the input."""
+    if input.startswith(b'\xFF\xFE'):
+        return _UTF16LE, input[2:]
+    if input.startswith(b'\xFE\xFF'):
+        return _UTF16BE, input[2:]
+    if input.startswith(b'\xEF\xBB\xBF'):
+        return UTF8, input[3:]
+    return None, input
+
+
+def encode(input, encoding=UTF8, errors='strict'):
+    """
+    Encode a single string.
+
+    :param input: A Unicode string.
+    :param encoding: An :class:`Encoding` object or a label string.
+    :param errors: Type of error handling. See :func:`codecs.register`.
+    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+    :return: A byte string.
+
+    """
+    return _get_encoding(encoding).codec_info.encode(input, errors)[0]
+
+
+def iter_decode(input, fallback_encoding, errors='replace'):
+    """
+    "Pull"-based decoder.
+
+    :param input:
+        An iterable of byte strings.
+
+        The input is first consumed just enough to determine the encoding
+        based on the presence of a BOM,
+        then consumed on demand as the returned iterable is consumed.
+    :param fallback_encoding:
+        An :class:`Encoding` object or a label string.
+        The encoding to use if :obj:`input` does not have a BOM.
+    :param errors: Type of error handling. See :func:`codecs.register`.
+    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+    :returns:
+        A ``(output, encoding)`` tuple.
+        :obj:`output` is an iterable of Unicode strings,
+        :obj:`encoding` is the :obj:`Encoding` that is being used.
+ + """ + + decoder = IncrementalDecoder(fallback_encoding, errors) + generator = _iter_decode_generator(input, decoder) + encoding = next(generator) + return generator, encoding + + +def _iter_decode_generator(input, decoder): + """Return a generator that first yields the :obj:`Encoding`, + then yields output chukns as Unicode strings. + + """ + decode = decoder.decode + input = iter(input) + for chunck in input: + output = decode(chunck) + if output: + assert decoder.encoding is not None + yield decoder.encoding + yield output + break + else: + # Input exhausted without determining the encoding + output = decode(b'', final=True) + assert decoder.encoding is not None + yield decoder.encoding + if output: + yield output + return + + for chunck in input: + output = decode(chunck) + if output: + yield output + output = decode(b'', final=True) + if output: + yield output + + +def iter_encode(input, encoding=UTF8, errors='strict'): + """ + “Pull”-based encoder. + + :param input: An iterable of Unicode strings. + :param encoding: An :class:`Encoding` object or a label string. + :param errors: Type of error handling. See :func:`codecs.register`. + :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. + :returns: An iterable of byte strings. + + """ + # Fail early if `encoding` is an invalid label. + encode = IncrementalEncoder(encoding, errors).encode + return _iter_encode_generator(input, encode) + + +def _iter_encode_generator(input, encode): + for chunck in input: + output = encode(chunck) + if output: + yield output + output = encode('', final=True) + if output: + yield output + + +class IncrementalDecoder(object): + """ + “Push”-based decoder. + + :param fallback_encoding: + An :class:`Encoding` object or a label string. + The encoding to use if :obj:`input` does note have a BOM. + :param errors: Type of error handling. See :func:`codecs.register`. + :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. + + """ + def __init__(self, fallback_encoding, errors='replace'): + # Fail early if `encoding` is an invalid label. + self._fallback_encoding = _get_encoding(fallback_encoding) + self._errors = errors + self._buffer = b'' + self._decoder = None + #: The actual :class:`Encoding` that is being used, + #: or :obj:`None` if that is not determined yet. + #: (Ie. if there is not enough input yet to determine + #: if there is a BOM.) + self.encoding = None # Not known yet. + + def decode(self, input, final=False): + """Decode one chunk of the input. + + :param input: A byte string. + :param final: + Indicate that no more input is available. + Must be :obj:`True` if this is the last call. + :returns: An Unicode string. + + """ + decoder = self._decoder + if decoder is not None: + return decoder(input, final) + + input = self._buffer + input + encoding, input = _detect_bom(input) + if encoding is None: + if len(input) < 3 and not final: # Not enough data yet. + self._buffer = input + return '' + else: # No BOM + encoding = self._fallback_encoding + decoder = encoding.codec_info.incrementaldecoder(self._errors).decode + self._decoder = decoder + self.encoding = encoding + return decoder(input, final) + + +class IncrementalEncoder(object): + """ + “Push”-based encoder. + + :param encoding: An :class:`Encoding` object or a label string. + :param errors: Type of error handling. See :func:`codecs.register`. + :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. + + .. method:: encode(input, final=False) + + :param input: An Unicode string. 
+ :param final: + Indicate that no more input is available. + Must be :obj:`True` if this is the last call. + :returns: A byte string. + + """ + def __init__(self, encoding=UTF8, errors='strict'): + encoding = _get_encoding(encoding) + self.encode = encoding.codec_info.incrementalencoder(errors).encode diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyc new file mode 100644 index 0000000..4c4a7a8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/labels.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/labels.py new file mode 100644 index 0000000..29cbf91 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/labels.py @@ -0,0 +1,231 @@ +""" + + webencodings.labels + ~~~~~~~~~~~~~~~~~~~ + + Map encoding labels to their name. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. + +""" + +# XXX Do not edit! +# This file is automatically generated by mklabels.py + +LABELS = { + 'unicode-1-1-utf-8': 'utf-8', + 'utf-8': 'utf-8', + 'utf8': 'utf-8', + '866': 'ibm866', + 'cp866': 'ibm866', + 'csibm866': 'ibm866', + 'ibm866': 'ibm866', + 'csisolatin2': 'iso-8859-2', + 'iso-8859-2': 'iso-8859-2', + 'iso-ir-101': 'iso-8859-2', + 'iso8859-2': 'iso-8859-2', + 'iso88592': 'iso-8859-2', + 'iso_8859-2': 'iso-8859-2', + 'iso_8859-2:1987': 'iso-8859-2', + 'l2': 'iso-8859-2', + 'latin2': 'iso-8859-2', + 'csisolatin3': 'iso-8859-3', + 'iso-8859-3': 'iso-8859-3', + 'iso-ir-109': 'iso-8859-3', + 'iso8859-3': 'iso-8859-3', + 'iso88593': 'iso-8859-3', + 'iso_8859-3': 'iso-8859-3', + 'iso_8859-3:1988': 'iso-8859-3', + 'l3': 'iso-8859-3', + 'latin3': 'iso-8859-3', + 'csisolatin4': 'iso-8859-4', + 'iso-8859-4': 'iso-8859-4', + 'iso-ir-110': 'iso-8859-4', + 'iso8859-4': 'iso-8859-4', + 'iso88594': 'iso-8859-4', + 'iso_8859-4': 'iso-8859-4', + 'iso_8859-4:1988': 'iso-8859-4', + 'l4': 'iso-8859-4', + 'latin4': 'iso-8859-4', + 'csisolatincyrillic': 'iso-8859-5', + 'cyrillic': 'iso-8859-5', + 'iso-8859-5': 'iso-8859-5', + 'iso-ir-144': 'iso-8859-5', + 'iso8859-5': 'iso-8859-5', + 'iso88595': 'iso-8859-5', + 'iso_8859-5': 'iso-8859-5', + 'iso_8859-5:1988': 'iso-8859-5', + 'arabic': 'iso-8859-6', + 'asmo-708': 'iso-8859-6', + 'csiso88596e': 'iso-8859-6', + 'csiso88596i': 'iso-8859-6', + 'csisolatinarabic': 'iso-8859-6', + 'ecma-114': 'iso-8859-6', + 'iso-8859-6': 'iso-8859-6', + 'iso-8859-6-e': 'iso-8859-6', + 'iso-8859-6-i': 'iso-8859-6', + 'iso-ir-127': 'iso-8859-6', + 'iso8859-6': 'iso-8859-6', + 'iso88596': 'iso-8859-6', + 'iso_8859-6': 'iso-8859-6', + 'iso_8859-6:1987': 'iso-8859-6', + 'csisolatingreek': 'iso-8859-7', + 'ecma-118': 'iso-8859-7', + 'elot_928': 'iso-8859-7', + 'greek': 'iso-8859-7', + 'greek8': 'iso-8859-7', + 'iso-8859-7': 'iso-8859-7', + 'iso-ir-126': 'iso-8859-7', + 'iso8859-7': 'iso-8859-7', + 'iso88597': 'iso-8859-7', + 'iso_8859-7': 'iso-8859-7', + 'iso_8859-7:1987': 'iso-8859-7', + 'sun_eu_greek': 'iso-8859-7', + 'csiso88598e': 'iso-8859-8', + 'csisolatinhebrew': 'iso-8859-8', + 'hebrew': 'iso-8859-8', + 'iso-8859-8': 'iso-8859-8', + 'iso-8859-8-e': 'iso-8859-8', + 'iso-ir-138': 'iso-8859-8', + 'iso8859-8': 'iso-8859-8', + 'iso88598': 'iso-8859-8', + 'iso_8859-8': 'iso-8859-8', + 'iso_8859-8:1988': 'iso-8859-8', + 'visual': 'iso-8859-8', + 
'csiso88598i': 'iso-8859-8-i', + 'iso-8859-8-i': 'iso-8859-8-i', + 'logical': 'iso-8859-8-i', + 'csisolatin6': 'iso-8859-10', + 'iso-8859-10': 'iso-8859-10', + 'iso-ir-157': 'iso-8859-10', + 'iso8859-10': 'iso-8859-10', + 'iso885910': 'iso-8859-10', + 'l6': 'iso-8859-10', + 'latin6': 'iso-8859-10', + 'iso-8859-13': 'iso-8859-13', + 'iso8859-13': 'iso-8859-13', + 'iso885913': 'iso-8859-13', + 'iso-8859-14': 'iso-8859-14', + 'iso8859-14': 'iso-8859-14', + 'iso885914': 'iso-8859-14', + 'csisolatin9': 'iso-8859-15', + 'iso-8859-15': 'iso-8859-15', + 'iso8859-15': 'iso-8859-15', + 'iso885915': 'iso-8859-15', + 'iso_8859-15': 'iso-8859-15', + 'l9': 'iso-8859-15', + 'iso-8859-16': 'iso-8859-16', + 'cskoi8r': 'koi8-r', + 'koi': 'koi8-r', + 'koi8': 'koi8-r', + 'koi8-r': 'koi8-r', + 'koi8_r': 'koi8-r', + 'koi8-u': 'koi8-u', + 'csmacintosh': 'macintosh', + 'mac': 'macintosh', + 'macintosh': 'macintosh', + 'x-mac-roman': 'macintosh', + 'dos-874': 'windows-874', + 'iso-8859-11': 'windows-874', + 'iso8859-11': 'windows-874', + 'iso885911': 'windows-874', + 'tis-620': 'windows-874', + 'windows-874': 'windows-874', + 'cp1250': 'windows-1250', + 'windows-1250': 'windows-1250', + 'x-cp1250': 'windows-1250', + 'cp1251': 'windows-1251', + 'windows-1251': 'windows-1251', + 'x-cp1251': 'windows-1251', + 'ansi_x3.4-1968': 'windows-1252', + 'ascii': 'windows-1252', + 'cp1252': 'windows-1252', + 'cp819': 'windows-1252', + 'csisolatin1': 'windows-1252', + 'ibm819': 'windows-1252', + 'iso-8859-1': 'windows-1252', + 'iso-ir-100': 'windows-1252', + 'iso8859-1': 'windows-1252', + 'iso88591': 'windows-1252', + 'iso_8859-1': 'windows-1252', + 'iso_8859-1:1987': 'windows-1252', + 'l1': 'windows-1252', + 'latin1': 'windows-1252', + 'us-ascii': 'windows-1252', + 'windows-1252': 'windows-1252', + 'x-cp1252': 'windows-1252', + 'cp1253': 'windows-1253', + 'windows-1253': 'windows-1253', + 'x-cp1253': 'windows-1253', + 'cp1254': 'windows-1254', + 'csisolatin5': 'windows-1254', + 'iso-8859-9': 'windows-1254', + 'iso-ir-148': 'windows-1254', + 'iso8859-9': 'windows-1254', + 'iso88599': 'windows-1254', + 'iso_8859-9': 'windows-1254', + 'iso_8859-9:1989': 'windows-1254', + 'l5': 'windows-1254', + 'latin5': 'windows-1254', + 'windows-1254': 'windows-1254', + 'x-cp1254': 'windows-1254', + 'cp1255': 'windows-1255', + 'windows-1255': 'windows-1255', + 'x-cp1255': 'windows-1255', + 'cp1256': 'windows-1256', + 'windows-1256': 'windows-1256', + 'x-cp1256': 'windows-1256', + 'cp1257': 'windows-1257', + 'windows-1257': 'windows-1257', + 'x-cp1257': 'windows-1257', + 'cp1258': 'windows-1258', + 'windows-1258': 'windows-1258', + 'x-cp1258': 'windows-1258', + 'x-mac-cyrillic': 'x-mac-cyrillic', + 'x-mac-ukrainian': 'x-mac-cyrillic', + 'chinese': 'gbk', + 'csgb2312': 'gbk', + 'csiso58gb231280': 'gbk', + 'gb2312': 'gbk', + 'gb_2312': 'gbk', + 'gb_2312-80': 'gbk', + 'gbk': 'gbk', + 'iso-ir-58': 'gbk', + 'x-gbk': 'gbk', + 'gb18030': 'gb18030', + 'hz-gb-2312': 'hz-gb-2312', + 'big5': 'big5', + 'big5-hkscs': 'big5', + 'cn-big5': 'big5', + 'csbig5': 'big5', + 'x-x-big5': 'big5', + 'cseucpkdfmtjapanese': 'euc-jp', + 'euc-jp': 'euc-jp', + 'x-euc-jp': 'euc-jp', + 'csiso2022jp': 'iso-2022-jp', + 'iso-2022-jp': 'iso-2022-jp', + 'csshiftjis': 'shift_jis', + 'ms_kanji': 'shift_jis', + 'shift-jis': 'shift_jis', + 'shift_jis': 'shift_jis', + 'sjis': 'shift_jis', + 'windows-31j': 'shift_jis', + 'x-sjis': 'shift_jis', + 'cseuckr': 'euc-kr', + 'csksc56011987': 'euc-kr', + 'euc-kr': 'euc-kr', + 'iso-ir-149': 'euc-kr', + 'korean': 'euc-kr', + 'ks_c_5601-1987': 
'euc-kr', + 'ks_c_5601-1989': 'euc-kr', + 'ksc5601': 'euc-kr', + 'ksc_5601': 'euc-kr', + 'windows-949': 'euc-kr', + 'csiso2022kr': 'iso-2022-kr', + 'iso-2022-kr': 'iso-2022-kr', + 'utf-16be': 'utf-16be', + 'utf-16': 'utf-16le', + 'utf-16le': 'utf-16le', + 'x-user-defined': 'x-user-defined', +} diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/labels.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/labels.pyc new file mode 100644 index 0000000..a30413b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/labels.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.py new file mode 100644 index 0000000..295dc92 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.py @@ -0,0 +1,59 @@ +""" + + webencodings.mklabels + ~~~~~~~~~~~~~~~~~~~~~ + + Regenarate the webencodings.labels module. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. + +""" + +import json +try: + from urllib import urlopen +except ImportError: + from urllib.request import urlopen + + +def assert_lower(string): + assert string == string.lower() + return string + + +def generate(url): + parts = ['''\ +""" + + webencodings.labels + ~~~~~~~~~~~~~~~~~~~ + + Map encoding labels to their name. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. + +""" + +# XXX Do not edit! +# This file is automatically generated by mklabels.py + +LABELS = { +'''] + labels = [ + (repr(assert_lower(label)).lstrip('u'), + repr(encoding['name']).lstrip('u')) + for category in json.loads(urlopen(url).read().decode('ascii')) + for encoding in category['encodings'] + for label in encoding['labels']] + max_len = max(len(label) for label, name in labels) + parts.extend( + ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name) + for label, name in labels) + parts.append('}') + return ''.join(parts) + + +if __name__ == '__main__': + print(generate('http://encoding.spec.whatwg.org/encodings.json')) diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pyc new file mode 100644 index 0000000..97efdb5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.py new file mode 100644 index 0000000..e12c10d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.py @@ -0,0 +1,153 @@ +# coding: utf-8 +""" + + webencodings.tests + ~~~~~~~~~~~~~~~~~~ + + A basic test suite for Encoding. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. + +""" + +from __future__ import unicode_literals + +from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode, + IncrementalDecoder, IncrementalEncoder, UTF8) + + +def assert_raises(exception, function, *args, **kwargs): + try: + function(*args, **kwargs) + except exception: + return + else: # pragma: no cover + raise AssertionError('Did not raise %s.' 
% exception) + + +def test_labels(): + assert lookup('utf-8').name == 'utf-8' + assert lookup('Utf-8').name == 'utf-8' + assert lookup('UTF-8').name == 'utf-8' + assert lookup('utf8').name == 'utf-8' + assert lookup('utf8').name == 'utf-8' + assert lookup('utf8 ').name == 'utf-8' + assert lookup(' \r\nutf8\t').name == 'utf-8' + assert lookup('u8') is None # Python label. + assert lookup('utf-8 ') is None # Non-ASCII white space. + + assert lookup('US-ASCII').name == 'windows-1252' + assert lookup('iso-8859-1').name == 'windows-1252' + assert lookup('latin1').name == 'windows-1252' + assert lookup('LATIN1').name == 'windows-1252' + assert lookup('latin-1') is None + assert lookup('LATİN1') is None # ASCII-only case insensitivity. + + +def test_all_labels(): + for label in LABELS: + assert decode(b'', label) == ('', lookup(label)) + assert encode('', label) == b'' + for repeat in [0, 1, 12]: + output, _ = iter_decode([b''] * repeat, label) + assert list(output) == [] + assert list(iter_encode([''] * repeat, label)) == [] + decoder = IncrementalDecoder(label) + assert decoder.decode(b'') == '' + assert decoder.decode(b'', final=True) == '' + encoder = IncrementalEncoder(label) + assert encoder.encode('') == b'' + assert encoder.encode('', final=True) == b'' + # All encoding names are valid labels too: + for name in set(LABELS.values()): + assert lookup(name).name == name + + +def test_invalid_label(): + assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid') + assert_raises(LookupError, encode, 'é', 'invalid') + assert_raises(LookupError, iter_decode, [], 'invalid') + assert_raises(LookupError, iter_encode, [], 'invalid') + assert_raises(LookupError, IncrementalDecoder, 'invalid') + assert_raises(LookupError, IncrementalEncoder, 'invalid') + + +def test_decode(): + assert decode(b'\x80', 'latin1') == ('€', lookup('latin1')) + assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1')) + assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8')) + assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8')) + assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii')) + assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM + + assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM + assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM + assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be')) + assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le')) + + assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be')) + assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le')) + assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le')) + + assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be')) + assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le')) + assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le')) + + +def test_encode(): + assert encode('é', 'latin1') == b'\xe9' + assert encode('é', 'utf8') == b'\xc3\xa9' + assert encode('é', 'utf8') == b'\xc3\xa9' + assert encode('é', 'utf-16') == b'\xe9\x00' + assert encode('é', 'utf-16le') == b'\xe9\x00' + assert encode('é', 'utf-16be') == b'\x00\xe9' + + +def test_iter_decode(): + def iter_decode_to_string(input, fallback_encoding): + output, _encoding = iter_decode(input, fallback_encoding) + return ''.join(output) + assert iter_decode_to_string([], 'latin1') == '' + assert 
iter_decode_to_string([b''], 'latin1') == '' + assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é' + assert iter_decode_to_string([b'hello'], 'latin1') == 'hello' + assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello' + assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello' + assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é' + assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é' + assert iter_decode_to_string([ + b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é' + assert iter_decode_to_string([ + b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD' + assert iter_decode_to_string([ + b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é' + assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == '' + assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»' + assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é' + assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é' + assert iter_decode_to_string([ + b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é' + assert iter_decode_to_string([ + b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo' + + +def test_iter_encode(): + assert b''.join(iter_encode([], 'latin1')) == b'' + assert b''.join(iter_encode([''], 'latin1')) == b'' + assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9' + assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9' + assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00' + assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00' + assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9' + assert b''.join(iter_encode([ + '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo' + + +def test_x_user_defined(): + encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca' + decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca' + encoded = b'aa' + decoded = 'aa' + assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined')) + assert encode(decoded, 'x-user-defined') == encoded diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyc new file mode 100644 index 0000000..5514139 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/tests.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.py b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.py new file mode 100644 index 0000000..d16e326 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.py @@ -0,0 +1,325 @@ +# coding: utf-8 +""" + + webencodings.x_user_defined + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + An implementation of the x-user-defined encoding. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. 
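+
+    Bytes 0x00-0x7F decode to the matching code points; bytes 0x80-0xFF
+    map into the Private Use Area (U+F780 through U+F7FF, an offset of
+    0xF700), so any byte string survives a decode/encode round-trip.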
+ +""" + +from __future__ import unicode_literals + +import codecs + + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self, input, errors='strict'): + return codecs.charmap_encode(input, errors, encoding_table) + + def decode(self, input, errors='strict'): + return codecs.charmap_decode(input, errors, decoding_table) + + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input, self.errors, encoding_table)[0] + + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input, self.errors, decoding_table)[0] + + +class StreamWriter(Codec, codecs.StreamWriter): + pass + + +class StreamReader(Codec, codecs.StreamReader): + pass + + +### encodings module API + +codec_info = codecs.CodecInfo( + name='x-user-defined', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, +) + + +### Decoding Table + +# Python 3: +# for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700)) +decoding_table = ( + '\x00' + '\x01' + '\x02' + '\x03' + '\x04' + '\x05' + '\x06' + '\x07' + '\x08' + '\t' + '\n' + '\x0b' + '\x0c' + '\r' + '\x0e' + '\x0f' + '\x10' + '\x11' + '\x12' + '\x13' + '\x14' + '\x15' + '\x16' + '\x17' + '\x18' + '\x19' + '\x1a' + '\x1b' + '\x1c' + '\x1d' + '\x1e' + '\x1f' + ' ' + '!' + '"' + '#' + '$' + '%' + '&' + "'" + '(' + ')' + '*' + '+' + ',' + '-' + '.' + '/' + '0' + '1' + '2' + '3' + '4' + '5' + '6' + '7' + '8' + '9' + ':' + ';' + '<' + '=' + '>' + '?' + '@' + 'A' + 'B' + 'C' + 'D' + 'E' + 'F' + 'G' + 'H' + 'I' + 'J' + 'K' + 'L' + 'M' + 'N' + 'O' + 'P' + 'Q' + 'R' + 'S' + 'T' + 'U' + 'V' + 'W' + 'X' + 'Y' + 'Z' + '[' + '\\' + ']' + '^' + '_' + '`' + 'a' + 'b' + 'c' + 'd' + 'e' + 'f' + 'g' + 'h' + 'i' + 'j' + 'k' + 'l' + 'm' + 'n' + 'o' + 'p' + 'q' + 'r' + 's' + 't' + 'u' + 'v' + 'w' + 'x' + 'y' + 'z' + '{' + '|' + '}' + '~' + '\x7f' + '\uf780' + '\uf781' + '\uf782' + '\uf783' + '\uf784' + '\uf785' + '\uf786' + '\uf787' + '\uf788' + '\uf789' + '\uf78a' + '\uf78b' + '\uf78c' + '\uf78d' + '\uf78e' + '\uf78f' + '\uf790' + '\uf791' + '\uf792' + '\uf793' + '\uf794' + '\uf795' + '\uf796' + '\uf797' + '\uf798' + '\uf799' + '\uf79a' + '\uf79b' + '\uf79c' + '\uf79d' + '\uf79e' + '\uf79f' + '\uf7a0' + '\uf7a1' + '\uf7a2' + '\uf7a3' + '\uf7a4' + '\uf7a5' + '\uf7a6' + '\uf7a7' + '\uf7a8' + '\uf7a9' + '\uf7aa' + '\uf7ab' + '\uf7ac' + '\uf7ad' + '\uf7ae' + '\uf7af' + '\uf7b0' + '\uf7b1' + '\uf7b2' + '\uf7b3' + '\uf7b4' + '\uf7b5' + '\uf7b6' + '\uf7b7' + '\uf7b8' + '\uf7b9' + '\uf7ba' + '\uf7bb' + '\uf7bc' + '\uf7bd' + '\uf7be' + '\uf7bf' + '\uf7c0' + '\uf7c1' + '\uf7c2' + '\uf7c3' + '\uf7c4' + '\uf7c5' + '\uf7c6' + '\uf7c7' + '\uf7c8' + '\uf7c9' + '\uf7ca' + '\uf7cb' + '\uf7cc' + '\uf7cd' + '\uf7ce' + '\uf7cf' + '\uf7d0' + '\uf7d1' + '\uf7d2' + '\uf7d3' + '\uf7d4' + '\uf7d5' + '\uf7d6' + '\uf7d7' + '\uf7d8' + '\uf7d9' + '\uf7da' + '\uf7db' + '\uf7dc' + '\uf7dd' + '\uf7de' + '\uf7df' + '\uf7e0' + '\uf7e1' + '\uf7e2' + '\uf7e3' + '\uf7e4' + '\uf7e5' + '\uf7e6' + '\uf7e7' + '\uf7e8' + '\uf7e9' + '\uf7ea' + '\uf7eb' + '\uf7ec' + '\uf7ed' + '\uf7ee' + '\uf7ef' + '\uf7f0' + '\uf7f1' + '\uf7f2' + '\uf7f3' + '\uf7f4' + '\uf7f5' + '\uf7f6' + '\uf7f7' + '\uf7f8' + '\uf7f9' + '\uf7fa' + '\uf7fb' + '\uf7fc' + '\uf7fd' + '\uf7fe' + '\uf7ff' +) + +### Encoding table +encoding_table = codecs.charmap_build(decoding_table) 
diff --git a/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyc b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyc new file mode 100644 index 0000000..1a145bf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pip/_vendor/webencodings/x_user_defined.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/__init__.py b/project/venv/lib/python2.7/site-packages/pkg_resources/__init__.py new file mode 100644 index 0000000..97e08d6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/__init__.py @@ -0,0 +1,3286 @@ +# coding: utf-8 +""" +Package resource API +-------------------- + +A resource is a logical file contained within a package, or a logical +subdirectory thereof. The package resource API expects resource names +to have their path parts separated with ``/``, *not* whatever the local +path separator is. Do not use os.path operations to manipulate resource +names being passed into the API. + +The package resource API is designed to work with normal filesystem packages, +.egg files, and unpacked .egg files. It can also work in a limited way with +.zip files and with custom PEP 302 loaders that support the ``get_data()`` +method. +""" + +from __future__ import absolute_import + +import sys +import os +import io +import time +import re +import types +import zipfile +import zipimport +import warnings +import stat +import functools +import pkgutil +import operator +import platform +import collections +import plistlib +import email.parser +import errno +import tempfile +import textwrap +import itertools +import inspect +import ntpath +import posixpath +from pkgutil import get_importer + +try: + import _imp +except ImportError: + # Python 3.2 compatibility + import imp as _imp + +try: + FileExistsError +except NameError: + FileExistsError = OSError + +from pkg_resources.extern import six +from pkg_resources.extern.six.moves import urllib, map, filter + +# capture these to bypass sandboxing +from os import utime +try: + from os import mkdir, rename, unlink + WRITE_SUPPORT = True +except ImportError: + # no write support, probably under GAE + WRITE_SUPPORT = False + +from os import open as os_open +from os.path import isdir, split + +try: + import importlib.machinery as importlib_machinery + # access attribute to force import under delayed import mechanisms. + importlib_machinery.__name__ +except ImportError: + importlib_machinery = None + +from . import py31compat +from pkg_resources.extern import appdirs +from pkg_resources.extern import packaging +__import__('pkg_resources.extern.packaging.version') +__import__('pkg_resources.extern.packaging.specifiers') +__import__('pkg_resources.extern.packaging.requirements') +__import__('pkg_resources.extern.packaging.markers') + + +__metaclass__ = type + + +if (3, 0) < sys.version_info < (3, 4): + raise RuntimeError("Python 3.4 or later is required") + +if six.PY2: + # Those builtin exceptions are only defined in Python 3 + PermissionError = None + NotADirectoryError = None + +# declare some globals that will be defined later to +# satisfy the linters. 
+require = None +working_set = None +add_activation_listener = None +resources_stream = None +cleanup_resources = None +resource_dir = None +resource_stream = None +set_extraction_path = None +resource_isdir = None +resource_string = None +iter_entry_points = None +resource_listdir = None +resource_filename = None +resource_exists = None +_distribution_finders = None +_namespace_handlers = None +_namespace_packages = None + + +class PEP440Warning(RuntimeWarning): + """ + Used when there is an issue with a version or specifier not complying with + PEP 440. + """ + + +def parse_version(v): + try: + return packaging.version.Version(v) + except packaging.version.InvalidVersion: + return packaging.version.LegacyVersion(v) + + +_state_vars = {} + + +def _declare_state(vartype, **kw): + globals().update(kw) + _state_vars.update(dict.fromkeys(kw, vartype)) + + +def __getstate__(): + state = {} + g = globals() + for k, v in _state_vars.items(): + state[k] = g['_sget_' + v](g[k]) + return state + + +def __setstate__(state): + g = globals() + for k, v in state.items(): + g['_sset_' + _state_vars[k]](k, g[k], v) + return state + + +def _sget_dict(val): + return val.copy() + + +def _sset_dict(key, ob, state): + ob.clear() + ob.update(state) + + +def _sget_object(val): + return val.__getstate__() + + +def _sset_object(key, ob, state): + ob.__setstate__(state) + + +_sget_none = _sset_none = lambda *args: None + + +def get_supported_platform(): + """Return this platform's maximum compatible version. + + distutils.util.get_platform() normally reports the minimum version + of Mac OS X that would be required to *use* extensions produced by + distutils. But what we want when checking compatibility is to know the + version of Mac OS X that we are *running*. To allow usage of packages that + explicitly require a newer version of Mac OS X, we must also know the + current version of the OS. + + If this condition occurs for any other platform with a version in its + platform strings, this function should be extended accordingly. 
+ """ + plat = get_build_platform() + m = macosVersionString.match(plat) + if m is not None and sys.platform == "darwin": + try: + plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) + except ValueError: + # not Mac OS X + pass + return plat + + +__all__ = [ + # Basic resource access and distribution/entry point discovery + 'require', 'run_script', 'get_provider', 'get_distribution', + 'load_entry_point', 'get_entry_map', 'get_entry_info', + 'iter_entry_points', + 'resource_string', 'resource_stream', 'resource_filename', + 'resource_listdir', 'resource_exists', 'resource_isdir', + + # Environmental control + 'declare_namespace', 'working_set', 'add_activation_listener', + 'find_distributions', 'set_extraction_path', 'cleanup_resources', + 'get_default_cache', + + # Primary implementation classes + 'Environment', 'WorkingSet', 'ResourceManager', + 'Distribution', 'Requirement', 'EntryPoint', + + # Exceptions + 'ResolutionError', 'VersionConflict', 'DistributionNotFound', + 'UnknownExtra', 'ExtractionError', + + # Warnings + 'PEP440Warning', + + # Parsing functions and string utilities + 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', + 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', + 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', + + # filesystem utilities + 'ensure_directory', 'normalize_path', + + # Distribution "precedence" constants + 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', + + # "Provider" interfaces, implementations, and registration/lookup APIs + 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', + 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', + 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', + 'register_finder', 'register_namespace_handler', 'register_loader_type', + 'fixup_namespace_packages', 'get_importer', + + # Warnings + 'PkgResourcesDeprecationWarning', + + # Deprecated/backward compatibility only + 'run_main', 'AvailableDistributions', +] + + +class ResolutionError(Exception): + """Abstract base for dependency resolution errors""" + + def __repr__(self): + return self.__class__.__name__ + repr(self.args) + + +class VersionConflict(ResolutionError): + """ + An already-installed version conflicts with the requested version. + + Should be initialized with the installed Distribution and the requested + Requirement. + """ + + _template = "{self.dist} is installed but {self.req} is required" + + @property + def dist(self): + return self.args[0] + + @property + def req(self): + return self.args[1] + + def report(self): + return self._template.format(**locals()) + + def with_context(self, required_by): + """ + If required_by is non-empty, return a version of self that is a + ContextualVersionConflict. + """ + if not required_by: + return self + args = self.args + (required_by,) + return ContextualVersionConflict(*args) + + +class ContextualVersionConflict(VersionConflict): + """ + A VersionConflict that accepts a third parameter, the set of the + requirements that required the installed Distribution. 
+ """ + + _template = VersionConflict._template + ' by {self.required_by}' + + @property + def required_by(self): + return self.args[2] + + +class DistributionNotFound(ResolutionError): + """A requested distribution was not found""" + + _template = ("The '{self.req}' distribution was not found " + "and is required by {self.requirers_str}") + + @property + def req(self): + return self.args[0] + + @property + def requirers(self): + return self.args[1] + + @property + def requirers_str(self): + if not self.requirers: + return 'the application' + return ', '.join(self.requirers) + + def report(self): + return self._template.format(**locals()) + + def __str__(self): + return self.report() + + +class UnknownExtra(ResolutionError): + """Distribution doesn't have an "extra feature" of the given name""" + + +_provider_factories = {} + +PY_MAJOR = sys.version[:3] +EGG_DIST = 3 +BINARY_DIST = 2 +SOURCE_DIST = 1 +CHECKOUT_DIST = 0 +DEVELOP_DIST = -1 + + +def register_loader_type(loader_type, provider_factory): + """Register `provider_factory` to make providers for `loader_type` + + `loader_type` is the type or class of a PEP 302 ``module.__loader__``, + and `provider_factory` is a function that, passed a *module* object, + returns an ``IResourceProvider`` for that module. + """ + _provider_factories[loader_type] = provider_factory + + +def get_provider(moduleOrReq): + """Return an IResourceProvider for the named module or requirement""" + if isinstance(moduleOrReq, Requirement): + return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] + try: + module = sys.modules[moduleOrReq] + except KeyError: + __import__(moduleOrReq) + module = sys.modules[moduleOrReq] + loader = getattr(module, '__loader__', None) + return _find_adapter(_provider_factories, loader)(module) + + +def _macosx_vers(_cache=[]): + if not _cache: + version = platform.mac_ver()[0] + # fallback for MacPorts + if version == '': + plist = '/System/Library/CoreServices/SystemVersion.plist' + if os.path.exists(plist): + if hasattr(plistlib, 'readPlist'): + plist_content = plistlib.readPlist(plist) + if 'ProductVersion' in plist_content: + version = plist_content['ProductVersion'] + + _cache.append(version.split('.')) + return _cache[0] + + +def _macosx_arch(machine): + return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) + + +def get_build_platform(): + """Return this platform's string for platform-specific distributions + + XXX Currently this is the same as ``distutils.util.get_platform()``, but it + needs some hacks for Linux and Mac OS X. + """ + from sysconfig import get_platform + + plat = get_platform() + if sys.platform == "darwin" and not plat.startswith('macosx-'): + try: + version = _macosx_vers() + machine = os.uname()[4].replace(" ", "_") + return "macosx-%d.%d-%s" % ( + int(version[0]), int(version[1]), + _macosx_arch(machine), + ) + except ValueError: + # if someone is running a non-Mac darwin system, this will fall + # through to the default implementation + pass + return plat + + +macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") +darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") +# XXX backward compat +get_platform = get_build_platform + + +def compatible_platforms(provided, required): + """Can code for the `provided` platform run on the `required` platform? + + Returns true if either platform is ``None``, or the platforms are equal. + + XXX Needs compatibility checks for Linux and other unixy OSes. 
+ """ + if provided is None or required is None or provided == required: + # easy case + return True + + # Mac OS X special cases + reqMac = macosVersionString.match(required) + if reqMac: + provMac = macosVersionString.match(provided) + + # is this a Mac package? + if not provMac: + # this is backwards compatibility for packages built before + # setuptools 0.6. All packages built after this point will + # use the new macosx designation. + provDarwin = darwinVersionString.match(provided) + if provDarwin: + dversion = int(provDarwin.group(1)) + macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) + if dversion == 7 and macosversion >= "10.3" or \ + dversion == 8 and macosversion >= "10.4": + return True + # egg isn't macosx or legacy darwin + return False + + # are they the same major version and machine type? + if provMac.group(1) != reqMac.group(1) or \ + provMac.group(3) != reqMac.group(3): + return False + + # is the required OS major update >= the provided one? + if int(provMac.group(2)) > int(reqMac.group(2)): + return False + + return True + + # XXX Linux and other platforms' special cases should go here + return False + + +def run_script(dist_spec, script_name): + """Locate distribution `dist_spec` and run its `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + require(dist_spec)[0].run_script(script_name, ns) + + +# backward compatibility +run_main = run_script + + +def get_distribution(dist): + """Return a current distribution object for a Requirement or string""" + if isinstance(dist, six.string_types): + dist = Requirement.parse(dist) + if isinstance(dist, Requirement): + dist = get_provider(dist) + if not isinstance(dist, Distribution): + raise TypeError("Expected string, Requirement, or Distribution", dist) + return dist + + +def load_entry_point(dist, group, name): + """Return `name` entry point of `group` for `dist` or raise ImportError""" + return get_distribution(dist).load_entry_point(group, name) + + +def get_entry_map(dist, group=None): + """Return the entry point map for `group`, or the full entry map""" + return get_distribution(dist).get_entry_map(group) + + +def get_entry_info(dist, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return get_distribution(dist).get_entry_info(group, name) + + +class IMetadataProvider: + def has_metadata(name): + """Does the package's distribution contain the named metadata?""" + + def get_metadata(name): + """The named metadata resource as a string""" + + def get_metadata_lines(name): + """Yield named metadata resource as list of non-blank non-comment lines + + Leading and trailing whitespace is stripped from each line, and lines + with ``#`` as the first non-blank character are omitted.""" + + def metadata_isdir(name): + """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" + + def metadata_listdir(name): + """List of metadata names in the directory (like ``os.listdir()``)""" + + def run_script(script_name, namespace): + """Execute the named script in the supplied namespace dictionary""" + + +class IResourceProvider(IMetadataProvider): + """An object that provides access to package resources""" + + def get_resource_filename(manager, resource_name): + """Return a true filesystem path for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_stream(manager, resource_name): + """Return a readable file-like object for `resource_name` + + `manager` must be an ``IResourceManager``""" + + def get_resource_string(manager, resource_name): + """Return a string containing the contents of `resource_name` + + `manager` must be an ``IResourceManager``""" + + def has_resource(resource_name): + """Does the package contain the named resource?""" + + def resource_isdir(resource_name): + """Is the named resource a directory? (like ``os.path.isdir()``)""" + + def resource_listdir(resource_name): + """List of resource names in the directory (like ``os.listdir()``)""" + + +class WorkingSet: + """A collection of active distributions on sys.path (or a similar list)""" + + def __init__(self, entries=None): + """Create working set from list of path entries (default=sys.path)""" + self.entries = [] + self.entry_keys = {} + self.by_key = {} + self.callbacks = [] + + if entries is None: + entries = sys.path + + for entry in entries: + self.add_entry(entry) + + @classmethod + def _build_master(cls): + """ + Prepare the master working set. + """ + ws = cls() + try: + from __main__ import __requires__ + except ImportError: + # The main program does not list any requirements + return ws + + # ensure the requirements are met + try: + ws.require(__requires__) + except VersionConflict: + return cls._build_from_requirements(__requires__) + + return ws + + @classmethod + def _build_from_requirements(cls, req_spec): + """ + Build a working set from a requirement spec. Rewrites sys.path. + """ + # try it without defaults already on sys.path + # by starting with an empty path + ws = cls([]) + reqs = parse_requirements(req_spec) + dists = ws.resolve(reqs, Environment()) + for dist in dists: + ws.add(dist) + + # add any missing entries from sys.path + for entry in sys.path: + if entry not in ws.entries: + ws.add_entry(entry) + + # then copy back to sys.path + sys.path[:] = ws.entries + return ws + + def add_entry(self, entry): + """Add a path item to ``.entries``, finding any distributions on it + + ``find_distributions(entry, True)`` is used to find distributions + corresponding to the path entry, and they are added. `entry` is + always appended to ``.entries``, even if it is already present. + (This is because ``sys.path`` can contain the same value more than + once, and the ``.entries`` of the ``sys.path`` WorkingSet should always + equal ``sys.path``.) + """ + self.entry_keys.setdefault(entry, []) + self.entries.append(entry) + for dist in find_distributions(entry, True): + self.add(dist, entry, False) + + def __contains__(self, dist): + """True if `dist` is the active distribution for its project""" + return self.by_key.get(dist.key) == dist + + def find(self, req): + """Find a distribution matching requirement `req` + + If there is an active distribution for the requested project, this + returns it as long as it meets the version requirement specified by + `req`. 
But, if there is an active distribution for the project and it + does *not* meet the `req` requirement, ``VersionConflict`` is raised. + If there is no active distribution for the requested project, ``None`` + is returned. + """ + dist = self.by_key.get(req.key) + if dist is not None and dist not in req: + # XXX add more info + raise VersionConflict(dist, req) + return dist + + def iter_entry_points(self, group, name=None): + """Yield entry point objects from `group` matching `name` + + If `name` is None, yields all entry points in `group` from all + distributions in the working set, otherwise only ones matching + both `group` and `name` are yielded (in distribution order). + """ + return ( + entry + for dist in self + for entry in dist.get_entry_map(group).values() + if name is None or name == entry.name + ) + + def run_script(self, requires, script_name): + """Locate distribution for `requires` and run `script_name` script""" + ns = sys._getframe(1).f_globals + name = ns['__name__'] + ns.clear() + ns['__name__'] = name + self.require(requires)[0].run_script(script_name, ns) + + def __iter__(self): + """Yield distributions for non-duplicate projects in the working set + + The yield order is the order in which the items' path entries were + added to the working set. + """ + seen = {} + for item in self.entries: + if item not in self.entry_keys: + # workaround a cache issue + continue + + for key in self.entry_keys[item]: + if key not in seen: + seen[key] = 1 + yield self.by_key[key] + + def add(self, dist, entry=None, insert=True, replace=False): + """Add `dist` to working set, associated with `entry` + + If `entry` is unspecified, it defaults to the ``.location`` of `dist`. + On exit from this routine, `entry` is added to the end of the working + set's ``.entries`` (if it wasn't already present). + + `dist` is only added to the working set if it's for a project that + doesn't already have a distribution in the set, unless `replace=True`. + If it's added, any callbacks registered with the ``subscribe()`` method + will be called. + """ + if insert: + dist.insert_on(self.entries, entry, replace=replace) + + if entry is None: + entry = dist.location + keys = self.entry_keys.setdefault(entry, []) + keys2 = self.entry_keys.setdefault(dist.location, []) + if not replace and dist.key in self.by_key: + # ignore hidden distros + return + + self.by_key[dist.key] = dist + if dist.key not in keys: + keys.append(dist.key) + if dist.key not in keys2: + keys2.append(dist.key) + self._added_new(dist) + + def resolve(self, requirements, env=None, installer=None, + replace_conflicting=False, extras=None): + """List all distributions needed to (recursively) meet `requirements` + + `requirements` must be a sequence of ``Requirement`` objects. `env`, + if supplied, should be an ``Environment`` instance. If + not supplied, it defaults to all distributions available within any + entry or distribution in the working set. `installer`, if supplied, + will be invoked with each requirement that cannot be met by an + already-installed distribution; it should return a ``Distribution`` or + ``None``. + + Unless `replace_conflicting=True`, raises a VersionConflict exception + if + any requirements are found on the path that have the correct name but + the wrong version. Otherwise, if an `installer` is supplied it will be + invoked to obtain the correct version of the requirement and activate + it. + + `extras` is a list of the extras to be used with these requirements. 
+ This is important because extra requirements may look like `my_req; + extra = "my_extra"`, which would otherwise be interpreted as a purely + optional requirement. Instead, we want to be able to assert that these + requirements are truly required. + """ + + # set up the stack + requirements = list(requirements)[::-1] + # set of processed requirements + processed = {} + # key -> dist + best = {} + to_activate = [] + + req_extras = _ReqExtras() + + # Mapping of requirement to set of distributions that required it; + # useful for reporting info about conflicts. + required_by = collections.defaultdict(set) + + while requirements: + # process dependencies breadth-first + req = requirements.pop(0) + if req in processed: + # Ignore cyclic or redundant dependencies + continue + + if not req_extras.markers_pass(req, extras): + continue + + dist = best.get(req.key) + if dist is None: + # Find the best distribution and add it to the map + dist = self.by_key.get(req.key) + if dist is None or (dist not in req and replace_conflicting): + ws = self + if env is None: + if dist is None: + env = Environment(self.entries) + else: + # Use an empty environment and workingset to avoid + # any further conflicts with the conflicting + # distribution + env = Environment([]) + ws = WorkingSet([]) + dist = best[req.key] = env.best_match( + req, ws, installer, + replace_conflicting=replace_conflicting + ) + if dist is None: + requirers = required_by.get(req, None) + raise DistributionNotFound(req, requirers) + to_activate.append(dist) + if dist not in req: + # Oops, the "best" so far conflicts with a dependency + dependent_req = required_by[req] + raise VersionConflict(dist, req).with_context(dependent_req) + + # push the new requirements onto the stack + new_requirements = dist.requires(req.extras)[::-1] + requirements.extend(new_requirements) + + # Register the new requirements needed by req + for new_requirement in new_requirements: + required_by[new_requirement].add(req.project_name) + req_extras[new_requirement] = req.extras + + processed[req] = True + + # return list of distros to activate + return to_activate + + def find_plugins( + self, plugin_env, full_env=None, installer=None, fallback=True): + """Find all activatable distributions in `plugin_env` + + Example usage:: + + distributions, errors = working_set.find_plugins( + Environment(plugin_dirlist) + ) + # add plugins+libs to sys.path + map(working_set.add, distributions) + # display errors + print('Could not load', errors) + + The `plugin_env` should be an ``Environment`` instance that contains + only distributions that are in the project's "plugin directory" or + directories. The `full_env`, if supplied, should be an ``Environment`` + contains all currently-available distributions. If `full_env` is not + supplied, one is created automatically from the ``WorkingSet`` this + method is called on, which will typically mean that every directory on + ``sys.path`` will be scanned for distributions. + + `installer` is a standard installer callback as used by the + ``resolve()`` method. The `fallback` flag indicates whether we should + attempt to resolve older versions of a plugin if the newest version + cannot be resolved. + + This method returns a 2-tuple: (`distributions`, `error_info`), where + `distributions` is a list of the distributions found in `plugin_env` + that were loadable, along with any other distributions that are needed + to resolve their dependencies. 
`error_info` is a dictionary mapping + unloadable plugin distributions to an exception instance describing the + error that occurred. Usually this will be a ``DistributionNotFound`` or + ``VersionConflict`` instance. + """ + + plugin_projects = list(plugin_env) + # scan project names in alphabetic order + plugin_projects.sort() + + error_info = {} + distributions = {} + + if full_env is None: + env = Environment(self.entries) + env += plugin_env + else: + env = full_env + plugin_env + + shadow_set = self.__class__([]) + # put all our entries in shadow_set + list(map(shadow_set.add, self)) + + for project_name in plugin_projects: + + for dist in plugin_env[project_name]: + + req = [dist.as_requirement()] + + try: + resolvees = shadow_set.resolve(req, env, installer) + + except ResolutionError as v: + # save error info + error_info[dist] = v + if fallback: + # try the next older version of project + continue + else: + # give up on this project, keep going + break + + else: + list(map(shadow_set.add, resolvees)) + distributions.update(dict.fromkeys(resolvees)) + + # success, no need to try any more versions of this project + break + + distributions = list(distributions) + distributions.sort() + + return distributions, error_info + + def require(self, *requirements): + """Ensure that distributions matching `requirements` are activated + + `requirements` must be a string or a (possibly-nested) sequence + thereof, specifying the distributions and versions required. The + return value is a sequence of the distributions that needed to be + activated to fulfill the requirements; all relevant distributions are + included, even if they were already activated in this working set. + """ + needed = self.resolve(parse_requirements(requirements)) + + for dist in needed: + self.add(dist) + + return needed + + def subscribe(self, callback, existing=True): + """Invoke `callback` for all distributions + + If `existing=True` (default), + call on all existing ones, as well. + """ + if callback in self.callbacks: + return + self.callbacks.append(callback) + if not existing: + return + for dist in self: + callback(dist) + + def _added_new(self, dist): + for callback in self.callbacks: + callback(dist) + + def __getstate__(self): + return ( + self.entries[:], self.entry_keys.copy(), self.by_key.copy(), + self.callbacks[:] + ) + + def __setstate__(self, e_k_b_c): + entries, keys, by_key, callbacks = e_k_b_c + self.entries = entries[:] + self.entry_keys = keys.copy() + self.by_key = by_key.copy() + self.callbacks = callbacks[:] + + +class _ReqExtras(dict): + """ + Map each requirement to the extras that demanded it. + """ + + def markers_pass(self, req, extras=None): + """ + Evaluate markers for req against each extra that + demanded it. + + Return False if the req has a marker and fails + evaluation. Otherwise, return True. + """ + extra_evals = ( + req.marker.evaluate({'extra': extra}) + for extra in self.get(req, ()) + (extras or (None,)) + ) + return not req.marker or any(extra_evals) + + +class Environment: + """Searchable snapshot of distributions on a search path""" + + def __init__( + self, search_path=None, platform=get_supported_platform(), + python=PY_MAJOR): + """Snapshot distributions available on a search path + + Any distributions found on `search_path` are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. 
+ + `platform` is an optional string specifying the name of the platform + that platform-specific distributions must be compatible with. If + unspecified, it defaults to the current platform. `python` is an + optional string naming the desired version of Python (e.g. ``'3.6'``); + it defaults to the current version. + + You may explicitly set `platform` (and/or `python`) to ``None`` if you + wish to map *all* distributions, not just those compatible with the + running platform or Python version. + """ + self._distmap = {} + self.platform = platform + self.python = python + self.scan(search_path) + + def can_add(self, dist): + """Is distribution `dist` acceptable for this environment? + + The distribution must match the platform and python version + requirements specified when this environment was created, or False + is returned. + """ + py_compat = ( + self.python is None + or dist.py_version is None + or dist.py_version == self.python + ) + return py_compat and compatible_platforms(dist.platform, self.platform) + + def remove(self, dist): + """Remove `dist` from the environment""" + self._distmap[dist.key].remove(dist) + + def scan(self, search_path=None): + """Scan `search_path` for distributions usable in this environment + + Any distributions found are added to the environment. + `search_path` should be a sequence of ``sys.path`` items. If not + supplied, ``sys.path`` is used. Only distributions conforming to + the platform/python version defined at initialization are added. + """ + if search_path is None: + search_path = sys.path + + for item in search_path: + for dist in find_distributions(item): + self.add(dist) + + def __getitem__(self, project_name): + """Return a newest-to-oldest list of distributions for `project_name` + + Uses case-insensitive `project_name` comparison, assuming all the + project's distributions use their project's name converted to all + lowercase as their key. + + """ + distribution_key = project_name.lower() + return self._distmap.get(distribution_key, []) + + def add(self, dist): + """Add `dist` if we ``can_add()`` it and it has not already been added + """ + if self.can_add(dist) and dist.has_version(): + dists = self._distmap.setdefault(dist.key, []) + if dist not in dists: + dists.append(dist) + dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) + + def best_match( + self, req, working_set, installer=None, replace_conflicting=False): + """Find distribution best matching `req` and usable on `working_set` + + This calls the ``find(req)`` method of the `working_set` to see if a + suitable distribution is already active. (This may raise + ``VersionConflict`` if an unsuitable version of the project is already + active in the specified `working_set`.) If a suitable distribution + isn't active, this method returns the newest distribution in the + environment that meets the ``Requirement`` in `req`. If no suitable + distribution is found, and `installer` is supplied, then the result of + calling the environment's ``obtain(req, installer)`` method will be + returned. + """ + try: + dist = working_set.find(req) + except VersionConflict: + if not replace_conflicting: + raise + dist = None + if dist is not None: + return dist + for dist in self[req.key]: + if dist in req: + return dist + # try to download/install + return self.obtain(req, installer) + + def obtain(self, requirement, installer=None): + """Obtain a distribution matching `requirement` (e.g. via download) + + Obtain a distro that matches requirement (e.g. via download). 
In the + base ``Environment`` class, this routine just returns + ``installer(requirement)``, unless `installer` is None, in which case + None is returned instead. This method is a hook that allows subclasses + to attempt other ways of obtaining a distribution before falling back + to the `installer` argument.""" + if installer is not None: + return installer(requirement) + + def __iter__(self): + """Yield the unique project names of the available distributions""" + for key in self._distmap.keys(): + if self[key]: + yield key + + def __iadd__(self, other): + """In-place addition of a distribution or environment""" + if isinstance(other, Distribution): + self.add(other) + elif isinstance(other, Environment): + for project in other: + for dist in other[project]: + self.add(dist) + else: + raise TypeError("Can't add %r to environment" % (other,)) + return self + + def __add__(self, other): + """Add an environment or distribution to an environment""" + new = self.__class__([], platform=None, python=None) + for env in self, other: + new += env + return new + + +# XXX backward compatibility +AvailableDistributions = Environment + + +class ExtractionError(RuntimeError): + """An error occurred extracting a resource + + The following attributes are available from instances of this exception: + + manager + The resource manager that raised this exception + + cache_path + The base directory for resource extraction + + original_error + The exception instance that caused extraction to fail + """ + + +class ResourceManager: + """Manage resource extraction and packages""" + extraction_path = None + + def __init__(self): + self.cached_files = {} + + def resource_exists(self, package_or_requirement, resource_name): + """Does the named resource exist?""" + return get_provider(package_or_requirement).has_resource(resource_name) + + def resource_isdir(self, package_or_requirement, resource_name): + """Is the named resource an existing directory?""" + return get_provider(package_or_requirement).resource_isdir( + resource_name + ) + + def resource_filename(self, package_or_requirement, resource_name): + """Return a true filesystem path for specified resource""" + return get_provider(package_or_requirement).get_resource_filename( + self, resource_name + ) + + def resource_stream(self, package_or_requirement, resource_name): + """Return a readable file-like object for specified resource""" + return get_provider(package_or_requirement).get_resource_stream( + self, resource_name + ) + + def resource_string(self, package_or_requirement, resource_name): + """Return specified resource as a string""" + return get_provider(package_or_requirement).get_resource_string( + self, resource_name + ) + + def resource_listdir(self, package_or_requirement, resource_name): + """List the contents of the named resource directory""" + return get_provider(package_or_requirement).resource_listdir( + resource_name + ) + + def extraction_error(self): + """Give an error message for problems extracting file(s)""" + + old_exc = sys.exc_info()[1] + cache_path = self.extraction_path or get_default_cache() + + tmpl = textwrap.dedent(""" + Can't extract file(s) to egg cache + + The following error occurred while trying to extract file(s) + to the Python egg cache: + + {old_exc} + + The Python egg cache directory is currently set to: + + {cache_path} + + Perhaps your account does not have write access to this directory? + You can change the cache directory by setting the PYTHON_EGG_CACHE + environment variable to point to an accessible directory. 
+ """).lstrip() + err = ExtractionError(tmpl.format(**locals())) + err.manager = self + err.cache_path = cache_path + err.original_error = old_exc + raise err + + def get_cache_path(self, archive_name, names=()): + """Return absolute location in cache for `archive_name` and `names` + + The parent directory of the resulting path will be created if it does + not already exist. `archive_name` should be the base filename of the + enclosing egg (which may not be the name of the enclosing zipfile!), + including its ".egg" extension. `names`, if provided, should be a + sequence of path name parts "under" the egg's extraction location. + + This method should only be called by resource providers that need to + obtain an extraction location, and only for names they intend to + extract, as it tracks the generated names for possible cleanup later. + """ + extract_path = self.extraction_path or get_default_cache() + target_path = os.path.join(extract_path, archive_name + '-tmp', *names) + try: + _bypass_ensure_directory(target_path) + except Exception: + self.extraction_error() + + self._warn_unsafe_extraction_path(extract_path) + + self.cached_files[target_path] = 1 + return target_path + + @staticmethod + def _warn_unsafe_extraction_path(path): + """ + If the default extraction path is overridden and set to an insecure + location, such as /tmp, it opens up an opportunity for an attacker to + replace an extracted file with an unauthorized payload. Warn the user + if a known insecure location is used. + + See Distribute #375 for more details. + """ + if os.name == 'nt' and not path.startswith(os.environ['windir']): + # On Windows, permissions are generally restrictive by default + # and temp directories are not writable by other users, so + # bypass the warning. + return + mode = os.stat(path).st_mode + if mode & stat.S_IWOTH or mode & stat.S_IWGRP: + msg = ( + "%s is writable by group/others and vulnerable to attack " + "when " + "used with get_resource_filename. Consider a more secure " + "location (set with .set_extraction_path or the " + "PYTHON_EGG_CACHE environment variable)." % path + ) + warnings.warn(msg, UserWarning) + + def postprocess(self, tempname, filename): + """Perform any platform-specific postprocessing of `tempname` + + This is where Mac header rewrites should be done; other platforms don't + have anything special they should do. + + Resource providers should call this method ONLY after successfully + extracting a compressed resource. They must NOT call it on resources + that are already in the filesystem. + + `tempname` is the current (temporary) name of the file, and `filename` + is the name it will be renamed to by the caller after this routine + returns. + """ + + if os.name == 'posix': + # Make the resource executable + mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 + os.chmod(tempname, mode) + + def set_extraction_path(self, path): + """Set the base path where resources will be extracted to, if needed. + + If you do not call this routine before any extractions take place, the + path defaults to the return value of ``get_default_cache()``. (Which + is based on the ``PYTHON_EGG_CACHE`` environment variable, with various + platform-specific fallbacks. See that routine's documentation for more + details.) + + Resources are extracted to subdirectories of this path based upon + information given by the ``IResourceProvider``. You may set this to a + temporary directory, but then you must call ``cleanup_resources()`` to + delete the extracted files when done. 
There is no guarantee that + ``cleanup_resources()`` will be able to remove all extracted files. + + (Note: you may not change the extraction path for a given resource + manager once resources have been extracted, unless you first call + ``cleanup_resources()``.) + """ + if self.cached_files: + raise ValueError( + "Can't change extraction path, files already extracted" + ) + + self.extraction_path = path + + def cleanup_resources(self, force=False): + """ + Delete all extracted resource files and directories, returning a list + of the file and directory names that could not be successfully removed. + This function does not have any concurrency protection, so it should + generally only be called when the extraction path is a temporary + directory exclusive to a single process. This method is not + automatically called; you must call it explicitly or register it as an + ``atexit`` function if you wish to ensure cleanup of a temporary + directory used for extractions. + """ + # XXX + + +def get_default_cache(): + """ + Return the ``PYTHON_EGG_CACHE`` environment variable + or a platform-relevant user cache dir for an app + named "Python-Eggs". + """ + return ( + os.environ.get('PYTHON_EGG_CACHE') + or appdirs.user_cache_dir(appname='Python-Eggs') + ) + + +def safe_name(name): + """Convert an arbitrary string to a standard distribution name + + Any runs of non-alphanumeric/. characters are replaced with a single '-'. + """ + return re.sub('[^A-Za-z0-9.]+', '-', name) + + +def safe_version(version): + """ + Convert an arbitrary string to a standard version string + """ + try: + # normalize the version + return str(packaging.version.Version(version)) + except packaging.version.InvalidVersion: + version = version.replace(' ', '.') + return re.sub('[^A-Za-z0-9.]+', '-', version) + + +def safe_extra(extra): + """Convert an arbitrary string to a standard 'extra' name + + Any runs of non-alphanumeric characters are replaced with a single '_', + and the result is always lowercased. + """ + return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() + + +def to_filename(name): + """Convert a project or version name to its filename-escaped form + + Any '-' characters are currently replaced with '_'. + """ + return name.replace('-', '_') + + +def invalid_marker(text): + """ + Validate text as a PEP 508 environment marker; return an exception + if invalid or False otherwise. + """ + try: + evaluate_marker(text) + except SyntaxError as e: + e.filename = None + e.lineno = None + return e + return False + + +def evaluate_marker(text, extra=None): + """ + Evaluate a PEP 508 environment marker. + Return a boolean indicating the marker result in this environment. + Raise SyntaxError if marker is invalid. + + This implementation uses the 'pyparsing' module. 
+ """ + try: + marker = packaging.markers.Marker(text) + return marker.evaluate() + except packaging.markers.InvalidMarker as e: + raise SyntaxError(e) + + +class NullProvider: + """Try to implement resources and metadata for arbitrary PEP 302 loaders""" + + egg_name = None + egg_info = None + loader = None + + def __init__(self, module): + self.loader = getattr(module, '__loader__', None) + self.module_path = os.path.dirname(getattr(module, '__file__', '')) + + def get_resource_filename(self, manager, resource_name): + return self._fn(self.module_path, resource_name) + + def get_resource_stream(self, manager, resource_name): + return io.BytesIO(self.get_resource_string(manager, resource_name)) + + def get_resource_string(self, manager, resource_name): + return self._get(self._fn(self.module_path, resource_name)) + + def has_resource(self, resource_name): + return self._has(self._fn(self.module_path, resource_name)) + + def _get_metadata_path(self, name): + return self._fn(self.egg_info, name) + + def has_metadata(self, name): + if not self.egg_info: + return self.egg_info + + path = self._get_metadata_path(name) + return self._has(path) + + def get_metadata(self, name): + if not self.egg_info: + return "" + value = self._get(self._fn(self.egg_info, name)) + return value.decode('utf-8') if six.PY3 else value + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + def resource_isdir(self, resource_name): + return self._isdir(self._fn(self.module_path, resource_name)) + + def metadata_isdir(self, name): + return self.egg_info and self._isdir(self._fn(self.egg_info, name)) + + def resource_listdir(self, resource_name): + return self._listdir(self._fn(self.module_path, resource_name)) + + def metadata_listdir(self, name): + if self.egg_info: + return self._listdir(self._fn(self.egg_info, name)) + return [] + + def run_script(self, script_name, namespace): + script = 'scripts/' + script_name + if not self.has_metadata(script): + raise ResolutionError( + "Script {script!r} not found in metadata at {self.egg_info!r}" + .format(**locals()), + ) + script_text = self.get_metadata(script).replace('\r\n', '\n') + script_text = script_text.replace('\r', '\n') + script_filename = self._fn(self.egg_info, script) + namespace['__file__'] = script_filename + if os.path.exists(script_filename): + source = open(script_filename).read() + code = compile(source, script_filename, 'exec') + exec(code, namespace, namespace) + else: + from linecache import cache + cache[script_filename] = ( + len(script_text), 0, script_text.split('\n'), script_filename + ) + script_code = compile(script_text, script_filename, 'exec') + exec(script_code, namespace, namespace) + + def _has(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _isdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _listdir(self, path): + raise NotImplementedError( + "Can't perform this operation for unregistered loader type" + ) + + def _fn(self, base, resource_name): + self._validate_resource_path(resource_name) + if resource_name: + return os.path.join(base, *resource_name.split('/')) + return base + + @staticmethod + def _validate_resource_path(path): + """ + Validate the resource paths according to the docs. 
+ https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access + + >>> warned = getfixture('recwarn') + >>> warnings.simplefilter('always') + >>> vrp = NullProvider._validate_resource_path + >>> vrp('foo/bar.txt') + >>> bool(warned) + False + >>> vrp('../foo/bar.txt') + >>> bool(warned) + True + >>> warned.clear() + >>> vrp('/foo/bar.txt') + >>> bool(warned) + True + >>> vrp('foo/../../bar.txt') + >>> bool(warned) + True + >>> warned.clear() + >>> vrp('foo/f../bar.txt') + >>> bool(warned) + False + + Windows path separators are straight-up disallowed. + >>> vrp(r'\\foo/bar.txt') + Traceback (most recent call last): + ... + ValueError: Use of .. or absolute path in a resource path \ +is not allowed. + + >>> vrp(r'C:\\foo/bar.txt') + Traceback (most recent call last): + ... + ValueError: Use of .. or absolute path in a resource path \ +is not allowed. + + Blank values are allowed + + >>> vrp('') + >>> bool(warned) + False + + Non-string values are not. + + >>> vrp(None) + Traceback (most recent call last): + ... + AttributeError: ... + """ + invalid = ( + os.path.pardir in path.split(posixpath.sep) or + posixpath.isabs(path) or + ntpath.isabs(path) + ) + if not invalid: + return + + msg = "Use of .. or absolute path in a resource path is not allowed." + + # Aggressively disallow Windows absolute paths + if ntpath.isabs(path) and not posixpath.isabs(path): + raise ValueError(msg) + + # for compatibility, warn; in future + # raise ValueError(msg) + warnings.warn( + msg[:-1] + " and will raise exceptions in a future release.", + DeprecationWarning, + stacklevel=4, + ) + + def _get(self, path): + if hasattr(self.loader, 'get_data'): + return self.loader.get_data(path) + raise NotImplementedError( + "Can't perform this operation for loaders without 'get_data()'" + ) + + +register_loader_type(object, NullProvider) + + +class EggProvider(NullProvider): + """Provider based on a virtual filesystem""" + + def __init__(self, module): + NullProvider.__init__(self, module) + self._setup_prefix() + + def _setup_prefix(self): + # we assume here that our metadata may be nested inside a "basket" + # of multiple eggs; that's why we use module_path instead of .archive + path = self.module_path + old = None + while path != old: + if _is_egg_path(path): + self.egg_name = os.path.basename(path) + self.egg_info = os.path.join(path, 'EGG-INFO') + self.egg_root = path + break + old = path + path, base = os.path.split(path) + + +class DefaultProvider(EggProvider): + """Provides access to package resources in the filesystem""" + + def _has(self, path): + return os.path.exists(path) + + def _isdir(self, path): + return os.path.isdir(path) + + def _listdir(self, path): + return os.listdir(path) + + def get_resource_stream(self, manager, resource_name): + return open(self._fn(self.module_path, resource_name), 'rb') + + def _get(self, path): + with open(path, 'rb') as stream: + return stream.read() + + @classmethod + def _register(cls): + loader_names = 'SourceFileLoader', 'SourcelessFileLoader', + for name in loader_names: + loader_cls = getattr(importlib_machinery, name, type(None)) + register_loader_type(loader_cls, cls) + + +DefaultProvider._register() + + +class EmptyProvider(NullProvider): + """Provider that returns nothing for all requests""" + + module_path = None + + _isdir = _has = lambda self, path: False + + def _get(self, path): + return '' + + def _listdir(self, path): + return [] + + def __init__(self): + pass + + +empty_provider = EmptyProvider() + + +class ZipManifests(dict): + """ 
+ zip manifest builder + """ + + @classmethod + def build(cls, path): + """ + Build a dictionary similar to the zipimport directory + caches, except instead of tuples, store ZipInfo objects. + + Use a platform-specific path separator (os.sep) for the path keys + for compatibility with pypy on Windows. + """ + with zipfile.ZipFile(path) as zfile: + items = ( + ( + name.replace('/', os.sep), + zfile.getinfo(name), + ) + for name in zfile.namelist() + ) + return dict(items) + + load = build + + +class MemoizedZipManifests(ZipManifests): + """ + Memoized zipfile manifests. + """ + manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') + + def load(self, path): + """ + Load a manifest at path or return a suitable manifest already loaded. + """ + path = os.path.normpath(path) + mtime = os.stat(path).st_mtime + + if path not in self or self[path].mtime != mtime: + manifest = self.build(path) + self[path] = self.manifest_mod(manifest, mtime) + + return self[path].manifest + + +class ZipProvider(EggProvider): + """Resource support for zips and eggs""" + + eagers = None + _zip_manifests = MemoizedZipManifests() + + def __init__(self, module): + EggProvider.__init__(self, module) + self.zip_pre = self.loader.archive + os.sep + + def _zipinfo_name(self, fspath): + # Convert a virtual filename (full path to file) into a zipfile subpath + # usable with the zipimport directory cache for our target archive + fspath = fspath.rstrip(os.sep) + if fspath == self.loader.archive: + return '' + if fspath.startswith(self.zip_pre): + return fspath[len(self.zip_pre):] + raise AssertionError( + "%s is not a subpath of %s" % (fspath, self.zip_pre) + ) + + def _parts(self, zip_path): + # Convert a zipfile subpath into an egg-relative path part list. + # pseudo-fs path + fspath = self.zip_pre + zip_path + if fspath.startswith(self.egg_root + os.sep): + return fspath[len(self.egg_root) + 1:].split(os.sep) + raise AssertionError( + "%s is not a subpath of %s" % (fspath, self.egg_root) + ) + + @property + def zipinfo(self): + return self._zip_manifests.load(self.loader.archive) + + def get_resource_filename(self, manager, resource_name): + if not self.egg_name: + raise NotImplementedError( + "resource_filename() only supported for .egg, not .zip" + ) + # no need to lock for extraction, since we use temp names + zip_path = self._resource_to_zip(resource_name) + eagers = self._get_eager_resources() + if '/'.join(self._parts(zip_path)) in eagers: + for name in eagers: + self._extract_resource(manager, self._eager_to_zip(name)) + return self._extract_resource(manager, zip_path) + + @staticmethod + def _get_date_and_size(zip_stat): + size = zip_stat.file_size + # ymdhms+wday, yday, dst + date_time = zip_stat.date_time + (0, 0, -1) + # 1980 offset already done + timestamp = time.mktime(date_time) + return timestamp, size + + def _extract_resource(self, manager, zip_path): + + if zip_path in self._index(): + for name in self._index()[zip_path]: + last = self._extract_resource( + manager, os.path.join(zip_path, name) + ) + # return the extracted directory name + return os.path.dirname(last) + + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + + if not WRITE_SUPPORT: + raise IOError('"os.rename" and "os.unlink" are not supported ' + 'on this platform') + try: + + real_path = manager.get_cache_path( + self.egg_name, self._parts(zip_path) + ) + + if self._is_current(real_path, zip_path): + return real_path + + outf, tmpnam = _mkstemp( + ".$extract", + dir=os.path.dirname(real_path), + ) + 
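+            # NOTE (descriptive comment, not upstream code): the member is
+            # written to a unique temp name beside real_path, stamped with
+            # the zip entry's timestamp so _is_current() matches it later,
+            # and only then renamed into place, so a concurrent reader never
+            # sees a partially extracted file.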
os.write(outf, self.loader.get_data(zip_path)) + os.close(outf) + utime(tmpnam, (timestamp, timestamp)) + manager.postprocess(tmpnam, real_path) + + try: + rename(tmpnam, real_path) + + except os.error: + if os.path.isfile(real_path): + if self._is_current(real_path, zip_path): + # the file became current since it was checked above, + # so proceed. + return real_path + # Windows, del old file and retry + elif os.name == 'nt': + unlink(real_path) + rename(tmpnam, real_path) + return real_path + raise + + except os.error: + # report a user-friendly error + manager.extraction_error() + + return real_path + + def _is_current(self, file_path, zip_path): + """ + Return True if the file_path is current for this zip_path + """ + timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) + if not os.path.isfile(file_path): + return False + stat = os.stat(file_path) + if stat.st_size != size or stat.st_mtime != timestamp: + return False + # check that the contents match + zip_contents = self.loader.get_data(zip_path) + with open(file_path, 'rb') as f: + file_contents = f.read() + return zip_contents == file_contents + + def _get_eager_resources(self): + if self.eagers is None: + eagers = [] + for name in ('native_libs.txt', 'eager_resources.txt'): + if self.has_metadata(name): + eagers.extend(self.get_metadata_lines(name)) + self.eagers = eagers + return self.eagers + + def _index(self): + try: + return self._dirindex + except AttributeError: + ind = {} + for path in self.zipinfo: + parts = path.split(os.sep) + while parts: + parent = os.sep.join(parts[:-1]) + if parent in ind: + ind[parent].append(parts[-1]) + break + else: + ind[parent] = [parts.pop()] + self._dirindex = ind + return ind + + def _has(self, fspath): + zip_path = self._zipinfo_name(fspath) + return zip_path in self.zipinfo or zip_path in self._index() + + def _isdir(self, fspath): + return self._zipinfo_name(fspath) in self._index() + + def _listdir(self, fspath): + return list(self._index().get(self._zipinfo_name(fspath), ())) + + def _eager_to_zip(self, resource_name): + return self._zipinfo_name(self._fn(self.egg_root, resource_name)) + + def _resource_to_zip(self, resource_name): + return self._zipinfo_name(self._fn(self.module_path, resource_name)) + + +register_loader_type(zipimport.zipimporter, ZipProvider) + + +class FileMetadata(EmptyProvider): + """Metadata handler for standalone PKG-INFO files + + Usage:: + + metadata = FileMetadata("/path/to/PKG-INFO") + + This provider rejects all data and metadata requests except for PKG-INFO, + which is treated as existing, and will be the contents of the file at + the provided location. 
+ """ + + def __init__(self, path): + self.path = path + + def _get_metadata_path(self, name): + return self.path + + def has_metadata(self, name): + return name == 'PKG-INFO' and os.path.isfile(self.path) + + def get_metadata(self, name): + if name != 'PKG-INFO': + raise KeyError("No metadata except PKG-INFO is available") + + with io.open(self.path, encoding='utf-8', errors="replace") as f: + metadata = f.read() + self._warn_on_replacement(metadata) + return metadata + + def _warn_on_replacement(self, metadata): + # Python 2.7 compat for: replacement_char = '�' + replacement_char = b'\xef\xbf\xbd'.decode('utf-8') + if replacement_char in metadata: + tmpl = "{self.path} could not be properly decoded in UTF-8" + msg = tmpl.format(**locals()) + warnings.warn(msg) + + def get_metadata_lines(self, name): + return yield_lines(self.get_metadata(name)) + + +class PathMetadata(DefaultProvider): + """Metadata provider for egg directories + + Usage:: + + # Development eggs: + + egg_info = "/path/to/PackageName.egg-info" + base_dir = os.path.dirname(egg_info) + metadata = PathMetadata(base_dir, egg_info) + dist_name = os.path.splitext(os.path.basename(egg_info))[0] + dist = Distribution(basedir, project_name=dist_name, metadata=metadata) + + # Unpacked egg directories: + + egg_path = "/path/to/PackageName-ver-pyver-etc.egg" + metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) + dist = Distribution.from_filename(egg_path, metadata=metadata) + """ + + def __init__(self, path, egg_info): + self.module_path = path + self.egg_info = egg_info + + +class EggMetadata(ZipProvider): + """Metadata provider for .egg files""" + + def __init__(self, importer): + """Create a metadata provider from a zipimporter""" + + self.zip_pre = importer.archive + os.sep + self.loader = importer + if importer.prefix: + self.module_path = os.path.join(importer.archive, importer.prefix) + else: + self.module_path = importer.archive + self._setup_prefix() + + +_declare_state('dict', _distribution_finders={}) + + +def register_finder(importer_type, distribution_finder): + """Register `distribution_finder` to find distributions in sys.path items + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `distribution_finder` is a callable that, passed a path + item and the importer instance, yields ``Distribution`` instances found on + that path item. See ``pkg_resources.find_on_path`` for an example.""" + _distribution_finders[importer_type] = distribution_finder + + +def find_distributions(path_item, only=False): + """Yield distributions accessible via `path_item`""" + importer = get_importer(path_item) + finder = _find_adapter(_distribution_finders, importer) + return finder(importer, path_item, only) + + +def find_eggs_in_zip(importer, path_item, only=False): + """ + Find eggs in zip files; possibly multiple nested eggs. 
+ """ + if importer.archive.endswith('.whl'): + # wheels are not supported with this finder + # they don't have PKG-INFO metadata, and won't ever contain eggs + return + metadata = EggMetadata(importer) + if metadata.has_metadata('PKG-INFO'): + yield Distribution.from_filename(path_item, metadata=metadata) + if only: + # don't yield nested distros + return + for subitem in metadata.resource_listdir(''): + if _is_egg_path(subitem): + subpath = os.path.join(path_item, subitem) + dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) + for dist in dists: + yield dist + elif subitem.lower().endswith('.dist-info'): + subpath = os.path.join(path_item, subitem) + submeta = EggMetadata(zipimport.zipimporter(subpath)) + submeta.egg_info = subpath + yield Distribution.from_location(path_item, subitem, submeta) + + +register_finder(zipimport.zipimporter, find_eggs_in_zip) + + +def find_nothing(importer, path_item, only=False): + return () + + +register_finder(object, find_nothing) + + +def _by_version_descending(names): + """ + Given a list of filenames, return them in descending order + by version number. + + >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' + >>> _by_version_descending(names) + ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] + >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' + >>> _by_version_descending(names) + ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] + >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' + >>> _by_version_descending(names) + ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] + """ + def _by_version(name): + """ + Parse each component of the filename + """ + name, ext = os.path.splitext(name) + parts = itertools.chain(name.split('-'), [ext]) + return [packaging.version.parse(part) for part in parts] + + return sorted(names, key=_by_version, reverse=True) + + +def find_on_path(importer, path_item, only=False): + """Yield distributions accessible on a sys.path directory""" + path_item = _normalize_cached(path_item) + + if _is_unpacked_egg(path_item): + yield Distribution.from_filename( + path_item, metadata=PathMetadata( + path_item, os.path.join(path_item, 'EGG-INFO') + ) + ) + return + + entries = safe_listdir(path_item) + + # for performance, before sorting by version, + # screen entries for only those that will yield + # distributions + filtered = ( + entry + for entry in entries + if dist_factory(path_item, entry, only) + ) + + # scan for .egg and .egg-info in directory + path_item_entries = _by_version_descending(filtered) + for entry in path_item_entries: + fullpath = os.path.join(path_item, entry) + factory = dist_factory(path_item, entry, only) + for dist in factory(fullpath): + yield dist + + +def dist_factory(path_item, entry, only): + """ + Return a dist_factory for a path_item and entry + """ + lower = entry.lower() + is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info'))) + return ( + distributions_from_metadata + if is_meta else + find_distributions + if not only and _is_egg_path(entry) else + resolve_egg_link + if not only and lower.endswith('.egg-link') else + NoDists() + ) + + +class NoDists: + """ + >>> bool(NoDists()) + False + + >>> list(NoDists()('anything')) + [] + """ + def __bool__(self): + return False + if six.PY2: + __nonzero__ = __bool__ + + def __call__(self, fullpath): + return iter(()) + + +def safe_listdir(path): + """ + Attempt to list contents of path, but suppress some exceptions. 
+ """ + try: + return os.listdir(path) + except (PermissionError, NotADirectoryError): + pass + except OSError as e: + # Ignore the directory if does not exist, not a directory or + # permission denied + ignorable = ( + e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT) + # Python 2 on Windows needs to be handled this way :( + or getattr(e, "winerror", None) == 267 + ) + if not ignorable: + raise + return () + + +def distributions_from_metadata(path): + root = os.path.dirname(path) + if os.path.isdir(path): + if len(os.listdir(path)) == 0: + # empty metadata dir; skip + return + metadata = PathMetadata(root, path) + else: + metadata = FileMetadata(path) + entry = os.path.basename(path) + yield Distribution.from_location( + root, entry, metadata, precedence=DEVELOP_DIST, + ) + + +def non_empty_lines(path): + """ + Yield non-empty lines from file at path + """ + with open(path) as f: + for line in f: + line = line.strip() + if line: + yield line + + +def resolve_egg_link(path): + """ + Given a path to an .egg-link, resolve distributions + present in the referenced path. + """ + referenced_paths = non_empty_lines(path) + resolved_paths = ( + os.path.join(os.path.dirname(path), ref) + for ref in referenced_paths + ) + dist_groups = map(find_distributions, resolved_paths) + return next(dist_groups, ()) + + +register_finder(pkgutil.ImpImporter, find_on_path) + +if hasattr(importlib_machinery, 'FileFinder'): + register_finder(importlib_machinery.FileFinder, find_on_path) + +_declare_state('dict', _namespace_handlers={}) +_declare_state('dict', _namespace_packages={}) + + +def register_namespace_handler(importer_type, namespace_handler): + """Register `namespace_handler` to declare namespace packages + + `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item + handler), and `namespace_handler` is a callable like this:: + + def namespace_handler(importer, path_entry, moduleName, module): + # return a path_entry to use for child packages + + Namespace handlers are only called if the importer object has already + agreed that it can handle the relevant path item, and they should only + return a subpath if the module __path__ does not already contain an + equivalent subpath. For an example namespace handler, see + ``pkg_resources.file_ns_handler``. 
+ """ + _namespace_handlers[importer_type] = namespace_handler + + +def _handle_ns(packageName, path_item): + """Ensure that named package includes a subpath of path_item (if needed)""" + + importer = get_importer(path_item) + if importer is None: + return None + + # capture warnings due to #1111 + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + loader = importer.find_module(packageName) + + if loader is None: + return None + module = sys.modules.get(packageName) + if module is None: + module = sys.modules[packageName] = types.ModuleType(packageName) + module.__path__ = [] + _set_parent_ns(packageName) + elif not hasattr(module, '__path__'): + raise TypeError("Not a package:", packageName) + handler = _find_adapter(_namespace_handlers, importer) + subpath = handler(importer, path_item, packageName, module) + if subpath is not None: + path = module.__path__ + path.append(subpath) + loader.load_module(packageName) + _rebuild_mod_path(path, packageName, module) + return subpath + + +def _rebuild_mod_path(orig_path, package_name, module): + """ + Rebuild module.__path__ ensuring that all entries are ordered + corresponding to their sys.path order + """ + sys_path = [_normalize_cached(p) for p in sys.path] + + def safe_sys_path_index(entry): + """ + Workaround for #520 and #513. + """ + try: + return sys_path.index(entry) + except ValueError: + return float('inf') + + def position_in_sys_path(path): + """ + Return the ordinal of the path based on its position in sys.path + """ + path_parts = path.split(os.sep) + module_parts = package_name.count('.') + 1 + parts = path_parts[:-module_parts] + return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) + + new_path = sorted(orig_path, key=position_in_sys_path) + new_path = [_normalize_cached(p) for p in new_path] + + if isinstance(module.__path__, list): + module.__path__[:] = new_path + else: + module.__path__ = new_path + + +def declare_namespace(packageName): + """Declare that package 'packageName' is a namespace package""" + + _imp.acquire_lock() + try: + if packageName in _namespace_packages: + return + + path = sys.path + parent, _, _ = packageName.rpartition('.') + + if parent: + declare_namespace(parent) + if parent not in _namespace_packages: + __import__(parent) + try: + path = sys.modules[parent].__path__ + except AttributeError: + raise TypeError("Not a package:", parent) + + # Track what packages are namespaces, so when new path items are added, + # they can be updated + _namespace_packages.setdefault(parent or None, []).append(packageName) + _namespace_packages.setdefault(packageName, []) + + for path_item in path: + # Ensure all the parent's path items are reflected in the child, + # if they apply + _handle_ns(packageName, path_item) + + finally: + _imp.release_lock() + + +def fixup_namespace_packages(path_item, parent=None): + """Ensure that previously-declared namespace packages include path_item""" + _imp.acquire_lock() + try: + for package in _namespace_packages.get(parent, ()): + subpath = _handle_ns(package, path_item) + if subpath: + fixup_namespace_packages(subpath, package) + finally: + _imp.release_lock() + + +def file_ns_handler(importer, path_item, packageName, module): + """Compute an ns-package subpath for a filesystem or zipfile importer""" + + subpath = os.path.join(path_item, packageName.split('.')[-1]) + normalized = _normalize_cached(subpath) + for item in module.__path__: + if _normalize_cached(item) == normalized: + break + else: + # Only return the path if it's not already there + 
return subpath + + +register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) +register_namespace_handler(zipimport.zipimporter, file_ns_handler) + +if hasattr(importlib_machinery, 'FileFinder'): + register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) + + +def null_ns_handler(importer, path_item, packageName, module): + return None + + +register_namespace_handler(object, null_ns_handler) + + +def normalize_path(filename): + """Normalize a file/dir name for comparison purposes""" + return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename)))) + + +def _cygwin_patch(filename): # pragma: nocover + """ + Contrary to POSIX 2008, on Cygwin, getcwd (3) contains + symlink components. Using + os.path.abspath() works around this limitation. A fix in os.getcwd() + would probably better, in Cygwin even more so, except + that this seems to be by design... + """ + return os.path.abspath(filename) if sys.platform == 'cygwin' else filename + + +def _normalize_cached(filename, _cache={}): + try: + return _cache[filename] + except KeyError: + _cache[filename] = result = normalize_path(filename) + return result + + +def _is_egg_path(path): + """ + Determine if given path appears to be an egg. + """ + return path.lower().endswith('.egg') + + +def _is_unpacked_egg(path): + """ + Determine if given path appears to be an unpacked egg. + """ + return ( + _is_egg_path(path) and + os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) + ) + + +def _set_parent_ns(packageName): + parts = packageName.split('.') + name = parts.pop() + if parts: + parent = '.'.join(parts) + setattr(sys.modules[parent], name, sys.modules[packageName]) + + +def yield_lines(strs): + """Yield non-empty/non-comment lines of a string or sequence""" + if isinstance(strs, six.string_types): + for s in strs.splitlines(): + s = s.strip() + # skip blank lines/comments + if s and not s.startswith('#'): + yield s + else: + for ss in strs: + for s in yield_lines(ss): + yield s + + +MODULE = re.compile(r"\w+(\.\w+)*$").match +EGG_NAME = re.compile( + r""" + (?P<name>[^-]+) ( + -(?P<ver>[^-]+) ( + -py(?P<pyver>[^-]+) ( + -(?P<plat>.+) + )? + )? + )? + """, + re.VERBOSE | re.IGNORECASE, +).match + + +class EntryPoint: + """Object representing an advertised importable object""" + + def __init__(self, name, module_name, attrs=(), extras=(), dist=None): + if not MODULE(module_name): + raise ValueError("Invalid module name", module_name) + self.name = name + self.module_name = module_name + self.attrs = tuple(attrs) + self.extras = tuple(extras) + self.dist = dist + + def __str__(self): + s = "%s = %s" % (self.name, self.module_name) + if self.attrs: + s += ':' + '.'.join(self.attrs) + if self.extras: + s += ' [%s]' % ','.join(self.extras) + return s + + def __repr__(self): + return "EntryPoint.parse(%r)" % str(self) + + def load(self, require=True, *args, **kwargs): + """ + Require packages for this EntryPoint, then resolve it. + """ + if not require or args or kwargs: + warnings.warn( + "Parameters to load are deprecated. Call .resolve and " + ".require separately.", + PkgResourcesDeprecationWarning, + stacklevel=2, + ) + if require: + self.require(*args, **kwargs) + return self.resolve() + + def resolve(self): + """ + Resolve the entry point from its module and attrs. 
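+
+        Sketch, with a hypothetical module and attribute::
+
+            ep = EntryPoint.parse("main = mypkg.cli:main")
+            func = ep.resolve()   # imports mypkg.cli, returns its main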
+ """ + module = __import__(self.module_name, fromlist=['__name__'], level=0) + try: + return functools.reduce(getattr, self.attrs, module) + except AttributeError as exc: + raise ImportError(str(exc)) + + def require(self, env=None, installer=None): + if self.extras and not self.dist: + raise UnknownExtra("Can't require() without a distribution", self) + + # Get the requirements for this entry point with all its extras and + # then resolve them. We have to pass `extras` along when resolving so + # that the working set knows what extras we want. Otherwise, for + # dist-info distributions, the working set will assume that the + # requirements for that extra are purely optional and skip over them. + reqs = self.dist.requires(self.extras) + items = working_set.resolve(reqs, env, installer, extras=self.extras) + list(map(working_set.add, items)) + + pattern = re.compile( + r'\s*' + r'(?P<name>.+?)\s*' + r'=\s*' + r'(?P<module>[\w.]+)\s*' + r'(:\s*(?P<attr>[\w.]+))?\s*' + r'(?P<extras>\[.*\])?\s*$' + ) + + @classmethod + def parse(cls, src, dist=None): + """Parse a single entry point from string `src` + + Entry point syntax follows the form:: + + name = some.module:some.attr [extra1, extra2] + + The entry name and module name are required, but the ``:attrs`` and + ``[extras]`` parts are optional + """ + m = cls.pattern.match(src) + if not m: + msg = "EntryPoint must be in 'name=module:attrs [extras]' format" + raise ValueError(msg, src) + res = m.groupdict() + extras = cls._parse_extras(res['extras']) + attrs = res['attr'].split('.') if res['attr'] else () + return cls(res['name'], res['module'], attrs, extras, dist) + + @classmethod + def _parse_extras(cls, extras_spec): + if not extras_spec: + return () + req = Requirement.parse('x' + extras_spec) + if req.specs: + raise ValueError() + return req.extras + + @classmethod + def parse_group(cls, group, lines, dist=None): + """Parse an entry point group""" + if not MODULE(group): + raise ValueError("Invalid group name", group) + this = {} + for line in yield_lines(lines): + ep = cls.parse(line, dist) + if ep.name in this: + raise ValueError("Duplicate entry point", group, ep.name) + this[ep.name] = ep + return this + + @classmethod + def parse_map(cls, data, dist=None): + """Parse a map of entry point groups""" + if isinstance(data, dict): + data = data.items() + else: + data = split_sections(data) + maps = {} + for group, lines in data: + if group is None: + if not lines: + continue + raise ValueError("Entry points must be listed in groups") + group = group.strip() + if group in maps: + raise ValueError("Duplicate group name", group) + maps[group] = cls.parse_group(group, lines, dist) + return maps + + +def _remove_md5_fragment(location): + if not location: + return '' + parsed = urllib.parse.urlparse(location) + if parsed[-1].startswith('md5='): + return urllib.parse.urlunparse(parsed[:-1] + ('',)) + return location + + +def _version_from_file(lines): + """ + Given an iterable of lines from a Metadata file, return + the value of the Version field, if present, or None otherwise. 
+ """ + def is_version_line(line): + return line.lower().startswith('version:') + version_lines = filter(is_version_line, lines) + line = next(iter(version_lines), '') + _, _, value = line.partition(':') + return safe_version(value.strip()) or None + + +class Distribution: + """Wrap an actual or potential sys.path entry w/metadata""" + PKG_INFO = 'PKG-INFO' + + def __init__( + self, location=None, metadata=None, project_name=None, + version=None, py_version=PY_MAJOR, platform=None, + precedence=EGG_DIST): + self.project_name = safe_name(project_name or 'Unknown') + if version is not None: + self._version = safe_version(version) + self.py_version = py_version + self.platform = platform + self.location = location + self.precedence = precedence + self._provider = metadata or empty_provider + + @classmethod + def from_location(cls, location, basename, metadata=None, **kw): + project_name, version, py_version, platform = [None] * 4 + basename, ext = os.path.splitext(basename) + if ext.lower() in _distributionImpl: + cls = _distributionImpl[ext.lower()] + + match = EGG_NAME(basename) + if match: + project_name, version, py_version, platform = match.group( + 'name', 'ver', 'pyver', 'plat' + ) + return cls( + location, metadata, project_name=project_name, version=version, + py_version=py_version, platform=platform, **kw + )._reload_version() + + def _reload_version(self): + return self + + @property + def hashcmp(self): + return ( + self.parsed_version, + self.precedence, + self.key, + _remove_md5_fragment(self.location), + self.py_version or '', + self.platform or '', + ) + + def __hash__(self): + return hash(self.hashcmp) + + def __lt__(self, other): + return self.hashcmp < other.hashcmp + + def __le__(self, other): + return self.hashcmp <= other.hashcmp + + def __gt__(self, other): + return self.hashcmp > other.hashcmp + + def __ge__(self, other): + return self.hashcmp >= other.hashcmp + + def __eq__(self, other): + if not isinstance(other, self.__class__): + # It's not a Distribution, so they are not equal + return False + return self.hashcmp == other.hashcmp + + def __ne__(self, other): + return not self == other + + # These properties have to be lazy so that we don't have to load any + # metadata until/unless it's actually needed. (i.e., some distributions + # may not know their name or version without loading PKG-INFO) + + @property + def key(self): + try: + return self._key + except AttributeError: + self._key = key = self.project_name.lower() + return key + + @property + def parsed_version(self): + if not hasattr(self, "_parsed_version"): + self._parsed_version = parse_version(self.version) + + return self._parsed_version + + def _warn_legacy_version(self): + LV = packaging.version.LegacyVersion + is_legacy = isinstance(self._parsed_version, LV) + if not is_legacy: + return + + # While an empty version is technically a legacy version and + # is not a valid PEP 440 version, it's also unlikely to + # actually come from someone and instead it is more likely that + # it comes from setuptools attempting to parse a filename and + # including it in the list. So for that we'll gate this warning + # on if the version is anything at all or not. + if not self.version: + return + + tmpl = textwrap.dedent(""" + '{project_name} ({version})' is being parsed as a legacy, + non PEP 440, + version. You may find odd behavior and sort order. + In particular it will be sorted as less than 0.0. It + is recommended to migrate to PEP 440 compatible + versions. 
+ """).strip().replace('\n', ' ') + + warnings.warn(tmpl.format(**vars(self)), PEP440Warning) + + @property + def version(self): + try: + return self._version + except AttributeError: + version = self._get_version() + if version is None: + path = self._get_metadata_path_for_display(self.PKG_INFO) + msg = ( + "Missing 'Version:' header and/or {} file at path: {}" + ).format(self.PKG_INFO, path) + raise ValueError(msg, self) + + return version + + @property + def _dep_map(self): + """ + A map of extra to its list of (direct) requirements + for this distribution, including the null extra. + """ + try: + return self.__dep_map + except AttributeError: + self.__dep_map = self._filter_extras(self._build_dep_map()) + return self.__dep_map + + @staticmethod + def _filter_extras(dm): + """ + Given a mapping of extras to dependencies, strip off + environment markers and filter out any dependencies + not matching the markers. + """ + for extra in list(filter(None, dm)): + new_extra = extra + reqs = dm.pop(extra) + new_extra, _, marker = extra.partition(':') + fails_marker = marker and ( + invalid_marker(marker) + or not evaluate_marker(marker) + ) + if fails_marker: + reqs = [] + new_extra = safe_extra(new_extra) or None + + dm.setdefault(new_extra, []).extend(reqs) + return dm + + def _build_dep_map(self): + dm = {} + for name in 'requires.txt', 'depends.txt': + for extra, reqs in split_sections(self._get_metadata(name)): + dm.setdefault(extra, []).extend(parse_requirements(reqs)) + return dm + + def requires(self, extras=()): + """List of Requirements needed for this distro if `extras` are used""" + dm = self._dep_map + deps = [] + deps.extend(dm.get(None, ())) + for ext in extras: + try: + deps.extend(dm[safe_extra(ext)]) + except KeyError: + raise UnknownExtra( + "%s has no such extra feature %r" % (self, ext) + ) + return deps + + def _get_metadata_path_for_display(self, name): + """ + Return the path to the given metadata file, if available. + """ + try: + # We need to access _get_metadata_path() on the provider object + # directly rather than through this class's __getattr__() + # since _get_metadata_path() is marked private. + path = self._provider._get_metadata_path(name) + + # Handle exceptions e.g. in case the distribution's metadata + # provider doesn't support _get_metadata_path(). 
+ except Exception: + return '[could not detect]' + + return path + + def _get_metadata(self, name): + if self.has_metadata(name): + for line in self.get_metadata_lines(name): + yield line + + def _get_version(self): + lines = self._get_metadata(self.PKG_INFO) + version = _version_from_file(lines) + + return version + + def activate(self, path=None, replace=False): + """Ensure distribution is importable on `path` (default=sys.path)""" + if path is None: + path = sys.path + self.insert_on(path, replace=replace) + if path is sys.path: + fixup_namespace_packages(self.location) + for pkg in self._get_metadata('namespace_packages.txt'): + if pkg in sys.modules: + declare_namespace(pkg) + + def egg_name(self): + """Return what this distribution's standard .egg filename should be""" + filename = "%s-%s-py%s" % ( + to_filename(self.project_name), to_filename(self.version), + self.py_version or PY_MAJOR + ) + + if self.platform: + filename += '-' + self.platform + return filename + + def __repr__(self): + if self.location: + return "%s (%s)" % (self, self.location) + else: + return str(self) + + def __str__(self): + try: + version = getattr(self, 'version', None) + except ValueError: + version = None + version = version or "[unknown version]" + return "%s %s" % (self.project_name, version) + + def __getattr__(self, attr): + """Delegate all unrecognized public attributes to .metadata provider""" + if attr.startswith('_'): + raise AttributeError(attr) + return getattr(self._provider, attr) + + def __dir__(self): + return list( + set(super(Distribution, self).__dir__()) + | set( + attr for attr in self._provider.__dir__() + if not attr.startswith('_') + ) + ) + + if not hasattr(object, '__dir__'): + # python 2.7 not supported + del __dir__ + + @classmethod + def from_filename(cls, filename, metadata=None, **kw): + return cls.from_location( + _normalize_cached(filename), os.path.basename(filename), metadata, + **kw + ) + + def as_requirement(self): + """Return a ``Requirement`` that matches this distribution exactly""" + if isinstance(self.parsed_version, packaging.version.Version): + spec = "%s==%s" % (self.project_name, self.parsed_version) + else: + spec = "%s===%s" % (self.project_name, self.parsed_version) + + return Requirement.parse(spec) + + def load_entry_point(self, group, name): + """Return the `name` entry point of `group` or raise ImportError""" + ep = self.get_entry_info(group, name) + if ep is None: + raise ImportError("Entry point %r not found" % ((group, name),)) + return ep.load() + + def get_entry_map(self, group=None): + """Return the entry point map for `group`, or the full entry map""" + try: + ep_map = self._ep_map + except AttributeError: + ep_map = self._ep_map = EntryPoint.parse_map( + self._get_metadata('entry_points.txt'), self + ) + if group is not None: + return ep_map.get(group, {}) + return ep_map + + def get_entry_info(self, group, name): + """Return the EntryPoint object for `group`+`name`, or ``None``""" + return self.get_entry_map(group).get(name) + + def insert_on(self, path, loc=None, replace=False): + """Ensure self.location is on path + + If replace=False (default): + - If location is already in path anywhere, do nothing. + - Else: + - If it's an egg and its parent directory is on path, + insert just ahead of the parent. + - Else: add to the end of path. + If replace=True: + - If location is already on path anywhere (not eggs) + or higher priority than its parent (eggs) + do nothing. 
+ - Else: + - If it's an egg and its parent directory is on path, + insert just ahead of the parent, + removing any lower-priority entries. + - Else: add it to the front of path. + """ + + loc = loc or self.location + if not loc: + return + + nloc = _normalize_cached(loc) + bdir = os.path.dirname(nloc) + npath = [(p and _normalize_cached(p) or p) for p in path] + + for p, item in enumerate(npath): + if item == nloc: + if replace: + break + else: + # don't modify path (even removing duplicates) if + # found and not replace + return + elif item == bdir and self.precedence == EGG_DIST: + # if it's an .egg, give it precedence over its directory + # UNLESS it's already been added to sys.path and replace=False + if (not replace) and nloc in npath[p:]: + return + if path is sys.path: + self.check_version_conflict() + path.insert(p, loc) + npath.insert(p, nloc) + break + else: + if path is sys.path: + self.check_version_conflict() + if replace: + path.insert(0, loc) + else: + path.append(loc) + return + + # p is the spot where we found or inserted loc; now remove duplicates + while True: + try: + np = npath.index(nloc, p + 1) + except ValueError: + break + else: + del npath[np], path[np] + # ha! + p = np + + return + + def check_version_conflict(self): + if self.key == 'setuptools': + # ignore the inevitable setuptools self-conflicts :( + return + + nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) + loc = normalize_path(self.location) + for modname in self._get_metadata('top_level.txt'): + if (modname not in sys.modules or modname in nsp + or modname in _namespace_packages): + continue + if modname in ('pkg_resources', 'setuptools', 'site'): + continue + fn = getattr(sys.modules[modname], '__file__', None) + if fn and (normalize_path(fn).startswith(loc) or + fn.startswith(self.location)): + continue + issue_warning( + "Module %s was already imported from %s, but %s is being added" + " to sys.path" % (modname, fn, self.location), + ) + + def has_version(self): + try: + self.version + except ValueError: + issue_warning("Unbuilt egg for " + repr(self)) + return False + return True + + def clone(self, **kw): + """Copy this distribution, substituting in any changed keyword args""" + names = 'project_name version py_version platform location precedence' + for attr in names.split(): + kw.setdefault(attr, getattr(self, attr, None)) + kw.setdefault('metadata', self._provider) + return self.__class__(**kw) + + @property + def extras(self): + return [dep for dep in self._dep_map if dep] + + +class EggInfoDistribution(Distribution): + def _reload_version(self): + """ + Packages installed by distutils (e.g. numpy or scipy), + which uses an old safe_version, and so + their version numbers can get mangled when + converted to filenames (e.g., 1.11.0.dev0+2329eae to + 1.11.0.dev0_2329eae). These distributions will not be + parsed properly + downstream by Distribution and safe_version, so + take an extra step and try to get the version number from + the metadata file itself instead of the filename. + """ + md_version = self._get_version() + if md_version: + self._version = md_version + return self + + +class DistInfoDistribution(Distribution): + """ + Wrap an actual or potential sys.path entry + w/metadata, .dist-info style. 
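+
+    Instances are normally created by ``Distribution.from_location`` when
+    the basename ends in ``.dist-info`` (see ``_distributionImpl`` below);
+    a direct sketch, where the names are hypothetical and ``metadata`` is
+    a provider such as ``PathMetadata``::
+
+        dist = Distribution.from_location(
+            '/site-packages', 'Foo-1.0.dist-info', metadata)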
+ """ + PKG_INFO = 'METADATA' + EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") + + @property + def _parsed_pkg_info(self): + """Parse and cache metadata""" + try: + return self._pkg_info + except AttributeError: + metadata = self.get_metadata(self.PKG_INFO) + self._pkg_info = email.parser.Parser().parsestr(metadata) + return self._pkg_info + + @property + def _dep_map(self): + try: + return self.__dep_map + except AttributeError: + self.__dep_map = self._compute_dependencies() + return self.__dep_map + + def _compute_dependencies(self): + """Recompute this distribution's dependencies.""" + dm = self.__dep_map = {None: []} + + reqs = [] + # Including any condition expressions + for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: + reqs.extend(parse_requirements(req)) + + def reqs_for_extra(extra): + for req in reqs: + if not req.marker or req.marker.evaluate({'extra': extra}): + yield req + + common = frozenset(reqs_for_extra(None)) + dm[None].extend(common) + + for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: + s_extra = safe_extra(extra.strip()) + dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) + + return dm + + +_distributionImpl = { + '.egg': Distribution, + '.egg-info': EggInfoDistribution, + '.dist-info': DistInfoDistribution, +} + + +def issue_warning(*args, **kw): + level = 1 + g = globals() + try: + # find the first stack frame that is *not* code in + # the pkg_resources module, to use for the warning + while sys._getframe(level).f_globals is g: + level += 1 + except ValueError: + pass + warnings.warn(stacklevel=level + 1, *args, **kw) + + +class RequirementParseError(ValueError): + def __str__(self): + return ' '.join(self.args) + + +def parse_requirements(strs): + """Yield ``Requirement`` objects for each specification in `strs` + + `strs` must be a string, or a (possibly-nested) iterable thereof. + """ + # create a steppable iterator, so we can handle \-continuations + lines = iter(yield_lines(strs)) + + for line in lines: + # Drop comments -- a hash without a space may be in a URL. + if ' #' in line: + line = line[:line.find(' #')] + # If there is a line continuation, drop it, and append the next line. + if line.endswith('\\'): + line = line[:-2].strip() + try: + line += next(lines) + except StopIteration: + return + yield Requirement(line) + + +class Requirement(packaging.requirements.Requirement): + def __init__(self, requirement_string): + """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" + try: + super(Requirement, self).__init__(requirement_string) + except packaging.requirements.InvalidRequirement as e: + raise RequirementParseError(str(e)) + self.unsafe_name = self.name + project_name = safe_name(self.name) + self.project_name, self.key = project_name, project_name.lower() + self.specs = [ + (spec.operator, spec.version) for spec in self.specifier] + self.extras = tuple(map(safe_extra, self.extras)) + self.hashCmp = ( + self.key, + self.specifier, + frozenset(self.extras), + str(self.marker) if self.marker else None, + ) + self.__hash = hash(self.hashCmp) + + def __eq__(self, other): + return ( + isinstance(other, Requirement) and + self.hashCmp == other.hashCmp + ) + + def __ne__(self, other): + return not self == other + + def __contains__(self, item): + if isinstance(item, Distribution): + if item.key != self.key: + return False + + item = item.version + + # Allow prereleases always in order to match the previous behavior of + # this method. 
In the future this should be smarter and follow PEP 440 + # more accurately. + return self.specifier.contains(item, prereleases=True) + + def __hash__(self): + return self.__hash + + def __repr__(self): + return "Requirement.parse(%r)" % str(self) + + @staticmethod + def parse(s): + req, = parse_requirements(s) + return req + + +def _always_object(classes): + """ + Ensure object appears in the mro even + for old-style classes. + """ + if object not in classes: + return classes + (object,) + return classes + + +def _find_adapter(registry, ob): + """Return an adapter factory for `ob` from `registry`""" + types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) + for t in types: + if t in registry: + return registry[t] + + +def ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + py31compat.makedirs(dirname, exist_ok=True) + + +def _bypass_ensure_directory(path): + """Sandbox-bypassing version of ensure_directory()""" + if not WRITE_SUPPORT: + raise IOError('"os.mkdir" not supported on this platform.') + dirname, filename = split(path) + if dirname and filename and not isdir(dirname): + _bypass_ensure_directory(dirname) + try: + mkdir(dirname, 0o755) + except FileExistsError: + pass + + +def split_sections(s): + """Split a string or iterable thereof into (section, content) pairs + + Each ``section`` is a stripped version of the section header ("[section]") + and each ``content`` is a list of stripped lines excluding blank lines and + comment-only lines. If there are any such lines before the first section + header, they're returned in a first ``section`` of ``None``. + """ + section = None + content = [] + for line in yield_lines(s): + if line.startswith("["): + if line.endswith("]"): + if section or content: + yield section, content + section = line[1:-1].strip() + content = [] + else: + raise ValueError("Invalid section heading", line) + else: + content.append(line) + + # wrap up last segment + yield section, content + + +def _mkstemp(*args, **kw): + old_open = os.open + try: + # temporarily bypass sandboxing + os.open = os_open + return tempfile.mkstemp(*args, **kw) + finally: + # and then put it back + os.open = old_open + + +# Silence the PEP440Warning by default, so that end users don't get hit by it +# randomly just because they use pkg_resources. We want to append the rule +# because we want earlier uses of filterwarnings to take precedence over this +# one. +warnings.filterwarnings("ignore", category=PEP440Warning, append=True) + + +# from jaraco.functools 1.3 +def _call_aside(f, *args, **kwargs): + f(*args, **kwargs) + return f + + +@_call_aside +def _initialize(g=globals()): + "Set up global resource manager (deliberately not state-saved)" + manager = ResourceManager() + g['_manager'] = manager + g.update( + (name, getattr(manager, name)) + for name in dir(manager) + if not name.startswith('_') + ) + + +@_call_aside +def _initialize_master_working_set(): + """ + Prepare the master working set and make the ``require()`` + API available. + + This function has explicit effects on the global state + of pkg_resources. It is intended to be invoked once at + the initialization of this module. + + Invocation by other packages is unsupported and done + at their own risk. 
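+
+    Once it has run, the module-level convenience API is bound, e.g.::
+
+        import pkg_resources
+        pkg_resources.require('setuptools')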
+ """ + working_set = WorkingSet._build_master() + _declare_state('object', working_set=working_set) + + require = working_set.require + iter_entry_points = working_set.iter_entry_points + add_activation_listener = working_set.subscribe + run_script = working_set.run_script + # backward compatibility + run_main = run_script + # Activate all distributions already on sys.path with replace=False and + # ensure that all distributions added to the working set in the future + # (e.g. by calling ``require()``) will get activated as well, + # with higher priority (replace=True). + tuple( + dist.activate(replace=False) + for dist in working_set + ) + add_activation_listener( + lambda dist: dist.activate(replace=True), + existing=False, + ) + working_set.entries = [] + # match order + list(map(working_set.add_entry, sys.path)) + globals().update(locals()) + +class PkgResourcesDeprecationWarning(Warning): + """ + Base class for warning about deprecations in ``pkg_resources`` + + This class is not derived from ``DeprecationWarning``, and as such is + visible by default. + """ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/__init__.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/__init__.pyc new file mode 100644 index 0000000..c4b62fe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/__init__.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/__init__.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/__init__.pyc new file mode 100644 index 0000000..c726675 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/appdirs.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/appdirs.py new file mode 100644 index 0000000..ae67001 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/appdirs.py @@ -0,0 +1,608 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright (c) 2005-2010 ActiveState Software Inc. +# Copyright (c) 2013 Eddy Petrișor + +"""Utilities for determining application-specific dirs. + +See <http://github.com/ActiveState/appdirs> for details and usage. +""" +# Dev Notes: +# - MSDN on where to store app data files: +# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 +# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html +# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html + +__version_info__ = (1, 4, 3) +__version__ = '.'.join(map(str, __version_info__)) + + +import sys +import os + +PY3 = sys.version_info[0] == 3 + +if PY3: + unicode = str + +if sys.platform.startswith('java'): + import platform + os_name = platform.java_ver()[3][0] + if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. + system = 'win32' + elif os_name.startswith('Mac'): # "Mac OS X", etc. + system = 'darwin' + else: # "Linux", "SunOS", "FreeBSD", etc. + # Setting this to "linux2" is not ideal, but only Windows or Mac + # are actually checked for and the rest of the module expects + # *sys.platform* style strings. 
+ system = 'linux2' +else: + system = sys.platform + + + +def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> + for a discussion of issues. + + Typical user data directories are: + Mac OS X: ~/Library/Application Support/<AppName> + Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined + Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName> + Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName> + Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName> + Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName> + + For Unix, we follow the XDG spec and support $XDG_DATA_HOME. + That means, by default "~/.local/share/<AppName>". + """ + if system == "win32": + if appauthor is None: + appauthor = appname + const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" + path = os.path.normpath(_get_win_folder(const)) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + elif system == 'darwin': + path = os.path.expanduser('~/Library/Application Support/') + if appname: + path = os.path.join(path, appname) + else: + path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): + r"""Return full path to the user-shared data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "multipath" is an optional parameter only applicable to *nix + which indicates that the entire list of data dirs should be + returned. 
By default, the first item from XDG_DATA_DIRS is + returned, or '/usr/local/share/<AppName>', + if XDG_DATA_DIRS is not set + + Typical site data directories are: + Mac OS X: /Library/Application Support/<AppName> + Unix: /usr/local/share/<AppName> or /usr/share/<AppName> + Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> + Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) + Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. + + For Unix, this is using the $XDG_DATA_DIRS[0] default. + + WARNING: Do not use this on Windows. See the Vista-Fail note above for why. + """ + if system == "win32": + if appauthor is None: + appauthor = appname + path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + elif system == 'darwin': + path = os.path.expanduser('/Library/Application Support') + if appname: + path = os.path.join(path, appname) + else: + # XDG default for $XDG_DATA_DIRS + # only first, if multipath is False + path = os.getenv('XDG_DATA_DIRS', + os.pathsep.join(['/usr/local/share', '/usr/share'])) + pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] + if appname: + if version: + appname = os.path.join(appname, version) + pathlist = [os.sep.join([x, appname]) for x in pathlist] + + if multipath: + path = os.pathsep.join(pathlist) + else: + path = pathlist[0] + return path + + if appname and version: + path = os.path.join(path, version) + return path + + +def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific config dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> + for a discussion of issues. + + Typical user config directories are: + Mac OS X: same as user_data_dir + Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined + Win *: same as user_data_dir + + For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. + That means, by default "~/.config/<AppName>". + """ + if system in ["win32", "darwin"]: + path = user_data_dir(appname, appauthor, None, roaming) + else: + path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): + r"""Return full path to the user-shared data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. 
+ "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "multipath" is an optional parameter only applicable to *nix + which indicates that the entire list of config dirs should be + returned. By default, the first item from XDG_CONFIG_DIRS is + returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set + + Typical site config directories are: + Mac OS X: same as site_data_dir + Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in + $XDG_CONFIG_DIRS + Win *: same as site_data_dir + Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) + + For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False + + WARNING: Do not use this on Windows. See the Vista-Fail note above for why. + """ + if system in ["win32", "darwin"]: + path = site_data_dir(appname, appauthor) + if appname and version: + path = os.path.join(path, version) + else: + # XDG default for $XDG_CONFIG_DIRS + # only first, if multipath is False + path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') + pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] + if appname: + if version: + appname = os.path.join(appname, version) + pathlist = [os.sep.join([x, appname]) for x in pathlist] + + if multipath: + path = os.pathsep.join(pathlist) + else: + path = pathlist[0] + return path + + +def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): + r"""Return full path to the user-specific cache dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "opinion" (boolean) can be False to disable the appending of + "Cache" to the base app data dir for Windows. See + discussion below. + + Typical user cache directories are: + Mac OS X: ~/Library/Caches/<AppName> + Unix: ~/.cache/<AppName> (XDG default) + Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache + Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache + + On Windows the only suggestion in the MSDN docs is that local settings go in + the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming + app data dir (the default returned by `user_data_dir` above). Apps typically + put cache data somewhere *under* the given dir here. Some examples: + ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache + ...\Acme\SuperApp\Cache\1.0 + OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. + This can be disabled with the `opinion=False` option. 
+ """ + if system == "win32": + if appauthor is None: + appauthor = appname + path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + if opinion: + path = os.path.join(path, "Cache") + elif system == 'darwin': + path = os.path.expanduser('~/Library/Caches') + if appname: + path = os.path.join(path, appname) + else: + path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific state dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> + for a discussion of issues. + + Typical user state directories are: + Mac OS X: same as user_data_dir + Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined + Win *: same as user_data_dir + + For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state> + to extend the XDG spec and support $XDG_STATE_HOME. + + That means, by default "~/.local/state/<AppName>". + """ + if system in ["win32", "darwin"]: + path = user_data_dir(appname, appauthor, None, roaming) + else: + path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): + r"""Return full path to the user-specific log dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be "<major>.<minor>". + Only applied when appname is present. + "opinion" (boolean) can be False to disable the appending of + "Logs" to the base app data dir for Windows, and "log" to the + base cache dir for Unix. See discussion below. 
+ + Typical user log directories are: + Mac OS X: ~/Library/Logs/<AppName> + Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined + Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs + Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs + + On Windows the only suggestion in the MSDN docs is that local settings + go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in + examples of what some windows apps use for a logs dir.) + + OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` + value for Windows and appends "log" to the user cache dir for Unix. + This can be disabled with the `opinion=False` option. + """ + if system == "darwin": + path = os.path.join( + os.path.expanduser('~/Library/Logs'), + appname) + elif system == "win32": + path = user_data_dir(appname, appauthor, version) + version = False + if opinion: + path = os.path.join(path, "Logs") + else: + path = user_cache_dir(appname, appauthor, version) + version = False + if opinion: + path = os.path.join(path, "log") + if appname and version: + path = os.path.join(path, version) + return path + + +class AppDirs(object): + """Convenience wrapper for getting application dirs.""" + def __init__(self, appname=None, appauthor=None, version=None, + roaming=False, multipath=False): + self.appname = appname + self.appauthor = appauthor + self.version = version + self.roaming = roaming + self.multipath = multipath + + @property + def user_data_dir(self): + return user_data_dir(self.appname, self.appauthor, + version=self.version, roaming=self.roaming) + + @property + def site_data_dir(self): + return site_data_dir(self.appname, self.appauthor, + version=self.version, multipath=self.multipath) + + @property + def user_config_dir(self): + return user_config_dir(self.appname, self.appauthor, + version=self.version, roaming=self.roaming) + + @property + def site_config_dir(self): + return site_config_dir(self.appname, self.appauthor, + version=self.version, multipath=self.multipath) + + @property + def user_cache_dir(self): + return user_cache_dir(self.appname, self.appauthor, + version=self.version) + + @property + def user_state_dir(self): + return user_state_dir(self.appname, self.appauthor, + version=self.version) + + @property + def user_log_dir(self): + return user_log_dir(self.appname, self.appauthor, + version=self.version) + + +#---- internal support stuff + +def _get_win_folder_from_registry(csidl_name): + """This is a fallback technique at best. I'm not sure if using the + registry for this guarantees us the correct answer for all CSIDL_* + names. + """ + if PY3: + import winreg as _winreg + else: + import _winreg + + shell_folder_name = { + "CSIDL_APPDATA": "AppData", + "CSIDL_COMMON_APPDATA": "Common AppData", + "CSIDL_LOCAL_APPDATA": "Local AppData", + }[csidl_name] + + key = _winreg.OpenKey( + _winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" + ) + dir, type = _winreg.QueryValueEx(key, shell_folder_name) + return dir + + +def _get_win_folder_with_pywin32(csidl_name): + from win32com.shell import shellcon, shell + dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) + # Try to make this a unicode path because SHGetFolderPath does + # not return unicode strings when there is unicode data in the + # path. + try: + dir = unicode(dir) + + # Downgrade to short path name if have highbit chars. See + # <http://bugs.activestate.com/show_bug.cgi?id=85099>. 
+ has_high_char = False + for c in dir: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + try: + import win32api + dir = win32api.GetShortPathName(dir) + except ImportError: + pass + except UnicodeError: + pass + return dir + + +def _get_win_folder_with_ctypes(csidl_name): + import ctypes + + csidl_const = { + "CSIDL_APPDATA": 26, + "CSIDL_COMMON_APPDATA": 35, + "CSIDL_LOCAL_APPDATA": 28, + }[csidl_name] + + buf = ctypes.create_unicode_buffer(1024) + ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) + + # Downgrade to short path name if have highbit chars. See + # <http://bugs.activestate.com/show_bug.cgi?id=85099>. + has_high_char = False + for c in buf: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + buf2 = ctypes.create_unicode_buffer(1024) + if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): + buf = buf2 + + return buf.value + +def _get_win_folder_with_jna(csidl_name): + import array + from com.sun import jna + from com.sun.jna.platform import win32 + + buf_size = win32.WinDef.MAX_PATH * 2 + buf = array.zeros('c', buf_size) + shell = win32.Shell32.INSTANCE + shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) + dir = jna.Native.toString(buf.tostring()).rstrip("\0") + + # Downgrade to short path name if have highbit chars. See + # <http://bugs.activestate.com/show_bug.cgi?id=85099>. + has_high_char = False + for c in dir: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + buf = array.zeros('c', buf_size) + kernel = win32.Kernel32.INSTANCE + if kernel.GetShortPathName(dir, buf, buf_size): + dir = jna.Native.toString(buf.tostring()).rstrip("\0") + + return dir + +if system == "win32": + try: + import win32com.shell + _get_win_folder = _get_win_folder_with_pywin32 + except ImportError: + try: + from ctypes import windll + _get_win_folder = _get_win_folder_with_ctypes + except ImportError: + try: + import com.sun.jna + _get_win_folder = _get_win_folder_with_jna + except ImportError: + _get_win_folder = _get_win_folder_from_registry + + +#---- self test code + +if __name__ == "__main__": + appname = "MyApp" + appauthor = "MyCompany" + + props = ("user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "site_data_dir", + "site_config_dir") + + print("-- app dirs %s --" % __version__) + + print("-- app dirs (with optional 'version')") + dirs = AppDirs(appname, appauthor, version="1.0") + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (without optional 'version')") + dirs = AppDirs(appname, appauthor) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (without optional 'appauthor')") + dirs = AppDirs(appname) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (with disabled 'appauthor')") + dirs = AppDirs(appname, appauthor=False) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/appdirs.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/appdirs.pyc new file mode 100644 index 0000000..e4e4098 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/appdirs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__about__.py 
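A minimal usage sketch for the vendored appdirs module above; this is illustrative only, not part of the committed tree. The import path assumes the copy vendored under pkg_resources._vendor is importable as shown, and "MyApp"/"MyCompany" are placeholder values.

# Illustrative sketch; import path and app names are assumptions.
from pkg_resources._vendor.appdirs import AppDirs, user_cache_dir

dirs = AppDirs("MyApp", "MyCompany", version="1.0")
print(dirs.user_data_dir)    # platform-specific per-user data dir
print(dirs.user_cache_dir)   # e.g. ~/.cache/MyApp/1.0 on Linux
print(dirs.user_log_dir)     # cache dir plus "log" on Unix, "Logs" on Windows

# The module-level functions take the same arguments directly:
print(user_cache_dir("MyApp", appauthor=False))  # disable the author segment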
b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__about__.py new file mode 100644 index 0000000..95d330e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__about__.py @@ -0,0 +1,21 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "16.8" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD or Apache License, Version 2.0" +__copyright__ = "Copyright 2014-2016 %s" % __author__ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__about__.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__about__.pyc new file mode 100644 index 0000000..c3c0d68 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__about__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__init__.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__init__.py new file mode 100644 index 0000000..5ee6220 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__init__.py @@ -0,0 +1,14 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +from .__about__ import ( + __author__, __copyright__, __email__, __license__, __summary__, __title__, + __uri__, __version__ +) + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__init__.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__init__.pyc new file mode 100644 index 0000000..7c83d2d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_compat.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_compat.py new file mode 100644 index 0000000..210bb80 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_compat.py @@ -0,0 +1,30 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import sys + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +# flake8: noqa + +if PY3: + string_types = str, +else: + string_types = basestring, + + +def with_metaclass(meta, *bases): + """ + Create a base class with a metaclass. 
+ """ + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_compat.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_compat.pyc new file mode 100644 index 0000000..80a3f02 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.py new file mode 100644 index 0000000..ccc2786 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.py @@ -0,0 +1,68 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + + +class Infinity(object): + + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + +Infinity = Infinity() + + +class NegativeInfinity(object): + + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + +NegativeInfinity = NegativeInfinity() diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.pyc new file mode 100644 index 0000000..cc12347 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/markers.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/markers.py new file mode 100644 index 0000000..892e578 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/markers.py @@ -0,0 +1,301 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+from __future__ import absolute_import, division, print_function + +import operator +import os +import platform +import sys + +from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd +from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString +from pkg_resources.extern.pyparsing import Literal as L # noqa + +from ._compat import string_types +from .specifiers import Specifier, InvalidSpecifier + + +__all__ = [ + "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName", + "Marker", "default_environment", +] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. + """ + + +class Node(object): + + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + def __repr__(self): + return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) + + def serialize(self): + raise NotImplementedError + + +class Variable(Node): + + def serialize(self): + return str(self) + + +class Value(Node): + + def serialize(self): + return '"{0}"'.format(self) + + +class Op(Node): + + def serialize(self): + return str(self) + + +VARIABLE = ( + L("implementation_version") | + L("platform_python_implementation") | + L("implementation_name") | + L("python_full_version") | + L("platform_release") | + L("platform_version") | + L("platform_machine") | + L("platform_system") | + L("python_version") | + L("sys_platform") | + L("os_name") | + L("os.name") | # PEP-345 + L("sys.platform") | # PEP-345 + L("platform.version") | # PEP-345 + L("platform.machine") | # PEP-345 + L("platform.python_implementation") | # PEP-345 + L("python_implementation") | # undocumented setuptools legacy + L("extra") +) +ALIASES = { + 'os.name': 'os_name', + 'sys.platform': 'sys_platform', + 'platform.version': 'platform_version', + 'platform.machine': 'platform_machine', + 'platform.python_implementation': 'platform_python_implementation', + 'python_implementation': 'platform_python_implementation' +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) + +VERSION_CMP = ( + L("===") | + L("==") | + L(">=") | + L("<=") | + L("!=") | + L("~=") | + L(">") | + L("<") +) + +MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) + +MARKER_VALUE = QuotedString("'") | QuotedString('"') +MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) + +BOOLOP = L("and") | L("or") + +MARKER_VAR = VARIABLE | MARKER_VALUE + +MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) +MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) + +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() + +MARKER_EXPR = Forward() +MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) +MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) + +MARKER = stringStart + MARKER_EXPR + stringEnd + + +def _coerce_parse_result(results): + if isinstance(results, ParseResults): + return [_coerce_parse_result(i) for i in results] + else: + return results + + +def _format_marker(marker, first=True): + assert isinstance(marker, (list, tuple, string_types)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself 
it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. + if (isinstance(marker, list) and len(marker) == 1 and + isinstance(marker[0], (list, tuple))): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs, op, rhs): + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs) + + oper = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison( + "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) + ) + + return oper(lhs, rhs) + + +_undefined = object() + + +def _get_env(environment, name): + value = environment.get(name, _undefined) + + if value is _undefined: + raise UndefinedEnvironmentName( + "{0!r} does not exist in evaluation environment.".format(name) + ) + + return value + + +def _evaluate_markers(markers, environment): + groups = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, string_types)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + lhs_value = _get_env(environment, lhs.value) + rhs_value = rhs.value + else: + lhs_value = lhs.value + rhs_value = _get_env(environment, rhs.value) + + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info): + version = '{0.major}.{0.minor}.{0.micro}'.format(info) + kind = info.releaselevel + if kind != 'final': + version += kind[0] + str(info.serial) + return version + + +def default_environment(): + if hasattr(sys, 'implementation'): + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + else: + iver = '0' + implementation_name = '' + + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": platform.python_version()[:3], + "sys_platform": sys.platform, + } + + +class Marker(object): + + def __init__(self, marker): + try: + self._markers = _coerce_parse_result(MARKER.parseString(marker)) + except ParseException as e: + err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( + marker, marker[e.loc:e.loc + 8]) + raise InvalidMarker(err_str) + + def __str__(self): + return _format_marker(self._markers) + + def __repr__(self): + return "<Marker({0!r})>".format(str(self)) + + def evaluate(self, environment=None): + """Evaluate a marker. + + Return the boolean from evaluating the given marker against the + environment. 
environment is an optional argument to override all or + part of the determined environment. + + The environment is determined from the current Python process. + """ + current_environment = default_environment() + if environment is not None: + current_environment.update(environment) + + return _evaluate_markers(self._markers, current_environment) diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/markers.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/markers.pyc new file mode 100644 index 0000000..056a741 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/markers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/requirements.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/requirements.py new file mode 100644 index 0000000..0c8c4a3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/requirements.py @@ -0,0 +1,127 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import string +import re + +from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException +from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine +from pkg_resources.extern.pyparsing import Literal as L # noqa +from pkg_resources.extern.six.moves.urllib import parse as urlparse + +from .markers import MARKER_EXPR, Marker +from .specifiers import LegacySpecifier, Specifier, SpecifierSet + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. 
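A usage sketch for the Marker class above, assuming the vendored import path; it mirrors the evaluate() docstring: the environment is derived from the running interpreter and can be partially overridden.

from pkg_resources._vendor.packaging.markers import Marker, default_environment

m = Marker('os_name == "posix" and python_version < "3"')
print(m.evaluate())                   # evaluated against the running interpreter
print(m.evaluate({"os_name": "nt"}))  # override part of the environment: False
print(sorted(default_environment()))  # the variable names the grammar accepts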
+ """ + + +ALPHANUM = Word(string.ascii_letters + string.digits) + +LBRACKET = L("[").suppress() +RBRACKET = L("]").suppress() +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() +COMMA = L(",").suppress() +SEMICOLON = L(";").suppress() +AT = L("@").suppress() + +PUNCTUATION = Word("-_.") +IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) +IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) + +NAME = IDENTIFIER("name") +EXTRA = IDENTIFIER + +URI = Regex(r'[^ ]+')("url") +URL = (AT + URI) + +EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) +EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") + +VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) +VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) + +VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY +VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), + joinString=",", adjacent=False)("_raw_spec") +_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '') + +VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") +VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) + +MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") +MARKER_EXPR.setParseAction( + lambda s, l, t: Marker(s[t._original_start:t._original_end]) +) +MARKER_SEPERATOR = SEMICOLON +MARKER = MARKER_SEPERATOR + MARKER_EXPR + +VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) +URL_AND_MARKER = URL + Optional(MARKER) + +NAMED_REQUIREMENT = \ + NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) + +REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd + + +class Requirement(object): + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? 
+ + def __init__(self, requirement_string): + try: + req = REQUIREMENT.parseString(requirement_string) + except ParseException as e: + raise InvalidRequirement( + "Invalid requirement, parse error at \"{0!r}\"".format( + requirement_string[e.loc:e.loc + 8])) + + self.name = req.name + if req.url: + parsed_url = urlparse.urlparse(req.url) + if not (parsed_url.scheme and parsed_url.netloc) or ( + not parsed_url.scheme and not parsed_url.netloc): + raise InvalidRequirement("Invalid URL given") + self.url = req.url + else: + self.url = None + self.extras = set(req.extras.asList() if req.extras else []) + self.specifier = SpecifierSet(req.specifier) + self.marker = req.marker if req.marker else None + + def __str__(self): + parts = [self.name] + + if self.extras: + parts.append("[{0}]".format(",".join(sorted(self.extras)))) + + if self.specifier: + parts.append(str(self.specifier)) + + if self.url: + parts.append("@ {0}".format(self.url)) + + if self.marker: + parts.append("; {0}".format(self.marker)) + + return "".join(parts) + + def __repr__(self): + return "<Requirement({0!r})>".format(str(self)) diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/requirements.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/requirements.pyc new file mode 100644 index 0000000..4cd8048 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/requirements.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py new file mode 100644 index 0000000..7f5a76c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py @@ -0,0 +1,774 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import abc +import functools +import itertools +import re + +from ._compat import string_types, with_metaclass +from .version import Version, LegacyVersion, parse + + +class InvalidSpecifier(ValueError): + """ + An invalid specifier was found, users should refer to PEP 440. + """ + + +class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): + + @abc.abstractmethod + def __str__(self): + """ + Returns the str representation of this Specifier like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self): + """ + Returns a hash value for this Specifier like object. + """ + + @abc.abstractmethod + def __eq__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are equal. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are not equal. + """ + + @abc.abstractproperty + def prereleases(self): + """ + Returns whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @prereleases.setter + def prereleases(self, value): + """ + Sets whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @abc.abstractmethod + def contains(self, item, prereleases=None): + """ + Determines if the given item is contained within this specifier. 
+ """ + + @abc.abstractmethod + def filter(self, iterable, prereleases=None): + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators = {} + + def __init__(self, spec="", prereleases=None): + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) + + self._spec = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "<{0}({1!r}{2})>".format( + self.__class__.__name__, + str(self), + pre, + ) + + def __str__(self): + return "{0}{1}".format(*self._spec) + + def __hash__(self): + return hash(self._spec) + + def __eq__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec == other._spec + + def __ne__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + + def _get_operator(self, op): + return getattr(self, "_compare_{0}".format(self._operators[op])) + + def _coerce_version(self, version): + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self): + return self._spec[0] + + @property + def version(self): + return self._spec[1] + + @property + def prereleases(self): + return self._prereleases + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version or LegacyVersion, this allows us to have + # a shortcut for ``"2.0" in Specifier(">=2") + item = self._coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + return self._get_operator(self.operator)(item, self.version) + + def filter(self, iterable, prereleases=None): + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = self._coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later incase nothing + # else matches this specifier. 
+ if (parsed_version.is_prerelease and not + (prereleases or self.prereleases)): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the begining. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +class LegacySpecifier(_IndividualSpecifier): + + _regex_str = ( + r""" + (?P<operator>(==|!=|<=|>=|<|>)) + \s* + (?P<version> + [^,;\s)]* # Since this is a "legacy" specifier, and the version + # string can be just about anything, we match everything + # except for whitespace, a semi-colon for marker support, + # a closing paren since versions can be enclosed in + # them, and a comma since it's a version separator. + ) + """ + ) + + _regex = re.compile( + r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + } + + def _coerce_version(self, version): + if not isinstance(version, LegacyVersion): + version = LegacyVersion(str(version)) + return version + + def _compare_equal(self, prospective, spec): + return prospective == self._coerce_version(spec) + + def _compare_not_equal(self, prospective, spec): + return prospective != self._coerce_version(spec) + + def _compare_less_than_equal(self, prospective, spec): + return prospective <= self._coerce_version(spec) + + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= self._coerce_version(spec) + + def _compare_less_than(self, prospective, spec): + return prospective < self._coerce_version(spec) + + def _compare_greater_than(self, prospective, spec): + return prospective > self._coerce_version(spec) + + +def _require_version_compare(fn): + @functools.wraps(fn) + def wrapped(self, prospective, spec): + if not isinstance(prospective, Version): + return False + return fn(self, prospective, spec) + return wrapped + + +class Specifier(_IndividualSpecifier): + + _regex_str = ( + r""" + (?P<operator>(~=|==|!=|<=|>=|<|>|===)) + (?P<version> + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s]* # We just match everything, except for whitespace + # since we are only testing for strict identity. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + + # You cannot use a wild card and a dev or local version + # together so group them with a | and make them optional. + (?: + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + | + \.\* # Wild card syntax of .* + )? 
+ ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?<!==|!=|~=) # We have special cases for these + # operators so we want to make sure they + # don't match here. + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + ) + """ + ) + + _regex = re.compile( + r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "~=": "compatible", + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + @_require_version_compare + def _compare_compatible(self, prospective, spec): + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore post and dev releases and we want to treat the pre-release as + # it's own separate segment. + prefix = ".".join( + list( + itertools.takewhile( + lambda x: (not x.startswith("post") and not + x.startswith("dev")), + _version_split(spec), + ) + )[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return (self._get_operator(">=")(prospective, spec) and + self._get_operator("==")(prospective, prefix)) + + @_require_version_compare + def _compare_equal(self, prospective, spec): + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + prospective = Version(prospective.public) + # Split the spec out by dots, and pretend that there is an implicit + # dot in between a release segment and a pre-release segment. + spec = _version_split(spec[:-2]) # Remove the trailing .* + + # Split the prospective version out by dots, and pretend that there + # is an implicit dot in between a release segment and a pre-release + # segment. + prospective = _version_split(str(prospective)) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + prospective = prospective[:len(spec)] + + # Pad out our two sides with zeros so that they both equal the same + # length. + spec, prospective = _pad_version(spec, prospective) + else: + # Convert our spec string into a Version + spec = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. 
+ if not spec.local: + prospective = Version(prospective.public) + + return prospective == spec + + @_require_version_compare + def _compare_not_equal(self, prospective, spec): + return not self._compare_equal(prospective, spec) + + @_require_version_compare + def _compare_less_than_equal(self, prospective, spec): + return prospective <= Version(spec) + + @_require_version_compare + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= Version(spec) + + @_require_version_compare + def _compare_less_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is techincally greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective, spec): + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self): + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. 
+ if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if parse(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version): + result = [] + for item in version.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _pad_version(left, right): + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]):]) + right_split.append(right[len(right_split[0]):]) + + # Insert our padding + left_split.insert( + 1, + ["0"] * max(0, len(right_split[0]) - len(left_split[0])), + ) + right_split.insert( + 1, + ["0"] * max(0, len(left_split[0]) - len(right_split[0])), + ) + + return ( + list(itertools.chain(*left_split)), + list(itertools.chain(*right_split)), + ) + + +class SpecifierSet(BaseSpecifier): + + def __init__(self, specifiers="", prereleases=None): + # Split on , to break each indidivual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. + specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Parsed each individual specifier, attempting first to make it a + # Specifier and falling back to a LegacySpecifier. + parsed = set() + for specifier in specifiers: + try: + parsed.add(Specifier(specifier)) + except InvalidSpecifier: + parsed.add(LegacySpecifier(specifier)) + + # Turn our parsed specifiers into a frozen set and save them for later. + self._specs = frozenset(parsed) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "<SpecifierSet({0!r}{1})>".format(str(self), pre) + + def __str__(self): + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self): + return hash(self._specs) + + def __and__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." 
+ ) + + return specifier + + def __eq__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __ne__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + + def __len__(self): + return len(self._specs) + + def __iter__(self): + return iter(self._specs) + + @property + def prereleases(self): + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all( + s.contains(item, prereleases=prereleases) + for s in self._specs + ) + + def filter(self, iterable, prereleases=None): + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iterable + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases, and which will filter out LegacyVersion in general. 
+ else: + filtered = [] + found_prereleases = [] + + for item in iterable: + # Ensure that we some kind of Version class for this item. + if not isinstance(item, (LegacyVersion, Version)): + parsed_version = parse(item) + else: + parsed_version = item + + # Filter out any item which is parsed as a LegacyVersion + if isinstance(parsed_version, LegacyVersion): + continue + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return found_prereleases + + return filtered diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.pyc new file mode 100644 index 0000000..d73a0b8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/utils.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/utils.py new file mode 100644 index 0000000..942387c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/utils.py @@ -0,0 +1,14 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import re + + +_canonicalize_regex = re.compile(r"[-_.]+") + + +def canonicalize_name(name): + # This is taken from PEP 503. + return _canonicalize_regex.sub("-", name).lower() diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/utils.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/utils.pyc new file mode 100644 index 0000000..f65a023 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/version.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/version.py new file mode 100644 index 0000000..83b5ee8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/version.py @@ -0,0 +1,393 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import collections +import itertools +import re + +from ._structures import Infinity + + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" +] + + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. 
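A usage sketch covering the SpecifierSet class and the canonicalize_name helper above, assuming the vendored import paths; the version numbers are placeholders.

from pkg_resources._vendor.packaging.specifiers import SpecifierSet
from pkg_resources._vendor.packaging.utils import canonicalize_name

spec = SpecifierSet(">=1.0,!=1.3.4,<2.0")
print("1.4" in spec)     # True
print("1.3.4" in spec)   # False, excluded by !=
print(list(spec.filter(["1.2", "1.3.4", "2.1", "1.5a1"])))  # ['1.2']
print(list(spec.filter(["1.5a1"], prereleases=True)))       # ['1.5a1']

print(canonicalize_name("Foo.Bar_baz"))  # foo-bar-baz, per PEP 503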
+ """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + """ + + +class _BaseVersion(object): + + def __hash__(self): + return hash(self._key) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) + + def _compare(self, other, method): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return method(self._key, other._key) + + +class LegacyVersion(_BaseVersion): + + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + def __str__(self): + return self._version + + def __repr__(self): + return "<LegacyVersion({0})>".format(repr(str(self))) + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + def is_postrelease(self): + return False + + +_legacy_version_component_re = re.compile( + r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, +) + +_legacy_version_replacement_map = { + "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + parts = tuple(parts) + + return epoch, parts + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P<epoch>[0-9]+)!)? # epoch + (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment + (?P<pre> # pre-release + [-_\.]? + (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) + [-_\.]? + (?P<pre_n>[0-9]+)? + )? + (?P<post> # post release + (?:-(?P<post_n1>[0-9]+)) + | + (?: + [-_\.]? + (?P<post_l>post|rev|r) + [-_\.]? + (?P<post_n2>[0-9]+)? + ) + )? + (?P<dev> # dev release + [-_\.]? + (?P<dev_l>dev) + [-_\.]? + (?P<dev_n>[0-9]+)? + )? + ) + (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? 
# local version +""" + + +class Version(_BaseVersion): + + _regex = re.compile( + r"^\s*" + VERSION_PATTERN + r"\s*$", + re.VERBOSE | re.IGNORECASE, + ) + + def __init__(self, version): + # Validate the version and parse it into pieces + match = self._regex.search(version) + if not match: + raise InvalidVersion("Invalid version: '{0}'".format(version)) + + # Store the parsed out pieces of the version + self._version = _Version( + epoch=int(match.group("epoch")) if match.group("epoch") else 0, + release=tuple(int(i) for i in match.group("release").split(".")), + pre=_parse_letter_version( + match.group("pre_l"), + match.group("pre_n"), + ), + post=_parse_letter_version( + match.group("post_l"), + match.group("post_n1") or match.group("post_n2"), + ), + dev=_parse_letter_version( + match.group("dev_l"), + match.group("dev_n"), + ), + local=_parse_local_version(match.group("local")), + ) + + # Generate a key which will be used for sorting + self._key = _cmpkey( + self._version.epoch, + self._version.release, + self._version.pre, + self._version.post, + self._version.dev, + self._version.local, + ) + + def __repr__(self): + return "<Version({0})>".format(repr(str(self))) + + def __str__(self): + parts = [] + + # Epoch + if self._version.epoch != 0: + parts.append("{0}!".format(self._version.epoch)) + + # Release segment + parts.append(".".join(str(x) for x in self._version.release)) + + # Pre-release + if self._version.pre is not None: + parts.append("".join(str(x) for x in self._version.pre)) + + # Post-release + if self._version.post is not None: + parts.append(".post{0}".format(self._version.post[1])) + + # Development release + if self._version.dev is not None: + parts.append(".dev{0}".format(self._version.dev[1])) + + # Local version segment + if self._version.local is not None: + parts.append( + "+{0}".format(".".join(str(x) for x in self._version.local)) + ) + + return "".join(parts) + + @property + def public(self): + return str(self).split("+", 1)[0] + + @property + def base_version(self): + parts = [] + + # Epoch + if self._version.epoch != 0: + parts.append("{0}!".format(self._version.epoch)) + + # Release segment + parts.append(".".join(str(x) for x in self._version.release)) + + return "".join(parts) + + @property + def local(self): + version_string = str(self) + if "+" in version_string: + return version_string.split("+", 1)[1] + + @property + def is_prerelease(self): + return bool(self._version.dev or self._version.pre) + + @property + def is_postrelease(self): + return bool(self._version.post) + + +def _parse_letter_version(letter, number): + if letter: + # We consider there to be an implicit 0 in a pre-release if there is + # not a numeral associated with it. + if number is None: + number = 0 + + # We normalize any letters to their lower case form + letter = letter.lower() + + # We consider some words to be alternate spellings of other words and + # in those cases we want to normalize the spellings to our preferred + # spelling. + if letter == "alpha": + letter = "a" + elif letter == "beta": + letter = "b" + elif letter in ["c", "pre", "preview"]: + letter = "rc" + elif letter in ["rev", "r"]: + letter = "post" + + return letter, int(number) + if not letter and number: + # We assume if we are given a number, but we are not given a letter + # then this is using the implicit post release syntax (e.g. 
1.0-1) + letter = "post" + + return letter, int(number) + + +_local_version_seperators = re.compile(r"[\._-]") + + +def _parse_local_version(local): + """ + Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). + """ + if local is not None: + return tuple( + part.lower() if not part.isdigit() else int(part) + for part in _local_version_seperators.split(local) + ) + + +def _cmpkey(epoch, release, pre, post, dev, local): + # When we compare a release version, we want to compare it with all of the + # trailing zeros removed. So we'll use a reverse the list, drop all the now + # leading zeros until we come to something non zero, then take the rest + # re-reverse it back into the correct order and make it a tuple and use + # that for our sorting key. + release = tuple( + reversed(list( + itertools.dropwhile( + lambda x: x == 0, + reversed(release), + ) + )) + ) + + # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. + # We'll do this by abusing the pre segment, but we _only_ want to do this + # if there is not a pre or a post segment. If we have one of those then + # the normal sorting rules will handle this case correctly. + if pre is None and post is None and dev is not None: + pre = -Infinity + # Versions without a pre-release (except as noted above) should sort after + # those with one. + elif pre is None: + pre = Infinity + + # Versions without a post segment should sort before those with one. + if post is None: + post = -Infinity + + # Versions without a development segment should sort after those with one. + if dev is None: + dev = Infinity + + if local is None: + # Versions without a local segment should sort before those with one. + local = -Infinity + else: + # Versions with a local segment need that segment parsed to implement + # the sorting rules in PEP440. + # - Alpha numeric segments sort before numeric segments + # - Alpha numeric segments sort lexicographically + # - Numeric segments sort numerically + # - Shorter versions sort before longer versions when the prefixes + # match exactly + local = tuple( + (i, "") if isinstance(i, int) else (-Infinity, i) + for i in local + ) + + return epoch, release, pre, post, dev, local diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/version.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/version.pyc new file mode 100644 index 0000000..4bc9a72 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py new file mode 100644 index 0000000..cf75e1e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py @@ -0,0 +1,5742 @@ +# module pyparsing.py +# +# Copyright (c) 2003-2018 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
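A usage sketch for the Version/parse API above, assuming the vendored import path; the version strings are placeholders.

from pkg_resources._vendor.packaging.version import Version, parse

# PEP 440 ordering: dev releases before pre-releases before final before post:
print(Version("1.0.dev0") < Version("1.0a1") < Version("1.0") < Version("1.0.post1"))  # True

v = parse("not a pep 440 version")        # falls back to LegacyVersion
print(type(v).__name__, v.is_prerelease)  # LegacyVersion False

print(Version("1.0+local.7").public, Version("1.0+local.7").local)  # 1.0 local.7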
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +__doc__ = \ +""" +pyparsing module - Classes and methods to define and execute parsing grammars +============================================================================= + +The pyparsing module is an alternative approach to creating and executing simple grammars, +vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you +don't need to learn a new syntax for defining grammars or matching expressions - the parsing module +provides a library of classes that you use to construct the grammar directly in Python. + +Here is a program to parse "Hello, World!" (or any greeting of the form +C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements +(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to +L{Literal} expressions):: + + from pyparsing import Word, alphas + + # define grammar of a greeting + greet = Word(alphas) + "," + Word(alphas) + "!" + + hello = "Hello, World!" + print (hello, "->", greet.parseString(hello)) + +The program outputs the following:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + +The Python representation of the grammar is quite readable, owing to the self-explanatory +class names, and the use of '+', '|' and '^' operators. + +The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an +object with named attributes. + +The pyparsing module handles some of the problems that are typically vexing when writing text parsers: + - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) + - quoted strings + - embedded comments + + +Getting Started - +----------------- +Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing +classes inherit from. 
Use the docstrings for examples of how to: + - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes + - construct character word-group expressions using the L{Word} class + - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes + - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones + - associate names with your parsed results using L{ParserElement.setResultsName} + - find some helpful expression short-cuts like L{delimitedList} and L{oneOf} + - find more useful common expressions in the L{pyparsing_common} namespace class +""" + +__version__ = "2.2.1" +__versionTime__ = "18 Sep 2018 00:49 UTC" +__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" + +import string +from weakref import ref as wkref +import copy +import sys +import warnings +import re +import sre_constants +import collections +import pprint +import traceback +import types +from datetime import datetime + +try: + from _thread import RLock +except ImportError: + from threading import RLock + +try: + # Python 3 + from collections.abc import Iterable + from collections.abc import MutableMapping +except ImportError: + # Python 2.7 + from collections import Iterable + from collections import MutableMapping + +try: + from collections import OrderedDict as _OrderedDict +except ImportError: + try: + from ordereddict import OrderedDict as _OrderedDict + except ImportError: + _OrderedDict = None + +#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) + +__all__ = [ +'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', +'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', +'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', +'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', +'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', +'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', +'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', +'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', +'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', +'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', +'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', +'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', +'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', +'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', +'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', +'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', +'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', +'CloseMatch', 'tokenMap', 'pyparsing_common', +] + +system_version = tuple(sys.version_info)[:3] +PY_3 = system_version[0] == 3 +if PY_3: + _MAX_INT = sys.maxsize + basestring = str + unichr = chr + _ustr = str + + # build list of single arg builtins, that can be used as parse actions + singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] + +else: + _MAX_INT = sys.maxint + 
range = xrange + + def _ustr(obj): + """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries + str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It + then < returns the unicode object | encodes it with the default encoding | ... >. + """ + if isinstance(obj,unicode): + return obj + + try: + # If this works, then _ustr(obj) has the same behaviour as str(obj), so + # it won't break any existing code. + return str(obj) + + except UnicodeEncodeError: + # Else encode it + ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') + xmlcharref = Regex(r'&#\d+;') + xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) + return xmlcharref.transformString(ret) + + # build list of single arg builtins, tolerant of Python version, that can be used as parse actions + singleArgBuiltins = [] + import __builtin__ + for fname in "sum len sorted reversed list tuple set any all min max".split(): + try: + singleArgBuiltins.append(getattr(__builtin__,fname)) + except AttributeError: + continue + +_generatorType = type((y for y in range(1))) + +def _xml_escape(data): + """Escape &, <, >, ", ', etc. in a string of data.""" + + # ampersand must be replaced first + from_symbols = '&><"\'' + to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) + for from_,to_ in zip(from_symbols, to_symbols): + data = data.replace(from_, to_) + return data + +class _Constants(object): + pass + +alphas = string.ascii_uppercase + string.ascii_lowercase +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +_bslash = chr(92) +printables = "".join(c for c in string.printable if c not in string.whitespace) + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( self, pstr, loc=0, msg=None, elem=None ): + self.loc = loc + if msg is None: + self.msg = pstr + self.pstr = "" + else: + self.msg = msg + self.pstr = pstr + self.parserElement = elem + self.args = (pstr, loc, msg) + + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) + + def __getattr__( self, aname ): + """supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + """ + if( aname == "lineno" ): + return lineno( self.loc, self.pstr ) + elif( aname in ("col", "column") ): + return col( self.loc, self.pstr ) + elif( aname == "line" ): + return line( self.loc, self.pstr ) + else: + raise AttributeError(aname) + + def __str__( self ): + return "%s (at char %d), (line:%d, col:%d)" % \ + ( self.msg, self.loc, self.lineno, self.column ) + def __repr__( self ): + return _ustr(self) + def markInputline( self, markerString = ">!<" ): + """Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. 
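+
+        A minimal illustrative sketch (catching the exception raised by
+        C{parseString} and printing the marked line)::
+            try:
+                Word(nums).parseString("ABC")
+            except ParseException as pe:
+                print(pe.markInputline())
+        prints::
+            >!<ABC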
+ """ + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = "".join((line_str[:line_column], + markerString, line_str[line_column:])) + return line_str.strip() + def __dir__(self): + return "lineno col line".split() + dir(type(self)) + +class ParseException(ParseBaseException): + """ + Exception thrown when parse expressions don't match class; + supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + + Example:: + try: + Word(nums).setName("integer").parseString("ABC") + except ParseException as pe: + print(pe) + print("column: {}".format(pe.col)) + + prints:: + Expected integer (at char 0), (line:1, col:1) + column: 1 + """ + pass + +class ParseFatalException(ParseBaseException): + """user-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately""" + pass + +class ParseSyntaxException(ParseFatalException): + """just like L{ParseFatalException}, but thrown internally when an + L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop + immediately because an unbacktrackable syntax error has been found""" + pass + +#~ class ReparseException(ParseBaseException): + #~ """Experimental class - parse actions can raise this exception to cause + #~ pyparsing to reparse the input string: + #~ - with a modified input string, and/or + #~ - with a modified start location + #~ Set the values of the ReparseException in the constructor, and raise the + #~ exception in a parse action to cause pyparsing to use the new string/location. + #~ Setting the values as None causes no change to be made. + #~ """ + #~ def __init_( self, newstring, restartLoc ): + #~ self.newParseText = newstring + #~ self.reparseLoc = restartLoc + +class RecursiveGrammarException(Exception): + """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive""" + def __init__( self, parseElementList ): + self.parseElementTrace = parseElementList + + def __str__( self ): + return "RecursiveGrammarException: %s" % self.parseElementTrace + +class _ParseResultsWithOffset(object): + def __init__(self,p1,p2): + self.tup = (p1,p2) + def __getitem__(self,i): + return self.tup[i] + def __repr__(self): + return repr(self.tup[0]) + def setOffset(self,i): + self.tup = (self.tup[0],i) + +class ParseResults(object): + """ + Structured parse results, to provide multiple means of access to the parsed data: + - as a list (C{len(results)}) + - by list index (C{results[0], results[1]}, etc.) 
+ - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName}) + + Example:: + integer = Word(nums) + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + # equivalent form: + # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + # parseString returns a ParseResults object + result = date_str.parseString("1999/12/31") + + def test(s, fn=repr): + print("%s -> %s" % (s, fn(eval(s)))) + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + prints:: + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: 31 + - month: 12 + - year: 1999 + """ + def __new__(cls, toklist=None, name=None, asList=True, modal=True ): + if isinstance(toklist, cls): + return toklist + retobj = object.__new__(cls) + retobj.__doinit = True + return retobj + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): + if self.__doinit: + self.__doinit = False + self.__name = None + self.__parent = None + self.__accumNames = {} + self.__asList = asList + self.__modal = modal + if toklist is None: + toklist = [] + if isinstance(toklist, list): + self.__toklist = toklist[:] + elif isinstance(toklist, _generatorType): + self.__toklist = list(toklist) + else: + self.__toklist = [toklist] + self.__tokdict = dict() + + if name is not None and name: + if not modal: + self.__accumNames[name] = 0 + if isinstance(name,int): + name = _ustr(name) # will always return a str, but use _ustr for consistency + self.__name = name + if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): + if isinstance(toklist,basestring): + toklist = [ toklist ] + if asList: + if isinstance(toklist,ParseResults): + self[name] = _ParseResultsWithOffset(toklist.copy(),0) + else: + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) + self[name].__name = name + else: + try: + self[name] = toklist[0] + except (KeyError,TypeError,IndexError): + self[name] = toklist + + def __getitem__( self, i ): + if isinstance( i, (int,slice) ): + return self.__toklist[i] + else: + if i not in self.__accumNames: + return self.__tokdict[i][-1][0] + else: + return ParseResults([ v[0] for v in self.__tokdict[i] ]) + + def __setitem__( self, k, v, isinstance=isinstance ): + if isinstance(v,_ParseResultsWithOffset): + self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] + sub = v[0] + elif isinstance(k,(int,slice)): + self.__toklist[k] = v + sub = v + else: + self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] + sub = v + if isinstance(sub,ParseResults): + sub.__parent = wkref(self) + + def __delitem__( self, i ): + if isinstance(i,(int,slice)): + mylen = len( self.__toklist ) + del self.__toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i+1) + # get removed indices + removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for name,occurrences in self.__tokdict.items(): + for j in removed: + for k, (value, position) in 
enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) + else: + del self.__tokdict[i] + + def __contains__( self, k ): + return k in self.__tokdict + + def __len__( self ): return len( self.__toklist ) + def __bool__(self): return ( not not self.__toklist ) + __nonzero__ = __bool__ + def __iter__( self ): return iter( self.__toklist ) + def __reversed__( self ): return iter( self.__toklist[::-1] ) + def _iterkeys( self ): + if hasattr(self.__tokdict, "iterkeys"): + return self.__tokdict.iterkeys() + else: + return iter(self.__tokdict) + + def _itervalues( self ): + return (self[k] for k in self._iterkeys()) + + def _iteritems( self ): + return ((k, self[k]) for k in self._iterkeys()) + + if PY_3: + keys = _iterkeys + """Returns an iterator of all named result keys (Python 3.x only).""" + + values = _itervalues + """Returns an iterator of all named result values (Python 3.x only).""" + + items = _iteritems + """Returns an iterator of all named result key-value tuples (Python 3.x only).""" + + else: + iterkeys = _iterkeys + """Returns an iterator of all named result keys (Python 2.x only).""" + + itervalues = _itervalues + """Returns an iterator of all named result values (Python 2.x only).""" + + iteritems = _iteritems + """Returns an iterator of all named result key-value tuples (Python 2.x only).""" + + def keys( self ): + """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" + return list(self.iterkeys()) + + def values( self ): + """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" + return list(self.itervalues()) + + def items( self ): + """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" + return list(self.iteritems()) + + def haskeys( self ): + """Since keys() returns an iterator, this method is helpful in bypassing + code that looks for the existence of any defined results names.""" + return bool(self.__tokdict) + + def pop( self, *args, **kwargs): + """ + Removes and returns item at specified index (default=C{last}). + Supports both C{list} and C{dict} semantics for C{pop()}. If passed no + argument or an integer argument, it will use C{list} semantics + and pop tokens from the list of parsed tokens. If passed a + non-integer argument (most likely a string), it will use C{dict} + semantics and pop the corresponding value from any defined + results names. A second default return value argument is + supported, just as in C{dict.pop()}. 
+ + Example:: + def remove_first(tokens): + tokens.pop(0) + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + OneOrMore(Word(nums)) + print(patt.parseString("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.addParseAction(remove_LABEL) + print(patt.parseString("AAB 123 321").dump()) + prints:: + ['AAB', '123', '321'] + - LABEL: AAB + + ['AAB', '123', '321'] + """ + if not args: + args = [-1] + for k,v in kwargs.items(): + if k == 'default': + args = (args[0], v) + else: + raise TypeError("pop() got an unexpected keyword argument '%s'" % k) + if (isinstance(args[0], int) or + len(args) == 1 or + args[0] in self): + index = args[0] + ret = self[index] + del self[index] + return ret + else: + defaultvalue = args[1] + return defaultvalue + + def get(self, key, defaultValue=None): + """ + Returns named result matching the given key, or if there is no + such name, then returns the given C{defaultValue} or C{None} if no + C{defaultValue} is specified. + + Similar to C{dict.get()}. + + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ + if key in self: + return self[key] + else: + return defaultValue + + def insert( self, index, insStr ): + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to C{list.insert()}. + + Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] + """ + self.__toklist.insert(index, insStr) + # fixup indices in token dictionary + for name,occurrences in self.__tokdict.items(): + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) + + def append( self, item ): + """ + Add single element to end of ParseResults list of elements. + + Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] + """ + self.__toklist.append(item) + + def extend( self, itemseq ): + """ + Add sequence of elements to end of ParseResults list of elements. 
+ + Example:: + patt = OneOrMore(Word(alphas)) + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ + if isinstance(itemseq, ParseResults): + self += itemseq + else: + self.__toklist.extend(itemseq) + + def clear( self ): + """ + Clear all elements and results names. + """ + del self.__toklist[:] + self.__tokdict.clear() + + def __getattr__( self, name ): + try: + return self[name] + except KeyError: + return "" + + if name in self.__tokdict: + if name not in self.__accumNames: + return self.__tokdict[name][-1][0] + else: + return ParseResults([ v[0] for v in self.__tokdict[name] ]) + else: + return "" + + def __add__( self, other ): + ret = self.copy() + ret += other + return ret + + def __iadd__( self, other ): + if other.__tokdict: + offset = len(self.__toklist) + addoffset = lambda a: offset if a<0 else a+offset + otheritems = other.__tokdict.items() + otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) + for (k,vlist) in otheritems for v in vlist] + for k,v in otherdictitems: + self[k] = v + if isinstance(v[0],ParseResults): + v[0].__parent = wkref(self) + + self.__toklist += other.__toklist + self.__accumNames.update( other.__accumNames ) + return self + + def __radd__(self, other): + if isinstance(other,int) and other == 0: + # useful for merging many ParseResults using sum() builtin + return self.copy() + else: + # this may raise a TypeError - so be it + return other + self + + def __repr__( self ): + return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) + + def __str__( self ): + return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' + + def _asStringList( self, sep='' ): + out = [] + for item in self.__toklist: + if out and sep: + out.append(sep) + if isinstance( item, ParseResults ): + out += item._asStringList() + else: + out.append( _ustr(item) ) + return out + + def asList( self ): + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. + + Example:: + patt = OneOrMore(Word(alphas)) + result = patt.parseString("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] + + # Use asList() to create an actual list + result_list = result.asList() + print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] + """ + return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] + + def asDict( self ): + """ + Returns the named parse results as a nested dictionary. 
+ + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.asDict() + print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ + if PY_3: + item_fn = self.items + else: + item_fn = self.iteritems + + def toItem(obj): + if isinstance(obj, ParseResults): + if obj.haskeys(): + return obj.asDict() + else: + return [toItem(v) for v in obj] + else: + return obj + + return dict((k,toItem(v)) for k,v in item_fn()) + + def copy( self ): + """ + Returns a new copy of a C{ParseResults} object. + """ + ret = ParseResults( self.__toklist ) + ret.__tokdict = self.__tokdict.copy() + ret.__parent = self.__parent + ret.__accumNames.update( self.__accumNames ) + ret.__name = self.__name + return ret + + def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): + """ + (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. + """ + nl = "\n" + out = [] + namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() + for v in vlist) + nextLevelIndent = indent + " " + + # collapse out indents if formatting is not desired + if not formatted: + indent = "" + nextLevelIndent = "" + nl = "" + + selfTag = None + if doctag is not None: + selfTag = doctag + else: + if self.__name: + selfTag = self.__name + + if not selfTag: + if namedItemsOnly: + return "" + else: + selfTag = "ITEM" + + out += [ nl, indent, "<", selfTag, ">" ] + + for i,res in enumerate(self.__toklist): + if isinstance(res,ParseResults): + if i in namedItems: + out += [ res.asXML(namedItems[i], + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + out += [ res.asXML(None, + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + # individual token, see if there is a name for it + resTag = None + if i in namedItems: + resTag = namedItems[i] + if not resTag: + if namedItemsOnly: + continue + else: + resTag = "ITEM" + xmlBodyText = _xml_escape(_ustr(res)) + out += [ nl, nextLevelIndent, "<", resTag, ">", + xmlBodyText, + "</", resTag, ">" ] + + out += [ nl, indent, "</", selfTag, ">" ] + return "".join(out) + + def __lookup(self,sub): + for k,vlist in self.__tokdict.items(): + for v,loc in vlist: + if sub is v: + return k + return None + + def getName(self): + r""" + Returns the results name for this token expression. Useful when several + different expressions might match at a particular location. 
+ + Example:: + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = OneOrMore(user_data) + + result = user_info.parseString("22 111-22-3333 #221B") + for item in result: + print(item.getName(), ':', item[0]) + prints:: + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ + if self.__name: + return self.__name + elif self.__parent: + par = self.__parent() + if par: + return par.__lookup(self) + else: + return None + elif (len(self) == 1 and + len(self.__tokdict) == 1 and + next(iter(self.__tokdict.values()))[0][1] in (0,-1)): + return next(iter(self.__tokdict.keys())) + else: + return None + + def dump(self, indent='', depth=0, full=True): + """ + Diagnostic method for listing out the contents of a C{ParseResults}. + Accepts an optional C{indent} argument so that this string can be embedded + in a nested display of other data. + + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(result.dump()) + prints:: + ['12', '/', '31', '/', '1999'] + - day: 1999 + - month: 31 + - year: 12 + """ + out = [] + NL = '\n' + out.append( indent+_ustr(self.asList()) ) + if full: + if self.haskeys(): + items = sorted((str(k), v) for k,v in self.items()) + for k,v in items: + if out: + out.append(NL) + out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) + if isinstance(v,ParseResults): + if v: + out.append( v.dump(indent,depth+1) ) + else: + out.append(_ustr(v)) + else: + out.append(repr(v)) + elif any(isinstance(vv,ParseResults) for vv in self): + v = self + for i,vv in enumerate(v): + if isinstance(vv,ParseResults): + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) + else: + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) + + return "".join(out) + + def pprint(self, *args, **kwargs): + """ + Pretty-printer for parsed results as a list, using the C{pprint} module. + Accepts additional positional or keyword args as defined for the + C{pprint.pprint} method. 
(U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) + + Example:: + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(delimitedList(term))) + result = func.parseString("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + prints:: + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ + pprint.pprint(self.asList(), *args, **kwargs) + + # add support for pickle protocol + def __getstate__(self): + return ( self.__toklist, + ( self.__tokdict.copy(), + self.__parent is not None and self.__parent() or None, + self.__accumNames, + self.__name ) ) + + def __setstate__(self,state): + self.__toklist = state[0] + (self.__tokdict, + par, + inAccumNames, + self.__name) = state[1] + self.__accumNames = {} + self.__accumNames.update(inAccumNames) + if par is not None: + self.__parent = wkref(par) + else: + self.__parent = None + + def __getnewargs__(self): + return self.__toklist, self.__name, self.__asList, self.__modal + + def __dir__(self): + return (dir(type(self)) + list(self.keys())) + +MutableMapping.register(ParseResults) + +def col (loc,strg): + """Returns current column within a string, counting newlines as line separators. + The first column is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information + on parsing strings containing C{<TAB>}s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + """ + s = strg + return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) + +def lineno(loc,strg): + """Returns current line number within a string, counting newlines as line separators. + The first line is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information + on parsing strings containing C{<TAB>}s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + """ + return strg.count("\n",0,loc) + 1 + +def line( loc, strg ): + """Returns the line of text containing loc within a string, counting newlines as line separators. 
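+
+       A minimal illustrative sketch (loc 5 falls on the second line of the string)::
+           line(5, "abc\ndef\nghi")   # -> 'def'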
+ """ + lastCR = strg.rfind("\n", 0, loc) + nextCR = strg.find("\n", loc) + if nextCR >= 0: + return strg[lastCR+1:nextCR] + else: + return strg[lastCR+1:] + +def _defaultStartDebugAction( instring, loc, expr ): + print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))) + +def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): + print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) + +def _defaultExceptionDebugAction( instring, loc, expr, exc ): + print ("Exception raised:" + _ustr(exc)) + +def nullDebugAction(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + pass + +# Only works on Python 3.x - nonlocal is toxic to Python 2 installs +#~ 'decorator to trim function calls to match the arity of the target' +#~ def _trim_arity(func, maxargs=3): + #~ if func in singleArgBuiltins: + #~ return lambda s,l,t: func(t) + #~ limit = 0 + #~ foundArity = False + #~ def wrapper(*args): + #~ nonlocal limit,foundArity + #~ while 1: + #~ try: + #~ ret = func(*args[limit:]) + #~ foundArity = True + #~ return ret + #~ except TypeError: + #~ if limit == maxargs or foundArity: + #~ raise + #~ limit += 1 + #~ continue + #~ return wrapper + +# this version is Python 2.x-3.x cross-compatible +'decorator to trim function calls to match the arity of the target' +def _trim_arity(func, maxargs=2): + if func in singleArgBuiltins: + return lambda s,l,t: func(t) + limit = [0] + foundArity = [False] + + # traceback return data structure changed in Py3.5 - normalize back to plain tuples + if system_version[:2] >= (3,5): + def extract_stack(limit=0): + # special handling for Python 3.5.0 - extra deep call stack by 1 + offset = -3 if system_version == (3,5,0) else -2 + frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] + return [frame_summary[:2]] + def extract_tb(tb, limit=0): + frames = traceback.extract_tb(tb, limit=limit) + frame_summary = frames[-1] + return [frame_summary[:2]] + else: + extract_stack = traceback.extract_stack + extract_tb = traceback.extract_tb + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + LINE_DIFF = 6 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
+ this_line = extract_stack(limit=2)[-1] + pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) + + def wrapper(*args): + while 1: + try: + ret = func(*args[limit[0]:]) + foundArity[0] = True + return ret + except TypeError: + # re-raise TypeErrors if they did not come from our arity testing + if foundArity[0]: + raise + else: + try: + tb = sys.exc_info()[-1] + if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: + raise + finally: + del tb + + if limit[0] <= maxargs: + limit[0] += 1 + continue + raise + + # copy func name to wrapper for sensible debug output + func_name = "<parse action>" + try: + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + wrapper.__name__ = func_name + + return wrapper + +class ParserElement(object): + """Abstract base level parser element class.""" + DEFAULT_WHITE_CHARS = " \n\t\r" + verbose_stacktrace = False + + @staticmethod + def setDefaultWhitespaceChars( chars ): + r""" + Overrides the default whitespace chars + + Example:: + # default whitespace chars are space, <TAB> and newline + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.setDefaultWhitespaceChars(" \t") + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + @staticmethod + def inlineLiteralsUsing(cls): + """ + Set class to be used for inclusion of string literals into a parser. + + Example:: + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inlineLiteralsUsing(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] + """ + ParserElement._literalStringClass = cls + + def __init__( self, savelist=False ): + self.parseAction = list() + self.failAction = None + #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall + self.strRepr = None + self.resultsName = None + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS + self.copyDefaultWhiteChars = True + self.mayReturnEmpty = False # used when checking for left-recursion + self.keepTabs = False + self.ignoreExprs = list() + self.debug = False + self.streamlined = False + self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index + self.errmsg = "" + self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) + self.debugActions = ( None, None, None ) #custom debug actions + self.re = None + self.callPreparse = True # used to avoid redundant calls to preParse + self.callDuringTry = False + + def copy( self ): + """ + Make a copy of this C{ParserElement}. Useful for defining different parse actions + for the same parsing pattern, using copies of the original parse element. 
+ + Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") + integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + + print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) + prints:: + [5120, 100, 655360, 268435456] + Equivalent form of C{expr.copy()} is just C{expr()}:: + integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + """ + cpy = copy.copy( self ) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS + return cpy + + def setName( self, name ): + """ + Define name for this expression, makes debugging and exception messages clearer. + + Example:: + Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) + Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + """ + self.name = name + self.errmsg = "Expected " + self.name + if hasattr(self,"exception"): + self.exception.msg = self.errmsg + return self + + def setResultsName( self, name, listAllMatches=False ): + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. + NOTE: this returns a *copy* of the original C{ParserElement} object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + C{expr("name")} in place of C{expr.setResultsName("name")} - + see L{I{__call__}<__call__>}. + + Example:: + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + listAllMatches=True + newself.resultsName = name + newself.modalResults = not listAllMatches + return newself + + def setBreak(self,breakFlag = True): + """Method to invoke the Python pdb debugger when this element is + about to be parsed. Set C{breakFlag} to True to enable, False to + disable. + """ + if breakFlag: + _parseMethod = self._parse + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + pdb.set_trace() + return _parseMethod( instring, loc, doActions, callPreParse ) + breaker._originalParseMethod = _parseMethod + self._parse = breaker + else: + if hasattr(self._parse,"_originalParseMethod"): + self._parse = self._parse._originalParseMethod + return self + + def setParseAction( self, *fns, **kwargs ): + """ + Define one or more actions to perform when successfully matching parse element definition. + Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, + C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object + If the functions in fns modify the tokens, they can return them as the return + value from fn, and the modified list of tokens will replace the original. + Otherwise, fn does not need to return any value. 
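+
+        A minimal sketch of an action using the full C{(s,loc,toks)} signature
+        (the function name is illustrative)::
+            def report(s, loc, toks):
+                print("matched %r at column %d" % (toks[0], col(loc, s)))
+            Word(nums).setParseAction(report)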
+ + Optional keyword arguments: + - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{parseString}<parseString>} for more information + on parsing strings containing C{<TAB>}s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + + Example:: + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + # use parse action to convert to ints at parse time + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + date_str = integer + '/' + integer + '/' + integer + + # note that integer fields are now ints, not strings + date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] + """ + self.parseAction = list(map(_trim_arity, list(fns))) + self.callDuringTry = kwargs.get("callDuringTry", False) + return self + + def addParseAction( self, *fns, **kwargs ): + """ + Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}. + + See examples in L{I{copy}<copy>}. + """ + self.parseAction += list(map(_trim_arity, list(fns))) + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) + return self + + def addCondition(self, *fns, **kwargs): + """Add a boolean predicate function to expression's list of parse actions. See + L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, + functions passed to C{addCondition} need to return boolean success/fail of the condition. + + Optional keyword arguments: + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException + + Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) + """ + msg = kwargs.get("message", "failed user-defined condition") + exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException + for fn in fns: + def pa(s,l,t): + if not bool(_trim_arity(fn)(s,l,t)): + raise exc_type(s,l,msg) + self.parseAction.append(pa) + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) + return self + + def setFailAction( self, fn ): + """Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + C{fn(s,loc,expr,err)} where: + - s = string being parsed + - loc = location where expression match was attempted and failed + - expr = the parse expression that failed + - err = the exception thrown + The function returns no value. 
It may throw C{L{ParseFatalException}} + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def _skipIgnorables( self, instring, loc ): + exprsFound = True + while exprsFound: + exprsFound = False + for e in self.ignoreExprs: + try: + while 1: + loc,dummy = e._parse( instring, loc ) + exprsFound = True + except ParseException: + pass + return loc + + def preParse( self, instring, loc ): + if self.ignoreExprs: + loc = self._skipIgnorables( instring, loc ) + + if self.skipWhitespace: + wt = self.whiteChars + instrlen = len(instring) + while loc < instrlen and instring[loc] in wt: + loc += 1 + + return loc + + def parseImpl( self, instring, loc, doActions=True ): + return loc, [] + + def postParse( self, instring, loc, tokenlist ): + return tokenlist + + #~ @profile + def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): + debugging = ( self.debug ) #and doActions ) + + if debugging or self.failAction: + #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) + if (self.debugActions[0] ): + self.debugActions[0]( instring, loc, self ) + if callPreParse and self.callPreparse: + preloc = self.preParse( instring, loc ) + else: + preloc = loc + tokensStart = preloc + try: + try: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + except IndexError: + raise ParseException( instring, len(instring), self.errmsg, self ) + except ParseBaseException as err: + #~ print ("Exception raised:", err) + if self.debugActions[2]: + self.debugActions[2]( instring, tokensStart, self, err ) + if self.failAction: + self.failAction( instring, tokensStart, self, err ) + raise + else: + if callPreParse and self.callPreparse: + preloc = self.preParse( instring, loc ) + else: + preloc = loc + tokensStart = preloc + if self.mayIndexError or preloc >= len(instring): + try: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + except IndexError: + raise ParseException( instring, len(instring), self.errmsg, self ) + else: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + + tokens = self.postParse( instring, loc, tokens ) + + retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) + if self.parseAction and (doActions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + tokens = fn( instring, tokensStart, retTokens ) + if tokens is not None: + retTokens = ParseResults( tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), + modal=self.modalResults ) + except ParseBaseException as err: + #~ print "Exception raised in user parse action:", err + if (self.debugActions[2] ): + self.debugActions[2]( instring, tokensStart, self, err ) + raise + else: + for fn in self.parseAction: + tokens = fn( instring, tokensStart, retTokens ) + if tokens is not None: + retTokens = ParseResults( tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), + modal=self.modalResults ) + if debugging: + #~ print ("Matched",self,"->",retTokens.asList()) + if (self.debugActions[1] ): + self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) + + return loc, retTokens + + def tryParse( self, instring, loc ): + try: + return self._parse( instring, loc, doActions=False )[0] + except ParseFatalException: + raise ParseException( instring, loc, self.errmsg, self) + + def canParseNext(self, instring, loc): + try: + self.tryParse(instring, loc) + except (ParseException, 
IndexError): + return False + else: + return True + + class _UnboundedCache(object): + def __init__(self): + cache = {} + self.not_in_cache = not_in_cache = object() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + if _OrderedDict is not None: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = _OrderedDict() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(cache) > size: + try: + cache.popitem(False) + except KeyError: + pass + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + else: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = {} + key_fifo = collections.deque([], size) + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(key_fifo) > size: + cache.pop(key_fifo.popleft(), None) + key_fifo.append(key) + + def clear(self): + cache.clear() + key_fifo.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + # argument cache for optimizing repeated calls when backtracking through recursive expressions + packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail + packrat_cache_lock = RLock() + packrat_cache_stats = [0, 0] + + # this method gets repeatedly called during backtracking with the same arguments - + # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression + def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): + HIT, MISS = 0, 1 + lookup = (self, instring, loc, callPreParse, doActions) + with ParserElement.packrat_cache_lock: + cache = ParserElement.packrat_cache + value = cache.get(lookup) + if value is cache.not_in_cache: + ParserElement.packrat_cache_stats[MISS] += 1 + try: + value = self._parseNoCache(instring, loc, doActions, callPreParse) + except ParseBaseException as pe: + # cache a copy of the exception, without the traceback + cache.set(lookup, pe.__class__(*pe.args)) + raise + else: + cache.set(lookup, (value[0], value[1].copy())) + return value + else: + ParserElement.packrat_cache_stats[HIT] += 1 + if isinstance(value, Exception): + raise value + return (value[0], value[1].copy()) + + _parse = _parseNoCache + + @staticmethod + def resetCache(): + ParserElement.packrat_cache.clear() + ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) + + _packratEnabled = False + @staticmethod + def enablePackrat(cache_size_limit=128): + """Enables "packrat" parsing, which adds memoizing to the parsing logic. 
+ Repeated parse attempts at the same string location (which happens + often in many complex grammars) can immediately return a cached value, + instead of re-executing parsing/validating code. Memoizing is done of + both valid results and parsing exceptions. + + Parameters: + - cache_size_limit - (default=C{128}) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. + + This speedup may break existing programs that use parse actions that + have side-effects. For this reason, packrat parsing is disabled when + you first import pyparsing. To activate the packrat feature, your + program must call the class method C{ParserElement.enablePackrat()}. If + your program uses C{psyco} to "compile as you go", you must call + C{enablePackrat} before calling C{psyco.full()}. If you do not do this, + Python will crash. For best results, call C{enablePackrat()} immediately + after importing pyparsing. + + Example:: + import pyparsing + pyparsing.ParserElement.enablePackrat() + """ + if not ParserElement._packratEnabled: + ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = ParserElement._UnboundedCache() + else: + ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) + ParserElement._parse = ParserElement._parseCache + + def parseString( self, instring, parseAll=False ): + """ + Execute the parse expression with the given string. + This is the main interface to the client code, once the complete + expression has been built. + + If you want the grammar to require that the entire input string be + successfully parsed, then set C{parseAll} to True (equivalent to ending + the grammar with C{L{StringEnd()}}). + + Note: C{parseString} implicitly calls C{expandtabs()} on the input string, + in order to report proper column numbers in parse actions. + If the input string contains tabs and + the grammar uses parse actions that use the C{loc} argument to index into the + string being parsed, you can ensure you have a consistent view of the input + string by: + - calling C{parseWithTabs} on your grammar before calling C{parseString} + (see L{I{parseWithTabs}<parseWithTabs>}) + - define your parse action using the full C{(s,loc,toks)} signature, and + reference the input string using the parse action's C{s} argument + - explictly expand the tabs in your input string before calling + C{parseString} + + Example:: + Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] + Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text + """ + ParserElement.resetCache() + if not self.streamlined: + self.streamline() + #~ self.saveAsList = True + for e in self.ignoreExprs: + e.streamline() + if not self.keepTabs: + instring = instring.expandtabs() + try: + loc, tokens = self._parse( instring, 0 ) + if parseAll: + loc = self.preParse( instring, loc ) + se = Empty() + StringEnd() + se._parse( instring, loc ) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + else: + return tokens + + def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. 
May be called with optional + C{maxMatches} argument, to clip scanning after 'n' matches are found. If + C{overlap} is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See L{I{parseString}<parseString>} for more information on parsing + strings with embedded tabs. + + Example:: + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens,start,end in Word(alphas).scanString(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = _ustr(instring).expandtabs() + instrlen = len(instring) + loc = 0 + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + try: + while loc <= instrlen and matches < maxMatches: + try: + preloc = preparseFn( instring, loc ) + nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) + except ParseException: + loc = preloc+1 + else: + if nextLoc > loc: + matches += 1 + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn( instring, loc ) + if nextloc > loc: + loc = nextLoc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc+1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def transformString( self, instring ): + """ + Extension to C{L{scanString}}, to modify matching text with modified tokens that may + be returned from a parse action. To use C{transformString}, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking C{transformString()} on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. C{transformString()} returns the resulting transformed string. + + Example:: + wd = Word(alphas) + wd.setParseAction(lambda toks: toks[0].title()) + + print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) + Prints:: + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ + out = [] + lastE = 0 + # force preservation of <TAB>s, to minimize unwanted transformation of string, and to + # keep string locs straight between transformString and scanString + self.keepTabs = True + try: + for t,s,e in self.scanString( instring ): + out.append( instring[lastE:s] ) + if t: + if isinstance(t,ParseResults): + out += t.asList() + elif isinstance(t,list): + out += t + else: + out.append(t) + lastE = e + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join(map(_ustr,_flatten(out))) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def searchString( self, instring, maxMatches=_MAX_INT ): + """ + Another extension to C{L{scanString}}, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + C{maxMatches} argument, to clip searching after 'n' matches are found. 
+ + Example:: + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) + prints:: + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + try: + return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): + """ + Generator method to split a string using the given expression as a separator. + May be called with optional C{maxsplit} argument, to limit the number of splits; + and the optional C{includeSeparators} argument (default=C{False}), if the separating + matching text should be included in the split results. + + Example:: + punc = oneOf(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + prints:: + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + splits = 0 + last = 0 + for t,s,e in self.scanString(instring, maxMatches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other ): + """ + Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement + converts them to L{Literal}s by default. + + Example:: + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print (hello, "->", greet.parseString(hello)) + Prints:: + Hello, World! 
-> ['Hello', ',', 'World', '!']
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, other ] )
+
+    def __radd__(self, other ):
+        """
+        Implementation of + operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other + self
+
+    def __sub__(self, other):
+        """
+        Implementation of - operator, returns C{L{And}} with error stop
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return self + And._ErrorStop() + other
+
+    def __rsub__(self, other ):
+        """
+        Implementation of - operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other - self
+
+    def __mul__(self,other):
+        """
+        Implementation of * operator, allows use of C{expr * 3} in place of
+        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
+        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
+        may also include C{None} as in:
+         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+              to C{expr*n + L{ZeroOrMore}(expr)}
+              (read as "at least n instances of C{expr}")
+         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+              (read as "0 to n instances of C{expr}")
+         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+        Note that C{expr*(None,n)} does not raise an exception if
+        more than n exprs exist in the input stream; that is,
+        C{expr*(None,n)} does not enforce a maximum number of expr
+        occurrences.
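+
+        For example (using an illustrative C{ident = Word(alphas)} expression)::
+            ident * 3          # same as ident + ident + ident
+            ident * (2,4)      # between 2 and 4 idents
+            ident * (3,None)   # 3 or more idents
+            ident * (None,2)   # 0 to 2 idents (additional idents are not an error)
+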
+        If enforcing a maximum number of occurrences is desired, then write
+        C{expr*(None,n) + ~expr}.
+        """
+        if isinstance(other,int):
+            minElements, optElements = other,0
+        elif isinstance(other,tuple):
+            other = (other + (None, None))[:2]
+            if other[0] is None:
+                other = (0, other[1])
+            if isinstance(other[0],int) and other[1] is None:
+                if other[0] == 0:
+                    return ZeroOrMore(self)
+                if other[0] == 1:
+                    return OneOrMore(self)
+                else:
+                    return self*other[0] + ZeroOrMore(self)
+            elif isinstance(other[0],int) and isinstance(other[1],int):
+                minElements, optElements = other
+                optElements -= minElements
+            else:
+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]),type(other[1])))
+        else:
+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
+
+        if minElements < 0:
+            raise ValueError("cannot multiply ParserElement by negative value")
+        if optElements < 0:
+            raise ValueError("second tuple value must be greater than or equal to first tuple value")
+        if minElements == optElements == 0:
+            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+        if (optElements):
+            def makeOptionalList(n):
+                if n>1:
+                    return Optional(self + makeOptionalList(n-1))
+                else:
+                    return Optional(self)
+            if minElements:
+                if minElements == 1:
+                    ret = self + makeOptionalList(optElements)
+                else:
+                    ret = And([self]*minElements) + makeOptionalList(optElements)
+            else:
+                ret = makeOptionalList(optElements)
+        else:
+            if minElements == 1:
+                ret = self
+            else:
+                ret = And([self]*minElements)
+        return ret
+
+    def __rmul__(self, other):
+        return self.__mul__(other)
+
+    def __or__(self, other ):
+        """
+        Implementation of | operator - returns C{L{MatchFirst}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return MatchFirst( [ self, other ] )
+
+    def __ror__(self, other ):
+        """
+        Implementation of | operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other | self
+
+    def __xor__(self, other ):
+        """
+        Implementation of ^ operator - returns C{L{Or}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Or( [ self, other ] )
+
+    def __rxor__(self, other ):
+        """
+        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other ^ self
+
+    def __and__(self, other ):
+        """
+        Implementation of & operator - returns C{L{Each}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
return None + return Each( [ self, other ] ) + + def __rand__(self, other ): + """ + Implementation of & operator when left operand is not a C{L{ParserElement}} + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other & self + + def __invert__( self ): + """ + Implementation of ~ operator - returns C{L{NotAny}} + """ + return NotAny( self ) + + def __call__(self, name=None): + """ + Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. + + If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be + passed as C{True}. + + If C{name} is omitted, same as calling C{L{copy}}. + + Example:: + # these are equivalent + userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") + userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + """ + if name is not None: + return self.setResultsName(name) + else: + return self.copy() + + def suppress( self ): + """ + Suppresses the output of this C{ParserElement}; useful to keep punctuation from + cluttering up returned output. + """ + return Suppress( self ) + + def leaveWhitespace( self ): + """ + Disables the skipping of whitespace before matching the characters in the + C{ParserElement}'s defined pattern. This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars. + """ + self.skipWhitespace = False + return self + + def setWhitespaceChars( self, chars ): + """ + Overrides the default whitespace chars + """ + self.skipWhitespace = True + self.whiteChars = chars + self.copyDefaultWhiteChars = False + return self + + def parseWithTabs( self ): + """ + Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string. + Must be called before C{parseString} when the input grammar contains elements that + match C{<TAB>} characters. + """ + self.keepTabs = True + return self + + def ignore( self, other ): + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + + Example:: + patt = OneOrMore(Word(alphas)) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] + + patt.ignore(cStyleComment) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] + """ + if isinstance(other, basestring): + other = Suppress(other) + + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + self.ignoreExprs.append(other) + else: + self.ignoreExprs.append( Suppress( other.copy() ) ) + return self + + def setDebugActions( self, startAction, successAction, exceptionAction ): + """ + Enable display of debugging messages while doing pattern matching. + """ + self.debugActions = (startAction or _defaultStartDebugAction, + successAction or _defaultSuccessDebugAction, + exceptionAction or _defaultExceptionDebugAction) + self.debug = True + return self + + def setDebug( self, flag=True ): + """ + Enable display of debugging messages while doing pattern matching. + Set C{flag} to True to enable, False to disable. 
+ + Example:: + wd = Word(alphas).setName("alphaword") + integer = Word(nums).setName("numword") + term = wd | integer + + # turn on debugging for wd + wd.setDebug() + + OneOrMore(term).parseString("abc 123 xyz 890") + + prints:: + Match alphaword at loc 0(1,1) + Matched alphaword -> ['abc'] + Match alphaword at loc 3(1,4) + Exception raised:Expected alphaword (at char 4), (line:1, col:5) + Match alphaword at loc 7(1,8) + Matched alphaword -> ['xyz'] + Match alphaword at loc 11(1,12) + Exception raised:Expected alphaword (at char 12), (line:1, col:13) + Match alphaword at loc 15(1,16) + Exception raised:Expected alphaword (at char 15), (line:1, col:16) + + The output shown is that produced by the default debug actions - custom debug actions can be + specified using L{setDebugActions}. Prior to attempting + to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"} + is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} + message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, + which makes debugging and exception messages easier to understand - for instance, the default + name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. + """ + if flag: + self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) + else: + self.debug = False + return self + + def __str__( self ): + return self.name + + def __repr__( self ): + return _ustr(self) + + def streamline( self ): + self.streamlined = True + self.strRepr = None + return self + + def checkRecursion( self, parseElementList ): + pass + + def validate( self, validateTrace=[] ): + """ + Check defined expressions for valid structure, check for infinite recursive definitions. + """ + self.checkRecursion( [] ) + + def parseFile( self, file_or_filename, parseAll=False ): + """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. + """ + try: + file_contents = file_or_filename.read() + except AttributeError: + with open(file_or_filename, "r") as f: + file_contents = f.read() + try: + return self.parseString(file_contents, parseAll) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def __eq__(self,other): + if isinstance(other, ParserElement): + return self is other or vars(self) == vars(other) + elif isinstance(other, basestring): + return self.matches(other) + else: + return super(ParserElement,self)==other + + def __ne__(self,other): + return not (self == other) + + def __hash__(self): + return hash(id(self)) + + def __req__(self,other): + return self == other + + def __rne__(self,other): + return not (self == other) + + def matches(self, testString, parseAll=True): + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. 
+
+        Parameters:
+         - testString - to test against this expression for a match
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+
+        Example::
+            expr = Word(nums)
+            assert expr.matches("100")
+        """
+        try:
+            self.parseString(_ustr(testString), parseAll=parseAll)
+            return True
+        except ParseBaseException:
+            return False
+
+    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
+        """
+        Execute the parse expression on a series of test strings, showing each
+        test, the parsed results or where the parse failed.  Quick and easy way to
+        run a parse expression against a list of sample strings.
+
+        Parameters:
+         - tests - a list of separate test strings, or a multiline string of test strings
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
+              string; pass None to disable comment filtering
+         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
+              if False, only dump nested list
+         - printResults - (default=C{True}) prints test output to stdout
+         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
+
+        Returns: a (success, results) tuple, where success indicates that all tests succeeded
+        (or failed if C{failureTests} is True), and the results contain a list of lines of each
+        test's output
+
+        Example::
+            number_expr = pyparsing_common.number.copy()
+
+            result = number_expr.runTests('''
+                # unsigned integer
+                100
+                # negative integer
+                -100
+                # float with scientific notation
+                6.02e23
+                # integer with scientific notation
+                1e-12
+                ''')
+            print("Success" if result[0] else "Failed!")
+
+            result = number_expr.runTests('''
+                # stray character
+                100Z
+                # missing leading digit before '.'
+                -.100
+                # too many '.'
+                3.14.159
+                ''', failureTests=True)
+            print("Success" if result[0] else "Failed!")
+        prints::
+            # unsigned integer
+            100
+            [100]
+
+            # negative integer
+            -100
+            [-100]
+
+            # float with scientific notation
+            6.02e23
+            [6.02e+23]
+
+            # integer with scientific notation
+            1e-12
+            [1e-12]
+
+            Success
+
+            # stray character
+            100Z
+               ^
+            FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+            # missing leading digit before '.'
+            -.100
+            ^
+            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+            # too many '.'
+            3.14.159
+                ^
+            FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+            Success
+
+        Each test string must be on a single line.  If you want to test a string that spans multiple
+        lines, create a test like this::
+
+            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
+
+        (Note that this is a raw string literal, you must include the leading 'r'.)
+ """ + if isinstance(tests, basestring): + tests = list(map(str.strip, tests.rstrip().splitlines())) + if isinstance(comment, basestring): + comment = Literal(comment) + allResults = [] + comments = [] + success = True + for t in tests: + if comment is not None and comment.matches(t, False) or comments and not t: + comments.append(t) + continue + if not t: + continue + out = ['\n'.join(comments), t] + comments = [] + try: + t = t.replace(r'\n','\n') + result = self.parseString(t, parseAll=parseAll) + out.append(result.dump(full=fullDump)) + success = success and not failureTests + except ParseBaseException as pe: + fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" + if '\n' in t: + out.append(line(pe.loc, t)) + out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) + else: + out.append(' '*pe.loc + '^' + fatal) + out.append("FAIL: " + str(pe)) + success = success and failureTests + result = pe + except Exception as exc: + out.append("FAIL-EXCEPTION: " + str(exc)) + success = success and failureTests + result = exc + + if printResults: + if fullDump: + out.append('') + print('\n'.join(out)) + + allResults.append((t, result)) + + return success, allResults + + +class Token(ParserElement): + """ + Abstract C{ParserElement} subclass, for defining atomic matching patterns. + """ + def __init__( self ): + super(Token,self).__init__( savelist=False ) + + +class Empty(Token): + """ + An empty token, will always match. + """ + def __init__( self ): + super(Empty,self).__init__() + self.name = "Empty" + self.mayReturnEmpty = True + self.mayIndexError = False + + +class NoMatch(Token): + """ + A token that will never match. + """ + def __init__( self ): + super(NoMatch,self).__init__() + self.name = "NoMatch" + self.mayReturnEmpty = True + self.mayIndexError = False + self.errmsg = "Unmatchable token" + + def parseImpl( self, instring, loc, doActions=True ): + raise ParseException(instring, loc, self.errmsg, self) + + +class Literal(Token): + """ + Token to exactly match a specified string. + + Example:: + Literal('blah').parseString('blah') # -> ['blah'] + Literal('blah').parseString('blahfooblah') # -> ['blah'] + Literal('blah').parseString('bla') # -> Exception: Expected "blah" + + For case-insensitive matching, use L{CaselessLiteral}. + + For keyword matching (force word break before and after the matched string), + use L{Keyword} or L{CaselessKeyword}. 
+ """ + def __init__( self, matchString ): + super(Literal,self).__init__() + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Literal; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.__class__ = Empty + self.name = '"%s"' % _ustr(self.match) + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + + # Performance tuning: this routine gets called a *lot* + # if this is a single character match string and the first character matches, + # short-circuit as quickly as possible, and avoid calling startswith + #~ @profile + def parseImpl( self, instring, loc, doActions=True ): + if (instring[loc] == self.firstMatchChar and + (self.matchLen==1 or instring.startswith(self.match,loc)) ): + return loc+self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) +_L = Literal +ParserElement._literalStringClass = Literal + +class Keyword(Token): + """ + Token to exactly match a specified string as a keyword, that is, it must be + immediately followed by a non-keyword character. Compare with C{L{Literal}}: + - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. + - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} + Accepts two optional constructor arguments in addition to the keyword string: + - C{identChars} is a string of characters that would be valid identifier characters, + defaulting to all alphanumerics + "_" and "$" + - C{caseless} allows case-insensitive matching, default is C{False}. + + Example:: + Keyword("start").parseString("start") # -> ['start'] + Keyword("start").parseString("starting") # -> Exception + + For case-insensitive matching, use L{CaselessKeyword}. 
+ """ + DEFAULT_KEYWORD_CHARS = alphanums+"_$" + + def __init__( self, matchString, identChars=None, caseless=False ): + super(Keyword,self).__init__() + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Keyword; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.name = '"%s"' % self.match + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = matchString.upper() + identChars = identChars.upper() + self.identChars = set(identChars) + + def parseImpl( self, instring, loc, doActions=True ): + if self.caseless: + if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and + (loc == 0 or instring[loc-1].upper() not in self.identChars) ): + return loc+self.matchLen, self.match + else: + if (instring[loc] == self.firstMatchChar and + (self.matchLen==1 or instring.startswith(self.match,loc)) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and + (loc == 0 or instring[loc-1] not in self.identChars) ): + return loc+self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + + def copy(self): + c = super(Keyword,self).copy() + c.identChars = Keyword.DEFAULT_KEYWORD_CHARS + return c + + @staticmethod + def setDefaultKeywordChars( chars ): + """Overrides the default Keyword chars + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + +class CaselessLiteral(Literal): + """ + Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for L{CaselessKeyword}.) + """ + def __init__( self, matchString ): + super(CaselessLiteral,self).__init__( matchString.upper() ) + # Preserve the defining literal. + self.returnString = matchString + self.name = "'%s'" % self.returnString + self.errmsg = "Expected " + self.name + + def parseImpl( self, instring, loc, doActions=True ): + if instring[ loc:loc+self.matchLen ].upper() == self.match: + return loc+self.matchLen, self.returnString + raise ParseException(instring, loc, self.errmsg, self) + +class CaselessKeyword(Keyword): + """ + Caseless version of L{Keyword}. + + Example:: + OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] + + (Contrast with example for L{CaselessLiteral}.) + """ + def __init__( self, matchString, identChars=None ): + super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) + + def parseImpl( self, instring, loc, doActions=True ): + if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): + return loc+self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + +class CloseMatch(Token): + """ + A variation on L{Literal} which matches "close" matches, that is, + strings with at most 'n' mismatching characters. 
C{CloseMatch} takes parameters: + - C{match_string} - string to be matched + - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match + + The results from a successful parse will contain the matched text from the input string and the following named results: + - C{mismatches} - a list of the positions within the match_string where mismatches were found + - C{original} - the original match_string used to compare against the input string + + If C{mismatches} is an empty list, then the match was an exact match. + + Example:: + patt = CloseMatch("ATCATCGAATGGA") + patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) + patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) + + # exact match + patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) + + # close match allowing up to 2 mismatches + patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) + patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) + """ + def __init__(self, match_string, maxMismatches=1): + super(CloseMatch,self).__init__() + self.name = match_string + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) + self.mayIndexError = False + self.mayReturnEmpty = False + + def parseImpl( self, instring, loc, doActions=True ): + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): + src,mat = s_m + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = match_stringloc + 1 + results = ParseResults([instring[start:loc]]) + results['original'] = self.match_string + results['mismatches'] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + +class Word(Token): + """ + Token for matching words composed of allowed character sets. + Defined with string containing all allowed initial characters, + an optional string containing allowed body characters (if omitted, + defaults to the initial character set), and an optional minimum, + maximum, and/or exact length. The default value for C{min} is 1 (a + minimum value < 1 is not valid); the default values for C{max} and C{exact} + are 0, meaning no maximum or exact length restriction. An optional + C{excludeChars} parameter can list characters that might be found in + the input C{bodyChars} string; useful to define a word of all printables + except for one or two characters, for instance. + + L{srange} is useful for defining custom character set strings for defining + C{Word} expressions, using range notation from regular expression character sets. + + A common mistake is to use C{Word} to match a specific literal string, as in + C{Word("Address")}. Remember that C{Word} uses the string argument to define + I{sets} of matchable characters. This expression would match "Add", "AAA", + "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. + To match an exact literal string, use L{Literal} or L{Keyword}. 
+ + pyparsing includes helper strings for building Words: + - L{alphas} + - L{nums} + - L{alphanums} + - L{hexnums} + - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) + - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) + - L{printables} (any non-whitespace character) + + Example:: + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capital_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums+'-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, excludeChars=",") + """ + def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): + super(Word,self).__init__() + if excludeChars: + initChars = ''.join(c for c in initChars if c not in excludeChars) + if bodyChars: + bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) + self.initCharsOrig = initChars + self.initChars = set(initChars) + if bodyChars : + self.bodyCharsOrig = bodyChars + self.bodyChars = set(bodyChars) + else: + self.bodyCharsOrig = initChars + self.bodyChars = set(initChars) + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.asKeyword = asKeyword + + if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): + if self.bodyCharsOrig == self.initCharsOrig: + self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) + elif len(self.initCharsOrig) == 1: + self.reString = "%s[%s]*" % \ + (re.escape(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) + else: + self.reString = "[%s][%s]*" % \ + (_escapeRegexRangeChars(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) + if self.asKeyword: + self.reString = r"\b"+self.reString+r"\b" + try: + self.re = re.compile( self.reString ) + except Exception: + self.re = None + + def parseImpl( self, instring, loc, doActions=True ): + if self.re: + result = self.re.match(instring,loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + + if not(instring[ loc ] in self.initChars): + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + instrlen = len(instring) + bodychars = self.bodyChars + maxloc = start + self.maxLen + maxloc = min( maxloc, instrlen ) + while loc < maxloc and instring[loc] in bodychars: + loc += 1 + + throwException = False + if loc - start < self.minLen: + throwException = True + if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: + throwException = True + if self.asKeyword: + if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars): + throwException = True + + if throwException: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, 
instring[start:loc] + + def __str__( self ): + try: + return super(Word,self).__str__() + except Exception: + pass + + + if self.strRepr is None: + + def charsAsStr(s): + if len(s)>4: + return s[:4]+"..." + else: + return s + + if ( self.initCharsOrig != self.bodyCharsOrig ): + self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) + else: + self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) + + return self.strRepr + + +class Regex(Token): + r""" + Token for matching strings that match a given regular expression. + Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. + If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as + named parse results. + + Example:: + realnum = Regex(r"[+-]?\d+\.\d*") + date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') + # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + """ + compiledREtype = type(re.compile("[A-Z]")) + def __init__( self, pattern, flags=0): + """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.""" + super(Regex,self).__init__() + + if isinstance(pattern, basestring): + if not pattern: + warnings.warn("null string passed to Regex; use Empty() instead", + SyntaxWarning, stacklevel=2) + + self.pattern = pattern + self.flags = flags + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + except sre_constants.error: + warnings.warn("invalid pattern (%s) passed to Regex" % pattern, + SyntaxWarning, stacklevel=2) + raise + + elif isinstance(pattern, Regex.compiledREtype): + self.re = pattern + self.pattern = \ + self.reString = str(pattern) + self.flags = flags + + else: + raise ValueError("Regex may only be constructed with a string or a compiled RE object") + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + result = self.re.match(instring,loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + d = result.groupdict() + ret = ParseResults(result.group()) + if d: + for k in d: + ret[k] = d[k] + return loc,ret + + def __str__( self ): + try: + return super(Regex,self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "Re:(%s)" % repr(self.pattern) + + return self.strRepr + + +class QuotedString(Token): + r""" + Token for matching strings that are delimited by quoting characters. 
+
+        Defined with the following parameters:
+         - quoteChar - string of one or more characters defining the quote delimiting string
+         - escChar - character to escape quotes, typically backslash (default=C{None})
+         - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
+         - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
+         - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
+         - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
+         - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
+
+        Example::
+            qs = QuotedString('"')
+            print(qs.searchString('lsjdf "This is the quote" sldjf'))
+            complex_qs = QuotedString('{{', endQuoteChar='}}')
+            print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+            sql_qs = QuotedString('"', escQuote='""')
+            print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+        prints::
+            [['This is the quote']]
+            [['This is the "quote"']]
+            [['This is the quote with "embedded" quotes']]
+        """
+    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+        super(QuotedString,self).__init__()
+
+        # remove white space from quote chars - won't work anyway
+        quoteChar = quoteChar.strip()
+        if not quoteChar:
+            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+            raise SyntaxError()
+
+        if endQuoteChar is None:
+            endQuoteChar = quoteChar
+        else:
+            endQuoteChar = endQuoteChar.strip()
+            if not endQuoteChar:
+                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+                raise SyntaxError()
+
+        self.quoteChar = quoteChar
+        self.quoteCharLen = len(quoteChar)
+        self.firstQuoteChar = quoteChar[0]
+        self.endQuoteChar = endQuoteChar
+        self.endQuoteCharLen = len(endQuoteChar)
+        self.escChar = escChar
+        self.escQuote = escQuote
+        self.unquoteResults = unquoteResults
+        self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+        if multiline:
+            self.flags = re.MULTILINE | re.DOTALL
+            self.pattern = r'%s(?:[^%s%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        else:
+            self.flags = 0
+            self.pattern = r'%s(?:[^%s\n\r%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        if len(self.endQuoteChar) > 1:
+            self.pattern += (
+                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
+                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+                )
+        if escQuote:
+            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+        if escChar:
+            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+        try:
+            self.re = re.compile(self.pattern, self.flags)
+            self.reString = self.pattern
+        except sre_constants.error:
+            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+                SyntaxWarning, stacklevel=2)
+            raise
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def
parseImpl( self, instring, loc, doActions=True ): + result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.group() + + if self.unquoteResults: + + # strip off quotes + ret = ret[self.quoteCharLen:-self.endQuoteCharLen] + + if isinstance(ret,basestring): + # replace escaped whitespace + if '\\' in ret and self.convertWhitespaceEscapes: + ws_map = { + r'\t' : '\t', + r'\n' : '\n', + r'\f' : '\f', + r'\r' : '\r', + } + for wslit,wschar in ws_map.items(): + ret = ret.replace(wslit, wschar) + + # replace escaped characters + if self.escChar: + ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) + + # replace escaped quotes + if self.escQuote: + ret = ret.replace(self.escQuote, self.endQuoteChar) + + return loc, ret + + def __str__( self ): + try: + return super(QuotedString,self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) + + return self.strRepr + + +class CharsNotIn(Token): + """ + Token for matching words composed of characters I{not} in a given set (will + include whitespace in matched characters if not listed in the provided exclusion set - see example). + Defined with string containing all disallowed characters, and an optional + minimum, maximum, and/or exact length. The default value for C{min} is 1 (a + minimum value < 1 is not valid); the default values for C{max} and C{exact} + are 0, meaning no maximum or exact length restriction. + + Example:: + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) + prints:: + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + def __init__( self, notChars, min=1, max=0, exact=0 ): + super(CharsNotIn,self).__init__() + self.skipWhitespace = False + self.notChars = notChars + + if min < 1: + raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = ( self.minLen == 0 ) + self.mayIndexError = False + + def parseImpl( self, instring, loc, doActions=True ): + if instring[loc] in self.notChars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + notchars = self.notChars + maxlen = min( start+self.maxLen, len(instring) ) + while loc < maxlen and \ + (instring[loc] not in notchars): + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + def __str__( self ): + try: + return super(CharsNotIn, self).__str__() + except Exception: + pass + + if self.strRepr is None: + if len(self.notChars) > 4: + self.strRepr = "!W:(%s...)" % self.notChars[:4] + else: + self.strRepr = "!W:(%s)" % self.notChars + + return self.strRepr + +class White(Token): + """ + Special matching class for matching whitespace. Normally, whitespace is ignored + by pyparsing grammars. This class is included when some whitespace structures + are significant. Define with a string containing the whitespace characters to be + matched; default is C{" \\t\\r\\n"}. 
Also takes optional C{min}, C{max}, and C{exact} arguments, + as defined for the C{L{Word}} class. + """ + whiteStrs = { + " " : "<SPC>", + "\t": "<TAB>", + "\n": "<LF>", + "\r": "<CR>", + "\f": "<FF>", + } + def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): + super(White,self).__init__() + self.matchWhite = ws + self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) + #~ self.leaveWhitespace() + self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) + self.mayReturnEmpty = True + self.errmsg = "Expected " + self.name + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + def parseImpl( self, instring, loc, doActions=True ): + if not(instring[ loc ] in self.matchWhite): + raise ParseException(instring, loc, self.errmsg, self) + start = loc + loc += 1 + maxloc = start + self.maxLen + maxloc = min( maxloc, len(instring) ) + while loc < maxloc and instring[loc] in self.matchWhite: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class _PositionToken(Token): + def __init__( self ): + super(_PositionToken,self).__init__() + self.name=self.__class__.__name__ + self.mayReturnEmpty = True + self.mayIndexError = False + +class GoToColumn(_PositionToken): + """ + Token to advance to a specific column of input text; useful for tabular report scraping. + """ + def __init__( self, colno ): + super(GoToColumn,self).__init__() + self.col = colno + + def preParse( self, instring, loc ): + if col(loc,instring) != self.col: + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables( instring, loc ) + while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : + loc += 1 + return loc + + def parseImpl( self, instring, loc, doActions=True ): + thiscol = col( loc, instring ) + if thiscol > self.col: + raise ParseException( instring, loc, "Text not in expected column", self ) + newloc = loc + self.col - thiscol + ret = instring[ loc: newloc ] + return newloc, ret + + +class LineStart(_PositionToken): + """ + Matches if current position is at the beginning of a line within the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (LineStart() + 'AAA' + restOfLine).searchString(test): + print(t) + + Prints:: + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + def __init__( self ): + super(LineStart,self).__init__() + self.errmsg = "Expected start of line" + + def parseImpl( self, instring, loc, doActions=True ): + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) + +class LineEnd(_PositionToken): + """ + Matches if current position is at the end of a line within the parse string + """ + def __init__( self ): + super(LineEnd,self).__init__() + self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) + self.errmsg = "Expected end of line" + + def parseImpl( self, instring, loc, doActions=True ): + if loc<len(instring): + if instring[loc] == "\n": + return loc+1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc+1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + +class StringStart(_PositionToken): + """ + Matches if current position is at the beginning of 
the parse string + """ + def __init__( self ): + super(StringStart,self).__init__() + self.errmsg = "Expected start of text" + + def parseImpl( self, instring, loc, doActions=True ): + if loc != 0: + # see if entire string up to here is just whitespace and ignoreables + if loc != self.preParse( instring, 0 ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + +class StringEnd(_PositionToken): + """ + Matches if current position is at the end of the parse string + """ + def __init__( self ): + super(StringEnd,self).__init__() + self.errmsg = "Expected end of text" + + def parseImpl( self, instring, loc, doActions=True ): + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc+1, [] + elif loc > len(instring): + return loc, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + +class WordStart(_PositionToken): + """ + Matches if the current position is at the beginning of a Word, and + is not preceded by any character in a given set of C{wordChars} + (default=C{printables}). To emulate the C{\b} behavior of regular expressions, + use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of + the string being parsed, or at the beginning of a line. + """ + def __init__(self, wordChars = printables): + super(WordStart,self).__init__() + self.wordChars = set(wordChars) + self.errmsg = "Not at the start of a word" + + def parseImpl(self, instring, loc, doActions=True ): + if loc != 0: + if (instring[loc-1] in self.wordChars or + instring[loc] not in self.wordChars): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + +class WordEnd(_PositionToken): + """ + Matches if the current position is at the end of a Word, and + is not followed by any character in a given set of C{wordChars} + (default=C{printables}). To emulate the C{\b} behavior of regular expressions, + use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of + the string being parsed, or at the end of a line. + """ + def __init__(self, wordChars = printables): + super(WordEnd,self).__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.errmsg = "Not at the end of a word" + + def parseImpl(self, instring, loc, doActions=True ): + instrlen = len(instring) + if instrlen>0 and loc<instrlen: + if (instring[loc] in self.wordChars or + instring[loc-1] not in self.wordChars): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class ParseExpression(ParserElement): + """ + Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
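+
+    Concrete subclasses include C{L{And}}, C{L{Or}}, C{L{MatchFirst}}, and
+    C{L{Each}}, built with the C{+}, C{^}, C{|}, and C{&} operators respectively.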
+ """ + def __init__( self, exprs, savelist = False ): + super(ParseExpression,self).__init__(savelist) + if isinstance( exprs, _generatorType ): + exprs = list(exprs) + + if isinstance( exprs, basestring ): + self.exprs = [ ParserElement._literalStringClass( exprs ) ] + elif isinstance( exprs, Iterable ): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if all(isinstance(expr, basestring) for expr in exprs): + exprs = map(ParserElement._literalStringClass, exprs) + self.exprs = list(exprs) + else: + try: + self.exprs = list( exprs ) + except TypeError: + self.exprs = [ exprs ] + self.callPreparse = False + + def __getitem__( self, i ): + return self.exprs[i] + + def append( self, other ): + self.exprs.append( other ) + self.strRepr = None + return self + + def leaveWhitespace( self ): + """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on + all contained expressions.""" + self.skipWhitespace = False + self.exprs = [ e.copy() for e in self.exprs ] + for e in self.exprs: + e.leaveWhitespace() + return self + + def ignore( self, other ): + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + super( ParseExpression, self).ignore( other ) + for e in self.exprs: + e.ignore( self.ignoreExprs[-1] ) + else: + super( ParseExpression, self).ignore( other ) + for e in self.exprs: + e.ignore( self.ignoreExprs[-1] ) + return self + + def __str__( self ): + try: + return super(ParseExpression,self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) + return self.strRepr + + def streamline( self ): + super(ParseExpression,self).streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for Or's and MatchFirst's) + if ( len(self.exprs) == 2 ): + other = self.exprs[0] + if ( isinstance( other, self.__class__ ) and + not(other.parseAction) and + other.resultsName is None and + not other.debug ): + self.exprs = other.exprs[:] + [ self.exprs[1] ] + self.strRepr = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if ( isinstance( other, self.__class__ ) and + not(other.parseAction) and + other.resultsName is None and + not other.debug ): + self.exprs = self.exprs[:-1] + other.exprs[:] + self.strRepr = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = "Expected " + _ustr(self) + + return self + + def setResultsName( self, name, listAllMatches=False ): + ret = super(ParseExpression,self).setResultsName(name,listAllMatches) + return ret + + def validate( self, validateTrace=[] ): + tmp = validateTrace[:]+[self] + for e in self.exprs: + e.validate(tmp) + self.checkRecursion( [] ) + + def copy(self): + ret = super(ParseExpression,self).copy() + ret.exprs = [e.copy() for e in self.exprs] + return ret + +class And(ParseExpression): + """ + Requires all given C{ParseExpression}s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the C{'+'} operator. + May also be constructed using the C{'-'} operator, which will suppress backtracking. 
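+
+    (Building with C{'-'} inserts an internal error stop: once the expressions
+    before the C{'-'} have matched, a failure in a later expression raises
+    C{L{ParseSyntaxException}} immediately instead of allowing an enclosing
+    alternative to backtrack.  For example, C{Literal("start") - Word(nums)}
+    fails fatally on C{"start abc"}, where C{Literal("start") + Word(nums)}
+    would report only an ordinary, recoverable mismatch.)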
+ + Example:: + integer = Word(nums) + name_expr = OneOrMore(Word(alphas)) + + expr = And([integer("id"),name_expr("name"),integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs): + super(And._ErrorStop,self).__init__(*args, **kwargs) + self.name = '-' + self.leaveWhitespace() + + def __init__( self, exprs, savelist = True ): + super(And,self).__init__(exprs, savelist) + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + self.setWhitespaceChars( self.exprs[0].whiteChars ) + self.skipWhitespace = self.exprs[0].skipWhitespace + self.callPreparse = True + + def parseImpl( self, instring, loc, doActions=True ): + # pass False as last arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) + errorStop = False + for e in self.exprs[1:]: + if isinstance(e, And._ErrorStop): + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse( instring, loc, doActions ) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException(instring, len(instring), self.errmsg, self) + else: + loc, exprtokens = e._parse( instring, loc, doActions ) + if exprtokens or exprtokens.haskeys(): + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + return self.append( other ) #And( [ self, other ] ) + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + if not e.mayReturnEmpty: + break + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + +class Or(ParseExpression): + """ + Requires that at least one C{ParseExpression} is found. + If two expressions match, the expression that matches the longest string will be used. + May be constructed using the C{'^'} operator. + + Example:: + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) + print(number.searchString("123 3.1416 789")) + prints:: + [['123'], ['3.1416'], ['789']] + """ + def __init__( self, exprs, savelist = False ): + super(Or,self).__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + maxExcLoc = -1 + maxException = None + matches = [] + for e in self.exprs: + try: + loc2 = e.tryParse( instring, loc ) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException(instring,len(instring),e.errmsg,self) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + matches.sort(key=lambda x: -x[0]) + for _,e in matches: + try: + return e._parse( instring, loc, doActions ) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException(instring, loc, "no defined alternatives to match", self) + + + def __ixor__(self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + return self.append( other ) #Or( [ self, other ] ) + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + + +class MatchFirst(ParseExpression): + """ + Requires that at least one C{ParseExpression} is found. + If two expressions match, the first one listed is the one that will match. + May be constructed using the C{'|'} operator. + + Example:: + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) + print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] + """ + def __init__( self, exprs, savelist = False ): + super(MatchFirst,self).__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + maxExcLoc = -1 + maxException = None + for e in self.exprs: + try: + ret = e._parse( instring, loc, doActions ) + return ret + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException(instring,len(instring),e.errmsg,self) + maxExcLoc = len(instring) + + # only got here if no expression matched, raise exception for match that made it the furthest + else: + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ior__(self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + return self.append( other ) #MatchFirst( [ self, other ] ) + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + + +class Each(ParseExpression): + """ + Requires all given C{ParseExpression}s to be found, but in any order. + Expressions may be separated by whitespace. + May be constructed using the C{'&'} operator. 
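+
+    Expressions that are not wrapped in C{L{Optional}} (and that cannot otherwise
+    match an empty string) must each match exactly once, though they may appear
+    in any order in the input; C{L{Optional}} elements may be omitted.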
+ + Example:: + color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) + + shape_spec.runTests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + prints:: + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 + """ + def __init__( self, exprs, savelist = True ): + super(Each,self).__init__(exprs, savelist) + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = True + self.initExprGroups = True + + def parseImpl( self, instring, loc, doActions=True ): + if self.initExprGroups: + self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) + opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] + opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)] + self.optionals = opt1 + opt2 + self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] + self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] + self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] + self.required += self.multirequired + self.initExprGroups = False + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + matchOrder = [] + + keepMatching = True + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired + failed = [] + for e in tmpExprs: + try: + tmpLoc = e.tryParse( instring, tmpLoc ) + except ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e),e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + if tmpReqd: + missing = ", ".join(_ustr(e) for e in tmpReqd) + raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) + + # add any unmatched Optionals, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] + + resultlist = [] + for e in matchOrder: + loc,results = e._parse(instring,loc,doActions) + resultlist.append(results) + + finalResults = sum(resultlist, ParseResults([])) + return loc, finalResults + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " & 
".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + + +class ParseElementEnhance(ParserElement): + """ + Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. + """ + def __init__( self, expr, savelist=False ): + super(ParseElementEnhance,self).__init__(savelist) + if isinstance( expr, basestring ): + if issubclass(ParserElement._literalStringClass, Token): + expr = ParserElement._literalStringClass(expr) + else: + expr = ParserElement._literalStringClass(Literal(expr)) + self.expr = expr + self.strRepr = None + if expr is not None: + self.mayIndexError = expr.mayIndexError + self.mayReturnEmpty = expr.mayReturnEmpty + self.setWhitespaceChars( expr.whiteChars ) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def parseImpl( self, instring, loc, doActions=True ): + if self.expr is not None: + return self.expr._parse( instring, loc, doActions, callPreParse=False ) + else: + raise ParseException("",loc,self.errmsg,self) + + def leaveWhitespace( self ): + self.skipWhitespace = False + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.leaveWhitespace() + return self + + def ignore( self, other ): + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + super( ParseElementEnhance, self).ignore( other ) + if self.expr is not None: + self.expr.ignore( self.ignoreExprs[-1] ) + else: + super( ParseElementEnhance, self).ignore( other ) + if self.expr is not None: + self.expr.ignore( self.ignoreExprs[-1] ) + return self + + def streamline( self ): + super(ParseElementEnhance,self).streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def checkRecursion( self, parseElementList ): + if self in parseElementList: + raise RecursiveGrammarException( parseElementList+[self] ) + subRecCheckList = parseElementList[:] + [ self ] + if self.expr is not None: + self.expr.checkRecursion( subRecCheckList ) + + def validate( self, validateTrace=[] ): + tmp = validateTrace[:]+[self] + if self.expr is not None: + self.expr.validate(tmp) + self.checkRecursion( [] ) + + def __str__( self ): + try: + return super(ParseElementEnhance,self).__str__() + except Exception: + pass + + if self.strRepr is None and self.expr is not None: + self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) + return self.strRepr + + +class FollowedBy(ParseElementEnhance): + """ + Lookahead matching of the given parse expression. C{FollowedBy} + does I{not} advance the parsing position within the input string, it only + verifies that the specified parse expression matches at the current + position. C{FollowedBy} always returns a null token list. 
+ + Example:: + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + + OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() + prints:: + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ + def __init__( self, expr ): + super(FollowedBy,self).__init__(expr) + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + self.expr.tryParse( instring, loc ) + return loc, [] + + +class NotAny(ParseElementEnhance): + """ + Lookahead to disallow matching with the given parse expression. C{NotAny} + does I{not} advance the parsing position within the input string, it only + verifies that the specified parse expression does I{not} match at the current + position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny} + always returns a null token list. May be constructed using the '~' operator. + + Example:: + + """ + def __init__( self, expr ): + super(NotAny,self).__init__(expr) + #~ self.leaveWhitespace() + self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs + self.mayReturnEmpty = True + self.errmsg = "Found unwanted token, "+_ustr(self.expr) + + def parseImpl( self, instring, loc, doActions=True ): + if self.expr.canParseNext(instring, loc): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "~{" + _ustr(self.expr) + "}" + + return self.strRepr + +class _MultipleMatch(ParseElementEnhance): + def __init__( self, expr, stopOn=None): + super(_MultipleMatch, self).__init__(expr) + self.saveAsList = True + ender = stopOn + if isinstance(ender, basestring): + ender = ParserElement._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None + + def parseImpl( self, instring, loc, doActions=True ): + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = self.not_ender.tryParse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) + try: + hasIgnoreExprs = (not not self.ignoreExprs) + while 1: + if check_ender: + try_not_ender(instring, loc) + if hasIgnoreExprs: + preloc = self_skip_ignorables( instring, loc ) + else: + preloc = loc + loc, tmptokens = self_expr_parse( instring, preloc, doActions ) + if tmptokens or tmptokens.haskeys(): + tokens += tmptokens + except (ParseException,IndexError): + pass + + return loc, tokens + +class OneOrMore(_MultipleMatch): + """ + Repetition of one or more of the given expression. + + Parameters: + - expr - expression that must match one or more times + - stopOn - (default=C{None}) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example:: + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) + + text = "shape: SQUARE posn: upper left color: BLACK" + OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + + # use stopOn attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parseString(text).pprint() + """ + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + _ustr(self.expr) + "}..." + + return self.strRepr + +class ZeroOrMore(_MultipleMatch): + """ + Optional repetition of zero or more of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - stopOn - (default=C{None}) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example: similar to L{OneOrMore} + """ + def __init__( self, expr, stopOn=None): + super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + try: + return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) + except (ParseException,IndexError): + return loc, [] + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "[" + _ustr(self.expr) + "]..." + + return self.strRepr + +class _NullToken(object): + def __bool__(self): + return False + __nonzero__ = __bool__ + def __str__(self): + return "" + +_optionalNotMatched = _NullToken() +class Optional(ParseElementEnhance): + """ + Optional matching of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - default (optional) - value to be returned if the optional expression is not found. + + Example:: + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) + zip.runTests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + prints:: + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) + """ + def __init__( self, expr, default=_optionalNotMatched ): + super(Optional,self).__init__( expr, savelist=False ) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + try: + loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) + except (ParseException,IndexError): + if self.defaultValue is not _optionalNotMatched: + if self.expr.resultsName: + tokens = ParseResults([ self.defaultValue ]) + tokens[self.expr.resultsName] = self.defaultValue + else: + tokens = [ self.defaultValue ] + else: + tokens = [] + return loc, tokens + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "[" + _ustr(self.expr) + "]" + + return self.strRepr + +class SkipTo(ParseElementEnhance): + """ + Token for skipping over all undefined text until the matched expression is found. 
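+
+    A minimal sketch (illustrative only)::
+        # capture everything up to, but not including, the semicolon
+        SkipTo(';').parseString("anything at all; tail")   # -> ['anything at all']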
+ + Parameters: + - expr - target expression marking the end of the data to be skipped + - include - (default=C{False}) if True, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element list). + - ignore - (default=C{None}) used to define grammars (typically quoted strings and + comments) that might contain false matches to the target expression + - failOn - (default=C{None}) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, + the SkipTo is not a match + + Example:: + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quotedString) + string_data.setParseAction(tokenMap(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.searchString(report): + print tkt.dump() + prints:: + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: 6 + - desc: Intermittent system crash + - issue_num: 101 + - sev: Critical + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: 14 + - desc: Spelling error on Login ('log|n') + - issue_num: 94 + - sev: Cosmetic + ['79', 'Minor', 'System slow when running too many reports', '47'] + - days_open: 47 + - desc: System slow when running too many reports + - issue_num: 79 + - sev: Minor + """ + def __init__( self, other, include=False, ignore=None, failOn=None ): + super( SkipTo, self ).__init__( other ) + self.ignoreExpr = ignore + self.mayReturnEmpty = True + self.mayIndexError = False + self.includeMatch = include + self.asList = False + if isinstance(failOn, basestring): + self.failOn = ParserElement._literalStringClass(failOn) + else: + self.failOn = failOn + self.errmsg = "No match found for "+_ustr(self.expr) + + def parseImpl( self, instring, loc, doActions=True ): + startloc = loc + instrlen = len(instring) + expr = self.expr + expr_parse = self.expr._parse + self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None + self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if self_ignoreExpr_tryParse is not None: + # advance past ignore expressions + while 1: + try: + tmploc = self_ignoreExpr_tryParse(instring, tmploc) + except ParseBaseException: + break + + try: + expr_parse(instring, tmploc, doActions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return 
values + loc = tmploc + skiptext = instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) + skipresult += mat + + return loc, skipresult + +class Forward(ParseElementEnhance): + """ + Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. + + Note: take care when assigning to C{Forward} not to overlook precedence of operators. + Specifically, '|' has a lower precedence than '<<', so that:: + fwdExpr << a | b | c + will actually be evaluated as:: + (fwdExpr << a) | b | c + thereby leaving b and c out as parseable alternatives. It is recommended that you + explicitly group the values inserted into the C{Forward}:: + fwdExpr << (a | b | c) + Converting to use the '<<=' operator instead will avoid this problem. + + See L{ParseResults.pprint} for an example of a recursive parser created using + C{Forward}. + """ + def __init__( self, other=None ): + super(Forward,self).__init__( other, savelist=False ) + + def __lshift__( self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass(other) + self.expr = other + self.strRepr = None + self.mayIndexError = self.expr.mayIndexError + self.mayReturnEmpty = self.expr.mayReturnEmpty + self.setWhitespaceChars( self.expr.whiteChars ) + self.skipWhitespace = self.expr.skipWhitespace + self.saveAsList = self.expr.saveAsList + self.ignoreExprs.extend(self.expr.ignoreExprs) + return self + + def __ilshift__(self, other): + return self << other + + def leaveWhitespace( self ): + self.skipWhitespace = False + return self + + def streamline( self ): + if not self.streamlined: + self.streamlined = True + if self.expr is not None: + self.expr.streamline() + return self + + def validate( self, validateTrace=[] ): + if self not in validateTrace: + tmp = validateTrace[:]+[self] + if self.expr is not None: + self.expr.validate(tmp) + self.checkRecursion([]) + + def __str__( self ): + if hasattr(self,"name"): + return self.name + return self.__class__.__name__ + ": ..." + + # stubbed out for now - creates awful memory and perf issues + self._revertClass = self.__class__ + self.__class__ = _ForwardNoRecurse + try: + if self.expr is not None: + retString = _ustr(self.expr) + else: + retString = "None" + finally: + self.__class__ = self._revertClass + return self.__class__.__name__ + ": " + retString + + def copy(self): + if self.expr is not None: + return super(Forward,self).copy() + else: + ret = Forward() + ret <<= self + return ret + +class _ForwardNoRecurse(Forward): + def __str__( self ): + return "..." + +class TokenConverter(ParseElementEnhance): + """ + Abstract subclass of C{ParseExpression}, for converting parsed results. + """ + def __init__( self, expr, savelist=False ): + super(TokenConverter,self).__init__( expr )#, savelist ) + self.saveAsList = False + +class Combine(TokenConverter): + """ + Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the input string; + this can be disabled by specifying C{'adjacent=False'} in the constructor. + + Example:: + real = Word(nums) + '.' + Word(nums) + print(real.parseString('3.1416')) # -> ['3', '.', '1416'] + # will also erroneously match the following + print(real.parseString('3. 
1416')) # -> ['3', '.', '1416']
+
+        real = Combine(Word(nums) + '.' + Word(nums))
+        print(real.parseString('3.1416')) # -> ['3.1416']
+        # no match when there are internal spaces
+        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+    """
+    def __init__( self, expr, joinString="", adjacent=True ):
+        super(Combine,self).__init__( expr )
+        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+        if adjacent:
+            self.leaveWhitespace()
+        self.adjacent = adjacent
+        self.skipWhitespace = True
+        self.joinString = joinString
+        self.callPreparse = True
+
+    def ignore( self, other ):
+        if self.adjacent:
+            ParserElement.ignore(self, other)
+        else:
+            super( Combine, self).ignore( other )
+        return self
+
+    def postParse( self, instring, loc, tokenlist ):
+        retToks = tokenlist.copy()
+        del retToks[:]
+        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+        if self.resultsName and retToks.haskeys():
+            return [ retToks ]
+        else:
+            return retToks
+
+class Group(TokenConverter):
+    """
+    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
+
+    Example::
+        ident = Word(alphas)
+        num = Word(nums)
+        term = ident | num
+        func = ident + Optional(delimitedList(term))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
+
+        func = ident + Group(Optional(delimitedList(term)))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
+    """
+    def __init__( self, expr ):
+        super(Group,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        return [ tokenlist ]
+
+class Dict(TokenConverter):
+    """
+    Converter to return a repetitive expression as a list, but also as a dictionary.
+    Each element can also be referenced using the first token in the expression as its key.
+    Useful for tabular report scraping when the first column can be used as an item key.
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+        # print attributes as plain groups
+        print(OneOrMore(attr_expr).parseString(text).dump())
+
+        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+        print(result.dump())
+
+        # access named fields as dict entries, or output as dict
+        print(result['shape'])
+        print(result.asDict())
+    prints::
+        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+    See more examples at L{ParseResults} of accessing fields by results name.
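+
+    In short (an illustrative recap; C{attr_expr} and C{text} are the names from
+    the example above)::
+        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+        result['shape']   # dict-style access      -> 'SQUARE'
+        result.shape      # attribute-style access -> 'SQUARE'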
+ """ + def __init__( self, expr ): + super(Dict,self).__init__( expr ) + self.saveAsList = True + + def postParse( self, instring, loc, tokenlist ): + for i,tok in enumerate(tokenlist): + if len(tok) == 0: + continue + ikey = tok[0] + if isinstance(ikey,int): + ikey = _ustr(tok[0]).strip() + if len(tok)==1: + tokenlist[ikey] = _ParseResultsWithOffset("",i) + elif len(tok)==2 and not isinstance(tok[1],ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) + else: + dictvalue = tok.copy() #ParseResults(i) + del dictvalue[0] + if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) + else: + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) + + if self.resultsName: + return [ tokenlist ] + else: + return tokenlist + + +class Suppress(TokenConverter): + """ + Converter for ignoring the results of a parsed expression. + + Example:: + source = "a, b, c,d" + wd = Word(alphas) + wd_list1 = wd + ZeroOrMore(',' + wd) + print(wd_list1.parseString(source)) + + # often, delimiters that are useful during parsing are just in the + # way afterward - use Suppress to keep them out of the parsed output + wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) + print(wd_list2.parseString(source)) + prints:: + ['a', ',', 'b', ',', 'c', ',', 'd'] + ['a', 'b', 'c', 'd'] + (See also L{delimitedList}.) + """ + def postParse( self, instring, loc, tokenlist ): + return [] + + def suppress( self ): + return self + + +class OnlyOnce(object): + """ + Wrapper for parse actions, to ensure they are only called once. + """ + def __init__(self, methodCall): + self.callable = _trim_arity(methodCall) + self.called = False + def __call__(self,s,l,t): + if not self.called: + results = self.callable(s,l,t) + self.called = True + return results + raise ParseException(s,l,"") + def reset(self): + self.called = False + +def traceParseAction(f): + """ + Decorator for debugging parse actions. + + When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} + When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. + + Example:: + wd = Word(alphas) + + @traceParseAction + def remove_duplicate_chars(tokens): + return ''.join(sorted(set(''.join(tokens)))) + + wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) + print(wds.parseString("slkdjs sld sldd sdlf sdljf")) + prints:: + >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) + <<leaving remove_duplicate_chars (ret: 'dfjkls') + ['dfjkls'] + """ + f = _trim_arity(f) + def z(*paArgs): + thisFunc = f.__name__ + s,l,t = paArgs[-3:] + if len(paArgs)>3: + thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc + sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) + try: + ret = f(*paArgs) + except Exception as exc: + sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) + raise + sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) + return ret + try: + z.__name__ = f.__name__ + except AttributeError: + pass + return z + +# +# global helpers +# +def delimitedList( expr, delim=",", combine=False ): + """ + Helper to define a delimited list of expressions - the delimiter defaults to ','. 
+ By default, the list elements and delimiters can have intervening whitespace, and + comments, but this can be overridden by passing C{combine=True} in the constructor. + If C{combine} is set to C{True}, the matching tokens are returned as a single token + string, with the delimiters included; otherwise, the matching tokens are returned + as a list of tokens, with the delimiters suppressed. + + Example:: + delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] + delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] + """ + dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." + if combine: + return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName) + else: + return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) + +def countedArray( expr, intExpr=None ): + """ + Helper to define a counted list of expressions. + This helper defines a pattern of the form:: + integer expr expr expr... + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. + + If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. + + Example:: + countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] + + # in this parser, the leading integer value is given in binary, + # '10' indicating that 2 values are in the array + binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) + countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] + """ + arrayExpr = Forward() + def countFieldParseAction(s,l,t): + n = t[0] + arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) + return [] + if intExpr is None: + intExpr = Word(nums).setParseAction(lambda t:int(t[0])) + else: + intExpr = intExpr.copy() + intExpr.setName("arrayLen") + intExpr.addParseAction(countFieldParseAction, callDuringTry=True) + return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') + +def _flatten(L): + ret = [] + for i in L: + if isinstance(i,list): + ret.extend(_flatten(i)) + else: + ret.append(i) + return ret + +def matchPreviousLiteral(expr): + """ + Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks + for a 'repeat' of a previous expression. For example:: + first = Word(nums) + second = matchPreviousLiteral(first) + matchExpr = first + ":" + second + will match C{"1:1"}, but not C{"1:2"}. Because this matches a + previous literal, will also match the leading C{"1:1"} in C{"1:10"}. + If this is not desired, use C{matchPreviousExpr}. + Do I{not} use with packrat parsing enabled. + """ + rep = Forward() + def copyTokenToRepeater(s,l,t): + if t: + if len(t) == 1: + rep << t[0] + else: + # flatten t tokens + tflat = _flatten(t.asList()) + rep << And(Literal(tt) for tt in tflat) + else: + rep << Empty() + expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) + return rep + +def matchPreviousExpr(expr): + """ + Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks + for a 'repeat' of a previous expression. For example:: + first = Word(nums) + second = matchPreviousExpr(first) + matchExpr = first + ":" + second + will match C{"1:1"}, but not C{"1:2"}. 
Because this matches by + expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; + the expressions are evaluated first, and then compared, so + C{"1"} is compared with C{"10"}. + Do I{not} use with packrat parsing enabled. + """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + def copyTokenToRepeater(s,l,t): + matchTokens = _flatten(t.asList()) + def mustMatchTheseTokens(s,l,t): + theseTokens = _flatten(t.asList()) + if theseTokens != matchTokens: + raise ParseException("",0,"") + rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) + expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) + return rep + +def _escapeRegexRangeChars(s): + #~ escape these chars: ^-] + for c in r"\^-]": + s = s.replace(c,_bslash+c) + s = s.replace("\n",r"\n") + s = s.replace("\t",r"\t") + return _ustr(s) + +def oneOf( strs, caseless=False, useRegex=True ): + """ + Helper to quickly define a set of alternative Literals, and makes sure to do + longest-first testing when there is a conflict, regardless of the input order, + but returns a C{L{MatchFirst}} for best performance. + + Parameters: + - strs - a string of space-delimited literals, or a collection of string literals + - caseless - (default=C{False}) - treat all literals as caseless + - useRegex - (default=C{True}) - as an optimization, will generate a Regex + object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or + if creating a C{Regex} raises an exception) + + Example:: + comp_oper = oneOf("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) + prints:: + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + if caseless: + isequal = ( lambda a,b: a.upper() == b.upper() ) + masks = ( lambda a,b: b.upper().startswith(a.upper()) ) + parseElementClass = CaselessLiteral + else: + isequal = ( lambda a,b: a == b ) + masks = ( lambda a,b: b.startswith(a) ) + parseElementClass = Literal + + symbols = [] + if isinstance(strs,basestring): + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + warnings.warn("Invalid argument to oneOf, expected string or iterable", + SyntaxWarning, stacklevel=2) + if not symbols: + return NoMatch() + + i = 0 + while i < len(symbols)-1: + cur = symbols[i] + for j,other in enumerate(symbols[i+1:]): + if ( isequal(other, cur) ): + del symbols[i+j+1] + break + elif ( masks(cur, other) ): + del symbols[i+j+1] + symbols.insert(i,other) + cur = other + break + else: + i += 1 + + if not caseless and useRegex: + #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) + try: + if len(symbols)==len("".join(symbols)): + return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) + else: + return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) + except Exception: + warnings.warn("Exception creating Regex for oneOf, building MatchFirst", + SyntaxWarning, stacklevel=2) + + + # last resort, just use MatchFirst + return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) + +def dictOf( key, value ): + """ + Helper to easily and clearly define a dictionary by specifying the respective patterns + for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens + in the proper order. 
The key pattern can include delimiting markers or punctuation,
+    as long as they are suppressed, thereby leaving the significant key text. The value
+    pattern can include named results, so that the C{Dict} results can include named token
+    fields.
+
+    Example::
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        print(OneOrMore(attr_expr).parseString(text).dump())
+
+        attr_label = label
+        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+        # similar to Dict, but simpler call format
+        result = dictOf(attr_label, attr_value).parseString(text)
+        print(result.dump())
+        print(result['shape'])
+        print(result.shape)  # object attribute access works too
+        print(result.asDict())
+    prints::
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        SQUARE
+        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+    """
+    return Dict( ZeroOrMore( Group ( key + value ) ) )
+
+def originalTextFor(expr, asString=True):
+    """
+    Helper to return the original, untokenized text for a given expression. Useful to
+    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+    revert separate tokens with intervening whitespace back to the original matching
+    input text. By default, returns a string containing the original parsed text.
+
+    If the optional C{asString} argument is passed as C{False}, then the return value is a
+    C{L{ParseResults}} containing any results names that were originally matched, and a
+    single token containing the original matched text from the input string. So if
+    the expression passed to C{L{originalTextFor}} contains expressions with defined
+    results names, you must set C{asString} to C{False} if you want to preserve those
+    results name values.
+
+    Example::
+        src = "this is test <b> bold <i>text</i> </b> normal text "
+        for tag in ("b","i"):
+            opener,closer = makeHTMLTags(tag)
+            patt = originalTextFor(opener + SkipTo(closer) + closer)
+            print(patt.searchString(src)[0])
+    prints::
+        ['<b> bold <i>text</i> </b>']
+        ['<i>text</i>']
+    """
+    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+    endlocMarker = locMarker.copy()
+    endlocMarker.callPreparse = False
+    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+    if asString:
+        extractText = lambda s,l,t: s[t._original_start:t._original_end]
+    else:
+        def extractText(s,l,t):
+            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+    matchExpr.setParseAction(extractText)
+    matchExpr.ignoreExprs = expr.ignoreExprs
+    return matchExpr
+
+def ungroup(expr):
+    """
+    Helper to undo pyparsing's default grouping of And expressions, even
+    if all but one are non-empty.
+    """
+    return TokenConverter(expr).setParseAction(lambda t:t[0])
+
+def locatedExpr(expr):
+    """
+    Helper to decorate a returned token with its starting and ending locations in the input string.
+ This helper adds the following results names: + - locn_start = location where matched expression begins + - locn_end = location where matched expression ends + - value = the actual parsed results + + Be careful if the input text contains C{<TAB>} characters, you may want to call + C{L{ParserElement.parseWithTabs}} + + Example:: + wd = Word(alphas) + for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): + print(match) + prints:: + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] + """ + locator = Empty().setParseAction(lambda s,l,t: l) + return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) + + +# convenience constants for positional expressions +empty = Empty().setName("empty") +lineStart = LineStart().setName("lineStart") +lineEnd = LineEnd().setName("lineEnd") +stringStart = StringStart().setName("stringStart") +stringEnd = StringEnd().setName("stringEnd") + +_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) +_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) +_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) +_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) +_charRange = Group(_singleChar + Suppress("-") + _singleChar) +_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" + +def srange(s): + r""" + Helper to easily define string ranges for use in Word construction. Borrows + syntax from regexp '[]' string range definitions:: + srange("[0-9]") -> "0123456789" + srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" + srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" + The input string must be enclosed in []'s, and the returned string is the expanded + character set joined into a single string. + The values enclosed in the []'s may be: + - a single character + - an escaped character with a leading backslash (such as C{\-} or C{\]}) + - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) + (C{\0x##} is also supported for backwards compatibility) + - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) + - a range of any of the above, separated by a dash (C{'a-z'}, etc.) + - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) + """ + _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) + try: + return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) + except Exception: + return "" + +def matchOnlyAtCol(n): + """ + Helper method for defining parse actions that require matching at a specific + column in the input text. + """ + def verifyCol(strg,locn,toks): + if col(locn,strg) != n: + raise ParseException(strg,locn,"matched token not at column %d" % n) + return verifyCol + +def replaceWith(replStr): + """ + Helper method for common parse actions that simply return a literal value. Especially + useful when used with C{L{transformString<ParserElement.transformString>}()}. 
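+
+    A minimal sketch (illustrative only)::
+        na = Literal("N/A").setParseAction(replaceWith(None))
+        na.parseString("N/A")   # -> [None]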
+ + Example:: + num = Word(nums).setParseAction(lambda toks: int(toks[0])) + na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) + term = na | num + + OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] + """ + return lambda s,l,t: [replStr] + +def removeQuotes(s,l,t): + """ + Helper parse action for removing quotation marks from parsed quoted strings. + + Example:: + # by default, quotation marks are included in parsed results + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] + + # use removeQuotes to strip quotation marks from parsed results + quotedString.setParseAction(removeQuotes) + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] + """ + return t[0][1:-1] + +def tokenMap(func, *args): + """ + Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional + args are passed, they are forwarded to the given function as additional arguments after + the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the + parsed data to an integer using base 16. + + Example (compare the last to example in L{ParserElement.transformString}:: + hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) + hex_ints.runTests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).setParseAction(tokenMap(str.upper)) + OneOrMore(upperword).runTests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).setParseAction(tokenMap(str.title)) + OneOrMore(wd).setParseAction(' '.join).runTests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + prints:: + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + def pa(s,l,t): + return [func(tokn, *args) for tokn in t] + + try: + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + pa.__name__ = func_name + + return pa + +upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) +"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}""" + +downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) +"""(Deprecated) Helper parse action to convert tokens to lower case. 
Deprecated in favor of L{pyparsing_common.downcaseTokens}""" + +def _makeTags(tagStr, xml): + """Internal helper to construct opening and closing tag expressions, given a tag name""" + if isinstance(tagStr,basestring): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas,alphanums+"_-:") + if (xml): + tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) + openTag = Suppress("<") + tagStr("tag") + \ + Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ + Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") + else: + printablesLessRAbrack = "".join(c for c in printables if c not in ">") + tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) + openTag = Suppress("<") + tagStr("tag") + \ + Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ + Optional( Suppress("=") + tagAttrValue ) ))) + \ + Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") + closeTag = Combine(_L("</") + tagStr + ">") + + openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) + closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) + openTag.tag = resname + closeTag.tag = resname + return openTag, closeTag + +def makeHTMLTags(tagStr): + """ + Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches + tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. + + Example:: + text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' + # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple + a,a_end = makeHTMLTags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.searchString(text): + # attributes in the <A> tag (like "href" shown here) are also accessible as named results + print(link.link_text, '->', link.href) + prints:: + pyparsing -> http://pyparsing.wikispaces.com + """ + return _makeTags( tagStr, False ) + +def makeXMLTags(tagStr): + """ + Helper to construct opening and closing tag expressions for XML, given a tag name. Matches + tags only in the given upper/lower case. + + Example: similar to L{makeHTMLTags} + """ + return _makeTags( tagStr, True ) + +def withAttribute(*args,**attrDict): + """ + Helper to create a validating parse action to be used with start tags created + with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag + with a required attribute value, to avoid false matches on common tags such as + C{<TD>} or C{<DIV>}. + + Call C{withAttribute} with a series of attribute names and values. Specify the list + of filter attributes names and values as: + - keyword arguments, as in C{(align="right")}, or + - as an explicit dict with C{**} operator, when an attribute name is also a Python + reserved word, as in C{**{"class":"Customer", "align":"right"}} + - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) + For attribute names with a namespace prefix, you must use the second form. Attribute + names are matched insensitive to upper/lower case. + + If just testing for C{class} (with or without a namespace), use C{L{withClass}}. 
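+
+    A quick sketch (illustrative only; a fuller example follows below)::
+        td,td_end = makeHTMLTags("td")
+        # only match <td> cells explicitly marked align="right"
+        right_td = td.copy().setParseAction(withAttribute(align="right"))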
+ + To verify that the attribute exists, but without specifying a value, pass + C{withAttribute.ANY_VALUE} as the value. + + Example:: + html = ''' + <div> + Some text + <div type="grid">1 4 0 1 0</div> + <div type="graph">1,3 2,3 1,1</div> + <div>this has no type</div> + </div> + + ''' + div,div_end = makeHTMLTags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().setParseAction(withAttribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attrDict.items() + attrs = [(k,v) for k,v in attrs] + def pa(s,l,tokens): + for attrName,attrValue in attrs: + if attrName not in tokens: + raise ParseException(s,l,"no matching attribute " + attrName) + if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % + (attrName, tokens[attrName], attrValue)) + return pa +withAttribute.ANY_VALUE = object() + +def withClass(classname, namespace=''): + """ + Simplified version of C{L{withAttribute}} when matching on a div class - made + difficult because C{class} is a reserved word in Python. + + Example:: + html = ''' + <div> + Some text + <div class="grid">1 4 0 1 0</div> + <div class="graph">1,3 2,3 1,1</div> + <div>this <div> has no class</div> + </div> + + ''' + div,div_end = makeHTMLTags("div") + div_grid = div().setParseAction(withClass("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "%s:class" % namespace if namespace else "class" + return withAttribute(**{classattr : classname}) + +opAssoc = _Constants() +opAssoc.LEFT = object() +opAssoc.RIGHT = object() + +def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): + """ + Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary or + binary, left- or right-associative. Parse actions can also be attached + to operator expressions. The generated parser will also recognize the use + of parentheses to override operator precedences (see example below). + + Note: if you define a deep operator list, you may see performance issues + when using infixNotation. See L{ParserElement.enablePackrat} for a + mechanism to potentially improve your parser performance. 
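+
+    A minimal sketch (illustrative only; the full example below shows more)::
+        arith = infixNotation(Word(nums),
+            [(oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT)])
+        arith.parseString("1+2*3")   # -> [['1', '+', ['2', '*', '3']]]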
+ + Parameters: + - baseExpr - expression representing the most basic element for the nested + - opList - list of tuples, one for each operator precedence level in the + expression grammar; each tuple is of the form + (opExpr, numTerms, rightLeftAssoc, parseAction), where: + - opExpr is the pyparsing expression for the operator; + may also be a string, which will be converted to a Literal; + if numTerms is 3, opExpr is a tuple of two expressions, for the + two operators separating the 3 terms + - numTerms is the number of terms for this operator (must + be 1, 2, or 3) + - rightLeftAssoc is the indicator whether the operator is + right or left associative, using the pyparsing-defined + constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. + - parseAction is the parse action to be associated with + expressions matching this operator expression (the + parse action tuple member may be omitted); if the parse action + is passed a tuple or list of functions, this is equivalent to + calling C{setParseAction(*fn)} (L{ParserElement.setParseAction}) + - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) + - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) + + Example:: + # simple example of four-function arithmetic with ints and variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infixNotation(integer | varname, + [ + ('-', 1, opAssoc.RIGHT), + (oneOf('* /'), 2, opAssoc.LEFT), + (oneOf('+ -'), 2, opAssoc.LEFT), + ]) + + arith_expr.runTests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', fullDump=False) + prints:: + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + ret = Forward() + lastExpr = baseExpr | ( lpar + ret + rpar ) + for i,operDef in enumerate(opList): + opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] + termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr + if arity == 3: + if opExpr is None or len(opExpr) != 2: + raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") + opExpr1, opExpr2 = opExpr + thisExpr = Forward().setName(termName) + if rightLeftAssoc == opAssoc.LEFT: + if arity == 1: + matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) + else: + matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ + Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + elif rightLeftAssoc == opAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Optional): + opExpr = Optional(opExpr) + matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) + else: + matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ + Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or 
ternary (3)") + else: + raise ValueError("operator must indicate right or left associativity") + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.setParseAction(*pa) + else: + matchExpr.setParseAction(pa) + thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) + lastExpr = thisExpr + ret <<= lastExpr + return ret + +operatorPrecedence = infixNotation +"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" + +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") +quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") + +def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): + """ + Helper method for defining nested lists enclosed in opening and closing + delimiters ("(" and ")" are the default). + + Parameters: + - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression + - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression + - content - expression for items within the nested lists (default=C{None}) + - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) + + If an expression is not provided for the content argument, the nested + expression will capture all whitespace-delimited content between delimiters + as a list of separate values. + + Use the C{ignoreExpr} argument to define expressions that may contain + opening or closing characters that should not be treated as opening + or closing characters for nesting, such as quotedString or a comment + expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. + The default is L{quotedString}, but if no expressions are to be ignored, + then pass C{None} for this argument. 
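+
+    A minimal sketch (illustrative only)::
+        nestedExpr().parseString("(a (b c) d)")   # -> [['a', ['b', 'c'], 'd']]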
+ + Example:: + data_type = oneOf("void int short long char float double") + decl_data_type = Combine(data_type + Optional(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR,RPAR = map(Suppress, "()") + + code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(cStyleComment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.searchString(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + prints:: + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener,basestring) and isinstance(closer,basestring): + if len(opener) == 1 and len(closer)==1: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS + ).setParseAction(lambda t:t[0].strip())) + else: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + ~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + raise ValueError("opening and closing arguments must be strings if no content expression is given") + ret = Forward() + if ignoreExpr is not None: + ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) + else: + ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) + ret.setName('nested %s%s expression' % (opener,closer)) + return ret + +def indentedBlock(blockStatementExpr, indentStack, indent=True): + """ + Helper method for defining space-delimited indentation blocks, such as + those used to define block statements in Python source code. + + Parameters: + - blockStatementExpr - expression defining syntax of statement that + is repeated within the indented block + - indentStack - list created by caller to manage indentation stack + (multiple statementWithIndentedBlock expressions within a single grammar + should share a common indentStack) + - indent - boolean indicating whether block must be indented beyond the + the current level; set to False for block of left-most statements + (default=C{True}) + + A valid block must contain at least one C{blockStatement}. 
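+
+    A minimal sketch (illustrative only; C{indentStack} must be a list shared by
+    all indented blocks in the grammar)::
+        indentStack = [1]
+        # one or more alpha "statements", grouped by indentation level
+        suite = indentedBlock(Word(alphas), indentStack)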
+ + Example:: + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group( funcDecl + func_body ) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << ( funcDef | assignment | identifier ) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + prints:: + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + def checkPeerIndent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseFatalException(s,l,"illegal nesting") + raise ParseException(s,l,"not a peer entry") + + def checkSubIndent(s,l,t): + curCol = col(l,s) + if curCol > indentStack[-1]: + indentStack.append( curCol ) + else: + raise ParseException(s,l,"not a subentry") + + def checkUnindent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): + raise ParseException(s,l,"not an unindent") + indentStack.pop() + + NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) + INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') + PEER = Empty().setParseAction(checkPeerIndent).setName('') + UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') + if indent: + smExpr = Group( Optional(NL) + + #~ FollowedBy(blockStatementExpr) + + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) + else: + smExpr = Group( Optional(NL) + + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.setName('indented block') + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") +def replaceHTMLEntity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + +# it's easy to get these comment structures wrong - they're very common, so may as well make them available +cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") +"Comment of the form C{/* ... */}" + +htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") +"Comment of the form C{<!-- ... -->}" + +restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") +dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") +"Comment of the form C{// ... 
(to end of line)}" + +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" + +javaStyleComment = cppStyleComment +"Same as C{L{cppStyleComment}}" + +pythonStyleComment = Regex(r"#.*").setName("Python style comment") +"Comment of the form C{# ... (to end of line)}" + +_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + + Optional( Word(" \t") + + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") +commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. + This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """ + Here are some common low-level expressions that may be useful in jump-starting parser development: + - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>}) + - common L{programming identifiers<identifier>} + - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>}) + - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>} + - L{UUID<uuid>} + - L{comma-separated list<comma_separated_list>} + Parse actions: + - C{L{convertToInteger}} + - C{L{convertToFloat}} + - C{L{convertToDate}} + - C{L{convertToDatetime}} + - C{L{stripHTMLTags}} + - C{L{upcaseTokens}} + - C{L{downcaseTokens}} + + Example:: + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + prints:: + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convertToInteger = tokenMap(int) + """ + Parse action for converting parsed integers to Python int + """ + + convertToFloat = tokenMap(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).setName("integer").setParseAction(convertToInteger) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) + """expression that 
parses a hexadecimal integer, returns an int""" + + signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.addParseAction(lambda t: t[0]/t[-1]) + + mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.addParseAction(sum) + + real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) + """expression that parses a floating point number and returns a float""" + + sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + """expression that parses a floating point number with optional scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) + """any int or real number, returned as float""" + + identifier = Word(alphas+'_', alphanums+'_').setName("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") + "IPv4 address (C{0.0.0.0 - 255.255.255.255})" + + _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") + _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") + ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convertToDate(fmt="%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) + + Example:: + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + prints:: + [datetime.date(1999, 12, 31)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt).date() + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + @staticmethod + def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): + """ + Helper to create a parse action for converting parsed datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) + + Example:: + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + prints:: + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") + "ISO8601 date (C{yyyy-mm-dd})" + + iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") + "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" + + uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") + "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" + + _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() + @staticmethod + def stripHTMLTags(s, l, tokens): + """ + Parse action to remove HTML tags from web page HTML source + + Example:: + # strip HTML links from normal text + text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' + td,td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + + print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' + """ + return pyparsing_common._html_stripper.transformString(tokens[0]) + + _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + + Optional( White(" \t") ) ) ).streamline().setName("commaItem") + comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) + """Parse action to convert tokens to upper case.""" + + downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) + """Parse action to convert tokens to lower case.""" + + +if __name__ == "__main__": + + selectToken = CaselessLiteral("select") + fromToken = CaselessLiteral("from") + + ident = Word(alphas, alphanums + "_$") + + columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + columnNameList = Group(delimitedList(columnName)).setName("columns") + columnSpec 
= ('*' | columnNameList) + + tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + tableNameList = Group(delimitedList(tableName)).setName("tables") + + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") + + # demo runTests method, including embedded comments in test string + simpleSQL.runTests(""" + # '*' as column list and dotted table name + select * from SYS.XYZZY + + # caseless match on "SELECT", and casts back to "select" + SELECT * from XYZZY, ABC + + # list of column names, and mixed case SELECT keyword + Select AA,BB,CC from Sys.dual + + # multiple tables + Select A, B, C from Sys.dual, Table2 + + # invalid SELECT keyword - should fail + Xelect A, B, C from Sys.dual + + # incomplete command - should fail + Select + + # invalid column name - should fail + Select ^^^ frox Sys.dual + + """) + + pyparsing_common.number.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + # any int or real number, returned as float + pyparsing_common.fnumber.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + pyparsing_common.hex_integer.runTests(""" + 100 + FF + """) + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(""" + 12345678-1234-5678-1234-567812345678 + """) diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.pyc new file mode 100644 index 0000000..8c93308 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/six.py b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/six.py new file mode 100644 index 0000000..190c023 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/six.py @@ -0,0 +1,868 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2015 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson <benjamin@python.org>" +__version__ = "1.10.0" + + +# Useful for very coarse version differentiation. 
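(An illustrative aside, not part of the vendored six.py: the PY2/PY3 flags defined just below are the hook downstream code uses to branch once per interpreter family, usually to alias types rather than to scatter sys.version_info checks. A minimal sketch, assuming six is importable:

    import six

    def is_text(value):
        # string_types, assigned a few lines below, is (basestring,) on
        # Python 2 and (str,) on Python 3, so one check covers both families
        return isinstance(value, six.string_types)

    if six.PY2:              # coarse guard, as the comment above says
        text_type = unicode  # only ever evaluated on a Python 2 interpreter
    else:
        text_type = str

six itself applies exactly this pattern in the if PY3: block that follows.)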
+PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." 
+ fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", 
"http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." 
+ attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", 
"urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = 
_importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack 
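(A hedged usage sketch, not vendored code: the byte/text helpers being assembled here let literals be single-sourced across both interpreter families.

    from six import b, u, int2byte

    header = b("GIF89a") + int2byte(0)   # bytes on Python 2 and 3 alike
    label = u("caf\u00e9")               # text (unicode/str) on both

On Python 3, as defined above, b() encodes with latin-1, u() is the identity, and int2byte packs a single byte via struct; the Python 2 definitions a few lines further down mirror them with str and unicode.)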
+ del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
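(A descriptive gloss, not vendored code: the branch that follows only fires for real Python 2 file objects that declare an encoding; unicode data is then encoded with the stream's own codec, honouring the stream's errors policy and defaulting to "strict" when none is set. In practice that makes the fallback safe to call as, say:

    import sys
    import six

    six.print_(u"caf\u00e9", file=sys.stdout)   # encoded with sys.stdout.encoding
                                                # when stdout is an encoding-aware file

)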
+ if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. 
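(An illustrative aside, not part of the vendored six.py: the metaclass helpers defined above, whose docstrings sketch the dummy-metaclass trick, are consumed like this:

    import six

    class Meta(type):
        pass

    class WithMeta(six.with_metaclass(Meta, object)):
        pass                        # Meta ends up as the real metaclass

    @six.add_metaclass(Meta)
    class AlsoWithMeta(object):
        pass                        # decorator rebuilds the class under Meta

Both spellings work unchanged on Python 2 and 3, sidestepping the mutually incompatible __metaclass__ = Meta and class C(metaclass=Meta) forms.)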
+__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/six.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/six.pyc new file mode 100644 index 0000000..22e8bd0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/_vendor/six.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/extern/__init__.py b/project/venv/lib/python2.7/site-packages/pkg_resources/extern/__init__.py new file mode 100644 index 0000000..c1eb9e9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/extern/__init__.py @@ -0,0 +1,73 @@ +import sys + + +class VendorImporter: + """ + A PEP 302 meta path importer for finding optionally-vendored + or otherwise naturally-installed packages from root_name. + """ + + def __init__(self, root_name, vendored_names=(), vendor_pkg=None): + self.root_name = root_name + self.vendored_names = set(vendored_names) + self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') + + @property + def search_path(self): + """ + Search first the vendor package then as a natural package. + """ + yield self.vendor_pkg + '.' + yield '' + + def find_module(self, fullname, path=None): + """ + Return self when fullname starts with root_name and the + target module is one vendored through this importer. + """ + root, base, target = fullname.partition(self.root_name + '.') + if root: + return + if not any(map(target.startswith, self.vendored_names)): + return + return self + + def load_module(self, fullname): + """ + Iterate over the search path to locate and load fullname. + """ + root, base, target = fullname.partition(self.root_name + '.') + for prefix in self.search_path: + try: + extant = prefix + target + __import__(extant) + mod = sys.modules[extant] + sys.modules[fullname] = mod + # mysterious hack: + # Remove the reference to the extant package/module + # on later Python versions to cause relative imports + # in the vendor package to resolve the same modules + # as those going through this importer. + if prefix and sys.version_info > (3, 3): + del sys.modules[extant] + return mod + except ImportError: + pass + else: + raise ImportError( + "The '{target}' package is required; " + "normally this is bundled with this package so if you get " + "this warning, consult the packager of your " + "distribution.".format(**locals()) + ) + + def install(self): + """ + Install this importer into sys.meta_path if not already present. 
+ """ + if self not in sys.meta_path: + sys.meta_path.append(self) + + +names = 'packaging', 'pyparsing', 'six', 'appdirs' +VendorImporter(__name__, names).install() diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/extern/__init__.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/extern/__init__.pyc new file mode 100644 index 0000000..0f6f77e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/extern/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/py31compat.py b/project/venv/lib/python2.7/site-packages/pkg_resources/py31compat.py new file mode 100644 index 0000000..a381c42 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/pkg_resources/py31compat.py @@ -0,0 +1,23 @@ +import os +import errno +import sys + +from .extern import six + + +def _makedirs_31(path, exist_ok=False): + try: + os.makedirs(path) + except OSError as exc: + if not exist_ok or exc.errno != errno.EEXIST: + raise + + +# rely on compatibility behavior until mode considerations +# and exists_ok considerations are disentangled. +# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663 +needs_makedirs = ( + six.PY2 or + (3, 4) <= sys.version_info < (3, 4, 1) +) +makedirs = _makedirs_31 if needs_makedirs else os.makedirs diff --git a/project/venv/lib/python2.7/site-packages/pkg_resources/py31compat.pyc b/project/venv/lib/python2.7/site-packages/pkg_resources/py31compat.pyc new file mode 100644 index 0000000..8b5044b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/pkg_resources/py31compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/INSTALLER b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/METADATA new file mode 100644 index 0000000..0936b0c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/METADATA @@ -0,0 +1,50 @@ +Metadata-Version: 2.1 +Name: scipy +Version: 1.2.1 +Summary: SciPy: Scientific Library for Python +Home-page: https://www.scipy.org +Maintainer: SciPy Developers +Maintainer-email: scipy-dev@python.org +License: BSD +Download-URL: https://github.com/scipy/scipy/releases +Platform: Windows +Platform: Linux +Platform: Solaris +Platform: Mac OS-X +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Software Development +Classifier: Topic :: Scientific/Engineering +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* +Requires-Dist: numpy 
(>=1.8.2) + +SciPy (pronounced "Sigh Pie") is open-source software for mathematics, +science, and engineering. The SciPy library +depends on NumPy, which provides convenient and fast N-dimensional +array manipulation. The SciPy library is built to work with NumPy +arrays, and provides many user-friendly and efficient numerical +routines such as routines for numerical integration and optimization. +Together, they run on all popular operating systems, are quick to +install, and are free of charge. NumPy and SciPy are easy to use, +but powerful enough to be depended upon by some of the world's +leading scientists and engineers. If you need to manipulate +numbers on a computer and display or publish the results, +give SciPy a try! + + + diff --git a/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/RECORD new file mode 100644 index 0000000..29c3dec --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/RECORD @@ -0,0 +1,1554 @@ +scipy-1.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +scipy-1.2.1.dist-info/METADATA,sha256=7NaRVVbGD7OllwHfbWwRSQolC02ZsF-jt80UBkcNPOM,1983 +scipy-1.2.1.dist-info/RECORD,, +scipy-1.2.1.dist-info/WHEEL,sha256=M5Ujap42zjfAFnpJOoFU72TFHuBKh-JF0Rqu5vZhkVE,110 +scipy-1.2.1.dist-info/top_level.txt,sha256=7wj5qJez-Vf-iL7K4uj9tRbdVCKVxpl7PqdN9UGMfuA,6 +scipy/.libs/libgfortran-ed201abd.so.3.0.0,sha256=xkH_25s9lTuTrEf3fE8GZK68sYSPsIlP3SJ6Qze9aIM,1023960 +scipy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so,sha256=MgisyqBW3cKbBpB8GwsyuN2JKQUJtVPllGByvSYLck4,29764696 +scipy/HACKING.rst.txt,sha256=vGdtk1NCRso3Az7TaME2GeJBggtbOlBaaCAFzJjneHs,22904 +scipy/INSTALL.rst.txt,sha256=Adb-7OPr19G0MHLSCXjVTtBFem3mTWLPmCzOtLpOL-M,6977 +scipy/LICENSE.txt,sha256=qMROaN0Ydf2rAKUnmFKioDt7cvrjGINIjy75n_pEELo,52947 +scipy/THANKS.txt,sha256=sxdIxpnu8j1iZwgFCrtYGG0hTXMx_dGdznpOcU42kGw,11509 +scipy/__config__.py,sha256=MSkYnRKtv2Ifw3RfI09_9uWjb6sqSHoVga6fk57Kv3k,1123 +scipy/__config__.pyc,, +scipy/__init__.py,sha256=6_EGCsUhXWxG59rJDtrU328Sa7HgLfTBcJ10KrjzyGQ,4260 +scipy/__init__.pyc,, +scipy/_build_utils/__init__.py,sha256=PM6Caqo72W7orUGNs65cR1FZc6iS2kiq0zQXLLTDVNE,714 +scipy/_build_utils/__init__.pyc,, +scipy/_build_utils/_fortran.py,sha256=E4GphPEFtmJuNRHKatDXuXF6Q12pc921Hw3VSK6UfBw,3901 +scipy/_build_utils/_fortran.pyc,, +scipy/_build_utils/system_info.py,sha256=vi6fdBwzWfl71SpYDaZfY9A0FS1sfkjGdj5BF209tyk,5987 +scipy/_build_utils/system_info.pyc,, +scipy/_distributor_init.py,sha256=5z0JS-bSGJ02fbwj9Zdo_416Vemn-f4Kc-Fkc_PPOJA,331 +scipy/_distributor_init.pyc,, +scipy/_lib/__init__.py,sha256=o0ZskBdSonP_HjTKBbIpjuklQFRpZ39hRxyIdm4cBt4,420 +scipy/_lib/__init__.pyc,, +scipy/_lib/_ccallback.py,sha256=pEYflFFDqIZVoPuRLjZbdYEGM8I0hYWxsO4M547DWkg,6197 +scipy/_lib/_ccallback.pyc,, +scipy/_lib/_ccallback_c.so,sha256=MDDtdtQU1T1sochKECusGSBrx3Jk-gIPnlLoibYzSTE,60056 +scipy/_lib/_fpumode.so,sha256=69w_PVBJWPViTcfDw0NxvTEHZkCna8ECgHHAdlo5Z2I,4952 +scipy/_lib/_gcutils.py,sha256=meDF0lzssX9E3LONfXvuymVgWNNZ7SGjXCo3uVpvDsI,2645 +scipy/_lib/_gcutils.pyc,, +scipy/_lib/_numpy_compat.py,sha256=jLjS9gtewJs6q5XGkKecqQ57yoKrRxjk7UdzIsV36ls,30886 +scipy/_lib/_numpy_compat.pyc,, +scipy/_lib/_test_ccallback.so,sha256=kx5A4D7564zvHobDSWSqJFrc_eE9TZ5bxif6n1c2H3c,14352 +scipy/_lib/_testutils.py,sha256=uz231Jfmw0CatQBSq5yLYGmtrpiCtzr1bpMbcZEFixg,3438 +scipy/_lib/_testutils.pyc,, +scipy/_lib/_threadsafety.py,sha256=9mMyHYPRT1TPKjyc5gxHMpqcgsVZbuSGnu5sD0hPwKA,1530 
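(An aside on the RECORD rows above and below, which follow the path,hash,size convention of installed-files manifests: the hash column is the SHA-256 of the file, base64-encoded with the URL-safe alphabet and '=' padding stripped, and rows ending in ',,' such as the .pyc entries deliberately omit hash and size. A minimal sketch to recompute one entry, assuming the path is readable relative to site-packages:

    import base64
    import hashlib

    def record_hash(path):
        # RECORD-style digest: sha256, urlsafe base64, padding stripped
        with open(path, 'rb') as f:
            digest = hashlib.sha256(f.read()).digest()
        return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

    print(record_hash('scipy/version.py'))   # hypothetical path, for illustration

)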
+scipy/signal/tests/test_cont2discrete.py,sha256=t0AP65DR7ks9Sb1j4_VW8hiJbZR5bpaD25tZxd8Jor8,12323 +scipy/signal/tests/test_cont2discrete.pyc,, +scipy/signal/tests/test_dltisys.py,sha256=Cw1fsi7ME_2zR4eWevUx8jilnoDqpkUvGULy80_xOvw,21810 +scipy/signal/tests/test_dltisys.pyc,, +scipy/signal/tests/test_filter_design.py,sha256=0BRX4z_OroRgSgtgiwFYn5y9jM0c3ELZ7_pso8d6KxM,161178 +scipy/signal/tests/test_filter_design.pyc,, +scipy/signal/tests/test_fir_filter_design.py,sha256=rkWwzD9LgzfuWWjvNDTZN3eWkmBVFJAtst7nVi11F-4,23711 +scipy/signal/tests/test_fir_filter_design.pyc,, +scipy/signal/tests/test_ltisys.py,sha256=6d3rpGVVLuJoYHBDWVgX9ev6Mc1rQOKV7uffKkz3pfQ,46335 +scipy/signal/tests/test_ltisys.pyc,, +scipy/signal/tests/test_max_len_seq.py,sha256=Lu9ZGW-7u7Pdh2lnQOZbjZMwJgzCpnpwp940K1d0YxQ,3181 +scipy/signal/tests/test_max_len_seq.pyc,, +scipy/signal/tests/test_peak_finding.py,sha256=jSJRQl_QLyWooM-zoQ_E8Fv3Xqq1e-sbpRcRkPIHTqo,31595 +scipy/signal/tests/test_peak_finding.pyc,, +scipy/signal/tests/test_savitzky_golay.py,sha256=6XdxOIahhqhEUTQH695tbvPjWBE3Pt_c9YwyKL2qizE,9843 +scipy/signal/tests/test_savitzky_golay.pyc,, +scipy/signal/tests/test_signaltools.py,sha256=1CkCGcQL7yl86FSVXkMMTjVpCMXZKDd4F_UNdFbl-ig,100911 +scipy/signal/tests/test_signaltools.pyc,, +scipy/signal/tests/test_spectral.py,sha256=iwaa4zq3QsMINNi-xwXO6G022X9G9wkVbVxmKf_b4OQ,53573 +scipy/signal/tests/test_spectral.pyc,, +scipy/signal/tests/test_upfirdn.py,sha256=AVsyWz7uYIcYbl1EeaPUaQjJ0dj8w8KyWWZMK97jkjw,6659 +scipy/signal/tests/test_upfirdn.pyc,, +scipy/signal/tests/test_waveforms.py,sha256=sgFGtDxpf4xp7keJO05jSSTVuoH2XyzX3y4R_w0GXI8,12071 +scipy/signal/tests/test_waveforms.pyc,, +scipy/signal/tests/test_wavelets.py,sha256=GpGyUkAkhe5a68y1hxxV5q45g_gc95L001_GyOEWVzI,5192 +scipy/signal/tests/test_wavelets.pyc,, +scipy/signal/tests/test_windows.py,sha256=Ssriflwo5g7byx6KsJAYiiKNwko87w-2aaWuByd7gO0,33273 +scipy/signal/tests/test_windows.pyc,, +scipy/signal/waveforms.py,sha256=VM4MyajbEdU5EbyliXo6gfHhKa55D670iM5leBNnybs,21071 +scipy/signal/waveforms.pyc,, +scipy/signal/wavelets.py,sha256=IEUolnsIRbSTN2Z6rT6mmVVdTcQ8P3YfQE9Gp-aDTvk,10523 +scipy/signal/wavelets.pyc,, +scipy/signal/windows/__init__.py,sha256=6TJgpJosr2hIb1_e2SftVMTNMOhxMmJ-4JaPSdW4xnU,1778 +scipy/signal/windows/__init__.pyc,, +scipy/signal/windows/setup.py,sha256=IVxOLBTza_mZ4m0n7aFGr5reC-iRY2SZw-4INW4DT54,294 +scipy/signal/windows/setup.pyc,, +scipy/signal/windows/windows.py,sha256=HVypSNUEXJRx4Zqt4uzZ6K932suZTVmRL-jT96gnL-U,73569 +scipy/signal/windows/windows.pyc,, +scipy/sparse/__init__.py,sha256=Aph861fSyW8iIN-PaCLD3_SyonYZQ4tXIUx746onGWI,6935 +scipy/sparse/__init__.pyc,, +scipy/sparse/_csparsetools.so,sha256=x73rA-HONzr8dAH25PyZpvD4oLUry8P_OJit-Ti_Xdg,446496 +scipy/sparse/_matrix_io.py,sha256=t-0hBrYQND85E0KJh66Vr20V_-u62mgwzWBZ6kNXYZo,5547 +scipy/sparse/_matrix_io.pyc,, +scipy/sparse/_sparsetools.so,sha256=pXh_PlKIitFxQrXd1amM1E2Uha-B8uimUdDaqpRa-FY,3409448 +scipy/sparse/base.py,sha256=s8MXNs8RhFHF9u3BCPrinBw3MTjd3hSHJ8YHJ8majvw,41295 +scipy/sparse/base.pyc,, +scipy/sparse/bsr.py,sha256=ZQYh51fudgfSBi6geDBf9iV41Wzq5lAXfT6qYncpHUE,25112 +scipy/sparse/bsr.pyc,, +scipy/sparse/compressed.py,sha256=QyQIwYfd_wRFifCY-gUCJJ2cuHQQQkAqeadHtY84srY,47106 +scipy/sparse/compressed.pyc,, +scipy/sparse/construct.py,sha256=u05wZf5p2ER4cj4bYCCs1IIFDq62i5IR37B_3NWWPbc,26068 +scipy/sparse/construct.pyc,, +scipy/sparse/coo.py,sha256=jU7sRbPc31RYGAnGwk0PfYXB93DtohNujnXgrXndpvk,21824 +scipy/sparse/coo.pyc,, +scipy/sparse/csc.py,sha256=L34orCSXX35YtP5kVlSvFhV62Ip3DI5xjcFAqmhXeoM,7786 
+scipy/sparse/csc.pyc,, +scipy/sparse/csgraph/__init__.py,sha256=86IO9Yl6NCcABYqgzUNgIoqz3g5iEbf36IzNG5XexNw,7448 +scipy/sparse/csgraph/__init__.pyc,, +scipy/sparse/csgraph/_laplacian.py,sha256=ySRYAx3aqjEEt_XkjygSHSVqz45o4FlOI4Pxn5ICOUg,4081 +scipy/sparse/csgraph/_laplacian.pyc,, +scipy/sparse/csgraph/_min_spanning_tree.so,sha256=KSWq5P0KRXerX4W5QpMjYxBNDfBoqgj1wNbJB9p49fw,198720 +scipy/sparse/csgraph/_reordering.so,sha256=QMMeDscCU7SNkCjaDUFTlhUs7vvhN2JXqQ_slEXHY8E,328352 +scipy/sparse/csgraph/_shortest_path.so,sha256=Fg99dol3OEkPFgRZEMDSYH5fgx42PUKrqSCu5Tw01A0,261440 +scipy/sparse/csgraph/_tools.so,sha256=Eh_yI5dUrtUNsptUvNiYveWQ42Wj6QfGlyTTnqrcpAM,162624 +scipy/sparse/csgraph/_traversal.so,sha256=u3R5TCNxtK1X6PTHwoJ8ZbBZ-IhaILLzOHrDjxEb3ro,157728 +scipy/sparse/csgraph/_validation.py,sha256=K3zWrRpHABVTy_38sIL05SA0Y-RDWbp9jAk0wczDKx0,2405 +scipy/sparse/csgraph/_validation.pyc,, +scipy/sparse/csgraph/setup.py,sha256=KcdQSI5huvSB50Ye0awnDSUUg5LVZttcLHYz3NPrINw,933 +scipy/sparse/csgraph/setup.pyc,, +scipy/sparse/csgraph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/csgraph/tests/__init__.pyc,, +scipy/sparse/csgraph/tests/test_connected_components.py,sha256=xSc7Gk3BdFWzHenekAaN5wRa7jdJpO7lgwknTq-Wh0o,3265 +scipy/sparse/csgraph/tests/test_connected_components.pyc,, +scipy/sparse/csgraph/tests/test_conversions.py,sha256=q_Lw1VQJHPlybM95IqB1lGsChEPD6TY3RgSYjK5RZ4o,2047 +scipy/sparse/csgraph/tests/test_conversions.pyc,, +scipy/sparse/csgraph/tests/test_graph_laplacian.py,sha256=P45_c6x8kC3hSdhLdxLBU4Vd_K0sJFQOtUE3Plof990,4388 +scipy/sparse/csgraph/tests/test_graph_laplacian.pyc,, +scipy/sparse/csgraph/tests/test_reordering.py,sha256=9Lm4-Yfn9i8ajk7j39x8gordMEWfDB41xglmSStrqFU,4190 +scipy/sparse/csgraph/tests/test_reordering.pyc,, +scipy/sparse/csgraph/tests/test_shortest_path.py,sha256=KwN3lv5P6svgIbGMGF8nk0cCSRVwSzm-Eo0TS7N1S_c,6718 +scipy/sparse/csgraph/tests/test_shortest_path.pyc,, +scipy/sparse/csgraph/tests/test_spanning_tree.py,sha256=f6ak34Os8Q3AtAu1hpRYWI1j2u44Y5JeUp8xOHPaJSY,2181 +scipy/sparse/csgraph/tests/test_spanning_tree.pyc,, +scipy/sparse/csgraph/tests/test_traversal.py,sha256=AKcyL7QOWvFU304_j7tjy4DLBuFWAK1vJFfRCRiCrq8,2391 +scipy/sparse/csgraph/tests/test_traversal.pyc,, +scipy/sparse/csr.py,sha256=UTdewAXlyKD_9Di7tSJXBT_iRQhmDtJpCB3OSfaBqgo,16496 +scipy/sparse/csr.pyc,, +scipy/sparse/data.py,sha256=z6giGRtIanImqvDJr6T2tRBOGBH3GvjOiHlgiTVABGo,12708 +scipy/sparse/data.pyc,, +scipy/sparse/dia.py,sha256=jHSyHdAp6Jt5woint2pIzUyLoTWP4o9YUIDYgclVfE4,13964 +scipy/sparse/dia.pyc,, +scipy/sparse/dok.py,sha256=K0y3MDwFVGB9jMCOK-kqnTfL0N9ne4l3jTRsUeT_36g,18773 +scipy/sparse/dok.pyc,, +scipy/sparse/extract.py,sha256=1Zwb2-ClIsAGJ-yeLfGqYplIve4AD-yNQALlRMRWkQc,4713 +scipy/sparse/extract.pyc,, +scipy/sparse/generate_sparsetools.py,sha256=e_TJVJAp8l7od5jX-7Z9BxmoAajWuQW6krDheBSKfZw,12723 +scipy/sparse/generate_sparsetools.pyc,, +scipy/sparse/lil.py,sha256=qUubNhdyTJmz_mBEGlmgZILeDT28VNZup7LMAhZebBE,17782 +scipy/sparse/lil.pyc,, +scipy/sparse/linalg/__init__.py,sha256=aEA5OGYXJ0KghY8Wq_3uUpNaq2N6NlzxSq222lcVPiE,3482 +scipy/sparse/linalg/__init__.pyc,, +scipy/sparse/linalg/_expm_multiply.py,sha256=I2UwMXhly9HQMVVxnIJnrVbhvnK_zomGVPCxY8J8G9U,21549 +scipy/sparse/linalg/_expm_multiply.pyc,, +scipy/sparse/linalg/_norm.py,sha256=hOxW43f-n8v_WBB73elLR2WaiSz724U5NDJCqmOMcNQ,5867 +scipy/sparse/linalg/_norm.pyc,, +scipy/sparse/linalg/_onenormest.py,sha256=0B5No3FdblmWmsJeeIxqTDW4RwuteYke3LlpxFhp1Ug,15529 +scipy/sparse/linalg/_onenormest.pyc,, 
+scipy/sparse/linalg/dsolve/SuperLU/License.txt,sha256=8M7fUlA7LUK4NBGgoW5v76w0bfrY_dxm9QBQFQEjRww,1681 +scipy/sparse/linalg/dsolve/__init__.py,sha256=QEFvmPNwq0agVl4pcTeFOAJA4DFhBCrYYJ1ge3-3OTY,1953 +scipy/sparse/linalg/dsolve/__init__.pyc,, +scipy/sparse/linalg/dsolve/_add_newdocs.py,sha256=ifY6tEpeYU2atQ-TfrTo816qqPGYkpXehs6gkX7BP4k,3801 +scipy/sparse/linalg/dsolve/_add_newdocs.pyc,, +scipy/sparse/linalg/dsolve/_superlu.so,sha256=RXmH4O6DdeScsEufhNtfTNhQZI7_tfIqoKjw2YFy1j4,338008 +scipy/sparse/linalg/dsolve/linsolve.py,sha256=IcU4rKTczCsS8hKaZqHphc6Z-n6Mg-ZgI-gGlRhEWrg,19279 +scipy/sparse/linalg/dsolve/linsolve.pyc,, +scipy/sparse/linalg/dsolve/setup.py,sha256=6Bo_Jdauq3xIRnKpCiHySmBG553iGhPy77vRNGpr9PA,1692 +scipy/sparse/linalg/dsolve/setup.pyc,, +scipy/sparse/linalg/dsolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/dsolve/tests/__init__.pyc,, +scipy/sparse/linalg/dsolve/tests/test_linsolve.py,sha256=ZVc5y57_SSy1hIFv0EBmmcpWisgkx8aoLHMqQMoXY88,24309 +scipy/sparse/linalg/dsolve/tests/test_linsolve.pyc,, +scipy/sparse/linalg/eigen/__init__.py,sha256=15Urm1FQyZfEX56_MIPmK4zIgLCkNW1nDil4QTp8cV0,439 +scipy/sparse/linalg/eigen/__init__.pyc,, +scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING,sha256=CSZWb59AYXjRIU-Mx5bhZrEhPdfAXgxbRhqLisnlC74,1892 +scipy/sparse/linalg/eigen/arpack/__init__.py,sha256=l42-1QNhgDfCJgEoyypkLG3hC4134bvPF3WzHpH8ejE,628 +scipy/sparse/linalg/eigen/arpack/__init__.pyc,, +scipy/sparse/linalg/eigen/arpack/_arpack.so,sha256=1xBr_QaxgDme81aR2trTqB7ntqVUQUatzpr1_o0ed9o,621288 +scipy/sparse/linalg/eigen/arpack/arpack.py,sha256=6GMLmyJsoB4qgYH6zhEYzlCFVn6rJX6U1_vXjqCSl_Q,73623 +scipy/sparse/linalg/eigen/arpack/arpack.pyc,, +scipy/sparse/linalg/eigen/arpack/setup.py,sha256=ga_cwZCDZn5oc6SYnCb3vJfd-AG-qfi-9tLqwkp_UZQ,1264 +scipy/sparse/linalg/eigen/arpack/setup.pyc,, +scipy/sparse/linalg/eigen/arpack/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/eigen/arpack/tests/__init__.pyc,, +scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py,sha256=jDd93d5Fyj0Fgu1zf838hjjR43nsdB0TK_d84QQO0ZI,31515 +scipy/sparse/linalg/eigen/arpack/tests/test_arpack.pyc,, +scipy/sparse/linalg/eigen/lobpcg/__init__.py,sha256=3iA6L0vWT8ix5KaSBI5II8dqNfBxq_1aCouWhvBwj_4,486 +scipy/sparse/linalg/eigen/lobpcg/__init__.pyc,, +scipy/sparse/linalg/eigen/lobpcg/lobpcg.py,sha256=Mdd5DT2bXhYo7PCX6gHQUXzEuBv-qrzrXX13--K_hDk,19912 +scipy/sparse/linalg/eigen/lobpcg/lobpcg.pyc,, +scipy/sparse/linalg/eigen/lobpcg/setup.py,sha256=25B8jnvvRzGIvsPWLFCigZhELLvrwHjqYMgaSgMHTa8,410 +scipy/sparse/linalg/eigen/lobpcg/setup.pyc,, +scipy/sparse/linalg/eigen/lobpcg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/eigen/lobpcg/tests/__init__.pyc,, +scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py,sha256=jfLx3_ZmfkXgbL8jzKEE9M0_1qpBYMnuYJt7NC_JDV4,7688 +scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.pyc,, +scipy/sparse/linalg/eigen/setup.py,sha256=XdsBP6GprbsI8CZyUiewpdcS0BUH0-A-k6VgV2cpSpg,453 +scipy/sparse/linalg/eigen/setup.pyc,, +scipy/sparse/linalg/interface.py,sha256=maKWOCEa2zwdGwqsOJmWGXKTtqZNR03aMsJ--iShjiw,21752 +scipy/sparse/linalg/interface.pyc,, +scipy/sparse/linalg/isolve/__init__.py,sha256=zJOobRo-Z3YznKAuT39bJ2QJ-IKKhnK2kcln-KpSUK0,444 +scipy/sparse/linalg/isolve/__init__.pyc,, +scipy/sparse/linalg/isolve/_gcrotmk.py,sha256=1C3VVX4ZnBS_rY3IrTNMeOkugVL7AYuMHC1hpurXh7M,15478 +scipy/sparse/linalg/isolve/_gcrotmk.pyc,, 
+scipy/sparse/linalg/isolve/_iterative.so,sha256=0tz_rtXyZe42qI8gBhTs-4DocYOHitoU_nZxNuP7k5c,251720 +scipy/sparse/linalg/isolve/iterative.py,sha256=LUoYmQ7ULg-PvsrL_L-49VefL6iKiPDYVlWBn1Q-6s0,24798 +scipy/sparse/linalg/isolve/iterative.pyc,, +scipy/sparse/linalg/isolve/lgmres.py,sha256=KRJLZpMOehh3IS7Z4UYaBsCVRQbBs5M76w6B8U4Jh8g,8842 +scipy/sparse/linalg/isolve/lgmres.pyc,, +scipy/sparse/linalg/isolve/lsmr.py,sha256=YQW5M78ZknjjceolDZs-X4tpAfbdKhN3RtsWRSITrzA,15128 +scipy/sparse/linalg/isolve/lsmr.pyc,, +scipy/sparse/linalg/isolve/lsqr.py,sha256=hh6c9PegtSGkJp0fnCLq5UvnqTmPuwBS-M5CYDPxWiI,19988 +scipy/sparse/linalg/isolve/lsqr.pyc,, +scipy/sparse/linalg/isolve/minres.py,sha256=0QVZA8YR3lMAgePJPqkVRxJz9gSOjgq_0Gsb4rc-qLM,10466 +scipy/sparse/linalg/isolve/minres.pyc,, +scipy/sparse/linalg/isolve/setup.py,sha256=-lrNBbz_J7VoHbTVFPNfqM-BbrmGCHp-ehhx7tHtbuk,1285 +scipy/sparse/linalg/isolve/setup.pyc,, +scipy/sparse/linalg/isolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/isolve/tests/__init__.pyc,, +scipy/sparse/linalg/isolve/tests/demo_lgmres.py,sha256=bxr4q4iuIXeLLd5DbJo8JqtFHgKaQI8Tg4F5y8ESc84,1680 +scipy/sparse/linalg/isolve/tests/demo_lgmres.pyc,, +scipy/sparse/linalg/isolve/tests/test_gcrotmk.py,sha256=SN77w4DTuppFMduRTEyEycjfKg_UcisioXoXgopwaAc,5491 +scipy/sparse/linalg/isolve/tests/test_gcrotmk.pyc,, +scipy/sparse/linalg/isolve/tests/test_iterative.py,sha256=4Py_bIqPpCqmRJ7rKkAzuqbB_v3wubVzSDH0KKJonGU,21115 +scipy/sparse/linalg/isolve/tests/test_iterative.pyc,, +scipy/sparse/linalg/isolve/tests/test_lgmres.py,sha256=UyCbwmiiMzWRly31byIUxs2lYW4cOqKGkWM_ApE4TSw,7120 +scipy/sparse/linalg/isolve/tests/test_lgmres.pyc,, +scipy/sparse/linalg/isolve/tests/test_lsmr.py,sha256=Kc22vHKoOHUSUhxx9XMeuMMzqEaOuO9nuqp3Vs-sYUA,5317 +scipy/sparse/linalg/isolve/tests/test_lsmr.pyc,, +scipy/sparse/linalg/isolve/tests/test_lsqr.py,sha256=NPbVz5PBkuWz4ISAFmyEREWnG3L3C-rRQ2Oi93QeohQ,4268 +scipy/sparse/linalg/isolve/tests/test_lsqr.pyc,, +scipy/sparse/linalg/isolve/tests/test_minres.py,sha256=RrEs5otsW2ae9rljst2N9yNT0UXKpEhcNf6Hum-tqfg,1743 +scipy/sparse/linalg/isolve/tests/test_minres.pyc,, +scipy/sparse/linalg/isolve/tests/test_utils.py,sha256=_DtAWc0sDr03grrIyZ1RQ93VeaNCtJkcn92cys9osOg,301 +scipy/sparse/linalg/isolve/tests/test_utils.pyc,, +scipy/sparse/linalg/isolve/utils.py,sha256=jvi1LZzRYdGMSGo7ydbIV8hEwBiYowCDUI_pdTr7Ppc,3337 +scipy/sparse/linalg/isolve/utils.pyc,, +scipy/sparse/linalg/matfuncs.py,sha256=XQPIDt1JwXrDtNmZ3QQzK1z52N2JiBjSKfPaUNAVrRA,27169 +scipy/sparse/linalg/matfuncs.pyc,, +scipy/sparse/linalg/setup.py,sha256=ykPPQ1sWaZT0NixqXcw01T4FuWvwDNPLkL3EjdGcjQ4,525 +scipy/sparse/linalg/setup.pyc,, +scipy/sparse/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/tests/__init__.pyc,, +scipy/sparse/linalg/tests/test_expm_multiply.py,sha256=GwDap_abK2CcgNesO1hLhuwzJFQJNaYRxQbCivgxlTs,9658 +scipy/sparse/linalg/tests/test_expm_multiply.pyc,, +scipy/sparse/linalg/tests/test_interface.py,sha256=KWtjQbthBqdVF6kMxQ3yr-m60cGmJYWE4un8cJZ8lDs,13342 +scipy/sparse/linalg/tests/test_interface.pyc,, +scipy/sparse/linalg/tests/test_matfuncs.py,sha256=L9U-4wdB3zlldVNx7D-_MHUoG0adou8JQt-MDWo7Hro,20275 +scipy/sparse/linalg/tests/test_matfuncs.pyc,, +scipy/sparse/linalg/tests/test_norm.py,sha256=WK6WHiIrtOgle4Wai-eJ-ALJJUZwOA-TOfo6tIlagXY,5486 +scipy/sparse/linalg/tests/test_norm.pyc,, +scipy/sparse/linalg/tests/test_onenormest.py,sha256=BU9ILz9dC1Dx9Ra6msNkd6iDAHMo5vZ-g8NIcbKod0o,9311 
+scipy/sparse/linalg/tests/test_onenormest.pyc,, +scipy/sparse/setup.py,sha256=HUWhPTzJUs3JrGGnlTQBatGKf1s0o7mMBkPn7rWGbr0,2185 +scipy/sparse/setup.pyc,, +scipy/sparse/sparsetools.py,sha256=v5xmRUGH57yfB6HJKb4Y3UpUia67mxTnqiAu47JMPCw,716 +scipy/sparse/sparsetools.pyc,, +scipy/sparse/spfuncs.py,sha256=MmRMl92FLE0QiYzjKc1-bDJaPaA1oELi1OV9ymTVNW8,2823 +scipy/sparse/spfuncs.pyc,, +scipy/sparse/sputils.py,sha256=yzugUgDYtoNYzVIn3ox711DOrVXTDMhie2uzlCL29pk,15792 +scipy/sparse/sputils.pyc,, +scipy/sparse/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/tests/__init__.pyc,, +scipy/sparse/tests/data/csc_py2.npz,sha256=usJ_Gj6x_dEC2uObfdYc6D6C8JY4jjROFChQcZhNAfo,846 +scipy/sparse/tests/data/csc_py3.npz,sha256=axuEMVxwd0F-cgUS0IalpiF8KHW4GNJ3BK6bcjfGnf4,851 +scipy/sparse/tests/test_base.py,sha256=vpo5fslgV7PJCevlFuwW6FIEp4HPAZhPpALYYgNV7Dc,172613 +scipy/sparse/tests/test_base.pyc,, +scipy/sparse/tests/test_construct.py,sha256=Dgc4jNRk445_q1iJRisRFTL0_nSfd54KUJF7EqlWYy4,20223 +scipy/sparse/tests/test_construct.pyc,, +scipy/sparse/tests/test_csc.py,sha256=y_ZBKOIL2EmjLnp0dWW40aZOgv-eRPj02lygaC54NLw,859 +scipy/sparse/tests/test_csc.pyc,, +scipy/sparse/tests/test_csr.py,sha256=qCcYeywHN80L4H2XQYuJC4fyjtFHJ2Kq-bNfc7V80B8,1424 +scipy/sparse/tests/test_csr.pyc,, +scipy/sparse/tests/test_extract.py,sha256=PmeGbRYq94CJctNtg-PhEm1YLoaR7tRA0Sa2BB48X5E,1383 +scipy/sparse/tests/test_extract.pyc,, +scipy/sparse/tests/test_matrix_io.py,sha256=4r6TRFtHkemBQVBkqLBK04955oQ0TtiBY3hb_bwcXBs,2823 +scipy/sparse/tests/test_matrix_io.pyc,, +scipy/sparse/tests/test_sparsetools.py,sha256=1cCU0wAeWwbpljfbLR-L2-V0VwYNSQKvf-kuLd6hyTM,10154 +scipy/sparse/tests/test_sparsetools.pyc,, +scipy/sparse/tests/test_spfuncs.py,sha256=Ko9sEv_pLGNHeMNUm10DSp-mED1Zyi9UBsdYoq5eN14,3319 +scipy/sparse/tests/test_spfuncs.pyc,, +scipy/sparse/tests/test_sputils.py,sha256=bhmQyzT49-xPVIu7UaL6bZ_BXVrjJAtga8i6xyt4Ezg,5967 +scipy/sparse/tests/test_sputils.pyc,, +scipy/spatial/__init__.py,sha256=Nmo02Fa3Y_Sj4BliRneNxSRKQalQozuTyr6BkAZlGMo,3364 +scipy/spatial/__init__.pyc,, +scipy/spatial/_distance_wrap.so,sha256=DqpEpwbOrdlpjNivql9bUab9nEd3opVLrCXEHOUEn_Y,99800 +scipy/spatial/_hausdorff.so,sha256=LvOn08KXSaYtzeaHgOghprMbDiTjyabon5NC6AC1dFM,188864 +scipy/spatial/_plotutils.py,sha256=79mhOunzlsKz-ZXQAPvzJLTu4alPSUdaEan8TMYVSfQ,6900 +scipy/spatial/_plotutils.pyc,, +scipy/spatial/_procrustes.py,sha256=zXCjyjZzSyd_Wlg_b-r0fu6HGYpiQpE5QpXWWgJvli0,4466 +scipy/spatial/_procrustes.pyc,, +scipy/spatial/_spherical_voronoi.py,sha256=q4zWYFUKETNFuh8oRg9O_4fLns9Y0AknZBFr9iznKrI,13013 +scipy/spatial/_spherical_voronoi.pyc,, +scipy/spatial/_voronoi.so,sha256=3QMu_6DGmSD8RQLRcBDaDFude1fKXC742-KMLu3kTJI,188928 +scipy/spatial/ckdtree.so,sha256=1tGO5djJFER08qk8kvL9dvpwg8I-X2hbOjgGB2AyGU4,647936 +scipy/spatial/distance.py,sha256=DOJNaBH1gLE3nHP0NOApICbL3_ShBZ0DTP2xVUiF5k8,85765 +scipy/spatial/distance.pyc,, +scipy/spatial/kdtree.py,sha256=mI0Hg6Sneh8TD00RAWGClvv2U8u_D23ks-YoZBoosZU,38088 +scipy/spatial/kdtree.pyc,, +scipy/spatial/qhull.so,sha256=MqMgGrU4qwprV5MQn7bHxWnlNpPvlfRLkr6GSNVIotE,909400 +scipy/spatial/qhull_src/COPYING.txt,sha256=UaWgjVN7NP67hRx2FJbq1GH5C2w8RqUkh4DWOHASTpw,1635 +scipy/spatial/setup.py,sha256=VYtF8JB4-vrCDEWI6PoRATL7ThVPX9fTIR_pjMWQSzc,3121 +scipy/spatial/setup.pyc,, +scipy/spatial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/spatial/tests/__init__.pyc,, +scipy/spatial/tests/data/cdist-X1.txt,sha256=ULnYAgX2_AwOVF-VE7XfnW5S0pzhx7UAoocxSnXMaWs,5750 
+scipy/spatial/tests/data/cdist-X2.txt,sha256=_IJVjXsp3pvd8NNPNTLmVbHOrzl_RiEXz7cb86NfvZ4,11500 +scipy/spatial/tests/data/degenerate_pointset.npz,sha256=BIq8Hd2SS_LU0fIWAVVS7ZQx-emVRvvzgnaO2lh4gXU,22548 +scipy/spatial/tests/data/iris.txt,sha256=k19QSfkqhMmByqNMzwWDmM6wf5dt6whdGyfAyUO3AW0,15000 +scipy/spatial/tests/data/pdist-boolean-inp.txt,sha256=5Z9SMsXrtmzeUwJlVmGkrPDC_Km7nVpZIbBl7p3Hdc0,50000 +scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt,sha256=Yerj1wqIzcdyULlha-q02WBNGyS2Q5o2wAr0XVEkzis,178801 +scipy/spatial/tests/data/pdist-chebyshev-ml.txt,sha256=NEd2b-DONqUMV9f8gJ2yod17C_5fXGHHZ38PeFsXkyw,3041 +scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt,sha256=UCWZJeMkMajbpjeG0FW60b0q-4r1geAyguNY6Chx5bM,178801 +scipy/spatial/tests/data/pdist-cityblock-ml.txt,sha256=8Iq7cF8oMJjpqd6qsDt_mKPQK0T8Ldot2P8C5rgbGIU,3041 +scipy/spatial/tests/data/pdist-correlation-ml-iris.txt,sha256=l2kEAu0Pm3OsFJsQtHf9Qdy5jnnoOu1v3MooBISnjP0,178801 +scipy/spatial/tests/data/pdist-correlation-ml.txt,sha256=S4GY3z-rf_BGuHmsnColMvR8KwYDyE9lqEbYT_a3Qag,3041 +scipy/spatial/tests/data/pdist-cosine-ml-iris.txt,sha256=hQzzoZrmw9OXAbqkxC8eTFXtJZrbFzMgcWMLbJlOv7U,178801 +scipy/spatial/tests/data/pdist-cosine-ml.txt,sha256=P92Tm6Ie8xg4jGSP7k7bmFRAP5MfxtVR_KacS73a6PI,3041 +scipy/spatial/tests/data/pdist-double-inp.txt,sha256=0Sx5yL8D8pyYDXTIBZAoTiSsRpG_eJz8uD2ttVrklhU,50000 +scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt,sha256=3-UwBM7WZa4aCgmW_ZAdRSq8KYMq2gnkIUqU73Z0OLI,178801 +scipy/spatial/tests/data/pdist-euclidean-ml.txt,sha256=rkQA2-_d7uByKmw003lFXbXNDjHrUGBplZ8nB_TU5pk,3041 +scipy/spatial/tests/data/pdist-hamming-ml.txt,sha256=IAYroplsdz6n7PZ-vIMIJ4FjG9jC1OSxc3-oVJdSFDM,3041 +scipy/spatial/tests/data/pdist-jaccard-ml.txt,sha256=Zb42SoVEnlTj_N_ndnym3_d4RNZWeHm290hTtpp_zO8,3041 +scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt,sha256=L7STTmlRX-z-YvksmiAxEe1UoTmDnQ_lnAjZH53Szp0,172738 +scipy/spatial/tests/data/pdist-jensenshannon-ml.txt,sha256=-sZUikGMWskONojs6fJIMX8VEWpviYYg4u1vipY6Bak,2818 +scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt,sha256=N5L5CxRT5yf_vq6pFjorJ09Sr-RcnrAlH-_F3kEsyUU,178801 +scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt,sha256=DRgzqxRtvQVzFnpFAjNC9TDNgRtk2ZRkWPyAaeOx3q4,3041 +scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt,sha256=jz7SGKU8GuJWASH2u428QL9c-G_-8nZvOFSOUlMdCyA,178801 +scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt,sha256=37H01o6GibccR_hKIwwbWxGX0Tuxnb-4Qc6rmDxwwUI,178801 +scipy/spatial/tests/data/pdist-seuclidean-ml.txt,sha256=YmcI7LZ6i-Wg1wjAkLVX7fmxzCj621Pc5itO3PvCm_k,3041 +scipy/spatial/tests/data/pdist-spearman-ml.txt,sha256=IrtJmDQliv4lDZ_UUjkZNso3EZyu7pMACxMB-rvHUj0,3041 +scipy/spatial/tests/data/random-bool-data.txt,sha256=MHAQdE4hPVzgu-csVVbm1DNJ80dP7XthJ1kb2In8ImM,6000 +scipy/spatial/tests/data/random-double-data.txt,sha256=GA8hYrHsTBeS864GJf0X6JRTvGlbpM8P8sJairmfnBU,75000 +scipy/spatial/tests/data/random-int-data.txt,sha256=xTUbCgoT4X8nll3kXu7S9lv-eJzZtwewwm5lFepxkdQ,10266 +scipy/spatial/tests/data/random-uint-data.txt,sha256=8IPpXhwglxzinL5PcK-PEqleZRlNKdx3zCVMoDklyrY,8711 +scipy/spatial/tests/data/selfdual-4d-polytope.txt,sha256=rkVhIL1mupGuqDrw1a5QFaODzZkdoaLMbGI_DbLLTzM,480 +scipy/spatial/tests/test__plotutils.py,sha256=_DIWtbfhL6KuybmBHSy9NTuHq-3BNZF3scgzFUQZoXM,2156 +scipy/spatial/tests/test__plotutils.pyc,, +scipy/spatial/tests/test__procrustes.py,sha256=0c9Vi9hs1lRBfRMxlXNUeoT-39Np_ovrOUsrhBIqq6c,5048 +scipy/spatial/tests/test__procrustes.pyc,, 
+scipy/spatial/tests/test_distance.py,sha256=nCwppTL5UOV5tTNujF5HHSj76b7Tob-hqW-BQJozPbk,80265 +scipy/spatial/tests/test_distance.pyc,, +scipy/spatial/tests/test_hausdorff.py,sha256=Hu0u1BmCw0m1qVKZ8Lywo3ow7ND18DqWI7m-IOysN5Q,5286 +scipy/spatial/tests/test_hausdorff.pyc,, +scipy/spatial/tests/test_kdtree.py,sha256=lZ1UybnNCfTsxiQ8mK-BnroNYYGDT4yoJ8li1kagQYI,42621 +scipy/spatial/tests/test_kdtree.pyc,, +scipy/spatial/tests/test_qhull.py,sha256=z-MXP5gyQ9bFoZwTddCbok_6YPN8qHCmyRDzW8XfaGE,36823 +scipy/spatial/tests/test_qhull.pyc,, +scipy/spatial/tests/test_spherical_voronoi.py,sha256=bTRnPRMGf9JDl-FEtmnuZwGDEQZ3baMlyu8ErbahEdg,6854 +scipy/spatial/tests/test_spherical_voronoi.pyc,, +scipy/spatial/transform/__init__.py,sha256=Rt2GaYINOxti-PuPslDDWfpsu9gq7po98LM8Z_iIINU,669 +scipy/spatial/transform/__init__.pyc,, +scipy/spatial/transform/rotation.py,sha256=ZIe6icrScovZ18t91e709vomxgELRp_kZWFx14YVZAU,62106 +scipy/spatial/transform/rotation.pyc,, +scipy/spatial/transform/setup.py,sha256=oV8cPVBIYbSi5F8FwZMhhwTs-_mmvHByEZCMzP7MnYA,296 +scipy/spatial/transform/setup.pyc,, +scipy/spatial/transform/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/spatial/transform/tests/__init__.pyc,, +scipy/spatial/transform/tests/test_rotation.py,sha256=0XF96ZwxkUJXY2DnYZKqO-c9_UzZCRwn_wGJmRR4zHQ,27468 +scipy/spatial/transform/tests/test_rotation.pyc,, +scipy/special.pxd,sha256=h8GS4dlnM_hFchSEzjL74WPstvZWYXNMJRNAJMyFzM8,37 +scipy/special/__init__.py,sha256=ffhatwu8RLdHOvsF4N0pcVkgHQl_gXMs9sx0kjkfm5w,27780 +scipy/special/__init__.pyc,, +scipy/special/_comb.so,sha256=OMIK4U8hAWnHc0FbUUEwVqtqDQ8k88fnBf2OT5sw1aM,26008 +scipy/special/_ellip_harm.py,sha256=EWqToEk06JR4O1OjHv5V_y-KWFKirpQ9Acm4UDq9Ql8,5312 +scipy/special/_ellip_harm.pyc,, +scipy/special/_ellip_harm_2.so,sha256=qbWok0Dqu8Ulo1x6UpqHn0HSYyN9y4Z_v-G_asxIroo,88456 +scipy/special/_generate_pyx.py,sha256=V9UbU2o1Nn9sb58zz-szNNQwF-fjeYu6h0-eVfJICu8,48298 +scipy/special/_generate_pyx.pyc,, +scipy/special/_logsumexp.py,sha256=-2PT36rucTzRb1OTW53KG0Mab2V_aBj-VFzX7lgzPqY,6403 +scipy/special/_logsumexp.pyc,, +scipy/special/_mptestutils.py,sha256=2GN-Idb3psy9VPPM7nFsNDEbkTf5Ao-MUHZ9LKCpRqY,14382 +scipy/special/_mptestutils.pyc,, +scipy/special/_precompute/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/special/_precompute/__init__.pyc,, +scipy/special/_precompute/expn_asy.py,sha256=Yz3u4-gGHkDP6ZnBQoJ0cmvu2uwpHmq8gqAX7Z5EHLM,1682 +scipy/special/_precompute/expn_asy.pyc,, +scipy/special/_precompute/gammainc_asy.py,sha256=K11DLc4INgiPVHFq9x8RuUuDDNaAEUduCDnL_HXjInQ,2600 +scipy/special/_precompute/gammainc_asy.pyc,, +scipy/special/_precompute/gammainc_data.py,sha256=eOeJ7dTcODGErMaPZmcell_aLTqCb4kSr9bGiZLxTS4,4175 +scipy/special/_precompute/gammainc_data.pyc,, +scipy/special/_precompute/lambertw.py,sha256=kI9myPrlhdgpzBZa-5N6nuoF-DvB_rMil5FD_Tvo9Do,2072 +scipy/special/_precompute/lambertw.pyc,, +scipy/special/_precompute/loggamma.py,sha256=mgbKFXpShHbVuecJlBXIG2iOBCCe353he2MlXmgWxyE,1179 +scipy/special/_precompute/loggamma.pyc,, +scipy/special/_precompute/setup.py,sha256=28fvQDVkYyu9XwL7lx0yAGchQyRSF2cwpD7gB1Openc,374 +scipy/special/_precompute/setup.pyc,, +scipy/special/_precompute/struve_convergence.py,sha256=G4M3mnJUIdM0GVHU7bllq_m1UzTsNNE1K68OE3dPJcc,3498 +scipy/special/_precompute/struve_convergence.pyc,, +scipy/special/_precompute/utils.py,sha256=158Rm1f-91terelCzUZS9hobQKnqgSLxOzTFnkDOfsU,1239 +scipy/special/_precompute/utils.pyc,, 
+scipy/special/_precompute/zetac.py,sha256=Tuq9jCzlQUoM0xgHE1NcUJ9d5iwQ_uETP_mKXWqrLqA,657 +scipy/special/_precompute/zetac.pyc,, +scipy/special/_spherical_bessel.py,sha256=HoQ5QeApcCF3YKyXUQP6NbD9PVLoaXoBQ7M3Gp54WwY,5245 +scipy/special/_spherical_bessel.pyc,, +scipy/special/_test_round.so,sha256=qbBXnCxGZgk3e3lVqGlZuRqHw-Ejf1FO0AM4n-8sAA4,180544 +scipy/special/_testutils.py,sha256=ql8qcHqPNRiQ49tZGucO-ofj7UShQvJOl1SwZIrR81w,12009 +scipy/special/_testutils.pyc,, +scipy/special/_ufuncs.so,sha256=t6E8vt8ciyygoXeajvTuoBQ8L61JwatzZsFKI4uGEBk,1711840 +scipy/special/_ufuncs_cxx.so,sha256=iKyXlk7geSdnbLtIM9bOog8OQAv0eDOmfnB_NGVHOQU,117800 +scipy/special/add_newdocs.py,sha256=VMrVb8v03NaE_LPh1J6xMJf60FvPmMydh7wQYw6b_Lg,185443 +scipy/special/add_newdocs.pyc,, +scipy/special/basic.py,sha256=z6vNzyW3xPRRFTY9fw9_OqovohyDIvltINuNZKZaNMY,67205 +scipy/special/basic.pyc,, +scipy/special/cython_special.pxd,sha256=xYRECXfy17KgsqK3APcmg15bQIcd6lFuFBnjoqox27U,13122 +scipy/special/cython_special.so,sha256=aeSs02b7ecjcEK0udZN5AfUTz0L4bhyEnVWsITVrOQU,3202896 +scipy/special/lambertw.py,sha256=sO3ZYUhpJvGmKia1ThQRMANQYWjlOy7o_6Yl8ArMK5I,3042 +scipy/special/lambertw.pyc,, +scipy/special/orthogonal.py,sha256=-nQbdHmKbN0aX0R7aroMqGVC4bRNLtL-Zvwhi6AjrwU,58880 +scipy/special/orthogonal.pyc,, +scipy/special/setup.py,sha256=c6Cpo_MEOwAXY6GCPGh2YSufczLhhelx6uF_oRFkJwY,6682 +scipy/special/setup.pyc,, +scipy/special/sf_error.py,sha256=q_Rbfkws1ttgTQKYLt6zFTdY6DFX2HajJe_lXiNWC0c,375 +scipy/special/sf_error.pyc,, +scipy/special/specfun.so,sha256=G5K9Yk5ywvbTz5AwD5t8MzWx43vwmMX2D-uYsXED1dQ,782080 +scipy/special/spfun_stats.py,sha256=YhYgydk2pWQ73fqqYL3WfxKZnlVwsKeVU40XXsz11XE,3499 +scipy/special/spfun_stats.pyc,, +scipy/special/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/special/tests/__init__.pyc,, +scipy/special/tests/data/README,sha256=ZoIaxnVYMTrPcAaPpxBoqcfXlftCee7HqH7hc2SmVPE,35139 +scipy/special/tests/data/boost.npz,sha256=iAC7TuQkTNxOJk_NLIX0zKY9AT20y6j4Web6zRUg7_w,828896 +scipy/special/tests/data/gsl.npz,sha256=jGTj9UV4y2ewceU2wNXgkRCLTTMUpn8_JiIJyJXbhvA,51371 +scipy/special/tests/data/local.npz,sha256=Wl5x2G0PdIUsAN9uPwXeeu9Ymepix3MxGf8SRJjt2kw,179327 +scipy/special/tests/test_basic.py,sha256=RcFvJxw94WXxfyGbKFlAP9t-oTkMTIiFE4x7eTP6UUU,134490 +scipy/special/tests/test_basic.pyc,, +scipy/special/tests/test_boxcox.py,sha256=yo49Xswp7stMPBVp62EcvSAFnPPfUj73BhzpctNlEO0,2738 +scipy/special/tests/test_boxcox.pyc,, +scipy/special/tests/test_cdflib.py,sha256=YCpLg-9roRVaVAWWhglQskDatI5ceNjHZZ9FCap9-ow,12495 +scipy/special/tests/test_cdflib.pyc,, +scipy/special/tests/test_cython_special.py,sha256=xmgul1XvWSOjzY2b0s6DnYmroNDftSIQFYixhBctRiw,17222 +scipy/special/tests/test_cython_special.pyc,, +scipy/special/tests/test_data.py,sha256=DyJnObjedPSDwP8ok3NBHYqKumTdzoMyO7Wk71RbqW4,22193 +scipy/special/tests/test_data.pyc,, +scipy/special/tests/test_digamma.py,sha256=R_u3YBQdpBE3n3bUWKL-Vj_wj2ixBaTALC7qjzHVsRc,1460 +scipy/special/tests/test_digamma.pyc,, +scipy/special/tests/test_ellip_harm.py,sha256=jnUsrCOUx2gJ393VLZsbryUjrF53oJYzdVUPRHtbsh0,9536 +scipy/special/tests/test_ellip_harm.pyc,, +scipy/special/tests/test_gammainc.py,sha256=Q0xNXPZIQ9-mTuwrzTbBtihXVKYpD0Pqqrt6RYVA8F0,1205 +scipy/special/tests/test_gammainc.pyc,, +scipy/special/tests/test_kolmogorov.py,sha256=k_iQ9wxRuFN1DnOeXqL9KenTWZffB4HAUpKXTka4iPw,18521 +scipy/special/tests/test_kolmogorov.pyc,, +scipy/special/tests/test_lambertw.py,sha256=60gIVFc3GlxW-D9UcgSOu8I8KfoM6_3shkhV8LTrZu8,4318 +scipy/special/tests/test_lambertw.pyc,, 
+scipy/special/tests/test_loggamma.py,sha256=TgHHK9j6dDAzBzfwAaOe14FoJKCtPT9W1aQG4Htcdqo,2058 +scipy/special/tests/test_loggamma.pyc,, +scipy/special/tests/test_logit.py,sha256=EuiKXpYXK2Qxzc7QK9r2Atp_r_2-0heRs2R8Q47Hhys,2911 +scipy/special/tests/test_logit.pyc,, +scipy/special/tests/test_logsumexp.py,sha256=FM8EonKn-1PzF2Vy3GFD08_R_3ZmzfDQQKRbOI3c56c,6246 +scipy/special/tests/test_logsumexp.pyc,, +scipy/special/tests/test_mpmath.py,sha256=1YhZlu0ar1Iux4PoEZRZYAloPlZYz8SJmzKo3ABpb4I,74561 +scipy/special/tests/test_mpmath.pyc,, +scipy/special/tests/test_nan_inputs.py,sha256=4WGigsBjyV2mYidxzehNrXCrH18h-jelExNRy092aZA,1775 +scipy/special/tests/test_nan_inputs.pyc,, +scipy/special/tests/test_orthogonal.py,sha256=8H0fSfEiO7ryfBnDx5xnRw-7oUdDRwdQHQlKTKqGuQU,29554 +scipy/special/tests/test_orthogonal.pyc,, +scipy/special/tests/test_orthogonal_eval.py,sha256=UQcEtZ0c0IxR9K_bDdl3JoZ7B4i-ARtONmjVmBvU9jw,8170 +scipy/special/tests/test_orthogonal_eval.pyc,, +scipy/special/tests/test_owens_t.py,sha256=pRbHCNtWTnn6jtT8D89AKKFkoo8QlEbjv0V_tpGOEok,1389 +scipy/special/tests/test_owens_t.pyc,, +scipy/special/tests/test_pcf.py,sha256=RNjEWZGFS99DOGZkkPJ8HNqLULko8UkX0nEWFYX26NE,664 +scipy/special/tests/test_pcf.pyc,, +scipy/special/tests/test_precompute_expn_asy.py,sha256=ZQtpPOcV-AtyQDh6-38fpZfMdbqVk_mTzRAHyLBb7Y4,649 +scipy/special/tests/test_precompute_expn_asy.pyc,, +scipy/special/tests/test_precompute_gammainc.py,sha256=7jkknXmd7VVB1rqr0UAjVD_Oyul0ibzzEBNPn4F87ro,4658 +scipy/special/tests/test_precompute_gammainc.pyc,, +scipy/special/tests/test_precompute_utils.py,sha256=bY9OCdu63OGaH2aXcU6Wh0WLoI2_c5WcyPXMiSCSMCQ,1361 +scipy/special/tests/test_precompute_utils.pyc,, +scipy/special/tests/test_round.py,sha256=SrjuI0KhgmWm4m-Zo9Bqeig1zkUA_z77hvcUf_XUUEc,472 +scipy/special/tests/test_round.pyc,, +scipy/special/tests/test_sf_error.py,sha256=HE6Bbj85G4BHyr50OMnuJs8Slnbq2Ltlr2_pssE9RAY,3237 +scipy/special/tests/test_sf_error.pyc,, +scipy/special/tests/test_sici.py,sha256=_GFzbvjtQAk6m--qp8wRDtYACoil7rdylw6OwNaG684,1293 +scipy/special/tests/test_sici.pyc,, +scipy/special/tests/test_spence.py,sha256=x3zcBhIjGQa_4NGuVZd3q_d65fEPfCOWuxXC6ib-2ks,1165 +scipy/special/tests/test_spence.pyc,, +scipy/special/tests/test_spfun_stats.py,sha256=6q3IdGsNGv8sISvKHrlt4wl38HPhnBySs5SzIIQL1hU,2072 +scipy/special/tests/test_spfun_stats.pyc,, +scipy/special/tests/test_sph_harm.py,sha256=AVdv1d6eiSfTnIIdsBmm2-Uz--WRLsFcYsU_5CaP0lw,1182 +scipy/special/tests/test_sph_harm.pyc,, +scipy/special/tests/test_spherical_bessel.py,sha256=czRSyg-uAKt1G2qlqg0f1vhlKa6F94SQTvbTgDPCFiQ,14407 +scipy/special/tests/test_spherical_bessel.pyc,, +scipy/special/tests/test_trig.py,sha256=mEpF_NWTY-1pPP6ZBvki4hGgWXR3kxeM5XYlSnuT6t4,2433 +scipy/special/tests/test_trig.pyc,, +scipy/special/tests/test_wrightomega.py,sha256=5RlwiSA243Mi7_JAG4HYHPTKfvE1ERqWGMSqrVAzfjg,1748 +scipy/special/tests/test_wrightomega.pyc,, +scipy/special/tests/test_zeta.py,sha256=e30NWkKQ9kw_SO6YqfnRUsrXJtJ3xJHrSnr-DsjdhIw,1228 +scipy/special/tests/test_zeta.pyc,, +scipy/stats/__init__.py,sha256=fZTHTe9aNtrrUpGfhTQuQlXXv9c3ChxJJcv-hfbKn2g,9682 +scipy/stats/__init__.pyc,, +scipy/stats/_binned_statistic.py,sha256=bLiue5NrQ-PWZ0wzXQXBEuJYfAZg3jBfT4f4giMtNOM,25939 +scipy/stats/_binned_statistic.pyc,, +scipy/stats/_constants.py,sha256=EAgBemq42oThIbjCCq3NbbyniO8x4I-ETq_cfLTUazk,681 +scipy/stats/_constants.pyc,, +scipy/stats/_continuous_distns.py,sha256=6KwcaETcGiX7n9Bp3prZn0f_4rJ0z1hoTncao4-Lq0o,212403 +scipy/stats/_continuous_distns.pyc,, 
+scipy/stats/_discrete_distns.py,sha256=jnGj1Gm-t70a0lS0FbLZWWac09fZX4UjNYWwGrj_ZFA,25882 +scipy/stats/_discrete_distns.pyc,, +scipy/stats/_distn_infrastructure.py,sha256=UqEs65m6FDIKzcaF6fX4RrESb_tNjCFtHR6h1aCQ7J0,120159 +scipy/stats/_distn_infrastructure.pyc,, +scipy/stats/_distr_params.py,sha256=FNRsjiiuBSRmnkGAWlQCCQ2qzI1z7BAcVlUdn7cH6YY,4506 +scipy/stats/_distr_params.pyc,, +scipy/stats/_multivariate.py,sha256=Rvsm__aQQdmiuP5BlcaWTr2kHhMceV9OEzSWOWIfjx0,121366 +scipy/stats/_multivariate.pyc,, +scipy/stats/_rvs_sampling.py,sha256=rksttl0rhdfnBCLecaCpBWTxQrP8EYUyGPyqc589HRY,7055 +scipy/stats/_rvs_sampling.pyc,, +scipy/stats/_stats.so,sha256=D1IW_NeQAx-Diz1D9RjGJuAJJjV50kDkPg54w4qdR7Y,418432 +scipy/stats/_stats_mstats_common.py,sha256=J3qzESumo4to-VPtQzdEU7HhdU1SDlGvssB7geKIE8w,13208 +scipy/stats/_stats_mstats_common.pyc,, +scipy/stats/_tukeylambda_stats.py,sha256=FTZCDa7CKhIeS70Hgj9byMtSRo2uxzDtGGNwCqS3jcE,6935 +scipy/stats/_tukeylambda_stats.pyc,, +scipy/stats/contingency.py,sha256=Bs6500ZGdj7w2gzjHNoe8acX2WyEVenFNsBQd5j60AM,9339 +scipy/stats/contingency.pyc,, +scipy/stats/distributions.py,sha256=rEjedYzRqV6ymEm94Ddg3lKCZmyDO_WwG4RevEt2bMk,819 +scipy/stats/distributions.pyc,, +scipy/stats/kde.py,sha256=DSSG77sR1pylAzxYqT3xddtN2LMaYbeMsOvjAxRxQHw,20976 +scipy/stats/kde.pyc,, +scipy/stats/morestats.py,sha256=Ojjzx6trRr8IAa_wAyZOakd9zNwxD9-e-GxhRUbtzd0,106224 +scipy/stats/morestats.pyc,, +scipy/stats/mstats.py,sha256=_BVebh0iL2Ua_6fKsK-0y2_968ROoxh3w9WiIjwjHSY,2304 +scipy/stats/mstats.pyc,, +scipy/stats/mstats_basic.py,sha256=MTCZMx_SAAAuKeLNUoklNBRySlrGIv7tXU9egHe909Q,93428 +scipy/stats/mstats_basic.pyc,, +scipy/stats/mstats_extras.py,sha256=zzDPsvYkU84NHK_Nhid2IHTesU7DU6iZi-PRiBnG1Vs,14957 +scipy/stats/mstats_extras.pyc,, +scipy/stats/mvn.so,sha256=S95R_V_GLpzLgasuBnLylRlwCL-Q345qdeJ-3d7IsiM,89176 +scipy/stats/setup.py,sha256=BNsMOw3avtZtdEjMdwfRi90VsH6v3TMLcW_HizuDAtg,938 +scipy/stats/setup.pyc,, +scipy/stats/statlib.so,sha256=k-16vuJZAkcg4TH01Sger_lfkJmwIoAxYfE_mdVBPg8,56384 +scipy/stats/stats.py,sha256=JNW9xtCMiCvbl9-KtNCA48ZSV-pGInbyxpPwixz74AM,202922 +scipy/stats/stats.pyc,, +scipy/stats/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/stats/tests/__init__.pyc,, +scipy/stats/tests/common_tests.py,sha256=5mnjb6LFJbJffpU6Ghji5qiDTfaEyc2tqufaI43aWJc,10658 +scipy/stats/tests/common_tests.pyc,, +scipy/stats/tests/data/nist_anova/AtmWtAg.dat,sha256=Qdd0i7H4cNhAABfFOZPuplhi_9SCquFpO-hNkyRcMD8,3063 +scipy/stats/tests/data/nist_anova/SiRstv.dat,sha256=x9wJ2g1qnzf4DK_w9F_WiOiDMDEg4td2z6uU77G07xM,1947 +scipy/stats/tests/data/nist_anova/SmLs01.dat,sha256=KdnJedRthF7XLA-w7XkIPIMTgzu89yBAMmZA2H4uQOQ,6055 +scipy/stats/tests/data/nist_anova/SmLs02.dat,sha256=nCPyxRk1dAoSPWiC7kG4dLaXs2GL3-KRXRt2NwgXoIA,46561 +scipy/stats/tests/data/nist_anova/SmLs03.dat,sha256=6yPHiQSk0KI4oURQOk99t-uEm-IZN-8eIPHb_y0mQ1U,451566 +scipy/stats/tests/data/nist_anova/SmLs04.dat,sha256=fI-HpgJF9cdGdBinclhVzOcWCCc5ZJZuXalUwirV-lc,6815 +scipy/stats/tests/data/nist_anova/SmLs05.dat,sha256=iJTaAWUFn7DPLTd9bQh_EMKEK1DPG0fnN8xk7BQlPRE,53799 +scipy/stats/tests/data/nist_anova/SmLs06.dat,sha256=riOkYT-LRgmJhPpCK32x7xYnD38gwnh_Eo1X8OK3eN8,523605 +scipy/stats/tests/data/nist_anova/SmLs07.dat,sha256=QtSS11d-vkVvqaIEeJ6oNwyET1CKoyQqjlfBl2sTOJA,7381 +scipy/stats/tests/data/nist_anova/SmLs08.dat,sha256=qrxQQ0I6gnhrefygKwT48x-bz-8laD8Vpn7c81nITRg,59228 +scipy/stats/tests/data/nist_anova/SmLs09.dat,sha256=qmELOQyNlH7CWOMt8PQ0Z_yxgg9Hxc4lqZOuHZxxWuc,577633 
+scipy/stats/tests/data/nist_linregress/Norris.dat,sha256=zD_RTRxfqJHVZTAAyddzLDDbhCzKSfwFGr3hwZ1nq30,2591 +scipy/stats/tests/data/stable-cdf-sample-data.npy,sha256=TNsQ_TDpSBsbBFuuOlNLEq6pzuwEMFP9NcSMsGWQ_-w,27008 +scipy/stats/tests/data/stable-pdf-sample-data.npy,sha256=WrMmYqZWuX3B7PHcoBElLrhnKULUfgU0adSJVjn77QE,27008 +scipy/stats/tests/test_binned_statistic.py,sha256=113F7ow82wvi29ZJz4DQoTlmQcIsxExCMxzTNq_0KLY,13447 +scipy/stats/tests/test_binned_statistic.pyc,, +scipy/stats/tests/test_contingency.py,sha256=PQM53qSjmkZmiP1sh_JwM_qi_MmPHhIfwNHoIHi11LQ,5910 +scipy/stats/tests/test_contingency.pyc,, +scipy/stats/tests/test_continuous_basic.py,sha256=QhH2lV6dlCpM9eM0riinQ8qNe_gC8p-YJYPg8OD0Mws,16354 +scipy/stats/tests/test_continuous_basic.pyc,, +scipy/stats/tests/test_discrete_basic.py,sha256=dZAn-bCfCookQNSo82C2xtrBXba_tbF80xBgzHVmMt0,8321 +scipy/stats/tests/test_discrete_basic.pyc,, +scipy/stats/tests/test_discrete_distns.py,sha256=L71shMbjVUl7La1naTn13E5YzHt35izyY4WcTpqgpeY,1448 +scipy/stats/tests/test_discrete_distns.pyc,, +scipy/stats/tests/test_distributions.py,sha256=k2VnbSh73bxCqIROQKY5YXWXnCP_WSnVuxBRohuoWU8,139115 +scipy/stats/tests/test_distributions.pyc,, +scipy/stats/tests/test_fit.py,sha256=Prr48c19x0JIGY6HEr5K2eNr8WH2Vexpx3SodIi1JTA,3800 +scipy/stats/tests/test_fit.pyc,, +scipy/stats/tests/test_kdeoth.py,sha256=YZF9Ah7VcQX4-8lRTBEaHBF-kIaheiOKaRGVs6EPWb0,12952 +scipy/stats/tests/test_kdeoth.pyc,, +scipy/stats/tests/test_morestats.py,sha256=2lkBqqx_CWoTnTXagJzBTNXRojWLpYwLeZrdmiBpbf8,63119 +scipy/stats/tests/test_morestats.pyc,, +scipy/stats/tests/test_mstats_basic.py,sha256=Kksw2X7eOGfrhccKHB3CLX2L_aeaEqciP5_SeqG0Rn8,61447 +scipy/stats/tests/test_mstats_basic.pyc,, +scipy/stats/tests/test_mstats_extras.py,sha256=reFZowOoOX9t7MsoSdBeYrL9g0QokGGjtNY8cBBhhgk,5464 +scipy/stats/tests/test_mstats_extras.pyc,, +scipy/stats/tests/test_multivariate.py,sha256=WXNwIMOsAoznpXdKd2JY3kJnYC1IlnXqETGaq8m3Mqg,64226 +scipy/stats/tests/test_multivariate.pyc,, +scipy/stats/tests/test_rank.py,sha256=9WgcaGokBO_BF1yEjFqJwKA8p4EqEpO1Sdy-bEhbTtE,7514 +scipy/stats/tests/test_rank.pyc,, +scipy/stats/tests/test_stats.py,sha256=TXouh_Qx9ZHBdWjDu6yTedpNy1ya7vnOnTFL-GJHlDY,188047 +scipy/stats/tests/test_stats.pyc,, +scipy/stats/tests/test_tukeylambda_stats.py,sha256=xdfNnnNj7MqodMx60wzKOyvTRMNMkaN1jnw5-zUyv3o,3298 +scipy/stats/tests/test_tukeylambda_stats.pyc,, +scipy/stats/vonmises.py,sha256=-Yi3VJELKfYp2C3791Fpc6r_jf2OkgAKL5oG_Ffxgt0,963 +scipy/stats/vonmises.pyc,, +scipy/version.py,sha256=Frr6MrI6wplDDksP4w2qtPPtCubSnsoWCBVzEPIc10s,228 +scipy/version.pyc,, diff --git a/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/WHEEL new file mode 100644 index 0000000..295a0ca --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.1) +Root-Is-Purelib: false +Tag: cp27-cp27mu-manylinux1_x86_64 + diff --git a/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/top_level.txt new file mode 100644 index 0000000..9a635b9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy-1.2.1.dist-info/top_level.txt @@ -0,0 +1 @@ +scipy diff --git a/project/venv/lib/python2.7/site-packages/scipy/.libs/libgfortran-ed201abd.so.3.0.0 b/project/venv/lib/python2.7/site-packages/scipy/.libs/libgfortran-ed201abd.so.3.0.0 new file mode 100755 index 
0000000..ea741fc
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/.libs/libgfortran-ed201abd.so.3.0.0 differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so b/project/venv/lib/python2.7/site-packages/scipy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so
new file mode 100755
index 0000000..a12ec70
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/HACKING.rst.txt b/project/venv/lib/python2.7/site-packages/scipy/HACKING.rst.txt
new file mode 100644
index 0000000..9dcee79
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/HACKING.rst.txt
@@ -0,0 +1,528 @@
+=====================
+Contributing to SciPy
+=====================
+
+This document aims to give an overview of how to contribute to SciPy. It
+tries to answer commonly asked questions and provide some insight into how the
+community process works in practice. Readers who are familiar with the SciPy
+community and are experienced Python coders may want to jump straight to the
+`git workflow`_ documentation.
+
+There are a lot of ways you can contribute:
+
+- Contributing new code
+- Fixing bugs and other maintenance work
+- Improving the documentation
+- Reviewing open pull requests
+- Triaging issues
+- Working on the `scipy.org`_ website
+- Answering questions and participating on the scipy-dev and scipy-user
+  `mailing lists`_.
+
+Contributing new code
+=====================
+
+If you have been working with the scientific Python toolstack for a while, you
+probably have some code lying around of which you think "this could be useful
+for others too". Perhaps it's a good idea then to contribute it to SciPy or
+another open source project. The first question to ask is then: where does
+this code belong? That question is hard to answer here, so we start with a
+more specific one: *what code is suitable for putting into SciPy?*
+Almost all of the new code added to SciPy has in common that it's potentially
+useful in multiple scientific domains and it fits in the scope of existing
+SciPy submodules. In principle new submodules can be added too, but this is
+far less common. For code that is specific to a single application, there may
+be an existing project that can use the code. Some scikits (`scikit-learn`_,
+`scikit-image`_, `statsmodels`_, etc.) are good examples here; they have a
+narrower focus and, because of that, more domain-specific code than SciPy.
+
+Now if you have code that you would like to see included in SciPy, how do you
+go about it? After checking that your code can be distributed in SciPy under a
+compatible license (see the FAQ for details), the first step is to discuss it
+on the scipy-dev mailing list. All new features, as well as changes to
+existing code, are discussed and decided on there. You can, and probably
+should, already start this discussion before your code is finished.
+
+Assuming the outcome of the discussion on the mailing list is positive and you
+have a function or piece of code that does what you need it to do, what next?
+Before code is added to SciPy, it at least has to have good documentation, unit
+tests and correct code style.
+
+1. Unit tests
+   In principle you should aim to create unit tests that exercise all the code
+   that you are adding.
+   This gives some degree of confidence that your code runs correctly, also
+   on Python versions and hardware or OSes that you don't have available
+   yourself. An extensive description of how to write unit tests is given in
+   the NumPy `testing guidelines`_.
+
+2. Documentation
+   Clear and complete documentation is essential in order for users to be able
+   to find and understand the code. Documentation for individual functions
+   and classes -- which includes at least a basic description, the type and
+   meaning of all parameters and return values, and usage examples in
+   `doctest`_ format -- is put in docstrings. Those docstrings can be read
+   within the interpreter, and are compiled into a reference guide in html and
+   pdf format. Higher-level documentation for key (areas of) functionality is
+   provided in tutorial format and/or in module docstrings. A guide on how to
+   write documentation is given in `how to document`_.
+
+3. Code style
+   Uniformity of code style is important for others trying to understand the
+   code. SciPy follows the standard Python guidelines for code style,
+   `PEP8`_. In order to check that your code conforms to PEP8, you can use
+   the `pep8 package`_ style checker. Most IDEs and text editors have
+   settings that can help you follow PEP8, for example by translating tabs to
+   four spaces. Using `pyflakes`_ to check your code is also a good idea.
+
+At the end of this document a checklist is given that may help to check if your
+code fulfills all requirements for inclusion in SciPy.
+
+Another question you may have is: *where exactly do I put my code*? To answer
+this, it is useful to understand how the SciPy public API (application
+programming interface) is defined. For most modules the API is two levels
+deep, which means your new function should appear as
+``scipy.submodule.my_new_func``. ``my_new_func`` can be put in an existing or
+new file under ``/scipy/<submodule>/``, its name is added to the ``__all__``
+list in that file (which lists all public functions in the file), and those
+public functions are then imported in ``/scipy/<submodule>/__init__.py``. Any
+private functions/classes should have a leading underscore (``_``) in their
+name. A more detailed description of the SciPy public API is given in
+`SciPy API`_.
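+
+As a minimal sketch of this layout (the file name, function name and function
+behavior here are purely illustrative)::
+
+    # /scipy/submodule/somefile.py
+    import numpy as np
+
+    __all__ = ['my_new_func']  # public names exported from this file
+
+    def my_new_func(x):
+        """Square the input, element-wise.
+
+        Parameters
+        ----------
+        x : array_like
+            Input data.
+
+        Returns
+        -------
+        ndarray
+            The element-wise square of `x`.
+
+        Examples
+        --------
+        >>> my_new_func([1, 2, 3])
+        array([1, 4, 9])
+        """
+        return np.asarray(x) ** 2
+
+    def _private_helper(x):
+        # Leading underscore: private, not part of the public API.
+        return x
+
+and in ``/scipy/<submodule>/__init__.py``::
+
+    from .somefile import *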
+
+Once you think your code is ready for inclusion in SciPy, you can send a pull
+request (PR) on Github. We won't go into the details of how to work with git
+here; this is described well in the `git workflow`_ section of the NumPy
+documentation and on the `Github help pages`_. When you send the PR for a new
+feature, be sure to also mention this on the scipy-dev mailing list. This can
+prompt interested people to help review your PR. Assuming that you already got
+positive feedback before on the general idea of your code/feature, the purpose
+of the code review is to ensure that the code is correct, efficient and meets
+the requirements outlined above. In many cases the code review happens
+relatively quickly, but it's possible that it stalls. If you have addressed
+all feedback already given, it's perfectly fine to ask on the mailing list
+again for review (after a reasonable amount of time, say a couple of weeks,
+has passed). Once the review is completed, the PR is merged into the "master"
+branch of SciPy.
+
+The above describes the requirements and process for adding code to SciPy. It
+doesn't yet answer the question, though, of how exactly decisions are made.
+The basic answer is: decisions are made by consensus, by everyone who chooses
+to participate in the discussion on the mailing list. This includes
+developers, other users and yourself. Aiming for consensus in the discussion
+is important -- SciPy is a project by and for the scientific Python community.
+In those rare cases where agreement cannot be reached, the maintainers of the
+module in question can decide the issue.
+
+
+Contributing by helping maintain existing code
+==============================================
+
+The previous section talked specifically about adding new functionality to
+SciPy. A large part of that discussion also applies to maintenance of existing
+code. Maintenance means fixing bugs, improving code quality, documenting
+existing functionality better, adding missing unit tests, keeping
+build scripts up-to-date, etc. The SciPy `issue list`_ contains all
+reported bugs, build/documentation issues, etc. Fixing issues
+helps improve the overall quality of SciPy, and is also a good way
+of getting familiar with the project. You may also want to fix a bug because
+you ran into it and need the function in question to work correctly.
+
+The discussion on code style and unit testing above applies equally to bug
+fixes. It is usually best to start by writing a unit test that shows the
+problem, i.e. one that should pass but currently doesn't. Once you have that,
+you can fix the code so that the test does pass. That should be enough to
+send a PR for this issue. Unlike when adding new code, discussing this on the
+mailing list may not be necessary -- if the old behavior of the code is
+clearly incorrect, no one will object to having it fixed. It may be necessary
+to add some warning or deprecation message for the changed behavior. This
+should be part of the review process.
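+
+As a sketch, such a test could look like the following (the module, function
+and property are chosen purely for illustration -- imagine the assertion
+captures correct behavior that the buggy code does not yet produce)::
+
+    import numpy as np
+    from numpy.testing import assert_allclose
+    from scipy import signal
+
+    def test_medfilt_kernel_size_one():
+        # A median filter with kernel size 1 must return the input unchanged.
+        x = np.array([1.0, 5.0, 2.0, 4.0])
+        assert_allclose(signal.medfilt(x, kernel_size=1), x)
+
+Such a test documents the expected behavior, fails for as long as the bug is
+present, and keeps the bug from silently coming back later.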
+
+.. note::
+
+   Pull requests that *only* change code style, e.g. fixing some PEP8 issues in
+   a file, are discouraged. Such PRs clutter the git annotate history and take
+   reviewer time that may be better spent in other ways. Code style cleanups
+   of code that is touched as part of a functional change are fine however.
+
+
+Reviewing pull requests
+=======================
+
+Reviewing open pull requests (PRs) is very welcome, and a valuable way to help
+increase the speed at which the project moves forward. If you have specific
+knowledge/experience in a particular area (say "optimization algorithms" or
+"special functions") then reviewing PRs in that area is especially valuable --
+sometimes PRs with technical code have to wait for a long time to get merged
+due to a shortage of appropriate reviewers.
+
+We encourage everyone to get involved in the review process; it's also a
+great way to get familiar with the code base. Reviewers should ask
+themselves some or all of the following questions:
+
+- Was this change adequately discussed (relevant for new features and changes
+  in existing behavior)?
+- Is the feature scientifically sound? Algorithms may be known to work based on
+  literature; otherwise, a closer look at correctness is valuable.
+- Is the intended behavior clear under all conditions (e.g. unexpected inputs
+  like empty arrays or nan/inf values)?
+- Does the code meet the quality, test and documentation expectations outlined
+  under `Contributing new code`_?
+
+If we do not know you yet, consider introducing yourself.
+
+
+Other ways to contribute
+========================
+
+There are many ways to contribute other than contributing code.
+
+Triaging issues (investigating bug reports for validity and possible actions to
+take) is also a useful activity. SciPy has many hundreds of open issues;
+closing invalid ones and correctly labeling valid ones (ideally with some first
+thoughts in a comment) allows prioritizing maintenance work and finding related
+issues easily when working on an existing function or submodule.
+
+Participating in discussions on the scipy-user and scipy-dev `mailing lists`_ is
+a contribution in itself. Everyone who writes to those lists with a problem or
+an idea would like to get responses, and writing such responses makes the
+project and community function better and appear more welcoming.
+
+The `scipy.org`_ website contains a lot of information on both SciPy the
+project and SciPy the community, and it can always use a new pair of hands.
+The sources for the website live in their own separate repo:
+https://github.com/scipy/scipy.org
+
+
+Recommended development setup
+=============================
+
+Since Scipy contains parts written in C, C++, and Fortran that need to be
+compiled before use, make sure you have the necessary compilers and Python
+development headers installed. Having compiled code also means that importing
+Scipy from the development sources needs some additional steps, which are
+explained below.
+
+First fork a copy of the main Scipy repository on Github onto your own
+account and then create your local repository via::
+
+   $ git clone git@github.com:YOURUSERNAME/scipy.git scipy
+   $ cd scipy
+   $ git remote add upstream git://github.com/scipy/scipy.git
+
+To build the development version of Scipy, run the tests, or spawn
+interactive shells with the Python import paths properly set up, do one of::
+
+   $ python runtests.py -v
+   $ python runtests.py -v -s optimize
+   $ python runtests.py -v -t scipy.special.tests.test_basic::test_xlogy
+   $ python runtests.py --ipython
+   $ python runtests.py --python somescript.py
+   $ python runtests.py --bench
+
+This builds Scipy first, so the first time it may take some time. If
+you specify ``-n``, the tests are run against the version of Scipy (if
+any) found on the current PYTHONPATH. *Note: if you run into a build issue,
+more detailed build documentation can be found in :doc:`building/index` and at
+https://github.com/scipy/scipy/tree/master/doc/source/building*
+
+Using ``runtests.py`` is the recommended approach to running tests.
+There are also a number of alternatives to it, for example an in-place
+build or installing to a virtualenv. See the FAQ below for details.
+
+Some of the tests in Scipy are very slow and need to be separately
+enabled. See the FAQ below for details.
+
+
+SciPy structure
+===============
+
+All SciPy modules should adhere to the following conventions. In the
+following, a *SciPy module* is defined as a Python package, say
+``yyy``, that is located in the scipy/ directory.
+
+* Ideally, each SciPy module should be as self-contained as possible.
+  That is, it should have minimal dependencies on other packages or
+  modules. Even dependencies on other SciPy modules should be kept to
+  a minimum. A dependency on NumPy is of course assumed.
+
+* Directory ``yyy/`` contains:
+
+  - A file ``setup.py`` that defines a
+    ``configuration(parent_package='', top_path=None)`` function
+    for `numpy.distutils` (see the sketch after this list).
+
+  - A directory ``tests/`` that contains files ``test_<name>.py``
+    corresponding to modules ``yyy/<name>{.py,.so,/}``.
+
+* Private modules should be prefixed with an underscore ``_``,
+  for instance ``yyy/_somemodule.py``.
+
+* User-visible functions should have good documentation following
+  the Numpy documentation style; see `how to document`_.
+
+* The ``__init__.py`` of the module should contain the main reference
+  documentation in its docstring. This is connected to the Sphinx
+  documentation under ``doc/`` via Sphinx's automodule directive.
+
+  The reference documentation should first give a categorized list of
+  the contents of the module using ``autosummary::`` directives, and
+  after that explain points essential for understanding the use of the
+  module.
+
+  Tutorial-style documentation with extensive examples should be
+  separate, and put under ``doc/source/tutorial/``.
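+
+A rough sketch of the ``setup.py`` convention, for a hypothetical module
+``yyy`` (this mirrors the usual `numpy.distutils` boilerplate; details vary
+per module)::
+
+    def configuration(parent_package='', top_path=None):
+        from numpy.distutils.misc_util import Configuration
+        # Declare the package and ship its tests/ directory along with it.
+        config = Configuration('yyy', parent_package, top_path)
+        config.add_data_dir('tests')
+        return config
+
+    if __name__ == '__main__':
+        from numpy.distutils.core import setup
+        setup(**configuration(top_path='').todict())
+
+Here ``add_data_dir('tests')`` ensures the ``tests/`` directory described
+above is shipped with the package.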
+
+For further details on NumPy distutils, see:
+
+  https://github.com/numpy/numpy/blob/master/doc/DISTUTILS.rst.txt
+
+
+Useful links, FAQ, checklist
+============================
+
+Checklist before submitting a PR
+--------------------------------
+
+  - Are there unit tests with good code coverage?
+  - Do all public functions have docstrings including examples?
+  - Is the code style correct (PEP8, pyflakes)?
+  - Is the commit message `formatted correctly`_?
+  - Is the new functionality tagged with ``.. versionadded:: X.Y.Z`` (with
+    X.Y.Z the version number of the next release, which can be found in
+    setup.py)?
+  - Is the new functionality mentioned in the release notes of the next
+    release?
+  - Is the new functionality added to the reference guide?
+  - In case of larger additions, is there a tutorial or more extensive
+    module-level description?
+  - In case compiled code is added, is it integrated correctly via setup.py?
+  - If you are a first-time contributor, did you add yourself to THANKS.txt?
+    Please note that this is perfectly normal and desirable: the aim is to
+    give every single contributor credit, and if you don't add yourself it's
+    simply extra work for the reviewer (or worse, the reviewer may forget).
+  - Did you check that the code can be distributed under a BSD license?
+
+
+Useful SciPy documents
+----------------------
+
+  - The `how to document`_ guidelines
+  - NumPy/SciPy `testing guidelines`_
+  - `SciPy API`_
+  - The `SciPy Roadmap`_
+  - NumPy/SciPy `git workflow`_
+  - How to submit a good `bug report`_
+
+
+FAQ
+---
+
+*I based my code on existing Matlab/R/... code I found online, is this OK?*
+
+It depends. SciPy is distributed under a BSD license, so if the code that you
+based your code on is also BSD licensed or has a BSD-compatible license (e.g.
+MIT, PSF), then it's OK. Code which is GPL or Apache licensed, has no
+clear license, requires citation, or is free for academic use only can't be
+included in SciPy. Therefore, if you copied existing code with such a license,
+or made a direct translation of it to Python, your code can't be included.
+If you're unsure, please ask on the scipy-dev mailing list.
+
+*Why is SciPy under the BSD license and not, say, the GPL?*
+
+Like Python, SciPy uses a "permissive" open source license, which allows
+proprietary re-use. While this allows companies to use and modify the software
+without giving anything back, it is felt that the larger user base results in
+more contributions overall, and companies often publish their modifications
+anyway, without being required to. See John Hunter's `BSD pitch`_.
+
+
+*How do I set up a development version of SciPy in parallel to a released
+version that I use to do my job/research?*
+
+One simple way to achieve this is to install the released version in
+site-packages, by using a binary installer or pip for example, and set
+up the development version in a virtualenv. First install
+`virtualenv`_ (optionally use `virtualenvwrapper`_), then create your
+virtualenv (named scipy-dev here) with::
+
+    $ virtualenv scipy-dev
+
+Now, whenever you want to switch to the virtual environment, you can use the
+command ``source scipy-dev/bin/activate``, and ``deactivate`` to exit from the
+virtual environment back to your previous shell. With scipy-dev
+activated, first install SciPy's dependencies::
+
+    $ pip install numpy pytest cython
+
+After that, you can install a development version of SciPy, for example via::
+
+    $ python setup.py install
+
+The installation goes into the virtual environment.
+
+
+*How do I set up an in-place build for development?*
+
+For development, you can set up an in-place build so that changes made to
+``.py`` files take effect without a rebuild. First, run::
+
+    $ python setup.py build_ext -i
+
+Then you need to point your PYTHONPATH environment variable to this directory.
+Some IDEs (`Spyder`_ for example) have utilities to manage PYTHONPATH. On Linux
+and OSX, you can run the command::
+
+    $ export PYTHONPATH=$PWD
+
+and on Windows::
+
+    $ set PYTHONPATH=/path/to/scipy
+
+Now editing a Python source file in SciPy allows you to immediately
+test and use your changes (in ``.py`` files), by simply restarting the
+interpreter.
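+
+A quick sanity check (plain Python; nothing SciPy-specific is assumed here)
+is to print where the interpreter imports SciPy from; with the in-place
+build set up correctly, the path should point into your source tree rather
+than into site-packages::
+
+    import scipy
+    print(scipy.__version__)
+    print(scipy.__file__)  # should point into your clone, not site-packages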
+
+
+*Are there any video examples for installing from source, setting up a
+development environment, etc.?*
+
+Currently, there are two video demonstrations for Anaconda Python on macOS:
+
+`Anaconda SciPy Dev Part I (macOS)`_ is a four-minute
+overview of installing Anaconda, building SciPy from source, and testing
+changes made to SciPy from the `Spyder`_ IDE.
+
+`Anaconda SciPy Dev Part II (macOS)`_ shows how to use
+a virtual environment to easily switch between the "pre-built version" of SciPy
+installed with Anaconda and your "source-built version" of SciPy created
+according to Part I.
+
+
+*Are there any video examples of the basic development workflow?*
+
+`SciPy Development Workflow`_ is a five-minute example of fixing a bug and
+submitting a pull request. While it's intended as a follow-up to
+`Anaconda SciPy Dev Part I (macOS)`_ and `Anaconda SciPy Dev Part II (macOS)`_,
+the process is similar for other development setups.
+
+
+*Can I use a programming language other than Python to speed up my code?*
+
+Yes. The languages used in SciPy are Python, Cython, C, C++ and Fortran. All
+of these have their pros and cons. If Python really doesn't offer enough
+performance, one of those languages can be used. Important concerns when
+using compiled languages are maintainability and portability. For
+maintainability, Cython is clearly preferred over C/C++/Fortran. Cython and C
+are more portable than C++/Fortran. A lot of the existing C and Fortran code
+in SciPy is older, battle-tested code that was only wrapped in (but not
+specifically written for) Python/SciPy. Therefore the basic advice is: use
+Cython. If there are specific reasons why C/C++/Fortran should be preferred,
+please discuss those reasons first.
+
+
+*How do I debug code written in C/C++/Fortran inside SciPy?*
+
+The easiest way to do this is to first write a Python script that
+invokes the C code whose execution you want to debug. For instance
+``mytest.py``::
+
+    from scipy.special import hyp2f1
+    print(hyp2f1(5.0, 1.0, -1.8, 0.95))
+
+Now, you can run::
+
+    gdb --args python runtests.py -g --python mytest.py
+
+If you didn't compile with debug symbols enabled before, remove the
+``build`` directory first. While in the debugger::
+
+    (gdb) break cephes_hyp2f1
+    (gdb) run
+
+The execution will now stop at the corresponding C function, and you
+can step through it as usual. Instead of plain ``gdb`` you can of
+course use your favourite alternative debugger; run it on the
+``python`` binary with arguments ``runtests.py -g --python mytest.py``.
+
+
+*How do I enable additional tests in SciPy?*
+
+Some of the tests in SciPy's test suite are very slow and not enabled
+by default. You can run the full suite via::
+
+    $ python runtests.py -g -m full
+
+This invokes the test suite as ``import scipy; scipy.test("full")``,
+which also enables the slow tests.
+
+There is an additional level of very slow tests (several minutes),
+which are disabled even in this case. They can be enabled by setting
+the environment variable ``SCIPY_XSLOW=1`` before running the test
+suite.
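+
+For example, in a POSIX shell (other shells set environment variables with
+a different syntax), this can be combined into a single command::
+
+    $ SCIPY_XSLOW=1 python runtests.py -g -m full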
+
+
+.. _scikit-learn: http://scikit-learn.org
+
+.. _scikit-image: http://scikit-image.org/
+
+.. _statsmodels: https://www.statsmodels.org/
+
+.. _testing guidelines: https://github.com/numpy/numpy/blob/master/doc/TESTS.rst.txt
+
+.. _formatted correctly: https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message
+
+.. _how to document: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+
+.. _bug report: https://scipy.org/bug-report.html
+
+.. _PEP8: https://www.python.org/dev/peps/pep-0008/
+
+.. _pep8 package: https://pypi.python.org/pypi/pep8
+
+.. _pyflakes: https://pypi.python.org/pypi/pyflakes
+
+.. _SciPy API: https://docs.scipy.org/doc/scipy/reference/api.html
+
+.. _SciPy Roadmap: https://scipy.github.io/devdocs/roadmap.html
+
+.. _git workflow: https://docs.scipy.org/doc/numpy/dev/gitwash/
+
+.. _Github help pages: https://help.github.com/articles/set-up-git/
+
+.. _issue list: https://github.com/scipy/scipy/issues
+
+.. _Github: https://github.com/scipy/scipy
+
+.. _scipy.org: https://scipy.org/
+
+.. _scipy.github.com: https://scipy.github.com/
+
+.. _scipy.org-new: https://github.com/scipy/scipy.org-new
+
+.. _documentation wiki: https://docs.scipy.org/scipy/Front%20Page/
+
+.. _SciPy Central: https://web.archive.org/web/20170520065729/http://central.scipy.org/
+
+.. _doctest: https://pymotw.com/3/doctest/
+
+.. _virtualenv: https://virtualenv.pypa.io/
+
+.. _virtualenvwrapper: https://bitbucket.org/dhellmann/virtualenvwrapper/
+
+.. _bsd pitch: http://nipy.sourceforge.net/nipy/stable/faq/johns_bsd_pitch.html
+
+.. _Pytest: https://pytest.org/
+
+.. _mailing lists: https://www.scipy.org/scipylib/mailing-lists.html
+
+.. _Spyder: https://www.spyder-ide.org/
+
+.. _Anaconda SciPy Dev Part I (macOS): https://youtu.be/1rPOSNd0ULI
+
+.. _Anaconda SciPy Dev Part II (macOS): https://youtu.be/Faz29u5xIZc
+
+.. _SciPy Development Workflow: https://youtu.be/HgU01gJbzMY
diff --git a/project/venv/lib/python2.7/site-packages/scipy/INSTALL.rst.txt b/project/venv/lib/python2.7/site-packages/scipy/INSTALL.rst.txt
new file mode 100644
index 0000000..d249fbe
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/INSTALL.rst.txt
@@ -0,0 +1,249 @@
+Building and installing SciPy
++++++++++++++++++++++++++++++
+
+See https://www.scipy.org/install.html
+
+.. Contents::
+
+
+INTRODUCTION
+============
+
+It is *strongly* recommended that you use either a complete scientific Python
+distribution or binary packages on your platform if they are available, in
+particular on Windows and Mac OS X. You should not attempt to build SciPy if
+you are not familiar with compiling software from sources.
+
+Recommended distributions are:
+
+  - Enthought Canopy (https://www.enthought.com/products/canopy/)
+  - Anaconda (https://www.anaconda.com)
+  - Python(x,y) (https://python-xy.github.io/)
+  - WinPython (https://winpython.github.io/)
+
+The rest of this install documentation summarizes how to build SciPy. Note
+that more extensive (and possibly more up-to-date) build instructions are
+maintained at https://scipy.github.io/devdocs/building/
+
+
+PREREQUISITES
+=============
+
+SciPy requires the following software installed for your platform:
+
+1) Python__ 2.7 or >= 3.4
+
+__ https://www.python.org
+
+2) NumPy__ >= 1.8.2
+
+__ https://www.numpy.org/
+
+3) For building from source: setuptools__
+
+__ https://github.com/pypa/setuptools
+
+4) If you want to build the documentation: Sphinx__ >= 1.2.1
+
+__ http://www.sphinx-doc.org/
+
+5) If you want to build SciPy master or another unreleased version from source
+   (Cython-generated C sources are included in official releases):
+   Cython__ >= 0.23.4
+
+__ http://cython.org/
+
+Windows
+-------
+
+Compilers
+~~~~~~~~~
+
+There are two ways to build SciPy on Windows:
+
+1. Use Intel MKL, and Intel compilers or ifort + MSVC. This is what Anaconda
+   and Enthought Canopy use.
+2. Use MSVC + gfortran with OpenBLAS. This is how the SciPy Windows wheels are
+   built.
+
+Mac OS X
+--------
+
+It is recommended to use GCC or Clang; both work fine. GCC is available for
+free when installing Xcode, the developer tool suite on Mac OS X. You also
+need a Fortran compiler, which is not included with Xcode: you should use a
+recent gfortran from an OS X package manager (like Homebrew).
+
+Please do NOT use gfortran from `hpc.sourceforge.net <http://hpc.sourceforge.net>`_,
+it is known to generate buggy SciPy binaries.
+
+You should also use a BLAS/LAPACK library from an OS X package manager.
+ATLAS, OpenBLAS, and MKL all work.
+
+As of SciPy version 1.2.0, we do not support compiling against the system
+Accelerate library for BLAS and LAPACK. It does not support a sufficiently
+recent LAPACK interface.
+
+Linux
+-----
+
+Most common distributions include all the dependencies. You will need to
+install a BLAS/LAPACK (ATLAS, OpenBLAS, and MKL all work fine) including
+development headers, as well as development headers for Python itself. Those
+are typically packaged as python-dev or python-devel.
+
+
+INSTALLING SCIPY
+================
+
+For the latest information, see the web site:
+
+  https://www.scipy.org
+
+
+Development version from Git
+----------------------------
+Use the commands::
+
+  git clone https://github.com/scipy/scipy.git
+
+  cd scipy
+  git clean -xdf
+  python setup.py install --user
+
+Documentation
+-------------
+Type::
+
+  cd scipy/doc
+  make html
+
+From tarballs
+-------------
+Unpack ``SciPy-<version>.tar.gz``, change to the ``SciPy-<version>/``
+directory, and run::
+
+  pip install . -v --user
+
+This may take several minutes to half an hour depending on the speed of your
+computer.
+
+
+TESTING
+=======
+
+To test SciPy after installation (highly recommended), execute in Python
+
+   >>> import scipy
+   >>> scipy.test()
+
+To run the full test suite, use
+
+   >>> scipy.test('full')
+
+If you are upgrading from an older SciPy release, please test your code for any
+deprecation warnings before and after upgrading to avoid surprises::
+
+   $ python -Wd my_code_that_shouldnt_break.py
+
+Please note that you must have version 1.0 or later of the Pytest test
+framework installed in order to run the tests. More information about Pytest is
+available on the website__.
+
+__ https://pytest.org/
+
+COMPILER NOTES
+==============
+
+You can specify which Fortran compiler to use by using the following
+install command::
+
+  python setup.py config_fc --fcompiler=<Vendor> install
+
+To see a valid list of <Vendor> names, run::
+
+  python setup.py config_fc --help-fcompiler
+
+IMPORTANT: It is highly recommended that all libraries that SciPy uses (e.g.
+BLAS and ATLAS libraries) are built with the same Fortran compiler. In most
+cases, if you mix compilers, you will at best not be able to import SciPy,
+and at worst see crashes and random results.
+
+UNINSTALLING
+============
+
+When installing with ``python setup.py install`` or a variation on that, you do
+not get proper uninstall behavior for an older already installed SciPy version.
+In many cases that's not a problem, but if it turns out to be an issue, you
+need to manually uninstall it first (remove it from e.g.
+``/usr/lib/python3.4/site-packages/scipy`` or
+``$HOME/lib/python3.4/site-packages/scipy``).
+
+Alternatively, you can use ``pip install . --user`` instead of ``python
+setup.py install --user`` in order to get reliable uninstall behavior.
+The downside is that ``pip`` doesn't show you a build log and doesn't support
+incremental rebuilds (it copies the whole source tree to a tempdir).
+
+TROUBLESHOOTING
+===============
+
+If you experience problems when building/installing/testing SciPy, you
+can ask for help on the scipy-user@python.org or scipy-dev@python.org mailing
+lists. Please include the following information in your message:
+
+NOTE: You can generate some of the following information (items 1-5,7)
+in one command::
+
+  python -c 'from numpy.f2py.diagnose import run; run()'
+
+1) Platform information::
+
+    python -c 'import os, sys; print(os.name, sys.platform)'
+    uname -a
+    OS, its distribution name and version information
+    etc.
+
+2) Information about C, C++, and Fortran compilers/linkers as reported by
+   the compilers when requesting their version information, e.g.,
+   the output of
+   ::
+
+     gcc -v
+     g77 --version
+
+3) Python version::
+
+    python -c 'import sys; print(sys.version)'
+
+4) NumPy version::
+
+    python -c 'import numpy; print(numpy.__version__)'
+
+5) ATLAS version, the locations of atlas and lapack libraries, and build
+   information if any. If you have ATLAS version 3.3.6 or newer, then
+   give the output of the last command in
+   ::
+
+     cd scipy/Lib/linalg
+     python setup_atlas_version.py build_ext --inplace --force
+     python -c 'import atlas_version'
+
+7) The output of the following command
+   ::
+
+     python INSTALLDIR/numpy/distutils/system_info.py
+
+   where INSTALLDIR is, for example, /usr/lib/python3.4/site-packages/.
+
+8) Feel free to add any other relevant information.
+   For example, the full output (both stdout and stderr) of the SciPy
+   installation command can be very helpful. 
Since this output can be + rather large, ask before sending it into the mailing list (or + better yet, to one of the developers, if asked). + +9) In case of failing to import extension modules, the output of + :: + + ldd /path/to/ext_module.so + + can be useful. diff --git a/project/venv/lib/python2.7/site-packages/scipy/LICENSE.txt b/project/venv/lib/python2.7/site-packages/scipy/LICENSE.txt new file mode 100644 index 0000000..cf40e2a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/LICENSE.txt @@ -0,0 +1,1073 @@ +Copyright (c) 2001, 2002 Enthought, Inc. +All rights reserved. + +Copyright (c) 2003-2017 SciPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of Enthought nor the names of the SciPy Developers + may be used to endorse or promote products derived from this software + without specific prior written permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + + + +SciPy bundles a number of libraries that are compatibly licensed. We list +these here. 
+ +Name: Numpydoc +Files: doc/sphinxext/numpydoc/* +License: 2-clause BSD + For details, see doc/sphinxext/LICENSE.txt + +Name: scipy-sphinx-theme +Files: doc/scipy-sphinx-theme/* +License: 3-clause BSD, PSF and Apache 2.0 + For details, see doc/sphinxext/LICENSE.txt + +Name: Six +Files: scipy/_lib/six.py +License: MIT + For details, see the header inside scipy/_lib/six.py + +Name: Decorator +Files: scipy/_lib/decorator.py +License: 2-clause BSD + For details, see the header inside scipy/_lib/decorator.py + +Name: ID +Files: scipy/linalg/src/id_dist/* +License: 3-clause BSD + For details, see scipy/linalg/src/id_dist/doc/doc.tex + +Name: L-BFGS-B +Files: scipy/optimize/lbfgsb/* +License: BSD license + For details, see scipy/optimize/lbfgsb/README + +Name: SuperLU +Files: scipy/sparse/linalg/dsolve/SuperLU/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/dsolve/SuperLU/License.txt + +Name: ARPACK +Files: scipy/sparse/linalg/eigen/arpack/ARPACK/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING + +Name: Qhull +Files: scipy/spatial/qhull/* +License: Qhull license (BSD-like) + For details, see scipy/spatial/qhull/COPYING.txt + +Name: Cephes +Files: scipy/special/cephes/* +License: 3-clause BSD + Distributed under 3-clause BSD license with permission from the author, + see https://lists.debian.org/debian-legal/2004/12/msg00295.html + + Cephes Math Library Release 2.8: June, 2000 + Copyright 1984, 1995, 2000 by Stephen L. Moshier + + This software is derived from the Cephes Math Library and is + incorporated herein by permission of the author. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the <organization> nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Name: Faddeeva +Files: scipy/special/Faddeeva.* +License: MIT + Copyright (c) 2012 Massachusetts Institute of Technology + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Name: qd +Files: scipy/special/cephes/dd_*.[ch] +License: modified BSD license ("BSD-LBNL-License.doc") + This work was supported by the Director, Office of Science, Division + of Mathematical, Information, and Computational Sciences of the + U.S. Department of Energy under contract numbers DE-AC03-76SF00098 and + DE-AC02-05CH11231. + + Copyright (c) 2003-2009, The Regents of the University of California, + through Lawrence Berkeley National Laboratory (subject to receipt of + any required approvals from U.S. Dept. of Energy) All rights reserved. + + 1. Redistribution and use in source and binary forms, with or + without modification, are permitted provided that the following + conditions are met: + + (1) Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. + + (2) Redistributions in binary form must reproduce the copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + (3) Neither the name of the University of California, Lawrence + Berkeley National Laboratory, U.S. Dept. of Energy nor the names + of its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + 2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 3. 
You are under no obligation whatsoever to provide any bug fixes, + patches, or upgrades to the features, functionality or performance of + the source code ("Enhancements") to anyone; however, if you choose to + make your Enhancements available either publicly, or directly to + Lawrence Berkeley National Laboratory, without imposing a separate + written license agreement for such Enhancements, then you hereby grant + the following license: a non-exclusive, royalty-free perpetual license + to install, use, modify, prepare derivative works, incorporate into + other computer software, distribute, and sublicense such enhancements + or derivative works thereof, in binary and source code form. + +---- + +This binary distribution of Scipy also bundles the following software: + + +Name: OpenBLAS +Files: .libs/libopenb*.so +Description: bundled as a dynamically linked library +Availability: https://github.com/xianyi/OpenBLAS/ +License: 3-clause BSD + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: LAPACK +Files: .libs/libopenb*.so +Description: bundled in OpenBLAS +Availability: https://github.com/xianyi/OpenBLAS/ +License 3-clause BSD + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. 
+ + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: GCC runtime library +Files: .libs/libgfortran*.so +Description: dynamically linked to files compiled with gcc +Availability: https://gcc.gnu.org/viewcvs/gcc/ +License: GPLv3 + runtime exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/> + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. 
The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. 
You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/project/venv/lib/python2.7/site-packages/scipy/THANKS.txt b/project/venv/lib/python2.7/site-packages/scipy/THANKS.txt new file mode 100644 index 0000000..d35cc75 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/THANKS.txt @@ -0,0 +1,214 @@ +SciPy is an open source library of routines for science and engineering +using Python. It is a community project sponsored by Enthought, Inc. 
+SciPy originated with code contributions by Travis Oliphant, Pearu
+Peterson, and Eric Jones. Travis Oliphant and Eric Jones each contributed
+about half the initial code. Pearu Peterson developed f2py, which is
+integral to wrapping the many Fortran libraries used in SciPy.
+
+Since then many people have contributed to SciPy through code development,
+suggestions, and financial support. Below is a partial list. If you've
+been left off, please email the "SciPy Developers List" <scipy-dev@python.org>.
+
+Please add names as needed so that we can keep up with all the contributors.
+
+Kumar Appaiah for Dolph Chebyshev window.
+Nathan Bell for sparsetools, help with scipy.sparse and scipy.splinalg.
+Robert Cimrman for UMFpack wrapper for sparse matrix module.
+David M. Cooke for improvements to system_info, and LBFGSB wrapper.
+Aric Hagberg for ARPACK wrappers, help with splinalg.eigen.
+Chuck Harris for Zeros package in optimize (1d root-finding algorithms).
+Prabhu Ramachandran for improvements to gui_thread.
+Robert Kern for improvements to stats and bug-fixes.
+Jean-Sebastien Roy for fmin_tnc code which he adapted from Stephen Nash's
+ original Fortran.
+Ed Schofield for Maximum entropy and Monte Carlo modules, help with
+ sparse matrix module.
+Travis Vaught for numerous contributions to annual conference and community
+ web-site and the initial work on stats module clean up.
+Jeff Whitaker for Mac OS X support.
+David Cournapeau for bug-fixes, refactoring of fftpack and cluster,
+ implementing the numscons and Bento build support, building Windows
+ binaries and adding single precision FFT.
+Damian Eads for hierarchical clustering, dendrogram plotting,
+ distance functions in spatial package, vq documentation.
+Anne Archibald for kd-trees and nearest neighbor in scipy.spatial.
+Pauli Virtanen for Sphinx documentation generation, online documentation
+ framework and interpolation bugfixes.
+Josef Perktold for major improvements to scipy.stats and its test suite and
+ fixes and tests to optimize.curve_fit and leastsq.
+David Morrill for getting the scoreboard test system up and running.
+Louis Luangkesorn for providing multiple tests for the stats module.
+Jochen Kupper for the zoom feature in the now-deprecated plt plotting module.
+Tiffany Kamm for working on the community web-site.
+Mark Koudritsky for maintaining the web-site.
+Andrew Straw for help with the web-page, documentation, packaging,
+ testing and work on the linalg module.
+Stefan van der Walt for numerous bug-fixes, testing and documentation.
+Jarrod Millman for release management, community coordination, and code
+ clean up.
+Pierre Gerard-Marchant for statistical masked array functionality.
+Alan McIntyre for updating SciPy tests to use the new NumPy test framework.
+Matthew Brett for work on the Matlab file IO, bug-fixes, and improvements
+ to the testing framework.
+Gary Strangman for the scipy.stats package.
+Tiziano Zito for generalized symmetric and hermitian eigenvalue problem
+ solver.
+Chris Burns for bug-fixes.
+Per Brodtkorb for improvements to stats distributions.
+Neilen Marais for testing and bug-fixing in the ARPACK wrappers.
+Johannes Loehnert and Bart Vandereycken for fixes in the linalg
+ module.
+David Huard for improvements to the interpolation interface.
+David Warde-Farley for converting the ndimage docs to ReST.
+Uwe Schmitt for wrapping non-negative least-squares.
+Ondrej Certik for Debian packaging.
+Paul Ivanov for porting Numeric-style C code to the new NumPy API.
+Ariel Rokem for contributions on percentileofscore fixes and tests. +Yosef Meller for tests in the optimization module. +Ralf Gommers for release management, code clean up and improvements + to doc-string generation. +Bruce Southey for bug-fixes and improvements to scipy.stats. +Ernest Adrogué for the Skellam distribution. +Enzo Michelangeli for a fast kendall tau test. +David Simcha for a fisher exact test. +Warren Weckesser for bug-fixes, cleanups, and several new features. +Fabian Pedregosa for linear algebra bug-fixes, new features and refactoring. +Jake Vanderplas for wrapping ARPACK's generalized and shift-invert modes + and improving its tests. +Collin RM Stocks for wrapping pivoted QR decomposition. +Martin Teichmann for improving scipy.special.ellipk & agm accuracy, + and for linalg.qr_multiply. +Jeff Armstrong for discrete state-space and linear time-invariant functionality + in scipy.signal, and sylvester/riccati/lyapunov solvers in scipy.linalg. +Mark Wiebe for fixing type casting after changes in Numpy. +Andrey Smirnov for improvements to FIR filter design. +Anthony Scopatz for help with code review and merging. +Lars Buitinck for improvements to scipy.sparse and various other modules. +Scott Sinclair for documentation improvements and some bug fixes. +Gael Varoquaux for cleanups in scipy.sparse. +Skipper Seabold for a fix to special.gammainc. +Wes McKinney for a fix to special.gamma. +Thouis (Ray) Jones for bug fixes in ndimage. +Yaroslav Halchenko for a bug fix in ndimage. +Thomas Robitaille for the IDL 'save' reader. +Fazlul Shahriar for fixes to the NetCDF3 I/O. +Chris Jordan-Squire for bug fixes, documentation improvements and + scipy.special.logit & expit. +Christoph Gohlke for many bug fixes and help with Windows specific issues. +Jacob Silterra for cwt-based peak finding in scipy.signal. +Denis Laxalde for the unified interface to minimizers in scipy.optimize. +David Fong for the sparse LSMR solver. +Andreas Hilboll for adding several new interpolation methods. +Andrew Schein for improving the numerical precision of norm.logcdf(). +Robert Gantner for improving expm() implementation. +Sebastian Werk for Halley's method in newton(). +Bjorn Forsman for contributing signal.bode(). +Tony S. Yu for ndimage improvements. +Jonathan J. Helmus for work on ndimage. +Alex Reinhart for documentation improvements. +Patrick Varilly for cKDTree improvements. +Sturla Molden for cKDTree improvements. +Nathan Crock for bug fixes. +Steven G. Johnson for Faddeeva W and erf* implementations. +Lorenzo Luengo for whosmat() in scipy.io. +Eric Moore for orthogonal polynomial recurrences in scipy.special. +Jacob Stevenson for the basinhopping optimization algorithm +Daniel Smith for sparse matrix functionality improvements +Gustav Larsson for a bug fix in convolve2d. +Alex Griffing for expm 2009, expm_multiply, expm_frechet, + trust region optimization methods, and sparse matrix onenormest + implementations, plus bugfixes. +Nils Werner for signal windowing and wavfile-writing improvements. +Kenneth L. Ho for the wrapper around the Interpolative Decomposition code. +Juan Luis Cano for refactorings in lti, sparse docs improvements and some + trivial fixes. +Pawel Chojnacki for simple documentation fixes. +Gert-Ludwig Ingold for contributions to special functions. +Joris Vankerschaver for multivariate Gaussian functionality. +Rob Falck for the SLSQP interface and linprog. +Jörg Dietrich for the k-sample Anderson Darling test. +Blake Griffith for improvements to scipy.sparse. 
+Andrew Nelson for scipy.optimize.differential_evolution. +Brian Newsom for work on ctypes multivariate integration. +Nathan Woods for work on multivariate integration. +Brianna Laugher for bug fixes. +Johannes Kulick for the Dirichlet distribution and the softmax function. +Bastian Venthur for bug fixes. +Alex Rothberg for stats.combine_pvalues. +Brandon Liu for stats.combine_pvalues. +Clark Fitzgerald for namedtuple outputs in scipy.stats. +Florian Wilhelm for usage of RandomState in scipy.stats distributions. +Robert T. McGibbon for Levinson-Durbin Toeplitz solver, Hessian information + from L-BFGS-B. +Alex Conley for the Exponentially Modified Normal distribution. +Abraham Escalante for contributions to scipy.stats +Johannes Ballé for the generalized normal distribution. +Irvin Probst (ENSTA Bretagne) for pole placement. +Ian Henriksen for Cython wrappers for BLAS and LAPACK +Fukumu Tsutsumi for bug fixes. +J.J. Green for interpolation bug fixes. +François Magimel for documentation improvements. +Josh Levy-Kramer for the log survival function of the hypergeometric distribution +Will Monroe for bug fixes. +Bernardo Sulzbach for bug fixes. +Alexander Grigorevskiy for adding extra LAPACK least-square solvers and + modifying linalg.lstsq function accordingly. +Sam Lewis for enhancements to the basinhopping module. +Tadeusz Pudlik for documentation and vectorizing spherical Bessel functions. +Philip DeBoer for wrapping random SO(N) and adding random O(N) and + correlation matrices in scipy.stats. +Tyler Reddy and Nikolai Nowaczyk for scipy.spatial.SphericalVoronoi +Bill Sacks for fixes to netcdf i/o. +Kolja Glogowski for a bug fix in scipy.special. +Surhud More for enhancing scipy.optimize.curve_fit to accept covariant errors +on data. +Antonio H. Ribeiro for implementing iirnotch, iirpeak functions and + trust-exact and trust-constr optimization methods. +Matt Haberland for the interior point linear programming method and + SciPy development videos. +Ilhan Polat for bug fixes on Riccati solvers. +Sebastiano Vigna for code in the stats package related to Kendall's tau. +John Draper for bug fixes. +Alvaro Sanchez-Gonzalez for axis-dependent modes in multidimensional filters. +Alessandro Pietro Bardelli for improvements to pdist/cdist and to related tests. +Jonathan T. Siebert for bug fixes. +Thomas Keck for adding new scipy.stats distributions used in HEP +David Nicholson for bug fixes in spectral functions. +Roman Feldbauer for improvements in scipy.sparse +Dominic Antonacci for statistics documentation. +David Hagen for the object-oriented ODE solver interface. +Arno Onken for contributions to scipy.stats. +Cathy Douglass for bug fixes in ndimage. +Adam Cox for contributions to scipy.constants. +Charles Masson for the Wasserstein and the Cramér-von Mises statistical + distances. +Felix Lenders for implementing trust-trlib method. +Dezmond Goff for adding optional out parameter to pdist/cdist +Nick R. Papior for allowing a wider choice of solvers +Sean Quinn for the Moyal distribution +Lars Grüter for contributions to peak finding in scipy.signal +Jordan Heemskerk for exposing additional windowing functions in scipy.signal. +Michael Tartre (Two Sigma Investments) for contributions to weighted distance functions. +Shinya Suzuki for scipy.stats.brunnermunzel +Graham Clenaghan for bug fixes and optimizations in scipy.stats. +Konrad Griessinger for the small sample Kendall test +Tony Xiang for improvements in scipy.sparse +Roy Zywina for contributions to scipy.fftpack. +Christian H. 
Meyer for bug fixes in subspace_angles.
+Kai Striega for improvements to the scipy.optimize.linprog simplex method.
+Josua Sassen for improvements to scipy.interpolate.Rbf
+Stiaan Gerber for a bug fix in scipy.optimize.
+Nicolas Hug for the Yeo-Johnson transformation.
+
+Institutions
+------------
+
+Enthought for providing resources and finances for development of SciPy.
+Brigham Young University for providing resources for students to work on SciPy.
+Agilent, which gave a generous donation for support of SciPy.
+UC Berkeley for providing travel money and hosting numerous sprints.
+The University of Stellenbosch for funding the development of
+ the SciKits portal.
+Google Inc. for updating documentation of hypergeometric distribution.
+Datadog Inc. for contributions to scipy.stats.
+Urthecast Inc. for exposing additional windowing functions in scipy.signal.
diff --git a/project/venv/lib/python2.7/site-packages/scipy/__config__.py b/project/venv/lib/python2.7/site-packages/scipy/__config__.py
new file mode 100644
index 0000000..6804df9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/__config__.py
@@ -0,0 +1,29 @@
+# This file is generated by /io/scipy/setup.py
+# It contains system_info results at the time of building this package.
+__all__ = ["get_info","show"]
+
+lapack_opt_info={'libraries': ['openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'f77'}
+blas_opt_info={'libraries': ['openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'f77'}
+openblas_info={'libraries': ['openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'f77'}
+system_info={}
+openblas_lapack_info={'libraries': ['openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'f77'}
+lapack_mkl_info={}
+blas_mkl_info={}
+mkl_info={}
+
+def get_info(name):
+    g = globals()
+    return g.get(name, g.get(name + "_info", {}))
+
+def show():
+    for name,info_dict in globals().items():
+        if name[0] == "_" or type(info_dict) is not type({}): continue
+        print(name + ":")
+        if not info_dict:
+            print("  NOT AVAILABLE")
+        for k,v in info_dict.items():
+            v = str(v)
+            if k == "sources" and len(v) > 200:
+                v = v[:60] + " ...\n... " + v[-60:]
+            print("    %s = %s" % (k,v))
+    
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/site-packages/scipy/__config__.pyc b/project/venv/lib/python2.7/site-packages/scipy/__config__.pyc
new file mode 100644
index 0000000..38f7108
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/__config__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/__init__.py
new file mode 100644
index 0000000..4931072
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/__init__.py
@@ -0,0 +1,123 @@
+"""
+SciPy: A scientific computing package for Python
+================================================
+
+Documentation is available in the docstrings and
+online at https://docs.scipy.org.
+
+Contents
+--------
+SciPy imports all the functions from the NumPy namespace, and in
+addition provides:
+
+Subpackages
+-----------
+Using any of these subpackages requires an explicit import. For example,
+``import scipy.cluster``.
+
+::
+
+ cluster                      --- Vector Quantization / Kmeans
+ fftpack                      --- Discrete Fourier Transform algorithms
+ integrate                    --- Integration routines
+ interpolate                  --- Interpolation Tools
+ io                           --- Data input and output
+ linalg                       --- Linear algebra routines
+ linalg.blas                  --- Wrappers to BLAS library
+ linalg.lapack                --- Wrappers to LAPACK library
+ misc                         --- Various utilities that don't have
+                                  another home.
+ ndimage                      --- n-dimensional image package
+ odr                          --- Orthogonal Distance Regression
+ optimize                     --- Optimization Tools
+ signal                       --- Signal Processing Tools
+ signal.windows               --- Window functions
+ sparse                       --- Sparse Matrices
+ sparse.linalg                --- Sparse Linear Algebra
+ sparse.linalg.dsolve         --- Linear Solvers
+ sparse.linalg.dsolve.umfpack --- Interface to the UMFPACK library
+ sparse.linalg.eigen          --- Sparse Eigenvalue Solvers
+ sparse.linalg.eigen.lobpcg   --- Locally Optimal Block Preconditioned
+                                  Conjugate Gradient Method (LOBPCG)
+ spatial                      --- Spatial data structures and algorithms
+ special                      --- Special functions
+ stats                        --- Statistical Functions
+
+Utility tools
+-------------
+::
+
+ test              --- Run scipy unittests
+ show_config       --- Show scipy build configuration
+ show_numpy_config --- Show numpy build configuration
+ __version__       --- Scipy version string
+ __numpy_version__ --- Numpy version string
+
+"""
+from __future__ import division, print_function, absolute_import
+
+__all__ = ['test']
+
+from numpy import show_config as show_numpy_config
+if show_numpy_config is None:
+    raise ImportError(
+        "Cannot import scipy when running from numpy source directory.")
+from numpy import __version__ as __numpy_version__
+
+# Import numpy symbols to scipy name space
+import numpy as _num
+linalg = None
+from numpy import *
+from numpy.random import rand, randn
+from numpy.fft import fft, ifft
+from numpy.lib.scimath import *
+
+# Allow distributors to run custom init code
+from . import _distributor_init
+
+__all__ += _num.__all__
+__all__ += ['randn', 'rand', 'fft', 'ifft']
+
+del _num
+# Remove the linalg imported from numpy so that the scipy.linalg package can be
+# imported.
+del linalg
+__all__.remove('linalg')
+
+# We first need to detect if we're being called as part of the scipy
+# setup procedure itself in a reliable manner.
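+# The flag is injected by the build script as a builtin before the package
+# is imported; a minimal sketch of that convention (assumed here from the
+# pattern NumPy and SciPy share, not quoted from scipy's setup.py):
+#
+#     import __builtin__ as builtins   # plain "builtins" on Python 3
+#     builtins.__SCIPY_SETUP__ = True
+#
+# With the builtin set, the bare-name lookup in the try block below
+# succeeds only during a source build.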
+try: + __SCIPY_SETUP__ +except NameError: + __SCIPY_SETUP__ = False + + +if __SCIPY_SETUP__: + import sys as _sys + _sys.stderr.write('Running from scipy source directory.\n') + del _sys +else: + try: + from scipy.__config__ import show as show_config + except ImportError: + msg = """Error importing scipy: you cannot import scipy while + being in scipy source directory; please exit the scipy source + tree first, and relaunch your python interpreter.""" + raise ImportError(msg) + + from scipy.version import version as __version__ + from scipy._lib._version import NumpyVersion as _NumpyVersion + if _NumpyVersion(__numpy_version__) < '1.8.2': + import warnings + warnings.warn("Numpy 1.8.2 or above is recommended for this version of " + "scipy (detected version %s)" % __numpy_version__, + UserWarning) + + del _NumpyVersion + + from scipy._lib._ccallback import LowLevelCallable + + from scipy._lib._testutils import PytestTester + test = PytestTester(__name__) + del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/__init__.pyc new file mode 100644 index 0000000..a876a7e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_build_utils/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/__init__.py new file mode 100644 index 0000000..d3e9eaf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/__init__.py @@ -0,0 +1,21 @@ +import numpy as np +from ._fortran import * +from scipy._lib._version import NumpyVersion + + +# Don't use deprecated Numpy C API. Define this to a fixed version instead of +# NPY_API_VERSION in order not to break compilation for released Scipy versions +# when Numpy introduces a new deprecation. Use in setup.py:: +# +# config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api) +# +if NumpyVersion(np.__version__) >= '1.10.0.dev': + numpy_nodepr_api = dict(define_macros=[("NPY_NO_DEPRECATED_API", + "NPY_1_9_API_VERSION")]) +else: + numpy_nodepr_api = dict() + + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/_build_utils/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/__init__.pyc new file mode 100644 index 0000000..2fe4675 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_build_utils/_fortran.py b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/_fortran.py new file mode 100644 index 0000000..8bb5fc9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/_fortran.py @@ -0,0 +1,124 @@ +import re +import os +import glob +from distutils.dep_util import newer + + +__all__ = ['needs_g77_abi_wrapper', 'split_fortran_files', + 'get_g77_abi_wrappers'] + + +def uses_mkl(info): + r_mkl = re.compile("mkl") + libraries = info.get('libraries', '') + for library in libraries: + if r_mkl.search(library): + return True + + return False + + +def needs_g77_abi_wrapper(info): + """Returns True if g77 ABI wrapper must be used.""" + return uses_mkl(info) + + +def get_g77_abi_wrappers(info): + """ + Returns file names of source files containing Fortran ABI wrapper + routines. 
+ """ + wrapper_sources = [] + + path = os.path.abspath(os.path.dirname(__file__)) + if needs_g77_abi_wrapper(info): + wrapper_sources += [ + os.path.join(path, 'src', 'wrap_g77_abi_f.f'), + os.path.join(path, 'src', 'wrap_g77_abi_c.c'), + ] + else: + wrapper_sources += [ + os.path.join(path, 'src', 'wrap_dummy_g77_abi.f'), + ] + return wrapper_sources + + +def split_fortran_files(source_dir, subroutines=None): + """Split each file in `source_dir` into separate files per subroutine. + + Parameters + ---------- + source_dir : str + Full path to directory in which sources to be split are located. + subroutines : list of str, optional + Subroutines to split. (Default: all) + + Returns + ------- + fnames : list of str + List of file names (not including any path) that were created + in `source_dir`. + + Notes + ----- + This function is useful for code that can't be compiled with g77 because of + type casting errors which do work with gfortran. + + Created files are named: ``original_name + '_subr_i' + '.f'``, with ``i`` + starting at zero and ending at ``num_subroutines_in_file - 1``. + + """ + + if subroutines is not None: + subroutines = [x.lower() for x in subroutines] + + def split_file(fname): + with open(fname, 'rb') as f: + lines = f.readlines() + subs = [] + need_split_next = True + + # find lines with SUBROUTINE statements + for ix, line in enumerate(lines): + m = re.match(b'^\\s+subroutine\\s+([a-z0-9_]+)\\s*\\(', line, re.I) + if m and line[0] not in b'Cc!*': + if subroutines is not None: + subr_name = m.group(1).decode('ascii').lower() + subr_wanted = (subr_name in subroutines) + else: + subr_wanted = True + if subr_wanted or need_split_next: + need_split_next = subr_wanted + subs.append(ix) + + # check if no split needed + if len(subs) <= 1: + return [fname] + + # write out one file per subroutine + new_fnames = [] + num_files = len(subs) + for nfile in range(num_files): + new_fname = fname[:-2] + '_subr_' + str(nfile) + '.f' + new_fnames.append(new_fname) + if not newer(fname, new_fname): + continue + with open(new_fname, 'wb') as fn: + if nfile + 1 == num_files: + fn.writelines(lines[subs[nfile]:]) + else: + fn.writelines(lines[subs[nfile]:subs[nfile+1]]) + + return new_fnames + + exclude_pattern = re.compile('_subr_[0-9]') + source_fnames = [f for f in sorted(glob.glob(os.path.join(source_dir, '*.f'))) + if not exclude_pattern.search(os.path.basename(f))] + fnames = [] + for source_fname in source_fnames: + created_files = split_file(source_fname) + if created_files is not None: + for cfile in created_files: + fnames.append(os.path.basename(cfile)) + + return fnames diff --git a/project/venv/lib/python2.7/site-packages/scipy/_build_utils/_fortran.pyc b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/_fortran.pyc new file mode 100644 index 0000000..7a776e2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/_fortran.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_build_utils/system_info.py b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/system_info.py new file mode 100644 index 0000000..7502fcf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/system_info.py @@ -0,0 +1,167 @@ +from __future__ import division, absolute_import, print_function + +import warnings + +import numpy as np +import numpy.distutils.system_info + +from numpy.distutils.system_info import (system_info, + numpy_info, + NotFoundError, + BlasNotFoundError, + LapackNotFoundError, + 
AtlasNotFoundError, + LapackSrcNotFoundError, + BlasSrcNotFoundError, + dict_append, + get_info as old_get_info) + +from scipy._lib._version import NumpyVersion + + +if NumpyVersion(np.__version__) >= "1.15.0.dev": + # For new enough numpy.distutils, the ACCELERATE=None environment + # variable in the top-level setup.py is enough, so no need to + # customize BLAS detection. + get_info = old_get_info +else: + # For numpy < 1.15.0, we need overrides. + + def get_info(name, notfound_action=0): + # Special case our custom *_opt_info + cls = {'lapack_opt': lapack_opt_info, + 'blas_opt': blas_opt_info}.get(name.lower()) + if cls is None: + return old_get_info(name, notfound_action) + return cls().get_info(notfound_action) + + # + # The following is copypaste from numpy.distutils.system_info, with + # OSX Accelerate-related parts removed. + # + + class lapack_opt_info(system_info): + + notfounderror = LapackNotFoundError + + def calc_info(self): + + lapack_mkl_info = get_info('lapack_mkl') + if lapack_mkl_info: + self.set_info(**lapack_mkl_info) + return + + openblas_info = get_info('openblas_lapack') + if openblas_info: + self.set_info(**openblas_info) + return + + openblas_info = get_info('openblas_clapack') + if openblas_info: + self.set_info(**openblas_info) + return + + atlas_info = get_info('atlas_3_10_threads') + if not atlas_info: + atlas_info = get_info('atlas_3_10') + if not atlas_info: + atlas_info = get_info('atlas_threads') + if not atlas_info: + atlas_info = get_info('atlas') + + need_lapack = 0 + need_blas = 0 + info = {} + if atlas_info: + l = atlas_info.get('define_macros', []) + if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ + or ('ATLAS_WITHOUT_LAPACK', None) in l: + need_lapack = 1 + info = atlas_info + + else: + warnings.warn(AtlasNotFoundError.__doc__, stacklevel=2) + need_blas = 1 + need_lapack = 1 + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + + if need_lapack: + lapack_info = get_info('lapack') + #lapack_info = {} ## uncomment for testing + if lapack_info: + dict_append(info, **lapack_info) + else: + warnings.warn(LapackNotFoundError.__doc__, stacklevel=2) + lapack_src_info = get_info('lapack_src') + if not lapack_src_info: + warnings.warn(LapackSrcNotFoundError.__doc__, stacklevel=2) + return + dict_append(info, libraries=[('flapack_src', lapack_src_info)]) + + if need_blas: + blas_info = get_info('blas') + if blas_info: + dict_append(info, **blas_info) + else: + warnings.warn(BlasNotFoundError.__doc__, stacklevel=2) + blas_src_info = get_info('blas_src') + if not blas_src_info: + warnings.warn(BlasSrcNotFoundError.__doc__, stacklevel=2) + return + dict_append(info, libraries=[('fblas_src', blas_src_info)]) + + self.set_info(**info) + return + + class blas_opt_info(system_info): + + notfounderror = BlasNotFoundError + + def calc_info(self): + + blas_mkl_info = get_info('blas_mkl') + if blas_mkl_info: + self.set_info(**blas_mkl_info) + return + + blis_info = get_info('blis') + if blis_info: + self.set_info(**blis_info) + return + + openblas_info = get_info('openblas') + if openblas_info: + self.set_info(**openblas_info) + return + + atlas_info = get_info('atlas_3_10_blas_threads') + if not atlas_info: + atlas_info = get_info('atlas_3_10_blas') + if not atlas_info: + atlas_info = get_info('atlas_blas_threads') + if not atlas_info: + atlas_info = get_info('atlas_blas') + + need_blas = 0 + info = {} + if atlas_info: + info = atlas_info + else: + warnings.warn(AtlasNotFoundError.__doc__, stacklevel=2) + need_blas = 1 + dict_append(info, 
define_macros=[('NO_ATLAS_INFO', 1)]) + + if need_blas: + blas_info = get_info('blas') + if blas_info: + dict_append(info, **blas_info) + else: + warnings.warn(BlasNotFoundError.__doc__, stacklevel=2) + blas_src_info = get_info('blas_src') + if not blas_src_info: + warnings.warn(BlasSrcNotFoundError.__doc__, stacklevel=2) + return + dict_append(info, libraries=[('fblas_src', blas_src_info)]) + + self.set_info(**info) + return diff --git a/project/venv/lib/python2.7/site-packages/scipy/_build_utils/system_info.pyc b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/system_info.pyc new file mode 100644 index 0000000..1e98e59 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_build_utils/system_info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_distributor_init.py b/project/venv/lib/python2.7/site-packages/scipy/_distributor_init.py new file mode 100644 index 0000000..b02e9fb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_distributor_init.py @@ -0,0 +1,10 @@ +""" Distributor init file + +Distributors: you can add custom code here to support particular distributions +of scipy. + +For example, this is a good place to put any checks for hardware requirements. + +The scipy standard source distribution will not put code in this file, so you +can safely replace this file with your own version. +""" diff --git a/project/venv/lib/python2.7/site-packages/scipy/_distributor_init.pyc b/project/venv/lib/python2.7/site-packages/scipy/_distributor_init.pyc new file mode 100644 index 0000000..99eed5b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_distributor_init.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/__init__.py new file mode 100644 index 0000000..63f690d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/__init__.py @@ -0,0 +1,16 @@ +""" +Module containing private utility functions +=========================================== + +The ``scipy._lib`` namespace is empty (for now). Tests for all +utilities in submodules of ``_lib`` can be run with:: + + from scipy import _lib + _lib.test() + +""" +from __future__ import division, print_function, absolute_import + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/__init__.pyc new file mode 100644 index 0000000..75ddfc8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_ccallback.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/_ccallback.py new file mode 100644 index 0000000..94e6f22 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/_ccallback.py @@ -0,0 +1,227 @@ +from . import _ccallback_c + +import ctypes + +PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0] + +ffi = None + +class CData(object): + pass + +def _import_cffi(): + global ffi, CData + + if ffi is not None: + return + + try: + import cffi + ffi = cffi.FFI() + CData = ffi.CData + except ImportError: + ffi = False + + +class LowLevelCallable(tuple): + """ + Low-level callback function. + + Parameters + ---------- + function : {PyCapsule, ctypes function pointer, cffi function pointer} + Low-level callback function. 
+ user_data : {PyCapsule, ctypes void pointer, cffi void pointer} + User data to pass on to the callback function. + signature : str, optional + Signature of the function. If omitted, determined from *function*, + if possible. + + Attributes + ---------- + function + Callback function given + user_data + User data given + signature + Signature of the function. + + Methods + ------- + from_cython + Class method for constructing callables from Cython C-exported + functions. + + Notes + ----- + The argument ``function`` can be one of: + + - PyCapsule, whose name contains the C function signature + - ctypes function pointer + - cffi function pointer + + The signature of the low-level callback must match one of those expected + by the routine it is passed to. + + If constructing low-level functions from a PyCapsule, the name of the + capsule must be the corresponding signature, in the format:: + + return_type (arg1_type, arg2_type, ...) + + For example:: + + "void (double)" + "double (double, int *, void *)" + + The context of a PyCapsule passed in as ``function`` is used as ``user_data``, + if an explicit value for `user_data` was not given. + + """ + + # Make the class immutable + __slots__ = () + + def __new__(cls, function, user_data=None, signature=None): + # We need to hold a reference to the function & user data, + # to prevent them going out of scope + item = cls._parse_callback(function, user_data, signature) + return tuple.__new__(cls, (item, function, user_data)) + + def __repr__(self): + return "LowLevelCallable({!r}, {!r})".format(self.function, self.user_data) + + @property + def function(self): + return tuple.__getitem__(self, 1) + + @property + def user_data(self): + return tuple.__getitem__(self, 2) + + @property + def signature(self): + return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0)) + + def __getitem__(self, idx): + raise ValueError() + + @classmethod + def from_cython(cls, module, name, user_data=None, signature=None): + """ + Create a low-level callback function from an exported Cython function. + + Parameters + ---------- + module : module + Cython module where the exported function resides + name : str + Name of the exported function + user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional + User data to pass on to the callback function. + signature : str, optional + Signature of the function. If omitted, determined from *function*. 
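+
+        Examples
+        --------
+        A sketch of loading a callback exported by a (hypothetical) Cython
+        module ``my_mod`` through its ``__pyx_capi__`` dict::
+
+            from scipy import LowLevelCallable
+            import my_mod
+            callback = LowLevelCallable.from_cython(my_mod, 'my_func')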
+ + """ + try: + function = module.__pyx_capi__[name] + except AttributeError: + raise ValueError("Given module is not a Cython module with __pyx_capi__ attribute") + except KeyError: + raise ValueError("No function {!r} found in __pyx_capi__ of the module".format(name)) + return cls(function, user_data, signature) + + @classmethod + def _parse_callback(cls, obj, user_data=None, signature=None): + _import_cffi() + + if isinstance(obj, LowLevelCallable): + func = tuple.__getitem__(obj, 0) + elif isinstance(obj, PyCFuncPtr): + func, signature = _get_ctypes_func(obj, signature) + elif isinstance(obj, CData): + func, signature = _get_cffi_func(obj, signature) + elif _ccallback_c.check_capsule(obj): + func = obj + else: + raise ValueError("Given input is not a callable or a low-level callable (pycapsule/ctypes/cffi)") + + if isinstance(user_data, ctypes.c_void_p): + context = _get_ctypes_data(user_data) + elif isinstance(user_data, CData): + context = _get_cffi_data(user_data) + elif user_data is None: + context = 0 + elif _ccallback_c.check_capsule(user_data): + context = user_data + else: + raise ValueError("Given user data is not a valid low-level void* pointer (pycapsule/ctypes/cffi)") + + return _ccallback_c.get_raw_capsule(func, signature, context) + + +# +# ctypes helpers +# + +def _get_ctypes_func(func, signature=None): + # Get function pointer + func_ptr = ctypes.cast(func, ctypes.c_void_p).value + + # Construct function signature + if signature is None: + signature = _typename_from_ctypes(func.restype) + " (" + for j, arg in enumerate(func.argtypes): + if j == 0: + signature += _typename_from_ctypes(arg) + else: + signature += ", " + _typename_from_ctypes(arg) + signature += ")" + + return func_ptr, signature + + +def _typename_from_ctypes(item): + if item is None: + return "void" + elif item is ctypes.c_void_p: + return "void *" + + name = item.__name__ + + pointer_level = 0 + while name.startswith("LP_"): + pointer_level += 1 + name = name[3:] + + if name.startswith('c_'): + name = name[2:] + + if pointer_level > 0: + name += " " + "*"*pointer_level + + return name + + +def _get_ctypes_data(data): + # Get voidp pointer + return ctypes.cast(data, ctypes.c_void_p).value + + +# +# CFFI helpers +# + +def _get_cffi_func(func, signature=None): + # Get function pointer + func_ptr = ffi.cast('uintptr_t', func) + + # Get signature + if signature is None: + signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ') + + return func_ptr, signature + + +def _get_cffi_data(data): + # Get pointer + return ffi.cast('uintptr_t', data) diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_ccallback.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/_ccallback.pyc new file mode 100644 index 0000000..7aca745 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_ccallback.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_ccallback_c.so b/project/venv/lib/python2.7/site-packages/scipy/_lib/_ccallback_c.so new file mode 100755 index 0000000..0a83e68 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_ccallback_c.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_fpumode.so b/project/venv/lib/python2.7/site-packages/scipy/_lib/_fpumode.so new file mode 100755 index 0000000..23d4280 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_fpumode.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_gcutils.py 
b/project/venv/lib/python2.7/site-packages/scipy/_lib/_gcutils.py new file mode 100644 index 0000000..3f648a0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/_gcutils.py @@ -0,0 +1,105 @@ +""" +Module for testing automatic garbage collection of objects + +.. autosummary:: + :toctree: generated/ + + set_gc_state - enable or disable garbage collection + gc_state - context manager for given state of garbage collector + assert_deallocated - context manager to check for circular references on object + +""" +import weakref +import gc +import sys + +from contextlib import contextmanager + +__all__ = ['set_gc_state', 'gc_state', 'assert_deallocated'] + + +IS_PYPY = '__pypy__' in sys.modules + + +class ReferenceError(AssertionError): + pass + + +def set_gc_state(state): + """ Set status of garbage collector """ + if gc.isenabled() == state: + return + if state: + gc.enable() + else: + gc.disable() + + +@contextmanager +def gc_state(state): + """ Context manager to set state of garbage collector to `state` + + Parameters + ---------- + state : bool + True for gc enabled, False for disabled + + Examples + -------- + >>> with gc_state(False): + ... assert not gc.isenabled() + >>> with gc_state(True): + ... assert gc.isenabled() + """ + orig_state = gc.isenabled() + set_gc_state(state) + yield + set_gc_state(orig_state) + + +@contextmanager +def assert_deallocated(func, *args, **kwargs): + """Context manager to check that object is deallocated + + This is useful for checking that an object can be freed directly by + reference counting, without requiring gc to break reference cycles. + GC is disabled inside the context manager. + + This check is not available on PyPy. + + Parameters + ---------- + func : callable + Callable to create object to check + \\*args : sequence + positional arguments to `func` in order to create object to check + \\*\\*kwargs : dict + keyword arguments to `func` in order to create object to check + + Examples + -------- + >>> class C(object): pass + >>> with assert_deallocated(C) as c: + ... # do something + ... del c + + >>> class C(object): + ... def __init__(self): + ... self._circular = self # Make circular reference + >>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL + ... # do something + ... del c + Traceback (most recent call last): + ... + ReferenceError: Remaining reference(s) to object + """ + if IS_PYPY: + raise RuntimeError("assert_deallocated is unavailable on PyPy") + + with gc_state(False): + obj = func(*args, **kwargs) + ref = weakref.ref(obj) + yield obj + del obj + if ref() is not None: + raise ReferenceError("Remaining reference(s) to object") diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_gcutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/_gcutils.pyc new file mode 100644 index 0000000..cd74e86 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_gcutils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_numpy_compat.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/_numpy_compat.py new file mode 100644 index 0000000..30d4092 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/_numpy_compat.py @@ -0,0 +1,781 @@ +"""Functions copypasted from newer versions of numpy. 
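+
+Most helpers below are gated on ``NumpyVersion(np.__version__)`` so that,
+on new enough NumPy, the upstream implementation is re-exported; the
+remainder fall back to a local copy via try/except on the import.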
+
+"""
+from __future__ import division, print_function, absolute_import
+
+import warnings
+from warnings import WarningMessage
+import re
+from functools import wraps
+import numpy as np
+
+from scipy._lib._version import NumpyVersion
+
+
+if NumpyVersion(np.__version__) > '1.7.0.dev':
+    _assert_warns = np.testing.assert_warns
+else:
+    def _assert_warns(warning_class, func, *args, **kw):
+        r"""
+        Fail unless the given callable throws the specified warning.
+
+        This definition is copypasted from numpy 1.9.0.dev.
+        The version in earlier numpy returns None.
+
+        Parameters
+        ----------
+        warning_class : class
+            The class defining the warning that `func` is expected to throw.
+        func : callable
+            The callable to test.
+        *args : Arguments
+            Arguments passed to `func`.
+        **kwargs : Kwargs
+            Keyword arguments passed to `func`.
+
+        Returns
+        -------
+        The value returned by `func`.
+
+        """
+        with warnings.catch_warnings(record=True) as l:
+            warnings.simplefilter('always')
+            result = func(*args, **kw)
+            if not len(l) > 0:
+                raise AssertionError("No warning raised when calling %s"
+                                     % func.__name__)
+            if l[0].category is not warning_class:
+                raise AssertionError("First warning for %s is not a "
+                                     "%s (is %s)" % (func.__name__, warning_class, l[0]))
+        return result
+
+
+if NumpyVersion(np.__version__) >= '1.10.0':
+    from numpy import broadcast_to
+else:
+    # Definition of `broadcast_to` from numpy 1.10.0.
+
+    def _maybe_view_as_subclass(original_array, new_array):
+        if type(original_array) is not type(new_array):
+            # if input was an ndarray subclass and subclasses were OK,
+            # then view the result as that subclass.
+            new_array = new_array.view(type=type(original_array))
+        # Since we have done something akin to a view from original_array, we
+        # should let the subclass finalize (if it has it implemented, i.e., is
+        # not None).
+        if new_array.__array_finalize__:
+            new_array.__array_finalize__(original_array)
+        return new_array
+
+    def _broadcast_to(array, shape, subok, readonly):
+        shape = tuple(shape) if np.iterable(shape) else (shape,)
+        array = np.array(array, copy=False, subok=subok)
+        if not shape and array.shape:
+            raise ValueError('cannot broadcast a non-scalar to a scalar array')
+        if any(size < 0 for size in shape):
+            raise ValueError('all elements of broadcast shape must be non-'
+                             'negative')
+        broadcast = np.nditer(
+            (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],
+            op_flags=['readonly'], itershape=shape, order='C').itviews[0]
+        result = _maybe_view_as_subclass(array, broadcast)
+        if not readonly and array.flags.writeable:
+            result.flags.writeable = True
+        return result
+
+    def broadcast_to(array, shape, subok=False):
+        return _broadcast_to(array, shape, subok=subok, readonly=True)
+
+
+if NumpyVersion(np.__version__) >= '1.11.0':
+    def get_randint(random_state):
+        return random_state.randint
+else:
+    # In NumPy versions previous to 1.11.0, the randint function and the
+    # randint method of RandomState only work with int32 values.
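+    # A sketch of the intended use (names illustrative, not from upstream):
+    # the patched function clamps the requested bounds into the int32 range
+    # and casts the result back to the requested dtype, e.g.
+    #
+    #     randint = get_randint(np.random.RandomState(0))
+    #     values = randint(0, 10, size=5, dtype=np.int64)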
+ def get_randint(random_state): + def randint_patched(low, high, size, dtype=np.int32): + low = max(low, np.iinfo(dtype).min, np.iinfo(np.int32).min) + high = min(high, np.iinfo(dtype).max, np.iinfo(np.int32).max) + integers = random_state.randint(low, high=high, size=size) + return integers.astype(dtype, copy=False) + return randint_patched + + +if NumpyVersion(np.__version__) >= '1.9.0': + from numpy import unique +else: + # the return_counts keyword was added in 1.9.0 + def unique(ar, return_index=False, return_inverse=False, return_counts=False): + """ + Find the unique elements of an array. + + Returns the sorted unique elements of an array. There are three optional + outputs in addition to the unique elements: the indices of the input array + that give the unique values, the indices of the unique array that + reconstruct the input array, and the number of times each unique value + comes up in the input array. + + Parameters + ---------- + ar : array_like + Input array. This will be flattened if it is not already 1-D. + return_index : bool, optional + If True, also return the indices of `ar` that result in the unique + array. + return_inverse : bool, optional + If True, also return the indices of the unique array that can be used + to reconstruct `ar`. + return_counts : bool, optional + If True, also return the number of times each unique value comes up + in `ar`. + + .. versionadded:: 1.9.0 + + Returns + ------- + unique : ndarray + The sorted unique values. + unique_indices : ndarray, optional + The indices of the first occurrences of the unique values in the + (flattened) original array. Only provided if `return_index` is True. + unique_inverse : ndarray, optional + The indices to reconstruct the (flattened) original array from the + unique array. Only provided if `return_inverse` is True. + unique_counts : ndarray, optional + The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. + + .. versionadded:: 1.9.0 + + Notes + ----- + Taken over from numpy 1.12.0-dev (c8408bf9c). Omitted examples, + see numpy documentation for those. + + """ + ar = np.asanyarray(ar).flatten() + + optional_indices = return_index or return_inverse + optional_returns = optional_indices or return_counts + + if ar.size == 0: + if not optional_returns: + ret = ar + else: + ret = (ar,) + if return_index: + ret += (np.empty(0, np.bool),) + if return_inverse: + ret += (np.empty(0, np.bool),) + if return_counts: + ret += (np.empty(0, np.intp),) + return ret + + if optional_indices: + perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') + aux = ar[perm] + else: + ar.sort() + aux = ar + flag = np.concatenate(([True], aux[1:] != aux[:-1])) + + if not optional_returns: + ret = aux[flag] + else: + ret = (aux[flag],) + if return_index: + ret += (perm[flag],) + if return_inverse: + iflag = np.cumsum(flag) - 1 + inv_idx = np.empty(ar.shape, dtype=np.intp) + inv_idx[perm] = iflag + ret += (inv_idx,) + if return_counts: + idx = np.concatenate(np.nonzero(flag) + ([ar.size],)) + ret += (np.diff(idx),) + return ret + + +if NumpyVersion(np.__version__) > '1.12.0.dev': + polyvalfromroots = np.polynomial.polynomial.polyvalfromroots +else: + def polyvalfromroots(x, r, tensor=True): + r""" + Evaluate a polynomial specified by its roots at points x. + + This function is copypasted from numpy 1.12.0.dev. + + If `r` is of length `N`, this function returns the value + + .. 
math:: p(x) = \prod_{n=1}^{N} (x - r_n)
+
+        The parameter `x` is converted to an array only if it is a tuple or a
+        list, otherwise it is treated as a scalar. In either case, either `x`
+        or its elements must support multiplication and addition both with
+        themselves and with the elements of `r`.
+
+        If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If
+        `r` is multidimensional, then the shape of the result depends on the
+        value of `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:]
+        + x.shape; that is, each polynomial is evaluated at every value of `x`.
+        If `tensor` is ``False``, the shape will be r.shape[1:]; that is, each
+        polynomial is evaluated only for the corresponding broadcast value of
+        `x`. Note that scalars have shape (,).
+
+        Parameters
+        ----------
+        x : array_like, compatible object
+            If `x` is a list or tuple, it is converted to an ndarray, otherwise
+            it is left unchanged and treated as a scalar. In either case, `x`
+            or its elements must support addition and multiplication with
+            themselves and with the elements of `r`.
+        r : array_like
+            Array of roots. If `r` is multidimensional the first index is the
+            root index, while the remaining indices enumerate multiple
+            polynomials. For instance, in the two dimensional case the roots of
+            each polynomial may be thought of as stored in the columns of `r`.
+        tensor : boolean, optional
+            If True, the shape of the roots array is extended with ones on the
+            right, one for each dimension of `x`. Scalars have dimension 0 for
+            this action. The result is that every column of coefficients in `r`
+            is evaluated for every element of `x`. If False, `x` is broadcast
+            over the columns of `r` for the evaluation. This keyword is useful
+            when `r` is multidimensional. The default value is True.
+
+        Returns
+        -------
+        values : ndarray, compatible object
+            The shape of the returned array is described above.
+
+        See Also
+        --------
+        polyroots, polyfromroots, polyval
+
+        Examples
+        --------
+        >>> from numpy.polynomial.polynomial import polyvalfromroots
+        >>> polyvalfromroots(1, [1,2,3])
+        0.0
+        >>> a = np.arange(4).reshape(2,2)
+        >>> a
+        array([[0, 1],
+               [2, 3]])
+        >>> polyvalfromroots(a, [-1, 0, 1])
+        array([[ -0.,   0.],
+               [  6.,  24.]])
+        >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients
+        >>> r # each column of r defines one polynomial
+        array([[-2, -1],
+               [ 0,  1]])
+        >>> b = [-2, 1]
+        >>> polyvalfromroots(b, r, tensor=True)
+        array([[-0.,  3.],
+               [ 3.,  0.]])
+        >>> polyvalfromroots(b, r, tensor=False)
+        array([-0.,  0.])
+        """
+        r = np.array(r, ndmin=1, copy=0)
+        if r.dtype.char in '?bBhHiIlLqQpP':
+            r = r.astype(np.double)
+        if isinstance(x, (tuple, list)):
+            x = np.asarray(x)
+        if isinstance(x, np.ndarray):
+            if tensor:
+                r = r.reshape(r.shape + (1,)*x.ndim)
+            elif x.ndim >= r.ndim:
+                raise ValueError("x.ndim must be < r.ndim when tensor == "
+                                 "False")
+        return np.prod(x - r, axis=0)
+
+
+try:
+    from numpy.testing import suppress_warnings
+except ImportError:
+    class suppress_warnings(object):
+        """
+        Context manager and decorator doing much the same as
+        ``warnings.catch_warnings``.
+
+        However, it also provides a filter mechanism to work around
+        https://bugs.python.org/issue4180.
+
+        This bug causes Python before 3.4 to not reliably show warnings again
+        after they have been ignored once (even within catch_warnings). It
+        means that no "ignore" filter can be used easily, since following
+        tests might need to see the warning.
Additionally it allows easier
+        specificity for testing warnings and can be nested.
+
+        Parameters
+        ----------
+        forwarding_rule : str, optional
+            One of "always", "once", "module", or "location". Analogous to
+            the usual warnings module filter mode, it is useful to reduce
+            noise mostly on the outermost level. Unsuppressed and unrecorded
+            warnings will be forwarded based on this rule. Defaults to "always".
+            "location" is equivalent to the warnings "default", match by exact
+            location the warning originated from.
+
+        Notes
+        -----
+        Filters added inside the context manager will be discarded again
+        when leaving it. Upon entering all filters defined outside a
+        context will be applied automatically.
+
+        When a recording filter is added, matching warnings are stored in the
+        ``log`` attribute as well as in the list returned by ``record``.
+
+        If filters are added and the ``module`` keyword is given, the
+        warning registry of this module will additionally be cleared when
+        applying it, entering the context, or exiting it. This could cause
+        warnings to appear a second time after leaving the context if they
+        were configured to be printed once (default) and were already
+        printed before the context was entered.
+
+        Nesting this context manager will work as expected when the
+        forwarding rule is "always" (default). Unfiltered and unrecorded
+        warnings will be passed out and be matched by the outer level.
+        On the outermost level they will be printed (or caught by another
+        warnings context). The forwarding rule argument can modify this
+        behaviour.
+
+        Like ``catch_warnings`` this context manager is not threadsafe.
+
+        Examples
+        --------
+        >>> with suppress_warnings() as sup:
+        ...     sup.filter(DeprecationWarning, "Some text")
+        ...     sup.filter(module=np.ma.core)
+        ...     log = sup.record(FutureWarning, "Does this occur?")
+        ...     command_giving_warnings()
+        ...     # The FutureWarning was given once, the filtered warnings were
+        ...     # ignored. All other warnings abide outside settings (may be
+        ...     # printed/error)
+        ...     assert_(len(log) == 1)
+        ...     assert_(len(sup.log) == 1)  # also stored in log attribute
+
+        Or as a decorator:
+
+        >>> sup = suppress_warnings()
+        >>> sup.filter(module=np.ma.core)  # module must match exact
+        >>> @sup
+        ... def some_function():
+        ...     # do something which causes a warning in np.ma.core
+        ...     pass
+        """
+        def __init__(self, forwarding_rule="always"):
+            self._entered = False
+
+            # Suppressions are either instance-level or defined inside one
+            # with block:
+            self._suppressions = []
+
+            if forwarding_rule not in {"always", "module", "once", "location"}:
+                raise ValueError("unsupported forwarding rule.")
+            self._forwarding_rule = forwarding_rule
+
+        def _clear_registries(self):
+            if hasattr(warnings, "_filters_mutated"):
+                # clearing the registry should not be necessary on new pythons,
+                # instead the filters should be mutated.
+                warnings._filters_mutated()
+                return
+            # Simply clear the registry, this should normally be harmless,
+            # note that on new pythons it would be invalidated anyway.
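+            # (Clearing the per-module __warningregistry__ matters because of
+            # the bpo-4180 behaviour described in the class docstring: before
+            # Python 3.4, a warning that was ignored once may otherwise never
+            # be shown again.)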
+ for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings( + "always", category=category, message=message) + else: + module_regex = module.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=category, message=message, + module=module_regex) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, + record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. 
+ """ + return self._filter(category=category, message=message, module=module, + record=True) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings( + "always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=cat, message=mess, + module=module_regex) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning(self, message, category, filename, lineno, + *args, **kwargs): + use_warnmsg = kwargs.pop("use_warnmsg", None) + for cat, _, pattern, mod, rec in ( + self._suppressions + self._tmp_suppressions)[::-1]: + if (issubclass(category, cat) and + pattern.match(message.args[0]) is not None): + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. + elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, + *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, + **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. + """ + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func + +if NumpyVersion(np.__version__) >= '1.10.0': + from numpy import cov +else: + from numpy import array, average, dot + + def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, + aweights=None): + """ + Estimate a covariance matrix, given data and weights. + + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. 
The element :math:`C_{ii}` is the variance
+        of :math:`x_i`.
+
+        See the notes for an outline of the algorithm.
+
+        Parameters
+        ----------
+        m : array_like
+            A 1-D or 2-D array containing multiple variables and observations.
+            Each row of `m` represents a variable, and each column a single
+            observation of all those variables. Also see `rowvar` below.
+        y : array_like, optional
+            An additional set of variables and observations. `y` has the same form
+            as that of `m`.
+        rowvar : bool, optional
+            If `rowvar` is True (default), then each row represents a
+            variable, with observations in the columns. Otherwise, the relationship
+            is transposed: each column represents a variable, while the rows
+            contain observations.
+        bias : bool, optional
+            Default normalization (False) is by ``(N - 1)``, where ``N`` is the
+            number of observations given (unbiased estimate). If `bias` is True,
+            then normalization is by ``N``. These values can be overridden by using
+            the keyword ``ddof`` in numpy versions >= 1.5.
+        ddof : int, optional
+            If not ``None`` the default value implied by `bias` is overridden.
+            Note that ``ddof=1`` will return the unbiased estimate, even if both
+            `fweights` and `aweights` are specified, and ``ddof=0`` will return
+            the simple average. See the notes for the details. The default value
+            is ``None``.
+
+            .. versionadded:: 1.5
+        fweights : array_like, int, optional
+            1-D array of integer frequency weights; the number of times each
+            observation vector should be repeated.
+
+            .. versionadded:: 1.10
+        aweights : array_like, optional
+            1-D array of observation vector weights. These relative weights are
+            typically large for observations considered "important" and smaller for
+            observations considered less "important". If ``ddof=0`` the array of
+            weights can be used to assign probabilities to observation vectors.
+
+            .. versionadded:: 1.10
+
+        Returns
+        -------
+        out : ndarray
+            The covariance matrix of the variables.
+
+        See Also
+        --------
+        corrcoef : Normalized covariance matrix
+
+        Notes
+        -----
+        Assume that the observations are in the columns of the observation
+        array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
+        steps to compute the weighted covariance are as follows::
+
+            >>> w = f * a
+            >>> v1 = np.sum(w)
+            >>> v2 = np.sum(w * a)
+            >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
+            >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
+
+        Note that when ``a == 1``, the normalization factor
+        ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
+        as it should.
+
+        Examples
+        --------
+        Consider two variables, :math:`x_0` and :math:`x_1`, which
+        correlate perfectly, but in opposite directions:
+
+        >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
+        >>> x
+        array([[0, 1, 2],
+               [2, 1, 0]])
+
+        Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
+        matrix shows this clearly:
+
+        >>> np.cov(x)
+        array([[ 1., -1.],
+               [-1.,  1.]])
+
+        Note that element :math:`C_{0,1}`, which shows the correlation between
+        :math:`x_0` and :math:`x_1`, is negative.
+ + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.stack((x, y), axis=0) + >>> print(np.cov(X)) + [[ 11.71 -4.286 ] + [ -4.286 2.14413333]] + >>> print(np.cov(x, y)) + [[ 11.71 -4.286 ] + [ -4.286 2.14413333]] + >>> print(np.cov(x)) + 11.71 + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError( + "ddof must be integer") + + # Handles complex arrays too + m = np.asarray(m) + if m.ndim > 2: + raise ValueError("m has more than 2 dimensions") + + if y is None: + dtype = np.result_type(m, np.float64) + else: + y = np.asarray(y) + if y.ndim > 2: + raise ValueError("y has more than 2 dimensions") + dtype = np.result_type(m, y, np.float64) + + X = array(m, ndmin=2, dtype=dtype) + if not rowvar and X.shape[0] != 1: + X = X.T + if X.shape[0] == 0: + return np.array([]).reshape(0, 0) + if y is not None: + y = array(y, copy=False, ndmin=2, dtype=dtype) + if not rowvar and y.shape[0] != 1: + y = y.T + X = np.concatenate((X, y), axis=0) + + if ddof is None: + if bias == 0: + ddof = 1 + else: + ddof = 0 + + # Get the product of frequencies and weights + w = None + if fweights is not None: + fweights = np.asarray(fweights, dtype=float) + if not np.all(fweights == np.around(fweights)): + raise TypeError( + "fweights must be integer") + if fweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional fweights") + if fweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and fweights") + if any(fweights < 0): + raise ValueError( + "fweights cannot be negative") + w = fweights + if aweights is not None: + aweights = np.asarray(aweights, dtype=float) + if aweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional aweights") + if aweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and aweights") + if any(aweights < 0): + raise ValueError( + "aweights cannot be negative") + if w is None: + w = aweights + else: + w *= aweights + + avg, w_sum = average(X, axis=1, weights=w, returned=True) + w_sum = w_sum[0] + + # Determine the normalization + if w is None: + fact = X.shape[1] - ddof + elif ddof == 0: + fact = w_sum + elif aweights is None: + fact = w_sum - ddof + else: + fact = w_sum - ddof*sum(w*aweights)/w_sum + + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=2) + fact = 0.0 + + X -= avg[:, None] + if w is None: + X_T = X.T + else: + X_T = (X*w).T + c = dot(X, X_T.conj()) + c *= 1. / np.float64(fact) + return c.squeeze() diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_numpy_compat.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/_numpy_compat.pyc new file mode 100644 index 0000000..9419501 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_numpy_compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_test_ccallback.so b/project/venv/lib/python2.7/site-packages/scipy/_lib/_test_ccallback.so new file mode 100755 index 0000000..6a37010 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_test_ccallback.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_testutils.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/_testutils.py new file mode 100644 index 0000000..1794e51 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/_testutils.py @@ -0,0 +1,128 @@ +""" +Generic test utilities. 
+ +""" + +from __future__ import division, print_function, absolute_import + +import os +import re +import sys + + +__all__ = ['PytestTester', 'check_free_memory'] + + +class FPUModeChangeWarning(RuntimeWarning): + """Warning about FPU mode change""" + pass + + +class PytestTester(object): + """ + Pytest test runner entry point. + """ + + def __init__(self, module_name): + self.module_name = module_name + + def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False, + coverage=False, tests=None): + import pytest + + module = sys.modules[self.module_name] + module_path = os.path.abspath(module.__path__[0]) + + pytest_args = ['-l'] + + if doctests: + raise ValueError("Doctests not supported") + + if extra_argv: + pytest_args += list(extra_argv) + + if verbose and int(verbose) > 1: + pytest_args += ["-" + "v"*(int(verbose)-1)] + + if coverage: + pytest_args += ["--cov=" + module_path] + + if label == "fast": + pytest_args += ["-m", "not slow"] + elif label != "full": + pytest_args += ["-m", label] + + if tests is None: + tests = [self.module_name] + + pytest_args += ['--pyargs'] + list(tests) + + try: + code = pytest.main(pytest_args) + except SystemExit as exc: + code = exc.code + + return (code == 0) + + +def check_free_memory(free_mb): + """ + Check *free_mb* of memory is available, otherwise do pytest.skip + """ + import pytest + + try: + mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM']) + msg = '{0} MB memory required, but environment SCIPY_AVAILABLE_MEM={1}'.format( + free_mb, os.environ['SCIPY_AVAILABLE_MEM']) + except KeyError: + mem_free = _get_mem_available() + if mem_free is None: + pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM " + "variable to free memory in MB to run the test.") + msg = '{0} MB memory required, but {1} MB available'.format( + free_mb, mem_free/1e6) + + if mem_free < free_mb * 1e6: + pytest.skip(msg) + + +def _parse_size(size_str): + suffixes = {'': 1e6, + 'b': 1.0, + 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12, + 'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12, + 'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4} + m = re.match(r'^\s*(\d+)\s*({0})\s*$'.format('|'.join(suffixes.keys())), + size_str, + re.I) + if not m or m.group(2) not in suffixes: + raise ValueError("Invalid size string") + + return float(m.group(1)) * suffixes[m.group(2)] + + +def _get_mem_available(): + """ + Get information about memory available, not counting swap. 
+    """
+    try:
+        import psutil
+        return psutil.virtual_memory().available
+    except (ImportError, AttributeError):
+        pass
+
+    if sys.platform.startswith('linux'):
+        info = {}
+        with open('/proc/meminfo', 'r') as f:
+            for line in f:
+                p = line.split()
+                info[p[0].strip(':').lower()] = float(p[1]) * 1e3
+
+        if 'memavailable' in info:
+            # Linux >= 3.14
+            return info['memavailable']
+        else:
+            return info['memfree'] + info['cached']
+
+    return None
diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_testutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/_testutils.pyc
new file mode 100644
index 0000000..4105351
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_testutils.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_threadsafety.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/_threadsafety.py
new file mode 100644
index 0000000..504f1d1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/_threadsafety.py
@@ -0,0 +1,60 @@
+from __future__ import division, print_function, absolute_import
+
+import threading
+
+import scipy._lib.decorator
+
+
+__all__ = ['ReentrancyError', 'ReentrancyLock', 'non_reentrant']
+
+
+class ReentrancyError(RuntimeError):
+    pass
+
+
+class ReentrancyLock(object):
+    """
+    Threading lock that raises an exception for reentrant calls.
+
+    Calls from different threads are serialized, and nested calls from the
+    same thread result in an error.
+
+    The object can be used as a context manager, or to decorate functions
+    via the decorate() method.
+
+    """
+
+    def __init__(self, err_msg):
+        self._rlock = threading.RLock()
+        self._entered = False
+        self._err_msg = err_msg
+
+    def __enter__(self):
+        self._rlock.acquire()
+        if self._entered:
+            self._rlock.release()
+            raise ReentrancyError(self._err_msg)
+        self._entered = True
+
+    def __exit__(self, type, value, traceback):
+        self._entered = False
+        self._rlock.release()
+
+    def decorate(self, func):
+        def caller(func, *a, **kw):
+            with self:
+                return func(*a, **kw)
+        return scipy._lib.decorator.decorate(func, caller)
+
+
+def non_reentrant(err_msg=None):
+    """
+    Decorate a function with a threading lock and prevent reentrant calls.
+    """
+    def decorator(func):
+        msg = err_msg
+        if msg is None:
+            msg = "%s is not re-entrant" % func.__name__
+        lock = ReentrancyLock(msg)
+        return lock.decorate(func)
+    return decorator
diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_threadsafety.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/_threadsafety.pyc
new file mode 100644
index 0000000..5ba2e96
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_threadsafety.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_tmpdirs.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/_tmpdirs.py
new file mode 100644
index 0000000..8e98fd3
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/_tmpdirs.py
@@ -0,0 +1,87 @@
+''' Contexts for *with* statement providing temporary directories
+'''
+from __future__ import division, print_function, absolute_import
+import os
+from contextlib import contextmanager
+from shutil import rmtree
+from tempfile import mkdtemp
+
+
+@contextmanager
+def tempdir():
+    """Create and return a temporary directory. This has the same
+    behavior as mkdtemp but can be used as a context manager.
+
+    Upon exiting the context, the directory and everything contained
+    in it are removed.
+ + Examples + -------- + >>> import os + >>> with tempdir() as tmpdir: + ... fname = os.path.join(tmpdir, 'example_file.txt') + ... with open(fname, 'wt') as fobj: + ... _ = fobj.write('a string\\n') + >>> os.path.exists(tmpdir) + False + """ + d = mkdtemp() + yield d + rmtree(d) + + +@contextmanager +def in_tempdir(): + ''' Create, return, and change directory to a temporary directory + + Examples + -------- + >>> import os + >>> my_cwd = os.getcwd() + >>> with in_tempdir() as tmpdir: + ... _ = open('test.txt', 'wt').write('some text') + ... assert os.path.isfile('test.txt') + ... assert os.path.isfile(os.path.join(tmpdir, 'test.txt')) + >>> os.path.exists(tmpdir) + False + >>> os.getcwd() == my_cwd + True + ''' + pwd = os.getcwd() + d = mkdtemp() + os.chdir(d) + yield d + os.chdir(pwd) + rmtree(d) + + +@contextmanager +def in_dir(dir=None): + """ Change directory to given directory for duration of ``with`` block + + Useful when you want to use `in_tempdir` for the final test, but + you are still debugging. For example, you may want to do this in the end: + + >>> with in_tempdir() as tmpdir: + ... # do something complicated which might break + ... pass + + But indeed the complicated thing does break, and meanwhile the + ``in_tempdir`` context manager wiped out the directory with the + temporary files that you wanted for debugging. So, while debugging, you + replace with something like: + + >>> with in_dir() as tmpdir: # Use working directory by default + ... # do something complicated which might break + ... pass + + You can then look at the temporary file outputs to debug what is happening, + fix, and finally replace ``in_dir`` with ``in_tempdir`` again. + """ + cwd = os.getcwd() + if dir is None: + yield cwd + return + os.chdir(dir) + yield dir + os.chdir(cwd) diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_tmpdirs.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/_tmpdirs.pyc new file mode 100644 index 0000000..7339403 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_tmpdirs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_util.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/_util.py new file mode 100644 index 0000000..fa7880f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/_util.py @@ -0,0 +1,418 @@ +from __future__ import division, print_function, absolute_import + +import functools +import operator +import sys +import warnings +import numbers +from collections import namedtuple +from multiprocessing import Pool +import inspect + +import numpy as np + + +def _valarray(shape, value=np.nan, typecode=None): + """Return an array of all value. + """ + + out = np.ones(shape, dtype=bool) * value + if typecode is not None: + out = out.astype(typecode) + if not isinstance(out, np.ndarray): + out = np.asarray(out) + return out + + +def _lazywhere(cond, arrays, f, fillvalue=None, f2=None): + """ + np.where(cond, x, fillvalue) always evaluates x even where cond is False. + This one only evaluates f(arr1[cond], arr2[cond], ...). + For example, + >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]) + >>> def f(a, b): + return a*b + >>> _lazywhere(a > 2, (a, b), f, np.nan) + array([ nan, nan, 21., 32.]) + + Notice it assumes that all `arrays` are of the same shape, or can be + broadcasted together. 
+ + """ + if fillvalue is None: + if f2 is None: + raise ValueError("One of (fillvalue, f2) must be given.") + else: + fillvalue = np.nan + else: + if f2 is not None: + raise ValueError("Only one of (fillvalue, f2) can be given.") + + arrays = np.broadcast_arrays(*arrays) + temp = tuple(np.extract(cond, arr) for arr in arrays) + tcode = np.mintypecode([a.dtype.char for a in arrays]) + out = _valarray(np.shape(arrays[0]), value=fillvalue, typecode=tcode) + np.place(out, cond, f(*temp)) + if f2 is not None: + temp = tuple(np.extract(~cond, arr) for arr in arrays) + np.place(out, ~cond, f2(*temp)) + + return out + + +def _lazyselect(condlist, choicelist, arrays, default=0): + """ + Mimic `np.select(condlist, choicelist)`. + + Notice it assumes that all `arrays` are of the same shape, or can be + broadcasted together. + + All functions in `choicelist` must accept array arguments in the order + given in `arrays` and must return an array of the same shape as broadcasted + `arrays`. + + Examples + -------- + >>> x = np.arange(6) + >>> np.select([x <3, x > 3], [x**2, x**3], default=0) + array([ 0, 1, 4, 0, 64, 125]) + + >>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,)) + array([ 0., 1., 4., 0., 64., 125.]) + + >>> a = -np.ones_like(x) + >>> _lazyselect([x < 3, x > 3], + ... [lambda x, a: x**2, lambda x, a: a * x**3], + ... (x, a), default=np.nan) + array([ 0., 1., 4., nan, -64., -125.]) + + """ + arrays = np.broadcast_arrays(*arrays) + tcode = np.mintypecode([a.dtype.char for a in arrays]) + out = _valarray(np.shape(arrays[0]), value=default, typecode=tcode) + for index in range(len(condlist)): + func, cond = choicelist[index], condlist[index] + if np.all(cond is False): + continue + cond, _ = np.broadcast_arrays(cond, arrays[0]) + temp = tuple(np.extract(cond, arr) for arr in arrays) + np.place(out, cond, func(*temp)) + return out + + +def _aligned_zeros(shape, dtype=float, order="C", align=None): + """Allocate a new ndarray with aligned memory. + + Primary use case for this currently is working around a f2py issue + in Numpy 1.9.1, where dtype.alignment is such that np.zeros() does + not necessarily create arrays aligned up to it. + + """ + dtype = np.dtype(dtype) + if align is None: + align = dtype.alignment + if not hasattr(shape, '__len__'): + shape = (shape,) + size = functools.reduce(operator.mul, shape) * dtype.itemsize + buf = np.empty(size + align + 1, np.uint8) + offset = buf.__array_interface__['data'][0] % align + if offset != 0: + offset = align - offset + # Note: slices producing 0-size arrays do not necessarily change + # data pointer --- so we use and allocate size+1 + buf = buf[offset:offset+size+1][:-1] + data = np.ndarray(shape, dtype, buf, order=order) + data.fill(0) + return data + + +def _prune_array(array): + """Return an array equivalent to the input array. If the input + array is a view of a much larger array, copy its contents to a + newly allocated array. Otherwise, return the input unchanged. + """ + if array.base is not None and array.size < array.base.size // 2: + return array.copy() + return array + + +class DeprecatedImport(object): + """ + Deprecated import, with redirection + warning. + + Examples + -------- + Suppose you previously had in some module:: + + from foo import spam + + If this has to be deprecated, do:: + + spam = DeprecatedImport("foo.spam", "baz") + + to redirect users to use "baz" module instead. 
+
+    """
+
+    def __init__(self, old_module_name, new_module_name):
+        self._old_name = old_module_name
+        self._new_name = new_module_name
+        __import__(self._new_name)
+        self._mod = sys.modules[self._new_name]
+
+    def __dir__(self):
+        return dir(self._mod)
+
+    def __getattr__(self, name):
+        warnings.warn("Module %s is deprecated, use %s instead"
+                      % (self._old_name, self._new_name),
+                      DeprecationWarning)
+        return getattr(self._mod, name)
+
+
+# copy-pasted from scikit-learn utils/validation.py
+def check_random_state(seed):
+    """Turn seed into a np.random.RandomState instance
+
+    If seed is None (or np.random), return the RandomState singleton used
+    by np.random.
+    If seed is an int, return a new RandomState instance seeded with seed.
+    If seed is already a RandomState instance, return it.
+    Otherwise raise ValueError.
+    """
+    if seed is None or seed is np.random:
+        return np.random.mtrand._rand
+    if isinstance(seed, (numbers.Integral, np.integer)):
+        return np.random.RandomState(seed)
+    if isinstance(seed, np.random.RandomState):
+        return seed
+    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
+                     ' instance' % seed)
+
+
+def _asarray_validated(a, check_finite=True,
+                       sparse_ok=False, objects_ok=False, mask_ok=False,
+                       as_inexact=False):
+    """
+    Helper function for scipy argument validation.
+
+    Many scipy linear algebra functions do support arbitrary array-like
+    input arguments. Examples of commonly unsupported inputs include
+    matrices containing inf/nan, sparse matrix representations, and
+    matrices with complicated elements.
+
+    Parameters
+    ----------
+    a : array_like
+        The array-like input.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+    sparse_ok : bool, optional
+        True if scipy sparse matrices are allowed.
+    objects_ok : bool, optional
+        True if arrays with dtype('O') are allowed.
+    mask_ok : bool, optional
+        True if masked arrays are allowed.
+    as_inexact : bool, optional
+        True to convert the input array to a np.inexact dtype.
+
+    Returns
+    -------
+    ret : ndarray
+        The converted validated array.
+
+    """
+    if not sparse_ok:
+        import scipy.sparse
+        if scipy.sparse.issparse(a):
+            msg = ('Sparse matrices are not supported by this function. '
+                   'Perhaps one of the scipy.sparse.linalg functions '
+                   'would work instead.')
+            raise ValueError(msg)
+    if not mask_ok:
+        if np.ma.isMaskedArray(a):
+            raise ValueError('masked arrays are not supported')
+    toarray = np.asarray_chkfinite if check_finite else np.asarray
+    a = toarray(a)
+    if not objects_ok:
+        if a.dtype is np.dtype('O'):
+            raise ValueError('object arrays are not supported')
+    if as_inexact:
+        if not np.issubdtype(a.dtype, np.inexact):
+            a = toarray(a, dtype=np.float_)
+    return a
+
+
+# Add a replacement for inspect.getargspec() which is deprecated in python 3.5
+# The version below is borrowed from Django,
+# https://github.com/django/django/pull/4846
+
+# Note an inconsistency between inspect.getargspec(func) and
+# inspect.signature(func). If `func` is a bound method, the latter does *not*
+# list `self` as a first argument, while the former *does*.
+# Hence cook up a common ground replacement: `getargspec_no_self` which
+# mimics `inspect.getargspec` but does not list `self`.
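+# (E.g. for a bound method ``obj.method``, getargspec's args start with
+# 'self', while signature's parameters do not include it.)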
+# +# This way, the caller code does not need to know whether it uses a legacy +# .getargspec or bright and shiny .signature. + +try: + # is it python 3.3 or higher? + inspect.signature + + # Apparently, yes. Wrap inspect.signature + + ArgSpec = namedtuple('ArgSpec', ['args', 'varargs', 'keywords', 'defaults']) + + def getargspec_no_self(func): + """inspect.getargspec replacement using inspect.signature. + + inspect.getargspec is deprecated in python 3. This is a replacement + based on the (new in python 3.3) `inspect.signature`. + + Parameters + ---------- + func : callable + A callable to inspect + + Returns + ------- + argspec : ArgSpec(args, varargs, varkw, defaults) + This is similar to the result of inspect.getargspec(func) under + python 2.x. + NOTE: if the first argument of `func` is self, it is *not*, I repeat + *not* included in argspec.args. + This is done for consistency between inspect.getargspec() under + python 2.x, and inspect.signature() under python 3.x. + """ + sig = inspect.signature(func) + args = [ + p.name for p in sig.parameters.values() + if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD + ] + varargs = [ + p.name for p in sig.parameters.values() + if p.kind == inspect.Parameter.VAR_POSITIONAL + ] + varargs = varargs[0] if varargs else None + varkw = [ + p.name for p in sig.parameters.values() + if p.kind == inspect.Parameter.VAR_KEYWORD + ] + varkw = varkw[0] if varkw else None + defaults = [ + p.default for p in sig.parameters.values() + if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and + p.default is not p.empty) + ] or None + return ArgSpec(args, varargs, varkw, defaults) + +except AttributeError: + # python 2.x + def getargspec_no_self(func): + """inspect.getargspec replacement for compatibility with python 3.x. + + inspect.getargspec is deprecated in python 3. This wraps it, and + *removes* `self` from the argument list of `func`, if present. + This is done for forward compatibility with python 3. + + Parameters + ---------- + func : callable + A callable to inspect + + Returns + ------- + argspec : ArgSpec(args, varargs, varkw, defaults) + This is similar to the result of inspect.getargspec(func) under + python 2.x. + NOTE: if the first argument of `func` is self, it is *not*, I repeat + *not* included in argspec.args. + This is done for consistency between inspect.getargspec() under + python 2.x, and inspect.signature() under python 3.x. + """ + argspec = inspect.getargspec(func) + if argspec.args[0] == 'self': + argspec.args.pop(0) + return argspec + + +class MapWrapper(object): + """ + Parallelisation wrapper for working with map-like callables, such as + `multiprocessing.Pool.map`. + + Parameters + ---------- + pool : int or map-like callable + If `pool` is an integer, then it specifies the number of threads to + use for parallelization. If ``int(pool) == 1``, then no parallel + processing is used and the map builtin is used. + If ``pool == -1``, then the pool will utilise all available CPUs. + If `pool` is a map-like callable that follows the same + calling sequence as the built-in map function, then this callable is + used for parallelisation. 
+    """
+    def __init__(self, pool=1):
+        self.pool = None
+        self._mapfunc = map
+        self._own_pool = False
+
+        if callable(pool):
+            self.pool = pool
+            self._mapfunc = self.pool
+        else:
+            # user supplies a number
+            if int(pool) == -1:
+                # use as many processors as possible
+                self.pool = Pool()
+                self._mapfunc = self.pool.map
+                self._own_pool = True
+            elif int(pool) == 1:
+                pass
+            elif int(pool) > 1:
+                # use the number of processors requested
+                self.pool = Pool(processes=int(pool))
+                self._mapfunc = self.pool.map
+                self._own_pool = True
+            else:
+                raise RuntimeError("Number of workers specified must be -1,"
+                                   " an int >= 1, or an object with a 'map' "
+                                   "method")
+
+    def __enter__(self):
+        return self
+
+    def __del__(self):
+        self.close()
+
+    def terminate(self):
+        if self._own_pool:
+            self.pool.terminate()
+
+    def join(self):
+        if self._own_pool:
+            self.pool.join()
+
+    def close(self):
+        if self._own_pool:
+            self.pool.close()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self._own_pool:
+            if exc_type is None:
+                self.pool.close()
+                self.pool.join()
+            else:
+                self.pool.terminate()
+
+    def __call__(self, func, iterable):
+        # only accept one iterable because that's all Pool.map accepts
+        try:
+            return self._mapfunc(func, iterable)
+        except TypeError:
+            # wrong number of arguments
+            raise TypeError("The map-like callable must be of the"
+                            " form f(func, iterable)")
diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_util.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/_util.pyc
new file mode 100644
index 0000000..977137f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_util.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_version.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/_version.py
new file mode 100644
index 0000000..09b2494
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/_version.py
@@ -0,0 +1,155 @@
+"""Utility to compare (Numpy) version strings.
+
+The NumpyVersion class allows properly comparing numpy version strings.
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+
+"""
+
+import re
+
+from scipy._lib.six import string_types
+
+
+__all__ = ['NumpyVersion']
+
+
+class NumpyVersion():
+    """Parse and compare numpy version strings.
+
+    Numpy has the following versioning scheme (numbers given are examples;
+    they can be >9 in principle):
+
+    - Released version: '1.8.0', '1.8.1', etc.
+    - Alpha: '1.8.0a1', '1.8.0a2', etc.
+    - Beta: '1.8.0b1', '1.8.0b2', etc.
+    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
+    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
+    - Development versions after a1: '1.8.0a1.dev-f1234afa',
+                                     '1.8.0b2.dev-f1234afa',
+                                     '1.8.1rc1.dev-f1234afa', etc.
+    - Development versions (no git hash available): '1.8.0.dev-Unknown'
+
+    Comparing needs to be done against a valid version string or other
+    `NumpyVersion` instance.
+
+    Parameters
+    ----------
+    vstring : str
+        Numpy version string (``np.__version__``).
+
+    Notes
+    -----
+    All dev versions of the same (pre-)release compare equal.
+
+    Examples
+    --------
+    >>> from scipy._lib._version import NumpyVersion
+    >>> if NumpyVersion(np.__version__) < '1.7.0':
+    ...     
print('skip') + skip + + >>> NumpyVersion('1.7') # raises ValueError, add ".0" + + """ + def __init__(self, vstring): + self.vstring = vstring + ver_main = re.match(r'\d[.]\d+[.]\d+', vstring) + if not ver_main: + raise ValueError("Not a valid numpy version string") + + self.version = ver_main.group() + self.major, self.minor, self.bugfix = [int(x) for x in + self.version.split('.')] + if len(vstring) == ver_main.end(): + self.pre_release = 'final' + else: + alpha = re.match(r'a\d', vstring[ver_main.end():]) + beta = re.match(r'b\d', vstring[ver_main.end():]) + rc = re.match(r'rc\d', vstring[ver_main.end():]) + pre_rel = [m for m in [alpha, beta, rc] if m is not None] + if pre_rel: + self.pre_release = pre_rel[0].group() + else: + self.pre_release = '' + + self.is_devversion = bool(re.search(r'.dev', vstring)) + + def _compare_version(self, other): + """Compare major.minor.bugfix""" + if self.major == other.major: + if self.minor == other.minor: + if self.bugfix == other.bugfix: + vercmp = 0 + elif self.bugfix > other.bugfix: + vercmp = 1 + else: + vercmp = -1 + elif self.minor > other.minor: + vercmp = 1 + else: + vercmp = -1 + elif self.major > other.major: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare_pre_release(self, other): + """Compare alpha/beta/rc/final.""" + if self.pre_release == other.pre_release: + vercmp = 0 + elif self.pre_release == 'final': + vercmp = 1 + elif other.pre_release == 'final': + vercmp = -1 + elif self.pre_release > other.pre_release: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare(self, other): + if not isinstance(other, (string_types, NumpyVersion)): + raise ValueError("Invalid object to compare with NumpyVersion.") + + if isinstance(other, string_types): + other = NumpyVersion(other) + + vercmp = self._compare_version(other) + if vercmp == 0: + # Same x.y.z version, check for alpha/beta/rc + vercmp = self._compare_pre_release(other) + if vercmp == 0: + # Same version and same pre-release, check if dev version + if self.is_devversion is other.is_devversion: + vercmp = 0 + elif self.is_devversion: + vercmp = -1 + else: + vercmp = 1 + + return vercmp + + def __lt__(self, other): + return self._compare(other) < 0 + + def __le__(self, other): + return self._compare(other) <= 0 + + def __eq__(self, other): + return self._compare(other) == 0 + + def __ne__(self, other): + return self._compare(other) != 0 + + def __gt__(self, other): + return self._compare(other) > 0 + + def __ge__(self, other): + return self._compare(other) >= 0 + + def __repr__(self): + return "NumpyVersion(%s)" % self.vstring diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/_version.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/_version.pyc new file mode 100644 index 0000000..fa55ebd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/_version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/decorator.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/decorator.py new file mode 100644 index 0000000..db751b3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/decorator.py @@ -0,0 +1,425 @@ +# ######################### LICENSE ############################ # + +# Copyright (c) 2005-2015, Michele Simionato +# All rights reserved. 
+ +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: + +# Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# Redistributions in bytecode form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. + +""" +Decorator module, see https://pypi.python.org/pypi/decorator +for the documentation. +""" +from __future__ import print_function + +import re +import sys +import inspect +import operator +import itertools +import collections + +__version__ = '4.0.5' + +if sys.version >= '3': + from inspect import getfullargspec + + def get_init(cls): + return cls.__init__ +else: + class getfullargspec(object): + "A quick and dirty replacement for getfullargspec for Python 2.X" + def __init__(self, f): + self.args, self.varargs, self.varkw, self.defaults = \ + inspect.getargspec(f) + self.kwonlyargs = [] + self.kwonlydefaults = None + + def __iter__(self): + yield self.args + yield self.varargs + yield self.varkw + yield self.defaults + + getargspec = inspect.getargspec + + def get_init(cls): + return cls.__init__.__func__ + +# getargspec has been deprecated in Python 3.5 +ArgSpec = collections.namedtuple( + 'ArgSpec', 'args varargs varkw defaults') + + +def getargspec(f): + """A replacement for inspect.getargspec""" + spec = getfullargspec(f) + return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults) + + +DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(') + + +# basic functionality +class FunctionMaker(object): + """ + An object with the ability to create functions with a given signature. + It has attributes name, doc, module, signature, defaults, dict and + methods update and make. 
+ """ + + # Atomic get-and-increment provided by the GIL + _compile_count = itertools.count() + + def __init__(self, func=None, name=None, signature=None, + defaults=None, doc=None, module=None, funcdict=None): + self.shortsignature = signature + if func: + # func can be a class or a callable, but not an instance method + self.name = func.__name__ + if self.name == '<lambda>': # small hack for lambda functions + self.name = '_lambda_' + self.doc = func.__doc__ + self.module = func.__module__ + if inspect.isfunction(func): + argspec = getfullargspec(func) + self.annotations = getattr(func, '__annotations__', {}) + for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', + 'kwonlydefaults'): + setattr(self, a, getattr(argspec, a)) + for i, arg in enumerate(self.args): + setattr(self, 'arg%d' % i, arg) + if sys.version < '3': # easy way + self.shortsignature = self.signature = ( + inspect.formatargspec( + formatvalue=lambda val: "", *argspec)[1:-1]) + else: # Python 3 way + allargs = list(self.args) + allshortargs = list(self.args) + if self.varargs: + allargs.append('*' + self.varargs) + allshortargs.append('*' + self.varargs) + elif self.kwonlyargs: + allargs.append('*') # single star syntax + for a in self.kwonlyargs: + allargs.append('%s=None' % a) + allshortargs.append('%s=%s' % (a, a)) + if self.varkw: + allargs.append('**' + self.varkw) + allshortargs.append('**' + self.varkw) + self.signature = ', '.join(allargs) + self.shortsignature = ', '.join(allshortargs) + self.dict = func.__dict__.copy() + # func=None happens when decorating a caller + if name: + self.name = name + if signature is not None: + self.signature = signature + if defaults: + self.defaults = defaults + if doc: + self.doc = doc + if module: + self.module = module + if funcdict: + self.dict = funcdict + # check existence required attributes + assert hasattr(self, 'name') + if not hasattr(self, 'signature'): + raise TypeError('You are decorating a non function: %s' % func) + + def update(self, func, **kw): + "Update the signature of func with the data in self" + func.__name__ = self.name + func.__doc__ = getattr(self, 'doc', None) + func.__dict__ = getattr(self, 'dict', {}) + func.__defaults__ = getattr(self, 'defaults', ()) + func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) + func.__annotations__ = getattr(self, 'annotations', None) + try: + frame = sys._getframe(3) + except AttributeError: # for IronPython and similar implementations + callermodule = '?' + else: + callermodule = frame.f_globals.get('__name__', '?') + func.__module__ = getattr(self, 'module', callermodule) + func.__dict__.update(kw) + + def make(self, src_templ, evaldict=None, addsource=False, **attrs): + "Make a new function from a given template and update the signature" + src = src_templ % vars(self) # expand name and signature + evaldict = evaldict or {} + mo = DEF.match(src) + if mo is None: + raise SyntaxError('not a valid function template\n%s' % src) + name = mo.group(1) # extract the function name + names = set([name] + [arg.strip(' *') for arg in + self.shortsignature.split(',')]) + for n in names: + if n in ('_func_', '_call_'): + raise NameError('%s is overridden in\n%s' % (n, src)) + if not src.endswith('\n'): # add a newline just for safety + src += '\n' # this is needed in old versions of Python + + # Ensure each generated function has a unique filename for profilers + # (such as cProfile) that depend on the tuple of (<filename>, + # <definition line>, <function name>) being unique. 
+ filename = '<decorator-gen-%d>' % (next(self._compile_count),) + try: + code = compile(src, filename, 'single') + exec(code, evaldict) + except: # noqa: E722 + print('Error in generated code:', file=sys.stderr) + print(src, file=sys.stderr) + raise + func = evaldict[name] + if addsource: + attrs['__source__'] = src + self.update(func, **attrs) + return func + + @classmethod + def create(cls, obj, body, evaldict, defaults=None, + doc=None, module=None, addsource=True, **attrs): + """ + Create a function from the strings name, signature and body. + evaldict is the evaluation dictionary. If addsource is true an + attribute __source__ is added to the result. The attributes attrs + are added, if any. + """ + if isinstance(obj, str): # "name(signature)" + name, rest = obj.strip().split('(', 1) + signature = rest[:-1] # strip a right parens + func = None + else: # a function + name = None + signature = None + func = obj + self = cls(func, name, signature, defaults, doc, module) + ibody = '\n'.join(' ' + line for line in body.splitlines()) + return self.make('def %(name)s(%(signature)s):\n' + ibody, + evaldict, addsource, **attrs) + + +def decorate(func, caller): + """ + decorate(func, caller) decorates a function using a caller. + """ + evaldict = func.__globals__.copy() + evaldict['_call_'] = caller + evaldict['_func_'] = func + fun = FunctionMaker.create( + func, "return _call_(_func_, %(shortsignature)s)", + evaldict, __wrapped__=func) + if hasattr(func, '__qualname__'): + fun.__qualname__ = func.__qualname__ + return fun + + +def decorator(caller, _func=None): + """decorator(caller) converts a caller function into a decorator""" + if _func is not None: # return a decorated function + # this is obsolete behavior; you should use decorate instead + return decorate(_func, caller) + # else return a decorator function + if inspect.isclass(caller): + name = caller.__name__.lower() + callerfunc = get_init(caller) + doc = 'decorator(%s) converts functions/generators into ' \ + 'factories of %s objects' % (caller.__name__, caller.__name__) + elif inspect.isfunction(caller): + if caller.__name__ == '<lambda>': + name = '_lambda_' + else: + name = caller.__name__ + callerfunc = caller + doc = caller.__doc__ + else: # assume caller is an object with a __call__ method + name = caller.__class__.__name__.lower() + callerfunc = caller.__call__.__func__ + doc = caller.__call__.__doc__ + evaldict = callerfunc.__globals__.copy() + evaldict['_call_'] = caller + evaldict['_decorate_'] = decorate + return FunctionMaker.create( + '%s(func)' % name, 'return _decorate_(func, _call_)', + evaldict, doc=doc, module=caller.__module__, + __wrapped__=caller) + + +# ####################### contextmanager ####################### # + +try: # Python >= 3.2 + from contextlib import _GeneratorContextManager +except ImportError: # Python >= 2.5 + from contextlib import GeneratorContextManager as _GeneratorContextManager + + +class ContextManager(_GeneratorContextManager): + def __call__(self, func): + """Context manager decorator""" + return FunctionMaker.create( + func, "with _self_: return _func_(%(shortsignature)s)", + dict(_self_=self, _func_=func), __wrapped__=func) + + +init = getfullargspec(_GeneratorContextManager.__init__) +n_args = len(init.args) +if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7 + def __init__(self, g, *a, **k): + return _GeneratorContextManager.__init__(self, g(*a, **k)) + ContextManager.__init__ = __init__ +elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4 + pass 
+elif n_args == 4:  # (self, gen, args, kwds) Python 3.5
+    def __init__(self, g, *a, **k):
+        return _GeneratorContextManager.__init__(self, g, a, k)
+    ContextManager.__init__ = __init__
+
+contextmanager = decorator(ContextManager)
+
+
+# ############################ dispatch_on ############################ #
+
+def append(a, vancestors):
+    """
+    Append ``a`` to the list of the virtual ancestors, unless it is already
+    included.
+    """
+    add = True
+    for j, va in enumerate(vancestors):
+        if issubclass(va, a):
+            add = False
+            break
+        if issubclass(a, va):
+            vancestors[j] = a
+            add = False
+    if add:
+        vancestors.append(a)
+
+
+# inspired by simplegeneric by P.J. Eby and functools.singledispatch
+def dispatch_on(*dispatch_args):
+    """
+    Factory of decorators turning a function into a generic function
+    dispatching on the given arguments.
+    """
+    assert dispatch_args, 'No dispatch args passed'
+    dispatch_str = '(%s,)' % ', '.join(dispatch_args)
+
+    def check(arguments, wrong=operator.ne, msg=''):
+        """Make sure one passes the expected number of arguments"""
+        if wrong(len(arguments), len(dispatch_args)):
+            raise TypeError('Expected %d arguments, got %d%s' %
+                            (len(dispatch_args), len(arguments), msg))
+
+    def gen_func_dec(func):
+        """Decorator turning a function into a generic function"""
+
+        # first check the dispatch arguments
+        argset = set(getfullargspec(func).args)
+        if not set(dispatch_args) <= argset:
+            raise NameError('Unknown dispatch arguments %s' % dispatch_str)
+
+        typemap = {}
+
+        def vancestors(*types):
+            """
+            Get a list of sets of virtual ancestors for the given types
+            """
+            check(types)
+            ras = [[] for _ in range(len(dispatch_args))]
+            for types_ in typemap:
+                for t, type_, ra in zip(types, types_, ras):
+                    if issubclass(t, type_) and type_ not in t.__mro__:
+                        append(type_, ra)
+            return [set(ra) for ra in ras]
+
+        def ancestors(*types):
+            """
+            Get a list of virtual MROs, one for each type
+            """
+            check(types)
+            lists = []
+            for t, vas in zip(types, vancestors(*types)):
+                n_vas = len(vas)
+                if n_vas > 1:
+                    raise RuntimeError(
+                        'Ambiguous dispatch for %s: %s' % (t, vas))
+                elif n_vas == 1:
+                    va, = vas
+                    mro = type('t', (t, va), {}).__mro__[1:]
+                else:
+                    mro = t.__mro__
+                lists.append(mro[:-1])  # discard t and object
+            return lists
+
+        def register(*types):
+            """
+            Decorator to register an implementation for the given types
+            """
+            check(types)
+
+            def dec(f):
+                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
+                typemap[types] = f
+                return f
+            return dec
+
+        def dispatch_info(*types):
+            """
+            A utility to introspect the dispatch algorithm
+            """
+            check(types)
+            lst = []
+            for anc in itertools.product(*ancestors(*types)):
+                lst.append(tuple(a.__name__ for a in anc))
+            return lst
+
+        def _dispatch(dispatch_args, *args, **kw):
+            types = tuple(type(arg) for arg in dispatch_args)
+            try:  # fast path
+                f = typemap[types]
+            except KeyError:
+                pass
+            else:
+                return f(*args, **kw)
+            combinations = itertools.product(*ancestors(*types))
+            next(combinations)  # the first one has been already tried
+            for types_ in combinations:
+                f = typemap.get(types_)
+                if f is not None:
+                    return f(*args, **kw)
+
+            # else call the default implementation
+            return func(*args, **kw)
+
+        return FunctionMaker.create(
+            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
+            dict(_f_=_dispatch), register=register, default=func,
+            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
+            dispatch_info=dispatch_info, __wrapped__=func)
+
+    gen_func_dec.__name__ = 'dispatch_on' + 
dispatch_str + return gen_func_dec diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/decorator.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/decorator.pyc new file mode 100644 index 0000000..585425e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/decorator.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/messagestream.so b/project/venv/lib/python2.7/site-packages/scipy/_lib/messagestream.so new file mode 100755 index 0000000..90981f7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/messagestream.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/setup.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/setup.py new file mode 100644 index 0000000..8cb9efc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/setup.py @@ -0,0 +1,52 @@ +from __future__ import division, print_function, absolute_import + +import os + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('_lib', parent_package, top_path) + config.add_data_files('tests/*.py') + + include_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src')) + depends = [os.path.join(include_dir, 'ccallback.h')] + + config.add_extension("_ccallback_c", + sources=["_ccallback_c.c"], + depends=depends, + include_dirs=[include_dir]) + + config.add_extension("_test_ccallback", + sources=["src/_test_ccallback.c"], + depends=depends, + include_dirs=[include_dir]) + + config.add_extension("_fpumode", + sources=["_fpumode.c"]) + + def get_messagestream_config(ext, build_dir): + # Generate a header file containing defines + config_cmd = config.get_config_cmd() + defines = [] + if config_cmd.check_func('open_memstream', decl=True, call=True): + defines.append(('HAVE_OPEN_MEMSTREAM', '1')) + target = os.path.join(os.path.dirname(__file__), 'src', + 'messagestream_config.h') + with open(target, 'w') as f: + for name, value in defines: + f.write('#define {0} {1}\n'.format(name, value)) + + depends = [os.path.join(include_dir, 'messagestream.h')] + config.add_extension("messagestream", + sources=["messagestream.c"] + [get_messagestream_config], + depends=depends, + include_dirs=[include_dir]) + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/setup.pyc new file mode 100644 index 0000000..465e8c0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/six.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/six.py new file mode 100644 index 0000000..29d54e1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/six.py @@ -0,0 +1,276 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2012 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of +# this software and associated documentation files (the "Software"), to deal in +# the Software without restriction, including without limitation the rights to +# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +# the Software, and to permit persons to whom the Software is furnished to do so, +# subject to 
the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import operator +import sys +import types + +__author__ = "Benjamin Peterson <benjamin@python.org>" +__version__ = "1.2.0" + + +# True if we are running on Python 3. +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +# Replacement for lazy loading stuff in upstream six. 
See gh-2764 +if PY3: + import builtins + import functools + reduce = functools.reduce + zip = builtins.zip + xrange = builtins.range +else: + import __builtin__ + import itertools + builtins = __builtin__ + reduce = __builtin__.reduce + zip = itertools.izip + xrange = __builtin__.xrange + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_code = "__code__" + _func_defaults = "__defaults__" + + _iterkeys = "keys" + _itervalues = "values" + _iteritems = "items" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_code = "func_code" + _func_defaults = "func_defaults" + + _iterkeys = "iterkeys" + _itervalues = "itervalues" + _iteritems = "iteritems" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +if PY3: + def get_unbound_function(unbound): + return unbound + + Iterator = object + + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) +else: + def get_unbound_function(unbound): + return unbound.im_func + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) + + +def iterkeys(d): + """Return an iterator over the keys of a dictionary.""" + return iter(getattr(d, _iterkeys)()) + + +def itervalues(d): + """Return an iterator over the values of a dictionary.""" + return iter(getattr(d, _itervalues)()) + + +def iteritems(d): + """Return an iterator over the (key, value) pairs of a dictionary.""" + return iter(getattr(d, _iteritems)()) + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + + if sys.version_info[1] <= 1: + def int2byte(i): + return bytes((i,)) + else: + # This is about 2x faster than the implementation above on 3.2+ + int2byte = operator.methodcaller("to_bytes", 1, "big") + import io + StringIO = io.StringIO + BytesIO = io.BytesIO +else: + def b(s): + return s + + def u(s): + return unicode(s, "unicode_escape") + int2byte = chr + import StringIO + StringIO = BytesIO = StringIO.StringIO +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +if PY3: + import builtins + exec_ = getattr(builtins, "exec") + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + print_ = getattr(builtins, "print") + del builtins + +else: + def exec_(code, globs=None, locs=None): + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + def print_(*args, **kwargs): + """The new-style print function.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if 
isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) + +_add_doc(reraise, """Reraise an exception.""") + + +def with_metaclass(meta, base=object): + """Create a base class with a metaclass.""" + return meta("NewBase", (base,), {}) diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/six.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/six.pyc new file mode 100644 index 0000000..0cd2d54 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/six.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/__init__.pyc new file mode 100644 index 0000000..d18abae Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__gcutils.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__gcutils.py new file mode 100644 index 0000000..d82b81f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__gcutils.py @@ -0,0 +1,99 @@ +""" Test for assert_deallocated context manager and gc utilities +""" +from __future__ import division, print_function, absolute_import + +import gc + +from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated, + ReferenceError, IS_PYPY) + +from numpy.testing import assert_equal + +import pytest + + +def test_set_gc_state(): + gc_status = gc.isenabled() + try: + for state in (True, False): + gc.enable() + set_gc_state(state) + assert_equal(gc.isenabled(), state) + gc.disable() + set_gc_state(state) + assert_equal(gc.isenabled(), state) + finally: + if gc_status: + gc.enable() + + +def test_gc_state(): + # Test gc_state context manager + gc_status = gc.isenabled() + try: + for pre_state in (True, False): + set_gc_state(pre_state) + for with_state in (True, False): + # Check the gc state is with_state in with block + with gc_state(with_state): + assert_equal(gc.isenabled(), with_state) + # And returns to previous state outside block + assert_equal(gc.isenabled(), pre_state) + # Even if the gc state is set explicitly within the block + with gc_state(with_state): + assert_equal(gc.isenabled(), with_state) + set_gc_state(not with_state) + assert_equal(gc.isenabled(), pre_state) + finally: + if gc_status: + gc.enable() + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_assert_deallocated(): + # Ordinary use + class C(object): + def __init__(self, arg0, arg1, name='myname'): + self.name = name + for gc_current in (True, False): + with gc_state(gc_current): + # We are deleting from with-block context, so that's OK + with assert_deallocated(C, 0, 2, 'another name') as c: + assert_equal(c.name, 'another name') + del c + # Or not using the thing in 
with-block context, also OK + with assert_deallocated(C, 0, 2, name='third name'): + pass + assert_equal(gc.isenabled(), gc_current) + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_assert_deallocated_nodel(): + class C(object): + pass + with pytest.raises(ReferenceError): + # Need to delete after using if in with-block context + with assert_deallocated(C) as c: + pass + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_assert_deallocated_circular(): + class C(object): + def __init__(self): + self._circular = self + with pytest.raises(ReferenceError): + # Circular reference, no automatic garbage collection + with assert_deallocated(C) as c: + del c + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_assert_deallocated_circular2(): + class C(object): + def __init__(self): + self._circular = self + with pytest.raises(ReferenceError): + # Still circular reference, no automatic garbage collection + with assert_deallocated(C): + pass diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__gcutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__gcutils.pyc new file mode 100644 index 0000000..7a64ce1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__gcutils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__testutils.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__testutils.py new file mode 100644 index 0000000..d88cd6e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__testutils.py @@ -0,0 +1,34 @@ +from __future__ import division, print_function, absolute_import + +import sys +from scipy._lib._testutils import _parse_size, _get_mem_available +import pytest + + +def test__parse_size(): + expected = { + '12': 12e6, + '12 b': 12, + '12k': 12e3, + ' 12 M ': 12e6, + ' 12 G ': 12e9, + ' 12Tb ': 12e12, + '12 Mib ': 12 * 1024.0**2, + '12Tib': 12 * 1024.0**4, + } + + for inp, outp in sorted(expected.items()): + if outp is None: + with pytest.raises(ValueError): + _parse_size(inp) + else: + assert _parse_size(inp) == outp + + +def test__mem_available(): + # May return None on non-Linux platforms + available = _get_mem_available() + if sys.platform.startswith('linux'): + assert available >= 0 + else: + assert available is None or available >= 0 diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__testutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__testutils.pyc new file mode 100644 index 0000000..460286e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__testutils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__threadsafety.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__threadsafety.py new file mode 100644 index 0000000..64f1ce6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__threadsafety.py @@ -0,0 +1,53 @@ +from __future__ import division, print_function, absolute_import + +import threading +import time +import traceback + +from numpy.testing import assert_ +from pytest import raises as assert_raises + +from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError + + +def test_parallel_threads(): + # Check that ReentrancyLock serializes work in parallel threads. 
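+    # Mechanics of the check: each worker flips a shared flag, sleeps, and
+    # then verifies no other thread touched the flag in the meantime; if the
+    # lock failed to serialize the workers, the sleeps would overlap and an
+    # assertion would trip.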
+ # + # The test is not fully deterministic, and may succeed falsely if + # the timings go wrong. + + lock = ReentrancyLock("failure") + + failflag = [False] + exceptions_raised = [] + + def worker(k): + try: + with lock: + assert_(not failflag[0]) + failflag[0] = True + time.sleep(0.1 * k) + assert_(failflag[0]) + failflag[0] = False + except Exception: + exceptions_raised.append(traceback.format_exc(2)) + + threads = [threading.Thread(target=lambda k=k: worker(k)) + for k in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() + + exceptions_raised = "\n".join(exceptions_raised) + assert_(not exceptions_raised, exceptions_raised) + + +def test_reentering(): + # Check that ReentrancyLock prevents re-entering from the same thread. + + @non_reentrant() + def func(x): + return func(x) + + assert_raises(ReentrancyError, func, 0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__threadsafety.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__threadsafety.pyc new file mode 100644 index 0000000..d1b0dc9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__threadsafety.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__util.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__util.py new file mode 100644 index 0000000..4421d9f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__util.py @@ -0,0 +1,109 @@ +from __future__ import division, print_function, absolute_import +from multiprocessing import Pool +from multiprocessing.pool import Pool as PWL + +import numpy as np +from numpy.testing import assert_equal, assert_ +from pytest import raises as assert_raises + +from scipy._lib._util import _aligned_zeros, check_random_state, MapWrapper + + +def test__aligned_zeros(): + niter = 10 + + def check(shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + # Size-0 arrays get invalid flags on Numpy 1.5 + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError() + + # try various alignments + for align in [1, 2, 3, 4, 8, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in [np.uint8, np.float64]: + for shape in [n, (1, 2, 3, n)]: + for j in range(niter): + check(shape, dtype, order, align) + + +def test_check_random_state(): + # If seed is None, return the RandomState singleton used by np.random. + # If seed is an int, return a new RandomState instance seeded with seed. + # If seed is already a RandomState instance, return it. + # Otherwise raise ValueError. + rsi = check_random_state(1) + assert_equal(type(rsi), np.random.RandomState) + rsi = check_random_state(rsi) + assert_equal(type(rsi), np.random.RandomState) + rsi = check_random_state(None) + assert_equal(type(rsi), np.random.RandomState) + assert_raises(ValueError, check_random_state, 'a') + + +class TestMapWrapper(object): + + def setup_method(self): + self.input = np.arange(10.) 
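+        # reference result that the serial and parallel tests below compare
+        # their map output against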
+ self.output = np.sin(self.input) + + def test_serial(self): + p = MapWrapper(1) + assert_(p._mapfunc is map) + assert_(p.pool is None) + assert_(p._own_pool is False) + out = list(p(np.sin, self.input)) + assert_equal(out, self.output) + + with assert_raises(RuntimeError): + p = MapWrapper(0) + + def test_parallel(self): + with MapWrapper(2) as p: + out = p(np.sin, self.input) + assert_equal(list(out), self.output) + + assert_(p._own_pool is True) + assert_(isinstance(p.pool, PWL)) + assert_(p._mapfunc is not None) + + # the context manager should've closed the internal pool + # check that it has by asking it to calculate again. + with assert_raises(Exception) as excinfo: + p(np.sin, self.input) + + # on py27 an AssertionError is raised, on >py27 it's a ValueError + err_type = excinfo.type + assert_((err_type is ValueError) or (err_type is AssertionError)) + + # can also set a PoolWrapper up with a map-like callable instance + try: + p = Pool(2) + q = MapWrapper(p.map) + + assert_(q._own_pool is False) + q.close() + + # closing the PoolWrapper shouldn't close the internal pool + # because it didn't create it + out = p.map(np.sin, self.input) + assert_equal(list(out), self.output) + finally: + p.close() diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__util.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__util.pyc new file mode 100644 index 0000000..760d0e6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__util.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__version.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__version.py new file mode 100644 index 0000000..9b29b6b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__version.py @@ -0,0 +1,65 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import assert_ +from pytest import raises as assert_raises +from scipy._lib._version import NumpyVersion + + +def test_main_versions(): + assert_(NumpyVersion('1.8.0') == '1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1']: + assert_(NumpyVersion('1.8.0') < ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert_(NumpyVersion('1.8.0') > ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. 
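+    # Plain string comparison would order these wrongly ('1.10.0' sorts
+    # before '1.9.0' lexicographically); NumpyVersion compares the version
+    # components numerically.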
+ assert_(NumpyVersion('1.9.0') < '1.10.0') + assert_(NumpyVersion('1.11.0') < '1.11.1') + assert_(NumpyVersion('1.11.0') == '1.11.0') + assert_(NumpyVersion('1.99.11') < '1.99.12') + + +def test_alpha_beta_rc(): + assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert_(NumpyVersion('1.8.0rc1') < ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert_(NumpyVersion('1.8.0rc1') > ver) + + assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') + + +def test_dev_version(): + assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: + assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') + + +def test_dev_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') + assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') + + +def test_dev0_version(): + assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: + assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') + + +def test_dev0_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') + assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') + + +def test_raises(): + for ver in ['1.9', '1,9.0', '1.7.x']: + assert_raises(ValueError, NumpyVersion, ver) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__version.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__version.pyc new file mode 100644 index 0000000..ce7ee12 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_ccallback.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_ccallback.py new file mode 100644 index 0000000..77f1f06 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_ccallback.py @@ -0,0 +1,199 @@ +from __future__ import division, print_function, absolute_import + +from numpy.testing import assert_equal, assert_ +from pytest import raises as assert_raises + +import time +import pytest +import ctypes +import threading +from scipy._lib import _ccallback_c as _test_ccallback_cython +from scipy._lib import _test_ccallback +from scipy._lib._ccallback import LowLevelCallable + +try: + import cffi + HAVE_CFFI = True +except ImportError: + HAVE_CFFI = False + + +ERROR_VALUE = 2.0 + + +def callback_python(a, user_data=None): + if a == ERROR_VALUE: + raise ValueError("bad value") + + if user_data is None: + return a + 1 + else: + return a + user_data + +def _get_cffi_func(base, signature): + if not HAVE_CFFI: + pytest.skip("cffi not installed") + + # Get function address + voidp = ctypes.cast(base, ctypes.c_void_p) + address = voidp.value + + # Create corresponding cffi handle + ffi = cffi.FFI() + func = ffi.cast(signature, address) + return func + + +def _get_ctypes_data(): + value = ctypes.c_double(2.0) + return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp) + + +def _get_cffi_data(): + if not HAVE_CFFI: + pytest.skip("cffi not installed") + ffi = cffi.FFI() + return ffi.new('double *', 2.0) + + +CALLERS = { + 'simple': _test_ccallback.test_call_simple, + 'nodata': _test_ccallback.test_call_nodata, + 'nonlocal': _test_ccallback.test_call_nonlocal, + 'cython': 
_test_ccallback_cython.test_call_cython, +} + +# These functions have signatures known to the callers +FUNCS = { + 'python': lambda: callback_python, + 'capsule': lambda: _test_ccallback.test_get_plus1_capsule(), + 'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1_cython"), + 'ctypes': lambda: _test_ccallback_cython.plus1_ctypes, + 'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes, + 'double (*)(double, int *, void *)'), + 'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(), + 'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1b_cython"), + 'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes, + 'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes, + 'double (*)(double, double, int *, void *)'), +} + +# These functions have signatures the callers don't know +BAD_FUNCS = { + 'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(), + 'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1bc_cython"), + 'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes, + 'cffi_bc': lambda: _get_cffi_func(_test_ccallback_cython.plus1bc_ctypes, + 'double (*)(double, double, double, int *, void *)'), +} + +USER_DATAS = { + 'ctypes': _get_ctypes_data, + 'cffi': _get_cffi_data, + 'capsule': _test_ccallback.test_get_data_capsule, +} + + +def test_callbacks(): + def check(caller, func, user_data): + caller = CALLERS[caller] + func = FUNCS[func]() + user_data = USER_DATAS[user_data]() + + if func is callback_python: + func2 = lambda x: func(x, 2.0) + else: + func2 = LowLevelCallable(func, user_data) + func = LowLevelCallable(func) + + # Test basic call + assert_equal(caller(func, 1.0), 2.0) + + # Test 'bad' value resulting to an error + assert_raises(ValueError, caller, func, ERROR_VALUE) + + # Test passing in user_data + assert_equal(caller(func2, 1.0), 3.0) + + for caller in sorted(CALLERS.keys()): + for func in sorted(FUNCS.keys()): + for user_data in sorted(USER_DATAS.keys()): + check(caller, func, user_data) + + +def test_bad_callbacks(): + def check(caller, func, user_data): + caller = CALLERS[caller] + user_data = USER_DATAS[user_data]() + func = BAD_FUNCS[func]() + + if func is callback_python: + func2 = lambda x: func(x, 2.0) + else: + func2 = LowLevelCallable(func, user_data) + func = LowLevelCallable(func) + + # Test that basic call fails + assert_raises(ValueError, caller, LowLevelCallable(func), 1.0) + + # Test that passing in user_data also fails + assert_raises(ValueError, caller, func2, 1.0) + + # Test error message + llfunc = LowLevelCallable(func) + try: + caller(llfunc, 1.0) + except ValueError as err: + msg = str(err) + assert_(llfunc.signature in msg, msg) + assert_('double (double, double, int *, void *)' in msg, msg) + + for caller in sorted(CALLERS.keys()): + for func in sorted(BAD_FUNCS.keys()): + for user_data in sorted(USER_DATAS.keys()): + check(caller, func, user_data) + + +def test_signature_override(): + caller = _test_ccallback.test_call_simple + func = _test_ccallback.test_get_plus1_capsule() + + llcallable = LowLevelCallable(func, signature="bad signature") + assert_equal(llcallable.signature, "bad signature") + assert_raises(ValueError, caller, llcallable, 3) + + llcallable = LowLevelCallable(func, signature="double (double, int *, void *)") + assert_equal(llcallable.signature, "double (double, int *, void *)") + assert_equal(caller(llcallable, 3), 4) + + +def test_threadsafety(): + def callback(a, caller): + if a <= 0: + return 1 + 
else: + res = caller(lambda x: callback(x, caller), a - 1) + return 2*res + + def check(caller): + caller = CALLERS[caller] + + results = [] + + count = 10 + + def run(): + time.sleep(0.01) + r = caller(lambda x: callback(x, caller), count) + results.append(r) + + threads = [threading.Thread(target=run) for j in range(20)] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + + assert_equal(results, [2.0**count]*len(threads)) + + for caller in CALLERS.keys(): + check(caller) diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_ccallback.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_ccallback.pyc new file mode 100644 index 0000000..8fbf501 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_ccallback.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_import_cycles.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_import_cycles.py new file mode 100644 index 0000000..3366912 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_import_cycles.py @@ -0,0 +1,52 @@ +from __future__ import division, print_function, absolute_import + +import sys +import subprocess + + +MODULES = [ + "scipy.cluster", + "scipy.cluster.vq", + "scipy.cluster.hierarchy", + "scipy.constants", + "scipy.fftpack", + "scipy.integrate", + "scipy.interpolate", + "scipy.io", + "scipy.io.arff", + "scipy.io.harwell_boeing", + "scipy.io.idl", + "scipy.io.matlab", + "scipy.io.netcdf", + "scipy.io.wavfile", + "scipy.linalg", + "scipy.linalg.blas", + "scipy.linalg.cython_blas", + "scipy.linalg.lapack", + "scipy.linalg.cython_lapack", + "scipy.linalg.interpolative", + "scipy.misc", + "scipy.ndimage", + "scipy.odr", + "scipy.optimize", + "scipy.signal", + "scipy.signal.windows", + "scipy.sparse", + "scipy.sparse.linalg", + "scipy.sparse.csgraph", + "scipy.spatial", + "scipy.spatial.distance", + "scipy.special", + "scipy.stats", + "scipy.stats.distributions", + "scipy.stats.mstats", +] + + +def test_modules_importable(): + # Check that all modules are importable in a new Python + # process. This is not necessarily true (esp on Python 2) if there + # are import cycles present. 
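+    # Each import runs in a fresh interpreter via subprocess, so a module
+    # that only imports cleanly when a sibling is already in sys.modules
+    # will fail loudly here.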
+ for module in MODULES: + cmd = 'import {}'.format(module) + subprocess.check_call([sys.executable, '-c', cmd]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_import_cycles.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_import_cycles.pyc new file mode 100644 index 0000000..8802297 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_import_cycles.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_tmpdirs.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_tmpdirs.py new file mode 100644 index 0000000..583987b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_tmpdirs.py @@ -0,0 +1,45 @@ +""" Test tmpdirs module """ +from __future__ import division, print_function, absolute_import + +from os import getcwd +from os.path import realpath, abspath, dirname, isfile, join as pjoin, exists + +from scipy._lib._tmpdirs import tempdir, in_tempdir, in_dir + +from numpy.testing import assert_, assert_equal + +MY_PATH = abspath(__file__) +MY_DIR = dirname(MY_PATH) + + +def test_tempdir(): + with tempdir() as tmpdir: + fname = pjoin(tmpdir, 'example_file.txt') + with open(fname, 'wt') as fobj: + fobj.write('a string\\n') + assert_(not exists(tmpdir)) + + +def test_in_tempdir(): + my_cwd = getcwd() + with in_tempdir() as tmpdir: + with open('test.txt', 'wt') as f: + f.write('some text') + assert_(isfile('test.txt')) + assert_(isfile(pjoin(tmpdir, 'test.txt'))) + assert_(not exists(tmpdir)) + assert_equal(getcwd(), my_cwd) + + +def test_given_directory(): + # Test InGivenDirectory + cwd = getcwd() + with in_dir() as tmpdir: + assert_equal(tmpdir, abspath(cwd)) + assert_equal(tmpdir, abspath(getcwd())) + with in_dir(MY_DIR) as tmpdir: + assert_equal(tmpdir, MY_DIR) + assert_equal(realpath(MY_DIR), realpath(abspath(getcwd()))) + # We were deleting the given directory! Check not so now. + assert_(isfile(MY_PATH)) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_tmpdirs.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_tmpdirs.pyc new file mode 100644 index 0000000..722146d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_tmpdirs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_warnings.py b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_warnings.py new file mode 100644 index 0000000..b1f2632 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_warnings.py @@ -0,0 +1,126 @@ +""" +Tests which scan for certain occurrences in the code, they may not find +all of these occurrences but should catch almost all. This file was adapted +from numpy. 
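+The scan parses each scipy source file into an AST, flagging
+``simplefilter``/``filterwarnings`` calls that silence warnings with
+"ignore" and ``warnings.warn`` calls that omit ``stacklevel``.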
+""" + + +from __future__ import division, absolute_import, print_function + +import os +import sys +import scipy + +import pytest + + +if sys.version_info >= (3, 4): + from pathlib import Path + import ast + import tokenize + + class ParseCall(ast.NodeVisitor): + def __init__(self): + self.ls = [] + + def visit_Attribute(self, node): + ast.NodeVisitor.generic_visit(self, node) + self.ls.append(node.attr) + + def visit_Name(self, node): + self.ls.append(node.id) + + class FindFuncs(ast.NodeVisitor): + def __init__(self, filename): + super().__init__() + self.__filename = filename + self.bad_filters = [] + self.bad_stacklevels = [] + + def visit_Call(self, node): + p = ParseCall() + p.visit(node.func) + ast.NodeVisitor.generic_visit(self, node) + + if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': + if node.args[0].s == "ignore": + self.bad_filters.append( + "{}:{}".format(self.__filename, node.lineno)) + + if p.ls[-1] == 'warn' and ( + len(p.ls) == 1 or p.ls[-2] == 'warnings'): + + if self.__filename == "_lib/tests/test_warnings.py": + # This file + return + + # See if stacklevel exists: + if len(node.args) == 3: + return + args = {kw.arg for kw in node.keywords} + if "stacklevel" not in args: + self.bad_stacklevels.append( + "{}:{}".format(self.__filename, node.lineno)) + + +@pytest.fixture(scope="session") +def warning_calls(): + # combined "ignore" and stacklevel error + base = Path(scipy.__file__).parent + + bad_filters = [] + bad_stacklevels = [] + + for path in base.rglob("*.py"): + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g. LANG='C') + with tokenize.open(str(path)) as file: + tree = ast.parse(file.read(), filename=str(path)) + finder = FindFuncs(path.relative_to(base)) + finder.visit(tree) + bad_filters.extend(finder.bad_filters) + bad_stacklevels.extend(finder.bad_stacklevels) + + return bad_filters, bad_stacklevels + + +@pytest.mark.slow +@pytest.mark.skipif(sys.version_info < (3, 4), reason="needs Python >= 3.4") +def test_warning_calls_filters(warning_calls): + bad_filters, bad_stacklevels = warning_calls + + # There is still one simplefilter occurrence in optimize.py that could be removed. + bad_filters = [item for item in bad_filters + if 'optimize.py' not in item] + # The filterwarnings call in sparse/__init__.py is needed. 
+ bad_filters = [item for item in bad_filters + if os.path.join('sparse', '__init__.py') not in item] + + if bad_filters: + raise AssertionError( + "warning ignore filter should not be used, instead, use\n" + "scipy._lib._numpy_compat.suppress_warnings (in tests only);\n" + "found in:\n {}".format( + "\n ".join(bad_filters))) + + +@pytest.mark.slow +@pytest.mark.skipif(sys.version_info < (3, 4), reason="needs Python >= 3.4") +@pytest.mark.xfail(reason="stacklevels currently missing") +def test_warning_calls_stacklevels(warning_calls): + bad_filters, bad_stacklevels = warning_calls + + msg = "" + + if bad_filters: + msg += ("warning ignore filter should not be used, instead, use\n" + "scipy._lib._numpy_compat.suppress_warnings (in tests only);\n" + "found in:\n {}".format("\n ".join(bad_filters))) + msg += "\n\n" + + if bad_stacklevels: + msg += "warnings should have an appropriate stacklevel:\n {}".format( + "\n ".join(bad_stacklevels)) + + if msg: + raise AssertionError(msg) diff --git a/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_warnings.pyc b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_warnings.pyc new file mode 100644 index 0000000..05064fe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/_lib/tests/test_warnings.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/cluster/__init__.py new file mode 100644 index 0000000..3e2fcfe --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/cluster/__init__.py @@ -0,0 +1,31 @@ +""" +========================================= +Clustering package (:mod:`scipy.cluster`) +========================================= + +.. currentmodule:: scipy.cluster + +:mod:`scipy.cluster.vq` + +Clustering algorithms are useful in information theory, target detection, +communications, compression, and other areas. The `vq` module only +supports vector quantization and the k-means algorithms. + +:mod:`scipy.cluster.hierarchy` + +The `hierarchy` module provides functions for hierarchical and +agglomerative clustering. Its features include generating hierarchical +clusters from distance matrices, +calculating statistics on clusters, cutting linkages +to generate flat clusters, and visualizing clusters with dendrograms. + +""" +from __future__ import division, print_function, absolute_import + +__all__ = ['vq', 'hierarchy'] + +from . 
import vq, hierarchy + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/cluster/__init__.pyc new file mode 100644 index 0000000..1eaa175 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/_hierarchy.so b/project/venv/lib/python2.7/site-packages/scipy/cluster/_hierarchy.so new file mode 100755 index 0000000..e411fcc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/_hierarchy.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/_optimal_leaf_ordering.so b/project/venv/lib/python2.7/site-packages/scipy/cluster/_optimal_leaf_ordering.so new file mode 100755 index 0000000..1d56ca1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/_optimal_leaf_ordering.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/_vq.so b/project/venv/lib/python2.7/site-packages/scipy/cluster/_vq.so new file mode 100755 index 0000000..7bea5df Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/_vq.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/hierarchy.py b/project/venv/lib/python2.7/site-packages/scipy/cluster/hierarchy.py new file mode 100644 index 0000000..ed625f7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/cluster/hierarchy.py @@ -0,0 +1,4186 @@ +""" +======================================================== +Hierarchical clustering (:mod:`scipy.cluster.hierarchy`) +======================================================== + +.. currentmodule:: scipy.cluster.hierarchy + +These functions cut hierarchical clusterings into flat clusterings +or find the roots of the forest formed by a cut by providing the flat +cluster ids of each observation. + +.. autosummary:: + :toctree: generated/ + + fcluster + fclusterdata + leaders + +These are routines for agglomerative clustering. + +.. autosummary:: + :toctree: generated/ + + linkage + single + complete + average + weighted + centroid + median + ward + +These routines compute statistics on hierarchies. + +.. autosummary:: + :toctree: generated/ + + cophenet + from_mlab_linkage + inconsistent + maxinconsts + maxdists + maxRstat + to_mlab_linkage + +Routines for visualizing flat clusters. + +.. autosummary:: + :toctree: generated/ + + dendrogram + +These are data structures and routines for representing hierarchies as +tree objects. + +.. autosummary:: + :toctree: generated/ + + ClusterNode + leaves_list + to_tree + cut_tree + optimal_leaf_ordering + +These are predicates for checking the validity of linkage and +inconsistency matrices as well as for checking isomorphism of two +flat cluster assignments. + +.. autosummary:: + :toctree: generated/ + + is_valid_im + is_valid_linkage + is_isomorphic + is_monotonic + correspond + num_obs_linkage + +Utility routines for plotting: + +.. autosummary:: + :toctree: generated/ + + set_link_color_palette + +References +---------- + +.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks. + https://www.mathworks.com/access/helpdesk/help/toolbox/stats/. + Accessed October 1, 2007. + +.. [2] "Hierarchical clustering." API Reference Documentation. + The Wolfram Research, Inc. 
+ https://reference.wolfram.com/language/HierarchicalClustering/tutorial/HierarchicalClustering.html. + Accessed October 1, 2007. + +.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage + Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969. + +.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective + function." Journal of the American Statistical Association. 58(301): + pp. 236--44. 1963. + +.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika. + 32(2): pp. 241--54. 1966. + +.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp. + 855--60. 1962. + +.. [7] Batagelj, V. "Comparing resemblance measures." Journal of + Classification. 12: pp. 73--90. 1995. + +.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating + systematic relationships." Scientific Bulletins. 38(22): + pp. 1409--38. 1958. + +.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering + algorithms: the problem of classifying everybody." Multivariate + Behavioral Research. 14: pp. 367--84. 1979. + +.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data." + Prentice-Hall. Englewood Cliffs, NJ. 1988. + +.. [11] Fisher, RA "The use of multiple measurements in taxonomic + problems." Annals of Eugenics, 7(2): 179-188. 1936 + + +* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc. + +* Mathematica is a registered trademark of The Wolfram Research, Inc. + +""" +from __future__ import division, print_function, absolute_import + +# Copyright (C) Damian Eads, 2007-2008. New BSD License. + +# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com) +# +# Author: Damian Eads +# Date: September 22, 2007 +# +# Copyright (c) 2007, 2008, Damian Eads +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# - Redistributions of source code must retain the above +# copyright notice, this list of conditions and the +# following disclaimer. +# - Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# - Neither the name of the author nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import warnings +import bisect +from collections import deque + +import numpy as np +from . 
import _hierarchy, _optimal_leaf_ordering +import scipy.spatial.distance as distance + +from scipy._lib.six import string_types +from scipy._lib.six import xrange + +_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3, + 'median': 4, 'ward': 5, 'weighted': 6} +_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward') + +__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet', + 'correspond', 'cut_tree', 'dendrogram', 'fcluster', 'fclusterdata', + 'from_mlab_linkage', 'inconsistent', 'is_isomorphic', + 'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders', + 'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts', + 'median', 'num_obs_linkage', 'optimal_leaf_ordering', + 'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree', + 'ward', 'weighted', 'distance'] + + +class ClusterWarning(UserWarning): + pass + + +def _warning(s): + warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3) + + +def _copy_array_if_base_present(a): + """ + Copy the array if its base points to a parent array. + """ + if a.base is not None: + return a.copy() + elif np.issubsctype(a, np.float32): + return np.array(a, dtype=np.double) + else: + return a + + +def _copy_arrays_if_base_present(T): + """ + Accept a tuple of arrays T. Copies the array T[i] if its base array + points to an actual array. Otherwise, the reference is just copied. + This is useful if the arrays are being passed to a C function that + does not do proper striding. + """ + l = [_copy_array_if_base_present(a) for a in T] + return l + + +def _randdm(pnts): + """ + Generate a random distance matrix stored in condensed form. + + Parameters + ---------- + pnts : int + The number of points in the distance matrix. Has to be at least 2. + + Returns + ------- + D : ndarray + A ``pnts * (pnts - 1) / 2`` sized vector is returned. + """ + if pnts >= 2: + D = np.random.rand(pnts * (pnts - 1) / 2) + else: + raise ValueError("The number of points in the distance matrix " + "must be at least 2.") + return D + + +def single(y): + """ + Perform single/min/nearest linkage on the condensed distance matrix ``y``. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + The linkage matrix. + + See Also + -------- + linkage: for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import single, fcluster + >>> from scipy.spatial.distance import pdist + + First we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = single(y) + >>> Z + array([[ 0., 1., 1., 2.], + [ 2., 12., 1., 3.], + [ 3., 4., 1., 2.], + [ 5., 14., 1., 3.], + [ 6., 7., 1., 2.], + [ 8., 16., 1., 3.], + [ 9., 10., 1., 2.], + [11., 18., 1., 3.], + [13., 15., 2., 6.], + [17., 20., 2., 9.], + [19., 21., 2., 12.]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32) + >>> fcluster(Z, 1, criterion='distance') + array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='single', metric='euclidean') + + +def complete(y): + """ + Perform complete/max/farthest point linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage: for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import complete, fcluster + >>> from scipy.spatial.distance import pdist + + First we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = complete(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.41421356, 3. ], + [ 5. , 13. , 1.41421356, 3. ], + [ 8. , 14. , 1.41421356, 3. ], + [11. , 15. , 1.41421356, 3. ], + [16. , 17. , 4.12310563, 6. ], + [18. , 19. , 4.12310563, 6. ], + [20. , 21. , 5.65685425, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4.5, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='complete', metric='euclidean') + + +def average(y): + """ + Perform average/UPGMA linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage: for advanced creation of hierarchical clusterings. 
+ scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import average, fcluster + >>> from scipy.spatial.distance import pdist + + First we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = average(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 5. , 13. , 1.20710678, 3. ], + [ 8. , 14. , 1.20710678, 3. ], + [11. , 15. , 1.20710678, 3. ], + [16. , 17. , 3.39675184, 6. ], + [18. , 19. , 3.39675184, 6. ], + [20. , 21. , 4.09206523, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='average', metric='euclidean') + + +def weighted(y): + """ + Perform weighted/WPGMA linkage on the condensed distance matrix. + + See `linkage` for more information on the return + structure and algorithm. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import weighted, fcluster + >>> from scipy.spatial.distance import pdist + + First we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = weighted(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 11. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 8. , 13. , 1.20710678, 3. ], + [ 5. , 14. , 1.20710678, 3. ], + [10. , 15. , 1.20710678, 3. ], + [18. , 19. , 3.05595762, 6. ], + [16. , 17. , 3.32379407, 6. ], + [20. , 21. , 4.06357713, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 1, 2, 3, 10, 11, 12, 4, 6, 5], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='weighted', metric='euclidean') + + +def centroid(y): + """ + Perform centroid/UPGMC linkage. + + See `linkage` for more information on the input matrix, + return structure, and algorithm. + + The following are common calling conventions: + + 1. ``Z = centroid(y)`` + + Performs centroid/UPGMC linkage on the condensed distance + matrix ``y``. + + 2. ``Z = centroid(X)`` + + Performs centroid/UPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + a m by n array. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage: for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import centroid, fcluster + >>> from scipy.spatial.distance import pdist + + First we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = centroid(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3.33333333, 6. ], + [16. , 17. , 3.33333333, 6. ], + [20. , 21. , 3.33333333, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. 
+ + """ + return linkage(y, method='centroid', metric='euclidean') + + +def median(y): + """ + Perform median/WPGMC linkage. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = median(y)`` + + Performs median/WPGMC linkage on the condensed distance matrix + ``y``. See ``linkage`` for more information on the return + structure and algorithm. + + 2. ``Z = median(X)`` + + Performs median/WPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. See `linkage` + for more information on the return structure and algorithm. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + a m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + See Also + -------- + linkage: for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, fcluster + >>> from scipy.spatial.distance import pdist + + First we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = median(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='median', metric='euclidean') + + +def ward(y): + """ + Perform Ward's linkage on a condensed distance matrix. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = ward(y)`` + Performs Ward's linkage on the condensed distance matrix ``y``. + + 2. ``Z = ward(X)`` + Performs Ward's linkage on the observation matrix ``X`` using + Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. 
Alternatively, a collection of + m observation vectors in n dimensions may be passed as + a m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. See + `linkage` for more information on the return structure and + algorithm. + + See Also + -------- + linkage: for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster + >>> from scipy.spatial.distance import pdist + + First we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = ward(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) + >>> fcluster(Z, 3, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 9, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='ward', metric='euclidean') + + +def linkage(y, method='single', metric='euclidean', optimal_ordering=False): + """ + Perform hierarchical/agglomerative clustering. + + The input y may be either a 1d condensed distance matrix + or a 2d array of observation vectors. + + If y is a 1d condensed distance matrix, + then y must be a :math:`\\binom{n}{2}` sized + vector where n is the number of original observations paired + in the distance matrix. The behavior of this function is very + similar to the MATLAB linkage function. + + A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the + :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and + ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A + cluster with an index less than :math:`n` corresponds to one of + the :math:`n` original observations. The distance between + clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The + fourth value ``Z[i, 3]`` represents the number of original + observations in the newly formed cluster. + + The following linkage methods are used to compute the distance + :math:`d(s, t)` between two clusters :math:`s` and + :math:`t`. The algorithm begins with a forest of clusters that + have yet to be used in the hierarchy being formed. When two + clusters :math:`s` and :math:`t` from this forest are combined + into a single cluster :math:`u`, :math:`s` and :math:`t` are + removed from the forest, and :math:`u` is added to the + forest. 
When only one cluster remains in the forest, the algorithm + stops, and this cluster becomes the root. + + A distance matrix is maintained at each iteration. The ``d[i,j]`` + entry corresponds to the distance between cluster :math:`i` and + :math:`j` in the original forest. + + At each iteration, the algorithm must update the distance matrix + to reflect the distance of the newly formed cluster u with the + remaining clusters in the forest. + + Suppose there are :math:`|u|` original observations + :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and + :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in + cluster :math:`v`. Recall :math:`s` and :math:`t` are + combined to form cluster :math:`u`. Let :math:`v` be any + remaining cluster in the forest that is not :math:`u`. + + The following are methods for calculating the distance between the + newly formed cluster :math:`u` and each :math:`v`. + + * method='single' assigns + + .. math:: + d(u,v) = \\min(dist(u[i],v[j])) + + for all points :math:`i` in cluster :math:`u` and + :math:`j` in cluster :math:`v`. This is also known as the + Nearest Point Algorithm. + + * method='complete' assigns + + .. math:: + d(u, v) = \\max(dist(u[i],v[j])) + + for all points :math:`i` in cluster u and :math:`j` in + cluster :math:`v`. This is also known by the Farthest Point + Algorithm or Voor Hees Algorithm. + + * method='average' assigns + + .. math:: + d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])} + {(|u|*|v|)} + + for all points :math:`i` and :math:`j` where :math:`|u|` + and :math:`|v|` are the cardinalities of clusters :math:`u` + and :math:`v`, respectively. This is also called the UPGMA + algorithm. + + * method='weighted' assigns + + .. math:: + d(u,v) = (dist(s,v) + dist(t,v))/2 + + where cluster u was formed with cluster s and t and v + is a remaining cluster in the forest. (also called WPGMA) + + * method='centroid' assigns + + .. math:: + dist(s,t) = ||c_s-c_t||_2 + + where :math:`c_s` and :math:`c_t` are the centroids of + clusters :math:`s` and :math:`t`, respectively. When two + clusters :math:`s` and :math:`t` are combined into a new + cluster :math:`u`, the new centroid is computed over all the + original objects in clusters :math:`s` and :math:`t`. The + distance then becomes the Euclidean distance between the + centroid of :math:`u` and the centroid of a remaining cluster + :math:`v` in the forest. This is also known as the UPGMC + algorithm. + + * method='median' assigns :math:`d(s,t)` like the ``centroid`` + method. When two clusters :math:`s` and :math:`t` are combined + into a new cluster :math:`u`, the average of centroids s and t + give the new centroid :math:`u`. This is also known as the + WPGMC algorithm. + + * method='ward' uses the Ward variance minimization algorithm. + The new entry :math:`d(u,v)` is computed as follows, + + .. math:: + + d(u,v) = \\sqrt{\\frac{|v|+|s|} + {T}d(v,s)^2 + + \\frac{|v|+|t|} + {T}d(v,t)^2 + - \\frac{|v|} + {T}d(s,t)^2} + + where :math:`u` is the newly joined cluster consisting of + clusters :math:`s` and :math:`t`, :math:`v` is an unused + cluster in the forest, :math:`T=|v|+|s|+|t|`, and + :math:`|*|` is the cardinality of its argument. This is also + known as the incremental algorithm. + + Warning: When the minimum distance pair in the forest is chosen, there + may be two or more pairs with the same minimum distance. This + implementation may choose a different minimum than the MATLAB + version. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. 
A condensed distance matrix + is a flat array containing the upper triangular of the distance matrix. + This is the form that ``pdist`` returns. Alternatively, a collection of + :math:`m` observation vectors in :math:`n` dimensions may be passed as + an :math:`m` by :math:`n` array. All elements of the condensed distance + matrix must be finite, i.e. no NaNs or infs. + method : str, optional + The linkage algorithm to use. See the ``Linkage Methods`` section below + for full descriptions. + metric : str or function, optional + The distance metric to use in the case that y is a collection of + observation vectors; ignored otherwise. See the ``pdist`` + function for a list of valid distance metrics. A custom distance + function can also be used. + optimal_ordering : bool, optional + If True, the linkage matrix will be reordered so that the distance + between successive leaves is minimal. This results in a more intuitive + tree structure when the data are visualized. defaults to False, because + this algorithm can be slow, particularly on large datasets [2]_. See + also the `optimal_leaf_ordering` function. + + .. versionadded:: 1.0.0 + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + Notes + ----- + 1. For method 'single' an optimized algorithm based on minimum spanning + tree is implemented. It has time complexity :math:`O(n^2)`. + For methods 'complete', 'average', 'weighted' and 'ward' an algorithm + called nearest-neighbors chain is implemented. It also has time + complexity :math:`O(n^2)`. + For other methods a naive algorithm is implemented with :math:`O(n^3)` + time complexity. + All algorithms use :math:`O(n^2)` memory. + Refer to [1]_ for details about the algorithms. + 2. Methods 'centroid', 'median' and 'ward' are correctly defined only if + Euclidean pairwise metric is used. If `y` is passed as precomputed + pairwise distances, then it is a user responsibility to assure that + these distances are in fact Euclidean, otherwise the produced result + will be incorrect. + + See Also + -------- + scipy.spatial.distance.pdist : pairwise distance metrics + + References + ---------- + .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering + algorithms", :arXiv:`1109.2378v1`. + .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal + leaf ordering for hierarchical clustering", 2001. 
Bioinformatics + :doi:`10.1093/bioinformatics/17.suppl_1.S22` + + Examples + -------- + >>> from scipy.cluster.hierarchy import dendrogram, linkage + >>> from matplotlib import pyplot as plt + >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] + + >>> Z = linkage(X, 'ward') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + + >>> Z = linkage(X, 'single') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + >>> plt.show() + """ + if method not in _LINKAGE_METHODS: + raise ValueError("Invalid method: {0}".format(method)) + + y = _convert_to_double(np.asarray(y, order='c')) + + if y.ndim == 1: + distance.is_valid_y(y, throw=True, name='y') + [y] = _copy_arrays_if_base_present([y]) + elif y.ndim == 2: + if method in _EUCLIDEAN_METHODS and metric != 'euclidean': + raise ValueError("Method '{0}' requires the distance metric " + "to be Euclidean".format(method)) + if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0): + if np.all(y >= 0) and np.allclose(y, y.T): + _warning('The symmetric non-negative hollow observation ' + 'matrix looks suspiciously like an uncondensed ' + 'distance matrix') + y = distance.pdist(y, metric) + else: + raise ValueError("`y` must be 1 or 2 dimensional.") + + if not np.all(np.isfinite(y)): + raise ValueError("The condensed distance matrix must contain only " + "finite values.") + + n = int(distance.num_obs_y(y)) + method_code = _LINKAGE_METHODS[method] + + if method == 'single': + result = _hierarchy.mst_single_linkage(y, n) + elif method in ['complete', 'average', 'weighted', 'ward']: + result = _hierarchy.nn_chain(y, n, method_code) + else: + result = _hierarchy.fast_linkage(y, n, method_code) + + if optimal_ordering: + return optimal_leaf_ordering(result, y) + else: + return result + + +class ClusterNode: + """ + A tree node class for representing a cluster. + + Leaf nodes correspond to original observations, while non-leaf nodes + correspond to non-singleton clusters. + + The `to_tree` function converts a matrix returned by the linkage + function into an easy-to-use tree representation. + + All parameter names are also attributes. + + Parameters + ---------- + id : int + The node id. + left : ClusterNode instance, optional + The left child tree node. + right : ClusterNode instance, optional + The right child tree node. + dist : float, optional + Distance for this cluster in the linkage matrix. + count : int, optional + The number of samples in this cluster. + + See Also + -------- + to_tree : for converting a linkage matrix ``Z`` into a tree object. + + """ + + def __init__(self, id, left=None, right=None, dist=0, count=1): + if id < 0: + raise ValueError('The id must be non-negative.') + if dist < 0: + raise ValueError('The distance must be non-negative.') + if (left is None and right is not None) or \ + (left is not None and right is None): + raise ValueError('Only full or proper binary trees are permitted.' 
+ ' This node has one child.') + if count < 1: + raise ValueError('A cluster must contain at least one original ' + 'observation.') + self.id = id + self.left = left + self.right = right + self.dist = dist + if self.left is None: + self.count = count + else: + self.count = left.count + right.count + + def __lt__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + "to type {}".format(type(node))) + return self.dist < node.dist + + def __gt__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + "to type {}".format(type(node))) + return self.dist > node.dist + + def __eq__(self, node): + if not isinstance(node, ClusterNode): + raise ValueError("Can't compare ClusterNode " + "to type {}".format(type(node))) + return self.dist == node.dist + + def get_id(self): + """ + The identifier of the target node. + + For ``0 <= i < n``, `i` corresponds to original observation i. + For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed + at iteration ``i-n``. + + Returns + ------- + id : int + The identifier of the target node. + + """ + return self.id + + def get_count(self): + """ + The number of leaf nodes (original observations) belonging to + the cluster node nd. If the target node is a leaf, 1 is + returned. + + Returns + ------- + get_count : int + The number of leaf nodes below the target node. + + """ + return self.count + + def get_left(self): + """ + Return a reference to the left child tree object. + + Returns + ------- + left : ClusterNode + The left child of the target node. If the node is a leaf, + None is returned. + + """ + return self.left + + def get_right(self): + """ + Return a reference to the right child tree object. + + Returns + ------- + right : ClusterNode + The left child of the target node. If the node is a leaf, + None is returned. + + """ + return self.right + + def is_leaf(self): + """ + Return True if the target node is a leaf. + + Returns + ------- + leafness : bool + True if the target node is a leaf node. + + """ + return self.left is None + + def pre_order(self, func=(lambda x: x.id)): + """ + Perform pre-order traversal without recursive function calls. + + When a leaf node is first encountered, ``func`` is called with + the leaf node as its argument, and its result is appended to + the list. + + For example, the statement:: + + ids = root.pre_order(lambda x: x.id) + + returns a list of the node ids corresponding to the leaf nodes + of the tree as they appear from left to right. + + Parameters + ---------- + func : function + Applied to each leaf ClusterNode object in the pre-order traversal. + Given the ``i``-th leaf node in the pre-order traversal ``n[i]``, + the result of ``func(n[i])`` is stored in ``L[i]``. If not + provided, the index of the original observation to which the node + corresponds is used. + + Returns + ------- + L : list + The pre-order traversal. + + """ + # Do a preorder traversal, caching the result. To avoid having to do + # recursion, we'll store the previous index we've visited in a vector. 
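+        # ``curNode`` serves as an explicit stack (``k`` is the stack
+        # pointer), while ``lvisited``/``rvisited`` record, per node id,
+        # whether the left/right child has already been pushed.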
+ n = self.count + + curNode = [None] * (2 * n) + lvisited = set() + rvisited = set() + curNode[0] = self + k = 0 + preorder = [] + while k >= 0: + nd = curNode[k] + ndid = nd.id + if nd.is_leaf(): + preorder.append(func(nd)) + k = k - 1 + else: + if ndid not in lvisited: + curNode[k + 1] = nd.left + lvisited.add(ndid) + k = k + 1 + elif ndid not in rvisited: + curNode[k + 1] = nd.right + rvisited.add(ndid) + k = k + 1 + # If we've visited the left and right of this non-leaf + # node already, go up in the tree. + else: + k = k - 1 + + return preorder + + +_cnode_bare = ClusterNode(0) +_cnode_type = type(ClusterNode) + + +def _order_cluster_tree(Z): + """ + Return clustering nodes in bottom-up order by distance. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + + Returns + ------- + nodes : list + A list of ClusterNode objects. + """ + q = deque() + tree = to_tree(Z) + q.append(tree) + nodes = [] + + while q: + node = q.popleft() + if not node.is_leaf(): + bisect.insort_left(nodes, node) + q.append(node.get_right()) + q.append(node.get_left()) + return nodes + + +def cut_tree(Z, n_clusters=None, height=None): + """ + Given a linkage matrix Z, return the cut tree. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + n_clusters : array_like, optional + Number of clusters in the tree at the cut point. + height : array_like, optional + The height at which to cut the tree. Only possible for ultrametric + trees. + + Returns + ------- + cutree : array + An array indicating group membership at each agglomeration step. I.e., + for a full cut tree, in the first column each data point is in its own + cluster. At the next step, two nodes are merged. Finally all + singleton and non-singleton clusters are in one group. If `n_clusters` + or `height` is given, the columns correspond to the columns of + `n_clusters` or `height`. + + Examples + -------- + >>> from scipy import cluster + >>> np.random.seed(23) + >>> X = np.random.randn(50, 4) + >>> Z = cluster.hierarchy.ward(X) + >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10]) + >>> cutree[:10] + array([[0, 0], + [1, 1], + [2, 2], + [3, 3], + [3, 4], + [2, 2], + [0, 0], + [1, 5], + [3, 6], + [4, 7]]) + + """ + nobs = num_obs_linkage(Z) + nodes = _order_cluster_tree(Z) + + if height is not None and n_clusters is not None: + raise ValueError("At least one of either height or n_clusters " + "must be None") + elif height is None and n_clusters is None: # return the full cut tree + cols_idx = np.arange(nobs) + elif height is not None: + heights = np.array([x.dist for x in nodes]) + cols_idx = np.searchsorted(heights, height) + else: + cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters) + + try: + n_cols = len(cols_idx) + except TypeError: # scalar + n_cols = 1 + cols_idx = np.array([cols_idx]) + + groups = np.zeros((n_cols, nobs), dtype=int) + last_group = np.arange(nobs) + if 0 in cols_idx: + groups[0] = last_group + + for i, node in enumerate(nodes): + idx = node.pre_order() + this_group = last_group.copy() + this_group[idx] = last_group[idx].min() + this_group[this_group > last_group[idx].max()] -= 1 + if i + 1 in cols_idx: + groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group + last_group = this_group + + return groups.T + + +def to_tree(Z, rd=False): + """ + Convert a linkage matrix into an easy-to-use tree object. + + The reference to the root `ClusterNode` object is returned (by default). 
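+
+    Calling the ``pre_order`` method of the returned root yields the ids
+    of the original observations from left to right, matching the output
+    of `leaves_list`.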
+ + Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``, + and ``count`` attribute. The left and right attributes point to + ClusterNode objects that were combined to generate the cluster. + If both are None then the `ClusterNode` object is a leaf node, its count + must be 1, and its distance is meaningless but set to 0. + + *Note: This function is provided for the convenience of the library + user. ClusterNodes are not used as input to any of the functions in this + library.* + + Parameters + ---------- + Z : ndarray + The linkage matrix in proper form (see the `linkage` + function documentation). + rd : bool, optional + When False (default), a reference to the root `ClusterNode` object is + returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a + reference to the root node while ``d`` is a list of `ClusterNode` + objects - one per original entry in the linkage matrix plus entries + for all clustering steps. If a cluster id is + less than the number of samples ``n`` in the data that the linkage + matrix describes, then it corresponds to a singleton cluster (leaf + node). + See `linkage` for more information on the assignment of cluster ids + to clusters. + + Returns + ------- + tree : ClusterNode or tuple (ClusterNode, list of ClusterNode) + If ``rd`` is False, a `ClusterNode`. + If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number + of samples. See the description of `rd` above for more details. + + See Also + -------- + linkage, is_valid_linkage, ClusterNode + + Examples + -------- + >>> from scipy.cluster import hierarchy + >>> x = np.random.rand(10).reshape(5, 2) + >>> Z = hierarchy.linkage(x) + >>> hierarchy.to_tree(Z) + <scipy.cluster.hierarchy.ClusterNode object at ... + >>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True) + >>> rootnode + <scipy.cluster.hierarchy.ClusterNode object at ... + >>> len(nodelist) + 9 + + """ + Z = np.asarray(Z, order='c') + is_valid_linkage(Z, throw=True, name='Z') + + # Number of original objects is equal to the number of rows minus 1. + n = Z.shape[0] + 1 + + # Create a list full of None's to store the node objects + d = [None] * (n * 2 - 1) + + # Create the nodes corresponding to the n original objects. + for i in xrange(0, n): + d[i] = ClusterNode(i) + + nd = None + + for i in xrange(0, n - 1): + fi = int(Z[i, 0]) + fj = int(Z[i, 1]) + if fi > i + n: + raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' + 'is used before it is formed. See row %d, ' + 'column 0') % fi) + if fj > i + n: + raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' + 'is used before it is formed. See row %d, ' + 'column 1') % fj) + nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2]) + # ^ id ^ left ^ right ^ dist + if Z[i, 3] != nd.count: + raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is ' + 'incorrect.') % i) + d[n + i] = nd + + if rd: + return (nd, d) + else: + return nd + + +def optimal_leaf_ordering(Z, y, metric='euclidean'): + """ + Given a linkage matrix Z and distance, reorder the cut tree. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. See + `linkage` for more information on the return structure and + algorithm. + y : ndarray + The condensed distance matrix from which Z was generated. + Alternatively, a collection of m observation vectors in n + dimensions may be passed as a m by n array. + metric : str or function, optional + The distance metric to use in the case that y is a collection of + observation vectors; ignored otherwise. 
See the ``pdist`` + function for a list of valid distance metrics. A custom distance + function can also be used. + + Returns + ------- + Z_ordered : ndarray + A copy of the linkage matrix Z, reordered to minimize the distance + between adjacent leaves. + + Examples + -------- + >>> from scipy.cluster import hierarchy + >>> np.random.seed(23) + >>> X = np.random.randn(10,10) + >>> Z = hierarchy.ward(X) + >>> hierarchy.leaves_list(Z) + array([0, 5, 3, 9, 6, 8, 1, 4, 2, 7], dtype=int32) + >>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X)) + array([3, 9, 0, 5, 8, 2, 7, 4, 1, 6], dtype=int32) + + """ + Z = np.asarray(Z, order='c') + is_valid_linkage(Z, throw=True, name='Z') + + y = _convert_to_double(np.asarray(y, order='c')) + + if y.ndim == 1: + distance.is_valid_y(y, throw=True, name='y') + [y] = _copy_arrays_if_base_present([y]) + elif y.ndim == 2: + if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0): + if np.all(y >= 0) and np.allclose(y, y.T): + _warning('The symmetric non-negative hollow observation ' + 'matrix looks suspiciously like an uncondensed ' + 'distance matrix') + y = distance.pdist(y, metric) + else: + raise ValueError("`y` must be 1 or 2 dimensional.") + + if not np.all(np.isfinite(y)): + raise ValueError("The condensed distance matrix must contain only " + "finite values.") + + return _optimal_leaf_ordering.optimal_leaf_ordering(Z, y) + + +def _convert_to_bool(X): + if X.dtype != bool: + X = X.astype(bool) + if not X.flags.contiguous: + X = X.copy() + return X + + +def _convert_to_double(X): + if X.dtype != np.double: + X = X.astype(np.double) + if not X.flags.contiguous: + X = X.copy() + return X + + +def cophenet(Z, Y=None): + """ + Calculate the cophenetic distances between each observation in + the hierarchical clustering defined by the linkage ``Z``. + + Suppose ``p`` and ``q`` are original observations in + disjoint clusters ``s`` and ``t``, respectively and + ``s`` and ``t`` are joined by a direct parent cluster + ``u``. The cophenetic distance between observations + ``i`` and ``j`` is simply the distance between + clusters ``s`` and ``t``. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as an array + (see `linkage` function). + Y : ndarray (optional) + Calculates the cophenetic correlation coefficient ``c`` of a + hierarchical clustering defined by the linkage matrix `Z` + of a set of :math:`n` observations in :math:`m` + dimensions. `Y` is the condensed distance matrix from which + `Z` was generated. + + Returns + ------- + c : ndarray + The cophentic correlation distance (if ``Y`` is passed). + d : ndarray + The cophenetic distance matrix in condensed form. The + :math:`ij` th entry is the cophenetic distance between + original observations :math:`i` and :math:`j`. + + See Also + -------- + linkage: for a description of what a linkage matrix is. + scipy.spatial.distance.squareform: transforming condensed matrices into square ones. + + Examples + -------- + >>> from scipy.cluster.hierarchy import single, cophenet + >>> from scipy.spatial.distance import pdist, squareform + + Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance + between two points of ``X`` is the distance between the largest two + distinct clusters that each of the points: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... 
[4, 4], [3, 4], [4, 3]] + + ``X`` corresponds to this dataset :: + + x x x x + x x + + x x + x x x x + + >>> Z = single(pdist(X)) + >>> Z + array([[ 0., 1., 1., 2.], + [ 2., 12., 1., 3.], + [ 3., 4., 1., 2.], + [ 5., 14., 1., 3.], + [ 6., 7., 1., 2.], + [ 8., 16., 1., 3.], + [ 9., 10., 1., 2.], + [11., 18., 1., 3.], + [13., 15., 2., 6.], + [17., 20., 2., 9.], + [19., 21., 2., 12.]]) + >>> cophenet(Z) + array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2., + 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.]) + + The output of the `scipy.cluster.hierarchy.cophenet` method is + represented in condensed form. We can use + `scipy.spatial.distance.squareform` to see the output as a + regular matrix (where each element ``ij`` denotes the cophenetic distance + between each ``i``, ``j`` pair of points in ``X``): + + >>> squareform(cophenet(Z)) + array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.], + [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]]) + + In this example, the cophenetic distance between points on ``X`` that are + very close (i.e. in the same corner) is 1. For other pairs of points is 2, + because the points will be located in clusters at different + corners - thus the distance between these clusters will be larger. + + """ + Z = np.asarray(Z, order='c') + is_valid_linkage(Z, throw=True, name='Z') + Zs = Z.shape + n = Zs[0] + 1 + + zz = np.zeros((n * (n-1)) // 2, dtype=np.double) + # Since the C code does not support striding using strides. + # The dimensions are used instead. + Z = _convert_to_double(Z) + + _hierarchy.cophenetic_distances(Z, zz, int(n)) + if Y is None: + return zz + + Y = np.asarray(Y, order='c') + distance.is_valid_y(Y, throw=True, name='Y') + + z = zz.mean() + y = Y.mean() + Yy = Y - y + Zz = zz - z + numerator = (Yy * Zz) + denomA = Yy**2 + denomB = Zz**2 + c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum())) + return (c, zz) + + +def inconsistent(Z, d=2): + r""" + Calculate inconsistency statistics on a linkage matrix. + + Parameters + ---------- + Z : ndarray + The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical + clustering). See `linkage` documentation for more information on its + form. + d : int, optional + The number of links up to `d` levels below each non-singleton cluster. + + Returns + ------- + R : ndarray + A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link + statistics for the non-singleton cluster ``i``. The link statistics are + computed over the link heights for links :math:`d` levels below the + cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard + deviation of the link heights, respectively; ``R[i,2]`` is the number + of links included in the calculation; and ``R[i,3]`` is the + inconsistency coefficient, + + .. 
math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}} {R[i,1]} + + Notes + ----- + This function behaves similarly to the MATLAB(TM) ``inconsistent`` + function. + + Examples + -------- + >>> from scipy.cluster.hierarchy import inconsistent, linkage + >>> from matplotlib import pyplot as plt + >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] + >>> Z = linkage(X, 'ward') + >>> print(Z) + [[ 5. 6. 0. 2. ] + [ 2. 7. 0. 2. ] + [ 0. 4. 1. 2. ] + [ 1. 8. 1.15470054 3. ] + [ 9. 10. 2.12132034 4. ] + [ 3. 12. 4.11096096 5. ] + [11. 13. 14.07183949 8. ]] + >>> inconsistent(Z) + array([[ 0. , 0. , 1. , 0. ], + [ 0. , 0. , 1. , 0. ], + [ 1. , 0. , 1. , 0. ], + [ 0.57735027, 0.81649658, 2. , 0.70710678], + [ 1.04044011, 1.06123822, 3. , 1.01850858], + [ 3.11614065, 1.40688837, 2. , 0.70710678], + [ 6.44583366, 6.76770586, 3. , 1.12682288]]) + + """ + Z = np.asarray(Z, order='c') + + Zs = Z.shape + is_valid_linkage(Z, throw=True, name='Z') + if (not d == np.floor(d)) or d < 0: + raise ValueError('The second argument d must be a nonnegative ' + 'integer value.') + + # Since the C code does not support striding using strides. + # The dimensions are used instead. + [Z] = _copy_arrays_if_base_present([Z]) + + n = Zs[0] + 1 + R = np.zeros((n - 1, 4), dtype=np.double) + + _hierarchy.inconsistent(Z, R, int(n), int(d)) + return R + + +def from_mlab_linkage(Z): + """ + Convert a linkage matrix generated by MATLAB(TM) to a new + linkage matrix compatible with this module. + + The conversion does two things: + + * the indices are converted from ``1..N`` to ``0..(N-1)`` form, + and + + * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the + number of original observations (leaves) in the non-singleton + cluster ``i``. + + This function is useful when loading in linkages from legacy data + files generated by MATLAB. + + Parameters + ---------- + Z : ndarray + A linkage matrix generated by MATLAB(TM). + + Returns + ------- + ZS : ndarray + A linkage matrix compatible with ``scipy.cluster.hierarchy``. + + See Also + -------- + linkage: for a description of what a linkage matrix is. + to_mlab_linkage: transform from Scipy to MATLAB format. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.hierarchy import ward, from_mlab_linkage + + Given a linkage matrix in MATLAB format ``mZ``, we can use + `scipy.cluster.hierarchy.from_mlab_linkage` to import + it into Scipy format: + + >>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1], + ... [10, 11, 1], [3, 13, 1.29099445], + ... [6, 14, 1.29099445], + ... [9, 15, 1.29099445], + ... [12, 16, 1.29099445], + ... [17, 18, 5.77350269], + ... [19, 20, 5.77350269], + ... [21, 22, 8.16496581]]) + + >>> Z = from_mlab_linkage(mZ) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [ 11. , 15. , 1.29099445, 3. ], + [ 16. , 17. , 5.77350269, 6. ], + [ 18. , 19. , 5.77350269, 6. ], + [ 20. , 21. , 8.16496581, 12. ]]) + + As expected, the linkage matrix ``Z`` returned includes an + additional column counting the number of original samples in + each cluster. Also, all cluster indexes are reduced by 1 + (MATLAB format uses 1-indexing, whereas Scipy uses 0-indexing). + + """ + Z = np.asarray(Z, dtype=np.double, order='c') + Zs = Z.shape + + # If it's empty, return it. 
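+    # (covers both a zero-dimensional array and a 1-d array of length zero)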
+ if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return Z.copy() + + if len(Zs) != 2: + raise ValueError("The linkage array must be rectangular.") + + # If it contains no rows, return it. + if Zs[0] == 0: + return Z.copy() + + Zpart = Z.copy() + if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]: + raise ValueError('The format of the indices is not 1..N') + + Zpart[:, 0:2] -= 1.0 + CS = np.zeros((Zs[0],), dtype=np.double) + _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1) + return np.hstack([Zpart, CS.reshape(Zs[0], 1)]) + + +def to_mlab_linkage(Z): + """ + Convert a linkage matrix to a MATLAB(TM) compatible one. + + Converts a linkage matrix ``Z`` generated by the linkage function + of this module to a MATLAB(TM) compatible one. The return linkage + matrix has the last column removed and the cluster indices are + converted to ``1..N`` indexing. + + Parameters + ---------- + Z : ndarray + A linkage matrix generated by ``scipy.cluster.hierarchy``. + + Returns + ------- + to_mlab_linkage : ndarray + A linkage matrix compatible with MATLAB(TM)'s hierarchical + clustering functions. + + The return linkage matrix has the last column removed + and the cluster indices are converted to ``1..N`` indexing. + + See Also + -------- + linkage: for a description of what a linkage matrix is. + from_mlab_linkage: transform from Matlab to Scipy format. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, to_mlab_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + After a linkage matrix ``Z`` has been created, we can use + `scipy.cluster.hierarchy.to_mlab_linkage` to convert it + into MATLAB format: + + >>> mZ = to_mlab_linkage(Z) + >>> mZ + array([[ 1. , 2. , 1. ], + [ 4. , 5. , 1. ], + [ 7. , 8. , 1. ], + [ 10. , 11. , 1. ], + [ 3. , 13. , 1.29099445], + [ 6. , 14. , 1.29099445], + [ 9. , 15. , 1.29099445], + [ 12. , 16. , 1.29099445], + [ 17. , 18. , 5.77350269], + [ 19. , 20. , 5.77350269], + [ 21. , 22. , 8.16496581]]) + + The new linkage matrix ``mZ`` uses 1-indexing for all the + clusters (instead of 0-indexing). Also, the last column of + the original linkage matrix has been dropped. + + """ + Z = np.asarray(Z, order='c', dtype=np.double) + Zs = Z.shape + if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return Z.copy() + is_valid_linkage(Z, throw=True, name='Z') + + ZP = Z[:, 0:3].copy() + ZP[:, 0:2] += 1.0 + + return ZP + + +def is_monotonic(Z): + """ + Return True if the linkage passed is monotonic. + + The linkage is monotonic if for every cluster :math:`s` and :math:`t` + joined, the distance between them is no less than the distance + between any previously joined clusters. + + Parameters + ---------- + Z : ndarray + The linkage matrix to check for monotonicity. + + Returns + ------- + b : bool + A boolean indicating whether the linkage is monotonic. + + See Also + -------- + linkage: for a description of what a linkage matrix is. 
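+
+    Notes
+    -----
+    Of the methods implemented in `linkage`, only the geometric methods
+    ``centroid`` and ``median`` can produce non-monotonic linkages
+    (inversions); the remaining methods always yield monotonic linkages.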
+ + Examples + -------- + >>> from scipy.cluster.hierarchy import median, ward, is_monotonic + >>> from scipy.spatial.distance import pdist + + By definition, some hierarchical clustering algorithms - such as + `scipy.cluster.hierarchy.ward` - produce monotonic assignments of + samples to clusters; however, this is not always true for other + hierarchical methods - e.g. `scipy.cluster.hierarchy.median`. + + Given a linkage matrix ``Z`` (as the result of a hierarchical clustering + method) we can test programmatically whether if is has the monotonicity + property or not, using `scipy.cluster.hierarchy.is_monotonic`: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> is_monotonic(Z) + True + + >>> Z = median(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + >>> is_monotonic(Z) + False + + Note that this method is equivalent to just verifying that the distances + in the third column of the linkage matrix appear in a monotonically + increasing order. + + """ + Z = np.asarray(Z, order='c') + is_valid_linkage(Z, throw=True, name='Z') + + # We expect the i'th value to be greater than its successor. + return (Z[1:, 2] >= Z[:-1, 2]).all() + + +def is_valid_im(R, warning=False, throw=False, name=None): + """Return True if the inconsistency matrix passed is valid. + + It must be a :math:`n` by 4 array of doubles. The standard + deviations ``R[:,1]`` must be nonnegative. The link counts + ``R[:,2]`` must be positive and no greater than :math:`n-1`. + + Parameters + ---------- + R : ndarray + The inconsistency matrix to check for validity. + warning : bool, optional + When True, issues a Python warning if the linkage + matrix passed is invalid. + throw : bool, optional + When True, throws a Python exception if the linkage + matrix passed is invalid. + name : str, optional + This string refers to the variable name of the invalid + linkage matrix. + + Returns + ------- + b : bool + True if the inconsistency matrix is valid. + + See Also + -------- + linkage: for a description of what a linkage matrix is. + inconsistent: for the creation of a inconsistency matrix. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im + >>> from scipy.spatial.distance import pdist + + Given a data set ``X``, we can apply a clustering method to obtain a + linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can + be also used to obtain the inconsistency matrix ``R`` associated to + this clustering process: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> R = inconsistent(Z) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. 
, 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> R + array([[1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [1.14549722, 0.20576415, 2. , 0.70710678], + [2.78516386, 2.58797734, 3. , 1.15470054], + [2.78516386, 2.58797734, 3. , 1.15470054], + [6.57065706, 1.38071187, 3. , 1.15470054]]) + + Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that + ``R`` is correct: + + >>> is_valid_im(R) + True + + However, if ``R`` is wrongly constructed (e.g one of the standard + deviations is set to a negative value) then the check will fail: + + >>> R[-1,1] = R[-1,1] * -1 + >>> is_valid_im(R) + False + + """ + R = np.asarray(R, order='c') + valid = True + name_str = "%r " % name if name else '' + try: + if type(R) != np.ndarray: + raise TypeError('Variable %spassed as inconsistency matrix is not ' + 'a numpy array.' % name_str) + if R.dtype != np.double: + raise TypeError('Inconsistency matrix %smust contain doubles ' + '(double).' % name_str) + if len(R.shape) != 2: + raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. ' + 'be two-dimensional).' % name_str) + if R.shape[1] != 4: + raise ValueError('Inconsistency matrix %smust have 4 columns.' % + name_str) + if R.shape[0] < 1: + raise ValueError('Inconsistency matrix %smust have at least one ' + 'row.' % name_str) + if (R[:, 0] < 0).any(): + raise ValueError('Inconsistency matrix %scontains negative link ' + 'height means.' % name_str) + if (R[:, 1] < 0).any(): + raise ValueError('Inconsistency matrix %scontains negative link ' + 'height standard deviations.' % name_str) + if (R[:, 2] < 0).any(): + raise ValueError('Inconsistency matrix %scontains negative link ' + 'counts.' % name_str) + except Exception as e: + if throw: + raise + if warning: + _warning(str(e)) + valid = False + + return valid + + +def is_valid_linkage(Z, warning=False, throw=False, name=None): + """ + Check the validity of a linkage matrix. + + A linkage matrix is valid if it is a two dimensional array (type double) + with :math:`n` rows and 4 columns. The first two columns must contain + indices between 0 and :math:`2n-1`. For a given row ``i``, the following + two expressions have to hold: + + .. math:: + + 0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1 + 0 \\leq Z[i,1] \\leq i+n-1 + + I.e. a cluster cannot join another cluster unless the cluster being joined + has been generated. + + Parameters + ---------- + Z : array_like + Linkage matrix. + warning : bool, optional + When True, issues a Python warning if the linkage + matrix passed is invalid. + throw : bool, optional + When True, throws a Python exception if the linkage + matrix passed is invalid. + name : str, optional + This string refers to the variable name of the invalid + linkage matrix. + + Returns + ------- + b : bool + True if the inconsistency matrix is valid. + + See Also + -------- + linkage: for a description of what a linkage matrix is. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, is_valid_linkage + >>> from scipy.spatial.distance import pdist + + All linkage matrices generated by the clustering methods in this module + will be valid (i.e. they will have the appropriate dimensions and the two + required expressions will hold for all the rows). 
+ + We can check this using `scipy.cluster.hierarchy.is_valid_linkage`: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> is_valid_linkage(Z) + True + + However, is we create a linkage matrix in a wrong way - or if we modify + a valid one in a way that any of the required expressions don't hold + anymore, then the check will fail: + + >>> Z[3][1] = 20 # the cluster number 20 is not defined at this point + >>> is_valid_linkage(Z) + False + + """ + Z = np.asarray(Z, order='c') + valid = True + name_str = "%r " % name if name else '' + try: + if type(Z) != np.ndarray: + raise TypeError('Passed linkage argument %sis not a valid array.' % + name_str) + if Z.dtype != np.double: + raise TypeError('Linkage matrix %smust contain doubles.' % name_str) + if len(Z.shape) != 2: + raise ValueError('Linkage matrix %smust have shape=2 (i.e. be ' + 'two-dimensional).' % name_str) + if Z.shape[1] != 4: + raise ValueError('Linkage matrix %smust have 4 columns.' % name_str) + if Z.shape[0] == 0: + raise ValueError('Linkage must be computed on at least two ' + 'observations.') + n = Z.shape[0] + if n > 1: + if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()): + raise ValueError('Linkage %scontains negative indices.' % + name_str) + if (Z[:, 2] < 0).any(): + raise ValueError('Linkage %scontains negative distances.' % + name_str) + if (Z[:, 3] < 0).any(): + raise ValueError('Linkage %scontains negative counts.' % + name_str) + if _check_hierarchy_uses_cluster_before_formed(Z): + raise ValueError('Linkage %suses non-singleton cluster before ' + 'it is formed.' % name_str) + if _check_hierarchy_uses_cluster_more_than_once(Z): + raise ValueError('Linkage %suses the same cluster more than once.' + % name_str) + except Exception as e: + if throw: + raise + if warning: + _warning(str(e)) + valid = False + + return valid + + +def _check_hierarchy_uses_cluster_before_formed(Z): + n = Z.shape[0] + 1 + for i in xrange(0, n - 1): + if Z[i, 0] >= n + i or Z[i, 1] >= n + i: + return True + return False + + +def _check_hierarchy_uses_cluster_more_than_once(Z): + n = Z.shape[0] + 1 + chosen = set([]) + for i in xrange(0, n - 1): + if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]: + return True + chosen.add(Z[i, 0]) + chosen.add(Z[i, 1]) + return False + + +def _check_hierarchy_not_all_clusters_used(Z): + n = Z.shape[0] + 1 + chosen = set([]) + for i in xrange(0, n - 1): + chosen.add(int(Z[i, 0])) + chosen.add(int(Z[i, 1])) + must_chosen = set(range(0, 2 * n - 2)) + return len(must_chosen.difference(chosen)) > 0 + + +def num_obs_linkage(Z): + """ + Return the number of original observations of the linkage matrix passed. + + Parameters + ---------- + Z : ndarray + The linkage matrix on which to perform the operation. + + Returns + ------- + n : int + The number of original observations in the linkage. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, num_obs_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... 
[4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + ``Z`` is a linkage matrix obtained after using the Ward clustering method + with ``X``, a dataset with 12 data points. + + >>> num_obs_linkage(Z) + 12 + + """ + Z = np.asarray(Z, order='c') + is_valid_linkage(Z, throw=True, name='Z') + return (Z.shape[0] + 1) + + +def correspond(Z, Y): + """ + Check for correspondence between linkage and condensed distance matrices. + + They must have the same number of original observations for + the check to succeed. + + This function is useful as a sanity check in algorithms that make + extensive use of linkage and distance matrices that must + correspond to the same set of original observations. + + Parameters + ---------- + Z : array_like + The linkage matrix to check for correspondence. + Y : array_like + The condensed distance matrix to check for correspondence. + + Returns + ------- + b : bool + A boolean indicating whether the linkage matrix and distance + matrix could possibly correspond to one another. + + See Also + -------- + linkage: for a description of what a linkage matrix is. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, correspond + >>> from scipy.spatial.distance import pdist + + This method can be used to check if a given linkage matrix ``Z`` has been + obtained from the application of a cluster method over a dataset ``X``: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + >>> X_condensed = pdist(X) + >>> Z = ward(X_condensed) + + Here we can compare ``Z`` and ``X`` (in condensed form): + + >>> correspond(Z, X_condensed) + True + + """ + is_valid_linkage(Z, throw=True) + distance.is_valid_y(Y, throw=True) + Z = np.asarray(Z, order='c') + Y = np.asarray(Y, order='c') + return distance.num_obs_y(Y) == num_obs_linkage(Z) + + +def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None): + """ + Form flat clusters from the hierarchical clustering defined by + the given linkage matrix. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded with the matrix returned + by the `linkage` function. + t : scalar + For criteria 'inconsistent', 'distance' or 'monocrit', + this is the threshold to apply when forming flat clusters. + For 'maxclust' or 'maxclust_monocrit' criteria, + this would be max number of clusters requested. + criterion : str, optional + The criterion to use in forming flat clusters. This can + be any of the following values: + + ``inconsistent`` : + If a cluster node and all its + descendants have an inconsistent value less than or equal + to `t` then all its leaf descendants belong to the + same flat cluster. When no non-singleton cluster meets + this criterion, every node is assigned to its own + cluster. (Default) + + ``distance`` : + Forms flat clusters so that the original + observations in each flat cluster have no greater a + cophenetic distance than `t`. + + ``maxclust`` : + Finds a minimum threshold ``r`` so that + the cophenetic distance between any two original + observations in the same flat cluster is no more than + ``r`` and no more than `t` flat clusters are formed. + + ``monocrit`` : + Forms a flat cluster from a cluster node c + with index i when ``monocrit[j] <= t``. 
+ + For example, to threshold on the maximum mean distance + as computed in the inconsistency matrix R with a + threshold of 0.8 do:: + + MR = maxRstat(Z, R, 3) + cluster(Z, t=0.8, criterion='monocrit', monocrit=MR) + + ``maxclust_monocrit`` : + Forms a flat cluster from a + non-singleton cluster node ``c`` when ``monocrit[i] <= + r`` for all cluster indices ``i`` below and including + ``c``. ``r`` is minimized such that no more than ``t`` + flat clusters are formed. monocrit must be + monotonic. For example, to minimize the threshold t on + maximum inconsistency values so that no more than 3 flat + clusters are formed, do:: + + MI = maxinconsts(Z, R) + cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI) + + depth : int, optional + The maximum depth to perform the inconsistency calculation. + It has no meaning for the other criteria. Default is 2. + R : ndarray, optional + The inconsistency matrix to use for the 'inconsistent' + criterion. This matrix is computed if not provided. + monocrit : ndarray, optional + An array of length n-1. `monocrit[i]` is the + statistics upon which non-singleton i is thresholded. The + monocrit vector must be monotonic, i.e. given a node c with + index i, for all node indices j corresponding to nodes + below c, ``monocrit[i] >= monocrit[j]``. + + Returns + ------- + fcluster : ndarray + An array of length ``n``. ``T[i]`` is the flat cluster number to + which original observation ``i`` belongs. + + See Also + -------- + linkage : for information about hierarchical clustering methods work. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster + >>> from scipy.spatial.distance import pdist + + All cluster linkage methods - e.g. `scipy.cluster.hierarchy.ward` + generate a linkage matrix ``Z`` as their output: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + This matrix represents a dendrogram, where the first and second elements + are the two clusters merged at each step, the third element is the + distance between these clusters, and the fourth element is the size of + the new cluster - the number of original data points included. + + `scipy.cluster.hierarchy.fcluster` can be used to flatten the + dendrogram, obtaining as a result an assignation of the original data + points to single clusters. + + This assignation mostly depends on a distance threshold ``t`` - the maximum + inter-cluster distance allowed: + + >>> fcluster(Z, t=0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + + >>> fcluster(Z, t=1.1, criterion='distance') + array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) + + >>> fcluster(Z, t=3, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + + >>> fcluster(Z, t=9, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + In the first case, the threshold ``t`` is too small to allow any two + samples in the data to form a cluster, so 12 different clusters are + returned. 
+
+    In the second case, the threshold is large enough to allow the first
+    4 points to be merged with their nearest neighbors. So here only 8
+    clusters are returned.
+
+    The third case, with a much higher threshold, allows for up to 8 data
+    points to be connected - so 4 clusters are returned here.
+
+    Lastly, the threshold of the fourth case is large enough to allow for
+    all data points to be merged together - so a single cluster is returned.
+
+    """
+    Z = np.asarray(Z, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    n = Z.shape[0] + 1
+    T = np.zeros((n,), dtype='i')
+
+    # The C code does not support striding using strides;
+    # the dimensions are used instead.
+    [Z] = _copy_arrays_if_base_present([Z])
+
+    if criterion == 'inconsistent':
+        if R is None:
+            R = inconsistent(Z, depth)
+        else:
+            R = np.asarray(R, order='c')
+            is_valid_im(R, throw=True, name='R')
+            # The C code does not support striding using strides;
+            # the dimensions are used instead.
+            [R] = _copy_arrays_if_base_present([R])
+        _hierarchy.cluster_in(Z, R, T, float(t), int(n))
+    elif criterion == 'distance':
+        _hierarchy.cluster_dist(Z, T, float(t), int(n))
+    elif criterion == 'maxclust':
+        _hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
+    elif criterion == 'monocrit':
+        [monocrit] = _copy_arrays_if_base_present([monocrit])
+        _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
+    elif criterion == 'maxclust_monocrit':
+        [monocrit] = _copy_arrays_if_base_present([monocrit])
+        _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
+    else:
+        raise ValueError('Invalid cluster formation criterion: %s'
+                         % str(criterion))
+    return T
+
+
+def fclusterdata(X, t, criterion='inconsistent',
+                 metric='euclidean', depth=2, method='single', R=None):
+    """
+    Cluster observation data using a given metric.
+
+    Clusters the original observations in the n-by-m data
+    matrix X (n observations in m dimensions), using the euclidean
+    distance metric to calculate distances between original observations,
+    performs hierarchical clustering using the single linkage algorithm,
+    and forms flat clusters using the inconsistency method with `t` as the
+    cut-off threshold.
+
+    A one-dimensional array ``T`` of length ``n`` is returned. ``T[i]`` is
+    the index of the flat cluster to which the original observation ``i``
+    belongs.
+
+    Parameters
+    ----------
+    X : (N, M) ndarray
+        N by M data matrix with N observations in M dimensions.
+    t : scalar
+        For criteria 'inconsistent', 'distance' or 'monocrit',
+        this is the threshold to apply when forming flat clusters.
+        For 'maxclust' or 'maxclust_monocrit' criteria,
+        this would be the maximum number of clusters requested.
+    criterion : str, optional
+        Specifies the criterion for forming flat clusters. Valid
+        values are 'inconsistent' (default), 'distance', or 'maxclust'
+        cluster formation algorithms. See `fcluster` for descriptions.
+    metric : str, optional
+        The distance metric for calculating pairwise distances. See
+        ``distance.pdist`` for descriptions, and `linkage` to verify
+        compatibility with the linkage method.
+    depth : int, optional
+        The maximum depth for the inconsistency calculation. See
+        `inconsistent` for more information.
+    method : str, optional
+        The linkage method to use (single, complete, average,
+        weighted, median, centroid, ward). See `linkage` for more
+        information. Default is "single".
+    R : ndarray, optional
+        The inconsistency matrix. It will be computed if necessary
+        when it is not passed.
+
+    Returns
+    -------
+    fclusterdata : ndarray
+        A vector of length n. T[i] is the flat cluster number to
+        which original observation i belongs.
+
+    See Also
+    --------
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Notes
+    -----
+    This function is similar to the MATLAB function ``clusterdata``.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import fclusterdata
+
+    This is a convenience method that abstracts all the steps to perform in a
+    typical SciPy hierarchical clustering workflow.
+
+    * Transform the input data into a condensed matrix with
+      `scipy.spatial.distance.pdist`.
+
+    * Apply a clustering method.
+
+    * Obtain flat clusters at a user-defined distance threshold ``t`` using
+      `scipy.cluster.hierarchy.fcluster`.
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> fclusterdata(X, t=1)
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+
+    The output here (for the dataset ``X``, distance threshold ``t``, and the
+    default settings) is four clusters with three data points each.
+
+    """
+    X = np.asarray(X, order='c', dtype=np.double)
+
+    if type(X) != np.ndarray or len(X.shape) != 2:
+        raise TypeError('The observation matrix X must be an n by m numpy '
+                        'array.')
+
+    Y = distance.pdist(X, metric=metric)
+    Z = linkage(Y, method=method)
+    if R is None:
+        R = inconsistent(Z, d=depth)
+    else:
+        R = np.asarray(R, order='c')
+    T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
+    return T
+
+
+def leaves_list(Z):
+    """
+    Return a list of leaf node ids.
+
+    The return corresponds to the observation vector index as it appears
+    in the tree from left to right. Z is a linkage matrix.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. `Z` is
+        a linkage matrix. See `linkage` for more information.
+
+    Returns
+    -------
+    leaves_list : ndarray
+        The list of leaf node ids.
+
+    See Also
+    --------
+    dendrogram: for information about dendrogram structure.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list
+    >>> from scipy.spatial.distance import pdist
+    >>> from matplotlib import pyplot as plt
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+
+    The linkage matrix ``Z`` represents a dendrogram, that is, a tree that
+    encodes the structure of the clustering performed.
+    `scipy.cluster.hierarchy.leaves_list` shows the mapping between
+    indices in the ``X`` dataset and leaves in the dendrogram:
+
+    >>> leaves_list(Z)
+    array([ 2,  0,  1,  5,  3,  4,  8,  6,  7, 11,  9, 10], dtype=int32)
+
+    >>> fig = plt.figure(figsize=(25, 10))
+    >>> dn = dendrogram(Z)
+    >>> plt.show()
+
+    """
+    Z = np.asarray(Z, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+    n = Z.shape[0] + 1
+    ML = np.zeros((n,), dtype='i')
+    [Z] = _copy_arrays_if_base_present([Z])
+    _hierarchy.prelist(Z, ML, int(n))
+    return ML
+
+
+# Maps number of leaves to text size.
+#
+# p <= 20, size="12"
+# 20 < p <= 30, size="10"
+# 30 < p <= 50, size="8"
+# 50 < p <= 85, size="6"
+# 85 < p, size="5"
+
+_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
+_drotation = {20: 0, 40: 45, np.inf: 90}
+_dtextsortedkeys = list(_dtextsizes.keys())
+_dtextsortedkeys.sort()
+_drotationsortedkeys = list(_drotation.keys())
+_drotationsortedkeys.sort()
+
+
+def _remove_dups(L):
+    """
+    Remove duplicates AND preserve the original order of the elements.
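+    For example, ``['b', 'a', 'b', 'c']`` maps to ``['b', 'a', 'c']``.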
+ + The set class is not guaranteed to do this. + """ + seen_before = set([]) + L2 = [] + for i in L: + if i not in seen_before: + seen_before.add(i) + L2.append(i) + return L2 + + +def _get_tick_text_size(p): + for k in _dtextsortedkeys: + if p <= k: + return _dtextsizes[k] + + +def _get_tick_rotation(p): + for k in _drotationsortedkeys: + if p <= k: + return _drotation[k] + + +def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation, + no_labels, color_list, leaf_font_size=None, + leaf_rotation=None, contraction_marks=None, + ax=None, above_threshold_color='b'): + # Import matplotlib here so that it's not imported unless dendrograms + # are plotted. Raise an informative error if importing fails. + try: + # if an axis is provided, don't use pylab at all + if ax is None: + import matplotlib.pylab + import matplotlib.patches + import matplotlib.collections + except ImportError: + raise ImportError("You must install the matplotlib library to plot " + "the dendrogram. Use no_plot=True to calculate the " + "dendrogram without plotting.") + + if ax is None: + ax = matplotlib.pylab.gca() + # if we're using pylab, we want to trigger a draw at the end + trigger_redraw = True + else: + trigger_redraw = False + + # Independent variable plot width + ivw = len(ivl) * 10 + # Dependent variable plot height + dvw = mh + mh * 0.05 + + iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10) + if orientation in ('top', 'bottom'): + if orientation == 'top': + ax.set_ylim([0, dvw]) + ax.set_xlim([0, ivw]) + else: + ax.set_ylim([dvw, 0]) + ax.set_xlim([0, ivw]) + + xlines = icoords + ylines = dcoords + if no_labels: + ax.set_xticks([]) + ax.set_xticklabels([]) + else: + ax.set_xticks(iv_ticks) + + if orientation == 'top': + ax.xaxis.set_ticks_position('bottom') + else: + ax.xaxis.set_ticks_position('top') + + # Make the tick marks invisible because they cover up the links + for line in ax.get_xticklines(): + line.set_visible(False) + + leaf_rot = (float(_get_tick_rotation(len(ivl))) + if (leaf_rotation is None) else leaf_rotation) + leaf_font = (float(_get_tick_text_size(len(ivl))) + if (leaf_font_size is None) else leaf_font_size) + ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font) + + elif orientation in ('left', 'right'): + if orientation == 'left': + ax.set_xlim([dvw, 0]) + ax.set_ylim([0, ivw]) + else: + ax.set_xlim([0, dvw]) + ax.set_ylim([0, ivw]) + + xlines = dcoords + ylines = icoords + if no_labels: + ax.set_yticks([]) + ax.set_yticklabels([]) + else: + ax.set_yticks(iv_ticks) + + if orientation == 'left': + ax.yaxis.set_ticks_position('right') + else: + ax.yaxis.set_ticks_position('left') + + # Make the tick marks invisible because they cover up the links + for line in ax.get_yticklines(): + line.set_visible(False) + + leaf_font = (float(_get_tick_text_size(len(ivl))) + if (leaf_font_size is None) else leaf_font_size) + + if leaf_rotation is not None: + ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font) + else: + ax.set_yticklabels(ivl, size=leaf_font) + + # Let's use collections instead. This way there is a separate legend item + # for each tree grouping, rather than stupidly one for each line segment. + colors_used = _remove_dups(color_list) + color_to_lines = {} + for color in colors_used: + color_to_lines[color] = [] + for (xline, yline, color) in zip(xlines, ylines, color_list): + color_to_lines[color].append(list(zip(xline, yline))) + + colors_to_collections = {} + # Construct the collections. 
+    for color in colors_used:
+        coll = matplotlib.collections.LineCollection(color_to_lines[color],
+                                                     colors=(color,))
+        colors_to_collections[color] = coll
+
+    # Add all the groupings below the color threshold.
+    for color in colors_used:
+        if color != above_threshold_color:
+            ax.add_collection(colors_to_collections[color])
+    # If there's a grouping of links above the color threshold, it goes last.
+    if above_threshold_color in colors_to_collections:
+        ax.add_collection(colors_to_collections[above_threshold_color])
+
+    if contraction_marks is not None:
+        Ellipse = matplotlib.patches.Ellipse
+        for (x, y) in contraction_marks:
+            if orientation in ('left', 'right'):
+                e = Ellipse((y, x), width=dvw / 100, height=1.0)
+            else:
+                e = Ellipse((x, y), width=1.0, height=dvw / 100)
+            ax.add_artist(e)
+            e.set_clip_box(ax.bbox)
+            e.set_alpha(0.5)
+            e.set_facecolor('k')
+
+    if trigger_redraw:
+        matplotlib.pylab.draw_if_interactive()
+
+
+_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
+
+
+def set_link_color_palette(palette):
+    """
+    Set list of matplotlib color codes for use by dendrogram.
+
+    Note that this palette is global (i.e. setting it once changes the colors
+    for all subsequent calls to `dendrogram`) and that it affects only the
+    colors below ``color_threshold``.
+
+    Note that `dendrogram` also accepts a custom coloring function through its
+    ``link_color_func`` keyword, which is more flexible and non-global.
+
+    Parameters
+    ----------
+    palette : list of str or None
+        A list of matplotlib color codes. The order of the color codes is the
+        order in which the colors are cycled through when color thresholding
+        in the dendrogram.
+
+        If ``None``, resets the palette to its default (which is
+        ``['g', 'r', 'c', 'm', 'y', 'k']``).
+
+    Returns
+    -------
+    None
+
+    See Also
+    --------
+    dendrogram
+
+    Notes
+    -----
+    Ability to reset the palette with ``None`` added in SciPy 0.17.0.
+
+    Examples
+    --------
+    >>> from scipy.cluster import hierarchy
+    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
+    ...                    400., 754., 564., 138., 219., 869., 669.])
+    >>> Z = hierarchy.linkage(ytdist, 'single')
+    >>> dn = hierarchy.dendrogram(Z, no_plot=True)
+    >>> dn['color_list']
+    ['g', 'b', 'b', 'b', 'b']
+    >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
+    >>> dn = hierarchy.dendrogram(Z, no_plot=True)
+    >>> dn['color_list']
+    ['c', 'b', 'b', 'b', 'b']
+    >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
+    ...                           above_threshold_color='k')
+    >>> dn['color_list']
+    ['c', 'm', 'm', 'k', 'k']
+
+    Now reset the color palette to its default:
+
+    >>> hierarchy.set_link_color_palette(None)
+
+    """
+    if palette is None:
+        # reset to its default
+        palette = ['g', 'r', 'c', 'm', 'y', 'k']
+    elif type(palette) not in (list, tuple):
+        raise TypeError("palette must be a list or tuple")
+    _ptypes = [isinstance(p, string_types) for p in palette]
+
+    if False in _ptypes:
+        raise TypeError("all palette list elements must be color strings")
+
+    for i in list(_link_line_colors):
+        _link_line_colors.remove(i)
+    _link_line_colors.extend(list(palette))
+
+
+def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
+               get_leaves=True, orientation='top', labels=None,
+               count_sort=False, distance_sort=False, show_leaf_counts=True,
+               no_plot=False, no_labels=False, leaf_font_size=None,
+               leaf_rotation=None, leaf_label_func=None,
+               show_contracted=False, link_color_func=None, ax=None,
+               above_threshold_color='b'):
+    """
+    Plot the hierarchical clustering as a dendrogram.
+
+    The dendrogram illustrates how each cluster is
+    composed by drawing a U-shaped link between a non-singleton
+    cluster and its children. The top of the U-link indicates a
+    cluster merge. The two legs of the U-link indicate which clusters
+    were merged. The length of the two legs of the U-link represents
+    the distance between the child clusters. It is also the
+    cophenetic distance between original observations in the two
+    children clusters.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The linkage matrix encoding the hierarchical clustering to
+        render as a dendrogram. See the ``linkage`` function for more
+        information on the format of ``Z``.
+    p : int, optional
+        The ``p`` parameter for ``truncate_mode``.
+    truncate_mode : str, optional
+        The dendrogram can be hard to read when the original
+        observation matrix from which the linkage is derived is
+        large. Truncation is used to condense the dendrogram. There
+        are several modes:
+
+        ``None``
+          No truncation is performed (default).
+          Note: ``'none'`` is an alias for ``None`` that's kept for
+          backward compatibility.
+
+        ``'lastp'``
+          The last ``p`` non-singleton clusters formed in the linkage are the
+          only non-leaf nodes in the linkage; they correspond to rows
+          ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
+          contracted into leaf nodes.
+
+        ``'level'``
+          No more than ``p`` levels of the dendrogram tree are displayed.
+          A "level" includes all nodes with ``p`` merges from the last merge.
+
+          Note: ``'mtica'`` is an alias for ``'level'`` that's kept for
+          backward compatibility.
+
+    color_threshold : double, optional
+        For brevity, let :math:`t` be the ``color_threshold``.
+        Colors all the descendent links below a cluster node
+        :math:`k` the same color if :math:`k` is the first node below
+        the cut threshold :math:`t`. All links connecting nodes with
+        distances greater than or equal to the threshold are colored
+        blue. If :math:`t` is less than or equal to zero, all nodes
+        are colored blue. If ``color_threshold`` is None or
+        'default', corresponding with MATLAB(TM) behavior, the
+        threshold is set to ``0.7*max(Z[:,2])``.
+    get_leaves : bool, optional
+        Includes a list ``R['leaves']=H`` in the result
+        dictionary. For each :math:`i`, ``H[i] == j``, cluster node
+        ``j`` appears in position ``i`` in the left-to-right traversal
+        of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
+    orientation : str, optional
+        The direction to plot the dendrogram, which can be any
+        of the following strings:
+
+        ``'top'``
+          Plots the root at the top, and plots descendent links going
+          downwards. (default).
+
+        ``'bottom'``
+          Plots the root at the bottom, and plots descendent links going
+          upwards.
+
+        ``'left'``
+          Plots the root at the left, and plots descendent links going right.
+
+        ``'right'``
+          Plots the root at the right, and plots descendent links going left.
+
+    labels : ndarray, optional
+        By default ``labels`` is None so the index of the original observation
+        is used to label the leaf nodes. Otherwise, this is an :math:`n`
+        -sized list (or tuple). The ``labels[i]`` value is the text to put
+        under the :math:`i` th leaf node only if it corresponds to an original
+        observation and not a non-singleton cluster.
+    count_sort : str or bool, optional
+        For each node n, the order (visually, from left to right) in which
+        n's two descendent links are plotted is determined by this
+        parameter, which can be any of the following values:
+
+        ``False``
+          Nothing is done.
+
+        ``'ascending'`` or ``True``
+          The child with the minimum number of original objects in its cluster
+          is plotted first.
+
+        ``'descending'``
+          The child with the maximum number of original objects in its cluster
+          is plotted first.
+
+        Note that ``distance_sort`` and ``count_sort`` cannot both be True.
+    distance_sort : str or bool, optional
+        For each node n, the order (visually, from left to right) in which
+        n's two descendent links are plotted is determined by this
+        parameter, which can be any of the following values:
+
+        ``False``
+          Nothing is done.
+
+        ``'ascending'`` or ``True``
+          The child with the minimum distance between its direct descendents
+          is plotted first.
+
+        ``'descending'``
+          The child with the maximum distance between its direct descendents
+          is plotted first.
+
+        Note that ``distance_sort`` and ``count_sort`` cannot both be True.
+    show_leaf_counts : bool, optional
+         When True, leaf nodes representing :math:`k>1` original
+         observations are labeled with the number of observations they
+         contain in parentheses.
+    no_plot : bool, optional
+        When True, the final rendering is not performed. This is
+        useful if only the data structures computed for the rendering
+        are needed or if matplotlib is not available.
+    no_labels : bool, optional
+        When True, no labels appear next to the leaf nodes in the
+        rendering of the dendrogram.
+    leaf_rotation : double, optional
+        Specifies the angle (in degrees) to rotate the leaf
+        labels. When unspecified, the rotation is based on the number of
+        nodes in the dendrogram (default is 0).
+    leaf_font_size : int, optional
+        Specifies the font size (in points) of the leaf labels. When
+        unspecified, the size is based on the number of nodes in the
+        dendrogram.
+    leaf_label_func : lambda or function, optional
+        When ``leaf_label_func`` is a callable function, it is called with
+        each leaf's cluster index :math:`k < 2n-1` and is expected to
+        return a string with the label for that leaf.
+
+        Indices :math:`k < n` correspond to original observations
+        while indices :math:`k \\geq n` correspond to non-singleton
+        clusters.
+
+        For example, to label singletons with their node id and
+        non-singletons with their id, count, and inconsistency
+        coefficient, simply do::
+
+            # First define the leaf label function.
+            def llf(id):
+                if id < n:
+                    return str(id)
+                else:
+                    return '[%d %d %1.2f]' % (id, count, R[n-id,3])
+            # The text for the leaf nodes is going to be big so force
+            # a rotation of 90 degrees.
+            dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
+
+    show_contracted : bool, optional
+        When True the heights of non-singleton nodes contracted
+        into a leaf node are plotted as crosses along the link
+        connecting that leaf node. This really is only useful when
+        truncation is used (see ``truncate_mode`` parameter).
+    link_color_func : callable, optional
+        If given, ``link_color_func`` is called with each non-singleton id
+        corresponding to each U-shaped link it will paint. The function is
+        expected to return the color to paint the link, encoded as a matplotlib
+        color string code. For example::
+
+            dendrogram(Z, link_color_func=lambda k: colors[k])
+
+        colors the direct links below each untruncated non-singleton node
+        ``k`` using ``colors[k]``.
+    ax : matplotlib Axes instance, optional
+        If None and `no_plot` is not True, the dendrogram will be plotted
+        on the current axes. Otherwise if `no_plot` is not True the
+        dendrogram will be plotted on the given ``Axes`` instance. This can be
+        useful if the dendrogram is part of a more complex figure.
+    above_threshold_color : str, optional
+        This matplotlib color string sets the color of the links above the
+        color_threshold. The default is 'b'.
+
+    Returns
+    -------
+    R : dict
+        A dictionary of data structures computed to render the
+        dendrogram. It has the following keys:
+
+        ``'color_list'``
+          A list of color names. The k'th element represents the color of the
+          k'th link.
+
+        ``'icoord'`` and ``'dcoord'``
+          Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
+          where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
+          where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
+          ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
+
+        ``'ivl'``
+          A list of labels corresponding to the leaf nodes.
+
+        ``'leaves'``
+          For each i, ``H[i] == j``, cluster node ``j`` appears in position
+          ``i`` in the left-to-right traversal of the leaves, where
+          :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
+          ``i``-th leaf node corresponds to an original observation.
+          Otherwise, it corresponds to a non-singleton cluster.
+
+    See Also
+    --------
+    linkage, set_link_color_palette
+
+    Notes
+    -----
+    It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise
+    crossings appear in the dendrogram.
+
+    Examples
+    --------
+    >>> from scipy.cluster import hierarchy
+    >>> import matplotlib.pyplot as plt
+
+    A very basic example:
+
+    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
+    ...                    400., 754., 564., 138., 219., 869., 669.])
+    >>> Z = hierarchy.linkage(ytdist, 'single')
+    >>> plt.figure()
+    >>> dn = hierarchy.dendrogram(Z)
+
+    Now plot in given axes, improve the color scheme and use both vertical and
+    horizontal orientations:
+
+    >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
+    >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
+    >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
+    ...                            orientation='top')
+    >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1],
+    ...                            above_threshold_color='#bcbddc',
+    ...                            orientation='right')
+    >>> hierarchy.set_link_color_palette(None)  # reset to default after use
+    >>> plt.show()
+
+    """
+    # This feature was thought about but never implemented (still useful?):
+    #
+    #         ... = dendrogram(..., leaves_order=None)
+    #
+    #         Plots the leaves in the order specified by a vector of
+    #         original observation indices. If the vector contains duplicates
+    #         or results in a crossing, an exception will be thrown. Passing
+    #         None orders leaf nodes based on the order they appear in the
+    #         pre-order traversal.
+    Z = np.asarray(Z, order='c')
+
+    if orientation not in ["top", "left", "bottom", "right"]:
+        raise ValueError("orientation must be one of 'top', 'left', "
+                         "'bottom', or 'right'")
+
+    is_valid_linkage(Z, throw=True, name='Z')
+    Zs = Z.shape
+    n = Zs[0] + 1
+    if type(p) in (int, float):
+        p = int(p)
+    else:
+        raise TypeError('The second argument must be a number')
+
+    if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
+        # 'mlab' and 'mtica' are kept working for backwards compat.
+ raise ValueError('Invalid truncation mode.') + + if truncate_mode == 'lastp' or truncate_mode == 'mlab': + if p > n or p == 0: + p = n + + if truncate_mode == 'mtica': + # 'mtica' is an alias + truncate_mode = 'level' + + if truncate_mode == 'level': + if p <= 0: + p = np.inf + + if get_leaves: + lvs = [] + else: + lvs = None + + icoord_list = [] + dcoord_list = [] + color_list = [] + current_color = [0] + currently_below_threshold = [False] + ivl = [] # list of leaves + + if color_threshold is None or (isinstance(color_threshold, string_types) and + color_threshold == 'default'): + color_threshold = max(Z[:, 2]) * 0.7 + + R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl, + 'leaves': lvs, 'color_list': color_list} + + # Empty list will be filled in _dendrogram_calculate_info + contraction_marks = [] if show_contracted else None + + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=2*n - 2, + iv=0.0, + ivl=ivl, + n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, + lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + if not no_plot: + mh = max(Z[:, 2]) + _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, + no_labels, color_list, + leaf_font_size=leaf_font_size, + leaf_rotation=leaf_rotation, + contraction_marks=contraction_marks, + ax=ax, + above_threshold_color=above_threshold_color) + + return R + + +def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, + i, labels): + # If the leaf id structure is not None and is a list then the caller + # to dendrogram has indicated that cluster id's corresponding to the + # leaf nodes should be recorded. + + if lvs is not None: + lvs.append(int(i)) + + # If leaf node labels are to be displayed... + if ivl is not None: + # If a leaf_label_func has been provided, the label comes from the + # string returned from the leaf_label_func, which is a function + # passed to dendrogram. + if leaf_label_func: + ivl.append(leaf_label_func(int(i))) + else: + # Otherwise, if the dendrogram caller has passed a labels list + # for the leaf nodes, use it. + if labels is not None: + ivl.append(labels[int(i - n)]) + else: + # Otherwise, use the id as the label for the leaf.x + ivl.append(str(int(i))) + + +def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, + i, labels, show_leaf_counts): + # If the leaf id structure is not None and is a list then the caller + # to dendrogram has indicated that cluster id's corresponding to the + # leaf nodes should be recorded. 
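+    # When show_leaf_counts is set, a contracted non-singleton leaf gets a
+    # label with the number of original observations it contains, e.g. "(3)";
+    # otherwise its label is left empty.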
+
+    if lvs is not None:
+        lvs.append(int(i))
+    if ivl is not None:
+        if leaf_label_func:
+            ivl.append(leaf_label_func(int(i)))
+        else:
+            if show_leaf_counts:
+                ivl.append("(" + str(int(Z[i - n, 3])) + ")")
+            else:
+                ivl.append("")
+
+
+def _append_contraction_marks(Z, iv, i, n, contraction_marks):
+    _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
+    _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
+
+
+def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
+    if i >= n:
+        contraction_marks.append((iv, Z[i - n, 2]))
+        _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
+        _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
+
+
+def _dendrogram_calculate_info(Z, p, truncate_mode,
+                               color_threshold=np.inf, get_leaves=True,
+                               orientation='top', labels=None,
+                               count_sort=False, distance_sort=False,
+                               show_leaf_counts=False, i=-1, iv=0.0,
+                               ivl=[], n=0, icoord_list=[], dcoord_list=[],
+                               lvs=None, mhr=False,
+                               current_color=[], color_list=[],
+                               currently_below_threshold=[],
+                               leaf_label_func=None, level=0,
+                               contraction_marks=None,
+                               link_color_func=None,
+                               above_threshold_color='b'):
+    """
+    Calculate the endpoints of the links as well as the labels for the
+    dendrogram rooted at the node with index i. iv is the independent
+    variable value to plot the left-most leaf node below the root node i
+    (if orientation='top', this would be the left-most x value where the
+    plotting of this root node i and its descendents should begin).
+
+    ivl is a list to store the labels of the leaf nodes. The leaf_label_func
+    is called whenever ivl != None, labels == None, and
+    leaf_label_func != None. When ivl != None and labels != None, the
+    labels list is used only for labeling the leaf nodes. When
+    ivl == None, no labels are generated for leaf nodes.
+
+    When get_leaves==True, a list of leaves is built as they are visited
+    in the dendrogram.
+
+    Returns a tuple with l being the independent variable coordinate that
+    corresponds to the midpoint of the cluster to the left of cluster i if
+    i is non-singleton, otherwise the independent coordinate of the leaf
+    node if i is a leaf node.
+
+    Returns
+    -------
+    A tuple (left, w, h, md), where:
+
+    * left is the independent variable coordinate of the center of the
+      U of the subtree
+
+    * w is the amount of space used for the subtree (in independent
+      variable units)
+
+    * h is the height of the subtree in dependent variable units
+
+    * md is the ``max(Z[*,2])`` for all nodes ``*`` below and including
+      the target node.
+
+    """
+    if n == 0:
+        raise ValueError("Invalid singleton cluster count n.")
+
+    if i == -1:
+        raise ValueError("Invalid root cluster index i.")
+
+    if truncate_mode == 'lastp':
+        # If the node is a leaf node but corresponds to a non-singleton
+        # cluster, its label is either the empty string or the number of
+        # original observations belonging to cluster i.
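+        # Only the p-1 most recent merges (cluster ids >= 2n-p) stay
+        # expanded under 'lastp'; any other non-singleton id is drawn as a
+        # single contracted leaf.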
+ if 2*n - p > i >= n: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + elif truncate_mode == 'level': + if i > n and level > p: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + elif truncate_mode in ('mlab',): + msg = "Mode 'mlab' is deprecated in scipy 0.19.0 (it never worked)." + warnings.warn(msg, DeprecationWarning) + + # Otherwise, only truncate if we have a leaf node. + # + # Only place leaves if they correspond to original observations. + if i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + + # !!! Otherwise, we don't have a leaf node, so work on plotting a + # non-leaf node. + # Actual indices of a and b + aa = int(Z[i - n, 0]) + ab = int(Z[i - n, 1]) + if aa > n: + # The number of singletons below cluster a + na = Z[aa - n, 3] + # The distance between a's two direct children. + da = Z[aa - n, 2] + else: + na = 1 + da = 0.0 + if ab > n: + nb = Z[ab - n, 3] + db = Z[ab - n, 2] + else: + nb = 1 + db = 0.0 + + if count_sort == 'ascending' or count_sort: + # If a has a count greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if na > nb: + # The cluster index to draw to the left (ua) will be ab + # and the one to draw to the right (ub) will be aa + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif count_sort == 'descending': + # If a has a count less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if na > nb: + ua = aa + ub = ab + else: + ua = ab + ub = aa + elif distance_sort == 'ascending' or distance_sort: + # If a has a distance greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if da > db: + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif distance_sort == 'descending': + # If a has a distance less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if da > db: + ua = aa + ub = ab + else: + ua = ab + ub = aa + else: + ua = aa + ub = ab + + # Updated iv variable and the amount of space used. 
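+    # Recurse first into the child drawn on the left (ua); the returned
+    # tuple holds its center x-coordinate (uiva), its width (uwa), the
+    # height of its root link (uah, 0.0 for leaves) and the maximum
+    # distance below it (uamd).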
+ (uiva, uwa, uah, uamd) = \ + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=ua, iv=iv, ivl=ivl, n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + level=level + 1, contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + h = Z[i - n, 2] + if h >= color_threshold or color_threshold <= 0: + c = above_threshold_color + + if currently_below_threshold[0]: + current_color[0] = (current_color[0] + 1) % len(_link_line_colors) + currently_below_threshold[0] = False + else: + currently_below_threshold[0] = True + c = _link_line_colors[current_color[0]] + + (uivb, uwb, ubh, ubmd) = \ + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=ub, iv=iv + uwa, ivl=ivl, n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + level=level + 1, contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + max_dist = max(uamd, ubmd, h) + + icoord_list.append([uiva, uiva, uivb, uivb]) + dcoord_list.append([uah, h, h, ubh]) + if link_color_func is not None: + v = link_color_func(int(i)) + if not isinstance(v, string_types): + raise TypeError("link_color_func must return a matplotlib " + "color string!") + color_list.append(v) + else: + color_list.append(c) + + return (((uiva + uivb) / 2), uwa + uwb, h, max_dist) + + +def is_isomorphic(T1, T2): + """ + Determine if two different cluster assignments are equivalent. + + Parameters + ---------- + T1 : array_like + An assignment of singleton cluster ids to flat cluster ids. + T2 : array_like + An assignment of singleton cluster ids to flat cluster ids. + + Returns + ------- + b : bool + Whether the flat cluster assignments `T1` and `T2` are + equivalent. + + See Also + -------- + linkage: for a description of what a linkage matrix is. + fcluster: for the creation of flat cluster assignments. + + Examples + -------- + >>> from scipy.cluster.hierarchy import fcluster, is_isomorphic + >>> from scipy.cluster.hierarchy import single, complete + >>> from scipy.spatial.distance import pdist + + Two flat cluster assignments can be isomorphic if they represent the same + cluster assignment, with different labels. + + For example, we can use the `scipy.cluster.hierarchy.single`: method + and flatten the output to four clusters: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... 
[4, 4], [3, 4], [4, 3]]
+
+    >>> Z = single(pdist(X))
+    >>> T = fcluster(Z, 1, criterion='distance')
+    >>> T
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+
+    We can then do the same using the
+    `scipy.cluster.hierarchy.complete` method:
+
+    >>> Z = complete(pdist(X))
+    >>> T_ = fcluster(Z, 1.5, criterion='distance')
+    >>> T_
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+
+    As we can see, in both cases we obtain four clusters and all the data
+    points are distributed in the same way - the only thing that changes
+    are the flat cluster labels (3 => 1, 4 => 2, 2 => 3 and 1 => 4), so both
+    cluster assignments are isomorphic:
+
+    >>> is_isomorphic(T, T_)
+    True
+
+    """
+    T1 = np.asarray(T1, order='c')
+    T2 = np.asarray(T2, order='c')
+
+    if type(T1) != np.ndarray:
+        raise TypeError('T1 must be a numpy array.')
+    if type(T2) != np.ndarray:
+        raise TypeError('T2 must be a numpy array.')
+
+    T1S = T1.shape
+    T2S = T2.shape
+
+    if len(T1S) != 1:
+        raise ValueError('T1 must be one-dimensional.')
+    if len(T2S) != 1:
+        raise ValueError('T2 must be one-dimensional.')
+    if T1S[0] != T2S[0]:
+        raise ValueError('T1 and T2 must have the same number of elements.')
+    n = T1S[0]
+    d1 = {}
+    d2 = {}
+    for i in xrange(0, n):
+        if T1[i] in d1:
+            if not T2[i] in d2:
+                return False
+            if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]:
+                return False
+        elif T2[i] in d2:
+            return False
+        else:
+            d1[T1[i]] = T2[i]
+            d2[T2[i]] = T1[i]
+    return True
+
+
+def maxdists(Z):
+    """
+    Return the maximum distance between any non-singleton cluster.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        ``linkage`` for more information.
+
+    Returns
+    -------
+    maxdists : ndarray
+        A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
+        the maximum distance between any cluster (including
+        singletons) below and including the node with index i. More
+        specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
+        set of all node indices below and including node i.
+
+    See Also
+    --------
+    linkage: for a description of what a linkage matrix is.
+    is_monotonic: for testing for monotonicity of a linkage matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, maxdists
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists`
+    computes for each new cluster generated (i.e. for each row of the linkage
+    matrix) what is the maximum distance between any two child clusters.
+
+    Due to the nature of hierarchical clustering, in many cases this is going
+    to be just the distance between the two child clusters that were merged
+    to form the current one - that is, Z[:,2].
+
+    However, for non-monotonic cluster assignments such as
+    `scipy.cluster.hierarchy.median` clustering this is not always the
+    case: there may be cluster formations where the distance between the two
+    clusters merged is smaller than the distance between their children.
+
+    We can see this in an example:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> maxdists(Z)
+    array([1.        , 1.        , 1.        , 1.        , 1.11803399,
+           1.11803399, 1.11803399, 1.11803399, 3.        , 3.5       ,
+           3.5       ])
+
+    Note that while the distance between the two clusters merged when creating
+    the last cluster is 3.25, there are two children (clusters 16 and 17) whose
+    distance is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns
+    3.5 in this case.
+
+    """
+    Z = np.asarray(Z, order='c', dtype=np.double)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    n = Z.shape[0] + 1
+    MD = np.zeros((n - 1,))
+    [Z] = _copy_arrays_if_base_present([Z])
+    _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
+    return MD
+
+
+def maxinconsts(Z, R):
+    """
+    Return the maximum inconsistency coefficient for each
+    non-singleton cluster and its children.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        `linkage` for more information.
+    R : ndarray
+        The inconsistency matrix.
+
+    Returns
+    -------
+    MI : ndarray
+        A monotonic ``(n-1)``-sized numpy array of doubles.
+
+    See Also
+    --------
+    linkage: for a description of what a linkage matrix is.
+    inconsistent: for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    also be used to obtain the inconsistency matrix ``R`` associated with
+    this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.74535599, 1.08655358, 3.        , 1.15470054],
+           [1.91202266, 1.37522872, 3.        , 1.15470054],
+           [3.25      , 0.25      , 3.        , 0.        ]])
+
+    Here `scipy.cluster.hierarchy.maxinconsts` can be used to compute
+    the maximum value of the inconsistency statistic (the last column of
+    ``R``) for each non-singleton cluster and its children:
+
+    >>> maxinconsts(Z, R)
+    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
+           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
+           1.15470054])
+
+    """
+    Z = np.asarray(Z, order='c')
+    R = np.asarray(R, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+    is_valid_im(R, throw=True, name='R')
+
+    n = Z.shape[0] + 1
+    if Z.shape[0] != R.shape[0]:
+        raise ValueError("The inconsistency matrix and linkage matrix each "
+                         "have a different number of rows.")
+    MI = np.zeros((n - 1,))
+    [Z, R] = _copy_arrays_if_base_present([Z, R])
+    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
+    return MI
+
+
+def maxRstat(Z, R, i):
+    """
+    Return the maximum statistic for each non-singleton cluster and its
+    children.
+
+    Parameters
+    ----------
+    Z : array_like
+        The hierarchical clustering encoded as a matrix. See `linkage` for
+        more information.
+    R : array_like
+        The inconsistency matrix.
+    i : int
+        The column of `R` to use as the statistic.
+
+    Returns
+    -------
+    MR : ndarray
+        The maximum statistic for the i'th column of the
+        inconsistency matrix `R` for each non-singleton cluster
+        node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where
+        ``Q(j)`` is the set of all node ids corresponding to nodes below
+        and including ``j``.
+
+    See Also
+    --------
+    linkage: for a description of what a linkage matrix is.
+    inconsistent: for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    also be used to obtain the inconsistency matrix ``R`` associated with
+    this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.74535599, 1.08655358, 3.        , 1.15470054],
+           [1.91202266, 1.37522872, 3.        , 1.15470054],
+           [3.25      , 0.25      , 3.        , 0.        ]])
+
+    `scipy.cluster.hierarchy.maxRstat` can be used to compute
+    the maximum value of each column of ``R``, for each non-singleton
+    cluster and its children:
+
+    >>> maxRstat(Z, R, 0)
+    array([1.        , 1.        , 1.        , 1.        , 1.05901699,
+           1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,
+           3.25      ])
+    >>> maxRstat(Z, R, 1)
+    array([0.        , 0.        , 0.        , 0.        , 0.08346263,
+           0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,
+           1.37522872])
+    >>> maxRstat(Z, R, 3)
+    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
+           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
+           1.15470054])
+
+    """
+    Z = np.asarray(Z, order='c')
+    R = np.asarray(R, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+    is_valid_im(R, throw=True, name='R')
+    if type(i) is not int:
+        raise TypeError('The third argument must be an integer.')
+    if i < 0 or i > 3:
+        raise ValueError('i must be an integer between 0 and 3 inclusive.')
+
+    if Z.shape[0] != R.shape[0]:
+        raise ValueError("The inconsistency matrix and linkage matrix each "
+                         "have a different number of rows.")
+
+    n = Z.shape[0] + 1
+    MR = np.zeros((n - 1,))
+    [Z, R] = _copy_arrays_if_base_present([Z, R])
+    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
+    return MR
+
+
+def leaders(Z, T):
+    """
+    Return the root nodes in a hierarchical clustering.
+
+    Returns the root nodes in a hierarchical clustering corresponding
+    to a cut defined by a flat cluster assignment vector ``T``. See
+    the ``fcluster`` function for more information on the format of ``T``.
+
+    For each flat cluster :math:`j` of the :math:`k` flat clusters
+    represented in the n-sized flat cluster assignment vector ``T``,
+    this function finds the lowest cluster node :math:`i` in the linkage
+    tree Z such that:
+
+      * leaf descendants belong only to flat cluster j
+        (i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
+        :math:`S(i)` is the set of leaf ids of descendant leaf nodes
+        with cluster node :math:`i`)
+
+      * there does not exist a leaf that is not a descendant of
+        :math:`i` that also belongs to cluster :math:`j`
+        (i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
+        this condition is violated, ``T`` is not a valid cluster
+        assignment vector, and an exception will be thrown.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        `linkage` for more information.
+    T : ndarray
+        The flat cluster assignment vector.
+
+    Returns
+    -------
+    L : ndarray
+        The leader linkage node id's stored as a k-element 1-D array
+        where ``k`` is the number of flat clusters found in ``T``.
+
+        ``L[j]=i`` is the linkage cluster node id that is the
+        leader of flat cluster with id M[j]. If ``i < n``, ``i``
+        corresponds to an original observation, otherwise it
+        corresponds to a non-singleton cluster.
+
+    M : ndarray
+        The flat cluster ids stored as a k-element 1-D array where
+        ``k`` is the number of flat clusters found in ``T``. This allows the
+        set of flat cluster ids to be any arbitrary set of ``k`` integers.
+
+        For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
+        id 8's leader is linkage node 2.
+
+    See Also
+    --------
+    fcluster: for the creation of flat cluster assignments.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, fcluster, leaders
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z`` - obtained after applying a clustering
+    method to a dataset ``X`` - and a flat cluster assignment array ``T``:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+
+
+    >>> T = fcluster(Z, 3, criterion='distance')
+    >>> T
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+
+    `scipy.cluster.hierarchy.leaders` returns the indices of the nodes
+    in the dendrogram that are the leaders of each flat cluster:
+
+    >>> L, M = leaders(Z, T)
+    >>> L
+    array([16, 17, 18, 19], dtype=int32)
+
+    (remember that indices 0-11 point to the 12 data points in ``X``,
+    whereas indices 12-22 point to the 11 rows of ``Z``)
+
+    `scipy.cluster.hierarchy.leaders` also returns the ids of
+    the flat clusters in ``T``:
+
+    >>> M
+    array([1, 2, 3, 4], dtype=int32)
+
+    """
+    Z = np.asarray(Z, order='c')
+    T = np.asarray(T, order='c')
+    if type(T) != np.ndarray or T.dtype != 'i':
+        raise TypeError('T must be a one-dimensional numpy array of integers.')
+    is_valid_linkage(Z, throw=True, name='Z')
+    if len(T) != Z.shape[0] + 1:
+        raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
+
+    Cl = np.unique(T)
+    kk = len(Cl)
+    L = np.zeros((kk,), dtype='i')
+    M = np.zeros((kk,), dtype='i')
+    n = Z.shape[0] + 1
+    [Z, T] = _copy_arrays_if_base_present([Z, T])
+    s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
+    if s >= 0:
+        raise ValueError(('T is not a valid assignment vector. 
Error found ' + 'when examining linkage node %d (< 2n-1).') % s) + return (L, M) diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/hierarchy.pyc b/project/venv/lib/python2.7/site-packages/scipy/cluster/hierarchy.pyc new file mode 100644 index 0000000..aea9ad3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/hierarchy.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/setup.py b/project/venv/lib/python2.7/site-packages/scipy/cluster/setup.py new file mode 100644 index 0000000..37caecf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/cluster/setup.py @@ -0,0 +1,38 @@ +from __future__ import division, print_function, absolute_import + +import sys + +if sys.version_info[0] >= 3: + DEFINE_MACROS = [("SCIPY_PY3K", None)] +else: + DEFINE_MACROS = [] + + +def configuration(parent_package='', top_path=None): + from scipy._build_utils.system_info import get_info + from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs + config = Configuration('cluster', parent_package, top_path) + + blas_opt = get_info('lapack_opt') + + config.add_data_dir('tests') + + config.add_extension('_vq', + sources=[('_vq.c')], + include_dirs=[get_numpy_include_dirs()], + extra_info=blas_opt) + + config.add_extension('_hierarchy', + sources=[('_hierarchy.c')], + include_dirs=[get_numpy_include_dirs()]) + + config.add_extension('_optimal_leaf_ordering', + sources=[('_optimal_leaf_ordering.c')], + include_dirs=[get_numpy_include_dirs()]) + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/cluster/setup.pyc new file mode 100644 index 0000000..f925ec9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/__init__.pyc new file mode 100644 index 0000000..1506099 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/hierarchy_test_data.py b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/hierarchy_test_data.py new file mode 100644 index 0000000..7d874ca --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/hierarchy_test_data.py @@ -0,0 +1,145 @@ +from numpy import array + + +Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02], + [7.50205180e-01, 4.60299830e-01, 8.98696460e-01], + [6.65461230e-01, 6.94011420e-01, 9.10465700e-01], + [9.64047590e-01, 1.43082200e-03, 7.39874220e-01], + [1.08159060e-01, 5.53028790e-01, 6.63804780e-02], + [9.31359130e-01, 8.25424910e-01, 9.52315440e-01], + [6.78086960e-01, 3.41903970e-01, 5.61481950e-01], + [9.82730940e-01, 7.04605210e-01, 8.70978630e-02], + [6.14691610e-01, 4.69989230e-02, 6.02406450e-01], + [5.80161260e-01, 9.17354970e-01, 5.88163850e-01], + [1.38246310e+00, 1.96358160e+00, 1.94437880e+00], + [2.10675860e+00, 1.67148730e+00, 1.34854480e+00], + [1.39880070e+00, 1.66142050e+00, 
1.32224550e+00], + [1.71410460e+00, 1.49176380e+00, 1.45432170e+00], + [1.54102340e+00, 1.84374950e+00, 1.64658950e+00], + [2.08512480e+00, 1.84524350e+00, 2.17340850e+00], + [1.30748740e+00, 1.53801650e+00, 2.16007740e+00], + [1.41447700e+00, 1.99329070e+00, 1.99107420e+00], + [1.61943490e+00, 1.47703280e+00, 1.89788160e+00], + [1.59880600e+00, 1.54988980e+00, 1.57563350e+00], + [3.37247380e+00, 2.69635310e+00, 3.39981700e+00], + [3.13705120e+00, 3.36528090e+00, 3.06089070e+00], + [3.29413250e+00, 3.19619500e+00, 2.90700170e+00], + [2.65510510e+00, 3.06785900e+00, 2.97198540e+00], + [3.30941040e+00, 2.59283970e+00, 2.57714110e+00], + [2.59557220e+00, 3.33477370e+00, 3.08793190e+00], + [2.58206180e+00, 3.41615670e+00, 3.26441990e+00], + [2.71127000e+00, 2.77032450e+00, 2.63466500e+00], + [2.79617850e+00, 3.25473720e+00, 3.41801560e+00], + [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]]) + +ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754., + 564., 138., 219., 869., 669.]) + +linkage_ytdist_single = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +linkage_ytdist_complete = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [1., 6., 400., 3.], + [0., 7., 412., 3.], + [8., 9., 996., 6.]]) + +linkage_ytdist_average = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 680.77777778, 6.]]) + +linkage_ytdist_weighted = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 670.125, 6.]]) + +# the optimal leaf ordering of linkage_ytdist_single +linkage_ytdist_single_olo = array([[5., 2., 138., 2.], + [4., 3., 219., 2.], + [7., 0., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +X = array([[1.43054825, -7.5693489], + [6.95887839, 6.82293382], + [2.87137846, -9.68248579], + [7.87974764, -6.05485803], + [8.24018364, -6.09495602], + [7.39020262, 8.54004355]]) + +linkage_X_centroid = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_median = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_ward = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +# the optimal leaf ordering of linkage_X_ward +linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.], + [5., 1., 1.77045373, 2.], + [2., 0., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +inconsistent_ytdist = { + 1: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [255., 0., 1., 0.], + [268., 0., 1., 0.], + [295., 0., 1., 0.]]), + 2: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [261.5, 9.19238816, 2., 0.70710678], + [233.66666667, 83.9424406, 3., 0.7306594]]), + 3: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [239., 69.36377537, 4., 0.80733783]]), + 4: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [235., 60.73302232, 5., 0.98793042]])} + +fcluster_inconsistent = { + 0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: 
array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + +fcluster_distance = { + 0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3, + 1, 1, 1, 2, 1, 1, 1, 1, 1]), + 1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + +fcluster_maxclust = { + 8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4, + 1, 1, 1, 3, 1, 1, 1, 1, 2]), + 4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/hierarchy_test_data.pyc b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/hierarchy_test_data.pyc new file mode 100644 index 0000000..d2a52ba Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/hierarchy_test_data.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_hierarchy.py b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_hierarchy.py new file mode 100644 index 0000000..1933b46 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_hierarchy.py @@ -0,0 +1,1062 @@ +# +# Author: Damian Eads +# Date: April 17, 2008 +# +# Copyright (C) 2008 Damian Eads +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
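Every expected linkage array above (and throughout the tests below) uses SciPy's standard encoding: row i of Z records the indices of the two clusters merged at step i, the distance at which they merged, and the number of original observations in the new cluster; observations are numbered 0..n-1 and merged clusters n, n+1, and so on. As a sketch of where the fixtures come from (assuming only that numpy and scipy are importable; array formatting may differ slightly across NumPy versions), linkage_ytdist_single is single linkage over the ytdist condensed distance matrix:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import linkage
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
...                    400., 754., 564., 138., 219., 869., 669.])
>>> linkage(ytdist, 'single')
array([[  2.,   5., 138.,   2.],
       [  3.,   4., 219.,   2.],
       [  0.,   7., 255.,   3.],
       [  1.,   8., 268.,   4.],
       [  6.,   9., 295.,   6.]])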
+from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns +import pytest +from pytest import raises as assert_raises + +from scipy._lib.six import xrange, u + +import scipy.cluster.hierarchy +from scipy.cluster.hierarchy import ( + ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage, + num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, + is_isomorphic, single, leaders, complete, weighted, centroid, + correspond, is_monotonic, maxdists, maxinconsts, maxRstat, + is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram, + set_link_color_palette, cut_tree, optimal_leaf_ordering, + _order_cluster_tree, _hierarchy, _LINKAGE_METHODS) +from scipy.spatial.distance import pdist +from scipy.cluster._hierarchy import Heap + +from . import hierarchy_test_data + + +# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so +# check if it's available +try: + import matplotlib + # and set the backend to be Agg (no gui) + matplotlib.use('Agg') + # before importing pyplot + import matplotlib.pyplot as plt + have_matplotlib = True +except Exception: + have_matplotlib = False + + +class TestLinkage(object): + def test_linkage_non_finite_elements_in_distance_matrix(self): + # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf). + # Exception expected. + y = np.zeros((6,)) + y[0] = np.nan + assert_raises(ValueError, linkage, y) + + def test_linkage_empty_distance_matrix(self): + # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected. + y = np.zeros((0,)) + assert_raises(ValueError, linkage, y) + + def test_linkage_tdist(self): + for method in ['single', 'complete', 'average', 'weighted', u('single')]: + self.check_linkage_tdist(method) + + def check_linkage_tdist(self, method): + # Tests linkage(Y, method) on the tdist data set. + Z = linkage(hierarchy_test_data.ytdist, method) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method) + assert_allclose(Z, expectedZ, atol=1e-10) + + def test_linkage_X(self): + for method in ['centroid', 'median', 'ward']: + self.check_linkage_q(method) + + def check_linkage_q(self, method): + # Tests linkage(Y, method) on the Q data set. 
+ Z = linkage(hierarchy_test_data.X, method) + expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method) + assert_allclose(Z, expectedZ, atol=1e-06) + + y = scipy.spatial.distance.pdist(hierarchy_test_data.X, + metric="euclidean") + Z = linkage(y, method) + assert_allclose(Z, expectedZ, atol=1e-06) + + def test_compare_with_trivial(self): + rng = np.random.RandomState(0) + n = 20 + X = rng.rand(n, 2) + d = pdist(X) + + for method, code in _LINKAGE_METHODS.items(): + Z_trivial = _hierarchy.linkage(d, n, code) + Z = linkage(d, method) + assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15) + + def test_optimal_leaf_ordering(self): + Z = linkage(hierarchy_test_data.ytdist, optimal_ordering=True) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo') + assert_allclose(Z, expectedZ, atol=1e-10) + + +class TestLinkageTies(object): + _expectations = { + 'single': np.array([[0, 1, 1.41421356, 2], + [2, 3, 1.41421356, 3]]), + 'complete': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.82842712, 3]]), + 'average': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'weighted': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'centroid': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'median': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'ward': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.44948974, 3]]), + } + + def test_linkage_ties(self): + for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']: + self.check_linkage_ties(method) + + def check_linkage_ties(self, method): + X = np.array([[-1, -1], [0, 0], [1, 1]]) + Z = linkage(X, method=method) + expectedZ = self._expectations[method] + assert_allclose(Z, expectedZ, atol=1e-06) + + +class TestInconsistent(object): + def test_inconsistent_tdist(self): + for depth in hierarchy_test_data.inconsistent_ytdist: + self.check_inconsistent_tdist(depth) + + def check_inconsistent_tdist(self, depth): + Z = hierarchy_test_data.linkage_ytdist_single + assert_allclose(inconsistent(Z, depth), + hierarchy_test_data.inconsistent_ytdist[depth]) + + +class TestCopheneticDistance(object): + def test_linkage_cophenet_tdist_Z(self): + # Tests cophenet(Z) on tdist data set. + expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295]) + Z = hierarchy_test_data.linkage_ytdist_single + M = cophenet(Z) + assert_allclose(M, expectedM, atol=1e-10) + + def test_linkage_cophenet_tdist_Z_Y(self): + # Tests cophenet(Z, Y) on tdist data set. + Z = hierarchy_test_data.linkage_ytdist_single + (c, M) = cophenet(Z, hierarchy_test_data.ytdist) + expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295]) + expectedc = 0.639931296433393415057366837573 + assert_allclose(c, expectedc, atol=1e-10) + assert_allclose(M, expectedM, atol=1e-10) + + +class TestMLabLinkageConversion(object): + def test_mlab_linkage_conversion_empty(self): + # Tests from/to_mlab_linkage on empty linkage array. + X = np.asarray([]) + assert_equal(from_mlab_linkage([]), X) + assert_equal(to_mlab_linkage([]), X) + + def test_mlab_linkage_conversion_single_row(self): + # Tests from/to_mlab_linkage on linkage array with single row. + Z = np.asarray([[0., 1., 3., 2.]]) + Zm = [[1, 2, 3]] + assert_equal(from_mlab_linkage(Zm), Z) + assert_equal(to_mlab_linkage(Z), Zm) + + def test_mlab_linkage_conversion_multiple_rows(self): + # Tests from/to_mlab_linkage on linkage array with multiple rows. 
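The MATLAB conversion checked here is mechanical: from_mlab_linkage shifts the 1-based cluster indices of each [i, j, dist] row to 0-based and appends the cluster size, and to_mlab_linkage undoes it. The single-row case above, as a doctest-style sketch:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import from_mlab_linkage
>>> from_mlab_linkage(np.asarray([[1, 2, 3]], dtype=np.double))
array([[0., 1., 3., 2.]])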
+ Zm = np.asarray([[3, 6, 138], [4, 5, 219], + [1, 8, 255], [2, 9, 268], [7, 10, 295]]) + Z = np.array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]], + dtype=np.double) + assert_equal(from_mlab_linkage(Zm), Z) + assert_equal(to_mlab_linkage(Z), Zm) + + +class TestFcluster(object): + def test_fclusterdata(self): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fclusterdata(t, 'inconsistent') + for t in hierarchy_test_data.fcluster_distance: + self.check_fclusterdata(t, 'distance') + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fclusterdata(t, 'maxclust') + + def check_fclusterdata(self, t, criterion): + # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set. + expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t] + X = hierarchy_test_data.Q_X + T = fclusterdata(X, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster(self): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fcluster(t, 'inconsistent') + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster(t, 'distance') + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster(t, 'maxclust') + + def check_fcluster(self, t, criterion): + # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set. + expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t] + Z = single(hierarchy_test_data.Q_X) + T = fcluster(Z, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster_monocrit(self): + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster_monocrit(t) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster_maxclust_monocrit(t) + + def check_fcluster_monocrit(self, t): + expectedT = hierarchy_test_data.fcluster_distance[t] + Z = single(hierarchy_test_data.Q_X) + T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + def check_fcluster_maxclust_monocrit(self, t): + expectedT = hierarchy_test_data.fcluster_maxclust[t] + Z = single(hierarchy_test_data.Q_X) + T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + +class TestLeaders(object): + def test_leaders_single(self): + # Tests leaders using a flat clustering generated by single linkage. 
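The fcluster tests above cut a linkage at threshold t under a given criterion and compare labellings only up to renaming (hence is_isomorphic). Exact label values are implementation details, so a minimal sketch checks just the cluster count (ytdist as above):

>>> from scipy.cluster.hierarchy import single, fcluster
>>> T = fcluster(single(ytdist), t=2, criterion='maxclust')
>>> len(set(T))
2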
+ X = hierarchy_test_data.Q_X + Y = pdist(X) + Z = linkage(Y) + T = fcluster(Z, criterion='maxclust', t=3) + Lright = (np.array([53, 55, 56]), np.array([2, 3, 1])) + L = leaders(Z, T) + assert_equal(L, Lright) + + +class TestIsIsomorphic(object): + def test_is_isomorphic_1(self): + # Tests is_isomorphic on test case #1 (one flat cluster, different labellings) + a = [1, 1, 1] + b = [2, 2, 2] + assert_(is_isomorphic(a, b)) + assert_(is_isomorphic(b, a)) + + def test_is_isomorphic_2(self): + # Tests is_isomorphic on test case #2 (two flat clusters, different labelings) + a = [1, 7, 1] + b = [2, 3, 2] + assert_(is_isomorphic(a, b)) + assert_(is_isomorphic(b, a)) + + def test_is_isomorphic_3(self): + # Tests is_isomorphic on test case #3 (no flat clusters) + a = [] + b = [] + assert_(is_isomorphic(a, b)) + + def test_is_isomorphic_4A(self): + # Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic) + a = [1, 2, 3] + b = [1, 3, 2] + assert_(is_isomorphic(a, b)) + assert_(is_isomorphic(b, a)) + + def test_is_isomorphic_4B(self): + # Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic) + a = [1, 2, 3, 3] + b = [1, 3, 2, 3] + assert_(is_isomorphic(a, b) == False) + assert_(is_isomorphic(b, a) == False) + + def test_is_isomorphic_4C(self): + # Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic) + a = [7, 2, 3] + b = [6, 3, 2] + assert_(is_isomorphic(a, b)) + assert_(is_isomorphic(b, a)) + + def test_is_isomorphic_5(self): + # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling). + for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc) + + def test_is_isomorphic_6(self): + # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling, slightly + # nonisomorphic.) + for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc, True, 5) + + def test_is_isomorphic_7(self): + # Regression test for gh-6271 + assert_(not is_isomorphic([1, 2, 3], [1, 1, 1])) + + def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0): + for k in range(3): + a = np.int_(np.random.rand(nobs) * nclusters) + b = np.zeros(a.size, dtype=np.int_) + P = np.random.permutation(nclusters) + for i in xrange(0, a.shape[0]): + b[i] = P[a[i]] + if noniso: + Q = np.random.permutation(nobs) + b[Q[0:nerrors]] += 1 + b[Q[0:nerrors]] %= nclusters + assert_(is_isomorphic(a, b) == (not noniso)) + assert_(is_isomorphic(b, a) == (not noniso)) + + +class TestIsValidLinkage(object): + def test_is_valid_linkage_various_size(self): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_linkage_various_size(nrow, ncol, valid) + + def check_is_valid_linkage_various_size(self, nrow, ncol, valid): + # Tests is_valid_linkage(Z) with linkage matrics of various sizes + Z = np.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=np.double) + Z = Z[:nrow, :ncol] + assert_(is_valid_linkage(Z) == valid) + if not valid: + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_int_type(self): + # Tests is_valid_linkage(Z) with integer type. + Z = np.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=int) + assert_(is_valid_linkage(Z) == False) + assert_raises(TypeError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_empty(self): + # Tests is_valid_linkage(Z) with empty linkage. 
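is_isomorphic, relied on above, treats two flat clusterings as equal exactly when one is a relabelling of the other. Two quick checks mirroring cases #2 and #4B:

>>> from scipy.cluster.hierarchy import is_isomorphic
>>> is_isomorphic([1, 7, 1], [2, 3, 2])
True
>>> is_isomorphic([1, 2, 3, 3], [1, 3, 2, 3])
False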
+ Z = np.zeros((0, 4), dtype=np.double) + assert_(is_valid_linkage(Z) == False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up(self): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3). + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + assert_(is_valid_linkage(Z) == True) + + def test_is_valid_linkage_4_and_up_neg_index_left(self): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (left). + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + Z[i//2,0] = -2 + assert_(is_valid_linkage(Z) == False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_index_right(self): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (right). + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + Z[i//2,1] = -2 + assert_(is_valid_linkage(Z) == False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_dist(self): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative distances. + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + Z[i//2,2] = -0.5 + assert_(is_valid_linkage(Z) == False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_counts(self): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative counts. + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + Z[i//2,3] = -2 + assert_(is_valid_linkage(Z) == False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + +class TestIsValidInconsistent(object): + def test_is_valid_im_int_type(self): + # Tests is_valid_im(R) with integer type. + R = np.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=int) + assert_(is_valid_im(R) == False) + assert_raises(TypeError, is_valid_im, R, throw=True) + + def test_is_valid_im_various_size(self): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_im_various_size(nrow, ncol, valid) + + def check_is_valid_im_various_size(self, nrow, ncol, valid): + # Tests is_valid_im(R) with linkage matrics of various sizes + R = np.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=np.double) + R = R[:nrow, :ncol] + assert_(is_valid_im(R) == valid) + if not valid: + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_empty(self): + # Tests is_valid_im(R) with empty inconsistency matrix. + R = np.zeros((0, 4), dtype=np.double) + assert_(is_valid_im(R) == False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up(self): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3). + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + R = inconsistent(Z) + assert_(is_valid_im(R) == True) + + def test_is_valid_im_4_and_up_neg_index_left(self): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height means. 
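As these tests encode, is_valid_linkage accepts only an (n-1) x 4 array of doubles with plausible indices, distances and counts; an integer-typed copy already fails the dtype check. A compact sketch:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import is_valid_linkage
>>> Z = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=np.double)
>>> is_valid_linkage(Z)
True
>>> is_valid_linkage(Z.astype(int))
False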
+ for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,0] = -2.0 + assert_(is_valid_im(R) == False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up_neg_index_right(self): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height standard deviations. + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,1] = -2.0 + assert_(is_valid_im(R) == False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up_neg_dist(self): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link counts. + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,2] = -0.5 + assert_(is_valid_im(R) == False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + +class TestNumObsLinkage(object): + def test_num_obs_linkage_empty(self): + # Tests num_obs_linkage(Z) with empty linkage. + Z = np.zeros((0, 4), dtype=np.double) + assert_raises(ValueError, num_obs_linkage, Z) + + def test_num_obs_linkage_1x4(self): + # Tests num_obs_linkage(Z) on linkage over 2 observations. + Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) + assert_equal(num_obs_linkage(Z), 2) + + def test_num_obs_linkage_2x4(self): + # Tests num_obs_linkage(Z) on linkage over 3 observations. + Z = np.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=np.double) + assert_equal(num_obs_linkage(Z), 3) + + def test_num_obs_linkage_4_and_up(self): + # Tests num_obs_linkage(Z) on linkage on observation sets between sizes + # 4 and 15 (step size 3). + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + assert_equal(num_obs_linkage(Z), i) + + +class TestLeavesList(object): + def test_leaves_list_1x4(self): + # Tests leaves_list(Z) on a 1x4 linkage. + Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) + to_tree(Z) + assert_equal(leaves_list(Z), [0, 1]) + + def test_leaves_list_2x4(self): + # Tests leaves_list(Z) on a 2x4 linkage. + Z = np.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=np.double) + to_tree(Z) + assert_equal(leaves_list(Z), [0, 1, 2]) + + def test_leaves_list_Q(self): + for method in ['single', 'complete', 'average', 'weighted', 'centroid', + 'median', 'ward']: + self.check_leaves_list_Q(method) + + def check_leaves_list_Q(self, method): + # Tests leaves_list(Z) on the Q data set + X = hierarchy_test_data.Q_X + Z = linkage(X, method) + node = to_tree(Z) + assert_equal(node.pre_order(), leaves_list(Z)) + + def test_Q_subtree_pre_order(self): + # Tests that pre_order() works when called on sub-trees. + X = hierarchy_test_data.Q_X + Z = linkage(X, 'single') + node = to_tree(Z) + assert_equal(node.pre_order(), (node.get_left().pre_order() + + node.get_right().pre_order())) + + +class TestCorrespond(object): + def test_correspond_empty(self): + # Tests correspond(Z, y) with empty linkage and condensed distance matrix. + y = np.zeros((0,)) + Z = np.zeros((0,4)) + assert_raises(ValueError, correspond, Z, y) + + def test_correspond_2_and_up(self): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. 
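leaves_list and to_tree(...).pre_order(), compared in TestLeavesList above, both yield the left-to-right leaf order of the dendrogram. For the small 2x4 linkage used in those tests:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import leaves_list
>>> Z = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=np.double)
>>> list(leaves_list(Z))
[0, 1, 2]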
+ for i in xrange(2, 4): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + assert_(correspond(Z, y)) + for i in xrange(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + Z = linkage(y) + assert_(correspond(Z, y)) + + def test_correspond_4_and_up(self): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. + for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) + + list(zip(list(range(3, 5)), list(range(2, 4))))): + y = np.random.rand(i*(i-1)//2) + y2 = np.random.rand(j*(j-1)//2) + Z = linkage(y) + Z2 = linkage(y2) + assert_equal(correspond(Z, y2), False) + assert_equal(correspond(Z2, y), False) + + def test_correspond_4_and_up_2(self): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. + for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) + + list(zip(list(range(2, 7)), list(range(16, 21))))): + y = np.random.rand(i*(i-1)//2) + y2 = np.random.rand(j*(j-1)//2) + Z = linkage(y) + Z2 = linkage(y2) + assert_equal(correspond(Z, y2), False) + assert_equal(correspond(Z2, y), False) + + def test_num_obs_linkage_multi_matrix(self): + # Tests num_obs_linkage with observation matrices of multiple sizes. + for n in xrange(2, 10): + X = np.random.rand(n, 4) + Y = pdist(X) + Z = linkage(Y) + assert_equal(num_obs_linkage(Z), n) + + +class TestIsMonotonic(object): + def test_is_monotonic_empty(self): + # Tests is_monotonic(Z) on an empty linkage. + Z = np.zeros((0, 4)) + assert_raises(ValueError, is_monotonic, Z) + + def test_is_monotonic_1x4(self): + # Tests is_monotonic(Z) on 1x4 linkage. Expecting True. + Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double) + assert_equal(is_monotonic(Z), True) + + def test_is_monotonic_2x4_T(self): + # Tests is_monotonic(Z) on 2x4 linkage. Expecting True. + Z = np.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 3]], dtype=np.double) + assert_equal(is_monotonic(Z), True) + + def test_is_monotonic_2x4_F(self): + # Tests is_monotonic(Z) on 2x4 linkage. Expecting False. + Z = np.asarray([[0, 1, 0.4, 2], + [2, 3, 0.3, 3]], dtype=np.double) + assert_equal(is_monotonic(Z), False) + + def test_is_monotonic_3x4_T(self): + # Tests is_monotonic(Z) on 3x4 linkage. Expecting True. + Z = np.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 2], + [4, 5, 0.6, 4]], dtype=np.double) + assert_equal(is_monotonic(Z), True) + + def test_is_monotonic_3x4_F1(self): + # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False. + Z = np.asarray([[0, 1, 0.3, 2], + [2, 3, 0.2, 2], + [4, 5, 0.6, 4]], dtype=np.double) + assert_equal(is_monotonic(Z), False) + + def test_is_monotonic_3x4_F2(self): + # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False. + Z = np.asarray([[0, 1, 0.8, 2], + [2, 3, 0.4, 2], + [4, 5, 0.6, 4]], dtype=np.double) + assert_equal(is_monotonic(Z), False) + + def test_is_monotonic_3x4_F3(self): + # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False + Z = np.asarray([[0, 1, 0.3, 2], + [2, 3, 0.4, 2], + [4, 5, 0.2, 4]], dtype=np.double) + assert_equal(is_monotonic(Z), False) + + def test_is_monotonic_tdist_linkage1(self): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # tdist data set. Expecting True. + Z = linkage(hierarchy_test_data.ytdist, 'single') + assert_equal(is_monotonic(Z), True) + + def test_is_monotonic_tdist_linkage2(self): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # tdist data set. Perturbing. Expecting False. 
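is_monotonic(Z), probed from both sides above, checks that the distance in each linkage row is at least as large as the distances of the rows it merges. The two 2x4 cases, as doctests:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import is_monotonic
>>> is_monotonic(np.asarray([[0, 1, 0.3, 2], [2, 3, 0.4, 3]], dtype=np.double))
True
>>> is_monotonic(np.asarray([[0, 1, 0.4, 2], [2, 3, 0.3, 3]], dtype=np.double))
False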
+ Z = linkage(hierarchy_test_data.ytdist, 'single') + Z[2,2] = 0.0 + assert_equal(is_monotonic(Z), False) + + def test_is_monotonic_Q_linkage(self): + # Tests is_monotonic(Z) on clustering generated by single linkage on + # Q data set. Expecting True. + X = hierarchy_test_data.Q_X + Z = linkage(X, 'single') + assert_equal(is_monotonic(Z), True) + + +class TestMaxDists(object): + def test_maxdists_empty_linkage(self): + # Tests maxdists(Z) on empty linkage. Expecting exception. + Z = np.zeros((0, 4), dtype=np.double) + assert_raises(ValueError, maxdists, Z) + + def test_maxdists_one_cluster_linkage(self): + # Tests maxdists(Z) on linkage with one cluster. + Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z) + assert_allclose(MD, expectedMD, atol=1e-15) + + def test_maxdists_Q_linkage(self): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxdists_Q_linkage(method) + + def check_maxdists_Q_linkage(self, method): + # Tests maxdists(Z) on the Q data set + X = hierarchy_test_data.Q_X + Z = linkage(X, method) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z) + assert_allclose(MD, expectedMD, atol=1e-15) + + +class TestMaxInconsts(object): + def test_maxinconsts_empty_linkage(self): + # Tests maxinconsts(Z, R) on empty linkage. Expecting exception. + Z = np.zeros((0, 4), dtype=np.double) + R = np.zeros((0, 4), dtype=np.double) + assert_raises(ValueError, maxinconsts, Z, R) + + def test_maxinconsts_difrow_linkage(self): + # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. + Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) + R = np.random.rand(2, 4) + assert_raises(ValueError, maxinconsts, Z, R) + + def test_maxinconsts_one_cluster_linkage(self): + # Tests maxinconsts(Z, R) on linkage with one cluster. + Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) + R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R) + assert_allclose(MD, expectedMD, atol=1e-15) + + def test_maxinconsts_Q_linkage(self): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxinconsts_Q_linkage(method) + + def check_maxinconsts_Q_linkage(self, method): + # Tests maxinconsts(Z, R) on the Q data set + X = hierarchy_test_data.Q_X + Z = linkage(X, method) + R = inconsistent(Z) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R) + assert_allclose(MD, expectedMD, atol=1e-15) + + +class TestMaxRStat(object): + def test_maxRstat_invalid_index(self): + for i in [3.3, -1, 4]: + self.check_maxRstat_invalid_index(i) + + def check_maxRstat_invalid_index(self, i): + # Tests maxRstat(Z, R, i). Expecting exception. + Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) + R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) + if isinstance(i, int): + assert_raises(ValueError, maxRstat, Z, R, i) + else: + assert_raises(TypeError, maxRstat, Z, R, i) + + def test_maxRstat_empty_linkage(self): + for i in range(4): + self.check_maxRstat_empty_linkage(i) + + def check_maxRstat_empty_linkage(self, i): + # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception. 
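calculate_maximum_distances at the bottom of this file re-implements what maxdists computes: entry i is the largest merge distance in the subtree rooted at cluster n+i. On the single-linkage tdist tree the result is easy to read off (ytdist as above):

>>> from scipy.cluster.hierarchy import single, maxdists
>>> maxdists(single(ytdist))
array([138., 219., 255., 268., 295.])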
+ Z = np.zeros((0, 4), dtype=np.double) + R = np.zeros((0, 4), dtype=np.double) + assert_raises(ValueError, maxRstat, Z, R, i) + + def test_maxRstat_difrow_linkage(self): + for i in range(4): + self.check_maxRstat_difrow_linkage(i) + + def check_maxRstat_difrow_linkage(self, i): + # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. + Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) + R = np.random.rand(2, 4) + assert_raises(ValueError, maxRstat, Z, R, i) + + def test_maxRstat_one_cluster_linkage(self): + for i in range(4): + self.check_maxRstat_one_cluster_linkage(i) + + def check_maxRstat_one_cluster_linkage(self, i): + # Tests maxRstat(Z, R, i) on linkage with one cluster. + Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) + R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) + MD = maxRstat(Z, R, 1) + expectedMD = calculate_maximum_inconsistencies(Z, R, 1) + assert_allclose(MD, expectedMD, atol=1e-15) + + def test_maxRstat_Q_linkage(self): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + for i in range(4): + self.check_maxRstat_Q_linkage(method, i) + + def check_maxRstat_Q_linkage(self, method, i): + # Tests maxRstat(Z, R, i) on the Q data set + X = hierarchy_test_data.Q_X + Z = linkage(X, method) + R = inconsistent(Z) + MD = maxRstat(Z, R, 1) + expectedMD = calculate_maximum_inconsistencies(Z, R, 1) + assert_allclose(MD, expectedMD, atol=1e-15) + + +class TestDendrogram(object): + def test_dendrogram_single_linkage_tdist(self): + # Tests dendrogram calculation on single linkage of the tdist data set. + Z = linkage(hierarchy_test_data.ytdist, 'single') + R = dendrogram(Z, no_plot=True) + leaves = R["leaves"] + assert_equal(leaves, [2, 5, 1, 0, 3, 4]) + + def test_valid_orientation(self): + Z = linkage(hierarchy_test_data.ytdist, 'single') + assert_raises(ValueError, dendrogram, Z, orientation="foo") + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_dendrogram_plot(self): + for orientation in ['top', 'bottom', 'left', 'right']: + self.check_dendrogram_plot(orientation) + + def check_dendrogram_plot(self, orientation): + # Tests dendrogram plotting. 
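dendrogram can be exercised without matplotlib via no_plot=True, which is how the leaf ordering above is asserted; the returned dict carries the leaf order plus the plot coordinates. A sketch (ytdist as above):

>>> from scipy.cluster.hierarchy import single, dendrogram
>>> R = dendrogram(single(ytdist), no_plot=True)
>>> R['leaves']
[2, 5, 1, 0, 3, 4]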
+ Z = linkage(hierarchy_test_data.ytdist, 'single') + expected = {'color_list': ['g', 'b', 'b', 'b', 'b'], + 'dcoord': [[0.0, 138.0, 138.0, 0.0], + [0.0, 219.0, 219.0, 0.0], + [0.0, 255.0, 255.0, 219.0], + [0.0, 268.0, 268.0, 255.0], + [138.0, 295.0, 295.0, 268.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0], + [45.0, 45.0, 55.0, 55.0], + [35.0, 35.0, 50.0, 50.0], + [25.0, 25.0, 42.5, 42.5], + [10.0, 10.0, 33.75, 33.75]], + 'ivl': ['2', '5', '1', '0', '3', '4'], + 'leaves': [2, 5, 1, 0, 3, 4]} + + fig = plt.figure() + ax = fig.add_subplot(221) + + # test that dendrogram accepts ax keyword + R1 = dendrogram(Z, ax=ax, orientation=orientation) + assert_equal(R1, expected) + + # test that dendrogram accepts and handle the leaf_font_size and + # leaf_rotation keywords + R1a = dendrogram(Z, ax=ax, orientation=orientation, + leaf_font_size=20, leaf_rotation=90) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_rotation(), 90) + assert_equal(testlabel.get_size(), 20) + R1a = dendrogram(Z, ax=ax, orientation=orientation, + leaf_rotation=90) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_rotation(), 90) + R1a = dendrogram(Z, ax=ax, orientation=orientation, + leaf_font_size=20) + testlabel = ( + ax.get_xticklabels()[0] + if orientation in ['top', 'bottom'] + else ax.get_yticklabels()[0] + ) + assert_equal(testlabel.get_size(), 20) + plt.close() + + # test plotting to gca (will import pylab) + R2 = dendrogram(Z, orientation=orientation) + plt.close() + assert_equal(R2, expected) + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_dendrogram_truncate_mode(self): + Z = linkage(hierarchy_test_data.ytdist, 'single') + + R = dendrogram(Z, 2, 'lastp', show_contracted=True) + plt.close() + assert_equal(R, {'color_list': ['b'], + 'dcoord': [[0.0, 295.0, 295.0, 0.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0]], + 'ivl': ['(2)', '(4)'], + 'leaves': [6, 9]}) + + R = dendrogram(Z, 2, 'mtica', show_contracted=True) + plt.close() + assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'], + 'dcoord': [[0.0, 138.0, 138.0, 0.0], + [0.0, 255.0, 255.0, 0.0], + [0.0, 268.0, 268.0, 255.0], + [138.0, 295.0, 295.0, 268.0]], + 'icoord': [[5.0, 5.0, 15.0, 15.0], + [35.0, 35.0, 45.0, 45.0], + [25.0, 25.0, 40.0, 40.0], + [10.0, 10.0, 32.5, 32.5]], + 'ivl': ['2', '5', '1', '0', '(2)'], + 'leaves': [2, 5, 1, 0, 7]}) + + def test_dendrogram_colors(self): + # Tests dendrogram plots with alternate colors + Z = linkage(hierarchy_test_data.ytdist, 'single') + + set_link_color_palette(['c', 'm', 'y', 'k']) + R = dendrogram(Z, no_plot=True, + above_threshold_color='g', color_threshold=250) + set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k']) + + color_list = R['color_list'] + assert_equal(color_list, ['c', 'm', 'g', 'g', 'g']) + + # reset color palette (global list) + set_link_color_palette(None) + + +def calculate_maximum_distances(Z): + # Used for testing correctness of maxdists. + n = Z.shape[0] + 1 + B = np.zeros((n-1,)) + q = np.zeros((3,)) + for i in xrange(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[int(left) - n] + if right >= n: + q[1] = B[int(right) - n] + q[2] = Z[i, 2] + B[i] = q.max() + return B + + +def calculate_maximum_inconsistencies(Z, R, k=3): + # Used for testing correctness of maxinconsts. 
+ n = Z.shape[0] + 1 + B = np.zeros((n-1,)) + q = np.zeros((3,)) + for i in xrange(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[int(left) - n] + if right >= n: + q[1] = B[int(right) - n] + q[2] = R[i, k] + B[i] = q.max() + return B + + +def within_tol(a, b, tol): + return np.abs(a - b).max() < tol + + +def test_unsupported_uncondensed_distance_matrix_linkage_warning(): + assert_warns(ClusterWarning, linkage, [[0, 1], [1, 0]]) + + +def test_euclidean_linkage_value_error(): + for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS: + assert_raises(ValueError, linkage, [[1, 1], [1, 1]], + method=method, metric='cityblock') + + +def test_2x2_linkage(): + Z1 = linkage([1], method='single', metric='euclidean') + Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean') + assert_allclose(Z1, Z2) + + +def test_node_compare(): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + Z = scipy.cluster.hierarchy.ward(X) + tree = to_tree(Z) + assert_(tree > tree.get_left()) + assert_(tree.get_right() > tree.get_left()) + assert_(tree.get_right() == tree.get_right()) + assert_(tree.get_right() != tree.get_left()) + + +def test_cut_tree(): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + Z = scipy.cluster.hierarchy.ward(X) + cutree = cut_tree(Z) + + assert_equal(cutree[:, 0], np.arange(nobs)) + assert_equal(cutree[:, -1], np.zeros(nobs)) + assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1)) + + assert_equal(cutree[:, [-5]], cut_tree(Z, n_clusters=5)) + assert_equal(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10])) + assert_equal(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5])) + + nodes = _order_cluster_tree(Z) + heights = np.array([node.dist for node in nodes]) + + assert_equal(cutree[:, np.searchsorted(heights, [5])], + cut_tree(Z, height=5)) + assert_equal(cutree[:, np.searchsorted(heights, [5, 10])], + cut_tree(Z, height=[5, 10])) + assert_equal(cutree[:, np.searchsorted(heights, [10, 5])], + cut_tree(Z, height=[10, 5])) + + +def test_optimal_leaf_ordering(): + # test with the distance vector y + Z = optimal_leaf_ordering(linkage(hierarchy_test_data.ytdist), + hierarchy_test_data.ytdist) + expectedZ = hierarchy_test_data.linkage_ytdist_single_olo + assert_allclose(Z, expectedZ, atol=1e-10) + + # test with the observation matrix X + Z = optimal_leaf_ordering(linkage(hierarchy_test_data.X, 'ward'), + hierarchy_test_data.X) + expectedZ = hierarchy_test_data.linkage_X_ward_olo + assert_allclose(Z, expectedZ, atol=1e-06) + + +def test_Heap(): + values = np.array([2, -1, 0, -1.5, 3]) + heap = Heap(values) + + pair = heap.get_min() + assert_equal(pair['key'], 3) + assert_equal(pair['value'], -1.5) + + heap.remove_min() + pair = heap.get_min() + assert_equal(pair['key'], 1) + assert_equal(pair['value'], -1) + + heap.change_value(1, 2.5) + pair = heap.get_min() + assert_equal(pair['key'], 2) + assert_equal(pair['value'], 0) + + heap.remove_min() + heap.remove_min() + + heap.change_value(1, 10) + pair = heap.get_min() + assert_equal(pair['key'], 4) + assert_equal(pair['value'], 3) + + heap.remove_min() + pair = heap.get_min() + assert_equal(pair['key'], 1) + assert_equal(pair['value'], 10) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_hierarchy.pyc b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_hierarchy.pyc new file mode 100644 index 0000000..dae25e3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_hierarchy.pyc 
differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_vq.py b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_vq.py new file mode 100644 index 0000000..9d4450b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_vq.py @@ -0,0 +1,290 @@ + +from __future__ import division, print_function, absolute_import + +import warnings +import sys + +import numpy as np +from numpy.testing import (assert_array_equal, assert_array_almost_equal, + assert_allclose, assert_equal, assert_) +from scipy._lib._numpy_compat import suppress_warnings +import pytest +from pytest import raises as assert_raises + +from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten, + ClusterError, _krandinit) +from scipy.cluster import _vq + + +TESTDATA_2D = np.array([ + -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68, + -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45, + 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28, + -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07, + -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29, + -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25, + 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21, + -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67, + -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94, + -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33, + 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8, + -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29, + 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75, + -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17, + 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44, + -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83, + 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28, + 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62, + -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35, + 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84, + -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75, + -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86, + -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83, + 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75, + -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03, + 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0, + 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99, + -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21, + 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75, + 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37, + -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0, + -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84, + 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69, + -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51, + -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71, + -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61, + 2.11]).reshape((200, 2)) + + +# Global data +X = np.array([[3.0, 
3], [4, 3], [4, 2], + [9, 2], [5, 1], [6, 2], [9, 4], + [5, 2], [5, 4], [7, 4], [6, 5]]) + +CODET1 = np.array([[3.0000, 3.0000], + [6.2000, 4.0000], + [5.8000, 1.8000]]) + +CODET2 = np.array([[11.0/3, 8.0/3], + [6.7500, 4.2500], + [6.2500, 1.7500]]) + +LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1]) + + +class TestWhiten(object): + def test_whiten(self): + desired = np.array([[5.08738849, 2.97091878], + [3.19909255, 0.69660580], + [4.51041982, 0.02640918], + [4.38567074, 0.95120889], + [2.32191480, 1.63195503]]) + for tp in np.array, np.matrix: + obs = tp([[0.98744510, 0.82766775], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + assert_allclose(whiten(obs), desired, rtol=1e-5) + + def test_whiten_zero_std(self): + desired = np.array([[0., 1.0, 2.86666544], + [0., 1.0, 1.32460034], + [0., 1.0, 3.74382172]]) + for tp in np.array, np.matrix: + obs = tp([[0., 1., 0.74109533], + [0., 1., 0.34243798], + [0., 1., 0.96785929]]) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_allclose(whiten(obs), desired, rtol=1e-5) + assert_equal(len(w), 1) + assert_(issubclass(w[-1].category, RuntimeWarning)) + + def test_whiten_not_finite(self): + for tp in np.array, np.matrix: + for bad_value in np.nan, np.inf, -np.inf: + obs = tp([[0.98744510, bad_value], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + assert_raises(ValueError, whiten, obs) + + +class TestVq(object): + def test_py_vq(self): + initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) + for tp in np.array, np.matrix: + label1 = py_vq(tp(X), tp(initc))[0] + assert_array_equal(label1, LABEL1) + + def test_vq(self): + initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) + for tp in np.array, np.matrix: + label1, dist = _vq.vq(tp(X), tp(initc)) + assert_array_equal(label1, LABEL1) + tlabel1, tdist = vq(tp(X), tp(initc)) + + def test_vq_1d(self): + # Test special rank 1 vq algo, python implementation. + data = X[:, 0] + initc = data[:3] + a, b = _vq.vq(data, initc) + ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis]) + assert_array_equal(a, ta) + assert_array_equal(b, tb) + + def test__vq_sametype(self): + a = np.array([1.0, 2.0], dtype=np.float64) + b = a.astype(np.float32) + assert_raises(TypeError, _vq.vq, a, b) + + def test__vq_invalid_type(self): + a = np.array([1, 2], dtype=int) + assert_raises(TypeError, _vq.vq, a, a) + + def test_vq_large_nfeat(self): + X = np.random.rand(20, 20) + code_book = np.random.rand(3, 20) + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq(X, code_book) + assert_allclose(dis0, dis1, 1e-5) + assert_array_equal(codes0, codes1) + + X = X.astype(np.float32) + code_book = code_book.astype(np.float32) + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq(X, code_book) + assert_allclose(dis0, dis1, 1e-5) + assert_array_equal(codes0, codes1) + + def test_vq_large_features(self): + X = np.random.rand(10, 5) * 1000000 + code_book = np.random.rand(2, 5) * 1000000 + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq(X, code_book) + assert_allclose(dis0, dis1, 1e-5) + assert_array_equal(codes0, codes1) + + +class TestKMean(object): + def test_large_features(self): + # Generate a data set with large values, and run kmeans on it to + # (regression for 1077). 
+ d = 300 + n = 100 + + m1 = np.random.randn(d) + m2 = np.random.randn(d) + x = 10000 * np.random.randn(n, d) - 20000 * m1 + y = 10000 * np.random.randn(n, d) + 20000 * m2 + + data = np.empty((x.shape[0] + y.shape[0], d), np.double) + data[:x.shape[0]] = x + data[x.shape[0]:] = y + + kmeans(data, 2) + + def test_kmeans_simple(self): + np.random.seed(54321) + initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) + for tp in np.array, np.matrix: + code1 = kmeans(tp(X), tp(initc), iter=1)[0] + assert_array_almost_equal(code1, CODET2) + + def test_kmeans_lost_cluster(self): + # This will cause kmeans to have a cluster with no points. + data = TESTDATA_2D + initk = np.array([[-1.8127404, -0.67128041], + [2.04621601, 0.07401111], + [-2.31149087, -0.05160469]]) + + kmeans(data, initk) + with suppress_warnings() as sup: + sup.filter(UserWarning, + "One of the clusters is empty. Re-run kmeans with a " + "different initialization") + kmeans2(data, initk, missing='warn') + + assert_raises(ClusterError, kmeans2, data, initk, missing='raise') + + def test_kmeans2_simple(self): + np.random.seed(12345678) + initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) + for tp in np.array, np.matrix: + code1 = kmeans2(tp(X), tp(initc), iter=1)[0] + code2 = kmeans2(tp(X), tp(initc), iter=2)[0] + + assert_array_almost_equal(code1, CODET1) + assert_array_almost_equal(code2, CODET2) + + def test_kmeans2_rank1(self): + data = TESTDATA_2D + data1 = data[:, 0] + + initc = data1[:3] + code = initc.copy() + kmeans2(data1, code, iter=1)[0] + kmeans2(data1, code, iter=2)[0] + + def test_kmeans2_rank1_2(self): + data = TESTDATA_2D + data1 = data[:, 0] + kmeans2(data1, 2, iter=1) + + def test_kmeans2_high_dim(self): + # test kmeans2 when the number of dimensions exceeds the number + # of input points + data = TESTDATA_2D + data = data.reshape((20, 20))[:10] + kmeans2(data, 2) + + def test_kmeans2_init(self): + np.random.seed(12345) + data = TESTDATA_2D + + kmeans2(data, 3, minit='points') + kmeans2(data[:, :1], 3, minit='points') # special case (1-D) + + kmeans2(data, 3, minit='++') + kmeans2(data[:, :1], 3, minit='++') # special case (1-D) + + # minit='random' can give warnings, filter those + with suppress_warnings() as sup: + sup.filter(message="One of the clusters is empty. Re-run") + kmeans2(data, 3, minit='random') + kmeans2(data[:, :1], 3, minit='random') # special case (1-D) + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MemoryError in Wine.') + def test_krandinit(self): + data = TESTDATA_2D + datas = [data.reshape((200, 2)), data.reshape((20, 20))[:10]] + k = int(1e6) + for data in datas: + np.random.seed(1234) + init = _krandinit(data, k) + orig_cov = np.cov(data, rowvar=0) + init_cov = np.cov(init, rowvar=0) + assert_allclose(orig_cov, init_cov, atol=1e-2) + + def test_kmeans2_empty(self): + # Regression test for gh-1032. + assert_raises(ValueError, kmeans2, [], 2) + + def test_kmeans_0k(self): + # Regression test for gh-1073: fail when k arg is 0. 
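kmeans2, exercised above with the 'points', '++' and 'random' initialisations, always returns a (k, N) centroid array and a length-M label vector. A shape-only sketch on illustrative random data (the seed and shapes here are this sketch's assumptions, not fixtures from the suite):

>>> import numpy as np
>>> from scipy.cluster.vq import kmeans2
>>> rng = np.random.RandomState(0)
>>> centroids, labels = kmeans2(rng.rand(40, 2), 3, minit='points')
>>> centroids.shape, labels.shape
((3, 2), (40,))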
+ assert_raises(ValueError, kmeans, X, 0) + assert_raises(ValueError, kmeans2, X, 0) + assert_raises(ValueError, kmeans2, X, np.array([])) + + def test_kmeans_large_thres(self): + # Regression test for gh-1774 + x = np.array([1, 2, 3, 4, 10], dtype=float) + res = kmeans(x, 1, thresh=1e16) + assert_allclose(res[0], np.array([4.])) + assert_allclose(res[1], 2.3999999999999999) diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_vq.pyc b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_vq.pyc new file mode 100644 index 0000000..42f77d8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/tests/test_vq.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/vq.py b/project/venv/lib/python2.7/site-packages/scipy/cluster/vq.py new file mode 100644 index 0000000..dfa156e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/cluster/vq.py @@ -0,0 +1,715 @@ +""" +==================================================================== +K-means clustering and vector quantization (:mod:`scipy.cluster.vq`) +==================================================================== + +Provides routines for k-means clustering, generating code books +from k-means models, and quantizing vectors by comparing them with +centroids in a code book. + +.. autosummary:: + :toctree: generated/ + + whiten -- Normalize a group of observations so each feature has unit variance + vq -- Calculate code book membership of a set of observation vectors + kmeans -- Performs k-means on a set of observation vectors forming k clusters + kmeans2 -- A different implementation of k-means with more methods + -- for initializing centroids + +Background information +====================== +The k-means algorithm takes as input the number of clusters to +generate, k, and a set of observation vectors to cluster. It +returns a set of centroids, one for each of the k clusters. An +observation vector is classified with the cluster number or +centroid index of the centroid closest to it. + +A vector v belongs to cluster i if it is closer to centroid i than +any other centroids. If v belongs to i, we say centroid i is the +dominating centroid of v. The k-means algorithm tries to +minimize distortion, which is defined as the sum of the squared distances +between each observation vector and its dominating centroid. +The minimization is achieved by iteratively reclassifying +the observations into clusters and recalculating the centroids until +a configuration is reached in which the centroids are stable. One can +also define a maximum number of iterations. + +Since vector quantization is a natural application for k-means, +information theory terminology is often used. The centroid index +or cluster index is also referred to as a "code" and the table +mapping codes to centroids and vice versa is often referred as a +"code book". The result of k-means, a set of centroids, can be +used to quantize vectors. Quantization aims to find an encoding of +vectors that reduces the expected distortion. + +All routines expect obs to be a M by N array where the rows are +the observation vectors. The codebook is a k by N array where the +i'th row is the centroid of code word i. The observation vectors +and centroids have the same feature dimension. + +As an example, suppose we wish to compress a 24-bit color image +(each pixel is represented by one byte for red, one for blue, and +one for green) before sending it over the web. 
By using a smaller +8-bit encoding, we can reduce the amount of data by two +thirds. Ideally, the colors for each of the 256 possible 8-bit +encoding values should be chosen to minimize distortion of the +color. Running k-means with k=256 generates a code book of 256 +codes, which fills up all possible 8-bit sequences. Instead of +sending a 3-byte value for each pixel, the 8-bit centroid index +(or code word) of the dominating centroid is transmitted. The code +book is also sent over the wire so each 8-bit code can be +translated back to a 24-bit pixel value representation. If the +image of interest was of an ocean, we would expect many 24-bit +blues to be represented by 8-bit codes. If it was an image of a +human face, more flesh tone colors would be represented in the +code book. + +""" +from __future__ import division, print_function, absolute_import + +import warnings +import numpy as np +from collections import deque +from scipy._lib._util import _asarray_validated +from scipy._lib.six import xrange +from scipy.spatial.distance import cdist + +from . import _vq + +__docformat__ = 'restructuredtext' + +__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] + + +class ClusterError(Exception): + pass + + +def whiten(obs, check_finite=True): + """ + Normalize a group of observations on a per feature basis. + + Before running k-means, it is beneficial to rescale each feature + dimension of the observation set with whitening. Each feature is + divided by its standard deviation across all observations to give + it unit variance. + + Parameters + ---------- + obs : ndarray + Each row of the array is an observation. The + columns are the features seen during each observation. + + >>> # f0 f1 f2 + >>> obs = [[ 1., 1., 1.], #o0 + ... [ 2., 2., 2.], #o1 + ... [ 3., 3., 3.], #o2 + ... [ 4., 4., 4.]] #o3 + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + result : ndarray + Contains the values in `obs` scaled by the standard deviation + of each column. + + Examples + -------- + >>> from scipy.cluster.vq import whiten + >>> features = np.array([[1.9, 2.3, 1.7], + ... [1.5, 2.5, 2.2], + ... [0.8, 0.6, 1.7,]]) + >>> whiten(features) + array([[ 4.17944278, 2.69811351, 7.21248917], + [ 3.29956009, 2.93273208, 9.33380951], + [ 1.75976538, 0.7038557 , 7.21248917]]) + + """ + obs = _asarray_validated(obs, check_finite=check_finite) + std_dev = obs.std(axis=0) + zero_std_mask = std_dev == 0 + if zero_std_mask.any(): + std_dev[zero_std_mask] = 1.0 + warnings.warn("Some columns have standard deviation zero. " + "The values of these columns will not change.", + RuntimeWarning) + return obs / std_dev + + +def vq(obs, code_book, check_finite=True): + """ + Assign codes from a code book to observations. + + Assigns a code from a code book to each observation. Each + observation vector in the 'M' by 'N' `obs` array is compared with the + centroids in the code book and assigned the code of the closest + centroid. + + The features in `obs` should have unit variance, which can be + achieved by passing them through the whiten function. The code + book can be created with the k-means algorithm or a different + encoding algorithm. + + Parameters + ---------- + obs : ndarray + Each row of the 'M' x 'N' array is an observation. The columns are + the "features" seen during each observation. 
The features must be + whitened first using the whiten function or something equivalent. + code_book : ndarray + The code book is usually generated using the k-means algorithm. + Each row of the array holds a different code, and the columns are + the features of the code. + + >>> # f0 f1 f2 f3 + >>> code_book = [ + ... [ 1., 2., 3., 4.], #c0 + ... [ 1., 2., 3., 4.], #c1 + ... [ 1., 2., 3., 4.]] #c2 + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + code : ndarray + A length M array holding the code book index for each observation. + dist : ndarray + The distortion (distance) between the observation and its nearest + code. + + Examples + -------- + >>> from numpy import array + >>> from scipy.cluster.vq import vq + >>> code_book = array([[1.,1.,1.], + ... [2.,2.,2.]]) + >>> features = array([[ 1.9,2.3,1.7], + ... [ 1.5,2.5,2.2], + ... [ 0.8,0.6,1.7]]) + >>> vq(features,code_book) + (array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239])) + + """ + obs = _asarray_validated(obs, check_finite=check_finite) + code_book = _asarray_validated(code_book, check_finite=check_finite) + ct = np.common_type(obs, code_book) + + c_obs = obs.astype(ct, copy=False) + c_code_book = code_book.astype(ct, copy=False) + + if np.issubdtype(ct, np.float64) or np.issubdtype(ct, np.float32): + return _vq.vq(c_obs, c_code_book) + return py_vq(obs, code_book, check_finite=False) + + +def py_vq(obs, code_book, check_finite=True): + """ Python version of vq algorithm. + + The algorithm computes the Euclidean distance between each + observation and every centroid in the code_book. + + Parameters + ---------- + obs : ndarray + Expects a rank 2 array. Each row is one observation. + code_book : ndarray + Code book to use. Same format as obs. Should have the same number of + features (e.g. columns) as obs. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + code : ndarray + code[i] gives the label of the ith observation; its code is + code_book[code[i]]. + min_dist : ndarray + min_dist[i] gives the distance between the ith observation and its + corresponding code. + + Notes + ----- + This function is slower than the C version but works for + all input types. If the inputs have the wrong types for the + C versions of the function, this one is called as a last resort. + + It is about 20 times slower than the C version. + + """ + obs = _asarray_validated(obs, check_finite=check_finite) + code_book = _asarray_validated(code_book, check_finite=check_finite) + + if obs.ndim != code_book.ndim: + raise ValueError("Observation and code_book should have the same rank") + + if obs.ndim == 1: + obs = obs[:, np.newaxis] + code_book = code_book[:, np.newaxis] + + dist = cdist(obs, code_book) + code = dist.argmin(axis=1) + min_dist = dist[np.arange(len(code)), code] + return code, min_dist + + +# py_vq2 was equivalent to py_vq +py_vq2 = np.deprecate(py_vq, old_name='py_vq2', new_name='py_vq') + + +def _kmeans(obs, guess, thresh=1e-5): + """ "raw" version of k-means. + + Returns + ------- + code_book + the lowest distortion codebook found.
+    avg_dist
+        the average distance an observation is from a code in the book.
+        Lower means the code_book matches the data better.
+
+    See Also
+    --------
+    kmeans : wrapper around k-means
+
+    Examples
+    --------
+    Note: not whitened in this example.
+
+    >>> from numpy import array
+    >>> from scipy.cluster.vq import _kmeans
+    >>> features  = array([[ 1.9,2.3],
+    ...                    [ 1.5,2.5],
+    ...                    [ 0.8,0.6],
+    ...                    [ 0.4,1.8],
+    ...                    [ 1.0,1.0]])
+    >>> book = array((features[0],features[2]))
+    >>> _kmeans(features,book)
+    (array([[ 1.7       ,  2.4       ],
+           [ 0.73333333,  1.13333333]]), 0.40563916697728591)
+
+    """
+
+    code_book = np.asarray(guess)
+    diff = np.inf
+    prev_avg_dists = deque([diff], maxlen=2)
+    while diff > thresh:
+        # compute membership and distances between obs and code_book
+        obs_code, distort = vq(obs, code_book, check_finite=False)
+        prev_avg_dists.append(distort.mean(axis=-1))
+        # recalc code_book as centroids of associated obs
+        code_book, has_members = _vq.update_cluster_means(obs, obs_code,
+                                                          code_book.shape[0])
+        code_book = code_book[has_members]
+        diff = prev_avg_dists[0] - prev_avg_dists[1]
+
+    return code_book, prev_avg_dists[1]
+
+
+def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True):
+    """
+    Performs k-means on a set of observation vectors forming k clusters.
+
+    The k-means algorithm adjusts the classification of the observations
+    into clusters and updates the cluster centroids until the position of
+    the centroids is stable over successive iterations. In this
+    implementation of the algorithm, the stability of the centroids is
+    determined by comparing the absolute value of the change in the average
+    Euclidean distance between the observations and their corresponding
+    centroids against a threshold. This yields
+    a code book mapping centroids to codes and vice versa.
+
+    Parameters
+    ----------
+    obs : ndarray
+       Each row of the M by N array is an observation vector. The
+       columns are the features seen during each observation.
+       The features must be whitened first with the `whiten` function.
+
+    k_or_guess : int or ndarray
+       The number of centroids to generate. A code is assigned to
+       each centroid, which is also the row index of the centroid
+       in the code_book matrix generated.
+
+       The initial k centroids are chosen by randomly selecting
+       observations from the observation matrix. Alternatively,
+       passing a k by N array specifies the initial k centroids.
+
+    iter : int, optional
+       The number of times to run k-means, returning the codebook
+       with the lowest distortion. This argument is ignored if
+       initial centroids are specified with an array for the
+       ``k_or_guess`` parameter. This parameter does not represent the
+       number of iterations of the k-means algorithm.
+
+    thresh : float, optional
+       Terminates the k-means algorithm if the change in
+       distortion since the last k-means iteration is less than
+       or equal to thresh.
+
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    Returns
+    -------
+    codebook : ndarray
+       A k by N array of k centroids. The i'th centroid
+       codebook[i] is represented with the code i. The centroids
+       and codes generated represent the lowest distortion seen,
+       not necessarily the globally minimal distortion.
+
+    distortion : float
+       The mean (non-squared) Euclidean distance between the observations
+       passed and the centroids generated.
Note that this differs from the standard
+       definition of distortion in the context of the k-means algorithm,
+       which is the sum of the squared distances.
+
+    See Also
+    --------
+    kmeans2 : a different implementation of k-means clustering
+       with more methods for generating initial centroids but without
+       using a distortion change threshold as a stopping criterion.
+
+    whiten : must be called prior to passing an observation matrix
+       to kmeans.
+
+    Examples
+    --------
+    >>> from numpy import array
+    >>> from scipy.cluster.vq import vq, kmeans, whiten
+    >>> import matplotlib.pyplot as plt
+    >>> features  = array([[ 1.9,2.3],
+    ...                    [ 1.5,2.5],
+    ...                    [ 0.8,0.6],
+    ...                    [ 0.4,1.8],
+    ...                    [ 0.1,0.1],
+    ...                    [ 0.2,1.8],
+    ...                    [ 2.0,0.5],
+    ...                    [ 0.3,1.5],
+    ...                    [ 1.0,1.0]])
+    >>> whitened = whiten(features)
+    >>> book = np.array((whitened[0],whitened[2]))
+    >>> kmeans(whitened,book)
+    (array([[ 2.3110306 ,  2.86287398],    # random
+           [ 0.93218041,  1.24398691]]), 0.85684700941625547)
+
+    >>> from numpy import random
+    >>> random.seed((1000,2000))
+    >>> codes = 3
+    >>> kmeans(whitened,codes)
+    (array([[ 2.3110306 ,  2.86287398],    # random
+           [ 1.32544402,  0.65607529],
+           [ 0.40782893,  2.02786907]]), 0.5196582527686241)
+
+    >>> # Create 50 datapoints in two clusters a and b
+    >>> pts = 50
+    >>> a = np.random.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
+    >>> b = np.random.multivariate_normal([30, 10],
+    ...                                   [[10, 2], [2, 1]],
+    ...                                   size=pts)
+    >>> features = np.concatenate((a, b))
+    >>> # Whiten data
+    >>> whitened = whiten(features)
+    >>> # Find 2 clusters in the data
+    >>> codebook, distortion = kmeans(whitened, 2)
+    >>> # Plot whitened data and cluster centers in red
+    >>> plt.scatter(whitened[:, 0], whitened[:, 1])
+    >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
+    >>> plt.show()
+    """
+    obs = _asarray_validated(obs, check_finite=check_finite)
+    if iter < 1:
+        raise ValueError("iter must be at least 1, got %s" % iter)
+
+    # Determine whether a count (scalar) or an initial guess (array) was passed.
+    if not np.isscalar(k_or_guess):
+        guess = _asarray_validated(k_or_guess, check_finite=check_finite)
+        if guess.size < 1:
+            raise ValueError("Asked for 0 clusters. Initial book was %s" %
+                             guess)
+        return _kmeans(obs, guess, thresh=thresh)
+
+    # k_or_guess is a scalar, now verify that it's an integer
+    k = int(k_or_guess)
+    if k != k_or_guess:
+        raise ValueError("If k_or_guess is a scalar, it must be an integer.")
+    if k < 1:
+        raise ValueError("Asked for %d clusters." % k)
+
+    # initialize best distance value to a large value
+    best_dist = np.inf
+    for i in xrange(iter):
+        # the initial code book is randomly selected from observations
+        guess = _kpoints(obs, k)
+        book, dist = _kmeans(obs, guess, thresh=thresh)
+        if dist < best_dist:
+            best_book = book
+            best_dist = dist
+    return best_book, best_dist
+
+
+def _kpoints(data, k):
+    """Pick k points at random in data (one row = one observation).
+
+    Parameters
+    ----------
+    data : ndarray
+        Expects a rank 1 or 2 array. Rank 1 arrays are assumed to describe
+        one-dimensional data; for rank 2 arrays, one row is one observation.
+    k : int
+        Number of samples to generate.
+
+    Returns
+    -------
+    x : ndarray
+        A 'k' by 'N' array containing the initial centroids
+
+    """
+    idx = np.random.choice(data.shape[0], size=k, replace=False)
+    return data[idx]
+
+
+def _krandinit(data, k):
+    """Returns k samples of a random variable whose parameters depend on data.
+
+    More precisely, it returns k observations sampled from a Gaussian random
+    variable whose mean and covariance are estimated from the data.
+
+    Parameters
+    ----------
+    data : ndarray
+        Expects a rank 1 or 2 array. Rank 1 arrays are assumed to describe
+        one-dimensional data; for rank 2 arrays, one row is one observation.
+    k : int
+        Number of samples to generate.
+
+    Returns
+    -------
+    x : ndarray
+        A 'k' by 'N' array containing the initial centroids
+
+    """
+    mu = data.mean(axis=0)
+
+    if data.ndim == 1:
+        cov = np.cov(data)
+        x = np.random.randn(k)
+        x *= np.sqrt(cov)
+    elif data.shape[1] > data.shape[0]:
+        # initialize when the covariance matrix is rank deficient
+        _, s, vh = np.linalg.svd(data - mu, full_matrices=False)
+        x = np.random.randn(k, s.size)
+        sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
+        x = x.dot(sVh)
+    else:
+        cov = np.atleast_2d(np.cov(data, rowvar=False))
+
+        # k rows, d cols (one row = one obs)
+        # Generate k sample of a random variable ~ Gaussian(mu, cov)
+        x = np.random.randn(k, mu.size)
+        x = x.dot(np.linalg.cholesky(cov).T)
+
+    x += mu
+    return x
+
+
+def _kpp(data, k):
+    """ Picks k points in data based on the kmeans++ method
+
+    Parameters
+    ----------
+    data : ndarray
+        Expects a rank 1 or 2 array. Rank 1 arrays are assumed to describe
+        one-dimensional data; for rank 2 arrays, one row is one observation.
+    k : int
+        Number of samples to generate.
+
+    Returns
+    -------
+    init : ndarray
+        A 'k' by 'N' array containing the initial centroids
+
+    References
+    ----------
+    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+       on Discrete Algorithms, 2007.
+    """
+
+    dims = data.shape[1] if len(data.shape) > 1 else 1
+    init = np.ndarray((k, dims))
+
+    for i in range(k):
+        if i == 0:
+            # The first centroid is drawn uniformly from all observations.
+            # (Index over data.shape[0] rather than dims, so that every row,
+            # not just the first `dims` rows, can be selected.)
+            init[i, :] = data[np.random.randint(data.shape[0])]
+
+        else:
+            # D2[x] is the squared distance from x to its nearest centroid;
+            # the next centroid is drawn with probability proportional to D2.
+            D2 = np.array([min(
+                [np.inner(init[j]-x, init[j]-x) for j in range(i)]
+                ) for x in data])
+            probs = D2/D2.sum()
+            cumprobs = probs.cumsum()
+            r = np.random.rand()
+            init[i, :] = data[np.searchsorted(cumprobs, r)]
+
+    return init
+
+
+_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
+
+
+def _missing_warn():
+    """Print a warning when called."""
+    warnings.warn("One of the clusters is empty. "
+                  "Re-run kmeans with a different initialization.")
+
+
+def _missing_raise():
+    """Raise a ClusterError when called."""
+    raise ClusterError("One of the clusters is empty. "
+                       "Re-run kmeans with a different initialization.")
+
+
+_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
+
+
+def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
+            missing='warn', check_finite=True):
+    """
+    Classify a set of observations into k clusters using the k-means algorithm.
+
+    The algorithm attempts to minimize the Euclidean distance between
+    observations and centroids. Several initialization methods are
+    included.
+
+    Parameters
+    ----------
+    data : ndarray
+        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
+        'M' array of 'M' one-dimensional observations.
+    k : int or ndarray
+        The number of clusters to form as well as the number of
+        centroids to generate. If `minit` is 'matrix', or if an
+        ndarray is given instead of an int, it is interpreted as
+        the initial clusters to use.
+    iter : int, optional
+        Number of iterations of the k-means algorithm to run. Note
+        that this differs in meaning from the iter parameter of
+        the kmeans function.
+    thresh : float, optional
+        (not used yet)
+    minit : str, optional
+        Method for initialization. Available methods are 'random',
+        'points', '++' and 'matrix':
+
+        'random': generate k centroids from a Gaussian with mean and
+        variance estimated from the data.
+
+        'points': choose k observations (rows) at random from data for
+        the initial centroids.
+
+        '++': choose k observations according to the kmeans++ method
+        (careful seeding)
+
+        'matrix': interpret the k parameter as a k by M (or length k
+        array for one-dimensional data) array of initial centroids.
+    missing : str, optional
+        Method to deal with empty clusters. Available methods are
+        'warn' and 'raise':
+
+        'warn': give a warning and continue.
+
+        'raise': raise a ClusterError and terminate the algorithm.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    Returns
+    -------
+    centroid : ndarray
+        A 'k' by 'N' array of centroids found at the last iteration of
+        k-means.
+    label : ndarray
+        label[i] is the code or index of the centroid the
+        i'th observation is closest to.
+
+    References
+    ----------
+    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+       on Discrete Algorithms, 2007.
+    """
+    if int(iter) < 1:
+        raise ValueError("Invalid iter (%s), "
+                         "must be a positive integer." % iter)
+    try:
+        miss_meth = _valid_miss_meth[missing]
+    except KeyError:
+        raise ValueError("Unknown missing method %r" % (missing,))
+
+    data = _asarray_validated(data, check_finite=check_finite)
+    if data.ndim == 1:
+        d = 1
+    elif data.ndim == 2:
+        d = data.shape[1]
+    else:
+        raise ValueError("Input of rank > 2 is not supported.")
+
+    if data.size < 1:
+        raise ValueError("Empty input is not supported.")
+
+    # If k is not a single value it should be compatible with data's shape
+    if minit == 'matrix' or not np.isscalar(k):
+        code_book = np.array(k, copy=True)
+        if data.ndim != code_book.ndim:
+            raise ValueError("k array doesn't match data rank")
+        nc = len(code_book)
+        if data.ndim > 1 and code_book.shape[1] != d:
+            raise ValueError("k array doesn't match data dimension")
+    else:
+        nc = int(k)
+
+        if nc < 1:
+            raise ValueError("Cannot ask kmeans2 for %d clusters"
+                             " (k was %s)" % (nc, k))
+        elif nc != k:
+            warnings.warn("k was not an integer, was converted.")
+
+        try:
+            init_meth = _valid_init_meth[minit]
+        except KeyError:
+            raise ValueError("Unknown init method %r" % (minit,))
+        else:
+            code_book = init_meth(data, k)
+
+    for i in xrange(iter):
+        # Compute the nearest neighbor for each obs using the current code book
+        label = vq(data, code_book)[0]
+        # Update the code book by computing centroids
+        new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
+        if not has_members.all():
+            miss_meth()
+            # Set the empty clusters to their previous positions
+            new_code_book[~has_members] = code_book[~has_members]
+        code_book = new_code_book
+
+    return code_book, label
diff --git a/project/venv/lib/python2.7/site-packages/scipy/cluster/vq.pyc b/project/venv/lib/python2.7/site-packages/scipy/cluster/vq.pyc
new file mode 100644
index 0000000..945b909
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/cluster/vq.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/conftest.py
b/project/venv/lib/python2.7/site-packages/scipy/conftest.py new file mode 100644 index 0000000..e5fac23 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/conftest.py @@ -0,0 +1,39 @@ +# Pytest customization +from __future__ import division, absolute_import, print_function + +import os +import pytest +import warnings + +from distutils.version import LooseVersion +from scipy._lib._fpumode import get_fpu_mode +from scipy._lib._testutils import FPUModeChangeWarning + + +def pytest_runtest_setup(item): + if LooseVersion(pytest.__version__) >= LooseVersion("3.6.0"): + mark = item.get_closest_marker("xslow") + else: + mark = item.get_marker("xslow") + if mark is not None: + try: + v = int(os.environ.get('SCIPY_XSLOW', '0')) + except ValueError: + v = False + if not v: + pytest.skip("very slow test; set environment variable SCIPY_XSLOW=1 to run it") + + +@pytest.fixture(scope="function", autouse=True) +def check_fpu_mode(request): + """ + Check FPU mode was not changed during the test. + """ + old_mode = get_fpu_mode() + yield + new_mode = get_fpu_mode() + + if old_mode != new_mode: + warnings.warn("FPU mode changed from {0:#x} to {1:#x} during " + "the test".format(old_mode, new_mode), + category=FPUModeChangeWarning, stacklevel=0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/conftest.pyc b/project/venv/lib/python2.7/site-packages/scipy/conftest.pyc new file mode 100644 index 0000000..3f50d3e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/conftest.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/constants/__init__.py new file mode 100644 index 0000000..b1010cb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/constants/__init__.py @@ -0,0 +1,340 @@ +r""" +================================== +Constants (:mod:`scipy.constants`) +================================== + +.. currentmodule:: scipy.constants + +Physical and mathematical constants and units. 
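+
+A quick usage sketch (illustrative only; the exact float formatting of the
+output may vary):
+
+>>> from scipy import constants
+>>> constants.c       # speed of light in vacuum, in m/s (exact)
+299792458.0
+>>> constants.golden  # golden ratio, (1 + sqrt(5))/2
+1.618033988749895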
+ + +Mathematical constants +====================== + +================ ================================================================= +``pi`` Pi +``golden`` Golden ratio +``golden_ratio`` Golden ratio +================ ================================================================= + + +Physical constants +================== + +=========================== ================================================================= +``c`` speed of light in vacuum +``speed_of_light`` speed of light in vacuum +``mu_0`` the magnetic constant :math:`\mu_0` +``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0` +``h`` the Planck constant :math:`h` +``Planck`` the Planck constant :math:`h` +``hbar`` :math:`\hbar = h/(2\pi)` +``G`` Newtonian constant of gravitation +``gravitational_constant`` Newtonian constant of gravitation +``g`` standard acceleration of gravity +``e`` elementary charge +``elementary_charge`` elementary charge +``R`` molar gas constant +``gas_constant`` molar gas constant +``alpha`` fine-structure constant +``fine_structure`` fine-structure constant +``N_A`` Avogadro constant +``Avogadro`` Avogadro constant +``k`` Boltzmann constant +``Boltzmann`` Boltzmann constant +``sigma`` Stefan-Boltzmann constant :math:`\sigma` +``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma` +``Wien`` Wien displacement law constant +``Rydberg`` Rydberg constant +``m_e`` electron mass +``electron_mass`` electron mass +``m_p`` proton mass +``proton_mass`` proton mass +``m_n`` neutron mass +``neutron_mass`` neutron mass +=========================== ================================================================= + + +Constants database +------------------ + +In addition to the above variables, :mod:`scipy.constants` also contains the +2014 CODATA recommended values [CODATA2014]_ database containing more physical +constants. + +.. autosummary:: + :toctree: generated/ + + value -- Value in physical_constants indexed by key + unit -- Unit in physical_constants indexed by key + precision -- Relative precision in physical_constants indexed by key + find -- Return list of physical_constant keys with a given string + ConstantWarning -- Constant sought not in newest CODATA data set + +.. data:: physical_constants + + Dictionary of physical constants, of the format + ``physical_constants[name] = (value, unit, uncertainty)``. 
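+
+   For example (a sketch using the 2014 CODATA dataset documented above;
+   the third entry is the absolute standard uncertainty):
+
+   >>> from scipy.constants import physical_constants, value, unit
+   >>> physical_constants['elementary charge']
+   (1.6021766208e-19, 'C', 9.8e-28)
+   >>> value('elementary charge'), unit('elementary charge')
+   (1.6021766208e-19, 'C')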
+
+Available constants:
+
+======================================================================  ====
+%(constant_names)s
+======================================================================  ====
+
+
+Units
+=====
+
+SI prefixes
+-----------
+
+============  =================================================================
+``yotta``     :math:`10^{24}`
+``zetta``     :math:`10^{21}`
+``exa``       :math:`10^{18}`
+``peta``      :math:`10^{15}`
+``tera``      :math:`10^{12}`
+``giga``      :math:`10^{9}`
+``mega``      :math:`10^{6}`
+``kilo``      :math:`10^{3}`
+``hecto``     :math:`10^{2}`
+``deka``      :math:`10^{1}`
+``deci``      :math:`10^{-1}`
+``centi``     :math:`10^{-2}`
+``milli``     :math:`10^{-3}`
+``micro``     :math:`10^{-6}`
+``nano``      :math:`10^{-9}`
+``pico``      :math:`10^{-12}`
+``femto``     :math:`10^{-15}`
+``atto``      :math:`10^{-18}`
+``zepto``     :math:`10^{-21}`
+============  =================================================================
+
+Binary prefixes
+---------------
+
+============  =================================================================
+``kibi``      :math:`2^{10}`
+``mebi``      :math:`2^{20}`
+``gibi``      :math:`2^{30}`
+``tebi``      :math:`2^{40}`
+``pebi``      :math:`2^{50}`
+``exbi``      :math:`2^{60}`
+``zebi``      :math:`2^{70}`
+``yobi``      :math:`2^{80}`
+============  =================================================================
+
+Mass
+----
+
+=================  ============================================================
+``gram``           :math:`10^{-3}` kg
+``metric_ton``     :math:`10^{3}` kg
+``grain``          one grain in kg
+``lb``             one pound (avoirdupois) in kg
+``pound``          one pound (avoirdupois) in kg
+``blob``           one inch version of a slug in kg (added in 1.0.0)
+``slinch``         one inch version of a slug in kg (added in 1.0.0)
+``slug``           one slug in kg (added in 1.0.0)
+``oz``             one ounce in kg
+``ounce``          one ounce in kg
+``stone``          one stone in kg
+``long_ton``       one long ton in kg
+``short_ton``      one short ton in kg
+``troy_ounce``     one Troy ounce in kg
+``troy_pound``     one Troy pound in kg
+``carat``          one carat in kg
+``m_u``            atomic mass constant (in kg)
+``u``              atomic mass constant (in kg)
+``atomic_mass``    atomic mass constant (in kg)
+=================  ============================================================
+
+Angle
+-----
+
+=================  ============================================================
+``degree``         degree in radians
+``arcmin``         arc minute in radians
+``arcminute``      arc minute in radians
+``arcsec``         arc second in radians
+``arcsecond``      arc second in radians
+=================  ============================================================
+
+
+Time
+----
+
+=================  ============================================================
+``minute``         one minute in seconds
+``hour``           one hour in seconds
+``day``            one day in seconds
+``week``           one week in seconds
+``year``           one year (365 days) in seconds
+``Julian_year``    one Julian year (365.25 days) in seconds
+=================  ============================================================
+
+
+Length
+------
+
+=====================  ============================================================
+``inch``               one inch in meters
+``foot``               one foot in meters
+``yard``               one yard in meters
+``mile``               one mile in meters
+``mil``                one mil in meters
+``pt``                 one point in meters
+``point``              one point in meters
+``survey_foot``        one survey foot in meters
+``survey_mile``        one survey mile in meters
+``nautical_mile``      one nautical mile in meters
+``fermi``              one Fermi in meters
+``angstrom``           one Angstrom in meters
+``micron``             one micron in meters
+``au``                 one astronomical unit in meters
+``astronomical_unit`` one astronomical unit in meters +``light_year`` one light year in meters +``parsec`` one parsec in meters +===================== ============================================================ + +Pressure +-------- + +================= ============================================================ +``atm`` standard atmosphere in pascals +``atmosphere`` standard atmosphere in pascals +``bar`` one bar in pascals +``torr`` one torr (mmHg) in pascals +``mmHg`` one torr (mmHg) in pascals +``psi`` one psi in pascals +================= ============================================================ + +Area +---- + +================= ============================================================ +``hectare`` one hectare in square meters +``acre`` one acre in square meters +================= ============================================================ + + +Volume +------ + +=================== ======================================================== +``liter`` one liter in cubic meters +``litre`` one liter in cubic meters +``gallon`` one gallon (US) in cubic meters +``gallon_US`` one gallon (US) in cubic meters +``gallon_imp`` one gallon (UK) in cubic meters +``fluid_ounce`` one fluid ounce (US) in cubic meters +``fluid_ounce_US`` one fluid ounce (US) in cubic meters +``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters +``bbl`` one barrel in cubic meters +``barrel`` one barrel in cubic meters +=================== ======================================================== + +Speed +----- + +================== ========================================================== +``kmh`` kilometers per hour in meters per second +``mph`` miles per hour in meters per second +``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second +``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second +``knot`` one knot in meters per second +================== ========================================================== + + +Temperature +----------- + +===================== ======================================================= +``zero_Celsius`` zero of Celsius scale in Kelvin +``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins +===================== ======================================================= + +.. 
autosummary:: + :toctree: generated/ + + convert_temperature + +Energy +------ + +==================== ======================================================= +``eV`` one electron volt in Joules +``electron_volt`` one electron volt in Joules +``calorie`` one calorie (thermochemical) in Joules +``calorie_th`` one calorie (thermochemical) in Joules +``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules +``erg`` one erg in Joules +``Btu`` one British thermal unit (International Steam Table) in Joules +``Btu_IT`` one British thermal unit (International Steam Table) in Joules +``Btu_th`` one British thermal unit (thermochemical) in Joules +``ton_TNT`` one ton of TNT in Joules +==================== ======================================================= + +Power +----- + +==================== ======================================================= +``hp`` one horsepower in watts +``horsepower`` one horsepower in watts +==================== ======================================================= + +Force +----- + +==================== ======================================================= +``dyn`` one dyne in newtons +``dyne`` one dyne in newtons +``lbf`` one pound force in newtons +``pound_force`` one pound force in newtons +``kgf`` one kilogram force in newtons +``kilogram_force`` one kilogram force in newtons +==================== ======================================================= + +Optics +------ + +.. autosummary:: + :toctree: generated/ + + lambda2nu + nu2lambda + +References +========== + +.. [CODATA2014] CODATA Recommended Values of the Fundamental + Physical Constants 2014. + + https://physics.nist.gov/cuu/Constants/ + +""" +from __future__ import division, print_function, absolute_import + +# Modules contributed by BasSw (wegwerp@gmail.com) +from .codata import * +from .constants import * +from .codata import _obsolete_constants + +_constant_names = [(_k.lower(), _k, _v) + for _k, _v in physical_constants.items() + if _k not in _obsolete_constants] +_constant_names = "\n".join(["``%s``%s %s %s" % (_x[1], " "*(66-len(_x[1])), + _x[2][0], _x[2][1]) + for _x in sorted(_constant_names)]) +if __doc__ is not None: + __doc__ = __doc__ % dict(constant_names=_constant_names) + +del _constant_names + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/constants/__init__.pyc new file mode 100644 index 0000000..29c3cbf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/constants/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/codata.py b/project/venv/lib/python2.7/site-packages/scipy/constants/codata.py new file mode 100644 index 0000000..d34c686 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/constants/codata.py @@ -0,0 +1,1385 @@ +# Compiled by Charles Harris, dated October 3, 2002 +# updated to 2002 values by BasSw, 2006 +# Updated to 2006 values by Vincent Davis June 2010 +# Updated to 2014 values by Joseph Booker, 2015 + +""" +Fundamental Physical Constants +------------------------------ + +These constants are taken from CODATA Recommended Values of the Fundamental +Physical Constants 2014. + +Object +------ +physical_constants : dict + A dictionary containing physical constants. 
Keys are the names of physical + constants, values are tuples (value, units, precision). + +Functions +--------- +value(key): + Returns the value of the physical constant(key). +unit(key): + Returns the units of the physical constant(key). +precision(key): + Returns the relative precision of the physical constant(key). +find(sub): + Prints or returns list of keys containing the string sub, default is all. + +Source +------ +The values of the constants provided at this site are recommended for +international use by CODATA and are the latest available. Termed the "2014 +CODATA recommended values," they are generally recognized worldwide for use in +all fields of science and technology. The values became available on 25 June +2015 and replaced the 2010 CODATA set. They are based on all of the data +available through 31 December 2014. The 2014 adjustment was carried out under +the auspices of the CODATA Task Group on Fundamental Constants. Also available +is an introduction to the constants for non-experts at +https://physics.nist.gov/cuu/Constants/introduction.html + +References +---------- +Theoretical and experimental publications relevant to the fundamental constants +and closely related precision measurements published since the mid 1980s, but +also including many older papers of particular interest, some of which date +back to the 1800s. To search bibliography visit + +https://physics.nist.gov/cuu/Constants/ + +""" +from __future__ import division, print_function, absolute_import + +import warnings +from math import pi, sqrt + +__all__ = ['physical_constants', 'value', 'unit', 'precision', 'find', + 'ConstantWarning'] + +""" +Source: https://physics.nist.gov/cuu/Constants/ + +The values of the constants provided at the above site are recommended for +international use by CODATA and are the latest available. Termed the "2006 +CODATA recommended values", they are generally recognized worldwide for use +in all fields of science and technology. The values became available in March +2007 and replaced the 2002 CODATA set. They are based on all of the data +available through 31 December 2006. The 2006 adjustment was carried out under +the auspices of the CODATA Task Group on Fundamental Constants. +""" + +# +# Source: https://physics.nist.gov/cuu/Constants/ +# + +# Quantity Value Uncertainty Unit +# ---------------------------------------------------- --------------------- -------------------- ------------- +txt2002 = """\ +Wien displacement law constant 2.897 7685e-3 0.000 0051e-3 m K +atomic unit of 1st hyperpolarizablity 3.206 361 51e-53 0.000 000 28e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizablity 6.235 3808e-65 0.000 0011e-65 C^4 m^4 J^-3 +atomic unit of electric dipole moment 8.478 353 09e-30 0.000 000 73e-30 C m +atomic unit of electric polarizablity 1.648 777 274e-41 0.000 000 016e-41 C^2 m^2 J^-1 +atomic unit of electric quadrupole moment 4.486 551 24e-40 0.000 000 39e-40 C m^2 +atomic unit of magn. dipole moment 1.854 801 90e-23 0.000 000 16e-23 J T^-1 +atomic unit of magn. flux density 2.350 517 42e5 0.000 000 20e5 T +deuteron magn. moment 0.433 073 482e-26 0.000 000 038e-26 J T^-1 +deuteron magn. moment to Bohr magneton ratio 0.466 975 4567e-3 0.000 000 0050e-3 +deuteron magn. moment to nuclear magneton ratio 0.857 438 2329 0.000 000 0092 +deuteron-electron magn. moment ratio -4.664 345 548e-4 0.000 000 050e-4 +deuteron-proton magn. moment ratio 0.307 012 2084 0.000 000 0045 +deuteron-neutron magn. moment ratio -0.448 206 52 0.000 000 11 +electron gyromagn. 
ratio 1.760 859 74e11 0.000 000 15e11 s^-1 T^-1 +electron gyromagn. ratio over 2 pi 28 024.9532 0.0024 MHz T^-1 +electron magn. moment -928.476 412e-26 0.000 080e-26 J T^-1 +electron magn. moment to Bohr magneton ratio -1.001 159 652 1859 0.000 000 000 0038 +electron magn. moment to nuclear magneton ratio -1838.281 971 07 0.000 000 85 +electron magn. moment anomaly 1.159 652 1859e-3 0.000 000 0038e-3 +electron to shielded proton magn. moment ratio -658.227 5956 0.000 0071 +electron to shielded helion magn. moment ratio 864.058 255 0.000 010 +electron-deuteron magn. moment ratio -2143.923 493 0.000 023 +electron-muon magn. moment ratio 206.766 9894 0.000 0054 +electron-neutron magn. moment ratio 960.920 50 0.000 23 +electron-proton magn. moment ratio -658.210 6862 0.000 0066 +magn. constant 12.566 370 614...e-7 0 N A^-2 +magn. flux quantum 2.067 833 72e-15 0.000 000 18e-15 Wb +muon magn. moment -4.490 447 99e-26 0.000 000 40e-26 J T^-1 +muon magn. moment to Bohr magneton ratio -4.841 970 45e-3 0.000 000 13e-3 +muon magn. moment to nuclear magneton ratio -8.890 596 98 0.000 000 23 +muon-proton magn. moment ratio -3.183 345 118 0.000 000 089 +neutron gyromagn. ratio 1.832 471 83e8 0.000 000 46e8 s^-1 T^-1 +neutron gyromagn. ratio over 2 pi 29.164 6950 0.000 0073 MHz T^-1 +neutron magn. moment -0.966 236 45e-26 0.000 000 24e-26 J T^-1 +neutron magn. moment to Bohr magneton ratio -1.041 875 63e-3 0.000 000 25e-3 +neutron magn. moment to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron to shielded proton magn. moment ratio -0.684 996 94 0.000 000 16 +neutron-electron magn. moment ratio 1.040 668 82e-3 0.000 000 25e-3 +neutron-proton magn. moment ratio -0.684 979 34 0.000 000 16 +proton gyromagn. ratio 2.675 222 05e8 0.000 000 23e8 s^-1 T^-1 +proton gyromagn. ratio over 2 pi 42.577 4813 0.000 0037 MHz T^-1 +proton magn. moment 1.410 606 71e-26 0.000 000 12e-26 J T^-1 +proton magn. moment to Bohr magneton ratio 1.521 032 206e-3 0.000 000 015e-3 +proton magn. moment to nuclear magneton ratio 2.792 847 351 0.000 000 028 +proton magn. shielding correction 25.689e-6 0.015e-6 +proton-neutron magn. moment ratio -1.459 898 05 0.000 000 34 +shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 +shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 +shielded helion magn. moment -1.074 553 024e-26 0.000 000 093e-26 J T^-1 +shielded helion magn. moment to Bohr magneton ratio -1.158 671 474e-3 0.000 000 014e-3 +shielded helion magn. moment to nuclear magneton ratio -2.127 497 723 0.000 000 025 +shielded helion to proton magn. moment ratio -0.761 766 562 0.000 000 012 +shielded helion to shielded proton magn. moment ratio -0.761 786 1313 0.000 000 0033 +shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 +shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 +shielded proton magn. moment 1.410 570 47e-26 0.000 000 12e-26 J T^-1 +shielded proton magn. moment to Bohr magneton ratio 1.520 993 132e-3 0.000 000 016e-3 +shielded proton magn. 
moment to nuclear magneton ratio 2.792 775 604 0.000 000 030 +{220} lattice spacing of silicon 192.015 5965e-12 0.000 0070e-12 m""" + +txt2006 = """\ +lattice spacing of silicon 192.015 5762 e-12 0.000 0050 e-12 m +alpha particle-electron mass ratio 7294.299 5365 0.000 0031 +alpha particle mass 6.644 656 20 e-27 0.000 000 33 e-27 kg +alpha particle mass energy equivalent 5.971 919 17 e-10 0.000 000 30 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 109 0.000 093 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 062 u +alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 062 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 51 0.000 000 000 41 +Angstrom star 1.000 014 98 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 538 782 e-27 0.000 000 083 e-27 kg +atomic mass constant energy equivalent 1.492 417 830 e-10 0.000 000 074 e-10 J +atomic mass constant energy equivalent in MeV 931.494 028 0.000 023 MeV +atomic mass unit-electron volt relationship 931.494 028 e6 0.000 023 e6 eV +atomic mass unit-hartree relationship 3.423 177 7149 e7 0.000 000 0049 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7369 e23 0.000 000 0032 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 671 e14 0.000 000 011 e14 m^-1 +atomic mass unit-joule relationship 1.492 417 830 e-10 0.000 000 074 e-10 J +atomic mass unit-kelvin relationship 1.080 9527 e13 0.000 0019 e13 K +atomic mass unit-kilogram relationship 1.660 538 782 e-27 0.000 000 083 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 533 e-53 0.000 000 081 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 95 e-65 0.000 000 31 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s +atomic unit of charge 1.602 176 487 e-19 0.000 000 040 e-19 C +atomic unit of charge density 1.081 202 300 e12 0.000 000 027 e12 C m^-3 +atomic unit of current 6.623 617 63 e-3 0.000 000 17 e-3 A +atomic unit of electric dipole mom. 8.478 352 81 e-30 0.000 000 21 e-30 C m +atomic unit of electric field 5.142 206 32 e11 0.000 000 13 e11 V m^-1 +atomic unit of electric field gradient 9.717 361 66 e21 0.000 000 24 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2536 e-41 0.000 000 0034 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 383 86 0.000 000 68 V +atomic unit of electric quadrupole mom. 4.486 551 07 e-40 0.000 000 11 e-40 C m^2 +atomic unit of energy 4.359 743 94 e-18 0.000 000 22 e-18 J +atomic unit of force 8.238 722 06 e-8 0.000 000 41 e-8 N +atomic unit of length 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m +atomic unit of mag. dipole mom. 1.854 801 830 e-23 0.000 000 046 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 382 e5 0.000 000 059 e5 T +atomic unit of magnetizability 7.891 036 433 e-29 0.000 000 027 e-29 J T^-2 +atomic unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +atomic unit of momentum 1.992 851 565 e-24 0.000 000 099 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326 505 e-17 0.000 000 000 016 e-17 s +atomic unit of velocity 2.187 691 2541 e6 0.000 000 0015 e6 m s^-1 +Avogadro constant 6.022 141 79 e23 0.000 000 30 e23 mol^-1 +Bohr magneton 927.400 915 e-26 0.000 023 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 7555 e-5 0.000 000 0079 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 246 04 e9 0.000 000 35 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 4515 0.000 0012 m^-1 T^-1 +Bohr magneton in K/T 0.671 7131 0.000 0012 K T^-1 +Bohr radius 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m +Boltzmann constant 1.380 6504 e-23 0.000 0024 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 343 e-5 0.000 015 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6644 e10 0.000 0036 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 56 0.000 12 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 2894 e-15 0.000 000 0058 e-15 m +Compton wavelength 2.426 310 2175 e-12 0.000 000 0033 e-12 m +Compton wavelength over 2 pi 386.159 264 59 e-15 0.000 000 53 e-15 m +conductance quantum 7.748 091 7004 e-5 0.000 000 0053 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 99 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 +deuteron-electron mass ratio 3670.482 9654 0.000 0016 +deuteron g factor 0.857 438 2308 0.000 000 0072 +deuteron mag. mom. 0.433 073 465 e-26 0.000 000 011 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 +deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 +deuteron mass 3.343 583 20 e-27 0.000 000 17 e-27 kg +deuteron mass energy equivalent 3.005 062 72 e-10 0.000 000 15 e-10 J +deuteron mass energy equivalent in MeV 1875.612 793 0.000 047 MeV +deuteron mass in u 2.013 553 212 724 0.000 000 000 078 u +deuteron molar mass 2.013 553 212 724 e-3 0.000 000 000 078 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 +deuteron-proton mass ratio 1.999 007 501 08 0.000 000 000 22 +deuteron rms charge radius 2.1402 e-15 0.0028 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 150 e11 0.000 000 044 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 +electron-deuteron mass ratio 2.724 437 1093 e-4 0.000 000 0012 e-4 +electron g factor -2.002 319 304 3622 0.000 000 000 0015 +electron gyromag. ratio 1.760 859 770 e11 0.000 000 044 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.953 64 0.000 70 MHz T^-1 +electron mag. mom. -928.476 377 e-26 0.000 023 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 181 11 e-3 0.000 000 000 74 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 11 0.000 000 000 000 74 +electron mag. mom. to nuclear magneton ratio -1838.281 970 92 0.000 000 80 +electron mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +electron mass energy equivalent 8.187 104 38 e-14 0.000 000 41 e-14 J +electron mass energy equivalent in MeV 0.510 998 910 0.000 000 013 MeV +electron mass in u 5.485 799 0943 e-4 0.000 000 0023 e-4 u +electron molar mass 5.485 799 0943 e-7 0.000 000 0023 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9877 0.000 0052 +electron-muon mass ratio 4.836 331 71 e-3 0.000 000 12 e-3 +electron-neutron mag. 
mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4459 e-4 0.000 000 0033 e-4 +electron-proton mag. mom. ratio -658.210 6848 0.000 0054 +electron-proton mass ratio 5.446 170 2177 e-4 0.000 000 0024 e-4 +electron-tau mass ratio 2.875 64 e-4 0.000 47 e-4 +electron to alpha particle mass ratio 1.370 933 555 70 e-4 0.000 000 000 58 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron volt 1.602 176 487 e-19 0.000 000 040 e-19 J +electron volt-atomic mass unit relationship 1.073 544 188 e-9 0.000 000 027 e-9 u +electron volt-hartree relationship 3.674 932 540 e-2 0.000 000 092 e-2 E_h +electron volt-hertz relationship 2.417 989 454 e14 0.000 000 060 e14 Hz +electron volt-inverse meter relationship 8.065 544 65 e5 0.000 000 20 e5 m^-1 +electron volt-joule relationship 1.602 176 487 e-19 0.000 000 040 e-19 J +electron volt-kelvin relationship 1.160 4505 e4 0.000 0020 e4 K +electron volt-kilogram relationship 1.782 661 758 e-36 0.000 000 044 e-36 kg +elementary charge 1.602 176 487 e-19 0.000 000 040 e-19 C +elementary charge over h 2.417 989 454 e14 0.000 000 060 e14 A J^-1 +Faraday constant 96 485.3399 0.0024 C mol^-1 +Faraday constant for conventional electric current 96 485.3401 0.0048 C_90 mol^-1 +Fermi coupling constant 1.166 37 e-5 0.000 01 e-5 GeV^-2 +fine-structure constant 7.297 352 5376 e-3 0.000 000 0050 e-3 +first radiation constant 3.741 771 18 e-16 0.000 000 19 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 759 e-16 0.000 000 059 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 2986 e-8 0.000 000 0042 e-8 u +hartree-electron volt relationship 27.211 383 86 0.000 000 68 eV +Hartree energy 4.359 743 94 e-18 0.000 000 22 e-18 J +Hartree energy in eV 27.211 383 86 0.000 000 68 eV +hartree-hertz relationship 6.579 683 920 722 e15 0.000 000 000 044 e15 Hz +hartree-inverse meter relationship 2.194 746 313 705 e7 0.000 000 000 015 e7 m^-1 +hartree-joule relationship 4.359 743 94 e-18 0.000 000 22 e-18 J +hartree-kelvin relationship 3.157 7465 e5 0.000 0055 e5 K +hartree-kilogram relationship 4.850 869 34 e-35 0.000 000 24 e-35 kg +helion-electron mass ratio 5495.885 2765 0.000 0052 +helion mass 5.006 411 92 e-27 0.000 000 25 e-27 kg +helion mass energy equivalent 4.499 538 64 e-10 0.000 000 22 e-10 J +helion mass energy equivalent in MeV 2808.391 383 0.000 070 MeV +helion mass in u 3.014 932 2473 0.000 000 0026 u +helion molar mass 3.014 932 2473 e-3 0.000 000 0026 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 6713 0.000 000 0026 +hertz-atomic mass unit relationship 4.439 821 6294 e-24 0.000 000 0064 e-24 u +hertz-electron volt relationship 4.135 667 33 e-15 0.000 000 10 e-15 eV +hertz-hartree relationship 1.519 829 846 006 e-16 0.000 000 000010e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 068 96 e-34 0.000 000 33 e-34 J +hertz-kelvin relationship 4.799 2374 e-11 0.000 0084 e-11 K +hertz-kilogram relationship 7.372 496 00 e-51 0.000 000 37 e-51 kg +inverse fine-structure constant 137.035 999 679 0.000 000 094 +inverse meter-atomic mass unit relationship 1.331 025 0394 e-15 0.000 000 0019 e-15 u +inverse meter-electron volt relationship 1.239 841 875 e-6 0.000 000 031 e-6 eV +inverse meter-hartree relationship 4.556 335 252 760 e-8 0.000 000 000 030 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 501 e-25 0.000 000 099 e-25 J +inverse meter-kelvin relationship 1.438 7752 e-2 0.000 0025 e-2 K +inverse meter-kilogram relationship 2.210 218 70 e-42 0.000 000 11 e-42 kg +inverse of conductance quantum 12 906.403 7787 0.000 0088 ohm +Josephson constant 483 597.891 e9 0.012 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 536 41 e9 0.000 000 33 e9 u +joule-electron volt relationship 6.241 509 65 e18 0.000 000 16 e18 eV +joule-hartree relationship 2.293 712 69 e17 0.000 000 11 e17 E_h +joule-hertz relationship 1.509 190 450 e33 0.000 000 075 e33 Hz +joule-inverse meter relationship 5.034 117 47 e24 0.000 000 25 e24 m^-1 +joule-kelvin relationship 7.242 963 e22 0.000 013 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 098 e-14 0.000 016 e-14 u +kelvin-electron volt relationship 8.617 343 e-5 0.000 015 e-5 eV +kelvin-hartree relationship 3.166 8153 e-6 0.000 0055 e-6 E_h +kelvin-hertz relationship 2.083 6644 e10 0.000 0036 e10 Hz +kelvin-inverse meter relationship 69.503 56 0.000 12 m^-1 +kelvin-joule relationship 1.380 6504 e-23 0.000 0024 e-23 J +kelvin-kilogram relationship 1.536 1807 e-40 0.000 0027 e-40 kg +kilogram-atomic mass unit relationship 6.022 141 79 e26 0.000 000 30 e26 u +kilogram-electron volt relationship 5.609 589 12 e35 0.000 000 14 e35 eV +kilogram-hartree relationship 2.061 486 16 e34 0.000 000 10 e34 E_h +kilogram-hertz relationship 1.356 392 733 e50 0.000 000 068 e50 Hz +kilogram-inverse meter relationship 4.524 439 15 e41 0.000 000 23 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 651 e39 0.000 011 e39 K +lattice parameter of silicon 543.102 064 e-12 0.000 014 e-12 m +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7774 e25 0.000 0047 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. flux quantum 2.067 833 667 e-15 0.000 000 052 e-15 Wb +molar gas constant 8.314 472 0.000 015 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 6821 e-10 0.000 000 0057 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 564 72 0.000 000 000 17 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 981 e-3 0.000 040 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 996 e-3 0.000 039 e-3 m^3 mol^-1 +molar volume of silicon 12.058 8349 e-6 0.000 0011 e-6 m^3 mol^-1 +Mo x unit 1.002 099 55 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 04 e-15 0.000 000 30 e-15 m +muon Compton wavelength over 2 pi 1.867 594 295 e-15 0.000 000 047 e-15 m +muon-electron mass ratio 206.768 2823 0.000 0052 +muon g factor -2.002 331 8414 0.000 000 0012 +muon mag. mom. -4.490 447 86 e-26 0.000 000 16 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 69 e-3 0.000 000 60 e-3 +muon mag. mom. 
to Bohr magneton ratio -4.841 970 49 e-3 0.000 000 12 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 23 +muon mass 1.883 531 30 e-28 0.000 000 11 e-28 kg +muon mass energy equivalent 1.692 833 510 e-11 0.000 000 095 e-11 J +muon mass energy equivalent in MeV 105.658 3668 0.000 0038 MeV +muon mass in u 0.113 428 9256 0.000 000 0029 u +muon molar mass 0.113 428 9256 e-3 0.000 000 0029 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5167 0.000 000 0029 +muon-proton mag. mom. ratio -3.183 345 137 0.000 000 085 +muon-proton mass ratio 0.112 609 5261 0.000 000 0029 +muon-tau mass ratio 5.945 92 e-2 0.000 97 e-2 +natural unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s +natural unit of action in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s +natural unit of energy 8.187 104 38 e-14 0.000 000 41 e-14 J +natural unit of energy in MeV 0.510 998 910 0.000 000 013 MeV +natural unit of length 386.159 264 59 e-15 0.000 000 53 e-15 m +natural unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +natural unit of momentum 2.730 924 06 e-22 0.000 000 14 e-22 kg m s^-1 +natural unit of momentum in MeV/c 0.510 998 910 0.000 000 013 MeV/c +natural unit of time 1.288 088 6570 e-21 0.000 000 0018 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 8951 e-15 0.000 000 0020 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 413 82 e-15 0.000 000 000 31 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 6605 0.000 0011 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 85 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6954 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 41 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 211 e-27 0.000 000 084 e-27 kg +neutron mass energy equivalent 1.505 349 505 e-10 0.000 000 075 e-10 J +neutron mass energy equivalent in MeV 939.565 346 0.000 023 MeV +neutron mass in u 1.008 664 915 97 0.000 000 000 43 u +neutron molar mass 1.008 664 915 97 e-3 0.000 000 000 43 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 09 0.000 000 23 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass ratio 1.001 378 419 18 0.000 000 000 46 +neutron-tau mass ratio 0.528 740 0.000 086 +neutron to shielded proton mag. mom. 
ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 28 e-11 0.000 67 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 81 e-39 0.000 67 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 24 e-27 0.000 000 13 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2326 e-8 0.000 000 0045 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 616 e-2 0.000 000 064 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2637 e-4 0.000 0064 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 84 0.000 000 19 MHz T^-1 +Planck constant 6.626 068 96 e-34 0.000 000 33 e-34 J s +Planck constant in eV s 4.135 667 33 e-15 0.000 000 10 e-15 eV s +Planck constant over 2 pi 1.054 571 628 e-34 0.000 000 053 e-34 J s +Planck constant over 2 pi in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9631 0.000 0049 MeV fm +Planck length 1.616 252 e-35 0.000 081 e-35 m +Planck mass 2.176 44 e-8 0.000 11 e-8 kg +Planck mass energy equivalent in GeV 1.220 892 e19 0.000 061 e19 GeV +Planck temperature 1.416 785 e32 0.000 071 e32 K +Planck time 5.391 24 e-44 0.000 27 e-44 s +proton charge to mass quotient 9.578 833 92 e7 0.000 000 24 e7 C kg^-1 +proton Compton wavelength 1.321 409 8446 e-15 0.000 000 0019 e-15 m +proton Compton wavelength over 2 pi 0.210 308 908 61 e-15 0.000 000 000 30 e-15 m +proton-electron mass ratio 1836.152 672 47 0.000 000 80 +proton g factor 5.585 694 713 0.000 000 046 +proton gyromag. ratio 2.675 222 099 e8 0.000 000 070 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 4821 0.000 0011 MHz T^-1 +proton mag. mom. 1.410 606 662 e-26 0.000 000 037 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 209 e-3 0.000 000 012 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023 +proton mag. shielding correction 25.694 e-6 0.014 e-6 +proton mass 1.672 621 637 e-27 0.000 000 083 e-27 kg +proton mass energy equivalent 1.503 277 359 e-10 0.000 000 075 e-10 J +proton mass energy equivalent in MeV 938.272 013 0.000 023 MeV +proton mass in u 1.007 276 466 77 0.000 000 000 10 u +proton molar mass 1.007 276 466 77 e-3 0.000 000 000 10 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 39 0.000 000 23 +proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 24 0.000 000 000 46 +proton rms charge radius 0.8768 e-15 0.0069 e-15 m +proton-tau mass ratio 0.528 012 0.000 086 +quantum of circulation 3.636 947 5199 e-4 0.000 000 0050 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 040 e-4 0.000 000 010 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 527 0.000 073 m^-1 +Rydberg constant times c in Hz 3.289 841 960 361 e15 0.000 000 000 022 e15 Hz +Rydberg constant times hc in eV 13.605 691 93 0.000 000 34 eV +Rydberg constant times hc in J 2.179 871 97 e-18 0.000 000 11 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7047 0.000 0044 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8677 0.000 0044 +second radiation constant 1.438 7752 e-2 0.000 0025 e-2 m K +shielded helion gyromag. ratio 2.037 894 730 e8 0.000 000 056 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 101 98 0.000 000 90 MHz T^-1 +shielded helion mag. mom. -1.074 552 982 e-26 0.000 000 030 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 +shielded helion to proton mag. mom. 
ratio -0.761 766 558 0.000 000 011 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 362 e8 0.000 000 073 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 3881 0.000 0012 MHz T^-1 +shielded proton mag. mom. 1.410 570 419 e-26 0.000 000 038 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +Stefan-Boltzmann constant 5.670 400 e-8 0.000 040 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 72 e-15 0.000 11 e-15 m +tau Compton wavelength over 2 pi 0.111 046 e-15 0.000 018 e-15 m +tau-electron mass ratio 3477.48 0.57 +tau mass 3.167 77 e-27 0.000 52 e-27 kg +tau mass energy equivalent 2.847 05 e-10 0.000 46 e-10 J +tau mass energy equivalent in MeV 1776.99 0.29 MeV +tau mass in u 1.907 68 0.000 31 u +tau molar mass 1.907 68 e-3 0.000 31 e-3 kg mol^-1 +tau-muon mass ratio 16.8183 0.0027 +tau-neutron mass ratio 1.891 29 0.000 31 +tau-proton mass ratio 1.893 90 0.000 31 +Thomson cross section 0.665 245 8558 e-28 0.000 000 0027 e-28 m^2 +triton-electron mag. mom. ratio -1.620 514 423 e-3 0.000 000 021 e-3 +triton-electron mass ratio 5496.921 5269 0.000 0051 +triton g factor 5.957 924 896 0.000 000 076 +triton mag. mom. 1.504 609 361 e-26 0.000 000 042 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038 +triton mass 5.007 355 88 e-27 0.000 000 25 e-27 kg +triton mass energy equivalent 4.500 387 03 e-10 0.000 000 22 e-10 J +triton mass energy equivalent in MeV 2808.920 906 0.000 070 MeV +triton mass in u 3.015 500 7134 0.000 000 0025 u +triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 +triton-neutron mag. mom. ratio -1.557 185 53 0.000 000 37 +triton-proton mag. mom. 
ratio 1.066 639 908 0.000 000 010 +triton-proton mass ratio 2.993 717 0309 0.000 000 0025 +unified atomic mass unit 1.660 538 782 e-27 0.000 000 083 e-27 kg +von Klitzing constant 25 812.807 557 0.000 018 ohm +weak mixing angle 0.222 55 0.000 56 +Wien frequency displacement law constant 5.878 933 e10 0.000 010 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7685 e-3 0.000 0051 e-3 m K""" + +txt2010 = """\ +{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m +alpha particle-electron mass ratio 7294.299 5361 0.000 0029 +alpha particle mass 6.644 656 75 e-27 0.000 000 29 e-27 kg +alpha particle mass energy equivalent 5.971 919 67 e-10 0.000 000 26 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 240 0.000 082 MeV +alpha particle mass in u 4.001 506 179 125 0.000 000 000 062 u +alpha particle molar mass 4.001 506 179 125 e-3 0.000 000 000 062 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 33 0.000 000 000 36 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 538 921 e-27 0.000 000 073 e-27 kg +atomic mass constant energy equivalent 1.492 417 954 e-10 0.000 000 066 e-10 J +atomic mass constant energy equivalent in MeV 931.494 061 0.000 021 MeV +atomic mass unit-electron volt relationship 931.494 061 e6 0.000 021 e6 eV +atomic mass unit-hartree relationship 3.423 177 6845 e7 0.000 000 0024 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7168 e23 0.000 000 0016 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6042 e14 0.000 000 0053 e14 m^-1 +atomic mass unit-joule relationship 1.492 417 954 e-10 0.000 000 066 e-10 J +atomic mass unit-kelvin relationship 1.080 954 08 e13 0.000 000 98 e13 K +atomic mass unit-kilogram relationship 1.660 538 921 e-27 0.000 000 073 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 449 e-53 0.000 000 071 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 54 e-65 0.000 000 28 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s +atomic unit of charge 1.602 176 565 e-19 0.000 000 035 e-19 C +atomic unit of charge density 1.081 202 338 e12 0.000 000 024 e12 C m^-3 +atomic unit of current 6.623 617 95 e-3 0.000 000 15 e-3 A +atomic unit of electric dipole mom. 8.478 353 26 e-30 0.000 000 19 e-30 C m +atomic unit of electric field 5.142 206 52 e11 0.000 000 11 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 00 e21 0.000 000 21 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2754 e-41 0.000 000 0016 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 385 05 0.000 000 60 V +atomic unit of electric quadrupole mom. 4.486 551 331 e-40 0.000 000 099 e-40 C m^2 +atomic unit of energy 4.359 744 34 e-18 0.000 000 19 e-18 J +atomic unit of force 8.238 722 78 e-8 0.000 000 36 e-8 N +atomic unit of length 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m +atomic unit of mag. dipole mom. 1.854 801 936 e-23 0.000 000 041 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 464 e5 0.000 000 052 e5 T +atomic unit of magnetizability 7.891 036 607 e-29 0.000 000 013 e-29 J T^-2 +atomic unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +atomic unit of mom.um 1.992 851 740 e-24 0.000 000 088 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326 502e-17 0.000 000 000 012e-17 s +atomic unit of velocity 2.187 691 263 79 e6 0.000 000 000 71 e6 m s^-1 +Avogadro constant 6.022 141 29 e23 0.000 000 27 e23 mol^-1 +Bohr magneton 927.400 968 e-26 0.000 020 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 8066 e-5 0.000 000 0038 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 245 55 e9 0.000 000 31 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 4498 0.000 0010 m^-1 T^-1 +Bohr magneton in K/T 0.671 713 88 0.000 000 61 K T^-1 +Bohr radius 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m +Boltzmann constant 1.380 6488 e-23 0.000 0013 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 3324 e-5 0.000 0078 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6618 e10 0.000 0019 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 476 0.000 063 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 3267 e-15 0.000 000 0027 e-15 m +Compton wavelength 2.426 310 2389 e-12 0.000 000 0016 e-12 m +Compton wavelength over 2 pi 386.159 268 00 e-15 0.000 000 25 e-15 m +conductance quantum 7.748 091 7346 e-5 0.000 000 0025 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 +deuteron-electron mass ratio 3670.482 9652 0.000 0015 +deuteron g factor 0.857 438 2308 0.000 000 0072 +deuteron mag. mom. 0.433 073 489 e-26 0.000 000 010 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 +deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 +deuteron mass 3.343 583 48 e-27 0.000 000 15 e-27 kg +deuteron mass energy equivalent 3.005 062 97 e-10 0.000 000 13 e-10 J +deuteron mass energy equivalent in MeV 1875.612 859 0.000 041 MeV +deuteron mass in u 2.013 553 212 712 0.000 000 000 077 u +deuteron molar mass 2.013 553 212 712 e-3 0.000 000 000 077 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 +deuteron-proton mass ratio 1.999 007 500 97 0.000 000 000 18 +deuteron rms charge radius 2.1424 e-15 0.0021 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 088 e11 0.000 000 039 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 +electron-deuteron mass ratio 2.724 437 1095 e-4 0.000 000 0011 e-4 +electron g factor -2.002 319 304 361 53 0.000 000 000 000 53 +electron gyromag. ratio 1.760 859 708 e11 0.000 000 039 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.952 66 0.000 62 MHz T^-1 +electron-helion mass ratio 1.819 543 0761 e-4 0.000 000 0017 e-4 +electron mag. mom. -928.476 430 e-26 0.000 021 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 180 76 e-3 0.000 000 000 27 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 76 0.000 000 000 000 27 +electron mag. mom. to nuclear magneton ratio -1838.281 970 90 0.000 000 75 +electron mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +electron mass energy equivalent 8.187 105 06 e-14 0.000 000 36 e-14 J +electron mass energy equivalent in MeV 0.510 998 928 0.000 000 011 MeV +electron mass in u 5.485 799 0946 e-4 0.000 000 0022 e-4 u +electron molar mass 5.485 799 0946 e-7 0.000 000 0022 e-7 kg mol^-1 +electron-muon mag. mom. 
ratio 206.766 9896 0.000 0052 +electron-muon mass ratio 4.836 331 66 e-3 0.000 000 12 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4461 e-4 0.000 000 0032 e-4 +electron-proton mag. mom. ratio -658.210 6848 0.000 0054 +electron-proton mass ratio 5.446 170 2178 e-4 0.000 000 0022 e-4 +electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4 +electron to alpha particle mass ratio 1.370 933 555 78 e-4 0.000 000 000 55 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 0653 e-4 0.000 000 0017 e-4 +electron volt 1.602 176 565 e-19 0.000 000 035 e-19 J +electron volt-atomic mass unit relationship 1.073 544 150 e-9 0.000 000 024 e-9 u +electron volt-hartree relationship 3.674 932 379 e-2 0.000 000 081 e-2 E_h +electron volt-hertz relationship 2.417 989 348 e14 0.000 000 053 e14 Hz +electron volt-inverse meter relationship 8.065 544 29 e5 0.000 000 18 e5 m^-1 +electron volt-joule relationship 1.602 176 565 e-19 0.000 000 035 e-19 J +electron volt-kelvin relationship 1.160 4519 e4 0.000 0011 e4 K +electron volt-kilogram relationship 1.782 661 845 e-36 0.000 000 039 e-36 kg +elementary charge 1.602 176 565 e-19 0.000 000 035 e-19 C +elementary charge over h 2.417 989 348 e14 0.000 000 053 e14 A J^-1 +Faraday constant 96 485.3365 0.0021 C mol^-1 +Faraday constant for conventional electric current 96 485.3321 0.0043 C_90 mol^-1 +Fermi coupling constant 1.166 364 e-5 0.000 005 e-5 GeV^-2 +fine-structure constant 7.297 352 5698 e-3 0.000 000 0024 e-3 +first radiation constant 3.741 771 53 e-16 0.000 000 17 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 869 e-16 0.000 000 053 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 3246 e-8 0.000 000 0021 e-8 u +hartree-electron volt relationship 27.211 385 05 0.000 000 60 eV +Hartree energy 4.359 744 34 e-18 0.000 000 19 e-18 J +Hartree energy in eV 27.211 385 05 0.000 000 60 eV +hartree-hertz relationship 6.579 683 920 729 e15 0.000 000 000 033 e15 Hz +hartree-inverse meter relationship 2.194 746 313 708 e7 0.000 000 000 011 e7 m^-1 +hartree-joule relationship 4.359 744 34 e-18 0.000 000 19 e-18 J +hartree-kelvin relationship 3.157 7504 e5 0.000 0029 e5 K +hartree-kilogram relationship 4.850 869 79 e-35 0.000 000 21 e-35 kg +helion-electron mass ratio 5495.885 2754 0.000 0050 +helion g factor -4.255 250 613 0.000 000 050 +helion mag. mom. -1.074 617 486 e-26 0.000 000 027 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 306 0.000 000 025 +helion mass 5.006 412 34 e-27 0.000 000 22 e-27 kg +helion mass energy equivalent 4.499 539 02 e-10 0.000 000 20 e-10 J +helion mass energy equivalent in MeV 2808.391 482 0.000 062 MeV +helion mass in u 3.014 932 2468 0.000 000 0025 u +helion molar mass 3.014 932 2468 e-3 0.000 000 0025 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 6707 0.000 000 0025 +hertz-atomic mass unit relationship 4.439 821 6689 e-24 0.000 000 0031 e-24 u +hertz-electron volt relationship 4.135 667 516 e-15 0.000 000 091 e-15 eV +hertz-hartree relationship 1.519 829 8460045e-16 0.000 000 0000076e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 069 57 e-34 0.000 000 29 e-34 J +hertz-kelvin relationship 4.799 2434 e-11 0.000 0044 e-11 K +hertz-kilogram relationship 7.372 496 68 e-51 0.000 000 33 e-51 kg +inverse fine-structure constant 137.035 999 074 0.000 000 044 +inverse meter-atomic mass unit relationship 1.331 025 051 20 e-15 0.000 000 000 94 e-15 u +inverse meter-electron volt relationship 1.239 841 930 e-6 0.000 000 027 e-6 eV +inverse meter-hartree relationship 4.556 335 252 755 e-8 0.000 000 000 023 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 684 e-25 0.000 000 088 e-25 J +inverse meter-kelvin relationship 1.438 7770 e-2 0.000 0013 e-2 K +inverse meter-kilogram relationship 2.210 218 902 e-42 0.000 000 098 e-42 kg +inverse of conductance quantum 12 906.403 7217 0.000 0042 ohm +Josephson constant 483 597.870 e9 0.011 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 535 85 e9 0.000 000 30 e9 u +joule-electron volt relationship 6.241 509 34 e18 0.000 000 14 e18 eV +joule-hartree relationship 2.293 712 48 e17 0.000 000 10 e17 E_h +joule-hertz relationship 1.509 190 311 e33 0.000 000 067 e33 Hz +joule-inverse meter relationship 5.034 117 01 e24 0.000 000 22 e24 m^-1 +joule-kelvin relationship 7.242 9716 e22 0.000 0066 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 0868 e-14 0.000 0084 e-14 u +kelvin-electron volt relationship 8.617 3324 e-5 0.000 0078 e-5 eV +kelvin-hartree relationship 3.166 8114 e-6 0.000 0029 e-6 E_h +kelvin-hertz relationship 2.083 6618 e10 0.000 0019 e10 Hz +kelvin-inverse meter relationship 69.503 476 0.000 063 m^-1 +kelvin-joule relationship 1.380 6488 e-23 0.000 0013 e-23 J +kelvin-kilogram relationship 1.536 1790 e-40 0.000 0014 e-40 kg +kilogram-atomic mass unit relationship 6.022 141 29 e26 0.000 000 27 e26 u +kilogram-electron volt relationship 5.609 588 85 e35 0.000 000 12 e35 eV +kilogram-hartree relationship 2.061 485 968 e34 0.000 000 091 e34 E_h +kilogram-hertz relationship 1.356 392 608 e50 0.000 000 060 e50 Hz +kilogram-inverse meter relationship 4.524 438 73 e41 0.000 000 20 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 6582 e39 0.000 0059 e39 K +lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 6462 e25 0.000 0024 e25 m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7805 e25 0.000 0024 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. flux quantum 2.067 833 758 e-15 0.000 000 046 e-15 Wb +molar gas constant 8.314 4621 0.000 0075 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 7176 e-10 0.000 000 0028 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 565 779 0.000 000 000 084 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 953 e-3 0.000 021 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 968 e-3 0.000 020 e-3 m^3 mol^-1 +molar volume of silicon 12.058 833 01 e-6 0.000 000 80 e-6 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 03 e-15 0.000 000 30 e-15 m +muon Compton wavelength over 2 pi 1.867 594 294 e-15 0.000 000 047 e-15 m +muon-electron mass ratio 206.768 2843 0.000 0052 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. 
-4.490 448 07 e-26 0.000 000 15 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 91 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 44 e-3 0.000 000 12 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 596 97 0.000 000 22 +muon mass 1.883 531 475 e-28 0.000 000 096 e-28 kg +muon mass energy equivalent 1.692 833 667 e-11 0.000 000 086 e-11 J +muon mass energy equivalent in MeV 105.658 3715 0.000 0035 MeV +muon mass in u 0.113 428 9267 0.000 000 0029 u +muon molar mass 0.113 428 9267 e-3 0.000 000 0029 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5177 0.000 000 0028 +muon-proton mag. mom. ratio -3.183 345 107 0.000 000 084 +muon-proton mass ratio 0.112 609 5272 0.000 000 0028 +muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2 +natural unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s +natural unit of action in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s +natural unit of energy 8.187 105 06 e-14 0.000 000 36 e-14 J +natural unit of energy in MeV 0.510 998 928 0.000 000 011 MeV +natural unit of length 386.159 268 00 e-15 0.000 000 25 e-15 m +natural unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +natural unit of mom.um 2.730 924 29 e-22 0.000 000 12 e-22 kg m s^-1 +natural unit of mom.um in MeV/c 0.510 998 928 0.000 000 011 MeV/c +natural unit of time 1.288 088 668 33 e-21 0.000 000 000 83 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 9068 e-15 0.000 000 0011 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 415 68 e-15 0.000 000 000 17 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 6605 0.000 0011 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 79 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6943 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 47 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 72 0.000 000 45 +neutron mass 1.674 927 351 e-27 0.000 000 074 e-27 kg +neutron mass energy equivalent 1.505 349 631 e-10 0.000 000 066 e-10 J +neutron mass energy equivalent in MeV 939.565 379 0.000 021 MeV +neutron mass in u 1.008 664 916 00 0.000 000 000 43 u +neutron molar mass 1.008 664 916 00 e-3 0.000 000 000 43 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 00 0.000 000 22 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 573 92 e-30 0.000 000 76 e-30 +neutron-proton mass difference energy equivalent 2.072 146 50 e-13 0.000 000 68 e-13 +neutron-proton mass difference energy equivalent in MeV 1.293 332 17 0.000 000 42 +neutron-proton mass difference in u 0.001 388 449 19 0.000 000 000 45 +neutron-proton mass ratio 1.001 378 419 17 0.000 000 000 45 +neutron-tau mass ratio 0.528 790 0.000 048 +neutron to shielded proton mag. mom. 
ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.673 84 e-11 0.000 80 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 37 e-39 0.000 80 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 53 e-27 0.000 000 11 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2605 e-8 0.000 000 0022 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 527 e-2 0.000 000 056 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2682 e-4 0.000 0033 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 57 0.000 000 17 MHz T^-1 +Planck constant 6.626 069 57 e-34 0.000 000 29 e-34 J s +Planck constant in eV s 4.135 667 516 e-15 0.000 000 091 e-15 eV s +Planck constant over 2 pi 1.054 571 726 e-34 0.000 000 047 e-34 J s +Planck constant over 2 pi in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9718 0.000 0044 MeV fm +Planck length 1.616 199 e-35 0.000 097 e-35 m +Planck mass 2.176 51 e-8 0.000 13 e-8 kg +Planck mass energy equivalent in GeV 1.220 932 e19 0.000 073 e19 GeV +Planck temperature 1.416 833 e32 0.000 085 e32 K +Planck time 5.391 06 e-44 0.000 32 e-44 s +proton charge to mass quotient 9.578 833 58 e7 0.000 000 21 e7 C kg^-1 +proton Compton wavelength 1.321 409 856 23 e-15 0.000 000 000 94 e-15 m +proton Compton wavelength over 2 pi 0.210 308 910 47 e-15 0.000 000 000 15 e-15 m +proton-electron mass ratio 1836.152 672 45 0.000 000 75 +proton g factor 5.585 694 713 0.000 000 046 +proton gyromag. ratio 2.675 222 005 e8 0.000 000 063 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 4806 0.000 0010 MHz T^-1 +proton mag. mom. 1.410 606 743 e-26 0.000 000 033 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 210 e-3 0.000 000 012 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023 +proton mag. shielding correction 25.694 e-6 0.014 e-6 +proton mass 1.672 621 777 e-27 0.000 000 074 e-27 kg +proton mass energy equivalent 1.503 277 484 e-10 0.000 000 066 e-10 J +proton mass energy equivalent in MeV 938.272 046 0.000 021 MeV +proton mass in u 1.007 276 466 812 0.000 000 000 090 u +proton molar mass 1.007 276 466 812 e-3 0.000 000 000 090 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 31 0.000 000 22 +proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 26 0.000 000 000 45 +proton rms charge radius 0.8775 e-15 0.0051 e-15 m +proton-tau mass ratio 0.528 063 0.000 048 +quantum of circulation 3.636 947 5520 e-4 0.000 000 0024 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 1040 e-4 0.000 000 0047 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 539 0.000 055 m^-1 +Rydberg constant times c in Hz 3.289 841 960 364 e15 0.000 000 000 017 e15 Hz +Rydberg constant times hc in eV 13.605 692 53 0.000 000 30 eV +Rydberg constant times hc in J 2.179 872 171 e-18 0.000 000 096 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7078 0.000 0023 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8708 0.000 0023 +second radiation constant 1.438 7770 e-2 0.000 0013 e-2 m K +shielded helion gyromag. ratio 2.037 894 659 e8 0.000 000 051 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 100 84 0.000 000 81 MHz T^-1 +shielded helion mag. mom. -1.074 553 044 e-26 0.000 000 027 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 +shielded helion to proton mag. mom. 
ratio -0.761 766 558 0.000 000 011 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 268 e8 0.000 000 066 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 3866 0.000 0010 MHz T^-1 +shielded proton mag. mom. 1.410 570 499 e-26 0.000 000 035 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 373 e-8 0.000 021 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m +tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m +tau-electron mass ratio 3477.15 0.31 +tau mass 3.167 47 e-27 0.000 29 e-27 kg +tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J +tau mass energy equivalent in MeV 1776.82 0.16 MeV +tau mass in u 1.907 49 0.000 17 u +tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1 +tau-muon mass ratio 16.8167 0.0015 +tau-neutron mass ratio 1.891 11 0.000 17 +tau-proton mass ratio 1.893 72 0.000 17 +Thomson cross section 0.665 245 8734 e-28 0.000 000 0013 e-28 m^2 +triton-electron mass ratio 5496.921 5267 0.000 0050 +triton g factor 5.957 924 896 0.000 000 076 +triton mag. mom. 1.504 609 447 e-26 0.000 000 038 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038 +triton mass 5.007 356 30 e-27 0.000 000 22 e-27 kg +triton mass energy equivalent 4.500 387 41 e-10 0.000 000 20 e-10 J +triton mass energy equivalent in MeV 2808.921 005 0.000 062 MeV +triton mass in u 3.015 500 7134 0.000 000 0025 u +triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 0308 0.000 000 0025 +unified atomic mass unit 1.660 538 921 e-27 0.000 000 073 e-27 kg +von Klitzing constant 25 812.807 4434 0.000 0084 ohm +weak mixing angle 0.2223 0.0021 +Wien frequency displacement law constant 5.878 9254 e10 0.000 0053 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7721 e-3 0.000 0026 e-3 m K""" + +txt2014 = """\ +{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m +alpha particle-electron mass ratio 7294.299 541 36 0.000 000 24 +alpha particle mass 6.644 657 230 e-27 0.000 000 082 e-27 kg +alpha particle mass energy equivalent 5.971 920 097 e-10 0.000 000 073 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 378 0.000 023 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u +alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 063 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 07 0.000 000 000 36 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 539 040 e-27 0.000 000 020 e-27 kg +atomic mass constant energy equivalent 1.492 418 062 e-10 0.000 000 018 e-10 J +atomic mass constant energy equivalent in MeV 931.494 0954 0.000 0057 MeV +atomic mass unit-electron volt relationship 931.494 0954 e6 0.000 0057 e6 eV +atomic mass unit-hartree relationship 3.423 177 6902 e7 0.000 000 0016 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7206 e23 0.000 000 0010 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6166 e14 0.000 000 0034 e14 m^-1 +atomic mass unit-joule relationship 1.492 
418 062 e-10 0.000 000 018 e-10 J +atomic mass unit-kelvin relationship 1.080 954 38 e13 0.000 000 62 e13 K +atomic mass unit-kilogram relationship 1.660 539 040 e-27 0.000 000 020 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 329 e-53 0.000 000 020 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 085 e-65 0.000 000 077 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s +atomic unit of charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C +atomic unit of charge density 1.081 202 3770 e12 0.000 000 0067 e12 C m^-3 +atomic unit of current 6.623 618 183 e-3 0.000 000 041 e-3 A +atomic unit of electric dipole mom. 8.478 353 552 e-30 0.000 000 052 e-30 C m +atomic unit of electric field 5.142 206 707 e11 0.000 000 032 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 356 e21 0.000 000 060 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2731 e-41 0.000 000 0011 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 386 02 0.000 000 17 V +atomic unit of electric quadrupole mom. 4.486 551 484 e-40 0.000 000 028 e-40 C m^2 +atomic unit of energy 4.359 744 650 e-18 0.000 000 054 e-18 J +atomic unit of force 8.238 723 36 e-8 0.000 000 10 e-8 N +atomic unit of length 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m +atomic unit of mag. dipole mom. 1.854 801 999 e-23 0.000 000 011 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 550 e5 0.000 000 014 e5 T +atomic unit of magnetizability 7.891 036 5886 e-29 0.000 000 0090 e-29 J T^-2 +atomic unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +atomic unit of mom.um 1.992 851 882 e-24 0.000 000 024 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326509e-17 0.000 000 000014e-17 s +atomic unit of velocity 2.187 691 262 77 e6 0.000 000 000 50 e6 m s^-1 +Avogadro constant 6.022 140 857 e23 0.000 000 074 e23 mol^-1 +Bohr magneton 927.400 9994 e-26 0.000 0057 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 8012 e-5 0.000 000 0026 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 245 042 e9 0.000 000 086 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 448 14 0.000 000 29 m^-1 T^-1 +Bohr magneton in K/T 0.671 714 05 0.000 000 39 K T^-1 +Bohr radius 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m +Boltzmann constant 1.380 648 52 e-23 0.000 000 79 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 3303 e-5 0.000 0050 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6612 e10 0.000 0012 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 457 0.000 040 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 3227 e-15 0.000 000 0019 e-15 m +Compton wavelength 2.426 310 2367 e-12 0.000 000 0011 e-12 m +Compton wavelength over 2 pi 386.159 267 64 e-15 0.000 000 18 e-15 m +conductance quantum 7.748 091 7310 e-5 0.000 000 0018 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 535 e-4 0.000 000 026 e-4 +deuteron-electron mass ratio 3670.482 967 85 0.000 000 13 +deuteron g factor 0.857 438 2311 0.000 000 0048 +deuteron mag. mom. 0.433 073 5040 e-26 0.000 000 0036 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4554 e-3 0.000 000 0026 e-3 +deuteron mag. mom. 
to nuclear magneton ratio 0.857 438 2311 0.000 000 0048 +deuteron mass 3.343 583 719 e-27 0.000 000 041 e-27 kg +deuteron mass energy equivalent 3.005 063 183 e-10 0.000 000 037 e-10 J +deuteron mass energy equivalent in MeV 1875.612 928 0.000 012 MeV +deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u +deuteron molar mass 2.013 553 212 745 e-3 0.000 000 000 040 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2077 0.000 000 0015 +deuteron-proton mass ratio 1.999 007 500 87 0.000 000 000 19 +deuteron rms charge radius 2.1413 e-15 0.0025 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 024 e11 0.000 000 011 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 499 0.000 012 +electron-deuteron mass ratio 2.724 437 107 484 e-4 0.000 000 000 096 e-4 +electron g factor -2.002 319 304 361 82 0.000 000 000 000 52 +electron gyromag. ratio 1.760 859 644 e11 0.000 000 011 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.951 64 0.000 17 MHz T^-1 +electron-helion mass ratio 1.819 543 074 854 e-4 0.000 000 000 088 e-4 +electron mag. mom. -928.476 4620 e-26 0.000 0057 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 180 91 e-3 0.000 000 000 26 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 91 0.000 000 000 000 26 +electron mag. mom. to nuclear magneton ratio -1838.281 972 34 0.000 000 17 +electron mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +electron mass energy equivalent 8.187 105 65 e-14 0.000 000 10 e-14 J +electron mass energy equivalent in MeV 0.510 998 9461 0.000 000 0031 MeV +electron mass in u 5.485 799 090 70 e-4 0.000 000 000 16 e-4 u +electron molar mass 5.485 799 090 70 e-7 0.000 000 000 16 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9880 0.000 0046 +electron-muon mass ratio 4.836 331 70 e-3 0.000 000 11 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4428 e-4 0.000 000 0027 e-4 +electron-proton mag. mom. ratio -658.210 6866 0.000 0020 +electron-proton mass ratio 5.446 170 213 52 e-4 0.000 000 000 52 e-4 +electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4 +electron to alpha particle mass ratio 1.370 933 554 798 e-4 0.000 000 000 045 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. 
ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 062 203 e-4 0.000 000 000 084 e-4 +electron volt 1.602 176 6208 e-19 0.000 000 0098 e-19 J +electron volt-atomic mass unit relationship 1.073 544 1105 e-9 0.000 000 0066 e-9 u +electron volt-hartree relationship 3.674 932 248 e-2 0.000 000 023 e-2 E_h +electron volt-hertz relationship 2.417 989 262 e14 0.000 000 015 e14 Hz +electron volt-inverse meter relationship 8.065 544 005 e5 0.000 000 050 e5 m^-1 +electron volt-joule relationship 1.602 176 6208 e-19 0.000 000 0098 e-19 J +electron volt-kelvin relationship 1.160 452 21 e4 0.000 000 67 e4 K +electron volt-kilogram relationship 1.782 661 907 e-36 0.000 000 011 e-36 kg +elementary charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C +elementary charge over h 2.417 989 262 e14 0.000 000 015 e14 A J^-1 +Faraday constant 96 485.332 89 0.000 59 C mol^-1 +Faraday constant for conventional electric current 96 485.3251 0.0012 C_90 mol^-1 +Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2 +fine-structure constant 7.297 352 5664 e-3 0.000 000 0017 e-3 +first radiation constant 3.741 771 790 e-16 0.000 000 046 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 953 e-16 0.000 000 015 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 3197 e-8 0.000 000 0013 e-8 u +hartree-electron volt relationship 27.211 386 02 0.000 000 17 eV +Hartree energy 4.359 744 650 e-18 0.000 000 054 e-18 J +Hartree energy in eV 27.211 386 02 0.000 000 17 eV +hartree-hertz relationship 6.579 683 920 711 e15 0.000 000 000 039 e15 Hz +hartree-inverse meter relationship 2.194 746 313 702 e7 0.000 000 000 013 e7 m^-1 +hartree-joule relationship 4.359 744 650 e-18 0.000 000 054 e-18 J +hartree-kelvin relationship 3.157 7513 e5 0.000 0018 e5 K +hartree-kilogram relationship 4.850 870 129 e-35 0.000 000 060 e-35 kg +helion-electron mass ratio 5495.885 279 22 0.000 000 27 +helion g factor -4.255 250 616 0.000 000 050 +helion mag. mom. -1.074 617 522 e-26 0.000 000 014 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 308 0.000 000 025 +helion mass 5.006 412 700 e-27 0.000 000 062 e-27 kg +helion mass energy equivalent 4.499 539 341 e-10 0.000 000 055 e-10 J +helion mass energy equivalent in MeV 2808.391 586 0.000 017 MeV +helion mass in u 3.014 932 246 73 0.000 000 000 12 u +helion molar mass 3.014 932 246 73 e-3 0.000 000 000 12 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 670 46 0.000 000 000 29 +hertz-atomic mass unit relationship 4.439 821 6616 e-24 0.000 000 0020 e-24 u +hertz-electron volt relationship 4.135 667 662 e-15 0.000 000 025 e-15 eV +hertz-hartree relationship 1.5198298460088 e-16 0.0000000000090e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 070 040 e-34 0.000 000 081 e-34 J +hertz-kelvin relationship 4.799 2447 e-11 0.000 0028 e-11 K +hertz-kilogram relationship 7.372 497 201 e-51 0.000 000 091 e-51 kg +inverse fine-structure constant 137.035 999 139 0.000 000 031 +inverse meter-atomic mass unit relationship 1.331 025 049 00 e-15 0.000 000 000 61 e-15 u +inverse meter-electron volt relationship 1.239 841 9739 e-6 0.000 000 0076 e-6 eV +inverse meter-hartree relationship 4.556 335 252 767 e-8 0.000 000 000 027 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 824 e-25 0.000 000 024 e-25 J +inverse meter-kelvin relationship 1.438 777 36 e-2 0.000 000 83 e-2 K +inverse meter-kilogram relationship 2.210 219 057 e-42 0.000 000 027 e-42 kg +inverse of conductance quantum 12 906.403 7278 0.000 0029 ohm +Josephson constant 483 597.8525 e9 0.0030 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 535 363 e9 0.000 000 082 e9 u +joule-electron volt relationship 6.241 509 126 e18 0.000 000 038 e18 eV +joule-hartree relationship 2.293 712 317 e17 0.000 000 028 e17 E_h +joule-hertz relationship 1.509 190 205 e33 0.000 000 019 e33 Hz +joule-inverse meter relationship 5.034 116 651 e24 0.000 000 062 e24 m^-1 +joule-kelvin relationship 7.242 9731 e22 0.000 0042 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 0842 e-14 0.000 0053 e-14 u +kelvin-electron volt relationship 8.617 3303 e-5 0.000 0050 e-5 eV +kelvin-hartree relationship 3.166 8105 e-6 0.000 0018 e-6 E_h +kelvin-hertz relationship 2.083 6612 e10 0.000 0012 e10 Hz +kelvin-inverse meter relationship 69.503 457 0.000 040 m^-1 +kelvin-joule relationship 1.380 648 52 e-23 0.000 000 79 e-23 J +kelvin-kilogram relationship 1.536 178 65 e-40 0.000 000 88 e-40 kg +kilogram-atomic mass unit relationship 6.022 140 857 e26 0.000 000 074 e26 u +kilogram-electron volt relationship 5.609 588 650 e35 0.000 000 034 e35 eV +kilogram-hartree relationship 2.061 485 823 e34 0.000 000 025 e34 E_h +kilogram-hertz relationship 1.356 392 512 e50 0.000 000 017 e50 Hz +kilogram-inverse meter relationship 4.524 438 411 e41 0.000 000 056 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 6595 e39 0.000 0037 e39 K +lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 6467 e25 0.000 0015 e25 m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7811 e25 0.000 0015 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. flux quantum 2.067 833 831 e-15 0.000 000 013 e-15 Wb +molar gas constant 8.314 4598 0.000 0048 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 7110 e-10 0.000 000 0018 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 565 582 0.000 000 000 054 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 947 e-3 0.000 013 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 962 e-3 0.000 013 e-3 m^3 mol^-1 +molar volume of silicon 12.058 832 14 e-6 0.000 000 61 e-6 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 11 e-15 0.000 000 26 e-15 m +muon Compton wavelength over 2 pi 1.867 594 308 e-15 0.000 000 042 e-15 m +muon-electron mass ratio 206.768 2826 0.000 0046 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. 
-4.490 448 26 e-26 0.000 000 10 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 48 e-3 0.000 000 11 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 20 +muon mass 1.883 531 594 e-28 0.000 000 048 e-28 kg +muon mass energy equivalent 1.692 833 774 e-11 0.000 000 043 e-11 J +muon mass energy equivalent in MeV 105.658 3745 0.000 0024 MeV +muon mass in u 0.113 428 9257 0.000 000 0025 u +muon molar mass 0.113 428 9257 e-3 0.000 000 0025 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5167 0.000 000 0025 +muon-proton mag. mom. ratio -3.183 345 142 0.000 000 071 +muon-proton mass ratio 0.112 609 5262 0.000 000 0025 +muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2 +natural unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s +natural unit of action in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s +natural unit of energy 8.187 105 65 e-14 0.000 000 10 e-14 J +natural unit of energy in MeV 0.510 998 9461 0.000 000 0031 MeV +natural unit of length 386.159 267 64 e-15 0.000 000 18 e-15 m +natural unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +natural unit of mom.um 2.730 924 488 e-22 0.000 000 034 e-22 kg m s^-1 +natural unit of mom.um in MeV/c 0.510 998 9461 0.000 000 0031 MeV/c +natural unit of time 1.288 088 667 12 e-21 0.000 000 000 58 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 904 81 e-15 0.000 000 000 88 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 415 36 e-15 0.000 000 000 14 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 661 58 0.000 000 90 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 72 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6933 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 50 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 471 e-27 0.000 000 021 e-27 kg +neutron mass energy equivalent 1.505 349 739 e-10 0.000 000 019 e-10 J +neutron mass energy equivalent in MeV 939.565 4133 0.000 0058 MeV +neutron mass in u 1.008 664 915 88 0.000 000 000 49 u +neutron molar mass 1.008 664 915 88 e-3 0.000 000 000 49 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 08 0.000 000 20 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 573 77 e-30 0.000 000 85 e-30 +neutron-proton mass difference energy equivalent 2.072 146 37 e-13 0.000 000 76 e-13 +neutron-proton mass difference energy equivalent in MeV 1.293 332 05 0.000 000 48 +neutron-proton mass difference in u 0.001 388 449 00 0.000 000 000 51 +neutron-proton mass ratio 1.001 378 418 98 0.000 000 000 51 +neutron-tau mass ratio 0.528 790 0.000 048 +neutron to shielded proton mag. mom. 
ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 08 e-11 0.000 31 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 61 e-39 0.000 31 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 699 e-27 0.000 000 031 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2550 e-8 0.000 000 0015 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 432 e-2 0.000 000 016 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2690 e-4 0.000 0021 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 285 0.000 000 047 MHz T^-1 +Planck constant 6.626 070 040 e-34 0.000 000 081 e-34 J s +Planck constant in eV s 4.135 667 662 e-15 0.000 000 025 e-15 eV s +Planck constant over 2 pi 1.054 571 800 e-34 0.000 000 013 e-34 J s +Planck constant over 2 pi in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9788 0.000 0012 MeV fm +Planck length 1.616 229 e-35 0.000 038 e-35 m +Planck mass 2.176 470 e-8 0.000 051 e-8 kg +Planck mass energy equivalent in GeV 1.220 910 e19 0.000 029 e19 GeV +Planck temperature 1.416 808 e32 0.000 033 e32 K +Planck time 5.391 16 e-44 0.000 13 e-44 s +proton charge to mass quotient 9.578 833 226 e7 0.000 000 059 e7 C kg^-1 +proton Compton wavelength 1.321 409 853 96 e-15 0.000 000 000 61 e-15 m +proton Compton wavelength over 2 pi 0.210 308910109e-15 0.000 000 000097e-15 m +proton-electron mass ratio 1836.152 673 89 0.000 000 17 +proton g factor 5.585 694 702 0.000 000 017 +proton gyromag. ratio 2.675 221 900 e8 0.000 000 018 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 478 92 0.000 000 29 MHz T^-1 +proton mag. mom. 1.410 606 7873 e-26 0.000 000 0097 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 2053 e-3 0.000 000 0046 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 3508 0.000 000 0085 +proton mag. shielding correction 25.691 e-6 0.011 e-6 +proton mass 1.672 621 898 e-27 0.000 000 021 e-27 kg +proton mass energy equivalent 1.503 277 593 e-10 0.000 000 018 e-10 J +proton mass energy equivalent in MeV 938.272 0813 0.000 0058 MeV +proton mass in u 1.007 276 466 879 0.000 000 000 091 u +proton molar mass 1.007 276 466 879 e-3 0.000 000 000 091 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 38 0.000 000 20 +proton-neutron mag. mom. ratio -1.459 898 05 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 44 0.000 000 000 51 +proton rms charge radius 0.8751 e-15 0.0061 e-15 m +proton-tau mass ratio 0.528 063 0.000 048 +quantum of circulation 3.636 947 5486 e-4 0.000 000 0017 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 0972 e-4 0.000 000 0033 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 508 0.000 065 m^-1 +Rydberg constant times c in Hz 3.289 841 960 355 e15 0.000 000 000 019 e15 Hz +Rydberg constant times hc in eV 13.605 693 009 0.000 000 084 eV +Rydberg constant times hc in J 2.179 872 325 e-18 0.000 000 027 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7084 0.000 0014 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8714 0.000 0014 +second radiation constant 1.438 777 36 e-2 0.000 000 83 e-2 m K +shielded helion gyromag. ratio 2.037 894 585 e8 0.000 000 027 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 099 66 0.000 000 43 MHz T^-1 +shielded helion mag. mom. -1.074 553 080 e-26 0.000 000 014 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 720 0.000 000 025 +shielded helion to proton mag. mom. 
ratio -0.761 766 5603 0.000 000 0092 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 171 e8 0.000 000 033 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 385 07 0.000 000 53 MHz T^-1 +shielded proton mag. mom. 1.410 570 547 e-26 0.000 000 018 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 600 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 367 e-8 0.000 013 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m +tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m +tau-electron mass ratio 3477.15 0.31 +tau mass 3.167 47 e-27 0.000 29 e-27 kg +tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J +tau mass energy equivalent in MeV 1776.82 0.16 MeV +tau mass in u 1.907 49 0.000 17 u +tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1 +tau-muon mass ratio 16.8167 0.0015 +tau-neutron mass ratio 1.891 11 0.000 17 +tau-proton mass ratio 1.893 72 0.000 17 +Thomson cross section 0.665 245 871 58 e-28 0.000 000 000 91 e-28 m^2 +triton-electron mass ratio 5496.921 535 88 0.000 000 26 +triton g factor 5.957 924 920 0.000 000 028 +triton mag. mom. 1.504 609 503 e-26 0.000 000 012 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 6616 e-3 0.000 000 0076 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 460 0.000 000 014 +triton mass 5.007 356 665 e-27 0.000 000 062 e-27 kg +triton mass energy equivalent 4.500 387 735 e-10 0.000 000 055 e-10 J +triton mass energy equivalent in MeV 2808.921 112 0.000 017 MeV +triton mass in u 3.015 500 716 32 0.000 000 000 11 u +triton molar mass 3.015 500 716 32 e-3 0.000 000 000 11 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 033 48 0.000 000 000 22 +unified atomic mass unit 1.660 539 040 e-27 0.000 000 020 e-27 kg +von Klitzing constant 25 812.807 4555 0.000 0059 ohm +weak mixing angle 0.2223 0.0021 +Wien frequency displacement law constant 5.878 9238 e10 0.000 0034 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7729 e-3 0.000 0017 e-3 m K""" + +# ----------------------------------------------------------------------------- + +physical_constants = {} + + +def parse_constants(d): + constants = {} + for line in d.split('\n'): + name = line[:55].rstrip() + val = line[55:77].replace(' ', '').replace('...', '') + val = float(val) + uncert = line[77:99].replace(' ', '').replace('(exact)', '0') + uncert = float(uncert) + units = line[99:].rstrip() + constants[name] = (val, units, uncert) + return constants + + +_physical_constants_2002 = parse_constants(txt2002) +_physical_constants_2006 = parse_constants(txt2006) +_physical_constants_2010 = parse_constants(txt2010) +_physical_constants_2014 = parse_constants(txt2014) + + +physical_constants.update(_physical_constants_2002) +physical_constants.update(_physical_constants_2006) +physical_constants.update(_physical_constants_2010) +physical_constants.update(_physical_constants_2014) +_current_constants = _physical_constants_2014 +_current_codata = "CODATA 2014" + +# check obsolete values +_obsolete_constants = {} +for k in physical_constants: + if k not in _current_constants: + _obsolete_constants[k] = True + +# generate some additional aliases +_aliases = {} +for k 
in _physical_constants_2002: + if 'magn.' in k: + _aliases[k] = k.replace('magn.', 'mag.') +for k in _physical_constants_2006: + if 'momentum' in k: + _aliases[k] = k.replace('momentum', 'mom.um') + + +class ConstantWarning(DeprecationWarning): + """Accessing a constant no longer in current CODATA data set""" + pass + + +def _check_obsolete(key): + if key in _obsolete_constants and key not in _aliases: + warnings.warn("Constant '%s' is not in current %s data set" % ( + key, _current_codata), ConstantWarning) + + +def value(key): + """ + Value in physical_constants indexed by key + + Parameters + ---------- + key : Python string or unicode + Key in dictionary `physical_constants` + + Returns + ------- + value : float + Value in `physical_constants` corresponding to `key` + + See Also + -------- + codata : Contains the description of `physical_constants`, which, as a + dictionary literal object, does not itself possess a docstring. + + Examples + -------- + >>> from scipy import constants + >>> constants.value(u'elementary charge') + 1.6021766208e-19 + + """ + _check_obsolete(key) + return physical_constants[key][0] + + +def unit(key): + """ + Unit in physical_constants indexed by key + + Parameters + ---------- + key : Python string or unicode + Key in dictionary `physical_constants` + + Returns + ------- + unit : Python string + Unit in `physical_constants` corresponding to `key` + + See Also + -------- + codata : Contains the description of `physical_constants`, which, as a + dictionary literal object, does not itself possess a docstring. + + Examples + -------- + >>> from scipy import constants + >>> constants.unit(u'proton mass') + 'kg' + + """ + _check_obsolete(key) + return physical_constants[key][1] + + +def precision(key): + """ + Relative precision in physical_constants indexed by key + + Parameters + ---------- + key : Python string or unicode + Key in dictionary `physical_constants` + + Returns + ------- + prec : float + Relative precision in `physical_constants` corresponding to `key` + + See Also + -------- + codata : Contains the description of `physical_constants`, which, as a + dictionary literal object, does not itself possess a docstring. + + Examples + -------- + >>> from scipy import constants + >>> constants.precision(u'proton mass') + 1.2555138746605121e-08 + + """ + _check_obsolete(key) + return physical_constants[key][2] / physical_constants[key][0] + + +def find(sub=None, disp=False): + """ + Return list of physical_constant keys containing a given string. + + Parameters + ---------- + sub : str, unicode + Sub-string to search keys for. By default, return all keys. + disp : bool + If True, print the keys that are found, and return None. + Otherwise, return the list of keys without printing anything. + + Returns + ------- + keys : list or None + If `disp` is False, the list of keys is returned. + Otherwise, None is returned. + + See Also + -------- + codata : Contains the description of `physical_constants`, which, as a + dictionary literal object, does not itself possess a docstring. + + Examples + -------- + >>> from scipy.constants import find, physical_constants + + Which keys in the ``physical_constants`` dictionary contain 'boltzmann'? 
+ + >>> find('boltzmann') + ['Boltzmann constant', + 'Boltzmann constant in Hz/K', + 'Boltzmann constant in eV/K', + 'Boltzmann constant in inverse meters per kelvin', + 'Stefan-Boltzmann constant'] + + Get the constant called 'Boltzmann constant in Hz/K': + + >>> physical_constants['Boltzmann constant in Hz/K'] + (20836612000.0, 'Hz K^-1', 12000.0) + + Find constants with 'radius' in the key: + + >>> find('radius') + ['Bohr radius', + 'classical electron radius', + 'deuteron rms charge radius', + 'proton rms charge radius'] + >>> physical_constants['classical electron radius'] + (2.8179403227e-15, 'm', 1.9e-24) + + """ + if sub is None: + result = list(_current_constants.keys()) + else: + result = [key for key in _current_constants + if sub.lower() in key.lower()] + + result.sort() + if disp: + for key in result: + print(key) + return + else: + return result + + +# Table is lacking some digits for exact values: calculate from definition +c = value('speed of light in vacuum') +mu0 = 4e-7 * pi +epsilon0 = 1 / (mu0 * c * c) + +exact_values = { + 'mag. constant': (mu0, 'N A^-2', 0.0), + 'electric constant': (epsilon0, 'F m^-1', 0.0), + 'characteristic impedance of vacuum': (sqrt(mu0 / epsilon0), 'ohm', 0.0), + 'atomic unit of permittivity': (4 * epsilon0 * pi, 'F m^-1', 0.0), + 'joule-kilogram relationship': (1 / (c * c), 'kg', 0.0), + 'kilogram-joule relationship': (c * c, 'J', 0.0), + 'hertz-inverse meter relationship': (1 / c, 'm^-1', 0.0) +} + +# sanity check +for key in exact_values: + val = _current_constants[key][0] + if abs(exact_values[key][0] - val) / val > 1e-9: + raise ValueError("Constants.codata: exact values too far off.") + +physical_constants.update(exact_values) + +# finally, insert aliases for values +for k, v in list(_aliases.items()): + if v in _current_constants: + physical_constants[k] = physical_constants[v] + else: + del _aliases[k] diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/codata.pyc b/project/venv/lib/python2.7/site-packages/scipy/constants/codata.pyc new file mode 100644 index 0000000..e332a4b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/constants/codata.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/constants.py b/project/venv/lib/python2.7/site-packages/scipy/constants/constants.py new file mode 100644 index 0000000..701db97 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/constants/constants.py @@ -0,0 +1,307 @@ +""" +Collection of physical constants and conversion factors. + +Most constants are in SI units, so you can do +print '10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots' + +The list is not meant to be comprehensive, but just a convenient list for everyday use. +""" +from __future__ import division, print_function, absolute_import + +""" +BasSw 2006 +physical constants: imported from CODATA +unit conversion: see e.g. NIST special publication 811 +Use at own risk: double-check values before calculating your Mars orbit-insertion burn. +Some constants exist in a few variants, which are marked with suffixes. +The ones without any suffix should be the most common one. 
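+
+A quick sketch of how these factors compose (editorial example; `mile`,
+`hour` and `knot` are all defined below in this module, so this runs as-is):
+
+    from scipy.constants import mile, hour, knot
+    mph_in_si = mile / hour                      # 1 mph ~= 0.447 m s^-1
+    sixty_mph_in_knots = 60 * mile / (hour * knot)   # ~= 52.1 knots
+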
+""" + +import math as _math +from .codata import value as _cd +import numpy as _np + +# mathematical constants +pi = _math.pi +golden = golden_ratio = (1 + _math.sqrt(5)) / 2 + +# SI prefixes +yotta = 1e24 +zetta = 1e21 +exa = 1e18 +peta = 1e15 +tera = 1e12 +giga = 1e9 +mega = 1e6 +kilo = 1e3 +hecto = 1e2 +deka = 1e1 +deci = 1e-1 +centi = 1e-2 +milli = 1e-3 +micro = 1e-6 +nano = 1e-9 +pico = 1e-12 +femto = 1e-15 +atto = 1e-18 +zepto = 1e-21 + +# binary prefixes +kibi = 2**10 +mebi = 2**20 +gibi = 2**30 +tebi = 2**40 +pebi = 2**50 +exbi = 2**60 +zebi = 2**70 +yobi = 2**80 + +# physical constants +c = speed_of_light = _cd('speed of light in vacuum') +mu_0 = 4e-7*pi +epsilon_0 = 1 / (mu_0*c*c) +h = Planck = _cd('Planck constant') +hbar = h / (2 * pi) +G = gravitational_constant = _cd('Newtonian constant of gravitation') +g = _cd('standard acceleration of gravity') +e = elementary_charge = _cd('elementary charge') +R = gas_constant = _cd('molar gas constant') +alpha = fine_structure = _cd('fine-structure constant') +N_A = Avogadro = _cd('Avogadro constant') +k = Boltzmann = _cd('Boltzmann constant') +sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant') +Wien = _cd('Wien wavelength displacement law constant') +Rydberg = _cd('Rydberg constant') + +# mass in kg +gram = 1e-3 +metric_ton = 1e3 +grain = 64.79891e-6 +lb = pound = 7000 * grain # avoirdupois +blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0) +slug = blob / 12 # lbf*s**2/foot (added in 1.0.0) +oz = ounce = pound / 16 +stone = 14 * pound +long_ton = 2240 * pound +short_ton = 2000 * pound + +troy_ounce = 480 * grain # only for metals / gems +troy_pound = 12 * troy_ounce +carat = 200e-6 + +m_e = electron_mass = _cd('electron mass') +m_p = proton_mass = _cd('proton mass') +m_n = neutron_mass = _cd('neutron mass') +m_u = u = atomic_mass = _cd('atomic mass constant') + +# angle in rad +degree = pi / 180 +arcmin = arcminute = degree / 60 +arcsec = arcsecond = arcmin / 60 + +# time in second +minute = 60.0 +hour = 60 * minute +day = 24 * hour +week = 7 * day +year = 365 * day +Julian_year = 365.25 * day + +# length in meter +inch = 0.0254 +foot = 12 * inch +yard = 3 * foot +mile = 1760 * yard +mil = inch / 1000 +pt = point = inch / 72 # typography +survey_foot = 1200.0 / 3937 +survey_mile = 5280 * survey_foot +nautical_mile = 1852.0 +fermi = 1e-15 +angstrom = 1e-10 +micron = 1e-6 +au = astronomical_unit = 149597870691.0 +light_year = Julian_year * c +parsec = au / arcsec + +# pressure in pascal +atm = atmosphere = _cd('standard atmosphere') +bar = 1e5 +torr = mmHg = atm / 760 +psi = pound * g / (inch * inch) + +# area in meter**2 +hectare = 1e4 +acre = 43560 * foot**2 + +# volume in meter**3 +litre = liter = 1e-3 +gallon = gallon_US = 231 * inch**3 # US +# pint = gallon_US / 8 +fluid_ounce = fluid_ounce_US = gallon_US / 128 +bbl = barrel = 42 * gallon_US # for oil + +gallon_imp = 4.54609e-3 # UK +fluid_ounce_imp = gallon_imp / 160 + +# speed in meter per second +kmh = 1e3 / hour +mph = mile / hour +mach = speed_of_sound = 340.5 # approx value at 15 degrees in 1 atm. is this a common value? 
+knot = nautical_mile / hour + +# temperature in kelvin +zero_Celsius = 273.15 +degree_Fahrenheit = 1/1.8 # only for differences + +# energy in joule +eV = electron_volt = elementary_charge # * 1 Volt +calorie = calorie_th = 4.184 +calorie_IT = 4.1868 +erg = 1e-7 +Btu_th = pound * degree_Fahrenheit * calorie_th / gram +Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram +ton_TNT = 1e9 * calorie_th +# Wh = watt_hour + +# power in watt +hp = horsepower = 550 * foot * pound * g + +# force in newton +dyn = dyne = 1e-5 +lbf = pound_force = pound * g +kgf = kilogram_force = g # * 1 kg + +# functions for conversions that are not linear + + +def convert_temperature(val, old_scale, new_scale): + """ + Convert from a temperature scale to another one among Celsius, Kelvin, + Fahrenheit and Rankine scales. + + Parameters + ---------- + val : array_like + Value(s) of the temperature(s) to be converted expressed in the + original scale. + + old_scale: str + Specifies as a string the original scale from which the temperature + value(s) will be converted. Supported scales are Celsius ('Celsius', + 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), + Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f') and Rankine + ('Rankine', 'rankine', 'R', 'r'). + + new_scale: str + Specifies as a string the new scale to which the temperature + value(s) will be converted. Supported scales are Celsius ('Celsius', + 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), + Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f') and Rankine + ('Rankine', 'rankine', 'R', 'r'). + + Returns + ------- + res : float or array of floats + Value(s) of the converted temperature(s) expressed in the new scale. + + Notes + ----- + .. versionadded:: 0.18.0 + + Examples + -------- + >>> from scipy.constants import convert_temperature + >>> convert_temperature(np.array([-40, 40.0]), 'Celsius', 'Kelvin') + array([ 233.15, 313.15]) + + """ + # Convert from `old_scale` to Kelvin + if old_scale.lower() in ['celsius', 'c']: + tempo = _np.asanyarray(val) + zero_Celsius + elif old_scale.lower() in ['kelvin', 'k']: + tempo = _np.asanyarray(val) + elif old_scale.lower() in ['fahrenheit', 'f']: + tempo = (_np.asanyarray(val) - 32.) * 5. / 9. + zero_Celsius + elif old_scale.lower() in ['rankine', 'r']: + tempo = _np.asanyarray(val) * 5. / 9. + else: + raise NotImplementedError("%s scale is unsupported: supported scales " + "are Celsius, Kelvin, Fahrenheit and " + "Rankine" % old_scale) + # and from Kelvin to `new_scale`. + if new_scale.lower() in ['celsius', 'c']: + res = tempo - zero_Celsius + elif new_scale.lower() in ['kelvin', 'k']: + res = tempo + elif new_scale.lower() in ['fahrenheit', 'f']: + res = (tempo - zero_Celsius) * 9. / 5. + 32. + elif new_scale.lower() in ['rankine', 'r']: + res = tempo * 9. / 5. + else: + raise NotImplementedError("'%s' scale is unsupported: supported " + "scales are 'Celsius', 'Kelvin', " + "'Fahrenheit' and 'Rankine'" % new_scale) + + return res + + +# optics + + +def lambda2nu(lambda_): + """ + Convert wavelength to optical frequency + + Parameters + ---------- + lambda_ : array_like + Wavelength(s) to be converted. + + Returns + ------- + nu : float or array of floats + Equivalent optical frequency. + + Notes + ----- + Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the + (vacuum) speed of light in meters/second. 
+ + Examples + -------- + >>> from scipy.constants import lambda2nu, speed_of_light + >>> lambda2nu(np.array((1, speed_of_light))) + array([ 2.99792458e+08, 1.00000000e+00]) + + """ + return _np.asanyarray(c) / lambda_ + + +def nu2lambda(nu): + """ + Convert optical frequency to wavelength. + + Parameters + ---------- + nu : array_like + Optical frequency to be converted. + + Returns + ------- + lambda : float or array of floats + Equivalent wavelength(s). + + Notes + ----- + Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the + (vacuum) speed of light in meters/second. + + Examples + -------- + >>> from scipy.constants import nu2lambda, speed_of_light + >>> nu2lambda(np.array((1, speed_of_light))) + array([ 2.99792458e+08, 1.00000000e+00]) + + """ + return c / _np.asanyarray(nu) diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/constants.pyc b/project/venv/lib/python2.7/site-packages/scipy/constants/constants.pyc new file mode 100644 index 0000000..b737ffe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/constants/constants.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/setup.py b/project/venv/lib/python2.7/site-packages/scipy/constants/setup.py new file mode 100644 index 0000000..adc42a8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/constants/setup.py @@ -0,0 +1,13 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('constants', parent_package, top_path) + config.add_data_dir('tests') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/constants/setup.pyc new file mode 100644 index 0000000..59571ba Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/constants/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/__init__.pyc new file mode 100644 index 0000000..aa8e977 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_codata.py b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_codata.py new file mode 100644 index 0000000..42f0e49 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_codata.py @@ -0,0 +1,57 @@ +from __future__ import division, print_function, absolute_import + +from scipy.constants import constants, codata, find, value +from numpy.testing import (assert_equal, assert_, + assert_almost_equal) + + +def test_find(): + keys = find('weak mixing', disp=False) + assert_equal(keys, ['weak mixing angle']) + + keys = find('qwertyuiop', disp=False) + assert_equal(keys, []) + + keys = find('natural unit', disp=False) + assert_equal(keys, sorted(['natural unit of velocity', + 'natural unit of action', + 'natural unit of action in eV s', + 'natural unit of mass', + 'natural unit 
of energy', + 'natural unit of energy in MeV', + 'natural unit of mom.um', + 'natural unit of mom.um in MeV/c', + 'natural unit of length', + 'natural unit of time'])) + + +def test_basic_table_parse(): + c = 'speed of light in vacuum' + assert_equal(codata.value(c), constants.c) + assert_equal(codata.value(c), constants.speed_of_light) + + +def test_basic_lookup(): + assert_equal('%d %s' % (codata.c, codata.unit('speed of light in vacuum')), + '299792458 m s^-1') + + +def test_find_all(): + assert_(len(codata.find(disp=False)) > 300) + + +def test_find_single(): + assert_equal(codata.find('Wien freq', disp=False)[0], + 'Wien frequency displacement law constant') + + +def test_2002_vs_2006(): + assert_almost_equal(codata.value('magn. flux quantum'), + codata.value('mag. flux quantum')) + + +def test_exact_values(): + # Check that updating stored values with exact ones worked. + for key in codata.exact_values: + assert_((codata.exact_values[key][0] - value(key)) / value(key) == 0) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_codata.pyc b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_codata.pyc new file mode 100644 index 0000000..c2d37f2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_codata.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_constants.py b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_constants.py new file mode 100644 index 0000000..2076698 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_constants.py @@ -0,0 +1,37 @@ +from __future__ import division, print_function, absolute_import + +from numpy.testing import assert_equal, assert_allclose +import scipy.constants as sc + + +def test_convert_temperature(): + assert_equal(sc.convert_temperature(32, 'f', 'Celsius'), 0) + assert_equal(sc.convert_temperature([0, 0], 'celsius', 'Kelvin'), + [273.15, 273.15]) + assert_equal(sc.convert_temperature([0, 0], 'kelvin', 'c'), + [-273.15, -273.15]) + assert_equal(sc.convert_temperature([32, 32], 'f', 'k'), [273.15, 273.15]) + assert_equal(sc.convert_temperature([273.15, 273.15], 'kelvin', 'F'), + [32, 32]) + assert_equal(sc.convert_temperature([0, 0], 'C', 'fahrenheit'), [32, 32]) + assert_allclose(sc.convert_temperature([0, 0], 'c', 'r'), [491.67, 491.67], + rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([491.67, 491.67], 'Rankine', 'C'), + [0., 0.], rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([491.67, 491.67], 'r', 'F'), + [32., 32.], rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([32, 32], 'fahrenheit', 'R'), + [491.67, 491.67], rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([273.15, 273.15], 'K', 'R'), + [491.67, 491.67], rtol=0., atol=1e-13) + assert_allclose(sc.convert_temperature([491.67, 0.], 'rankine', 'kelvin'), + [273.15, 0.], rtol=0., atol=1e-13) + + +def test_lambda_to_nu(): + assert_equal(sc.lambda2nu(sc.speed_of_light), 1) + + +def test_nu_to_lambda(): + assert_equal(sc.nu2lambda(1), sc.speed_of_light) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_constants.pyc b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_constants.pyc new file mode 100644 index 0000000..9ddd8c6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/constants/tests/test_constants.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/fftpack/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/__init__.py new file mode 100644 index 0000000..6aaa278 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/__init__.py @@ -0,0 +1,114 @@ +""" +================================================== +Discrete Fourier transforms (:mod:`scipy.fftpack`) +================================================== + +Fast Fourier Transforms (FFTs) +============================== + +.. autosummary:: + :toctree: generated/ + + fft - Fast (discrete) Fourier Transform (FFT) + ifft - Inverse FFT + fft2 - Two dimensional FFT + ifft2 - Two dimensional inverse FFT + fftn - n-dimensional FFT + ifftn - n-dimensional inverse FFT + rfft - FFT of strictly real-valued sequence + irfft - Inverse of rfft + dct - Discrete cosine transform + idct - Inverse discrete cosine transform + dctn - n-dimensional Discrete cosine transform + idctn - n-dimensional Inverse discrete cosine transform + dst - Discrete sine transform + idst - Inverse discrete sine transform + dstn - n-dimensional Discrete sine transform + idstn - n-dimensional Inverse discrete sine transform + +Differential and pseudo-differential operators +============================================== + +.. autosummary:: + :toctree: generated/ + + diff - Differentiation and integration of periodic sequences + tilbert - Tilbert transform: cs_diff(x,h,h) + itilbert - Inverse Tilbert transform: sc_diff(x,h,h) + hilbert - Hilbert transform: cs_diff(x,inf,inf) + ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf) + cs_diff - cosh/sinh pseudo-derivative of periodic sequences + sc_diff - sinh/cosh pseudo-derivative of periodic sequences + ss_diff - sinh/sinh pseudo-derivative of periodic sequences + cc_diff - cosh/cosh pseudo-derivative of periodic sequences + shift - Shift periodic sequences + +Helper functions +================ + +.. autosummary:: + :toctree: generated/ + + fftshift - Shift the zero-frequency component to the center of the spectrum + ifftshift - The inverse of `fftshift` + fftfreq - Return the Discrete Fourier Transform sample frequencies + rfftfreq - DFT sample frequencies (for usage with rfft, irfft) + next_fast_len - Find the optimal length to zero-pad an FFT for speed + +Note that ``fftshift``, ``ifftshift`` and ``fftfreq`` are numpy functions +exposed by ``fftpack``; importing them from ``numpy`` should be preferred. + +Convolutions (:mod:`scipy.fftpack.convolve`) +============================================ + +.. module:: scipy.fftpack.convolve + +.. 
autosummary:: + :toctree: generated/ + + convolve + convolve_z + init_convolution_kernel + destroy_convolve_cache + +""" + +# List of possibly useful functions in scipy.fftpack._fftpack: +# drfft +# zfft +# zrfft +# zfftnd +# destroy_drfft_cache +# destroy_zfft_cache +# destroy_zfftnd_cache + +from __future__ import division, print_function, absolute_import + + +__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', + 'fft2','ifft2', + 'diff', + 'tilbert','itilbert','hilbert','ihilbert', + 'sc_diff','cs_diff','cc_diff','ss_diff', + 'shift', + 'fftfreq', 'rfftfreq', + 'fftshift', 'ifftshift', + 'next_fast_len', + ] + +from .basic import * +from .pseudo_diffs import * +from .helper import * + +from numpy.dual import register_func +for k in ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2']: + register_func(k, eval(k)) +del k, register_func + +from .realtransforms import * +__all__.extend(['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', + 'idstn']) + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/__init__.pyc new file mode 100644 index 0000000..f6c1004 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/_fftpack.so b/project/venv/lib/python2.7/site-packages/scipy/fftpack/_fftpack.so new file mode 100755 index 0000000..71ce283 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/_fftpack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/basic.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/basic.py new file mode 100644 index 0000000..c61e1d0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/basic.py @@ -0,0 +1,702 @@ +""" +Discrete Fourier Transforms - basic.py +""" +# Created by Pearu Peterson, August,September 2002 +from __future__ import division, print_function, absolute_import + +__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', + 'fft2','ifft2'] + +from numpy import swapaxes, zeros +import numpy +from . import _fftpack +from scipy.fftpack.helper import _init_nd_shape_and_axes_sorted + +import atexit +atexit.register(_fftpack.destroy_zfft_cache) +atexit.register(_fftpack.destroy_zfftnd_cache) +atexit.register(_fftpack.destroy_drfft_cache) +atexit.register(_fftpack.destroy_cfft_cache) +atexit.register(_fftpack.destroy_cfftnd_cache) +atexit.register(_fftpack.destroy_rfft_cache) +del atexit + + +def istype(arr, typeclass): + return issubclass(arr.dtype.type, typeclass) + + +def _datacopied(arr, original): + """ + Strict check for `arr` not sharing any data with `original`, + under the assumption that arr = asarray(original) + + """ + if arr is original: + return False + if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'): + return False + return arr.base is None + +# XXX: single precision FFTs partially disabled due to accuracy issues +# for large prime-sized inputs. +# +# See http://permalink.gmane.org/gmane.comp.python.scientific.devel/13834 +# ("fftpack test failures for 0.8.0b1", Ralf Gommers, 17 Jun 2010, +# @ scipy-dev) +# +# These should be re-enabled once the problems are resolved + + +def _is_safe_size(n): + """ + Is the size of FFT such that FFTPACK can handle it in single precision + with sufficient accuracy? 
+ + Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those + """ + n = int(n) + + if n == 0: + return True + + # Divide by 3 until you can't, then by 5 until you can't + for c in (3, 5): + while n % c == 0: + n //= c + + # Return True if the remainder is a power of 2 + return not n & (n-1) + + +def _fake_crfft(x, n, *a, **kw): + if _is_safe_size(n): + return _fftpack.crfft(x, n, *a, **kw) + else: + return _fftpack.zrfft(x, n, *a, **kw).astype(numpy.complex64) + + +def _fake_cfft(x, n, *a, **kw): + if _is_safe_size(n): + return _fftpack.cfft(x, n, *a, **kw) + else: + return _fftpack.zfft(x, n, *a, **kw).astype(numpy.complex64) + + +def _fake_rfft(x, n, *a, **kw): + if _is_safe_size(n): + return _fftpack.rfft(x, n, *a, **kw) + else: + return _fftpack.drfft(x, n, *a, **kw).astype(numpy.float32) + + +def _fake_cfftnd(x, shape, *a, **kw): + if numpy.all(list(map(_is_safe_size, shape))): + return _fftpack.cfftnd(x, shape, *a, **kw) + else: + return _fftpack.zfftnd(x, shape, *a, **kw).astype(numpy.complex64) + + +_DTYPE_TO_FFT = { +# numpy.dtype(numpy.float32): _fftpack.crfft, + numpy.dtype(numpy.float32): _fake_crfft, + numpy.dtype(numpy.float64): _fftpack.zrfft, +# numpy.dtype(numpy.complex64): _fftpack.cfft, + numpy.dtype(numpy.complex64): _fake_cfft, + numpy.dtype(numpy.complex128): _fftpack.zfft, +} + +_DTYPE_TO_RFFT = { +# numpy.dtype(numpy.float32): _fftpack.rfft, + numpy.dtype(numpy.float32): _fake_rfft, + numpy.dtype(numpy.float64): _fftpack.drfft, +} + +_DTYPE_TO_FFTN = { +# numpy.dtype(numpy.complex64): _fftpack.cfftnd, + numpy.dtype(numpy.complex64): _fake_cfftnd, + numpy.dtype(numpy.complex128): _fftpack.zfftnd, +# numpy.dtype(numpy.float32): _fftpack.cfftnd, + numpy.dtype(numpy.float32): _fake_cfftnd, + numpy.dtype(numpy.float64): _fftpack.zfftnd, +} + + +def _asfarray(x): + """Like numpy asfarray, except that it does not modify x dtype if x is + already an array with a float dtype, and do not cast complex types to + real.""" + if hasattr(x, "dtype") and x.dtype.char in numpy.typecodes["AllFloat"]: + # 'dtype' attribute does not ensure that the + # object is an ndarray (e.g. Series class + # from the pandas library) + if x.dtype == numpy.half: + # no half-precision routines, so convert to single precision + return numpy.asarray(x, dtype=numpy.float32) + return numpy.asarray(x, dtype=x.dtype) + else: + # We cannot use asfarray directly because it converts sequences of + # complex to sequence of real + ret = numpy.asarray(x) + if ret.dtype == numpy.half: + return numpy.asarray(ret, dtype=numpy.float32) + elif ret.dtype.char not in numpy.typecodes["AllFloat"]: + return numpy.asfarray(x) + return ret + + +def _fix_shape(x, n, axis): + """ Internal auxiliary function for _raw_fft, _raw_fftnd.""" + s = list(x.shape) + if s[axis] > n: + index = [slice(None)]*len(s) + index[axis] = slice(0,n) + x = x[tuple(index)] + return x, False + else: + index = [slice(None)]*len(s) + index[axis] = slice(0,s[axis]) + s[axis] = n + z = zeros(s,x.dtype.char) + z[tuple(index)] = x + return z, True + + +def _raw_fft(x, n, axis, direction, overwrite_x, work_function): + """ Internal auxiliary function for fft, ifft, rfft, irfft.""" + if n is None: + n = x.shape[axis] + elif n != x.shape[axis]: + x, copy_made = _fix_shape(x,n,axis) + overwrite_x = overwrite_x or copy_made + + if n < 1: + raise ValueError("Invalid number of FFT data points " + "(%d) specified." 
% n) + + if axis == -1 or axis == len(x.shape)-1: + r = work_function(x,n,direction,overwrite_x=overwrite_x) + else: + x = swapaxes(x, axis, -1) + r = work_function(x,n,direction,overwrite_x=overwrite_x) + r = swapaxes(r, axis, -1) + return r + + +def fft(x, n=None, axis=-1, overwrite_x=False): + """ + Return discrete Fourier transform of real or complex sequence. + + The returned complex array contains ``y(0), y(1),..., y(n-1)`` where + + ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``. + + Parameters + ---------- + x : array_like + Array to Fourier transform. + n : int, optional + Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the fft's are computed; the default is over the + last axis (i.e., ``axis=-1``). + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + z : complex ndarray + with the elements:: + + [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)] if n is even + [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd + + where:: + + y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1 + + See Also + -------- + ifft : Inverse FFT + rfft : FFT of a real sequence + + Notes + ----- + The packing of the result is "standard": If ``A = fft(a, n)``, then + ``A[0]`` contains the zero-frequency term, ``A[1:n/2]`` contains the + positive-frequency terms, and ``A[n/2:]`` contains the negative-frequency + terms, in order of decreasingly negative frequency. So for an 8-point + transform, the frequencies of the result are [0, 1, 2, 3, -4, -3, -2, -1]. + To rearrange the fft output so that the zero-frequency component is + centered, like [-4, -3, -2, -1, 0, 1, 2, 3], use `fftshift`. + + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. + + This function is most efficient when `n` is a power of two, and least + efficient when `n` is prime. + + Note that if ``x`` is real-valued then ``A[j] == A[n-j].conjugate()``. + If ``x`` is real-valued and ``n`` is even then ``A[n/2]`` is real. + + If the data type of `x` is real, a "real FFT" algorithm is automatically + used, which roughly halves the computation time. To increase efficiency + a little further, use `rfft`, which does the same calculation, but only + outputs half of the symmetrical spectrum. If the data is both real and + symmetrical, the `dct` can again double the efficiency, by generating + half of the spectrum from half of the signal. + + Examples + -------- + >>> from scipy.fftpack import fft, ifft + >>> x = np.arange(5) + >>> np.allclose(fft(ifft(x)), x, atol=1e-15) # within numerical accuracy. + True + + """ + tmp = _asfarray(x) + + try: + work_function = _DTYPE_TO_FFT[tmp.dtype] + except KeyError: + raise ValueError("type %s is not supported" % tmp.dtype) + + if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)): + overwrite_x = 1 + + overwrite_x = overwrite_x or _datacopied(tmp, x) + + if n is None: + n = tmp.shape[axis] + elif n != tmp.shape[axis]: + tmp, copy_made = _fix_shape(tmp,n,axis) + overwrite_x = overwrite_x or copy_made + + if n < 1: + raise ValueError("Invalid number of FFT data points " + "(%d) specified." 
% n)
+
+    if axis == -1 or axis == len(tmp.shape) - 1:
+        return work_function(tmp,n,1,0,overwrite_x)
+
+    tmp = swapaxes(tmp, axis, -1)
+    tmp = work_function(tmp,n,1,0,overwrite_x)
+    return swapaxes(tmp, axis, -1)
+
+
+def ifft(x, n=None, axis=-1, overwrite_x=False):
+    """
+    Return discrete inverse Fourier transform of real or complex sequence.
+
+    The returned complex array contains ``y(0), y(1),..., y(n-1)`` where
+
+    ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.
+
+    Parameters
+    ----------
+    x : array_like
+        Transformed data to invert.
+    n : int, optional
+        Length of the inverse Fourier transform.  If ``n < x.shape[axis]``,
+        `x` is truncated.  If ``n > x.shape[axis]``, `x` is zero-padded.
+        The default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the ifft's are computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    ifft : complex ndarray
+        The inverse discrete Fourier transform.
+
+    See Also
+    --------
+    fft : Forward FFT
+
+    Notes
+    -----
+    Both single and double precision routines are implemented.  Half precision
+    inputs will be converted to single precision.  Non floating-point inputs
+    will be converted to double precision.  Long-double precision inputs are
+    not supported.
+
+    This function is most efficient when `n` is a power of two, and least
+    efficient when `n` is prime.
+
+    If the data type of `x` is real, a "real IFFT" algorithm is automatically
+    used, which roughly halves the computation time.
+
+    Examples
+    --------
+    >>> from scipy.fftpack import fft, ifft
+    >>> import numpy as np
+    >>> x = np.arange(5)
+    >>> np.allclose(ifft(fft(x)), x, atol=1e-15)  # within numerical accuracy.
+    True
+
+    """
+    tmp = _asfarray(x)
+
+    try:
+        work_function = _DTYPE_TO_FFT[tmp.dtype]
+    except KeyError:
+        raise ValueError("type %s is not supported" % tmp.dtype)
+
+    if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
+        overwrite_x = 1
+
+    overwrite_x = overwrite_x or _datacopied(tmp, x)
+
+    if n is None:
+        n = tmp.shape[axis]
+    elif n != tmp.shape[axis]:
+        tmp, copy_made = _fix_shape(tmp,n,axis)
+        overwrite_x = overwrite_x or copy_made
+
+    if n < 1:
+        raise ValueError("Invalid number of FFT data points "
+                         "(%d) specified." % n)
+
+    if axis == -1 or axis == len(tmp.shape) - 1:
+        return work_function(tmp,n,-1,1,overwrite_x)
+
+    tmp = swapaxes(tmp, axis, -1)
+    tmp = work_function(tmp,n,-1,1,overwrite_x)
+    return swapaxes(tmp, axis, -1)
+
+
+def rfft(x, n=None, axis=-1, overwrite_x=False):
+    """
+    Discrete Fourier transform of a real sequence.
+
+    Parameters
+    ----------
+    x : array_like, real-valued
+        The data to transform.
+    n : int, optional
+        Defines the length of the Fourier transform.  If `n` is not specified
+        (the default) then ``n = x.shape[axis]``.  If ``n < x.shape[axis]``,
+        `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded.
+    axis : int, optional
+        The axis along which the transform is applied.  The default is the
+        last axis.
+    overwrite_x : bool, optional
+        If set to true, the contents of `x` can be overwritten. Default is
+        False.
+ + Returns + ------- + z : real ndarray + The returned real array contains:: + + [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] if n is even + [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] if n is odd + + where:: + + y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n) + j = 0..n-1 + + See Also + -------- + fft, irfft, numpy.fft.rfft + + Notes + ----- + Within numerical accuracy, ``y == rfft(irfft(y))``. + + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. + + To get an output with a complex datatype, consider using the related + function `numpy.fft.rfft`. + + Examples + -------- + >>> from scipy.fftpack import fft, rfft + >>> a = [9, -9, 1, 3] + >>> fft(a) + array([ 4. +0.j, 8.+12.j, 16. +0.j, 8.-12.j]) + >>> rfft(a) + array([ 4., 8., 12., 16.]) + + """ + tmp = _asfarray(x) + + if not numpy.isrealobj(tmp): + raise TypeError("1st argument must be real sequence") + + try: + work_function = _DTYPE_TO_RFFT[tmp.dtype] + except KeyError: + raise ValueError("type %s is not supported" % tmp.dtype) + + overwrite_x = overwrite_x or _datacopied(tmp, x) + + return _raw_fft(tmp,n,axis,1,overwrite_x,work_function) + + +def irfft(x, n=None, axis=-1, overwrite_x=False): + """ + Return inverse discrete Fourier transform of real sequence x. + + The contents of `x` are interpreted as the output of the `rfft` + function. + + Parameters + ---------- + x : array_like + Transformed data to invert. + n : int, optional + Length of the inverse Fourier transform. + If n < x.shape[axis], x is truncated. + If n > x.shape[axis], x is zero-padded. + The default results in n = x.shape[axis]. + axis : int, optional + Axis along which the ifft's are computed; the default is over + the last axis (i.e., axis=-1). + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + irfft : ndarray of floats + The inverse discrete Fourier transform. + + See Also + -------- + rfft, ifft, numpy.fft.irfft + + Notes + ----- + The returned real array contains:: + + [y(0),y(1),...,y(n-1)] + + where for n is even:: + + y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k]) + * exp(sqrt(-1)*j*k* 2*pi/n) + + c.c. + x[0] + (-1)**(j) x[n-1]) + + and for n is odd:: + + y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k]) + * exp(sqrt(-1)*j*k* 2*pi/n) + + c.c. + x[0]) + + c.c. denotes complex conjugate of preceding expression. + + For details on input parameters, see `rfft`. + + To process (conjugate-symmetric) frequency-domain data with a complex + datatype, consider using the related function `numpy.fft.irfft`. 
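+
+    Within numerical accuracy ``irfft(rfft(x)) == x`` for real-valued `x`,
+    mirroring the round trip noted in `rfft`; the second example below
+    demonstrates this.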
+ + Examples + -------- + >>> from scipy.fftpack import rfft, irfft + >>> a = [1.0, 2.0, 3.0, 4.0, 5.0] + >>> irfft(a) + array([ 2.6 , -3.16405192, 1.24398433, -1.14955713, 1.46962473]) + >>> irfft(rfft(a)) + array([1., 2., 3., 4., 5.]) + + """ + tmp = _asfarray(x) + if not numpy.isrealobj(tmp): + raise TypeError("1st argument must be real sequence") + + try: + work_function = _DTYPE_TO_RFFT[tmp.dtype] + except KeyError: + raise ValueError("type %s is not supported" % tmp.dtype) + + overwrite_x = overwrite_x or _datacopied(tmp, x) + + return _raw_fft(tmp,n,axis,-1,overwrite_x,work_function) + + +def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function): + """Internal auxiliary function for fftnd, ifftnd.""" + noaxes = axes is None + s, axes = _init_nd_shape_and_axes_sorted(x, s, axes) + + # No need to swap axes, array is in C order + if noaxes: + for ax in axes: + x, copy_made = _fix_shape(x, s[ax], ax) + overwrite_x = overwrite_x or copy_made + return work_function(x, s, direction, overwrite_x=overwrite_x) + + # Swap the request axes, last first (i.e. First swap the axis which ends up + # at -1, then at -2, etc...), such as the request axes on which the + # operation is carried become the last ones + for i in range(1, axes.size+1): + x = numpy.swapaxes(x, axes[-i], -i) + + # We can now operate on the axes waxes, the p last axes (p = len(axes)), by + # fixing the shape of the input array to 1 for any axis the fft is not + # carried upon. + waxes = list(range(x.ndim - axes.size, x.ndim)) + shape = numpy.ones(x.ndim) + shape[waxes] = s + + for i in range(len(waxes)): + x, copy_made = _fix_shape(x, s[i], waxes[i]) + overwrite_x = overwrite_x or copy_made + + r = work_function(x, shape, direction, overwrite_x=overwrite_x) + + # reswap in the reverse order (first axis first, etc...) to get original + # order + for i in range(len(axes), 0, -1): + r = numpy.swapaxes(r, -i, axes[-i]) + + return r + + +def fftn(x, shape=None, axes=None, overwrite_x=False): + """ + Return multidimensional discrete Fourier transform. + + The returned array contains:: + + y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] + x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i) + + where d = len(x.shape) and n = x.shape. + + Parameters + ---------- + x : array_like + The (n-dimensional) array to transform. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + The axes of `x` (`y` if `shape` is not None) along which the + transform is applied. + The default is over all axes. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed. Default is False. + + Returns + ------- + y : complex-valued n-dimensional numpy array + The (n-dimensional) DFT of the input array. + + See Also + -------- + ifftn + + Notes + ----- + If ``x`` is real-valued, then + ``y[..., j_i, ...] == y[..., n_i-j_i, ...].conjugate()``. + + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. 
Non floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. + + Examples + -------- + >>> from scipy.fftpack import fftn, ifftn + >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) + >>> np.allclose(y, fftn(ifftn(y))) + True + + """ + return _raw_fftn_dispatch(x, shape, axes, overwrite_x, 1) + + +def _raw_fftn_dispatch(x, shape, axes, overwrite_x, direction): + tmp = _asfarray(x) + + try: + work_function = _DTYPE_TO_FFTN[tmp.dtype] + except KeyError: + raise ValueError("type %s is not supported" % tmp.dtype) + + if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)): + overwrite_x = 1 + + overwrite_x = overwrite_x or _datacopied(tmp, x) + return _raw_fftnd(tmp, shape, axes, direction, overwrite_x, work_function) + + +def ifftn(x, shape=None, axes=None, overwrite_x=False): + """ + Return inverse multi-dimensional discrete Fourier transform. + + The sequence can be of an arbitrary type. + + The returned array contains:: + + y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] + x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i) + + where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``. + + For description of parameters see `fftn`. + + See Also + -------- + fftn : for detailed information. + + Examples + -------- + >>> from scipy.fftpack import fftn, ifftn + >>> import numpy as np + >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) + >>> np.allclose(y, ifftn(fftn(y))) + True + + """ + return _raw_fftn_dispatch(x, shape, axes, overwrite_x, -1) + + +def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False): + """ + 2-D discrete Fourier transform. + + Return the two-dimensional discrete Fourier transform of the 2-D argument + `x`. + + See Also + -------- + fftn : for detailed information. + + """ + return fftn(x,shape,axes,overwrite_x) + + +def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False): + """ + 2-D discrete inverse Fourier transform of real or complex sequence. + + Return inverse two-dimensional discrete Fourier transform of + arbitrary type sequence x. + + See `ifft` for more information. 
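+
+    Within numerical accuracy, ``ifft2(fft2(x)) == x``.  A minimal check:
+
+    >>> from scipy.fftpack import fft2, ifft2
+    >>> import numpy as np
+    >>> x = np.arange(12).reshape(3, 4)
+    >>> np.allclose(x, ifft2(fft2(x)))
+    True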
+ + See also + -------- + fft2, ifft + + """ + return ifftn(x,shape,axes,overwrite_x) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/basic.pyc new file mode 100644 index 0000000..4806542 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/basic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/convolve.so b/project/venv/lib/python2.7/site-packages/scipy/fftpack/convolve.so new file mode 100755 index 0000000..235d4ab Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/convolve.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/helper.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/helper.py new file mode 100644 index 0000000..92c5608 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/helper.py @@ -0,0 +1,277 @@ +from __future__ import division, print_function, absolute_import + +import operator +from numpy import (arange, array, asarray, atleast_1d, intc, integer, + isscalar, issubdtype, take, unique, where) +from numpy.fft.helper import fftshift, ifftshift, fftfreq +from bisect import bisect_left + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len'] + + +def rfftfreq(n, d=1.0): + """DFT sample frequencies (for usage with rfft, irfft). + + The returned float array contains the frequency bins in + cycles/unit (with zero at the start) given a window length `n` and a + sample spacing `d`:: + + f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n) if n is even + f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing. Default is 1. + + Returns + ------- + out : ndarray + The array of length `n`, containing the sample frequencies. + + Examples + -------- + >>> from scipy import fftpack + >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> sig_fft = fftpack.rfft(sig) + >>> n = sig_fft.size + >>> timestep = 0.1 + >>> freq = fftpack.rfftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 1.25, 2.5 , 2.5 , 3.75, 3.75, 5. ]) + + """ + n = operator.index(n) + if n < 0: + raise ValueError("n = %s is not valid. " + "n must be a nonnegative integer." % n) + + return (arange(1, n + 1, dtype=int) // 2) / float(n * d) + + +def next_fast_len(target): + """ + Find the next fast size of input data to `fft`, for zero-padding, etc. + + SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this + returns the next composite of the prime factors 2, 3, and 5 which is + greater than or equal to `target`. (These are also known as 5-smooth + numbers, regular numbers, or Hamming numbers.) + + Parameters + ---------- + target : int + Length to start searching from. Must be a positive integer. + + Returns + ------- + out : int + The first 5-smooth number greater than or equal to `target`. + + Notes + ----- + .. 
versionadded:: 0.18.0
+
+    Examples
+    --------
+    On a particular machine, an FFT of prime length takes 133 ms:
+
+    >>> from scipy import fftpack
+    >>> min_len = 10007  # prime length is worst case for speed
+    >>> a = np.random.randn(min_len)
+    >>> b = fftpack.fft(a)
+
+    Zero-padding to the next 5-smooth length reduces computation time to
+    211 us, a speedup of 630 times:
+
+    >>> fftpack.helper.next_fast_len(min_len)
+    10125
+    >>> b = fftpack.fft(a, 10125)
+
+    Rounding up to the next power of 2 is not optimal, taking 367 us to
+    compute, 1.7 times as long as the 5-smooth size:
+
+    >>> b = fftpack.fft(a, 16384)
+
+    """
+    hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48,
+            50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128,
+            135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250,
+            256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450,
+            480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729,
+            750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125,
+            1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536,
+            1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160,
+            2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916,
+            3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840,
+            3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000,
+            5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400,
+            6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100,
+            8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000)
+
+    target = int(target)
+
+    if target <= 6:
+        return target
+
+    # Quickly check if it's already a power of 2
+    if not (target & (target-1)):
+        return target
+
+    # Get result quickly for small sizes, since FFT itself is similarly fast.
+    if target <= hams[-1]:
+        return hams[bisect_left(hams, target)]
+
+    match = float('inf')  # Anything found will be smaller
+    p5 = 1
+    while p5 < target:
+        p35 = p5
+        while p35 < target:
+            # Ceiling integer division, avoiding conversion to float
+            # (quotient = ceil(target / p35))
+            quotient = -(-target // p35)
+
+            # Quickly find next power of 2 >= quotient
+            p2 = 2**((quotient - 1).bit_length())
+
+            N = p2 * p35
+            if N == target:
+                return N
+            elif N < match:
+                match = N
+            p35 *= 3
+            if p35 == target:
+                return p35
+        if p35 < match:
+            match = p35
+        p5 *= 5
+        if p5 == target:
+            return p5
+    if p5 < match:
+        match = p5
+    return match
+
+
+def _init_nd_shape_and_axes(x, shape, axes):
+    """Handle shape and axes arguments for n-dimensional transforms.
+
+    Returns the shape and axes in a standard form, taking into account negative
+    values and checking for various potential errors.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    shape : int or array_like of ints or None
+        The shape of the result.  If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
+        If `shape` is -1, the size of the corresponding dimension of `x` is
+        used.
+    axes : int or array_like of ints or None
+        Axes along which the calculation is computed.
+        The default is over all axes.
+        Negative indices are automatically converted to their positive
+        counterpart.
+
+    Returns
+    -------
+    shape : array
+        The shape of the result. It is a 1D integer array.
+    axes : array
+        The axes along which the calculation is computed. It is a 1D
+        integer array.
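+
+    Notes
+    -----
+    Scalar `shape` and `axes` arguments are promoted to length-1 arrays.
+    A ``ValueError`` is raised for non-integer entries, out-of-range or
+    repeated axes, `shape` and `axes` arguments of different lengths, or
+    non-positive sizes.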
+
+    """
+    x = asarray(x)
+    noshape = shape is None
+    noaxes = axes is None
+
+    if noaxes:
+        axes = arange(x.ndim, dtype=intc)
+    else:
+        axes = atleast_1d(axes)
+
+    if axes.size == 0:
+        axes = axes.astype(intc)
+
+    if not axes.ndim == 1:
+        raise ValueError("when given, axes values must be a scalar or vector")
+    if not issubdtype(axes.dtype, integer):
+        raise ValueError("when given, axes values must be integers")
+
+    axes = where(axes < 0, axes + x.ndim, axes)
+
+    if axes.size != 0 and (axes.max() >= x.ndim or axes.min() < 0):
+        raise ValueError("axes exceeds dimensionality of input")
+    if axes.size != 0 and unique(axes).shape != axes.shape:
+        raise ValueError("all axes must be unique")
+
+    if not noshape:
+        shape = atleast_1d(shape)
+    elif isscalar(x):
+        shape = array([], dtype=intc)
+    elif noaxes:
+        shape = array(x.shape, dtype=intc)
+    else:
+        shape = take(x.shape, axes)
+
+    if shape.size == 0:
+        shape = shape.astype(intc)
+
+    if shape.ndim != 1:
+        raise ValueError("when given, shape values must be a scalar or vector")
+    if not issubdtype(shape.dtype, integer):
+        raise ValueError("when given, shape values must be integers")
+    if axes.shape != shape.shape:
+        raise ValueError("when given, axes and shape arguments"
+                         " have to be of the same length")
+
+    shape = where(shape == -1, array(x.shape)[axes], shape)
+
+    if shape.size != 0 and (shape < 1).any():
+        raise ValueError(
+            "invalid number of data points ({0}) specified".format(shape))
+
+    return shape, axes
+
+
+def _init_nd_shape_and_axes_sorted(x, shape, axes):
+    """Handle and sort shape and axes arguments for n-dimensional transforms.
+
+    This is identical to `_init_nd_shape_and_axes`, except the axes are
+    returned in sorted order and the shape is reordered to match.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    shape : int or array_like of ints or None
+        The shape of the result.  If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
+        If `shape` is -1, the size of the corresponding dimension of `x` is
+        used.
+    axes : int or array_like of ints or None
+        Axes along which the calculation is computed.
+        The default is over all axes.
+        Negative indices are automatically converted to their positive
+        counterpart.
+
+    Returns
+    -------
+    shape : array
+        The shape of the result. It is a 1D integer array.
+    axes : array
+        The axes along which the calculation is computed, in sorted order.
+        It is a 1D integer array.
+
+    """
+    noaxes = axes is None
+    shape, axes = _init_nd_shape_and_axes(x, shape, axes)
+
+    if not noaxes:
+        shape = shape[axes.argsort()]
+        axes.sort()
+
+    return shape, axes
diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/helper.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/helper.pyc
new file mode 100644
index 0000000..cb68caa
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/helper.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/pseudo_diffs.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/pseudo_diffs.py
new file mode 100644
index 0000000..a94ff49
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/pseudo_diffs.py
@@ -0,0 +1,557 @@
+"""
+Differential and pseudo-differential operators.
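+
+All routines in this module act on the Fourier coefficients of a periodic
+sequence: each one multiplies the j-th coefficient by a kernel documented
+in the corresponding docstring and transforms the result back.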
+""" +# Created by Pearu Peterson, September 2002 +from __future__ import division, print_function, absolute_import + + +__all__ = ['diff', + 'tilbert','itilbert','hilbert','ihilbert', + 'cs_diff','cc_diff','sc_diff','ss_diff', + 'shift'] + +from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj +from . import convolve + +from scipy.fftpack.basic import _datacopied + +import atexit +atexit.register(convolve.destroy_convolve_cache) +del atexit + + +_cache = {} + + +def diff(x,order=1,period=None, _cache=_cache): + """ + Return k-th derivative (or integral) of a periodic sequence x. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j + y_0 = 0 if order is not 0. + + Parameters + ---------- + x : array_like + Input array. + order : int, optional + The order of differentiation. Default order is 1. If order is + negative, then integration is carried out under the assumption + that ``x_0 == 0``. + period : float, optional + The assumed period of the sequence. Default is ``2*pi``. + + Notes + ----- + If ``sum(x, axis=0) = 0`` then ``diff(diff(x, k), -k) == x`` (within + numerical accuracy). + + For odd order and even ``len(x)``, the Nyquist mode is taken zero. + + """ + tmp = asarray(x) + if order == 0: + return tmp + if iscomplexobj(tmp): + return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period) + if period is not None: + c = 2*pi/period + else: + c = 1.0 + n = len(x) + omega = _cache.get((n,order,c)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,order=order,c=c): + if k: + return pow(c*k,order) + return 0 + omega = convolve.init_convolution_kernel(n,kernel,d=order, + zero_nyquist=1) + _cache[(n,order,c)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=order % 2, + overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def tilbert(x, h, period=None, _cache=_cache): + """ + Return h-Tilbert transform of a periodic sequence x. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j + y_0 = 0 + + Parameters + ---------- + x : array_like + The input array to transform. + h : float + Defines the parameter of the Tilbert transform. + period : float, optional + The assumed period of the sequence. Default period is ``2*pi``. + + Returns + ------- + tilbert : ndarray + The result of the transform. + + Notes + ----- + If ``sum(x, axis=0) == 0`` and ``n = len(x)`` is odd then + ``tilbert(itilbert(x)) == x``. + + If ``2 * pi * h / period`` is approximately 10 or larger, then + numerically ``tilbert == hilbert`` + (theoretically oo-Tilbert == Hilbert). + + For even ``len(x)``, the Nyquist mode of ``x`` is taken zero. 
+ + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return tilbert(tmp.real, h, period) + \ + 1j * tilbert(tmp.imag, h, period) + + if period is not None: + h = h * 2 * pi / period + + n = len(x) + omega = _cache.get((n, h)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k, h=h): + if k: + return 1.0/tanh(h*k) + + return 0 + + omega = convolve.init_convolution_kernel(n, kernel, d=1) + _cache[(n,h)] = omega + + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def itilbert(x,h,period=None, _cache=_cache): + """ + Return inverse h-Tilbert transform of a periodic sequence x. + + If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j + y_0 = 0 + + For more details, see `tilbert`. + + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return itilbert(tmp.real,h,period) + \ + 1j*itilbert(tmp.imag,h,period) + if period is not None: + h = h*2*pi/period + n = len(x) + omega = _cache.get((n,h)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,h=h): + if k: + return -tanh(h*k) + return 0 + omega = convolve.init_convolution_kernel(n,kernel,d=1) + _cache[(n,h)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def hilbert(x, _cache=_cache): + """ + Return Hilbert transform of a periodic sequence x. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = sqrt(-1)*sign(j) * x_j + y_0 = 0 + + Parameters + ---------- + x : array_like + The input array, should be periodic. + _cache : dict, optional + Dictionary that contains the kernel used to do a convolution with. + + Returns + ------- + y : ndarray + The transformed input. + + See Also + -------- + scipy.signal.hilbert : Compute the analytic signal, using the Hilbert + transform. + + Notes + ----- + If ``sum(x, axis=0) == 0`` then ``hilbert(ihilbert(x)) == x``. + + For even len(x), the Nyquist mode of x is taken zero. + + The sign of the returned transform does not have a factor -1 that is more + often than not found in the definition of the Hilbert transform. Note also + that `scipy.signal.hilbert` does have an extra -1 factor compared to this + function. + + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return hilbert(tmp.real)+1j*hilbert(tmp.imag) + n = len(x) + omega = _cache.get(n) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k): + if k > 0: + return 1.0 + elif k < 0: + return -1.0 + return 0.0 + omega = convolve.init_convolution_kernel(n,kernel,d=1) + _cache[n] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +def ihilbert(x): + """ + Return inverse Hilbert transform of a periodic sequence x. + + If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = -sqrt(-1)*sign(j) * x_j + y_0 = 0 + + """ + return -hilbert(x) + + +_cache = {} + + +def cs_diff(x, a, b, period=None, _cache=_cache): + """ + Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence. 
+ + If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j + y_0 = 0 + + Parameters + ---------- + x : array_like + The array to take the pseudo-derivative from. + a, b : float + Defines the parameters of the cosh/sinh pseudo-differential + operator. + period : float, optional + The period of the sequence. Default period is ``2*pi``. + + Returns + ------- + cs_diff : ndarray + Pseudo-derivative of periodic sequence `x`. + + Notes + ----- + For even len(`x`), the Nyquist mode of `x` is taken as zero. + + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return cs_diff(tmp.real,a,b,period) + \ + 1j*cs_diff(tmp.imag,a,b,period) + if period is not None: + a = a*2*pi/period + b = b*2*pi/period + n = len(x) + omega = _cache.get((n,a,b)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,a=a,b=b): + if k: + return -cosh(a*k)/sinh(b*k) + return 0 + omega = convolve.init_convolution_kernel(n,kernel,d=1) + _cache[(n,a,b)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def sc_diff(x, a, b, period=None, _cache=_cache): + """ + Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j + y_0 = 0 + + Parameters + ---------- + x : array_like + Input array. + a,b : float + Defines the parameters of the sinh/cosh pseudo-differential + operator. + period : float, optional + The period of the sequence x. Default is 2*pi. + + Notes + ----- + ``sc_diff(cs_diff(x,a,b),b,a) == x`` + For even ``len(x)``, the Nyquist mode of x is taken as zero. + + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return sc_diff(tmp.real,a,b,period) + \ + 1j*sc_diff(tmp.imag,a,b,period) + if period is not None: + a = a*2*pi/period + b = b*2*pi/period + n = len(x) + omega = _cache.get((n,a,b)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,a=a,b=b): + if k: + return sinh(a*k)/cosh(b*k) + return 0 + omega = convolve.init_convolution_kernel(n,kernel,d=1) + _cache[(n,a,b)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def ss_diff(x, a, b, period=None, _cache=_cache): + """ + Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j + y_0 = a/b * x_0 + + Parameters + ---------- + x : array_like + The array to take the pseudo-derivative from. + a,b + Defines the parameters of the sinh/sinh pseudo-differential + operator. + period : float, optional + The period of the sequence x. Default is ``2*pi``. 
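+
+    Returns
+    -------
+    ss_diff : ndarray
+        Pseudo-derivative of periodic sequence `x`.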
+
+    Notes
+    -----
+    ``ss_diff(ss_diff(x,a,b),b,a) == x``
+
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return ss_diff(tmp.real,a,b,period) + \
+               1j*ss_diff(tmp.imag,a,b,period)
+    if period is not None:
+        a = a*2*pi/period
+        b = b*2*pi/period
+    n = len(x)
+    omega = _cache.get((n,a,b))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k,a=a,b=b):
+            if k:
+                return sinh(a*k)/sinh(b*k)
+            return float(a)/b
+        omega = convolve.init_convolution_kernel(n,kernel)
+        _cache[(n,a,b)] = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+_cache = {}
+
+
+def cc_diff(x, a, b, period=None, _cache=_cache):
+    """
+    Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence.
+
+    If x_j and y_j are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+        y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
+
+    Parameters
+    ----------
+    x : array_like
+        The array to take the pseudo-derivative from.
+    a,b : float
+        Defines the parameters of the cosh/cosh pseudo-differential
+        operator.
+    period : float, optional
+        The period of the sequence x. Default is ``2*pi``.
+
+    Returns
+    -------
+    cc_diff : ndarray
+        Pseudo-derivative of periodic sequence `x`.
+
+    Notes
+    -----
+    ``cc_diff(cc_diff(x,a,b),b,a) == x``
+
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return cc_diff(tmp.real,a,b,period) + \
+               1j*cc_diff(tmp.imag,a,b,period)
+    if period is not None:
+        a = a*2*pi/period
+        b = b*2*pi/period
+    n = len(x)
+    omega = _cache.get((n,a,b))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k,a=a,b=b):
+            return cosh(a*k)/cosh(b*k)
+        omega = convolve.init_convolution_kernel(n,kernel)
+        _cache[(n,a,b)] = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+_cache = {}
+
+
+def shift(x, a, period=None, _cache=_cache):
+    """
+    Shift periodic sequence x by a: y(u) = x(u+a).
+
+    If x_j and y_j are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+        y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_j
+
+    Parameters
+    ----------
+    x : array_like
+        The array to shift.
+    a : float
+        Defines the amount by which the sequence is shifted.
+    period : float, optional
+        The period of the sequences x and y. Default period is ``2*pi``.
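+
+    Examples
+    --------
+    Shifting a sampled sine by half of the default ``2*pi`` period negates
+    it (an illustrative check):
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import shift
+    >>> x = np.sin(np.linspace(0, 2*np.pi, 16, endpoint=False))
+    >>> np.allclose(shift(x, np.pi), -x)
+    True
+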
+ """ + tmp = asarray(x) + if iscomplexobj(tmp): + return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period) + if period is not None: + a = a*2*pi/period + n = len(x) + omega = _cache.get((n,a)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel_real(k,a=a): + return cos(a*k) + + def kernel_imag(k,a=a): + return sin(a*k) + omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0, + zero_nyquist=0) + omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1, + zero_nyquist=0) + _cache[(n,a)] = omega_real,omega_imag + else: + omega_real,omega_imag = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve_z(tmp,omega_real,omega_imag, + overwrite_x=overwrite_x) + + +del _cache diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/pseudo_diffs.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/pseudo_diffs.pyc new file mode 100644 index 0000000..e8e7185 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/pseudo_diffs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/realtransforms.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/realtransforms.py new file mode 100644 index 0000000..72cfbe4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/realtransforms.py @@ -0,0 +1,739 @@ +""" +Real spectrum transforms (DCT, DST, MDCT) +""" +from __future__ import division, print_function, absolute_import + + +__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'] + +import numpy as np +from scipy.fftpack import _fftpack +from scipy.fftpack.basic import _datacopied, _fix_shape, _asfarray +from scipy.fftpack.helper import _init_nd_shape_and_axes + +import atexit +atexit.register(_fftpack.destroy_ddct1_cache) +atexit.register(_fftpack.destroy_ddct2_cache) +atexit.register(_fftpack.destroy_ddct4_cache) +atexit.register(_fftpack.destroy_dct1_cache) +atexit.register(_fftpack.destroy_dct2_cache) +atexit.register(_fftpack.destroy_dct4_cache) + +atexit.register(_fftpack.destroy_ddst1_cache) +atexit.register(_fftpack.destroy_ddst2_cache) +atexit.register(_fftpack.destroy_dst1_cache) +atexit.register(_fftpack.destroy_dst2_cache) + + +def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): + """ + Return multidimensional Discrete Cosine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + Axes along which the DCT is computed. + The default is over all axes. + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. 
+
+    See Also
+    --------
+    idctn : Inverse multidimensional DCT
+
+    Notes
+    -----
+    For full details of the DCT types and normalization modes, as well as
+    references, see `dct`.
+
+    Examples
+    --------
+    >>> from scipy.fftpack import dctn, idctn
+    >>> y = np.random.randn(16, 16)
+    >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
+    True
+
+    """
+    x = np.asanyarray(x)
+    shape, axes = _init_nd_shape_and_axes(x, shape, axes)
+    for n, ax in zip(shape, axes):
+        x = dct(x, type=type, n=n, axis=ax, norm=norm, overwrite_x=overwrite_x)
+    return x
+
+
+def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
+    """
+    Return multidimensional inverse Discrete Cosine Transform along the
+    specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    shape : int or array_like of ints or None, optional
+        The shape of the result.  If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
+        If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros.
+        If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to
+        length ``shape[i]``.
+        If any element of `shape` is -1, the size of the corresponding
+        dimension of `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes along which the IDCT is computed.
+        The default is over all axes.
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dctn : multidimensional DCT
+
+    Notes
+    -----
+    For full details of the IDCT types and normalization modes, as well as
+    references, see `idct`.
+
+    Examples
+    --------
+    >>> from scipy.fftpack import dctn, idctn
+    >>> y = np.random.randn(16, 16)
+    >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
+    True
+
+    """
+    x = np.asanyarray(x)
+    shape, axes = _init_nd_shape_and_axes(x, shape, axes)
+    for n, ax in zip(shape, axes):
+        x = idct(x, type=type, n=n, axis=ax, norm=norm,
+                 overwrite_x=overwrite_x)
+    return x
+
+
+def dstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
+    """
+    Return multidimensional Discrete Sine Transform along the specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    shape : int or array_like of ints or None, optional
+        The shape of the result.  If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
+        If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros.
+        If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to
+        length ``shape[i]``.
+        If any element of `shape` is -1, the size of the corresponding
+        dimension of `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes along which the DST is computed.
+        The default is over all axes.
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idstn : Inverse multidimensional DST
+
+    Notes
+    -----
+    For full details of the DST types and normalization modes, as well as
+    references, see `dst`.
+
+    Examples
+    --------
+    >>> from scipy.fftpack import dstn, idstn
+    >>> y = np.random.randn(16, 16)
+    >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
+    True
+
+    """
+    x = np.asanyarray(x)
+    shape, axes = _init_nd_shape_and_axes(x, shape, axes)
+    for n, ax in zip(shape, axes):
+        x = dst(x, type=type, n=n, axis=ax, norm=norm, overwrite_x=overwrite_x)
+    return x
+
+
+def idstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
+    """
+    Return multidimensional inverse Discrete Sine Transform along the
+    specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    shape : int or array_like of ints or None, optional
+        The shape of the result.  If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
+        If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros.
+        If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to
+        length ``shape[i]``.
+        If any element of `shape` is -1, the size of the corresponding
+        dimension of `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes along which the IDST is computed.
+        The default is over all axes.
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dstn : multidimensional DST
+
+    Notes
+    -----
+    For full details of the IDST types and normalization modes, as well as
+    references, see `idst`.
+
+    Examples
+    --------
+    >>> from scipy.fftpack import dstn, idstn
+    >>> y = np.random.randn(16, 16)
+    >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
+    True
+
+    """
+    x = np.asanyarray(x)
+    shape, axes = _init_nd_shape_and_axes(x, shape, axes)
+    for n, ax in zip(shape, axes):
+        x = idst(x, type=type, n=n, axis=ax, norm=norm,
+                 overwrite_x=overwrite_x)
+    return x
+
+
+def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    """
+    Return the Discrete Cosine Transform of arbitrary type sequence x.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform.  If ``n < x.shape[axis]``, `x` is
+        truncated.  If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the dct is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idct : Inverse DCT
+
+    Notes
+    -----
+    For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to
+    MATLAB ``dct(x)``.
+
+    There are theoretically 8 types of the DCT, only the first 4 types are
+    implemented in scipy. 'The' DCT generally refers to DCT type 2, and 'the'
+    Inverse DCT generally refers to DCT type 3.
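+
+    With ``norm='ortho'`` the transform matrix of each type is orthonormal,
+    so ``idct(dct(x, norm='ortho'), norm='ortho')`` recovers `x` up to
+    floating-point error; the per-type scaling factors are given below.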
+
+    **Type I**
+
+    There are several definitions of the DCT-I; we use the following
+    (for ``norm=None``)::
+
+                                         N-2
+      y[k] = x[0] + (-1)**k x[N-1] + 2 * sum x[n]*cos(pi*k*n/(N-1))
+                                         n=1
+
+    If ``norm='ortho'``, ``x[0]`` and ``x[N-1]`` are multiplied by a
+    scaling factor of ``sqrt(2)``, and ``y[k]`` is multiplied by a
+    scaling factor `f`::
+
+      f = 0.5*sqrt(1/(N-1)) if k = 0 or N-1,
+      f = 0.5*sqrt(2/(N-1)) otherwise.
+
+    .. versionadded:: 1.2.0
+       Orthonormalization in DCT-I.
+
+    .. note::
+       The DCT-I is only supported for input size > 1.
+
+    **Type II**
+
+    There are several definitions of the DCT-II; we use the following
+    (for ``norm=None``)::
+
+                N-1
+      y[k] = 2* sum x[n]*cos(pi*k*(2n+1)/(2*N)), 0 <= k < N.
+                n=0
+
+    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor `f`::
+
+      f = sqrt(1/(4*N)) if k = 0,
+      f = sqrt(1/(2*N)) otherwise.
+
+    This makes the corresponding matrix of coefficients orthonormal
+    (``OO' = Id``).
+
+    **Type III**
+
+    There are several definitions; we use the following
+    (for ``norm=None``)::
+
+                        N-1
+      y[k] = x[0] + 2 * sum x[n]*cos(pi*(k+0.5)*n/N), 0 <= k < N.
+                        n=1
+
+    or, for ``norm='ortho'`` and 0 <= k < N::
+
+                                          N-1
+      y[k] = x[0] / sqrt(N) + sqrt(2/N) * sum x[n]*cos(pi*(k+0.5)*n/N)
+                                          n=1
+
+    The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II,
+    up to a factor `2N`. The orthonormalized DCT-III is exactly the inverse
+    of the orthonormalized DCT-II.
+
+    **Type IV**
+
+    There are several definitions of the DCT-IV; we use the following
+    (for ``norm=None``)::
+
+                N-1
+      y[k] = 2* sum x[n]*cos(pi*(2k+1)*(2n+1)/(4*N)), 0 <= k < N.
+                n=0
+
+    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor `f`::
+
+      f = 0.5*sqrt(2/N)
+
+    .. versionadded:: 1.2.0
+       Support for DCT-IV.
+
+    References
+    ----------
+    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
+           Makhoul, `IEEE Transactions on acoustics, speech and signal
+           processing` vol. 28(1), pp. 27-34,
+           :doi:`10.1109/TASSP.1980.1163351` (1980).
+    .. [2] Wikipedia, "Discrete cosine transform",
+           https://en.wikipedia.org/wiki/Discrete_cosine_transform
+
+    Examples
+    --------
+    The Type 1 DCT is equivalent to the FFT (though faster) for real,
+    even-symmetrical inputs. The output is also real and even-symmetrical.
+    Half of the FFT input is used to generate half of the FFT output:
+
+    >>> from scipy.fftpack import fft, dct
+    >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
+    array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])
+    >>> dct(np.array([4., 3., 5., 10.]), 1)
+    array([ 30.,  -8.,   6.,  -2.])
+
+    """
+    return _dct(x, type, n, axis, normalize=norm, overwrite_x=overwrite_x)
+
+
+def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    """
+    Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform. If ``n < x.shape[axis]``, `x` is
+        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the idct is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    idct : ndarray of real
+        The transformed input array.
+ + See Also + -------- + dct : Forward DCT + + Notes + ----- + For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to + MATLAB ``idct(x)``. + + 'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3. + + IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type + 3, and IDCT of type 3 is the DCT of type 2. IDCT of type 4 is the DCT + of type 4. For the definition of these types, see `dct`. + + Examples + -------- + The Type 1 DCT is equivalent to the DFT for real, even-symmetrical + inputs. The output is also real and even-symmetrical. Half of the IFFT + input is used to generate half of the IFFT output: + + >>> from scipy.fftpack import ifft, idct + >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real + array([ 4., 3., 5., 10., 5., 3.]) + >>> idct(np.array([ 30., -8., 6., -2.]), 1) / 6 + array([ 4., 3., 5., 10.]) + + """ + # Inverse/forward type table + _TP = {1:1, 2:3, 3:2, 4:4} + return _dct(x, _TP[type], n, axis, normalize=norm, overwrite_x=overwrite_x) + + +def _get_dct_fun(type, dtype): + try: + name = {'float64':'ddct%d', 'float32':'dct%d'}[dtype.name] + except KeyError: + raise ValueError("dtype %s not supported" % dtype) + try: + f = getattr(_fftpack, name % type) + except AttributeError as e: + raise ValueError(str(e) + ". Type %d not understood" % type) + return f + + +def _get_norm_mode(normalize): + try: + nm = {None:0, 'ortho':1}[normalize] + except KeyError: + raise ValueError("Unknown normalize mode %s" % normalize) + return nm + + +def __fix_shape(x, n, axis, dct_or_dst): + tmp = _asfarray(x) + copy_made = _datacopied(tmp, x) + if n is None: + n = tmp.shape[axis] + elif n != tmp.shape[axis]: + tmp, copy_made2 = _fix_shape(tmp, n, axis) + copy_made = copy_made or copy_made2 + if n < 1: + raise ValueError("Invalid number of %s data points " + "(%d) specified." % (dct_or_dst, n)) + return tmp, n, copy_made + + +def _raw_dct(x0, type, n, axis, nm, overwrite_x): + f = _get_dct_fun(type, x0.dtype) + return _eval_fun(f, x0, n, axis, nm, overwrite_x) + + +def _raw_dst(x0, type, n, axis, nm, overwrite_x): + f = _get_dst_fun(type, x0.dtype) + return _eval_fun(f, x0, n, axis, nm, overwrite_x) + + +def _eval_fun(f, tmp, n, axis, nm, overwrite_x): + if axis == -1 or axis == len(tmp.shape) - 1: + return f(tmp, n, nm, overwrite_x) + + tmp = np.swapaxes(tmp, axis, -1) + tmp = f(tmp, n, nm, overwrite_x) + return np.swapaxes(tmp, axis, -1) + + +def _dct(x, type, n=None, axis=-1, overwrite_x=False, normalize=None): + """ + Return Discrete Cosine Transform of arbitrary type sequence x. + + Parameters + ---------- + x : array_like + input array. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the dct is computed; the default is over the + last axis (i.e., ``axis=-1``). + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. 
+
+    Returns
+    -------
+    z : ndarray
+
+    """
+    x0, n, copy_made = __fix_shape(x, n, axis, 'DCT')
+    if type == 1 and n < 2:
+        raise ValueError("DCT-I is not defined for size < 2")
+    overwrite_x = overwrite_x or copy_made
+    nm = _get_norm_mode(normalize)
+    if np.iscomplexobj(x0):
+        return (_raw_dct(x0.real, type, n, axis, nm, overwrite_x) + 1j *
+                _raw_dct(x0.imag, type, n, axis, nm, overwrite_x))
+    else:
+        return _raw_dct(x0, type, n, axis, nm, overwrite_x)
+
+
+def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    """
+    Return the Discrete Sine Transform of arbitrary type sequence x.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform. If ``n < x.shape[axis]``, `x` is
+        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the dst is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    dst : ndarray of reals
+        The transformed input array.
+
+    See Also
+    --------
+    idst : Inverse DST
+
+    Notes
+    -----
+    There are theoretically 8 types of the DST for different combinations of
+    even/odd boundary conditions and boundary offsets [1]_; only the first
+    4 types are implemented in scipy.
+
+    **Type I**
+
+    There are several definitions of the DST-I; we use the following
+    for ``norm=None``. DST-I assumes the input is odd around n=-1 and n=N. ::
+
+                 N-1
+      y[k] = 2 * sum x[n]*sin(pi*(k+1)*(n+1)/(N+1))
+                 n=0
+
+    Note that the DST-I is only supported for input size > 1.
+    The (unnormalized) DST-I is its own inverse, up to a factor `2(N+1)`.
+    The orthonormalized DST-I is exactly its own inverse.
+
+    **Type II**
+
+    There are several definitions of the DST-II; we use the following
+    for ``norm=None``. DST-II assumes the input is odd around n=-1/2 and
+    n=N-1/2; the output is odd around k=-1 and even around k=N-1. ::
+
+                N-1
+      y[k] = 2* sum x[n]*sin(pi*(k+1)*(n+0.5)/N), 0 <= k < N.
+                n=0
+
+    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor `f`::
+
+      f = sqrt(1/(4*N)) if k == 0
+      f = sqrt(1/(2*N)) otherwise.
+
+    **Type III**
+
+    There are several definitions of the DST-III; we use the following (for
+    ``norm=None``). DST-III assumes the input is odd around n=-1 and even
+    around n=N-1. ::
+
+                                 N-2
+      y[k] = x[N-1]*(-1)**k + 2* sum x[n]*sin(pi*(k+0.5)*(n+1)/N), 0 <= k < N.
+                                 n=0
+
+    The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II,
+    up to a factor `2N`. The orthonormalized DST-III is exactly the inverse
+    of the orthonormalized DST-II.
+
+    .. versionadded:: 0.11.0
+
+    **Type IV**
+
+    There are several definitions of the DST-IV; we use the following (for
+    ``norm=None``). DST-IV assumes the input is odd around n=-0.5 and even
+    around n=N-0.5. ::
+
+                N-1
+      y[k] = 2* sum x[n]*sin(pi*(k+0.5)*(n+0.5)/N), 0 <= k < N.
+                n=0
+
+    The (unnormalized) DST-IV is its own inverse, up to a factor `2N`. The
+    orthonormalized DST-IV is exactly its own inverse.
+
+    .. versionadded:: 1.2.0
+       Support for DST-IV.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Discrete sine transform",
+           https://en.wikipedia.org/wiki/Discrete_sine_transform
+
+    """
+    return _dst(x, type, n, axis, normalize=norm, overwrite_x=overwrite_x)
+
+
+def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    """
+    Return the Inverse Discrete Sine Transform of an arbitrary type sequence.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform. If ``n < x.shape[axis]``, `x` is
+        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the idst is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    idst : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dst : Forward DST
+
+    Notes
+    -----
+    'The' IDST is the IDST of type 2, which is the same as DST of type 3.
+
+    IDST of type 1 is the DST of type 1, IDST of type 2 is the DST of type
+    3, and IDST of type 3 is the DST of type 2. IDST of type 4 is the DST
+    of type 4. For the definition of these types, see `dst`.
+
+    .. versionadded:: 0.11.0
+
+    """
+    # Inverse/forward type table
+    _TP = {1:1, 2:3, 3:2, 4:4}
+    return _dst(x, _TP[type], n, axis, normalize=norm, overwrite_x=overwrite_x)
+
+
+def _get_dst_fun(type, dtype):
+    try:
+        name = {'float64':'ddst%d', 'float32':'dst%d'}[dtype.name]
+    except KeyError:
+        raise ValueError("dtype %s not supported" % dtype)
+    try:
+        f = getattr(_fftpack, name % type)
+    except AttributeError as e:
+        raise ValueError(str(e) + ". Type %d not understood" % type)
+    return f
+
+
+def _dst(x, type, n=None, axis=-1, overwrite_x=False, normalize=None):
+    """
+    Return Discrete Sine Transform of arbitrary type sequence x.
+
+    Parameters
+    ----------
+    x : array_like
+        input array.
+    n : int, optional
+        Length of the transform.
+    axis : int, optional
+        Axis along which the dst is computed. (default=-1)
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed. 
(default=False) + + Returns + ------- + z : real ndarray + + """ + x0, n, copy_made = __fix_shape(x, n, axis, 'DST') + if type == 1 and n < 2: + raise ValueError("DST-I is not defined for size < 2") + overwrite_x = overwrite_x or copy_made + nm = _get_norm_mode(normalize) + if np.iscomplexobj(x0): + return (_raw_dst(x0.real, type, n, axis, nm, overwrite_x) + 1j * + _raw_dst(x0.imag, type, n, axis, nm, overwrite_x)) + else: + return _raw_dst(x0, type, n, axis, nm, overwrite_x) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/realtransforms.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/realtransforms.pyc new file mode 100644 index 0000000..1686835 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/realtransforms.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/setup.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/setup.py new file mode 100644 index 0000000..471dddb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/setup.py @@ -0,0 +1,40 @@ +# Created by Pearu Peterson, August 2002 +from __future__ import division, print_function, absolute_import + + +from os.path import join + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('fftpack',parent_package, top_path) + + config.add_data_dir('tests') + + dfftpack_src = [join('src/dfftpack','*.f')] + config.add_library('dfftpack', sources=dfftpack_src) + + fftpack_src = [join('src/fftpack','*.f')] + config.add_library('fftpack', sources=fftpack_src) + + sources = ['fftpack.pyf','src/zfft.c','src/drfft.c','src/zrfft.c', + 'src/zfftnd.c', 'src/dct.c.src', 'src/dst.c.src'] + + config.add_extension('_fftpack', + sources=sources, + libraries=['dfftpack', 'fftpack'], + include_dirs=['src'], + depends=(dfftpack_src + fftpack_src)) + + config.add_extension('convolve', + sources=['convolve.pyf','src/convolve.c'], + libraries=['dfftpack'], + depends=dfftpack_src, + ) + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/setup.pyc new file mode 100644 index 0000000..2a0bc62 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/Makefile b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/Makefile new file mode 100644 index 0000000..39fdb58 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/Makefile @@ -0,0 +1,13 @@ +CC = gcc +LD = gcc + +fftw_single: fftw_dct.c + $(CC) -W -Wall -DDCT_TEST_USE_SINGLE $< -o $@ -lfftw3f + +fftw_double: fftw_dct.c + $(CC) -W -Wall $< -o $@ -lfftw3 + +clean: + rm -f fftw_single + rm -f fftw_double + rm -f *.o diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/__init__.pyc new file mode 100644 index 0000000..0a644b4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/__init__.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/fftw_dct.c b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/fftw_dct.c new file mode 100644 index 0000000..45a69b9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/fftw_dct.c @@ -0,0 +1,138 @@ +#include <stdlib.h> +#include <stdio.h> + +#include <fftw3.h> + +#ifdef DCT_TEST_USE_SINGLE +typedef float float_prec; +#define PF "%.7f" +#define FFTW_PLAN fftwf_plan +#define FFTW_MALLOC fftwf_malloc +#define FFTW_FREE fftwf_free +#define FFTW_PLAN_CREATE fftwf_plan_r2r_1d +#define FFTW_EXECUTE fftwf_execute +#define FFTW_DESTROY_PLAN fftwf_destroy_plan +#define FFTW_CLEANUP fftwf_cleanup +#else +typedef double float_prec; +#define PF "%.18f" +#define FFTW_PLAN fftw_plan +#define FFTW_MALLOC fftw_malloc +#define FFTW_FREE fftw_free +#define FFTW_PLAN_CREATE fftw_plan_r2r_1d +#define FFTW_EXECUTE fftw_execute +#define FFTW_DESTROY_PLAN fftw_destroy_plan +#define FFTW_CLEANUP fftw_cleanup +#endif + + +enum type { + DCT_I = 1, + DCT_II = 2, + DCT_III = 3, + DCT_IV = 4, + DST_I = 5, + DST_II = 6, + DST_III = 7, + DST_IV = 8, +}; + +int gen(int type, int sz) +{ + float_prec *a, *b; + FFTW_PLAN p; + int i, tp; + + a = FFTW_MALLOC(sizeof(*a) * sz); + if (a == NULL) { + fprintf(stderr, "failure\n"); + exit(EXIT_FAILURE); + } + b = FFTW_MALLOC(sizeof(*b) * sz); + if (b == NULL) { + fprintf(stderr, "failure\n"); + exit(EXIT_FAILURE); + } + + switch(type) { + case DCT_I: + tp = FFTW_REDFT00; + break; + case DCT_II: + tp = FFTW_REDFT10; + break; + case DCT_III: + tp = FFTW_REDFT01; + break; + case DCT_IV: + tp = FFTW_REDFT11; + break; + case DST_I: + tp = FFTW_RODFT00; + break; + case DST_II: + tp = FFTW_RODFT10; + break; + case DST_III: + tp = FFTW_RODFT01; + break; + case DST_IV: + tp = FFTW_RODFT11; + break; + default: + fprintf(stderr, "unknown type\n"); + exit(EXIT_FAILURE); + } + + switch(type) { + case DCT_I: + case DCT_II: + case DCT_III: + case DCT_IV: + for(i=0; i < sz; ++i) { + a[i] = i; + } + break; + case DST_I: + case DST_II: + case DST_III: + case DST_IV: +/* TODO: what should we do for dst's?*/ + for(i=0; i < sz; ++i) { + a[i] = i; + } + break; + default: + fprintf(stderr, "unknown type\n"); + exit(EXIT_FAILURE); + } + + p = FFTW_PLAN_CREATE(sz, a, b, tp, FFTW_ESTIMATE); + FFTW_EXECUTE(p); + FFTW_DESTROY_PLAN(p); + + for(i=0; i < sz; ++i) { + printf(PF"\n", b[i]); + } + FFTW_FREE(b); + FFTW_FREE(a); + + return 0; +} + +int main(int argc, char* argv[]) +{ + int n, tp; + + if (argc < 3) { + fprintf(stderr, "missing argument: program type n\n"); + exit(EXIT_FAILURE); + } + tp = atoi(argv[1]); + n = atoi(argv[2]); + + gen(tp, n); + FFTW_CLEANUP(); + + return 0; +} diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/fftw_double_ref.npz b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/fftw_double_ref.npz new file mode 100644 index 0000000..cd8048d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/fftw_double_ref.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/fftw_single_ref.npz b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/fftw_single_ref.npz new file mode 100644 index 0000000..b6d481d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/fftw_single_ref.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gen_fftw_ref.py 
b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gen_fftw_ref.py new file mode 100644 index 0000000..fe2e1a1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gen_fftw_ref.py @@ -0,0 +1,59 @@ +from __future__ import division, print_function, absolute_import + +from subprocess import Popen, PIPE, STDOUT + +import numpy as np + +SZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024] + + +def gen_data(dt): + arrays = {} + + if dt == np.double: + pg = './fftw_double' + elif dt == np.float32: + pg = './fftw_single' + else: + raise ValueError("unknown: %s" % dt) + # Generate test data using FFTW for reference + for type in [1, 2, 3, 4, 5, 6, 7, 8]: + arrays[type] = {} + for sz in SZ: + a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT) + st = [i.strip() for i in a.stdout.readlines()] + arrays[type][sz] = np.fromstring(",".join(st), sep=',', dtype=dt) + + return arrays + + +# generate single precision data +data = gen_data(np.float32) +filename = 'fftw_single_ref' +# Save ref data into npz format +d = {'sizes': SZ} +for type in [1, 2, 3, 4]: + for sz in SZ: + d['dct_%d_%d' % (type, sz)] = data[type][sz] + +d['sizes'] = SZ +for type in [5, 6, 7, 8]: + for sz in SZ: + d['dst_%d_%d' % (type-4, sz)] = data[type][sz] +np.savez(filename, **d) + + +# generate double precision data +data = gen_data(np.float64) +filename = 'fftw_double_ref' +# Save ref data into npz format +d = {'sizes': SZ} +for type in [1, 2, 3, 4]: + for sz in SZ: + d['dct_%d_%d' % (type, sz)] = data[type][sz] + +d['sizes'] = SZ +for type in [5, 6, 7, 8]: + for sz in SZ: + d['dst_%d_%d' % (type-4, sz)] = data[type][sz] +np.savez(filename, **d) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gen_fftw_ref.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gen_fftw_ref.pyc new file mode 100644 index 0000000..09303b2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gen_fftw_ref.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gendata.m b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gendata.m new file mode 100644 index 0000000..6c231df --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gendata.m @@ -0,0 +1,21 @@ +x0 = linspace(0, 10, 11); +x1 = linspace(0, 10, 15); +x2 = linspace(0, 10, 16); +x3 = linspace(0, 10, 17); + +x4 = randn(32, 1); +x5 = randn(64, 1); +x6 = randn(128, 1); +x7 = randn(256, 1); + +y0 = dct(x0); +y1 = dct(x1); +y2 = dct(x2); +y3 = dct(x3); +y4 = dct(x4); +y5 = dct(x5); +y6 = dct(x6); +y7 = dct(x7); + +save('test.mat', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', ... 
+ 'y0', 'y1', 'y2', 'y3', 'y4', 'y5', 'y6', 'y7'); diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gendata.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gendata.py new file mode 100644 index 0000000..b99fe74 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gendata.py @@ -0,0 +1,8 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.io import loadmat + +m = loadmat('test.mat', squeeze_me=True, struct_as_record=True, + mat_dtype=True) +np.savez('test.npz', **m) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gendata.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gendata.pyc new file mode 100644 index 0000000..d847422 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gendata.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test.npz b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test.npz new file mode 100644 index 0000000..f90294b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_basic.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_basic.py new file mode 100644 index 0000000..f73fab2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_basic.py @@ -0,0 +1,972 @@ +# Created by Pearu Peterson, September 2002 + +from __future__ import division, print_function, absolute_import + +__usage__ = """ +Build fftpack: + python setup_fftpack.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.fftpack.test()' +Run tests if fftpack is not installed: + python tests/test_basic.py +""" + +from numpy.testing import (assert_, assert_equal, assert_array_almost_equal, + assert_array_almost_equal_nulp, assert_array_less) +import pytest +from pytest import raises as assert_raises +from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2 +from scipy.fftpack import _fftpack as fftpack +from scipy.fftpack.basic import _is_safe_size + +from numpy import (arange, add, array, asarray, zeros, dot, exp, pi, + swapaxes, double, cdouble) +import numpy as np +import numpy.fft +from numpy.random import rand + +# "large" composite numbers supported by FFTPACK +LARGE_COMPOSITE_SIZES = [ + 2**13, + 2**5 * 3**5, + 2**3 * 3**3 * 5**2, +] +SMALL_COMPOSITE_SIZES = [ + 2, + 2*3*5, + 2*2*3*3, +] +# prime +LARGE_PRIME_SIZES = [ + 2011 +] +SMALL_PRIME_SIZES = [ + 29 +] + + +def _assert_close_in_norm(x, y, rtol, size, rdt): + # helper function for testing + err_msg = "size: %s rdt: %s" % (size, rdt) + assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg) + + +def random(size): + return rand(*size) + + +def get_mat(n): + data = arange(n) + data = add.outer(data, data) + return data + + +def direct_dft(x): + x = asarray(x) + n = len(x) + y = zeros(n, dtype=cdouble) + w = -arange(n)*(2j*pi/n) + for i in range(n): + y[i] = dot(exp(i*w), x) + return y + + +def direct_idft(x): + x = asarray(x) + n = len(x) + y = zeros(n, dtype=cdouble) + w = arange(n)*(2j*pi/n) + for i in range(n): + y[i] = dot(exp(i*w), x)/n + return y + + +def direct_dftn(x): + x = asarray(x) + for axis in range(len(x.shape)): + x = fft(x, axis=axis) + return x + + +def direct_idftn(x): + x = asarray(x) + for axis in range(len(x.shape)): + x = ifft(x, axis=axis) + return x + 
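+
+# Illustrative cross-check (an editor's sketch, not part of the upstream
+# test suite; the leading underscore keeps pytest from collecting it):
+# the direct_* helpers in this file are naive O(n**2) reference transforms
+# and should agree with numpy's FFT on any input.
+def _example_direct_vs_numpy(n=16):
+    x = rand(n)
+    assert np.allclose(direct_dft(x), numpy.fft.fft(x))
+    assert np.allclose(direct_idft(x), numpy.fft.ifft(x))
+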
+ +def direct_rdft(x): + x = asarray(x) + n = len(x) + w = -arange(n)*(2j*pi/n) + r = zeros(n, dtype=double) + for i in range(n//2+1): + y = dot(exp(i*w), x) + if i: + r[2*i-1] = y.real + if 2*i < n: + r[2*i] = y.imag + else: + r[0] = y.real + return r + + +def direct_irdft(x): + x = asarray(x) + n = len(x) + x1 = zeros(n, dtype=cdouble) + for i in range(n//2+1): + if i: + if 2*i < n: + x1[i] = x[2*i-1] + 1j*x[2*i] + x1[n-i] = x[2*i-1] - 1j*x[2*i] + else: + x1[i] = x[2*i-1] + else: + x1[0] = x[0] + return direct_idft(x1).real + + +class _TestFFTBase(object): + def setup_method(self): + self.cdt = None + self.rdt = None + np.random.seed(1234) + + def test_definition(self): + x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt) + y = fft(x) + assert_equal(y.dtype, self.cdt) + y1 = direct_dft(x) + assert_array_almost_equal(y,y1) + x = np.array([1,2,3,4+0j,5], dtype=self.cdt) + assert_array_almost_equal(fft(x),direct_dft(x)) + + def test_n_argument_real(self): + x1 = np.array([1,2,3,4], dtype=self.rdt) + x2 = np.array([1,2,3,4], dtype=self.rdt) + y = fft([x1,x2],n=4) + assert_equal(y.dtype, self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def _test_n_argument_complex(self): + x1 = np.array([1,2,3,4+1j], dtype=self.cdt) + x2 = np.array([1,2,3,4+1j], dtype=self.cdt) + y = fft([x1,x2],n=4) + assert_equal(y.dtype, self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = list(range(n)) + y = fftpack.zfft(x) + y2 = numpy.fft.fft(x) + assert_array_almost_equal(y,y2) + y = fftpack.zrfft(x) + assert_array_almost_equal(y,y2) + + def test_invalid_sizes(self): + assert_raises(ValueError, fft, []) + assert_raises(ValueError, fft, [[1,1],[2,2]], -5) + + def test__is_safe_size(self): + vals = [(0, True), (1, True), (2, True), (3, True), (4, True), (5, True), (6, True), (7, False), + (15, True), (16, True), (17, False), (18, True), (21, False), (25, True), (50, True), + (120, True), (210, False)] + for n, is_safe in vals: + assert_equal(_is_safe_size(n), is_safe) + + +class TestDoubleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.cdouble + self.rdt = np.double + + +class TestSingleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + @pytest.mark.xfail(run=False, reason="single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved") + def test_notice(self): + pass + + +class TestFloat16FFT(object): + + def test_1_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft(x1, n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (4, )) + assert_array_almost_equal(y, direct_dft(x1.astype(np.float32))) + + def test_n_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + x2 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft([x1, x2], n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (2, 4)) + assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32))) + assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32))) + + +class _TestIFFTBase(object): + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt) + y = ifft(x) + y1 = direct_idft(x) + assert_equal(y.dtype, self.cdt) + assert_array_almost_equal(y,y1) + + 
x = np.array([1,2,3,4+0j,5], self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_definition_real(self): + x = np.array([1,2,3,4,1,2,3,4], self.rdt) + y = ifft(x) + assert_equal(y.dtype, self.cdt) + y1 = direct_idft(x) + assert_array_almost_equal(y,y1) + + x = np.array([1,2,3,4,5], dtype=self.rdt) + assert_equal(y.dtype, self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_djbfft(self): + for i in range(2,14): + n = 2**i + x = list(range(n)) + y = fftpack.zfft(x,direction=-1) + y2 = numpy.fft.ifft(x) + assert_array_almost_equal(y,y2) + y = fftpack.zrfft(x,direction=-1) + assert_array_almost_equal(y,y2) + + def test_random_complex(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.cdt) + x = random([size]).astype(self.cdt) + 1j*x + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + if self.rdt == np.float32: + rtol = 1e-5 + elif self.rdt == np.float64: + rtol = 1e-10 + + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + x = (x + 1j*np.random.rand(size)).astype(self.cdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, ifft, []) + assert_raises(ValueError, ifft, [[1,1],[2,2]], -5) + + +class TestDoubleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.cdouble + self.rdt = np.double + + +class TestSingleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + +class _TestRFFTBase(object): + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]: + x = np.array(t, dtype=self.rdt) + y = rfft(x) + y1 = direct_rdft(x) + assert_array_almost_equal(y,y1) + assert_equal(y.dtype, self.rdt) + + def test_djbfft(self): + from numpy.fft import fft as numpy_fft + for i in range(2,14): + n = 2**i + x = list(range(n)) + y2 = numpy_fft(x) + y1 = zeros((n,),dtype=double) + y1[0] = y2[0].real + y1[-1] = y2[n//2].real + for k in range(1, n//2): + y1[2*k-1] = y2[k].real + y1[2*k] = y2[k].imag + y = fftpack.drfft(x) + assert_array_almost_equal(y,y1) + + def test_invalid_sizes(self): + assert_raises(ValueError, rfft, []) + assert_raises(ValueError, rfft, [[1,1],[2,2]], -5) + + # See gh-5790 + class MockSeries(object): + def __init__(self, data): + self.data = np.asarray(data) + + def __getattr__(self, item): + try: + return getattr(self.data, item) + except AttributeError: + raise AttributeError(("'MockSeries' object " + "has no attribute '{attr}'". 
+ format(attr=item))) + + def test_non_ndarray_with_dtype(self): + x = np.array([1., 2., 3., 4., 5.]) + xs = _TestRFFTBase.MockSeries(x) + + expected = [1, 2, 3, 4, 5] + out = rfft(xs) + + # Data should not have been overwritten + assert_equal(x, expected) + assert_equal(xs.data, expected) + +class TestRFFTDouble(_TestRFFTBase): + def setup_method(self): + self.cdt = np.cdouble + self.rdt = np.double + + +class TestRFFTSingle(_TestRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + +class _TestIRFFTBase(object): + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x1 = [1,2,3,4,1,2,3,4] + x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j] + x2 = [1,2,3,4,1,2,3,4,5] + x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j] + + def _test(x, xr): + y = irfft(np.array(x, dtype=self.rdt)) + y1 = direct_irdft(x) + assert_equal(y.dtype, self.rdt) + assert_array_almost_equal(y,y1, decimal=self.ndec) + assert_array_almost_equal(y,ifft(xr), decimal=self.ndec) + + _test(x1, x1_1) + _test(x2, x2_1) + + def test_djbfft(self): + from numpy.fft import ifft as numpy_ifft + for i in range(2,14): + n = 2**i + x = list(range(n)) + x1 = zeros((n,),dtype=cdouble) + x1[0] = x[0] + for k in range(1, n//2): + x1[k] = x[2*k-1]+1j*x[2*k] + x1[n-k] = x[2*k-1]-1j*x[2*k] + x1[n//2] = x[-1] + y1 = numpy_ifft(x1) + y = fftpack.drfft(x,direction=-1) + assert_array_almost_equal(y,y1) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = irfft(rfft(x)) + y2 = rfft(irfft(x)) + assert_equal(y1.dtype, self.rdt) + assert_equal(y2.dtype, self.rdt) + assert_array_almost_equal(y1, x, decimal=self.ndec, + err_msg="size=%d" % size) + assert_array_almost_equal(y2, x, decimal=self.ndec, + err_msg="size=%d" % size) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + if self.rdt == np.float32: + rtol = 1e-5 + elif self.rdt == np.float64: + rtol = 1e-10 + + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = irfft(rfft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = rfft(irfft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, irfft, []) + assert_raises(ValueError, irfft, [[1,1],[2,2]], -5) + + +# self.ndec is bogus; we should have a assert_array_approx_equal for number of +# significant digits + +class TestIRFFTDouble(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.cdouble + self.rdt = np.double + self.ndec = 14 + + +class TestIRFFTSingle(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + self.ndec = 5 + + +class Testfft2(object): + def setup_method(self): + np.random.seed(1234) + + def test_regression_244(self): + """FFT returns wrong result with axes parameter.""" + # fftn (and hence fft2) used to break when both axes and shape were + # used + x = numpy.ones((4, 4, 2)) + y = fft2(x, shape=(8, 8), axes=(-3, -2)) + y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2)) + assert_array_almost_equal(y, y_r) + + def test_invalid_sizes(self): + assert_raises(ValueError, fft2, [[]]) + assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3)) + + +class TestFftnSingle(object): + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float32)) + assert_(y.dtype == 
np.complex64, + msg="double precision output with single precision") + + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_size_accuracy_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2000) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_size_accuracy_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2000) + + def test_definition_float16(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float16)) + assert_equal(y.dtype, np.complex64) + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_float16_input_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 5e5) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_float16_input_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2e6) + + +class TestFftn(object): + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(x) + assert_array_almost_equal(y, direct_dftn(x)) + + x = random((20, 26)) + assert_array_almost_equal(fftn(x), direct_dftn(x)) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal(fftn(x), direct_dftn(x)) + + def test_axes_argument(self): + # plane == ji_plane, x== kji_space + plane1 = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + plane2 = [[10, 11, 12], + [13, 14, 15], + [16, 17, 18]] + plane3 = [[19, 20, 21], + [22, 23, 24], + [25, 26, 27]] + ki_plane1 = [[1, 2, 3], + [10, 11, 12], + [19, 20, 21]] + ki_plane2 = [[4, 5, 6], + [13, 14, 15], + [22, 23, 24]] + ki_plane3 = [[7, 8, 9], + [16, 17, 18], + [25, 26, 27]] + jk_plane1 = [[1, 10, 19], + [4, 13, 22], + [7, 16, 25]] + jk_plane2 = [[2, 11, 20], + [5, 14, 23], + [8, 17, 26]] + jk_plane3 = [[3, 12, 21], + [6, 15, 24], + [9, 18, 27]] + kj_plane1 = [[1, 4, 7], + [10, 13, 16], [19, 22, 25]] + kj_plane2 = [[2, 5, 8], + [11, 14, 17], [20, 23, 26]] + kj_plane3 = [[3, 6, 9], + [12, 15, 18], [21, 24, 27]] + ij_plane1 = [[1, 4, 7], + [2, 5, 8], + [3, 6, 9]] + ij_plane2 = [[10, 13, 16], + [11, 14, 17], + [12, 15, 18]] + ij_plane3 = [[19, 22, 25], + [20, 23, 26], + [21, 24, 27]] + ik_plane1 = [[1, 10, 19], + [2, 11, 20], + [3, 12, 21]] + ik_plane2 = [[4, 13, 22], + [5, 14, 23], + [6, 15, 24]] + ik_plane3 = [[7, 16, 25], + [8, 17, 26], + [9, 18, 27]] + ijk_space = [jk_plane1, jk_plane2, jk_plane3] + ikj_space = [kj_plane1, kj_plane2, kj_plane3] + jik_space = [ik_plane1, ik_plane2, ik_plane3] + jki_space = [ki_plane1, ki_plane2, ki_plane3] + 
kij_space = [ij_plane1, ij_plane2, ij_plane3] + x = array([plane1, plane2, plane3]) + + assert_array_almost_equal(fftn(x), + fftn(x, axes=(-3, -2, -1))) # kji_space + assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2))) + assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1))) + y = fftn(x, axes=(2, 1, 0)) # ijk_space + assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space)) + y = fftn(x, axes=(2, 0, 1)) # ikj_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2), + fftn(ikj_space)) + y = fftn(x, axes=(1, 2, 0)) # jik_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2), + fftn(jik_space)) + y = fftn(x, axes=(1, 0, 2)) # jki_space + assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space)) + y = fftn(x, axes=(0, 2, 1)) # kij_space + assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space)) + + y = fftn(x, axes=(-2, -1)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(1, 2)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(-3, -2)) # kj_plane + assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0]) + assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1]) + assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2]) + + y = fftn(x, axes=(-3, -1)) # ki_plane + assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :]) + assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :]) + assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :]) + + y = fftn(x, axes=(-1, -2)) # ij_plane + assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1)) + assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1)) + assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1)) + + y = fftn(x, axes=(-1, -3)) # ik_plane + assert_array_almost_equal(fftn(ik_plane1), + swapaxes(y[:, 0, :], -1, -2)) + assert_array_almost_equal(fftn(ik_plane2), + swapaxes(y[:, 1, :], -1, -2)) + assert_array_almost_equal(fftn(ik_plane3), + swapaxes(y[:, 2, :], -1, -2)) + + y = fftn(x, axes=(-2, -3)) # jk_plane + assert_array_almost_equal(fftn(jk_plane1), + swapaxes(y[:, :, 0], -1, -2)) + assert_array_almost_equal(fftn(jk_plane2), + swapaxes(y[:, :, 1], -1, -2)) + assert_array_almost_equal(fftn(jk_plane3), + swapaxes(y[:, :, 2], -1, -2)) + + y = fftn(x, axes=(-1,)) # i_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :]) + y = fftn(x, axes=(-2,)) # j_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j]) + y = fftn(x, axes=(0,)) # k_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j]) + + y = fftn(x, axes=()) # point + assert_array_almost_equal(y, x) + + def test_shape_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6]] + large_x1 = [[1, 2, 3, 0], + [4, 5, 6, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]] + + y = fftn(small_x, shape=(4, 4)) + assert_array_almost_equal(y, fftn(large_x1)) + + y = fftn(small_x, shape=(3, 4)) + assert_array_almost_equal(y, fftn(large_x1[:-1])) + + def test_shape_axes_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + large_x1 = array([[1, 2, 3, 0], + [4, 5, 6, 0], + [7, 8, 9, 0], + [0, 0, 0, 0]]) + y = fftn(small_x, shape=(4, 4), axes=(-2, -1)) + assert_array_almost_equal(y, fftn(large_x1)) + y = fftn(small_x, 
shape=(4, 4), axes=(-1, -2))
+
+        assert_array_almost_equal(y, swapaxes(
+            fftn(swapaxes(large_x1, -1, -2)), -1, -2))
+
+    def test_shape_axes_argument2(self):
+        # Change shape of the last axis
+        x = numpy.random.random((10, 5, 3, 7))
+        y = fftn(x, axes=(-1,), shape=(8,))
+        assert_array_almost_equal(y, fft(x, axis=-1, n=8))
+
+        # Change shape of an arbitrary axis which is not the last one
+        x = numpy.random.random((10, 5, 3, 7))
+        y = fftn(x, axes=(-2,), shape=(8,))
+        assert_array_almost_equal(y, fft(x, axis=-2, n=8))
+
+        # Change shape of axes: cf #244, where shape and axes were mixed up
+        x = numpy.random.random((4, 4, 2))
+        y = fftn(x, axes=(-3, -2), shape=(8, 8))
+        assert_array_almost_equal(y,
+                                  numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
+
+    def test_shape_argument_more(self):
+        x = zeros((4, 4, 2))
+        with assert_raises(ValueError,
+                           match="when given, axes and shape arguments"
+                           " have to be of the same length"):
+            fftn(x, shape=(8, 8, 2, 1))
+
+    def test_invalid_sizes(self):
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[1 0\]\) specified"):
+            fftn([[]])
+
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[ 4 -3\]\) specified"):
+            fftn([[1, 1], [2, 2]], (4, -3))
+
+
+class TestIfftn(object):
+    dtype = None
+    cdtype = None
+
+    def setup_method(self):
+        np.random.seed(1234)
+
+    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
+                             [(np.float64, np.complex128, 2000),
+                              (np.float32, np.complex64, 3500)])
+    def test_definition(self, dtype, cdtype, maxnlp):
+        x = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]], dtype=dtype)
+        y = ifftn(x)
+        assert_equal(y.dtype, cdtype)
+        assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
+
+        x = random((20, 26))
+        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+        x = random((5, 4, 3, 20))
+        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+    @pytest.mark.parametrize('maxnlp', [2000, 3500])
+    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
+    def test_random_complex(self, maxnlp, size):
+        x = random([size, size]) + 1j*random([size, size])
+        assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
+        assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
+
+    def test_invalid_sizes(self):
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[1 0\]\) specified"):
+            ifftn([[]])
+
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[ 4 -3\]\) specified"):
+            ifftn([[1, 1], [2, 2]], (4, -3))
+
+
+class TestLongDoubleFailure(object):
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_complex(self):
+        if np.dtype(np.longcomplex).itemsize == np.dtype(complex).itemsize:
+            # longdouble == double; so fft is supported
+            return
+
+        x = np.random.randn(10).astype(np.longdouble) + \
+            1j * np.random.randn(10).astype(np.longdouble)
+
+        for f in [fft, ifft]:
+            try:
+                f(x)
+                raise AssertionError("Type %r not supported but does not fail" %
+                                     np.longcomplex)
+            except ValueError:
+                pass
+
+    def test_real(self):
+        if np.dtype(np.longdouble).itemsize == np.dtype(np.double).itemsize:
+            # longdouble == double; so fft is supported
+            return
+
+        x = np.random.randn(10).astype(np.longdouble)
+
+        for f in [fft, ifft]:
+            try:
+                f(x)
+                raise AssertionError("Type %r not supported but does not fail" %
+                                     np.longcomplex)
+            except ValueError:
+                pass
+
+
+class FakeArray(object):
+    def __init__(self, data):
+        self._data = data
+        self.__array_interface__ = data.__array_interface__
+
+
+class FakeArray2(object):
+    
def __init__(self, data): + self._data = data + + def __array__(self): + return self._data + + +class TestOverwrite(object): + """Check input overwrite behavior of the FFT functions.""" + + real_dtypes = [np.float32, np.float64] + dtypes = real_dtypes + [np.complex64, np.complex128] + fftsizes = [8, 16, 32] + + def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite): + x2 = x.copy() + for fake in [lambda x: x, FakeArray, FakeArray2]: + routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x) + + sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % ( + routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) + if not should_overwrite: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes, + fftsize, overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + should_overwrite = (overwrite_x + and dtype in overwritable_dtypes + and fftsize <= shape[axis] + and (len(shape) == 1 or + (axis % len(shape) == len(shape)-1 + and fftsize == shape[axis]))) + self._check(data, routine, fftsize, axis, + overwrite_x=overwrite_x, + should_overwrite=should_overwrite) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = (np.complex128, np.complex64) + self._check_1d(fft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(ifft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + @pytest.mark.parametrize('dtype', real_dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = self.real_dtypes + self._check_1d(irfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(rfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes, + overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + def fftshape_iter(shp): + if len(shp) <= 0: + yield () + else: + for j in (shp[0]//2, shp[0], shp[0]*2): + for rest in fftshape_iter(shp[1:]): + yield (j,) + rest + + if axes is None: + part_shape = shape + else: + part_shape = tuple(np.take(shape, axes)) + + for fftshape in fftshape_iter(part_shape): + should_overwrite = (overwrite_x + and data.ndim == 1 + and np.all([x < y for x, y in zip(fftshape, + part_shape)]) + and dtype in overwritable_dtypes) + self._check(data, routine, fftshape, axes, + overwrite_x=overwrite_x, + should_overwrite=should_overwrite) + if data.ndim > 1: + # check fortran order: it never overwrites + self._check(data.T, routine, fftshape, axes, + overwrite_x=overwrite_x, + should_overwrite=False) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), None), + 
((16,), (0,)), + ((16, 2), (0,)), + ((2, 16), (1,)), + ((8, 16), None), + ((8, 16), (0, 1)), + ((8, 16, 2), (0, 1)), + ((8, 16, 2), (1, 2)), + ((8, 16, 2), (0,)), + ((8, 16, 2), (1,)), + ((8, 16, 2), (2,)), + ((8, 16, 2), None), + ((8, 16, 2), (0, 1, 2))]) + def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes): + overwritable = (np.complex128, np.complex64) + self._check_nd_one(fftn, dtype, shape, axes, overwritable, + overwrite_x) + self._check_nd_one(ifftn, dtype, shape, axes, overwritable, + overwrite_x) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_basic.pyc new file mode 100644 index 0000000..94ad931 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_basic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_helper.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_helper.py new file mode 100644 index 0000000..0ae5c35 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_helper.py @@ -0,0 +1,416 @@ +# Created by Pearu Peterson, September 2002 + +from __future__ import division, print_function, absolute_import + +__usage__ = """ +Build fftpack: + python setup_fftpack.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.fftpack.test(<level>)' +Run tests if fftpack is not installed: + python tests/test_helper.py [<level>] +""" + +from pytest import raises as assert_raises +from numpy.testing import assert_array_almost_equal, assert_equal, assert_ +from scipy.fftpack import fftshift,ifftshift,fftfreq,rfftfreq +from scipy.fftpack.helper import (next_fast_len, + _init_nd_shape_and_axes, + _init_nd_shape_and_axes_sorted) + +from numpy import pi, random +import numpy as np + +class TestFFTShift(object): + + def test_definition(self): + x = [0,1,2,3,4,-4,-3,-2,-1] + y = [-4,-3,-2,-1,0,1,2,3,4] + assert_array_almost_equal(fftshift(x),y) + assert_array_almost_equal(ifftshift(y),x) + x = [0,1,2,3,4,-5,-4,-3,-2,-1] + y = [-5,-4,-3,-2,-1,0,1,2,3,4] + assert_array_almost_equal(fftshift(x),y) + assert_array_almost_equal(ifftshift(y),x) + + def test_inverse(self): + for n in [1,4,9,100,211]: + x = random.random((n,)) + assert_array_almost_equal(ifftshift(fftshift(x)),x) + + +class TestFFTFreq(object): + + def test_definition(self): + x = [0,1,2,3,4,-4,-3,-2,-1] + assert_array_almost_equal(9*fftfreq(9),x) + assert_array_almost_equal(9*pi*fftfreq(9,pi),x) + x = [0,1,2,3,4,-5,-4,-3,-2,-1] + assert_array_almost_equal(10*fftfreq(10),x) + assert_array_almost_equal(10*pi*fftfreq(10,pi),x) + + +class TestRFFTFreq(object): + + def test_definition(self): + x = [0,1,1,2,2,3,3,4,4] + assert_array_almost_equal(9*rfftfreq(9),x) + assert_array_almost_equal(9*pi*rfftfreq(9,pi),x) + x = [0,1,1,2,2,3,3,4,4,5] + assert_array_almost_equal(10*rfftfreq(10),x) + assert_array_almost_equal(10*pi*rfftfreq(10,pi),x) + + +class TestNextOptLen(object): + + def test_next_opt_len(self): + random.seed(1234) + + def nums(): + for j in range(1, 1000): + yield j + yield 2**5 * 3**5 * 4**5 + 1 + + for n in nums(): + m = next_fast_len(n) + msg = "n=%d, m=%d" % (n, m) + + assert_(m >= n, msg) + + # check regularity + k = m + for d in [2, 3, 5]: + while True: + a, b = divmod(k, d) + if b == 0: + k = a + else: + break + assert_equal(k, 1, err_msg=msg) + + def test_np_integers(self): + ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64] + for ityp in 
ITYPES: + x = ityp(12345) + testN = next_fast_len(x) + assert_equal(testN, next_fast_len(int(x))) + + def test_next_opt_len_strict(self): + hams = { + 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15, + 16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000, + 510183360: 510183360, 510183360 + 1: 512000000, + 511000000: 512000000, + 854296875: 854296875, 854296875 + 1: 859963392, + 196608000000: 196608000000, 196608000000 + 1: 196830000000, + 8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208, + 206391214080000: 206391214080000, + 206391214080000 + 1: 206624260800000, + 470184984576000: 470184984576000, + 470184984576000 + 1: 470715894135000, + 7222041363087360: 7222041363087360, + 7222041363087360 + 1: 7230196133913600, + # power of 5 5**23 + 11920928955078125: 11920928955078125, + 11920928955078125 - 1: 11920928955078125, + # power of 3 3**34 + 16677181699666569: 16677181699666569, + 16677181699666569 - 1: 16677181699666569, + # power of 2 2**54 + 18014398509481984: 18014398509481984, + 18014398509481984 - 1: 18014398509481984, + # above this, int(ceil(n)) == int(ceil(n+1)) + 19200000000000000: 19200000000000000, + 19200000000000000 + 1: 19221679687500000, + 288230376151711744: 288230376151711744, + 288230376151711744 + 1: 288325195312500000, + 288325195312500000 - 1: 288325195312500000, + 288325195312500000: 288325195312500000, + 288325195312500000 + 1: 288555831593533440, + # power of 3 3**83 + 3990838394187339929534246675572349035227 - 1: + 3990838394187339929534246675572349035227, + 3990838394187339929534246675572349035227: + 3990838394187339929534246675572349035227, + # power of 2 2**135 + 43556142965880123323311949751266331066368 - 1: + 43556142965880123323311949751266331066368, + 43556142965880123323311949751266331066368: + 43556142965880123323311949751266331066368, + # power of 5 5**57 + 6938893903907228377647697925567626953125 - 1: + 6938893903907228377647697925567626953125, + 6938893903907228377647697925567626953125: + 6938893903907228377647697925567626953125, + # http://www.drdobbs.com/228700538 + # 2**96 * 3**1 * 5**13 + 290142196707511001929482240000000000000 - 1: + 290142196707511001929482240000000000000, + 290142196707511001929482240000000000000: + 290142196707511001929482240000000000000, + 290142196707511001929482240000000000000 + 1: + 290237644800000000000000000000000000000, + # 2**36 * 3**69 * 5**7 + 4479571262811807241115438439905203543080960000000 - 1: + 4479571262811807241115438439905203543080960000000, + 4479571262811807241115438439905203543080960000000: + 4479571262811807241115438439905203543080960000000, + 4479571262811807241115438439905203543080960000000 + 1: + 4480327901140333639941336854183943340032000000000, + # 2**37 * 3**44 * 5**42 + 30774090693237851027531250000000000000000000000000000000000000 - 1: + 30774090693237851027531250000000000000000000000000000000000000, + 30774090693237851027531250000000000000000000000000000000000000: + 30774090693237851027531250000000000000000000000000000000000000, + 30774090693237851027531250000000000000000000000000000000000000 + 1: + 30778180617309082445871527002041377406962596539492679680000000, + } + for x, y in hams.items(): + assert_equal(next_fast_len(x), y) + + +class Test_init_nd_shape_and_axes(object): + + def test_py_0d_defaults(self): + x = 4 + shape = None + axes = None + + shape_expected = np.array([]) + axes_expected = np.array([]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + 
shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_np_0d_defaults(self): + x = np.array(7.) + shape = None + axes = None + + shape_expected = np.array([]) + axes_expected = np.array([]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_py_1d_defaults(self): + x = [1, 2, 3] + shape = None + axes = None + + shape_expected = np.array([3]) + axes_expected = np.array([0]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_np_1d_defaults(self): + x = np.arange(0, 1, .1) + shape = None + axes = None + + shape_expected = np.array([10]) + axes_expected = np.array([0]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_py_2d_defaults(self): + x = [[1, 2, 3, 4], + [5, 6, 7, 8]] + shape = None + axes = None + + shape_expected = np.array([2, 4]) + axes_expected = np.array([0, 1]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_np_2d_defaults(self): + x = np.arange(0, 1, .1).reshape(5, 2) + shape = None + axes = None + + shape_expected = np.array([5, 2]) + axes_expected = np.array([0, 1]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_np_5d_defaults(self): + x = np.zeros([6, 2, 5, 3, 4]) + shape = None + axes = None + + shape_expected = np.array([6, 2, 5, 3, 4]) + axes_expected = np.array([0, 1, 2, 3, 4]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_np_5d_set_shape(self): + x = np.zeros([6, 2, 5, 3, 4]) + shape = [10, -1, -1, 1, 4] + axes = None + + shape_expected = np.array([10, 2, 5, 1, 4]) + axes_expected = np.array([0, 1, 2, 3, 4]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_np_5d_set_axes(self): + x = np.zeros([6, 2, 
5, 3, 4]) + shape = None + axes = [4, 1, 2] + + shape_expected = np.array([4, 2, 5]) + axes_expected = np.array([4, 1, 2]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_np_5d_set_axes_sorted(self): + x = np.zeros([6, 2, 5, 3, 4]) + shape = None + axes = [4, 1, 2] + + shape_expected = np.array([2, 5, 4]) + axes_expected = np.array([1, 2, 4]) + + shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_np_5d_set_shape_axes(self): + x = np.zeros([6, 2, 5, 3, 4]) + shape = [10, -1, 2] + axes = [1, 0, 3] + + shape_expected = np.array([10, 6, 2]) + axes_expected = np.array([1, 0, 3]) + + shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_np_5d_set_shape_axes_sorted(self): + x = np.zeros([6, 2, 5, 3, 4]) + shape = [10, -1, 2] + axes = [1, 0, 3] + + shape_expected = np.array([6, 10, 2]) + axes_expected = np.array([0, 1, 3]) + + shape_res, axes_res = _init_nd_shape_and_axes_sorted(x, shape, axes) + + assert_equal(shape_res, shape_expected) + assert_equal(axes_res, axes_expected) + + def test_errors(self): + with assert_raises(ValueError, + match="when given, axes values must be a scalar" + " or vector"): + _init_nd_shape_and_axes([0], shape=None, axes=[[1, 2], [3, 4]]) + + with assert_raises(ValueError, + match="when given, axes values must be integers"): + _init_nd_shape_and_axes([0], shape=None, axes=[1., 2., 3., 4.]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + _init_nd_shape_and_axes([0], shape=None, axes=[1]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + _init_nd_shape_and_axes([0], shape=None, axes=[-2]) + + with assert_raises(ValueError, + match="all axes must be unique"): + _init_nd_shape_and_axes([0], shape=None, axes=[0, 0]) + + with assert_raises(ValueError, + match="when given, shape values must be a scalar " + "or vector"): + _init_nd_shape_and_axes([0], shape=[[1, 2], [3, 4]], axes=None) + + with assert_raises(ValueError, + match="when given, shape values must be integers"): + _init_nd_shape_and_axes([0], shape=[1., 2., 3., 4.], axes=None) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + _init_nd_shape_and_axes(np.zeros([1, 1, 1, 1]), + shape=[1, 2, 3], axes=[1]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[0\]\) specified"): + _init_nd_shape_and_axes([0], shape=[0], axes=None) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[-2\]\) specified"): + _init_nd_shape_and_axes([0], shape=-2, axes=None) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_helper.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_helper.pyc new file mode 100644 index 0000000..5be36dd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_helper.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_import.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_import.py new file mode 100644 index 0000000..9c05445 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_import.py @@ -0,0 +1,33 @@ 
+"""Test possibility of patching fftpack with pyfftw. + +No module source outside of scipy.fftpack should contain an import of +the form `from scipy.fftpack import ...`, so that a simple replacement +of scipy.fftpack by the corresponding fftw interface completely swaps +the two FFT implementations. + +Because this simply inspects source files, we only need to run the test +on one version of Python. +""" + + +import sys +if sys.version_info >= (3, 4): + from pathlib import Path + import re + import tokenize + from numpy.testing import assert_ + import scipy + + class TestFFTPackImport(object): + def test_fftpack_import(self): + base = Path(scipy.__file__).parent + regexp = r"\s*from.+\.fftpack import .*\n" + for path in base.rglob("*.py"): + if base / "fftpack" in path.parents: + continue + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g. LANG='C') + with tokenize.open(str(path)) as file: + assert_(all(not re.fullmatch(regexp, line) + for line in file), + "{0} contains an import from fftpack".format(path)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_import.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_import.pyc new file mode 100644 index 0000000..20ee51e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_import.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py new file mode 100644 index 0000000..d293ede --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py @@ -0,0 +1,382 @@ +# Created by Pearu Peterson, September 2002 + +from __future__ import division, print_function, absolute_import + +__usage__ = """ +Build fftpack: + python setup_fftpack.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.fftpack.test(<level>)' +Run tests if fftpack is not installed: + python tests/test_pseudo_diffs.py [<level>] +""" + +from numpy.testing import (assert_equal, assert_almost_equal, + assert_array_almost_equal) +from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert, + ihilbert, shift, fftfreq, cs_diff, sc_diff, + ss_diff, cc_diff) + +import numpy as np +from numpy import arange, sin, cos, pi, exp, tanh, sum, sign +from numpy.random import random + + +def direct_diff(x,k=1,period=None): + fx = fft(x) + n = len(fx) + if period is None: + period = 2*pi + w = fftfreq(n)*2j*pi/period*n + if k < 0: + w = 1 / w**k + w[0] = 0.0 + else: + w = w**k + if n > 2000: + w[250:n-250] = 0.0 + return ifft(w*fx).real + + +def direct_tilbert(x,h=1,period=None): + fx = fft(x) + n = len(fx) + if period is None: + period = 2*pi + w = fftfreq(n)*h*2*pi/period*n + w[0] = 1 + w = 1j/tanh(w) + w[0] = 0j + return ifft(w*fx) + + +def direct_itilbert(x,h=1,period=None): + fx = fft(x) + n = len(fx) + if period is None: + period = 2*pi + w = fftfreq(n)*h*2*pi/period*n + w = -1j*tanh(w) + return ifft(w*fx) + + +def direct_hilbert(x): + fx = fft(x) + n = len(fx) + w = fftfreq(n)*n + w = 1j*sign(w) + return ifft(w*fx) + + +def direct_ihilbert(x): + return -direct_hilbert(x) + + +def direct_shift(x,a,period=None): + n = len(x) + if period is None: + k = fftfreq(n)*1j*n + else: + k = fftfreq(n)*2j*pi/period*n + return ifft(fft(x)*exp(k*a)).real + + +class TestDiff(object): + + def test_definition(self): + for n in [16,17,64,127,32]: + x = arange(n)*2*pi/n + 
assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x))) + assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2)) + assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3)) + assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4)) + assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5)) + assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3)) + assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4)) + assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x))) + assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2)) + assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3)) + assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4)) + assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x))) + assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8))) + assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8))) + for k in range(5): + assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k)) + assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k)) + + def test_period(self): + for n in [17,64]: + x = arange(n)/float(n) + assert_array_almost_equal(diff(sin(2*pi*x),period=1), + 2*pi*cos(2*pi*x)) + assert_array_almost_equal(diff(sin(2*pi*x),3,period=1), + -(2*pi)**3*cos(2*pi*x)) + + def test_sin(self): + for n in [32,64,77]: + x = arange(n)*2*pi/n + assert_array_almost_equal(diff(sin(x)),cos(x)) + assert_array_almost_equal(diff(cos(x)),-sin(x)) + assert_array_almost_equal(diff(sin(x),2),-sin(x)) + assert_array_almost_equal(diff(sin(x),4),sin(x)) + assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x)) + assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x))) + + def test_expr(self): + for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]: + x = arange(n)*2*pi/n + f = sin(x)*cos(4*x)+exp(sin(3*x)) + df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) + ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ + - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) + d1 = diff(f) + assert_array_almost_equal(d1,df) + assert_array_almost_equal(diff(df),ddf) + assert_array_almost_equal(diff(f,2),ddf) + assert_array_almost_equal(diff(ddf,-1),df) + + def test_expr_large(self): + for n in [2048,4096]: + x = arange(n)*2*pi/n + f = sin(x)*cos(4*x)+exp(sin(3*x)) + df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) + ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ + - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) + assert_array_almost_equal(diff(f),df) + assert_array_almost_equal(diff(df),ddf) + assert_array_almost_equal(diff(ddf,-1),df) + assert_array_almost_equal(diff(f,2),ddf) + + def test_int(self): + n = 64 + x = arange(n)*2*pi/n + assert_array_almost_equal(diff(sin(x),-1),-cos(x)) + assert_array_almost_equal(diff(sin(x),-2),-sin(x)) + assert_array_almost_equal(diff(sin(x),-4),sin(x)) + assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x)) + + def test_random_even(self): + for k in [0,2,4,6]: + for n in [60,32,64,56,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + # zeroing Nyquist mode: + f = diff(diff(f,1),-1) + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(diff(diff(f,k),-k),f) + assert_array_almost_equal(diff(diff(f,-k),k),f) + + def test_random_odd(self): + for k in [0,1,2,3,4,5,6]: + for n in [33,65,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(diff(diff(f,k),-k),f) + assert_array_almost_equal(diff(diff(f,-k),k),f) + + def 
test_zero_nyquist(self): + for k in [0,1,2,3,4,5,6]: + for n in [32,33,64,56,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + # zeroing Nyquist mode: + f = diff(diff(f,1),-1) + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(diff(diff(f,k),-k),f) + assert_array_almost_equal(diff(diff(f,-k),k),f) + + +class TestTilbert(object): + + def test_definition(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = tilbert(sin(x),h) + y1 = direct_tilbert(sin(x),h) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(tilbert(sin(x),h), + direct_tilbert(sin(x),h)) + assert_array_almost_equal(tilbert(sin(2*x),h), + direct_tilbert(sin(2*x),h)) + + def test_random_even(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [32,64,56]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f) + + def test_random_odd(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [33,65,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(itilbert(tilbert(f,h),h),f) + assert_array_almost_equal(tilbert(itilbert(f,h),h),f) + + +class TestITilbert(object): + + def test_definition(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = itilbert(sin(x),h) + y1 = direct_itilbert(sin(x),h) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(itilbert(sin(x),h), + direct_itilbert(sin(x),h)) + assert_array_almost_equal(itilbert(sin(2*x),h), + direct_itilbert(sin(2*x),h)) + + +class TestHilbert(object): + + def test_definition(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = hilbert(sin(x)) + y1 = direct_hilbert(sin(x)) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(hilbert(sin(2*x)), + direct_hilbert(sin(2*x))) + + def test_tilbert_relation(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + f = sin(x)+cos(2*x)*sin(x) + y = hilbert(f) + y1 = direct_hilbert(f) + assert_array_almost_equal(y,y1) + y2 = tilbert(f,h=10) + assert_array_almost_equal(y,y2) + + def test_random_odd(self): + for n in [33,65,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(ihilbert(hilbert(f)),f) + assert_array_almost_equal(hilbert(ihilbert(f)),f) + + def test_random_even(self): + for n in [32,64,56]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + # zeroing Nyquist mode: + f = diff(diff(f,1),-1) + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f) + assert_array_almost_equal(hilbert(ihilbert(f)),f) + + +class TestIHilbert(object): + + def test_definition(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = ihilbert(sin(x)) + y1 = direct_ihilbert(sin(x)) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(ihilbert(sin(2*x)), + direct_ihilbert(sin(2*x))) + + def test_itilbert_relation(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + f = sin(x)+cos(2*x)*sin(x) + y = ihilbert(f) + y1 = direct_ihilbert(f) + assert_array_almost_equal(y,y1) + y2 = itilbert(f,h=10) + assert_array_almost_equal(y,y2) + + +class TestShift(object): + + def test_definition(self): + for n in [18,17,64,127,32,2048,256]: + x = arange(n)*2*pi/n + for a in [0.1,3]: + assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a)) + assert_array_almost_equal(shift(sin(x),a),sin(x+a)) + 
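# For these band-limited periodic signals, shift(f, a) behaves like + # evaluating the underlying function at x + a. +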
assert_array_almost_equal(shift(cos(x),a),cos(x+a)) + assert_array_almost_equal(shift(cos(2*x)+sin(x),a), + cos(2*(x+a))+sin(x+a)) + assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a))) + assert_array_almost_equal(shift(sin(x),2*pi),sin(x)) + assert_array_almost_equal(shift(sin(x),pi),-sin(x)) + assert_array_almost_equal(shift(sin(x),pi/2),cos(x)) + + +class TestOverwrite(object): + """Check input overwrite behavior """ + + real_dtypes = [np.float32, np.float64] + dtypes = real_dtypes + [np.complex64, np.complex128] + + def _check(self, x, routine, *args, **kwargs): + x2 = x.copy() + routine(x2, *args, **kwargs) + sig = routine.__name__ + if args: + sig += repr(args) + if kwargs: + sig += repr(kwargs) + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, *args, **kwargs): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + self._check(data, routine, *args, **kwargs) + + def test_diff(self): + for dtype in self.dtypes: + self._check_1d(diff, dtype, (16,)) + + def test_tilbert(self): + for dtype in self.dtypes: + self._check_1d(tilbert, dtype, (16,), 1.6) + + def test_itilbert(self): + for dtype in self.dtypes: + self._check_1d(itilbert, dtype, (16,), 1.6) + + def test_hilbert(self): + for dtype in self.dtypes: + self._check_1d(hilbert, dtype, (16,)) + + def test_cs_diff(self): + for dtype in self.dtypes: + self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0) + + def test_sc_diff(self): + for dtype in self.dtypes: + self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0) + + def test_ss_diff(self): + for dtype in self.dtypes: + self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0) + + def test_cc_diff(self): + for dtype in self.dtypes: + self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0) + + def test_shift(self): + for dtype in self.dtypes: + self._check_1d(shift, dtype, (16,), 1.0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_pseudo_diffs.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_pseudo_diffs.pyc new file mode 100644 index 0000000..b1501e7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_pseudo_diffs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_real_transforms.py b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_real_transforms.py new file mode 100644 index 0000000..c2fcc13 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_real_transforms.py @@ -0,0 +1,829 @@ +from __future__ import division, print_function, absolute_import + +from os.path import join, dirname + +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_equal +import pytest +from pytest import raises as assert_raises + +from scipy.fftpack.realtransforms import ( + dct, idct, dst, idst, dctn, idctn, dstn, idstn) + +# Matlab reference data +MDATA = np.load(join(dirname(__file__), 'test.npz')) +X = [MDATA['x%d' % i] for i in range(8)] +Y = [MDATA['y%d' % i] for i in range(8)] + +# FFTW reference data: the data are organized as follows: +# * SIZES is an array containing all available sizes +# * for every type (1, 2, 3, 4) and every size, the array dct_type_size +# contains the output of the DCT applied to the input np.linspace(0, size-1, +# size) +FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 
'fftw_double_ref.npz')) +FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz')) +FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes'] + + +def fftw_dct_ref(type, size, dt): + x = np.linspace(0, size-1, size).astype(dt) + dt = np.result_type(np.float32, dt) + if dt == np.double: + data = FFTWDATA_DOUBLE + elif dt == np.float32: + data = FFTWDATA_SINGLE + else: + raise ValueError() + y = (data['dct_%d_%d' % (type, size)]).astype(dt) + return x, y, dt + + +def fftw_dst_ref(type, size, dt): + x = np.linspace(0, size-1, size).astype(dt) + dt = np.result_type(np.float32, dt) + if dt == np.double: + data = FFTWDATA_DOUBLE + elif dt == np.float32: + data = FFTWDATA_SINGLE + else: + raise ValueError() + y = (data['dst_%d_%d' % (type, size)]).astype(dt) + return x, y, dt + + +def dct_2d_ref(x, **kwargs): + """Calculate reference values for testing dct2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = dct(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = dct(x[:, col], **kwargs) + return x + + +def idct_2d_ref(x, **kwargs): + """Calculate reference values for testing idct2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = idct(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = idct(x[:, col], **kwargs) + return x + + +def dst_2d_ref(x, **kwargs): + """Calculate reference values for testing dst2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = dst(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = dst(x[:, col], **kwargs) + return x + + +def idst_2d_ref(x, **kwargs): + """Calculate reference values for testing idst2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = idst(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = idst(x[:, col], **kwargs) + return x + + +def naive_dct1(x, norm=None): + """Calculate textbook definition version of DCT-I.""" + x = np.array(x, copy=True) + N = len(x) + M = N-1 + y = np.zeros(N) + m0, m = 1, 2 + if norm == 'ortho': + m0 = np.sqrt(1.0/M) + m = np.sqrt(2.0/M) + for k in range(N): + for n in range(1, N-1): + y[k] += m*x[n]*np.cos(np.pi*n*k/M) + y[k] += m0 * x[0] + y[k] += m0 * x[N-1] * (1 if k % 2 == 0 else -1) + if norm == 'ortho': + y[0] *= 1/np.sqrt(2) + y[N-1] *= 1/np.sqrt(2) + return y + + +def naive_dst1(x, norm=None): + """Calculate textbook definition version of DST-I.""" + x = np.array(x, copy=True) + N = len(x) + M = N+1 + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += 2*x[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/M) + if norm == 'ortho': + y *= np.sqrt(0.5/M) + return y + + +def naive_dct4(x, norm=None): + """Calculate textbook definition version of DCT-IV.""" + x = np.array(x, copy=True) + N = len(x) + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += x[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/(N)) + if norm == 'ortho': + y *= np.sqrt(2.0/N) + else: + y *= 2 + return y + + +def naive_dst4(x, norm=None): + """Calculate textbook definition version of DST-IV.""" + x = np.array(x, copy=True) + N = len(x) + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += x[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/(N)) + if norm == 'ortho': + y *= np.sqrt(2.0/N) + else: + y *= 2 + return y + + +class TestComplex(object): + def test_dct_complex64(self): + y = dct(1j*np.arange(5, dtype=np.complex64)) + x = 1j*dct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dct_complex(self): + y = dct(np.arange(5)*1j) + x = 1j*dct(np.arange(5)) + 
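# The DCT is linear, so transforming 1j*x must equal 1j times the + # transform of x. +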
assert_array_almost_equal(x, y) + + def test_idct_complex(self): + y = idct(np.arange(5)*1j) + x = 1j*idct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dst_complex64(self): + y = dst(np.arange(5, dtype=np.complex64)*1j) + x = 1j*dst(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dst_complex(self): + y = dst(np.arange(5)*1j) + x = 1j*dst(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_idst_complex(self): + y = idst(np.arange(5)*1j) + x = 1j*idst(np.arange(5)) + assert_array_almost_equal(x, y) + + +class _TestDCTBase(object): + def setup_method(self): + self.rdt = None + self.dec = 14 + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + x, yr, dt = fftw_dct_ref(self.type, i, self.rdt) + y = dct(x, type=self.type) + assert_equal(y.dtype, dt) + # XXX: we divide by np.max(y) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. + assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec, + err_msg="Size %d failed" % i) + + def test_axis(self): + nt = 2 + for i in [7, 8, 9, 16, 32, 64]: + x = np.random.randn(nt, i) + y = dct(x, type=self.type) + for j in range(nt): + assert_array_almost_equal(y[j], dct(x[j], type=self.type), + decimal=self.dec) + + x = x.T + y = dct(x, axis=0, type=self.type) + for j in range(nt): + assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type), + decimal=self.dec) + + +class _TestDCTIBase(_TestDCTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + for i in range(len(X)): + x = np.array(X[i], dtype=self.rdt) + dt = np.result_type(np.float32, self.rdt) + y = dct(x, norm='ortho', type=1) + y2 = naive_dct1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec) + +class _TestDCTIIBase(_TestDCTBase): + def test_definition_matlab(self): + # Test correspondence with Matlab (orthonormal mode). + for i in range(len(X)): + dt = np.result_type(np.float32, self.rdt) + x = np.array(X[i], dtype=dt) + + yr = Y[i] + y = dct(x, norm="ortho", type=2) + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, yr, decimal=self.dec) + + +class _TestDCTIIIBase(_TestDCTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + for i in range(len(X)): + x = np.array(X[i], dtype=self.rdt) + dt = np.result_type(np.float32, self.rdt) + y = dct(x, norm='ortho', type=2) + xi = dct(y, norm="ortho", type=3) + assert_equal(xi.dtype, dt) + assert_array_almost_equal(xi, x, decimal=self.dec) + +class _TestDCTIVBase(_TestDCTBase): + def test_definition_ortho(self): + # Test orthonormal mode.
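+ # Compare against the textbook DCT-IV definition (naive_dct4 above).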
+ for i in range(len(X)): + x = np.array(X[i], dtype=self.rdt) + dt = np.result_type(np.float32, self.rdt) + y = dct(x, norm='ortho', type=4) + y2 = naive_dct4(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec) + + +class TestDCTIDouble(_TestDCTIBase): + def setup_method(self): + self.rdt = np.double + self.dec = 10 + self.type = 1 + + +class TestDCTIFloat(_TestDCTIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestDCTIInt(_TestDCTIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 1 + + +class TestDCTIIDouble(_TestDCTIIBase): + def setup_method(self): + self.rdt = np.double + self.dec = 10 + self.type = 2 + + +class TestDCTIIFloat(_TestDCTIIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 2 + + +class TestDCTIIInt(_TestDCTIIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 2 + + +class TestDCTIIIDouble(_TestDCTIIIBase): + def setup_method(self): + self.rdt = np.double + self.dec = 14 + self.type = 3 + + +class TestDCTIIIFloat(_TestDCTIIIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 3 + + +class TestDCTIIIInt(_TestDCTIIIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 3 + + +class TestDCTIVDouble(_TestDCTIVBase): + def setup_method(self): + self.rdt = np.double + self.dec = 12 + self.type = 3 + + +class TestDCTIVFloat(_TestDCTIVBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 3 + + +class TestDCTIVInt(_TestDCTIVBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 3 + + +class _TestIDCTBase(object): + def setup_method(self): + self.rdt = None + self.dec = 14 + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt) + x = idct(yr, type=self.type) + if self.type == 1: + x /= 2 * (i-1) + else: + x /= 2 * i + assert_equal(x.dtype, dt) + # XXX: we divide by np.max(y) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. 
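+ # Hence both sides are normalized by np.max(x) before comparing.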
+ assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec, + err_msg="Size %d failed" % i) + + +class TestIDCTIDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 10 + self.type = 1 + + +class TestIDCTIFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestIDCTIInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 4 + self.type = 1 + + +class TestIDCTIIDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 10 + self.type = 2 + + +class TestIDCTIIFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 2 + + +class TestIDCTIIInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 2 + + +class TestIDCTIIIDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 14 + self.type = 3 + + +class TestIDCTIIIFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 3 + + +class TestIDCTIIIInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 3 + +class TestIDCTIVDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 12 + self.type = 4 + + +class TestIDCTIVFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 4 + + +class TestIDCTIVInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 4 + +class _TestDSTBase(object): + def setup_method(self): + self.rdt = None # dtype + self.dec = None # number of decimals to match + self.type = None # dst type + + def test_definition(self): + for i in FFTWDATA_SIZES: + xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt) + y = dst(xr, type=self.type) + assert_equal(y.dtype, dt) + # XXX: we divide by np.max(y) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. + assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec, + err_msg="Size %d failed" % i) + + +class _TestDSTIBase(_TestDSTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + for i in range(len(X)): + x = np.array(X[i], dtype=self.rdt) + dt = np.result_type(np.float32, self.rdt) + y = dst(x, norm='ortho', type=1) + y2 = naive_dst1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec) + +class _TestDSTIVBase(_TestDSTBase): + def test_definition_ortho(self): + # Test orthonormal mode.
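+ # Compare against the textbook DST-IV definition (naive_dst4 above).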
+ for i in range(len(X)): + x = np.array(X[i], dtype=self.rdt) + dt = np.result_type(np.float32, self.rdt) + y = dst(x, norm='ortho', type=4) + y2 = naive_dst4(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, y2, decimal=self.dec) + +class TestDSTIDouble(_TestDSTIBase): + def setup_method(self): + self.rdt = np.double + self.dec = 12 + self.type = 1 + + +class TestDSTIFloat(_TestDSTIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestDSTIInt(_TestDSTIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 1 + + +class TestDSTIIDouble(_TestDSTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 14 + self.type = 2 + + +class TestDSTIIFloat(_TestDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 2 + + +class TestDSTIIInt(_TestDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 2 + + +class TestDSTIIIDouble(_TestDSTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 14 + self.type = 3 + + +class TestDSTIIIFloat(_TestDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 7 + self.type = 3 + + +class TestDSTIIIInt(_TestDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 7 + self.type = 3 + + +class TestDSTIVDouble(_TestDSTIVBase): + def setup_method(self): + self.rdt = np.double + self.dec = 12 + self.type = 4 + + +class TestDSTIVFloat(_TestDSTIVBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 4 + + +class TestDSTIVInt(_TestDSTIVBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 4 + + +class _TestIDSTBase(object): + def setup_method(self): + self.rdt = None + self.dec = None + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt) + x = idst(yr, type=self.type) + if self.type == 1: + x /= 2 * (i+1) + else: + x /= 2 * i + assert_equal(x.dtype, dt) + # XXX: we divide by np.max(x) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. 
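+ # As in the inverse DCT tests, both sides are normalized by np.max(x).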
+ assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec, + err_msg="Size %d failed" % i) + + +class TestIDSTIDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 12 + self.type = 1 + + +class TestIDSTIFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestIDSTIInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 4 + self.type = 1 + + +class TestIDSTIIDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 14 + self.type = 2 + + +class TestIDSTIIFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 2 + + +class TestIDSTIIInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 2 + + +class TestIDSTIIIDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 14 + self.type = 3 + + +class TestIDSTIIIFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 3 + + +class TestIDSTIIIInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 3 + + +class TestIDSTIVDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.double + self.dec = 12 + self.type = 4 + + +class TestIDSTIVFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 4 + + +class TestIDSTIVnt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 4 + + +class TestOverwrite(object): + """Check input overwrite behavior.""" + + real_dtypes = [np.float32, np.float64] + + def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, + should_overwrite, **kw): + x2 = x.copy() + routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x) + + sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % ( + routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) + if not should_overwrite: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + for type in [1, 2, 3, 4]: + for overwrite_x in [True, False]: + for norm in [None, 'ortho']: + should_overwrite = (overwrite_x + and dtype in overwritable_dtypes + and (len(shape) == 1 or + (axis % len(shape) == len(shape)-1 + ))) + self._check(data, routine, type, None, axis, norm, + overwrite_x, should_overwrite) + + def test_dct(self): + overwritable = self.real_dtypes + for dtype in self.real_dtypes: + self._check_1d(dct, dtype, (16,), -1, overwritable) + self._check_1d(dct, dtype, (16, 2), 0, overwritable) + self._check_1d(dct, dtype, (2, 16), 1, overwritable) + + def test_idct(self): + overwritable = self.real_dtypes + for dtype in self.real_dtypes: + self._check_1d(idct, dtype, (16,), -1, overwritable) + self._check_1d(idct, dtype, (16, 2), 0, overwritable) + self._check_1d(idct, dtype, (2, 16), 1, overwritable) + + def test_dst(self): + overwritable = self.real_dtypes + for dtype in self.real_dtypes: + self._check_1d(dst, dtype, (16,), -1, overwritable) + self._check_1d(dst, dtype, (16, 2), 0, overwritable) + self._check_1d(dst, dtype, (2, 16), 1, overwritable) + + def test_idst(self): + overwritable = self.real_dtypes + for dtype in self.real_dtypes: + self._check_1d(idst, 
dtype, (16,), -1, overwritable) + self._check_1d(idst, dtype, (16, 2), 0, overwritable) + self._check_1d(idst, dtype, (2, 16), 1, overwritable) + + +class Test_DCTN_IDCTN(object): + dec = 14 + dct_type = [1, 2, 3, 4] + norms = [None, 'ortho'] + rstate = np.random.RandomState(1234) + shape = (32, 16) + data = rstate.randn(*shape) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [None, + 1, (1,), [1], + 0, (0,), [0], + (0, 1), [0, 1], + (-2, -1), [-2, -1]]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', ['ortho']) + def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm): + tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm) + tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm) + assert_array_almost_equal(self.data, tmp, decimal=12) + + @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref), + (dstn, dst_2d_ref)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', norms) + def test_dctn_vs_2d_reference(self, fforward, fforward_ref, + dct_type, norm): + y1 = fforward(self.data, type=dct_type, axes=None, norm=norm) + y2 = fforward_ref(self.data, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref), + (idstn, idst_2d_ref)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', [None, 'ortho']) + def test_idctn_vs_2d_reference(self, finverse, finverse_ref, + dct_type, norm): + fdata = dctn(self.data, type=dct_type, norm=norm) + y1 = finverse(fdata, type=dct_type, norm=norm) + y2 = finverse_ref(fdata, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + def test_axes_and_shape(self, fforward, finverse): + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape[0], axes=(0, 1)) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape[0], axes=None) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape, axes=0) + + @pytest.mark.parametrize('fforward', [dctn, dstn]) + def test_shape(self, fforward): + tmp = fforward(self.data, shape=(128, 128), axes=None) + assert_equal(tmp.shape, (128, 128)) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [1, (1,), [1], + 0, (0,), [0]]) + def test_shape_is_none_with_axes(self, fforward, finverse, axes): + tmp = fforward(self.data, shape=None, axes=axes, norm='ortho') + tmp = finverse(tmp, shape=None, axes=axes, norm='ortho') + assert_array_almost_equal(self.data, tmp, decimal=self.dec) diff --git a/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_real_transforms.pyc b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_real_transforms.pyc new file mode 100644 index 0000000..f058577 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/fftpack/tests/test_real_transforms.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/__init__.py 
b/project/venv/lib/python2.7/site-packages/scipy/integrate/__init__.py new file mode 100644 index 0000000..b2a9796 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/__init__.py @@ -0,0 +1,101 @@ +""" +============================================= +Integration and ODEs (:mod:`scipy.integrate`) +============================================= + +.. currentmodule:: scipy.integrate + +Integrating functions, given function object +============================================ + +.. autosummary:: + :toctree: generated/ + + quad -- General purpose integration + dblquad -- General purpose double integration + tplquad -- General purpose triple integration + nquad -- General purpose n-dimensional integration + fixed_quad -- Integrate func(x) using Gaussian quadrature of order n + quadrature -- Integrate with given tolerance using Gaussian quadrature + romberg -- Integrate func using Romberg integration + quad_explain -- Print information for use of quad + newton_cotes -- Weights and error coefficient for Newton-Cotes integration + IntegrationWarning -- Warning on issues during integration + +Integrating functions, given fixed samples +========================================== + +.. autosummary:: + :toctree: generated/ + + trapz -- Use trapezoidal rule to compute integral. + cumtrapz -- Use trapezoidal rule to cumulatively compute integral. + simps -- Use Simpson's rule to compute integral from samples. + romb -- Use Romberg Integration to compute integral from + -- (2**k + 1) evenly-spaced samples. + +.. seealso:: + + :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian + quadrature roots and weights for other weighting factors and regions. + +Solving initial value problems for ODE systems +============================================== + +The solvers are implemented as individual classes which can be used directly +(low-level usage) or through a convenience function. + +.. autosummary:: + :toctree: generated/ + + solve_ivp -- Convenient function for ODE integration. + RK23 -- Explicit Runge-Kutta solver of order 3(2). + RK45 -- Explicit Runge-Kutta solver of order 5(4). + Radau -- Implicit Runge-Kutta solver of order 5. + BDF -- Implicit multi-step variable order (1 to 5) solver. + LSODA -- LSODA solver from ODEPACK Fortran package. + OdeSolver -- Base class for ODE solvers. + DenseOutput -- Local interpolant for computing a dense output. + OdeSolution -- Class which represents a continuous ODE solution. + + +Old API +------- + +These are the routines developed earlier for scipy. They wrap older solvers +implemented in Fortran (mostly ODEPACK). While the interface to them is not +particularly convenient and certain features are missing compared to the new +API, the solvers themselves are of good quality and work fast as compiled +Fortran code. In some cases it might be worth using this old API. + +.. autosummary:: + :toctree: generated/ + + odeint -- General integration of ordinary differential equations. + ode -- Integrate ODE using VODE and ZVODE routines. + complex_ode -- Convert a complex-valued ODE to real-valued and integrate. + + +Solving boundary value problems for ODE systems +=============================================== + +.. autosummary:: + :toctree: generated/ + + solve_bvp -- Solve a boundary value problem for a system of ODEs. 
+""" +from __future__ import division, print_function, absolute_import + +from .quadrature import * +from .odepack import * +from .quadpack import * +from ._ode import * +from ._bvp import solve_bvp +from ._ivp import (solve_ivp, OdeSolution, DenseOutput, + OdeSolver, RK23, RK45, Radau, BDF, LSODA) + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/__init__.pyc new file mode 100644 index 0000000..be67f42 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_bvp.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_bvp.py new file mode 100644 index 0000000..86d777f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_bvp.py @@ -0,0 +1,1134 @@ +"""Boundary value problem solver.""" +from __future__ import division, print_function, absolute_import + +from warnings import warn + +import numpy as np +from numpy.linalg import norm, pinv + +from scipy.sparse import coo_matrix, csc_matrix +from scipy.sparse.linalg import splu +from scipy.optimize import OptimizeResult + + +EPS = np.finfo(float).eps + + +def estimate_fun_jac(fun, x, y, p, f0=None): + """Estimate derivatives of an ODE system rhs with forward differences. + + Returns + ------- + df_dy : ndarray, shape (n, n, m) + Derivatives with respect to y. An element (i, j, q) corresponds to + d f_i(x_q, y_q) / d (y_q)_j. + df_dp : ndarray with shape (n, k, m) or None + Derivatives with respect to p. An element (i, j, q) corresponds to + d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned. + """ + n, m = y.shape + if f0 is None: + f0 = fun(x, y, p) + + dtype = y.dtype + + df_dy = np.empty((n, n, m), dtype=dtype) + h = EPS**0.5 * (1 + np.abs(y)) + for i in range(n): + y_new = y.copy() + y_new[i] += h[i] + hi = y_new[i] - y[i] + f_new = fun(x, y_new, p) + df_dy[:, i, :] = (f_new - f0) / hi + + k = p.shape[0] + if k == 0: + df_dp = None + else: + df_dp = np.empty((n, k, m), dtype=dtype) + h = EPS**0.5 * (1 + np.abs(p)) + for i in range(k): + p_new = p.copy() + p_new[i] += h[i] + hi = p_new[i] - p[i] + f_new = fun(x, y, p_new) + df_dp[:, i, :] = (f_new - f0) / hi + + return df_dy, df_dp + + +def estimate_bc_jac(bc, ya, yb, p, bc0=None): + """Estimate derivatives of boundary conditions with forward differences. + + Returns + ------- + dbc_dya : ndarray, shape (n + k, n) + Derivatives with respect to ya. An element (i, j) corresponds to + d bc_i / d ya_j. + dbc_dyb : ndarray, shape (n + k, n) + Derivatives with respect to yb. An element (i, j) corresponds to + d bc_i / d ya_j. + dbc_dp : ndarray with shape (n + k, k) or None + Derivatives with respect to p. An element (i, j) corresponds to + d bc_i / d p_j. If `p` is empty, None is returned. 
+ """ + n = ya.shape[0] + k = p.shape[0] + + if bc0 is None: + bc0 = bc(ya, yb, p) + + dtype = ya.dtype + + dbc_dya = np.empty((n, n + k), dtype=dtype) + h = EPS**0.5 * (1 + np.abs(ya)) + for i in range(n): + ya_new = ya.copy() + ya_new[i] += h[i] + hi = ya_new[i] - ya[i] + bc_new = bc(ya_new, yb, p) + dbc_dya[i] = (bc_new - bc0) / hi + dbc_dya = dbc_dya.T + + h = EPS**0.5 * (1 + np.abs(yb)) + dbc_dyb = np.empty((n, n + k), dtype=dtype) + for i in range(n): + yb_new = yb.copy() + yb_new[i] += h[i] + hi = yb_new[i] - yb[i] + bc_new = bc(ya, yb_new, p) + dbc_dyb[i] = (bc_new - bc0) / hi + dbc_dyb = dbc_dyb.T + + if k == 0: + dbc_dp = None + else: + h = EPS**0.5 * (1 + np.abs(p)) + dbc_dp = np.empty((k, n + k), dtype=dtype) + for i in range(k): + p_new = p.copy() + p_new[i] += h[i] + hi = p_new[i] - p[i] + bc_new = bc(ya, yb, p_new) + dbc_dp[i] = (bc_new - bc0) / hi + dbc_dp = dbc_dp.T + + return dbc_dya, dbc_dyb, dbc_dp + + +def compute_jac_indices(n, m, k): + """Compute indices for the collocation system Jacobian construction. + + See `construct_global_jac` for the explanation. + """ + i_col = np.repeat(np.arange((m - 1) * n), n) + j_col = (np.tile(np.arange(n), n * (m - 1)) + + np.repeat(np.arange(m - 1) * n, n**2)) + + i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n) + j_bc = np.tile(np.arange(n), n + k) + + i_p_col = np.repeat(np.arange((m - 1) * n), k) + j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n) + + i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k) + j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k) + + i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc)) + j = np.hstack((j_col, j_col + n, + j_bc, j_bc + (m - 1) * n, + j_p_col, j_p_bc)) + + return i, j + + +def stacked_matmul(a, b): + """Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]). + + In our case a[i, :, :] and b[i, :, :] are always square. + """ + # Empirical optimization. Use outer Python loop and BLAS for large + # matrices, otherwise use a single einsum call. + if a.shape[1] > 50: + out = np.empty_like(a) + for i in range(a.shape[0]): + out[i] = np.dot(a[i], b[i]) + return out + else: + return np.einsum('...ij,...jk->...ik', a, b) + + +def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp, + df_dp_middle, dbc_dya, dbc_dyb, dbc_dp): + """Construct the Jacobian of the collocation system. + + There are n * m + k functions: m - 1 collocations residuals, each + containing n components, followed by n + k boundary condition residuals. + + There are n * m + k variables: m vectors of y, each containing n + components, followed by k values of vector p. + + For example, let m = 4, n = 2 and k = 1, then the Jacobian will have + the following sparsity structure: + + 1 1 2 2 0 0 0 0 5 + 1 1 2 2 0 0 0 0 5 + 0 0 1 1 2 2 0 0 5 + 0 0 1 1 2 2 0 0 5 + 0 0 0 0 1 1 2 2 5 + 0 0 0 0 1 1 2 2 5 + + 3 3 0 0 0 0 4 4 6 + 3 3 0 0 0 0 4 4 6 + 3 3 0 0 0 0 4 4 6 + + Zeros denote identically zero values, other values denote different kinds + of blocks in the matrix (see below). The blank row indicates the separation + of collocation residuals from boundary conditions. And the blank column + indicates the separation of y values from p values. + + Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives + of collocation residuals with respect to y. + + Parameters + ---------- + n : int + Number of equations in the ODE system. + m : int + Number of nodes in the mesh. + k : int + Number of the unknown parameters. 
+ i_jac, j_jac : ndarray + Row and column indices returned by `compute_jac_indices`. They + represent different blocks in the Jacobian matrix in the following + order (see the scheme above): + + * 1: m - 1 diagonal n x n blocks for the collocation residuals. + * 2: m - 1 off-diagonal n x n blocks for the collocation residuals. + * 3: (n + k) x n block for the dependency of the boundary + conditions on ya. + * 4: (n + k) x n block for the dependency of the boundary + conditions on yb. + * 5: (m - 1) * n x k block for the dependency of the collocation + residuals on p. + * 6: (n + k) x k block for the dependency of the boundary + conditions on p. + + df_dy : ndarray, shape (n, n, m) + Jacobian of f with respect to y computed at the mesh nodes. + df_dy_middle : ndarray, shape (n, n, m - 1) + Jacobian of f with respect to y computed at the middle between the + mesh nodes. + df_dp : ndarray with shape (n, k, m) or None + Jacobian of f with respect to p computed at the mesh nodes. + df_dp_middle : ndarray with shape (n, k, m - 1) or None + Jacobian of f with respect to p computed at the middle between the + mesh nodes. + dbc_dya, dbc_dyb : ndarray, shape (n, n) + Jacobian of bc with respect to ya and yb. + dbc_dp : ndarray with shape (n, k) or None + Jacobian of bc with respect to p. + + Returns + ------- + J : csc_matrix, shape (n * m + k, n * m + k) + Jacobian of the collocation system in a sparse form. + + References + ---------- + .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual + Control and the Matlab PSE", ACM Trans. Math. Softw., Vol. 27, + Number 3, pp. 299-316, 2001. + """ + df_dy = np.transpose(df_dy, (2, 0, 1)) + df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1)) + + h = h[:, np.newaxis, np.newaxis] + + dtype = df_dy.dtype + + # Computing diagonal n x n blocks. + dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype) + dPhi_dy_0[:] = -np.identity(n) + dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle) + T = stacked_matmul(df_dy_middle, df_dy[:-1]) + dPhi_dy_0 -= h**2 / 12 * T + + # Computing off-diagonal n x n blocks. + dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype) + dPhi_dy_1[:] = np.identity(n) + dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle) + T = stacked_matmul(df_dy_middle, df_dy[1:]) + dPhi_dy_1 += h**2 / 12 * T + + values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(), + dbc_dyb.ravel())) + + if k > 0: + df_dp = np.transpose(df_dp, (2, 0, 1)) + df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1)) + T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:]) + df_dp_middle += 0.125 * h * T + dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle) + values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel())) + + J = coo_matrix((values, (i_jac, j_jac))) + return csc_matrix(J) + + +def collocation_fun(fun, y, p, x, h): + """Evaluate collocation residuals. + + This function lies at the core of the method. The solution is sought + as a cubic C1 continuous spline with derivatives matching the ODE rhs + at given nodes `x`. Collocation conditions are formed from the equality + of the spline derivatives and rhs of the ODE system in the middle points + between nodes. + + This method is classified as a Lobatto IIIA method in the ODE literature. + Refer to [1]_ for the formula and some discussion. + + Returns + ------- + col_res : ndarray, shape (n, m - 1) + Collocation residuals at the middle points of the mesh intervals. + y_middle : ndarray, shape (n, m - 1) + Values of the cubic spline evaluated at the middle points of the mesh + intervals.
+ f : ndarray, shape (n, m) + RHS of the ODE system evaluated at the mesh nodes. + f_middle : ndarray, shape (n, m - 1) + RHS of the ODE system evaluated at the middle points of the mesh + intervals (and using `y_middle`). + + References + ---------- + .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual + Control and the Matlab PSE", ACM Trans. Math. Softw., Vol. 27, + Number 3, pp. 299-316, 2001. + """ + f = fun(x, y, p) + y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) - + 0.125 * h * (f[:, 1:] - f[:, :-1])) + f_middle = fun(x[:-1] + 0.5 * h, y_middle, p) + col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] + + 4 * f_middle) + + return col_res, y_middle, f, f_middle + + +def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h): + """Create the function and the Jacobian for the collocation system.""" + x_middle = x[:-1] + 0.5 * h + i_jac, j_jac = compute_jac_indices(n, m, k) + + def col_fun(y, p): + return collocation_fun(fun, y, p, x, h) + + def sys_jac(y, p, y_middle, f, f_middle, bc0): + if fun_jac is None: + df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f) + df_dy_middle, df_dp_middle = estimate_fun_jac( + fun, x_middle, y_middle, p, f_middle) + else: + df_dy, df_dp = fun_jac(x, y, p) + df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p) + + if bc_jac is None: + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1], + p, bc0) + else: + dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p) + + return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, + df_dy_middle, df_dp, df_dp_middle, dbc_dya, + dbc_dyb, dbc_dp) + + return col_fun, sys_jac + + +def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol): + """Solve the nonlinear collocation system by a Newton method. + + This is a simple Newton method with a backtracking line search. As + advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2 + is used, where J is the Jacobian matrix at the current iteration and r is + the vector of collocation residuals (values of the system lhs). + + The method alternates between full Newton iterations and fixed-Jacobian + iterations: the factorized Jacobian is reused as long as full (undamped) + steps are taken, and is recomputed otherwise. + + There are other tricks proposed in [1]_, but they are not used as they + don't seem to improve anything significantly, and even break the + convergence on some test problems I tried. + + All important parameters of the algorithm are defined inside the function. + + Parameters + ---------- + n : int + Number of equations in the ODE system. + m : int + Number of nodes in the mesh. + h : ndarray, shape (m-1,) + Mesh intervals. + col_fun : callable + Function computing collocation residuals. + bc : callable + Function computing boundary condition residuals. + jac : callable + Function computing the Jacobian of the whole system (including + collocation and boundary condition residuals). It is supposed to + return csc_matrix. + y : ndarray, shape (n, m) + Initial guess for the function values at the mesh nodes. + p : ndarray, shape (k,) + Initial guess for the unknown parameters. + B : ndarray with shape (n, n) or None + Matrix to force the S y(a) = 0 condition for problems with a + singular term. If None, the singular term is assumed to be absent. + bvp_tol : float + Tolerance to which we want to solve a BVP. + + Returns + ------- + y : ndarray, shape (n, m) + Final iterate for the function values at the mesh nodes. + p : ndarray, shape (k,) + Final iterate for the unknown parameters. + singular : bool + True if the LU decomposition failed because the Jacobian turned out + to be singular.
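+ + Notes + ----- + A step of length alpha is accepted once the criterion function decreases sufficiently, i.e. when cost_new < (1 - 2 * alpha * sigma) * cost (an Armijo-type condition); otherwise alpha is reduced by the factor tau.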
+
+
+def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol):
+    """Solve the nonlinear collocation system by a Newton method.
+
+    This is a simple Newton method with a backtracking line search. As
+    advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
+    is used, where J is the Jacobian matrix at the current iteration and r is
+    the vector of collocation residuals (values of the system lhs).
+
+    The method alternates between full Newton iterations and fixed-Jacobian
+    iterations: after a full step (alpha == 1) is accepted, the factorized
+    Jacobian is reused, and it is recomputed only when a damped step had to
+    be taken.
+
+    There are other tricks proposed in [1]_, but they are not used as they
+    don't seem to improve anything significantly, and even break the
+    convergence on some test problems I tried.
+
+    All important parameters of the algorithm are defined inside the function.
+
+    Parameters
+    ----------
+    n : int
+        Number of equations in the ODE system.
+    m : int
+        Number of nodes in the mesh.
+    h : ndarray, shape (m-1,)
+        Mesh intervals.
+    col_fun : callable
+        Function computing collocation residuals.
+    bc : callable
+        Function computing boundary condition residuals.
+    jac : callable
+        Function computing the Jacobian of the whole system (including
+        collocation and boundary condition residuals). It is supposed to
+        return a csc_matrix.
+    y : ndarray, shape (n, m)
+        Initial guess for the function values at the mesh nodes.
+    p : ndarray, shape (k,)
+        Initial guess for the unknown parameters.
+    B : ndarray with shape (n, n) or None
+        Matrix to force the S y(a) = 0 condition for problems with a
+        singular term. If None, the singular term is assumed to be absent.
+    bvp_tol : float
+        Tolerance to which we want to solve the BVP.
+
+    Returns
+    -------
+    y : ndarray, shape (n, m)
+        Final iterate for the function values at the mesh nodes.
+    p : ndarray, shape (k,)
+        Final iterate for the unknown parameters.
+    singular : bool
+        True, if the LU decomposition failed because the Jacobian turned out
+        to be singular.
+
+    References
+    ----------
+    .. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
+       Boundary Value Problems for Ordinary Differential Equations"
+    """
+    # We know that the solution residuals at the middle points of the mesh
+    # are connected with collocation residuals r_middle = 1.5 * col_res / h.
+    # As our BVP solver tries to decrease relative residuals below a certain
+    # tolerance, it seems reasonable to terminate Newton iterations by
+    # comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
+    # which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
+    # the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
+    # should be computed as follows:
+    tol_r = 2/3 * h * 5e-2 * bvp_tol
+
+    # We also need to control residuals of the boundary conditions. But it
+    # seems that they become very small eventually as the solver progresses,
+    # i.e. the tolerance for the BC is not very important. We set it 1.5
+    # orders lower than the BVP tolerance as well.
+    tol_bc = 5e-2 * bvp_tol
+
+    # Maximum allowed number of Jacobian evaluations and factorizations, in
+    # other words, the maximum number of full Newton iterations. A small
+    # value is recommended in the literature.
+    max_njev = 4
+
+    # Maximum number of iterations, considering that some of them can be
+    # performed with the fixed Jacobian. In theory such iterations are cheap,
+    # but it's not that simple in Python.
+    max_iter = 8
+
+    # Minimum relative improvement of the criterion function to accept the
+    # step (Armijo constant).
+    sigma = 0.2
+
+    # Step size decrease factor for backtracking.
+    tau = 0.5
+
+    # Maximum number of backtracking steps, the minimum step is then
+    # tau ** n_trial.
+    n_trial = 4
+
+    col_res, y_middle, f, f_middle = col_fun(y, p)
+    bc_res = bc(y[:, 0], y[:, -1], p)
+    res = np.hstack((col_res.ravel(order='F'), bc_res))
+
+    njev = 0
+    singular = False
+    recompute_jac = True
+    for iteration in range(max_iter):
+        if recompute_jac:
+            J = jac(y, p, y_middle, f, f_middle, bc_res)
+            njev += 1
+            try:
+                LU = splu(J)
+            except RuntimeError:
+                singular = True
+                break
+
+            step = LU.solve(res)
+            cost = np.dot(step, step)
+
+        y_step = step[:m * n].reshape((n, m), order='F')
+        p_step = step[m * n:]
+
+        alpha = 1
+        for trial in range(n_trial + 1):
+            y_new = y - alpha * y_step
+            if B is not None:
+                y_new[:, 0] = np.dot(B, y_new[:, 0])
+            p_new = p - alpha * p_step
+
+            col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
+            bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
+            res = np.hstack((col_res.ravel(order='F'), bc_res))
+
+            step_new = LU.solve(res)
+            cost_new = np.dot(step_new, step_new)
+            if cost_new < (1 - 2 * alpha * sigma) * cost:
+                break
+
+            if trial < n_trial:
+                alpha *= tau
+
+        y = y_new
+        p = p_new
+
+        if njev == max_njev:
+            break
+
+        if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
+                np.all(np.abs(bc_res) < tol_bc)):
+            break
+
+        # If the full step was taken, then we are going to continue with
+        # the same Jacobian. This is the approach of BVP_SOLVER.
+        if alpha == 1:
+            step = step_new
+            cost = cost_new
+            recompute_jac = False
+        else:
+            recompute_jac = True
+
+    return y, p, singular
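+
+
+# An illustrative sketch, not part of scipy: the backtracking rule used in
+# `solve_newton` above, isolated for a scalar equation g(u) = 0 with the
+# same Armijo constant sigma and step-halving factor tau. It mirrors the
+# acceptance criterion cost_new < (1 - 2 * alpha * sigma) * cost applied to
+# the affine-invariant cost F = ||J^-1 r||^2.
+def _demo_backtracking(g, dg, u0, sigma=0.2, tau=0.5, n_trial=4):
+    u = u0
+    for _ in range(10):
+        step = g(u) / dg(u)  # Newton step J^-1 r in the scalar case
+        cost = step * step
+        alpha = 1.0
+        for trial in range(n_trial + 1):
+            u_new = u - alpha * step
+            step_new = g(u_new) / dg(u)  # Jacobian kept fixed in the search
+            cost_new = step_new * step_new
+            if cost_new < (1 - 2 * alpha * sigma) * cost:
+                break
+            if trial < n_trial:
+                alpha *= tau
+        u = u_new
+    # e.g. _demo_backtracking(lambda u: u**2 - 2, lambda u: 2 * u, 1.0)
+    # converges to sqrt(2).
+    return u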
+
+
+def print_iteration_header():
+    print("{:^15}{:^15}{:^15}{:^15}".format(
+        "Iteration", "Max residual", "Total nodes", "Nodes added"))
+
+
+def print_iteration_progress(iteration, residual, total_nodes, nodes_added):
+    print("{:^15}{:^15.2e}{:^15}{:^15}".format(
+        iteration, residual, total_nodes, nodes_added))
+
+
+class BVPResult(OptimizeResult):
+    pass
+
+
+TERMINATION_MESSAGES = {
+    0: "The algorithm converged to the desired accuracy.",
+    1: "The maximum number of mesh nodes is exceeded.",
+    2: "A singular Jacobian encountered when solving the collocation system."
+}
+
+
+def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
+    """Estimate rms values of collocation residuals using Lobatto quadrature.
+
+    The residuals are defined as the difference between the derivatives of
+    our solution and the rhs of the ODE system. We use relative residuals,
+    i.e. normalized by 1 + np.abs(f). RMS values are computed as the square
+    root of the normalized integrals of the squared relative residuals over
+    each interval. Integrals are estimated using 5-point Lobatto quadrature
+    [1]_; we use the fact that residuals at the mesh nodes are identically
+    zero.
+
+    In [2]_ the integrals are not normalized by interval lengths, which
+    gives a rate of convergence of the residuals higher by a factor of
+    h**0.5. I chose to do such normalization for ease of interpretation of
+    the return values as RMS estimates.
+
+    Returns
+    -------
+    rms_res : ndarray, shape (m - 1,)
+        Estimated rms values of the relative residuals over each interval.
+
+    References
+    ----------
+    .. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
+    .. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+       Control and the Matlab PSE", ACM Trans. Math. Softw., Vol. 27,
+       Number 3, pp. 299-316, 2001.
+    """
+    x_middle = x[:-1] + 0.5 * h
+    s = 0.5 * h * (3/7)**0.5
+    x1 = x_middle + s
+    x2 = x_middle - s
+    y1 = sol(x1)
+    y2 = sol(x2)
+    y1_prime = sol(x1, 1)
+    y2_prime = sol(x2, 1)
+    f1 = fun(x1, y1, p)
+    f2 = fun(x2, y2, p)
+    r1 = y1_prime - f1
+    r2 = y2_prime - f2
+
+    r_middle /= 1 + np.abs(f_middle)
+    r1 /= 1 + np.abs(f1)
+    r2 /= 1 + np.abs(f2)
+
+    r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
+    r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
+    r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
+
+    return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
+
+
+def create_spline(y, yp, x, h):
+    """Create a cubic spline given values and derivatives.
+
+    Formulas for the coefficients are taken from interpolate.CubicSpline.
+
+    Returns
+    -------
+    sol : PPoly
+        Constructed spline as a PPoly instance.
+    """
+    from scipy.interpolate import PPoly
+
+    n, m = y.shape
+    c = np.empty((4, n, m - 1), dtype=y.dtype)
+    slope = (y[:, 1:] - y[:, :-1]) / h
+    t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
+    c[0] = t / h
+    c[1] = (slope - yp[:, :-1]) / h - t
+    c[2] = yp[:, :-1]
+    c[3] = y[:, :-1]
+    c = np.rollaxis(c, 1)
+
+    return PPoly(c, x, extrapolate=True, axis=1)
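+
+
+# A quick sketch, added for illustration (not part of scipy): the spline
+# built by `create_spline` reproduces the supplied values and first
+# derivatives at the mesh nodes, checked here for y(x) = sin(x) on a coarse
+# mesh.
+def _demo_create_spline():
+    x = np.linspace(0, np.pi, 6)
+    h = np.diff(x)
+    y = np.sin(x)[np.newaxis, :]
+    yp = np.cos(x)[np.newaxis, :]
+    sol = create_spline(y, yp, x, h)
+    return (np.max(np.abs(sol(x) - y)),      # ~0: interpolates the values
+            np.max(np.abs(sol(x, 1) - yp)))  # ~0: matches the derivatives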
+
+
+def modify_mesh(x, insert_1, insert_2):
+    """Insert nodes into a mesh.
+
+    Node removal logic is not established; its impact on the solver is
+    presumably negligible, so only insertion is done in this function.
+
+    Parameters
+    ----------
+    x : ndarray, shape (m,)
+        Mesh nodes.
+    insert_1 : ndarray
+        Intervals into each of which 1 new node is inserted in the middle.
+    insert_2 : ndarray
+        Intervals into each of which 2 new nodes are inserted, such that
+        they divide the interval into 3 equal parts.
+
+    Returns
+    -------
+    x_new : ndarray
+        New mesh nodes.
+
+    Notes
+    -----
+    `insert_1` and `insert_2` should not have common values.
+    """
+    # Because the np.insert implementation apparently varies with the numpy
+    # version, we use a simple and reliable approach with sorting.
+    return np.sort(np.hstack((
+        x,
+        0.5 * (x[insert_1] + x[insert_1 + 1]),
+        (2 * x[insert_2] + x[insert_2 + 1]) / 3,
+        (x[insert_2] + 2 * x[insert_2 + 1]) / 3
+    )))
+
+
+def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
+    """Wrap functions for unified usage in the solver."""
+    if fun_jac is None:
+        fun_jac_wrapped = None
+
+    if bc_jac is None:
+        bc_jac_wrapped = None
+
+    if k == 0:
+        def fun_p(x, y, _):
+            return np.asarray(fun(x, y), dtype)
+
+        def bc_wrapped(ya, yb, _):
+            return np.asarray(bc(ya, yb), dtype)
+
+        if fun_jac is not None:
+            def fun_jac_p(x, y, _):
+                return np.asarray(fun_jac(x, y), dtype), None
+
+        if bc_jac is not None:
+            def bc_jac_wrapped(ya, yb, _):
+                dbc_dya, dbc_dyb = bc_jac(ya, yb)
+                return (np.asarray(dbc_dya, dtype),
+                        np.asarray(dbc_dyb, dtype), None)
+    else:
+        def fun_p(x, y, p):
+            return np.asarray(fun(x, y, p), dtype)
+
+        def bc_wrapped(ya, yb, p):
+            return np.asarray(bc(ya, yb, p), dtype)
+
+        if fun_jac is not None:
+            def fun_jac_p(x, y, p):
+                df_dy, df_dp = fun_jac(x, y, p)
+                return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
+
+        if bc_jac is not None:
+            def bc_jac_wrapped(ya, yb, p):
+                dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
+                return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
+                        np.asarray(dbc_dp, dtype))
+
+    if S is None:
+        fun_wrapped = fun_p
+    else:
+        def fun_wrapped(x, y, p):
+            f = fun_p(x, y, p)
+            if x[0] == a:
+                f[:, 0] = np.dot(D, f[:, 0])
+                f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
+            else:
+                f += np.dot(S, y) / (x - a)
+            return f
+
+    if fun_jac is not None:
+        if S is None:
+            fun_jac_wrapped = fun_jac_p
+        else:
+            Sr = S[:, :, np.newaxis]
+
+            def fun_jac_wrapped(x, y, p):
+                df_dy, df_dp = fun_jac_p(x, y, p)
+                if x[0] == a:
+                    df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
+                    df_dy[:, :, 1:] += Sr / (x[1:] - a)
+                else:
+                    df_dy += Sr / (x - a)
+
+                return df_dy, df_dp
+
+    return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
+
+
+def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
+              tol=1e-3, max_nodes=1000, verbose=0):
+    """Solve a boundary-value problem for a system of ODEs.
+
+    This function numerically solves a first order system of ODEs subject to
+    two-point boundary conditions::
+
+        dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
+        bc(y(a), y(b), p) = 0
+
+    Here x is a 1-dimensional independent variable, y(x) is a n-dimensional
+    vector-valued function and p is a k-dimensional vector of unknown
+    parameters which is to be found along with y(x). For the problem to be
+    determined there must be n + k boundary conditions, i.e. bc must be an
+    (n + k)-dimensional function.
+
+    The last singular term in the right-hand side of the system is optional.
+    It is defined by an n-by-n matrix S, such that the solution must satisfy
+    S y(a) = 0. This condition will be forced during iterations, so it must
+    not contradict the boundary conditions. See [2]_ for an explanation of
+    how this term is handled when solving BVPs numerically.
+
+    Problems in a complex domain can be solved as well. In this case y and p
+    are considered to be complex, and f and bc are assumed to be
+    complex-valued functions, but x stays real.
Note that f and bc must be complex + differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you + should rewrite your problem for real and imaginary parts separately. To + solve a problem in a complex domain, pass an initial guess for y with a + complex data type (see below). + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(x, y)``, + or ``fun(x, y, p)`` if parameters are present. All arguments are + ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that + ``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The + return value must be an array with shape (n, m) and with the same + layout as ``y``. + bc : callable + Function evaluating residuals of the boundary conditions. The calling + signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are + present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,), + and ``p`` with shape (k,). The return value must be an array with + shape (n + k,). + x : array_like, shape (m,) + Initial mesh. Must be a strictly increasing sequence of real numbers + with ``x[0]=a`` and ``x[-1]=b``. + y : array_like, shape (n, m) + Initial guess for the function values at the mesh nodes, i-th column + corresponds to ``x[i]``. For problems in a complex domain pass `y` + with a complex data type (even if the initial guess is purely real). + p : array_like with shape (k,) or None, optional + Initial guess for the unknown parameters. If None (default), it is + assumed that the problem doesn't depend on any parameters. + S : array_like with shape (n, n) or None + Matrix defining the singular term. If None (default), the problem is + solved without the singular term. + fun_jac : callable or None, optional + Function computing derivatives of f with respect to y and p. The + calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if + parameters are present. The return must contain 1 or 2 elements in the + following order: + + * df_dy : array_like with shape (n, n, m) where an element + (i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j. + * df_dp : array_like with shape (n, k, m) where an element + (i, j, q) equals to d f_i(x_q, y_q, p) / d p_j. + + Here q numbers nodes at which x and y are defined, whereas i and j + number vector components. If the problem is solved without unknown + parameters df_dp should not be returned. + + If `fun_jac` is None (default), the derivatives will be estimated + by the forward finite differences. + bc_jac : callable or None, optional + Function computing derivatives of bc with respect to ya, yb and p. + The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)`` + if parameters are present. The return must contain 2 or 3 elements in + the following order: + + * dbc_dya : array_like with shape (n, n) where an element (i, j) + equals to d bc_i(ya, yb, p) / d ya_j. + * dbc_dyb : array_like with shape (n, n) where an element (i, j) + equals to d bc_i(ya, yb, p) / d yb_j. + * dbc_dp : array_like with shape (n, k) where an element (i, j) + equals to d bc_i(ya, yb, p) / d p_j. + + If the problem is solved without unknown parameters dbc_dp should not + be returned. + + If `bc_jac` is None (default), the derivatives will be estimated by + the forward finite differences. + tol : float, optional + Desired tolerance of the solution. 
If we define ``r = y' - f(x, y)`` + where y is the found solution, then the solver tries to achieve on each + mesh interval ``norm(r / (1 + abs(f)) < tol``, where ``norm`` is + estimated in a root mean squared sense (using a numerical quadrature + formula). Default is 1e-3. + max_nodes : int, optional + Maximum allowed number of the mesh nodes. If exceeded, the algorithm + terminates. Default is 1000. + verbose : {0, 1, 2}, optional + Level of algorithm's verbosity: + + * 0 (default) : work silently. + * 1 : display a termination report. + * 2 : display progress during iterations. + + Returns + ------- + Bunch object with the following fields defined: + sol : PPoly + Found solution for y as `scipy.interpolate.PPoly` instance, a C1 + continuous cubic spline. + p : ndarray or None, shape (k,) + Found parameters. None, if the parameters were not present in the + problem. + x : ndarray, shape (m,) + Nodes of the final mesh. + y : ndarray, shape (n, m) + Solution values at the mesh nodes. + yp : ndarray, shape (n, m) + Solution derivatives at the mesh nodes. + rms_residuals : ndarray, shape (m - 1,) + RMS values of the relative residuals over each mesh interval (see the + description of `tol` parameter). + niter : int + Number of completed iterations. + status : int + Reason for algorithm termination: + + * 0: The algorithm converged to the desired accuracy. + * 1: The maximum number of mesh nodes is exceeded. + * 2: A singular Jacobian encountered when solving the collocation + system. + + message : string + Verbal description of the termination reason. + success : bool + True if the algorithm converged to the desired accuracy (``status=0``). + + Notes + ----- + This function implements a 4-th order collocation algorithm with the + control of residuals similar to [1]_. A collocation system is solved + by a damped Newton method with an affine-invariant criterion function as + described in [3]_. + + Note that in [1]_ integral residuals are defined without normalization + by interval lengths. So their definition is different by a multiplier of + h**0.5 (h is an interval length) from the definition used here. + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual + Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27, + Number 3, pp. 299-316, 2001. + .. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP + Solver". + .. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of + Boundary Value Problems for Ordinary Differential Equations". + .. [4] `Cauchy-Riemann equations + <https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on + Wikipedia. + + Examples + -------- + In the first example we solve Bratu's problem:: + + y'' + k * exp(y) = 0 + y(0) = y(1) = 0 + + for k = 1. + + We rewrite the equation as a first order system and implement its + right-hand side evaluation:: + + y1' = y2 + y2' = -exp(y1) + + >>> def fun(x, y): + ... return np.vstack((y[1], -np.exp(y[0]))) + + Implement evaluation of the boundary condition residuals: + + >>> def bc(ya, yb): + ... return np.array([ya[0], yb[0]]) + + Define the initial mesh with 5 nodes: + + >>> x = np.linspace(0, 1, 5) + + This problem is known to have two solutions. To obtain both of them we + use two different initial guesses for y. We denote them by subscripts + a and b. + + >>> y_a = np.zeros((2, x.size)) + >>> y_b = np.zeros((2, x.size)) + >>> y_b[0] = 3 + + Now we are ready to run the solver. 
+ + >>> from scipy.integrate import solve_bvp + >>> res_a = solve_bvp(fun, bc, x, y_a) + >>> res_b = solve_bvp(fun, bc, x, y_b) + + Let's plot the two found solutions. We take an advantage of having the + solution in a spline form to produce a smooth plot. + + >>> x_plot = np.linspace(0, 1, 100) + >>> y_plot_a = res_a.sol(x_plot)[0] + >>> y_plot_b = res_b.sol(x_plot)[0] + >>> import matplotlib.pyplot as plt + >>> plt.plot(x_plot, y_plot_a, label='y_a') + >>> plt.plot(x_plot, y_plot_b, label='y_b') + >>> plt.legend() + >>> plt.xlabel("x") + >>> plt.ylabel("y") + >>> plt.show() + + We see that the two solutions have similar shape, but differ in scale + significantly. + + In the second example we solve a simple Sturm-Liouville problem:: + + y'' + k**2 * y = 0 + y(0) = y(1) = 0 + + It is known that a non-trivial solution y = A * sin(k * x) is possible for + k = pi * n, where n is an integer. To establish the normalization constant + A = 1 we add a boundary condition:: + + y'(0) = k + + Again we rewrite our equation as a first order system and implement its + right-hand side evaluation:: + + y1' = y2 + y2' = -k**2 * y1 + + >>> def fun(x, y, p): + ... k = p[0] + ... return np.vstack((y[1], -k**2 * y[0])) + + Note that parameters p are passed as a vector (with one element in our + case). + + Implement the boundary conditions: + + >>> def bc(ya, yb, p): + ... k = p[0] + ... return np.array([ya[0], yb[0], ya[1] - k]) + + Setup the initial mesh and guess for y. We aim to find the solution for + k = 2 * pi, to achieve that we set values of y to approximately follow + sin(2 * pi * x): + + >>> x = np.linspace(0, 1, 5) + >>> y = np.zeros((2, x.size)) + >>> y[0, 1] = 1 + >>> y[0, 3] = -1 + + Run the solver with 6 as an initial guess for k. + + >>> sol = solve_bvp(fun, bc, x, y, p=[6]) + + We see that the found k is approximately correct: + + >>> sol.p[0] + 6.28329460046 + + And finally plot the solution to see the anticipated sinusoid: + + >>> x_plot = np.linspace(0, 1, 100) + >>> y_plot = sol.sol(x_plot)[0] + >>> plt.plot(x_plot, y_plot) + >>> plt.xlabel("x") + >>> plt.ylabel("y") + >>> plt.show() + """ + x = np.asarray(x, dtype=float) + if x.ndim != 1: + raise ValueError("`x` must be 1 dimensional.") + h = np.diff(x) + if np.any(h <= 0): + raise ValueError("`x` must be strictly increasing.") + a = x[0] + + y = np.asarray(y) + if np.issubdtype(y.dtype, np.complexfloating): + dtype = complex + else: + dtype = float + y = y.astype(dtype, copy=False) + + if y.ndim != 2: + raise ValueError("`y` must be 2 dimensional.") + if y.shape[1] != x.shape[0]: + raise ValueError("`y` is expected to have {} columns, but actually " + "has {}.".format(x.shape[0], y.shape[1])) + + if p is None: + p = np.array([]) + else: + p = np.asarray(p, dtype=dtype) + if p.ndim != 1: + raise ValueError("`p` must be 1 dimensional.") + + if tol < 100 * EPS: + warn("`tol` is too low, setting to {:.2e}".format(100 * EPS)) + tol = 100 * EPS + + if verbose not in [0, 1, 2]: + raise ValueError("`verbose` must be in [0, 1, 2].") + + n = y.shape[0] + k = p.shape[0] + + if S is not None: + S = np.asarray(S, dtype=dtype) + if S.shape != (n, n): + raise ValueError("`S` is expected to have shape {}, " + "but actually has {}".format((n, n), S.shape)) + + # Compute I - S^+ S to impose necessary boundary conditions. + B = np.identity(n) - np.dot(pinv(S), S) + + y[:, 0] = np.dot(B, y[:, 0]) + + # Compute (I - S)^+ to correct derivatives at x=a. 
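+        # B = I - pinv(S) S projects y(a) onto the null space of S, so the
+        # condition S y(a) = 0 holds exactly after the projection; D is used
+        # by the wrapped fun to keep the right-hand side finite at the
+        # singular point x = a (see `wrap_functions` above).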
+ D = pinv(np.identity(n) - S) + else: + B = None + D = None + + fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions( + fun, bc, fun_jac, bc_jac, k, a, S, D, dtype) + + f = fun_wrapped(x, y, p) + if f.shape != y.shape: + raise ValueError("`fun` return is expected to have shape {}, " + "but actually has {}.".format(y.shape, f.shape)) + + bc_res = bc_wrapped(y[:, 0], y[:, -1], p) + if bc_res.shape != (n + k,): + raise ValueError("`bc` return is expected to have shape {}, " + "but actually has {}.".format((n + k,), bc_res.shape)) + + status = 0 + iteration = 0 + if verbose == 2: + print_iteration_header() + + while True: + m = x.shape[0] + + col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped, + fun_jac_wrapped, bc_jac_wrapped, x, h) + y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys, + y, p, B, tol) + iteration += 1 + + col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y, + p, x, h) + # This relation is not trivial, but can be verified. + r_middle = 1.5 * col_res / h + sol = create_spline(y, f, x, h) + rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p, + r_middle, f_middle) + max_rms_res = np.max(rms_res) + + if singular: + status = 2 + break + + insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol)) + insert_2, = np.nonzero(rms_res >= 100 * tol) + nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0] + + if m + nodes_added > max_nodes: + status = 1 + if verbose == 2: + nodes_added = "({})".format(nodes_added) + print_iteration_progress(iteration, max_rms_res, m, + nodes_added) + break + + if verbose == 2: + print_iteration_progress(iteration, max_rms_res, m, nodes_added) + + if nodes_added > 0: + x = modify_mesh(x, insert_1, insert_2) + h = np.diff(x) + y = sol(x) + else: + status = 0 + break + + if verbose > 0: + if status == 0: + print("Solved in {} iterations, number of nodes {}, " + "maximum relative residual {:.2e}." + .format(iteration, x.shape[0], max_rms_res)) + elif status == 1: + print("Number of nodes is exceeded after iteration {}, " + "maximum relative residual {:.2e}." + .format(iteration, max_rms_res)) + elif status == 2: + print("Singular Jacobian encountered when solving the collocation " + "system on iteration {}, maximum relative residual {:.2e}." 
+ .format(iteration, max_rms_res)) + + if p.size == 0: + p = None + + return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res, + niter=iteration, status=status, + message=TERMINATION_MESSAGES[status], success=status == 0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_bvp.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_bvp.pyc new file mode 100644 index 0000000..999e730 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_bvp.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_dop.so b/project/venv/lib/python2.7/site-packages/scipy/integrate/_dop.so new file mode 100755 index 0000000..0b3ad26 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_dop.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/__init__.py new file mode 100644 index 0000000..1715e6a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/__init__.py @@ -0,0 +1,10 @@ +"""Suite of ODE solvers implemented in Python.""" +from __future__ import division, print_function, absolute_import + +from .ivp import solve_ivp +from .rk import RK23, RK45 +from .radau import Radau +from .bdf import BDF +from .lsoda import LSODA +from .common import OdeSolution +from .base import DenseOutput, OdeSolver diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/__init__.pyc new file mode 100644 index 0000000..033c290 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/base.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/base.py new file mode 100644 index 0000000..0cc77a1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/base.py @@ -0,0 +1,275 @@ +from __future__ import division, print_function, absolute_import +import numpy as np + + +def check_arguments(fun, y0, support_complex): + """Helper function for checking arguments common to all solvers.""" + y0 = np.asarray(y0) + if np.issubdtype(y0.dtype, np.complexfloating): + if not support_complex: + raise ValueError("`y0` is complex, but the chosen solver does " + "not support integration in a complex domain.") + dtype = complex + else: + dtype = float + y0 = y0.astype(dtype, copy=False) + + if y0.ndim != 1: + raise ValueError("`y0` must be 1-dimensional.") + + def fun_wrapped(t, y): + return np.asarray(fun(t, y), dtype=dtype) + + return fun_wrapped, y0 + + +class OdeSolver(object): + """Base class for ODE solvers. + + In order to implement a new solver you need to follow the guidelines: + + 1. A constructor must accept parameters presented in the base class + (listed below) along with any other parameters specific to a solver. + 2. A constructor must accept arbitrary extraneous arguments + ``**extraneous``, but warn that these arguments are irrelevant + using `common.warn_extraneous` function. Do not pass these + arguments to the base class. + 3. A solver must implement a private method `_step_impl(self)` which + propagates a solver one step further. 
It must return tuple + ``(success, message)``, where ``success`` is a boolean indicating + whether a step was successful, and ``message`` is a string + containing description of a failure if a step failed or None + otherwise. + 4. A solver must implement a private method `_dense_output_impl(self)` + which returns a `DenseOutput` object covering the last successful + step. + 5. A solver must have attributes listed below in Attributes section. + Note that `t_old` and `step_size` are updated automatically. + 6. Use `fun(self, t, y)` method for the system rhs evaluation, this + way the number of function evaluations (`nfev`) will be tracked + automatically. + 7. For convenience a base class provides `fun_single(self, t, y)` and + `fun_vectorized(self, t, y)` for evaluating the rhs in + non-vectorized and vectorized fashions respectively (regardless of + how `fun` from the constructor is implemented). These calls don't + increment `nfev`. + 8. If a solver uses a Jacobian matrix and LU decompositions, it should + track the number of Jacobian evaluations (`njev`) and the number of + LU decompositions (`nlu`). + 9. By convention the function evaluations used to compute a finite + difference approximation of the Jacobian should not be counted in + `nfev`, thus use `fun_single(self, t, y)` or + `fun_vectorized(self, t, y)` when computing a finite difference + approximation of the Jacobian. + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(t, y)``. + Here ``t`` is a scalar and there are two options for ndarray ``y``. + It can either have shape (n,), then ``fun`` must return array_like with + shape (n,). Or alternatively it can have shape (n, n_points), then + ``fun`` must return array_like with shape (n, n_points) (each column + corresponds to a single column in ``y``). The choice between the two + options is determined by `vectorized` argument (see below). + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time --- the integration won't continue beyond it. It also + determines the direction of the integration. + vectorized : bool + Whether `fun` is implemented in a vectorized fashion. + support_complex : bool, optional + Whether integration in a complex domain should be supported. + Generally determined by a derived solver class capabilities. + Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of the system's rhs evaluations. + njev : int + Number of the Jacobian evaluations. + nlu : int + Number of LU decompositions. + """ + TOO_SMALL_STEP = "Required step size is less than spacing between numbers." 
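+
+    # A minimal custom solver following the guidelines above might look like
+    # this (an illustrative sketch only, not part of scipy):
+    #
+    #     class Euler(OdeSolver):
+    #         def __init__(self, fun, t0, y0, t_bound, h=1e-3, **extraneous):
+    #             warn_extraneous(extraneous)  # from .common
+    #             super(Euler, self).__init__(fun, t0, y0, t_bound,
+    #                                         vectorized=False)
+    #             self.h = h
+    #
+    #         def _step_impl(self):
+    #             h = self.h * self.direction
+    #             self.y = self.y + h * self.fun(self.t, self.y)
+    #             self.t = self.t + h
+    #             return True, None
+    #
+    #         def _dense_output_impl(self):
+    #             # Crude piecewise-constant output, kept short for brevity.
+    #             return ConstantDenseOutput(self.t_old, self.t, self.y)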
+ + def __init__(self, fun, t0, y0, t_bound, vectorized, + support_complex=False): + self.t_old = None + self.t = t0 + self._fun, self.y = check_arguments(fun, y0, support_complex) + self.t_bound = t_bound + self.vectorized = vectorized + + if vectorized: + def fun_single(t, y): + return self._fun(t, y[:, None]).ravel() + fun_vectorized = self._fun + else: + fun_single = self._fun + + def fun_vectorized(t, y): + f = np.empty_like(y) + for i, yi in enumerate(y.T): + f[:, i] = self._fun(t, yi) + return f + + def fun(t, y): + self.nfev += 1 + return self.fun_single(t, y) + + self.fun = fun + self.fun_single = fun_single + self.fun_vectorized = fun_vectorized + + self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1 + self.n = self.y.size + self.status = 'running' + + self.nfev = 0 + self.njev = 0 + self.nlu = 0 + + @property + def step_size(self): + if self.t_old is None: + return None + else: + return np.abs(self.t - self.t_old) + + def step(self): + """Perform one integration step. + + Returns + ------- + message : string or None + Report from the solver. Typically a reason for a failure if + `self.status` is 'failed' after the step was taken or None + otherwise. + """ + if self.status != 'running': + raise RuntimeError("Attempt to step on a failed or finished " + "solver.") + + if self.n == 0 or self.t == self.t_bound: + # Handle corner cases of empty solver or no integration. + self.t_old = self.t + self.t = self.t_bound + message = None + self.status = 'finished' + else: + t = self.t + success, message = self._step_impl() + + if not success: + self.status = 'failed' + else: + self.t_old = t + if self.direction * (self.t - self.t_bound) >= 0: + self.status = 'finished' + + return message + + def dense_output(self): + """Compute a local interpolant over the last successful step. + + Returns + ------- + sol : `DenseOutput` + Local interpolant over the last successful step. + """ + if self.t_old is None: + raise RuntimeError("Dense output is available after a successful " + "step was made.") + + if self.n == 0 or self.t == self.t_old: + # Handle corner cases of empty solver and no integration. + return ConstantDenseOutput(self.t_old, self.t, self.y) + else: + return self._dense_output_impl() + + def _step_impl(self): + raise NotImplementedError + + def _dense_output_impl(self): + raise NotImplementedError + + +class DenseOutput(object): + """Base class for local interpolant over step made by an ODE solver. + + It interpolates between `t_min` and `t_max` (see Attributes below). + Evaluation outside this interval is not forbidden, but the accuracy is not + guaranteed. + + Attributes + ---------- + t_min, t_max : float + Time range of the interpolation. + """ + def __init__(self, t_old, t): + self.t_old = t_old + self.t = t + self.t_min = min(t, t_old) + self.t_max = max(t, t_old) + + def __call__(self, t): + """Evaluate the interpolant. + + Parameters + ---------- + t : float or array_like with shape (n_points,) + Points to evaluate the solution at. + + Returns + ------- + y : ndarray, shape (n,) or (n, n_points) + Computed values. Shape depends on whether `t` was a scalar or a + 1-d array. + """ + t = np.asarray(t) + if t.ndim > 1: + raise ValueError("`t` must be float or 1-d array.") + return self._call_impl(t) + + def _call_impl(self, t): + raise NotImplementedError + + +class ConstantDenseOutput(DenseOutput): + """Constant value interpolator. + + This class used for degenerate integration cases: equal integration limits + or a system with 0 equations. 
+ """ + def __init__(self, t_old, t, value): + super(ConstantDenseOutput, self).__init__(t_old, t) + self.value = value + + def _call_impl(self, t): + if t.ndim == 0: + return self.value + else: + ret = np.empty((self.value.shape[0], t.shape[0])) + ret[:] = self.value[:, None] + return ret diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/base.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/base.pyc new file mode 100644 index 0000000..25b0e51 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/bdf.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/bdf.py new file mode 100644 index 0000000..9a1a9bd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/bdf.py @@ -0,0 +1,466 @@ +from __future__ import division, print_function, absolute_import +import numpy as np +from scipy.linalg import lu_factor, lu_solve +from scipy.sparse import issparse, csc_matrix, eye +from scipy.sparse.linalg import splu +from scipy.optimize._numdiff import group_columns +from .common import (validate_max_step, validate_tol, select_initial_step, + norm, EPS, num_jac, validate_first_step, + warn_extraneous) +from .base import OdeSolver, DenseOutput + + +MAX_ORDER = 5 +NEWTON_MAXITER = 4 +MIN_FACTOR = 0.2 +MAX_FACTOR = 10 + + +def compute_R(order, factor): + """Compute the matrix for changing the differences array.""" + I = np.arange(1, order + 1)[:, None] + J = np.arange(1, order + 1) + M = np.zeros((order + 1, order + 1)) + M[1:, 1:] = (I - 1 - factor * J) / I + M[0] = 1 + return np.cumprod(M, axis=0) + + +def change_D(D, order, factor): + """Change differences array in-place when step size is changed.""" + R = compute_R(order, factor) + U = compute_R(order, 1) + RU = R.dot(U) + D[:order + 1] = np.dot(RU.T, D[:order + 1]) + + +def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol): + """Solve the algebraic system resulting from BDF method.""" + d = 0 + y = y_predict.copy() + dy_norm_old = None + converged = False + for k in range(NEWTON_MAXITER): + f = fun(t_new, y) + if not np.all(np.isfinite(f)): + break + + dy = solve_lu(LU, c * f - psi - d) + dy_norm = norm(dy / scale) + + if dy_norm_old is None: + rate = None + else: + rate = dy_norm / dy_norm_old + + if (rate is not None and (rate >= 1 or + rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)): + break + + y += dy + d += dy + + if (dy_norm == 0 or + rate is not None and rate / (1 - rate) * dy_norm < tol): + converged = True + break + + dy_norm_old = dy_norm + + return converged, k + 1, y, d + + +class BDF(OdeSolver): + """Implicit method based on backward-differentiation formulas. + + This is a variable order method with the order varying automatically from + 1 to 5. The general framework of the BDF algorithm is described in [1]_. + This class implements a quasi-constant step size as explained in [2]_. + The error estimation strategy for the constant-step BDF is derived in [3]_. + An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented. + + Can be applied in the complex domain. + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(t, y)``. + Here ``t`` is a scalar, and there are two options for the ndarray ``y``: + It can either have shape (n,); then ``fun`` must return array_like with + shape (n,). 
Alternatively it can have shape (n, k); then ``fun`` + must return an array_like with shape (n, k), i.e. each column + corresponds to a single column in ``y``. The choice between the two + options is determined by `vectorized` argument (see below). The + vectorized implementation allows a faster approximation of the Jacobian + by finite differences (required for this solver). + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e. the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits). But if a component of `y` + is approximately below `atol`, the error only needs to fall within + the same `atol` threshold, and the number of correct digits is not + guaranteed. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : {None, array_like, sparse_matrix, callable}, optional + Jacobian matrix of the right-hand side of the system with respect to y, + required by this method. The Jacobian matrix has shape (n, n) and its + element (i, j) is equal to ``d f_i / d y_j``. + There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)`` as necessary. + For the 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. + jac_sparsity : {None, array_like, sparse matrix}, optional + Defines a sparsity structure of the Jacobian matrix for a + finite-difference approximation. Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few non-zero + elements in *each* row, providing the sparsity structure will greatly + speed up the computations [4]_. A zero entry means that a corresponding + element in the Jacobian is always zero. If None (default), the Jacobian + is assumed to be dense. + vectorized : bool, optional + Whether `fun` is implemented in a vectorized fashion. Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + + References + ---------- + .. 
[1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical + Solution of Ordinary Differential Equations", ACM Transactions on + Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975. + .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI. + COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997. + .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I: + Nonstiff Problems", Sec. III.2. + .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. + """ + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, + vectorized=False, first_step=None, **extraneous): + warn_extraneous(extraneous) + super(BDF, self).__init__(fun, t0, y0, t_bound, vectorized, + support_complex=True) + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + f = self.fun(self.t, self.y) + if first_step is None: + self.h_abs = select_initial_step(self.fun, self.t, self.y, f, + self.direction, 1, + self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.h_abs_old = None + self.error_norm_old = None + + self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) + + self.jac_factor = None + self.jac, self.J = self._validate_jac(jac, jac_sparsity) + if issparse(self.J): + def lu(A): + self.nlu += 1 + return splu(A) + + def solve_lu(LU, b): + return LU.solve(b) + + I = eye(self.n, format='csc', dtype=self.y.dtype) + else: + def lu(A): + self.nlu += 1 + return lu_factor(A, overwrite_a=True) + + def solve_lu(LU, b): + return lu_solve(LU, b, overwrite_b=True) + + I = np.identity(self.n, dtype=self.y.dtype) + + self.lu = lu + self.solve_lu = solve_lu + self.I = I + + kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0]) + self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1)))) + self.alpha = (1 - kappa) * self.gamma + self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2) + + D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype) + D[0] = self.y + D[1] = f * self.h_abs * self.direction + self.D = D + + self.order = 1 + self.n_equal_steps = 0 + self.LU = None + + def _validate_jac(self, jac, sparsity): + t0 = self.t + y0 = self.y + + if jac is None: + if sparsity is not None: + if issparse(sparsity): + sparsity = csc_matrix(sparsity) + groups = group_columns(sparsity) + sparsity = (sparsity, groups) + + def jac_wrapped(t, y): + self.njev += 1 + f = self.fun_single(t, y) + J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, + self.atol, self.jac_factor, + sparsity) + return J + J = jac_wrapped(t0, y0) + elif callable(jac): + J = jac(t0, y0) + self.njev += 1 + if issparse(J): + J = csc_matrix(J, dtype=y0.dtype) + + def jac_wrapped(t, y): + self.njev += 1 + return csc_matrix(jac(t, y), dtype=y0.dtype) + else: + J = np.asarray(J, dtype=y0.dtype) + + def jac_wrapped(t, y): + self.njev += 1 + return np.asarray(jac(t, y), dtype=y0.dtype) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." + .format((self.n, self.n), J.shape)) + else: + if issparse(jac): + J = csc_matrix(jac, dtype=y0.dtype) + else: + J = np.asarray(jac, dtype=y0.dtype) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." 
+ .format((self.n, self.n), J.shape)) + jac_wrapped = None + + return jac_wrapped, J + + def _step_impl(self): + t = self.t + D = self.D + + max_step = self.max_step + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + if self.h_abs > max_step: + h_abs = max_step + change_D(D, self.order, max_step / self.h_abs) + self.n_equal_steps = 0 + elif self.h_abs < min_step: + h_abs = min_step + change_D(D, self.order, min_step / self.h_abs) + self.n_equal_steps = 0 + else: + h_abs = self.h_abs + + atol = self.atol + rtol = self.rtol + order = self.order + + alpha = self.alpha + gamma = self.gamma + error_const = self.error_const + + J = self.J + LU = self.LU + current_jac = self.jac is None + + step_accepted = False + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + change_D(D, order, np.abs(t_new - t) / h_abs) + self.n_equal_steps = 0 + LU = None + + h = t_new - t + h_abs = np.abs(h) + + y_predict = np.sum(D[:order + 1], axis=0) + + scale = atol + rtol * np.abs(y_predict) + psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order] + + converged = False + c = h / alpha[order] + while not converged: + if LU is None: + LU = self.lu(self.I - c * J) + + converged, n_iter, y_new, d = solve_bdf_system( + self.fun, t_new, y_predict, c, psi, LU, self.solve_lu, + scale, self.newton_tol) + + if not converged: + if current_jac: + break + J = self.jac(t_new, y_predict) + LU = None + current_jac = True + + if not converged: + factor = 0.5 + h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + LU = None + continue + + safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + + n_iter) + + scale = atol + rtol * np.abs(y_new) + error = error_const[order] * d + error_norm = norm(error / scale) + + if error_norm > 1: + factor = max(MIN_FACTOR, + safety * error_norm ** (-1 / (order + 1))) + h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + # As we didn't have problems with convergence, we don't + # reset LU here. + else: + step_accepted = True + + self.n_equal_steps += 1 + + self.t = t_new + self.y = y_new + + self.h_abs = h_abs + self.J = J + self.LU = LU + + # Update differences. The principal relation here is + # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D + # contained difference for previous interpolating polynomial and + # d = D^{k + 1} y_n. Thus this elegant code follows. 
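+        # Concretely, with k = order: D[order + 2] becomes
+        # D^{k + 2} y_n = d - D^{k + 1} y_{n - 1}, d is stored as the new
+        # D^{k + 1} y_n, and the loop rebuilds the lower-order differences
+        # via D^{j} y_n = D^{j + 1} y_n + D^{j} y_{n - 1}.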
+ D[order + 2] = d - D[order + 1] + D[order + 1] = d + for i in reversed(range(order + 1)): + D[i] += D[i + 1] + + if self.n_equal_steps < order + 1: + return True, None + + if order > 1: + error_m = error_const[order - 1] * D[order] + error_m_norm = norm(error_m / scale) + else: + error_m_norm = np.inf + + if order < MAX_ORDER: + error_p = error_const[order + 1] * D[order + 2] + error_p_norm = norm(error_p / scale) + else: + error_p_norm = np.inf + + error_norms = np.array([error_m_norm, error_norm, error_p_norm]) + factors = error_norms ** (-1 / np.arange(order, order + 3)) + + delta_order = np.argmax(factors) - 1 + order += delta_order + self.order = order + + factor = min(MAX_FACTOR, safety * np.max(factors)) + self.h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + self.LU = None + + return True, None + + def _dense_output_impl(self): + return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction, + self.order, self.D[:self.order + 1].copy()) + + +class BdfDenseOutput(DenseOutput): + def __init__(self, t_old, t, h, order, D): + super(BdfDenseOutput, self).__init__(t_old, t) + self.order = order + self.t_shift = self.t - h * np.arange(self.order) + self.denom = h * (1 + np.arange(self.order)) + self.D = D + + def _call_impl(self, t): + if t.ndim == 0: + x = (t - self.t_shift) / self.denom + p = np.cumprod(x) + else: + x = (t - self.t_shift[:, None]) / self.denom[:, None] + p = np.cumprod(x, axis=0) + + y = np.dot(self.D[1:].T, p) + if y.ndim == 1: + y += self.D[0] + else: + y += self.D[0, :, None] + + return y diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/bdf.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/bdf.pyc new file mode 100644 index 0000000..fbc232d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/bdf.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/common.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/common.py new file mode 100644 index 0000000..71122d7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/common.py @@ -0,0 +1,431 @@ +from __future__ import division, print_function, absolute_import +from itertools import groupby +from warnings import warn +import numpy as np +from scipy.sparse import find, coo_matrix + + +EPS = np.finfo(float).eps + + +def validate_first_step(first_step, t0, t_bound): + """Assert that first_step is valid and return it.""" + if first_step <= 0: + raise ValueError("`first_step` must be positive.") + if first_step > np.abs(t_bound - t0): + raise ValueError("`first_step` exceeds bounds.") + return first_step + + +def validate_max_step(max_step): + """Assert that max_Step is valid and return it.""" + if max_step <= 0: + raise ValueError("`max_step` must be positive.") + return max_step + + +def warn_extraneous(extraneous): + """Display a warning for extraneous keyword arguments. + + The initializer of each solver class is expected to collect keyword + arguments that it doesn't understand and warn about them. This function + prints a warning for each key in the supplied dictionary. + + Parameters + ---------- + extraneous : dict + Extraneous keyword arguments + """ + if extraneous: + warn("The following arguments have no effect for a chosen solver: {}." 
+             .format(", ".join("`{}`".format(x) for x in extraneous)))
+
+
+def validate_tol(rtol, atol, n):
+    """Validate tolerance values."""
+    if rtol < 100 * EPS:
+        warn("`rtol` is too low, setting to {}".format(100 * EPS))
+        rtol = 100 * EPS
+
+    atol = np.asarray(atol)
+    if atol.ndim > 0 and atol.shape != (n,):
+        raise ValueError("`atol` has wrong shape.")
+
+    if np.any(atol < 0):
+        raise ValueError("`atol` must be non-negative.")
+
+    return rtol, atol
+
+
+def norm(x):
+    """Compute RMS norm."""
+    return np.linalg.norm(x) / x.size ** 0.5
+
+
+def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):
+    """Empirically select a good initial step.
+
+    The algorithm is described in [1]_.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system.
+    t0 : float
+        Initial value of the independent variable.
+    y0 : ndarray, shape (n,)
+        Initial value of the dependent variable.
+    f0 : ndarray, shape (n,)
+        Initial value of the derivative, i.e. ``fun(t0, y0)``.
+    direction : float
+        Integration direction.
+    order : float
+        Method order.
+    rtol : float
+        Desired relative tolerance.
+    atol : float
+        Desired absolute tolerance.
+
+    Returns
+    -------
+    h_abs : float
+        Absolute value of the suggested initial step.
+
+    References
+    ----------
+    .. [1] E. Hairer, S. P. Norsett and G. Wanner, "Solving Ordinary
+       Differential Equations I: Nonstiff Problems", Sec. II.4.
+    """
+    if y0.size == 0:
+        return np.inf
+
+    scale = atol + np.abs(y0) * rtol
+    d0 = norm(y0 / scale)
+    d1 = norm(f0 / scale)
+    if d0 < 1e-5 or d1 < 1e-5:
+        h0 = 1e-6
+    else:
+        h0 = 0.01 * d0 / d1
+
+    y1 = y0 + h0 * direction * f0
+    f1 = fun(t0 + h0 * direction, y1)
+    d2 = norm((f1 - f0) / scale) / h0
+
+    if d1 <= 1e-15 and d2 <= 1e-15:
+        h1 = max(1e-6, h0 * 1e-3)
+    else:
+        h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))
+
+    return min(100 * h0, h1)
+
+
+class OdeSolution(object):
+    """Continuous ODE solution.
+
+    It is organized as a collection of `DenseOutput` objects which represent
+    local interpolants. It provides an algorithm to select the right
+    interpolant for each given point.
+
+    The interpolants cover the range between `t_min` and `t_max` (see
+    Attributes below). Evaluation outside this interval is not forbidden, but
+    the accuracy is not guaranteed.
+
+    When evaluating at a breakpoint (one of the values in `ts`) a segment
+    with the lower index is selected.
+
+    Parameters
+    ----------
+    ts : array_like, shape (n_segments + 1,)
+        Time instants between which local interpolants are defined. Must
+        be strictly increasing or decreasing (a zero segment with two points
+        is also allowed).
+    interpolants : list of DenseOutput with n_segments elements
+        Local interpolants. An i-th interpolant is assumed to be defined
+        between ``ts[i]`` and ``ts[i + 1]``.
+
+    Attributes
+    ----------
+    t_min, t_max : float
+        Time range of the interpolation.
+    """
+    def __init__(self, ts, interpolants):
+        ts = np.asarray(ts)
+        d = np.diff(ts)
+        # The first case covers integration on a zero segment.
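+        # (A "zero segment" means ts = [t0, t0], e.g. produced when the
+        # integration interval is empty because t_bound == t0.)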
+ if not ((ts.size == 2 and ts[0] == ts[-1]) + or np.all(d > 0) or np.all(d < 0)): + raise ValueError("`ts` must be strictly increasing or decreasing.") + + self.n_segments = len(interpolants) + if ts.shape != (self.n_segments + 1,): + raise ValueError("Numbers of time stamps and interpolants " + "don't match.") + + self.ts = ts + self.interpolants = interpolants + if ts[-1] >= ts[0]: + self.t_min = ts[0] + self.t_max = ts[-1] + self.ascending = True + self.ts_sorted = ts + else: + self.t_min = ts[-1] + self.t_max = ts[0] + self.ascending = False + self.ts_sorted = ts[::-1] + + def _call_single(self, t): + # Here we preserve a certain symmetry that when t is in self.ts, + # then we prioritize a segment with a lower index. + if self.ascending: + ind = np.searchsorted(self.ts_sorted, t, side='left') + else: + ind = np.searchsorted(self.ts_sorted, t, side='right') + + segment = min(max(ind - 1, 0), self.n_segments - 1) + if not self.ascending: + segment = self.n_segments - 1 - segment + + return self.interpolants[segment](t) + + def __call__(self, t): + """Evaluate the solution. + + Parameters + ---------- + t : float or array_like with shape (n_points,) + Points to evaluate at. + + Returns + ------- + y : ndarray, shape (n_states,) or (n_states, n_points) + Computed values. Shape depends on whether `t` is a scalar or a + 1-d array. + """ + t = np.asarray(t) + + if t.ndim == 0: + return self._call_single(t) + + order = np.argsort(t) + reverse = np.empty_like(order) + reverse[order] = np.arange(order.shape[0]) + t_sorted = t[order] + + # See comment in self._call_single. + if self.ascending: + segments = np.searchsorted(self.ts_sorted, t_sorted, side='left') + else: + segments = np.searchsorted(self.ts_sorted, t_sorted, side='right') + segments -= 1 + segments[segments < 0] = 0 + segments[segments > self.n_segments - 1] = self.n_segments - 1 + if not self.ascending: + segments = self.n_segments - 1 - segments + + ys = [] + group_start = 0 + for segment, group in groupby(segments): + group_end = group_start + len(list(group)) + y = self.interpolants[segment](t_sorted[group_start:group_end]) + ys.append(y) + group_start = group_end + + ys = np.hstack(ys) + ys = ys[:, reverse] + + return ys + + +NUM_JAC_DIFF_REJECT = EPS ** 0.875 +NUM_JAC_DIFF_SMALL = EPS ** 0.75 +NUM_JAC_DIFF_BIG = EPS ** 0.25 +NUM_JAC_MIN_FACTOR = 1e3 * EPS +NUM_JAC_FACTOR_INCREASE = 10 +NUM_JAC_FACTOR_DECREASE = 0.1 + + +def num_jac(fun, t, y, f, threshold, factor, sparsity=None): + """Finite differences Jacobian approximation tailored for ODE solvers. + + This function computes finite difference approximation to the Jacobian + matrix of `fun` with respect to `y` using forward differences. + The Jacobian matrix has shape (n, n) and its element (i, j) is equal to + ``d f_i / d y_j``. + + A special feature of this function is the ability to correct the step + size from iteration to iteration. The main idea is to keep the finite + difference significantly separated from its round-off error which + approximately equals ``EPS * np.abs(f)``. It reduces a possibility of a + huge error and assures that the estimated derivative are reasonably close + to the true values (i.e. the finite difference approximation is at least + qualitatively reflects the structure of the true Jacobian). + + Parameters + ---------- + fun : callable + Right-hand side of the system implemented in a vectorized fashion. + t : float + Current time. + y : ndarray, shape (n,) + Current state. + f : ndarray, shape (n,) + Value of the right hand side at (t, y). 
+ threshold : float + Threshold for `y` value used for computing the step size as + ``factor * np.maximum(np.abs(y), threshold)``. Typically the value of + absolute tolerance (atol) for a solver should be passed as `threshold`. + factor : ndarray with shape (n,) or None + Factor to use for computing the step size. Pass None for the very + evaluation, then use the value returned from this function. + sparsity : tuple (structure, groups) or None + Sparsity structure of the Jacobian, `structure` must be csc_matrix. + + Returns + ------- + J : ndarray or csc_matrix, shape (n, n) + Jacobian matrix. + factor : ndarray, shape (n,) + Suggested `factor` for the next evaluation. + """ + y = np.asarray(y) + n = y.shape[0] + if n == 0: + return np.empty((0, 0)), factor + + if factor is None: + factor = np.full(n, EPS ** 0.5) + else: + factor = factor.copy() + + # Direct the step as ODE dictates, hoping that such a step won't lead to + # a problematic region. For complex ODEs it makes sense to use the real + # part of f as we use steps along real axis. + f_sign = 2 * (np.real(f) >= 0).astype(float) - 1 + y_scale = f_sign * np.maximum(threshold, np.abs(y)) + h = (y + factor * y_scale) - y + + # Make sure that the step is not 0 to start with. Not likely it will be + # executed often. + for i in np.nonzero(h == 0)[0]: + while h[i] == 0: + factor[i] *= 10 + h[i] = (y[i] + factor[i] * y_scale[i]) - y[i] + + if sparsity is None: + return _dense_num_jac(fun, t, y, f, h, factor, y_scale) + else: + structure, groups = sparsity + return _sparse_num_jac(fun, t, y, f, h, factor, y_scale, + structure, groups) + + +def _dense_num_jac(fun, t, y, f, h, factor, y_scale): + n = y.shape[0] + h_vecs = np.diag(h) + f_new = fun(t, y[:, None] + h_vecs) + diff = f_new - f[:, None] + max_ind = np.argmax(np.abs(diff), axis=0) + r = np.arange(n) + max_diff = np.abs(diff[max_ind, r]) + scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r])) + + diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale + if np.any(diff_too_small): + ind, = np.nonzero(diff_too_small) + new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind] + h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind] + h_vecs[ind, ind] = h_new + f_new = fun(t, y[:, None] + h_vecs[:, ind]) + diff_new = f_new - f[:, None] + max_ind = np.argmax(np.abs(diff_new), axis=0) + r = np.arange(ind.shape[0]) + max_diff_new = np.abs(diff_new[max_ind, r]) + scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r])) + + update = max_diff[ind] * scale_new < max_diff_new * scale[ind] + if np.any(update): + update, = np.nonzero(update) + update_ind = ind[update] + factor[update_ind] = new_factor[update] + h[update_ind] = h_new[update] + diff[:, update_ind] = diff_new[:, update] + scale[update_ind] = scale_new[update] + max_diff[update_ind] = max_diff_new[update] + + diff /= h + + factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE + factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE + factor = np.maximum(factor, NUM_JAC_MIN_FACTOR) + + return diff, factor + + +def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups): + n = y.shape[0] + n_groups = np.max(groups) + 1 + h_vecs = np.empty((n_groups, n)) + for group in range(n_groups): + e = np.equal(group, groups) + h_vecs[group] = h * e + h_vecs = h_vecs.T + + f_new = fun(t, y[:, None] + h_vecs) + df = f_new - f[:, None] + + i, j, _ = find(structure) + diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc() + max_ind = 
np.array(abs(diff).argmax(axis=0)).ravel() + r = np.arange(n) + max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel() + scale = np.maximum(np.abs(f[max_ind]), + np.abs(f_new[max_ind, groups[r]])) + + diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale + if np.any(diff_too_small): + ind, = np.nonzero(diff_too_small) + new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind] + h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind] + h_new_all = np.zeros(n) + h_new_all[ind] = h_new + + groups_unique = np.unique(groups[ind]) + groups_map = np.empty(n_groups, dtype=int) + h_vecs = np.empty((groups_unique.shape[0], n)) + for k, group in enumerate(groups_unique): + e = np.equal(group, groups) + h_vecs[k] = h_new_all * e + groups_map[group] = k + h_vecs = h_vecs.T + + f_new = fun(t, y[:, None] + h_vecs) + df = f_new - f[:, None] + i, j, _ = find(structure[:, ind]) + diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]], + (i, j)), shape=(n, ind.shape[0])).tocsc() + + max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel() + r = np.arange(ind.shape[0]) + max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel() + scale_new = np.maximum( + np.abs(f[max_ind_new]), + np.abs(f_new[max_ind_new, groups_map[groups[ind]]])) + + update = max_diff[ind] * scale_new < max_diff_new * scale[ind] + if np.any(update): + update, = np.nonzero(update) + update_ind = ind[update] + factor[update_ind] = new_factor[update] + h[update_ind] = h_new[update] + diff[:, update_ind] = diff_new[:, update] + scale[update_ind] = scale_new[update] + max_diff[update_ind] = max_diff_new[update] + + diff.data /= np.repeat(h, np.diff(diff.indptr)) + + factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE + factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE + factor = np.maximum(factor, NUM_JAC_MIN_FACTOR) + + return diff, factor diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/common.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/common.pyc new file mode 100644 index 0000000..9d51096 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/common.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/ivp.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/ivp.py new file mode 100644 index 0000000..755a9f6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/ivp.py @@ -0,0 +1,566 @@ +from __future__ import division, print_function, absolute_import +import inspect +import numpy as np +from .bdf import BDF +from .radau import Radau +from .rk import RK23, RK45 +from .lsoda import LSODA +from scipy.optimize import OptimizeResult +from .common import EPS, OdeSolution +from .base import OdeSolver + + +METHODS = {'RK23': RK23, + 'RK45': RK45, + 'Radau': Radau, + 'BDF': BDF, + 'LSODA': LSODA} + + +MESSAGES = {0: "The solver successfully reached the end of the integration interval.", + 1: "A termination event occurred."} + + +class OdeResult(OptimizeResult): + pass + + +def prepare_events(events): + """Standardize event functions and extract is_terminal and direction.""" + if callable(events): + events = (events,) + + if events is not None: + is_terminal = np.empty(len(events), dtype=bool) + direction = np.empty(len(events)) + for i, event in enumerate(events): + try: + is_terminal[i] = event.terminal + except AttributeError: + is_terminal[i] = False + + try: + direction[i] = event.direction + except AttributeError: + direction[i] = 0 + 
else: + is_terminal = None + direction = None + + return events, is_terminal, direction + + +def solve_event_equation(event, sol, t_old, t): + """Solve an equation corresponding to an ODE event. + + The equation is ``event(t, y(t)) = 0``, here ``y(t)`` is known from an + ODE solver using some sort of interpolation. It is solved by + `scipy.optimize.brentq` with xtol=atol=4*EPS. + + Parameters + ---------- + event : callable + Function ``event(t, y)``. + sol : callable + Function ``sol(t)`` which evaluates an ODE solution between `t_old` + and `t`. + t_old, t : float + Previous and new values of time. They will be used as a bracketing + interval. + + Returns + ------- + root : float + Found solution. + """ + from scipy.optimize import brentq + return brentq(lambda t: event(t, sol(t)), t_old, t, + xtol=4 * EPS, rtol=4 * EPS) + + +def handle_events(sol, events, active_events, is_terminal, t_old, t): + """Helper function to handle events. + + Parameters + ---------- + sol : DenseOutput + Function ``sol(t)`` which evaluates an ODE solution between `t_old` + and `t`. + events : list of callables, length n_events + Event functions with signatures ``event(t, y)``. + active_events : ndarray + Indices of events which occurred. + is_terminal : ndarray, shape (n_events,) + Which events are terminal. + t_old, t : float + Previous and new values of time. + + Returns + ------- + root_indices : ndarray + Indices of events which take zero between `t_old` and `t` and before + a possible termination. + roots : ndarray + Values of t at which events occurred. + terminate : bool + Whether a terminal event occurred. + """ + roots = [] + for event_index in active_events: + roots.append(solve_event_equation(events[event_index], sol, t_old, t)) + + roots = np.asarray(roots) + + if np.any(is_terminal[active_events]): + if t > t_old: + order = np.argsort(roots) + else: + order = np.argsort(-roots) + active_events = active_events[order] + roots = roots[order] + t = np.nonzero(is_terminal[active_events])[0][0] + active_events = active_events[:t + 1] + roots = roots[:t + 1] + terminate = True + else: + terminate = False + + return active_events, roots, terminate + + +def find_active_events(g, g_new, direction): + """Find which event occurred during an integration step. + + Parameters + ---------- + g, g_new : array_like, shape (n_events,) + Values of event functions at a current and next points. + direction : ndarray, shape (n_events,) + Event "direction" according to the definition in `solve_ivp`. + + Returns + ------- + active_events : ndarray + Indices of events which occurred during the step. + """ + g, g_new = np.asarray(g), np.asarray(g_new) + up = (g <= 0) & (g_new >= 0) + down = (g >= 0) & (g_new <= 0) + either = up | down + mask = (up & (direction > 0) | + down & (direction < 0) | + either & (direction == 0)) + + return np.nonzero(mask)[0] + + +def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False, + events=None, vectorized=False, **options): + """Solve an initial value problem for a system of ODEs. + + This function numerically integrates a system of ordinary differential + equations given an initial value:: + + dy / dt = f(t, y) + y(t0) = y0 + + Here t is a one-dimensional independent variable (time), y(t) is an + n-dimensional vector-valued function (state), and an n-dimensional + vector-valued function f(t, y) determines the differential equations. + The goal is to find y(t) approximately satisfying the differential + equations, given an initial value y(t0)=y0. 
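The event helpers above are compact enough to exercise in isolation. A minimal sketch of the same detect-then-refine logic used by find_active_events and solve_event_equation, with a hypothetical closed-form solution standing in for the solver's dense output:

import numpy as np
from scipy.optimize import brentq

sol = np.cos                      # stand-in for a dense-output callable
event = lambda t, y: y            # event fires where y(t) crosses zero

t_old, t = 1.0, 2.0               # one solver step; cos changes sign at pi/2
g_old, g_new = event(t_old, sol(t_old)), event(t, sol(t))

# Sign-change test, i.e. the direction = 0 case of find_active_events.
if (g_old <= 0 <= g_new) or (g_old >= 0 >= g_new):
    # Refine the crossing inside [t_old, t], as solve_event_equation does.
    root = brentq(lambda s: event(s, sol(s)), t_old, t)
    print(root)                   # ~1.5708, i.e. pi/2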
+
+    Some of the solvers support integration in the complex domain, but note
+    that for stiff ODE solvers, the right-hand side must be
+    complex-differentiable (satisfy Cauchy-Riemann equations [11]_). To solve
+    a problem in the complex domain, pass y0 with a complex data type.
+    Another option is always to rewrite your problem for real and imaginary
+    parts separately.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here ``t`` is a scalar, and there are two options for the ndarray
+        ``y``: It can either have shape (n,); then ``fun`` must return
+        array_like with shape (n,). Alternatively it can have shape (n, k);
+        then ``fun`` must return an array_like with shape (n, k), i.e. each
+        column corresponds to a single column in ``y``. The choice between
+        the two options is determined by `vectorized` argument (see below).
+        The vectorized implementation allows a faster approximation of the
+        Jacobian by finite differences (required for stiff solvers).
+    t_span : 2-tuple of floats
+        Interval of integration (t0, tf). The solver starts with t=t0 and
+        integrates until it reaches t=tf.
+    y0 : array_like, shape (n,)
+        Initial state. For problems in the complex domain, pass `y0` with a
+        complex data type (even if the initial guess is purely real).
+    method : string or `OdeSolver`, optional
+        Integration method to use:
+
+            * 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
+              The error is controlled assuming accuracy of the fourth-order
+              method, but steps are taken using the fifth-order accurate
+              formula (local extrapolation is done). A quartic interpolation
+              polynomial is used for the dense output [2]_. Can be applied in
+              the complex domain.
+            * 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The
+              error is controlled assuming accuracy of the second-order
+              method, but steps are taken using the third-order accurate
+              formula (local extrapolation is done). A cubic Hermite
+              polynomial is used for the dense output. Can be applied in the
+              complex domain.
+            * 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
+              order 5 [4]_. The error is controlled with a third-order
+              accurate embedded formula. A cubic polynomial which satisfies
+              the collocation conditions is used for the dense output.
+            * 'BDF': Implicit multi-step variable-order (1 to 5) method based
+              on a backward differentiation formula for the derivative
+              approximation [5]_. The implementation follows the one described
+              in [6]_. A quasi-constant step scheme is used and accuracy is
+              enhanced using the NDF modification. Can be applied in the
+              complex domain.
+            * 'LSODA': Adams/BDF method with automatic stiffness detection and
+              switching [7]_, [8]_. This is a wrapper of the Fortran solver
+              from ODEPACK.
+
+        You should use the 'RK45' or 'RK23' method for non-stiff problems and
+        'Radau' or 'BDF' for stiff problems [9]_. If not sure, first try to
+        run 'RK45'. If it needs unusually many iterations, diverges, or
+        fails, your problem is likely to be stiff and you should use 'Radau'
+        or 'BDF'. 'LSODA' can also be a good universal choice, but it might
+        be somewhat less convenient to work with as it wraps old Fortran code.
+
+        You can also pass an arbitrary class derived from `OdeSolver` which
+        implements the solver.
+    dense_output : bool, optional
+        Whether to compute a continuous solution. Default is False.
+    t_eval : array_like or None, optional
+        Times at which to store the computed solution, must be sorted and lie
+        within `t_span`. 
If None (default), use points selected by the solver. + events : callable, list of callables or None, optional + Types of events to track. Each is defined by a continuous function of + time and state that becomes zero value in case of an event. Each function + must have the signature ``event(t, y)`` and return a float. The solver will + find an accurate value of ``t`` at which ``event(t, y(t)) = 0`` using a + root-finding algorithm. Additionally each ``event`` function might have + the following attributes: + + * terminal: bool, whether to terminate integration if this + event occurs. Implicitly False if not assigned. + * direction: float, direction of a zero crossing. If `direction` + is positive, `event` must go from negative to positive, and + vice versa if `direction` is negative. If 0, then either direction + will count. Implicitly 0 if not assigned. + + You can assign attributes like ``event.terminal = True`` to any + function in Python. If None (default), events won't be tracked. + vectorized : bool, optional + Whether `fun` is implemented in a vectorized fashion. Default is False. + options + Options passed to a chosen solver. All options available for already + implemented solvers are listed below. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e. the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits). But if a component of `y` + is approximately below `atol`, the error only needs to fall within + the same `atol` threshold, and the number of correct digits is not + guaranteed. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : {None, array_like, sparse_matrix, callable}, optional + Jacobian matrix of the right-hand side of the system with respect to + y, required by the 'Radau', 'BDF' and 'LSODA' method. The Jacobian matrix + has shape (n, n) and its element (i, j) is equal to ``d f_i / d y_j``. + There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. Not supported by 'LSODA'. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)`` as necessary. + For the 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. + jac_sparsity : {None, array_like, sparse matrix}, optional + Defines a sparsity structure of the Jacobian matrix for a + finite-difference approximation. Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few non-zero + elements in *each* row, providing the sparsity structure will greatly + speed up the computations [10]_. A zero entry means that a corresponding + element in the Jacobian is always zero. If None (default), the Jacobian + is assumed to be dense. 
+        Not supported by 'LSODA', see `lband` and `uband` instead.
+    lband, uband : int or None
+        Parameters defining the bandwidth of the Jacobian for the 'LSODA'
+        method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
+        Setting these requires your jac routine to return the Jacobian in the
+        packed format: the returned array must have ``n`` columns and
+        ``uband + lband + 1`` rows in which Jacobian diagonals are written.
+        Specifically ``jac_packed[uband + i - j, j] = jac[i, j]``. The same
+        format is used in `scipy.linalg.solve_banded` (check for an
+        illustration). These parameters can be also used with ``jac=None`` to
+        reduce the number of Jacobian elements estimated by finite
+        differences.
+    min_step : float, optional
+        The minimum allowed step size for the 'LSODA' method.
+        By default `min_step` is zero.
+
+    Returns
+    -------
+    Bunch object with the following fields defined:
+    t : ndarray, shape (n_points,)
+        Time points.
+    y : ndarray, shape (n, n_points)
+        Values of the solution at `t`.
+    sol : `OdeSolution` or None
+        Found solution as `OdeSolution` instance; None if `dense_output` was
+        set to False.
+    t_events : list of ndarray or None
+        Contains for each event type a list of arrays at which an event of
+        that type was detected. None if `events` was None.
+    nfev : int
+        Number of evaluations of the right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian.
+    nlu : int
+        Number of LU decompositions.
+    status : int
+        Reason for algorithm termination:
+
+            * -1: Integration step failed.
+            * 0: The solver successfully reached the end of `t_span`.
+            * 1: A termination event occurred.
+
+    message : string
+        Human-readable description of the termination reason.
+    success : bool
+        True if the solver reached the interval end or a termination event
+        occurred (``status >= 0``).
+
+    References
+    ----------
+    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
+           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
+           No. 1, pp. 19-26, 1980.
+    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
+           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
+    .. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
+           Appl. Math. Lett., Vol. 2, No. 4, pp. 321-325, 1989.
+    .. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
+           Stiff and Differential-Algebraic Problems", Sec. IV.8.
+    .. [5] `Backward Differentiation Formula
+           <https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
+           on Wikipedia.
+    .. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J.
+           SCI. COMPUT., Vol. 18, No. 1, pp. 1-22, January 1997.
+    .. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
+           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
+           pp. 55-64, 1983.
+    .. [8] L. Petzold, "Automatic selection of methods for solving stiff and
+           nonstiff systems of ordinary differential equations", SIAM Journal
+           on Scientific and Statistical Computing, Vol. 4, No. 1,
+           pp. 136-148, 1983.
+    .. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
+           Wikipedia.
+    .. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+            sparse Jacobian matrices", Journal of the Institute of Mathematics
+            and its Applications, 13, pp. 117-120, 1974.
+    .. [11] `Cauchy-Riemann equations
+            <https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
+            Wikipedia.
+
+    Examples
+    --------
+    Basic exponential decay showing automatically chosen time points.
+ + >>> from scipy.integrate import solve_ivp + >>> def exponential_decay(t, y): return -0.5 * y + >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8]) + >>> print(sol.t) + [ 0. 0.11487653 1.26364188 3.06061781 4.85759374 + 6.65456967 8.4515456 10. ] + >>> print(sol.y) + [[2. 1.88836035 1.06327177 0.43319312 0.17648948 0.0719045 + 0.02929499 0.01350938] + [4. 3.7767207 2.12654355 0.86638624 0.35297895 0.143809 + 0.05858998 0.02701876] + [8. 7.5534414 4.25308709 1.73277247 0.7059579 0.287618 + 0.11717996 0.05403753]] + + Specifying points where the solution is desired. + + >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8], + ... t_eval=[0, 1, 2, 4, 10]) + >>> print(sol.t) + [ 0 1 2 4 10] + >>> print(sol.y) + [[2. 1.21305369 0.73534021 0.27066736 0.01350938] + [4. 2.42610739 1.47068043 0.54133472 0.02701876] + [8. 4.85221478 2.94136085 1.08266944 0.05403753]] + + Cannon fired upward with terminal event upon impact. The ``terminal`` and + ``direction`` fields of an event are applied by monkey patching a function. + Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts at + position 0 with velocity +10. Note that the integration never reaches t=100 + because the event is terminal. + + >>> def upward_cannon(t, y): return [y[1], -0.5] + >>> def hit_ground(t, y): return y[1] + >>> hit_ground.terminal = True + >>> hit_ground.direction = -1 + >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground) + >>> print(sol.t_events) + [array([ 20.])] + >>> print(sol.t) + [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02 + 1.11088891e-01 1.11098890e+00 1.11099890e+01 2.00000000e+01] + """ + if method not in METHODS and not ( + inspect.isclass(method) and issubclass(method, OdeSolver)): + raise ValueError("`method` must be one of {} or OdeSolver class." + .format(METHODS)) + + t0, tf = float(t_span[0]), float(t_span[1]) + + if t_eval is not None: + t_eval = np.asarray(t_eval) + if t_eval.ndim != 1: + raise ValueError("`t_eval` must be 1-dimensional.") + + if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)): + raise ValueError("Values in `t_eval` are not within `t_span`.") + + d = np.diff(t_eval) + if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0): + raise ValueError("Values in `t_eval` are not properly sorted.") + + if tf > t0: + t_eval_i = 0 + else: + # Make order of t_eval decreasing to use np.searchsorted. + t_eval = t_eval[::-1] + # This will be an upper bound for slices. 
+ t_eval_i = t_eval.shape[0] + + if method in METHODS: + method = METHODS[method] + + solver = method(fun, t0, y0, tf, vectorized=vectorized, **options) + + if t_eval is None: + ts = [t0] + ys = [y0] + elif t_eval is not None and dense_output: + ts = [] + ti = [t0] + ys = [] + else: + ts = [] + ys = [] + + interpolants = [] + + events, is_terminal, event_dir = prepare_events(events) + + if events is not None: + g = [event(t0, y0) for event in events] + t_events = [[] for _ in range(len(events))] + else: + t_events = None + + status = None + while status is None: + message = solver.step() + + if solver.status == 'finished': + status = 0 + elif solver.status == 'failed': + status = -1 + break + + t_old = solver.t_old + t = solver.t + y = solver.y + + if dense_output: + sol = solver.dense_output() + interpolants.append(sol) + else: + sol = None + + if events is not None: + g_new = [event(t, y) for event in events] + active_events = find_active_events(g, g_new, event_dir) + if active_events.size > 0: + if sol is None: + sol = solver.dense_output() + + root_indices, roots, terminate = handle_events( + sol, events, active_events, is_terminal, t_old, t) + + for e, te in zip(root_indices, roots): + t_events[e].append(te) + + if terminate: + status = 1 + t = roots[-1] + y = sol(t) + + g = g_new + + if t_eval is None: + ts.append(t) + ys.append(y) + else: + # The value in t_eval equal to t will be included. + if solver.direction > 0: + t_eval_i_new = np.searchsorted(t_eval, t, side='right') + t_eval_step = t_eval[t_eval_i:t_eval_i_new] + else: + t_eval_i_new = np.searchsorted(t_eval, t, side='left') + # It has to be done with two slice operations, because + # you can't slice to 0-th element inclusive using backward + # slicing. + t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1] + + if t_eval_step.size > 0: + if sol is None: + sol = solver.dense_output() + ts.append(t_eval_step) + ys.append(sol(t_eval_step)) + t_eval_i = t_eval_i_new + + if t_eval is not None and dense_output: + ti.append(t) + + message = MESSAGES.get(status, message) + + if t_events is not None: + t_events = [np.asarray(te) for te in t_events] + + if t_eval is None: + ts = np.array(ts) + ys = np.vstack(ys).T + else: + ts = np.hstack(ts) + ys = np.hstack(ys) + + if dense_output: + if t_eval is None: + sol = OdeSolution(ts, interpolants) + else: + sol = OdeSolution(ti, interpolants) + else: + sol = None + + return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, nfev=solver.nfev, + njev=solver.njev, nlu=solver.nlu, status=status, + message=message, success=status >= 0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/ivp.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/ivp.pyc new file mode 100644 index 0000000..3967f75 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/ivp.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/lsoda.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/lsoda.py new file mode 100644 index 0000000..ab37af3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/lsoda.py @@ -0,0 +1,192 @@ +import numpy as np +from scipy.integrate import ode +from .common import validate_tol, validate_first_step, warn_extraneous +from .base import OdeSolver, DenseOutput + + +class LSODA(OdeSolver): + """Adams/BDF method with automatic stiffness detection and switching. + + This is a wrapper to the Fortran solver from ODEPACK [1]_. 
It switches + automatically between the nonstiff Adams method and the stiff BDF method. + The method was originally detailed in [2]_. + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(t, y)``. + Here ``t`` is a scalar, and there are two options for the ndarray ``y``: + It can either have shape (n,); then ``fun`` must return array_like with + shape (n,). Alternatively it can have shape (n, k); then ``fun`` + must return an array_like with shape (n, k), i.e. each column + corresponds to a single column in ``y``. The choice between the two + options is determined by `vectorized` argument (see below). The + vectorized implementation allows a faster approximation of the Jacobian + by finite differences (required for this solver). + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + min_step : float, optional + Minimum allowed step size. Default is 0.0, i.e. the step size is not + bounded and determined solely by the solver. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e. the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits). But if a component of `y` + is approximately below `atol`, the error only needs to fall within + the same `atol` threshold, and the number of correct digits is not + guaranteed. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : None or callable, optional + Jacobian matrix of the right-hand side of the system with respect to + ``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is + equal to ``d f_i / d y_j``. The function will be called as + ``jac(t, y)``. If None (default), the Jacobian will be + approximated by finite differences. It is generally recommended to + provide the Jacobian rather than relying on a finite-difference + approximation. + lband, uband : int or None + Parameters defining the bandwidth of the Jacobian, + i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting + these requires your jac routine to return the Jacobian in the packed format: + the returned array must have ``n`` columns and ``uband + lband + 1`` + rows in which Jacobian diagonals are written. Specifically + ``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used + in `scipy.linalg.solve_banded` (check for an illustration). + These parameters can be also used with ``jac=None`` to reduce the + number of Jacobian elements estimated by finite differences. + vectorized : bool, optional + Whether `fun` is implemented in a vectorized fashion. A vectorized + implementation offers no advantages for this solver. Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. 
+ direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + + References + ---------- + .. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE + Solvers," IMACS Transactions on Scientific Computation, Vol 1., + pp. 55-64, 1983. + .. [2] L. Petzold, "Automatic selection of methods for solving stiff and + nonstiff systems of ordinary differential equations", SIAM Journal + on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148, + 1983. + """ + def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0, + max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None, + uband=None, vectorized=False, **extraneous): + warn_extraneous(extraneous) + super(LSODA, self).__init__(fun, t0, y0, t_bound, vectorized) + + if first_step is None: + first_step = 0 # LSODA value for automatic selection. + else: + first_step = validate_first_step(first_step, t0, t_bound) + + first_step *= self.direction + + if max_step == np.inf: + max_step = 0 # LSODA value for infinity. + elif max_step <= 0: + raise ValueError("`max_step` must be positive.") + + if min_step < 0: + raise ValueError("`min_step` must be nonnegative.") + + rtol, atol = validate_tol(rtol, atol, self.n) + + if jac is None: # No lambda as PEP8 insists. + def jac(): + return None + + solver = ode(self.fun, jac) + solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step, + min_step=min_step, first_step=first_step, + lband=lband, uband=uband) + solver.set_initial_value(y0, t0) + + # Inject t_bound into rwork array as needed for itask=5. + solver._integrator.rwork[0] = self.t_bound + solver._integrator.call_args[4] = solver._integrator.rwork + + self._lsoda_solver = solver + + def _step_impl(self): + solver = self._lsoda_solver + integrator = solver._integrator + + # From lsoda.step and lsoda.integrate itask=5 means take a single + # step and do not go past t_bound. + itask = integrator.call_args[2] + integrator.call_args[2] = 5 + solver._y, solver.t = integrator.run( + solver.f, solver.jac, solver._y, solver.t, + self.t_bound, solver.f_params, solver.jac_params) + integrator.call_args[2] = itask + + if solver.successful(): + self.t = solver.t + self.y = solver._y + # From LSODA Fortran source njev is equal to nlu. + self.njev = integrator.iwork[12] + self.nlu = integrator.iwork[12] + return True, None + else: + return False, 'Unexpected istate in LSODA.' 
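The packed banded-Jacobian layout that the lband/uband documentation above describes is easy to get wrong, so here is a small sketch packing a hypothetical tridiagonal Jacobian (lband = uband = 1) into the layout that `scipy.linalg.solve_banded` expects:

import numpy as np

# jac_packed[uband + i - j, j] = jac[i, j], as documented above.
n, lband, uband = 4, 1, 1
jac = (np.diag(np.full(n, -2.0))
       + np.diag(np.ones(n - 1), 1)
       + np.diag(np.ones(n - 1), -1))

jac_packed = np.zeros((uband + lband + 1, n))
for i in range(n):
    for j in range(max(0, i - lband), min(n, i + uband + 1)):
        jac_packed[uband + i - j, j] = jac[i, j]
print(jac_packed)   # row 0: superdiagonal, row 1: diagonal, row 2: subdiagonal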
+ + def _dense_output_impl(self): + iwork = self._lsoda_solver._integrator.iwork + rwork = self._lsoda_solver._integrator.rwork + + order = iwork[14] + h = rwork[11] + yh = np.reshape(rwork[20:20 + (order + 1) * self.n], + (self.n, order + 1), order='F').copy() + + return LsodaDenseOutput(self.t_old, self.t, h, order, yh) + + +class LsodaDenseOutput(DenseOutput): + def __init__(self, t_old, t, h, order, yh): + super(LsodaDenseOutput, self).__init__(t_old, t) + self.h = h + self.yh = yh + self.p = np.arange(order + 1) + + def _call_impl(self, t): + if t.ndim == 0: + x = ((t - self.t) / self.h) ** self.p + else: + x = ((t - self.t) / self.h) ** self.p[:, None] + + return np.dot(self.yh, x) diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/lsoda.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/lsoda.pyc new file mode 100644 index 0000000..d0625ea Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/lsoda.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/radau.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/radau.py new file mode 100644 index 0000000..5bdad6a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/radau.py @@ -0,0 +1,563 @@ +from __future__ import division, print_function, absolute_import +import numpy as np +from scipy.linalg import lu_factor, lu_solve +from scipy.sparse import csc_matrix, issparse, eye +from scipy.sparse.linalg import splu +from scipy.optimize._numdiff import group_columns +from .common import (validate_max_step, validate_tol, select_initial_step, + norm, num_jac, EPS, warn_extraneous, + validate_first_step) +from .base import OdeSolver, DenseOutput + +S6 = 6 ** 0.5 + +# Butcher tableau. A is not used directly, see below. +C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1]) +E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3 + +# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue +# and a complex conjugate pair. They are written below. +MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3) +MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3)) + - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6))) + +# These are transformation matrices. +T = np.array([ + [0.09443876248897524, -0.14125529502095421, 0.03002919410514742], + [0.25021312296533332, 0.20412935229379994, -0.38294211275726192], + [1, 1, 0]]) +TI = np.array([ + [4.17871859155190428, 0.32768282076106237, 0.52337644549944951], + [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044], + [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]]) +# These linear combinations are used in the algorithm. +TI_REAL = TI[0] +TI_COMPLEX = TI[1] + 1j * TI[2] + +# Interpolator coefficients. +P = np.array([ + [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6], + [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6], + [1/3, -8/3, 10/3]]) + + +NEWTON_MAXITER = 6 # Maximum number of Newton iterations. +MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size. +MAX_FACTOR = 10 # Maximum allowed increase in a step size. + + +def solve_collocation_system(fun, t, y, h, Z0, scale, tol, + LU_real, LU_complex, solve_lu): + """Solve the collocation system. + + Parameters + ---------- + fun : callable + Right-hand side of the system. + t : float + Current time. + y : ndarray, shape (n,) + Current state. + h : float + Step to try. + Z0 : ndarray, shape (3, n) + Initial guess for the solution. 
It determines new values of `y` at + ``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants. + scale : float + Problem tolerance scale, i.e. ``rtol * abs(y) + atol``. + tol : float + Tolerance to which solve the system. This value is compared with + the normalized by `scale` error. + LU_real, LU_complex + LU decompositions of the system Jacobians. + solve_lu : callable + Callable which solves a linear system given a LU decomposition. The + signature is ``solve_lu(LU, b)``. + + Returns + ------- + converged : bool + Whether iterations converged. + n_iter : int + Number of completed iterations. + Z : ndarray, shape (3, n) + Found solution. + rate : float + The rate of convergence. + """ + n = y.shape[0] + M_real = MU_REAL / h + M_complex = MU_COMPLEX / h + + W = TI.dot(Z0) + Z = Z0 + + F = np.empty((3, n)) + ch = h * C + + dW_norm_old = None + dW = np.empty_like(W) + converged = False + for k in range(NEWTON_MAXITER): + for i in range(3): + F[i] = fun(t + ch[i], y + Z[i]) + + if not np.all(np.isfinite(F)): + break + + f_real = F.T.dot(TI_REAL) - M_real * W[0] + f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2]) + + dW_real = solve_lu(LU_real, f_real) + dW_complex = solve_lu(LU_complex, f_complex) + + dW[0] = dW_real + dW[1] = dW_complex.real + dW[2] = dW_complex.imag + + dW_norm = norm(dW / scale) + if dW_norm_old is not None: + rate = dW_norm / dW_norm_old + else: + rate = None + + if (rate is not None and (rate >= 1 or + rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)): + break + + W += dW + Z = T.dot(W) + + if (dW_norm == 0 or + rate is not None and rate / (1 - rate) * dW_norm < tol): + converged = True + break + + dW_norm_old = dW_norm + + return converged, k + 1, Z, rate + + +def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old): + """Predict by which factor to increase/decrease the step size. + + The algorithm is described in [1]_. + + Parameters + ---------- + h_abs, h_abs_old : float + Current and previous values of the step size, `h_abs_old` can be None + (see Notes). + error_norm, error_norm_old : float + Current and previous values of the error norm, `error_norm_old` can + be None (see Notes). + + Returns + ------- + factor : float + Predicted factor. + + Notes + ----- + If `h_abs_old` and `error_norm_old` are both not None then a two-step + algorithm is used, otherwise a one-step algorithm is used. + + References + ---------- + .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential + Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8. + """ + if error_norm_old is None or h_abs_old is None or error_norm == 0: + multiplier = 1 + else: + multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25 + + with np.errstate(divide='ignore'): + factor = min(1, multiplier) * error_norm ** -0.25 + + return factor + + +class Radau(OdeSolver): + """Implicit Runge-Kutta method of Radau IIA family of order 5. + + The implementation follows [1]_. The error is controlled with a + third-order accurate embedded formula. A cubic polynomial which satisfies + the collocation conditions is used for the dense output. + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(t, y)``. + Here ``t`` is a scalar, and there are two options for the ndarray ``y``: + It can either have shape (n,); then ``fun`` must return array_like with + shape (n,). Alternatively it can have shape (n, k); then ``fun`` + must return an array_like with shape (n, k), i.e. 
each column + corresponds to a single column in ``y``. The choice between the two + options is determined by `vectorized` argument (see below). The + vectorized implementation allows a faster approximation of the Jacobian + by finite differences (required for this solver). + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e. the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits). But if a component of `y` + is approximately below `atol`, the error only needs to fall within + the same `atol` threshold, and the number of correct digits is not + guaranteed. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : {None, array_like, sparse_matrix, callable}, optional + Jacobian matrix of the right-hand side of the system with respect to + y, required by this method. The Jacobian matrix has shape (n, n) and + its element (i, j) is equal to ``d f_i / d y_j``. + There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)`` as necessary. + For the 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. + jac_sparsity : {None, array_like, sparse matrix}, optional + Defines a sparsity structure of the Jacobian matrix for a + finite-difference approximation. Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few non-zero + elements in *each* row, providing the sparsity structure will greatly + speed up the computations [2]_. A zero entry means that a corresponding + element in the Jacobian is always zero. If None (default), the Jacobian + is assumed to be dense. + vectorized : bool, optional + Whether `fun` is implemented in a vectorized fashion. Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + + References + ---------- + .. [1] E. Hairer, G. 
Wanner, "Solving Ordinary Differential Equations II: + Stiff and Differential-Algebraic Problems", Sec. IV.8. + .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. + """ + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, + vectorized=False, first_step=None, **extraneous): + warn_extraneous(extraneous) + super(Radau, self).__init__(fun, t0, y0, t_bound, vectorized) + self.y_old = None + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + self.f = self.fun(self.t, self.y) + # Select initial step assuming the same order which is used to control + # the error. + if first_step is None: + self.h_abs = select_initial_step( + self.fun, self.t, self.y, self.f, self.direction, + 3, self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.h_abs_old = None + self.error_norm_old = None + + self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) + self.sol = None + + self.jac_factor = None + self.jac, self.J = self._validate_jac(jac, jac_sparsity) + if issparse(self.J): + def lu(A): + self.nlu += 1 + return splu(A) + + def solve_lu(LU, b): + return LU.solve(b) + + I = eye(self.n, format='csc') + else: + def lu(A): + self.nlu += 1 + return lu_factor(A, overwrite_a=True) + + def solve_lu(LU, b): + return lu_solve(LU, b, overwrite_b=True) + + I = np.identity(self.n) + + self.lu = lu + self.solve_lu = solve_lu + self.I = I + + self.current_jac = True + self.LU_real = None + self.LU_complex = None + self.Z = None + + def _validate_jac(self, jac, sparsity): + t0 = self.t + y0 = self.y + + if jac is None: + if sparsity is not None: + if issparse(sparsity): + sparsity = csc_matrix(sparsity) + groups = group_columns(sparsity) + sparsity = (sparsity, groups) + + def jac_wrapped(t, y, f): + self.njev += 1 + J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, + self.atol, self.jac_factor, + sparsity) + return J + J = jac_wrapped(t0, y0, self.f) + elif callable(jac): + J = jac(t0, y0) + self.njev = 1 + if issparse(J): + J = csc_matrix(J) + + def jac_wrapped(t, y, _=None): + self.njev += 1 + return csc_matrix(jac(t, y), dtype=float) + + else: + J = np.asarray(J, dtype=float) + + def jac_wrapped(t, y, _=None): + self.njev += 1 + return np.asarray(jac(t, y), dtype=float) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." + .format((self.n, self.n), J.shape)) + else: + if issparse(jac): + J = csc_matrix(jac) + else: + J = np.asarray(jac, dtype=float) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." 
+ .format((self.n, self.n), J.shape)) + jac_wrapped = None + + return jac_wrapped, J + + def _step_impl(self): + t = self.t + y = self.y + f = self.f + + max_step = self.max_step + atol = self.atol + rtol = self.rtol + + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + if self.h_abs > max_step: + h_abs = max_step + h_abs_old = None + error_norm_old = None + elif self.h_abs < min_step: + h_abs = min_step + h_abs_old = None + error_norm_old = None + else: + h_abs = self.h_abs + h_abs_old = self.h_abs_old + error_norm_old = self.error_norm_old + + J = self.J + LU_real = self.LU_real + LU_complex = self.LU_complex + + current_jac = self.current_jac + jac = self.jac + + rejected = False + step_accepted = False + message = None + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + + h = t_new - t + h_abs = np.abs(h) + + if self.sol is None: + Z0 = np.zeros((3, y.shape[0])) + else: + Z0 = self.sol(t + h * C).T - y + + scale = atol + np.abs(y) * rtol + + converged = False + while not converged: + if LU_real is None or LU_complex is None: + LU_real = self.lu(MU_REAL / h * self.I - J) + LU_complex = self.lu(MU_COMPLEX / h * self.I - J) + + converged, n_iter, Z, rate = solve_collocation_system( + self.fun, t, y, h, Z0, scale, self.newton_tol, + LU_real, LU_complex, self.solve_lu) + + if not converged: + if current_jac: + break + + J = self.jac(t, y, f) + current_jac = True + LU_real = None + LU_complex = None + + if not converged: + h_abs *= 0.5 + LU_real = None + LU_complex = None + continue + + y_new = y + Z[-1] + ZE = Z.T.dot(E) / h + error = self.solve_lu(LU_real, f + ZE) + scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol + error_norm = norm(error / scale) + safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + + n_iter) + + if rejected and error_norm > 1: + error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE) + error_norm = norm(error / scale) + + if error_norm > 1: + factor = predict_factor(h_abs, h_abs_old, + error_norm, error_norm_old) + h_abs *= max(MIN_FACTOR, safety * factor) + + LU_real = None + LU_complex = None + rejected = True + else: + step_accepted = True + + recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3 + + factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old) + factor = min(MAX_FACTOR, safety * factor) + + if not recompute_jac and factor < 1.2: + factor = 1 + else: + LU_real = None + LU_complex = None + + f_new = self.fun(t_new, y_new) + if recompute_jac: + J = jac(t_new, y_new, f_new) + current_jac = True + elif jac is not None: + current_jac = False + + self.h_abs_old = self.h_abs + self.error_norm_old = error_norm + + self.h_abs = h_abs * factor + + self.y_old = y + + self.t = t_new + self.y = y_new + self.f = f_new + + self.Z = Z + + self.LU_real = LU_real + self.LU_complex = LU_complex + self.current_jac = current_jac + self.J = J + + self.t_old = t + self.sol = self._compute_dense_output() + + return step_accepted, message + + def _compute_dense_output(self): + Q = np.dot(self.Z.T, P) + return RadauDenseOutput(self.t_old, self.t, self.y_old, Q) + + def _dense_output_impl(self): + return self.sol + + +class RadauDenseOutput(DenseOutput): + def __init__(self, t_old, t, y_old, Q): + super(RadauDenseOutput, self).__init__(t_old, t) + self.h = t - t_old + self.Q = Q + self.order = Q.shape[1] - 1 + self.y_old = y_old + + def _call_impl(self, t): + 
x = (t - self.t_old) / self.h + if t.ndim == 0: + p = np.tile(x, self.order + 1) + p = np.cumprod(p) + else: + p = np.tile(x, (self.order + 1, 1)) + p = np.cumprod(p, axis=0) + # Here we don't multiply by h, not a mistake. + y = np.dot(self.Q, p) + if y.ndim == 2: + y += self.y_old[:, None] + else: + y += self.y_old + + return y diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/radau.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/radau.pyc new file mode 100644 index 0000000..ed83169 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/radau.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/rk.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/rk.py new file mode 100644 index 0000000..30b4110 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/rk.py @@ -0,0 +1,389 @@ +from __future__ import division, print_function, absolute_import +import numpy as np +from .base import OdeSolver, DenseOutput +from .common import (validate_max_step, validate_tol, select_initial_step, + norm, warn_extraneous, validate_first_step) + + +# Multiply steps computed from asymptotic behaviour of errors by this. +SAFETY = 0.9 + +MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size. +MAX_FACTOR = 10 # Maximum allowed increase in a step size. + + +def rk_step(fun, t, y, f, h, A, B, C, E, K): + """Perform a single Runge-Kutta step. + + This function computes a prediction of an explicit Runge-Kutta method and + also estimates the error of a less accurate method. + + Notation for Butcher tableau is as in [1]_. + + Parameters + ---------- + fun : callable + Right-hand side of the system. + t : float + Current time. + y : ndarray, shape (n,) + Current state. + f : ndarray, shape (n,) + Current value of the derivative, i.e. ``fun(x, y)``. + h : float + Step to use. + A : list of ndarray, length n_stages - 1 + Coefficients for combining previous RK stages to compute the next + stage. For explicit methods the coefficients above the main diagonal + are zeros, so `A` is stored as a list of arrays of increasing lengths. + The first stage is always just `f`, thus no coefficients for it + are required. + B : ndarray, shape (n_stages,) + Coefficients for combining RK stages for computing the final + prediction. + C : ndarray, shape (n_stages - 1,) + Coefficients for incrementing time for consecutive RK stages. + The value for the first stage is always zero, thus it is not stored. + E : ndarray, shape (n_stages + 1,) + Coefficients for estimating the error of a less accurate method. They + are computed as the difference between b's in an extended tableau. + K : ndarray, shape (n_stages + 1, n) + Storage array for putting RK stages here. Stages are stored in rows. + + Returns + ------- + y_new : ndarray, shape (n,) + Solution at t + h computed with a higher accuracy. + f_new : ndarray, shape (n,) + Derivative ``fun(t + h, y_new)``. + error : ndarray, shape (n,) + Error estimate of a less accurate method. + + References + ---------- + .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential + Equations I: Nonstiff Problems", Sec. II.4. 
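Before the implementation that follows, the stage loop is easiest to see on the smallest useful tableau. A toy sketch of one step of Heun's method written with the same A/B/C/K conventions (a two-stage method chosen for brevity, not one of the embedded pairs defined later in this file):

import numpy as np

def heun_step(fun, t, y, f, h):
    A = [np.array([1.0])]        # stage 2 uses K[0] with weight 1
    B = np.array([0.5, 0.5])     # final combination of the stages
    C = np.array([1.0])          # stage 2 is evaluated at t + h
    K = np.empty((2, y.shape[0]))
    K[0] = f
    for s, (a, c) in enumerate(zip(A, C)):
        dy = np.dot(K[:s + 1].T, a) * h
        K[s + 1] = fun(t + c * h, y + dy)
    return y + h * np.dot(K.T, B)

f = lambda t, y: -y
y0 = np.array([1.0])
print(heun_step(f, 0.0, y0, f(0.0, y0), 0.1))   # ~0.905, close to exp(-0.1)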
+ """ + K[0] = f + for s, (a, c) in enumerate(zip(A, C)): + dy = np.dot(K[:s + 1].T, a) * h + K[s + 1] = fun(t + c * h, y + dy) + + y_new = y + h * np.dot(K[:-1].T, B) + f_new = fun(t + h, y_new) + + K[-1] = f_new + error = np.dot(K.T, E) * h + + return y_new, f_new, error + + +class RungeKutta(OdeSolver): + """Base class for explicit Runge-Kutta methods.""" + C = NotImplemented + A = NotImplemented + B = NotImplemented + E = NotImplemented + P = NotImplemented + order = NotImplemented + n_stages = NotImplemented + + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, vectorized=False, + first_step=None, **extraneous): + warn_extraneous(extraneous) + super(RungeKutta, self).__init__(fun, t0, y0, t_bound, vectorized, + support_complex=True) + self.y_old = None + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + self.f = self.fun(self.t, self.y) + if first_step is None: + self.h_abs = select_initial_step( + self.fun, self.t, self.y, self.f, self.direction, + self.order, self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype) + + def _step_impl(self): + t = self.t + y = self.y + + max_step = self.max_step + rtol = self.rtol + atol = self.atol + + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + + if self.h_abs > max_step: + h_abs = max_step + elif self.h_abs < min_step: + h_abs = min_step + else: + h_abs = self.h_abs + + order = self.order + step_accepted = False + + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + + h = t_new - t + h_abs = np.abs(h) + + y_new, f_new, error = rk_step(self.fun, t, y, self.f, h, self.A, + self.B, self.C, self.E, self.K) + scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol + error_norm = norm(error / scale) + + if error_norm == 0.0: + h_abs *= MAX_FACTOR + step_accepted = True + elif error_norm < 1: + h_abs *= min(MAX_FACTOR, + max(1, SAFETY * error_norm ** (-1 / (order + 1)))) + step_accepted = True + else: + h_abs *= max(MIN_FACTOR, + SAFETY * error_norm ** (-1 / (order + 1))) + + self.y_old = y + + self.t = t_new + self.y = y_new + + self.h_abs = h_abs + self.f = f_new + + return True, None + + def _dense_output_impl(self): + Q = self.K.T.dot(self.P) + return RkDenseOutput(self.t_old, self.t, self.y_old, Q) + + +class RK23(RungeKutta): + """Explicit Runge-Kutta method of order 3(2). + + This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled + assuming accuracy of the second-order method, but steps are taken using the + third-order accurate formula (local extrapolation is done). A cubic Hermite + polynomial is used for the dense output. + + Can be applied in the complex domain. + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(t, y)``. + Here ``t`` is a scalar and there are two options for ndarray ``y``. + It can either have shape (n,), then ``fun`` must return array_like with + shape (n,). Or alternatively it can have shape (n, k), then ``fun`` + must return array_like with shape (n, k), i.e. each column + corresponds to a single column in ``y``. The choice between the two + options is determined by `vectorized` argument (see below). + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. 
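For orientation, the solver classes in this file can also be driven directly through the stepping interface inherited from OdeSolver, without going through solve_ivp. A brief sketch (output values approximate):

import numpy as np
from scipy.integrate import RK23

# Step the solver manually until it reaches t_bound = 10.
solver = RK23(lambda t, y: -0.5 * y, 0.0, np.array([2.0]), 10.0)
while solver.status == 'running':
    solver.step()
print(solver.status, solver.t, solver.y)   # finished 10.0 [~0.0135]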
+ t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e. the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits). But if a component of `y` + is approximately below `atol`, the error only needs to fall within + the same `atol` threshold, and the number of correct digits is not + guaranteed. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + vectorized : bool, optional + Whether `fun` is implemented in a vectorized fashion. Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number evaluations of the system's right-hand side. + njev : int + Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian. + nlu : int + Number of LU decompositions. Is always 0 for this solver. + + References + ---------- + .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas", + Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989. + """ + order = 2 + n_stages = 3 + C = np.array([1/2, 3/4]) + A = [np.array([1/2]), + np.array([0, 3/4])] + B = np.array([2/9, 1/3, 4/9]) + E = np.array([5/72, -1/12, -1/9, 1/8]) + P = np.array([[1, -4 / 3, 5 / 9], + [0, 1, -2/3], + [0, 4/3, -8/9], + [0, -1, 1]]) + + +class RK45(RungeKutta): + """Explicit Runge-Kutta method of order 5(4). + + This uses the Dormand-Prince pair of formulas [1]_. The error is controlled + assuming accuracy of the fourth-order method accuracy, but steps are taken + using the fifth-order accurate formula (local extrapolation is done). + A quartic interpolation polynomial is used for the dense output [2]_. + + Can be applied in the complex domain. + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(t, y)``. + Here ``t`` is a scalar, and there are two options for the ndarray ``y``: + It can either have shape (n,); then ``fun`` must return array_like with + shape (n,). Alternatively it can have shape (n, k); then ``fun`` + must return an array_like with shape (n, k), i.e. each column + corresponds to a single column in ``y``. The choice between the two + options is determined by `vectorized` argument (see below). + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. 
+        Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e. the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits). But if a component of `y`
+        is approximately below `atol`, the error only needs to fall within
+        the same `atol` threshold, and the number of correct digits is not
+        guaranteed. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the system's right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.
+    nlu : int
+        Number of LU decompositions. Is always 0 for this solver.
+
+    References
+    ----------
+    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
+           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
+           No. 1, pp. 19-26, 1980.
+    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
+           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
+    """
+    order = 4
+    n_stages = 6
+    C = np.array([1/5, 3/10, 4/5, 8/9, 1])
+    A = [np.array([1/5]),
+         np.array([3/40, 9/40]),
+         np.array([44/45, -56/15, 32/9]),
+         np.array([19372/6561, -25360/2187, 64448/6561, -212/729]),
+         np.array([9017/3168, -355/33, 46732/5247, 49/176, -5103/18656])]
+    B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
+    E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
+                  1/40])
+    # Corresponds to the optimum value of c_6 from [2]_.
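+    # P maps the seven stage derivatives in K onto a quartic interpolant:
+    # with x = (t - t_old) / h and p = (x, x**2, x**3, x**4), the dense
+    # output is evaluated as y(t) ~= y_old + h * K.T.dot(P).dot(p)
+    # (see RkDenseOutput below).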
+ P = np.array([ + [1, -8048581381/2820520608, 8663915743/2820520608, + -12715105075/11282082432], + [0, 0, 0, 0], + [0, 131558114200/32700410799, -68118460800/10900136933, + 87487479700/32700410799], + [0, -1754552775/470086768, 14199869525/1410260304, + -10690763975/1880347072], + [0, 127303824393/49829197408, -318862633887/49829197408, + 701980252875 / 199316789632], + [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844], + [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]]) + + +class RkDenseOutput(DenseOutput): + def __init__(self, t_old, t, y_old, Q): + super(RkDenseOutput, self).__init__(t_old, t) + self.h = t - t_old + self.Q = Q + self.order = Q.shape[1] - 1 + self.y_old = y_old + + def _call_impl(self, t): + x = (t - self.t_old) / self.h + if t.ndim == 0: + p = np.tile(x, self.order + 1) + p = np.cumprod(p) + else: + p = np.tile(x, (self.order + 1, 1)) + p = np.cumprod(p, axis=0) + y = self.h * np.dot(self.Q, p) + if y.ndim == 2: + y += self.y_old[:, None] + else: + y += self.y_old + + return y diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/rk.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/rk.pyc new file mode 100644 index 0000000..53118fc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ivp/rk.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ode.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ode.py new file mode 100644 index 0000000..2732fa6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ode.py @@ -0,0 +1,1371 @@ +# Authors: Pearu Peterson, Pauli Virtanen, John Travers +""" +First-order ODE integrators. + +User-friendly interface to various numerical integrators for solving a +system of first order ODEs with prescribed initial conditions:: + + d y(t)[i] + --------- = f(t,y(t))[i], + d t + + y(t=0)[i] = y0[i], + +where:: + + i = 0, ..., len(y0) - 1 + +class ode +--------- + +A generic interface class to numeric integrators. It has the following +methods:: + + integrator = ode(f, jac=None) + integrator = integrator.set_integrator(name, **params) + integrator = integrator.set_initial_value(y0, t0=0.0) + integrator = integrator.set_f_params(*args) + integrator = integrator.set_jac_params(*args) + y1 = integrator.integrate(t1, step=False, relax=False) + flag = integrator.successful() + +class complex_ode +----------------- + +This class has the same generic interface as ode, except it can handle complex +f, y and Jacobians by transparently translating them into the equivalent +real valued system. It supports the real valued solvers (i.e not zvode) and is +an alternative to ode with the zvode solver, sometimes performing better. +""" +from __future__ import division, print_function, absolute_import + +# XXX: Integrators must have: +# =========================== +# cvode - C version of vode and vodpk with many improvements. +# Get it from http://www.netlib.org/ode/cvode.tar.gz +# To wrap cvode to Python, one must write extension module by +# hand. Its interface is too much 'advanced C' that using f2py +# would be too complicated (or impossible). 
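+#
+# (Hedged sketch, not part of the original SciPy source: a minimal
+# fixed-step forward-Euler integrator written against the template
+# described below. `runner` only needs to be non-None for the class to
+# be considered available, and `run` must return the pair (y1, t1).)
+#
+#     import numpy as np
+#
+#     class euler(IntegratorBase):
+#         runner = True  # non-None => integrator is available
+#
+#         def __init__(self, dt=1e-3):
+#             self.dt = dt
+#             self.success = 1
+#
+#         def run(self, f, jac, y0, t0, t1, f_params, jac_params):
+#             # March from t0 to t1 (forward integration assumed).
+#             y, t = np.asarray(y0, dtype=float), t0
+#             while t < t1:
+#                 h = min(self.dt, t1 - t)
+#                 y = y + h * np.asarray(f(t, y, *f_params))
+#                 t += h
+#             return y, t1
+#
+#     if euler.runner:
+#         IntegratorBase.integrator_classes.append(euler)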
+# +# How to define a new integrator: +# =============================== +# +# class myodeint(IntegratorBase): +# +# runner = <odeint function> or None +# +# def __init__(self,...): # required +# <initialize> +# +# def reset(self,n,has_jac): # optional +# # n - the size of the problem (number of equations) +# # has_jac - whether user has supplied its own routine for Jacobian +# <allocate memory,initialize further> +# +# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required +# # this method is called to integrate from t=t0 to t=t1 +# # with initial condition y0. f and jac are user-supplied functions +# # that define the problem. f_params,jac_params are additional +# # arguments +# # to these functions. +# <calculate y1> +# if <calculation was unsuccessful>: +# self.success = 0 +# return t1,y1 +# +# # In addition, one can define step() and run_relax() methods (they +# # take the same arguments as run()) if the integrator can support +# # these features (see IntegratorBase doc strings). +# +# if myodeint.runner: +# IntegratorBase.integrator_classes.append(myodeint) + +__all__ = ['ode', 'complex_ode'] +__version__ = "$Id$" +__docformat__ = "restructuredtext en" + +import re +import warnings + +from numpy import asarray, array, zeros, int32, isscalar, real, imag, vstack + +from . import vode as _vode +from . import _dop +from . import lsoda as _lsoda + + +# ------------------------------------------------------------------------------ +# User interface +# ------------------------------------------------------------------------------ + + +class ode(object): + """ + A generic interface class to numeric integrators. + + Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``. + + *Note*: The first two arguments of ``f(t, y, ...)`` are in the + opposite order of the arguments in the system definition function used + by `scipy.integrate.odeint`. + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Right-hand side of the differential equation. t is a scalar, + ``y.shape == (n,)``. + ``f_args`` is set by calling ``set_f_params(*args)``. + `f` should return a scalar, array or list (not a tuple). + jac : callable ``jac(t, y, *jac_args)``, optional + Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``. + ``jac_args`` is set by calling ``set_jac_params(*args)``. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + + See also + -------- + odeint : an integrator with a simpler interface based on lsoda from ODEPACK + quad : for finding the area under a curve + + Notes + ----- + Available integrators are listed below. They can be selected using + the `set_integrator` method. + + "vode" + + Real-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. It provides + implicit Adams method (for non-stiff problems) and a method based on + backward differentiation formulas (BDF) (for stiff problems). + + Source: http://www.netlib.org/ode/vode.f + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "vode" integrator at the same time. + + This integrator accepts the following parameters in `set_integrator` + method of the `ode` class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - lband : None or int + - uband : None or int + Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband. 
+ Setting these requires your jac routine to return the jacobian + in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The + dimension of the matrix must be (lband+uband+1, len(y)). + - method: 'adams' or 'bdf' + Which solver to use, Adams (non-stiff) or BDF (stiff) + - with_jacobian : bool + This option is only considered when the user has not supplied a + Jacobian function and has not indicated (by setting either band) + that the Jacobian is banded. In this case, `with_jacobian` specifies + whether the iteration method of the ODE solver's correction step is + chord iteration with an internally generated full Jacobian or + functional iteration with no Jacobian. + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - min_step : float + - max_step : float + Limits for the step sizes used by the integrator. + - order : int + Maximum order used by the integrator, + order <= 12 for Adams, <= 5 for BDF. + + "zvode" + + Complex-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. It provides + implicit Adams method (for non-stiff problems) and a method based on + backward differentiation formulas (BDF) (for stiff problems). + + Source: http://www.netlib.org/ode/zvode.f + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "zvode" integrator at the same time. + + This integrator accepts the same parameters in `set_integrator` + as the "vode" solver. + + .. note:: + + When using ZVODE for a stiff system, it should only be used for + the case in which the function f is analytic, that is, when each f(i) + is an analytic function of each y(j). Analyticity means that the + partial derivative df(i)/dy(j) is a unique complex number, and this + fact is critical in the way ZVODE solves the dense or banded linear + systems that arise in the stiff case. For a complex stiff ODE system + in which f is not analytic, ZVODE is likely to have convergence + failures, and for this problem one should instead use DVODE on the + equivalent real system (in the real and imaginary parts of y). + + "lsoda" + + Real-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. It provides + automatic method switching between implicit Adams method (for non-stiff + problems) and a method based on backward differentiation formulas (BDF) + (for stiff problems). + + Source: http://www.netlib.org/odepack + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "lsoda" integrator at the same time. + + This integrator accepts the following parameters in `set_integrator` + method of the `ode` class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - lband : None or int + - uband : None or int + Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband. + Setting these requires your jac routine to return the jacobian + in packed format, jac_packed[i-j+uband, j] = jac[i,j]. + - with_jacobian : bool + *Not used.* + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - min_step : float + - max_step : float + Limits for the step sizes used by the integrator. + - max_order_ns : int + Maximum order used in the nonstiff case (default 12). 
+ - max_order_s : int + Maximum order used in the stiff case (default 5). + - max_hnil : int + Maximum number of messages reporting too small step size (t + h = t) + (default 0) + - ixpr : int + Whether to generate extra printing at method switches (default False). + + "dopri5" + + This is an explicit runge-kutta method of order (4)5 due to Dormand & + Prince (with stepsize control and dense output). + + Authors: + + E. Hairer and G. Wanner + Universite de Geneve, Dept. de Mathematiques + CH-1211 Geneve 24, Switzerland + e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch + + This code is described in [HNW93]_. + + This integrator accepts the following parameters in set_integrator() + method of the ode class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - max_step : float + - safety : float + Safety factor on new step selection (default 0.9) + - ifactor : float + - dfactor : float + Maximum factor to increase/decrease step size by in one step + - beta : float + Beta parameter for stabilised step size control. + - verbosity : int + Switch for printing messages (< 0 for no messages). + + "dop853" + + This is an explicit runge-kutta method of order 8(5,3) due to Dormand + & Prince (with stepsize control and dense output). + + Options and references the same as "dopri5". + + Examples + -------- + + A problem to integrate and the corresponding jacobian: + + >>> from scipy.integrate import ode + >>> + >>> y0, t0 = [1.0j, 2.0], 0 + >>> + >>> def f(t, y, arg1): + ... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2] + >>> def jac(t, y, arg1): + ... return [[1j*arg1, 1], [0, -arg1*2*y[1]]] + + The integration: + + >>> r = ode(f, jac).set_integrator('zvode', method='bdf') + >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0) + >>> t1 = 10 + >>> dt = 1 + >>> while r.successful() and r.t < t1: + ... print(r.t+dt, r.integrate(r.t+dt)) + 1 [-0.71038232+0.23749653j 0.40000271+0.j ] + 2.0 [0.19098503-0.52359246j 0.22222356+0.j ] + 3.0 [0.47153208+0.52701229j 0.15384681+0.j ] + 4.0 [-0.61905937+0.30726255j 0.11764744+0.j ] + 5.0 [0.02340997-0.61418799j 0.09523835+0.j ] + 6.0 [0.58643071+0.339819j 0.08000018+0.j ] + 7.0 [-0.52070105+0.44525141j 0.06896565+0.j ] + 8.0 [-0.15986733-0.61234476j 0.06060616+0.j ] + 9.0 [0.64850462+0.15048982j 0.05405414+0.j ] + 10.0 [-0.38404699+0.56382299j 0.04878055+0.j ] + + References + ---------- + .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary + Differential Equations i. Nonstiff Problems. 2nd edition. + Springer Series in Computational Mathematics, + Springer-Verlag (1993) + + """ + + def __init__(self, f, jac=None): + self.stiff = 0 + self.f = f + self.jac = jac + self.f_params = () + self.jac_params = () + self._y = [] + + @property + def y(self): + return self._y + + def set_initial_value(self, y, t=0.0): + """Set initial conditions y(t) = y.""" + if isscalar(y): + y = [y] + n_prev = len(self._y) + if not n_prev: + self.set_integrator('') # find first available integrator + self._y = asarray(y, self._integrator.scalar) + self.t = t + self._integrator.reset(len(self._y), self.jac is not None) + return self + + def set_integrator(self, name, **integrator_params): + """ + Set integrator by name. + + Parameters + ---------- + name : str + Name of the integrator. 
+        integrator_params
+            Additional parameters for the integrator.
+        """
+        integrator = find_integrator(name)
+        if integrator is None:
+            # FIXME: this really should raise an exception. Will that break
+            # any code?
+            warnings.warn('No integrator name match with %r or is not '
+                          'available.' % name)
+        else:
+            self._integrator = integrator(**integrator_params)
+            if not len(self._y):
+                self.t = 0.0
+                self._y = array([0.0], self._integrator.scalar)
+            self._integrator.reset(len(self._y), self.jac is not None)
+        return self
+
+    def integrate(self, t, step=False, relax=False):
+        """Find y=y(t), set y as an initial condition, and return y.
+
+        Parameters
+        ----------
+        t : float
+            The endpoint of the integration step.
+        step : bool
+            If True, and if the integrator supports the step method,
+            then perform a single integration step and return.
+            This parameter is provided in order to expose internals of
+            the implementation, and should not be changed from its default
+            value in most cases.
+        relax : bool
+            If True and if the integrator supports the run_relax method,
+            then integrate until t_1 >= t and return. ``relax`` is not
+            referenced if ``step=True``.
+            This parameter is provided in order to expose internals of
+            the implementation, and should not be changed from its default
+            value in most cases.
+
+        Returns
+        -------
+        y : float
+            The integrated value at t
+        """
+        if step and self._integrator.supports_step:
+            mth = self._integrator.step
+        elif relax and self._integrator.supports_run_relax:
+            mth = self._integrator.run_relax
+        else:
+            mth = self._integrator.run
+
+        try:
+            self._y, self.t = mth(self.f, self.jac or (lambda: None),
+                                  self._y, self.t, t,
+                                  self.f_params, self.jac_params)
+        except SystemError:
+            # f2py issue with tuple returns, see ticket 1187.
+            raise ValueError('Function to integrate must not return a tuple.')
+
+        return self._y
+
+    def successful(self):
+        """Check if integration was successful."""
+        try:
+            self._integrator
+        except AttributeError:
+            self.set_integrator('')
+        return self._integrator.success == 1
+
+    def get_return_code(self):
+        """Extracts the return code for the integration to enable better control
+        if the integration fails.
+
+        In general, a return code > 0 implies success while a return code < 0
+        implies failure.
+
+        Notes
+        -----
+        This section describes possible return codes and their meaning, for
+        available integrators that can be selected by the `set_integrator` method.
+
+        "vode"
+
+        =========== =======
+        Return Code Message
+        =========== =======
+        2           Integration successful.
+        -1          Excess work done on this call. (Perhaps wrong MF.)
+        -2          Excess accuracy requested. (Tolerances too small.)
+        -3          Illegal input detected. (See printed message.)
+        -4          Repeated error test failures. (Check all input.)
+        -5          Repeated convergence failures. (Perhaps bad Jacobian
+                    supplied or wrong choice of MF or tolerances.)
+        -6          Error weight became zero during problem. (Solution
+                    component i vanished, and ATOL or ATOL(i) = 0.)
+        =========== =======
+
+        "zvode"
+
+        =========== =======
+        Return Code Message
+        =========== =======
+        2           Integration successful.
+        -1          Excess work done on this call. (Perhaps wrong MF.)
+        -2          Excess accuracy requested. (Tolerances too small.)
+        -3          Illegal input detected. (See printed message.)
+        -4          Repeated error test failures. (Check all input.)
+        -5          Repeated convergence failures. (Perhaps bad Jacobian
+                    supplied or wrong choice of MF or tolerances.)
+        -6          Error weight became zero during problem. (Solution
+                    component i vanished, and ATOL or ATOL(i) = 0.)
+ =========== ======= + + "dopri5" + + =========== ======= + Return Code Message + =========== ======= + 1 Integration successful. + 2 Integration successful (interrupted by solout). + -1 Input is not consistent. + -2 Larger nsteps is needed. + -3 Step size becomes too small. + -4 Problem is probably stiff (interrupted). + =========== ======= + + "dop853" + + =========== ======= + Return Code Message + =========== ======= + 1 Integration successful. + 2 Integration successful (interrupted by solout). + -1 Input is not consistent. + -2 Larger nsteps is needed. + -3 Step size becomes too small. + -4 Problem is probably stiff (interrupted). + =========== ======= + + "lsoda" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call (perhaps wrong Dfun type). + -2 Excess accuracy requested (tolerances too small). + -3 Illegal input detected (internal error). + -4 Repeated error test failures (internal error). + -5 Repeated convergence failures (perhaps bad Jacobian or tolerances). + -6 Error weight became zero during problem. + -7 Internal workspace insufficient to finish (internal error). + =========== ======= + """ + try: + self._integrator + except AttributeError: + self.set_integrator('') + return self._integrator.istate + + def set_f_params(self, *args): + """Set extra parameters for user-supplied function f.""" + self.f_params = args + return self + + def set_jac_params(self, *args): + """Set extra parameters for user-supplied function jac.""" + self.jac_params = args + return self + + def set_solout(self, solout): + """ + Set callable to be called at every successful integration step. + + Parameters + ---------- + solout : callable + ``solout(t, y)`` is called at each internal integrator step, + t is a scalar providing the current independent position + y is the current soloution ``y.shape == (n,)`` + solout should return -1 to stop integration + otherwise it should return None or 0 + + """ + if self._integrator.supports_solout: + self._integrator.set_solout(solout) + if self._y is not None: + self._integrator.reset(len(self._y), self.jac is not None) + else: + raise ValueError("selected integrator does not support solout," + " choose another one") + + +def _transform_banded_jac(bjac): + """ + Convert a real matrix of the form (for example) + + [0 0 A B] [0 0 0 B] + [0 0 C D] [0 0 A D] + [E F G H] to [0 F C H] + [I J K L] [E J G L] + [I 0 K 0] + + That is, every other column is shifted up one. + """ + # Shift every other column. + newjac = zeros((bjac.shape[0] + 1, bjac.shape[1])) + newjac[1:, ::2] = bjac[:, ::2] + newjac[:-1, 1::2] = bjac[:, 1::2] + return newjac + + +class complex_ode(ode): + """ + A wrapper of ode for complex systems. + + This functions similarly as `ode`, but re-maps a complex-valued + equation system to a real-valued one before using the integrators. + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Rhs of the equation. t is a scalar, ``y.shape == (n,)``. + ``f_args`` is set by calling ``set_f_params(*args)``. + jac : callable ``jac(t, y, *jac_args)`` + Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``. + ``jac_args`` is set by calling ``set_f_params(*args)``. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + + Examples + -------- + For usage examples, see `ode`. 
+ + """ + + def __init__(self, f, jac=None): + self.cf = f + self.cjac = jac + if jac is None: + ode.__init__(self, self._wrap, None) + else: + ode.__init__(self, self._wrap, self._wrap_jac) + + def _wrap(self, t, y, *f_args): + f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args)) + # self.tmp is a real-valued array containing the interleaved + # real and imaginary parts of f. + self.tmp[::2] = real(f) + self.tmp[1::2] = imag(f) + return self.tmp + + def _wrap_jac(self, t, y, *jac_args): + # jac is the complex Jacobian computed by the user-defined function. + jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args)) + + # jac_tmp is the real version of the complex Jacobian. Each complex + # entry in jac, say 2+3j, becomes a 2x2 block of the form + # [2 -3] + # [3 2] + jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1])) + jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac) + jac_tmp[1::2, ::2] = imag(jac) + jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2] + + ml = getattr(self._integrator, 'ml', None) + mu = getattr(self._integrator, 'mu', None) + if ml is not None or mu is not None: + # Jacobian is banded. The user's Jacobian function has computed + # the complex Jacobian in packed format. The corresponding + # real-valued version has every other column shifted up. + jac_tmp = _transform_banded_jac(jac_tmp) + + return jac_tmp + + @property + def y(self): + return self._y[::2] + 1j * self._y[1::2] + + def set_integrator(self, name, **integrator_params): + """ + Set integrator by name. + + Parameters + ---------- + name : str + Name of the integrator + integrator_params + Additional parameters for the integrator. + """ + if name == 'zvode': + raise ValueError("zvode must be used with ode, not complex_ode") + + lband = integrator_params.get('lband') + uband = integrator_params.get('uband') + if lband is not None or uband is not None: + # The Jacobian is banded. Override the user-supplied bandwidths + # (which are for the complex Jacobian) with the bandwidths of + # the corresponding real-valued Jacobian wrapper of the complex + # Jacobian. + integrator_params['lband'] = 2 * (lband or 0) + 1 + integrator_params['uband'] = 2 * (uband or 0) + 1 + + return ode.set_integrator(self, name, **integrator_params) + + def set_initial_value(self, y, t=0.0): + """Set initial conditions y(t) = y.""" + y = asarray(y) + self.tmp = zeros(y.size * 2, 'float') + self.tmp[::2] = real(y) + self.tmp[1::2] = imag(y) + return ode.set_initial_value(self, self.tmp, t) + + def integrate(self, t, step=False, relax=False): + """Find y=y(t), set y as an initial condition, and return y. + + Parameters + ---------- + t : float + The endpoint of the integration step. + step : bool + If True, and if the integrator supports the step method, + then perform a single integration step and return. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + relax : bool + If True and if the integrator supports the run_relax method, + then integrate until t_1 >= t and return. ``relax`` is not + referenced if ``step=True``. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + + Returns + ------- + y : float + The integrated value at t + """ + y = ode.integrate(self, t, step, relax) + return y[::2] + 1j * y[1::2] + + def set_solout(self, solout): + """ + Set callable to be called at every successful integration step. 
+
+        Parameters
+        ----------
+        solout : callable
+            ``solout(t, y)`` is called at each internal integrator step,
+            t is a scalar providing the current independent position
+            y is the current solution ``y.shape == (n,)``
+            solout should return -1 to stop integration
+            otherwise it should return None or 0
+
+        """
+        if self._integrator.supports_solout:
+            self._integrator.set_solout(solout, complex=True)
+        else:
+            raise TypeError("selected integrator does not support solout,"
+                            " choose another one")
+
+
+# ------------------------------------------------------------------------------
+# ODE integrators
+# ------------------------------------------------------------------------------
+
+def find_integrator(name):
+    for cl in IntegratorBase.integrator_classes:
+        if re.match(name, cl.__name__, re.I):
+            return cl
+    return None
+
+
+class IntegratorConcurrencyError(RuntimeError):
+    """
+    Failure due to concurrent usage of an integrator that can be used
+    only for a single problem at a time.
+
+    """
+
+    def __init__(self, name):
+        msg = ("Integrator `%s` can be used to solve only a single problem "
+               "at a time. If you want to integrate multiple problems, "
+               "consider using a different integrator "
+               "(see `ode.set_integrator`)") % name
+        RuntimeError.__init__(self, msg)
+
+
+class IntegratorBase(object):
+    runner = None  # runner is None => integrator is not available
+    success = None  # success==1 if integrator was called successfully
+    istate = None  # istate > 0 means success, istate < 0 means failure
+    supports_run_relax = None
+    supports_step = None
+    supports_solout = False
+    integrator_classes = []
+    scalar = float
+
+    def acquire_new_handle(self):
+        # Some of the integrators have internal state (ancient
+        # Fortran...), and so only one instance can use them at a time.
+        # We keep track of this, and fail when concurrent usage is tried.
+        self.__class__.active_global_handle += 1
+        self.handle = self.__class__.active_global_handle
+
+    def check_handle(self):
+        if self.handle is not self.__class__.active_global_handle:
+            raise IntegratorConcurrencyError(self.__class__.__name__)
+
+    def reset(self, n, has_jac):
+        """Prepare integrator for call: allocate memory, set flags, etc.
+        n - number of equations.
+        has_jac - if user has supplied function for evaluating Jacobian.
+        """
+
+    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
+        """Integrate from t=t0 to t=t1 using y0 as an initial condition.
+        Return 2-tuple (y1,t1) where y1 is the result and t=t1
+        defines the stoppage coordinate of the result.
+        """
+        raise NotImplementedError('all integrators must define '
+                                  'run(f, jac, t0, t1, y0, f_params, jac_params)')
+
+    def step(self, f, jac, y0, t0, t1, f_params, jac_params):
+        """Make one integration step and return (y1,t1)."""
+        raise NotImplementedError('%s does not support step() method' %
+                                  self.__class__.__name__)
+
+    def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
+        """Integrate from t=t0 to t>=t1 and return (y1,t)."""
+        raise NotImplementedError('%s does not support run_relax() method' %
+                                  self.__class__.__name__)
+
+    # XXX: __str__ method for getting visual state of the integrator
+
+
+def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
+    """
+    Wrap a banded Jacobian function with a function that pads
+    the Jacobian with `ml` rows of zeros.
+    """
+
+    def jac_wrapper(t, y):
+        jac = asarray(jacfunc(t, y, *jac_params))
+        padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
+        return padded_jac
+
+    return jac_wrapper
+
+
+class vode(IntegratorBase):
+    runner = getattr(_vode, 'dvode', None)
+
+    messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
+                -2: 'Excess accuracy requested. (Tolerances too small.)',
+                -3: 'Illegal input detected. (See printed message.)',
+                -4: 'Repeated error test failures. (Check all input.)',
+                -5: 'Repeated convergence failures. (Perhaps bad'
+                    ' Jacobian supplied or wrong choice of MF or tolerances.)',
+                -6: 'Error weight became zero during problem. (Solution'
+                    ' component i vanished, and ATOL or ATOL(i) = 0.)'
+                }
+    supports_run_relax = 1
+    supports_step = 1
+    active_global_handle = 0
+
+    def __init__(self,
+                 method='adams',
+                 with_jacobian=False,
+                 rtol=1e-6, atol=1e-12,
+                 lband=None, uband=None,
+                 order=12,
+                 nsteps=500,
+                 max_step=0.0,  # corresponds to infinite
+                 min_step=0.0,
+                 first_step=0.0,  # determined by solver
+                 ):
+
+        if re.match(method, r'adams', re.I):
+            self.meth = 1
+        elif re.match(method, r'bdf', re.I):
+            self.meth = 2
+        else:
+            raise ValueError('Unknown integration method %s' % method)
+        self.with_jacobian = with_jacobian
+        self.rtol = rtol
+        self.atol = atol
+        self.mu = uband
+        self.ml = lband
+
+        self.order = order
+        self.nsteps = nsteps
+        self.max_step = max_step
+        self.min_step = min_step
+        self.first_step = first_step
+        self.success = 1
+
+        self.initialized = False
+
+    def _determine_mf_and_set_bands(self, has_jac):
+        """
+        Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.
+
+        In the Fortran code, the legal values of `MF` are:
+            10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
+            -11, -12, -14, -15, -21, -22, -24, -25
+        but this Python wrapper does not use negative values.
+
+        Returns
+
+          mf = 10*self.meth + miter
+
+        self.meth is the linear multistep method:
+            self.meth == 1:  method="adams"
+            self.meth == 2:  method="bdf"
+
+        miter is the correction iteration method:
+            miter == 0:  Functional iteration; no Jacobian involved.
+            miter == 1:  Chord iteration with user-supplied full Jacobian
+            miter == 2:  Chord iteration with internally computed full Jacobian
+            miter == 3:  Chord iteration with internally computed diagonal Jacobian
+            miter == 4:  Chord iteration with user-supplied banded Jacobian
+            miter == 5:  Chord iteration with internally computed banded Jacobian
+
+        Side effects: If either self.mu or self.ml is not None and the other is None,
+        then the one that is None is set to 0.
+        """
+
+        jac_is_banded = self.mu is not None or self.ml is not None
+        if jac_is_banded:
+            if self.mu is None:
+                self.mu = 0
+            if self.ml is None:
+                self.ml = 0
+
+        # has_jac is True if the user provided a jacobian function.
+        if has_jac:
+            if jac_is_banded:
+                miter = 4
+            else:
+                miter = 1
+        else:
+            if jac_is_banded:
+                if self.ml == self.mu == 0:
+                    miter = 3  # Chord iteration with internal diagonal Jacobian.
+                else:
+                    miter = 5  # Chord iteration with internal banded Jacobian.
+            else:
+                # self.with_jacobian is set by the user in the call to ode.set_integrator.
+                if self.with_jacobian:
+                    miter = 2  # Chord iteration with internal full Jacobian.
+                else:
+                    miter = 0  # Functional iteration; no Jacobian involved.
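+        # Worked example of the encoding below: method='bdf' (meth=2) with a
+        # user-supplied full Jacobian (miter=1) gives mf = 21, while the
+        # default method='adams' (meth=1) with functional iteration
+        # (miter=0) gives mf = 10.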
+ + mf = 10 * self.meth + miter + return mf + + def reset(self, n, has_jac): + mf = self._determine_mf_and_set_bands(has_jac) + + if mf == 10: + lrw = 20 + 16 * n + elif mf in [11, 12]: + lrw = 22 + 16 * n + 2 * n * n + elif mf == 13: + lrw = 22 + 17 * n + elif mf in [14, 15]: + lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n + elif mf == 20: + lrw = 20 + 9 * n + elif mf in [21, 22]: + lrw = 22 + 9 * n + 2 * n * n + elif mf == 23: + lrw = 22 + 10 * n + elif mf in [24, 25]: + lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n + else: + raise ValueError('Unexpected mf=%s' % mf) + + if mf % 10 in [0, 3]: + liw = 30 + else: + liw = 30 + n + + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + + iwork = zeros((liw,), int32) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.order + iwork[5] = self.nsteps + iwork[6] = 2 # mxhnil + self.iwork = iwork + + self.call_args = [self.rtol, self.atol, 1, 1, + self.rwork, self.iwork, mf] + self.success = 1 + self.initialized = False + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + if self.initialized: + self.check_handle() + else: + self.initialized = True + self.acquire_new_handle() + + if self.ml is not None and self.ml > 0: + # Banded Jacobian. Wrap the user-provided function with one + # that pads the Jacobian array with the extra `self.ml` rows + # required by the f2py-generated wrapper. + jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params) + + args = ((f, jac, y0, t0, t1) + tuple(self.call_args) + + (f_params, jac_params)) + y1, t, istate = self.runner(*args) + self.istate = istate + if istate < 0: + unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate) + warnings.warn('{:s}: {:s}'.format(self.__class__.__name__, + self.messages.get(istate, unexpected_istate_msg))) + self.success = 0 + else: + self.call_args[3] = 2 # upgrade istate from 1 to 2 + self.istate = 2 + return y1, t + + def step(self, *args): + itask = self.call_args[2] + self.call_args[2] = 2 + r = self.run(*args) + self.call_args[2] = itask + return r + + def run_relax(self, *args): + itask = self.call_args[2] + self.call_args[2] = 3 + r = self.run(*args) + self.call_args[2] = itask + return r + + +if vode.runner is not None: + IntegratorBase.integrator_classes.append(vode) + + +class zvode(vode): + runner = getattr(_vode, 'zvode', None) + + supports_run_relax = 1 + supports_step = 1 + scalar = complex + active_global_handle = 0 + + def reset(self, n, has_jac): + mf = self._determine_mf_and_set_bands(has_jac) + + if mf in (10,): + lzw = 15 * n + elif mf in (11, 12): + lzw = 15 * n + 2 * n ** 2 + elif mf in (-11, -12): + lzw = 15 * n + n ** 2 + elif mf in (13,): + lzw = 16 * n + elif mf in (14, 15): + lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n + elif mf in (-14, -15): + lzw = 16 * n + (2 * self.ml + self.mu) * n + elif mf in (20,): + lzw = 8 * n + elif mf in (21, 22): + lzw = 8 * n + 2 * n ** 2 + elif mf in (-21, -22): + lzw = 8 * n + n ** 2 + elif mf in (23,): + lzw = 9 * n + elif mf in (24, 25): + lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n + elif mf in (-24, -25): + lzw = 9 * n + (2 * self.ml + self.mu) * n + + lrw = 20 + n + + if mf % 10 in (0, 3): + liw = 30 + else: + liw = 30 + n + + zwork = zeros((lzw,), complex) + self.zwork = zwork + + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + + iwork = zeros((liw,), 
int32) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.order + iwork[5] = self.nsteps + iwork[6] = 2 # mxhnil + self.iwork = iwork + + self.call_args = [self.rtol, self.atol, 1, 1, + self.zwork, self.rwork, self.iwork, mf] + self.success = 1 + self.initialized = False + + +if zvode.runner is not None: + IntegratorBase.integrator_classes.append(zvode) + + +class dopri5(IntegratorBase): + runner = getattr(_dop, 'dopri5', None) + name = 'dopri5' + supports_solout = True + + messages = {1: 'computation successful', + 2: 'comput. successful (interrupted by solout)', + -1: 'input is not consistent', + -2: 'larger nsteps is needed', + -3: 'step size becomes too small', + -4: 'problem is probably stiff (interrupted)', + } + + def __init__(self, + rtol=1e-6, atol=1e-12, + nsteps=500, + max_step=0.0, + first_step=0.0, # determined by solver + safety=0.9, + ifactor=10.0, + dfactor=0.2, + beta=0.0, + method=None, + verbosity=-1, # no messages if negative + ): + self.rtol = rtol + self.atol = atol + self.nsteps = nsteps + self.max_step = max_step + self.first_step = first_step + self.safety = safety + self.ifactor = ifactor + self.dfactor = dfactor + self.beta = beta + self.verbosity = verbosity + self.success = 1 + self.set_solout(None) + + def set_solout(self, solout, complex=False): + self.solout = solout + self.solout_cmplx = complex + if solout is None: + self.iout = 0 + else: + self.iout = 1 + + def reset(self, n, has_jac): + work = zeros((8 * n + 21,), float) + work[1] = self.safety + work[2] = self.dfactor + work[3] = self.ifactor + work[4] = self.beta + work[5] = self.max_step + work[6] = self.first_step + self.work = work + iwork = zeros((21,), int32) + iwork[0] = self.nsteps + iwork[2] = self.verbosity + self.iwork = iwork + self.call_args = [self.rtol, self.atol, self._solout, + self.iout, self.work, self.iwork] + self.success = 1 + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + x, y, iwork, istate = self.runner(*((f, t0, y0, t1) + + tuple(self.call_args) + (f_params,))) + self.istate = istate + if istate < 0: + unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate) + warnings.warn('{:s}: {:s}'.format(self.__class__.__name__, + self.messages.get(istate, unexpected_istate_msg))) + self.success = 0 + return y, x + + def _solout(self, nr, xold, x, y, nd, icomp, con): + if self.solout is not None: + if self.solout_cmplx: + y = y[::2] + 1j * y[1::2] + return self.solout(x, y) + else: + return 1 + + +if dopri5.runner is not None: + IntegratorBase.integrator_classes.append(dopri5) + + +class dop853(dopri5): + runner = getattr(_dop, 'dop853', None) + name = 'dop853' + + def __init__(self, + rtol=1e-6, atol=1e-12, + nsteps=500, + max_step=0.0, + first_step=0.0, # determined by solver + safety=0.9, + ifactor=6.0, + dfactor=0.3, + beta=0.0, + method=None, + verbosity=-1, # no messages if negative + ): + super(self.__class__, self).__init__(rtol, atol, nsteps, max_step, + first_step, safety, ifactor, + dfactor, beta, method, + verbosity) + + def reset(self, n, has_jac): + work = zeros((11 * n + 21,), float) + work[1] = self.safety + work[2] = self.dfactor + work[3] = self.ifactor + work[4] = self.beta + work[5] = self.max_step + work[6] = self.first_step + self.work = work + iwork = zeros((21,), int32) + iwork[0] = self.nsteps + iwork[2] = self.verbosity + self.iwork = iwork + self.call_args = [self.rtol, self.atol, self._solout, + self.iout, self.work, self.iwork] + self.success = 1 + + +if dop853.runner is not 
None: + IntegratorBase.integrator_classes.append(dop853) + + +class lsoda(IntegratorBase): + runner = getattr(_lsoda, 'lsoda', None) + active_global_handle = 0 + + messages = { + 2: "Integration successful.", + -1: "Excess work done on this call (perhaps wrong Dfun type).", + -2: "Excess accuracy requested (tolerances too small).", + -3: "Illegal input detected (internal error).", + -4: "Repeated error test failures (internal error).", + -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).", + -6: "Error weight became zero during problem.", + -7: "Internal workspace insufficient to finish (internal error)." + } + + def __init__(self, + with_jacobian=False, + rtol=1e-6, atol=1e-12, + lband=None, uband=None, + nsteps=500, + max_step=0.0, # corresponds to infinite + min_step=0.0, + first_step=0.0, # determined by solver + ixpr=0, + max_hnil=0, + max_order_ns=12, + max_order_s=5, + method=None + ): + + self.with_jacobian = with_jacobian + self.rtol = rtol + self.atol = atol + self.mu = uband + self.ml = lband + + self.max_order_ns = max_order_ns + self.max_order_s = max_order_s + self.nsteps = nsteps + self.max_step = max_step + self.min_step = min_step + self.first_step = first_step + self.ixpr = ixpr + self.max_hnil = max_hnil + self.success = 1 + + self.initialized = False + + def reset(self, n, has_jac): + # Calculate parameters for Fortran subroutine dvode. + if has_jac: + if self.mu is None and self.ml is None: + jt = 1 + else: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + jt = 4 + else: + if self.mu is None and self.ml is None: + jt = 2 + else: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + jt = 5 + lrn = 20 + (self.max_order_ns + 4) * n + if jt in [1, 2]: + lrs = 22 + (self.max_order_s + 4) * n + n * n + elif jt in [4, 5]: + lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n + else: + raise ValueError('Unexpected jt=%s' % jt) + lrw = max(lrn, lrs) + liw = 20 + n + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + iwork = zeros((liw,), int32) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.ixpr + iwork[5] = self.nsteps + iwork[6] = self.max_hnil + iwork[7] = self.max_order_ns + iwork[8] = self.max_order_s + self.iwork = iwork + self.call_args = [self.rtol, self.atol, 1, 1, + self.rwork, self.iwork, jt] + self.success = 1 + self.initialized = False + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + if self.initialized: + self.check_handle() + else: + self.initialized = True + self.acquire_new_handle() + args = [f, y0, t0, t1] + self.call_args[:-1] + \ + [jac, self.call_args[-1], f_params, 0, jac_params] + y1, t, istate = self.runner(*args) + self.istate = istate + if istate < 0: + unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate) + warnings.warn('{:s}: {:s}'.format(self.__class__.__name__, + self.messages.get(istate, unexpected_istate_msg))) + self.success = 0 + else: + self.call_args[3] = 2 # upgrade istate from 1 to 2 + self.istate = 2 + return y1, t + + def step(self, *args): + itask = self.call_args[2] + self.call_args[2] = 2 + r = self.run(*args) + self.call_args[2] = itask + return r + + def run_relax(self, *args): + itask = self.call_args[2] + self.call_args[2] = 3 + r = self.run(*args) + self.call_args[2] = itask + return r + + +if lsoda.runner: + IntegratorBase.integrator_classes.append(lsoda) diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/integrate/_ode.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ode.pyc new file mode 100644 index 0000000..8b99297 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_ode.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_odepack.so b/project/venv/lib/python2.7/site-packages/scipy/integrate/_odepack.so new file mode 100755 index 0000000..f2e601e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_odepack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_quadpack.so b/project/venv/lib/python2.7/site-packages/scipy/integrate/_quadpack.so new file mode 100755 index 0000000..94d7160 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_quadpack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_test_multivariate.so b/project/venv/lib/python2.7/site-packages/scipy/integrate/_test_multivariate.so new file mode 100755 index 0000000..53a1ba7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_test_multivariate.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/_test_odeint_banded.so b/project/venv/lib/python2.7/site-packages/scipy/integrate/_test_odeint_banded.so new file mode 100755 index 0000000..c46b306 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/_test_odeint_banded.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/lsoda.so b/project/venv/lib/python2.7/site-packages/scipy/integrate/lsoda.so new file mode 100755 index 0000000..bf8be6c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/lsoda.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/odepack.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/odepack.py new file mode 100644 index 0000000..ccfde88 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/odepack.py @@ -0,0 +1,259 @@ +# Author: Travis Oliphant +from __future__ import division, print_function, absolute_import + +__all__ = ['odeint'] + +import numpy as np +from . import _odepack +from copy import copy +import warnings + + +class ODEintWarning(Warning): + pass + + +_msgs = {2: "Integration successful.", + 1: "Nothing was done; the integration time was 0.", + -1: "Excess work done on this call (perhaps wrong Dfun type).", + -2: "Excess accuracy requested (tolerances too small).", + -3: "Illegal input detected (internal error).", + -4: "Repeated error test failures (internal error).", + -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).", + -6: "Error weight became zero during problem.", + -7: "Internal workspace insufficient to finish (internal error)." + } + + +def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0, + ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0, + hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12, + mxords=5, printmessg=0, tfirst=False): + """ + Integrate a system of ordinary differential equations. + + .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a + differential equation. + + Solve a system of ordinary differential equations using lsoda from the + FORTRAN library odepack. + + Solves the initial value problem for stiff or non-stiff systems + of first order ode-s:: + + dy/dt = func(y, t, ...) 
[or func(t, y, ...)]
+
+    where y can be a vector.
+
+    .. note:: By default, the required order of the first two arguments of
+              `func` are in the opposite order of the arguments in the system
+              definition function used by the `scipy.integrate.ode` class and
+              the function `scipy.integrate.solve_ivp`. To use a function with
+              the signature ``func(t, y, ...)``, the argument `tfirst` must be
+              set to ``True``.
+
+    Parameters
+    ----------
+    func : callable(y, t, ...) or callable(t, y, ...)
+        Computes the derivative of y at t.
+        If the signature is ``callable(t, y, ...)``, then the argument
+        `tfirst` must be set to ``True``.
+    y0 : array
+        Initial condition on y (can be a vector).
+    t : array
+        A sequence of time points for which to solve for y. The initial
+        value point should be the first element of this sequence.
+        This sequence must be monotonically increasing or monotonically
+        decreasing; repeated values are allowed.
+    args : tuple, optional
+        Extra arguments to pass to function.
+    Dfun : callable(y, t, ...) or callable(t, y, ...)
+        Gradient (Jacobian) of `func`.
+        If the signature is ``callable(t, y, ...)``, then the argument
+        `tfirst` must be set to ``True``.
+    col_deriv : bool, optional
+        True if `Dfun` defines derivatives down columns (faster),
+        otherwise `Dfun` should define derivatives across rows.
+    full_output : bool, optional
+        Whether to return a dictionary of optional outputs as the second output.
+    printmessg : bool, optional
+        Whether to print the convergence message.
+    tfirst : bool, optional
+        If True, the first two arguments of `func` (and `Dfun`, if given)
+        must be ``t, y`` instead of the default ``y, t``.
+
+        .. versionadded:: 1.1.0
+
+    Returns
+    -------
+    y : array, shape (len(t), len(y0))
+        Array containing the value of y for each desired time in t,
+        with the initial value `y0` in the first row.
+    infodict : dict, only returned if full_output == True
+        Dictionary containing additional output information
+
+        ======= ============================================================
+        key     meaning
+        ======= ============================================================
+        'hu'    vector of step sizes successfully used for each time step.
+        'tcur'  vector with the value of t reached for each time step.
+                (will always be at least as large as the input times).
+        'tolsf' vector of tolerance scale factors, greater than 1.0,
+                computed when a request for too much accuracy was detected.
+        'tsw'   value of t at the time of the last method switch
+                (given for each time step)
+        'nst'   cumulative number of time steps
+        'nfe'   cumulative number of function evaluations for each time step
+        'nje'   cumulative number of jacobian evaluations for each time step
+        'nqu'   a vector of method orders for each successful step.
+        'imxer' index of the component of largest magnitude in the
+                weighted local error vector (e / ewt) on an error return, -1
+                otherwise.
+        'lenrw' the length of the double work array required.
+        'leniw' the length of integer work array required.
+        'mused' a vector of method indicators for each successful time step:
+                1: adams (nonstiff), 2: bdf (stiff)
+        ======= ============================================================
+
+    Other Parameters
+    ----------------
+    ml, mu : int, optional
+        If either of these are not None or non-negative, then the
+        Jacobian is assumed to be banded. These give the number of
+        lower and upper non-zero diagonals in this banded matrix.
+        For the banded case, `Dfun` should return a matrix whose
+        rows contain the non-zero bands (starting with the lowest diagonal).
+ Thus, the return matrix `jac` from `Dfun` should have shape + ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``. + The data in `jac` must be stored such that ``jac[i - j + mu, j]`` + holds the derivative of the `i`th equation with respect to the `j`th + state variable. If `col_deriv` is True, the transpose of this + `jac` must be returned. + rtol, atol : float, optional + The input parameters `rtol` and `atol` determine the error + control performed by the solver. The solver will control the + vector, e, of estimated local errors in y, according to an + inequality of the form ``max-norm of (e / ewt) <= 1``, + where ewt is a vector of positive error weights computed as + ``ewt = rtol * abs(y) + atol``. + rtol and atol can be either vectors the same length as y or scalars. + Defaults to 1.49012e-8. + tcrit : ndarray, optional + Vector of critical points (e.g. singularities) where integration + care should be taken. + h0 : float, (0: solver-determined), optional + The step size to be attempted on the first step. + hmax : float, (0: solver-determined), optional + The maximum absolute step size allowed. + hmin : float, (0: solver-determined), optional + The minimum absolute step size allowed. + ixpr : bool, optional + Whether to generate extra printing at method switches. + mxstep : int, (0: solver-determined), optional + Maximum number of (internally defined) steps allowed for each + integration point in t. + mxhnil : int, (0: solver-determined), optional + Maximum number of messages printed. + mxordn : int, (0: solver-determined), optional + Maximum order to be allowed for the non-stiff (Adams) method. + mxords : int, (0: solver-determined), optional + Maximum order to be allowed for the stiff (BDF) method. + + See Also + -------- + solve_ivp : Solve an initial value problem for a system of ODEs. + ode : a more object-oriented integrator based on VODE. + quad : for finding the area under a curve. + + Examples + -------- + The second order differential equation for the angle `theta` of a + pendulum acted on by gravity with friction can be written:: + + theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0 + + where `b` and `c` are positive constants, and a prime (') denotes a + derivative. To solve this equation with `odeint`, we must first convert + it to a system of first order equations. By defining the angular + velocity ``omega(t) = theta'(t)``, we obtain the system:: + + theta'(t) = omega(t) + omega'(t) = -b*omega(t) - c*sin(theta(t)) + + Let `y` be the vector [`theta`, `omega`]. We implement this system + in python as: + + >>> def pend(y, t, b, c): + ... theta, omega = y + ... dydt = [omega, -b*omega - c*np.sin(theta)] + ... return dydt + ... + + We assume the constants are `b` = 0.25 and `c` = 5.0: + + >>> b = 0.25 + >>> c = 5.0 + + For initial conditions, we assume the pendulum is nearly vertical + with `theta(0)` = `pi` - 0.1, and is initially at rest, so + `omega(0)` = 0. Then the vector of initial conditions is + + >>> y0 = [np.pi - 0.1, 0.0] + + We will generate a solution at 101 evenly spaced samples in the interval + 0 <= `t` <= 10. So our array of times is: + + >>> t = np.linspace(0, 10, 101) + + Call `odeint` to generate the solution. To pass the parameters + `b` and `c` to `pend`, we give them to `odeint` using the `args` + argument. + + >>> from scipy.integrate import odeint + >>> sol = odeint(pend, y0, t, args=(b, c)) + + The solution is an array with shape (101, 2). The first column + is `theta(t)`, and the second is `omega(t)`. 
The following code + plots both components. + + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)') + >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)') + >>> plt.legend(loc='best') + >>> plt.xlabel('t') + >>> plt.grid() + >>> plt.show() + """ + + if ml is None: + ml = -1 # changed to zero inside function call + if mu is None: + mu = -1 # changed to zero inside function call + + dt = np.diff(t) + if not((dt >= 0).all() or (dt <= 0).all()): + raise ValueError("The values in t must be monotonically increasing " + "or monotonically decreasing; repeated values are " + "allowed.") + + t = copy(t) + y0 = copy(y0) + output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu, + full_output, rtol, atol, tcrit, h0, hmax, hmin, + ixpr, mxstep, mxhnil, mxordn, mxords, + int(bool(tfirst))) + if output[-1] < 0: + warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information." + warnings.warn(warning_msg, ODEintWarning) + elif printmessg: + warning_msg = _msgs[output[-1]] + warnings.warn(warning_msg, ODEintWarning) + + if full_output: + output[1]['message'] = _msgs[output[-1]] + + output = output[:-1] + if len(output) == 1: + return output[0] + else: + return output diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/odepack.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/odepack.pyc new file mode 100644 index 0000000..c5615d9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/odepack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/quadpack.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/quadpack.py new file mode 100644 index 0000000..4fa8063 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/quadpack.py @@ -0,0 +1,878 @@ +# Author: Travis Oliphant 2001 +# Author: Nathan Woods 2013 (nquad &c) +from __future__ import division, print_function, absolute_import + +import sys +import warnings +from functools import partial + +from . import _quadpack +import numpy +from numpy import Inf + +__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain', + 'IntegrationWarning'] + + +error = _quadpack.error + +class IntegrationWarning(UserWarning): + """ + Warning on issues during integration. + """ + pass + + +def quad_explain(output=sys.stdout): + """ + Print extra information about integrate.quad() parameters and returns. + + Parameters + ---------- + output : instance with "write" method, optional + Information about `quad` is passed to ``output.write()``. + Default is ``sys.stdout``. + + Returns + ------- + None + + """ + output.write(quad.__doc__) + + +def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8, + limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50, + limlst=50): + """ + Compute a definite integral. + + Integrate func from `a` to `b` (possibly infinite interval) using a + technique from the Fortran library QUADPACK. + + Parameters + ---------- + func : {function, scipy.LowLevelCallable} + A Python function or method to integrate. If `func` takes many + arguments, it is integrated along the axis corresponding to the + first argument. 
+ + If the user desires improved integration performance, then `f` may + be a `scipy.LowLevelCallable` with one of the signatures:: + + double func(double x) + double func(double x, void *user_data) + double func(int n, double *xx) + double func(int n, double *xx, void *user_data) + + The ``user_data`` is the data contained in the `scipy.LowLevelCallable`. + In the call forms with ``xx``, ``n`` is the length of the ``xx`` + array which contains ``xx[0] == x`` and the rest of the items are + numbers contained in the ``args`` argument of quad. + + In addition, certain ctypes call signatures are supported for + backward compatibility, but those should not be used in new code. + a : float + Lower limit of integration (use -numpy.inf for -infinity). + b : float + Upper limit of integration (use numpy.inf for +infinity). + args : tuple, optional + Extra arguments to pass to `func`. + full_output : int, optional + Non-zero to return a dictionary of integration information. + If non-zero, warning messages are also suppressed and the + message is appended to the output tuple. + + Returns + ------- + y : float + The integral of func from `a` to `b`. + abserr : float + An estimate of the absolute error in the result. + infodict : dict + A dictionary containing additional information. + Run scipy.integrate.quad_explain() for more information. + message + A convergence message. + explain + Appended only with 'cos' or 'sin' weighting and infinite + integration limits, it contains an explanation of the codes in + infodict['ierlst'] + + Other Parameters + ---------------- + epsabs : float or int, optional + Absolute error tolerance. + epsrel : float or int, optional + Relative error tolerance. + limit : float or int, optional + An upper bound on the number of subintervals used in the adaptive + algorithm. + points : (sequence of floats,ints), optional + A sequence of break points in the bounded integration interval + where local difficulties of the integrand may occur (e.g., + singularities, discontinuities). The sequence does not have + to be sorted. + weight : float or int, optional + String indicating weighting function. Full explanation for this + and the remaining arguments can be found below. + wvar : optional + Variables for use with weighting functions. + wopts : optional + Optional input for reusing Chebyshev moments. + maxp1 : float or int, optional + An upper bound on the number of Chebyshev moments. + limlst : int, optional + Upper bound on the number of cycles (>=3) for use with a sinusoidal + weighting and an infinite end-point. + + See Also + -------- + dblquad : double integral + tplquad : triple integral + nquad : n-dimensional integrals (uses `quad` recursively) + fixed_quad : fixed-order Gaussian quadrature + quadrature : adaptive Gaussian quadrature + odeint : ODE integrator + ode : ODE integrator + simps : integrator for sampled data + romb : integrator for sampled data + scipy.special : for coefficients and roots of orthogonal polynomials + + Notes + ----- + + **Extra information for quad() inputs and outputs** + + If full_output is non-zero, then the third output argument + (infodict) is a dictionary with entries as tabulated below. For + infinite limits, the range is transformed to (0,1) and the + optional outputs are given with respect to this transformed range. + Let M be the input argument limit and let K be infodict['last']. + The entries are: + + 'neval' + The number of function evaluations. + 'last' + The number, K, of subintervals produced in the subdivision process. 
+ 'alist' + A rank-1 array of length M, the first K elements of which are the + left end points of the subintervals in the partition of the + integration range. + 'blist' + A rank-1 array of length M, the first K elements of which are the + right end points of the subintervals. + 'rlist' + A rank-1 array of length M, the first K elements of which are the + integral approximations on the subintervals. + 'elist' + A rank-1 array of length M, the first K elements of which are the + moduli of the absolute error estimates on the subintervals. + 'iord' + A rank-1 integer array of length M, the first L elements of + which are pointers to the error estimates over the subintervals + with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the + sequence ``infodict['iord']`` and let E be the sequence + ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a + decreasing sequence. + + If the input argument points is provided (i.e. it is not None), + the following additional outputs are placed in the output + dictionary. Assume the points sequence is of length P. + + 'pts' + A rank-1 array of length P+2 containing the integration limits + and the break points of the intervals in ascending order. + This is an array giving the subintervals over which integration + will occur. + 'level' + A rank-1 integer array of length M (=limit), containing the + subdivision levels of the subintervals, i.e., if (aa,bb) is a + subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]`` + are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l + if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``. + 'ndin' + A rank-1 integer array of length P+2. After the first integration + over the intervals (pts[1], pts[2]), the error estimates over some + of the intervals may have been increased artificially in order to + put their subdivision forward. This array has ones in slots + corresponding to the subintervals for which this happens. + + **Weighting the integrand** + + The input variables, *weight* and *wvar*, are used to weight the + integrand by a select list of functions. Different integration + methods are used to compute the integral with these weighting + functions. The possible values of weight and the corresponding + weighting functions are. + + ========== =================================== ===================== + ``weight`` Weight function used ``wvar`` + ========== =================================== ===================== + 'cos' cos(w*x) wvar = w + 'sin' sin(w*x) wvar = w + 'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta) + 'alg-loga' g(x)*log(x-a) wvar = (alpha, beta) + 'alg-logb' g(x)*log(b-x) wvar = (alpha, beta) + 'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta) + 'cauchy' 1/(x-c) wvar = c + ========== =================================== ===================== + + wvar holds the parameter w, (alpha, beta), or c depending on the weight + selected. In these expressions, a and b are the integration limits. + + For the 'cos' and 'sin' weighting, additional inputs and outputs are + available. + + For finite integration limits, the integration is performed using a + Clenshaw-Curtis method which uses Chebyshev moments. For repeated + calculations, these moments are saved in the output dictionary: + + 'momcom' + The maximum level of Chebyshev moments that have been computed, + i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been + computed for intervals of length ``|b-a| * 2**(-l)``, + ``l=0,1,...,M_c``. 
+
+    'nnlog'
+        A rank-1 integer array of length M(=limit), containing the
+        subdivision levels of the subintervals, i.e., an element of this
+        array is equal to l if the corresponding subinterval is
+        ``|b-a|* 2**(-l)``.
+    'chebmo'
+        A rank-2 array of shape (25, maxp1) containing the computed
+        Chebyshev moments. These can be passed on to an integration
+        over the same interval by passing this array as the second
+        element of the sequence wopts and passing infodict['momcom'] as
+        the first element.
+
+    If one of the integration limits is infinite, then a Fourier integral is
+    computed (assuming ``w != 0``). If full_output is 1 and a numerical error
+    is encountered, besides the error message attached to the output tuple,
+    a dictionary is also appended to the output tuple which translates the
+    error codes in the array ``info['ierlst']`` to English messages. The
+    output information dictionary contains the following entries instead of
+    'last', 'alist', 'blist', 'rlist', and 'elist':
+
+    'lst'
+        The number of subintervals needed for the integration (call it ``K_f``).
+    'rslst'
+        A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
+        contain the integral contribution over the interval
+        ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
+        and ``k=1,2,...,K_f``.
+    'erlst'
+        A rank-1 array of length ``M_f`` containing the error estimate
+        corresponding to the interval in the same position in
+        ``infodict['rslst']``.
+    'ierlst'
+        A rank-1 integer array of length ``M_f`` containing an error flag
+        corresponding to the interval in the same position in
+        ``infodict['rslst']``. See the explanation dictionary (last entry
+        in the output tuple) for the meaning of the codes.
+
+    Examples
+    --------
+    Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
+
+    >>> from scipy import integrate
+    >>> x2 = lambda x: x**2
+    >>> integrate.quad(x2, 0, 4)
+    (21.333333333333332, 2.3684757858670003e-13)
+    >>> print(4**3 / 3.)  # analytical result
+    21.3333333333
+
+    Calculate :math:`\\int^\\infty_0 e^{-x} dx`
+
+    >>> invexp = lambda x: np.exp(-x)
+    >>> integrate.quad(invexp, 0, np.inf)
+    (1.0, 5.842605999138044e-11)
+
+    >>> f = lambda x,a : a*x
+    >>> y, err = integrate.quad(f, 0, 1, args=(1,))
+    >>> y
+    0.5
+    >>> y, err = integrate.quad(f, 0, 1, args=(3,))
+    >>> y
+    1.5
+
+    Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
+    y parameter as 1::
+
+        testlib.c =>
+            double func(int n, double args[n]){
+                return args[0]*args[0] + args[1]*args[1];}
+        compile to library testlib.*
+
+    ::
+
+       from scipy import integrate
+       import ctypes
+       lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
+       lib.func.restype = ctypes.c_double
+       lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
+       integrate.quad(lib.func,0,1,(1))
+       #(1.3333333333333333, 1.4802973661668752e-14)
+       print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
+       # 1.3333333333333333
+
+    Be aware that pulse shapes and other sharp features as compared to the
+    size of the integration interval may not be integrated correctly using
+    this method. A simplified example of this limitation is integrating a
+    y-axis reflected step function with many zero values within the integral's
+    bounds.
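+    (The integrand below is 1 for ``x <= 0`` and 0 elsewhere, so the exact
+    value over any interval containing [-1, 0] is 1; note how the estimate
+    degrades as the upper limit grows.)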
+
+    >>> y = lambda x: 1 if x<=0 else 0
+    >>> integrate.quad(y, -1, 1)
+    (1.0, 1.1102230246251565e-14)
+    >>> integrate.quad(y, -1, 100)
+    (1.0000000002199108, 1.0189464580163188e-08)
+    >>> integrate.quad(y, -1, 10000)
+    (0.0, 0.0)
+
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+
+    # check the limits of integration: \int_a^b, expect a < b
+    flip, a, b = b < a, min(a, b), max(a, b)
+
+    if weight is None:
+        retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
+                       points)
+    else:
+        retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
+                              limlst, limit, maxp1, weight, wvar, wopts)
+
+    if flip:
+        retval = (-retval[0],) + retval[1:]
+
+    ier = retval[-1]
+    if ier == 0:
+        return retval[:-1]
+
+    msgs = {80: "A Python error occurred possibly while calling the function.",
+            1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
+            2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
+            3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
+            4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
+            5: "The integral is probably divergent, or slowly convergent.",
+            6: "The input is invalid.",
+            7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
+            'unknown': "Unknown error."}
+
+    if weight in ['cos','sin'] and (b == Inf or a == -Inf):
+        msgs[1] = "The maximum number of cycles allowed has been achieved, i.e.,\n the maximum number of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
+        msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
+        msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlst'] obtained with full_output=1."
+        explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
+                   2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
+                   3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
+                   4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
+                   5: "The integral over this cycle is probably divergent or slowly convergent."}
+
+    try:
+        msg = msgs[ier]
+    except KeyError:
+        msg = msgs['unknown']
+
+    if ier in [1,2,3,4,5,7]:
+        if full_output:
+            if weight in ['cos', 'sin'] and (b == Inf or a == Inf):
+                return retval[:-1] + (msg, explain)
+            else:
+                return retval[:-1] + (msg,)
+        else:
+            warnings.warn(msg, IntegrationWarning, stacklevel=2)
+            return retval[:-1]
+
+    elif ier == 6:  # Forensic decision tree when QUADPACK throws ier=6
+        if epsabs <= 0:  # Small error tolerance - applies to all methods
+            if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
+                msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
+                       " 5e-29 and 50*(machine epsilon).")
+            elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
+                msg = ("Sine or cosine weighted integrals with infinite domain"
+                       " must have 'epsabs'>0.")
+
+        elif weight is None:
+            if points is None:  # QAGSE/QAGIE
+                msg = ("Invalid 'limit' argument. There must be"
+                       " at least one subinterval")
+            else:  # QAGPE
+                if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
+                    msg = ("All break points in 'points' must lie within the"
+                           " integration limits.")
+                elif len(points) >= limit:
+                    msg = ("Number of break points ({:d})"
+                           " must be less than subinterval"
+                           " limit ({:d})").format(len(points), limit)
+
+        else:
+            if maxp1 < 1:
+                msg = "Chebyshev moment limit maxp1 must be >=1."
+
+            elif weight in ('cos', 'sin') and abs(a+b) == Inf:  # QAWFE
+                msg = "Cycle limit limlst must be >=3."
+
+            elif weight.startswith('alg'):  # QAWSE
+                if min(wvar) < -1:
+                    msg = "wvar parameters (alpha, beta) must both be >= -1."
+                if b < a:
+                    msg = "Integration limits a, b must satisfy a<b."
+
+            elif weight == 'cauchy' and wvar in (a, b):
+                msg = ("Parameter 'wvar' must not equal"
+                       " integration limits 'a' or 'b'.")
+
+        raise ValueError(msg)
+
+
+def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
+    infbounds = 0
+    if (b != Inf and a != -Inf):
+        pass   # standard integration
+    elif (b == Inf and a != -Inf):
+        infbounds = 1
+        bound = a
+    elif (b == Inf and a == -Inf):
+        infbounds = 2
+        bound = 0     # ignored
+    elif (b != Inf and a == -Inf):
+        infbounds = -1
+        bound = b
+    else:
+        raise RuntimeError("Infinity comparisons don't work for you.")
+
+    if points is None:
+        if infbounds == 0:
+            return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
+        else:
+            return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
+    else:
+        if infbounds != 0:
+            raise ValueError("Infinity inputs cannot be used with break points.")
+        else:
+            # Duplicates force function evaluation at singular points
+            the_points = numpy.unique(points)
+            the_points = the_points[a < the_points]
+            the_points = the_points[the_points < b]
+            the_points = numpy.concatenate((the_points, (0., 0.)))
+            return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
+
+
+def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
+    if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
+        raise ValueError("%s not a recognized weighting function."
% weight) + + strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4} + + if weight in ['cos','sin']: + integr = strdict[weight] + if (b != Inf and a != -Inf): # finite limits + if wopts is None: # no precomputed chebyshev moments + return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output, + epsabs, epsrel, limit, maxp1,1) + else: # precomputed chebyshev moments + momcom = wopts[0] + chebcom = wopts[1] + return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output, + epsabs, epsrel, limit, maxp1, 2, momcom, chebcom) + + elif (b == Inf and a != -Inf): + return _quadpack._qawfe(func, a, wvar, integr, args, full_output, + epsabs,limlst,limit,maxp1) + elif (b != Inf and a == -Inf): # remap function and interval + if weight == 'cos': + def thefunc(x,*myargs): + y = -x + func = myargs[0] + myargs = (y,) + myargs[1:] + return func(*myargs) + else: + def thefunc(x,*myargs): + y = -x + func = myargs[0] + myargs = (y,) + myargs[1:] + return -func(*myargs) + args = (func,) + args + return _quadpack._qawfe(thefunc, -b, wvar, integr, args, + full_output, epsabs, limlst, limit, maxp1) + else: + raise ValueError("Cannot integrate with this weight from -Inf to +Inf.") + else: + if a in [-Inf,Inf] or b in [-Inf,Inf]: + raise ValueError("Cannot integrate with this weight over an infinite interval.") + + if weight.startswith('alg'): + integr = strdict[weight] + return _quadpack._qawse(func, a, b, wvar, integr, args, + full_output, epsabs, epsrel, limit) + else: # weight == 'cauchy' + return _quadpack._qawce(func, a, b, wvar, args, full_output, + epsabs, epsrel, limit) + + +def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8): + """ + Compute a double integral. + + Return the double (definite) integral of ``func(y, x)`` from ``x = a..b`` + and ``y = gfun(x)..hfun(x)``. + + Parameters + ---------- + func : callable + A Python function or method of at least two variables: y must be the + first argument and x the second argument. + a, b : float + The limits of integration in x: `a` < `b` + gfun : callable or float + The lower boundary curve in y which is a function taking a single + floating point argument (x) and returning a floating point result + or a float indicating a constant boundary curve. + hfun : callable or float + The upper boundary curve in y (same requirements as `gfun`). + args : sequence, optional + Extra arguments to pass to `func`. + epsabs : float, optional + Absolute tolerance passed directly to the inner 1-D quadrature + integration. Default is 1.49e-8. + epsrel : float, optional + Relative tolerance of the inner 1-D integrals. Default is 1.49e-8. + + Returns + ------- + y : float + The resultant integral. + abserr : float + An estimate of the error. + + See also + -------- + quad : single integral + tplquad : triple integral + nquad : N-dimensional integrals + fixed_quad : fixed-order Gaussian quadrature + quadrature : adaptive Gaussian quadrature + odeint : ODE integrator + ode : ODE integrator + simps : integrator for sampled data + romb : integrator for sampled data + scipy.special : for coefficients and roots of orthogonal polynomials + + Examples + -------- + + Compute the double integral of ``x * y**2`` over the box + ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1. 
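+
+    The integrand factors, so the exact value is
+    ``(int_0^2 x dx) * (int_0^1 y**2 dy) = 2 * (1/3) = 2/3``,
+    matching the first element of the result below.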
+ + >>> from scipy import integrate + >>> f = lambda y, x: x*y**2 + >>> integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1) + (0.6666666666666667, 7.401486830834377e-15) + + """ + + def temp_ranges(*args): + return [gfun(args[0]) if callable(gfun) else gfun, + hfun(args[0]) if callable(hfun) else hfun] + + return nquad(func, [temp_ranges, [a, b]], args=args, + opts={"epsabs": epsabs, "epsrel": epsrel}) + + +def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8, + epsrel=1.49e-8): + """ + Compute a triple (definite) integral. + + Return the triple integral of ``func(z, y, x)`` from ``x = a..b``, + ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``. + + Parameters + ---------- + func : function + A Python function or method of at least three variables in the + order (z, y, x). + a, b : float + The limits of integration in x: `a` < `b` + gfun : function or float + The lower boundary curve in y which is a function taking a single + floating point argument (x) and returning a floating point result + or a float indicating a constant boundary curve. + hfun : function or float + The upper boundary curve in y (same requirements as `gfun`). + qfun : function or float + The lower boundary surface in z. It must be a function that takes + two floats in the order (x, y) and returns a float or a float + indicating a constant boundary surface. + rfun : function or float + The upper boundary surface in z. (Same requirements as `qfun`.) + args : tuple, optional + Extra arguments to pass to `func`. + epsabs : float, optional + Absolute tolerance passed directly to the innermost 1-D quadrature + integration. Default is 1.49e-8. + epsrel : float, optional + Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8. + + Returns + ------- + y : float + The resultant integral. + abserr : float + An estimate of the error. + + See Also + -------- + quad: Adaptive quadrature using QUADPACK + quadrature: Adaptive Gaussian quadrature + fixed_quad: Fixed-order Gaussian quadrature + dblquad: Double integrals + nquad : N-dimensional integrals + romb: Integrators for sampled data + simps: Integrators for sampled data + ode: ODE integrators + odeint: ODE integrators + scipy.special: For coefficients and roots of orthogonal polynomials + + Examples + -------- + + Compute the triple integral of ``x * y * z``, over ``x`` ranging + from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1. + + >>> from scipy import integrate + >>> f = lambda z, y, x: x*y*z + >>> integrate.tplquad(f, 1, 2, lambda x: 2, lambda x: 3, + ... lambda x, y: 0, lambda x, y: 1) + (1.8750000000000002, 3.324644794257407e-14) + + + """ + # f(z, y, x) + # qfun/rfun (x, y) + # gfun/hfun(x) + # nquad will hand (y, x, t0, ...) to ranges0 + # nquad will hand (x, t0, ...) to ranges1 + # Stupid different API... + + def ranges0(*args): + return [qfun(args[1], args[0]) if callable(qfun) else qfun, + rfun(args[1], args[0]) if callable(rfun) else rfun] + + def ranges1(*args): + return [gfun(args[0]) if callable(gfun) else gfun, + hfun(args[0]) if callable(hfun) else hfun] + + ranges = [ranges0, ranges1, [a, b]] + return nquad(func, ranges, args=args, + opts={"epsabs": epsabs, "epsrel": epsrel}) + + +def nquad(func, ranges, args=None, opts=None, full_output=False): + """ + Integration over multiple variables. + + Wraps `quad` to enable integration over multiple variables. 
+
+    Various options allow improved integration of discontinuous functions, as
+    well as the use of weighted integration, and generally finer control of the
+    integration process.
+
+    Parameters
+    ----------
+    func : {callable, scipy.LowLevelCallable}
+        The function to be integrated. Has arguments of ``x0, ... xn``,
+        ``t0, tm``, where integration is carried out over ``x0, ... xn``, which
+        must be floats. Function signature should be
+        ``func(x0, x1, ..., xn, t0, t1, ..., tm)``. Integration is carried out
+        in order. That is, integration over ``x0`` is the innermost integral,
+        and ``xn`` is the outermost.
+
+        If the user desires improved integration performance, then `f` may
+        be a `scipy.LowLevelCallable` with one of the signatures::
+
+            double func(int n, double *xx)
+            double func(int n, double *xx, void *user_data)
+
+        where ``n`` is the number of extra parameters and args is an array
+        of doubles of the additional parameters; the ``xx`` array contains the
+        coordinates. The ``user_data`` is the data contained in the
+        `scipy.LowLevelCallable`.
+    ranges : iterable object
+        Each element of ranges may be either a sequence of 2 numbers, or else
+        a callable that returns such a sequence. ``ranges[0]`` corresponds to
+        integration over x0, and so on. If an element of ranges is a callable,
+        then it will be called with all of the integration arguments available,
+        as well as any parametric arguments. e.g. if
+        ``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
+        either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
+    args : iterable object, optional
+        Additional arguments ``t0, ..., tn``, required by `func`, `ranges`, and
+        ``opts``.
+    opts : iterable object or dict, optional
+        Options to be passed to `quad`. May be empty, a dict, or
+        a sequence of dicts or functions that return a dict. If empty, the
+        default options from scipy.integrate.quad are used. If a dict, the same
+        options are used for all levels of integration. If a sequence, then each
+        element of the sequence corresponds to a particular integration. e.g.
+        opts[0] corresponds to integration over x0, and so on. If a callable,
+        the signature must be the same as for ``ranges``. The available
+        options together with their default values are:
+
+          - epsabs = 1.49e-08
+          - epsrel = 1.49e-08
+          - limit  = 50
+          - points = None
+          - weight = None
+          - wvar   = None
+          - wopts  = None
+
+        For more information on these options, see `quad` and `quad_explain`.
+
+    full_output : bool, optional
+        Partial implementation of ``full_output`` from scipy.integrate.quad.
+        The number of integrand function evaluations ``neval`` can be obtained
+        by setting ``full_output=True`` when calling nquad.
+
+    Returns
+    -------
+    result : float
+        The result of the integration.
+    abserr : float
+        The maximum of the estimates of the absolute error in the various
+        integration results.
+    out_dict : dict, optional
+        A dict containing additional information on the integration.
+
+    See Also
+    --------
+    quad : 1-dimensional numerical integration
+    dblquad, tplquad : double and triple integrals
+    fixed_quad : fixed-order Gaussian quadrature
+    quadrature : adaptive Gaussian quadrature
+
+    Examples
+    --------
+    >>> from scipy import integrate
+    >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
+    ...                                 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
+    >>> points = [[lambda x1,x2,x3 : 0.2*x3 + 0.5 + 0.25*x1], [], [], []]
+    >>> def opts0(*args, **kwargs):
+    ...
return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]} + >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]], + ... opts=[opts0,{},{},{}], full_output=True) + (1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962}) + + >>> scale = .1 + >>> def func2(x0, x1, x2, x3, t0, t1): + ... return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0) + >>> def lim0(x1, x2, x3, t0, t1): + ... return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1, + ... scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1] + >>> def lim1(x2, x3, t0, t1): + ... return [scale * (t0*x2 + t1*x3) - 1, + ... scale * (t0*x2 + t1*x3) + 1] + >>> def lim2(x3, t0, t1): + ... return [scale * (x3 + t0**2*t1**3) - 1, + ... scale * (x3 + t0**2*t1**3) + 1] + >>> def lim3(t0, t1): + ... return [scale * (t0+t1) - 1, scale * (t0+t1) + 1] + >>> def opts0(x1, x2, x3, t0, t1): + ... return {'points' : [t0 - t1*x1]} + >>> def opts1(x2, x3, t0, t1): + ... return {} + >>> def opts2(x3, t0, t1): + ... return {} + >>> def opts3(t0, t1): + ... return {} + >>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0), + ... opts=[opts0, opts1, opts2, opts3]) + (25.066666666666666, 2.7829590483937256e-13) + + """ + depth = len(ranges) + ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges] + if args is None: + args = () + if opts is None: + opts = [dict([])] * depth + + if isinstance(opts, dict): + opts = [_OptFunc(opts)] * depth + else: + opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts] + return _NQuad(func, ranges, opts, full_output).integrate(*args) + + +class _RangeFunc(object): + def __init__(self, range_): + self.range_ = range_ + + def __call__(self, *args): + """Return stored value. + + *args needed because range_ can be float or func, and is called with + variable number of parameters. + """ + return self.range_ + + +class _OptFunc(object): + def __init__(self, opt): + self.opt = opt + + def __call__(self, *args): + """Return stored dict.""" + return self.opt + + +class _NQuad(object): + def __init__(self, func, ranges, opts, full_output): + self.abserr = 0 + self.func = func + self.ranges = ranges + self.opts = opts + self.maxdepth = len(ranges) + self.full_output = full_output + if self.full_output: + self.out_dict = {'neval': 0} + + def integrate(self, *args, **kwargs): + depth = kwargs.pop('depth', 0) + if kwargs: + raise ValueError('unexpected kwargs') + + # Get the integration range and options for this depth. + ind = -(depth + 1) + fn_range = self.ranges[ind] + low, high = fn_range(*args) + fn_opt = self.opts[ind] + opt = dict(fn_opt(*args)) + + if 'points' in opt: + opt['points'] = [x for x in opt['points'] if low <= x <= high] + if depth + 1 == self.maxdepth: + f = self.func + else: + f = partial(self.integrate, depth=depth+1) + quad_r = quad(f, low, high, args=args, full_output=self.full_output, + **opt) + value = quad_r[0] + abserr = quad_r[1] + if self.full_output: + infodict = quad_r[2] + # The 'neval' parameter in full_output returns the total + # number of times the integrand function was evaluated. + # Therefore, only the innermost integration loop counts. 
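+                # (Outer recursion levels would count calls to the nested
+                # integrate() partial, not evaluations of the integrand.)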
+ if depth + 1 == self.maxdepth: + self.out_dict['neval'] += infodict['neval'] + self.abserr = max(self.abserr, abserr) + if depth > 0: + return value + else: + # Final result of n-D integration with error + if self.full_output: + return value, self.abserr, self.out_dict + else: + return value, self.abserr diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/quadpack.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/quadpack.pyc new file mode 100644 index 0000000..ae66f4d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/quadpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/quadrature.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/quadrature.py new file mode 100644 index 0000000..5e5609a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/quadrature.py @@ -0,0 +1,957 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +import math +import warnings + +# trapz is a public function for scipy.integrate, +# even though it's actually a numpy function. +from numpy import trapz +from scipy.special import roots_legendre +from scipy.special import gammaln +from scipy._lib.six import xrange + +__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb', + 'cumtrapz', 'newton_cotes'] + + +class AccuracyWarning(Warning): + pass + + +def _cached_roots_legendre(n): + """ + Cache roots_legendre results to speed up calls of the fixed_quad + function. + """ + if n in _cached_roots_legendre.cache: + return _cached_roots_legendre.cache[n] + + _cached_roots_legendre.cache[n] = roots_legendre(n) + return _cached_roots_legendre.cache[n] + + +_cached_roots_legendre.cache = dict() + + +def fixed_quad(func, a, b, args=(), n=5): + """ + Compute a definite integral using fixed-order Gaussian quadrature. + + Integrate `func` from `a` to `b` using Gaussian quadrature of + order `n`. + + Parameters + ---------- + func : callable + A Python function or method to integrate (must accept vector inputs). + If integrating a vector-valued function, the returned array must have + shape ``(..., len(x))``. + a : float + Lower limit of integration. + b : float + Upper limit of integration. + args : tuple, optional + Extra arguments to pass to function, if any. + n : int, optional + Order of quadrature integration. Default is 5. 
+ + Returns + ------- + val : float + Gaussian quadrature approximation to the integral + none : None + Statically returned value of None + + + See Also + -------- + quad : adaptive quadrature using QUADPACK + dblquad : double integrals + tplquad : triple integrals + romberg : adaptive Romberg quadrature + quadrature : adaptive Gaussian quadrature + romb : integrators for sampled data + simps : integrators for sampled data + cumtrapz : cumulative integration for sampled data + ode : ODE integrator + odeint : ODE integrator + + Examples + -------- + >>> from scipy import integrate + >>> f = lambda x: x**8 + >>> integrate.fixed_quad(f, 0.0, 1.0, n=4) + (0.1110884353741496, None) + >>> integrate.fixed_quad(f, 0.0, 1.0, n=5) + (0.11111111111111102, None) + >>> print(1/9.0) # analytical result + 0.1111111111111111 + + >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4) + (0.9999999771971152, None) + >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5) + (1.000000000039565, None) + >>> np.sin(np.pi/2)-np.sin(0) # analytical result + 1.0 + + """ + x, w = _cached_roots_legendre(n) + x = np.real(x) + if np.isinf(a) or np.isinf(b): + raise ValueError("Gaussian quadrature is only available for " + "finite limits.") + y = (b-a)*(x+1)/2.0 + a + return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None + + +def vectorize1(func, args=(), vec_func=False): + """Vectorize the call to a function. + + This is an internal utility function used by `romberg` and + `quadrature` to create a vectorized version of a function. + + If `vec_func` is True, the function `func` is assumed to take vector + arguments. + + Parameters + ---------- + func : callable + User defined function. + args : tuple, optional + Extra arguments for the function. + vec_func : bool, optional + True if the function func takes vector arguments. + + Returns + ------- + vfunc : callable + A function that will take a vector argument and return the + result. + + """ + if vec_func: + def vfunc(x): + return func(x, *args) + else: + def vfunc(x): + if np.isscalar(x): + return func(x, *args) + x = np.asarray(x) + # call with first point to get output type + y0 = func(x[0], *args) + n = len(x) + dtype = getattr(y0, 'dtype', type(y0)) + output = np.empty((n,), dtype=dtype) + output[0] = y0 + for i in xrange(1, n): + output[i] = func(x[i], *args) + return output + return vfunc + + +def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50, + vec_func=True, miniter=1): + """ + Compute a definite integral using fixed-tolerance Gaussian quadrature. + + Integrate `func` from `a` to `b` using Gaussian quadrature + with absolute tolerance `tol`. + + Parameters + ---------- + func : function + A Python function or method to integrate. + a : float + Lower limit of integration. + b : float + Upper limit of integration. + args : tuple, optional + Extra arguments to pass to function. + tol, rtol : float, optional + Iteration stops when error between last two iterates is less than + `tol` OR the relative change is less than `rtol`. + maxiter : int, optional + Maximum order of Gaussian quadrature. + vec_func : bool, optional + True or False if func handles arrays as arguments (is + a "vector" function). Default is True. + miniter : int, optional + Minimum order of Gaussian quadrature. + + Returns + ------- + val : float + Gaussian quadrature approximation (within tolerance) to integral. + err : float + Difference between last two estimates of the integral. 
+ + See also + -------- + romberg: adaptive Romberg quadrature + fixed_quad: fixed-order Gaussian quadrature + quad: adaptive quadrature using QUADPACK + dblquad: double integrals + tplquad: triple integrals + romb: integrator for sampled data + simps: integrator for sampled data + cumtrapz: cumulative integration for sampled data + ode: ODE integrator + odeint: ODE integrator + + Examples + -------- + >>> from scipy import integrate + >>> f = lambda x: x**8 + >>> integrate.quadrature(f, 0.0, 1.0) + (0.11111111111111106, 4.163336342344337e-17) + >>> print(1/9.0) # analytical result + 0.1111111111111111 + + >>> integrate.quadrature(np.cos, 0.0, np.pi/2) + (0.9999999999999536, 3.9611425250996035e-11) + >>> np.sin(np.pi/2)-np.sin(0) # analytical result + 1.0 + + """ + if not isinstance(args, tuple): + args = (args,) + vfunc = vectorize1(func, args, vec_func=vec_func) + val = np.inf + err = np.inf + maxiter = max(miniter+1, maxiter) + for n in xrange(miniter, maxiter+1): + newval = fixed_quad(vfunc, a, b, (), n)[0] + err = abs(newval-val) + val = newval + + if err < tol or err < rtol*abs(val): + break + else: + warnings.warn( + "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err), + AccuracyWarning) + return val, err + + +def tupleset(t, i, value): + l = list(t) + l[i] = value + return tuple(l) + + +def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None): + """ + Cumulatively integrate y(x) using the composite trapezoidal rule. + + Parameters + ---------- + y : array_like + Values to integrate. + x : array_like, optional + The coordinate to integrate along. If None (default), use spacing `dx` + between consecutive elements in `y`. + dx : float, optional + Spacing between elements of `y`. Only used if `x` is None. + axis : int, optional + Specifies the axis to cumulate. Default is -1 (last axis). + initial : scalar, optional + If given, insert this value at the beginning of the returned result. + Typically this value should be 0. Default is None, which means no + value at ``x[0]`` is returned and `res` has one element less than `y` + along the axis of integration. + + Returns + ------- + res : ndarray + The result of cumulative integration of `y` along `axis`. + If `initial` is None, the shape is such that the axis of integration + has one less value than `y`. If `initial` is given, the shape is equal + to that of `y`. 
+
+    See Also
+    --------
+    numpy.cumsum, numpy.cumprod
+    quad: adaptive quadrature using QUADPACK
+    romberg: adaptive Romberg quadrature
+    quadrature: adaptive Gaussian quadrature
+    fixed_quad: fixed-order Gaussian quadrature
+    dblquad: double integrals
+    tplquad: triple integrals
+    romb: integrators for sampled data
+    ode: ODE integrators
+    odeint: ODE integrators
+
+    Examples
+    --------
+    >>> from scipy import integrate
+    >>> import matplotlib.pyplot as plt
+
+    >>> x = np.linspace(-2, 2, num=20)
+    >>> y = x
+    >>> y_int = integrate.cumtrapz(y, x, initial=0)
+    >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
+    >>> plt.show()
+
+    """
+    y = np.asarray(y)
+    if x is None:
+        d = dx
+    else:
+        x = np.asarray(x)
+        if x.ndim == 1:
+            d = np.diff(x)
+            # reshape to correct shape
+            shape = [1] * y.ndim
+            shape[axis] = -1
+            d = d.reshape(shape)
+        elif len(x.shape) != len(y.shape):
+            raise ValueError("If given, shape of x must be 1-d or the "
+                             "same as y.")
+        else:
+            d = np.diff(x, axis=axis)
+
+        if d.shape[axis] != y.shape[axis] - 1:
+            raise ValueError("If given, length of x along axis must be the "
+                             "same as y.")
+
+    nd = len(y.shape)
+    slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
+    slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
+    res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
+
+    if initial is not None:
+        if not np.isscalar(initial):
+            raise ValueError("`initial` parameter should be a scalar.")
+
+        shape = list(res.shape)
+        shape[axis] = 1
+        res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
+                             axis=axis)
+
+    return res
+
+
+def _basic_simps(y, start, stop, x, dx, axis):
+    nd = len(y.shape)
+    if start is None:
+        start = 0
+    step = 2
+    slice_all = (slice(None),)*nd
+    slice0 = tupleset(slice_all, axis, slice(start, stop, step))
+    slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
+    slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
+
+    if x is None:  # Evenly spaced Simpson's rule.
+        result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
+                        axis=axis)
+    else:
+        # Account for possibly different spacings.
+        # Simpson's rule changes a bit.
+        h = np.diff(x, axis=axis)
+        sl0 = tupleset(slice_all, axis, slice(start, stop, step))
+        sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
+        h0 = h[sl0]
+        h1 = h[sl1]
+        hsum = h0 + h1
+        hprod = h0 * h1
+        h0divh1 = h0 / h1
+        tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
+                          y[slice1]*hsum*hsum/hprod +
+                          y[slice2]*(2-h0divh1))
+        result = np.sum(tmp, axis=axis)
+    return result
+
+
+def simps(y, x=None, dx=1, axis=-1, even='avg'):
+    """
+    Integrate y(x) using samples along the given axis and the composite
+    Simpson's rule. If x is None, spacing of dx is assumed.
+
+    If there are an even number of samples, N, then there are an odd
+    number of intervals (N-1), but Simpson's rule requires an even number
+    of intervals. The parameter 'even' controls how this is handled.
+
+    Parameters
+    ----------
+    y : array_like
+        Array to be integrated.
+    x : array_like, optional
+        If given, the points at which `y` is sampled.
+    dx : float, optional
+        Spacing of integration points along axis of `y`. Only used when
+        `x` is None. Default is 1.
+    axis : int, optional
+        Axis along which to integrate. Default is the last axis.
+    even : str {'avg', 'first', 'last'}, optional
+        'avg' : Average two results: 1) use the first N-2 intervals with
+            a trapezoidal rule on the last interval and 2) use the last
+            N-2 intervals with a trapezoidal rule on the first interval.
+ + 'first' : Use Simpson's rule for the first N-2 intervals with + a trapezoidal rule on the last interval. + + 'last' : Use Simpson's rule for the last N-2 intervals with a + trapezoidal rule on the first interval. + + See Also + -------- + quad: adaptive quadrature using QUADPACK + romberg: adaptive Romberg quadrature + quadrature: adaptive Gaussian quadrature + fixed_quad: fixed-order Gaussian quadrature + dblquad: double integrals + tplquad: triple integrals + romb: integrators for sampled data + cumtrapz: cumulative integration for sampled data + ode: ODE integrators + odeint: ODE integrators + + Notes + ----- + For an odd number of samples that are equally spaced the result is + exact if the function is a polynomial of order 3 or less. If + the samples are not equally spaced, then the result is exact only + if the function is a polynomial of order 2 or less. + + Examples + -------- + >>> from scipy import integrate + >>> x = np.arange(0, 10) + >>> y = np.arange(0, 10) + + >>> integrate.simps(y, x) + 40.5 + + >>> y = np.power(x, 3) + >>> integrate.simps(y, x) + 1642.5 + >>> integrate.quad(lambda x: x**3, 0, 9)[0] + 1640.25 + + >>> integrate.simps(y, x, even='first') + 1644.5 + + """ + y = np.asarray(y) + nd = len(y.shape) + N = y.shape[axis] + last_dx = dx + first_dx = dx + returnshape = 0 + if x is not None: + x = np.asarray(x) + if len(x.shape) == 1: + shapex = [1] * nd + shapex[axis] = x.shape[0] + saveshape = x.shape + returnshape = 1 + x = x.reshape(tuple(shapex)) + elif len(x.shape) != len(y.shape): + raise ValueError("If given, shape of x must be 1-d or the " + "same as y.") + if x.shape[axis] != N: + raise ValueError("If given, length of x along axis must be the " + "same as y.") + if N % 2 == 0: + val = 0.0 + result = 0.0 + slice1 = (slice(None),)*nd + slice2 = (slice(None),)*nd + if even not in ['avg', 'last', 'first']: + raise ValueError("Parameter 'even' must be " + "'avg', 'last', or 'first'.") + # Compute using Simpson's rule on first intervals + if even in ['avg', 'first']: + slice1 = tupleset(slice1, axis, -1) + slice2 = tupleset(slice2, axis, -2) + if x is not None: + last_dx = x[slice1] - x[slice2] + val += 0.5*last_dx*(y[slice1]+y[slice2]) + result = _basic_simps(y, 0, N-3, x, dx, axis) + # Compute using Simpson's rule on last set of intervals + if even in ['avg', 'last']: + slice1 = tupleset(slice1, axis, 0) + slice2 = tupleset(slice2, axis, 1) + if x is not None: + first_dx = x[tuple(slice2)] - x[tuple(slice1)] + val += 0.5*first_dx*(y[slice2]+y[slice1]) + result += _basic_simps(y, 1, N-2, x, dx, axis) + if even == 'avg': + val /= 2.0 + result /= 2.0 + result = result + val + else: + result = _basic_simps(y, 0, N-2, x, dx, axis) + if returnshape: + x = x.reshape(saveshape) + return result + + +def romb(y, dx=1.0, axis=-1, show=False): + """ + Romberg integration using samples of a function. + + Parameters + ---------- + y : array_like + A vector of ``2**k + 1`` equally-spaced samples of a function. + dx : float, optional + The sample spacing. Default is 1. + axis : int, optional + The axis along which to integrate. Default is -1 (last axis). + show : bool, optional + When `y` is a single 1-D array, then if this argument is True + print the table showing Richardson extrapolation from the + samples. Default is False. + + Returns + ------- + romb : ndarray + The integrated result for `axis`. 
+ + See also + -------- + quad : adaptive quadrature using QUADPACK + romberg : adaptive Romberg quadrature + quadrature : adaptive Gaussian quadrature + fixed_quad : fixed-order Gaussian quadrature + dblquad : double integrals + tplquad : triple integrals + simps : integrators for sampled data + cumtrapz : cumulative integration for sampled data + ode : ODE integrators + odeint : ODE integrators + + Examples + -------- + >>> from scipy import integrate + >>> x = np.arange(10, 14.25, 0.25) + >>> y = np.arange(3, 12) + + >>> integrate.romb(y) + 56.0 + + >>> y = np.sin(np.power(x, 2.5)) + >>> integrate.romb(y) + -0.742561336672229 + + >>> integrate.romb(y, show=True) + Richardson Extrapolation Table for Romberg Integration + ==================================================================== + -0.81576 + 4.63862 6.45674 + -1.10581 -3.02062 -3.65245 + -2.57379 -3.06311 -3.06595 -3.05664 + -1.34093 -0.92997 -0.78776 -0.75160 -0.74256 + ==================================================================== + -0.742561336672229 + """ + y = np.asarray(y) + nd = len(y.shape) + Nsamps = y.shape[axis] + Ninterv = Nsamps-1 + n = 1 + k = 0 + while n < Ninterv: + n <<= 1 + k += 1 + if n != Ninterv: + raise ValueError("Number of samples must be one plus a " + "non-negative power of 2.") + + R = {} + slice_all = (slice(None),) * nd + slice0 = tupleset(slice_all, axis, 0) + slicem1 = tupleset(slice_all, axis, -1) + h = Ninterv * np.asarray(dx, dtype=float) + R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h + slice_R = slice_all + start = stop = step = Ninterv + for i in xrange(1, k+1): + start >>= 1 + slice_R = tupleset(slice_R, axis, slice(start, stop, step)) + step >>= 1 + R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis)) + for j in xrange(1, i+1): + prev = R[(i, j-1)] + R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1) + h /= 2.0 + + if show: + if not np.isscalar(R[(0, 0)]): + print("*** Printing table only supported for integrals" + + " of a single data set.") + else: + try: + precis = show[0] + except (TypeError, IndexError): + precis = 5 + try: + width = show[1] + except (TypeError, IndexError): + width = 8 + formstr = "%%%d.%df" % (width, precis) + + title = "Richardson Extrapolation Table for Romberg Integration" + print("", title.center(68), "=" * 68, sep="\n", end="\n") + for i in xrange(k+1): + for j in xrange(i+1): + print(formstr % R[(i, j)], end=" ") + print() + print("=" * 68) + print() + + return R[(k, k)] + +# Romberg quadratures for numeric integration. +# +# Written by Scott M. Ransom <ransom@cfa.harvard.edu> +# last revision: 14 Nov 98 +# +# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr> +# last revision: 1999-7-21 +# +# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org> +# last revision: Dec 2001 + + +def _difftrap(function, interval, numtraps): + """ + Perform part of the trapezoidal rule to integrate a function. + Assume that we had called difftrap with all lower powers-of-2 + starting with 1. Calling difftrap only returns the summation + of the new ordinates. It does _not_ multiply by the width + of the trapezoids. This must be performed by the caller. + 'function' is the function to evaluate (must accept vector arguments). + 'interval' is a sequence with lower and upper limits + of integration. + 'numtraps' is the number of trapezoids to use (must be a + power-of-2). 
+ """ + if numtraps <= 0: + raise ValueError("numtraps must be > 0 in difftrap().") + elif numtraps == 1: + return 0.5*(function(interval[0])+function(interval[1])) + else: + numtosum = numtraps/2 + h = float(interval[1]-interval[0])/numtosum + lox = interval[0] + 0.5 * h + points = lox + h * np.arange(numtosum) + s = np.sum(function(points), axis=0) + return s + + +def _romberg_diff(b, c, k): + """ + Compute the differences for the Romberg quadrature corrections. + See Forman Acton's "Real Computing Made Real," p 143. + """ + tmp = 4.0**k + return (tmp * c - b)/(tmp - 1.0) + + +def _printresmat(function, interval, resmat): + # Print the Romberg result matrix. + i = j = 0 + print('Romberg integration of', repr(function), end=' ') + print('from', interval) + print('') + print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results')) + for i in xrange(len(resmat)): + print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ') + for j in xrange(i+1): + print('%9f' % (resmat[i][j]), end=' ') + print('') + print('') + print('The final result is', resmat[i][j], end=' ') + print('after', 2**(len(resmat)-1)+1, 'function evaluations.') + + +def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False, + divmax=10, vec_func=False): + """ + Romberg integration of a callable function or method. + + Returns the integral of `function` (a function of one variable) + over the interval (`a`, `b`). + + If `show` is 1, the triangular array of the intermediate results + will be printed. If `vec_func` is True (default is False), then + `function` is assumed to support vector arguments. + + Parameters + ---------- + function : callable + Function to be integrated. + a : float + Lower limit of integration. + b : float + Upper limit of integration. + + Returns + ------- + results : float + Result of the integration. + + Other Parameters + ---------------- + args : tuple, optional + Extra arguments to pass to function. Each element of `args` will + be passed as a single argument to `func`. Default is to pass no + extra arguments. + tol, rtol : float, optional + The desired absolute and relative tolerances. Defaults are 1.48e-8. + show : bool, optional + Whether to print the results. Default is False. + divmax : int, optional + Maximum order of extrapolation. Default is 10. + vec_func : bool, optional + Whether `func` handles arrays as arguments (i.e whether it is a + "vector" function). Default is False. + + See Also + -------- + fixed_quad : Fixed-order Gaussian quadrature. + quad : Adaptive quadrature using QUADPACK. + dblquad : Double integrals. + tplquad : Triple integrals. + romb : Integrators for sampled data. + simps : Integrators for sampled data. + cumtrapz : Cumulative integration for sampled data. + ode : ODE integrator. + odeint : ODE integrator. + + References + ---------- + .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method + + Examples + -------- + Integrate a gaussian from 0 to 1 and compare to the error function. 
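+
+    Since ``erf(1) = (2/sqrt(pi)) * int_0^1 exp(-x**2) dx``, the expected
+    value of this integral is ``erf(1)/2``, roughly 0.421350.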
+
+    >>> from scipy import integrate
+    >>> from scipy.special import erf
+    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
+    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
+    Romberg integration of <function vfunc at ...> from [0, 1]
+
+    ::
+
+       Steps  StepSize  Results
+           1  1.000000  0.385872
+           2  0.500000  0.412631  0.421551
+           4  0.250000  0.419184  0.421368  0.421356
+           8  0.125000  0.420810  0.421352  0.421350  0.421350
+          16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
+          32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350
+
+    The final result is 0.421350396475 after 33 function evaluations.
+
+    >>> print("%g %g" % (2*result, erf(1)))
+    0.842701 0.842701
+
+    """
+    if np.isinf(a) or np.isinf(b):
+        raise ValueError("Romberg integration only available "
+                         "for finite limits.")
+    vfunc = vectorize1(function, args, vec_func=vec_func)
+    n = 1
+    interval = [a, b]
+    intrange = b - a
+    ordsum = _difftrap(vfunc, interval, n)
+    result = intrange * ordsum
+    resmat = [[result]]
+    err = np.inf
+    last_row = resmat[0]
+    for i in xrange(1, divmax+1):
+        n *= 2
+        ordsum += _difftrap(vfunc, interval, n)
+        row = [intrange * ordsum / n]
+        for k in xrange(i):
+            row.append(_romberg_diff(last_row[k], row[k], k+1))
+        result = row[i]
+        lastresult = last_row[i-1]
+        if show:
+            resmat.append(row)
+        err = abs(result - lastresult)
+        if err < tol or err < rtol * abs(result):
+            break
+        last_row = row
+    else:
+        warnings.warn(
+            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
+            AccuracyWarning)
+
+    if show:
+        _printresmat(vfunc, interval, resmat)
+    return result
+
+
+# Coefficients for Newton-Cotes quadrature
+#
+# These are the points being used
+#  to construct the local interpolating polynomial
+#  a are the weights for Newton-Cotes integration
+#  B is the error coefficient.
+#  error in these coefficients grows as N gets larger.
+#  or as samples are closer and closer together
+
+# You can use maxima to find these rational coefficients
+#  for equally spaced data using the commands
+#  a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
+#  Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
+#  Bo(N) := N^(N+1)/(N+1)!
* (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N)); +# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N)); +# +# pre-computed for equally-spaced weights +# +# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N] +# +# a = num_a*array(int_a)/den_a +# B = num_B*1.0 / den_B +# +# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*) +# where k = N // 2 +# +_builtincoeffs = { + 1: (1,2,[1,1],-1,12), + 2: (1,3,[1,4,1],-1,90), + 3: (3,8,[1,3,3,1],-3,80), + 4: (2,45,[7,32,12,32,7],-8,945), + 5: (5,288,[19,75,50,50,75,19],-275,12096), + 6: (1,140,[41,216,27,272,27,216,41],-9,1400), + 7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400), + 8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989], + -2368,467775), + 9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080, + 15741,2857], -4671, 394240), + 10: (5,299376,[16067,106300,-48525,272400,-260550,427368, + -260550,272400,-48525,106300,16067], + -673175, 163459296), + 11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542, + 15493566,15493566,-9595542,25226685,-3237113, + 13486539,2171465], -2224234463, 237758976000), + 12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295, + 87516288,-87797136,87516288,-51491295,35725120, + -7587864,9903168,1364651], -3012, 875875), + 13: (13, 402361344000,[8181904909, 56280729661, -31268252574, + 156074417954,-151659573325,206683437987, + -43111992612,-43111992612,206683437987, + -151659573325,156074417954,-31268252574, + 56280729661,8181904909], -2639651053, + 344881152000), + 14: (7, 2501928000, [90241897,710986864,-770720657,3501442784, + -6625093363,12630121616,-16802270373,19534438464, + -16802270373,12630121616,-6625093363,3501442784, + -770720657,710986864,90241897], -3740727473, + 1275983280000) + } + + +def newton_cotes(rn, equal=0): + r""" + Return weights and error coefficient for Newton-Cotes integration. + + Suppose we have (N+1) samples of f at the positions + x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the + integral between x_0 and x_N is: + + :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i) + + B_N (\Delta x)^{N+2} f^{N+1} (\xi)` + + where :math:`\xi \in [x_0,x_N]` + and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing. + + If the samples are equally-spaced and N is even, then the error + term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`. + + Parameters + ---------- + rn : int + The integer order for equally-spaced data or the relative positions of + the samples with the first sample at 0 and the last at N, where N+1 is + the length of `rn`. N is the order of the Newton-Cotes integration. + equal : int, optional + Set to 1 to enforce equally spaced data. + + Returns + ------- + an : ndarray + 1-D array of weights to apply to the function at the provided sample + positions. + B : float + Error coefficient. + + Examples + -------- + Compute the integral of sin(x) in [0, :math:`\pi`]: + + >>> from scipy.integrate import newton_cotes + >>> def f(x): + ... return np.sin(x) + >>> a = 0 + >>> b = np.pi + >>> exact = 2 + >>> for N in [2, 4, 6, 8, 10]: + ... x = np.linspace(a, b, N + 1) + ... an, B = newton_cotes(N, 1) + ... dx = (b - a) / N + ... quad = dx * np.sum(an * f(x)) + ... error = abs(quad - exact) + ... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error)) + ... 
+ 2 2.094395102 9.43951e-02 + 4 1.998570732 1.42927e-03 + 6 2.000017814 1.78136e-05 + 8 1.999999835 1.64725e-07 + 10 2.000000001 1.14677e-09 + + Notes + ----- + Normally, the Newton-Cotes rules are used on smaller integration + regions and a composite rule is used to return the total integral. + + """ + try: + N = len(rn)-1 + if equal: + rn = np.arange(N+1) + elif np.all(np.diff(rn) == 1): + equal = 1 + except Exception: + N = rn + rn = np.arange(N+1) + equal = 1 + + if equal and N in _builtincoeffs: + na, da, vi, nb, db = _builtincoeffs[N] + an = na * np.array(vi, dtype=float) / da + return an, float(nb)/db + + if (rn[0] != 0) or (rn[-1] != N): + raise ValueError("The sample positions must start at 0" + " and end at N") + yi = rn / float(N) + ti = 2 * yi - 1 + nvec = np.arange(N+1) + C = ti ** nvec[:, np.newaxis] + Cinv = np.linalg.inv(C) + # improve precision of result + for i in range(2): + Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv) + vec = 2.0 / (nvec[::2]+1) + ai = Cinv[:, ::2].dot(vec) * (N / 2.) + + if (N % 2 == 0) and equal: + BN = N/(N+3.) + power = N+2 + else: + BN = N/(N+2.) + power = N+1 + + BN = BN - np.dot(yi**power, ai) + p1 = power+1 + fac = power*math.log(N) - gammaln(p1) + fac = math.exp(fac) + return ai, BN*fac diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/quadrature.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/quadrature.pyc new file mode 100644 index 0000000..bdaef68 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/quadrature.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/setup.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/setup.py new file mode 100644 index 0000000..b0b530b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/setup.py @@ -0,0 +1,102 @@ +from __future__ import division, print_function, absolute_import + +import os +from os.path import join + +from scipy._build_utils import numpy_nodepr_api + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + from scipy._build_utils.system_info import get_info + config = Configuration('integrate', parent_package, top_path) + + # Get a local copy of lapack_opt_info + lapack_opt = dict(get_info('lapack_opt',notfound_action=2)) + # Pop off the libraries list so it can be combined with + # additional required libraries + lapack_libs = lapack_opt.pop('libraries', []) + + mach_src = [join('mach','*.f')] + quadpack_src = [join('quadpack', '*.f')] + lsoda_src = [join('odepack', fn) for fn in [ + 'blkdta000.f', 'bnorm.f', 'cfode.f', + 'ewset.f', 'fnorm.f', 'intdy.f', + 'lsoda.f', 'prja.f', 'solsy.f', 'srcma.f', + 'stoda.f', 'vmnorm.f', 'xerrwv.f', 'xsetf.f', + 'xsetun.f']] + vode_src = [join('odepack', 'vode.f'), join('odepack', 'zvode.f')] + dop_src = [join('dop','*.f')] + quadpack_test_src = [join('tests','_test_multivariate.c')] + odeint_banded_test_src = [join('tests', 'banded5x5.f')] + + config.add_library('mach', sources=mach_src, + config_fc={'noopt':(__file__,1)}) + config.add_library('quadpack', sources=quadpack_src) + config.add_library('lsoda', sources=lsoda_src) + config.add_library('vode', sources=vode_src) + config.add_library('dop', sources=dop_src) + + # Extensions + # quadpack: + include_dirs = [join(os.path.dirname(__file__), '..', '_lib', 'src')] + if 'include_dirs' in lapack_opt: + lapack_opt = dict(lapack_opt) + include_dirs.extend(lapack_opt.pop('include_dirs')) + + config.add_extension('_quadpack', + 
sources=['_quadpackmodule.c'], + libraries=['quadpack', 'mach'] + lapack_libs, + depends=(['__quadpack.h'] + + quadpack_src + mach_src), + include_dirs=include_dirs, + **lapack_opt) + + # odepack/lsoda-odeint + odepack_opts = lapack_opt.copy() + odepack_opts.update(numpy_nodepr_api) + config.add_extension('_odepack', + sources=['_odepackmodule.c'], + libraries=['lsoda', 'mach'] + lapack_libs, + depends=(lsoda_src + mach_src), + **odepack_opts) + + # vode + config.add_extension('vode', + sources=['vode.pyf'], + libraries=['vode'] + lapack_libs, + depends=vode_src, + **lapack_opt) + + # lsoda + config.add_extension('lsoda', + sources=['lsoda.pyf'], + libraries=['lsoda', 'mach'] + lapack_libs, + depends=(lsoda_src + mach_src), + **lapack_opt) + + # dop + config.add_extension('_dop', + sources=['dop.pyf'], + libraries=['dop'], + depends=dop_src) + + config.add_extension('_test_multivariate', + sources=quadpack_test_src) + + # Fortran+f2py extension module for testing odeint. + config.add_extension('_test_odeint_banded', + sources=odeint_banded_test_src, + libraries=['lsoda', 'mach'] + lapack_libs, + depends=(lsoda_src + mach_src), + **lapack_opt) + + config.add_subpackage('_ivp') + + config.add_data_dir('tests') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/setup.pyc new file mode 100644 index 0000000..cee59b5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/__init__.pyc new file mode 100644 index 0000000..bf4ca35 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/_test_multivariate.c b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/_test_multivariate.c new file mode 100644 index 0000000..878e247 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/_test_multivariate.c @@ -0,0 +1,140 @@ +#include <Python.h> + +#include "math.h" + +const double PI = 3.141592653589793238462643383279502884; + +static double +_multivariate_typical(int n, double *args) +{ + return cos(args[1] * args[0] - args[2] * sin(args[0])) / PI; +} + +static double +_multivariate_indefinite(int n, double *args) +{ + return -exp(-args[0]) * log(args[0]); +} + +static double +_multivariate_sin(int n, double *args) +{ + return sin(args[0]); +} + +static double +_sin_0(double x, void *user_data) +{ + return sin(x); +} + +static double +_sin_1(int ndim, double *x, void *user_data) +{ + return sin(x[0]); +} + +static double +_sin_2(double x) +{ + return sin(x); +} + +static double +_sin_3(int ndim, double *x) +{ + return sin(x[0]); +} + + +typedef struct { + char *name; + void *ptr; +} routine_t; + + +static const routine_t routines[] = { + {"_multivariate_typical", &_multivariate_typical}, + {"_multivariate_indefinite", &_multivariate_indefinite}, + {"_multivariate_sin", &_multivariate_sin}, + {"_sin_0", &_sin_0}, + 
{"_sin_1", &_sin_1}, + {"_sin_2", &_sin_2}, + {"_sin_3", &_sin_3} +}; + + +static int create_pointers(PyObject *module) +{ + PyObject *d, *obj = NULL; + int i; + + d = PyModule_GetDict(module); + if (d == NULL) { + goto fail; + } + + for (i = 0; i < sizeof(routines) / sizeof(routine_t); ++i) { + obj = PyLong_FromVoidPtr(routines[i].ptr); + if (obj == NULL) { + goto fail; + } + + if (PyDict_SetItemString(d, routines[i].name, obj)) { + goto fail; + } + + Py_DECREF(obj); + obj = NULL; + } + + Py_XDECREF(obj); + return 0; + +fail: + Py_XDECREF(obj); + return -1; +} + + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_test_multivariate", + NULL, + -1, + NULL, /* Empty methods section */ + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC +PyInit__test_multivariate(void) +{ + PyObject *m; + m = PyModule_Create(&moduledef); + if (m == NULL) { + return NULL; + } + if (create_pointers(m)) { + Py_DECREF(m); + return NULL; + } + return m; +} + +#else + +PyMODINIT_FUNC +init_test_multivariate(void) +{ + PyObject *m; + m = Py_InitModule("_test_multivariate", NULL); + if (m == NULL) { + return; + } + create_pointers(m); +} +#endif diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/banded5x5.f b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/banded5x5.f new file mode 100644 index 0000000..8a56593 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/banded5x5.f @@ -0,0 +1,240 @@ +c banded5x5.f +c +c This Fortran library contains implementations of the +c differential equation +c dy/dt = A*y +c where A is a 5x5 banded matrix (see below for the actual +c values). These functions will be used to test +c scipy.integrate.odeint. +c +c The idea is to solve the system two ways: pure Fortran, and +c using odeint. The "pure Fortran" solver is implemented in +c the subroutine banded5x5_solve below. It calls LSODA to +c solve the system. +c +c To solve the same system using odeint, the functions in this +c file are given a python wrapper using f2py. Then the code +c in test_odeint_jac.py uses the wrapper to implement the +c equation and Jacobian functions required by odeint. Because +c those functions ultimately call the Fortran routines defined +c in this file, the two method (pure Fortran and odeint) should +c produce exactly the same results. (That's assuming floating +c point calculations are deterministic, which can be an +c incorrect assumption.) If we simply re-implemented the +c equation and Jacobian functions using just python and numpy, +c the floating point calculations would not be performed in +c the same sequence as in the Fortran code, and we would obtain +c different answers. The answer for either method would be +c numerically "correct", but the errors would be different, +c and the counts of function and Jacobian evaluations would +c likely be different. +c + block data jacobian + implicit none + + double precision bands + dimension bands(4,5) + common /jac/ bands + +c The data for a banded Jacobian stored in packed banded +c format. The full Jacobian is +c +c -1, 0.25, 0, 0, 0 +c 0.25, -5, 0.25, 0, 0 +c 0.10, 0.25, -25, 0.25, 0 +c 0, 0.10, 0.25, -125, 0.25 +c 0, 0, 0.10, 0.25, -625 +c +c The columns in the following layout of numbers are +c the upper diagonal, main diagonal and two lower diagonals +c (i.e. each row in the layout is a column of the packed +c banded Jacobian). The values 0.00D0 are in the "don't +c care" positions. 
+ + data bands/ + + 0.00D0, -1.0D0, 0.25D0, 0.10D0, + + 0.25D0, -5.0D0, 0.25D0, 0.10D0, + + 0.25D0, -25.0D0, 0.25D0, 0.10D0, + + 0.25D0, -125.0D0, 0.25D0, 0.00D0, + + 0.25D0, -625.0D0, 0.00D0, 0.00D0 + + / + + end + + subroutine getbands(jac) + double precision jac + dimension jac(4, 5) +cf2py intent(out) jac + + double precision bands + dimension bands(4,5) + common /jac/ bands + + integer i, j + do 5 i = 1, 4 + do 5 j = 1, 5 + jac(i, j) = bands(i, j) + 5 continue + + return + end + +c +c Differential equations, right-hand-side +c + subroutine banded5x5(n, t, y, f) + implicit none + integer n + double precision t, y, f + dimension y(n), f(n) + + double precision bands + dimension bands(4,5) + common /jac/ bands + + f(1) = bands(2,1)*y(1) + bands(1,2)*y(2) + f(2) = bands(3,1)*y(1) + bands(2,2)*y(2) + bands(1,3)*y(3) + f(3) = bands(4,1)*y(1) + bands(3,2)*y(2) + bands(2,3)*y(3) + + + bands(1,4)*y(4) + f(4) = bands(4,2)*y(2) + bands(3,3)*y(3) + bands(2,4)*y(4) + + + bands(1,5)*y(5) + f(5) = bands(4,3)*y(3) + bands(3,4)*y(4) + bands(2,5)*y(5) + + return + end + +c +c Jacobian +c +c The subroutine assumes that the full Jacobian is to be computed. +c ml and mu are ignored, and nrowpd is assumed to be n. +c + subroutine banded5x5_jac(n, t, y, ml, mu, jac, nrowpd) + implicit none + integer n, ml, mu, nrowpd + double precision t, y, jac + dimension y(n), jac(nrowpd, n) + + integer i, j + + double precision bands + dimension bands(4,5) + common /jac/ bands + + do 15 i = 1, 4 + do 15 j = 1, 5 + if ((i - j) .gt. 0) then + jac(i - j, j) = bands(i, j) + end if +15 continue + + return + end + +c +c Banded Jacobian +c +c ml = 2, mu = 1 +c + subroutine banded5x5_bjac(n, t, y, ml, mu, bjac, nrowpd) + implicit none + integer n, ml, mu, nrowpd + double precision t, y, bjac + dimension y(5), bjac(nrowpd, n) + + integer i, j + + double precision bands + dimension bands(4,5) + common /jac/ bands + + do 20 i = 1, 4 + do 20 j = 1, 5 + bjac(i, j) = bands(i, j) + 20 continue + + return + end + + + subroutine banded5x5_solve(y, nsteps, dt, jt, nst, nfe, nje) + +c jt is the Jacobian type: +c jt = 1 Use the full Jacobian. +c jt = 4 Use the banded Jacobian. +c nst, nfe and nje are outputs: +c nst: Total number of internal steps +c nfe: Total number of function (i.e. right-hand-side) +c evaluations +c nje: Total number of Jacobian evaluations + + implicit none + + external banded5x5 + external banded5x5_jac + external banded5x5_bjac + external LSODA + +c Arguments... + double precision y, dt + integer nsteps, jt, nst, nfe, nje +cf2py intent(inout) y +cf2py intent(in) nsteps, dt, jt +cf2py intent(out) nst, nfe, nje + +c Local variables... + double precision atol, rtol, t, tout, rwork + integer iwork + dimension y(5), rwork(500), iwork(500) + integer neq, i + integer itol, iopt, itask, istate, lrw, liw + +c Common block... + double precision jacband + dimension jacband(4,5) + common /jac/ jacband + +c --- t range --- + t = 0.0D0 + +c --- Solver tolerances --- + rtol = 1.0D-11 + atol = 1.0D-13 + itol = 1 + +c --- Other LSODA parameters --- + neq = 5 + itask = 1 + istate = 1 + iopt = 0 + iwork(1) = 2 + iwork(2) = 1 + lrw = 500 + liw = 500 + +c --- Call LSODA in a loop to compute the solution --- + do 40 i = 1, nsteps + tout = i*dt + if (jt .eq. 
1) then + call LSODA(banded5x5, neq, y, t, tout, + & itol, rtol, atol, itask, istate, iopt, + & rwork, lrw, iwork, liw, + & banded5x5_jac, jt) + else + call LSODA(banded5x5, neq, y, t, tout, + & itol, rtol, atol, itask, istate, iopt, + & rwork, lrw, iwork, liw, + & banded5x5_bjac, jt) + end if + 40 if (istate .lt. 0) goto 80 + + nst = iwork(11) + nfe = iwork(12) + nje = iwork(13) + + return + + 80 write (6,89) istate + 89 format(1X,"Error: istate=",I3) + return + end diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py new file mode 100644 index 0000000..ec8a191 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py @@ -0,0 +1,224 @@ + +from __future__ import division, print_function, absolute_import + +import itertools +import numpy as np +from numpy.testing import assert_allclose +from scipy.integrate import ode + + +def _band_count(a): + """Returns ml and mu, the lower and upper band sizes of a.""" + nrows, ncols = a.shape + ml = 0 + for k in range(-nrows+1, 0): + if np.diag(a, k).any(): + ml = -k + break + mu = 0 + for k in range(nrows-1, 0, -1): + if np.diag(a, k).any(): + mu = k + break + return ml, mu + + +def _linear_func(t, y, a): + """Linear system dy/dt = a * y""" + return a.dot(y) + + +def _linear_jac(t, y, a): + """Jacobian of a * y is a.""" + return a + + +def _linear_banded_jac(t, y, a): + """Banded Jacobian.""" + ml, mu = _band_count(a) + bjac = [] + for k in range(mu, 0, -1): + bjac.append(np.r_[[0] * k, np.diag(a, k)]) + bjac.append(np.diag(a)) + for k in range(-1, -ml-1, -1): + bjac.append(np.r_[np.diag(a, k), [0] * (-k)]) + return bjac + + +def _solve_linear_sys(a, y0, tend=1, dt=0.1, + solver=None, method='bdf', use_jac=True, + with_jacobian=False, banded=False): + """Use scipy.integrate.ode to solve a linear system of ODEs. + + a : square ndarray + Matrix of the linear system to be solved. + y0 : ndarray + Initial condition + tend : float + Stop time. + dt : float + Step size of the output. + solver : str + If not None, this must be "vode", "lsoda" or "zvode". + method : str + Either "bdf" or "adams". + use_jac : bool + Determines if the jacobian function is passed to ode(). + with_jacobian : bool + Passed to ode.set_integrator(). + banded : bool + Determines whether a banded or full jacobian is used. + If `banded` is True, `lband` and `uband` are determined by the + values in `a`. + """ + if banded: + lband, uband = _band_count(a) + else: + lband = None + uband = None + + if use_jac: + if banded: + r = ode(_linear_func, _linear_banded_jac) + else: + r = ode(_linear_func, _linear_jac) + else: + r = ode(_linear_func) + + if solver is None: + if np.iscomplexobj(a): + solver = "zvode" + else: + solver = "vode" + + r.set_integrator(solver, + with_jacobian=with_jacobian, + method=method, + lband=lband, uband=uband, + rtol=1e-9, atol=1e-10, + ) + t0 = 0 + r.set_initial_value(y0, t0) + r.set_f_params(a) + r.set_jac_params(a) + + t = [t0] + y = [y0] + while r.successful() and r.t < tend: + r.integrate(r.t + dt) + t.append(r.t) + y.append(r.y) + + t = np.array(t) + y = np.array(y) + return t, y + + +def _analytical_solution(a, y0, t): + """ + Analytical solution to the linear differential equations dy/dt = a*y. + + The solution is only valid if `a` is diagonalizable. + + Returns a 2-d array with shape (len(t), len(y0)). 
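+
+    Concretely, writing a = V diag(lam) V^{-1} (eigendecomposition), the
+    solution evaluated below is y(t) = V diag(exp(lam*t)) V^{-1} y0.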
+ """ + lam, v = np.linalg.eig(a) + c = np.linalg.solve(v, y0) + e = c * np.exp(lam * t.reshape(-1, 1)) + sol = e.dot(v.T) + return sol + + +def test_banded_ode_solvers(): + # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class + # with a system that has a banded Jacobian matrix. + + t_exact = np.linspace(0, 1.0, 5) + + # --- Real arrays for testing the "lsoda" and "vode" solvers --- + + # lband = 2, uband = 1: + a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0], + [0.2, -0.5, 0.9, 0.0, 0.0], + [0.1, 0.1, -0.4, 0.1, 0.0], + [0.0, 0.3, -0.1, -0.9, -0.3], + [0.0, 0.0, 0.1, 0.1, -0.7]]) + + # lband = 0, uband = 1: + a_real_upper = np.triu(a_real) + + # lband = 2, uband = 0: + a_real_lower = np.tril(a_real) + + # lband = 0, uband = 0: + a_real_diag = np.triu(a_real_lower) + + real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag] + real_solutions = [] + + for a in real_matrices: + y0 = np.arange(1, a.shape[0] + 1) + y_exact = _analytical_solution(a, y0, t_exact) + real_solutions.append((y0, t_exact, y_exact)) + + def check_real(idx, solver, meth, use_jac, with_jac, banded): + a = real_matrices[idx] + y0, t_exact, y_exact = real_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(real_matrices)): + p = [['vode', 'lsoda'], # solver + ['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for solver, meth, use_jac, with_jac, banded in itertools.product(*p): + check_real(idx, solver, meth, use_jac, with_jac, banded) + + # --- Complex arrays for testing the "zvode" solver --- + + # complex, lband = 2, uband = 1: + a_complex = a_real - 0.5j * a_real + + # complex, lband = 0, uband = 0: + a_complex_diag = np.diag(np.diag(a_complex)) + + complex_matrices = [a_complex, a_complex_diag] + complex_solutions = [] + + for a in complex_matrices: + y0 = np.arange(1, a.shape[0] + 1) + 1j + y_exact = _analytical_solution(a, y0, t_exact) + complex_solutions.append((y0, t_exact, y_exact)) + + def check_complex(idx, solver, meth, use_jac, with_jac, banded): + a = complex_matrices[idx] + y0, t_exact, y_exact = complex_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(complex_matrices)): + p = [['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for meth, use_jac, with_jac, banded in itertools.product(*p): + check_complex(idx, "zvode", meth, use_jac, with_jac, banded) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_banded_ode_solvers.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_banded_ode_solvers.pyc new file mode 100644 index 0000000..fb595da Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_banded_ode_solvers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_bvp.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_bvp.py new file mode 100644 index 0000000..651bbc7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_bvp.py @@ 
-0,0 +1,553 @@ +from __future__ import division, print_function, absolute_import + +import sys + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +import numpy as np +from numpy.testing import (assert_, assert_array_equal, assert_allclose, + assert_equal) +from pytest import raises as assert_raises + +from scipy.sparse import coo_matrix +from scipy.special import erf +from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac, + estimate_bc_jac, compute_jac_indices, + construct_global_jac, solve_bvp) + + +def exp_fun(x, y): + return np.vstack((y[1], y[0])) + + +def exp_fun_jac(x, y): + df_dy = np.empty((2, 2, x.shape[0])) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = 1 + df_dy[1, 1] = 0 + return df_dy + + +def exp_bc(ya, yb): + return np.hstack((ya[0] - 1, yb[0])) + + +def exp_bc_complex(ya, yb): + return np.hstack((ya[0] - 1 - 1j, yb[0])) + + +def exp_bc_jac(ya, yb): + dbc_dya = np.array([ + [1, 0], + [0, 0] + ]) + dbc_dyb = np.array([ + [0, 0], + [1, 0] + ]) + return dbc_dya, dbc_dyb + + +def exp_sol(x): + return (np.exp(-x) - np.exp(x - 2)) / (1 - np.exp(-2)) + + +def sl_fun(x, y, p): + return np.vstack((y[1], -p[0]**2 * y[0])) + + +def sl_fun_jac(x, y, p): + n, m = y.shape + df_dy = np.empty((n, 2, m)) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = -p[0]**2 + df_dy[1, 1] = 0 + + df_dp = np.empty((n, 1, m)) + df_dp[0, 0] = 0 + df_dp[1, 0] = -2 * p[0] * y[0] + + return df_dy, df_dp + + +def sl_bc(ya, yb, p): + return np.hstack((ya[0], yb[0], ya[1] - p[0])) + + +def sl_bc_jac(ya, yb, p): + dbc_dya = np.zeros((3, 2)) + dbc_dya[0, 0] = 1 + dbc_dya[2, 1] = 1 + + dbc_dyb = np.zeros((3, 2)) + dbc_dyb[1, 0] = 1 + + dbc_dp = np.zeros((3, 1)) + dbc_dp[2, 0] = -1 + + return dbc_dya, dbc_dyb, dbc_dp + + +def sl_sol(x, p): + return np.sin(p[0] * x) + + +def emden_fun(x, y): + return np.vstack((y[1], -y[0]**5)) + + +def emden_fun_jac(x, y): + df_dy = np.empty((2, 2, x.shape[0])) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = -5 * y[0]**4 + df_dy[1, 1] = 0 + return df_dy + + +def emden_bc(ya, yb): + return np.array([ya[1], yb[0] - (3/4)**0.5]) + + +def emden_bc_jac(ya, yb): + dbc_dya = np.array([ + [0, 1], + [0, 0] + ]) + dbc_dyb = np.array([ + [0, 0], + [1, 0] + ]) + return dbc_dya, dbc_dyb + + +def emden_sol(x): + return (1 + x**2/3)**-0.5 + + +def undefined_fun(x, y): + return np.zeros_like(y) + + +def undefined_bc(ya, yb): + return np.array([ya[0], yb[0] - 1]) + + +def big_fun(x, y): + f = np.zeros_like(y) + f[::2] = y[1::2] + return f + + +def big_bc(ya, yb): + return np.hstack((ya[::2], yb[::2] - 1)) + + +def big_sol(x, n): + y = np.ones((2 * n, x.size)) + y[::2] = x + return x + + +def shock_fun(x, y): + eps = 1e-3 + return np.vstack(( + y[1], + -(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) + + np.pi * x * np.sin(np.pi * x)) / eps + )) + + +def shock_bc(ya, yb): + return np.array([ya[0] + 2, yb[0]]) + + +def shock_sol(x): + eps = 1e-3 + k = np.sqrt(2 * eps) + return np.cos(np.pi * x) + erf(x / k) / erf(1 / k) + + +def test_modify_mesh(): + x = np.array([0, 1, 3, 9], dtype=float) + x_new = modify_mesh(x, np.array([0]), np.array([2])) + assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9])) + + x = np.array([-6, -3, 0, 3, 6], dtype=float) + x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3])) + assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6]) + + +def test_compute_fun_jac(): + x = np.linspace(0, 1, 5) + y = np.empty((2, x.shape[0])) + y[0] = 0.01 + y[1] = 0.02 + p = np.array([]) + df_dy, 
df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p) + df_dy_an = exp_fun_jac(x, y) + assert_allclose(df_dy, df_dy_an) + assert_(df_dp is None) + + x = np.linspace(0, np.pi, 5) + y = np.empty((2, x.shape[0])) + y[0] = np.sin(x) + y[1] = np.cos(x) + p = np.array([1.0]) + df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p) + df_dy_an, df_dp_an = sl_fun_jac(x, y, p) + assert_allclose(df_dy, df_dy_an) + assert_allclose(df_dp, df_dp_an) + + x = np.linspace(0, 1, 10) + y = np.empty((2, x.shape[0])) + y[0] = (3/4)**0.5 + y[1] = 1e-4 + p = np.array([]) + df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p) + df_dy_an = emden_fun_jac(x, y) + assert_allclose(df_dy, df_dy_an) + assert_(df_dp is None) + + +def test_compute_bc_jac(): + ya = np.array([-1.0, 2]) + yb = np.array([0.5, 3]) + p = np.array([]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac( + lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p) + dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_(dbc_dp is None) + + ya = np.array([0.0, 1]) + yb = np.array([0.0, -1]) + p = np.array([0.5]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p) + dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_allclose(dbc_dp, dbc_dp_an) + + ya = np.array([0.5, 100]) + yb = np.array([-1000, 10.5]) + p = np.array([]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac( + lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p) + dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_(dbc_dp is None) + + +def test_compute_jac_indices(): + n = 2 + m = 4 + k = 2 + i, j = compute_jac_indices(n, m, k) + s = coo_matrix((np.ones_like(i), (i, j))).toarray() + s_true = np.array([ + [1, 1, 1, 1, 0, 0, 0, 0, 1, 1], + [1, 1, 1, 1, 0, 0, 0, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + ]) + assert_array_equal(s, s_true) + + +def test_compute_global_jac(): + n = 2 + m = 5 + k = 1 + i_jac, j_jac = compute_jac_indices(2, 5, 1) + x = np.linspace(0, 1, 5) + h = np.diff(x) + y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x))) + p = np.array([3.0]) + + f = sl_fun(x, y, p) + + x_middle = x[:-1] + 0.5 * h + y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1]) + + df_dy, df_dp = sl_fun_jac(x, y, p) + df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p) + dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p) + + J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, + df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp) + J = J.toarray() + + def J_block(h, p): + return np.array([ + [h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h], + [0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12] + ]) + + J_true = np.zeros((m * n + k, m * n + k)) + for i in range(m - 1): + J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p) + + J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:]) + J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) + + h**2/6 * (y[1, :-1] - y[1, 1:])) + + J_true[8, 0] = 1 + J_true[9, 8] = 1 + J_true[10, 1] = 1 + J_true[10, 10] = -1 + + assert_allclose(J, J_true, rtol=1e-10) + + df_dy, 
df_dp = estimate_fun_jac(sl_fun, x, y, p) + df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p) + J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, + df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp) + J = J.toarray() + assert_allclose(J, J_true, rtol=1e-8, atol=1e-9) + + +def test_parameter_validation(): + x = [0, 1, 0.5] + y = np.zeros((2, 3)) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y) + + x = np.linspace(0, 1, 5) + y = np.zeros((2, 4)) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y) + + fun = lambda x, y, p: exp_fun(x, y) + bc = lambda ya, yb, p: exp_bc(ya, yb) + + y = np.zeros((2, x.shape[0])) + assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1]) + + def wrong_shape_fun(x, y): + return np.zeros(3) + + assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y) + + S = np.array([[0, 0]]) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S) + + +def test_no_params(): + x = np.linspace(0, 1, 5) + x_test = np.linspace(0, 1, 100) + y = np.zeros((2, x.shape[0])) + for fun_jac in [None, exp_fun_jac]: + for bc_jac in [None, exp_bc_jac]: + sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_equal(sol.x.size, 5) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5) + + f_test = exp_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res**2, axis=0)**0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_with_params(): + x = np.linspace(0, np.pi, 5) + x_test = np.linspace(0, np.pi, 100) + y = np.ones((2, x.shape[0])) + + for fun_jac in [None, sl_fun_jac]: + for bc_jac in [None, sl_bc_jac]: + sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 10) + + assert_allclose(sol.p, [1], rtol=1e-4) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0], sl_sol(x_test, [1]), + rtol=1e-4, atol=1e-4) + + f_test = sl_fun(x_test, sol_test, [1]) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_singular_term(): + x = np.linspace(0, 1, 10) + x_test = np.linspace(0.05, 1, 100) + y = np.empty((2, 10)) + y[0] = (3/4)**0.5 + y[1] = 1e-4 + S = np.array([[0, 0], [0, -2]]) + + for fun_jac in [None, emden_fun_jac]: + for bc_jac in [None, emden_bc_jac]: + sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_equal(sol.x.size, 10) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5) + + f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, 
rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_complex(): + # The test is essentially the same as test_no_params, but boundary + # conditions are turned into complex. + x = np.linspace(0, 1, 5) + x_test = np.linspace(0, 1, 100) + y = np.zeros((2, x.shape[0]), dtype=complex) + for fun_jac in [None, exp_fun_jac]: + for bc_jac in [None, exp_bc_jac]: + sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5) + assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5) + + f_test = exp_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), + axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_failures(): + x = np.linspace(0, 1, 2) + y = np.zeros((2, x.size)) + res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5) + assert_equal(res.status, 1) + assert_(not res.success) + + x = np.linspace(0, 1, 5) + y = np.zeros((2, x.size)) + res = solve_bvp(undefined_fun, undefined_bc, x, y) + assert_equal(res.status, 2) + assert_(not res.success) + + +def test_big_problem(): + n = 30 + x = np.linspace(0, 1, 5) + y = np.zeros((2 * n, x.size)) + sol = solve_bvp(big_fun, big_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + sol_test = sol.sol(x) + + assert_allclose(sol_test[0], big_sol(x, n)) + + f_test = big_fun(x, sol_test) + r = sol.sol(x, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_shock_layer(): + x = np.linspace(-1, 1, 5) + x_test = np.linspace(-1, 1, 100) + y = np.zeros((2, x.size)) + sol = solve_bvp(shock_fun, shock_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 110) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5) + + f_test = shock_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_verbose(): + # Smoke test that checks the printing does something and does not crash + x = np.linspace(0, 1, 5) + y = np.zeros((2, x.shape[0])) + for verbose in [0, 1, 2]: + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose) + text = sys.stdout.getvalue() + finally: + sys.stdout = old_stdout + + assert_(sol.success) + if verbose == 0: + assert_(not text, text) + if verbose >= 1: + assert_("Solved in" in text, text) + if verbose >= 2: + assert_("Max residual" in text, text) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_bvp.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_bvp.pyc new file mode 
100644 index 0000000..09ceb5a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_bvp.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_integrate.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_integrate.py new file mode 100644 index 0000000..22d5bba --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_integrate.py @@ -0,0 +1,835 @@ +# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers +""" +Tests for numerical integration. +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, + allclose) + +from scipy._lib._numpy_compat import _assert_warns +from scipy._lib.six import xrange + +from numpy.testing import ( + assert_, assert_array_almost_equal, + assert_allclose, assert_array_equal, assert_equal) +from pytest import raises as assert_raises +from scipy.integrate import odeint, ode, complex_ode + +#------------------------------------------------------------------------------ +# Test ODE integrators +#------------------------------------------------------------------------------ + + +class TestOdeint(object): + # Check integrate.odeint + + def _do_problem(self, problem): + t = arange(0.0, problem.stop_t, 0.05) + + # Basic case + z, infodict = odeint(problem.f, problem.z0, t, full_output=True) + assert_(problem.verify(z, t)) + + # Use tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + if hasattr(problem, 'jac'): + # Use Dfun + z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac, + full_output=True) + assert_(problem.verify(z, t)) + + # Use Dfun and tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + Dfun=lambda t, y: problem.jac(y, t), + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + def test_odeint(self): + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem) + + +class TestODEClass(object): + + ode_class = None # Set in subclass. 
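+
+    # Note: the `ode` class expects callbacks with signature f(t, y),
+    # while `odeint` uses f(y, t) (unless tfirst=True).  The problem
+    # classes below follow the odeint convention, so `_do_problem`
+    # wraps them, e.g.:
+    #
+    #     f = lambda t, z: problem.f(z, t)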
+ + def _do_problem(self, problem, integrator, method='adams'): + + # ode has callback arguments in different order than odeint + f = lambda t, z: problem.f(z, t) + jac = None + if hasattr(problem, 'jac'): + jac = lambda t, z: problem.jac(z, t) + + integrator_params = {} + if problem.lband is not None or problem.uband is not None: + integrator_params['uband'] = problem.uband + integrator_params['lband'] = problem.lband + + ig = self.ode_class(f, jac) + ig.set_integrator(integrator, + atol=problem.atol/10, + rtol=problem.rtol/10, + method=method, + **integrator_params) + + ig.set_initial_value(problem.z0, t=0.0) + z = ig.integrate(problem.stop_t) + + assert_array_equal(z, ig.y) + assert_(ig.successful(), (problem, method)) + assert_(ig.get_return_code() > 0, (problem, method)) + assert_(problem.verify(array([z]), problem.stop_t), (problem, method)) + + +class TestOde(TestODEClass): + + ode_class = ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + self._do_problem(problem, 'vode', 'bdf') + + def test_zvode(self): + # Check the zvode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'zvode', 'adams') + self._do_problem(problem, 'zvode', 'bdf') + + def test_lsoda(self): + # Check the lsoda solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + def test_concurrent_fail(self): + for sol in ('vode', 'zvode', 'lsoda'): + f = lambda t, y: 1.0 + + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_raises(RuntimeError, r.integrate, r.t + 0.1) + + def test_concurrent_ok(self): + f = lambda t, y: 1.0 + + for k in xrange(3): + for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'): + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.1) + assert_allclose(r2.y, 0.2) + + for sol in ('dopri5', 'dop853'): + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.3) + assert_allclose(r2.y, 0.2) + + +class TestComplexOde(TestODEClass): + + ode_class = complex_ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + else: + self._do_problem(problem, 'vode', 'bdf') + + def test_lsoda(self): + # Check the lsoda solver + for problem_cls in PROBLEMS: + 
problem = problem_cls() + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + +class TestSolout(object): + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_after_initial_test(self, integrator): + # Check if solout works even if it is set after the initial value. + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_initial_value(y0, t0) + ig.set_solout(solout) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout_after_initial(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_after_initial_test(integrator) + + def _run_solout_break_test(self, integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_(ts[-1] > tend/2.0) + assert_(ts[-1] < tend) + + def test_solout_break(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_break_test(integrator) + + +class TestComplexSolout(object): + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [1.0/(t - 10.0 - 1j)] + + ig = complex_ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_break_test(self, integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + + def 
rhs(t, y):
+            return [1.0/(t - 10.0 - 1j)]
+
+        ig = complex_ode(rhs).set_integrator(integrator)
+        ig.set_solout(solout)
+        ig.set_initial_value(y0, t0)
+        ret = ig.integrate(tend)
+        assert_array_equal(ys[0], y0)
+        assert_array_equal(ys[-1], ret)
+        assert_equal(ts[0], t0)
+        assert_(ts[-1] > tend/2.0)
+        assert_(ts[-1] < tend)
+
+    def test_solout_break(self):
+        for integrator in ('dopri5', 'dop853'):
+            self._run_solout_break_test(integrator)
+
+
+#------------------------------------------------------------------------------
+# Test problems
+#------------------------------------------------------------------------------
+
+
+class ODE:
+    """
+    ODE problem
+    """
+    stiff = False
+    cmplx = False
+    stop_t = 1
+    z0 = []
+
+    lband = None
+    uband = None
+
+    atol = 1e-6
+    rtol = 1e-5
+
+
+class SimpleOscillator(ODE):
+    r"""
+    Free vibration of a simple oscillator::
+        m \ddot{u} + k u = 0, u(0) = u_0, \dot{u}(0) = \dot{u}_0
+    Solution::
+        u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
+    """
+    stop_t = 1 + 0.09
+    z0 = array([1.0, 0.1], float)
+
+    k = 4.0
+    m = 1.0
+
+    def f(self, z, t):
+        tmp = zeros((2, 2), float)
+        tmp[0, 1] = 1.0
+        tmp[1, 0] = -self.k / self.m
+        return dot(tmp, z)
+
+    def verify(self, zs, t):
+        omega = sqrt(self.k / self.m)
+        u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
+        return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
+
+
+class ComplexExp(ODE):
+    r"""The equation :math:`\dot u = i u`"""
+    stop_t = 1.23*pi
+    z0 = exp([1j, 2j, 3j, 4j, 5j])
+    cmplx = True
+
+    def f(self, z, t):
+        return 1j*z
+
+    def jac(self, z, t):
+        return 1j*eye(5)
+
+    def verify(self, zs, t):
+        u = self.z0 * exp(1j*t)
+        return allclose(u, zs, atol=self.atol, rtol=self.rtol)
+
+
+class Pi(ODE):
+    r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
+    stop_t = 20
+    z0 = [0]
+    cmplx = True
+
+    def f(self, z, t):
+        return array([1./(t - 10 + 1j)])
+
+    def verify(self, zs, t):
+        u = -2j * np.arctan(10)
+        return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
+
+
+class CoupledDecay(ODE):
+    r"""
+    3 coupled decays suited for banded treatment
+    (banded treatment becomes necessary when N >> 3)
+    """
+
+    stiff = True
+    stop_t = 0.5
+    z0 = [5.0, 7.0, 13.0]
+    lband = 1
+    uband = 0
+
+    lmbd = [0.17, 0.23, 0.29]  # fictitious decay constants
+
+    def f(self, z, t):
+        lmbd = self.lmbd
+        return np.array([-lmbd[0]*z[0],
+                         -lmbd[1]*z[1] + lmbd[0]*z[0],
+                         -lmbd[2]*z[2] + lmbd[1]*z[1]])
+
+    def jac(self, z, t):
+        # The full Jacobian is
+        #
+        #    [-lmbd[0]    0        0     ]
+        #    [ lmbd[0]  -lmbd[1]   0     ]
+        #    [ 0          lmbd[1] -lmbd[2]]
+        #
+        # The lower and upper bandwidths are lband=1 and uband=0, resp.
+ # The representation of this array in packed format is + # + # [-lmbd[0] -lmbd[1] -lmbd[2]] + # [ lmbd[0] lmbd[1] 0 ] + + lmbd = self.lmbd + j = np.zeros((self.lband + self.uband + 1, 3), order='F') + + def set_j(ri, ci, val): + j[self.uband + ri - ci, ci] = val + set_j(0, 0, -lmbd[0]) + set_j(1, 0, lmbd[0]) + set_j(1, 1, -lmbd[1]) + set_j(2, 1, lmbd[1]) + set_j(2, 2, -lmbd[2]) + return j + + def verify(self, zs, t): + # Formulae derived by hand + lmbd = np.array(self.lmbd) + d10 = lmbd[1] - lmbd[0] + d21 = lmbd[2] - lmbd[1] + d20 = lmbd[2] - lmbd[0] + e0 = np.exp(-lmbd[0] * t) + e1 = np.exp(-lmbd[1] * t) + e2 = np.exp(-lmbd[2] * t) + u = np.vstack(( + self.z0[0] * e0, + self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1), + self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) + + lmbd[1] * lmbd[0] * self.z0[0] / d10 * + (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose() + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + + +PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay] + +#------------------------------------------------------------------------------ + + +def f(t, x): + dxdt = [x[1], -x[0]] + return dxdt + + +def jac(t, x): + j = array([[0.0, 1.0], + [-1.0, 0.0]]) + return j + + +def f1(t, x, omega): + dxdt = [omega*x[1], -omega*x[0]] + return dxdt + + +def jac1(t, x, omega): + j = array([[0.0, omega], + [-omega, 0.0]]) + return j + + +def f2(t, x, omega1, omega2): + dxdt = [omega1*x[1], -omega2*x[0]] + return dxdt + + +def jac2(t, x, omega1, omega2): + j = array([[0.0, omega1], + [-omega2, 0.0]]) + return j + + +def fv(t, x, omega): + dxdt = [omega[0]*x[1], -omega[1]*x[0]] + return dxdt + + +def jacv(t, x, omega): + j = array([[0.0, omega[0]], + [-omega[1], 0.0]]) + return j + + +class ODECheckParameterUse(object): + """Call an ode-class solver with several cases of parameter use.""" + + # solver_name must be set before tests can be run with this class. + + # Set these in subclasses. + solver_name = '' + solver_uses_jac = False + + def _get_solver(self, f, jac): + solver = ode(f, jac) + if self.solver_uses_jac: + solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7, + with_jacobian=self.solver_uses_jac) + else: + # XXX Shouldn't set_integrator *always* accept the keyword arg + # 'with_jacobian', and perhaps raise an exception if it is set + # to True if the solver can't actually use it? 
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7) + return solver + + def _check_solver(self, solver): + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + solver.integrate(pi) + assert_array_almost_equal(solver.y, [-1.0, 0.0]) + + def test_no_params(self): + solver = self._get_solver(f, jac) + self._check_solver(solver) + + def test_one_scalar_param(self): + solver = self._get_solver(f1, jac1) + omega = 1.0 + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + def test_two_scalar_params(self): + solver = self._get_solver(f2, jac2) + omega1 = 1.0 + omega2 = 1.0 + solver.set_f_params(omega1, omega2) + if self.solver_uses_jac: + solver.set_jac_params(omega1, omega2) + self._check_solver(solver) + + def test_vector_param(self): + solver = self._get_solver(fv, jacv) + omega = [1.0, 1.0] + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + def test_warns_on_failure(self): + # Set nsteps small to ensure failure + solver = self._get_solver(f, jac) + solver.set_integrator(self.solver_name, nsteps=1) + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + _assert_warns(UserWarning, solver.integrate, pi) + + +class TestDOPRI5CheckParameterUse(ODECheckParameterUse): + solver_name = 'dopri5' + solver_uses_jac = False + + +class TestDOP853CheckParameterUse(ODECheckParameterUse): + solver_name = 'dop853' + solver_uses_jac = False + + +class TestVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'vode' + solver_uses_jac = True + + +class TestZVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'zvode' + solver_uses_jac = True + + +class TestLSODACheckParameterUse(ODECheckParameterUse): + solver_name = 'lsoda' + solver_uses_jac = True + + +def test_odeint_trivial_time(): + # Test that odeint succeeds when given a single time point + # and full_output=True. This is a regression test for gh-4282. + y0 = 1 + t = [0] + y, info = odeint(lambda y, t: -y, y0, t, full_output=True) + assert_array_equal(y, np.array([[y0]])) + + +def test_odeint_banded_jacobian(): + # Test the use of the `Dfun`, `ml` and `mu` options of odeint. + + def func(y, t, c): + return c.dot(y) + + def jac(y, t, c): + return c + + def jac_transpose(y, t, c): + return c.T.copy(order='C') + + def bjac_rows(y, t, c): + jac = np.row_stack((np.r_[0, np.diag(c, 1)], + np.diag(c), + np.r_[np.diag(c, -1), 0], + np.r_[np.diag(c, -2), 0, 0])) + return jac + + def bjac_cols(y, t, c): + return bjac_rows(y, t, c).T.copy(order='C') + + c = array([[-205, 0.01, 0.00, 0.0], + [0.1, -2.50, 0.02, 0.0], + [1e-3, 0.01, -2.0, 0.01], + [0.00, 0.00, 0.1, -1.0]]) + + y0 = np.ones(4) + t = np.array([0, 5, 10, 100]) + + # Use the full Jacobian. + sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac) + + # Use the transposed full Jacobian, with col_deriv=True. + sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac_transpose, col_deriv=True) + + # Use the banded Jacobian. + sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_rows, ml=2, mu=1) + + # Use the transposed banded Jacobian, with col_deriv=True. 
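+    # (col_deriv=True indicates that Dfun returns derivatives down
+    # columns, i.e. the transposed Jacobian; this is why bjac_cols
+    # returns a contiguous copy of bjac_rows(...).T.)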
+ sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_cols, ml=2, mu=1, col_deriv=True) + + assert_allclose(sol1, sol2, err_msg="sol1 != sol2") + assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3") + assert_allclose(sol3, sol4, err_msg="sol3 != sol4") + + # Verify that the number of jacobian evaluations was the same for the + # calls of odeint with a full jacobian and with a banded jacobian. This is + # a regression test--there was a bug in the handling of banded jacobians + # that resulted in an incorrect jacobian matrix being passed to the LSODA + # code. That would cause errors or excessive jacobian evaluations. + assert_array_equal(info1['nje'], info2['nje']) + assert_array_equal(info3['nje'], info4['nje']) + + # Test the use of tfirst + sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,), + full_output=True, atol=1e-13, rtol=1e-11, + mxstep=10000, + Dfun=lambda t, y, c: jac(y, t, c), tfirst=True) + # The code should execute the exact same sequence of floating point + # calculations, so these should be exactly equal. We'll be safe and use + # a small tolerance. + assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty") + + +def test_odeint_errors(): + def sys1d(x, t): + return -100*x + + def bad1(x, t): + return 1.0/0 + + def bad2(x, t): + return "foo" + + def bad_jac1(x, t): + return 1.0/0 + + def bad_jac2(x, t): + return [["foo"]] + + def sys2d(x, t): + return [-100*x[0], -0.1*x[1]] + + def sys2d_bad_jac(x, t): + return [[1.0/0, 0], [0, -0.1]] + + assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1]) + assert_raises(ValueError, odeint, bad2, 1.0, [0, 1]) + + assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1) + assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2) + + assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1], + Dfun=sys2d_bad_jac) + + +def test_odeint_bad_shapes(): + # Tests of some errors that can occur with odeint. + + def badrhs(x, t): + return [1, -1] + + def sys1(x, t): + return -100*x + + def badjac(x, t): + return [[0, 0, 0]] + + # y0 must be at most 1-d. + bad_y0 = [[0, 0], [0, 0]] + assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1]) + + # t must be at most 1-d. + bad_t = [[0, 1], [2, 3]] + assert_raises(ValueError, odeint, sys1, [10.0], bad_t) + + # y0 is 10, but badrhs(x, t) returns [1, -1]. + assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1]) + + # shape of array returned by badjac(x, t) is not correct. + assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac) + + +def test_repeated_t_values(): + """Regression test for gh-8217.""" + + def func(x, t): + return -0.25*x + + t = np.zeros(10) + sol = odeint(func, [1.], t) + assert_array_equal(sol, np.ones((len(t), 1))) + + tau = 4*np.log(2) + t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau] + sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12) + expected_sol = np.array([[1.0, 2.0]]*9 + + [[0.5, 1.0], + [0.25, 0.5], + [0.25, 0.5], + [0.125, 0.25]]) + assert_allclose(sol, expected_sol) + + # Edge case: empty t sequence. + sol = odeint(func, [1.], []) + assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1))) + + # t values are not monotonic. 
+ assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0]) + assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_integrate.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_integrate.pyc new file mode 100644 index 0000000..0a54da7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_integrate.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_ivp.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_ivp.py new file mode 100644 index 0000000..5642c2c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_ivp.py @@ -0,0 +1,793 @@ +from __future__ import division, print_function, absolute_import +from itertools import product +from numpy.testing import (assert_, assert_allclose, + assert_equal, assert_no_warnings) +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings +import numpy as np +from scipy.optimize._numdiff import group_columns +from scipy.integrate import solve_ivp, RK23, RK45, Radau, BDF, LSODA +from scipy.integrate import OdeSolution +from scipy.integrate._ivp.common import num_jac +from scipy.integrate._ivp.base import ConstantDenseOutput +from scipy.sparse import coo_matrix, csc_matrix + + +def fun_linear(t, y): + return np.array([-y[0] - 5 * y[1], y[0] + y[1]]) + + +def jac_linear(): + return np.array([[-1, -5], [1, 1]]) + + +def sol_linear(t): + return np.vstack((-5 * np.sin(2 * t), + 2 * np.cos(2 * t) + np.sin(2 * t))) + + +def fun_rational(t, y): + return np.array([y[1] / t, + y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))]) + + +def fun_rational_vectorized(t, y): + return np.vstack((y[1] / t, + y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1)))) + + +def jac_rational(t, y): + return np.array([ + [0, 1 / t], + [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2), + (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))] + ]) + + +def jac_rational_sparse(t, y): + return csc_matrix([ + [0, 1 / t], + [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2), + (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))] + ]) + + +def sol_rational(t): + return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2)) + + +def fun_medazko(t, y): + n = y.shape[0] // 2 + k = 100 + c = 4 + + phi = 2 if t <= 5 else 0 + y = np.hstack((phi, 0, y, y[-2])) + + d = 1 / n + j = np.arange(n) + 1 + alpha = 2 * (j * d - 1) ** 3 / c ** 2 + beta = (j * d - 1) ** 4 / c ** 2 + + j_2_p1 = 2 * j + 2 + j_2_m3 = 2 * j - 2 + j_2_m1 = 2 * j + j_2 = 2 * j + 1 + + f = np.empty(2 * n) + f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) + + beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 - + k * y[j_2_m1] * y[j_2]) + f[1::2] = -k * y[j_2] * y[j_2_m1] + + return f + + +def medazko_sparsity(n): + cols = [] + rows = [] + + i = np.arange(n) * 2 + + cols.append(i[1:]) + rows.append(i[1:] - 2) + + cols.append(i) + rows.append(i) + + cols.append(i) + rows.append(i + 1) + + cols.append(i[:-1]) + rows.append(i[:-1] + 2) + + i = np.arange(n) * 2 + 1 + + cols.append(i) + rows.append(i) + + cols.append(i) + rows.append(i - 1) + + cols = np.hstack(cols) + rows = np.hstack(rows) + + return coo_matrix((np.ones_like(cols), (cols, rows))) + + +def fun_complex(t, y): + return -y + + +def jac_complex(t, y): + return -np.eye(y.shape[0]) + + +def jac_complex_sparse(t, y): + return csc_matrix(jac_complex(t, y)) + + +def sol_complex(t): + y = (0.5 + 1j) * np.exp(-t) + return 
y.reshape((1, -1))
+
+
+def compute_error(y, y_true, rtol, atol):
+    e = (y - y_true) / (atol + rtol * np.abs(y_true))
+    return np.sqrt(np.sum(np.real(e * e.conj()), axis=0) / e.shape[0])
+
+
+def test_integration():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [1/3, 2/9]
+
+    for vectorized, method, t_span, jac in product(
+            [False, True],
+            ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA'],
+            [[5, 9], [5, 1]],
+            [None, jac_rational, jac_rational_sparse]):
+
+        if vectorized:
+            fun = fun_rational_vectorized
+        else:
+            fun = fun_rational
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "The following arguments have no effect for a chosen solver: `jac`")
+            res = solve_ivp(fun, t_span, y0, rtol=rtol,
+                            atol=atol, method=method, dense_output=True,
+                            jac=jac, vectorized=vectorized)
+        assert_equal(res.t[0], t_span[0])
+        assert_(res.t_events is None)
+        assert_(res.success)
+        assert_equal(res.status, 0)
+
+        assert_(res.nfev < 40)
+
+        if method in ['RK23', 'RK45', 'LSODA']:
+            assert_equal(res.njev, 0)
+            assert_equal(res.nlu, 0)
+        else:
+            assert_(0 < res.njev < 3)
+            assert_(0 < res.nlu < 10)
+
+        y_true = sol_rational(res.t)
+        e = compute_error(res.y, y_true, rtol, atol)
+        assert_(np.all(e < 5))
+
+        tc = np.linspace(*t_span)
+        yc_true = sol_rational(tc)
+        yc = res.sol(tc)
+
+        e = compute_error(yc, yc_true, rtol, atol)
+        assert_(np.all(e < 5))
+
+        tc = (t_span[0] + t_span[-1]) / 2
+        yc_true = sol_rational(tc)
+        yc = res.sol(tc)
+
+        e = compute_error(yc, yc_true, rtol, atol)
+        assert_(np.all(e < 5))
+
+        # LSODA for some reason doesn't pass the polynomial through the
+        # previous points exactly after the order change. It might be a bug
+        # in the LSODA implementation, or maybe we are missing something.
+        if method != 'LSODA':
+            assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
+
+
+def test_integration_complex():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [0.5 + 1j]
+    t_span = [0, 1]
+    tc = np.linspace(t_span[0], t_span[1])
+    for method, jac in product(['RK23', 'RK45', 'BDF'],
+                               [None, jac_complex, jac_complex_sparse]):
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "The following arguments have no effect for a chosen solver: `jac`")
+            res = solve_ivp(fun_complex, t_span, y0, method=method,
+                            dense_output=True, rtol=rtol, atol=atol, jac=jac)
+
+        assert_equal(res.t[0], t_span[0])
+        assert_(res.t_events is None)
+        assert_(res.success)
+        assert_equal(res.status, 0)
+
+        assert_(res.nfev < 25)
+        if method == 'BDF':
+            assert_equal(res.njev, 1)
+            assert_(res.nlu < 6)
+        else:
+            assert_equal(res.njev, 0)
+            assert_equal(res.nlu, 0)
+
+        y_true = sol_complex(res.t)
+        e = compute_error(res.y, y_true, rtol, atol)
+        assert_(np.all(e < 5))
+
+        yc_true = sol_complex(tc)
+        yc = res.sol(tc)
+        e = compute_error(yc, yc_true, rtol, atol)
+
+        assert_(np.all(e < 5))
+
+
+def test_integration_sparse_difference():
+    n = 200
+    t_span = [0, 20]
+    y0 = np.zeros(2 * n)
+    y0[1::2] = 1
+    sparsity = medazko_sparsity(n)
+
+    for method in ['BDF', 'Radau']:
+        res = solve_ivp(fun_medazko, t_span, y0, method=method,
+                        jac_sparsity=sparsity)
+
+        assert_equal(res.t[0], t_span[0])
+        assert_(res.t_events is None)
+        assert_(res.success)
+        assert_equal(res.status, 0)
+
+        assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)
+        assert_allclose(res.y[79, -1], 0, atol=1e-3)
+        assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)
+        assert_allclose(res.y[149, -1], 0, atol=1e-3)
+        assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)
+        assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)
+        assert_allclose(res.y[238, -1], 0,
atol=1e-3) + assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2) + + +def test_integration_const_jac(): + rtol = 1e-3 + atol = 1e-6 + y0 = [0, 2] + t_span = [0, 2] + J = jac_linear() + J_sparse = csc_matrix(J) + + for method, jac in product(['Radau', 'BDF'], [J, J_sparse]): + res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol, + method=method, dense_output=True, jac=jac) + assert_equal(res.t[0], t_span[0]) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + assert_(res.nfev < 100) + assert_equal(res.njev, 0) + assert_(0 < res.nlu < 15) + + y_true = sol_linear(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 10)) + + tc = np.linspace(*t_span) + yc_true = sol_linear(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 15)) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14) + + +def test_events(): + def event_rational_1(t, y): + return y[0] - y[1] ** 0.7 + + def event_rational_2(t, y): + return y[1] ** 0.6 - y[0] + + def event_rational_3(t, y): + return t - 7.4 + + event_rational_3.terminal = True + + for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']: + res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_(7.3 < res.t_events[1][0] < 7.7) + + event_rational_1.direction = 1 + event_rational_2.direction = 1 + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 0) + assert_(5.3 < res.t_events[0][0] < 5.7) + + event_rational_1.direction = -1 + event_rational_2.direction = -1 + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 0) + assert_equal(res.t_events[1].size, 1) + assert_(7.3 < res.t_events[1][0] < 7.7) + + event_rational_1.direction = 0 + event_rational_2.direction = 0 + + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=(event_rational_1, event_rational_2, + event_rational_3), dense_output=True) + assert_equal(res.status, 1) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 0) + assert_equal(res.t_events[2].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_(7.3 < res.t_events[2][0] < 7.5) + + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=event_rational_1, dense_output=True) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + + # Also test that termination by event doesn't break interpolants. + tc = np.linspace(res.t[0], res.t[-1]) + yc_true = sol_rational(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, 1e-3, 1e-6) + assert_(np.all(e < 5)) + + # Test in backward direction. 
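+    # An event's `direction` attribute selects which zero crossings are
+    # counted: +1 for crossings from negative to positive, -1 for the
+    # reverse, and 0 (the default) for both. Integrating backward in time
+    # flips the sign of dg/dt along the trajectory, so the expected event
+    # counts below mirror the forward cases above with directions swapped.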
+ event_rational_1.direction = 0 + event_rational_2.direction = 0 + for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']: + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_(7.3 < res.t_events[1][0] < 7.7) + + event_rational_1.direction = -1 + event_rational_2.direction = -1 + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 0) + assert_(5.3 < res.t_events[0][0] < 5.7) + + event_rational_1.direction = 1 + event_rational_2.direction = 1 + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 0) + assert_equal(res.t_events[1].size, 1) + assert_(7.3 < res.t_events[1][0] < 7.7) + + event_rational_1.direction = 0 + event_rational_2.direction = 0 + + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2, + event_rational_3), dense_output=True) + assert_equal(res.status, 1) + assert_equal(res.t_events[0].size, 0) + assert_equal(res.t_events[1].size, 1) + assert_equal(res.t_events[2].size, 1) + assert_(7.3 < res.t_events[1][0] < 7.7) + assert_(7.3 < res.t_events[2][0] < 7.5) + + # Also test that termination by event doesn't break interpolants. + tc = np.linspace(res.t[-1], res.t[0]) + yc_true = sol_rational(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, 1e-3, 1e-6) + assert_(np.all(e < 5)) + + +def test_max_step(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + for method in [RK23, RK45, Radau, BDF, LSODA]: + for t_span in ([5, 9], [5, 1]): + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, + max_step=0.5, atol=atol, method=method, + dense_output=True) + assert_equal(res.t[0], t_span[0]) + assert_equal(res.t[-1], t_span[-1]) + assert_(np.all(np.abs(np.diff(res.t)) <= 0.5)) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = np.linspace(*t_span) + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + # See comment in test_integration. 
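+            # (That is, LSODA's dense output may not reproduce res.y exactly
+            # at the accepted steps, so the exact round-trip comparison is
+            # skipped for LSODA here as well.)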
+ if method is not LSODA: + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + assert_raises(ValueError, method, fun_rational, t_span[0], y0, + t_span[1], max_step=-1) + + if method is not LSODA: + solver = method(fun_rational, t_span[0], y0, t_span[1], + rtol=rtol, atol=atol, max_step=1e-20) + message = solver.step() + + assert_equal(solver.status, 'failed') + assert_("step size is less" in message) + assert_raises(RuntimeError, solver.step) + + +def test_first_step(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + first_step = 0.1 + for method in [RK23, RK45, Radau, BDF, LSODA]: + for t_span in ([5, 9], [5, 1]): + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, + max_step=0.5, atol=atol, method=method, + dense_output=True, first_step=first_step) + + assert_equal(res.t[0], t_span[0]) + assert_equal(res.t[-1], t_span[-1]) + assert_allclose(first_step, np.abs(res.t[1] - 5)) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = np.linspace(*t_span) + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + # See comment in test_integration. + if method is not LSODA: + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + assert_raises(ValueError, method, fun_rational, t_span[0], y0, + t_span[1], first_step=-1) + assert_raises(ValueError, method, fun_rational, t_span[0], y0, + t_span[1], first_step=5) + + +def test_t_eval(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + for t_span in ([5, 9], [5, 1]): + t_eval = np.linspace(t_span[0], t_span[1], 10) + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + t_eval = [5, 5.01, 7, 8, 8.01, 9] + res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1] + res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + t_eval = [5.01, 7, 8, 8.01] + res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + t_eval = [4.99, 3, 1.5, 1.1, 1.01] + res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + t_eval = [4, 6] + assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0, + rtol=rtol, atol=atol, t_eval=t_eval) + + +def test_t_eval_dense_output(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + t_span = [5, 9] + t_eval = np.linspace(t_span[0], t_span[1], 10) + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + t_eval=t_eval) + res_d = 
solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + t_eval=t_eval, dense_output=True) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + assert_equal(res.t, res_d.t) + assert_equal(res.y, res_d.y) + assert_(res_d.t_events is None) + assert_(res_d.success) + assert_equal(res_d.status, 0) + + # if t and y are equal only test values for one case + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + +def test_no_integration(): + for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']: + sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3], + method=method, dense_output=True) + assert_equal(sol.sol(4), [2, 3]) + assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]]) + + +def test_no_integration_class(): + for method in [RK23, RK45, Radau, BDF, LSODA]: + solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0) + solver.step() + assert_equal(solver.status, 'finished') + sol = solver.dense_output() + assert_equal(sol(0.0), [10.0, 0.0]) + assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]]) + + solver = method(lambda t, y: -y, 0.0, [], np.inf) + solver.step() + assert_equal(solver.status, 'finished') + sol = solver.dense_output() + assert_equal(sol(100.0), []) + assert_equal(sol([0, 1, 2]), np.empty((0, 3))) + + +def test_empty(): + def fun(t, y): + return np.zeros((0,)) + + y0 = np.zeros((0,)) + + for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']: + sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0, + method=method, dense_output=True) + assert_equal(sol.sol(10), np.zeros((0,))) + assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3))) + + for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']: + sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0, + method=method, dense_output=True) + assert_equal(sol.sol(10), np.zeros((0,))) + assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3))) + + +def test_ConstantDenseOutput(): + sol = ConstantDenseOutput(0, 1, np.array([1, 2])) + assert_allclose(sol(1.5), [1, 2]) + assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]]) + + sol = ConstantDenseOutput(0, 1, np.array([])) + assert_allclose(sol(1.5), np.empty(0)) + assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3))) + + +def test_classes(): + y0 = [1 / 3, 2 / 9] + for cls in [RK23, RK45, Radau, BDF, LSODA]: + solver = cls(fun_rational, 5, y0, np.inf) + assert_equal(solver.n, 2) + assert_equal(solver.status, 'running') + assert_equal(solver.t_bound, np.inf) + assert_equal(solver.direction, 1) + assert_equal(solver.t, 5) + assert_equal(solver.y, y0) + assert_(solver.step_size is None) + if cls is not LSODA: + assert_(solver.nfev > 0) + assert_(solver.njev >= 0) + assert_equal(solver.nlu, 0) + else: + assert_equal(solver.nfev, 0) + assert_equal(solver.njev, 0) + assert_equal(solver.nlu, 0) + + assert_raises(RuntimeError, solver.dense_output) + + message = solver.step() + assert_equal(solver.status, 'running') + assert_equal(message, None) + assert_equal(solver.n, 2) + assert_equal(solver.t_bound, np.inf) + assert_equal(solver.direction, 1) + assert_(solver.t > 5) + assert_(not np.all(np.equal(solver.y, y0))) + assert_(solver.step_size > 0) + assert_(solver.nfev > 0) + assert_(solver.njev >= 0) + assert_(solver.nlu >= 0) + sol = solver.dense_output() + assert_allclose(sol(5), y0, rtol=1e-15, atol=0) + + +def test_OdeSolution(): + ts = np.array([0, 2, 5], dtype=float) + s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1])) + s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1])) + 
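+    # OdeSolution stitches the local interpolants into a single piecewise
+    # callable: with two constant segments on [0, 2] and [2, 5], evaluation
+    # yields -1 up to the second breakpoint and 1 beyond it, with the end
+    # values held constant outside [0, 5].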
+ sol = OdeSolution(ts, [s1, s2]) + + assert_equal(sol(-1), [-1]) + assert_equal(sol(1), [-1]) + assert_equal(sol(2), [-1]) + assert_equal(sol(3), [1]) + assert_equal(sol(5), [1]) + assert_equal(sol(6), [1]) + + assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]), + np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]])) + + ts = np.array([10, 4, -3]) + s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1])) + s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1])) + + sol = OdeSolution(ts, [s1, s2]) + assert_equal(sol(11), [-1]) + assert_equal(sol(10), [-1]) + assert_equal(sol(5), [-1]) + assert_equal(sol(4), [-1]) + assert_equal(sol(0), [1]) + assert_equal(sol(-3), [1]) + assert_equal(sol(-4), [1]) + + assert_equal(sol([12, -5, 10, -3, 6, 1, 4]), + np.array([[-1, 1, -1, 1, -1, 1, -1]])) + + ts = np.array([1, 1]) + s = ConstantDenseOutput(1, 1, np.array([10])) + sol = OdeSolution(ts, [s]) + assert_equal(sol(0), [10]) + assert_equal(sol(1), [10]) + assert_equal(sol(2), [10]) + + assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]])) + + +def test_num_jac(): + def fun(t, y): + return np.vstack([ + -0.04 * y[0] + 1e4 * y[1] * y[2], + 0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2, + 3e7 * y[1] ** 2 + ]) + + def jac(t, y): + return np.array([ + [-0.04, 1e4 * y[2], 1e4 * y[1]], + [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]], + [0, 6e7 * y[1], 0] + ]) + + t = 1 + y = np.array([1, 0, 0]) + J_true = jac(t, y) + threshold = 1e-5 + f = fun(t, y).ravel() + + J_num, factor = num_jac(fun, t, y, f, threshold, None) + assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5) + + J_num, factor = num_jac(fun, t, y, f, threshold, factor) + assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5) + + +def test_num_jac_sparse(): + def fun(t, y): + e = y[1:]**3 - y[:-1]**2 + z = np.zeros(y.shape[1]) + return np.vstack((z, 3 * e)) + np.vstack((2 * e, z)) + + def structure(n): + A = np.zeros((n, n), dtype=int) + A[0, 0] = 1 + A[0, 1] = 1 + for i in range(1, n - 1): + A[i, i - 1: i + 2] = 1 + A[-1, -1] = 1 + A[-1, -2] = 1 + + return A + + np.random.seed(0) + n = 20 + y = np.random.randn(n) + A = structure(n) + groups = group_columns(A) + + f = fun(0, y[:, None]).ravel() + + # Compare dense and sparse results, assuming that dense implementation + # is correct (as it is straightforward). + J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None, + sparsity=(A, groups)) + J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None) + assert_allclose(J_num_dense, J_num_sparse.toarray(), + rtol=1e-12, atol=1e-14) + assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14) + + # Take small factors to trigger their recomputing inside. 
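+    # num_jac also returns `factor`, the per-column step scales it used for
+    # the finite differences; seeding it with values around 1e-12 (far below
+    # a sensible step for these values) forces the recomputation path that
+    # this second comparison exercises.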
+ factor = np.random.uniform(0, 1e-12, size=n) + J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor, + sparsity=(A, groups)) + J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor) + + assert_allclose(J_num_dense, J_num_sparse.toarray(), + rtol=1e-12, atol=1e-14) + assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_ivp.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_ivp.pyc new file mode 100644 index 0000000..938fae3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_ivp.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_odeint_jac.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_odeint_jac.py new file mode 100644 index 0000000..9aff613 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_odeint_jac.py @@ -0,0 +1,75 @@ + +import numpy as np +from numpy.testing import assert_equal, assert_allclose +from scipy.integrate import odeint +import scipy.integrate._test_odeint_banded as banded5x5 + + +def rhs(y, t): + dydt = np.zeros_like(y) + banded5x5.banded5x5(t, y, dydt) + return dydt + + +def jac(y, t): + n = len(y) + jac = np.zeros((n, n), order='F') + banded5x5.banded5x5_jac(t, y, 1, 1, jac) + return jac + + +def bjac(y, t): + n = len(y) + bjac = np.zeros((4, n), order='F') + banded5x5.banded5x5_bjac(t, y, 1, 1, bjac) + return bjac + + +JACTYPE_FULL = 1 +JACTYPE_BANDED = 4 + + +def check_odeint(jactype): + if jactype == JACTYPE_FULL: + ml = None + mu = None + jacobian = jac + elif jactype == JACTYPE_BANDED: + ml = 2 + mu = 1 + jacobian = bjac + else: + raise ValueError("invalid jactype: %r" % (jactype,)) + + y0 = np.arange(1.0, 6.0) + # These tolerances must match the tolerances used in banded5x5.f. + rtol = 1e-11 + atol = 1e-13 + dt = 0.125 + nsteps = 64 + t = dt * np.arange(nsteps+1) + + sol, info = odeint(rhs, y0, t, + Dfun=jacobian, ml=ml, mu=mu, + atol=atol, rtol=rtol, full_output=True) + yfinal = sol[-1] + odeint_nst = info['nst'][-1] + odeint_nfe = info['nfe'][-1] + odeint_nje = info['nje'][-1] + + y1 = y0.copy() + # Pure Fortran solution. y1 is modified in-place. + nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype) + + # It is likely that yfinal and y1 are *exactly* the same, but + # we'll be cautious and use assert_allclose. 
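+    # Both paths run the same LSODA algorithm with matching tolerances, and
+    # the step and evaluation counts are compared exactly below, so
+    # bitwise-identical results are plausible; the tolerance only guards
+    # against platform-dependent floating-point differences.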
+ assert_allclose(yfinal, y1, rtol=1e-12) + assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje)) + + +def test_odeint_full_jac(): + check_odeint(JACTYPE_FULL) + + +def test_odeint_banded_jac(): + check_odeint(JACTYPE_BANDED) diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_odeint_jac.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_odeint_jac.pyc new file mode 100644 index 0000000..03d0f5d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_odeint_jac.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadpack.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadpack.py new file mode 100644 index 0000000..3417adf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadpack.py @@ -0,0 +1,418 @@ +from __future__ import division, print_function, absolute_import + +import sys +import math +import numpy as np +from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf +from numpy.testing import (assert_, + assert_allclose, assert_array_less, assert_almost_equal) +import pytest +from pytest import raises as assert_raises + +from scipy.integrate import quad, dblquad, tplquad, nquad +from scipy._lib.six import xrange +from scipy._lib._ccallback import LowLevelCallable + +import ctypes +import ctypes.util +from scipy._lib._ccallback_c import sine_ctypes + +import scipy.integrate._test_multivariate as clib_test + + +def assert_quad(value_and_err, tabled_value, errTol=1.5e-8): + value, err = value_and_err + assert_allclose(value, tabled_value, atol=err, rtol=0) + if errTol is not None: + assert_array_less(err, errTol) + + +def get_clib_test_routine(name, restype, *argtypes): + ptr = getattr(clib_test, name) + return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes)) + + +class TestCtypesQuad(object): + def setup_method(self): + if sys.platform == 'win32': + if sys.version_info < (3, 5): + files = [ctypes.util.find_msvcrt()] + else: + files = ['api-ms-win-crt-math-l1-1-0.dll'] + elif sys.platform == 'darwin': + files = ['libm.dylib'] + else: + files = ['libm.so', 'libm.so.6'] + + for file in files: + try: + self.lib = ctypes.CDLL(file) + break + except OSError: + pass + else: + # This test doesn't work on some Linux platforms (Fedora for + # example) that put an ld script in libm.so - see gh-5370 + self.skipTest("Ctypes can't import libm.so") + + restype = ctypes.c_double + argtypes = (ctypes.c_double,) + for name in ['sin', 'cos', 'tan']: + func = getattr(self.lib, name) + func.restype = restype + func.argtypes = argtypes + + def test_typical(self): + assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0]) + assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0]) + assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0]) + + def test_ctypes_sine(self): + quad(LowLevelCallable(sine_ctypes), 0, 1) + + def test_ctypes_variants(self): + sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double, + ctypes.c_double, ctypes.c_void_p) + + sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double, + ctypes.c_int, ctypes.POINTER(ctypes.c_double), + ctypes.c_void_p) + + sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double, + ctypes.c_double) + + sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double, + ctypes.c_int, ctypes.POINTER(ctypes.c_double)) + + sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double, + ctypes.c_int, ctypes.c_double) + + all_sigs = [sin_0, sin_1, 
sin_2, sin_3, sin_4] + legacy_sigs = [sin_2, sin_4] + legacy_only_sigs = [sin_4] + + # LowLevelCallables work for new signatures + for j, func in enumerate(all_sigs): + callback = LowLevelCallable(func) + if func in legacy_only_sigs: + assert_raises(ValueError, quad, callback, 0, pi) + else: + assert_allclose(quad(callback, 0, pi)[0], 2.0) + + # Plain ctypes items work only for legacy signatures + for j, func in enumerate(legacy_sigs): + if func in legacy_sigs: + assert_allclose(quad(func, 0, pi)[0], 2.0) + else: + assert_raises(ValueError, quad, func, 0, pi) + + +class TestMultivariateCtypesQuad(object): + def setup_method(self): + restype = ctypes.c_double + argtypes = (ctypes.c_int, ctypes.c_double) + for name in ['_multivariate_typical', '_multivariate_indefinite', + '_multivariate_sin']: + func = get_clib_test_routine(name, restype, *argtypes) + setattr(self, name, func) + + def test_typical(self): + # 1) Typical function with two extra arguments: + assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)), + 0.30614353532540296487) + + def test_indefinite(self): + # 2) Infinite integration limits --- Euler's constant + assert_quad(quad(self._multivariate_indefinite, 0, Inf), + 0.577215664901532860606512) + + def test_threadsafety(self): + # Ensure multivariate ctypes are threadsafe + def threadsafety(y): + return y + quad(self._multivariate_sin, 0, 1)[0] + assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602) + + +class TestQuad(object): + def test_typical(self): + # 1) Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487) + + def test_indefinite(self): + # 2) Infinite integration limits --- Euler's constant + def myfunc(x): # Euler's constant integrand + return -exp(-x)*log(x) + assert_quad(quad(myfunc, 0, Inf), 0.577215664901532860606512) + + def test_singular(self): + # 3) Singular points in region of integration. + def myfunc(x): + if 0 < x < 2.5: + return sin(x) + elif 2.5 <= x <= 5.0: + return exp(-x) + else: + return 0.0 + + assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]), + 1 - cos(2.5) + exp(-2.5) - exp(-5.0)) + + def test_sine_weighted_finite(self): + # 4) Sine weighted integral (finite limits) + def myfunc(x, a): + return exp(a*(x-1)) + + ome = 2.0**3.4 + assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome), + (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2)) + + def test_sine_weighted_infinite(self): + # 5) Sine weighted integral (infinite limits) + def myfunc(x, a): + return exp(-x*a) + + a = 4.0 + ome = 3.0 + assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome), + ome/(a**2 + ome**2)) + + def test_cosine_weighted_infinite(self): + # 6) Cosine weighted integral (negative infinite limits) + def myfunc(x, a): + return exp(x*a) + + a = 2.5 + ome = 2.3 + assert_quad(quad(myfunc, -Inf, 0, args=a, weight='cos', wvar=ome), + a/(a**2 + ome**2)) + + def test_algebraic_log_weight(self): + # 6) Algebraic-logarithmic weight. 
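+        # For weight='alg', wvar=(alpha, beta) selects the weight
+        # w(x) = (x-a)**alpha * (b-x)**beta; with (-0.5, -0.5) on [-1, 1]
+        # this is the Chebyshev weight 1/sqrt(1 - x**2).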
+        def myfunc(x, a):
+            return 1/(1+x+2**(-a))
+
+        a = 1.5
+        assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
+                         wvar=(-0.5, -0.5)),
+                    pi/sqrt((1+2**(-a))**2 - 1))
+
+    def test_cauchypv_weight(self):
+        # 7) Cauchy principal value weighting w(x) = 1/(x-c)
+        def myfunc(x, a):
+            return 2.0**(-a)/((x-1)**2+4.0**(-a))
+
+        a = 0.4
+        tabledValue = ((2.0**(-0.4)*log(1.5) -
+                        2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
+                        arctan(2.0**(a+2)) -
+                        arctan(2.0**a)) /
+                       (4.0**(-a) + 1))
+        assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
+                    tabledValue, errTol=1.9e-8)
+
+    def test_b_less_than_a(self):
+        def f(x, p, q):
+            return p * np.exp(-q*x)
+
+        val_1, err_1 = quad(f, 0, np.inf, args=(2, 3))
+        val_2, err_2 = quad(f, np.inf, 0, args=(2, 3))
+        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
+
+    def test_b_less_than_a_2(self):
+        def f(x, s):
+            return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s)
+
+        val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,))
+        val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,))
+        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
+
+    def test_b_less_than_a_3(self):
+        def f(x):
+            return 1.0
+
+        val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0))
+        val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0))
+        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
+
+    def test_b_less_than_a_full_output(self):
+        def f(x):
+            return 1.0
+
+        res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True)
+        res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True)
+        err = max(res_1[1], res_2[1])
+        assert_allclose(res_1[0], -res_2[0], atol=err)
+
+    def test_double_integral(self):
+        # 8) Double Integral test
+        def simpfunc(y, x):  # Note order of arguments.
+            return x+y
+
+        a, b = 1.0, 2.0
+        assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
+                    5/6.0 * (b**3.0-a**3.0))
+
+    def test_double_integral2(self):
+        def func(x0, x1, t0, t1):
+            return x0 + x1 + t0 + t1
+        g = lambda x: x
+        h = lambda x: 2 * x
+        args = 1, 2
+        assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5)
+
+    def test_double_integral3(self):
+        def func(x0, x1):
+            return x0 + x1 + 1 + 2
+        assert_quad(dblquad(func, 1, 2, 1, 2),6.)
+
+    def test_triple_integral(self):
+        # 9) Triple Integral test
+        def simpfunc(z, y, x, t):  # Note order of arguments.
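+            # tplquad passes arguments innermost-first, i.e. as
+            # func(z, y, x, *args), mirroring dblquad's func(y, x).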
+ return (x+y+z)*t + + a, b = 1.0, 2.0 + assert_quad(tplquad(simpfunc, a, b, + lambda x: x, lambda x: 2*x, + lambda x, y: x - y, lambda x, y: x + y, + (2.,)), + 2*8/3.0 * (b**4.0 - a**4.0)) + + +class TestNQuad(object): + def test_fixed_limits(self): + def func1(x0, x1, x2, x3): + val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) + + (1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0)) + return val + + def opts_basic(*args): + return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]} + + res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]], + opts=[opts_basic, {}, {}, {}], full_output=True) + assert_quad(res[:-1], 1.5267454070738635) + assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5) + + def test_variable_limits(self): + scale = .1 + + def func2(x0, x1, x2, x3, t0, t1): + val = (x0*x1*x3**2 + np.sin(x2) + 1 + + (1 if x0 + t1*x1 - t0 > 0 else 0)) + return val + + def lim0(x1, x2, x3, t0, t1): + return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1, + scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1] + + def lim1(x2, x3, t0, t1): + return [scale * (t0*x2 + t1*x3) - 1, + scale * (t0*x2 + t1*x3) + 1] + + def lim2(x3, t0, t1): + return [scale * (x3 + t0**2*t1**3) - 1, + scale * (x3 + t0**2*t1**3) + 1] + + def lim3(t0, t1): + return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1] + + def opts0(x1, x2, x3, t0, t1): + return {'points': [t0 - t1*x1]} + + def opts1(x2, x3, t0, t1): + return {} + + def opts2(x3, t0, t1): + return {} + + def opts3(t0, t1): + return {} + + res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0), + opts=[opts0, opts1, opts2, opts3]) + assert_quad(res, 25.066666666666663) + + def test_square_separate_ranges_and_opts(self): + def f(y, x): + return 1.0 + + assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0) + + def test_square_aliased_ranges_and_opts(self): + def f(y, x): + return 1.0 + + r = [-1, 1] + opt = {} + assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0) + + def test_square_separate_fn_ranges_and_opts(self): + def f(y, x): + return 1.0 + + def fn_range0(*args): + return (-1, 1) + + def fn_range1(*args): + return (-1, 1) + + def fn_opt0(*args): + return {} + + def fn_opt1(*args): + return {} + + ranges = [fn_range0, fn_range1] + opts = [fn_opt0, fn_opt1] + assert_quad(nquad(f, ranges, opts=opts), 4.0) + + def test_square_aliased_fn_ranges_and_opts(self): + def f(y, x): + return 1.0 + + def fn_range(*args): + return (-1, 1) + + def fn_opt(*args): + return {} + + ranges = [fn_range, fn_range] + opts = [fn_opt, fn_opt] + assert_quad(nquad(f, ranges, opts=opts), 4.0) + + def test_matching_quad(self): + def func(x): + return x**2 + 1 + + res, reserr = quad(func, 0, 4) + res2, reserr2 = nquad(func, ranges=[[0, 4]]) + assert_almost_equal(res, res2) + assert_almost_equal(reserr, reserr2) + + def test_matching_dblquad(self): + def func2d(x0, x1): + return x0**2 + x1**3 - x0 * x1 + 1 + + res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3) + res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)]) + assert_almost_equal(res, res2) + assert_almost_equal(reserr, reserr2) + + def test_matching_tplquad(self): + def func3d(x0, x1, x2, c0, c1): + return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2) + + res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2, + lambda x, y: -np.pi, lambda x, y: np.pi, + args=(2, 3)) + res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3)) + assert_almost_equal(res, res2) + + def test_dict_as_opts(self): + try: + out = nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001}) + except(TypeError): + assert 
False + diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadpack.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadpack.pyc new file mode 100644 index 0000000..14489b8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadrature.py b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadrature.py new file mode 100644 index 0000000..9e5f865 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadrature.py @@ -0,0 +1,233 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import cos, sin, pi +from numpy.testing import assert_equal, \ + assert_almost_equal, assert_allclose, assert_ +from scipy._lib._numpy_compat import suppress_warnings + +from scipy.integrate import (quadrature, romberg, romb, newton_cotes, + cumtrapz, quad, simps, fixed_quad) +from scipy.integrate.quadrature import AccuracyWarning + + +class TestFixedQuad(object): + def test_scalar(self): + n = 4 + func = lambda x: x**(2*n - 1) + expected = 1/(2*n) + got, _ = fixed_quad(func, 0, 1, n=n) + # quadrature exact for this input + assert_allclose(got, expected, rtol=1e-12) + + def test_vector(self): + n = 4 + p = np.arange(1, 2*n) + func = lambda x: x**p[:,None] + expected = 1/(p + 1) + got, _ = fixed_quad(func, 0, 1, n=n) + assert_allclose(got, expected, rtol=1e-12) + + +class TestQuadrature(object): + def quad(self, x, a, b, args): + raise NotImplementedError + + def test_quadrature(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + val, err = quadrature(myfunc, 0, pi, (2, 1.8)) + table_val = 0.30614353532540296487 + assert_almost_equal(val, table_val, decimal=7) + + def test_quadrature_rtol(self): + def myfunc(x, n, z): # Bessel function integrand + return 1e90 * cos(n*x-z*sin(x))/pi + val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10) + table_val = 1e90 * 0.30614353532540296487 + assert_allclose(val, table_val, rtol=1e-10) + + def test_quadrature_miniter(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + table_val = 0.30614353532540296487 + for miniter in [5, 52]: + val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter) + assert_almost_equal(val, table_val, decimal=7) + assert_(err < 1.0) + + def test_quadrature_single_args(self): + def myfunc(x, n): + return 1e90 * cos(n*x-1.8*sin(x))/pi + val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10) + table_val = 1e90 * 0.30614353532540296487 + assert_allclose(val, table_val, rtol=1e-10) + + def test_romberg(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + val = romberg(myfunc, 0, pi, args=(2, 1.8)) + table_val = 0.30614353532540296487 + assert_almost_equal(val, table_val, decimal=7) + + def test_romberg_rtol(self): + # Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return 1e19*cos(n*x-z*sin(x))/pi + val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10) + table_val = 1e19*0.30614353532540296487 + assert_allclose(val, table_val, rtol=1e-10) + + def test_romb(self): + assert_equal(romb(np.arange(17)), 128) + + def test_romb_gh_3731(self): + # Check 
that romb makes maximal use of data points + x = np.arange(2**4+1) + y = np.cos(0.2*x) + val = romb(y) + val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max()) + assert_allclose(val, val2, rtol=1e-8, atol=0) + + # should be equal to romb with 2**k+1 samples + with suppress_warnings() as sup: + sup.filter(AccuracyWarning, "divmax .4. exceeded") + val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4) + assert_allclose(val, val3, rtol=1e-12, atol=0) + + def test_non_dtype(self): + # Check that we work fine with functions returning float + import math + valmath = romberg(math.sin, 0, 1) + expected_val = 0.45969769413185085 + assert_almost_equal(valmath, expected_val, decimal=7) + + def test_newton_cotes(self): + """Test the first few degrees, for evenly spaced points.""" + n = 1 + wts, errcoff = newton_cotes(n, 1) + assert_equal(wts, n*np.array([0.5, 0.5])) + assert_almost_equal(errcoff, -n**3/12.0) + + n = 2 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0) + assert_almost_equal(errcoff, -n**5/2880.0) + + n = 3 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0) + assert_almost_equal(errcoff, -n**5/6480.0) + + n = 4 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0) + assert_almost_equal(errcoff, -n**7/1935360.0) + + def test_newton_cotes2(self): + """Test newton_cotes with points that are not evenly spaced.""" + + x = np.array([0.0, 1.5, 2.0]) + y = x**2 + wts, errcoff = newton_cotes(x) + exact_integral = 8.0/3 + numeric_integral = np.dot(wts, y) + assert_almost_equal(numeric_integral, exact_integral) + + x = np.array([0.0, 1.4, 2.1, 3.0]) + y = x**2 + wts, errcoff = newton_cotes(x) + exact_integral = 9.0 + numeric_integral = np.dot(wts, y) + assert_almost_equal(numeric_integral, exact_integral) + + def test_simps(self): + y = np.arange(17) + assert_equal(simps(y), 128) + assert_equal(simps(y, dx=0.5), 64) + assert_equal(simps(y, x=np.linspace(0, 4, 17)), 32) + + y = np.arange(4) + x = 2**y + assert_equal(simps(y, x=x, even='avg'), 13.875) + assert_equal(simps(y, x=x, even='first'), 13.75) + assert_equal(simps(y, x=x, even='last'), 14) + + +class TestCumtrapz(object): + def test_1d(self): + x = np.linspace(-2, 2, num=5) + y = x + y_int = cumtrapz(y, x, initial=0) + y_expected = [0., -1.5, -2., -1.5, 0.] 
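+        # With y = x the trapezoidal rule is exact, so these are the exact
+        # running integrals (x**2 - 4)/2 at the sample points.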
+ assert_allclose(y_int, y_expected) + + y_int = cumtrapz(y, x, initial=None) + assert_allclose(y_int, y_expected[1:]) + + def test_y_nd_x_nd(self): + x = np.arange(3 * 2 * 4).reshape(3, 2, 4) + y = x + y_int = cumtrapz(y, x, initial=0) + y_expected = np.array([[[0., 0.5, 2., 4.5], + [0., 4.5, 10., 16.5]], + [[0., 8.5, 18., 28.5], + [0., 12.5, 26., 40.5]], + [[0., 16.5, 34., 52.5], + [0., 20.5, 42., 64.5]]]) + + assert_allclose(y_int, y_expected) + + # Try with all axes + shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)] + for axis, shape in zip([0, 1, 2], shapes): + y_int = cumtrapz(y, x, initial=3.45, axis=axis) + assert_equal(y_int.shape, (3, 2, 4)) + y_int = cumtrapz(y, x, initial=None, axis=axis) + assert_equal(y_int.shape, shape) + + def test_y_nd_x_1d(self): + y = np.arange(3 * 2 * 4).reshape(3, 2, 4) + x = np.arange(4)**2 + # Try with all axes + ys_expected = ( + np.array([[[4., 5., 6., 7.], + [8., 9., 10., 11.]], + [[40., 44., 48., 52.], + [56., 60., 64., 68.]]]), + np.array([[[2., 3., 4., 5.]], + [[10., 11., 12., 13.]], + [[18., 19., 20., 21.]]]), + np.array([[[0.5, 5., 17.5], + [4.5, 21., 53.5]], + [[8.5, 37., 89.5], + [12.5, 53., 125.5]], + [[16.5, 69., 161.5], + [20.5, 85., 197.5]]])) + + for axis, y_expected in zip([0, 1, 2], ys_expected): + y_int = cumtrapz(y, x=x[:y.shape[axis]], axis=axis, initial=None) + assert_allclose(y_int, y_expected) + + def test_x_none(self): + y = np.linspace(-2, 2, num=5) + + y_int = cumtrapz(y) + y_expected = [-1.5, -2., -1.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumtrapz(y, initial=1.23) + y_expected = [1.23, -1.5, -2., -1.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumtrapz(y, dx=3) + y_expected = [-4.5, -6., -4.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumtrapz(y, dx=3, initial=1.23) + y_expected = [1.23, -4.5, -6., -4.5, 0.] + assert_allclose(y_int, y_expected) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadrature.pyc b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadrature.pyc new file mode 100644 index 0000000..1a55d9c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/tests/test_quadrature.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/integrate/vode.so b/project/venv/lib/python2.7/site-packages/scipy/integrate/vode.so new file mode 100755 index 0000000..1da42c8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/integrate/vode.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/__init__.py new file mode 100644 index 0000000..1b425f0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/__init__.py @@ -0,0 +1,197 @@ +"""======================================== +Interpolation (:mod:`scipy.interpolate`) +======================================== + +.. currentmodule:: scipy.interpolate + +Sub-package for objects used in interpolation. + +As listed below, this sub-package contains spline functions and classes, +one-dimensional and multi-dimensional (univariate and multivariate) +interpolation classes, Lagrange and Taylor polynomial interpolators, and +wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__ +and DFITPACK functions. + +Univariate interpolation +======================== + +.. 
autosummary:: + :toctree: generated/ + + interp1d + BarycentricInterpolator + KroghInterpolator + PchipInterpolator + barycentric_interpolate + krogh_interpolate + pchip_interpolate + Akima1DInterpolator + CubicSpline + PPoly + BPoly + + +Multivariate interpolation +========================== + +Unstructured data: + +.. autosummary:: + :toctree: generated/ + + griddata + LinearNDInterpolator + NearestNDInterpolator + CloughTocher2DInterpolator + Rbf + interp2d + +For data on a grid: + +.. autosummary:: + :toctree: generated/ + + interpn + RegularGridInterpolator + RectBivariateSpline + +.. seealso:: + + `scipy.ndimage.map_coordinates` + +Tensor product polynomials: + +.. autosummary:: + :toctree: generated/ + + NdPPoly + + +1-D Splines +=========== + +.. autosummary:: + :toctree: generated/ + + BSpline + make_interp_spline + make_lsq_spline + +Functional interface to FITPACK routines: + +.. autosummary:: + :toctree: generated/ + + splrep + splprep + splev + splint + sproot + spalde + splder + splantider + insert + +Object-oriented FITPACK interface: + +.. autosummary:: + :toctree: generated/ + + UnivariateSpline + InterpolatedUnivariateSpline + LSQUnivariateSpline + + + +2-D Splines +=========== + +For data on a grid: + +.. autosummary:: + :toctree: generated/ + + RectBivariateSpline + RectSphereBivariateSpline + +For unstructured data: + +.. autosummary:: + :toctree: generated/ + + BivariateSpline + SmoothBivariateSpline + SmoothSphereBivariateSpline + LSQBivariateSpline + LSQSphereBivariateSpline + +Low-level interface to FITPACK functions: + +.. autosummary:: + :toctree: generated/ + + bisplrep + bisplev + +Additional tools +================ + +.. autosummary:: + :toctree: generated/ + + lagrange + approximate_taylor_polynomial + pade + +.. seealso:: + + `scipy.ndimage.map_coordinates`, + `scipy.ndimage.spline_filter`, + `scipy.signal.resample`, + `scipy.signal.bspline`, + `scipy.signal.gauss_spline`, + `scipy.signal.qspline1d`, + `scipy.signal.cspline1d`, + `scipy.signal.qspline1d_eval`, + `scipy.signal.cspline1d_eval`, + `scipy.signal.qspline2d`, + `scipy.signal.cspline2d`. + +Functions existing for backward compatibility (should not be used in +new code): + +.. 
autosummary:: + :toctree: generated/ + + spleval + spline + splmake + spltopp + pchip + +""" +from __future__ import division, print_function, absolute_import + +from .interpolate import * +from .fitpack import * + +# New interface to fitpack library: +from .fitpack2 import * + +from .rbf import Rbf + +from .polyint import * + +from ._cubic import * + +from .ndgriddata import * + +from ._bsplines import * + +from ._pade import * + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/__init__.pyc new file mode 100644 index 0000000..1f42e36 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_bspl.so b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_bspl.so new file mode 100755 index 0000000..dfab270 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_bspl.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_bsplines.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_bsplines.py new file mode 100644 index 0000000..17e1eaf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_bsplines.py @@ -0,0 +1,1023 @@ +from __future__ import division, print_function, absolute_import + +import functools +import operator + +import numpy as np +from scipy._lib.six import string_types +from scipy.linalg import (get_lapack_funcs, LinAlgError, + cholesky_banded, cho_solve_banded) +from . import _bspl +from . import _fitpack_impl +from . import _fitpack as _dierckx + +__all__ = ["BSpline", "make_interp_spline", "make_lsq_spline"] + + +# copy-paste from interpolate.py +def prod(x): + """Product of a list of numbers; ~40x faster vs np.prod for Python tuples""" + if len(x) == 0: + return 1 + return functools.reduce(operator.mul, x) + + +def _get_dtype(dtype): + """Return np.complex128 for complex dtypes, np.float64 otherwise.""" + if np.issubdtype(dtype, np.complexfloating): + return np.complex_ + else: + return np.float_ + + +def _as_float_array(x, check_finite=False): + """Convert the input into a C contiguous float array. + + NB: Upcasts half- and single-precision floats to double precision. + """ + x = np.ascontiguousarray(x) + dtyp = _get_dtype(x.dtype) + x = x.astype(dtyp, copy=False) + if check_finite and not np.isfinite(x).all(): + raise ValueError("Array must not contain infs or nans.") + return x + + +class BSpline(object): + r"""Univariate spline in the B-spline basis. + + .. math:: + + S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x) + + where :math:`B_{j, k; t}` are B-spline basis functions of degree `k` + and knots `t`. + + Parameters + ---------- + t : ndarray, shape (n+k+1,) + knots + c : ndarray, shape (>=n, ...) + spline coefficients + k : int + B-spline order + extrapolate : bool or 'periodic', optional + whether to extrapolate beyond the base interval, ``t[k] .. t[n]``, + or to return nans. + If True, extrapolates the first and last polynomial pieces of b-spline + functions active on the base interval. + If 'periodic', periodic extrapolation is used. + Default is True. + axis : int, optional + Interpolation axis. Default is zero. 
+ + Attributes + ---------- + t : ndarray + knot vector + c : ndarray + spline coefficients + k : int + spline degree + extrapolate : bool + If True, extrapolates the first and last polynomial pieces of b-spline + functions active on the base interval. + axis : int + Interpolation axis. + tck : tuple + A read-only equivalent of ``(self.t, self.c, self.k)`` + + Methods + ------- + __call__ + basis_element + derivative + antiderivative + integrate + construct_fast + + Notes + ----- + B-spline basis elements are defined via + + .. math:: + + B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,} + + B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x) + + \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x) + + **Implementation details** + + - At least ``k+1`` coefficients are required for a spline of degree `k`, + so that ``n >= k+1``. Additional coefficients, ``c[j]`` with + ``j > n``, are ignored. + + - B-spline basis elements of degree `k` form a partition of unity on the + *base interval*, ``t[k] <= x <= t[n]``. + + + Examples + -------- + + Translating the recursive definition of B-splines into Python code, we have: + + >>> def B(x, k, i, t): + ... if k == 0: + ... return 1.0 if t[i] <= x < t[i+1] else 0.0 + ... if t[i+k] == t[i]: + ... c1 = 0.0 + ... else: + ... c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t) + ... if t[i+k+1] == t[i+1]: + ... c2 = 0.0 + ... else: + ... c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t) + ... return c1 + c2 + + >>> def bspline(x, t, c, k): + ... n = len(t) - k - 1 + ... assert (n >= k+1) and (len(c) >= n) + ... return sum(c[i] * B(x, k, i, t) for i in range(n)) + + Note that this is an inefficient (if straightforward) way to + evaluate B-splines --- this spline class does it in an equivalent, + but much more efficient way. + + Here we construct a quadratic spline function on the base interval + ``2 <= x <= 4`` and compare with the naive way of evaluating the spline: + + >>> from scipy.interpolate import BSpline + >>> k = 2 + >>> t = [0, 1, 2, 3, 4, 5, 6] + >>> c = [-1, 2, 0, -1] + >>> spl = BSpline(t, c, k) + >>> spl(2.5) + array(1.375) + >>> bspline(2.5, t, c, k) + 1.375 + + Note that outside of the base interval results differ. This is because + `BSpline` extrapolates the first and last polynomial pieces of b-spline + functions active on the base interval. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> xx = np.linspace(1.5, 4.5, 50) + >>> ax.plot(xx, [bspline(x, t, c ,k) for x in xx], 'r-', lw=3, label='naive') + >>> ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline') + >>> ax.grid(True) + >>> ax.legend(loc='best') + >>> plt.show() + + + References + ---------- + .. [1] Tom Lyche and Knut Morken, Spline methods, + http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/ + .. [2] Carl de Boor, A practical guide to splines, Springer, 2001. 
+
+    """
+    def __init__(self, t, c, k, extrapolate=True, axis=0):
+        super(BSpline, self).__init__()
+
+        self.k = operator.index(k)
+        self.c = np.asarray(c)
+        self.t = np.ascontiguousarray(t, dtype=np.float64)
+
+        if extrapolate == 'periodic':
+            self.extrapolate = extrapolate
+        else:
+            self.extrapolate = bool(extrapolate)
+
+        n = self.t.shape[0] - self.k - 1
+
+        if not (0 <= axis < self.c.ndim):
+            raise ValueError("%s must be between 0 and %s" % (axis, c.ndim))
+
+        self.axis = axis
+        if axis != 0:
+            # roll the interpolation axis to be the first one in self.c
+            # More specifically, the target shape for self.c is (n, ...),
+            # and axis !=0 means that we have c.shape (..., n, ...)
+            #                                               ^
+            #                                              axis
+            self.c = np.rollaxis(self.c, axis)
+
+        if k < 0:
+            raise ValueError("Spline order cannot be negative.")
+        if self.t.ndim != 1:
+            raise ValueError("Knot vector must be one-dimensional.")
+        if n < self.k + 1:
+            raise ValueError("Need at least %d knots for degree %d" %
+                             (2*k + 2, k))
+        if (np.diff(self.t) < 0).any():
+            raise ValueError("Knots must be in a non-decreasing order.")
+        if len(np.unique(self.t[k:n+1])) < 2:
+            raise ValueError("Need at least two internal knots.")
+        if not np.isfinite(self.t).all():
+            raise ValueError("Knots should not have nans or infs.")
+        if self.c.ndim < 1:
+            raise ValueError("Coefficients must be at least 1-dimensional.")
+        if self.c.shape[0] < n:
+            raise ValueError("Knots, coefficients and degree are inconsistent.")
+
+        dt = _get_dtype(self.c.dtype)
+        self.c = np.ascontiguousarray(self.c, dtype=dt)
+
+    @classmethod
+    def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
+        """Construct a spline without making checks.
+
+        Accepts same parameters as the regular constructor. Input arrays
+        `t` and `c` must be of correct shape and dtype.
+        """
+        self = object.__new__(cls)
+        self.t, self.c, self.k = t, c, k
+        self.extrapolate = extrapolate
+        self.axis = axis
+        return self
+
+    @property
+    def tck(self):
+        """Equivalent to ``(self.t, self.c, self.k)`` (read-only).
+        """
+        return self.t, self.c, self.k
+
+    @classmethod
+    def basis_element(cls, t, extrapolate=True):
+        """Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.
+
+        Parameters
+        ----------
+        t : ndarray, shape (k+1,)
+            internal knots
+        extrapolate : bool or 'periodic', optional
+            whether to extrapolate beyond the base interval, ``t[0] .. t[k+1]``,
+            or to return nans.
+            If 'periodic', periodic extrapolation is used.
+            Default is True.
+
+        Returns
+        -------
+        basis_element : callable
+            A callable representing a B-spline basis element for the knot
+            vector `t`.
+
+        Notes
+        -----
+        The order of the b-spline, `k`, is inferred from the length of `t` as
+        ``len(t)-2``. The knot vector is constructed by appending and prepending
+        ``k+1`` elements to internal knots `t`.
+
+        Examples
+        --------
+
+        Construct a cubic b-spline:
+
+        >>> from scipy.interpolate import BSpline
+        >>> b = BSpline.basis_element([0, 1, 2, 3, 4])
+        >>> k = b.k
+        >>> b.t[k:-k]
+        array([ 0.,  1.,  2.,  3.,  4.])
+        >>> k
+        3
+
+        Construct a second order b-spline on ``[0, 1, 1, 2]``, and compare
+        to its explicit form:
+
+        >>> t = [-1, 0, 1, 1, 2]
+        >>> b = BSpline.basis_element(t[1:])
+        >>> def f(x):
+        ...     return np.where(x < 1, x*x, (2.
- x)**2) + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0, 2, 51) + >>> ax.plot(x, b(x), 'g', lw=3) + >>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4) + >>> ax.grid(True) + >>> plt.show() + + """ + k = len(t) - 2 + t = _as_float_array(t) + t = np.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k] + c = np.zeros_like(t) + c[k] = 1. + return cls.construct_fast(t, c, k, extrapolate) + + def __call__(self, x, nu=0, extrapolate=None): + """ + Evaluate a spline function. + + Parameters + ---------- + x : array_like + points to evaluate the spline at. + nu: int, optional + derivative to evaluate (default is 0). + extrapolate : bool or 'periodic', optional + whether to extrapolate based on the first and last intervals + or return nans. If 'periodic', periodic extrapolation is used. + Default is `self.extrapolate`. + + Returns + ------- + y : array_like + Shape is determined by replacing the interpolation axis + in the coefficient array with the shape of `x`. + + """ + if extrapolate is None: + extrapolate = self.extrapolate + x = np.asarray(x) + x_shape, x_ndim = x.shape, x.ndim + x = np.ascontiguousarray(x.ravel(), dtype=np.float_) + + # With periodic extrapolation we map x to the segment + # [self.t[k], self.t[n]]. + if extrapolate == 'periodic': + n = self.t.size - self.k - 1 + x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] - + self.t[self.k]) + extrapolate = False + + out = np.empty((len(x), prod(self.c.shape[1:])), dtype=self.c.dtype) + self._ensure_c_contiguous() + self._evaluate(x, nu, extrapolate, out) + out = out.reshape(x_shape + self.c.shape[1:]) + if self.axis != 0: + # transpose to move the calculated values to the interpolation axis + l = list(range(out.ndim)) + l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:] + out = out.transpose(l) + return out + + def _evaluate(self, xp, nu, extrapolate, out): + _bspl.evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1), + self.k, xp, nu, extrapolate, out) + + def _ensure_c_contiguous(self): + """ + c and t may be modified by the user. The Cython code expects + that they are C contiguous. + + """ + if not self.t.flags.c_contiguous: + self.t = self.t.copy() + if not self.c.flags.c_contiguous: + self.c = self.c.copy() + + def derivative(self, nu=1): + """Return a b-spline representing the derivative. + + Parameters + ---------- + nu : int, optional + Derivative order. + Default is 1. + + Returns + ------- + b : BSpline object + A new instance representing the derivative. + + See Also + -------- + splder, splantider + + """ + c = self.c + # pad the c array if needed + ct = len(self.t) - len(c) + if ct > 0: + c = np.r_[c, np.zeros((ct,) + c.shape[1:])] + tck = _fitpack_impl.splder((self.t, c, self.k), nu) + return self.construct_fast(*tck, extrapolate=self.extrapolate, + axis=self.axis) + + def antiderivative(self, nu=1): + """Return a b-spline representing the antiderivative. + + Parameters + ---------- + nu : int, optional + Antiderivative order. Default is 1. + + Returns + ------- + b : BSpline object + A new instance representing the antiderivative. + + Notes + ----- + If antiderivative is computed and ``self.extrapolate='periodic'``, + it will be set to False for the returned instance. This is done because + the antiderivative is no longer periodic and its correct evaluation + outside of the initially given x interval is difficult. 
+ + See Also + -------- + splder, splantider + + """ + c = self.c + # pad the c array if needed + ct = len(self.t) - len(c) + if ct > 0: + c = np.r_[c, np.zeros((ct,) + c.shape[1:])] + tck = _fitpack_impl.splantider((self.t, c, self.k), nu) + + if self.extrapolate == 'periodic': + extrapolate = False + else: + extrapolate = self.extrapolate + + return self.construct_fast(*tck, extrapolate=extrapolate, + axis=self.axis) + + def integrate(self, a, b, extrapolate=None): + """Compute a definite integral of the spline. + + Parameters + ---------- + a : float + Lower limit of integration. + b : float + Upper limit of integration. + extrapolate : bool or 'periodic', optional + whether to extrapolate beyond the base interval, + ``t[k] .. t[-k-1]``, or take the spline to be zero outside of the + base interval. If 'periodic', periodic extrapolation is used. + If None (default), use `self.extrapolate`. + + Returns + ------- + I : array_like + Definite integral of the spline over the interval ``[a, b]``. + + Examples + -------- + Construct the linear spline ``x if x < 1 else 2 - x`` on the base + interval :math:`[0, 2]`, and integrate it + + >>> from scipy.interpolate import BSpline + >>> b = BSpline.basis_element([0, 1, 2]) + >>> b.integrate(0, 1) + array(0.5) + + If the integration limits are outside of the base interval, the result + is controlled by the `extrapolate` parameter + + >>> b.integrate(-1, 1) + array(0.0) + >>> b.integrate(-1, 1, extrapolate=False) + array(0.5) + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> ax.grid(True) + >>> ax.axvline(0, c='r', lw=5, alpha=0.5) # base interval + >>> ax.axvline(2, c='r', lw=5, alpha=0.5) + >>> xx = [-1, 1, 2] + >>> ax.plot(xx, b(xx)) + >>> plt.show() + + """ + if extrapolate is None: + extrapolate = self.extrapolate + + # Prepare self.t and self.c. + self._ensure_c_contiguous() + + # Swap integration bounds if needed. + sign = 1 + if b < a: + a, b = b, a + sign = -1 + n = self.t.size - self.k - 1 + + if extrapolate != "periodic" and not extrapolate: + # Shrink the integration interval, if needed. + a = max(a, self.t[self.k]) + b = min(b, self.t[n]) + + if self.c.ndim == 1: + # Fast path: use FITPACK's routine + # (cf _fitpack_impl.splint). + t, c, k = self.tck + integral, wrk = _fitpack._splint(t, c, k, a, b) + return integral * sign + + out = np.empty((2, prod(self.c.shape[1:])), dtype=self.c.dtype) + + # Compute the antiderivative. + c = self.c + ct = len(self.t) - len(c) + if ct > 0: + c = np.r_[c, np.zeros((ct,) + c.shape[1:])] + ta, ca, ka = _fitpack_impl.splantider((self.t, c, self.k), 1) + + if extrapolate == 'periodic': + # Split the integral into the part over period (can be several + # of them) and the remaining part. + + ts, te = self.t[self.k], self.t[n] + period = te - ts + interval = b - a + n_periods, left = divmod(interval, period) + + if n_periods > 0: + # Evaluate the difference of antiderivatives. + x = np.asarray([ts, te], dtype=np.float_) + _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, False, out) + integral = out[1] - out[0] + integral *= n_periods + else: + integral = np.zeros((1, prod(self.c.shape[1:])), + dtype=self.c.dtype) + + # Map a to [ts, te], b is always a + left. + a = ts + (a - ts) % period + b = a + left + + # If b <= te then we need to integrate over [a, b], otherwise + # over [a, te] and then from ts over what remains.
+ if b <= te: + x = np.asarray([a, b], dtype=np.float_) + _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, False, out) + integral += out[1] - out[0] + else: + x = np.asarray([a, te], dtype=np.float_) + _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, False, out) + integral += out[1] - out[0] + + x = np.asarray([ts, ts + b - te], dtype=np.float_) + _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, False, out) + integral += out[1] - out[0] + else: + # Evaluate the difference of antiderivatives. + x = np.asarray([a, b], dtype=np.float_) + _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, extrapolate, out) + integral = out[1] - out[0] + + integral *= sign + return integral.reshape(ca.shape[1:]) + + +################################# +# Interpolating spline helpers # +################################# + +def _not_a_knot(x, k): + """Given data x, construct the knot vector w/ not-a-knot BC. + cf de Boor, XIII(12).""" + x = np.asarray(x) + if k % 2 != 1: + raise ValueError("Odd degree for now only. Got %s." % k) + + m = (k - 1) // 2 + t = x[m+1:-m-1] + t = np.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)] + return t + + +def _augknt(x, k): + """Construct a knot vector appropriate for the order-k interpolation.""" + return np.r_[(x[0],)*k, x, (x[-1],)*k] + + +def _convert_string_aliases(deriv, target_shape): + if isinstance(deriv, string_types): + if deriv == "clamped": + deriv = [(1, np.zeros(target_shape))] + elif deriv == "natural": + deriv = [(2, np.zeros(target_shape))] + else: + raise ValueError("Unknown boundary condition : %s" % deriv) + return deriv + + +def _process_deriv_spec(deriv): + if deriv is not None: + try: + ords, vals = zip(*deriv) + except TypeError: + msg = ("Derivatives, `bc_type`, should be specified as a pair of " + "iterables of pairs of (order, value).") + raise ValueError(msg) + else: + ords, vals = [], [] + return np.atleast_1d(ords, vals) + + +def make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0, + check_finite=True): + """Compute the (coefficients of) interpolating B-spline. + + Parameters + ---------- + x : array_like, shape (n,) + Abscissas. + y : array_like, shape (n, ...) + Ordinates. + k : int, optional + B-spline degree. Default is cubic, k=3. + t : array_like, shape (nt + k + 1,), optional. + Knots. + The number of knots needs to agree with the number of datapoints and + the number of derivatives at the edges. Specifically, ``nt - n`` must + equal ``len(deriv_l) + len(deriv_r)``. + bc_type : 2-tuple or None + Boundary conditions. + Default is None, which means choosing the boundary conditions + automatically. Otherwise, it must be a length-two tuple where the first + element sets the boundary conditions at ``x[0]`` and the second + element sets the boundary conditions at ``x[-1]``. Each of these must + be an iterable of pairs ``(order, value)`` which gives the values of + derivatives of specified orders at the given edge of the interpolation + interval. + Alternatively, the following string aliases are recognized: + + * ``"clamped"``: The first derivatives at the ends are zero. This is + equivalent to ``bc_type=([(1, 0.0)], [(1, 0.0)])``. + * ``"natural"``: The second derivatives at ends are zero. This is + equivalent to ``bc_type=([(2, 0.0)], [(2, 0.0)])``. + * ``"not-a-knot"`` (default): The first and second segments are the same + polynomial. This is equivalent to having ``bc_type=None``. + + axis : int, optional + Interpolation axis. Default is 0. 
+ check_finite : bool, optional + Whether to check that the input arrays contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default is True. + + Returns + ------- + b : a BSpline object of the degree ``k`` and with knots ``t``. + + Examples + -------- + + Use cubic interpolation on Chebyshev nodes: + + >>> def cheb_nodes(N): + ... jj = 2.*np.arange(N) + 1 + ... x = np.cos(np.pi * jj / 2 / N)[::-1] + ... return x + + >>> x = cheb_nodes(20) + >>> y = np.sqrt(1 - x**2) + + >>> from scipy.interpolate import BSpline, make_interp_spline + >>> b = make_interp_spline(x, y) + >>> np.allclose(b(x), y) + True + + Note that the default is a cubic spline with a not-a-knot boundary condition + + >>> b.k + 3 + + Here we use a 'natural' spline, with zero 2nd derivatives at edges: + + >>> l, r = [(2, 0.0)], [(2, 0.0)] + >>> b_n = make_interp_spline(x, y, bc_type=(l, r)) # or, bc_type="natural" + >>> np.allclose(b_n(x), y) + True + >>> x0, x1 = x[0], x[-1] + >>> np.allclose([b_n(x0, 2), b_n(x1, 2)], [0, 0]) + True + + Interpolation of parametric curves is also supported. As an example, we + compute a discretization of a snail curve in polar coordinates + + >>> phi = np.linspace(0, 2.*np.pi, 40) + >>> r = 0.3 + np.cos(phi) + >>> x, y = r*np.cos(phi), r*np.sin(phi) # convert to Cartesian coordinates + + Build an interpolating curve, parameterizing it by the angle + + >>> from scipy.interpolate import make_interp_spline + >>> spl = make_interp_spline(phi, np.c_[x, y]) + + Evaluate the interpolant on a finer grid (note that we transpose the result + to unpack it into a pair of x- and y-arrays) + + >>> phi_new = np.linspace(0, 2.*np.pi, 100) + >>> x_new, y_new = spl(phi_new).T + + Plot the result + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'o') + >>> plt.plot(x_new, y_new, '-') + >>> plt.show() + + See Also + -------- + BSpline : base class representing the B-spline objects + CubicSpline : a cubic spline in the polynomial basis + make_lsq_spline : a similar factory function for spline fitting + UnivariateSpline : a wrapper over FITPACK spline fitting routines + splrep : a wrapper over FITPACK spline fitting routines + + """ + # convert string aliases for the boundary conditions + if bc_type is None or bc_type == 'not-a-knot': + deriv_l, deriv_r = None, None + elif isinstance(bc_type, string_types): + deriv_l, deriv_r = bc_type, bc_type + else: + try: + deriv_l, deriv_r = bc_type + except TypeError: + raise ValueError("Unknown boundary condition: %s" % bc_type) + + y = np.asarray(y) + + if not -y.ndim <= axis < y.ndim: + raise ValueError("axis {} is out of bounds".format(axis)) + if axis < 0: + axis += y.ndim + + # special-case k=0 right away + if k == 0: + if any(_ is not None for _ in (t, deriv_l, deriv_r)): + raise ValueError("Too much info for k=0: t and bc_type can only " + "be None.") + x = _as_float_array(x, check_finite) + t = np.r_[x, x[-1]] + c = np.asarray(y) + c = np.rollaxis(c, axis) + c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype)) + return BSpline.construct_fast(t, c, k, axis=axis) + + # special-case k=1 (e.g., Lyche and Morken, Eq.(2.16)) + if k == 1 and t is None: + if not (deriv_l is None and deriv_r is None): + raise ValueError("Too much info for k=1: bc_type can only be None.") + x = _as_float_array(x, check_finite) + t = np.r_[x[0], x, x[-1]] + c = np.asarray(y) + c = np.rollaxis(c, axis) + c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype)) + 
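# A note on this k=1 branch: t repeats x[0] and x[-1] once each, so every
# basis element is a hat function with B_i(x[j]) = delta_ij, and taking the
# coefficients equal to y yields the piecewise-linear interpolant.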
return BSpline.construct_fast(t, c, k, axis=axis) + + x = _as_float_array(x, check_finite) + y = _as_float_array(y, check_finite) + k = operator.index(k) + + # come up with a sensible knot vector, if needed + if t is None: + if deriv_l is None and deriv_r is None: + if k == 2: + # OK, it's a bit ad hoc: Greville sites + omit + # 2nd and 2nd-to-last points, a la not-a-knot + t = (x[1:] + x[:-1]) / 2. + t = np.r_[(x[0],)*(k+1), + t[1:-1], + (x[-1],)*(k+1)] + else: + t = _not_a_knot(x, k) + else: + t = _augknt(x, k) + + t = _as_float_array(t, check_finite) + + y = np.rollaxis(y, axis) # now internally interp axis is zero + + if x.ndim != 1 or np.any(x[1:] <= x[:-1]): + raise ValueError("Expect x to be a 1-D sorted array_like.") + if k < 0: + raise ValueError("Expect non-negative k.") + if t.ndim != 1 or np.any(t[1:] < t[:-1]): + raise ValueError("Expect t to be a 1-D sorted array_like.") + if x.size != y.shape[0]: + raise ValueError('x and y are incompatible.') + if t.size < x.size + k + 1: + raise ValueError('Got %d knots, need at least %d.' % + (t.size, x.size + k + 1)) + if (x[0] < t[k]) or (x[-1] > t[-k]): + raise ValueError('Out of bounds w/ x = %s.' % x) + + # Here : deriv_l, r = [(nu, value), ...] + deriv_l = _convert_string_aliases(deriv_l, y.shape[1:]) + deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l) + nleft = deriv_l_ords.shape[0] + + deriv_r = _convert_string_aliases(deriv_r, y.shape[1:]) + deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r) + nright = deriv_r_ords.shape[0] + + # have `n` conditions for `nt` coefficients; need nt-n derivatives + n = x.size + nt = t.size - k - 1 + + if nt - n != nleft + nright: + raise ValueError("The number of derivatives at boundaries does not " + "match: expected %s, got %s+%s" % (nt-n, nleft, nright)) + + # set up the LHS: the collocation matrix + derivatives at boundaries + kl = ku = k + ab = np.zeros((2*kl + ku + 1, nt), dtype=np.float_, order='F') + _bspl._colloc(x, t, k, ab, offset=nleft) + if nleft > 0: + _bspl._handle_lhs_derivatives(t, k, x[0], ab, kl, ku, deriv_l_ords) + if nright > 0: + _bspl._handle_lhs_derivatives(t, k, x[-1], ab, kl, ku, deriv_r_ords, + offset=nt-nright) + + # set up the RHS: values to interpolate (+ derivative values, if any) + extradim = prod(y.shape[1:]) + rhs = np.empty((nt, extradim), dtype=y.dtype) + if nleft > 0: + rhs[:nleft] = deriv_l_vals.reshape(-1, extradim) + rhs[nleft:nt - nright] = y.reshape(-1, extradim) + if nright > 0: + rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim) + + # solve Ab @ x = rhs; this is the relevant part of linalg.solve_banded + if check_finite: + ab, rhs = map(np.asarray_chkfinite, (ab, rhs)) + gbsv, = get_lapack_funcs(('gbsv',), (ab, rhs)) + lu, piv, c, info = gbsv(kl, ku, ab, rhs, + overwrite_ab=True, overwrite_b=True) + + if info > 0: + raise LinAlgError("Collocation matrix is singular.") + elif info < 0: + raise ValueError('illegal value in %d-th argument of internal gbsv' % -info) + + c = np.ascontiguousarray(c.reshape((nt,) + y.shape[1:])) + return BSpline.construct_fast(t, c, k, axis=axis) + + +def make_lsq_spline(x, y, t, k=3, w=None, axis=0, check_finite=True): + r"""Compute the (coefficients of) an LSQ B-spline. + + The result is a linear combination + + .. math:: + + S(x) = \sum_j c_j B_j(x; t) + + of the B-spline basis elements, :math:`B_j(x; t)`, which minimizes + + .. math:: + + \sum_{j} \left( w_j \times (S(x_j) - y_j) \right)^2 + + Parameters + ---------- + x : array_like, shape (m,) + Abscissas. + y : array_like, shape (m, ...) + Ordinates.
+ t : array_like, shape (n + k + 1,). + Knots. + Knots and data points must satisfy Schoenberg-Whitney conditions. + k : int, optional + B-spline degree. Default is cubic, k=3. + w : array_like, shape (m,), optional + Weights for spline fitting. Must be positive. If ``None``, + then weights are all equal. + Default is ``None``. + axis : int, optional + Interpolation axis. Default is zero. + check_finite : bool, optional + Whether to check that the input arrays contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default is True. + + Returns + ------- + b : a BSpline object of the degree `k` with knots `t`. + + Notes + ----- + + The number of data points must be larger than the spline degree `k`. + + Knots `t` must satisfy the Schoenberg-Whitney conditions, + i.e., there must be a subset of data points ``x[j]`` such that + ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``. + + Examples + -------- + Generate some noisy data: + + >>> x = np.linspace(-3, 3, 50) + >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50) + + Now fit a smoothing cubic spline with pre-defined internal knots. + Here we make the knot vector (k+1)-regular by adding boundary knots: + + >>> from scipy.interpolate import make_lsq_spline, BSpline + >>> t = [-1, 0, 1] + >>> k = 3 + >>> t = np.r_[(x[0],)*(k+1), + ... t, + ... (x[-1],)*(k+1)] + >>> spl = make_lsq_spline(x, y, t, k) + + For comparison, we also construct an interpolating spline for the same + set of data: + + >>> from scipy.interpolate import make_interp_spline + >>> spl_i = make_interp_spline(x, y) + + Plot both: + + >>> import matplotlib.pyplot as plt + >>> xs = np.linspace(-3, 3, 100) + >>> plt.plot(x, y, 'ro', ms=5) + >>> plt.plot(xs, spl(xs), 'g-', lw=3, label='LSQ spline') + >>> plt.plot(xs, spl_i(xs), 'b-', lw=3, alpha=0.7, label='interp spline') + >>> plt.legend(loc='best') + >>> plt.show() + + **NaN handling**: If the input arrays contain ``nan`` values, the result is + not useful since the underlying spline fitting routines cannot deal with + ``nan``. A workaround is to use zero weights for not-a-number data points: + + >>> y[8] = np.nan + >>> w = np.isnan(y) + >>> y[w] = 0. + >>> tck = make_lsq_spline(x, y, t, w=~w) + + Notice the need to replace a ``nan`` by a numerical value (precise value + does not matter as long as the corresponding weight is zero.)
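The banded code below assembles the normal equations of this weighted problem. A dense, unoptimized sketch of the same linear algebra, purely for illustration (`dense_lsq_coeffs` is not a scipy function, and `x` is assumed to lie inside the base interval of `t`):

import numpy as np
from scipy.interpolate import BSpline

def dense_lsq_coeffs(x, y, t, k=3, w=None):
    x, y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)
    n = len(t) - k - 1
    # Collocation matrix A[j, i] = B_i(x[j]).
    A = np.zeros((x.size, n))
    for i in range(n):
        c = np.zeros(n)
        c[i] = 1.0
        A[:, i] = BSpline(t, c, k)(x)
    w = np.ones(x.size) if w is None else np.asarray(w, dtype=float)
    W = np.diag(w**2)   # residuals are multiplied by w before squaring
    # Solve (A.T W A) c = A.T W y, cf. the banded Cholesky path below.
    return np.linalg.solve(A.T.dot(W).dot(A), A.T.dot(W).dot(y))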
+ + See Also + -------- + BSpline : base class representing the B-spline objects + make_interp_spline : a similar factory function for interpolating splines + LSQUnivariateSpline : a FITPACK-based spline fitting routine + splrep : a FITPACK-based fitting routine + + """ + x = _as_float_array(x, check_finite) + y = _as_float_array(y, check_finite) + t = _as_float_array(t, check_finite) + if w is not None: + w = _as_float_array(w, check_finite) + else: + w = np.ones_like(x) + k = operator.index(k) + + if not -y.ndim <= axis < y.ndim: + raise ValueError("axis {} is out of bounds".format(axis)) + if axis < 0: + axis += y.ndim + + y = np.rollaxis(y, axis) # now internally interp axis is zero + + if x.ndim != 1 or np.any(x[1:] - x[:-1] <= 0): + raise ValueError("Expect x to be a 1-D sorted array_like.") + if x.shape[0] < k+1: + raise ValueError("Need more x points.") + if k < 0: + raise ValueError("Expect non-negative k.") + if t.ndim != 1 or np.any(t[1:] - t[:-1] < 0): + raise ValueError("Expect t to be a 1-D sorted array_like.") + if x.size != y.shape[0]: + raise ValueError('x & y are incompatible.') + if k > 0 and np.any((x < t[k]) | (x > t[-k])): + raise ValueError('Out of bounds w/ x = %s.' % x) + if x.size != w.size: + raise ValueError('Incompatible weights.') + + # number of coefficients + n = t.size - k - 1 + + # construct A.T @ A and rhs with A the collocation matrix, and + # rhs = A.T @ y for solving the LSQ problem ``A.T @ A @ c = A.T @ y`` + lower = True + extradim = prod(y.shape[1:]) + ab = np.zeros((k+1, n), dtype=np.float_, order='F') + rhs = np.zeros((n, extradim), dtype=y.dtype, order='F') + _bspl._norm_eq_lsq(x, t, k, + y.reshape(-1, extradim), + w, + ab, rhs) + rhs = rhs.reshape((n,) + y.shape[1:]) + + # have observation matrix & rhs, can solve the LSQ problem + cho_decomp = cholesky_banded(ab, overwrite_ab=True, lower=lower, + check_finite=check_finite) + c = cho_solve_banded((cho_decomp, lower), rhs, overwrite_b=True, + check_finite=check_finite) + + c = np.ascontiguousarray(c) + return BSpline.construct_fast(t, c, k, axis=axis) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_bsplines.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_bsplines.pyc new file mode 100644 index 0000000..4dfa514 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_bsplines.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_cubic.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_cubic.py new file mode 100644 index 0000000..63e6640 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_cubic.py @@ -0,0 +1,770 @@ +"""Interpolation algorithms using piecewise cubic polynomials.""" + +from __future__ import division, print_function, absolute_import + +import numpy as np + +from scipy._lib.six import string_types + +from . import BPoly, PPoly +from .polyint import _isscalar +from scipy._lib._util import _asarray_validated +from scipy.linalg import solve_banded, solve + + +__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip", + "Akima1DInterpolator", "CubicSpline"] + + +class PchipInterpolator(BPoly): + r"""PCHIP 1-d monotonic cubic interpolation. + + `x` and `y` are arrays of values used to approximate some function f, + with ``y = f(x)``. The interpolant uses monotonic cubic splines + to find the value of new points. (PCHIP stands for Piecewise Cubic + Hermite Interpolating Polynomial). 
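A quick sketch of that shape-preserving behaviour on made-up monotone data; the numbers are illustrative, but PCHIP stays inside the data range where an ordinary cubic spline may overshoot:

import numpy as np
from scipy.interpolate import PchipInterpolator, CubicSpline

x = np.array([0., 1., 2., 3., 4.])
y = np.array([0., 0., 1., 1., 1.])    # monotone (non-decreasing) data
xs = np.linspace(0., 4., 201)

p = PchipInterpolator(x, y)
s = CubicSpline(x, y)
print(p(xs).min(), p(xs).max())       # stays within [0, 1]
print(s(xs).min(), s(xs).max())       # typically under- and overshoots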
+ + Parameters + ---------- + x : ndarray + A 1-D array of monotonically increasing real values. `x` cannot + include duplicate values (otherwise f is overspecified) + y : ndarray + A 1-D array of real values. `y`'s length along the interpolation + axis must be equal to the length of `x`. If N-D array, use `axis` + parameter to select correct axis. + axis : int, optional + Axis in the y array corresponding to the x-coordinate values. + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + + Methods + ------- + __call__ + derivative + antiderivative + roots + + See Also + -------- + Akima1DInterpolator + CubicSpline + BPoly + + Notes + ----- + The interpolator preserves monotonicity in the interpolation data and does + not overshoot if the data is not smooth. + + The first derivatives are guaranteed to be continuous, but the second + derivatives may jump at :math:`x_k`. + + The derivatives at the points :math:`x_k`, :math:`f'_k`, are determined + by using the PCHIP algorithm [1]_. + + Let :math:`h_k = x_{k+1} - x_k` and :math:`d_k = (y_{k+1} - y_k) / h_k` + be the slopes at internal points :math:`x_k`. + If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of + them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the + weighted harmonic mean + + .. math:: + + \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k} + + where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`. + + The end slopes are set using a one-sided scheme [2]_. + + + References + ---------- + .. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation, + SIAM J. Numer. Anal., 17(2), 238 (1980). + :doi:`10.1137/0717021`. + .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004. + :doi:`10.1137/1.9780898717952` + + + """ + def __init__(self, x, y, axis=0, extrapolate=None): + x = _asarray_validated(x, check_finite=False, as_inexact=True) + y = _asarray_validated(y, check_finite=False, as_inexact=True) + + axis = axis % y.ndim + + xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1)) + yp = np.rollaxis(y, axis) + + dk = self._find_derivatives(xp, yp) + data = np.hstack((yp[:, None, ...], dk[:, None, ...])) + + _b = BPoly.from_derivatives(x, data, orders=None) + super(PchipInterpolator, self).__init__(_b.c, _b.x, + extrapolate=extrapolate) + self.axis = axis + + def roots(self): + """ + Return the roots of the interpolated function. + """ + return (PPoly.from_bernstein_basis(self)).roots() + + @staticmethod + def _edge_case(h0, h1, m0, m1): + # one-sided three-point estimate for the derivative + d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1) + + # try to preserve shape + mask = np.sign(d) != np.sign(m0) + mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0)) + mmm = (~mask) & mask2 + + d[mask] = 0.
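# Shape-preservation safeguards: wrong-sign estimates were zeroed above
# (mask); next, where the slopes change sign across the edge, the estimate
# is capped at three times the adjacent slope (mmm).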
+ d[mmm] = 3.*m0[mmm] + + return d + + @staticmethod + def _find_derivatives(x, y): + # Determine the derivatives d_k at the points x_k using the + # PCHIP algorithm: + # Let m_k be the slope of the kth segment (between k and k+1). + # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}), then d_k == 0; + # else use the weighted harmonic mean: + # w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1} + # 1/d_k = 1/(w_1 + w_2)*(w_1 / m_{k-1} + w_2 / m_k) + # where h_k is the spacing between x_k and x_{k+1} + y_shape = y.shape + if y.ndim == 1: + # So that _edge_case doesn't end up assigning to scalars + x = x[:, None] + y = y[:, None] + + hk = x[1:] - x[:-1] + mk = (y[1:] - y[:-1]) / hk + + if y.shape[0] == 2: + # edge case: only have two points, use linear interpolation + dk = np.zeros_like(y) + dk[0] = mk + dk[1] = mk + return dk.reshape(y_shape) + + smk = np.sign(mk) + condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0) + + w1 = 2*hk[1:] + hk[:-1] + w2 = hk[1:] + 2*hk[:-1] + + # values where division by zero occurs will be excluded + # by 'condition' afterwards + with np.errstate(divide='ignore'): + whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2) + + dk = np.zeros_like(y) + dk[1:-1][condition] = 0.0 + dk[1:-1][~condition] = 1.0 / whmean[~condition] + + # special case endpoints, as suggested in + # Cleve Moler, Numerical Computing with MATLAB, Chap 3.4 + dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1]) + dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2]) + + return dk.reshape(y_shape) + + +def pchip_interpolate(xi, yi, x, der=0, axis=0): + """ + Convenience function for pchip interpolation. + xi and yi are arrays of values used to approximate some function f, + with ``yi = f(xi)``. The interpolant uses monotonic cubic splines + to find the value of new points x and the derivatives there. + + See `PchipInterpolator` for details. + + Parameters + ---------- + xi : array_like + A sorted list of x-coordinates, of length N. + yi : array_like + A 1-D array of real values. `yi`'s length along the interpolation + axis must be equal to the length of `xi`. If N-D array, use axis + parameter to select correct axis. + x : scalar or array_like + Of length M. + der : int or list, optional + Derivatives to extract. The 0-th derivative can be included to + return the function value. + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values. + + See Also + -------- + PchipInterpolator + + Returns + ------- + y : scalar or array_like + The result, of length R or length M or M by R. + + """ + P = PchipInterpolator(xi, yi, axis=axis) + + if der == 0: + return P(x) + elif _isscalar(der): + return P.derivative(der)(x) + else: + return [P.derivative(nu)(x) for nu in der] + + +# Backwards compatibility +pchip = PchipInterpolator + + +class Akima1DInterpolator(PPoly): + """ + Akima interpolator + + Fit piecewise cubic polynomials, given vectors x and y. The interpolation + method by Akima uses a continuously differentiable sub-spline built from + piecewise cubic polynomials. The resultant curve passes through the given + data points and will appear smooth and natural. + + Parameters + ---------- + x : ndarray, shape (m, ) + 1-D array of monotonically increasing real values. + y : ndarray, shape (m, ...) + N-D array of real values. The length of `y` along the first axis must + be equal to the length of `x`. + axis : int, optional + Specifies the axis of `y` along which to interpolate.
Interpolation + defaults to the first axis of `y`. + + Methods + ------- + __call__ + derivative + antiderivative + roots + + See Also + -------- + PchipInterpolator + CubicSpline + PPoly + + Notes + ----- + .. versionadded:: 0.14 + + Use only for precise data, as the fitted curve passes through the given + points exactly. This routine is useful for plotting a pleasingly smooth + curve through a few given points. + + References + ---------- + [1] A new method of interpolation and smooth curve fitting based + on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4), + 589-602. + + """ + + def __init__(self, x, y, axis=0): + # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see + # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation + x, y = map(np.asarray, (x, y)) + axis = axis % y.ndim + + if np.any(np.diff(x) < 0.): + raise ValueError("x must be strictly ascending") + if x.ndim != 1: + raise ValueError("x must be 1-dimensional") + if x.size < 2: + raise ValueError("at least 2 breakpoints are needed") + if x.size != y.shape[axis]: + raise ValueError("x.shape must equal y.shape[%s]" % axis) + + # move interpolation axis to front + y = np.rollaxis(y, axis) + + # determine slopes between breakpoints + m = np.empty((x.size + 3, ) + y.shape[1:]) + dx = np.diff(x) + dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)] + m[2:-2] = np.diff(y, axis=0) / dx + + # add two additional points on the left ... + m[1] = 2. * m[2] - m[3] + m[0] = 2. * m[1] - m[2] + # ... and on the right + m[-2] = 2. * m[-3] - m[-4] + m[-1] = 2. * m[-2] - m[-3] + + # if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined. + # This is the fill value: + t = .5 * (m[3:] + m[:-3]) + # get the denominator of the slope t + dm = np.abs(np.diff(m, axis=0)) + f1 = dm[2:] + f2 = dm[:-2] + f12 = f1 + f2 + # This is the mask of where the slope at a breakpoint is defined: + ind = np.nonzero(f12 > 1e-9 * np.max(f12)) + x_ind, y_ind = ind[0], ind[1:] + # Set the slope at breakpoint + t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] + + f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind] + # calculate the higher order coefficients + c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx + d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2 + + coeff = np.zeros((4, x.size - 1) + y.shape[1:]) + coeff[3] = y[:-1] + coeff[2] = t[:-1] + coeff[1] = c + coeff[0] = d + + super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False) + self.axis = axis + + def extend(self, c, x, right=True): + raise NotImplementedError("Extending a 1D Akima interpolator is not " + "yet implemented") + + # These are inherited from PPoly, but they do not produce an Akima + # interpolator. Hence stub them out. + @classmethod + def from_spline(cls, tck, extrapolate=None): + raise NotImplementedError("This method does not make sense for " + "an Akima interpolator.") + + @classmethod + def from_bernstein_basis(cls, bp, extrapolate=None): + raise NotImplementedError("This method does not make sense for " + "an Akima interpolator.") + + +class CubicSpline(PPoly): + """Cubic spline data interpolator. + + Interpolate data with a piecewise cubic polynomial which is twice + continuously differentiable [1]_. The result is represented as a `PPoly` + instance with breakpoints matching the given data. + + Parameters + ---------- + x : array_like, shape (n,) + 1-d array containing values of the independent variable. + Values must be real, finite and in strictly increasing order.
+ y : array_like + Array containing values of the dependent variable. It can have + arbitrary number of dimensions, but the length along `axis` (see below) + must match the length of `x`. Values must be finite. + axis : int, optional + Axis along which `y` is assumed to be varying. Meaning that for + ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``. + Default is 0. + bc_type : string or 2-tuple, optional + Boundary condition type. Two additional equations, given by the + boundary conditions, are required to determine all coefficients of + polynomials on each segment [2]_. + + If `bc_type` is a string, then the specified condition will be applied + at both ends of a spline. Available conditions are: + + * 'not-a-knot' (default): The first and second segment at a curve end + are the same polynomial. It is a good default when there is no + information on boundary conditions. + * 'periodic': The interpolated function is assumed to be periodic + of period ``x[-1] - x[0]``. The first and last value of `y` must be + identical: ``y[0] == y[-1]``. This boundary condition will result in + ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``. + * 'clamped': The first derivatives at curve ends are zero. Assuming + a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition. + * 'natural': The second derivatives at curve ends are zero. Assuming + a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition. + + If `bc_type` is a 2-tuple, the first and the second value will be + applied at the curve start and end respectively. The tuple values can + be one of the previously mentioned strings (except 'periodic') or a + tuple `(order, deriv_value)` allowing one to specify arbitrary + derivatives at curve ends: + + * `order`: the derivative order, 1 or 2. + * `deriv_value`: array_like containing derivative values, shape must + be the same as `y`, excluding `axis` dimension. For example, if `y` + is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the + shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D + and have the shape (n0, n1). + extrapolate : {bool, 'periodic', None}, optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. If 'periodic', + periodic extrapolation is used. If None (default), `extrapolate` is + set to 'periodic' for ``bc_type='periodic'`` and to True otherwise. + + Attributes + ---------- + x : ndarray, shape (n,) + Breakpoints. The same `x` which was passed to the constructor. + c : ndarray, shape (4, n-1, ...) + Coefficients of the polynomials on each segment. The trailing + dimensions match the dimensions of `y`, excluding `axis`. For example, + if `y` is 1-d, then ``c[k, i]`` is a coefficient for + ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``. + axis : int + Interpolation axis. The same `axis` which was passed to the + constructor. + + Methods + ------- + __call__ + derivative + antiderivative + integrate + roots + + See Also + -------- + Akima1DInterpolator + PchipInterpolator + PPoly + + Notes + ----- + Parameters `bc_type` and `extrapolate` work independently, i.e. the former + controls only construction of a spline, and the latter only evaluation. + + When a boundary condition is 'not-a-knot' and n = 2, it is replaced by + a condition that the first derivative is equal to the linear interpolant + slope. When both boundary conditions are 'not-a-knot' and n = 3, the + solution is sought as a parabola passing through given points.
+ + When 'not-a-knot' boundary conditions are applied to both ends, the + resulting spline will be the same as returned by `splrep` (with ``s=0``) + and `InterpolatedUnivariateSpline`, but these two methods use a + representation in B-spline basis. + + .. versionadded:: 0.18.0 + + Examples + -------- + In this example the cubic spline is used to interpolate a sampled sinusoid. + You can see that the spline continuity property holds for the first and + second derivatives and is violated only for the third derivative. + + >>> from scipy.interpolate import CubicSpline + >>> import matplotlib.pyplot as plt + >>> x = np.arange(10) + >>> y = np.sin(x) + >>> cs = CubicSpline(x, y) + >>> xs = np.arange(-0.5, 9.6, 0.1) + >>> fig, ax = plt.subplots(figsize=(6.5, 4)) + >>> ax.plot(x, y, 'o', label='data') + >>> ax.plot(xs, np.sin(xs), label='true') + >>> ax.plot(xs, cs(xs), label="S") + >>> ax.plot(xs, cs(xs, 1), label="S'") + >>> ax.plot(xs, cs(xs, 2), label="S''") + >>> ax.plot(xs, cs(xs, 3), label="S'''") + >>> ax.set_xlim(-0.5, 9.5) + >>> ax.legend(loc='lower left', ncol=2) + >>> plt.show() + + In the second example, the unit circle is interpolated with a spline. A + periodic boundary condition is used. You can see that the first derivative + values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly + computed. Note that a circle cannot be exactly represented by a cubic + spline. To increase precision, more breakpoints would be required. + + >>> theta = 2 * np.pi * np.linspace(0, 1, 5) + >>> y = np.c_[np.cos(theta), np.sin(theta)] + >>> cs = CubicSpline(theta, y, bc_type='periodic') + >>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1])) + ds/dx=0.0 ds/dy=1.0 + >>> xs = 2 * np.pi * np.linspace(0, 1, 100) + >>> fig, ax = plt.subplots(figsize=(6.5, 4)) + >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data') + >>> ax.plot(np.cos(xs), np.sin(xs), label='true') + >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline') + >>> ax.axes.set_aspect('equal') + >>> ax.legend(loc='center') + >>> plt.show() + + The third example is the interpolation of a polynomial y = x**3 on the + interval 0 <= x <= 1. A cubic spline can represent this function exactly. + To achieve that we need to specify values and first derivatives at + endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and + y'(1) = 3. + + >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3))) + >>> x = np.linspace(0, 1) + >>> np.allclose(x**3, cs(x)) + True + + References + ---------- + .. [1] `Cubic Spline Interpolation + <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_ + on Wikiversity. + .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
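The constructor below feeds `solve_banded` a (3, n) array: row 0 holds the upper diagonal (padded on the left), row 1 the main diagonal, row 2 the lower diagonal (padded on the right). A tiny system with made-up numbers illustrates the layout:

import numpy as np
from scipy.linalg import solve_banded

#     [2 1 0]
# M = [1 2 1]
#     [0 1 2]
ab = np.array([[0., 1., 1.],    # upper diagonal
               [2., 2., 2.],    # main diagonal
               [1., 1., 0.]])   # lower diagonal
b = np.array([1., 2., 3.])
print(solve_banded((1, 1), ab, b))                   # [0.5  0.  1.5]
print(np.linalg.solve(np.array([[2., 1., 0.],
                                [1., 2., 1.],
                                [0., 1., 2.]]), b))  # same result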
+ """ + def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None): + x, y = map(np.asarray, (x, y)) + + if np.issubdtype(x.dtype, np.complexfloating): + raise ValueError("`x` must contain real values.") + + if np.issubdtype(y.dtype, np.complexfloating): + dtype = complex + else: + dtype = float + y = y.astype(dtype, copy=False) + + axis = axis % y.ndim + if x.ndim != 1: + raise ValueError("`x` must be 1-dimensional.") + if x.shape[0] < 2: + raise ValueError("`x` must contain at least 2 elements.") + if x.shape[0] != y.shape[axis]: + raise ValueError("The length of `y` along `axis`={0} doesn't " + "match the length of `x`".format(axis)) + + if not np.all(np.isfinite(x)): + raise ValueError("`x` must contain only finite values.") + if not np.all(np.isfinite(y)): + raise ValueError("`y` must contain only finite values.") + + dx = np.diff(x) + if np.any(dx <= 0): + raise ValueError("`x` must be strictly increasing sequence.") + + n = x.shape[0] + y = np.rollaxis(y, axis) + + bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis) + + if extrapolate is None: + if bc[0] == 'periodic': + extrapolate = 'periodic' + else: + extrapolate = True + + dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1)) + slope = np.diff(y, axis=0) / dxr + + # If bc is 'not-a-knot' this change is just a convention. + # If bc is 'periodic' then we already checked that y[0] == y[-1], + # and the spline is just a constant, we handle this case in the same + # way by setting the first derivatives to slope, which is 0. + if n == 2: + if bc[0] in ['not-a-knot', 'periodic']: + bc[0] = (1, slope[0]) + if bc[1] in ['not-a-knot', 'periodic']: + bc[1] = (1, slope[0]) + + # This is a very special case, when both conditions are 'not-a-knot' + # and n == 3. In this case 'not-a-knot' can't be handled regularly + # as the both conditions are identical. We handle this case by + # constructing a parabola passing through given points. + if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot': + A = np.zeros((3, 3)) # This is a standard matrix. + b = np.empty((3,) + y.shape[1:], dtype=y.dtype) + + A[0, 0] = 1 + A[0, 1] = 1 + A[1, 0] = dx[1] + A[1, 1] = 2 * (dx[0] + dx[1]) + A[1, 2] = dx[0] + A[2, 1] = 1 + A[2, 2] = 1 + + b[0] = 2 * slope[0] + b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0]) + b[2] = 2 * slope[1] + + s = solve(A, b, overwrite_a=True, overwrite_b=True, + check_finite=False) + else: + # Find derivative values at each x[i] by solving a tridiagonal + # system. + A = np.zeros((3, n)) # This is a banded matrix representation. + b = np.empty((n,) + y.shape[1:], dtype=y.dtype) + + # Filling the system for i=1..n-2 + # (x[i-1] - x[i]) * s[i-1] +\ + # 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\ + # (x[i] - x[i-1]) * s[i+1] =\ + # 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\ + # (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i])) + + A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal + A[0, 2:] = dx[:-1] # The upper diagonal + A[-1, :-2] = dx[1:] # The lower diagonal + + b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:]) + + bc_start, bc_end = bc + + if bc_start == 'periodic': + # Due to the periodicity, and because y[-1] = y[0], the linear + # system has (n-1) unknowns/equations instead of n: + A = A[:, 0:-1] + A[1, 0] = 2 * (dx[-1] + dx[0]) + A[0, 1] = dx[-1] + + b = b[:-1] + + # Also, due to the periodicity, the system is not tri-diagonal. + # We need to compute a "condensed" matrix of shape (n-2, n-2). 
+ # See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html + # for more explanations. + # The condensed matrix is obtained by removing the last column + # and last row of the (n-1, n-1) system matrix. The removed + # values are saved in scalar variables with the (n-1, n-1) + # system matrix indices forming their names: + a_m1_0 = dx[-2] # lower left corner value: A[-1, 0] + a_m1_m2 = dx[-1] + a_m1_m1 = 2 * (dx[-1] + dx[-2]) + a_m2_m1 = dx[-2] + a_0_m1 = dx[0] + + b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0]) + b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1]) + + Ac = A[:, :-1] + b1 = b[:-1] + b2 = np.zeros_like(b1) + b2[0] = -a_0_m1 + b2[-1] = -a_m2_m1 + + # s1 and s2 are the solutions of (n-2, n-2) system + s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False, + overwrite_b=False, check_finite=False) + + s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False, + overwrite_b=False, check_finite=False) + + # computing the s[n-2] solution: + s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) / + (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1])) + + # s is the solution of the (n, n) system: + s = np.empty((n,) + y.shape[1:], dtype=y.dtype) + s[:-2] = s1 + s_m1 * s2 + s[-2] = s_m1 + s[-1] = s[0] + else: + if bc_start == 'not-a-knot': + A[1, 0] = dx[1] + A[0, 1] = x[2] - x[0] + d = x[2] - x[0] + b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] + + dxr[0]**2 * slope[1]) / d + elif bc_start[0] == 1: + A[1, 0] = 1 + A[0, 1] = 0 + b[0] = bc_start[1] + elif bc_start[0] == 2: + A[1, 0] = 2 * dx[0] + A[0, 1] = dx[0] + b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0]) + + if bc_end == 'not-a-knot': + A[1, -1] = dx[-2] + A[-1, -2] = x[-1] - x[-3] + d = x[-1] - x[-3] + b[-1] = ((dxr[-1]**2*slope[-2] + + (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d) + elif bc_end[0] == 1: + A[1, -1] = 1 + A[-1, -2] = 0 + b[-1] = bc_end[1] + elif bc_end[0] == 2: + A[1, -1] = 2 * dx[-1] + A[-1, -2] = dx[-1] + b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2]) + + s = solve_banded((1, 1), A, b, overwrite_ab=True, + overwrite_b=True, check_finite=False) + + # Compute coefficients in PPoly form. + t = (s[:-1] + s[1:] - 2 * slope) / dxr + c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype) + c[0] = t / dxr + c[1] = (slope - s[:-1]) / dxr - t + c[2] = s[:-1] + c[3] = y[:-1] + + super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate) + self.axis = axis + + @staticmethod + def _validate_bc(bc_type, y, expected_deriv_shape, axis): + """Validate and prepare boundary conditions. + + Returns + ------- + validated_bc : 2-tuple + Boundary conditions for a curve start and end. + y : ndarray + y casted to complex dtype if one of the boundary conditions has + complex dtype. 
+ """ + if isinstance(bc_type, string_types): + if bc_type == 'periodic': + if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15): + raise ValueError( + "The first and last `y` point along axis {} must " + "be identical (within machine precision) when " + "bc_type='periodic'.".format(axis)) + + bc_type = (bc_type, bc_type) + + else: + if len(bc_type) != 2: + raise ValueError("`bc_type` must contain 2 elements to " + "specify start and end conditions.") + + if 'periodic' in bc_type: + raise ValueError("'periodic' `bc_type` is defined for both " + "curve ends and cannot be used with other " + "boundary conditions.") + + validated_bc = [] + for bc in bc_type: + if isinstance(bc, string_types): + if bc == 'clamped': + validated_bc.append((1, np.zeros(expected_deriv_shape))) + elif bc == 'natural': + validated_bc.append((2, np.zeros(expected_deriv_shape))) + elif bc in ['not-a-knot', 'periodic']: + validated_bc.append(bc) + else: + raise ValueError("bc_type={} is not allowed.".format(bc)) + else: + try: + deriv_order, deriv_value = bc + except Exception: + raise ValueError("A specified derivative value must be " + "given in the form (order, value).") + + if deriv_order not in [1, 2]: + raise ValueError("The specified derivative order must " + "be 1 or 2.") + + deriv_value = np.asarray(deriv_value) + if deriv_value.shape != expected_deriv_shape: + raise ValueError( + "`deriv_value` shape {} is not the expected one {}." + .format(deriv_value.shape, expected_deriv_shape)) + + if np.issubdtype(deriv_value.dtype, np.complexfloating): + y = y.astype(complex, copy=False) + + validated_bc.append((deriv_order, deriv_value)) + + return validated_bc, y diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_cubic.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_cubic.pyc new file mode 100644 index 0000000..d819784 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_cubic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_fitpack.so b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_fitpack.so new file mode 100755 index 0000000..7d369b3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_fitpack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_fitpack_impl.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_fitpack_impl.py new file mode 100644 index 0000000..4e48b24 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_fitpack_impl.py @@ -0,0 +1,1311 @@ +""" +fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx). + FITPACK is a collection of FORTRAN programs for curve and surface + fitting with splines and tensor product splines. + +See + https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html +or + http://www.netlib.org/dierckx/ + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson <pearu@cens.ioc.ee> +Permission to use, modify, and distribute this software is given under the +terms of the SciPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
+ +TODO: Make interfaces to the following fitpack functions: + For univariate splines: cocosp, concon, fourco, insert + For bivariate splines: profil, regrid, parsur, surev +""" +from __future__ import division, print_function, absolute_import + + +__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde', + 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider'] + +import warnings +import numpy as np +from . import _fitpack +from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose, + empty, iinfo, intc, asarray) + +# Try to replace _fitpack interface with +# f2py-generated version +from . import dfitpack + + +def _intc_overflow(x, msg=None): + """Cast the value to an intc and raise an OverflowError if the value + cannot fit. + """ + if x > iinfo(intc).max: + if msg is None: + msg = '%r cannot fit into an intc' % x + raise OverflowError(msg) + return intc(x) + + +_iermess = { + 0: ["The spline has a residual sum of squares fp such that " + "abs(fp-s)/s<=0.001", None], + -1: ["The spline is an interpolating spline (fp=0)", None], + -2: ["The spline is weighted least-squares polynomial of degree k.\n" + "fp gives the upper bound fp0 for the smoothing factor s", None], + 1: ["The required storage space exceeds the available storage space.\n" + "Probable causes: data (x,y) size is too small or smoothing parameter" + "\ns is too small (fp>s).", ValueError], + 2: ["A theoretically impossible result when finding a smoothing spline\n" + "with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)", + ValueError], + 3: ["The maximal number of iterations (20) allowed for finding smoothing\n" + "spline with fp=s has been reached. Probable cause: s too small.\n" + "(abs(fp-s)/s>0.001)", ValueError], + 10: ["Error on input data", ValueError], + 'unknown': ["An error occurred", TypeError] +} + +_iermess2 = { + 0: ["The spline has a residual sum of squares fp such that " + "abs(fp-s)/s<=0.001", None], + -1: ["The spline is an interpolating spline (fp=0)", None], + -2: ["The spline is weighted least-squares polynomial of degree kx and ky." + "\nfp gives the upper bound fp0 for the smoothing factor s", None], + -3: ["Warning. The coefficients of the spline have been computed as the\n" + "minimal norm least-squares solution of a rank deficient system.", + None], + 1: ["The required storage space exceeds the available storage space.\n" + "Probable causes: nxest or nyest too small or s is too small. (fp>s)", + ValueError], + 2: ["A theoretically impossible result when finding a smoothing spline\n" + "with fp = s. Probable causes: s too small or badly chosen eps.\n" + "(abs(fp-s)/s>0.001)", ValueError], + 3: ["The maximal number of iterations (20) allowed for finding smoothing\n" + "spline with fp=s has been reached. Probable cause: s too small.\n" + "(abs(fp-s)/s>0.001)", ValueError], + 4: ["No more knots can be added because the number of B-spline\n" + "coefficients already exceeds the number of data points m.\n" + "Probable causes: either s or m too small. (fp>s)", ValueError], + 5: ["No more knots can be added because the additional knot would\n" + "coincide with an old one. Probable cause: s too small or too large\n" + "a weight to an inaccurate data point. (fp>s)", ValueError], + 10: ["Error on input data", ValueError], + 11: ["rwrk2 too small, i.e. 
there is not enough workspace for computing\n" + "the minimal least-squares solution of a rank deficient system of\n" + "linear equations.", ValueError], + 'unknown': ["An error occurred", TypeError] +} + +_parcur_cache = {'t': array([], float), 'wrk': array([], float), + 'iwrk': array([], intc), 'u': array([], float), + 'ub': 0, 'ue': 1} + + +def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None, + full_output=0, nest=None, per=0, quiet=1): + """ + Find the B-spline representation of an N-dimensional curve. + + Given a list of N rank-1 arrays, `x`, which represent a curve in + N-dimensional space parametrized by `u`, find a smooth approximating + spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK. + + Parameters + ---------- + x : array_like + A list of sample vector arrays representing the curve. + w : array_like, optional + Strictly positive rank-1 array of weights the same length as `x[0]`. + The weights are used in computing the weighted least-squares spline + fit. If the errors in the `x` values have standard-deviation given by + the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``. + u : array_like, optional + An array of parameter values. If not given, these values are + calculated automatically as ``M = len(x[0])``, where + + v[0] = 0 + + v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`) + + u[i] = v[i] / v[M-1] + + ub, ue : int, optional + The end-points of the parameters interval. Defaults to + u[0] and u[-1]. + k : int, optional + Degree of the spline. Cubic splines are recommended. + Even values of `k` should be avoided especially with a small s-value. + ``1 <= k <= 5``, default is 3. + task : int, optional + If task==0 (default), find t and c for a given smoothing factor, s. + If task==1, find t and c for another value of the smoothing factor, s. + There must have been a previous call with task=0 or task=1 + for the same set of data. + If task=-1 find the weighted least square spline for a given set of + knots, t. + s : float, optional + A smoothing condition. The amount of smoothness is determined by + satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``, + where g(x) is the smoothed interpolation of (x,y). The user can + use `s` to control the trade-off between closeness and smoothness + of fit. Larger `s` means more smoothing while smaller values of `s` + indicate less smoothing. Recommended values of `s` depend on the + weights, w. If the weights represent the inverse of the + standard-deviation of y, then a good `s` value should be found in + the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of + data points in x, y, and w. + t : array_like, optional + The knots needed for task=-1. + full_output : int, optional + If non-zero, then return optional outputs. + nest : int, optional + An over-estimate of the total number of knots of the spline to + help in determining the storage space. By default nest=m/2. + A value of nest=m+k+1 is always large enough. + per : int, optional + If non-zero, data points are considered periodic with period + ``x[m-1] - x[0]`` and a smooth periodic spline approximation is + returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used. + quiet : int, optional + Non-zero to suppress messages. + This parameter is deprecated; use standard Python warning filters + instead. + + Returns + ------- + tck : tuple + A tuple (t,c,k) containing the vector of knots, the B-spline + coefficients, and the degree of the spline. + u : array + An array of the values of the parameter.
+ fp : float + The weighted sum of squared residuals of the spline approximation. + ier : int + An integer flag about splprep success. Success is indicated + if ier<=0. If ier in [1,2,3] an error occurred but was not raised. + Otherwise an error is raised. + msg : str + A message corresponding to the integer flag, ier. + + See Also + -------- + splrep, splev, sproot, spalde, splint, + bisplrep, bisplev + UnivariateSpline, BivariateSpline + + Notes + ----- + See `splev` for evaluation of the spline and its derivatives. + The number of dimensions N must be smaller than 11. + + References + ---------- + .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and + parametric splines, Computer Graphics and Image Processing", + 20 (1982) 171-184. + .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and + parametric splines", report tw55, Dept. Computer Science, + K.U.Leuven, 1981. + .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on + Numerical Analysis, Oxford University Press, 1993. + + """ + if task <= 0: + _parcur_cache = {'t': array([], float), 'wrk': array([], float), + 'iwrk': array([], intc), 'u': array([], float), + 'ub': 0, 'ue': 1} + x = atleast_1d(x) + idim, m = x.shape + if per: + for i in range(idim): + if x[i][0] != x[i][-1]: + if quiet < 2: + warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' % + (i, m, i))) + x[i][-1] = x[i][0] + if not 0 < idim < 11: + raise TypeError('0 < idim < 11 must hold') + if w is None: + w = ones(m, float) + else: + w = atleast_1d(w) + ipar = (u is not None) + if ipar: + _parcur_cache['u'] = u + if ub is None: + _parcur_cache['ub'] = u[0] + else: + _parcur_cache['ub'] = ub + if ue is None: + _parcur_cache['ue'] = u[-1] + else: + _parcur_cache['ue'] = ue + else: + _parcur_cache['u'] = zeros(m, float) + if not (1 <= k <= 5): + raise TypeError('1 <= k= %d <=5 must hold' % k) + if not (-1 <= task <= 1): + raise TypeError('task must be -1, 0 or 1') + if (not len(w) == m) or (ipar == 1 and (not len(u) == m)): + raise TypeError('Mismatch of input dimensions') + if s is None: + s = m - sqrt(2*m) + if t is None and task == -1: + raise TypeError('Knots must be given for task=-1') + if t is not None: + _parcur_cache['t'] = atleast_1d(t) + n = len(_parcur_cache['t']) + if task == -1 and n < 2*k + 2: + raise TypeError('There must be at least 2*k+2 knots for task=-1') + if m <= k: + raise TypeError('m > k must hold') + if nest is None: + nest = m + 2*k + + if (task >= 0 and s == 0) or (nest < 0): + if per: + nest = m + 2*k + else: + nest = m + k + 1 + nest = max(nest, 2*k + 3) + u = _parcur_cache['u'] + ub = _parcur_cache['ub'] + ue = _parcur_cache['ue'] + t = _parcur_cache['t'] + wrk = _parcur_cache['wrk'] + iwrk = _parcur_cache['iwrk'] + t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k, + task, ipar, s, t, nest, wrk, iwrk, per) + _parcur_cache['u'] = o['u'] + _parcur_cache['ub'] = o['ub'] + _parcur_cache['ue'] = o['ue'] + _parcur_cache['t'] = t + _parcur_cache['wrk'] = o['wrk'] + _parcur_cache['iwrk'] = o['iwrk'] + ier = o['ier'] + fp = o['fp'] + n = len(t) + u = o['u'] + c.shape = idim, n - k - 1 + tcku = [t, list(c), k], u + if ier <= 0 and not quiet: + warnings.warn(RuntimeWarning(_iermess[ier][0] + + "\tk=%d n=%d m=%d fp=%f s=%f" % + (k, len(t), m, fp, s))) + if ier > 0 and not full_output: + if ier in [1, 2, 3]: + warnings.warn(RuntimeWarning(_iermess[ier][0])) + else: + try: + raise _iermess[ier][1](_iermess[ier][0]) + except KeyError: + raise _iermess['unknown'][1](_iermess['unknown'][0]) +
if full_output: + try: + return tcku, fp, ier, _iermess[ier][0] + except KeyError: + return tcku, fp, ier, _iermess['unknown'][0] + else: + return tcku + + +_curfit_cache = {'t': array([], float), 'wrk': array([], float), + 'iwrk': array([], intc)} + + +def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None, + full_output=0, per=0, quiet=1): + """ + Find the B-spline representation of 1-D curve. + + Given the set of data points ``(x[i], y[i])`` determine a smooth spline + approximation of degree k on the interval ``xb <= x <= xe``. + + Parameters + ---------- + x, y : array_like + The data points defining a curve y = f(x). + w : array_like, optional + Strictly positive rank-1 array of weights the same length as x and y. + The weights are used in computing the weighted least-squares spline + fit. If the errors in the y values have standard-deviation given by the + vector d, then w should be 1/d. Default is ones(len(x)). + xb, xe : float, optional + The interval to fit. If None, these default to x[0] and x[-1] + respectively. + k : int, optional + The order of the spline fit. It is recommended to use cubic splines. + Even order splines should be avoided especially with small s values. + 1 <= k <= 5 + task : {1, 0, -1}, optional + If task==0 find t and c for a given smoothing factor, s. + + If task==1 find t and c for another value of the smoothing factor, s. + There must have been a previous call with task=0 or task=1 for the same + set of data (t will be stored and used internally) + + If task=-1 find the weighted least square spline for a given set of + knots, t. These should be interior knots as knots on the ends will be + added automatically. + s : float, optional + A smoothing condition. The amount of smoothness is determined by + satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x) + is the smoothed interpolation of (x,y). The user can use s to control + the tradeoff between closeness and smoothness of fit. Larger s means + more smoothing while smaller values of s indicate less smoothing. + Recommended values of s depend on the weights, w. If the weights + represent the inverse of the standard-deviation of y, then a good s + value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is + the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if + weights are supplied. s = 0.0 (interpolating) if no weights are + supplied. + t : array_like, optional + The knots needed for task=-1. If given then task is automatically set + to -1. + full_output : bool, optional + If non-zero, then return optional outputs. + per : bool, optional + If non-zero, data points are considered periodic with period x[m-1] - + x[0] and a smooth periodic spline approximation is returned. Values of + y[m-1] and w[m-1] are not used. + quiet : bool, optional + Non-zero to suppress messages. + This parameter is deprecated; use standard Python warning filters + instead. + + Returns + ------- + tck : tuple + (t,c,k) a tuple containing the vector of knots, the B-spline + coefficients, and the degree of the spline. + fp : array, optional + The weighted sum of squared residuals of the spline approximation. + ier : int, optional + An integer flag about splrep success. Success is indicated if ier<=0. + If ier in [1,2,3] an error occurred but was not raised. Otherwise an + error is raised. + msg : str, optional + A message corresponding to the integer flag, ier. + + Notes + ----- + See splev for evaluation of the spline and its derivatives. Uses the + FORTRAN routine curfit from FITPACK.
+
+    The user is responsible for assuring that the values of *x* are unique.
+    Otherwise, *splrep* will not return sensible results.
+
+    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
+    i.e., there must be a subset of data points ``x[j]`` such that
+    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
+
+    References
+    ----------
+    Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
+
+    .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
+       integration of experimental data using spline functions",
+       J.Comp.Appl.Maths 1 (1975) 165-184.
+    .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
+       grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
+       1286-1304.
+    .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
+       functions", report tw54, Dept. Computer Science, K.U. Leuven, 1981.
+    .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+       Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import splev, splrep
+    >>> x = np.linspace(0, 10, 10)
+    >>> y = np.sin(x)
+    >>> tck = splrep(x, y)
+    >>> x2 = np.linspace(0, 10, 200)
+    >>> y2 = splev(x2, tck)
+    >>> plt.plot(x, y, 'o', x2, y2)
+    >>> plt.show()
+
+    """
+    if task <= 0:
+        _curfit_cache = {}
+    x, y = map(atleast_1d, [x, y])
+    m = len(x)
+    if w is None:
+        w = ones(m, float)
+        if s is None:
+            s = 0.0
+    else:
+        w = atleast_1d(w)
+        if s is None:
+            s = m - sqrt(2*m)
+    if not len(w) == m:
+        raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
+    if (m != len(y)) or (m != len(w)):
+        raise TypeError('Lengths of the first three arguments (x,y,w) must '
+                        'be equal')
+    if not (1 <= k <= 5):
+        raise TypeError('Given degree of the spline (k=%d) is not supported. 
' + '(1<=k<=5)' % k) + if m <= k: + raise TypeError('m > k must hold') + if xb is None: + xb = x[0] + if xe is None: + xe = x[-1] + if not (-1 <= task <= 1): + raise TypeError('task must be -1, 0 or 1') + if t is not None: + task = -1 + if task == -1: + if t is None: + raise TypeError('Knots must be given for task=-1') + numknots = len(t) + _curfit_cache['t'] = empty((numknots + 2*k + 2,), float) + _curfit_cache['t'][k+1:-k-1] = t + nest = len(_curfit_cache['t']) + elif task == 0: + if per: + nest = max(m + 2*k, 2*k + 3) + else: + nest = max(m + k + 1, 2*k + 3) + t = empty((nest,), float) + _curfit_cache['t'] = t + if task <= 0: + if per: + _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float) + else: + _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float) + _curfit_cache['iwrk'] = empty((nest,), intc) + try: + t = _curfit_cache['t'] + wrk = _curfit_cache['wrk'] + iwrk = _curfit_cache['iwrk'] + except KeyError: + raise TypeError("must call with task=1 only after" + " call with task=0,-1") + if not per: + n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk, + xb, xe, k, s) + else: + n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s) + tck = (t[:n], c[:n], k) + if ier <= 0 and not quiet: + _mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" % + (k, len(t), m, fp, s)) + warnings.warn(RuntimeWarning(_mess)) + if ier > 0 and not full_output: + if ier in [1, 2, 3]: + warnings.warn(RuntimeWarning(_iermess[ier][0])) + else: + try: + raise _iermess[ier][1](_iermess[ier][0]) + except KeyError: + raise _iermess['unknown'][1](_iermess['unknown'][0]) + if full_output: + try: + return tck, fp, ier, _iermess[ier][0] + except KeyError: + return tck, fp, ier, _iermess['unknown'][0] + else: + return tck + + +def splev(x, tck, der=0, ext=0): + """ + Evaluate a B-spline or its derivatives. + + Given the knots and coefficients of a B-spline representation, evaluate + the value of the smoothing polynomial and its derivatives. This is a + wrapper around the FORTRAN routines splev and splder of FITPACK. + + Parameters + ---------- + x : array_like + An array of points at which to return the value of the smoothed + spline or its derivatives. If `tck` was returned from `splprep`, + then the parameter values, u should be given. + tck : tuple + A sequence of length 3 returned by `splrep` or `splprep` containing + the knots, coefficients, and degree of the spline. + der : int, optional + The order of derivative of the spline to compute (must be less than + or equal to k). + ext : int, optional + Controls the value returned for elements of ``x`` not in the + interval defined by the knot sequence. + + * if ext=0, return the extrapolated value. + * if ext=1, return 0 + * if ext=2, raise a ValueError + * if ext=3, return the boundary value. + + The default value is 0. + + Returns + ------- + y : ndarray or list of ndarrays + An array of values representing the spline function evaluated at + the points in ``x``. If `tck` was returned from `splprep`, then this + is a list of arrays representing the curve in N-dimensional space. + + See Also + -------- + splprep, splrep, sproot, spalde, splint + bisplrep, bisplev + + References + ---------- + .. [1] C. de Boor, "On calculating with b-splines", J. Approximation + Theory, 6, p.50-62, 1972. + .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths + Applics, 10, p.134-149, 1972. + .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs + on Numerical Analysis, Oxford University Press, 1993. 
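+
+    Examples
+    --------
+    A minimal sketch (an added example, not part of the original FITPACK
+    docs): evaluate a cubic spline and its first derivative at one point.
+
+    >>> from scipy.interpolate import splrep, splev
+    >>> x = np.linspace(0, 10, 30)
+    >>> tck = splrep(x, np.sin(x))
+    >>> y1 = splev(2.5, tck)           # spline value, close to sin(2.5)
+    >>> dy1 = splev(2.5, tck, der=1)   # first derivative, close to cos(2.5)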
+
+    """
+    t, c, k = tck
+    try:
+        c[0][0]
+        parametric = True
+    except Exception:
+        parametric = False
+    if parametric:
+        return list(map(lambda c, x=x, t=t, k=k, der=der:
+                        splev(x, [t, c, k], der, ext), c))
+    else:
+        if not (0 <= der <= k):
+            raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
+        if ext not in (0, 1, 2, 3):
+            raise ValueError("ext = %s not in (0, 1, 2, 3)" % ext)
+
+        x = asarray(x)
+        shape = x.shape
+        x = atleast_1d(x).ravel()
+        y, ier = _fitpack._spl_(x, der, t, c, k, ext)
+
+        if ier == 10:
+            raise ValueError("Invalid input data")
+        if ier == 1:
+            raise ValueError("Found x value not in the domain")
+        if ier:
+            raise TypeError("An error occurred")
+
+        return y.reshape(shape)
+
+
+def splint(a, b, tck, full_output=0):
+    """
+    Evaluate the definite integral of a B-spline.
+
+    Given the knots and coefficients of a B-spline, evaluate the definite
+    integral of the smoothing polynomial between two given points.
+
+    Parameters
+    ----------
+    a, b : float
+        The end-points of the integration interval.
+    tck : tuple
+        A tuple (t,c,k) containing the vector of knots, the B-spline
+        coefficients, and the degree of the spline (see `splev`).
+    full_output : int, optional
+        Non-zero to return optional output.
+
+    Returns
+    -------
+    integral : float
+        The resulting integral.
+    wrk : ndarray
+        An array containing the integrals of the normalized B-splines
+        defined on the set of knots.
+        (Only returned if `full_output` is non-zero.)
+
+    Notes
+    -----
+    splint silently assumes that the spline function is zero outside the data
+    interval (a, b).
+
+    See Also
+    --------
+    splprep, splrep, sproot, spalde, splev
+    bisplrep, bisplev
+    UnivariateSpline, BivariateSpline
+
+    References
+    ----------
+    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of
+       b-splines", J. Inst. Maths Applics, 17, p.37-41, 1976.
+    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
+       on Numerical Analysis, Oxford University Press, 1993.
+
+    """
+    t, c, k = tck
+    try:
+        c[0][0]
+        parametric = True
+    except Exception:
+        parametric = False
+    if parametric:
+        return list(map(lambda c, a=a, b=b, t=t, k=k:
+                        splint(a, b, [t, c, k]), c))
+    else:
+        aint, wrk = _fitpack._splint(t, c, k, a, b)
+        if full_output:
+            return aint, wrk
+        else:
+            return aint
+
+
+def sproot(tck, mest=10):
+    """
+    Find the roots of a cubic B-spline.
+
+    Given the knots (>=8) and coefficients of a cubic B-spline return the
+    roots of the spline.
+
+    Parameters
+    ----------
+    tck : tuple
+        A tuple (t,c,k) containing the vector of knots,
+        the B-spline coefficients, and the degree of the spline.
+        The number of knots must be >= 8, and the degree must be 3.
+        The knots must be a monotonically increasing sequence.
+    mest : int, optional
+        An estimate of the number of zeros (Default is 10).
+
+    Returns
+    -------
+    zeros : ndarray
+        An array giving the roots of the spline.
+
+    See Also
+    --------
+    splprep, splrep, splint, spalde, splev
+    bisplrep, bisplev
+    UnivariateSpline, BivariateSpline
+
+    References
+    ----------
+    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+       Theory, 6, p.50-62, 1972.
+    .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+       Applics, 10, p.134-149, 1972.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+       on Numerical Analysis, Oxford University Press, 1993.
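+
+    Examples
+    --------
+    A hedged sketch (an added example, not from the original docs): the
+    roots of an interpolating spline through samples of sin(x) should lie
+    near multiples of pi.
+
+    >>> from scipy.interpolate import splrep, sproot
+    >>> x = np.linspace(0, 10, 70)
+    >>> tck = splrep(x, np.sin(x))
+    >>> roots = sproot(tck)   # roots near k*pi for k = 0, 1, 2, 3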
+ + """ + t, c, k = tck + if k != 3: + raise ValueError("sproot works only for cubic (k=3) splines") + try: + c[0][0] + parametric = True + except Exception: + parametric = False + if parametric: + return list(map(lambda c, t=t, k=k, mest=mest: + sproot([t, c, k], mest), c)) + else: + if len(t) < 8: + raise TypeError("The number of knots %d>=8" % len(t)) + z, ier = _fitpack._sproot(t, c, k, mest) + if ier == 10: + raise TypeError("Invalid input data. " + "t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.") + if ier == 0: + return z + if ier == 1: + warnings.warn(RuntimeWarning("The number of zeros exceeds mest")) + return z + raise TypeError("Unknown error") + + +def spalde(x, tck): + """ + Evaluate all derivatives of a B-spline. + + Given the knots and coefficients of a cubic B-spline compute all + derivatives up to order k at a point (or set of points). + + Parameters + ---------- + x : array_like + A point or a set of points at which to evaluate the derivatives. + Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`. + tck : tuple + A tuple (t,c,k) containing the vector of knots, + the B-spline coefficients, and the degree of the spline. + + Returns + ------- + results : {ndarray, list of ndarrays} + An array (or a list of arrays) containing all derivatives + up to order k inclusive for each point `x`. + + See Also + -------- + splprep, splrep, splint, sproot, splev, bisplrep, bisplev, + UnivariateSpline, BivariateSpline + + References + ---------- + .. [1] de Boor C : On calculating with b-splines, J. Approximation Theory + 6 (1972) 50-62. + .. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths + applics 10 (1972) 134-149. + .. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on + Numerical Analysis, Oxford University Press, 1993. + + """ + t, c, k = tck + try: + c[0][0] + parametric = True + except Exception: + parametric = False + if parametric: + return list(map(lambda c, x=x, t=t, k=k: + spalde(x, [t, c, k]), c)) + else: + x = atleast_1d(x) + if len(x) > 1: + return list(map(lambda x, tck=tck: spalde(x, tck), x)) + d, ier = _fitpack._spalde(t, c, k, x[0]) + if ier == 0: + return d + if ier == 10: + raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.") + raise TypeError("Unknown error") + +# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None, +# full_output=0,nest=None,per=0,quiet=1): + + +_surfit_cache = {'tx': array([], float), 'ty': array([], float), + 'wrk': array([], float), 'iwrk': array([], intc)} + + +def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None, + kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None, + full_output=0, nxest=None, nyest=None, quiet=1): + """ + Find a bivariate B-spline representation of a surface. + + Given a set of data points (x[i], y[i], z[i]) representing a surface + z=f(x,y), compute a B-spline representation of the surface. Based on + the routine SURFIT from FITPACK. + + Parameters + ---------- + x, y, z : ndarray + Rank-1 arrays of data points. + w : ndarray, optional + Rank-1 array of weights. By default ``w=np.ones(len(x))``. + xb, xe : float, optional + End points of approximation interval in `x`. + By default ``xb = x.min(), xe=x.max()``. + yb, ye : float, optional + End points of approximation interval in `y`. + By default ``yb=y.min(), ye = y.max()``. + kx, ky : int, optional + The degrees of the spline (1 <= kx, ky <= 5). + Third order (kx=ky=3) is recommended. 
+ task : int, optional + If task=0, find knots in x and y and coefficients for a given + smoothing factor, s. + If task=1, find knots and coefficients for another value of the + smoothing factor, s. bisplrep must have been previously called + with task=0 or task=1. + If task=-1, find coefficients for a given set of knots tx, ty. + s : float, optional + A non-negative smoothing factor. If weights correspond + to the inverse of the standard-deviation of the errors in z, + then a good s-value should be found in the range + ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x). + eps : float, optional + A threshold for determining the effective rank of an + over-determined linear system of equations (0 < eps < 1). + `eps` is not likely to need changing. + tx, ty : ndarray, optional + Rank-1 arrays of the knots of the spline for task=-1 + full_output : int, optional + Non-zero to return optional outputs. + nxest, nyest : int, optional + Over-estimates of the total number of knots. If None then + ``nxest = max(kx+sqrt(m/2),2*kx+3)``, + ``nyest = max(ky+sqrt(m/2),2*ky+3)``. + quiet : int, optional + Non-zero to suppress printing of messages. + This parameter is deprecated; use standard Python warning filters + instead. + + Returns + ------- + tck : array_like + A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and + coefficients (c) of the bivariate B-spline representation of the + surface along with the degree of the spline. + fp : ndarray + The weighted sum of squared residuals of the spline approximation. + ier : int + An integer flag about splrep success. Success is indicated if + ier<=0. If ier in [1,2,3] an error occurred but was not raised. + Otherwise an error is raised. + msg : str + A message corresponding to the integer flag, ier. + + See Also + -------- + splprep, splrep, splint, sproot, splev + UnivariateSpline, BivariateSpline + + Notes + ----- + See `bisplev` to evaluate the value of the B-spline given its tck + representation. + + References + ---------- + .. [1] Dierckx P.:An algorithm for surface fitting with spline functions + Ima J. Numer. Anal. 1 (1981) 267-283. + .. [2] Dierckx P.:An algorithm for surface fitting with spline functions + report tw50, Dept. Computer Science,K.U.Leuven, 1980. + .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on + Numerical Analysis, Oxford University Press, 1993. + + """ + x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays. 
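+    # Added clarifying comments (not in the original FITPACK wrapper):
+    # x, y, z are scattered data triples rather than a meshgrid, so after
+    # the ravel above, (x[i], y[i], z[i]) are matching samples of z = f(x, y).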
+    m = len(x)
+    if not (m == len(y) == len(z)):
+        raise TypeError('len(x)==len(y)==len(z) must hold.')
+    if w is None:
+        w = ones(m, float)
+    else:
+        w = atleast_1d(w)
+    if not len(w) == m:
+        raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
+    if xb is None:
+        xb = x.min()
+    if xe is None:
+        xe = x.max()
+    if yb is None:
+        yb = y.min()
+    if ye is None:
+        ye = y.max()
+    if not (-1 <= task <= 1):
+        raise TypeError('task must be -1, 0 or 1')
+    if s is None:
+        s = m - sqrt(2*m)
+    if tx is None and task == -1:
+        raise TypeError('Knots_x must be given for task=-1')
+    if tx is not None:
+        _surfit_cache['tx'] = atleast_1d(tx)
+    nx = len(_surfit_cache['tx'])
+    if ty is None and task == -1:
+        raise TypeError('Knots_y must be given for task=-1')
+    if ty is not None:
+        _surfit_cache['ty'] = atleast_1d(ty)
+    ny = len(_surfit_cache['ty'])
+    if task == -1 and nx < 2*kx+2:
+        raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
+    if task == -1 and ny < 2*ky+2:
+        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
+    if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
+        raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
+                        'supported. (1<=k<=5)' % (kx, ky))
+    if m < (kx + 1)*(ky + 1):
+        raise TypeError('m >= (kx+1)(ky+1) must hold')
+    if nxest is None:
+        nxest = int(kx + sqrt(m/2))
+    if nyest is None:
+        nyest = int(ky + sqrt(m/2))
+    nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
+    if task >= 0 and s == 0:
+        nxest = int(kx + sqrt(3*m))
+        nyest = int(ky + sqrt(3*m))
+    if task == -1:
+        _surfit_cache['tx'] = atleast_1d(tx)
+        _surfit_cache['ty'] = atleast_1d(ty)
+    tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
+    wrk = _surfit_cache['wrk']
+    u = nxest - kx - 1
+    v = nyest - ky - 1
+    km = max(kx, ky) + 1
+    ne = max(nxest, nyest)
+    bx, by = kx*v + ky + 1, ky*u + kx + 1
+    b1, b2 = bx, bx + v - ky
+    if bx > by:
+        b1, b2 = by, by + u - kx
+    msg = "Too many data points to interpolate"
+    lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) +
+                           2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
+                           msg=msg)
+    lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg)
+    tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
+                                    task, s, eps, tx, ty, nxest, nyest,
+                                    wrk, lwrk1, lwrk2)
+    _curfit_cache['tx'] = tx
+    _curfit_cache['ty'] = ty
+    _curfit_cache['wrk'] = o['wrk']
+    ier, fp = o['ier'], o['fp']
+    tck = [tx, ty, c, kx, ky]
+
+    ierm = min(11, max(-3, ier))
+    if ierm <= 0 and not quiet:
+        _mess = (_iermess2[ierm][0] +
+                 "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
+                 (kx, ky, len(tx), len(ty), m, fp, s))
+        warnings.warn(RuntimeWarning(_mess))
+    if ierm > 0 and not full_output:
+        if ier in [1, 2, 3, 4, 5]:
+            _mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
+                     (kx, ky, len(tx), len(ty), m, fp, s))
+            warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
+        else:
+            try:
+                raise _iermess2[ierm][1](_iermess2[ierm][0])
+            except KeyError:
+                raise _iermess2['unknown'][1](_iermess2['unknown'][0])
+    if full_output:
+        try:
+            return tck, fp, ier, _iermess2[ierm][0]
+        except KeyError:
+            return tck, fp, ier, _iermess2['unknown'][0]
+    else:
+        return tck
+
+
+def bisplev(x, y, tck, dx=0, dy=0):
+    """
+    Evaluate a bivariate B-spline and its derivatives.
+
+    Return a rank-2 array of spline function values (or spline derivative
+    values) at points given by the cross-product of the rank-1 arrays `x` and
+    `y`. In special cases, return an array or just a float if either `x` or
+    `y` or both are floats. Based on BISPEV from FITPACK.
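+
+    A hedged usage sketch (added, not from the original docs; `x`, `y`, `z`
+    stand for any 1-D scattered samples of a surface):
+
+    >>> tck = bisplrep(x, y, z, s=0)    # fit the scattered data
+    >>> xg = np.linspace(min(x), max(x), 5)
+    >>> yg = np.linspace(min(y), max(y), 5)
+    >>> zg = bisplev(xg, yg, tck)       # (5, 5) array on the grid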
+ + Parameters + ---------- + x, y : ndarray + Rank-1 arrays specifying the domain over which to evaluate the + spline or its derivative. + tck : tuple + A sequence of length 5 returned by `bisplrep` containing the knot + locations, the coefficients, and the degree of the spline: + [tx, ty, c, kx, ky]. + dx, dy : int, optional + The orders of the partial derivatives in `x` and `y` respectively. + + Returns + ------- + vals : ndarray + The B-spline or its derivative evaluated over the set formed by + the cross-product of `x` and `y`. + + See Also + -------- + splprep, splrep, splint, sproot, splev + UnivariateSpline, BivariateSpline + + Notes + ----- + See `bisplrep` to generate the `tck` representation. + + References + ---------- + .. [1] Dierckx P. : An algorithm for surface fitting + with spline functions + Ima J. Numer. Anal. 1 (1981) 267-283. + .. [2] Dierckx P. : An algorithm for surface fitting + with spline functions + report tw50, Dept. Computer Science,K.U.Leuven, 1980. + .. [3] Dierckx P. : Curve and surface fitting with splines, + Monographs on Numerical Analysis, Oxford University Press, 1993. + + """ + tx, ty, c, kx, ky = tck + if not (0 <= dx < kx): + raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx)) + if not (0 <= dy < ky): + raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky)) + x, y = map(atleast_1d, [x, y]) + if (len(x.shape) != 1) or (len(y.shape) != 1): + raise ValueError("First two entries should be rank-1 arrays.") + z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy) + if ier == 10: + raise ValueError("Invalid input data") + if ier: + raise TypeError("An error occurred") + z.shape = len(x), len(y) + if len(z) > 1: + return z + if len(z[0]) > 1: + return z[0] + return z[0][0] + + +def dblint(xa, xb, ya, yb, tck): + """Evaluate the integral of a spline over area [xa,xb] x [ya,yb]. + + Parameters + ---------- + xa, xb : float + The end-points of the x integration interval. + ya, yb : float + The end-points of the y integration interval. + tck : list [tx, ty, c, kx, ky] + A sequence of length 5 returned by bisplrep containing the knot + locations tx, ty, the coefficients c, and the degrees kx, ky + of the spline. + + Returns + ------- + integ : float + The value of the resulting integral. + """ + tx, ty, c, kx, ky = tck + return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb) + + +def insert(x, tck, m=1, per=0): + """ + Insert knots into a B-spline. + + Given the knots and coefficients of a B-spline representation, create a + new B-spline with a knot inserted `m` times at point `x`. + This is a wrapper around the FORTRAN routine insert of FITPACK. + + Parameters + ---------- + x (u) : array_like + A 1-D point at which to insert a new knot(s). If `tck` was returned + from ``splprep``, then the parameter values, u should be given. + tck : tuple + A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing + the vector of knots, the B-spline coefficients, + and the degree of the spline. + m : int, optional + The number of times to insert the given knot (its multiplicity). + Default is 1. + per : int, optional + If non-zero, the input spline is considered periodic. + + Returns + ------- + tck : tuple + A tuple (t,c,k) containing the vector of knots, the B-spline + coefficients, and the degree of the new spline. + ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline. 
+ In case of a periodic spline (``per != 0``) there must be + either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x`` + or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``. + + Notes + ----- + Based on algorithms from [1]_ and [2]_. + + References + ---------- + .. [1] W. Boehm, "Inserting new knots into b-spline curves.", + Computer Aided Design, 12, p.199-201, 1980. + .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on + Numerical Analysis", Oxford University Press, 1993. + + """ + t, c, k = tck + try: + c[0][0] + parametric = True + except Exception: + parametric = False + if parametric: + cc = [] + for c_vals in c: + tt, cc_val, kk = insert(x, [t, c_vals, k], m) + cc.append(cc_val) + return (tt, cc, kk) + else: + tt, cc, ier = _fitpack._insert(per, t, c, k, x, m) + if ier == 10: + raise ValueError("Invalid input data") + if ier: + raise TypeError("An error occurred") + return (tt, cc, k) + + +def splder(tck, n=1): + """ + Compute the spline representation of the derivative of a given spline + + Parameters + ---------- + tck : tuple of (t, c, k) + Spline whose derivative to compute + n : int, optional + Order of derivative to evaluate. Default: 1 + + Returns + ------- + tck_der : tuple of (t2, c2, k2) + Spline of order k2=k-n representing the derivative + of the input spline. + + Notes + ----- + + .. versionadded:: 0.13.0 + + See Also + -------- + splantider, splev, spalde + + Examples + -------- + This can be used for finding maxima of a curve: + + >>> from scipy.interpolate import splrep, splder, sproot + >>> x = np.linspace(0, 10, 70) + >>> y = np.sin(x) + >>> spl = splrep(x, y, k=4) + + Now, differentiate the spline and find the zeros of the + derivative. (NB: `sproot` only works for order 3 splines, so we + fit an order 4 spline): + + >>> dspl = splder(spl) + >>> sproot(dspl) / np.pi + array([ 0.50000001, 1.5 , 2.49999998]) + + This agrees well with roots :math:`\\pi/2 + n\\pi` of + :math:`\\cos(x) = \\sin'(x)`. + + """ + if n < 0: + return splantider(tck, -n) + + t, c, k = tck + + if n > k: + raise ValueError(("Order of derivative (n = %r) must be <= " + "order of spline (k = %r)") % (n, tck[2])) + + # Extra axes for the trailing dims of the `c` array: + sh = (slice(None),) + ((None,)*len(c.shape[1:])) + + with np.errstate(invalid='raise', divide='raise'): + try: + for j in range(n): + # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5 + + # Compute the denominator in the differentiation formula. + # (and append traling dims, if necessary) + dt = t[k+1:-1] - t[1:-k-1] + dt = dt[sh] + # Compute the new coefficients + c = (c[1:-1-k] - c[:-2-k]) * k / dt + # Pad coefficient array to same size as knots (FITPACK + # convention) + c = np.r_[c, np.zeros((k,) + c.shape[1:])] + # Adjust knots + t = t[1:-1] + k -= 1 + except FloatingPointError: + raise ValueError(("The spline has internal repeated knots " + "and is not differentiable %d times") % n) + + return t, c, k + + +def splantider(tck, n=1): + """ + Compute the spline for the antiderivative (integral) of a given spline. + + Parameters + ---------- + tck : tuple of (t, c, k) + Spline whose antiderivative to compute + n : int, optional + Order of antiderivative to evaluate. Default: 1 + + Returns + ------- + tck_ader : tuple of (t2, c2, k2) + Spline of order k2=k+n representing the antiderivative of the input + spline. + + See Also + -------- + splder, splev, spalde + + Notes + ----- + The `splder` function is the inverse operation of this function. 
+ Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo + rounding error. + + .. versionadded:: 0.13.0 + + Examples + -------- + >>> from scipy.interpolate import splrep, splder, splantider, splev + >>> x = np.linspace(0, np.pi/2, 70) + >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2) + >>> spl = splrep(x, y) + + The derivative is the inverse operation of the antiderivative, + although some floating point error accumulates: + + >>> splev(1.7, spl), splev(1.7, splder(splantider(spl))) + (array(2.1565429877197317), array(2.1565429877201865)) + + Antiderivative can be used to evaluate definite integrals: + + >>> ispl = splantider(spl) + >>> splev(np.pi/2, ispl) - splev(0, ispl) + 2.2572053588768486 + + This is indeed an approximation to the complete elliptic integral + :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`: + + >>> from scipy.special import ellipk + >>> ellipk(0.8) + 2.2572053268208538 + + """ + if n < 0: + return splder(tck, -n) + + t, c, k = tck + + # Extra axes for the trailing dims of the `c` array: + sh = (slice(None),) + (None,)*len(c.shape[1:]) + + for j in range(n): + # This is the inverse set of operations to splder. + + # Compute the multiplier in the antiderivative formula. + dt = t[k+1:] - t[:-k-1] + dt = dt[sh] + # Compute the new coefficients + c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1) + c = np.r_[np.zeros((1,) + c.shape[1:]), + c, + [c[-1]] * (k+2)] + # New knots + t = np.r_[t[0], t, t[-1]] + k += 1 + + return t, c, k diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_fitpack_impl.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_fitpack_impl.pyc new file mode 100644 index 0000000..822e66d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_fitpack_impl.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_interpolate.so b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_interpolate.so new file mode 100755 index 0000000..3ff29da Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_interpolate.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_pade.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_pade.py new file mode 100644 index 0000000..0ebe30b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_pade.py @@ -0,0 +1,68 @@ +from __future__ import division, print_function, absolute_import + +from numpy import zeros, asarray, eye, poly1d, hstack, r_ +from scipy import linalg + +__all__ = ["pade"] + +def pade(an, m, n=None): + """ + Return Pade approximation to a polynomial as the ratio of two polynomials. + + Parameters + ---------- + an : (N,) array_like + Taylor series coefficients. + m : int + The order of the returned approximating polynomial `q`. + n : int, optional + The order of the returned approximating polynomial `p`. By default, + the order is ``len(an)-m``. + + Returns + ------- + p, q : Polynomial class + The Pade approximation of the polynomial defined by `an` is + ``p(x)/q(x)``. 
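+
+    Notes
+    -----
+    An added note (not in the original docstring): per the standard Pade
+    construction, the coefficients of `p` and `q` are chosen so that the
+    expansion of ``p(x)/q(x)`` reproduces the truncated series through
+    order ``m + n``, i.e. :math:`q(x)\,\sum_k a_k x^k - p(x) = O(x^{m+n+1})`.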
+ + Examples + -------- + >>> from scipy.interpolate import pade + >>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0] + >>> p, q = pade(e_exp, 2) + + >>> e_exp.reverse() + >>> e_poly = np.poly1d(e_exp) + + Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)`` + + >>> e_poly(1) + 2.7166666666666668 + + >>> p(1)/q(1) + 2.7179487179487181 + + """ + an = asarray(an) + if n is None: + n = len(an) - 1 - m + if n < 0: + raise ValueError("Order of q <m> must be smaller than len(an)-1.") + if n < 0: + raise ValueError("Order of p <n> must be greater than 0.") + N = m + n + if N > len(an)-1: + raise ValueError("Order of q+p <m+n> must be smaller than len(an).") + an = an[:N+1] + Akj = eye(N+1, n+1) + Bkj = zeros((N+1, m), 'd') + for row in range(1, m+1): + Bkj[row,:row] = -(an[:row])[::-1] + for row in range(m+1, N+1): + Bkj[row,:] = -(an[row-m:row])[::-1] + C = hstack((Akj, Bkj)) + pq = linalg.solve(C, an) + p = pq[:n+1] + q = r_[1.0, pq[n+1:]] + return poly1d(p[::-1]), poly1d(q[::-1]) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_pade.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_pade.pyc new file mode 100644 index 0000000..15fdc0c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_pade.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/_ppoly.so b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_ppoly.so new file mode 100755 index 0000000..0e69f38 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/_ppoly.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/dfitpack.so b/project/venv/lib/python2.7/site-packages/scipy/interpolate/dfitpack.so new file mode 100755 index 0000000..7e4578d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/dfitpack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack.py new file mode 100644 index 0000000..f224e25 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack.py @@ -0,0 +1,722 @@ +from __future__ import print_function, division, absolute_import + +__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde', + 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider'] + +import warnings + +import numpy as np + +from ._fitpack_impl import bisplrep, bisplev, dblint +from . import _fitpack_impl as _impl +from ._bsplines import BSpline + + +def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None, + full_output=0, nest=None, per=0, quiet=1): + """ + Find the B-spline representation of an N-dimensional curve. + + Given a list of N rank-1 arrays, `x`, which represent a curve in + N-dimensional space parametrized by `u`, find a smooth approximating + spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK. + + Parameters + ---------- + x : array_like + A list of sample vector arrays representing the curve. + w : array_like, optional + Strictly positive rank-1 array of weights the same length as `x[0]`. + The weights are used in computing the weighted least-squares spline + fit. If the errors in the `x` values have standard-deviation given by + the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``. + u : array_like, optional + An array of parameter values. 
If not given, these values are
+        calculated automatically, with ``M = len(x[0])``, as
+
+            v[0] = 0
+
+            v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
+
+            u[i] = v[i] / v[M-1]
+
+    ub, ue : int, optional
+        The end-points of the parameters interval. Defaults to
+        u[0] and u[-1].
+    k : int, optional
+        Degree of the spline. Cubic splines are recommended.
+        Even values of `k` should be avoided especially with a small s-value.
+        ``1 <= k <= 5``, default is 3.
+    task : int, optional
+        If task==0 (default), find t and c for a given smoothing factor, s.
+        If task==1, find t and c for another value of the smoothing factor, s.
+        There must have been a previous call with task=0 or task=1
+        for the same set of data.
+        If task=-1 find the weighted least-squares spline for a given set of
+        knots, t.
+    s : float, optional
+        A smoothing condition. The amount of smoothness is determined by
+        satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
+        where g(x) is the smoothed interpolation of (x,y). The user can
+        use `s` to control the trade-off between closeness and smoothness
+        of fit. Larger `s` means more smoothing while smaller values of `s`
+        indicate less smoothing. Recommended values of `s` depend on the
+        weights, w. If the weights represent the inverse of the
+        standard-deviation of y, then a good `s` value should be found in
+        the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
+        data points in x, y, and w.
+    t : int, optional
+        The knots needed for task=-1.
+    full_output : int, optional
+        If non-zero, then return optional outputs.
+    nest : int, optional
+        An over-estimate of the total number of knots of the spline to
+        help in determining the storage space. By default nest=m/2.
+        nest=m+k+1 is always large enough.
+    per : int, optional
+        If non-zero, data points are considered periodic with period
+        ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
+        returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
+    quiet : int, optional
+        Non-zero to suppress messages.
+        This parameter is deprecated; use standard Python warning filters
+        instead.
+
+    Returns
+    -------
+    tck : tuple
+        A tuple (t,c,k) containing the vector of knots, the B-spline
+        coefficients, and the degree of the spline.
+    u : array
+        An array of the values of the parameter.
+    fp : float
+        The weighted sum of squared residuals of the spline approximation.
+    ier : int
+        An integer flag about splprep success. Success is indicated
+        if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
+        Otherwise an error is raised.
+    msg : str
+        A message corresponding to the integer flag, ier.
+
+    See Also
+    --------
+    splrep, splev, sproot, spalde, splint,
+    bisplrep, bisplev
+    UnivariateSpline, BivariateSpline
+    BSpline
+    make_interp_spline
+
+    Notes
+    -----
+    See `splev` for evaluation of the spline and its derivatives.
+    The number of dimensions N must be smaller than 11.
+
+    The number of coefficients in the `c` array is ``k+1`` less than the number
+    of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
+    the array of coefficients to have the same length as the array of knots.
+    These additional coefficients are ignored by evaluation routines, `splev`
+    and `BSpline`.
+
+    References
+    ----------
+    .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
+       parametric splines", Computer Graphics and Image Processing,
+       20 (1982) 171-184.
+    .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
+       parametric splines", report tw55, Dept.
Computer Science,
+       K.U.Leuven, 1981.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+       Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+    Generate a discretization of a limacon curve in the polar coordinates:
+
+    >>> phi = np.linspace(0, 2.*np.pi, 40)
+    >>> r = 0.5 + np.cos(phi)         # polar coords
+    >>> x, y = r * np.cos(phi), r * np.sin(phi)    # convert to cartesian
+
+    And interpolate:
+
+    >>> from scipy.interpolate import splprep, splev
+    >>> tck, u = splprep([x, y], s=0)
+    >>> new_points = splev(u, tck)
+
+    Notice that (i) we force interpolation by using `s=0`,
+    (ii) the parameterization, ``u``, is generated automatically.
+    Now plot the result:
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, y, 'ro')
+    >>> ax.plot(new_points[0], new_points[1], 'r-')
+    >>> plt.show()
+
+    """
+    res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
+                        quiet)
+    return res
+
+
+def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
+           full_output=0, per=0, quiet=1):
+    """
+    Find the B-spline representation of a 1-D curve.
+
+    Given the set of data points ``(x[i], y[i])`` determine a smooth spline
+    approximation of degree k on the interval ``xb <= x <= xe``.
+
+    Parameters
+    ----------
+    x, y : array_like
+        The data points defining a curve y = f(x).
+    w : array_like, optional
+        Strictly positive rank-1 array of weights the same length as x and y.
+        The weights are used in computing the weighted least-squares spline
+        fit. If the errors in the y values have standard-deviation given by the
+        vector d, then w should be 1/d. Default is ones(len(x)).
+    xb, xe : float, optional
+        The interval to fit. If None, these default to x[0] and x[-1]
+        respectively.
+    k : int, optional
+        The degree of the spline fit. It is recommended to use cubic splines.
+        Even values of k should be avoided especially with small s values.
+        1 <= k <= 5
+    task : {1, 0, -1}, optional
+        If task==0 find t and c for a given smoothing factor, s.
+
+        If task==1 find t and c for another value of the smoothing factor, s.
+        There must have been a previous call with task=0 or task=1 for the same
+        set of data (t will be stored and used internally).
+
+        If task=-1 find the weighted least-squares spline for a given set of
+        knots, t. These should be interior knots, as knots on the ends will be
+        added automatically.
+    s : float, optional
+        A smoothing condition. The amount of smoothness is determined by
+        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
+        is the smoothed interpolation of (x,y). The user can use s to control
+        the tradeoff between closeness and smoothness of fit. Larger s means
+        more smoothing while smaller values of s indicate less smoothing.
+        Recommended values of s depend on the weights, w. If the weights
+        represent the inverse of the standard-deviation of y, then a good s
+        value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
+        the number of datapoints in x, y, and w. Default: s=m-sqrt(2*m) if
+        weights are supplied; s = 0.0 (interpolating) if no weights are
+        supplied.
+    t : array_like, optional
+        The knots needed for task=-1. If given then task is automatically set
+        to -1.
+    full_output : bool, optional
+        If non-zero, then return optional outputs.
+    per : bool, optional
+        If non-zero, data points are considered periodic with period x[m-1] -
+        x[0] and a smooth periodic spline approximation is returned. Values of
+        y[m-1] and w[m-1] are not used.
+ quiet : bool, optional + Non-zero to suppress messages. + This parameter is deprecated; use standard Python warning filters + instead. + + Returns + ------- + tck : tuple + A tuple (t,c,k) containing the vector of knots, the B-spline + coefficients, and the degree of the spline. + fp : array, optional + The weighted sum of squared residuals of the spline approximation. + ier : int, optional + An integer flag about splrep success. Success is indicated if ier<=0. + If ier in [1,2,3] an error occurred but was not raised. Otherwise an + error is raised. + msg : str, optional + A message corresponding to the integer flag, ier. + + See Also + -------- + UnivariateSpline, BivariateSpline + splprep, splev, sproot, spalde, splint + bisplrep, bisplev + BSpline + make_interp_spline + + Notes + ----- + See `splev` for evaluation of the spline and its derivatives. Uses the + FORTRAN routine ``curfit`` from FITPACK. + + The user is responsible for assuring that the values of `x` are unique. + Otherwise, `splrep` will not return sensible results. + + If provided, knots `t` must satisfy the Schoenberg-Whitney conditions, + i.e., there must be a subset of data points ``x[j]`` such that + ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``. + + This routine zero-pads the coefficients array ``c`` to have the same length + as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored + by the evaluation routines, `splev` and `BSpline`.) This is in contrast with + `splprep`, which does not zero-pad the coefficients. + + References + ---------- + Based on algorithms described in [1]_, [2]_, [3]_, and [4]_: + + .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and + integration of experimental data using spline functions", + J.Comp.Appl.Maths 1 (1975) 165-184. + .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular + grid while using spline functions", SIAM J.Numer.Anal. 19 (1982) + 1286-1304. + .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline + functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981. + .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on + Numerical Analysis, Oxford University Press, 1993. + + Examples + -------- + + >>> import matplotlib.pyplot as plt + >>> from scipy.interpolate import splev, splrep + >>> x = np.linspace(0, 10, 10) + >>> y = np.sin(x) + >>> spl = splrep(x, y) + >>> x2 = np.linspace(0, 10, 200) + >>> y2 = splev(x2, spl) + >>> plt.plot(x, y, 'o', x2, y2) + >>> plt.show() + + """ + res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet) + return res + + +def splev(x, tck, der=0, ext=0): + """ + Evaluate a B-spline or its derivatives. + + Given the knots and coefficients of a B-spline representation, evaluate + the value of the smoothing polynomial and its derivatives. This is a + wrapper around the FORTRAN routines splev and splder of FITPACK. + + Parameters + ---------- + x : array_like + An array of points at which to return the value of the smoothed + spline or its derivatives. If `tck` was returned from `splprep`, + then the parameter values, u should be given. + tck : 3-tuple or a BSpline object + If a tuple, then it should be a sequence of length 3 returned by + `splrep` or `splprep` containing the knots, coefficients, and degree + of the spline. (Also see Notes.) + der : int, optional + The order of derivative of the spline to compute (must be less than + or equal to k). 
+    ext : int, optional
+        Controls the value returned for elements of ``x`` not in the
+        interval defined by the knot sequence.
+
+        * if ext=0, return the extrapolated value.
+        * if ext=1, return 0
+        * if ext=2, raise a ValueError
+        * if ext=3, return the boundary value.
+
+        The default value is 0.
+
+    Returns
+    -------
+    y : ndarray or list of ndarrays
+        An array of values representing the spline function evaluated at
+        the points in `x`. If `tck` was returned from `splprep`, then this
+        is a list of arrays representing the curve in N-dimensional space.
+
+    Notes
+    -----
+    Manipulating the tck-tuples directly is not recommended. In new code,
+    prefer using `BSpline` objects.
+
+    See Also
+    --------
+    splprep, splrep, sproot, spalde, splint
+    bisplrep, bisplev
+    BSpline
+
+    References
+    ----------
+    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+       Theory, 6, p.50-62, 1972.
+    .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+       Applics, 10, p.134-149, 1972.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+       on Numerical Analysis, Oxford University Press, 1993.
+
+    """
+    if isinstance(tck, BSpline):
+        if tck.c.ndim > 1:
+            mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
+                    "not recommended. Use BSpline.__call__(x) instead.")
+            warnings.warn(mesg, DeprecationWarning)
+
+        # remap the out-of-bounds behavior
+        try:
+            extrapolate = {0: True, }[ext]
+        except KeyError:
+            raise ValueError("Extrapolation mode %s is not supported "
+                             "by BSpline." % ext)
+
+        return tck(x, der, extrapolate=extrapolate)
+    else:
+        return _impl.splev(x, tck, der, ext)
+
+
+def splint(a, b, tck, full_output=0):
+    """
+    Evaluate the definite integral of a B-spline between two given points.
+
+    Parameters
+    ----------
+    a, b : float
+        The end-points of the integration interval.
+    tck : tuple or a BSpline instance
+        If a tuple, then it should be a sequence of length 3, containing the
+        vector of knots, the B-spline coefficients, and the degree of the
+        spline (see `splev`).
+    full_output : int, optional
+        Non-zero to return optional output.
+
+    Returns
+    -------
+    integral : float
+        The resulting integral.
+    wrk : ndarray
+        An array containing the integrals of the normalized B-splines
+        defined on the set of knots.
+        (Only returned if `full_output` is non-zero)
+
+    Notes
+    -----
+    `splint` silently assumes that the spline function is zero outside the data
+    interval (`a`, `b`).
+
+    Manipulating the tck-tuples directly is not recommended. In new code,
+    prefer using the `BSpline` objects.
+
+    See Also
+    --------
+    splprep, splrep, sproot, spalde, splev
+    bisplrep, bisplev
+    BSpline
+
+    References
+    ----------
+    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of
+       b-splines", J. Inst. Maths Applics, 17, p.37-41, 1976.
+    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
+       on Numerical Analysis, Oxford University Press, 1993.
+
+    """
+    if isinstance(tck, BSpline):
+        if tck.c.ndim > 1:
+            mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
+                    "not recommended. Use BSpline.integrate() instead.")
+            warnings.warn(mesg, DeprecationWarning)
+
+        if full_output != 0:
+            mesg = ("full_output = %s is not supported. Proceeding as if "
+                    "full_output = 0" % full_output)
+            warnings.warn(mesg)
+
+        return tck.integrate(a, b, extrapolate=False)
+    else:
+        return _impl.splint(a, b, tck, full_output)
+
+
+def sproot(tck, mest=10):
+    """
+    Find the roots of a cubic B-spline.
+
+    Given the knots (>=8) and coefficients of a cubic B-spline return the
+    roots of the spline.
+
+    Parameters
+    ----------
+    tck : tuple or a BSpline object
+        If a tuple, then it should be a sequence of length 3, containing the
+        vector of knots, the B-spline coefficients, and the degree of the
+        spline.
+        The number of knots must be >= 8, and the degree must be 3.
+        The knots must be a monotonically increasing sequence.
+    mest : int, optional
+        An estimate of the number of zeros (Default is 10).
+
+    Returns
+    -------
+    zeros : ndarray
+        An array giving the roots of the spline.
+
+    Notes
+    -----
+    Manipulating the tck-tuples directly is not recommended. In new code,
+    prefer using the `BSpline` objects.
+
+    See Also
+    --------
+    splprep, splrep, splint, spalde, splev
+    bisplrep, bisplev
+    BSpline
+
+    References
+    ----------
+    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+       Theory, 6, p.50-62, 1972.
+    .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+       Applics, 10, p.134-149, 1972.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+       on Numerical Analysis, Oxford University Press, 1993.
+
+    """
+    if isinstance(tck, BSpline):
+        if tck.c.ndim > 1:
+            mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
+                    "not recommended.")
+            warnings.warn(mesg, DeprecationWarning)
+
+        t, c, k = tck.tck
+
+        # _impl.sproot expects the interpolation axis to be last, so roll it.
+        # NB: This transpose is a no-op if c is 1D.
+        sh = tuple(range(c.ndim))
+        c = c.transpose(sh[1:] + (0,))
+        return _impl.sproot((t, c, k), mest)
+    else:
+        return _impl.sproot(tck, mest)
+
+
+def spalde(x, tck):
+    """
+    Evaluate all derivatives of a B-spline.
+
+    Given the knots and coefficients of a cubic B-spline compute all
+    derivatives up to order k at a point (or set of points).
+
+    Parameters
+    ----------
+    x : array_like
+        A point or a set of points at which to evaluate the derivatives.
+        Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
+    tck : tuple
+        A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
+        coefficients, and the degree of the spline (see `splev`).
+
+    Returns
+    -------
+    results : {ndarray, list of ndarrays}
+        An array (or a list of arrays) containing all derivatives
+        up to order k inclusive for each point `x`.
+
+    See Also
+    --------
+    splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
+    BSpline
+
+    References
+    ----------
+    .. [1] C. de Boor: On calculating with b-splines, J. Approximation Theory
+       6 (1972) 50-62.
+    .. [2] M. G. Cox: The numerical evaluation of b-splines, J. Inst. Maths
+       applics 10 (1972) 134-149.
+    .. [3] P. Dierckx: Curve and surface fitting with splines, Monographs on
+       Numerical Analysis, Oxford University Press, 1993.
+
+    """
+    if isinstance(tck, BSpline):
+        raise TypeError("spalde does not accept BSpline instances.")
+    else:
+        return _impl.spalde(x, tck)
+
+
+def insert(x, tck, m=1, per=0):
+    """
+    Insert knots into a B-spline.
+
+    Given the knots and coefficients of a B-spline representation, create a
+    new B-spline with a knot inserted `m` times at point `x`.
+    This is a wrapper around the FORTRAN routine insert of FITPACK.
+
+    Parameters
+    ----------
+    x (u) : array_like
+        A 1-D point at which to insert a new knot(s). If `tck` was returned
+        from ``splprep``, then the parameter values, u should be given.
+ tck : a `BSpline` instance or a tuple + If tuple, then it is expected to be a tuple (t,c,k) containing + the vector of knots, the B-spline coefficients, and the degree of + the spline. + m : int, optional + The number of times to insert the given knot (its multiplicity). + Default is 1. + per : int, optional + If non-zero, the input spline is considered periodic. + + Returns + ------- + BSpline instance or a tuple + A new B-spline with knots t, coefficients c, and degree k. + ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline. + In case of a periodic spline (``per != 0``) there must be + either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x`` + or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``. + A tuple is returned iff the input argument `tck` is a tuple, otherwise + a BSpline object is constructed and returned. + + Notes + ----- + Based on algorithms from [1]_ and [2]_. + + Manipulating the tck-tuples directly is not recommended. In new code, + prefer using the `BSpline` objects. + + References + ---------- + .. [1] W. Boehm, "Inserting new knots into b-spline curves.", + Computer Aided Design, 12, p.199-201, 1980. + .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on + Numerical Analysis", Oxford University Press, 1993. + + """ + if isinstance(tck, BSpline): + + t, c, k = tck.tck + + # FITPACK expects the interpolation axis to be last, so roll it over + # NB: if c array is 1D, transposes are no-ops + sh = tuple(range(c.ndim)) + c = c.transpose(sh[1:] + (0,)) + t_, c_, k_ = _impl.insert(x, (t, c, k), m, per) + + # and roll the last axis back + c_ = np.asarray(c_) + c_ = c_.transpose((sh[-1],) + sh[:-1]) + return BSpline(t_, c_, k_) + else: + return _impl.insert(x, tck, m, per) + + +def splder(tck, n=1): + """ + Compute the spline representation of the derivative of a given spline + + Parameters + ---------- + tck : BSpline instance or a tuple of (t, c, k) + Spline whose derivative to compute + n : int, optional + Order of derivative to evaluate. Default: 1 + + Returns + ------- + `BSpline` instance or tuple + Spline of order k2=k-n representing the derivative + of the input spline. + A tuple is returned iff the input argument `tck` is a tuple, otherwise + a BSpline object is constructed and returned. + + Notes + ----- + + .. versionadded:: 0.13.0 + + See Also + -------- + splantider, splev, spalde + BSpline + + Examples + -------- + This can be used for finding maxima of a curve: + + >>> from scipy.interpolate import splrep, splder, sproot + >>> x = np.linspace(0, 10, 70) + >>> y = np.sin(x) + >>> spl = splrep(x, y, k=4) + + Now, differentiate the spline and find the zeros of the + derivative. (NB: `sproot` only works for order 3 splines, so we + fit an order 4 spline): + + >>> dspl = splder(spl) + >>> sproot(dspl) / np.pi + array([ 0.50000001, 1.5 , 2.49999998]) + + This agrees well with roots :math:`\\pi/2 + n\\pi` of + :math:`\\cos(x) = \\sin'(x)`. + + """ + if isinstance(tck, BSpline): + return tck.derivative(n) + else: + return _impl.splder(tck, n) + + +def splantider(tck, n=1): + """ + Compute the spline for the antiderivative (integral) of a given spline. + + Parameters + ---------- + tck : BSpline instance or a tuple of (t, c, k) + Spline whose antiderivative to compute + n : int, optional + Order of antiderivative to evaluate. Default: 1 + + Returns + ------- + BSpline instance or a tuple of (t2, c2, k2) + Spline of order k2=k+n representing the antiderivative of the input + spline. 
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise + a BSpline object is constructed and returned. + + See Also + -------- + splder, splev, spalde + BSpline + + Notes + ----- + The `splder` function is the inverse operation of this function. + Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo + rounding error. + + .. versionadded:: 0.13.0 + + Examples + -------- + >>> from scipy.interpolate import splrep, splder, splantider, splev + >>> x = np.linspace(0, np.pi/2, 70) + >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2) + >>> spl = splrep(x, y) + + The derivative is the inverse operation of the antiderivative, + although some floating point error accumulates: + + >>> splev(1.7, spl), splev(1.7, splder(splantider(spl))) + (array(2.1565429877197317), array(2.1565429877201865)) + + Antiderivative can be used to evaluate definite integrals: + + >>> ispl = splantider(spl) + >>> splev(np.pi/2, ispl) - splev(0, ispl) + 2.2572053588768486 + + This is indeed an approximation to the complete elliptic integral + :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`: + + >>> from scipy.special import ellipk + >>> ellipk(0.8) + 2.2572053268208538 + + """ + if isinstance(tck, BSpline): + return tck.antiderivative(n) + else: + return _impl.splantider(tck, n) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack.pyc new file mode 100644 index 0000000..a3eb254 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack2.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack2.py new file mode 100644 index 0000000..6d3fce9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack2.py @@ -0,0 +1,1716 @@ +""" +fitpack --- curve and surface fitting with splines + +fitpack is based on a collection of Fortran routines DIERCKX +by P. Dierckx (see http://www.netlib.org/dierckx/) transformed +to double routines by Pearu Peterson. +""" +# Created by Pearu Peterson, June,August 2003 +from __future__ import division, print_function, absolute_import + +__all__ = [ + 'UnivariateSpline', + 'InterpolatedUnivariateSpline', + 'LSQUnivariateSpline', + 'BivariateSpline', + 'LSQBivariateSpline', + 'SmoothBivariateSpline', + 'LSQSphereBivariateSpline', + 'SmoothSphereBivariateSpline', + 'RectBivariateSpline', + 'RectSphereBivariateSpline'] + + +import warnings + +from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones +import numpy as np + +from . import fitpack +from . import dfitpack + + +# ############### Univariate spline #################### + +_curfit_messages = {1: """ +The required storage space exceeds the available storage space, as +specified by the parameter nest: nest too small. If nest is already +large (say nest > m/2), it may also indicate that s is too small. +The approximation returned is the weighted least-squares spline +according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp +gives the corresponding weighted sum of squared residuals (fp>s). +""", + 2: """ +A theoretically impossible result was found during the iteration +process for finding a smoothing spline with fp = s: s too small. 
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
+                    3: """
+The maximal number of iterations maxit (set to 20 by the program)
+allowed for finding a smoothing spline with fp=s has been reached: s
+too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
+                    10: """
+Error on entry, no approximation returned. The following conditions
+must hold:
+xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
+if iopt=-1:
+  xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
+                    }
+
+
+# UnivariateSpline, ext parameter can be an int or a string
+_extrap_modes = {0: 0, 'extrapolate': 0,
+                 1: 1, 'zeros': 1,
+                 2: 2, 'raise': 2,
+                 3: 3, 'const': 3}
+
+
+class UnivariateSpline(object):
+    """
+    One-dimensional smoothing spline fit to a given set of data points.
+
+    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
+    specifies the number of knots by specifying a smoothing condition.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        1-D array of independent input data. Must be increasing.
+    y : (N,) array_like
+        1-D array of dependent input data, of the same length as `x`.
+    w : (N,) array_like, optional
+        Weights for spline fitting. Must be positive. If None (default),
+        weights are all equal.
+    bbox : (2,) array_like, optional
+        2-sequence specifying the boundary of the approximation interval. If
+        None (default), ``bbox=[x[0], x[-1]]``.
+    k : int, optional
+        Degree of the smoothing spline. Must be <= 5.
+        Default is k=3, a cubic spline.
+    s : float or None, optional
+        Positive smoothing factor used to choose the number of knots. Number
+        of knots will be increased until the smoothing condition is satisfied::
+
+            sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
+
+        If None (default), ``s = len(w)`` which should be a good value if
+        ``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
+        If 0, spline will interpolate through all data points.
+    ext : int or str, optional
+        Controls the extrapolation mode for elements
+        not in the interval defined by the knot sequence.
+
+        * if ext=0 or 'extrapolate', return the extrapolated value.
+        * if ext=1 or 'zeros', return 0
+        * if ext=2 or 'raise', raise a ValueError
+        * if ext=3 or 'const', return the boundary value.
+
+        The default value is 0.
+
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination or non-sensical results) if the inputs
+        do contain infinities or NaNs.
+        Default is False.
+
+    See Also
+    --------
+    InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
+    LSQUnivariateSpline : Subclass in which knots are user-selected instead of
+        being set by smoothing condition
+    splrep : An older, non object-oriented wrapping of FITPACK
+    splev, sproot, splint, spalde
+    BivariateSpline : A similar class for two-dimensional spline interpolation
+
+    Notes
+    -----
+    The number of data points must be larger than the spline degree `k`.
+
+    **NaN handling**: If the input arrays contain ``nan`` values, the result
+    is not useful, since the underlying spline fitting routines cannot deal
+    with ``nan``.
A workaround is to use zero weights for not-a-number + data points: + + >>> from scipy.interpolate import UnivariateSpline + >>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4]) + >>> w = np.isnan(y) + >>> y[w] = 0. + >>> spl = UnivariateSpline(x, y, w=~w) + + Notice the need to replace a ``nan`` by a numerical value (precise value + does not matter as long as the corresponding weight is zero.) + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from scipy.interpolate import UnivariateSpline + >>> x = np.linspace(-3, 3, 50) + >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50) + >>> plt.plot(x, y, 'ro', ms=5) + + Use the default value for the smoothing parameter: + + >>> spl = UnivariateSpline(x, y) + >>> xs = np.linspace(-3, 3, 1000) + >>> plt.plot(xs, spl(xs), 'g', lw=3) + + Manually change the amount of smoothing: + + >>> spl.set_smoothing_factor(0.5) + >>> plt.plot(xs, spl(xs), 'b', lw=3) + >>> plt.show() + + """ + def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None, + ext=0, check_finite=False): + + if check_finite: + w_finite = np.isfinite(w).all() if w is not None else True + if (not np.isfinite(x).all() or not np.isfinite(y).all() or + not w_finite): + raise ValueError("x and y array must not contain " + "NaNs or infs.") + if not all(diff(x) > 0.0): + raise ValueError('x must be strictly increasing') + + # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier + try: + self.ext = _extrap_modes[ext] + except KeyError: + raise ValueError("Unknown extrapolation mode %s." % ext) + + data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0], + xe=bbox[1], s=s) + if data[-1] == 1: + # nest too small, setting to maximum bound + data = self._reset_nest(data) + self._data = data + self._reset_class() + + @classmethod + def _from_tck(cls, tck, ext=0): + """Construct a spline object from given tck""" + self = cls.__new__(cls) + t, c, k = tck + self._eval_args = tck + # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier + self._data = (None, None, None, None, None, k, None, len(t), t, + c, None, None, None, None) + self.ext = ext + return self + + def _reset_class(self): + data = self._data + n, t, c, k, ier = data[7], data[8], data[9], data[5], data[-1] + self._eval_args = t[:n], c[:n], k + if ier == 0: + # the spline returned has a residual sum of squares fp + # such that abs(fp-s)/s <= tol with tol a relative + # tolerance set to 0.001 by the program + pass + elif ier == -1: + # the spline returned is an interpolating spline + self._set_class(InterpolatedUnivariateSpline) + elif ier == -2: + # the spline returned is the weighted least-squares + # polynomial of degree k. In this extreme case fp gives + # the upper bound fp0 for the smoothing factor s. + self._set_class(LSQUnivariateSpline) + else: + # error + if ier == 1: + self._set_class(LSQUnivariateSpline) + message = _curfit_messages.get(ier, 'ier=%s' % (ier)) + warnings.warn(message) + + def _set_class(self, cls): + self._spline_class = cls + if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline, + LSQUnivariateSpline): + self.__class__ = cls + else: + # It's an unknown subclass -- don't change class. cf. 
#731
+            pass
+
+    def _reset_nest(self, data, nest=None):
+        n = data[10]
+        if nest is None:
+            k, m = data[5], len(data[0])
+            nest = m+k+1  # this is the maximum bound for nest
+        else:
+            if not n <= nest:
+                raise ValueError("`nest` can only be increased")
+        t, c, fpint, nrdata = [np.resize(data[j], nest) for j in
+                               [8, 9, 11, 12]]
+
+        args = data[:8] + (t, c, n, fpint, nrdata, data[13])
+        data = dfitpack.fpcurf1(*args)
+        return data
+
+    def set_smoothing_factor(self, s):
+        """ Continue spline computation with the given smoothing
+        factor s and with the knots found at the last call.
+
+        This routine modifies the spline in place.
+
+        """
+        data = self._data
+        if data[6] == -1:
+            warnings.warn('smoothing factor unchanged for '
+                          'LSQ spline with fixed knots')
+            return
+        args = data[:6] + (s,) + data[7:]
+        data = dfitpack.fpcurf1(*args)
+        if data[-1] == 1:
+            # nest too small, setting to maximum bound
+            data = self._reset_nest(data)
+        self._data = data
+        self._reset_class()
+
+    def __call__(self, x, nu=0, ext=None):
+        """
+        Evaluate spline (or its nu-th derivative) at positions x.
+
+        Parameters
+        ----------
+        x : array_like
+            A 1-D array of points at which to return the value of the smoothed
+            spline or its derivatives. Note: x can be unordered but the
+            evaluation is more efficient if x is (partially) ordered.
+        nu : int
+            The order of derivative of the spline to compute.
+        ext : int
+            Controls the value returned for elements of ``x`` not in the
+            interval defined by the knot sequence.
+
+            * if ext=0 or 'extrapolate', return the extrapolated value.
+            * if ext=1 or 'zeros', return 0
+            * if ext=2 or 'raise', raise a ValueError
+            * if ext=3 or 'const', return the boundary value.
+
+            The default value is 0, passed from the initialization of
+            UnivariateSpline.
+
+        """
+        x = np.asarray(x)
+        # empty input yields empty output
+        if x.size == 0:
+            return array([])
+#        if nu is None:
+#            return dfitpack.splev(*(self._eval_args+(x,)))
+#        return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
+        if ext is None:
+            ext = self.ext
+        else:
+            try:
+                ext = _extrap_modes[ext]
+            except KeyError:
+                raise ValueError("Unknown extrapolation mode %s." % ext)
+        return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
+
+    def get_knots(self):
+        """ Return positions of interior knots of the spline.
+
+        Internally, the knot vector contains ``2*k`` additional boundary knots.
+        """
+        data = self._data
+        k, n = data[5], data[7]
+        return data[8][k:n-k]
+
+    def get_coeffs(self):
+        """Return spline coefficients."""
+        data = self._data
+        k, n = data[5], data[7]
+        return data[9][:n-k-1]
+
+    def get_residual(self):
+        """Return weighted sum of squared residuals of the spline approximation.
+
+        This is equivalent to::
+
+            sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
+
+        """
+        return self._data[10]
+
+    def integral(self, a, b):
+        """ Return definite integral of the spline between two given points.
+
+        Parameters
+        ----------
+        a : float
+            Lower limit of integration.
+        b : float
+            Upper limit of integration.
+
+        Returns
+        -------
+        integral : float
+            The value of the definite integral of the spline between limits.
+
+        Examples
+        --------
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> x = np.linspace(0, 3, 11)
+        >>> y = x**2
+        >>> spl = UnivariateSpline(x, y)
+        >>> spl.integral(0, 3)
+        9.0
+
+        which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
+        of 0 and 3.
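+
+        A sub-interval works the same way; because the spline fitted to
+        this exact quadratic data is itself essentially exact, the value
+        below should be close to :math:`\\int_1^2 x^2 dx = 7/3`:
+
+        >>> val = spl.integral(1, 2)  # approximately 2.333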
+
+        A caveat is that this routine assumes the spline to be zero outside of
+        the data limits:
+
+        >>> spl.integral(-1, 4)
+        9.0
+        >>> spl.integral(-1, 0)
+        0.0
+
+        """
+        return dfitpack.splint(*(self._eval_args+(a, b)))
+
+    def derivatives(self, x):
+        """ Return all derivatives of the spline at the point x.
+
+        Parameters
+        ----------
+        x : float
+            The point to evaluate the derivatives at.
+
+        Returns
+        -------
+        der : ndarray, shape(k+1,)
+            Derivatives of the orders 0 to k.
+
+        Examples
+        --------
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> x = np.linspace(0, 3, 11)
+        >>> y = x**2
+        >>> spl = UnivariateSpline(x, y)
+        >>> spl.derivatives(1.5)
+        array([2.25, 3.0, 2.0, 0])
+
+        """
+        d, ier = dfitpack.spalde(*(self._eval_args+(x,)))
+        if not ier == 0:
+            raise ValueError("Error code returned by spalde: %s" % ier)
+        return d
+
+    def roots(self):
+        """ Return the zeros of the spline.
+
+        Restriction: only cubic splines are supported by fitpack.
+        """
+        k = self._data[5]
+        if k == 3:
+            z, m, ier = dfitpack.sproot(*self._eval_args[:2])
+            if not ier == 0:
+                raise ValueError("Error code returned by sproot: %s" % ier)
+            return z[:m]
+        raise NotImplementedError('finding roots unsupported for '
+                                  'non-cubic splines')
+
+    def derivative(self, n=1):
+        """
+        Construct a new spline representing the derivative of this spline.
+
+        Parameters
+        ----------
+        n : int, optional
+            Order of derivative to evaluate. Default: 1
+
+        Returns
+        -------
+        spline : UnivariateSpline
+            Spline of order k2=k-n representing the derivative of this
+            spline.
+
+        See Also
+        --------
+        splder, antiderivative
+
+        Notes
+        -----
+
+        .. versionadded:: 0.13.0
+
+        Examples
+        --------
+        This can be used for finding maxima of a curve:
+
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> x = np.linspace(0, 10, 70)
+        >>> y = np.sin(x)
+        >>> spl = UnivariateSpline(x, y, k=4, s=0)
+
+        Now, differentiate the spline and find the zeros of the
+        derivative. (NB: `sproot` only works for order 3 splines, so we
+        fit an order 4 spline):
+
+        >>> spl.derivative().roots() / np.pi
+        array([ 0.50000001,  1.5       ,  2.49999998])
+
+        This agrees well with roots :math:`\\pi/2 + n\\pi` of
+        :math:`\\cos(x) = \\sin'(x)`.
+
+        """
+        tck = fitpack.splder(self._eval_args, n)
+        return UnivariateSpline._from_tck(tck, self.ext)
+
+    def antiderivative(self, n=1):
+        """
+        Construct a new spline representing the antiderivative of this spline.
+
+        Parameters
+        ----------
+        n : int, optional
+            Order of antiderivative to evaluate. Default: 1
+
+        Returns
+        -------
+        spline : UnivariateSpline
+            Spline of order k2=k+n representing the antiderivative of this
+            spline.
+
+        Notes
+        -----
+
+        .. 
versionadded:: 0.13.0
+
+        See Also
+        --------
+        splantider, derivative
+
+        Examples
+        --------
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> x = np.linspace(0, np.pi/2, 70)
+        >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
+        >>> spl = UnivariateSpline(x, y, s=0)
+
+        The derivative is the inverse operation of the antiderivative,
+        although some floating point error accumulates:
+
+        >>> spl(1.7), spl.antiderivative().derivative()(1.7)
+        (array(2.1565429877197317), array(2.1565429877201865))
+
+        Antiderivative can be used to evaluate definite integrals:
+
+        >>> ispl = spl.antiderivative()
+        >>> ispl(np.pi/2) - ispl(0)
+        2.2572053588768486
+
+        This is indeed an approximation to the complete elliptic integral
+        :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
+
+        >>> from scipy.special import ellipk
+        >>> ellipk(0.8)
+        2.2572053268208538
+
+        """
+        tck = fitpack.splantider(self._eval_args, n)
+        return UnivariateSpline._from_tck(tck, self.ext)
+
+
+class InterpolatedUnivariateSpline(UnivariateSpline):
+    """
+    One-dimensional interpolating spline for a given set of data points.
+
+    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
+    Spline function passes through all provided points. Equivalent to
+    `UnivariateSpline` with s=0.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        Input dimension of data points -- must be increasing
+    y : (N,) array_like
+        Input dimension of data points
+    w : (N,) array_like, optional
+        Weights for spline fitting. Must be positive. If None (default),
+        weights are all equal.
+    bbox : (2,) array_like, optional
+        2-sequence specifying the boundary of the approximation interval. If
+        None (default), ``bbox=[x[0], x[-1]]``.
+    k : int, optional
+        Degree of the smoothing spline. Must be 1 <= `k` <= 5.
+    ext : int or str, optional
+        Controls the extrapolation mode for elements
+        not in the interval defined by the knot sequence.
+
+        * if ext=0 or 'extrapolate', return the extrapolated value.
+        * if ext=1 or 'zeros', return 0
+        * if ext=2 or 'raise', raise a ValueError
+        * if ext=3 or 'const', return the boundary value.
+
+        The default value is 0.
+
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination or nonsensical results) if the inputs
+        do contain infinities or NaNs.
+        Default is False.
+
+    See Also
+    --------
+    UnivariateSpline : Superclass -- allows knots to be selected by a
+        smoothing condition
+    LSQUnivariateSpline : spline for which knots are user-selected
+    splrep : An older, non object-oriented wrapping of FITPACK
+    splev, sproot, splint, spalde
+    BivariateSpline : A similar class for two-dimensional spline interpolation
+
+    Notes
+    -----
+    The number of data points must be larger than the spline degree `k`.
+
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import InterpolatedUnivariateSpline
+    >>> x = np.linspace(-3, 3, 50)
+    >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
+    >>> spl = InterpolatedUnivariateSpline(x, y)
+    >>> plt.plot(x, y, 'ro', ms=5)
+    >>> xs = np.linspace(-3, 3, 1000)
+    >>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
+    >>> plt.show()
+
+    Notice that the ``spl(x)`` interpolates `y`:
+
+    >>> spl.get_residual()
+    0.0
+
+    """
+    def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
+                 ext=0, check_finite=False):
+
+        if check_finite:
+            w_finite = np.isfinite(w).all() if w is not None else True
+            if (not np.isfinite(x).all() or not np.isfinite(y).all() or
+                    not w_finite):
+                raise ValueError("Input must not contain NaNs or infs.")
+        if not all(diff(x) > 0.0):
+            raise ValueError('x must be strictly increasing')
+
+        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
+        self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
+                                      xe=bbox[1], s=0)
+        self._reset_class()
+
+        try:
+            self.ext = _extrap_modes[ext]
+        except KeyError:
+            raise ValueError("Unknown extrapolation mode %s." % ext)
+
+
+_fpchec_error_string = """The input parameters have been rejected by fpchec. \
+This means that at least one of the following conditions is violated:
+
+1) k+1 <= n-k-1 <= m
+2) t(1) <= t(2) <= ... <= t(k+1)
+   t(n-k) <= t(n-k+1) <= ... <= t(n)
+3) t(k+1) < t(k+2) < ... < t(n-k)
+4) t(k+1) <= x(i) <= t(n-k)
+5) The conditions specified by Schoenberg and Whitney must hold
+   for at least one subset of data points, i.e., there must be a
+   subset of data points y(j) such that
+       t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
+"""
+
+
+class LSQUnivariateSpline(UnivariateSpline):
+    """
+    One-dimensional spline with explicit internal knots.
+
+    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
+    specifies the internal knots of the spline
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        Input dimension of data points -- must be increasing
+    y : (N,) array_like
+        Input dimension of data points
+    t : (M,) array_like
+        interior knots of the spline. Must be in ascending order and::
+
+            bbox[0] < t[0] < ... < t[-1] < bbox[-1]
+
+    w : (N,) array_like, optional
+        weights for spline fitting. Must be positive. If None (default),
+        weights are all equal.
+    bbox : (2,) array_like, optional
+        2-sequence specifying the boundary of the approximation interval. If
+        None (default), ``bbox = [x[0], x[-1]]``.
+    k : int, optional
+        Degree of the smoothing spline. Must be 1 <= `k` <= 5.
+        Default is k=3, a cubic spline.
+    ext : int or str, optional
+        Controls the extrapolation mode for elements
+        not in the interval defined by the knot sequence.
+
+        * if ext=0 or 'extrapolate', return the extrapolated value.
+        * if ext=1 or 'zeros', return 0
+        * if ext=2 or 'raise', raise a ValueError
+        * if ext=3 or 'const', return the boundary value.
+
+        The default value is 0.
+
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination or nonsensical results) if the inputs
+        do contain infinities or NaNs.
+        Default is False.
+
+    Raises
+    ------
+    ValueError
+        If the interior knots do not satisfy the Schoenberg-Whitney conditions
+
+    See Also
+    --------
+    UnivariateSpline : Superclass -- knots are specified by setting a
+        smoothing condition
+    InterpolatedUnivariateSpline : spline passing through all points
+    splrep : An older, non object-oriented wrapping of FITPACK
+    splev, sproot, splint, spalde
+    BivariateSpline : A similar class for two-dimensional spline interpolation
+
+    Notes
+    -----
+    The number of data points must be larger than the spline degree `k`.
+
+    Knots `t` must satisfy the Schoenberg-Whitney conditions,
+    i.e., there must be a subset of data points ``x[j]`` such that
+    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
+
+    Examples
+    --------
+    >>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-3, 3, 50)
+    >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
+
+    Fit a smoothing spline with pre-defined internal knots:
+
+    >>> t = [-1, 0, 1]
+    >>> spl = LSQUnivariateSpline(x, y, t)
+
+    >>> xs = np.linspace(-3, 3, 1000)
+    >>> plt.plot(x, y, 'ro', ms=5)
+    >>> plt.plot(xs, spl(xs), 'g-', lw=3)
+    >>> plt.show()
+
+    Check the knot vector:
+
+    >>> spl.get_knots()
+    array([-3., -1., 0., 1., 3.])
+
+    Constructing an LSQ spline using the knots from another spline:
+
+    >>> x = np.arange(10)
+    >>> s = UnivariateSpline(x, x, s=0)
+    >>> s.get_knots()
+    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])
+    >>> knt = s.get_knots()
+    >>> s1 = LSQUnivariateSpline(x, x, knt[1:-1])    # Chop 1st and last knot
+    >>> s1.get_knots()
+    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])
+
+    """
+
+    def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
+                 ext=0, check_finite=False):
+
+        if check_finite:
+            w_finite = np.isfinite(w).all() if w is not None else True
+            if (not np.isfinite(x).all() or not np.isfinite(y).all() or
+                    not w_finite or not np.isfinite(t).all()):
+                raise ValueError("Input(s) must not contain NaNs or infs.")
+        if not all(diff(x) > 0.0):
+            raise ValueError('x must be strictly increasing')
+
+        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
+        xb = bbox[0]
+        xe = bbox[1]
+        if xb is None:
+            xb = x[0]
+        if xe is None:
+            xe = x[-1]
+        t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
+        n = len(t)
+        if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
+            raise ValueError('Interior knots t must satisfy '
+                             'Schoenberg-Whitney conditions')
+        if not dfitpack.fpchec(x, t, k) == 0:
+            raise ValueError(_fpchec_error_string)
+        data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
+        self._data = data[:-3] + (None, None, data[-1])
+        self._reset_class()
+
+        try:
+            self.ext = _extrap_modes[ext]
+        except KeyError:
+            raise ValueError("Unknown extrapolation mode %s." % ext)
+
+
+# ############### Bivariate spline ####################
+
+class _BivariateSplineBase(object):
+    """ Base class for Bivariate spline s(x,y) interpolation on the rectangle
+    [xb,xe] x [yb, ye] calculated from a given set of data points
+    (x,y,z).
+ + See Also + -------- + bisplrep, bisplev : an older wrapping of FITPACK + BivariateSpline : + implementation of bivariate spline interpolation on a plane grid + SphereBivariateSpline : + implementation of bivariate spline interpolation on a spherical grid + """ + + def get_residual(self): + """ Return weighted sum of squared residuals of the spline + approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0) + """ + return self.fp + + def get_knots(self): + """ Return a tuple (tx,ty) where tx,ty contain knots positions + of the spline with respect to x-, y-variable, respectively. + The position of interior and additional knots are given as + t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively. + """ + return self.tck[:2] + + def get_coeffs(self): + """ Return spline coefficients.""" + return self.tck[2] + + def __call__(self, x, y, dx=0, dy=0, grid=True): + """ + Evaluate the spline or its derivatives at given positions. + + Parameters + ---------- + x, y : array_like + Input coordinates. + + If `grid` is False, evaluate the spline at points ``(x[i], + y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting + is obeyed. + + If `grid` is True: evaluate spline at the grid points + defined by the coordinate arrays x, y. The arrays must be + sorted to increasing order. + + Note that the axis ordering is inverted relative to + the output of meshgrid. + dx : int + Order of x-derivative + + .. versionadded:: 0.14.0 + dy : int + Order of y-derivative + + .. versionadded:: 0.14.0 + grid : bool + Whether to evaluate the results on a grid spanned by the + input arrays, or at points specified by the input arrays. + + .. versionadded:: 0.14.0 + + """ + x = np.asarray(x) + y = np.asarray(y) + + tx, ty, c = self.tck[:3] + kx, ky = self.degrees + if grid: + if x.size == 0 or y.size == 0: + return np.zeros((x.size, y.size), dtype=self.tck[2].dtype) + + if dx or dy: + z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y) + if not ier == 0: + raise ValueError("Error code returned by parder: %s" % ier) + else: + z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y) + if not ier == 0: + raise ValueError("Error code returned by bispev: %s" % ier) + else: + # standard Numpy broadcasting + if x.shape != y.shape: + x, y = np.broadcast_arrays(x, y) + + shape = x.shape + x = x.ravel() + y = y.ravel() + + if x.size == 0 or y.size == 0: + return np.zeros(shape, dtype=self.tck[2].dtype) + + if dx or dy: + z, ier = dfitpack.pardeu(tx, ty, c, kx, ky, dx, dy, x, y) + if not ier == 0: + raise ValueError("Error code returned by pardeu: %s" % ier) + else: + z, ier = dfitpack.bispeu(tx, ty, c, kx, ky, x, y) + if not ier == 0: + raise ValueError("Error code returned by bispeu: %s" % ier) + + z = z.reshape(shape) + return z + + +_surfit_messages = {1: """ +The required storage space exceeds the available storage space: nxest +or nyest too small, or s too small. +The weighted least-squares spline corresponds to the current set of +knots.""", + 2: """ +A theoretically impossible result was found during the iteration +process for finding a smoothing spline with fp = s: s too small or +badly chosen eps. +Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""", + 3: """ +the maximal number of iterations maxit (set to 20 by the program) +allowed for finding a smoothing spline with fp=s has been reached: +s too small. 
+Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
+                    4: """
+No more knots can be added because the number of b-spline coefficients
+(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
+either s or m too small.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+                    5: """
+No more knots can be added because the additional knot would (quasi)
+coincide with an old one: s too small or too large a weight to an
+inaccurate data point.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+                    10: """
+Error on entry, no approximation returned. The following conditions
+must hold:
+xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
+If iopt==-1, then
+  xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
+  yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
+                    -3: """
+The coefficients of the spline returned have been computed as the
+minimal norm least-squares solution of a (numerically) rank deficient
+system (deficiency=%i). If deficiency is large, the results may be
+inaccurate. Deficiency may strongly depend on the value of eps."""
+                    }
+
+
+class BivariateSpline(_BivariateSplineBase):
+    """
+    Base class for bivariate splines.
+
+    This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
+    the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
+    of data points ``(x, y, z)``.
+
+    This class is meant to be subclassed, not instantiated directly.
+    To construct these splines, call either `SmoothBivariateSpline` or
+    `LSQBivariateSpline`.
+
+    See Also
+    --------
+    UnivariateSpline : a similar class for univariate spline interpolation
+    SmoothBivariateSpline :
+        to create a BivariateSpline through the given points
+    LSQBivariateSpline :
+        to create a BivariateSpline using weighted least-squares fitting
+    SphereBivariateSpline :
+        bivariate spline interpolation in spherical coordinates
+    bisplrep : older wrapping of FITPACK
+    bisplev : older wrapping of FITPACK
+
+    """
+
+    @classmethod
+    def _from_tck(cls, tck):
+        """Construct a spline object from given tck and degree"""
+        self = cls.__new__(cls)
+        if len(tck) != 5:
+            raise ValueError("tck should be a 5 element tuple of tx,"
+                             " ty, c, kx, ky")
+        self.tck = tck[:3]
+        self.degrees = tck[3:]
+        return self
+
+    def ev(self, xi, yi, dx=0, dy=0):
+        """
+        Evaluate the spline at points
+
+        Returns the interpolated value at ``(xi[i], yi[i]),
+        i=0,...,len(xi)-1``.
+
+        Parameters
+        ----------
+        xi, yi : array_like
+            Input coordinates. Standard Numpy broadcasting is obeyed.
+        dx : int, optional
+            Order of x-derivative
+
+            .. versionadded:: 0.14.0
+        dy : int, optional
+            Order of y-derivative
+
+            .. versionadded:: 0.14.0
+        """
+        return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
+
+    def integral(self, xa, xb, ya, yb):
+        """
+        Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
+
+        Parameters
+        ----------
+        xa, xb : float
+            The end-points of the x integration interval.
+        ya, yb : float
+            The end-points of the y integration interval.
+
+        Returns
+        -------
+        integ : float
+            The value of the resulting integral.
+
+        """
+        tx, ty, c = self.tck[:3]
+        kx, ky = self.degrees
+        return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
+
+
+class SmoothBivariateSpline(BivariateSpline):
+    """
+    Smooth bivariate spline approximation.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        1-D sequences of data points (order is not important).
+    w : array_like, optional
+        Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
+    bbox : array_like, optional
+        Sequence of length 4 specifying the boundary of the rectangular
+        approximation domain. By default,
+        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
+    kx, ky : ints, optional
+        Degrees of the bivariate spline. Default is 3.
+    s : float, optional
+        Positive smoothing factor defined for estimation condition:
+        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
+        Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
+        estimate of the standard deviation of ``z[i]``.
+    eps : float, optional
+        A threshold for determining the effective rank of an over-determined
+        linear system of equations. `eps` should have a value between 0 and 1,
+        the default is 1e-16.
+
+    See Also
+    --------
+    bisplrep : an older wrapping of FITPACK
+    bisplev : an older wrapping of FITPACK
+    UnivariateSpline : a similar class for univariate spline interpolation
+    LSQBivariateSpline : to create a BivariateSpline using weighted
+        least-squares fitting
+
+    Notes
+    -----
+    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
+
+    """
+
+    def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
+                 eps=None):
+        xb, xe, yb, ye = bbox
+        nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
+                                                                xb, xe, yb,
+                                                                ye, kx, ky,
+                                                                s=s, eps=eps,
+                                                                lwrk2=1)
+        if ier > 10:          # lwrk2 was too small, re-run
+            nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
+                                                                    xb, xe, yb,
+                                                                    ye, kx, ky,
+                                                                    s=s,
+                                                                    eps=eps,
+                                                                    lwrk2=ier)
+        if ier in [0, -1, -2]:  # normal return
+            pass
+        else:
+            message = _surfit_messages.get(ier, 'ier=%s' % (ier))
+            warnings.warn(message)
+
+        self.fp = fp
+        self.tck = tx[:nx], ty[:ny], c[:(nx-kx-1)*(ny-ky-1)]
+        self.degrees = kx, ky
+
+
+class LSQBivariateSpline(BivariateSpline):
+    """
+    Weighted least-squares bivariate spline approximation.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        1-D sequences of data points (order is not important).
+    tx, ty : array_like
+        Strictly ordered 1-D sequences of knots coordinates.
+    w : array_like, optional
+        Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
+    bbox : (4,) array_like, optional
+        Sequence of length 4 specifying the boundary of the rectangular
+        approximation domain. By default,
+        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
+    kx, ky : ints, optional
+        Degrees of the bivariate spline. Default is 3.
+    eps : float, optional
+        A threshold for determining the effective rank of an over-determined
+        linear system of equations. `eps` should have a value between 0 and 1,
+        the default is 1e-16.
+
+    See Also
+    --------
+    bisplrep : an older wrapping of FITPACK
+    bisplev : an older wrapping of FITPACK
+    UnivariateSpline : a similar class for univariate spline interpolation
+    SmoothBivariateSpline : create a smoothing BivariateSpline
+
+    Notes
+    -----
+    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
+ + """ + + def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3, + eps=None): + nx = 2*kx+2+len(tx) + ny = 2*ky+2+len(ty) + tx1 = zeros((nx,), float) + ty1 = zeros((ny,), float) + tx1[kx+1:nx-kx-1] = tx + ty1[ky+1:ny-ky-1] = ty + + xb, xe, yb, ye = bbox + tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w, + xb, xe, yb, ye, + kx, ky, eps, lwrk2=1) + if ier > 10: + tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w, + xb, xe, yb, ye, + kx, ky, eps, lwrk2=ier) + if ier in [0, -1, -2]: # normal return + pass + else: + if ier < -2: + deficiency = (nx-kx-1)*(ny-ky-1)+ier + message = _surfit_messages.get(-3) % (deficiency) + else: + message = _surfit_messages.get(ier, 'ier=%s' % (ier)) + warnings.warn(message) + self.fp = fp + self.tck = tx1, ty1, c + self.degrees = kx, ky + + +class RectBivariateSpline(BivariateSpline): + """ + Bivariate spline approximation over a rectangular mesh. + + Can be used for both smoothing and interpolating data. + + Parameters + ---------- + x,y : array_like + 1-D arrays of coordinates in strictly ascending order. + z : array_like + 2-D array of data with shape (x.size,y.size). + bbox : array_like, optional + Sequence of length 4 specifying the boundary of the rectangular + approximation domain. By default, + ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``. + kx, ky : ints, optional + Degrees of the bivariate spline. Default is 3. + s : float, optional + Positive smoothing factor defined for estimation condition: + ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s`` + Default is ``s=0``, which is for interpolation. + + See Also + -------- + SmoothBivariateSpline : a smoothing bivariate spline for scattered data + bisplrep : an older wrapping of FITPACK + bisplev : an older wrapping of FITPACK + UnivariateSpline : a similar class for univariate spline interpolation + + """ + + def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0): + x, y = ravel(x), ravel(y) + if not all(diff(x) > 0.0): + raise ValueError('x must be strictly increasing') + if not all(diff(y) > 0.0): + raise ValueError('y must be strictly increasing') + if not ((x.min() == x[0]) and (x.max() == x[-1])): + raise ValueError('x must be strictly ascending') + if not ((y.min() == y[0]) and (y.max() == y[-1])): + raise ValueError('y must be strictly ascending') + if not x.size == z.shape[0]: + raise ValueError('x dimension of z must have same number of ' + 'elements as x') + if not y.size == z.shape[1]: + raise ValueError('y dimension of z must have same number of ' + 'elements as y') + z = ravel(z) + xb, xe, yb, ye = bbox + nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb, + ye, kx, ky, s) + + if ier not in [0, -1, -2]: + msg = _surfit_messages.get(ier, 'ier=%s' % (ier)) + raise ValueError(msg) + + self.fp = fp + self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)] + self.degrees = kx, ky + + +_spherefit_messages = _surfit_messages.copy() +_spherefit_messages[10] = """ +ERROR. On entry, the input data are controlled on validity. The following + restrictions must be satisfied: + -1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1, + 0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m + lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m + kwrk >= m+(ntest-7)*(npest-7) + if iopt=-1: 8<=nt<=ntest , 9<=np<=npest + 0<tt(5)<tt(6)<...<tt(nt-4)<pi + 0<tp(5)<tp(6)<...<tp(np-4)<2*pi + if iopt>=0: s>=0 + if one of these conditions is found to be violated,control + is immediately repassed to the calling program. 
in that + case there is no approximation returned.""" +_spherefit_messages[-3] = """ +WARNING. The coefficients of the spline returned have been computed as the + minimal norm least-squares solution of a (numerically) rank + deficient system (deficiency=%i, rank=%i). Especially if the rank + deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large, + the results may be inaccurate. They could also seriously depend on + the value of eps.""" + + +class SphereBivariateSpline(_BivariateSplineBase): + """ + Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a + given set of data points (theta,phi,r). + + .. versionadded:: 0.11.0 + + See Also + -------- + bisplrep, bisplev : an older wrapping of FITPACK + UnivariateSpline : a similar class for univariate spline interpolation + SmoothUnivariateSpline : + to create a BivariateSpline through the given points + LSQUnivariateSpline : + to create a BivariateSpline using weighted least-squares fitting + """ + + def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True): + """ + Evaluate the spline or its derivatives at given positions. + + Parameters + ---------- + theta, phi : array_like + Input coordinates. + + If `grid` is False, evaluate the spline at points + ``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard + Numpy broadcasting is obeyed. + + If `grid` is True: evaluate spline at the grid points + defined by the coordinate arrays theta, phi. The arrays + must be sorted to increasing order. + dtheta : int, optional + Order of theta-derivative + + .. versionadded:: 0.14.0 + dphi : int + Order of phi-derivative + + .. versionadded:: 0.14.0 + grid : bool + Whether to evaluate the results on a grid spanned by the + input arrays, or at points specified by the input arrays. + + .. versionadded:: 0.14.0 + + """ + theta = np.asarray(theta) + phi = np.asarray(phi) + + if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi): + raise ValueError("requested theta out of bounds.") + if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi): + raise ValueError("requested phi out of bounds.") + + return _BivariateSplineBase.__call__(self, theta, phi, + dx=dtheta, dy=dphi, grid=grid) + + def ev(self, theta, phi, dtheta=0, dphi=0): + """ + Evaluate the spline at points + + Returns the interpolated value at ``(theta[i], phi[i]), + i=0,...,len(theta)-1``. + + Parameters + ---------- + theta, phi : array_like + Input coordinates. Standard Numpy broadcasting is obeyed. + dtheta : int, optional + Order of theta-derivative + + .. versionadded:: 0.14.0 + dphi : int, optional + Order of phi-derivative + + .. versionadded:: 0.14.0 + """ + return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False) + + +class SmoothSphereBivariateSpline(SphereBivariateSpline): + """ + Smooth bivariate spline approximation in spherical coordinates. + + .. versionadded:: 0.11.0 + + Parameters + ---------- + theta, phi, r : array_like + 1-D sequences of data points (order is not important). Coordinates + must be given in radians. Theta must lie within the interval (0, pi), + and phi must lie within the interval (0, 2pi). + w : array_like, optional + Positive 1-D sequence of weights. + s : float, optional + Positive smoothing factor defined for estimation condition: + ``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s`` + Default ``s=len(w)`` which should be a good value if 1/w[i] is an + estimate of the standard deviation of r[i]. 
+ eps : float, optional + A threshold for determining the effective rank of an over-determined + linear system of equations. `eps` should have a value between 0 and 1, + the default is 1e-16. + + Notes + ----- + For more information, see the FITPACK_ site about this function. + + .. _FITPACK: http://www.netlib.org/dierckx/sphere.f + + Examples + -------- + Suppose we have global data on a coarse grid (the input data does not + have to be on a grid): + + >>> theta = np.linspace(0., np.pi, 7) + >>> phi = np.linspace(0., 2*np.pi, 9) + >>> data = np.empty((theta.shape[0], phi.shape[0])) + >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0. + >>> data[1:-1,1], data[1:-1,-1] = 1., 1. + >>> data[1,1:-1], data[-2,1:-1] = 1., 1. + >>> data[2:-2,2], data[2:-2,-2] = 2., 2. + >>> data[2,2:-2], data[-3,2:-2] = 2., 2. + >>> data[3,3:-2] = 3. + >>> data = np.roll(data, 4, 1) + + We need to set up the interpolator object + + >>> lats, lons = np.meshgrid(theta, phi) + >>> from scipy.interpolate import SmoothSphereBivariateSpline + >>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(), + ... data.T.ravel(), s=3.5) + + As a first test, we'll see what the algorithm returns when run on the + input coordinates + + >>> data_orig = lut(theta, phi) + + Finally we interpolate the data to a finer grid + + >>> fine_lats = np.linspace(0., np.pi, 70) + >>> fine_lons = np.linspace(0., 2 * np.pi, 90) + + >>> data_smth = lut(fine_lats, fine_lons) + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(131) + >>> ax1.imshow(data, interpolation='nearest') + >>> ax2 = fig.add_subplot(132) + >>> ax2.imshow(data_orig, interpolation='nearest') + >>> ax3 = fig.add_subplot(133) + >>> ax3.imshow(data_smth, interpolation='nearest') + >>> plt.show() + + """ + + def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16): + if np.issubclass_(w, float): + w = ones(len(theta)) * w + nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi, + r, w=w, s=s, + eps=eps) + if ier not in [0, -1, -2]: + message = _spherefit_messages.get(ier, 'ier=%s' % (ier)) + raise ValueError(message) + + self.fp = fp + self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)] + self.degrees = (3, 3) + + +class LSQSphereBivariateSpline(SphereBivariateSpline): + """ + Weighted least-squares bivariate spline approximation in spherical + coordinates. + + .. versionadded:: 0.11.0 + + Parameters + ---------- + theta, phi, r : array_like + 1-D sequences of data points (order is not important). Coordinates + must be given in radians. Theta must lie within the interval (0, pi), + and phi must lie within the interval (0, 2pi). + tt, tp : array_like + Strictly ordered 1-D sequences of knots coordinates. + Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``. + w : array_like, optional + Positive 1-D sequence of weights, of the same length as `theta`, `phi` + and `r`. + eps : float, optional + A threshold for determining the effective rank of an over-determined + linear system of equations. `eps` should have a value between 0 and 1, + the default is 1e-16. + + Notes + ----- + For more information, see the FITPACK_ site about this function. + + .. _FITPACK: http://www.netlib.org/dierckx/sphere.f + + Examples + -------- + Suppose we have global data on a coarse grid (the input data does not + have to be on a grid): + + >>> theta = np.linspace(0., np.pi, 7) + >>> phi = np.linspace(0., 2*np.pi, 9) + >>> data = np.empty((theta.shape[0], phi.shape[0])) + >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0. 
+ >>> data[1:-1,1], data[1:-1,-1] = 1., 1. + >>> data[1,1:-1], data[-2,1:-1] = 1., 1. + >>> data[2:-2,2], data[2:-2,-2] = 2., 2. + >>> data[2,2:-2], data[-3,2:-2] = 2., 2. + >>> data[3,3:-2] = 3. + >>> data = np.roll(data, 4, 1) + + We need to set up the interpolator object. Here, we must also specify the + coordinates of the knots to use. + + >>> lats, lons = np.meshgrid(theta, phi) + >>> knotst, knotsp = theta.copy(), phi.copy() + >>> knotst[0] += .0001 + >>> knotst[-1] -= .0001 + >>> knotsp[0] += .0001 + >>> knotsp[-1] -= .0001 + >>> from scipy.interpolate import LSQSphereBivariateSpline + >>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), + ... data.T.ravel(), knotst, knotsp) + + As a first test, we'll see what the algorithm returns when run on the + input coordinates + + >>> data_orig = lut(theta, phi) + + Finally we interpolate the data to a finer grid + + >>> fine_lats = np.linspace(0., np.pi, 70) + >>> fine_lons = np.linspace(0., 2*np.pi, 90) + + >>> data_lsq = lut(fine_lats, fine_lons) + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(131) + >>> ax1.imshow(data, interpolation='nearest') + >>> ax2 = fig.add_subplot(132) + >>> ax2.imshow(data_orig, interpolation='nearest') + >>> ax3 = fig.add_subplot(133) + >>> ax3.imshow(data_lsq, interpolation='nearest') + >>> plt.show() + + """ + + def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16): + if np.issubclass_(w, float): + w = ones(len(theta)) * w + nt_, np_ = 8 + len(tt), 8 + len(tp) + tt_, tp_ = zeros((nt_,), float), zeros((np_,), float) + tt_[4:-4], tp_[4:-4] = tt, tp + tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi + tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_, + w=w, eps=eps) + if ier < -2: + deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier + message = _spherefit_messages.get(-3) % (deficiency, -ier) + warnings.warn(message, stacklevel=3) + elif ier not in [0, -1, -2]: + message = _spherefit_messages.get(ier, 'ier=%s' % (ier)) + raise ValueError(message) + + self.fp = fp + self.tck = tt_, tp_, c + self.degrees = (3, 3) + + +_spfit_messages = _surfit_messages.copy() +_spfit_messages[10] = """ +ERROR: on entry, the input data are controlled on validity + the following restrictions must be satisfied. + -1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1, + -1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0. + -1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0. + mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8, + kwrk>=5+mu+mv+nuest+nvest, + lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest) + 0< u(i-1)<u(i)< pi,i=2,..,mu, + -pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv + if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3)) + 0<tu(5)<tu(6)<...<tu(nu-4)< pi + 8<=nv<=min(nvest,mv+7) + v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi + the schoenberg-whitney conditions, i.e. there must be + subset of grid co-ordinates uu(p) and vv(q) such that + tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4 + (iopt(2)=1 and iopt(3)=1 also count for a uu-value + tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4 + (vv(q) is either a value v(j) or v(j)+2*pi) + if iopt(1)>=0: s>=0 + if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7 + if one of these conditions is found to be violated,control is + immediately repassed to the calling program. in that case there is no + approximation returned.""" + + +class RectSphereBivariateSpline(SphereBivariateSpline): + """ + Bivariate spline approximation over a rectangular mesh on a sphere. + + Can be used for smoothing data. + + .. 
versionadded:: 0.11.0 + + Parameters + ---------- + u : array_like + 1-D array of latitude coordinates in strictly ascending order. + Coordinates must be given in radians and lie within the interval + (0, pi). + v : array_like + 1-D array of longitude coordinates in strictly ascending order. + Coordinates must be given in radians. First element (v[0]) must lie + within the interval [-pi, pi). Last element (v[-1]) must satisfy + v[-1] <= v[0] + 2*pi. + r : array_like + 2-D array of data with shape ``(u.size, v.size)``. + s : float, optional + Positive smoothing factor defined for estimation condition + (``s=0`` is for interpolation). + pole_continuity : bool or (bool, bool), optional + Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and + ``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole + will be 1 or 0 when this is True or False, respectively. + Defaults to False. + pole_values : float or (float, float), optional + Data values at the poles ``u=0`` and ``u=pi``. Either the whole + parameter or each individual element can be None. Defaults to None. + pole_exact : bool or (bool, bool), optional + Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the + value is considered to be the right function value, and it will be + fitted exactly. If False, the value will be considered to be a data + value just like the other data values. Defaults to False. + pole_flat : bool or (bool, bool), optional + For the poles at ``u=0`` and ``u=pi``, specify whether or not the + approximation has vanishing derivatives. Defaults to False. + + See Also + -------- + RectBivariateSpline : bivariate spline approximation over a rectangular + mesh + + Notes + ----- + Currently, only the smoothing spline approximation (``iopt[0] = 0`` and + ``iopt[0] = 1`` in the FITPACK routine) is supported. The exact + least-squares spline approximation is not implemented yet. + + When actually performing the interpolation, the requested `v` values must + lie within the same length 2pi interval that the original `v` values were + chosen from. + + For more information, see the FITPACK_ site about this function. + + .. _FITPACK: http://www.netlib.org/dierckx/spgrid.f + + Examples + -------- + Suppose we have global data on a coarse grid + + >>> lats = np.linspace(10, 170, 9) * np.pi / 180. + >>> lons = np.linspace(0, 350, 18) * np.pi / 180. + >>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T, + ... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T + + We want to interpolate it to a global one-degree grid + + >>> new_lats = np.linspace(1, 180, 180) * np.pi / 180 + >>> new_lons = np.linspace(1, 360, 360) * np.pi / 180 + >>> new_lats, new_lons = np.meshgrid(new_lats, new_lons) + + We need to set up the interpolator object + + >>> from scipy.interpolate import RectSphereBivariateSpline + >>> lut = RectSphereBivariateSpline(lats, lons, data) + + Finally we interpolate the data. The `RectSphereBivariateSpline` object + only takes 1-D arrays as input, therefore we need to do some reshaping. + + >>> data_interp = lut.ev(new_lats.ravel(), + ... 
new_lons.ravel()).reshape((360, 180)).T + + Looking at the original and the interpolated data, one can see that the + interpolant reproduces the original data very well: + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(211) + >>> ax1.imshow(data, interpolation='nearest') + >>> ax2 = fig.add_subplot(212) + >>> ax2.imshow(data_interp, interpolation='nearest') + >>> plt.show() + + Choosing the optimal value of ``s`` can be a delicate task. Recommended + values for ``s`` depend on the accuracy of the data values. If the user + has an idea of the statistical errors on the data, she can also find a + proper estimate for ``s``. By assuming that, if she specifies the + right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly + reproduces the function underlying the data, she can evaluate + ``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``. + For example, if she knows that the statistical errors on her + ``r(i,j)``-values are not greater than 0.1, she may expect that a good + ``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``. + + If nothing is known about the statistical error in ``r(i,j)``, ``s`` must + be determined by trial and error. The best is then to start with a very + large value of ``s`` (to determine the least-squares polynomial and the + corresponding upper bound ``fp0`` for ``s``) and then to progressively + decrease the value of ``s`` (say by a factor 10 in the beginning, i.e. + ``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation + shows more detail) to obtain closer fits. + + The interpolation results for different values of ``s`` give some insight + into this process: + + >>> fig2 = plt.figure() + >>> s = [3e9, 2e9, 1e9, 1e8] + >>> for ii in range(len(s)): + ... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii]) + ... data_interp = lut.ev(new_lats.ravel(), + ... new_lons.ravel()).reshape((360, 180)).T + ... ax = fig2.add_subplot(2, 2, ii+1) + ... ax.imshow(data_interp, interpolation='nearest') + ... 
ax.set_title("s = %g" % s[ii]) + >>> plt.show() + + """ + + def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None, + pole_exact=False, pole_flat=False): + iopt = np.array([0, 0, 0], dtype=int) + ider = np.array([-1, 0, -1, 0], dtype=int) + if pole_values is None: + pole_values = (None, None) + elif isinstance(pole_values, (float, np.float32, np.float64)): + pole_values = (pole_values, pole_values) + if isinstance(pole_continuity, bool): + pole_continuity = (pole_continuity, pole_continuity) + if isinstance(pole_exact, bool): + pole_exact = (pole_exact, pole_exact) + if isinstance(pole_flat, bool): + pole_flat = (pole_flat, pole_flat) + + r0, r1 = pole_values + iopt[1:] = pole_continuity + if r0 is None: + ider[0] = -1 + else: + ider[0] = pole_exact[0] + + if r1 is None: + ider[2] = -1 + else: + ider[2] = pole_exact[1] + + ider[1], ider[3] = pole_flat + + u, v = np.ravel(u), np.ravel(v) + if not np.all(np.diff(u) > 0.0): + raise ValueError('u must be strictly increasing') + if not np.all(np.diff(v) > 0.0): + raise ValueError('v must be strictly increasing') + + if not u.size == r.shape[0]: + raise ValueError('u dimension of r must have same number of ' + 'elements as u') + if not v.size == r.shape[1]: + raise ValueError('v dimension of r must have same number of ' + 'elements as v') + + if pole_continuity[1] is False and pole_flat[1] is True: + raise ValueError('if pole_continuity is False, so must be ' + 'pole_flat') + if pole_continuity[0] is False and pole_flat[0] is True: + raise ValueError('if pole_continuity is False, so must be ' + 'pole_flat') + + r = np.ravel(r) + nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider, + u.copy(), v.copy(), r.copy(), r0, r1, s) + + if ier not in [0, -1, -2]: + msg = _spfit_messages.get(ier, 'ier=%s' % (ier)) + raise ValueError(msg) + + self.fp = fp + self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)] + self.degrees = (3, 3) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack2.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack2.pyc new file mode 100644 index 0000000..f81f269 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/fitpack2.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpnd.so b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpnd.so new file mode 100755 index 0000000..cbe5633 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpnd.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpnd_info.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpnd_info.py new file mode 100644 index 0000000..8387968 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpnd_info.py @@ -0,0 +1,39 @@ +""" +Here we perform some symbolic computations required for the N-D +interpolation routines in `interpnd.pyx`. 
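+Concretely, `_estimate_gradients_2d_global` below expands a cubic
+Bernstein polynomial with symbolic control values, integrates the
+square of its second derivative, and prints the small linear system
+(the matrices A and B) whose solution gives the gradient estimates;
+it serves as a derivation aid rather than code imported by the
+interpolation routines at runtime.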
+
+"""
+from __future__ import division, print_function, absolute_import
+
+from sympy import symbols, binomial, Matrix
+
+
+def _estimate_gradients_2d_global():
+
+    #
+    # Compute the coefficient matrices of the 2x2 linear system that
+    # minimizes the integral of the squared second derivative of a cubic
+    # Bernstein polynomial with respect to the edge derivatives df1, df2.
+    #
+
+    f1, f2, df1, df2, x = symbols(['f1', 'f2', 'df1', 'df2', 'x'])
+    c = [f1, (df1 + 3*f1)/3, (df2 + 3*f2)/3, f2]
+
+    w = 0
+    for k in range(4):
+        w += binomial(3, k) * c[k] * x**k*(1-x)**(3-k)
+
+    wpp = w.diff(x, 2).expand()
+    intwpp2 = (wpp**2).integrate((x, 0, 1)).expand()
+
+    A = Matrix([[intwpp2.coeff(df1**2), intwpp2.coeff(df1*df2)/2],
+                [intwpp2.coeff(df1*df2)/2, intwpp2.coeff(df2**2)]])
+
+    B = Matrix([[intwpp2.coeff(df1).subs(df2, 0)],
+                [intwpp2.coeff(df2).subs(df1, 0)]]) / 2
+
+    print("A")
+    print(A)
+    print("B")
+    print(B)
+    print("solution")
+    print(A.inv() * B)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpnd_info.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpnd_info.pyc
new file mode 100644
index 0000000..56cb389
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpnd_info.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate.py
new file mode 100644
index 0000000..f7a5b23
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate.py
@@ -0,0 +1,2918 @@
+""" Classes for interpolating values.
+"""
+from __future__ import division, print_function, absolute_import
+
+
+__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
+           'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
+           'RegularGridInterpolator', 'interpn']
+
+
+import itertools
+import warnings
+import functools
+import operator
+
+import numpy as np
+from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
+                   dot, ravel, poly1d, asarray, intp)
+
+import scipy.linalg
+import scipy.special as spec
+from scipy.special import comb
+
+from scipy._lib.six import xrange, integer_types, string_types
+
+from . import fitpack
+from . import dfitpack
+from . import _fitpack
+from .polyint import _Interpolator1D
+from . import _ppoly
+from .fitpack2 import RectBivariateSpline
+from .interpnd import _ndim_coords_from_arrays
+from ._bsplines import make_interp_spline, BSpline
+
+
+def prod(x):
+    """Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
+    if len(x) == 0:
+        return 1
+    return functools.reduce(operator.mul, x)
+
+
+def lagrange(x, w):
+    r"""
+    Return a Lagrange interpolating polynomial.
+
+    Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
+    polynomial through the points ``(x, w)``.
+
+    Warning: This implementation is numerically unstable. Do not expect to
+    be able to use more than about 20 points even if they are chosen optimally.
+
+    Parameters
+    ----------
+    x : array_like
+        `x` represents the x-coordinates of a set of datapoints.
+    w : array_like
+        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
+
+    Returns
+    -------
+    lagrange : `numpy.poly1d` instance
+        The Lagrange interpolating polynomial.
+
+    Examples
+    --------
+    Interpolate :math:`f(x) = x^3` by 3 points.
+
+    >>> from scipy.interpolate import lagrange
+    >>> x = np.array([0, 1, 2])
+    >>> y = x**3
+    >>> poly = lagrange(x, y)
+
+    Since there are only 3 points, the Lagrange polynomial has degree 2.
+    Explicitly, it is given by
+
+    .. 
math:: + + \begin{aligned} + L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\ + &= x (-2 + 3x) + \end{aligned} + + >>> from numpy.polynomial.polynomial import Polynomial + >>> Polynomial(poly).coef + array([ 3., -2., 0.]) + + """ + + M = len(x) + p = poly1d(0.0) + for j in xrange(M): + pt = poly1d(w[j]) + for k in xrange(M): + if k == j: + continue + fac = x[j]-x[k] + pt *= poly1d([1.0, -x[k]])/fac + p += pt + return p + + +# !! Need to find argument for keeping initialize. If it isn't +# !! found, get rid of it! + + +class interp2d(object): + """ + interp2d(x, y, z, kind='linear', copy=True, bounds_error=False, + fill_value=nan) + + Interpolate over a 2-D grid. + + `x`, `y` and `z` are arrays of values used to approximate some function + f: ``z = f(x, y)``. This class returns a function whose call method uses + spline interpolation to find the value of new points. + + If `x` and `y` represent a regular grid, consider using + RectBivariateSpline. + + Note that calling `interp2d` with NaNs present in input values results in + undefined behaviour. + + Methods + ------- + __call__ + + Parameters + ---------- + x, y : array_like + Arrays defining the data point coordinates. + + If the points lie on a regular grid, `x` can specify the column + coordinates and `y` the row coordinates, for example:: + + >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]] + + Otherwise, `x` and `y` must specify the full coordinates for each + point, for example:: + + >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6] + + If `x` and `y` are multi-dimensional, they are flattened before use. + z : array_like + The values of the function to interpolate at the data points. If + `z` is a multi-dimensional array, it is flattened before use. The + length of a flattened `z` array is either + len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates + or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates + for each point. + kind : {'linear', 'cubic', 'quintic'}, optional + The kind of spline interpolation to use. Default is 'linear'. + copy : bool, optional + If True, the class makes internal copies of x, y and z. + If False, references may be used. The default is to copy. + bounds_error : bool, optional + If True, when interpolated values are requested outside of the + domain of the input data (x,y), a ValueError is raised. + If False, then `fill_value` is used. + fill_value : number, optional + If provided, the value to use for points outside of the + interpolation domain. If omitted (None), values outside + the domain are extrapolated. + + See Also + -------- + RectBivariateSpline : + Much faster 2D interpolation if your input data is on a grid + bisplrep, bisplev : + Spline interpolation based on FITPACK + BivariateSpline : a more recent wrapper of the FITPACK routines + interp1d : one dimension version of this function + + Notes + ----- + The minimum number of data points required along the interpolation + axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for + quintic interpolation. + + The interpolator is constructed by `bisplrep`, with a smoothing factor + of 0. If more control over smoothing is needed, `bisplrep` should be + used directly. 
+ + Examples + -------- + Construct a 2-D grid and interpolate on it: + + >>> from scipy import interpolate + >>> x = np.arange(-5.01, 5.01, 0.25) + >>> y = np.arange(-5.01, 5.01, 0.25) + >>> xx, yy = np.meshgrid(x, y) + >>> z = np.sin(xx**2+yy**2) + >>> f = interpolate.interp2d(x, y, z, kind='cubic') + + Now use the obtained interpolation function and plot the result: + + >>> import matplotlib.pyplot as plt + >>> xnew = np.arange(-5.01, 5.01, 1e-2) + >>> ynew = np.arange(-5.01, 5.01, 1e-2) + >>> znew = f(xnew, ynew) + >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-') + >>> plt.show() + """ + + def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False, + fill_value=None): + x = ravel(x) + y = ravel(y) + z = asarray(z) + + rectangular_grid = (z.size == len(x) * len(y)) + if rectangular_grid: + if z.ndim == 2: + if z.shape != (len(y), len(x)): + raise ValueError("When on a regular grid with x.size = m " + "and y.size = n, if z.ndim == 2, then z " + "must have shape (n, m)") + if not np.all(x[1:] >= x[:-1]): + j = np.argsort(x) + x = x[j] + z = z[:, j] + if not np.all(y[1:] >= y[:-1]): + j = np.argsort(y) + y = y[j] + z = z[j, :] + z = ravel(z.T) + else: + z = ravel(z) + if len(x) != len(y): + raise ValueError( + "x and y must have equal lengths for non rectangular grid") + if len(z) != len(x): + raise ValueError( + "Invalid length for input z for non rectangular grid") + + try: + kx = ky = {'linear': 1, + 'cubic': 3, + 'quintic': 5}[kind] + except KeyError: + raise ValueError("Unsupported interpolation type.") + + if not rectangular_grid: + # TODO: surfit is really not meant for interpolation! + self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0) + else: + nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth( + x, y, z, None, None, None, None, + kx=kx, ky=ky, s=0.0) + self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], + kx, ky) + + self.bounds_error = bounds_error + self.fill_value = fill_value + self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)] + + self.x_min, self.x_max = np.amin(x), np.amax(x) + self.y_min, self.y_max = np.amin(y), np.amax(y) + + def __call__(self, x, y, dx=0, dy=0, assume_sorted=False): + """Interpolate the function. + + Parameters + ---------- + x : 1D array + x-coordinates of the mesh on which to interpolate. + y : 1D array + y-coordinates of the mesh on which to interpolate. + dx : int >= 0, < kx + Order of partial derivatives in x. + dy : int >= 0, < ky + Order of partial derivatives in y. + assume_sorted : bool, optional + If False, values of `x` and `y` can be in any order and they are + sorted first. + If True, `x` and `y` have to be arrays of monotonically + increasing values. + + Returns + ------- + z : 2D array with shape (len(y), len(x)) + The interpolated values. 
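+
+        Notes
+        -----
+        A shape sketch, assuming the ``f`` built in the class-level
+        example above (only the shape is the point here):
+
+        >>> f(np.array([0.0, 0.5, 1.0]), np.array([0.0, 1.0])).shape
+        (2, 3)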
+ """ + + x = atleast_1d(x) + y = atleast_1d(y) + + if x.ndim != 1 or y.ndim != 1: + raise ValueError("x and y should both be 1-D arrays") + + if not assume_sorted: + x = np.sort(x) + y = np.sort(y) + + if self.bounds_error or self.fill_value is not None: + out_of_bounds_x = (x < self.x_min) | (x > self.x_max) + out_of_bounds_y = (y < self.y_min) | (y > self.y_max) + + any_out_of_bounds_x = np.any(out_of_bounds_x) + any_out_of_bounds_y = np.any(out_of_bounds_y) + + if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y): + raise ValueError("Values out of range; x must be in %r, y in %r" + % ((self.x_min, self.x_max), + (self.y_min, self.y_max))) + + z = fitpack.bisplev(x, y, self.tck, dx, dy) + z = atleast_2d(z) + z = transpose(z) + + if self.fill_value is not None: + if any_out_of_bounds_x: + z[:, out_of_bounds_x] = self.fill_value + if any_out_of_bounds_y: + z[out_of_bounds_y, :] = self.fill_value + + if len(z) == 1: + z = z[0] + return array(z) + + +def _check_broadcast_up_to(arr_from, shape_to, name): + """Helper to check that arr_from broadcasts up to shape_to""" + shape_from = arr_from.shape + if len(shape_to) >= len(shape_from): + for t, f in zip(shape_to[::-1], shape_from[::-1]): + if f != 1 and f != t: + break + else: # all checks pass, do the upcasting that we need later + if arr_from.size != 1 and arr_from.shape != shape_to: + arr_from = np.ones(shape_to, arr_from.dtype) * arr_from + return arr_from.ravel() + # at least one check failed + raise ValueError('%s argument must be able to broadcast up ' + 'to shape %s but had shape %s' + % (name, shape_to, shape_from)) + + +def _do_extrapolate(fill_value): + """Helper to check if fill_value == "extrapolate" without warnings""" + return (isinstance(fill_value, string_types) and + fill_value == 'extrapolate') + + +class interp1d(_Interpolator1D): + """ + Interpolate a 1-D function. + + `x` and `y` are arrays of values used to approximate some function f: + ``y = f(x)``. This class returns a function whose call method uses + interpolation to find the value of new points. + + Note that calling `interp1d` with NaNs present in input values results in + undefined behaviour. + + Parameters + ---------- + x : (N,) array_like + A 1-D array of real values. + y : (...,N,...) array_like + A N-D array of real values. The length of `y` along the interpolation + axis must be equal to the length of `x`. + kind : str or int, optional + Specifies the kind of interpolation as a string + ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', + 'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic' + refer to a spline interpolation of zeroth, first, second or third + order; 'previous' and 'next' simply return the previous or next value + of the point) or as an integer specifying the order of the spline + interpolator to use. + Default is 'linear'. + axis : int, optional + Specifies the axis of `y` along which to interpolate. + Interpolation defaults to the last axis of `y`. + copy : bool, optional + If True, the class makes internal copies of x and y. + If False, references to `x` and `y` are used. The default is to copy. + bounds_error : bool, optional + If True, a ValueError is raised any time interpolation is attempted on + a value outside of the range of x (where extrapolation is + necessary). If False, out of bounds values are assigned `fill_value`. + By default, an error is raised unless `fill_value="extrapolate"`. 
+ fill_value : array-like or (array-like, array_like) or "extrapolate", optional + - if a ndarray (or float), this value will be used to fill in for + requested points outside of the data range. If not provided, then + the default is NaN. The array-like must broadcast properly to the + dimensions of the non-interpolation axes. + - If a two-element tuple, then the first element is used as a + fill value for ``x_new < x[0]`` and the second element is used for + ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g., + list or ndarray, regardless of shape) is taken to be a single + array-like argument meant to be used for both bounds as + ``below, above = fill_value, fill_value``. + + .. versionadded:: 0.17.0 + - If "extrapolate", then points outside the data range will be + extrapolated. + + .. versionadded:: 0.17.0 + assume_sorted : bool, optional + If False, values of `x` can be in any order and they are sorted first. + If True, `x` has to be an array of monotonically increasing values. + + Methods + ------- + __call__ + + See Also + -------- + splrep, splev + Spline interpolation/smoothing based on FITPACK. + UnivariateSpline : An object-oriented wrapper of the FITPACK routines. + interp2d : 2-D interpolation + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from scipy import interpolate + >>> x = np.arange(0, 10) + >>> y = np.exp(-x/3.0) + >>> f = interpolate.interp1d(x, y) + + >>> xnew = np.arange(0, 9, 0.1) + >>> ynew = f(xnew) # use interpolation function returned by `interp1d` + >>> plt.plot(x, y, 'o', xnew, ynew, '-') + >>> plt.show() + """ + + def __init__(self, x, y, kind='linear', axis=-1, + copy=True, bounds_error=None, fill_value=np.nan, + assume_sorted=False): + """ Initialize a 1D linear interpolation class.""" + _Interpolator1D.__init__(self, x, y, axis=axis) + + self.bounds_error = bounds_error # used by fill_value setter + self.copy = copy + + if kind in ['zero', 'slinear', 'quadratic', 'cubic']: + order = {'zero': 0, 'slinear': 1, + 'quadratic': 2, 'cubic': 3}[kind] + kind = 'spline' + elif isinstance(kind, int): + order = kind + kind = 'spline' + elif kind not in ('linear', 'nearest', 'previous', 'next'): + raise NotImplementedError("%s is unsupported: Use fitpack " + "routines for other types." % kind) + x = array(x, copy=self.copy) + y = array(y, copy=self.copy) + + if not assume_sorted: + ind = np.argsort(x) + x = x[ind] + y = np.take(y, ind, axis=axis) + + if x.ndim != 1: + raise ValueError("the x array must have exactly one dimension.") + if y.ndim == 0: + raise ValueError("the y array must have at least one dimension.") + + # Force-cast y to a floating-point type, if it's not yet one + if not issubclass(y.dtype.type, np.inexact): + y = y.astype(np.float_) + + # Backward compatibility + self.axis = axis % y.ndim + + # Interpolation goes internally along the first axis + self.y = y + self._y = self._reshape_yi(self.y) + self.x = x + del y, x # clean up namespace to prevent misuse; use attributes + self._kind = kind + self.fill_value = fill_value # calls the setter, can modify bounds_err + + # Adjust to interpolation kind; store reference to *unbound* + # interpolation methods, in order to avoid circular references to self + # stored in the bound instance methods, and therefore delayed garbage + # collection. See: https://docs.python.org/reference/datamodel.html + if kind in ('linear', 'nearest', 'previous', 'next'): + # Make a "view" of the y array that is rotated to the interpolation + # axis. 
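+            # minval below is the minimum number of data points the
+            # chosen kind can work with: 2 for this piecewise family,
+            # order + 1 in the spline branch further down.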
+ minval = 2 + if kind == 'nearest': + # Do division before addition to prevent possible integer + # overflow + self.x_bds = self.x / 2.0 + self.x_bds = self.x_bds[1:] + self.x_bds[:-1] + + self._call = self.__class__._call_nearest + elif kind == 'previous': + # Side for np.searchsorted and index for clipping + self._side = 'left' + self._ind = 0 + # Move x by one floating point value to the left + self._x_shift = np.nextafter(self.x, -np.inf) + self._call = self.__class__._call_previousnext + elif kind == 'next': + self._side = 'right' + self._ind = 1 + # Move x by one floating point value to the right + self._x_shift = np.nextafter(self.x, np.inf) + self._call = self.__class__._call_previousnext + else: + # Check if we can delegate to numpy.interp (2x-10x faster). + cond = self.x.dtype == np.float_ and self.y.dtype == np.float_ + cond = cond and self.y.ndim == 1 + cond = cond and not _do_extrapolate(fill_value) + + if cond: + self._call = self.__class__._call_linear_np + else: + self._call = self.__class__._call_linear + else: + minval = order + 1 + + rewrite_nan = False + xx, yy = self.x, self._y + if order > 1: + # Quadratic or cubic spline. If input contains even a single + # nan, then the output is all nans. We cannot just feed data + # with nans to make_interp_spline because it calls LAPACK. + # So, we make up a bogus x and y with no nans and use it + # to get the correct shape of the output, which we then fill + # with nans. + # For slinear or zero order spline, we just pass nans through. + if np.isnan(self.x).any(): + xx = np.linspace(min(self.x), max(self.x), len(self.x)) + rewrite_nan = True + if np.isnan(self._y).any(): + yy = np.ones_like(self._y) + rewrite_nan = True + + self._spline = make_interp_spline(xx, yy, k=order, + check_finite=False) + if rewrite_nan: + self._call = self.__class__._call_nan_spline + else: + self._call = self.__class__._call_spline + + if len(self.x) < minval: + raise ValueError("x and y arrays must have at " + "least %d entries" % minval) + + @property + def fill_value(self): + # backwards compat: mimic a public attribute + return self._fill_value_orig + + @fill_value.setter + def fill_value(self, fill_value): + # extrapolation only works for nearest neighbor and linear methods + if _do_extrapolate(fill_value): + if self.bounds_error: + raise ValueError("Cannot extrapolate and raise " + "at the same time.") + self.bounds_error = False + self._extrapolate = True + else: + broadcast_shape = (self.y.shape[:self.axis] + + self.y.shape[self.axis + 1:]) + if len(broadcast_shape) == 0: + broadcast_shape = (1,) + # it's either a pair (_below_range, _above_range) or a single value + # for both above and below range + if isinstance(fill_value, tuple) and len(fill_value) == 2: + below_above = [np.asarray(fill_value[0]), + np.asarray(fill_value[1])] + names = ('fill_value (below)', 'fill_value (above)') + for ii in range(2): + below_above[ii] = _check_broadcast_up_to( + below_above[ii], broadcast_shape, names[ii]) + else: + fill_value = np.asarray(fill_value) + below_above = [_check_broadcast_up_to( + fill_value, broadcast_shape, 'fill_value')] * 2 + self._fill_value_below, self._fill_value_above = below_above + self._extrapolate = False + if self.bounds_error is None: + self.bounds_error = True + # backwards compat: fill_value was a public attr; make it writeable + self._fill_value_orig = fill_value + + def _call_linear_np(self, x_new): + # Note that out-of-bounds values are taken care of in self._evaluate + return np.interp(x_new, self.x, self.y) + + def 
_call_linear(self, x_new): + # 2. Find where in the original data, the values to interpolate + # would be inserted. + # Note: If x_new[n] == x[m], then m is returned by searchsorted. + x_new_indices = searchsorted(self.x, x_new) + + # 3. Clip x_new_indices so that they are within the range of + # self.x indices and at least 1. Removes mis-interpolation + # of x_new[n] = x[0] + x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int) + + # 4. Calculate the slope of regions that each x_new value falls in. + lo = x_new_indices - 1 + hi = x_new_indices + + x_lo = self.x[lo] + x_hi = self.x[hi] + y_lo = self._y[lo] + y_hi = self._y[hi] + + # Note that the following two expressions rely on the specifics of the + # broadcasting semantics. + slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None] + + # 5. Calculate the actual value for each entry in x_new. + y_new = slope*(x_new - x_lo)[:, None] + y_lo + + return y_new + + def _call_nearest(self, x_new): + """ Find nearest neighbour interpolated y_new = f(x_new).""" + + # 2. Find where in the averaged data the values to interpolate + # would be inserted. + # Note: use side='left' (right) to searchsorted() to define the + # halfway point to be nearest to the left (right) neighbour + x_new_indices = searchsorted(self.x_bds, x_new, side='left') + + # 3. Clip x_new_indices so that they are within the range of x indices. + x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp) + + # 4. Calculate the actual value for each entry in x_new. + y_new = self._y[x_new_indices] + + return y_new + + def _call_previousnext(self, x_new): + """Use previous/next neighbour of x_new, y_new = f(x_new).""" + + # 1. Get index of left/right value + x_new_indices = searchsorted(self._x_shift, x_new, side=self._side) + + # 2. Clip x_new_indices so that they are within the range of x indices. + x_new_indices = x_new_indices.clip(1-self._ind, + len(self.x)-self._ind).astype(intp) + + # 3. Calculate the actual value for each entry in x_new. + y_new = self._y[x_new_indices+self._ind-1] + + return y_new + + def _call_spline(self, x_new): + return self._spline(x_new) + + def _call_nan_spline(self, x_new): + out = self._spline(x_new) + out[...] = np.nan + return out + + def _evaluate(self, x_new): + # 1. Handle values in x_new that are outside of x. Throw error, + # or return a list of mask array indicating the outofbounds values. + # The behavior is set by the bounds_error variable. + x_new = asarray(x_new) + y_new = self._call(self, x_new) + if not self._extrapolate: + below_bounds, above_bounds = self._check_bounds(x_new) + if len(y_new) > 0: + # Note fill_value must be broadcast up to the proper size + # and flattened to work here + y_new[below_bounds] = self._fill_value_below + y_new[above_bounds] = self._fill_value_above + return y_new + + def _check_bounds(self, x_new): + """Check the inputs for being in the bounds of the interpolated data. + + Parameters + ---------- + x_new : array + + Returns + ------- + out_of_bounds : bool array + The mask on x_new of values that are out of the bounds. + """ + + # If self.bounds_error is True, we raise an error if any x_new values + # fall outside the range of x. Otherwise, we return an array indicating + # which values are outside the boundary region. + below_bounds = x_new < self.x[0] + above_bounds = x_new > self.x[-1] + + # !! 
Could provide more information about which values are out of bounds + if self.bounds_error and below_bounds.any(): + raise ValueError("A value in x_new is below the interpolation " + "range.") + if self.bounds_error and above_bounds.any(): + raise ValueError("A value in x_new is above the interpolation " + "range.") + + # !! Should we emit a warning if some values are out of bounds? + # !! matlab does not. + return below_bounds, above_bounds + + +class _PPolyBase(object): + """Base class for piecewise polynomials.""" + __slots__ = ('c', 'x', 'extrapolate', 'axis') + + def __init__(self, c, x, extrapolate=None, axis=0): + self.c = np.asarray(c) + self.x = np.ascontiguousarray(x, dtype=np.float64) + + if extrapolate is None: + extrapolate = True + elif extrapolate != 'periodic': + extrapolate = bool(extrapolate) + self.extrapolate = extrapolate + + if self.c.ndim < 2: + raise ValueError("Coefficients array must be at least " + "2-dimensional.") + + if not (0 <= axis < self.c.ndim - 1): + raise ValueError("axis=%s must be between 0 and %s" % + (axis, self.c.ndim-1)) + + self.axis = axis + if axis != 0: + # roll the interpolation axis to be the first one in self.c + # More specifically, the target shape for self.c is (k, m, ...), + # and axis !=0 means that we have c.shape (..., k, m, ...) + # ^ + # axis + # So we roll two of them. + self.c = np.rollaxis(self.c, axis+1) + self.c = np.rollaxis(self.c, axis+1) + + if self.x.ndim != 1: + raise ValueError("x must be 1-dimensional") + if self.x.size < 2: + raise ValueError("at least 2 breakpoints are needed") + if self.c.ndim < 2: + raise ValueError("c must have at least 2 dimensions") + if self.c.shape[0] == 0: + raise ValueError("polynomial must be at least of order 0") + if self.c.shape[1] != self.x.size-1: + raise ValueError("number of coefficients != len(x)-1") + dx = np.diff(self.x) + if not (np.all(dx >= 0) or np.all(dx <= 0)): + raise ValueError("`x` must be strictly increasing or decreasing.") + + dtype = self._get_dtype(self.c.dtype) + self.c = np.ascontiguousarray(self.c, dtype=dtype) + + def _get_dtype(self, dtype): + if np.issubdtype(dtype, np.complexfloating) \ + or np.issubdtype(self.c.dtype, np.complexfloating): + return np.complex_ + else: + return np.float_ + + @classmethod + def construct_fast(cls, c, x, extrapolate=None, axis=0): + """ + Construct the piecewise polynomial without making checks. + + Takes the same parameters as the constructor. Input arguments + `c` and `x` must be arrays of the correct shape and type. The + `c` array can only be of dtypes float and complex, and `x` + array must have dtype float. + """ + self = object.__new__(cls) + self.c = c + self.x = x + self.axis = axis + if extrapolate is None: + extrapolate = True + self.extrapolate = extrapolate + return self + + def _ensure_c_contiguous(self): + """ + c and x may be modified by the user. The Cython code expects + that they are C contiguous. + """ + if not self.x.flags.c_contiguous: + self.x = self.x.copy() + if not self.c.flags.c_contiguous: + self.c = self.c.copy() + + def extend(self, c, x, right=None): + """ + Add additional breakpoints and coefficients to the polynomial. + + Parameters + ---------- + c : ndarray, size (k, m, ...) + Additional coefficients for polynomials in intervals. Note that + the first additional interval will be formed using one of the + `self.x` end points. + x : ndarray, size (m,) + Additional breakpoints. Must be sorted in the same order as + `self.x` and either to the right or to the left of the current + breakpoints. 
+ right + Deprecated argument. Has no effect. + + .. deprecated:: 0.19 + """ + if right is not None: + warnings.warn("`right` is deprecated and will be removed.") + + c = np.asarray(c) + x = np.asarray(x) + + if c.ndim < 2: + raise ValueError("invalid dimensions for c") + if x.ndim != 1: + raise ValueError("invalid dimensions for x") + if x.shape[0] != c.shape[1]: + raise ValueError("x and c have incompatible sizes") + if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim: + raise ValueError("c and self.c have incompatible shapes") + + if c.size == 0: + return + + dx = np.diff(x) + if not (np.all(dx >= 0) or np.all(dx <= 0)): + raise ValueError("`x` is not sorted.") + + if self.x[-1] >= self.x[0]: + if not x[-1] >= x[0]: + raise ValueError("`x` is in the different order " + "than `self.x`.") + + if x[0] >= self.x[-1]: + action = 'append' + elif x[-1] <= self.x[0]: + action = 'prepend' + else: + raise ValueError("`x` is neither on the left or on the right " + "from `self.x`.") + else: + if not x[-1] <= x[0]: + raise ValueError("`x` is in the different order " + "than `self.x`.") + + if x[0] <= self.x[-1]: + action = 'append' + elif x[-1] >= self.x[0]: + action = 'prepend' + else: + raise ValueError("`x` is neither on the left or on the right " + "from `self.x`.") + + dtype = self._get_dtype(c.dtype) + + k2 = max(c.shape[0], self.c.shape[0]) + c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:], + dtype=dtype) + + if action == 'append': + c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c + c2[k2-c.shape[0]:, self.c.shape[1]:] = c + self.x = np.r_[self.x, x] + elif action == 'prepend': + c2[k2-self.c.shape[0]:, :c.shape[1]] = c + c2[k2-c.shape[0]:, c.shape[1]:] = self.c + self.x = np.r_[x, self.x] + + self.c = c2 + + def __call__(self, x, nu=0, extrapolate=None): + """ + Evaluate the piecewise polynomial or its derivative. + + Parameters + ---------- + x : array_like + Points to evaluate the interpolant at. + nu : int, optional + Order of derivative to evaluate. Must be non-negative. + extrapolate : {bool, 'periodic', None}, optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. + If None (default), use `self.extrapolate`. + + Returns + ------- + y : array_like + Interpolated values. Shape is determined by replacing + the interpolation axis in the original array with the shape of x. + + Notes + ----- + Derivatives are evaluated piecewise for each polynomial + segment, even if the polynomial is not differentiable at the + breakpoints. The polynomial intervals are considered half-open, + ``[a, b)``, except for the last interval which is closed + ``[a, b]``. + """ + if extrapolate is None: + extrapolate = self.extrapolate + x = np.asarray(x) + x_shape, x_ndim = x.shape, x.ndim + x = np.ascontiguousarray(x.ravel(), dtype=np.float_) + + # With periodic extrapolation we map x to the segment + # [self.x[0], self.x[-1]]. 
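+        # (worked example, numbers assumed for illustration only: with
+        # breakpoints x = [0, 2], a query at 5.1 maps to
+        # 0 + (5.1 - 0) % (2 - 0) = 1.1, the same phase within the period)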
+ if extrapolate == 'periodic': + x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0]) + extrapolate = False + + out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype) + self._ensure_c_contiguous() + self._evaluate(x, nu, extrapolate, out) + out = out.reshape(x_shape + self.c.shape[2:]) + if self.axis != 0: + # transpose to move the calculated values to the interpolation axis + l = list(range(out.ndim)) + l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:] + out = out.transpose(l) + return out + + +class PPoly(_PPolyBase): + """ + Piecewise polynomial in terms of coefficients and breakpoints + + The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the + local power basis:: + + S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1)) + + where ``k`` is the degree of the polynomial. + + Parameters + ---------- + c : ndarray, shape (k, m, ...) + Polynomial coefficients, order `k` and `m` intervals + x : ndarray, shape (m+1,) + Polynomial breakpoints. Must be sorted in either increasing or + decreasing order. + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. If 'periodic', + periodic extrapolation is used. Default is True. + axis : int, optional + Interpolation axis. Default is zero. + + Attributes + ---------- + x : ndarray + Breakpoints. + c : ndarray + Coefficients of the polynomials. They are reshaped + to a 3-dimensional array with the last dimension representing + the trailing dimensions of the original coefficient array. + axis : int + Interpolation axis. + + Methods + ------- + __call__ + derivative + antiderivative + integrate + solve + roots + extend + from_spline + from_bernstein_basis + construct_fast + + See also + -------- + BPoly : piecewise polynomials in the Bernstein basis + + Notes + ----- + High-order polynomials in the power basis can be numerically + unstable. Precision problems can start to appear for orders + larger than 20-30. + """ + def _evaluate(self, x, nu, extrapolate, out): + _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, x, nu, bool(extrapolate), out) + + def derivative(self, nu=1): + """ + Construct a new piecewise polynomial representing the derivative. + + Parameters + ---------- + nu : int, optional + Order of derivative to evaluate. Default is 1, i.e. compute the + first derivative. If negative, the antiderivative is returned. + + Returns + ------- + pp : PPoly + Piecewise polynomial of order k2 = k - n representing the derivative + of this polynomial. + + Notes + ----- + Derivatives are evaluated piecewise for each polynomial + segment, even if the polynomial is not differentiable at the + breakpoints. The polynomial intervals are considered half-open, + ``[a, b)``, except for the last interval which is closed + ``[a, b]``. + """ + if nu < 0: + return self.antiderivative(-nu) + + # reduce order + if nu == 0: + c2 = self.c.copy() + else: + c2 = self.c[:-nu, :].copy() + + if c2.shape[0] == 0: + # derivative of order 0 is zero + c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype) + + # multiply by the correct rising factorials + factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu) + c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)] + + # construct a compatible polynomial + return self.construct_fast(c2, self.x, self.extrapolate, self.axis) + + def antiderivative(self, nu=1): + """ + Construct a new piecewise polynomial representing the antiderivative. 
+ + Antiderivative is also the indefinite integral of the function, + and derivative is its inverse operation. + + Parameters + ---------- + nu : int, optional + Order of antiderivative to evaluate. Default is 1, i.e. compute + the first integral. If negative, the derivative is returned. + + Returns + ------- + pp : PPoly + Piecewise polynomial of order k2 = k + n representing + the antiderivative of this polynomial. + + Notes + ----- + The antiderivative returned by this function is continuous and + continuously differentiable to order n-1, up to floating point + rounding error. + + If antiderivative is computed and ``self.extrapolate='periodic'``, + it will be set to False for the returned instance. This is done because + the antiderivative is no longer periodic and its correct evaluation + outside of the initially given x interval is difficult. + """ + if nu <= 0: + return self.derivative(-nu) + + c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:], + dtype=self.c.dtype) + c[:-nu] = self.c + + # divide by the correct rising factorials + factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu) + c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)] + + # fix continuity of added degrees of freedom + self._ensure_c_contiguous() + _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1), + self.x, nu - 1) + + if self.extrapolate == 'periodic': + extrapolate = False + else: + extrapolate = self.extrapolate + + # construct a compatible polynomial + return self.construct_fast(c, self.x, extrapolate, self.axis) + + def integrate(self, a, b, extrapolate=None): + """ + Compute a definite integral over a piecewise polynomial. + + Parameters + ---------- + a : float + Lower integration bound + b : float + Upper integration bound + extrapolate : {bool, 'periodic', None}, optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. + If None (default), use `self.extrapolate`. + + Returns + ------- + ig : array_like + Definite integral of the piecewise polynomial over [a, b] + """ + if extrapolate is None: + extrapolate = self.extrapolate + + # Swap integration bounds if needed + sign = 1 + if b < a: + a, b = b, a + sign = -1 + + range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype) + self._ensure_c_contiguous() + + # Compute the integral. + if extrapolate == 'periodic': + # Split the integral into the part over period (can be several + # of them) and the remaining part. + + xs, xe = self.x[0], self.x[-1] + period = xe - xs + interval = b - a + n_periods, left = divmod(interval, period) + + if n_periods > 0: + _ppoly.integrate( + self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, xs, xe, False, out=range_int) + range_int *= n_periods + else: + range_int.fill(0) + + # Map a to [xs, xe], b is always a + left. + a = xs + (a - xs) % period + b = a + left + + # If b <= xe then we need to integrate over [a, b], otherwise + # over [a, xe] and from xs to what is remained. 
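+            # (numbers assumed for illustration: with [xs, xe] = [0, 1]
+            # and bounds [0.25, 1.6], n_periods = 1 and left = 0.35;
+            # mapping gives a = 0.25 and b = 0.6 <= xe, so only the
+            # first branch below runs)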
+            remainder_int = np.empty_like(range_int)
+            if b <= xe:
+                _ppoly.integrate(
+                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                    self.x, a, b, False, out=remainder_int)
+                range_int += remainder_int
+            else:
+                _ppoly.integrate(
+                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                    self.x, a, xe, False, out=remainder_int)
+                range_int += remainder_int
+
+                _ppoly.integrate(
+                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                    self.x, xs, xs + left + a - xe, False,
+                    out=remainder_int)
+                range_int += remainder_int
+        else:
+            _ppoly.integrate(
+                self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                self.x, a, b, bool(extrapolate), out=range_int)
+
+        # Return
+        range_int *= sign
+        return range_int.reshape(self.c.shape[2:])
+
+    def solve(self, y=0., discontinuity=True, extrapolate=None):
+        """
+        Find real solutions of the equation ``pp(x) == y``.
+
+        Parameters
+        ----------
+        y : float, optional
+            Right-hand side. Default is zero.
+        discontinuity : bool, optional
+            Whether to report sign changes across discontinuities at
+            breakpoints as roots.
+        extrapolate : {bool, 'periodic', None}, optional
+            If bool, determines whether to return roots from the polynomial
+            extrapolated based on first and last intervals, 'periodic' works
+            the same as False. If None (default), use `self.extrapolate`.
+
+        Returns
+        -------
+        roots : ndarray
+            Roots of the polynomial(s).
+
+            If the PPoly object describes multiple polynomials, the
+            return value is an object array whose each element is an
+            ndarray containing the roots.
+
+        Notes
+        -----
+        This routine works only on real-valued polynomials.
+
+        If the piecewise polynomial contains sections that are
+        identically zero, the root list will contain the start point
+        of the corresponding interval, followed by a ``nan`` value.
+
+        If the polynomial is discontinuous across a breakpoint, and
+        there is a sign change across the breakpoint, this is reported
+        if the `discontinuity` parameter is True.
+
+        Examples
+        --------
+
+        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
+        ``[-2, 1], [1, 2]``:
+
+        >>> from scipy.interpolate import PPoly
+        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
+        >>> pp.roots()
+        array([-1.,  1.])
+        """
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+
+        self._ensure_c_contiguous()
+
+        if np.issubdtype(self.c.dtype, np.complexfloating):
+            raise ValueError("Root finding is only for "
+                             "real-valued polynomials")
+
+        y = float(y)
+        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                              self.x, y, bool(discontinuity),
+                              bool(extrapolate))
+        if self.c.ndim == 2:
+            return r[0]
+        else:
+            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
+            # this for-loop is equivalent to ``r2[...] = r``, but that's broken
+            # in numpy 1.6.0
+            for ii, root in enumerate(r):
+                r2[ii] = root
+
+            return r2.reshape(self.c.shape[2:])
+
+    def roots(self, discontinuity=True, extrapolate=None):
+        """
+        Find real roots of the piecewise polynomial.
+
+        Parameters
+        ----------
+        discontinuity : bool, optional
+            Whether to report sign changes across discontinuities at
+            breakpoints as roots.
+        extrapolate : {bool, 'periodic', None}, optional
+            If bool, determines whether to return roots from the polynomial
+            extrapolated based on first and last intervals, 'periodic' works
+            the same as False. If None (default), use `self.extrapolate`.
+
+        Returns
+        -------
+        roots : ndarray
+            Roots of the polynomial(s).
+ + If the PPoly object describes multiple polynomials, the + return value is an object array whose each element is an + ndarray containing the roots. + + See Also + -------- + PPoly.solve + """ + return self.solve(0, discontinuity, extrapolate) + + @classmethod + def from_spline(cls, tck, extrapolate=None): + """ + Construct a piecewise polynomial from a spline + + Parameters + ---------- + tck + A spline, as returned by `splrep` or a BSpline object. + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. Default is True. + """ + if isinstance(tck, BSpline): + t, c, k = tck.tck + if extrapolate is None: + extrapolate = tck.extrapolate + else: + t, c, k = tck + + cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype) + for m in xrange(k, -1, -1): + y = fitpack.splev(t[:-1], tck, der=m) + cvals[k - m, :] = y/spec.gamma(m+1) + + return cls.construct_fast(cvals, t, extrapolate) + + @classmethod + def from_bernstein_basis(cls, bp, extrapolate=None): + """ + Construct a piecewise polynomial in the power basis + from a polynomial in Bernstein basis. + + Parameters + ---------- + bp : BPoly + A Bernstein basis polynomial, as created by BPoly + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. Default is True. + """ + dx = np.diff(bp.x) + k = bp.c.shape[0] - 1 # polynomial order + + rest = (None,)*(bp.c.ndim-2) + + c = np.zeros_like(bp.c) + for a in range(k+1): + factor = (-1)**a * comb(k, a) * bp.c[a] + for s in range(a, k+1): + val = comb(k-a, s-a) * (-1)**s + c[k-s] += factor * val / dx[(slice(None),)+rest]**s + + if extrapolate is None: + extrapolate = bp.extrapolate + + return cls.construct_fast(c, bp.x, extrapolate, bp.axis) + + +class BPoly(_PPolyBase): + """Piecewise polynomial in terms of coefficients and breakpoints. + + The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the + Bernstein polynomial basis:: + + S = sum(c[a, i] * b(a, k; x) for a in range(k+1)), + + where ``k`` is the degree of the polynomial, and:: + + b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a), + + with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial + coefficient. + + Parameters + ---------- + c : ndarray, shape (k, m, ...) + Polynomial coefficients, order `k` and `m` intervals + x : ndarray, shape (m+1,) + Polynomial breakpoints. Must be sorted in either increasing or + decreasing order. + extrapolate : bool, optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. If 'periodic', + periodic extrapolation is used. Default is True. + axis : int, optional + Interpolation axis. Default is zero. + + Attributes + ---------- + x : ndarray + Breakpoints. + c : ndarray + Coefficients of the polynomials. They are reshaped + to a 3-dimensional array with the last dimension representing + the trailing dimensions of the original coefficient array. + axis : int + Interpolation axis. + + Methods + ------- + __call__ + extend + derivative + antiderivative + integrate + construct_fast + from_power_basis + from_derivatives + + See also + -------- + PPoly : piecewise polynomials in the power basis + + Notes + ----- + Properties of Bernstein polynomials are well documented in the literature. 
+ Here's a non-exhaustive list: + + .. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial + + .. [2] Kenneth I. Joy, Bernstein polynomials, + http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf + + .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems, + vol 2011, article ID 829546, :doi:`10.1155/2011/829543`. + + Examples + -------- + >>> from scipy.interpolate import BPoly + >>> x = [0, 1] + >>> c = [[1], [2], [3]] + >>> bp = BPoly(c, x) + + This creates a 2nd order polynomial + + .. math:: + + B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\ + = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2 + + """ + + def _evaluate(self, x, nu, extrapolate, out): + _ppoly.evaluate_bernstein( + self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, x, nu, bool(extrapolate), out) + + def derivative(self, nu=1): + """ + Construct a new piecewise polynomial representing the derivative. + + Parameters + ---------- + nu : int, optional + Order of derivative to evaluate. Default is 1, i.e. compute the + first derivative. If negative, the antiderivative is returned. + + Returns + ------- + bp : BPoly + Piecewise polynomial of order k - nu representing the derivative of + this polynomial. + + """ + if nu < 0: + return self.antiderivative(-nu) + + if nu > 1: + bp = self + for k in range(nu): + bp = bp.derivative() + return bp + + # reduce order + if nu == 0: + c2 = self.c.copy() + else: + # For a polynomial + # B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x), + # we use the fact that + # b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ), + # which leads to + # B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1} + # + # finally, for an interval [y, y + dy] with dy != 1, + # we need to correct for an extra power of dy + + rest = (None,)*(self.c.ndim-2) + + k = self.c.shape[0] - 1 + dx = np.diff(self.x)[(None, slice(None))+rest] + c2 = k * np.diff(self.c, axis=0) / dx + + if c2.shape[0] == 0: + # derivative of order 0 is zero + c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype) + + # construct a compatible polynomial + return self.construct_fast(c2, self.x, self.extrapolate, self.axis) + + def antiderivative(self, nu=1): + """ + Construct a new piecewise polynomial representing the antiderivative. + + Parameters + ---------- + nu : int, optional + Order of antiderivative to evaluate. Default is 1, i.e. compute + the first integral. If negative, the derivative is returned. + + Returns + ------- + bp : BPoly + Piecewise polynomial of order k + nu representing the + antiderivative of this polynomial. + + Notes + ----- + If antiderivative is computed and ``self.extrapolate='periodic'``, + it will be set to False for the returned instance. This is done because + the antiderivative is no longer periodic and its correct evaluation + outside of the initially given x interval is difficult. + """ + if nu <= 0: + return self.derivative(-nu) + + if nu > 1: + bp = self + for k in range(nu): + bp = bp.antiderivative() + return bp + + # Construct the indefinite integrals on individual intervals + c, x = self.c, self.x + k = c.shape[0] + c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype) + + c2[1:, ...] = np.cumsum(c, axis=0) / k + delta = x[1:] - x[:-1] + c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)] + + # Now fix continuity: on the very first interval, take the integration + # constant to be zero; on an interval [x_j, x_{j+1}) with j>0, + # the integration constant is then equal to the jump of the `bp` at x_j. 
+ # The latter is given by the coefficient of B_{n+1, n+1} + # *on the previous interval* (other B. polynomials are zero at the + # breakpoint). Finally, use the fact that BPs form a partition of unity. + c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1] + + if self.extrapolate == 'periodic': + extrapolate = False + else: + extrapolate = self.extrapolate + + return self.construct_fast(c2, x, extrapolate, axis=self.axis) + + def integrate(self, a, b, extrapolate=None): + """ + Compute a definite integral over a piecewise polynomial. + + Parameters + ---------- + a : float + Lower integration bound + b : float + Upper integration bound + extrapolate : {bool, 'periodic', None}, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. If 'periodic', periodic + extrapolation is used. If None (default), use `self.extrapolate`. + + Returns + ------- + array_like + Definite integral of the piecewise polynomial over [a, b] + + """ + # XXX: can probably use instead the fact that + # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1) + ib = self.antiderivative() + if extrapolate is None: + extrapolate = self.extrapolate + + # ib.extrapolate shouldn't be 'periodic', it is converted to + # False for 'periodic. in antiderivative() call. + if extrapolate != 'periodic': + ib.extrapolate = extrapolate + + if extrapolate == 'periodic': + # Split the integral into the part over period (can be several + # of them) and the remaining part. + + # For simplicity and clarity convert to a <= b case. + if a <= b: + sign = 1 + else: + a, b = b, a + sign = -1 + + xs, xe = self.x[0], self.x[-1] + period = xe - xs + interval = b - a + n_periods, left = divmod(interval, period) + res = n_periods * (ib(xe) - ib(xs)) + + # Map a and b to [xs, xe]. + a = xs + (a - xs) % period + b = a + left + + # If b <= xe then we need to integrate over [a, b], otherwise + # over [a, xe] and from xs to what is remained. + if b <= xe: + res += ib(b) - ib(a) + else: + res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs) + + return sign * res + else: + return ib(b) - ib(a) + + def extend(self, c, x, right=None): + k = max(self.c.shape[0], c.shape[0]) + self.c = self._raise_degree(self.c, k - self.c.shape[0]) + c = self._raise_degree(c, k - c.shape[0]) + return _PPolyBase.extend(self, c, x, right) + extend.__doc__ = _PPolyBase.extend.__doc__ + + @classmethod + def from_power_basis(cls, pp, extrapolate=None): + """ + Construct a piecewise polynomial in Bernstein basis + from a power basis polynomial. + + Parameters + ---------- + pp : PPoly + A piecewise polynomial in the power basis + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. Default is True. + """ + dx = np.diff(pp.x) + k = pp.c.shape[0] - 1 # polynomial order + + rest = (None,)*(pp.c.ndim-2) + + c = np.zeros_like(pp.c) + for a in range(k+1): + factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a) + for j in range(k-a, k+1): + c[j] += factor * comb(j, k-a) + + if extrapolate is None: + extrapolate = pp.extrapolate + + return cls.construct_fast(c, pp.x, extrapolate, pp.axis) + + @classmethod + def from_derivatives(cls, xi, yi, orders=None, extrapolate=None): + """Construct a piecewise polynomial in the Bernstein basis, + compatible with the specified values and derivatives at breakpoints. 
+ + Parameters + ---------- + xi : array_like + sorted 1D array of x-coordinates + yi : array_like or list of array_likes + ``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]`` + orders : None or int or array_like of ints. Default: None. + Specifies the degree of local polynomials. If not None, some + derivatives are ignored. + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. Default is True. + + Notes + ----- + If ``k`` derivatives are specified at a breakpoint ``x``, the + constructed polynomial is exactly ``k`` times continuously + differentiable at ``x``, unless the ``order`` is provided explicitly. + In the latter case, the smoothness of the polynomial at + the breakpoint is controlled by the ``order``. + + Deduces the number of derivatives to match at each end + from ``order`` and the number of derivatives available. If + possible it uses the same number of derivatives from + each end; if the number is odd it tries to take the + extra one from y2. In any case if not enough derivatives + are available at one end or another it draws enough to + make up the total from the other end. + + If the order is too high and not enough derivatives are available, + an exception is raised. + + Examples + -------- + + >>> from scipy.interpolate import BPoly + >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]]) + + Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]` + such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4` + + >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]]) + + Creates a piecewise polynomial `f(x)`, such that + `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`. + Based on the number of derivatives provided, the order of the + local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`. + Notice that no restriction is imposed on the derivatives at + `x = 1` and `x = 2`. + + Indeed, the explicit form of the polynomial is:: + + f(x) = | x * (1 - x), 0 <= x < 1 + | 2 * (x - 1), 1 <= x <= 2 + + So that f'(1-0) = -1 and f'(1+0) = 2 + + """ + xi = np.asarray(xi) + if len(xi) != len(yi): + raise ValueError("xi and yi need to have the same length") + if np.any(xi[1:] - xi[:1] <= 0): + raise ValueError("x coordinates are not in increasing order") + + # number of intervals + m = len(xi) - 1 + + # global poly order is k-1, local orders are <=k and can vary + try: + k = max(len(yi[i]) + len(yi[i+1]) for i in range(m)) + except TypeError: + raise ValueError("Using a 1D array for y? 
Please .reshape(-1, 1).")
+
+        if orders is None:
+            orders = [None] * m
+        else:
+            if isinstance(orders, (integer_types, np.integer)):
+                orders = [orders] * m
+            k = max(k, max(orders))
+
+            if any(o <= 0 for o in orders):
+                raise ValueError("Orders must be positive.")
+
+        c = []
+        for i in range(m):
+            y1, y2 = yi[i], yi[i+1]
+            if orders[i] is None:
+                n1, n2 = len(y1), len(y2)
+            else:
+                n = orders[i]+1
+                n1 = min(n//2, len(y1))
+                n2 = min(n - n1, len(y2))
+                n1 = min(n - n2, len(y1))
+                if n1+n2 != n:
+                    mesg = ("Point %g has %d derivatives, point %g"
+                            " has %d derivatives, but order %d requested" % (
+                                xi[i], len(y1), xi[i+1], len(y2), orders[i]))
+                    raise ValueError(mesg)
+
+                if not (n1 <= len(y1) and n2 <= len(y2)):
+                    raise ValueError("`order` input incompatible with"
+                                     " length y1 or y2.")
+
+            b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
+                                                  y1[:n1], y2[:n2])
+            if len(b) < k:
+                b = BPoly._raise_degree(b, k - len(b))
+            c.append(b)
+
+        c = np.asarray(c)
+        return cls(c.swapaxes(0, 1), xi, extrapolate)
+
+    @staticmethod
+    def _construct_from_derivatives(xa, xb, ya, yb):
+        r"""Compute the coefficients of a polynomial in the Bernstein basis
+        given the values and derivatives at the edges.
+
+        Return the coefficients of a polynomial in the Bernstein basis
+        defined on `[xa, xb]` and having the values and derivatives at the
+        endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
+        The polynomial constructed is of the minimal possible degree, i.e.,
+        if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
+        of the polynomial is ``na + nb - 1``.
+
+        Parameters
+        ----------
+        xa : float
+            Left-hand end point of the interval
+        xb : float
+            Right-hand end point of the interval
+        ya : array_like
+            Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
+            ``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
+        yb : array_like
+            Derivatives at ``xb``.
+
+        Returns
+        -------
+        array
+            coefficient array of a polynomial having specified derivatives
+
+        Notes
+        -----
+        This uses several facts about Bernstein basis functions.
+        First of all,
+
+        .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
+
+        If B(x) is a linear combination of the form
+
+        .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
+
+        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
+        Iterating the latter one, one finds for the q-th derivative
+
+        .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
+
+        with
+
+        .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
+
+        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
+        `c_q` are found one by one by iterating `q = 0, ..., na`.
+
+        At `x = xb` it's the same with `a = n - q`.
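+
+        As a quick degree check (values assumed for illustration): with
+        ``ya = [1, 2]`` and ``yb = [3]``, ``na = 2`` and ``nb = 1``, so
+        the returned array has ``na + nb = 3`` coefficients and the
+        polynomial has degree 2.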
+ + """ + ya, yb = np.asarray(ya), np.asarray(yb) + if ya.shape[1:] != yb.shape[1:]: + raise ValueError('ya and yb have incompatible dimensions.') + + dta, dtb = ya.dtype, yb.dtype + if (np.issubdtype(dta, np.complexfloating) or + np.issubdtype(dtb, np.complexfloating)): + dt = np.complex_ + else: + dt = np.float_ + + na, nb = len(ya), len(yb) + n = na + nb + + c = np.empty((na+nb,) + ya.shape[1:], dtype=dt) + + # compute coefficients of a polynomial degree na+nb-1 + # walk left-to-right + for q in range(0, na): + c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q + for j in range(0, q): + c[q] -= (-1)**(j+q) * comb(q, j) * c[j] + + # now walk right-to-left + for q in range(0, nb): + c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q + for j in range(0, q): + c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j] + + return c + + @staticmethod + def _raise_degree(c, d): + r"""Raise a degree of a polynomial in the Bernstein basis. + + Given the coefficients of a polynomial degree `k`, return (the + coefficients of) the equivalent polynomial of degree `k+d`. + + Parameters + ---------- + c : array_like + coefficient array, 1D + d : integer + + Returns + ------- + array + coefficient array, 1D array of length `c.shape[0] + d` + + Notes + ----- + This uses the fact that a Bernstein polynomial `b_{a, k}` can be + identically represented as a linear combination of polynomials of + a higher degree `k+d`: + + .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \ + comb(d, j) / comb(k+d, a+j) + + """ + if d == 0: + return c + + k = c.shape[0] - 1 + out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype) + + for a in range(c.shape[0]): + f = c[a] * comb(k, a) + for j in range(d+1): + out[a+j] += f * comb(d, j) / comb(k+d, a+j) + return out + + +class NdPPoly(object): + """ + Piecewise tensor product polynomial + + The value at point `xp = (x', y', z', ...)` is evaluated by first + computing the interval indices `i` such that:: + + x[0][i[0]] <= x' < x[0][i[0]+1] + x[1][i[1]] <= y' < x[1][i[1]+1] + ... + + and then computing:: + + S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]] + * (xp[0] - x[0][i[0]])**m0 + * ... + * (xp[n] - x[n][i[n]])**mn + for m0 in range(k[0]+1) + ... + for mn in range(k[n]+1)) + + where ``k[j]`` is the degree of the polynomial in dimension j. This + representation is the piecewise multivariate power basis. + + Parameters + ---------- + c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...) + Polynomial coefficients, with polynomial order `kj` and + `mj+1` intervals for each dimension `j`. + x : ndim-tuple of ndarrays, shapes (mj+1,) + Polynomial breakpoints for each dimension. These must be + sorted in increasing order. + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. Default: True. + + Attributes + ---------- + x : tuple of ndarrays + Breakpoints. + c : ndarray + Coefficients of the polynomials. + + Methods + ------- + __call__ + construct_fast + + See also + -------- + PPoly : piecewise polynomials in 1D + + Notes + ----- + High-order polynomials in the power basis can be numerically + unstable. 
+ + """ + + def __init__(self, c, x, extrapolate=None): + self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x) + self.c = np.asarray(c) + if extrapolate is None: + extrapolate = True + self.extrapolate = bool(extrapolate) + + ndim = len(self.x) + if any(v.ndim != 1 for v in self.x): + raise ValueError("x arrays must all be 1-dimensional") + if any(v.size < 2 for v in self.x): + raise ValueError("x arrays must all contain at least 2 points") + if c.ndim < 2*ndim: + raise ValueError("c must have at least 2*len(x) dimensions") + if any(np.any(v[1:] - v[:-1] < 0) for v in self.x): + raise ValueError("x-coordinates are not in increasing order") + if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)): + raise ValueError("x and c do not agree on the number of intervals") + + dtype = self._get_dtype(self.c.dtype) + self.c = np.ascontiguousarray(self.c, dtype=dtype) + + @classmethod + def construct_fast(cls, c, x, extrapolate=None): + """ + Construct the piecewise polynomial without making checks. + + Takes the same parameters as the constructor. Input arguments + `c` and `x` must be arrays of the correct shape and type. The + `c` array can only be of dtypes float and complex, and `x` + array must have dtype float. + + """ + self = object.__new__(cls) + self.c = c + self.x = x + if extrapolate is None: + extrapolate = True + self.extrapolate = extrapolate + return self + + def _get_dtype(self, dtype): + if np.issubdtype(dtype, np.complexfloating) \ + or np.issubdtype(self.c.dtype, np.complexfloating): + return np.complex_ + else: + return np.float_ + + def _ensure_c_contiguous(self): + if not self.c.flags.c_contiguous: + self.c = self.c.copy() + if not isinstance(self.x, tuple): + self.x = tuple(self.x) + + def __call__(self, x, nu=None, extrapolate=None): + """ + Evaluate the piecewise polynomial or its derivative + + Parameters + ---------- + x : array-like + Points to evaluate the interpolant at. + nu : tuple, optional + Orders of derivatives to evaluate. Each must be non-negative. + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + + Returns + ------- + y : array-like + Interpolated values. Shape is determined by replacing + the interpolation axis in the original array with the shape of x. + + Notes + ----- + Derivatives are evaluated piecewise for each polynomial + segment, even if the polynomial is not differentiable at the + breakpoints. The polynomial intervals are considered half-open, + ``[a, b)``, except for the last interval which is closed + ``[a, b]``. 
+ + """ + if extrapolate is None: + extrapolate = self.extrapolate + else: + extrapolate = bool(extrapolate) + + ndim = len(self.x) + + x = _ndim_coords_from_arrays(x) + x_shape = x.shape + x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_) + + if nu is None: + nu = np.zeros((ndim,), dtype=np.intc) + else: + nu = np.asarray(nu, dtype=np.intc) + if nu.ndim != 1 or nu.shape[0] != ndim: + raise ValueError("invalid number of derivative orders nu") + + dim1 = prod(self.c.shape[:ndim]) + dim2 = prod(self.c.shape[ndim:2*ndim]) + dim3 = prod(self.c.shape[2*ndim:]) + ks = np.array(self.c.shape[:ndim], dtype=np.intc) + + out = np.empty((x.shape[0], dim3), dtype=self.c.dtype) + self._ensure_c_contiguous() + + _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3), + self.x, + ks, + x, + nu, + bool(extrapolate), + out) + + return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:]) + + def _derivative_inplace(self, nu, axis): + """ + Compute 1D derivative along a selected dimension in-place + May result to non-contiguous c array. + """ + if nu < 0: + return self._antiderivative_inplace(-nu, axis) + + ndim = len(self.x) + axis = axis % ndim + + # reduce order + if nu == 0: + # noop + return + else: + sl = [slice(None)]*ndim + sl[axis] = slice(None, -nu, None) + c2 = self.c[tuple(sl)] + + if c2.shape[axis] == 0: + # derivative of order 0 is zero + shp = list(c2.shape) + shp[axis] = 1 + c2 = np.zeros(shp, dtype=c2.dtype) + + # multiply by the correct rising factorials + factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu) + sl = [None]*c2.ndim + sl[axis] = slice(None) + c2 *= factor[tuple(sl)] + + self.c = c2 + + def _antiderivative_inplace(self, nu, axis): + """ + Compute 1D antiderivative along a selected dimension + May result to non-contiguous c array. + """ + if nu <= 0: + return self._derivative_inplace(-nu, axis) + + ndim = len(self.x) + axis = axis % ndim + + perm = list(range(ndim)) + perm[0], perm[axis] = perm[axis], perm[0] + perm = perm + list(range(ndim, self.c.ndim)) + + c = self.c.transpose(perm) + + c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:], + dtype=c.dtype) + c2[:-nu] = c + + # divide by the correct rising factorials + factor = spec.poch(np.arange(c.shape[0], 0, -1), nu) + c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)] + + # fix continuity of added degrees of freedom + perm2 = list(range(c2.ndim)) + perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1] + + c2 = c2.transpose(perm2) + c2 = c2.copy() + _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1), + self.x[axis], nu-1) + + c2 = c2.transpose(perm2) + c2 = c2.transpose(perm) + + # Done + self.c = c2 + + def derivative(self, nu): + """ + Construct a new piecewise polynomial representing the derivative. + + Parameters + ---------- + nu : ndim-tuple of int + Order of derivatives to evaluate for each dimension. + If negative, the antiderivative is returned. + + Returns + ------- + pp : NdPPoly + Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n]) + representing the derivative of this polynomial. + + Notes + ----- + Derivatives are evaluated piecewise for each polynomial + segment, even if the polynomial is not differentiable at the + breakpoints. The polynomial intervals in each dimension are + considered half-open, ``[a, b)``, except for the last interval + which is closed ``[a, b]``. 
+ + """ + p = self.construct_fast(self.c.copy(), self.x, self.extrapolate) + + for axis, n in enumerate(nu): + p._derivative_inplace(n, axis) + + p._ensure_c_contiguous() + return p + + def antiderivative(self, nu): + """ + Construct a new piecewise polynomial representing the antiderivative. + + Antiderivative is also the indefinite integral of the function, + and derivative is its inverse operation. + + Parameters + ---------- + nu : ndim-tuple of int + Order of derivatives to evaluate for each dimension. + If negative, the derivative is returned. + + Returns + ------- + pp : PPoly + Piecewise polynomial of order k2 = k + n representing + the antiderivative of this polynomial. + + Notes + ----- + The antiderivative returned by this function is continuous and + continuously differentiable to order n-1, up to floating point + rounding error. + + """ + p = self.construct_fast(self.c.copy(), self.x, self.extrapolate) + + for axis, n in enumerate(nu): + p._antiderivative_inplace(n, axis) + + p._ensure_c_contiguous() + return p + + def integrate_1d(self, a, b, axis, extrapolate=None): + r""" + Compute NdPPoly representation for one dimensional definite integral + + The result is a piecewise polynomial representing the integral: + + .. math:: + + p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...) + + where the dimension integrated over is specified with the + `axis` parameter. + + Parameters + ---------- + a, b : float + Lower and upper bound for integration. + axis : int + Dimension over which to compute the 1D integrals + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + + Returns + ------- + ig : NdPPoly or array-like + Definite integral of the piecewise polynomial over [a, b]. + If the polynomial was 1-dimensional, an array is returned, + otherwise, an NdPPoly object. + + """ + if extrapolate is None: + extrapolate = self.extrapolate + else: + extrapolate = bool(extrapolate) + + ndim = len(self.x) + axis = int(axis) % ndim + + # reuse 1D integration routines + c = self.c + swap = list(range(c.ndim)) + swap.insert(0, swap[axis]) + del swap[axis + 1] + swap.insert(1, swap[ndim + axis]) + del swap[ndim + axis + 1] + + c = c.transpose(swap) + p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1), + self.x[axis], + extrapolate=extrapolate) + out = p.integrate(a, b, extrapolate=extrapolate) + + # Construct result + if ndim == 1: + return out.reshape(c.shape[2:]) + else: + c = out.reshape(c.shape[2:]) + x = self.x[:axis] + self.x[axis+1:] + return self.construct_fast(c, x, extrapolate=extrapolate) + + def integrate(self, ranges, extrapolate=None): + """ + Compute a definite integral over a piecewise polynomial. + + Parameters + ---------- + ranges : ndim-tuple of 2-tuples float + Sequence of lower and upper bounds for each dimension, + ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]`` + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + + Returns + ------- + ig : array_like + Definite integral of the piecewise polynomial over + [a[0], b[0]] x ... 
x [a[ndim-1], b[ndim-1]] + + """ + + ndim = len(self.x) + + if extrapolate is None: + extrapolate = self.extrapolate + else: + extrapolate = bool(extrapolate) + + if not hasattr(ranges, '__len__') or len(ranges) != ndim: + raise ValueError("Range not a sequence of correct length") + + self._ensure_c_contiguous() + + # Reuse 1D integration routine + c = self.c + for n, (a, b) in enumerate(ranges): + swap = list(range(c.ndim)) + swap.insert(1, swap[ndim - n]) + del swap[ndim - n + 1] + + c = c.transpose(swap) + + p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate) + out = p.integrate(a, b, extrapolate=extrapolate) + c = out.reshape(c.shape[2:]) + + return c + + +class RegularGridInterpolator(object): + """ + Interpolation on a regular grid in arbitrary dimensions + + The data must be defined on a regular grid; the grid spacing however may be + uneven. Linear and nearest-neighbour interpolation are supported. After + setting up the interpolator object, the interpolation method (*linear* or + *nearest*) may be chosen at each evaluation. + + Parameters + ---------- + points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ) + The points defining the regular grid in n dimensions. + + values : array_like, shape (m1, ..., mn, ...) + The data on the regular grid in n dimensions. + + method : str, optional + The method of interpolation to perform. Supported are "linear" and + "nearest". This parameter will become the default for the object's + ``__call__`` method. Default is "linear". + + bounds_error : bool, optional + If True, when interpolated values are requested outside of the + domain of the input data, a ValueError is raised. + If False, then `fill_value` is used. + + fill_value : number, optional + If provided, the value to use for points outside of the + interpolation domain. If None, values outside + the domain are extrapolated. + + Methods + ------- + __call__ + + Notes + ----- + Contrary to LinearNDInterpolator and NearestNDInterpolator, this class + avoids expensive triangulation of the input data by taking advantage of the + regular grid structure. + + If any of `points` have a dimension of size 1, linear interpolation will + return an array of `nan` values. Nearest-neighbor interpolation will work + as usual in this case. + + .. versionadded:: 0.14 + + Examples + -------- + Evaluate a simple example function on the points of a 3D grid: + + >>> from scipy.interpolate import RegularGridInterpolator + >>> def f(x, y, z): + ... return 2 * x**3 + 3 * y**2 - z + >>> x = np.linspace(1, 4, 11) + >>> y = np.linspace(4, 7, 22) + >>> z = np.linspace(7, 9, 33) + >>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True)) + + ``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``. + Next, define an interpolating function from this data: + + >>> my_interpolating_function = RegularGridInterpolator((x, y, z), data) + + Evaluate the interpolating function at the two points + ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``: + + >>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]]) + >>> my_interpolating_function(pts) + array([ 125.80469388, 146.30069388]) + + which is indeed a close approximation to + ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``. + + See also + -------- + NearestNDInterpolator : Nearest neighbour interpolation on unstructured + data in N dimensions + + LinearNDInterpolator : Piecewise linear interpolant on unstructured data + in N dimensions + + References + ---------- + .. 
[1] Python package *regulargrid* by Johannes Buchner, see + https://pypi.python.org/pypi/regulargrid/ + .. [2] Wikipedia, "Trilinear interpolation", + https://en.wikipedia.org/wiki/Trilinear_interpolation + .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear + and multilinear table interpolation in many dimensions." MATH. + COMPUT. 50.181 (1988): 189-196. + https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf + + """ + # this class is based on code originally programmed by Johannes Buchner, + # see https://github.com/JohannesBuchner/regulargrid + + def __init__(self, points, values, method="linear", bounds_error=True, + fill_value=np.nan): + if method not in ["linear", "nearest"]: + raise ValueError("Method '%s' is not defined" % method) + self.method = method + self.bounds_error = bounds_error + + if not hasattr(values, 'ndim'): + # allow reasonable duck-typed values + values = np.asarray(values) + + if len(points) > values.ndim: + raise ValueError("There are %d point arrays, but values has %d " + "dimensions" % (len(points), values.ndim)) + + if hasattr(values, 'dtype') and hasattr(values, 'astype'): + if not np.issubdtype(values.dtype, np.inexact): + values = values.astype(float) + + self.fill_value = fill_value + if fill_value is not None: + fill_value_dtype = np.asarray(fill_value).dtype + if (hasattr(values, 'dtype') and not + np.can_cast(fill_value_dtype, values.dtype, + casting='same_kind')): + raise ValueError("fill_value must be either 'None' or " + "of a type compatible with values") + + for i, p in enumerate(points): + if not np.all(np.diff(p) > 0.): + raise ValueError("The points in dimension %d must be strictly " + "ascending" % i) + if not np.asarray(p).ndim == 1: + raise ValueError("The points in dimension %d must be " + "1-dimensional" % i) + if not values.shape[i] == len(p): + raise ValueError("There are %d points and %d values in " + "dimension %d" % (len(p), values.shape[i], i)) + self.grid = tuple([np.asarray(p) for p in points]) + self.values = values + + def __call__(self, xi, method=None): + """ + Interpolation at coordinates + + Parameters + ---------- + xi : ndarray of shape (..., ndim) + The coordinates to sample the gridded data at + + method : str + The method of interpolation to perform. Supported are "linear" and + "nearest". 
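+
+        Examples
+        --------
+        A minimal illustrative sketch (not from the original docstring): the
+        method chosen at construction can be overridden per call.
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import RegularGridInterpolator
+        >>> x = y = np.array([0.0, 1.0, 2.0])
+        >>> values = x[:, None] + y[None, :]  # f(x, y) = x + y
+        >>> interp = RegularGridInterpolator((x, y), values)
+        >>> interp([[0.75, 0.75]])  # 'linear', the construction-time default
+        array([1.5])
+        >>> interp([[0.75, 0.75]], method='nearest')  # snaps to node (1, 1)
+        array([2.])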
+ + """ + method = self.method if method is None else method + if method not in ["linear", "nearest"]: + raise ValueError("Method '%s' is not defined" % method) + + ndim = len(self.grid) + xi = _ndim_coords_from_arrays(xi, ndim=ndim) + if xi.shape[-1] != len(self.grid): + raise ValueError("The requested sample points xi have dimension " + "%d, but this RegularGridInterpolator has " + "dimension %d" % (xi.shape[1], ndim)) + + xi_shape = xi.shape + xi = xi.reshape(-1, xi_shape[-1]) + + if self.bounds_error: + for i, p in enumerate(xi.T): + if not np.logical_and(np.all(self.grid[i][0] <= p), + np.all(p <= self.grid[i][-1])): + raise ValueError("One of the requested xi is out of bounds " + "in dimension %d" % i) + + indices, norm_distances, out_of_bounds = self._find_indices(xi.T) + if method == "linear": + result = self._evaluate_linear(indices, + norm_distances, + out_of_bounds) + elif method == "nearest": + result = self._evaluate_nearest(indices, + norm_distances, + out_of_bounds) + if not self.bounds_error and self.fill_value is not None: + result[out_of_bounds] = self.fill_value + + return result.reshape(xi_shape[:-1] + self.values.shape[ndim:]) + + def _evaluate_linear(self, indices, norm_distances, out_of_bounds): + # slice for broadcasting over trailing dimensions in self.values + vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices)) + + # find relevant values + # each i and i+1 represents a edge + edges = itertools.product(*[[i, i + 1] for i in indices]) + values = 0. + for edge_indices in edges: + weight = 1. + for ei, i, yi in zip(edge_indices, indices, norm_distances): + weight *= np.where(ei == i, 1 - yi, yi) + values += np.asarray(self.values[edge_indices]) * weight[vslice] + return values + + def _evaluate_nearest(self, indices, norm_distances, out_of_bounds): + idx_res = [] + for i, yi in zip(indices, norm_distances): + idx_res.append(np.where(yi <= .5, i, i + 1)) + return self.values[tuple(idx_res)] + + def _find_indices(self, xi): + # find relevant edges between which xi are situated + indices = [] + # compute distance to lower edge in unity units + norm_distances = [] + # check for out of bounds xi + out_of_bounds = np.zeros((xi.shape[1]), dtype=bool) + # iterate through dimensions + for x, grid in zip(xi, self.grid): + i = np.searchsorted(grid, x) - 1 + i[i < 0] = 0 + i[i > grid.size - 2] = grid.size - 2 + indices.append(i) + norm_distances.append((x - grid[i]) / + (grid[i + 1] - grid[i])) + if not self.bounds_error: + out_of_bounds += x < grid[0] + out_of_bounds += x > grid[-1] + return indices, norm_distances, out_of_bounds + + +def interpn(points, values, xi, method="linear", bounds_error=True, + fill_value=np.nan): + """ + Multidimensional interpolation on regular grids. + + Parameters + ---------- + points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ) + The points defining the regular grid in n dimensions. + + values : array_like, shape (m1, ..., mn, ...) + The data on the regular grid in n dimensions. + + xi : ndarray of shape (..., ndim) + The coordinates to sample the gridded data at + + method : str, optional + The method of interpolation to perform. Supported are "linear" and + "nearest", and "splinef2d". "splinef2d" is only supported for + 2-dimensional data. + + bounds_error : bool, optional + If True, when interpolated values are requested outside of the + domain of the input data, a ValueError is raised. + If False, then `fill_value` is used. 
+ + fill_value : number, optional + If provided, the value to use for points outside of the + interpolation domain. If None, values outside + the domain are extrapolated. Extrapolation is not supported by method + "splinef2d". + + Returns + ------- + values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:] + Interpolated values at input coordinates. + + Notes + ----- + + .. versionadded:: 0.14 + + See also + -------- + NearestNDInterpolator : Nearest neighbour interpolation on unstructured + data in N dimensions + + LinearNDInterpolator : Piecewise linear interpolant on unstructured data + in N dimensions + + RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a + regular grid in arbitrary dimensions + + RectBivariateSpline : Bivariate spline approximation over a rectangular mesh + + """ + # sanity check 'method' kwarg + if method not in ["linear", "nearest", "splinef2d"]: + raise ValueError("interpn only understands the methods 'linear', " + "'nearest', and 'splinef2d'. You provided %s." % + method) + + if not hasattr(values, 'ndim'): + values = np.asarray(values) + + ndim = values.ndim + if ndim > 2 and method == "splinef2d": + raise ValueError("The method spline2fd can only be used for " + "2-dimensional input data") + if not bounds_error and fill_value is None and method == "splinef2d": + raise ValueError("The method spline2fd does not support extrapolation.") + + # sanity check consistency of input dimensions + if len(points) > ndim: + raise ValueError("There are %d point arrays, but values has %d " + "dimensions" % (len(points), ndim)) + if len(points) != ndim and method == 'splinef2d': + raise ValueError("The method spline2fd can only be used for " + "scalar data with one point per coordinate") + + # sanity check input grid + for i, p in enumerate(points): + if not np.all(np.diff(p) > 0.): + raise ValueError("The points in dimension %d must be strictly " + "ascending" % i) + if not np.asarray(p).ndim == 1: + raise ValueError("The points in dimension %d must be " + "1-dimensional" % i) + if not values.shape[i] == len(p): + raise ValueError("There are %d points and %d values in " + "dimension %d" % (len(p), values.shape[i], i)) + grid = tuple([np.asarray(p) for p in points]) + + # sanity check requested xi + xi = _ndim_coords_from_arrays(xi, ndim=len(grid)) + if xi.shape[-1] != len(grid): + raise ValueError("The requested sample points xi have dimension " + "%d, but this RegularGridInterpolator has " + "dimension %d" % (xi.shape[1], len(grid))) + + for i, p in enumerate(xi.T): + if bounds_error and not np.logical_and(np.all(grid[i][0] <= p), + np.all(p <= grid[i][-1])): + raise ValueError("One of the requested xi is out of bounds " + "in dimension %d" % i) + + # perform interpolation + if method == "linear": + interp = RegularGridInterpolator(points, values, method="linear", + bounds_error=bounds_error, + fill_value=fill_value) + return interp(xi) + elif method == "nearest": + interp = RegularGridInterpolator(points, values, method="nearest", + bounds_error=bounds_error, + fill_value=fill_value) + return interp(xi) + elif method == "splinef2d": + xi_shape = xi.shape + xi = xi.reshape(-1, xi.shape[-1]) + + # RectBivariateSpline doesn't support fill_value; we need to wrap here + idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1], + grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]), + axis=0) + result = np.empty_like(xi[:, 0]) + + # make a copy of values for RectBivariateSpline + interp = RectBivariateSpline(points[0], points[1], values[:]) + 
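# `idx_valid` selects the sample points that fall inside the rectangular
+        # domain; only those are evaluated with the spline below, while the
+        # remaining entries are filled with `fill_value`.
+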
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1]) + result[np.logical_not(idx_valid)] = fill_value + + return result.reshape(xi_shape[:-1]) + + +# backward compatibility wrapper +class _ppform(PPoly): + """ + Deprecated piecewise polynomial class. + + New code should use the `PPoly` class instead. + + """ + + def __init__(self, coeffs, breaks, fill=0.0, sort=False): + warnings.warn("_ppform is deprecated -- use PPoly instead", + category=DeprecationWarning) + + if sort: + breaks = np.sort(breaks) + else: + breaks = np.asarray(breaks) + + PPoly.__init__(self, coeffs, breaks) + + self.coeffs = self.c + self.breaks = self.x + self.K = self.coeffs.shape[0] + self.fill = fill + self.a = self.breaks[0] + self.b = self.breaks[-1] + + def __call__(self, x): + return PPoly.__call__(self, x, 0, False) + + def _evaluate(self, x, nu, extrapolate, out): + PPoly._evaluate(self, x, nu, extrapolate, out) + out[~((x >= self.a) & (x <= self.b))] = self.fill + return out + + @classmethod + def fromspline(cls, xk, cvals, order, fill=0.0): + # Note: this spline representation is incompatible with FITPACK + N = len(xk)-1 + sivals = np.empty((order+1, N), dtype=float) + for m in xrange(order, -1, -1): + fact = spec.gamma(m+1) + res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m) + res /= fact + sivals[order-m, :] = res + return cls(sivals, xk, fill=fill) + + +# The 3 private functions below can be called by splmake(). + + +def _dot0(a, b): + """Similar to numpy.dot, but sum over last axis of a and 1st axis of b""" + if b.ndim <= 2: + return dot(a, b) + else: + axes = list(range(b.ndim)) + axes.insert(-1, 0) + axes.pop(0) + return dot(a, b.transpose(axes)) + + +def _find_smoothest(xk, yk, order, conds=None, B=None): + # construct Bmatrix, and Jmatrix + # e = J*c + # minimize norm(e,2) given B*c=yk + # if desired B can be given + # conds is ignored + N = len(xk)-1 + K = order + if B is None: + B = _fitpack._bsplmat(order, xk) + J = _fitpack._bspldismat(order, xk) + u, s, vh = scipy.linalg.svd(B) + ind = K-1 + V2 = vh[-ind:,:].T + V1 = vh[:-ind,:].T + A = dot(J.T,J) + tmp = dot(V2.T,A) + Q = dot(tmp,V2) + p = scipy.linalg.solve(Q, tmp) + tmp = dot(V2,p) + tmp = np.eye(N+K) - tmp + tmp = dot(tmp,V1) + tmp = dot(tmp,np.diag(1.0/s)) + tmp = dot(tmp,u.T) + return _dot0(tmp, yk) + + +# conds is a tuple of an array and a vector +# giving the left-hand and the right-hand side +# of the additional equations to add to B + + +def _find_user(xk, yk, order, conds, B): + lh = conds[0] + rh = conds[1] + B = np.concatenate((B, lh), axis=0) + w = np.concatenate((yk, rh), axis=0) + M, N = B.shape + if (M > N): + raise ValueError("over-specification of conditions") + elif (M < N): + return _find_smoothest(xk, yk, order, None, B) + else: + return scipy.linalg.solve(B, w) + + +# Remove the 3 private functions above as well when removing splmake +@np.deprecate(message="splmake is deprecated in scipy 0.19.0, " + "use make_interp_spline instead.") +def splmake(xk, yk, order=3, kind='smoothest', conds=None): + """ + Return a representation of a spline given data-points at internal knots + + Parameters + ---------- + xk : array_like + The input array of x values of rank 1 + yk : array_like + The input array of y values of rank N. `yk` can be an N-d array to + represent more than one curve, through the same `xk` points. The first + dimension is assumed to be the interpolating dimension and is the same + length of `xk`. 
+ order : int, optional + Order of the spline + kind : str, optional + Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural', + 'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2 + conds : optional + Conds + + Returns + ------- + splmake : tuple + Return a (`xk`, `cvals`, `k`) representation of a spline given + data-points where the (internal) knots are at the data-points. + + """ + yk = np.asanyarray(yk) + + order = int(order) + if order < 0: + raise ValueError("order must not be negative") + if order == 0: + return xk, yk[:-1], order + elif order == 1: + return xk, yk, order + + try: + func = eval('_find_%s' % kind) + except Exception: + raise NotImplementedError + + # the constraint matrix + B = _fitpack._bsplmat(order, xk) + coefs = func(xk, yk, order, conds, B) + return xk, coefs, order + + +@np.deprecate(message="spleval is deprecated in scipy 0.19.0, " + "use BSpline instead.") +def spleval(xck, xnew, deriv=0): + """ + Evaluate a fixed spline represented by the given tuple at the new x-values + + The `xj` values are the interior knot points. The approximation + region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals` + should have length N+k where `k` is the order of the spline. + + Parameters + ---------- + (xj, cvals, k) : tuple + Parameters that define the fixed spline + xj : array_like + Interior knot points + cvals : array_like + Curvature + k : int + Order of the spline + xnew : array_like + Locations to calculate spline + deriv : int + Deriv + + Returns + ------- + spleval : ndarray + If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or + `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]` + providing the interpolation of multiple curves. + + Notes + ----- + Internally, an additional `k`-1 knot points are added on either side of + the spline. + + """ + (xj, cvals, k) = xck + oldshape = np.shape(xnew) + xx = np.ravel(xnew) + sh = cvals.shape[1:] + res = np.empty(xx.shape + sh, dtype=cvals.dtype) + for index in np.ndindex(*sh): + sl = (slice(None),) + index + if issubclass(cvals.dtype.type, np.complexfloating): + res[sl].real = _fitpack._bspleval(xx,xj, cvals.real[sl], k, deriv) + res[sl].imag = _fitpack._bspleval(xx,xj, cvals.imag[sl], k, deriv) + else: + res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv) + res.shape = oldshape + sh + return res + + +# When `spltopp` gets removed, also remove the _ppform class. +@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, " + "use PPoly.from_spline instead.") +def spltopp(xk, cvals, k): + """Return a piece-wise polynomial object from a fixed-spline tuple.""" + return _ppform.fromspline(xk, cvals, k) + + +@np.deprecate(message="spline is deprecated in scipy 0.19.0, " + "use Bspline class instead.") +def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None): + """ + Interpolate a curve at new points using a spline fit + + Parameters + ---------- + xk, yk : array_like + The x and y values that define the curve. + xnew : array_like + The x values where spline should estimate the y values. + order : int + Default is 3. + kind : string + One of {'smoothest'} + conds : Don't know + Don't know + + Returns + ------- + spline : ndarray + An array of y values; the spline evaluated at the positions `xnew`. 
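+
+    Examples
+    --------
+    A minimal illustrative sketch (not from the original docstring); note
+    that `spline` is deprecated (see the decorator above).
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import spline
+    >>> xk = np.array([0.0, 1.0, 2.0, 3.0])
+    >>> yk = xk**3
+    >>> y_new = spline(xk, yk, [1.5])  # evaluate the 'smoothest' cubic fit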
+ + """ + return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate.pyc new file mode 100644 index 0000000..7a994b1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate_wrapper.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate_wrapper.py new file mode 100644 index 0000000..b07d54d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate_wrapper.py @@ -0,0 +1,187 @@ +""" helper_funcs.py. + scavenged from enthought,interpolate +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from . import _interpolate # C extension. Does all the real work. + + +def atleast_1d_and_contiguous(ary, dtype=np.float64): + return np.atleast_1d(np.ascontiguousarray(ary, dtype)) + + +@np.deprecate(message="'nearest' is deprecated in SciPy 1.0.0") +def nearest(x, y, new_x): + """ + Rounds each new x to nearest input x and returns corresponding input y. + + Parameters + ---------- + x : array_like + Independent values. + y : array_like + Dependent values. + new_x : array_like + The x values to return the interpolate y values. + + Returns + ------- + nearest : ndarray + Rounds each `new_x` to nearest `x` and returns the corresponding `y`. + + """ + shifted_x = np.concatenate((np.array([x[0]-1]), x[0:-1])) + + midpoints_of_x = atleast_1d_and_contiguous(.5*(x + shifted_x)) + new_x = atleast_1d_and_contiguous(new_x) + + TINY = 1e-10 + indices = np.searchsorted(midpoints_of_x, new_x+TINY)-1 + indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(int)) + new_y = np.take(y, indices, axis=-1) + + return new_y + + +@np.deprecate(message="'linear' is deprecated in SciPy 1.0.0") +def linear(x, y, new_x): + """ + Linearly interpolates values in new_x based on the values in x and y + + Parameters + ---------- + x : array_like + Independent values + y : array_like + Dependent values + new_x : array_like + The x values to return the interpolated y values. + + """ + x = atleast_1d_and_contiguous(x, np.float64) + y = atleast_1d_and_contiguous(y, np.float64) + new_x = atleast_1d_and_contiguous(new_x, np.float64) + + if y.ndim > 2: + raise ValueError("`linear` only works with 1-D or 2-D arrays.") + if len(y.shape) == 2: + new_y = np.zeros((y.shape[0], len(new_x)), np.float64) + for i in range(len(new_y)): # for each row + _interpolate.linear_dddd(x, y[i], new_x, new_y[i]) + else: + new_y = np.zeros(len(new_x), np.float64) + _interpolate.linear_dddd(x, y, new_x, new_y) + + return new_y + + +@np.deprecate(message="'logarithmic' is deprecated in SciPy 1.0.0") +def logarithmic(x, y, new_x): + """ + Linearly interpolates values in new_x based in the log space of y. + + Parameters + ---------- + x : array_like + Independent values. + y : array_like + Dependent values. + new_x : array_like + The x values to return interpolated y values at. 
+ + """ + x = atleast_1d_and_contiguous(x, np.float64) + y = atleast_1d_and_contiguous(y, np.float64) + new_x = atleast_1d_and_contiguous(new_x, np.float64) + + if y.ndim > 2: + raise ValueError("`linear` only works with 1-D or 2-D arrays.") + if len(y.shape) == 2: + new_y = np.zeros((y.shape[0], len(new_x)), np.float64) + for i in range(len(new_y)): + _interpolate.loginterp_dddd(x, y[i], new_x, new_y[i]) + else: + new_y = np.zeros(len(new_x), np.float64) + _interpolate.loginterp_dddd(x, y, new_x, new_y) + + return new_y + + +@np.deprecate(message="'block_average_above' is deprecated in SciPy 1.0.0") +def block_average_above(x, y, new_x): + """ + Linearly interpolates values in new_x based on the values in x and y. + + Parameters + ---------- + x : array_like + Independent values. + y : array_like + Dependent values. + new_x : array_like + The x values to interpolate y values. + + """ + bad_index = None + x = atleast_1d_and_contiguous(x, np.float64) + y = atleast_1d_and_contiguous(y, np.float64) + new_x = atleast_1d_and_contiguous(new_x, np.float64) + + if y.ndim > 2: + raise ValueError("`linear` only works with 1-D or 2-D arrays.") + if len(y.shape) == 2: + new_y = np.zeros((y.shape[0], len(new_x)), np.float64) + for i in range(len(new_y)): + bad_index = _interpolate.block_averave_above_dddd(x, y[i], + new_x, new_y[i]) + if bad_index is not None: + break + else: + new_y = np.zeros(len(new_x), np.float64) + bad_index = _interpolate.block_average_above_dddd(x, y, new_x, new_y) + + if bad_index is not None: + msg = "block_average_above cannot extrapolate and new_x[%d]=%f "\ + "is out of the x range (%f, %f)" % \ + (bad_index, new_x[bad_index], x[0], x[-1]) + raise ValueError(msg) + + return new_y + + +@np.deprecate(message="'block' is deprecated in SciPy 1.0.0") +def block(x, y, new_x): + """ + Essentially a step function. + + For each `new_x`, finds largest j such that``x[j] < new_x[j]`` and + returns ``y[j]``. + + Parameters + ---------- + x : array_like + Independent values. + y : array_like + Dependent values. + new_x : array_like + The x values used to calculate the interpolated y. + + Returns + ------- + block : ndarray + Return array, of same length as `x_new`. + + """ + # find index of values in x that precede values in x + # This code is a little strange -- we really want a routine that + # returns the index of values where x[j] < x[index] + TINY = 1e-10 + indices = np.searchsorted(x, new_x+TINY)-1 + + # If the value is at the front of the list, it'll have -1. + # In this case, we will use the first (0), element in the array. + # take requires the index array to be an Int + indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(int)) + new_y = np.take(y, indices, axis=-1) + return new_y diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate_wrapper.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate_wrapper.pyc new file mode 100644 index 0000000..49d69cf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/interpolate_wrapper.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.py new file mode 100644 index 0000000..8b7b455 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.py @@ -0,0 +1,230 @@ +""" +Convenience interface to N-D interpolation + +.. 
versionadded:: 0.9 + +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \ + CloughTocher2DInterpolator, _ndim_coords_from_arrays +from scipy.spatial import cKDTree + +__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator', + 'CloughTocher2DInterpolator'] + +#------------------------------------------------------------------------------ +# Nearest-neighbour interpolation +#------------------------------------------------------------------------------ + + +class NearestNDInterpolator(NDInterpolatorBase): + """ + NearestNDInterpolator(x, y) + + Nearest-neighbour interpolation in N dimensions. + + .. versionadded:: 0.9 + + Methods + ------- + __call__ + + Parameters + ---------- + x : (Npoints, Ndims) ndarray of floats + Data point coordinates. + y : (Npoints,) ndarray of float or complex + Data values. + rescale : boolean, optional + Rescale points to unit cube before performing interpolation. + This is useful if some of the input dimensions have + incommensurable units and differ by many orders of magnitude. + + .. versionadded:: 0.14.0 + tree_options : dict, optional + Options passed to the underlying ``cKDTree``. + + .. versionadded:: 0.17.0 + + + Notes + ----- + Uses ``scipy.spatial.cKDTree`` + + """ + + def __init__(self, x, y, rescale=False, tree_options=None): + NDInterpolatorBase.__init__(self, x, y, rescale=rescale, + need_contiguous=False, + need_values=False) + if tree_options is None: + tree_options = dict() + self.tree = cKDTree(self.points, **tree_options) + self.values = y + + def __call__(self, *args): + """ + Evaluate interpolator at given points. + + Parameters + ---------- + xi : ndarray of float, shape (..., ndim) + Points where to interpolate data at. + + """ + xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1]) + xi = self._check_call_shape(xi) + xi = self._scale_x(xi) + dist, i = self.tree.query(xi) + return self.values[i] + + +#------------------------------------------------------------------------------ +# Convenience interface function +#------------------------------------------------------------------------------ + +def griddata(points, values, xi, method='linear', fill_value=np.nan, + rescale=False): + """ + Interpolate unstructured D-dimensional data. + + Parameters + ---------- + points : ndarray of floats, shape (n, D) + Data point coordinates. Can either be an array of + shape (n, D), or a tuple of `ndim` arrays. + values : ndarray of float or complex, shape (n,) + Data values. + xi : 2-D ndarray of float or tuple of 1-D array, shape (M, D) + Points at which to interpolate data. + method : {'linear', 'nearest', 'cubic'}, optional + Method of interpolation. One of + + ``nearest`` + return the value at the data point closest to + the point of interpolation. See `NearestNDInterpolator` for + more details. + + ``linear`` + tessellate the input point set to n-dimensional + simplices, and interpolate linearly on each simplex. See + `LinearNDInterpolator` for more details. + + ``cubic`` (1-D) + return the value determined from a cubic + spline. + + ``cubic`` (2-D) + return the value determined from a + piecewise cubic, continuously differentiable (C1), and + approximately curvature-minimizing polynomial surface. See + `CloughTocher2DInterpolator` for more details. + fill_value : float, optional + Value used to fill in for requested points outside of the + convex hull of the input points. If not provided, then the + default is ``nan``. 
This option has no effect for the + 'nearest' method. + rescale : bool, optional + Rescale points to unit cube before performing interpolation. + This is useful if some of the input dimensions have + incommensurable units and differ by many orders of magnitude. + + .. versionadded:: 0.14.0 + + Returns + ------- + ndarray + Array of interpolated values. + + Notes + ----- + + .. versionadded:: 0.9 + + Examples + -------- + + Suppose we want to interpolate the 2-D function + + >>> def func(x, y): + ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2 + + on a grid in [0, 1]x[0, 1] + + >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j] + + but we only know its values at 1000 data points: + + >>> points = np.random.rand(1000, 2) + >>> values = func(points[:,0], points[:,1]) + + This can be done with `griddata` -- below we try out all of the + interpolation methods: + + >>> from scipy.interpolate import griddata + >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest') + >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear') + >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic') + + One can see that the exact result is reproduced by all of the + methods to some degree, but for this smooth function the piecewise + cubic interpolant gives the best results: + + >>> import matplotlib.pyplot as plt + >>> plt.subplot(221) + >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower') + >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1) + >>> plt.title('Original') + >>> plt.subplot(222) + >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower') + >>> plt.title('Nearest') + >>> plt.subplot(223) + >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower') + >>> plt.title('Linear') + >>> plt.subplot(224) + >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower') + >>> plt.title('Cubic') + >>> plt.gcf().set_size_inches(6, 6) + >>> plt.show() + + """ + + points = _ndim_coords_from_arrays(points) + + if points.ndim < 2: + ndim = points.ndim + else: + ndim = points.shape[-1] + + if ndim == 1 and method in ('nearest', 'linear', 'cubic'): + from .interpolate import interp1d + points = points.ravel() + if isinstance(xi, tuple): + if len(xi) != 1: + raise ValueError("invalid number of dimensions in xi") + xi, = xi + # Sort points/values together, necessary as input for interp1d + idx = np.argsort(points) + points = points[idx] + values = values[idx] + if method == 'nearest': + fill_value = 'extrapolate' + ip = interp1d(points, values, kind=method, axis=0, bounds_error=False, + fill_value=fill_value) + return ip(xi) + elif method == 'nearest': + ip = NearestNDInterpolator(points, values, rescale=rescale) + return ip(xi) + elif method == 'linear': + ip = LinearNDInterpolator(points, values, fill_value=fill_value, + rescale=rescale) + return ip(xi) + elif method == 'cubic' and ndim == 2: + ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value, + rescale=rescale) + return ip(xi) + else: + raise ValueError("Unknown interpolation method %r for " + "%d dimensional data" % (method, ndim)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.pyc new file mode 100644 index 0000000..0c2510e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/polyint.py 
b/project/venv/lib/python2.7/site-packages/scipy/interpolate/polyint.py new file mode 100644 index 0000000..8e5f93b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/polyint.py @@ -0,0 +1,666 @@ +from __future__ import division, print_function, absolute_import + +import warnings + +import numpy as np +from scipy.special import factorial + +from scipy._lib.six import xrange +from scipy._lib._util import _asarray_validated + + +__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator", + "barycentric_interpolate", "approximate_taylor_polynomial"] + + +def _isscalar(x): + """Check whether x is if a scalar type, or 0-dim""" + return np.isscalar(x) or hasattr(x, 'shape') and x.shape == () + + +class _Interpolator1D(object): + """ + Common features in univariate interpolation + + Deal with input data type and interpolation axis rolling. The + actual interpolator can assume the y-data is of shape (n, r) where + `n` is the number of x-points, and `r` the number of variables, + and use self.dtype as the y-data type. + + Attributes + ---------- + _y_axis + Axis along which the interpolation goes in the original array + _y_extra_shape + Additional trailing shape of the input arrays, excluding + the interpolation axis. + dtype + Dtype of the y-data arrays. Can be set via set_dtype, which + forces it to be float or complex. + + Methods + ------- + __call__ + _prepare_x + _finish_y + _reshape_yi + _set_yi + _set_dtype + _evaluate + + """ + + __slots__ = ('_y_axis', '_y_extra_shape', 'dtype') + + def __init__(self, xi=None, yi=None, axis=None): + self._y_axis = axis + self._y_extra_shape = None + self.dtype = None + if yi is not None: + self._set_yi(yi, xi=xi, axis=axis) + + def __call__(self, x): + """ + Evaluate the interpolant + + Parameters + ---------- + x : array_like + Points to evaluate the interpolant at. + + Returns + ------- + y : array_like + Interpolated values. Shape is determined by replacing + the interpolation axis in the original array with the shape of x. + + """ + x, x_shape = self._prepare_x(x) + y = self._evaluate(x) + return self._finish_y(y, x_shape) + + def _evaluate(self, x): + """ + Actually evaluate the value of the interpolator. 
+ """ + raise NotImplementedError() + + def _prepare_x(self, x): + """Reshape input x array to 1-D""" + x = _asarray_validated(x, check_finite=False, as_inexact=True) + x_shape = x.shape + return x.ravel(), x_shape + + def _finish_y(self, y, x_shape): + """Reshape interpolated y back to n-d array similar to initial y""" + y = y.reshape(x_shape + self._y_extra_shape) + if self._y_axis != 0 and x_shape != (): + nx = len(x_shape) + ny = len(self._y_extra_shape) + s = (list(range(nx, nx + self._y_axis)) + + list(range(nx)) + list(range(nx+self._y_axis, nx+ny))) + y = y.transpose(s) + return y + + def _reshape_yi(self, yi, check=False): + yi = np.rollaxis(np.asarray(yi), self._y_axis) + if check and yi.shape[1:] != self._y_extra_shape: + ok_shape = "%r + (N,) + %r" % (self._y_extra_shape[-self._y_axis:], + self._y_extra_shape[:-self._y_axis]) + raise ValueError("Data must be of shape %s" % ok_shape) + return yi.reshape((yi.shape[0], -1)) + + def _set_yi(self, yi, xi=None, axis=None): + if axis is None: + axis = self._y_axis + if axis is None: + raise ValueError("no interpolation axis specified") + + yi = np.asarray(yi) + + shape = yi.shape + if shape == (): + shape = (1,) + if xi is not None and shape[axis] != len(xi): + raise ValueError("x and y arrays must be equal in length along " + "interpolation axis.") + + self._y_axis = (axis % yi.ndim) + self._y_extra_shape = yi.shape[:self._y_axis]+yi.shape[self._y_axis+1:] + self.dtype = None + self._set_dtype(yi.dtype) + + def _set_dtype(self, dtype, union=False): + if np.issubdtype(dtype, np.complexfloating) \ + or np.issubdtype(self.dtype, np.complexfloating): + self.dtype = np.complex_ + else: + if not union or self.dtype != np.complex_: + self.dtype = np.float_ + + +class _Interpolator1DWithDerivatives(_Interpolator1D): + def derivatives(self, x, der=None): + """ + Evaluate many derivatives of the polynomial at the point x + + Produce an array of all derivative values at the point x. + + Parameters + ---------- + x : array_like + Point or points at which to evaluate the derivatives + der : int or None, optional + How many derivatives to extract; None for all potentially + nonzero derivatives (that is a number equal to the number + of points). This number includes the function value as 0th + derivative. + + Returns + ------- + d : ndarray + Array with derivatives; d[j] contains the j-th derivative. + Shape of d[j] is determined by replacing the interpolation + axis in the original array with the shape of x. + + Examples + -------- + >>> from scipy.interpolate import KroghInterpolator + >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0) + array([1.0,2.0,3.0]) + >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0]) + array([[1.0,1.0], + [2.0,2.0], + [3.0,3.0]]) + + """ + x, x_shape = self._prepare_x(x) + y = self._evaluate_derivatives(x, der) + + y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape) + if self._y_axis != 0 and x_shape != (): + nx = len(x_shape) + ny = len(self._y_extra_shape) + s = ([0] + list(range(nx+1, nx + self._y_axis+1)) + + list(range(1,nx+1)) + + list(range(nx+1+self._y_axis, nx+ny+1))) + y = y.transpose(s) + return y + + def derivative(self, x, der=1): + """ + Evaluate one derivative of the polynomial at the point x + + Parameters + ---------- + x : array_like + Point or points at which to evaluate the derivatives + + der : integer, optional + Which derivative to extract. This number includes the + function value as 0th derivative. + + Returns + ------- + d : ndarray + Derivative interpolated at the x-points. 
Shape of d is + determined by replacing the interpolation axis in the + original array with the shape of x. + + Notes + ----- + This is computed by evaluating all derivatives up to the desired + one (using self.derivatives()) and then discarding the rest. + + """ + x, x_shape = self._prepare_x(x) + y = self._evaluate_derivatives(x, der+1) + return self._finish_y(y[der], x_shape) + + +class KroghInterpolator(_Interpolator1DWithDerivatives): + """ + Interpolating polynomial for a set of points. + + The polynomial passes through all the pairs (xi,yi). One may + additionally specify a number of derivatives at each point xi; + this is done by repeating the value xi and specifying the + derivatives as successive yi values. + + Allows evaluation of the polynomial and all its derivatives. + For reasons of numerical stability, this function does not compute + the coefficients of the polynomial, although they can be obtained + by evaluating all the derivatives. + + Parameters + ---------- + xi : array_like, length N + Known x-coordinates. Must be sorted in increasing order. + yi : array_like + Known y-coordinates. When an xi occurs two or more times in + a row, the corresponding yi's represent derivative values. + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values. + + Notes + ----- + Be aware that the algorithms implemented here are not necessarily + the most numerically stable known. Moreover, even in a world of + exact computation, unless the x coordinates are chosen very + carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice - + polynomial interpolation itself is a very ill-conditioned process + due to the Runge phenomenon. In general, even with well-chosen + x values, degrees higher than about thirty cause problems with + numerical instability in this code. + + Based on [1]_. + + References + ---------- + .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation + and Numerical Differentiation", 1970. + + Examples + -------- + To produce a polynomial that is zero at 0 and 1 and has + derivative 2 at 0, call + + >>> from scipy.interpolate import KroghInterpolator + >>> KroghInterpolator([0,0,1],[0,2,0]) + + This constructs the quadratic 2*X**2-2*X. The derivative condition + is indicated by the repeated zero in the xi array; the corresponding + yi values are 0, the function value, and 2, the derivative value. + + For another example, given xi, yi, and a derivative ypi for each + point, appropriate arrays can be constructed as: + + >>> xi = np.linspace(0, 1, 5) + >>> yi, ypi = np.random.rand(2, 5) + >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi))) + >>> KroghInterpolator(xi_k, yi_k) + + To produce a vector-valued polynomial, supply a higher-dimensional + array for yi: + + >>> KroghInterpolator([0,1],[[2,3],[4,5]]) + + This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1. 
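+
+    A minimal evaluation sketch (an editor-added illustration, not from the
+    original docstring): at x = 0.5 the linear polynomial above returns the
+    midpoint of its two endpoint vectors.
+
+    >>> p = KroghInterpolator([0, 1], [[2, 3], [4, 5]])
+    >>> p(0.5)
+    array([3., 4.])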
+ + """ + + def __init__(self, xi, yi, axis=0): + _Interpolator1DWithDerivatives.__init__(self, xi, yi, axis) + + self.xi = np.asarray(xi) + self.yi = self._reshape_yi(yi) + self.n, self.r = self.yi.shape + + c = np.zeros((self.n+1, self.r), dtype=self.dtype) + c[0] = self.yi[0] + Vk = np.zeros((self.n, self.r), dtype=self.dtype) + for k in xrange(1,self.n): + s = 0 + while s <= k and xi[k-s] == xi[k]: + s += 1 + s -= 1 + Vk[0] = self.yi[k]/float(factorial(s)) + for i in xrange(k-s): + if xi[i] == xi[k]: + raise ValueError("Elements if `xi` can't be equal.") + if s == 0: + Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k]) + else: + Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k]) + c[k] = Vk[k-s] + self.c = c + + def _evaluate(self, x): + pi = 1 + p = np.zeros((len(x), self.r), dtype=self.dtype) + p += self.c[0,np.newaxis,:] + for k in range(1, self.n): + w = x - self.xi[k-1] + pi = w*pi + p += pi[:,np.newaxis] * self.c[k] + return p + + def _evaluate_derivatives(self, x, der=None): + n = self.n + r = self.r + + if der is None: + der = self.n + pi = np.zeros((n, len(x))) + w = np.zeros((n, len(x))) + pi[0] = 1 + p = np.zeros((len(x), self.r), dtype=self.dtype) + p += self.c[0, np.newaxis, :] + + for k in xrange(1, n): + w[k-1] = x - self.xi[k-1] + pi[k] = w[k-1] * pi[k-1] + p += pi[k, :, np.newaxis] * self.c[k] + + cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype) + cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :] + cn[0] = p + for k in xrange(1, n): + for i in xrange(1, n-k+1): + pi[i] = w[k+i-1]*pi[i-1] + pi[i] + cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i] + cn[k] *= factorial(k) + + cn[n, :, :] = 0 + return cn[:der] + + +def krogh_interpolate(xi, yi, x, der=0, axis=0): + """ + Convenience function for polynomial interpolation. + + See `KroghInterpolator` for more details. + + Parameters + ---------- + xi : array_like + Known x-coordinates. + yi : array_like + Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as + vectors of length R, or scalars if R=1. + x : array_like + Point or points at which to evaluate the derivatives. + der : int or list, optional + How many derivatives to extract; None for all potentially + nonzero derivatives (that is a number equal to the number + of points), or a list of derivatives to extract. This number + includes the function value as 0th derivative. + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values. + + Returns + ------- + d : ndarray + If the interpolator's values are R-dimensional then the + returned array will be the number of derivatives by N by R. + If `x` is a scalar, the middle dimension will be dropped; if + the `yi` are scalars then the last dimension will be dropped. + + See Also + -------- + KroghInterpolator + + Notes + ----- + Construction of the interpolating polynomial is a relatively expensive + process. If you want to evaluate it repeatedly consider using the class + KroghInterpolator (which is what this function uses). + + """ + P = KroghInterpolator(xi, yi, axis=axis) + if der == 0: + return P(x) + elif _isscalar(der): + return P.derivative(x,der=der) + else: + return P.derivatives(x,der=np.amax(der)+1)[der] + + +def approximate_taylor_polynomial(f,x,degree,scale,order=None): + """ + Estimate the Taylor polynomial of f at x by polynomial fitting. + + Parameters + ---------- + f : callable + The function whose Taylor polynomial is sought. Should accept + a vector of `x` values. + x : scalar + The point at which the polynomial is to be evaluated. 
+ degree : int + The degree of the Taylor polynomial + scale : scalar + The width of the interval to use to evaluate the Taylor polynomial. + Function values spread over a range this wide are used to fit the + polynomial. Must be chosen carefully. + order : int or None, optional + The order of the polynomial to be used in the fitting; `f` will be + evaluated ``order+1`` times. If None, use `degree`. + + Returns + ------- + p : poly1d instance + The Taylor polynomial (translated to the origin, so that + for example p(0)=f(x)). + + Notes + ----- + The appropriate choice of "scale" is a trade-off; too large and the + function differs from its Taylor polynomial too much to get a good + answer, too small and round-off errors overwhelm the higher-order terms. + The algorithm used becomes numerically unstable around order 30 even + under ideal circumstances. + + Choosing order somewhat larger than degree may improve the higher-order + terms. + + """ + if order is None: + order = degree + + n = order+1 + # Choose n points that cluster near the endpoints of the interval in + # a way that avoids the Runge phenomenon. Ensure, by including the + # endpoint or not as appropriate, that one point always falls at x + # exactly. + xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x + + P = KroghInterpolator(xs, f(xs)) + d = P.derivatives(x,der=degree+1) + + return np.poly1d((d/factorial(np.arange(degree+1)))[::-1]) + + +class BarycentricInterpolator(_Interpolator1D): + """The interpolating polynomial for a set of points + + Constructs a polynomial that passes through a given set of points. + Allows evaluation of the polynomial, efficient changing of the y + values to be interpolated, and updating by adding more x values. + For reasons of numerical stability, this function does not compute + the coefficients of the polynomial. + + The values yi need to be provided before the function is + evaluated, but none of the preprocessing depends on them, so rapid + updates are possible. + + Parameters + ---------- + xi : array_like + 1-d array of x coordinates of the points the polynomial + should pass through + yi : array_like, optional + The y coordinates of the points the polynomial should pass through. + If None, the y values will be supplied later via the `set_y` method. + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values. + + Notes + ----- + This class uses a "barycentric interpolation" method that treats + the problem as a special case of rational function interpolation. + This algorithm is quite stable, numerically, but even in a world of + exact computation, unless the x coordinates are chosen very + carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice - + polynomial interpolation itself is a very ill-conditioned process + due to the Runge phenomenon. + + Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation". + + """ + def __init__(self, xi, yi=None, axis=0): + _Interpolator1D.__init__(self, xi, yi, axis) + + self.xi = np.asarray(xi) + self.set_yi(yi) + self.n = len(self.xi) + + self.wi = np.zeros(self.n) + self.wi[0] = 1 + for j in xrange(1,self.n): + self.wi[:j] *= (self.xi[j]-self.xi[:j]) + self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j]) + self.wi **= -1 + + def set_yi(self, yi, axis=None): + """ + Update the y values to be interpolated + + The barycentric interpolation algorithm requires the calculation + of weights, but these depend only on the xi. The yi can be changed + at any time. 
+ + Parameters + ---------- + yi : array_like + The y coordinates of the points the polynomial should pass through. + If None, the y values will be supplied later. + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values. + + """ + if yi is None: + self.yi = None + return + self._set_yi(yi, xi=self.xi, axis=axis) + self.yi = self._reshape_yi(yi) + self.n, self.r = self.yi.shape + + def add_xi(self, xi, yi=None): + """ + Add more x values to the set to be interpolated + + The barycentric interpolation algorithm allows easy updating by + adding more points for the polynomial to pass through. + + Parameters + ---------- + xi : array_like + The x coordinates of the points that the polynomial should pass + through. + yi : array_like, optional + The y coordinates of the points the polynomial should pass through. + Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is + vector-valued. + If `yi` is not given, the y values will be supplied later. `yi` should + be given if and only if the interpolator has y values specified. + + """ + if yi is not None: + if self.yi is None: + raise ValueError("No previous yi value to update!") + yi = self._reshape_yi(yi, check=True) + self.yi = np.vstack((self.yi,yi)) + else: + if self.yi is not None: + raise ValueError("No update to yi provided!") + old_n = self.n + self.xi = np.concatenate((self.xi,xi)) + self.n = len(self.xi) + self.wi **= -1 + old_wi = self.wi + self.wi = np.zeros(self.n) + self.wi[:old_n] = old_wi + for j in xrange(old_n,self.n): + self.wi[:j] *= (self.xi[j]-self.xi[:j]) + self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j]) + self.wi **= -1 + + def __call__(self, x): + """Evaluate the interpolating polynomial at the points x + + Parameters + ---------- + x : array_like + Points to evaluate the interpolant at. + + Returns + ------- + y : array_like + Interpolated values. Shape is determined by replacing + the interpolation axis in the original array with the shape of x. + + Notes + ----- + Currently the code computes an outer product between x and the + weights, that is, it constructs an intermediate array of size + N by len(x), where N is the degree of the polynomial. + """ + return _Interpolator1D.__call__(self, x) + + def _evaluate(self, x): + if x.size == 0: + p = np.zeros((0, self.r), dtype=self.dtype) + else: + c = x[...,np.newaxis]-self.xi + z = c == 0 + c[z] = 1 + c = self.wi/c + p = np.dot(c,self.yi)/np.sum(c,axis=-1)[...,np.newaxis] + # Now fix where x==some xi + r = np.nonzero(z) + if len(r) == 1: # evaluation at a scalar + if len(r[0]) > 0: # equals one of the points + p = self.yi[r[0][0]] + else: + p[r[:-1]] = self.yi[r[-1]] + return p + + +def barycentric_interpolate(xi, yi, x, axis=0): + """ + Convenience function for polynomial interpolation. + + Constructs a polynomial that passes through a given set of points, + then evaluates the polynomial. For reasons of numerical stability, + this function does not compute the coefficients of the polynomial. + + This function uses a "barycentric interpolation" method that treats + the problem as a special case of rational function interpolation. + This algorithm is quite stable, numerically, but even in a world of + exact computation, unless the `x` coordinates are chosen very + carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice - + polynomial interpolation itself is a very ill-conditioned process + due to the Runge phenomenon. 
+ + Parameters + ---------- + xi : array_like + 1-d array of x coordinates of the points the polynomial should + pass through + yi : array_like + The y coordinates of the points the polynomial should pass through. + x : scalar or array_like + Points to evaluate the interpolator at. + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values. + + Returns + ------- + y : scalar or array_like + Interpolated values. Shape is determined by replacing + the interpolation axis in the original array with the shape of x. + + See Also + -------- + BarycentricInterpolator + + Notes + ----- + Construction of the interpolation weights is a relatively slow process. + If you want to call this many times with the same xi (but possibly + varying yi or x) you should use the class `BarycentricInterpolator`. + This is what this function uses internally. + + """ + return BarycentricInterpolator(xi, yi, axis=axis)(x) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/polyint.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/polyint.pyc new file mode 100644 index 0000000..0b1d28e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/polyint.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/rbf.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/rbf.py new file mode 100644 index 0000000..32d08e3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/rbf.py @@ -0,0 +1,261 @@ +"""rbf - Radial basis functions for interpolation/smoothing scattered Nd data. + +Written by John Travers <jtravs@gmail.com>, February 2007 +Based closely on Matlab code by Alex Chirokov +Additional, large, improvements by Robert Hetland +Some additional alterations by Travis Oliphant + +Permission to use, modify, and distribute this software is given under the +terms of the SciPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu> +Copyright (c) 2007, John Travers <jtravs@gmail.com> + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of Robert Hetland nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" +from __future__ import division, print_function, absolute_import + +import sys +import numpy as np + +from scipy import linalg +from scipy._lib.six import callable, get_method_function, get_function_code +from scipy.special import xlogy +from scipy.spatial.distance import cdist, pdist, squareform + +__all__ = ['Rbf'] + + +class Rbf(object): + """ + Rbf(*args) + + A class for radial basis function approximation/interpolation of + n-dimensional scattered data. + + Parameters + ---------- + *args : arrays + x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes + and d is the array of values at the nodes + function : str or callable, optional + The radial basis function, based on the radius, r, given by the norm + (default is Euclidean distance); the default is 'multiquadric':: + + 'multiquadric': sqrt((r/self.epsilon)**2 + 1) + 'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1) + 'gaussian': exp(-(r/self.epsilon)**2) + 'linear': r + 'cubic': r**3 + 'quintic': r**5 + 'thin_plate': r**2 * log(r) + + If callable, then it must take 2 arguments (self, r). The epsilon + parameter will be available as self.epsilon. Other keyword + arguments passed in will be available as well. + + epsilon : float, optional + Adjustable constant for gaussian or multiquadrics functions + - defaults to approximate average distance between nodes (which is + a good start). + smooth : float, optional + Values greater than zero increase the smoothness of the + approximation. 0 is for interpolation (default), the function will + always go through the nodal points in this case. + norm : str, callable, optional + A function that returns the 'distance' between two points, with + inputs as arrays of positions (x, y, z, ...), and an output as an + array of distance. E.g., the default: 'euclidean', such that the result + is a matrix of the distances from each point in ``x1`` to each point in + ``x2``. For more options, see documentation of + `scipy.spatial.distances.cdist`. + + Attributes + ---------- + N : int + The number of data points (as determined by the input arrays). + di : ndarray + The 1-D array of data values at each of the data coordinates `xi`. + xi : ndarray + The 2-D array of data coordinates. + function : str or callable + The radial basis function. See description under Parameters. + epsilon : float + Parameter used by gaussian or multiquadrics functions. See Parameters. + smooth : float + Smoothing parameter. See description under Parameters. + norm : str or callable + The distance function. See description under Parameters. + nodes : ndarray + A 1-D array of node values for the interpolation. 
+ A : internal property, do not use + + Examples + -------- + >>> from scipy.interpolate import Rbf + >>> x, y, z, d = np.random.rand(4, 50) + >>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance + >>> xi = yi = zi = np.linspace(0, 1, 20) + >>> di = rbfi(xi, yi, zi) # interpolated values + >>> di.shape + (20,) + + """ + # Available radial basis functions that can be selected as strings; + # they all start with _h_ (self._init_function relies on that) + def _h_multiquadric(self, r): + return np.sqrt((1.0/self.epsilon*r)**2 + 1) + + def _h_inverse_multiquadric(self, r): + return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1) + + def _h_gaussian(self, r): + return np.exp(-(1.0/self.epsilon*r)**2) + + def _h_linear(self, r): + return r + + def _h_cubic(self, r): + return r**3 + + def _h_quintic(self, r): + return r**5 + + def _h_thin_plate(self, r): + return xlogy(r**2, r) + + # Setup self._function and do smoke test on initial r + def _init_function(self, r): + if isinstance(self.function, str): + self.function = self.function.lower() + _mapped = {'inverse': 'inverse_multiquadric', + 'inverse multiquadric': 'inverse_multiquadric', + 'thin-plate': 'thin_plate'} + if self.function in _mapped: + self.function = _mapped[self.function] + + func_name = "_h_" + self.function + if hasattr(self, func_name): + self._function = getattr(self, func_name) + else: + functionlist = [x[3:] for x in dir(self) + if x.startswith('_h_')] + raise ValueError("function must be a callable or one of " + + ", ".join(functionlist)) + self._function = getattr(self, "_h_"+self.function) + elif callable(self.function): + allow_one = False + if hasattr(self.function, 'func_code') or \ + hasattr(self.function, '__code__'): + val = self.function + allow_one = True + elif hasattr(self.function, "im_func"): + val = get_method_function(self.function) + elif hasattr(self.function, "__call__"): + val = get_method_function(self.function.__call__) + else: + raise ValueError("Cannot determine number of arguments to " + "function") + + argcount = get_function_code(val).co_argcount + if allow_one and argcount == 1: + self._function = self.function + elif argcount == 2: + if sys.version_info[0] >= 3: + self._function = self.function.__get__(self, Rbf) + else: + import new + self._function = new.instancemethod(self.function, self, + Rbf) + else: + raise ValueError("Function argument must take 1 or 2 " + "arguments.") + + a0 = self._function(r) + if a0.shape != r.shape: + raise ValueError("Callable must take array and return array of " + "the same shape") + return a0 + + def __init__(self, *args, **kwargs): + # `args` can be a variable number of arrays; we flatten them and store + # them as a single 2-D array `xi` of shape (n_args-1, array_size), + # plus a 1-D array `di` for the values. 
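+        # (e.g. for the docstring example Rbf(x, y, z, d) with 50 samples,
+        # self.xi gets shape (3, 50) and self.di shape (50,).)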
+ # All arrays must have the same number of elements + self.xi = np.asarray([np.asarray(a, dtype=np.float_).flatten() + for a in args[:-1]]) + self.N = self.xi.shape[-1] + self.di = np.asarray(args[-1]).flatten() + + if not all([x.size == self.di.size for x in self.xi]): + raise ValueError("All arrays must be equal length.") + + self.norm = kwargs.pop('norm', 'euclidean') + self.epsilon = kwargs.pop('epsilon', None) + if self.epsilon is None: + # default epsilon is the "the average distance between nodes" based + # on a bounding hypercube + ximax = np.amax(self.xi, axis=1) + ximin = np.amin(self.xi, axis=1) + edges = ximax - ximin + edges = edges[np.nonzero(edges)] + self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size) + + self.smooth = kwargs.pop('smooth', 0.0) + self.function = kwargs.pop('function', 'multiquadric') + + # attach anything left in kwargs to self for use by any user-callable + # function or to save on the object returned. + for item, value in kwargs.items(): + setattr(self, item, value) + + self.nodes = linalg.solve(self.A, self.di) + + @property + def A(self): + # this only exists for backwards compatibility: self.A was available + # and, at least technically, public. + r = squareform(pdist(self.xi.T, self.norm)) # Pairwise norm + return self._init_function(r) - np.eye(self.N)*self.smooth + + def _call_norm(self, x1, x2): + return cdist(x1.T, x2.T, self.norm) + + def __call__(self, *args): + args = [np.asarray(x) for x in args] + if not all([x.shape == y.shape for x in args for y in args]): + raise ValueError("Array lengths must be equal") + + shp = args[0].shape + xa = np.asarray([a.flatten() for a in args], dtype=np.float_) + r = self._call_norm(xa, self.xi) + return np.dot(self._function(r), self.nodes).reshape(shp) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/rbf.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/rbf.pyc new file mode 100644 index 0000000..202298c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/rbf.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/setup.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/setup.py new file mode 100644 index 0000000..b1076ed --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/setup.py @@ -0,0 +1,54 @@ +from __future__ import division, print_function, absolute_import + +from os.path import join + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + from scipy._build_utils.system_info import get_info + + lapack_opt = get_info('lapack_opt', notfound_action=2) + + config = Configuration('interpolate', parent_package, top_path) + + fitpack_src = [join('fitpack', '*.f')] + config.add_library('fitpack', sources=fitpack_src) + + config.add_extension('interpnd', + sources=['interpnd.c']) + + config.add_extension('_ppoly', + sources=['_ppoly.c'], + **lapack_opt) + + config.add_extension('_bspl', + sources=['_bspl.c'], + libraries=['fitpack'], + depends=['src/__fitpack.h'] + fitpack_src) + + config.add_extension('_fitpack', + sources=['src/_fitpackmodule.c'], + libraries=['fitpack'], + depends=(['src/__fitpack.h','src/multipack.h'] + + fitpack_src) + ) + + config.add_extension('dfitpack', + sources=['src/fitpack.pyf'], + libraries=['fitpack'], + depends=fitpack_src, + ) + + config.add_extension('_interpolate', + sources=['src/_interpolate.cpp'], + include_dirs=['src'], + depends=['src/interpolate.h']) + + 
config.add_data_dir('tests') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/setup.pyc new file mode 100644 index 0000000..4e52db1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/__init__.pyc new file mode 100644 index 0000000..7383ce9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/data/bug-1310.npz b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/data/bug-1310.npz new file mode 100644 index 0000000..8dc93c7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/data/bug-1310.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy new file mode 100644 index 0000000..79e1b09 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_bsplines.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_bsplines.py new file mode 100644 index 0000000..8b66610 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_bsplines.py @@ -0,0 +1,1247 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import assert_equal, assert_allclose, assert_ +from scipy._lib._numpy_compat import suppress_warnings +from pytest import raises as assert_raises +import pytest + +from scipy.interpolate import (BSpline, BPoly, PPoly, make_interp_spline, + make_lsq_spline, _bspl, splev, splrep, splprep, splder, splantider, + sproot, splint, insert) +import scipy.linalg as sl +from scipy._lib._version import NumpyVersion + +from scipy.interpolate._bsplines import _not_a_knot, _augknt +import scipy.interpolate._fitpack_impl as _impl +from scipy.interpolate._fitpack import _splint + + +class TestBSpline(object): + + def test_ctor(self): + # knots should be an ordered 1D array of finite real numbers + assert_raises((TypeError, ValueError), BSpline, + **dict(t=[1, 1.j], c=[1.], k=0)) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0)) + assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0)) + assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0)) + assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0)) + + # for n+k+1 knots and degree k need at least n coefficients + assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0)) + assert_raises(ValueError, BSpline, + **dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2)) + + # non-integer orders + 
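+        # (the degree k must be a non-negative integer: both the string
+        # and the float below should raise TypeError)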
assert_raises(TypeError, BSpline, + **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic")) + assert_raises(TypeError, BSpline, + **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5)) + + # basic interval cannot have measure zero (here: [1..1]) + assert_raises(ValueError, BSpline, + **dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2)) + + # tck vs self.tck + n, k = 11, 3 + t = np.arange(n+k+1) + c = np.random.random(n) + b = BSpline(t, c, k) + + assert_allclose(t, b.t) + assert_allclose(c, b.c) + assert_equal(k, b.k) + + def test_tck(self): + b = _make_random_spline() + tck = b.tck + + assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15) + assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15) + assert_equal(b.k, tck[2]) + + # b.tck is read-only + with pytest.raises(AttributeError): + b.tck = 'foo' + + def test_degree_0(self): + xx = np.linspace(0, 1, 10) + + b = BSpline(t=[0, 1], c=[3.], k=0) + assert_allclose(b(xx), 3) + + b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0) + assert_allclose(b(xx), np.where(xx < 0.35, 3, 4)) + + def test_degree_1(self): + t = [0, 1, 2, 3, 4] + c = [1, 2, 3] + k = 1 + b = BSpline(t, c, k) + + x = np.linspace(1, 3, 50) + assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2), + b(x), atol=1e-14) + assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14) + + def test_bernstein(self): + # a special knot vector: Bernstein polynomials + k = 3 + t = np.asarray([0]*(k+1) + [1]*(k+1)) + c = np.asarray([1., 2., 3., 4.]) + bp = BPoly(c.reshape(-1, 1), [0, 1]) + bspl = BSpline(t, c, k) + + xx = np.linspace(-1., 2., 10) + assert_allclose(bp(xx, extrapolate=True), + bspl(xx, extrapolate=True), atol=1e-14) + assert_allclose(splev(xx, (t, c, k)), + bspl(xx), atol=1e-14) + + def test_rndm_naive_eval(self): + # test random coefficient spline *on the base interval*, + # t[k] <= x < t[-k-1] + b = _make_random_spline() + t, c, k = b.tck + xx = np.linspace(t[k], t[-k-1], 50) + y_b = b(xx) + + y_n = [_naive_eval(x, t, c, k) for x in xx] + assert_allclose(y_b, y_n, atol=1e-14) + + y_n2 = [_naive_eval_2(x, t, c, k) for x in xx] + assert_allclose(y_b, y_n2, atol=1e-14) + + def test_rndm_splev(self): + b = _make_random_spline() + t, c, k = b.tck + xx = np.linspace(t[k], t[-k-1], 50) + assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14) + + def test_rndm_splrep(self): + np.random.seed(1234) + x = np.sort(np.random.random(20)) + y = np.random.random(20) + + tck = splrep(x, y) + b = BSpline(*tck) + + t, k = b.t, b.k + xx = np.linspace(t[k], t[-k-1], 80) + assert_allclose(b(xx), splev(xx, tck), atol=1e-14) + + def test_rndm_unity(self): + b = _make_random_spline() + b.c = np.ones_like(b.c) + xx = np.linspace(b.t[b.k], b.t[-b.k-1], 100) + assert_allclose(b(xx), 1.) + + def test_vectorization(self): + n, k = 22, 3 + t = np.sort(np.random.random(n)) + c = np.random.random(size=(n, 6, 7)) + b = BSpline(t, c, k) + tm, tp = t[k], t[-k-1] + xx = tm + (tp - tm) * np.random.random((3, 4, 5)) + assert_equal(b(xx).shape, (3, 4, 5, 6, 7)) + + def test_len_c(self): + # for n+k+1 knots, only first n coefs are used. 
+ # and BTW this is consistent with FITPACK + n, k = 33, 3 + t = np.sort(np.random.random(n+k+1)) + c = np.random.random(n) + + # pad coefficients with random garbage + c_pad = np.r_[c, np.random.random(k+1)] + + b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k) + + dt = t[-1] - t[0] + xx = np.linspace(t[0] - dt, t[-1] + dt, 50) + assert_allclose(b(xx), b_pad(xx), atol=1e-14) + assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14) + assert_allclose(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14) + + def test_endpoints(self): + # base interval is closed + b = _make_random_spline() + t, _, k = b.tck + tm, tp = t[k], t[-k-1] + for extrap in (True, False): + assert_allclose(b([tm, tp], extrap), + b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9) + + def test_continuity(self): + # assert continuity at internal knots + b = _make_random_spline() + t, _, k = b.tck + assert_allclose(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10), + atol=1e-9) + + def test_extrap(self): + b = _make_random_spline() + t, c, k = b.tck + dt = t[-1] - t[0] + xx = np.linspace(t[k] - dt, t[-k-1] + dt, 50) + mask = (t[k] < xx) & (xx < t[-k-1]) + + # extrap has no effect within the base interval + assert_allclose(b(xx[mask], extrapolate=True), + b(xx[mask], extrapolate=False)) + + # extrapolated values agree with FITPACK + assert_allclose(b(xx, extrapolate=True), + splev(xx, (t, c, k), ext=0)) + + def test_default_extrap(self): + # BSpline defaults to extrapolate=True + b = _make_random_spline() + t, _, k = b.tck + xx = [t[0] - 1, t[-1] + 1] + yy = b(xx) + assert_(not np.all(np.isnan(yy))) + + def test_periodic_extrap(self): + np.random.seed(1234) + t = np.sort(np.random.random(8)) + c = np.random.random(4) + k = 3 + b = BSpline(t, c, k, extrapolate='periodic') + n = t.size - (k + 1) + + dt = t[-1] - t[0] + xx = np.linspace(t[k] - dt, t[n] + dt, 50) + xy = t[k] + (xx - t[k]) % (t[n] - t[k]) + assert_allclose(b(xx), splev(xy, (t, c, k))) + + # Direct check + xx = [-1, 0, 0.5, 1] + xy = t[k] + (xx - t[k]) % (t[n] - t[k]) + assert_equal(b(xx, extrapolate='periodic'), b(xy, extrapolate=True)) + + def test_ppoly(self): + b = _make_random_spline() + t, c, k = b.tck + pp = PPoly.from_spline((t, c, k)) + + xx = np.linspace(t[k], t[-k], 100) + assert_allclose(b(xx), pp(xx), atol=1e-14, rtol=1e-14) + + def test_derivative_rndm(self): + b = _make_random_spline() + t, c, k = b.tck + xx = np.linspace(t[0], t[-1], 50) + xx = np.r_[xx, t] + + for der in range(1, k+1): + yd = splev(xx, (t, c, k), der=der) + assert_allclose(yd, b(xx, nu=der), atol=1e-14) + + # higher derivatives all vanish + assert_allclose(b(xx, nu=k+1), 0, atol=1e-14) + + def test_derivative_jumps(self): + # example from de Boor, Chap IX, example (24) + # NB: knots augmented & corresp coefs are zeroed out + # in agreement with the convention (29) + k = 2 + t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7] + np.random.seed(1234) + c = np.r_[0, 0, np.random.random(5), 0, 0] + b = BSpline(t, c, k) + + # b is continuous at x != 6 (triple knot) + x = np.asarray([1, 3, 4, 6]) + assert_allclose(b(x[x != 6] - 1e-10), + b(x[x != 6] + 1e-10)) + assert_(not np.allclose(b(6.-1e-10), b(6+1e-10))) + + # 1st derivative jumps at double knots, 1 & 6: + x0 = np.asarray([3, 4]) + assert_allclose(b(x0 - 1e-10, nu=1), + b(x0 + 1e-10, nu=1)) + x1 = np.asarray([1, 6]) + assert_(not np.all(np.allclose(b(x1 - 1e-10, nu=1), + b(x1 + 1e-10, nu=1)))) + + # 2nd derivative is not guaranteed to be continuous either + assert_(not np.all(np.allclose(b(x - 1e-10, nu=2), + b(x + 1e-10, nu=2)))) + + def 
test_basis_element_quadratic(self): + xx = np.linspace(-1, 4, 20) + b = BSpline.basis_element(t=[0, 1, 2, 3]) + assert_allclose(b(xx), + splev(xx, (b.t, b.c, b.k)), atol=1e-14) + assert_allclose(b(xx), + B_0123(xx), atol=1e-14) + + b = BSpline.basis_element(t=[0, 1, 1, 2]) + xx = np.linspace(0, 2, 10) + assert_allclose(b(xx), + np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14) + + def test_basis_element_rndm(self): + b = _make_random_spline() + t, c, k = b.tck + xx = np.linspace(t[k], t[-k-1], 20) + assert_allclose(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14) + + def test_cmplx(self): + b = _make_random_spline() + t, c, k = b.tck + cc = c * (1. + 3.j) + + b = BSpline(t, cc, k) + b_re = BSpline(t, b.c.real, k) + b_im = BSpline(t, b.c.imag, k) + + xx = np.linspace(t[k], t[-k-1], 20) + assert_allclose(b(xx).real, b_re(xx), atol=1e-14) + assert_allclose(b(xx).imag, b_im(xx), atol=1e-14) + + def test_nan(self): + # nan in, nan out. + b = BSpline.basis_element([0, 1, 1, 2]) + assert_(np.isnan(b(np.nan))) + + def test_derivative_method(self): + b = _make_random_spline(k=5) + t, c, k = b.tck + b0 = BSpline(t, c, k) + xx = np.linspace(t[k], t[-k-1], 20) + for j in range(1, k): + b = b.derivative() + assert_allclose(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12) + + def test_antiderivative_method(self): + b = _make_random_spline() + t, c, k = b.tck + xx = np.linspace(t[k], t[-k-1], 20) + assert_allclose(b.antiderivative().derivative()(xx), + b(xx), atol=1e-14, rtol=1e-14) + + # repeat with n-D array for c + c = np.c_[c, c, c] + c = np.dstack((c, c)) + b = BSpline(t, c, k) + assert_allclose(b.antiderivative().derivative()(xx), + b(xx), atol=1e-14, rtol=1e-14) + + def test_integral(self): + b = BSpline.basis_element([0, 1, 2]) # x for x < 1 else 2 - x + assert_allclose(b.integrate(0, 1), 0.5) + assert_allclose(b.integrate(1, 0), -1 * 0.5) + assert_allclose(b.integrate(1, 0), -0.5) + + # extrapolate or zeros outside of [0, 2]; default is yes + assert_allclose(b.integrate(-1, 1), 0) + assert_allclose(b.integrate(-1, 1, extrapolate=True), 0) + assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5) + assert_allclose(b.integrate(1, -1, extrapolate=False), -1 * 0.5) + + # Test ``_fitpack._splint()`` + t, c, k = b.tck + assert_allclose(b.integrate(1, -1, extrapolate=False), + _splint(t, c, k, 1, -1)[0]) + + # Test ``extrapolate='periodic'``. 
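+        # (with periodic extrapolation the limits are reduced modulo the
+        # base interval [0, 2], so any window spanning one full period,
+        # e.g. integrate(-9, -7), should equal the integral over [0, 2])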
+ b.extrapolate = 'periodic' + i = b.antiderivative() + period_int = i(2) - i(0) + + assert_allclose(b.integrate(0, 2), period_int) + assert_allclose(b.integrate(2, 0), -1 * period_int) + assert_allclose(b.integrate(-9, -7), period_int) + assert_allclose(b.integrate(-8, -4), 2 * period_int) + + assert_allclose(b.integrate(0.5, 1.5), i(1.5) - i(0.5)) + assert_allclose(b.integrate(1.5, 3), i(1) - i(0) + i(2) - i(1.5)) + assert_allclose(b.integrate(1.5 + 12, 3 + 12), + i(1) - i(0) + i(2) - i(1.5)) + assert_allclose(b.integrate(1.5, 3 + 12), + i(1) - i(0) + i(2) - i(1.5) + 6 * period_int) + + assert_allclose(b.integrate(0, -1), i(0) - i(1)) + assert_allclose(b.integrate(-9, -10), i(0) - i(1)) + assert_allclose(b.integrate(0, -9), i(1) - i(2) - 4 * period_int) + + def test_integrate_ppoly(self): + # test .integrate method to be consistent with PPoly.integrate + x = [0, 1, 2, 3, 4] + b = make_interp_spline(x, x) + b.extrapolate = 'periodic' + p = PPoly.from_spline(b) + + for x0, x1 in [(-5, 0.5), (0.5, 5), (-4, 13)]: + assert_allclose(b.integrate(x0, x1), + p.integrate(x0, x1)) + + def test_subclassing(self): + # classmethods should not decay to the base class + class B(BSpline): + pass + + b = B.basis_element([0, 1, 2, 2]) + assert_equal(b.__class__, B) + assert_equal(b.derivative().__class__, B) + assert_equal(b.antiderivative().__class__, B) + + def test_axis(self): + n, k = 22, 3 + t = np.linspace(0, 1, n + k + 1) + sh0 = [6, 7, 8] + for axis in range(4): + sh = sh0[:] + sh.insert(axis, n) # [22, 6, 7, 8] etc + c = np.random.random(size=sh) + b = BSpline(t, c, k, axis=axis) + assert_equal(b.c.shape, + [sh[axis],] + sh[:axis] + sh[axis+1:]) + + xp = np.random.random((3, 4, 5)) + assert_equal(b(xp).shape, + sh[:axis] + list(xp.shape) + sh[axis+1:]) + + #0 <= axis < c.ndim + for ax in [-1, c.ndim]: + assert_raises(ValueError, BSpline, **dict(t=t, c=c, k=k, axis=ax)) + + # derivative, antiderivative keeps the axis + for b1 in [BSpline(t, c, k, axis=axis).derivative(), + BSpline(t, c, k, axis=axis).derivative(2), + BSpline(t, c, k, axis=axis).antiderivative(), + BSpline(t, c, k, axis=axis).antiderivative(2)]: + assert_equal(b1.axis, b.axis) + + +def test_knots_multiplicity(): + # Take a spline w/ random coefficients, throw in knots of varying + # multiplicity. + + def check_splev(b, j, der=0, atol=1e-14, rtol=1e-14): + # check evaluations against FITPACK, incl extrapolations + t, c, k = b.tck + x = np.unique(t) + x = np.r_[t[0]-0.1, 0.5*(x[1:] + x[:1]), t[-1]+0.1] + assert_allclose(splev(x, (t, c, k), der), b(x, der), + atol=atol, rtol=rtol, err_msg='der = %s k = %s' % (der, b.k)) + + # test loop itself + # [the index `j` is for interpreting the traceback in case of a failure] + for k in [1, 2, 3, 4, 5]: + b = _make_random_spline(k=k) + for j, b1 in enumerate(_make_multiples(b)): + check_splev(b1, j) + for der in range(1, k+1): + check_splev(b1, j, der, 1e-12, 1e-12) + + +### stolen from @pv, verbatim +def _naive_B(x, k, i, t): + """ + Naive way to compute B-spline basis functions. Useful only for testing! + computes B(x; t[i],..., t[i+k+1]) + """ + if k == 0: + return 1.0 if t[i] <= x < t[i+1] else 0.0 + if t[i+k] == t[i]: + c1 = 0.0 + else: + c1 = (x - t[i])/(t[i+k] - t[i]) * _naive_B(x, k-1, i, t) + if t[i+k+1] == t[i+1]: + c2 = 0.0 + else: + c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * _naive_B(x, k-1, i+1, t) + return (c1 + c2) + + +### stolen from @pv, verbatim +def _naive_eval(x, t, c, k): + """ + Naive B-spline evaluation. Useful only for testing! 
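+
+    Locates the knot interval containing x, then sums the k+1 B-splines
+    that are nonzero there, each computed with the Cox-de Boor recursion
+    in _naive_B above.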
+ """ + if x == t[k]: + i = k + else: + i = np.searchsorted(t, x) - 1 + assert t[i] <= x <= t[i+1] + assert i >= k and i < len(t) - k + return sum(c[i-j] * _naive_B(x, k, i-j, t) for j in range(0, k+1)) + + +def _naive_eval_2(x, t, c, k): + """Naive B-spline evaluation, another way.""" + n = len(t) - (k+1) + assert n >= k+1 + assert len(c) >= n + assert t[k] <= x <= t[n] + return sum(c[i] * _naive_B(x, k, i, t) for i in range(n)) + + +def _sum_basis_elements(x, t, c, k): + n = len(t) - (k+1) + assert n >= k+1 + assert len(c) >= n + s = 0. + for i in range(n): + b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x) + s += c[i] * np.nan_to_num(b) # zero out out-of-bounds elements + return s + + +def B_012(x): + """ A linear B-spline function B(x | 0, 1, 2).""" + x = np.atleast_1d(x) + return np.piecewise(x, [(x < 0) | (x > 2), + (x >= 0) & (x < 1), + (x >= 1) & (x <= 2)], + [lambda x: 0., lambda x: x, lambda x: 2.-x]) + + +def B_0123(x, der=0): + """A quadratic B-spline function B(x | 0, 1, 2, 3).""" + x = np.atleast_1d(x) + conds = [x < 1, (x > 1) & (x < 2), x > 2] + if der == 0: + funcs = [lambda x: x*x/2., + lambda x: 3./4 - (x-3./2)**2, + lambda x: (3.-x)**2 / 2] + elif der == 2: + funcs = [lambda x: 1., + lambda x: -2., + lambda x: 1.] + else: + raise ValueError('never be here: der=%s' % der) + pieces = np.piecewise(x, conds, funcs) + return pieces + + +def _make_random_spline(n=35, k=3): + np.random.seed(123) + t = np.sort(np.random.random(n+k+1)) + c = np.random.random(n) + return BSpline.construct_fast(t, c, k) + + +def _make_multiples(b): + """Increase knot multiplicity.""" + c, k = b.c, b.k + + t1 = b.t.copy() + t1[17:19] = t1[17] + t1[22] = t1[21] + yield BSpline(t1, c, k) + + t1 = b.t.copy() + t1[:k+1] = t1[0] + yield BSpline(t1, c, k) + + t1 = b.t.copy() + t1[-k-1:] = t1[-1] + yield BSpline(t1, c, k) + + +class TestInterop(object): + # + # Test that FITPACK-based spl* functions can deal with BSpline objects + # + def setup_method(self): + xx = np.linspace(0, 4.*np.pi, 41) + yy = np.cos(xx) + b = make_interp_spline(xx, yy) + self.tck = (b.t, b.c, b.k) + self.xx, self.yy, self.b = xx, yy, b + + self.xnew = np.linspace(0, 4.*np.pi, 21) + + c2 = np.c_[b.c, b.c, b.c] + self.c2 = np.dstack((c2, c2)) + self.b2 = BSpline(b.t, self.c2, b.k) + + def test_splev(self): + xnew, b, b2 = self.xnew, self.b, self.b2 + + # check that splev works with 1D array of coefficients + # for array and scalar `x` + assert_allclose(splev(xnew, b), + b(xnew), atol=1e-15, rtol=1e-15) + assert_allclose(splev(xnew, b.tck), + b(xnew), atol=1e-15, rtol=1e-15) + assert_allclose([splev(x, b) for x in xnew], + b(xnew), atol=1e-15, rtol=1e-15) + + # With n-D coefficients, there's a quirck: + # splev(x, BSpline) is equivalent to BSpline(x) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, + "Calling splev.. with BSpline objects with c.ndim > 1 is not recommended.") + assert_allclose(splev(xnew, b2), b2(xnew), atol=1e-15, rtol=1e-15) + + # However, splev(x, BSpline.tck) needs some transposes. This is because + # BSpline interpolates along the first axis, while the legacy FITPACK + # wrapper does list(map(...)) which effectively interpolates along the + # last axis. 
Like so: + sh = tuple(range(1, b2.c.ndim)) + (0,) # sh = (1, 2, 0) + cc = b2.c.transpose(sh) + tck = (b2.t, cc, b2.k) + assert_allclose(splev(xnew, tck), + b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15) + + def test_splrep(self): + x, y = self.xx, self.yy + # test that "new" splrep is equivalent to _impl.splrep + tck = splrep(x, y) + t, c, k = _impl.splrep(x, y) + assert_allclose(tck[0], t, atol=1e-15) + assert_allclose(tck[1], c, atol=1e-15) + assert_equal(tck[2], k) + + # also cover the `full_output=True` branch + tck_f, _, _, _ = splrep(x, y, full_output=True) + assert_allclose(tck_f[0], t, atol=1e-15) + assert_allclose(tck_f[1], c, atol=1e-15) + assert_equal(tck_f[2], k) + + # test that the result of splrep roundtrips with splev: + # evaluate the spline on the original `x` points + yy = splev(x, tck) + assert_allclose(y, yy, atol=1e-15) + + # ... and also it roundtrips if wrapped in a BSpline + b = BSpline(*tck) + assert_allclose(y, b(x), atol=1e-15) + + @pytest.mark.xfail(NumpyVersion(np.__version__) < '1.14.0', + reason='requires NumPy >= 1.14.0') + def test_splrep_errors(self): + # test that both "old" and "new" splrep raise for an n-D ``y`` array + # with n > 1 + x, y = self.xx, self.yy + y2 = np.c_[y, y] + with assert_raises(ValueError): + splrep(x, y2) + with assert_raises(ValueError): + _impl.splrep(x, y2) + + # input below minimum size + with assert_raises(TypeError, match="m > k must hold"): + splrep(x[:3], y[:3]) + with assert_raises(TypeError, match="m > k must hold"): + _impl.splrep(x[:3], y[:3]) + + def test_splprep(self): + x = np.arange(15).reshape((3, 5)) + b, u = splprep(x) + tck, u1 = _impl.splprep(x) + + # test the roundtrip with splev for both "old" and "new" output + assert_allclose(u, u1, atol=1e-15) + assert_allclose(splev(u, b), x, atol=1e-15) + assert_allclose(splev(u, tck), x, atol=1e-15) + + # cover the ``full_output=True`` branch + (b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True) + assert_allclose(u, u_f, atol=1e-15) + assert_allclose(splev(u_f, b_f), x, atol=1e-15) + + def test_splprep_errors(self): + # test that both "old" and "new" code paths raise for x.ndim > 2 + x = np.arange(3*4*5).reshape((3, 4, 5)) + with assert_raises(ValueError, match="too many values to unpack"): + splprep(x) + with assert_raises(ValueError, match="too many values to unpack"): + _impl.splprep(x) + + # input below minimum size + x = np.linspace(0, 40, num=3) + with assert_raises(TypeError, match="m > k must hold"): + splprep([x]) + with assert_raises(TypeError, match="m > k must hold"): + _impl.splprep([x]) + + # automatically calculated parameters are non-increasing + # see gh-7589 + x = [-50.49072266, -50.49072266, -54.49072266, -54.49072266] + with assert_raises(ValueError, match="Invalid inputs"): + splprep([x]) + with assert_raises(ValueError, match="Invalid inputs"): + _impl.splprep([x]) + + # given non-increasing parameter values u + x = [1, 3, 2, 4] + u = [0, 0.3, 0.2, 1] + with assert_raises(ValueError, match="Invalid inputs"): + splprep(*[[x], None, u]) + + def test_sproot(self): + b, b2 = self.b, self.b2 + roots = np.array([0.5, 1.5, 2.5, 3.5])*np.pi + # sproot accepts a BSpline obj w/ 1D coef array + assert_allclose(sproot(b), roots, atol=1e-7, rtol=1e-7) + assert_allclose(sproot((b.t, b.c, b.k)), roots, atol=1e-7, rtol=1e-7) + + # ... and deals with trailing dimensions if coef array is n-D + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, + "Calling sproot.. 
with BSpline objects with c.ndim > 1 is not recommended.") + r = sproot(b2, mest=50) + r = np.asarray(r) + + assert_equal(r.shape, (3, 2, 4)) + assert_allclose(r - roots, 0, atol=1e-12) + + # and legacy behavior is preserved for a tck tuple w/ n-D coef + c2r = b2.c.transpose(1, 2, 0) + rr = np.asarray(sproot((b2.t, c2r, b2.k), mest=50)) + assert_equal(rr.shape, (3, 2, 4)) + assert_allclose(rr - roots, 0, atol=1e-12) + + def test_splint(self): + # test that splint accepts BSpline objects + b, b2 = self.b, self.b2 + assert_allclose(splint(0, 1, b), + splint(0, 1, b.tck), atol=1e-14) + assert_allclose(splint(0, 1, b), + b.integrate(0, 1), atol=1e-14) + + # ... and deals with n-D arrays of coefficients + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, + "Calling splint.. with BSpline objects with c.ndim > 1 is not recommended.") + assert_allclose(splint(0, 1, b2), b2.integrate(0, 1), atol=1e-14) + + # and the legacy behavior is preserved for a tck tuple w/ n-D coef + c2r = b2.c.transpose(1, 2, 0) + integr = np.asarray(splint(0, 1, (b2.t, c2r, b2.k))) + assert_equal(integr.shape, (3, 2)) + assert_allclose(integr, + splint(0, 1, b), atol=1e-14) + + def test_splder(self): + for b in [self.b, self.b2]: + # pad the c array (FITPACK convention) + ct = len(b.t) - len(b.c) + if ct > 0: + b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])] + + for n in [1, 2, 3]: + bd = splder(b) + tck_d = _impl.splder((b.t, b.c, b.k)) + assert_allclose(bd.t, tck_d[0], atol=1e-15) + assert_allclose(bd.c, tck_d[1], atol=1e-15) + assert_equal(bd.k, tck_d[2]) + assert_(isinstance(bd, BSpline)) + assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out + + def test_splantider(self): + for b in [self.b, self.b2]: + # pad the c array (FITPACK convention) + ct = len(b.t) - len(b.c) + if ct > 0: + b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])] + + for n in [1, 2, 3]: + bd = splantider(b) + tck_d = _impl.splantider((b.t, b.c, b.k)) + assert_allclose(bd.t, tck_d[0], atol=1e-15) + assert_allclose(bd.c, tck_d[1], atol=1e-15) + assert_equal(bd.k, tck_d[2]) + assert_(isinstance(bd, BSpline)) + assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out + + def test_insert(self): + b, b2, xx = self.b, self.b2, self.xx + + j = b.t.size // 2 + tn = 0.5*(b.t[j] + b.t[j+1]) + + bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k)) + assert_allclose(splev(xx, bn), + splev(xx, tck_n), atol=1e-15) + assert_(isinstance(bn, BSpline)) + assert_(isinstance(tck_n, tuple)) # back-compat: tck in, tck out + + # for n-D array of coefficients, BSpline.c needs to be transposed + # after that, the results are equivalent. + sh = tuple(range(b2.c.ndim)) + c_ = b2.c.transpose(sh[1:] + (0,)) + tck_n2 = insert(tn, (b2.t, c_, b2.k)) + + bn2 = insert(tn, b2) + + # need a transpose for comparing the results, cf test_splev + assert_allclose(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1), + bn2(xx), atol=1e-15) + assert_(isinstance(bn2, BSpline)) + assert_(isinstance(tck_n2, tuple)) # back-compat: tck in, tck out + + +class TestInterp(object): + # + # Test basic ways of constructing interpolating splines. 
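+    # A typical construction, as a sketch:
+    #     b = make_interp_spline(xx, yy, k=3)   # not-a-knot by default
+    #     b = make_interp_spline(xx, yy, k=3,
+    #                            bc_type=([(2, 0.0)], [(2, 0.0)]))  # 'natural'
+    # Derivative constraints are (order, value) pairs, one list per edge.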
+ # + xx = np.linspace(0., 2.*np.pi) + yy = np.sin(xx) + + def test_non_int_order(self): + with assert_raises(TypeError): + make_interp_spline(self.xx, self.yy, k=2.5) + + def test_order_0(self): + b = make_interp_spline(self.xx, self.yy, k=0) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + b = make_interp_spline(self.xx, self.yy, k=0, axis=-1) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + + def test_linear(self): + b = make_interp_spline(self.xx, self.yy, k=1) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + b = make_interp_spline(self.xx, self.yy, k=1, axis=-1) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + + def test_not_a_knot(self): + for k in [3, 5]: + b = make_interp_spline(self.xx, self.yy, k) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + + def test_quadratic_deriv(self): + der = [(1, 8.)] # order, value: f'(x) = 8. + + # derivative at right-hand edge + b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(None, der)) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + assert_allclose(b(self.xx[-1], 1), der[0][1], atol=1e-14, rtol=1e-14) + + # derivative at left-hand edge + b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(der, None)) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + assert_allclose(b(self.xx[0], 1), der[0][1], atol=1e-14, rtol=1e-14) + + def test_cubic_deriv(self): + k = 3 + + # first derivatives at left & right edges: + der_l, der_r = [(1, 3.)], [(1, 4.)] + b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r)) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + assert_allclose([b(self.xx[0], 1), b(self.xx[-1], 1)], + [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14) + + # 'natural' cubic spline, zero out 2nd derivatives at the boundaries + der_l, der_r = [(2, 0)], [(2, 0)] + b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r)) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + + def test_quintic_derivs(self): + k, n = 5, 7 + x = np.arange(n).astype(np.float_) + y = np.sin(x) + der_l = [(1, -12.), (2, 1)] + der_r = [(1, 8.), (2, 3.)] + b = make_interp_spline(x, y, k=k, bc_type=(der_l, der_r)) + assert_allclose(b(x), y, atol=1e-14, rtol=1e-14) + assert_allclose([b(x[0], 1), b(x[0], 2)], + [val for (nu, val) in der_l]) + assert_allclose([b(x[-1], 1), b(x[-1], 2)], + [val for (nu, val) in der_r]) + + @pytest.mark.xfail(reason='unstable') + def test_cubic_deriv_unstable(self): + # 1st and 2nd derivative at x[0], no derivative information at x[-1] + # The problem is not that it fails [who would use this anyway], + # the problem is that it fails *silently*, and I've no idea + # how to detect this sort of instability. + # In this particular case: it's OK for len(t) < 20, goes haywire + # at larger `len(t)`. + k = 3 + t = _augknt(self.xx, k) + + der_l = [(1, 3.), (2, 4.)] + b = make_interp_spline(self.xx, self.yy, k, t, bc_type=(der_l, None)) + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + + def test_knots_not_data_sites(self): + # Knots need not coincide with the data sites. 
+ # use a quadratic spline, knots are at data averages, + # two additional constraints are zero 2nd derivs at edges + k = 2 + t = np.r_[(self.xx[0],)*(k+1), + (self.xx[1:] + self.xx[:-1]) / 2., + (self.xx[-1],)*(k+1)] + b = make_interp_spline(self.xx, self.yy, k, t, + bc_type=([(2, 0)], [(2, 0)])) + + assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14) + assert_allclose([b(self.xx[0], 2), b(self.xx[-1], 2)], [0., 0.], + atol=1e-14) + + def test_minimum_points_and_deriv(self): + # interpolation of f(x) = x**3 between 0 and 1. f'(x) = 3 * xx**2 and + # f'(0) = 0, f'(1) = 3. + k = 3 + x = [0., 1.] + y = [0., 1.] + b = make_interp_spline(x, y, k, bc_type=([(1, 0.)], [(1, 3.)])) + + xx = np.linspace(0., 1.) + yy = xx**3 + assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14) + + def test_deriv_spec(self): + # If one of the derivatives is omitted, the spline definition is + # incomplete. + x = y = [1.0, 2, 3, 4, 5, 6] + + with assert_raises(ValueError): + make_interp_spline(x, y, bc_type=([(1, 0.)], None)) + + with assert_raises(ValueError): + make_interp_spline(x, y, bc_type=(1, 0.)) + + with assert_raises(ValueError): + make_interp_spline(x, y, bc_type=[(1, 0.)]) + + with assert_raises(ValueError): + make_interp_spline(x, y, bc_type=42) + + # CubicSpline expects`bc_type=(left_pair, right_pair)`, while + # here we expect `bc_type=(iterable, iterable)`. + l, r = (1, 0.0), (1, 0.0) + with assert_raises(ValueError): + make_interp_spline(x, y, bc_type=(l, r)) + + def test_complex(self): + k = 3 + xx = self.xx + yy = self.yy + 1.j*self.yy + + # first derivatives at left & right edges: + der_l, der_r = [(1, 3.j)], [(1, 4.+2.j)] + b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r)) + assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14) + assert_allclose([b(xx[0], 1), b(xx[-1], 1)], + [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14) + + # also test zero and first order + for k in (0, 1): + b = make_interp_spline(xx, yy, k=k) + assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14) + + def test_int_xy(self): + x = np.arange(10).astype(np.int_) + y = np.arange(10).astype(np.int_) + + # cython chokes on "buffer type mismatch" (construction) or + # "no matching signature found" (evaluation) + for k in (0, 1, 2, 3): + b = make_interp_spline(x, y, k=k) + b(x) + + def test_sliced_input(self): + # cython code chokes on non C contiguous arrays + xx = np.linspace(-1, 1, 100) + + x = xx[::5] + y = xx[::5] + + for k in (0, 1, 2, 3): + make_interp_spline(x, y, k=k) + + def test_check_finite(self): + # check_finite defaults to True; nans and such trigger a ValueError + x = np.arange(10).astype(float) + y = x**2 + + for z in [np.nan, np.inf, -np.inf]: + y[-1] = z + assert_raises(ValueError, make_interp_spline, x, y) + + @pytest.mark.parametrize('k', [1, 2, 3, 5]) + def test_list_input(self, k): + # regression test for gh-8714: TypeError for x, y being lists and k=2 + x = list(range(10)) + y = [a**2 for a in x] + make_interp_spline(x, y, k=k) + + def test_multiple_rhs(self): + yy = np.c_[np.sin(self.xx), np.cos(self.xx)] + der_l = [(1, [1., 2.])] + der_r = [(1, [3., 4.])] + + b = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r)) + assert_allclose(b(self.xx), yy, atol=1e-14, rtol=1e-14) + assert_allclose(b(self.xx[0], 1), der_l[0][1], atol=1e-14, rtol=1e-14) + assert_allclose(b(self.xx[-1], 1), der_r[0][1], atol=1e-14, rtol=1e-14) + + def test_shapes(self): + np.random.seed(1234) + k, n = 3, 22 + x = np.sort(np.random.random(size=n)) + y = np.random.random(size=(n, 5, 6, 7)) + + b = 
make_interp_spline(x, y, k)
+        assert_equal(b.c.shape, (n, 5, 6, 7))
+
+        # now throw in some derivatives
+        d_l = [(1, np.random.random((5, 6, 7)))]
+        d_r = [(1, np.random.random((5, 6, 7)))]
+        b = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
+        assert_equal(b.c.shape, (n + k - 1, 5, 6, 7))
+
+    def test_string_aliases(self):
+        yy = np.sin(self.xx)
+
+        # a single string is duplicated
+        b1 = make_interp_spline(self.xx, yy, k=3, bc_type='natural')
+        b2 = make_interp_spline(self.xx, yy, k=3, bc_type=([(2, 0)], [(2, 0)]))
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # two strings are handled
+        b1 = make_interp_spline(self.xx, yy, k=3,
+                                bc_type=('natural', 'clamped'))
+        b2 = make_interp_spline(self.xx, yy, k=3,
+                                bc_type=([(2, 0)], [(1, 0)]))
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # one-sided BCs are OK
+        b1 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, 'clamped'))
+        b2 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, [(1, 0.0)]))
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # 'not-a-knot' is equivalent to None
+        b1 = make_interp_spline(self.xx, yy, k=3, bc_type='not-a-knot')
+        b2 = make_interp_spline(self.xx, yy, k=3, bc_type=None)
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # unknown strings do not pass
+        with assert_raises(ValueError):
+            make_interp_spline(self.xx, yy, k=3, bc_type='typo')
+
+        # string aliases are handled for 2D values
+        yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
+        der_l = [(1, [0., 0.])]
+        der_r = [(2, [0., 0.])]
+        b2 = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
+        b1 = make_interp_spline(self.xx, yy, k=3,
+                                bc_type=('clamped', 'natural'))
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # ... and for n-D values:
+        np.random.seed(1234)
+        k, n = 3, 22
+        x = np.sort(np.random.random(size=n))
+        y = np.random.random(size=(n, 5, 6, 7))
+
+        # now throw in some derivatives
+        d_l = [(1, np.zeros((5, 6, 7)))]
+        d_r = [(1, np.zeros((5, 6, 7)))]
+        b1 = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
+        b2 = make_interp_spline(x, y, k, bc_type='clamped')
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+    def test_full_matrix(self):
+        np.random.seed(1234)
+        k, n = 3, 7
+        x = np.sort(np.random.random(size=n))
+        y = np.random.random(size=n)
+        t = _not_a_knot(x, k)
+
+        b = make_interp_spline(x, y, k, t)
+        cf = make_interp_full_matr(x, y, t, k)
+        assert_allclose(b.c, cf, atol=1e-14, rtol=1e-14)
+
+
+def make_interp_full_matr(x, y, t, k):
+    """Assemble a spline of order k with knots t to interpolate
+    y(x) using full matrices.
+    Not-a-knot BC only.
+
+    This routine is here for testing only (even though it's functional).
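+
+    Row j of the collocation matrix holds the k+1 B-splines that are
+    nonzero at x[j], so solving ``A @ c = y`` yields the interpolating
+    coefficients.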
+ """ + assert x.size == y.size + assert t.size == x.size + k + 1 + n = x.size + + A = np.zeros((n, n), dtype=np.float_) + + for j in range(n): + xval = x[j] + if xval == t[k]: + left = k + else: + left = np.searchsorted(t, xval) - 1 + + # fill a row + bb = _bspl.evaluate_all_bspl(t, k, xval, left) + A[j, left-k:left+1] = bb + + c = sl.solve(A, y) + return c + + +### XXX: 'periodic' interp spline using full matrices +def make_interp_per_full_matr(x, y, t, k): + x, y, t = map(np.asarray, (x, y, t)) + + n = x.size + nt = t.size - k - 1 + + # have `n` conditions for `nt` coefficients; need nt-n derivatives + assert nt - n == k - 1 + + # LHS: the collocation matrix + derivatives at edges + A = np.zeros((nt, nt), dtype=np.float_) + + # derivatives at x[0]: + offset = 0 + + if x[0] == t[k]: + left = k + else: + left = np.searchsorted(t, x[0]) - 1 + + if x[-1] == t[k]: + left2 = k + else: + left2 = np.searchsorted(t, x[-1]) - 1 + + for i in range(k-1): + bb = _bspl.evaluate_all_bspl(t, k, x[0], left, nu=i+1) + A[i, left-k:left+1] = bb + bb = _bspl.evaluate_all_bspl(t, k, x[-1], left2, nu=i+1) + A[i, left2-k:left2+1] = -bb + offset += 1 + + # RHS + y = np.r_[[0]*(k-1), y] + + # collocation matrix + for j in range(n): + xval = x[j] + # find interval + if xval == t[k]: + left = k + else: + left = np.searchsorted(t, xval) - 1 + + # fill a row + bb = _bspl.evaluate_all_bspl(t, k, xval, left) + A[j + offset, left-k:left+1] = bb + + c = sl.solve(A, y) + return c + + +def make_lsq_full_matrix(x, y, t, k=3): + """Make the least-square spline, full matrices.""" + x, y, t = map(np.asarray, (x, y, t)) + m = x.size + n = t.size - k - 1 + + A = np.zeros((m, n), dtype=np.float_) + + for j in range(m): + xval = x[j] + # find interval + if xval == t[k]: + left = k + else: + left = np.searchsorted(t, xval) - 1 + + # fill a row + bb = _bspl.evaluate_all_bspl(t, k, xval, left) + A[j, left-k:left+1] = bb + + # have observation matrix, can solve the LSQ problem + B = np.dot(A.T, A) + Y = np.dot(A.T, y) + c = sl.solve(B, Y) + + return c, (A, Y) + + +class TestLSQ(object): + # + # Test make_lsq_spline + # + np.random.seed(1234) + n, k = 13, 3 + x = np.sort(np.random.random(n)) + y = np.random.random(n) + t = _augknt(np.linspace(x[0], x[-1], 7), k) + + def test_lstsq(self): + # check LSQ construction vs a full matrix version + x, y, t, k = self.x, self.y, self.t, self.k + + c0, AY = make_lsq_full_matrix(x, y, t, k) + b = make_lsq_spline(x, y, t, k) + + assert_allclose(b.c, c0) + assert_equal(b.c.shape, (t.size - k - 1,)) + + # also check against numpy.lstsq + aa, yy = AY + c1, _, _, _ = np.linalg.lstsq(aa, y, rcond=-1) + assert_allclose(b.c, c1) + + def test_weights(self): + # weights = 1 is same as None + x, y, t, k = self.x, self.y, self.t, self.k + w = np.ones_like(x) + + b = make_lsq_spline(x, y, t, k) + b_w = make_lsq_spline(x, y, t, k, w=w) + + assert_allclose(b.t, b_w.t, atol=1e-14) + assert_allclose(b.c, b_w.c, atol=1e-14) + assert_equal(b.k, b_w.k) + + def test_multiple_rhs(self): + x, t, k, n = self.x, self.t, self.k, self.n + y = np.random.random(size=(n, 5, 6, 7)) + + b = make_lsq_spline(x, y, t, k) + assert_equal(b.c.shape, (t.size-k-1, 5, 6, 7)) + + def test_complex(self): + # cmplx-valued `y` + x, t, k = self.x, self.t, self.k + yc = self.y * (1. 
+ 2.j) + + b = make_lsq_spline(x, yc, t, k) + b_re = make_lsq_spline(x, yc.real, t, k) + b_im = make_lsq_spline(x, yc.imag, t, k) + + assert_allclose(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15) + + def test_int_xy(self): + x = np.arange(10).astype(np.int_) + y = np.arange(10).astype(np.int_) + t = _augknt(x, k=1) + # cython chokes on "buffer type mismatch" + make_lsq_spline(x, y, t, k=1) + + def test_sliced_input(self): + # cython code chokes on non C contiguous arrays + xx = np.linspace(-1, 1, 100) + + x = xx[::3] + y = xx[::3] + t = _augknt(x, 1) + make_lsq_spline(x, y, t, k=1) + + def test_checkfinite(self): + # check_finite defaults to True; nans and such trigger a ValueError + x = np.arange(12).astype(float) + y = x**2 + t = _augknt(x, 3) + + for z in [np.nan, np.inf, -np.inf]: + y[-1] = z + assert_raises(ValueError, make_lsq_spline, x, y, t) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_bsplines.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_bsplines.pyc new file mode 100644 index 0000000..25c66b5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_bsplines.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack.py new file mode 100644 index 0000000..1bb8735 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack.py @@ -0,0 +1,463 @@ +from __future__ import division, print_function, absolute_import + +import os + +import numpy as np +from numpy.testing import (assert_equal, assert_allclose, assert_, + assert_almost_equal, assert_array_almost_equal) +from pytest import raises as assert_raises + +from numpy import array, asarray, pi, sin, cos, arange, dot, ravel, sqrt, round +from scipy import interpolate +from scipy.interpolate.fitpack import (splrep, splev, bisplrep, bisplev, + sproot, splprep, splint, spalde, splder, splantider, insert, dblint) +from scipy.interpolate.dfitpack import regrid_smth + + +def data_file(basename): + return os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'data', basename) + + +def norm2(x): + return sqrt(dot(x.T,x)) + + +def f1(x,d=0): + if d is None: + return "sin" + if x is None: + return "sin(x)" + if d % 4 == 0: + return sin(x) + if d % 4 == 1: + return cos(x) + if d % 4 == 2: + return -sin(x) + if d % 4 == 3: + return -cos(x) + + +def f2(x,y=0,dx=0,dy=0): + if x is None: + return "sin(x+y)" + d = dx+dy + if d % 4 == 0: + return sin(x+y) + if d % 4 == 1: + return cos(x+y) + if d % 4 == 2: + return -sin(x+y) + if d % 4 == 3: + return -cos(x+y) + + +def makepairs(x, y): + """Helper function to create an array of pairs of x and y.""" + # Or itertools.product (>= python 2.6) + xy = array([[a, b] for a in asarray(x) for b in asarray(y)]) + return xy.T + + +def put(*a): + """Produce some output if file run directly""" + import sys + if hasattr(sys.modules['__main__'], '__put_prints'): + sys.stderr.write("".join(map(str, a)) + "\n") + + +class TestSmokeTests(object): + """ + Smoke tests (with a few asserts) for fitpack routines -- mostly + check that they are runnable + """ + + def check_1(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,at=0,xb=None,xe=None): + if xb is None: + xb = a + if xe is None: + xe = b + x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes + x1 = a+(b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes + v,v1 = f(x),f(x1) + nk = [] 
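+        # (err_est below models the expected convergence of a degree-k
+        # spline on an N-point grid: the error of the d-th derivative is
+        # taken to scale like h**(0.75*(k-d)), with a large slack of
+        # 1e5*s added whenever smoothing is requested)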
+ + def err_est(k, d): + # Assume f has all derivatives < 1 + h = 1.0/float(N) + tol = 5 * h**(.75*(k-d)) + if s > 0: + tol += 1e5*s + return tol + + for k in range(1,6): + tck = splrep(x,v,s=s,per=per,k=k,xe=xe) + if at: + t = tck[0][k:-k] + else: + t = x1 + nd = [] + for d in range(k+1): + tol = err_est(k, d) + err = norm2(f(t,d)-splev(t,tck,d)) / norm2(f(t,d)) + assert_(err < tol, (k, d, err, tol)) + nd.append((err, tol)) + nk.append(nd) + put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None), + repr(round(xb,3)),repr(round(xe,3)), + repr(round(a,3)),repr(round(b,3)))) + if at: + str = "at knots" + else: + str = "at the middle of nodes" + put(" per=%d s=%s Evaluation %s" % (per,repr(s),str)) + put(" k : |f-s|^2 |f'-s'| |f''-.. |f'''-. |f''''- |f'''''") + k = 1 + for l in nk: + put(' %d : ' % k) + for r in l: + put(' %.1e %.1e' % r) + put('\n') + k = k+1 + + def check_2(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, + ia=0,ib=2*pi,dx=0.2*pi): + if xb is None: + xb = a + if xe is None: + xe = b + x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes + v = f(x) + + def err_est(k, d): + # Assume f has all derivatives < 1 + h = 1.0/float(N) + tol = 5 * h**(.75*(k-d)) + if s > 0: + tol += 1e5*s + return tol + + nk = [] + for k in range(1,6): + tck = splrep(x,v,s=s,per=per,k=k,xe=xe) + nk.append([splint(ia,ib,tck),spalde(dx,tck)]) + put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None), + repr(round(xb,3)),repr(round(xe,3)), + repr(round(a,3)),repr(round(b,3)))) + put(" per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s" % (per,repr(s),N,repr(round(ia,3)),repr(round(ib,3)),repr(round(dx,3)))) + put(" k : int(s,[a,b]) Int.Error Rel. error of s^(d)(dx) d = 0, .., k") + k = 1 + for r in nk: + if r[0] < 0: + sr = '-' + else: + sr = ' ' + put(" %d %s%.8f %.1e " % (k,sr,abs(r[0]), + abs(r[0]-(f(ib,-1)-f(ia,-1))))) + d = 0 + for dr in r[1]: + err = abs(1-dr/f(dx,d)) + tol = err_est(k, d) + assert_(err < tol, (k, d)) + put(" %.1e %.1e" % (err, tol)) + d = d+1 + put("\n") + k = k+1 + + def check_3(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, + ia=0,ib=2*pi,dx=0.2*pi): + if xb is None: + xb = a + if xe is None: + xe = b + x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes + v = f(x) + put(" k : Roots of s(x) approx %s x in [%s,%s]:" % + (f(None),repr(round(a,3)),repr(round(b,3)))) + for k in range(1,6): + tck = splrep(x, v, s=s, per=per, k=k, xe=xe) + if k == 3: + roots = sproot(tck) + assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10) + assert_allclose(roots, pi*array([1, 2, 3, 4]), rtol=1e-3) + put(' %d : %s' % (k, repr(roots.tolist()))) + else: + assert_raises(ValueError, sproot, tck) + + def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, + ia=0,ib=2*pi,dx=0.2*pi): + if xb is None: + xb = a + if xe is None: + xe = b + x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes + x1 = a + (b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes + v,v1 = f(x),f(x1) + put(" u = %s N = %d" % (repr(round(dx,3)),N)) + put(" k : [x(u), %s(x(u))] Error of splprep Error of splrep " % (f(0,None))) + for k in range(1,6): + tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1) + tck = splrep(x,v,s=s,per=per,k=k) + uv = splev(dx,tckp) + err1 = abs(uv[1]-f(uv[0])) + err2 = abs(splev(uv[0],tck)-f(uv[0])) + assert_(err1 < 1e-2) + assert_(err2 < 1e-2) + put(" %d : %s %.1e %.1e" % + (k,repr([round(z,3) for z in uv]), + err1, + err2)) + put("Derivatives of parametric cubic spline at u (first function):") + k = 3 + tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1) + for d 
in range(1,k+1): + uv = splev(dx,tckp,d) + put(" %s " % (repr(uv[0]))) + + def check_5(self,f=f2,kx=3,ky=3,xb=0,xe=2*pi,yb=0,ye=2*pi,Nx=20,Ny=20,s=0): + x = xb+(xe-xb)*arange(Nx+1,dtype=float)/float(Nx) + y = yb+(ye-yb)*arange(Ny+1,dtype=float)/float(Ny) + xy = makepairs(x,y) + tck = bisplrep(xy[0],xy[1],f(xy[0],xy[1]),s=s,kx=kx,ky=ky) + tt = [tck[0][kx:-kx],tck[1][ky:-ky]] + t2 = makepairs(tt[0],tt[1]) + v1 = bisplev(tt[0],tt[1],tck) + v2 = f2(t2[0],t2[1]) + v2.shape = len(tt[0]),len(tt[1]) + err = norm2(ravel(v1-v2)) + assert_(err < 1e-2, err) + put(err) + + def test_smoke_splrep_splev(self): + put("***************** splrep/splev") + self.check_1(s=1e-6) + self.check_1() + self.check_1(at=1) + self.check_1(per=1) + self.check_1(per=1,at=1) + self.check_1(b=1.5*pi) + self.check_1(b=1.5*pi,xe=2*pi,per=1,s=1e-1) + + def test_smoke_splint_spalde(self): + put("***************** splint/spalde") + self.check_2() + self.check_2(per=1) + self.check_2(ia=0.2*pi,ib=pi) + self.check_2(ia=0.2*pi,ib=pi,N=50) + + def test_smoke_sproot(self): + put("***************** sproot") + self.check_3(a=0.1,b=15) + + def test_smoke_splprep_splrep_splev(self): + put("***************** splprep/splrep/splev") + self.check_4() + self.check_4(N=50) + + def test_smoke_bisplrep_bisplev(self): + put("***************** bisplev") + self.check_5() + + +class TestSplev(object): + def test_1d_shape(self): + x = [1,2,3,4,5] + y = [4,5,6,7,8] + tck = splrep(x, y) + z = splev([1], tck) + assert_equal(z.shape, (1,)) + z = splev(1, tck) + assert_equal(z.shape, ()) + + def test_2d_shape(self): + x = [1, 2, 3, 4, 5] + y = [4, 5, 6, 7, 8] + tck = splrep(x, y) + t = np.array([[1.0, 1.5, 2.0, 2.5], + [3.0, 3.5, 4.0, 4.5]]) + z = splev(t, tck) + z0 = splev(t[0], tck) + z1 = splev(t[1], tck) + assert_equal(z, np.row_stack((z0, z1))) + + def test_extrapolation_modes(self): + # test extrapolation modes + # * if ext=0, return the extrapolated value. + # * if ext=1, return 0 + # * if ext=2, raise a ValueError + # * if ext=3, return the boundary value. + x = [1,2,3] + y = [0,2,4] + tck = splrep(x, y, k=1) + + rstl = [[-2, 6], [0, 0], None, [0, 4]] + for ext in (0, 1, 3): + assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext]) + + assert_raises(ValueError, splev, [0, 4], tck, ext=2) + + +class TestSplder(object): + def setup_method(self): + # non-uniform grid, just to make it sure + x = np.linspace(0, 1, 100)**3 + y = np.sin(20 * x) + self.spl = splrep(x, y) + + # double check that knots are non-uniform + assert_(np.diff(self.spl[0]).ptp() > 0) + + def test_inverse(self): + # Check that antiderivative + derivative is identity. + for n in range(5): + spl2 = splantider(self.spl, n) + spl3 = splder(spl2, n) + assert_allclose(self.spl[0], spl3[0]) + assert_allclose(self.spl[1], spl3[1]) + assert_equal(self.spl[2], spl3[2]) + + def test_splder_vs_splev(self): + # Check derivative vs. FITPACK + + for n in range(3+1): + # Also extrapolation! + xx = np.linspace(-1, 2, 2000) + if n == 3: + # ... except that FITPACK extrapolates strangely for + # order 0, so let's not check that. + xx = xx[(xx >= 0) & (xx <= 1)] + + dy = splev(xx, self.spl, n) + spl2 = splder(self.spl, n) + dy2 = splev(xx, spl2) + if n == 1: + assert_allclose(dy, dy2, rtol=2e-6) + else: + assert_allclose(dy, dy2) + + def test_splantider_vs_splint(self): + # Check antiderivative vs. 
FITPACK + spl2 = splantider(self.spl) + + # no extrapolation, splint assumes function is zero outside + # range + xx = np.linspace(0, 1, 20) + + for x1 in xx: + for x2 in xx: + y1 = splint(x1, x2, self.spl) + y2 = splev(x2, spl2) - splev(x1, spl2) + assert_allclose(y1, y2) + + def test_order0_diff(self): + assert_raises(ValueError, splder, self.spl, 4) + + def test_kink(self): + # Should refuse to differentiate splines with kinks + + spl2 = insert(0.5, self.spl, m=2) + splder(spl2, 2) # Should work + assert_raises(ValueError, splder, spl2, 3) + + spl2 = insert(0.5, self.spl, m=3) + splder(spl2, 1) # Should work + assert_raises(ValueError, splder, spl2, 2) + + spl2 = insert(0.5, self.spl, m=4) + assert_raises(ValueError, splder, spl2, 1) + + def test_multidim(self): + # c can have trailing dims + for n in range(3): + t, c, k = self.spl + c2 = np.c_[c, c, c] + c2 = np.dstack((c2, c2)) + + spl2 = splantider((t, c2, k), n) + spl3 = splder(spl2, n) + + assert_allclose(t, spl3[0]) + assert_allclose(c2, spl3[1]) + assert_equal(k, spl3[2]) + + +class TestBisplrep(object): + def test_overflow(self): + a = np.linspace(0, 1, 620) + b = np.linspace(0, 1, 620) + x, y = np.meshgrid(a, b) + z = np.random.rand(*x.shape) + assert_raises(OverflowError, bisplrep, x.ravel(), y.ravel(), z.ravel(), s=0) + + def test_regression_1310(self): + # Regression test for gh-1310 + data = np.load(data_file('bug-1310.npz'))['data'] + + # Shouldn't crash -- the input data triggers work array sizes + # that caused previously some data to not be aligned on + # sizeof(double) boundaries in memory, which made the Fortran + # code to crash when compiled with -O3 + bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0, + full_output=True) + + +def test_dblint(): + # Basic test to see it runs and gives the correct result on a trivial + # problem. Note that `dblint` is not exposed in the interpolate namespace. + x = np.linspace(0, 1) + y = np.linspace(0, 1) + xx, yy = np.meshgrid(x, y) + rect = interpolate.RectBivariateSpline(x, y, 4 * xx * yy) + tck = list(rect.tck) + tck.extend(rect.degrees) + + assert_almost_equal(dblint(0, 1, 0, 1, tck), 1) + assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25) + assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75) + assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1) + + +def test_splev_der_k(): + # regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes + # for x outside of knot range + + # test case from gh-2188 + tck = (np.array([0., 0., 2.5, 2.5]), + np.array([-1.56679978, 2.43995873, 0., 0.]), + 1) + t, c, k = tck + x = np.array([-3, 0, 2.5, 3]) + + # an explicit form of the linear spline + assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2]) + assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2]) + + # now check a random spline vs splder + np.random.seed(1234) + x = np.sort(np.random.random(30)) + y = np.random.random(30) + t, c, k = splrep(x, y) + + x = [t[0] - 1., t[-1] + 1.] 
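+    # Evaluating der=k directly and evaluating the spline returned by
+    # splder should agree, including at the two query points that lie
+    # outside the knot range.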
+ tck2 = splder((t, c, k), k) + assert_allclose(splev(x, (t, c, k), k), splev(x, tck2)) + + +def test_bisplev_integer_overflow(): + np.random.seed(1) + + x = np.linspace(0, 1, 11) + y = x + z = np.random.randn(11, 11).ravel() + kx = 1 + ky = 1 + + nx, tx, ny, ty, c, fp, ier = regrid_smth( + x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0) + tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky) + + xp = np.zeros([2621440]) + yp = np.zeros([2621440]) + + assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack.pyc new file mode 100644 index 0000000..ba32701 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack2.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack2.py new file mode 100644 index 0000000..39a5e1d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack2.py @@ -0,0 +1,511 @@ +# Created by Pearu Peterson, June 2003 +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose) +from scipy._lib._numpy_compat import suppress_warnings +from pytest import raises as assert_raises + +from numpy import array, diff, linspace, meshgrid, ones, pi, shape +from scipy.interpolate.fitpack import bisplrep, bisplev +from scipy.interpolate.fitpack2 import (UnivariateSpline, + LSQUnivariateSpline, InterpolatedUnivariateSpline, + LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline, + LSQSphereBivariateSpline, SmoothSphereBivariateSpline, + RectSphereBivariateSpline) + + +class TestUnivariateSpline(object): + def test_linear_constant(self): + x = [1,2,3] + y = [3,3,3] + lut = UnivariateSpline(x,y,k=1) + assert_array_almost_equal(lut.get_knots(),[1,3]) + assert_array_almost_equal(lut.get_coeffs(),[3,3]) + assert_almost_equal(lut.get_residual(),0.0) + assert_array_almost_equal(lut([1,1.5,2]),[3,3,3]) + + def test_preserve_shape(self): + x = [1, 2, 3] + y = [0, 2, 4] + lut = UnivariateSpline(x, y, k=1) + arg = 2 + assert_equal(shape(arg), shape(lut(arg))) + assert_equal(shape(arg), shape(lut(arg, nu=1))) + arg = [1.5, 2, 2.5] + assert_equal(shape(arg), shape(lut(arg))) + assert_equal(shape(arg), shape(lut(arg, nu=1))) + + def test_linear_1d(self): + x = [1,2,3] + y = [0,2,4] + lut = UnivariateSpline(x,y,k=1) + assert_array_almost_equal(lut.get_knots(),[1,3]) + assert_array_almost_equal(lut.get_coeffs(),[0,4]) + assert_almost_equal(lut.get_residual(),0.0) + assert_array_almost_equal(lut([1,1.5,2]),[0,1,2]) + + def test_subclassing(self): + # See #731 + + class ZeroSpline(UnivariateSpline): + def __call__(self, x): + return 0*array(x) + + sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2) + assert_array_equal(sp([1.5, 2.5]), [0., 0.]) + + def test_empty_input(self): + # Test whether empty input returns an empty output. 
Ticket 1014 + x = [1,3,5,7,9] + y = [0,4,9,12,21] + spl = UnivariateSpline(x, y, k=3) + assert_array_equal(spl([]), array([])) + + def test_resize_regression(self): + """Regression test for #1375.""" + x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892, + -0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235, + 0.65016502, 1.] + y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061, + 0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223, + 0.62928599, 1.] + w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02, + 6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02, + 6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02, + 1.00000000e+12] + spl = UnivariateSpline(x=x, y=y, w=w, s=None) + desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344]) + assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4) + + def test_out_of_range_regression(self): + # Test different extrapolation modes. See ticket 3557 + x = np.arange(5, dtype=float) + y = x**3 + + xp = linspace(-8, 13, 100) + xp_zeros = xp.copy() + xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0 + xp_clip = xp.copy() + xp_clip[xp_clip < x[0]] = x[0] + xp_clip[xp_clip > x[-1]] = x[-1] + + for cls in [UnivariateSpline, InterpolatedUnivariateSpline]: + spl = cls(x=x, y=y) + for ext in [0, 'extrapolate']: + assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16) + assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16) + for ext in [1, 'zeros']: + assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16) + assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16) + for ext in [2, 'raise']: + assert_raises(ValueError, spl, xp, **dict(ext=ext)) + for ext in [3, 'const']: + assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16) + assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16) + + # also test LSQUnivariateSpline [which needs explicit knots] + t = spl.get_knots()[3:4] # interior knots w/ default k=3 + spl = LSQUnivariateSpline(x, y, t) + assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16) + assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16) + assert_raises(ValueError, spl, xp, **dict(ext=2)) + assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16) + + # also make sure that unknown values for `ext` are caught early + for ext in [-1, 'unknown']: + spl = UnivariateSpline(x, y) + assert_raises(ValueError, spl, xp, **dict(ext=ext)) + assert_raises(ValueError, UnivariateSpline, + **dict(x=x, y=y, ext=ext)) + + def test_lsq_fpchec(self): + xs = np.arange(100) * 1. + ys = np.arange(100) * 1. + knots = np.linspace(0, 99, 10) + bbox = (-1, 101) + assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots, + bbox=bbox) + + def test_derivative_and_antiderivative(self): + # Thin wrappers to splder/splantider, so light smoke test only. 
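+        # Round trip: the second derivative of the second antiderivative
+        # should reproduce the original spline's values.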
+ x = np.linspace(0, 1, 70)**3 + y = np.cos(x) + + spl = UnivariateSpline(x, y, s=0) + spl2 = spl.antiderivative(2).derivative(2) + assert_allclose(spl(0.3), spl2(0.3)) + + spl2 = spl.antiderivative(1) + assert_allclose(spl2(0.6) - spl2(0.2), + spl.integral(0.2, 0.6)) + + def test_nan(self): + # bail out early if the input data contains nans + x = np.arange(10, dtype=float) + y = x**3 + w = np.ones_like(x) + # also test LSQUnivariateSpline [which needs explicit knots] + spl = UnivariateSpline(x, y, check_finite=True) + t = spl.get_knots()[3:4] # interior knots w/ default k=3 + y_end = y[-1] + for z in [np.nan, np.inf, -np.inf]: + y[-1] = z + assert_raises(ValueError, UnivariateSpline, + **dict(x=x, y=y, check_finite=True)) + assert_raises(ValueError, InterpolatedUnivariateSpline, + **dict(x=x, y=y, check_finite=True)) + assert_raises(ValueError, LSQUnivariateSpline, + **dict(x=x, y=y, t=t, check_finite=True)) + y[-1] = y_end # check valid y but invalid w + w[-1] = z + assert_raises(ValueError, UnivariateSpline, + **dict(x=x, y=y, w=w, check_finite=True)) + assert_raises(ValueError, InterpolatedUnivariateSpline, + **dict(x=x, y=y, w=w, check_finite=True)) + assert_raises(ValueError, LSQUnivariateSpline, + **dict(x=x, y=y, t=t, w=w, check_finite=True)) + + def test_increasing_x(self): + xx = np.arange(10, dtype=float) + yy = xx**3 + x = np.arange(10, dtype=float) + x[1] = x[0] + y = x**3 + w = np.ones_like(x) + # also test LSQUnivariateSpline [which needs explicit knots] + spl = UnivariateSpline(xx, yy, check_finite=True) + t = spl.get_knots()[3:4] # interior knots w/ default k=3 + assert_raises(ValueError, UnivariateSpline, + **dict(x=x, y=y, check_finite=True)) + assert_raises(ValueError, InterpolatedUnivariateSpline, + **dict(x=x, y=y, check_finite=True)) + assert_raises(ValueError, LSQUnivariateSpline, + **dict(x=x, y=y, t=t, w=w, check_finite=True)) + + +class TestLSQBivariateSpline(object): + # NOTE: The systems in this test class are rank-deficient + def test_linear_constant(self): + x = [1,1,1,2,2,2,3,3,3] + y = [1,2,3,1,2,3,1,2,3] + z = [3,3,3,3,3,3,3,3,3] + s = 0.1 + tx = [1+s,3-s] + ty = [1+s,3-s] + with suppress_warnings() as sup: + r = sup.record(UserWarning, "\nThe coefficients of the spline") + lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1) + assert_equal(len(r), 1) + + assert_almost_equal(lut(2,2), 3.) + + def test_bilinearity(self): + x = [1,1,1,2,2,2,3,3,3] + y = [1,2,3,1,2,3,1,2,3] + z = [0,7,8,3,4,7,1,3,4] + s = 0.1 + tx = [1+s,3-s] + ty = [1+s,3-s] + with suppress_warnings() as sup: + # This seems to fail (ier=1, see ticket 1642). 
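+            # Suppress the expected FITPACK coefficient warning so the
+            # bilinearity check below still runs.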
+ sup.filter(UserWarning, "\nThe coefficients of the spline") + lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1) + + tx, ty = lut.get_knots() + for xa, xb in zip(tx[:-1], tx[1:]): + for ya, yb in zip(ty[:-1], ty[1:]): + for t in [0.1, 0.5, 0.9]: + for s in [0.3, 0.4, 0.7]: + xp = xa*(1-t) + xb*t + yp = ya*(1-s) + yb*s + zp = (+ lut(xa, ya)*(1-t)*(1-s) + + lut(xb, ya)*t*(1-s) + + lut(xa, yb)*(1-t)*s + + lut(xb, yb)*t*s) + assert_almost_equal(lut(xp,yp), zp) + + def test_integral(self): + x = [1,1,1,2,2,2,8,8,8] + y = [1,2,3,1,2,3,1,2,3] + z = array([0,7,8,3,4,7,1,3,4]) + + s = 0.1 + tx = [1+s,3-s] + ty = [1+s,3-s] + with suppress_warnings() as sup: + r = sup.record(UserWarning, "\nThe coefficients of the spline") + lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1) + assert_equal(len(r), 1) + tx, ty = lut.get_knots() + tz = lut(tx, ty) + trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:] + * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum() + + assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), + trpz) + + def test_empty_input(self): + # Test whether empty inputs returns an empty output. Ticket 1014 + x = [1,1,1,2,2,2,3,3,3] + y = [1,2,3,1,2,3,1,2,3] + z = [3,3,3,3,3,3,3,3,3] + s = 0.1 + tx = [1+s,3-s] + ty = [1+s,3-s] + with suppress_warnings() as sup: + r = sup.record(UserWarning, "\nThe coefficients of the spline") + lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1) + assert_equal(len(r), 1) + + assert_array_equal(lut([], []), np.zeros((0,0))) + assert_array_equal(lut([], [], grid=False), np.zeros((0,))) + + +class TestSmoothBivariateSpline(object): + def test_linear_constant(self): + x = [1,1,1,2,2,2,3,3,3] + y = [1,2,3,1,2,3,1,2,3] + z = [3,3,3,3,3,3,3,3,3] + lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1) + assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3])) + assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3]) + assert_almost_equal(lut.get_residual(),0.0) + assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]]) + + def test_linear_1d(self): + x = [1,1,1,2,2,2,3,3,3] + y = [1,2,3,1,2,3,1,2,3] + z = [0,0,0,2,2,2,4,4,4] + lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1) + assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3])) + assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4]) + assert_almost_equal(lut.get_residual(),0.0) + assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]]) + + def test_integral(self): + x = [1,1,1,2,2,2,4,4,4] + y = [1,2,3,1,2,3,1,2,3] + z = array([0,7,8,3,4,7,1,3,4]) + + with suppress_warnings() as sup: + # This seems to fail (ier=1, see ticket 1642). + sup.filter(UserWarning, "\nThe required storage space") + lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0) + + tx = [1,2,4] + ty = [1,2,3] + + tz = lut(tx, ty) + trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:] + * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum() + assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz) + + lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0) + assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz, + decimal=0) # the quadratures give 23.75 and 23.85 + + tz = lut(tx[:-1], ty[:-1]) + trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:] + * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum() + assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz) + + def test_rerun_lwrk2_too_small(self): + # in this setting, lwrk2 is too small in the default run. 
Here we + # check for equality with the bisplrep/bisplev output because there, + # an automatic re-run of the spline representation is done if ier>10. + x = np.linspace(-2, 2, 80) + y = np.linspace(-2, 2, 80) + z = x + y + xi = np.linspace(-1, 1, 100) + yi = np.linspace(-2, 2, 100) + tck = bisplrep(x, y, z) + res1 = bisplev(xi, yi, tck) + interp_ = SmoothBivariateSpline(x, y, z) + res2 = interp_(xi, yi) + assert_almost_equal(res1, res2) + + +class TestLSQSphereBivariateSpline(object): + def setup_method(self): + # define the input data and coordinates + ntheta, nphi = 70, 90 + theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi + phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi + data = ones((theta.shape[0], phi.shape[0])) + # define knots and extract data values at the knots + knotst = theta[::5] + knotsp = phi[::5] + knotdata = data[::5, ::5] + # calculate spline coefficients + lats, lons = meshgrid(theta, phi) + lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), + data.T.ravel(), knotst, knotsp) + self.lut_lsq = lut_lsq + self.data = knotdata + self.new_lons, self.new_lats = knotsp, knotst + + def test_linear_constant(self): + assert_almost_equal(self.lut_lsq.get_residual(), 0.0) + assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons), + self.data) + + def test_empty_input(self): + assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0))) + assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,))) + + +class TestSmoothSphereBivariateSpline(object): + def setup_method(self): + theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi, + .75*pi, .75*pi]) + phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi, + 1.5 * pi]) + r = array([3, 3, 3, 3, 3, 3, 3, 3, 3]) + self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10) + + def test_linear_constant(self): + assert_almost_equal(self.lut.get_residual(), 0.) + assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]), + [[3, 3], [3, 3], [3, 3]]) + + def test_empty_input(self): + assert_array_almost_equal(self.lut([], []), np.zeros((0,0))) + assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,))) + + +class TestRectBivariateSpline(object): + def test_defaults(self): + x = array([1,2,3,4,5]) + y = array([1,2,3,4,5]) + z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]]) + lut = RectBivariateSpline(x,y,z) + assert_array_almost_equal(lut(x,y),z) + + def test_evaluate(self): + x = array([1,2,3,4,5]) + y = array([1,2,3,4,5]) + z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]]) + lut = RectBivariateSpline(x,y,z) + + xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3] + yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3] + zi = lut.ev(xi, yi) + zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)]) + + assert_almost_equal(zi, zi2) + + def test_derivatives_grid(self): + x = array([1,2,3,4,5]) + y = array([1,2,3,4,5]) + z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]]) + dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0], + [0,0,-11,0,0],[0,0,4,0,0]])/6. + dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0], + [2,.25,0,-.25,-2],[4,-1,0,1,-4]]) + dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26], + [-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6. 
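+        # dx, dy and dxdy hold the expected first and mixed partial
+        # derivatives of the fitted spline, tabulated on the full grid.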
+ lut = RectBivariateSpline(x,y,z) + assert_array_almost_equal(lut(x,y,dx=1),dx) + assert_array_almost_equal(lut(x,y,dy=1),dy) + assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy) + + def test_derivatives(self): + x = array([1,2,3,4,5]) + y = array([1,2,3,4,5]) + z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]]) + dx = array([0,0,2./3,0,0]) + dy = array([4,-1,0,-.25,-4]) + dxdy = array([160,65,0,55,32])/24. + lut = RectBivariateSpline(x,y,z) + assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx) + assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy) + assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy) + + def test_broadcast(self): + x = array([1,2,3,4,5]) + y = array([1,2,3,4,5]) + z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]]) + lut = RectBivariateSpline(x,y,z) + assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False)) + + +class TestRectSphereBivariateSpline(object): + def test_defaults(self): + y = linspace(0.01, 2*pi-0.01, 7) + x = linspace(0.01, pi-0.01, 7) + z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1], + [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1], + [1,2,1,2,1,2,1]]) + lut = RectSphereBivariateSpline(x,y,z) + assert_array_almost_equal(lut(x,y),z) + + def test_evaluate(self): + y = linspace(0.01, 2*pi-0.01, 7) + x = linspace(0.01, pi-0.01, 7) + z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1], + [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1], + [1,2,1,2,1,2,1]]) + lut = RectSphereBivariateSpline(x,y,z) + yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25] + xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001] + zi = lut.ev(xi, yi) + zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)]) + assert_almost_equal(zi, zi2) + + def test_derivatives_grid(self): + y = linspace(0.01, 2*pi-0.01, 7) + x = linspace(0.01, pi-0.01, 7) + z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1], + [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1], + [1,2,1,2,1,2,1]]) + + lut = RectSphereBivariateSpline(x,y,z) + + y = linspace(0.02, 2*pi-0.02, 7) + x = linspace(0.02, pi-0.02, 7) + + assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1), + rtol=1e-4, atol=1e-4) + assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1), + rtol=1e-4, atol=1e-4) + assert_allclose(lut(x, y, dtheta=1, dphi=1), _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6), + rtol=1e-3, atol=1e-3) + + def test_derivatives(self): + y = linspace(0.01, 2*pi-0.01, 7) + x = linspace(0.01, pi-0.01, 7) + z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1], + [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1], + [1,2,1,2,1,2,1]]) + + lut = RectSphereBivariateSpline(x,y,z) + + y = linspace(0.02, 2*pi-0.02, 7) + x = linspace(0.02, pi-0.02, 7) + + assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape) + assert_allclose(lut(x, y, dtheta=1, grid=False), + _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1), + rtol=1e-4, atol=1e-4) + assert_allclose(lut(x, y, dphi=1, grid=False), + _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1), + rtol=1e-4, atol=1e-4) + assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False), + _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1, dy=1, eps=1e-6), + rtol=1e-3, atol=1e-3) + + +def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8): + if dx == 0 and dy == 0: + return func(x, y) + elif dx == 1 and dy == 0: + return (func(x + eps, y) - func(x - eps, y)) / (2*eps) + elif dx == 0 and dy == 1: + return (func(x, y + eps) - func(x, y - eps)) / (2*eps) + elif dx == 1 and dy == 1: + return (func(x + 
eps, y + eps) - func(x - eps, y + eps) + - func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2 + else: + raise ValueError("invalid derivative order") diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack2.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack2.pyc new file mode 100644 index 0000000..5be24ca Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack2.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_gil.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_gil.py new file mode 100644 index 0000000..c4674d6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_gil.py @@ -0,0 +1,67 @@ +from __future__ import division, print_function, absolute_import + +import itertools +import threading +import time + +import numpy as np +from numpy.testing import assert_equal +import pytest +import scipy.interpolate + + +class TestGIL(object): + """Check if the GIL is properly released by scipy.interpolate functions.""" + + def setup_method(self): + self.messages = [] + + def log(self, message): + self.messages.append(message) + + def make_worker_thread(self, target, args): + log = self.log + + class WorkerThread(threading.Thread): + def run(self): + log('interpolation started') + target(*args) + log('interpolation complete') + + return WorkerThread() + + @pytest.mark.slow + @pytest.mark.xfail(reason='race conditions, may depend on system load') + def test_rectbivariatespline(self): + def generate_params(n_points): + x = y = np.linspace(0, 1000, n_points) + x_grid, y_grid = np.meshgrid(x, y) + z = x_grid * y_grid + return x, y, z + + def calibrate_delay(requested_time): + for n_points in itertools.count(5000, 1000): + args = generate_params(n_points) + time_started = time.time() + interpolate(*args) + if time.time() - time_started > requested_time: + return args + + def interpolate(x, y, z): + scipy.interpolate.RectBivariateSpline(x, y, z) + + args = calibrate_delay(requested_time=3) + worker_thread = self.make_worker_thread(interpolate, args) + worker_thread.start() + for i in range(3): + time.sleep(0.5) + self.log('working') + worker_thread.join() + assert_equal(self.messages, [ + 'interpolation started', + 'working', + 'working', + 'working', + 'interpolation complete', + ]) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_gil.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_gil.pyc new file mode 100644 index 0000000..3cc4bad Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_gil.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpnd.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpnd.py new file mode 100644 index 0000000..3ddd8af --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpnd.py @@ -0,0 +1,388 @@ +from __future__ import division, print_function, absolute_import + +import os + +import numpy as np +from numpy.testing import assert_equal, assert_allclose, assert_almost_equal +from pytest import raises as assert_raises +import pytest +from scipy._lib._numpy_compat import suppress_warnings + +import scipy.interpolate.interpnd as interpnd +import scipy.spatial.qhull as qhull + +import pickle + + +def data_file(basename): + return 
os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                        'data', basename)
+
+
+class TestLinearNDInterpolation(object):
+    def test_smoketest(self):
+        # Test at single points
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+
+        yi = interpnd.LinearNDInterpolator(x, y)(x)
+        assert_almost_equal(y, yi)
+
+    def test_smoketest_alternate(self):
+        # Test at single points, alternate calling convention
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+
+        yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])
+        assert_almost_equal(y, yi)
+
+    def test_complex_smoketest(self):
+        # Test at single points
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        yi = interpnd.LinearNDInterpolator(x, y)(x)
+        assert_almost_equal(y, yi)
+
+    def test_tri_input(self):
+        # Test at single points
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        tri = qhull.Delaunay(x)
+        yi = interpnd.LinearNDInterpolator(tri, y)(x)
+        assert_almost_equal(y, yi)
+
+    def test_square(self):
+        # Test barycentric interpolation on a square against a manual
+        # implementation
+
+        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
+        values = np.array([1., 2., -3., 5.], dtype=np.double)
+
+        # NB: assume triangles (0, 1, 3) and (1, 2, 3)
+        #
+        #  1----2
+        #  | \  |
+        #  |  \ |
+        #  0----3
+
+        def ip(x, y):
+            t1 = (x + y <= 1)
+            t2 = ~t1
+
+            x1 = x[t1]
+            y1 = y[t1]
+
+            x2 = x[t2]
+            y2 = y[t2]
+
+            z = 0*x
+
+            z[t1] = (values[0]*(1 - x1 - y1)
+                     + values[1]*y1
+                     + values[3]*x1)
+
+            z[t2] = (values[2]*(x2 + y2 - 1)
+                     + values[1]*(1 - x2)
+                     + values[3]*(1 - y2))
+            return z
+
+        xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],
+                                     np.linspace(0, 1, 14)[None,:])
+        xx = xx.ravel()
+        yy = yy.ravel()
+
+        xi = np.array([xx, yy]).T.copy()
+        zi = interpnd.LinearNDInterpolator(points, values)(xi)
+
+        assert_almost_equal(zi, ip(xx, yy))
+
+    def test_smoketest_rescale(self):
+        # Test at single points
+        x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+
+        yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)
+        assert_almost_equal(y, yi)
+
+    def test_square_rescale(self):
+        # Test barycentric interpolation on a rectangle with rescaling
+        # against the same implementation without rescaling
+
+        points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.double)
+        values = np.array([1., 2., -3., 5.], dtype=np.double)
+
+        xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
+                                     np.linspace(0, 100, 14)[None,:])
+        xx = xx.ravel()
+        yy = yy.ravel()
+        xi = np.array([xx, yy]).T.copy()
+        zi = interpnd.LinearNDInterpolator(points, values)(xi)
+        zi_rescaled = interpnd.LinearNDInterpolator(points, values,
+                                                    rescale=True)(xi)
+
+        assert_almost_equal(zi, zi_rescaled)
+
+    def test_tripoints_input_rescale(self):
+        # Test at single points
+        x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        tri = qhull.Delaunay(x)
+        yi = interpnd.LinearNDInterpolator(tri.points, y)(x)
+        yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,
+                                                   rescale=True)(x)
+        assert_almost_equal(yi, yi_rescale)
+
+    def
test_tri_input_rescale(self): + # Test at single points + x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)], + dtype=np.double) + y = np.arange(x.shape[0], dtype=np.double) + y = y - 3j*y + + tri = qhull.Delaunay(x) + match = ("Rescaling is not supported when passing a " + "Delaunay triangulation as ``points``.") + with pytest.raises(ValueError, match=match): + interpnd.LinearNDInterpolator(tri, y, rescale=True)(x) + + def test_pickle(self): + # Test at single points + np.random.seed(1234) + x = np.random.rand(30, 2) + y = np.random.rand(30) + 1j*np.random.rand(30) + + ip = interpnd.LinearNDInterpolator(x, y) + ip2 = pickle.loads(pickle.dumps(ip)) + + assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5)) + + +class TestEstimateGradients2DGlobal(object): + def test_smoketest(self): + x = np.array([(0, 0), (0, 2), + (1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float) + tri = qhull.Delaunay(x) + + # Should be exact for linear functions, independent of triangulation + + funcs = [ + (lambda x, y: 0*x + 1, (0, 0)), + (lambda x, y: 0 + x, (1, 0)), + (lambda x, y: -2 + y, (0, 1)), + (lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15)) + ] + + for j, (func, grad) in enumerate(funcs): + z = func(x[:,0], x[:,1]) + dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6) + + assert_equal(dz.shape, (6, 2)) + assert_allclose(dz, np.array(grad)[None,:] + 0*dz, + rtol=1e-5, atol=1e-5, err_msg="item %d" % j) + + def test_regression_2359(self): + # Check regression --- for certain point sets, gradient + # estimation could end up in an infinite loop + points = np.load(data_file('estimate_gradients_hang.npy')) + values = np.random.rand(points.shape[0]) + tri = qhull.Delaunay(points) + + # This should not hang + with suppress_warnings() as sup: + sup.filter(interpnd.GradientEstimationWarning, + "Gradient estimation did not converge") + interpnd.estimate_gradients_2d_global(tri, values, maxiter=1) + + +class TestCloughTocher2DInterpolator(object): + + def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False, rescale=False, **kw): + np.random.seed(1234) + if x is None: + x = np.array([(0, 0), (0, 1), + (1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8), + (0.5, 0.2)], + dtype=float) + + if not alternate: + ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]), + tol=1e-6, rescale=rescale) + else: + ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]), + func(x[:,0], x[:,1]), + tol=1e-6, rescale=rescale) + + p = np.random.rand(50, 2) + + if not alternate: + a = ip(p) + else: + a = ip(p[:,0], p[:,1]) + b = func(p[:,0], p[:,1]) + + try: + assert_allclose(a, b, **kw) + except AssertionError: + print(abs(a - b)) + print(ip.grad) + raise + + def test_linear_smoketest(self): + # Should be exact for linear functions, independent of triangulation + funcs = [ + lambda x, y: 0*x + 1, + lambda x, y: 0 + x, + lambda x, y: -2 + y, + lambda x, y: 3 + 3*x + 14.15*y, + ] + + for j, func in enumerate(funcs): + self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7, + err_msg="Function %d" % j) + self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7, + alternate=True, + err_msg="Function (alternate) %d" % j) + # check rescaling + self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7, + err_msg="Function (rescaled) %d" % j, rescale=True) + self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7, + alternate=True, rescale=True, + err_msg="Function (alternate, rescaled) %d" % j) + + def test_quadratic_smoketest(self): + # Should be reasonably accurate for quadratic functions + funcs = [ + lambda x, y: 
x**2, + lambda x, y: y**2, + lambda x, y: x**2 - y**2, + lambda x, y: x*y, + ] + + for j, func in enumerate(funcs): + self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0, + err_msg="Function %d" % j) + self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0, + err_msg="Function %d" % j, rescale=True) + + def test_tri_input(self): + # Test at single points + x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], + dtype=np.double) + y = np.arange(x.shape[0], dtype=np.double) + y = y - 3j*y + + tri = qhull.Delaunay(x) + yi = interpnd.CloughTocher2DInterpolator(tri, y)(x) + assert_almost_equal(y, yi) + + def test_tri_input_rescale(self): + # Test at single points + x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)], + dtype=np.double) + y = np.arange(x.shape[0], dtype=np.double) + y = y - 3j*y + + tri = qhull.Delaunay(x) + match = ("Rescaling is not supported when passing a " + "Delaunay triangulation as ``points``.") + with pytest.raises(ValueError, match=match): + interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x) + + def test_tripoints_input_rescale(self): + # Test at single points + x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)], + dtype=np.double) + y = np.arange(x.shape[0], dtype=np.double) + y = y - 3j*y + + tri = qhull.Delaunay(x) + yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x) + yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x) + assert_almost_equal(yi, yi_rescale) + + def test_dense(self): + # Should be more accurate for dense meshes + funcs = [ + lambda x, y: x**2, + lambda x, y: y**2, + lambda x, y: x**2 - y**2, + lambda x, y: x*y, + lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y) + ] + + np.random.seed(4321) # use a different seed than the check! + grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float), + np.random.rand(30*30, 2)] + + for j, func in enumerate(funcs): + self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2, + err_msg="Function %d" % j) + self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2, + err_msg="Function %d" % j, rescale=True) + + def test_wrong_ndim(self): + x = np.random.randn(30, 3) + y = np.random.randn(30) + assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y) + + def test_pickle(self): + # Test at single points + np.random.seed(1234) + x = np.random.rand(30, 2) + y = np.random.rand(30) + 1j*np.random.rand(30) + + ip = interpnd.CloughTocher2DInterpolator(x, y) + ip2 = pickle.loads(pickle.dumps(ip)) + + assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5)) + + def test_boundary_tri_symmetry(self): + # Interpolation at neighbourless triangles should retain + # symmetry with mirroring the triangle. + + # Equilateral triangle + points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)]) + values = np.array([1, 0, 0]) + + ip = interpnd.CloughTocher2DInterpolator(points, values) + + # Set gradient to zero at vertices + ip.grad[...] = 0 + + # Interpolation should be symmetric vs. bisector + alpha = 0.3 + p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)]) + p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)]) + + v1 = ip(p1) + v2 = ip(p2) + assert_allclose(v1, v2) + + # ... and affine invariant + np.random.seed(1) + A = np.random.randn(2, 2) + b = np.random.randn(2) + + points = A.dot(points.T).T + b[None,:] + p1 = A.dot(p1) + b + p2 = A.dot(p2) + b + + ip = interpnd.CloughTocher2DInterpolator(points, values) + ip.grad[...] 
= 0 + + w1 = ip(p1) + w2 = ip(p2) + assert_allclose(w1, v1) + assert_allclose(w2, v2) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpnd.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpnd.pyc new file mode 100644 index 0000000..4e26f61 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpnd.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate.py new file mode 100644 index 0000000..3423d15 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate.py @@ -0,0 +1,2769 @@ +from __future__ import division, print_function, absolute_import + +import itertools + +from numpy.testing import (assert_, assert_equal, assert_almost_equal, + assert_array_almost_equal, assert_array_equal, + assert_allclose) +from pytest import raises as assert_raises +import pytest + +from numpy import mgrid, pi, sin, ogrid, poly1d, linspace +import numpy as np + +from scipy._lib.six import xrange +from scipy._lib._numpy_compat import _assert_warns, suppress_warnings + +from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly, + splrep, splev, splantider, splint, sproot, Akima1DInterpolator, + RegularGridInterpolator, LinearNDInterpolator, NearestNDInterpolator, + RectBivariateSpline, interpn, NdPPoly, BSpline) + +from scipy.special import poch, gamma + +from scipy.interpolate import _ppoly + +from scipy._lib._gcutils import assert_deallocated, IS_PYPY + +from scipy.integrate import nquad + +from scipy.special import binom + + +class TestInterp2D(object): + def test_interp2d(self): + y, x = mgrid[0:2:20j, 0:pi:21j] + z = sin(x+0.5*y) + I = interp2d(x, y, z) + assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2) + + v,u = ogrid[0:2:24j, 0:pi:25j] + assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2) + + def test_interp2d_meshgrid_input(self): + # Ticket #703 + x = linspace(0, 2, 16) + y = linspace(0, pi, 21) + z = sin(x[None,:] + y[:,None]/2.) + I = interp2d(x, y, z) + assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2) + + def test_interp2d_meshgrid_input_unsorted(self): + np.random.seed(1234) + x = linspace(0, 2, 16) + y = linspace(0, pi, 21) + + z = sin(x[None,:] + y[:,None]/2.) + ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic') + + np.random.shuffle(x) + z = sin(x[None,:] + y[:,None]/2.) + ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic') + + np.random.shuffle(x) + np.random.shuffle(y) + z = sin(x[None,:] + y[:,None]/2.) 
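+        # Third interpolant, built after shuffling both axes; interp2d is
+        # expected to sort the grid internally, so ip1, ip2 and ip3 should
+        # agree on the evaluation grid below.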
+ ip3 = interp2d(x, y, z, kind='cubic') + + x = linspace(0, 2, 31) + y = linspace(0, pi, 30) + + assert_equal(ip1(x, y), ip2(x, y)) + assert_equal(ip1(x, y), ip3(x, y)) + + def test_interp2d_eval_unsorted(self): + y, x = mgrid[0:2:20j, 0:pi:21j] + z = sin(x + 0.5*y) + func = interp2d(x, y, z) + + xe = np.array([3, 4, 5]) + ye = np.array([5.3, 7.1]) + assert_allclose(func(xe, ye), func(xe, ye[::-1])) + + assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True) + + def test_interp2d_linear(self): + # Ticket #898 + a = np.zeros([5, 5]) + a[2, 2] = 1.0 + x = y = np.arange(5) + b = interp2d(x, y, a, 'linear') + assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2) + assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2) + + def test_interp2d_bounds(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 2, 7) + z = x[None, :]**2 + y[:, None] + + ix = np.linspace(-1, 3, 31) + iy = np.linspace(-1, 3, 33) + + b = interp2d(x, y, z, bounds_error=True) + assert_raises(ValueError, b, ix, iy) + + b = interp2d(x, y, z, fill_value=np.nan) + iz = b(ix, iy) + mx = (ix < 0) | (ix > 1) + my = (iy < 0) | (iy > 2) + assert_(np.isnan(iz[my,:]).all()) + assert_(np.isnan(iz[:,mx]).all()) + assert_(np.isfinite(iz[~my,:][:,~mx]).all()) + + +class TestInterp1D(object): + + def setup_method(self): + self.x5 = np.arange(5.) + self.x10 = np.arange(10.) + self.y10 = np.arange(10.) + self.x25 = self.x10.reshape((2,5)) + self.x2 = np.arange(2.) + self.y2 = np.arange(2.) + self.x1 = np.array([0.]) + self.y1 = np.array([0.]) + + self.y210 = np.arange(20.).reshape((2, 10)) + self.y102 = np.arange(20.).reshape((10, 2)) + self.y225 = np.arange(20.).reshape((2, 2, 5)) + self.y25 = np.arange(10.).reshape((2, 5)) + self.y235 = np.arange(30.).reshape((2, 3, 5)) + self.y325 = np.arange(30.).reshape((3, 2, 5)) + + self.fill_value = -100.0 + + def test_validation(self): + # Make sure that appropriate exceptions are raised when invalid values + # are given to the constructor. + + # These should all work. + for kind in ('nearest', 'zero', 'linear', 'slinear', 'quadratic', + 'cubic', 'previous', 'next'): + interp1d(self.x10, self.y10, kind=kind) + interp1d(self.x10, self.y10, kind=kind, fill_value="extrapolate") + interp1d(self.x10, self.y10, kind='linear', fill_value=(-1, 1)) + interp1d(self.x10, self.y10, kind='linear', + fill_value=np.array([-1])) + interp1d(self.x10, self.y10, kind='linear', + fill_value=(-1,)) + interp1d(self.x10, self.y10, kind='linear', + fill_value=-1) + interp1d(self.x10, self.y10, kind='linear', + fill_value=(-1, -1)) + interp1d(self.x10, self.y10, kind=0) + interp1d(self.x10, self.y10, kind=1) + interp1d(self.x10, self.y10, kind=2) + interp1d(self.x10, self.y10, kind=3) + interp1d(self.x10, self.y210, kind='linear', axis=-1, + fill_value=(-1, -1)) + interp1d(self.x2, self.y210, kind='linear', axis=0, + fill_value=np.ones(10)) + interp1d(self.x2, self.y210, kind='linear', axis=0, + fill_value=(np.ones(10), np.ones(10))) + interp1d(self.x2, self.y210, kind='linear', axis=0, + fill_value=(np.ones(10), -1)) + + # x array must be 1D. + assert_raises(ValueError, interp1d, self.x25, self.y10) + + # y array cannot be a scalar. + assert_raises(ValueError, interp1d, self.x10, np.array(0)) + + # Check for x and y arrays having the same length. 
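+        # Length mismatches must raise; a 2-D y is accepted as long as the
+        # interpolation axis matches len(x).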
+ assert_raises(ValueError, interp1d, self.x10, self.y2) + assert_raises(ValueError, interp1d, self.x2, self.y10) + assert_raises(ValueError, interp1d, self.x10, self.y102) + interp1d(self.x10, self.y210) + interp1d(self.x10, self.y102, axis=0) + + # Check for x and y having at least 1 element. + assert_raises(ValueError, interp1d, self.x1, self.y10) + assert_raises(ValueError, interp1d, self.x10, self.y1) + assert_raises(ValueError, interp1d, self.x1, self.y1) + + # Bad fill values + assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear', + fill_value=(-1, -1, -1)) # doesn't broadcast + assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear', + fill_value=[-1, -1, -1]) # doesn't broadcast + assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear', + fill_value=np.array((-1, -1, -1))) # doesn't broadcast + assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear', + fill_value=[[-1]]) # doesn't broadcast + assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear', + fill_value=[-1, -1]) # doesn't broadcast + assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear', + fill_value=np.array([])) # doesn't broadcast + assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear', + fill_value=()) # doesn't broadcast + assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear', + axis=0, fill_value=[-1, -1]) # doesn't broadcast + assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear', + axis=0, fill_value=(0., [-1, -1])) # above doesn't bc + + def test_init(self): + # Check that the attributes are initialized appropriately by the + # constructor. + assert_(interp1d(self.x10, self.y10).copy) + assert_(not interp1d(self.x10, self.y10, copy=False).copy) + assert_(interp1d(self.x10, self.y10).bounds_error) + assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error) + assert_(np.isnan(interp1d(self.x10, self.y10).fill_value)) + assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value, + 3.0) + assert_equal(interp1d(self.x10, self.y10, fill_value=(1.0, 2.0)).fill_value, + (1.0, 2.0)) + assert_equal(interp1d(self.x10, self.y10).axis, 0) + assert_equal(interp1d(self.x10, self.y210).axis, 1) + assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0) + assert_array_equal(interp1d(self.x10, self.y10).x, self.x10) + assert_array_equal(interp1d(self.x10, self.y10).y, self.y10) + assert_array_equal(interp1d(self.x10, self.y210).y, self.y210) + + def test_assume_sorted(self): + # Check for unsorted arrays + interp10 = interp1d(self.x10, self.y10) + interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1]) + + assert_array_almost_equal(interp10_unsorted(self.x10), self.y10) + assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2])) + assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]), + interp10([2.4, 5.6, 6.0])) + + # Check assume_sorted keyword (defaults to False) + interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1], + assume_sorted=False) + assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10) + + interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1], + assume_sorted=True) + # Should raise an error for unsorted input if assume_sorted=True + assert_raises(ValueError, interp10_assume_kw2, self.x10) + + # Check that if y is a 2-D array, things are still consistent + interp10_y_2d = interp1d(self.x10, self.y210) + interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1]) + 
assert_array_almost_equal(interp10_y_2d(self.x10), + interp10_y_2d_unsorted(self.x10)) + + def test_linear(self): + for kind in ['linear', 'slinear']: + self._check_linear(kind) + + def _check_linear(self, kind): + # Check the actual implementation of linear interpolation. + interp10 = interp1d(self.x10, self.y10, kind=kind) + assert_array_almost_equal(interp10(self.x10), self.y10) + assert_array_almost_equal(interp10(1.2), np.array([1.2])) + assert_array_almost_equal(interp10([2.4, 5.6, 6.0]), + np.array([2.4, 5.6, 6.0])) + + # test fill_value="extrapolate" + extrapolator = interp1d(self.x10, self.y10, kind=kind, + fill_value='extrapolate') + assert_allclose(extrapolator([-1., 0, 9, 11]), + [-1, 0, 9, 11], rtol=1e-14) + + opts = dict(kind=kind, + fill_value='extrapolate', + bounds_error=True) + assert_raises(ValueError, interp1d, self.x10, self.y10, **opts) + + def test_linear_dtypes(self): + # regression test for gh-5898, where 1D linear interpolation has been + # delegated to numpy.interp for all float dtypes, and the latter was + # not handling e.g. np.float128. + for dtyp in np.sctypes["float"]: + x = np.arange(8, dtype=dtyp) + y = x + yp = interp1d(x, y, kind='linear')(x) + assert_equal(yp.dtype, dtyp) + assert_allclose(yp, y, atol=1e-15) + + def test_slinear_dtypes(self): + # regression test for gh-7273: 1D slinear interpolation fails with + # float32 inputs + dt_r = [np.float16, np.float32, np.float64] + dt_rc = dt_r + [np.complex64, np.complex128] + spline_kinds = ['slinear', 'zero', 'quadratic', 'cubic'] + for dtx in dt_r: + x = np.arange(0, 10, dtype=dtx) + for dty in dt_rc: + y = np.exp(-x/3.0).astype(dty) + for dtn in dt_r: + xnew = x.astype(dtn) + for kind in spline_kinds: + f = interp1d(x, y, kind=kind, bounds_error=False) + assert_allclose(f(xnew), y, atol=1e-7, + err_msg="%s, %s %s" % (dtx, dty, dtn)) + + def test_cubic(self): + # Check the actual implementation of spline interpolation. + interp10 = interp1d(self.x10, self.y10, kind='cubic') + assert_array_almost_equal(interp10(self.x10), self.y10) + assert_array_almost_equal(interp10(1.2), np.array([1.2])) + assert_array_almost_equal(interp10([2.4, 5.6, 6.0]), + np.array([2.4, 5.6, 6.0]),) + + def test_nearest(self): + # Check the actual implementation of nearest-neighbour interpolation. + interp10 = interp1d(self.x10, self.y10, kind='nearest') + assert_array_almost_equal(interp10(self.x10), self.y10) + assert_array_almost_equal(interp10(1.2), np.array(1.)) + assert_array_almost_equal(interp10([2.4, 5.6, 6.0]), + np.array([2., 6., 6.]),) + + # test fill_value="extrapolate" + extrapolator = interp1d(self.x10, self.y10, kind='nearest', + fill_value='extrapolate') + assert_allclose(extrapolator([-1., 0, 9, 11]), + [0, 0, 9, 9], rtol=1e-14) + + opts = dict(kind='nearest', + fill_value='extrapolate', + bounds_error=True) + assert_raises(ValueError, interp1d, self.x10, self.y10, **opts) + + def test_previous(self): + # Check the actual implementation of previous interpolation. 
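+        # 'previous' returns the sample value at the closest grid point at
+        # or below x, e.g. 1.2 -> y[1] and 5.6 -> y[5].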
+ interp10 = interp1d(self.x10, self.y10, kind='previous') + assert_array_almost_equal(interp10(self.x10), self.y10) + assert_array_almost_equal(interp10(1.2), np.array(1.)) + assert_array_almost_equal(interp10([2.4, 5.6, 6.0]), + np.array([2., 5., 6.]),) + + # test fill_value="extrapolate" + extrapolator = interp1d(self.x10, self.y10, kind='previous', + fill_value='extrapolate') + assert_allclose(extrapolator([-1., 0, 9, 11]), + [0, 0, 9, 9], rtol=1e-14) + + opts = dict(kind='previous', + fill_value='extrapolate', + bounds_error=True) + assert_raises(ValueError, interp1d, self.x10, self.y10, **opts) + + def test_next(self): + # Check the actual implementation of next interpolation. + interp10 = interp1d(self.x10, self.y10, kind='next') + assert_array_almost_equal(interp10(self.x10), self.y10) + assert_array_almost_equal(interp10(1.2), np.array(2.)) + assert_array_almost_equal(interp10([2.4, 5.6, 6.0]), + np.array([3., 6., 6.]),) + + # test fill_value="extrapolate" + extrapolator = interp1d(self.x10, self.y10, kind='next', + fill_value='extrapolate') + assert_allclose(extrapolator([-1., 0, 9, 11]), + [0, 0, 9, 9], rtol=1e-14) + + opts = dict(kind='next', + fill_value='extrapolate', + bounds_error=True) + assert_raises(ValueError, interp1d, self.x10, self.y10, **opts) + + def test_zero(self): + # Check the actual implementation of zero-order spline interpolation. + interp10 = interp1d(self.x10, self.y10, kind='zero') + assert_array_almost_equal(interp10(self.x10), self.y10) + assert_array_almost_equal(interp10(1.2), np.array(1.)) + assert_array_almost_equal(interp10([2.4, 5.6, 6.0]), + np.array([2., 5., 6.])) + + def _bounds_check(self, kind='linear'): + # Test that our handling of out-of-bounds input is correct. + extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value, + bounds_error=False, kind=kind) + + assert_array_equal(extrap10(11.2), np.array(self.fill_value)) + assert_array_equal(extrap10(-3.4), np.array(self.fill_value)) + assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]), + np.array(self.fill_value),) + assert_array_equal(extrap10._check_bounds( + np.array([-1.0, 0.0, 5.0, 9.0, 11.0])), + np.array([[True, False, False, False, False], + [False, False, False, False, True]])) + + raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True, + kind=kind) + assert_raises(ValueError, raises_bounds_error, -1.0) + assert_raises(ValueError, raises_bounds_error, 11.0) + raises_bounds_error([0.0, 5.0, 9.0]) + + def _bounds_check_int_nan_fill(self, kind='linear'): + x = np.arange(10).astype(np.int_) + y = np.arange(10).astype(np.int_) + c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False) + yi = c(x - 1) + assert_(np.isnan(yi[0])) + assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]]) + + def test_bounds(self): + for kind in ('linear', 'cubic', 'nearest', 'previous', 'next', + 'slinear', 'zero', 'quadratic'): + self._bounds_check(kind) + self._bounds_check_int_nan_fill(kind) + + def _check_fill_value(self, kind): + interp = interp1d(self.x10, self.y10, kind=kind, + fill_value=(-100, 100), bounds_error=False) + assert_array_almost_equal(interp(10), 100) + assert_array_almost_equal(interp(-10), -100) + assert_array_almost_equal(interp([-10, 10]), [-100, 100]) + + # Proper broadcasting: + # interp along axis of length 5 + # other dim=(2, 3), (3, 2), (2, 2), or (2,) + + # one singleton fill_value (works for all) + for y in (self.y235, self.y325, self.y225, self.y25): + interp = interp1d(self.x5, y, kind=kind, axis=-1, + fill_value=100, 
bounds_error=False) + assert_array_almost_equal(interp(10), 100) + assert_array_almost_equal(interp(-10), 100) + assert_array_almost_equal(interp([-10, 10]), 100) + + # singleton lower, singleton upper + interp = interp1d(self.x5, y, kind=kind, axis=-1, + fill_value=(-100, 100), bounds_error=False) + assert_array_almost_equal(interp(10), 100) + assert_array_almost_equal(interp(-10), -100) + if y.ndim == 3: + result = [[[-100, 100]] * y.shape[1]] * y.shape[0] + else: + result = [[-100, 100]] * y.shape[0] + assert_array_almost_equal(interp([-10, 10]), result) + + # one broadcastable (3,) fill_value + fill_value = [100, 200, 300] + for y in (self.y325, self.y225): + assert_raises(ValueError, interp1d, self.x5, y, kind=kind, + axis=-1, fill_value=fill_value, bounds_error=False) + interp = interp1d(self.x5, self.y235, kind=kind, axis=-1, + fill_value=fill_value, bounds_error=False) + assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2) + assert_array_almost_equal(interp(-10), [[100, 200, 300]] * 2) + assert_array_almost_equal(interp([-10, 10]), [[[100, 100], + [200, 200], + [300, 300]]] * 2) + + # one broadcastable (2,) fill_value + fill_value = [100, 200] + assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind, + axis=-1, fill_value=fill_value, bounds_error=False) + for y in (self.y225, self.y325, self.y25): + interp = interp1d(self.x5, y, kind=kind, axis=-1, + fill_value=fill_value, bounds_error=False) + result = [100, 200] + if y.ndim == 3: + result = [result] * y.shape[0] + assert_array_almost_equal(interp(10), result) + assert_array_almost_equal(interp(-10), result) + result = [[100, 100], [200, 200]] + if y.ndim == 3: + result = [result] * y.shape[0] + assert_array_almost_equal(interp([-10, 10]), result) + + # broadcastable (3,) lower, singleton upper + fill_value = (np.array([-100, -200, -300]), 100) + for y in (self.y325, self.y225): + assert_raises(ValueError, interp1d, self.x5, y, kind=kind, + axis=-1, fill_value=fill_value, bounds_error=False) + interp = interp1d(self.x5, self.y235, kind=kind, axis=-1, + fill_value=fill_value, bounds_error=False) + assert_array_almost_equal(interp(10), 100) + assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2) + assert_array_almost_equal(interp([-10, 10]), [[[-100, 100], + [-200, 100], + [-300, 100]]] * 2) + + # broadcastable (2,) lower, singleton upper + fill_value = (np.array([-100, -200]), 100) + assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind, + axis=-1, fill_value=fill_value, bounds_error=False) + for y in (self.y225, self.y325, self.y25): + interp = interp1d(self.x5, y, kind=kind, axis=-1, + fill_value=fill_value, bounds_error=False) + assert_array_almost_equal(interp(10), 100) + result = [-100, -200] + if y.ndim == 3: + result = [result] * y.shape[0] + assert_array_almost_equal(interp(-10), result) + result = [[-100, 100], [-200, 100]] + if y.ndim == 3: + result = [result] * y.shape[0] + assert_array_almost_equal(interp([-10, 10]), result) + + # broadcastable (3,) lower, broadcastable (3,) upper + fill_value = ([-100, -200, -300], [100, 200, 300]) + for y in (self.y325, self.y225): + assert_raises(ValueError, interp1d, self.x5, y, kind=kind, + axis=-1, fill_value=fill_value, bounds_error=False) + for ii in range(2): # check ndarray as well as list here + if ii == 1: + fill_value = tuple(np.array(f) for f in fill_value) + interp = interp1d(self.x5, self.y235, kind=kind, axis=-1, + fill_value=fill_value, bounds_error=False) + assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2) + 
assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2) + assert_array_almost_equal(interp([-10, 10]), [[[-100, 100], + [-200, 200], + [-300, 300]]] * 2) + # broadcastable (2,) lower, broadcastable (2,) upper + fill_value = ([-100, -200], [100, 200]) + assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind, + axis=-1, fill_value=fill_value, bounds_error=False) + for y in (self.y325, self.y225, self.y25): + interp = interp1d(self.x5, y, kind=kind, axis=-1, + fill_value=fill_value, bounds_error=False) + result = [100, 200] + if y.ndim == 3: + result = [result] * y.shape[0] + assert_array_almost_equal(interp(10), result) + result = [-100, -200] + if y.ndim == 3: + result = [result] * y.shape[0] + assert_array_almost_equal(interp(-10), result) + result = [[-100, 100], [-200, 200]] + if y.ndim == 3: + result = [result] * y.shape[0] + assert_array_almost_equal(interp([-10, 10]), result) + + # one broadcastable (2, 2) array-like + fill_value = [[100, 200], [1000, 2000]] + for y in (self.y235, self.y325, self.y25): + assert_raises(ValueError, interp1d, self.x5, y, kind=kind, + axis=-1, fill_value=fill_value, bounds_error=False) + for ii in range(2): + if ii == 1: + fill_value = np.array(fill_value) + interp = interp1d(self.x5, self.y225, kind=kind, axis=-1, + fill_value=fill_value, bounds_error=False) + assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]]) + assert_array_almost_equal(interp(-10), [[100, 200], [1000, 2000]]) + assert_array_almost_equal(interp([-10, 10]), [[[100, 100], + [200, 200]], + [[1000, 1000], + [2000, 2000]]]) + + # broadcastable (2, 2) lower, broadcastable (2, 2) upper + fill_value = ([[-100, -200], [-1000, -2000]], + [[100, 200], [1000, 2000]]) + for y in (self.y235, self.y325, self.y25): + assert_raises(ValueError, interp1d, self.x5, y, kind=kind, + axis=-1, fill_value=fill_value, bounds_error=False) + for ii in range(2): + if ii == 1: + fill_value = (np.array(fill_value[0]), np.array(fill_value[1])) + interp = interp1d(self.x5, self.y225, kind=kind, axis=-1, + fill_value=fill_value, bounds_error=False) + assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]]) + assert_array_almost_equal(interp(-10), [[-100, -200], + [-1000, -2000]]) + assert_array_almost_equal(interp([-10, 10]), [[[-100, 100], + [-200, 200]], + [[-1000, 1000], + [-2000, 2000]]]) + + def test_fill_value(self): + # test that two-element fill value works + for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic', + 'zero', 'previous', 'next'): + self._check_fill_value(kind) + + def test_fill_value_writeable(self): + # backwards compat: fill_value is a public writeable attribute + interp = interp1d(self.x10, self.y10, fill_value=123.0) + assert_equal(interp.fill_value, 123.0) + interp.fill_value = 321.0 + assert_equal(interp.fill_value, 321.0) + + def _nd_check_interp(self, kind='linear'): + # Check the behavior when the inputs and outputs are multidimensional. + + # Multidimensional input. + interp10 = interp1d(self.x10, self.y10, kind=kind) + assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])), + np.array([[3., 5.], [2., 7.]])) + + # Scalar input -> 0-dim scalar array output + assert_(isinstance(interp10(1.2), np.ndarray)) + assert_equal(interp10(1.2).shape, ()) + + # Multidimensional outputs. 
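+        # y210 interpolates along its last axis (length 10), so a scalar
+        # query returns one value per row, i.e. shape (2,).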
+ interp210 = interp1d(self.x10, self.y210, kind=kind) + assert_array_almost_equal(interp210(1.), np.array([1., 11.])) + assert_array_almost_equal(interp210(np.array([1., 2.])), + np.array([[1., 2.], [11., 12.]])) + + interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind) + assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0])) + assert_array_almost_equal(interp102(np.array([1., 3.])), + np.array([[2., 3.], [6., 7.]])) + + # Both at the same time! + x_new = np.array([[3., 5.], [2., 7.]]) + assert_array_almost_equal(interp210(x_new), + np.array([[[3., 5.], [2., 7.]], + [[13., 15.], [12., 17.]]])) + assert_array_almost_equal(interp102(x_new), + np.array([[[6., 7.], [10., 11.]], + [[4., 5.], [14., 15.]]])) + + def _nd_check_shape(self, kind='linear'): + # Check large ndim output shape + a = [4, 5, 6, 7] + y = np.arange(np.prod(a)).reshape(*a) + for n, s in enumerate(a): + x = np.arange(s) + z = interp1d(x, y, axis=n, kind=kind) + assert_array_almost_equal(z(x), y, err_msg=kind) + + x2 = np.arange(2*3*1).reshape((2,3,1)) / 12. + b = list(a) + b[n:n+1] = [2,3,1] + assert_array_almost_equal(z(x2).shape, b, err_msg=kind) + + def test_nd(self): + for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest', + 'zero', 'previous', 'next'): + self._nd_check_interp(kind) + self._nd_check_shape(kind) + + def _check_complex(self, dtype=np.complex_, kind='linear'): + x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10]) + y = x * x ** (1 + 2j) + y = y.astype(dtype) + + # simple test + c = interp1d(x, y, kind=kind) + assert_array_almost_equal(y[:-1], c(x)[:-1]) + + # check against interpolating real+imag separately + xi = np.linspace(1, 10, 31) + cr = interp1d(x, y.real, kind=kind) + ci = interp1d(x, y.imag, kind=kind) + assert_array_almost_equal(c(xi).real, cr(xi)) + assert_array_almost_equal(c(xi).imag, ci(xi)) + + def test_complex(self): + for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic', + 'zero', 'previous', 'next'): + self._check_complex(np.complex64, kind) + self._check_complex(np.complex128, kind) + + @pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") + def test_circular_refs(self): + # Test interp1d can be automatically garbage collected + x = np.linspace(0, 1) + y = np.linspace(0, 1) + # Confirm interp can be released from memory after use + with assert_deallocated(interp1d, x, y) as interp: + new_y = interp([0.1, 0.2]) + del interp + + def test_overflow_nearest(self): + # Test that the x range doesn't overflow when given integers as input + for kind in ('nearest', 'previous', 'next'): + x = np.array([0, 50, 127], dtype=np.int8) + ii = interp1d(x, x, kind=kind) + assert_array_almost_equal(ii(x), x) + + def test_local_nans(self): + # check that for local interpolation kinds (slinear, zero) a single nan + # only affects its local neighborhood + x = np.arange(10).astype(float) + y = x.copy() + y[6] = np.nan + for kind in ('zero', 'slinear'): + ir = interp1d(x, y, kind=kind) + vals = ir([4.9, 7.0]) + assert_(np.isfinite(vals).all()) + + def test_spline_nans(self): + # Backwards compat: a single nan makes the whole spline interpolation + # return nans in an array of the correct shape. And it doesn't raise, + # just quiet nans because of backcompat. 
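+        # (contrast with test_local_nans above: the local kinds 'zero' and
+        # 'slinear' keep a single nan confined to its neighbourhood)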
+ x = np.arange(8).astype(float) + y = x.copy() + yn = y.copy() + yn[3] = np.nan + + for kind in ['quadratic', 'cubic']: + ir = interp1d(x, y, kind=kind) + irn = interp1d(x, yn, kind=kind) + for xnew in (6, [1, 6], [[1, 6], [3, 5]]): + xnew = np.asarray(xnew) + out, outn = ir(x), irn(x) + assert_(np.isnan(outn).all()) + assert_equal(out.shape, outn.shape) + + def test_read_only(self): + x = np.arange(0, 10) + y = np.exp(-x / 3.0) + xnew = np.arange(0, 9, 0.1) + # Check both read-only and not read-only: + for writeable in (True, False): + xnew.flags.writeable = writeable + for kind in ('linear', 'nearest', 'zero', 'slinear', 'quadratic', + 'cubic'): + f = interp1d(x, y, kind=kind) + vals = f(xnew) + assert_(np.isfinite(vals).all()) + + +class TestLagrange(object): + + def test_lagrange(self): + p = poly1d([5,2,1,4,3]) + xs = np.arange(len(p.coeffs)) + ys = p(xs) + pl = lagrange(xs,ys) + assert_array_almost_equal(p.coeffs,pl.coeffs) + + +class TestAkima1DInterpolator(object): + def test_eval(self): + x = np.arange(0., 11.) + y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.]) + ak = Akima1DInterpolator(x, y) + xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2, + 8.6, 9.9, 10.]) + yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375, + 4.1363636363636366866103344, 5.9803623910336236590978842, + 5.5067291516462386624652936, 5.2031367459745245795943447, + 4.1796554159017080820603951, 3.4110386597938129327189927, + 3.]) + assert_allclose(ak(xi), yi) + + def test_eval_2d(self): + x = np.arange(0., 11.) + y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.]) + y = np.column_stack((y, 2. * y)) + ak = Akima1DInterpolator(x, y) + xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2, + 8.6, 9.9, 10.]) + yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375, + 4.1363636363636366866103344, + 5.9803623910336236590978842, + 5.5067291516462386624652936, + 5.2031367459745245795943447, + 4.1796554159017080820603951, + 3.4110386597938129327189927, 3.]) + yi = np.column_stack((yi, 2. * yi)) + assert_allclose(ak(xi), yi) + + def test_eval_3d(self): + x = np.arange(0., 11.) + y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.]) + y = np.empty((11, 2, 2)) + y[:, 0, 0] = y_ + y[:, 1, 0] = 2. * y_ + y[:, 0, 1] = 3. * y_ + y[:, 1, 1] = 4. * y_ + ak = Akima1DInterpolator(x, y) + xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2, + 8.6, 9.9, 10.]) + yi = np.empty((13, 2, 2)) + yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375, + 4.1363636363636366866103344, + 5.9803623910336236590978842, + 5.5067291516462386624652936, + 5.2031367459745245795943447, + 4.1796554159017080820603951, + 3.4110386597938129327189927, 3.]) + yi[:, 0, 0] = yi_ + yi[:, 1, 0] = 2. * yi_ + yi[:, 0, 1] = 3. * yi_ + yi[:, 1, 1] = 4. * yi_ + assert_allclose(ak(xi), yi) + + def test_degenerate_case_multidimensional(self): + # This test is for issue #5683. + x = np.array([0, 1, 2]) + y = np.vstack((x, x**2)).T + ak = Akima1DInterpolator(x, y) + x_eval = np.array([0.5, 1.5]) + y_eval = ak(x_eval) + assert_allclose(y_eval, np.vstack((x_eval, x_eval**2)).T) + + def test_extend(self): + x = np.arange(0., 11.) 
+ y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.]) + ak = Akima1DInterpolator(x, y) + match = "Extending a 1D Akima interpolator is not yet implemented" + with pytest.raises(NotImplementedError, match=match): + ak.extend(None, None) + + +class TestPPolyCommon(object): + # test basic functionality for PPoly and BPoly + def test_sort_check(self): + c = np.array([[1, 4], [2, 5], [3, 6]]) + x = np.array([0, 1, 0.5]) + assert_raises(ValueError, PPoly, c, x) + assert_raises(ValueError, BPoly, c, x) + + def test_ctor_c(self): + # wrong shape: `c` must be at least 2-dimensional + with assert_raises(ValueError): + PPoly([1, 2], [0, 1]) + + def test_extend(self): + # Test adding new points to the piecewise polynomial + np.random.seed(1234) + + order = 3 + x = np.unique(np.r_[0, 10 * np.random.rand(30), 10]) + c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1 + + for cls in (PPoly, BPoly): + pp = cls(c[:,:9], x[:10]) + pp.extend(c[:,9:], x[10:]) + + pp2 = cls(c[:, 10:], x[10:]) + pp2.extend(c[:, :10], x[:10]) + + pp3 = cls(c, x) + + assert_array_equal(pp.c, pp3.c) + assert_array_equal(pp.x, pp3.x) + assert_array_equal(pp2.c, pp3.c) + assert_array_equal(pp2.x, pp3.x) + + def test_extend_diff_orders(self): + # Test extending polynomial with different order one + np.random.seed(1234) + + x = np.linspace(0, 1, 6) + c = np.random.rand(2, 5) + + x2 = np.linspace(1, 2, 6) + c2 = np.random.rand(4, 5) + + for cls in (PPoly, BPoly): + pp1 = cls(c, x) + pp2 = cls(c2, x2) + + pp_comb = cls(c, x) + pp_comb.extend(c2, x2[1:]) + + # NB. doesn't match to pp1 at the endpoint, because pp1 is not + # continuous with pp2 as we took random coefs. + xi1 = np.linspace(0, 1, 300, endpoint=False) + xi2 = np.linspace(1, 2, 300) + + assert_allclose(pp1(xi1), pp_comb(xi1)) + assert_allclose(pp2(xi2), pp_comb(xi2)) + + def test_extend_descending(self): + np.random.seed(0) + + order = 3 + x = np.sort(np.random.uniform(0, 10, 20)) + c = np.random.rand(order + 1, x.shape[0] - 1, 2, 3) + + for cls in (PPoly, BPoly): + p = cls(c, x) + + p1 = cls(c[:, :9], x[:10]) + p1.extend(c[:, 9:], x[10:]) + + p2 = cls(c[:, 10:], x[10:]) + p2.extend(c[:, :10], x[:10]) + + assert_array_equal(p1.c, p.c) + assert_array_equal(p1.x, p.x) + assert_array_equal(p2.c, p.c) + assert_array_equal(p2.x, p.x) + + def test_shape(self): + np.random.seed(1234) + c = np.random.rand(8, 12, 5, 6, 7) + x = np.sort(np.random.rand(13)) + xp = np.random.rand(3, 4) + for cls in (PPoly, BPoly): + p = cls(c, x) + assert_equal(p(xp).shape, (3, 4, 5, 6, 7)) + + # 'scalars' + for cls in (PPoly, BPoly): + p = cls(c[..., 0, 0, 0], x) + + assert_equal(np.shape(p(0.5)), ()) + assert_equal(np.shape(p(np.array(0.5))), ()) + + # can't use dtype=object (with any numpy; what fails is + # constructing the object array here for old numpy) + assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]])) + + def test_complex_coef(self): + np.random.seed(12345) + x = np.sort(np.random.random(13)) + c = np.random.random((8, 12)) * (1. 
+ 0.3j) + c_re, c_im = c.real, c.imag + xp = np.random.random(5) + for cls in (PPoly, BPoly): + p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x) + for nu in [0, 1, 2]: + assert_allclose(p(xp, nu).real, p_re(xp, nu)) + assert_allclose(p(xp, nu).imag, p_im(xp, nu)) + + def test_axis(self): + np.random.seed(12345) + c = np.random.rand(3, 4, 5, 6, 7, 8) + c_s = c.shape + xp = np.random.random((1, 2)) + for axis in (0, 1, 2, 3): + k, m = c.shape[axis], c.shape[axis+1] + x = np.sort(np.random.rand(m+1)) + for cls in (PPoly, BPoly): + p = cls(c, x, axis=axis) + assert_equal(p.c.shape, + c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:]) + res = p(xp) + targ_shape = c_s[:axis] + xp.shape + c_s[2+axis:] + assert_equal(res.shape, targ_shape) + + # deriv/antideriv does not drop the axis + for p1 in [cls(c, x, axis=axis).derivative(), + cls(c, x, axis=axis).derivative(2), + cls(c, x, axis=axis).antiderivative(), + cls(c, x, axis=axis).antiderivative(2)]: + assert_equal(p1.axis, p.axis) + + # c array needs two axes for the coefficients and intervals, so + # 0 <= axis < c.ndim-1; raise otherwise + for axis in (-1, 4, 5, 6): + for cls in (BPoly, PPoly): + assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis)) + + +class TestPolySubclassing(object): + class P(PPoly): + pass + + class B(BPoly): + pass + + def _make_polynomials(self): + np.random.seed(1234) + x = np.sort(np.random.random(3)) + c = np.random.random((4, 2)) + return self.P(c, x), self.B(c, x) + + def test_derivative(self): + pp, bp = self._make_polynomials() + for p in (pp, bp): + pd = p.derivative() + assert_equal(p.__class__, pd.__class__) + + ppa = pp.antiderivative() + assert_equal(pp.__class__, ppa.__class__) + + def test_from_spline(self): + np.random.seed(1234) + x = np.sort(np.r_[0, np.random.rand(11), 1]) + y = np.random.rand(len(x)) + + spl = splrep(x, y, s=0) + pp = self.P.from_spline(spl) + assert_equal(pp.__class__, self.P) + + def test_conversions(self): + pp, bp = self._make_polynomials() + + pp1 = self.P.from_bernstein_basis(bp) + assert_equal(pp1.__class__, self.P) + + bp1 = self.B.from_power_basis(pp) + assert_equal(bp1.__class__, self.B) + + def test_from_derivatives(self): + x = [0, 1, 2] + y = [[1], [2], [3]] + bp = self.B.from_derivatives(x, y) + assert_equal(bp.__class__, self.B) + + +class TestPPoly(object): + def test_simple(self): + c = np.array([[1, 4], [2, 5], [3, 6]]) + x = np.array([0, 0.5, 1]) + p = PPoly(c, x) + assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3) + assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6) + + def test_periodic(self): + c = np.array([[1, 4], [2, 5], [3, 6]]) + x = np.array([0, 0.5, 1]) + p = PPoly(c, x, extrapolate='periodic') + + assert_allclose(p(1.3), 1 * 0.3 ** 2 + 2 * 0.3 + 3) + assert_allclose(p(-0.3), 4 * (0.7 - 0.5) ** 2 + 5 * (0.7 - 0.5) + 6) + + assert_allclose(p(1.3, 1), 2 * 0.3 + 2) + assert_allclose(p(-0.3, 1), 8 * (0.7 - 0.5) + 5) + + def test_descending(self): + def binom_matrix(power): + n = np.arange(power + 1).reshape(-1, 1) + k = np.arange(power + 1) + B = binom(n, k) + return B[::-1, ::-1] + + np.random.seed(0) + + power = 3 + for m in [10, 20, 30]: + x = np.sort(np.random.uniform(0, 10, m + 1)) + ca = np.random.uniform(-2, 2, size=(power + 1, m)) + + h = np.diff(x) + h_powers = h[None, :] ** np.arange(power + 1)[::-1, None] + B = binom_matrix(power) + cap = ca * h_powers + cdp = np.dot(B.T, cap) + cd = cdp / h_powers + + pa = PPoly(ca, x, extrapolate=True) + pd = PPoly(cd[:, ::-1], x[::-1], extrapolate=True) + + x_test = np.random.uniform(-10, 20, 100) + 
assert_allclose(pa(x_test), pd(x_test), rtol=1e-13) + assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13) + + pa_d = pa.derivative() + pd_d = pd.derivative() + + assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13) + + # Antiderivatives won't be equal because fixing continuity is + # done in the reverse order, but surely the differences should be + # equal. + pa_i = pa.antiderivative() + pd_i = pd.antiderivative() + for a, b in np.random.uniform(-10, 20, (5, 2)): + int_a = pa.integrate(a, b) + int_d = pd.integrate(a, b) + assert_allclose(int_a, int_d, rtol=1e-13) + assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a), + rtol=1e-13) + + roots_d = pd.roots() + roots_a = pa.roots() + assert_allclose(roots_a, np.sort(roots_d), rtol=1e-12) + + def test_multi_shape(self): + c = np.random.rand(6, 2, 1, 2, 3) + x = np.array([0, 0.5, 1]) + p = PPoly(c, x) + assert_equal(p.x.shape, x.shape) + assert_equal(p.c.shape, c.shape) + assert_equal(p(0.3).shape, c.shape[2:]) + + assert_equal(p(np.random.rand(5, 6)).shape, (5, 6) + c.shape[2:]) + + dp = p.derivative() + assert_equal(dp.c.shape, (5, 2, 1, 2, 3)) + ip = p.antiderivative() + assert_equal(ip.c.shape, (7, 2, 1, 2, 3)) + + def test_construct_fast(self): + np.random.seed(1234) + c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float) + x = np.array([0, 0.5, 1]) + p = PPoly.construct_fast(c, x) + assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3) + assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6) + + def test_vs_alternative_implementations(self): + np.random.seed(1234) + c = np.random.rand(3, 12, 22) + x = np.sort(np.r_[0, np.random.rand(11), 1]) + + p = PPoly(c, x) + + xp = np.r_[0.3, 0.5, 0.33, 0.6] + expected = _ppoly_eval_1(c, x, xp) + assert_allclose(p(xp), expected) + + expected = _ppoly_eval_2(c[:,:,0], x, xp) + assert_allclose(p(xp)[:,0], expected) + + def test_from_spline(self): + np.random.seed(1234) + x = np.sort(np.r_[0, np.random.rand(11), 1]) + y = np.random.rand(len(x)) + + spl = splrep(x, y, s=0) + pp = PPoly.from_spline(spl) + + xi = np.linspace(0, 1, 200) + assert_allclose(pp(xi), splev(xi, spl)) + + # make sure .from_spline accepts BSpline objects + b = BSpline(*spl) + ppp = PPoly.from_spline(b) + assert_allclose(ppp(xi), b(xi)) + + # BSpline's extrapolate attribute propagates unless overridden + t, c, k = spl + for extrap in (None, True, False): + b = BSpline(t, c, k, extrapolate=extrap) + p = PPoly.from_spline(b) + assert_equal(p.extrapolate, b.extrapolate) + + def test_derivative_simple(self): + np.random.seed(1234) + c = np.array([[4, 3, 2, 1]]).T + dc = np.array([[3*4, 2*3, 2]]).T + ddc = np.array([[2*3*4, 1*2*3]]).T + x = np.array([0, 1]) + + pp = PPoly(c, x) + dpp = PPoly(dc, x) + ddpp = PPoly(ddc, x) + + assert_allclose(pp.derivative().c, dpp.c) + assert_allclose(pp.derivative(2).c, ddpp.c) + + def test_derivative_eval(self): + np.random.seed(1234) + x = np.sort(np.r_[0, np.random.rand(11), 1]) + y = np.random.rand(len(x)) + + spl = splrep(x, y, s=0) + pp = PPoly.from_spline(spl) + + xi = np.linspace(0, 1, 200) + for dx in range(0, 3): + assert_allclose(pp(xi, dx), splev(xi, spl, dx)) + + def test_derivative(self): + np.random.seed(1234) + x = np.sort(np.r_[0, np.random.rand(11), 1]) + y = np.random.rand(len(x)) + + spl = splrep(x, y, s=0, k=5) + pp = PPoly.from_spline(spl) + + xi = np.linspace(0, 1, 200) + for dx in range(0, 10): + assert_allclose(pp(xi, dx), pp.derivative(dx)(xi), + err_msg="dx=%d" % (dx,)) + + def test_antiderivative_of_constant(self): + # https://github.com/scipy/scipy/issues/4216 + p = 
PPoly([[1.]], [0, 1]) + assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c) + assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x) + + def test_antiderivative_regression_4355(self): + # https://github.com/scipy/scipy/issues/4355 + p = PPoly([[1., 0.5]], [0, 1, 2]) + q = p.antiderivative() + assert_equal(q.c, [[1, 0.5], [0, 1]]) + assert_equal(q.x, [0, 1, 2]) + assert_allclose(p.integrate(0, 2), 1.5) + assert_allclose(q(2) - q(0), 1.5) + + def test_antiderivative_simple(self): + np.random.seed(1234) + # [ p1(x) = 3*x**2 + 2*x + 1, + # p2(x) = 1.6875] + c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T + # [ pp1(x) = x**3 + x**2 + x, + # pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)] + ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T + # [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2, + # ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)] + iic = np.array([[1/4, 1/3, 1/2, 0, 0], + [0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T + x = np.array([0, 0.25, 1]) + + pp = PPoly(c, x) + ipp = pp.antiderivative() + iipp = pp.antiderivative(2) + iipp2 = ipp.antiderivative() + + assert_allclose(ipp.x, x) + assert_allclose(ipp.c.T, ic.T) + assert_allclose(iipp.c.T, iic.T) + assert_allclose(iipp2.c.T, iic.T) + + def test_antiderivative_vs_derivative(self): + np.random.seed(1234) + x = np.linspace(0, 1, 30)**2 + y = np.random.rand(len(x)) + spl = splrep(x, y, s=0, k=5) + pp = PPoly.from_spline(spl) + + for dx in range(0, 10): + ipp = pp.antiderivative(dx) + + # check that derivative is inverse op + pp2 = ipp.derivative(dx) + assert_allclose(pp.c, pp2.c) + + # check continuity + for k in range(dx): + pp2 = ipp.derivative(k) + + r = 1e-13 + endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:] + + assert_allclose(pp2(pp2.x[1:]), pp2(endpoint), + rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k)) + + def test_antiderivative_vs_spline(self): + np.random.seed(1234) + x = np.sort(np.r_[0, np.random.rand(11), 1]) + y = np.random.rand(len(x)) + + spl = splrep(x, y, s=0, k=5) + pp = PPoly.from_spline(spl) + + for dx in range(0, 10): + pp2 = pp.antiderivative(dx) + spl2 = splantider(spl, dx) + + xi = np.linspace(0, 1, 200) + assert_allclose(pp2(xi), splev(xi, spl2), + rtol=1e-7) + + def test_antiderivative_continuity(self): + c = np.array([[2, 1, 2, 2], [2, 1, 3, 3]]).T + x = np.array([0, 0.5, 1]) + + p = PPoly(c, x) + ip = p.antiderivative() + + # check continuity + assert_allclose(ip(0.5 - 1e-9), ip(0.5 + 1e-9), rtol=1e-8) + + # check that only lowest order coefficients were changed + p2 = ip.derivative() + assert_allclose(p2.c, p.c) + + def test_integrate(self): + np.random.seed(1234) + x = np.sort(np.r_[0, np.random.rand(11), 1]) + y = np.random.rand(len(x)) + + spl = splrep(x, y, s=0, k=5) + pp = PPoly.from_spline(spl) + + a, b = 0.3, 0.9 + ig = pp.integrate(a, b) + + ipp = pp.antiderivative() + assert_allclose(ig, ipp(b) - ipp(a)) + assert_allclose(ig, splint(a, b, spl)) + + a, b = -0.3, 0.9 + ig = pp.integrate(a, b, extrapolate=True) + assert_allclose(ig, ipp(b) - ipp(a)) + + assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all()) + + def test_integrate_periodic(self): + x = np.array([1, 2, 4]) + c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]]) + + P = PPoly(c, x, extrapolate='periodic') + I = P.antiderivative() + + period_int = I(4) - I(1) + + assert_allclose(P.integrate(1, 4), period_int) + assert_allclose(P.integrate(-10, -7), period_int) + assert_allclose(P.integrate(-10, -4), 2 * period_int) + + assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5)) + 
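# Ranges that cross the period boundary wrap around: with base
+        # interval [1, 4], integrating over [3.5, 5] is the same as
+        # integrating over [3.5, 4] plus [1, 2].
+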
assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5)) + assert_allclose(P.integrate(3.5 + 12, 5 + 12), + I(2) - I(1) + I(4) - I(3.5)) + assert_allclose(P.integrate(3.5, 5 + 12), + I(2) - I(1) + I(4) - I(3.5) + 4 * period_int) + + assert_allclose(P.integrate(0, -1), I(2) - I(3)) + assert_allclose(P.integrate(-9, -10), I(2) - I(3)) + assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int) + + def test_roots(self): + x = np.linspace(0, 1, 31)**2 + y = np.sin(30*x) + + spl = splrep(x, y, s=0, k=3) + pp = PPoly.from_spline(spl) + + r = pp.roots() + r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)] + assert_allclose(r, sproot(spl), atol=1e-15) + + def test_roots_idzero(self): + # Roots for piecewise polynomials with identically zero + # sections. + c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T + x = np.array([0, 0.4, 0.6, 1.0]) + + pp = PPoly(c, x) + assert_array_equal(pp.roots(), + [0.25, 0.4, np.nan, 0.6 + 0.25]) + + # ditto for p.solve(const) with sections identically equal const + const = 2. + c1 = c.copy() + c1[1, :] += const + pp1 = PPoly(c1, x) + + assert_array_equal(pp1.solve(const), + [0.25, 0.4, np.nan, 0.6 + 0.25]) + + def test_roots_all_zero(self): + # test the code path for the polynomial being identically zero everywhere + c = [[0], [0]] + x = [0, 1] + p = PPoly(c, x) + assert_array_equal(p.roots(), [0, np.nan]) + assert_array_equal(p.solve(0), [0, np.nan]) + assert_array_equal(p.solve(1), []) + + c = [[0, 0], [0, 0]] + x = [0, 1, 2] + p = PPoly(c, x) + assert_array_equal(p.roots(), [0, np.nan, 1, np.nan]) + assert_array_equal(p.solve(0), [0, np.nan, 1, np.nan]) + assert_array_equal(p.solve(1), []) + + def test_roots_repeated(self): + # Check roots repeated in multiple sections are reported only + # once. + + # [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root + c = np.array([[1, 0, -1], [-1, 0, 0]]).T + x = np.array([-1, 0, 1]) + + pp = PPoly(c, x) + assert_array_equal(pp.roots(), [-2, 0]) + assert_array_equal(pp.roots(extrapolate=False), [0]) + + def test_roots_discont(self): + # Check that a discontinuity across zero is reported as root + c = np.array([[1], [-1]]).T + x = np.array([0, 0.5, 1]) + pp = PPoly(c, x) + assert_array_equal(pp.roots(), [0.5]) + assert_array_equal(pp.roots(discontinuity=False), []) + + # ditto for a discontinuity across y: + assert_array_equal(pp.solve(0.5), [0.5]) + assert_array_equal(pp.solve(0.5, discontinuity=False), []) + + assert_array_equal(pp.solve(1.5), []) + assert_array_equal(pp.solve(1.5, discontinuity=False), []) + + def test_roots_random(self): + # Check high-order polynomials with random coefficients + np.random.seed(1234) + + num = 0 + + for extrapolate in (True, False): + for order in range(0, 20): + x = np.unique(np.r_[0, 10 * np.random.rand(30), 10]) + c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1 + + pp = PPoly(c, x) + for y in [0, np.random.random()]: + r = pp.solve(y, discontinuity=False, extrapolate=extrapolate) + + for i in range(2): + for j in range(3): + rr = r[i,j] + if rr.size > 0: + # Check that the reported roots indeed are roots + num += rr.size + val = pp(rr, extrapolate=extrapolate)[:,i,j] + cmpval = pp(rr, nu=1, + extrapolate=extrapolate)[:,i,j] + msg = "(%r) r = %s" % (extrapolate, repr(rr),) + assert_allclose((val-y) / cmpval, 0, atol=1e-7, + err_msg=msg) + + # Check that we checked a number of roots + assert_(num > 100, repr(num)) + + def test_roots_croots(self): + # Test the complex root finding algorithm + np.random.seed(1234) + + for k in range(1, 15): + c = np.random.rand(k, 1, 130) + + if 
k == 3: + # add a case with zero discriminant + c[:,0,0] = 1, 2, 1 + + for y in [0, np.random.random()]: + w = np.empty(c.shape, dtype=complex) + _ppoly._croots_poly1(c, w) + + if k == 1: + assert_(np.isnan(w).all()) + continue + + res = 0 + cres = 0 + for i in range(k): + res += c[i,None] * w**(k-1-i) + cres += abs(c[i,None] * w**(k-1-i)) + with np.errstate(invalid='ignore'): + res /= cres + res = res.ravel() + res = res[~np.isnan(res)] + assert_allclose(res, 0, atol=1e-10) + + def test_extrapolate_attr(self): + # [ 1 - x**2 ] + c = np.array([[-1, 0, 1]]).T + x = np.array([0, 1]) + + for extrapolate in [True, False, None]: + pp = PPoly(c, x, extrapolate=extrapolate) + pp_d = pp.derivative() + pp_i = pp.antiderivative() + + if extrapolate is False: + assert_(np.isnan(pp([-0.1, 1.1])).all()) + assert_(np.isnan(pp_i([-0.1, 1.1])).all()) + assert_(np.isnan(pp_d([-0.1, 1.1])).all()) + assert_equal(pp.roots(), [1]) + else: + assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2]) + assert_(not np.isnan(pp_i([-0.1, 1.1])).any()) + assert_(not np.isnan(pp_d([-0.1, 1.1])).any()) + assert_allclose(pp.roots(), [1, -1]) + + +class TestBPoly(object): + def test_simple(self): + x = [0, 1] + c = [[3]] + bp = BPoly(c, x) + assert_allclose(bp(0.1), 3.) + + def test_simple2(self): + x = [0, 1] + c = [[3], [1]] + bp = BPoly(c, x) # 3*(1-x) + 1*x + assert_allclose(bp(0.1), 3*0.9 + 1.*0.1) + + def test_simple3(self): + x = [0, 1] + c = [[3], [1], [4]] + bp = BPoly(c, x) # 3 * (1-x)**2 + 2 * x (1-x) + 4 * x**2 + assert_allclose(bp(0.2), + 3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2) + + def test_simple4(self): + x = [0, 1] + c = [[1], [1], [1], [2]] + bp = BPoly(c, x) + assert_allclose(bp(0.3), 0.7**3 + + 3 * 0.7**2 * 0.3 + + 3 * 0.7 * 0.3**2 + + 2 * 0.3**3) + + def test_simple5(self): + x = [0, 1] + c = [[1], [1], [8], [2], [1]] + bp = BPoly(c, x) + assert_allclose(bp(0.3), 0.7**4 + + 4 * 0.7**3 * 0.3 + + 8 * 6 * 0.7**2 * 0.3**2 + + 2 * 4 * 0.7 * 0.3**3 + + 0.3**4) + + def test_periodic(self): + x = [0, 1, 3] + c = [[3, 0], [0, 0], [0, 2]] + # [3*(1-x)**2, 2*((x-1)/2)**2] + bp = BPoly(c, x, extrapolate='periodic') + + assert_allclose(bp(3.4), 3 * 0.6**2) + assert_allclose(bp(-1.3), 2 * (0.7/2)**2) + + assert_allclose(bp(3.4, 1), -6 * 0.6) + assert_allclose(bp(-1.3, 1), 2 * (0.7/2)) + + def test_descending(self): + np.random.seed(0) + + power = 3 + for m in [10, 20, 30]: + x = np.sort(np.random.uniform(0, 10, m + 1)) + ca = np.random.uniform(-0.1, 0.1, size=(power + 1, m)) + # We need only to flip coefficients to get it right! + cd = ca[::-1].copy() + + pa = BPoly(ca, x, extrapolate=True) + pd = BPoly(cd[:, ::-1], x[::-1], extrapolate=True) + + x_test = np.random.uniform(-10, 20, 100) + assert_allclose(pa(x_test), pd(x_test), rtol=1e-13) + assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13) + + pa_d = pa.derivative() + pd_d = pd.derivative() + + assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13) + + # Antiderivatives won't be equal because fixing continuity is + # done in the reverse order, but surely the differences should be + # equal. 
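+            # (verified below by comparing definite integrals over random
+            # intervals [a, b])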
+ pa_i = pa.antiderivative() + pd_i = pd.antiderivative() + for a, b in np.random.uniform(-10, 20, (5, 2)): + int_a = pa.integrate(a, b) + int_d = pd.integrate(a, b) + assert_allclose(int_a, int_d, rtol=1e-12) + assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a), + rtol=1e-12) + + def test_multi_shape(self): + c = np.random.rand(6, 2, 1, 2, 3) + x = np.array([0, 0.5, 1]) + p = BPoly(c, x) + assert_equal(p.x.shape, x.shape) + assert_equal(p.c.shape, c.shape) + assert_equal(p(0.3).shape, c.shape[2:]) + assert_equal(p(np.random.rand(5,6)).shape, + (5,6)+c.shape[2:]) + + dp = p.derivative() + assert_equal(dp.c.shape, (5, 2, 1, 2, 3)) + + def test_interval_length(self): + x = [0, 2] + c = [[3], [1], [4]] + bp = BPoly(c, x) + xval = 0.1 + s = xval / 2 # s = (x - xa) / (xb - xa) + assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s) + + def test_two_intervals(self): + x = [0, 1, 3] + c = [[3, 0], [0, 0], [0, 2]] + bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2] + + assert_allclose(bp(0.4), 3 * 0.6*0.6) + assert_allclose(bp(1.7), 2 * (0.7/2)**2) + + def test_extrapolate_attr(self): + x = [0, 2] + c = [[3], [1], [4]] + bp = BPoly(c, x) + + for extrapolate in (True, False, None): + bp = BPoly(c, x, extrapolate=extrapolate) + bp_d = bp.derivative() + if extrapolate is False: + assert_(np.isnan(bp([-0.1, 2.1])).all()) + assert_(np.isnan(bp_d([-0.1, 2.1])).all()) + else: + assert_(not np.isnan(bp([-0.1, 2.1])).any()) + assert_(not np.isnan(bp_d([-0.1, 2.1])).any()) + + +class TestBPolyCalculus(object): + def test_derivative(self): + x = [0, 1, 3] + c = [[3, 0], [0, 0], [0, 2]] + bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2] + bp_der = bp.derivative() + assert_allclose(bp_der(0.4), -6*(0.6)) + assert_allclose(bp_der(1.7), 0.7) + + # derivatives in-place + assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)], + [-6*(1-0.4), 6., 0.]) + assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)], + [0.7, 1., 0]) + + def test_derivative_ppoly(self): + # make sure it's consistent w/ power basis + np.random.seed(1234) + m, k = 5, 8 # number of intervals, order + x = np.sort(np.random.random(m)) + c = np.random.random((k, m-1)) + bp = BPoly(c, x) + pp = PPoly.from_bernstein_basis(bp) + + for d in range(k): + bp = bp.derivative() + pp = pp.derivative() + xp = np.linspace(x[0], x[-1], 21) + assert_allclose(bp(xp), pp(xp)) + + def test_deriv_inplace(self): + np.random.seed(1234) + m, k = 5, 8 # number of intervals, order + x = np.sort(np.random.random(m)) + c = np.random.random((k, m-1)) + + # test both real and complex coefficients + for cc in [c.copy(), c*(1. + 2.j)]: + bp = BPoly(cc, x) + xp = np.linspace(x[0], x[-1], 21) + for i in range(k): + assert_allclose(bp(xp, i), bp.derivative(i)(xp)) + + def test_antiderivative_simple(self): + # f(x) = x for x \in [0, 1), + # (x-1)/2 for x \in [1, 3] + # + # antiderivative is then + # F(x) = x**2 / 2 for x \in [0, 1), + # 0.5*x*(x/2 - 1) + A for x \in [1, 3] + # where A = 3/4 for continuity at x = 1. + x = [0, 1, 3] + c = [[0, 0], [1, 1]] + + bp = BPoly(c, x) + bi = bp.antiderivative() + + xx = np.linspace(0, 3, 11) + assert_allclose(bi(xx), + np.where(xx < 1, xx**2 / 2., + 0.5 * xx * (xx/2. 
- 1) + 3./4),
+                        atol=1e-12, rtol=1e-12)
+
+    def test_der_antider(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10, 2, 3))
+        bp = BPoly(c, x)
+
+        xx = np.linspace(x[0], x[-1], 100)
+        assert_allclose(bp.antiderivative().derivative()(xx),
+                        bp(xx), atol=1e-12, rtol=1e-12)
+
+    def test_antider_ppoly(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10, 2, 3))
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+
+        xx = np.linspace(x[0], x[-1], 10)
+
+        assert_allclose(bp.antiderivative(2)(xx),
+                        pp.antiderivative(2)(xx), atol=1e-12, rtol=1e-12)
+
+    def test_antider_continuous(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10))
+        bp = BPoly(c, x).antiderivative()
+
+        xx = bp.x[1:-1]
+        assert_allclose(bp(xx - 1e-14),
+                        bp(xx + 1e-14), atol=1e-12, rtol=1e-12)
+
+    def test_integrate(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10))
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+        assert_allclose(bp.integrate(0, 1),
+                        pp.integrate(0, 1), atol=1e-12, rtol=1e-12)
+
+    def test_integrate_extrap(self):
+        c = [[1]]
+        x = [0, 1]
+        b = BPoly(c, x)
+
+        # default is extrapolate=True
+        assert_allclose(b.integrate(0, 2), 2., atol=1e-14)
+
+        # .integrate argument overrides self.extrapolate
+        b1 = BPoly(c, x, extrapolate=False)
+        assert_(np.isnan(b1.integrate(0, 2)))
+        assert_allclose(b1.integrate(0, 2, extrapolate=True), 2., atol=1e-14)
+
+    def test_integrate_periodic(self):
+        x = np.array([1, 2, 4])
+        c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
+
+        P = BPoly.from_power_basis(PPoly(c, x), extrapolate='periodic')
+        I = P.antiderivative()
+
+        period_int = I(4) - I(1)
+
+        assert_allclose(P.integrate(1, 4), period_int)
+        assert_allclose(P.integrate(-10, -7), period_int)
+        assert_allclose(P.integrate(-10, -4), 2 * period_int)
+
+        assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
+        assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5 + 12, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
+
+        assert_allclose(P.integrate(0, -1), I(2) - I(3))
+        assert_allclose(P.integrate(-9, -10), I(2) - I(3))
+        assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
+
+    def test_antider_neg(self):
+        # .derivative(-nu) ==> .antiderivative(nu) and vice versa
+        c = [[1]]
+        x = [0, 1]
+        b = BPoly(c, x)
+
+        xx = np.linspace(0, 1, 21)
+
+        assert_allclose(b.derivative(-1)(xx), b.antiderivative()(xx),
+                        atol=1e-12, rtol=1e-12)
+        assert_allclose(b.derivative(1)(xx), b.antiderivative(-1)(xx),
+                        atol=1e-12, rtol=1e-12)
+
+
+class TestPolyConversions(object):
+    def test_bp_from_pp(self):
+        x = [0, 1, 3]
+        c = [[3, 2], [1, 8], [4, 3]]
+        pp = PPoly(c, x)
+        bp = BPoly.from_power_basis(pp)
+        pp1 = PPoly.from_bernstein_basis(bp)
+
+        xp = [0.1, 1.4]
+        assert_allclose(pp(xp), bp(xp))
+        assert_allclose(pp(xp), pp1(xp))
+
+    def test_bp_from_pp_random(self):
+        np.random.seed(1234)
+        m, k = 5, 8  # number of intervals, order
+        x = np.sort(np.random.random(m))
+        c = np.random.random((k, m-1))
+        pp = PPoly(c, x)
+        bp = BPoly.from_power_basis(pp)
+        pp1 = PPoly.from_bernstein_basis(bp)
+
+        xp = np.linspace(x[0], x[-1], 21)
+        assert_allclose(pp(xp), bp(xp))
+        assert_allclose(pp(xp), pp1(xp))
+
+    def test_pp_from_bp(self):
+        x = [0, 1, 3]
+        c = [[3, 3], [1, 1], [4, 2]]
+        bp = BPoly(c, x)
+        pp = 
PPoly.from_bernstein_basis(bp) + bp1 = BPoly.from_power_basis(pp) + + xp = [0.1, 1.4] + assert_allclose(bp(xp), pp(xp)) + assert_allclose(bp(xp), bp1(xp)) + + +class TestBPolyFromDerivatives(object): + def test_make_poly_1(self): + c1 = BPoly._construct_from_derivatives(0, 1, [2], [3]) + assert_allclose(c1, [2., 3.]) + + def test_make_poly_2(self): + c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1]) + assert_allclose(c1, [1., 1., 1.]) + + # f'(0) = 3 + c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1]) + assert_allclose(c2, [2., 7./2, 1.]) + + # f'(1) = 3 + c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3]) + assert_allclose(c3, [2., -0.5, 1.]) + + def test_make_poly_3(self): + # f'(0)=2, f''(0)=3 + c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4]) + assert_allclose(c1, [1., 5./3, 17./6, 4.]) + + # f'(1)=2, f''(1)=3 + c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3]) + assert_allclose(c2, [1., 19./6, 10./3, 4.]) + + # f'(0)=2, f'(1)=3 + c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3]) + assert_allclose(c3, [1., 5./3, 3., 4.]) + + def test_make_poly_12(self): + np.random.seed(12345) + ya = np.r_[0, np.random.random(5)] + yb = np.r_[0, np.random.random(5)] + + c = BPoly._construct_from_derivatives(0, 1, ya, yb) + pp = BPoly(c[:, None], [0, 1]) + for j in range(6): + assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]]) + pp = pp.derivative() + + def test_raise_degree(self): + np.random.seed(12345) + x = [0, 1] + k, d = 8, 5 + c = np.random.random((k, 1, 2, 3, 4)) + bp = BPoly(c, x) + + c1 = BPoly._raise_degree(c, d) + bp1 = BPoly(c1, x) + + xp = np.linspace(0, 1, 11) + assert_allclose(bp(xp), bp1(xp)) + + def test_xi_yi(self): + assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0]) + + def test_coords_order(self): + xi = [0, 0, 1] + yi = [[0], [0], [0]] + assert_raises(ValueError, BPoly.from_derivatives, xi, yi) + + def test_zeros(self): + xi = [0, 1, 2, 3] + yi = [[0, 0], [0], [0, 0], [0, 0]] # NB: will have to raise the degree + pp = BPoly.from_derivatives(xi, yi) + assert_(pp.c.shape == (4, 3)) + + ppd = pp.derivative() + for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]: + assert_allclose([pp(xp), ppd(xp)], [0., 0.]) + + def _make_random_mk(self, m, k): + # k derivatives at each breakpoint + np.random.seed(1234) + xi = np.asarray([1. * j**2 for j in range(m+1)]) + yi = [np.random.random(k) for j in range(m+1)] + return xi, yi + + def test_random_12(self): + m, k = 5, 12 + xi, yi = self._make_random_mk(m, k) + pp = BPoly.from_derivatives(xi, yi) + + for order in range(k//2): + assert_allclose(pp(xi), [yy[order] for yy in yi]) + pp = pp.derivative() + + def test_order_zero(self): + m, k = 5, 12 + xi, yi = self._make_random_mk(m, k) + assert_raises(ValueError, BPoly.from_derivatives, + **dict(xi=xi, yi=yi, orders=0)) + + def test_orders_too_high(self): + m, k = 5, 12 + xi, yi = self._make_random_mk(m, k) + + pp = BPoly.from_derivatives(xi, yi, orders=2*k-1) # this is still ok + assert_raises(ValueError, BPoly.from_derivatives, # but this is not + **dict(xi=xi, yi=yi, orders=2*k)) + + def test_orders_global(self): + m, k = 5, 12 + xi, yi = self._make_random_mk(m, k) + + # ok, this is confusing. 
Local polynomials will be of order 5,
+        # which means that up to the 2nd derivatives are used at each point
+        order = 5
+        pp = BPoly.from_derivatives(xi, yi, orders=order)
+
+        for j in range(order//2+1):
+            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+            pp = pp.derivative()
+        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
+
+        # now repeat with `order` being even: on each interval, it uses
+        # order//2 'derivatives' @ the right-hand endpoint and
+        # order//2+1 'derivatives' @ the left-hand endpoint
+        order = 6
+        pp = BPoly.from_derivatives(xi, yi, orders=order)
+        for j in range(order//2):
+            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+            pp = pp.derivative()
+        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
+
+    def test_orders_local(self):
+        m, k = 7, 12
+        xi, yi = self._make_random_mk(m, k)
+
+        orders = [o + 1 for o in range(m)]
+        for i, x in enumerate(xi[1:-1]):
+            pp = BPoly.from_derivatives(xi, yi, orders=orders)
+            for j in range(orders[i] // 2 + 1):
+                assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
+                pp = pp.derivative()
+            assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
+
+    def test_yi_trailing_dims(self):
+        m, k = 7, 5
+        xi = np.sort(np.random.random(m+1))
+        yi = np.random.random((m+1, k, 6, 7, 8))
+        pp = BPoly.from_derivatives(xi, yi)
+        assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
+
+    def test_gh_5430(self):
+        # At least one of these raises an error unless gh-5430 is
+        # fixed. In py2k an int is implemented using a C long, so
+        # which one fails depends on your system. In py3k there is only
+        # one arbitrary precision integer type, so both should fail.
+        orders = np.int32(1)
+        p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+        assert_almost_equal(p(0), 0)
+        orders = np.int64(1)
+        p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+        assert_almost_equal(p(0), 0)
+        orders = 1
+        # This worked before; make sure it still works
+        p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+        assert_almost_equal(p(0), 0)
+
+
+class TestNdPPoly(object):
+    def test_simple_1d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5)
+        x = np.linspace(0, 1, 5+1)
+
+        xi = np.random.rand(200)
+
+        p = NdPPoly(c, (x,))
+        v1 = p((xi,))
+
+        v2 = _ppoly_eval_1(c[:,:,None], x, xi).ravel()
+        assert_allclose(v1, v2)
+
+    def test_simple_2d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7)
+        x = np.linspace(0, 1, 6+1)
+        y = np.linspace(0, 1, 7+1)**2
+
+        xi = np.random.rand(200)
+        yi = np.random.rand(200)
+
+        v1 = np.empty([len(xi), 1], dtype=c.dtype)
+        v1.fill(np.nan)
+        _ppoly.evaluate_nd(c.reshape(4*5, 6*7, 1),
+                           (x, y),
+                           np.array([4, 5], dtype=np.intc),
+                           np.c_[xi, yi],
+                           np.array([0, 0], dtype=np.intc),
+                           1,
+                           v1)
+        v1 = v1.ravel()
+        v2 = _ppoly2d_eval(c, (x, y), xi, yi)
+        assert_allclose(v1, v2)
+
+        p = NdPPoly(c, (x, y))
+        for nu in (None, (0, 0), (0, 1), (1, 0), (2, 3), (9, 2)):
+            v1 = p(np.c_[xi, yi], nu=nu)
+            v2 = _ppoly2d_eval(c, (x, y), xi, yi, nu=nu)
+            assert_allclose(v1, v2, err_msg=repr(nu))
+
+    def test_simple_3d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7, 8, 9)
+        x = np.linspace(0, 1, 7+1)
+        y = np.linspace(0, 1, 8+1)**2
+        z = np.linspace(0, 1, 9+1)**3
+
+        xi = np.random.rand(40)
+        yi = np.random.rand(40)
+        zi = np.random.rand(40)
+
+        p = NdPPoly(c, (x, y, z))
+
+        for nu in (None, (0, 0, 0), (0, 1, 0), (1, 0, 0), (2, 3, 0),
+                   (6, 0, 2)):
+            v1 = p((xi, yi, zi), nu=nu)
+            v2 = _ppoly3d_eval(c, (x, y, z), xi, yi, zi, nu=nu)
+
assert_allclose(v1, v2, err_msg=repr(nu)) + + def test_simple_4d(self): + np.random.seed(1234) + + c = np.random.rand(4, 5, 6, 7, 8, 9, 10, 11) + x = np.linspace(0, 1, 8+1) + y = np.linspace(0, 1, 9+1)**2 + z = np.linspace(0, 1, 10+1)**3 + u = np.linspace(0, 1, 11+1)**4 + + xi = np.random.rand(20) + yi = np.random.rand(20) + zi = np.random.rand(20) + ui = np.random.rand(20) + + p = NdPPoly(c, (x, y, z, u)) + v1 = p((xi, yi, zi, ui)) + + v2 = _ppoly4d_eval(c, (x, y, z, u), xi, yi, zi, ui) + assert_allclose(v1, v2) + + def test_deriv_1d(self): + np.random.seed(1234) + + c = np.random.rand(4, 5) + x = np.linspace(0, 1, 5+1) + + p = NdPPoly(c, (x,)) + + # derivative + dp = p.derivative(nu=[1]) + p1 = PPoly(c, x) + dp1 = p1.derivative() + assert_allclose(dp.c, dp1.c) + + # antiderivative + dp = p.antiderivative(nu=[2]) + p1 = PPoly(c, x) + dp1 = p1.antiderivative(2) + assert_allclose(dp.c, dp1.c) + + def test_deriv_3d(self): + np.random.seed(1234) + + c = np.random.rand(4, 5, 6, 7, 8, 9) + x = np.linspace(0, 1, 7+1) + y = np.linspace(0, 1, 8+1)**2 + z = np.linspace(0, 1, 9+1)**3 + + p = NdPPoly(c, (x, y, z)) + + # differentiate vs x + p1 = PPoly(c.transpose(0, 3, 1, 2, 4, 5), x) + dp = p.derivative(nu=[2]) + dp1 = p1.derivative(2) + assert_allclose(dp.c, + dp1.c.transpose(0, 2, 3, 1, 4, 5)) + + # antidifferentiate vs y + p1 = PPoly(c.transpose(1, 4, 0, 2, 3, 5), y) + dp = p.antiderivative(nu=[0, 1, 0]) + dp1 = p1.antiderivative(1) + assert_allclose(dp.c, + dp1.c.transpose(2, 0, 3, 4, 1, 5)) + + # differentiate vs z + p1 = PPoly(c.transpose(2, 5, 0, 1, 3, 4), z) + dp = p.derivative(nu=[0, 0, 3]) + dp1 = p1.derivative(3) + assert_allclose(dp.c, + dp1.c.transpose(2, 3, 0, 4, 5, 1)) + + def test_deriv_3d_simple(self): + # Integrate to obtain function x y**2 z**4 / (2! 4!) 
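+        # Antidifferentiating 1 once in x, twice in y and four times in z
+        # gives x * y**2/2! * z**4/4!; gamma(3) = 2! and gamma(5) = 4! below.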
+ + c = np.ones((1, 1, 1, 3, 4, 5)) + x = np.linspace(0, 1, 3+1)**1 + y = np.linspace(0, 1, 4+1)**2 + z = np.linspace(0, 1, 5+1)**3 + + p = NdPPoly(c, (x, y, z)) + ip = p.antiderivative((1, 0, 4)) + ip = ip.antiderivative((0, 2, 0)) + + xi = np.random.rand(20) + yi = np.random.rand(20) + zi = np.random.rand(20) + + assert_allclose(ip((xi, yi, zi)), + xi * yi**2 * zi**4 / (gamma(3)*gamma(5))) + + def test_integrate_2d(self): + np.random.seed(1234) + c = np.random.rand(4, 5, 16, 17) + x = np.linspace(0, 1, 16+1)**1 + y = np.linspace(0, 1, 17+1)**2 + + # make continuously differentiable so that nquad() has an + # easier time + c = c.transpose(0, 2, 1, 3) + cx = c.reshape(c.shape[0], c.shape[1], -1).copy() + _ppoly.fix_continuity(cx, x, 2) + c = cx.reshape(c.shape) + c = c.transpose(0, 2, 1, 3) + c = c.transpose(1, 3, 0, 2) + cx = c.reshape(c.shape[0], c.shape[1], -1).copy() + _ppoly.fix_continuity(cx, y, 2) + c = cx.reshape(c.shape) + c = c.transpose(2, 0, 3, 1).copy() + + # Check integration + p = NdPPoly(c, (x, y)) + + for ranges in [[(0, 1), (0, 1)], + [(0, 0.5), (0, 1)], + [(0, 1), (0, 0.5)], + [(0.3, 0.7), (0.6, 0.2)]]: + + ig = p.integrate(ranges) + ig2, err2 = nquad(lambda x, y: p((x, y)), ranges, + opts=[dict(epsrel=1e-5, epsabs=1e-5)]*2) + assert_allclose(ig, ig2, rtol=1e-5, atol=1e-5, + err_msg=repr(ranges)) + + def test_integrate_1d(self): + np.random.seed(1234) + c = np.random.rand(4, 5, 6, 16, 17, 18) + x = np.linspace(0, 1, 16+1)**1 + y = np.linspace(0, 1, 17+1)**2 + z = np.linspace(0, 1, 18+1)**3 + + # Check 1D integration + p = NdPPoly(c, (x, y, z)) + + u = np.random.rand(200) + v = np.random.rand(200) + a, b = 0.2, 0.7 + + px = p.integrate_1d(a, b, axis=0) + pax = p.antiderivative((1, 0, 0)) + assert_allclose(px((u, v)), pax((b, u, v)) - pax((a, u, v))) + + py = p.integrate_1d(a, b, axis=1) + pay = p.antiderivative((0, 1, 0)) + assert_allclose(py((u, v)), pay((u, b, v)) - pay((u, a, v))) + + pz = p.integrate_1d(a, b, axis=2) + paz = p.antiderivative((0, 0, 1)) + assert_allclose(pz((u, v)), paz((u, v, b)) - paz((u, v, a))) + + +def _ppoly_eval_1(c, x, xps): + """Evaluate piecewise polynomial manually""" + out = np.zeros((len(xps), c.shape[2])) + for i, xp in enumerate(xps): + if xp < 0 or xp > 1: + out[i,:] = np.nan + continue + j = np.searchsorted(x, xp) - 1 + d = xp - x[j] + assert_(x[j] <= xp < x[j+1]) + r = sum(c[k,j] * d**(c.shape[0]-k-1) + for k in range(c.shape[0])) + out[i,:] = r + return out + + +def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan): + """Evaluate piecewise polynomial manually (another way)""" + a = breaks[0] + b = breaks[-1] + K = coeffs.shape[0] + + saveshape = np.shape(xnew) + xnew = np.ravel(xnew) + res = np.empty_like(xnew) + mask = (xnew >= a) & (xnew <= b) + res[~mask] = fill + xx = xnew.compress(mask) + indxs = np.searchsorted(breaks, xx)-1 + indxs = indxs.clip(0, len(breaks)) + pp = coeffs + diff = xx - breaks.take(indxs) + V = np.vander(diff, N=K) + values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in xrange(len(xx))]) + res[mask] = values + res.shape = saveshape + return res + + +def _dpow(x, y, n): + """ + d^n (x**y) / dx^n + """ + if n < 0: + raise ValueError("invalid derivative order") + elif n > y: + return 0 + else: + return poch(y - n + 1, n) * x**(y - n) + + +def _ppoly2d_eval(c, xs, xnew, ynew, nu=None): + """ + Straightforward evaluation of 2D piecewise polynomial + """ + if nu is None: + nu = (0, 0) + + out = np.empty((len(xnew),), dtype=c.dtype) + + nx, ny = c.shape[:2] + + for jout, (x, y) in enumerate(zip(xnew, ynew)): + 
if not ((xs[0][0] <= x <= xs[0][-1]) and + (xs[1][0] <= y <= xs[1][-1])): + out[jout] = np.nan + continue + + j1 = np.searchsorted(xs[0], x) - 1 + j2 = np.searchsorted(xs[1], y) - 1 + + s1 = x - xs[0][j1] + s2 = y - xs[1][j2] + + val = 0 + + for k1 in range(c.shape[0]): + for k2 in range(c.shape[1]): + val += (c[nx-k1-1,ny-k2-1,j1,j2] + * _dpow(s1, k1, nu[0]) + * _dpow(s2, k2, nu[1])) + + out[jout] = val + + return out + + +def _ppoly3d_eval(c, xs, xnew, ynew, znew, nu=None): + """ + Straightforward evaluation of 3D piecewise polynomial + """ + if nu is None: + nu = (0, 0, 0) + + out = np.empty((len(xnew),), dtype=c.dtype) + + nx, ny, nz = c.shape[:3] + + for jout, (x, y, z) in enumerate(zip(xnew, ynew, znew)): + if not ((xs[0][0] <= x <= xs[0][-1]) and + (xs[1][0] <= y <= xs[1][-1]) and + (xs[2][0] <= z <= xs[2][-1])): + out[jout] = np.nan + continue + + j1 = np.searchsorted(xs[0], x) - 1 + j2 = np.searchsorted(xs[1], y) - 1 + j3 = np.searchsorted(xs[2], z) - 1 + + s1 = x - xs[0][j1] + s2 = y - xs[1][j2] + s3 = z - xs[2][j3] + + val = 0 + for k1 in range(c.shape[0]): + for k2 in range(c.shape[1]): + for k3 in range(c.shape[2]): + val += (c[nx-k1-1,ny-k2-1,nz-k3-1,j1,j2,j3] + * _dpow(s1, k1, nu[0]) + * _dpow(s2, k2, nu[1]) + * _dpow(s3, k3, nu[2])) + + out[jout] = val + + return out + + +def _ppoly4d_eval(c, xs, xnew, ynew, znew, unew, nu=None): + """ + Straightforward evaluation of 4D piecewise polynomial + """ + if nu is None: + nu = (0, 0, 0, 0) + + out = np.empty((len(xnew),), dtype=c.dtype) + + mx, my, mz, mu = c.shape[:4] + + for jout, (x, y, z, u) in enumerate(zip(xnew, ynew, znew, unew)): + if not ((xs[0][0] <= x <= xs[0][-1]) and + (xs[1][0] <= y <= xs[1][-1]) and + (xs[2][0] <= z <= xs[2][-1]) and + (xs[3][0] <= u <= xs[3][-1])): + out[jout] = np.nan + continue + + j1 = np.searchsorted(xs[0], x) - 1 + j2 = np.searchsorted(xs[1], y) - 1 + j3 = np.searchsorted(xs[2], z) - 1 + j4 = np.searchsorted(xs[3], u) - 1 + + s1 = x - xs[0][j1] + s2 = y - xs[1][j2] + s3 = z - xs[2][j3] + s4 = u - xs[3][j4] + + val = 0 + for k1 in range(c.shape[0]): + for k2 in range(c.shape[1]): + for k3 in range(c.shape[2]): + for k4 in range(c.shape[3]): + val += (c[mx-k1-1,my-k2-1,mz-k3-1,mu-k4-1,j1,j2,j3,j4] + * _dpow(s1, k1, nu[0]) + * _dpow(s2, k2, nu[1]) + * _dpow(s3, k3, nu[2]) + * _dpow(s4, k4, nu[3])) + + out[jout] = val + + return out + + +class TestRegularGridInterpolator(object): + def _get_sample_4d(self): + # create a 4d grid of 3 points in each dimension + points = [(0., .5, 1.)] * 4 + values = np.asarray([0., .5, 1.]) + values0 = values[:, np.newaxis, np.newaxis, np.newaxis] + values1 = values[np.newaxis, :, np.newaxis, np.newaxis] + values2 = values[np.newaxis, np.newaxis, :, np.newaxis] + values3 = values[np.newaxis, np.newaxis, np.newaxis, :] + values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000) + return points, values + + def _get_sample_4d_2(self): + # create another 4d grid of 3 points in each dimension + points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2 + values = np.asarray([0., .5, 1.]) + values0 = values[:, np.newaxis, np.newaxis, np.newaxis] + values1 = values[np.newaxis, :, np.newaxis, np.newaxis] + values2 = values[np.newaxis, np.newaxis, :, np.newaxis] + values3 = values[np.newaxis, np.newaxis, np.newaxis, :] + values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000) + return points, values + + def test_list_input(self): + points, values = self._get_sample_4d() + + sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8], + [0.5, 0.5, .5, .5]]) + + 
for method in ['linear', 'nearest']: + interp = RegularGridInterpolator(points, + values.tolist(), + method=method) + v1 = interp(sample.tolist()) + interp = RegularGridInterpolator(points, + values, + method=method) + v2 = interp(sample) + assert_allclose(v1, v2) + + def test_complex(self): + points, values = self._get_sample_4d() + values = values - 2j*values + sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8], + [0.5, 0.5, .5, .5]]) + + for method in ['linear', 'nearest']: + interp = RegularGridInterpolator(points, values, + method=method) + rinterp = RegularGridInterpolator(points, values.real, + method=method) + iinterp = RegularGridInterpolator(points, values.imag, + method=method) + + v1 = interp(sample) + v2 = rinterp(sample) + 1j*iinterp(sample) + assert_allclose(v1, v2) + + def test_linear_xi1d(self): + points, values = self._get_sample_4d_2() + interp = RegularGridInterpolator(points, values) + sample = np.asarray([0.1, 0.1, 10., 9.]) + wanted = 1001.1 + assert_array_almost_equal(interp(sample), wanted) + + def test_linear_xi3d(self): + points, values = self._get_sample_4d() + interp = RegularGridInterpolator(points, values) + sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8], + [0.5, 0.5, .5, .5]]) + wanted = np.asarray([1001.1, 846.2, 555.5]) + assert_array_almost_equal(interp(sample), wanted) + + def test_nearest(self): + points, values = self._get_sample_4d() + interp = RegularGridInterpolator(points, values, method="nearest") + sample = np.asarray([0.1, 0.1, .9, .9]) + wanted = 1100. + assert_array_almost_equal(interp(sample), wanted) + sample = np.asarray([0.1, 0.1, 0.1, 0.1]) + wanted = 0. + assert_array_almost_equal(interp(sample), wanted) + sample = np.asarray([0., 0., 0., 0.]) + wanted = 0. + assert_array_almost_equal(interp(sample), wanted) + sample = np.asarray([1., 1., 1., 1.]) + wanted = 1111. + assert_array_almost_equal(interp(sample), wanted) + sample = np.asarray([0.1, 0.4, 0.6, 0.9]) + wanted = 1055. 
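+        # nearest snaps (0.1, 0.4, 0.6, 0.9) to the grid point (0, .5, .5, 1),
+        # i.e. 0 + 0.5*10 + 0.5*100 + 1*1000 = 1055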
+ assert_array_almost_equal(interp(sample), wanted) + + def test_linear_edges(self): + points, values = self._get_sample_4d() + interp = RegularGridInterpolator(points, values) + sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]]) + wanted = np.asarray([0., 1111.]) + assert_array_almost_equal(interp(sample), wanted) + + def test_valid_create(self): + # create a 2d grid of 3 points in each dimension + points = [(0., .5, 1.), (0., 1., .5)] + values = np.asarray([0., .5, 1.]) + values0 = values[:, np.newaxis] + values1 = values[np.newaxis, :] + values = (values0 + values1 * 10) + assert_raises(ValueError, RegularGridInterpolator, points, values) + points = [((0., .5, 1.), ), (0., .5, 1.)] + assert_raises(ValueError, RegularGridInterpolator, points, values) + points = [(0., .5, .75, 1.), (0., .5, 1.)] + assert_raises(ValueError, RegularGridInterpolator, points, values) + points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)] + assert_raises(ValueError, RegularGridInterpolator, points, values) + points = [(0., .5, 1.), (0., .5, 1.)] + assert_raises(ValueError, RegularGridInterpolator, points, values, + method="undefmethod") + + def test_valid_call(self): + points, values = self._get_sample_4d() + interp = RegularGridInterpolator(points, values) + sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]]) + assert_raises(ValueError, interp, sample, "undefmethod") + sample = np.asarray([[0., 0., 0.], [1., 1., 1.]]) + assert_raises(ValueError, interp, sample) + sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]]) + assert_raises(ValueError, interp, sample) + + def test_out_of_bounds_extrap(self): + points, values = self._get_sample_4d() + interp = RegularGridInterpolator(points, values, bounds_error=False, + fill_value=None) + sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1], + [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]]) + wanted = np.asarray([0., 1111., 11., 11.]) + assert_array_almost_equal(interp(sample, method="nearest"), wanted) + wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9]) + assert_array_almost_equal(interp(sample, method="linear"), wanted) + + def test_out_of_bounds_extrap2(self): + points, values = self._get_sample_4d_2() + interp = RegularGridInterpolator(points, values, bounds_error=False, + fill_value=None) + sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1], + [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]]) + wanted = np.asarray([0., 11., 11., 11.]) + assert_array_almost_equal(interp(sample, method="nearest"), wanted) + wanted = np.asarray([-12.1, 133.1, -1069., -97.9]) + assert_array_almost_equal(interp(sample, method="linear"), wanted) + + def test_out_of_bounds_fill(self): + points, values = self._get_sample_4d() + interp = RegularGridInterpolator(points, values, bounds_error=False, + fill_value=np.nan) + sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1], + [2.1, 2.1, -1.1, -1.1]]) + wanted = np.asarray([np.nan, np.nan, np.nan]) + assert_array_almost_equal(interp(sample, method="nearest"), wanted) + assert_array_almost_equal(interp(sample, method="linear"), wanted) + sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8], + [0.5, 0.5, .5, .5]]) + wanted = np.asarray([1001.1, 846.2, 555.5]) + assert_array_almost_equal(interp(sample), wanted) + + def test_nearest_compare_qhull(self): + points, values = self._get_sample_4d() + interp = RegularGridInterpolator(points, values, method="nearest") + points_qhull = itertools.product(*points) + points_qhull = [p for p in points_qhull] + points_qhull = 
np.asarray(points_qhull) + values_qhull = values.reshape(-1) + interp_qhull = NearestNDInterpolator(points_qhull, values_qhull) + sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8], + [0.5, 0.5, .5, .5]]) + assert_array_almost_equal(interp(sample), interp_qhull(sample)) + + def test_linear_compare_qhull(self): + points, values = self._get_sample_4d() + interp = RegularGridInterpolator(points, values) + points_qhull = itertools.product(*points) + points_qhull = [p for p in points_qhull] + points_qhull = np.asarray(points_qhull) + values_qhull = values.reshape(-1) + interp_qhull = LinearNDInterpolator(points_qhull, values_qhull) + sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8], + [0.5, 0.5, .5, .5]]) + assert_array_almost_equal(interp(sample), interp_qhull(sample)) + + def test_duck_typed_values(self): + x = np.linspace(0, 2, 5) + y = np.linspace(0, 1, 7) + + values = MyValue((5, 7)) + + for method in ('nearest', 'linear'): + interp = RegularGridInterpolator((x, y), values, + method=method) + v1 = interp([0.4, 0.7]) + + interp = RegularGridInterpolator((x, y), values._v, + method=method) + v2 = interp([0.4, 0.7]) + assert_allclose(v1, v2) + + def test_invalid_fill_value(self): + np.random.seed(1234) + x = np.linspace(0, 2, 5) + y = np.linspace(0, 1, 7) + values = np.random.rand(5, 7) + + # integers can be cast to floats + RegularGridInterpolator((x, y), values, fill_value=1) + + # complex values cannot + assert_raises(ValueError, RegularGridInterpolator, + (x, y), values, fill_value=1+2j) + + def test_fillvalue_type(self): + # from #3703; test that interpolator object construction succeeds + values = np.ones((10, 20, 30), dtype='>f4') + points = [np.arange(n) for n in values.shape] + xi = [(1, 1, 1)] + interpolator = RegularGridInterpolator(points, values) + interpolator = RegularGridInterpolator(points, values, fill_value=0.) 
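+
+
+# A minimal usage sketch of the RegularGridInterpolator pattern the tests
+# above exercise (illustrative only, not part of the upstream test suite):
+# grid vectors per axis, values on the full grid, and (N, ndim) query points.
+def _example_regular_grid_usage():
+    x = np.linspace(0., 2., 5)
+    y = np.linspace(0., 1., 7)
+    # f(x, y) = x + 10*y is multilinear, so 'linear' interpolation is exact
+    values = x[:, None] + 10. * y[None, :]
+    interp = RegularGridInterpolator((x, y), values, method="linear",
+                                     bounds_error=False, fill_value=np.nan)
+    pts = np.array([[0.5, 0.25], [1.9, 0.9]])
+    return interp(pts)  # approximately [3.0, 10.9]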
+ + +class MyValue(object): + """ + Minimal indexable object + """ + + def __init__(self, shape): + self.ndim = 2 + self.shape = shape + self._v = np.arange(np.prod(shape)).reshape(shape) + + def __getitem__(self, idx): + return self._v[idx] + + def __array_interface__(self): + return None + + def __array__(self): + raise RuntimeError("No array representation") + + +class TestInterpN(object): + def _sample_2d_data(self): + x = np.arange(1, 6) + x = np.array([.5, 2., 3., 4., 5.5]) + y = np.arange(1, 6) + y = np.array([.5, 2., 3., 4., 5.5]) + z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1], + [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]]) + return x, y, z + + def test_spline_2d(self): + x, y, z = self._sample_2d_data() + lut = RectBivariateSpline(x, y, z) + + xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3], + [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T + assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"), + lut.ev(xi[:, 0], xi[:, 1])) + + def test_list_input(self): + x, y, z = self._sample_2d_data() + xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3], + [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T + + for method in ['nearest', 'linear', 'splinef2d']: + v1 = interpn((x, y), z, xi, method=method) + v2 = interpn((x.tolist(), y.tolist()), z.tolist(), + xi.tolist(), method=method) + assert_allclose(v1, v2, err_msg=method) + + def test_spline_2d_outofbounds(self): + x = np.array([.5, 2., 3., 4., 5.5]) + y = np.array([.5, 2., 3., 4., 5.5]) + z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1], + [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]]) + lut = RectBivariateSpline(x, y, z) + + xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3], + [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T + actual = interpn((x, y), z, xi, method="splinef2d", + bounds_error=False, fill_value=999.99) + expected = lut.ev(xi[:, 0], xi[:, 1]) + expected[2:4] = 999.99 + assert_array_almost_equal(actual, expected) + + # no extrapolation for splinef2d + assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d", + bounds_error=False, fill_value=None) + + def _sample_4d_data(self): + points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2 + values = np.asarray([0., .5, 1.]) + values0 = values[:, np.newaxis, np.newaxis, np.newaxis] + values1 = values[np.newaxis, :, np.newaxis, np.newaxis] + values2 = values[np.newaxis, np.newaxis, :, np.newaxis] + values3 = values[np.newaxis, np.newaxis, np.newaxis, :] + values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000) + return points, values + + def test_linear_4d(self): + # create a 4d grid of 3 points in each dimension + points, values = self._sample_4d_data() + interp_rg = RegularGridInterpolator(points, values) + sample = np.asarray([[0.1, 0.1, 10., 9.]]) + wanted = interpn(points, values, sample, method="linear") + assert_array_almost_equal(interp_rg(sample), wanted) + + def test_4d_linear_outofbounds(self): + # create a 4d grid of 3 points in each dimension + points, values = self._sample_4d_data() + sample = np.asarray([[0.1, -0.1, 10.1, 9.]]) + wanted = 999.99 + actual = interpn(points, values, sample, method="linear", + bounds_error=False, fill_value=999.99) + assert_array_almost_equal(actual, wanted) + + def test_nearest_4d(self): + # create a 4d grid of 3 points in each dimension + points, values = self._sample_4d_data() + interp_rg = RegularGridInterpolator(points, values, method="nearest") + sample = np.asarray([[0.1, 0.1, 10., 9.]]) + wanted = interpn(points, values, sample, method="nearest") + assert_array_almost_equal(interp_rg(sample), wanted) + + def 
test_4d_nearest_outofbounds(self): + # create a 4d grid of 3 points in each dimension + points, values = self._sample_4d_data() + sample = np.asarray([[0.1, -0.1, 10.1, 9.]]) + wanted = 999.99 + actual = interpn(points, values, sample, method="nearest", + bounds_error=False, fill_value=999.99) + assert_array_almost_equal(actual, wanted) + + def test_xi_1d(self): + # verify that 1D xi works as expected + points, values = self._sample_4d_data() + sample = np.asarray([0.1, 0.1, 10., 9.]) + v1 = interpn(points, values, sample, bounds_error=False) + v2 = interpn(points, values, sample[None,:], bounds_error=False) + assert_allclose(v1, v2) + + def test_xi_nd(self): + # verify that higher-d xi works as expected + points, values = self._sample_4d_data() + + np.random.seed(1234) + sample = np.random.rand(2, 3, 4) + + v1 = interpn(points, values, sample, method='nearest', + bounds_error=False) + assert_equal(v1.shape, (2, 3)) + + v2 = interpn(points, values, sample.reshape(-1, 4), + method='nearest', bounds_error=False) + assert_allclose(v1, v2.reshape(v1.shape)) + + def test_xi_broadcast(self): + # verify that the interpolators broadcast xi + x, y, values = self._sample_2d_data() + points = (x, y) + + xi = np.linspace(0, 1, 2) + yi = np.linspace(0, 3, 3) + + for method in ['nearest', 'linear', 'splinef2d']: + sample = (xi[:,None], yi[None,:]) + v1 = interpn(points, values, sample, method=method, + bounds_error=False) + assert_equal(v1.shape, (2, 3)) + + xx, yy = np.meshgrid(xi, yi) + sample = np.c_[xx.T.ravel(), yy.T.ravel()] + + v2 = interpn(points, values, sample, + method=method, bounds_error=False) + assert_allclose(v1, v2.reshape(v1.shape)) + + def test_nonscalar_values(self): + # Verify that non-scalar valued values also works + points, values = self._sample_4d_data() + + np.random.seed(1234) + values = np.random.rand(3, 3, 3, 3, 6) + sample = np.random.rand(7, 11, 4) + + for method in ['nearest', 'linear']: + v = interpn(points, values, sample, method=method, + bounds_error=False) + assert_equal(v.shape, (7, 11, 6), err_msg=method) + + vs = [interpn(points, values[...,j], sample, method=method, + bounds_error=False) + for j in range(6)] + v2 = np.array(vs).transpose(1, 2, 0) + + assert_allclose(v, v2, err_msg=method) + + # Vector-valued splines supported with fitpack + assert_raises(ValueError, interpn, points, values, sample, + method='splinef2d') + + def test_complex(self): + x, y, values = self._sample_2d_data() + points = (x, y) + values = values - 2j*values + + sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3], + [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T + + for method in ['linear', 'nearest']: + v1 = interpn(points, values, sample, method=method) + v2r = interpn(points, values.real, sample, method=method) + v2i = interpn(points, values.imag, sample, method=method) + v2 = v2r + 1j*v2i + assert_allclose(v1, v2) + + # Complex-valued data not supported by spline2fd + _assert_warns(np.ComplexWarning, interpn, points, values, + sample, method='splinef2d') + + def test_duck_typed_values(self): + x = np.linspace(0, 2, 5) + y = np.linspace(0, 1, 7) + + values = MyValue((5, 7)) + + for method in ('nearest', 'linear'): + v1 = interpn((x, y), values, [0.4, 0.7], method=method) + v2 = interpn((x, y), values._v, [0.4, 0.7], method=method) + assert_allclose(v1, v2) + + def test_matrix_input(self): + x = np.linspace(0, 2, 5) + y = np.linspace(0, 1, 7) + + values = np.matrix(np.random.rand(5, 7)) + + sample = np.random.rand(3, 7, 2) + + for method in ('nearest', 'linear', 'splinef2d'): + v1 = interpn((x, 
y), values, sample, method=method) + v2 = interpn((x, y), np.asarray(values), sample, method=method) + assert_allclose(v1, np.asmatrix(v2)) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate.pyc new file mode 100644 index 0000000..7beabf1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate_wrapper.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate_wrapper.py new file mode 100644 index 0000000..6b98a95 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate_wrapper.py @@ -0,0 +1,81 @@ +""" module to test interpolate_wrapper.py +""" +from __future__ import division, print_function, absolute_import + +from numpy import arange, allclose, ones, isnan +import numpy as np +from numpy.testing import (assert_, assert_allclose) +from scipy._lib._numpy_compat import suppress_warnings + +# functionality to be tested +from scipy.interpolate.interpolate_wrapper import (linear, logarithmic, + block_average_above, nearest) + + +class Test(object): + + def assertAllclose(self, x, y, rtol=1.0e-5): + for i, xi in enumerate(x): + assert_(allclose(xi, y[i], rtol) or (isnan(xi) and isnan(y[i]))) + + def test_nearest(self): + N = 5 + x = arange(N) + y = arange(N) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "`nearest` is deprecated") + assert_allclose(y, nearest(x, y, x+.1)) + assert_allclose(y, nearest(x, y, x-.1)) + + def test_linear(self): + N = 3000. + x = arange(N) + y = arange(N) + new_x = arange(N)+0.5 + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "`linear` is deprecated") + new_y = linear(x, y, new_x) + + assert_allclose(new_y[:5], [0.5, 1.5, 2.5, 3.5, 4.5]) + + def test_block_average_above(self): + N = 3000 + x = arange(N, dtype=float) + y = arange(N, dtype=float) + + new_x = arange(N // 2) * 2 + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "`block_average_above` is deprecated") + new_y = block_average_above(x, y, new_x) + assert_allclose(new_y[:5], [0.0, 0.5, 2.5, 4.5, 6.5]) + + def test_linear2(self): + N = 3000 + x = arange(N, dtype=float) + y = ones((100,N)) * arange(N) + new_x = arange(N) + 0.5 + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "`linear` is deprecated") + new_y = linear(x, y, new_x) + assert_allclose(new_y[:5,:5], + [[0.5, 1.5, 2.5, 3.5, 4.5], + [0.5, 1.5, 2.5, 3.5, 4.5], + [0.5, 1.5, 2.5, 3.5, 4.5], + [0.5, 1.5, 2.5, 3.5, 4.5], + [0.5, 1.5, 2.5, 3.5, 4.5]]) + + def test_logarithmic(self): + N = 4000. 
+ x = arange(N) + y = arange(N) + new_x = arange(N)+0.5 + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "`logarithmic` is deprecated") + new_y = logarithmic(x, y, new_x) + correct_y = [np.NaN, 1.41421356, 2.44948974, 3.46410162, 4.47213595] + assert_allclose(new_y[:5], correct_y) + + def runTest(self): + test_list = [name for name in dir(self) if name.find('test_') == 0] + for test_name in test_list: + exec("self.%s()" % test_name) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate_wrapper.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate_wrapper.pyc new file mode 100644 index 0000000..5d9cfa0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_interpolate_wrapper.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.py new file mode 100644 index 0000000..df4d552 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.py @@ -0,0 +1,177 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_equal, assert_array_equal, assert_allclose +from pytest import raises as assert_raises + +from scipy.interpolate import griddata, NearestNDInterpolator + + +class TestGriddata(object): + def test_fill_value(self): + x = [(0,0), (0,1), (1,0)] + y = [1, 2, 3] + + yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1) + assert_array_equal(yi, [-1., -1, 1]) + + yi = griddata(x, y, [(1,1), (1,2), (0,0)]) + assert_array_equal(yi, [np.nan, np.nan, 1]) + + def test_alternative_call(self): + x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], + dtype=np.double) + y = (np.arange(x.shape[0], dtype=np.double)[:,None] + + np.array([0,1])[None,:]) + + for method in ('nearest', 'linear', 'cubic'): + for rescale in (True, False): + msg = repr((method, rescale)) + yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method, + rescale=rescale) + assert_allclose(y, yi, atol=1e-14, err_msg=msg) + + def test_multivalue_2d(self): + x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], + dtype=np.double) + y = (np.arange(x.shape[0], dtype=np.double)[:,None] + + np.array([0,1])[None,:]) + + for method in ('nearest', 'linear', 'cubic'): + for rescale in (True, False): + msg = repr((method, rescale)) + yi = griddata(x, y, x, method=method, rescale=rescale) + assert_allclose(y, yi, atol=1e-14, err_msg=msg) + + def test_multipoint_2d(self): + x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], + dtype=np.double) + y = np.arange(x.shape[0], dtype=np.double) + + xi = x[:,None,:] + np.array([0,0,0])[None,:,None] + + for method in ('nearest', 'linear', 'cubic'): + for rescale in (True, False): + msg = repr((method, rescale)) + yi = griddata(x, y, xi, method=method, rescale=rescale) + + assert_equal(yi.shape, (5, 3), err_msg=msg) + assert_allclose(yi, np.tile(y[:,None], (1, 3)), + atol=1e-14, err_msg=msg) + + def test_complex_2d(self): + x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], + dtype=np.double) + y = np.arange(x.shape[0], dtype=np.double) + y = y - 2j*y[::-1] + + xi = x[:,None,:] + np.array([0,0,0])[None,:,None] + + for method in ('nearest', 'linear', 'cubic'): + for rescale in (True, False): + msg = repr((method, rescale)) + yi = griddata(x, 
y, xi, method=method, rescale=rescale) + + assert_equal(yi.shape, (5, 3), err_msg=msg) + assert_allclose(yi, np.tile(y[:,None], (1, 3)), + atol=1e-14, err_msg=msg) + + def test_1d(self): + x = np.array([1, 2.5, 3, 4.5, 5, 6]) + y = np.array([1, 2, 0, 3.9, 2, 1]) + + for method in ('nearest', 'linear', 'cubic'): + assert_allclose(griddata(x, y, x, method=method), y, + err_msg=method, atol=1e-14) + assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y, + err_msg=method, atol=1e-14) + assert_allclose(griddata((x,), y, (x,), method=method), y, + err_msg=method, atol=1e-14) + + def test_1d_borders(self): + # Test for nearest neighbor case with xi outside + # the range of the values. + x = np.array([1, 2.5, 3, 4.5, 5, 6]) + y = np.array([1, 2, 0, 3.9, 2, 1]) + xi = np.array([0.9, 6.5]) + yi_should = np.array([1.0, 1.0]) + + method = 'nearest' + assert_allclose(griddata(x, y, xi, + method=method), yi_should, + err_msg=method, + atol=1e-14) + assert_allclose(griddata(x.reshape(6, 1), y, xi, + method=method), yi_should, + err_msg=method, + atol=1e-14) + assert_allclose(griddata((x, ), y, (xi, ), + method=method), yi_should, + err_msg=method, + atol=1e-14) + + def test_1d_unsorted(self): + x = np.array([2.5, 1, 4.5, 5, 6, 3]) + y = np.array([1, 2, 0, 3.9, 2, 1]) + + for method in ('nearest', 'linear', 'cubic'): + assert_allclose(griddata(x, y, x, method=method), y, + err_msg=method, atol=1e-10) + assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y, + err_msg=method, atol=1e-10) + assert_allclose(griddata((x,), y, (x,), method=method), y, + err_msg=method, atol=1e-10) + + def test_square_rescale_manual(self): + points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.double) + points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)], dtype=np.double) + values = np.array([1., 2., -3., 5., 9.], dtype=np.double) + + xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None], + np.linspace(0, 100, 14)[None,:]) + xx = xx.ravel() + yy = yy.ravel() + xi = np.array([xx, yy]).T.copy() + + for method in ('nearest', 'linear', 'cubic'): + msg = method + zi = griddata(points_rescaled, values, xi/np.array([10, 100.]), + method=method) + zi_rescaled = griddata(points, values, xi, method=method, + rescale=True) + assert_allclose(zi, zi_rescaled, err_msg=msg, + atol=1e-12) + + def test_xi_1d(self): + # Check that 1-D xi is interpreted as a coordinate + x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], + dtype=np.double) + y = np.arange(x.shape[0], dtype=np.double) + y = y - 2j*y[::-1] + + xi = np.array([0.5, 0.5]) + + for method in ('nearest', 'linear', 'cubic'): + p1 = griddata(x, y, xi, method=method) + p2 = griddata(x, y, xi[None,:], method=method) + assert_allclose(p1, p2, err_msg=method) + + xi1 = np.array([0.5]) + xi3 = np.array([0.5, 0.5, 0.5]) + assert_raises(ValueError, griddata, x, y, xi1, + method=method) + assert_raises(ValueError, griddata, x, y, xi3, + method=method) + + +def test_nearest_options(): + # smoke test that NearestNDInterpolator accept cKDTree options + npts, nd = 4, 3 + x = np.arange(npts*nd).reshape((npts, nd)) + y = np.arange(npts) + nndi = NearestNDInterpolator(x, y) + + opts = {'balanced_tree': False, 'compact_nodes': False} + nndi_o = NearestNDInterpolator(x, y, tree_options=opts) + assert_allclose(nndi(x), nndi_o(x), atol=1e-14) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.pyc 
b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.pyc new file mode 100644 index 0000000..bf3ba12 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_pade.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_pade.py new file mode 100644 index 0000000..c68c6f5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_pade.py @@ -0,0 +1,66 @@ +from __future__ import division, print_function, absolute_import + +from numpy.testing import (assert_array_equal, assert_array_almost_equal) +from scipy.interpolate import pade + +def test_pade_trivial(): + nump, denomp = pade([1.0], 0) + assert_array_equal(nump.c, [1.0]) + assert_array_equal(denomp.c, [1.0]) + + nump, denomp = pade([1.0], 0, 0) + assert_array_equal(nump.c, [1.0]) + assert_array_equal(denomp.c, [1.0]) + + +def test_pade_4term_exp(): + # First four Taylor coefficients of exp(x). + # Unlike poly1d, the first array element is the zero-order term. + an = [1.0, 1.0, 0.5, 1.0/6] + + nump, denomp = pade(an, 0) + assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0]) + assert_array_almost_equal(denomp.c, [1.0]) + + nump, denomp = pade(an, 1) + assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0]) + assert_array_almost_equal(denomp.c, [-1.0/3, 1.0]) + + nump, denomp = pade(an, 2) + assert_array_almost_equal(nump.c, [1.0/3, 1.0]) + assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0]) + + nump, denomp = pade(an, 3) + assert_array_almost_equal(nump.c, [1.0]) + assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0]) + + # Testing inclusion of optional parameter + nump, denomp = pade(an, 0, 3) + assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0]) + assert_array_almost_equal(denomp.c, [1.0]) + + nump, denomp = pade(an, 1, 2) + assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0]) + assert_array_almost_equal(denomp.c, [-1.0/3, 1.0]) + + nump, denomp = pade(an, 2, 1) + assert_array_almost_equal(nump.c, [1.0/3, 1.0]) + assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0]) + + nump, denomp = pade(an, 3, 0) + assert_array_almost_equal(nump.c, [1.0]) + assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0]) + + # Testing reducing array + nump, denomp = pade(an, 0, 2) + assert_array_almost_equal(nump.c, [0.5, 1.0, 1.0]) + assert_array_almost_equal(denomp.c, [1.0]) + + nump, denomp = pade(an, 1, 1) + assert_array_almost_equal(nump.c, [1.0/2, 1.0]) + assert_array_almost_equal(denomp.c, [-1.0/2, 1.0]) + + nump, denomp = pade(an, 2, 0) + assert_array_almost_equal(nump.c, [1.0]) + assert_array_almost_equal(denomp.c, [1.0/2, -1.0, 1.0]) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_pade.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_pade.pyc new file mode 100644 index 0000000..e7edac1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_pade.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_polyint.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_polyint.py new file mode 100644 index 0000000..172e6ae --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_polyint.py @@ -0,0 +1,664 @@ +from __future__ import division, print_function, absolute_import + +import warnings + +import numpy 
as np + +from numpy.testing import ( + assert_almost_equal, assert_array_equal, assert_array_almost_equal, + assert_allclose, assert_equal, assert_) +from pytest import raises as assert_raises + +from scipy.interpolate import ( + KroghInterpolator, krogh_interpolate, + BarycentricInterpolator, barycentric_interpolate, + approximate_taylor_polynomial, pchip, PchipInterpolator, + pchip_interpolate, Akima1DInterpolator, CubicSpline, make_interp_spline) + +from scipy._lib.six import xrange + + +def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0, + extra_args={}): + np.random.seed(1234) + + x = [-1, 0, 1, 2, 3, 4] + s = list(range(1, len(y_shape)+1)) + s.insert(axis % (len(y_shape)+1), 0) + y = np.random.rand(*((6,) + y_shape)).transpose(s) + + # Cython code chokes on y.shape = (0, 3) etc, skip them + if y.size == 0: + return + + xi = np.zeros(x_shape) + yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi) + + target_shape = ((deriv_shape or ()) + y.shape[:axis] + + x_shape + y.shape[axis:][1:]) + assert_equal(yi.shape, target_shape) + + # check it works also with lists + if x_shape and y.size > 0: + interpolator_cls(list(x), list(y), axis=axis, **extra_args)(list(xi)) + + # check also values + if xi.size > 0 and deriv_shape is None: + bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:] + yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)] + yv = yv.reshape(bs_shape) + + yi, y = np.broadcast_arrays(yi, yv) + assert_allclose(yi, y) + + +SHAPES = [(), (0,), (1,), (6, 2, 5)] + + +def test_shapes(): + + def spl_interp(x, y, axis): + return make_interp_spline(x, y, axis=axis) + + for ip in [KroghInterpolator, BarycentricInterpolator, pchip, + Akima1DInterpolator, CubicSpline, spl_interp]: + for s1 in SHAPES: + for s2 in SHAPES: + for axis in range(-len(s2), len(s2)): + if ip != CubicSpline: + check_shape(ip, s1, s2, None, axis) + else: + for bc in ['natural', 'clamped']: + extra = {'bc_type': bc} + check_shape(ip, s1, s2, None, axis, extra) + +def test_derivs_shapes(): + def krogh_derivs(x, y, axis=0): + return KroghInterpolator(x, y, axis).derivatives + + for s1 in SHAPES: + for s2 in SHAPES: + for axis in range(-len(s2), len(s2)): + check_shape(krogh_derivs, s1, s2, (6,), axis) + + +def test_deriv_shapes(): + def krogh_deriv(x, y, axis=0): + return KroghInterpolator(x, y, axis).derivative + + def pchip_deriv(x, y, axis=0): + return pchip(x, y, axis).derivative() + + def pchip_deriv2(x, y, axis=0): + return pchip(x, y, axis).derivative(2) + + def pchip_antideriv(x, y, axis=0): + return pchip(x, y, axis).derivative() + + def pchip_antideriv2(x, y, axis=0): + return pchip(x, y, axis).derivative(2) + + def pchip_deriv_inplace(x, y, axis=0): + class P(PchipInterpolator): + def __call__(self, x): + return PchipInterpolator.__call__(self, x, 1) + pass + return P(x, y, axis) + + def akima_deriv(x, y, axis=0): + return Akima1DInterpolator(x, y, axis).derivative() + + def akima_antideriv(x, y, axis=0): + return Akima1DInterpolator(x, y, axis).antiderivative() + + def cspline_deriv(x, y, axis=0): + return CubicSpline(x, y, axis).derivative() + + def cspline_antideriv(x, y, axis=0): + return CubicSpline(x, y, axis).antiderivative() + + def bspl_deriv(x, y, axis=0): + return make_interp_spline(x, y, axis=axis).derivative() + + def bspl_antideriv(x, y, axis=0): + return make_interp_spline(x, y, axis=axis).antiderivative() + + for ip in [krogh_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace, + pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv, + 
cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]: + for s1 in SHAPES: + for s2 in SHAPES: + for axis in range(-len(s2), len(s2)): + check_shape(ip, s1, s2, (), axis) + + +def _check_complex(ip): + x = [1, 2, 3, 4] + y = [1, 2, 1j, 3] + p = ip(x, y) + assert_allclose(y, p(x)) + + +def test_complex(): + for ip in [KroghInterpolator, BarycentricInterpolator, pchip, CubicSpline]: + _check_complex(ip) + + +class TestKrogh(object): + def setup_method(self): + self.true_poly = np.poly1d([-2,3,1,5,-4]) + self.test_xs = np.linspace(-1,1,100) + self.xs = np.linspace(-1,1,5) + self.ys = self.true_poly(self.xs) + + def test_lagrange(self): + P = KroghInterpolator(self.xs,self.ys) + assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs)) + + def test_scalar(self): + P = KroghInterpolator(self.xs,self.ys) + assert_almost_equal(self.true_poly(7),P(7)) + assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7))) + + def test_derivatives(self): + P = KroghInterpolator(self.xs,self.ys) + D = P.derivatives(self.test_xs) + for i in xrange(D.shape[0]): + assert_almost_equal(self.true_poly.deriv(i)(self.test_xs), + D[i]) + + def test_low_derivatives(self): + P = KroghInterpolator(self.xs,self.ys) + D = P.derivatives(self.test_xs,len(self.xs)+2) + for i in xrange(D.shape[0]): + assert_almost_equal(self.true_poly.deriv(i)(self.test_xs), + D[i]) + + def test_derivative(self): + P = KroghInterpolator(self.xs,self.ys) + m = 10 + r = P.derivatives(self.test_xs,m) + for i in xrange(m): + assert_almost_equal(P.derivative(self.test_xs,i),r[i]) + + def test_high_derivative(self): + P = KroghInterpolator(self.xs,self.ys) + for i in xrange(len(self.xs),2*len(self.xs)): + assert_almost_equal(P.derivative(self.test_xs,i), + np.zeros(len(self.test_xs))) + + def test_hermite(self): + xs = [0,0,0,1,1,1,2] + ys = [self.true_poly(0), + self.true_poly.deriv(1)(0), + self.true_poly.deriv(2)(0), + self.true_poly(1), + self.true_poly.deriv(1)(1), + self.true_poly.deriv(2)(1), + self.true_poly(2)] + P = KroghInterpolator(self.xs,self.ys) + assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs)) + + def test_vector(self): + xs = [0, 1, 2] + ys = np.array([[0,1],[1,0],[2,1]]) + P = KroghInterpolator(xs,ys) + Pi = [KroghInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])] + test_xs = np.linspace(-1,3,100) + assert_almost_equal(P(test_xs), + np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1)) + assert_almost_equal(P.derivatives(test_xs), + np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]), + (1,2,0))) + + def test_empty(self): + P = KroghInterpolator(self.xs,self.ys) + assert_array_equal(P([]), []) + + def test_shapes_scalarvalue(self): + P = KroghInterpolator(self.xs,self.ys) + assert_array_equal(np.shape(P(0)), ()) + assert_array_equal(np.shape(P(np.array(0))), ()) + assert_array_equal(np.shape(P([0])), (1,)) + assert_array_equal(np.shape(P([0,1])), (2,)) + + def test_shapes_scalarvalue_derivative(self): + P = KroghInterpolator(self.xs,self.ys) + n = P.n + assert_array_equal(np.shape(P.derivatives(0)), (n,)) + assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,)) + assert_array_equal(np.shape(P.derivatives([0])), (n,1)) + assert_array_equal(np.shape(P.derivatives([0,1])), (n,2)) + + def test_shapes_vectorvalue(self): + P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3))) + assert_array_equal(np.shape(P(0)), (3,)) + assert_array_equal(np.shape(P([0])), (1,3)) + assert_array_equal(np.shape(P([0,1])), (2,3)) + + def test_shapes_1d_vectorvalue(self): + P = 
KroghInterpolator(self.xs,np.outer(self.ys,[1])) + assert_array_equal(np.shape(P(0)), (1,)) + assert_array_equal(np.shape(P([0])), (1,1)) + assert_array_equal(np.shape(P([0,1])), (2,1)) + + def test_shapes_vectorvalue_derivative(self): + P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3))) + n = P.n + assert_array_equal(np.shape(P.derivatives(0)), (n,3)) + assert_array_equal(np.shape(P.derivatives([0])), (n,1,3)) + assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3)) + + def test_wrapper(self): + P = KroghInterpolator(self.xs, self.ys) + ki = krogh_interpolate + assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs)) + assert_almost_equal(P.derivative(self.test_xs, 2), + ki(self.xs, self.ys, self.test_xs, der=2)) + assert_almost_equal(P.derivatives(self.test_xs, 2), + ki(self.xs, self.ys, self.test_xs, der=[0, 1])) + + def test_int_inputs(self): + # Check input args are cast correctly to floats, gh-3669 + x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424, + 13104, 60000] + offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425, + -0.48002351, -0.34925329, -0.26503107, + -0.13148093, -0.12988833, -0.12979296, + -0.12973574, -0.08582937, 0.05]) + f = KroghInterpolator(x, offset_cdf) + + assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)), + 0, atol=1e-10) + + def test_derivatives_complex(self): + # regression test for gh-7381: krogh.derivatives(0) fails complex y + x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j]) + func = KroghInterpolator(x, y) + cmplx = func.derivatives(0) + + cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) + + 1j*KroghInterpolator(x, y.imag).derivatives(0)) + assert_allclose(cmplx, cmplx2, atol=1e-15) + + +class TestTaylor(object): + def test_exponential(self): + degree = 5 + p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15) + for i in xrange(degree+1): + assert_almost_equal(p(0),1) + p = p.deriv() + assert_almost_equal(p(0),0) + + +class TestBarycentric(object): + def setup_method(self): + self.true_poly = np.poly1d([-2, 3, 1, 5, -4]) + self.test_xs = np.linspace(-1, 1, 100) + self.xs = np.linspace(-1, 1, 5) + self.ys = self.true_poly(self.xs) + + def test_lagrange(self): + P = BarycentricInterpolator(self.xs, self.ys) + assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs)) + + def test_scalar(self): + P = BarycentricInterpolator(self.xs, self.ys) + assert_almost_equal(self.true_poly(7), P(7)) + assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7))) + + def test_delayed(self): + P = BarycentricInterpolator(self.xs) + P.set_yi(self.ys) + assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs)) + + def test_append(self): + P = BarycentricInterpolator(self.xs[:3], self.ys[:3]) + P.add_xi(self.xs[3:], self.ys[3:]) + assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs)) + + def test_vector(self): + xs = [0, 1, 2] + ys = np.array([[0, 1], [1, 0], [2, 1]]) + BI = BarycentricInterpolator + P = BI(xs, ys) + Pi = [BI(xs, ys[:, i]) for i in xrange(ys.shape[1])] + test_xs = np.linspace(-1, 3, 100) + assert_almost_equal(P(test_xs), + np.rollaxis(np.asarray([p(test_xs) for p in Pi]), -1)) + + def test_shapes_scalarvalue(self): + P = BarycentricInterpolator(self.xs, self.ys) + assert_array_equal(np.shape(P(0)), ()) + assert_array_equal(np.shape(P(np.array(0))), ()) + assert_array_equal(np.shape(P([0])), (1,)) + assert_array_equal(np.shape(P([0, 1])), (2,)) + + def test_shapes_vectorvalue(self): + P = BarycentricInterpolator(self.xs, 
np.outer(self.ys, np.arange(3))) + assert_array_equal(np.shape(P(0)), (3,)) + assert_array_equal(np.shape(P([0])), (1, 3)) + assert_array_equal(np.shape(P([0, 1])), (2, 3)) + + def test_shapes_1d_vectorvalue(self): + P = BarycentricInterpolator(self.xs, np.outer(self.ys, [1])) + assert_array_equal(np.shape(P(0)), (1,)) + assert_array_equal(np.shape(P([0])), (1, 1)) + assert_array_equal(np.shape(P([0,1])), (2, 1)) + + def test_wrapper(self): + P = BarycentricInterpolator(self.xs, self.ys) + values = barycentric_interpolate(self.xs, self.ys, self.test_xs) + assert_almost_equal(P(self.test_xs), values) + + +class TestPCHIP(object): + def _make_random(self, npts=20): + np.random.seed(1234) + xi = np.sort(np.random.random(npts)) + yi = np.random.random(npts) + return pchip(xi, yi), xi, yi + + def test_overshoot(self): + # PCHIP should not overshoot + p, xi, yi = self._make_random() + for i in range(len(xi)-1): + x1, x2 = xi[i], xi[i+1] + y1, y2 = yi[i], yi[i+1] + if y1 > y2: + y1, y2 = y2, y1 + xp = np.linspace(x1, x2, 10) + yp = p(xp) + assert_(((y1 <= yp) & (yp <= y2)).all()) + + def test_monotone(self): + # PCHIP should preserve monotonicty + p, xi, yi = self._make_random() + for i in range(len(xi)-1): + x1, x2 = xi[i], xi[i+1] + y1, y2 = yi[i], yi[i+1] + xp = np.linspace(x1, x2, 10) + yp = p(xp) + assert_(((y2-y1) * (yp[1:] - yp[:1]) > 0).all()) + + def test_cast(self): + # regression test for integer input data, see gh-3453 + data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100], + [-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]]) + xx = np.arange(100) + curve = pchip(data[0], data[1])(xx) + + data1 = data * 1.0 + curve1 = pchip(data1[0], data1[1])(xx) + + assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14) + + def test_nag(self): + # Example from NAG C implementation, + # http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html + # suggested in gh-5326 as a smoke test for the way the derivatives + # are computed (see also gh-3453) + from scipy._lib.six import StringIO + dataStr = ''' + 7.99 0.00000E+0 + 8.09 0.27643E-4 + 8.19 0.43750E-1 + 8.70 0.16918E+0 + 9.20 0.46943E+0 + 10.00 0.94374E+0 + 12.00 0.99864E+0 + 15.00 0.99992E+0 + 20.00 0.99999E+0 + ''' + data = np.loadtxt(StringIO(dataStr)) + pch = pchip(data[:,0], data[:,1]) + + resultStr = ''' + 7.9900 0.0000 + 9.1910 0.4640 + 10.3920 0.9645 + 11.5930 0.9965 + 12.7940 0.9992 + 13.9950 0.9998 + 15.1960 0.9999 + 16.3970 1.0000 + 17.5980 1.0000 + 18.7990 1.0000 + 20.0000 1.0000 + ''' + result = np.loadtxt(StringIO(resultStr)) + assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5) + + def test_endslopes(self): + # this is a smoke test for gh-3453: PCHIP interpolator should not + # set edge slopes to zero if the data do not suggest zero edge derivatives + x = np.array([0.0, 0.1, 0.25, 0.35]) + y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3]) + y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3]) + for pp in (pchip(x, y1), pchip(x, y2)): + for t in (x[0], x[-1]): + assert_(pp(t, 1) != 0) + + def test_all_zeros(self): + x = np.arange(10) + y = np.zeros_like(x) + + # this should work and not generate any warnings + with warnings.catch_warnings(): + warnings.filterwarnings('error') + pch = pchip(x, y) + + xx = np.linspace(0, 9, 101) + assert_equal(pch(xx), 0.) + + def test_two_points(self): + # regression test for gh-6222: pchip([0, 1], [0, 1]) fails because + # it tries to use a three-point scheme to estimate edge derivatives, + # while there are only two points available. + # Instead, it should construct a linear interpolator. 
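The gh-6222 comment above describes the behavior this test pins down: with only two data points the three-point edge-derivative scheme is unavailable, so pchip degrades to a straight line. A minimal sketch of that fallback, assuming this vendored scipy is importable:

>>> from scipy.interpolate import pchip
>>> p = pchip([0, 1], [0, 2])        # two points only
>>> float(p(0.25)), float(p(0.75))   # exactly linear in between
(0.5, 1.5)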
+ x = np.linspace(0, 1, 11) + p = pchip([0, 1], [0, 2]) + assert_allclose(p(x), 2*x, atol=1e-15) + + def test_pchip_interpolate(self): + assert_array_almost_equal( + pchip_interpolate([1,2,3], [4,5,6], [0.5], der=1), + [1.]) + + assert_array_almost_equal( + pchip_interpolate([1,2,3], [4,5,6], [0.5], der=0), + [3.5]) + + assert_array_almost_equal( + pchip_interpolate([1,2,3], [4,5,6], [0.5], der=[0, 1]), + [[3.5], [1]]) + + def test_roots(self): + # regression test for gh-6357: .roots method should work + p = pchip([0, 1], [-1, 1]) + r = p.roots() + assert_allclose(r, 0.5) + +class TestCubicSpline(object): + @staticmethod + def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot', + tol=1e-14): + """Check that spline coefficients satisfy the continuity and boundary + conditions.""" + x = S.x + c = S.c + dx = np.diff(x) + dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2)) + dxi = dx[:-1] + + # Check C2 continuity. + assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 + + c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol) + assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 + + 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol) + assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1], + rtol=tol, atol=tol) + + # Check that we found a parabola, the third derivative is 0. + if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot': + assert_allclose(c[0], 0, rtol=tol, atol=tol) + return + + # Check periodic boundary conditions. + if bc_start == 'periodic': + assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol) + assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol) + assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol) + return + + # Check other boundary conditions. + if bc_start == 'not-a-knot': + if x.size == 2: + slope = (S(x[1]) - S(x[0])) / dx[0] + assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol) + else: + assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol) + elif bc_start == 'clamped': + assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol) + elif bc_start == 'natural': + assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol) + else: + order, value = bc_start + assert_allclose(S(x[0], order), value, rtol=tol, atol=tol) + + if bc_end == 'not-a-knot': + if x.size == 2: + slope = (S(x[1]) - S(x[0])) / dx[0] + assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol) + else: + assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol) + elif bc_end == 'clamped': + assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol) + elif bc_end == 'natural': + assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol) + else: + order, value = bc_end + assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol) + + def check_all_bc(self, x, y, axis): + deriv_shape = list(y.shape) + del deriv_shape[axis] + first_deriv = np.empty(deriv_shape) + first_deriv.fill(2) + second_deriv = np.empty(deriv_shape) + second_deriv.fill(-1) + bc_all = [ + 'not-a-knot', + 'natural', + 'clamped', + (1, first_deriv), + (2, second_deriv) + ] + for bc in bc_all[:3]: + S = CubicSpline(x, y, axis=axis, bc_type=bc) + self.check_correctness(S, bc, bc) + + for bc_start in bc_all: + for bc_end in bc_all: + S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end)) + self.check_correctness(S, bc_start, bc_end, tol=2e-14) + + def test_general(self): + x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9]) + y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5]) + for n in [2, 3, x.size]: + self.check_all_bc(x[:n], y[:n], 0) + + Y = np.empty((2, n, 2)) + Y[0, :, 0] = y[:n] + Y[0, :, 1] = y[:n] - 1 + 
Y[1, :, 0] = y[:n] + 2 + Y[1, :, 1] = y[:n] + 3 + self.check_all_bc(x[:n], Y, 1) + + def test_periodic(self): + for n in [2, 3, 5]: + x = np.linspace(0, 2 * np.pi, n) + y = np.cos(x) + S = CubicSpline(x, y, bc_type='periodic') + self.check_correctness(S, 'periodic', 'periodic') + + Y = np.empty((2, n, 2)) + Y[0, :, 0] = y + Y[0, :, 1] = y + 2 + Y[1, :, 0] = y - 1 + Y[1, :, 1] = y + 5 + S = CubicSpline(x, Y, axis=1, bc_type='periodic') + self.check_correctness(S, 'periodic', 'periodic') + + def test_periodic_eval(self): + x = np.linspace(0, 2 * np.pi, 10) + y = np.cos(x) + S = CubicSpline(x, y, bc_type='periodic') + assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15) + + def test_dtypes(self): + x = np.array([0, 1, 2, 3], dtype=int) + y = np.array([-5, 2, 3, 1], dtype=int) + S = CubicSpline(x, y) + self.check_correctness(S) + + y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j]) + S = CubicSpline(x, y) + self.check_correctness(S) + + S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j))) + self.check_correctness(S, "natural", (1, 2j)) + + y = np.array([-5, 2, 3, 1]) + S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)]) + self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j)) + + def test_small_dx(self): + rng = np.random.RandomState(0) + x = np.sort(rng.uniform(size=100)) + y = 1e4 + rng.uniform(size=100) + S = CubicSpline(x, y) + self.check_correctness(S, tol=1e-13) + + def test_incorrect_inputs(self): + x = np.array([1, 2, 3, 4]) + y = np.array([1, 2, 3, 4]) + xc = np.array([1 + 1j, 2, 3, 4]) + xn = np.array([np.nan, 2, 3, 4]) + xo = np.array([2, 1, 3, 4]) + yn = np.array([np.nan, 2, 3, 4]) + y3 = [1, 2, 3] + x1 = [1] + y1 = [1] + + assert_raises(ValueError, CubicSpline, xc, y) + assert_raises(ValueError, CubicSpline, xn, y) + assert_raises(ValueError, CubicSpline, x, yn) + assert_raises(ValueError, CubicSpline, xo, y) + assert_raises(ValueError, CubicSpline, x, y3) + assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y) + assert_raises(ValueError, CubicSpline, x1, y1) + + wrong_bc = [('periodic', 'clamped'), + ((2, 0), (3, 10)), + ((1, 0), ), + (0., 0.), + 'not-a-typo'] + + for bc_type in wrong_bc: + assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True) + + # Shapes mismatch when giving arbitrary derivative values: + Y = np.c_[y, y] + bc1 = ('clamped', (1, 0)) + bc2 = ('clamped', (1, [0, 0, 0])) + bc3 = ('clamped', (1, [[0, 0]])) + assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True) + assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True) + assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True) + + # periodic condition, y[-1] must be equal to y[0]: + assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_polyint.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_polyint.pyc new file mode 100644 index 0000000..3523c17 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_polyint.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.py new file mode 100644 index 0000000..e137ec2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.py @@ -0,0 +1,155 @@ +# Created by John Travers, Robert Hetland, 2007 +""" Test functions for rbf module """ +from __future__ import division, print_function, absolute_import + + +import numpy as np 
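The tests below check that Rbf reproduces its training data at the nodes for every supported basis function. A minimal sketch of that interpolation property with the default multiquadric basis, assuming this vendored scipy is importable:

>>> import numpy as np
>>> from scipy.interpolate import Rbf
>>> x = np.linspace(0, 10, 9)
>>> rbf = Rbf(x, np.sin(x))               # default: multiquadric
>>> bool(np.allclose(rbf(x), np.sin(x)))  # passes through the nodes
True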
+from numpy.testing import (assert_, assert_array_almost_equal, + assert_almost_equal) +from numpy import linspace, sin, random, exp, allclose +from scipy.interpolate.rbf import Rbf + +FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian', + 'cubic', 'quintic', 'thin-plate', 'linear') + + +def check_rbf1d_interpolation(function): + # Check that the Rbf function interpolates through the nodes (1D) + x = linspace(0,10,9) + y = sin(x) + rbf = Rbf(x, y, function=function) + yi = rbf(x) + assert_array_almost_equal(y, yi) + assert_almost_equal(rbf(float(x[0])), y[0]) + + +def check_rbf2d_interpolation(function): + # Check that the Rbf function interpolates through the nodes (2D). + x = random.rand(50,1)*4-2 + y = random.rand(50,1)*4-2 + z = x*exp(-x**2-1j*y**2) + rbf = Rbf(x, y, z, epsilon=2, function=function) + zi = rbf(x, y) + zi.shape = x.shape + assert_array_almost_equal(z, zi) + + +def check_rbf3d_interpolation(function): + # Check that the Rbf function interpolates through the nodes (3D). + x = random.rand(50, 1)*4 - 2 + y = random.rand(50, 1)*4 - 2 + z = random.rand(50, 1)*4 - 2 + d = x*exp(-x**2 - y**2) + rbf = Rbf(x, y, z, d, epsilon=2, function=function) + di = rbf(x, y, z) + di.shape = x.shape + assert_array_almost_equal(di, d) + + +def test_rbf_interpolation(): + for function in FUNCTIONS: + check_rbf1d_interpolation(function) + check_rbf2d_interpolation(function) + check_rbf3d_interpolation(function) + + +def check_rbf1d_regularity(function, atol): + # Check that the Rbf function approximates a smooth function well away + # from the nodes. + x = linspace(0, 10, 9) + y = sin(x) + rbf = Rbf(x, y, function=function) + xi = linspace(0, 10, 100) + yi = rbf(xi) + # import matplotlib.pyplot as plt + # plt.figure() + # plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-') + # plt.plot(x, y, 'o', xi, yi-sin(xi), ':') + # plt.title(function) + # plt.show() + msg = "abs-diff: %f" % abs(yi - sin(xi)).max() + assert_(allclose(yi, sin(xi), atol=atol), msg) + + +def test_rbf_regularity(): + tolerances = { + 'multiquadric': 0.1, + 'inverse multiquadric': 0.15, + 'gaussian': 0.15, + 'cubic': 0.15, + 'quintic': 0.1, + 'thin-plate': 0.1, + 'linear': 0.2 + } + for function in FUNCTIONS: + check_rbf1d_regularity(function, tolerances.get(function, 1e-2)) + + +def check_rbf1d_stability(function): + # Check that the Rbf function with default epsilon is not subject + # to overshoot. Regression for issue #4523. + # + # Generate some data (fixed random seed hence deterministic) + np.random.seed(1234) + x = np.linspace(0, 10, 50) + z = x + 4.0 * np.random.randn(len(x)) + + rbf = Rbf(x, z, function=function) + xi = np.linspace(0, 10, 1000) + yi = rbf(xi) + + # subtract the linear trend and make sure there no spikes + assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1) + +def test_rbf_stability(): + for function in FUNCTIONS: + check_rbf1d_stability(function) + + +def test_default_construction(): + # Check that the Rbf class can be constructed with the default + # multiquadric basis function. Regression test for ticket #1228. + x = linspace(0,10,9) + y = sin(x) + rbf = Rbf(x, y) + yi = rbf(x) + assert_array_almost_equal(y, yi) + + +def test_function_is_callable(): + # Check that the Rbf class can be constructed with function=callable. 
+ x = linspace(0,10,9) + y = sin(x) + linfunc = lambda x:x + rbf = Rbf(x, y, function=linfunc) + yi = rbf(x) + assert_array_almost_equal(y, yi) + + +def test_two_arg_function_is_callable(): + # Check that the Rbf class can be constructed with a two argument + # function=callable. + def _func(self, r): + return self.epsilon + r + + x = linspace(0,10,9) + y = sin(x) + rbf = Rbf(x, y, function=_func) + yi = rbf(x) + assert_array_almost_equal(y, yi) + + +def test_rbf_epsilon_none(): + x = linspace(0, 10, 9) + y = sin(x) + rbf = Rbf(x, y, epsilon=None) + + +def test_rbf_epsilon_none_collinear(): + # Check that collinear points in one dimension doesn't cause an error + # due to epsilon = 0 + x = [1, 2, 3] + y = [4, 4, 4] + z = [5, 6, 7] + rbf = Rbf(x, y, z, epsilon=None) + assert_(rbf.epsilon > 0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.pyc new file mode 100644 index 0000000..dd81c4f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_regression.py b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_regression.py new file mode 100644 index 0000000..e499f94 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_regression.py @@ -0,0 +1,16 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +import scipy.interpolate as interp +from numpy.testing import assert_almost_equal + + +class TestRegression(object): + def test_spalde_scalar_input(self): + """Ticket #629""" + x = np.linspace(0,10) + y = x**3 + tck = interp.splrep(x, y, k=3, t=[5]) + res = interp.spalde(np.float64(1), tck) + des = np.array([1., 3., 6., 6.]) + assert_almost_equal(res, des) diff --git a/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_regression.pyc new file mode 100644 index 0000000..23392cd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_regression.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/io/__init__.py new file mode 100644 index 0000000..e991144 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/__init__.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +u""" +================================== +Input and output (:mod:`scipy.io`) +================================== + +.. currentmodule:: scipy.io + +SciPy has many modules, classes, and functions available to read data +from and write data to a variety of file formats. + +.. seealso:: :ref:`numpy-reference.routines.io` (in Numpy) + +MATLAB® files +============= + +.. autosummary:: + :toctree: generated/ + + loadmat - Read a MATLAB style mat file (version 4 through 7.1) + savemat - Write a MATLAB style mat file (version 4 through 7.1) + whosmat - List contents of a MATLAB style mat file (version 4 through 7.1) + +IDL® files +========== + +.. autosummary:: + :toctree: generated/ + + readsav - Read an IDL 'save' file + +Matrix Market files +=================== + +.. 
autosummary:: + :toctree: generated/ + + mminfo - Query matrix info from Matrix Market formatted file + mmread - Read matrix from Matrix Market formatted file + mmwrite - Write matrix to Matrix Market formatted file + +Unformatted Fortran files +=============================== + +.. autosummary:: + :toctree: generated/ + + FortranFile - A file object for unformatted sequential Fortran files + +Netcdf +====== + +.. autosummary:: + :toctree: generated/ + + netcdf_file - A file object for NetCDF data + netcdf_variable - A data object for the netcdf module + +Harwell-Boeing files +==================== + +.. autosummary:: + :toctree: generated/ + + hb_read -- read H-B file + hb_write -- write H-B file + +Wav sound files (:mod:`scipy.io.wavfile`) +========================================= + +.. module:: scipy.io.wavfile + +.. autosummary:: + :toctree: generated/ + + read + write + WavFileWarning + +Arff files (:mod:`scipy.io.arff`) +================================= + +.. module:: scipy.io.arff + +.. autosummary:: + :toctree: generated/ + + loadarff + MetaData + ArffError + ParseArffError + +""" +from __future__ import division, print_function, absolute_import + +# matfile read and write +from .matlab import loadmat, savemat, whosmat, byteordercodes + +# netCDF file support +from .netcdf import netcdf_file, netcdf_variable + +# Fortran file support +from ._fortran import FortranFile + +from .mmio import mminfo, mmread, mmwrite +from .idl import readsav +from .harwell_boeing import hb_read, hb_write + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/__init__.pyc new file mode 100644 index 0000000..b0348a8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/_fortran.py b/project/venv/lib/python2.7/site-packages/scipy/io/_fortran.py new file mode 100644 index 0000000..64fff32 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/_fortran.py @@ -0,0 +1,317 @@ +""" +Module to read / write Fortran unformatted sequential files. + +This is in the spirit of code written by Neil Martinsen-Burrell and Joe Zuntz. + +""" +from __future__ import division, print_function, absolute_import + +import warnings +import numpy as np + +__all__ = ['FortranFile'] + + +class FortranFile(object): + """ + A file object for unformatted sequential files from Fortran code. + + Parameters + ---------- + filename : file or str + Open file object or filename. + mode : {'r', 'w'}, optional + Read-write mode, default is 'r'. + header_dtype : dtype, optional + Data type of the header. Size and endiness must match the input/output file. + + Notes + ----- + These files are broken up into records of unspecified types. The size of + each record is given at the start (although the size of this header is not + standard) and the data is written onto disk without any formatting. Fortran + compilers supporting the BACKSPACE statement will write a second copy of + the size to facilitate backwards seeking. + + This class only supports files written with both sizes for the record. + It also does not support the subrecords used in Intel and gfortran compilers + for records which are greater than 2GB with a 4-byte header. 
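The record framing described in these Notes (each payload bracketed by a leading and a trailing byte count) can be decoded without the class itself; a minimal sketch, assuming a little-endian machine and the default 4-byte unsigned header:

>>> import numpy as np
>>> payload = np.array([1, 2, 3], dtype='<i4')
>>> size = np.uint32(payload.nbytes).tobytes()
>>> raw = size + payload.tobytes() + size      # one complete record
>>> n = int(np.frombuffer(raw[:4], '<u4')[0])  # leading size header
>>> np.frombuffer(raw[4:4 + n], '<i4')
array([1, 2, 3], dtype=int32)

The trailing copy of the size is what lets Fortran's BACKSPACE statement seek backwards, as noted above.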
+ + An example of an unformatted sequential file in Fortran would be written as:: + + OPEN(1, FILE=myfilename, FORM='unformatted') + + WRITE(1) myvariable + + Since this is a non-standard file format, whose contents depend on the + compiler and the endianness of the machine, caution is advised. Files from + gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work. + + Consider using Fortran direct-access files or files from the newer Stream + I/O, which can be easily read by `numpy.fromfile`. + + Examples + -------- + To create an unformatted sequential Fortran file: + + >>> from scipy.io import FortranFile + >>> f = FortranFile('test.unf', 'w') + >>> f.write_record(np.array([1,2,3,4,5], dtype=np.int32)) + >>> f.write_record(np.linspace(0,1,20).reshape((5,4)).T) + >>> f.close() + + To read this file: + + >>> f = FortranFile('test.unf', 'r') + >>> print(f.read_ints(np.int32)) + [1 2 3 4 5] + >>> print(f.read_reals(float).reshape((5,4), order="F")) + [[0. 0.05263158 0.10526316 0.15789474] + [0.21052632 0.26315789 0.31578947 0.36842105] + [0.42105263 0.47368421 0.52631579 0.57894737] + [0.63157895 0.68421053 0.73684211 0.78947368] + [0.84210526 0.89473684 0.94736842 1. ]] + >>> f.close() + + Or, in Fortran:: + + integer :: a(5), i + double precision :: b(5,4) + open(1, file='test.unf', form='unformatted') + read(1) a + read(1) b + close(1) + write(*,*) a + do i = 1, 5 + write(*,*) b(i,:) + end do + + """ + def __init__(self, filename, mode='r', header_dtype=np.uint32): + if header_dtype is None: + raise ValueError('Must specify dtype') + + header_dtype = np.dtype(header_dtype) + if header_dtype.kind != 'u': + warnings.warn("Given a dtype which is not unsigned.") + + if mode not in 'rw' or len(mode) != 1: + raise ValueError('mode must be either r or w') + + if hasattr(filename, 'seek'): + self._fp = filename + else: + self._fp = open(filename, '%sb' % mode) + + self._header_dtype = header_dtype + + def _read_size(self): + return int(np.fromfile(self._fp, dtype=self._header_dtype, count=1)) + + def write_record(self, *items): + """ + Write a record (including sizes) to the file. + + Parameters + ---------- + *items : array_like + The data arrays to write. + + Notes + ----- + Writes data items to a file:: + + write_record(a.T, b.T, c.T, ...) + + write(1) a, b, c, ... + + Note that data in multidimensional arrays is written in + row-major order --- to make them read correctly by Fortran + programs, you need to transpose the arrays yourself when + writing them. + + """ + items = tuple(np.asarray(item) for item in items) + total_size = sum(item.nbytes for item in items) + + nb = np.array([total_size], dtype=self._header_dtype) + + nb.tofile(self._fp) + for item in items: + item.tofile(self._fp) + nb.tofile(self._fp) + + def read_record(self, *dtypes, **kwargs): + """ + Reads a record of a given type from the file. + + Parameters + ---------- + *dtypes : dtypes, optional + Data type(s) specifying the size and endiness of the data. + + Returns + ------- + data : ndarray + A one-dimensional array object. + + Notes + ----- + If the record contains a multi-dimensional array, you can specify + the size in the dtype. For example:: + + INTEGER var(5,4) + + can be read with:: + + read_record('(4,5)i4').T + + Note that this function does **not** assume the file data is in Fortran + column major order, so you need to (i) swap the order of dimensions + when reading and (ii) transpose the resulting array. + + Alternatively, you can read the data as a 1D array and handle the + ordering yourself. 
For example:: + + read_record('i4').reshape(5, 4, order='F') + + For records that contain several variables or mixed types (as opposed + to single scalar or array types), give them as separate arguments:: + + double precision :: a + integer :: b + write(1) a, b + + record = f.read_record('<f4', '<i4') + a = record[0] # first number + b = record[1] # second number + + and if any of the variables are arrays, the shape can be specified as + the third item in the relevant dtype:: + + double precision :: a + integer :: b(3,4) + write(1) a, b + + record = f.read_record('<f4', np.dtype(('<i4', (4, 3)))) + a = record[0] + b = record[1].T + + Numpy also supports a short syntax for this kind of type:: + + record = f.read_record('<f4', '(3,3)<i4') + + See Also + -------- + read_reals + read_ints + + """ + dtype = kwargs.pop('dtype', None) + if kwargs: + raise ValueError("Unknown keyword arguments {}".format(tuple(kwargs.keys()))) + + if dtype is not None: + dtypes = dtypes + (dtype,) + elif not dtypes: + raise ValueError('Must specify at least one dtype') + + first_size = self._read_size() + + dtypes = tuple(np.dtype(dtype) for dtype in dtypes) + block_size = sum(dtype.itemsize for dtype in dtypes) + + num_blocks, remainder = divmod(first_size, block_size) + if remainder != 0: + raise ValueError('Size obtained ({0}) is not a multiple of the ' + 'dtypes given ({1}).'.format(first_size, block_size)) + + if len(dtypes) != 1 and first_size != block_size: + # Fortran does not write mixed type array items in interleaved order, + # and it's not possible to guess the sizes of the arrays that were written. + # The user must specify the exact sizes of each of the arrays. + raise ValueError('Size obtained ({0}) does not match with the expected ' + 'size ({1}) of multi-item record'.format(first_size, block_size)) + + data = [] + for dtype in dtypes: + r = np.fromfile(self._fp, dtype=dtype, count=num_blocks) + if dtype.shape != (): + # Squeeze outmost block dimension for array items + if num_blocks == 1: + assert r.shape == (1,) + dtype.shape + r = r[0] + + data.append(r) + + second_size = self._read_size() + if first_size != second_size: + raise IOError('Sizes do not agree in the header and footer for ' + 'this record - check header dtype') + + # Unpack result + if len(dtypes) == 1: + return data[0] + else: + return tuple(data) + + def read_ints(self, dtype='i4'): + """ + Reads a record of a given type from the file, defaulting to an integer + type (``INTEGER*4`` in Fortran). + + Parameters + ---------- + dtype : dtype, optional + Data type specifying the size and endiness of the data. + + Returns + ------- + data : ndarray + A one-dimensional array object. + + See Also + -------- + read_reals + read_record + + """ + return self.read_record(dtype) + + def read_reals(self, dtype='f8'): + """ + Reads a record of a given type from the file, defaulting to a floating + point number (``real*8`` in Fortran). + + Parameters + ---------- + dtype : dtype, optional + Data type specifying the size and endiness of the data. + + Returns + ------- + data : ndarray + A one-dimensional array object. + + See Also + -------- + read_ints + read_record + + """ + return self.read_record(dtype) + + def close(self): + """ + Closes the file. It is unsupported to call any other methods off this + object after closing it. 
Note that this class supports the 'with' + statement in modern versions of Python, to call this automatically + + """ + self._fp.close() + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.close() diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/_fortran.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/_fortran.pyc new file mode 100644 index 0000000..5d7ced4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/_fortran.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/_test_fortran.so b/project/venv/lib/python2.7/site-packages/scipy/io/_test_fortran.so new file mode 100755 index 0000000..6a24da2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/_test_fortran.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/io/arff/__init__.py new file mode 100644 index 0000000..abb0b14 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/__init__.py @@ -0,0 +1,26 @@ +""" +Module to read ARFF files, which are the standard data format for WEKA. + +ARFF is a text file format which support numerical, string and data values. +The format can also represent missing data and sparse data. + +Notes +----- +The ARFF support in ``scipy.io`` provides file reading functionality only. +For more extensive ARFF functionality, see `liac-arff +<https://github.com/renatopp/liac-arff>`_. + +See the `WEKA website <http://weka.wikispaces.com/ARFF>`_ +for more details about the ARFF format and available datasets. + +""" +from __future__ import division, print_function, absolute_import + +from .arffread import * +from . import arffread + +__all__ = arffread.__all__ + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/arff/__init__.pyc new file mode 100644 index 0000000..3394224 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/arff/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/arffread.py b/project/venv/lib/python2.7/site-packages/scipy/io/arff/arffread.py new file mode 100644 index 0000000..f82afcf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/arffread.py @@ -0,0 +1,670 @@ +# Last Change: Mon Aug 20 08:00 PM 2007 J +from __future__ import division, print_function, absolute_import + +import re +import itertools +import datetime +from functools import partial + +import numpy as np + +from scipy._lib.six import next + +"""A module to read arff files.""" + +__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError'] + +# An Arff file is basically two parts: +# - header +# - data +# +# A header has each of its components starting by @META where META is one of +# the keyword (attribute of relation, for now). + +# TODO: +# - both integer and reals are treated as numeric -> the integer info +# is lost! 
+#   - Replace ValueError by ParseError or something
+
+# We can now handle the following:
+#   - numeric and nominal attributes
+#   - missing values for numeric attributes
+
+r_meta = re.compile(r'^\s*@')
+# Match a comment
+r_comment = re.compile(r'^%')
+# Match an empty line
+r_empty = re.compile(r'^\s+$')
+# Match a header line, that is a line which starts with @ followed by a word
+r_headerline = re.compile(r'^@\S*')
+r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
+r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
+r_attribute = re.compile(r'^@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')
+
+# To get attribute names enclosed with ''
+r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
+# To get normal attributes
+r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
+
+#-------------------------
+# Module defined exception
+#-------------------------
+
+
+class ArffError(IOError):
+    pass
+
+
+class ParseArffError(ArffError):
+    pass
+
+#------------------
+# Various utilities
+#------------------
+
+# An attribute is defined as @attribute name value
+
+
+def parse_type(attrtype):
+    """Given an arff attribute value (meta data), returns its type.
+
+    Expects the value to be a name."""
+    uattribute = attrtype.lower().strip()
+    if uattribute[0] == '{':
+        return 'nominal'
+    elif uattribute[:len('real')] == 'real':
+        return 'numeric'
+    elif uattribute[:len('integer')] == 'integer':
+        return 'numeric'
+    elif uattribute[:len('numeric')] == 'numeric':
+        return 'numeric'
+    elif uattribute[:len('string')] == 'string':
+        return 'string'
+    elif uattribute[:len('relational')] == 'relational':
+        return 'relational'
+    elif uattribute[:len('date')] == 'date':
+        return 'date'
+    else:
+        raise ParseArffError("unknown attribute %s" % uattribute)
+
+
+def get_nominal(attribute):
+    """If attribute is nominal, returns a list of the values."""
+    return attribute.split(',')
+
+
+def read_data_list(ofile):
+    """Read each line of the iterable and put it in a list."""
+    data = [next(ofile)]
+    if data[0].strip()[0] == '{':
+        raise ValueError("This looks like a sparse ARFF: not supported yet")
+    data.extend([i for i in ofile])
+    return data
+
+
+def get_ndata(ofile):
+    """Read the whole file to get the number of data records."""
+    data = [next(ofile)]
+    loc = 1
+    if data[0].strip()[0] == '{':
+        raise ValueError("This looks like a sparse ARFF: not supported yet")
+    for i in ofile:
+        loc += 1
+    return loc
+
+
+def maxnomlen(atrv):
+    """Given a string containing a nominal type definition, returns the
+    string length of the longest component.
+
+    A nominal type is defined as something framed between braces ({}).
+
+    Parameters
+    ----------
+    atrv : str
+       Nominal type definition
+
+    Returns
+    -------
+    slen : int
+       length of longest component
+
+    Examples
+    --------
+    maxnomlen("{floup, bouga, fl, ratata}") returns 6 (the size of
+    ratata, the longest nominal value).
+
+    >>> maxnomlen("{floup, bouga, fl, ratata}")
+    6
+    """
+    nomtp = get_nom_val(atrv)
+    return max(len(i) for i in nomtp)
+
+
+def get_nom_val(atrv):
+    """Given a string containing a nominal type, returns a tuple of the
+    possible values.
+
+    A nominal type is defined as something framed between braces ({}).
+
+    Parameters
+    ----------
+    atrv : str
+       Nominal type definition
+
+    Returns
+    -------
+    poss_vals : tuple
+       possible values
+
+    Examples
+    --------
+    >>> get_nom_val("{floup, bouga, fl, ratata}")
+    ('floup', 'bouga', 'fl', 'ratata')
+    """
+    r_nominal = re.compile('{(.+)}')
+    m = r_nominal.match(atrv)
+    if m:
+        return tuple(i.strip() for i in m.group(1).split(','))
+    else:
+        raise ValueError("This does not look like a nominal string")
+
+
+def get_date_format(atrv):
+    # Parse a date attribute definition and convert its Java
+    # SimpleDateFormat pattern to a strptime-style pattern, together with
+    # the matching numpy datetime unit.
+    r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
+    m = r_date.match(atrv)
+    if m:
+        pattern = m.group(1).strip()
+        # convert time pattern from Java's SimpleDateFormat to C's format
+        datetime_unit = None
+        if "yyyy" in pattern:
+            pattern = pattern.replace("yyyy", "%Y")
+            datetime_unit = "Y"
+        elif "yy" in pattern:
+            pattern = pattern.replace("yy", "%y")
+            datetime_unit = "Y"
+        if "MM" in pattern:
+            pattern = pattern.replace("MM", "%m")
+            datetime_unit = "M"
+        if "dd" in pattern:
+            pattern = pattern.replace("dd", "%d")
+            datetime_unit = "D"
+        if "HH" in pattern:
+            pattern = pattern.replace("HH", "%H")
+            datetime_unit = "h"
+        if "mm" in pattern:
+            pattern = pattern.replace("mm", "%M")
+            datetime_unit = "m"
+        if "ss" in pattern:
+            pattern = pattern.replace("ss", "%S")
+            datetime_unit = "s"
+        if "z" in pattern or "Z" in pattern:
+            raise ValueError("Date type attributes with time zone not "
+                             "supported yet")
+
+        if datetime_unit is None:
+            raise ValueError("Invalid or unsupported date format")
+
+        return pattern, datetime_unit
+    else:
+        raise ValueError("Invalid or no date format")
+
+
+def go_data(ofile):
+    """Skip the header.
+
+    The first next() call of the returned iterator will be the @data line."""
+    return itertools.dropwhile(lambda x: not r_datameta.match(x), ofile)
+
+
+#----------------
+# Parsing header
+#----------------
+def tokenize_attribute(iterable, attribute):
+    """Parse a raw string in the header (i.e. starting with @attribute).
+
+    Given a raw string attribute, try to get the name and type of the
+    attribute. Constraints:
+
+    * The first line must start with @attribute (case insensitive, and
+      space-like characters before @attribute are allowed)
+    * Also works if the attribute is spread over multiple lines.
+    * Works if empty lines or comments are in between
+
+    Parameters
+    ----------
+    attribute : str
+       the attribute string.
+
+    Returns
+    -------
+    name : str
+       name of the attribute
+    value : str
+       value of the attribute
+    next : str
+       next line to be parsed
+
+    Examples
+    --------
+    If attribute is a string defined in python as r"floupi real", will
+    return floupi as name, and real as value.
+
+    >>> iterable = iter([0] * 10) # dummy iterator
+    >>> tokenize_attribute(iterable, r"@attribute floupi real")
+    ('floupi', 'real', 0)
+
+    If attribute is r"'floupi 2' real", will return 'floupi 2' as name,
+    and real as value.
+
+    >>> tokenize_attribute(iterable, r"  @attribute 'floupi 2' real   ")
+    ('floupi 2', 'real', 0)
+
+    """
+    sattr = attribute.strip()
+    mattr = r_attribute.match(sattr)
+    if mattr:
+        # atrv is everything after @attribute
+        atrv = mattr.group(1)
+        if r_comattrval.match(atrv):
+            name, type = tokenize_single_comma(atrv)
+            next_item = next(iterable)
+        elif r_wcomattrval.match(atrv):
+            name, type = tokenize_single_wcomma(atrv)
+            next_item = next(iterable)
+        else:
+            # Not sure we should support this, as it does not seem supported by
+            # weka.
+ raise ValueError("multi line not supported yet") + #name, type, next_item = tokenize_multilines(iterable, atrv) + else: + raise ValueError("First line unparsable: %s" % sattr) + + if type == 'relational': + raise ValueError("relational attributes not supported yet") + return name, type, next_item + + +def tokenize_single_comma(val): + # XXX we match twice the same string (here and at the caller level). It is + # stupid, but it is easier for now... + m = r_comattrval.match(val) + if m: + try: + name = m.group(1).strip() + type = m.group(2).strip() + except IndexError: + raise ValueError("Error while tokenizing attribute") + else: + raise ValueError("Error while tokenizing single %s" % val) + return name, type + + +def tokenize_single_wcomma(val): + # XXX we match twice the same string (here and at the caller level). It is + # stupid, but it is easier for now... + m = r_wcomattrval.match(val) + if m: + try: + name = m.group(1).strip() + type = m.group(2).strip() + except IndexError: + raise ValueError("Error while tokenizing attribute") + else: + raise ValueError("Error while tokenizing single %s" % val) + return name, type + + +def read_header(ofile): + """Read the header of the iterable ofile.""" + i = next(ofile) + + # Pass first comments + while r_comment.match(i): + i = next(ofile) + + # Header is everything up to DATA attribute ? + relation = None + attributes = [] + while not r_datameta.match(i): + m = r_headerline.match(i) + if m: + isattr = r_attribute.match(i) + if isattr: + name, type, i = tokenize_attribute(ofile, i) + attributes.append((name, type)) + else: + isrel = r_relation.match(i) + if isrel: + relation = isrel.group(1) + else: + raise ValueError("Error parsing line %s" % i) + i = next(ofile) + else: + i = next(ofile) + + return relation, attributes + + +#-------------------- +# Parsing actual data +#-------------------- +def safe_float(x): + """given a string x, convert it to a float. If the stripped string is a ?, + return a Nan (missing value). + + Parameters + ---------- + x : str + string to convert + + Returns + ------- + f : float + where float can be nan + + Examples + -------- + >>> safe_float('1') + 1.0 + >>> safe_float('1\\n') + 1.0 + >>> safe_float('?\\n') + nan + """ + if '?' in x: + return np.nan + else: + return float(x) + + +def safe_nominal(value, pvalue): + svalue = value.strip() + if svalue in pvalue: + return svalue + elif svalue == '?': + return svalue + else: + raise ValueError("%s value not in %s" % (str(svalue), str(pvalue))) + + +def safe_date(value, date_format, datetime_unit): + date_str = value.strip().strip("'").strip('"') + if date_str == '?': + return np.datetime64('NaT', datetime_unit) + else: + dt = datetime.datetime.strptime(date_str, date_format) + return np.datetime64(dt).astype("datetime64[%s]" % datetime_unit) + + +class MetaData(object): + """Small container to keep useful information on a ARFF dataset. + + Knows about attributes names and types. + + Examples + -------- + :: + + data, meta = loadarff('iris.arff') + # This will print the attributes names of the iris.arff dataset + for i in meta: + print(i) + # This works too + meta.names() + # Getting attribute type + types = meta.types() + + Notes + ----- + Also maintains the list of attributes in order, i.e. doing for i in + meta, where meta is an instance of MetaData, will return the + different attribute names in the order they were defined. 
+ """ + def __init__(self, rel, attr): + self.name = rel + # We need the dictionary to be ordered + # XXX: may be better to implement an ordered dictionary + self._attributes = {} + self._attrnames = [] + for name, value in attr: + tp = parse_type(value) + self._attrnames.append(name) + if tp == 'nominal': + self._attributes[name] = (tp, get_nom_val(value)) + elif tp == 'date': + self._attributes[name] = (tp, get_date_format(value)[0]) + else: + self._attributes[name] = (tp, None) + + def __repr__(self): + msg = "" + msg += "Dataset: %s\n" % self.name + for i in self._attrnames: + msg += "\t%s's type is %s" % (i, self._attributes[i][0]) + if self._attributes[i][1]: + msg += ", range is %s" % str(self._attributes[i][1]) + msg += '\n' + return msg + + def __iter__(self): + return iter(self._attrnames) + + def __getitem__(self, key): + return self._attributes[key] + + def names(self): + """Return the list of attribute names.""" + return self._attrnames + + def types(self): + """Return the list of attribute types.""" + attr_types = [self._attributes[name][0] for name in self._attrnames] + return attr_types + + +def loadarff(f): + """ + Read an arff file. + + The data is returned as a record array, which can be accessed much like + a dictionary of numpy arrays. For example, if one of the attributes is + called 'pressure', then its first 10 data points can be accessed from the + ``data`` record array like so: ``data['pressure'][0:10]`` + + + Parameters + ---------- + f : file-like or str + File-like object to read from, or filename to open. + + Returns + ------- + data : record array + The data of the arff file, accessible by attribute names. + meta : `MetaData` + Contains information about the arff file such as name and + type of attributes, the relation (name of the dataset), etc... + + Raises + ------ + ParseArffError + This is raised if the given file is not ARFF-formatted. + NotImplementedError + The ARFF file has an attribute which is not supported yet. + + Notes + ----- + + This function should be able to read most arff files. Not + implemented functionality include: + + * date type attributes + * string type attributes + + It can read files with numeric and nominal attributes. It cannot read + files with sparse data ({} in the file). However, this function can + read files with missing data (? in the file), representing the data + points as NaNs. + + Examples + -------- + >>> from scipy.io import arff + >>> from io import StringIO + >>> content = \"\"\" + ... @relation foo + ... @attribute width numeric + ... @attribute height numeric + ... @attribute color {red,green,blue,yellow,black} + ... @data + ... 5.0,3.25,blue + ... 4.5,3.75,green + ... 3.0,4.00,red + ... 
\"\"\" + >>> f = StringIO(content) + >>> data, meta = arff.loadarff(f) + >>> data + array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')], + dtype=[('width', '<f8'), ('height', '<f8'), ('color', '|S6')]) + >>> meta + Dataset: foo + \twidth's type is numeric + \theight's type is numeric + \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black') + + """ + if hasattr(f, 'read'): + ofile = f + else: + ofile = open(f, 'rt') + try: + return _loadarff(ofile) + finally: + if ofile is not f: # only close what we opened + ofile.close() + + +def _loadarff(ofile): + # Parse the header file + try: + rel, attr = read_header(ofile) + except ValueError as e: + msg = "Error while parsing header, error was: " + str(e) + raise ParseArffError(msg) + + # Check whether we have a string attribute (not supported yet) + hasstr = False + for name, value in attr: + type = parse_type(value) + if type == 'string': + hasstr = True + + meta = MetaData(rel, attr) + + # XXX The following code is not great + # Build the type descriptor descr and the list of convertors to convert + # each attribute to the suitable type (which should match the one in + # descr). + + # This can be used once we want to support integer as integer values and + # not as numeric anymore (using masked arrays ?). + acls2dtype = {'real': float, 'integer': float, 'numeric': float} + acls2conv = {'real': safe_float, + 'integer': safe_float, + 'numeric': safe_float} + descr = [] + convertors = [] + if not hasstr: + for name, value in attr: + type = parse_type(value) + if type == 'date': + date_format, datetime_unit = get_date_format(value) + descr.append((name, "datetime64[%s]" % datetime_unit)) + convertors.append(partial(safe_date, date_format=date_format, + datetime_unit=datetime_unit)) + elif type == 'nominal': + n = maxnomlen(value) + descr.append((name, 'S%d' % n)) + pvalue = get_nom_val(value) + convertors.append(partial(safe_nominal, pvalue=pvalue)) + else: + descr.append((name, acls2dtype[type])) + convertors.append(safe_float) + #dc.append(acls2conv[type]) + #sdescr.append((name, acls2sdtype[type])) + else: + # How to support string efficiently ? Ideally, we should know the max + # size of the string before allocating the numpy array. + raise NotImplementedError("String attributes not supported yet, sorry") + + ni = len(convertors) + + def generator(row_iter, delim=','): + # TODO: this is where we are spending times (~80%). I think things + # could be made more efficiently: + # - We could for example "compile" the function, because some values + # do not change here. + # - The function to convert a line to dtyped values could also be + # generated on the fly from a string and be executed instead of + # looping. + # - The regex are overkill: for comments, checking that a line starts + # by % should be enough and faster, and for empty lines, same thing + # --> this does not seem to change anything. + + # 'compiling' the range since it does not change + # Note, I have already tried zipping the converters and + # row elements and got slightly worse performance. + elems = list(range(ni)) + + for raw in row_iter: + # We do not abstract skipping comments and empty lines for + # performance reasons. 
+ if r_comment.match(raw) or r_empty.match(raw): + continue + row = raw.split(delim) + yield tuple([convertors[i](row[i]) for i in elems]) + + a = generator(ofile) + # No error should happen here: it is a bug otherwise + data = np.fromiter(a, descr) + return data, meta + + +#----- +# Misc +#----- +def basic_stats(data): + nbfac = data.size * 1. / (data.size - 1) + return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac + + +def print_attribute(name, tp, data): + type = tp[0] + if type == 'numeric' or type == 'real' or type == 'integer': + min, max, mean, std = basic_stats(data) + print("%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std)) + else: + msg = name + ",{" + for i in range(len(tp[1])-1): + msg += tp[1][i] + "," + msg += tp[1][-1] + msg += "}" + print(msg) + + +def test_weka(filename): + data, meta = loadarff(filename) + print(len(data.dtype)) + print(data.size) + for i in meta: + print_attribute(i, meta[i], data[i]) + + +# make sure nose does not find this as a test +test_weka.__test__ = False + + +if __name__ == '__main__': + import sys + filename = sys.argv[1] + test_weka(filename) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/arffread.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/arff/arffread.pyc new file mode 100644 index 0000000..98f060b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/arff/arffread.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/setup.py b/project/venv/lib/python2.7/site-packages/scipy/io/arff/setup.py new file mode 100644 index 0000000..c3b2925 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/setup.py @@ -0,0 +1,13 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='io',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('arff', parent_package, top_path) + config.add_data_dir('tests') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/arff/setup.pyc new file mode 100644 index 0000000..833fd06 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/arff/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/__init__.pyc new file mode 100644 index 0000000..e1cccb4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/iris.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/iris.arff new file mode 100644 index 0000000..780480c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/iris.arff @@ -0,0 +1,225 @@ +% 1. Title: Iris Plants Database +% +% 2. Sources: +% (a) Creator: R.A. Fisher +% (b) Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov) +% (c) Date: July, 1988 +% +% 3. Past Usage: +% - Publications: too many to mention!!! Here are a few. +% 1. Fisher,R.A. 
"The use of multiple measurements in taxonomic problems" +% Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions +% to Mathematical Statistics" (John Wiley, NY, 1950). +% 2. Duda,R.O., & Hart,P.E. (1973) Pattern Classification and Scene Analysis. +% (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218. +% 3. Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System +% Structure and Classification Rule for Recognition in Partially Exposed +% Environments". IEEE Transactions on Pattern Analysis and Machine +% Intelligence, Vol. PAMI-2, No. 1, 67-71. +% -- Results: +% -- very low misclassification rates (0% for the setosa class) +% 4. Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule". IEEE +% Transactions on Information Theory, May 1972, 431-433. +% -- Results: +% -- very low misclassification rates again +% 5. See also: 1988 MLC Proceedings, 54-64. Cheeseman et al's AUTOCLASS II +% conceptual clustering system finds 3 classes in the data. +% +% 4. Relevant Information: +% --- This is perhaps the best known database to be found in the pattern +% recognition literature. Fisher's paper is a classic in the field +% and is referenced frequently to this day. (See Duda & Hart, for +% example.) The data set contains 3 classes of 50 instances each, +% where each class refers to a type of iris plant. One class is +% linearly separable from the other 2; the latter are NOT linearly +% separable from each other. +% --- Predicted attribute: class of iris plant. +% --- This is an exceedingly simple domain. +% +% 5. Number of Instances: 150 (50 in each of three classes) +% +% 6. Number of Attributes: 4 numeric, predictive attributes and the class +% +% 7. Attribute Information: +% 1. sepal length in cm +% 2. sepal width in cm +% 3. petal length in cm +% 4. petal width in cm +% 5. class: +% -- Iris Setosa +% -- Iris Versicolour +% -- Iris Virginica +% +% 8. Missing Attribute Values: None +% +% Summary Statistics: +% Min Max Mean SD Class Correlation +% sepal length: 4.3 7.9 5.84 0.83 0.7826 +% sepal width: 2.0 4.4 3.05 0.43 -0.4194 +% petal length: 1.0 6.9 3.76 1.76 0.9490 (high!) +% petal width: 0.1 2.5 1.20 0.76 0.9565 (high!) +% +% 9. Class Distribution: 33.3% for each of 3 classes. 
+ +@RELATION iris + +@ATTRIBUTE sepallength REAL +@ATTRIBUTE sepalwidth REAL +@ATTRIBUTE petallength REAL +@ATTRIBUTE petalwidth REAL +@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica} + +@DATA +5.1,3.5,1.4,0.2,Iris-setosa +4.9,3.0,1.4,0.2,Iris-setosa +4.7,3.2,1.3,0.2,Iris-setosa +4.6,3.1,1.5,0.2,Iris-setosa +5.0,3.6,1.4,0.2,Iris-setosa +5.4,3.9,1.7,0.4,Iris-setosa +4.6,3.4,1.4,0.3,Iris-setosa +5.0,3.4,1.5,0.2,Iris-setosa +4.4,2.9,1.4,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +5.4,3.7,1.5,0.2,Iris-setosa +4.8,3.4,1.6,0.2,Iris-setosa +4.8,3.0,1.4,0.1,Iris-setosa +4.3,3.0,1.1,0.1,Iris-setosa +5.8,4.0,1.2,0.2,Iris-setosa +5.7,4.4,1.5,0.4,Iris-setosa +5.4,3.9,1.3,0.4,Iris-setosa +5.1,3.5,1.4,0.3,Iris-setosa +5.7,3.8,1.7,0.3,Iris-setosa +5.1,3.8,1.5,0.3,Iris-setosa +5.4,3.4,1.7,0.2,Iris-setosa +5.1,3.7,1.5,0.4,Iris-setosa +4.6,3.6,1.0,0.2,Iris-setosa +5.1,3.3,1.7,0.5,Iris-setosa +4.8,3.4,1.9,0.2,Iris-setosa +5.0,3.0,1.6,0.2,Iris-setosa +5.0,3.4,1.6,0.4,Iris-setosa +5.2,3.5,1.5,0.2,Iris-setosa +5.2,3.4,1.4,0.2,Iris-setosa +4.7,3.2,1.6,0.2,Iris-setosa +4.8,3.1,1.6,0.2,Iris-setosa +5.4,3.4,1.5,0.4,Iris-setosa +5.2,4.1,1.5,0.1,Iris-setosa +5.5,4.2,1.4,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +5.0,3.2,1.2,0.2,Iris-setosa +5.5,3.5,1.3,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +4.4,3.0,1.3,0.2,Iris-setosa +5.1,3.4,1.5,0.2,Iris-setosa +5.0,3.5,1.3,0.3,Iris-setosa +4.5,2.3,1.3,0.3,Iris-setosa +4.4,3.2,1.3,0.2,Iris-setosa +5.0,3.5,1.6,0.6,Iris-setosa +5.1,3.8,1.9,0.4,Iris-setosa +4.8,3.0,1.4,0.3,Iris-setosa +5.1,3.8,1.6,0.2,Iris-setosa +4.6,3.2,1.4,0.2,Iris-setosa +5.3,3.7,1.5,0.2,Iris-setosa +5.0,3.3,1.4,0.2,Iris-setosa +7.0,3.2,4.7,1.4,Iris-versicolor +6.4,3.2,4.5,1.5,Iris-versicolor +6.9,3.1,4.9,1.5,Iris-versicolor +5.5,2.3,4.0,1.3,Iris-versicolor +6.5,2.8,4.6,1.5,Iris-versicolor +5.7,2.8,4.5,1.3,Iris-versicolor +6.3,3.3,4.7,1.6,Iris-versicolor +4.9,2.4,3.3,1.0,Iris-versicolor +6.6,2.9,4.6,1.3,Iris-versicolor +5.2,2.7,3.9,1.4,Iris-versicolor +5.0,2.0,3.5,1.0,Iris-versicolor +5.9,3.0,4.2,1.5,Iris-versicolor +6.0,2.2,4.0,1.0,Iris-versicolor +6.1,2.9,4.7,1.4,Iris-versicolor +5.6,2.9,3.6,1.3,Iris-versicolor +6.7,3.1,4.4,1.4,Iris-versicolor +5.6,3.0,4.5,1.5,Iris-versicolor +5.8,2.7,4.1,1.0,Iris-versicolor +6.2,2.2,4.5,1.5,Iris-versicolor +5.6,2.5,3.9,1.1,Iris-versicolor +5.9,3.2,4.8,1.8,Iris-versicolor +6.1,2.8,4.0,1.3,Iris-versicolor +6.3,2.5,4.9,1.5,Iris-versicolor +6.1,2.8,4.7,1.2,Iris-versicolor +6.4,2.9,4.3,1.3,Iris-versicolor +6.6,3.0,4.4,1.4,Iris-versicolor +6.8,2.8,4.8,1.4,Iris-versicolor +6.7,3.0,5.0,1.7,Iris-versicolor +6.0,2.9,4.5,1.5,Iris-versicolor +5.7,2.6,3.5,1.0,Iris-versicolor +5.5,2.4,3.8,1.1,Iris-versicolor +5.5,2.4,3.7,1.0,Iris-versicolor +5.8,2.7,3.9,1.2,Iris-versicolor +6.0,2.7,5.1,1.6,Iris-versicolor +5.4,3.0,4.5,1.5,Iris-versicolor +6.0,3.4,4.5,1.6,Iris-versicolor +6.7,3.1,4.7,1.5,Iris-versicolor +6.3,2.3,4.4,1.3,Iris-versicolor +5.6,3.0,4.1,1.3,Iris-versicolor +5.5,2.5,4.0,1.3,Iris-versicolor +5.5,2.6,4.4,1.2,Iris-versicolor +6.1,3.0,4.6,1.4,Iris-versicolor +5.8,2.6,4.0,1.2,Iris-versicolor +5.0,2.3,3.3,1.0,Iris-versicolor +5.6,2.7,4.2,1.3,Iris-versicolor +5.7,3.0,4.2,1.2,Iris-versicolor +5.7,2.9,4.2,1.3,Iris-versicolor +6.2,2.9,4.3,1.3,Iris-versicolor +5.1,2.5,3.0,1.1,Iris-versicolor +5.7,2.8,4.1,1.3,Iris-versicolor +6.3,3.3,6.0,2.5,Iris-virginica +5.8,2.7,5.1,1.9,Iris-virginica +7.1,3.0,5.9,2.1,Iris-virginica +6.3,2.9,5.6,1.8,Iris-virginica +6.5,3.0,5.8,2.2,Iris-virginica +7.6,3.0,6.6,2.1,Iris-virginica +4.9,2.5,4.5,1.7,Iris-virginica 
+7.3,2.9,6.3,1.8,Iris-virginica +6.7,2.5,5.8,1.8,Iris-virginica +7.2,3.6,6.1,2.5,Iris-virginica +6.5,3.2,5.1,2.0,Iris-virginica +6.4,2.7,5.3,1.9,Iris-virginica +6.8,3.0,5.5,2.1,Iris-virginica +5.7,2.5,5.0,2.0,Iris-virginica +5.8,2.8,5.1,2.4,Iris-virginica +6.4,3.2,5.3,2.3,Iris-virginica +6.5,3.0,5.5,1.8,Iris-virginica +7.7,3.8,6.7,2.2,Iris-virginica +7.7,2.6,6.9,2.3,Iris-virginica +6.0,2.2,5.0,1.5,Iris-virginica +6.9,3.2,5.7,2.3,Iris-virginica +5.6,2.8,4.9,2.0,Iris-virginica +7.7,2.8,6.7,2.0,Iris-virginica +6.3,2.7,4.9,1.8,Iris-virginica +6.7,3.3,5.7,2.1,Iris-virginica +7.2,3.2,6.0,1.8,Iris-virginica +6.2,2.8,4.8,1.8,Iris-virginica +6.1,3.0,4.9,1.8,Iris-virginica +6.4,2.8,5.6,2.1,Iris-virginica +7.2,3.0,5.8,1.6,Iris-virginica +7.4,2.8,6.1,1.9,Iris-virginica +7.9,3.8,6.4,2.0,Iris-virginica +6.4,2.8,5.6,2.2,Iris-virginica +6.3,2.8,5.1,1.5,Iris-virginica +6.1,2.6,5.6,1.4,Iris-virginica +7.7,3.0,6.1,2.3,Iris-virginica +6.3,3.4,5.6,2.4,Iris-virginica +6.4,3.1,5.5,1.8,Iris-virginica +6.0,3.0,4.8,1.8,Iris-virginica +6.9,3.1,5.4,2.1,Iris-virginica +6.7,3.1,5.6,2.4,Iris-virginica +6.9,3.1,5.1,2.3,Iris-virginica +5.8,2.7,5.1,1.9,Iris-virginica +6.8,3.2,5.9,2.3,Iris-virginica +6.7,3.3,5.7,2.5,Iris-virginica +6.7,3.0,5.2,2.3,Iris-virginica +6.3,2.5,5.0,1.9,Iris-virginica +6.5,3.0,5.2,2.0,Iris-virginica +6.2,3.4,5.4,2.3,Iris-virginica +5.9,3.0,5.1,1.8,Iris-virginica +% +% +% diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/missing.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/missing.arff new file mode 100644 index 0000000..dedc64c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/missing.arff @@ -0,0 +1,8 @@ +% This arff file contains some missing data +@relation missing +@attribute yop real +@attribute yap real +@data +1,5 +2,4 +?,? 
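+% A sketch of what loading this fixture yields: the '?' entries come back
+% as NaN (via safe_float), so, illustratively:
+%
+%     from scipy.io import arff
+%     data, meta = arff.loadarff('missing.arff')
+%     data['yop']   # -> array([ 1.,  2., nan])
+%     data['yap']   # -> array([ 5.,  4., nan])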
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/nodata.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/nodata.arff new file mode 100644 index 0000000..5766aeb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/nodata.arff @@ -0,0 +1,11 @@ +@RELATION iris + +@ATTRIBUTE sepallength REAL +@ATTRIBUTE sepalwidth REAL +@ATTRIBUTE petallength REAL +@ATTRIBUTE petalwidth REAL +@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica} + +@DATA + +% This file has no data diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test1.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test1.arff new file mode 100644 index 0000000..ccc8e0c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test1.arff @@ -0,0 +1,10 @@ +@RELATION test1 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {class0, class1, class2, class3} + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test2.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test2.arff new file mode 100644 index 0000000..30f0dbf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test2.arff @@ -0,0 +1,15 @@ +@RELATION test2 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 real +@ATTRIBUTE attr2 integer +@ATTRIBUTE attr3 Integer +@ATTRIBUTE attr4 Numeric +@ATTRIBUTE attr5 numeric +@ATTRIBUTE attr6 string +@ATTRIBUTE attr7 STRING +@ATTRIBUTE attr8 {bla} +@ATTRIBUTE attr9 {bla, bla} + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test3.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test3.arff new file mode 100644 index 0000000..23da3b3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test3.arff @@ -0,0 +1,6 @@ +@RELATION test3 + +@ATTRIBUTE attr0 crap + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test4.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test4.arff new file mode 100644 index 0000000..bf5f99c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test4.arff @@ -0,0 +1,11 @@ +@RELATION test5 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {class0, class1, class2, class3} +@DATA +0.1, 0.2, 0.3, 0.4,class1 +-0.1, -0.2, -0.3, -0.4,class2 +1, 2, 3, 4,class3 diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test5.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test5.arff new file mode 100644 index 0000000..0075daf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test5.arff @@ -0,0 +1,26 @@ +@RELATION test4 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {class0, class1, class2, class3} + +@DATA + +% lsdflkjhaksjdhf + +% lsdflkjhaksjdhf + +0.1, 0.2, 0.3, 0.4,class1 +% laksjdhf + +% lsdflkjhaksjdhf +-0.1, -0.2, -0.3, -0.4,class2 + +% lsdflkjhaksjdhf +% lsdflkjhaksjdhf + +% lsdflkjhaksjdhf + +1, 2, 3, 4,class3 diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test6.arff 
b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test6.arff new file mode 100644 index 0000000..b63280b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test6.arff @@ -0,0 +1,12 @@ +@RELATION test6 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {C} + +@DATA +0.1, 0.2, 0.3, 0.4,C +-0.1, -0.2, -0.3, -0.4,C +1, 2, 3, 4,C diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test7.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test7.arff new file mode 100644 index 0000000..38ef6c9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test7.arff @@ -0,0 +1,15 @@ +@RELATION test7 + +@ATTRIBUTE attr_year DATE yyyy +@ATTRIBUTE attr_month DATE yyyy-MM +@ATTRIBUTE attr_date DATE yyyy-MM-dd +@ATTRIBUTE attr_datetime_local DATE "yyyy-MM-dd HH:mm" +@ATTRIBUTE attr_datetime_missing DATE "yyyy-MM-dd HH:mm" + +@DATA +1999,1999-01,1999-01-31,"1999-01-31 00:01",? +2004,2004-12,2004-12-01,"2004-12-01 23:59","2004-12-01 23:59" +1817,1817-04,1817-04-28,"1817-04-28 13:00",? +2100,2100-09,2100-09-10,"2100-09-10 12:00",? +2013,2013-11,2013-11-30,"2013-11-30 04:55","2013-11-30 04:55" +1631,1631-10,1631-10-15,"1631-10-15 20:04","1631-10-15 20:04" \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test8.arff b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test8.arff new file mode 100644 index 0000000..776deb4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/data/test8.arff @@ -0,0 +1,12 @@ +@RELATION test8 + +@ATTRIBUTE attr_datetime_utc DATE "yyyy-MM-dd HH:mm Z" +@ATTRIBUTE attr_datetime_full DATE "yy-MM-dd HH:mm:ss z" + +@DATA +"1999-01-31 00:01 UTC","99-01-31 00:01:08 +0430" +"2004-12-01 23:59 UTC","04-12-01 23:59:59 -0800" +"1817-04-28 13:00 UTC","17-04-28 13:00:33 +1000" +"2100-09-10 12:00 UTC","21-09-10 12:00:21 -0300" +"2013-11-30 04:55 UTC","13-11-30 04:55:48 -1100" +"1631-10-15 20:04 UTC","31-10-15 20:04:10 +0000" \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/test_arffread.py b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/test_arffread.py new file mode 100644 index 0000000..8a88bc4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/test_arffread.py @@ -0,0 +1,259 @@ +from __future__ import division, print_function, absolute_import + +import datetime +import os +import sys +from os.path import join as pjoin + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from cStringIO import StringIO + +import numpy as np + +from numpy.testing import (assert_array_almost_equal, + assert_array_equal, assert_equal, assert_) +import pytest +from pytest import raises as assert_raises + +from scipy.io.arff.arffread import loadarff +from scipy.io.arff.arffread import read_header, parse_type, ParseArffError + + +data_path = pjoin(os.path.dirname(__file__), 'data') + +test1 = pjoin(data_path, 'test1.arff') +test2 = pjoin(data_path, 'test2.arff') +test3 = pjoin(data_path, 'test3.arff') + +test4 = pjoin(data_path, 'test4.arff') +test5 = pjoin(data_path, 'test5.arff') +test6 = pjoin(data_path, 'test6.arff') +test7 = pjoin(data_path, 'test7.arff') +test8 = pjoin(data_path, 'test8.arff') +expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'), + (-0.1, -0.2, -0.3, -0.4, 'class2'), + (1, 2, 3, 4, 'class3')] 
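+# expect4_data mirrors the three data rows shared by test4.arff, test5.arff
+# and test6.arff; only the four numeric fields are compared in _test below.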
+expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal'] + +missing = pjoin(data_path, 'missing.arff') +expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) +expect_missing = np.empty(3, [('yop', float), ('yap', float)]) +expect_missing['yop'] = expect_missing_raw[:, 0] +expect_missing['yap'] = expect_missing_raw[:, 1] + + +class TestData(object): + def test1(self): + # Parsing trivial file with nothing. + self._test(test4) + + def test2(self): + # Parsing trivial file with some comments in the data section. + self._test(test5) + + def test3(self): + # Parsing trivial file with nominal attribute of 1 character. + self._test(test6) + + def _test(self, test_file): + data, meta = loadarff(test_file) + for i in range(len(data)): + for j in range(4): + assert_array_almost_equal(expect4_data[i][j], data[i][j]) + assert_equal(meta.types(), expected_types) + + def test_filelike(self): + # Test reading from file-like object (StringIO) + f1 = open(test1) + data1, meta1 = loadarff(f1) + f1.close() + f2 = open(test1) + data2, meta2 = loadarff(StringIO(f2.read())) + f2.close() + assert_(data1 == data2) + assert_(repr(meta1) == repr(meta2)) + + @pytest.mark.skipif(sys.version_info < (3, 6), + reason='Passing path-like objects to IO functions requires Python >= 3.6') + def test_path(self): + # Test reading from `pathlib.Path` object + from pathlib import Path + + with open(test1) as f1: + data1, meta1 = loadarff(f1) + + data2, meta2 = loadarff(Path(test1)) + + assert_(data1 == data2) + assert_(repr(meta1) == repr(meta2)) + +class TestMissingData(object): + def test_missing(self): + data, meta = loadarff(missing) + for i in ['yop', 'yap']: + assert_array_almost_equal(data[i], expect_missing[i]) + + +class TestNoData(object): + def test_nodata(self): + # The file nodata.arff has no data in the @DATA section. + # Reading it should result in an array with length 0. + nodata_filename = os.path.join(data_path, 'nodata.arff') + data, meta = loadarff(nodata_filename) + expected_dtype = np.dtype([('sepallength', '<f8'), + ('sepalwidth', '<f8'), + ('petallength', '<f8'), + ('petalwidth', '<f8'), + ('class', 'S15')]) + assert_equal(data.dtype, expected_dtype) + assert_equal(data.size, 0) + + +class TestHeader(object): + def test_type_parsing(self): + # Test parsing type of attribute from their value. + ofile = open(test2) + rel, attrs = read_header(ofile) + ofile.close() + + expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric', + 'numeric', 'string', 'string', 'nominal', 'nominal'] + + for i in range(len(attrs)): + assert_(parse_type(attrs[i][1]) == expected[i]) + + def test_badtype_parsing(self): + # Test parsing wrong type of attribute from their value. + ofile = open(test3) + rel, attrs = read_header(ofile) + ofile.close() + + for name, value in attrs: + assert_raises(ParseArffError, parse_type, value) + + def test_fullheader1(self): + # Parsing trivial header with nothing. 
+ ofile = open(test1) + rel, attrs = read_header(ofile) + ofile.close() + + # Test relation + assert_(rel == 'test1') + + # Test numerical attributes + assert_(len(attrs) == 5) + for i in range(4): + assert_(attrs[i][0] == 'attr%d' % i) + assert_(attrs[i][1] == 'REAL') + + # Test nominal attribute + assert_(attrs[4][0] == 'class') + assert_(attrs[4][1] == '{class0, class1, class2, class3}') + + def test_dateheader(self): + ofile = open(test7) + rel, attrs = read_header(ofile) + ofile.close() + + assert_(rel == 'test7') + + assert_(len(attrs) == 5) + + assert_(attrs[0][0] == 'attr_year') + assert_(attrs[0][1] == 'DATE yyyy') + + assert_(attrs[1][0] == 'attr_month') + assert_(attrs[1][1] == 'DATE yyyy-MM') + + assert_(attrs[2][0] == 'attr_date') + assert_(attrs[2][1] == 'DATE yyyy-MM-dd') + + assert_(attrs[3][0] == 'attr_datetime_local') + assert_(attrs[3][1] == 'DATE "yyyy-MM-dd HH:mm"') + + assert_(attrs[4][0] == 'attr_datetime_missing') + assert_(attrs[4][1] == 'DATE "yyyy-MM-dd HH:mm"') + + def test_dateheader_unsupported(self): + ofile = open(test8) + rel, attrs = read_header(ofile) + ofile.close() + + assert_(rel == 'test8') + + assert_(len(attrs) == 2) + assert_(attrs[0][0] == 'attr_datetime_utc') + assert_(attrs[0][1] == 'DATE "yyyy-MM-dd HH:mm Z"') + + assert_(attrs[1][0] == 'attr_datetime_full') + assert_(attrs[1][1] == 'DATE "yy-MM-dd HH:mm:ss z"') + + +class TestDateAttribute(object): + def setup_method(self): + self.data, self.meta = loadarff(test7) + + def test_year_attribute(self): + expected = np.array([ + '1999', + '2004', + '1817', + '2100', + '2013', + '1631' + ], dtype='datetime64[Y]') + + assert_array_equal(self.data["attr_year"], expected) + + def test_month_attribute(self): + expected = np.array([ + '1999-01', + '2004-12', + '1817-04', + '2100-09', + '2013-11', + '1631-10' + ], dtype='datetime64[M]') + + assert_array_equal(self.data["attr_month"], expected) + + def test_date_attribute(self): + expected = np.array([ + '1999-01-31', + '2004-12-01', + '1817-04-28', + '2100-09-10', + '2013-11-30', + '1631-10-15' + ], dtype='datetime64[D]') + + assert_array_equal(self.data["attr_date"], expected) + + def test_datetime_local_attribute(self): + expected = np.array([ + datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1), + datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59), + datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0), + datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0), + datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55), + datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4) + ], dtype='datetime64[m]') + + assert_array_equal(self.data["attr_datetime_local"], expected) + + def test_datetime_missing(self): + expected = np.array([ + 'nat', + '2004-12-01T23:59', + 'nat', + 'nat', + '2013-11-30T04:55', + '1631-10-15T20:04' + ], dtype='datetime64[m]') + + assert_array_equal(self.data["attr_datetime_missing"], expected) + + def test_datetime_timezone(self): + assert_raises(ValueError, loadarff, test8) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/test_arffread.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/test_arffread.pyc new file mode 100644 index 0000000..b499b24 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/arff/tests/test_arffread.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/__init__.py 
b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/__init__.py new file mode 100644 index 0000000..59a303b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/__init__.py @@ -0,0 +1,4 @@ +from __future__ import division, print_function, absolute_import + +from scipy.io.harwell_boeing.hb import MalformedHeader, HBInfo, HBFile, \ + HBMatrixType, hb_read, hb_write diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/__init__.pyc new file mode 100644 index 0000000..c4fb98e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/_fortran_format_parser.py b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/_fortran_format_parser.py new file mode 100644 index 0000000..4c514d4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/_fortran_format_parser.py @@ -0,0 +1,312 @@ +""" +Preliminary module to handle fortran formats for IO. Does not use this outside +scipy.sparse io for now, until the API is deemed reasonable. + +The *Format classes handle conversion between fortran and python format, and +FortranFormatParser can create *Format instances from raw fortran format +strings (e.g. '(3I4)', '(10I3)', etc...) +""" +from __future__ import division, print_function, absolute_import + +import re + +import numpy as np + + +__all__ = ["BadFortranFormat", "FortranFormatParser", "IntFormat", "ExpFormat"] + + +TOKENS = { + "LPAR": r"\(", + "RPAR": r"\)", + "INT_ID": r"I", + "EXP_ID": r"E", + "INT": r"\d+", + "DOT": r"\.", +} + + +class BadFortranFormat(SyntaxError): + pass + + +def number_digits(n): + return int(np.floor(np.log10(np.abs(n))) + 1) + + +class IntFormat(object): + @classmethod + def from_number(cls, n, min=None): + """Given an integer, returns a "reasonable" IntFormat instance to represent + any number between 0 and n if n > 0, -n and n if n < 0 + + Parameters + ---------- + n : int + max number one wants to be able to represent + min : int + minimum number of characters to use for the format + + Returns + ------- + res : IntFormat + IntFormat instance with reasonable (see Notes) computed width + + Notes + ----- + Reasonable should be understood as the minimal string length necessary + without losing precision. For example, IntFormat.from_number(1) will + return an IntFormat instance of width 2, so that any 0 and 1 may be + represented as 1-character strings without loss of information. + """ + width = number_digits(n) + 1 + if n < 0: + width += 1 + repeat = 80 // width + return cls(width, min, repeat=repeat) + + def __init__(self, width, min=None, repeat=None): + self.width = width + self.repeat = repeat + self.min = min + + def __repr__(self): + r = "IntFormat(" + if self.repeat: + r += "%d" % self.repeat + r += "I%d" % self.width + if self.min: + r += ".%d" % self.min + return r + ")" + + @property + def fortran_format(self): + r = "(" + if self.repeat: + r += "%d" % self.repeat + r += "I%d" % self.width + if self.min: + r += ".%d" % self.min + return r + ")" + + @property + def python_format(self): + return "%" + str(self.width) + "d" + + +class ExpFormat(object): + @classmethod + def from_number(cls, n, min=None): + """Given a float number, returns a "reasonable" ExpFormat instance to + represent any number between -n and n. 
+ + Parameters + ---------- + n : float + max number one wants to be able to represent + min : int + minimum number of characters to use for the format + + Returns + ------- + res : ExpFormat + ExpFormat instance with reasonable (see Notes) computed width + + Notes + ----- + Reasonable should be understood as the minimal string length necessary + to avoid losing precision. + """ + # len of one number in exp format: sign + 1|0 + "." + + # number of digit for fractional part + 'E' + sign of exponent + + # len of exponent + finfo = np.finfo(n.dtype) + # Number of digits for fractional part + n_prec = finfo.precision + 1 + # Number of digits for exponential part + n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp]))) + width = 1 + 1 + n_prec + 1 + n_exp + 1 + if n < 0: + width += 1 + repeat = int(np.floor(80 / width)) + return cls(width, n_prec, min, repeat=repeat) + + def __init__(self, width, significand, min=None, repeat=None): + """\ + Parameters + ---------- + width : int + number of characters taken by the string (includes space). + """ + self.width = width + self.significand = significand + self.repeat = repeat + self.min = min + + def __repr__(self): + r = "ExpFormat(" + if self.repeat: + r += "%d" % self.repeat + r += "E%d.%d" % (self.width, self.significand) + if self.min: + r += "E%d" % self.min + return r + ")" + + @property + def fortran_format(self): + r = "(" + if self.repeat: + r += "%d" % self.repeat + r += "E%d.%d" % (self.width, self.significand) + if self.min: + r += "E%d" % self.min + return r + ")" + + @property + def python_format(self): + return "%" + str(self.width-1) + "." + str(self.significand) + "E" + + +class Token(object): + def __init__(self, type, value, pos): + self.type = type + self.value = value + self.pos = pos + + def __str__(self): + return """Token('%s', "%s")""" % (self.type, self.value) + + def __repr__(self): + return self.__str__() + + +class Tokenizer(object): + def __init__(self): + self.tokens = list(TOKENS.keys()) + self.res = [re.compile(TOKENS[i]) for i in self.tokens] + + def input(self, s): + self.data = s + self.curpos = 0 + self.len = len(s) + + def next_token(self): + curpos = self.curpos + tokens = self.tokens + + while curpos < self.len: + for i, r in enumerate(self.res): + m = r.match(self.data, curpos) + if m is None: + continue + else: + self.curpos = m.end() + return Token(self.tokens[i], m.group(), self.curpos) + raise SyntaxError("Unknown character at position %d (%s)" + % (self.curpos, self.data[curpos])) + + +# Grammar for fortran format: +# format : LPAR format_string RPAR +# format_string : repeated | simple +# repeated : repeat simple +# simple : int_fmt | exp_fmt +# int_fmt : INT_ID width +# exp_fmt : simple_exp_fmt +# simple_exp_fmt : EXP_ID width DOT significand +# extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits +# repeat : INT +# width : INT +# significand : INT +# ndigits : INT + +# Naive fortran formatter - parser is hand-made +class FortranFormatParser(object): + """Parser for fortran format strings. The parse method returns a *Format + instance. + + Notes + ----- + Only ExpFormat (exponential format for floating values) and IntFormat + (integer format) for now. 
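+
+    A brief usage sketch (for illustration)::
+
+        parser = FortranFormatParser()
+        fmt = parser.parse("(10I3)")
+        fmt.python_format    # '%3d'
+        fmt.fortran_format   # '(10I3)'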
+ """ + def __init__(self): + self.tokenizer = Tokenizer() + + def parse(self, s): + self.tokenizer.input(s) + + tokens = [] + + try: + while True: + t = self.tokenizer.next_token() + if t is None: + break + else: + tokens.append(t) + return self._parse_format(tokens) + except SyntaxError as e: + raise BadFortranFormat(str(e)) + + def _get_min(self, tokens): + next = tokens.pop(0) + if not next.type == "DOT": + raise SyntaxError() + next = tokens.pop(0) + return next.value + + def _expect(self, token, tp): + if not token.type == tp: + raise SyntaxError() + + def _parse_format(self, tokens): + if not tokens[0].type == "LPAR": + raise SyntaxError("Expected left parenthesis at position " + "%d (got '%s')" % (0, tokens[0].value)) + elif not tokens[-1].type == "RPAR": + raise SyntaxError("Expected right parenthesis at position " + "%d (got '%s')" % (len(tokens), tokens[-1].value)) + + tokens = tokens[1:-1] + types = [t.type for t in tokens] + if types[0] == "INT": + repeat = int(tokens.pop(0).value) + else: + repeat = None + + next = tokens.pop(0) + if next.type == "INT_ID": + next = self._next(tokens, "INT") + width = int(next.value) + if tokens: + min = int(self._get_min(tokens)) + else: + min = None + return IntFormat(width, min, repeat) + elif next.type == "EXP_ID": + next = self._next(tokens, "INT") + width = int(next.value) + + next = self._next(tokens, "DOT") + + next = self._next(tokens, "INT") + significand = int(next.value) + + if tokens: + next = self._next(tokens, "EXP_ID") + + next = self._next(tokens, "INT") + min = int(next.value) + else: + min = None + return ExpFormat(width, significand, min, repeat) + else: + raise SyntaxError("Invalid formater type %s" % next.value) + + def _next(self, tokens, tp): + if not len(tokens) > 0: + raise SyntaxError() + next = tokens.pop(0) + self._expect(next, tp) + return next diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/_fortran_format_parser.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/_fortran_format_parser.pyc new file mode 100644 index 0000000..f8f0a91 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/_fortran_format_parser.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/hb.py b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/hb.py new file mode 100644 index 0000000..3b3d76a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/hb.py @@ -0,0 +1,547 @@ +""" +Implementation of Harwell-Boeing read/write. + +At the moment not the full Harwell-Boeing format is supported. Supported +features are: + + - assembled, non-symmetric, real matrices + - integer for pointer/indices + - exponential format for float values, and int format + +""" +from __future__ import division, print_function, absolute_import + +# TODO: +# - Add more support (symmetric/complex matrices, non-assembled matrices ?) + +# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but +# takes a lot of memory. Being faster would require compiled code. +# write is not efficient. Although not a terribly exciting task, +# having reusable facilities to efficiently read/write fortran-formatted files +# would be useful outside this module. 
+ +import warnings + +import numpy as np +from scipy.sparse import csc_matrix +from scipy.io.harwell_boeing._fortran_format_parser import \ + FortranFormatParser, IntFormat, ExpFormat + +__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile", + "HBMatrixType"] + + +class MalformedHeader(Exception): + pass + + +class LineOverflow(Warning): + pass + + +def _nbytes_full(fmt, nlines): + """Return the number of bytes to read to get every full lines for the + given parsed fortran format.""" + return (fmt.repeat * fmt.width + 1) * (nlines - 1) + + +class HBInfo(object): + @classmethod + def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None): + """Create a HBInfo instance from an existing sparse matrix. + + Parameters + ---------- + m : sparse matrix + the HBInfo instance will derive its parameters from m + title : str + Title to put in the HB header + key : str + Key + mxtype : HBMatrixType + type of the input matrix + fmt : dict + not implemented + + Returns + ------- + hb_info : HBInfo instance + """ + m = m.tocsc(copy=False) + + pointer = m.indptr + indices = m.indices + values = m.data + + nrows, ncols = m.shape + nnon_zeros = m.nnz + + if fmt is None: + # +1 because HB use one-based indexing (Fortran), and we will write + # the indices /pointer as such + pointer_fmt = IntFormat.from_number(np.max(pointer+1)) + indices_fmt = IntFormat.from_number(np.max(indices+1)) + + if values.dtype.kind in np.typecodes["AllFloat"]: + values_fmt = ExpFormat.from_number(-np.max(np.abs(values))) + elif values.dtype.kind in np.typecodes["AllInteger"]: + values_fmt = IntFormat.from_number(-np.max(np.abs(values))) + else: + raise NotImplementedError("type %s not implemented yet" % values.dtype.kind) + else: + raise NotImplementedError("fmt argument not supported yet.") + + if mxtype is None: + if not np.isrealobj(values): + raise ValueError("Complex values not supported yet") + if values.dtype.kind in np.typecodes["AllInteger"]: + tp = "integer" + elif values.dtype.kind in np.typecodes["AllFloat"]: + tp = "real" + else: + raise NotImplementedError("type %s for values not implemented" + % values.dtype) + mxtype = HBMatrixType(tp, "unsymmetric", "assembled") + else: + raise ValueError("mxtype argument not handled yet.") + + def _nlines(fmt, size): + nlines = size // fmt.repeat + if nlines * fmt.repeat != size: + nlines += 1 + return nlines + + pointer_nlines = _nlines(pointer_fmt, pointer.size) + indices_nlines = _nlines(indices_fmt, indices.size) + values_nlines = _nlines(values_fmt, values.size) + + total_nlines = pointer_nlines + indices_nlines + values_nlines + + return cls(title, key, + total_nlines, pointer_nlines, indices_nlines, values_nlines, + mxtype, nrows, ncols, nnon_zeros, + pointer_fmt.fortran_format, indices_fmt.fortran_format, + values_fmt.fortran_format) + + @classmethod + def from_file(cls, fid): + """Create a HBInfo instance from a file object containing a matrix in the + HB format. + + Parameters + ---------- + fid : file-like matrix + File or file-like object containing a matrix in the HB format. 
+ + Returns + ------- + hb_info : HBInfo instance + """ + # First line + line = fid.readline().strip("\n") + if not len(line) > 72: + raise ValueError("Expected at least 72 characters for first line, " + "got: \n%s" % line) + title = line[:72] + key = line[72:] + + # Second line + line = fid.readline().strip("\n") + if not len(line.rstrip()) >= 56: + raise ValueError("Expected at least 56 characters for second line, " + "got: \n%s" % line) + total_nlines = _expect_int(line[:14]) + pointer_nlines = _expect_int(line[14:28]) + indices_nlines = _expect_int(line[28:42]) + values_nlines = _expect_int(line[42:56]) + + rhs_nlines = line[56:72].strip() + if rhs_nlines == '': + rhs_nlines = 0 + else: + rhs_nlines = _expect_int(rhs_nlines) + if not rhs_nlines == 0: + raise ValueError("Only files without right hand side supported for " + "now.") + + # Third line + line = fid.readline().strip("\n") + if not len(line) >= 70: + raise ValueError("Expected at least 72 character for third line, got:\n" + "%s" % line) + + mxtype_s = line[:3].upper() + if not len(mxtype_s) == 3: + raise ValueError("mxtype expected to be 3 characters long") + + mxtype = HBMatrixType.from_fortran(mxtype_s) + if mxtype.value_type not in ["real", "integer"]: + raise ValueError("Only real or integer matrices supported for " + "now (detected %s)" % mxtype) + if not mxtype.structure == "unsymmetric": + raise ValueError("Only unsymmetric matrices supported for " + "now (detected %s)" % mxtype) + if not mxtype.storage == "assembled": + raise ValueError("Only assembled matrices supported for now") + + if not line[3:14] == " " * 11: + raise ValueError("Malformed data for third line: %s" % line) + + nrows = _expect_int(line[14:28]) + ncols = _expect_int(line[28:42]) + nnon_zeros = _expect_int(line[42:56]) + nelementals = _expect_int(line[56:70]) + if not nelementals == 0: + raise ValueError("Unexpected value %d for nltvl (last entry of line 3)" + % nelementals) + + # Fourth line + line = fid.readline().strip("\n") + + ct = line.split() + if not len(ct) == 3: + raise ValueError("Expected 3 formats, got %s" % ct) + + return cls(title, key, + total_nlines, pointer_nlines, indices_nlines, values_nlines, + mxtype, nrows, ncols, nnon_zeros, + ct[0], ct[1], ct[2], + rhs_nlines, nelementals) + + def __init__(self, title, key, + total_nlines, pointer_nlines, indices_nlines, values_nlines, + mxtype, nrows, ncols, nnon_zeros, + pointer_format_str, indices_format_str, values_format_str, + right_hand_sides_nlines=0, nelementals=0): + """Do not use this directly, but the class ctrs (from_* functions).""" + self.title = title + self.key = key + if title is None: + title = "No Title" + if len(title) > 72: + raise ValueError("title cannot be > 72 characters") + + if key is None: + key = "|No Key" + if len(key) > 8: + warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow) + + self.total_nlines = total_nlines + self.pointer_nlines = pointer_nlines + self.indices_nlines = indices_nlines + self.values_nlines = values_nlines + + parser = FortranFormatParser() + pointer_format = parser.parse(pointer_format_str) + if not isinstance(pointer_format, IntFormat): + raise ValueError("Expected int format for pointer format, got %s" + % pointer_format) + + indices_format = parser.parse(indices_format_str) + if not isinstance(indices_format, IntFormat): + raise ValueError("Expected int format for indices format, got %s" % + indices_format) + + values_format = parser.parse(values_format_str) + if isinstance(values_format, ExpFormat): + if 
mxtype.value_type not in ["real", "complex"]: + raise ValueError("Inconsistency between matrix type %s and " + "value type %s" % (mxtype, values_format)) + values_dtype = np.float64 + elif isinstance(values_format, IntFormat): + if mxtype.value_type not in ["integer"]: + raise ValueError("Inconsistency between matrix type %s and " + "value type %s" % (mxtype, values_format)) + # XXX: fortran int -> dtype association ? + values_dtype = int + else: + raise ValueError("Unsupported format for values %r" % (values_format,)) + + self.pointer_format = pointer_format + self.indices_format = indices_format + self.values_format = values_format + + self.pointer_dtype = np.int32 + self.indices_dtype = np.int32 + self.values_dtype = values_dtype + + self.pointer_nlines = pointer_nlines + self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines) + + self.indices_nlines = indices_nlines + self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines) + + self.values_nlines = values_nlines + self.values_nbytes_full = _nbytes_full(values_format, values_nlines) + + self.nrows = nrows + self.ncols = ncols + self.nnon_zeros = nnon_zeros + self.nelementals = nelementals + self.mxtype = mxtype + + def dump(self): + """Gives the header corresponding to this instance as a string.""" + header = [self.title.ljust(72) + self.key.ljust(8)] + + header.append("%14d%14d%14d%14d" % + (self.total_nlines, self.pointer_nlines, + self.indices_nlines, self.values_nlines)) + header.append("%14s%14d%14d%14d%14d" % + (self.mxtype.fortran_format.ljust(14), self.nrows, + self.ncols, self.nnon_zeros, 0)) + + pffmt = self.pointer_format.fortran_format + iffmt = self.indices_format.fortran_format + vffmt = self.values_format.fortran_format + header.append("%16s%16s%20s" % + (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20))) + return "\n".join(header) + + +def _expect_int(value, msg=None): + try: + return int(value) + except ValueError: + if msg is None: + msg = "Expected an int, got %s" + raise ValueError(msg % value) + + +def _read_hb_data(content, header): + # XXX: look at a way to reduce memory here (big string creation) + ptr_string = "".join([content.read(header.pointer_nbytes_full), + content.readline()]) + ptr = np.fromstring(ptr_string, + dtype=int, sep=' ') + + ind_string = "".join([content.read(header.indices_nbytes_full), + content.readline()]) + ind = np.fromstring(ind_string, + dtype=int, sep=' ') + + val_string = "".join([content.read(header.values_nbytes_full), + content.readline()]) + val = np.fromstring(val_string, + dtype=header.values_dtype, sep=' ') + + try: + return csc_matrix((val, ind-1, ptr-1), + shape=(header.nrows, header.ncols)) + except ValueError as e: + raise e + + +def _write_data(m, fid, header): + m = m.tocsc(copy=False) + + def write_array(f, ar, nlines, fmt): + # ar_nlines is the number of full lines, n is the number of items per + # line, ffmt the fortran format + pyfmt = fmt.python_format + pyfmt_full = pyfmt * fmt.repeat + + # for each array to write, we first write the full lines, and special + # case for partial line + full = ar[:(nlines - 1) * fmt.repeat] + for row in full.reshape((nlines-1, fmt.repeat)): + f.write(pyfmt_full % tuple(row) + "\n") + nremain = ar.size - full.size + if nremain > 0: + f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n") + + fid.write(header.dump()) + fid.write("\n") + # +1 is for fortran one-based indexing + write_array(fid, m.indptr+1, header.pointer_nlines, + header.pointer_format) + write_array(fid, m.indices+1, 
header.indices_nlines, + header.indices_format) + write_array(fid, m.data, header.values_nlines, + header.values_format) + + +class HBMatrixType(object): + """Class to hold the matrix type.""" + # q2f* translates qualified names to fortran character + _q2f_type = { + "real": "R", + "complex": "C", + "pattern": "P", + "integer": "I", + } + _q2f_structure = { + "symmetric": "S", + "unsymmetric": "U", + "hermitian": "H", + "skewsymmetric": "Z", + "rectangular": "R" + } + _q2f_storage = { + "assembled": "A", + "elemental": "E", + } + + _f2q_type = dict([(j, i) for i, j in _q2f_type.items()]) + _f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()]) + _f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()]) + + @classmethod + def from_fortran(cls, fmt): + if len(fmt) != 3: + raise ValueError("Fortran format for matrix type should be 3 " + "characters long") + try: + value_type = cls._f2q_type[fmt[0]] + structure = cls._f2q_structure[fmt[1]] + storage = cls._f2q_storage[fmt[2]] + return cls(value_type, structure, storage) + except KeyError: + raise ValueError("Unrecognized format %s" % fmt) + + def __init__(self, value_type, structure, storage="assembled"): + self.value_type = value_type + self.structure = structure + self.storage = storage + + if value_type not in self._q2f_type: + raise ValueError("Unrecognized type %s" % value_type) + if structure not in self._q2f_structure: + raise ValueError("Unrecognized structure %s" % structure) + if storage not in self._q2f_storage: + raise ValueError("Unrecognized storage %s" % storage) + + @property + def fortran_format(self): + return self._q2f_type[self.value_type] + \ + self._q2f_structure[self.structure] + \ + self._q2f_storage[self.storage] + + def __repr__(self): + return "HBMatrixType(%s, %s, %s)" % \ + (self.value_type, self.structure, self.storage) + + +class HBFile(object): + def __init__(self, file, hb_info=None): + """Create a HBFile instance. + + Parameters + ---------- + file : file-object + StringIO works as well + hb_info : HBInfo, optional + Should be given as an argument for writing, in which case the file + should be writable. + """ + self._fid = file + if hb_info is None: + self._hb_info = HBInfo.from_file(file) + else: + self._hb_info = hb_info + + @property + def title(self): + return self._hb_info.title + + @property + def key(self): + return self._hb_info.key + + @property + def type(self): + return self._hb_info.mxtype.value_type + + @property + def structure(self): + return self._hb_info.mxtype.structure + + @property + def storage(self): + return self._hb_info.mxtype.storage + + def read_matrix(self): + return _read_hb_data(self._fid, self._hb_info) + + def write_matrix(self, m): + return _write_data(m, self._fid, self._hb_info) + + +def hb_read(path_or_open_file): + """Read HB-format file. + + Parameters + ---------- + path_or_open_file : path-like or file-like + If a file-like object, it is used as-is. Otherwise it is opened + before reading. + + Returns + ------- + data : scipy.sparse.csc_matrix instance + The data read from the HB file as a sparse matrix. + + Notes + ----- + At the moment, the full Harwell-Boeing format is not supported.
Supported + features are: + + - assembled, non-symmetric, real matrices + - integer for pointer/indices + - exponential format for float values, and int format + + """ + def _get_matrix(fid): + hb = HBFile(fid) + return hb.read_matrix() + + if hasattr(path_or_open_file, 'read'): + return _get_matrix(path_or_open_file) + else: + with open(path_or_open_file) as f: + return _get_matrix(f) + + +def hb_write(path_or_open_file, m, hb_info=None): + """Write HB-format file. + + Parameters + ---------- + path_or_open_file : path-like or file-like + If a file-like object, it is used as-is. Otherwise it is opened + before writing. + m : sparse-matrix + The sparse matrix to write + hb_info : HBInfo + Contains the metadata for the write + + Returns + ------- + None + + Notes + ----- + At the moment, the full Harwell-Boeing format is not supported. Supported + features are: + + - assembled, non-symmetric, real matrices + - integer for pointer/indices + - exponential format for float values, and int format + + """ + m = m.tocsc(copy=False) + + if hb_info is None: + hb_info = HBInfo.from_data(m) + + def _set_matrix(fid): + hb = HBFile(fid, hb_info) + return hb.write_matrix(m) + + if hasattr(path_or_open_file, 'write'): + return _set_matrix(path_or_open_file) + else: + with open(path_or_open_file, 'w') as f: + return _set_matrix(f) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/hb.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/hb.pyc new file mode 100644 index 0000000..1cb4869 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/hb.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/setup.py b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/setup.py new file mode 100644 index 0000000..3cdddff --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/setup.py @@ -0,0 +1,14 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('harwell_boeing',parent_package,top_path) + config.add_data_dir('tests') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/setup.pyc new file mode 100644 index 0000000..82926e4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/__init__.pyc new file mode 100644 index 0000000..3de0441 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_fortran_format.py b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_fortran_format.py new file mode 100644 index 0000000..215bae6 --- /dev/null +++
b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_fortran_format.py @@ -0,0 +1,77 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np + +from numpy.testing import assert_equal +from pytest import raises as assert_raises + +from scipy.io.harwell_boeing._fortran_format_parser import ( + FortranFormatParser, IntFormat, ExpFormat, BadFortranFormat, + number_digits) + + +class TestFortranFormatParser(object): + def setup_method(self): + self.parser = FortranFormatParser() + + def _test_equal(self, format, ref): + ret = self.parser.parse(format) + assert_equal(ret.__dict__, ref.__dict__) + + def test_simple_int(self): + self._test_equal("(I4)", IntFormat(4)) + + def test_simple_repeated_int(self): + self._test_equal("(3I4)", IntFormat(4, repeat=3)) + + def test_simple_exp(self): + self._test_equal("(E4.3)", ExpFormat(4, 3)) + + def test_exp_exp(self): + self._test_equal("(E8.3E3)", ExpFormat(8, 3, 3)) + + def test_repeat_exp(self): + self._test_equal("(2E4.3)", ExpFormat(4, 3, repeat=2)) + + def test_repeat_exp_exp(self): + self._test_equal("(2E8.3E3)", ExpFormat(8, 3, 3, repeat=2)) + + def test_wrong_formats(self): + def _test_invalid(bad_format): + assert_raises(BadFortranFormat, lambda: self.parser.parse(bad_format)) + _test_invalid("I4") + _test_invalid("(E4)") + _test_invalid("(E4.)") + _test_invalid("(E4.E3)") + + +class TestIntFormat(object): + def test_to_fortran(self): + f = [IntFormat(10), IntFormat(12, 10), IntFormat(12, 10, 3)] + res = ["(I10)", "(I12.10)", "(3I12.10)"] + + for i, j in zip(f, res): + assert_equal(i.fortran_format, j) + + def test_from_number(self): + f = [10, -12, 123456789] + r_f = [IntFormat(3, repeat=26), IntFormat(4, repeat=20), + IntFormat(10, repeat=8)] + for i, j in zip(f, r_f): + assert_equal(IntFormat.from_number(i).__dict__, j.__dict__) + + +class TestExpFormat(object): + def test_to_fortran(self): + f = [ExpFormat(10, 5), ExpFormat(12, 10), ExpFormat(12, 10, min=3), + ExpFormat(10, 5, repeat=3)] + res = ["(E10.5)", "(E12.10)", "(E12.10E3)", "(3E10.5)"] + + for i, j in zip(f, res): + assert_equal(i.fortran_format, j) + + def test_from_number(self): + f = np.array([1.0, -1.2]) + r_f = [ExpFormat(24, 16, repeat=3), ExpFormat(25, 16, repeat=3)] + for i, j in zip(f, r_f): + assert_equal(ExpFormat.from_number(i).__dict__, j.__dict__) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_fortran_format.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_fortran_format.pyc new file mode 100644 index 0000000..472df72 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_fortran_format.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_hb.py b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_hb.py new file mode 100644 index 0000000..36133f8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_hb.py @@ -0,0 +1,71 @@ +from __future__ import division, print_function, absolute_import + +import sys +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO +import tempfile + +import numpy as np + +from numpy.testing import assert_equal, \ + assert_array_almost_equal_nulp + +from scipy.sparse import coo_matrix, csc_matrix, rand + +from scipy.io import hb_read, hb_write + + +SIMPLE = """\ +No Title |No Key + 9 4 1 4 +RUA 100 100 10 0 +(26I3) 
(26I3) (3E23.15) +1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 +3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 +3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9 +9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11 +37 71 89 18 30 45 70 19 25 52 +2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01 +6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01 +4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01 +6.912334991524289e-01 +""" + +SIMPLE_MATRIX = coo_matrix( + ((0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799, + 0.0661749042483, 0.887037034319, 0.419647859016, + 0.564960307211, 0.993442388709, 0.691233499152,), + (np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51], + [0, 4, 58, 61, 61, 72, 72, 73, 99, 99]])))) + + +def assert_csc_almost_equal(r, l): + r = csc_matrix(r) + l = csc_matrix(l) + assert_equal(r.indptr, l.indptr) + assert_equal(r.indices, l.indices) + assert_array_almost_equal_nulp(r.data, l.data, 10000) + + +class TestHBReader(object): + def test_simple(self): + m = hb_read(StringIO(SIMPLE)) + assert_csc_almost_equal(m, SIMPLE_MATRIX) + + +class TestHBReadWrite(object): + + def check_save_load(self, value): + with tempfile.NamedTemporaryFile(mode='w+t') as file: + hb_write(file, value) + file.file.seek(0) + value_loaded = hb_read(file) + assert_csc_almost_equal(value, value_loaded) + + def test_simple(self): + random_matrix = rand(10, 100, 0.1) + for matrix_format in ('coo', 'csc', 'csr', 'bsr', 'dia', 'dok', 'lil'): + matrix = random_matrix.asformat(matrix_format, copy=False) + self.check_save_load(matrix) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_hb.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_hb.pyc new file mode 100644 index 0000000..ae9e3d9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/test_hb.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/idl.py b/project/venv/lib/python2.7/site-packages/scipy/io/idl.py new file mode 100644 index 0000000..593807d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/idl.py @@ -0,0 +1,884 @@ +# IDLSave - a python module to read IDL 'save' files +# Copyright (c) 2010 Thomas P. Robitaille + +# Many thanks to Craig Markwardt for publishing the Unofficial Format +# Specification for IDL .sav files, without which this Python module would not +# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt). + +# This code was developed by with permission from ITT Visual Information +# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems, +# Inc. for their Interactive Data Language software. + +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +from __future__ import division, print_function, absolute_import + +__all__ = ['readsav'] + +import struct +import numpy as np +from numpy.compat import asstr +import tempfile +import zlib +import warnings + +# Define the different data types that can be found in an IDL save file +DTYPE_DICT = {1: '>u1', + 2: '>i2', + 3: '>i4', + 4: '>f4', + 5: '>f8', + 6: '>c8', + 7: '|O', + 8: '|O', + 9: '>c16', + 10: '|O', + 11: '|O', + 12: '>u2', + 13: '>u4', + 14: '>i8', + 15: '>u8'} + +# Define the different record types that can be found in an IDL save file +RECTYPE_DICT = {0: "START_MARKER", + 1: "COMMON_VARIABLE", + 2: "VARIABLE", + 3: "SYSTEM_VARIABLE", + 6: "END_MARKER", + 10: "TIMESTAMP", + 12: "COMPILED", + 13: "IDENTIFICATION", + 14: "VERSION", + 15: "HEAP_HEADER", + 16: "HEAP_DATA", + 17: "PROMOTE64", + 19: "NOTICE", + 20: "DESCRIPTION"} + +# Define a dictionary to contain structure definitions +STRUCT_DICT = {} + + +def _align_32(f): + '''Align to the next 32-bit position in a file''' + + pos = f.tell() + if pos % 4 != 0: + f.seek(pos + 4 - pos % 4) + return + + +def _skip_bytes(f, n): + '''Skip `n` bytes''' + f.read(n) + return + + +def _read_bytes(f, n): + '''Read the next `n` bytes''' + return f.read(n) + + +def _read_byte(f): + '''Read a single byte''' + return np.uint8(struct.unpack('>B', f.read(4)[:1])[0]) + + +def _read_long(f): + '''Read a signed 32-bit integer''' + return np.int32(struct.unpack('>l', f.read(4))[0]) + + +def _read_int16(f): + '''Read a signed 16-bit integer''' + return np.int16(struct.unpack('>h', f.read(4)[2:4])[0]) + + +def _read_int32(f): + '''Read a signed 32-bit integer''' + return np.int32(struct.unpack('>i', f.read(4))[0]) + + +def _read_int64(f): + '''Read a signed 64-bit integer''' + return np.int64(struct.unpack('>q', f.read(8))[0]) + + +def _read_uint16(f): + '''Read an unsigned 16-bit integer''' + return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0]) + + +def _read_uint32(f): + '''Read an unsigned 32-bit integer''' + return np.uint32(struct.unpack('>I', f.read(4))[0]) + + +def _read_uint64(f): + '''Read an unsigned 64-bit integer''' + return np.uint64(struct.unpack('>Q', f.read(8))[0]) + + +def _read_float32(f): + '''Read a 32-bit float''' + return np.float32(struct.unpack('>f', f.read(4))[0]) + + +def _read_float64(f): + '''Read a 64-bit float''' + return np.float64(struct.unpack('>d', f.read(8))[0]) + + +class Pointer(object): + '''Class used to define pointers''' + + def __init__(self, index): + self.index = index + return + + +class ObjectPointer(Pointer): + '''Class used to define object pointers''' + pass + + +def _read_string(f): + '''Read a string''' + length = _read_long(f) + if length > 0: + chars = _read_bytes(f, length) + _align_32(f) + chars = asstr(chars) + else: + chars = '' + return chars + + +def _read_string_data(f): + '''Read a data string (length is specified twice)''' + length = _read_long(f) + if length > 0: + length = _read_long(f) + string_data = _read_bytes(f, length) + _align_32(f) + else: + string_data = '' + return string_data + + +def _read_data(f, dtype): + '''Read a variable with a specified data type''' + if dtype == 1: + if _read_int32(f) != 1: + raise Exception("Error occurred while reading byte variable") + return _read_byte(f) + elif dtype == 2: + 
return _read_int16(f) + elif dtype == 3: + return _read_int32(f) + elif dtype == 4: + return _read_float32(f) + elif dtype == 5: + return _read_float64(f) + elif dtype == 6: + real = _read_float32(f) + imag = _read_float32(f) + return np.complex64(real + imag * 1j) + elif dtype == 7: + return _read_string_data(f) + elif dtype == 8: + raise Exception("Should not be here - please report this") + elif dtype == 9: + real = _read_float64(f) + imag = _read_float64(f) + return np.complex128(real + imag * 1j) + elif dtype == 10: + return Pointer(_read_int32(f)) + elif dtype == 11: + return ObjectPointer(_read_int32(f)) + elif dtype == 12: + return _read_uint16(f) + elif dtype == 13: + return _read_uint32(f) + elif dtype == 14: + return _read_int64(f) + elif dtype == 15: + return _read_uint64(f) + else: + raise Exception("Unknown IDL type: %i - please report this" % dtype) + + +def _read_structure(f, array_desc, struct_desc): + ''' + Read a structure, with the array and structure descriptors given as + `array_desc` and `structure_desc` respectively. + ''' + + nrows = array_desc['nelements'] + columns = struct_desc['tagtable'] + + dtype = [] + for col in columns: + if col['structure'] or col['array']: + dtype.append(((col['name'].lower(), col['name']), np.object_)) + else: + if col['typecode'] in DTYPE_DICT: + dtype.append(((col['name'].lower(), col['name']), + DTYPE_DICT[col['typecode']])) + else: + raise Exception("Variable type %i not implemented" % + col['typecode']) + + structure = np.recarray((nrows, ), dtype=dtype) + + for i in range(nrows): + for col in columns: + dtype = col['typecode'] + if col['structure']: + structure[col['name']][i] = _read_structure(f, + struct_desc['arrtable'][col['name']], + struct_desc['structtable'][col['name']]) + elif col['array']: + structure[col['name']][i] = _read_array(f, dtype, + struct_desc['arrtable'][col['name']]) + else: + structure[col['name']][i] = _read_data(f, dtype) + + # Reshape structure if needed + if array_desc['ndims'] > 1: + dims = array_desc['dims'][:int(array_desc['ndims'])] + dims.reverse() + structure = structure.reshape(dims) + + return structure + + +def _read_array(f, typecode, array_desc): + ''' + Read an array of type `typecode`, with the array descriptor given as + `array_desc`. 
+ ''' + + if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]: + + if typecode == 1: + nbytes = _read_int32(f) + if nbytes != array_desc['nbytes']: + warnings.warn("Not able to verify number of bytes from header") + + # Read bytes as numpy array + array = np.frombuffer(f.read(array_desc['nbytes']), + dtype=DTYPE_DICT[typecode]) + + elif typecode in [2, 12]: + + # These are 2 byte types, need to skip every two as they are not packed + + array = np.frombuffer(f.read(array_desc['nbytes']*2), + dtype=DTYPE_DICT[typecode])[1::2] + + else: + + # Read bytes into list + array = [] + for i in range(array_desc['nelements']): + dtype = typecode + data = _read_data(f, dtype) + array.append(data) + + array = np.array(array, dtype=np.object_) + + # Reshape array if needed + if array_desc['ndims'] > 1: + dims = array_desc['dims'][:int(array_desc['ndims'])] + dims.reverse() + array = array.reshape(dims) + + # Go to next alignment position + _align_32(f) + + return array + + +def _read_record(f): + '''Function to read in a full record''' + + record = {'rectype': _read_long(f)} + + nextrec = _read_uint32(f) + nextrec += _read_uint32(f) * 2**32 + + _skip_bytes(f, 4) + + if record['rectype'] not in RECTYPE_DICT: + raise Exception("Unknown RECTYPE: %i" % record['rectype']) + + record['rectype'] = RECTYPE_DICT[record['rectype']] + + if record['rectype'] in ["VARIABLE", "HEAP_DATA"]: + + if record['rectype'] == "VARIABLE": + record['varname'] = _read_string(f) + else: + record['heap_index'] = _read_long(f) + _skip_bytes(f, 4) + + rectypedesc = _read_typedesc(f) + + if rectypedesc['typecode'] == 0: + + if nextrec == f.tell(): + record['data'] = None # Indicates NULL value + else: + raise ValueError("Unexpected type code: 0") + + else: + + varstart = _read_long(f) + if varstart != 7: + raise Exception("VARSTART is not 7") + + if rectypedesc['structure']: + record['data'] = _read_structure(f, rectypedesc['array_desc'], + rectypedesc['struct_desc']) + elif rectypedesc['array']: + record['data'] = _read_array(f, rectypedesc['typecode'], + rectypedesc['array_desc']) + else: + dtype = rectypedesc['typecode'] + record['data'] = _read_data(f, dtype) + + elif record['rectype'] == "TIMESTAMP": + + _skip_bytes(f, 4*256) + record['date'] = _read_string(f) + record['user'] = _read_string(f) + record['host'] = _read_string(f) + + elif record['rectype'] == "VERSION": + + record['format'] = _read_long(f) + record['arch'] = _read_string(f) + record['os'] = _read_string(f) + record['release'] = _read_string(f) + + elif record['rectype'] == "IDENTIFICATION": + + record['author'] = _read_string(f) + record['title'] = _read_string(f) + record['idcode'] = _read_string(f) + + elif record['rectype'] == "NOTICE": + + record['notice'] = _read_string(f) + + elif record['rectype'] == "DESCRIPTION": + + record['description'] = _read_string_data(f) + + elif record['rectype'] == "HEAP_HEADER": + + record['nvalues'] = _read_long(f) + record['indices'] = [] + for i in range(record['nvalues']): + record['indices'].append(_read_long(f)) + + elif record['rectype'] == "COMMON_VARIABLE": + + record['nvars'] = _read_long(f) + record['name'] = _read_string(f) + record['varnames'] = [] + for i in range(record['nvars']): + record['varnames'].append(_read_string(f)) + + elif record['rectype'] == "END_MARKER": + + record['end'] = True + + elif record['rectype'] == "UNKNOWN": + + warnings.warn("Skipping UNKNOWN record") + + elif record['rectype'] == "SYSTEM_VARIABLE": + + warnings.warn("Skipping SYSTEM_VARIABLE record") + + else: + + raise
Exception("record['rectype']=%s not implemented" % + record['rectype']) + + f.seek(nextrec) + + return record + + +def _read_typedesc(f): + '''Function to read in a type descriptor''' + + typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)} + + if typedesc['varflags'] & 2 == 2: + raise Exception("System variables not implemented") + + typedesc['array'] = typedesc['varflags'] & 4 == 4 + typedesc['structure'] = typedesc['varflags'] & 32 == 32 + + if typedesc['structure']: + typedesc['array_desc'] = _read_arraydesc(f) + typedesc['struct_desc'] = _read_structdesc(f) + elif typedesc['array']: + typedesc['array_desc'] = _read_arraydesc(f) + + return typedesc + + +def _read_arraydesc(f): + '''Function to read in an array descriptor''' + + arraydesc = {'arrstart': _read_long(f)} + + if arraydesc['arrstart'] == 8: + + _skip_bytes(f, 4) + + arraydesc['nbytes'] = _read_long(f) + arraydesc['nelements'] = _read_long(f) + arraydesc['ndims'] = _read_long(f) + + _skip_bytes(f, 8) + + arraydesc['nmax'] = _read_long(f) + + arraydesc['dims'] = [] + for d in range(arraydesc['nmax']): + arraydesc['dims'].append(_read_long(f)) + + elif arraydesc['arrstart'] == 18: + + warnings.warn("Using experimental 64-bit array read") + + _skip_bytes(f, 8) + + arraydesc['nbytes'] = _read_uint64(f) + arraydesc['nelements'] = _read_uint64(f) + arraydesc['ndims'] = _read_long(f) + + _skip_bytes(f, 8) + + arraydesc['nmax'] = 8 + + arraydesc['dims'] = [] + for d in range(arraydesc['nmax']): + v = _read_long(f) + if v != 0: + raise Exception("Expected a zero in ARRAY_DESC") + arraydesc['dims'].append(_read_long(f)) + + else: + + raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart']) + + return arraydesc + + +def _read_structdesc(f): + '''Function to read in a structure descriptor''' + + structdesc = {} + + structstart = _read_long(f) + if structstart != 9: + raise Exception("STRUCTSTART should be 9") + + structdesc['name'] = _read_string(f) + predef = _read_long(f) + structdesc['ntags'] = _read_long(f) + structdesc['nbytes'] = _read_long(f) + + structdesc['predef'] = predef & 1 + structdesc['inherits'] = predef & 2 + structdesc['is_super'] = predef & 4 + + if not structdesc['predef']: + + structdesc['tagtable'] = [] + for t in range(structdesc['ntags']): + structdesc['tagtable'].append(_read_tagdesc(f)) + + for tag in structdesc['tagtable']: + tag['name'] = _read_string(f) + + structdesc['arrtable'] = {} + for tag in structdesc['tagtable']: + if tag['array']: + structdesc['arrtable'][tag['name']] = _read_arraydesc(f) + + structdesc['structtable'] = {} + for tag in structdesc['tagtable']: + if tag['structure']: + structdesc['structtable'][tag['name']] = _read_structdesc(f) + + if structdesc['inherits'] or structdesc['is_super']: + structdesc['classname'] = _read_string(f) + structdesc['nsupclasses'] = _read_long(f) + structdesc['supclassnames'] = [] + for s in range(structdesc['nsupclasses']): + structdesc['supclassnames'].append(_read_string(f)) + structdesc['supclasstable'] = [] + for s in range(structdesc['nsupclasses']): + structdesc['supclasstable'].append(_read_structdesc(f)) + + STRUCT_DICT[structdesc['name']] = structdesc + + else: + + if not structdesc['name'] in STRUCT_DICT: + raise Exception("PREDEF=1 but can't find definition") + + structdesc = STRUCT_DICT[structdesc['name']] + + return structdesc + + +def _read_tagdesc(f): + '''Function to read in a tag descriptor''' + + tagdesc = {'offset': _read_long(f)} + + if tagdesc['offset'] == -1: + tagdesc['offset'] = _read_uint64(f) + + 
tagdesc['typecode'] = _read_long(f) + tagflags = _read_long(f) + + tagdesc['array'] = tagflags & 4 == 4 + tagdesc['structure'] = tagflags & 32 == 32 + tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT + # Assume '10'x is scalar + + return tagdesc + + +def _replace_heap(variable, heap): + + if isinstance(variable, Pointer): + + while isinstance(variable, Pointer): + + if variable.index == 0: + variable = None + else: + if variable.index in heap: + variable = heap[variable.index] + else: + warnings.warn("Variable referenced by pointer not found " + "in heap: variable will be set to None") + variable = None + + replace, new = _replace_heap(variable, heap) + + if replace: + variable = new + + return True, variable + + elif isinstance(variable, np.core.records.recarray): + + # Loop over records + for ir, record in enumerate(variable): + + replace, new = _replace_heap(record, heap) + + if replace: + variable[ir] = new + + return False, variable + + elif isinstance(variable, np.core.records.record): + + # Loop over values + for iv, value in enumerate(variable): + + replace, new = _replace_heap(value, heap) + + if replace: + variable[iv] = new + + return False, variable + + elif isinstance(variable, np.ndarray): + + # Loop over values if type is np.object_ + if variable.dtype.type is np.object_: + + for iv in range(variable.size): + + replace, new = _replace_heap(variable.item(iv), heap) + + if replace: + variable.itemset(iv, new) + + return False, variable + + else: + + return False, variable + + +class AttrDict(dict): + ''' + A case-insensitive dictionary with access via item, attribute, and call + notations: + + >>> d = AttrDict() + >>> d['Variable'] = 123 + >>> d['Variable'] + 123 + >>> d.Variable + 123 + >>> d.variable + 123 + >>> d('VARIABLE') + 123 + ''' + + def __init__(self, init={}): + dict.__init__(self, init) + + def __getitem__(self, name): + return super(AttrDict, self).__getitem__(name.lower()) + + def __setitem__(self, key, value): + return super(AttrDict, self).__setitem__(key.lower(), value) + + __getattr__ = __getitem__ + __setattr__ = __setitem__ + __call__ = __getitem__ + + +def readsav(file_name, idict=None, python_dict=False, + uncompressed_file_name=None, verbose=False): + """ + Read an IDL .sav file. + + Parameters + ---------- + file_name : str + Name of the IDL save file. + idict : dict, optional + Dictionary in which to insert .sav file variables. + python_dict : bool, optional + By default, the object return is not a Python dictionary, but a + case-insensitive dictionary with item, attribute, and call access + to variables. To get a standard Python dictionary, set this option + to True. + uncompressed_file_name : str, optional + This option only has an effect for .sav files written with the + /compress option. If a file name is specified, compressed .sav + files are uncompressed to this file. Otherwise, readsav will use + the `tempfile` module to determine a temporary filename + automatically, and will remove the temporary file upon successfully + reading it in. + verbose : bool, optional + Whether to print out information about the save file, including + the records read, and available variables. + + Returns + ------- + idl_dict : AttrDict or dict + If `python_dict` is set to False (default), this function returns a + case-insensitive dictionary with item, attribute, and call access + to variables. If `python_dict` is set to True, this function + returns a Python dictionary with all variable names in lowercase. 
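To make the access modes concrete, a minimal hedged sketch of calling `readsav` (the .sav file name and the variable name are hypothetical):

    from scipy.io import readsav
    data = readsav('session.sav')
    data['velocity']    # item access
    data.velocity       # attribute access (AttrDict is case-insensitive)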
+ If `idict` was specified, then variables are written to the + dictionary specified, and the updated dictionary is returned. + + """ + + # Initialize record and variable holders + records = [] + if python_dict or idict: + variables = {} + else: + variables = AttrDict() + + # Open the IDL file + f = open(file_name, 'rb') + + # Read the signature, which should be 'SR' + signature = _read_bytes(f, 2) + if signature != b'SR': + raise Exception("Invalid SIGNATURE: %s" % signature) + + # Next, the record format, which is '\x00\x04' for normal .sav + # files, and '\x00\x06' for compressed .sav files. + recfmt = _read_bytes(f, 2) + + if recfmt == b'\x00\x04': + pass + + elif recfmt == b'\x00\x06': + + if verbose: + print("IDL Save file is compressed") + + if uncompressed_file_name: + fout = open(uncompressed_file_name, 'w+b') + else: + fout = tempfile.NamedTemporaryFile(suffix='.sav') + + if verbose: + print(" -> expanding to %s" % fout.name) + + # Write header + fout.write(b'SR\x00\x04') + + # Cycle through records + while True: + + # Read record type + rectype = _read_long(f) + fout.write(struct.pack('>l', int(rectype))) + + # Read position of next record and return as int + nextrec = _read_uint32(f) + nextrec += _read_uint32(f) * 2**32 + + # Read the unknown 4 bytes + unknown = f.read(4) + + # Check if the end of the file has been reached + if RECTYPE_DICT[rectype] == 'END_MARKER': + fout.write(struct.pack('>I', int(nextrec) % 2**32)) + fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32))) + fout.write(unknown) + break + + # Find current position + pos = f.tell() + + # Decompress record + rec_string = zlib.decompress(f.read(nextrec-pos)) + + # Find new position of next record + nextrec = fout.tell() + len(rec_string) + 12 + + # Write out record + fout.write(struct.pack('>I', int(nextrec % 2**32))) + fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32))) + fout.write(unknown) + fout.write(rec_string) + + # Close the original compressed file + f.close() + + # Set f to be the decompressed file, and skip the first four bytes + f = fout + f.seek(4) + + else: + raise Exception("Invalid RECFMT: %s" % recfmt) + + # Loop through records, and add them to the list + while True: + r = _read_record(f) + records.append(r) + if 'end' in r: + if r['end']: + break + + # Close the file + f.close() + + # Find heap data variables + heap = {} + for r in records: + if r['rectype'] == "HEAP_DATA": + heap[r['heap_index']] = r['data'] + + # Find all variables + for r in records: + if r['rectype'] == "VARIABLE": + replace, new = _replace_heap(r['data'], heap) + if replace: + r['data'] = new + variables[r['varname'].lower()] = r['data'] + + if verbose: + + # Print out timestamp info about the file + for record in records: + if record['rectype'] == "TIMESTAMP": + print("-"*50) + print("Date: %s" % record['date']) + print("User: %s" % record['user']) + print("Host: %s" % record['host']) + break + + # Print out version info about the file + for record in records: + if record['rectype'] == "VERSION": + print("-"*50) + print("Format: %s" % record['format']) + print("Architecture: %s" % record['arch']) + print("Operating System: %s" % record['os']) + print("IDL Version: %s" % record['release']) + break + + # Print out identification info about the file + for record in records: + if record['rectype'] == "IDENTIFICATION": + print("-"*50) + print("Author: %s" % record['author']) + print("Title: %s" % record['title']) + print("ID Code: %s" % record['idcode']) + break + + # Print out
descriptions saved with the file + for record in records: + if record['rectype'] == "DESCRIPTION": + print("-"*50) + print("Description: %s" % record['description']) + break + + print("-"*50) + print("Successfully read %i records of which:" % + (len(records))) + + # Create convenience list of record types + rectypes = [r['rectype'] for r in records] + + for rt in set(rectypes): + if rt != 'END_MARKER': + print(" - %i are of type %s" % (rectypes.count(rt), rt)) + print("-"*50) + + if 'VARIABLE' in rectypes: + print("Available variables:") + for var in variables: + print(" - %s [%s]" % (var, type(variables[var]))) + print("-"*50) + + if idict: + for var in variables: + idict[var] = variables[var] + return idict + else: + return variables diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/idl.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/idl.pyc new file mode 100644 index 0000000..1b7abdf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/idl.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/__init__.py new file mode 100644 index 0000000..4ce8bd7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/__init__.py @@ -0,0 +1,20 @@ +""" +Utilities for dealing with MATLAB(R) files + +Notes +----- +MATLAB(R) is a registered trademark of The MathWorks, Inc., 3 Apple Hill +Drive, Natick, MA 01760-2098, USA. + +""" +from __future__ import division, print_function, absolute_import + +# Matlab file read and write utilities +from .mio import loadmat, savemat, whosmat +from . import byteordercodes + +__all__ = ['loadmat', 'savemat', 'whosmat', 'byteordercodes'] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/__init__.pyc new file mode 100644 index 0000000..e8dda39 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/byteordercodes.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/byteordercodes.py new file mode 100644 index 0000000..211a203 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/byteordercodes.py @@ -0,0 +1,70 @@ +''' Byteorder utilities for system - numpy byteorder encoding + +Converts a variety of string codes for little endian, big endian, +native byte order and swapped byte order to explicit numpy endian +codes - one of '<' (little endian) or '>' (big endian) + +''' +from __future__ import division, print_function, absolute_import + +import sys + +sys_is_le = sys.byteorder == 'little' +native_code = sys_is_le and '<' or '>' +swapped_code = sys_is_le and '>' or '<' + +aliases = {'little': ('little', '<', 'l', 'le'), + 'big': ('big', '>', 'b', 'be'), + 'native': ('native', '='), + 'swapped': ('swapped', 'S')} + + +def to_numpy_code(code): + """ + Convert various order codings to numpy format. + + Parameters + ---------- + code : str + The code to convert. It is converted to lower case before parsing. + Legal values are: + 'little', 'big', 'l', 'b', 'le', 'be', '<', '>', 'native', '=', + 'swapped', 's'. + + Returns + ------- + out_code : {'<', '>'} + Here '<' is the numpy dtype code for little endian, + and '>' is the code for big endian. 
+ + Examples + -------- + >>> import sys + >>> sys_is_le == (sys.byteorder == 'little') + True + >>> to_numpy_code('big') + '>' + >>> to_numpy_code('little') + '<' + >>> nc = to_numpy_code('native') + >>> nc == '<' if sys_is_le else nc == '>' + True + >>> sc = to_numpy_code('swapped') + >>> sc == '>' if sys_is_le else sc == '<' + True + + """ + if code is None: + return native_code + code = code.lower() + if code in aliases['little']: + return '<' + elif code in aliases['big']: + return '>' + elif code in aliases['native']: + return native_code + elif code in aliases['swapped']: + return swapped_code + else: + raise ValueError( + 'We cannot handle byte order %s' % code) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/byteordercodes.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/byteordercodes.pyc new file mode 100644 index 0000000..5907225 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/byteordercodes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio.py new file mode 100644 index 0000000..765c395 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio.py @@ -0,0 +1,326 @@ +""" +Module for reading and writing matlab (TM) .mat files +""" +# Authors: Travis Oliphant, Matthew Brett + +from __future__ import division, print_function, absolute_import + +from scipy._lib.six import string_types + +from .miobase import get_matfile_version, docfiller +from .mio4 import MatFile4Reader, MatFile4Writer +from .mio5 import MatFile5Reader, MatFile5Writer + +__all__ = ['mat_reader_factory', 'loadmat', 'savemat', 'whosmat'] + + +def _open_file(file_like, appendmat): + """ + Open `file_like` and return as file-like object. First, check if object is + already file-like; if so, return it as-is. Otherwise, try to pass it + to open(). If that fails, and `file_like` is a string, and `appendmat` is true, + append '.mat' and try again. + """ + try: + file_like.read(0) + return file_like, False + except AttributeError: + pass + + try: + return open(file_like, 'rb'), True + except IOError: + # Probably "not found" + if isinstance(file_like, string_types): + if appendmat and not file_like.endswith('.mat'): + file_like += '.mat' + return open(file_like, 'rb'), True + else: + raise IOError('Reader needs file name or open file-like object') + +@docfiller +def mat_reader_factory(file_name, appendmat=True, **kwargs): + """ + Create reader for matlab .mat format files. + + Parameters + ---------- + %(file_arg)s + %(append_arg)s + %(load_args)s + %(struct_arg)s + + Returns + ------- + matreader : MatFileReader object + Initialized instance of MatFileReader class matching the mat file + type detected in `filename`. + file_opened : bool + Whether the file was opened by this routine. + + """ + byte_stream, file_opened = _open_file(file_name, appendmat) + mjv, mnv = get_matfile_version(byte_stream) + if mjv == 0: + return MatFile4Reader(byte_stream, **kwargs), file_opened + elif mjv == 1: + return MatFile5Reader(byte_stream, **kwargs), file_opened + elif mjv == 2: + raise NotImplementedError('Please use HDF reader for matlab v7.3 files') + else: + raise TypeError('Did not recognize version %s' % mjv) + + +@docfiller +def loadmat(file_name, mdict=None, appendmat=True, **kwargs): + """ + Load MATLAB file. + + Parameters + ---------- + file_name : str + Name of the mat file (does not need the .mat extension if + appendmat==True).
Can also pass open file-like object. + mdict : dict, optional + Dictionary in which to insert matfile variables. + appendmat : bool, optional + True to append the .mat extension to the end of the given + filename, if not already present. + byte_order : str or None, optional + None by default, implying byte order guessed from mat + file. Otherwise can be one of ('native', '=', 'little', '<', + 'BIG', '>'). + mat_dtype : bool, optional + If True, return arrays in same dtype as would be loaded into + MATLAB (instead of the dtype with which they are saved). + squeeze_me : bool, optional + Whether to squeeze unit matrix dimensions or not. + chars_as_strings : bool, optional + Whether to convert char arrays to string arrays. + matlab_compatible : bool, optional + Returns matrices as would be loaded by MATLAB (implies + squeeze_me=False, chars_as_strings=False, mat_dtype=True, + struct_as_record=True). + struct_as_record : bool, optional + Whether to load MATLAB structs as numpy record arrays, or as + old-style numpy arrays with dtype=object. Setting this flag to + False replicates the behavior of scipy version 0.7.x (returning + numpy object arrays). The default setting is True, because it + allows easier round-trip load and save of MATLAB files. + verify_compressed_data_integrity : bool, optional + Whether the length of compressed sequences in the MATLAB file + should be checked, to ensure that they are not longer than we expect. + It is advisable to enable this (the default) because overlong + compressed sequences in MATLAB files generally indicate that the + files have experienced some sort of corruption. + variable_names : None or sequence + If None (the default) - read all variables in file. Otherwise + `variable_names` should be a sequence of strings, giving names of the + MATLAB variables to read from the file. The reader will skip any + variable with a name not in this sequence, possibly saving some read + processing. + + Returns + ------- + mat_dict : dict + dictionary with variable names as keys, and loaded matrices as + values. + + Notes + ----- + v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported. + + You will need an HDF5 python library to read MATLAB 7.3 format mat + files. Because scipy does not supply one, we do not implement the + HDF5 / 7.3 interface here. + + Examples + -------- + >>> from os.path import dirname, join as pjoin + >>> import scipy.io as sio + + Get the filename for an example .mat file from the tests/data directory. + + >>> data_dir = pjoin(dirname(sio.__file__), 'matlab', 'tests', 'data') + >>> mat_fname = pjoin(data_dir, 'testdouble_7.4_GLNX86.mat') + + Load the .mat file contents. + + >>> mat_contents = sio.loadmat(mat_fname) + + The result is a dictionary, one key/value pair for each variable: + + >>> sorted(mat_contents.keys()) + ['__globals__', '__header__', '__version__', 'testdouble'] + >>> mat_contents['testdouble'] + array([[0. , 0.78539816, 1.57079633, 2.35619449, 3.14159265, + 3.92699082, 4.71238898, 5.49778714, 6.28318531]]) + + By default SciPy reads MATLAB structs as structured NumPy arrays where the + dtype fields are of type `object` and the names correspond to the MATLAB + struct field names. This can be disabled by setting the optional argument + `struct_as_record=False`. + + Get the filename for an example .mat file that contains a MATLAB struct + called `teststruct` and load the contents. 
+ + >>> matstruct_fname = pjoin(data_dir, 'teststruct_7.4_GLNX86.mat') + >>> matstruct_contents = sio.loadmat(matstruct_fname) + >>> teststruct = matstruct_contents['teststruct'] + >>> teststruct.dtype + dtype([('stringfield', 'O'), ('doublefield', 'O'), ('complexfield', 'O')]) + + The size of the structured array is the size of the MATLAB struct, not the + number of elements in any particular field. The shape defaults to 2-D + unless the optional argument `squeeze_me=True`, in which case all length 1 + dimensions are removed. + + >>> teststruct.size + 1 + >>> teststruct.shape + (1, 1) + + Get the 'stringfield' of the first element in the MATLAB struct. + + >>> teststruct[0, 0]['stringfield'] + array(['Rats live on no evil star.'], + dtype='<U26') + + Get the first element of the 'doublefield'. + + >>> teststruct['doublefield'][0, 0] + array([[ 1.41421356, 2.71828183, 3.14159265]]) + + Load the MATLAB struct, squeezing out length 1 dimensions, and get the item + from the 'complexfield'. + + >>> matstruct_squeezed = sio.loadmat(matstruct_fname, squeeze_me=True) + >>> matstruct_squeezed['teststruct'].shape + () + >>> matstruct_squeezed['teststruct']['complexfield'].shape + () + >>> matstruct_squeezed['teststruct']['complexfield'].item() + array([ 1.41421356+1.41421356j, 2.71828183+2.71828183j, + 3.14159265+3.14159265j]) + """ + variable_names = kwargs.pop('variable_names', None) + MR, file_opened = mat_reader_factory(file_name, appendmat, **kwargs) + matfile_dict = MR.get_variables(variable_names) + if mdict is not None: + mdict.update(matfile_dict) + else: + mdict = matfile_dict + if file_opened: + MR.mat_stream.close() + return mdict + + +@docfiller +def savemat(file_name, mdict, + appendmat=True, + format='5', + long_field_names=False, + do_compression=False, + oned_as='row'): + """ + Save a dictionary of names and arrays into a MATLAB-style .mat file. + + This saves the array objects in the given dictionary to a MATLAB- + style .mat file. + + Parameters + ---------- + file_name : str or file-like object + Name of the .mat file (.mat extension not needed if ``appendmat == + True``). + Can also pass open file_like object. + mdict : dict + Dictionary from which to save matfile variables. + appendmat : bool, optional + True (the default) to append the .mat extension to the end of the + given filename, if not already present. + format : {'5', '4'}, string, optional + '5' (the default) for MATLAB 5 and up (to 7.2), + '4' for MATLAB 4 .mat files. + long_field_names : bool, optional + False (the default) - maximum field name length in a structure is + 31 characters which is the documented maximum length. + True - maximum field name length in a structure is 63 characters + which works for MATLAB 7.6+. + do_compression : bool, optional + Whether or not to compress matrices on write. Default is False. + oned_as : {'row', 'column'}, optional + If 'column', write 1-D numpy arrays as column vectors. + If 'row', write 1-D numpy arrays as row vectors. 
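A minimal hedged round-trip sketch using the defaults described above (the file and variable names are illustrative only):

    import numpy as np
    from scipy.io import savemat, loadmat
    savemat('demo.mat', {'a': np.arange(6.).reshape(2, 3)})
    a = loadmat('demo.mat')['a']    # comes back as a 2-D double array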
+ + See also + -------- + mio4.MatFile4Writer + mio5.MatFile5Writer + """ + file_opened = False + if hasattr(file_name, 'write'): + # File-like object already; use as-is + file_stream = file_name + else: + if isinstance(file_name, string_types): + if appendmat and not file_name.endswith('.mat'): + file_name = file_name + ".mat" + + file_stream = open(file_name, 'wb') + file_opened = True + + if format == '4': + if long_field_names: + raise ValueError("Long field names are not available for version 4 files") + MW = MatFile4Writer(file_stream, oned_as) + elif format == '5': + MW = MatFile5Writer(file_stream, + do_compression=do_compression, + unicode_strings=True, + long_field_names=long_field_names, + oned_as=oned_as) + else: + raise ValueError("Format should be '4' or '5'") + MW.put_variables(mdict) + if file_opened: + file_stream.close() + + +@docfiller +def whosmat(file_name, appendmat=True, **kwargs): + """ + List variables inside a MATLAB file. + + Parameters + ---------- + %(file_arg)s + %(append_arg)s + %(load_args)s + %(struct_arg)s + + Returns + ------- + variables : list of tuples + A list of tuples, where each tuple holds the matrix name (a string), + its shape (tuple of ints), and its data class (a string). + Possible data classes are: int8, uint8, int16, uint16, int32, uint32, + int64, uint64, single, double, cell, struct, object, char, sparse, + function, opaque, logical, unknown. + + Notes + ----- + v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported. + + You will need an HDF5 python library to read matlab 7.3 format mat + files. Because scipy does not supply one, we do not implement the + HDF5 / 7.3 interface here. + + .. versionadded:: 0.12.0 + + """ + ML, file_opened = mat_reader_factory(file_name, **kwargs) + variables = ML.list_variables() + if file_opened: + ML.mat_stream.close() + return variables diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio.pyc new file mode 100644 index 0000000..e69327a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio4.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio4.py new file mode 100644 index 0000000..bf83bf7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio4.py @@ -0,0 +1,618 @@ +''' Classes for read / write of matlab (TM) 4 files +''' +from __future__ import division, print_function, absolute_import + +import sys +import warnings + +import numpy as np +from numpy.compat import asbytes, asstr + +import scipy.sparse + +from scipy._lib.six import string_types + +from .miobase import (MatFileReader, docfiller, matdims, read_dtype, + convert_dtypes, arr_to_chars, arr_dtype_number) + +from .mio_utils import squeeze_element, chars_to_strings +from functools import reduce + + +SYS_LITTLE_ENDIAN = sys.byteorder == 'little' + +miDOUBLE = 0 +miSINGLE = 1 +miINT32 = 2 +miINT16 = 3 +miUINT16 = 4 +miUINT8 = 5 + +mdtypes_template = { + miDOUBLE: 'f8', + miSINGLE: 'f4', + miINT32: 'i4', + miINT16: 'i2', + miUINT16: 'u2', + miUINT8: 'u1', + 'header': [('mopt', 'i4'), + ('mrows', 'i4'), + ('ncols', 'i4'), + ('imagf', 'i4'), + ('namlen', 'i4')], + 'U1': 'U1', + } + +np_to_mtypes = { + 'f8': miDOUBLE, + 'c32': miDOUBLE, + 'c24': miDOUBLE, + 'c16': miDOUBLE, + 'f4': miSINGLE, + 'c8': miSINGLE, + 'i4': miINT32, + 'i2': miINT16, + 'u2': miUINT16, + 'u1': miUINT8, + 'S1': miUINT8, + } + +# matrix 
classes +mxFULL_CLASS = 0 +mxCHAR_CLASS = 1 +mxSPARSE_CLASS = 2 + +order_codes = { + 0: '<', + 1: '>', + 2: 'VAX D-float', # ! + 3: 'VAX G-float', + 4: 'Cray', # !! + } + +mclass_info = { + mxFULL_CLASS: 'double', + mxCHAR_CLASS: 'char', + mxSPARSE_CLASS: 'sparse', + } + + +class VarHeader4(object): + # Mat4 variables never logical or global + is_logical = False + is_global = False + + def __init__(self, + name, + dtype, + mclass, + dims, + is_complex): + self.name = name + self.dtype = dtype + self.mclass = mclass + self.dims = dims + self.is_complex = is_complex + + +class VarReader4(object): + ''' Class to read matlab 4 variables ''' + + def __init__(self, file_reader): + self.file_reader = file_reader + self.mat_stream = file_reader.mat_stream + self.dtypes = file_reader.dtypes + self.chars_as_strings = file_reader.chars_as_strings + self.squeeze_me = file_reader.squeeze_me + + def read_header(self): + ''' Read and return header for variable ''' + data = read_dtype(self.mat_stream, self.dtypes['header']) + name = self.mat_stream.read(int(data['namlen'])).strip(b'\x00') + if data['mopt'] < 0 or data['mopt'] > 5000: + raise ValueError('Mat 4 mopt wrong format, byteswapping problem?') + M, rest = divmod(data['mopt'], 1000) # order code + if M not in (0, 1): + warnings.warn("We do not support byte ordering '%s'; returned " + "data may be corrupt" % order_codes[M], + UserWarning) + O, rest = divmod(rest, 100) # unused, should be 0 + if O != 0: + raise ValueError('O in MOPT integer should be 0, wrong format?') + P, rest = divmod(rest, 10) # data type code e.g. miDOUBLE (see above) + T = rest # matrix type code e.g. mxFULL_CLASS (see above) + dims = (data['mrows'], data['ncols']) + is_complex = data['imagf'] == 1 + dtype = self.dtypes[P] + return VarHeader4( + name, + dtype, + T, + dims, + is_complex) + + def array_from_header(self, hdr, process=True): + mclass = hdr.mclass + if mclass == mxFULL_CLASS: + arr = self.read_full_array(hdr) + elif mclass == mxCHAR_CLASS: + arr = self.read_char_array(hdr) + if process and self.chars_as_strings: + arr = chars_to_strings(arr) + elif mclass == mxSPARSE_CLASS: + # no current processing (below) makes sense for sparse + return self.read_sparse_array(hdr) + else: + raise TypeError('No reader for class code %s' % mclass) + if process and self.squeeze_me: + return squeeze_element(arr) + return arr + + def read_sub_array(self, hdr, copy=True): + ''' Mat4 read using header `hdr` dtype and dims + + Parameters + ---------- + hdr : object + object with attributes ``dtype``, ``dims``. dtype is assumed to be + the correct endianness + copy : bool, optional + copies array before return if True (default True) + (buffer is usually read only) + + Returns + ------- + arr : ndarray + of dtype given by `hdr` ``dtype`` and shape given by `hdr` ``dims`` + ''' + dt = hdr.dtype + dims = hdr.dims + num_bytes = dt.itemsize + for d in dims: + num_bytes *= d + buffer = self.mat_stream.read(int(num_bytes)) + if len(buffer) != num_bytes: + raise ValueError("Not enough bytes to read matrix '%s'; is this " + "a badly-formed file?
Consider listing matrices " + "with `whosmat` and loading named matrices with " + "`variable_names` kwarg to `loadmat`" % hdr.name) + arr = np.ndarray(shape=dims, + dtype=dt, + buffer=buffer, + order='F') + if copy: + arr = arr.copy() + return arr + + def read_full_array(self, hdr): + ''' Full (rather than sparse) matrix getter + + Read matrix (array) can be real or complex + + Parameters + ---------- + hdr : ``VarHeader4`` instance + + Returns + ------- + arr : ndarray + complex array if ``hdr.is_complex`` is True, otherwise a real + numeric array + ''' + if hdr.is_complex: + # avoid array copy to save memory + res = self.read_sub_array(hdr, copy=False) + res_j = self.read_sub_array(hdr, copy=False) + return res + (res_j * 1j) + return self.read_sub_array(hdr) + + def read_char_array(self, hdr): + ''' latin-1 text matrix (char matrix) reader + + Parameters + ---------- + hdr : ``VarHeader4`` instance + + Returns + ------- + arr : ndarray + with dtype 'U1', shape given by `hdr` ``dims`` + ''' + arr = self.read_sub_array(hdr).astype(np.uint8) + S = arr.tostring().decode('latin-1') + return np.ndarray(shape=hdr.dims, + dtype=np.dtype('U1'), + buffer=np.array(S)).copy() + + def read_sparse_array(self, hdr): + ''' Read and return sparse matrix type + + Parameters + ---------- + hdr : ``VarHeader4`` instance + + Returns + ------- + arr : ``scipy.sparse.coo_matrix`` + with dtype ``float`` and shape read from the sparse matrix data + + Notes + ----- + MATLAB 4 real sparse arrays are saved in a N+1 by 3 array format, where + N is the number of non-zero values. Column 1 values [0:N] are the + (1-based) row indices of the each non-zero value, column 2 [0:N] are the + column indices, column 3 [0:N] are the (real) values. The last values + [-1,0:2] of the rows, column indices are shape[0] and shape[1] + respectively of the output matrix. The last value for the values column + is a padding 0. mrows and ncols values from the header give the shape of + the stored matrix, here [N+1, 3]. Complex data is saved as a 4 column + matrix, where the fourth column contains the imaginary component; the + last value is again 0. Complex sparse data do *not* have the header + ``imagf`` field set to True; the fact that the data are complex is only + detectable because there are 4 storage columns + ''' + res = self.read_sub_array(hdr) + tmp = res[:-1,:] + # All numbers are float64 in Matlab, but Scipy sparse expects int shape + dims = (int(res[-1,0]), int(res[-1,1])) + I = np.ascontiguousarray(tmp[:,0],dtype='intc') # fixes byte order also + J = np.ascontiguousarray(tmp[:,1],dtype='intc') + I -= 1 # for 1-based indexing + J -= 1 + if res.shape[1] == 3: + V = np.ascontiguousarray(tmp[:,2],dtype='float') + else: + V = np.ascontiguousarray(tmp[:,2],dtype='complex') + V.imag = tmp[:,3] + return scipy.sparse.coo_matrix((V,(I,J)), dims) + + def shape_from_header(self, hdr): + '''Read the shape of the array described by the header. + The file position after this call is unspecified. 
+ ''' + mclass = hdr.mclass + if mclass == mxFULL_CLASS: + shape = tuple(map(int, hdr.dims)) + elif mclass == mxCHAR_CLASS: + shape = tuple(map(int, hdr.dims)) + if self.chars_as_strings: + shape = shape[:-1] + elif mclass == mxSPARSE_CLASS: + dt = hdr.dtype + dims = hdr.dims + + if not (len(dims) == 2 and dims[0] >= 1 and dims[1] >= 1): + return () + + # Read only the row and column counts + self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1) + rows = np.ndarray(shape=(1,), dtype=dt, + buffer=self.mat_stream.read(dt.itemsize)) + self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1) + cols = np.ndarray(shape=(1,), dtype=dt, + buffer=self.mat_stream.read(dt.itemsize)) + + shape = (int(rows), int(cols)) + else: + raise TypeError('No reader for class code %s' % mclass) + + if self.squeeze_me: + shape = tuple([x for x in shape if x != 1]) + return shape + + +class MatFile4Reader(MatFileReader): + ''' Reader for Mat4 files ''' + @docfiller + def __init__(self, mat_stream, *args, **kwargs): + ''' Initialize matlab 4 file reader + + %(matstream_arg)s + %(load_args)s + ''' + super(MatFile4Reader, self).__init__(mat_stream, *args, **kwargs) + self._matrix_reader = None + + def guess_byte_order(self): + self.mat_stream.seek(0) + mopt = read_dtype(self.mat_stream, np.dtype('i4')) + self.mat_stream.seek(0) + if mopt == 0: + return '<' + if mopt < 0 or mopt > 5000: + # Number must have been byteswapped + return SYS_LITTLE_ENDIAN and '>' or '<' + # Not byteswapped + return SYS_LITTLE_ENDIAN and '<' or '>' + + def initialize_read(self): + ''' Run when beginning read of variables + + Sets up readers from parameters in `self` + ''' + self.dtypes = convert_dtypes(mdtypes_template, self.byte_order) + self._matrix_reader = VarReader4(self) + + def read_var_header(self): + ''' Read and return header, next position + + Parameters + ---------- + None + + Returns + ------- + header : object + object that can be passed to self.read_var_array, and that + has attributes ``name`` and ``is_global`` + next_position : int + position in stream of next variable + ''' + hdr = self._matrix_reader.read_header() + n = reduce(lambda x, y: x*y, hdr.dims, 1) # fast product + remaining_bytes = hdr.dtype.itemsize * n + if hdr.is_complex and not hdr.mclass == mxSPARSE_CLASS: + remaining_bytes *= 2 + next_position = self.mat_stream.tell() + remaining_bytes + return hdr, next_position + + def read_var_array(self, header, process=True): + ''' Read array, given `header` + + Parameters + ---------- + header : header object + object with fields defining variable header + process : {True, False}, optional + If True, apply recursive post-processing during loading of array. + + Returns + ------- + arr : array + array with post-processing applied or not according to + `process`. + ''' + return self._matrix_reader.array_from_header(header, process) + + def get_variables(self, variable_names=None): + ''' get variables from stream as dictionary + + Parameters + ---------- + variable_names : None or str or sequence of str, optional + variable name, or sequence of variable names to get from Mat file / + file stream. 
If None, then get all variables in file + ''' + if isinstance(variable_names, string_types): + variable_names = [variable_names] + elif variable_names is not None: + variable_names = list(variable_names) + self.mat_stream.seek(0) + # set up variable reader + self.initialize_read() + mdict = {} + while not self.end_of_stream(): + hdr, next_position = self.read_var_header() + name = asstr(hdr.name) + if variable_names is not None and name not in variable_names: + self.mat_stream.seek(next_position) + continue + mdict[name] = self.read_var_array(hdr) + self.mat_stream.seek(next_position) + if variable_names is not None: + variable_names.remove(name) + if len(variable_names) == 0: + break + return mdict + + def list_variables(self): + ''' list variables from stream ''' + self.mat_stream.seek(0) + # set up variable reader + self.initialize_read() + vars = [] + while not self.end_of_stream(): + hdr, next_position = self.read_var_header() + name = asstr(hdr.name) + shape = self._matrix_reader.shape_from_header(hdr) + info = mclass_info.get(hdr.mclass, 'unknown') + vars.append((name, shape, info)) + + self.mat_stream.seek(next_position) + return vars + + +def arr_to_2d(arr, oned_as='row'): + ''' Make ``arr`` exactly two dimensional + + If `arr` has more than 2 dimensions, raise a ValueError + + Parameters + ---------- + arr : array + oned_as : {'row', 'column'}, optional + Whether to reshape 1D vectors as row vectors or column vectors. + See documentation for ``matdims`` for more detail + + Returns + ------- + arr2d : array + 2D version of the array + ''' + dims = matdims(arr, oned_as) + if len(dims) > 2: + raise ValueError('Matlab 4 files cannot save arrays with more than ' + '2 dimensions') + return arr.reshape(dims) + + +class VarWriter4(object): + def __init__(self, file_writer): + self.file_stream = file_writer.file_stream + self.oned_as = file_writer.oned_as + + def write_bytes(self, arr): + self.file_stream.write(arr.tostring(order='F')) + + def write_string(self, s): + self.file_stream.write(s) + + def write_header(self, name, shape, P=miDOUBLE, T=mxFULL_CLASS, imagf=0): + ''' Write header for given data options + + Parameters + ---------- + name : str + name of variable + shape : sequence + Shape of array as it will be read in matlab + P : int, optional + code for mat4 data type, one of ``miDOUBLE, miSINGLE, miINT32, + miINT16, miUINT16, miUINT8`` + T : int, optional + code for mat4 matrix class, one of ``mxFULL_CLASS, mxCHAR_CLASS, + mxSPARSE_CLASS`` + imagf : int, optional + flag indicating complex + ''' + header = np.empty((), mdtypes_template['header']) + M = not SYS_LITTLE_ENDIAN + O = 0 + header['mopt'] = (M * 1000 + + O * 100 + + P * 10 + + T) + header['mrows'] = shape[0] + header['ncols'] = shape[1] + header['imagf'] = imagf + header['namlen'] = len(name) + 1 + self.write_bytes(header) + self.write_string(asbytes(name + '\0')) + + def write(self, arr, name): + ''' Write matrix `arr`, with name `name` + + Parameters + ---------- + arr : array_like + array to write + name : str + name in matlab workspace + ''' + # we need to catch sparse first, because np.asarray returns an + # an object array for scipy.sparse + if scipy.sparse.issparse(arr): + self.write_sparse(arr, name) + return + arr = np.asarray(arr) + dt = arr.dtype + if not dt.isnative: + arr = arr.astype(dt.newbyteorder('=')) + dtt = dt.type + if dtt is np.object_: + raise TypeError('Cannot save object arrays in Mat4') + elif dtt is np.void: + raise TypeError('Cannot save void type arrays') + elif dtt in (np.unicode_, 
np.string_): + self.write_char(arr, name) + return + self.write_numeric(arr, name) + + def write_numeric(self, arr, name): + arr = arr_to_2d(arr, self.oned_as) + imagf = arr.dtype.kind == 'c' + try: + P = np_to_mtypes[arr.dtype.str[1:]] + except KeyError: + if imagf: + arr = arr.astype('c128') + else: + arr = arr.astype('f8') + P = miDOUBLE + self.write_header(name, + arr.shape, + P=P, + T=mxFULL_CLASS, + imagf=imagf) + if imagf: + self.write_bytes(arr.real) + self.write_bytes(arr.imag) + else: + self.write_bytes(arr) + + def write_char(self, arr, name): + arr = arr_to_chars(arr) + arr = arr_to_2d(arr, self.oned_as) + dims = arr.shape + self.write_header( + name, + dims, + P=miUINT8, + T=mxCHAR_CLASS) + if arr.dtype.kind == 'U': + # Recode unicode to latin1 + n_chars = np.product(dims) + st_arr = np.ndarray(shape=(), + dtype=arr_dtype_number(arr, n_chars), + buffer=arr) + st = st_arr.item().encode('latin-1') + arr = np.ndarray(shape=dims, dtype='S1', buffer=st) + self.write_bytes(arr) + + def write_sparse(self, arr, name): + ''' Sparse matrices are 2D + + See docstring for VarReader4.read_sparse_array + ''' + A = arr.tocoo() # convert to sparse COO format (ijv) + imagf = A.dtype.kind == 'c' + ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8') + ijv[:-1,0] = A.row + ijv[:-1,1] = A.col + ijv[:-1,0:2] += 1 # 1 based indexing + if imagf: + ijv[:-1,2] = A.data.real + ijv[:-1,3] = A.data.imag + else: + ijv[:-1,2] = A.data + ijv[-1,0:2] = A.shape + self.write_header( + name, + ijv.shape, + P=miDOUBLE, + T=mxSPARSE_CLASS) + self.write_bytes(ijv) + + +class MatFile4Writer(object): + ''' Class for writing matlab 4 format files ''' + def __init__(self, file_stream, oned_as=None): + self.file_stream = file_stream + if oned_as is None: + oned_as = 'row' + self.oned_as = oned_as + self._matrix_writer = None + + def put_variables(self, mdict, write_header=None): + ''' Write variables in `mdict` to stream + + Parameters + ---------- + mdict : mapping + mapping with method ``items`` return name, contents pairs + where ``name`` which will appeak in the matlab workspace in + file load, and ``contents`` is something writeable to a + matlab file, such as a numpy array. + write_header : {None, True, False} + If True, then write the matlab file header before writing the + variables. If None (the default) then write the file header + if we are at position 0 in the stream. By setting False + here, and setting the stream position to the end of the file, + you can append variables to a matlab file + ''' + # there is no header for a matlab 4 mat file, so we ignore the + # ``write_header`` input argument. 
It's there for compatibility + # with the matlab 5 version of this method + self._matrix_writer = VarWriter4(self) + for name, var in mdict.items(): + self._matrix_writer.write(var, name) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio4.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio4.pyc new file mode 100644 index 0000000..b7c4ee1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio4.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5.py new file mode 100644 index 0000000..af9e17b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5.py @@ -0,0 +1,849 @@ +''' Classes for read / write of matlab (TM) 5 files + +The matfile specification last found here: + +https://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf + +(as of December 5 2008) +''' +from __future__ import division, print_function, absolute_import + +''' +================================= + Note on functions and mat files +================================= + +The document above does not give any hints as to the storage of matlab +function handles, or anonymous function handles. I had therefore to +guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and +``mxOPAQUE_CLASS`` by looking at example mat files. + +``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to +contain a struct matrix with a set pattern of fields. For anonymous +functions, a sub-fields of one of these fields seems to contain the +well-named ``mxOPAQUE_CLASS``. This seems to contain: + +* array flags as for any matlab matrix +* 3 int8 strings +* a matrix + +It seems that, whenever the mat file contains a ``mxOPAQUE_CLASS`` +instance, there is also an un-named matrix (name == '') at the end of +the mat file. I'll call this the ``__function_workspace__`` matrix. + +When I saved two anonymous functions in a mat file, or appended another +anonymous function to the mat file, there was still only one +``__function_workspace__`` un-named matrix at the end, but larger than +that for a mat file with a single anonymous function, suggesting that +the workspaces for the two functions had been merged. + +The ``__function_workspace__`` matrix appears to be of double class +(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in +the format of a mini .mat file, without the first 124 bytes of the file +header (the description and the subsystem_offset), but with the version +U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes, +presumably for 8 byte padding, and then a series of ``miMATRIX`` +entries, as in a standard mat file. The ``miMATRIX`` entries appear to +be series of un-named (name == '') matrices, and may also contain arrays +of this same mini-mat format. + +I guess that: + +* saving an anonymous function back to a mat file will need the + associated ``__function_workspace__`` matrix saved as well for the + anonymous function to work correctly. +* appending to a mat file that has a ``__function_workspace__`` would + involve first pulling off this workspace, appending, checking whether + there were any more anonymous functions appended, and then somehow + merging the relevant workspaces, and saving at the end of the mat + file. 
+ +The mat files I was playing with are in ``tests/data``: + +* sqr.mat +* parabola.mat +* some_functions.mat + +See ``tests/test_mio.py:test_mio_funcs.py`` for a debugging +script I was working with. + +''' + +# Small fragments of current code adapted from matfile.py by Heiko +# Henkelmann + +import os +import time +import sys +import zlib + +from io import BytesIO + +import warnings + +import numpy as np +from numpy.compat import asbytes, asstr + +import scipy.sparse + +from scipy._lib.six import string_types + +from .byteordercodes import native_code, swapped_code + +from .miobase import (MatFileReader, docfiller, matdims, read_dtype, + arr_to_chars, arr_dtype_number, MatWriteError, + MatReadError, MatReadWarning) + +# Reader object for matlab 5 format variables +from .mio5_utils import VarReader5 + +# Constants and helper objects +from .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES, + NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8, + miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS, + mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS, + mxDOUBLE_CLASS, mclass_info) + +from .streams import ZlibInputStream + + +class MatFile5Reader(MatFileReader): + ''' Reader for Mat 5 mat files + Adds the following attribute to base class + + uint16_codec - char codec to use for uint16 char arrays + (defaults to system default codec) + + Uses variable reader that has the following stardard interface (see + abstract class in ``miobase``:: + + __init__(self, file_reader) + read_header(self) + array_from_header(self) + + and added interface:: + + set_stream(self, stream) + read_full_tag(self) + + ''' + @docfiller + def __init__(self, + mat_stream, + byte_order=None, + mat_dtype=False, + squeeze_me=False, + chars_as_strings=True, + matlab_compatible=False, + struct_as_record=True, + verify_compressed_data_integrity=True, + uint16_codec=None + ): + '''Initializer for matlab 5 file format reader + + %(matstream_arg)s + %(load_args)s + %(struct_arg)s + uint16_codec : {None, string} + Set codec to use for uint16 char arrays (e.g. 'utf-8'). + Use system default codec if None + ''' + super(MatFile5Reader, self).__init__( + mat_stream, + byte_order, + mat_dtype, + squeeze_me, + chars_as_strings, + matlab_compatible, + struct_as_record, + verify_compressed_data_integrity + ) + # Set uint16 codec + if not uint16_codec: + uint16_codec = sys.getdefaultencoding() + self.uint16_codec = uint16_codec + # placeholders for readers - see initialize_read method + self._file_reader = None + self._matrix_reader = None + + def guess_byte_order(self): + ''' Guess byte order. + Sets stream pointer to 0 ''' + self.mat_stream.seek(126) + mi = self.mat_stream.read(2) + self.mat_stream.seek(0) + return mi == b'IM' and '<' or '>' + + def read_file_header(self): + ''' Read in mat 5 file header ''' + hdict = {} + hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header'] + hdr = read_dtype(self.mat_stream, hdr_dtype) + hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000') + v_major = hdr['version'] >> 8 + v_minor = hdr['version'] & 0xFF + hdict['__version__'] = '%d.%d' % (v_major, v_minor) + return hdict + + def initialize_read(self): + ''' Run when beginning read of variables + + Sets up readers from parameters in `self` + ''' + # reader for top level stream. 
We need this extra top-level + # reader because we use the matrix_reader object to contain + # compressed matrices (so they have their own stream) + self._file_reader = VarReader5(self) + # reader for matrix streams + self._matrix_reader = VarReader5(self) + + def read_var_header(self): + ''' Read header, return header, next position + + Header has to define at least .name and .is_global + + Parameters + ---------- + None + + Returns + ------- + header : object + object that can be passed to self.read_var_array, and that + has attributes .name and .is_global + next_position : int + position in stream of next variable + ''' + mdtype, byte_count = self._file_reader.read_full_tag() + if not byte_count > 0: + raise ValueError("Did not read any bytes") + next_pos = self.mat_stream.tell() + byte_count + if mdtype == miCOMPRESSED: + # Make new stream from compressed data + stream = ZlibInputStream(self.mat_stream, byte_count) + self._matrix_reader.set_stream(stream) + check_stream_limit = self.verify_compressed_data_integrity + mdtype, byte_count = self._matrix_reader.read_full_tag() + else: + check_stream_limit = False + self._matrix_reader.set_stream(self.mat_stream) + if not mdtype == miMATRIX: + raise TypeError('Expecting miMATRIX type here, got %d' % mdtype) + header = self._matrix_reader.read_header(check_stream_limit) + return header, next_pos + + def read_var_array(self, header, process=True): + ''' Read array, given `header` + + Parameters + ---------- + header : header object + object with fields defining variable header + process : {True, False} bool, optional + If True, apply recursive post-processing during loading of + array. + + Returns + ------- + arr : array + array with post-processing applied or not according to + `process`. + ''' + return self._matrix_reader.array_from_header(header, process) + + def get_variables(self, variable_names=None): + ''' get variables from stream as dictionary + + variable_names - optional list of variable names to get + + If variable_names is None, then get all variables in file + ''' + if isinstance(variable_names, string_types): + variable_names = [variable_names] + elif variable_names is not None: + variable_names = list(variable_names) + + self.mat_stream.seek(0) + # Here we pass all the parameters in self to the reading objects + self.initialize_read() + mdict = self.read_file_header() + mdict['__globals__'] = [] + while not self.end_of_stream(): + hdr, next_position = self.read_var_header() + name = asstr(hdr.name) + if name in mdict: + warnings.warn('Duplicate variable name "%s" in stream' + ' - replacing previous with new\n' + 'Consider mio5.varmats_from_mat to split ' + 'file into single variable files' % name, + MatReadWarning, stacklevel=2) + if name == '': + # can only be a matlab 7 function workspace + name = '__function_workspace__' + # We want to keep this raw because mat_dtype processing + # will break the format (uint8 as mxDOUBLE_CLASS) + process = False + else: + process = True + if variable_names is not None and name not in variable_names: + self.mat_stream.seek(next_position) + continue + try: + res = self.read_var_array(hdr, process) + except MatReadError as err: + warnings.warn( + 'Unreadable variable "%s", because "%s"' % + (name, err), + Warning, stacklevel=2) + res = "Read error: %s" % err + self.mat_stream.seek(next_position) + mdict[name] = res + if hdr.is_global: + mdict['__globals__'].append(name) + if variable_names is not None: + variable_names.remove(name) + if len(variable_names) == 0: + break + return mdict + + def 
list_variables(self): + ''' list variables from stream ''' + self.mat_stream.seek(0) + # Here we pass all the parameters in self to the reading objects + self.initialize_read() + self.read_file_header() + vars = [] + while not self.end_of_stream(): + hdr, next_position = self.read_var_header() + name = asstr(hdr.name) + if name == '': + # can only be a matlab 7 function workspace + name = '__function_workspace__' + + shape = self._matrix_reader.shape_from_header(hdr) + if hdr.is_logical: + info = 'logical' + else: + info = mclass_info.get(hdr.mclass, 'unknown') + vars.append((name, shape, info)) + + self.mat_stream.seek(next_position) + return vars + + +def varmats_from_mat(file_obj): + """ Pull variables out of mat 5 file as a sequence of mat file objects + + This can be useful with a difficult mat file, containing unreadable + variables. This routine pulls the variables out in raw form and puts them, + unread, back into a file stream for saving or reading. Another use is the + pathological case where there is more than one variable of the same name in + the file; this routine returns the duplicates, whereas the standard reader + will overwrite duplicates in the returned dictionary. + + The file pointer in `file_obj` will be undefined. File pointers for the + returned file-like objects are set at 0. + + Parameters + ---------- + file_obj : file-like + file object containing mat file + + Returns + ------- + named_mats : list + list contains tuples of (name, BytesIO) where BytesIO is a file-like + object containing mat file contents as for a single variable. The + BytesIO contains a string with the original header and a single var. If + ``var_file_obj`` is an individual BytesIO instance, then save as a mat + file with something like ``open('test.mat', + 'wb').write(var_file_obj.read())`` + + Examples + -------- + >>> import scipy.io + + BytesIO is from the ``io`` module in python 3, and is ``cStringIO`` for + python < 3. + + >>> mat_fileobj = BytesIO() + >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'}) + >>> varmats = varmats_from_mat(mat_fileobj) + >>> sorted([name for name, str_obj in varmats]) + ['a', 'b'] + """ + rdr = MatFile5Reader(file_obj) + file_obj.seek(0) + # Raw read of top-level file header + hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize + raw_hdr = file_obj.read(hdr_len) + # Initialize variable reading + file_obj.seek(0) + rdr.initialize_read() + mdict = rdr.read_file_header() + next_position = file_obj.tell() + named_mats = [] + while not rdr.end_of_stream(): + start_position = next_position + hdr, next_position = rdr.read_var_header() + name = asstr(hdr.name) + # Read raw variable string + file_obj.seek(start_position) + byte_count = next_position - start_position + var_str = file_obj.read(byte_count) + # write to stringio object + out_obj = BytesIO() + out_obj.write(raw_hdr) + out_obj.write(var_str) + out_obj.seek(0) + named_mats.append((name, out_obj)) + return named_mats + + +class EmptyStructMarker(object): + """ Class to indicate presence of empty matlab struct on output """ + + +def to_writeable(source): + ''' Convert input object ``source`` to something we can write + + Parameters + ---------- + source : object + + Returns + ------- + arr : None or ndarray or EmptyStructMarker + If `source` cannot be converted to something we can write to a matfile, + return None. If `source` is equivalent to an empty dictionary, return + ``EmptyStructMarker``. 
Otherwise return `source` converted to an + ndarray with contents for writing to matfile. + ''' + if isinstance(source, np.ndarray): + return source + if source is None: + return None + # Objects that implement mappings + is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and + hasattr(source, 'items')) + # Objects that don't implement mappings, but do have dicts + if isinstance(source, np.generic): + # Numpy scalars are never mappings (pypy issue workaround) + pass + elif not is_mapping and hasattr(source, '__dict__'): + source = dict((key, value) for key, value in source.__dict__.items() + if not key.startswith('_')) + is_mapping = True + if is_mapping: + dtype = [] + values = [] + for field, value in source.items(): + if (isinstance(field, string_types) and + field[0] not in '_0123456789'): + dtype.append((str(field), object)) + values.append(value) + if dtype: + return np.array([tuple(values)], dtype) + else: + return EmptyStructMarker + # Next try and convert to an array + narr = np.asanyarray(source) + if narr.dtype.type in (object, np.object_) and \ + narr.shape == () and narr == source: + # No interesting conversion possible + return None + return narr + + +# Native byte ordered dtypes for convenience for writers +NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header'] +NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full'] +NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata'] +NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags'] + + +class VarWriter5(object): + ''' Generic matlab matrix writing class ''' + mat_tag = np.zeros((), NDT_TAG_FULL) + mat_tag['mdtype'] = miMATRIX + + def __init__(self, file_writer): + self.file_stream = file_writer.file_stream + self.unicode_strings = file_writer.unicode_strings + self.long_field_names = file_writer.long_field_names + self.oned_as = file_writer.oned_as + # These are used for top level writes, and unset after + self._var_name = None + self._var_is_global = False + + def write_bytes(self, arr): + self.file_stream.write(arr.tostring(order='F')) + + def write_string(self, s): + self.file_stream.write(s) + + def write_element(self, arr, mdtype=None): + ''' write tag and data ''' + if mdtype is None: + mdtype = NP_TO_MTYPES[arr.dtype.str[1:]] + # Array needs to be in native byte order + if arr.dtype.byteorder == swapped_code: + arr = arr.byteswap().newbyteorder() + byte_count = arr.size*arr.itemsize + if byte_count <= 4: + self.write_smalldata_element(arr, mdtype, byte_count) + else: + self.write_regular_element(arr, mdtype, byte_count) + + def write_smalldata_element(self, arr, mdtype, byte_count): + # write tag with embedded data + tag = np.zeros((), NDT_TAG_SMALL) + tag['byte_count_mdtype'] = (byte_count << 16) + mdtype + # if arr.tostring is < 4, the element will be zero-padded as needed. 
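+        # NDT_TAG_SMALL is 8 bytes in all: the byte count sits in the upper
+        # 16 bits of the first u4, the mdtype in the lower 16, and the data
+        # itself is embedded in the trailing 4-byte 'data' field.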
+ tag['data'] = arr.tostring(order='F') + self.write_bytes(tag) + + def write_regular_element(self, arr, mdtype, byte_count): + # write tag, data + tag = np.zeros((), NDT_TAG_FULL) + tag['mdtype'] = mdtype + tag['byte_count'] = byte_count + self.write_bytes(tag) + self.write_bytes(arr) + # pad to next 64-bit boundary + bc_mod_8 = byte_count % 8 + if bc_mod_8: + self.file_stream.write(b'\x00' * (8-bc_mod_8)) + + def write_header(self, + shape, + mclass, + is_complex=False, + is_logical=False, + nzmax=0): + ''' Write header for given data options + shape : sequence + array shape + mclass - mat5 matrix class + is_complex - True if matrix is complex + is_logical - True if matrix is logical + nzmax - max non zero elements for sparse arrays + + We get the name and the global flag from the object, and reset + them to defaults after we've used them + ''' + # get name and is_global from one-shot object store + name = self._var_name + is_global = self._var_is_global + # initialize the top-level matrix tag, store position + self._mat_tag_pos = self.file_stream.tell() + self.write_bytes(self.mat_tag) + # write array flags (complex, global, logical, class, nzmax) + af = np.zeros((), NDT_ARRAY_FLAGS) + af['data_type'] = miUINT32 + af['byte_count'] = 8 + flags = is_complex << 3 | is_global << 2 | is_logical << 1 + af['flags_class'] = mclass | flags << 8 + af['nzmax'] = nzmax + self.write_bytes(af) + # shape + self.write_element(np.array(shape, dtype='i4')) + # write name + name = np.asarray(name) + if name == '': # empty string zero-terminated + self.write_smalldata_element(name, miINT8, 0) + else: + self.write_element(name, miINT8) + # reset the one-shot store to defaults + self._var_name = '' + self._var_is_global = False + + def update_matrix_tag(self, start_pos): + curr_pos = self.file_stream.tell() + self.file_stream.seek(start_pos) + byte_count = curr_pos - start_pos - 8 + if byte_count >= 2**32: + raise MatWriteError("Matrix too large to save with Matlab " + "5 format") + self.mat_tag['byte_count'] = byte_count + self.write_bytes(self.mat_tag) + self.file_stream.seek(curr_pos) + + def write_top(self, arr, name, is_global): + """ Write variable at top level of mat file + + Parameters + ---------- + arr : array_like + array-like object to create writer for + name : str, optional + name as it will appear in matlab workspace + default is empty string + is_global : {False, True}, optional + whether variable will be global on load into matlab + """ + # these are set before the top-level header write, and unset at + # the end of the same write, because they do not apply for lower levels + self._var_is_global = is_global + self._var_name = name + # write the header and data + self.write(arr) + + def write(self, arr): + ''' Write `arr` to stream at top and sub levels + + Parameters + ---------- + arr : array_like + array-like object to create writer for + ''' + # store position, so we can update the matrix tag + mat_tag_pos = self.file_stream.tell() + # First check if these are sparse + if scipy.sparse.issparse(arr): + self.write_sparse(arr) + self.update_matrix_tag(mat_tag_pos) + return + # Try to convert things that aren't arrays + narr = to_writeable(arr) + if narr is None: + raise TypeError('Could not convert %s (type %s) to array' + % (arr, type(arr))) + if isinstance(narr, MatlabObject): + self.write_object(narr) + elif isinstance(narr, MatlabFunction): + raise MatWriteError('Cannot write matlab functions') + elif narr is EmptyStructMarker: # empty struct array + self.write_empty_struct() + elif 
narr.dtype.fields: # struct array + self.write_struct(narr) + elif narr.dtype.hasobject: # cell array + self.write_cells(narr) + elif narr.dtype.kind in ('U', 'S'): + if self.unicode_strings: + codec = 'UTF8' + else: + codec = 'ascii' + self.write_char(narr, codec) + else: + self.write_numeric(narr) + self.update_matrix_tag(mat_tag_pos) + + def write_numeric(self, arr): + imagf = arr.dtype.kind == 'c' + logif = arr.dtype.kind == 'b' + try: + mclass = NP_TO_MXTYPES[arr.dtype.str[1:]] + except KeyError: + # No matching matlab type, probably complex256 / float128 / float96 + # Cast data to complex128 / float64. + if imagf: + arr = arr.astype('c128') + elif logif: + arr = arr.astype('i1') # Should only contain 0/1 + else: + arr = arr.astype('f8') + mclass = mxDOUBLE_CLASS + self.write_header(matdims(arr, self.oned_as), + mclass, + is_complex=imagf, + is_logical=logif) + if imagf: + self.write_element(arr.real) + self.write_element(arr.imag) + else: + self.write_element(arr) + + def write_char(self, arr, codec='ascii'): + ''' Write string array `arr` with given `codec` + ''' + if arr.size == 0 or np.all(arr == ''): + # This an empty string array or a string array containing + # only empty strings. Matlab cannot distinguish between a + # string array that is empty, and a string array containing + # only empty strings, because it stores strings as arrays of + # char. There is no way of having an array of char that is + # not empty, but contains an empty string. We have to + # special-case the array-with-empty-strings because even + # empty strings have zero padding, which would otherwise + # appear in matlab as a string with a space. + shape = (0,) * np.max([arr.ndim, 2]) + self.write_header(shape, mxCHAR_CLASS) + self.write_smalldata_element(arr, miUTF8, 0) + return + # non-empty string. + # + # Convert to char array + arr = arr_to_chars(arr) + # We have to write the shape directly, because we are going + # recode the characters, and the resulting stream of chars + # may have a different length + shape = arr.shape + self.write_header(shape, mxCHAR_CLASS) + if arr.dtype.kind == 'U' and arr.size: + # Make one long string from all the characters. We need to + # transpose here, because we're flattening the array, before + # we write the bytes. The bytes have to be written in + # Fortran order. 
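+            # (the arr.T.copy() buffer below is C-contiguous, which is the
+            # same byte sequence as the original array read in Fortran order)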
+ n_chars = np.product(shape) + st_arr = np.ndarray(shape=(), + dtype=arr_dtype_number(arr, n_chars), + buffer=arr.T.copy()) # Fortran order + # Recode with codec to give byte string + st = st_arr.item().encode(codec) + # Reconstruct as one-dimensional byte array + arr = np.ndarray(shape=(len(st),), + dtype='S1', + buffer=st) + self.write_element(arr, mdtype=miUTF8) + + def write_sparse(self, arr): + ''' Sparse matrices are 2D + ''' + A = arr.tocsc() # convert to sparse CSC format + A.sort_indices() # MATLAB expects sorted row indices + is_complex = (A.dtype.kind == 'c') + is_logical = (A.dtype.kind == 'b') + nz = A.nnz + self.write_header(matdims(arr, self.oned_as), + mxSPARSE_CLASS, + is_complex=is_complex, + is_logical=is_logical, + # matlab won't load file with 0 nzmax + nzmax=1 if nz == 0 else nz) + self.write_element(A.indices.astype('i4')) + self.write_element(A.indptr.astype('i4')) + self.write_element(A.data.real) + if is_complex: + self.write_element(A.data.imag) + + def write_cells(self, arr): + self.write_header(matdims(arr, self.oned_as), + mxCELL_CLASS) + # loop over data, column major + A = np.atleast_2d(arr).flatten('F') + for el in A: + self.write(el) + + def write_empty_struct(self): + self.write_header((1, 1), mxSTRUCT_CLASS) + # max field name length set to 1 in an example matlab struct + self.write_element(np.array(1, dtype=np.int32)) + # Field names element is empty + self.write_element(np.array([], dtype=np.int8)) + + def write_struct(self, arr): + self.write_header(matdims(arr, self.oned_as), + mxSTRUCT_CLASS) + self._write_items(arr) + + def _write_items(self, arr): + # write fieldnames + fieldnames = [f[0] for f in arr.dtype.descr] + length = max([len(fieldname) for fieldname in fieldnames])+1 + max_length = (self.long_field_names and 64) or 32 + if length > max_length: + raise ValueError("Field names are restricted to %d characters" % + (max_length-1)) + self.write_element(np.array([length], dtype='i4')) + self.write_element( + np.array(fieldnames, dtype='S%d' % (length)), + mdtype=miINT8) + A = np.atleast_2d(arr).flatten('F') + for el in A: + for f in fieldnames: + self.write(el[f]) + + def write_object(self, arr): + '''Same as writing structs, except different mx class, and extra + classname element after header + ''' + self.write_header(matdims(arr, self.oned_as), + mxOBJECT_CLASS) + self.write_element(np.array(arr.classname, dtype='S'), + mdtype=miINT8) + self._write_items(arr) + + +class MatFile5Writer(object): + ''' Class for writing mat5 files ''' + + @docfiller + def __init__(self, file_stream, + do_compression=False, + unicode_strings=False, + global_vars=None, + long_field_names=False, + oned_as='row'): + ''' Initialize writer for matlab 5 format files + + Parameters + ---------- + %(do_compression)s + %(unicode_strings)s + global_vars : None or sequence of strings, optional + Names of variables to be marked as global for matlab + %(long_fields)s + %(oned_as)s + ''' + self.file_stream = file_stream + self.do_compression = do_compression + self.unicode_strings = unicode_strings + if global_vars: + self.global_vars = global_vars + else: + self.global_vars = [] + self.long_field_names = long_field_names + self.oned_as = oned_as + self._matrix_writer = None + + def write_file_header(self): + # write header + hdr = np.zeros((), NDT_FILE_HDR) + hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \ + % (os.name,time.asctime()) + hdr['version'] = 0x0100 + hdr['endian_test'] = np.ndarray(shape=(), + dtype='S2', + buffer=np.uint16(0x4d49)) + 
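+        # np.uint16(0x4d49) is the byte pair 'IM' on little-endian builds and
+        # 'MI' on big-endian ones; MatFile5Reader.guess_byte_order reads these
+        # two bytes at offset 126 to detect the byte order of the file.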
self.file_stream.write(hdr.tostring()) + + def put_variables(self, mdict, write_header=None): + ''' Write variables in `mdict` to stream + + Parameters + ---------- + mdict : mapping + mapping with method ``items`` returns name, contents pairs where + ``name`` which will appear in the matlab workspace in file load, and + ``contents`` is something writeable to a matlab file, such as a numpy + array. + write_header : {None, True, False}, optional + If True, then write the matlab file header before writing the + variables. If None (the default) then write the file header + if we are at position 0 in the stream. By setting False + here, and setting the stream position to the end of the file, + you can append variables to a matlab file + ''' + # write header if requested, or None and start of file + if write_header is None: + write_header = self.file_stream.tell() == 0 + if write_header: + self.write_file_header() + self._matrix_writer = VarWriter5(self) + for name, var in mdict.items(): + if name[0] == '_': + continue + is_global = name in self.global_vars + if self.do_compression: + stream = BytesIO() + self._matrix_writer.file_stream = stream + self._matrix_writer.write_top(var, asbytes(name), is_global) + out_str = zlib.compress(stream.getvalue()) + tag = np.empty((), NDT_TAG_FULL) + tag['mdtype'] = miCOMPRESSED + tag['byte_count'] = len(out_str) + self.file_stream.write(tag.tostring()) + self.file_stream.write(out_str) + else: # not compressing + self._matrix_writer.write_top(var, asbytes(name), is_global) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5.pyc new file mode 100644 index 0000000..60f53b9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5_params.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5_params.py new file mode 100644 index 0000000..d2cbbe6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5_params.py @@ -0,0 +1,254 @@ +''' Constants and classes for matlab 5 read and write + +See also mio5_utils.pyx where these same constants arise as c enums. + +If you make changes in this file, don't forget to change mio5_utils.pyx +''' +from __future__ import division, print_function, absolute_import + +import numpy as np + +from .miobase import convert_dtypes + +miINT8 = 1 +miUINT8 = 2 +miINT16 = 3 +miUINT16 = 4 +miINT32 = 5 +miUINT32 = 6 +miSINGLE = 7 +miDOUBLE = 9 +miINT64 = 12 +miUINT64 = 13 +miMATRIX = 14 +miCOMPRESSED = 15 +miUTF8 = 16 +miUTF16 = 17 +miUTF32 = 18 + +mxCELL_CLASS = 1 +mxSTRUCT_CLASS = 2 +# The March 2008 edition of "Matlab 7 MAT-File Format" says that +# mxOBJECT_CLASS = 3, whereas matrix.h says that mxLOGICAL = 3. +# Matlab 2008a appears to save logicals as type 9, so we assume that +# the document is correct. See type 18, below. +mxOBJECT_CLASS = 3 +mxCHAR_CLASS = 4 +mxSPARSE_CLASS = 5 +mxDOUBLE_CLASS = 6 +mxSINGLE_CLASS = 7 +mxINT8_CLASS = 8 +mxUINT8_CLASS = 9 +mxINT16_CLASS = 10 +mxUINT16_CLASS = 11 +mxINT32_CLASS = 12 +mxUINT32_CLASS = 13 +# The following are not in the March 2008 edition of "Matlab 7 +# MAT-File Format," but were guessed from matrix.h. +mxINT64_CLASS = 14 +mxUINT64_CLASS = 15 +mxFUNCTION_CLASS = 16 +# Not doing anything with these at the moment. 
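+# (on read, mxOPAQUE_CLASS matrices come back wrapped as MatlabOpaque object
+# arrays; see OPAQUE_DTYPE at the end of this module)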
+mxOPAQUE_CLASS = 17 # This appears to be a function workspace +# Thread 'saveing/loading symbol table of annymous functions', octave-maintainers, April-May 2007 +# https://lists.gnu.org/archive/html/octave-maintainers/2007-04/msg00031.html +# https://lists.gnu.org/archive/html/octave-maintainers/2007-05/msg00032.html +# (Was/Deprecated: https://www-old.cae.wisc.edu/pipermail/octave-maintainers/2007-May/002824.html) +mxOBJECT_CLASS_FROM_MATRIX_H = 18 + +mdtypes_template = { + miINT8: 'i1', + miUINT8: 'u1', + miINT16: 'i2', + miUINT16: 'u2', + miINT32: 'i4', + miUINT32: 'u4', + miSINGLE: 'f4', + miDOUBLE: 'f8', + miINT64: 'i8', + miUINT64: 'u8', + miUTF8: 'u1', + miUTF16: 'u2', + miUTF32: 'u4', + 'file_header': [('description', 'S116'), + ('subsystem_offset', 'i8'), + ('version', 'u2'), + ('endian_test', 'S2')], + 'tag_full': [('mdtype', 'u4'), ('byte_count', 'u4')], + 'tag_smalldata':[('byte_count_mdtype', 'u4'), ('data', 'S4')], + 'array_flags': [('data_type', 'u4'), + ('byte_count', 'u4'), + ('flags_class','u4'), + ('nzmax', 'u4')], + 'U1': 'U1', + } + +mclass_dtypes_template = { + mxINT8_CLASS: 'i1', + mxUINT8_CLASS: 'u1', + mxINT16_CLASS: 'i2', + mxUINT16_CLASS: 'u2', + mxINT32_CLASS: 'i4', + mxUINT32_CLASS: 'u4', + mxINT64_CLASS: 'i8', + mxUINT64_CLASS: 'u8', + mxSINGLE_CLASS: 'f4', + mxDOUBLE_CLASS: 'f8', + } + +mclass_info = { + mxINT8_CLASS: 'int8', + mxUINT8_CLASS: 'uint8', + mxINT16_CLASS: 'int16', + mxUINT16_CLASS: 'uint16', + mxINT32_CLASS: 'int32', + mxUINT32_CLASS: 'uint32', + mxINT64_CLASS: 'int64', + mxUINT64_CLASS: 'uint64', + mxSINGLE_CLASS: 'single', + mxDOUBLE_CLASS: 'double', + mxCELL_CLASS: 'cell', + mxSTRUCT_CLASS: 'struct', + mxOBJECT_CLASS: 'object', + mxCHAR_CLASS: 'char', + mxSPARSE_CLASS: 'sparse', + mxFUNCTION_CLASS: 'function', + mxOPAQUE_CLASS: 'opaque', + } + +NP_TO_MTYPES = { + 'f8': miDOUBLE, + 'c32': miDOUBLE, + 'c24': miDOUBLE, + 'c16': miDOUBLE, + 'f4': miSINGLE, + 'c8': miSINGLE, + 'i8': miINT64, + 'i4': miINT32, + 'i2': miINT16, + 'i1': miINT8, + 'u8': miUINT64, + 'u4': miUINT32, + 'u2': miUINT16, + 'u1': miUINT8, + 'S1': miUINT8, + 'U1': miUTF16, + 'b1': miUINT8, # not standard but seems MATLAB uses this (gh-4022) + } + + +NP_TO_MXTYPES = { + 'f8': mxDOUBLE_CLASS, + 'c32': mxDOUBLE_CLASS, + 'c24': mxDOUBLE_CLASS, + 'c16': mxDOUBLE_CLASS, + 'f4': mxSINGLE_CLASS, + 'c8': mxSINGLE_CLASS, + 'i8': mxINT64_CLASS, + 'i4': mxINT32_CLASS, + 'i2': mxINT16_CLASS, + 'i1': mxINT8_CLASS, + 'u8': mxUINT64_CLASS, + 'u4': mxUINT32_CLASS, + 'u2': mxUINT16_CLASS, + 'u1': mxUINT8_CLASS, + 'S1': mxUINT8_CLASS, + 'b1': mxUINT8_CLASS, # not standard but seems MATLAB uses this + } + +''' Before release v7.1 (release 14) matlab (TM) used the system +default character encoding scheme padded out to 16-bits. Release 14 +and later use Unicode. 
When saving character data, R14 checks if it +can be encoded in 7-bit ascii, and saves in that format if so.''' + +codecs_template = { + miUTF8: {'codec': 'utf_8', 'width': 1}, + miUTF16: {'codec': 'utf_16', 'width': 2}, + miUTF32: {'codec': 'utf_32','width': 4}, + } + + +def _convert_codecs(template, byte_order): + ''' Convert codec template mapping to byte order + + Set codecs not on this system to None + + Parameters + ---------- + template : mapping + key, value are respectively codec name, and root name for codec + (without byte order suffix) + byte_order : {'<', '>'} + code for little or big endian + + Returns + ------- + codecs : dict + key, value are name, codec (as in .encode(codec)) + ''' + codecs = {} + postfix = byte_order == '<' and '_le' or '_be' + for k, v in template.items(): + codec = v['codec'] + try: + " ".encode(codec) + except LookupError: + codecs[k] = None + continue + if v['width'] > 1: + codec += postfix + codecs[k] = codec + return codecs.copy() + + +MDTYPES = {} +for _bytecode in '<>': + _def = {'dtypes': convert_dtypes(mdtypes_template, _bytecode), + 'classes': convert_dtypes(mclass_dtypes_template, _bytecode), + 'codecs': _convert_codecs(codecs_template, _bytecode)} + MDTYPES[_bytecode] = _def + + +class mat_struct(object): + ''' Placeholder for holding read data from structs + + We use instances of this class when the user passes False as a value to the + ``struct_as_record`` parameter of the :func:`scipy.io.matlab.loadmat` + function. + ''' + pass + + +class MatlabObject(np.ndarray): + ''' ndarray Subclass to contain matlab object ''' + def __new__(cls, input_array, classname=None): + # Input array is an already formed ndarray instance + # We first cast to be our class type + obj = np.asarray(input_array).view(cls) + # add the new attribute to the created instance + obj.classname = classname + # Finally, we must return the newly created object: + return obj + + def __array_finalize__(self,obj): + # reset the attribute from passed original object + self.classname = getattr(obj, 'classname', None) + # We do not need to return anything + + +class MatlabFunction(np.ndarray): + ''' Subclass to signal this is a matlab function ''' + def __new__(cls, input_array): + obj = np.asarray(input_array).view(cls) + return obj + + +class MatlabOpaque(np.ndarray): + ''' Subclass to signal this is a matlab opaque matrix ''' + def __new__(cls, input_array): + obj = np.asarray(input_array).view(cls) + return obj + + +OPAQUE_DTYPE = np.dtype( + [('s0', 'O'), ('s1', 'O'), ('s2', 'O'), ('arr', 'O')]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5_params.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5_params.pyc new file mode 100644 index 0000000..a23de7a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5_params.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5_utils.so b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5_utils.so new file mode 100755 index 0000000..0ede6f7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio5_utils.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio_utils.so b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio_utils.so new file mode 100755 index 0000000..8156f3a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/mio_utils.so differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/miobase.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/miobase.py new file mode 100644 index 0000000..5ce5ac1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/miobase.py @@ -0,0 +1,415 @@ +# Authors: Travis Oliphant, Matthew Brett + +""" +Base classes for MATLAB file stream reading. + +MATLAB is a registered trademark of the Mathworks inc. +""" +from __future__ import division, print_function, absolute_import + +import sys +import operator + +from scipy._lib.six import reduce + +import numpy as np + +if sys.version_info[0] >= 3: + byteord = int +else: + byteord = ord + +from scipy.misc import doccer + +from . import byteordercodes as boc + + +class MatReadError(Exception): + pass + + +class MatWriteError(Exception): + pass + + +class MatReadWarning(UserWarning): + pass + + +doc_dict = \ + {'file_arg': + '''file_name : str + Name of the mat file (do not need .mat extension if + appendmat==True) Can also pass open file-like object.''', + 'append_arg': + '''appendmat : bool, optional + True to append the .mat extension to the end of the given + filename, if not already present.''', + 'load_args': + '''byte_order : str or None, optional + None by default, implying byte order guessed from mat + file. Otherwise can be one of ('native', '=', 'little', '<', + 'BIG', '>'). +mat_dtype : bool, optional + If True, return arrays in same dtype as would be loaded into + MATLAB (instead of the dtype with which they are saved). +squeeze_me : bool, optional + Whether to squeeze unit matrix dimensions or not. +chars_as_strings : bool, optional + Whether to convert char arrays to string arrays. +matlab_compatible : bool, optional + Returns matrices as would be loaded by MATLAB (implies + squeeze_me=False, chars_as_strings=False, mat_dtype=True, + struct_as_record=True).''', + 'struct_arg': + '''struct_as_record : bool, optional + Whether to load MATLAB structs as numpy record arrays, or as + old-style numpy arrays with dtype=object. Setting this flag to + False replicates the behavior of scipy version 0.7.x (returning + numpy object arrays). The default setting is True, because it + allows easier round-trip load and save of MATLAB files.''', + 'matstream_arg': + '''mat_stream : file-like + Object with file API, open for reading.''', + 'long_fields': + '''long_field_names : bool, optional + * False - maximum field name length in a structure is 31 characters + which is the documented maximum length. This is the default. + * True - maximum field name length in a structure is 63 characters + which works for MATLAB 7.6''', + 'do_compression': + '''do_compression : bool, optional + Whether to compress matrices on write. Default is False.''', + 'oned_as': + '''oned_as : {'row', 'column'}, optional + If 'column', write 1-D numpy arrays as column vectors. + If 'row', write 1D numpy arrays as row vectors.''', + 'unicode_strings': + '''unicode_strings : bool, optional + If True, write strings as Unicode, else MATLAB usual encoding.'''} + +docfiller = doccer.filldoc(doc_dict) + +''' + + Note on architecture +====================== + +There are three sets of parameters relevant for reading files. The +first are *file read parameters* - containing options that are common +for reading the whole file, and therefore every variable within that +file. 
At the moment these are: + +* mat_stream +* dtypes (derived from byte code) +* byte_order +* chars_as_strings +* squeeze_me +* struct_as_record (MATLAB 5 files) +* class_dtypes (derived from order code, MATLAB 5 files) +* codecs (MATLAB 5 files) +* uint16_codec (MATLAB 5 files) + +Another set of parameters are those that apply only to the current +variable being read - the *header*: + +* header related variables (different for v4 and v5 mat files) +* is_complex +* mclass +* var_stream + +With the header, we need ``next_position`` to tell us where the next +variable in the stream is. + +Then, for each element in a matrix, there can be *element read +parameters*. An element is, for example, one element in a MATLAB cell +array. At the moment these are: + +* mat_dtype + +The file-reading object contains the *file read parameters*. The +*header* is passed around as a data object, or may be read and discarded +in a single function. The *element read parameters* - the mat_dtype in +this instance, is passed into a general post-processing function - see +``mio_utils`` for details. +''' + + +def convert_dtypes(dtype_template, order_code): + ''' Convert dtypes in mapping to given order + + Parameters + ---------- + dtype_template : mapping + mapping with values returning numpy dtype from ``np.dtype(val)`` + order_code : str + an order code suitable for using in ``dtype.newbyteorder()`` + + Returns + ------- + dtypes : mapping + mapping where values have been replaced by + ``np.dtype(val).newbyteorder(order_code)`` + + ''' + dtypes = dtype_template.copy() + for k in dtypes: + dtypes[k] = np.dtype(dtypes[k]).newbyteorder(order_code) + return dtypes + + +def read_dtype(mat_stream, a_dtype): + """ + Generic get of byte stream data of known type + + Parameters + ---------- + mat_stream : file_like object + MATLAB (tm) mat file stream + a_dtype : dtype + dtype of array to read. `a_dtype` is assumed to be correct + endianness. + + Returns + ------- + arr : ndarray + Array of dtype `a_dtype` read from stream. + + """ + num_bytes = a_dtype.itemsize + arr = np.ndarray(shape=(), + dtype=a_dtype, + buffer=mat_stream.read(num_bytes), + order='F') + return arr + + +def get_matfile_version(fileobj): + """ + Return major, minor tuple depending on apparent mat file type + + Where: + + #. 0,x -> version 4 format mat files + #. 1,x -> version 5 format mat files + #. 2,x -> version 7.3 format mat files (HDF format) + + Parameters + ---------- + fileobj : file_like + object implementing seek() and read() + + Returns + ------- + major_version : {0, 1, 2} + major MATLAB File format version + minor_version : int + minor MATLAB file format version + + Raises + ------ + MatReadError + If the file is empty. + ValueError + The matfile version is unknown. + + Notes + ----- + Has the side effect of setting the file read pointer to 0 + """ + # Mat4 files have a zero somewhere in first 4 bytes + fileobj.seek(0) + mopt_bytes = fileobj.read(4) + if len(mopt_bytes) == 0: + raise MatReadError("Mat file appears to be empty") + mopt_ints = np.ndarray(shape=(4,), dtype=np.uint8, buffer=mopt_bytes) + if 0 in mopt_ints: + fileobj.seek(0) + return (0,0) + # For 5 format or 7.3 format we need to read an integer in the + # header. 
Bytes 124 through 128 contain a version integer and an + # endian test string + fileobj.seek(124) + tst_str = fileobj.read(4) + fileobj.seek(0) + maj_ind = int(tst_str[2] == b'I'[0]) + maj_val = byteord(tst_str[maj_ind]) + min_val = byteord(tst_str[1-maj_ind]) + ret = (maj_val, min_val) + if maj_val in (1, 2): + return ret + raise ValueError('Unknown mat file type, version %s, %s' % ret) + + +def matdims(arr, oned_as='column'): + """ + Determine equivalent MATLAB dimensions for given array + + Parameters + ---------- + arr : ndarray + Input array + oned_as : {'column', 'row'}, optional + Whether 1-D arrays are returned as MATLAB row or column matrices. + Default is 'column'. + + Returns + ------- + dims : tuple + Shape tuple, in the form MATLAB expects it. + + Notes + ----- + We had to decide what shape a 1 dimensional array would be by + default. ``np.atleast_2d`` thinks it is a row vector. The + default for a vector in MATLAB (e.g. ``>> 1:12``) is a row vector. + + Versions of scipy up to and including 0.11 resulted (accidentally) + in 1-D arrays being read as column vectors. For the moment, we + maintain the same tradition here. + + Examples + -------- + >>> matdims(np.array(1)) # numpy scalar + (1, 1) + >>> matdims(np.array([1])) # 1d array, 1 element + (1, 1) + >>> matdims(np.array([1,2])) # 1d array, 2 elements + (2, 1) + >>> matdims(np.array([[2],[3]])) # 2d array, column vector + (2, 1) + >>> matdims(np.array([[2,3]])) # 2d array, row vector + (1, 2) + >>> matdims(np.array([[[2,3]]])) # 3d array, rowish vector + (1, 1, 2) + >>> matdims(np.array([])) # empty 1d array + (0, 0) + >>> matdims(np.array([[]])) # empty 2d + (0, 0) + >>> matdims(np.array([[[]]])) # empty 3d + (0, 0, 0) + + Optional argument flips 1-D shape behavior. + + >>> matdims(np.array([1,2]), 'row') # 1d array, 2 elements + (1, 2) + + The argument has to make sense though + + >>> matdims(np.array([1,2]), 'bizarre') + Traceback (most recent call last): + ... 
+ ValueError: 1D option "bizarre" is strange + + """ + shape = arr.shape + if shape == (): # scalar + return (1,1) + if reduce(operator.mul, shape) == 0: # zero elememts + return (0,) * np.max([arr.ndim, 2]) + if len(shape) == 1: # 1D + if oned_as == 'column': + return shape + (1,) + elif oned_as == 'row': + return (1,) + shape + else: + raise ValueError('1D option "%s" is strange' + % oned_as) + return shape + + +class MatVarReader(object): + ''' Abstract class defining required interface for var readers''' + def __init__(self, file_reader): + pass + + def read_header(self): + ''' Returns header ''' + pass + + def array_from_header(self, header): + ''' Reads array given header ''' + pass + + +class MatFileReader(object): + """ Base object for reading mat files + + To make this class functional, you will need to override the + following methods: + + matrix_getter_factory - gives object to fetch next matrix from stream + guess_byte_order - guesses file byte order from file + """ + + @docfiller + def __init__(self, mat_stream, + byte_order=None, + mat_dtype=False, + squeeze_me=False, + chars_as_strings=True, + matlab_compatible=False, + struct_as_record=True, + verify_compressed_data_integrity=True + ): + ''' + Initializer for mat file reader + + mat_stream : file-like + object with file API, open for reading + %(load_args)s + ''' + # Initialize stream + self.mat_stream = mat_stream + self.dtypes = {} + if not byte_order: + byte_order = self.guess_byte_order() + else: + byte_order = boc.to_numpy_code(byte_order) + self.byte_order = byte_order + self.struct_as_record = struct_as_record + if matlab_compatible: + self.set_matlab_compatible() + else: + self.squeeze_me = squeeze_me + self.chars_as_strings = chars_as_strings + self.mat_dtype = mat_dtype + self.verify_compressed_data_integrity = verify_compressed_data_integrity + + def set_matlab_compatible(self): + ''' Sets options to return arrays as MATLAB loads them ''' + self.mat_dtype = True + self.squeeze_me = False + self.chars_as_strings = False + + def guess_byte_order(self): + ''' As we do not know what file type we have, assume native ''' + return boc.native_code + + def end_of_stream(self): + b = self.mat_stream.read(1) + curpos = self.mat_stream.tell() + self.mat_stream.seek(curpos-1) + return len(b) == 0 + + +def arr_dtype_number(arr, num): + ''' Return dtype for given number of items per element''' + return np.dtype(arr.dtype.str[:2] + str(num)) + + +def arr_to_chars(arr): + ''' Convert string array to char array ''' + dims = list(arr.shape) + if not dims: + dims = [1] + dims.append(int(arr.dtype.str[2:])) + arr = np.ndarray(shape=dims, + dtype=arr_dtype_number(arr, 1), + buffer=arr) + empties = [arr == ''] + if not np.any(empties): + return arr + arr = arr.copy() + arr[tuple(empties)] = ' ' + return arr diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/miobase.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/miobase.pyc new file mode 100644 index 0000000..8e70f02 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/miobase.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/setup.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/setup.py new file mode 100644 index 0000000..00de55b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/setup.py @@ -0,0 +1,16 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='io',top_path=None): + from 
numpy.distutils.misc_util import Configuration + config = Configuration('matlab', parent_package, top_path) + config.add_extension('streams', sources=['streams.c']) + config.add_extension('mio_utils', sources=['mio_utils.c']) + config.add_extension('mio5_utils', sources=['mio5_utils.c']) + config.add_data_dir('tests') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/setup.pyc new file mode 100644 index 0000000..7e03fd1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/streams.so b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/streams.so new file mode 100755 index 0000000..decbcdc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/streams.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/__init__.pyc new file mode 100644 index 0000000..a94e3c4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/afunc.m b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/afunc.m new file mode 100644 index 0000000..5cbf628 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/afunc.m @@ -0,0 +1,4 @@ +function [a, b] = afunc(c, d) +% A function +a = c + 1; +b = d + 10; diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/bad_miuint32.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/bad_miuint32.mat new file mode 100644 index 0000000..c9ab357 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/bad_miuint32.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat new file mode 100644 index 0000000..a17203f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/big_endian.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/big_endian.mat new file mode 100644 index 0000000..2a0c982 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/big_endian.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/broken_utf8.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/broken_utf8.mat new file mode 100644 index 0000000..4f63238 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/broken_utf8.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat 
b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat new file mode 100644 index 0000000..c88cbb6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_data.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_data.mat new file mode 100644 index 0000000..45a2ef4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/corrupted_zlib_data.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/japanese_utf8.txt b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/japanese_utf8.txt new file mode 100644 index 0000000..1459b6b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/japanese_utf8.txt @@ -0,0 +1,5 @@ +Japanese: +すべての人間は、生まれながらにして自由であり、 +かつ、尊厳と権利と について平等である。 +人間は、理性と良心とを授けられており、 +互いに同胞の精神をもって行動しなければならない。 \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/little_endian.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/little_endian.mat new file mode 100644 index 0000000..df6db66 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/little_endian.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/logical_sparse.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/logical_sparse.mat new file mode 100644 index 0000000..a60ad5b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/logical_sparse.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/malformed1.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/malformed1.mat new file mode 100644 index 0000000..54462e2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/malformed1.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/miuint32_for_miint32.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/miuint32_for_miint32.mat new file mode 100644 index 0000000..fd2c499 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/miuint32_for_miint32.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/miutf8_array_name.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/miutf8_array_name.mat new file mode 100644 index 0000000..ccfdaa8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/miutf8_array_name.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat new file mode 100644 index 0000000..35dcb71 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/one_by_zero_char.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/one_by_zero_char.mat new file mode 100644 index 0000000..07e7dca 
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/one_by_zero_char.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/parabola.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/parabola.mat new file mode 100644 index 0000000..6635053 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/parabola.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat new file mode 100644 index 0000000..293f387 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/single_empty_string.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/some_functions.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/some_functions.mat new file mode 100644 index 0000000..cc81859 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/some_functions.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/sqr.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/sqr.mat new file mode 100644 index 0000000..2436d87 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/sqr.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat new file mode 100644 index 0000000..4537126 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat new file mode 100644 index 0000000..e04d27d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat new file mode 100644 index 0000000..4c03030 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat new file mode 100644 index 0000000..232a051 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat new file mode 100644 index 0000000..30c8c8a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test_empty_struct.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test_mat4_le_floats.mat 
b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test_mat4_le_floats.mat new file mode 100644 index 0000000..6643c42 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test_mat4_le_floats.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test_skip_variable.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test_skip_variable.mat new file mode 100644 index 0000000..efbe3fe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/test_skip_variable.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testbool_8_WIN64.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testbool_8_WIN64.mat new file mode 100644 index 0000000..faa30b1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testbool_8_WIN64.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat new file mode 100644 index 0000000..512f7d8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat new file mode 100644 index 0000000..a763310 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat new file mode 100644 index 0000000..2ac1da1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat new file mode 100644 index 0000000..fc893f3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat new file mode 100644 index 0000000..4198a4f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat new file mode 100644 index 0000000..2c7826e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat new file mode 100644 index 0000000..b3b086c Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat new file mode 100644 index 0000000..316f889 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat new file mode 100644 index 0000000..36621b2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat new file mode 100644 index 0000000..32fcd2a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat new file mode 100644 index 0000000..f3ecd20 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat new file mode 100644 index 0000000..c0c0838 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat new file mode 100644 index 0000000..6a187ed Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat new file mode 100644 index 0000000..5dbfcf1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat new file mode 100644 index 0000000..8e36c0c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat new file mode 100644 index 0000000..a003b6d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat new file mode 100644 index 0000000..3106712 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat new file mode 100644 index 0000000..9097bb0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat new file mode 100644 index 0000000..e7dec3b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat new file mode 100644 index 0000000..a1c9348 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat new file mode 100644 index 0000000..f29d4f9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat new file mode 100644 index 0000000..8b24404 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat new file mode 100644 index 0000000..adb6c28 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat new file mode 100644 index 0000000..6066c1e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat new file mode 100644 index 0000000..3698c88 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat 
b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat new file mode 100644 index 0000000..164be11 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat new file mode 100644 index 0000000..a8735e9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat new file mode 100644 index 0000000..b6fb05b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat new file mode 100644 index 0000000..eb537ab Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat new file mode 100644 index 0000000..cc207ed Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat new file mode 100644 index 0000000..c2f0ba2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat new file mode 100644 index 0000000..b4dbd15 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat new file mode 100644 index 0000000..fadcd23 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat new file mode 100644 index 0000000..9ce65f9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat new file mode 100644 index 0000000..9c6ba79 Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat new file mode 100644 index 0000000..0c4729c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat new file mode 100644 index 0000000..6d3e068 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat new file mode 100644 index 0000000..fc13642 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat new file mode 100644 index 0000000..f68323b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat new file mode 100644 index 0000000..83dcad3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat new file mode 100644 index 0000000..59d243c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat new file mode 100644 index 0000000..cdb4191 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat new file mode 100644 index 0000000..3b5a428 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat new file mode 100644 index 0000000..8cef2dd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat new file mode 100644 index 0000000..5ba4810 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat new file mode 100644 index 0000000..8964765 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat new file mode 100644 index 0000000..1dcd72e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat new file mode 100644 index 0000000..55cbd3c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat new file mode 100644 index 0000000..194ca4d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat new file mode 100644 index 0000000..3e1e9a1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat new file mode 100644 index 0000000..55b5107 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat new file mode 100644 index 0000000..bdb6ce6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat new file mode 100644 index 0000000..81c536d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat 
b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat new file mode 100644 index 0000000..520e1ce Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat new file mode 100644 index 0000000..969b714 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat new file mode 100644 index 0000000..9117dce Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat new file mode 100644 index 0000000..a8a615a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat new file mode 100644 index 0000000..1542426 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat new file mode 100644 index 0000000..137561e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat new file mode 100644 index 0000000..2ad75f2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat new file mode 100644 index 0000000..6fd12d8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat new file mode 100644 index 0000000..ab93994 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat 
b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat new file mode 100644 index 0000000..63059b8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat new file mode 100644 index 0000000..fa687ee Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat new file mode 100644 index 0000000..11afb41 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat new file mode 100644 index 0000000..75e07a0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat new file mode 100644 index 0000000..7d76f63 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat new file mode 100644 index 0000000..954e39b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat new file mode 100644 index 0000000..5086bb7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat new file mode 100644 index 0000000..6feb6e4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat new file mode 100644 index 0000000..b2ff222 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat 
b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat new file mode 100644 index 0000000..028841f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat new file mode 100644 index 0000000..da57365 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat new file mode 100644 index 0000000..d1c97a7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat new file mode 100644 index 0000000..c7ca095 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat new file mode 100644 index 0000000..8716f7e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat new file mode 100644 index 0000000..2c34c4d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat new file mode 100644 index 0000000..c6dccc0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat new file mode 100644 index 0000000..0f6f544 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat new file mode 100644 index 0000000..faf9221 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat 
b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat new file mode 100644 index 0000000..1b7b3d7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat new file mode 100644 index 0000000..d22fb57 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat new file mode 100644 index 0000000..76c51d0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/gen_mat4files.m b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/gen_mat4files.m new file mode 100644 index 0000000..a67cc20 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/gen_mat4files.m @@ -0,0 +1,50 @@ +% Generates mat files for loadmat unit tests +% Uses save_matfile.m function +% This is the version for matlab 4 + +% work out matlab version and file suffix for test files +global FILEPREFIX FILESUFFIX +sepchar = '/'; +if strcmp(computer, 'PCWIN'), sepchar = '\'; end +FILEPREFIX = [pwd sepchar 'data' sepchar]; +mlv = version; +FILESUFFIX = ['_' mlv '_' computer '.mat']; + +% basic double array +theta = 0:pi/4:2*pi; +save_matfile('testdouble', theta); + +% string +save_matfile('teststring', '"Do nine men interpret?" "Nine men," I nod.') + +% complex +save_matfile('testcomplex', cos(theta) + 1j*sin(theta)); + +% asymmetric array to check indexing +a = zeros(3, 5); +a(:,1) = [1:3]'; +a(1,:) = 1:5; + +% 2D matrix +save_matfile('testmatrix', a); + +% minus number - tests signed int +save_matfile('testminus', -1); + +% single character +save_matfile('testonechar', 'r'); + +% string array +save_matfile('teststringarray', ['one '; 'two '; 'three']); + +% sparse array +save_matfile('testsparse', sparse(a)); + +% sparse complex array +b = sparse(a); +b(1,1) = b(1,1) + j; +save_matfile('testsparsecomplex', b); + +% Two variables in same file +save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta') + diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/gen_mat5files.m b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/gen_mat5files.m new file mode 100644 index 0000000..9351127 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/gen_mat5files.m @@ -0,0 +1,100 @@ +% Generates mat files for loadmat unit tests +% This is the version for matlab 5 and higher +% Uses save_matfile.m function + +% work out matlab version and file suffix for test files +global FILEPREFIX FILESUFFIX +FILEPREFIX = [fullfile(pwd, 'data') filesep]; +temp = ver('MATLAB'); +mlv = temp.Version; +FILESUFFIX = ['_' mlv '_' computer '.mat']; + +% basic double array +theta = 0:pi/4:2*pi; +save_matfile('testdouble', theta); + +% string +save_matfile('teststring', '"Do nine men interpret?" 
"Nine men," I nod.') + +% complex +save_matfile('testcomplex', cos(theta) + 1j*sin(theta)); + +% asymmetric array to check indexing +a = zeros(3, 5); +a(:,1) = [1:3]'; +a(1,:) = 1:5; + +% 2D matrix +save_matfile('testmatrix', a); + +% minus number - tests signed int +save_matfile('testminus', -1); + +% single character +save_matfile('testonechar', 'r'); + +% string array +save_matfile('teststringarray', ['one '; 'two '; 'three']); + +% sparse array +save_matfile('testsparse', sparse(a)); + +% sparse complex array +b = sparse(a); +b(1,1) = b(1,1) + j; +save_matfile('testsparsecomplex', b); + +% Two variables in same file +save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta') + + +% struct +save_matfile('teststruct', ... + struct('stringfield','Rats live on no evil star.',... + 'doublefield',[sqrt(2) exp(1) pi],... + 'complexfield',(1+1j)*[sqrt(2) exp(1) pi])); + +% cell +save_matfile('testcell', ... + {['This cell contains this string and 3 arrays of increasing' ... + ' length'], 1., 1.:2., 1.:3.}); + +% scalar cell +save_matfile('testscalarcell', {1}) + +% Empty cells in two cell matrices +save_matfile('testemptycell', {1, 2, [], [], 3}); + +% 3D matrix +save_matfile('test3dmatrix', reshape(1:24,[2 3 4])) + +% nested cell array +save_matfile('testcellnest', {1, {2, 3, {4, 5}}}); + +% nested struct +save_matfile('teststructnest', struct('one', 1, 'two', ... + struct('three', 'number 3'))); + +% array of struct +save_matfile('teststructarr', [struct('one', 1, 'two', 2) ... + struct('one', 'number 1', 'two', 'number 2')]); + +% matlab object +save_matfile('testobject', inline('x')) + +% array of matlab objects +%save_matfile('testobjarr', [inline('x') inline('x')]) + +% unicode test +if str2num(mlv) > 7 % function added 7.0.1 + fid = fopen([FILEPREFIX 'japanese_utf8.txt']); + from_japan = fread(fid, 'uint8')'; + fclose(fid); + save_matfile('testunicode', native2unicode(from_japan, 'utf-8')); +end + +% func +if str2num(mlv) > 7 % function pointers added recently + func = @afunc; + save_matfile('testfunc', func); +end \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/save_matfile.m b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/save_matfile.m new file mode 100644 index 0000000..a6ff677 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/save_matfile.m @@ -0,0 +1,6 @@ +function save_matfile(test_name, v) +% saves variable passed in m with filename from prefix + +global FILEPREFIX FILESUFFIX +eval([test_name ' = v;']); +save([FILEPREFIX test_name FILESUFFIX], test_name) \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_byteordercodes.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_byteordercodes.py new file mode 100644 index 0000000..8c7198f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_byteordercodes.py @@ -0,0 +1,31 @@ +''' Tests for byteorder module ''' + +from __future__ import division, print_function, absolute_import + +import sys + +from numpy.testing import assert_ +from pytest import raises as assert_raises + +import scipy.io.matlab.byteordercodes as sibc + + +def test_native(): + native_is_le = sys.byteorder == 'little' + assert_(sibc.sys_is_le == native_is_le) + + +def test_to_numpy(): + if sys.byteorder == 'little': + assert_(sibc.to_numpy_code('native') == '<') + assert_(sibc.to_numpy_code('swapped') == '>') + else: + assert_(sibc.to_numpy_code('native') == 
'>') + assert_(sibc.to_numpy_code('swapped') == '<') + assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('=')) + assert_(sibc.to_numpy_code('big') == '>') + for code in ('little', '<', 'l', 'L', 'le'): + assert_(sibc.to_numpy_code(code) == '<') + for code in ('big', '>', 'b', 'B', 'be'): + assert_(sibc.to_numpy_code(code) == '>') + assert_raises(ValueError, sibc.to_numpy_code, 'silly string') diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_byteordercodes.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_byteordercodes.pyc new file mode 100644 index 0000000..cccd9d5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_byteordercodes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio.py new file mode 100644 index 0000000..7cb1e77 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio.py @@ -0,0 +1,1238 @@ +# -*- coding: latin-1 -*- +''' Nose test generators + +Need function load / save / roundtrip tests + +''' +from __future__ import division, print_function, absolute_import + +import os +from collections import OrderedDict +from os.path import join as pjoin, dirname +from glob import glob +from io import BytesIO +from tempfile import mkdtemp + +from scipy._lib.six import u, text_type, string_types + +import warnings +import shutil +import gzip + +from numpy.testing import (assert_array_equal, assert_array_almost_equal, + assert_equal, assert_) +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +import numpy as np +from numpy import array +import scipy.sparse as SP + +import scipy.io.matlab.byteordercodes as boc +from scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError +from scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat) +from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader, + MatlabFunction, varmats_from_mat, + to_writeable, EmptyStructMarker) +from scipy.io.matlab import mio5_params as mio5p + +test_data_path = pjoin(dirname(__file__), 'data') + + +def mlarr(*args, **kwargs): + """Convenience function to return matlab-compatible 2D array.""" + arr = np.array(*args, **kwargs) + arr.shape = matdims(arr) + return arr + + +# Define cases to test +theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9) +case_table4 = [ + {'name': 'double', + 'classes': {'testdouble': 'double'}, + 'expected': {'testdouble': theta} + }] +case_table4.append( + {'name': 'string', + 'classes': {'teststring': 'char'}, + 'expected': {'teststring': + array([u('"Do nine men interpret?" 
"Nine men," I nod.')])} + }) +case_table4.append( + {'name': 'complex', + 'classes': {'testcomplex': 'double'}, + 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)} + }) +A = np.zeros((3,5)) +A[0] = list(range(1,6)) +A[:,0] = list(range(1,4)) +case_table4.append( + {'name': 'matrix', + 'classes': {'testmatrix': 'double'}, + 'expected': {'testmatrix': A}, + }) +case_table4.append( + {'name': 'sparse', + 'classes': {'testsparse': 'sparse'}, + 'expected': {'testsparse': SP.coo_matrix(A)}, + }) +B = A.astype(complex) +B[0,0] += 1j +case_table4.append( + {'name': 'sparsecomplex', + 'classes': {'testsparsecomplex': 'sparse'}, + 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, + }) +case_table4.append( + {'name': 'multi', + 'classes': {'theta': 'double', 'a': 'double'}, + 'expected': {'theta': theta, 'a': A}, + }) +case_table4.append( + {'name': 'minus', + 'classes': {'testminus': 'double'}, + 'expected': {'testminus': mlarr(-1)}, + }) +case_table4.append( + {'name': 'onechar', + 'classes': {'testonechar': 'char'}, + 'expected': {'testonechar': array([u('r')])}, + }) +# Cell arrays stored as object arrays +CA = mlarr(( # tuple for object array creation + [], + mlarr([1]), + mlarr([[1,2]]), + mlarr([[1,2,3]])), dtype=object).reshape(1,-1) +CA[0,0] = array( + [u('This cell contains this string and 3 arrays of increasing length')]) +case_table5 = [ + {'name': 'cell', + 'classes': {'testcell': 'cell'}, + 'expected': {'testcell': CA}}] +CAE = mlarr(( # tuple for object array creation + mlarr(1), + mlarr(2), + mlarr([]), + mlarr([]), + mlarr(3)), dtype=object).reshape(1,-1) +objarr = np.empty((1,1),dtype=object) +objarr[0,0] = mlarr(1) +case_table5.append( + {'name': 'scalarcell', + 'classes': {'testscalarcell': 'cell'}, + 'expected': {'testscalarcell': objarr} + }) +case_table5.append( + {'name': 'emptycell', + 'classes': {'testemptycell': 'cell'}, + 'expected': {'testemptycell': CAE}}) +case_table5.append( + {'name': 'stringarray', + 'classes': {'teststringarray': 'char'}, + 'expected': {'teststringarray': array( + [u('one '), u('two '), u('three')])}, + }) +case_table5.append( + {'name': '3dmatrix', + 'classes': {'test3dmatrix': 'double'}, + 'expected': { + 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))} + }) +st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3) +dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']] +st1 = np.zeros((1,1), dtype) +st1['stringfield'][0,0] = array([u('Rats live on no evil star.')]) +st1['doublefield'][0,0] = st_sub_arr +st1['complexfield'][0,0] = st_sub_arr * (1 + 1j) +case_table5.append( + {'name': 'struct', + 'classes': {'teststruct': 'struct'}, + 'expected': {'teststruct': st1} + }) +CN = np.zeros((1,2), dtype=object) +CN[0,0] = mlarr(1) +CN[0,1] = np.zeros((1,3), dtype=object) +CN[0,1][0,0] = mlarr(2, dtype=np.uint8) +CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8) +CN[0,1][0,2] = np.zeros((1,2), dtype=object) +CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8) +CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8) +case_table5.append( + {'name': 'cellnest', + 'classes': {'testcellnest': 'cell'}, + 'expected': {'testcellnest': CN}, + }) +st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']]) +st2[0,0]['one'] = mlarr(1) +st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)]) +st2[0,0]['two'][0,0]['three'] = array([u('number 3')]) +case_table5.append( + {'name': 'structnest', + 'classes': {'teststructnest': 'struct'}, + 'expected': {'teststructnest': st2} + }) +a = np.empty((1,2), dtype=[(n, object) 
for n in ['one', 'two']]) +a[0,0]['one'] = mlarr(1) +a[0,0]['two'] = mlarr(2) +a[0,1]['one'] = array([u('number 1')]) +a[0,1]['two'] = array([u('number 2')]) +case_table5.append( + {'name': 'structarr', + 'classes': {'teststructarr': 'struct'}, + 'expected': {'teststructarr': a} + }) +ODT = np.dtype([(n, object) for n in + ['expr', 'inputExpr', 'args', + 'isEmpty', 'numArgs', 'version']]) +MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline') +m0 = MO[0,0] +m0['expr'] = array([u('x')]) +m0['inputExpr'] = array([u(' x = INLINE_INPUTS_{1};')]) +m0['args'] = array([u('x')]) +m0['isEmpty'] = mlarr(0) +m0['numArgs'] = mlarr(1) +m0['version'] = mlarr(1) +case_table5.append( + {'name': 'object', + 'classes': {'testobject': 'object'}, + 'expected': {'testobject': MO} + }) +fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb') +u_str = fp_u_str.read().decode('utf-8') +fp_u_str.close() +case_table5.append( + {'name': 'unicode', + 'classes': {'testunicode': 'char'}, + 'expected': {'testunicode': array([u_str])} + }) +case_table5.append( + {'name': 'sparse', + 'classes': {'testsparse': 'sparse'}, + 'expected': {'testsparse': SP.coo_matrix(A)}, + }) +case_table5.append( + {'name': 'sparsecomplex', + 'classes': {'testsparsecomplex': 'sparse'}, + 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, + }) +case_table5.append( + {'name': 'bool', + 'classes': {'testbools': 'logical'}, + 'expected': {'testbools': + array([[True], [False]])}, + }) + +case_table5_rt = case_table5[:] +# Inline functions can't be concatenated in matlab, so RT only +case_table5_rt.append( + {'name': 'objectarray', + 'classes': {'testobjectarray': 'object'}, + 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}}) + + +def types_compatible(var1, var2): + """Check if types are same or compatible. + + 0-D numpy scalars are compatible with bare python scalars. 
+ """ + type1 = type(var1) + type2 = type(var2) + if type1 is type2: + return True + if type1 is np.ndarray and var1.shape == (): + return type(var1.item()) is type2 + if type2 is np.ndarray and var2.shape == (): + return type(var2.item()) is type1 + return False + + +def _check_level(label, expected, actual): + """ Check one level of a potentially nested array """ + if SP.issparse(expected): # allow different types of sparse matrices + assert_(SP.issparse(actual)) + assert_array_almost_equal(actual.todense(), + expected.todense(), + err_msg=label, + decimal=5) + return + # Check types are as expected + assert_(types_compatible(expected, actual), + "Expected type %s, got %s at %s" % + (type(expected), type(actual), label)) + # A field in a record array may not be an ndarray + # A scalar from a record array will be type np.void + if not isinstance(expected, + (np.void, np.ndarray, MatlabObject)): + assert_equal(expected, actual) + return + # This is an ndarray-like thing + assert_(expected.shape == actual.shape, + msg='Expected shape %s, got %s at %s' % (expected.shape, + actual.shape, + label)) + ex_dtype = expected.dtype + if ex_dtype.hasobject: # array of objects + if isinstance(expected, MatlabObject): + assert_equal(expected.classname, actual.classname) + for i, ev in enumerate(expected): + level_label = "%s, [%d], " % (label, i) + _check_level(level_label, ev, actual[i]) + return + if ex_dtype.fields: # probably recarray + for fn in ex_dtype.fields: + level_label = "%s, field %s, " % (label, fn) + _check_level(level_label, + expected[fn], actual[fn]) + return + if ex_dtype.type in (text_type, # string or bool + np.unicode_, + np.bool_): + assert_equal(actual, expected, err_msg=label) + return + # Something numeric + assert_array_almost_equal(actual, expected, err_msg=label, decimal=5) + + +def _load_check_case(name, files, case): + for file_name in files: + matdict = loadmat(file_name, struct_as_record=True) + label = "test %s; file %s" % (name, file_name) + for k, expected in case.items(): + k_label = "%s, variable %s" % (label, k) + assert_(k in matdict, "Missing key at %s" % k_label) + _check_level(k_label, expected, matdict[k]) + + +def _whos_check_case(name, files, case, classes): + for file_name in files: + label = "test %s; file %s" % (name, file_name) + + whos = whosmat(file_name) + + expected_whos = [] + for k, expected in case.items(): + expected_whos.append((k, expected.shape, classes[k])) + + whos.sort() + expected_whos.sort() + assert_equal(whos, expected_whos, + "%s: %r != %r" % (label, whos, expected_whos) + ) + + +# Round trip tests +def _rt_check_case(name, expected, format): + mat_stream = BytesIO() + savemat(mat_stream, expected, format=format) + mat_stream.seek(0) + _load_check_case(name, [mat_stream], expected) + + +# generator for load tests +def test_load(): + for case in case_table4 + case_table5: + name = case['name'] + expected = case['expected'] + filt = pjoin(test_data_path, 'test%s_*.mat' % name) + files = glob(filt) + assert_(len(files) > 0, + "No files for test %s using filter %s" % (name, filt)) + _load_check_case(name, files, expected) + + +# generator for whos tests +def test_whos(): + for case in case_table4 + case_table5: + name = case['name'] + expected = case['expected'] + classes = case['classes'] + filt = pjoin(test_data_path, 'test%s_*.mat' % name) + files = glob(filt) + assert_(len(files) > 0, + "No files for test %s using filter %s" % (name, filt)) + _whos_check_case(name, files, expected, classes) + + +# generator for round trip tests +def 
test_round_trip(): + for case in case_table4 + case_table5_rt: + case_table4_names = [case['name'] for case in case_table4] + name = case['name'] + '_round_trip' + expected = case['expected'] + for format in (['4', '5'] if case['name'] in case_table4_names else ['5']): + _rt_check_case(name, expected, format) + + +def test_gzip_simple(): + xdense = np.zeros((20,20)) + xdense[2,3] = 2.3 + xdense[4,5] = 4.5 + x = SP.csc_matrix(xdense) + + name = 'gzip_test' + expected = {'x':x} + format = '4' + + tmpdir = mkdtemp() + try: + fname = pjoin(tmpdir,name) + mat_stream = gzip.open(fname,mode='wb') + savemat(mat_stream, expected, format=format) + mat_stream.close() + + mat_stream = gzip.open(fname,mode='rb') + actual = loadmat(mat_stream, struct_as_record=True) + mat_stream.close() + finally: + shutil.rmtree(tmpdir) + + assert_array_almost_equal(actual['x'].todense(), + expected['x'].todense(), + err_msg=repr(actual)) + + +def test_multiple_open(): + # Ticket #1039, on Windows: check that files are not left open + tmpdir = mkdtemp() + try: + x = dict(x=np.zeros((2, 2))) + + fname = pjoin(tmpdir, "a.mat") + + # Check that file is not left open + savemat(fname, x) + os.unlink(fname) + savemat(fname, x) + loadmat(fname) + os.unlink(fname) + + # Check that stream is left open + f = open(fname, 'wb') + savemat(f, x) + f.seek(0) + f.close() + + f = open(fname, 'rb') + loadmat(f) + f.seek(0) + f.close() + finally: + shutil.rmtree(tmpdir) + + +def test_mat73(): + # Check any hdf5 files raise an error + filenames = glob( + pjoin(test_data_path, 'testhdf5*.mat')) + assert_(len(filenames) > 0) + for filename in filenames: + fp = open(filename, 'rb') + assert_raises(NotImplementedError, + loadmat, + fp, + struct_as_record=True) + fp.close() + + +def test_warnings(): + # This test is an echo of the previous behavior, which was to raise a + # warning if the user triggered a search for mat files on the Python system + # path. We can remove the test in the next version after upcoming (0.13) + fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat') + with warnings.catch_warnings(): + warnings.simplefilter('error') + # This should not generate a warning + mres = loadmat(fname, struct_as_record=True) + # This neither + mres = loadmat(fname, struct_as_record=False) + + +def test_regression_653(): + # Saving a dictionary with only invalid keys used to raise an error. Now we + # save this as an empty struct in matlab space. 
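The regression cases in this file all lean on the same in-memory round trip that _rt_check_case spells out above: write with savemat into a BytesIO, rewind, read back with loadmat. A minimal standalone sketch of that loop, using only numpy and scipy's public savemat/loadmat:

    import numpy as np
    from io import BytesIO
    from scipy.io import savemat, loadmat

    stream = BytesIO()
    savemat(stream, {'arr': np.arange(6).reshape(2, 3)}, format='5')
    stream.seek(0)  # rewind so loadmat starts at the file header
    back = loadmat(stream)
    assert np.array_equal(back['arr'], np.arange(6).reshape(2, 3))

whosmat performs the cheaper half of the same check, returning the (name, shape, class) triples that _whos_check_case compares, without loading the data itself.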
+ sio = BytesIO() + savemat(sio, {'d':{1:2}}, format='5') + back = loadmat(sio)['d'] + # Check we got an empty struct equivalent + assert_equal(back.shape, (1,1)) + assert_equal(back.dtype, np.dtype(object)) + assert_(back[0,0] is None) + + +def test_structname_len(): + # Test limit for length of field names in structs + lim = 31 + fldname = 'a' * lim + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + savemat(BytesIO(), {'longstruct': st1}, format='5') + fldname = 'a' * (lim+1) + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + assert_raises(ValueError, savemat, BytesIO(), + {'longstruct': st1}, format='5') + + +def test_4_and_long_field_names_incompatible(): + # Long field names option not supported in 4 + my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)]) + assert_raises(ValueError, savemat, BytesIO(), + {'my_struct':my_struct}, format='4', long_field_names=True) + + +def test_long_field_names(): + # Test limit for length of field names in structs + lim = 63 + fldname = 'a' * lim + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) + fldname = 'a' * (lim+1) + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + assert_raises(ValueError, savemat, BytesIO(), + {'longstruct': st1}, format='5',long_field_names=True) + + +def test_long_field_names_in_struct(): + # Regression test - long_field_names was erased if you passed a struct + # within a struct + lim = 63 + fldname = 'a' * lim + cell = np.ndarray((1,2),dtype=object) + st1 = np.zeros((1,1), dtype=[(fldname, object)]) + cell[0,0] = st1 + cell[0,1] = st1 + savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True) + # + # Check to make sure it fails with long field names off + # + assert_raises(ValueError, savemat, BytesIO(), + {'longstruct': cell}, format='5', long_field_names=False) + + +def test_cell_with_one_thing_in_it(): + # Regression test - make a cell array that's 1 x 2 and put two + # strings in it. It works. Make a cell array that's 1 x 1 and put + # a string in it. It should work but, in the old days, it didn't. 
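The two ceilings exercised just above are format limits rather than scipy's: level 5 MAT files cap struct field names at 31 characters by default, or 63 when savemat is called with long_field_names=True (mirroring MATLAB's older and newer name limits). A standalone sketch before the cell-array case below continues (the field and variable names are arbitrary):

    import numpy as np
    from io import BytesIO
    from scipy.io import savemat

    fld = 'a' * 40  # over the default 31-char cap, under the 63-char one
    st = np.zeros((1, 1), dtype=[(fld, object)])
    try:
        savemat(BytesIO(), {'longstruct': st}, format='5')
    except ValueError:
        pass  # rejected under the default limit
    savemat(BytesIO(), {'longstruct': st}, format='5', long_field_names=True)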
+ cells = np.ndarray((1,2),dtype=object) + cells[0,0] = 'Hello' + cells[0,1] = 'World' + savemat(BytesIO(), {'x': cells}, format='5') + + cells = np.ndarray((1,1),dtype=object) + cells[0,0] = 'Hello, world' + savemat(BytesIO(), {'x': cells}, format='5') + + +def test_writer_properties(): + # Tests getting, setting of properties of matrix writer + mfw = MatFile5Writer(BytesIO()) + assert_equal(mfw.global_vars, []) + mfw.global_vars = ['avar'] + assert_equal(mfw.global_vars, ['avar']) + assert_equal(mfw.unicode_strings, False) + mfw.unicode_strings = True + assert_equal(mfw.unicode_strings, True) + assert_equal(mfw.long_field_names, False) + mfw.long_field_names = True + assert_equal(mfw.long_field_names, True) + + +def test_use_small_element(): + # Test whether we're using small data element or not + sio = BytesIO() + wtr = MatFile5Writer(sio) + # First check size for no sde for name + arr = np.zeros(10) + wtr.put_variables({'aaaaa': arr}) + w_sz = len(sio.getvalue()) + # Check small name results in largish difference in size + sio.truncate(0) + sio.seek(0) + wtr.put_variables({'aaaa': arr}) + assert_(w_sz - len(sio.getvalue()) > 4) + # Whereas increasing name size makes less difference + sio.truncate(0) + sio.seek(0) + wtr.put_variables({'aaaaaa': arr}) + assert_(len(sio.getvalue()) - w_sz < 4) + + +def test_save_dict(): + # Test that dict can be saved (as recarray), loaded as matstruct + dict_types = ((dict, False), (OrderedDict, True),) + ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)]) + ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)]) + for dict_type, is_ordered in dict_types: + # Initialize with tuples to keep order for OrderedDict + d = dict_type([('a', 1), ('b', 2)]) + stream = BytesIO() + savemat(stream, {'dict': d}) + stream.seek(0) + vals = loadmat(stream)['dict'] + assert_equal(set(vals.dtype.names), set(['a', 'b'])) + if is_ordered: # Input was ordered, output in ab order + assert_array_equal(vals, ab_exp) + else: # Not ordered input, either order output + if vals.dtype.names[0] == 'a': + assert_array_equal(vals, ab_exp) + else: + assert_array_equal(vals, ba_exp) + + +def test_1d_shape(): + # New 5 behavior is 1D -> row vector + arr = np.arange(5) + for format in ('4', '5'): + # Column is the default + stream = BytesIO() + savemat(stream, {'oned': arr}, format=format) + vals = loadmat(stream) + assert_equal(vals['oned'].shape, (1, 5)) + # can be explicitly 'column' for oned_as + stream = BytesIO() + savemat(stream, {'oned':arr}, + format=format, + oned_as='column') + vals = loadmat(stream) + assert_equal(vals['oned'].shape, (5,1)) + # but different from 'row' + stream = BytesIO() + savemat(stream, {'oned':arr}, + format=format, + oned_as='row') + vals = loadmat(stream) + assert_equal(vals['oned'].shape, (1,5)) + + +def test_compression(): + arr = np.zeros(100).reshape((5,20)) + arr[2,10] = 1 + stream = BytesIO() + savemat(stream, {'arr':arr}) + raw_len = len(stream.getvalue()) + vals = loadmat(stream) + assert_array_equal(vals['arr'], arr) + stream = BytesIO() + savemat(stream, {'arr':arr}, do_compression=True) + compressed_len = len(stream.getvalue()) + vals = loadmat(stream) + assert_array_equal(vals['arr'], arr) + assert_(raw_len > compressed_len) + # Concatenate, test later + arr2 = arr.copy() + arr2[0,0] = 1 + stream = BytesIO() + savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False) + vals = loadmat(stream) + assert_array_equal(vals['arr2'], arr2) + stream = BytesIO() + savemat(stream, {'arr':arr, 'arr2':arr2}, 
do_compression=True) + vals = loadmat(stream) + assert_array_equal(vals['arr2'], arr2) + + +def test_single_object(): + stream = BytesIO() + savemat(stream, {'A':np.array(1, dtype=object)}) + + +def test_skip_variable(): + # Test skipping over the first of two variables in a MAT file + # using mat_reader_factory and put_variables to read them in. + # + # This is a regression test of a problem that's caused by + # using the compressed file reader seek instead of the raw file + # I/O seek when skipping over a compressed chunk. + # + # The problem arises when the chunk is large: this file has + # a 256x256 array of random (uncompressible) doubles. + # + filename = pjoin(test_data_path,'test_skip_variable.mat') + # + # Prove that it loads with loadmat + # + d = loadmat(filename, struct_as_record=True) + assert_('first' in d) + assert_('second' in d) + # + # Make the factory + # + factory, file_opened = mat_reader_factory(filename, struct_as_record=True) + # + # This is where the factory breaks with an error in MatMatrixGetter.to_next + # + d = factory.get_variables('second') + assert_('second' in d) + factory.mat_stream.close() + + +def test_empty_struct(): + # ticket 885 + filename = pjoin(test_data_path,'test_empty_struct.mat') + # before ticket fix, this would crash with ValueError, empty data + # type + d = loadmat(filename, struct_as_record=True) + a = d['a'] + assert_equal(a.shape, (1,1)) + assert_equal(a.dtype, np.dtype(object)) + assert_(a[0,0] is None) + stream = BytesIO() + arr = np.array((), dtype='U') + # before ticket fix, this used to give data type not understood + savemat(stream, {'arr':arr}) + d = loadmat(stream) + a2 = d['arr'] + assert_array_equal(a2, arr) + + +def test_save_empty_dict(): + # saving empty dict also gives empty struct + stream = BytesIO() + savemat(stream, {'arr': {}}) + d = loadmat(stream) + a = d['arr'] + assert_equal(a.shape, (1,1)) + assert_equal(a.dtype, np.dtype(object)) + assert_(a[0,0] is None) + + +def assert_any_equal(output, alternatives): + """ Assert `output` is equal to at least one element in `alternatives` + """ + one_equal = False + for expected in alternatives: + if np.all(output == expected): + one_equal = True + break + assert_(one_equal) + + +def test_to_writeable(): + # Test to_writeable function + res = to_writeable(np.array([1])) # pass through ndarrays + assert_equal(res.shape, (1,)) + assert_array_equal(res, 1) + # Dict fields can be written in any order + expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')]) + expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')]) + alternatives = (expected1, expected2) + assert_any_equal(to_writeable({'a':1,'b':2}), alternatives) + # Fields with underscores discarded + assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives) + # Not-string fields discarded + assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives) + # String fields that are valid Python identifiers discarded + assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives) + # Object with field names is equivalent + + class klass(object): + pass + + c = klass + c.a = 1 + c.b = 2 + assert_any_equal(to_writeable(c), alternatives) + # empty list and tuple go to empty array + res = to_writeable([]) + assert_equal(res.shape, (0,)) + assert_equal(res.dtype.type, np.float64) + res = to_writeable(()) + assert_equal(res.shape, (0,)) + assert_equal(res.dtype.type, np.float64) + # None -> None + assert_(to_writeable(None) is None) + # String to strings + assert_equal(to_writeable('a 
string').dtype.type, np.str_) + # Scalars to numpy to numpy scalars + res = to_writeable(1) + assert_equal(res.shape, ()) + assert_equal(res.dtype.type, np.array(1).dtype.type) + assert_array_equal(res, 1) + # Empty dict returns EmptyStructMarker + assert_(to_writeable({}) is EmptyStructMarker) + # Object does not have (even empty) __dict__ + assert_(to_writeable(object()) is None) + # Custom object does have empty __dict__, returns EmptyStructMarker + + class C(object): + pass + + assert_(to_writeable(c()) is EmptyStructMarker) + # dict keys with legal characters are convertible + res = to_writeable({'a': 1})['a'] + assert_equal(res.shape, (1,)) + assert_equal(res.dtype.type, np.object_) + # Only fields with illegal characters, falls back to EmptyStruct + assert_(to_writeable({'1':1}) is EmptyStructMarker) + assert_(to_writeable({'_a':1}) is EmptyStructMarker) + # Unless there are valid fields, in which case structured array + assert_equal(to_writeable({'1':1, 'f': 2}), + np.array([(2,)], dtype=[('f', '|O8')])) + + +def test_recarray(): + # check roundtrip of structured array + dt = [('f1', 'f8'), + ('f2', 'S10')] + arr = np.zeros((2,), dtype=dt) + arr[0]['f1'] = 0.5 + arr[0]['f2'] = 'python' + arr[1]['f1'] = 99 + arr[1]['f2'] = 'not perl' + stream = BytesIO() + savemat(stream, {'arr': arr}) + d = loadmat(stream, struct_as_record=False) + a20 = d['arr'][0,0] + assert_equal(a20.f1, 0.5) + assert_equal(a20.f2, 'python') + d = loadmat(stream, struct_as_record=True) + a20 = d['arr'][0,0] + assert_equal(a20['f1'], 0.5) + assert_equal(a20['f2'], 'python') + # structs always come back as object types + assert_equal(a20.dtype, np.dtype([('f1', 'O'), + ('f2', 'O')])) + a21 = d['arr'].flat[1] + assert_equal(a21['f1'], 99) + assert_equal(a21['f2'], 'not perl') + + +def test_save_object(): + class C(object): + pass + c = C() + c.field1 = 1 + c.field2 = 'a string' + stream = BytesIO() + savemat(stream, {'c': c}) + d = loadmat(stream, struct_as_record=False) + c2 = d['c'][0,0] + assert_equal(c2.field1, 1) + assert_equal(c2.field2, 'a string') + d = loadmat(stream, struct_as_record=True) + c2 = d['c'][0,0] + assert_equal(c2['field1'], 1) + assert_equal(c2['field2'], 'a string') + + +def test_read_opts(): + # tests if read is seeing option sets, at initialization and after + # initialization + arr = np.arange(6).reshape(1,6) + stream = BytesIO() + savemat(stream, {'a': arr}) + rdr = MatFile5Reader(stream) + back_dict = rdr.get_variables() + rarr = back_dict['a'] + assert_array_equal(rarr, arr) + rdr = MatFile5Reader(stream, squeeze_me=True) + assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,))) + rdr.squeeze_me = False + assert_array_equal(rarr, arr) + rdr = MatFile5Reader(stream, byte_order=boc.native_code) + assert_array_equal(rdr.get_variables()['a'], arr) + # inverted byte code leads to error on read because of swapped + # header etc + rdr = MatFile5Reader(stream, byte_order=boc.swapped_code) + assert_raises(Exception, rdr.get_variables) + rdr.byte_order = boc.native_code + assert_array_equal(rdr.get_variables()['a'], arr) + arr = np.array(['a string']) + stream.truncate(0) + stream.seek(0) + savemat(stream, {'a': arr}) + rdr = MatFile5Reader(stream) + assert_array_equal(rdr.get_variables()['a'], arr) + rdr = MatFile5Reader(stream, chars_as_strings=False) + carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1')) + assert_array_equal(rdr.get_variables()['a'], carr) + rdr.chars_as_strings = True + assert_array_equal(rdr.get_variables()['a'], arr) + + +def test_empty_string(): + # make 
sure reading empty string does not raise error + estring_fname = pjoin(test_data_path, 'single_empty_string.mat') + fp = open(estring_fname, 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_array_equal(d['a'], np.array([], dtype='U1')) + # empty string round trip. Matlab cannot distinguish + # between a string array that is empty, and a string array + # containing a single empty string, because it stores strings as + # arrays of char. There is no way of having an array of char that + # is not empty, but contains an empty string. + stream = BytesIO() + savemat(stream, {'a': np.array([''])}) + rdr = MatFile5Reader(stream) + d = rdr.get_variables() + assert_array_equal(d['a'], np.array([], dtype='U1')) + stream.truncate(0) + stream.seek(0) + savemat(stream, {'a': np.array([], dtype='U1')}) + rdr = MatFile5Reader(stream) + d = rdr.get_variables() + assert_array_equal(d['a'], np.array([], dtype='U1')) + stream.close() + + +def test_corrupted_data(): + import zlib + for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'), + (zlib.error, 'corrupted_zlib_checksum.mat')]: + with open(pjoin(test_data_path, fname), 'rb') as fp: + rdr = MatFile5Reader(fp) + assert_raises(exc, rdr.get_variables) + + +def test_corrupted_data_check_can_be_disabled(): + with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp: + rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False) + rdr.get_variables() + + +def test_read_both_endian(): + # make sure big- and little- endian data is read correctly + for fname in ('big_endian.mat', 'little_endian.mat'): + fp = open(pjoin(test_data_path, fname), 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_array_equal(d['strings'], + np.array([['hello'], + ['world']], dtype=object)) + assert_array_equal(d['floats'], + np.array([[2., 3.], + [3., 4.]], dtype=np.float32)) + + +def test_write_opposite_endian(): + # We don't support writing opposite endian .mat files, but we need to behave + # correctly if the user supplies an other-endian numpy array to write out + float_arr = np.array([[2., 3.], + [3., 4.]]) + int_arr = np.arange(6).reshape((2, 3)) + uni_arr = np.array(['hello', 'world'], dtype='U') + stream = BytesIO() + savemat(stream, {'floats': float_arr.byteswap().newbyteorder(), + 'ints': int_arr.byteswap().newbyteorder(), + 'uni_arr': uni_arr.byteswap().newbyteorder()}) + rdr = MatFile5Reader(stream) + d = rdr.get_variables() + assert_array_equal(d['floats'], float_arr) + assert_array_equal(d['ints'], int_arr) + assert_array_equal(d['uni_arr'], uni_arr) + stream.close() + + +def test_logical_array(): + # The roundtrip test doesn't verify that we load the data up with the + # correct (bool) dtype + with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj: + rdr = MatFile5Reader(fobj, mat_dtype=True) + d = rdr.get_variables() + x = np.array([[True], [False]], dtype=np.bool_) + assert_array_equal(d['testbools'], x) + assert_equal(d['testbools'].dtype, x.dtype) + + +def test_logical_out_type(): + # Confirm that bool type written as uint8, uint8 class + # See gh-4022 + stream = BytesIO() + barr = np.array([False, True, False]) + savemat(stream, {'barray': barr}) + stream.seek(0) + reader = MatFile5Reader(stream) + reader.initialize_read() + reader.read_file_header() + hdr, _ = reader.read_var_header() + assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS) + assert_equal(hdr.is_logical, True) + var = reader.read_var_array(hdr, False) + assert_equal(var.dtype.type, np.uint8) + + +def 
test_mat4_3d(): + # test behavior when writing 3D arrays to matlab 4 files + stream = BytesIO() + arr = np.arange(24).reshape((2,3,4)) + assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4') + + +def test_func_read(): + func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat') + fp = open(func_eg, 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_(isinstance(d['testfunc'], MatlabFunction)) + stream = BytesIO() + wtr = MatFile5Writer(stream) + assert_raises(MatWriteError, wtr.put_variables, d) + + +def test_mat_dtype(): + double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat') + fp = open(double_eg, 'rb') + rdr = MatFile5Reader(fp, mat_dtype=False) + d = rdr.get_variables() + fp.close() + assert_equal(d['testmatrix'].dtype.kind, 'u') + + fp = open(double_eg, 'rb') + rdr = MatFile5Reader(fp, mat_dtype=True) + d = rdr.get_variables() + fp.close() + assert_equal(d['testmatrix'].dtype.kind, 'f') + + +def test_sparse_in_struct(): + # reproduces bug found by DC where Cython code was insisting on + # ndarray return type, but getting sparse matrix + st = {'sparsefield': SP.coo_matrix(np.eye(4))} + stream = BytesIO() + savemat(stream, {'a':st}) + d = loadmat(stream, struct_as_record=True) + assert_array_equal(d['a'][0,0]['sparsefield'].todense(), np.eye(4)) + + +def test_mat_struct_squeeze(): + stream = BytesIO() + in_d = {'st':{'one':1, 'two':2}} + savemat(stream, in_d) + # no error without squeeze + out_d = loadmat(stream, struct_as_record=False) + # previous error was with squeeze, with mat_struct + out_d = loadmat(stream, + struct_as_record=False, + squeeze_me=True, + ) + + +def test_scalar_squeeze(): + stream = BytesIO() + in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}} + savemat(stream, in_d) + out_d = loadmat(stream, squeeze_me=True) + assert_(isinstance(out_d['scalar'], float)) + assert_(isinstance(out_d['string'], string_types)) + assert_(isinstance(out_d['st'], np.ndarray)) + + +def test_str_round(): + # from report by Angus McMorland on mailing list 3 May 2010 + stream = BytesIO() + in_arr = np.array(['Hello', 'Foob']) + out_arr = np.array(['Hello', 'Foob ']) + savemat(stream, dict(a=in_arr)) + res = loadmat(stream) + # resulted in ['HloolFoa', 'elWrdobr'] + assert_array_equal(res['a'], out_arr) + stream.truncate(0) + stream.seek(0) + # Make Fortran ordered version of string + in_str = in_arr.tostring(order='F') + in_from_str = np.ndarray(shape=a.shape, + dtype=in_arr.dtype, + order='F', + buffer=in_str) + savemat(stream, dict(a=in_from_str)) + assert_array_equal(res['a'], out_arr) + # unicode save did lead to buffer too small error + stream.truncate(0) + stream.seek(0) + in_arr_u = in_arr.astype('U') + out_arr_u = out_arr.astype('U') + savemat(stream, {'a': in_arr_u}) + res = loadmat(stream) + assert_array_equal(res['a'], out_arr_u) + + +def test_fieldnames(): + # Check that field names are as expected + stream = BytesIO() + savemat(stream, {'a': {'a':1, 'b':2}}) + res = loadmat(stream) + field_names = res['a'].dtype.names + assert_equal(set(field_names), set(('a', 'b'))) + + +def test_loadmat_varnames(): + # Test that we can get just one variable from a mat file using loadmat + mat5_sys_names = ['__globals__', + '__header__', + '__version__'] + for eg_file, sys_v_names in ( + (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin( + test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)): + vars = loadmat(eg_file) + assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names)) + vars = 
loadmat(eg_file, variable_names='a') + assert_equal(set(vars.keys()), set(['a'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=['a']) + assert_equal(set(vars.keys()), set(['a'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=['theta']) + assert_equal(set(vars.keys()), set(['theta'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=('theta',)) + assert_equal(set(vars.keys()), set(['theta'] + sys_v_names)) + vars = loadmat(eg_file, variable_names=[]) + assert_equal(set(vars.keys()), set(sys_v_names)) + vnames = ['theta'] + vars = loadmat(eg_file, variable_names=vnames) + assert_equal(vnames, ['theta']) + + +def test_round_types(): + # Check that saving, loading preserves dtype in most cases + arr = np.arange(10) + stream = BytesIO() + for dts in ('f8','f4','i8','i4','i2','i1', + 'u8','u4','u2','u1','c16','c8'): + stream.truncate(0) + stream.seek(0) # needed for BytesIO in python 3 + savemat(stream, {'arr': arr.astype(dts)}) + vars = loadmat(stream) + assert_equal(np.dtype(dts), vars['arr'].dtype) + + +def test_varmats_from_mat(): + # Make a mat file with several variables, write it, read it back + names_vars = (('arr', mlarr(np.arange(10))), + ('mystr', mlarr('a string')), + ('mynum', mlarr(10))) + + # Dict like thing to give variables in defined order + class C(object): + def items(self): + return names_vars + stream = BytesIO() + savemat(stream, C()) + varmats = varmats_from_mat(stream) + assert_equal(len(varmats), 3) + for i in range(3): + name, var_stream = varmats[i] + exp_name, exp_res = names_vars[i] + assert_equal(name, exp_name) + res = loadmat(var_stream) + assert_array_equal(res[name], exp_res) + + +def test_one_by_zero(): + # Test 1x0 chars get read correctly + func_eg = pjoin(test_data_path, 'one_by_zero_char.mat') + fp = open(func_eg, 'rb') + rdr = MatFile5Reader(fp) + d = rdr.get_variables() + fp.close() + assert_equal(d['var'].shape, (0,)) + + +def test_load_mat4_le(): + # We were getting byte order wrong when reading little-endian floa64 dense + # matrices on big-endian platforms + mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat') + vars = loadmat(mat4_fname) + assert_array_equal(vars['a'], [[0.1, 1.2]]) + + +def test_unicode_mat4(): + # Mat4 should save unicode as latin1 + bio = BytesIO() + var = {'second_cat': u('Schrödinger')} + savemat(bio, var, format='4') + var_back = loadmat(bio) + assert_equal(var_back['second_cat'], var['second_cat']) + + +def test_logical_sparse(): + # Test we can read logical sparse stored in mat file as bytes. + # See https://github.com/scipy/scipy/issues/3539. + # In some files saved by MATLAB, the sparse data elements (Real Part + # Subelement in MATLAB speak) are stored with apparent type double + # (miDOUBLE) but are in fact single bytes. + filename = pjoin(test_data_path,'logical_sparse.mat') + # Before fix, this would crash with: + # ValueError: indices and data should have the same size + d = loadmat(filename, struct_as_record=True) + log_sp = d['sp_log_5_4'] + assert_(isinstance(log_sp, SP.csc_matrix)) + assert_equal(log_sp.dtype.type, np.bool_) + assert_array_equal(log_sp.toarray(), + [[True, True, True, False], + [False, False, True, False], + [False, False, True, False], + [False, False, False, False], + [False, False, False, False]]) + + +def test_empty_sparse(): + # Can we read empty sparse matrices? 
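Before the empty edge case below: in scipy's reader every sparse variable materialises as scipy.sparse.csc_matrix on load (test_logical_sparse above relies on this), whatever sparse type went in, and sparse data passes through do_compression like any dense array. A short sketch:

    import numpy as np
    import scipy.sparse as sp
    from io import BytesIO
    from scipy.io import savemat, loadmat

    x = sp.coo_matrix(np.eye(3))
    sio = BytesIO()
    savemat(sio, {'x': x}, do_compression=True)
    sio.seek(0)
    back = loadmat(sio)['x']
    assert isinstance(back, sp.csc_matrix)
    assert np.array_equal(back.toarray(), np.eye(3))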
+ sio = BytesIO() + import scipy.sparse + empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]]) + savemat(sio, dict(x=empty_sparse)) + sio.seek(0) + res = loadmat(sio) + assert_array_equal(res['x'].shape, empty_sparse.shape) + assert_array_equal(res['x'].todense(), 0) + # Do empty sparse matrices get written with max nnz 1? + # See https://github.com/scipy/scipy/issues/4208 + sio.seek(0) + reader = MatFile5Reader(sio) + reader.initialize_read() + reader.read_file_header() + hdr, _ = reader.read_var_header() + assert_equal(hdr.nzmax, 1) + + +def test_empty_mat_error(): + # Test we get a specific warning for an empty mat file + sio = BytesIO() + assert_raises(MatReadError, loadmat, sio) + + +def test_miuint32_compromise(): + # Reader should accept miUINT32 for miINT32, but check signs + # mat file with miUINT32 for miINT32, but OK values + filename = pjoin(test_data_path, 'miuint32_for_miint32.mat') + res = loadmat(filename) + assert_equal(res['an_array'], np.arange(10)[None, :]) + # mat file with miUINT32 for miINT32, with negative value + filename = pjoin(test_data_path, 'bad_miuint32.mat') + with suppress_warnings() as sup: + sup.filter(message="unclosed file") # Py3k ResourceWarning + assert_raises(ValueError, loadmat, filename) + + +def test_miutf8_for_miint8_compromise(): + # Check reader accepts ascii as miUTF8 for array names + filename = pjoin(test_data_path, 'miutf8_array_name.mat') + res = loadmat(filename) + assert_equal(res['array_name'], [[1]]) + # mat file with non-ascii utf8 name raises error + filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat') + with suppress_warnings() as sup: + sup.filter(message="unclosed file") # Py3k ResourceWarning + assert_raises(ValueError, loadmat, filename) + + +def test_bad_utf8(): + # Check that reader reads bad UTF with 'replace' option + filename = pjoin(test_data_path,'broken_utf8.mat') + res = loadmat(filename) + assert_equal(res['bad_string'], + b'\x80 am broken'.decode('utf8', 'replace')) + + +def test_save_unicode_field(tmpdir): + filename = os.path.join(str(tmpdir), 'test.mat') + test_dict = {u'a':{u'b':1,u'c':'test_str'}} + savemat(filename, test_dict) + + +def test_filenotfound(): + # Check the correct error is thrown + assert_raises(IOError, loadmat, "NotExistentFile00.mat") + assert_raises(IOError, loadmat, "NotExistentFile00") diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio.pyc new file mode 100644 index 0000000..f8b4dba Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio5_utils.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio5_utils.py new file mode 100644 index 0000000..267ce18 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio5_utils.py @@ -0,0 +1,185 @@ +""" Testing mio5_utils Cython module + +""" +from __future__ import division, print_function, absolute_import + +import sys + +from io import BytesIO +cStringIO = BytesIO + +import numpy as np + +from numpy.testing import assert_array_equal, assert_equal, assert_ +from pytest import raises as assert_raises + +from scipy._lib.six import u + +import scipy.io.matlab.byteordercodes as boc +import scipy.io.matlab.streams as streams +import scipy.io.matlab.mio5_params as mio5p +import scipy.io.matlab.mio5_utils as m5u + + +def test_byteswap(): + for 
val in ( + 1, + 0x100, + 0x10000): + a = np.array(val, dtype=np.uint32) + b = a.byteswap() + c = m5u.byteswap_u4(a) + assert_equal(b.item(), c) + d = m5u.byteswap_u4(c) + assert_equal(a.item(), d) + + +def _make_tag(base_dt, val, mdtype, sde=False): + ''' Makes a simple matlab tag, full or sde ''' + base_dt = np.dtype(base_dt) + bo = boc.to_numpy_code(base_dt.byteorder) + byte_count = base_dt.itemsize + if not sde: + udt = bo + 'u4' + padding = 8 - (byte_count % 8) + all_dt = [('mdtype', udt), + ('byte_count', udt), + ('val', base_dt)] + if padding: + all_dt.append(('padding', 'u1', padding)) + else: # is sde + udt = bo + 'u2' + padding = 4-byte_count + if bo == '<': # little endian + all_dt = [('mdtype', udt), + ('byte_count', udt), + ('val', base_dt)] + else: # big endian + all_dt = [('byte_count', udt), + ('mdtype', udt), + ('val', base_dt)] + if padding: + all_dt.append(('padding', 'u1', padding)) + tag = np.zeros((1,), dtype=all_dt) + tag['mdtype'] = mdtype + tag['byte_count'] = byte_count + tag['val'] = val + return tag + + +def _write_stream(stream, *strings): + stream.truncate(0) + stream.seek(0) + for s in strings: + stream.write(s) + stream.seek(0) + + +def _make_readerlike(stream, byte_order=boc.native_code): + class R(object): + pass + r = R() + r.mat_stream = stream + r.byte_order = byte_order + r.struct_as_record = True + r.uint16_codec = sys.getdefaultencoding() + r.chars_as_strings = False + r.mat_dtype = False + r.squeeze_me = False + return r + + +def test_read_tag(): + # mainly to test errors + # make reader-like thing + str_io = BytesIO() + r = _make_readerlike(str_io) + c_reader = m5u.VarReader5(r) + # This works for StringIO but _not_ cStringIO + assert_raises(IOError, c_reader.read_tag) + # bad SDE + tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) + tag['byte_count'] = 5 + _write_stream(str_io, tag.tostring()) + assert_raises(ValueError, c_reader.read_tag) + + +def test_read_stream(): + tag = _make_tag('i4', 1, mio5p.miINT32, sde=True) + tag_str = tag.tostring() + str_io = cStringIO(tag_str) + st = streams.make_stream(str_io) + s = streams._read_into(st, tag.itemsize) + assert_equal(s, tag.tostring()) + + +def test_read_numeric(): + # make reader-like thing + str_io = cStringIO() + r = _make_readerlike(str_io) + # check simplest of tags + for base_dt, val, mdtype in (('u2', 30, mio5p.miUINT16), + ('i4', 1, mio5p.miINT32), + ('i2', -1, mio5p.miINT16)): + for byte_code in ('<', '>'): + r.byte_order = byte_code + c_reader = m5u.VarReader5(r) + assert_equal(c_reader.little_endian, byte_code == '<') + assert_equal(c_reader.is_swapped, byte_code != boc.native_code) + for sde_f in (False, True): + dt = np.dtype(base_dt).newbyteorder(byte_code) + a = _make_tag(dt, val, mdtype, sde_f) + a_str = a.tostring() + _write_stream(str_io, a_str) + el = c_reader.read_numeric() + assert_equal(el, val) + # two sequential reads + _write_stream(str_io, a_str, a_str) + el = c_reader.read_numeric() + assert_equal(el, val) + el = c_reader.read_numeric() + assert_equal(el, val) + + +def test_read_numeric_writeable(): + # make reader-like thing + str_io = cStringIO() + r = _make_readerlike(str_io, '<') + c_reader = m5u.VarReader5(r) + dt = np.dtype('<u2') + a = _make_tag(dt, 30, mio5p.miUINT16, 0) + a_str = a.tostring() + _write_stream(str_io, a_str) + el = c_reader.read_numeric() + assert_(el.flags.writeable is True) + + +def test_zero_byte_string(): + # Tests hack to allow chars of non-zero length, but 0 bytes + # make reader-like thing + str_io = cStringIO() + r = _make_readerlike(str_io, 
boc.native_code) + c_reader = m5u.VarReader5(r) + tag_dt = np.dtype([('mdtype', 'u4'), ('byte_count', 'u4')]) + tag = np.zeros((1,), dtype=tag_dt) + tag['mdtype'] = mio5p.miINT8 + tag['byte_count'] = 1 + hdr = m5u.VarHeader5() + # Try when string is 1 length + hdr.set_dims([1,]) + _write_stream(str_io, tag.tostring() + b' ') + str_io.seek(0) + val = c_reader.read_char(hdr) + assert_equal(val, u(' ')) + # Now when string has 0 bytes 1 length + tag['byte_count'] = 0 + _write_stream(str_io, tag.tostring()) + str_io.seek(0) + val = c_reader.read_char(hdr) + assert_equal(val, u(' ')) + # Now when string has 0 bytes 4 length + str_io.seek(0) + hdr.set_dims([4,]) + val = c_reader.read_char(hdr) + assert_array_equal(val, [u(' ')] * 4) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio5_utils.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio5_utils.pyc new file mode 100644 index 0000000..d86b3d3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio5_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_funcs.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_funcs.py new file mode 100644 index 0000000..06df362 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_funcs.py @@ -0,0 +1,57 @@ +''' Jottings to work out format for __function_workspace__ matrix at end +of mat file. + +''' +from __future__ import division, print_function, absolute_import + +import os.path +import sys +import io + +from numpy.compat import asstr + +from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, + MatFile5Reader, MatlabFunction) + +test_data_path = os.path.join(os.path.dirname(__file__), 'data') + + +def read_minimat_vars(rdr): + rdr.initialize_read() + mdict = {'__globals__': []} + i = 0 + while not rdr.end_of_stream(): + hdr, next_position = rdr.read_var_header() + name = asstr(hdr.name) + if name == '': + name = 'var_%d' % i + i += 1 + res = rdr.read_var_array(hdr, process=False) + rdr.mat_stream.seek(next_position) + mdict[name] = res + if hdr.is_global: + mdict['__globals__'].append(name) + return mdict + + +def read_workspace_vars(fname): + fp = open(fname, 'rb') + rdr = MatFile5Reader(fp, struct_as_record=True) + vars = rdr.get_variables() + fws = vars['__function_workspace__'] + ws_bs = io.BytesIO(fws.tostring()) + ws_bs.seek(2) + rdr.mat_stream = ws_bs + # Guess byte order. 
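+ # The level 5 format stores a two-character endian indicator in its
+ # header; reading it back as b'IM' means the data were written
+ # little-endian ('<'), b'MI' means big-endian ('>').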
+ mi = rdr.mat_stream.read(2) + rdr.byte_order = mi == b'IM' and '<' or '>' + rdr.mat_stream.read(4) # presumably byte padding + mdict = read_minimat_vars(rdr) + fp.close() + return mdict + + +def test_jottings(): + # example + fname = os.path.join(test_data_path, 'parabola.mat') + ws_vars = read_workspace_vars(fname) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_funcs.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_funcs.pyc new file mode 100644 index 0000000..b57772c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_funcs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_utils.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_utils.py new file mode 100644 index 0000000..1b2ae86 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_utils.py @@ -0,0 +1,46 @@ +""" Testing + +""" + +from __future__ import division, print_function, absolute_import + +import numpy as np + +from numpy.testing import assert_array_equal, assert_array_almost_equal, \ + assert_ + +from scipy.io.matlab.mio_utils import squeeze_element, chars_to_strings + + +def test_squeeze_element(): + a = np.zeros((1,3)) + assert_array_equal(np.squeeze(a), squeeze_element(a)) + # 0d output from squeeze gives scalar + sq_int = squeeze_element(np.zeros((1,1), dtype=float)) + assert_(isinstance(sq_int, float)) + # Unless it's a structured array + sq_sa = squeeze_element(np.zeros((1,1),dtype=[('f1', 'f')])) + assert_(isinstance(sq_sa, np.ndarray)) + + +def test_chars_strings(): + # chars as strings + strings = ['learn ', 'python', 'fast ', 'here '] + str_arr = np.array(strings, dtype='U6') # shape (4,) + chars = [list(s) for s in strings] + char_arr = np.array(chars, dtype='U1') # shape (4,6) + assert_array_equal(chars_to_strings(char_arr), str_arr) + ca2d = char_arr.reshape((2,2,6)) + sa2d = str_arr.reshape((2,2)) + assert_array_equal(chars_to_strings(ca2d), sa2d) + ca3d = char_arr.reshape((1,2,2,6)) + sa3d = str_arr.reshape((1,2,2)) + assert_array_equal(chars_to_strings(ca3d), sa3d) + # Fortran ordered arrays + char_arrf = np.array(chars, dtype='U1', order='F') # shape (4,6) + assert_array_equal(chars_to_strings(char_arrf), str_arr) + # empty array + arr = np.array([['']], dtype='U1') + out_arr = np.array([''], dtype='U1') + assert_array_equal(chars_to_strings(arr), out_arr) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_utils.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_utils.pyc new file mode 100644 index 0000000..ddfe5f8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_miobase.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_miobase.py new file mode 100644 index 0000000..0d1d638 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_miobase.py @@ -0,0 +1,31 @@ +""" Testing miobase module +""" + +import numpy as np + +from numpy.testing import assert_equal +from pytest import raises as assert_raises + +from scipy.io.matlab.miobase import matdims + + +def test_matdims(): + # Test matdims dimension finder + assert_equal(matdims(np.array(1)), (1, 1)) # numpy scalar + assert_equal(matdims(np.array([1])), (1, 1)) # 1d array, 1 element 
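+ # (matdims maps numpy shapes onto MATLAB's convention that every value
+ # is at least 2-D; 1-D arrays become column vectors unless oned_as='row'
+ # is requested, as the cases below show)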
+ assert_equal(matdims(np.array([1,2])), (2, 1)) # 1d array, 2 elements + assert_equal(matdims(np.array([[2],[3]])), (2, 1)) # 2d array, column vector + assert_equal(matdims(np.array([[2,3]])), (1, 2)) # 2d array, row vector + # 3d array, rowish vector + assert_equal(matdims(np.array([[[2,3]]])), (1, 1, 2)) + assert_equal(matdims(np.array([])), (0, 0)) # empty 1d array + assert_equal(matdims(np.array([[]])), (0, 0)) # empty 2d + assert_equal(matdims(np.array([[[]]])), (0, 0, 0)) # empty 3d + # Optional argument flips 1-D shape behavior. + assert_equal(matdims(np.array([1,2]), 'row'), (1, 2)) # 1d array, 2 elements + # The argument has to make sense though + assert_raises(ValueError, matdims, np.array([1,2]), 'bizarre') + # Check empty sparse matrices get their own shape + from scipy.sparse import csr_matrix, csc_matrix + assert_equal(matdims(csr_matrix(np.zeros((3, 3)))), (3, 3)) + assert_equal(matdims(csc_matrix(np.zeros((2, 2)))), (2, 2)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_miobase.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_miobase.pyc new file mode 100644 index 0000000..95b626e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_miobase.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_pathological.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_pathological.py new file mode 100644 index 0000000..4b290ff --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_pathological.py @@ -0,0 +1,35 @@ +""" Test reading of files not conforming to matlab specification + +We try and read any file that matlab reads, these files included +""" +from __future__ import division, print_function, absolute_import + +from os.path import dirname, join as pjoin + +from numpy.testing import assert_ +from pytest import raises as assert_raises + +from scipy.io.matlab.mio import loadmat + +TEST_DATA_PATH = pjoin(dirname(__file__), 'data') + + +def test_multiple_fieldnames(): + # Example provided by Dharhas Pothina + # Extracted using mio5.varmats_from_mat + multi_fname = pjoin(TEST_DATA_PATH, 'nasty_duplicate_fieldnames.mat') + vars = loadmat(multi_fname) + funny_names = vars['Summary'].dtype.names + assert_(set(['_1_Station_Q', '_2_Station_Q', + '_3_Station_Q']).issubset(funny_names)) + + +def test_malformed1(): + # Example from gh-6072 + # Contains malformed header data, which previously resulted into a + # buffer overflow. 
+ # + # Should raise an exception, not segfault + fname = pjoin(TEST_DATA_PATH, 'malformed1.mat') + with open(fname, 'rb') as f: + assert_raises(ValueError, loadmat, f) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_pathological.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_pathological.pyc new file mode 100644 index 0000000..b9835fd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_pathological.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_streams.py b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_streams.py new file mode 100644 index 0000000..9f4ac33 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_streams.py @@ -0,0 +1,184 @@ +""" Testing + +""" + +from __future__ import division, print_function, absolute_import + +import os +import sys +import zlib + +from io import BytesIO + +if sys.version_info[0] >= 3: + cStringIO = BytesIO +else: + from cStringIO import StringIO as cStringIO + +from tempfile import mkstemp +from contextlib import contextmanager + +import numpy as np + +from numpy.testing import assert_, assert_equal +from pytest import raises as assert_raises + +from scipy.io.matlab.streams import (make_stream, + GenericStream, cStringStream, FileStream, ZlibInputStream, + _read_into, _read_string) + +IS_PYPY = ('__pypy__' in sys.modules) + + +@contextmanager +def setup_test_file(): + val = b'a\x00string' + fd, fname = mkstemp() + + with os.fdopen(fd, 'wb') as fs: + fs.write(val) + with open(fname, 'rb') as fs: + gs = BytesIO(val) + cs = cStringIO(val) + yield fs, gs, cs + os.unlink(fname) + + +def test_make_stream(): + with setup_test_file() as (fs, gs, cs): + # test stream initialization + assert_(isinstance(make_stream(gs), GenericStream)) + if sys.version_info[0] < 3 and not IS_PYPY: + assert_(isinstance(make_stream(cs), cStringStream)) + assert_(isinstance(make_stream(fs), FileStream)) + + +def test_tell_seek(): + with setup_test_file() as (fs, gs, cs): + for s in (fs, gs, cs): + st = make_stream(s) + res = st.seek(0) + assert_equal(res, 0) + assert_equal(st.tell(), 0) + res = st.seek(5) + assert_equal(res, 0) + assert_equal(st.tell(), 5) + res = st.seek(2, 1) + assert_equal(res, 0) + assert_equal(st.tell(), 7) + res = st.seek(-2, 2) + assert_equal(res, 0) + assert_equal(st.tell(), 6) + + +def test_read(): + with setup_test_file() as (fs, gs, cs): + for s in (fs, gs, cs): + st = make_stream(s) + st.seek(0) + res = st.read(-1) + assert_equal(res, b'a\x00string') + st.seek(0) + res = st.read(4) + assert_equal(res, b'a\x00st') + # read into + st.seek(0) + res = _read_into(st, 4) + assert_equal(res, b'a\x00st') + res = _read_into(st, 4) + assert_equal(res, b'ring') + assert_raises(IOError, _read_into, st, 2) + # read alloc + st.seek(0) + res = _read_string(st, 4) + assert_equal(res, b'a\x00st') + res = _read_string(st, 4) + assert_equal(res, b'ring') + assert_raises(IOError, _read_string, st, 2) + + +class TestZlibInputStream(object): + def _get_data(self, size): + data = np.random.randint(0, 256, size).astype(np.uint8).tostring() + compressed_data = zlib.compress(data) + stream = BytesIO(compressed_data) + return stream, len(compressed_data), data + + def test_read(self): + block_size = 131072 + + SIZES = [0, 1, 10, block_size//2, block_size-1, + block_size, block_size+1, 2*block_size-1] + + READ_SIZES = [block_size//2, block_size-1, + block_size, block_size+1] + + 
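+ # check() pushes `size` bytes of random (hence poorly compressible)
+ # data through ZlibInputStream in chunks of `read_size` and verifies
+ # that the reassembled payload matches the original.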
def check(size, read_size): + compressed_stream, compressed_data_len, data = self._get_data(size) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + data2 = b'' + so_far = 0 + while True: + block = stream.read(min(read_size, + size - so_far)) + if not block: + break + so_far += len(block) + data2 += block + assert_equal(data, data2) + + for size in SIZES: + for read_size in READ_SIZES: + check(size, read_size) + + def test_read_max_length(self): + size = 1234 + data = np.random.randint(0, 256, size).astype(np.uint8).tostring() + compressed_data = zlib.compress(data) + compressed_stream = BytesIO(compressed_data + b"abbacaca") + stream = ZlibInputStream(compressed_stream, len(compressed_data)) + + stream.read(len(data)) + assert_equal(compressed_stream.tell(), len(compressed_data)) + + assert_raises(IOError, stream.read, 1) + + def test_seek(self): + compressed_stream, compressed_data_len, data = self._get_data(1024) + + stream = ZlibInputStream(compressed_stream, compressed_data_len) + + stream.seek(123) + p = 123 + assert_equal(stream.tell(), p) + d1 = stream.read(11) + assert_equal(d1, data[p:p+11]) + + stream.seek(321, 1) + p = 123+11+321 + assert_equal(stream.tell(), p) + d2 = stream.read(21) + assert_equal(d2, data[p:p+21]) + + stream.seek(641, 0) + p = 641 + assert_equal(stream.tell(), p) + d3 = stream.read(11) + assert_equal(d3, data[p:p+11]) + + assert_raises(IOError, stream.seek, 10, 2) + assert_raises(IOError, stream.seek, -1, 1) + assert_raises(ValueError, stream.seek, 1, 123) + + stream.seek(10000, 1) + assert_raises(IOError, stream.read, 12) + + def test_all_data_read(self): + compressed_stream, compressed_data_len, data = self._get_data(1024) + stream = ZlibInputStream(compressed_stream, compressed_data_len) + assert_(not stream.all_data_read()) + stream.seek(512) + assert_(not stream.all_data_read()) + stream.seek(1024) + assert_(stream.all_data_read()) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_streams.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_streams.pyc new file mode 100644 index 0000000..8fdb9a2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_streams.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/mmio.py b/project/venv/lib/python2.7/site-packages/scipy/io/mmio.py new file mode 100644 index 0000000..753c1a4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/mmio.py @@ -0,0 +1,835 @@ +""" + Matrix Market I/O in Python. + See http://math.nist.gov/MatrixMarket/formats.html + for information about the Matrix Market format. +""" +# +# Author: Pearu Peterson <pearu@cens.ioc.ee> +# Created: October, 2004 +# +# References: +# http://math.nist.gov/MatrixMarket/ +# +from __future__ import division, print_function, absolute_import + +import os +import sys + +from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate, + ones, can_cast) +from numpy.compat import asbytes, asstr + +from scipy._lib.six import string_types +from scipy.sparse import coo_matrix, isspmatrix + +__all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile'] + + +# ----------------------------------------------------------------------------- +def mminfo(source): + """ + Return size and storage parameters from Matrix Market file-like 'source'. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extension .mtx) or open file-like object + + Returns + ------- + rows : int + Number of matrix rows. 
+ cols : int + Number of matrix columns. + entries : int + Number of non-zero entries of a sparse matrix + or rows*cols for a dense matrix. + format : str + Either 'coordinate' or 'array'. + field : str + Either 'real', 'complex', 'pattern', or 'integer'. + symmetry : str + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + """ + return MMFile.info(source) + +# ----------------------------------------------------------------------------- + + +def mmread(source): + """ + Reads the contents of a Matrix Market file-like 'source' into a matrix. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extensions .mtx, .mtz.gz) + or open file-like object. + + Returns + ------- + a : ndarray or coo_matrix + Dense or sparse matrix depending on the matrix format in the + Matrix Market file. + """ + return MMFile().read(source) + +# ----------------------------------------------------------------------------- + + +def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None): + """ + Writes the sparse or dense array `a` to Matrix Market file-like `target`. + + Parameters + ---------- + target : str or file-like + Matrix Market filename (extension .mtx) or open file-like object. + a : array like + Sparse or dense 2D array. + comment : str, optional + Comments to be prepended to the Matrix Market file. + field : None or str, optional + Either 'real', 'complex', 'pattern', or 'integer'. + precision : None or int, optional + Number of digits to display for real or complex values. + symmetry : None or str, optional + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + If symmetry is None the symmetry type of 'a' is determined by its + values. + """ + MMFile().write(target, a, comment, field, precision, symmetry) + + +############################################################################### +class MMFile (object): + __slots__ = ('_rows', + '_cols', + '_entries', + '_format', + '_field', + '_symmetry') + + @property + def rows(self): + return self._rows + + @property + def cols(self): + return self._cols + + @property + def entries(self): + return self._entries + + @property + def format(self): + return self._format + + @property + def field(self): + return self._field + + @property + def symmetry(self): + return self._symmetry + + @property + def has_symmetry(self): + return self._symmetry in (self.SYMMETRY_SYMMETRIC, + self.SYMMETRY_SKEW_SYMMETRIC, + self.SYMMETRY_HERMITIAN) + + # format values + FORMAT_COORDINATE = 'coordinate' + FORMAT_ARRAY = 'array' + FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY) + + @classmethod + def _validate_format(self, format): + if format not in self.FORMAT_VALUES: + raise ValueError('unknown format type %s, must be one of %s' % + (format, self.FORMAT_VALUES)) + + # field values + FIELD_INTEGER = 'integer' + FIELD_UNSIGNED = 'unsigned-integer' + FIELD_REAL = 'real' + FIELD_COMPLEX = 'complex' + FIELD_PATTERN = 'pattern' + FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN) + + @classmethod + def _validate_field(self, field): + if field not in self.FIELD_VALUES: + raise ValueError('unknown field type %s, must be one of %s' % + (field, self.FIELD_VALUES)) + + # symmetry values + SYMMETRY_GENERAL = 'general' + SYMMETRY_SYMMETRIC = 'symmetric' + SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric' + SYMMETRY_HERMITIAN = 'hermitian' + SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC, + SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN) + + @classmethod + def 
_validate_symmetry(self, symmetry): + if symmetry not in self.SYMMETRY_VALUES: + raise ValueError('unknown symmetry type %s, must be one of %s' % + (symmetry, self.SYMMETRY_VALUES)) + + DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp', + FIELD_UNSIGNED: 'uint64', + FIELD_REAL: 'd', + FIELD_COMPLEX: 'D', + FIELD_PATTERN: 'd'} + + # ------------------------------------------------------------------------- + @staticmethod + def reader(): + pass + + # ------------------------------------------------------------------------- + @staticmethod + def writer(): + pass + + # ------------------------------------------------------------------------- + @classmethod + def info(self, source): + """ + Return size, storage parameters from Matrix Market file-like 'source'. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extension .mtx) or open file-like object + + Returns + ------- + rows : int + Number of matrix rows. + cols : int + Number of matrix columns. + entries : int + Number of non-zero entries of a sparse matrix + or rows*cols for a dense matrix. + format : str + Either 'coordinate' or 'array'. + field : str + Either 'real', 'complex', 'pattern', or 'integer'. + symmetry : str + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + """ + + stream, close_it = self._open(source) + + try: + + # read and validate header line + line = stream.readline() + mmid, matrix, format, field, symmetry = \ + [asstr(part.strip()) for part in line.split()] + if not mmid.startswith('%%MatrixMarket'): + raise ValueError('source is not in Matrix Market format') + if not matrix.lower() == 'matrix': + raise ValueError("Problem reading file header: " + line) + + # http://math.nist.gov/MatrixMarket/formats.html + if format.lower() == 'array': + format = self.FORMAT_ARRAY + elif format.lower() == 'coordinate': + format = self.FORMAT_COORDINATE + + # skip comments + while line.startswith(b'%'): + line = stream.readline() + + line = line.split() + if format == self.FORMAT_ARRAY: + if not len(line) == 2: + raise ValueError("Header line not of length 2: " + line) + rows, cols = map(int, line) + entries = rows * cols + else: + if not len(line) == 3: + raise ValueError("Header line not of length 3: " + line) + rows, cols, entries = map(int, line) + + return (rows, cols, entries, format, field.lower(), + symmetry.lower()) + + finally: + if close_it: + stream.close() + + # ------------------------------------------------------------------------- + @staticmethod + def _open(filespec, mode='rb'): + """ Return an open file stream for reading based on source. + + If source is a file name, open it (after trying to find it with mtx and + gzipped mtx extensions). Otherwise, just return source. + + Parameters + ---------- + filespec : str or file-like + String giving file name or file-like object + mode : str, optional + Mode with which to open file, if `filespec` is a file name. + + Returns + ------- + fobj : file-like + Open file-like object. + close_it : bool + True if the calling function should close this file when done, + false otherwise. 
+ """ + close_it = False + if isinstance(filespec, string_types): + close_it = True + + # open for reading + if mode[0] == 'r': + + # determine filename plus extension + if not os.path.isfile(filespec): + if os.path.isfile(filespec+'.mtx'): + filespec = filespec + '.mtx' + elif os.path.isfile(filespec+'.mtx.gz'): + filespec = filespec + '.mtx.gz' + elif os.path.isfile(filespec+'.mtx.bz2'): + filespec = filespec + '.mtx.bz2' + # open filename + if filespec.endswith('.gz'): + import gzip + stream = gzip.open(filespec, mode) + elif filespec.endswith('.bz2'): + import bz2 + stream = bz2.BZ2File(filespec, 'rb') + else: + stream = open(filespec, mode) + + # open for writing + else: + if filespec[-4:] != '.mtx': + filespec = filespec + '.mtx' + stream = open(filespec, mode) + else: + stream = filespec + + return stream, close_it + + # ------------------------------------------------------------------------- + @staticmethod + def _get_symmetry(a): + m, n = a.shape + if m != n: + return MMFile.SYMMETRY_GENERAL + issymm = True + isskew = True + isherm = a.dtype.char in 'FD' + + # sparse input + if isspmatrix(a): + # check if number of nonzero entries of lower and upper triangle + # matrix are equal + a = a.tocoo() + (row, col) = a.nonzero() + if (row < col).sum() != (row > col).sum(): + return MMFile.SYMMETRY_GENERAL + + # define iterator over symmetric pair entries + a = a.todok() + + def symm_iterator(): + for ((i, j), aij) in a.items(): + if i > j: + aji = a[j, i] + yield (aij, aji) + + # non-sparse input + else: + # define iterator over symmetric pair entries + def symm_iterator(): + for j in range(n): + for i in range(j+1, n): + aij, aji = a[i][j], a[j][i] + yield (aij, aji) + + # check for symmetry + for (aij, aji) in symm_iterator(): + if issymm and aij != aji: + issymm = False + if isskew and aij != -aji: + isskew = False + if isherm and aij != conj(aji): + isherm = False + if not (issymm or isskew or isherm): + break + + # return symmetry value + if issymm: + return MMFile.SYMMETRY_SYMMETRIC + if isskew: + return MMFile.SYMMETRY_SKEW_SYMMETRIC + if isherm: + return MMFile.SYMMETRY_HERMITIAN + return MMFile.SYMMETRY_GENERAL + + # ------------------------------------------------------------------------- + @staticmethod + def _field_template(field, precision): + return {MMFile.FIELD_REAL: '%%.%ie\n' % precision, + MMFile.FIELD_INTEGER: '%i\n', + MMFile.FIELD_UNSIGNED: '%u\n', + MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' % + (precision, precision) + }.get(field, None) + + # ------------------------------------------------------------------------- + def __init__(self, **kwargs): + self._init_attrs(**kwargs) + + # ------------------------------------------------------------------------- + def read(self, source): + """ + Reads the contents of a Matrix Market file-like 'source' into a matrix. + + Parameters + ---------- + source : str or file-like + Matrix Market filename (extensions .mtx, .mtz.gz) + or open file object. + + Returns + ------- + a : ndarray or coo_matrix + Dense or sparse matrix depending on the matrix format in the + Matrix Market file. + """ + stream, close_it = self._open(source) + + try: + self._parse_header(stream) + return self._parse_body(stream) + + finally: + if close_it: + stream.close() + + # ------------------------------------------------------------------------- + def write(self, target, a, comment='', field=None, precision=None, + symmetry=None): + """ + Writes sparse or dense array `a` to Matrix Market file-like `target`. 
+ + Parameters + ---------- + target : str or file-like + Matrix Market filename (extension .mtx) or open file-like object. + a : array like + Sparse or dense 2D array. + comment : str, optional + Comments to be prepended to the Matrix Market file. + field : None or str, optional + Either 'real', 'complex', 'pattern', or 'integer'. + precision : None or int, optional + Number of digits to display for real or complex values. + symmetry : None or str, optional + Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. + If symmetry is None the symmetry type of 'a' is determined by its + values. + """ + + stream, close_it = self._open(target, 'wb') + + try: + self._write(stream, a, comment, field, precision, symmetry) + + finally: + if close_it: + stream.close() + else: + stream.flush() + + # ------------------------------------------------------------------------- + def _init_attrs(self, **kwargs): + """ + Initialize each attributes with the corresponding keyword arg value + or a default of None + """ + + attrs = self.__class__.__slots__ + public_attrs = [attr[1:] for attr in attrs] + invalid_keys = set(kwargs.keys()) - set(public_attrs) + + if invalid_keys: + raise ValueError('''found %s invalid keyword arguments, please only + use %s''' % (tuple(invalid_keys), + public_attrs)) + + for attr in attrs: + setattr(self, attr, kwargs.get(attr[1:], None)) + + # ------------------------------------------------------------------------- + def _parse_header(self, stream): + rows, cols, entries, format, field, symmetry = \ + self.__class__.info(stream) + self._init_attrs(rows=rows, cols=cols, entries=entries, format=format, + field=field, symmetry=symmetry) + + # ------------------------------------------------------------------------- + def _parse_body(self, stream): + rows, cols, entries, format, field, symm = (self.rows, self.cols, + self.entries, self.format, + self.field, self.symmetry) + + try: + from scipy.sparse import coo_matrix + except ImportError: + coo_matrix = None + + dtype = self.DTYPES_BY_FIELD.get(field, None) + + has_symmetry = self.has_symmetry + is_integer = field == self.FIELD_INTEGER + is_unsigned_integer = field == self.FIELD_UNSIGNED + is_complex = field == self.FIELD_COMPLEX + is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC + is_herm = symm == self.SYMMETRY_HERMITIAN + is_pattern = field == self.FIELD_PATTERN + + if format == self.FORMAT_ARRAY: + a = zeros((rows, cols), dtype=dtype) + line = 1 + i, j = 0, 0 + if is_skew: + a[i, j] = 0 + if i < rows - 1: + i += 1 + while line: + line = stream.readline() + if not line or line.startswith(b'%'): + continue + if is_integer: + aij = int(line) + elif is_unsigned_integer: + aij = int(line) + elif is_complex: + aij = complex(*map(float, line.split())) + else: + aij = float(line) + a[i, j] = aij + if has_symmetry and i != j: + if is_skew: + a[j, i] = -aij + elif is_herm: + a[j, i] = conj(aij) + else: + a[j, i] = aij + if i < rows-1: + i = i + 1 + else: + j = j + 1 + if not has_symmetry: + i = 0 + else: + i = j + if is_skew: + a[i, j] = 0 + if i < rows-1: + i += 1 + + if is_skew: + if not (i in [0, j] and j == cols - 1): + raise ValueError("Parse error, did not read all lines.") + else: + if not (i in [0, j] and j == cols): + raise ValueError("Parse error, did not read all lines.") + + elif format == self.FORMAT_COORDINATE and coo_matrix is None: + # Read sparse matrix to dense when coo_matrix is not available. 
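+            # Fallback parse when scipy.sparse is unavailable: each body line
+            # is "i j value" with 1-based indices (hence the i-1/j-1 shift to
+            # 0-based NumPy indexing below), and when the header declares a
+            # symmetry only the lower triangle is stored, so the mirrored
+            # upper-triangle entry is filled in explicitly (negated for
+            # skew-symmetric, conjugated for hermitian).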
+ a = zeros((rows, cols), dtype=dtype) + line = 1 + k = 0 + while line: + line = stream.readline() + if not line or line.startswith(b'%'): + continue + l = line.split() + i, j = map(int, l[:2]) + i, j = i-1, j-1 + if is_integer: + aij = int(l[2]) + elif is_unsigned_integer: + aij = int(l[2]) + elif is_complex: + aij = complex(*map(float, l[2:])) + else: + aij = float(l[2]) + a[i, j] = aij + if has_symmetry and i != j: + if is_skew: + a[j, i] = -aij + elif is_herm: + a[j, i] = conj(aij) + else: + a[j, i] = aij + k = k + 1 + if not k == entries: + ValueError("Did not read all entries") + + elif format == self.FORMAT_COORDINATE: + # Read sparse COOrdinate format + + if entries == 0: + # empty matrix + return coo_matrix((rows, cols), dtype=dtype) + + I = zeros(entries, dtype='intc') + J = zeros(entries, dtype='intc') + if is_pattern: + V = ones(entries, dtype='int8') + elif is_integer: + V = zeros(entries, dtype='intp') + elif is_unsigned_integer: + V = zeros(entries, dtype='uint64') + elif is_complex: + V = zeros(entries, dtype='complex') + else: + V = zeros(entries, dtype='float') + + entry_number = 0 + for line in stream: + if not line or line.startswith(b'%'): + continue + + if entry_number+1 > entries: + raise ValueError("'entries' in header is smaller than " + "number of entries") + l = line.split() + I[entry_number], J[entry_number] = map(int, l[:2]) + + if not is_pattern: + if is_integer: + V[entry_number] = int(l[2]) + elif is_unsigned_integer: + V[entry_number] = int(l[2]) + elif is_complex: + V[entry_number] = complex(*map(float, l[2:])) + else: + V[entry_number] = float(l[2]) + entry_number += 1 + if entry_number < entries: + raise ValueError("'entries' in header is larger than " + "number of entries") + + I -= 1 # adjust indices (base 1 -> base 0) + J -= 1 + + if has_symmetry: + mask = (I != J) # off diagonal mask + od_I = I[mask] + od_J = J[mask] + od_V = V[mask] + + I = concatenate((I, od_J)) + J = concatenate((J, od_I)) + + if is_skew: + od_V *= -1 + elif is_herm: + od_V = od_V.conjugate() + + V = concatenate((V, od_V)) + + a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype) + else: + raise NotImplementedError(format) + + return a + + # ------------------------------------------------------------------------ + def _write(self, stream, a, comment='', field=None, precision=None, + symmetry=None): + if isinstance(a, list) or isinstance(a, ndarray) or \ + isinstance(a, tuple) or hasattr(a, '__array__'): + rep = self.FORMAT_ARRAY + a = asarray(a) + if len(a.shape) != 2: + raise ValueError('Expected 2 dimensional array') + rows, cols = a.shape + + if field is not None: + + if field == self.FIELD_INTEGER: + if not can_cast(a.dtype, 'intp'): + raise OverflowError("mmwrite does not support integer " + "dtypes larger than native 'intp'.") + a = a.astype('intp') + elif field == self.FIELD_REAL: + if a.dtype.char not in 'fd': + a = a.astype('d') + elif field == self.FIELD_COMPLEX: + if a.dtype.char not in 'FD': + a = a.astype('D') + + else: + if not isspmatrix(a): + raise ValueError('unknown matrix type: %s' % type(a)) + + rep = 'coordinate' + rows, cols = a.shape + + typecode = a.dtype.char + + if precision is None: + if typecode in 'fF': + precision = 8 + else: + precision = 16 + if field is None: + kind = a.dtype.kind + if kind == 'i': + if not can_cast(a.dtype, 'intp'): + raise OverflowError("mmwrite does not support integer " + "dtypes larger than native 'intp'.") + field = 'integer' + elif kind == 'f': + field = 'real' + elif kind == 'c': + field = 'complex' + elif kind == 
'u': + field = 'unsigned-integer' + else: + raise TypeError('unexpected dtype kind ' + kind) + + if symmetry is None: + symmetry = self._get_symmetry(a) + + # validate rep, field, and symmetry + self.__class__._validate_format(rep) + self.__class__._validate_field(field) + self.__class__._validate_symmetry(symmetry) + + # write initial header line + stream.write(asbytes('%%MatrixMarket matrix {0} {1} {2}\n'.format(rep, + field, symmetry))) + + # write comments + for line in comment.split('\n'): + stream.write(asbytes('%%%s\n' % (line))) + + template = self._field_template(field, precision) + # write dense format + if rep == self.FORMAT_ARRAY: + # write shape spec + stream.write(asbytes('%i %i\n' % (rows, cols))) + + if field in (self.FIELD_INTEGER, self.FIELD_REAL, self.FIELD_UNSIGNED): + if symmetry == self.SYMMETRY_GENERAL: + for j in range(cols): + for i in range(rows): + stream.write(asbytes(template % a[i, j])) + + elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC: + for j in range(cols): + for i in range(j + 1, rows): + stream.write(asbytes(template % a[i, j])) + + else: + for j in range(cols): + for i in range(j, rows): + stream.write(asbytes(template % a[i, j])) + + elif field == self.FIELD_COMPLEX: + + if symmetry == self.SYMMETRY_GENERAL: + for j in range(cols): + for i in range(rows): + aij = a[i, j] + stream.write(asbytes(template % (real(aij), + imag(aij)))) + else: + for j in range(cols): + for i in range(j, rows): + aij = a[i, j] + stream.write(asbytes(template % (real(aij), + imag(aij)))) + + elif field == self.FIELD_PATTERN: + raise ValueError('pattern type inconsisted with dense format') + + else: + raise TypeError('Unknown field type %s' % field) + + # write sparse format + else: + coo = a.tocoo() # convert to COOrdinate format + + # if symmetry format used, remove values above main diagonal + if symmetry != self.SYMMETRY_GENERAL: + lower_triangle_mask = coo.row >= coo.col + coo = coo_matrix((coo.data[lower_triangle_mask], + (coo.row[lower_triangle_mask], + coo.col[lower_triangle_mask])), + shape=coo.shape) + + # write shape spec + stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz))) + + template = self._field_template(field, precision-1) + + if field == self.FIELD_PATTERN: + for r, c in zip(coo.row+1, coo.col+1): + stream.write(asbytes("%i %i\n" % (r, c))) + elif field in (self.FIELD_INTEGER, self.FIELD_REAL, self.FIELD_UNSIGNED): + for r, c, d in zip(coo.row+1, coo.col+1, coo.data): + stream.write(asbytes(("%i %i " % (r, c)) + + (template % d))) + elif field == self.FIELD_COMPLEX: + for r, c, d in zip(coo.row+1, coo.col+1, coo.data): + stream.write(asbytes(("%i %i " % (r, c)) + + (template % (d.real, d.imag)))) + else: + raise TypeError('Unknown field type %s' % field) + + +def _is_fromfile_compatible(stream): + """ + Check whether `stream` is compatible with numpy.fromfile. + + Passing a gzipped file object to ``fromfile/fromstring`` doesn't work with + Python3. 
+ """ + if sys.version_info[0] < 3: + return True + + bad_cls = [] + try: + import gzip + bad_cls.append(gzip.GzipFile) + except ImportError: + pass + try: + import bz2 + bad_cls.append(bz2.BZ2File) + except ImportError: + pass + + bad_cls = tuple(bad_cls) + return not isinstance(stream, bad_cls) + + +# ----------------------------------------------------------------------------- +if __name__ == '__main__': + import time + for filename in sys.argv[1:]: + print('Reading', filename, '...', end=' ') + sys.stdout.flush() + t = time.time() + mmread(filename) + print('took %s seconds' % (time.time() - t)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/mmio.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/mmio.pyc new file mode 100644 index 0000000..e8925ca Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/mmio.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/netcdf.py b/project/venv/lib/python2.7/site-packages/scipy/io/netcdf.py new file mode 100644 index 0000000..40eeddc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/netcdf.py @@ -0,0 +1,1098 @@ +""" +NetCDF reader/writer module. + +This module is used to read and create NetCDF files. NetCDF files are +accessed through the `netcdf_file` object. Data written to and from NetCDF +files are contained in `netcdf_variable` objects. Attributes are given +as member variables of the `netcdf_file` and `netcdf_variable` objects. + +This module implements the Scientific.IO.NetCDF API to read and create +NetCDF files. The same API is also used in the PyNIO and pynetcdf +modules, allowing these modules to be used interchangeably when working +with NetCDF files. + +Only NetCDF3 is supported here; for NetCDF4 see +`netCDF4-python <http://unidata.github.io/netcdf4-python/>`__, +which has a similar API. + +""" + +from __future__ import division, print_function, absolute_import + +# TODO: +# * properly implement ``_FillValue``. +# * fix character variables. +# * implement PAGESIZE for Python 2.6? + +# The Scientific.IO.NetCDF API allows attributes to be added directly to +# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate +# between user-set attributes and instance attributes, user-set attributes +# are automatically stored in the ``_attributes`` attribute by overloading +#``__setattr__``. This is the reason why the code sometimes uses +#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``; +# otherwise the key would be inserted into userspace attributes. 
+ + +__all__ = ['netcdf_file', 'netcdf_variable'] + + +import sys +import warnings +import weakref +from operator import mul +from collections import OrderedDict + +import mmap as mm + +import numpy as np +from numpy.compat import asbytes, asstr +from numpy import frombuffer, dtype, empty, array, asarray +from numpy import little_endian as LITTLE_ENDIAN +from functools import reduce + +from scipy._lib.six import integer_types, text_type, binary_type + +IS_PYPY = ('__pypy__' in sys.modules) + +ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00' +ZERO = b'\x00\x00\x00\x00' +NC_BYTE = b'\x00\x00\x00\x01' +NC_CHAR = b'\x00\x00\x00\x02' +NC_SHORT = b'\x00\x00\x00\x03' +NC_INT = b'\x00\x00\x00\x04' +NC_FLOAT = b'\x00\x00\x00\x05' +NC_DOUBLE = b'\x00\x00\x00\x06' +NC_DIMENSION = b'\x00\x00\x00\n' +NC_VARIABLE = b'\x00\x00\x00\x0b' +NC_ATTRIBUTE = b'\x00\x00\x00\x0c' +FILL_BYTE = b'\x81' +FILL_CHAR = b'\x00' +FILL_SHORT = b'\x80\x01' +FILL_INT = b'\x80\x00\x00\x01' +FILL_FLOAT = b'\x7C\xF0\x00\x00' +FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00' + +TYPEMAP = {NC_BYTE: ('b', 1), + NC_CHAR: ('c', 1), + NC_SHORT: ('h', 2), + NC_INT: ('i', 4), + NC_FLOAT: ('f', 4), + NC_DOUBLE: ('d', 8)} + +FILLMAP = {NC_BYTE: FILL_BYTE, + NC_CHAR: FILL_CHAR, + NC_SHORT: FILL_SHORT, + NC_INT: FILL_INT, + NC_FLOAT: FILL_FLOAT, + NC_DOUBLE: FILL_DOUBLE} + +REVERSE = {('b', 1): NC_BYTE, + ('B', 1): NC_CHAR, + ('c', 1): NC_CHAR, + ('h', 2): NC_SHORT, + ('i', 4): NC_INT, + ('f', 4): NC_FLOAT, + ('d', 8): NC_DOUBLE, + + # these come from asarray(1).dtype.char and asarray('foo').dtype.char, + # used when getting the types from generic attributes. + ('l', 4): NC_INT, + ('S', 1): NC_CHAR} + + +class netcdf_file(object): + """ + A file object for NetCDF data. + + A `netcdf_file` object has two standard attributes: `dimensions` and + `variables`. The values of both are dictionaries, mapping dimension + names to their associated lengths and variable names to variables, + respectively. Application programs should never modify these + dictionaries. + + All other attributes correspond to global attributes defined in the + NetCDF file. Global file attributes are created by assigning to an + attribute of the `netcdf_file` object. + + Parameters + ---------- + filename : string or file-like + string -> filename + mode : {'r', 'w', 'a'}, optional + read-write-append mode, default is 'r' + mmap : None or bool, optional + Whether to mmap `filename` when reading. Default is True + when `filename` is a file name, False when `filename` is a + file-like object. Note that when mmap is in use, data arrays + returned refer directly to the mmapped data on disk, and the + file cannot be closed as long as references to it exist. + version : {1, 2}, optional + version of netcdf to read / write, where 1 means *Classic + format* and 2 means *64-bit offset format*. Default is 1. See + `here <https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_introduction.html#select_format>`__ + for more info. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. + + Notes + ----- + The major advantage of this module over other modules is that it doesn't + require the code to be linked to the NetCDF libraries. This module is + derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_. + + NetCDF files are a self-describing binary data format. The file contains + metadata that describes the dimensions and variables in the file. 
More + details about NetCDF files can be found `here + <https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html>`__. There + are three main sections to a NetCDF data structure: + + 1. Dimensions + 2. Variables + 3. Attributes + + The dimensions section records the name and length of each dimension used + by the variables. The variables would then indicate which dimensions it + uses and any attributes such as data units, along with containing the data + values for the variable. It is good practice to include a + variable that is the same name as a dimension to provide the values for + that axes. Lastly, the attributes section would contain additional + information such as the name of the file creator or the instrument used to + collect the data. + + When writing data to a NetCDF file, there is often the need to indicate the + 'record dimension'. A record dimension is the unbounded dimension for a + variable. For example, a temperature variable may have dimensions of + latitude, longitude and time. If one wants to add more temperature data to + the NetCDF file as time progresses, then the temperature variable should + have the time dimension flagged as the record dimension. + + In addition, the NetCDF file header contains the position of the data in + the file, so access can be done in an efficient manner without loading + unnecessary data into memory. It uses the ``mmap`` module to create + Numpy arrays mapped to the data on disk, for the same purpose. + + Note that when `netcdf_file` is used to open a file with mmap=True + (default for read-only), arrays returned by it refer to data + directly on the disk. The file should not be closed, and cannot be cleanly + closed when asked, if such arrays are alive. You may want to copy data arrays + obtained from mmapped Netcdf file if they are to be processed after the file + is closed, see the example below. + + Examples + -------- + To create a NetCDF file: + + >>> from scipy.io import netcdf + >>> f = netcdf.netcdf_file('simple.nc', 'w') + >>> f.history = 'Created for a test' + >>> f.createDimension('time', 10) + >>> time = f.createVariable('time', 'i', ('time',)) + >>> time[:] = np.arange(10) + >>> time.units = 'days since 2008-01-01' + >>> f.close() + + Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice + of the time variable allows for the data to be set in the object, rather + than letting ``arange(10)`` overwrite the ``time`` variable. + + To read the NetCDF file we just created: + + >>> from scipy.io import netcdf + >>> f = netcdf.netcdf_file('simple.nc', 'r') + >>> print(f.history) + b'Created for a test' + >>> time = f.variables['time'] + >>> print(time.units) + b'days since 2008-01-01' + >>> print(time.shape) + (10,) + >>> print(time[-1]) + 9 + + NetCDF files, when opened read-only, return arrays that refer + directly to memory-mapped data on disk: + + >>> data = time[:] + >>> data.base.base + <mmap.mmap object at 0x7fe753763180> + + If the data is to be processed after the file is closed, it needs + to be copied to main memory: + + >>> data = time[:].copy() + >>> f.close() + >>> data.mean() + 4.5 + + A NetCDF file can also be used as context manager: + + >>> from scipy.io import netcdf + >>> with netcdf.netcdf_file('simple.nc', 'r') as f: + ... 
print(f.history) + b'Created for a test' + + """ + def __init__(self, filename, mode='r', mmap=None, version=1, + maskandscale=False): + """Initialize netcdf_file from fileobj (str or file-like).""" + if mode not in 'rwa': + raise ValueError("Mode must be either 'r', 'w' or 'a'.") + + if hasattr(filename, 'seek'): # file-like + self.fp = filename + self.filename = 'None' + if mmap is None: + mmap = False + elif mmap and not hasattr(filename, 'fileno'): + raise ValueError('Cannot use file object for mmap') + else: # maybe it's a string + self.filename = filename + omode = 'r+' if mode == 'a' else mode + self.fp = open(self.filename, '%sb' % omode) + if mmap is None: + # Mmapped files on PyPy cannot be usually closed + # before the GC runs, so it's better to use mmap=False + # as the default. + mmap = (not IS_PYPY) + + if mode != 'r': + # Cannot read write-only files + mmap = False + + self.use_mmap = mmap + self.mode = mode + self.version_byte = version + self.maskandscale = maskandscale + + self.dimensions = OrderedDict() + self.variables = OrderedDict() + + self._dims = [] + self._recs = 0 + self._recsize = 0 + + self._mm = None + self._mm_buf = None + if self.use_mmap: + self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ) + self._mm_buf = np.frombuffer(self._mm, dtype=np.int8) + + self._attributes = OrderedDict() + + if mode in 'ra': + self._read() + + def __setattr__(self, attr, value): + # Store user defined attributes in a separate dict, + # so we can save them to file later. + try: + self._attributes[attr] = value + except AttributeError: + pass + self.__dict__[attr] = value + + def close(self): + """Closes the NetCDF file.""" + if hasattr(self, 'fp') and not self.fp.closed: + try: + self.flush() + finally: + self.variables = OrderedDict() + if self._mm_buf is not None: + ref = weakref.ref(self._mm_buf) + self._mm_buf = None + if ref() is None: + # self._mm_buf is gc'd, and we can close the mmap + self._mm.close() + else: + # we cannot close self._mm, since self._mm_buf is + # alive and there may still be arrays referring to it + warnings.warn(( + "Cannot close a netcdf_file opened with mmap=True, when " + "netcdf_variables or arrays referring to its data still exist. " + "All data arrays obtained from such files refer directly to " + "data on disk, and must be copied before the file can be cleanly " + "closed. (See netcdf_file docstring for more information on mmap.)" + ), category=RuntimeWarning) + self._mm = None + self.fp.close() + __del__ = close + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def createDimension(self, name, length): + """ + Adds a dimension to the Dimension section of the NetCDF data structure. + + Note that this function merely adds a new dimension that the variables can + reference. The values for the dimension, if desired, should be added as + a variable using `createVariable`, referring to this dimension. + + Parameters + ---------- + name : str + Name of the dimension (Eg, 'lat' or 'time'). + length : int + Length of the dimension. + + See Also + -------- + createVariable + + """ + if length is None and self._dims: + raise ValueError("Only first dimension may be unlimited!") + + self.dimensions[name] = length + self._dims.append(name) + + def createVariable(self, name, type, dimensions): + """ + Create an empty variable for the `netcdf_file` object, specifying its data + type and the dimensions it uses. + + Parameters + ---------- + name : str + Name of the new variable. 
+ type : dtype or str + Data type of the variable. + dimensions : sequence of str + List of the dimension names used by the variable, in the desired order. + + Returns + ------- + variable : netcdf_variable + The newly created ``netcdf_variable`` object. + This object has also been added to the `netcdf_file` object as well. + + See Also + -------- + createDimension + + Notes + ----- + Any dimensions to be used by the variable should already exist in the + NetCDF data structure or should be created by `createDimension` prior to + creating the NetCDF variable. + + """ + shape = tuple([self.dimensions[dim] for dim in dimensions]) + shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy + + type = dtype(type) + typecode, size = type.char, type.itemsize + if (typecode, size) not in REVERSE: + raise ValueError("NetCDF 3 does not support type %s" % type) + + data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3 + self.variables[name] = netcdf_variable( + data, typecode, size, shape, dimensions, + maskandscale=self.maskandscale) + return self.variables[name] + + def flush(self): + """ + Perform a sync-to-disk flush if the `netcdf_file` object is in write mode. + + See Also + -------- + sync : Identical function + + """ + if hasattr(self, 'mode') and self.mode in 'wa': + self._write() + sync = flush + + def _write(self): + self.fp.seek(0) + self.fp.write(b'CDF') + self.fp.write(array(self.version_byte, '>b').tostring()) + + # Write headers and data. + self._write_numrecs() + self._write_dim_array() + self._write_gatt_array() + self._write_var_array() + + def _write_numrecs(self): + # Get highest record count from all record variables. + for var in self.variables.values(): + if var.isrec and len(var.data) > self._recs: + self.__dict__['_recs'] = len(var.data) + self._pack_int(self._recs) + + def _write_dim_array(self): + if self.dimensions: + self.fp.write(NC_DIMENSION) + self._pack_int(len(self.dimensions)) + for name in self._dims: + self._pack_string(name) + length = self.dimensions[name] + self._pack_int(length or 0) # replace None with 0 for record dimension + else: + self.fp.write(ABSENT) + + def _write_gatt_array(self): + self._write_att_array(self._attributes) + + def _write_att_array(self, attributes): + if attributes: + self.fp.write(NC_ATTRIBUTE) + self._pack_int(len(attributes)) + for name, values in attributes.items(): + self._pack_string(name) + self._write_att_values(values) + else: + self.fp.write(ABSENT) + + def _write_var_array(self): + if self.variables: + self.fp.write(NC_VARIABLE) + self._pack_int(len(self.variables)) + + # Sort variable names non-recs first, then recs. + def sortkey(n): + v = self.variables[n] + if v.isrec: + return (-1,) + return v._shape + variables = sorted(self.variables, key=sortkey, reverse=True) + + # Set the metadata for all variables. + for name in variables: + self._write_var_metadata(name) + # Now that we have the metadata, we know the vsize of + # each record variable, so we can calculate recsize. + self.__dict__['_recsize'] = sum([ + var._vsize for var in self.variables.values() + if var.isrec]) + # Set the data for all variables. 
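+            # Non-record variables are written contiguously below, while
+            # record variables are interleaved record by record; that is why
+            # _recsize had to be computed from the per-variable vsizes above
+            # before any data could be written.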
+ for name in variables: + self._write_var_data(name) + else: + self.fp.write(ABSENT) + + def _write_var_metadata(self, name): + var = self.variables[name] + + self._pack_string(name) + self._pack_int(len(var.dimensions)) + for dimname in var.dimensions: + dimid = self._dims.index(dimname) + self._pack_int(dimid) + + self._write_att_array(var._attributes) + + nc_type = REVERSE[var.typecode(), var.itemsize()] + self.fp.write(asbytes(nc_type)) + + if not var.isrec: + vsize = var.data.size * var.data.itemsize + vsize += -vsize % 4 + else: # record variable + try: + vsize = var.data[0].size * var.data.itemsize + except IndexError: + vsize = 0 + rec_vars = len([v for v in self.variables.values() + if v.isrec]) + if rec_vars > 1: + vsize += -vsize % 4 + self.variables[name].__dict__['_vsize'] = vsize + self._pack_int(vsize) + + # Pack a bogus begin, and set the real value later. + self.variables[name].__dict__['_begin'] = self.fp.tell() + self._pack_begin(0) + + def _write_var_data(self, name): + var = self.variables[name] + + # Set begin in file header. + the_beguine = self.fp.tell() + self.fp.seek(var._begin) + self._pack_begin(the_beguine) + self.fp.seek(the_beguine) + + # Write data. + if not var.isrec: + self.fp.write(var.data.tostring()) + count = var.data.size * var.data.itemsize + self._write_var_padding(var, var._vsize - count) + else: # record variable + # Handle rec vars with shape[0] < nrecs. + if self._recs > len(var.data): + shape = (self._recs,) + var.data.shape[1:] + # Resize in-place does not always work since + # the array might not be single-segment + try: + var.data.resize(shape) + except ValueError: + var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype) + + pos0 = pos = self.fp.tell() + for rec in var.data: + # Apparently scalars cannot be converted to big endian. If we + # try to convert a ``=i4`` scalar to, say, '>i4' the dtype + # will remain as ``=i4``. + if not rec.shape and (rec.dtype.byteorder == '<' or + (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)): + rec = rec.byteswap() + self.fp.write(rec.tostring()) + # Padding + count = rec.size * rec.itemsize + self._write_var_padding(var, var._vsize - count) + pos += self._recsize + self.fp.seek(pos) + self.fp.seek(pos0 + var._vsize) + + def _write_var_padding(self, var, size): + encoded_fill_value = var._get_encoded_fill_value() + num_fills = size // len(encoded_fill_value) + self.fp.write(encoded_fill_value * num_fills) + + def _write_att_values(self, values): + if hasattr(values, 'dtype'): + nc_type = REVERSE[values.dtype.char, values.dtype.itemsize] + else: + types = [(t, NC_INT) for t in integer_types] + types += [ + (float, NC_FLOAT), + (str, NC_CHAR) + ] + # bytes index into scalars in py3k. Check for "string" types + if isinstance(values, text_type) or isinstance(values, binary_type): + sample = values + else: + try: + sample = values[0] # subscriptable? + except TypeError: + sample = values # scalar + + for class_, nc_type in types: + if isinstance(sample, class_): + break + + typecode, size = TYPEMAP[nc_type] + dtype_ = '>%s' % typecode + # asarray() dies with bytes and '>c' in py3k. 
Change to 'S' + dtype_ = 'S' if dtype_ == '>c' else dtype_ + + values = asarray(values, dtype=dtype_) + + self.fp.write(asbytes(nc_type)) + + if values.dtype.char == 'S': + nelems = values.itemsize + else: + nelems = values.size + self._pack_int(nelems) + + if not values.shape and (values.dtype.byteorder == '<' or + (values.dtype.byteorder == '=' and LITTLE_ENDIAN)): + values = values.byteswap() + self.fp.write(values.tostring()) + count = values.size * values.itemsize + self.fp.write(b'\x00' * (-count % 4)) # pad + + def _read(self): + # Check magic bytes and version + magic = self.fp.read(3) + if not magic == b'CDF': + raise TypeError("Error: %s is not a valid NetCDF 3 file" % + self.filename) + self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0] + + # Read file headers and set data. + self._read_numrecs() + self._read_dim_array() + self._read_gatt_array() + self._read_var_array() + + def _read_numrecs(self): + self.__dict__['_recs'] = self._unpack_int() + + def _read_dim_array(self): + header = self.fp.read(4) + if header not in [ZERO, NC_DIMENSION]: + raise ValueError("Unexpected header.") + count = self._unpack_int() + + for dim in range(count): + name = asstr(self._unpack_string()) + length = self._unpack_int() or None # None for record dimension + self.dimensions[name] = length + self._dims.append(name) # preserve order + + def _read_gatt_array(self): + for k, v in self._read_att_array().items(): + self.__setattr__(k, v) + + def _read_att_array(self): + header = self.fp.read(4) + if header not in [ZERO, NC_ATTRIBUTE]: + raise ValueError("Unexpected header.") + count = self._unpack_int() + + attributes = OrderedDict() + for attr in range(count): + name = asstr(self._unpack_string()) + attributes[name] = self._read_att_values() + return attributes + + def _read_var_array(self): + header = self.fp.read(4) + if header not in [ZERO, NC_VARIABLE]: + raise ValueError("Unexpected header.") + + begin = 0 + dtypes = {'names': [], 'formats': []} + rec_vars = [] + count = self._unpack_int() + for var in range(count): + (name, dimensions, shape, attributes, + typecode, size, dtype_, begin_, vsize) = self._read_var() + # https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html + # Note that vsize is the product of the dimension lengths + # (omitting the record dimension) and the number of bytes + # per value (determined from the type), increased to the + # next multiple of 4, for each variable. If a record + # variable, this is the amount of space per record. The + # netCDF "record size" is calculated as the sum of the + # vsize's of all the record variables. + # + # The vsize field is actually redundant, because its value + # may be computed from other information in the header. The + # 32-bit vsize field is not large enough to contain the size + # of variables that require more than 2^32 - 4 bytes, so + # 2^32 - 1 is used in the vsize field for such variables. + if shape and shape[0] is None: # record variable + rec_vars.append(name) + # The netCDF "record size" is calculated as the sum of + # the vsize's of all the record variables. + self.__dict__['_recsize'] += vsize + if begin == 0: + begin = begin_ + dtypes['names'].append(name) + dtypes['formats'].append(str(shape[1:]) + dtype_) + + # Handle padding with a virtual variable. + if typecode in 'bch': + actual_size = reduce(mul, (1,) + shape[1:]) * size + padding = -actual_size % 4 + if padding: + dtypes['names'].append('_padding_%d' % var) + dtypes['formats'].append('(%d,)>b' % padding) + + # Data will be set later. 
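+                # All record variables are read back through a single
+                # structured "rec array" that is assembled after this loop
+                # from the collected dtypes and then sliced per variable
+                # name, so no per-variable read happens here.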
+ data = None + else: # not a record variable + # Calculate size to avoid problems with vsize (above) + a_size = reduce(mul, shape, 1) * size + if self.use_mmap: + data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_) + data.shape = shape + else: + pos = self.fp.tell() + self.fp.seek(begin_) + data = frombuffer(self.fp.read(a_size), dtype=dtype_ + ).copy() + data.shape = shape + self.fp.seek(pos) + + # Add variable. + self.variables[name] = netcdf_variable( + data, typecode, size, shape, dimensions, attributes, + maskandscale=self.maskandscale) + + if rec_vars: + # Remove padding when only one record variable. + if len(rec_vars) == 1: + dtypes['names'] = dtypes['names'][:1] + dtypes['formats'] = dtypes['formats'][:1] + + # Build rec array. + if self.use_mmap: + rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes) + rec_array.shape = (self._recs,) + else: + pos = self.fp.tell() + self.fp.seek(begin) + rec_array = frombuffer(self.fp.read(self._recs*self._recsize), + dtype=dtypes).copy() + rec_array.shape = (self._recs,) + self.fp.seek(pos) + + for var in rec_vars: + self.variables[var].__dict__['data'] = rec_array[var] + + def _read_var(self): + name = asstr(self._unpack_string()) + dimensions = [] + shape = [] + dims = self._unpack_int() + + for i in range(dims): + dimid = self._unpack_int() + dimname = self._dims[dimid] + dimensions.append(dimname) + dim = self.dimensions[dimname] + shape.append(dim) + dimensions = tuple(dimensions) + shape = tuple(shape) + + attributes = self._read_att_array() + nc_type = self.fp.read(4) + vsize = self._unpack_int() + begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() + + typecode, size = TYPEMAP[nc_type] + dtype_ = '>%s' % typecode + + return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize + + def _read_att_values(self): + nc_type = self.fp.read(4) + n = self._unpack_int() + + typecode, size = TYPEMAP[nc_type] + + count = n*size + values = self.fp.read(int(count)) + self.fp.read(-count % 4) # read padding + + if typecode is not 'c': + values = frombuffer(values, dtype='>%s' % typecode).copy() + if values.shape == (1,): + values = values[0] + else: + values = values.rstrip(b'\x00') + return values + + def _pack_begin(self, begin): + if self.version_byte == 1: + self._pack_int(begin) + elif self.version_byte == 2: + self._pack_int64(begin) + + def _pack_int(self, value): + self.fp.write(array(value, '>i').tostring()) + _pack_int32 = _pack_int + + def _unpack_int(self): + return int(frombuffer(self.fp.read(4), '>i')[0]) + _unpack_int32 = _unpack_int + + def _pack_int64(self, value): + self.fp.write(array(value, '>q').tostring()) + + def _unpack_int64(self): + return frombuffer(self.fp.read(8), '>q')[0] + + def _pack_string(self, s): + count = len(s) + self._pack_int(count) + self.fp.write(asbytes(s)) + self.fp.write(b'\x00' * (-count % 4)) # pad + + def _unpack_string(self): + count = self._unpack_int() + s = self.fp.read(count).rstrip(b'\x00') + self.fp.read(-count % 4) # read padding + return s + + +class netcdf_variable(object): + """ + A data object for the `netcdf` module. + + `netcdf_variable` objects are constructed by calling the method + `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable` + objects behave much like array objects defined in numpy, except that their + data resides in a file. 
Data is read by indexing and written by assigning + to an indexed subset; the entire array can be accessed by the index ``[:]`` + or (for scalars) by using the methods `getValue` and `assignValue`. + `netcdf_variable` objects also have attribute `shape` with the same meaning + as for arrays, but the shape cannot be modified. There is another read-only + attribute `dimensions`, whose value is the tuple of dimension names. + + All other attributes correspond to variable attributes defined in + the NetCDF file. Variable attributes are created by assigning to an + attribute of the `netcdf_variable` object. + + Parameters + ---------- + data : array_like + The data array that holds the values for the variable. + Typically, this is initialized as empty, but with the proper shape. + typecode : dtype character code + Desired data-type for the data array. + size : int + Desired element size for the data array. + shape : sequence of ints + The shape of the array. This should match the lengths of the + variable's dimensions. + dimensions : sequence of strings + The names of the dimensions used by the variable. Must be in the + same order of the dimension lengths given by `shape`. + attributes : dict, optional + Attribute values (any type) keyed by string names. These attributes + become attributes for the netcdf_variable object. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. + + + Attributes + ---------- + dimensions : list of str + List of names of dimensions used by the variable object. + isrec, shape + Properties + + See also + -------- + isrec, shape + + """ + def __init__(self, data, typecode, size, shape, dimensions, + attributes=None, + maskandscale=False): + self.data = data + self._typecode = typecode + self._size = size + self._shape = shape + self.dimensions = dimensions + self.maskandscale = maskandscale + + self._attributes = attributes or OrderedDict() + for k, v in self._attributes.items(): + self.__dict__[k] = v + + def __setattr__(self, attr, value): + # Store user defined attributes in a separate dict, + # so we can save them to file later. + try: + self._attributes[attr] = value + except AttributeError: + pass + self.__dict__[attr] = value + + def isrec(self): + """Returns whether the variable has a record dimension or not. + + A record dimension is a dimension along which additional data could be + easily appended in the netcdf data structure without much rewriting of + the data file. This attribute is a read-only property of the + `netcdf_variable`. + + """ + return bool(self.data.shape) and not self._shape[0] + isrec = property(isrec) + + def shape(self): + """Returns the shape tuple of the data variable. + + This is a read-only attribute and can not be modified in the + same manner of other numpy arrays. + """ + return self.data.shape + shape = property(shape) + + def getValue(self): + """ + Retrieve a scalar value from a `netcdf_variable` of length one. + + Raises + ------ + ValueError + If the netcdf variable is an array of length greater than one, + this exception will be raised. + + """ + return self.data.item() + + def assignValue(self, value): + """ + Assign a scalar value to a `netcdf_variable` of length one. + + Parameters + ---------- + value : scalar + Scalar value (of compatible type) to assign to a length-one netcdf + variable. This value will be written to file. + + Raises + ------ + ValueError + If the input is not a scalar, or if the destination is not a length-one + netcdf variable. 
+ + """ + if not self.data.flags.writeable: + # Work-around for a bug in NumPy. Calling itemset() on a read-only + # memory-mapped array causes a seg. fault. + # See NumPy ticket #1622, and SciPy ticket #1202. + # This check for `writeable` can be removed when the oldest version + # of numpy still supported by scipy contains the fix for #1622. + raise RuntimeError("variable is not writeable") + + self.data.itemset(value) + + def typecode(self): + """ + Return the typecode of the variable. + + Returns + ------- + typecode : char + The character typecode of the variable (eg, 'i' for int). + + """ + return self._typecode + + def itemsize(self): + """ + Return the itemsize of the variable. + + Returns + ------- + itemsize : int + The element size of the variable (eg, 8 for float64). + + """ + return self._size + + def __getitem__(self, index): + if not self.maskandscale: + return self.data[index] + + data = self.data[index].copy() + missing_value = self._get_missing_value() + data = self._apply_missing_value(data, missing_value) + scale_factor = self._attributes.get('scale_factor') + add_offset = self._attributes.get('add_offset') + if add_offset is not None or scale_factor is not None: + data = data.astype(np.float64) + if scale_factor is not None: + data = data * scale_factor + if add_offset is not None: + data += add_offset + + return data + + def __setitem__(self, index, data): + if self.maskandscale: + missing_value = ( + self._get_missing_value() or + getattr(data, 'fill_value', 999999)) + self._attributes.setdefault('missing_value', missing_value) + self._attributes.setdefault('_FillValue', missing_value) + data = ((data - self._attributes.get('add_offset', 0.0)) / + self._attributes.get('scale_factor', 1.0)) + data = np.ma.asarray(data).filled(missing_value) + if self._typecode not in 'fd' and data.dtype.kind == 'f': + data = np.round(data) + + # Expand data for record vars? + if self.isrec: + if isinstance(index, tuple): + rec_index = index[0] + else: + rec_index = index + if isinstance(rec_index, slice): + recs = (rec_index.start or 0) + len(data) + else: + recs = rec_index + 1 + if recs > len(self.data): + shape = (recs,) + self._shape[1:] + # Resize in-place does not always work since + # the array might not be single-segment + try: + self.data.resize(shape) + except ValueError: + self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype) + self.data[index] = data + + def _default_encoded_fill_value(self): + """ + The default encoded fill-value for this Variable's data type. + """ + nc_type = REVERSE[self.typecode(), self.itemsize()] + return FILLMAP[nc_type] + + def _get_encoded_fill_value(self): + """ + Returns the encoded fill value for this variable as bytes. + + This is taken from either the _FillValue attribute, or the default fill + value for this variable's data type. + """ + if '_FillValue' in self._attributes: + fill_value = np.array(self._attributes['_FillValue'], + dtype=self.data.dtype).tostring() + if len(fill_value) == self.itemsize(): + return fill_value + else: + return self._default_encoded_fill_value() + else: + return self._default_encoded_fill_value() + + def _get_missing_value(self): + """ + Returns the value denoting "no data" for this variable. + + If this variable does not have a missing/fill value, returns None. + + If both _FillValue and missing_value are given, give precedence to + _FillValue. The netCDF standard gives special meaning to _FillValue; + missing_value is just used for compatibility with old datasets. 
+ """ + + if '_FillValue' in self._attributes: + missing_value = self._attributes['_FillValue'] + elif 'missing_value' in self._attributes: + missing_value = self._attributes['missing_value'] + else: + missing_value = None + + return missing_value + + @staticmethod + def _apply_missing_value(data, missing_value): + """ + Applies the given missing value to the data array. + + Returns a numpy.ma array, with any value equal to missing_value masked + out (unless missing_value is None, in which case the original array is + returned). + """ + + if missing_value is None: + newdata = data + else: + try: + missing_value_isnan = np.isnan(missing_value) + except (TypeError, NotImplementedError): + # some data types (e.g., characters) cannot be tested for NaN + missing_value_isnan = False + + if missing_value_isnan: + mymask = np.isnan(data) + else: + mymask = (data == missing_value) + + newdata = np.ma.masked_where(mymask, data) + + return newdata + + +NetCDFFile = netcdf_file +NetCDFVariable = netcdf_variable + diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/netcdf.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/netcdf.pyc new file mode 100644 index 0000000..20f55ce Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/netcdf.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/setup.py b/project/venv/lib/python2.7/site-packages/scipy/io/setup.py new file mode 100644 index 0000000..e78f4cd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/setup.py @@ -0,0 +1,20 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('io', parent_package, top_path) + + config.add_extension('_test_fortran', + sources=['_test_fortran.pyf', '_test_fortran.f']) + + config.add_data_dir('tests') + config.add_subpackage('matlab') + config.add_subpackage('arff') + config.add_subpackage('harwell_boeing') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/setup.pyc new file mode 100644 index 0000000..b1362a9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/io/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/__init__.pyc new file mode 100644 index 0000000..5c3754f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_1d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_1d.sav new file mode 100644 index 0000000..619a125 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_1d.sav differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_2d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_2d.sav new file mode 100644 index 0000000..804d8b1 Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_2d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_3d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_3d.sav
new file mode 100644
index 0000000..3fa56c4
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_3d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_4d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_4d.sav
new file mode 100644
index 0000000..4bb951e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_4d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_5d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_5d.sav
new file mode 100644
index 0000000..2854dbc
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_5d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_6d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_6d.sav
new file mode 100644
index 0000000..91588d3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_6d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_7d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_7d.sav
new file mode 100644
index 0000000..3e978fa
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_7d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_8d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_8d.sav
new file mode 100644
index 0000000..f699fe2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_8d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav
new file mode 100644
index 0000000..8e3a402
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_1d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav
new file mode 100644
index 0000000..dd3504f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_2d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav
new file mode 100644
index 0000000..285da7f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_3d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav
new file mode 100644
index 0000000..d99fa48
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_4d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav
new file mode 100644
index 0000000..de5e984
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_5d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav
new file mode 100644
index 0000000..bb76671
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_6d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_7d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_7d.sav
new file mode 100644
index 0000000..995d23c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_7d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav
new file mode 100644
index 0000000..4249ec6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/array_float32_pointer_8d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/example_1.nc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/example_1.nc
new file mode 100644
index 0000000..5775622
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/example_1.nc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/example_2.nc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/example_2.nc
new file mode 100644
index 0000000..07db1cd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/example_2.nc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/example_3_maskedvals.nc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/example_3_maskedvals.nc
new file mode 100644
index 0000000..57f8bf9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/example_3_maskedvals.nc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat
new file mode 100644
index 0000000..87731eb
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-3x3d-2i.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-mixed.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-mixed.dat
new file mode 100644
index 0000000..a165a7a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-mixed.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat
new file mode 100644
index 0000000..c3bb9dc
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-11x1x10.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat
new file mode 100644
index 0000000..351801f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-15x10x22.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat
new file mode 100644
index 0000000..64bf92f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x1x1.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat
new file mode 100644
index 0000000..3d3f27f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x1x5.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x1x7.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x1x7.dat
new file mode 100644
index 0000000..0bd6830
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x1x7.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat
new file mode 100644
index 0000000..25269ff
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-sf8-1x3x5.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat
new file mode 100644
index 0000000..9850de3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-11x1x10.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat
new file mode 100644
index 0000000..98c09c2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-15x10x22.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat
new file mode 100644
index 0000000..959098d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x1x1.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat
new file mode 100644
index 0000000..49c0ec1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x1x5.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat
new file mode 100644
index 0000000..bb936b8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x1x7.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x3x5.dat b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x3x5.dat
new file mode 100644
index 0000000..cb3e9e4
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/fortran-si4-1x3x5.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/invalid_pointer.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/invalid_pointer.sav
new file mode 100644
index 0000000..d53893c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/invalid_pointer.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/null_pointer.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/null_pointer.sav
new file mode 100644
index 0000000..8cee5eb
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/null_pointer.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_byte.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_byte.sav
new file mode 100644
index 0000000..e4027b3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_byte.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_byte_descr.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_byte_descr.sav
new file mode 100644
index 0000000..182e29b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_byte_descr.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_complex32.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_complex32.sav
new file mode 100644
index 0000000..593e8c6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_complex32.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_complex64.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_complex64.sav
new file mode 100644
index 0000000..edb19d3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_complex64.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_float32.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_float32.sav
new file mode 100644
index 0000000..be9e387
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_float32.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_float64.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_float64.sav
new file mode 100644
index 0000000..9680b28
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_float64.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_heap_pointer.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_heap_pointer.sav
new file mode 100644
index 0000000..d02b175
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_heap_pointer.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_int16.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_int16.sav
new file mode 100644
index 0000000..6035256
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_int16.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_int32.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_int32.sav
new file mode 100644
index 0000000..40210b8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_int32.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_int64.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_int64.sav
new file mode 100644
index 0000000..c91cd0a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_int64.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_string.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_string.sav
new file mode 100644
index 0000000..ee6e69f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_string.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_uint16.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_uint16.sav
new file mode 100644
index 0000000..759c2e6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_uint16.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_uint32.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_uint32.sav
new file mode 100644
index 0000000..74dec7b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_uint32.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_uint64.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_uint64.sav
new file mode 100644
index 0000000..fc9da57
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/scalar_uint64.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays.sav
new file mode 100644
index 0000000..40c9cd3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays_byte_idl80.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays_byte_idl80.sav
new file mode 100644
index 0000000..f1aa416
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays_byte_idl80.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays_replicated.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays_replicated.sav
new file mode 100644
index 0000000..6f01fbf
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays_replicated.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays_replicated_3d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays_replicated_3d.sav
new file mode 100644
index 0000000..bac9b20
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_arrays_replicated_3d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_inherit.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_inherit.sav
new file mode 100644
index 0000000..8babd56
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_inherit.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointer_arrays.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointer_arrays.sav
new file mode 100644
index 0000000..a3c6781
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointer_arrays.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated.sav
new file mode 100644
index 0000000..38b8122
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav
new file mode 100644
index 0000000..db1c256
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointers.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointers.sav
new file mode 100644
index 0000000..acbb058
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointers.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointers_replicated.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointers_replicated.sav
new file mode 100644
index 0000000..d16f465
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointers_replicated.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointers_replicated_3d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointers_replicated_3d.sav
new file mode 100644
index 0000000..732dd2c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_pointers_replicated_3d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_scalars.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_scalars.sav
new file mode 100644
index 0000000..69d7eaf
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_scalars.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_scalars_replicated.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_scalars_replicated.sav
new file mode 100644
index 0000000..2222391
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_scalars_replicated.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_scalars_replicated_3d.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_scalars_replicated_3d.sav
new file mode 100644
index 0000000..a35f1ac
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/struct_scalars_replicated_3d.sav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav
new file mode 100644
index 0000000..056333e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav
new file mode 100644
index 0000000..57e6f17
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav
new file mode 100644
index 0000000..d1b7065
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav
new file mode 100644
index 0000000..7271fdd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav
new file mode 100644
index 0000000..8aae8e2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav
new file mode 100644
index 0000000..31221b2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav
new file mode 100644
index 0000000..7090081
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/various_compressed.sav b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/various_compressed.sav
new file mode 100644
index 0000000..dcdb0b0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/data/various_compressed.sav differ
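[Editor's note: the binary files above are the fixtures that scipy.io's test suite reads back in: IDL .sav files, NetCDF .nc files, unformatted Fortran .dat records, and RIFF .wav files. As a rough sketch of what consumes them, assuming the venv path used throughout this diff, one of the .wav fixtures could be loaded like this:

import os.path
from scipy.io import wavfile

# Path taken from the diff above; adjust to your own checkout (assumption).
DATA = 'project/venv/lib/python2.7/site-packages/scipy/io/tests/data'

# wavfile.read returns (sample_rate, data); the fixture name encodes the
# expected rate (8000 Hz), channel count (2) and sample width (1 byte).
rate, data = wavfile.read(os.path.join(DATA, 'test-8000Hz-le-2ch-1byteu.wav'))
print(rate, data.shape)  # expect 8000 and an (n_samples, 2) array
]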
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_fortran.py b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_fortran.py
new file mode 100644
index 0000000..335252d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_fortran.py
@@ -0,0 +1,159 @@
+''' Tests for fortran sequential files '''
+
+import tempfile
+import shutil
+from os import path, unlink
+from glob import iglob
+import re
+
+from numpy.testing import assert_equal, assert_allclose
+import numpy as np
+
+from scipy.io import FortranFile, _test_fortran
+
+
+DATA_PATH = path.join(path.dirname(__file__), 'data')
+
+
+def test_fortranfiles_read():
+    for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
+        m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I)
+        if not m:
+            raise RuntimeError("Couldn't match %s filename to regex" % filename)
+
+        dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
+
+        dtype = m.group(1).replace('s', '<')
+
+        f = FortranFile(filename, 'r', '<u4')
+        data = f.read_record(dtype=dtype).reshape(dims, order='F')
+        f.close()
+
+        expected = np.arange(np.prod(dims)).reshape(dims).astype(dtype)
+        assert_equal(data, expected)
+
+
+def test_fortranfiles_mixed_record():
+    filename = path.join(DATA_PATH, "fortran-mixed.dat")
+    with FortranFile(filename, 'r', '<u4') as f:
+        record = f.read_record('<i4,<f4,<i8,(2)<f8')
+
+    assert_equal(record['f0'][0], 1)
+    assert_allclose(record['f1'][0], 2.3)
+    assert_equal(record['f2'][0], 4)
+    assert_allclose(record['f3'][0], [5.6, 7.8])
+
+
+def test_fortranfiles_write():
+    for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
+        m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I)
+        if not m:
+            raise RuntimeError("Couldn't match %s filename to regex" % filename)
+        dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
+
+        dtype = m.group(1).replace('s', '<')
+        data = np.arange(np.prod(dims)).reshape(dims).astype(dtype)
+
+        tmpdir = tempfile.mkdtemp()
+        try:
+            testFile = path.join(tmpdir,path.basename(filename))
+            f = FortranFile(testFile, 'w','<u4')
+            f.write_record(data.T)
+            f.close()
+            originalfile = open(filename, 'rb')
+            newfile = open(testFile, 'rb')
+            assert_equal(originalfile.read(), newfile.read(),
+                         err_msg=filename)
+            originalfile.close()
+            newfile.close()
+        finally:
+            shutil.rmtree(tmpdir)
+
+
+def test_fortranfile_read_mixed_record():
+    # The data file fortran-3x3d-2i.dat contains the program that
+    # produced it at the end.
+    #
+    # double precision :: a(3,3)
+    # integer :: b(2)
+    # ...
+    # open(1, file='fortran-3x3d-2i.dat', form='unformatted')
+    # write(1) a, b
+    # close(1)
+    #
+
+    filename = path.join(DATA_PATH, "fortran-3x3d-2i.dat")
+    with FortranFile(filename, 'r', '<u4') as f:
+        record = f.read_record('(3,3)f8', '2i4')
+
+    ax = np.arange(3*3).reshape(3, 3).astype(np.double)
+    bx = np.array([-1, -2], dtype=np.int32)
+
+    assert_equal(record[0], ax.T)
+    assert_equal(record[1], bx.T)
+
+
+def test_fortranfile_write_mixed_record(tmpdir):
+    tf = path.join(str(tmpdir), 'test.dat')
+
+    records = [
+        (('f4', 'f4', 'i4'), (np.float32(2), np.float32(3), np.int32(100))),
+        (('4f4', '(3,3)f4', '8i4'), (np.random.randint(255, size=[4]).astype(np.float32),
+                                     np.random.randint(255, size=[3, 3]).astype(np.float32),
+                                     np.random.randint(255, size=[8]).astype(np.int32)))
+    ]
+
+    for dtype, a in records:
+        with FortranFile(tf, 'w') as f:
+            f.write_record(*a)
+
+        with FortranFile(tf, 'r') as f:
+            b = f.read_record(*dtype)
+
+        assert_equal(len(a), len(b))
+
+        for aa, bb in zip(a, b):
+            assert_equal(bb, aa)
+
+
+def test_fortran_roundtrip(tmpdir):
+    filename = path.join(str(tmpdir), 'test.dat')
+
+    np.random.seed(1)
+
+    # double precision
+    m, n, k = 5, 3, 2
+    a = np.random.randn(m, n, k)
+    with FortranFile(filename, 'w') as f:
+        f.write_record(a.T)
+    a2 = _test_fortran.read_unformatted_double(m, n, k, filename)
+    with FortranFile(filename, 'r') as f:
+        a3 = f.read_record('(2,3,5)f8').T
+    assert_equal(a2, a)
+    assert_equal(a3, a)
+
+    # integer
+    m, n, k = 5, 3, 2
+    a = np.random.randn(m, n, k).astype(np.int32)
+    with FortranFile(filename, 'w') as f:
+        f.write_record(a.T)
+    a2 = _test_fortran.read_unformatted_int(m, n, k, filename)
+    with FortranFile(filename, 'r') as f:
+        a3 = f.read_record('(2,3,5)i4').T
+    assert_equal(a2, a)
+    assert_equal(a3, a)
+
+    # mixed
+    m, n, k = 5, 3, 2
+    a = np.random.randn(m, n)
+    b = np.random.randn(k).astype(np.intc)
+    with FortranFile(filename, 'w') as f:
+        f.write_record(a.T, b.T)
+    a2, b2 = _test_fortran.read_unformatted_mixed(m, n, k, filename)
+    with FortranFile(filename, 'r') as f:
+        a3, b3 = f.read_record('(3,5)f8', '2i4')
+    a3 = a3.T
+    assert_equal(a2, a)
+    assert_equal(a3, a)
+    assert_equal(b2, b)
+    assert_equal(b3, b)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_fortran.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_fortran.pyc
new file mode 100644
index 0000000..c80d2f8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_fortran.pyc differ
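[Editor's note: test_fortran.py above exercises scipy.io.FortranFile, which reads and writes the record-framed files produced by unformatted Fortran I/O. A minimal round trip, using only the write_record/read_record calls that appear in the tests above, might look like this (filename is an arbitrary example):

import numpy as np
from scipy.io import FortranFile

a = np.arange(6, dtype=np.float64).reshape(2, 3)

# Each write_record call emits one Fortran record (payload framed by
# byte-count headers); .T matches Fortran's column-major layout.
with FortranFile('record.dat', 'w') as f:
    f.write_record(a.T)

with FortranFile('record.dat', 'r') as f:
    # dtype string '(3,2)f8' = the shape of what was written, float64;
    # transposing back recovers the original array.
    b = f.read_record('(3,2)f8').T

assert np.array_equal(a, b)
]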
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_idl.py b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_idl.py
new file mode 100644
index 0000000..78be7dc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_idl.py
@@ -0,0 +1,442 @@
+from __future__ import division, print_function, absolute_import
+
+from os import path
+import warnings
+
+DATA_PATH = path.join(path.dirname(__file__), 'data')
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_array_equal,
+                           assert_)
+from scipy._lib._numpy_compat import suppress_warnings
+
+from scipy.io.idl import readsav
+
+
+def object_array(*args):
+    """Constructs a numpy array of objects"""
+    array = np.empty(len(args), dtype=object)
+    for i in range(len(args)):
+        array[i] = args[i]
+    return array
+
+
+def assert_identical(a, b):
+    """Assert whether value AND type are the same"""
+    assert_equal(a, b)
+    if type(b) is str:
+        assert_equal(type(a), type(b))
+    else:
+        assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)
+
+
+def assert_array_identical(a, b):
+    """Assert whether values AND type are the same"""
+    assert_array_equal(a, b)
+    assert_equal(a.dtype.type, b.dtype.type)
+
+
+# Define vectorized ID function for pointer arrays
+vect_id = np.vectorize(id)
+
+
+class TestIdict:
+
+    def test_idict(self):
+        custom_dict = {'a': np.int16(999)}
+        original_id = id(custom_dict)
+        s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False)
+        assert_equal(original_id, id(s))
+        assert_('a' in s)
+        assert_identical(s['a'], np.int16(999))
+        assert_identical(s['i8u'], np.uint8(234))
+
+
+class TestScalars:
+    # Test that scalar values are read in with the correct value and type
+
+    def test_byte(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
+        assert_identical(s.i8u, np.uint8(234))
+
+    def test_int16(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
+        assert_identical(s.i16s, np.int16(-23456))
+
+    def test_int32(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
+        assert_identical(s.i32s, np.int32(-1234567890))
+
+    def test_float32(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
+        assert_identical(s.f32, np.float32(-3.1234567e+37))
+
+    def test_float64(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
+        assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
+
+    def test_complex32(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
+        assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))
+
+    def test_bytes(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
+        assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
+
+    def test_structure(self):
+        pass
+
+    def test_complex64(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
+        assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
+
+    def test_heap_pointer(self):
+        pass
+
+    def test_object_reference(self):
+        pass
+
+    def test_uint16(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
+        assert_identical(s.i16u, np.uint16(65511))
+
+    def test_uint32(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
+        assert_identical(s.i32u, np.uint32(4294967233))
+
+    def test_int64(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
+        assert_identical(s.i64s, np.int64(-9223372036854774567))
+
+    def test_uint64(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
+        assert_identical(s.i64u, np.uint64(18446744073709529285))
+
+
+class TestCompressed(TestScalars):
+    # Test that compressed .sav files can be read in
+
+    def test_compressed(self):
+        s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
+
+        assert_identical(s.i8u, np.uint8(234))
+        assert_identical(s.f32, np.float32(-3.1234567e+37))
+        assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
+        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
+        assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
+        assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
+        assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
+        assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
+
+
+class TestArrayDimensions:
+    # Test that multi-dimensional arrays are read in with the correct dimensions
+
+    def test_1d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'), verbose=False)
+        assert_equal(s.array1d.shape, (123, ))
+
+    def test_2d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_2d.sav'), verbose=False)
+        assert_equal(s.array2d.shape, (22, 12))
+
+    def test_3d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'), verbose=False)
+        assert_equal(s.array3d.shape, (11, 22, 12))
+
+    def test_4d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'), verbose=False)
+        assert_equal(s.array4d.shape, (4, 5, 8, 7))
+
+    def test_5d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'), verbose=False)
+        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
+
+    def test_6d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'), verbose=False)
+        assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
+
+    def test_7d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'), verbose=False)
+        assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
+
+    def test_8d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'), verbose=False)
+        assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
+
+
+class TestStructures:
+
+    def test_scalars(self):
+        s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
+        assert_identical(s.scalars.a, np.array(np.int16(1)))
+        assert_identical(s.scalars.b, np.array(np.int32(2)))
+        assert_identical(s.scalars.c, np.array(np.float32(3.)))
+        assert_identical(s.scalars.d, np.array(np.float64(4.)))
+        assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
+        assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
+
+    def test_scalars_replicated(self):
+        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False)
+        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
+        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
+        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
+        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
+        assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
+        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))
+
+    def test_scalars_replicated_3d(self):
+        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
+        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
+        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
+        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
+        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
+        assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
+        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
+
+    def test_arrays(self):
+        s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
+        assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
+        assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
+        assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
+        assert_array_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
+
+    def test_arrays_replicated(self):
+        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)
+
+        # Check column types
+        assert_(s.arrays_rep.a.dtype.type is np.object_)
+        assert_(s.arrays_rep.b.dtype.type is np.object_)
+        assert_(s.arrays_rep.c.dtype.type is np.object_)
+        assert_(s.arrays_rep.d.dtype.type is np.object_)
+
+        # Check column shapes
+        assert_equal(s.arrays_rep.a.shape, (5, ))
+        assert_equal(s.arrays_rep.b.shape, (5, ))
+        assert_equal(s.arrays_rep.c.shape, (5, ))
+        assert_equal(s.arrays_rep.d.shape, (5, ))
+
+        # Check values
+        for i in range(5):
+            assert_array_identical(s.arrays_rep.a[i],
+                                   np.array([1, 2, 3], dtype=np.int16))
+            assert_array_identical(s.arrays_rep.b[i],
+                                   np.array([4., 5., 6., 7.], dtype=np.float32))
+            assert_array_identical(s.arrays_rep.c[i],
+                                   np.array([np.complex64(1+2j),
+                                             np.complex64(7+8j)]))
+            assert_array_identical(s.arrays_rep.d[i],
+                                   np.array([b"cheese", b"bacon", b"spam"],
+                                            dtype=object))
+
+    def test_arrays_replicated_3d(self):
+        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)
+
+        # Check column types
+        assert_(s.arrays_rep.a.dtype.type is np.object_)
+        assert_(s.arrays_rep.b.dtype.type is np.object_)
+        assert_(s.arrays_rep.c.dtype.type is np.object_)
+        assert_(s.arrays_rep.d.dtype.type is np.object_)
+
+        # Check column shapes
+        assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
+        assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
+        assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
+        assert_equal(s.arrays_rep.d.shape, (4, 3, 2))
+
+        # Check values
+        for i in range(4):
+            for j in range(3):
+                for k in range(2):
+                    assert_array_identical(s.arrays_rep.a[i, j, k],
+                                           np.array([1, 2, 3], dtype=np.int16))
+                    assert_array_identical(s.arrays_rep.b[i, j, k],
+                                           np.array([4., 5., 6., 7.],
+                                                    dtype=np.float32))
+                    assert_array_identical(s.arrays_rep.c[i, j, k],
+                                           np.array([np.complex64(1+2j),
+                                                     np.complex64(7+8j)]))
+                    assert_array_identical(s.arrays_rep.d[i, j, k],
+                                           np.array([b"cheese", b"bacon", b"spam"],
+                                                    dtype=object))
+
+    def test_inheritance(self):
+        s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
+        assert_identical(s.fc.x, np.array([0], dtype=np.int16))
+        assert_identical(s.fc.y, np.array([0], dtype=np.int16))
+        assert_identical(s.fc.r, np.array([0], dtype=np.int16))
+        assert_identical(s.fc.c, np.array([4], dtype=np.int16))
+
+    def test_arrays_corrupt_idl80(self):
+        # test byte arrays with missing nbyte information from IDL 8.0 .sav file
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "Not able to verify number of bytes from header")
+            s = readsav(path.join(DATA_PATH,'struct_arrays_byte_idl80.sav'),
+                        verbose=False)
+
+        assert_identical(s.y.x[0], np.array([55,66], dtype=np.uint8))
+
+
+class TestPointers:
+    # Check that pointers in .sav files produce references to the same object in Python
+
+    def test_pointers(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_heap_pointer.sav'), verbose=False)
+        assert_identical(s.c64_pointer1, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
+        assert_identical(s.c64_pointer2, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
+        assert_(s.c64_pointer1 is s.c64_pointer2)
+
+
+class TestPointerArray:
+    # Test that pointers in arrays are correctly read in
+
+    def test_1d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'), verbose=False)
+        assert_equal(s.array1d.shape, (123, ))
+        assert_(np.all(s.array1d == np.float32(4.)))
+        assert_(np.all(vect_id(s.array1d) == id(s.array1d[0])))
+
+    def test_2d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_2d.sav'), verbose=False)
+        assert_equal(s.array2d.shape, (22, 12))
+        assert_(np.all(s.array2d == np.float32(4.)))
+        assert_(np.all(vect_id(s.array2d) == id(s.array2d[0,0])))
+
+    def test_3d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'), verbose=False)
+        assert_equal(s.array3d.shape, (11, 22, 12))
+        assert_(np.all(s.array3d == np.float32(4.)))
+        assert_(np.all(vect_id(s.array3d) == id(s.array3d[0,0,0])))
+
+    def test_4d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_4d.sav'), verbose=False)
+        assert_equal(s.array4d.shape, (4, 5, 8, 7))
+        assert_(np.all(s.array4d == np.float32(4.)))
+        assert_(np.all(vect_id(s.array4d) == id(s.array4d[0,0,0,0])))
+
+    def test_5d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'), verbose=False)
+        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
+        assert_(np.all(s.array5d == np.float32(4.)))
+        assert_(np.all(vect_id(s.array5d) == id(s.array5d[0,0,0,0,0])))
+
+    def test_6d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_6d.sav'), verbose=False)
+        assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
+        assert_(np.all(s.array6d == np.float32(4.)))
+        assert_(np.all(vect_id(s.array6d) == id(s.array6d[0,0,0,0,0,0])))
+
+    def test_7d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_7d.sav'), verbose=False)
+        assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
+        assert_(np.all(s.array7d == np.float32(4.)))
+        assert_(np.all(vect_id(s.array7d) == id(s.array7d[0,0,0,0,0,0,0])))
+
+    def test_8d(self):
+        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_8d.sav'), verbose=False)
+        assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
+        assert_(np.all(s.array8d == np.float32(4.)))
+        assert_(np.all(vect_id(s.array8d) == id(s.array8d[0,0,0,0,0,0,0,0])))
+
+
+class TestPointerStructures:
+    # Test that structures are correctly read in
+
+    def test_scalars(self):
+        s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
+        assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_))
+        assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_))
+        assert_(id(s.pointers.g[0]) == id(s.pointers.h[0]))
+
+    def test_pointers_replicated(self):
+        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False)
+        assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_))
+        assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_))
+        assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
+
+    def test_pointers_replicated_3d(self):
+        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
+        s_expect = np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)
+        assert_identical(s.pointers_rep.g, s_expect)
+        assert_identical(s.pointers_rep.h, s_expect)
+        assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
+
+    def test_arrays(self):
+        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
+        assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
+        assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
+        assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
+        assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
+        assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
+
+    def test_arrays_replicated(self):
+        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)
+
+        # Check column types
+        assert_(s.arrays_rep.g.dtype.type is np.object_)
+        assert_(s.arrays_rep.h.dtype.type is np.object_)
+
+        # Check column shapes
+        assert_equal(s.arrays_rep.g.shape, (5, ))
+        assert_equal(s.arrays_rep.h.shape, (5, ))
+
+        # Check values
+        for i in range(5):
+            assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_))
+            assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_))
+            assert_(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0])))
+            assert_(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0])))
+
+    def test_arrays_replicated_3d(self):
+        pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav')
+        s = readsav(pth, verbose=False)
+
+        # Check column types
+        assert_(s.arrays_rep.g.dtype.type is np.object_)
+        assert_(s.arrays_rep.h.dtype.type is np.object_)
+
+        # Check column shapes
+        assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
+        assert_equal(s.arrays_rep.h.shape, (4, 3, 2))
+
+        # Check values
+        for i in range(4):
+            for j in range(3):
+                for k in range(2):
+                    assert_array_identical(s.arrays_rep.g[i, j, k],
+                                           np.repeat(np.float32(4.), 2).astype(np.object_))
+                    assert_array_identical(s.arrays_rep.h[i, j, k],
+                                           np.repeat(np.float32(4.), 3).astype(np.object_))
+                    assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0])))
+                    assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0])))
+
+
+class TestTags:
+    '''Test that sav files with description tag read at all'''
+
+    def test_description(self):
+        s = readsav(path.join(DATA_PATH, 'scalar_byte_descr.sav'), verbose=False)
+        assert_identical(s.i8u, np.uint8(234))
+
+
+def test_null_pointer():
+    # Regression test for null pointers.
+    s = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
+    assert_identical(s.point, None)
+    assert_identical(s.check, np.int16(5))
+
+
+def test_invalid_pointer():
+    # Regression test for invalid pointers (gh-4613).
+
+    # In some files in the wild, pointers can sometimes refer to a heap
+    # variable that does not exist. In that case, we now gracefully fail for
+    # that variable and replace the variable with None and emit a warning.
+    # Since it's difficult to artificially produce such files, the file used
+    # here has been edited to force the pointer reference to be invalid.
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+        s = readsav(path.join(DATA_PATH, 'invalid_pointer.sav'), verbose=False)
+    assert_(len(w) == 1)
+    assert_(str(w[0].message) == ("Variable referenced by pointer not found in "
+                                  "heap: variable will be set to None"))
+    assert_identical(s['a'], np.array([None, None]))
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_idl.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_idl.pyc
new file mode 100644
index 0000000..11d18e9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_idl.pyc differ
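[Editor's note: test_idl.py above drives scipy.io.idl.readsav, which loads IDL .sav files into a dict-like object whose entries are also reachable as attributes. A minimal sketch against one of the fixtures added earlier (the DATA path is assumed from this diff):

import os.path
from scipy.io import readsav

DATA = 'project/venv/lib/python2.7/site-packages/scipy/io/tests/data'

# readsav returns an attribute-accessible dict: s['i8u'] and s.i8u are the
# same variable from the .sav file.
s = readsav(os.path.join(DATA, 'scalar_byte.sav'), verbose=False)
print(s.i8u)  # the tests above expect np.uint8(234)

# An existing dict can be filled in place via idict=, as TestIdict shows.
out = {}
s2 = readsav(os.path.join(DATA, 'scalar_byte.sav'), idict=out, verbose=False)
assert s2 is out
]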
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_mmio.py b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_mmio.py
new file mode 100644
index 0000000..a58bc28
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_mmio.py
@@ -0,0 +1,673 @@
+from __future__ import division, print_function, absolute_import
+
+from tempfile import mkdtemp, mktemp
+import os
+import shutil
+
+import numpy as np
+from numpy import array, transpose, pi
+from numpy.testing import (assert_equal,
+                           assert_array_equal, assert_array_almost_equal)
+import pytest
+from pytest import raises as assert_raises
+
+import scipy.sparse
+from scipy.io.mmio import mminfo, mmread, mmwrite
+
+parametrize_args = [('integer', 'int'),
+                    ('unsigned-integer', 'uint')]
+
+
+class TestMMIOArray(object):
+    def setup_method(self):
+        self.tmpdir = mkdtemp()
+        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
+
+    def teardown_method(self):
+        shutil.rmtree(self.tmpdir)
+
+    def check(self, a, info):
+        mmwrite(self.fn, a)
+        assert_equal(mminfo(self.fn), info)
+        b = mmread(self.fn)
+        assert_array_almost_equal(a, b)
+
+    def check_exact(self, a, info):
+        mmwrite(self.fn, a)
+        assert_equal(mminfo(self.fn), info)
+        b = mmread(self.fn)
+        assert_equal(a, b)
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_integer(self, typeval, dtype):
+        self.check_exact(array([[1, 2], [3, 4]], dtype=dtype),
+                         (2, 2, 4, 'array', typeval, 'general'))
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_32bit_integer(self, typeval, dtype):
+        a = array([[2**31-1, 2**31-2], [2**31-3, 2**31-4]], dtype=dtype)
+        self.check_exact(a, (2, 2, 4, 'array', typeval, 'general'))
+
+    def test_64bit_integer(self):
+        a = array([[2**31, 2**32], [2**63-2, 2**63-1]], dtype=np.int64)
+        if (np.intp(0).itemsize < 8):
+            assert_raises(OverflowError, mmwrite, self.fn, a)
+        else:
+            self.check_exact(a, (2, 2, 4, 'array', 'integer', 'general'))
+
+    def test_64bit_unsigned_integer(self):
+        a = array([[2**31, 2**32], [2**64-2, 2**64-1]], dtype=np.uint64)
+        self.check_exact(a, (2, 2, 4, 'array', 'unsigned-integer', 'general'))
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_upper_triangle_integer(self, typeval, dtype):
+        self.check_exact(array([[0, 1], [0, 0]], dtype=dtype),
+                         (2, 2, 4, 'array', typeval, 'general'))
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_lower_triangle_integer(self, typeval, dtype):
+        self.check_exact(array([[0, 0], [1, 0]], dtype=dtype),
+                         (2, 2, 4, 'array', typeval, 'general'))
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_rectangular_integer(self, typeval, dtype):
+        self.check_exact(array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
+                         (2, 3, 6, 'array', typeval, 'general'))
+
+    def test_simple_rectangular_float(self):
+        self.check([[1, 2], [3.5, 4], [5, 6]],
+                   (3, 2, 6, 'array', 'real', 'general'))
+
+    def test_simple_float(self):
+        self.check([[1, 2], [3, 4.0]],
+                   (2, 2, 4, 'array', 'real', 'general'))
+
+    def test_simple_complex(self):
+        self.check([[1, 2], [3, 4j]],
+                   (2, 2, 4, 'array', 'complex', 'general'))
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_symmetric_integer(self, typeval, dtype):
+        self.check_exact(array([[1, 2], [2, 4]], dtype=dtype),
+                         (2, 2, 4, 'array', typeval, 'symmetric'))
+
+    def test_simple_skew_symmetric_integer(self):
+        self.check_exact([[0, 2], [-2, 0]],
+                         (2, 2, 4, 'array', 'integer', 'skew-symmetric'))
+
+    def test_simple_skew_symmetric_float(self):
+        self.check(array([[0, 2], [-2.0, 0.0]], 'f'),
+                   (2, 2, 4, 'array', 'real', 'skew-symmetric'))
+
+    def test_simple_hermitian_complex(self):
+        self.check([[1, 2+3j], [2-3j, 4]],
+                   (2, 2, 4, 'array', 'complex', 'hermitian'))
+
+    def test_random_symmetric_float(self):
+        sz = (20, 20)
+        a = np.random.random(sz)
+        a = a + transpose(a)
+        self.check(a, (20, 20, 400, 'array', 'real', 'symmetric'))
+
+    def test_random_rectangular_float(self):
+        sz = (20, 15)
+        a = np.random.random(sz)
+        self.check(a, (20, 15, 300, 'array', 'real', 'general'))
+
+
+class TestMMIOSparseCSR(TestMMIOArray):
+    def setup_method(self):
+        self.tmpdir = mkdtemp()
+        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
+
+    def teardown_method(self):
+        shutil.rmtree(self.tmpdir)
+
+    def check(self, a, info):
+        mmwrite(self.fn, a)
+        assert_equal(mminfo(self.fn), info)
+        b = mmread(self.fn)
+        assert_array_almost_equal(a.todense(), b.todense())
+
+    def check_exact(self, a, info):
+        mmwrite(self.fn, a)
+        assert_equal(mminfo(self.fn), info)
+        b = mmread(self.fn)
+        assert_equal(a.todense(), b.todense())
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_integer(self, typeval, dtype):
+        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [3, 4]], dtype=dtype),
+                         (2, 2, 4, 'coordinate', typeval, 'general'))
+
+    def test_32bit_integer(self):
+        a = scipy.sparse.csr_matrix(array([[2**31-1, -2**31+2],
+                                           [2**31-3, 2**31-4]],
+                                          dtype=np.int32))
+        self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
+
+    def test_64bit_integer(self):
+        a = scipy.sparse.csr_matrix(array([[2**32+1, 2**32+1],
+                                           [-2**63+2, 2**63-2]],
+                                          dtype=np.int64))
+        if (np.intp(0).itemsize < 8):
+            assert_raises(OverflowError, mmwrite, self.fn, a)
+        else:
+            self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
+
+    def test_32bit_unsigned_integer(self):
+        a = scipy.sparse.csr_matrix(array([[2**31-1, 2**31-2],
+                                           [2**31-3, 2**31-4]],
+                                          dtype=np.uint32))
+        self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general'))
+
+    def test_64bit_unsigned_integer(self):
+        a = scipy.sparse.csr_matrix(array([[2**32+1, 2**32+1],
+                                           [2**64-2, 2**64-1]],
+                                          dtype=np.uint64))
+        self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general'))
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_upper_triangle_integer(self, typeval, dtype):
+        self.check_exact(scipy.sparse.csr_matrix([[0, 1], [0, 0]], dtype=dtype),
+                         (2, 2, 1, 'coordinate', typeval, 'general'))
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_lower_triangle_integer(self, typeval, dtype):
+        self.check_exact(scipy.sparse.csr_matrix([[0, 0], [1, 0]], dtype=dtype),
+                         (2, 2, 1, 'coordinate', typeval, 'general'))
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_rectangular_integer(self, typeval, dtype):
+        self.check_exact(scipy.sparse.csr_matrix([[1, 2, 3], [4, 5, 6]], dtype=dtype),
+                         (2, 3, 6, 'coordinate', typeval, 'general'))
+
+    def test_simple_rectangular_float(self):
+        self.check(scipy.sparse.csr_matrix([[1, 2], [3.5, 4], [5, 6]]),
+                   (3, 2, 6, 'coordinate', 'real', 'general'))
+
+    def test_simple_float(self):
+        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4.0]]),
+                   (2, 2, 4, 'coordinate', 'real', 'general'))
+
+    def test_simple_complex(self):
+        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4j]]),
+                   (2, 2, 4, 'coordinate', 'complex', 'general'))
+
+    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
+    def test_simple_symmetric_integer(self, typeval, dtype):
+        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [2, 4]], dtype=dtype),
+                         (2, 2, 3, 'coordinate', typeval, 'symmetric'))
+
+    def test_simple_skew_symmetric_integer(self):
+        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [-2, 4]]),
+                         (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'))
+
+    def test_simple_skew_symmetric_float(self):
+        self.check(scipy.sparse.csr_matrix(array([[1, 2], [-2.0, 4]], 'f')),
+                   (2, 2, 3, 'coordinate', 'real', 'skew-symmetric'))
+
+    def test_simple_hermitian_complex(self):
+        self.check(scipy.sparse.csr_matrix([[1, 2+3j], [2-3j, 4]]),
+                   (2, 2, 3, 'coordinate', 'complex', 'hermitian'))
+
+    def test_random_symmetric_float(self):
+        sz = (20, 20)
+        a = np.random.random(sz)
+        a = a + transpose(a)
+        a = scipy.sparse.csr_matrix(a)
+        self.check(a, (20, 20, 210, 'coordinate', 'real', 'symmetric'))
+
+    def test_random_rectangular_float(self):
+        sz = (20, 15)
+        a = np.random.random(sz)
+        a = scipy.sparse.csr_matrix(a)
+        self.check(a, (20, 15, 300, 'coordinate', 'real', 'general'))
+
+    def test_simple_pattern(self):
+        a = scipy.sparse.csr_matrix([[0, 1.5], [3.0, 2.5]])
+        p = np.zeros_like(a.todense())
+        p[a.todense() > 0] = 1
+        info = (2, 2, 3, 'coordinate', 'pattern', 'general')
+        mmwrite(self.fn, a, field='pattern')
+        assert_equal(mminfo(self.fn), info)
+        b = mmread(self.fn)
+        assert_array_almost_equal(p, b.todense())
+
+
+_32bit_integer_dense_example = '''\
+%%MatrixMarket matrix array integer general
+2 2
+2147483647
+2147483646
+2147483647
+2147483646
+'''
+
+_32bit_integer_sparse_example = '''\
+%%MatrixMarket matrix coordinate integer symmetric
+2 2 2
+1 1 2147483647
+2 2 2147483646
+'''
+
+_64bit_integer_dense_example = '''\
+%%MatrixMarket matrix array integer general
+2 2
+          2147483648
+-9223372036854775806
+         -2147483648
+ 9223372036854775807
+'''
+
+_64bit_integer_sparse_general_example = '''\
+%%MatrixMarket matrix coordinate integer general
+2 2 3
+1 1 2147483648
+1 2 9223372036854775807
+2 2 9223372036854775807
+'''
+
+_64bit_integer_sparse_symmetric_example = '''\
+%%MatrixMarket matrix coordinate integer symmetric
+2 2 3
+1 1 2147483648
+1 2 -9223372036854775807
+2 2 9223372036854775807
+'''
+
+_64bit_integer_sparse_skew_example = '''\
+%%MatrixMarket matrix coordinate integer skew-symmetric
+2 2 3
+1 1 2147483648
+1 2 -9223372036854775807
+2 2 9223372036854775807
+'''
+
+_over64bit_integer_dense_example = '''\
+%%MatrixMarket matrix array integer general
+2 2
+         2147483648
+9223372036854775807
+         2147483648
+9223372036854775808
+'''
+
+_over64bit_integer_sparse_example = '''\
+%%MatrixMarket matrix coordinate integer symmetric
+2 2 2
+1 1 2147483648
+2 2 19223372036854775808
+'''
+
+
+class TestMMIOReadLargeIntegers(object):
+    def setup_method(self):
+        self.tmpdir = mkdtemp()
+        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
+
+    def teardown_method(self):
+        shutil.rmtree(self.tmpdir)
+
+    def check_read(self, example, a, info, dense, over32, over64):
+        with open(self.fn, 'w') as f:
+            f.write(example)
+        assert_equal(mminfo(self.fn), info)
+        if (over32 and (np.intp(0).itemsize < 8)) or over64:
+            assert_raises(OverflowError, mmread, self.fn)
+        else:
+            b = mmread(self.fn)
+            if not dense:
+                b = b.todense()
+            assert_equal(a, b)
+
+    def test_read_32bit_integer_dense(self):
+        a = array([[2**31-1, 2**31-1],
+                   [2**31-2, 2**31-2]], dtype=np.int64)
+        self.check_read(_32bit_integer_dense_example,
+                        a,
+                        (2, 2, 4, 'array', 'integer', 'general'),
+                        dense=True,
+                        over32=False,
+                        over64=False)
+
+    def test_read_32bit_integer_sparse(self):
+        a = array([[2**31-1, 0],
+                   [0, 2**31-2]], dtype=np.int64)
+        self.check_read(_32bit_integer_sparse_example,
+                        a,
+                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
+                        dense=False,
+                        over32=False,
+                        over64=False)
+
+    def test_read_64bit_integer_dense(self):
+        a = array([[2**31, -2**31],
+                   [-2**63+2, 2**63-1]], dtype=np.int64)
+        self.check_read(_64bit_integer_dense_example,
+                        a,
+                        (2, 2, 4, 'array', 'integer', 'general'),
+                        dense=True,
+                        over32=True,
+                        over64=False)
+
+    def test_read_64bit_integer_sparse_general(self):
+        a = array([[2**31, 2**63-1],
+                   [0, 2**63-1]], dtype=np.int64)
+        self.check_read(_64bit_integer_sparse_general_example,
+                        a,
+                        (2, 2, 3, 'coordinate', 'integer', 'general'),
+                        dense=False,
+                        over32=True,
+                        over64=False)
+
+    def test_read_64bit_integer_sparse_symmetric(self):
+        a = array([[2**31, -2**63+1],
+                   [-2**63+1, 2**63-1]], dtype=np.int64)
+        self.check_read(_64bit_integer_sparse_symmetric_example,
+                        a,
+                        (2, 2, 3, 'coordinate', 'integer', 'symmetric'),
+                        dense=False,
+                        over32=True,
+                        over64=False)
+
+    def test_read_64bit_integer_sparse_skew(self):
+        a = array([[2**31, -2**63+1],
+                   [2**63-1, 2**63-1]], dtype=np.int64)
+        self.check_read(_64bit_integer_sparse_skew_example,
+                        a,
+                        (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'),
+                        dense=False,
+                        over32=True,
+                        over64=False)
+
+    def test_read_over64bit_integer_dense(self):
+        self.check_read(_over64bit_integer_dense_example,
+                        None,
+                        (2, 2, 4, 'array', 'integer', 'general'),
+                        dense=True,
+                        over32=True,
+                        over64=True)
+
+    def test_read_over64bit_integer_sparse(self):
+        self.check_read(_over64bit_integer_sparse_example,
+                        None,
+                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
+                        dense=False,
+                        over32=True,
+                        over64=True)
+
+
+_general_example = '''\
+%%MatrixMarket matrix coordinate real general
+%=================================================================================
+%
+% This ASCII file represents a sparse MxN matrix with L
+% nonzeros in the following Matrix Market format:
+%
+% +----------------------------------------------+
+% |%%MatrixMarket matrix coordinate real general | <--- header line
+% |%                                             | <--+
+% |% comments                                    | |-- 0 or more comment lines
+% |%                                             | <--+
+% | M N L                                        | <--- rows, columns, entries
+% | I1 J1 A(I1, J1)                              | <--+
+% | I2 J2 A(I2, J2)                              | |
+% | I3 J3 A(I3, J3)                              | |-- L lines
+% |    . . .                                     | |
+% | IL JL A(IL, JL)                              | <--+
+% +----------------------------------------------+
+%
+% Indices are 1-based, i.e. A(1,1) is the first element.
+%
+%=================================================================================
+ 5 5 8
+ 1 1 1.000e+00
+ 2 2 1.050e+01
+ 3 3 1.500e-02
+ 1 4 6.000e+00
+ 4 2 2.505e+02
+ 4 4 -2.800e+02
+ 4 5 3.332e+01
+ 5 5 1.200e+01
+'''
+
+_hermitian_example = '''\
+%%MatrixMarket matrix coordinate complex hermitian
+ 5 5 7
+ 1 1 1.0 0
+ 2 2 10.5 0
+ 4 2 250.5 22.22
+ 3 3 1.5e-2 0
+ 4 4 -2.8e2 0
+ 5 5 12. 0
+ 5 4 0 33.32
+'''
+
+_skew_example = '''\
+%%MatrixMarket matrix coordinate real skew-symmetric
+ 5 5 7
+ 1 1 1.0
+ 2 2 10.5
+ 4 2 250.5
+ 3 3 1.5e-2
+ 4 4 -2.8e2
+ 5 5 12.
+ 5 4 0
+'''
+
+_symmetric_example = '''\
+%%MatrixMarket matrix coordinate real symmetric
+ 5 5 7
+ 1 1 1.0
+ 2 2 10.5
+ 4 2 250.5
+ 3 3 1.5e-2
+ 4 4 -2.8e2
+ 5 5 12.
+ 5 4 8
+'''
+
+_symmetric_pattern_example = '''\
+%%MatrixMarket matrix coordinate pattern symmetric
+ 5 5 7
+ 1 1
+ 2 2
+ 4 2
+ 3 3
+ 4 4
+ 5 5
+ 5 4
+'''
+
+
+class TestMMIOCoordinate(object):
+    def setup_method(self):
+        self.tmpdir = mkdtemp()
+        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
+
+    def teardown_method(self):
+        shutil.rmtree(self.tmpdir)
+
+    def check_read(self, example, a, info):
+        f = open(self.fn, 'w')
+        f.write(example)
+        f.close()
+        assert_equal(mminfo(self.fn), info)
+        b = mmread(self.fn).todense()
+        assert_array_almost_equal(a, b)
+
+    def test_read_general(self):
+        a = [[1, 0, 0, 6, 0],
+             [0, 10.5, 0, 0, 0],
+             [0, 0, .015, 0, 0],
+             [0, 250.5, 0, -280, 33.32],
+             [0, 0, 0, 0, 12]]
+        self.check_read(_general_example, a,
+                        (5, 5, 8, 'coordinate', 'real', 'general'))
+
+    def test_read_hermitian(self):
+        a = [[1, 0, 0, 0, 0],
+             [0, 10.5, 0, 250.5 - 22.22j, 0],
+             [0, 0, .015, 0, 0],
+             [0, 250.5 + 22.22j, 0, -280, -33.32j],
+             [0, 0, 0, 33.32j, 12]]
+        self.check_read(_hermitian_example, a,
+                        (5, 5, 7, 'coordinate', 'complex', 'hermitian'))
+
+    def test_read_skew(self):
+        a = [[1, 0, 0, 0, 0],
+             [0, 10.5, 0, -250.5, 0],
+             [0, 0, .015, 0, 0],
+             [0, 250.5, 0, -280, 0],
+             [0, 0, 0, 0, 12]]
+        self.check_read(_skew_example, a,
+                        (5, 5, 7, 'coordinate', 'real', 'skew-symmetric'))
+
+    def test_read_symmetric(self):
+        a = [[1, 0, 0, 0, 0],
+             [0, 10.5, 0, 250.5, 0],
+             [0, 0, .015, 0, 0],
+             [0, 250.5, 0, -280, 8],
+             [0, 0, 0, 8, 12]]
+        self.check_read(_symmetric_example, a,
+                        (5, 5, 7, 'coordinate', 'real', 'symmetric'))
+
+    def test_read_symmetric_pattern(self):
+        a = [[1, 0, 0, 0, 0],
+             [0, 1, 0, 1, 0],
+             [0, 0, 1, 0, 0],
+             [0, 1, 0, 1, 1],
+             [0, 0, 0, 1, 1]]
+        self.check_read(_symmetric_pattern_example, a,
+                        (5, 5, 7, 'coordinate', 'pattern', 'symmetric'))
+
+    def test_empty_write_read(self):
+        # https://github.com/scipy/scipy/issues/1410 (Trac #883)
+
+        b = scipy.sparse.coo_matrix((10, 10))
+        mmwrite(self.fn, b)
+
+        assert_equal(mminfo(self.fn),
+                     (10, 10, 0, 'coordinate', 'real', 'symmetric'))
+        a = b.todense()
+        b = mmread(self.fn).todense()
+        assert_array_almost_equal(a, b)
+
+    def test_bzip2_py3(self):
+        # test if fix for #2152 works
+        try:
+            # bz2 module isn't always built when building Python.
+            import bz2
+        except ImportError:
+            return
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+
+        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+        mmwrite(self.fn, b)
+
+        fn_bzip2 = "%s.bz2" % self.fn
+        with open(self.fn, 'rb') as f_in:
+            f_out = bz2.BZ2File(fn_bzip2, 'wb')
+            f_out.write(f_in.read())
+            f_out.close()
+
+        a = mmread(fn_bzip2).todense()
+        assert_array_almost_equal(a, b.todense())
+
+    def test_gzip_py3(self):
+        # test if fix for #2152 works
+        try:
+            # gzip module can be missing from Python installation
+            import gzip
+        except ImportError:
+            return
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+
+        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+        mmwrite(self.fn, b)
+
+        fn_gzip = "%s.gz" % self.fn
+        with open(self.fn, 'rb') as f_in:
+            f_out = gzip.open(fn_gzip, 'wb')
+            f_out.write(f_in.read())
+            f_out.close()
+
+        a = mmread(fn_gzip).todense()
+        assert_array_almost_equal(a, b.todense())
+
+    def test_real_write_read(self):
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+
+        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+        mmwrite(self.fn, b)
+
+        assert_equal(mminfo(self.fn),
+                     (5, 5, 8, 'coordinate', 'real', 'general'))
+        a = b.todense()
+        b = mmread(self.fn).todense()
+        assert_array_almost_equal(a, b)
+
+    def test_complex_write_read(self):
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+        V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
+                   250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
+
+        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+        mmwrite(self.fn, b)
+
+        assert_equal(mminfo(self.fn),
+                     (5, 5, 8, 'coordinate', 'complex', 'general'))
+        a = b.todense()
+        b = mmread(self.fn).todense()
+        assert_array_almost_equal(a, b)
+
+    def test_sparse_formats(self):
+        mats = []
+
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+
+        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+        mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
+
+        V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
+                   250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
+        mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
+
+        for mat in mats:
+            expected = mat.todense()
+            for fmt in ['csr', 'csc', 'coo']:
+                fn = mktemp(dir=self.tmpdir)  # safe, we own tmpdir
+                mmwrite(fn, mat.asformat(fmt))
+
+                result = mmread(fn).todense()
+                assert_array_almost_equal(result, expected)
+
+    def test_precision(self):
+        test_values = [pi] + [10**(i) for i in range(0, -10, -1)]
+        test_precisions = range(1, 10)
+        for value in test_values:
+            for precision in test_precisions:
+                # construct sparse matrix with test value at last main diagonal
+                n = 10**precision + 1
+                A = scipy.sparse.dok_matrix((n, n))
+                A[n-1, n-1] = value
+                # write matrix with test precision and read again
+                mmwrite(self.fn, A, precision=precision)
+                A = scipy.io.mmread(self.fn)
+                # check for right entries in matrix
+                assert_array_equal(A.row, [n-1])
+                assert_array_equal(A.col, [n-1])
+                assert_array_almost_equal(A.data,
+                                          [float('%%.%dg' % precision % value)])
diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_mmio.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_mmio.pyc
new file mode 100644
index 0000000..25776c1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_mmio.pyc differ
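[Editor's note: test_mmio.py above covers scipy's Matrix Market reader and writer. A small sketch of the round trip the tests repeat, using only mmwrite, mminfo and mmread as they appear above (example.mtx is an arbitrary filename):

import numpy as np
import scipy.sparse
from scipy.io import mmwrite, mmread, mminfo

# A sparse matrix in COO form: row indices, column indices, values.
I = np.array([0, 2, 1])
J = np.array([0, 1, 2])
V = np.array([1.0, -2.5, 3.0])
a = scipy.sparse.coo_matrix((V, (I, J)), shape=(3, 3))

mmwrite('example.mtx', a)

# mminfo reports (rows, cols, entries, format, field, symmetry),
# matching the tuples asserted throughout the tests above.
print(mminfo('example.mtx'))  # (3, 3, 3, 'coordinate', 'real', 'general')

b = mmread('example.mtx')  # sparse input comes back as a sparse matrix
assert np.allclose(a.todense(), b.todense())
]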
0000000..25776c1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_mmio.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_netcdf.py b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_netcdf.py new file mode 100644 index 0000000..e22fd2d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_netcdf.py @@ -0,0 +1,544 @@ +''' Tests for netcdf ''' +from __future__ import division, print_function, absolute_import + +import os +from os.path import join as pjoin, dirname +import shutil +import tempfile +import warnings +from io import BytesIO +from glob import glob +from contextlib import contextmanager + +import numpy as np +from numpy.testing import assert_, assert_allclose, assert_equal +from pytest import raises as assert_raises + +from scipy.io.netcdf import netcdf_file, IS_PYPY + +from scipy._lib._numpy_compat import suppress_warnings +from scipy._lib._tmpdirs import in_tempdir + +TEST_DATA_PATH = pjoin(dirname(__file__), 'data') + +N_EG_ELS = 11 # number of elements for example variable +VARTYPE_EG = 'b' # var type for example variable + + +@contextmanager +def make_simple(*args, **kwargs): + f = netcdf_file(*args, **kwargs) + f.history = 'Created for a test' + f.createDimension('time', N_EG_ELS) + time = f.createVariable('time', VARTYPE_EG, ('time',)) + time[:] = np.arange(N_EG_ELS) + time.units = 'days since 2008-01-01' + f.flush() + yield f + f.close() + + +def check_simple(ncfileobj): + '''Example fileobj tests ''' + assert_equal(ncfileobj.history, b'Created for a test') + time = ncfileobj.variables['time'] + assert_equal(time.units, b'days since 2008-01-01') + assert_equal(time.shape, (N_EG_ELS,)) + assert_equal(time[-1], N_EG_ELS-1) + +def assert_mask_matches(arr, expected_mask): + ''' + Asserts that the mask of arr is effectively the same as expected_mask. + + In contrast to numpy.ma.testutils.assert_mask_equal, this function allows + testing the 'mask' of a standard numpy array (the mask in this case is treated + as all False). + + Parameters + ---------- + arr: ndarray or MaskedArray + Array to test. + expected_mask: array_like of booleans + A list giving the expected mask. + ''' + + mask = np.ma.getmaskarray(arr) + assert_equal(mask, expected_mask) + + +def test_read_write_files(): + # test round trip for example file + cwd = os.getcwd() + try: + tmpdir = tempfile.mkdtemp() + os.chdir(tmpdir) + with make_simple('simple.nc', 'w') as f: + pass + # read the file we just created in 'a' mode + with netcdf_file('simple.nc', 'a') as f: + check_simple(f) + # add something + f._attributes['appendRan'] = 1 + + # To read the NetCDF file we just created:: + with netcdf_file('simple.nc') as f: + # Using mmap is the default (but not on pypy) + assert_equal(f.use_mmap, not IS_PYPY) + check_simple(f) + assert_equal(f._attributes['appendRan'], 1) + + # Read it in append (and check mmap is off) + with netcdf_file('simple.nc', 'a') as f: + assert_(not f.use_mmap) + check_simple(f) + assert_equal(f._attributes['appendRan'], 1) + + # Now without mmap + with netcdf_file('simple.nc', mmap=False) as f: + # Using mmap is the default + assert_(not f.use_mmap) + check_simple(f) + + # To read the NetCDF file we just created, as file object, no + # mmap. 
When n * n_bytes(var_type) is not divisible by 4, this + # raised an error in pupynere 1.0.12 and scipy rev 5893, because + # calculated vsize was rounding up in units of 4 - see + # https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html + with open('simple.nc', 'rb') as fobj: + with netcdf_file(fobj) as f: + # by default, don't use mmap for file-like + assert_(not f.use_mmap) + check_simple(f) + + # Read file from fileobj, with mmap + with suppress_warnings() as sup: + if IS_PYPY: + sup.filter(RuntimeWarning, + "Cannot close a netcdf_file opened with mmap=True.*") + with open('simple.nc', 'rb') as fobj: + with netcdf_file(fobj, mmap=True) as f: + assert_(f.use_mmap) + check_simple(f) + + # Again read it in append mode (adding another att) + with open('simple.nc', 'r+b') as fobj: + with netcdf_file(fobj, 'a') as f: + assert_(not f.use_mmap) + check_simple(f) + f.createDimension('app_dim', 1) + var = f.createVariable('app_var', 'i', ('app_dim',)) + var[:] = 42 + + # And... check that app_var made it in... + with netcdf_file('simple.nc') as f: + check_simple(f) + assert_equal(f.variables['app_var'][:], 42) + + except: # noqa: E722 + os.chdir(cwd) + shutil.rmtree(tmpdir) + raise + os.chdir(cwd) + shutil.rmtree(tmpdir) + + +def test_read_write_sio(): + eg_sio1 = BytesIO() + with make_simple(eg_sio1, 'w') as f1: + str_val = eg_sio1.getvalue() + + eg_sio2 = BytesIO(str_val) + with netcdf_file(eg_sio2) as f2: + check_simple(f2) + + # Test that error is raised if attempting mmap for sio + eg_sio3 = BytesIO(str_val) + assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True) + # Test 64-bit offset write / read + eg_sio_64 = BytesIO() + with make_simple(eg_sio_64, 'w', version=2) as f_64: + str_val = eg_sio_64.getvalue() + + eg_sio_64 = BytesIO(str_val) + with netcdf_file(eg_sio_64) as f_64: + check_simple(f_64) + assert_equal(f_64.version_byte, 2) + # also when version 2 explicitly specified + eg_sio_64 = BytesIO(str_val) + with netcdf_file(eg_sio_64, version=2) as f_64: + check_simple(f_64) + assert_equal(f_64.version_byte, 2) + + +def test_bytes(): + raw_file = BytesIO() + f = netcdf_file(raw_file, mode='w') + # Dataset only has a single variable, dimension and attribute to avoid + # any ambiguity related to order. 
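+    # For orientation (a sketch, not a normative spec): the expected byte
+    # string assembled below follows the NetCDF classic layout, roughly
+    #
+    #     b'CDF\x01'   magic + version byte
+    #     numrecs, dim_list, gatt_list, var_list headers
+    #     ...          then the padded data for the single int16 value
+    #
+    # so any change to the writer's header layout shows up here first.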
+ f.a = 'b' + f.createDimension('dim', 1) + var = f.createVariable('var', np.int16, ('dim',)) + var[0] = -9999 + var.c = 'd' + f.sync() + + actual = raw_file.getvalue() + + expected = (b'CDF\x01' + b'\x00\x00\x00\x00' + b'\x00\x00\x00\x0a' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x03' + b'dim\x00' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x0c' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x01' + b'a\x00\x00\x00' + b'\x00\x00\x00\x02' + b'\x00\x00\x00\x01' + b'b\x00\x00\x00' + b'\x00\x00\x00\x0b' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x03' + b'var\x00' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x00' + b'\x00\x00\x00\x0c' + b'\x00\x00\x00\x01' + b'\x00\x00\x00\x01' + b'c\x00\x00\x00' + b'\x00\x00\x00\x02' + b'\x00\x00\x00\x01' + b'd\x00\x00\x00' + b'\x00\x00\x00\x03' + b'\x00\x00\x00\x04' + b'\x00\x00\x00\x78' + b'\xd8\xf1\x80\x01') + + assert_equal(actual, expected) + + +def test_encoded_fill_value(): + with netcdf_file(BytesIO(), mode='w') as f: + f.createDimension('x', 1) + var = f.createVariable('var', 'S1', ('x',)) + assert_equal(var._get_encoded_fill_value(), b'\x00') + var._FillValue = b'\x01' + assert_equal(var._get_encoded_fill_value(), b'\x01') + var._FillValue = b'\x00\x00' # invalid, wrong size + assert_equal(var._get_encoded_fill_value(), b'\x00') + + +def test_read_example_data(): + # read any example data files + for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')): + with netcdf_file(fname, 'r') as f: + pass + with netcdf_file(fname, 'r', mmap=False) as f: + pass + + +def test_itemset_no_segfault_on_readonly(): + # Regression test for ticket #1202. + # Open the test file in read-only mode. + + filename = pjoin(TEST_DATA_PATH, 'example_1.nc') + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist") + with netcdf_file(filename, 'r', mmap=True) as f: + time_var = f.variables['time'] + + # time_var.assignValue(42) should raise a RuntimeError--not seg. fault! + assert_raises(RuntimeError, time_var.assignValue, 42) + + +def test_appending_issue_gh_8625(): + stream = BytesIO() + + with make_simple(stream, mode='w') as f: + f.createDimension('x', 2) + f.createVariable('x', float, ('x',)) + f.variables['x'][...] = 1 + f.flush() + contents = stream.getvalue() + + stream = BytesIO(contents) + with netcdf_file(stream, mode='a') as f: + f.variables['x'][...] = 2 + + +def test_write_invalid_dtype(): + dtypes = ['int64', 'uint64'] + if np.dtype('int').itemsize == 8: # 64-bit machines + dtypes.append('int') + if np.dtype('uint').itemsize == 8: # 64-bit machines + dtypes.append('uint') + + with netcdf_file(BytesIO(), 'w') as f: + f.createDimension('time', N_EG_ELS) + for dt in dtypes: + assert_raises(ValueError, f.createVariable, 'time', dt, ('time',)) + + +def test_flush_rewind(): + stream = BytesIO() + with make_simple(stream, mode='w') as f: + x = f.createDimension('x',4) + v = f.createVariable('v', 'i2', ['x']) + v[:] = 1 + f.flush() + len_single = len(stream.getvalue()) + f.flush() + len_double = len(stream.getvalue()) + + assert_(len_single == len_double) + + +def test_dtype_specifiers(): + # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work. + # Specifying np.int16 or similar only works from the same commit as this + # comment was made. 
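+    # Illustrative equivalence this test relies on (minimal sketch):
+    #
+    #     >>> import numpy as np
+    #     >>> np.dtype('i2') == np.dtype(np.int16)
+    #     True
+    #
+    # so all three createVariable() calls below should request the same type.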
+    with make_simple(BytesIO(), mode='w') as f:
+        f.createDimension('x',4)
+        f.createVariable('v1', 'i2', ['x'])
+        f.createVariable('v2', np.int16, ['x'])
+        f.createVariable('v3', np.dtype(np.int16), ['x'])
+
+
+def test_ticket_1720():
+    io = BytesIO()
+
+    items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
+
+    with netcdf_file(io, 'w') as f:
+        f.history = 'Created for a test'
+        f.createDimension('float_var', 10)
+        float_var = f.createVariable('float_var', 'f', ('float_var',))
+        float_var[:] = items
+        float_var.units = 'metres'
+        f.flush()
+        contents = io.getvalue()
+
+    io = BytesIO(contents)
+    with netcdf_file(io, 'r') as f:
+        assert_equal(f.history, b'Created for a test')
+        float_var = f.variables['float_var']
+        assert_equal(float_var.units, b'metres')
+        assert_equal(float_var.shape, (10,))
+        assert_allclose(float_var[:], items)
+
+
+def test_mmaps_segfault():
+    filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
+
+    if not IS_PYPY:
+        with warnings.catch_warnings():
+            warnings.simplefilter("error")
+            with netcdf_file(filename, mmap=True) as f:
+                x = f.variables['lat'][:]
+                # should not raise warnings
+                del x
+
+    def doit():
+        with netcdf_file(filename, mmap=True) as f:
+            return f.variables['lat'][:]
+
+    # should not crash
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning,
+                   "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
+        x = doit()
+    x.sum()
+
+
+def test_zero_dimensional_var():
+    io = BytesIO()
+    with make_simple(io, 'w') as f:
+        v = f.createVariable('zerodim', 'i2', [])
+        # This is checking that .isrec returns a boolean - don't simplify it
+        # to 'assert not ...'
+        assert v.isrec is False, v.isrec
+        f.flush()
+
+
+def test_byte_gatts():
+    # Check that global "string" atts work like they did before py3k
+    # unicode and general bytes confusion
+    with in_tempdir():
+        filename = 'g_byte_atts.nc'
+        f = netcdf_file(filename, 'w')
+        f._attributes['holy'] = b'grail'
+        f._attributes['witch'] = 'floats'
+        f.close()
+        f = netcdf_file(filename, 'r')
+        assert_equal(f._attributes['holy'], b'grail')
+        assert_equal(f._attributes['witch'], b'floats')
+        f.close()
+
+
+def test_open_append():
+    # open in 'w' mode and set one attribute
+    with in_tempdir():
+        filename = 'append_dat.nc'
+        f = netcdf_file(filename, 'w')
+        f._attributes['Kilroy'] = 'was here'
+        f.close()
+
+        # open again in 'a', read the att and add a new one
+        f = netcdf_file(filename, 'a')
+        assert_equal(f._attributes['Kilroy'], b'was here')
+        f._attributes['naughty'] = b'Zoot'
+        f.close()
+
+        # open yet again in 'r' and check both atts
+        f = netcdf_file(filename, 'r')
+        assert_equal(f._attributes['Kilroy'], b'was here')
+        assert_equal(f._attributes['naughty'], b'Zoot')
+        f.close()
+
+
+def test_append_recordDimension():
+    dataSize = 100
+
+    with in_tempdir():
+        # Create file with record time dimension
+        with netcdf_file('withRecordDimension.nc', 'w') as f:
+            f.createDimension('time', None)
+            f.createVariable('time', 'd', ('time',))
+            f.createDimension('x', dataSize)
+            x = f.createVariable('x', 'd', ('x',))
+            x[:] = np.array(range(dataSize))
+            f.createDimension('y', dataSize)
+            y = f.createVariable('y', 'd', ('y',))
+            y[:] = np.array(range(dataSize))
+            f.createVariable('testData', 'i', ('time', 'x', 'y'))
+            f.flush()
+            f.close()
+
+        for i in range(2):
+            # Open the file in append mode and add data
+            with netcdf_file('withRecordDimension.nc', 'a') as f:
+                f.variables['time'].data = np.append(f.variables["time"].data, i)
+                f.variables['testData'][i, :, :] = np.ones((dataSize,
dataSize))*i + f.flush() + + # Read the file and check that append worked + with netcdf_file('withRecordDimension.nc') as f: + assert_equal(f.variables['time'][-1], i) + assert_equal(f.variables['testData'][-1, :, :].copy(), np.ones((dataSize, dataSize))*i) + assert_equal(f.variables['time'].data.shape[0], i+1) + assert_equal(f.variables['testData'].data.shape[0], i+1) + + # Read the file and check that 'data' was not saved as user defined + # attribute of testData variable during append operation + with netcdf_file('withRecordDimension.nc') as f: + with assert_raises(KeyError) as ar: + f.variables['testData']._attributes['data'] + ex = ar.value + assert_equal(ex.args[0], 'data') + +def test_maskandscale(): + t = np.linspace(20, 30, 15) + t[3] = 100 + tm = np.ma.masked_greater(t, 99) + fname = pjoin(TEST_DATA_PATH, 'example_2.nc') + with netcdf_file(fname, maskandscale=True) as f: + Temp = f.variables['Temperature'] + assert_equal(Temp.missing_value, 9999) + assert_equal(Temp.add_offset, 20) + assert_equal(Temp.scale_factor, np.float32(0.01)) + found = Temp[:].compressed() + del Temp # Remove ref to mmap, so file can be closed. + expected = np.round(tm.compressed(), 2) + assert_allclose(found, expected) + + with in_tempdir(): + newfname = 'ms.nc' + f = netcdf_file(newfname, 'w', maskandscale=True) + f.createDimension('Temperature', len(tm)) + temp = f.createVariable('Temperature', 'i', ('Temperature',)) + temp.missing_value = 9999 + temp.scale_factor = 0.01 + temp.add_offset = 20 + temp[:] = tm + f.close() + + with netcdf_file(newfname, maskandscale=True) as f: + Temp = f.variables['Temperature'] + assert_equal(Temp.missing_value, 9999) + assert_equal(Temp.add_offset, 20) + assert_equal(Temp.scale_factor, np.float32(0.01)) + expected = np.round(tm.compressed(), 2) + found = Temp[:].compressed() + del Temp + assert_allclose(found, expected) + + +# ------------------------------------------------------------------------ +# Test reading with masked values (_FillValue / missing_value) +# ------------------------------------------------------------------------ + +def test_read_withValuesNearFillValue(): + # Regression test for ticket #5626 + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var1_fillval0'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_withNoFillValue(): + # For a variable with no fill value, reading data with maskandscale=True + # should return unmasked data + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var2_noFillval'][:] + assert_mask_matches(vardata, [False, False, False]) + assert_equal(vardata, [1,2,3]) + +def test_read_withFillValueAndMissingValue(): + # For a variable with both _FillValue and missing_value, the _FillValue + # should be used + IRRELEVANT_VALUE = 9999 + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var3_fillvalAndMissingValue'][:] + assert_mask_matches(vardata, [True, False, False]) + assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3]) + +def test_read_withMissingValue(): + # For a variable with missing_value but not _FillValue, the missing_value + # should be used + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var4_missingValue'][:] + assert_mask_matches(vardata, [False, True, False]) + +def 
test_read_withFillValNaN(): + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var5_fillvalNaN'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_withChar(): + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var6_char'][:] + assert_mask_matches(vardata, [False, True, False]) + +def test_read_with2dVar(): + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + with netcdf_file(fname, maskandscale=True) as f: + vardata = f.variables['var7_2d'][:] + assert_mask_matches(vardata, [[True, False], [False, False], [False, True]]) + +def test_read_withMaskAndScaleFalse(): + # If a variable has a _FillValue (or missing_value) attribute, but is read + # with maskandscale set to False, the result should be unmasked + fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc') + # Open file with mmap=False to avoid problems with closing a mmap'ed file + # when arrays referring to its data still exist: + with netcdf_file(fname, maskandscale=False, mmap=False) as f: + vardata = f.variables['var3_fillvalAndMissingValue'][:] + assert_mask_matches(vardata, [False, False, False]) + assert_equal(vardata, [1, 2, 3]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_netcdf.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_netcdf.pyc new file mode 100644 index 0000000..14b6010 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_netcdf.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_paths.py b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_paths.py new file mode 100644 index 0000000..4fecfb0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_paths.py @@ -0,0 +1,88 @@ +""" +Ensure that we can use pathlib.Path objects in all relevant IO functions. +""" +import sys + +try: + from pathlib import Path +except ImportError: + # Not available. No fallback import, since we'll skip the entire + # test suite for Python < 3.6. 
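+    # Minimal sketch of the gating assumption: the skipif marker further
+    # down uses `sys.version_info < (3, 6)`, so `Path` is never referenced
+    # on older interpreters and the bare `pass` below is safe.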
+ pass + +import numpy as np +from numpy.testing import assert_ +import pytest + +import scipy.io +import scipy.io.wavfile +from scipy._lib._tmpdirs import tempdir +import scipy.sparse + + +@pytest.mark.skipif(sys.version_info < (3, 6), + reason='Passing path-like objects to IO functions requires Python >= 3.6') +class TestPaths(object): + data = np.arange(5).astype(np.int64) + + def test_savemat(self): + with tempdir() as temp_dir: + path = Path(temp_dir) / 'data.mat' + scipy.io.savemat(path, {'data': self.data}) + assert_(path.is_file()) + + def test_loadmat(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + path = Path(temp_dir) / 'data.mat' + scipy.io.savemat(str(path), {'data': self.data}) + + mat_contents = scipy.io.loadmat(path) + assert_((mat_contents['data'] == self.data).all()) + + def test_whosmat(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + path = Path(temp_dir) / 'data.mat' + scipy.io.savemat(str(path), {'data': self.data}) + + contents = scipy.io.whosmat(path) + assert_(contents[0] == ('data', (1, 5), 'int64')) + + def test_readsav(self): + path = Path(__file__).parent / 'data/scalar_string.sav' + scipy.io.readsav(path) + + def test_hb_read(self): + # Save data with string path, load with pathlib.Path + with tempdir() as temp_dir: + data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) + path = Path(temp_dir) / 'data.hb' + scipy.io.harwell_boeing.hb_write(str(path), data) + + data_new = scipy.io.harwell_boeing.hb_read(path) + assert_((data_new != data).nnz == 0) + + def test_hb_write(self): + with tempdir() as temp_dir: + data = scipy.sparse.csr_matrix(scipy.sparse.eye(3)) + path = Path(temp_dir) / 'data.hb' + scipy.io.harwell_boeing.hb_write(path, data) + assert_(path.is_file()) + + def test_netcdf_file(self): + path = Path(__file__).parent / 'data/example_1.nc' + scipy.io.netcdf.netcdf_file(path) + + def test_wavfile_read(self): + path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' + scipy.io.wavfile.read(path) + + def test_wavfile_write(self): + # Read from str path, write to Path + input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav' + rate, data = scipy.io.wavfile.read(str(input_path)) + + with tempdir() as temp_dir: + output_path = Path(temp_dir) / input_path.name + scipy.io.wavfile.write(output_path, rate, data) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_paths.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_paths.pyc new file mode 100644 index 0000000..f077303 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_paths.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_wavfile.py b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_wavfile.py new file mode 100644 index 0000000..546fa8f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_wavfile.py @@ -0,0 +1,159 @@ +from __future__ import division, print_function, absolute_import + +import os +import sys +import tempfile +from io import BytesIO + +import numpy as np +from numpy.testing import assert_equal, assert_, assert_array_equal +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +from scipy.io import wavfile + + +def datafile(fn): + return os.path.join(os.path.dirname(__file__), 'data', fn) + + +def test_read_1(): + for mmap in [False, True]: + rate, data = 
wavfile.read(datafile('test-44100Hz-le-1ch-4bytes.wav'), + mmap=mmap) + + assert_equal(rate, 44100) + assert_(np.issubdtype(data.dtype, np.int32)) + assert_equal(data.shape, (4410,)) + + del data + + +def test_read_2(): + for mmap in [False, True]: + rate, data = wavfile.read(datafile('test-8000Hz-le-2ch-1byteu.wav'), + mmap=mmap) + assert_equal(rate, 8000) + assert_(np.issubdtype(data.dtype, np.uint8)) + assert_equal(data.shape, (800, 2)) + + del data + +def test_read_3(): + for mmap in [False, True]: + rate, data = wavfile.read(datafile('test-44100Hz-2ch-32bit-float-le.wav'), + mmap=mmap) + assert_equal(rate, 44100) + assert_(np.issubdtype(data.dtype, np.float32)) + assert_equal(data.shape, (441, 2)) + + del data + +def test_read_4(): + for mmap in [False, True]: + with suppress_warnings() as sup: + sup.filter(wavfile.WavFileWarning, + "Chunk .non-data. not understood, skipping it") + rate, data = wavfile.read(datafile('test-48000Hz-2ch-64bit-float-le-wavex.wav'), + mmap=mmap) + + assert_equal(rate, 48000) + assert_(np.issubdtype(data.dtype, np.float64)) + assert_equal(data.shape, (480, 2)) + + del data + + +def test_read_5(): + for mmap in [False, True]: + rate, data = wavfile.read(datafile('test-44100Hz-2ch-32bit-float-be.wav'), + mmap=mmap) + assert_equal(rate, 44100) + assert_(np.issubdtype(data.dtype, np.float32)) + assert_(data.dtype.byteorder == '>' or (sys.byteorder == 'big' and + data.dtype.byteorder == '=')) + assert_equal(data.shape, (441, 2)) + + del data + + +def test_read_fail(): + for mmap in [False, True]: + fp = open(datafile('example_1.nc'), 'rb') + assert_raises(ValueError, wavfile.read, fp, mmap=mmap) + fp.close() + + +def test_read_early_eof(): + for mmap in [False, True]: + fp = open(datafile('test-44100Hz-le-1ch-4bytes-early-eof.wav'), 'rb') + assert_raises(ValueError, wavfile.read, fp, mmap=mmap) + fp.close() + + +def test_read_incomplete_chunk(): + for mmap in [False, True]: + fp = open(datafile('test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav'), 'rb') + assert_raises(ValueError, wavfile.read, fp, mmap=mmap) + fp.close() + + +def _check_roundtrip(realfile, rate, dtype, channels): + if realfile: + fd, tmpfile = tempfile.mkstemp(suffix='.wav') + os.close(fd) + else: + tmpfile = BytesIO() + try: + data = np.random.rand(100, channels) + if channels == 1: + data = data[:,0] + if dtype.kind == 'f': + # The range of the float type should be in [-1, 1] + data = data.astype(dtype) + else: + data = (data*128).astype(dtype) + + wavfile.write(tmpfile, rate, data) + + for mmap in [False, True]: + rate2, data2 = wavfile.read(tmpfile, mmap=mmap) + + assert_equal(rate, rate2) + assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype) + assert_array_equal(data, data2) + + del data2 + finally: + if realfile: + os.unlink(tmpfile) + + +def test_write_roundtrip(): + for realfile in (False, True): + for dtypechar in ('i', 'u', 'f', 'g', 'q'): + for size in (1, 2, 4, 8): + if size == 1 and dtypechar == 'i': + # signed 8-bit integer PCM is not allowed + continue + if size > 1 and dtypechar == 'u': + # unsigned > 8-bit integer PCM is not allowed + continue + if (size == 1 or size == 2) and dtypechar == 'f': + # 8- or 16-bit float PCM is not expected + continue + if dtypechar in 'gq': + # no size allowed for these types + if size == 1: + size = '' + else: + continue + + for endianness in ('>', '<'): + if size == 1 and endianness == '<': + continue + for rate in (8000, 32000): + for channels in (1, 2, 5): + dt = np.dtype('%s%s%s' % (endianness, dtypechar, size)) + 
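+                            # e.g. ('<', 'i', 2) -> dtype('<i2'): little-endian
+                            # 16-bit PCM; ('>', 'f', 4) -> big-endian float32.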
_check_roundtrip(realfile, rate, dt, channels) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_wavfile.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_wavfile.pyc new file mode 100644 index 0000000..64b7b2d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/tests/test_wavfile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/wavfile.py b/project/venv/lib/python2.7/site-packages/scipy/io/wavfile.py new file mode 100644 index 0000000..f574af2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/io/wavfile.py @@ -0,0 +1,405 @@ +""" +Module to read / write wav files using numpy arrays + +Functions +--------- +`read`: Return the sample rate (in samples/sec) and data from a WAV file. + +`write`: Write a numpy array as a WAV file. + +""" +from __future__ import division, print_function, absolute_import + +import sys +import numpy +import struct +import warnings + + +__all__ = [ + 'WavFileWarning', + 'read', + 'write' +] + + +class WavFileWarning(UserWarning): + pass + + +WAVE_FORMAT_PCM = 0x0001 +WAVE_FORMAT_IEEE_FLOAT = 0x0003 +WAVE_FORMAT_EXTENSIBLE = 0xfffe +KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT) + +# assumes file pointer is immediately +# after the 'fmt ' id + + +def _read_fmt_chunk(fid, is_big_endian): + """ + Returns + ------- + size : int + size of format subchunk in bytes (minus 8 for "fmt " and itself) + format_tag : int + PCM, float, or compressed format + channels : int + number of channels + fs : int + sampling frequency in samples per second + bytes_per_second : int + overall byte rate for the file + block_align : int + bytes per sample, including all channels + bit_depth : int + bits per sample + """ + if is_big_endian: + fmt = '>' + else: + fmt = '<' + + size = res = struct.unpack(fmt+'I', fid.read(4))[0] + bytes_read = 0 + + if size < 16: + raise ValueError("Binary structure of wave file is not compliant") + + res = struct.unpack(fmt+'HHIIHH', fid.read(16)) + bytes_read += 16 + + format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res + + if format_tag == WAVE_FORMAT_EXTENSIBLE and size >= (16+2): + ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0] + bytes_read += 2 + if ext_chunk_size >= 22: + extensible_chunk_data = fid.read(22) + bytes_read += 22 + raw_guid = extensible_chunk_data[2+4:2+4+16] + # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361) + # MS GUID byte order: first three groups are native byte order, + # rest is Big Endian + if is_big_endian: + tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71' + else: + tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71' + if raw_guid.endswith(tail): + format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0] + else: + raise ValueError("Binary structure of wave file is not compliant") + + if format_tag not in KNOWN_WAVE_FORMATS: + raise ValueError("Unknown wave file format") + + # move file pointer to next chunk + if size > (bytes_read): + fid.read(size - bytes_read) + + return (size, format_tag, channels, fs, bytes_per_second, block_align, + bit_depth) + + +# assumes file pointer is immediately after the 'data' id +def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, + mmap=False): + if is_big_endian: + fmt = '>I' + else: + fmt = '<I' + + # Size of the data subchunk in bytes + size = struct.unpack(fmt, fid.read(4))[0] + + # Number of bytes per sample + bytes_per_sample = bit_depth//8 + if bit_depth == 8: + dtype = 'u1' + else: + if 
is_big_endian: + dtype = '>' + else: + dtype = '<' + if format_tag == WAVE_FORMAT_PCM: + dtype += 'i%d' % bytes_per_sample + else: + dtype += 'f%d' % bytes_per_sample + if not mmap: + data = numpy.frombuffer(fid.read(size), dtype=dtype) + else: + start = fid.tell() + data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start, + shape=(size//bytes_per_sample,)) + fid.seek(start + size) + + if channels > 1: + data = data.reshape(-1, channels) + return data + + +def _skip_unknown_chunk(fid, is_big_endian): + if is_big_endian: + fmt = '>I' + else: + fmt = '<I' + + data = fid.read(4) + # call unpack() and seek() only if we have really read data from file + # otherwise empty read at the end of the file would trigger + # unnecessary exception at unpack() call + # in case data equals somehow to 0, there is no need for seek() anyway + if data: + size = struct.unpack(fmt, data)[0] + fid.seek(size, 1) + + +def _read_riff_chunk(fid): + str1 = fid.read(4) # File signature + if str1 == b'RIFF': + is_big_endian = False + fmt = '<I' + elif str1 == b'RIFX': + is_big_endian = True + fmt = '>I' + else: + # There are also .wav files with "FFIR" or "XFIR" signatures? + raise ValueError("File format {}... not " + "understood.".format(repr(str1))) + + # Size of entire file + file_size = struct.unpack(fmt, fid.read(4))[0] + 8 + + str2 = fid.read(4) + if str2 != b'WAVE': + raise ValueError("Not a WAV file.") + + return file_size, is_big_endian + + +def read(filename, mmap=False): + """ + Open a WAV file + + Return the sample rate (in samples/sec) and data from a WAV file. + + Parameters + ---------- + filename : string or open file handle + Input wav file. + mmap : bool, optional + Whether to read data as memory-mapped. + Only to be used on real files (Default: False). + + .. versionadded:: 0.12.0 + + Returns + ------- + rate : int + Sample rate of wav file. + data : numpy array + Data read from wav file. Data-type is determined from the file; + see Notes. + + Notes + ----- + This function cannot read wav files with 24-bit data. + + Common data types: [1]_ + + ===================== =========== =========== ============= + WAV format Min Max NumPy dtype + ===================== =========== =========== ============= + 32-bit floating-point -1.0 +1.0 float32 + 32-bit PCM -2147483648 +2147483647 int32 + 16-bit PCM -32768 +32767 int16 + 8-bit PCM 0 255 uint8 + ===================== =========== =========== ============= + + Note that 8-bit PCM is unsigned. + + References + ---------- + .. 
[1] IBM Corporation and Microsoft Corporation, "Multimedia Programming + Interface and Data Specifications 1.0", section "Data Format of the + Samples", August 1991 + http://www.tactilemedia.com/info/MCI_Control_Info.html + + """ + if hasattr(filename, 'read'): + fid = filename + mmap = False + else: + fid = open(filename, 'rb') + + try: + file_size, is_big_endian = _read_riff_chunk(fid) + fmt_chunk_received = False + channels = 1 + bit_depth = 8 + format_tag = WAVE_FORMAT_PCM + while fid.tell() < file_size: + # read the next chunk + chunk_id = fid.read(4) + + if not chunk_id: + raise ValueError("Unexpected end of file.") + elif len(chunk_id) < 4: + raise ValueError("Incomplete wav chunk.") + + if chunk_id == b'fmt ': + fmt_chunk_received = True + fmt_chunk = _read_fmt_chunk(fid, is_big_endian) + format_tag, channels, fs = fmt_chunk[1:4] + bit_depth = fmt_chunk[6] + if bit_depth not in (8, 16, 32, 64, 96, 128): + raise ValueError("Unsupported bit depth: the wav file " + "has {}-bit data.".format(bit_depth)) + elif chunk_id == b'fact': + _skip_unknown_chunk(fid, is_big_endian) + elif chunk_id == b'data': + if not fmt_chunk_received: + raise ValueError("No fmt chunk before data") + data = _read_data_chunk(fid, format_tag, channels, bit_depth, + is_big_endian, mmap) + elif chunk_id == b'LIST': + # Someday this could be handled properly but for now skip it + _skip_unknown_chunk(fid, is_big_endian) + elif chunk_id in (b'JUNK', b'Fake'): + # Skip alignment chunks without warning + _skip_unknown_chunk(fid, is_big_endian) + else: + warnings.warn("Chunk (non-data) not understood, skipping it.", + WavFileWarning) + _skip_unknown_chunk(fid, is_big_endian) + finally: + if not hasattr(filename, 'read'): + fid.close() + else: + fid.seek(0) + + return fs, data + + +def write(filename, rate, data): + """ + Write a numpy array as a WAV file. + + Parameters + ---------- + filename : string or open file handle + Output wav file. + rate : int + The sample rate (in samples/sec). + data : ndarray + A 1-D or 2-D numpy array of either integer or float data-type. + + Notes + ----- + * Writes a simple uncompressed WAV file. + * To write multiple-channels, use a 2-D array of shape + (Nsamples, Nchannels). + * The bits-per-sample and PCM/float will be determined by the data-type. + + Common data types: [1]_ + + ===================== =========== =========== ============= + WAV format Min Max NumPy dtype + ===================== =========== =========== ============= + 32-bit floating-point -1.0 +1.0 float32 + 32-bit PCM -2147483648 +2147483647 int32 + 16-bit PCM -32768 +32767 int16 + 8-bit PCM 0 255 uint8 + ===================== =========== =========== ============= + + Note that 8-bit PCM is unsigned. + + References + ---------- + .. 
[1] IBM Corporation and Microsoft Corporation, "Multimedia Programming + Interface and Data Specifications 1.0", section "Data Format of the + Samples", August 1991 + http://www.tactilemedia.com/info/MCI_Control_Info.html + + """ + if hasattr(filename, 'write'): + fid = filename + else: + fid = open(filename, 'wb') + + fs = rate + + try: + dkind = data.dtype.kind + if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and + data.dtype.itemsize == 1)): + raise ValueError("Unsupported data type '%s'" % data.dtype) + + header_data = b'' + + header_data += b'RIFF' + header_data += b'\x00\x00\x00\x00' + header_data += b'WAVE' + + # fmt chunk + header_data += b'fmt ' + if dkind == 'f': + format_tag = WAVE_FORMAT_IEEE_FLOAT + else: + format_tag = WAVE_FORMAT_PCM + if data.ndim == 1: + channels = 1 + else: + channels = data.shape[1] + bit_depth = data.dtype.itemsize * 8 + bytes_per_second = fs*(bit_depth // 8)*channels + block_align = channels * (bit_depth // 8) + + fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs, + bytes_per_second, block_align, bit_depth) + if not (dkind == 'i' or dkind == 'u'): + # add cbSize field for non-PCM files + fmt_chunk_data += b'\x00\x00' + + header_data += struct.pack('<I', len(fmt_chunk_data)) + header_data += fmt_chunk_data + + # fact chunk (non-PCM files) + if not (dkind == 'i' or dkind == 'u'): + header_data += b'fact' + header_data += struct.pack('<II', 4, data.shape[0]) + + # check data size (needs to be immediately before the data chunk) + if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF: + raise ValueError("Data exceeds wave file size limit") + + fid.write(header_data) + + # data chunk + fid.write(b'data') + fid.write(struct.pack('<I', data.nbytes)) + if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and + sys.byteorder == 'big'): + data = data.byteswap() + _array_tofile(fid, data) + + # Determine file size and place it in correct + # position at start of the file. + size = fid.tell() + fid.seek(4) + fid.write(struct.pack('<I', size-8)) + + finally: + if not hasattr(filename, 'write'): + fid.close() + else: + fid.seek(0) + + +if sys.version_info[0] >= 3: + def _array_tofile(fid, data): + # ravel gives a c-contiguous buffer + fid.write(data.ravel().view('b').data) +else: + def _array_tofile(fid, data): + fid.write(data.tostring()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/io/wavfile.pyc b/project/venv/lib/python2.7/site-packages/scipy/io/wavfile.pyc new file mode 100644 index 0000000..4525741 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/io/wavfile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg.pxd b/project/venv/lib/python2.7/site-packages/scipy/linalg.pxd new file mode 100644 index 0000000..1f656b8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg.pxd @@ -0,0 +1 @@ +from .linalg cimport cython_blas, cython_lapack diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/__init__.py new file mode 100644 index 0000000..c98561c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/__init__.py @@ -0,0 +1,229 @@ +""" +==================================== +Linear algebra (:mod:`scipy.linalg`) +==================================== + +.. currentmodule:: scipy.linalg + +Linear algebra functions. + +.. seealso:: + + `numpy.linalg` for more linear algebra functions. 
Note that + although `scipy.linalg` imports most of them, identically named + functions from `scipy.linalg` may offer more or slightly differing + functionality. + + +Basics +====== + +.. autosummary:: + :toctree: generated/ + + inv - Find the inverse of a square matrix + solve - Solve a linear system of equations + solve_banded - Solve a banded linear system + solveh_banded - Solve a Hermitian or symmetric banded system + solve_circulant - Solve a circulant system + solve_triangular - Solve a triangular matrix + solve_toeplitz - Solve a toeplitz matrix + det - Find the determinant of a square matrix + norm - Matrix and vector norm + lstsq - Solve a linear least-squares problem + pinv - Pseudo-inverse (Moore-Penrose) using lstsq + pinv2 - Pseudo-inverse using svd + pinvh - Pseudo-inverse of hermitian matrix + kron - Kronecker product of two arrays + tril - Construct a lower-triangular matrix from a given matrix + triu - Construct an upper-triangular matrix from a given matrix + orthogonal_procrustes - Solve an orthogonal Procrustes problem + matrix_balance - Balance matrix entries with a similarity transformation + subspace_angles - Compute the subspace angles between two matrices + LinAlgError + LinAlgWarning + +Eigenvalue Problems +=================== + +.. autosummary:: + :toctree: generated/ + + eig - Find the eigenvalues and eigenvectors of a square matrix + eigvals - Find just the eigenvalues of a square matrix + eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix + eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix + eig_banded - Find the eigenvalues and eigenvectors of a banded matrix + eigvals_banded - Find just the eigenvalues of a banded matrix + eigh_tridiagonal - Find the eigenvalues and eigenvectors of a tridiagonal matrix + eigvalsh_tridiagonal - Find just the eigenvalues of a tridiagonal matrix + +Decompositions +============== + +.. autosummary:: + :toctree: generated/ + + lu - LU decomposition of a matrix + lu_factor - LU decomposition returning unordered matrix and pivots + lu_solve - Solve Ax=b using back substitution with output of lu_factor + svd - Singular value decomposition of a matrix + svdvals - Singular values of a matrix + diagsvd - Construct matrix of singular values from output of svd + orth - Construct orthonormal basis for the range of A using svd + null_space - Construct orthonormal basis for the null space of A using svd + ldl - LDL.T decomposition of a Hermitian or a symmetric matrix. + cholesky - Cholesky decomposition of a matrix + cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix + cho_factor - Cholesky decomposition for use in solving a linear system + cho_solve - Solve previously factored linear system + cho_solve_banded - Solve previously factored banded linear system + polar - Compute the polar decomposition. + qr - QR decomposition of a matrix + qr_multiply - QR decomposition and multiplication by Q + qr_update - Rank k QR update + qr_delete - QR downdate on row or column deletion + qr_insert - QR update on row or column insertion + rq - RQ decomposition of a matrix + qz - QZ decomposition of a pair of matrices + ordqz - QZ decomposition of a pair of matrices with reordering + schur - Schur decomposition of a matrix + rsf2csf - Real to complex Schur form + hessenberg - Hessenberg form of a matrix + cdf2rdf - Complex diagonal form to real diagonal block form + +.. 
seealso::

    `scipy.linalg.interpolative` -- Interpolative matrix decompositions


Matrix Functions
================

.. autosummary::
   :toctree: generated/

   expm - Matrix exponential
   logm - Matrix logarithm
   cosm - Matrix cosine
   sinm - Matrix sine
   tanm - Matrix tangent
   coshm - Matrix hyperbolic cosine
   sinhm - Matrix hyperbolic sine
   tanhm - Matrix hyperbolic tangent
   signm - Matrix sign
   sqrtm - Matrix square root
   funm - Evaluating an arbitrary matrix function
   expm_frechet - Frechet derivative of the matrix exponential
   expm_cond - Relative condition number of expm in the Frobenius norm
   fractional_matrix_power - Fractional matrix power


Matrix Equation Solvers
=======================

.. autosummary::
   :toctree: generated/

   solve_sylvester - Solve the Sylvester matrix equation
   solve_continuous_are - Solve the continuous-time algebraic Riccati equation
   solve_discrete_are - Solve the discrete-time algebraic Riccati equation
   solve_continuous_lyapunov - Solve the continuous-time Lyapunov equation
   solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation


Sketches and Random Projections
===============================

.. autosummary::
   :toctree: generated/

   clarkson_woodruff_transform - Applies the Clarkson Woodruff Sketch (a.k.a. CountMin Sketch)

Special Matrices
================

.. autosummary::
   :toctree: generated/

   block_diag - Construct a block diagonal matrix from submatrices
   circulant - Circulant matrix
   companion - Companion matrix
   dft - Discrete Fourier transform matrix
   hadamard - Hadamard matrix of order 2**n
   hankel - Hankel matrix
   helmert - Helmert matrix
   hilbert - Hilbert matrix
   invhilbert - Inverse Hilbert matrix
   leslie - Leslie matrix
   pascal - Pascal matrix
   invpascal - Inverse Pascal matrix
   toeplitz - Toeplitz matrix
   tri - Construct a matrix filled with ones at and below a given diagonal

Low-level routines
==================

.. autosummary::
   :toctree: generated/

   get_blas_funcs
   get_lapack_funcs
   find_best_blas_type

..
seealso:: + + `scipy.linalg.blas` -- Low-level BLAS functions + + `scipy.linalg.lapack` -- Low-level LAPACK functions + + `scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython + + `scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython + +""" # noqa: E501 + +from __future__ import division, print_function, absolute_import + +from .linalg_version import linalg_version as __version__ + +from .misc import * +from .basic import * +from .decomp import * +from .decomp_lu import * +from ._decomp_ldl import * +from .decomp_cholesky import * +from .decomp_qr import * +from ._decomp_qz import * +from .decomp_svd import * +from .decomp_schur import * +from ._decomp_polar import * +from .matfuncs import * +from .blas import * +from .lapack import * +from .special_matrices import * +from ._solvers import * +from ._procrustes import * +from ._decomp_update import * +from ._sketches import * + +__all__ = [s for s in dir() if not s.startswith('_')] + +from numpy.dual import register_func +for k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals', + 'eigvalsh', 'lstsq', 'cholesky']: + try: + register_func(k, eval(k)) + except ValueError: + pass + +try: + register_func('pinv', pinv2) +except ValueError: + pass + +del k, register_func + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/__init__.pyc new file mode 100644 index 0000000..85e8866 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_cython_signature_generator.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_cython_signature_generator.py new file mode 100644 index 0000000..2f45797 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_cython_signature_generator.py @@ -0,0 +1,198 @@ +""" +A script that uses f2py to generate the signature files used to make +the Cython BLAS and LAPACK wrappers from the fortran source code for +LAPACK and the reference BLAS. + +To generate the BLAS wrapper signatures call: +python _cython_signature_generator.py blas <blas_directory> <out_file> + +To generate the LAPACK wrapper signatures call: +python _cython_signature_generator.py lapack <lapack_src_directory> <out_file> + +This script expects to be run on the source directory for +the oldest supported version of LAPACK (currently 3.4.0). +""" + +import glob +import os +from numpy.f2py import crackfortran + +sig_types = {'integer': 'int', + 'complex': 'c', + 'double precision': 'd', + 'real': 's', + 'complex*16': 'z', + 'double complex': 'z', + 'character': 'char', + 'logical': 'bint'} + + +def get_type(info, arg): + argtype = sig_types[info['vars'][arg]['typespec']] + if argtype == 'c' and info['vars'][arg].get('kindselector') is not None: + argtype = 'z' + return argtype + + +def make_signature(filename): + info = crackfortran.crackfortran(filename)[0] + name = info['name'] + if info['block'] == 'subroutine': + return_type = 'void' + else: + return_type = get_type(info, name) + arglist = [' *'.join([get_type(info, arg), arg]) for arg in info['args']] + args = ', '.join(arglist) + # Eliminate strange variable naming that replaces rank with rank_bn. 
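+    #   e.g. (illustrative):
+    #
+    #       >>> 'void foo(int *rank_bn)'.replace('rank_bn', 'rank')
+    #       'void foo(int *rank)'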
+ args = args.replace('rank_bn', 'rank') + return '{0} {1}({2})\n'.format(return_type, name, args) + + +def get_sig_name(line): + return line.split('(')[0].split(' ')[-1] + + +def sigs_from_dir(directory, outfile, manual_wrappers=None, exclusions=None): + if directory[-1] in ['/', '\\']: + directory = directory[:-1] + files = sorted(glob.glob(directory + '/*.f*')) + if exclusions is None: + exclusions = [] + if manual_wrappers is not None: + exclusions += [get_sig_name(l) for l in manual_wrappers.split('\n')] + signatures = [] + for filename in files: + name = os.path.splitext(os.path.basename(filename))[0] + if name in exclusions: + continue + signatures.append(make_signature(filename)) + if manual_wrappers is not None: + signatures += [l + '\n' for l in manual_wrappers.split('\n')] + signatures.sort(key=get_sig_name) + comment = ["# This file was generated by _cython_signature_generator.py.\n", + "# Do not edit this file directly.\n\n"] + with open(outfile, 'w') as f: + f.writelines(comment) + f.writelines(signatures) + +# slamch and dlamch are not in the lapack src directory, but,since they +# already have Python wrappers, we'll wrap them as well. +# The other manual signatures are used because the signature generating +# functions don't work when function pointer arguments are used. + + +lapack_manual_wrappers = '''void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info) +void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info) +void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info) +void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info) +void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info) +void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) +void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info) +void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) +d dlamch(char *cmach) +void ilaver(int *vers_major, int *vers_minor, int *vers_patch) +void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info) +void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, 
int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) +void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info) +void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) +s slamch(char *cmach) +void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info) +void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info) +void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info) +void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info)''' + +if __name__ == '__main__': + from sys import argv + libname, src_dir, outfile = argv[1:] + # Exclude scabs and sisnan since they aren't currently included + # in the scipy-specific ABI wrappers. + if libname.lower() == 'blas': + sigs_from_dir(src_dir, outfile, exclusions=['scabs1', 'xerbla']) + elif libname.lower() == 'lapack': + # Exclude all routines that do not have consistent interfaces from + # LAPACK 3.4.0 through 3.6.0. + # Also exclude routines with string arguments to avoid + # compatibility woes with different standards for string arguments. + exclusions = [ + # Not included because people should be using the + # C standard library function instead. + # sisnan is also not currently included in the + # ABI wrappers. + 'sisnan', 'dlaisnan', 'slaisnan', + # Exclude slaneg because it isn't currently included + # in the ABI wrappers + 'slaneg', + # Excluded because they require Fortran string arguments. + 'ilaenv', 'iparmq', 'lsamen', 'xerbla', + # Exclude XBLAS routines since they aren't included + # by default. 
+ 'cgesvxx', 'dgesvxx', 'sgesvxx', 'zgesvxx', + 'cgerfsx', 'dgerfsx', 'sgerfsx', 'zgerfsx', + 'cla_gerfsx_extended', 'dla_gerfsx_extended', + 'sla_gerfsx_extended', 'zla_gerfsx_extended', + 'cla_geamv', 'dla_geamv', 'sla_geamv', 'zla_geamv', + 'dla_gercond', 'sla_gercond', + 'cla_gercond_c', 'zla_gercond_c', + 'cla_gercond_x', 'zla_gercond_x', + 'cla_gerpvgrw', 'dla_gerpvgrw', + 'sla_gerpvgrw', 'zla_gerpvgrw', + 'csysvxx', 'dsysvxx', 'ssysvxx', 'zsysvxx', + 'csyrfsx', 'dsyrfsx', 'ssyrfsx', 'zsyrfsx', + 'cla_syrfsx_extended', 'dla_syrfsx_extended', + 'sla_syrfsx_extended', 'zla_syrfsx_extended', + 'cla_syamv', 'dla_syamv', 'sla_syamv', 'zla_syamv', + 'dla_syrcond', 'sla_syrcond', + 'cla_syrcond_c', 'zla_syrcond_c', + 'cla_syrcond_x', 'zla_syrcond_x', + 'cla_syrpvgrw', 'dla_syrpvgrw', + 'sla_syrpvgrw', 'zla_syrpvgrw', + 'cposvxx', 'dposvxx', 'sposvxx', 'zposvxx', + 'cporfsx', 'dporfsx', 'sporfsx', 'zporfsx', + 'cla_porfsx_extended', 'dla_porfsx_extended', + 'sla_porfsx_extended', 'zla_porfsx_extended', + 'dla_porcond', 'sla_porcond', + 'cla_porcond_c', 'zla_porcond_c', + 'cla_porcond_x', 'zla_porcond_x', + 'cla_porpvgrw', 'dla_porpvgrw', + 'sla_porpvgrw', 'zla_porpvgrw', + 'cgbsvxx', 'dgbsvxx', 'sgbsvxx', 'zgbsvxx', + 'cgbrfsx', 'dgbrfsx', 'sgbrfsx', 'zgbrfsx', + 'cla_gbrfsx_extended', 'dla_gbrfsx_extended', + 'sla_gbrfsx_extended', 'zla_gbrfsx_extended', + 'cla_gbamv', 'dla_gbamv', 'sla_gbamv', 'zla_gbamv', + 'dla_gbrcond', 'sla_gbrcond', + 'cla_gbrcond_c', 'zla_gbrcond_c', + 'cla_gbrcond_x', 'zla_gbrcond_x', + 'cla_gbrpvgrw', 'dla_gbrpvgrw', + 'sla_gbrpvgrw', 'zla_gbrpvgrw', + 'chesvxx', 'zhesvxx', + 'cherfsx', 'zherfsx', + 'cla_herfsx_extended', 'zla_herfsx_extended', + 'cla_heamv', 'zla_heamv', + 'cla_hercond_c', 'zla_hercond_c', + 'cla_hercond_x', 'zla_hercond_x', + 'cla_herpvgrw', 'zla_herpvgrw', + 'sla_lin_berr', 'cla_lin_berr', + 'dla_lin_berr', 'zla_lin_berr', + 'clarscl2', 'dlarscl2', 'slarscl2', 'zlarscl2', + 'clascl2', 'dlascl2', 'slascl2', 'zlascl2', + 'cla_wwaddw', 'dla_wwaddw', 'sla_wwaddw', 'zla_wwaddw', + # Removed between 3.3.1 and 3.4.0. + 'cla_rpvgrw', 'dla_rpvgrw', 'sla_rpvgrw', 'zla_rpvgrw', + # Signatures changed between 3.4.0 and 3.4.1. 
'dlasq5', 'slasq5',
            # Routines deprecated in LAPACK 3.6.0
            'cgegs', 'cgegv', 'cgelsx',
            'cgeqpf', 'cggsvd', 'cggsvp',
            'clahrd', 'clatzm', 'ctzrqf',
            'dgegs', 'dgegv', 'dgelsx',
            'dgeqpf', 'dggsvd', 'dggsvp',
            'dlahrd', 'dlatzm', 'dtzrqf',
            'sgegs', 'sgegv', 'sgelsx',
            'sgeqpf', 'sggsvd', 'sggsvp',
            'slahrd', 'slatzm', 'stzrqf',
            'zgegs', 'zgegv', 'zgelsx',
            'zgeqpf', 'zggsvd', 'zggsvp',
            'zlahrd', 'zlatzm', 'ztzrqf']
        sigs_from_dir(src_dir, outfile, manual_wrappers=lapack_manual_wrappers,
                      exclusions=exclusions)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_cython_signature_generator.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_cython_signature_generator.pyc
new file mode 100644
index 0000000..35ca488
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_cython_signature_generator.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_ldl.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_ldl.py
new file mode 100644
index 0000000..77ec281
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_ldl.py
@@ -0,0 +1,354 @@
+from __future__ import division, print_function, absolute_import
+
+from warnings import warn
+
+import numpy as np
+from numpy import (atleast_2d, ComplexWarning, arange, zeros_like, imag, diag,
+                   iscomplexobj, tril, triu, argsort, empty_like)
+from .decomp import _asarray_validated
+from .lapack import get_lapack_funcs, _compute_lwork
+
+__all__ = ['ldl']
+
+
+def ldl(A, lower=True, hermitian=True, overwrite_a=False, check_finite=True):
+    """ Computes the LDLt or Bunch-Kaufman factorization of a symmetric/
+    hermitian matrix.
+
+    This function returns a block diagonal matrix D consisting of blocks of
+    size at most 2x2 and also a possibly permuted unit lower triangular
+    matrix ``L`` such that the factorization ``A = L D L^H`` or
+    ``A = L D L^T`` holds. If ``lower`` is False then (again possibly
+    permuted) upper triangular matrices are returned as outer factors.
+
+    The permutation array can be used to triangularize the outer factors
+    simply by a row shuffle, i.e., ``lu[perm, :]`` is an upper/lower
+    triangular matrix. This is also equivalent to multiplication with a
+    permutation matrix ``P.dot(lu)`` where ``P`` is a column-permuted
+    identity matrix ``I[:, perm]``.
+
+    Depending on the value of the boolean ``lower``, only the upper or lower
+    triangular part of the input array is referenced. Hence, a triangular
+    matrix on entry would give the same result as if the full matrix were
+    supplied.
+
+    Parameters
+    ----------
+    a : array_like
+        Square input array
+    lower : bool, optional
+        This switches between the lower and upper triangular outer factors of
+        the factorization. Lower triangular (``lower=True``) is the default.
+    hermitian : bool, optional
+        For complex-valued arrays, this defines whether ``a = a.conj().T`` or
+        ``a = a.T`` is assumed. For real-valued arrays, this switch has no
+        effect.
+    overwrite_a : bool, optional
+        Allow overwriting data in ``a`` (may enhance performance). The default
+        is False.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    lu : ndarray
+        The (possibly) permuted upper/lower triangular outer factor of the
+        factorization.
+ d : ndarray + The block diagonal multiplier of the factorization. + perm : ndarray + The row-permutation index array that brings lu into triangular form. + + Raises + ------ + ValueError + If input array is not square. + ComplexWarning + If a complex-valued array with nonzero imaginary parts on the + diagonal is given and hermitian is set to True. + + Examples + -------- + Given an upper triangular array `a` that represents the full symmetric + array with its entries, obtain `l`, 'd' and the permutation vector `perm`: + + >>> import numpy as np + >>> from scipy.linalg import ldl + >>> a = np.array([[2, -1, 3], [0, 2, 0], [0, 0, 1]]) + >>> lu, d, perm = ldl(a, lower=0) # Use the upper part + >>> lu + array([[ 0. , 0. , 1. ], + [ 0. , 1. , -0.5], + [ 1. , 1. , 1.5]]) + >>> d + array([[-5. , 0. , 0. ], + [ 0. , 1.5, 0. ], + [ 0. , 0. , 2. ]]) + >>> perm + array([2, 1, 0]) + >>> lu[perm, :] + array([[ 1. , 1. , 1.5], + [ 0. , 1. , -0.5], + [ 0. , 0. , 1. ]]) + >>> lu.dot(d).dot(lu.T) + array([[ 2., -1., 3.], + [-1., 2., 0.], + [ 3., 0., 1.]]) + + Notes + ----- + This function uses ``?SYTRF`` routines for symmetric matrices and + ``?HETRF`` routines for Hermitian matrices from LAPACK. See [1]_ for + the algorithm details. + + Depending on the ``lower`` keyword value, only lower or upper triangular + part of the input array is referenced. Moreover, this keyword also defines + the structure of the outer factors of the factorization. + + .. versionadded:: 1.1.0 + + See also + -------- + cholesky, lu + + References + ---------- + .. [1] J.R. Bunch, L. Kaufman, Some stable methods for calculating + inertia and solving symmetric linear systems, Math. Comput. Vol.31, + 1977. DOI: 10.2307/2005787 + + """ + a = atleast_2d(_asarray_validated(A, check_finite=check_finite)) + if a.shape[0] != a.shape[1]: + raise ValueError('The input array "a" should be square.') + # Return empty arrays for empty square input + if a.size == 0: + return empty_like(a), empty_like(a), np.array([], dtype=int) + + n = a.shape[0] + r_or_c = complex if iscomplexobj(a) else float + + # Get the LAPACK routine + if r_or_c is complex and hermitian: + s, sl = 'hetrf', 'hetrf_lwork' + if np.any(imag(diag(a))): + warn('scipy.linalg.ldl():\nThe imaginary parts of the diagonal' + 'are ignored. Use "hermitian=False" for factorization of' + 'complex symmetric arrays.', ComplexWarning, stacklevel=2) + else: + s, sl = 'sytrf', 'sytrf_lwork' + + solver, solver_lwork = get_lapack_funcs((s, sl), (a,)) + lwork = _compute_lwork(solver_lwork, n, lower=lower) + ldu, piv, info = solver(a, lwork=lwork, lower=lower, + overwrite_a=overwrite_a) + if info < 0: + raise ValueError('{} exited with the internal error "illegal value ' + 'in argument number {}". See LAPACK documentation ' + 'for the error codes.'.format(s.upper(), -info)) + + swap_arr, pivot_arr = _ldl_sanitize_ipiv(piv, lower=lower) + d, lu = _ldl_get_d_and_l(ldu, pivot_arr, lower=lower, hermitian=hermitian) + lu, perm = _ldl_construct_tri_factor(lu, swap_arr, pivot_arr, lower=lower) + + return lu, d, perm + + +def _ldl_sanitize_ipiv(a, lower=True): + """ + This helper function takes the rather strangely encoded permutation array + returned by the LAPACK routines ?(HE/SY)TRF and converts it into + regularized permutation and diagonal pivot size format. + + Since FORTRAN uses 1-indexing and LAPACK uses different start points for + upper and lower formats there are certain offsets in the indices used + below. 
+ + Let's assume a result where the matrix is 6x6 and there are two 2x2 + and two 1x1 blocks reported by the routine. To ease the coding efforts, + we still populate a 6-sized array and fill zeros as the following :: + + pivots = [2, 0, 2, 0, 1, 1] + + This denotes a diagonal matrix of the form :: + + [x x ] + [x x ] + [ x x ] + [ x x ] + [ x ] + [ x] + + In other words, we write 2 when the 2x2 block is first encountered and + automatically write 0 to the next entry and skip the next spin of the + loop. Thus, a separate counter or array appends to keep track of block + sizes are avoided. If needed, zeros can be filtered out later without + losing the block structure. + + Parameters + ---------- + a : ndarray + The permutation array ipiv returned by LAPACK + lower : bool, optional + The switch to select whether upper or lower triangle is chosen in + the LAPACK call. + + Returns + ------- + swap_ : ndarray + The array that defines the row/column swap operations. For example, + if row two is swapped with row four, the result is [0, 3, 2, 3]. + pivots : ndarray + The array that defines the block diagonal structure as given above. + + """ + n = a.size + swap_ = arange(n) + pivots = zeros_like(swap_, dtype=int) + skip_2x2 = False + + # Some upper/lower dependent offset values + # range (s)tart, r(e)nd, r(i)ncrement + x, y, rs, re, ri = (1, 0, 0, n, 1) if lower else (-1, -1, n-1, -1, -1) + + for ind in range(rs, re, ri): + # If previous spin belonged already to a 2x2 block + if skip_2x2: + skip_2x2 = False + continue + + cur_val = a[ind] + # do we have a 1x1 block or not? + if cur_val > 0: + if cur_val != ind+1: + # Index value != array value --> permutation required + swap_[ind] = swap_[cur_val-1] + pivots[ind] = 1 + # Not. + elif cur_val < 0 and cur_val == a[ind+x]: + # first neg entry of 2x2 block identifier + if -cur_val != ind+2: + # Index value != array value --> permutation required + swap_[ind+x] = swap_[-cur_val-1] + pivots[ind+y] = 2 + skip_2x2 = True + else: # Doesn't make sense, give up + raise ValueError('While parsing the permutation array ' + 'in "scipy.linalg.ldl", invalid entries ' + 'found. The array syntax is invalid.') + return swap_, pivots + + +def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True): + """ + Helper function to extract the diagonal and triangular matrices for + LDL.T factorization. + + Parameters + ---------- + ldu : ndarray + The compact output returned by the LAPACK routing + pivs : ndarray + The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For + every 2 there is a succeeding 0. + lower : bool, optional + If set to False, upper triangular part is considered. + hermitian : bool, optional + If set to False a symmetric complex array is assumed. + + Returns + ------- + d : ndarray + The block diagonal matrix. + lu : ndarray + The upper/lower triangular matrix + """ + is_c = iscomplexobj(ldu) + d = diag(diag(ldu)) + n = d.shape[0] + blk_i = 0 # block index + + # row/column offsets for selecting sub-, super-diagonal + x, y = (1, 0) if lower else (0, 1) + + lu = tril(ldu, -1) if lower else triu(ldu, 1) + diag_inds = arange(n) + lu[diag_inds, diag_inds] = 1 + + for blk in pivs[pivs != 0]: + # increment the block index and check for 2s + # if 2 then copy the off diagonals depending on uplo + inc = blk_i + blk + + if blk == 2: + d[blk_i+x, blk_i+y] = ldu[blk_i+x, blk_i+y] + # If Hermitian matrix is factorized, the cross-offdiagonal element + # should be conjugated. 
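+            # For example, a Hermitian 2x2 pivot block [[d1, b], [conj(b), d2]]
+            # is stored by LAPACK with only one off-diagonal entry; the
+            # mirrored entry is reconstructed here, conjugated when the input
+            # is Hermitian and copied as-is for complex symmetric input.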
+ if is_c and hermitian: + d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y].conj() + else: + d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y] + + lu[blk_i+x, blk_i+y] = 0. + blk_i = inc + + return d, lu + + +def _ldl_construct_tri_factor(lu, swap_vec, pivs, lower=True): + """ + Helper function to construct explicit outer factors of LDL factorization. + + If lower is True the permuted factors are multiplied as L(1)*L(2)*...*L(k). + Otherwise, the permuted factors are multiplied as L(k)*...*L(2)*L(1). See + LAPACK documentation for more details. + + Parameters + ---------- + lu : ndarray + The triangular array that is extracted from LAPACK routine call with + ones on the diagonals. + swap_vec : ndarray + The array that defines the row swapping indices. If k'th entry is m + then rows k,m are swapped. Notice that m'th entry is not necessarily + k to avoid undoing the swapping. + pivs : ndarray + The array that defines the block diagonal structure returned by + _ldl_sanitize_ipiv(). + lower : bool, optional + The boolean to switch between lower and upper triangular structure. + + Returns + ------- + lu : ndarray + The square outer factor which satisfies the L * D * L.T = A + perm : ndarray + The permutation vector that brings the lu to the triangular form + + Notes + ----- + Note that the original argument "lu" is overwritten. + + """ + n = lu.shape[0] + perm = arange(n) + # Setup the reading order of the permutation matrix for upper/lower + rs, re, ri = (n-1, -1, -1) if lower else (0, n, 1) + + for ind in range(rs, re, ri): + s_ind = swap_vec[ind] + if s_ind != ind: + # Column start and end positions + col_s = ind if lower else 0 + col_e = n if lower else ind+1 + + # If we stumble upon a 2x2 block include both cols in the perm. + if pivs[ind] == (0 if lower else 2): + col_s += -1 if lower else 0 + col_e += 0 if lower else 1 + lu[[s_ind, ind], col_s:col_e] = lu[[ind, s_ind], col_s:col_e] + perm[[s_ind, ind]] = perm[[ind, s_ind]] + + return lu, argsort(perm) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_ldl.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_ldl.pyc new file mode 100644 index 0000000..1b6fe2b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_ldl.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_polar.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_polar.py new file mode 100644 index 0000000..c8a568e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_polar.py @@ -0,0 +1,112 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.linalg import svd + + +__all__ = ['polar'] + + +def polar(a, side="right"): + """ + Compute the polar decomposition. + + Returns the factors of the polar decomposition [1]_ `u` and `p` such + that ``a = up`` (if `side` is "right") or ``a = pu`` (if `side` is + "left"), where `p` is positive semidefinite. Depending on the shape + of `a`, either the rows or columns of `u` are orthonormal. When `a` + is a square array, `u` is a square unitary array. When `a` is not + square, the "canonical polar decomposition" [2]_ is computed. + + Parameters + ---------- + a : (m, n) array_like + The array to be factored. + side : {'left', 'right'}, optional + Determines whether a right or left polar decomposition is computed. + If `side` is "right", then ``a = up``. If `side` is "left", then + ``a = pu``. The default is "right". 
+
+    Returns
+    -------
+    u : (m, n) ndarray
+        If `a` is square, then `u` is unitary. If m > n, then the columns
+        of `u` are orthonormal, and if m < n, then the rows of `u` are
+        orthonormal.
+    p : ndarray
+        `p` is Hermitian positive semidefinite. If `a` is nonsingular, `p`
+        is positive definite. The shape of `p` is (n, n) or (m, m), depending
+        on whether `side` is "right" or "left", respectively.
+
+    References
+    ----------
+    .. [1] R. A. Horn and C. R. Johnson, "Matrix Analysis", Cambridge
+           University Press, 1985.
+    .. [2] N. J. Higham, "Functions of Matrices: Theory and Computation",
+           SIAM, 2008.
+
+    Examples
+    --------
+    >>> from scipy.linalg import polar
+    >>> a = np.array([[1, -1], [2, 4]])
+    >>> u, p = polar(a)
+    >>> u
+    array([[ 0.85749293, -0.51449576],
+           [ 0.51449576,  0.85749293]])
+    >>> p
+    array([[ 1.88648444,  1.2004901 ],
+           [ 1.2004901 ,  3.94446746]])
+
+    A non-square example, with m < n:
+
+    >>> b = np.array([[0.5, 1, 2], [1.5, 3, 4]])
+    >>> u, p = polar(b)
+    >>> u
+    array([[-0.21196618, -0.42393237,  0.88054056],
+           [ 0.39378971,  0.78757942,  0.4739708 ]])
+    >>> p
+    array([[ 0.48470147,  0.96940295,  1.15122648],
+           [ 0.96940295,  1.9388059 ,  2.30245295],
+           [ 1.15122648,  2.30245295,  3.65696431]])
+    >>> u.dot(p)   # Verify the decomposition.
+    array([[ 0.5,  1. ,  2. ],
+           [ 1.5,  3. ,  4. ]])
+    >>> u.dot(u.T)   # The rows of u are orthonormal.
+    array([[  1.00000000e+00,  -2.07353665e-17],
+           [ -2.07353665e-17,   1.00000000e+00]])
+
+    Another non-square example, with m > n:
+
+    >>> c = b.T
+    >>> u, p = polar(c)
+    >>> u
+    array([[-0.21196618,  0.39378971],
+           [-0.42393237,  0.78757942],
+           [ 0.88054056,  0.4739708 ]])
+    >>> p
+    array([[ 1.23116567,  1.93241587],
+           [ 1.93241587,  4.84930602]])
+    >>> u.dot(p)   # Verify the decomposition.
+    array([[ 0.5,  1.5],
+           [ 1. ,  3. ],
+           [ 2. ,  4. ]])
+    >>> u.T.dot(u)  # The columns of u are orthonormal.
+ array([[ 1.00000000e+00, -1.26363763e-16], + [ -1.26363763e-16, 1.00000000e+00]]) + + """ + if side not in ['right', 'left']: + raise ValueError("`side` must be either 'right' or 'left'") + a = np.asarray(a) + if a.ndim != 2: + raise ValueError("`a` must be a 2-D array.") + + w, s, vh = svd(a, full_matrices=False) + u = w.dot(vh) + if side == 'right': + # a = up + p = (vh.T.conj() * s).dot(vh) + else: + # a = pu + p = (w * s).dot(w.T.conj()) + return u, p diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_polar.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_polar.pyc new file mode 100644 index 0000000..e2a800a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_polar.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_qz.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_qz.py new file mode 100644 index 0000000..4d805ee --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_qz.py @@ -0,0 +1,405 @@ +from __future__ import division, print_function, absolute_import + +import warnings + +import numpy as np +from numpy import asarray_chkfinite + +from .misc import LinAlgError, _datacopied, LinAlgWarning +from .lapack import get_lapack_funcs + +from scipy._lib.six import callable + +__all__ = ['qz', 'ordqz'] + +_double_precision = ['i', 'l', 'd'] + + +def _select_function(sort): + if callable(sort): + # assume the user knows what they're doing + sfunction = sort + elif sort == 'lhp': + sfunction = _lhp + elif sort == 'rhp': + sfunction = _rhp + elif sort == 'iuc': + sfunction = _iuc + elif sort == 'ouc': + sfunction = _ouc + else: + raise ValueError("sort parameter must be None, a callable, or " + "one of ('lhp','rhp','iuc','ouc')") + + return sfunction + + +def _lhp(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + # handles (x, y) = (0, 0) too + out[~nonzero] = False + out[nonzero] = (np.real(x[nonzero]/y[nonzero]) < 0.0) + return out + + +def _rhp(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + # handles (x, y) = (0, 0) too + out[~nonzero] = False + out[nonzero] = (np.real(x[nonzero]/y[nonzero]) > 0.0) + return out + + +def _iuc(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + # handles (x, y) = (0, 0) too + out[~nonzero] = False + out[nonzero] = (abs(x[nonzero]/y[nonzero]) < 1.0) + return out + + +def _ouc(x, y): + out = np.empty_like(x, dtype=bool) + xzero = (x == 0) + yzero = (y == 0) + out[xzero & yzero] = False + out[~xzero & yzero] = True + out[~yzero] = (abs(x[~yzero]/y[~yzero]) > 1.0) + return out + + +def _qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False, + overwrite_b=False, check_finite=True): + if sort is not None: + # Disabled due to segfaults on win32, see ticket 1717. + raise ValueError("The 'sort' input of qz() has to be None and will be " + "removed in a future release. 
Use ordqz instead.") + + if output not in ['real', 'complex', 'r', 'c']: + raise ValueError("argument must be 'real', or 'complex'") + + if check_finite: + a1 = asarray_chkfinite(A) + b1 = asarray_chkfinite(B) + else: + a1 = np.asarray(A) + b1 = np.asarray(B) + + a_m, a_n = a1.shape + b_m, b_n = b1.shape + if not (a_m == a_n == b_m == b_n): + raise ValueError("Array dimensions must be square and agree") + + typa = a1.dtype.char + if output in ['complex', 'c'] and typa not in ['F', 'D']: + if typa in _double_precision: + a1 = a1.astype('D') + typa = 'D' + else: + a1 = a1.astype('F') + typa = 'F' + typb = b1.dtype.char + if output in ['complex', 'c'] and typb not in ['F', 'D']: + if typb in _double_precision: + b1 = b1.astype('D') + typb = 'D' + else: + b1 = b1.astype('F') + typb = 'F' + + overwrite_a = overwrite_a or (_datacopied(a1, A)) + overwrite_b = overwrite_b or (_datacopied(b1, B)) + + gges, = get_lapack_funcs(('gges',), (a1, b1)) + + if lwork is None or lwork == -1: + # get optimal work array size + result = gges(lambda x: None, a1, b1, lwork=-1) + lwork = result[-2][0].real.astype(np.int) + + sfunction = lambda x: None + result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a, + overwrite_b=overwrite_b, sort_t=0) + + info = result[-1] + if info < 0: + raise ValueError("Illegal value in argument {} of gges".format(-info)) + elif info > 0 and info <= a_n: + warnings.warn("The QZ iteration failed. (a,b) are not in Schur " + "form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be " + "correct for J={},...,N".format(info-1), LinAlgWarning, + stacklevel=3) + elif info == a_n+1: + raise LinAlgError("Something other than QZ iteration failed") + elif info == a_n+2: + raise LinAlgError("After reordering, roundoff changed values of some " + "complex eigenvalues so that leading eigenvalues " + "in the Generalized Schur form no longer satisfy " + "sort=True. This could also be due to scaling.") + elif info == a_n+3: + raise LinAlgError("Reordering failed in <s,d,c,z>tgsen") + + return result, gges.typecode + + +def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False, + overwrite_b=False, check_finite=True): + """ + QZ decomposition for generalized eigenvalues of a pair of matrices. + + The QZ, or generalized Schur, decomposition for a pair of N x N + nonsymmetric matrices (A,B) is:: + + (A,B) = (Q*AA*Z', Q*BB*Z') + + where AA, BB is in generalized Schur form if BB is upper-triangular + with non-negative diagonal and AA is upper-triangular, or for real QZ + decomposition (``output='real'``) block upper triangular with 1x1 + and 2x2 blocks. In this case, the 1x1 blocks correspond to real + generalized eigenvalues and 2x2 blocks are 'standardized' by making + the corresponding elements of BB have the form:: + + [ a 0 ] + [ 0 b ] + + and the pair of corresponding 2x2 blocks in AA and BB will have a complex + conjugate pair of generalized eigenvalues. If (``output='complex'``) or + A and B are complex matrices, Z' denotes the conjugate-transpose of Z. + Q and Z are unitary matrices. + + Parameters + ---------- + A : (N, N) array_like + 2d array to decompose + B : (N, N) array_like + 2d array to decompose + output : {'real', 'complex'}, optional + Construct the real or complex QZ decomposition for real matrices. + Default is 'real'. + lwork : int, optional + Work array size. If None or -1, it is automatically computed. + sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional + NOTE: THIS INPUT IS DISABLED FOR NOW. Use ordqz instead. 
+ + Specifies whether the upper eigenvalues should be sorted. A callable + may be passed that, given a eigenvalue, returns a boolean denoting + whether the eigenvalue should be sorted to the top-left (True). For + real matrix pairs, the sort function takes three real arguments + (alphar, alphai, beta). The eigenvalue + ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or + output='complex', the sort function takes two complex arguments + (alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively, + string parameters may be used: + + - 'lhp' Left-hand plane (x.real < 0.0) + - 'rhp' Right-hand plane (x.real > 0.0) + - 'iuc' Inside the unit circle (x*x.conjugate() < 1.0) + - 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) + + Defaults to None (no sorting). + overwrite_a : bool, optional + Whether to overwrite data in a (may improve performance) + overwrite_b : bool, optional + Whether to overwrite data in b (may improve performance) + check_finite : bool, optional + If true checks the elements of `A` and `B` are finite numbers. If + false does no checking and passes matrix through to + underlying algorithm. + + Returns + ------- + AA : (N, N) ndarray + Generalized Schur form of A. + BB : (N, N) ndarray + Generalized Schur form of B. + Q : (N, N) ndarray + The left Schur vectors. + Z : (N, N) ndarray + The right Schur vectors. + + Notes + ----- + Q is transposed versus the equivalent function in Matlab. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy import linalg + >>> np.random.seed(1234) + >>> A = np.arange(9).reshape((3, 3)) + >>> B = np.random.randn(3, 3) + + >>> AA, BB, Q, Z = linalg.qz(A, B) + >>> AA + array([[-13.40928183, -4.62471562, 1.09215523], + [ 0. , 0. , 1.22805978], + [ 0. , 0. , 0.31973817]]) + >>> BB + array([[ 0.33362547, -1.37393632, 0.02179805], + [ 0. , 1.68144922, 0.74683866], + [ 0. , 0. , 0.9258294 ]]) + >>> Q + array([[ 0.14134727, -0.97562773, 0.16784365], + [ 0.49835904, -0.07636948, -0.86360059], + [ 0.85537081, 0.20571399, 0.47541828]]) + >>> Z + array([[-0.24900855, -0.51772687, 0.81850696], + [-0.79813178, 0.58842606, 0.12938478], + [-0.54861681, -0.6210585 , -0.55973739]]) + + See also + -------- + ordqz + """ + # output for real + # AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info + # output for complex + # AA, BB, sdim, alpha, beta, vsl, vsr, work, info + result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort, + overwrite_a=overwrite_a, overwrite_b=overwrite_b, + check_finite=check_finite) + return result[0], result[1], result[-4], result[-3] + + +def ordqz(A, B, sort='lhp', output='real', overwrite_a=False, + overwrite_b=False, check_finite=True): + """QZ decomposition for a pair of matrices with reordering. + + .. versionadded:: 0.17.0 + + Parameters + ---------- + A : (N, N) array_like + 2d array to decompose + B : (N, N) array_like + 2d array to decompose + sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional + Specifies whether the upper eigenvalues should be sorted. A + callable may be passed that, given an ordered pair ``(alpha, + beta)`` representing the eigenvalue ``x = (alpha/beta)``, + returns a boolean denoting whether the eigenvalue should be + sorted to the top-left (True). For the real matrix pairs + ``beta`` is real while ``alpha`` can be complex, and for + complex matrix pairs both ``alpha`` and ``beta`` can be + complex. The callable must be able to accept a numpy + array. 
Alternatively, string parameters may be used: + + - 'lhp' Left-hand plane (x.real < 0.0) + - 'rhp' Right-hand plane (x.real > 0.0) + - 'iuc' Inside the unit circle (x*x.conjugate() < 1.0) + - 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) + + With the predefined sorting functions, an infinite eigenvalue + (i.e. ``alpha != 0`` and ``beta = 0``) is considered to lie in + neither the left-hand nor the right-hand plane, but it is + considered to lie outside the unit circle. For the eigenvalue + ``(alpha, beta) = (0, 0)`` the predefined sorting functions + all return `False`. + output : str {'real','complex'}, optional + Construct the real or complex QZ decomposition for real matrices. + Default is 'real'. + overwrite_a : bool, optional + If True, the contents of A are overwritten. + overwrite_b : bool, optional + If True, the contents of B are overwritten. + check_finite : bool, optional + If true checks the elements of `A` and `B` are finite numbers. If + false does no checking and passes matrix through to + underlying algorithm. + + Returns + ------- + AA : (N, N) ndarray + Generalized Schur form of A. + BB : (N, N) ndarray + Generalized Schur form of B. + alpha : (N,) ndarray + alpha = alphar + alphai * 1j. See notes. + beta : (N,) ndarray + See notes. + Q : (N, N) ndarray + The left Schur vectors. + Z : (N, N) ndarray + The right Schur vectors. + + Notes + ----- + On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the + generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and + ``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T) + that would result if the 2-by-2 diagonal blocks of the real generalized + Schur form of (A,B) were further reduced to triangular form using complex + unitary transformations. If ALPHAI(j) is zero, then the j-th eigenvalue is + real; if positive, then the ``j``-th and ``(j+1)``-st eigenvalues are a + complex conjugate pair, with ``ALPHAI(j+1)`` negative. + + See also + -------- + qz + + Examples + -------- + >>> from scipy.linalg import ordqz + >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) + >>> B = np.array([[0, 6, 0, 0], [5, 0, 2, 1], [5, 2, 6, 6], [4, 7, 7, 7]]) + >>> AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='lhp') + + Since we have sorted for left half plane eigenvalues, negatives come first + + >>> (alpha/beta).real < 0 + array([ True, True, False, False], dtype=bool) + + """ + # NOTE: should users be able to set these? 
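+    # LAPACK workspace-query convention: calling a routine with lwork=-1
+    # (or liwork=-1) performs no computation and instead reports the optimal
+    # workspace size in the returned work array, as done for gges in _qz
+    # above and for tgsen below. A minimal sketch of the same query
+    # (illustrative only; it mirrors the gges call in _qz):
+    #
+    #     gges, = get_lapack_funcs(('gges',), (A, B))
+    #     res = gges(lambda x: None, A, B, lwork=-1)  # size query only
+    #     opt_lwork = int(res[-2][0].real)            # optimal lwork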
+ lwork = None + result, typ = _qz(A, B, output=output, lwork=lwork, sort=None, + overwrite_a=overwrite_a, overwrite_b=overwrite_b, + check_finite=check_finite) + AA, BB, Q, Z = result[0], result[1], result[-4], result[-3] + if typ not in 'cz': + alpha, beta = result[3] + result[4]*1.j, result[5] + else: + alpha, beta = result[3], result[4] + + sfunction = _select_function(sort) + select = sfunction(alpha, beta) + + tgsen, = get_lapack_funcs(('tgsen',), (AA, BB)) + + if lwork is None or lwork == -1: + result = tgsen(select, AA, BB, Q, Z, lwork=-1) + lwork = result[-3][0].real.astype(np.int) + # looks like wrong value passed to ZTGSYL if not + lwork += 1 + + liwork = None + if liwork is None or liwork == -1: + result = tgsen(select, AA, BB, Q, Z, liwork=-1) + liwork = result[-2][0] + + result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork) + + info = result[-1] + if info < 0: + raise ValueError("Illegal value in argument %d of tgsen" % -info) + elif info == 1: + raise ValueError("Reordering of (A, B) failed because the transformed" + " matrix pair (A, B) would be too far from " + "generalized Schur form; the problem is very " + "ill-conditioned. (A, B) may have been partially " + "reorded. If requested, 0 is returned in DIF(*), " + "PL, and PR.") + + # for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif, + # work, iwork, info + if typ in ['f', 'd']: + alpha = result[2] + result[3] * 1.j + return (result[0], result[1], alpha, result[4], result[5], result[6]) + # for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work, + # iwork, info + else: + return result[0], result[1], result[2], result[3], result[4], result[5] diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_qz.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_qz.pyc new file mode 100644 index 0000000..c3b9fac Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_qz.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_update.so b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_update.so new file mode 100755 index 0000000..0044e83 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_update.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_expm_frechet.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_expm_frechet.py new file mode 100644 index 0000000..036800d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_expm_frechet.py @@ -0,0 +1,411 @@ +"""Frechet derivative of the matrix exponential.""" +from __future__ import division, print_function, absolute_import + +import numpy as np +import scipy.linalg + +__all__ = ['expm_frechet', 'expm_cond'] + + +def expm_frechet(A, E, method=None, compute_expm=True, check_finite=True): + """ + Frechet derivative of the matrix exponential of A in the direction E. + + Parameters + ---------- + A : (N, N) array_like + Matrix of which to take the matrix exponential. + E : (N, N) array_like + Matrix direction in which to take the Frechet derivative. + method : str, optional + Choice of algorithm. Should be one of + + - `SPS` (default) + - `blockEnlarge` + + compute_expm : bool, optional + Whether to compute also `expm_A` in addition to `expm_frechet_AE`. + Default is True. + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. 
+ Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + expm_A : ndarray + Matrix exponential of A. + expm_frechet_AE : ndarray + Frechet derivative of the matrix exponential of A in the direction E. + + For ``compute_expm = False``, only `expm_frechet_AE` is returned. + + See also + -------- + expm : Compute the exponential of a matrix. + + Notes + ----- + This section describes the available implementations that can be selected + by the `method` parameter. The default method is *SPS*. + + Method *blockEnlarge* is a naive algorithm. + + Method *SPS* is Scaling-Pade-Squaring [1]_. + It is a sophisticated implementation which should take + only about 3/8 as much time as the naive implementation. + The asymptotics are the same. + + .. versionadded:: 0.13.0 + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009) + Computing the Frechet Derivative of the Matrix Exponential, + with an application to Condition Number Estimation. + SIAM Journal On Matrix Analysis and Applications., + 30 (4). pp. 1639-1657. ISSN 1095-7162 + + Examples + -------- + >>> import scipy.linalg + >>> A = np.random.randn(3, 3) + >>> E = np.random.randn(3, 3) + >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E) + >>> expm_A.shape, expm_frechet_AE.shape + ((3, 3), (3, 3)) + + >>> import scipy.linalg + >>> A = np.random.randn(3, 3) + >>> E = np.random.randn(3, 3) + >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E) + >>> M = np.zeros((6, 6)) + >>> M[:3, :3] = A; M[:3, 3:] = E; M[3:, 3:] = A + >>> expm_M = scipy.linalg.expm(M) + >>> np.allclose(expm_A, expm_M[:3, :3]) + True + >>> np.allclose(expm_frechet_AE, expm_M[:3, 3:]) + True + + """ + if check_finite: + A = np.asarray_chkfinite(A) + E = np.asarray_chkfinite(E) + else: + A = np.asarray(A) + E = np.asarray(E) + if A.ndim != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be a square matrix') + if E.ndim != 2 or E.shape[0] != E.shape[1]: + raise ValueError('expected E to be a square matrix') + if A.shape != E.shape: + raise ValueError('expected A and E to be the same shape') + if method is None: + method = 'SPS' + if method == 'SPS': + expm_A, expm_frechet_AE = expm_frechet_algo_64(A, E) + elif method == 'blockEnlarge': + expm_A, expm_frechet_AE = expm_frechet_block_enlarge(A, E) + else: + raise ValueError('Unknown implementation %s' % method) + if compute_expm: + return expm_A, expm_frechet_AE + else: + return expm_frechet_AE + + +def expm_frechet_block_enlarge(A, E): + """ + This is a helper function, mostly for testing and profiling. + Return expm(A), frechet(A, E) + """ + n = A.shape[0] + M = np.vstack([ + np.hstack([A, E]), + np.hstack([np.zeros_like(A), A])]) + expm_M = scipy.linalg.expm(M) + return expm_M[:n, :n], expm_M[:n, n:] + + +""" +Maximal values ell_m of ||2**-s A|| such that the backward error bound +does not exceed 2**-53. +""" +ell_table_61 = ( + None, + # 1 + 2.11e-8, + 3.56e-4, + 1.08e-2, + 6.49e-2, + 2.00e-1, + 4.37e-1, + 7.83e-1, + 1.23e0, + 1.78e0, + 2.42e0, + # 11 + 3.13e0, + 3.90e0, + 4.74e0, + 5.63e0, + 6.56e0, + 7.52e0, + 8.53e0, + 9.56e0, + 1.06e1, + 1.17e1, + ) + + +# The b vectors and U and V are copypasted +# from scipy.sparse.linalg.matfuncs.py. +# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3) + +def _diff_pade3(A, E, ident): + b = (120., 60., 12., 1.) 
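+    # M2 below is the Frechet (directional) derivative of A2 = A.A in the
+    # direction E, by the product rule: d/dt (A + t*E)^2 at t=0 equals
+    # A.E + E.A. The higher-order _diff_pade* helpers extend the same rule
+    # to M4, M6 and M8 alongside A4, A6 and A8.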
+ A2 = A.dot(A) + M2 = np.dot(A, E) + np.dot(E, A) + U = A.dot(b[3]*A2 + b[1]*ident) + V = b[2]*A2 + b[0]*ident + Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident) + Lv = b[2]*M2 + return U, V, Lu, Lv + + +def _diff_pade5(A, E, ident): + b = (30240., 15120., 3360., 420., 30., 1.) + A2 = A.dot(A) + M2 = np.dot(A, E) + np.dot(E, A) + A4 = np.dot(A2, A2) + M4 = np.dot(A2, M2) + np.dot(M2, A2) + U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident) + V = b[4]*A4 + b[2]*A2 + b[0]*ident + Lu = (A.dot(b[5]*M4 + b[3]*M2) + + E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)) + Lv = b[4]*M4 + b[2]*M2 + return U, V, Lu, Lv + + +def _diff_pade7(A, E, ident): + b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.) + A2 = A.dot(A) + M2 = np.dot(A, E) + np.dot(E, A) + A4 = np.dot(A2, A2) + M4 = np.dot(A2, M2) + np.dot(M2, A2) + A6 = np.dot(A2, A4) + M6 = np.dot(A4, M2) + np.dot(M4, A2) + U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident) + V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident + Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) + + E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)) + Lv = b[6]*M6 + b[4]*M4 + b[2]*M2 + return U, V, Lu, Lv + + +def _diff_pade9(A, E, ident): + b = (17643225600., 8821612800., 2075673600., 302702400., 30270240., + 2162160., 110880., 3960., 90., 1.) + A2 = A.dot(A) + M2 = np.dot(A, E) + np.dot(E, A) + A4 = np.dot(A2, A2) + M4 = np.dot(A2, M2) + np.dot(M2, A2) + A6 = np.dot(A2, A4) + M6 = np.dot(A4, M2) + np.dot(M4, A2) + A8 = np.dot(A4, A4) + M8 = np.dot(A4, M4) + np.dot(M4, A4) + U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident) + V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident + Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) + + E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)) + Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2 + return U, V, Lu, Lv + + +def expm_frechet_algo_64(A, E): + n = A.shape[0] + s = None + ident = np.identity(n) + A_norm_1 = scipy.linalg.norm(A, 1) + m_pade_pairs = ( + (3, _diff_pade3), + (5, _diff_pade5), + (7, _diff_pade7), + (9, _diff_pade9)) + for m, pade in m_pade_pairs: + if A_norm_1 <= ell_table_61[m]: + U, V, Lu, Lv = pade(A, E, ident) + s = 0 + break + if s is None: + # scaling + s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13])))) + A = A * 2.0**-s + E = E * 2.0**-s + # pade order 13 + A2 = np.dot(A, A) + M2 = np.dot(A, E) + np.dot(E, A) + A4 = np.dot(A2, A2) + M4 = np.dot(A2, M2) + np.dot(M2, A2) + A6 = np.dot(A2, A4) + M6 = np.dot(A4, M2) + np.dot(M4, A2) + b = (64764752532480000., 32382376266240000., 7771770303897600., + 1187353796428800., 129060195264000., 10559470521600., + 670442572800., 33522128640., 1323241920., 40840800., 960960., + 16380., 182., 1.) 
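+        # The degree-13 Pade numerator and denominator are evaluated in the
+        # split form U = A.(A6.W1 + W2), V = A6.Z1 + Z2 so that only the
+        # powers A2, A4 and A6 are needed; the Lw*/Lz* terms accumulate the
+        # matching directional derivatives via the product rule, following
+        # (6.11)-(6.13) cited above.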
+ W1 = b[13]*A6 + b[11]*A4 + b[9]*A2 + W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident + Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2 + Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident + W = np.dot(A6, W1) + W2 + U = np.dot(A, W) + V = np.dot(A6, Z1) + Z2 + Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2 + Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2 + Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2 + Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2 + Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2 + Lu = np.dot(A, Lw) + np.dot(E, W) + Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2 + # factor once and solve twice + lu_piv = scipy.linalg.lu_factor(-U + V) + R = scipy.linalg.lu_solve(lu_piv, U + V) + L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot((Lu - Lv), R)) + # squaring + for k in range(s): + L = np.dot(R, L) + np.dot(L, R) + R = np.dot(R, R) + return R, L + + +def vec(M): + """ + Stack columns of M to construct a single vector. + + This is somewhat standard notation in linear algebra. + + Parameters + ---------- + M : 2d array_like + Input matrix + + Returns + ------- + v : 1d ndarray + Output vector + + """ + return M.T.ravel() + + +def expm_frechet_kronform(A, method=None, check_finite=True): + """ + Construct the Kronecker form of the Frechet derivative of expm. + + Parameters + ---------- + A : array_like with shape (N, N) + Matrix to be expm'd. + method : str, optional + Extra keyword to be passed to expm_frechet. + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + K : 2d ndarray with shape (N*N, N*N) + Kronecker form of the Frechet derivative of the matrix exponential. + + Notes + ----- + This function is used to help compute the condition number + of the matrix exponential. + + See also + -------- + expm : Compute a matrix exponential. + expm_frechet : Compute the Frechet derivative of the matrix exponential. + expm_cond : Compute the relative condition number of the matrix exponential + in the Frobenius norm. + + """ + if check_finite: + A = np.asarray_chkfinite(A) + else: + A = np.asarray(A) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected a square matrix') + + n = A.shape[0] + ident = np.identity(n) + cols = [] + for i in range(n): + for j in range(n): + E = np.outer(ident[i], ident[j]) + F = expm_frechet(A, E, + method=method, compute_expm=False, check_finite=False) + cols.append(vec(F)) + return np.vstack(cols).T + + +def expm_cond(A, check_finite=True): + """ + Relative condition number of the matrix exponential in the Frobenius norm. + + Parameters + ---------- + A : 2d array_like + Square input matrix with shape (N, N). + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + kappa : float + The relative condition number of the matrix exponential + in the Frobenius norm + + Notes + ----- + A faster estimate for the condition number in the 1-norm + has been published but is not yet implemented in scipy. + + .. versionadded:: 0.14.0 + + See also + -------- + expm : Compute the exponential of a matrix. + expm_frechet : Compute the Frechet derivative of the matrix exponential. 
+ + Examples + -------- + >>> from scipy.linalg import expm_cond + >>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]]) + >>> k = expm_cond(A) + >>> k + 1.7787805864469866 + + """ + if check_finite: + A = np.asarray_chkfinite(A) + else: + A = np.asarray(A) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected a square matrix') + + X = scipy.linalg.expm(A) + K = expm_frechet_kronform(A, check_finite=False) + + # The following norm choices are deliberate. + # The norms of A and X are Frobenius norms, + # and the norm of K is the induced 2-norm. + A_norm = scipy.linalg.norm(A, 'fro') + X_norm = scipy.linalg.norm(X, 'fro') + K_norm = scipy.linalg.norm(K, 2) + + kappa = (K_norm * A_norm) / X_norm + return kappa diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_expm_frechet.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_expm_frechet.pyc new file mode 100644 index 0000000..ff5202b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_expm_frechet.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_fblas.so b/project/venv/lib/python2.7/site-packages/scipy/linalg/_fblas.so new file mode 100755 index 0000000..7ea451b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_fblas.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_flapack.so b/project/venv/lib/python2.7/site-packages/scipy/linalg/_flapack.so new file mode 100755 index 0000000..d2c73cd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_flapack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_flinalg.so b/project/venv/lib/python2.7/site-packages/scipy/linalg/_flinalg.so new file mode 100755 index 0000000..9b20962 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_flinalg.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_generate_pyx.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_generate_pyx.py new file mode 100644 index 0000000..d8f8005 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_generate_pyx.py @@ -0,0 +1,757 @@ +""" +Code generator script to make the Cython BLAS and LAPACK wrappers +from the files "cython_blas_signatures.txt" and +"cython_lapack_signatures.txt" which contain the signatures for +all the BLAS/LAPACK routines that should be included in the wrappers. 
+""" + +from collections import defaultdict +from operator import itemgetter +import os + +BASE_DIR = os.path.abspath(os.path.dirname(__file__)) + +fortran_types = {'int': 'integer', + 'c': 'complex', + 'd': 'double precision', + 's': 'real', + 'z': 'complex*16', + 'char': 'character', + 'bint': 'logical'} + +c_types = {'int': 'int', + 'c': 'npy_complex64', + 'd': 'double', + 's': 'float', + 'z': 'npy_complex128', + 'char': 'char', + 'bint': 'int', + 'cselect1': '_cselect1', + 'cselect2': '_cselect2', + 'dselect2': '_dselect2', + 'dselect3': '_dselect3', + 'sselect2': '_sselect2', + 'sselect3': '_sselect3', + 'zselect1': '_zselect1', + 'zselect2': '_zselect2'} + + +def arg_names_and_types(args): + return zip(*[arg.split(' *') for arg in args.split(', ')]) + + +pyx_func_template = """ +cdef extern from "{header_name}": + void _fortran_{name} "F_FUNC({name}wrp, {upname}WRP)"({ret_type} *out, {fort_args}) nogil +cdef {ret_type} {name}({args}) nogil: + cdef {ret_type} out + _fortran_{name}(&out, {argnames}) + return out +""" + +npy_types = {'c': 'npy_complex64', 'z': 'npy_complex128', + 'cselect1': '_cselect1', 'cselect2': '_cselect2', + 'dselect2': '_dselect2', 'dselect3': '_dselect3', + 'sselect2': '_sselect2', 'sselect3': '_sselect3', + 'zselect1': '_zselect1', 'zselect2': '_zselect2'} + + +def arg_casts(arg): + if arg in ['npy_complex64', 'npy_complex128', '_cselect1', '_cselect2', + '_dselect2', '_dselect3', '_sselect2', '_sselect3', + '_zselect1', '_zselect2']: + return '<{0}*>'.format(arg) + return '' + + +def pyx_decl_func(name, ret_type, args, header_name): + argtypes, argnames = arg_names_and_types(args) + # Fix the case where one of the arguments has the same name as the + # abbreviation for the argument type. + # Otherwise the variable passed as an argument is considered overwrites + # the previous typedef and Cython compilation fails. 
+ if ret_type in argnames: + argnames = [n if n != ret_type else ret_type + '_' for n in argnames] + argnames = [n if n not in ['lambda', 'in'] else n + '_' + for n in argnames] + args = ', '.join([' *'.join([n, t]) + for n, t in zip(argtypes, argnames)]) + argtypes = [npy_types.get(t, t) for t in argtypes] + fort_args = ', '.join([' *'.join([n, t]) + for n, t in zip(argtypes, argnames)]) + argnames = [arg_casts(t) + n for n, t in zip(argnames, argtypes)] + argnames = ', '.join(argnames) + c_ret_type = c_types[ret_type] + args = args.replace('lambda', 'lambda_') + return pyx_func_template.format(name=name, upname=name.upper(), args=args, + fort_args=fort_args, ret_type=ret_type, + c_ret_type=c_ret_type, argnames=argnames, + header_name=header_name) + + +pyx_sub_template = """cdef extern from "{header_name}": + void _fortran_{name} "F_FUNC({name},{upname})"({fort_args}) nogil +cdef void {name}({args}) nogil: + _fortran_{name}({argnames}) +""" + + +def pyx_decl_sub(name, args, header_name): + argtypes, argnames = arg_names_and_types(args) + argtypes = [npy_types.get(t, t) for t in argtypes] + argnames = [n if n not in ['lambda', 'in'] else n + '_' for n in argnames] + fort_args = ', '.join([' *'.join([n, t]) + for n, t in zip(argtypes, argnames)]) + argnames = [arg_casts(t) + n for n, t in zip(argnames, argtypes)] + argnames = ', '.join(argnames) + args = args.replace('*lambda,', '*lambda_,').replace('*in,', '*in_,') + return pyx_sub_template.format(name=name, upname=name.upper(), + args=args, fort_args=fort_args, + argnames=argnames, header_name=header_name) + + +blas_pyx_preamble = '''# cython: boundscheck = False +# cython: wraparound = False +# cython: cdivision = True + +""" +BLAS Functions for Cython +========================= + +Usable from Cython via:: + + cimport scipy.linalg.cython_blas + +These wrappers do not check for alignment of arrays. +Alignment should be checked before these wrappers are used. + +Raw function pointers (Fortran-style pointer arguments): + +- {} + + +""" + +# Within scipy, these wrappers can be used via relative or absolute cimport. +# Examples: +# from ..linalg cimport cython_blas +# from scipy.linalg cimport cython_blas +# cimport scipy.linalg.cython_blas as cython_blas +# cimport ..linalg.cython_blas as cython_blas + +# Within scipy, if BLAS functions are needed in C/C++/Fortran, +# these wrappers should not be used. +# The original libraries should be linked directly. + +from __future__ import absolute_import + +cdef extern from "fortran_defs.h": + pass + +from numpy cimport npy_complex64, npy_complex128 + +''' + + +def make_blas_pyx_preamble(all_sigs): + names = [sig[0] for sig in all_sigs] + return blas_pyx_preamble.format("\n- ".join(names)) + + +lapack_pyx_preamble = '''""" +LAPACK functions for Cython +=========================== + +Usable from Cython via:: + + cimport scipy.linalg.cython_lapack + +This module provides Cython-level wrappers for all primary routines included +in LAPACK 3.4.0 except for ``zcgesv`` since its interface is not consistent +from LAPACK 3.4.0 to 3.6.0. It also provides some of the +fixed-api auxiliary routines. + +These wrappers do not check for alignment of arrays. +Alignment should be checked before these wrappers are used. + +Raw function pointers (Fortran-style pointer arguments): + +- {} + + +""" + +# Within scipy, these wrappers can be used via relative or absolute cimport. 
+# Examples: +# from ..linalg cimport cython_lapack +# from scipy.linalg cimport cython_lapack +# cimport scipy.linalg.cython_lapack as cython_lapack +# cimport ..linalg.cython_lapack as cython_lapack + +# Within scipy, if LAPACK functions are needed in C/C++/Fortran, +# these wrappers should not be used. +# The original libraries should be linked directly. + +from __future__ import absolute_import + +cdef extern from "fortran_defs.h": + pass + +from numpy cimport npy_complex64, npy_complex128 + +cdef extern from "_lapack_subroutines.h": + # Function pointer type declarations for + # gees and gges families of functions. + ctypedef bint _cselect1(npy_complex64*) + ctypedef bint _cselect2(npy_complex64*, npy_complex64*) + ctypedef bint _dselect2(d*, d*) + ctypedef bint _dselect3(d*, d*, d*) + ctypedef bint _sselect2(s*, s*) + ctypedef bint _sselect3(s*, s*, s*) + ctypedef bint _zselect1(npy_complex128*) + ctypedef bint _zselect2(npy_complex128*, npy_complex128*) + +''' + + +def make_lapack_pyx_preamble(all_sigs): + names = [sig[0] for sig in all_sigs] + return lapack_pyx_preamble.format("\n- ".join(names)) + + +blas_py_wrappers = """ + +# Python-accessible wrappers for testing: + +cdef inline bint _is_contiguous(double[:,:] a, int axis) nogil: + return (a.strides[axis] == sizeof(a[0,0]) or a.shape[axis] == 1) + +cpdef float complex _test_cdotc(float complex[:] cx, float complex[:] cy) nogil: + cdef: + int n = cx.shape[0] + int incx = cx.strides[0] // sizeof(cx[0]) + int incy = cy.strides[0] // sizeof(cy[0]) + return cdotc(&n, &cx[0], &incx, &cy[0], &incy) + +cpdef float complex _test_cdotu(float complex[:] cx, float complex[:] cy) nogil: + cdef: + int n = cx.shape[0] + int incx = cx.strides[0] // sizeof(cx[0]) + int incy = cy.strides[0] // sizeof(cy[0]) + return cdotu(&n, &cx[0], &incx, &cy[0], &incy) + +cpdef double _test_dasum(double[:] dx) nogil: + cdef: + int n = dx.shape[0] + int incx = dx.strides[0] // sizeof(dx[0]) + return dasum(&n, &dx[0], &incx) + +cpdef double _test_ddot(double[:] dx, double[:] dy) nogil: + cdef: + int n = dx.shape[0] + int incx = dx.strides[0] // sizeof(dx[0]) + int incy = dy.strides[0] // sizeof(dy[0]) + return ddot(&n, &dx[0], &incx, &dy[0], &incy) + +cpdef int _test_dgemm(double alpha, double[:,:] a, double[:,:] b, double beta, + double[:,:] c) nogil except -1: + cdef: + char *transa + char *transb + int m, n, k, lda, ldb, ldc + double *a0=&a[0,0] + double *b0=&b[0,0] + double *c0=&c[0,0] + # In the case that c is C contiguous, swap a and b and + # swap whether or not each of them is transposed. + # This can be done because a.dot(b) = b.T.dot(a.T).T. 
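+    # Concretely: the Fortran BLAS expects column-major storage, so when c
+    # is C-contiguous (row-major) the call below is issued as if computing
+    # c.T = b.T.dot(a.T): a and b swap roles, their transpose flags flip,
+    # and the leading dimensions lda/ldb/ldc are recovered from pointer
+    # differences between adjacent rows/columns of the memoryviews.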
+    if _is_contiguous(c, 1):
+        if _is_contiguous(a, 1):
+            transb = 'n'
+            ldb = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
+        elif _is_contiguous(a, 0):
+            transb = 't'
+            ldb = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
+        else:
+            with gil:
+                raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
+        if _is_contiguous(b, 1):
+            transa = 'n'
+            lda = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
+        elif _is_contiguous(b, 0):
+            transa = 't'
+            lda = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
+        else:
+            with gil:
+                raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
+        k = b.shape[0]
+        if k != a.shape[1]:
+            with gil:
+                raise ValueError("Shape mismatch in input arrays.")
+        m = b.shape[1]
+        n = a.shape[0]
+        if n != c.shape[0] or m != c.shape[1]:
+            with gil:
+                raise ValueError("Output array does not have the correct shape.")
+        ldc = (&c[1,0]) - c0 if c.shape[0] > 1 else 1
+        dgemm(transa, transb, &m, &n, &k, &alpha, b0, &lda, a0,
+              &ldb, &beta, c0, &ldc)
+    elif _is_contiguous(c, 0):
+        if _is_contiguous(a, 1):
+            transa = 't'
+            lda = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
+        elif _is_contiguous(a, 0):
+            transa = 'n'
+            lda = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
+        else:
+            with gil:
+                raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
+        if _is_contiguous(b, 1):
+            transb = 't'
+            ldb = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
+        elif _is_contiguous(b, 0):
+            transb = 'n'
+            ldb = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
+        else:
+            with gil:
+                raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
+        m = a.shape[0]
+        k = a.shape[1]
+        if k != b.shape[0]:
+            with gil:
+                raise ValueError("Shape mismatch in input arrays.")
+        n = b.shape[1]
+        if m != c.shape[0] or n != c.shape[1]:
+            with gil:
+                raise ValueError("Output array does not have the correct shape.")
+        ldc = (&c[0,1]) - c0 if c.shape[1] > 1 else 1
+        dgemm(transa, transb, &m, &n, &k, &alpha, a0, &lda, b0,
+              &ldb, &beta, c0, &ldc)
+    else:
+        with gil:
+            raise ValueError("Input 'c' is neither C nor Fortran contiguous.")
+    return 0
+
+cpdef double _test_dnrm2(double[:] x) nogil:
+    cdef:
+        int n = x.shape[0]
+        int incx = x.strides[0] // sizeof(x[0])
+    return dnrm2(&n, &x[0], &incx)
+
+cpdef double _test_dzasum(double complex[:] zx) nogil:
+    cdef:
+        int n = zx.shape[0]
+        int incx = zx.strides[0] // sizeof(zx[0])
+    return dzasum(&n, &zx[0], &incx)
+
+cpdef double _test_dznrm2(double complex[:] x) nogil:
+    cdef:
+        int n = x.shape[0]
+        int incx = x.strides[0] // sizeof(x[0])
+    return dznrm2(&n, &x[0], &incx)
+
+cpdef int _test_icamax(float complex[:] cx) nogil:
+    cdef:
+        int n = cx.shape[0]
+        int incx = cx.strides[0] // sizeof(cx[0])
+    return icamax(&n, &cx[0], &incx)
+
+cpdef int _test_idamax(double[:] dx) nogil:
+    cdef:
+        int n = dx.shape[0]
+        int incx = dx.strides[0] // sizeof(dx[0])
+    return idamax(&n, &dx[0], &incx)
+
+cpdef int _test_isamax(float[:] sx) nogil:
+    cdef:
+        int n = sx.shape[0]
+        int incx = sx.strides[0] // sizeof(sx[0])
+    return isamax(&n, &sx[0], &incx)
+
+cpdef int _test_izamax(double complex[:] zx) nogil:
+    cdef:
+        int n = zx.shape[0]
+        int incx = zx.strides[0] // sizeof(zx[0])
+    return izamax(&n, &zx[0], &incx)
+
+cpdef float _test_sasum(float[:] sx) nogil:
+    cdef:
+        int n = sx.shape[0]
+        int incx = sx.strides[0] // sizeof(sx[0])
+    return sasum(&n, &sx[0], &incx)
+
+cpdef float _test_scasum(float complex[:] cx) nogil:
+    cdef:
+        int n = cx.shape[0]
+        int incx = cx.strides[0] // sizeof(cx[0])
+    return scasum(&n, &cx[0], &incx)
+
+cpdef float _test_scnrm2(float complex[:] x) nogil:
+    cdef:
+        int n = x.shape[0]
+        int incx = x.strides[0] // sizeof(x[0])
+    return scnrm2(&n, &x[0], &incx)
+
+cpdef float _test_sdot(float[:] sx, float[:] sy) nogil:
+    cdef:
+        int n = sx.shape[0]
+        int incx = sx.strides[0] // sizeof(sx[0])
+        int incy = sy.strides[0] // sizeof(sy[0])
+    return sdot(&n, &sx[0], &incx, &sy[0], &incy)
+
+cpdef float _test_snrm2(float[:] x) nogil:
+    cdef:
+        int n = x.shape[0]
+        int incx = x.strides[0] // sizeof(x[0])
+    return snrm2(&n, &x[0], &incx)
+
+cpdef double complex _test_zdotc(double complex[:] zx, double complex[:] zy) nogil:
+    cdef:
+        int n = zx.shape[0]
+        int incx = zx.strides[0] // sizeof(zx[0])
+        int incy = zy.strides[0] // sizeof(zy[0])
+    return zdotc(&n, &zx[0], &incx, &zy[0], &incy)
+
+cpdef double complex _test_zdotu(double complex[:] zx, double complex[:] zy) nogil:
+    cdef:
+        int n = zx.shape[0]
+        int incx = zx.strides[0] // sizeof(zx[0])
+        int incy = zy.strides[0] // sizeof(zy[0])
+    return zdotu(&n, &zx[0], &incx, &zy[0], &incy)
+"""
+
+
+def generate_blas_pyx(func_sigs, sub_sigs, all_sigs, header_name):
+    funcs = "\n".join(pyx_decl_func(*(s+(header_name,))) for s in func_sigs)
+    subs = "\n" + "\n".join(pyx_decl_sub(*(s[::2]+(header_name,)))
+                            for s in sub_sigs)
+    return make_blas_pyx_preamble(all_sigs) + funcs + subs + blas_py_wrappers
+
+
+lapack_py_wrappers = """
+
+# Python-accessible wrappers for testing:
+
+def _test_dlamch(cmach):
+    # This conversion is necessary to handle Python 3 strings.
+    cmach_bytes = bytes(cmach)
+    # Now that it is a bytes representation, a non-temporary variable
+    # must be passed as a part of the function call.
+    cdef char* cmach_char = cmach_bytes
+    return dlamch(cmach_char)
+
+def _test_slamch(cmach):
+    # This conversion is necessary to handle Python 3 strings.
+    cmach_bytes = bytes(cmach)
+    # Now that it is a bytes representation, a non-temporary variable
+    # must be passed as a part of the function call.
+    cdef char* cmach_char = cmach_bytes
+    return slamch(cmach_char)
+"""
+
+
+def generate_lapack_pyx(func_sigs, sub_sigs, all_sigs, header_name):
+    funcs = "\n".join(pyx_decl_func(*(s+(header_name,))) for s in func_sigs)
+    subs = "\n" + "\n".join(pyx_decl_sub(*(s[::2]+(header_name,)))
+                            for s in sub_sigs)
+    preamble = make_lapack_pyx_preamble(all_sigs)
+    return preamble + funcs + subs + lapack_py_wrappers
+
+
+pxd_template = """ctypedef {ret_type} {name}_t({args}) nogil
+cdef {name}_t *{name}_f
+"""
+pxd_template = """cdef {ret_type} {name}({args}) nogil
+"""
+
+
+def pxd_decl(name, ret_type, args):
+    args = args.replace('lambda', 'lambda_').replace('*in,', '*in_,')
+    return pxd_template.format(name=name, ret_type=ret_type, args=args)
+
+
+blas_pxd_preamble = """# Within scipy, these wrappers can be used via relative or absolute cimport.
+# Examples:
+# from ..linalg cimport cython_blas
+# from scipy.linalg cimport cython_blas
+# cimport scipy.linalg.cython_blas as cython_blas
+# cimport ..linalg.cython_blas as cython_blas

+# Within scipy, if BLAS functions are needed in C/C++/Fortran,
+# these wrappers should not be used.
+# The original libraries should be linked directly.
+
+ctypedef float s
+ctypedef double d
+ctypedef float complex c
+ctypedef double complex z
+
+"""
+
+
+def generate_blas_pxd(all_sigs):
+    body = '\n'.join(pxd_decl(*sig) for sig in all_sigs)
+    return blas_pxd_preamble + body
+
+
+lapack_pxd_preamble = """# Within scipy, these wrappers can be used via relative or absolute cimport.
+# Examples: +# from ..linalg cimport cython_lapack +# from scipy.linalg cimport cython_lapack +# cimport scipy.linalg.cython_lapack as cython_lapack +# cimport ..linalg.cython_lapack as cython_lapack + +# Within scipy, if LAPACK functions are needed in C/C++/Fortran, +# these wrappers should not be used. +# The original libraries should be linked directly. + +ctypedef float s +ctypedef double d +ctypedef float complex c +ctypedef double complex z + +# Function pointer type declarations for +# gees and gges families of functions. +ctypedef bint cselect1(c*) +ctypedef bint cselect2(c*, c*) +ctypedef bint dselect2(d*, d*) +ctypedef bint dselect3(d*, d*, d*) +ctypedef bint sselect2(s*, s*) +ctypedef bint sselect3(s*, s*, s*) +ctypedef bint zselect1(z*) +ctypedef bint zselect2(z*, z*) + +""" + + +def generate_lapack_pxd(all_sigs): + return lapack_pxd_preamble + '\n'.join(pxd_decl(*sig) for sig in all_sigs) + + +fortran_template = """ subroutine {name}wrp( + + ret, + + {argnames} + + ) + external {wrapper} + {ret_type} {wrapper} + {ret_type} ret + {argdecls} + ret = {wrapper}( + + {argnames} + + ) + end +""" + +dims = {'work': '(*)', 'ab': '(ldab,*)', 'a': '(lda,*)', 'dl': '(*)', + 'd': '(*)', 'du': '(*)', 'ap': '(*)', 'e': '(*)', 'lld': '(*)'} + +xy_specialized_dims = {'x': '', 'y': ''} +a_specialized_dims = {'a': '(*)'} +special_cases = defaultdict(dict, + ladiv = xy_specialized_dims, + lanhf = a_specialized_dims, + lansf = a_specialized_dims, + lapy2 = xy_specialized_dims, + lapy3 = xy_specialized_dims) + + +def process_fortran_name(name, funcname): + if 'inc' in name: + return name + special = special_cases[funcname[1:]] + if 'x' in name or 'y' in name: + suffix = special.get(name, '(n)') + else: + suffix = special.get(name, '') + return name + suffix + + +def called_name(name): + included = ['cdotc', 'cdotu', 'zdotc', 'zdotu', 'cladiv', 'zladiv'] + if name in included: + return "w" + name + return name + + +def fort_subroutine_wrapper(name, ret_type, args): + wrapper = called_name(name) + types, names = arg_names_and_types(args) + argnames = ',\n + '.join(names) + + names = [process_fortran_name(n, name) for n in names] + argdecls = '\n '.join('{0} {1}'.format(fortran_types[t], n) + for n, t in zip(names, types)) + return fortran_template.format(name=name, wrapper=wrapper, + argnames=argnames, argdecls=argdecls, + ret_type=fortran_types[ret_type]) + + +def generate_fortran(func_sigs): + return "\n".join(fort_subroutine_wrapper(*sig) for sig in func_sigs) + + +def make_c_args(args): + types, names = arg_names_and_types(args) + types = [c_types[arg] for arg in types] + return ', '.join('{0} *{1}'.format(t, n) for t, n in zip(types, names)) + + +c_func_template = ("void F_FUNC({name}wrp, {upname}WRP)" + "({return_type} *ret, {args});\n") + + +def c_func_decl(name, return_type, args): + args = make_c_args(args) + return_type = c_types[return_type] + return c_func_template.format(name=name, upname=name.upper(), + return_type=return_type, args=args) + + +c_sub_template = "void F_FUNC({name},{upname})({args});\n" + + +def c_sub_decl(name, return_type, args): + args = make_c_args(args) + return c_sub_template.format(name=name, upname=name.upper(), args=args) + + +c_preamble = """#ifndef SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H +#define SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H +#include "fortran_defs.h" +#include "numpy/arrayobject.h" +""" + +lapack_decls = """ +typedef int (*_cselect1)(npy_complex64*); +typedef int (*_cselect2)(npy_complex64*, npy_complex64*); +typedef int (*_dselect2)(double*, 
double*); +typedef int (*_dselect3)(double*, double*, double*); +typedef int (*_sselect2)(float*, float*); +typedef int (*_sselect3)(float*, float*, float*); +typedef int (*_zselect1)(npy_complex128*); +typedef int (*_zselect2)(npy_complex128*, npy_complex128*); +""" + +cpp_guard = """ +#ifdef __cplusplus +extern "C" { +#endif + +""" + +c_end = """ +#ifdef __cplusplus +} +#endif +#endif +""" + + +def generate_c_header(func_sigs, sub_sigs, all_sigs, lib_name): + funcs = "".join(c_func_decl(*sig) for sig in func_sigs) + subs = "\n" + "".join(c_sub_decl(*sig) for sig in sub_sigs) + if lib_name == 'LAPACK': + preamble = (c_preamble.format(lib=lib_name) + lapack_decls) + else: + preamble = c_preamble.format(lib=lib_name) + return "".join([preamble, cpp_guard, funcs, subs, c_end]) + + +def split_signature(sig): + name_and_type, args = sig[:-1].split('(') + ret_type, name = name_and_type.split(' ') + return name, ret_type, args + + +def filter_lines(lines): + lines = [line for line in map(str.strip, lines) + if line and not line.startswith('#')] + func_sigs = [split_signature(line) for line in lines + if line.split(' ')[0] != 'void'] + sub_sigs = [split_signature(line) for line in lines + if line.split(' ')[0] == 'void'] + all_sigs = list(sorted(func_sigs + sub_sigs, key=itemgetter(0))) + return func_sigs, sub_sigs, all_sigs + + +def all_newer(src_files, dst_files): + from distutils.dep_util import newer + return all(os.path.exists(dst) and newer(dst, src) + for dst in dst_files for src in src_files) + + +def make_all(blas_signature_file="cython_blas_signatures.txt", + lapack_signature_file="cython_lapack_signatures.txt", + blas_name="cython_blas", + lapack_name="cython_lapack", + blas_fortran_name="_blas_subroutine_wrappers.f", + lapack_fortran_name="_lapack_subroutine_wrappers.f", + blas_header_name="_blas_subroutines.h", + lapack_header_name="_lapack_subroutines.h"): + + src_files = (os.path.abspath(__file__), + blas_signature_file, + lapack_signature_file) + dst_files = (blas_name + '.pyx', + blas_name + '.pxd', + blas_fortran_name, + blas_header_name, + lapack_name + '.pyx', + lapack_name + '.pxd', + lapack_fortran_name, + lapack_header_name) + + os.chdir(BASE_DIR) + + if all_newer(src_files, dst_files): + print("scipy/linalg/_generate_pyx.py: all files up-to-date") + return + + comments = ["This file was generated by _generate_pyx.py.\n", + "Do not edit this file directly.\n"] + ccomment = ''.join(['/* ' + line.rstrip() + ' */\n' + for line in comments]) + '\n' + pyxcomment = ''.join(['# ' + line for line in comments]) + '\n' + fcomment = ''.join(['c ' + line for line in comments]) + '\n' + with open(blas_signature_file, 'r') as f: + blas_sigs = f.readlines() + blas_sigs = filter_lines(blas_sigs) + blas_pyx = generate_blas_pyx(*(blas_sigs + (blas_header_name,))) + with open(blas_name + '.pyx', 'w') as f: + f.write(pyxcomment) + f.write(blas_pyx) + blas_pxd = generate_blas_pxd(blas_sigs[2]) + with open(blas_name + '.pxd', 'w') as f: + f.write(pyxcomment) + f.write(blas_pxd) + blas_fortran = generate_fortran(blas_sigs[0]) + with open(blas_fortran_name, 'w') as f: + f.write(fcomment) + f.write(blas_fortran) + blas_c_header = generate_c_header(*(blas_sigs + ('BLAS',))) + with open(blas_header_name, 'w') as f: + f.write(ccomment) + f.write(blas_c_header) + with open(lapack_signature_file, 'r') as f: + lapack_sigs = f.readlines() + lapack_sigs = filter_lines(lapack_sigs) + lapack_pyx = generate_lapack_pyx(*(lapack_sigs + (lapack_header_name,))) + with open(lapack_name + '.pyx', 'w') as f: + 
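+        # The "do not edit" banner is written first, then the generated
+        # LAPACK .pyx body.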
f.write(pyxcomment) + f.write(lapack_pyx) + lapack_pxd = generate_lapack_pxd(lapack_sigs[2]) + with open(lapack_name + '.pxd', 'w') as f: + f.write(pyxcomment) + f.write(lapack_pxd) + lapack_fortran = generate_fortran(lapack_sigs[0]) + with open(lapack_fortran_name, 'w') as f: + f.write(fcomment) + f.write(lapack_fortran) + lapack_c_header = generate_c_header(*(lapack_sigs + ('LAPACK',))) + with open(lapack_header_name, 'w') as f: + f.write(ccomment) + f.write(lapack_c_header) + + +if __name__ == '__main__': + make_all() diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_generate_pyx.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_generate_pyx.pyc new file mode 100644 index 0000000..eee6c7e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_generate_pyx.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_interpolative.so b/project/venv/lib/python2.7/site-packages/scipy/linalg/_interpolative.so new file mode 100755 index 0000000..428f9bb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_interpolative.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_interpolative_backend.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_interpolative_backend.py new file mode 100644 index 0000000..6b73ba4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_interpolative_backend.py @@ -0,0 +1,1669 @@ +#****************************************************************************** +# Copyright (C) 2013 Kenneth L. Ho +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. Redistributions in binary +# form must reproduce the above copyright notice, this list of conditions and +# the following disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# None of the names of the copyright holders may be used to endorse or +# promote products derived from this software without specific prior written +# permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +#****************************************************************************** + +""" +Direct wrappers for Fortran `id_dist` backend. 
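+
+A typical round trip through these wrappers (a minimal sketch; `A` can be any
+real 2-D ndarray)::
+
+    import numpy as np
+    from scipy.linalg import _interpolative_backend as backend
+
+    A = np.random.randn(40, 5).dot(np.random.randn(5, 40))  # rank-5 matrix
+    k, idx, proj = backend.iddp_id(1e-6, A)        # ID at precision eps
+    B = backend.idd_copycols(A, k, idx)            # skeleton columns of A
+    A_approx = backend.idd_reconid(B, idx, proj)   # close to A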
+""" + +import scipy.linalg._interpolative as _id +import numpy as np + +_RETCODE_ERROR = RuntimeError("nonzero return code") + + +#------------------------------------------------------------------------------ +# id_rand.f +#------------------------------------------------------------------------------ + +def id_srand(n): + """ + Generate standard uniform pseudorandom numbers via a very efficient lagged + Fibonacci method. + + :param n: + Number of pseudorandom numbers to generate. + :type n: int + + :return: + Pseudorandom numbers. + :rtype: :class:`numpy.ndarray` + """ + return _id.id_srand(n) + + +def id_srandi(t): + """ + Initialize seed values for :func:`id_srand` (any appropriately random + numbers will do). + + :param t: + Array of 55 seed values. + :type t: :class:`numpy.ndarray` + """ + t = np.asfortranarray(t) + _id.id_srandi(t) + + +def id_srando(): + """ + Reset seed values to their original values. + """ + _id.id_srando() + + +#------------------------------------------------------------------------------ +# idd_frm.f +#------------------------------------------------------------------------------ + +def idd_frm(n, w, x): + """ + Transform real vector via a composition of Rokhlin's random transform, + random subselection, and an FFT. + + In contrast to :func:`idd_sfrm`, this routine works best when the length of + the transformed vector is the power-of-two integer output by + :func:`idd_frmi`, or when the length is not specified but instead + determined a posteriori from the output. The returned transformed vector is + randomly permuted. + + :param n: + Greatest power-of-two integer satisfying `n <= x.size` as obtained from + :func:`idd_frmi`; `n` is also the length of the output vector. + :type n: int + :param w: + Initialization array constructed by :func:`idd_frmi`. + :type w: :class:`numpy.ndarray` + :param x: + Vector to be transformed. + :type x: :class:`numpy.ndarray` + + :return: + Transformed vector. + :rtype: :class:`numpy.ndarray` + """ + return _id.idd_frm(n, w, x) + + +def idd_sfrm(l, n, w, x): + """ + Transform real vector via a composition of Rokhlin's random transform, + random subselection, and an FFT. + + In contrast to :func:`idd_frm`, this routine works best when the length of + the transformed vector is known a priori. + + :param l: + Length of transformed vector, satisfying `l <= n`. + :type l: int + :param n: + Greatest power-of-two integer satisfying `n <= x.size` as obtained from + :func:`idd_sfrmi`. + :type n: int + :param w: + Initialization array constructed by :func:`idd_sfrmi`. + :type w: :class:`numpy.ndarray` + :param x: + Vector to be transformed. + :type x: :class:`numpy.ndarray` + + :return: + Transformed vector. + :rtype: :class:`numpy.ndarray` + """ + return _id.idd_sfrm(l, n, w, x) + + +def idd_frmi(m): + """ + Initialize data for :func:`idd_frm`. + + :param m: + Length of vector to be transformed. + :type m: int + + :return: + Greatest power-of-two integer `n` satisfying `n <= m`. + :rtype: int + :return: + Initialization array to be used by :func:`idd_frm`. + :rtype: :class:`numpy.ndarray` + """ + return _id.idd_frmi(m) + + +def idd_sfrmi(l, m): + """ + Initialize data for :func:`idd_sfrm`. + + :param l: + Length of output transformed vector. + :type l: int + :param m: + Length of the vector to be transformed. + :type m: int + + :return: + Greatest power-of-two integer `n` satisfying `n <= m`. + :rtype: int + :return: + Initialization array to be used by :func:`idd_sfrm`. 
+ :rtype: :class:`numpy.ndarray` + """ + return _id.idd_sfrmi(l, m) + + +#------------------------------------------------------------------------------ +# idd_id.f +#------------------------------------------------------------------------------ + +def iddp_id(eps, A): + """ + Compute ID of a real matrix to a specified relative precision. + + :param eps: + Relative precision. + :type eps: float + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + + :return: + Rank of ID. + :rtype: int + :return: + Column index array. + :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + k, idx, rnorms = _id.iddp_id(eps, A) + n = A.shape[1] + proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') + return k, idx, proj + + +def iddr_id(A, k): + """ + Compute ID of a real matrix to a specified rank. + + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + :param k: + Rank of ID. + :type k: int + + :return: + Column index array. + :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + idx, rnorms = _id.iddr_id(A, k) + n = A.shape[1] + proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') + return idx, proj + + +def idd_reconid(B, idx, proj): + """ + Reconstruct matrix from real ID. + + :param B: + Skeleton matrix. + :type B: :class:`numpy.ndarray` + :param idx: + Column index array. + :type idx: :class:`numpy.ndarray` + :param proj: + Interpolation coefficients. + :type proj: :class:`numpy.ndarray` + + :return: + Reconstructed matrix. + :rtype: :class:`numpy.ndarray` + """ + B = np.asfortranarray(B) + if proj.size > 0: + return _id.idd_reconid(B, idx, proj) + else: + return B[:, np.argsort(idx)] + + +def idd_reconint(idx, proj): + """ + Reconstruct interpolation matrix from real ID. + + :param idx: + Column index array. + :type idx: :class:`numpy.ndarray` + :param proj: + Interpolation coefficients. + :type proj: :class:`numpy.ndarray` + + :return: + Interpolation matrix. + :rtype: :class:`numpy.ndarray` + """ + return _id.idd_reconint(idx, proj) + + +def idd_copycols(A, k, idx): + """ + Reconstruct skeleton matrix from real ID. + + :param A: + Original matrix. + :type A: :class:`numpy.ndarray` + :param k: + Rank of ID. + :type k: int + :param idx: + Column index array. + :type idx: :class:`numpy.ndarray` + + :return: + Skeleton matrix. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + return _id.idd_copycols(A, k, idx) + + +#------------------------------------------------------------------------------ +# idd_id2svd.f +#------------------------------------------------------------------------------ + +def idd_id2svd(B, idx, proj): + """ + Convert real ID to SVD. + + :param B: + Skeleton matrix. + :type B: :class:`numpy.ndarray` + :param idx: + Column index array. + :type idx: :class:`numpy.ndarray` + :param proj: + Interpolation coefficients. + :type proj: :class:`numpy.ndarray` + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. 
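+        (A consistency sketch: ``U.dot(np.diag(S)).dot(V.T)`` approximately
+        reproduces ``idd_reconid(B, idx, proj)``.)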
+ :rtype: :class:`numpy.ndarray` + """ + B = np.asfortranarray(B) + U, V, S, ier = _id.idd_id2svd(B, idx, proj) + if ier: + raise _RETCODE_ERROR + return U, V, S + + +#------------------------------------------------------------------------------ +# idd_snorm.f +#------------------------------------------------------------------------------ + +def idd_snorm(m, n, matvect, matvec, its=20): + """ + Estimate spectral norm of a real matrix by the randomized power method. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matvect: + Function to apply the matrix transpose to a vector, with call signature + `y = matvect(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvect: function + :param matvec: + Function to apply the matrix to a vector, with call signature + `y = matvec(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec: function + :param its: + Number of power method iterations. + :type its: int + + :return: + Spectral norm estimate. + :rtype: float + """ + snorm, v = _id.idd_snorm(m, n, matvect, matvec, its) + return snorm + + +def idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its=20): + """ + Estimate spectral norm of the difference of two real matrices by the + randomized power method. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matvect: + Function to apply the transpose of the first matrix to a vector, with + call signature `y = matvect(x)`, where `x` and `y` are the input and + output vectors, respectively. + :type matvect: function + :param matvect2: + Function to apply the transpose of the second matrix to a vector, with + call signature `y = matvect2(x)`, where `x` and `y` are the input and + output vectors, respectively. + :type matvect2: function + :param matvec: + Function to apply the first matrix to a vector, with call signature + `y = matvec(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec: function + :param matvec2: + Function to apply the second matrix to a vector, with call signature + `y = matvec2(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec2: function + :param its: + Number of power method iterations. + :type its: int + + :return: + Spectral norm estimate of matrix difference. + :rtype: float + """ + return _id.idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its) + + +#------------------------------------------------------------------------------ +# idd_svd.f +#------------------------------------------------------------------------------ + +def iddr_svd(A, k): + """ + Compute SVD of a real matrix to a specified rank. + + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + :param k: + Rank of SVD. + :type k: int + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + U, V, S, ier = _id.iddr_svd(A, k) + if ier: + raise _RETCODE_ERROR + return U, V, S + + +def iddp_svd(eps, A): + """ + Compute SVD of a real matrix to a specified relative precision. + + :param eps: + Relative precision. + :type eps: float + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. 
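+        (One column per retained singular value; the rank-`k` approximation
+        is ``U.dot(np.diag(S)).dot(V.T)``.)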
+ :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + k, iU, iV, iS, w, ier = _id.iddp_svd(eps, A) + if ier: + raise _RETCODE_ERROR + U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') + V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') + S = w[iS-1:iS+k-1] + return U, V, S + + +#------------------------------------------------------------------------------ +# iddp_aid.f +#------------------------------------------------------------------------------ + +def iddp_aid(eps, A): + """ + Compute ID of a real matrix to a specified relative precision using random + sampling. + + :param eps: + Relative precision. + :type eps: float + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + + :return: + Rank of ID. + :rtype: int + :return: + Column index array. + :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + n2, w = idd_frmi(m) + proj = np.empty(n*(2*n2 + 1) + n2 + 1, order='F') + k, idx, proj = _id.iddp_aid(eps, A, w, proj) + proj = proj[:k*(n-k)].reshape((k, n-k), order='F') + return k, idx, proj + + +def idd_estrank(eps, A): + """ + Estimate rank of a real matrix to a specified relative precision using + random sampling. + + The output rank is typically about 8 higher than the actual rank. + + :param eps: + Relative precision. + :type eps: float + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + + :return: + Rank estimate. + :rtype: int + """ + A = np.asfortranarray(A) + m, n = A.shape + n2, w = idd_frmi(m) + ra = np.empty(n*n2 + (n + 1)*(n2 + 1), order='F') + k, ra = _id.idd_estrank(eps, A, w, ra) + return k + + +#------------------------------------------------------------------------------ +# iddp_asvd.f +#------------------------------------------------------------------------------ + +def iddp_asvd(eps, A): + """ + Compute SVD of a real matrix to a specified relative precision using random + sampling. + + :param eps: + Relative precision. + :type eps: float + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + n2, winit = _id.idd_frmi(m) + w = np.empty( + max((min(m, n) + 1)*(3*m + 5*n + 1) + 25*min(m, n)**2, + (2*n + 1)*(n2 + 1)), + order='F') + k, iU, iV, iS, w, ier = _id.iddp_asvd(eps, A, winit, w) + if ier: + raise _RETCODE_ERROR + U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') + V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') + S = w[iS-1:iS+k-1] + return U, V, S + + +#------------------------------------------------------------------------------ +# iddp_rid.f +#------------------------------------------------------------------------------ + +def iddp_rid(eps, m, n, matvect): + """ + Compute ID of a real matrix to a specified relative precision using random + matrix-vector multiplication. + + :param eps: + Relative precision. + :type eps: float + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matvect: + Function to apply the matrix transpose to a vector, with call signature + `y = matvect(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvect: function + + :return: + Rank of ID. + :rtype: int + :return: + Column index array. 
+ :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + proj = np.empty(m + 1 + 2*n*(min(m, n) + 1), order='F') + k, idx, proj, ier = _id.iddp_rid(eps, m, n, matvect, proj) + if ier != 0: + raise _RETCODE_ERROR + proj = proj[:k*(n-k)].reshape((k, n-k), order='F') + return k, idx, proj + + +def idd_findrank(eps, m, n, matvect): + """ + Estimate rank of a real matrix to a specified relative precision using + random matrix-vector multiplication. + + :param eps: + Relative precision. + :type eps: float + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matvect: + Function to apply the matrix transpose to a vector, with call signature + `y = matvect(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvect: function + + :return: + Rank estimate. + :rtype: int + """ + k, ra, ier = _id.idd_findrank(eps, m, n, matvect) + if ier: + raise _RETCODE_ERROR + return k + + +#------------------------------------------------------------------------------ +# iddp_rsvd.f +#------------------------------------------------------------------------------ + +def iddp_rsvd(eps, m, n, matvect, matvec): + """ + Compute SVD of a real matrix to a specified relative precision using random + matrix-vector multiplication. + + :param eps: + Relative precision. + :type eps: float + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matvect: + Function to apply the matrix transpose to a vector, with call signature + `y = matvect(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvect: function + :param matvec: + Function to apply the matrix to a vector, with call signature + `y = matvec(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec: function + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + k, iU, iV, iS, w, ier = _id.iddp_rsvd(eps, m, n, matvect, matvec) + if ier: + raise _RETCODE_ERROR + U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') + V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') + S = w[iS-1:iS+k-1] + return U, V, S + + +#------------------------------------------------------------------------------ +# iddr_aid.f +#------------------------------------------------------------------------------ + +def iddr_aid(A, k): + """ + Compute ID of a real matrix to a specified rank using random sampling. + + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + :param k: + Rank of ID. + :type k: int + + :return: + Column index array. + :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + w = iddr_aidi(m, n, k) + idx, proj = _id.iddr_aid(A, k, w) + if k == n: + proj = np.empty((k, n-k), dtype='float64', order='F') + else: + proj = proj.reshape((k, n-k), order='F') + return idx, proj + + +def iddr_aidi(m, n, k): + """ + Initialize array for :func:`iddr_aid`. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param k: + Rank of ID. + :type k: int + + :return: + Initialization array to be used by :func:`iddr_aid`. 
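+        (Used exactly as in the body of :func:`iddr_aid` above:
+        ``w = iddr_aidi(m, n, k)`` is passed straight to the backend call.)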
+ :rtype: :class:`numpy.ndarray` + """ + return _id.iddr_aidi(m, n, k) + + +#------------------------------------------------------------------------------ +# iddr_asvd.f +#------------------------------------------------------------------------------ + +def iddr_asvd(A, k): + """ + Compute SVD of a real matrix to a specified rank using random sampling. + + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + :param k: + Rank of SVD. + :type k: int + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + w = np.empty((2*k + 28)*m + (6*k + 21)*n + 25*k**2 + 100, order='F') + w_ = iddr_aidi(m, n, k) + w[:w_.size] = w_ + U, V, S, ier = _id.iddr_asvd(A, k, w) + if ier != 0: + raise _RETCODE_ERROR + return U, V, S + + +#------------------------------------------------------------------------------ +# iddr_rid.f +#------------------------------------------------------------------------------ + +def iddr_rid(m, n, matvect, k): + """ + Compute ID of a real matrix to a specified rank using random matrix-vector + multiplication. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matvect: + Function to apply the matrix transpose to a vector, with call signature + `y = matvect(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvect: function + :param k: + Rank of ID. + :type k: int + + :return: + Column index array. + :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + idx, proj = _id.iddr_rid(m, n, matvect, k) + proj = proj[:k*(n-k)].reshape((k, n-k), order='F') + return idx, proj + + +#------------------------------------------------------------------------------ +# iddr_rsvd.f +#------------------------------------------------------------------------------ + +def iddr_rsvd(m, n, matvect, matvec, k): + """ + Compute SVD of a real matrix to a specified rank using random matrix-vector + multiplication. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matvect: + Function to apply the matrix transpose to a vector, with call signature + `y = matvect(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvect: function + :param matvec: + Function to apply the matrix to a vector, with call signature + `y = matvec(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec: function + :param k: + Rank of SVD. + :type k: int + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + U, V, S, ier = _id.iddr_rsvd(m, n, matvect, matvec, k) + if ier != 0: + raise _RETCODE_ERROR + return U, V, S + + +#------------------------------------------------------------------------------ +# idz_frm.f +#------------------------------------------------------------------------------ + +def idz_frm(n, w, x): + """ + Transform complex vector via a composition of Rokhlin's random transform, + random subselection, and an FFT. 
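+
+    A minimal sketch of the intended call sequence (parameters are described
+    below)::
+
+        n, w = idz_frmi(x.size)
+        y = idz_frm(n, w, x)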
+
+    In contrast to :func:`idz_sfrm`, this routine works best when the length of
+    the transformed vector is the power-of-two integer output by
+    :func:`idz_frmi`, or when the length is not specified but instead
+    determined a posteriori from the output. The returned transformed vector is
+    randomly permuted.
+
+    :param n:
+        Greatest power-of-two integer satisfying `n <= x.size` as obtained from
+        :func:`idz_frmi`; `n` is also the length of the output vector.
+    :type n: int
+    :param w:
+        Initialization array constructed by :func:`idz_frmi`.
+    :type w: :class:`numpy.ndarray`
+    :param x:
+        Vector to be transformed.
+    :type x: :class:`numpy.ndarray`
+
+    :return:
+        Transformed vector.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idz_frm(n, w, x)
+
+
+def idz_sfrm(l, n, w, x):
+    """
+    Transform complex vector via a composition of Rokhlin's random transform,
+    random subselection, and an FFT.
+
+    In contrast to :func:`idz_frm`, this routine works best when the length of
+    the transformed vector is known a priori.
+
+    :param l:
+        Length of transformed vector, satisfying `l <= n`.
+    :type l: int
+    :param n:
+        Greatest power-of-two integer satisfying `n <= x.size` as obtained from
+        :func:`idz_sfrmi`.
+    :type n: int
+    :param w:
+        Initialization array constructed by :func:`idz_sfrmi`.
+    :type w: :class:`numpy.ndarray`
+    :param x:
+        Vector to be transformed.
+    :type x: :class:`numpy.ndarray`
+
+    :return:
+        Transformed vector.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idz_sfrm(l, n, w, x)
+
+
+def idz_frmi(m):
+    """
+    Initialize data for :func:`idz_frm`.
+
+    :param m:
+        Length of vector to be transformed.
+    :type m: int
+
+    :return:
+        Greatest power-of-two integer `n` satisfying `n <= m`.
+    :rtype: int
+    :return:
+        Initialization array to be used by :func:`idz_frm`.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idz_frmi(m)
+
+
+def idz_sfrmi(l, m):
+    """
+    Initialize data for :func:`idz_sfrm`.
+
+    :param l:
+        Length of output transformed vector.
+    :type l: int
+    :param m:
+        Length of the vector to be transformed.
+    :type m: int
+
+    :return:
+        Greatest power-of-two integer `n` satisfying `n <= m`.
+    :rtype: int
+    :return:
+        Initialization array to be used by :func:`idz_sfrm`.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idz_sfrmi(l, m)
+
+
+#------------------------------------------------------------------------------
+# idz_id.f
+#------------------------------------------------------------------------------
+
+def idzp_id(eps, A):
+    """
+    Compute ID of a complex matrix to a specified relative precision.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Rank of ID.
+    :rtype: int
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    k, idx, rnorms = _id.idzp_id(eps, A)
+    n = A.shape[1]
+    proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F')
+    return k, idx, proj
+
+
+def idzr_id(A, k):
+    """
+    Compute ID of a complex matrix to a specified rank.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+ :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + idx, rnorms = _id.idzr_id(A, k) + n = A.shape[1] + proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F') + return idx, proj + + +def idz_reconid(B, idx, proj): + """ + Reconstruct matrix from complex ID. + + :param B: + Skeleton matrix. + :type B: :class:`numpy.ndarray` + :param idx: + Column index array. + :type idx: :class:`numpy.ndarray` + :param proj: + Interpolation coefficients. + :type proj: :class:`numpy.ndarray` + + :return: + Reconstructed matrix. + :rtype: :class:`numpy.ndarray` + """ + B = np.asfortranarray(B) + if proj.size > 0: + return _id.idz_reconid(B, idx, proj) + else: + return B[:, np.argsort(idx)] + + +def idz_reconint(idx, proj): + """ + Reconstruct interpolation matrix from complex ID. + + :param idx: + Column index array. + :type idx: :class:`numpy.ndarray` + :param proj: + Interpolation coefficients. + :type proj: :class:`numpy.ndarray` + + :return: + Interpolation matrix. + :rtype: :class:`numpy.ndarray` + """ + return _id.idz_reconint(idx, proj) + + +def idz_copycols(A, k, idx): + """ + Reconstruct skeleton matrix from complex ID. + + :param A: + Original matrix. + :type A: :class:`numpy.ndarray` + :param k: + Rank of ID. + :type k: int + :param idx: + Column index array. + :type idx: :class:`numpy.ndarray` + + :return: + Skeleton matrix. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + return _id.idz_copycols(A, k, idx) + + +#------------------------------------------------------------------------------ +# idz_id2svd.f +#------------------------------------------------------------------------------ + +def idz_id2svd(B, idx, proj): + """ + Convert complex ID to SVD. + + :param B: + Skeleton matrix. + :type B: :class:`numpy.ndarray` + :param idx: + Column index array. + :type idx: :class:`numpy.ndarray` + :param proj: + Interpolation coefficients. + :type proj: :class:`numpy.ndarray` + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + B = np.asfortranarray(B) + U, V, S, ier = _id.idz_id2svd(B, idx, proj) + if ier: + raise _RETCODE_ERROR + return U, V, S + + +#------------------------------------------------------------------------------ +# idz_snorm.f +#------------------------------------------------------------------------------ + +def idz_snorm(m, n, matveca, matvec, its=20): + """ + Estimate spectral norm of a complex matrix by the randomized power method. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matveca: + Function to apply the matrix adjoint to a vector, with call signature + `y = matveca(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matveca: function + :param matvec: + Function to apply the matrix to a vector, with call signature + `y = matvec(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec: function + :param its: + Number of power method iterations. + :type its: int + + :return: + Spectral norm estimate. + :rtype: float + """ + snorm, v = _id.idz_snorm(m, n, matveca, matvec, its) + return snorm + + +def idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its=20): + """ + Estimate spectral norm of the difference of two complex matrices by the + randomized power method. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. 
+ :type n: int + :param matveca: + Function to apply the adjoint of the first matrix to a vector, with + call signature `y = matveca(x)`, where `x` and `y` are the input and + output vectors, respectively. + :type matveca: function + :param matveca2: + Function to apply the adjoint of the second matrix to a vector, with + call signature `y = matveca2(x)`, where `x` and `y` are the input and + output vectors, respectively. + :type matveca2: function + :param matvec: + Function to apply the first matrix to a vector, with call signature + `y = matvec(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec: function + :param matvec2: + Function to apply the second matrix to a vector, with call signature + `y = matvec2(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec2: function + :param its: + Number of power method iterations. + :type its: int + + :return: + Spectral norm estimate of matrix difference. + :rtype: float + """ + return _id.idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its) + + +#------------------------------------------------------------------------------ +# idz_svd.f +#------------------------------------------------------------------------------ + +def idzr_svd(A, k): + """ + Compute SVD of a complex matrix to a specified rank. + + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + :param k: + Rank of SVD. + :type k: int + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + U, V, S, ier = _id.idzr_svd(A, k) + if ier: + raise _RETCODE_ERROR + return U, V, S + + +def idzp_svd(eps, A): + """ + Compute SVD of a complex matrix to a specified relative precision. + + :param eps: + Relative precision. + :type eps: float + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + k, iU, iV, iS, w, ier = _id.idzp_svd(eps, A) + if ier: + raise _RETCODE_ERROR + U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') + V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') + S = w[iS-1:iS+k-1] + return U, V, S + + +#------------------------------------------------------------------------------ +# idzp_aid.f +#------------------------------------------------------------------------------ + +def idzp_aid(eps, A): + """ + Compute ID of a complex matrix to a specified relative precision using + random sampling. + + :param eps: + Relative precision. + :type eps: float + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + + :return: + Rank of ID. + :rtype: int + :return: + Column index array. + :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + n2, w = idz_frmi(m) + proj = np.empty(n*(2*n2 + 1) + n2 + 1, dtype='complex128', order='F') + k, idx, proj = _id.idzp_aid(eps, A, w, proj) + proj = proj[:k*(n-k)].reshape((k, n-k), order='F') + return k, idx, proj + + +def idz_estrank(eps, A): + """ + Estimate rank of a complex matrix to a specified relative precision using + random sampling. + + The output rank is typically about 8 higher than the actual rank. + + :param eps: + Relative precision. 
+ :type eps: float + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + + :return: + Rank estimate. + :rtype: int + """ + A = np.asfortranarray(A) + m, n = A.shape + n2, w = idz_frmi(m) + ra = np.empty(n*n2 + (n + 1)*(n2 + 1), dtype='complex128', order='F') + k, ra = _id.idz_estrank(eps, A, w, ra) + return k + + +#------------------------------------------------------------------------------ +# idzp_asvd.f +#------------------------------------------------------------------------------ + +def idzp_asvd(eps, A): + """ + Compute SVD of a complex matrix to a specified relative precision using + random sampling. + + :param eps: + Relative precision. + :type eps: float + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + n2, winit = _id.idz_frmi(m) + w = np.empty( + max((min(m, n) + 1)*(3*m + 5*n + 11) + 8*min(m, n)**2, + (2*n + 1)*(n2 + 1)), + dtype=np.complex128, order='F') + k, iU, iV, iS, w, ier = _id.idzp_asvd(eps, A, winit, w) + if ier: + raise _RETCODE_ERROR + U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') + V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') + S = w[iS-1:iS+k-1] + return U, V, S + + +#------------------------------------------------------------------------------ +# idzp_rid.f +#------------------------------------------------------------------------------ + +def idzp_rid(eps, m, n, matveca): + """ + Compute ID of a complex matrix to a specified relative precision using + random matrix-vector multiplication. + + :param eps: + Relative precision. + :type eps: float + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matveca: + Function to apply the matrix adjoint to a vector, with call signature + `y = matveca(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matveca: function + + :return: + Rank of ID. + :rtype: int + :return: + Column index array. + :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + proj = np.empty( + m + 1 + 2*n*(min(m, n) + 1), + dtype=np.complex128, order='F') + k, idx, proj, ier = _id.idzp_rid(eps, m, n, matveca, proj) + if ier: + raise _RETCODE_ERROR + proj = proj[:k*(n-k)].reshape((k, n-k), order='F') + return k, idx, proj + + +def idz_findrank(eps, m, n, matveca): + """ + Estimate rank of a complex matrix to a specified relative precision using + random matrix-vector multiplication. + + :param eps: + Relative precision. + :type eps: float + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matveca: + Function to apply the matrix adjoint to a vector, with call signature + `y = matveca(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matveca: function + + :return: + Rank estimate. + :rtype: int + """ + k, ra, ier = _id.idz_findrank(eps, m, n, matveca) + if ier: + raise _RETCODE_ERROR + return k + + +#------------------------------------------------------------------------------ +# idzp_rsvd.f +#------------------------------------------------------------------------------ + +def idzp_rsvd(eps, m, n, matveca, matvec): + """ + Compute SVD of a complex matrix to a specified relative precision using + random matrix-vector multiplication. 
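+
+    A minimal call sketch (assuming `A` is any complex 2-D ndarray)::
+
+        matveca = lambda x: A.conj().T.dot(x)
+        matvec = lambda x: A.dot(x)
+        U, V, S = idzp_rsvd(1e-6, A.shape[0], A.shape[1], matveca, matvec)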
+ + :param eps: + Relative precision. + :type eps: float + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matveca: + Function to apply the matrix adjoint to a vector, with call signature + `y = matveca(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matveca: function + :param matvec: + Function to apply the matrix to a vector, with call signature + `y = matvec(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec: function + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + k, iU, iV, iS, w, ier = _id.idzp_rsvd(eps, m, n, matveca, matvec) + if ier: + raise _RETCODE_ERROR + U = w[iU-1:iU+m*k-1].reshape((m, k), order='F') + V = w[iV-1:iV+n*k-1].reshape((n, k), order='F') + S = w[iS-1:iS+k-1] + return U, V, S + + +#------------------------------------------------------------------------------ +# idzr_aid.f +#------------------------------------------------------------------------------ + +def idzr_aid(A, k): + """ + Compute ID of a complex matrix to a specified rank using random sampling. + + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + :param k: + Rank of ID. + :type k: int + + :return: + Column index array. + :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + w = idzr_aidi(m, n, k) + idx, proj = _id.idzr_aid(A, k, w) + if k == n: + proj = np.empty((k, n-k), dtype='complex128', order='F') + else: + proj = proj.reshape((k, n-k), order='F') + return idx, proj + + +def idzr_aidi(m, n, k): + """ + Initialize array for :func:`idzr_aid`. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param k: + Rank of ID. + :type k: int + + :return: + Initialization array to be used by :func:`idzr_aid`. + :rtype: :class:`numpy.ndarray` + """ + return _id.idzr_aidi(m, n, k) + + +#------------------------------------------------------------------------------ +# idzr_asvd.f +#------------------------------------------------------------------------------ + +def idzr_asvd(A, k): + """ + Compute SVD of a complex matrix to a specified rank using random sampling. + + :param A: + Matrix. + :type A: :class:`numpy.ndarray` + :param k: + Rank of SVD. + :type k: int + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + A = np.asfortranarray(A) + m, n = A.shape + w = np.empty( + (2*k + 22)*m + (6*k + 21)*n + 8*k**2 + 10*k + 90, + dtype='complex128', order='F') + w_ = idzr_aidi(m, n, k) + w[:w_.size] = w_ + U, V, S, ier = _id.idzr_asvd(A, k, w) + if ier: + raise _RETCODE_ERROR + return U, V, S + + +#------------------------------------------------------------------------------ +# idzr_rid.f +#------------------------------------------------------------------------------ + +def idzr_rid(m, n, matveca, k): + """ + Compute ID of a complex matrix to a specified rank using random + matrix-vector multiplication. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. 
+ :type n: int + :param matveca: + Function to apply the matrix adjoint to a vector, with call signature + `y = matveca(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matveca: function + :param k: + Rank of ID. + :type k: int + + :return: + Column index array. + :rtype: :class:`numpy.ndarray` + :return: + Interpolation coefficients. + :rtype: :class:`numpy.ndarray` + """ + idx, proj = _id.idzr_rid(m, n, matveca, k) + proj = proj[:k*(n-k)].reshape((k, n-k), order='F') + return idx, proj + + +#------------------------------------------------------------------------------ +# idzr_rsvd.f +#------------------------------------------------------------------------------ + +def idzr_rsvd(m, n, matveca, matvec, k): + """ + Compute SVD of a complex matrix to a specified rank using random + matrix-vector multiplication. + + :param m: + Matrix row dimension. + :type m: int + :param n: + Matrix column dimension. + :type n: int + :param matveca: + Function to apply the matrix adjoint to a vector, with call signature + `y = matveca(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matveca: function + :param matvec: + Function to apply the matrix to a vector, with call signature + `y = matvec(x)`, where `x` and `y` are the input and output vectors, + respectively. + :type matvec: function + :param k: + Rank of SVD. + :type k: int + + :return: + Left singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Right singular vectors. + :rtype: :class:`numpy.ndarray` + :return: + Singular values. + :rtype: :class:`numpy.ndarray` + """ + U, V, S, ier = _id.idzr_rsvd(m, n, matveca, matvec, k) + if ier: + raise _RETCODE_ERROR + return U, V, S diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_interpolative_backend.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_interpolative_backend.pyc new file mode 100644 index 0000000..1c99b71 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_interpolative_backend.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_inv_ssq.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_inv_ssq.py new file mode 100644 index 0000000..3c61b10 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_inv_ssq.py @@ -0,0 +1,888 @@ +""" +Matrix functions that use Pade approximation with inverse scaling and squaring. + +""" +from __future__ import division, print_function, absolute_import + +import warnings + +import numpy as np + +from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu +from scipy.linalg.decomp_schur import schur, rsf2csf +from scipy.linalg.matfuncs import funm +from scipy.linalg import svdvals, solve_triangular +from scipy.sparse.linalg.interface import LinearOperator +from scipy.sparse.linalg import onenormest +import scipy.special + + +class LogmRankWarning(UserWarning): + pass + + +class LogmExactlySingularWarning(LogmRankWarning): + pass + + +class LogmNearlySingularWarning(LogmRankWarning): + pass + + +class LogmError(np.linalg.LinAlgError): + pass + + +class FractionalMatrixPowerError(np.linalg.LinAlgError): + pass + + +#TODO renovate or move this class when scipy operators are more mature +class _MatrixM1PowerOperator(LinearOperator): + """ + A representation of the linear operator (A - I)^p. 
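+
+    For p = 2, for example, a matvec evaluates (A - I)((A - I)x) as two
+    passes of ``x = A.dot(x) - x``, without ever forming A - I explicitly.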
+    """
+
+    def __init__(self, A, p):
+        if A.ndim != 2 or A.shape[0] != A.shape[1]:
+            raise ValueError('expected A to be like a square matrix')
+        if p < 0 or p != int(p):
+            raise ValueError('expected p to be a non-negative integer')
+        self._A = A
+        self._p = p
+        self.ndim = A.ndim
+        self.shape = A.shape
+
+    def _matvec(self, x):
+        for i in range(self._p):
+            x = self._A.dot(x) - x
+        return x
+
+    def _rmatvec(self, x):
+        for i in range(self._p):
+            x = x.dot(self._A) - x
+        return x
+
+    def _matmat(self, X):
+        for i in range(self._p):
+            X = self._A.dot(X) - X
+        return X
+
+    def _adjoint(self):
+        return _MatrixM1PowerOperator(self._A.T, self._p)
+
+
+#TODO renovate or move this function when scipy operators are more mature
+def _onenormest_m1_power(A, p,
+        t=2, itmax=5, compute_v=False, compute_w=False):
+    """
+    Efficiently estimate the 1-norm of (A - I)^p.
+
+    Parameters
+    ----------
+    A : ndarray
+        Matrix whose 1-norm of a power is to be computed.
+    p : int
+        Non-negative integer power.
+    t : int, optional
+        A positive parameter controlling the tradeoff between
+        accuracy versus time and memory usage.
+        Larger values take longer and use more memory
+        but give more accurate output.
+    itmax : int, optional
+        Use at most this many iterations.
+    compute_v : bool, optional
+        Request a norm-maximizing linear operator input vector if True.
+    compute_w : bool, optional
+        Request a norm-maximizing linear operator output vector if True.
+
+    Returns
+    -------
+    est : float
+        An underestimate of the 1-norm of (A - I)^p.
+    v : ndarray, optional
+        The vector such that ||Av||_1 == est*||v||_1.
+        It can be thought of as an input to the linear operator
+        that gives an output with particularly large norm.
+    w : ndarray, optional
+        The vector Av which has relatively large 1-norm.
+        It can be thought of as an output of the linear operator
+        that is relatively large in norm compared to the input.
+
+    """
+    return onenormest(_MatrixM1PowerOperator(A, p),
+            t=t, itmax=itmax, compute_v=compute_v, compute_w=compute_w)
+
+
+def _unwindk(z):
+    """
+    Compute the scalar unwinding number.
+
+    Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z))) / (2 pi i).
+    Note that this definition differs in sign from the original definition
+    in equations (5, 6) in [2]_. The sign convention is justified in [3]_.
+
+    Parameters
+    ----------
+    z : complex
+        A complex number.
+
+    Returns
+    -------
+    unwinding_number : integer
+        The scalar unwinding number of z.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Lijing lin (2011)
+           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+           SIAM Journal on Matrix Analysis and Applications,
+           32 (3). pp. 1056-1078. ISSN 0895-4798
+
+    .. [2] Robert M. Corless and David J. Jeffrey,
+           "The unwinding number." Newsletter ACM SIGSAM Bulletin
+           Volume 30, Issue 2, June 1996, Pages 28-35.
+
+    .. [3] Russell Bradford and Robert M. Corless and James H. Davenport and
+           David J. Jeffrey and Stephen M. Watt,
+           "Reasoning about the elementary functions of complex analysis"
+           Annals of Mathematics and Artificial Intelligence,
+           36: 303-318, 2002.
+
+    """
+    return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
+
+
+def _briggs_helper_function(a, k):
+    """
+    Computes r = a^(1 / (2^k)) - 1.
+
+    This is algorithm (2) of [1]_.
+    The purpose is to avoid a danger of subtractive cancellation.
+    For more computational efficiency it should probably be cythonized.
+
+    Parameters
+    ----------
+    a : complex
+        A complex number.
+    k : integer
+        A nonnegative integer.
+ + Returns + ------- + r : complex + The value r = a^(1 / (2^k)) - 1 computed with less cancellation. + + Notes + ----- + The algorithm as formulated in the reference does not handle k=0 or k=1 + correctly, so these are special-cased in this implementation. + This function is intended to not allow `a` to belong to the closed + negative real axis, but this constraint is relaxed. + + References + ---------- + .. [1] Awad H. Al-Mohy (2012) + "A more accurate Briggs method for the logarithm", + Numerical Algorithms, 59 : 393--402. + + """ + if k < 0 or int(k) != k: + raise ValueError('expected a nonnegative integer k') + if k == 0: + return a - 1 + elif k == 1: + return np.sqrt(a) - 1 + else: + k_hat = k + if np.angle(a) >= np.pi / 2: + a = np.sqrt(a) + k_hat = k - 1 + z0 = a - 1 + a = np.sqrt(a) + r = 1 + a + for j in range(1, k_hat): + a = np.sqrt(a) + r = r * (1 + a) + r = z0 / r + return r + + +def _fractional_power_superdiag_entry(l1, l2, t12, p): + """ + Compute a superdiagonal entry of a fractional matrix power. + + This is Eq. (5.6) in [1]_. + + Parameters + ---------- + l1 : complex + A diagonal entry of the matrix. + l2 : complex + A diagonal entry of the matrix. + t12 : complex + A superdiagonal entry of the matrix. + p : float + A fractional power. + + Returns + ------- + f12 : complex + A superdiagonal entry of the fractional matrix power. + + Notes + ----- + Care has been taken to return a real number if possible when + all of the inputs are real numbers. + + References + ---------- + .. [1] Nicholas J. Higham and Lijing lin (2011) + "A Schur-Pade Algorithm for Fractional Powers of a Matrix." + SIAM Journal on Matrix Analysis and Applications, + 32 (3). pp. 1056-1078. ISSN 0895-4798 + + """ + if l1 == l2: + f12 = t12 * p * l1**(p-1) + elif abs(l2 - l1) > abs(l1 + l2) / 2: + f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1) + else: + # This is Eq. (5.5) in [1]. + z = (l2 - l1) / (l2 + l1) + log_l1 = np.log(l1) + log_l2 = np.log(l2) + arctanh_z = np.arctanh(z) + tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1)) + tmp_u = _unwindk(log_l2 - log_l1) + if tmp_u: + tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u) + else: + tmp_b = p * arctanh_z + tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1) + f12 = tmp_a * tmp_c + return f12 + + +def _logm_superdiag_entry(l1, l2, t12): + """ + Compute a superdiagonal entry of a matrix logarithm. + + This is like Eq. (11.28) in [1]_, except the determination of whether + l1 and l2 are sufficiently far apart has been modified. + + Parameters + ---------- + l1 : complex + A diagonal entry of the matrix. + l2 : complex + A diagonal entry of the matrix. + t12 : complex + A superdiagonal entry of the matrix. + + Returns + ------- + f12 : complex + A superdiagonal entry of the matrix logarithm. + + Notes + ----- + Care has been taken to return a real number if possible when + all of the inputs are real numbers. + + References + ---------- + .. [1] Nicholas J. Higham (2008) + "Functions of Matrices: Theory and Computation" + ISBN 978-0-898716-46-7 + + """ + if l1 == l2: + f12 = t12 / l1 + elif abs(l2 - l1) > abs(l1 + l2) / 2: + f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1) + else: + z = (l2 - l1) / (l2 + l1) + u = _unwindk(np.log(l2) - np.log(l1)) + if u: + f12 = t12 * 2 * (np.arctanh(z) + np.pi*1j*u) / (l2 - l1) + else: + f12 = t12 * 2 * np.arctanh(z) / (l2 - l1) + return f12 + + +def _inverse_squaring_helper(T0, theta): + """ + A helper function for inverse scaling and squaring for Pade approximation. 
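+
+    Repeatedly takes upper triangular matrix square roots of `T0` until the
+    result is close enough to the identity for a low-degree Pade
+    approximation, then returns that matrix minus the identity, together
+    with the number of square roots taken and the Pade degree selected.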
+
+    Parameters
+    ----------
+    T0 : (N, N) array_like upper triangular
+        Matrix involved in inverse scaling and squaring.
+    theta : indexable
+        The values theta[1] .. theta[7] must be available.
+        They represent bounds related to Pade approximation, and they depend
+        on the matrix function which is being computed.
+        For example, different values of theta are required for
+        matrix logarithm than for fractional matrix power.
+
+    Returns
+    -------
+    R : (N, N) array_like upper triangular
+        Composition of zero or more matrix square roots of T0, minus I.
+    s : non-negative integer
+        Number of square roots taken.
+    m : positive integer
+        The degree of the Pade approximation.
+
+    Notes
+    -----
+    This subroutine appears as a chunk of lines within
+    a couple of published algorithms; for example it appears
+    as lines 4--35 in algorithm (3.1) of [1]_, and
+    as lines 3--34 in algorithm (4.1) of [2]_.
+    The instances of 'goto line 38' in algorithm (3.1) of [1]_
+    probably mean 'goto line 36' and have been interpreted accordingly.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2013)
+           "An Improved Schur-Pade Algorithm for Fractional Powers
+           of a Matrix and their Frechet Derivatives."
+
+    .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012)
+           "Improved Inverse Scaling and Squaring Algorithms
+           for the Matrix Logarithm."
+           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
+           ISSN 1095-7197
+
+    """
+    if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]:
+        raise ValueError('expected an upper triangular square matrix')
+    n, n = T0.shape
+    T = T0
+
+    # Find s0, the smallest s such that the spectral radius
+    # of a certain diagonal matrix is at most theta[7].
+    # Note that because theta[7] < 1,
+    # this search will not terminate if any diagonal entry of T is zero.
+    s0 = 0
+    tmp_diag = np.diag(T)
+    if np.count_nonzero(tmp_diag) != n:
+        raise Exception('internal inconsistency')
+    while np.max(np.absolute(tmp_diag - 1)) > theta[7]:
+        tmp_diag = np.sqrt(tmp_diag)
+        s0 += 1
+
+    # Take matrix square roots of T.
+    for i in range(s0):
+        T = _sqrtm_triu(T)
+
+    # Flow control in this section is a little odd.
+    # This is because I am translating algorithm descriptions
+    # which have GOTOs in the publication.
+    s = s0
+    k = 0
+    d2 = _onenormest_m1_power(T, 2) ** (1/2)
+    d3 = _onenormest_m1_power(T, 3) ** (1/3)
+    a2 = max(d2, d3)
+    m = None
+    for i in (1, 2):
+        if a2 <= theta[i]:
+            m = i
+            break
+    while m is None:
+        if s > s0:
+            d3 = _onenormest_m1_power(T, 3) ** (1/3)
+        d4 = _onenormest_m1_power(T, 4) ** (1/4)
+        a3 = max(d3, d4)
+        if a3 <= theta[7]:
+            j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i])
+            if j1 <= 6:
+                m = j1
+                break
+            elif a3 / 2 <= theta[5] and k < 2:
+                k += 1
+                T = _sqrtm_triu(T)
+                s += 1
+                continue
+        d5 = _onenormest_m1_power(T, 5) ** (1/5)
+        a4 = max(d4, d5)
+        eta = min(a3, a4)
+        for i in (6, 7):
+            if eta <= theta[i]:
+                m = i
+                break
+        if m is not None:
+            break
+        T = _sqrtm_triu(T)
+        s += 1
+
+    # The subtraction of the identity is redundant here,
+    # because the diagonal will be replaced for improved numerical accuracy,
+    # but this formulation should help clarify the meaning of R.
+    R = T - np.identity(n)
+
+    # Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I
+    # using formulas that have less subtractive cancellation.
+    # Skip this step if the principal branch
+    # does not exist at T0; this happens when a diagonal entry of T0
+    # is negative with imaginary part 0.
+ has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0)) + if has_principal_branch: + for j in range(n): + a = T0[j, j] + r = _briggs_helper_function(a, s) + R[j, j] = r + p = np.exp2(-s) + for j in range(n-1): + l1 = T0[j, j] + l2 = T0[j+1, j+1] + t12 = T0[j, j+1] + f12 = _fractional_power_superdiag_entry(l1, l2, t12, p) + R[j, j+1] = f12 + + # Return the T-I matrix, the number of square roots, and the Pade degree. + if not np.array_equal(R, np.triu(R)): + raise Exception('internal inconsistency') + return R, s, m + + +def _fractional_power_pade_constant(i, t): + # A helper function for matrix fractional power. + if i < 1: + raise ValueError('expected a positive integer i') + if not (-1 < t < 1): + raise ValueError('expected -1 < t < 1') + if i == 1: + return -t + elif i % 2 == 0: + j = i // 2 + return (-j + t) / (2 * (2*j - 1)) + elif i % 2 == 1: + j = (i - 1) // 2 + return (-j - t) / (2 * (2*j + 1)) + else: + raise Exception('internal error') + + +def _fractional_power_pade(R, t, m): + """ + Evaluate the Pade approximation of a fractional matrix power. + + Evaluate the degree-m Pade approximation of R + to the fractional matrix power t using the continued fraction + in bottom-up fashion using algorithm (4.1) in [1]_. + + Parameters + ---------- + R : (N, N) array_like + Upper triangular matrix whose fractional power to evaluate. + t : float + Fractional power between -1 and 1 exclusive. + m : positive integer + Degree of Pade approximation. + + Returns + ------- + U : (N, N) array_like + The degree-m Pade approximation of R to the fractional power t. + This matrix will be upper triangular. + + References + ---------- + .. [1] Nicholas J. Higham and Lijing lin (2011) + "A Schur-Pade Algorithm for Fractional Powers of a Matrix." + SIAM Journal on Matrix Analysis and Applications, + 32 (3). pp. 1056-1078. ISSN 0895-4798 + + """ + if m < 1 or int(m) != m: + raise ValueError('expected a positive integer m') + if not (-1 < t < 1): + raise ValueError('expected -1 < t < 1') + R = np.asarray(R) + if len(R.shape) != 2 or R.shape[0] != R.shape[1]: + raise ValueError('expected an upper triangular square matrix') + n, n = R.shape + ident = np.identity(n) + Y = R * _fractional_power_pade_constant(2*m, t) + for j in range(2*m - 1, 0, -1): + rhs = R * _fractional_power_pade_constant(j, t) + Y = solve_triangular(ident + Y, rhs) + U = ident + Y + if not np.array_equal(U, np.triu(U)): + raise Exception('internal inconsistency') + return U + + +def _remainder_matrix_power_triu(T, t): + """ + Compute a fractional power of an upper triangular matrix. + + The fractional power is restricted to fractions -1 < t < 1. + This uses algorithm (3.1) of [1]_. + The Pade approximation itself uses algorithm (4.1) of [2]_. + + Parameters + ---------- + T : (N, N) array_like + Upper triangular matrix whose fractional power to evaluate. + t : float + Fractional power between -1 and 1 exclusive. + + Returns + ------- + X : (N, N) array_like + The fractional power of the matrix. + + References + ---------- + .. [1] Nicholas J. Higham and Lijing Lin (2013) + "An Improved Schur-Pade Algorithm for Fractional Powers + of a Matrix and their Frechet Derivatives." + + .. [2] Nicholas J. Higham and Lijing lin (2011) + "A Schur-Pade Algorithm for Fractional Powers of a Matrix." + SIAM Journal on Matrix Analysis and Applications, + 32 (3). pp. 1056-1078. 
ISSN 0895-4798 + + """ + m_to_theta = { + 1: 1.51e-5, + 2: 2.24e-3, + 3: 1.88e-2, + 4: 6.04e-2, + 5: 1.24e-1, + 6: 2.00e-1, + 7: 2.79e-1, + } + n, n = T.shape + T0 = T + T0_diag = np.diag(T0) + if np.array_equal(T0, np.diag(T0_diag)): + U = np.diag(T0_diag ** t) + else: + R, s, m = _inverse_squaring_helper(T0, m_to_theta) + + # Evaluate the Pade approximation. + # Note that this function expects the negative of the matrix + # returned by the inverse squaring helper. + U = _fractional_power_pade(-R, t, m) + + # Undo the inverse scaling and squaring. + # Be less clever about this + # if the principal branch does not exist at T0; + # this happens when a diagonal entry of T0 + # is negative with imaginary part 0. + eivals = np.diag(T0) + has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals) + for i in range(s, -1, -1): + if i < s: + U = U.dot(U) + else: + if has_principal_branch: + p = t * np.exp2(-i) + U[np.diag_indices(n)] = T0_diag ** p + for j in range(n-1): + l1 = T0[j, j] + l2 = T0[j+1, j+1] + t12 = T0[j, j+1] + f12 = _fractional_power_superdiag_entry(l1, l2, t12, p) + U[j, j+1] = f12 + if not np.array_equal(U, np.triu(U)): + raise Exception('internal inconsistency') + return U + + +def _remainder_matrix_power(A, t): + """ + Compute the fractional power of a matrix, for fractions -1 < t < 1. + + This uses algorithm (3.1) of [1]_. + The Pade approximation itself uses algorithm (4.1) of [2]_. + + Parameters + ---------- + A : (N, N) array_like + Matrix whose fractional power to evaluate. + t : float + Fractional power between -1 and 1 exclusive. + + Returns + ------- + X : (N, N) array_like + The fractional power of the matrix. + + References + ---------- + .. [1] Nicholas J. Higham and Lijing Lin (2013) + "An Improved Schur-Pade Algorithm for Fractional Powers + of a Matrix and their Frechet Derivatives." + + .. [2] Nicholas J. Higham and Lijing lin (2011) + "A Schur-Pade Algorithm for Fractional Powers of a Matrix." + SIAM Journal on Matrix Analysis and Applications, + 32 (3). pp. 1056-1078. ISSN 0895-4798 + + """ + # This code block is copied from numpy.matrix_power(). + A = np.asarray(A) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('input must be a square array') + + # Get the number of rows and columns. + n, n = A.shape + + # Triangularize the matrix if necessary, + # attempting to preserve dtype if possible. + if np.array_equal(A, np.triu(A)): + Z = None + T = A + else: + if np.isrealobj(A): + T, Z = schur(A) + if not np.array_equal(T, np.triu(T)): + T, Z = rsf2csf(T, Z) + else: + T, Z = schur(A, output='complex') + + # Zeros on the diagonal of the triangular matrix are forbidden, + # because the inverse scaling and squaring cannot deal with it. + T_diag = np.diag(T) + if np.count_nonzero(T_diag) != n: + raise FractionalMatrixPowerError( + 'cannot use inverse scaling and squaring to find ' + 'the fractional matrix power of a singular matrix') + + # If the triangular matrix is real and has a negative + # entry on the diagonal, then force the matrix to be complex. + if np.isrealobj(T) and np.min(T_diag) < 0: + T = T.astype(complex) + + # Get the fractional power of the triangular matrix, + # and de-triangularize it if necessary. + U = _remainder_matrix_power_triu(T, t) + if Z is not None: + ZH = np.conjugate(Z).T + return Z.dot(U).dot(ZH) + else: + return U + + +def _fractional_matrix_power(A, p): + """ + Compute the fractional power of a matrix. + + See the fractional_matrix_power docstring in matfuncs.py for more info. 
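+
+    The power is split as ``p = a + b`` with integer ``a`` and fractional
+    ``b`` in (-1, 1); the integer part goes through
+    ``np.linalg.matrix_power`` and the remainder through inverse scaling
+    and squaring, falling back to ``funm`` when that fails.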
+ + """ + A = np.asarray(A) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected a square matrix') + if p == int(p): + return np.linalg.matrix_power(A, int(p)) + # Compute singular values. + s = svdvals(A) + # Inverse scaling and squaring cannot deal with a singular matrix, + # because the process of repeatedly taking square roots + # would not converge to the identity matrix. + if s[-1]: + # Compute the condition number relative to matrix inversion, + # and use this to decide between floor(p) and ceil(p). + k2 = s[0] / s[-1] + p1 = p - np.floor(p) + p2 = p - np.ceil(p) + if p1 * k2 ** (1 - p1) <= -p2 * k2: + a = int(np.floor(p)) + b = p1 + else: + a = int(np.ceil(p)) + b = p2 + try: + R = _remainder_matrix_power(A, b) + Q = np.linalg.matrix_power(A, a) + return Q.dot(R) + except np.linalg.LinAlgError: + pass + # If p is negative then we are going to give up. + # If p is non-negative then we can fall back to generic funm. + if p < 0: + X = np.empty_like(A) + X.fill(np.nan) + return X + else: + p1 = p - np.floor(p) + a = int(np.floor(p)) + b = p1 + R, info = funm(A, lambda x: pow(x, b), disp=False) + Q = np.linalg.matrix_power(A, a) + return Q.dot(R) + + +def _logm_triu(T): + """ + Compute matrix logarithm of an upper triangular matrix. + + The matrix logarithm is the inverse of + expm: expm(logm(`T`)) == `T` + + Parameters + ---------- + T : (N, N) array_like + Upper triangular matrix whose logarithm to evaluate + + Returns + ------- + logm : (N, N) ndarray + Matrix logarithm of `T` + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) + "Improved Inverse Scaling and Squaring Algorithms + for the Matrix Logarithm." + SIAM Journal on Scientific Computing, 34 (4). C152-C169. + ISSN 1095-7197 + + .. [2] Nicholas J. Higham (2008) + "Functions of Matrices: Theory and Computation" + ISBN 978-0-898716-46-7 + + .. [3] Nicholas J. Higham and Lijing lin (2011) + "A Schur-Pade Algorithm for Fractional Powers of a Matrix." + SIAM Journal on Matrix Analysis and Applications, + 32 (3). pp. 1056-1078. ISSN 0895-4798 + + """ + T = np.asarray(T) + if len(T.shape) != 2 or T.shape[0] != T.shape[1]: + raise ValueError('expected an upper triangular square matrix') + n, n = T.shape + + # Construct T0 with the appropriate type, + # depending on the dtype and the spectrum of T. + T_diag = np.diag(T) + keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0 + if keep_it_real: + T0 = T + else: + T0 = T.astype(complex) + + # Define bounds given in Table (2.1). + theta = (None, + 1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2, + 1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1, + 4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1, + 6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1) + + R, s, m = _inverse_squaring_helper(T0, theta) + + # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1). + # This requires the nodes and weights + # corresponding to degree-m Gauss-Legendre quadrature. + # These quadrature arrays need to be transformed from the [-1, 1] interval + # to the [0, 1] interval. + nodes, weights = scipy.special.p_roots(m) + nodes = nodes.real + if nodes.shape != (m,) or weights.shape != (m,): + raise Exception('internal error') + nodes = 0.5 + 0.5 * nodes + weights = 0.5 * weights + ident = np.identity(n) + U = np.zeros_like(R) + for alpha, beta in zip(weights, nodes): + U += solve_triangular(ident + beta*R, alpha*R) + U *= np.exp2(s) + + # Skip this step if the principal branch + # does not exist at T0; this happens when a diagonal entry of T0 + # is negative with imaginary part 0. 
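+    # (When it does exist, recomputing these entries directly from T0
+    # uses the exact eigenvalue logarithms instead of the values
+    # accumulated through the Pade evaluation and the s squarings above.)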
+ has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0)) + if has_principal_branch: + + # Recompute diagonal entries of U. + U[np.diag_indices(n)] = np.log(np.diag(T0)) + + # Recompute superdiagonal entries of U. + # This indexing of this code should be renovated + # when newer np.diagonal() becomes available. + for i in range(n-1): + l1 = T0[i, i] + l2 = T0[i+1, i+1] + t12 = T0[i, i+1] + U[i, i+1] = _logm_superdiag_entry(l1, l2, t12) + + # Return the logm of the upper triangular matrix. + if not np.array_equal(U, np.triu(U)): + raise Exception('internal inconsistency') + return U + + +def _logm_force_nonsingular_triangular_matrix(T, inplace=False): + # The input matrix should be upper triangular. + # The eps is ad hoc and is not meant to be machine precision. + tri_eps = 1e-20 + abs_diag = np.absolute(np.diag(T)) + if np.any(abs_diag == 0): + exact_singularity_msg = 'The logm input matrix is exactly singular.' + warnings.warn(exact_singularity_msg, LogmExactlySingularWarning) + if not inplace: + T = T.copy() + n = T.shape[0] + for i in range(n): + if not T[i, i]: + T[i, i] = tri_eps + elif np.any(abs_diag < tri_eps): + near_singularity_msg = 'The logm input matrix may be nearly singular.' + warnings.warn(near_singularity_msg, LogmNearlySingularWarning) + return T + + +def _logm(A): + """ + Compute the matrix logarithm. + + See the logm docstring in matfuncs.py for more info. + + Notes + ----- + In this function we look at triangular matrices that are similar + to the input matrix. If any diagonal entry of such a triangular matrix + is exactly zero then the original matrix is singular. + The matrix logarithm does not exist for such matrices, + but in such cases we will pretend that the diagonal entries that are zero + are actually slightly positive by an ad-hoc amount, in the interest + of returning something more useful than NaN. This will cause a warning. + + """ + A = np.asarray(A) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected a square matrix') + + # If the input matrix dtype is integer then copy to a float dtype matrix. + if issubclass(A.dtype.type, np.integer): + A = np.asarray(A, dtype=float) + + keep_it_real = np.isrealobj(A) + try: + if np.array_equal(A, np.triu(A)): + A = _logm_force_nonsingular_triangular_matrix(A) + if np.min(np.diag(A)) < 0: + A = A.astype(complex) + return _logm_triu(A) + else: + if keep_it_real: + T, Z = schur(A) + if not np.array_equal(T, np.triu(T)): + T, Z = rsf2csf(T, Z) + else: + T, Z = schur(A, output='complex') + T = _logm_force_nonsingular_triangular_matrix(T, inplace=True) + U = _logm_triu(T) + ZH = np.conjugate(Z).T + return Z.dot(U).dot(ZH) + except (SqrtmError, LogmError): + X = np.empty_like(A) + X.fill(np.nan) + return X diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_inv_ssq.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_inv_ssq.pyc new file mode 100644 index 0000000..b71ca70 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_inv_ssq.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_sqrtm.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_sqrtm.py new file mode 100644 index 0000000..d75a82a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_sqrtm.py @@ -0,0 +1,196 @@ +""" +Matrix square root for general matrices and for upper triangular matrices. + +This module exists to avoid cyclic imports. 
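+
+The upper triangular case uses the blocked Schur algorithm of Deadman,
+Higham and Ralha (see reference [1] in `_sqrtm_triu` below).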
+ +""" +from __future__ import division, print_function, absolute_import + +__all__ = ['sqrtm'] + +import numpy as np + +from scipy._lib._util import _asarray_validated + + +# Local imports +from .misc import norm +from .lapack import ztrsyl, dtrsyl +from .decomp_schur import schur, rsf2csf + + +class SqrtmError(np.linalg.LinAlgError): + pass + + +def _sqrtm_triu(T, blocksize=64): + """ + Matrix square root of an upper triangular matrix. + + This is a helper function for `sqrtm` and `logm`. + + Parameters + ---------- + T : (N, N) array_like upper triangular + Matrix whose square root to evaluate + blocksize : int, optional + If the blocksize is not degenerate with respect to the + size of the input array, then use a blocked algorithm. (Default: 64) + + Returns + ------- + sqrtm : (N, N) ndarray + Value of the sqrt function at `T` + + References + ---------- + .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013) + "Blocked Schur Algorithms for Computing the Matrix Square Root, + Lecture Notes in Computer Science, 7782. pp. 171-182. + + """ + T_diag = np.diag(T) + keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0 + if not keep_it_real: + T_diag = T_diag.astype(complex) + R = np.diag(np.sqrt(T_diag)) + + # Compute the number of blocks to use; use at least one block. + n, n = T.shape + nblocks = max(n // blocksize, 1) + + # Compute the smaller of the two sizes of blocks that + # we will actually use, and compute the number of large blocks. + bsmall, nlarge = divmod(n, nblocks) + blarge = bsmall + 1 + nsmall = nblocks - nlarge + if nsmall * bsmall + nlarge * blarge != n: + raise Exception('internal inconsistency') + + # Define the index range covered by each block. + start_stop_pairs = [] + start = 0 + for count, size in ((nsmall, bsmall), (nlarge, blarge)): + for i in range(count): + start_stop_pairs.append((start, start + size)) + start += size + + # Within-block interactions. + for start, stop in start_stop_pairs: + for j in range(start, stop): + for i in range(j-1, start-1, -1): + s = 0 + if j - i > 1: + s = R[i, i+1:j].dot(R[i+1:j, j]) + denom = R[i, i] + R[j, j] + num = T[i, j] - s + if denom != 0: + R[i, j] = (T[i, j] - s) / denom + elif denom == 0 and num == 0: + R[i, j] = 0 + else: + raise SqrtmError('failed to find the matrix square root') + + # Between-block interactions. + for j in range(nblocks): + jstart, jstop = start_stop_pairs[j] + for i in range(j-1, -1, -1): + istart, istop = start_stop_pairs[i] + S = T[istart:istop, jstart:jstop] + if j - i > 1: + S = S - R[istart:istop, istop:jstart].dot(R[istop:jstart, + jstart:jstop]) + + # Invoke LAPACK. + # For more details, see the solve_sylvester implemention + # and the fortran dtrsyl and ztrsyl docs. + Rii = R[istart:istop, istart:istop] + Rjj = R[jstart:jstop, jstart:jstop] + if keep_it_real: + x, scale, info = dtrsyl(Rii, Rjj, S) + else: + x, scale, info = ztrsyl(Rii, Rjj, S) + R[istart:istop, jstart:jstop] = x * scale + + # Return the matrix square root. + return R + + +def sqrtm(A, disp=True, blocksize=64): + """ + Matrix square root. + + Parameters + ---------- + A : (N, N) array_like + Matrix whose square root to evaluate + disp : bool, optional + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + blocksize : integer, optional + If the blocksize is not degenerate with respect to the + size of the input array, then use a blocked algorithm. 
(Default: 64) + + Returns + ------- + sqrtm : (N, N) ndarray + Value of the sqrt function at `A` + + errest : float + (if disp == False) + + Frobenius norm of the estimated error, ||err||_F / ||A||_F + + References + ---------- + .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013) + "Blocked Schur Algorithms for Computing the Matrix Square Root, + Lecture Notes in Computer Science, 7782. pp. 171-182. + + Examples + -------- + >>> from scipy.linalg import sqrtm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> r = sqrtm(a) + >>> r + array([[ 0.75592895, 1.13389342], + [ 0.37796447, 1.88982237]]) + >>> r.dot(r) + array([[ 1., 3.], + [ 1., 4.]]) + + """ + A = _asarray_validated(A, check_finite=True, as_inexact=True) + if len(A.shape) != 2: + raise ValueError("Non-matrix input to matrix function.") + if blocksize < 1: + raise ValueError("The blocksize should be at least 1.") + keep_it_real = np.isrealobj(A) + if keep_it_real: + T, Z = schur(A) + if not np.array_equal(T, np.triu(T)): + T, Z = rsf2csf(T, Z) + else: + T, Z = schur(A, output='complex') + failflag = False + try: + R = _sqrtm_triu(T, blocksize=blocksize) + ZH = np.conjugate(Z).T + X = Z.dot(R).dot(ZH) + except SqrtmError: + failflag = True + X = np.empty_like(A) + X.fill(np.nan) + + if disp: + if failflag: + print("Failed to find a square root.") + return X + else: + try: + arg2 = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro') + except ValueError: + # NaNs in matrix + arg2 = np.inf + + return X, arg2 diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_sqrtm.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_sqrtm.pyc new file mode 100644 index 0000000..3582dd8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_matfuncs_sqrtm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_procrustes.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_procrustes.py new file mode 100644 index 0000000..8148455 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_procrustes.py @@ -0,0 +1,91 @@ +""" +Solve the orthogonal Procrustes problem. + +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from .decomp_svd import svd + + +__all__ = ['orthogonal_procrustes'] + + +def orthogonal_procrustes(A, B, check_finite=True): + """ + Compute the matrix solution of the orthogonal Procrustes problem. + + Given matrices A and B of equal shape, find an orthogonal matrix R + that most closely maps A to B using the algorithm given in [1]_. + + Parameters + ---------- + A : (M, N) array_like + Matrix to be mapped. + B : (M, N) array_like + Target matrix. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + R : (N, N) ndarray + The matrix solution of the orthogonal Procrustes problem. + Minimizes the Frobenius norm of ``(A @ R) - B``, subject to + ``R.T @ R = I``. + scale : float + Sum of the singular values of ``A.T @ B``. + + Raises + ------ + ValueError + If the input array shapes don't match or if check_finite is True and + the arrays contain Inf or NaN. + + Notes + ----- + Note that unlike higher level Procrustes analyses of spatial data, this + function only uses orthogonal transformations like rotations and + reflections, and it does not use scaling or translation. 
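+
+    Concretely, with ``u, w, vt = svd(A.T.dot(B))`` the minimizer is
+    ``R = u.dot(vt)`` and ``scale = w.sum()``, following Schonemann's
+    classical construction.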
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Peter H. Schonemann, "A generalized solution of the orthogonal
+           Procrustes problem", Psychometrika -- Vol. 31, No. 1, March, 1966.
+
+    Examples
+    --------
+    >>> from scipy.linalg import orthogonal_procrustes
+    >>> A = np.array([[ 2, 0, 1], [-2, 0, 0]])
+
+    Flip the order of columns and check for the anti-diagonal mapping
+
+    >>> R, sca = orthogonal_procrustes(A, np.fliplr(A))
+    >>> R
+    array([[-5.34384992e-17,  0.00000000e+00,  1.00000000e+00],
+           [ 0.00000000e+00,  1.00000000e+00,  0.00000000e+00],
+           [ 1.00000000e+00,  0.00000000e+00, -7.85941422e-17]])
+    >>> sca
+    9.0
+
+    """
+    if check_finite:
+        A = np.asarray_chkfinite(A)
+        B = np.asarray_chkfinite(B)
+    else:
+        A = np.asanyarray(A)
+        B = np.asanyarray(B)
+    if A.ndim != 2:
+        raise ValueError('expected ndim to be 2, but observed %s' % A.ndim)
+    if A.shape != B.shape:
+        raise ValueError('the shapes of A and B differ (%s vs %s)' % (
+            A.shape, B.shape))
+    # Be clever with transposes, with the intention to save memory.
+    u, w, vt = svd(B.T.dot(A).T)
+    R = u.dot(vt)
+    scale = w.sum()
+    return R, scale
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_procrustes.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_procrustes.pyc
new file mode 100644
index 0000000..39cf4e7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_procrustes.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_sketches.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_sketches.py
new file mode 100644
index 0000000..b57eff4
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_sketches.py
@@ -0,0 +1,121 @@
+""" Sketching-based Matrix Computations """
+
+# Author: Jordi Montes <jomsdev@gmail.com>
+#         August 28, 2017
+
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+
+from scipy._lib._util import check_random_state
+
+__all__ = ['clarkson_woodruff_transform']
+
+
+def cwt_matrix(n_rows, n_columns, seed=None):
+    r"""
+    Generate a matrix S for the Clarkson-Woodruff sketch.
+
+    Given the desired size of matrix, the method returns a matrix S of size
+    (n_rows, n_columns) where each column has exactly one nonzero entry,
+    placed in a random row and set to +1 or -1 with equal probability.
+
+    Parameters
+    ----------
+    n_rows: int
+        Number of rows of S
+    n_columns: int
+        Number of columns of S
+    seed : None or int or `numpy.random.RandomState` instance, optional
+        This parameter defines the ``RandomState`` object to use for drawing
+        random variates.
+        If None (or ``np.random``), the global ``np.random`` state is used.
+        If integer, it is used to seed the local ``RandomState`` instance.
+        Default is None.
+
+    Returns
+    -------
+    S : (n_rows, n_columns) array_like
+
+    Notes
+    -----
+    Given a matrix A, with probability at least 9/10,
+
+    .. math:: ||SA|| = (1 \pm \epsilon)||A||
+
+    where epsilon is related to the size of S.
+    """
+    rng = check_random_state(seed)
+    S = np.zeros((n_rows, n_columns))
+    # Draw the nonzero positions from the seeded generator too, so that
+    # `seed` fully determines the sketch.
+    nz_positions = rng.randint(0, n_rows, n_columns)
+    values = rng.choice([1, -1], n_columns)
+    for i in range(n_columns):
+        S[nz_positions[i]][i] = values[i]
+
+    return S
+
+
+def clarkson_woodruff_transform(input_matrix, sketch_size, seed=None):
+    r"""
+    Find low-rank matrix approximation via the Clarkson-Woodruff Transform.
+
+    Given an input_matrix ``A`` of size ``(n, d)``, compute a matrix ``A'`` of
+    size (sketch_size, d) which holds:
+
+    .. 
math:: ||Ax|| = (1 \pm \epsilon)||A'x|| + + with high probability. + + The error is related to the number of rows of the sketch and it is bounded + + .. math:: poly(r(\epsilon^{-1})) + + Parameters + ---------- + input_matrix: array_like + Input matrix, of shape ``(n, d)``. + sketch_size: int + Number of rows for the sketch. + seed : None or int or `numpy.random.RandomState` instance, optional + This parameter defines the ``RandomState`` object to use for drawing + random variates. + If None (or ``np.random``), the global ``np.random`` state is used. + If integer, it is used to seed the local ``RandomState`` instance. + Default is None. + + Returns + ------- + A' : array_like + Sketch of the input matrix ``A``, of size ``(sketch_size, d)``. + + Notes + ----- + This is an implementation of the Clarkson-Woodruff Transform (CountSketch). + ``A'`` can be computed in principle in ``O(nnz(A))`` (with ``nnz`` meaning + the number of nonzero entries), however we don't take advantage of sparse + matrices in this implementation. + + Examples + -------- + Given a big dense matrix ``A``: + + >>> from scipy import linalg + >>> n_rows, n_columns, sketch_n_rows = (2000, 100, 100) + >>> threshold = 0.1 + >>> tmp = np.random.normal(0, 0.1, n_rows*n_columns) + >>> A = np.reshape(tmp, (n_rows, n_columns)) + >>> sketch = linalg.clarkson_woodruff_transform(A, sketch_n_rows) + >>> sketch.shape + (100, 100) + >>> normA = linalg.norm(A) + >>> norm_sketch = linalg.norm(sketch) + + Now with high probability, the condition ``abs(normA-normSketch) < + threshold`` holds. + + References + ---------- + .. [1] Kenneth L. Clarkson and David P. Woodruff. Low rank approximation and + regression in input sparsity time. In STOC, 2013. + + """ + S = cwt_matrix(sketch_size, input_matrix.shape[0], seed) + return np.dot(S, input_matrix) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_sketches.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_sketches.pyc new file mode 100644 index 0000000..b4aa169 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_sketches.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_solve_toeplitz.so b/project/venv/lib/python2.7/site-packages/scipy/linalg/_solve_toeplitz.so new file mode 100755 index 0000000..2169d1f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_solve_toeplitz.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_solvers.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_solvers.py new file mode 100644 index 0000000..f5c5d3a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_solvers.py @@ -0,0 +1,844 @@ +"""Matrix equation solver routines""" +# Author: Jeffrey Armstrong <jeff@approximatrix.com> +# February 24, 2012 + +# Modified: Chad Fulton <ChadFulton@gmail.com> +# June 19, 2014 + +# Modified: Ilhan Polat <ilhanpolat@gmail.com> +# September 13, 2016 + +from __future__ import division, print_function, absolute_import + +import warnings +import numpy as np +from numpy.linalg import inv, LinAlgError, norm, cond, svd + +from .basic import solve, solve_triangular, matrix_balance +from .lapack import get_lapack_funcs +from .decomp_schur import schur +from .decomp_lu import lu +from .decomp_qr import qr +from ._decomp_qz import ordqz +from .decomp import _asarray_validated +from .special_matrices import kron, block_diag + +__all__ = ['solve_sylvester', + 'solve_continuous_lyapunov', 'solve_discrete_lyapunov', + 
'solve_lyapunov', + 'solve_continuous_are', 'solve_discrete_are'] + + +def solve_sylvester(a, b, q): + """ + Computes a solution (X) to the Sylvester equation :math:`AX + XB = Q`. + + Parameters + ---------- + a : (M, M) array_like + Leading matrix of the Sylvester equation + b : (N, N) array_like + Trailing matrix of the Sylvester equation + q : (M, N) array_like + Right-hand side + + Returns + ------- + x : (M, N) ndarray + The solution to the Sylvester equation. + + Raises + ------ + LinAlgError + If solution was not found + + Notes + ----- + Computes a solution to the Sylvester matrix equation via the Bartels- + Stewart algorithm. The A and B matrices first undergo Schur + decompositions. The resulting matrices are used to construct an + alternative Sylvester equation (``RY + YS^T = F``) where the R and S + matrices are in quasi-triangular form (or, when R, S or F are complex, + triangular form). The simplified equation is then solved using + ``*TRSYL`` from LAPACK directly. + + .. versionadded:: 0.11.0 + + Examples + -------- + Given `a`, `b`, and `q` solve for `x`: + + >>> from scipy import linalg + >>> a = np.array([[-3, -2, 0], [-1, -1, 3], [3, -5, -1]]) + >>> b = np.array([[1]]) + >>> q = np.array([[1],[2],[3]]) + >>> x = linalg.solve_sylvester(a, b, q) + >>> x + array([[ 0.0625], + [-0.5625], + [ 0.6875]]) + >>> np.allclose(a.dot(x) + x.dot(b), q) + True + + """ + + # Compute the Schur decomp form of a + r, u = schur(a, output='real') + + # Compute the Schur decomp of b + s, v = schur(b.conj().transpose(), output='real') + + # Construct f = u'*q*v + f = np.dot(np.dot(u.conj().transpose(), q), v) + + # Call the Sylvester equation solver + trsyl, = get_lapack_funcs(('trsyl',), (r, s, f)) + if trsyl is None: + raise RuntimeError('LAPACK implementation does not contain a proper ' + 'Sylvester equation solver (TRSYL)') + y, scale, info = trsyl(r, s, f, tranb='C') + + y = scale*y + + if info < 0: + raise LinAlgError("Illegal value encountered in " + "the %d term" % (-info,)) + + return np.dot(np.dot(u, y), v.conj().transpose()) + + +def solve_continuous_lyapunov(a, q): + """ + Solves the continuous Lyapunov equation :math:`AX + XA^H = Q`. + + Uses the Bartels-Stewart algorithm to find :math:`X`. + + Parameters + ---------- + a : array_like + A square matrix + + q : array_like + Right-hand side square matrix + + Returns + ------- + x : ndarray + Solution to the continuous Lyapunov equation + + See Also + -------- + solve_discrete_lyapunov : computes the solution to the discrete-time + Lyapunov equation + solve_sylvester : computes the solution to the Sylvester equation + + Notes + ----- + The continuous Lyapunov equation is a special form of the Sylvester + equation, hence this solver relies on LAPACK routine ?TRSYL. + + .. 
versionadded:: 0.11.0 + + Examples + -------- + Given `a` and `q` solve for `x`: + + >>> from scipy import linalg + >>> a = np.array([[-3, -2, 0], [-1, -1, 0], [0, -5, -1]]) + >>> b = np.array([2, 4, -1]) + >>> q = np.eye(3) + >>> x = linalg.solve_continuous_lyapunov(a, q) + >>> x + array([[ -0.75 , 0.875 , -3.75 ], + [ 0.875 , -1.375 , 5.3125], + [ -3.75 , 5.3125, -27.0625]]) + >>> np.allclose(a.dot(x) + x.dot(a.T), q) + True + """ + + a = np.atleast_2d(_asarray_validated(a, check_finite=True)) + q = np.atleast_2d(_asarray_validated(q, check_finite=True)) + + r_or_c = float + + for ind, _ in enumerate((a, q)): + if np.iscomplexobj(_): + r_or_c = complex + + if not np.equal(*_.shape): + raise ValueError("Matrix {} should be square.".format("aq"[ind])) + + # Shape consistency check + if a.shape != q.shape: + raise ValueError("Matrix a and q should have the same shape.") + + # Compute the Schur decomp form of a + r, u = schur(a, output='real') + + # Construct f = u'*q*u + f = u.conj().T.dot(q.dot(u)) + + # Call the Sylvester equation solver + trsyl = get_lapack_funcs('trsyl', (r, f)) + + dtype_string = 'T' if r_or_c == float else 'C' + y, scale, info = trsyl(r, r, f, tranb=dtype_string) + + if info < 0: + raise ValueError('?TRSYL exited with the internal error ' + '"illegal value in argument number {}.". See ' + 'LAPACK documentation for the ?TRSYL error codes.' + ''.format(-info)) + elif info == 1: + warnings.warn('Input "a" has an eigenvalue pair whose sum is ' + 'very close to or exactly zero. The solution is ' + 'obtained via perturbing the coefficients.', + RuntimeWarning) + y *= scale + + return u.dot(y).dot(u.conj().T) + + +# For backwards compatibility, keep the old name +solve_lyapunov = solve_continuous_lyapunov + + +def _solve_discrete_lyapunov_direct(a, q): + """ + Solves the discrete Lyapunov equation directly. + + This function is called by the `solve_discrete_lyapunov` function with + `method=direct`. It is not supposed to be called directly. + """ + + lhs = kron(a, a.conj()) + lhs = np.eye(lhs.shape[0]) - lhs + x = solve(lhs, q.flatten()) + + return np.reshape(x, q.shape) + + +def _solve_discrete_lyapunov_bilinear(a, q): + """ + Solves the discrete Lyapunov equation using a bilinear transformation. + + This function is called by the `solve_discrete_lyapunov` function with + `method=bilinear`. It is not supposed to be called directly. + """ + eye = np.eye(a.shape[0]) + aH = a.conj().transpose() + aHI_inv = inv(aH + eye) + b = np.dot(aH - eye, aHI_inv) + c = 2*np.dot(np.dot(inv(a + eye), q), aHI_inv) + return solve_lyapunov(b.conj().transpose(), -c) + + +def solve_discrete_lyapunov(a, q, method=None): + """ + Solves the discrete Lyapunov equation :math:`AXA^H - X + Q = 0`. + + Parameters + ---------- + a, q : (M, M) array_like + Square matrices corresponding to A and Q in the equation + above respectively. Must have the same shape. + + method : {'direct', 'bilinear'}, optional + Type of solver. + + If not given, chosen to be ``direct`` if ``M`` is less than 10 and + ``bilinear`` otherwise. + + Returns + ------- + x : ndarray + Solution to the discrete Lyapunov equation + + See Also + -------- + solve_continuous_lyapunov : computes the solution to the continuous-time + Lyapunov equation + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is *direct* if ``M`` is less than 10 + and ``bilinear`` otherwise. + + Method *direct* uses a direct analytical solution to the discrete Lyapunov + equation. 
The algorithm is given in, for example, [1]_. However it requires + the linear solution of a system with dimension :math:`M^2` so that + performance degrades rapidly for even moderately sized matrices. + + Method *bilinear* uses a bilinear transformation to convert the discrete + Lyapunov equation to a continuous Lyapunov equation :math:`(BX+XB'=-C)` + where :math:`B=(A-I)(A+I)^{-1}` and + :math:`C=2(A' + I)^{-1} Q (A + I)^{-1}`. The continuous equation can be + efficiently solved since it is a special case of a Sylvester equation. + The transformation algorithm is from Popov (1964) as described in [2]_. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] Hamilton, James D. Time Series Analysis, Princeton: Princeton + University Press, 1994. 265. Print. + http://doc1.lbfl.li/aca/FLMF037168.pdf + .. [2] Gajic, Z., and M.T.J. Qureshi. 2008. + Lyapunov Matrix Equation in System Stability and Control. + Dover Books on Engineering Series. Dover Publications. + + Examples + -------- + Given `a` and `q` solve for `x`: + + >>> from scipy import linalg + >>> a = np.array([[0.2, 0.5],[0.7, -0.9]]) + >>> q = np.eye(2) + >>> x = linalg.solve_discrete_lyapunov(a, q) + >>> x + array([[ 0.70872893, 1.43518822], + [ 1.43518822, -2.4266315 ]]) + >>> np.allclose(a.dot(x).dot(a.T)-x, -q) + True + + """ + a = np.asarray(a) + q = np.asarray(q) + if method is None: + # Select automatically based on size of matrices + if a.shape[0] >= 10: + method = 'bilinear' + else: + method = 'direct' + + meth = method.lower() + + if meth == 'direct': + x = _solve_discrete_lyapunov_direct(a, q) + elif meth == 'bilinear': + x = _solve_discrete_lyapunov_bilinear(a, q) + else: + raise ValueError('Unknown solver %s' % method) + + return x + + +def solve_continuous_are(a, b, q, r, e=None, s=None, balanced=True): + r""" + Solves the continuous-time algebraic Riccati equation (CARE). + + The CARE is defined as + + .. math:: + + X A + A^H X - X B R^{-1} B^H X + Q = 0 + + The limitations for a solution to exist are : + + * All eigenvalues of :math:`A` on the right half plane, should be + controllable. + + * The associated hamiltonian pencil (See Notes), should have + eigenvalues sufficiently away from the imaginary axis. + + Moreover, if ``e`` or ``s`` is not precisely ``None``, then the + generalized version of CARE + + .. math:: + + E^HXA + A^HXE - (E^HXB + S) R^{-1} (B^HXE + S^H) + Q = 0 + + is solved. When omitted, ``e`` is assumed to be the identity and ``s`` + is assumed to be the zero matrix with sizes compatible with ``a`` and + ``b`` respectively. + + Parameters + ---------- + a : (M, M) array_like + Square matrix + b : (M, N) array_like + Input + q : (M, M) array_like + Input + r : (N, N) array_like + Nonsingular square matrix + e : (M, M) array_like, optional + Nonsingular square matrix + s : (M, N) array_like, optional + Input + balanced : bool, optional + The boolean that indicates whether a balancing step is performed + on the data. The default is set to True. + + Returns + ------- + x : (M, M) ndarray + Solution to the continuous-time algebraic Riccati equation. + + Raises + ------ + LinAlgError + For cases where the stable subspace of the pencil could not be + isolated. See Notes section and the references for details. 
+ + See Also + -------- + solve_discrete_are : Solves the discrete-time algebraic Riccati equation + + Notes + ----- + The equation is solved by forming the extended hamiltonian matrix pencil, + as described in [1]_, :math:`H - \lambda J` given by the block matrices :: + + [ A 0 B ] [ E 0 0 ] + [-Q -A^H -S ] - \lambda * [ 0 E^H 0 ] + [ S^H B^H R ] [ 0 0 0 ] + + and using a QZ decomposition method. + + In this algorithm, the fail conditions are linked to the symmetry + of the product :math:`U_2 U_1^{-1}` and condition number of + :math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the + eigenvectors spanning the stable subspace with 2m rows and partitioned + into two m-row matrices. See [1]_ and [2]_ for more details. + + In order to improve the QZ decomposition accuracy, the pencil goes + through a balancing step where the sum of absolute values of + :math:`H` and :math:`J` entries (after removing the diagonal entries of + the sum) is balanced following the recipe given in [3]_. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] P. van Dooren , "A Generalized Eigenvalue Approach For Solving + Riccati Equations.", SIAM Journal on Scientific and Statistical + Computing, Vol.2(2), DOI: 10.1137/0902010 + + .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati + Equations.", Massachusetts Institute of Technology. Laboratory for + Information and Decision Systems. LIDS-R ; 859. Available online : + http://hdl.handle.net/1721.1/1301 + + .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001, + SIAM J. Sci. Comput., 2001, Vol.22(5), DOI: 10.1137/S1064827500367993 + + Examples + -------- + Given `a`, `b`, `q`, and `r` solve for `x`: + + >>> from scipy import linalg + >>> a = np.array([[4, 3], [-4.5, -3.5]]) + >>> b = np.array([[1], [-1]]) + >>> q = np.array([[9, 6], [6, 4.]]) + >>> r = 1 + >>> x = linalg.solve_continuous_are(a, b, q, r) + >>> x + array([[ 21.72792206, 14.48528137], + [ 14.48528137, 9.65685425]]) + >>> np.allclose(a.T.dot(x) + x.dot(a)-x.dot(b).dot(b.T).dot(x), -q) + True + + """ + + # Validate input arguments + a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args( + a, b, q, r, e, s, 'care') + + H = np.empty((2*m+n, 2*m+n), dtype=r_or_c) + H[:m, :m] = a + H[:m, m:2*m] = 0. + H[:m, 2*m:] = b + H[m:2*m, :m] = -q + H[m:2*m, m:2*m] = -a.conj().T + H[m:2*m, 2*m:] = 0. if s is None else -s + H[2*m:, :m] = 0. if s is None else s.conj().T + H[2*m:, m:2*m] = b.conj().T + H[2*m:, 2*m:] = r + + if gen_are and e is not None: + J = block_diag(e, e.conj().T, np.zeros_like(r, dtype=r_or_c)) + else: + J = block_diag(np.eye(2*m), np.zeros_like(r, dtype=r_or_c)) + + if balanced: + # xGEBAL does not remove the diagonals before scaling. Also + # to avoid destroying the Symplectic structure, we follow Ref.3 + M = np.abs(H) + np.abs(J) + M[np.diag_indices_from(M)] = 0. + _, (sca, _) = matrix_balance(M, separate=1, permute=0) + # do we need to bother? + if not np.allclose(sca, np.ones_like(sca)): + # Now impose diag(D,inv(D)) from Benner where D is + # square root of s_i/s_(n+i) for i=0,.... + sca = np.log2(sca) + # NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !! + s = np.round((sca[m:2*m] - sca[:m])/2) + sca = 2 ** np.r_[s, -s, sca[2*m:]] + # Elementwise multiplication via broadcasting. 
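+            # (The broadcasted product scales entry (i, j) by sca[i]/sca[j],
+            # i.e. it applies diag(sca) @ M @ diag(1/sca) implicitly.)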
+ elwisescale = sca[:, None] * np.reciprocal(sca) + H *= elwisescale + J *= elwisescale + + # Deflate the pencil to 2m x 2m ala Ref.1, eq.(55) + q, r = qr(H[:, -n:]) + H = q[:, n:].conj().T.dot(H[:, :2*m]) + J = q[:2*m, n:].conj().T.dot(J[:2*m, :2*m]) + + # Decide on which output type is needed for QZ + out_str = 'real' if r_or_c == float else 'complex' + + _, _, _, _, _, u = ordqz(H, J, sort='lhp', overwrite_a=True, + overwrite_b=True, check_finite=False, + output=out_str) + + # Get the relevant parts of the stable subspace basis + if e is not None: + u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m]))) + u00 = u[:m, :m] + u10 = u[m:, :m] + + # Solve via back-substituion after checking the condition of u00 + up, ul, uu = lu(u00) + if 1/cond(uu) < np.spacing(1.): + raise LinAlgError('Failed to find a finite solution.') + + # Exploit the triangular structure + x = solve_triangular(ul.conj().T, + solve_triangular(uu.conj().T, + u10.conj().T, + lower=True), + unit_diagonal=True, + ).conj().T.dot(up.conj().T) + if balanced: + x *= sca[:m, None] * sca[:m] + + # Check the deviation from symmetry for lack of success + # See proof of Thm.5 item 3 in [2] + u_sym = u00.conj().T.dot(u10) + n_u_sym = norm(u_sym, 1) + u_sym = u_sym - u_sym.conj().T + sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym]) + + if norm(u_sym, 1) > sym_threshold: + raise LinAlgError('The associated Hamiltonian pencil has eigenvalues ' + 'too close to the imaginary axis') + + return (x + x.conj().T)/2 + + +def solve_discrete_are(a, b, q, r, e=None, s=None, balanced=True): + r""" + Solves the discrete-time algebraic Riccati equation (DARE). + + The DARE is defined as + + .. math:: + + A^HXA - X - (A^HXB) (R + B^HXB)^{-1} (B^HXA) + Q = 0 + + The limitations for a solution to exist are : + + * All eigenvalues of :math:`A` outside the unit disc, should be + controllable. + + * The associated symplectic pencil (See Notes), should have + eigenvalues sufficiently away from the unit circle. + + Moreover, if ``e`` and ``s`` are not both precisely ``None``, then the + generalized version of DARE + + .. math:: + + A^HXA - E^HXE - (A^HXB+S) (R+B^HXB)^{-1} (B^HXA+S^H) + Q = 0 + + is solved. When omitted, ``e`` is assumed to be the identity and ``s`` + is assumed to be the zero matrix. + + Parameters + ---------- + a : (M, M) array_like + Square matrix + b : (M, N) array_like + Input + q : (M, M) array_like + Input + r : (N, N) array_like + Square matrix + e : (M, M) array_like, optional + Nonsingular square matrix + s : (M, N) array_like, optional + Input + balanced : bool + The boolean that indicates whether a balancing step is performed + on the data. The default is set to True. + + Returns + ------- + x : (M, M) ndarray + Solution to the discrete algebraic Riccati equation. + + Raises + ------ + LinAlgError + For cases where the stable subspace of the pencil could not be + isolated. See Notes section and the references for details. + + See Also + -------- + solve_continuous_are : Solves the continuous algebraic Riccati equation + + Notes + ----- + The equation is solved by forming the extended symplectic matrix pencil, + as described in [1]_, :math:`H - \lambda J` given by the block matrices :: + + [ A 0 B ] [ E 0 B ] + [ -Q E^H -S ] - \lambda * [ 0 A^H 0 ] + [ S^H 0 R ] [ 0 -B^H 0 ] + + and using a QZ decomposition method. + + In this algorithm, the fail conditions are linked to the symmetry + of the product :math:`U_2 U_1^{-1}` and condition number of + :math:`U_1`. 
Here, :math:`U` is the 2m-by-m matrix that holds the + eigenvectors spanning the stable subspace with 2m rows and partitioned + into two m-row matrices. See [1]_ and [2]_ for more details. + + In order to improve the QZ decomposition accuracy, the pencil goes + through a balancing step where the sum of absolute values of + :math:`H` and :math:`J` rows/cols (after removing the diagonal entries) + is balanced following the recipe given in [3]_. If the data has small + numerical noise, balancing may amplify their effects and some clean up + is required. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] P. van Dooren , "A Generalized Eigenvalue Approach For Solving + Riccati Equations.", SIAM Journal on Scientific and Statistical + Computing, Vol.2(2), DOI: 10.1137/0902010 + + .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati + Equations.", Massachusetts Institute of Technology. Laboratory for + Information and Decision Systems. LIDS-R ; 859. Available online : + http://hdl.handle.net/1721.1/1301 + + .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001, + SIAM J. Sci. Comput., 2001, Vol.22(5), DOI: 10.1137/S1064827500367993 + + Examples + -------- + Given `a`, `b`, `q`, and `r` solve for `x`: + + >>> from scipy import linalg as la + >>> a = np.array([[0, 1], [0, -1]]) + >>> b = np.array([[1, 0], [2, 1]]) + >>> q = np.array([[-4, -4], [-4, 7]]) + >>> r = np.array([[9, 3], [3, 1]]) + >>> x = la.solve_discrete_are(a, b, q, r) + >>> x + array([[-4., -4.], + [-4., 7.]]) + >>> R = la.solve(r + b.T.dot(x).dot(b), b.T.dot(x).dot(a)) + >>> np.allclose(a.T.dot(x).dot(a) - x - a.T.dot(x).dot(b).dot(R), -q) + True + + """ + + # Validate input arguments + a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args( + a, b, q, r, e, s, 'dare') + + # Form the matrix pencil + H = np.zeros((2*m+n, 2*m+n), dtype=r_or_c) + H[:m, :m] = a + H[:m, 2*m:] = b + H[m:2*m, :m] = -q + H[m:2*m, m:2*m] = np.eye(m) if e is None else e.conj().T + H[m:2*m, 2*m:] = 0. if s is None else -s + H[2*m:, :m] = 0. if s is None else s.conj().T + H[2*m:, 2*m:] = r + + J = np.zeros_like(H, dtype=r_or_c) + J[:m, :m] = np.eye(m) if e is None else e + J[m:2*m, m:2*m] = a.conj().T + J[2*m:, m:2*m] = -b.conj().T + + if balanced: + # xGEBAL does not remove the diagonals before scaling. Also + # to avoid destroying the Symplectic structure, we follow Ref.3 + M = np.abs(H) + np.abs(J) + M[np.diag_indices_from(M)] = 0. + _, (sca, _) = matrix_balance(M, separate=1, permute=0) + # do we need to bother? + if not np.allclose(sca, np.ones_like(sca)): + # Now impose diag(D,inv(D)) from Benner where D is + # square root of s_i/s_(n+i) for i=0,.... + sca = np.log2(sca) + # NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !! + s = np.round((sca[m:2*m] - sca[:m])/2) + sca = 2 ** np.r_[s, -s, sca[2*m:]] + # Elementwise multiplication via broadcasting. 
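+            # (As in the CARE solver above: entry (i, j) is scaled by
+            # sca[i]/sca[j], applying diag(sca) @ M @ diag(1/sca) implicitly.)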
+            elwisescale = sca[:, None] * np.reciprocal(sca)
+            H *= elwisescale
+            J *= elwisescale
+
+    # Deflate the pencil by the R column ala Ref.1
+    q_of_qr, _ = qr(H[:, -n:])
+    H = q_of_qr[:, n:].conj().T.dot(H[:, :2*m])
+    J = q_of_qr[:, n:].conj().T.dot(J[:, :2*m])
+
+    # Decide on which output type is needed for QZ
+    out_str = 'real' if r_or_c == float else 'complex'
+
+    _, _, _, _, _, u = ordqz(H, J, sort='iuc',
+                             overwrite_a=True,
+                             overwrite_b=True,
+                             check_finite=False,
+                             output=out_str)
+
+    # Get the relevant parts of the stable subspace basis
+    if e is not None:
+        u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m])))
+    u00 = u[:m, :m]
+    u10 = u[m:, :m]
+
+    # Solve via back-substitution after checking the condition of u00
+    up, ul, uu = lu(u00)
+
+    if 1/cond(uu) < np.spacing(1.):
+        raise LinAlgError('Failed to find a finite solution.')
+
+    # Exploit the triangular structure
+    x = solve_triangular(ul.conj().T,
+                         solve_triangular(uu.conj().T,
+                                          u10.conj().T,
+                                          lower=True),
+                         unit_diagonal=True,
+                         ).conj().T.dot(up.conj().T)
+    if balanced:
+        x *= sca[:m, None] * sca[:m]
+
+    # Check the deviation from symmetry for lack of success
+    # See proof of Thm.5 item 3 in [2]
+    u_sym = u00.conj().T.dot(u10)
+    n_u_sym = norm(u_sym, 1)
+    u_sym = u_sym - u_sym.conj().T
+    sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym])
+
+    if norm(u_sym, 1) > sym_threshold:
+        raise LinAlgError('The associated symplectic pencil has eigenvalues '
+                          'too close to the unit circle')
+
+    return (x + x.conj().T)/2
+
+
+def _are_validate_args(a, b, q, r, e, s, eq_type='care'):
+    """
+    A helper function to validate the arguments supplied to the
+    Riccati equation solvers. Any discrepancy found in the input
+    matrices leads to a ``ValueError`` exception.
+
+    Essentially, it performs:
+
+        - a check whether the input is free of NaN and Infs,
+        - a pass of the data through ``numpy.atleast_2d()``,
+        - a squareness check of the relevant arrays,
+        - a shape consistency check of the arrays,
+        - a singularity check of the relevant arrays,
+        - a symmetry check of the relevant matrices,
+        - a check whether the regular or the generalized version is asked.
+
+    This function is used by ``solve_continuous_are`` and
+    ``solve_discrete_are``.
+
+    Parameters
+    ----------
+    a, b, q, r, e, s : array_like
+        Input data
+    eq_type : str
+        Accepted arguments are 'care' and 'dare'.
+
+    Returns
+    -------
+    a, b, q, r, e, s : ndarray
+        Regularized input data
+    m, n : int
+        Shape of the problem
+    r_or_c : type
+        Data type of the problem, returns float or complex
+    gen_or_not : bool
+        Type of the equation, True for generalized and False for regular ARE.
+
+    """
+
+    if eq_type.lower() not in ('dare', 'care'):
+        raise ValueError("Equation type unknown. "
+                         "Only 'care' and 'dare' are understood.")
+
+    a = np.atleast_2d(_asarray_validated(a, check_finite=True))
+    b = np.atleast_2d(_asarray_validated(b, check_finite=True))
+    q = np.atleast_2d(_asarray_validated(q, check_finite=True))
+    r = np.atleast_2d(_asarray_validated(r, check_finite=True))
+
+    # Get the correct data types otherwise Numpy complains
+    # about pushing complex numbers into real arrays.
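+    # Promote to the complex code path as soon as any input array is complex.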
+ r_or_c = complex if np.iscomplexobj(b) else float + + for ind, mat in enumerate((a, q, r)): + if np.iscomplexobj(mat): + r_or_c = complex + + if not np.equal(*mat.shape): + raise ValueError("Matrix {} should be square.".format("aqr"[ind])) + + # Shape consistency checks + m, n = b.shape + if m != a.shape[0]: + raise ValueError("Matrix a and b should have the same number of rows.") + if m != q.shape[0]: + raise ValueError("Matrix a and q should have the same shape.") + if n != r.shape[0]: + raise ValueError("Matrix b and r should have the same number of cols.") + + # Check if the data matrices q, r are (sufficiently) hermitian + for ind, mat in enumerate((q, r)): + if norm(mat - mat.conj().T, 1) > np.spacing(norm(mat, 1))*100: + raise ValueError("Matrix {} should be symmetric/hermitian." + "".format("qr"[ind])) + + # Continuous time ARE should have a nonsingular r matrix. + if eq_type == 'care': + min_sv = svd(r, compute_uv=False)[-1] + if min_sv == 0. or min_sv < np.spacing(1.)*norm(r, 1): + raise ValueError('Matrix r is numerically singular.') + + # Check if the generalized case is required with omitted arguments + # perform late shape checking etc. + generalized_case = e is not None or s is not None + + if generalized_case: + if e is not None: + e = np.atleast_2d(_asarray_validated(e, check_finite=True)) + if not np.equal(*e.shape): + raise ValueError("Matrix e should be square.") + if m != e.shape[0]: + raise ValueError("Matrix a and e should have the same shape.") + # numpy.linalg.cond doesn't check for exact zeros and + # emits a runtime warning. Hence the following manual check. + min_sv = svd(e, compute_uv=False)[-1] + if min_sv == 0. or min_sv < np.spacing(1.) * norm(e, 1): + raise ValueError('Matrix e is numerically singular.') + if np.iscomplexobj(e): + r_or_c = complex + if s is not None: + s = np.atleast_2d(_asarray_validated(s, check_finite=True)) + if s.shape != b.shape: + raise ValueError("Matrix b and s should have the same shape.") + if np.iscomplexobj(s): + r_or_c = complex + + return a, b, q, r, e, s, m, n, r_or_c, generalized_case diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_solvers.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_solvers.pyc new file mode 100644 index 0000000..1d2fa20 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_solvers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_testutils.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/_testutils.py new file mode 100644 index 0000000..9b1d1dd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/_testutils.py @@ -0,0 +1,65 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np + + +class _FakeMatrix(object): + def __init__(self, data): + self._data = data + self.__array_interface__ = data.__array_interface__ + + +class _FakeMatrix2(object): + def __init__(self, data): + self._data = data + + def __array__(self): + return self._data + + +def _get_array(shape, dtype): + """ + Get a test array of given shape and data type. + Returned NxN matrices are posdef, and 2xN are banded-posdef. 
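+
+    For example, the square case yields the tridiagonal second-difference
+    matrix:
+
+    >>> _get_array((3, 3), float)
+    array([[ 2., -1.,  0.],
+           [-1.,  2., -1.],
+           [ 0., -1.,  2.]])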
+ + """ + if len(shape) == 2 and shape[0] == 2: + # yield a banded positive definite one + x = np.zeros(shape, dtype=dtype) + x[0, 1:] = -1 + x[1] = 2 + return x + elif len(shape) == 2 and shape[0] == shape[1]: + # always yield a positive definite matrix + x = np.zeros(shape, dtype=dtype) + j = np.arange(shape[0]) + x[j, j] = 2 + x[j[:-1], j[:-1]+1] = -1 + x[j[:-1]+1, j[:-1]] = -1 + return x + else: + np.random.seed(1234) + return np.random.randn(*shape).astype(dtype) + + +def _id(x): + return x + + +def assert_no_overwrite(call, shapes, dtypes=None): + """ + Test that a call does not overwrite its input arguments + """ + + if dtypes is None: + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + + for dtype in dtypes: + for order in ["C", "F"]: + for faker in [_id, _FakeMatrix, _FakeMatrix2]: + orig_inputs = [_get_array(s, dtype) for s in shapes] + inputs = [faker(x.copy(order)) for x in orig_inputs] + call(*inputs) + msg = "call modified inputs [%r, %r]" % (dtype, faker) + for a, b in zip(inputs, orig_inputs): + np.testing.assert_equal(a, b, err_msg=msg) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/_testutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/_testutils.pyc new file mode 100644 index 0000000..11836b0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/_testutils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/basic.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/basic.py new file mode 100644 index 0000000..9582989 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/basic.py @@ -0,0 +1,1619 @@ +# +# Author: Pearu Peterson, March 2002 +# +# w/ additions by Travis Oliphant, March 2002 +# and Jake Vanderplas, August 2012 + +from __future__ import division, print_function, absolute_import + +from warnings import warn +import numpy as np +from numpy import atleast_1d, atleast_2d +from .flinalg import get_flinalg_funcs +from .lapack import get_lapack_funcs, _compute_lwork +from .misc import LinAlgError, _datacopied, LinAlgWarning +from .decomp import _asarray_validated +from . import decomp, decomp_svd +from ._solve_toeplitz import levinson + +__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded', + 'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq', + 'pinv', 'pinv2', 'pinvh', 'matrix_balance'] + + +# Linear equations +def _solve_check(n, info, lamch=None, rcond=None): + """ Check arguments during the different steps of the solution phase """ + if info < 0: + raise ValueError('LAPACK reported an illegal value in {}-th argument' + '.'.format(-info)) + elif 0 < info: + raise LinAlgError('Matrix is singular.') + + if lamch is None: + return + E = lamch('E') + if rcond < E: + warn('Ill-conditioned matrix (rcond={:.6g}): ' + 'result may not be accurate.'.format(rcond), + LinAlgWarning, stacklevel=3) + + +def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, + overwrite_b=False, debug=None, check_finite=True, assume_a='gen', + transposed=False): + """ + Solves the linear equation set ``a * x = b`` for the unknown ``x`` + for square ``a`` matrix. + + If the data matrix is known to be a particular type then supplying the + corresponding string to ``assume_a`` key chooses the dedicated solver. 
+    The available options are
+
+    ===================  ========
+     generic matrix       'gen'
+     symmetric            'sym'
+     hermitian            'her'
+     positive definite    'pos'
+    ===================  ========
+
+    If omitted, ``'gen'`` is the default structure.
+
+    The datatype of the arrays defines which solver is called regardless
+    of the values. In other words, even when the complex array entries have
+    precisely zero imaginary parts, the complex solver will be called based
+    on the data type of the array.
+
+    Parameters
+    ----------
+    a : (N, N) array_like
+        Square input data
+    b : (N, NRHS) array_like
+        Input data for the right hand side.
+    sym_pos : bool, optional
+        Assume `a` is symmetric and positive definite. This keyword is
+        deprecated and the ``assume_a='pos'`` keyword is recommended instead.
+        The functionality is the same. It will be removed in the future.
+    lower : bool, optional
+        If True, use only the data contained in the lower triangle of `a`.
+        Default is to use the upper triangle. (Ignored for ``'gen'``.)
+    overwrite_a : bool, optional
+        Allow overwriting data in `a` (may enhance performance).
+        Default is False.
+    overwrite_b : bool, optional
+        Allow overwriting data in `b` (may enhance performance).
+        Default is False.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    assume_a : str, optional
+        Valid entries are explained above.
+    transposed : bool, optional
+        If True, solve ``a^T x = b`` for real matrices; raises
+        `NotImplementedError` for complex matrices.
+
+    Returns
+    -------
+    x : (N, NRHS) ndarray
+        The solution array.
+
+    Raises
+    ------
+    ValueError
+        If size mismatches are detected or input a is not square.
+    LinAlgError
+        If the matrix is singular.
+    LinAlgWarning
+        If an ill-conditioned input a is detected.
+    NotImplementedError
+        If transposed is True and input a is a complex matrix.
+
+    Examples
+    --------
+    Given `a` and `b`, solve for `x`:
+
+    >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
+    >>> b = np.array([2, 4, -1])
+    >>> from scipy import linalg
+    >>> x = linalg.solve(a, b)
+    >>> x
+    array([ 2., -2.,  9.])
+    >>> np.dot(a, x) == b
+    array([ True,  True,  True], dtype=bool)
+
+    Notes
+    -----
+    If the input b matrix is a 1D array with N elements, when supplied
+    together with an NxN input a, it is assumed to be a valid column vector
+    despite the apparent size mismatch. This is compatible with the
+    numpy.dot() behavior and the returned result is still a 1D array.
+
+    The generic, symmetric, hermitian and positive definite solutions are
+    obtained by calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
+    LAPACK respectively.
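+
+    For instance, a symmetric positive definite system can be dispatched
+    to the dedicated ?POSV solver explicitly via ``assume_a`` (a small
+    illustrative sketch; any well-conditioned SPD matrix behaves the same):
+
+    >>> a = np.array([[2., 1.], [1., 2.]])
+    >>> b = np.array([1., 0.])
+    >>> linalg.solve(a, b, assume_a='pos')
+    array([ 0.66666667, -0.33333333])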
+ """ + # Flags for 1D or nD right hand side + b_is_1D = False + + a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite)) + b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite)) + n = a1.shape[0] + + overwrite_a = overwrite_a or _datacopied(a1, a) + overwrite_b = overwrite_b or _datacopied(b1, b) + + if a1.shape[0] != a1.shape[1]: + raise ValueError('Input a needs to be a square matrix.') + + if n != b1.shape[0]: + # Last chance to catch 1x1 scalar a and 1D b arrays + if not (n == 1 and b1.size != 0): + raise ValueError('Input b has to have same number of rows as ' + 'input a') + + # accommodate empty arrays + if b1.size == 0: + return np.asfortranarray(b1.copy()) + + # regularize 1D b arrays to 2D + if b1.ndim == 1: + if n == 1: + b1 = b1[None, :] + else: + b1 = b1[:, None] + b_is_1D = True + + # Backwards compatibility - old keyword. + if sym_pos: + assume_a = 'pos' + + if assume_a not in ('gen', 'sym', 'her', 'pos'): + raise ValueError('{} is not a recognized matrix structure' + ''.format(assume_a)) + + # Deprecate keyword "debug" + if debug is not None: + warn('Use of the "debug" keyword is deprecated ' + 'and this keyword will be removed in future ' + 'versions of SciPy.', DeprecationWarning, stacklevel=2) + + # Get the correct lamch function. + # The LAMCH functions only exists for S and D + # So for complex values we have to convert to real/double. + if a1.dtype.char in 'fF': # single precision + lamch = get_lapack_funcs('lamch', dtype='f') + else: + lamch = get_lapack_funcs('lamch', dtype='d') + + # Currently we do not have the other forms of the norm calculators + # lansy, lanpo, lanhe. + # However, in any case they only reduce computations slightly... + lange = get_lapack_funcs('lange', (a1,)) + + # Since the I-norm and 1-norm are the same for symmetric matrices + # we can collect them all in this one call + # Note however, that when issuing 'gen' and form!='none', then + # the I-norm should be used + if transposed: + trans = 1 + norm = 'I' + if np.iscomplexobj(a1): + raise NotImplementedError('scipy.linalg.solve can currently ' + 'not solve a^T x = b or a^H x = b ' + 'for complex matrices.') + else: + trans = 0 + norm = '1' + + anorm = lange(norm, a1) + + # Generalized case 'gesv' + if assume_a == 'gen': + gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'), + (a1, b1)) + lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a) + _solve_check(n, info) + x, info = getrs(lu, ipvt, b1, + trans=trans, overwrite_b=overwrite_b) + _solve_check(n, info) + rcond, info = gecon(lu, anorm, norm=norm) + # Hermitian case 'hesv' + elif assume_a == 'her': + hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv', + 'hesv_lwork'), (a1, b1)) + lwork = _compute_lwork(hesv_lw, n, lower) + lu, ipvt, x, info = hesv(a1, b1, lwork=lwork, + lower=lower, + overwrite_a=overwrite_a, + overwrite_b=overwrite_b) + _solve_check(n, info) + rcond, info = hecon(lu, ipvt, anorm) + # Symmetric case 'sysv' + elif assume_a == 'sym': + sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv', + 'sysv_lwork'), (a1, b1)) + lwork = _compute_lwork(sysv_lw, n, lower) + lu, ipvt, x, info = sysv(a1, b1, lwork=lwork, + lower=lower, + overwrite_a=overwrite_a, + overwrite_b=overwrite_b) + _solve_check(n, info) + rcond, info = sycon(lu, ipvt, anorm) + # Positive definite case 'posv' + else: + pocon, posv = get_lapack_funcs(('pocon', 'posv'), + (a1, b1)) + lu, x, info = posv(a1, b1, lower=lower, + overwrite_a=overwrite_a, + overwrite_b=overwrite_b) + _solve_check(n, info) + rcond, info = 
pocon(lu, anorm) + + _solve_check(n, info, lamch, rcond) + + if b_is_1D: + x = x.ravel() + + return x + + +def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, + overwrite_b=False, debug=None, check_finite=True): + """ + Solve the equation `a x = b` for `x`, assuming a is a triangular matrix. + + Parameters + ---------- + a : (M, M) array_like + A triangular matrix + b : (M,) or (M, N) array_like + Right-hand side matrix in `a x = b` + lower : bool, optional + Use only data contained in the lower triangle of `a`. + Default is to use upper triangle. + trans : {0, 1, 2, 'N', 'T', 'C'}, optional + Type of system to solve: + + ======== ========= + trans system + ======== ========= + 0 or 'N' a x = b + 1 or 'T' a^T x = b + 2 or 'C' a^H x = b + ======== ========= + unit_diagonal : bool, optional + If True, diagonal elements of `a` are assumed to be 1 and + will not be referenced. + overwrite_b : bool, optional + Allow overwriting data in `b` (may enhance performance) + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + x : (M,) or (M, N) ndarray + Solution to the system `a x = b`. Shape of return matches `b`. + + Raises + ------ + LinAlgError + If `a` is singular + + Notes + ----- + .. versionadded:: 0.9.0 + + Examples + -------- + Solve the lower triangular system a x = b, where:: + + [3 0 0 0] [4] + a = [2 1 0 0] b = [2] + [1 0 1 0] [4] + [1 1 1 1] [2] + + >>> from scipy.linalg import solve_triangular + >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]]) + >>> b = np.array([4, 2, 4, 2]) + >>> x = solve_triangular(a, b, lower=True) + >>> x + array([ 1.33333333, -0.66666667, 2.66666667, -1.33333333]) + >>> a.dot(x) # Check the result + array([ 4., 2., 4., 2.]) + + """ + + # Deprecate keyword "debug" + if debug is not None: + warn('Use of the "debug" keyword is deprecated ' + 'and this keyword will be removed in the future ' + 'versions of SciPy.', DeprecationWarning, stacklevel=2) + + a1 = _asarray_validated(a, check_finite=check_finite) + b1 = _asarray_validated(b, check_finite=check_finite) + if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: + raise ValueError('expected square matrix') + if a1.shape[0] != b1.shape[0]: + raise ValueError('incompatible dimensions') + overwrite_b = overwrite_b or _datacopied(b1, b) + if debug: + print('solve:overwrite_b=', overwrite_b) + trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans) + trtrs, = get_lapack_funcs(('trtrs',), (a1, b1)) + x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower, + trans=trans, unitdiag=unit_diagonal) + + if info == 0: + return x + if info > 0: + raise LinAlgError("singular matrix: resolution failed at diagonal %d" % + (info-1)) + raise ValueError('illegal value in %d-th argument of internal trtrs' % + (-info)) + + +def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False, + debug=None, check_finite=True): + """ + Solve the equation a x = b for x, assuming a is banded matrix. 
+ + The matrix a is stored in `ab` using the matrix diagonal ordered form:: + + ab[u + i - j, j] == a[i,j] + + Example of `ab` (shape of a is (6,6), `u` =1, `l` =2):: + + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * + + Parameters + ---------- + (l, u) : (integer, integer) + Number of non-zero lower and upper diagonals + ab : (`l` + `u` + 1, M) array_like + Banded matrix + b : (M,) or (M, K) array_like + Right-hand side + overwrite_ab : bool, optional + Discard data in `ab` (may enhance performance) + overwrite_b : bool, optional + Discard data in `b` (may enhance performance) + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + x : (M,) or (M, K) ndarray + The solution to the system a x = b. Returned shape depends on the + shape of `b`. + + Examples + -------- + Solve the banded system a x = b, where:: + + [5 2 -1 0 0] [0] + [1 4 2 -1 0] [1] + a = [0 1 3 2 -1] b = [2] + [0 0 1 2 2] [2] + [0 0 0 1 1] [3] + + There is one nonzero diagonal below the main diagonal (l = 1), and + two above (u = 2). The diagonal banded form of the matrix is:: + + [* * -1 -1 -1] + ab = [* 2 2 2 2] + [5 4 3 2 1] + [1 1 1 1 *] + + >>> from scipy.linalg import solve_banded + >>> ab = np.array([[0, 0, -1, -1, -1], + ... [0, 2, 2, 2, 2], + ... [5, 4, 3, 2, 1], + ... [1, 1, 1, 1, 0]]) + >>> b = np.array([0, 1, 2, 2, 3]) + >>> x = solve_banded((1, 2), ab, b) + >>> x + array([-2.37288136, 3.93220339, -4. , 4.3559322 , -1.3559322 ]) + + """ + + # Deprecate keyword "debug" + if debug is not None: + warn('Use of the "debug" keyword is deprecated ' + 'and this keyword will be removed in the future ' + 'versions of SciPy.', DeprecationWarning, stacklevel=2) + + a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True) + b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True) + # Validate shapes. + if a1.shape[-1] != b1.shape[0]: + raise ValueError("shapes of ab and b are not compatible.") + (nlower, nupper) = l_and_u + if nlower + nupper + 1 != a1.shape[0]: + raise ValueError("invalid values for the number of lower and upper " + "diagonals: l+u+1 (%d) does not equal ab.shape[0] " + "(%d)" % (nlower + nupper + 1, ab.shape[0])) + + overwrite_b = overwrite_b or _datacopied(b1, b) + if a1.shape[-1] == 1: + b2 = np.array(b1, copy=(not overwrite_b)) + b2 /= a1[1, 0] + return b2 + if nlower == nupper == 1: + overwrite_ab = overwrite_ab or _datacopied(a1, ab) + gtsv, = get_lapack_funcs(('gtsv',), (a1, b1)) + du = a1[0, 1:] + d = a1[1, :] + dl = a1[2, :-1] + du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab, + overwrite_ab, overwrite_b) + else: + gbsv, = get_lapack_funcs(('gbsv',), (a1, b1)) + a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype) + a2[nlower:, :] = a1 + lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True, + overwrite_b=overwrite_b) + if info == 0: + return x + if info > 0: + raise LinAlgError("singular matrix") + raise ValueError('illegal value in %d-th argument of internal ' + 'gbsv/gtsv' % -info) + + +def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False, + check_finite=True): + """ + Solve equation a x = b. a is Hermitian positive-definite banded matrix. 
+ + The matrix a is stored in `ab` either in lower diagonal or upper + diagonal ordered form: + + ab[u + i - j, j] == a[i,j] (if upper form; i <= j) + ab[ i - j, j] == a[i,j] (if lower form; i >= j) + + Example of `ab` (shape of a is (6, 6), `u` =2):: + + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * + + Cells marked with * are not used. + + Parameters + ---------- + ab : (`u` + 1, M) array_like + Banded matrix + b : (M,) or (M, K) array_like + Right-hand side + overwrite_ab : bool, optional + Discard data in `ab` (may enhance performance) + overwrite_b : bool, optional + Discard data in `b` (may enhance performance) + lower : bool, optional + Is the matrix in the lower form. (Default is upper form) + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + x : (M,) or (M, K) ndarray + The solution to the system a x = b. Shape of return matches shape + of `b`. + + Examples + -------- + Solve the banded system A x = b, where:: + + [ 4 2 -1 0 0 0] [1] + [ 2 5 2 -1 0 0] [2] + A = [-1 2 6 2 -1 0] b = [2] + [ 0 -1 2 7 2 -1] [3] + [ 0 0 -1 2 8 2] [3] + [ 0 0 0 -1 2 9] [3] + + >>> from scipy.linalg import solveh_banded + + `ab` contains the main diagonal and the nonzero diagonals below the + main diagonal. That is, we use the lower form: + + >>> ab = np.array([[ 4, 5, 6, 7, 8, 9], + ... [ 2, 2, 2, 2, 2, 0], + ... [-1, -1, -1, -1, 0, 0]]) + >>> b = np.array([1, 2, 2, 3, 3, 3]) + >>> x = solveh_banded(ab, b, lower=True) + >>> x + array([ 0.03431373, 0.45938375, 0.05602241, 0.47759104, 0.17577031, + 0.34733894]) + + + Solve the Hermitian banded system H x = b, where:: + + [ 8 2-1j 0 0 ] [ 1 ] + H = [2+1j 5 1j 0 ] b = [1+1j] + [ 0 -1j 9 -2-1j] [1-2j] + [ 0 0 -2+1j 6 ] [ 0 ] + + In this example, we put the upper diagonals in the array `hb`: + + >>> hb = np.array([[0, 2-1j, 1j, -2-1j], + ... [8, 5, 9, 6 ]]) + >>> b = np.array([1, 1+1j, 1-2j, 0]) + >>> x = solveh_banded(hb, b) + >>> x + array([ 0.07318536-0.02939412j, 0.11877624+0.17696461j, + 0.10077984-0.23035393j, -0.00479904-0.09358128j]) + + """ + a1 = _asarray_validated(ab, check_finite=check_finite) + b1 = _asarray_validated(b, check_finite=check_finite) + # Validate shapes. + if a1.shape[-1] != b1.shape[0]: + raise ValueError("shapes of ab and b are not compatible.") + + overwrite_b = overwrite_b or _datacopied(b1, b) + overwrite_ab = overwrite_ab or _datacopied(a1, ab) + + if a1.shape[0] == 2: + ptsv, = get_lapack_funcs(('ptsv',), (a1, b1)) + if lower: + d = a1[0, :].real + e = a1[1, :-1] + else: + d = a1[1, :].real + e = a1[0, 1:].conj() + d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab, + overwrite_b) + else: + pbsv, = get_lapack_funcs(('pbsv',), (a1, b1)) + c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab, + overwrite_b=overwrite_b) + if info > 0: + raise LinAlgError("%d-th leading minor not positive definite" % info) + if info < 0: + raise ValueError('illegal value in %d-th argument of internal ' + 'pbsv' % -info) + return x + + +def solve_toeplitz(c_or_cr, b, check_finite=True): + """Solve a Toeplitz system using Levinson Recursion + + The Toeplitz matrix has constant diagonals, with c as its first column + and r as its first row. 
If r is not given, ``r == conjugate(c)`` is + assumed. + + Parameters + ---------- + c_or_cr : array_like or tuple of (array_like, array_like) + The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the + actual shape of ``c``, it will be converted to a 1-D array. If not + supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is + real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row + of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape + of ``r``, it will be converted to a 1-D array. + b : (M,) or (M, K) array_like + Right-hand side in ``T x = b``. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (result entirely NaNs) if the inputs do contain infinities or NaNs. + + Returns + ------- + x : (M,) or (M, K) ndarray + The solution to the system ``T x = b``. Shape of return matches shape + of `b`. + + See Also + -------- + toeplitz : Toeplitz matrix + + Notes + ----- + The solution is computed using Levinson-Durbin recursion, which is faster + than generic least-squares methods, but can be less numerically stable. + + Examples + -------- + Solve the Toeplitz system T x = b, where:: + + [ 1 -1 -2 -3] [1] + T = [ 3 1 -1 -2] b = [2] + [ 6 3 1 -1] [2] + [10 6 3 1] [5] + + To specify the Toeplitz matrix, only the first column and the first + row are needed. + + >>> c = np.array([1, 3, 6, 10]) # First column of T + >>> r = np.array([1, -1, -2, -3]) # First row of T + >>> b = np.array([1, 2, 2, 5]) + + >>> from scipy.linalg import solve_toeplitz, toeplitz + >>> x = solve_toeplitz((c, r), b) + >>> x + array([ 1.66666667, -1. , -2.66666667, 2.33333333]) + + Check the result by creating the full Toeplitz matrix and + multiplying it by `x`. We should get `b`. + + >>> T = toeplitz(c, r) + >>> T.dot(x) + array([ 1., 2., 2., 5.]) + + """ + # If numerical stability of this algorithm is a problem, a future + # developer might consider implementing other O(N^2) Toeplitz solvers, + # such as GKO (https://www.jstor.org/stable/2153371) or Bareiss. + if isinstance(c_or_cr, tuple): + c, r = c_or_cr + c = _asarray_validated(c, check_finite=check_finite).ravel() + r = _asarray_validated(r, check_finite=check_finite).ravel() + else: + c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel() + r = c.conjugate() + + # Form a 1D array of values to be used in the matrix, containing a reversed + # copy of r[1:], followed by c. 
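+    # For example, with c = [c0, c1, c2] and r = [r0, r1, r2] this gives
+    # vals = [r2, r1, c0, c1, c2], so that entry (i, j) of the Toeplitz
+    # matrix is vals[len(c) - 1 + i - j] (r[0] itself is never used).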
+ vals = np.concatenate((r[-1:0:-1], c)) + if b is None: + raise ValueError('illegal value, `b` is a required argument') + + b = _asarray_validated(b) + if vals.shape[0] != (2*b.shape[0] - 1): + raise ValueError('incompatible dimensions') + if np.iscomplexobj(vals) or np.iscomplexobj(b): + vals = np.asarray(vals, dtype=np.complex128, order='c') + b = np.asarray(b, dtype=np.complex128) + else: + vals = np.asarray(vals, dtype=np.double, order='c') + b = np.asarray(b, dtype=np.double) + + if b.ndim == 1: + x, _ = levinson(vals, np.ascontiguousarray(b)) + else: + b_shape = b.shape + b = b.reshape(b.shape[0], -1) + x = np.column_stack([levinson(vals, np.ascontiguousarray(b[:, i]))[0] + for i in range(b.shape[1])]) + x = x.reshape(*b_shape) + + return x + + +def _get_axis_len(aname, a, axis): + ax = axis + if ax < 0: + ax += a.ndim + if 0 <= ax < a.ndim: + return a.shape[ax] + raise ValueError("'%saxis' entry is out of bounds" % (aname,)) + + +def solve_circulant(c, b, singular='raise', tol=None, + caxis=-1, baxis=0, outaxis=0): + """Solve C x = b for x, where C is a circulant matrix. + + `C` is the circulant matrix associated with the vector `c`. + + The system is solved by doing division in Fourier space. The + calculation is:: + + x = ifft(fft(b) / fft(c)) + + where `fft` and `ifft` are the fast Fourier transform and its inverse, + respectively. For a large vector `c`, this is *much* faster than + solving the system with the full circulant matrix. + + Parameters + ---------- + c : array_like + The coefficients of the circulant matrix. + b : array_like + Right-hand side matrix in ``a x = b``. + singular : str, optional + This argument controls how a near singular circulant matrix is + handled. If `singular` is "raise" and the circulant matrix is + near singular, a `LinAlgError` is raised. If `singular` is + "lstsq", the least squares solution is returned. Default is "raise". + tol : float, optional + If any eigenvalue of the circulant matrix has an absolute value + that is less than or equal to `tol`, the matrix is considered to be + near singular. If not given, `tol` is set to:: + + tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps + + where `abs_eigs` is the array of absolute values of the eigenvalues + of the circulant matrix. + caxis : int + When `c` has dimension greater than 1, it is viewed as a collection + of circulant vectors. In this case, `caxis` is the axis of `c` that + holds the vectors of circulant coefficients. + baxis : int + When `b` has dimension greater than 1, it is viewed as a collection + of vectors. In this case, `baxis` is the axis of `b` that holds the + right-hand side vectors. + outaxis : int + When `c` or `b` are multidimensional, the value returned by + `solve_circulant` is multidimensional. In this case, `outaxis` is + the axis of the result that holds the solution vectors. + + Returns + ------- + x : ndarray + Solution to the system ``C x = b``. + + Raises + ------ + LinAlgError + If the circulant matrix associated with `c` is near singular. + + See Also + -------- + circulant : circulant matrix + + Notes + ----- + For a one-dimensional vector `c` with length `m`, and an array `b` + with shape ``(m, ...)``, + + solve_circulant(c, b) + + returns the same result as + + solve(circulant(c), b) + + where `solve` and `circulant` are from `scipy.linalg`. + + .. 
versionadded:: 0.16.0 + + Examples + -------- + >>> from scipy.linalg import solve_circulant, solve, circulant, lstsq + + >>> c = np.array([2, 2, 4]) + >>> b = np.array([1, 2, 3]) + >>> solve_circulant(c, b) + array([ 0.75, -0.25, 0.25]) + + Compare that result to solving the system with `scipy.linalg.solve`: + + >>> solve(circulant(c), b) + array([ 0.75, -0.25, 0.25]) + + A singular example: + + >>> c = np.array([1, 1, 0, 0]) + >>> b = np.array([1, 2, 3, 4]) + + Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`. For the + least square solution, use the option ``singular='lstsq'``: + + >>> solve_circulant(c, b, singular='lstsq') + array([ 0.25, 1.25, 2.25, 1.25]) + + Compare to `scipy.linalg.lstsq`: + + >>> x, resid, rnk, s = lstsq(circulant(c), b) + >>> x + array([ 0.25, 1.25, 2.25, 1.25]) + + A broadcasting example: + + Suppose we have the vectors of two circulant matrices stored in an array + with shape (2, 5), and three `b` vectors stored in an array with shape + (3, 5). For example, + + >>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]]) + >>> b = np.arange(15).reshape(-1, 5) + + We want to solve all combinations of circulant matrices and `b` vectors, + with the result stored in an array with shape (2, 3, 5). When we + disregard the axes of `c` and `b` that hold the vectors of coefficients, + the shapes of the collections are (2,) and (3,), respectively, which are + not compatible for broadcasting. To have a broadcast result with shape + (2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has + shape (2, 1, 5). The last dimension holds the coefficients of the + circulant matrices, so when we call `solve_circulant`, we can use the + default ``caxis=-1``. The coefficients of the `b` vectors are in the last + dimension of the array `b`, so we use ``baxis=-1``. If we use the + default `outaxis`, the result will have shape (5, 2, 3), so we'll use + ``outaxis=-1`` to put the solution vectors in the last dimension. + + >>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1) + >>> x.shape + (2, 3, 5) + >>> np.set_printoptions(precision=3) # For compact output of numbers. + >>> x + array([[[-0.118, 0.22 , 1.277, -0.142, 0.302], + [ 0.651, 0.989, 2.046, 0.627, 1.072], + [ 1.42 , 1.758, 2.816, 1.396, 1.841]], + [[ 0.401, 0.304, 0.694, -0.867, 0.377], + [ 0.856, 0.758, 1.149, -0.412, 0.831], + [ 1.31 , 1.213, 1.603, 0.042, 1.286]]]) + + Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``): + + >>> solve_circulant(c[1], b[1, :]) + array([ 0.856, 0.758, 1.149, -0.412, 0.831]) + + """ + c = np.atleast_1d(c) + nc = _get_axis_len("c", c, caxis) + b = np.atleast_1d(b) + nb = _get_axis_len("b", b, baxis) + if nc != nb: + raise ValueError('Incompatible c and b axis lengths') + + fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1) + abs_fc = np.abs(fc) + if tol is None: + # This is the same tolerance as used in np.linalg.matrix_rank. + tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps + if tol.shape != (): + tol.shape = tol.shape + (1,) + else: + tol = np.atleast_1d(tol) + + near_zeros = abs_fc <= tol + is_near_singular = np.any(near_zeros) + if is_near_singular: + if singular == 'raise': + raise LinAlgError("near singular circulant matrix.") + else: + # Replace the small values with 1 to avoid errors in the + # division fb/fc below. 
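+            # The affected entries of the quotient are zeroed out again
+            # after the division, which corresponds to the minimum-norm
+            # least-squares solution in Fourier space.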
+ fc[near_zeros] = 1 + + fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1) + + q = fb / fc + + if is_near_singular: + # `near_zeros` is a boolean array, same shape as `c`, that is + # True where `fc` is (near) zero. `q` is the broadcasted result + # of fb / fc, so to set the values of `q` to 0 where `fc` is near + # zero, we use a mask that is the broadcast result of an array + # of True values shaped like `b` with `near_zeros`. + mask = np.ones_like(b, dtype=bool) & near_zeros + q[mask] = 0 + + x = np.fft.ifft(q, axis=-1) + if not (np.iscomplexobj(c) or np.iscomplexobj(b)): + x = x.real + if outaxis != -1: + x = np.rollaxis(x, -1, outaxis) + return x + + +# matrix inversion +def inv(a, overwrite_a=False, check_finite=True): + """ + Compute the inverse of a matrix. + + Parameters + ---------- + a : array_like + Square matrix to be inverted. + overwrite_a : bool, optional + Discard data in `a` (may improve performance). Default is False. + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + ainv : ndarray + Inverse of the matrix `a`. + + Raises + ------ + LinAlgError + If `a` is singular. + ValueError + If `a` is not square, or not 2-dimensional. + + Examples + -------- + >>> from scipy import linalg + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> linalg.inv(a) + array([[-2. , 1. ], + [ 1.5, -0.5]]) + >>> np.dot(a, linalg.inv(a)) + array([[ 1., 0.], + [ 0., 1.]]) + + """ + a1 = _asarray_validated(a, check_finite=check_finite) + if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: + raise ValueError('expected square matrix') + overwrite_a = overwrite_a or _datacopied(a1, a) + # XXX: I found no advantage or disadvantage of using finv. +# finv, = get_flinalg_funcs(('inv',),(a1,)) +# if finv is not None: +# a_inv,info = finv(a1,overwrite_a=overwrite_a) +# if info==0: +# return a_inv +# if info>0: raise LinAlgError, "singular matrix" +# if info<0: raise ValueError('illegal value in %d-th argument of ' +# 'internal inv.getrf|getri'%(-info)) + getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri', + 'getri_lwork'), + (a1,)) + lu, piv, info = getrf(a1, overwrite_a=overwrite_a) + if info == 0: + lwork = _compute_lwork(getri_lwork, a1.shape[0]) + + # XXX: the following line fixes curious SEGFAULT when + # benchmarking 500x500 matrix inverse. This seems to + # be a bug in LAPACK ?getri routine because if lwork is + # minimal (when using lwork[0] instead of lwork[1]) then + # all tests pass. Further investigation is required if + # more such SEGFAULTs occur. + lwork = int(1.01 * lwork) + inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1) + if info > 0: + raise LinAlgError("singular matrix") + if info < 0: + raise ValueError('illegal value in %d-th argument of internal ' + 'getrf|getri' % -info) + return inv_a + + +# Determinant + +def det(a, overwrite_a=False, check_finite=True): + """ + Compute the determinant of a matrix + + The determinant of a square matrix is a value derived arithmetically + from the coefficients of the matrix. + + The determinant for a 3x3 matrix, for example, is computed as follows:: + + a b c + d e f = A + g h i + + det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h + + Parameters + ---------- + a : (M, M) array_like + A square matrix. + overwrite_a : bool, optional + Allow overwriting data in a (may enhance performance). 
+ check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + det : float or complex + Determinant of `a`. + + Notes + ----- + The determinant is computed via LU factorization, LAPACK routine z/dgetrf. + + Examples + -------- + >>> from scipy import linalg + >>> a = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> linalg.det(a) + 0.0 + >>> a = np.array([[0,2,3], [4,5,6], [7,8,9]]) + >>> linalg.det(a) + 3.0 + + """ + a1 = _asarray_validated(a, check_finite=check_finite) + if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: + raise ValueError('expected square matrix') + overwrite_a = overwrite_a or _datacopied(a1, a) + fdet, = get_flinalg_funcs(('det',), (a1,)) + a_det, info = fdet(a1, overwrite_a=overwrite_a) + if info < 0: + raise ValueError('illegal value in %d-th argument of internal ' + 'det.getrf' % -info) + return a_det + +# Linear Least Squares + + +class LstsqLapackError(LinAlgError): + pass + + +def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False, + check_finite=True, lapack_driver=None): + """ + Compute least-squares solution to equation Ax = b. + + Compute a vector x such that the 2-norm ``|b - A x|`` is minimized. + + Parameters + ---------- + a : (M, N) array_like + Left hand side matrix (2-D array). + b : (M,) or (M, K) array_like + Right hand side matrix or vector (1-D or 2-D array). + cond : float, optional + Cutoff for 'small' singular values; used to determine effective + rank of a. Singular values smaller than + ``rcond * largest_singular_value`` are considered zero. + overwrite_a : bool, optional + Discard data in `a` (may enhance performance). Default is False. + overwrite_b : bool, optional + Discard data in `b` (may enhance performance). Default is False. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + lapack_driver : str, optional + Which LAPACK driver is used to solve the least-squares problem. + Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default + (``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly + faster on many problems. ``'gelss'`` was used historically. It is + generally slow but uses less memory. + + .. versionadded:: 0.17.0 + + Returns + ------- + x : (N,) or (N, K) ndarray + Least-squares solution. Return shape matches shape of `b`. + residues : (0,) or () or (K,) ndarray + Sums of residues, squared 2-norm for each column in ``b - a x``. + If rank of matrix a is ``< N`` or ``N > M``, or ``'gelsy'`` is used, + this is a length zero array. If b was 1-D, this is a () shape array + (numpy scalar), otherwise the shape is (K,). + rank : int + Effective rank of matrix `a`. + s : (min(M,N),) ndarray or None + Singular values of `a`. The condition number of a is + ``abs(s[0] / s[-1])``. None is returned when ``'gelsy'`` is used. + + Raises + ------ + LinAlgError + If computation does not converge. + + ValueError + When parameters are wrong. 
+ + See Also + -------- + optimize.nnls : linear least squares with non-negativity constraint + + Examples + -------- + >>> from scipy.linalg import lstsq + >>> import matplotlib.pyplot as plt + + Suppose we have the following data: + + >>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5]) + >>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6]) + + We want to fit a quadratic polynomial of the form ``y = a + b*x**2`` + to this data. We first form the "design matrix" M, with a constant + column of 1s and a column containing ``x**2``: + + >>> M = x[:, np.newaxis]**[0, 2] + >>> M + array([[ 1. , 1. ], + [ 1. , 6.25], + [ 1. , 12.25], + [ 1. , 16. ], + [ 1. , 25. ], + [ 1. , 49. ], + [ 1. , 72.25]]) + + We want to find the least-squares solution to ``M.dot(p) = y``, + where ``p`` is a vector with length 2 that holds the parameters + ``a`` and ``b``. + + >>> p, res, rnk, s = lstsq(M, y) + >>> p + array([ 0.20925829, 0.12013861]) + + Plot the data and the fitted curve. + + >>> plt.plot(x, y, 'o', label='data') + >>> xx = np.linspace(0, 9, 101) + >>> yy = p[0] + p[1]*xx**2 + >>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$') + >>> plt.xlabel('x') + >>> plt.ylabel('y') + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.grid(alpha=0.25) + >>> plt.show() + + """ + a1 = _asarray_validated(a, check_finite=check_finite) + b1 = _asarray_validated(b, check_finite=check_finite) + if len(a1.shape) != 2: + raise ValueError('expected matrix') + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + if m != b1.shape[0]: + raise ValueError('incompatible dimensions') + if m == 0 or n == 0: # Zero-sized problem, confuses LAPACK + x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1)) + if n == 0: + residues = np.linalg.norm(b1, axis=0)**2 + else: + residues = np.empty((0,)) + return x, residues, 0, np.empty((0,)) + + driver = lapack_driver + if driver is None: + driver = lstsq.default_lapack_driver + if driver not in ('gelsd', 'gelsy', 'gelss'): + raise ValueError('LAPACK driver "%s" is not found' % driver) + + lapack_func, lapack_lwork = get_lapack_funcs((driver, + '%s_lwork' % driver), + (a1, b1)) + real_data = True if (lapack_func.dtype.kind == 'f') else False + + if m < n: + # need to extend b matrix as it will be filled with + # a larger solution matrix + if len(b1.shape) == 2: + b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype) + b2[:m, :] = b1 + else: + b2 = np.zeros(n, dtype=lapack_func.dtype) + b2[:m] = b1 + b1 = b2 + + overwrite_a = overwrite_a or _datacopied(a1, a) + overwrite_b = overwrite_b or _datacopied(b1, b) + + if cond is None: + cond = np.finfo(lapack_func.dtype).eps + + if driver in ('gelss', 'gelsd'): + if driver == 'gelss': + lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond) + v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork, + overwrite_a=overwrite_a, + overwrite_b=overwrite_b) + + elif driver == 'gelsd': + if real_data: + lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond) + if iwork == 0: + # this is LAPACK bug 0038: dgelsd does not provide the + # size of the iwork array in query mode. This bug was + # fixed in LAPACK 3.2.2, released July 21, 2010. + mesg = ("internal gelsd driver lwork query error, " + "required iwork dimension not returned. " + "This is likely the result of LAPACK bug " + "0038, fixed in LAPACK 3.2.2 (released " + "July 21, 2010). ") + + if lapack_driver is None: + # restart with gelss + lstsq.default_lapack_driver = 'gelss' + mesg += "Falling back to 'gelss' driver." 
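+                        # The module-level default was switched to 'gelss'
+                        # above, so both this retry and subsequent calls
+                        # avoid the broken gelsd workspace query.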
+ warn(mesg, RuntimeWarning, stacklevel=2) + return lstsq(a, b, cond, overwrite_a, overwrite_b, + check_finite, lapack_driver='gelss') + + # can't proceed, bail out + mesg += ("Use a different lapack_driver when calling lstsq" + " or upgrade LAPACK.") + raise LstsqLapackError(mesg) + + x, s, rank, info = lapack_func(a1, b1, lwork, + iwork, cond, False, False) + else: # complex data + lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n, + nrhs, cond) + x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork, + cond, False, False) + if info > 0: + raise LinAlgError("SVD did not converge in Linear Least Squares") + if info < 0: + raise ValueError('illegal value in %d-th argument of internal %s' + % (-info, lapack_driver)) + resids = np.asarray([], dtype=x.dtype) + if m > n: + x1 = x[:n] + if rank == n: + resids = np.sum(np.abs(x[n:])**2, axis=0) + x = x1 + return x, resids, rank, s + + elif driver == 'gelsy': + lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond) + jptv = np.zeros((a1.shape[1], 1), dtype=np.int32) + v, x, j, rank, info = lapack_func(a1, b1, jptv, cond, + lwork, False, False) + if info < 0: + raise ValueError("illegal value in %d-th argument of internal " + "gelsy" % -info) + if m > n: + x1 = x[:n] + x = x1 + return x, np.array([], x.dtype), rank, None + + +lstsq.default_lapack_driver = 'gelsd' + + +def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True): + """ + Compute the (Moore-Penrose) pseudo-inverse of a matrix. + + Calculate a generalized inverse of a matrix using a least-squares + solver. + + Parameters + ---------- + a : (M, N) array_like + Matrix to be pseudo-inverted. + cond, rcond : float, optional + Cutoff for 'small' singular values in the least-squares solver. + Singular values smaller than ``rcond * largest_singular_value`` + are considered zero. + return_rank : bool, optional + if True, return the effective rank of the matrix + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + B : (N, M) ndarray + The pseudo-inverse of matrix `a`. + rank : int + The effective rank of the matrix. Returned if return_rank == True + + Raises + ------ + LinAlgError + If computation does not converge. + + Examples + -------- + >>> from scipy import linalg + >>> a = np.random.randn(9, 6) + >>> B = linalg.pinv(a) + >>> np.allclose(a, np.dot(a, np.dot(B, a))) + True + >>> np.allclose(B, np.dot(B, np.dot(a, B))) + True + + """ + a = _asarray_validated(a, check_finite=check_finite) + b = np.identity(a.shape[0], dtype=a.dtype) + if rcond is not None: + cond = rcond + + x, resids, rank, s = lstsq(a, b, cond=cond, check_finite=False) + + if return_rank: + return x, rank + else: + return x + + +def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True): + """ + Compute the (Moore-Penrose) pseudo-inverse of a matrix. + + Calculate a generalized inverse of a matrix using its + singular-value decomposition and including all 'large' singular + values. + + Parameters + ---------- + a : (M, N) array_like + Matrix to be pseudo-inverted. + cond, rcond : float or None + Cutoff for 'small' singular values. + Singular values smaller than ``rcond*largest_singular_value`` + are considered zero. + If None or -1, suitable machine precision is used. 
+    return_rank : bool, optional
+        If True, return the effective rank of the matrix.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    B : (N, M) ndarray
+        The pseudo-inverse of matrix `a`.
+    rank : int
+        The effective rank of the matrix. Returned if return_rank == True
+
+    Raises
+    ------
+    LinAlgError
+        If SVD computation does not converge.
+
+    Examples
+    --------
+    >>> from scipy import linalg
+    >>> a = np.random.randn(9, 6)
+    >>> B = linalg.pinv2(a)
+    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
+    True
+    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
+    True
+
+    """
+    a = _asarray_validated(a, check_finite=check_finite)
+    u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)
+
+    if rcond is not None:
+        cond = rcond
+    if cond in [None, -1]:
+        t = u.dtype.char.lower()
+        factor = {'f': 1E3, 'd': 1E6}
+        cond = factor[t] * np.finfo(t).eps
+
+    rank = np.sum(s > cond * np.max(s))
+
+    u = u[:, :rank]
+    u /= s[:rank]
+    B = np.transpose(np.conjugate(np.dot(u, vh[:rank])))
+
+    if return_rank:
+        return B, rank
+    else:
+        return B
+
+
+def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
+          check_finite=True):
+    """
+    Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
+
+    Calculate a generalized inverse of a Hermitian or real symmetric matrix
+    using its eigenvalue decomposition and including all eigenvalues with
+    'large' absolute value.
+
+    Parameters
+    ----------
+    a : (N, N) array_like
+        Real symmetric or complex Hermitian matrix to be pseudo-inverted
+    cond, rcond : float or None
+        Cutoff for 'small' eigenvalues.
+        Eigenvalues with absolute value smaller than
+        ``rcond * largest_eigenvalue`` are considered zero.
+
+        If None or -1, suitable machine precision is used.
+    lower : bool, optional
+        Whether the pertinent array data is taken from the lower or upper
+        triangle of a. (Default: lower)
+    return_rank : bool, optional
+        If True, return the effective rank of the matrix.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    B : (N, N) ndarray
+        The pseudo-inverse of matrix `a`.
+    rank : int
+        The effective rank of the matrix.
Returned if return_rank == True + + Raises + ------ + LinAlgError + If eigenvalue does not converge + + Examples + -------- + >>> from scipy.linalg import pinvh + >>> a = np.random.randn(9, 6) + >>> a = np.dot(a, a.T) + >>> B = pinvh(a) + >>> np.allclose(a, np.dot(a, np.dot(B, a))) + True + >>> np.allclose(B, np.dot(B, np.dot(a, B))) + True + + """ + a = _asarray_validated(a, check_finite=check_finite) + s, u = decomp.eigh(a, lower=lower, check_finite=False) + + if rcond is not None: + cond = rcond + if cond in [None, -1]: + t = u.dtype.char.lower() + factor = {'f': 1E3, 'd': 1E6} + cond = factor[t] * np.finfo(t).eps + + # For Hermitian matrices, singular values equal abs(eigenvalues) + above_cutoff = (abs(s) > cond * np.max(abs(s))) + psigma_diag = 1.0 / s[above_cutoff] + u = u[:, above_cutoff] + + B = np.dot(u * psigma_diag, np.conjugate(u).T) + + if return_rank: + return B, len(psigma_diag) + else: + return B + + +def matrix_balance(A, permute=True, scale=True, separate=False, + overwrite_a=False): + """ + Compute a diagonal similarity transformation for row/column balancing. + + The balancing tries to equalize the row and column 1-norms by applying + a similarity transformation such that the magnitude variation of the + matrix entries is reflected to the scaling matrices. + + Moreover, if enabled, the matrix is first permuted to isolate the upper + triangular parts of the matrix and, again if scaling is also enabled, + only the remaining subblocks are subjected to scaling. + + The balanced matrix satisfies the following equality + + .. math:: + + B = T^{-1} A T + + The scaling coefficients are approximated to the nearest power of 2 + to avoid round-off errors. + + Parameters + ---------- + A : (n, n) array_like + Square data matrix for the balancing. + permute : bool, optional + The selector to define whether permutation of A is also performed + prior to scaling. + scale : bool, optional + The selector to turn on and off the scaling. If False, the matrix + will not be scaled. + separate : bool, optional + This switches from returning a full matrix of the transformation + to a tuple of two separate 1D permutation and scaling arrays. + overwrite_a : bool, optional + This is passed to xGEBAL directly. Essentially, overwrites the result + to the data. It might increase the space efficiency. See LAPACK manual + for details. This is False by default. + + Returns + ------- + B : (n, n) ndarray + Balanced matrix + T : (n, n) ndarray + A possibly permuted diagonal matrix whose nonzero entries are + integer powers of 2 to avoid numerical truncation errors. + scale, perm : (n,) ndarray + If ``separate`` keyword is set to True then instead of the array + ``T`` above, the scaling and the permutation vectors are given + separately as a tuple without allocating the full array ``T``. + + Notes + ----- + + This algorithm is particularly useful for eigenvalue and matrix + decompositions and in many cases it is already called by various + LAPACK routines. + + The algorithm is based on the well-known technique of [1]_ and has + been modified to account for special cases. See [2]_ for details + which have been implemented since LAPACK v3.5.0. Before this version + there are corner cases where balancing can actually worsen the + conditioning. See [3]_ for such examples. + + The code is a wrapper around LAPACK's xGEBAL routine family for matrix + balancing. + + .. 
versionadded:: 0.19.0 + + Examples + -------- + >>> from scipy import linalg + >>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]]) + + >>> y, permscale = linalg.matrix_balance(x) + >>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1) + array([ 3.66666667, 0.4995005 , 0.91312162]) + + >>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1) + array([ 1.2 , 1.27041742, 0.92658316]) # may vary + + >>> permscale # only powers of 2 (0.5 == 2^(-1)) + array([[ 0.5, 0. , 0. ], # may vary + [ 0. , 1. , 0. ], + [ 0. , 0. , 1. ]]) + + References + ---------- + .. [1] : B.N. Parlett and C. Reinsch, "Balancing a Matrix for + Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik, + Vol.13(4), 1969, DOI:10.1007/BF02165404 + + .. [2] : R. James, J. Langou, B.R. Lowery, "On matrix balancing and + eigenvector computation", 2014, Available online: + https://arxiv.org/abs/1401.5766 + + .. [3] : D.S. Watkins. A case where balancing is harmful. + Electron. Trans. Numer. Anal, Vol.23, 2006. + + """ + + A = np.atleast_2d(_asarray_validated(A, check_finite=True)) + + if not np.equal(*A.shape): + raise ValueError('The data matrix for balancing should be square.') + + gebal = get_lapack_funcs(('gebal'), (A,)) + B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute, + overwrite_a=overwrite_a) + + if info < 0: + raise ValueError('xGEBAL exited with the internal error ' + '"illegal value in argument number {}.". See ' + 'LAPACK documentation for the xGEBAL error codes.' + ''.format(-info)) + + # Separate the permutations from the scalings and then convert to int + scaling = np.ones_like(ps, dtype=float) + scaling[lo:hi+1] = ps[lo:hi+1] + + # gebal uses 1-indexing + ps = ps.astype(int, copy=False) - 1 + n = A.shape[0] + perm = np.arange(n) + + # LAPACK permutes with the ordering n --> hi, then 0--> lo + if hi < n: + for ind, x in enumerate(ps[hi+1:][::-1], 1): + if n-ind == x: + continue + perm[[x, n-ind]] = perm[[n-ind, x]] + + if lo > 0: + for ind, x in enumerate(ps[:lo]): + if ind == x: + continue + perm[[x, ind]] = perm[[ind, x]] + + if separate: + return B, (scaling, perm) + + # get the inverse permutation + iperm = np.empty_like(perm) + iperm[perm] = np.arange(n) + + return B, np.diag(scaling)[iperm, :] diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/basic.pyc new file mode 100644 index 0000000..6408c15 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/basic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/blas.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/blas.py new file mode 100644 index 0000000..6462db9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/blas.py @@ -0,0 +1,395 @@ +""" +Low-level BLAS functions (:mod:`scipy.linalg.blas`) +=================================================== + +This module contains low-level functions from the BLAS library. + +.. versionadded:: 0.12.0 + +.. note:: + + The common ``overwrite_<>`` option in many routines, allows the + input arrays to be overwritten to avoid extra memory allocation. + However this requires the array to satisfy two conditions + which are memory order and the data type to match exactly the + order and the type expected by the routine. 
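+
+   For instance, a single precision pair handed to the double precision
+   ``daxpy`` is converted first, so the caller's array is left untouched
+   (an illustrative sketch):
+
+   >>> import numpy as np
+   >>> from scipy.linalg.blas import daxpy
+   >>> x = np.ones(3, dtype=np.float32)
+   >>> y = np.zeros(3, dtype=np.float32)
+   >>> z = daxpy(x, y, a=2.0)  # runs on float64 copies of x and y
+   >>> y                       # the original y is unchanged
+   array([ 0.,  0.,  0.], dtype=float32)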
+ + As an example, if you pass a double precision float array to any + ``S....`` routine which expects single precision arguments, f2py + will create an intermediate array to match the argument types and + overwriting will be performed on that intermediate array. + + Similarly, if a C-contiguous array is passed, f2py will pass a + FORTRAN-contiguous array internally. Please make sure that these + details are satisfied. More information can be found in the f2py + documentation. + +.. warning:: + + These functions do little to no error checking. + It is possible to cause crashes by mis-using them, + so prefer using the higher-level routines in `scipy.linalg`. + +Finding functions +----------------- + +.. autosummary:: + :toctree: generated/ + + get_blas_funcs + find_best_blas_type + +BLAS Level 1 functions +---------------------- + +.. autosummary:: + :toctree: generated/ + + caxpy + ccopy + cdotc + cdotu + crotg + cscal + csrot + csscal + cswap + dasum + daxpy + dcopy + ddot + dnrm2 + drot + drotg + drotm + drotmg + dscal + dswap + dzasum + dznrm2 + icamax + idamax + isamax + izamax + sasum + saxpy + scasum + scnrm2 + scopy + sdot + snrm2 + srot + srotg + srotm + srotmg + sscal + sswap + zaxpy + zcopy + zdotc + zdotu + zdrot + zdscal + zrotg + zscal + zswap + +BLAS Level 2 functions +---------------------- + +.. autosummary:: + :toctree: generated/ + + sgbmv + sgemv + sger + ssbmv + sspr + sspr2 + ssymv + ssyr + ssyr2 + stbmv + stpsv + strmv + strsv + dgbmv + dgemv + dger + dsbmv + dspr + dspr2 + dsymv + dsyr + dsyr2 + dtbmv + dtpsv + dtrmv + dtrsv + cgbmv + cgemv + cgerc + cgeru + chbmv + chemv + cher + cher2 + chpmv + chpr + chpr2 + ctbmv + ctbsv + ctpmv + ctpsv + ctrmv + ctrsv + csyr + zgbmv + zgemv + zgerc + zgeru + zhbmv + zhemv + zher + zher2 + zhpmv + zhpr + zhpr2 + ztbmv + ztbsv + ztpmv + ztrmv + ztrsv + zsyr + +BLAS Level 3 functions +---------------------- + +.. autosummary:: + :toctree: generated/ + + sgemm + ssymm + ssyr2k + ssyrk + strmm + strsm + dgemm + dsymm + dsyr2k + dsyrk + dtrmm + dtrsm + cgemm + chemm + cher2k + cherk + csymm + csyr2k + csyrk + ctrmm + ctrsm + zgemm + zhemm + zher2k + zherk + zsymm + zsyr2k + zsyrk + ztrmm + ztrsm + +""" +# +# Author: Pearu Peterson, March 2002 +# refactoring by Fabian Pedregosa, March 2010 +# + +from __future__ import division, print_function, absolute_import + +__all__ = ['get_blas_funcs', 'find_best_blas_type'] + +import numpy as _np + +from scipy.linalg import _fblas +try: + from scipy.linalg import _cblas +except ImportError: + _cblas = None + +# Expose all functions (only fblas --- cblas is an implementation detail) +empty_module = None +from scipy.linalg._fblas import * +del empty_module + +# 'd' will be default for 'i',.. +_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'} + +# some convenience alias for complex functions +_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2', + 'cdot': 'cdotc', 'zdot': 'zdotc', + 'cger': 'cgerc', 'zger': 'zgerc', + 'sdotc': 'sdot', 'sdotu': 'sdot', + 'ddotc': 'ddot', 'ddotu': 'ddot'} + + +def find_best_blas_type(arrays=(), dtype=None): + """Find best-matching BLAS/LAPACK type. + + Arrays are used to determine the optimal prefix of BLAS routines. + + Parameters + ---------- + arrays : sequence of ndarrays, optional + Arrays can be given to determine optimal prefix of BLAS + routines. If not given, double-precision routines will be + used, otherwise the most generic type in arrays will be used. + dtype : str or dtype, optional + Data-type specifier. Not used if `arrays` is non-empty. 
+ + Returns + ------- + prefix : str + BLAS/LAPACK prefix character. + dtype : dtype + Inferred Numpy data type. + prefer_fortran : bool + Whether to prefer Fortran order routines over C order. + + Examples + -------- + >>> import scipy.linalg.blas as bla + >>> a = np.random.rand(10,15) + >>> b = np.asfortranarray(a) # Change the memory layout order + >>> bla.find_best_blas_type((a,)) + ('d', dtype('float64'), False) + >>> bla.find_best_blas_type((a*1j,)) + ('z', dtype('complex128'), False) + >>> bla.find_best_blas_type((b,)) + ('d', dtype('float64'), True) + + """ + dtype = _np.dtype(dtype) + prefer_fortran = False + + if arrays: + # use the most generic type in arrays + dtypes = [ar.dtype for ar in arrays] + dtype = _np.find_common_type(dtypes, ()) + try: + index = dtypes.index(dtype) + except ValueError: + index = 0 + if arrays[index].flags['FORTRAN']: + # prefer Fortran for leading array with column major order + prefer_fortran = True + + prefix = _type_conv.get(dtype.char, 'd') + if dtype.char == 'G': + # complex256 -> complex128 (i.e., C long double -> C double) + dtype = _np.dtype('D') + elif dtype.char not in 'fdFD': + dtype = _np.dtype('d') + + return prefix, dtype, prefer_fortran + + +def _get_funcs(names, arrays, dtype, + lib_name, fmodule, cmodule, + fmodule_name, cmodule_name, alias): + """ + Return available BLAS/LAPACK functions. + + Used also in lapack.py. See get_blas_funcs for docstring. + """ + + funcs = [] + unpack = False + dtype = _np.dtype(dtype) + module1 = (cmodule, cmodule_name) + module2 = (fmodule, fmodule_name) + + if isinstance(names, str): + names = (names,) + unpack = True + + prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype) + + if prefer_fortran: + module1, module2 = module2, module1 + + for i, name in enumerate(names): + func_name = prefix + name + func_name = alias.get(func_name, func_name) + func = getattr(module1[0], func_name, None) + module_name = module1[1] + if func is None: + func = getattr(module2[0], func_name, None) + module_name = module2[1] + if func is None: + raise ValueError( + '%s function %s could not be found' % (lib_name, func_name)) + func.module_name, func.typecode = module_name, prefix + func.dtype = dtype + func.prefix = prefix # Backward compatibility + funcs.append(func) + + if unpack: + return funcs[0] + else: + return funcs + + +def get_blas_funcs(names, arrays=(), dtype=None): + """Return available BLAS function objects from names. + + Arrays are used to determine the optimal prefix of BLAS routines. + + Parameters + ---------- + names : str or sequence of str + Name(s) of BLAS functions without type prefix. + + arrays : sequence of ndarrays, optional + Arrays can be given to determine optimal prefix of BLAS + routines. If not given, double-precision routines will be + used, otherwise the most generic type in arrays will be used. + + dtype : str or dtype, optional + Data-type specifier. Not used if `arrays` is non-empty. + + + Returns + ------- + funcs : list + List containing the found function(s). + + + Notes + ----- + This routine automatically chooses between Fortran/C + interfaces. Fortran code is used whenever possible for arrays with + column major order. In all other cases, C code is preferred. + + In BLAS, the naming convention is that all functions start with a + type prefix, which depends on the type of the principal + matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy + types {float32, float64, complex64, complex128} respectively. 
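+    Integer and other unsupported dtypes fall back to the double precision
+    routines.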
+ The code and the dtype are stored in attributes `typecode` and `dtype` + of the returned functions. + + Examples + -------- + >>> import scipy.linalg as LA + >>> a = np.random.rand(3,2) + >>> x_gemv = LA.get_blas_funcs('gemv', (a,)) + >>> x_gemv.typecode + 'd' + >>> x_gemv = LA.get_blas_funcs('gemv',(a*1j,)) + >>> x_gemv.typecode + 'z' + + """ + return _get_funcs(names, arrays, dtype, + "BLAS", _fblas, _cblas, "fblas", "cblas", + _blas_alias) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/blas.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/blas.pyc new file mode 100644 index 0000000..c4b9e55 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/blas.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_blas.pxd b/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_blas.pxd new file mode 100644 index 0000000..e30c9e2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_blas.pxd @@ -0,0 +1,314 @@ +# This file was generated by _generate_pyx.py. +# Do not edit this file directly. + +# Within scipy, these wrappers can be used via relative or absolute cimport. +# Examples: +# from ..linalg cimport cython_blas +# from scipy.linalg cimport cython_blas +# cimport scipy.linalg.cython_blas as cython_blas +# cimport ..linalg.cython_blas as cython_blas + +# Within scipy, if BLAS functions are needed in C/C++/Fortran, +# these wrappers should not be used. +# The original libraries should be linked directly. + +ctypedef float s +ctypedef double d +ctypedef float complex c +ctypedef double complex z + +cdef void caxpy(int *n, c *ca, c *cx, int *incx, c *cy, int *incy) nogil + +cdef void ccopy(int *n, c *cx, int *incx, c *cy, int *incy) nogil + +cdef c cdotc(int *n, c *cx, int *incx, c *cy, int *incy) nogil + +cdef c cdotu(int *n, c *cx, int *incx, c *cy, int *incy) nogil + +cdef void cgbmv(char *trans, int *m, int *n, int *kl, int *ku, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void cgemm(char *transa, char *transb, int *m, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil + +cdef void cgemv(char *trans, int *m, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void cgerc(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil + +cdef void cgeru(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil + +cdef void chbmv(char *uplo, int *n, int *k, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void chemm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil + +cdef void chemv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void cher(char *uplo, int *n, s *alpha, c *x, int *incx, c *a, int *lda) nogil + +cdef void cher2(char *uplo, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil + +cdef void cher2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, s *beta, c *c, int *ldc) nogil + +cdef void cherk(char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c, int *ldc) nogil + +cdef void chpmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void chpr(char *uplo, int *n, s *alpha, c *x, int *incx, c *ap) nogil + +cdef void chpr2(char *uplo, 
int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *ap) nogil + +cdef void crotg(c *ca, c *cb, s *c, c *s) nogil + +cdef void cscal(int *n, c *ca, c *cx, int *incx) nogil + +cdef void csrot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, s *s) nogil + +cdef void csscal(int *n, s *sa, c *cx, int *incx) nogil + +cdef void cswap(int *n, c *cx, int *incx, c *cy, int *incy) nogil + +cdef void csymm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil + +cdef void csyr2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil + +cdef void csyrk(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *beta, c *c, int *ldc) nogil + +cdef void ctbmv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil + +cdef void ctbsv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil + +cdef void ctpmv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil + +cdef void ctpsv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil + +cdef void ctrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil + +cdef void ctrmv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil + +cdef void ctrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil + +cdef void ctrsv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil + +cdef d dasum(int *n, d *dx, int *incx) nogil + +cdef void daxpy(int *n, d *da, d *dx, int *incx, d *dy, int *incy) nogil + +cdef d dcabs1(z *z) nogil + +cdef void dcopy(int *n, d *dx, int *incx, d *dy, int *incy) nogil + +cdef d ddot(int *n, d *dx, int *incx, d *dy, int *incy) nogil + +cdef void dgbmv(char *trans, int *m, int *n, int *kl, int *ku, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dgemm(char *transa, char *transb, int *m, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil + +cdef void dgemv(char *trans, int *m, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dger(int *m, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil + +cdef d dnrm2(int *n, d *x, int *incx) nogil + +cdef void drot(int *n, d *dx, int *incx, d *dy, int *incy, d *c, d *s) nogil + +cdef void drotg(d *da, d *db, d *c, d *s) nogil + +cdef void drotm(int *n, d *dx, int *incx, d *dy, int *incy, d *dparam) nogil + +cdef void drotmg(d *dd1, d *dd2, d *dx1, d *dy1, d *dparam) nogil + +cdef void dsbmv(char *uplo, int *n, int *k, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dscal(int *n, d *da, d *dx, int *incx) nogil + +cdef d dsdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil + +cdef void dspmv(char *uplo, int *n, d *alpha, d *ap, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dspr(char *uplo, int *n, d *alpha, d *x, int *incx, d *ap) nogil + +cdef void dspr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *ap) nogil + +cdef void dswap(int *n, d *dx, int *incx, d *dy, int *incy) nogil + +cdef void dsymm(char *side, char *uplo, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil + +cdef void dsymv(char *uplo, int *n, d *alpha, d *a, int 
*lda, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dsyr(char *uplo, int *n, d *alpha, d *x, int *incx, d *a, int *lda) nogil + +cdef void dsyr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil + +cdef void dsyr2k(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil + +cdef void dsyrk(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c, int *ldc) nogil + +cdef void dtbmv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil + +cdef void dtbsv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil + +cdef void dtpmv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil + +cdef void dtpsv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil + +cdef void dtrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil + +cdef void dtrmv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil + +cdef void dtrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil + +cdef void dtrsv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil + +cdef d dzasum(int *n, z *zx, int *incx) nogil + +cdef d dznrm2(int *n, z *x, int *incx) nogil + +cdef int icamax(int *n, c *cx, int *incx) nogil + +cdef int idamax(int *n, d *dx, int *incx) nogil + +cdef int isamax(int *n, s *sx, int *incx) nogil + +cdef int izamax(int *n, z *zx, int *incx) nogil + +cdef bint lsame(char *ca, char *cb) nogil + +cdef s sasum(int *n, s *sx, int *incx) nogil + +cdef void saxpy(int *n, s *sa, s *sx, int *incx, s *sy, int *incy) nogil + +cdef s scasum(int *n, c *cx, int *incx) nogil + +cdef s scnrm2(int *n, c *x, int *incx) nogil + +cdef void scopy(int *n, s *sx, int *incx, s *sy, int *incy) nogil + +cdef s sdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil + +cdef s sdsdot(int *n, s *sb, s *sx, int *incx, s *sy, int *incy) nogil + +cdef void sgbmv(char *trans, int *m, int *n, int *kl, int *ku, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void sgemm(char *transa, char *transb, int *m, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil + +cdef void sgemv(char *trans, int *m, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void sger(int *m, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil + +cdef s snrm2(int *n, s *x, int *incx) nogil + +cdef void srot(int *n, s *sx, int *incx, s *sy, int *incy, s *c, s *s) nogil + +cdef void srotg(s *sa, s *sb, s *c, s *s) nogil + +cdef void srotm(int *n, s *sx, int *incx, s *sy, int *incy, s *sparam) nogil + +cdef void srotmg(s *sd1, s *sd2, s *sx1, s *sy1, s *sparam) nogil + +cdef void ssbmv(char *uplo, int *n, int *k, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void sscal(int *n, s *sa, s *sx, int *incx) nogil + +cdef void sspmv(char *uplo, int *n, s *alpha, s *ap, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void sspr(char *uplo, int *n, s *alpha, s *x, int *incx, s *ap) nogil + +cdef void sspr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *ap) nogil + +cdef void sswap(int *n, s *sx, int *incx, s *sy, int *incy) nogil + +cdef void ssymm(char *side, char 
*uplo, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil + +cdef void ssymv(char *uplo, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void ssyr(char *uplo, int *n, s *alpha, s *x, int *incx, s *a, int *lda) nogil + +cdef void ssyr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil + +cdef void ssyr2k(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil + +cdef void ssyrk(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c, int *ldc) nogil + +cdef void stbmv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil + +cdef void stbsv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil + +cdef void stpmv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil + +cdef void stpsv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil + +cdef void strmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil + +cdef void strmv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil + +cdef void strsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil + +cdef void strsv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil + +cdef void zaxpy(int *n, z *za, z *zx, int *incx, z *zy, int *incy) nogil + +cdef void zcopy(int *n, z *zx, int *incx, z *zy, int *incy) nogil + +cdef z zdotc(int *n, z *zx, int *incx, z *zy, int *incy) nogil + +cdef z zdotu(int *n, z *zx, int *incx, z *zy, int *incy) nogil + +cdef void zdrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, d *s) nogil + +cdef void zdscal(int *n, d *da, z *zx, int *incx) nogil + +cdef void zgbmv(char *trans, int *m, int *n, int *kl, int *ku, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zgemm(char *transa, char *transb, int *m, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil + +cdef void zgemv(char *trans, int *m, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zgerc(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil + +cdef void zgeru(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil + +cdef void zhbmv(char *uplo, int *n, int *k, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zhemm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil + +cdef void zhemv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zher(char *uplo, int *n, d *alpha, z *x, int *incx, z *a, int *lda) nogil + +cdef void zher2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil + +cdef void zher2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, d *beta, z *c, int *ldc) nogil + +cdef void zherk(char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c, int *ldc) nogil + +cdef void zhpmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zhpr(char *uplo, int *n, d *alpha, z *x, int *incx, z *ap) 
nogil + +cdef void zhpr2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *ap) nogil + +cdef void zrotg(z *ca, z *cb, d *c, z *s) nogil + +cdef void zscal(int *n, z *za, z *zx, int *incx) nogil + +cdef void zswap(int *n, z *zx, int *incx, z *zy, int *incy) nogil + +cdef void zsymm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil + +cdef void zsyr2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil + +cdef void zsyrk(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *beta, z *c, int *ldc) nogil + +cdef void ztbmv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil + +cdef void ztbsv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil + +cdef void ztpmv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil + +cdef void ztpsv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil + +cdef void ztrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil + +cdef void ztrmv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) nogil + +cdef void ztrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil + +cdef void ztrsv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) nogil diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_blas.so b/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_blas.so new file mode 100755 index 0000000..4c374b3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_blas.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_lapack.pxd b/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_lapack.pxd new file mode 100644 index 0000000..802d51a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_lapack.pxd @@ -0,0 +1,3021 @@ +# This file was generated by _generate_pyx.py. +# Do not edit this file directly. + +# Within scipy, these wrappers can be used via relative or absolute cimport. +# Examples: +# from ..linalg cimport cython_lapack +# from scipy.linalg cimport cython_lapack +# cimport scipy.linalg.cython_lapack as cython_lapack +# cimport ..linalg.cython_lapack as cython_lapack + +# Within scipy, if LAPACK functions are needed in C/C++/Fortran, +# these wrappers should not be used. +# The original libraries should be linked directly. + +ctypedef float s +ctypedef double d +ctypedef float complex c +ctypedef double complex z + +# Function pointer type declarations for +# gees and gges families of functions. 
+ctypedef bint cselect1(c*) +ctypedef bint cselect2(c*, c*) +ctypedef bint dselect2(d*, d*) +ctypedef bint dselect3(d*, d*, d*) +ctypedef bint sselect2(s*, s*) +ctypedef bint sselect3(s*, s*, s*) +ctypedef bint zselect1(z*) +ctypedef bint zselect2(z*, z*) + +cdef void cbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *rwork, int *lrwork, int *info) nogil + +cdef void cbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, c *vt, int *ldvt, c *u, int *ldu, c *c, int *ldc, s *rwork, int *info) nogil + +cdef void cgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *pt, int *ldpt, c *c, int *ldc, c *work, s *rwork, int *info) nogil + +cdef void cgbcon(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cgbequ(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void cgbequb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void cgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgbsv(int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgbtf2(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void cgbtrf(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void cgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cgebak(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, c *v, int *ldv, int *info) nogil + +cdef void cgebal(char *job, int *n, c *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil + +cdef void cgebd2(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *info) nogil + +cdef void cgebrd(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *lwork, int *info) nogil + +cdef void cgecon(char *norm, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cgeequ(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void cgeequb(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil + +cdef void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil + +cdef void cgeev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int 
*lwork, s *rwork, int *info) nogil + +cdef void cgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgehd2(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgehrd(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgelq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgelqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgels(char *trans, int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *work, int *lwork, int *info) nogil + +cdef void cgelsd(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil + +cdef void cgelss(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgelsy(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *jpvt, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *info) nogil + +cdef void cgeql2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgeqlf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgeqp3(int *m, int *n, c *a, int *lda, int *jpvt, c *tau, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgeqr2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgeqr2p(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgeqrf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgeqrfp(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgeqrt(int *m, int *n, int *nb, c *a, int *lda, c *t, int *ldt, c *work, int *info) nogil + +cdef void cgeqrt2(int *m, int *n, c *a, int *lda, c *t, int *ldt, int *info) nogil + +cdef void cgeqrt3(int *m, int *n, c *a, int *lda, c *t, int *ldt, int *info) nogil + +cdef void cgerfs(char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgerq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgerqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgesc2(int *n, c *a, int *lda, c *rhs, int *ipiv, int *jpiv, s *scale) nogil + +cdef void cgesdd(char *jobz, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil + +cdef void cgesv(int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cgesvd(char *jobu, char *jobvt, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgesvx(char *fact, char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgetc2(int *n, c *a, int *lda, int *ipiv, 
int *jpiv, int *info) nogil + +cdef void cgetf2(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil + +cdef void cgetrf(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil + +cdef void cgetri(int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void cgetrs(char *trans, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cggbak(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, c *v, int *ldv, int *info) nogil + +cdef void cggbal(char *job, int *n, c *a, int *lda, c *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil + +cdef void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil + +cdef void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil + +cdef void cggev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, bint *bwork, int *info) nogil + +cdef void cggglm(int *n, int *m, int *p, c *a, int *lda, c *b, int *ldb, c *d, c *x, c *y, c *work, int *lwork, int *info) nogil + +cdef void cgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *info) nogil + +cdef void cgglse(int *m, int *n, int *p, c *a, int *lda, c *b, int *ldb, c *c, c *d, c *x, c *work, int *lwork, int *info) nogil + +cdef void cggqrf(int *n, int *m, int *p, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int *lwork, int *info) nogil + +cdef void cggrqf(int *m, int *p, int *n, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int *lwork, int *info) nogil + +cdef void cgtcon(char *norm, int *n, c *dl, c *d, c *du, c *du2, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void cgtrfs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgtsv(int *n, int *nrhs, c *dl, c *d, c *du, c *b, int *ldb, int *info) nogil + +cdef void cgtsvx(char *fact, char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgttrf(int *n, c *dl, c *d, c *du, c *du2, int *ipiv, int *info) nogil + +cdef void cgttrs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cgtts2(int *itrans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb) nogil + +cdef void chbev(char *jobz, char *uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil + +cdef void chbevd(char *jobz, char 
*uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chbevx(char *jobz, char *range, char *uplo, int *n, int *kd, c *ab, int *ldab, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chbgst(char *vect, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *x, int *ldx, c *work, s *rwork, int *info) nogil + +cdef void chbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil + +cdef void chbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chbtrd(char *vect, char *uplo, int *n, int *kd, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *work, int *info) nogil + +cdef void checon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void cheequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil + +cdef void cheev(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cheevd(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void cheevr(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void cheevx(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chegs2(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void chegst(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void chegv(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void chegvd(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chegvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void cherfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void chesv(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil + +cdef void chesvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, 
s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cheswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil + +cdef void chetd2(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, int *info) nogil + +cdef void chetf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil + +cdef void chetrd(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, c *work, int *lwork, int *info) nogil + +cdef void chetrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void chetri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil + +cdef void chetri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void chetri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil + +cdef void chetrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void chetrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil + +cdef void chfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c) nogil + +cdef void chgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *t, int *ldt, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, c *work, int *lwork, s *rwork, int *info) nogil + +cdef char chla_transtype(int *trans) nogil + +cdef void chpcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void chpev(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil + +cdef void chpevd(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chpevx(char *jobz, char *range, char *uplo, int *n, c *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chpgst(int *itype, char *uplo, int *n, c *ap, c *bp, int *info) nogil + +cdef void chpgv(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil + +cdef void chpgvd(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chpgvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *ap, c *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void chpsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void chpsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void chptrd(char *uplo, int *n, c *ap, s *d, s *e, c *tau, int *info) nogil + +cdef void chptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil + +cdef void chptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil + +cdef void chptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void chsein(char *side, char *eigsrc, char *initv, bint 
*select, int *n, c *h, int *ldh, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *ifaill, int *ifailr, int *info) nogil + +cdef void chseqr(char *job, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, c *z, int *ldz, c *work, int *lwork, int *info) nogil + +cdef void clabrd(int *m, int *n, int *nb, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *x, int *ldx, c *y, int *ldy) nogil + +cdef void clacgv(int *n, c *x, int *incx) nogil + +cdef void clacn2(int *n, c *v, c *x, s *est, int *kase, int *isave) nogil + +cdef void clacon(int *n, c *v, c *x, s *est, int *kase) nogil + +cdef void clacp2(char *uplo, int *m, int *n, s *a, int *lda, c *b, int *ldb) nogil + +cdef void clacpy(char *uplo, int *m, int *n, c *a, int *lda, c *b, int *ldb) nogil + +cdef void clacrm(int *m, int *n, c *a, int *lda, s *b, int *ldb, c *c, int *ldc, s *rwork) nogil + +cdef void clacrt(int *n, c *cx, int *incx, c *cy, int *incy, c *c, c *s) nogil + +cdef c cladiv(c *x, c *y) nogil + +cdef void claed0(int *qsiz, int *n, s *d, s *e, c *q, int *ldq, c *qstore, int *ldqs, s *rwork, int *iwork, int *info) nogil + +cdef void claed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, c *q, int *ldq, s *rho, int *indxq, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, c *work, s *rwork, int *iwork, int *info) nogil + +cdef void claed8(int *k, int *n, int *qsiz, c *q, int *ldq, s *d, s *rho, int *cutpnt, s *z, s *dlamda, c *q2, int *ldq2, s *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, s *givnum, int *info) nogil + +cdef void claein(bint *rightv, bint *noinit, int *n, c *h, int *ldh, c *w, c *v, c *b, int *ldb, s *rwork, s *eps3, s *smlnum, int *info) nogil + +cdef void claesy(c *a, c *b, c *c, c *rt1, c *rt2, c *evscal, c *cs1, c *sn1) nogil + +cdef void claev2(c *a, c *b, c *c, s *rt1, s *rt2, s *cs1, c *sn1) nogil + +cdef void clag2z(int *m, int *n, c *sa, int *ldsa, z *a, int *lda, int *info) nogil + +cdef void clags2(bint *upper, s *a1, c *a2, s *a3, s *b1, c *b2, s *b3, s *csu, c *snu, s *csv, c *snv, s *csq, c *snq) nogil + +cdef void clagtm(char *trans, int *n, int *nrhs, s *alpha, c *dl, c *d, c *du, c *x, int *ldx, s *beta, c *b, int *ldb) nogil + +cdef void clahef(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil + +cdef void clahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, int *info) nogil + +cdef void clahr2(int *n, int *k, int *nb, c *a, int *lda, c *tau, c *t, int *ldt, c *y, int *ldy) nogil + +cdef void claic1(int *job, int *j, c *x, s *sest, c *w, c *gamma, s *sestpr, c *s, c *c) nogil + +cdef void clals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *rwork, int *info) nogil + +cdef void clalsa(int *icompq, int *smlsiz, int *n, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *rwork, int *iwork, int *info) nogil + +cdef void clalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, c *b, int *ldb, s *rcond, int *rank, c *work, s *rwork, int *iwork, int *info) nogil + +cdef s clangb(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, s *work) nogil + 
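Both generated .pxd files in this diff open with the same usage note: the wrappers are meant to be cimported from Cython code. As a minimal sketch of what that looks like in practice, assuming two equal-length contiguous float64 buffers (the function name dot is illustrative, not part of scipy), the ddot wrapper declared in cython_blas.pxd above can be called from a .pyx module like this:

    cimport scipy.linalg.cython_blas as blas

    def dot(double[::1] x, double[::1] y):
        # BLAS takes every argument by pointer; inc=1 walks both
        # buffers contiguously. Assumes x and y have the same length.
        cdef int n = x.shape[0], inc = 1
        return blas.ddot(&n, &x[0], &inc, &y[0], &inc)

Since the declarations are all nogil, such calls can also sit inside a with nogil: block when releasing the GIL matters.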
+cdef s clange(char *norm, int *m, int *n, c *a, int *lda, s *work) nogil + +cdef s clangt(char *norm, int *n, c *dl, c *d, c *du) nogil + +cdef s clanhb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil + +cdef s clanhe(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil + +cdef s clanhf(char *norm, char *transr, char *uplo, int *n, c *a, s *work) nogil + +cdef s clanhp(char *norm, char *uplo, int *n, c *ap, s *work) nogil + +cdef s clanhs(char *norm, int *n, c *a, int *lda, s *work) nogil + +cdef s clanht(char *norm, int *n, s *d, c *e) nogil + +cdef s clansb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil + +cdef s clansp(char *norm, char *uplo, int *n, c *ap, s *work) nogil + +cdef s clansy(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil + +cdef s clantb(char *norm, char *uplo, char *diag, int *n, int *k, c *ab, int *ldab, s *work) nogil + +cdef s clantp(char *norm, char *uplo, char *diag, int *n, c *ap, s *work) nogil + +cdef s clantr(char *norm, char *uplo, char *diag, int *m, int *n, c *a, int *lda, s *work) nogil + +cdef void clapll(int *n, c *x, int *incx, c *y, int *incy, s *ssmin) nogil + +cdef void clapmr(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil + +cdef void clapmt(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil + +cdef void claqgb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil + +cdef void claqge(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil + +cdef void claqhb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil + +cdef void claqhe(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil + +cdef void claqhp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil + +cdef void claqp2(int *m, int *n, int *offset, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *work) nogil + +cdef void claqps(int *m, int *n, int *offset, int *nb, int *kb, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *auxv, c *f, int *ldf) nogil + +cdef void claqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil + +cdef void claqr1(int *n, c *h, int *ldh, c *s1, c *s2, c *v) nogil + +cdef void claqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil + +cdef void claqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil + +cdef void claqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil + +cdef void claqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, c *s, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, c *v, int *ldv, c *u, int *ldu, int *nv, c *wv, int *ldwv, int *nh, c *wh, int *ldwh) nogil + +cdef void claqsb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil + +cdef void claqsp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil + 
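The cselect/dselect/sselect/zselect ctypedefs at the top of this file exist only as callback types for the gees and gges Schur-decomposition drivers: with sort set to 'S', LAPACK moves the eigenvalues for which the callback returns true into the leading block of the Schur form. A hedged sketch of such a callback (the name left_half and the predicate itself are illustrative):

    cimport scipy.linalg.cython_lapack as lapack

    # Matches dselect2: receives pointers to one eigenvalue's real and
    # imaginary parts; a nonzero return selects that eigenvalue.
    cdef bint left_half(double *wr, double *wi) nogil:
        return wr[0] < 0.0

    # It would then be passed to dgees (declared further down in this
    # file) as the select argument, e.g. <lapack.dselect2*>left_half,
    # together with sort = 'S'.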
+cdef void claqsy(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil + +cdef void clar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, c *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil + +cdef void clar2v(int *n, c *x, c *y, c *z, int *incx, s *c, c *s, int *incc) nogil + +cdef void clarcm(int *m, int *n, s *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *rwork) nogil + +cdef void clarf(char *side, int *m, int *n, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil + +cdef void clarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil + +cdef void clarfg(int *n, c *alpha, c *x, int *incx, c *tau) nogil + +cdef void clarfgp(int *n, c *alpha, c *x, int *incx, c *tau) nogil + +cdef void clarft(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil + +cdef void clarfx(char *side, int *m, int *n, c *v, c *tau, c *c, int *ldc, c *work) nogil + +cdef void clargv(int *n, c *x, int *incx, c *y, int *incy, s *c, int *incc) nogil + +cdef void clarnv(int *idist, int *iseed, int *n, c *x) nogil + +cdef void clarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, c *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil + +cdef void clartg(c *f, c *g, s *cs, c *sn, c *r) nogil + +cdef void clartv(int *n, c *x, int *incx, c *y, int *incy, s *c, c *s, int *incc) nogil + +cdef void clarz(char *side, int *m, int *n, int *l, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil + +cdef void clarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil + +cdef void clarzt(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil + +cdef void clascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, c *a, int *lda, int *info) nogil + +cdef void claset(char *uplo, int *m, int *n, c *alpha, c *beta, c *a, int *lda) nogil + +cdef void clasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, c *a, int *lda) nogil + +cdef void classq(int *n, c *x, int *incx, s *scale, s *sumsq) nogil + +cdef void claswp(int *n, c *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil + +cdef void clasyf(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil + +cdef void clatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, c *ab, int *ldab, c *x, s *scale, s *cnorm, int *info) nogil + +cdef void clatdf(int *ijob, int *n, c *z, int *ldz, c *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil + +cdef void clatps(char *uplo, char *trans, char *diag, char *normin, int *n, c *ap, c *x, s *scale, s *cnorm, int *info) nogil + +cdef void clatrd(char *uplo, int *n, int *nb, c *a, int *lda, s *e, c *tau, c *w, int *ldw) nogil + +cdef void clatrs(char *uplo, char *trans, char *diag, char *normin, int *n, c *a, int *lda, c *x, s *scale, s *cnorm, int *info) nogil + +cdef void clatrz(int *m, int *n, int *l, c *a, int *lda, c *tau, c *work) nogil + +cdef void clauu2(char *uplo, int *n, c *a, int *lda, int *info) nogil + +cdef void clauum(char *uplo, int *n, c *a, int *lda, int *info) nogil + +cdef 
void cpbcon(char *uplo, int *n, int *kd, c *ab, int *ldab, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cpbequ(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil + +cdef void cpbrfs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpbstf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil + +cdef void cpbsv(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil + +cdef void cpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpbtf2(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil + +cdef void cpbtrf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil + +cdef void cpbtrs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil + +cdef void cpftrf(char *transr, char *uplo, int *n, c *a, int *info) nogil + +cdef void cpftri(char *transr, char *uplo, int *n, c *a, int *info) nogil + +cdef void cpftrs(char *transr, char *uplo, int *n, int *nrhs, c *a, c *b, int *ldb, int *info) nogil + +cdef void cpocon(char *uplo, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cpoequ(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil + +cdef void cpoequb(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil + +cdef void cporfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cposv(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void cposvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpotf2(char *uplo, int *n, c *a, int *lda, int *info) nogil + +cdef void cpotrf(char *uplo, int *n, c *a, int *lda, int *info) nogil + +cdef void cpotri(char *uplo, int *n, c *a, int *lda, int *info) nogil + +cdef void cpotrs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void cppcon(char *uplo, int *n, c *ap, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cppequ(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, int *info) nogil + +cdef void cpprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cppsv(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil + +cdef void cppsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpptrf(char *uplo, int *n, c *ap, int *info) nogil + +cdef void cpptri(char *uplo, int *n, c *ap, int *info) nogil + +cdef void cpptrs(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil + +cdef void cpstf2(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil + +cdef void cpstrf(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil + +cdef void cptcon(int *n, s *d, c 
*e, s *anorm, s *rcond, s *rwork, int *info) nogil + +cdef void cpteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil + +cdef void cptrfs(char *uplo, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cptsv(int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil + +cdef void cptsvx(char *fact, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpttrf(int *n, s *d, c *e, int *info) nogil + +cdef void cpttrs(char *uplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil + +cdef void cptts2(int *iuplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb) nogil + +cdef void crot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, c *s) nogil + +cdef void cspcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void cspmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void cspr(char *uplo, int *n, c *alpha, c *x, int *incx, c *ap) nogil + +cdef void csprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cspsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cspsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void csptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil + +cdef void csptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil + +cdef void csptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void csrscl(int *n, s *sa, c *sx, int *incx) nogil + +cdef void cstedc(char *compz, int *n, s *d, s *e, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void cstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void cstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, c *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void cstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, c *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void csteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil + +cdef void csycon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void csyconv(char *uplo, char *way, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil + +cdef void csyequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil + +cdef void csymv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void csyr(char *uplo, int *n, c *alpha, c *x, int *incx, c *a, int *lda) nogil + +cdef void csyrfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void csysv(char *uplo, int *n, int *nrhs, c *a, int *lda, int 
*ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil + +cdef void csysvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void csyswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil + +cdef void csytf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil + +cdef void csytrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void csytri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil + +cdef void csytri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void csytri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil + +cdef void csytrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void csytrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil + +cdef void ctbcon(char *norm, char *uplo, char *diag, int *n, int *kd, c *ab, int *ldab, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void ctbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void ctbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil + +cdef void ctfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, c *alpha, c *a, c *b, int *ldb) nogil + +cdef void ctftri(char *transr, char *uplo, char *diag, int *n, c *a, int *info) nogil + +cdef void ctfttp(char *transr, char *uplo, int *n, c *arf, c *ap, int *info) nogil + +cdef void ctfttr(char *transr, char *uplo, int *n, c *arf, c *a, int *lda, int *info) nogil + +cdef void ctgevc(char *side, char *howmny, bint *select, int *n, c *s, int *lds, c *p, int *ldp, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil + +cdef void ctgex2(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *j1, int *info) nogil + +cdef void ctgexc(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *ifst, int *ilst, int *info) nogil + +cdef void ctgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, int *m, s *pl, s *pr, s *dif, c *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ctgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, c *a, int *lda, c *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, c *u, int *ldu, c *v, int *ldv, c *q, int *ldq, c *work, int *ncycle, int *info) nogil + +cdef void ctgsna(char *job, char *howmny, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *dif, int *mm, int *m, c *work, int *lwork, int *iwork, int *info) nogil + +cdef void ctgsy2(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *info) nogil + +cdef void ctgsyl(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *dif, c *work, int 
*lwork, int *iwork, int *info) nogil + +cdef void ctpcon(char *norm, char *uplo, char *diag, int *n, c *ap, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void ctpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *info) nogil + +cdef void ctpqrt(int *m, int *n, int *l, int *nb, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, c *work, int *info) nogil + +cdef void ctpqrt2(int *m, int *n, int *l, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, int *info) nogil + +cdef void ctprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *ldwork) nogil + +cdef void ctprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void ctptri(char *uplo, char *diag, int *n, c *ap, int *info) nogil + +cdef void ctptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil + +cdef void ctpttf(char *transr, char *uplo, int *n, c *ap, c *arf, int *info) nogil + +cdef void ctpttr(char *uplo, int *n, c *ap, c *a, int *lda, int *info) nogil + +cdef void ctrcon(char *norm, char *uplo, char *diag, int *n, c *a, int *lda, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void ctrevc(char *side, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil + +cdef void ctrexc(char *compq, int *n, c *t, int *ldt, c *q, int *ldq, int *ifst, int *ilst, int *info) nogil + +cdef void ctrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void ctrsen(char *job, char *compq, bint *select, int *n, c *t, int *ldt, c *q, int *ldq, c *w, int *m, s *s, s *sep, c *work, int *lwork, int *info) nogil + +cdef void ctrsna(char *job, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *sep, int *mm, int *m, c *work, int *ldwork, s *rwork, int *info) nogil + +cdef void ctrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *scale, int *info) nogil + +cdef void ctrti2(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil + +cdef void ctrtri(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil + +cdef void ctrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void ctrttf(char *transr, char *uplo, int *n, c *a, int *lda, c *arf, int *info) nogil + +cdef void ctrttp(char *uplo, int *n, c *a, int *lda, c *ap, int *info) nogil + +cdef void ctzrzf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cunbdb(char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, s *phi, c *taup1, c *taup2, c *tauq1, c *tauq2, c *work, int *lwork, int *info) nogil + +cdef void cuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, 
[… project/venv (continued): scipy's auto-generated cython_lapack.pxd — nogil cdef declarations for the full LAPACK routine set (cung2l … cunmqr, dgesv, dsyev, dgeqrf, and the matching s/c/z precision variants), committed verbatim as part of the virtualenv …]
slacon(int *n, s *v, s *x, int *isgn, s *est, int *kase) nogil + +cdef void slacpy(char *uplo, int *m, int *n, s *a, int *lda, s *b, int *ldb) nogil + +cdef void sladiv(s *a, s *b, s *c, s *d, s *p, s *q) nogil + +cdef void slae2(s *a, s *b, s *c, s *rt1, s *rt2) nogil + +cdef void slaebz(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, s *abstol, s *reltol, s *pivmin, s *d, s *e, s *e2, int *nval, s *ab, s *c, int *mout, int *nab, s *work, int *iwork, int *info) nogil + +cdef void slaed0(int *icompq, int *qsiz, int *n, s *d, s *e, s *q, int *ldq, s *qstore, int *ldqs, s *work, int *iwork, int *info) nogil + +cdef void slaed1(int *n, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *work, int *iwork, int *info) nogil + +cdef void slaed2(int *k, int *n, int *n1, s *d, s *q, int *ldq, int *indxq, s *rho, s *z, s *dlamda, s *w, s *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil + +cdef void slaed3(int *k, int *n, int *n1, s *d, s *q, int *ldq, s *rho, s *dlamda, s *q2, int *indx, int *ctot, s *w, s *s, int *info) nogil + +cdef void slaed4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *dlam, int *info) nogil + +cdef void slaed5(int *i, s *d, s *z, s *delta, s *rho, s *dlam) nogil + +cdef void slaed6(int *kniter, bint *orgati, s *rho, s *d, s *z, s *finit, s *tau, int *info) nogil + +cdef void slaed7(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *work, int *iwork, int *info) nogil + +cdef void slaed8(int *icompq, int *k, int *n, int *qsiz, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *z, s *dlamda, s *q2, int *ldq2, s *w, int *perm, int *givptr, int *givcol, s *givnum, int *indxp, int *indx, int *info) nogil + +cdef void slaed9(int *k, int *kstart, int *kstop, int *n, s *d, s *q, int *ldq, s *rho, s *dlamda, s *w, s *s, int *lds, int *info) nogil + +cdef void slaeda(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *q, int *qptr, s *z, s *ztemp, int *info) nogil + +cdef void slaein(bint *rightv, bint *noinit, int *n, s *h, int *ldh, s *wr, s *wi, s *vr, s *vi, s *b, int *ldb, s *work, s *eps3, s *smlnum, s *bignum, int *info) nogil + +cdef void slaev2(s *a, s *b, s *c, s *rt1, s *rt2, s *cs1, s *sn1) nogil + +cdef void slaexc(bint *wantq, int *n, s *t, int *ldt, s *q, int *ldq, int *j1, int *n1, int *n2, s *work, int *info) nogil + +cdef void slag2(s *a, int *lda, s *b, int *ldb, s *safmin, s *scale1, s *scale2, s *wr1, s *wr2, s *wi) nogil + +cdef void slag2d(int *m, int *n, s *sa, int *ldsa, d *a, int *lda, int *info) nogil + +cdef void slags2(bint *upper, s *a1, s *a2, s *a3, s *b1, s *b2, s *b3, s *csu, s *snu, s *csv, s *snv, s *csq, s *snq) nogil + +cdef void slagtf(int *n, s *a, s *lambda_, s *b, s *c, s *tol, s *d, int *in_, int *info) nogil + +cdef void slagtm(char *trans, int *n, int *nrhs, s *alpha, s *dl, s *d, s *du, s *x, int *ldx, s *beta, s *b, int *ldb) nogil + +cdef void slagts(int *job, int *n, s *a, s *b, s *c, s *d, int *in_, s *y, s *tol, int *info) nogil + +cdef void slagv2(s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *csl, s *snl, s *csr, s *snr) nogil + +cdef void slahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, int *info) nogil + +cdef void slahr2(int *n, int *k, int *nb, s *a, int *lda, s 
*tau, s *t, int *ldt, s *y, int *ldy) nogil + +cdef void slaic1(int *job, int *j, s *x, s *sest, s *w, s *gamma, s *sestpr, s *s, s *c) nogil + +cdef void slaln2(bint *ltrans, int *na, int *nw, s *smin, s *ca, s *a, int *lda, s *d1, s *d2, s *b, int *ldb, s *wr, s *wi, s *x, int *ldx, s *scale, s *xnorm, int *info) nogil + +cdef void slals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *info) nogil + +cdef void slalsa(int *icompq, int *smlsiz, int *n, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil + +cdef void slalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, s *b, int *ldb, s *rcond, int *rank, s *work, int *iwork, int *info) nogil + +cdef s slamch(char *cmach) nogil + +cdef void slamrg(int *n1, int *n2, s *a, int *strd1, int *strd2, int *index_bn) nogil + +cdef s slangb(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, s *work) nogil + +cdef s slange(char *norm, int *m, int *n, s *a, int *lda, s *work) nogil + +cdef s slangt(char *norm, int *n, s *dl, s *d, s *du) nogil + +cdef s slanhs(char *norm, int *n, s *a, int *lda, s *work) nogil + +cdef s slansb(char *norm, char *uplo, int *n, int *k, s *ab, int *ldab, s *work) nogil + +cdef s slansf(char *norm, char *transr, char *uplo, int *n, s *a, s *work) nogil + +cdef s slansp(char *norm, char *uplo, int *n, s *ap, s *work) nogil + +cdef s slanst(char *norm, int *n, s *d, s *e) nogil + +cdef s slansy(char *norm, char *uplo, int *n, s *a, int *lda, s *work) nogil + +cdef s slantb(char *norm, char *uplo, char *diag, int *n, int *k, s *ab, int *ldab, s *work) nogil + +cdef s slantp(char *norm, char *uplo, char *diag, int *n, s *ap, s *work) nogil + +cdef s slantr(char *norm, char *uplo, char *diag, int *m, int *n, s *a, int *lda, s *work) nogil + +cdef void slanv2(s *a, s *b, s *c, s *d, s *rt1r, s *rt1i, s *rt2r, s *rt2i, s *cs, s *sn) nogil + +cdef void slapll(int *n, s *x, int *incx, s *y, int *incy, s *ssmin) nogil + +cdef void slapmr(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil + +cdef void slapmt(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil + +cdef s slapy2(s *x, s *y) nogil + +cdef s slapy3(s *x, s *y, s *z) nogil + +cdef void slaqgb(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil + +cdef void slaqge(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil + +cdef void slaqp2(int *m, int *n, int *offset, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *work) nogil + +cdef void slaqps(int *m, int *n, int *offset, int *nb, int *kb, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *auxv, s *f, int *ldf) nogil + +cdef void slaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil + +cdef void slaqr1(int *n, s *h, int *ldh, s *sr1, s *si1, s *sr2, s *si2, s *v) nogil + +cdef void slaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil 
+ +cdef void slaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil + +cdef void slaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil + +cdef void slaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, s *sr, s *si, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, s *v, int *ldv, s *u, int *ldu, int *nv, s *wv, int *ldwv, int *nh, s *wh, int *ldwh) nogil + +cdef void slaqsb(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil + +cdef void slaqsp(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, char *equed) nogil + +cdef void slaqsy(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil + +cdef void slaqtr(bint *ltran, bint *lreal, int *n, s *t, int *ldt, s *b, s *w, s *scale, s *x, s *work, int *info) nogil + +cdef void slar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, s *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil + +cdef void slar2v(int *n, s *x, s *y, s *z, int *incx, s *c, s *s, int *incc) nogil + +cdef void slarf(char *side, int *m, int *n, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil + +cdef void slarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil + +cdef void slarfg(int *n, s *alpha, s *x, int *incx, s *tau) nogil + +cdef void slarfgp(int *n, s *alpha, s *x, int *incx, s *tau) nogil + +cdef void slarft(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil + +cdef void slarfx(char *side, int *m, int *n, s *v, s *tau, s *c, int *ldc, s *work) nogil + +cdef void slargv(int *n, s *x, int *incx, s *y, int *incy, s *c, int *incc) nogil + +cdef void slarnv(int *idist, int *iseed, int *n, s *x) nogil + +cdef void slarra(int *n, s *d, s *e, s *e2, s *spltol, s *tnrm, int *nsplit, int *isplit, int *info) nogil + +cdef void slarrb(int *n, s *d, s *lld, int *ifirst, int *ilast, s *rtol1, s *rtol2, int *offset, s *w, s *wgap, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *twist, int *info) nogil + +cdef void slarrc(char *jobt, int *n, s *vl, s *vu, s *d, s *e, s *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil + +cdef void slarrd(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *gers, s *reltol, s *d, s *e, s *e2, s *pivmin, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wl, s *wu, int *iblock, int *indexw, s *work, int *iwork, int *info) nogil + +cdef void slarre(char *range, int *n, s *vl, s *vu, int *il, int *iu, s *d, s *e, s *e2, s *rtol1, s *rtol2, s *spltol, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *pivmin, s *work, int *iwork, int *info) nogil + +cdef void slarrf(int *n, s *d, s *l, s *ld, int *clstrt, int *clend, s *w, s *wgap, s *werr, s *spdiam, s *clgapl, s *clgapr, s *pivmin, s *sigma, s *dplus, s *lplus, s *work, int *info) nogil + +cdef void slarrj(int *n, s *d, s *e2, int *ifirst, int *ilast, s *rtol, int *offset, s *w, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *info) nogil + +cdef void 
slarrk(int *n, int *iw, s *gl, s *gu, s *d, s *e2, s *pivmin, s *reltol, s *w, s *werr, int *info) nogil + +cdef void slarrr(int *n, s *d, s *e, int *info) nogil + +cdef void slarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil + +cdef void slartg(s *f, s *g, s *cs, s *sn, s *r) nogil + +cdef void slartgp(s *f, s *g, s *cs, s *sn, s *r) nogil + +cdef void slartgs(s *x, s *y, s *sigma, s *cs, s *sn) nogil + +cdef void slartv(int *n, s *x, int *incx, s *y, int *incy, s *c, s *s, int *incc) nogil + +cdef void slaruv(int *iseed, int *n, s *x) nogil + +cdef void slarz(char *side, int *m, int *n, int *l, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil + +cdef void slarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil + +cdef void slarzt(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil + +cdef void slas2(s *f, s *g, s *h, s *ssmin, s *ssmax) nogil + +cdef void slascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, s *a, int *lda, int *info) nogil + +cdef void slasd0(int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, int *smlsiz, int *iwork, s *work, int *info) nogil + +cdef void slasd1(int *nl, int *nr, int *sqre, s *d, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, int *idxq, int *iwork, s *work, int *info) nogil + +cdef void slasd2(int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, s *dsigma, s *u2, int *ldu2, s *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil + +cdef void slasd3(int *nl, int *nr, int *sqre, int *k, s *d, s *q, int *ldq, s *dsigma, s *u, int *ldu, s *u2, int *ldu2, s *vt, int *ldvt, s *vt2, int *ldvt2, int *idxc, int *ctot, s *z, int *info) nogil + +cdef void slasd4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *sigma, s *work, int *info) nogil + +cdef void slasd5(int *i, s *d, s *z, s *delta, s *rho, s *dsigma, s *work) nogil + +cdef void slasd6(int *icompq, int *nl, int *nr, int *sqre, s *d, s *vf, s *vl, s *alpha, s *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *iwork, int *info) nogil + +cdef void slasd7(int *icompq, int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *zw, s *vf, s *vfw, s *vl, s *vlw, s *alpha, s *beta, s *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *c, s *s, int *info) nogil + +cdef void slasd8(int *icompq, int *k, s *d, s *z, s *vf, s *vl, s *difl, s *difr, int *lddifr, s *dsigma, s *work, int *info) nogil + +cdef void slasda(int *icompq, int *smlsiz, int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil + +cdef void slasdq(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil + +cdef void slasdt(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil + +cdef void slaset(char *uplo, int *m, int *n, s *alpha, s *beta, s 
*a, int *lda) nogil + +cdef void slasq1(int *n, s *d, s *e, s *work, int *info) nogil + +cdef void slasq2(int *n, s *z, int *info) nogil + +cdef void slasq3(int *i0, int *n0, s *z, int *pp, s *dmin, s *sigma, s *desig, s *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *g, s *tau) nogil + +cdef void slasq4(int *i0, int *n0, s *z, int *pp, int *n0in, s *dmin, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *tau, int *ttype, s *g) nogil + +cdef void slasq6(int *i0, int *n0, s *z, int *pp, s *dmin, s *dmin1, s *dmin2, s *dn, s *dnm1, s *dnm2) nogil + +cdef void slasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, s *a, int *lda) nogil + +cdef void slasrt(char *id, int *n, s *d, int *info) nogil + +cdef void slassq(int *n, s *x, int *incx, s *scale, s *sumsq) nogil + +cdef void slasv2(s *f, s *g, s *h, s *ssmin, s *ssmax, s *snr, s *csr, s *snl, s *csl) nogil + +cdef void slaswp(int *n, s *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil + +cdef void slasy2(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, s *tl, int *ldtl, s *tr, int *ldtr, s *b, int *ldb, s *scale, s *x, int *ldx, s *xnorm, int *info) nogil + +cdef void slasyf(char *uplo, int *n, int *nb, int *kb, s *a, int *lda, int *ipiv, s *w, int *ldw, int *info) nogil + +cdef void slatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, s *ab, int *ldab, s *x, s *scale, s *cnorm, int *info) nogil + +cdef void slatdf(int *ijob, int *n, s *z, int *ldz, s *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil + +cdef void slatps(char *uplo, char *trans, char *diag, char *normin, int *n, s *ap, s *x, s *scale, s *cnorm, int *info) nogil + +cdef void slatrd(char *uplo, int *n, int *nb, s *a, int *lda, s *e, s *tau, s *w, int *ldw) nogil + +cdef void slatrs(char *uplo, char *trans, char *diag, char *normin, int *n, s *a, int *lda, s *x, s *scale, s *cnorm, int *info) nogil + +cdef void slatrz(int *m, int *n, int *l, s *a, int *lda, s *tau, s *work) nogil + +cdef void slauu2(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void slauum(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void sopgtr(char *uplo, int *n, s *ap, s *tau, s *q, int *ldq, s *work, int *info) nogil + +cdef void sopmtr(char *side, char *uplo, char *trans, int *m, int *n, s *ap, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sorbdb(char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *phi, s *taup1, s *taup2, s *tauq1, s *tauq2, s *work, int *lwork, int *info) nogil + +cdef void sorcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void sorg2l(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sorg2r(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sorgbr(char *vect, int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorghr(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgl2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sorglq(int *m, int *n, 
int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgql(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgqr(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgr2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sorgrq(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgtr(char *uplo, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorm2l(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sorm2r(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sormbr(char *vect, char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sorml2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sormlq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormql(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormqr(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormr2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sormr3(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sormrq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormrz(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormtr(char *side, char *uplo, char *trans, int *m, int *n, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void spbcon(char *uplo, int *n, int *kd, s *ab, int *ldab, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void spbequ(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil + +cdef void spbrfs(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void spbstf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil + +cdef void spbsv(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil + +cdef void spbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void spbtf2(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil + +cdef void spbtrf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil + +cdef void spbtrs(char *uplo, int *n, int *kd, int *nrhs, s *ab, 
int *ldab, s *b, int *ldb, int *info) nogil + +cdef void spftrf(char *transr, char *uplo, int *n, s *a, int *info) nogil + +cdef void spftri(char *transr, char *uplo, int *n, s *a, int *info) nogil + +cdef void spftrs(char *transr, char *uplo, int *n, int *nrhs, s *a, s *b, int *ldb, int *info) nogil + +cdef void spocon(char *uplo, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void spoequ(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil + +cdef void spoequb(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil + +cdef void sporfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sposv(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void sposvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void spotf2(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void spotrf(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void spotri(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void spotrs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void sppcon(char *uplo, int *n, s *ap, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void sppequ(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, int *info) nogil + +cdef void spprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sppsv(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil + +cdef void sppsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void spptrf(char *uplo, int *n, s *ap, int *info) nogil + +cdef void spptri(char *uplo, int *n, s *ap, int *info) nogil + +cdef void spptrs(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil + +cdef void spstf2(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil + +cdef void spstrf(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil + +cdef void sptcon(int *n, s *d, s *e, s *anorm, s *rcond, s *work, int *info) nogil + +cdef void spteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil + +cdef void sptrfs(int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *info) nogil + +cdef void sptsv(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil + +cdef void sptsvx(char *fact, int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *info) nogil + +cdef void spttrf(int *n, s *d, s *e, int *info) nogil + +cdef void spttrs(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil + +cdef void sptts2(int *n, int *nrhs, s *d, s *e, s *b, int *ldb) nogil + +cdef void srscl(int *n, s *sa, s *sx, int *incx) nogil + +cdef void ssbev(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *info) nogil + +cdef void ssbevd(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int 
*lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssbevx(char *jobz, char *range, char *uplo, int *n, int *kd, s *ab, int *ldab, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void ssbgst(char *vect, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *x, int *ldx, s *work, int *info) nogil + +cdef void ssbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *info) nogil + +cdef void ssbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void ssbtrd(char *vect, char *uplo, int *n, int *kd, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *work, int *info) nogil + +cdef void ssfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c) nogil + +cdef void sspcon(char *uplo, int *n, s *ap, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void sspev(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *info) nogil + +cdef void sspevd(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sspevx(char *jobz, char *range, char *uplo, int *n, s *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void sspgst(int *itype, char *uplo, int *n, s *ap, s *bp, int *info) nogil + +cdef void sspgv(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *info) nogil + +cdef void sspgvd(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sspgvx(int *itype, char *jobz, char *range, char *uplo, int *n, s *ap, s *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void ssprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sspsv(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void sspsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void ssptrd(char *uplo, int *n, s *ap, s *d, s *e, s *tau, int *info) nogil + +cdef void ssptrf(char *uplo, int *n, s *ap, int *ipiv, int *info) nogil + +cdef void ssptri(char *uplo, int *n, s *ap, int *ipiv, s *work, int *info) nogil + +cdef void ssptrs(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void sstebz(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *abstol, s *d, s *e, int *m, int *nsplit, s *w, int *iblock, int *isplit, s *work, int *iwork, int *info) nogil + +cdef void sstedc(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) 
nogil + +cdef void sstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void sstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, s *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil + +cdef void ssterf(int *n, s *d, s *e, int *info) nogil + +cdef void sstev(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil + +cdef void sstevd(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sstevr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sstevx(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void ssycon(char *uplo, int *n, s *a, int *lda, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void ssyconv(char *uplo, char *way, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil + +cdef void ssyequb(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, s *work, int *info) nogil + +cdef void ssyev(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *info) nogil + +cdef void ssyevd(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssyevr(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssyevx(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil + +cdef void ssygs2(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void ssygst(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void ssygv(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *info) nogil + +cdef void ssygvd(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssygvx(int *itype, char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil + +cdef void ssyrfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void ssysv(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *lwork, int *info) nogil + +cdef void ssysvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, 
int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void ssyswapr(char *uplo, int *n, s *a, int *lda, int *i1, int *i2) nogil + +cdef void ssytd2(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, int *info) nogil + +cdef void ssytf2(char *uplo, int *n, s *a, int *lda, int *ipiv, int *info) nogil + +cdef void ssytrd(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, s *work, int *lwork, int *info) nogil + +cdef void ssytrf(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil + +cdef void ssytri(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil + +cdef void ssytri2(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil + +cdef void ssytri2x(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *nb, int *info) nogil + +cdef void ssytrs(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void ssytrs2(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *info) nogil + +cdef void stbcon(char *norm, char *uplo, char *diag, int *n, int *kd, s *ab, int *ldab, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void stbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void stbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil + +cdef void stfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, s *alpha, s *a, s *b, int *ldb) nogil + +cdef void stftri(char *transr, char *uplo, char *diag, int *n, s *a, int *info) nogil + +cdef void stfttp(char *transr, char *uplo, int *n, s *arf, s *ap, int *info) nogil + +cdef void stfttr(char *transr, char *uplo, int *n, s *arf, s *a, int *lda, int *info) nogil + +cdef void stgevc(char *side, char *howmny, bint *select, int *n, s *s, int *lds, s *p, int *ldp, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil + +cdef void stgex2(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *j1, int *n1, int *n2, s *work, int *lwork, int *info) nogil + +cdef void stgexc(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *ifst, int *ilst, s *work, int *lwork, int *info) nogil + +cdef void stgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, int *m, s *pl, s *pr, s *dif, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void stgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, s *a, int *lda, s *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, s *u, int *ldu, s *v, int *ldv, s *q, int *ldq, s *work, int *ncycle, int *info) nogil + +cdef void stgsna(char *job, char *howmny, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *dif, int *mm, int *m, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void stgsy2(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *iwork, int *pq, int *info) nogil + +cdef void stgsyl(char *trans, int 
*ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *dif, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void stpcon(char *norm, char *uplo, char *diag, int *n, s *ap, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void stpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *info) nogil + +cdef void stpqrt(int *m, int *n, int *l, int *nb, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, s *work, int *info) nogil + +cdef void stpqrt2(int *m, int *n, int *l, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, int *info) nogil + +cdef void stprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *ldwork) nogil + +cdef void stprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void stptri(char *uplo, char *diag, int *n, s *ap, int *info) nogil + +cdef void stptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil + +cdef void stpttf(char *transr, char *uplo, int *n, s *ap, s *arf, int *info) nogil + +cdef void stpttr(char *uplo, int *n, s *ap, s *a, int *lda, int *info) nogil + +cdef void strcon(char *norm, char *uplo, char *diag, int *n, s *a, int *lda, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void strevc(char *side, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil + +cdef void strexc(char *compq, int *n, s *t, int *ldt, s *q, int *ldq, int *ifst, int *ilst, s *work, int *info) nogil + +cdef void strrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void strsen(char *job, char *compq, bint *select, int *n, s *t, int *ldt, s *q, int *ldq, s *wr, s *wi, int *m, s *s, s *sep, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void strsna(char *job, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *sep, int *mm, int *m, s *work, int *ldwork, int *iwork, int *info) nogil + +cdef void strsyl(char *trana, char *tranb, int *isgn, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *scale, int *info) nogil + +cdef void strti2(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil + +cdef void strtri(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil + +cdef void strtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void strttf(char *transr, char *uplo, int *n, s *a, int *lda, s *arf, int *info) nogil + +cdef void strttp(char *uplo, int *n, s *a, int *lda, s *ap, int *info) nogil + +cdef void stzrzf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void xerbla_array(char *srname_array, int *srname_len, int *info) nogil + +cdef void zbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *rwork, int *lrwork, int *info) nogil + 
+cdef void zbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, z *vt, int *ldvt, z *u, int *ldu, z *c, int *ldc, d *rwork, int *info) nogil + +cdef void zcgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil + +cdef void zcposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil + +cdef void zdrscl(int *n, d *sa, z *sx, int *incx) nogil + +cdef void zgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *pt, int *ldpt, z *c, int *ldc, z *work, d *rwork, int *info) nogil + +cdef void zgbcon(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zgbequ(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void zgbequb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void zgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgbsv(int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgbtf2(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void zgbtrf(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void zgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zgebak(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, z *v, int *ldv, int *info) nogil + +cdef void zgebal(char *job, int *n, z *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil + +cdef void zgebd2(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *info) nogil + +cdef void zgebrd(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *lwork, int *info) nogil + +cdef void zgecon(char *norm, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zgeequ(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void zgeequb(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil + +cdef void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil + +cdef void zgeev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *scale, d 
*abnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgehd2(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgehrd(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgelq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgelqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgels(char *trans, int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *work, int *lwork, int *info) nogil + +cdef void zgelsd(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil + +cdef void zgelss(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgelsy(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *jpvt, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *info) nogil + +cdef void zgeql2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgeqlf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgeqp3(int *m, int *n, z *a, int *lda, int *jpvt, z *tau, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgeqr2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgeqr2p(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgeqrf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgeqrfp(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgeqrt(int *m, int *n, int *nb, z *a, int *lda, z *t, int *ldt, z *work, int *info) nogil + +cdef void zgeqrt2(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil + +cdef void zgeqrt3(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil + +cdef void zgerfs(char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgerq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgerqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgesc2(int *n, z *a, int *lda, z *rhs, int *ipiv, int *jpiv, d *scale) nogil + +cdef void zgesdd(char *jobz, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil + +cdef void zgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zgesvd(char *jobu, char *jobvt, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgesvx(char *fact, char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgetc2(int *n, z *a, int *lda, int *ipiv, int *jpiv, int *info) nogil + +cdef void zgetf2(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil + +cdef void zgetrf(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil + +cdef void 
zgetri(int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zgetrs(char *trans, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zggbak(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, z *v, int *ldv, int *info) nogil + +cdef void zggbal(char *job, int *n, z *a, int *lda, z *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil + +cdef void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil + +cdef void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil + +cdef void zggev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, bint *bwork, int *info) nogil + +cdef void zggglm(int *n, int *m, int *p, z *a, int *lda, z *b, int *ldb, z *d, z *x, z *y, z *work, int *lwork, int *info) nogil + +cdef void zgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *info) nogil + +cdef void zgglse(int *m, int *n, int *p, z *a, int *lda, z *b, int *ldb, z *c, z *d, z *x, z *work, int *lwork, int *info) nogil + +cdef void zggqrf(int *n, int *m, int *p, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil + +cdef void zggrqf(int *m, int *p, int *n, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil + +cdef void zgtcon(char *norm, int *n, z *dl, z *d, z *du, z *du2, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zgtrfs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgtsv(int *n, int *nrhs, z *dl, z *d, z *du, z *b, int *ldb, int *info) nogil + +cdef void zgtsvx(char *fact, char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgttrf(int *n, z *dl, z *d, z *du, z *du2, int *ipiv, int *info) nogil + +cdef void zgttrs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zgtts2(int *itrans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb) nogil + +cdef void zhbev(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil + +cdef void zhbevd(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhbevx(char *jobz, char *range, char 
*uplo, int *n, int *kd, z *ab, int *ldab, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhbgst(char *vect, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *x, int *ldx, z *work, d *rwork, int *info) nogil + +cdef void zhbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil + +cdef void zhbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhbtrd(char *vect, char *uplo, int *n, int *kd, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *work, int *info) nogil + +cdef void zhecon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zheequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil + +cdef void zheev(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zheevd(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zheevr(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zheevx(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhegs2(int *itype, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, int *info) nogil + +cdef void zhegst(int *itype, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, int *info) nogil + +cdef void zhegv(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zhegvd(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhegvx(int *itype, char *jobz, char *range, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zherfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zhesv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil + +cdef void zhesvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zheswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil + +cdef void zhetd2(char *uplo, int *n, z 
*a, int *lda, d *d, d *e, z *tau, int *info) nogil + +cdef void zhetf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil + +cdef void zhetrd(char *uplo, int *n, z *a, int *lda, d *d, d *e, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zhetrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zhetri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil + +cdef void zhetri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zhetri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil + +cdef void zhetrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zhetrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil + +cdef void zhfrk(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c) nogil + +cdef void zhgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *t, int *ldt, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zhpcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zhpev(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil + +cdef void zhpevd(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhpevx(char *jobz, char *range, char *uplo, int *n, z *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhpgst(int *itype, char *uplo, int *n, z *ap, z *bp, int *info) nogil + +cdef void zhpgv(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil + +cdef void zhpgvd(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhpgvx(int *itype, char *jobz, char *range, char *uplo, int *n, z *ap, z *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zhpsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zhpsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zhptrd(char *uplo, int *n, z *ap, d *d, d *e, z *tau, int *info) nogil + +cdef void zhptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil + +cdef void zhptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil + +cdef void zhptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zhsein(char *side, char *eigsrc, char *initv, bint *select, int *n, z *h, int *ldh, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *ifaill, int *ifailr, int *info) nogil + +cdef void zhseqr(char *job, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, z 
*z, int *ldz, z *work, int *lwork, int *info) nogil + +cdef void zlabrd(int *m, int *n, int *nb, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *x, int *ldx, z *y, int *ldy) nogil + +cdef void zlacgv(int *n, z *x, int *incx) nogil + +cdef void zlacn2(int *n, z *v, z *x, d *est, int *kase, int *isave) nogil + +cdef void zlacon(int *n, z *v, z *x, d *est, int *kase) nogil + +cdef void zlacp2(char *uplo, int *m, int *n, d *a, int *lda, z *b, int *ldb) nogil + +cdef void zlacpy(char *uplo, int *m, int *n, z *a, int *lda, z *b, int *ldb) nogil + +cdef void zlacrm(int *m, int *n, z *a, int *lda, d *b, int *ldb, z *c, int *ldc, d *rwork) nogil + +cdef void zlacrt(int *n, z *cx, int *incx, z *cy, int *incy, z *c, z *s) nogil + +cdef z zladiv(z *x, z *y) nogil + +cdef void zlaed0(int *qsiz, int *n, d *d, d *e, z *q, int *ldq, z *qstore, int *ldqs, d *rwork, int *iwork, int *info) nogil + +cdef void zlaed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, z *q, int *ldq, d *rho, int *indxq, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, z *work, d *rwork, int *iwork, int *info) nogil + +cdef void zlaed8(int *k, int *n, int *qsiz, z *q, int *ldq, d *d, d *rho, int *cutpnt, d *z, d *dlamda, z *q2, int *ldq2, d *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, d *givnum, int *info) nogil + +cdef void zlaein(bint *rightv, bint *noinit, int *n, z *h, int *ldh, z *w, z *v, z *b, int *ldb, d *rwork, d *eps3, d *smlnum, int *info) nogil + +cdef void zlaesy(z *a, z *b, z *c, z *rt1, z *rt2, z *evscal, z *cs1, z *sn1) nogil + +cdef void zlaev2(z *a, z *b, z *c, d *rt1, d *rt2, d *cs1, z *sn1) nogil + +cdef void zlag2c(int *m, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil + +cdef void zlags2(bint *upper, d *a1, z *a2, d *a3, d *b1, z *b2, d *b3, d *csu, z *snu, d *csv, z *snv, d *csq, z *snq) nogil + +cdef void zlagtm(char *trans, int *n, int *nrhs, d *alpha, z *dl, z *d, z *du, z *x, int *ldx, d *beta, z *b, int *ldb) nogil + +cdef void zlahef(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil + +cdef void zlahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, int *info) nogil + +cdef void zlahr2(int *n, int *k, int *nb, z *a, int *lda, z *tau, z *t, int *ldt, z *y, int *ldy) nogil + +cdef void zlaic1(int *job, int *j, z *x, d *sest, z *w, z *gamma, d *sestpr, z *s, z *c) nogil + +cdef void zlals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *rwork, int *info) nogil + +cdef void zlalsa(int *icompq, int *smlsiz, int *n, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *rwork, int *iwork, int *info) nogil + +cdef void zlalsd(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, z *b, int *ldb, d *rcond, int *rank, z *work, d *rwork, int *iwork, int *info) nogil + +cdef d zlangb(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, d *work) nogil + +cdef d zlange(char *norm, int *m, int *n, z *a, int *lda, d *work) nogil + +cdef d zlangt(char *norm, int *n, z *dl, z *d, z *du) nogil + +cdef d zlanhb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil + +cdef d zlanhe(char 
*norm, char *uplo, int *n, z *a, int *lda, d *work) nogil + +cdef d zlanhf(char *norm, char *transr, char *uplo, int *n, z *a, d *work) nogil + +cdef d zlanhp(char *norm, char *uplo, int *n, z *ap, d *work) nogil + +cdef d zlanhs(char *norm, int *n, z *a, int *lda, d *work) nogil + +cdef d zlanht(char *norm, int *n, d *d, z *e) nogil + +cdef d zlansb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil + +cdef d zlansp(char *norm, char *uplo, int *n, z *ap, d *work) nogil + +cdef d zlansy(char *norm, char *uplo, int *n, z *a, int *lda, d *work) nogil + +cdef d zlantb(char *norm, char *uplo, char *diag, int *n, int *k, z *ab, int *ldab, d *work) nogil + +cdef d zlantp(char *norm, char *uplo, char *diag, int *n, z *ap, d *work) nogil + +cdef d zlantr(char *norm, char *uplo, char *diag, int *m, int *n, z *a, int *lda, d *work) nogil + +cdef void zlapll(int *n, z *x, int *incx, z *y, int *incy, d *ssmin) nogil + +cdef void zlapmr(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil + +cdef void zlapmt(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil + +cdef void zlaqgb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil + +cdef void zlaqge(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil + +cdef void zlaqhb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqhe(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqhp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqp2(int *m, int *n, int *offset, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *work) nogil + +cdef void zlaqps(int *m, int *n, int *offset, int *nb, int *kb, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *auxv, z *f, int *ldf) nogil + +cdef void zlaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil + +cdef void zlaqr1(int *n, z *h, int *ldh, z *s1, z *s2, z *v) nogil + +cdef void zlaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil + +cdef void zlaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil + +cdef void zlaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil + +cdef void zlaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, z *s, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, z *v, int *ldv, z *u, int *ldu, int *nv, z *wv, int *ldwv, int *nh, z *wh, int *ldwh) nogil + +cdef void zlaqsb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqsp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqsy(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlar1v(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, z *z, bint *wantnc, int *negcnt, d *ztz, d 
*mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil + +cdef void zlar2v(int *n, z *x, z *y, z *z, int *incx, d *c, z *s, int *incc) nogil + +cdef void zlarcm(int *m, int *n, d *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *rwork) nogil + +cdef void zlarf(char *side, int *m, int *n, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil + +cdef void zlarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil + +cdef void zlarfg(int *n, z *alpha, z *x, int *incx, z *tau) nogil + +cdef void zlarfgp(int *n, z *alpha, z *x, int *incx, z *tau) nogil + +cdef void zlarft(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil + +cdef void zlarfx(char *side, int *m, int *n, z *v, z *tau, z *c, int *ldc, z *work) nogil + +cdef void zlargv(int *n, z *x, int *incx, z *y, int *incy, d *c, int *incc) nogil + +cdef void zlarnv(int *idist, int *iseed, int *n, z *x) nogil + +cdef void zlarrv(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, z *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil + +cdef void zlartg(z *f, z *g, d *cs, z *sn, z *r) nogil + +cdef void zlartv(int *n, z *x, int *incx, z *y, int *incy, d *c, z *s, int *incc) nogil + +cdef void zlarz(char *side, int *m, int *n, int *l, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil + +cdef void zlarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil + +cdef void zlarzt(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil + +cdef void zlascl(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, z *a, int *lda, int *info) nogil + +cdef void zlaset(char *uplo, int *m, int *n, z *alpha, z *beta, z *a, int *lda) nogil + +cdef void zlasr(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, z *a, int *lda) nogil + +cdef void zlassq(int *n, z *x, int *incx, d *scale, d *sumsq) nogil + +cdef void zlaswp(int *n, z *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil + +cdef void zlasyf(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil + +cdef void zlat2c(char *uplo, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil + +cdef void zlatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, z *ab, int *ldab, z *x, d *scale, d *cnorm, int *info) nogil + +cdef void zlatdf(int *ijob, int *n, z *z, int *ldz, z *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil + +cdef void zlatps(char *uplo, char *trans, char *diag, char *normin, int *n, z *ap, z *x, d *scale, d *cnorm, int *info) nogil + +cdef void zlatrd(char *uplo, int *n, int *nb, z *a, int *lda, d *e, z *tau, z *w, int *ldw) nogil + +cdef void zlatrs(char *uplo, char *trans, char *diag, char *normin, int *n, z *a, int *lda, z *x, d *scale, d *cnorm, int *info) nogil + +cdef void zlatrz(int *m, int *n, int *l, z *a, int *lda, z *tau, z *work) nogil + +cdef void zlauu2(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zlauum(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zpbcon(char *uplo, int *n, int *kd, z *ab, int *ldab, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zpbequ(char *uplo, int *n, 
int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil + +cdef void zpbrfs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpbstf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil + +cdef void zpbsv(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil + +cdef void zpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpbtf2(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil + +cdef void zpbtrf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil + +cdef void zpbtrs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil + +cdef void zpftrf(char *transr, char *uplo, int *n, z *a, int *info) nogil + +cdef void zpftri(char *transr, char *uplo, int *n, z *a, int *info) nogil + +cdef void zpftrs(char *transr, char *uplo, int *n, int *nrhs, z *a, z *b, int *ldb, int *info) nogil + +cdef void zpocon(char *uplo, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zpoequ(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil + +cdef void zpoequb(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil + +cdef void zporfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil + +cdef void zposvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpotf2(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zpotrf(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zpotri(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zpotrs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil + +cdef void zppcon(char *uplo, int *n, z *ap, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zppequ(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, int *info) nogil + +cdef void zpprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zppsv(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil + +cdef void zppsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpptrf(char *uplo, int *n, z *ap, int *info) nogil + +cdef void zpptri(char *uplo, int *n, z *ap, int *info) nogil + +cdef void zpptrs(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil + +cdef void zpstf2(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil + +cdef void zpstrf(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil + +cdef void zptcon(int *n, d *d, z *e, d *anorm, d *rcond, d *rwork, int *info) nogil + +cdef void zpteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil + +cdef 
void zptrfs(char *uplo, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zptsv(int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil + +cdef void zptsvx(char *fact, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpttrf(int *n, d *d, z *e, int *info) nogil + +cdef void zpttrs(char *uplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil + +cdef void zptts2(int *iuplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb) nogil + +cdef void zrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, z *s) nogil + +cdef void zspcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zspmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zspr(char *uplo, int *n, z *alpha, z *x, int *incx, z *ap) nogil + +cdef void zsprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zspsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zspsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zsptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil + +cdef void zsptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil + +cdef void zsptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zstedc(char *compz, int *n, d *d, d *e, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zstegr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void zstein(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, z *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil + +cdef void zstemr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, z *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void zsteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil + +cdef void zsycon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zsyconv(char *uplo, char *way, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil + +cdef void zsyequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil + +cdef void zsymv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zsyr(char *uplo, int *n, z *alpha, z *x, int *incx, z *a, int *lda) nogil + +cdef void zsyrfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zsysv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil + +cdef void zsysvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d 
*ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zsyswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil + +cdef void zsytf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil + +cdef void zsytrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zsytri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil + +cdef void zsytri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zsytri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil + +cdef void zsytrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zsytrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil + +cdef void ztbcon(char *norm, char *uplo, char *diag, int *n, int *kd, z *ab, int *ldab, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void ztbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void ztbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil + +cdef void ztfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, z *alpha, z *a, z *b, int *ldb) nogil + +cdef void ztftri(char *transr, char *uplo, char *diag, int *n, z *a, int *info) nogil + +cdef void ztfttp(char *transr, char *uplo, int *n, z *arf, z *ap, int *info) nogil + +cdef void ztfttr(char *transr, char *uplo, int *n, z *arf, z *a, int *lda, int *info) nogil + +cdef void ztgevc(char *side, char *howmny, bint *select, int *n, z *s, int *lds, z *p, int *ldp, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil + +cdef void ztgex2(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *j1, int *info) nogil + +cdef void ztgexc(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *ifst, int *ilst, int *info) nogil + +cdef void ztgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, int *m, d *pl, d *pr, d *dif, z *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ztgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, z *a, int *lda, z *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, z *u, int *ldu, z *v, int *ldv, z *q, int *ldq, z *work, int *ncycle, int *info) nogil + +cdef void ztgsna(char *job, char *howmny, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *dif, int *mm, int *m, z *work, int *lwork, int *iwork, int *info) nogil + +cdef void ztgsy2(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *info) nogil + +cdef void ztgsyl(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *dif, z *work, int *lwork, int *iwork, int *info) nogil + +cdef void ztpcon(char *norm, char *uplo, char *diag, int *n, z *ap, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void ztpmqrt(char *side, char *trans, int *m, int 
*n, int *k, int *l, int *nb, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *info) nogil + +cdef void ztpqrt(int *m, int *n, int *l, int *nb, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, z *work, int *info) nogil + +cdef void ztpqrt2(int *m, int *n, int *l, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, int *info) nogil + +cdef void ztprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *ldwork) nogil + +cdef void ztprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void ztptri(char *uplo, char *diag, int *n, z *ap, int *info) nogil + +cdef void ztptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil + +cdef void ztpttf(char *transr, char *uplo, int *n, z *ap, z *arf, int *info) nogil + +cdef void ztpttr(char *uplo, int *n, z *ap, z *a, int *lda, int *info) nogil + +cdef void ztrcon(char *norm, char *uplo, char *diag, int *n, z *a, int *lda, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void ztrevc(char *side, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil + +cdef void ztrexc(char *compq, int *n, z *t, int *ldt, z *q, int *ldq, int *ifst, int *ilst, int *info) nogil + +cdef void ztrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void ztrsen(char *job, char *compq, bint *select, int *n, z *t, int *ldt, z *q, int *ldq, z *w, int *m, d *s, d *sep, z *work, int *lwork, int *info) nogil + +cdef void ztrsna(char *job, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *sep, int *mm, int *m, z *work, int *ldwork, d *rwork, int *info) nogil + +cdef void ztrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *scale, int *info) nogil + +cdef void ztrti2(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil + +cdef void ztrtri(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil + +cdef void ztrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil + +cdef void ztrttf(char *transr, char *uplo, int *n, z *a, int *lda, z *arf, int *info) nogil + +cdef void ztrttp(char *uplo, int *n, z *a, int *lda, z *ap, int *info) nogil + +cdef void ztzrzf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zunbdb(char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, d *phi, z *taup1, z *taup2, z *tauq1, z *tauq2, z *work, int *lwork, int *info) nogil + +cdef void zuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *info) nogil + +cdef void zung2l(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zung2r(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef 
void zungbr(char *vect, int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zunghr(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungl2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zunglq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungql(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungqr(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungr2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zungrq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungtr(char *uplo, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zunm2l(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunm2r(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunmbr(char *vect, char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunml2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunmlq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmql(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmqr(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmr2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunmr3(char *side, char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunmrq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmrz(char *side, char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmtr(char *side, char *uplo, char *trans, int *m, int *n, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zupgtr(char *uplo, int *n, z *ap, z *tau, z *q, int *ldq, z *work, int *info) nogil + +cdef void zupmtr(char *side, char *uplo, char *trans, int *m, int *n, z *ap, z *tau, z *c, int *ldc, z *work, int *info) nogil diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_lapack.so b/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_lapack.so new file mode 100755 index 0000000..a76d7ca Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/cython_lapack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp.py new file mode 100644 index 0000000..ea45f82 --- 
/dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp.py @@ -0,0 +1,1431 @@ +# +# Author: Pearu Peterson, March 2002 +# +# additions by Travis Oliphant, March 2002 +# additions by Eric Jones, June 2002 +# additions by Johannes Loehnert, June 2006 +# additions by Bart Vandereycken, June 2006 +# additions by Andrew D Straw, May 2007 +# additions by Tiziano Zito, November 2008 +# +# April 2010: Functions for LU, QR, SVD, Schur and Cholesky decompositions were +# moved to their own files. Still in this file are functions for eigenstuff +# and for the Hessenberg form. + +from __future__ import division, print_function, absolute_import + +__all__ = ['eig', 'eigvals', 'eigh', 'eigvalsh', + 'eig_banded', 'eigvals_banded', + 'eigh_tridiagonal', 'eigvalsh_tridiagonal', 'hessenberg', 'cdf2rdf'] + +import numpy +from numpy import (array, isfinite, inexact, nonzero, iscomplexobj, cast, + flatnonzero, conj, asarray, argsort, empty, newaxis, + argwhere, iscomplex, eye, zeros, einsum) +# Local imports +from scipy._lib.six import xrange +from scipy._lib._util import _asarray_validated +from scipy._lib.six import string_types +from .misc import LinAlgError, _datacopied, norm +from .lapack import get_lapack_funcs, _compute_lwork + + +_I = cast['F'](1j) + + +def _make_complex_eigvecs(w, vin, dtype): + """ + Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output + """ + # - see LAPACK man page DGGEV at ALPHAI + v = numpy.array(vin, dtype=dtype) + m = (w.imag > 0) + m[:-1] |= (w.imag[1:] < 0) # workaround for LAPACK bug, cf. ticket #709 + for i in flatnonzero(m): + v.imag[:, i] = vin[:, i+1] + conj(v[:, i], v[:, i+1]) + return v + + +def _make_eigvals(alpha, beta, homogeneous_eigvals): + if homogeneous_eigvals: + if beta is None: + return numpy.vstack((alpha, numpy.ones_like(alpha))) + else: + return numpy.vstack((alpha, beta)) + else: + if beta is None: + return alpha + else: + w = numpy.empty_like(alpha) + alpha_zero = (alpha == 0) + beta_zero = (beta == 0) + beta_nonzero = ~beta_zero + w[beta_nonzero] = alpha[beta_nonzero]/beta[beta_nonzero] + # Use numpy.inf for complex values too since + # 1/numpy.inf = 0, i.e. it correctly behaves as projective + # infinity. 
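+ # Editor's note, spelling out the convention the next lines implement: + # alpha=2, beta=1 -> w=2; alpha=1, beta=0 -> w=inf (projective infinity); + # alpha=0, beta=0 -> the pencil is singular, so w is set to nan below.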
+ w[~alpha_zero & beta_zero] = numpy.inf + if numpy.all(alpha.imag == 0): + w[alpha_zero & beta_zero] = numpy.nan + else: + w[alpha_zero & beta_zero] = complex(numpy.nan, numpy.nan) + return w + + +def _geneig(a1, b1, left, right, overwrite_a, overwrite_b, + homogeneous_eigvals): + ggev, = get_lapack_funcs(('ggev',), (a1, b1)) + cvl, cvr = left, right + res = ggev(a1, b1, lwork=-1) + lwork = res[-2][0].real.astype(numpy.int) + if ggev.typecode in 'cz': + alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork, + overwrite_a, overwrite_b) + w = _make_eigvals(alpha, beta, homogeneous_eigvals) + else: + alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, + lwork, overwrite_a, + overwrite_b) + alpha = alphar + _I * alphai + w = _make_eigvals(alpha, beta, homogeneous_eigvals) + _check_info(info, 'generalized eig algorithm (ggev)') + + only_real = numpy.all(w.imag == 0.0) + if not (ggev.typecode in 'cz' or only_real): + t = w.dtype.char + if left: + vl = _make_complex_eigvecs(w, vl, t) + if right: + vr = _make_complex_eigvecs(w, vr, t) + + # the eigenvectors returned by the lapack function are NOT normalized + for i in xrange(vr.shape[0]): + if right: + vr[:, i] /= norm(vr[:, i]) + if left: + vl[:, i] /= norm(vl[:, i]) + + if not (left or right): + return w + if left: + if right: + return w, vl, vr + return w, vl + return w, vr + + +def eig(a, b=None, left=False, right=True, overwrite_a=False, + overwrite_b=False, check_finite=True, homogeneous_eigvals=False): + """ + Solve an ordinary or generalized eigenvalue problem of a square matrix. + + Find eigenvalues w and right or left eigenvectors of a general matrix:: + + a vr[:,i] = w[i] b vr[:,i] + a.H vl[:,i] = w[i].conj() b.H vl[:,i] + + where ``.H`` is the Hermitian conjugation. + + Parameters + ---------- + a : (M, M) array_like + A complex or real matrix whose eigenvalues and eigenvectors + will be computed. + b : (M, M) array_like, optional + Right-hand side matrix in a generalized eigenvalue problem. + Default is None, identity matrix is assumed. + left : bool, optional + Whether to calculate and return left eigenvectors. Default is False. + right : bool, optional + Whether to calculate and return right eigenvectors. Default is True. + overwrite_a : bool, optional + Whether to overwrite `a`; may improve performance. Default is False. + overwrite_b : bool, optional + Whether to overwrite `b`; may improve performance. Default is False. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + homogeneous_eigvals : bool, optional + If True, return the eigenvalues in homogeneous coordinates. + In this case ``w`` is a (2, M) array so that:: + + w[1,i] a vr[:,i] = w[0,i] b vr[:,i] + + Default is False. + + Returns + ------- + w : (M,) or (2, M) double or complex ndarray + The eigenvalues, each repeated according to its + multiplicity. The shape is (M,) unless + ``homogeneous_eigvals=True``. + vl : (M, M) double or complex ndarray + The normalized left eigenvector corresponding to the eigenvalue + ``w[i]`` is the column vl[:,i]. Only returned if ``left=True``. + vr : (M, M) double or complex ndarray + The normalized right eigenvector corresponding to the eigenvalue + ``w[i]`` is the column ``vr[:,i]``. Only returned if ``right=True``. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge. 
+ + See Also + -------- + eigvals : eigenvalues of general arrays + eigh : Eigenvalues and right eigenvectors for symmetric/Hermitian arrays. + eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian + band matrices + eigh_tridiagonal : eigenvalues and right eigenvectors for + symmetric/Hermitian tridiagonal matrices + + Examples + -------- + >>> from scipy import linalg + >>> a = np.array([[0., -1.], [1., 0.]]) + >>> linalg.eigvals(a) + array([0.+1.j, 0.-1.j]) + + >>> b = np.array([[0., 1.], [1., 1.]]) + >>> linalg.eigvals(a, b) + array([ 1.+0.j, -1.+0.j]) + + >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]]) + >>> linalg.eigvals(a, homogeneous_eigvals=True) + array([[3.+0.j, 8.+0.j, 7.+0.j], + [1.+0.j, 1.+0.j, 1.+0.j]]) + + >>> a = np.array([[0., -1.], [1., 0.]]) + >>> linalg.eigvals(a) == linalg.eig(a)[0] + array([ True, True]) + >>> linalg.eig(a, left=True, right=False)[1] # normalized left eigenvector + array([[-0.70710678+0.j , -0.70710678-0.j ], + [-0. +0.70710678j, -0. -0.70710678j]]) + >>> linalg.eig(a, left=False, right=True)[1] # normalized right eigenvector + array([[0.70710678+0.j , 0.70710678-0.j ], + [0. -0.70710678j, 0. +0.70710678j]]) + + """ + a1 = _asarray_validated(a, check_finite=check_finite) + if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: + raise ValueError('expected square matrix') + overwrite_a = overwrite_a or (_datacopied(a1, a)) + if b is not None: + b1 = _asarray_validated(b, check_finite=check_finite) + overwrite_b = overwrite_b or _datacopied(b1, b) + if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]: + raise ValueError('expected square matrix') + if b1.shape != a1.shape: + raise ValueError('a and b must have the same shape') + return _geneig(a1, b1, left, right, overwrite_a, overwrite_b, + homogeneous_eigvals) + + geev, geev_lwork = get_lapack_funcs(('geev', 'geev_lwork'), (a1,)) + compute_vl, compute_vr = left, right + + lwork = _compute_lwork(geev_lwork, a1.shape[0], + compute_vl=compute_vl, + compute_vr=compute_vr) + + if geev.typecode in 'cz': + w, vl, vr, info = geev(a1, lwork=lwork, + compute_vl=compute_vl, + compute_vr=compute_vr, + overwrite_a=overwrite_a) + w = _make_eigvals(w, None, homogeneous_eigvals) + else: + wr, wi, vl, vr, info = geev(a1, lwork=lwork, + compute_vl=compute_vl, + compute_vr=compute_vr, + overwrite_a=overwrite_a) + t = {'f': 'F', 'd': 'D'}[wr.dtype.char] + w = wr + _I * wi + w = _make_eigvals(w, None, homogeneous_eigvals) + + _check_info(info, 'eig algorithm (geev)', + positive='did not converge (only eigenvalues ' + 'with order >= %d have converged)') + + only_real = numpy.all(w.imag == 0.0) + if not (geev.typecode in 'cz' or only_real): + t = w.dtype.char + if left: + vl = _make_complex_eigvecs(w, vl, t) + if right: + vr = _make_complex_eigvecs(w, vr, t) + if not (left or right): + return w + if left: + if right: + return w, vl, vr + return w, vl + return w, vr + + +def eigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False, + overwrite_b=False, turbo=True, eigvals=None, type=1, + check_finite=True): + """ + Solve an ordinary or generalized eigenvalue problem for a complex + Hermitian or real symmetric matrix. + + Find eigenvalues w and optionally eigenvectors v of matrix `a`, where + `b` is positive definite:: + + a v[:,i] = w[i] b v[:,i] + v[i,:].conj() a v[:,i] = w[i] + v[i,:].conj() b v[:,i] = 1 + + Parameters + ---------- + a : (M, M) array_like + A complex Hermitian or real symmetric matrix whose eigenvalues and + eigenvectors will be computed.
+ b : (M, M) array_like, optional + A complex Hermitian or real symmetric positive definite matrix. + If omitted, identity matrix is assumed. + lower : bool, optional + Whether the pertinent array data is taken from the lower or upper + triangle of `a`. (Default: lower) + eigvals_only : bool, optional + Whether to calculate only eigenvalues and no eigenvectors. + (Default: both are calculated) + turbo : bool, optional + Use divide and conquer algorithm (faster but expensive in memory, + only for generalized eigenvalue problem and if eigvals=None) + eigvals : tuple (lo, hi), optional + Indexes of the smallest and largest (in ascending order) eigenvalues + and corresponding eigenvectors to be returned: 0 <= lo <= hi <= M-1. + If omitted, all eigenvalues and eigenvectors are returned. + type : int, optional + Specifies the problem type to be solved: + + type = 1: a v[:,i] = w[i] b v[:,i] + + type = 2: a b v[:,i] = w[i] v[:,i] + + type = 3: b a v[:,i] = w[i] v[:,i] + overwrite_a : bool, optional + Whether to overwrite data in `a` (may improve performance) + overwrite_b : bool, optional + Whether to overwrite data in `b` (may improve performance) + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + w : (N,) float ndarray + The N (1<=N<=M) selected eigenvalues, in ascending order, each + repeated according to its multiplicity. + v : (M, N) complex ndarray + (if eigvals_only == False) + + The normalized selected eigenvector corresponding to the + eigenvalue w[i] is the column v[:,i]. + + Normalization: + + type 1 and 3: v.conj() a v = w + + type 2: inv(v).conj() a inv(v) = w + + type = 1 or 2: v.conj() b v = I + + type = 3: v.conj() inv(b) v = I + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge, + an error occurred, or b matrix is not positive definite. Note that + if input matrices are not symmetric or hermitian, no error is reported + but results will be wrong. + + See Also + -------- + eigvalsh : eigenvalues of symmetric or Hermitian arrays + eig : eigenvalues and right eigenvectors for non-symmetric arrays + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eigh_tridiagonal : eigenvalues and right eigenvectors for + symmetric/Hermitian tridiagonal matrices + + Notes + ----- + This function does not check the input array for being hermitian/symmetric + in order to allow for representing arrays with only their upper/lower + triangular parts.
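+ + For the generalized problem, pass ``b`` as well; a quick sketch (the + matrix values are illustrative only):: + + >>> import numpy as np + >>> A = np.array([[1., 2.], [2., 5.]]) + >>> B = np.array([[2., 0.], [0., 3.]]) # positive definite + >>> w, v = eigh(A, B) + >>> np.allclose(A.dot(v), B.dot(v) * w) # A v = w B v, column by column + True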
+ + Examples + -------- + >>> from scipy.linalg import eigh + >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]]) + >>> w, v = eigh(A) + >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4))) + True + + """ + a1 = _asarray_validated(a, check_finite=check_finite) + if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: + raise ValueError('expected square matrix') + overwrite_a = overwrite_a or (_datacopied(a1, a)) + if iscomplexobj(a1): + cplx = True + else: + cplx = False + if b is not None: + b1 = _asarray_validated(b, check_finite=check_finite) + overwrite_b = overwrite_b or _datacopied(b1, b) + if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]: + raise ValueError('expected square matrix') + + if b1.shape != a1.shape: + raise ValueError("wrong b dimensions %s, should " + "be %s" % (str(b1.shape), str(a1.shape))) + if iscomplexobj(b1): + cplx = True + else: + cplx = cplx or False + else: + b1 = None + + # Set job for fortran routines + _job = (eigvals_only and 'N') or 'V' + + # port eigenvalue range from python to fortran convention + if eigvals is not None: + lo, hi = eigvals + if lo < 0 or hi >= a1.shape[0]: + raise ValueError('The eigenvalue range specified is not valid.\n' + 'Valid range is [%s,%s]' % (0, a1.shape[0]-1)) + lo += 1 + hi += 1 + eigvals = (lo, hi) + + # set lower + if lower: + uplo = 'L' + else: + uplo = 'U' + + # fix prefix for lapack routines + if cplx: + pfx = 'he' + else: + pfx = 'sy' + + # Standard Eigenvalue Problem + # Use '*evr' routines + # FIXME: implement calculation of optimal lwork + # for all lapack routines + if b1 is None: + driver = pfx+'evr' + (evr,) = get_lapack_funcs((driver,), (a1,)) + if eigvals is None: + w, v, info = evr(a1, uplo=uplo, jobz=_job, range="A", il=1, + iu=a1.shape[0], overwrite_a=overwrite_a) + else: + (lo, hi) = eigvals + w_tot, v, info = evr(a1, uplo=uplo, jobz=_job, range="I", + il=lo, iu=hi, overwrite_a=overwrite_a) + w = w_tot[0:hi-lo+1] + + # Generalized Eigenvalue Problem + else: + # Use '*gvx' routines if range is specified + if eigvals is not None: + driver = pfx+'gvx' + (gvx,) = get_lapack_funcs((driver,), (a1, b1)) + (lo, hi) = eigvals + w_tot, v, ifail, info = gvx(a1, b1, uplo=uplo, iu=hi, + itype=type, jobz=_job, il=lo, + overwrite_a=overwrite_a, + overwrite_b=overwrite_b) + w = w_tot[0:hi-lo+1] + # Use '*gvd' routine if turbo is on and no eigvals are specified + elif turbo: + driver = pfx+'gvd' + (gvd,) = get_lapack_funcs((driver,), (a1, b1)) + v, w, info = gvd(a1, b1, uplo=uplo, itype=type, jobz=_job, + overwrite_a=overwrite_a, + overwrite_b=overwrite_b) + # Use '*gv' routine if turbo is off and no eigvals are specified + else: + driver = pfx+'gv' + (gv,) = get_lapack_funcs((driver,), (a1, b1)) + v, w, info = gv(a1, b1, uplo=uplo, itype=type, jobz=_job, + overwrite_a=overwrite_a, + overwrite_b=overwrite_b) + + # Check if we had a successful exit + if info == 0: + if eigvals_only: + return w + else: + return w, v + _check_info(info, driver, positive=False) # triage more specifically + if info > 0 and b1 is None: + raise LinAlgError("unrecoverable internal error.") + + # The algorithm failed to converge. + elif 0 < info <= b1.shape[0]: + if eigvals is not None: + # ifail holds the 1-based indices of the eigenvectors that + # failed; report them 0-based (parenthesized so that %s is + # formatted before the subtraction, not after) + raise LinAlgError("the eigenvectors %s failed to" + " converge." % (ifail[ifail > 0] - 1)) + else: + raise LinAlgError("internal fortran routine failed to converge: " + "%i off-diagonal elements of an " + "intermediate tridiagonal form did not converge" + " to zero."
% info) + + # This occurs when b is not positive definite + else: + raise LinAlgError("the leading minor of order %i" + " of 'b' is not positive definite. The" + " factorization of 'b' could not be completed" + " and no eigenvalues or eigenvectors were" + " computed." % (info-b1.shape[0])) + + +_conv_dict = {0: 0, 1: 1, 2: 2, + 'all': 0, 'value': 1, 'index': 2, + 'a': 0, 'v': 1, 'i': 2} + + +def _check_select(select, select_range, max_ev, max_len): + """Check that select is valid, convert to Fortran style.""" + if isinstance(select, string_types): + select = select.lower() + try: + select = _conv_dict[select] + except KeyError: + raise ValueError('invalid argument for select') + vl, vu = 0., 1. + il = iu = 1 + if select != 0: # (non-all) + sr = asarray(select_range) + if sr.ndim != 1 or sr.size != 2 or sr[1] < sr[0]: + raise ValueError('select_range must be a 2-element array-like ' + 'in nondecreasing order') + if select == 1: # (value) + vl, vu = sr + if max_ev == 0: + max_ev = max_len + else: # 2 (index) + if sr.dtype.char.lower() not in 'hilqp': + raise ValueError('when using select="i", select_range must ' + 'contain integers, got dtype %s (%s)' + % (sr.dtype, sr.dtype.char)) + # translate Python (0 ... N-1) into Fortran (1 ... N) with + 1 + il, iu = sr + 1 + if min(il, iu) < 1 or max(il, iu) > max_len: + raise ValueError('select_range out of bounds') + max_ev = iu - il + 1 + return select, vl, vu, il, iu, max_ev + + +def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False, + select='a', select_range=None, max_ev=0, check_finite=True): + """ + Solve real symmetric or complex hermitian band matrix eigenvalue problem. + + Find eigenvalues w and optionally right eigenvectors v of a:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + The matrix a is stored in a_band either in lower diagonal or upper + diagonal ordered form: + + a_band[u + i - j, j] == a[i,j] (if upper form; i <= j) + a_band[ i - j, j] == a[i,j] (if lower form; i >= j) + + where u is the number of bands above the diagonal. + + Example of a_band (shape of a is (6,6), u=2):: + + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * + + Cells marked with * are not used. + + Parameters + ---------- + a_band : (u+1, M) array_like + The bands of the M by M matrix a. + lower : bool, optional + Is the matrix in the lower form. (Default is upper form) + eigvals_only : bool, optional + Compute only the eigenvalues and no eigenvectors. + (Default: calculate also eigenvectors) + overwrite_a_band : bool, optional + Discard data in a_band (may enhance performance) + select : {'a', 'v', 'i'}, optional + Which eigenvalues to calculate + + ====== ======================================== + select calculated + ====== ======================================== + 'a' All eigenvalues + 'v' Eigenvalues in the interval (min, max] + 'i' Eigenvalues with indices min <= i <= max + ====== ======================================== + select_range : (min, max), optional + Range of selected eigenvalues + max_ev : int, optional + For select=='v', maximum number of eigenvalues expected. + For other values of select, has no meaning. + + If in doubt, leave this parameter untouched. + + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs.
+ + Returns + ------- + w : (M,) ndarray + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + v : (M, M) float or complex ndarray + The normalized eigenvector corresponding to the eigenvalue w[i] is + the column v[:,i]. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge. + + See Also + -------- + eigvals_banded : eigenvalues for symmetric/Hermitian band matrices + eig : eigenvalues and right eigenvectors of general arrays. + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eigh_tridiagonal : eigenvalues and right eigenvectors for + symmetric/Hermitian tridiagonal matrices + + Examples + -------- + >>> from scipy.linalg import eig_banded + >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]]) + >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]]) + >>> w, v = eig_banded(Ab, lower=True) + >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4))) + True + >>> w = eig_banded(Ab, lower=True, eigvals_only=True) + >>> w + array([-4.26200532, -2.22987175, 3.95222349, 12.53965359]) + + Request only the eigenvalues between ``[-3, 4]`` + + >>> w, v = eig_banded(Ab, lower=True, select='v', select_range=[-3, 4]) + >>> w + array([-2.22987175, 3.95222349]) + + """ + if eigvals_only or overwrite_a_band: + a1 = _asarray_validated(a_band, check_finite=check_finite) + overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band)) + else: + a1 = array(a_band) + if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all(): + raise ValueError("array must not contain infs or NaNs") + overwrite_a_band = 1 + + if len(a1.shape) != 2: + raise ValueError('expected two-dimensional array') + select, vl, vu, il, iu, max_ev = _check_select( + select, select_range, max_ev, a1.shape[1]) + del select_range + if select == 0: + if a1.dtype.char in 'GFD': + # FIXME: implement this at some point, for now go with builtin values + # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1) + # or by using calc_lwork.f ??? + # lwork = calc_lwork.hbevd(bevd.typecode, a1.shape[0], lower) + internal_name = 'hbevd' + else: # a1.dtype.char in 'fd': + # FIXME: implement this at some point, for now go with builtin values + # see above + # lwork = calc_lwork.sbevd(bevd.typecode, a1.shape[0], lower) + internal_name = 'sbevd' + bevd, = get_lapack_funcs((internal_name,), (a1,)) + w, v, info = bevd(a1, compute_v=not eigvals_only, + lower=lower, overwrite_ab=overwrite_a_band) + else: # select in [1, 2] + if eigvals_only: + max_ev = 1 + # calculate optimal abstol for dsbevx (see manpage) + if a1.dtype.char in 'fF': # single precision + lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),)) + else: + lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),)) + abstol = 2 * lamch('s') + if a1.dtype.char in 'GFD': + internal_name = 'hbevx' + else: # a1.dtype.char in 'gfd' + internal_name = 'sbevx' + bevx, = get_lapack_funcs((internal_name,), (a1,)) + w, v, m, ifail, info = bevx( + a1, vl, vu, il, iu, compute_v=not eigvals_only, mmax=max_ev, + range=select, lower=lower, overwrite_ab=overwrite_a_band, + abstol=abstol) + # crop off w and v + w = w[:m] + if not eigvals_only: + v = v[:, :m] + _check_info(info, internal_name) + + if eigvals_only: + return w + return w, v + + +def eigvals(a, b=None, overwrite_a=False, check_finite=True, + homogeneous_eigvals=False): + """ + Compute eigenvalues from an ordinary or generalized eigenvalue problem.
+ + Find eigenvalues of a general matrix:: + + a vr[:,i] = w[i] b vr[:,i] + + Parameters + ---------- + a : (M, M) array_like + A complex or real matrix whose eigenvalues and eigenvectors + will be computed. + b : (M, M) array_like, optional + Right-hand side matrix in a generalized eigenvalue problem. + If omitted, identity matrix is assumed. + overwrite_a : bool, optional + Whether to overwrite data in a (may improve performance) + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities + or NaNs. + homogeneous_eigvals : bool, optional + If True, return the eigenvalues in homogeneous coordinates. + In this case ``w`` is a (2, M) array so that:: + + w[1,i] a vr[:,i] = w[0,i] b vr[:,i] + + Default is False. + + Returns + ------- + w : (M,) or (2, M) double or complex ndarray + The eigenvalues, each repeated according to its multiplicity + but not in any specific order. The shape is (M,) unless + ``homogeneous_eigvals=True``. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge + + See Also + -------- + eig : eigenvalues and right eigenvectors of general arrays. + eigvalsh : eigenvalues of symmetric or Hermitian arrays + eigvals_banded : eigenvalues for symmetric/Hermitian band matrices + eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal + matrices + + Examples + -------- + >>> from scipy import linalg + >>> a = np.array([[0., -1.], [1., 0.]]) + >>> linalg.eigvals(a) + array([0.+1.j, 0.-1.j]) + + >>> b = np.array([[0., 1.], [1., 1.]]) + >>> linalg.eigvals(a, b) + array([ 1.+0.j, -1.+0.j]) + + >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]]) + >>> linalg.eigvals(a, homogeneous_eigvals=True) + array([[3.+0.j, 8.+0.j, 7.+0.j], + [1.+0.j, 1.+0.j, 1.+0.j]]) + + """ + return eig(a, b=b, left=0, right=0, overwrite_a=overwrite_a, + check_finite=check_finite, + homogeneous_eigvals=homogeneous_eigvals) + + +def eigvalsh(a, b=None, lower=True, overwrite_a=False, + overwrite_b=False, turbo=True, eigvals=None, type=1, + check_finite=True): + """ + Solve an ordinary or generalized eigenvalue problem for a complex + Hermitian or real symmetric matrix. + + Find eigenvalues w of matrix a, where b is positive definite:: + + a v[:,i] = w[i] b v[:,i] + v[i,:].conj() a v[:,i] = w[i] + v[i,:].conj() b v[:,i] = 1 + + Parameters + ---------- + a : (M, M) array_like + A complex Hermitian or real symmetric matrix whose eigenvalues and + eigenvectors will be computed. + b : (M, M) array_like, optional + A complex Hermitian or real symmetric positive definite matrix. + If omitted, identity matrix is assumed. + lower : bool, optional + Whether the pertinent array data is taken from the lower or upper + triangle of `a`. (Default: lower) + turbo : bool, optional + Use divide and conquer algorithm (faster but expensive in memory, + only for generalized eigenvalue problem and if eigvals=None) + eigvals : tuple (lo, hi), optional + Indexes of the smallest and largest (in ascending order) eigenvalues + and corresponding eigenvectors to be returned: 0 <= lo < hi <= M-1. + If omitted, all eigenvalues and eigenvectors are returned.
+ type : int, optional + Specifies the problem type to be solved: + + type = 1: a v[:,i] = w[i] b v[:,i] + + type = 2: a b v[:,i] = w[i] v[:,i] + + type = 3: b a v[:,i] = w[i] v[:,i] + overwrite_a : bool, optional + Whether to overwrite data in `a` (may improve performance) + overwrite_b : bool, optional + Whether to overwrite data in `b` (may improve performance) + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + w : (N,) float ndarray + The N (1<=N<=M) selected eigenvalues, in ascending order, each + repeated according to its multiplicity. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge, + an error occurred, or b matrix is not positive definite. Note that + if input matrices are not symmetric or hermitian, no error is reported + but results will be wrong. + + See Also + -------- + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eigvals : eigenvalues of general arrays + eigvals_banded : eigenvalues for symmetric/Hermitian band matrices + eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal + matrices + + Notes + ----- + This function does not check the input array for being hermitian/symmetric + in order to allow for representing arrays with only their upper/lower + triangular parts. + + Examples + -------- + >>> from scipy.linalg import eigvalsh + >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]]) + >>> w = eigvalsh(A) + >>> w + array([-3.74637491, -0.76263923, 6.08502336, 12.42399079]) + + """ + return eigh(a, b=b, lower=lower, eigvals_only=True, + overwrite_a=overwrite_a, overwrite_b=overwrite_b, + turbo=turbo, eigvals=eigvals, type=type, + check_finite=check_finite) + + +def eigvals_banded(a_band, lower=False, overwrite_a_band=False, + select='a', select_range=None, check_finite=True): + """ + Solve real symmetric or complex hermitian band matrix eigenvalue problem. + + Find eigenvalues w of a:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + The matrix a is stored in a_band either in lower diagonal or upper + diagonal ordered form: + + a_band[u + i - j, j] == a[i,j] (if upper form; i <= j) + a_band[ i - j, j] == a[i,j] (if lower form; i >= j) + + where u is the number of bands above the diagonal. + + Example of a_band (shape of a is (6,6), u=2):: + + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * + + Cells marked with * are not used. + + Parameters + ---------- + a_band : (u+1, M) array_like + The bands of the M by M matrix a. + lower : bool, optional + Is the matrix in the lower form. (Default is upper form) + overwrite_a_band : bool, optional + Discard data in a_band (may enhance performance) + select : {'a', 'v', 'i'}, optional + Which eigenvalues to calculate + + ====== ======================================== + select calculated + ====== ======================================== + 'a' All eigenvalues + 'v' Eigenvalues in the interval (min, max] + 'i' Eigenvalues with indices min <= i <= max + ====== ======================================== + select_range : (min, max), optional + Range of selected eigenvalues + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers.
+ Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + w : (M,) ndarray + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge. + + See Also + -------- + eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian + band matrices + eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal + matrices + eigvals : eigenvalues of general arrays + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eig : eigenvalues and right eigenvectors for non-symmetric arrays + + Examples + -------- + >>> from scipy.linalg import eigvals_banded + >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]]) + >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]]) + >>> w = eigvals_banded(Ab, lower=True) + >>> w + array([-4.26200532, -2.22987175, 3.95222349, 12.53965359]) + """ + return eig_banded(a_band, lower=lower, eigvals_only=1, + overwrite_a_band=overwrite_a_band, select=select, + select_range=select_range, check_finite=check_finite) + + +def eigvalsh_tridiagonal(d, e, select='a', select_range=None, + check_finite=True, tol=0., lapack_driver='auto'): + """ + Solve eigenvalue problem for a real symmetric tridiagonal matrix. + + Find eigenvalues `w` of ``a``:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + For a real symmetric matrix ``a`` with diagonal elements `d` and + off-diagonal elements `e`. + + Parameters + ---------- + d : ndarray, shape (ndim,) + The diagonal elements of the array. + e : ndarray, shape (ndim-1,) + The off-diagonal elements of the array. + select : {'a', 'v', 'i'}, optional + Which eigenvalues to calculate + + ====== ======================================== + select calculated + ====== ======================================== + 'a' All eigenvalues + 'v' Eigenvalues in the interval (min, max] + 'i' Eigenvalues with indices min <= i <= max + ====== ======================================== + select_range : (min, max), optional + Range of selected eigenvalues + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + tol : float + The absolute tolerance to which each eigenvalue is required + (only used when ``lapack_driver='stebz'``). + An eigenvalue (or cluster) is considered to have converged if it + lies in an interval of this width. If <= 0. (default), + the value ``eps*|a|`` is used where eps is the machine precision, + and ``|a|`` is the 1-norm of the matrix ``a``. + lapack_driver : str + LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf', + or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'`` + and 'stebz' otherwise. 'sterf' and 'stev' can only be used when + ``select='a'``. + + Returns + ------- + w : (M,) ndarray + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge. 
+
+    See Also
+    --------
+    eigh_tridiagonal : eigenvalues and right eigenvectors for
+        symmetric/Hermitian tridiagonal matrices
+
+    Examples
+    --------
+    >>> from scipy.linalg import eigvalsh_tridiagonal, eigvalsh
+    >>> d = 3*np.ones(4)
+    >>> e = -1*np.ones(3)
+    >>> w = eigvalsh_tridiagonal(d, e)
+    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
+    >>> w2 = eigvalsh(A)  # Verify with other eigenvalue routines
+    >>> np.allclose(w - w2, np.zeros(4))
+    True
+    """
+    return eigh_tridiagonal(
+        d, e, eigvals_only=True, select=select, select_range=select_range,
+        check_finite=check_finite, tol=tol, lapack_driver=lapack_driver)
+
+
+def eigh_tridiagonal(d, e, eigvals_only=False, select='a', select_range=None,
+                     check_finite=True, tol=0., lapack_driver='auto'):
+    """
+    Solve eigenvalue problem for a real symmetric tridiagonal matrix.
+
+    Find eigenvalues `w` and optionally right eigenvectors `v` of ``a``::
+
+        a v[:,i] = w[i] v[:,i]
+        v.H v = identity
+
+    For a real symmetric matrix ``a`` with diagonal elements `d` and
+    off-diagonal elements `e`.
+
+    Parameters
+    ----------
+    d : ndarray, shape (ndim,)
+        The diagonal elements of the array.
+    e : ndarray, shape (ndim-1,)
+        The off-diagonal elements of the array.
+    select : {'a', 'v', 'i'}, optional
+        Which eigenvalues to calculate
+
+        ======  ========================================
+        select  calculated
+        ======  ========================================
+        'a'     All eigenvalues
+        'v'     Eigenvalues in the interval (min, max]
+        'i'     Eigenvalues with indices min <= i <= max
+        ======  ========================================
+    select_range : (min, max), optional
+        Range of selected eigenvalues
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    tol : float
+        The absolute tolerance to which each eigenvalue is required
+        (only used when 'stebz' is the `lapack_driver`).
+        An eigenvalue (or cluster) is considered to have converged if it
+        lies in an interval of this width. If <= 0. (default),
+        the value ``eps*|a|`` is used where eps is the machine precision,
+        and ``|a|`` is the 1-norm of the matrix ``a``.
+    lapack_driver : str
+        LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf',
+        or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``
+        and 'stebz' otherwise. When 'stebz' is used to find the eigenvalues and
+        ``eigvals_only=False``, then a second LAPACK call (to ``?STEIN``) is
+        used to find the corresponding eigenvectors. 'sterf' can only be
+        used when ``eigvals_only=True`` and ``select='a'``. 'stev' can only
+        be used when ``select='a'``.
+
+    Returns
+    -------
+    w : (M,) ndarray
+        The eigenvalues, in ascending order, each repeated according to its
+        multiplicity.
+    v : (M, M) ndarray
+        The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is
+        the column ``v[:,i]``.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge.
+
+    See Also
+    --------
+    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
+        matrices
+    eig : eigenvalues and right eigenvectors for non-symmetric arrays
+    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
+    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
+        band matrices
+
+    Notes
+    -----
+    This function makes use of LAPACK ``S/DSTEMR`` routines.
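+    When another `lapack_driver` is selected, the ``?STERF``, ``?STEV`` or
+    ``?STEBZ`` routines are used instead (with ``?STEIN`` to obtain the
+    eigenvectors in the ``'stebz'`` case).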
+
+    Examples
+    --------
+    >>> from scipy.linalg import eigh_tridiagonal
+    >>> d = 3*np.ones(4)
+    >>> e = -1*np.ones(3)
+    >>> w, v = eigh_tridiagonal(d, e)
+    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
+    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
+    True
+    """
+    d = _asarray_validated(d, check_finite=check_finite)
+    e = _asarray_validated(e, check_finite=check_finite)
+    for check in (d, e):
+        if check.ndim != 1:
+            raise ValueError('expected one-dimensional array')
+        if check.dtype.char in 'GFD':  # complex
+            raise TypeError('Only real arrays currently supported')
+    if d.size != e.size + 1:
+        raise ValueError('d (%s) must have one more element than e (%s)'
+                         % (d.size, e.size))
+    select, vl, vu, il, iu, _ = _check_select(
+        select, select_range, 0, d.size)
+    if not isinstance(lapack_driver, string_types):
+        raise TypeError('lapack_driver must be str')
+    drivers = ('auto', 'stemr', 'sterf', 'stebz', 'stev')
+    if lapack_driver not in drivers:
+        raise ValueError('lapack_driver must be one of %s, got %s'
+                         % (drivers, lapack_driver))
+    if lapack_driver == 'auto':
+        lapack_driver = 'stemr' if select == 0 else 'stebz'
+    func, = get_lapack_funcs((lapack_driver,), (d, e))
+    compute_v = not eigvals_only
+    if lapack_driver == 'sterf':
+        if select != 0:
+            raise ValueError('sterf can only be used when select == "a"')
+        if not eigvals_only:
+            raise ValueError('sterf can only be used when eigvals_only is '
+                             'True')
+        w, info = func(d, e)
+        m = len(w)
+    elif lapack_driver == 'stev':
+        if select != 0:
+            raise ValueError('stev can only be used when select == "a"')
+        w, v, info = func(d, e, compute_v=compute_v)
+        m = len(w)
+    elif lapack_driver == 'stebz':
+        tol = float(tol)
+        internal_name = 'stebz'
+        stebz, = get_lapack_funcs((internal_name,), (d, e))
+        # If getting eigenvectors, needs to be block-ordered (B) instead of
+        # matrix-ordered (E), and we will reorder later
+        order = 'E' if eigvals_only else 'B'
+        m, w, iblock, isplit, info = stebz(d, e, select, vl, vu, il, iu, tol,
+                                           order)
+    else:   # 'stemr'
+        # ?STEMR annoyingly requires size N instead of N-1
+        e_ = empty(e.size+1, e.dtype)
+        e_[:-1] = e
+        stemr_lwork, = get_lapack_funcs(('stemr_lwork',), (d, e))
+        lwork, liwork, info = stemr_lwork(d, e_, select, vl, vu, il, iu,
+                                          compute_v=compute_v)
+        _check_info(info, 'stemr_lwork')
+        m, w, v, info = func(d, e_, select, vl, vu, il, iu,
+                             compute_v=compute_v, lwork=lwork, liwork=liwork)
+    _check_info(info, lapack_driver + ' (eigh_tridiagonal)')
+    w = w[:m]
+    if eigvals_only:
+        return w
+    else:
+        # Do we still need to compute the eigenvectors?
+        if lapack_driver == 'stebz':
+            func, = get_lapack_funcs(('stein',), (d, e))
+            v, info = func(d, e, w, iblock, isplit)
+            _check_info(info, 'stein (eigh_tridiagonal)',
+                        positive='%d eigenvectors failed to converge')
+            # Convert block-order to matrix-order
+            order = argsort(w)
+            w, v = w[order], v[:, order]
+        else:
+            v = v[:, :m]
+        return w, v
+
+
+def _check_info(info, driver, positive='did not converge (LAPACK info=%d)'):
+    """Check info return value."""
+    if info < 0:
+        raise ValueError('illegal value in argument %d of internal %s'
+                         % (-info, driver))
+    if info > 0 and positive:
+        raise LinAlgError(("%s " + positive) % (driver, info,))
+
+
+def hessenberg(a, calc_q=False, overwrite_a=False, check_finite=True):
+    """
+    Compute Hessenberg form of a matrix.
+
+    The Hessenberg decomposition is::
+
+        A = Q H Q^H
+
+    where `Q` is unitary/orthogonal and `H` has only zero elements below
+    the first sub-diagonal.
+ + Parameters + ---------- + a : (M, M) array_like + Matrix to bring into Hessenberg form. + calc_q : bool, optional + Whether to compute the transformation matrix. Default is False. + overwrite_a : bool, optional + Whether to overwrite `a`; may improve performance. + Default is False. + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + H : (M, M) ndarray + Hessenberg form of `a`. + Q : (M, M) ndarray + Unitary/orthogonal similarity transformation matrix ``A = Q H Q^H``. + Only returned if ``calc_q=True``. + + Examples + -------- + >>> from scipy.linalg import hessenberg + >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) + >>> H, Q = hessenberg(A, calc_q=True) + >>> H + array([[ 2. , -11.65843866, 1.42005301, 0.25349066], + [ -9.94987437, 14.53535354, -5.31022304, 2.43081618], + [ 0. , -1.83299243, 0.38969961, -0.51527034], + [ 0. , 0. , -3.83189513, 1.07494686]]) + >>> np.allclose(Q @ H @ Q.conj().T - A, np.zeros((4, 4))) + True + """ + a1 = _asarray_validated(a, check_finite=check_finite) + if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): + raise ValueError('expected square matrix') + overwrite_a = overwrite_a or (_datacopied(a1, a)) + + # if 2x2 or smaller: already in Hessenberg + if a1.shape[0] <= 2: + if calc_q: + return a1, numpy.eye(a1.shape[0]) + return a1 + + gehrd, gebal, gehrd_lwork = get_lapack_funcs(('gehrd', 'gebal', + 'gehrd_lwork'), (a1,)) + ba, lo, hi, pivscale, info = gebal(a1, permute=0, overwrite_a=overwrite_a) + _check_info(info, 'gebal (hessenberg)', positive=False) + n = len(a1) + + lwork = _compute_lwork(gehrd_lwork, ba.shape[0], lo=lo, hi=hi) + + hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1) + _check_info(info, 'gehrd (hessenberg)', positive=False) + h = numpy.triu(hq, -1) + if not calc_q: + return h + + # use orghr/unghr to compute q + orghr, orghr_lwork = get_lapack_funcs(('orghr', 'orghr_lwork'), (a1,)) + lwork = _compute_lwork(orghr_lwork, n, lo=lo, hi=hi) + + q, info = orghr(a=hq, tau=tau, lo=lo, hi=hi, lwork=lwork, overwrite_a=1) + _check_info(info, 'orghr (hessenberg)', positive=False) + return h, q + + +def cdf2rdf(w, v): + """ + Converts complex eigenvalues ``w`` and eigenvectors ``v`` to real + eigenvalues in a block diagonal form ``wr`` and the associated real + eigenvectors ``vr``, such that:: + + vr @ wr = X @ vr + + continues to hold, where ``X`` is the original array for which ``w`` and + ``v`` are the eigenvalues and eigenvectors. + + .. versionadded:: 1.1.0 + + Parameters + ---------- + w : (..., M) array_like + Complex or real eigenvalues, an array or stack of arrays + + Conjugate pairs must not be interleaved, else the wrong result + will be produced. So ``[1+1j, 1, 1-1j]`` will give a correct result, but + ``[1+1j, 2+1j, 1-1j, 2-1j]`` will not. + + v : (..., M, M) array_like + Complex or real eigenvectors, a square array or stack of square arrays. + + Returns + ------- + wr : (..., M, M) ndarray + Real diagonal block form of eigenvalues + vr : (..., M, M) ndarray + Real eigenvectors associated with ``wr`` + + See Also + -------- + eig : Eigenvalues and right eigenvectors for non-symmetric arrays + rsf2csf : Convert real Schur form to complex Schur form + + Notes + ----- + ``w``, ``v`` must be the eigenstructure for some *real* matrix ``X``. 
+ For example, obtained by ``w, v = scipy.linalg.eig(X)`` or + ``w, v = numpy.linalg.eig(X)`` in which case ``X`` can also represent + stacked arrays. + + .. versionadded:: 1.1.0 + + Examples + -------- + >>> X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) + >>> X + array([[ 1, 2, 3], + [ 0, 4, 5], + [ 0, -5, 4]]) + + >>> from scipy import linalg + >>> w, v = linalg.eig(X) + >>> w + array([ 1.+0.j, 4.+5.j, 4.-5.j]) + >>> v + array([[ 1.00000+0.j , -0.01906-0.40016j, -0.01906+0.40016j], + [ 0.00000+0.j , 0.00000-0.64788j, 0.00000+0.64788j], + [ 0.00000+0.j , 0.64788+0.j , 0.64788-0.j ]]) + + >>> wr, vr = linalg.cdf2rdf(w, v) + >>> wr + array([[ 1., 0., 0.], + [ 0., 4., 5.], + [ 0., -5., 4.]]) + >>> vr + array([[ 1. , 0.40016, -0.01906], + [ 0. , 0.64788, 0. ], + [ 0. , 0. , 0.64788]]) + + >>> vr @ wr + array([[ 1. , 1.69593, 1.9246 ], + [ 0. , 2.59153, 3.23942], + [ 0. , -3.23942, 2.59153]]) + >>> X @ vr + array([[ 1. , 1.69593, 1.9246 ], + [ 0. , 2.59153, 3.23942], + [ 0. , -3.23942, 2.59153]]) + """ + w, v = _asarray_validated(w), _asarray_validated(v) + + # check dimensions + if w.ndim < 1: + raise ValueError('expected w to be at least one-dimensional') + if v.ndim < 2: + raise ValueError('expected v to be at least two-dimensional') + if v.ndim != w.ndim + 1: + raise ValueError('expected eigenvectors array to have exactly one ' + 'dimension more than eigenvalues array') + + # check shapes + n = w.shape[-1] + M = w.shape[:-1] + if v.shape[-2] != v.shape[-1]: + raise ValueError('expected v to be a square matrix or stacked square ' + 'matrices: v.shape[-2] = v.shape[-1]') + if v.shape[-1] != n: + raise ValueError('expected the same number of eigenvalues as ' + 'eigenvectors') + + # get indices for each first pair of complex eigenvalues + complex_mask = iscomplex(w) + n_complex = complex_mask.sum(axis=-1) + + # check if all complex eigenvalues have conjugate pairs + if not (n_complex % 2 == 0).all(): + raise ValueError('expected complex-conjugate pairs of eigenvalues') + + # find complex indices + idx = nonzero(complex_mask) + idx_stack = idx[:-1] + idx_elem = idx[-1] + + # filter them to conjugate indices, assuming pairs are not interleaved + j = idx_elem[0::2] + k = idx_elem[1::2] + stack_ind = () + for i in idx_stack: + # should never happen, assuming nonzero orders by the last axis + assert (i[0::2] == i[1::2]).all(), "Conjugate pair spanned different arrays!" 
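+        # Both members of each conjugate pair live in the same stacked
+        # matrix (checked by the assert above), so keeping every other
+        # entry yields one stack coordinate per pair.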
+        stack_ind += (i[0::2],)
+
+    # all eigenvalues to diagonal form
+    wr = zeros(M + (n, n), dtype=w.real.dtype)
+    di = range(n)
+    wr[..., di, di] = w.real
+
+    # complex eigenvalues to real block diagonal form
+    wr[stack_ind + (j, k)] = w[stack_ind + (j,)].imag
+    wr[stack_ind + (k, j)] = w[stack_ind + (k,)].imag
+
+    # compute real eigenvectors associated with real block diagonal eigenvalues
+    u = zeros(M + (n, n), dtype=numpy.cdouble)
+    u[..., di, di] = 1.0
+    u[stack_ind + (j, j)] = 0.5j
+    u[stack_ind + (j, k)] = 0.5
+    u[stack_ind + (k, j)] = -0.5j
+    u[stack_ind + (k, k)] = 0.5
+
+    # multiply matrices v and u (equivalent to v @ u)
+    vr = einsum('...ij,...jk->...ik', v, u).real
+
+    return wr, vr
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp.pyc
new file mode 100644
index 0000000..cc2b54c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_cholesky.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_cholesky.py
new file mode 100644
index 0000000..2646d54
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_cholesky.py
@@ -0,0 +1,353 @@
+"""Cholesky decomposition functions."""
+
+from __future__ import division, print_function, absolute_import
+
+from numpy import asarray_chkfinite, asarray, atleast_2d
+
+# Local imports
+from .misc import LinAlgError, _datacopied
+from .lapack import get_lapack_funcs
+
+__all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded',
+           'cho_solve_banded']
+
+
+def _cholesky(a, lower=False, overwrite_a=False, clean=True,
+              check_finite=True):
+    """Common code for cholesky() and cho_factor()."""
+
+    a1 = asarray_chkfinite(a) if check_finite else asarray(a)
+    a1 = atleast_2d(a1)
+
+    # Dimension check
+    if a1.ndim != 2:
+        raise ValueError('Input array needs to be 2 dimensional but received '
+                         'a {}d-array.'.format(a1.ndim))
+    # Squareness check
+    if a1.shape[0] != a1.shape[1]:
+        raise ValueError('Input array is expected to be square but has '
+                         'the shape: {}.'.format(a1.shape))
+
+    # Quick return for square empty array
+    if a1.size == 0:
+        return a1.copy(), lower
+
+    overwrite_a = overwrite_a or _datacopied(a1, a)
+    potrf, = get_lapack_funcs(('potrf',), (a1,))
+    c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean)
+    if info > 0:
+        raise LinAlgError("%d-th leading minor of the array is not positive "
+                          "definite" % info)
+    if info < 0:
+        raise ValueError('LAPACK reported an illegal value in {}-th argument '
+                         'on entry to "POTRF".'.format(-info))
+    return c, lower
+
+
+def cholesky(a, lower=False, overwrite_a=False, check_finite=True):
+    """
+    Compute the Cholesky decomposition of a matrix.
+
+    Returns the Cholesky decomposition, :math:`A = L L^*` or
+    :math:`A = U^* U` of a Hermitian positive-definite matrix A.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to be decomposed
+    lower : bool, optional
+        Whether to compute the upper or lower triangular Cholesky
+        factorization. Default is upper-triangular.
+    overwrite_a : bool, optional
+        Whether to overwrite data in `a` (may improve performance).
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    c : (M, M) ndarray
+        Upper- or lower-triangular Cholesky factor of `a`.
+
+    Raises
+    ------
+    LinAlgError : if decomposition fails.
+
+    Examples
+    --------
+    >>> from scipy.linalg import cholesky
+    >>> a = np.array([[1,-2j],[2j,5]])
+    >>> L = cholesky(a, lower=True)
+    >>> L
+    array([[ 1.+0.j,  0.+0.j],
+           [ 0.+2.j,  1.+0.j]])
+    >>> L @ L.T.conj()
+    array([[ 1.+0.j,  0.-2.j],
+           [ 0.+2.j,  5.+0.j]])
+
+    """
+    c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True,
+                         check_finite=check_finite)
+    return c
+
+
+def cho_factor(a, lower=False, overwrite_a=False, check_finite=True):
+    """
+    Compute the Cholesky decomposition of a matrix, to use in cho_solve
+
+    Returns a matrix containing the Cholesky decomposition,
+    ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`.
+    The return value can be directly used as the first parameter to cho_solve.
+
+    .. warning::
+        The returned matrix also contains random data in the entries not
+        used by the Cholesky decomposition. If you need to zero these
+        entries, use the function `cholesky` instead.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to be decomposed
+    lower : bool, optional
+        Whether to compute the upper or lower triangular Cholesky factorization
+        (Default: upper-triangular)
+    overwrite_a : bool, optional
+        Whether to overwrite data in a (may improve performance)
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    c : (M, M) ndarray
+        Matrix whose upper or lower triangle contains the Cholesky factor
+        of `a`. Other parts of the matrix contain random data.
+    lower : bool
+        Flag indicating whether the factor is in the lower or upper triangle
+
+    Raises
+    ------
+    LinAlgError
+        Raised if decomposition fails.
+
+    See also
+    --------
+    cho_solve : Solve a linear set of equations using the Cholesky
+        factorization of a matrix.
+
+    Examples
+    --------
+    >>> from scipy.linalg import cho_factor
+    >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])
+    >>> c, low = cho_factor(A)
+    >>> c
+    array([[3.        , 1.        , 0.33333333,  1.66666667],
+           [3.        , 2.44948974, 1.90515869, -0.27216553],
+           [1.        , 5.        , 2.29330749,  0.8559528 ],
+           [5.        , 1.        , 2.        ,  1.55418563]])
+    >>> np.allclose(np.triu(c).T @ np.triu(c) - A, np.zeros((4, 4)))
+    True
+
+    """
+    c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False,
+                         check_finite=check_finite)
+    return c, lower
+
+
+def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True):
+    """Solve the linear equations A x = b, given the Cholesky factorization of A.
+
+    Parameters
+    ----------
+    (c, lower) : tuple, (array, bool)
+        Cholesky factorization of a, as given by cho_factor
+    b : array
+        Right-hand side
+    overwrite_b : bool, optional
+        Whether to overwrite data in b (may improve performance)
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : array
+        The solution to the system A x = b
+
+    See also
+    --------
+    cho_factor : Cholesky factorization of a matrix
+
+    Examples
+    --------
+    >>> from scipy.linalg import cho_factor, cho_solve
+    >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])
+    >>> c, low = cho_factor(A)
+    >>> x = cho_solve((c, low), [1, 1, 1, 1])
+    >>> np.allclose(A @ x - [1, 1, 1, 1], np.zeros(4))
+    True
+
+    """
+    (c, lower) = c_and_lower
+    if check_finite:
+        b1 = asarray_chkfinite(b)
+        c = asarray_chkfinite(c)
+    else:
+        b1 = asarray(b)
+        c = asarray(c)
+    if c.ndim != 2 or c.shape[0] != c.shape[1]:
+        raise ValueError("The factored matrix c is not square.")
+    if c.shape[1] != b1.shape[0]:
+        raise ValueError("incompatible dimensions.")
+
+    overwrite_b = overwrite_b or _datacopied(b1, b)
+
+    potrs, = get_lapack_funcs(('potrs',), (c, b1))
+    x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b)
+    if info != 0:
+        raise ValueError('illegal value in %d-th argument of internal potrs'
+                         % -info)
+    return x
+
+
+def cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True):
+    """
+    Cholesky decompose a banded Hermitian positive-definite matrix
+
+    The matrix a is stored in ab either in lower diagonal or upper
+    diagonal ordered form::
+
+        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
+        ab[    i - j, j] == a[i,j]        (if lower form; i >= j)
+
+    Example of ab (shape of a is (6,6), u=2)::
+
+        upper form:
+        *   *   a02 a13 a24 a35
+        *   a01 a12 a23 a34 a45
+        a00 a11 a22 a33 a44 a55
+
+        lower form:
+        a00 a11 a22 a33 a44 a55
+        a10 a21 a32 a43 a54 *
+        a20 a31 a42 a53 *   *
+
+    Parameters
+    ----------
+    ab : (u + 1, M) array_like
+        Banded matrix
+    overwrite_ab : bool, optional
+        Discard data in ab (may enhance performance)
+    lower : bool, optional
+        Is the matrix in the lower form. (Default is upper form)
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    c : (u + 1, M) ndarray
+        Cholesky factorization of a, in the same banded format as ab
+
+    See also
+    --------
+    cho_solve_banded : Solve a linear set of equations, given the Cholesky
+        factorization of a banded Hermitian matrix.
+
+    Examples
+    --------
+    >>> from scipy.linalg import cholesky_banded
+    >>> from numpy import allclose, zeros, diag
+    >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]])
+    >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1)
+    >>> A = A + A.conj().T + np.diag(Ab[2, :])
+    >>> c = cholesky_banded(Ab)
+    >>> C = np.diag(c[0, 2:], k=2) + np.diag(c[1, 1:], k=1) + np.diag(c[2, :])
+    >>> np.allclose(C.conj().T @ C - A, np.zeros((5, 5)))
+    True
+
+    """
+    if check_finite:
+        ab = asarray_chkfinite(ab)
+    else:
+        ab = asarray(ab)
+
+    pbtrf, = get_lapack_funcs(('pbtrf',), (ab,))
+    c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab)
+    if info > 0:
+        raise LinAlgError("%d-th leading minor not positive definite" % info)
+    if info < 0:
+        raise ValueError('illegal value in %d-th argument of internal pbtrf'
+                         % -info)
+    return c
+
+
+def cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True):
+    """
+    Solve the linear equations ``A x = b``, given the Cholesky factorization of
+    the banded Hermitian ``A``.
+
+    Parameters
+    ----------
+    (cb, lower) : tuple, (ndarray, bool)
+        `cb` is the Cholesky factorization of A, as given by cholesky_banded.
+ `lower` must be the same value that was given to cholesky_banded. + b : array_like + Right-hand side + overwrite_b : bool, optional + If True, the function will overwrite the values in `b`. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + x : array + The solution to the system A x = b + + See also + -------- + cholesky_banded : Cholesky factorization of a banded matrix + + Notes + ----- + + .. versionadded:: 0.8.0 + + Examples + -------- + >>> from scipy.linalg import cholesky_banded, cho_solve_banded + >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]]) + >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1) + >>> A = A + A.conj().T + np.diag(Ab[2, :]) + >>> c = cholesky_banded(Ab) + >>> x = cho_solve_banded((c, False), np.ones(5)) + >>> np.allclose(A @ x - np.ones(5), np.zeros(5)) + True + + """ + (cb, lower) = cb_and_lower + if check_finite: + cb = asarray_chkfinite(cb) + b = asarray_chkfinite(b) + else: + cb = asarray(cb) + b = asarray(b) + + # Validate shapes. + if cb.shape[-1] != b.shape[0]: + raise ValueError("shapes of cb and b are not compatible.") + + pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b)) + x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b) + if info > 0: + raise LinAlgError("%d-th leading minor not positive definite" % info) + if info < 0: + raise ValueError('illegal value in %d-th argument of internal pbtrs' + % -info) + return x diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_cholesky.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_cholesky.pyc new file mode 100644 index 0000000..4974126 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_cholesky.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_lu.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_lu.py new file mode 100644 index 0000000..faf58c8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_lu.py @@ -0,0 +1,224 @@ +"""LU decomposition functions.""" + +from __future__ import division, print_function, absolute_import + +from warnings import warn + +from numpy import asarray, asarray_chkfinite + +# Local imports +from .misc import _datacopied, LinAlgWarning +from .lapack import get_lapack_funcs +from .flinalg import get_flinalg_funcs + +__all__ = ['lu', 'lu_solve', 'lu_factor'] + + +def lu_factor(a, overwrite_a=False, check_finite=True): + """ + Compute pivoted LU decomposition of a matrix. + + The decomposition is:: + + A = P L U + + where P is a permutation matrix, L lower triangular with unit + diagonal elements, and U upper triangular. + + Parameters + ---------- + a : (M, M) array_like + Matrix to decompose + overwrite_a : bool, optional + Whether to overwrite data in A (may increase performance) + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + lu : (N, N) ndarray + Matrix containing U in its upper triangle, and L in its lower triangle. + The unit diagonal elements of L are not stored. 
+ piv : (N,) ndarray + Pivot indices representing the permutation matrix P: + row i of matrix was interchanged with row piv[i]. + + See also + -------- + lu_solve : solve an equation system using the LU factorization of a matrix + + Notes + ----- + This is a wrapper to the ``*GETRF`` routines from LAPACK. + + Examples + -------- + >>> from scipy.linalg import lu_factor + >>> from numpy import tril, triu, allclose, zeros, eye + >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) + >>> lu, piv = lu_factor(A) + >>> piv + array([2, 2, 3, 3], dtype=int32) + + Convert LAPACK's ``piv`` array to NumPy index and test the permutation + + >>> piv_py = [2, 0, 3, 1] + >>> L, U = np.tril(lu, k=-1) + np.eye(4), np.triu(lu) + >>> np.allclose(A[piv_py] - L @ U, np.zeros((4, 4))) + True + """ + if check_finite: + a1 = asarray_chkfinite(a) + else: + a1 = asarray(a) + if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): + raise ValueError('expected square matrix') + overwrite_a = overwrite_a or (_datacopied(a1, a)) + getrf, = get_lapack_funcs(('getrf',), (a1,)) + lu, piv, info = getrf(a1, overwrite_a=overwrite_a) + if info < 0: + raise ValueError('illegal value in %d-th argument of ' + 'internal getrf (lu_factor)' % -info) + if info > 0: + warn("Diagonal number %d is exactly zero. Singular matrix." % info, + LinAlgWarning, stacklevel=2) + return lu, piv + + +def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True): + """Solve an equation system, a x = b, given the LU factorization of a + + Parameters + ---------- + (lu, piv) + Factorization of the coefficient matrix a, as given by lu_factor + b : array + Right-hand side + trans : {0, 1, 2}, optional + Type of system to solve: + + ===== ========= + trans system + ===== ========= + 0 a x = b + 1 a^T x = b + 2 a^H x = b + ===== ========= + overwrite_b : bool, optional + Whether to overwrite data in b (may increase performance) + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + x : array + Solution to the system + + See also + -------- + lu_factor : LU factorize a matrix + + Examples + -------- + >>> from scipy.linalg import lu_factor, lu_solve + >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) + >>> b = np.array([1, 1, 1, 1]) + >>> lu, piv = lu_factor(A) + >>> x = lu_solve((lu, piv), b) + >>> np.allclose(A @ x - b, np.zeros((4,))) + True + + """ + (lu, piv) = lu_and_piv + if check_finite: + b1 = asarray_chkfinite(b) + else: + b1 = asarray(b) + overwrite_b = overwrite_b or _datacopied(b1, b) + if lu.shape[0] != b1.shape[0]: + raise ValueError("incompatible dimensions.") + + getrs, = get_lapack_funcs(('getrs',), (lu, b1)) + x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b) + if info == 0: + return x + raise ValueError('illegal value in %d-th argument of internal gesv|posv' + % -info) + + +def lu(a, permute_l=False, overwrite_a=False, check_finite=True): + """ + Compute pivoted LU decomposition of a matrix. + + The decomposition is:: + + A = P L U + + where P is a permutation matrix, L lower triangular with unit + diagonal elements, and U upper triangular. 
+ + Parameters + ---------- + a : (M, N) array_like + Array to decompose + permute_l : bool, optional + Perform the multiplication P*L (Default: do not permute) + overwrite_a : bool, optional + Whether to overwrite data in a (may improve performance) + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + **(If permute_l == False)** + + p : (M, M) ndarray + Permutation matrix + l : (M, K) ndarray + Lower triangular or trapezoidal matrix with unit diagonal. + K = min(M, N) + u : (K, N) ndarray + Upper triangular or trapezoidal matrix + + **(If permute_l == True)** + + pl : (M, K) ndarray + Permuted L matrix. + K = min(M, N) + u : (K, N) ndarray + Upper triangular or trapezoidal matrix + + Notes + ----- + This is a LU factorization routine written for Scipy. + + Examples + -------- + >>> from scipy.linalg import lu + >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) + >>> p, l, u = lu(A) + >>> np.allclose(A - p @ l @ u, np.zeros((4, 4))) + True + + """ + if check_finite: + a1 = asarray_chkfinite(a) + else: + a1 = asarray(a) + if len(a1.shape) != 2: + raise ValueError('expected matrix') + overwrite_a = overwrite_a or (_datacopied(a1, a)) + flu, = get_flinalg_funcs(('lu',), (a1,)) + p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a) + if info < 0: + raise ValueError('illegal value in %d-th argument of ' + 'internal lu.getrf' % -info) + if permute_l: + return l, u + return p, l, u diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_lu.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_lu.pyc new file mode 100644 index 0000000..47ef1ef Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_lu.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_qr.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_qr.py new file mode 100644 index 0000000..acd2ca3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_qr.py @@ -0,0 +1,424 @@ +"""QR decomposition functions.""" +from __future__ import division, print_function, absolute_import + +import numpy + +# Local imports +from .lapack import get_lapack_funcs +from .misc import _datacopied + +__all__ = ['qr', 'qr_multiply', 'rq'] + + +def safecall(f, name, *args, **kwargs): + """Call a LAPACK routine, determining lwork automatically and handling + error return values""" + lwork = kwargs.get("lwork", None) + if lwork in (None, -1): + kwargs['lwork'] = -1 + ret = f(*args, **kwargs) + kwargs['lwork'] = ret[-2][0].real.astype(numpy.int) + ret = f(*args, **kwargs) + if ret[-1] < 0: + raise ValueError("illegal value in %d-th argument of internal %s" + % (-ret[-1], name)) + return ret[:-2] + + +def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False, + check_finite=True): + """ + Compute QR decomposition of a matrix. + + Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal + and R upper triangular. + + Parameters + ---------- + a : (M, N) array_like + Matrix to be decomposed + overwrite_a : bool, optional + Whether data in a is overwritten (may improve performance) + lwork : int, optional + Work array size, lwork >= a.shape[1]. If None or -1, an optimal size + is computed. 
+ mode : {'full', 'r', 'economic', 'raw'}, optional + Determines what information is to be returned: either both Q and R + ('full', default), only R ('r') or both Q and R but computed in + economy-size ('economic', see Notes). The final option 'raw' + (added in Scipy 0.11) makes the function return two matrices + (Q, TAU) in the internal format used by LAPACK. + pivoting : bool, optional + Whether or not factorization should include pivoting for rank-revealing + qr decomposition. If pivoting, compute the decomposition + ``A P = Q R`` as above, but where P is chosen such that the diagonal + of R is non-increasing. + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + Q : float or complex ndarray + Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned + if ``mode='r'``. + R : float or complex ndarray + Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``. + P : int ndarray + Of shape (N,) for ``pivoting=True``. Not returned if + ``pivoting=False``. + + Raises + ------ + LinAlgError + Raised if decomposition fails + + Notes + ----- + This is an interface to the LAPACK routines dgeqrf, zgeqrf, + dorgqr, zungqr, dgeqp3, and zgeqp3. + + If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead + of (M,M) and (M,N), with ``K=min(M,N)``. + + Examples + -------- + >>> from scipy import random, linalg, dot, diag, all, allclose + >>> a = random.randn(9, 6) + + >>> q, r = linalg.qr(a) + >>> allclose(a, np.dot(q, r)) + True + >>> q.shape, r.shape + ((9, 9), (9, 6)) + + >>> r2 = linalg.qr(a, mode='r') + >>> allclose(r, r2) + True + + >>> q3, r3 = linalg.qr(a, mode='economic') + >>> q3.shape, r3.shape + ((9, 6), (6, 6)) + + >>> q4, r4, p4 = linalg.qr(a, pivoting=True) + >>> d = abs(diag(r4)) + >>> all(d[1:] <= d[:-1]) + True + >>> allclose(a[:, p4], dot(q4, r4)) + True + >>> q4.shape, r4.shape, p4.shape + ((9, 9), (9, 6), (6,)) + + >>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True) + >>> q5.shape, r5.shape, p5.shape + ((9, 6), (6, 6), (6,)) + + """ + # 'qr' was the old default, equivalent to 'full'. Neither 'full' nor + # 'qr' are used below. 
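+    # Note: 'qr' is still accepted by the check below for backward
+    # compatibility, although the error message omits it.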
+ # 'raw' is used internally by qr_multiply + if mode not in ['full', 'qr', 'r', 'economic', 'raw']: + raise ValueError("Mode argument should be one of ['full', 'r'," + "'economic', 'raw']") + + if check_finite: + a1 = numpy.asarray_chkfinite(a) + else: + a1 = numpy.asarray(a) + if len(a1.shape) != 2: + raise ValueError("expected 2D array") + M, N = a1.shape + overwrite_a = overwrite_a or (_datacopied(a1, a)) + + if pivoting: + geqp3, = get_lapack_funcs(('geqp3',), (a1,)) + qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a) + jpvt -= 1 # geqp3 returns a 1-based index array, so subtract 1 + else: + geqrf, = get_lapack_funcs(('geqrf',), (a1,)) + qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork, + overwrite_a=overwrite_a) + + if mode not in ['economic', 'raw'] or M < N: + R = numpy.triu(qr) + else: + R = numpy.triu(qr[:N, :]) + + if pivoting: + Rj = R, jpvt + else: + Rj = R, + + if mode == 'r': + return Rj + elif mode == 'raw': + return ((qr, tau),) + Rj + + gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,)) + + if M < N: + Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau, + lwork=lwork, overwrite_a=1) + elif mode == 'economic': + Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork, + overwrite_a=1) + else: + t = qr.dtype.char + qqr = numpy.empty((M, M), dtype=t) + qqr[:, :N] = qr + Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork, + overwrite_a=1) + + return (Q,) + Rj + + +def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False, + overwrite_a=False, overwrite_c=False): + """ + Calculate the QR decomposition and multiply Q with a matrix. + + Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal + and R upper triangular. Multiply Q with a vector or a matrix c. + + Parameters + ---------- + a : (M, N), array_like + Input array + c : array_like + Input array to be multiplied by ``q``. + mode : {'left', 'right'}, optional + ``Q @ c`` is returned if mode is 'left', ``c @ Q`` is returned if + mode is 'right'. + The shape of c must be appropriate for the matrix multiplications, + if mode is 'left', ``min(a.shape) == c.shape[0]``, + if mode is 'right', ``a.shape[0] == c.shape[1]``. + pivoting : bool, optional + Whether or not factorization should include pivoting for rank-revealing + qr decomposition, see the documentation of qr. + conjugate : bool, optional + Whether Q should be complex-conjugated. This might be faster + than explicit conjugation. + overwrite_a : bool, optional + Whether data in a is overwritten (may improve performance) + overwrite_c : bool, optional + Whether data in c is overwritten (may improve performance). + If this is used, c must be big enough to keep the result, + i.e. ``c.shape[0]`` = ``a.shape[0]`` if mode is 'left'. + + Returns + ------- + CQ : ndarray + The product of ``Q`` and ``c``. + R : (K, N), ndarray + R array of the resulting QR factorization where ``K = min(M, N)``. + P : (N,) ndarray + Integer pivot array. Only returned when ``pivoting=True``. + + Raises + ------ + LinAlgError + Raised if QR decomposition fails. + + Notes + ----- + This is an interface to the LAPACK routines ``?GEQRF``, ``?ORMQR``, + ``?UNMQR``, and ``?GEQP3``. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy.linalg import qr_multiply, qr + >>> A = np.array([[1, 3, 3], [2, 3, 2], [2, 3, 3], [1, 3, 2]]) + >>> qc, r1, piv1 = qr_multiply(A, 2*np.eye(4), pivoting=1) + >>> qc + array([[-1., 1., -1.], + [-1., -1., 1.], + [-1., -1., -1.], + [-1., 1., 1.]]) + >>> r1 + array([[-6., -3., -5. 
], + [ 0., -1., -1.11022302e-16], + [ 0., 0., -1. ]]) + >>> piv1 + array([1, 0, 2], dtype=int32) + >>> q2, r2, piv2 = qr(A, mode='economic', pivoting=1) + >>> np.allclose(2*q2 - qc, np.zeros((4, 3))) + True + + """ + if mode not in ['left', 'right']: + raise ValueError("Mode argument can only be 'left' or 'right' but " + "not '{}'".format(mode)) + c = numpy.asarray_chkfinite(c) + if c.ndim < 2: + onedim = True + c = numpy.atleast_2d(c) + if mode == "left": + c = c.T + else: + onedim = False + + a = numpy.atleast_2d(numpy.asarray(a)) # chkfinite done in qr + M, N = a.shape + + if mode == 'left': + if c.shape[0] != min(M, N + overwrite_c*(M-N)): + raise ValueError('Array shapes are not compatible for Q @ c' + ' operation: {} vs {}'.format(a.shape, c.shape)) + else: + if M != c.shape[1]: + raise ValueError('Array shapes are not compatible for c @ Q' + ' operation: {} vs {}'.format(c.shape, a.shape)) + + raw = qr(a, overwrite_a, None, "raw", pivoting) + Q, tau = raw[0] + + gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,)) + if gor_un_mqr.typecode in ('s', 'd'): + trans = "T" + else: + trans = "C" + + Q = Q[:, :min(M, N)] + if M > N and mode == "left" and not overwrite_c: + if conjugate: + cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F") + cc[:, :N] = c.T + else: + cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F") + cc[:N, :] = c + trans = "N" + if conjugate: + lr = "R" + else: + lr = "L" + overwrite_c = True + elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate: + cc = c.T + if mode == "left": + lr = "R" + else: + lr = "L" + else: + trans = "N" + cc = c + if mode == "left": + lr = "L" + else: + lr = "R" + cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc, + overwrite_c=overwrite_c) + if trans != "N": + cQ = cQ.T + if mode == "right": + cQ = cQ[:, :min(M, N)] + if onedim: + cQ = cQ.ravel() + + return (cQ,) + raw[1:] + + +def rq(a, overwrite_a=False, lwork=None, mode='full', check_finite=True): + """ + Compute RQ decomposition of a matrix. + + Calculate the decomposition ``A = R Q`` where Q is unitary/orthogonal + and R upper triangular. + + Parameters + ---------- + a : (M, N) array_like + Matrix to be decomposed + overwrite_a : bool, optional + Whether data in a is overwritten (may improve performance) + lwork : int, optional + Work array size, lwork >= a.shape[1]. If None or -1, an optimal size + is computed. + mode : {'full', 'r', 'economic'}, optional + Determines what information is to be returned: either both Q and R + ('full', default), only R ('r') or both Q and R but computed in + economy-size ('economic', see Notes). + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + R : float or complex ndarray + Of shape (M, N) or (M, K) for ``mode='economic'``. ``K = min(M, N)``. + Q : float or complex ndarray + Of shape (N, N) or (K, N) for ``mode='economic'``. Not returned + if ``mode='r'``. + + Raises + ------ + LinAlgError + If decomposition fails. + + Notes + ----- + This is an interface to the LAPACK routines sgerqf, dgerqf, cgerqf, zgerqf, + sorgrq, dorgrq, cungrq and zungrq. + + If ``mode=economic``, the shapes of Q and R are (K, N) and (M, K) instead + of (N,N) and (M,N), with ``K=min(M,N)``. 
+
+    Examples
+    --------
+    >>> from scipy import linalg
+    >>> a = np.random.randn(6, 9)
+    >>> r, q = linalg.rq(a)
+    >>> np.allclose(a, r @ q)
+    True
+    >>> r.shape, q.shape
+    ((6, 9), (9, 9))
+    >>> r2 = linalg.rq(a, mode='r')
+    >>> np.allclose(r, r2)
+    True
+    >>> r3, q3 = linalg.rq(a, mode='economic')
+    >>> r3.shape, q3.shape
+    ((6, 6), (6, 9))
+
+    """
+    if mode not in ['full', 'r', 'economic']:
+        raise ValueError(
+            "Mode argument should be one of ['full', 'r', 'economic']")
+
+    if check_finite:
+        a1 = numpy.asarray_chkfinite(a)
+    else:
+        a1 = numpy.asarray(a)
+    if len(a1.shape) != 2:
+        raise ValueError('expected matrix')
+    M, N = a1.shape
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+
+    gerqf, = get_lapack_funcs(('gerqf',), (a1,))
+    rq, tau = safecall(gerqf, 'gerqf', a1, lwork=lwork,
+                       overwrite_a=overwrite_a)
+    if not mode == 'economic' or N < M:
+        R = numpy.triu(rq, N-M)
+    else:
+        R = numpy.triu(rq[-M:, -M:])
+
+    if mode == 'r':
+        return R
+
+    gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,))
+
+    if N < M:
+        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq[-N:], tau, lwork=lwork,
+                      overwrite_a=1)
+    elif mode == 'economic':
+        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq, tau, lwork=lwork,
+                      overwrite_a=1)
+    else:
+        rq1 = numpy.empty((N, N), dtype=rq.dtype)
+        rq1[-M:] = rq
+        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq1, tau, lwork=lwork,
+                      overwrite_a=1)
+
+    return R, Q
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_qr.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_qr.pyc
new file mode 100644
index 0000000..fdef350
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_qr.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_schur.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_schur.py
new file mode 100644
index 0000000..d0e6233
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_schur.py
@@ -0,0 +1,295 @@
+"""Schur decomposition functions."""
+from __future__ import division, print_function, absolute_import
+
+import numpy
+from numpy import asarray_chkfinite, single, asarray, array
+from numpy.linalg import norm
+
+from scipy._lib.six import callable
+
+# Local imports.
+from .misc import LinAlgError, _datacopied
+from .lapack import get_lapack_funcs
+from .decomp import eigvals
+
+__all__ = ['schur', 'rsf2csf']
+
+_double_precision = ['i', 'l', 'd']
+
+
+def schur(a, output='real', lwork=None, overwrite_a=False, sort=None,
+          check_finite=True):
+    """
+    Compute Schur decomposition of a matrix.
+
+    The Schur decomposition is::
+
+        A = Z T Z^H
+
+    where Z is unitary and T is either upper-triangular, or for real
+    Schur decomposition (output='real'), quasi-upper triangular. In
+    the quasi-triangular form, 2x2 blocks describing complex-valued
+    eigenvalue pairs may extrude from the diagonal.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to decompose
+    output : {'real', 'complex'}, optional
+        Construct the real or complex Schur decomposition (for real matrices).
+    lwork : int, optional
+        Work array size. If None or -1, it is automatically computed.
+    overwrite_a : bool, optional
+        Whether to overwrite data in a (may improve performance).
+    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
+        Specifies whether the upper eigenvalues should be sorted. A callable
+        may be passed that, given an eigenvalue, returns a boolean denoting
+        whether the eigenvalue should be sorted to the top-left (True).
+ Alternatively, string parameters may be used:: + + 'lhp' Left-hand plane (x.real < 0.0) + 'rhp' Right-hand plane (x.real > 0.0) + 'iuc' Inside the unit circle (x*x.conjugate() <= 1.0) + 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) + + Defaults to None (no sorting). + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + T : (M, M) ndarray + Schur form of A. It is real-valued for the real Schur decomposition. + Z : (M, M) ndarray + An unitary Schur transformation matrix for A. + It is real-valued for the real Schur decomposition. + sdim : int + If and only if sorting was requested, a third return value will + contain the number of eigenvalues satisfying the sort condition. + + Raises + ------ + LinAlgError + Error raised under three conditions: + + 1. The algorithm failed due to a failure of the QR algorithm to + compute all eigenvalues + 2. If eigenvalue sorting was requested, the eigenvalues could not be + reordered due to a failure to separate eigenvalues, usually because + of poor conditioning + 3. If eigenvalue sorting was requested, roundoff errors caused the + leading eigenvalues to no longer satisfy the sorting condition + + See also + -------- + rsf2csf : Convert real Schur form to complex Schur form + + Examples + -------- + >>> from scipy.linalg import schur, eigvals + >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]]) + >>> T, Z = schur(A) + >>> T + array([[ 2.65896708, 1.42440458, -1.92933439], + [ 0. , -0.32948354, -0.49063704], + [ 0. , 1.31178921, -0.32948354]]) + >>> Z + array([[0.72711591, -0.60156188, 0.33079564], + [0.52839428, 0.79801892, 0.28976765], + [0.43829436, 0.03590414, -0.89811411]]) + + >>> T2, Z2 = schur(A, output='complex') + >>> T2 + array([[ 2.65896708, -1.22839825+1.32378589j, 0.42590089+1.51937378j], + [ 0. , -0.32948354+0.80225456j, -0.59877807+0.56192146j], + [ 0. , 0. 
, -0.32948354-0.80225456j]]) + >>> eigvals(T2) + array([2.65896708, -0.32948354+0.80225456j, -0.32948354-0.80225456j]) + + An arbitrary custom eig-sorting condition, having positive imaginary part, + which is satisfied by only one eigenvalue + + >>> T3, Z3, sdim = schur(A, output='complex', sort=lambda x: x.imag > 0) + >>> sdim + 1 + + """ + if output not in ['real', 'complex', 'r', 'c']: + raise ValueError("argument must be 'real', or 'complex'") + if check_finite: + a1 = asarray_chkfinite(a) + else: + a1 = asarray(a) + if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): + raise ValueError('expected square matrix') + typ = a1.dtype.char + if output in ['complex', 'c'] and typ not in ['F', 'D']: + if typ in _double_precision: + a1 = a1.astype('D') + typ = 'D' + else: + a1 = a1.astype('F') + typ = 'F' + overwrite_a = overwrite_a or (_datacopied(a1, a)) + gees, = get_lapack_funcs(('gees',), (a1,)) + if lwork is None or lwork == -1: + # get optimal work array + result = gees(lambda x: None, a1, lwork=-1) + lwork = result[-2][0].real.astype(numpy.int) + + if sort is None: + sort_t = 0 + sfunction = lambda x: None + else: + sort_t = 1 + if callable(sort): + sfunction = sort + elif sort == 'lhp': + sfunction = lambda x: (x.real < 0.0) + elif sort == 'rhp': + sfunction = lambda x: (x.real >= 0.0) + elif sort == 'iuc': + sfunction = lambda x: (abs(x) <= 1.0) + elif sort == 'ouc': + sfunction = lambda x: (abs(x) > 1.0) + else: + raise ValueError("'sort' parameter must either be 'None', or a " + "callable, or one of ('lhp','rhp','iuc','ouc')") + + result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a, + sort_t=sort_t) + + info = result[-1] + if info < 0: + raise ValueError('illegal value in {}-th argument of internal gees' + ''.format(-info)) + elif info == a1.shape[0] + 1: + raise LinAlgError('Eigenvalues could not be separated for reordering.') + elif info == a1.shape[0] + 2: + raise LinAlgError('Leading eigenvalues do not satisfy sort condition.') + elif info > 0: + raise LinAlgError("Schur form not found. Possibly ill-conditioned.") + + if sort_t == 0: + return result[0], result[-3] + else: + return result[0], result[-3], result[1] + + +eps = numpy.finfo(float).eps +feps = numpy.finfo(single).eps + +_array_kind = {'b': 0, 'h': 0, 'B': 0, 'i': 0, 'l': 0, + 'f': 0, 'd': 0, 'F': 1, 'D': 1} +_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} +_array_type = [['f', 'd'], ['F', 'D']] + + +def _commonType(*arrays): + kind = 0 + precision = 0 + for a in arrays: + t = a.dtype.char + kind = max(kind, _array_kind[t]) + precision = max(precision, _array_precision[t]) + return _array_type[kind][precision] + + +def _castCopy(type, *arrays): + cast_arrays = () + for a in arrays: + if a.dtype.char == type: + cast_arrays = cast_arrays + (a.copy(),) + else: + cast_arrays = cast_arrays + (a.astype(type),) + if len(cast_arrays) == 1: + return cast_arrays[0] + else: + return cast_arrays + + +def rsf2csf(T, Z, check_finite=True): + """ + Convert real Schur form to complex Schur form. + + Convert a quasi-diagonal real-valued Schur form to the upper triangular + complex-valued Schur form. + + Parameters + ---------- + T : (M, M) array_like + Real Schur form of the original array + Z : (M, M) array_like + Schur transformation matrix + check_finite : bool, optional + Whether to check that the input arrays contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. 
+ + Returns + ------- + T : (M, M) ndarray + Complex Schur form of the original array + Z : (M, M) ndarray + Schur transformation matrix corresponding to the complex form + + See Also + -------- + schur : Schur decomposition of an array + + Examples + -------- + >>> from scipy.linalg import schur, rsf2csf + >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]]) + >>> T, Z = schur(A) + >>> T + array([[ 2.65896708, 1.42440458, -1.92933439], + [ 0. , -0.32948354, -0.49063704], + [ 0. , 1.31178921, -0.32948354]]) + >>> Z + array([[0.72711591, -0.60156188, 0.33079564], + [0.52839428, 0.79801892, 0.28976765], + [0.43829436, 0.03590414, -0.89811411]]) + >>> T2 , Z2 = rsf2csf(T, Z) + >>> T2 + array([[2.65896708+0.j, -1.64592781+0.743164187j, -1.21516887+1.00660462j], + [0.+0.j , -0.32948354+8.02254558e-01j, -0.82115218-2.77555756e-17j], + [0.+0.j , 0.+0.j, -0.32948354-0.802254558j]]) + >>> Z2 + array([[0.72711591+0.j, 0.28220393-0.31385693j, 0.51319638-0.17258824j], + [0.52839428+0.j, 0.24720268+0.41635578j, -0.68079517-0.15118243j], + [0.43829436+0.j, -0.76618703+0.01873251j, -0.03063006+0.46857912j]]) + + """ + if check_finite: + Z, T = map(asarray_chkfinite, (Z, T)) + else: + Z, T = map(asarray, (Z, T)) + + for ind, X in enumerate([Z, T]): + if X.ndim != 2 or X.shape[0] != X.shape[1]: + raise ValueError("Input '{}' must be square.".format('ZT'[ind])) + + if T.shape[0] != Z.shape[0]: + raise ValueError("Input array shapes must match: Z: {} vs. T: {}" + "".format(Z.shape, T.shape)) + N = T.shape[0] + t = _commonType(Z, T, array([3.0], 'F')) + Z, T = _castCopy(t, Z, T) + + for m in range(N-1, 0, -1): + if abs(T[m, m-1]) > eps*(abs(T[m-1, m-1]) + abs(T[m, m])): + mu = eigvals(T[m-1:m+1, m-1:m+1]) - T[m, m] + r = norm([mu[0], T[m, m-1]]) + c = mu[0] / r + s = T[m, m-1] / r + G = array([[c.conj(), s], [-s, c]], dtype=t) + + T[m-1:m+1, m-1:] = G.dot(T[m-1:m+1, m-1:]) + T[:m+1, m-1:m+1] = T[:m+1, m-1:m+1].dot(G.conj().T) + Z[:, m-1:m+1] = Z[:, m-1:m+1].dot(G.conj().T) + + T[m, m-1] = 0.0 + return T, Z diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_schur.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_schur.pyc new file mode 100644 index 0000000..4af3114 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_schur.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_svd.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_svd.py new file mode 100644 index 0000000..b56def2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_svd.py @@ -0,0 +1,495 @@ +"""SVD decomposition functions.""" +from __future__ import division, print_function, absolute_import + +import numpy +from numpy import zeros, r_, diag, dot, arccos, arcsin, where, clip + +# Local imports. +from .misc import LinAlgError, _datacopied +from .lapack import get_lapack_funcs, _compute_lwork +from .decomp import _asarray_validated +from scipy._lib.six import string_types + +__all__ = ['svd', 'svdvals', 'diagsvd', 'orth', 'subspace_angles', 'null_space'] + + +def svd(a, full_matrices=True, compute_uv=True, overwrite_a=False, + check_finite=True, lapack_driver='gesdd'): + """ + Singular Value Decomposition. + + Factorizes the matrix `a` into two unitary matrices ``U`` and ``Vh``, and + a 1-D array ``s`` of singular values (real, non-negative) such that + ``a == U @ S @ Vh``, where ``S`` is a suitably shaped matrix of zeros with + main diagonal ``s``. 
+ + Parameters + ---------- + a : (M, N) array_like + Matrix to decompose. + full_matrices : bool, optional + If True (default), `U` and `Vh` are of shape ``(M, M)``, ``(N, N)``. + If False, the shapes are ``(M, K)`` and ``(K, N)``, where + ``K = min(M, N)``. + compute_uv : bool, optional + Whether to compute also ``U`` and ``Vh`` in addition to ``s``. + Default is True. + overwrite_a : bool, optional + Whether to overwrite `a`; may improve performance. + Default is False. + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + lapack_driver : {'gesdd', 'gesvd'}, optional + Whether to use the more efficient divide-and-conquer approach + (``'gesdd'``) or general rectangular approach (``'gesvd'``) + to compute the SVD. MATLAB and Octave use the ``'gesvd'`` approach. + Default is ``'gesdd'``. + + .. versionadded:: 0.18 + + Returns + ------- + U : ndarray + Unitary matrix having left singular vectors as columns. + Of shape ``(M, M)`` or ``(M, K)``, depending on `full_matrices`. + s : ndarray + The singular values, sorted in non-increasing order. + Of shape (K,), with ``K = min(M, N)``. + Vh : ndarray + Unitary matrix having right singular vectors as rows. + Of shape ``(N, N)`` or ``(K, N)`` depending on `full_matrices`. + + For ``compute_uv=False``, only ``s`` is returned. + + Raises + ------ + LinAlgError + If SVD computation does not converge. + + See also + -------- + svdvals : Compute singular values of a matrix. + diagsvd : Construct the Sigma matrix, given the vector s. + + Examples + -------- + >>> from scipy import linalg + >>> m, n = 9, 6 + >>> a = np.random.randn(m, n) + 1.j*np.random.randn(m, n) + >>> U, s, Vh = linalg.svd(a) + >>> U.shape, s.shape, Vh.shape + ((9, 9), (6,), (6, 6)) + + Reconstruct the original matrix from the decomposition: + + >>> sigma = np.zeros((m, n)) + >>> for i in range(min(m, n)): + ... 
sigma[i, i] = s[i] + >>> a1 = np.dot(U, np.dot(sigma, Vh)) + >>> np.allclose(a, a1) + True + + Alternatively, use ``full_matrices=False`` (notice that the shape of + ``U`` is then ``(m, n)`` instead of ``(m, m)``): + + >>> U, s, Vh = linalg.svd(a, full_matrices=False) + >>> U.shape, s.shape, Vh.shape + ((9, 6), (6,), (6, 6)) + >>> S = np.diag(s) + >>> np.allclose(a, np.dot(U, np.dot(S, Vh))) + True + + >>> s2 = linalg.svd(a, compute_uv=False) + >>> np.allclose(s, s2) + True + + """ + a1 = _asarray_validated(a, check_finite=check_finite) + if len(a1.shape) != 2: + raise ValueError('expected matrix') + m, n = a1.shape + overwrite_a = overwrite_a or (_datacopied(a1, a)) + + if not isinstance(lapack_driver, string_types): + raise TypeError('lapack_driver must be a string') + if lapack_driver not in ('gesdd', 'gesvd'): + raise ValueError('lapack_driver must be "gesdd" or "gesvd", not "%s"' + % (lapack_driver,)) + funcs = (lapack_driver, lapack_driver + '_lwork') + gesXd, gesXd_lwork = get_lapack_funcs(funcs, (a1,)) + + # compute optimal lwork + lwork = _compute_lwork(gesXd_lwork, a1.shape[0], a1.shape[1], + compute_uv=compute_uv, full_matrices=full_matrices) + + # perform decomposition + u, s, v, info = gesXd(a1, compute_uv=compute_uv, lwork=lwork, + full_matrices=full_matrices, overwrite_a=overwrite_a) + + if info > 0: + raise LinAlgError("SVD did not converge") + if info < 0: + raise ValueError('illegal value in %d-th argument of internal gesdd' + % -info) + if compute_uv: + return u, s, v + else: + return s + + +def svdvals(a, overwrite_a=False, check_finite=True): + """ + Compute singular values of a matrix. + + Parameters + ---------- + a : (M, N) array_like + Matrix to decompose. + overwrite_a : bool, optional + Whether to overwrite `a`; may improve performance. + Default is False. + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + s : (min(M, N),) ndarray + The singular values, sorted in decreasing order. + + Raises + ------ + LinAlgError + If SVD computation does not converge. + + Notes + ----- + ``svdvals(a)`` only differs from ``svd(a, compute_uv=False)`` by its + handling of the edge case of empty ``a``, where it returns an + empty sequence: + + >>> a = np.empty((0, 2)) + >>> from scipy.linalg import svdvals + >>> svdvals(a) + array([], dtype=float64) + + See Also + -------- + svd : Compute the full singular value decomposition of a matrix. + diagsvd : Construct the Sigma matrix, given the vector s. + + Examples + -------- + >>> from scipy.linalg import svdvals + >>> m = np.array([[1.0, 0.0], + ... [2.0, 3.0], + ... [1.0, 1.0], + ... [0.0, 2.0], + ... [1.0, 0.0]]) + >>> svdvals(m) + array([ 4.28091555, 1.63516424]) + + We can verify the maximum singular value of `m` by computing the maximum + length of `m.dot(u)` over all the unit vectors `u` in the (x,y) plane. + We approximate "all" the unit vectors with a large sample. Because + of linearity, we only need the unit vectors with angles in [0, pi]. + + >>> t = np.linspace(0, np.pi, 2000) + >>> u = np.array([np.cos(t), np.sin(t)]) + >>> np.linalg.norm(m.dot(u), axis=0).max() + 4.2809152422538475 + + `p` is a projection matrix with rank 1. With exact arithmetic, + its singular values would be [1, 0, 0, 0]. 
+ + >>> v = np.array([0.1, 0.3, 0.9, 0.3]) + >>> p = np.outer(v, v) + >>> svdvals(p) + array([ 1.00000000e+00, 2.02021698e-17, 1.56692500e-17, + 8.15115104e-34]) + + The singular values of an orthogonal matrix are all 1. Here we + create a random orthogonal matrix by using the `rvs()` method of + `scipy.stats.ortho_group`. + + >>> from scipy.stats import ortho_group + >>> np.random.seed(123) + >>> orth = ortho_group.rvs(4) + >>> svdvals(orth) + array([ 1., 1., 1., 1.]) + + """ + a = _asarray_validated(a, check_finite=check_finite) + if a.size: + return svd(a, compute_uv=0, overwrite_a=overwrite_a, + check_finite=False) + elif len(a.shape) != 2: + raise ValueError('expected matrix') + else: + return numpy.empty(0) + + +def diagsvd(s, M, N): + """ + Construct the sigma matrix in SVD from singular values and size M, N. + + Parameters + ---------- + s : (M,) or (N,) array_like + Singular values + M : int + Size of the matrix whose singular values are `s`. + N : int + Size of the matrix whose singular values are `s`. + + Returns + ------- + S : (M, N) ndarray + The S-matrix in the singular value decomposition + + See Also + -------- + svd : Singular value decomposition of a matrix + svdvals : Compute singular values of a matrix. + + Examples + -------- + >>> from scipy.linalg import diagsvd + >>> vals = np.array([1, 2, 3]) # The array representing the computed svd + >>> diagsvd(vals, 3, 4) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0]]) + >>> diagsvd(vals, 4, 3) + array([[1, 0, 0], + [0, 2, 0], + [0, 0, 3], + [0, 0, 0]]) + + """ + part = diag(s) + typ = part.dtype.char + MorN = len(s) + if MorN == M: + return r_['-1', part, zeros((M, N-M), typ)] + elif MorN == N: + return r_[part, zeros((M-N, N), typ)] + else: + raise ValueError("Length of s must be M or N.") + + +# Orthonormal decomposition + +def orth(A, rcond=None): + """ + Construct an orthonormal basis for the range of A using SVD + + Parameters + ---------- + A : (M, N) array_like + Input array + rcond : float, optional + Relative condition number. Singular values ``s`` smaller than + ``rcond * max(s)`` are considered zero. + Default: floating point eps * max(M,N). + + Returns + ------- + Q : (M, K) ndarray + Orthonormal basis for the range of A. + K = effective rank of A, as determined by rcond + + See also + -------- + svd : Singular value decomposition of a matrix + null_space : Matrix null space + + Examples + -------- + >>> from scipy.linalg import orth + >>> A = np.array([[2, 0, 0], [0, 5, 0]]) # rank 2 array + >>> orth(A) + array([[0., 1.], + [1., 0.]]) + >>> orth(A.T) + array([[0., 1.], + [1., 0.], + [0., 0.]]) + + """ + u, s, vh = svd(A, full_matrices=False) + M, N = u.shape[0], vh.shape[1] + if rcond is None: + rcond = numpy.finfo(s.dtype).eps * max(M, N) + tol = numpy.amax(s) * rcond + num = numpy.sum(s > tol, dtype=int) + Q = u[:, :num] + return Q + + +def null_space(A, rcond=None): + """ + Construct an orthonormal basis for the null space of A using SVD + + Parameters + ---------- + A : (M, N) array_like + Input array + rcond : float, optional + Relative condition number. Singular values ``s`` smaller than + ``rcond * max(s)`` are considered zero. + Default: floating point eps * max(M,N). + + Returns + ------- + Z : (N, K) ndarray + Orthonormal basis for the null space of A. 
+ K = dimension of effective null space, as determined by rcond + + See also + -------- + svd : Singular value decomposition of a matrix + orth : Matrix range + + Examples + -------- + One-dimensional null space: + + >>> from scipy.linalg import null_space + >>> A = np.array([[1, 1], [1, 1]]) + >>> ns = null_space(A) + >>> ns * np.sign(ns[0,0]) # Remove the sign ambiguity of the vector + array([[ 0.70710678], + [-0.70710678]]) + + Two-dimensional null space: + + >>> B = np.random.rand(3, 5) + >>> Z = null_space(B) + >>> Z.shape + (5, 2) + >>> np.allclose(B.dot(Z), 0) + True + + The basis vectors are orthonormal (up to rounding error): + + >>> Z.T.dot(Z) + array([[ 1.00000000e+00, 6.92087741e-17], + [ 6.92087741e-17, 1.00000000e+00]]) + + """ + u, s, vh = svd(A, full_matrices=True) + M, N = u.shape[0], vh.shape[1] + if rcond is None: + rcond = numpy.finfo(s.dtype).eps * max(M, N) + tol = numpy.amax(s) * rcond + num = numpy.sum(s > tol, dtype=int) + Q = vh[num:,:].T.conj() + return Q + + +def subspace_angles(A, B): + r""" + Compute the subspace angles between two matrices. + + Parameters + ---------- + A : (M, N) array_like + The first input array. + B : (M, K) array_like + The second input array. + + Returns + ------- + angles : ndarray, shape (min(N, K),) + The subspace angles between the column spaces of `A` and `B` in + descending order. + + See Also + -------- + orth + svd + + Notes + ----- + This computes the subspace angles according to the formula + provided in [1]_. For equivalence with MATLAB and Octave behavior, + use ``angles[0]``. + + .. versionadded:: 1.0 + + References + ---------- + .. [1] Knyazev A, Argentati M (2002) Principal Angles between Subspaces + in an A-Based Scalar Product: Algorithms and Perturbation + Estimates. SIAM J. Sci. Comput. 23:2008-2040. + + Examples + -------- + A Hadamard matrix, which has orthogonal columns, so we expect that + the suspace angle to be :math:`\frac{\pi}{2}`: + + >>> from scipy.linalg import hadamard, subspace_angles + >>> H = hadamard(4) + >>> print(H) + [[ 1 1 1 1] + [ 1 -1 1 -1] + [ 1 1 -1 -1] + [ 1 -1 -1 1]] + >>> np.rad2deg(subspace_angles(H[:, :2], H[:, 2:])) + array([ 90., 90.]) + + And the subspace angle of a matrix to itself should be zero: + + >>> subspace_angles(H[:, :2], H[:, :2]) <= 2 * np.finfo(float).eps + array([ True, True], dtype=bool) + + The angles between non-orthogonal subspaces are in between these extremes: + + >>> x = np.random.RandomState(0).randn(4, 3) + >>> np.rad2deg(subspace_angles(x[:, :2], x[:, [2]])) + array([ 55.832]) + """ + # Steps here omit the U and V calculation steps from the paper + + # 1. Compute orthonormal bases of column-spaces + A = _asarray_validated(A, check_finite=True) + if len(A.shape) != 2: + raise ValueError('expected 2D array, got shape %s' % (A.shape,)) + QA = orth(A) + del A + + B = _asarray_validated(B, check_finite=True) + if len(B.shape) != 2: + raise ValueError('expected 2D array, got shape %s' % (B.shape,)) + if len(B) != len(QA): + raise ValueError('A and B must have the same number of rows, got ' + '%s and %s' % (QA.shape[0], B.shape[0])) + QB = orth(B) + del B + + # 2. Compute SVD for cosine + QA_T_QB = dot(QA.T, QB) + sigma = svdvals(QA_T_QB) + + # 3. Compute matrix B + if QA.shape[1] >= QB.shape[1]: + B = QB - dot(QA, QA_T_QB) + else: + B = QA - dot(QB, QA_T_QB.T) + del QA, QB, QA_T_QB + + # 4. Compute SVD for sine + mask = sigma ** 2 >= 0.5 + if mask.any(): + mu_arcsin = arcsin(clip(svdvals(B, overwrite_a=True), -1., 1.)) + else: + mu_arcsin = 0. + + # 5. 
Compute the principal angles + # with reverse ordering of sigma because smallest sigma belongs to largest angle theta + theta = where(mask, mu_arcsin, arccos(clip(sigma[::-1], -1., 1.))) + return theta diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_svd.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_svd.pyc new file mode 100644 index 0000000..5f23f6a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/decomp_svd.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/flinalg.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/flinalg.py new file mode 100644 index 0000000..b07b10a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/flinalg.py @@ -0,0 +1,58 @@ +# +# Author: Pearu Peterson, March 2002 +# + +from __future__ import division, print_function, absolute_import + +__all__ = ['get_flinalg_funcs'] + +# The following ensures that possibly missing flavor (C or Fortran) is +# replaced with the available one. If none is available, exception +# is raised at the first attempt to use the resources. +try: + from . import _flinalg +except ImportError: + _flinalg = None +# from numpy.distutils.misc_util import PostponedException +# _flinalg = PostponedException() +# print _flinalg.__doc__ + has_column_major_storage = lambda a:0 + + +def has_column_major_storage(arr): + return arr.flags['FORTRAN'] + + +_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} # 'd' will be default for 'i',.. + + +def get_flinalg_funcs(names,arrays=(),debug=0): + """Return optimal available _flinalg function objects with + names. arrays are used to determine optimal prefix.""" + ordering = [] + for i in range(len(arrays)): + t = arrays[i].dtype.char + if t not in _type_conv: + t = 'd' + ordering.append((t,i)) + if ordering: + ordering.sort() + required_prefix = _type_conv[ordering[0][0]] + else: + required_prefix = 'd' + # Some routines may require special treatment. + # Handle them here before the default lookup. + + # Default lookup: + if ordering and has_column_major_storage(arrays[ordering[0][1]]): + suffix1,suffix2 = '_c','_r' + else: + suffix1,suffix2 = '_r','_c' + + funcs = [] + for name in names: + func_name = required_prefix + name + func = getattr(_flinalg,func_name+suffix1, + getattr(_flinalg,func_name+suffix2,None)) + funcs.append(func) + return tuple(funcs) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/flinalg.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/flinalg.pyc new file mode 100644 index 0000000..8ad35a5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/flinalg.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/interpolative.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/interpolative.py new file mode 100644 index 0000000..33218c8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/interpolative.py @@ -0,0 +1,969 @@ +#****************************************************************************** +# Copyright (C) 2013 Kenneth L. Ho +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
Redistributions in binary +# form must reproduce the above copyright notice, this list of conditions and +# the following disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# None of the names of the copyright holders may be used to endorse or +# promote products derived from this software without specific prior written +# permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +#****************************************************************************** + +# Python module for interfacing with `id_dist`. + +r""" +====================================================================== +Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`) +====================================================================== + +.. moduleauthor:: Kenneth L. Ho <klho@stanford.edu> + +.. versionadded:: 0.13 + +.. currentmodule:: scipy.linalg.interpolative + +An interpolative decomposition (ID) of a matrix :math:`A \in +\mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a +factorization + +.. math:: + A \Pi = + \begin{bmatrix} + A \Pi_{1} & A \Pi_{2} + \end{bmatrix} = + A \Pi_{1} + \begin{bmatrix} + I & T + \end{bmatrix}, + +where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with +:math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} = +A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`, +where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}` +are the *skeleton* and *interpolation matrices*, respectively. + +If :math:`A` does not have exact rank :math:`k`, then there exists an +approximation in the form of an ID such that :math:`A = BP + E`, where +:math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k + +1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k ++ 1}` is the best possible error for a rank-:math:`k` approximation +and, in fact, is achieved by the singular value decomposition (SVD) +:math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times +k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns +and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k +\times k}` is diagonal with nonnegative entries. The principal +advantages of using an ID over an SVD are that: + +- it is cheaper to construct; +- it preserves the structure of :math:`A`; and +- it is more efficient to compute with in light of the identity submatrix of :math:`P`. + +Routines +======== + +Main functionality: + +.. autosummary:: + :toctree: generated/ + + interp_decomp + reconstruct_matrix_from_id + reconstruct_interp_matrix + reconstruct_skel_matrix + id_to_svd + svd + estimate_spectral_norm + estimate_spectral_norm_diff + estimate_rank + +Support functions: + +.. 
autosummary::
+   :toctree: generated/
+
+   seed
+   rand
+
+
+References
+==========
+
+This module uses the ID software package [1]_ by Martinsson, Rokhlin,
+Shkolnisky, and Tygert, which is a Fortran library for computing IDs
+using various algorithms, including the rank-revealing QR approach of
+[2]_ and the more recent randomized methods described in [3]_, [4]_,
+and [5]_. This module exposes its functionality in a way convenient
+for Python users. Note that this module adds no functionality beyond
+organizing a simpler and more consistent interface.
+
+We also advise the user to consult the `documentation for the ID package
+<http://tygert.com/id_doc.4.pdf>`_.
+
+.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a
+   software package for low-rank approximation of matrices via interpolative
+   decompositions, version 0.2." http://tygert.com/id_doc.4.pdf.
+
+.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the
+   compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404,
+   2005. :doi:`10.1137/030602678`.
+
+.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.
+   Tygert. "Randomized algorithms for the low-rank approximation of matrices."
+   *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.
+   :doi:`10.1073/pnas.0709640104`.
+
+.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized
+   algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30
+   (1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.
+
+.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast
+   randomized algorithm for the approximation of matrices." *Appl. Comput.
+   Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.
+
+
+Tutorial
+========
+
+Initializing
+------------
+
+The first step is to import :mod:`scipy.linalg.interpolative` by issuing the
+command:
+
+>>> import scipy.linalg.interpolative as sli
+
+Now let's build a matrix. For this, we consider a Hilbert matrix, which is
+well known to have low rank:
+
+>>> from scipy.linalg import hilbert
+>>> n = 1000
+>>> A = hilbert(n)
+
+We can also do this explicitly via:
+
+>>> import numpy as np
+>>> n = 1000
+>>> A = np.empty((n, n), order='F')
+>>> for j in range(n):
+...     for i in range(n):
+...         A[i,j] = 1. / (i + j + 1)
+
+Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This
+instantiates the matrix in Fortran-contiguous order and is important for
+avoiding data copying when passing to the backend.
+
+We then define multiplication routines for the matrix by regarding it as a
+:class:`scipy.sparse.linalg.LinearOperator`:
+
+>>> from scipy.sparse.linalg import aslinearoperator
+>>> L = aslinearoperator(A)
+
+This automatically sets up methods describing the action of the matrix and its
+adjoint on a vector.
+
+Computing an ID
+---------------
+
+We have several choices of algorithm to compute an ID. These fall largely
+according to two dichotomies:
+
+1. how the matrix is represented, i.e., via its entries or via its action on a
+   vector; and
+2. whether to approximate it to a fixed relative precision or to a fixed rank.
+
+We step through each choice in turn below.
+
+In all cases, the ID is represented by three parameters:
+
+1. a rank ``k``;
+2. an index array ``idx``; and
+3. interpolation coefficients ``proj``.
+
+The ID is specified by the relation
+``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``; a short verification
+sketch appears below.
+
+From matrix entries
+...................
+
+We first consider a matrix given in terms of its entries.
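+
+Before stepping through the individual commands, here is a minimal
+verification sketch of that defining relation (it assumes the Hilbert
+matrix ``A`` built in the Initializing section; the routines are
+randomized, so the exact ``idx`` may vary from run to run):
+
+>>> k, idx, proj = sli.interp_decomp(A, 1e-8)
+>>> np.allclose(np.dot(A[:, idx[:k]], proj), A[:, idx[k:]], atol=1e-6)
+True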
+
+To compute an ID to a fixed precision, type:
+
+>>> k, idx, proj = sli.interp_decomp(A, eps)
+
+where ``eps < 1`` is the desired precision.
+
+To compute an ID to a fixed rank, use:
+
+>>> idx, proj = sli.interp_decomp(A, k)
+
+where ``k >= 1`` is the desired rank.
+
+Both algorithms use random sampling and are usually faster than the
+corresponding older, deterministic algorithms, which can be accessed via the
+commands:
+
+>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)
+
+and:
+
+>>> idx, proj = sli.interp_decomp(A, k, rand=False)
+
+respectively.
+
+From matrix action
+..................
+
+Now consider a matrix given in terms of its action on a vector as a
+:class:`scipy.sparse.linalg.LinearOperator`.
+
+To compute an ID to a fixed precision, type:
+
+>>> k, idx, proj = sli.interp_decomp(L, eps)
+
+To compute an ID to a fixed rank, use:
+
+>>> idx, proj = sli.interp_decomp(L, k)
+
+These algorithms are randomized.
+
+Reconstructing an ID
+--------------------
+
+The ID routines above do not output the skeleton and interpolation matrices
+explicitly but instead return the relevant information in a more compact (and
+sometimes more useful) form. To build these matrices, write:
+
+>>> B = sli.reconstruct_skel_matrix(A, k, idx)
+
+for the skeleton matrix and:
+
+>>> P = sli.reconstruct_interp_matrix(idx, proj)
+
+for the interpolation matrix. The ID approximation can then be computed as:
+
+>>> C = np.dot(B, P)
+
+This can also be constructed directly using:
+
+>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)
+
+without having to first compute ``P``.
+
+Alternatively, this can be done explicitly using:
+
+>>> B = A[:,idx[:k]]
+>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]
+>>> C = np.dot(B, P)
+
+Computing an SVD
+----------------
+
+An ID can be converted to an SVD via the command:
+
+>>> U, S, V = sli.id_to_svd(B, idx, proj)
+
+The SVD approximation is then:
+
+>>> C = np.dot(U, np.dot(np.diag(S), V.conj().T))
+
+The SVD can also be computed "fresh" by combining both the ID and conversion
+steps into one command. Following the various ID algorithms above, there are
+correspondingly various SVD algorithms that one can employ.
+
+From matrix entries
+...................
+
+We first consider SVD algorithms for a matrix given in terms of its entries.
+
+To compute an SVD to a fixed precision, type:
+
+>>> U, S, V = sli.svd(A, eps)
+
+To compute an SVD to a fixed rank, use:
+
+>>> U, S, V = sli.svd(A, k)
+
+Both algorithms use random sampling; for the deterministic versions, issue the
+keyword ``rand=False`` as above.
+
+From matrix action
+..................
+
+Now consider a matrix given in terms of its action on a vector.
+
+To compute an SVD to a fixed precision, type:
+
+>>> U, S, V = sli.svd(L, eps)
+
+To compute an SVD to a fixed rank, use:
+
+>>> U, S, V = sli.svd(L, k)
+
+Utility routines
+----------------
+
+Several utility routines are also available.
+
+To estimate the spectral norm of a matrix, use:
+
+>>> snorm = sli.estimate_spectral_norm(A)
+
+This algorithm is based on the randomized power method and thus requires only
+matrix-vector products. The number of iterations to take can be set using the
+keyword ``its`` (default: ``its=20``). The matrix is interpreted as a
+:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it
+as a :class:`numpy.ndarray`, in which case it is trivially converted using
+:func:`scipy.sparse.linalg.aslinearoperator`.
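+
+A minimal sketch, assuming the Hilbert matrix ``A`` from the Initializing
+section (the power method is randomized, so only the leading digits are
+stable from run to run):
+
+>>> snorm = sli.estimate_spectral_norm(A, its=40)
+>>> dense = np.linalg.norm(A, 2)  # exact dense value, for comparison
+
+The two numbers typically agree to several digits; increasing ``its``
+tightens the estimate at the cost of more matrix-vector products.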
+ +The same algorithm can also estimate the spectral norm of the difference of two +matrices ``A1`` and ``A2`` as follows: + +>>> diff = sli.estimate_spectral_norm_diff(A1, A2) + +This is often useful for checking the accuracy of a matrix approximation. + +Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank +of a matrix as well. This can be done with either: + +>>> k = sli.estimate_rank(A, eps) + +or: + +>>> k = sli.estimate_rank(L, eps) + +depending on the representation. The parameter ``eps`` controls the definition +of the numerical rank. + +Finally, the random number generation required for all randomized routines can +be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed +values to their original values, use: + +>>> sli.seed('default') + +To specify the seed values, use: + +>>> sli.seed(s) + +where ``s`` must be an integer or array of 55 floats. If an integer, the array +of floats is obtained by using `np.random.rand` with the given integer seed. + +To simply generate some random numbers, type: + +>>> sli.rand(n) + +where ``n`` is the number of random numbers to generate. + +Remarks +------- + +The above functions all automatically detect the appropriate interface and work +with both real and complex data types, passing input arguments to the proper +backend routine. + +""" + +import scipy.linalg._interpolative_backend as backend +import numpy as np + +_DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)") +_TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)") + + +def _is_real(A): + try: + if A.dtype == np.complex128: + return False + elif A.dtype == np.float64: + return True + else: + raise _DTYPE_ERROR + except AttributeError: + raise _TYPE_ERROR + + +def seed(seed=None): + """ + Seed the internal random number generator used in this ID package. + + The generator is a lagged Fibonacci method with 55-element internal state. + + Parameters + ---------- + seed : int, sequence, 'default', optional + If 'default', the random seed is reset to a default value. + + If `seed` is a sequence containing 55 floating-point numbers + in range [0,1], these are used to set the internal state of + the generator. + + If the value is an integer, the internal state is obtained + from `numpy.random.RandomState` (MT19937) with the integer + used as the initial seed. + + If `seed` is omitted (None), `numpy.random` is used to + initialize the generator. + + """ + # For details, see :func:`backend.id_srand`, :func:`backend.id_srandi`, + # and :func:`backend.id_srando`. + + if isinstance(seed, str) and seed == 'default': + backend.id_srando() + elif hasattr(seed, '__len__'): + state = np.asfortranarray(seed, dtype=float) + if state.shape != (55,): + raise ValueError("invalid input size") + elif state.min() < 0 or state.max() > 1: + raise ValueError("values not in range [0,1]") + backend.id_srandi(state) + elif seed is None: + backend.id_srandi(np.random.rand(55)) + else: + rnd = np.random.RandomState(seed) + backend.id_srandi(rnd.rand(55)) + + +def rand(*shape): + """ + Generate standard uniform pseudorandom numbers via a very efficient lagged + Fibonacci method. + + This routine is used for all random number generation in this package and + can affect ID and SVD results. + + Parameters + ---------- + shape + Shape of output array + + """ + # For details, see :func:`backend.id_srand`, and :func:`backend.id_srando`. 
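+    # Draw np.prod(shape) samples from the generator's flat uniform stream
+    # and reshape them to the requested output shape.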
+ return backend.id_srand(np.prod(shape)).reshape(shape) + + +def interp_decomp(A, eps_or_k, rand=True): + """ + Compute ID of a matrix. + + An ID of a matrix `A` is a factorization defined by a rank `k`, a column + index array `idx`, and interpolation coefficients `proj` such that:: + + numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]] + + The original matrix can then be reconstructed as:: + + numpy.hstack([A[:,idx[:k]], + numpy.dot(A[:,idx[:k]], proj)] + )[:,numpy.argsort(idx)] + + or via the routine :func:`reconstruct_matrix_from_id`. This can + equivalently be written as:: + + numpy.dot(A[:,idx[:k]], + numpy.hstack([numpy.eye(k), proj]) + )[:,np.argsort(idx)] + + in terms of the skeleton and interpolation matrices:: + + B = A[:,idx[:k]] + + and:: + + P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)] + + respectively. See also :func:`reconstruct_interp_matrix` and + :func:`reconstruct_skel_matrix`. + + The ID can be computed to any relative precision or rank (depending on the + value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then + this function has the output signature:: + + k, idx, proj = interp_decomp(A, eps_or_k) + + Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output + signature is:: + + idx, proj = interp_decomp(A, eps_or_k) + + .. This function automatically detects the form of the input parameters + and passes them to the appropriate backend. For details, see + :func:`backend.iddp_id`, :func:`backend.iddp_aid`, + :func:`backend.iddp_rid`, :func:`backend.iddr_id`, + :func:`backend.iddr_aid`, :func:`backend.iddr_rid`, + :func:`backend.idzp_id`, :func:`backend.idzp_aid`, + :func:`backend.idzp_rid`, :func:`backend.idzr_id`, + :func:`backend.idzr_aid`, and :func:`backend.idzr_rid`. + + Parameters + ---------- + A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec` + Matrix to be factored + eps_or_k : float or int + Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of + approximation. + rand : bool, optional + Whether to use random sampling if `A` is of type :class:`numpy.ndarray` + (randomized algorithms are always used if `A` is of type + :class:`scipy.sparse.linalg.LinearOperator`). + + Returns + ------- + k : int + Rank required to achieve specified relative precision if + `eps_or_k < 1`. + idx : :class:`numpy.ndarray` + Column index array. + proj : :class:`numpy.ndarray` + Interpolation coefficients. 
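+
+    Examples
+    --------
+    A minimal sketch of the fixed-rank form (illustrative only: the
+    factorization is randomized by default, so `idx` and `proj` can vary
+    between runs):
+
+    >>> import numpy as np
+    >>> import scipy.linalg.interpolative as sli
+    >>> A = np.dot(np.random.randn(9, 3), np.random.randn(3, 9))  # rank 3
+    >>> idx, proj = sli.interp_decomp(A, 3)
+    >>> B = A[:, idx[:3]]  # skeleton columns
+    >>> np.allclose(np.dot(B, proj), A[:, idx[3:]])
+    True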
+ """ + from scipy.sparse.linalg import LinearOperator + + real = _is_real(A) + + if isinstance(A, np.ndarray): + if eps_or_k < 1: + eps = eps_or_k + if rand: + if real: + k, idx, proj = backend.iddp_aid(eps, A) + else: + k, idx, proj = backend.idzp_aid(eps, A) + else: + if real: + k, idx, proj = backend.iddp_id(eps, A) + else: + k, idx, proj = backend.idzp_id(eps, A) + return k, idx - 1, proj + else: + k = int(eps_or_k) + if rand: + if real: + idx, proj = backend.iddr_aid(A, k) + else: + idx, proj = backend.idzr_aid(A, k) + else: + if real: + idx, proj = backend.iddr_id(A, k) + else: + idx, proj = backend.idzr_id(A, k) + return idx - 1, proj + elif isinstance(A, LinearOperator): + m, n = A.shape + matveca = A.rmatvec + if eps_or_k < 1: + eps = eps_or_k + if real: + k, idx, proj = backend.iddp_rid(eps, m, n, matveca) + else: + k, idx, proj = backend.idzp_rid(eps, m, n, matveca) + return k, idx - 1, proj + else: + k = int(eps_or_k) + if real: + idx, proj = backend.iddr_rid(m, n, matveca, k) + else: + idx, proj = backend.idzr_rid(m, n, matveca, k) + return idx - 1, proj + else: + raise _TYPE_ERROR + + +def reconstruct_matrix_from_id(B, idx, proj): + """ + Reconstruct matrix from its ID. + + A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx` + and `proj`, respectively, can be reconstructed as:: + + numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] + + See also :func:`reconstruct_interp_matrix` and + :func:`reconstruct_skel_matrix`. + + .. This function automatically detects the matrix data type and calls the + appropriate backend. For details, see :func:`backend.idd_reconid` and + :func:`backend.idz_reconid`. + + Parameters + ---------- + B : :class:`numpy.ndarray` + Skeleton matrix. + idx : :class:`numpy.ndarray` + Column index array. + proj : :class:`numpy.ndarray` + Interpolation coefficients. + + Returns + ------- + :class:`numpy.ndarray` + Reconstructed matrix. + """ + if _is_real(B): + return backend.idd_reconid(B, idx + 1, proj) + else: + return backend.idz_reconid(B, idx + 1, proj) + + +def reconstruct_interp_matrix(idx, proj): + """ + Reconstruct interpolation matrix from ID. + + The interpolation matrix can be reconstructed from the ID indices and + coefficients `idx` and `proj`, respectively, as:: + + P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)] + + The original matrix can then be reconstructed from its skeleton matrix `B` + via:: + + numpy.dot(B, P) + + See also :func:`reconstruct_matrix_from_id` and + :func:`reconstruct_skel_matrix`. + + .. This function automatically detects the matrix data type and calls the + appropriate backend. For details, see :func:`backend.idd_reconint` and + :func:`backend.idz_reconint`. + + Parameters + ---------- + idx : :class:`numpy.ndarray` + Column index array. + proj : :class:`numpy.ndarray` + Interpolation coefficients. + + Returns + ------- + :class:`numpy.ndarray` + Interpolation matrix. + """ + if _is_real(proj): + return backend.idd_reconint(idx + 1, proj) + else: + return backend.idz_reconint(idx + 1, proj) + + +def reconstruct_skel_matrix(A, k, idx): + """ + Reconstruct skeleton matrix from ID. + + The skeleton matrix can be reconstructed from the original matrix `A` and its + ID rank and indices `k` and `idx`, respectively, as:: + + B = A[:,idx[:k]] + + The original matrix can then be reconstructed via:: + + numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] + + See also :func:`reconstruct_matrix_from_id` and + :func:`reconstruct_interp_matrix`. + + .. 
This function automatically detects the matrix data type and calls the + appropriate backend. For details, see :func:`backend.idd_copycols` and + :func:`backend.idz_copycols`. + + Parameters + ---------- + A : :class:`numpy.ndarray` + Original matrix. + k : int + Rank of ID. + idx : :class:`numpy.ndarray` + Column index array. + + Returns + ------- + :class:`numpy.ndarray` + Skeleton matrix. + """ + if _is_real(A): + return backend.idd_copycols(A, k, idx + 1) + else: + return backend.idz_copycols(A, k, idx + 1) + + +def id_to_svd(B, idx, proj): + """ + Convert ID to SVD. + + The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and + coefficients `idx` and `proj`, respectively, is:: + + U, S, V = id_to_svd(B, idx, proj) + A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) + + See also :func:`svd`. + + .. This function automatically detects the matrix data type and calls the + appropriate backend. For details, see :func:`backend.idd_id2svd` and + :func:`backend.idz_id2svd`. + + Parameters + ---------- + B : :class:`numpy.ndarray` + Skeleton matrix. + idx : :class:`numpy.ndarray` + Column index array. + proj : :class:`numpy.ndarray` + Interpolation coefficients. + + Returns + ------- + U : :class:`numpy.ndarray` + Left singular vectors. + S : :class:`numpy.ndarray` + Singular values. + V : :class:`numpy.ndarray` + Right singular vectors. + """ + if _is_real(B): + U, V, S = backend.idd_id2svd(B, idx + 1, proj) + else: + U, V, S = backend.idz_id2svd(B, idx + 1, proj) + return U, S, V + + +def estimate_spectral_norm(A, its=20): + """ + Estimate spectral norm of a matrix by the randomized power method. + + .. This function automatically detects the matrix data type and calls the + appropriate backend. For details, see :func:`backend.idd_snorm` and + :func:`backend.idz_snorm`. + + Parameters + ---------- + A : :class:`scipy.sparse.linalg.LinearOperator` + Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the + `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). + its : int, optional + Number of power method iterations. + + Returns + ------- + float + Spectral norm estimate. + """ + from scipy.sparse.linalg import aslinearoperator + A = aslinearoperator(A) + m, n = A.shape + matvec = lambda x: A. matvec(x) + matveca = lambda x: A.rmatvec(x) + if _is_real(A): + return backend.idd_snorm(m, n, matveca, matvec, its=its) + else: + return backend.idz_snorm(m, n, matveca, matvec, its=its) + + +def estimate_spectral_norm_diff(A, B, its=20): + """ + Estimate spectral norm of the difference of two matrices by the randomized + power method. + + .. This function automatically detects the matrix data type and calls the + appropriate backend. For details, see :func:`backend.idd_diffsnorm` and + :func:`backend.idz_diffsnorm`. + + Parameters + ---------- + A : :class:`scipy.sparse.linalg.LinearOperator` + First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the + `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). + B : :class:`scipy.sparse.linalg.LinearOperator` + Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with + the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). + its : int, optional + Number of power method iterations. + + Returns + ------- + float + Spectral norm estimate of matrix difference. + """ + from scipy.sparse.linalg import aslinearoperator + A = aslinearoperator(A) + B = aslinearoperator(B) + m, n = A.shape + matvec1 = lambda x: A. 
matvec(x) + matveca1 = lambda x: A.rmatvec(x) + matvec2 = lambda x: B. matvec(x) + matveca2 = lambda x: B.rmatvec(x) + if _is_real(A): + return backend.idd_diffsnorm( + m, n, matveca1, matveca2, matvec1, matvec2, its=its) + else: + return backend.idz_diffsnorm( + m, n, matveca1, matveca2, matvec1, matvec2, its=its) + + +def svd(A, eps_or_k, rand=True): + """ + Compute SVD of a matrix via an ID. + + An SVD of a matrix `A` is a factorization:: + + A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) + + where `U` and `V` have orthonormal columns and `S` is nonnegative. + + The SVD can be computed to any relative precision or rank (depending on the + value of `eps_or_k`). + + See also :func:`interp_decomp` and :func:`id_to_svd`. + + .. This function automatically detects the form of the input parameters and + passes them to the appropriate backend. For details, see + :func:`backend.iddp_svd`, :func:`backend.iddp_asvd`, + :func:`backend.iddp_rsvd`, :func:`backend.iddr_svd`, + :func:`backend.iddr_asvd`, :func:`backend.iddr_rsvd`, + :func:`backend.idzp_svd`, :func:`backend.idzp_asvd`, + :func:`backend.idzp_rsvd`, :func:`backend.idzr_svd`, + :func:`backend.idzr_asvd`, and :func:`backend.idzr_rsvd`. + + Parameters + ---------- + A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` + Matrix to be factored, given as either a :class:`numpy.ndarray` or a + :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and + `rmatvec` methods (to apply the matrix and its adjoint). + eps_or_k : float or int + Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of + approximation. + rand : bool, optional + Whether to use random sampling if `A` is of type :class:`numpy.ndarray` + (randomized algorithms are always used if `A` is of type + :class:`scipy.sparse.linalg.LinearOperator`). + + Returns + ------- + U : :class:`numpy.ndarray` + Left singular vectors. + S : :class:`numpy.ndarray` + Singular values. + V : :class:`numpy.ndarray` + Right singular vectors. + """ + from scipy.sparse.linalg import LinearOperator + + real = _is_real(A) + + if isinstance(A, np.ndarray): + if eps_or_k < 1: + eps = eps_or_k + if rand: + if real: + U, V, S = backend.iddp_asvd(eps, A) + else: + U, V, S = backend.idzp_asvd(eps, A) + else: + if real: + U, V, S = backend.iddp_svd(eps, A) + else: + U, V, S = backend.idzp_svd(eps, A) + else: + k = int(eps_or_k) + if k > min(A.shape): + raise ValueError("Approximation rank %s exceeds min(A.shape) = " + " %s " % (k, min(A.shape))) + if rand: + if real: + U, V, S = backend.iddr_asvd(A, k) + else: + U, V, S = backend.idzr_asvd(A, k) + else: + if real: + U, V, S = backend.iddr_svd(A, k) + else: + U, V, S = backend.idzr_svd(A, k) + elif isinstance(A, LinearOperator): + m, n = A.shape + matvec = lambda x: A.matvec(x) + matveca = lambda x: A.rmatvec(x) + if eps_or_k < 1: + eps = eps_or_k + if real: + U, V, S = backend.iddp_rsvd(eps, m, n, matveca, matvec) + else: + U, V, S = backend.idzp_rsvd(eps, m, n, matveca, matvec) + else: + k = int(eps_or_k) + if real: + U, V, S = backend.iddr_rsvd(m, n, matveca, matvec, k) + else: + U, V, S = backend.idzr_rsvd(m, n, matveca, matvec, k) + else: + raise _TYPE_ERROR + return U, S, V + + +def estimate_rank(A, eps): + """ + Estimate matrix rank to a specified relative precision using randomized + methods. + + The matrix `A` can be given as either a :class:`numpy.ndarray` or a + :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used + for each case. 
If `A` is of type :class:`numpy.ndarray`, then the output + rank is typically about 8 higher than the actual numerical rank. + + .. This function automatically detects the form of the input parameters and + passes them to the appropriate backend. For details, + see :func:`backend.idd_estrank`, :func:`backend.idd_findrank`, + :func:`backend.idz_estrank`, and :func:`backend.idz_findrank`. + + Parameters + ---------- + A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` + Matrix whose rank is to be estimated, given as either a + :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator` + with the `rmatvec` method (to apply the matrix adjoint). + eps : float + Relative error for numerical rank definition. + + Returns + ------- + int + Estimated matrix rank. + """ + from scipy.sparse.linalg import LinearOperator + + real = _is_real(A) + + if isinstance(A, np.ndarray): + if real: + rank = backend.idd_estrank(eps, A) + else: + rank = backend.idz_estrank(eps, A) + if rank == 0: + # special return value for nearly full rank + rank = min(A.shape) + return rank + elif isinstance(A, LinearOperator): + m, n = A.shape + matveca = A.rmatvec + if real: + return backend.idd_findrank(eps, m, n, matveca) + else: + return backend.idz_findrank(eps, m, n, matveca) + else: + raise _TYPE_ERROR diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/interpolative.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/interpolative.pyc new file mode 100644 index 0000000..6bcb468 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/interpolative.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/lapack.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/lapack.py new file mode 100644 index 0000000..bf6f02b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/lapack.py @@ -0,0 +1,717 @@ +""" +Low-level LAPACK functions (:mod:`scipy.linalg.lapack`) +======================================================= + +This module contains low-level functions from the LAPACK library. + +The `*gegv` family of routines have been removed from LAPACK 3.6.0 +and have been deprecated in SciPy 0.17.0. They will be removed in +a future release. + +.. versionadded:: 0.12.0 + +.. note:: + + The common ``overwrite_<>`` option in many routines, allows the + input arrays to be overwritten to avoid extra memory allocation. + However this requires the array to satisfy two conditions + which are memory order and the data type to match exactly the + order and the type expected by the routine. + + As an example, if you pass a double precision float array to any + ``S....`` routine which expects single precision arguments, f2py + will create an intermediate array to match the argument types and + overwriting will be performed on that intermediate array. + + Similarly, if a C-contiguous array is passed, f2py will pass a + FORTRAN-contiguous array internally. Please make sure that these + details are satisfied. More information can be found in the f2py + documentation. + +.. warning:: + + These functions do little to no error checking. + It is possible to cause crashes by mis-using them, + so prefer using the higher-level routines in `scipy.linalg`. + +Finding functions +----------------- + +.. autosummary:: + + get_lapack_funcs + +All functions +------------- + +.. 
autosummary:: + :toctree: generated/ + + + sgbsv + dgbsv + cgbsv + zgbsv + + sgbtrf + dgbtrf + cgbtrf + zgbtrf + + sgbtrs + dgbtrs + cgbtrs + zgbtrs + + sgebal + dgebal + cgebal + zgebal + + sgees + dgees + cgees + zgees + + sgeev + dgeev + cgeev + zgeev + + sgeev_lwork + dgeev_lwork + cgeev_lwork + zgeev_lwork + + sgegv + dgegv + cgegv + zgegv + + sgehrd + dgehrd + cgehrd + zgehrd + + sgehrd_lwork + dgehrd_lwork + cgehrd_lwork + zgehrd_lwork + + sgelss + dgelss + cgelss + zgelss + + sgelss_lwork + dgelss_lwork + cgelss_lwork + zgelss_lwork + + sgelsd + dgelsd + cgelsd + zgelsd + + sgelsd_lwork + dgelsd_lwork + cgelsd_lwork + zgelsd_lwork + + sgelsy + dgelsy + cgelsy + zgelsy + + sgelsy_lwork + dgelsy_lwork + cgelsy_lwork + zgelsy_lwork + + sgeqp3 + dgeqp3 + cgeqp3 + zgeqp3 + + sgeqrf + dgeqrf + cgeqrf + zgeqrf + + sgerqf + dgerqf + cgerqf + zgerqf + + sgesdd + dgesdd + cgesdd + zgesdd + + sgesdd_lwork + dgesdd_lwork + cgesdd_lwork + zgesdd_lwork + + sgesvd + dgesvd + cgesvd + zgesvd + + sgesvd_lwork + dgesvd_lwork + cgesvd_lwork + zgesvd_lwork + + sgesv + dgesv + cgesv + zgesv + + sgesvx + dgesvx + cgesvx + zgesvx + + sgecon + dgecon + cgecon + zgecon + + ssysv + dsysv + csysv + zsysv + + ssysv_lwork + dsysv_lwork + csysv_lwork + zsysv_lwork + + ssysvx + dsysvx + csysvx + zsysvx + + ssysvx_lwork + dsysvx_lwork + csysvx_lwork + zsysvx_lwork + + ssygst + dsygst + + ssytrd + dsytrd + + ssytrd_lwork + dsytrd_lwork + + chetrd + zhetrd + + chetrd_lwork + zhetrd_lwork + + chesv + zhesv + + chesv_lwork + zhesv_lwork + + chesvx + zhesvx + + chesvx_lwork + zhesvx_lwork + + chegst + zhegst + + sgetrf + dgetrf + cgetrf + zgetrf + + sgetri + dgetri + cgetri + zgetri + + sgetri_lwork + dgetri_lwork + cgetri_lwork + zgetri_lwork + + sgetrs + dgetrs + cgetrs + zgetrs + + sgges + dgges + cgges + zgges + + sggev + dggev + cggev + zggev + + chbevd + zhbevd + + chbevx + zhbevx + + cheev + zheev + + cheevd + zheevd + + cheevr + zheevr + + chegv + zhegv + + chegvd + zhegvd + + chegvx + zhegvx + + slarf + dlarf + clarf + zlarf + + slarfg + dlarfg + clarfg + zlarfg + + slartg + dlartg + clartg + zlartg + + slasd4 + dlasd4 + + slaswp + dlaswp + claswp + zlaswp + + slauum + dlauum + clauum + zlauum + + spbsv + dpbsv + cpbsv + zpbsv + + spbtrf + dpbtrf + cpbtrf + zpbtrf + + spbtrs + dpbtrs + cpbtrs + zpbtrs + + sposv + dposv + cposv + zposv + + sposvx + dposvx + cposvx + zposvx + + spocon + dpocon + cpocon + zpocon + + spotrf + dpotrf + cpotrf + zpotrf + + spotri + dpotri + cpotri + zpotri + + spotrs + dpotrs + cpotrs + zpotrs + + crot + zrot + + strsyl + dtrsyl + ctrsyl + ztrsyl + + strtri + dtrtri + ctrtri + ztrtri + + strtrs + dtrtrs + ctrtrs + ztrtrs + + spftrf + dpftrf + cpftrf + zpftrf + + spftri + dpftri + cpftri + zpftri + + spftrs + dpftrs + cpftrs + zpftrs + + cunghr + zunghr + + cungqr + zungqr + + cungrq + zungrq + + cunmqr + zunmqr + + cunmrz + zunmrz + + cunmrz_lwork + zunmrz_lwork + + sgtsv + dgtsv + cgtsv + zgtsv + + sptsv + dptsv + cptsv + zptsv + + slamch + dlamch + + sorghr + dorghr + sorgqr + dorgqr + + sorgrq + dorgrq + + sormqr + dormqr + + sormrz + dormrz + + sormrz_lwork + dormrz_lwork + + ssbev + dsbev + + ssbevd + dsbevd + + ssbevx + dsbevx + + sstebz + dstebz + + sstemr + dstemr + + ssterf + dsterf + + sstein + dstein + + sstev + dstev + + ssyev + dsyev + + ssyevd + dsyevd + + ssyevr + dsyevr + + ssygv + dsygv + + ssygvd + dsygvd + + ssygvx + dsygvx + + ssfrk + dsfrk + + chfrk + zhfrk + + stfsm + dtfsm + ctfsm + ztfsm + + stpttf + dtpttf + ctpttf + ztpttf + + stfttp + dtfttp + ctfttp + 
ztfttp + + stfttr + dtfttr + ctfttr + ztfttr + + strttf + dtrttf + ctrttf + ztrttf + + stpttr + dtpttr + ctpttr + ztpttr + + strttp + dtrttp + ctrttp + ztrttp + + stfsm + dtfsm + ctfsm + dtfsm + + stzrzf + dtzrzf + ctzrzf + ztzrzf + + stzrzf_lwork + dtzrzf_lwork + ctzrzf_lwork + ztzrzf_lwork + + slange + dlange + clange + zlange + + ilaver + +""" +# +# Author: Pearu Peterson, March 2002 +# + +from __future__ import division, print_function, absolute_import + +__all__ = ['get_lapack_funcs'] + +import numpy as _np + +from .blas import _get_funcs + +# Backward compatibility: +from .blas import find_best_blas_type as find_best_lapack_type + +from scipy.linalg import _flapack +try: + from scipy.linalg import _clapack +except ImportError: + _clapack = None + +# Backward compatibility +from scipy._lib._util import DeprecatedImport as _DeprecatedImport +clapack = _DeprecatedImport("scipy.linalg.blas.clapack", "scipy.linalg.lapack") +flapack = _DeprecatedImport("scipy.linalg.blas.flapack", "scipy.linalg.lapack") + +# Expose all functions (only flapack --- clapack is an implementation detail) +empty_module = None +from scipy.linalg._flapack import * +del empty_module + +_dep_message = """The `*gegv` family of routines has been deprecated in +LAPACK 3.6.0 in favor of the `*ggev` family of routines. +The corresponding wrappers will be removed from SciPy in +a future release.""" + +cgegv = _np.deprecate(cgegv, old_name='cgegv', message=_dep_message) +dgegv = _np.deprecate(dgegv, old_name='dgegv', message=_dep_message) +sgegv = _np.deprecate(sgegv, old_name='sgegv', message=_dep_message) +zgegv = _np.deprecate(zgegv, old_name='zgegv', message=_dep_message) + +# Modyfy _flapack in this scope so the deprecation warnings apply to +# functions returned by get_lapack_funcs. +_flapack.cgegv = cgegv +_flapack.dgegv = dgegv +_flapack.sgegv = sgegv +_flapack.zgegv = zgegv + +# some convenience alias for complex functions +_lapack_alias = { + 'corghr': 'cunghr', 'zorghr': 'zunghr', + 'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork', + 'corgqr': 'cungqr', 'zorgqr': 'zungqr', + 'cormqr': 'cunmqr', 'zormqr': 'zunmqr', + 'corgrq': 'cungrq', 'zorgrq': 'zungrq', +} + + +def get_lapack_funcs(names, arrays=(), dtype=None): + """Return available LAPACK function objects from names. + + Arrays are used to determine the optimal prefix of LAPACK routines. + + Parameters + ---------- + names : str or sequence of str + Name(s) of LAPACK functions without type prefix. + + arrays : sequence of ndarrays, optional + Arrays can be given to determine optimal prefix of LAPACK + routines. If not given, double-precision routines will be + used, otherwise the most generic type in arrays will be used. + + dtype : str or dtype, optional + Data-type specifier. Not used if `arrays` is non-empty. + + Returns + ------- + funcs : list + List containing the found function(s). + + Notes + ----- + This routine automatically chooses between Fortran/C + interfaces. Fortran code is used whenever possible for arrays with + column major order. In all other cases, C code is preferred. + + In LAPACK, the naming convention is that all functions start with a + type prefix, which depends on the type of the principal + matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy + types {float32, float64, complex64, complex128} respectively, and + are stored in attribute ``typecode`` of the returned functions. + + Examples + -------- + Suppose we would like to use '?lange' routine which computes the selected + norm of an array. 
We pass our array in order to get the correct 'lange' + flavor. + + >>> import scipy.linalg as LA + >>> a = np.random.rand(3,2) + >>> x_lange = LA.get_lapack_funcs('lange', (a,)) + >>> x_lange.typecode + 'd' + >>> x_lange = LA.get_lapack_funcs('lange',(a*1j,)) + >>> x_lange.typecode + 'z' + + Several LAPACK routines work best when its internal WORK array has + the optimal size (big enough for fast computation and small enough to + avoid waste of memory). This size is determined also by a dedicated query + to the function which is often wrapped as a standalone function and + commonly denoted as ``###_lwork``. Below is an example for ``?sysv`` + + >>> import scipy.linalg as LA + >>> a = np.random.rand(1000,1000) + >>> b = np.random.rand(1000,1)*1j + >>> # We pick up zsysv and zsysv_lwork due to b array + ... xsysv, xlwork = LA.get_lapack_funcs(('sysv', 'sysv_lwork'), (a, b)) + >>> opt_lwork, _ = xlwork(a.shape[0]) # returns a complex for 'z' prefix + >>> udut, ipiv, x, info = xsysv(a, b, lwork=int(opt_lwork.real)) + + """ + return _get_funcs(names, arrays, dtype, + "LAPACK", _flapack, _clapack, + "flapack", "clapack", _lapack_alias) + + +def _compute_lwork(routine, *args, **kwargs): + """ + Round floating-point lwork returned by lapack to integer. + + Several LAPACK routines compute optimal values for LWORK, which + they return in a floating-point variable. However, for large + values of LWORK, single-precision floating point is not sufficient + to hold the exact value --- some LAPACK versions (<= 3.5.0 at + least) truncate the returned integer to single precision and in + some cases this can be smaller than the required value. + + Examples + -------- + >>> from scipy.linalg import lapack + >>> n = 5000 + >>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork')) + >>> lwork = lapack._compute_lwork(s_lw, n) + >>> lwork + 32000 + + """ + wi = routine(*args, **kwargs) + if len(wi) < 2: + raise ValueError('') + info = wi[-1] + if info != 0: + raise ValueError("Internal work array size computation failed: " + "%d" % (info,)) + + lwork = [w.real for w in wi[:-1]] + + dtype = getattr(routine, 'dtype', None) + if dtype == _np.float32 or dtype == _np.complex64: + # Single-precision routine -- take next fp value to work + # around possible truncation in LAPACK code + lwork = _np.nextafter(lwork, _np.inf, dtype=_np.float32) + + lwork = _np.array(lwork, _np.int64) + if _np.any(_np.logical_or(lwork < 0, lwork > _np.iinfo(_np.int32).max)): + raise ValueError("Too large work array required -- computation cannot " + "be performed with standard 32-bit LAPACK.") + lwork = lwork.astype(_np.int32) + if lwork.size == 1: + return lwork[0] + return lwork diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/lapack.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/lapack.pyc new file mode 100644 index 0000000..f6167f4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/lapack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/linalg_version.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/linalg_version.py new file mode 100644 index 0000000..239d5a1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/linalg_version.py @@ -0,0 +1,7 @@ +from __future__ import division, print_function, absolute_import + +major = 0 +minor = 4 +micro = 9 + +linalg_version = '%(major)d.%(minor)d.%(micro)d' % (locals()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/linalg_version.pyc 
b/project/venv/lib/python2.7/site-packages/scipy/linalg/linalg_version.pyc new file mode 100644 index 0000000..5e572af Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/linalg_version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/matfuncs.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/matfuncs.py new file mode 100644 index 0000000..0674751 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/matfuncs.py @@ -0,0 +1,670 @@ +# +# Author: Travis Oliphant, March 2002 +# + +from __future__ import division, print_function, absolute_import + +__all__ = ['expm','cosm','sinm','tanm','coshm','sinhm', + 'tanhm','logm','funm','signm','sqrtm', + 'expm_frechet', 'expm_cond', 'fractional_matrix_power'] + +from numpy import (Inf, dot, diag, product, logical_not, ravel, + transpose, conjugate, absolute, amax, sign, isfinite, single) +import numpy as np + +# Local imports +from .misc import norm +from .basic import solve, inv +from .special_matrices import triu +from .decomp_svd import svd +from .decomp_schur import schur, rsf2csf +from ._expm_frechet import expm_frechet, expm_cond +from ._matfuncs_sqrtm import sqrtm + +eps = np.finfo(float).eps +feps = np.finfo(single).eps + +_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} + + +############################################################################### +# Utility functions. + + +def _asarray_square(A): + """ + Wraps asarray with the extra requirement that the input be a square matrix. + + The motivation is that the matfuncs module has real functions that have + been lifted to square matrix functions. + + Parameters + ---------- + A : array_like + A square matrix. + + Returns + ------- + out : ndarray + An ndarray copy or view or other representation of A. + + """ + A = np.asarray(A) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected square array_like input') + return A + + +def _maybe_real(A, B, tol=None): + """ + Return either B or the real part of B, depending on properties of A and B. + + The motivation is that B has been computed as a complicated function of A, + and B may be perturbed by negligible imaginary components. + If A is real and B is complex with small imaginary components, + then return a real copy of B. The assumption in that case would be that + the imaginary components of B are numerical artifacts. + + Parameters + ---------- + A : ndarray + Input array whose type is to be checked as real vs. complex. + B : ndarray + Array to be returned, possibly without its imaginary part. + tol : float + Absolute tolerance. + + Returns + ------- + out : real or complex array + Either the input array B or only the real part of the input array B. + + """ + # Note that booleans and integers compare as real. + if np.isrealobj(A) and np.iscomplexobj(B): + if tol is None: + tol = {0:feps*1e3, 1:eps*1e6}[_array_precision[B.dtype.char]] + if np.allclose(B.imag, 0.0, atol=tol): + B = B.real + return B + + +############################################################################### +# Matrix functions. + + +def fractional_matrix_power(A, t): + """ + Compute the fractional power of a matrix. + + Proceeds according to the discussion in section (6) of [1]_. + + Parameters + ---------- + A : (N, N) array_like + Matrix whose fractional power to evaluate. + t : float + Fractional power. + + Returns + ------- + X : (N, N) array_like + The fractional power of the matrix. + + References + ---------- + .. 
[1] Nicholas J. Higham and Lijing lin (2011) + "A Schur-Pade Algorithm for Fractional Powers of a Matrix." + SIAM Journal on Matrix Analysis and Applications, + 32 (3). pp. 1056-1078. ISSN 0895-4798 + + Examples + -------- + >>> from scipy.linalg import fractional_matrix_power + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> b = fractional_matrix_power(a, 0.5) + >>> b + array([[ 0.75592895, 1.13389342], + [ 0.37796447, 1.88982237]]) + >>> np.dot(b, b) # Verify square root + array([[ 1., 3.], + [ 1., 4.]]) + + """ + # This fixes some issue with imports; + # this function calls onenormest which is in scipy.sparse. + A = _asarray_square(A) + import scipy.linalg._matfuncs_inv_ssq + return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(A, t) + + +def logm(A, disp=True): + """ + Compute matrix logarithm. + + The matrix logarithm is the inverse of + expm: expm(logm(`A`)) == `A` + + Parameters + ---------- + A : (N, N) array_like + Matrix whose logarithm to evaluate + disp : bool, optional + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + + Returns + ------- + logm : (N, N) ndarray + Matrix logarithm of `A` + errest : float + (if disp == False) + + 1-norm of the estimated error, ||err||_1 / ||A||_1 + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) + "Improved Inverse Scaling and Squaring Algorithms + for the Matrix Logarithm." + SIAM Journal on Scientific Computing, 34 (4). C152-C169. + ISSN 1095-7197 + + .. [2] Nicholas J. Higham (2008) + "Functions of Matrices: Theory and Computation" + ISBN 978-0-898716-46-7 + + .. [3] Nicholas J. Higham and Lijing lin (2011) + "A Schur-Pade Algorithm for Fractional Powers of a Matrix." + SIAM Journal on Matrix Analysis and Applications, + 32 (3). pp. 1056-1078. ISSN 0895-4798 + + Examples + -------- + >>> from scipy.linalg import logm, expm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> b = logm(a) + >>> b + array([[-1.02571087, 2.05142174], + [ 0.68380725, 1.02571087]]) + >>> expm(b) # Verify expm(logm(a)) returns a + array([[ 1., 3.], + [ 1., 4.]]) + + """ + A = _asarray_square(A) + # Avoid circular import ... this is OK, right? + import scipy.linalg._matfuncs_inv_ssq + F = scipy.linalg._matfuncs_inv_ssq._logm(A) + F = _maybe_real(A, F) + errtol = 1000*eps + #TODO use a better error approximation + errest = norm(expm(F)-A,1) / norm(A,1) + if disp: + if not isfinite(errest) or errest >= errtol: + print("logm result may be inaccurate, approximate err =", errest) + return F + else: + return F, errest + + +def expm(A): + """ + Compute the matrix exponential using Pade approximation. + + Parameters + ---------- + A : (N, N) array_like or sparse matrix + Matrix to be exponentiated. + + Returns + ------- + expm : (N, N) ndarray + Matrix exponential of `A`. + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009) + "A New Scaling and Squaring Algorithm for the Matrix Exponential." + SIAM Journal on Matrix Analysis and Applications. + 31 (3). pp. 970-989. 
ISSN 1095-7162 + + Examples + -------- + >>> from scipy.linalg import expm, sinm, cosm + + Matrix version of the formula exp(0) = 1: + + >>> expm(np.zeros((2,2))) + array([[ 1., 0.], + [ 0., 1.]]) + + Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta)) + applied to a matrix: + + >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]]) + >>> expm(1j*a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + >>> cosm(a) + 1j*sinm(a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + + """ + # Input checking and conversion is provided by sparse.linalg.expm(). + import scipy.sparse.linalg + return scipy.sparse.linalg.expm(A) + + +def cosm(A): + """ + Compute the matrix cosine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array + + Returns + ------- + cosm : (N, N) ndarray + Matrix cosine of A + + Examples + -------- + >>> from scipy.linalg import expm, sinm, cosm + + Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta)) + applied to a matrix: + + >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]]) + >>> expm(1j*a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + >>> cosm(a) + 1j*sinm(a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + + """ + A = _asarray_square(A) + if np.iscomplexobj(A): + return 0.5*(expm(1j*A) + expm(-1j*A)) + else: + return expm(1j*A).real + + +def sinm(A): + """ + Compute the matrix sine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array. + + Returns + ------- + sinm : (N, N) ndarray + Matrix sine of `A` + + Examples + -------- + >>> from scipy.linalg import expm, sinm, cosm + + Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta)) + applied to a matrix: + + >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]]) + >>> expm(1j*a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + >>> cosm(a) + 1j*sinm(a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + + """ + A = _asarray_square(A) + if np.iscomplexobj(A): + return -0.5j*(expm(1j*A) - expm(-1j*A)) + else: + return expm(1j*A).imag + + +def tanm(A): + """ + Compute the matrix tangent. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array. + + Returns + ------- + tanm : (N, N) ndarray + Matrix tangent of `A` + + Examples + -------- + >>> from scipy.linalg import tanm, sinm, cosm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> t = tanm(a) + >>> t + array([[ -2.00876993, -8.41880636], + [ -2.80626879, -10.42757629]]) + + Verify tanm(a) = sinm(a).dot(inv(cosm(a))) + + >>> s = sinm(a) + >>> c = cosm(a) + >>> s.dot(np.linalg.inv(c)) + array([[ -2.00876993, -8.41880636], + [ -2.80626879, -10.42757629]]) + + """ + A = _asarray_square(A) + return _maybe_real(A, solve(cosm(A), sinm(A))) + + +def coshm(A): + """ + Compute the hyperbolic matrix cosine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array. 
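An editor's aside on the implementation pattern in `tanm` above: it avoids forming an explicit matrix inverse and instead computes cos(A)^{-1} sin(A) with a single linear solve, which is cheaper and numerically safer; because sinm(A) and cosm(A) are functions of the same matrix they commute, so this equals sin(A) cos(A)^{-1}. A minimal sketch of the equivalence (illustrative, not part of the scipy sources):

    import numpy as np
    from scipy.linalg import cosm, sinm, solve

    a = np.array([[1.0, 3.0], [1.0, 4.0]])
    t_solve = solve(cosm(a), sinm(a))            # what tanm does internally
    t_inv = np.linalg.inv(cosm(a)).dot(sinm(a))  # explicit inverse, for comparison
    assert np.allclose(t_solve, t_inv)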
+ + Returns + ------- + coshm : (N, N) ndarray + Hyperbolic matrix cosine of `A` + + Examples + -------- + >>> from scipy.linalg import tanhm, sinhm, coshm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> c = coshm(a) + >>> c + array([[ 11.24592233, 38.76236492], + [ 12.92078831, 50.00828725]]) + + Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) + + >>> t = tanhm(a) + >>> s = sinhm(a) + >>> t - s.dot(np.linalg.inv(c)) + array([[ 2.72004641e-15, 4.55191440e-15], + [ 0.00000000e+00, -5.55111512e-16]]) + + """ + A = _asarray_square(A) + return _maybe_real(A, 0.5 * (expm(A) + expm(-A))) + + +def sinhm(A): + """ + Compute the hyperbolic matrix sine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array. + + Returns + ------- + sinhm : (N, N) ndarray + Hyperbolic matrix sine of `A` + + Examples + -------- + >>> from scipy.linalg import tanhm, sinhm, coshm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> s = sinhm(a) + >>> s + array([[ 10.57300653, 39.28826594], + [ 13.09608865, 49.86127247]]) + + Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) + + >>> t = tanhm(a) + >>> c = coshm(a) + >>> t - s.dot(np.linalg.inv(c)) + array([[ 2.72004641e-15, 4.55191440e-15], + [ 0.00000000e+00, -5.55111512e-16]]) + + """ + A = _asarray_square(A) + return _maybe_real(A, 0.5 * (expm(A) - expm(-A))) + + +def tanhm(A): + """ + Compute the hyperbolic matrix tangent. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array + + Returns + ------- + tanhm : (N, N) ndarray + Hyperbolic matrix tangent of `A` + + Examples + -------- + >>> from scipy.linalg import tanhm, sinhm, coshm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> t = tanhm(a) + >>> t + array([[ 0.3428582 , 0.51987926], + [ 0.17329309, 0.86273746]]) + + Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) + + >>> s = sinhm(a) + >>> c = coshm(a) + >>> t - s.dot(np.linalg.inv(c)) + array([[ 2.72004641e-15, 4.55191440e-15], + [ 0.00000000e+00, -5.55111512e-16]]) + + """ + A = _asarray_square(A) + return _maybe_real(A, solve(coshm(A), sinhm(A))) + + +def funm(A, func, disp=True): + """ + Evaluate a matrix function specified by a callable. + + Returns the value of matrix-valued function ``f`` at `A`. The + function ``f`` is an extension of the scalar-valued function `func` + to matrices. + + Parameters + ---------- + A : (N, N) array_like + Matrix at which to evaluate the function + func : callable + Callable object that evaluates a scalar function f. + Must be vectorized (eg. using vectorize). + disp : bool, optional + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + + Returns + ------- + funm : (N, N) ndarray + Value of the matrix function specified by func evaluated at `A` + errest : float + (if disp == False) + + 1-norm of the estimated error, ||err||_1 / ||A||_1 + + Examples + -------- + >>> from scipy.linalg import funm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> funm(a, lambda x: x*x) + array([[ 4., 15.], + [ 5., 19.]]) + >>> a.dot(a) + array([[ 4., 15.], + [ 5., 19.]]) + + Notes + ----- + This function implements the general algorithm based on Schur decomposition + (Algorithm 9.1.1. in [1]_). + + If the input matrix is known to be diagonalizable, then relying on the + eigendecomposition is likely to be faster. 
For example, if your matrix is + Hermitian, you can do + + >>> from scipy.linalg import eigh + >>> def funm_herm(a, func, check_finite=False): + ... w, v = eigh(a, check_finite=check_finite) + ... ## if you further know that your matrix is positive semidefinite, + ... ## you can optionally guard against precision errors by doing + ... # w = np.maximum(w, 0) + ... w = func(w) + ... return (v * w).dot(v.conj().T) + + References + ---------- + .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed. + + """ + A = _asarray_square(A) + # Perform Shur decomposition (lapack ?gees) + T, Z = schur(A) + T, Z = rsf2csf(T,Z) + n,n = T.shape + F = diag(func(diag(T))) # apply function to diagonal elements + F = F.astype(T.dtype.char) # e.g. when F is real but T is complex + + minden = abs(T[0,0]) + + # implement Algorithm 11.1.1 from Golub and Van Loan + # "matrix Computations." + for p in range(1,n): + for i in range(1,n-p+1): + j = i + p + s = T[i-1,j-1] * (F[j-1,j-1] - F[i-1,i-1]) + ksl = slice(i,j-1) + val = dot(T[i-1,ksl],F[ksl,j-1]) - dot(F[i-1,ksl],T[ksl,j-1]) + s = s + val + den = T[j-1,j-1] - T[i-1,i-1] + if den != 0.0: + s = s / den + F[i-1,j-1] = s + minden = min(minden,abs(den)) + + F = dot(dot(Z, F), transpose(conjugate(Z))) + F = _maybe_real(A, F) + + tol = {0:feps, 1:eps}[_array_precision[F.dtype.char]] + if minden == 0.0: + minden = tol + err = min(1, max(tol,(tol/minden)*norm(triu(T,1),1))) + if product(ravel(logical_not(isfinite(F))),axis=0): + err = Inf + if disp: + if err > 1000*tol: + print("funm result may be inaccurate, approximate err =", err) + return F + else: + return F, err + + +def signm(A, disp=True): + """ + Matrix sign function. + + Extension of the scalar sign(x) to matrices. + + Parameters + ---------- + A : (N, N) array_like + Matrix at which to evaluate the sign function + disp : bool, optional + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + + Returns + ------- + signm : (N, N) ndarray + Value of the sign function at `A` + errest : float + (if disp == False) + + 1-norm of the estimated error, ||err||_1 / ||A||_1 + + Examples + -------- + >>> from scipy.linalg import signm, eigvals + >>> a = [[1,2,3], [1,2,1], [1,1,1]] + >>> eigvals(a) + array([ 4.12488542+0.j, -0.76155718+0.j, 0.63667176+0.j]) + >>> eigvals(signm(a)) + array([-1.+0.j, 1.+0.j, 1.+0.j]) + + """ + A = _asarray_square(A) + + def rounded_sign(x): + rx = np.real(x) + if rx.dtype.char == 'f': + c = 1e3*feps*amax(x) + else: + c = 1e3*eps*amax(x) + return sign((absolute(rx) > c) * rx) + result, errest = funm(A, rounded_sign, disp=0) + errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]] + if errest < errtol: + return result + + # Handle signm of defective matrices: + + # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp., + # 8:237-250,1981" for how to improve the following (currently a + # rather naive) iteration process: + + # a = result # sometimes iteration converges faster but where?? + + # Shifting to avoid zero eigenvalues. How to ensure that shifting does + # not change the spectrum too much? 
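+    # Editor's annotation (added comment, not in the scipy sources): the
+    # loop below is the Newton iteration for the matrix sign function,
+    #     S_{k+1} = (S_k + inv(S_k)) / 2,
+    # which converges quadratically when the iterate has no eigenvalues on
+    # the imaginary axis; the shift c*np.identity(...) applied first nudges
+    # zero eigenvalues away so the inverses exist. At convergence
+    # Pp = (S0*S0 + S0)/2 should be an idempotent projector, so the
+    # residual norm(Pp*Pp - Pp, 1) doubles as the error estimate.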
+ vals = svd(A, compute_uv=0) + max_sv = np.amax(vals) + # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1] + # c = 0.5/min_nonzero_sv + c = 0.5/max_sv + S0 = A + c*np.identity(A.shape[0]) + prev_errest = errest + for i in range(100): + iS0 = inv(S0) + S0 = 0.5*(S0 + iS0) + Pp = 0.5*(dot(S0,S0)+S0) + errest = norm(dot(Pp,Pp)-Pp,1) + if errest < errtol or prev_errest == errest: + break + prev_errest = errest + if disp: + if not isfinite(errest) or errest >= errtol: + print("signm result may be inaccurate, approximate err =", errest) + return S0 + else: + return S0, errest diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/matfuncs.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/matfuncs.pyc new file mode 100644 index 0000000..81d3aea Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/matfuncs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/misc.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/misc.py new file mode 100644 index 0000000..b14c4b9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/misc.py @@ -0,0 +1,187 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.linalg import LinAlgError +from .blas import get_blas_funcs +from .lapack import get_lapack_funcs + +__all__ = ['LinAlgError', 'LinAlgWarning', 'norm'] + + +class LinAlgWarning(RuntimeWarning): + """ + The warning emitted when a linear algebra related operation is close + to fail conditions of the algorithm or loss of accuracy is expected. + """ + pass + + +def norm(a, ord=None, axis=None, keepdims=False): + """ + Matrix or vector norm. + + This function is able to return one of seven different matrix norms, + or one of an infinite number of vector norms (described below), depending + on the value of the ``ord`` parameter. + + Parameters + ---------- + a : (M,) or (M, N) array_like + Input array. If `axis` is None, `a` must be 1-D or 2-D. + ord : {non-zero int, inf, -inf, 'fro'}, optional + Order of the norm (see table under ``Notes``). inf means numpy's + `inf` object + axis : {int, 2-tuple of ints, None}, optional + If `axis` is an integer, it specifies the axis of `a` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `a` + is 1-D) or a matrix norm (when `a` is 2-D) is returned. + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `a`. + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + Notes + ----- + For values of ``ord <= 0``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 0 -- sum(x != 0) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. 
value) as below + -2 smallest singular value as below + other -- sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The ``axis`` and ``keepdims`` arguments are passed directly to + ``numpy.linalg.norm`` and are only usable if they are supported + by the version of numpy in use. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 + + Examples + -------- + >>> from scipy.linalg import norm + >>> a = np.arange(9) - 4.0 + >>> a + array([-4., -3., -2., -1., 0., 1., 2., 3., 4.]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4., -3., -2.], + [-1., 0., 1.], + [ 2., 3., 4.]]) + + >>> norm(a) + 7.745966692414834 + >>> norm(b) + 7.745966692414834 + >>> norm(b, 'fro') + 7.745966692414834 + >>> norm(a, np.inf) + 4 + >>> norm(b, np.inf) + 9 + >>> norm(a, -np.inf) + 0 + >>> norm(b, -np.inf) + 2 + + >>> norm(a, 1) + 20 + >>> norm(b, 1) + 7 + >>> norm(a, -1) + -4.6566128774142013e-010 + >>> norm(b, -1) + 6 + >>> norm(a, 2) + 7.745966692414834 + >>> norm(b, 2) + 7.3484692283495345 + + >>> norm(a, -2) + 0 + >>> norm(b, -2) + 1.8570331885190563e-016 + >>> norm(a, 3) + 5.8480354764257312 + >>> norm(a, -3) + 0 + + """ + # Differs from numpy only in non-finite handling and the use of blas. + a = np.asarray_chkfinite(a) + + # Only use optimized norms if axis and keepdims are not specified. + if a.dtype.char in 'fdFD' and axis is None and not keepdims: + + if ord in (None, 2) and (a.ndim == 1): + # use blas for fast and stable euclidean norm + nrm2 = get_blas_funcs('nrm2', dtype=a.dtype) + return nrm2(a) + + if a.ndim == 2 and axis is None and not keepdims: + # Use lapack for a couple fast matrix norms. + # For some reason the *lange frobenius norm is slow. + lange_args = None + # Make sure this works if the user uses the axis keywords + # to apply the norm to the transpose. + if ord == 1: + if np.isfortran(a): + lange_args = '1', a + elif np.isfortran(a.T): + lange_args = 'i', a.T + elif ord == np.inf: + if np.isfortran(a): + lange_args = 'i', a + elif np.isfortran(a.T): + lange_args = '1', a.T + if lange_args: + lange = get_lapack_funcs('lange', dtype=a.dtype) + return lange(*lange_args) + + # Filter out the axis and keepdims arguments if they aren't used so they + # are never inadvertently passed to a version of numpy that doesn't + # support them. 
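Because the code above dispatches 1-D Euclidean norms to BLAS ``nrm2``, it inherits BLAS's internal rescaling and sidesteps the overflow that the naive sum-of-squares formula hits for large entries. A small sketch of the difference (editor's example, assuming a standard numpy/scipy install):

    import numpy as np
    from scipy.linalg.blas import get_blas_funcs

    x = np.array([1e200, 1e200])           # squaring these overflows float64
    nrm2 = get_blas_funcs('nrm2', (x,))    # the same BLAS routine used above
    print(nrm2(x))                         # ~1.414e200, computed via rescaling
    print(np.sqrt(np.sum(x * x)))          # naive formula: inf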
+ if axis is not None: + if keepdims: + return np.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims) + return np.linalg.norm(a, ord=ord, axis=axis) + return np.linalg.norm(a, ord=ord) + + +def _datacopied(arr, original): + """ + Strict check for `arr` not sharing any data with `original`, + under the assumption that arr = asarray(original) + + """ + if arr is original: + return False + if not isinstance(original, np.ndarray) and hasattr(original, '__array__'): + return False + return arr.base is None diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/misc.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/misc.pyc new file mode 100644 index 0000000..d42c32b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/misc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/setup.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/setup.py new file mode 100644 index 0000000..783a301 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/setup.py @@ -0,0 +1,172 @@ +from __future__ import division, print_function, absolute_import + +import os +from os.path import join + + +def configuration(parent_package='', top_path=None): + from distutils.sysconfig import get_python_inc + from scipy._build_utils.system_info import get_info, NotFoundError, numpy_info + from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs + from scipy._build_utils import (get_g77_abi_wrappers, split_fortran_files) + + config = Configuration('linalg', parent_package, top_path) + + lapack_opt = get_info('lapack_opt') + + atlas_version = ([v[3:-3] for k, v in lapack_opt.get('define_macros', []) + if k == 'ATLAS_INFO']+[None])[0] + if atlas_version: + print(('ATLAS version: %s' % atlas_version)) + + # fblas: + sources = ['fblas.pyf.src'] + sources += get_g77_abi_wrappers(lapack_opt) + + config.add_extension('_fblas', + sources=sources, + depends=['fblas_l?.pyf.src'], + extra_info=lapack_opt + ) + + # flapack: + sources = ['flapack.pyf.src'] + sources += get_g77_abi_wrappers(lapack_opt) + dep_pfx = join('src', 'lapack_deprecations') + deprecated_lapack_routines = [join(dep_pfx, c + 'gegv.f') for c in 'cdsz'] + sources += deprecated_lapack_routines + + config.add_extension('_flapack', + sources=sources, + depends=['flapack_gen.pyf.src', + 'flapack_gen_banded.pyf.src', + 'flapack_gen_tri.pyf.src', + 'flapack_pos_def.pyf.src', + 'flapack_pos_def_tri.pyf.src', + 'flapack_sym_herm.pyf.src', + 'flapack_other.pyf.src', + 'flapack_user.pyf.src'], + extra_info=lapack_opt + ) + + if atlas_version is not None: + # cblas: + config.add_extension('_cblas', + sources=['cblas.pyf.src'], + depends=['cblas.pyf.src', 'cblas_l1.pyf.src'], + extra_info=lapack_opt + ) + + # clapack: + config.add_extension('_clapack', + sources=['clapack.pyf.src'], + depends=['clapack.pyf.src'], + extra_info=lapack_opt + ) + + # _flinalg: + config.add_extension('_flinalg', + sources=[join('src', 'det.f'), join('src', 'lu.f')], + extra_info=lapack_opt + ) + + # _interpolative: + routines_to_split = [ + 'dfftb1', + 'dfftf1', + 'dffti1', + 'dsint1', + 'dzfft1', + 'id_srand', + 'idd_copyints', + 'idd_id2svd0', + 'idd_pairsamps', + 'idd_permute', + 'idd_permuter', + 'idd_random_transf0', + 'idd_random_transf0_inv', + 'idd_random_transf_init0', + 'idd_subselect', + 'iddp_asvd0', + 'iddp_rsvd0', + 'iddr_asvd0', + 'iddr_rsvd0', + 'idz_estrank0', + 'idz_id2svd0', + 'idz_permute', + 'idz_permuter', + 'idz_random_transf0_inv', + 'idz_random_transf_init0', + 
'idz_random_transf_init00', + 'idz_realcomp', + 'idz_realcomplex', + 'idz_reco', + 'idz_subselect', + 'idzp_aid0', + 'idzp_aid1', + 'idzp_asvd0', + 'idzp_rsvd0', + 'idzr_asvd0', + 'idzr_reco', + 'idzr_rsvd0', + 'zfftb1', + 'zfftf1', + 'zffti1', + ] + print('Splitting linalg.interpolative Fortran source files') + dirname = os.path.split(os.path.abspath(__file__))[0] + fnames = split_fortran_files(join(dirname, 'src', 'id_dist', 'src'), + routines_to_split) + fnames = [join('src', 'id_dist', 'src', f) for f in fnames] + config.add_extension('_interpolative', fnames + ["interpolative.pyf"], + extra_info=lapack_opt + ) + + # _solve_toeplitz: + config.add_extension('_solve_toeplitz', + sources=[('_solve_toeplitz.c')], + include_dirs=[get_numpy_include_dirs()]) + + config.add_data_dir('tests') + + # Cython BLAS/LAPACK + config.add_data_files('cython_blas.pxd') + config.add_data_files('cython_lapack.pxd') + + sources = ['_blas_subroutine_wrappers.f', '_lapack_subroutine_wrappers.f'] + sources += get_g77_abi_wrappers(lapack_opt) + includes = numpy_info().get_include_dirs() + [get_python_inc()] + config.add_library('fwrappers', sources=sources, include_dirs=includes) + + config.add_extension('cython_blas', + sources=['cython_blas.c'], + depends=['cython_blas.pyx', 'cython_blas.pxd', + 'fortran_defs.h', '_blas_subroutines.h'], + include_dirs=['.'], + libraries=['fwrappers'], + extra_info=lapack_opt) + + config.add_extension('cython_lapack', + sources=['cython_lapack.c'], + depends=['cython_lapack.pyx', 'cython_lapack.pxd', + 'fortran_defs.h', '_lapack_subroutines.h'], + include_dirs=['.'], + libraries=['fwrappers'], + extra_info=lapack_opt) + + config.add_extension('_decomp_update', + sources=['_decomp_update.c']) + + # Add any license files + config.add_data_files('src/id_dist/doc/doc.tex') + config.add_data_files('src/lapack_deprecations/LICENSE') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + from linalg_version import linalg_version + + setup(version=linalg_version, + **configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/setup.pyc new file mode 100644 index 0000000..d9dd51f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/special_matrices.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/special_matrices.py new file mode 100644 index 0000000..5eb5db9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/special_matrices.py @@ -0,0 +1,1038 @@ +from __future__ import division, print_function, absolute_import + +import math +import numpy as np +from scipy._lib.six import xrange +from scipy._lib.six import string_types +from numpy.lib.stride_tricks import as_strided + + +__all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel', + 'hadamard', 'leslie', 'kron', 'block_diag', 'companion', + 'helmert', 'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft'] + + +#----------------------------------------------------------------------------- +# matrix construction functions +#----------------------------------------------------------------------------- + +# +# *Note*: tri{,u,l} is implemented in numpy, but an important bug was fixed in +# 2.0.0.dev-1af2f3, the following tri{,u,l} definitions are here for backwards +# compatibility. 
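Before the definitions that follow, here is a sketch (editor's illustration, not from the scipy sources) of the one-liner that `tri` below is built on: `np.greater_equal.outer` compares shifted row indices against column indices to produce the at-or-below-the-k-th-diagonal mask.

    import numpy as np

    N, M, k = 3, 5, 1
    # True where i + k >= j, i.e. at or below the k-th diagonal
    mask = np.greater_equal.outer(np.arange(k, N + k), np.arange(M))
    print(mask.astype(int))
    # [[1 1 0 0 0]
    #  [1 1 1 0 0]
    #  [1 1 1 1 0]]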
+ +def tri(N, M=None, k=0, dtype=None): + """ + Construct (N, M) matrix filled with ones at and below the k-th diagonal. + + The matrix has A[i,j] == 1 for i <= j + k + + Parameters + ---------- + N : int + The size of the first dimension of the matrix. + M : int or None, optional + The size of the second dimension of the matrix. If `M` is None, + `M = N` is assumed. + k : int, optional + Number of subdiagonal below which matrix is filled with ones. + `k` = 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0 + superdiagonal. + dtype : dtype, optional + Data type of the matrix. + + Returns + ------- + tri : (N, M) ndarray + Tri matrix. + + Examples + -------- + >>> from scipy.linalg import tri + >>> tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + >>> tri(3, 5, -1, dtype=int) + array([[0, 0, 0, 0, 0], + [1, 0, 0, 0, 0], + [1, 1, 0, 0, 0]]) + + """ + if M is None: + M = N + if isinstance(M, string_types): + #pearu: any objections to remove this feature? + # As tri(N,'d') is equivalent to tri(N,dtype='d') + dtype = M + M = N + m = np.greater_equal.outer(np.arange(k, N+k), np.arange(M)) + if dtype is None: + return m + else: + return m.astype(dtype) + + +def tril(m, k=0): + """ + Make a copy of a matrix with elements above the k-th diagonal zeroed. + + Parameters + ---------- + m : array_like + Matrix whose elements to return + k : int, optional + Diagonal above which to zero elements. + `k` == 0 is the main diagonal, `k` < 0 subdiagonal and + `k` > 0 superdiagonal. + + Returns + ------- + tril : ndarray + Return is the same shape and type as `m`. + + Examples + -------- + >>> from scipy.linalg import tril + >>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + + """ + m = np.asarray(m) + out = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char) * m + return out + + +def triu(m, k=0): + """ + Make a copy of a matrix with elements below the k-th diagonal zeroed. + + Parameters + ---------- + m : array_like + Matrix whose elements to return + k : int, optional + Diagonal below which to zero elements. + `k` == 0 is the main diagonal, `k` < 0 subdiagonal and + `k` > 0 superdiagonal. + + Returns + ------- + triu : ndarray + Return matrix with zeroed elements below the k-th diagonal and has + same shape and type as `m`. + + Examples + -------- + >>> from scipy.linalg import triu + >>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + + """ + m = np.asarray(m) + out = (1 - tri(m.shape[0], m.shape[1], k - 1, m.dtype.char)) * m + return out + + +def toeplitz(c, r=None): + """ + Construct a Toeplitz matrix. + + The Toeplitz matrix has constant diagonals, with c as its first column + and r as its first row. If r is not given, ``r == conjugate(c)`` is + assumed. + + Parameters + ---------- + c : array_like + First column of the matrix. Whatever the actual shape of `c`, it + will be converted to a 1-D array. + r : array_like, optional + First row of the matrix. If None, ``r = conjugate(c)`` is assumed; + in this case, if c[0] is real, the result is a Hermitian matrix. + r[0] is ignored; the first row of the returned matrix is + ``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be + converted to a 1-D array. + + Returns + ------- + A : (len(c), len(r)) ndarray + The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``. 
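The `toeplitz` body further below uses a stride trick rather than explicit indexing: it lays the reversed first column and the first row end-to-end in a single buffer, then walks that buffer with a negative row stride so every diagonal stays constant. A standalone sketch of the construction (illustrative, mirroring the code that follows):

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    c, r = np.array([1, 2, 3]), np.array([1, 4, 5, 6])
    vals = np.concatenate((c[::-1], r[1:]))      # [3 2 1 4 5 6]
    n = vals.strides[0]
    # Stepping -n per row and +n per column yields constant diagonals.
    T = as_strided(vals[len(c) - 1:], shape=(len(c), len(r)),
                   strides=(-n, n)).copy()
    print(T)
    # [[1 4 5 6]
    #  [2 1 4 5]
    #  [3 2 1 4]]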
+ + See Also + -------- + circulant : circulant matrix + hankel : Hankel matrix + solve_toeplitz : Solve a Toeplitz system. + + Notes + ----- + The behavior when `c` or `r` is a scalar, or when `c` is complex and + `r` is None, was changed in version 0.8.0. The behavior in previous + versions was undocumented and is no longer supported. + + Examples + -------- + >>> from scipy.linalg import toeplitz + >>> toeplitz([1,2,3], [1,4,5,6]) + array([[1, 4, 5, 6], + [2, 1, 4, 5], + [3, 2, 1, 4]]) + >>> toeplitz([1.0, 2+3j, 4-1j]) + array([[ 1.+0.j, 2.-3.j, 4.+1.j], + [ 2.+3.j, 1.+0.j, 2.-3.j], + [ 4.-1.j, 2.+3.j, 1.+0.j]]) + + """ + c = np.asarray(c).ravel() + if r is None: + r = c.conjugate() + else: + r = np.asarray(r).ravel() + # Form a 1D array containing a reversed c followed by r[1:] that could be + # strided to give us toeplitz matrix. + vals = np.concatenate((c[::-1], r[1:])) + out_shp = len(c), len(r) + n = vals.strides[0] + return as_strided(vals[len(c)-1:], shape=out_shp, strides=(-n, n)).copy() + + +def circulant(c): + """ + Construct a circulant matrix. + + Parameters + ---------- + c : (N,) array_like + 1-D array, the first column of the matrix. + + Returns + ------- + A : (N, N) ndarray + A circulant matrix whose first column is `c`. + + See Also + -------- + toeplitz : Toeplitz matrix + hankel : Hankel matrix + solve_circulant : Solve a circulant system. + + Notes + ----- + .. versionadded:: 0.8.0 + + Examples + -------- + >>> from scipy.linalg import circulant + >>> circulant([1, 2, 3]) + array([[1, 3, 2], + [2, 1, 3], + [3, 2, 1]]) + + """ + c = np.asarray(c).ravel() + # Form an extended array that could be strided to give circulant version + c_ext = np.concatenate((c[::-1], c[:0:-1])) + L = len(c) + n = c_ext.strides[0] + return as_strided(c_ext[L-1:], shape=(L, L), strides=(-n, n)).copy() + + +def hankel(c, r=None): + """ + Construct a Hankel matrix. + + The Hankel matrix has constant anti-diagonals, with `c` as its + first column and `r` as its last row. If `r` is not given, then + `r = zeros_like(c)` is assumed. + + Parameters + ---------- + c : array_like + First column of the matrix. Whatever the actual shape of `c`, it + will be converted to a 1-D array. + r : array_like, optional + Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed. + r[0] is ignored; the last row of the returned matrix is + ``[c[-1], r[1:]]``. Whatever the actual shape of `r`, it will be + converted to a 1-D array. + + Returns + ------- + A : (len(c), len(r)) ndarray + The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``. + + See Also + -------- + toeplitz : Toeplitz matrix + circulant : circulant matrix + + Examples + -------- + >>> from scipy.linalg import hankel + >>> hankel([1, 17, 99]) + array([[ 1, 17, 99], + [17, 99, 0], + [99, 0, 0]]) + >>> hankel([1,2,3,4], [4,7,7,8,9]) + array([[1, 2, 3, 4, 7], + [2, 3, 4, 7, 7], + [3, 4, 7, 7, 8], + [4, 7, 7, 8, 9]]) + + """ + c = np.asarray(c).ravel() + if r is None: + r = np.zeros_like(c) + else: + r = np.asarray(r).ravel() + # Form a 1D array of values to be used in the matrix, containing `c` + # followed by r[1:]. + vals = np.concatenate((c, r[1:])) + # Stride on concatenated array to get hankel matrix + out_shp = len(c), len(r) + n = vals.strides[0] + return as_strided(vals, shape=out_shp, strides=(n, n)).copy() + + +def hadamard(n, dtype=int): + """ + Construct a Hadamard matrix. + + Constructs an n-by-n Hadamard matrix, using Sylvester's + construction. `n` must be a power of 2. 
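Sylvester's construction, as used in the loop inside `hadamard` below, doubles the matrix order at each step via the block pattern [[H, H], [H, -H]]. A minimal sketch (editor's example, not from the scipy sources):

    import numpy as np

    H = np.array([[1]])
    for _ in range(2):   # two doublings: order 1 -> 2 -> 4
        H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
    print(H)
    # [[ 1  1  1  1]
    #  [ 1 -1  1 -1]
    #  [ 1  1 -1 -1]
    #  [ 1 -1 -1  1]]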
+ + Parameters + ---------- + n : int + The order of the matrix. `n` must be a power of 2. + dtype : dtype, optional + The data type of the array to be constructed. + + Returns + ------- + H : (n, n) ndarray + The Hadamard matrix. + + Notes + ----- + .. versionadded:: 0.8.0 + + Examples + -------- + >>> from scipy.linalg import hadamard + >>> hadamard(2, dtype=complex) + array([[ 1.+0.j, 1.+0.j], + [ 1.+0.j, -1.-0.j]]) + >>> hadamard(4) + array([[ 1, 1, 1, 1], + [ 1, -1, 1, -1], + [ 1, 1, -1, -1], + [ 1, -1, -1, 1]]) + + """ + + # This function is a slightly modified version of the + # function contributed by Ivo in ticket #675. + + if n < 1: + lg2 = 0 + else: + lg2 = int(math.log(n, 2)) + if 2 ** lg2 != n: + raise ValueError("n must be an positive integer, and n must be " + "a power of 2") + + H = np.array([[1]], dtype=dtype) + + # Sylvester's construction + for i in range(0, lg2): + H = np.vstack((np.hstack((H, H)), np.hstack((H, -H)))) + + return H + + +def leslie(f, s): + """ + Create a Leslie matrix. + + Given the length n array of fecundity coefficients `f` and the length + n-1 array of survival coefficients `s`, return the associated Leslie matrix. + + Parameters + ---------- + f : (N,) array_like + The "fecundity" coefficients. + s : (N-1,) array_like + The "survival" coefficients, has to be 1-D. The length of `s` + must be one less than the length of `f`, and it must be at least 1. + + Returns + ------- + L : (N, N) ndarray + The array is zero except for the first row, + which is `f`, and the first sub-diagonal, which is `s`. + The data-type of the array will be the data-type of ``f[0]+s[0]``. + + Notes + ----- + .. versionadded:: 0.8.0 + + The Leslie matrix is used to model discrete-time, age-structured + population growth [1]_ [2]_. In a population with `n` age classes, two sets + of parameters define a Leslie matrix: the `n` "fecundity coefficients", + which give the number of offspring per-capita produced by each age + class, and the `n` - 1 "survival coefficients", which give the + per-capita survival rate of each age class. + + References + ---------- + .. [1] P. H. Leslie, On the use of matrices in certain population + mathematics, Biometrika, Vol. 33, No. 3, 183--212 (Nov. 1945) + .. [2] P. H. Leslie, Some further notes on the use of matrices in + population mathematics, Biometrika, Vol. 35, No. 3/4, 213--245 + (Dec. 1948) + + Examples + -------- + >>> from scipy.linalg import leslie + >>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7]) + array([[ 0.1, 2. , 1. , 0.1], + [ 0.2, 0. , 0. , 0. ], + [ 0. , 0.8, 0. , 0. ], + [ 0. , 0. , 0.7, 0. ]]) + + """ + f = np.atleast_1d(f) + s = np.atleast_1d(s) + if f.ndim != 1: + raise ValueError("Incorrect shape for f. f must be one-dimensional") + if s.ndim != 1: + raise ValueError("Incorrect shape for s. s must be one-dimensional") + if f.size != s.size + 1: + raise ValueError("Incorrect lengths for f and s. The length" + " of s must be one less than the length of f.") + if s.size == 0: + raise ValueError("The length of s must be at least 1.") + + tmp = f[0] + s[0] + n = f.size + a = np.zeros((n, n), dtype=tmp.dtype) + a[0] = f + a[list(range(1, n)), list(range(0, n - 1))] = s + return a + + +def kron(a, b): + """ + Kronecker product. + + The result is the block matrix:: + + a[0,0]*b a[0,1]*b ... a[0,-1]*b + a[1,0]*b a[1,1]*b ... a[1,-1]*b + ... + a[-1,0]*b a[-1,1]*b ... 
a[-1,-1]*b + + Parameters + ---------- + a : (M, N) ndarray + Input array + b : (P, Q) ndarray + Input array + + Returns + ------- + A : (M*P, N*Q) ndarray + Kronecker product of `a` and `b`. + + Examples + -------- + >>> from numpy import array + >>> from scipy.linalg import kron + >>> kron(array([[1,2],[3,4]]), array([[1,1,1]])) + array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + + """ + if not a.flags['CONTIGUOUS']: + a = np.reshape(a, a.shape) + if not b.flags['CONTIGUOUS']: + b = np.reshape(b, b.shape) + o = np.outer(a, b) + o = o.reshape(a.shape + b.shape) + return np.concatenate(np.concatenate(o, axis=1), axis=1) + + +def block_diag(*arrs): + """ + Create a block diagonal matrix from provided arrays. + + Given the inputs `A`, `B` and `C`, the output will have these + arrays arranged on the diagonal:: + + [[A, 0, 0], + [0, B, 0], + [0, 0, C]] + + Parameters + ---------- + A, B, C, ... : array_like, up to 2-D + Input arrays. A 1-D array or array_like sequence of length `n` is + treated as a 2-D array with shape ``(1,n)``. + + Returns + ------- + D : ndarray + Array with `A`, `B`, `C`, ... on the diagonal. `D` has the + same dtype as `A`. + + Notes + ----- + If all the input arrays are square, the output is known as a + block diagonal matrix. + + Empty sequences (i.e., array-likes of zero size) will not be ignored. + Noteworthy, both [] and [[]] are treated as matrices with shape ``(1,0)``. + + Examples + -------- + >>> from scipy.linalg import block_diag + >>> A = [[1, 0], + ... [0, 1]] + >>> B = [[3, 4, 5], + ... [6, 7, 8]] + >>> C = [[7]] + >>> P = np.zeros((2, 0), dtype='int32') + >>> block_diag(A, B, C) + array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 3, 4, 5, 0], + [0, 0, 6, 7, 8, 0], + [0, 0, 0, 0, 0, 7]]) + >>> block_diag(A, P, B, C) + array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 3, 4, 5, 0], + [0, 0, 6, 7, 8, 0], + [0, 0, 0, 0, 0, 7]]) + >>> block_diag(1.0, [2, 3], [[4, 5], [6, 7]]) + array([[ 1., 0., 0., 0., 0.], + [ 0., 2., 3., 0., 0.], + [ 0., 0., 0., 4., 5.], + [ 0., 0., 0., 6., 7.]]) + + """ + if arrs == (): + arrs = ([],) + arrs = [np.atleast_2d(a) for a in arrs] + + bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2] + if bad_args: + raise ValueError("arguments in the following positions have dimension " + "greater than 2: %s" % bad_args) + + shapes = np.array([a.shape for a in arrs]) + out_dtype = np.find_common_type([arr.dtype for arr in arrs], []) + out = np.zeros(np.sum(shapes, axis=0), dtype=out_dtype) + + r, c = 0, 0 + for i, (rr, cc) in enumerate(shapes): + out[r:r + rr, c:c + cc] = arrs[i] + r += rr + c += cc + return out + + +def companion(a): + """ + Create a companion matrix. + + Create the companion matrix [1]_ associated with the polynomial whose + coefficients are given in `a`. + + Parameters + ---------- + a : (N,) array_like + 1-D array of polynomial coefficients. The length of `a` must be + at least two, and ``a[0]`` must not be zero. + + Returns + ------- + c : (N-1, N-1) ndarray + The first row of `c` is ``-a[1:]/a[0]``, and the first + sub-diagonal is all ones. The data-type of the array is the same + as the data-type of ``1.0*a[0]``. + + Raises + ------ + ValueError + If any of the following are true: a) ``a.ndim != 1``; + b) ``a.size < 2``; c) ``a[0] == 0``. + + Notes + ----- + .. versionadded:: 0.8.0 + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. 
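A useful property of the companion matrix documented here, sketched as a quick check (editor's example, not part of the scipy sources): its eigenvalues are exactly the roots of the polynomial, which is also how `numpy.roots` computes them.

    import numpy as np
    from scipy.linalg import companion

    p = [1, -10, 31, -30]                        # (x - 2)(x - 3)(x - 5)
    C = companion(p)
    print(np.sort(np.linalg.eigvals(C).real))    # [ 2.  3.  5.]
    print(np.sort(np.roots(p).real))             # same roots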
+ + Examples + -------- + >>> from scipy.linalg import companion + >>> companion([1, -10, 31, -30]) + array([[ 10., -31., 30.], + [ 1., 0., 0.], + [ 0., 1., 0.]]) + + """ + a = np.atleast_1d(a) + + if a.ndim != 1: + raise ValueError("Incorrect shape for `a`. `a` must be " + "one-dimensional.") + + if a.size < 2: + raise ValueError("The length of `a` must be at least 2.") + + if a[0] == 0: + raise ValueError("The first coefficient in `a` must not be zero.") + + first_row = -a[1:] / (1.0 * a[0]) + n = a.size + c = np.zeros((n - 1, n - 1), dtype=first_row.dtype) + c[0] = first_row + c[list(range(1, n - 1)), list(range(0, n - 2))] = 1 + return c + + +def helmert(n, full=False): + """ + Create a Helmert matrix of order `n`. + + This has applications in statistics, compositional or simplicial analysis, + and in Aitchison geometry. + + Parameters + ---------- + n : int + The size of the array to create. + full : bool, optional + If True the (n, n) ndarray will be returned. + Otherwise the submatrix that does not include the first + row will be returned. + Default: False. + + Returns + ------- + M : ndarray + The Helmert matrix. + The shape is (n, n) or (n-1, n) depending on the `full` argument. + + Examples + -------- + >>> from scipy.linalg import helmert + >>> helmert(5, full=True) + array([[ 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 ], + [ 0.70710678, -0.70710678, 0. , 0. , 0. ], + [ 0.40824829, 0.40824829, -0.81649658, 0. , 0. ], + [ 0.28867513, 0.28867513, 0.28867513, -0.8660254 , 0. ], + [ 0.2236068 , 0.2236068 , 0.2236068 , 0.2236068 , -0.89442719]]) + + """ + H = np.tril(np.ones((n, n)), -1) - np.diag(np.arange(n)) + d = np.arange(n) * np.arange(1, n+1) + H[0] = 1 + d[0] = n + H_full = H / np.sqrt(d)[:, np.newaxis] + if full: + return H_full + else: + return H_full[1:] + + +def hilbert(n): + """ + Create a Hilbert matrix of order `n`. + + Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`. + + Parameters + ---------- + n : int + The size of the array to create. + + Returns + ------- + h : (n, n) ndarray + The Hilbert matrix. + + See Also + -------- + invhilbert : Compute the inverse of a Hilbert matrix. + + Notes + ----- + .. versionadded:: 0.10.0 + + Examples + -------- + >>> from scipy.linalg import hilbert + >>> hilbert(3) + array([[ 1. , 0.5 , 0.33333333], + [ 0.5 , 0.33333333, 0.25 ], + [ 0.33333333, 0.25 , 0.2 ]]) + + """ + values = 1.0 / (1.0 + np.arange(2 * n - 1)) + h = hankel(values[:n], r=values[n - 1:]) + return h + + +def invhilbert(n, exact=False): + """ + Compute the inverse of the Hilbert matrix of order `n`. + + The entries in the inverse of a Hilbert matrix are integers. When `n` + is greater than 14, some entries in the inverse exceed the upper limit + of 64 bit integers. The `exact` argument provides two options for + dealing with these large integers. + + Parameters + ---------- + n : int + The order of the Hilbert matrix. + exact : bool, optional + If False, the data type of the array that is returned is np.float64, + and the array is an approximation of the inverse. + If True, the array is the exact integer inverse array. To represent + the exact inverse when n > 14, the returned array is an object array + of long integers. For n <= 14, the exact inverse is returned as an + array with data type np.int64. + + Returns + ------- + invh : (n, n) ndarray + The data type of the array is np.float64 if `exact` is False. + If `exact` is True, the data type is either np.int64 (for n <= 14) + or object (for n > 14). 
In the latter case, the objects in the + array will be long integers. + + See Also + -------- + hilbert : Create a Hilbert matrix. + + Notes + ----- + .. versionadded:: 0.10.0 + + Examples + -------- + >>> from scipy.linalg import invhilbert + >>> invhilbert(4) + array([[ 16., -120., 240., -140.], + [ -120., 1200., -2700., 1680.], + [ 240., -2700., 6480., -4200.], + [ -140., 1680., -4200., 2800.]]) + >>> invhilbert(4, exact=True) + array([[ 16, -120, 240, -140], + [ -120, 1200, -2700, 1680], + [ 240, -2700, 6480, -4200], + [ -140, 1680, -4200, 2800]], dtype=int64) + >>> invhilbert(16)[7,7] + 4.2475099528537506e+19 + >>> invhilbert(16, exact=True)[7,7] + 42475099528537378560L + + """ + from scipy.special import comb + if exact: + if n > 14: + dtype = object + else: + dtype = np.int64 + else: + dtype = np.float64 + invh = np.empty((n, n), dtype=dtype) + for i in xrange(n): + for j in xrange(0, i + 1): + s = i + j + invh[i, j] = ((-1) ** s * (s + 1) * + comb(n + i, n - j - 1, exact) * + comb(n + j, n - i - 1, exact) * + comb(s, i, exact) ** 2) + if i != j: + invh[j, i] = invh[i, j] + return invh + + +def pascal(n, kind='symmetric', exact=True): + """ + Returns the n x n Pascal matrix. + + The Pascal matrix is a matrix containing the binomial coefficients as + its elements. + + Parameters + ---------- + n : int + The size of the matrix to create; that is, the result is an n x n + matrix. + kind : str, optional + Must be one of 'symmetric', 'lower', or 'upper'. + Default is 'symmetric'. + exact : bool, optional + If `exact` is True, the result is either an array of type + numpy.uint64 (if n < 35) or an object array of Python long integers. + If `exact` is False, the coefficients in the matrix are computed using + `scipy.special.comb` with `exact=False`. The result will be a floating + point array, and the values in the array will not be the exact + coefficients, but this version is much faster than `exact=True`. + + Returns + ------- + p : (n, n) ndarray + The Pascal matrix. + + See Also + -------- + invpascal + + Notes + ----- + See https://en.wikipedia.org/wiki/Pascal_matrix for more information + about Pascal matrices. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy.linalg import pascal + >>> pascal(4) + array([[ 1, 1, 1, 1], + [ 1, 2, 3, 4], + [ 1, 3, 6, 10], + [ 1, 4, 10, 20]], dtype=uint64) + >>> pascal(4, kind='lower') + array([[1, 0, 0, 0], + [1, 1, 0, 0], + [1, 2, 1, 0], + [1, 3, 3, 1]], dtype=uint64) + >>> pascal(50)[-1, -1] + 25477612258980856902730428600L + >>> from scipy.special import comb + >>> comb(98, 49, exact=True) + 25477612258980856902730428600L + + """ + + from scipy.special import comb + if kind not in ['symmetric', 'lower', 'upper']: + raise ValueError("kind must be 'symmetric', 'lower', or 'upper'") + + if exact: + if n >= 35: + L_n = np.empty((n, n), dtype=object) + L_n.fill(0) + else: + L_n = np.zeros((n, n), dtype=np.uint64) + for i in range(n): + for j in range(i + 1): + L_n[i, j] = comb(i, j, exact=True) + else: + L_n = comb(*np.ogrid[:n, :n]) + + if kind == 'lower': + p = L_n + elif kind == 'upper': + p = L_n.T + else: + p = np.dot(L_n, L_n.T) + + return p + + +def invpascal(n, kind='symmetric', exact=True): + """ + Returns the inverse of the n x n Pascal matrix. + + The Pascal matrix is a matrix containing the binomial coefficients as + its elements. + + Parameters + ---------- + n : int + The size of the matrix to create; that is, the result is an n x n + matrix. + kind : str, optional + Must be one of 'symmetric', 'lower', or 'upper'. 
+ Default is 'symmetric'. + exact : bool, optional + If `exact` is True, the result is either an array of type + `numpy.int64` (if `n` <= 35) or an object array of Python integers. + If `exact` is False, the coefficients in the matrix are computed using + `scipy.special.comb` with `exact=False`. The result will be a floating + point array, and for large `n`, the values in the array will not be the + exact coefficients. + + Returns + ------- + invp : (n, n) ndarray + The inverse of the Pascal matrix. + + See Also + -------- + pascal + + Notes + ----- + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] "Pascal matrix", https://en.wikipedia.org/wiki/Pascal_matrix + .. [2] Cohen, A. M., "The inverse of a Pascal matrix", Mathematical + Gazette, 59(408), pp. 111-112, 1975. + + Examples + -------- + >>> from scipy.linalg import invpascal, pascal + >>> invp = invpascal(5) + >>> invp + array([[ 5, -10, 10, -5, 1], + [-10, 30, -35, 19, -4], + [ 10, -35, 46, -27, 6], + [ -5, 19, -27, 17, -4], + [ 1, -4, 6, -4, 1]]) + + >>> p = pascal(5) + >>> p.dot(invp) + array([[ 1., 0., 0., 0., 0.], + [ 0., 1., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 1., 0.], + [ 0., 0., 0., 0., 1.]]) + + An example of the use of `kind` and `exact`: + + >>> invpascal(5, kind='lower', exact=False) + array([[ 1., -0., 0., -0., 0.], + [-1., 1., -0., 0., -0.], + [ 1., -2., 1., -0., 0.], + [-1., 3., -3., 1., -0.], + [ 1., -4., 6., -4., 1.]]) + + """ + from scipy.special import comb + + if kind not in ['symmetric', 'lower', 'upper']: + raise ValueError("'kind' must be 'symmetric', 'lower' or 'upper'.") + + if kind == 'symmetric': + if exact: + if n > 34: + dt = object + else: + dt = np.int64 + else: + dt = np.float64 + invp = np.empty((n, n), dtype=dt) + for i in range(n): + for j in range(0, i + 1): + v = 0 + for k in range(n - i): + v += comb(i + k, k, exact=exact) * comb(i + k, i + k - j, + exact=exact) + invp[i, j] = (-1)**(i - j) * v + if i != j: + invp[j, i] = invp[i, j] + else: + # For the 'lower' and 'upper' cases, we computer the inverse by + # changing the sign of every other diagonal of the pascal matrix. + invp = pascal(n, kind=kind, exact=exact) + if invp.dtype == np.uint64: + # This cast from np.uint64 to int64 OK, because if `kind` is not + # "symmetric", the values in invp are all much less than 2**63. + invp = invp.view(np.int64) + + # The toeplitz matrix has alternating bands of 1 and -1. + invp *= toeplitz((-1)**np.arange(n)).astype(invp.dtype) + + return invp + + +def dft(n, scale=None): + """ + Discrete Fourier transform matrix. + + Create the matrix that computes the discrete Fourier transform of a + sequence [1]_. The n-th primitive root of unity used to generate the + matrix is exp(-2*pi*i/n), where i = sqrt(-1). + + Parameters + ---------- + n : int + Size the matrix to create. + scale : str, optional + Must be None, 'sqrtn', or 'n'. + If `scale` is 'sqrtn', the matrix is divided by `sqrt(n)`. + If `scale` is 'n', the matrix is divided by `n`. + If `scale` is None (the default), the matrix is not normalized, and the + return value is simply the Vandermonde matrix of the roots of unity. + + Returns + ------- + m : (n, n) ndarray + The DFT matrix. + + Notes + ----- + When `scale` is None, multiplying a vector by the matrix returned by + `dft` is mathematically equivalent to (but much less efficient than) + the calculation performed by `scipy.fftpack.fft`. + + .. versionadded:: 0.14.0 + + References + ---------- + .. 
[1] "DFT matrix", https://en.wikipedia.org/wiki/DFT_matrix + + Examples + -------- + >>> from scipy.linalg import dft + >>> np.set_printoptions(precision=5, suppress=True) + >>> x = np.array([1, 2, 3, 0, 3, 2, 1, 0]) + >>> m = dft(8) + >>> m.dot(x) # Compute the DFT of x + array([ 12.+0.j, -2.-2.j, 0.-4.j, -2.+2.j, 4.+0.j, -2.-2.j, + -0.+4.j, -2.+2.j]) + + Verify that ``m.dot(x)`` is the same as ``fft(x)``. + + >>> from scipy.fftpack import fft + >>> fft(x) # Same result as m.dot(x) + array([ 12.+0.j, -2.-2.j, 0.-4.j, -2.+2.j, 4.+0.j, -2.-2.j, + 0.+4.j, -2.+2.j]) + """ + if scale not in [None, 'sqrtn', 'n']: + raise ValueError("scale must be None, 'sqrtn', or 'n'; " + "%r is not valid." % (scale,)) + + omegas = np.exp(-2j * np.pi * np.arange(n) / n).reshape(-1, 1) + m = omegas ** np.arange(n) + if scale == 'sqrtn': + m /= math.sqrt(n) + elif scale == 'n': + m /= n + return m diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/special_matrices.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/special_matrices.pyc new file mode 100644 index 0000000..b73f9f5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/special_matrices.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/src/id_dist/doc/doc.tex b/project/venv/lib/python2.7/site-packages/scipy/linalg/src/id_dist/doc/doc.tex new file mode 100644 index 0000000..8bcece8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/src/id_dist/doc/doc.tex @@ -0,0 +1,977 @@ +\documentclass[letterpaper,12pt]{article} +\usepackage[margin=1in]{geometry} +\usepackage{verbatim} +\usepackage{amsmath} +\usepackage{supertabular} +\usepackage{array} + +\def\T{{\hbox{\scriptsize{\rm T}}}} +\def\epsilon{\varepsilon} +\def\bigoh{\mathcal{O}} +\def\phi{\varphi} +\def\st{{\hbox{\scriptsize{\rm st}}}} +\def\th{{\hbox{\scriptsize{\rm th}}}} +\def\x{\mathbf{x}} + + +\title{ID: A software package for low-rank approximation + of matrices via interpolative decompositions, Version 0.4} +\author{Per-Gunnar Martinsson, Vladimir Rokhlin,\\ + Yoel Shkolnisky, and Mark Tygert} + + +\begin{document} + +\maketitle + +\newpage + +{\parindent=0pt + +The present document and all of the software +in the accompanying distribution (which is contained in the directory +{\tt id\_dist} and its subdirectories, or in the file +{\tt id\_dist.tar.gz})\, is + +\bigskip + +Copyright \copyright\ 2014 by P.-G. Martinsson, V. Rokhlin, +Y. Shkolnisky, and M. Tygert. + +\bigskip + +All rights reserved. + +\bigskip + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +\begin{enumerate} +\item Redistributions of source code must retain the above copyright +notice, this list of conditions, and the following disclaimer. +\item Redistributions in binary form must reproduce the above copyright +notice, this list of conditions, and the following disclaimer in the +documentation and/or other materials provided with the distribution. +\item None of the names of the copyright holders may be used to endorse +or promote products derived from this software without specific prior +written permission. +\end{enumerate} + +\bigskip + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNERS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +} + +\newpage + +\tableofcontents + +\newpage + + + +\hrule + +\medskip + +\centerline{\Large \bf IMPORTANT} + +\medskip + +\hrule + +\medskip + +\noindent At the minimum, please read Subsection~\ref{warning} +and Section~\ref{naming} below, and beware that the {\it N.B.}'s +in the source code comments highlight key information about the routines; +{\it N.B.} stands for {\it nota bene} (Latin for ``note well''). + +\medskip + +\hrule + +\bigskip + + + +\section{Introduction} + +This software distribution provides Fortran routines +for computing low-rank approximations to matrices, +in the forms of interpolative decompositions (IDs) +and singular value decompositions (SVDs). +The routines use algorithms based on the ID. +The ID is also commonly known as +the approximation obtained via skeletonization, +the approximation obtained via subsampling, +and the approximation obtained via subset selection. +The ID provides many advantages in many applications, +and we suspect that it will become increasingly popular +once tools for its computation become more widely available. +This software distribution includes some such tools, +as well as tools for computing low-rank approximations +in the form of SVDs. +Section~\ref{defs} below defines IDs and SVDs, +and provides references to detailed discussions of the algorithms +used in this software package. + +Please beware that normalized power iterations are better suited than +the software in this distribution +for computing principal component analyses +in the typical case when the square of the signal-to-noise ratio +is not orders of magnitude greater than both dimensions +of the data matrix; see~\cite{halko-martinsson-tropp}. + +The algorithms used in this distribution have been optimized +for accuracy, efficiency, and reliability; +as a somewhat counterintuitive consequence, many must be randomized. +All randomized codes in this software package succeed +with overwhelmingly high probability (see, for example, +\cite{halko-martinsson-tropp}). +The truly paranoid are welcome to use the routines {\tt idd\_diffsnorm} +and {\tt idz\_diffsnorm} to evaluate rapidly the quality +of the approximations produced by the randomized algorithms +(as done, for example, in the files +{\tt idd\_a\_test.f}, {\tt idd\_r\_test.f}, {\tt idz\_a\_test.f}, +and {\tt idz\_r\_test.f} in the {\tt test} subdirectory +of the main directory {\tt id\_dist}). +In most circumstances, evaluating the quality of an approximation +via routines {\tt idd\_diffsnorm} or {\tt idz\_diffsnorm} is much faster +than forming the approximation to be evaluated. Still, we are unaware +of any instance in which a properly-compiled routine failed to produce +an accurate approximation. +To facilitate successful compilation, we encourage the user +to read the instructions in the next section, +and to read Section~\ref{naming}, too. 
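For readers arriving from the Python side: scipy builds this id_dist code into the `_interpolative` extension (see the `setup.py` earlier in this diff) and exposes it as `scipy.linalg.interpolative`. A hedged sketch of a specified-precision ID from Python, assuming those wrappers are available:

    import numpy as np
    import scipy.linalg.interpolative as sli

    # A numerically low-rank test matrix
    A = np.dot(np.random.rand(100, 12), np.random.rand(12, 100))
    # Specified-precision ID (the "p" routines described below):
    # the rank k is chosen adaptively to meet the tolerance.
    k, idx, proj = sli.interp_decomp(A, 1e-8)
    B = sli.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
    print(k, np.linalg.norm(A - B) / np.linalg.norm(A))  # k ~ 12, tiny error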
+ + + +\section{Compilation instructions} + + +Followed in numerical order, the subsections of this section +provide step-by-step instructions for compiling the software +under a Unix-compatible operating system. + + +\subsection{Beware that default command-line flags may not be + sufficient for compiling the source codes!} +\label{warning} + +The Fortran source codes in this distribution pass {\tt real*8} +variables as integer variables, integers as {\tt real*8}'s, +{\tt real*8}'s as {\tt complex*16}'s, and so on. +This is common practice in numerical codes, and is not an error; +be sure to provide the relevant command-line flags to the compiler +(for example, run {\tt fort77} and {\tt f2c} with the flag {\tt -!P}). +When following the compilation instructions +in Subsection~\ref{makefile_edit} below, +be sure to set {\tt FFLAGS} appropriately. + + +\subsection{Install LAPACK} + +The SVD routines in this distribution depend on LAPACK. +Before compiling the present distribution, +create the LAPACK and BLAS archive (library) {\tt .a} files; +information about installing LAPACK is available +at {\tt http://www.netlib.org/lapack/} (and several other web sites). + + +\subsection{Decompress and untar the file {\tt id\_dist.tar.gz}} + +At the command line, decompress and untar the file +{\tt id\_dist.tar.gz} by issuing a command such as +{\tt tar -xvvzf id\_dist.tar.gz}. +This will create a directory named {\tt id\_dist}. + + +\subsection{Edit the Makefile} +\label{makefile_edit} + +The directory {\tt id\_dist} contains a file named {\tt Makefile}. +In {\tt Makefile}, set the following: +% +\begin{itemize} +\item {\tt FC} is the Fortran compiler. +\item {\tt FFLAGS} is the set of command-line flags + (specifying optimization settings, for example) + for the Fortran compiler specified by {\tt FC}; + please heed the warning in Subsection~\ref{warning} above! +\item {\tt BLAS\_LIB} is the file-system path to the BLAS archive + (library) {\tt .a} file. +\item {\tt LAPACK\_LIB} is the file-system path to the LAPACK archive + (library) {\tt .a} file. +\item {\tt ARCH} is the archiver utility (usually {\tt ar}). +\item {\tt ARCHFLAGS} is the set of command-line flags + for the archiver specified by {\tt ARCH} needed + to create an archive (usually {\tt cr}). +\item {\tt RANLIB} is to be set to {\tt ranlib} + when {\tt ranlib} is available, and is to be set to {\tt echo} + when {\tt ranlib} is not available. +\end{itemize} + + +\subsection{Make and test the libraries} + +At the command line in a shell that adheres +to the Bourne shell conventions for redirection, issue the command +``{\tt make clean; make}'' to both create the archive (library) +{\tt id\_lib.a} and test it. +(In most modern Unix distributions, {\tt sh} is the Bourne shell, +or else is fully compatible with the Bourne shell; +the Korn shell {\tt ksh} and the Bourne-again shell {\tt bash} +also use the Bourne shell conventions for redirection.) +{\tt make} places the file {\tt id\_lib.a} +in the directory {\tt id\_dist}; the archive (library) file +{\tt id\_lib.a} contains machine code for all user-callable routines +in this distribution. + + + +\section{Naming conventions} +\label{naming} + +The names of routines and files in this distribution +start with prefixes, followed by an underscore (``\_''). +The prefixes are two to four characters in length, +and have the following meanings: +% +\begin{itemize} +\item The first two letters are always ``{\tt id}'', + the name of this distribution. 
+\item The third letter (when present) is either ``{\tt d}'' + or ``{\tt z}''; + ``{\tt d}'' stands for double precision ({\tt real*8}), + and ``{\tt z}'' stands for double complex ({\tt complex*16}). +\item The fourth letter (when present) is either ``{\tt r}'' + or ``{\tt p}''; + ``{\tt r}'' stands for specified rank, + and ``{\tt p}'' stands for specified precision. + The specified rank routines require the user to provide + the rank of the approximation to be constructed, + while the specified precision routines adjust the rank adaptively + to attain the desired precision. +\end{itemize} + +For example, {\tt iddr\_aid} is a {\tt real*8} routine which computes +an approximation of specified rank. +{\tt idz\_snorm} is a {\tt complex*16} routine. +{\tt id\_randperm} is yet another routine in this distribution. + + + +\section{Example programs} + +For examples of how to use the user-callable routines +in this distribution, see the source codes in subdirectory {\tt test} +of the main directory {\tt id\_dist}. + + + +\section{Directory structure} + +The main {\tt id\_dist} directory contains a Makefile, +the auxiliary text files {\tt README.txt} and {\tt size.txt}, +and the following subdirectories, described in the subsections below: +% +\begin{enumerate} +\item {\tt bin} +\item {\tt development} +\item {\tt doc} +\item {\tt src} +\item {\tt test} +\item {\tt tmp} +\end{enumerate} +% +If a ``{\tt make all}'' command has completed successfully, +then the main {\tt id\_dist} directory will also contain +an archive (library) file {\tt id\_lib.a} containing machine code +for all of the user-callable routines. + + +\subsection{Subdirectory {\tt bin}} + +Once all of the libraries have been made via the Makefile +in the main {\tt id\_dist} directory, +the subdirectory {\tt bin} will contain object files (machine code), +each compiled from the corresponding file of source code +in the subdirectory {\tt src} of {\tt id\_dist}. + + +\subsection{Subdirectory {\tt development}} + +Each Fortran file in the subdirectory {\tt development} +(except for {\tt dfft.f} and {\tt prini.f}) +specifies its dependencies at the top, then provides a main program +for testing and debugging, and finally provides source code +for a library of user-callable subroutines. +The Fortran file {\tt dfft.f} is a copy of P. N. Swarztrauber's FFTPACK library +for computing fast Fourier transforms. +The Fortran file {\tt prini.f} is a copy of V. Rokhlin's library +of formatted printing routines. +Both {\tt dfft.f} (version 4) and {\tt prini.f} are in the public domain. +The shell script {\tt RUNME.sh} runs shell scripts {\tt make\_src.sh} +and {\tt make\_test.sh}, which fill the subdirectories {\tt src} +and {\tt test} of the main directory {\tt id\_dist} +with source codes for user-callable routines +and with the main program testing codes. + + +\subsection{Subdirectory {\tt doc}} + +Subdirectory {\tt doc} contains this documentation, +supplementing comments in the source codes. + + +\subsection{Subdirectory {\tt src}} + +The files in the subdirectory {\tt src} provide source code +for software libraries. Each file in the subdirectory {\tt src} +(except for {\tt dfft.f} and {\tt prini.f}) is +the bottom part of the corresponding file +in the subdirectory {\tt development} of {\tt id\_dist}. +The file {\tt dfft.f} is just a copy +of P. N. Swarztrauber's FFTPACK library +for computing fast Fourier transforms. +The file {\tt prini.f} is a copy of V. Rokhlin's library +of formatted printing routines. 
+Both {\tt dfft.f} (version 4) and {\tt prini.f} are in the public domain. + + +\subsection{Subdirectory {\tt test}} + +The files in subdirectory {\tt test} provide source code +for testing and debugging. Each file in subdirectory {\tt test} is +the top part of the corresponding file +in subdirectory {\tt development} of {\tt id\_dist}, +and provides a main program and a list of its dependencies. +These codes provide examples of how to call the user-callable routines. + + + +\section{Catalog of the routines} + +The main routines for decomposing {\tt real*8} matrices are: +% +\begin{enumerate} +% +\item IDs of arbitrary (generally dense) matrices: +{\tt iddp\_id}, {\tt iddr\_id}, {\tt iddp\_aid}, {\tt iddr\_aid} +% +\item IDs of matrices that may be rapidly applied to arbitrary vectors +(as may the matrices' transposes): +{\tt iddp\_rid}, {\tt iddr\_rid} +% +\item SVDs of arbitrary (generally dense) matrices: +{\tt iddp\_svd}, {\tt iddr\_svd}, {\tt iddp\_asvd},\\{\tt iddr\_asvd} +% +\item SVDs of matrices that may be rapidly applied to arbitrary vectors +(as may the matrices' transposes): +{\tt iddp\_rsvd}, {\tt iddr\_rsvd} +% +\end{enumerate} + +Similarly, the main routines for decomposing {\tt complex*16} matrices +are: +% +\begin{enumerate} +% +\item IDs of arbitrary (generally dense) matrices: +{\tt idzp\_id}, {\tt idzr\_id}, {\tt idzp\_aid}, {\tt idzr\_aid} +% +\item IDs of matrices that may be rapidly applied to arbitrary vectors +(as may the matrices' adjoints): +{\tt idzp\_rid}, {\tt idzr\_rid} +% +\item SVDs of arbitrary (generally dense) matrices: +{\tt idzp\_svd}, {\tt idzr\_svd}, {\tt idzp\_asvd},\\{\tt idzr\_asvd} +% +\item SVDs of matrices that may be rapidly applied to arbitrary vectors +(as may the matrices' adjoints): +{\tt idzp\_rsvd}, {\tt idzr\_rsvd} +% +\end{enumerate} + +This distribution also includes routines for constructing pivoted $QR$ +decompositions (in {\tt idd\_qrpiv.f} and {\tt idz\_qrpiv.f}), for +estimating the spectral norms of matrices that may be applied rapidly +to arbitrary vectors as may their adjoints (in {\tt idd\_snorm.f} +and {\tt idz\_snorm.f}), for converting IDs to SVDs (in +{\tt idd\_id2svd.f} and {\tt idz\_id2svd.f}), and for computing rapidly +arbitrary subsets of the entries of the discrete Fourier transforms +of vectors (in {\tt idd\_sfft.f} and {\tt idz\_sfft.f}). 
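For readers coming from Python: the copy of scipy vendored in this venv wraps these same Fortran routines as scipy.linalg.interpolative. A short sketch of the main workflow (the array shapes, seed, and the 1e-9 tolerance are arbitrary choices for illustration):

    import numpy as np
    from scipy.linalg import interpolative as sli

    rng = np.random.RandomState(0)
    A = rng.randn(100, 6).dot(rng.randn(6, 80))   # numerically rank-6 matrix

    k, idx, proj = sli.interp_decomp(A, 1e-9)     # ID to precision 1e-9
    B = sli.reconstruct_skel_matrix(A, k, idx)    # the k skeleton columns of A
    A_id = sli.reconstruct_matrix_from_id(B, idx, proj)
    U, S, V = sli.id_to_svd(B, idx, proj)         # convert the ID into an SVD
    print(k, np.linalg.norm(A - A_id))            # expect k == 6 and a tiny error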
+ + +\subsection{List of the routines} + +The following is an alphabetical list of the routines +in this distribution, together with brief descriptions +of their functionality and the names of the files containing +the routines' source code: + +\begin{center} +% +\tablehead{\bf Routine & \bf Description & \bf Source file \\} +\tabletail{\hline} +% +\begin{supertabular}{>{\raggedright}p{1.2in} p{.53\textwidth} l} +% +\hline +{\tt id\_frand} & generates pseudorandom numbers drawn uniformly from +the interval $[0,1]$; this routine is more efficient than routine +{\tt id\_srand}, but cannot generate fewer than 55 pseudorandom numbers +per call & {\tt id\_rand.f} \\\hline +% +{\tt id\_frandi} & initializes the seed values for routine +{\tt id\_frand} to specified values & {\tt id\_rand.f} \\\hline +% +{\tt id\_frando} & initializes the seed values for routine +{\tt id\_frand} to their original, default values & {\tt id\_rand.f} +\\\hline +% +{\tt id\_randperm} & generates a uniformly random permutation & +{\tt id\_rand.f} \\\hline +% +{\tt id\_srand} & generates pseudorandom numbers drawn uniformly from +the interval $[0,1]$; this routine is less efficient than routine +{\tt id\_frand}, but can generate fewer than 55 pseudorandom numbers +per call & {\tt id\_rand.f} \\\hline +% +{\tt id\_srandi} & initializes the seed values for routine +{\tt id\_srand} to specified values & {\tt id\_rand.f} \\\hline +% +{\tt id\_srando} & initializes the seed values for routine +{\tt id\_srand} to their original, default values & {\tt id\_rand.f} +\\\hline +% +{\tt idd\_copycols} & collects together selected columns of a matrix & +{\tt idd\_id.f} \\\hline +% +{\tt idd\_diffsnorm} & estimates the spectral norm of the difference +between two matrices specified by routines for applying the matrices +and their transposes to arbitrary vectors; this routine uses the power +method with a random starting vector & {\tt idd\_snorm.f} \\\hline +% +{\tt idd\_enorm} & calculates the Euclidean norm of a vector & +{\tt idd\_snorm.f} \\\hline +% +{\tt idd\_estrank} & estimates the numerical rank of an arbitrary +(generally dense) matrix to a specified precision; this routine is +randomized, and must be initialized with routine {\tt idd\_frmi} & +{\tt iddp\_aid.f} \\\hline +% +{\tt idd\_frm} & transforms a vector into a vector which is +sufficiently scrambled to be subsampled, via a composition of Rokhlin's +random transform, random subselection, and a fast Fourier transform & +{\tt idd\_frm.f} \\\hline +% +{\tt idd\_frmi} & initializes routine {\tt idd\_frm} & {\tt idd\_frm.f} +\\\hline +% +{\tt idd\_getcols} & collects together selected columns of a matrix +specified by a routine for applying the matrix to arbitrary vectors & +{\tt idd\_id.f} \\\hline +% +{\tt idd\_house} & calculates the vector and scalar needed to apply the +Householder transformation reflecting a given vector into its first +entry & {\tt idd\_house.f} \\\hline +% +{\tt idd\_houseapp} & applies a Householder matrix to a vector & +{\tt idd\_house.f} \\\hline +% +{\tt idd\_id2svd} & converts an approximation to a matrix in the form +of an ID into an approximation in the form of an SVD & +{\tt idd\_id2svd.f} \\\hline +% +{\tt idd\_ldiv} & finds the greatest integer less than or equal to a +specified integer, that is divisible by another (larger) specified +integer & {\tt idd\_sfft.f} \\\hline +% +{\tt idd\_pairsamps} & calculates the indices of the pairs of integers +that the individual integers in a specified set belong to & +{\tt idd\_frm.f} \\\hline +% 
+{\tt idd\_permmult} & multiplies together a bunch of permutations & +{\tt idd\_qrpiv.f} \\\hline +% +{\tt idd\_qinqr} & reconstructs the $Q$ matrix in a $QR$ decomposition +from the output of routines {\tt iddp\_qrpiv} or {\tt iddr\_qrpiv} & +{\tt idd\_qrpiv.f} \\\hline +% +{\tt idd\_qrmatmat} & applies to multiple vectors collected together as +a matrix the $Q$ matrix (or its transpose) in the $QR$ decomposition of +a matrix, as described by the output of routines {\tt iddp\_qrpiv} or +{\tt iddr\_qrpiv}; to apply $Q$ (or its transpose) to a single vector +without having to provide a work array, use routine {\tt idd\_qrmatvec} +instead & {\tt idd\_qrpiv.f} \\\hline +% +{\tt idd\_qrmatvec} & applies to a single vector the $Q$ matrix (or its +transpose) in the $QR$ decomposition of a matrix, as described by the +output of routines {\tt iddp\_qrpiv} or {\tt iddr\_qrpiv}; to apply $Q$ +(or its transpose) to several vectors efficiently, use routine +{\tt idd\_qrmatmat} instead & {\tt idd\_qrpiv.f} \\\hline +% +{\tt idd\_random\_} {\tt transf} & applies rapidly a +random orthogonal matrix to a user-supplied vector & {\tt id\_rtrans.f} +\\\hline +% +{\tt idd\_random\_ transf\_init} & \raggedright initializes routines +{\tt idd\_random\_transf} and {\tt idd\_random\_transf\_inverse} & +{\tt id\_rtrans.f} \\\hline +% +{\tt idd\_random\_} {\tt transf\_inverse} & applies +rapidly the inverse of the operator applied by routine +{\tt idd\_random\_transf} & {\tt id\_rtrans.f} \\\hline +% +{\tt idd\_reconid} & reconstructs a matrix from its ID & +{\tt idd\_id.f} \\\hline +% +{\tt idd\_reconint} & constructs $P$ in the ID $A = B \, P$, where the +columns of $B$ are a subset of the columns of $A$, and $P$ is the +projection coefficient matrix, given {\tt list}, {\tt krank}, and +{\tt proj} output by routines {\tt iddr\_id}, {\tt iddp\_id}, +{\tt iddr\_aid}, {\tt iddp\_aid}, {\tt iddr\_rid}, or {\tt iddp\_rid} & +{\tt idd\_id.f} \\\hline +% +{\tt idd\_sfft} & rapidly computes a subset of the entries of the +discrete Fourier transform of a vector, composed with permutation +matrices both on input and on output & {\tt idd\_sfft.f} \\\hline +% +{\tt idd\_sffti} & initializes routine {\tt idd\_sfft} & +{\tt idd\_sfft.f} \\\hline +% +{\tt idd\_sfrm} & transforms a vector into a scrambled vector of +specified length, via a composition of Rokhlin's random transform, +random subselection, and a fast Fourier transform & {\tt idd\_frm.f} +\\\hline +% +{\tt idd\_sfrmi} & initializes routine {\tt idd\_sfrm} & +{\tt idd\_frm.f} \\\hline +% +{\tt idd\_snorm} & estimates the spectral norm of a matrix specified by +routines for applying the matrix and its transpose to arbitrary +vectors; this routine uses the power method with a random starting +vector & {\tt idd\_snorm.f} \\\hline +% +{\tt iddp\_aid} & computes the ID of an arbitrary (generally dense) +matrix, to a specified precision; this routine is randomized, and must +be initialized with routine {\tt idd\_frmi} & {\tt iddp\_aid.f} +\\\hline +% +{\tt iddp\_asvd} & computes the SVD of an arbitrary (generally dense) +matrix, to a specified precision; this routine is randomized, and must +be initialized with routine {\tt idd\_frmi} & {\tt iddp\_asvd.f} +\\\hline +% +{\tt iddp\_id} & computes the ID of an arbitrary (generally dense) +matrix, to a specified precision; this routine is often less efficient +than routine {\tt iddp\_aid} & {\tt idd\_id.f} \\\hline +% +{\tt iddp\_qrpiv} & computes the pivoted $QR$ decomposition of an +arbitrary (generally dense) matrix via 
Householder transformations,
+stopping at a specified precision of the decomposition &
+{\tt idd\_qrpiv.f} \\\hline
+%
+{\tt iddp\_rid} & computes the ID, to a specified precision, of a
+matrix specified by a routine for applying its transpose to arbitrary
+vectors; this routine is randomized & {\tt iddp\_rid.f} \\\hline
+%
+{\tt iddp\_rsvd} & computes the SVD, to a specified precision, of a
+matrix specified by routines for applying the matrix and its transpose
+to arbitrary vectors; this routine is randomized & {\tt iddp\_rsvd.f}
+\\\hline
+%
+{\tt iddp\_svd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is often less efficient
+than routine {\tt iddp\_asvd} & {\tt idd\_svd.f} \\\hline
+%
+{\tt iddr\_aid} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is randomized, and must be
+initialized by routine {\tt iddr\_aidi} & {\tt iddr\_aid.f} \\\hline
+%
+{\tt iddr\_aidi} & initializes routine {\tt iddr\_aid} &
+{\tt iddr\_aid.f} \\\hline
+%
+{\tt iddr\_asvd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is randomized, and must be
+initialized with routine {\tt iddr\_aidi} & {\tt iddr\_asvd.f}
+\\\hline
+%
+{\tt iddr\_id} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is often less efficient than
+routine {\tt iddr\_aid} & {\tt idd\_id.f} \\\hline
+%
+{\tt iddr\_qrpiv} & computes the pivoted $QR$ decomposition of an
+arbitrary (generally dense) matrix via Householder transformations,
+stopping at a specified rank of the decomposition & {\tt idd\_qrpiv.f}
+\\\hline
+%
+{\tt iddr\_rid} & computes the ID, to a specified rank, of a matrix
+specified by a routine for applying its transpose to arbitrary vectors;
+this routine is randomized & {\tt iddr\_rid.f} \\\hline
+%
+{\tt iddr\_rsvd} & computes the SVD, to a specified rank, of a matrix
+specified by routines for applying the matrix and its transpose to
+arbitrary vectors; this routine is randomized & {\tt iddr\_rsvd.f}
+\\\hline
+%
+{\tt iddr\_svd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is often less efficient than
+routine {\tt iddr\_asvd} & {\tt idd\_svd.f} \\\hline
+%
+{\tt idz\_copycols} & collects together selected columns of a matrix &
+{\tt idz\_id.f} \\\hline
+%
+{\tt idz\_diffsnorm} & estimates the spectral norm of the difference
+between two matrices specified by routines for applying the matrices
+and their adjoints to arbitrary vectors; this routine uses the power
+method with a random starting vector & {\tt idz\_snorm.f} \\\hline
+%
+{\tt idz\_enorm} & calculates the Euclidean norm of a vector &
+{\tt idz\_snorm.f} \\\hline
+%
+{\tt idz\_estrank} & estimates the numerical rank of an arbitrary
+(generally dense) matrix to a specified precision; this routine is
+randomized, and must be initialized with routine {\tt idz\_frmi} &
+{\tt idzp\_aid.f} \\\hline
+%
+{\tt idz\_frm} & transforms a vector into a vector which is
+sufficiently scrambled to be subsampled, via a composition of Rokhlin's
+random transform, random subselection, and a fast Fourier transform &
+{\tt idz\_frm.f} \\\hline
+%
+{\tt idz\_frmi} & initializes routine {\tt idz\_frm} & {\tt idz\_frm.f}
+\\\hline
+%
+{\tt idz\_getcols} & collects together selected columns of a matrix
+specified by a routine for applying the matrix to arbitrary vectors &
+{\tt idz\_id.f} \\\hline
+%
+{\tt idz\_house} & calculates the vector
and scalar needed to apply the +Householder transformation reflecting a given vector into its first +entry & {\tt idz\_house.f} \\\hline +% +{\tt idz\_houseapp} & applies a Householder matrix to a vector & +{\tt idz\_house.f} \\\hline +% +{\tt idz\_id2svd} & converts an approximation to a matrix in the form +of an ID into an approximation in the form of an SVD & +{\tt idz\_id2svd.f} \\\hline +% +{\tt idz\_ldiv} & finds the greatest integer less than or equal to a +specified integer, that is divisible by another (larger) specified +integer & {\tt idz\_sfft.f} \\\hline +% +{\tt idz\_permmult} & multiplies together a bunch of permutations & +{\tt idz\_qrpiv.f} \\\hline +% +{\tt idz\_qinqr} & reconstructs the $Q$ matrix in a $QR$ decomposition +from the output of routines {\tt idzp\_qrpiv} or {\tt idzr\_qrpiv} & +{\tt idz\_qrpiv.f} \\\hline +% +{\tt idz\_qrmatmat} & applies to multiple vectors collected together as +a matrix the $Q$ matrix (or its adjoint) in the $QR$ decomposition of +a matrix, as described by the output of routines {\tt idzp\_qrpiv} or +{\tt idzr\_qrpiv}; to apply $Q$ (or its adjoint) to a single vector +without having to provide a work array, use routine {\tt idz\_qrmatvec} +instead & {\tt idz\_qrpiv.f} \\\hline +% +{\tt idz\_qrmatvec} & applies to a single vector the $Q$ matrix (or its +adjoint) in the $QR$ decomposition of a matrix, as described by the +output of routines {\tt idzp\_qrpiv} or {\tt idzr\_qrpiv}; to apply $Q$ +(or its adjoint) to several vectors efficiently, use routine +{\tt idz\_qrmatmat} instead & {\tt idz\_qrpiv.f} \\\hline +% +{\tt idz\_random\_ transf} & applies rapidly a random unitary matrix to +a user-supplied vector & {\tt id\_rtrans.f} \\\hline +% +{\tt idz\_random\_ transf\_init} & \raggedright initializes routines +{\tt idz\_random\_transf} and {\tt idz\_random\_transf\_inverse} & +{\tt id\_rtrans.f} \\\hline +% +{\tt idz\_random\_ transf\_inverse} & applies rapidly the inverse of +the operator applied by routine {\tt idz\_random\_transf} & +{\tt id\_rtrans.f} \\\hline +% +{\tt idz\_reconid} & reconstructs a matrix from its ID & +{\tt idz\_id.f} \\\hline +% +{\tt idz\_reconint} & constructs $P$ in the ID $A = B \, P$, where the +columns of $B$ are a subset of the columns of $A$, and $P$ is the +projection coefficient matrix, given {\tt list}, {\tt krank}, and +{\tt proj} output by routines {\tt idzr\_id}, {\tt idzp\_id}, +{\tt idzr\_aid}, {\tt idzp\_aid}, {\tt idzr\_rid}, or {\tt idzp\_rid} & +{\tt idz\_id.f} \\\hline +% +{\tt idz\_sfft} & rapidly computes a subset of the entries of the +discrete Fourier transform of a vector, composed with permutation +matrices both on input and on output & {\tt idz\_sfft.f} \\\hline +% +{\tt idz\_sffti} & initializes routine {\tt idz\_sfft} & +{\tt idz\_sfft.f} \\\hline +% +{\tt idz\_sfrm} & transforms a vector into a scrambled vector of +specified length, via a composition of Rokhlin's random transform, +random subselection, and a fast Fourier transform & {\tt idz\_frm.f} +\\\hline +% +{\tt idz\_sfrmi} & initializes routine {\tt idz\_sfrm} & +{\tt idz\_frm.f} \\\hline +% +{\tt idz\_snorm} & estimates the spectral norm of a matrix specified by +routines for applying the matrix and its adjoint to arbitrary +vectors; this routine uses the power method with a random starting +vector & {\tt idz\_snorm.f} \\\hline +% +{\tt idzp\_aid} & computes the ID of an arbitrary (generally dense) +matrix, to a specified precision; this routine is randomized, and must +be initialized with routine {\tt idz\_frmi} & {\tt 
idzp\_aid.f}
+\\\hline
+%
+{\tt idzp\_asvd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is randomized, and must
+be initialized with routine {\tt idz\_frmi} & {\tt idzp\_asvd.f}
+\\\hline
+%
+{\tt idzp\_id} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is often less efficient
+than routine {\tt idzp\_aid} & {\tt idz\_id.f} \\\hline
+%
+{\tt idzp\_qrpiv} & computes the pivoted $QR$ decomposition of an
+arbitrary (generally dense) matrix via Householder transformations,
+stopping at a specified precision of the decomposition &
+{\tt idz\_qrpiv.f} \\\hline
+%
+{\tt idzp\_rid} & computes the ID, to a specified precision, of a
+matrix specified by a routine for applying its adjoint to arbitrary
+vectors; this routine is randomized & {\tt idzp\_rid.f} \\\hline
+%
+{\tt idzp\_rsvd} & computes the SVD, to a specified precision, of a
+matrix specified by routines for applying the matrix and its adjoint
+to arbitrary vectors; this routine is randomized & {\tt idzp\_rsvd.f}
+\\\hline
+%
+{\tt idzp\_svd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified precision; this routine is often less efficient
+than routine {\tt idzp\_asvd} & {\tt idz\_svd.f} \\\hline
+%
+{\tt idzr\_aid} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is randomized, and must be
+initialized by routine {\tt idzr\_aidi} & {\tt idzr\_aid.f} \\\hline
+%
+{\tt idzr\_aidi} & initializes routine {\tt idzr\_aid} &
+{\tt idzr\_aid.f} \\\hline
+%
+{\tt idzr\_asvd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is randomized, and must be
+initialized with routine {\tt idzr\_aidi} & {\tt idzr\_asvd.f}
+\\\hline
+%
+{\tt idzr\_id} & computes the ID of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is often less efficient than
+routine {\tt idzr\_aid} & {\tt idz\_id.f} \\\hline
+%
+{\tt idzr\_qrpiv} & computes the pivoted $QR$ decomposition of an
+arbitrary (generally dense) matrix via Householder transformations,
+stopping at a specified rank of the decomposition & {\tt idz\_qrpiv.f}
+\\\hline
+%
+{\tt idzr\_rid} & computes the ID, to a specified rank, of a matrix
+specified by a routine for applying its adjoint to arbitrary vectors;
+this routine is randomized & {\tt idzr\_rid.f} \\\hline
+%
+{\tt idzr\_rsvd} & computes the SVD, to a specified rank, of a matrix
+specified by routines for applying the matrix and its adjoint to
+arbitrary vectors; this routine is randomized & {\tt idzr\_rsvd.f}
+\\\hline
+%
+{\tt idzr\_svd} & computes the SVD of an arbitrary (generally dense)
+matrix, to a specified rank; this routine is often less efficient than
+routine {\tt idzr\_asvd} & {\tt idz\_svd.f} \\
+%
+\end{supertabular}
+\end{center}
+
+
+
+\section{Documentation in the source codes}
+
+Each routine in the source codes includes documentation
+in the comments immediately following the declaration
+of the subroutine's calling sequence.
+This documentation describes the purpose of the routine,
+the input and output variables, and the required work arrays (if any).
+This documentation also cites relevant references.
+Please pay attention to the {\it N.B.}'s;
+{\it N.B.} stands for {\it nota bene} (Latin for ``note well'')
+and highlights important information about the routines.
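The specified-rank ({\tt r}) versus specified-precision ({\tt p}) split in the catalog above maps directly onto the second argument of scipy's wrapper: an integer selects the fixed-rank path, a floating-point tolerance the adaptive-rank path. A sketch, where the rank-4 test matrix, seed, and tolerance are arbitrary illustrative choices:

    import numpy as np
    from scipy.linalg import interpolative as sli

    rng = np.random.RandomState(1)
    A = rng.randn(60, 4).dot(rng.randn(4, 50))  # rank-4 test matrix

    idx, proj = sli.interp_decomp(A, 4)         # rank given, cf. iddr_aid
    k, idx, proj = sli.interp_decomp(A, 1e-8)   # precision given, cf. iddp_aid
    print(k)                                    # adaptively chosen rank; 4 expected here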
+
+
+\section{Notation and decompositions}
+\label{defs}
+
+This section sets notational conventions employed
+in this documentation and the associated software,
+and defines both the singular value decomposition (SVD)
+and the interpolative decomposition (ID).
+For information concerning other mathematical objects
+used in the code (such as Householder transformations,
+pivoted $QR$ decompositions, and discrete and fast Fourier transforms
+--- DFTs and FFTs), see, for example,~\cite{golub-van_loan}.
+For detailed descriptions and proofs of the mathematical facts
+discussed in the present section, see, for example,
+\cite{golub-van_loan} and the references
+in~\cite{halko-martinsson-tropp}.
+
+Throughout this document and the accompanying software distribution,
+$\| \x \|$ always denotes the Euclidean norm of the vector $\x$,
+and $\| A \|$ always denotes the spectral norm of the matrix $A$.
+Subsection~\ref{Euclidean} below defines the Euclidean norm;
+Subsection~\ref{spectral} below defines the spectral norm.
+We use $A^*$ to denote the adjoint of the matrix $A$.
+
+
+\subsection{Euclidean norm}
+\label{Euclidean}
+
+For any positive integer $n$, and vector $\x$ of length $n$,
+the Euclidean ($l^2$) norm $\| \x \|$ is
+%
+\begin{equation}
+\| \x \| = \sqrt{ \sum_{k=1}^n |x_k|^2 },
+\end{equation}
+%
+where $x_1$,~$x_2$, \dots, $x_{n-1}$,~$x_n$ are the entries of $\x$.
+
+
+\subsection{Spectral norm}
+\label{spectral}
+
+For any positive integers $m$ and $n$, and $m \times n$ matrix $A$,
+the spectral ($l^2$ operator) norm $\| A \|$ is
+%
+\begin{equation}
+\| A_{m \times n} \|
+= \max \frac{\| A_{m \times n} \, \x_{n \times 1} \|}
+            {\| \x_{n \times 1} \|},
+\end{equation}
+%
+where the $\max$ is taken over all $n \times 1$ column vectors $\x$
+such that $\| \x \| \ne 0$.
+
+
+\subsection{Singular value decomposition (SVD)}
+
+For any positive real number $\epsilon$,
+positive integers $k$, $m$, and $n$ with $k \le m$ and $k \le n$,
+and any $m \times n$ matrix $A$,
+a rank-$k$ approximation to $A$ in the form of an SVD
+(to precision $\epsilon$) consists of an $m \times k$ matrix $U$
+whose columns are orthonormal, an $n \times k$ matrix $V$
+whose columns are orthonormal, and a diagonal $k \times k$ matrix
+$\Sigma$ with diagonal entries
+$\Sigma_{1,1} \ge \Sigma_{2,2} \ge \dots \ge \Sigma_{k-1,k-1}
+ \ge \Sigma_{k,k} \ge 0$,
+such that
+%
+\begin{equation}
+\| A_{m \times n} - U_{m \times k} \, \Sigma_{k \times k}
+   \, (V^*)_{k \times n} \| \le \epsilon.
+\end{equation}
+%
+The product $U \, \Sigma \, V^*$ is known as an SVD.
+The columns of $U$ are known as left singular vectors;
+the columns of $V$ are known as right singular vectors.
+The diagonal entries of $\Sigma$ are known as singular values.
+
+When $k = m$ or $k = n$, and $A = U \, \Sigma \, V^*$,
+then $U \, \Sigma \, V^*$ is known as the SVD
+of $A$; the columns of $U$ are the left singular vectors of $A$,
+the columns of $V$ are the right singular vectors of $A$,
+and the diagonal entries of $\Sigma$ are the singular values of $A$.
+For any positive integer $k$ with $k < m$ and $k < n$,
+there exists a rank-$k$ approximation to $A$ in the form of an SVD,
+to precision $\sigma_{k+1}$, where $\sigma_{k+1}$ is the $(k+1)^\st$
+greatest singular value of $A$.
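The $\sigma_{k+1}$ statement at the end of this subsection is the Eckart--Young theorem, which the truncated SVD attains; it is easy to confirm numerically. A quick NumPy check, where the matrix sizes, seed, and the choice k = 5 are arbitrary:

    import numpy as np

    rng = np.random.RandomState(2)
    A = rng.randn(30, 20)
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    k = 5
    A_k = (U[:, :k] * s[:k]).dot(Vt[:k])     # rank-k truncated SVD
    # Spectral-norm error of the truncation equals sigma_{k+1}:
    print(np.linalg.norm(A - A_k, 2), s[k])  # the two numbers agree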
+ + +\subsection{Interpolative decomposition (ID)} + +For any positive real number $\epsilon$, +positive integers $k$, $m$, and $n$ with $k \le m$ and $k \le n$, +and any $m \times n$ matrix $A$, +a rank-$k$ approximation to $A$ in the form of an ID +(to precision $\epsilon$) consists of a $k \times n$ matrix $P$, +and an $m \times k$ matrix $B$ whose columns constitute a subset +of the columns of $A$, such that +% +\begin{enumerate} +\item $\| A_{m \times n} - B_{m \times k} \, P_{k \times n} \| + \le \epsilon$, +\item some subset of the columns of $P$ makes up the $k \times k$ + identity matrix, and +\item every entry of $P$ has an absolute value less than or equal + to a reasonably small positive real number, say 2. +\end{enumerate} +% +The product $B \, P$ is known as an ID. +The matrix $P$ is known as the projection or interpolation matrix +of the ID. Property~1 above approximates each column of $A$ +via a linear combination of the columns of $B$ +(which are themselves columns of $A$), with the coefficients +in the linear combination given by the entries of $P$. + +The interpolative decomposition is ``interpolative'' +due to Property~2 above. The ID is numerically stable +due to Property~3 above. +It follows from Property~2 that the least ($k^\th$ greatest) singular value +of $P$ is at least 1. Combining Properties~2 and~3 yields that +% +\begin{equation} +\| P_{k \times n} \| \le \sqrt{4k(n-k)+1}. +\end{equation} + +When $k = m$ or $k = n$, and $A = B \, P$, +then $B \, P$ is known as the ID of $A$. +For any positive integer $k$ with $k < m$ and $k < n$, +there exists a rank-$k$ approximation to $A$ in the form of an ID, +to precision $\sqrt{k(n-k)+1} \; \sigma_{k+1}$, +where $\sigma_{k+1}$ is the $(k+1)^\st$ greatest singular value of $A$ +(in fact, there exists an ID in which every entry +of the projection matrix $P$ has an absolute value less than or equal +to 1). + + + +\section{Bug reports, feedback, and support} + +Please let us know about errors in the software or in the documentation +via e-mail to {\tt tygert@aya.yale.edu}. +We would also appreciate hearing about particular applications of the codes, +especially in the form of journal articles +e-mailed to {\tt tygert@aya.yale.edu}. +Mathematical and technical support may also be available via e-mail. Enjoy! + + + +\bibliographystyle{siam} +\bibliography{doc} + + +\end{document} diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/src/lapack_deprecations/LICENSE b/project/venv/lib/python2.7/site-packages/scipy/linalg/src/lapack_deprecations/LICENSE new file mode 100644 index 0000000..8d713b6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/src/lapack_deprecations/LICENSE @@ -0,0 +1,48 @@ +Copyright (c) 1992-2015 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. +Copyright (c) 2000-2015 The University of California Berkeley. All + rights reserved. +Copyright (c) 2006-2015 The University of Colorado Denver. All rights + reserved. + +$COPYRIGHT$ + +Additional copyrights may follow + +$HEADER$ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + +- Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +The copyright holders provide no reassurances that the source code +provided does not infringe any patent, copyright, or any other +intellectual property rights of third parties. The copyright holders +disclaim any liability to any recipient for claims brought against +recipient by any third party for infringement of that parties +intellectual property rights. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/__init__.pyc new file mode 100644 index 0000000..039de50 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_15_data.npz b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_15_data.npz new file mode 100644 index 0000000..31a7dc6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_15_data.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_18_data.npz b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_18_data.npz new file mode 100644 index 0000000..6bd78dc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_18_data.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_19_data.npz b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_19_data.npz new file mode 100644 index 0000000..3564000 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_19_data.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_20_data.npz b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_20_data.npz new file mode 100644 index 0000000..e68e5a2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_20_data.npz differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_6_data.npz b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_6_data.npz new file mode 100644 index 0000000..e70ff73 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/carex_6_data.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz new file mode 100644 index 0000000..22cb129 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_basic.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_basic.py new file mode 100644 index 0000000..4183da2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_basic.py @@ -0,0 +1,1653 @@ +# +# Created by: Pearu Peterson, March 2002 +# +""" Test functions for linalg.basic module + +""" +from __future__ import division, print_function, absolute_import + +import warnings +import itertools +import numpy as np +from numpy import (arange, array, dot, zeros, identity, conjugate, transpose, + float32) +import numpy.linalg as linalg +from numpy.random import random + +from numpy.testing import (assert_equal, assert_almost_equal, assert_, + assert_array_almost_equal, assert_allclose, + assert_array_equal) +import pytest +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +from scipy.linalg import (solve, inv, det, lstsq, pinv, pinv2, pinvh, norm, + solve_banded, solveh_banded, solve_triangular, + solve_circulant, circulant, LinAlgError, block_diag, + matrix_balance, LinAlgWarning) + +from scipy.linalg.basic import LstsqLapackError +from scipy.linalg._testutils import assert_no_overwrite + +from scipy._lib._version import NumpyVersion + + +""" +Bugs: +1) solve.check_random_sym_complex fails if a is complex + and transpose(a) = conjugate(a) (a is Hermitian). 
+""" +__usage__ = """ +Build linalg: + python setup_linalg.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.linalg.test()' +Run tests if linalg is not installed: + python tests/test_basic.py +""" + +REAL_DTYPES = [np.float32, np.float64, np.longdouble] +COMPLEX_DTYPES = [np.complex64, np.complex128, np.clongdouble] +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +def _eps_cast(dtyp): + """Get the epsilon for dtype, possibly downcast to BLAS types.""" + dt = dtyp + if dt == np.longdouble: + dt = np.float64 + elif dt == np.clongdouble: + dt = np.complex128 + return np.finfo(dt).eps + + +class TestSolveBanded(object): + + def test_real(self): + a = array([[1.0, 20, 0, 0], + [-30, 4, 6, 0], + [2, 1, 20, 2], + [0, -1, 7, 14]]) + ab = array([[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2, -1, 0, 0]]) + l, u = 2, 1 + b4 = array([10.0, 0.0, 2.0, 14.0]) + b4by1 = b4.reshape(-1, 1) + b4by2 = array([[2, 1], + [-30, 4], + [2, 3], + [1, 3]]) + b4by4 = array([[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + for b in [b4, b4by1, b4by2, b4by4]: + x = solve_banded((l, u), ab, b) + assert_array_almost_equal(dot(a, x), b) + + def test_complex(self): + a = array([[1.0, 20, 0, 0], + [-30, 4, 6, 0], + [2j, 1, 20, 2j], + [0, -1, 7, 14]]) + ab = array([[0.0, 20, 6, 2j], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2j, -1, 0, 0]]) + l, u = 2, 1 + b4 = array([10.0, 0.0, 2.0, 14.0j]) + b4by1 = b4.reshape(-1, 1) + b4by2 = array([[2, 1], + [-30, 4], + [2, 3], + [1, 3]]) + b4by4 = array([[1, 0, 0, 0], + [0, 0, 0, 1j], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + for b in [b4, b4by1, b4by2, b4by4]: + x = solve_banded((l, u), ab, b) + assert_array_almost_equal(dot(a, x), b) + + def test_tridiag_real(self): + ab = array([[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0]]) + a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag( + ab[2, :-1], -1) + b4 = array([10.0, 0.0, 2.0, 14.0]) + b4by1 = b4.reshape(-1, 1) + b4by2 = array([[2, 1], + [-30, 4], + [2, 3], + [1, 3]]) + b4by4 = array([[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + for b in [b4, b4by1, b4by2, b4by4]: + x = solve_banded((1, 1), ab, b) + assert_array_almost_equal(dot(a, x), b) + + def test_tridiag_complex(self): + ab = array([[0.0, 20, 6, 2j], + [1, 4, 20, 14], + [-30, 1, 7, 0]]) + a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag( + ab[2, :-1], -1) + b4 = array([10.0, 0.0, 2.0, 14.0j]) + b4by1 = b4.reshape(-1, 1) + b4by2 = array([[2, 1], + [-30, 4], + [2, 3], + [1, 3]]) + b4by4 = array([[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + for b in [b4, b4by1, b4by2, b4by4]: + x = solve_banded((1, 1), ab, b) + assert_array_almost_equal(dot(a, x), b) + + def test_check_finite(self): + a = array([[1.0, 20, 0, 0], + [-30, 4, 6, 0], + [2, 1, 20, 2], + [0, -1, 7, 14]]) + ab = array([[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2, -1, 0, 0]]) + l, u = 2, 1 + b4 = array([10.0, 0.0, 2.0, 14.0]) + x = solve_banded((l, u), ab, b4, check_finite=False) + assert_array_almost_equal(dot(a, x), b4) + + def test_bad_shape(self): + ab = array([[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2, -1, 0, 0]]) + l, u = 2, 1 + bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4) + assert_raises(ValueError, solve_banded, (l, u), ab, bad) + assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0]) + + # Values of (l,u) are not compatible with ab. 
+ assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0]) + + def test_1x1(self): + b = array([[1., 2., 3.]]) + x = solve_banded((1, 1), [[0], [2], [0]], b) + assert_array_equal(x, [[0.5, 1.0, 1.5]]) + assert_equal(x.dtype, np.dtype('f8')) + assert_array_equal(b, [[1.0, 2.0, 3.0]]) + + def test_native_list_arguments(self): + a = [[1.0, 20, 0, 0], + [-30, 4, 6, 0], + [2, 1, 20, 2], + [0, -1, 7, 14]] + ab = [[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2, -1, 0, 0]] + l, u = 2, 1 + b = [10.0, 0.0, 2.0, 14.0] + x = solve_banded((l, u), ab, b) + assert_array_almost_equal(dot(a, x), b) + + +class TestSolveHBanded(object): + + def test_01_upper(self): + # Solve + # [ 4 1 2 0] [1] + # [ 1 4 1 2] X = [4] + # [ 2 1 4 1] [1] + # [ 0 2 1 4] [2] + # with the RHS as a 1D array. + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0, 2.0]) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) + + def test_02_upper(self): + # Solve + # [ 4 1 2 0] [1 6] + # [ 1 4 1 2] X = [4 2] + # [ 2 1 4 1] [1 6] + # [ 0 2 1 4] [2 1] + # + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]]) + b = array([[1.0, 6.0], + [4.0, 2.0], + [1.0, 6.0], + [2.0, 1.0]]) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0], + [0.0, 0.0]]) + assert_array_almost_equal(x, expected) + + def test_03_upper(self): + # Solve + # [ 4 1 2 0] [1] + # [ 1 4 1 2] X = [4] + # [ 2 1 4 1] [1] + # [ 0 2 1 4] [2] + # with the RHS as a 2D array with shape (3,1). + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1)) + + def test_01_lower(self): + # Solve + # [ 4 1 2 0] [1] + # [ 1 4 1 2] X = [4] + # [ 2 1 4 1] [1] + # [ 0 2 1 4] [2] + # + ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 1.0, 1.0, -99], + [2.0, 2.0, 0.0, 0.0]]) + b = array([1.0, 4.0, 1.0, 2.0]) + x = solveh_banded(ab, b, lower=True) + assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) + + def test_02_lower(self): + # Solve + # [ 4 1 2 0] [1 6] + # [ 1 4 1 2] X = [4 2] + # [ 2 1 4 1] [1 6] + # [ 0 2 1 4] [2 1] + # + ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 1.0, 1.0, -99], + [2.0, 2.0, 0.0, 0.0]]) + b = array([[1.0, 6.0], + [4.0, 2.0], + [1.0, 6.0], + [2.0, 1.0]]) + x = solveh_banded(ab, b, lower=True) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0], + [0.0, 0.0]]) + assert_array_almost_equal(x, expected) + + def test_01_float32(self): + # Solve + # [ 4 1 2 0] [1] + # [ 1 4 1 2] X = [4] + # [ 2 1 4 1] [1] + # [ 0 2 1 4] [2] + # + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]], dtype=float32) + b = array([1.0, 4.0, 1.0, 2.0], dtype=float32) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) + + def test_02_float32(self): + # Solve + # [ 4 1 2 0] [1 6] + # [ 1 4 1 2] X = [4 2] + # [ 2 1 4 1] [1 6] + # [ 0 2 1 4] [2 1] + # + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]], dtype=float32) + b = array([[1.0, 6.0], + [4.0, 2.0], + [1.0, 6.0], + [2.0, 1.0]], dtype=float32) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0], + [0.0, 0.0]]) + assert_array_almost_equal(x, expected) + + def test_01_complex(self): + # Solve + # [ 4 -j 2 0] [2-j] + # [ j 4 -j 2] X = [4-j] + # [ 2 j 4 -j] [4+j] + # [ 0 2 j 4] [2+j] + # + ab = 
array([[0.0, 0.0, 2.0, 2.0], + [-99, -1.0j, -1.0j, -1.0j], + [4.0, 4.0, 4.0, 4.0]]) + b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j]) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0]) + + def test_02_complex(self): + # Solve + # [ 4 -j 2 0] [2-j 2+4j] + # [ j 4 -j 2] X = [4-j -1-j] + # [ 2 j 4 -j] [4+j 4+2j] + # [ 0 2 j 4] [2+j j] + # + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, -1.0j, -1.0j, -1.0j], + [4.0, 4.0, 4.0, 4.0]]) + b = array([[2-1j, 2+4j], + [4.0-1j, -1-1j], + [4.0+1j, 4+2j], + [2+1j, 1j]]) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0j], + [1.0, 0.0], + [1.0, 1.0], + [0.0, 0.0]]) + assert_array_almost_equal(x, expected) + + def test_tridiag_01_upper(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # with the RHS as a 1D array. + ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0]) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0]) + + def test_tridiag_02_upper(self): + # Solve + # [ 4 1 0] [1 4] + # [ 1 4 1] X = [4 2] + # [ 0 1 4] [1 4] + # + ab = array([[-99, 1.0, 1.0], + [4.0, 4.0, 4.0]]) + b = array([[1.0, 4.0], + [4.0, 2.0], + [1.0, 4.0]]) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0]]) + assert_array_almost_equal(x, expected) + + def test_tridiag_03_upper(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # with the RHS as a 2D array with shape (3,1). + ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0]).reshape(-1, 1) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1)) + + def test_tridiag_01_lower(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # + ab = array([[4.0, 4.0, 4.0], + [1.0, 1.0, -99]]) + b = array([1.0, 4.0, 1.0]) + x = solveh_banded(ab, b, lower=True) + assert_array_almost_equal(x, [0.0, 1.0, 0.0]) + + def test_tridiag_02_lower(self): + # Solve + # [ 4 1 0] [1 4] + # [ 1 4 1] X = [4 2] + # [ 0 1 4] [1 4] + # + ab = array([[4.0, 4.0, 4.0], + [1.0, 1.0, -99]]) + b = array([[1.0, 4.0], + [4.0, 2.0], + [1.0, 4.0]]) + x = solveh_banded(ab, b, lower=True) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0]]) + assert_array_almost_equal(x, expected) + + def test_tridiag_01_float32(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # + ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32) + b = array([1.0, 4.0, 1.0], dtype=float32) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0]) + + def test_tridiag_02_float32(self): + # Solve + # [ 4 1 0] [1 4] + # [ 1 4 1] X = [4 2] + # [ 0 1 4] [1 4] + # + ab = array([[-99, 1.0, 1.0], + [4.0, 4.0, 4.0]], dtype=float32) + b = array([[1.0, 4.0], + [4.0, 2.0], + [1.0, 4.0]], dtype=float32) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0]]) + assert_array_almost_equal(x, expected) + + def test_tridiag_01_complex(self): + # Solve + # [ 4 -j 0] [ -j] + # [ j 4 -j] X = [4-j] + # [ 0 j 4] [4+j] + # + ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]]) + b = array([-1.0j, 4.0-1j, 4+1j]) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 1.0]) + + def test_tridiag_02_complex(self): + # Solve + # [ 4 -j 0] [ -j 4j] + # [ j 4 -j] X = [4-j -1-j] + # [ 0 j 4] [4+j 4 ] + # + ab = array([[-99, -1.0j, -1.0j], + [4.0, 4.0, 4.0]]) + b = array([[-1j, 4.0j], + [4.0-1j, -1.0-1j], + [4.0+1j, 4.0]]) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0j], + [1.0, 0.0], + [1.0, 
1.0]]) + assert_array_almost_equal(x, expected) + + def test_check_finite(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # with the RHS as a 1D array. + ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0]) + x = solveh_banded(ab, b, check_finite=False) + assert_array_almost_equal(x, [0.0, 1.0, 0.0]) + + def test_bad_shapes(self): + ab = array([[-99, 1.0, 1.0], + [4.0, 4.0, 4.0]]) + b = array([[1.0, 4.0], + [4.0, 2.0]]) + assert_raises(ValueError, solveh_banded, ab, b) + assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0]) + assert_raises(ValueError, solveh_banded, ab, [1.0]) + + def test_1x1(self): + x = solveh_banded([[1]], [[1, 2, 3]]) + assert_array_equal(x, [[1.0, 2.0, 3.0]]) + assert_equal(x.dtype, np.dtype('f8')) + + def test_native_list_arguments(self): + # Same as test_01_upper, using python's native list. + ab = [[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]] + b = [1.0, 4.0, 1.0, 2.0] + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) + + +class TestSolve(object): + def setup_method(self): + np.random.seed(1234) + + def test_20Feb04_bug(self): + a = [[1, 1], [1.0, 0]] # ok + x0 = solve(a, [1, 0j]) + assert_array_almost_equal(dot(a, x0), [1, 0]) + + # gives failure with clapack.zgesv(..,rowmajor=0) + a = [[1, 1], [1.2, 0]] + b = [1, 0j] + x0 = solve(a, b) + assert_array_almost_equal(dot(a, x0), [1, 0]) + + def test_simple(self): + a = [[1, 20], [-30, 4]] + for b in ([[1, 0], [0, 1]], [1, 0], + [[2, 1], [-30, 4]]): + x = solve(a, b) + assert_array_almost_equal(dot(a, x), b) + + def test_simple_sym(self): + a = [[2, 3], [3, 5]] + for lower in [0, 1]: + for b in ([[1, 0], [0, 1]], [1, 0]): + x = solve(a, b, sym_pos=1, lower=lower) + assert_array_almost_equal(dot(a, x), b) + + def test_simple_sym_complex(self): + a = [[5, 2], [2, 4]] + for b in [[1j, 0], + [[1j, 1j], + [0, 2]], + ]: + x = solve(a, b, sym_pos=1) + assert_array_almost_equal(dot(a, x), b) + + def test_simple_complex(self): + a = array([[5, 2], [2j, 4]], 'D') + for b in [[1j, 0], + [[1j, 1j], + [0, 2]], + [1, 0j], + array([1, 0], 'D'), + ]: + x = solve(a, b) + assert_array_almost_equal(dot(a, x), b) + + def test_nils_20Feb04(self): + n = 2 + A = random([n, n])+random([n, n])*1j + X = zeros((n, n), 'D') + Ainv = inv(A) + R = identity(n)+identity(n)*0j + for i in arange(0, n): + r = R[:, i] + X[:, i] = solve(A, r) + assert_array_almost_equal(X, Ainv) + + def test_random(self): + + n = 20 + a = random([n, n]) + for i in range(n): + a[i, i] = 20*(.1+a[i, i]) + for i in range(4): + b = random([n, 3]) + x = solve(a, b) + assert_array_almost_equal(dot(a, x), b) + + def test_random_complex(self): + n = 20 + a = random([n, n]) + 1j * random([n, n]) + for i in range(n): + a[i, i] = 20*(.1+a[i, i]) + for i in range(2): + b = random([n, 3]) + x = solve(a, b) + assert_array_almost_equal(dot(a, x), b) + + def test_random_sym(self): + n = 20 + a = random([n, n]) + for i in range(n): + a[i, i] = abs(20*(.1+a[i, i])) + for j in range(i): + a[i, j] = a[j, i] + for i in range(4): + b = random([n]) + x = solve(a, b, sym_pos=1) + assert_array_almost_equal(dot(a, x), b) + + def test_random_sym_complex(self): + n = 20 + a = random([n, n]) + # XXX: with the following addition the accuracy will be very low + a = a + 1j*random([n, n]) + for i in range(n): + a[i, i] = abs(20*(.1+a[i, i])) + for j in range(i): + a[i, j] = conjugate(a[j, i]) + b = random([n])+2j*random([n]) + for i in range(2): + x = solve(a, b, sym_pos=1) + 
assert_array_almost_equal(dot(a, x), b) + + def test_check_finite(self): + a = [[1, 20], [-30, 4]] + for b in ([[1, 0], [0, 1]], [1, 0], + [[2, 1], [-30, 4]]): + x = solve(a, b, check_finite=False) + assert_array_almost_equal(dot(a, x), b) + + def test_scalar_a_and_1D_b(self): + a = 1 + b = [1, 2, 3] + x = solve(a, b) + assert_array_almost_equal(x.ravel(), b) + assert_(x.shape == (3,), 'Scalar_a_1D_b test returned wrong shape') + + def test_simple2(self): + a = np.array([[1.80, 2.88, 2.05, -0.89], + [525.00, -295.00, -95.00, -380.00], + [1.58, -2.69, -2.90, -1.04], + [-1.11, -0.66, -0.59, 0.80]]) + + b = np.array([[9.52, 18.47], + [2435.00, 225.00], + [0.77, -13.28], + [-6.22, -6.21]]) + + x = solve(a, b) + assert_array_almost_equal(x, np.array([[1., -1, 3, -5], + [3, 2, 4, 1]]).T) + + def test_simple_complex2(self): + a = np.array([[-1.34+2.55j, 0.28+3.17j, -6.39-2.20j, 0.72-0.92j], + [-1.70-14.10j, 33.10-1.50j, -1.50+13.40j, 12.90+13.80j], + [-3.29-2.39j, -1.91+4.42j, -0.14-1.35j, 1.72+1.35j], + [2.41+0.39j, -0.56+1.47j, -0.83-0.69j, -1.96+0.67j]]) + + b = np.array([[26.26+51.78j, 31.32-6.70j], + [64.30-86.80j, 158.60-14.20j], + [-5.75+25.31j, -2.15+30.19j], + [1.16+2.57j, -2.56+7.55j]]) + + x = solve(a, b) + assert_array_almost_equal(x, np. array([[1+1.j, -1-2.j], + [2-3.j, 5+1.j], + [-4-5.j, -3+4.j], + [6.j, 2-3.j]])) + + def test_hermitian(self): + # An upper triangular matrix will be used for hermitian matrix a + a = np.array([[-1.84, 0.11-0.11j, -1.78-1.18j, 3.91-1.50j], + [0, -4.63, -1.84+0.03j, 2.21+0.21j], + [0, 0, -8.87, 1.58-0.90j], + [0, 0, 0, -1.36]]) + b = np.array([[2.98-10.18j, 28.68-39.89j], + [-9.58+3.88j, -24.79-8.40j], + [-0.77-16.05j, 4.23-70.02j], + [7.79+5.48j, -35.39+18.01j]]) + res = np.array([[2.+1j, -8+6j], + [3.-2j, 7-2j], + [-1+2j, -1+5j], + [1.-1j, 3-4j]]) + x = solve(a, b, assume_a='her') + assert_array_almost_equal(x, res) + # Also conjugate a and test for lower triangular data + x = solve(a.conj().T, b, assume_a='her', lower=True) + assert_array_almost_equal(x, res) + + def test_pos_and_sym(self): + A = np.arange(1, 10).reshape(3, 3) + x = solve(np.tril(A)/9, np.ones(3), assume_a='pos') + assert_array_almost_equal(x, [9., 1.8, 1.]) + x = solve(np.tril(A)/9, np.ones(3), assume_a='sym') + assert_array_almost_equal(x, [9., 1.8, 1.]) + + def test_singularity(self): + a = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 1], + [1, 1, 1, 0, 0, 0, 1, 0, 1], + [0, 1, 1, 0, 0, 0, 1, 0, 1], + [1, 0, 1, 1, 1, 1, 0, 0, 0], + [1, 0, 1, 1, 1, 1, 0, 0, 0], + [1, 0, 1, 1, 1, 1, 0, 0, 0], + [1, 0, 1, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1]]) + b = np.arange(9)[:, None] + assert_raises(LinAlgError, solve, a, b) + + def test_ill_condition_warning(self): + a = np.array([[1, 1], [1+1e-16, 1-1e-16]]) + b = np.ones(2) + with warnings.catch_warnings(): + warnings.simplefilter('error') + assert_raises(LinAlgWarning, solve, a, b) + + def test_empty_rhs(self): + a = np.eye(2) + b = [[], []] + x = solve(a, b) + assert_(x.size == 0, 'Returned array is not empty') + assert_(x.shape == (2, 0), 'Returned empty array shape is wrong') + + def test_multiple_rhs(self): + a = np.eye(2) + b = np.random.rand(2, 3, 4) + x = solve(a, b) + assert_array_almost_equal(x, b) + + def test_transposed_keyword(self): + A = np.arange(9).reshape(3, 3) + 1 + x = solve(np.tril(A)/9, np.ones(3), transposed=True) + assert_array_almost_equal(x, [1.2, 0.2, 1]) + x = solve(np.tril(A)/9, np.ones(3), transposed=False) + assert_array_almost_equal(x, [9, -5.4, -1.2]) + + def 
test_transposed_notimplemented(self): + a = np.eye(3).astype(complex) + with assert_raises(NotImplementedError): + solve(a, a, transposed=True) + + def test_nonsquare_a(self): + assert_raises(ValueError, solve, [1, 2], 1) + + def test_size_mismatch_with_1D_b(self): + assert_array_almost_equal(solve(np.eye(3), np.ones(3)), np.ones(3)) + assert_raises(ValueError, solve, np.eye(3), np.ones(4)) + + def test_assume_a_keyword(self): + assert_raises(ValueError, solve, 1, 1, assume_a='zxcv') + + @pytest.mark.skip(reason="Failure on OS X (gh-7500), " + "crash on Windows (gh-8064)") + def test_all_type_size_routine_combinations(self): + sizes = [10, 100] + assume_as = ['gen', 'sym', 'pos', 'her'] + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + for size, assume_a, dtype in itertools.product(sizes, assume_as, + dtypes): + is_complex = dtype in (np.complex64, np.complex128) + if assume_a == 'her' and not is_complex: + continue + + err_msg = ("Failed for size: {}, assume_a: {}," + "dtype: {}".format(size, assume_a, dtype)) + + a = np.random.randn(size, size).astype(dtype) + b = np.random.randn(size).astype(dtype) + if is_complex: + a = a + (1j*np.random.randn(size, size)).astype(dtype) + + if assume_a == 'sym': # Can still be complex but only symmetric + a = a + a.T + elif assume_a == 'her': # Handle hermitian matrices here instead + a = a + a.T.conj() + elif assume_a == 'pos': + a = a.conj().T.dot(a) + 0.1*np.eye(size) + + tol = 1e-12 if dtype in (np.float64, np.complex128) else 1e-6 + + if assume_a in ['gen', 'sym', 'her']: + # We revert the tolerance from before + # 4b4a6e7c34fa4060533db38f9a819b98fa81476c + if dtype in (np.float32, np.complex64): + tol *= 10 + + x = solve(a, b, assume_a=assume_a) + assert_allclose(a.dot(x), b, + atol=tol * size, + rtol=tol * size, + err_msg=err_msg) + + if assume_a == 'sym' and dtype not in (np.complex64, + np.complex128): + x = solve(a, b, assume_a=assume_a, transposed=True) + assert_allclose(a.dot(x), b, + atol=tol * size, + rtol=tol * size, + err_msg=err_msg) + + +class TestSolveTriangular(object): + + def test_simple(self): + """ + solve_triangular on a simple 2x2 matrix. + """ + A = array([[1, 0], [1, 2]]) + b = [1, 1] + sol = solve_triangular(A, b, lower=True) + assert_array_almost_equal(sol, [1, 0]) + + # check that it works also for non-contiguous matrices + sol = solve_triangular(A.T, b, lower=False) + assert_array_almost_equal(sol, [.5, .5]) + + # and that it gives the same result as trans=1 + sol = solve_triangular(A, b, lower=True, trans=1) + assert_array_almost_equal(sol, [.5, .5]) + + b = identity(2) + sol = solve_triangular(A, b, lower=True, trans=1) + assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]]) + + def test_simple_complex(self): + """ + solve_triangular on a simple 2x2 complex matrix + """ + A = array([[1+1j, 0], [1j, 2]]) + b = identity(2) + sol = solve_triangular(A, b, lower=True, trans=1) + assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]]) + + def test_check_finite(self): + """ + solve_triangular on a simple 2x2 matrix. 
+ """ + A = array([[1, 0], [1, 2]]) + b = [1, 1] + sol = solve_triangular(A, b, lower=True, check_finite=False) + assert_array_almost_equal(sol, [1, 0]) + + +class TestInv(object): + def setup_method(self): + np.random.seed(1234) + + def test_simple(self): + a = [[1, 2], [3, 4]] + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), np.eye(2)) + a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]] + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), np.eye(3)) + + def test_random(self): + n = 20 + for i in range(4): + a = random([n, n]) + for i in range(n): + a[i, i] = 20*(.1+a[i, i]) + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), + identity(n)) + + def test_simple_complex(self): + a = [[1, 2], [3, 4j]] + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]]) + + def test_random_complex(self): + n = 20 + for i in range(4): + a = random([n, n])+2j*random([n, n]) + for i in range(n): + a[i, i] = 20*(.1+a[i, i]) + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), + identity(n)) + + def test_check_finite(self): + a = [[1, 2], [3, 4]] + a_inv = inv(a, check_finite=False) + assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]]) + + +class TestDet(object): + def setup_method(self): + np.random.seed(1234) + + def test_simple(self): + a = [[1, 2], [3, 4]] + a_det = det(a) + assert_almost_equal(a_det, -2.0) + + def test_simple_complex(self): + a = [[1, 2], [3, 4j]] + a_det = det(a) + assert_almost_equal(a_det, -6+4j) + + def test_random(self): + basic_det = linalg.det + n = 20 + for i in range(4): + a = random([n, n]) + d1 = det(a) + d2 = basic_det(a) + assert_almost_equal(d1, d2) + + def test_random_complex(self): + basic_det = linalg.det + n = 20 + for i in range(4): + a = random([n, n]) + 2j*random([n, n]) + d1 = det(a) + d2 = basic_det(a) + assert_allclose(d1, d2, rtol=1e-13) + + def test_check_finite(self): + a = [[1, 2], [3, 4]] + a_det = det(a, check_finite=False) + assert_almost_equal(a_det, -2.0) + + +def direct_lstsq(a, b, cmplx=0): + at = transpose(a) + if cmplx: + at = conjugate(at) + a1 = dot(at, a) + b1 = dot(at, b) + return solve(a1, b1) + + +class TestLstsq(object): + + lapack_drivers = ('gelsd', 'gelss', 'gelsy', None) + + def setup_method(self): + np.random.seed(1234) + + def test_simple_exact(self): + for dtype in REAL_DTYPES: + a = np.array([[1, 20], [-30, 4]], dtype=dtype) + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + for bt in (((1, 0), (0, 1)), (1, 0), + ((2, 1), (-30, 4))): + # Store values in case they are overwritten + # later + a1 = a.copy() + b = np.array(bt, dtype=dtype) + b1 = b.copy() + try: + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + except LstsqLapackError: + if lapack_driver is None: + mesg = ('LstsqLapackError raised with ' + 'lapack_driver being None.') + raise AssertionError(mesg) + else: + # can't proceed, skip to the next iteration + continue + + x = out[0] + r = out[2] + assert_(r == 2, + 'expected efficient rank 2, got %s' % r) + assert_allclose( + dot(a, x), b, + atol=25 * _eps_cast(a1.dtype), + rtol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_simple_overdet(self): + for dtype in REAL_DTYPES: + a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype) + b = np.array([1, 2, 3], dtype=dtype) + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + try: + out = lstsq(a1, b1, 
lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + except LstsqLapackError: + if lapack_driver is None: + mesg = ('LstsqLapackError raised with ' + 'lapack_driver being None.') + raise AssertionError(mesg) + else: + # can't proceed, skip to the next iteration + continue + + x = out[0] + if lapack_driver == 'gelsy': + residuals = np.sum((b - a.dot(x))**2) + else: + residuals = out[1] + r = out[2] + assert_(r == 2, 'expected efficient rank 2, got %s' % r) + assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0), + residuals, + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + assert_allclose(x, (-0.428571428571429, 0.85714285714285), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_simple_overdet_complex(self): + for dtype in COMPLEX_DTYPES: + a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype) + b = np.array([1, 2+4j, 3], dtype=dtype) + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + try: + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + except LstsqLapackError: + if lapack_driver is None: + mesg = ('LstsqLapackError raised with ' + 'lapack_driver being None.') + raise AssertionError(mesg) + else: + # can't proceed, skip to the next iteration + continue + + x = out[0] + if lapack_driver == 'gelsy': + res = b - a.dot(x) + residuals = np.sum(res * res.conj()) + else: + residuals = out[1] + r = out[2] + assert_(r == 2, 'expected efficient rank 2, got %s' % r) + assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0), + residuals, + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + assert_allclose( + x, (-0.4831460674157303 + 0.258426966292135j, + 0.921348314606741 + 0.292134831460674j), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_simple_underdet(self): + for dtype in REAL_DTYPES: + a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) + b = np.array([1, 2], dtype=dtype) + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + try: + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + except LstsqLapackError: + if lapack_driver is None: + mesg = ('LstsqLapackError raised with ' + 'lapack_driver being None.') + raise AssertionError(mesg) + else: + # can't proceed, skip to the next iteration + continue + + x = out[0] + r = out[2] + assert_(r == 2, 'expected efficient rank 2, got %s' % r) + assert_allclose(x, (-0.055555555555555, 0.111111111111111, + 0.277777777777777), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_random_exact(self): + for dtype in REAL_DTYPES: + for n in (20, 200): + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + a = np.asarray(random([n, n]), dtype=dtype) + for i in range(n): + a[i, i] = 20 * (0.1 + a[i, i]) + for i in range(4): + b = np.asarray(random([n, 3]), dtype=dtype) + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + try: + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + 
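# lstsq returns the 4-tuple (solution, residues, effective rank, +
# singular values); the checks below use only out[0] and out[2]. +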
except LstsqLapackError: + if lapack_driver is None: + mesg = ('LstsqLapackError raised with ' + 'lapack_driver being None.') + raise AssertionError(mesg) + else: + # can't proceed, skip to the next iteration + continue + x = out[0] + r = out[2] + assert_(r == n, 'expected efficient rank %s, ' + 'got %s' % (n, r)) + if dtype is np.float32: + assert_allclose( + dot(a, x), b, + rtol=500 * _eps_cast(a1.dtype), + atol=500 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + else: + assert_allclose( + dot(a, x), b, + rtol=1000 * _eps_cast(a1.dtype), + atol=1000 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_random_complex_exact(self): + for dtype in COMPLEX_DTYPES: + for n in (20, 200): + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + a = np.asarray(random([n, n]) + 1j*random([n, n]), + dtype=dtype) + for i in range(n): + a[i, i] = 20 * (0.1 + a[i, i]) + for i in range(2): + b = np.asarray(random([n, 3]), dtype=dtype) + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + x = out[0] + r = out[2] + assert_(r == n, 'expected efficient rank %s, ' + 'got %s' % (n, r)) + if dtype is np.complex64: + assert_allclose( + dot(a, x), b, + rtol=400 * _eps_cast(a1.dtype), + atol=400 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + else: + assert_allclose( + dot(a, x), b, + rtol=1000 * _eps_cast(a1.dtype), + atol=1000 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_random_overdet(self): + for dtype in REAL_DTYPES: + for (n, m) in ((20, 15), (200, 2)): + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + a = np.asarray(random([n, m]), dtype=dtype) + for i in range(m): + a[i, i] = 20 * (0.1 + a[i, i]) + for i in range(4): + b = np.asarray(random([n, 3]), dtype=dtype) + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + try: + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + except LstsqLapackError: + if lapack_driver is None: + mesg = ('LstsqLapackError raised with ' + 'lapack_driver being None.') + raise AssertionError(mesg) + else: + # can't proceed, skip to the next iteration + continue + + x = out[0] + r = out[2] + assert_(r == m, 'expected efficient rank %s, ' + 'got %s' % (m, r)) + assert_allclose( + x, direct_lstsq(a, b, cmplx=0), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_random_complex_overdet(self): + for dtype in COMPLEX_DTYPES: + for (n, m) in ((20, 15), (200, 2)): + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + a = np.asarray(random([n, m]) + 1j*random([n, m]), + dtype=dtype) + for i in range(m): + a[i, i] = 20 * (0.1 + a[i, i]) + for i in range(2): + b = np.asarray(random([n, 3]), dtype=dtype) + # Store values in case they are overwritten + # later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + x = out[0] + r = out[2] + assert_(r == m, 'expected efficient rank %s, ' + 'got %s' % (m, r)) + assert_allclose( + x, direct_lstsq(a, b, cmplx=1), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_check_finite(self): + with suppress_warnings() as sup: + # On (some) OSX this 
test triggers a warning (gh-7538) + sup.filter(RuntimeWarning, + "internal gelsd driver lwork query error,.*" + "Falling back to 'gelss' driver.") + + at = np.array(((1, 20), (-30, 4))) + for dtype, bt, lapack_driver, overwrite, check_finite in \ + itertools.product(REAL_DTYPES, + (((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))), + TestLstsq.lapack_drivers, + (True, False), + (True, False)): + + a = at.astype(dtype) + b = np.array(bt, dtype=dtype) + # Store values in case they are overwritten + # later + a1 = a.copy() + b1 = b.copy() + try: + out = lstsq(a1, b1, lapack_driver=lapack_driver, + check_finite=check_finite, overwrite_a=overwrite, + overwrite_b=overwrite) + except LstsqLapackError: + if lapack_driver is None: + raise AssertionError('LstsqLapackError raised with ' + '"lapack_driver" being "None".') + else: + # can't proceed, + # skip to the next iteration + continue + x = out[0] + r = out[2] + assert_(r == 2, 'expected efficient rank 2, got %s' % r) + assert_allclose(dot(a, x), b, + rtol=25 * _eps_cast(a.dtype), + atol=25 * _eps_cast(a.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_zero_size(self): + for a_shape, b_shape in (((0, 2), (0,)), + ((0, 4), (0, 2)), + ((4, 0), (4,)), + ((4, 0), (4, 2))): + b = np.ones(b_shape) + x, residues, rank, s = lstsq(np.zeros(a_shape), b) + assert_equal(x, np.zeros((a_shape[1],) + b_shape[1:])) + residues_should_be = (np.empty((0,)) if a_shape[1] + else np.linalg.norm(b, axis=0)**2) + assert_equal(residues, residues_should_be) + assert_(rank == 0, 'expected rank 0') + assert_equal(s, np.empty((0,))) + + +class TestPinv(object): + + def test_simple_real(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + a_pinv = pinv(a) + assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) + a_pinv = pinv2(a) + assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) + + def test_simple_complex(self): + a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], + dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], + dtype=float)) + a_pinv = pinv(a) + assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) + a_pinv = pinv2(a) + assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) + + def test_simple_singular(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float) + a_pinv = pinv(a) + a_pinv2 = pinv2(a) + assert_array_almost_equal(a_pinv, a_pinv2) + + def test_simple_cols(self): + a = array([[1, 2, 3], [4, 5, 6]], dtype=float) + a_pinv = pinv(a) + a_pinv2 = pinv2(a) + assert_array_almost_equal(a_pinv, a_pinv2) + + def test_simple_rows(self): + a = array([[1, 2], [3, 4], [5, 6]], dtype=float) + a_pinv = pinv(a) + a_pinv2 = pinv2(a) + assert_array_almost_equal(a_pinv, a_pinv2) + + def test_check_finite(self): + a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]]) + a_pinv = pinv(a, check_finite=False) + assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) + a_pinv = pinv2(a, check_finite=False) + assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) + + def test_native_list_argument(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + a_pinv = pinv(a) + a_pinv2 = pinv2(a) + assert_array_almost_equal(a_pinv, a_pinv2) + + +class TestPinvSymmetric(object): + + def test_simple_real(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + a = np.dot(a, a.T) + a_pinv = pinvh(a) + assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) + + def test_nonpositive(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float) + a = np.dot(a, a.T) + u, s, vt = np.linalg.svd(a) + s[0] *= -1 + a = np.dot(u * s, vt) # a is now symmetric 
non-positive and singular + a_pinv = pinv2(a) + a_pinvh = pinvh(a) + assert_array_almost_equal(a_pinv, a_pinvh) + + def test_simple_complex(self): + a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], + dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], + dtype=float)) + a = np.dot(a, a.conj().T) + a_pinv = pinvh(a) + assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) + + def test_native_list_argument(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + a = np.dot(a, a.T) + a_pinv = pinvh(a.tolist()) + assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) + + +class TestVectorNorms(object): + + def test_types(self): + for dtype in np.typecodes['AllFloat']: + x = np.array([1, 2, 3], dtype=dtype) + tol = max(1e-15, np.finfo(dtype).eps.real * 20) + assert_allclose(norm(x), np.sqrt(14), rtol=tol) + assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol) + + for dtype in np.typecodes['Complex']: + x = np.array([1j, 2j, 3j], dtype=dtype) + tol = max(1e-15, np.finfo(dtype).eps.real * 20) + assert_allclose(norm(x), np.sqrt(14), rtol=tol) + assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol) + + def test_overflow(self): + # unlike numpy's norm, this one is + # safer on overflow + a = array([1e20], dtype=float32) + assert_almost_equal(norm(a), a) + + def test_stable(self): + # more stable than numpy's norm + a = array([1e4] + [1]*10000, dtype=float32) + try: + # snrm in double precision; we obtain the same as for float64 + # -- large atol needed due to varying blas implementations + assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2) + except AssertionError: + # snrm implemented in single precision, == np.linalg.norm result + msg = ": Result should equal either 0.0 or 0.5 (depending on " \ + "implementation of snrm2)." + assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg) + + def test_zero_norm(self): + assert_equal(norm([1, 0, 3], 0), 2) + assert_equal(norm([1, 2, 3], 0), 3) + + def test_axis_kwd(self): + a = np.array([[[2, 1], [3, 4]]] * 2, 'd') + assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2) + assert_allclose(norm(a, 1, axis=1), [[5.] * 2] * 2) + + @pytest.mark.skipif(NumpyVersion(np.__version__) < '1.10.0', reason="") + def test_keepdims_kwd(self): + a = np.array([[[2, 1], [3, 4]]] * 2, 'd') + b = norm(a, axis=1, keepdims=True) + assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2) + assert_(b.shape == (2, 1, 2)) + assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2) + + +class TestMatrixNorms(object): + + def test_matrix_norms(self): + # Not all of these are matrix norms in the most technical sense. + np.random.seed(1234) + for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4): + for t in np.single, np.double, np.csingle, np.cdouble, np.int64: + A = 10 * np.random.randn(n, m).astype(t) + if np.issubdtype(A.dtype, np.complexfloating): + A = (A + 10j * np.random.randn(n, m)).astype(t) + t_high = np.cdouble + else: + t_high = np.double + for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf): + actual = norm(A, ord=order) + desired = np.linalg.norm(A, ord=order) + # SciPy may return higher precision matrix norms. + # This is a consequence of using LAPACK. 
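+ # When the lower-precision results disagree, the NumPy reference is
+ # recomputed in double (or complex double) before the final comparison.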
+ if not np.allclose(actual, desired): + desired = np.linalg.norm(A.astype(t_high), ord=order) + assert_allclose(actual, desired) + + def test_axis_kwd(self): + a = np.array([[[2, 1], [3, 4]]] * 2, 'd') + b = norm(a, ord=np.inf, axis=(1, 0)) + c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1)) + d = norm(a, ord=1, axis=(0, 1)) + assert_allclose(b, c) + assert_allclose(c, d) + assert_allclose(b, d) + assert_(b.shape == c.shape == d.shape) + b = norm(a, ord=1, axis=(1, 0)) + c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1)) + d = norm(a, ord=np.inf, axis=(0, 1)) + assert_allclose(b, c) + assert_allclose(c, d) + assert_allclose(b, d) + assert_(b.shape == c.shape == d.shape) + + @pytest.mark.skipif(NumpyVersion(np.__version__) < '1.10.0', reason="") + def test_keepdims_kwd(self): + a = np.arange(120, dtype='d').reshape(2, 3, 4, 5) + b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True) + c = norm(a, ord=1, axis=(0, 1), keepdims=True) + assert_allclose(b, c) + assert_(b.shape == c.shape) + + +class TestOverwrite(object): + def test_solve(self): + assert_no_overwrite(solve, [(3, 3), (3,)]) + + def test_solve_triangular(self): + assert_no_overwrite(solve_triangular, [(3, 3), (3,)]) + + def test_solve_banded(self): + assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b), + [(4, 6), (6,)]) + + def test_solveh_banded(self): + assert_no_overwrite(solveh_banded, [(2, 6), (6,)]) + + def test_inv(self): + assert_no_overwrite(inv, [(3, 3)]) + + def test_det(self): + assert_no_overwrite(det, [(3, 3)]) + + def test_lstsq(self): + assert_no_overwrite(lstsq, [(3, 2), (3,)]) + + def test_pinv(self): + assert_no_overwrite(pinv, [(3, 3)]) + + def test_pinv2(self): + assert_no_overwrite(pinv2, [(3, 3)]) + + def test_pinvh(self): + assert_no_overwrite(pinvh, [(3, 3)]) + + +class TestSolveCirculant(object): + + def test_basic1(self): + c = np.array([1, 2, 3, 5]) + b = np.array([1, -1, 1, 0]) + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_basic2(self): + # b is a 2-d matrix. + c = np.array([1, 2, -3, -5]) + b = np.arange(12).reshape(4, 3) + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_basic3(self): + # b is a 3-d matrix. + c = np.array([1, 2, -3, -5]) + b = np.arange(24).reshape(4, 3, 2) + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_complex(self): + # Complex b and c + c = np.array([1+2j, -3, 4j, 5]) + b = np.arange(8).reshape(4, 2) + 0.5j + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_random_b_and_c(self): + # Random b and c + np.random.seed(54321) + c = np.random.randn(50) + b = np.random.randn(50) + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_singular(self): + # c gives a singular circulant matrix. + c = np.array([1, 1, 0, 0]) + b = np.array([1, 2, 3, 4]) + x = solve_circulant(c, b, singular='lstsq') + y, res, rnk, s = lstsq(circulant(c), b) + assert_allclose(x, y) + assert_raises(LinAlgError, solve_circulant, x, y) + + def test_axis_args(self): + # Test use of caxis, baxis and outaxis. 
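+ # caxis and baxis pick which axis of c and b carries the circulant
+ # coefficients and the right-hand sides; outaxis sets where the solution
+ # axis lands in the result. A minimal sketch, assuming this file's imports:
+ #   >>> solve_circulant([2, 0, 0], [4, 2, 2])  # circulant(c) == 2*eye(3)
+ #   # -> array([2., 1., 1.])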
+ + # c has shape (2, 1, 4) + c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]]) + + # b has shape (3, 4) + b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]]) + + x = solve_circulant(c, b, baxis=1) + assert_equal(x.shape, (4, 2, 3)) + expected = np.empty_like(x) + expected[:, 0, :] = solve(circulant(c[0]), b.T) + expected[:, 1, :] = solve(circulant(c[1]), b.T) + assert_allclose(x, expected) + + x = solve_circulant(c, b, baxis=1, outaxis=-1) + assert_equal(x.shape, (2, 3, 4)) + assert_allclose(np.rollaxis(x, -1), expected) + + # np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3). + x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1) + assert_equal(x.shape, (4, 2, 3)) + assert_allclose(x, expected) + + def test_native_list_arguments(self): + # Same as test_basic1 using python's native list. + c = [1, 2, 3, 5] + b = [1, -1, 1, 0] + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + +class TestMatrix_Balance(object): + + def test_string_arg(self): + assert_raises(ValueError, matrix_balance, 'Some string for fail') + + def test_infnan_arg(self): + assert_raises(ValueError, matrix_balance, + np.array([[1, 2], [3, np.inf]])) + assert_raises(ValueError, matrix_balance, + np.array([[1, 2], [3, np.nan]])) + + def test_scaling(self): + _, y = matrix_balance(np.array([[1000, 1], [1000, 0]])) + # Pre/post LAPACK 3.5.0 gives the same result up to an offset + # since in each case col norm is x1000 greater and + # 1000 / 32 ~= 1 * 32 hence balanced with 2 ** 5. + assert_allclose(int(np.diff(np.log2(np.diag(y)))), 5) + + def test_scaling_order(self): + A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]]) + x, y = matrix_balance(A) + assert_allclose(solve(y, A).dot(y), x) + + def test_separate(self): + _, (y, z) = matrix_balance(np.array([[1000, 1], [1000, 0]]), + separate=1) + assert_equal(int(np.diff(np.log2(y))), 5) + assert_allclose(z, np.arange(2)) + + def test_permutation(self): + A = block_diag(np.ones((2, 2)), np.tril(np.ones((2, 2))), + np.ones((3, 3))) + x, (y, z) = matrix_balance(A, separate=1) + assert_allclose(y, np.ones_like(y)) + assert_allclose(z, np.array([0, 1, 6, 5, 4, 3, 2])) + + def test_perm_and_scaling(self): + # Matrix with its diagonal removed + cases = ( # Case 0 + np.array([[0., 0., 0., 0., 0.000002], + [0., 0., 0., 0., 0.], + [2., 2., 0., 0., 0.], + [2., 2., 0., 0., 0.], + [0., 0., 0.000002, 0., 0.]]), + # Case 1 user reported GH-7258 + np.array([[-0.5, 0., 0., 0.], + [0., -1., 0., 0.], + [1., 0., -0.5, 0.], + [0., 1., 0., -1.]]), + # Case 2 user reported GH-7258 + np.array([[-3., 0., 1., 0.], + [-1., -1., -0., 1.], + [-3., -0., -0., 0.], + [-1., -0., 1., -1.]]) + ) + + for A in cases: + x, y = matrix_balance(A) + x, (s, p) = matrix_balance(A, separate=1) + ip = np.empty_like(p) + ip[p] = np.arange(A.shape[0]) + assert_allclose(y, np.diag(s)[ip, :]) + assert_allclose(solve(y, A).dot(y), x) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_basic.pyc new file mode 100644 index 0000000..123e20f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_basic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_blas.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_blas.py new file mode 100644 index 0000000..29605dc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_blas.py @@ -0,0 +1,1079 @@ 
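+# A minimal usage sketch of the interface exercised below, assuming only
+# that SciPy is importable: get_blas_funcs resolves the BLAS routine whose
+# precision matches its array arguments, e.g.
+#   >>> import numpy as np
+#   >>> from scipy.linalg import get_blas_funcs
+#   >>> gemm, = get_blas_funcs(('gemm',), (np.ones((2, 2)),))
+#   >>> gemm.typecode  # 'd', the double-precision variant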
+# +# Created by: Pearu Peterson, April 2002 +# +from __future__ import division, print_function, absolute_import + + +__usage__ = """ +Build linalg: + python setup.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.linalg.test()' +""" + +import math + +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, assert_, + assert_array_almost_equal, assert_allclose) +from pytest import raises as assert_raises + +from numpy import float32, float64, complex64, complex128, arange, triu, \ + tril, zeros, tril_indices, ones, mod, diag, append, eye, \ + nonzero + +from numpy.random import rand, seed +from scipy.linalg import _fblas as fblas, get_blas_funcs, toeplitz, solve, \ + solve_triangular + +try: + from scipy.linalg import _cblas as cblas +except ImportError: + cblas = None + +REAL_DTYPES = [float32, float64] +COMPLEX_DTYPES = [complex64, complex128] +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +def test_get_blas_funcs(): + # check that it returns Fortran code for arrays that are + # fortran-ordered + f1, f2, f3 = get_blas_funcs( + ('axpy', 'axpy', 'axpy'), + (np.empty((2, 2), dtype=np.complex64, order='F'), + np.empty((2, 2), dtype=np.complex128, order='C')) + ) + + # get_blas_funcs will choose libraries depending on most generic + # array + assert_equal(f1.typecode, 'z') + assert_equal(f2.typecode, 'z') + if cblas is not None: + assert_equal(f1.module_name, 'cblas') + assert_equal(f2.module_name, 'cblas') + + # check defaults. + f1 = get_blas_funcs('rotg') + assert_equal(f1.typecode, 'd') + + # check also dtype interface + f1 = get_blas_funcs('gemm', dtype=np.complex64) + assert_equal(f1.typecode, 'c') + f1 = get_blas_funcs('gemm', dtype='F') + assert_equal(f1.typecode, 'c') + + # extended precision complex + f1 = get_blas_funcs('gemm', dtype=np.longcomplex) + assert_equal(f1.typecode, 'z') + + # check safe complex upcasting + f1 = get_blas_funcs('axpy', + (np.empty((2, 2), dtype=np.float64), + np.empty((2, 2), dtype=np.complex64)) + ) + assert_equal(f1.typecode, 'z') + + +def test_get_blas_funcs_alias(): + # check alias for get_blas_funcs + f, g = get_blas_funcs(('nrm2', 'dot'), dtype=np.complex64) + assert f.typecode == 'c' + assert g.typecode == 'c' + + f, g, h = get_blas_funcs(('dot', 'dotc', 'dotu'), dtype=np.float64) + assert f is g + assert f is h + + +class TestCBLAS1Simple(object): + + def test_axpy(self): + for p in 'sd': + f = getattr(cblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5), + [7, 9, 18]) + for p in 'cz': + f = getattr(cblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5), + [7, 10j-1, 18]) + + +class TestFBLAS1Simple(object): + + def test_axpy(self): + for p in 'sd': + f = getattr(fblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5), + [7, 9, 18]) + for p in 'cz': + f = getattr(fblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5), + [7, 10j-1, 18]) + + def test_copy(self): + for p in 'sd': + f = getattr(fblas, p+'copy', None) + if f is None: + continue + assert_array_almost_equal(f([3, 4, 5], [8]*3), [3, 4, 5]) + for p in 'cz': + f = getattr(fblas, p+'copy', None) + if f is None: + continue + assert_array_almost_equal(f([3, 4j, 5+3j], [8]*3), [3, 4j, 5+3j]) + + def test_asum(self): + for p in 'sd': + f = getattr(fblas, p+'asum', None) + if f is None: + continue + assert_almost_equal(f([3, -4, 5]), 12) + 
for p in ['sc', 'dz']: + f = getattr(fblas, p+'asum', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j]), 14) + + def test_dot(self): + for p in 'sd': + f = getattr(fblas, p+'dot', None) + if f is None: + continue + assert_almost_equal(f([3, -4, 5], [2, 5, 1]), -9) + + def test_complex_dotu(self): + for p in 'cz': + f = getattr(fblas, p+'dotu', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j], [2, 3, 1]), -9+2j) + + def test_complex_dotc(self): + for p in 'cz': + f = getattr(fblas, p+'dotc', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j], [2, 3j, 1]), 3-14j) + + def test_nrm2(self): + for p in 'sd': + f = getattr(fblas, p+'nrm2', None) + if f is None: + continue + assert_almost_equal(f([3, -4, 5]), math.sqrt(50)) + for p in ['c', 'z', 'sc', 'dz']: + f = getattr(fblas, p+'nrm2', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j]), math.sqrt(50)) + + def test_scal(self): + for p in 'sd': + f = getattr(fblas, p+'scal', None) + if f is None: + continue + assert_array_almost_equal(f(2, [3, -4, 5]), [6, -8, 10]) + for p in 'cz': + f = getattr(fblas, p+'scal', None) + if f is None: + continue + assert_array_almost_equal(f(3j, [3j, -4, 3-4j]), [-9, -12j, 12+9j]) + for p in ['cs', 'zd']: + f = getattr(fblas, p+'scal', None) + if f is None: + continue + assert_array_almost_equal(f(3, [3j, -4, 3-4j]), [9j, -12, 9-12j]) + + def test_swap(self): + for p in 'sd': + f = getattr(fblas, p+'swap', None) + if f is None: + continue + x, y = [2, 3, 1], [-2, 3, 7] + x1, y1 = f(x, y) + assert_array_almost_equal(x1, y) + assert_array_almost_equal(y1, x) + for p in 'cz': + f = getattr(fblas, p+'swap', None) + if f is None: + continue + x, y = [2, 3j, 1], [-2, 3, 7-3j] + x1, y1 = f(x, y) + assert_array_almost_equal(x1, y) + assert_array_almost_equal(y1, x) + + def test_amax(self): + for p in 'sd': + f = getattr(fblas, 'i'+p+'amax') + assert_equal(f([-2, 4, 3]), 1) + for p in 'cz': + f = getattr(fblas, 'i'+p+'amax') + assert_equal(f([-5, 4+3j, 6]), 1) + # XXX: need tests for rot,rotm,rotg,rotmg + + +class TestFBLAS2Simple(object): + + def test_gemv(self): + for p in 'sd': + f = getattr(fblas, p+'gemv', None) + if f is None: + continue + assert_array_almost_equal(f(3, [[3]], [-4]), [-36]) + assert_array_almost_equal(f(3, [[3]], [-4], 3, [5]), [-21]) + for p in 'cz': + f = getattr(fblas, p+'gemv', None) + if f is None: + continue + assert_array_almost_equal(f(3j, [[3-4j]], [-4]), [-48-36j]) + assert_array_almost_equal(f(3j, [[3-4j]], [-4], 3, [5j]), + [-48-21j]) + + def test_ger(self): + + for p in 'sd': + f = getattr(fblas, p+'ger', None) + if f is None: + continue + assert_array_almost_equal(f(1, [1, 2], [3, 4]), [[3, 4], [6, 8]]) + assert_array_almost_equal(f(2, [1, 2, 3], [3, 4]), + [[6, 8], [12, 16], [18, 24]]) + + assert_array_almost_equal(f(1, [1, 2], [3, 4], + a=[[1, 2], [3, 4]]), [[4, 6], [9, 12]]) + + for p in 'cz': + f = getattr(fblas, p+'geru', None) + if f is None: + continue + assert_array_almost_equal(f(1, [1j, 2], [3, 4]), + [[3j, 4j], [6, 8]]) + assert_array_almost_equal(f(-2, [1j, 2j, 3j], [3j, 4j]), + [[6, 8], [12, 16], [18, 24]]) + + for p in 'cz': + for name in ('ger', 'gerc'): + f = getattr(fblas, p+name, None) + if f is None: + continue + assert_array_almost_equal(f(1, [1j, 2], [3, 4]), + [[3j, 4j], [6, 8]]) + assert_array_almost_equal(f(2, [1j, 2j, 3j], [3j, 4j]), + [[6, 8], [12, 16], [18, 24]]) + + def test_syr_her(self): + x = np.arange(1, 5, dtype='d') + resx = np.triu(x[:, np.newaxis] * x) + 
resx_reverse = np.triu(x[::-1, np.newaxis] * x[::-1]) + + y = np.linspace(0, 8.5, 17, endpoint=False) + + z = np.arange(1, 9, dtype='d').view('D') + resz = np.triu(z[:, np.newaxis] * z) + resz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1]) + rehz = np.triu(z[:, np.newaxis] * z.conj()) + rehz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1].conj()) + + w = np.c_[np.zeros(4), z, np.zeros(4)].ravel() + + for p, rtol in zip('sd', [1e-7, 1e-14]): + f = getattr(fblas, p+'syr', None) + if f is None: + continue + assert_allclose(f(1.0, x), resx, rtol=rtol) + assert_allclose(f(1.0, x, lower=True), resx.T, rtol=rtol) + assert_allclose(f(1.0, y, incx=2, offx=2, n=4), resx, rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, y, incx=-2, offx=2, n=4), + resx_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F') + b = f(1.0, x, a=a, overwrite_a=True) + assert_allclose(a, resx, rtol=rtol) + + b = f(2.0, x, a=a) + assert_(a is not b) + assert_allclose(b, 3*resx, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, incx=0) + assert_raises(Exception, f, 1.0, x, offx=5) + assert_raises(Exception, f, 1.0, x, offx=-2) + assert_raises(Exception, f, 1.0, x, n=-2) + assert_raises(Exception, f, 1.0, x, n=5) + assert_raises(Exception, f, 1.0, x, lower=2) + assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) + + for p, rtol in zip('cz', [1e-7, 1e-14]): + f = getattr(fblas, p+'syr', None) + if f is None: + continue + assert_allclose(f(1.0, z), resz, rtol=rtol) + assert_allclose(f(1.0, z, lower=True), resz.T, rtol=rtol) + assert_allclose(f(1.0, w, incx=3, offx=1, n=4), resz, rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, w, incx=-3, offx=1, n=4), + resz_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') + b = f(1.0, z, a=a, overwrite_a=True) + assert_allclose(a, resz, rtol=rtol) + + b = f(2.0, z, a=a) + assert_(a is not b) + assert_allclose(b, 3*resz, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, incx=0) + assert_raises(Exception, f, 1.0, x, offx=5) + assert_raises(Exception, f, 1.0, x, offx=-2) + assert_raises(Exception, f, 1.0, x, n=-2) + assert_raises(Exception, f, 1.0, x, n=5) + assert_raises(Exception, f, 1.0, x, lower=2) + assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) + + for p, rtol in zip('cz', [1e-7, 1e-14]): + f = getattr(fblas, p+'her', None) + if f is None: + continue + assert_allclose(f(1.0, z), rehz, rtol=rtol) + assert_allclose(f(1.0, z, lower=True), rehz.T.conj(), rtol=rtol) + assert_allclose(f(1.0, w, incx=3, offx=1, n=4), rehz, rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, w, incx=-3, offx=1, n=4), + rehz_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') + b = f(1.0, z, a=a, overwrite_a=True) + assert_allclose(a, rehz, rtol=rtol) + + b = f(2.0, z, a=a) + assert_(a is not b) + assert_allclose(b, 3*rehz, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, incx=0) + assert_raises(Exception, f, 1.0, x, offx=5) + assert_raises(Exception, f, 1.0, x, offx=-2) + assert_raises(Exception, f, 1.0, x, n=-2) + assert_raises(Exception, f, 1.0, x, n=5) + assert_raises(Exception, f, 1.0, x, lower=2) + assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) + + def test_syr2(self): + x = np.arange(1, 5, dtype='d') + y = np.arange(5, 9, dtype='d') + resxy = np.triu(x[:, np.newaxis] * y + y[:, np.newaxis] * x) + resxy_reverse = np.triu(x[::-1, np.newaxis] * y[::-1] + + 
y[::-1, np.newaxis] * x[::-1]) + + q = np.linspace(0, 8.5, 17, endpoint=False) + + for p, rtol in zip('sd', [1e-7, 1e-14]): + f = getattr(fblas, p+'syr2', None) + if f is None: + continue + assert_allclose(f(1.0, x, y), resxy, rtol=rtol) + assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol) + assert_allclose(f(1.0, x, y, lower=True), resxy.T, rtol=rtol) + + assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10), + resxy, rtol=rtol) + assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10, n=3), + resxy[:3, :3], rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, q, q, incx=-2, offx=2, incy=-2, offy=10), + resxy_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F') + b = f(1.0, x, y, a=a, overwrite_a=True) + assert_allclose(a, resxy, rtol=rtol) + + b = f(2.0, x, y, a=a) + assert_(a is not b) + assert_allclose(b, 3*resxy, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, y, incx=0) + assert_raises(Exception, f, 1.0, x, y, offx=5) + assert_raises(Exception, f, 1.0, x, y, offx=-2) + assert_raises(Exception, f, 1.0, x, y, incy=0) + assert_raises(Exception, f, 1.0, x, y, offy=5) + assert_raises(Exception, f, 1.0, x, y, offy=-2) + assert_raises(Exception, f, 1.0, x, y, n=-2) + assert_raises(Exception, f, 1.0, x, y, n=5) + assert_raises(Exception, f, 1.0, x, y, lower=2) + assert_raises(Exception, f, 1.0, x, y, + a=np.zeros((2, 2), 'd', 'F')) + + def test_her2(self): + x = np.arange(1, 9, dtype='d').view('D') + y = np.arange(9, 17, dtype='d').view('D') + resxy = x[:, np.newaxis] * y.conj() + y[:, np.newaxis] * x.conj() + resxy = np.triu(resxy) + + resxy_reverse = x[::-1, np.newaxis] * y[::-1].conj() + resxy_reverse += y[::-1, np.newaxis] * x[::-1].conj() + resxy_reverse = np.triu(resxy_reverse) + + u = np.c_[np.zeros(4), x, np.zeros(4)].ravel() + v = np.c_[np.zeros(4), y, np.zeros(4)].ravel() + + for p, rtol in zip('cz', [1e-7, 1e-14]): + f = getattr(fblas, p+'her2', None) + if f is None: + continue + assert_allclose(f(1.0, x, y), resxy, rtol=rtol) + assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol) + assert_allclose(f(1.0, x, y, lower=True), resxy.T.conj(), + rtol=rtol) + + assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1), + resxy, rtol=rtol) + assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1, n=3), + resxy[:3, :3], rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, u, v, incx=-3, offx=1, incy=-3, offy=1), + resxy_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') + b = f(1.0, x, y, a=a, overwrite_a=True) + assert_allclose(a, resxy, rtol=rtol) + + b = f(2.0, x, y, a=a) + assert_(a is not b) + assert_allclose(b, 3*resxy, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, y, incx=0) + assert_raises(Exception, f, 1.0, x, y, offx=5) + assert_raises(Exception, f, 1.0, x, y, offx=-2) + assert_raises(Exception, f, 1.0, x, y, incy=0) + assert_raises(Exception, f, 1.0, x, y, offy=5) + assert_raises(Exception, f, 1.0, x, y, offy=-2) + assert_raises(Exception, f, 1.0, x, y, n=-2) + assert_raises(Exception, f, 1.0, x, y, n=5) + assert_raises(Exception, f, 1.0, x, y, lower=2) + assert_raises(Exception, f, 1.0, x, y, + a=np.zeros((2, 2), 'd', 'F')) + + def test_gbmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 7 + m = 5 + kl = 1 + ku = 2 + # fake a banded matrix via toeplitz + A = toeplitz(append(rand(kl+1), zeros(m-kl-1)), + append(rand(ku+1), zeros(n-ku-1))) + A = A.astype(dtype) + Ab = zeros((kl+ku+1, n), 
dtype=dtype) + + # Form the banded storage + Ab[2, :5] = A[0, 0] # diag + Ab[1, 1:6] = A[0, 1] # sup1 + Ab[0, 2:7] = A[0, 2] # sup2 + Ab[3, :4] = A[1, 0] # sub1 + + x = rand(n).astype(dtype) + y = rand(m).astype(dtype) + alpha, beta = dtype(3), dtype(-5) + + func, = get_blas_funcs(('gbmv',), dtype=dtype) + y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab, + x=x, y=y, beta=beta) + y2 = alpha * A.dot(x) + beta * y + assert_array_almost_equal(y1, y2) + + def test_sbmv_hbmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 6 + k = 2 + A = zeros((n, n), dtype=dtype) + Ab = zeros((k+1, n), dtype=dtype) + + # Form the array and its packed banded storage + A[arange(n), arange(n)] = rand(n) + for ind2 in range(1, k+1): + temp = rand(n-ind2) + A[arange(n-ind2), arange(ind2, n)] = temp + Ab[-1-ind2, ind2:] = temp + A = A.astype(dtype) + A = A + A.T if ind < 2 else A + A.conj().T + Ab[-1, :] = diag(A) + x = rand(n).astype(dtype) + y = rand(n).astype(dtype) + alpha, beta = dtype(1.25), dtype(3) + + if ind > 1: + func, = get_blas_funcs(('hbmv',), dtype=dtype) + else: + func, = get_blas_funcs(('sbmv',), dtype=dtype) + y1 = func(k=k, alpha=alpha, a=Ab, x=x, y=y, beta=beta) + y2 = alpha * A.dot(x) + beta * y + assert_array_almost_equal(y1, y2) + + def test_spmv_hpmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): + n = 3 + A = rand(n, n).astype(dtype) + if ind > 1: + A += rand(n, n)*1j + A = A.astype(dtype) + A = A + A.T if ind < 4 else A + A.conj().T + c, r = tril_indices(n) + Ap = A[r, c] + x = rand(n).astype(dtype) + y = rand(n).astype(dtype) + xlong = arange(2*n).astype(dtype) + ylong = ones(2*n).astype(dtype) + alpha, beta = dtype(1.25), dtype(2) + + if ind > 3: + func, = get_blas_funcs(('hpmv',), dtype=dtype) + else: + func, = get_blas_funcs(('spmv',), dtype=dtype) + y1 = func(n=n, alpha=alpha, ap=Ap, x=x, y=y, beta=beta) + y2 = alpha * A.dot(x) + beta * y + assert_array_almost_equal(y1, y2) + + # Test inc and offsets + y1 = func(n=n-1, alpha=alpha, beta=beta, x=xlong, y=ylong, ap=Ap, + incx=2, incy=2, offx=n, offy=n) + y2 = (alpha * A[:-1, :-1]).dot(xlong[3::2]) + beta * ylong[3::2] + assert_array_almost_equal(y1[3::2], y2) + assert_almost_equal(y1[4], ylong[4]) + + def test_spr_hpr(self): + seed(1234) + for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): + n = 3 + A = rand(n, n).astype(dtype) + if ind > 1: + A += rand(n, n)*1j + A = A.astype(dtype) + A = A + A.T if ind < 4 else A + A.conj().T + c, r = tril_indices(n) + Ap = A[r, c] + x = rand(n).astype(dtype) + alpha = (DTYPES+COMPLEX_DTYPES)[mod(ind, 4)](2.5) + + if ind > 3: + func, = get_blas_funcs(('hpr',), dtype=dtype) + y2 = alpha * x[:, None].dot(x[None, :].conj()) + A + else: + func, = get_blas_funcs(('spr',), dtype=dtype) + y2 = alpha * x[:, None].dot(x[None, :]) + A + + y1 = func(n=n, alpha=alpha, ap=Ap, x=x) + y1f = zeros((3, 3), dtype=dtype) + y1f[r, c] = y1 + y1f[c, r] = y1.conj() if ind > 3 else y1 + assert_array_almost_equal(y1f, y2) + + def test_spr2_hpr2(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 3 + A = rand(n, n).astype(dtype) + if ind > 1: + A += rand(n, n)*1j + A = A.astype(dtype) + A = A + A.T if ind < 2 else A + A.conj().T + c, r = tril_indices(n) + Ap = A[r, c] + x = rand(n).astype(dtype) + y = rand(n).astype(dtype) + alpha = dtype(2) + + if ind > 1: + func, = get_blas_funcs(('hpr2',), dtype=dtype) + else: + func, = get_blas_funcs(('spr2',), dtype=dtype) + + u = alpha.conj() * x[:, None].dot(y[None, :].conj()) + y2 = A + u + u.conj().T + y1 = func(n=n, 
alpha=alpha, x=x, y=y, ap=Ap) + y1f = zeros((3, 3), dtype=dtype) + y1f[r, c] = y1 + y1f[[1, 2, 2], [0, 0, 1]] = y1[[1, 3, 4]].conj() + assert_array_almost_equal(y1f, y2) + + def test_tbmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + k = 3 + x = rand(n).astype(dtype) + A = zeros((n, n), dtype=dtype) + # Banded upper triangular array + for sup in range(k+1): + A[arange(n-sup), arange(sup, n)] = rand(n-sup) + + # Add complex parts for c,z + if ind > 1: + A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype) + + # Form the banded storage + Ab = zeros((k+1, n), dtype=dtype) + for row in range(k+1): + Ab[-row-1, row:] = diag(A, k=row) + func, = get_blas_funcs(('tbmv',), dtype=dtype) + + y1 = func(k=k, a=Ab, x=x) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=1) + y2 = A.T.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=2) + y2 = A.conj().T.dot(x) + assert_array_almost_equal(y1, y2) + + def test_tbsv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 6 + k = 3 + x = rand(n).astype(dtype) + A = zeros((n, n), dtype=dtype) + # Banded upper triangular array + for sup in range(k+1): + A[arange(n-sup), arange(sup, n)] = rand(n-sup) + + # Add complex parts for c,z + if ind > 1: + A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype) + + # Form the banded storage + Ab = zeros((k+1, n), dtype=dtype) + for row in range(k+1): + Ab[-row-1, row:] = diag(A, k=row) + func, = get_blas_funcs(('tbsv',), dtype=dtype) + + y1 = func(k=k, a=Ab, x=x) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=1) + y2 = solve(A.T, x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=2) + y2 = solve(A.conj().T, x) + assert_array_almost_equal(y1, y2) + + def test_tpmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + x = rand(n).astype(dtype) + # Upper triangular array + A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j) + # Form the packed storage + c, r = tril_indices(n) + Ap = A[r, c] + func, = get_blas_funcs(('tpmv',), dtype=dtype) + + y1 = func(n=n, ap=Ap, x=x) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1) + y2 = A.T.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2) + y2 = A.conj().T.dot(x) + assert_array_almost_equal(y1, y2) + + def test_tpsv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + x = rand(n).astype(dtype) + # Upper triangular array + A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j) + A += eye(n) + # Form the packed storage + c, r = tril_indices(n) + Ap = A[r, c] + func, = get_blas_funcs(('tpsv',), dtype=dtype) + + y1 = func(n=n, ap=Ap, x=x) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1) + y2 = solve(A.T, x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, 
ap=Ap, x=x, diag=1, trans=2) + y2 = solve(A.conj().T, x) + assert_array_almost_equal(y1, y2) + + def test_trmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 3 + A = (rand(n, n)+eye(n)).astype(dtype) + x = rand(3).astype(dtype) + func, = get_blas_funcs(('trmv',), dtype=dtype) + + y1 = func(a=A, x=x) + y2 = triu(A).dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = triu(A).dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=1) + y2 = triu(A).T.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=2) + y2 = triu(A).conj().T.dot(x) + assert_array_almost_equal(y1, y2) + + def test_trsv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 15 + A = (rand(n, n)+eye(n)).astype(dtype) + x = rand(n).astype(dtype) + func, = get_blas_funcs(('trsv',), dtype=dtype) + + y1 = func(a=A, x=x) + y2 = solve(triu(A), x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, lower=1) + y2 = solve(tril(A), x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = solve(triu(A), x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=1) + y2 = solve(triu(A).T, x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=2) + y2 = solve(triu(A).conj().T, x) + assert_array_almost_equal(y1, y2) + + +class TestFBLAS3Simple(object): + + def test_gemm(self): + for p in 'sd': + f = getattr(fblas, p+'gemm', None) + if f is None: + continue + assert_array_almost_equal(f(3, [3], [-4]), [[-36]]) + assert_array_almost_equal(f(3, [3], [-4], 3, [5]), [-21]) + for p in 'cz': + f = getattr(fblas, p+'gemm', None) + if f is None: + continue + assert_array_almost_equal(f(3j, [3-4j], [-4]), [[-48-36j]]) + assert_array_almost_equal(f(3j, [3-4j], [-4], 3, [5j]), [-48-21j]) + + +def _get_func(func, ps='sdzc'): + """Just a helper: return a specified BLAS function w/typecode.""" + for p in ps: + f = getattr(fblas, p+func, None) + if f is None: + continue + yield f + + +class TestBLAS3Symm(object): + + def setup_method(self): + self.a = np.array([[1., 2.], + [0., 1.]]) + self.b = np.array([[1., 0., 3.], + [0., -1., 2.]]) + self.c = np.ones((2, 3)) + self.t = np.array([[2., -1., 8.], + [3., 0., 9.]]) + + def test_symm(self): + for f in _get_func('symm'): + res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.) + assert_array_almost_equal(res, self.t) + + res = f(a=self.a.T, b=self.b, lower=1, c=self.c, alpha=1., beta=1.) + assert_array_almost_equal(res, self.t) + + res = f(a=self.a, b=self.b.T, side=1, c=self.c.T, + alpha=1., beta=1.) + assert_array_almost_equal(res, self.t.T) + + def test_summ_wrong_side(self): + f = getattr(fblas, 'dsymm', None) + if f is not None: + assert_raises(Exception, f, **{'a': self.a, 'b': self.b, + 'alpha': 1, 'side': 1}) + # `side=1` means C <- B*A, hence shapes of A and B are to be + # compatible. Otherwise, f2py exception is raised + + def test_symm_wrong_uplo(self): + """SYMM only considers the upper/lower part of A. Hence setting + wrong value for `lower` (default is lower=0, meaning upper triangle) + gives a wrong result. + """ + f = getattr(fblas, 'dsymm', None) + if f is not None: + res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.) + assert np.allclose(res, self.t) + + res = f(a=self.a, b=self.b, lower=1, c=self.c, alpha=1., beta=1.) 
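+ # With lower=1, SYMM builds the symmetric A from the lower triangle of a,
+ # whose off-diagonal entry is zero, so A degenerates to the identity here
+ # and the upper-triangle reference self.t is deliberately missed.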
+ assert not np.allclose(res, self.t) + + +class TestBLAS3Syrk(object): + def setup_method(self): + self.a = np.array([[1., 0.], + [0., -2.], + [2., 3.]]) + self.t = np.array([[1., 0., 2.], + [0., 4., -6.], + [2., -6., 13.]]) + self.tt = np.array([[5., 6.], + [6., 13.]]) + + def test_syrk(self): + for f in _get_func('syrk'): + c = f(a=self.a, alpha=1.) + assert_array_almost_equal(np.triu(c), np.triu(self.t)) + + c = f(a=self.a, alpha=1., lower=1) + assert_array_almost_equal(np.tril(c), np.tril(self.t)) + + c0 = np.ones(self.t.shape) + c = f(a=self.a, alpha=1., beta=1., c=c0) + assert_array_almost_equal(np.triu(c), np.triu(self.t+c0)) + + c = f(a=self.a, alpha=1., trans=1) + assert_array_almost_equal(np.triu(c), np.triu(self.tt)) + + # prints '0-th dimension must be fixed to 3 but got 5', + # FIXME: suppress? + # FIXME: how to catch the _fblas.error? + def test_syrk_wrong_c(self): + f = getattr(fblas, 'dsyrk', None) + if f is not None: + assert_raises(Exception, f, **{'a': self.a, 'alpha': 1., + 'c': np.ones((5, 8))}) + # if C is supplied, it must have compatible dimensions + + +class TestBLAS3Syr2k(object): + def setup_method(self): + self.a = np.array([[1., 0.], + [0., -2.], + [2., 3.]]) + self.b = np.array([[0., 1.], + [1., 0.], + [0, 1.]]) + self.t = np.array([[0., -1., 3.], + [-1., 0., 0.], + [3., 0., 6.]]) + self.tt = np.array([[0., 1.], + [1., 6]]) + + def test_syr2k(self): + for f in _get_func('syr2k'): + c = f(a=self.a, b=self.b, alpha=1.) + assert_array_almost_equal(np.triu(c), np.triu(self.t)) + + c = f(a=self.a, b=self.b, alpha=1., lower=1) + assert_array_almost_equal(np.tril(c), np.tril(self.t)) + + c0 = np.ones(self.t.shape) + c = f(a=self.a, b=self.b, alpha=1., beta=1., c=c0) + assert_array_almost_equal(np.triu(c), np.triu(self.t+c0)) + + c = f(a=self.a, b=self.b, alpha=1., trans=1) + assert_array_almost_equal(np.triu(c), np.triu(self.tt)) + + # prints '0-th dimension must be fixed to 3 but got 5', FIXME: suppress? + def test_syr2k_wrong_c(self): + f = getattr(fblas, 'dsyr2k', None) + if f is not None: + assert_raises(Exception, f, **{'a': self.a, + 'b': self.b, + 'alpha': 1., + 'c': np.zeros((15, 8))}) + # if C is supplied, it must have compatible dimensions + + +class TestSyHe(object): + """Quick and simple tests for (zc)-symm, syrk, syr2k.""" + def setup_method(self): + self.sigma_y = np.array([[0., -1.j], + [1.j, 0.]]) + + def test_symm_zc(self): + for f in _get_func('symm', 'zc'): + # NB: a is symmetric w/upper diag of ONLY + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([1, -1])) + + def test_hemm_zc(self): + for f in _get_func('hemm', 'zc'): + # NB: a is hermitian w/upper diag of ONLY + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([1, 1])) + + def test_syrk_zr(self): + for f in _get_func('syrk', 'zc'): + res = f(a=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([-1, -1])) + + def test_herk_zr(self): + for f in _get_func('herk', 'zc'): + res = f(a=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([1, 1])) + + def test_syr2k_zr(self): + for f in _get_func('syr2k', 'zc'): + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), 2.*np.diag([-1, -1])) + + def test_her2k_zr(self): + for f in _get_func('her2k', 'zc'): + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) 
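+ # her2k forms alpha*a*b^H + conj(alpha)*b*a^H; with a = b = sigma_y
+ # (Hermitian, sigma_y**2 == eye(2)) each term is the identity, giving
+ # the 2*diag([1, 1]) reference checked below.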
+ assert_array_almost_equal(np.triu(res), 2.*np.diag([1, 1])) + + +class TestTRMM(object): + """Quick and simple tests for dtrmm.""" + def setup_method(self): + self.a = np.array([[1., 2., ], + [-2., 1.]]) + self.b = np.array([[3., 4., -1.], + [5., 6., -2.]]) + + def test_ab(self): + f = getattr(fblas, 'dtrmm', None) + if f is not None: + result = f(1., self.a, self.b) + # default a is upper triangular + expected = np.array([[13., 16., -5.], + [5., 6., -2.]]) + assert_array_almost_equal(result, expected) + + def test_ab_lower(self): + f = getattr(fblas, 'dtrmm', None) + if f is not None: + result = f(1., self.a, self.b, lower=True) + expected = np.array([[3., 4., -1.], + [-1., -2., 0.]]) # now a is lower triangular + assert_array_almost_equal(result, expected) + + def test_b_overwrites(self): + # BLAS dtrmm modifies B argument in-place. + # Here the default is to copy, but this can be overridden + f = getattr(fblas, 'dtrmm', None) + if f is not None: + for overwr in [True, False]: + bcopy = self.b.copy() + result = f(1., self.a, bcopy, overwrite_b=overwr) + # C-contiguous arrays are copied + assert_(bcopy.flags.f_contiguous is False and + np.may_share_memory(bcopy, result) is False) + assert_equal(bcopy, self.b) + + bcopy = np.asfortranarray(self.b.copy()) # or just transpose it + result = f(1., self.a, bcopy, overwrite_b=True) + assert_(bcopy.flags.f_contiguous is True and + np.may_share_memory(bcopy, result) is True) + assert_array_almost_equal(bcopy, result) + + +def test_trsm(): + seed(1234) + for ind, dtype in enumerate(DTYPES): + tol = np.finfo(dtype).eps*1000 + func, = get_blas_funcs(('trsm',), dtype=dtype) + + # Test protection against size mismatches + A = rand(4, 5).astype(dtype) + B = rand(4, 4).astype(dtype) + alpha = dtype(1) + assert_raises(Exception, func, alpha, A, B) + assert_raises(Exception, func, alpha, A.T, B) + + n = 8 + m = 7 + alpha = dtype(-2.5) + A = (rand(m, m) if ind < 2 else rand(m, m) + rand(m, m)*1j) + eye(m) + A = A.astype(dtype) + Au = triu(A) + Al = tril(A) + B1 = rand(m, n).astype(dtype) + B2 = rand(n, m).astype(dtype) + + x1 = func(alpha=alpha, a=A, b=B1) + assert_equal(B1.shape, x1.shape) + x2 = solve(Au, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B1, trans_a=1) + x2 = solve(Au.T, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B1, trans_a=2) + x2 = solve(Au.conj().T, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B1, diag=1) + Au[arange(m), arange(m)] = dtype(1) + x2 = solve(Au, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1) + x2 = solve(Au.conj().T, alpha*B2.conj().T) + assert_allclose(x1, x2.conj().T, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1, lower=1) + Al[arange(m), arange(m)] = dtype(1) + x2 = solve(Al.conj().T, alpha*B2.conj().T) + assert_allclose(x1, x2.conj().T, atol=tol) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_blas.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_blas.pyc new file mode 100644 index 0000000..fcd955c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_blas.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_build.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_build.py new file mode 100644 index 0000000..26fdbf5 --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_build.py @@ -0,0 +1,57 @@ +from __future__ import division, print_function, absolute_import + +from subprocess import call, PIPE, Popen +import sys +import re + +import pytest +from numpy.testing import assert_ +from numpy.compat import asbytes + +from scipy.linalg import _flapack as flapack + +# XXX: this is copied from numpy trunk. Can be removed when we will depend on +# numpy 1.3 + + +class FindDependenciesLdd: + def __init__(self): + self.cmd = ['ldd'] + + try: + st = call(self.cmd, stdout=PIPE, stderr=PIPE) + except OSError: + raise RuntimeError("command %s cannot be run" % self.cmd) + + def get_dependencies(self, file): + p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + if not (p.returncode == 0): + raise RuntimeError("Failed to check dependencies for %s" % file) + + return stdout + + def grep_dependencies(self, file, deps): + stdout = self.get_dependencies(file) + + rdeps = dict([(asbytes(dep), re.compile(asbytes(dep))) for dep in deps]) + founds = [] + for l in stdout.splitlines(): + for k, v in rdeps.items(): + if v.search(l): + founds.append(k) + + return founds + + +class TestF77Mismatch(object): + @pytest.mark.skipif(not(sys.platform[:5] == 'linux'), + reason="Skipping fortran compiler mismatch on non Linux platform") + def test_lapack(self): + f = FindDependenciesLdd() + deps = f.grep_dependencies(flapack.__file__, + ['libg2c', 'libgfortran']) + assert_(not (len(deps) > 1), +"""Both g77 and gfortran runtimes linked in scipy.linalg.flapack ! This is +likely to cause random crashes and wrong results. See numpy INSTALL.rst.txt for +more information.""") diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_build.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_build.pyc new file mode 100644 index 0000000..a38d0fc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_build.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_blas.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_blas.py new file mode 100644 index 0000000..67a4159 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_blas.py @@ -0,0 +1,120 @@ +import numpy as np +from numpy.testing import (assert_allclose, + assert_equal) +import scipy.linalg.cython_blas as blas + +class TestDGEMM(object): + + def test_transposes(self): + + a = np.arange(12, dtype='d').reshape((3, 4))[:2,:2] + b = np.arange(1, 13, dtype='d').reshape((4, 3))[:2,:2] + c = np.empty((2, 4))[:2,:2] + + blas._test_dgemm(1., a, b, 0., c) + assert_allclose(c, a.dot(b)) + + blas._test_dgemm(1., a.T, b, 0., c) + assert_allclose(c, a.T.dot(b)) + + blas._test_dgemm(1., a, b.T, 0., c) + assert_allclose(c, a.dot(b.T)) + + blas._test_dgemm(1., a.T, b.T, 0., c) + assert_allclose(c, a.T.dot(b.T)) + + blas._test_dgemm(1., a, b, 0., c.T) + assert_allclose(c, a.dot(b).T) + + blas._test_dgemm(1., a.T, b, 0., c.T) + assert_allclose(c, a.T.dot(b).T) + + blas._test_dgemm(1., a, b.T, 0., c.T) + assert_allclose(c, a.dot(b.T).T) + + blas._test_dgemm(1., a.T, b.T, 0., c.T) + assert_allclose(c, a.T.dot(b.T).T) + + def test_shapes(self): + a = np.arange(6, dtype='d').reshape((3, 2)) + b = np.arange(-6, 2, dtype='d').reshape((2, 4)) + c = np.empty((3, 4)) + + blas._test_dgemm(1., a, b, 0., c) + assert_allclose(c, a.dot(b)) + + blas._test_dgemm(1., b.T, a.T, 0., c.T) + 
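+        # writing the product into c.T checks that a transposed (i.e.
+        # Fortran-ordered) output buffer is handled correctly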
assert_allclose(c, b.T.dot(a.T).T) + +class TestWfuncPointers(object): + """ Test the function pointers that are expected to fail on + Mac OS X without the additional entry statement in their definitions + in fblas_l1.pyf.src. """ + + def test_complex_args(self): + + cx = np.array([.5 + 1.j, .25 - .375j, 12.5 - 4.j], np.complex64) + cy = np.array([.8 + 2.j, .875 - .625j, -1. + 2.j], np.complex64) + + assert_allclose(blas._test_cdotc(cx, cy), + -17.6468753815+21.3718757629j, 5) + assert_allclose(blas._test_cdotu(cx, cy), + -6.11562538147+30.3156242371j, 5) + + assert_equal(blas._test_icamax(cx), 3) + + assert_allclose(blas._test_scasum(cx), 18.625, 5) + assert_allclose(blas._test_scnrm2(cx), 13.1796483994, 5) + + assert_allclose(blas._test_cdotc(cx[::2], cy[::2]), + -18.1000003815+21.2000007629j, 5) + assert_allclose(blas._test_cdotu(cx[::2], cy[::2]), + -6.10000038147+30.7999992371j, 5) + assert_allclose(blas._test_scasum(cx[::2]), 18., 5) + assert_allclose(blas._test_scnrm2(cx[::2]), 13.1719398499, 5) + + def test_double_args(self): + + x = np.array([5., -3, -.5], np.float64) + y = np.array([2, 1, .5], np.float64) + + assert_allclose(blas._test_dasum(x), 8.5, 10) + assert_allclose(blas._test_ddot(x, y), 6.75, 10) + assert_allclose(blas._test_dnrm2(x), 5.85234975815, 10) + + assert_allclose(blas._test_dasum(x[::2]), 5.5, 10) + assert_allclose(blas._test_ddot(x[::2], y[::2]), 9.75, 10) + assert_allclose(blas._test_dnrm2(x[::2]), 5.0249376297, 10) + + assert_equal(blas._test_idamax(x), 1) + + def test_float_args(self): + + x = np.array([5., -3, -.5], np.float32) + y = np.array([2, 1, .5], np.float32) + + assert_equal(blas._test_isamax(x), 1) + + assert_allclose(blas._test_sasum(x), 8.5, 5) + assert_allclose(blas._test_sdot(x, y), 6.75, 5) + assert_allclose(blas._test_snrm2(x), 5.85234975815, 5) + + assert_allclose(blas._test_sasum(x[::2]), 5.5, 5) + assert_allclose(blas._test_sdot(x[::2], y[::2]), 9.75, 5) + assert_allclose(blas._test_snrm2(x[::2]), 5.0249376297, 5) + + def test_double_complex_args(self): + + cx = np.array([.5 + 1.j, .25 - .375j, 13. - 4.j], np.complex128) + cy = np.array([.875 + 2.j, .875 - .625j, -1. 
+ 2.j], np.complex128) + + assert_equal(blas._test_izamax(cx), 3) + + assert_allclose(blas._test_zdotc(cx, cy), -18.109375+22.296875j, 10) + assert_allclose(blas._test_zdotu(cx, cy), -6.578125+31.390625j, 10) + + assert_allclose(blas._test_zdotc(cx[::2], cy[::2]), + -18.5625+22.125j, 10) + assert_allclose(blas._test_zdotu(cx[::2], cy[::2]), + -6.5625+31.875j, 10) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_blas.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_blas.pyc new file mode 100644 index 0000000..4fddbee Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_blas.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_lapack.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_lapack.py new file mode 100644 index 0000000..905ba76 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_lapack.py @@ -0,0 +1,17 @@ +from numpy.testing import assert_allclose +from scipy.linalg import cython_lapack as cython_lapack +from scipy.linalg import lapack + + +class TestLamch(object): + + def test_slamch(self): + for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']: + assert_allclose(cython_lapack._test_slamch(c), + lapack.slamch(c)) + + def test_dlamch(self): + for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']: + assert_allclose(cython_lapack._test_dlamch(c), + lapack.dlamch(c)) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_lapack.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_lapack.pyc new file mode 100644 index 0000000..1e1e6fb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_cython_lapack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp.py new file mode 100644 index 0000000..9ec6c51 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp.py @@ -0,0 +1,2830 @@ +""" Test functions for linalg.decomp module + +""" +from __future__ import division, print_function, absolute_import + +__usage__ = """ +Build linalg: + python setup_linalg.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.linalg.test()' +""" + +import itertools +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, + assert_array_almost_equal, assert_array_equal, + assert_, assert_allclose) + +import pytest +from pytest import raises as assert_raises + +from scipy._lib.six import xrange + +from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr, + schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq, + eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth, ordqz, + subspace_angles, hadamard, eigvalsh_tridiagonal, eigh_tridiagonal, + null_space, cdf2rdf) +from scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \ + dsbev, dsbevd, dsbevx, zhbevd, zhbevx +from scipy.linalg.misc import norm +from scipy.linalg._decomp_qz import _select_function + +from numpy import array, transpose, sometrue, diag, ones, linalg, \ + argsort, zeros, arange, float32, complex64, dot, conj, identity, \ + ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \ + asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\ + 
triu, tril + +from numpy.random import normal, seed, random + +from scipy.linalg._testutils import assert_no_overwrite + +# digit precision to use in asserts for different types +DIGITS = {'d':11, 'D':11, 'f':4, 'F':4} + +def clear_fuss(ar, fuss_binary_bits=7): + """Clears trailing `fuss_binary_bits` of mantissa of a floating number""" + x = np.asanyarray(ar) + if np.iscomplexobj(x): + return clear_fuss(x.real) + 1j * clear_fuss(x.imag) + + significant_binary_bits = np.finfo(x.dtype).nmant + x_mant, x_exp = np.frexp(x) + f = 2.0**(significant_binary_bits - fuss_binary_bits) + x_mant *= f + np.rint(x_mant, out=x_mant) + x_mant /= f + + return np.ldexp(x_mant, x_exp) + + +# XXX: This function should be available through numpy.testing + + +def assert_dtype_equal(act, des): + if isinstance(act, ndarray): + act = act.dtype + else: + act = dtype(act) + + if isinstance(des, ndarray): + des = des.dtype + else: + des = dtype(des) + + assert_(act == des, 'dtype mismatch: "%s" (should be "%s") ' % (act, des)) + +# XXX: This function should not be defined here, but somewhere in +# scipy.linalg namespace + + +def symrand(dim_or_eigv): + """Return a random symmetric (Hermitian) matrix. + + If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues + uniformly distributed on (-1,1). + + If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose + eigenvalues are 'a'. + """ + if isinstance(dim_or_eigv, int): + dim = dim_or_eigv + d = random(dim)*2 - 1 + elif (isinstance(dim_or_eigv, ndarray) and + len(dim_or_eigv.shape) == 1): + dim = dim_or_eigv.shape[0] + d = dim_or_eigv + else: + raise TypeError("input type not supported.") + + v = random_rot(dim) + h = dot(dot(v.T.conj(), diag(d)), v) + # to avoid roundoff errors, symmetrize the matrix (again) + h = 0.5*(h.T+h) + return h + +# XXX: This function should not be defined here, but somewhere in +# scipy.linalg namespace + + +def random_rot(dim): + """Return a random rotation matrix, drawn from the Haar distribution + (the only uniform distribution on SO(n)). + The algorithm is described in the paper + Stewart, G.W., 'The efficient generation of random orthogonal + matrices with an application to condition estimators', SIAM Journal + on Numerical Analysis, 17(3), pp. 403-409, 1980. 
+ For more information see + https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization""" + H = eye(dim) + D = ones((dim,)) + for n in range(1, dim): + x = normal(size=(dim-n+1,)) + D[n-1] = sign(x[0]) + x[0] -= D[n-1]*sqrt((x*x).sum()) + # Householder transformation + + Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum() + mat = eye(dim) + mat[n-1:,n-1:] = Hx + H = dot(H, mat) + # Fix the last sign such that the determinant is 1 + D[-1] = -D.prod() + H = (D*H.T).T + return H + + +class TestEigVals(object): + + def test_simple(self): + a = [[1,2,3],[1,2,3],[2,5,6]] + w = eigvals(a) + exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] + assert_array_almost_equal(w,exact_w) + + def test_simple_tr(self): + a = array([[1,2,3],[1,2,3],[2,5,6]],'d') + a = transpose(a).copy() + a = transpose(a) + w = eigvals(a) + exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] + assert_array_almost_equal(w,exact_w) + + def test_simple_complex(self): + a = [[1,2,3],[1,2,3],[2,5,6+1j]] + w = eigvals(a) + exact_w = [(9+1j+sqrt(92+6j))/2, + 0, + (9+1j-sqrt(92+6j))/2] + assert_array_almost_equal(w,exact_w) + + def test_finite(self): + a = [[1,2,3],[1,2,3],[2,5,6]] + w = eigvals(a, check_finite=False) + exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] + assert_array_almost_equal(w,exact_w) + + +class TestEig(object): + + def test_simple(self): + a = [[1,2,3],[1,2,3],[2,5,6]] + w,v = eig(a) + exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] + v0 = array([1,1,(1+sqrt(93)/3)/2]) + v1 = array([3.,0,-1]) + v2 = array([1,1,(1-sqrt(93)/3)/2]) + v0 = v0 / sqrt(dot(v0,transpose(v0))) + v1 = v1 / sqrt(dot(v1,transpose(v1))) + v2 = v2 / sqrt(dot(v2,transpose(v2))) + assert_array_almost_equal(w,exact_w) + assert_array_almost_equal(v0,v[:,0]*sign(v[0,0])) + assert_array_almost_equal(v1,v[:,1]*sign(v[0,1])) + assert_array_almost_equal(v2,v[:,2]*sign(v[0,2])) + for i in range(3): + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) + w,v = eig(a,left=1,right=0) + for i in range(3): + assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i]) + + def test_simple_complex_eig(self): + a = [[1,2],[-2,1]] + w,vl,vr = eig(a,left=1,right=1) + assert_array_almost_equal(w, array([1+2j, 1-2j])) + for i in range(2): + assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i]) + for i in range(2): + assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), + conjugate(w[i])*vl[:,i]) + + def test_simple_complex(self): + a = [[1,2,3],[1,2,3],[2,5,6+1j]] + w,vl,vr = eig(a,left=1,right=1) + for i in range(3): + assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i]) + for i in range(3): + assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), + conjugate(w[i])*vl[:,i]) + + def test_gh_3054(self): + a = [[1]] + b = [[0]] + w, vr = eig(a, b, homogeneous_eigvals=True) + assert_allclose(w[1,0], 0) + assert_(w[0,0] != 0) + assert_allclose(vr, 1) + + w, vr = eig(a, b) + assert_equal(w, np.inf) + assert_allclose(vr, 1) + + def _check_gen_eig(self, A, B): + if B is not None: + A, B = asarray(A), asarray(B) + B0 = B + else: + A = asarray(A) + B0 = B + B = np.eye(*A.shape) + msg = "\n%r\n%r" % (A, B) + + # Eigenvalues in homogeneous coordinates + w, vr = eig(A, B0, homogeneous_eigvals=True) + wt = eigvals(A, B0, homogeneous_eigvals=True) + val1 = dot(A, vr) * w[1,:] + val2 = dot(B, vr) * w[0,:] + for i in range(val1.shape[1]): + assert_allclose(val1[:,i], val2[:,i], rtol=1e-13, atol=1e-13, err_msg=msg) + + if B0 is None: + assert_allclose(w[1,:], 1) + assert_allclose(wt[1,:], 1) + + perm = np.lexsort(w) + permt = np.lexsort(wt) + assert_allclose(w[:,perm], 
wt[:,permt], atol=1e-7, rtol=1e-7, + err_msg=msg) + + length = np.empty(len(vr)) + for i in xrange(len(vr)): + length[i] = norm(vr[:,i]) + assert_allclose(length, np.ones(length.size), err_msg=msg, + atol=1e-7, rtol=1e-7) + + # Convert homogeneous coordinates + beta_nonzero = (w[1,:] != 0) + wh = w[0,beta_nonzero] / w[1,beta_nonzero] + + # Eigenvalues in standard coordinates + w, vr = eig(A, B0) + wt = eigvals(A, B0) + val1 = dot(A, vr) + val2 = dot(B, vr) * w + res = val1 - val2 + for i in range(res.shape[1]): + if all(isfinite(res[:,i])): + assert_allclose(res[:,i], 0, rtol=1e-13, atol=1e-13, err_msg=msg) + + w_fin = w[isfinite(w)] + wt_fin = wt[isfinite(wt)] + perm = argsort(clear_fuss(w_fin)) + permt = argsort(clear_fuss(wt_fin)) + assert_allclose(w[perm], wt[permt], + atol=1e-7, rtol=1e-7, err_msg=msg) + + length = np.empty(len(vr)) + for i in xrange(len(vr)): + length[i] = norm(vr[:,i]) + assert_allclose(length, np.ones(length.size), err_msg=msg) + + # Compare homogeneous and nonhomogeneous versions + assert_allclose(sort(wh), sort(w[np.isfinite(w)])) + + @pytest.mark.xfail(reason="See gh-2254.") + def test_singular(self): + # Example taken from + # https://web.archive.org/web/20040903121217/http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html + A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34], + [27,31,26,21,15], [38,44,44,24,30])) + B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25], + [16,25,27,14,23], [24,35,18,21,22])) + + olderr = np.seterr(all='ignore') + try: + self._check_gen_eig(A, B) + finally: + np.seterr(**olderr) + + def test_falker(self): + # Test matrices giving some Nan generalized eigenvalues. + M = diag(array(([1,0,3]))) + K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2])) + D = array(([1,-1,0],[-1,1,0],[0,0,0])) + Z = zeros((3,3)) + I3 = identity(3) + A = bmat([[I3, Z], [Z, -K]]) + B = bmat([[Z, I3], [M, D]]) + + olderr = np.seterr(all='ignore') + try: + self._check_gen_eig(A, B) + finally: + np.seterr(**olderr) + + def test_bad_geneig(self): + # Ticket #709 (strange return values from DGGEV) + + def matrices(omega): + c1 = -9 + omega**2 + c2 = 2*omega + A = [[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, c1, 0], + [0, 0, 0, c1]] + B = [[0, 0, 1, 0], + [0, 0, 0, 1], + [1, 0, 0, -c2], + [0, 1, c2, 0]] + return A, B + + # With a buggy LAPACK, this can fail for different omega on different + # machines -- so we need to test several values + olderr = np.seterr(all='ignore') + try: + for k in xrange(100): + A, B = matrices(omega=k*5./100) + self._check_gen_eig(A, B) + finally: + np.seterr(**olderr) + + def test_make_eigvals(self): + # Step through all paths in _make_eigvals + seed(1234) + # Real eigenvalues + A = symrand(3) + self._check_gen_eig(A, None) + B = symrand(3) + self._check_gen_eig(A, B) + # Complex eigenvalues + A = random((3, 3)) + 1j*random((3, 3)) + self._check_gen_eig(A, None) + B = random((3, 3)) + 1j*random((3, 3)) + self._check_gen_eig(A, B) + + def test_check_finite(self): + a = [[1,2,3],[1,2,3],[2,5,6]] + w,v = eig(a, check_finite=False) + exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] + v0 = array([1,1,(1+sqrt(93)/3)/2]) + v1 = array([3.,0,-1]) + v2 = array([1,1,(1-sqrt(93)/3)/2]) + v0 = v0 / sqrt(dot(v0,transpose(v0))) + v1 = v1 / sqrt(dot(v1,transpose(v1))) + v2 = v2 / sqrt(dot(v2,transpose(v2))) + assert_array_almost_equal(w,exact_w) + assert_array_almost_equal(v0,v[:,0]*sign(v[0,0])) + assert_array_almost_equal(v1,v[:,1]*sign(v[0,1])) + assert_array_almost_equal(v2,v[:,2]*sign(v[0,2])) + for i in range(3): + 
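+            # each eigenpair must satisfy the defining relation
+            # a.dot(v[:,i]) == w[i]*v[:,i]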
assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) + + def test_not_square_error(self): + """Check that passing a non-square array raises a ValueError.""" + A = np.arange(6).reshape(3,2) + assert_raises(ValueError, eig, A) + + def test_shape_mismatch(self): + """Check that passing arrays of with different shapes raises a ValueError.""" + A = identity(2) + B = np.arange(9.0).reshape(3,3) + assert_raises(ValueError, eig, A, B) + assert_raises(ValueError, eig, B, A) + + +class TestEigBanded(object): + def setup_method(self): + self.create_bandmat() + + def create_bandmat(self): + """Create the full matrix `self.fullmat` and + the corresponding band matrix `self.bandmat`.""" + N = 10 + self.KL = 2 # number of subdiagonals (below the diagonal) + self.KU = 2 # number of superdiagonals (above the diagonal) + + # symmetric band matrix + self.sym_mat = (diag(1.0*ones(N)) + + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1) + + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) + + # hermitian band matrix + self.herm_mat = (diag(-1.0*ones(N)) + + 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1) + + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) + + # general real band matrix + self.real_mat = (diag(1.0*ones(N)) + + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1) + + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) + + # general complex band matrix + self.comp_mat = (1j*diag(1.0*ones(N)) + + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1) + + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) + + # Eigenvalues and -vectors from linalg.eig + ew, ev = linalg.eig(self.sym_mat) + ew = ew.real + args = argsort(ew) + self.w_sym_lin = ew[args] + self.evec_sym_lin = ev[:,args] + + ew, ev = linalg.eig(self.herm_mat) + ew = ew.real + args = argsort(ew) + self.w_herm_lin = ew[args] + self.evec_herm_lin = ev[:,args] + + # Extract upper bands from symmetric and hermitian band matrices + # (for use in dsbevd, dsbevx, zhbevd, zhbevx + # and their single precision versions) + LDAB = self.KU + 1 + self.bandmat_sym = zeros((LDAB, N), dtype=float) + self.bandmat_herm = zeros((LDAB, N), dtype=complex) + for i in xrange(LDAB): + self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i) + self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i) + + # Extract bands from general real and complex band matrix + # (for use in dgbtrf, dgbtrs and their single precision versions) + LDAB = 2*self.KL + self.KU + 1 + self.bandmat_real = zeros((LDAB, N), dtype=float) + self.bandmat_real[2*self.KL,:] = diag(self.real_mat) # diagonal + for i in xrange(self.KL): + # superdiagonals + self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1) + # subdiagonals + self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1) + + self.bandmat_comp = zeros((LDAB, N), dtype=complex) + self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat) # diagonal + for i in xrange(self.KL): + # superdiagonals + self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1) + # subdiagonals + self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1) + + # absolute value for linear equation system A*x = b + self.b = 1.0*arange(N) + self.bc = self.b * (1 + 1j) + + ##################################################################### + + def test_dsbev(self): + """Compare dsbev eigenvalues and eigenvectors with + the result of linalg.eig.""" + w, evec, info = dsbev(self.bandmat_sym, compute_v=1) + evec_ = evec[:,argsort(w)] + assert_array_almost_equal(sort(w), self.w_sym_lin) + assert_array_almost_equal(abs(evec_), 
abs(self.evec_sym_lin))
+
+    def test_dsbevd(self):
+        """Compare dsbevd eigenvalues and eigenvectors with
+        the result of linalg.eig."""
+        w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)
+        evec_ = evec[:,argsort(w)]
+        assert_array_almost_equal(sort(w), self.w_sym_lin)
+        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
+
+    def test_dsbevx(self):
+        """Compare dsbevx eigenvalues and eigenvectors
+        with the result of linalg.eig."""
+        N,N = shape(self.sym_mat)
+        ## Attention: arguments 0.0, 0.0, range?
+        w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,
+                                           compute_v=1, range=2)
+        evec_ = evec[:,argsort(w)]
+        assert_array_almost_equal(sort(w), self.w_sym_lin)
+        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
+
+    def test_zhbevd(self):
+        """Compare zhbevd eigenvalues and eigenvectors
+        with the result of linalg.eig."""
+        w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)
+        evec_ = evec[:,argsort(w)]
+        assert_array_almost_equal(sort(w), self.w_herm_lin)
+        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
+
+    def test_zhbevx(self):
+        """Compare zhbevx eigenvalues and eigenvectors
+        with the result of linalg.eig."""
+        N,N = shape(self.herm_mat)
+        ## Attention: arguments 0.0, 0.0, range?
+        w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,
+                                           compute_v=1, range=2)
+        evec_ = evec[:,argsort(w)]
+        assert_array_almost_equal(sort(w), self.w_herm_lin)
+        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
+
+    def test_eigvals_banded(self):
+        """Compare eigenvalues of eigvals_banded with those of linalg.eig."""
+        w_sym = eigvals_banded(self.bandmat_sym)
+        w_sym = w_sym.real
+        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
+
+        w_herm = eigvals_banded(self.bandmat_herm)
+        w_herm = w_herm.real
+        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
+
+        # extracting eigenvalues with respect to an index range
+        ind1 = 2
+        ind2 = np.longlong(6)
+        w_sym_ind = eigvals_banded(self.bandmat_sym,
+                                   select='i', select_range=(ind1, ind2))
+        assert_array_almost_equal(sort(w_sym_ind),
+                                  self.w_sym_lin[ind1:ind2+1])
+        w_herm_ind = eigvals_banded(self.bandmat_herm,
+                                    select='i', select_range=(ind1, ind2))
+        assert_array_almost_equal(sort(w_herm_ind),
+                                  self.w_herm_lin[ind1:ind2+1])
+
+        # extracting eigenvalues with respect to a value range
+        v_lower = self.w_sym_lin[ind1] - 1.0e-5
+        v_upper = self.w_sym_lin[ind2] + 1.0e-5
+        w_sym_val = eigvals_banded(self.bandmat_sym,
+                                   select='v', select_range=(v_lower, v_upper))
+        assert_array_almost_equal(sort(w_sym_val),
+                                  self.w_sym_lin[ind1:ind2+1])
+
+        v_lower = self.w_herm_lin[ind1] - 1.0e-5
+        v_upper = self.w_herm_lin[ind2] + 1.0e-5
+        w_herm_val = eigvals_banded(self.bandmat_herm,
+                                    select='v', select_range=(v_lower, v_upper))
+        assert_array_almost_equal(sort(w_herm_val),
+                                  self.w_herm_lin[ind1:ind2+1])
+
+        w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
+        w_sym = w_sym.real
+        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
+
+    def test_eig_banded(self):
+        """Compare eigenvalues and eigenvectors of eig_banded
+        with those of linalg.eig.
""" + w_sym, evec_sym = eig_banded(self.bandmat_sym) + evec_sym_ = evec_sym[:,argsort(w_sym.real)] + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) + + w_herm, evec_herm = eig_banded(self.bandmat_herm) + evec_herm_ = evec_herm[:,argsort(w_herm.real)] + assert_array_almost_equal(sort(w_herm), self.w_herm_lin) + assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin)) + + # extracting eigenvalues with respect to an index range + ind1 = 2 + ind2 = 6 + w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym, + select='i', select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_sym_ind), + self.w_sym_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_sym_ind), + abs(self.evec_sym_lin[:,ind1:ind2+1])) + + w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm, + select='i', select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_herm_ind), + self.w_herm_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_herm_ind), + abs(self.evec_herm_lin[:,ind1:ind2+1])) + + # extracting eigenvalues with respect to a value range + v_lower = self.w_sym_lin[ind1] - 1.0e-5 + v_upper = self.w_sym_lin[ind2] + 1.0e-5 + w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym, + select='v', select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_sym_val), + self.w_sym_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_sym_val), + abs(self.evec_sym_lin[:,ind1:ind2+1])) + + v_lower = self.w_herm_lin[ind1] - 1.0e-5 + v_upper = self.w_herm_lin[ind2] + 1.0e-5 + w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm, + select='v', select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_herm_val), + self.w_herm_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_herm_val), + abs(self.evec_herm_lin[:,ind1:ind2+1])) + + w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False) + evec_sym_ = evec_sym[:,argsort(w_sym.real)] + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) + + def test_dgbtrf(self): + """Compare dgbtrf LU factorisation with the LU factorisation result + of linalg.lu.""" + M,N = shape(self.real_mat) + lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) + + # extract matrix u from lu_symm_band + u = diag(lu_symm_band[2*self.KL,:]) + for i in xrange(self.KL + self.KU): + u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1) + + p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0) + assert_array_almost_equal(u, u_lin) + + def test_zgbtrf(self): + """Compare zgbtrf LU factorisation with the LU factorisation result + of linalg.lu.""" + M,N = shape(self.comp_mat) + lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) + + # extract matrix u from lu_symm_band + u = diag(lu_symm_band[2*self.KL,:]) + for i in xrange(self.KL + self.KU): + u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1) + + p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0) + assert_array_almost_equal(u, u_lin) + + def test_dgbtrs(self): + """Compare dgbtrs solutions for linear equation system A*x = b + with solutions of linalg.solve.""" + + lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) + y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv) + + y_lin = linalg.solve(self.real_mat, self.b) + assert_array_almost_equal(y, y_lin) + + def test_zgbtrs(self): + """Compare zgbtrs solutions for linear equation system A*x = b + with solutions of linalg.solve.""" + + lu_symm_band, 
ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) + y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv) + + y_lin = linalg.solve(self.comp_mat, self.bc) + assert_array_almost_equal(y, y_lin) + + +class TestEigTridiagonal(object): + def setup_method(self): + self.create_trimat() + + def create_trimat(self): + """Create the full matrix `self.fullmat`, `self.d`, and `self.e`.""" + N = 10 + + # symmetric band matrix + self.d = 1.0*ones(N) + self.e = -1.0*ones(N-1) + self.full_mat = (diag(self.d) + diag(self.e, -1) + diag(self.e, 1)) + + ew, ev = linalg.eig(self.full_mat) + ew = ew.real + args = argsort(ew) + self.w = ew[args] + self.evec = ev[:, args] + + def test_degenerate(self): + """Test error conditions.""" + # Wrong sizes + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e[:-1]) + # Must be real + assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e * 1j) + # Bad driver + assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e, + lapack_driver=1.) + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, + lapack_driver='foo') + # Bad bounds + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, + select='i', select_range=(0, -1)) + + def test_eigvalsh_tridiagonal(self): + """Compare eigenvalues of eigvalsh_tridiagonal with those of eig.""" + # can't use ?STERF with subselection + for driver in ('sterf', 'stev', 'stebz', 'stemr', 'auto'): + w = eigvalsh_tridiagonal(self.d, self.e, lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w) + + for driver in ('sterf', 'stev'): + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, + lapack_driver='stev', select='i', + select_range=(0, 1)) + for driver in ('stebz', 'stemr', 'auto'): + # extracting eigenvalues with respect to the full index range + w_ind = eigvalsh_tridiagonal( + self.d, self.e, select='i', select_range=(0, len(self.d)-1), + lapack_driver=driver) + assert_array_almost_equal(sort(w_ind), self.w) + + # extracting eigenvalues with respect to an index range + ind1 = 2 + ind2 = 6 + w_ind = eigvalsh_tridiagonal( + self.d, self.e, select='i', select_range=(ind1, ind2), + lapack_driver=driver) + assert_array_almost_equal(sort(w_ind), self.w[ind1:ind2+1]) + + # extracting eigenvalues with respect to a value range + v_lower = self.w[ind1] - 1.0e-5 + v_upper = self.w[ind2] + 1.0e-5 + w_val = eigvalsh_tridiagonal( + self.d, self.e, select='v', select_range=(v_lower, v_upper), + lapack_driver=driver) + assert_array_almost_equal(sort(w_val), self.w[ind1:ind2+1]) + + def test_eigh_tridiagonal(self): + """Compare eigenvalues and eigenvectors of eigh_tridiagonal + with those of eig. 
""" + # can't use ?STERF when eigenvectors are requested + assert_raises(ValueError, eigh_tridiagonal, self.d, self.e, + lapack_driver='sterf') + for driver in ('stebz', 'stev', 'stemr', 'auto'): + w, evec = eigh_tridiagonal(self.d, self.e, lapack_driver=driver) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w) + assert_array_almost_equal(abs(evec_), abs(self.evec)) + + assert_raises(ValueError, eigh_tridiagonal, self.d, self.e, + lapack_driver='stev', select='i', select_range=(0, 1)) + for driver in ('stebz', 'stemr', 'auto'): + # extracting eigenvalues with respect to an index range + ind1 = 0 + ind2 = len(self.d)-1 + w, evec = eigh_tridiagonal( + self.d, self.e, select='i', select_range=(ind1, ind2), + lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w) + assert_array_almost_equal(abs(evec), abs(self.evec)) + ind1 = 2 + ind2 = 6 + w, evec = eigh_tridiagonal( + self.d, self.e, select='i', select_range=(ind1, ind2), + lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w[ind1:ind2+1]) + assert_array_almost_equal(abs(evec), + abs(self.evec[:, ind1:ind2+1])) + + # extracting eigenvalues with respect to a value range + v_lower = self.w[ind1] - 1.0e-5 + v_upper = self.w[ind2] + 1.0e-5 + w, evec = eigh_tridiagonal( + self.d, self.e, select='v', select_range=(v_lower, v_upper), + lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w[ind1:ind2+1]) + assert_array_almost_equal(abs(evec), + abs(self.evec[:, ind1:ind2+1])) + + +def test_eigh(): + DIM = 6 + v = {'dim': (DIM,), + 'dtype': ('f','d','F','D'), + 'overwrite': (True, False), + 'lower': (True, False), + 'turbo': (True, False), + 'eigvals': (None, (2, DIM-2))} + + for dim in v['dim']: + for typ in v['dtype']: + for overwrite in v['overwrite']: + for turbo in v['turbo']: + for eigenvalues in v['eigvals']: + for lower in v['lower']: + eigenhproblem_standard( + 'ordinary', + dim, typ, overwrite, lower, + turbo, eigenvalues) + eigenhproblem_general( + 'general ', + dim, typ, overwrite, lower, + turbo, eigenvalues) + + +def test_eigh_of_sparse(): + # This tests the rejection of inputs that eigh cannot currently handle. 
+    import scipy.sparse
+    a = scipy.sparse.identity(2).tocsc()
+    b = np.atleast_2d(a)
+    assert_raises(ValueError, eigh, a)
+    assert_raises(ValueError, eigh, b)
+
+
+def _complex_symrand(dim, dtype):
+    a1, a2 = symrand(dim), symrand(dim)
+    # add antisymmetric matrix as imag part
+    a = a1 + 1j*(triu(a2)-tril(a2))
+    return a.astype(dtype)
+
+
+def eigenhproblem_standard(desc, dim, dtype,
+                           overwrite, lower, turbo,
+                           eigenvalues):
+    """Solve a standard eigenvalue problem."""
+    if iscomplex(empty(1, dtype=dtype)):
+        a = _complex_symrand(dim, dtype)
+    else:
+        a = symrand(dim).astype(dtype)
+
+    if overwrite:
+        a_c = a.copy()
+    else:
+        a_c = a
+    w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigenvalues)
+    assert_dtype_equal(z.dtype, dtype)
+    w = w.astype(dtype)
+    diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real
+    assert_array_almost_equal(diag_, w, DIGITS[dtype])
+
+
+def eigenhproblem_general(desc, dim, dtype,
+                          overwrite, lower, turbo,
+                          eigenvalues):
+    """Solve a generalized eigenvalue problem."""
+    if iscomplex(empty(1, dtype=dtype)):
+        a = _complex_symrand(dim, dtype)
+        b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype)
+    else:
+        a = symrand(dim).astype(dtype)
+        b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype)
+
+    if overwrite:
+        a_c, b_c = a.copy(), b.copy()
+    else:
+        a_c, b_c = a, b
+
+    w, z = eigh(a, b, overwrite_a=overwrite, lower=lower,
+                overwrite_b=overwrite, turbo=turbo, eigvals=eigenvalues)
+    assert_dtype_equal(z.dtype, dtype)
+    w = w.astype(dtype)
+    diag1_ = diag(dot(z.T.conj(), dot(a_c, z))).real
+    assert_array_almost_equal(diag1_, w, DIGITS[dtype])
+    diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real
+    assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype])
+
+
+def test_eigh_integer():
+    a = array([[1,2],[2,7]])
+    b = array([[3,1],[1,5]])
+    w,z = eigh(a)
+    w,z = eigh(a,b)
+
+
+class TestLU(object):
+    def setup_method(self):
+        self.a = array([[1,2,3],[1,2,3],[2,5,6]])
+        self.ca = array([[1,2,3],[1,2,3],[2,5j,6]])
+        # These matrices are better at detecting problems in the
+        # permutation matrices than the ones above
+        self.b = array([[1,2,3],[4,5,6],[7,8,9]])
+        self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])
+
+        # Rectangular matrices
+        self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
+        self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
+
+        self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
+        self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
+
+        # Medium-size matrices
+        self.med = random((30, 40))
+        self.cmed = random((30, 40)) + 1.j * random((30, 40))
+
+    def _test_common(self, data):
+        p,l,u = lu(data)
+        assert_array_almost_equal(dot(dot(p,l),u),data)
+        pl,u = lu(data,permute_l=1)
+        assert_array_almost_equal(dot(pl,u),data)
+
+    # Simple tests
+    def test_simple(self):
+        self._test_common(self.a)
+
+    def test_simple_complex(self):
+        self._test_common(self.ca)
+
+    def test_simple2(self):
+        self._test_common(self.b)
+
+    def test_simple2_complex(self):
+        self._test_common(self.cb)
+
+    # Rectangular matrix tests
+    def test_hrectangular(self):
+        self._test_common(self.hrect)
+
+    def test_vrectangular(self):
+        self._test_common(self.vrect)
+
+    def test_hrectangular_complex(self):
+        self._test_common(self.chrect)
+
+    def test_vrectangular_complex(self):
+        self._test_common(self.cvrect)
+
+    # Bigger matrices
+    def test_medium1(self):
+        """Check lu decomposition on medium size, rectangular matrix."""
+        self._test_common(self.med)
+
+    def 
test_medium1_complex(self): + """Check lu decomposition on medium size, rectangular matrix.""" + self._test_common(self.cmed) + + def test_check_finite(self): + p, l, u = lu(self.a, check_finite=False) + assert_array_almost_equal(dot(dot(p,l),u), self.a) + + def test_simple_known(self): + # Ticket #1458 + for order in ['C', 'F']: + A = np.array([[2, 1],[0, 1.]], order=order) + LU, P = lu_factor(A) + assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]])) + assert_array_equal(P, np.array([0, 1])) + + +class TestLUSingle(TestLU): + """LU testers for single precision, real and double""" + def setup_method(self): + TestLU.setup_method(self) + + self.a = self.a.astype(float32) + self.ca = self.ca.astype(complex64) + self.b = self.b.astype(float32) + self.cb = self.cb.astype(complex64) + + self.hrect = self.hrect.astype(float32) + self.chrect = self.hrect.astype(complex64) + + self.vrect = self.vrect.astype(float32) + self.cvrect = self.vrect.astype(complex64) + + self.med = self.vrect.astype(float32) + self.cmed = self.vrect.astype(complex64) + + +class TestLUSolve(object): + def setup_method(self): + seed(1234) + + def test_lu(self): + a0 = random((10,10)) + b = random((10,)) + + for order in ['C', 'F']: + a = np.array(a0, order=order) + + x1 = solve(a,b) + + lu_a = lu_factor(a) + x2 = lu_solve(lu_a,b) + + assert_array_almost_equal(x1,x2) + + def test_check_finite(self): + a = random((10,10)) + b = random((10,)) + x1 = solve(a,b) + + lu_a = lu_factor(a, check_finite=False) + x2 = lu_solve(lu_a,b, check_finite=False) + + assert_array_almost_equal(x1,x2) + + +class TestSVD_GESDD(object): + def setup_method(self): + self.lapack_driver = 'gesdd' + seed(1234) + + def test_degenerate(self): + assert_raises(TypeError, svd, [[1.]], lapack_driver=1.) + assert_raises(ValueError, svd, [[1.]], lapack_driver='foo') + + def test_simple(self): + a = [[1,2,3],[1,20,3],[2,5,6]] + for full_matrices in (True, False): + u,s,vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(dot(transpose(u),u),identity(3)) + assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) + sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) + for i in range(len(s)): + sigma[i,i] = s[i] + assert_array_almost_equal(dot(dot(u,sigma),vh),a) + + def test_simple_singular(self): + a = [[1,2,3],[1,2,3],[2,5,6]] + for full_matrices in (True, False): + u,s,vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(dot(transpose(u),u),identity(3)) + assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) + sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) + for i in range(len(s)): + sigma[i,i] = s[i] + assert_array_almost_equal(dot(dot(u,sigma),vh),a) + + def test_simple_underdet(self): + a = [[1,2,3],[4,5,6]] + for full_matrices in (True, False): + u,s,vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0])) + sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) + for i in range(len(s)): + sigma[i,i] = s[i] + assert_array_almost_equal(dot(dot(u,sigma),vh),a) + + def test_simple_overdet(self): + a = [[1,2],[4,5],[3,4]] + for full_matrices in (True, False): + u,s,vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1])) + assert_array_almost_equal(dot(transpose(vh),vh),identity(2)) + sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) + for i in 
range(len(s)): + sigma[i,i] = s[i] + assert_array_almost_equal(dot(dot(u,sigma),vh),a) + + def test_random(self): + n = 20 + m = 15 + for i in range(3): + for a in [random([n,m]),random([m,n])]: + for full_matrices in (True, False): + u,s,vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1])) + assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0])) + sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) + for i in range(len(s)): + sigma[i,i] = s[i] + assert_array_almost_equal(dot(dot(u,sigma),vh),a) + + def test_simple_complex(self): + a = [[1,2,3],[1,2j,3],[2,5,6]] + for full_matrices in (True, False): + u,s,vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1])) + assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0])) + sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) + for i in range(len(s)): + sigma[i,i] = s[i] + assert_array_almost_equal(dot(dot(u,sigma),vh),a) + + def test_random_complex(self): + n = 20 + m = 15 + for i in range(3): + for full_matrices in (True, False): + for a in [random([n,m]),random([m,n])]: + a = a + 1j*random(list(a.shape)) + u,s,vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1])) + # This fails when [m,n] + # assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char)) + sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) + for i in range(len(s)): + sigma[i,i] = s[i] + assert_array_almost_equal(dot(dot(u,sigma),vh),a) + + def test_crash_1580(self): + sizes = [(13, 23), (30, 50), (60, 100)] + np.random.seed(1234) + for sz in sizes: + for dt in [np.float32, np.float64, np.complex64, np.complex128]: + a = np.random.rand(*sz).astype(dt) + # should not crash + svd(a, lapack_driver=self.lapack_driver) + + def test_check_finite(self): + a = [[1,2,3],[1,20,3],[2,5,6]] + u,s,vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver) + assert_array_almost_equal(dot(transpose(u),u),identity(3)) + assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) + sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) + for i in range(len(s)): + sigma[i,i] = s[i] + assert_array_almost_equal(dot(dot(u,sigma),vh),a) + + def test_gh_5039(self): + # This is a smoke test for https://github.com/scipy/scipy/issues/5039 + # + # The following is reported to raise "ValueError: On entry to DGESDD + # parameter number 12 had an illegal value". + # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')` + # This is reported to only show up on LAPACK 3.0.3. 
+ # + # The matrix below is taken from the call to + # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest + b = np.array( + [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.], + [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.], + [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.], + [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]]) + svd(b, lapack_driver=self.lapack_driver) + + +class TestSVD_GESVD(TestSVD_GESDD): + def setup_method(self): + self.lapack_driver = 'gesvd' + seed(1234) + + +class TestSVDVals(object): + + def test_empty(self): + for a in [[]], np.empty((2, 0)), np.ones((0, 3)): + s = svdvals(a) + assert_equal(s, np.empty(0)) + + def test_simple(self): + a = [[1,2,3],[1,2,3],[2,5,6]] + s = svdvals(a) + assert_(len(s) == 3) + assert_(s[0] >= s[1] >= s[2]) + + def test_simple_underdet(self): + a = [[1,2,3],[4,5,6]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_simple_overdet(self): + a = [[1,2],[4,5],[3,4]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_simple_complex(self): + a = [[1,2,3],[1,20,3j],[2,5,6]] + s = svdvals(a) + assert_(len(s) == 3) + assert_(s[0] >= s[1] >= s[2]) + + def test_simple_underdet_complex(self): + a = [[1,2,3],[4,5j,6]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_simple_overdet_complex(self): + a = [[1,2],[4,5],[3j,4]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_check_finite(self): + a = [[1,2,3],[1,2,3],[2,5,6]] + s = svdvals(a, check_finite=False) + assert_(len(s) == 3) + assert_(s[0] >= s[1] >= s[2]) + + @pytest.mark.slow + def test_crash_2609(self): + np.random.seed(1234) + a = np.random.rand(1500, 2800) + # Shouldn't crash: + svdvals(a) + + +class TestDiagSVD(object): + + def test_simple(self): + assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]]) + + +class TestQR(object): + + def setup_method(self): + seed(1234) + + def test_simple(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + q,r = qr(a) + assert_array_almost_equal(dot(transpose(q),q),identity(3)) + assert_array_almost_equal(dot(q,r),a) + + def test_simple_left(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + q,r = qr(a) + c = [1, 2, 3] + qc,r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(dot(q, c), qc) + assert_array_almost_equal(r, r2) + qc,r2 = qr_multiply(a, identity(3), "left") + assert_array_almost_equal(q, qc) + + def test_simple_right(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + q,r = qr(a) + c = [1, 2, 3] + qc,r2 = qr_multiply(a, c) + assert_array_almost_equal(dot(c, q), qc) + assert_array_almost_equal(r, r2) + qc,r = qr_multiply(a, identity(3)) + assert_array_almost_equal(q, qc) + + def test_simple_pivoting(self): + a = np.asarray([[8,2,3],[2,9,3],[5,3,6]]) + q,r,p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(transpose(q),q),identity(3)) + assert_array_almost_equal(dot(q,r),a[:,p]) + q2,r2 = qr(a[:,p]) + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_simple_left_pivoting(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + q,r,jpvt = qr(a, pivoting=True) + c = [1, 2, 3] + qc,r,jpvt = qr_multiply(a, c, "left", True) + assert_array_almost_equal(dot(q, c), qc) + + def test_simple_right_pivoting(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + q,r,jpvt = qr(a, pivoting=True) + c = [1, 2, 3] + qc,r,jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(dot(c, q), qc) + + def test_simple_trap(self): + a = [[8,2,3],[2,9,3]] + q,r = qr(a) + 
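+        # "trap(ezoidal)" case: a is 2x3, so Q is 2x2 orthogonal and R is
+        # 2x3 upper trapezoidal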
assert_array_almost_equal(dot(transpose(q),q),identity(2)) + assert_array_almost_equal(dot(q,r),a) + + def test_simple_trap_pivoting(self): + a = np.asarray([[8,2,3],[2,9,3]]) + q,r,p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(transpose(q),q),identity(2)) + assert_array_almost_equal(dot(q,r),a[:,p]) + q2,r2 = qr(a[:,p]) + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_simple_tall(self): + # full version + a = [[8,2],[2,9],[5,3]] + q,r = qr(a) + assert_array_almost_equal(dot(transpose(q),q),identity(3)) + assert_array_almost_equal(dot(q,r),a) + + def test_simple_tall_pivoting(self): + # full version pivoting + a = np.asarray([[8,2],[2,9],[5,3]]) + q,r,p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(transpose(q),q),identity(3)) + assert_array_almost_equal(dot(q,r),a[:,p]) + q2,r2 = qr(a[:,p]) + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_simple_tall_e(self): + # economy version + a = [[8,2],[2,9],[5,3]] + q,r = qr(a, mode='economic') + assert_array_almost_equal(dot(transpose(q),q),identity(2)) + assert_array_almost_equal(dot(q,r),a) + assert_equal(q.shape, (3,2)) + assert_equal(r.shape, (2,2)) + + def test_simple_tall_e_pivoting(self): + # economy version pivoting + a = np.asarray([[8,2],[2,9],[5,3]]) + q,r,p = qr(a, pivoting=True, mode='economic') + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(transpose(q),q),identity(2)) + assert_array_almost_equal(dot(q,r),a[:,p]) + q2,r2 = qr(a[:,p], mode='economic') + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_simple_tall_left(self): + a = [[8,2],[2,9],[5,3]] + q,r = qr(a, mode="economic") + c = [1, 2] + qc,r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(dot(q, c), qc) + assert_array_almost_equal(r, r2) + c = array([1,2,0]) + qc,r2 = qr_multiply(a, c, "left", overwrite_c=True) + assert_array_almost_equal(dot(q, c[:2]), qc) + qc,r = qr_multiply(a, identity(2), "left") + assert_array_almost_equal(qc, q) + + def test_simple_tall_left_pivoting(self): + a = [[8,2],[2,9],[5,3]] + q,r,jpvt = qr(a, mode="economic", pivoting=True) + c = [1, 2] + qc,r,kpvt = qr_multiply(a, c, "left", True) + assert_array_equal(jpvt, kpvt) + assert_array_almost_equal(dot(q, c), qc) + qc,r,jpvt = qr_multiply(a, identity(2), "left", True) + assert_array_almost_equal(qc, q) + + def test_simple_tall_right(self): + a = [[8,2],[2,9],[5,3]] + q,r = qr(a, mode="economic") + c = [1, 2, 3] + cq,r2 = qr_multiply(a, c) + assert_array_almost_equal(dot(c, q), cq) + assert_array_almost_equal(r, r2) + cq,r = qr_multiply(a, identity(3)) + assert_array_almost_equal(cq, q) + + def test_simple_tall_right_pivoting(self): + a = [[8,2],[2,9],[5,3]] + q,r,jpvt = qr(a, pivoting=True, mode="economic") + c = [1, 2, 3] + cq,r,jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(dot(c, q), cq) + cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True) + assert_array_almost_equal(cq, q) + + def test_simple_fat(self): + # full version + a = [[8,2,5],[2,9,3]] + q,r = qr(a) + assert_array_almost_equal(dot(transpose(q),q),identity(2)) + assert_array_almost_equal(dot(q,r),a) + assert_equal(q.shape, (2,2)) + assert_equal(r.shape, (2,3)) + + def test_simple_fat_pivoting(self): + # full version pivoting + a = np.asarray([[8,2,5],[2,9,3]]) + q,r,p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + 
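+        # column pivoting guarantees that |diag(R)| is non-increasing,
+        # which the assertion above checks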
assert_array_almost_equal(dot(transpose(q),q),identity(2)) + assert_array_almost_equal(dot(q,r),a[:,p]) + assert_equal(q.shape, (2,2)) + assert_equal(r.shape, (2,3)) + q2,r2 = qr(a[:,p]) + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_simple_fat_e(self): + # economy version + a = [[8,2,3],[2,9,5]] + q,r = qr(a, mode='economic') + assert_array_almost_equal(dot(transpose(q),q),identity(2)) + assert_array_almost_equal(dot(q,r),a) + assert_equal(q.shape, (2,2)) + assert_equal(r.shape, (2,3)) + + def test_simple_fat_e_pivoting(self): + # economy version pivoting + a = np.asarray([[8,2,3],[2,9,5]]) + q,r,p = qr(a, pivoting=True, mode='economic') + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(transpose(q),q),identity(2)) + assert_array_almost_equal(dot(q,r),a[:,p]) + assert_equal(q.shape, (2,2)) + assert_equal(r.shape, (2,3)) + q2,r2 = qr(a[:,p], mode='economic') + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_simple_fat_left(self): + a = [[8,2,3],[2,9,5]] + q,r = qr(a, mode="economic") + c = [1, 2] + qc,r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(dot(q, c), qc) + assert_array_almost_equal(r, r2) + qc,r = qr_multiply(a, identity(2), "left") + assert_array_almost_equal(qc, q) + + def test_simple_fat_left_pivoting(self): + a = [[8,2,3],[2,9,5]] + q,r,jpvt = qr(a, mode="economic", pivoting=True) + c = [1, 2] + qc,r,jpvt = qr_multiply(a, c, "left", True) + assert_array_almost_equal(dot(q, c), qc) + qc,r,jpvt = qr_multiply(a, identity(2), "left", True) + assert_array_almost_equal(qc, q) + + def test_simple_fat_right(self): + a = [[8,2,3],[2,9,5]] + q,r = qr(a, mode="economic") + c = [1, 2] + cq,r2 = qr_multiply(a, c) + assert_array_almost_equal(dot(c, q), cq) + assert_array_almost_equal(r, r2) + cq,r = qr_multiply(a, identity(2)) + assert_array_almost_equal(cq, q) + + def test_simple_fat_right_pivoting(self): + a = [[8,2,3],[2,9,5]] + q,r,jpvt = qr(a, pivoting=True, mode="economic") + c = [1, 2] + cq,r,jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(dot(c, q), cq) + cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True) + assert_array_almost_equal(cq, q) + + def test_simple_complex(self): + a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] + q,r = qr(a) + assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3)) + assert_array_almost_equal(dot(q,r),a) + + def test_simple_complex_left(self): + a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] + q,r = qr(a) + c = [1, 2, 3+4j] + qc,r = qr_multiply(a, c, "left") + assert_array_almost_equal(dot(q, c), qc) + qc,r = qr_multiply(a, identity(3), "left") + assert_array_almost_equal(q, qc) + + def test_simple_complex_right(self): + a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] + q,r = qr(a) + c = [1, 2, 3+4j] + qc,r = qr_multiply(a, c) + assert_array_almost_equal(dot(c, q), qc) + qc,r = qr_multiply(a, identity(3)) + assert_array_almost_equal(q, qc) + + def test_simple_tall_complex_left(self): + a = [[8,2+3j],[2,9],[5+7j,3]] + q,r = qr(a, mode="economic") + c = [1, 2+2j] + qc,r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(dot(q, c), qc) + assert_array_almost_equal(r, r2) + c = array([1,2,0]) + qc,r2 = qr_multiply(a, c, "left", overwrite_c=True) + assert_array_almost_equal(dot(q, c[:2]), qc) + qc,r = qr_multiply(a, identity(2), "left") + assert_array_almost_equal(qc, q) + + def test_simple_complex_left_conjugate(self): + a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] + q,r = qr(a) + c = [1, 2, 3+4j] + qc,r = qr_multiply(a, c, "left", conjugate=True) + 
assert_array_almost_equal(dot(q.conjugate(), c), qc) + + def test_simple_complex_tall_left_conjugate(self): + a = [[3,3+4j],[5,2+2j],[3,2]] + q,r = qr(a, mode='economic') + c = [1, 3+4j] + qc,r = qr_multiply(a, c, "left", conjugate=True) + assert_array_almost_equal(dot(q.conjugate(), c), qc) + + def test_simple_complex_right_conjugate(self): + a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] + q,r = qr(a) + c = [1, 2, 3+4j] + qc,r = qr_multiply(a, c, conjugate=True) + assert_array_almost_equal(dot(c, q.conjugate()), qc) + + def test_simple_complex_pivoting(self): + a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) + q,r,p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3)) + assert_array_almost_equal(dot(q,r),a[:,p]) + q2,r2 = qr(a[:,p]) + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_simple_complex_left_pivoting(self): + a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) + q,r,jpvt = qr(a, pivoting=True) + c = [1, 2, 3+4j] + qc,r,jpvt = qr_multiply(a, c, "left", True) + assert_array_almost_equal(dot(q, c), qc) + + def test_simple_complex_right_pivoting(self): + a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) + q,r,jpvt = qr(a, pivoting=True) + c = [1, 2, 3+4j] + qc,r,jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(dot(c, q), qc) + + def test_random(self): + n = 20 + for k in range(2): + a = random([n,n]) + q,r = qr(a) + assert_array_almost_equal(dot(transpose(q),q),identity(n)) + assert_array_almost_equal(dot(q,r),a) + + def test_random_left(self): + n = 20 + for k in range(2): + a = random([n,n]) + q,r = qr(a) + c = random([n]) + qc,r = qr_multiply(a, c, "left") + assert_array_almost_equal(dot(q, c), qc) + qc,r = qr_multiply(a, identity(n), "left") + assert_array_almost_equal(q, qc) + + def test_random_right(self): + n = 20 + for k in range(2): + a = random([n,n]) + q,r = qr(a) + c = random([n]) + cq,r = qr_multiply(a, c) + assert_array_almost_equal(dot(c, q), cq) + cq,r = qr_multiply(a, identity(n)) + assert_array_almost_equal(q, cq) + + def test_random_pivoting(self): + n = 20 + for k in range(2): + a = random([n,n]) + q,r,p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(transpose(q),q),identity(n)) + assert_array_almost_equal(dot(q,r),a[:,p]) + q2,r2 = qr(a[:,p]) + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_random_tall(self): + # full version + m = 200 + n = 100 + for k in range(2): + a = random([m,n]) + q,r = qr(a) + assert_array_almost_equal(dot(transpose(q),q),identity(m)) + assert_array_almost_equal(dot(q,r),a) + + def test_random_tall_left(self): + # full version + m = 200 + n = 100 + for k in range(2): + a = random([m,n]) + q,r = qr(a, mode="economic") + c = random([n]) + qc,r = qr_multiply(a, c, "left") + assert_array_almost_equal(dot(q, c), qc) + qc,r = qr_multiply(a, identity(n), "left") + assert_array_almost_equal(qc, q) + + def test_random_tall_right(self): + # full version + m = 200 + n = 100 + for k in range(2): + a = random([m,n]) + q,r = qr(a, mode="economic") + c = random([m]) + cq,r = qr_multiply(a, c) + assert_array_almost_equal(dot(c, q), cq) + cq,r = qr_multiply(a, identity(m)) + assert_array_almost_equal(cq, q) + + def test_random_tall_pivoting(self): + # full version pivoting + m = 200 + n = 100 + for k in range(2): + a = random([m,n]) + q,r,p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + 
assert_array_almost_equal(dot(transpose(q),q),identity(m)) + assert_array_almost_equal(dot(q,r),a[:,p]) + q2,r2 = qr(a[:,p]) + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_random_tall_e(self): + # economy version + m = 200 + n = 100 + for k in range(2): + a = random([m,n]) + q,r = qr(a, mode='economic') + assert_array_almost_equal(dot(transpose(q),q),identity(n)) + assert_array_almost_equal(dot(q,r),a) + assert_equal(q.shape, (m,n)) + assert_equal(r.shape, (n,n)) + + def test_random_tall_e_pivoting(self): + # economy version pivoting + m = 200 + n = 100 + for k in range(2): + a = random([m,n]) + q,r,p = qr(a, pivoting=True, mode='economic') + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(transpose(q),q),identity(n)) + assert_array_almost_equal(dot(q,r),a[:,p]) + assert_equal(q.shape, (m,n)) + assert_equal(r.shape, (n,n)) + q2,r2 = qr(a[:,p], mode='economic') + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_random_trap(self): + m = 100 + n = 200 + for k in range(2): + a = random([m,n]) + q,r = qr(a) + assert_array_almost_equal(dot(transpose(q),q),identity(m)) + assert_array_almost_equal(dot(q,r),a) + + def test_random_trap_pivoting(self): + m = 100 + n = 200 + for k in range(2): + a = random([m,n]) + q,r,p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(transpose(q),q),identity(m)) + assert_array_almost_equal(dot(q,r),a[:,p]) + q2,r2 = qr(a[:,p]) + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_random_complex(self): + n = 20 + for k in range(2): + a = random([n,n])+1j*random([n,n]) + q,r = qr(a) + assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n)) + assert_array_almost_equal(dot(q,r),a) + + def test_random_complex_left(self): + n = 20 + for k in range(2): + a = random([n,n])+1j*random([n,n]) + q,r = qr(a) + c = random([n])+1j*random([n]) + qc,r = qr_multiply(a, c, "left") + assert_array_almost_equal(dot(q, c), qc) + qc,r = qr_multiply(a, identity(n), "left") + assert_array_almost_equal(q, qc) + + def test_random_complex_right(self): + n = 20 + for k in range(2): + a = random([n,n])+1j*random([n,n]) + q,r = qr(a) + c = random([n])+1j*random([n]) + cq,r = qr_multiply(a, c) + assert_array_almost_equal(dot(c, q), cq) + cq,r = qr_multiply(a, identity(n)) + assert_array_almost_equal(q, cq) + + def test_random_complex_pivoting(self): + n = 20 + for k in range(2): + a = random([n,n])+1j*random([n,n]) + q,r,p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(all(d[1:] <= d[:-1])) + assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n)) + assert_array_almost_equal(dot(q,r),a[:,p]) + q2,r2 = qr(a[:,p]) + assert_array_almost_equal(q,q2) + assert_array_almost_equal(r,r2) + + def test_check_finite(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + q,r = qr(a, check_finite=False) + assert_array_almost_equal(dot(transpose(q),q),identity(3)) + assert_array_almost_equal(dot(q,r),a) + + def test_lwork(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + # Get comparison values + q,r = qr(a, lwork=None) + + # Test against minimum valid lwork + q2,r2 = qr(a, lwork=3) + assert_array_almost_equal(q2,q) + assert_array_almost_equal(r2,r) + + # Test against larger lwork + q3,r3 = qr(a, lwork=10) + assert_array_almost_equal(q3,q) + assert_array_almost_equal(r3,r) + + # Test against explicit lwork=-1 + q4,r4 = qr(a, lwork=-1) + assert_array_almost_equal(q4,q) + assert_array_almost_equal(r4,r) + + # Test against invalid lwork + 
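+        # geqrf needs lwork >= n, so values below 3 must fail for 3x3 input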
assert_raises(Exception, qr, (a,), {'lwork':0}) + assert_raises(Exception, qr, (a,), {'lwork':2}) + +class TestRQ(object): + + def setup_method(self): + seed(1234) + + def test_simple(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + r,q = rq(a) + assert_array_almost_equal(dot(q, transpose(q)),identity(3)) + assert_array_almost_equal(dot(r,q),a) + + def test_r(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + r,q = rq(a) + r2 = rq(a, mode='r') + assert_array_almost_equal(r, r2) + + def test_random(self): + n = 20 + for k in range(2): + a = random([n,n]) + r,q = rq(a) + assert_array_almost_equal(dot(q, transpose(q)),identity(n)) + assert_array_almost_equal(dot(r,q),a) + + def test_simple_trap(self): + a = [[8,2,3],[2,9,3]] + r,q = rq(a) + assert_array_almost_equal(dot(transpose(q),q),identity(3)) + assert_array_almost_equal(dot(r,q),a) + + def test_simple_tall(self): + a = [[8,2],[2,9],[5,3]] + r,q = rq(a) + assert_array_almost_equal(dot(transpose(q),q),identity(2)) + assert_array_almost_equal(dot(r,q),a) + + def test_simple_fat(self): + a = [[8,2,5],[2,9,3]] + r,q = rq(a) + assert_array_almost_equal(dot(transpose(q),q),identity(3)) + assert_array_almost_equal(dot(r,q),a) + + def test_simple_complex(self): + a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] + r,q = rq(a) + assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3)) + assert_array_almost_equal(dot(r,q),a) + + def test_random_tall(self): + m = 200 + n = 100 + for k in range(2): + a = random([m,n]) + r,q = rq(a) + assert_array_almost_equal(dot(q, transpose(q)),identity(n)) + assert_array_almost_equal(dot(r,q),a) + + def test_random_trap(self): + m = 100 + n = 200 + for k in range(2): + a = random([m,n]) + r,q = rq(a) + assert_array_almost_equal(dot(q, transpose(q)),identity(n)) + assert_array_almost_equal(dot(r,q),a) + + def test_random_trap_economic(self): + m = 100 + n = 200 + for k in range(2): + a = random([m,n]) + r,q = rq(a, mode='economic') + assert_array_almost_equal(dot(q,transpose(q)),identity(m)) + assert_array_almost_equal(dot(r,q),a) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (m, m)) + + def test_random_complex(self): + n = 20 + for k in range(2): + a = random([n,n])+1j*random([n,n]) + r,q = rq(a) + assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n)) + assert_array_almost_equal(dot(r,q),a) + + def test_random_complex_economic(self): + m = 100 + n = 200 + for k in range(2): + a = random([m,n])+1j*random([m,n]) + r,q = rq(a, mode='economic') + assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m)) + assert_array_almost_equal(dot(r,q),a) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (m, m)) + + def test_check_finite(self): + a = [[8,2,3],[2,9,3],[5,3,6]] + r,q = rq(a, check_finite=False) + assert_array_almost_equal(dot(q, transpose(q)),identity(3)) + assert_array_almost_equal(dot(r,q),a) + + +transp = transpose +any = sometrue + + +class TestSchur(object): + + def test_simple(self): + a = [[8,12,3],[2,9,3],[10,3,6]] + t,z = schur(a) + assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a) + tc,zc = schur(a,'complex') + assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc)))) + assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a) + tc2,zc2 = rsf2csf(tc,zc) + assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a) + + def test_sort(self): + a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]] + s,u,sdim = schur(a,sort='lhp') + assert_array_almost_equal([[0.1134,0.5436,0.8316,0.], + [-0.1134,-0.8245,0.5544,0.], + [-0.8213,0.1308,0.0265,-0.5547], + 
[-0.5475,0.0872,0.0177,0.8321]], + u,3) + assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174], + [0.,-0.5000,9.4472,-0.7184], + [0.,0.,1.4142,-0.1456], + [0.,0.,0.,0.5]], + s,3) + assert_equal(2,sdim) + + s,u,sdim = schur(a,sort='rhp') + assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071], + [-0.4862,0.4930,-0.1434,-0.7071], + [0.6042,0.3944,-0.6924,0.], + [0.4028,0.5986,0.6924,0.]], + u,3) + assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130], + [0.,0.5,6.5809,-3.1870], + [0.,0.,-1.4142,0.9270], + [0.,0.,0.,-0.5]], + s,3) + assert_equal(2,sdim) + + s,u,sdim = schur(a,sort='iuc') + assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042], + [-0.8321,0.,-0.3814,-0.4028], + [0.,0.7071,-0.5134,0.4862], + [0.,0.7071,0.5134,-0.4862]], + u,3) + assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974], + [0.,0.5000,-3.3191,-14.4130], + [0.,0.,1.4142,2.1573], + [0.,0.,0.,-1.4142]], + s,3) + assert_equal(2,sdim) + + s,u,sdim = schur(a,sort='ouc') + assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.], + [-0.4862,0.5134,0.7071,0.], + [0.6042,0.5721,0.,-0.5547], + [0.4028,0.3814,0.,0.8321]], + u,3) + assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974], + [0.,-1.4142,3.3191,6.5809], + [0.,0.,-0.5000,0.], + [0.,0.,0.,0.5000]], + s,3) + assert_equal(2,sdim) + + rhp_function = lambda x: x >= 0.0 + s,u,sdim = schur(a,sort=rhp_function) + assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071], + [-0.4862,0.4930,-0.1434,-0.7071], + [0.6042,0.3944,-0.6924,0.], + [0.4028,0.5986,0.6924,0.]], + u,3) + assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130], + [0.,0.5,6.5809,-3.1870], + [0.,0.,-1.4142,0.9270], + [0.,0.,0.,-0.5]], + s,3) + assert_equal(2,sdim) + + def test_sort_errors(self): + a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]] + assert_raises(ValueError, schur, a, sort='unsupported') + assert_raises(ValueError, schur, a, sort=1) + + def test_check_finite(self): + a = [[8,12,3],[2,9,3],[10,3,6]] + t,z = schur(a, check_finite=False) + assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a) + + +class TestHessenberg(object): + + def test_simple(self): + a = [[-149, -50,-154], + [537, 180, 546], + [-27, -9, -25]] + h1 = [[-149.0000,42.2037,-156.3165], + [-537.6783,152.5511,-554.9272], + [0,0.0728, 2.4489]] + h,q = hessenberg(a,calc_q=1) + assert_array_almost_equal(dot(transp(q),dot(a,q)),h) + assert_array_almost_equal(h,h1,decimal=4) + + def test_simple_complex(self): + a = [[-149, -50,-154], + [537, 180j, 546], + [-27j, -9, -25]] + h,q = hessenberg(a,calc_q=1) + h1 = dot(transp(conj(q)),dot(a,q)) + assert_array_almost_equal(h1,h) + + def test_simple2(self): + a = [[1,2,3,4,5,6,7], + [0,2,3,4,6,7,2], + [0,2,2,3,0,3,2], + [0,0,2,8,0,0,2], + [0,3,1,2,0,1,2], + [0,1,2,3,0,1,0], + [0,0,0,0,0,1,2]] + h,q = hessenberg(a,calc_q=1) + assert_array_almost_equal(dot(transp(q),dot(a,q)),h) + + def test_simple3(self): + a = np.eye(3) + a[-1, 0] = 2 + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(dot(transp(q), dot(a, q)), h) + + def test_random(self): + n = 20 + for k in range(2): + a = random([n,n]) + h,q = hessenberg(a,calc_q=1) + assert_array_almost_equal(dot(transp(q),dot(a,q)),h) + + def test_random_complex(self): + n = 20 + for k in range(2): + a = random([n,n])+1j*random([n,n]) + h,q = hessenberg(a,calc_q=1) + h1 = dot(transp(conj(q)),dot(a,q)) + assert_array_almost_equal(h1,h) + + def test_check_finite(self): + a = [[-149, -50,-154], + [537, 180, 546], + [-27, -9, -25]] + h1 = [[-149.0000,42.2037,-156.3165], + 
[-537.6783,152.5511,-554.9272], + [0,0.0728, 2.4489]] + h,q = hessenberg(a,calc_q=1, check_finite=False) + assert_array_almost_equal(dot(transp(q),dot(a,q)),h) + assert_array_almost_equal(h,h1,decimal=4) + + def test_2x2(self): + a = [[2, 1], [7, 12]] + + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q, np.eye(2)) + assert_array_almost_equal(h, a) + + b = [[2-7j, 1+2j], [7+3j, 12-2j]] + h2, q2 = hessenberg(b, calc_q=1) + assert_array_almost_equal(q2, np.eye(2)) + assert_array_almost_equal(h2, b) + + +class TestQZ(object): + def setup_method(self): + seed(12345) + + def test_qz_single(self): + n = 5 + A = random([n,n]).astype(float32) + B = random([n,n]).astype(float32) + AA,BB,Q,Z = qz(A,B) + assert_array_almost_equal(dot(dot(Q,AA),Z.T), A, decimal=5) + assert_array_almost_equal(dot(dot(Q,BB),Z.T), B, decimal=5) + assert_array_almost_equal(dot(Q,Q.T), eye(n), decimal=5) + assert_array_almost_equal(dot(Z,Z.T), eye(n), decimal=5) + assert_(all(diag(BB) >= 0)) + + def test_qz_double(self): + n = 5 + A = random([n,n]) + B = random([n,n]) + AA,BB,Q,Z = qz(A,B) + assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) + assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) + assert_array_almost_equal(dot(Q,Q.T), eye(n)) + assert_array_almost_equal(dot(Z,Z.T), eye(n)) + assert_(all(diag(BB) >= 0)) + + def test_qz_complex(self): + n = 5 + A = random([n,n]) + 1j*random([n,n]) + B = random([n,n]) + 1j*random([n,n]) + AA,BB,Q,Z = qz(A,B) + assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A) + assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B) + assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n)) + assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n)) + assert_(all(diag(BB) >= 0)) + assert_(all(diag(BB).imag == 0)) + + def test_qz_complex64(self): + n = 5 + A = (random([n,n]) + 1j*random([n,n])).astype(complex64) + B = (random([n,n]) + 1j*random([n,n])).astype(complex64) + AA,BB,Q,Z = qz(A,B) + assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A, decimal=5) + assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B, decimal=5) + assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n), decimal=5) + assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n), decimal=5) + assert_(all(diag(BB) >= 0)) + assert_(all(diag(BB).imag == 0)) + + def test_qz_double_complex(self): + n = 5 + A = random([n,n]) + B = random([n,n]) + AA,BB,Q,Z = qz(A,B, output='complex') + aa = dot(dot(Q,AA),Z.conjugate().T) + assert_array_almost_equal(aa.real, A) + assert_array_almost_equal(aa.imag, 0) + bb = dot(dot(Q,BB),Z.conjugate().T) + assert_array_almost_equal(bb.real, B) + assert_array_almost_equal(bb.imag, 0) + assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n)) + assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n)) + assert_(all(diag(BB) >= 0)) + + def test_qz_double_sort(self): + # from https://www.nag.com/lapack-ex/node119.html + # NOTE: These matrices may be ill-conditioned and lead to a + # seg fault on certain python versions when compiled with + # sse2 or sse3 older ATLAS/LAPACK binaries for windows + # A = np.array([[3.9, 12.5, -34.5, -0.5], + # [ 4.3, 21.5, -47.5, 7.5], + # [ 4.3, 21.5, -43.5, 3.5], + # [ 4.4, 26.0, -46.0, 6.0 ]]) + + # B = np.array([[ 1.0, 2.0, -3.0, 1.0], + # [1.0, 3.0, -5.0, 4.0], + # [1.0, 3.0, -4.0, 3.0], + # [1.0, 3.0, -4.0, 4.0]]) + A = np.array([[3.9, 12.5, -34.5, 2.5], + [4.3, 21.5, -47.5, 7.5], + [4.3, 1.5, -43.5, 3.5], + [4.4, 6.0, -46.0, 6.0]]) + + B = np.array([[1.0, 1.0, -3.0, 1.0], + [1.0, 3.0, -5.0, 4.4], + [1.0, 2.0, -4.0, 1.0], + 
[1.2, 3.0, -4.0, 4.0]]) + + sort = lambda ar,ai,beta: ai == 0 + + assert_raises(ValueError, qz, A, B, sort=sort) + if False: + AA,BB,Q,Z,sdim = qz(A,B,sort=sort) + # assert_(sdim == 2) + assert_(sdim == 4) + assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) + assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) + + # test absolute values bc the sign is ambiguous and might be platform + # dependent + assert_array_almost_equal(np.abs(AA), np.abs(np.array( + [[35.7864, -80.9061, -12.0629, -9.498], + [0., 2.7638, -2.3505, 7.3256], + [0., 0., 0.6258, -0.0398], + [0., 0., 0., -12.8217]])), 4) + assert_array_almost_equal(np.abs(BB), np.abs(np.array( + [[4.5324, -8.7878, 3.2357, -3.5526], + [0., 1.4314, -2.1894, 0.9709], + [0., 0., 1.3126, -0.3468], + [0., 0., 0., 0.559]])), 4) + assert_array_almost_equal(np.abs(Q), np.abs(np.array( + [[-0.4193, -0.605, -0.1894, -0.6498], + [-0.5495, 0.6987, 0.2654, -0.3734], + [-0.4973, -0.3682, 0.6194, 0.4832], + [-0.5243, 0.1008, -0.7142, 0.4526]])), 4) + assert_array_almost_equal(np.abs(Z), np.abs(np.array( + [[-0.9471, -0.2971, -0.1217, 0.0055], + [-0.0367, 0.1209, 0.0358, 0.9913], + [0.3171, -0.9041, -0.2547, 0.1312], + [0.0346, 0.2824, -0.9587, 0.0014]])), 4) + + # test absolute values bc the sign is ambiguous and might be platform + # dependent + # assert_array_almost_equal(abs(AA), abs(np.array([ + # [3.8009, -69.4505, 50.3135, -43.2884], + # [0.0000, 9.2033, -0.2001, 5.9881], + # [0.0000, 0.0000, 1.4279, 4.4453], + # [0.0000, 0.0000, 0.9019, -1.1962]])), 4) + # assert_array_almost_equal(abs(BB), abs(np.array([ + # [1.9005, -10.2285, 0.8658, -5.2134], + # [0.0000, 2.3008, 0.7915, 0.4262], + # [0.0000, 0.0000, 0.8101, 0.0000], + # [0.0000, 0.0000, 0.0000, -0.2823]])), 4) + # assert_array_almost_equal(abs(Q), abs(np.array([ + # [0.4642, 0.7886, 0.2915, -0.2786], + # [0.5002, -0.5986, 0.5638, -0.2713], + # [0.5002, 0.0154, -0.0107, 0.8657], + # [0.5331, -0.1395, -0.7727, -0.3151]])), 4) + # assert_array_almost_equal(dot(Q,Q.T), eye(4)) + # assert_array_almost_equal(abs(Z), abs(np.array([ + # [0.9961, -0.0014, 0.0887, -0.0026], + # [0.0057, -0.0404, -0.0938, -0.9948], + # [0.0626, 0.7194, -0.6908, 0.0363], + # [0.0626, -0.6934, -0.7114, 0.0956]])), 4) + # assert_array_almost_equal(dot(Z,Z.T), eye(4)) + + # def test_qz_complex_sort(self): + # cA = np.array([ + # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j], + # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j], + # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j], + # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]]) + + # cB = np.array([ + # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j], + # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j], + # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j], + # [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]]) + + # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp') + + # eigenvalues = diag(AAS)/diag(BBS) + # assert_(all(np.real(eigenvalues[:sdim] < 0))) + # assert_(all(np.real(eigenvalues[sdim:] > 0))) + + def test_check_finite(self): + n = 5 + A = random([n,n]) + B = random([n,n]) + AA,BB,Q,Z = qz(A,B,check_finite=False) + assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) + assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) + assert_array_almost_equal(dot(Q,Q.T), eye(n)) + assert_array_almost_equal(dot(Z,Z.T), eye(n)) + assert_(all(diag(BB) >= 0)) + + +def _make_pos(X): + # the decompositions can have different signs than verified results + 
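+    # np.sign(X)*X equals abs(X) elementwise, which strips that ambiguity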
return np.sign(X)*X + + +class TestOrdQZ(object): + @classmethod + def setup_class(cls): + # https://www.nag.com/lapack-ex/node119.html + A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j, + 7.5 + 0.5j], + [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j, + -10.5 - 1.5j], + [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j, + -7.5 - 3.5j], + [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j, + -19.0 - 32.5j]]) + + B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j], + [0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j], + [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j], + [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]]) + + # https://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml + A2 = np.array([[3.9, 12.5, -34.5, -0.5], + [4.3, 21.5, -47.5, 7.5], + [4.3, 21.5, -43.5, 3.5], + [4.4, 26.0, -46.0, 6.0]]) + + B2 = np.array([[1, 2, -3, 1], + [1, 3, -5, 4], + [1, 3, -4, 3], + [1, 3, -4, 4]]) + + # example with the eigenvalues + # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j, + # 0.61244091 + # thus featuring: + # * one complex conjugate eigenvalue pair, + # * one eigenvalue in the lhp + # * 2 eigenvalues in the unit circle + # * 2 non-real eigenvalues + A3 = np.array([[5., 1., 3., 3.], + [4., 4., 2., 7.], + [7., 4., 1., 3.], + [0., 4., 8., 7.]]) + B3 = np.array([[8., 10., 6., 10.], + [7., 7., 2., 9.], + [9., 1., 6., 6.], + [5., 1., 4., 7.]]) + + # example with infinite eigenvalues + A4 = np.eye(2) + B4 = np.diag([0, 1]) + + # example with (alpha, beta) = (0, 0) + A5 = np.diag([1, 0]) + B5 = np.diag([1, 0]) + + cls.A = [A1, A2, A3, A4, A5] + cls.B = [B1, B2, B3, B4, A5] + + def qz_decomp(self, sort): + try: + olderr = np.seterr('raise') + ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)] + finally: + np.seterr(**olderr) + + return tuple(ret) + + def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z): + Id = np.eye(*A.shape) + # make sure Q and Z are orthogonal + assert_array_almost_equal(Q.dot(Q.T.conj()), Id) + assert_array_almost_equal(Z.dot(Z.T.conj()), Id) + # check factorization + assert_array_almost_equal(Q.dot(AA), A.dot(Z)) + assert_array_almost_equal(Q.dot(BB), B.dot(Z)) + # check shape of AA and BB + assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape)) + assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape)) + # check eigenvalues + for i in range(A.shape[0]): + # does the current diagonal element belong to a 2-by-2 block + # that was already checked? 
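+            # (real QZ output stores each complex-conjugate pair as a 2-by-2
+            # block with a nonzero subdiagonal entry)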
+ if i > 0 and A[i, i - 1] != 0: + continue + # take care of 2-by-2 blocks + if i < AA.shape[0] - 1 and AA[i + 1, i] != 0: + evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2]) + # make sure the pair of complex conjugate eigenvalues + # is ordered consistently (positive imaginary part first) + if evals[0].imag < 0: + evals = evals[[1, 0]] + tmp = alpha[i:i + 2]/beta[i:i + 2] + if tmp[0].imag < 0: + tmp = tmp[[1, 0]] + assert_array_almost_equal(evals, tmp) + else: + if alpha[i] == 0 and beta[i] == 0: + assert_equal(AA[i, i], 0) + assert_equal(BB[i, i], 0) + elif beta[i] == 0: + assert_equal(BB[i, i], 0) + else: + assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i]) + sortfun = _select_function(sort) + lastsort = True + for i in range(A.shape[0]): + cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]])) + # once the sorting criterion was not matched all subsequent + # eigenvalues also shouldn't match + if not lastsort: + assert(not cursort) + lastsort = cursort + + def check_all(self, sort): + ret = self.qz_decomp(sort) + + for reti, Ai, Bi in zip(ret, self.A, self.B): + self.check(Ai, Bi, sort, *reti) + + def test_lhp(self): + self.check_all('lhp') + + def test_rhp(self): + self.check_all('rhp') + + def test_iuc(self): + self.check_all('iuc') + + def test_ouc(self): + self.check_all('ouc') + + def test_ref(self): + # real eigenvalues first (top-left corner) + def sort(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + out[~nonzero] = False + out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0 + return out + + self.check_all(sort) + + def test_cef(self): + # complex eigenvalues first (top-left corner) + def sort(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + out[~nonzero] = False + out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0 + return out + + self.check_all(sort) + + def test_diff_input_types(self): + ret = ordqz(self.A[1], self.B[2], sort='lhp') + self.check(self.A[1], self.B[2], 'lhp', *ret) + + ret = ordqz(self.B[2], self.A[1], sort='lhp') + self.check(self.B[2], self.A[1], 'lhp', *ret) + + def test_sort_explicit(self): + # Test order of the eigenvalues in the 2 x 2 case where we can + # explicitly compute the solution + A1 = np.eye(2) + B1 = np.diag([-2, 0.5]) + expected1 = [('lhp', [-0.5, 2]), + ('rhp', [2, -0.5]), + ('iuc', [-0.5, 2]), + ('ouc', [2, -0.5])] + A2 = np.eye(2) + B2 = np.diag([-2 + 1j, 0.5 + 0.5j]) + expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]), + ('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]), + ('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]), + ('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])] + # 'lhp' is ambiguous so don't test it + A3 = np.eye(2) + B3 = np.diag([2, 0]) + expected3 = [('rhp', [0.5, np.inf]), + ('iuc', [0.5, np.inf]), + ('ouc', [np.inf, 0.5])] + # 'rhp' is ambiguous so don't test it + A4 = np.eye(2) + B4 = np.diag([-2, 0]) + expected4 = [('lhp', [-0.5, np.inf]), + ('iuc', [-0.5, np.inf]), + ('ouc', [np.inf, -0.5])] + A5 = np.diag([0, 1]) + B5 = np.diag([0, 0.5]) + # 'lhp' and 'iuc' are ambiguous so don't test them + expected5 = [('rhp', [2, np.nan]), + ('ouc', [2, np.nan])] + + A = [A1, A2, A3, A4, A5] + B = [B1, B2, B3, B4, B5] + expected = [expected1, expected2, expected3, expected4, expected5] + for Ai, Bi, expectedi in zip(A, B, expected): + for sortstr, expected_eigvals in expectedi: + _, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr) + azero = (alpha == 0) + bzero = (beta == 0) + x = np.empty_like(alpha) + x[azero & bzero] = np.nan + x[~azero & bzero] = np.inf + x[~bzero] = alpha[~bzero]/beta[~bzero] + 
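+                # generalized eigenvalues are alpha/beta, with the conventions
+                # 0/0 -> nan and nonzero/0 -> inf as assigned above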
assert_allclose(expected_eigvals, x) + + +class TestOrdQZWorkspaceSize(object): + + def setup_method(self): + seed(12345) + + def test_decompose(self): + + N = 202 + + # raises error if lwork parameter to dtrsen is too small + for ddtype in [np.float32, np.float64]: + A = random((N,N)).astype(ddtype) + B = random((N,N)).astype(ddtype) + # sort = lambda alphar, alphai, beta: alphar**2 + alphai**2< beta**2 + sort = lambda alpha, beta: alpha < beta + [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='real') + + for ddtype in [np.complex, np.complex64]: + A = random((N,N)).astype(ddtype) + B = random((N,N)).astype(ddtype) + sort = lambda alpha, beta: alpha < beta + [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='complex') + + @pytest.mark.slow + def test_decompose_ouc(self): + + N = 202 + + # segfaults if lwork parameter to dtrsen is too small + for ddtype in [np.float32, np.float64, np.complex, np.complex64]: + A = random((N,N)).astype(ddtype) + B = random((N,N)).astype(ddtype) + [S,T,alpha,beta,U,V] = ordqz(A,B,sort='ouc') + + +class TestDatacopied(object): + + def test_datacopied(self): + from scipy.linalg.decomp import _datacopied + + M = matrix([[0,1],[2,3]]) + A = asarray(M) + L = M.tolist() + M2 = M.copy() + + class Fake1: + def __array__(self): + return A + + class Fake2: + __array_interface__ = A.__array_interface__ + + F1 = Fake1() + F2 = Fake2() + + for item, status in [(M, False), (A, False), (L, True), + (M2, False), (F1, False), (F2, False)]: + arr = asarray(item) + assert_equal(_datacopied(arr, item), status, + err_msg=repr(item)) + + +def test_aligned_mem_float(): + """Check linalg works with non-aligned memory""" + # Allocate 402 bytes of memory (allocated on boundary) + a = arange(402, dtype=np.uint8) + + # Create an array with boundary offset 4 + z = np.frombuffer(a.data, offset=2, count=100, dtype=float32) + z.shape = 10, 10 + + eig(z, overwrite_a=True) + eig(z.T, overwrite_a=True) + + +def test_aligned_mem(): + """Check linalg works with non-aligned memory""" + # Allocate 804 bytes of memory (allocated on boundary) + a = arange(804, dtype=np.uint8) + + # Create an array with boundary offset 4 + z = np.frombuffer(a.data, offset=4, count=100, dtype=float) + z.shape = 10, 10 + + eig(z, overwrite_a=True) + eig(z.T, overwrite_a=True) + + +def test_aligned_mem_complex(): + """Check that complex objects don't need to be completely aligned""" + # Allocate 1608 bytes of memory (allocated on boundary) + a = zeros(1608, dtype=np.uint8) + + # Create an array with boundary offset 8 + z = np.frombuffer(a.data, offset=8, count=100, dtype=complex) + z.shape = 10, 10 + + eig(z, overwrite_a=True) + # This does not need special handling + eig(z.T, overwrite_a=True) + + +def check_lapack_misaligned(func, args, kwargs): + args = list(args) + for i in range(len(args)): + a = args[:] + if isinstance(a[i],np.ndarray): + # Try misaligning a[i] + aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8) + aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype) + aa.shape = a[i].shape + aa[...] 
= a[i] + a[i] = aa + func(*a,**kwargs) + if len(a[i].shape) > 1: + a[i] = a[i].T + func(*a,**kwargs) + + +@pytest.mark.xfail(run=False, reason="Ticket #1152, triggers a segfault in rare cases.") +def test_lapack_misaligned(): + M = np.eye(10,dtype=float) + R = np.arange(100) + R.shape = 10,10 + S = np.arange(20000,dtype=np.uint8) + S = np.frombuffer(S.data, offset=4, count=100, dtype=float) + S.shape = 10, 10 + b = np.ones(10) + LU, piv = lu_factor(S) + for (func, args, kwargs) in [ + (eig,(S,),dict(overwrite_a=True)), # crash + (eigvals,(S,),dict(overwrite_a=True)), # no crash + (lu,(S,),dict(overwrite_a=True)), # no crash + (lu_factor,(S,),dict(overwrite_a=True)), # no crash + (lu_solve,((LU,piv),b),dict(overwrite_b=True)), + (solve,(S,b),dict(overwrite_a=True,overwrite_b=True)), + (svd,(M,),dict(overwrite_a=True)), # no crash + (svd,(R,),dict(overwrite_a=True)), # no crash + (svd,(S,),dict(overwrite_a=True)), # crash + (svdvals,(S,),dict()), # no crash + (svdvals,(S,),dict(overwrite_a=True)), # crash + (cholesky,(M,),dict(overwrite_a=True)), # no crash + (qr,(S,),dict(overwrite_a=True)), # crash + (rq,(S,),dict(overwrite_a=True)), # crash + (hessenberg,(S,),dict(overwrite_a=True)), # crash + (schur,(S,),dict(overwrite_a=True)), # crash + ]: + check_lapack_misaligned(func, args, kwargs) +# not properly tested +# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd + + +class TestOverwrite(object): + def test_eig(self): + assert_no_overwrite(eig, [(3,3)]) + assert_no_overwrite(eig, [(3,3), (3,3)]) + + def test_eigh(self): + assert_no_overwrite(eigh, [(3,3)]) + assert_no_overwrite(eigh, [(3,3), (3,3)]) + + def test_eig_banded(self): + assert_no_overwrite(eig_banded, [(3,2)]) + + def test_eigvals(self): + assert_no_overwrite(eigvals, [(3,3)]) + + def test_eigvalsh(self): + assert_no_overwrite(eigvalsh, [(3,3)]) + + def test_eigvals_banded(self): + assert_no_overwrite(eigvals_banded, [(3,2)]) + + def test_hessenberg(self): + assert_no_overwrite(hessenberg, [(3,3)]) + + def test_lu_factor(self): + assert_no_overwrite(lu_factor, [(3,3)]) + + def test_lu_solve(self): + x = np.array([[1,2,3], [4,5,6], [7,8,8]]) + xlu = lu_factor(x) + assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)]) + + def test_lu(self): + assert_no_overwrite(lu, [(3,3)]) + + def test_qr(self): + assert_no_overwrite(qr, [(3,3)]) + + def test_rq(self): + assert_no_overwrite(rq, [(3,3)]) + + def test_schur(self): + assert_no_overwrite(schur, [(3,3)]) + + def test_schur_complex(self): + assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)], + dtypes=[np.float32, np.float64]) + + def test_svd(self): + assert_no_overwrite(svd, [(3,3)]) + assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3,3)]) + + def test_svdvals(self): + assert_no_overwrite(svdvals, [(3,3)]) + + +def _check_orth(n, dtype, skip_big=False): + X = np.ones((n, 2), dtype=float).astype(dtype) + + eps = np.finfo(dtype).eps + tol = 1000 * eps + + Y = orth(X) + assert_equal(Y.shape, (n, 1)) + assert_allclose(Y, Y.mean(), atol=tol) + + Y = orth(X.T) + assert_equal(Y.shape, (2, 1)) + assert_allclose(Y, Y.mean(), atol=tol) + + if n > 5 and not skip_big: + np.random.seed(1) + X = np.random.rand(n, 5).dot(np.random.rand(5, n)) + X = X + 1e-4 * np.random.rand(n, 1).dot(np.random.rand(1, n)) + X = X.astype(dtype) + + Y = orth(X, rcond=1e-3) + assert_equal(Y.shape, (n, 5)) + + Y = orth(X, rcond=1e-6) + assert_equal(Y.shape, (n, 5 + 1)) + + +@pytest.mark.slow +@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8, reason="test only on 
64-bit, else too slow") +def test_orth_memory_efficiency(): + # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable. + # Keep in mind that @pytest.mark.slow tests are likely to be running + # under configurations that support 4Gb+ memory for tests related to + # 32 bit overflow. + n = 10*1000*1000 + try: + _check_orth(n, np.float64, skip_big=True) + except MemoryError: + raise AssertionError('memory error perhaps caused by orth regression') + + +def test_orth(): + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + sizes = [1, 2, 3, 10, 100] + for dt, n in itertools.product(dtypes, sizes): + _check_orth(n, dt) + + +def test_null_space(): + np.random.seed(1) + + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + sizes = [1, 2, 3, 10, 100] + + for dt, n in itertools.product(dtypes, sizes): + X = np.ones((2, n), dtype=dt) + + eps = np.finfo(dt).eps + tol = 1000 * eps + + Y = null_space(X) + assert_equal(Y.shape, (n, n-1)) + assert_allclose(X.dot(Y), 0, atol=tol) + + Y = null_space(X.T) + assert_equal(Y.shape, (2, 1)) + assert_allclose(X.T.dot(Y), 0, atol=tol) + + X = np.random.randn(1 + n//2, n) + Y = null_space(X) + assert_equal(Y.shape, (n, n - 1 - n//2)) + assert_allclose(X.dot(Y), 0, atol=tol) + + if n > 5: + np.random.seed(1) + X = np.random.rand(n, 5).dot(np.random.rand(5, n)) + X = X + 1e-4 * np.random.rand(n, 1).dot(np.random.rand(1, n)) + X = X.astype(dt) + + Y = null_space(X, rcond=1e-3) + assert_equal(Y.shape, (n, n - 5)) + + Y = null_space(X, rcond=1e-6) + assert_equal(Y.shape, (n, n - 6)) + + +def test_subspace_angles(): + H = hadamard(8, float) + A = H[:, :3] + B = H[:, 3:] + assert_allclose(subspace_angles(A, B), [np.pi / 2.] * 3, atol=1e-14) + assert_allclose(subspace_angles(B, A), [np.pi / 2.] 
* 3, atol=1e-14) + for x in (A, B): + assert_allclose(subspace_angles(x, x), np.zeros(x.shape[1]), + atol=1e-14) + # From MATLAB function "subspace", which effectively only returns the + # last value that we calculate + x = np.array( + [[0.537667139546100, 0.318765239858981, 3.578396939725760, 0.725404224946106], # noqa: E501 + [1.833885014595086, -1.307688296305273, 2.769437029884877, -0.063054873189656], # noqa: E501 + [-2.258846861003648, -0.433592022305684, -1.349886940156521, 0.714742903826096], # noqa: E501 + [0.862173320368121, 0.342624466538650, 3.034923466331855, -0.204966058299775]]) # noqa: E501 + expected = 1.481454682101605 + assert_allclose(subspace_angles(x[:, :2], x[:, 2:])[0], expected, + rtol=1e-12) + assert_allclose(subspace_angles(x[:, 2:], x[:, :2])[0], expected, + rtol=1e-12) + expected = 0.746361174247302 + assert_allclose(subspace_angles(x[:, :2], x[:, [2]]), expected, rtol=1e-12) + assert_allclose(subspace_angles(x[:, [2]], x[:, :2]), expected, rtol=1e-12) + expected = 0.487163718534313 + assert_allclose(subspace_angles(x[:, :3], x[:, [3]]), expected, rtol=1e-12) + assert_allclose(subspace_angles(x[:, [3]], x[:, :3]), expected, rtol=1e-12) + expected = 0.328950515907756 + assert_allclose(subspace_angles(x[:, :2], x[:, 1:]), [expected, 0], + atol=1e-12) + # Degenerate conditions + assert_raises(ValueError, subspace_angles, x[0], x) + assert_raises(ValueError, subspace_angles, x, x[0]) + assert_raises(ValueError, subspace_angles, x[:-1], x) + + # Test branch if mask.any is True: + A = np.array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0], + [0, 0, 0]]) + B = np.array([[1, 0, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 1]]) + expected = np.array([np.pi/2, 0, 0]) + assert_allclose(subspace_angles(A, B), expected, rtol=1e-12) + + +class TestCDF2RDF(object): + + def matmul(self, a, b): + return np.einsum('...ij,...jk->...ik', a, b) + + def assert_eig_valid(self, w, v, x): + assert_array_almost_equal( + self.matmul(v, w), + self.matmul(x, v) + ) + + def test_single_array0x0real(self): + # eig doesn't support 0x0 in old versions of numpy + X = np.empty((0, 0)) + w, v = np.empty(0), np.empty((0, 0)) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array2x2_real(self): + X = np.array([[1, 2], [3, -1]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array2x2_complex(self): + X = np.array([[1, 2], [-2, 1]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array3x3_real(self): + X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array3x3_complex(self): + X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_random_1d_stacked_arrays(self): + # cannot test M == 0 due to bug in old numpy + for M in range(1, 7): + X = np.random.rand(100, M, M) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_random_2d_stacked_arrays(self): + # cannot test M == 0 due to bug in old numpy + for M in range(1, 7): + X = np.random.rand(10, 10, M, M) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_low_dimensionality_error(self): + w, v = np.empty(()), np.array((2,)) + assert_raises(ValueError, cdf2rdf, w, v) + + def test_not_square_error(self): + # Check 
that passing a non-square array raises a ValueError. + w, v = np.arange(3), np.arange(6).reshape(3,2) + assert_raises(ValueError, cdf2rdf, w, v) + + def test_swapped_v_w_error(self): + # Check that exchanging places of w and v raises ValueError. + X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) + w, v = np.linalg.eig(X) + assert_raises(ValueError, cdf2rdf, v, w) + + def test_non_associated_error(self): + # Check that passing non-associated eigenvectors raises a ValueError. + w, v = np.arange(3), np.arange(16).reshape(4,4) + assert_raises(ValueError, cdf2rdf, w, v) + + def test_not_conjugate_pairs(self): + # Check that passing non-conjugate pairs raises a ValueError. + X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]]) + w, v = np.linalg.eig(X) + assert_raises(ValueError, cdf2rdf, w, v) + + # different arrays in the stack, so not conjugate + X = np.array([ + [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]], + [[1, 2, 3], [1, 2, 3], [2, 5, 6-1j]], + ]) + w, v = np.linalg.eig(X) + assert_raises(ValueError, cdf2rdf, w, v) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp.pyc new file mode 100644 index 0000000..9e39923 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_cholesky.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_cholesky.py new file mode 100644 index 0000000..0e8737d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_cholesky.py @@ -0,0 +1,204 @@ +from __future__ import division, print_function, absolute_import + +from numpy.testing import assert_array_almost_equal, assert_array_equal +from pytest import raises as assert_raises + +from numpy import array, transpose, dot, conjugate, zeros_like, empty +from numpy.random import random +from scipy.linalg import cholesky, cholesky_banded, cho_solve_banded, \ + cho_factor, cho_solve + +from scipy.linalg._testutils import assert_no_overwrite + + +class TestCholesky(object): + + def test_simple(self): + a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]] + c = cholesky(a) + assert_array_almost_equal(dot(transpose(c), c), a) + c = transpose(c) + a = dot(c, transpose(c)) + assert_array_almost_equal(cholesky(a, lower=1), c) + + def test_check_finite(self): + a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]] + c = cholesky(a, check_finite=False) + assert_array_almost_equal(dot(transpose(c), c), a) + c = transpose(c) + a = dot(c, transpose(c)) + assert_array_almost_equal(cholesky(a, lower=1, check_finite=False), c) + + def test_simple_complex(self): + m = array([[3+1j, 3+4j, 5], [0, 2+2j, 2+7j], [0, 0, 7+4j]]) + a = dot(transpose(conjugate(m)), m) + c = cholesky(a) + a1 = dot(transpose(conjugate(c)), c) + assert_array_almost_equal(a, a1) + c = transpose(c) + a = dot(c, transpose(conjugate(c))) + assert_array_almost_equal(cholesky(a, lower=1), c) + + def test_random(self): + n = 20 + for k in range(2): + m = random([n, n]) + for i in range(n): + m[i, i] = 20*(.1+m[i, i]) + a = dot(transpose(m), m) + c = cholesky(a) + a1 = dot(transpose(c), c) + assert_array_almost_equal(a, a1) + c = transpose(c) + a = dot(c, transpose(c)) + assert_array_almost_equal(cholesky(a, lower=1), c) + + def test_random_complex(self): + n = 20 + for k in range(2): + m = random([n, n])+1j*random([n, n]) + for i in range(n): + m[i, i] = 20*(.1+abs(m[i, i])) + a = 
dot(transpose(conjugate(m)), m) + c = cholesky(a) + a1 = dot(transpose(conjugate(c)), c) + assert_array_almost_equal(a, a1) + c = transpose(c) + a = dot(c, transpose(conjugate(c))) + assert_array_almost_equal(cholesky(a, lower=1), c) + + +class TestCholeskyBanded(object): + """Tests for cholesky_banded() and cho_solve_banded.""" + + def test_check_finite(self): + # Symmetric positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, 0.2], + [0.0, 0.0, 0.2, 4.0]]) + # Banded storage form of `a`. + ab = array([[-1.0, 1.0, 0.5, 0.2], + [4.0, 4.0, 4.0, 4.0]]) + c = cholesky_banded(ab, lower=False, check_finite=False) + ufac = zeros_like(a) + ufac[list(range(4)), list(range(4))] = c[-1] + ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] + assert_array_almost_equal(a, dot(ufac.T, ufac)) + + b = array([0.0, 0.5, 4.2, 4.2]) + x = cho_solve_banded((c, False), b, check_finite=False) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_upper_real(self): + # Symmetric positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, 0.2], + [0.0, 0.0, 0.2, 4.0]]) + # Banded storage form of `a`. + ab = array([[-1.0, 1.0, 0.5, 0.2], + [4.0, 4.0, 4.0, 4.0]]) + c = cholesky_banded(ab, lower=False) + ufac = zeros_like(a) + ufac[list(range(4)), list(range(4))] = c[-1] + ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] + assert_array_almost_equal(a, dot(ufac.T, ufac)) + + b = array([0.0, 0.5, 4.2, 4.2]) + x = cho_solve_banded((c, False), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_upper_complex(self): + # Hermitian positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, -0.2j], + [0.0, 0.0, 0.2j, 4.0]]) + # Banded storage form of `a`. + ab = array([[-1.0, 1.0, 0.5, -0.2j], + [4.0, 4.0, 4.0, 4.0]]) + c = cholesky_banded(ab, lower=False) + ufac = zeros_like(a) + ufac[list(range(4)), list(range(4))] = c[-1] + ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] + assert_array_almost_equal(a, dot(ufac.conj().T, ufac)) + + b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0]) + x = cho_solve_banded((c, False), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_lower_real(self): + # Symmetric positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, 0.2], + [0.0, 0.0, 0.2, 4.0]]) + # Banded storage form of `a`. + ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 0.5, 0.2, -1.0]]) + c = cholesky_banded(ab, lower=True) + lfac = zeros_like(a) + lfac[list(range(4)), list(range(4))] = c[0] + lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3] + assert_array_almost_equal(a, dot(lfac, lfac.T)) + + b = array([0.0, 0.5, 4.2, 4.2]) + x = cho_solve_banded((c, True), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_lower_complex(self): + # Hermitian positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, -0.2j], + [0.0, 0.0, 0.2j, 4.0]]) + # Banded storage form of `a`. 
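+        # lower banded storage packs the diagonals into rows: ab[0] is the
+        # main diagonal, ab[1][:-1] the first subdiagonal (the trailing -1.0
+        # is padding that is never read)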
+ ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 0.5, 0.2j, -1.0]]) + c = cholesky_banded(ab, lower=True) + lfac = zeros_like(a) + lfac[list(range(4)), list(range(4))] = c[0] + lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3] + assert_array_almost_equal(a, dot(lfac, lfac.conj().T)) + + b = array([0.0, 0.5j, 3.8j, 3.8]) + x = cho_solve_banded((c, True), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0]) + + +class TestOverwrite(object): + def test_cholesky(self): + assert_no_overwrite(cholesky, [(3, 3)]) + + def test_cho_factor(self): + assert_no_overwrite(cho_factor, [(3, 3)]) + + def test_cho_solve(self): + x = array([[2, -1, 0], [-1, 2, -1], [0, -1, 2]]) + xcho = cho_factor(x) + assert_no_overwrite(lambda b: cho_solve(xcho, b), [(3,)]) + + def test_cholesky_banded(self): + assert_no_overwrite(cholesky_banded, [(2, 3)]) + + def test_cho_solve_banded(self): + x = array([[0, -1, -1], [2, 2, 2]]) + xcho = cholesky_banded(x) + assert_no_overwrite(lambda b: cho_solve_banded((xcho, False), b), + [(3,)]) + + +class TestEmptyArray(object): + def test_cho_factor_empty_square(self): + a = empty((0, 0)) + b = array([]) + c = array([[]]) + d = [] + e = [[]] + + x, _ = cho_factor(a) + assert_array_equal(x, a) + + for x in ([b, c, d, e]): + assert_raises(ValueError, cho_factor, x) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_cholesky.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_cholesky.pyc new file mode 100644 index 0000000..cffc078 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_cholesky.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_ldl.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_ldl.py new file mode 100644 index 0000000..82066a7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_ldl.py @@ -0,0 +1,137 @@ +from __future__ import division, print_function, absolute_import + +import itertools +from numpy.testing import assert_array_almost_equal, assert_allclose, assert_ +from numpy import (array, eye, zeros, empty_like, empty, tril_indices_from, + tril, triu_indices_from, spacing, float32, float64, + complex64, complex128) +from numpy.random import rand, randint, seed +from scipy.linalg import ldl +from pytest import raises as assert_raises, warns +from numpy import ComplexWarning + + +def test_args(): + A = eye(3) + # Nonsquare array + assert_raises(ValueError, ldl, A[:, :2]) + # Complex matrix with imaginary diagonal entries with "hermitian=True" + with warns(ComplexWarning): + ldl(A*1j) + + +def test_empty_array(): + a = empty((0, 0), dtype=complex) + l, d, p = ldl(empty((0, 0))) + assert_array_almost_equal(l, empty_like(a)) + assert_array_almost_equal(d, empty_like(a)) + assert_array_almost_equal(p, array([], dtype=int)) + + +def test_simple(): + a = array([[-0.39-0.71j, 5.14-0.64j, -7.86-2.96j, 3.80+0.92j], + [5.14-0.64j, 8.86+1.81j, -3.52+0.58j, 5.32-1.59j], + [-7.86-2.96j, -3.52+0.58j, -2.83-0.03j, -1.54-2.86j], + [3.80+0.92j, 5.32-1.59j, -1.54-2.86j, -0.56+0.12j]]) + b = array([[5., 10, 1, 18], + [10., 2, 11, 1], + [1., 11, 19, 9], + [18., 1, 9, 0]]) + c = array([[52., 97, 112, 107, 50], + [97., 114, 89, 98, 13], + [112., 89, 64, 33, 6], + [107., 98, 33, 60, 73], + [50., 13, 6, 73, 77]]) + + d = array([[2., 2, -4, 0, 4], + [2., -2, -2, 10, -8], + [-4., -2, 6, -8, -4], + [0., 10, -8, 6, -6], + [4., -8, -4, -6, 10]]) + e = array([[-1.36+0.00j, 0+0j, 0+0j, 0+0j], 
+ [1.58-0.90j, -8.87+0j, 0+0j, 0+0j], + [2.21+0.21j, -1.84+0.03j, -4.63+0j, 0+0j], + [3.91-1.50j, -1.78-1.18j, 0.11-0.11j, -1.84+0.00j]]) + for x in (b, c, d): + l, d, p = ldl(x) + assert_allclose(l.dot(d).dot(l.T), x, atol=spacing(1000.), rtol=0) + + u, d, p = ldl(x, lower=False) + assert_allclose(u.dot(d).dot(u.T), x, atol=spacing(1000.), rtol=0) + + l, d, p = ldl(a, hermitian=False) + assert_allclose(l.dot(d).dot(l.T), a, atol=spacing(1000.), rtol=0) + + u, d, p = ldl(a, lower=False, hermitian=False) + assert_allclose(u.dot(d).dot(u.T), a, atol=spacing(1000.), rtol=0) + + # Use upper part for the computation and use the lower part for comparison + l, d, p = ldl(e.conj().T, lower=0) + assert_allclose(tril(l.dot(d).dot(l.conj().T)-e), zeros((4, 4)), + atol=spacing(1000.), rtol=0) + + +def test_permutations(): + seed(1234) + for _ in range(10): + n = randint(1, 100) + # Random real/complex array + x = rand(n, n) if randint(2) else rand(n, n) + rand(n, n)*1j + x = x + x.conj().T + x += eye(n)*randint(5, 1e6) + l_ind = tril_indices_from(x, k=-1) + u_ind = triu_indices_from(x, k=1) + + # Test whether permutations lead to a triangular array + u, d, p = ldl(x, lower=0) + # lower part should be zero + assert_(not any(u[p, :][l_ind]), 'Spin {} failed'.format(_)) + + l, d, p = ldl(x, lower=1) + # upper part should be zero + assert_(not any(l[p, :][u_ind]), 'Spin {} failed'.format(_)) + + +def test_ldl_type_size_combinations(): + seed(1234) + sizes = [30, 750] + real_dtypes = [float32, float64] + complex_dtypes = [complex64, complex128] + + for n, dtype in itertools.product(sizes, real_dtypes): + msg = ("Failed for size: {}, dtype: {}".format(n, dtype)) + + x = rand(n, n).astype(dtype) + x = x + x.T + x += eye(n, dtype=dtype)*dtype(randint(5, 1e6)) + + l, d1, p = ldl(x) + u, d2, p = ldl(x, lower=0) + rtol = 1e-4 if dtype is float32 else 1e-10 + assert_allclose(l.dot(d1).dot(l.T), x, rtol=rtol, err_msg=msg) + assert_allclose(u.dot(d2).dot(u.T), x, rtol=rtol, err_msg=msg) + + for n, dtype in itertools.product(sizes, complex_dtypes): + msg1 = ("Her failed for size: {}, dtype: {}".format(n, dtype)) + msg2 = ("Sym failed for size: {}, dtype: {}".format(n, dtype)) + + # Complex hermitian upper/lower + x = (rand(n, n)+1j*rand(n, n)).astype(dtype) + x = x+x.conj().T + x += eye(n, dtype=dtype)*dtype(randint(5, 1e6)) + + l, d1, p = ldl(x) + u, d2, p = ldl(x, lower=0) + rtol = 1e-4 if dtype is complex64 else 1e-10 + assert_allclose(l.dot(d1).dot(l.conj().T), x, rtol=rtol, err_msg=msg1) + assert_allclose(u.dot(d2).dot(u.conj().T), x, rtol=rtol, err_msg=msg1) + + # Complex symmetric upper/lower + x = (rand(n, n)+1j*rand(n, n)).astype(dtype) + x = x+x.T + x += eye(n, dtype=dtype)*dtype(randint(5, 1e6)) + + l, d1, p = ldl(x, hermitian=0) + u, d2, p = ldl(x, lower=0, hermitian=0) + assert_allclose(l.dot(d1).dot(l.T), x, rtol=rtol, err_msg=msg2) + assert_allclose(u.dot(d2).dot(u.T), x, rtol=rtol, err_msg=msg2) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_ldl.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_ldl.pyc new file mode 100644 index 0000000..9066a60 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_ldl.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_polar.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_polar.py new file mode 100644 index 0000000..207c3bb --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_polar.py @@ -0,0 +1,92 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.linalg import norm +from numpy.testing import (assert_, assert_allclose, assert_equal) +from scipy.linalg import polar, eigh + + +diag2 = np.array([[2, 0], [0, 3]]) +a13 = np.array([[1, 2, 2]]) + +precomputed_cases = [ + [[[0]], 'right', [[1]], [[0]]], + [[[0]], 'left', [[1]], [[0]]], + [[[9]], 'right', [[1]], [[9]]], + [[[9]], 'left', [[1]], [[9]]], + [diag2, 'right', np.eye(2), diag2], + [diag2, 'left', np.eye(2), diag2], + [a13, 'right', a13/norm(a13[0]), a13.T.dot(a13)/norm(a13[0])], +] + +verify_cases = [ + [[1, 2], [3, 4]], + [[1, 2, 3]], + [[1], [2], [3]], + [[1, 2, 3], [3, 4, 0]], + [[1, 2], [3, 4], [5, 5]], + [[1, 2], [3, 4+5j]], + [[1, 2, 3j]], + [[1], [2], [3j]], + [[1, 2, 3+2j], [3, 4-1j, -4j]], + [[1, 2], [3-2j, 4+0.5j], [5, 5]], + [[10000, 10, 1], [-1, 2, 3j], [0, 1, 2]], +] + + +def check_precomputed_polar(a, side, expected_u, expected_p): + # Compare the result of the polar decomposition to a + # precomputed result. + u, p = polar(a, side=side) + assert_allclose(u, expected_u, atol=1e-15) + assert_allclose(p, expected_p, atol=1e-15) + + +def verify_polar(a): + # Compute the polar decomposition, and then verify that + # the result has all the expected properties. + product_atol = np.sqrt(np.finfo(float).eps) + + aa = np.asarray(a) + m, n = aa.shape + + u, p = polar(a, side='right') + assert_equal(u.shape, (m, n)) + assert_equal(p.shape, (n, n)) + # a = up + assert_allclose(u.dot(p), a, atol=product_atol) + if m >= n: + assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15) + else: + assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15) + # p is Hermitian positive semidefinite. + assert_allclose(p.conj().T, p) + evals = eigh(p, eigvals_only=True) + nonzero_evals = evals[abs(evals) > 1e-14] + assert_((nonzero_evals >= 0).all()) + + u, p = polar(a, side='left') + assert_equal(u.shape, (m, n)) + assert_equal(p.shape, (m, m)) + # a = pu + assert_allclose(p.dot(u), a, atol=product_atol) + if m >= n: + assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15) + else: + assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15) + # p is Hermitian positive semidefinite. 
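+    # symmetry is asserted directly; semidefiniteness is checked through the
+    # eigenvalues, ignoring those at roundoff level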
+ assert_allclose(p.conj().T, p) + evals = eigh(p, eigvals_only=True) + nonzero_evals = evals[abs(evals) > 1e-14] + assert_((nonzero_evals >= 0).all()) + + +def test_precomputed_cases(): + for a, side, expected_u, expected_p in precomputed_cases: + check_precomputed_polar(a, side, expected_u, expected_p) + + +def test_verify_cases(): + for a in verify_cases: + verify_polar(a) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_polar.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_polar.pyc new file mode 100644 index 0000000..bc9b304 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_polar.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_update.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_update.py new file mode 100644 index 0000000..64f1ef9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_update.py @@ -0,0 +1,1697 @@ +from __future__ import division, print_function, absolute_import + +import itertools + +import numpy as np +from numpy.testing import assert_, assert_allclose, assert_equal +from pytest import raises as assert_raises +from scipy import linalg +import scipy.linalg._decomp_update as _decomp_update +from scipy.linalg._decomp_update import * + +def assert_unitary(a, rtol=None, atol=None, assert_sqr=True): + if rtol is None: + rtol = 10.0 ** -(np.finfo(a.dtype).precision-2) + if atol is None: + atol = 2*np.finfo(a.dtype).eps + + if assert_sqr: + assert_(a.shape[0] == a.shape[1], 'unitary matrices must be square') + aTa = np.dot(a.T.conj(), a) + assert_allclose(aTa, np.eye(a.shape[1]), rtol=rtol, atol=atol) + +def assert_upper_tri(a, rtol=None, atol=None): + if rtol is None: + rtol = 10.0 ** -(np.finfo(a.dtype).precision-2) + if atol is None: + atol = 2*np.finfo(a.dtype).eps + mask = np.tri(a.shape[0], a.shape[1], -1, np.bool_) + assert_allclose(a[mask], 0.0, rtol=rtol, atol=atol) + +def check_qr(q, r, a, rtol, atol, assert_sqr=True): + assert_unitary(q, rtol, atol, assert_sqr) + assert_upper_tri(r, rtol, atol) + assert_allclose(q.dot(r), a, rtol=rtol, atol=atol) + +def make_strided(arrs): + strides = [(3, 7), (2, 2), (3, 4), (4, 2), (5, 4), (2, 3), (2, 1), (4, 5)] + kmax = len(strides) + k = 0 + ret = [] + for a in arrs: + if a.ndim == 1: + s = strides[k % kmax] + k += 1 + base = np.zeros(s[0]*a.shape[0]+s[1], a.dtype) + view = base[s[1]::s[0]] + view[...] = a + elif a.ndim == 2: + s = strides[k % kmax] + t = strides[(k+1) % kmax] + k += 2 + base = np.zeros((s[0]*a.shape[0]+s[1], t[0]*a.shape[1]+t[1]), a.dtype) + view = base[s[1]::s[0], t[1]::t[0]] + view[...] = a + else: + raise ValueError('make_strided only works for ndim = 1 or 2 arrays') + ret.append(view) + return ret + +def negate_strides(arrs): + ret = [] + for a in arrs: + b = np.zeros_like(a) + if b.ndim == 2: + b = b[::-1, ::-1] + elif b.ndim == 1: + b = b[::-1] + else: + raise ValueError('negate_strides only works for ndim = 1 or 2 arrays') + b[...] = a + ret.append(b) + return ret + +def nonitemsize_strides(arrs): + out = [] + for a in arrs: + a_dtype = a.dtype + b = np.zeros(a.shape, [('a', a_dtype), ('junk', 'S1')]) + c = b.getfield(a_dtype) + c[...] 
= a + out.append(c) + return out + +def make_nonnative(arrs): + out = [] + for a in arrs: + out.append(a.astype(a.dtype.newbyteorder())) + return out + +class BaseQRdeltas(object): + def setup_method(self): + self.rtol = 10.0 ** -(np.finfo(self.dtype).precision-2) + self.atol = 10 * np.finfo(self.dtype).eps + + def generate(self, type, mode='full'): + np.random.seed(29382) + shape = {'sqr': (8, 8), 'tall': (12, 7), 'fat': (7, 12), + 'Mx1': (8, 1), '1xN': (1, 8), '1x1': (1, 1)}[type] + a = np.random.random(shape) + if np.iscomplexobj(self.dtype.type(1)): + b = np.random.random(shape) + a = a + 1j * b + a = a.astype(self.dtype) + q, r = linalg.qr(a, mode=mode) + return a, q, r + +class BaseQRdelete(BaseQRdeltas): + def test_sqr_1_row(self): + a, q, r = self.generate('sqr') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_p_row(self): + a, q, r = self.generate('sqr') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_1_col(self): + a, q, r = self.generate('sqr') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_p_col(self): + a, q, r = self.generate('sqr') + for ndel in range(2, 6): + for col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_1_row(self): + a, q, r = self.generate('tall') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_p_row(self): + a, q, r = self.generate('tall') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_1_col(self): + a, q, r = self.generate('tall') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_p_col(self): + a, q, r = self.generate('tall') + for ndel in range(2, 6): + for col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_1_row(self): + a, q, r = self.generate('fat') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_p_row(self): + a, q, r = self.generate('fat') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_1_col(self): + a, q, r = self.generate('fat') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_p_col(self): + a, q, r = self.generate('fat') 
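+        # remove every block of 2..5 adjacent columns at each feasible position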
+ for ndel in range(2, 6): + for col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_economic_1_row(self): + # this test always starts and ends with an economic decomp. + a, q, r = self.generate('tall', 'economic') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + # for economic row deletes + # eco - prow = eco + # eco - prow = sqr + # eco - prow = fat + def base_economic_p_row_xxx(self, ndel): + a, q, r = self.generate('tall', 'economic') + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_p_row_economic(self): + # (12, 7) - (3, 7) = (9,7) --> stays economic + self.base_economic_p_row_xxx(3) + + def test_economic_p_row_sqr(self): + # (12, 7) - (5, 7) = (7, 7) --> becomes square + self.base_economic_p_row_xxx(5) + + def test_economic_p_row_fat(self): + # (12, 7) - (7,7) = (5, 7) --> becomes fat + self.base_economic_p_row_xxx(7) + + def test_economic_1_col(self): + a, q, r = self.generate('tall', 'economic') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_p_col(self): + a, q, r = self.generate('tall', 'economic') + for ndel in range(2, 6): + for col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_1_row(self): + a, q, r = self.generate('Mx1') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_p_row(self): + a, q, r = self.generate('Mx1') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_1_col(self): + a, q, r = self.generate('1xN') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_p_col(self): + a, q, r = self.generate('1xN') + for ndel in range(2, 6): + for col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_economic_1_row(self): + a, q, r = self.generate('Mx1', 'economic') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_p_row(self): + a, q, r = self.generate('Mx1', 'economic') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_delete_last_1_row(self): + # full and eco are the same for 1xN + a, q, r = self.generate('1xN') + q1, r1 = qr_delete(q, r, 
0, 1, 'row')
+        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+    def test_delete_last_p_row(self):
+        a, q, r = self.generate('tall', 'full')
+        q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
+        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+        a, q, r = self.generate('tall', 'economic')
+        q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
+        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+    def test_delete_last_1_col(self):
+        a, q, r = self.generate('Mx1', 'economic')
+        q1, r1 = qr_delete(q, r, 0, 1, 'col')
+        assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
+
+        a, q, r = self.generate('Mx1', 'full')
+        q1, r1 = qr_delete(q, r, 0, 1, 'col')
+        assert_unitary(q1)
+        assert_(q1.dtype == q.dtype)
+        assert_(q1.shape == q.shape)
+        assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+    def test_delete_last_p_col(self):
+        a, q, r = self.generate('tall', 'full')
+        q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
+        assert_unitary(q1)
+        assert_(q1.dtype == q.dtype)
+        assert_(q1.shape == q.shape)
+        assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+        a, q, r = self.generate('tall', 'economic')
+        q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
+        assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
+
+    def test_delete_1x1_row_col(self):
+        a, q, r = self.generate('1x1')
+        q1, r1 = qr_delete(q, r, 0, 1, 'row')
+        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+        a, q, r = self.generate('1x1')
+        q1, r1 = qr_delete(q, r, 0, 1, 'col')
+        assert_unitary(q1)
+        assert_(q1.dtype == q.dtype)
+        assert_(q1.shape == q.shape)
+        assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+    # For a full QR, row deletes and single-column deletes should be able
+    # to handle any non-negative strides (only row and column vector
+    # operations are used). Deleting p columns requires Fortran-ordered
+    # Q and R and will make a copy as necessary. Economic QR row deletes
+    # require a contiguous Q.
+
+    def base_non_simple_strides(self, adjust_strides, ks, p, which, overwriteable):
+        if which == 'row':
+            qind = (slice(p, None), slice(p, None))
+            rind = (slice(p, None), slice(None))
+        else:
+            qind = (slice(None), slice(None))
+            rind = (slice(None), slice(None, -p))
+
+        for type, k in itertools.product(['sqr', 'tall', 'fat'], ks):
+            a, q0, r0 = self.generate(type)
+            qs, rs = adjust_strides((q0, r0))
+            if p == 1:
+                a1 = np.delete(a, k, 0 if which == 'row' else 1)
+            else:
+                s = slice(k, k+p)
+                if k < 0:
+                    s = slice(k, k + p + (a.shape[0] if which == 'row' else a.shape[1]))
+                a1 = np.delete(a, s, 0 if which == 'row' else 1)
+
+            # for each variable, q, r we try with it strided and
+            # overwrite=False. Then we try with overwrite=True, and make
+            # sure that q and r are still overwritten.
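+            # qind/rind pick out the still-valid region of the input
+            # buffers after an in-place delete: a p-row delete leaves the
+            # result in q[p:, p:] and r[p:, :], while a p-column delete
+            # leaves q's shape alone and drops the last p columns of r.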
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            q1, r1 = qr_delete(qs, r, k, p, which, False)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+            q1o, r1o = qr_delete(qs, r, k, p, which, True)
+            check_qr(q1o, r1o, a1, self.rtol, self.atol)
+            if overwriteable:
+                assert_allclose(q1o, qs[qind], rtol=self.rtol, atol=self.atol)
+                assert_allclose(r1o, r[rind], rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            q2, r2 = qr_delete(q, rs, k, p, which, False)
+            check_qr(q2, r2, a1, self.rtol, self.atol)
+            q2o, r2o = qr_delete(q, rs, k, p, which, True)
+            check_qr(q2o, r2o, a1, self.rtol, self.atol)
+            if overwriteable:
+                assert_allclose(q2o, q[qind], rtol=self.rtol, atol=self.atol)
+                assert_allclose(r2o, rs[rind], rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            # since some of these were consumed above
+            qs, rs = adjust_strides((q, r))
+            q3, r3 = qr_delete(qs, rs, k, p, which, False)
+            check_qr(q3, r3, a1, self.rtol, self.atol)
+            q3o, r3o = qr_delete(qs, rs, k, p, which, True)
+            check_qr(q3o, r3o, a1, self.rtol, self.atol)
+            if overwriteable:
+                assert_allclose(q3o, qs[qind], rtol=self.rtol, atol=self.atol)
+                assert_allclose(r3o, rs[rind], rtol=self.rtol, atol=self.atol)
+
+    def test_non_unit_strides_1_row(self):
+        self.base_non_simple_strides(make_strided, [0], 1, 'row', True)
+
+    def test_non_unit_strides_p_row(self):
+        self.base_non_simple_strides(make_strided, [0], 3, 'row', True)
+
+    def test_non_unit_strides_1_col(self):
+        self.base_non_simple_strides(make_strided, [0], 1, 'col', True)
+
+    def test_non_unit_strides_p_col(self):
+        self.base_non_simple_strides(make_strided, [0], 3, 'col', False)
+
+    def test_neg_strides_1_row(self):
+        self.base_non_simple_strides(negate_strides, [0], 1, 'row', False)
+
+    def test_neg_strides_p_row(self):
+        self.base_non_simple_strides(negate_strides, [0], 3, 'row', False)
+
+    def test_neg_strides_1_col(self):
+        self.base_non_simple_strides(negate_strides, [0], 1, 'col', False)
+
+    def test_neg_strides_p_col(self):
+        self.base_non_simple_strides(negate_strides, [0], 3, 'col', False)
+
+    def test_non_itemsize_strides_1_row(self):
+        self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'row', False)
+
+    def test_non_itemsize_strides_p_row(self):
+        self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'row', False)
+
+    def test_non_itemsize_strides_1_col(self):
+        self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'col', False)
+
+    def test_non_itemsize_strides_p_col(self):
+        self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'col', False)
+
+    def test_non_native_byte_order_1_row(self):
+        self.base_non_simple_strides(make_nonnative, [0], 1, 'row', False)
+
+    def test_non_native_byte_order_p_row(self):
+        self.base_non_simple_strides(make_nonnative, [0], 3, 'row', False)
+
+    def test_non_native_byte_order_1_col(self):
+        self.base_non_simple_strides(make_nonnative, [0], 1, 'col', False)
+
+    def test_non_native_byte_order_p_col(self):
+        self.base_non_simple_strides(make_nonnative, [0], 3, 'col', False)
+
+    def test_neg_k(self):
+        a, q, r = self.generate('sqr')
+        for k, p, w in itertools.product([-3, -7], [1, 3], ['row', 'col']):
+            q1, r1 = qr_delete(q, r, k, p, w, overwrite_qr=False)
+            if w == 'row':
+                a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[0]), 0)
+            else:
+                a1 = np.delete(a, slice(k+a.shape[1], k+p+a.shape[1]), 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def base_overwrite_qr(self, which, p, test_C, test_F, mode='full'):
+        assert_sqr = (mode == 'full')
+        if which == 'row':
+            qind =
(slice(p, None), slice(p, None))
+            rind = (slice(p, None), slice(None))
+        else:
+            qind = (slice(None), slice(None))
+            rind = (slice(None), slice(None, -p))
+        a, q0, r0 = self.generate('sqr', mode)
+        if p == 1:
+            a1 = np.delete(a, 3, 0 if which == 'row' else 1)
+        else:
+            a1 = np.delete(a, slice(3, 3+p), 0 if which == 'row' else 1)
+
+        # don't overwrite
+        q = q0.copy('F')
+        r = r0.copy('F')
+        q1, r1 = qr_delete(q, r, 3, p, which, False)
+        check_qr(q1, r1, a1, self.rtol, self.atol, assert_sqr)
+        check_qr(q, r, a, self.rtol, self.atol, assert_sqr)
+
+        if test_F:
+            q = q0.copy('F')
+            r = r0.copy('F')
+            q2, r2 = qr_delete(q, r, 3, p, which, True)
+            check_qr(q2, r2, a1, self.rtol, self.atol, assert_sqr)
+            # verify the overwriting
+            assert_allclose(q2, q[qind], rtol=self.rtol, atol=self.atol)
+            assert_allclose(r2, r[rind], rtol=self.rtol, atol=self.atol)
+
+        if test_C:
+            q = q0.copy('C')
+            r = r0.copy('C')
+            q3, r3 = qr_delete(q, r, 3, p, which, True)
+            check_qr(q3, r3, a1, self.rtol, self.atol, assert_sqr)
+            assert_allclose(q3, q[qind], rtol=self.rtol, atol=self.atol)
+            assert_allclose(r3, r[rind], rtol=self.rtol, atol=self.atol)
+
+    def test_overwrite_qr_1_row(self):
+        # any positively strided q and r.
+        self.base_overwrite_qr('row', 1, True, True)
+
+    def test_overwrite_economic_qr_1_row(self):
+        # any contiguous q and positively strided r.
+        self.base_overwrite_qr('row', 1, True, True, 'economic')
+
+    def test_overwrite_qr_1_col(self):
+        # any positively strided q and r.
+        # full and eco share code paths
+        self.base_overwrite_qr('col', 1, True, True)
+
+    def test_overwrite_qr_p_row(self):
+        # any positively strided q and r.
+        self.base_overwrite_qr('row', 3, True, True)
+
+    def test_overwrite_economic_qr_p_row(self):
+        # any contiguous q and positively strided r
+        self.base_overwrite_qr('row', 3, True, True, 'economic')
+
+    def test_overwrite_qr_p_col(self):
+        # only F-ordered q and r can be overwritten for cols
+        # full and eco share code paths
+        self.base_overwrite_qr('col', 3, False, True)
+
+    def test_bad_which(self):
+        a, q, r = self.generate('sqr')
+        assert_raises(ValueError, qr_delete, q, r, 0, which='foo')
+
+    def test_bad_k(self):
+        a, q, r = self.generate('tall')
+        assert_raises(ValueError, qr_delete, q, r, q.shape[0], 1)
+        assert_raises(ValueError, qr_delete, q, r, -q.shape[0]-1, 1)
+        assert_raises(ValueError, qr_delete, q, r, r.shape[1], 1, 'col')
+        assert_raises(ValueError, qr_delete, q, r, -r.shape[1]-1, 1, 'col')
+
+    def test_bad_p(self):
+        a, q, r = self.generate('tall')
+        # p must be positive
+        assert_raises(ValueError, qr_delete, q, r, 0, -1)
+        assert_raises(ValueError, qr_delete, q, r, 0, -1, 'col')
+
+        # and nonzero
+        assert_raises(ValueError, qr_delete, q, r, 0, 0)
+        assert_raises(ValueError, qr_delete, q, r, 0, 0, 'col')
+
+        # must have at least k+p rows or cols, depending.
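+        # e.g. a row delete needs k + p <= q.shape[0]; with k = 3 the
+        # requests below overshoot the available rows or columns by one.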
+ assert_raises(ValueError, qr_delete, q, r, 3, q.shape[0]-2) + assert_raises(ValueError, qr_delete, q, r, 3, r.shape[1]-2, 'col') + + def test_empty_q(self): + a, q, r = self.generate('tall') + # same code path for 'row' and 'col' + assert_raises(ValueError, qr_delete, np.array([]), r, 0, 1) + + def test_empty_r(self): + a, q, r = self.generate('tall') + # same code path for 'row' and 'col' + assert_raises(ValueError, qr_delete, q, np.array([]), 0, 1) + + def test_mismatched_q_and_r(self): + a, q, r = self.generate('tall') + r = r[1:] + assert_raises(ValueError, qr_delete, q, r, 0, 1) + + def test_unsupported_dtypes(self): + dts = ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'float16', 'longdouble', 'longcomplex', + 'bool'] + a, q0, r0 = self.generate('tall') + for dtype in dts: + q = q0.real.astype(dtype) + r = r0.real.astype(dtype) + assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'row') + assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'col') + + assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'row') + assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'col') + + def test_check_finite(self): + a0, q0, r0 = self.generate('tall') + + q = q0.copy('F') + q[1,1] = np.nan + assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'row') + assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'col') + + r = r0.copy('F') + r[1,1] = np.nan + assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'row') + assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'col') + + def test_qr_scalar(self): + a, q, r = self.generate('1x1') + assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'row') + assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'col') + +class TestQRdelete_f(BaseQRdelete): + dtype = np.dtype('f') + +class TestQRdelete_F(BaseQRdelete): + dtype = np.dtype('F') + +class TestQRdelete_d(BaseQRdelete): + dtype = np.dtype('d') + +class TestQRdelete_D(BaseQRdelete): + dtype = np.dtype('D') + +class BaseQRinsert(BaseQRdeltas): + def generate(self, type, mode='full', which='row', p=1): + a, q, r = super(BaseQRinsert, self).generate(type, mode) + + assert_(p > 0) + + # super call set the seed... 
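+        # A row insert needs p new rows of width N, a column insert p new
+        # columns of height M; the branches below build u accordingly.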
+        if which == 'row':
+            if p == 1:
+                u = np.random.random(a.shape[1])
+            else:
+                u = np.random.random((p, a.shape[1]))
+        elif which == 'col':
+            if p == 1:
+                u = np.random.random(a.shape[0])
+            else:
+                u = np.random.random((a.shape[0], p))
+        else:
+            raise ValueError('which should be either "row" or "col"')
+
+        if np.iscomplexobj(self.dtype.type(1)):
+            b = np.random.random(u.shape)
+            u = u + 1j * b
+
+        u = u.astype(self.dtype)
+        return a, q, r, u
+
+    def test_sqr_1_row(self):
+        a, q, r, u = self.generate('sqr', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_p_row(self):
+        # sqr + rows --> fat always
+        a, q, r, u = self.generate('sqr', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row*np.ones(3, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_1_col(self):
+        a, q, r, u = self.generate('sqr', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_p_col(self):
+        # sqr + cols --> fat always
+        a, q, r, u = self.generate('sqr', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col*np.ones(3, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_1_row(self):
+        a, q, r, u = self.generate('tall', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_p_row(self):
+        # tall + rows --> tall always
+        a, q, r, u = self.generate('tall', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row*np.ones(3, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_1_col(self):
+        a, q, r, u = self.generate('tall', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    # for column adds to tall matrices there are three cases to test
+    # tall + pcol --> tall
+    # tall + pcol --> sqr
+    # tall + pcol --> fat
+    def base_tall_p_col_xxx(self, p):
+        a, q, r, u = self.generate('tall', which='col', p=p)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col*np.ones(p, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_p_col_tall(self):
+        # 12x7 + 12x3 = 12x10 --> stays tall
+        self.base_tall_p_col_xxx(3)
+
+    def test_tall_p_col_sqr(self):
+        # 12x7 + 12x5 = 12x12 --> becomes sqr
+        self.base_tall_p_col_xxx(5)
+
+    def test_tall_p_col_fat(self):
+        # 12x7 + 12x7 = 12x14 --> becomes fat
+        self.base_tall_p_col_xxx(7)
+
+    def test_fat_1_row(self):
+        a, q, r, u = self.generate('fat', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    # for row adds to fat matrices there are three cases to test
+    # fat + prow --> fat
+    # fat + prow --> sqr
+    # fat + prow --> tall
+    def base_fat_p_row_xxx(self, p):
+        a, q, r, u = self.generate('fat', which='row', p=p)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 =
np.insert(a, row*np.ones(p, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_p_row_fat(self): + # 7x12 + 3x12 = 10x12 --> stays fat + self.base_fat_p_row_xxx(3) + + def test_fat_p_row_sqr(self): + # 7x12 + 5x12 = 12x12 --> becomes sqr + self.base_fat_p_row_xxx(5) + + def test_fat_p_row_tall(self): + # 7x12 + 7x12 = 14x12 --> becomes tall + self.base_fat_p_row_xxx(7) + + def test_fat_1_col(self): + a, q, r, u = self.generate('fat', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_p_col(self): + # fat + cols --> fat always + a, q, r, u = self.generate('fat', which='col', p=3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_economic_1_row(self): + a, q, r, u = self.generate('tall', 'economic', 'row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_p_row(self): + # tall + rows --> tall always + a, q, r, u = self.generate('tall', 'economic', 'row', 3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False) + a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_1_col(self): + a, q, r, u = self.generate('tall', 'economic', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u.copy(), col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_1_col_bad_update(self): + # When the column to be added lies in the span of Q, the update is + # not meaningful. This is detected, and a LinAlgError is issued. 
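+        # Here q spans the first three coordinate axes and u is e1, which
+        # already lies in that span, so no new orthonormal column exists.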
+ q = np.eye(5, 3, dtype=self.dtype) + r = np.eye(3, dtype=self.dtype) + u = np.array([1, 0, 0, 0, 0], self.dtype) + assert_raises(linalg.LinAlgError, qr_insert, q, r, u, 0, 'col') + + # for column adds to economic matrices there are three cases to test + # eco + pcol --> eco + # eco + pcol --> sqr + # eco + pcol --> fat + def base_economic_p_col_xxx(self, p): + a, q, r, u = self.generate('tall', 'economic', which='col', p=p) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col*np.ones(p, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_p_col_eco(self): + # 12x7 + 12x3 = 12x10 --> stays eco + self.base_economic_p_col_xxx(3) + + def test_economic_p_col_sqr(self): + # 12x7 + 12x5 = 12x12 --> becomes sqr + self.base_economic_p_col_xxx(5) + + def test_economic_p_col_fat(self): + # 12x7 + 12x7 = 12x14 --> becomes fat + self.base_economic_p_col_xxx(7) + + def test_Mx1_1_row(self): + a, q, r, u = self.generate('Mx1', which='row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_p_row(self): + a, q, r, u = self.generate('Mx1', which='row', p=3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_1_col(self): + a, q, r, u = self.generate('Mx1', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_p_col(self): + a, q, r, u = self.generate('Mx1', which='col', p=3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_economic_1_row(self): + a, q, r, u = self.generate('Mx1', 'economic', 'row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_p_row(self): + a, q, r, u = self.generate('Mx1', 'economic', 'row', 3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_1_col(self): + a, q, r, u = self.generate('Mx1', 'economic', 'col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_p_col(self): + a, q, r, u = self.generate('Mx1', 'economic', 'col', 3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_1xN_1_row(self): + a, q, r, u = self.generate('1xN', which='row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_p_row(self): + a, q, r, u = self.generate('1xN', which='row', p=3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + 
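+    # An added illustrative check, not from the original suite: the
+    # economic column-insert behaviour above, condensed into one
+    # self-contained test. A (12, 7) economic factorization grows by
+    # three columns inserted at column 2 and must still reconstruct the
+    # updated matrix (the test name and insertion point are arbitrary).
+    def test_economic_p_col_sketch(self):
+        a, q, r, u = self.generate('tall', 'economic', which='col', p=3)
+        q1, r1 = qr_insert(q, r, u, 2, 'col', overwrite_qru=False)
+        a1 = np.insert(a, 2*np.ones(3, np.intp), u, 1)
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+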
def test_1xN_1_col(self): + a, q, r, u = self.generate('1xN', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_p_col(self): + a, q, r, u = self.generate('1xN', which='col', p=3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_1_row(self): + a, q, r, u = self.generate('1x1', which='row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_p_row(self): + a, q, r, u = self.generate('1x1', which='row', p=3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row*np.ones(3, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_1_col(self): + a, q, r, u = self.generate('1x1', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_p_col(self): + a, q, r, u = self.generate('1x1', which='col', p=3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col*np.ones(3, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_1_scalar(self): + a, q, r, u = self.generate('1x1', which='row') + assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'row') + assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'row') + assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'row') + + assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'col') + assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'col') + assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'col') + + def base_non_simple_strides(self, adjust_strides, k, p, which): + for type in ['sqr', 'tall', 'fat']: + a, q0, r0, u0 = self.generate(type, which=which, p=p) + qs, rs, us = adjust_strides((q0, r0, u0)) + if p == 1: + ai = np.insert(a, k, u0, 0 if which == 'row' else 1) + else: + ai = np.insert(a, k*np.ones(p, np.intp), + u0 if which == 'row' else u0, + 0 if which == 'row' else 1) + + # for each variable, q, r, u we try with it strided and + # overwrite=False. Then we try with overwrite=True. Nothing + # is checked to see if it can be overwritten, since only + # F ordered Q can be overwritten when adding columns. 
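+            # (unlike the delete tests there is no assert_allclose against
+            # the input buffers here: the overwrite_qru=True calls below
+            # only have to produce a correct factorization)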
+ + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + q1, r1 = qr_insert(qs, r, u, k, which, overwrite_qru=False) + check_qr(q1, r1, ai, self.rtol, self.atol) + q1o, r1o = qr_insert(qs, r, u, k, which, overwrite_qru=True) + check_qr(q1o, r1o, ai, self.rtol, self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + q2, r2 = qr_insert(q, rs, u, k, which, overwrite_qru=False) + check_qr(q2, r2, ai, self.rtol, self.atol) + q2o, r2o = qr_insert(q, rs, u, k, which, overwrite_qru=True) + check_qr(q2o, r2o, ai, self.rtol, self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + q3, r3 = qr_insert(q, r, us, k, which, overwrite_qru=False) + check_qr(q3, r3, ai, self.rtol, self.atol) + q3o, r3o = qr_insert(q, r, us, k, which, overwrite_qru=True) + check_qr(q3o, r3o, ai, self.rtol, self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + # since some of these were consumed above + qs, rs, us = adjust_strides((q, r, u)) + q5, r5 = qr_insert(qs, rs, us, k, which, overwrite_qru=False) + check_qr(q5, r5, ai, self.rtol, self.atol) + q5o, r5o = qr_insert(qs, rs, us, k, which, overwrite_qru=True) + check_qr(q5o, r5o, ai, self.rtol, self.atol) + + def test_non_unit_strides_1_row(self): + self.base_non_simple_strides(make_strided, 0, 1, 'row') + + def test_non_unit_strides_p_row(self): + self.base_non_simple_strides(make_strided, 0, 3, 'row') + + def test_non_unit_strides_1_col(self): + self.base_non_simple_strides(make_strided, 0, 1, 'col') + + def test_non_unit_strides_p_col(self): + self.base_non_simple_strides(make_strided, 0, 3, 'col') + + def test_neg_strides_1_row(self): + self.base_non_simple_strides(negate_strides, 0, 1, 'row') + + def test_neg_strides_p_row(self): + self.base_non_simple_strides(negate_strides, 0, 3, 'row') + + def test_neg_strides_1_col(self): + self.base_non_simple_strides(negate_strides, 0, 1, 'col') + + def test_neg_strides_p_col(self): + self.base_non_simple_strides(negate_strides, 0, 3, 'col') + + def test_non_itemsize_strides_1_row(self): + self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'row') + + def test_non_itemsize_strides_p_row(self): + self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'row') + + def test_non_itemsize_strides_1_col(self): + self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'col') + + def test_non_itemsize_strides_p_col(self): + self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'col') + + def test_non_native_byte_order_1_row(self): + self.base_non_simple_strides(make_nonnative, 0, 1, 'row') + + def test_non_native_byte_order_p_row(self): + self.base_non_simple_strides(make_nonnative, 0, 3, 'row') + + def test_non_native_byte_order_1_col(self): + self.base_non_simple_strides(make_nonnative, 0, 1, 'col') + + def test_non_native_byte_order_p_col(self): + self.base_non_simple_strides(make_nonnative, 0, 3, 'col') + + def test_overwrite_qu_rank_1(self): + # when inserting rows, the size of both Q and R change, so only + # column inserts can overwrite q. Only complex column inserts + # with C ordered Q overwrite u. 
Any contiguous Q is overwritten + # when inserting 1 column + a, q0, r, u, = self.generate('sqr', which='col', p=1) + q = q0.copy('C') + u0 = u.copy() + # don't overwrite + q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False) + a1 = np.insert(a, 0, u0, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + check_qr(q, r, a, self.rtol, self.atol) + + # try overwriting + q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True) + check_qr(q2, r2, a1, self.rtol, self.atol) + # verify the overwriting + assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + assert_allclose(u, u0.conj(), self.rtol, self.atol) + + # now try with a fortran ordered Q + qF = q0.copy('F') + u1 = u0.copy() + q3, r3 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=False) + check_qr(q3, r3, a1, self.rtol, self.atol) + check_qr(qF, r, a, self.rtol, self.atol) + + # try overwriting + q4, r4 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=True) + check_qr(q4, r4, a1, self.rtol, self.atol) + assert_allclose(q4, qF, rtol=self.rtol, atol=self.atol) + + def test_overwrite_qu_rank_p(self): + # when inserting rows, the size of both Q and R change, so only + # column inserts can potentially overwrite Q. In practice, only + # F ordered Q are overwritten with a rank p update. + a, q0, r, u, = self.generate('sqr', which='col', p=3) + q = q0.copy('F') + a1 = np.insert(a, np.zeros(3, np.intp), u, 1) + + # don't overwrite + q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False) + check_qr(q1, r1, a1, self.rtol, self.atol) + check_qr(q, r, a, self.rtol, self.atol) + + # try overwriting + q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True) + check_qr(q2, r2, a1, self.rtol, self.atol) + assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + + def test_empty_inputs(self): + a, q, r, u = self.generate('sqr', which='row') + assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'row') + assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'row') + assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'row') + assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'col') + assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'col') + assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'col') + + def test_mismatched_shapes(self): + a, q, r, u = self.generate('tall', which='row') + assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'row') + assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'row') + assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'row') + assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'col') + assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'col') + assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'col') + + def test_unsupported_dtypes(self): + dts = ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'float16', 'longdouble', 'longcomplex', + 'bool'] + a, q0, r0, u0 = self.generate('sqr', which='row') + for dtype in dts: + q = q0.real.astype(dtype) + r = r0.real.astype(dtype) + u = u0.real.astype(dtype) + assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row') + assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col') + assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row') + assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col') + assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row') + assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col') + + def test_check_finite(self): + a0, q0, r0, u0 = self.generate('sqr', which='row', p=3) + + q = q0.copy('F') + q[1,1] = np.nan + assert_raises(ValueError, qr_insert, q, 
r0, u0[:,0], 0, 'row') + assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row') + assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'col') + assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col') + + r = r0.copy('F') + r[1,1] = np.nan + assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'row') + assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row') + assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'col') + assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col') + + u = u0.copy('F') + u[0,0] = np.nan + assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'row') + assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row') + assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'col') + assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col') + +class TestQRinsert_f(BaseQRinsert): + dtype = np.dtype('f') + +class TestQRinsert_F(BaseQRinsert): + dtype = np.dtype('F') + +class TestQRinsert_d(BaseQRinsert): + dtype = np.dtype('d') + +class TestQRinsert_D(BaseQRinsert): + dtype = np.dtype('D') + +class BaseQRupdate(BaseQRdeltas): + def generate(self, type, mode='full', p=1): + a, q, r = super(BaseQRupdate, self).generate(type, mode) + + # super call set the seed... + if p == 1: + u = np.random.random(q.shape[0]) + v = np.random.random(r.shape[1]) + else: + u = np.random.random((q.shape[0], p)) + v = np.random.random((r.shape[1], p)) + + if np.iscomplexobj(self.dtype.type(1)): + b = np.random.random(u.shape) + u = u + 1j * b + + c = np.random.random(v.shape) + v = v + 1j * c + + u = u.astype(self.dtype) + v = v.astype(self.dtype) + return a, q, r, u, v + + def test_sqr_rank_1(self): + a, q, r, u, v = self.generate('sqr') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_rank_p(self): + # test ndim = 2, rank 1 updates here too + for p in [1, 2, 3, 5]: + a, q, r, u, v = self.generate('sqr', p=p) + if p == 1: + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_rank_1(self): + a, q, r, u, v = self.generate('tall') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_rank_p(self): + for p in [1, 2, 3, 5]: + a, q, r, u, v = self.generate('tall', p=p) + if p == 1: + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_rank_1(self): + a, q, r, u, v = self.generate('fat') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_rank_p(self): + for p in [1, 2, 3, 5]: + a, q, r, u, v = self.generate('fat', p=p) + if p == 1: + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_economic_rank_1(self): + a, q, r, u, v = self.generate('tall', 'economic') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_rank_p(self): + for p in [1, 2, 3, 5]: + a, q, r, u, v = self.generate('tall', 'economic', p) + if p == 1: + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + 
check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_rank_1(self): + a, q, r, u, v = self.generate('Mx1') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_rank_p(self): + # when M or N == 1, only a rank 1 update is allowed. This isn't + # fundamental limitation, but the code does not support it. + a, q, r, u, v = self.generate('Mx1', p=1) + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_economic_rank_1(self): + a, q, r, u, v = self.generate('Mx1', 'economic') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_rank_p(self): + # when M or N == 1, only a rank 1 update is allowed. This isn't + # fundamental limitation, but the code does not support it. + a, q, r, u, v = self.generate('Mx1', 'economic', p=1) + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_1xN_rank_1(self): + a, q, r, u, v = self.generate('1xN') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_rank_p(self): + # when M or N == 1, only a rank 1 update is allowed. This isn't + # fundamental limitation, but the code does not support it. + a, q, r, u, v = self.generate('1xN', p=1) + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_rank_1(self): + a, q, r, u, v = self.generate('1x1') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_rank_p(self): + # when M or N == 1, only a rank 1 update is allowed. This isn't + # fundamental limitation, but the code does not support it. + a, q, r, u, v = self.generate('1x1', p=1) + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_rank_1_scalar(self): + a, q, r, u, v = self.generate('1x1') + assert_raises(ValueError, qr_update, q[0, 0], r, u, v) + assert_raises(ValueError, qr_update, q, r[0, 0], u, v) + assert_raises(ValueError, qr_update, q, r, u[0], v) + assert_raises(ValueError, qr_update, q, r, u, v[0]) + + def base_non_simple_strides(self, adjust_strides, mode, p, overwriteable): + assert_sqr = False if mode == 'economic' else True + for type in ['sqr', 'tall', 'fat']: + a, q0, r0, u0, v0 = self.generate(type, mode, p) + qs, rs, us, vs = adjust_strides((q0, r0, u0, v0)) + if p == 1: + aup = a + np.outer(u0, v0.conj()) + else: + aup = a + np.dot(u0, v0.T.conj()) + + # for each variable, q, r, u, v we try with it strided and + # overwrite=False. Then we try with overwrite=True, and make + # sure that if p == 1, r and v are still overwritten. + # a strided q and u must always be copied. 
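+            # v is taken as a C copy below because a rank-p update works
+            # with v.T, and the transpose of a C-ordered array is F-ordered.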
+ + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + q1, r1 = qr_update(qs, r, u, v, False) + check_qr(q1, r1, aup, self.rtol, self.atol, assert_sqr) + q1o, r1o = qr_update(qs, r, u, v, True) + check_qr(q1o, r1o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r1o, r, rtol=self.rtol, atol=self.atol) + assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + q2, r2 = qr_update(q, rs, u, v, False) + check_qr(q2, r2, aup, self.rtol, self.atol, assert_sqr) + q2o, r2o = qr_update(q, rs, u, v, True) + check_qr(q2o, r2o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r2o, rs, rtol=self.rtol, atol=self.atol) + assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + q3, r3 = qr_update(q, r, us, v, False) + check_qr(q3, r3, aup, self.rtol, self.atol, assert_sqr) + q3o, r3o = qr_update(q, r, us, v, True) + check_qr(q3o, r3o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r3o, r, rtol=self.rtol, atol=self.atol) + assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + q4, r4 = qr_update(q, r, u, vs, False) + check_qr(q4, r4, aup, self.rtol, self.atol, assert_sqr) + q4o, r4o = qr_update(q, r, u, vs, True) + check_qr(q4o, r4o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r4o, r, rtol=self.rtol, atol=self.atol) + assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + # since some of these were consumed above + qs, rs, us, vs = adjust_strides((q, r, u, v)) + q5, r5 = qr_update(qs, rs, us, vs, False) + check_qr(q5, r5, aup, self.rtol, self.atol, assert_sqr) + q5o, r5o = qr_update(qs, rs, us, vs, True) + check_qr(q5o, r5o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r5o, rs, rtol=self.rtol, atol=self.atol) + assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol) + + def test_non_unit_strides_rank_1(self): + self.base_non_simple_strides(make_strided, 'full', 1, True) + + def test_non_unit_strides_economic_rank_1(self): + self.base_non_simple_strides(make_strided, 'economic', 1, True) + + def test_non_unit_strides_rank_p(self): + self.base_non_simple_strides(make_strided, 'full', 3, False) + + def test_non_unit_strides_economic_rank_p(self): + self.base_non_simple_strides(make_strided, 'economic', 3, False) + + def test_neg_strides_rank_1(self): + self.base_non_simple_strides(negate_strides, 'full', 1, False) + + def test_neg_strides_economic_rank_1(self): + self.base_non_simple_strides(negate_strides, 'economic', 1, False) + + def test_neg_strides_rank_p(self): + self.base_non_simple_strides(negate_strides, 'full', 3, False) + + def test_neg_strides_economic_rank_p(self): + self.base_non_simple_strides(negate_strides, 'economic', 3, False) + + def test_non_itemsize_strides_rank_1(self): + self.base_non_simple_strides(nonitemsize_strides, 'full', 1, False) + + def test_non_itemsize_strides_economic_rank_1(self): + self.base_non_simple_strides(nonitemsize_strides, 'economic', 1, False) + + def test_non_itemsize_strides_rank_p(self): + self.base_non_simple_strides(nonitemsize_strides, 'full', 3, False) + + def test_non_itemsize_strides_economic_rank_p(self): + 
self.base_non_simple_strides(nonitemsize_strides, 'economic', 3, False) + + def test_non_native_byte_order_rank_1(self): + self.base_non_simple_strides(make_nonnative, 'full', 1, False) + + def test_non_native_byte_order_economic_rank_1(self): + self.base_non_simple_strides(make_nonnative, 'economic', 1, False) + + def test_non_native_byte_order_rank_p(self): + self.base_non_simple_strides(make_nonnative, 'full', 3, False) + + def test_non_native_byte_order_economic_rank_p(self): + self.base_non_simple_strides(make_nonnative, 'economic', 3, False) + + def test_overwrite_qruv_rank_1(self): + # Any positive strided q, r, u, and v can be overwritten for a rank 1 + # update, only checking C and F contiguous. + a, q0, r0, u0, v0 = self.generate('sqr') + a1 = a + np.outer(u0, v0.conj()) + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('F') + + # don't overwrite + q1, r1 = qr_update(q, r, u, v, False) + check_qr(q1, r1, a1, self.rtol, self.atol) + check_qr(q, r, a, self.rtol, self.atol) + + q2, r2 = qr_update(q, r, u, v, True) + check_qr(q2, r2, a1, self.rtol, self.atol) + # verify the overwriting, no good way to check u and v. + assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r2, r, rtol=self.rtol, atol=self.atol) + + q = q0.copy('C') + r = r0.copy('C') + u = u0.copy('C') + v = v0.copy('C') + q3, r3 = qr_update(q, r, u, v, True) + check_qr(q3, r3, a1, self.rtol, self.atol) + assert_allclose(q3, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r3, r, rtol=self.rtol, atol=self.atol) + + def test_overwrite_qruv_rank_1_economic(self): + # updating economic decompositions can overwrite any contigous r, + # and positively strided r and u. V is only ever read. + # only checking C and F contiguous. + a, q0, r0, u0, v0 = self.generate('tall', 'economic') + a1 = a + np.outer(u0, v0.conj()) + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('F') + + # don't overwrite + q1, r1 = qr_update(q, r, u, v, False) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + check_qr(q, r, a, self.rtol, self.atol, False) + + q2, r2 = qr_update(q, r, u, v, True) + check_qr(q2, r2, a1, self.rtol, self.atol, False) + # verify the overwriting, no good way to check u and v. + assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r2, r, rtol=self.rtol, atol=self.atol) + + q = q0.copy('C') + r = r0.copy('C') + u = u0.copy('C') + v = v0.copy('C') + q3, r3 = qr_update(q, r, u, v, True) + check_qr(q3, r3, a1, self.rtol, self.atol, False) + assert_allclose(q3, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r3, r, rtol=self.rtol, atol=self.atol) + + def test_overwrite_qruv_rank_p(self): + # for rank p updates, q r must be F contiguous, v must be C (v.T --> F) + # and u can be C or F, but is only overwritten if Q is C and complex + a, q0, r0, u0, v0 = self.generate('sqr', p=3) + a1 = a + np.dot(u0, v0.T.conj()) + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + + # don't overwrite + q1, r1 = qr_update(q, r, u, v, False) + check_qr(q1, r1, a1, self.rtol, self.atol) + check_qr(q, r, a, self.rtol, self.atol) + + q2, r2 = qr_update(q, r, u, v, True) + check_qr(q2, r2, a1, self.rtol, self.atol) + # verify the overwriting, no good way to check u and v. 
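+        # q and r, by contrast, are updated in place; the next two checks
+        # confirm it.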
+ assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r2, r, rtol=self.rtol, atol=self.atol) + + def test_empty_inputs(self): + a, q, r, u, v = self.generate('tall') + assert_raises(ValueError, qr_update, np.array([]), r, u, v) + assert_raises(ValueError, qr_update, q, np.array([]), u, v) + assert_raises(ValueError, qr_update, q, r, np.array([]), v) + assert_raises(ValueError, qr_update, q, r, u, np.array([])) + + def test_mismatched_shapes(self): + a, q, r, u, v = self.generate('tall') + assert_raises(ValueError, qr_update, q, r[1:], u, v) + assert_raises(ValueError, qr_update, q[:-2], r, u, v) + assert_raises(ValueError, qr_update, q, r, u[1:], v) + assert_raises(ValueError, qr_update, q, r, u, v[1:]) + + def test_unsupported_dtypes(self): + dts = ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'float16', 'longdouble', 'longcomplex', + 'bool'] + a, q0, r0, u0, v0 = self.generate('tall') + for dtype in dts: + q = q0.real.astype(dtype) + r = r0.real.astype(dtype) + u = u0.real.astype(dtype) + v = v0.real.astype(dtype) + assert_raises(ValueError, qr_update, q, r0, u0, v0) + assert_raises(ValueError, qr_update, q0, r, u0, v0) + assert_raises(ValueError, qr_update, q0, r0, u, v0) + assert_raises(ValueError, qr_update, q0, r0, u0, v) + + def test_integer_input(self): + q = np.arange(16).reshape(4, 4) + r = q.copy() # doesn't matter + u = q[:, 0].copy() + v = r[0, :].copy() + assert_raises(ValueError, qr_update, q, r, u, v) + + def test_check_finite(self): + a0, q0, r0, u0, v0 = self.generate('tall', p=3) + + q = q0.copy('F') + q[1,1] = np.nan + assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q, r0, u0, v0) + + r = r0.copy('F') + r[1,1] = np.nan + assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q0, r, u0, v0) + + u = u0.copy('F') + u[0,0] = np.nan + assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q0, r0, u, v0) + + v = v0.copy('F') + v[0,0] = np.nan + assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0]) + assert_raises(ValueError, qr_update, q0, r0, u, v) + + def test_economic_check_finite(self): + a0, q0, r0, u0, v0 = self.generate('tall', mode='economic', p=3) + + q = q0.copy('F') + q[1,1] = np.nan + assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q, r0, u0, v0) + + r = r0.copy('F') + r[1,1] = np.nan + assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q0, r, u0, v0) + + u = u0.copy('F') + u[0,0] = np.nan + assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q0, r0, u, v0) + + v = v0.copy('F') + v[0,0] = np.nan + assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0]) + assert_raises(ValueError, qr_update, q0, r0, u, v) + + def test_u_exactly_in_span_q(self): + q = np.array([[0, 0], [0, 0], [1, 0], [0, 1]], self.dtype) + r = np.array([[1, 0], [0, 1]], self.dtype) + u = np.array([0, 0, 0, -1], self.dtype) + v = np.array([1, 2], self.dtype) + q1, r1 = qr_update(q, r, u, v) + a1 = np.dot(q, r) + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + +class TestQRupdate_f(BaseQRupdate): + dtype = np.dtype('f') + +class TestQRupdate_F(BaseQRupdate): + dtype = np.dtype('F') + +class TestQRupdate_d(BaseQRupdate): + dtype = np.dtype('d') + +class TestQRupdate_D(BaseQRupdate): + dtype = np.dtype('D') + +def 
test_form_qTu():
+    # We want to ensure that all of the code paths through this function are
+    # tested. Most of them should be hit with the rest of the test suite, but
+    # explicit tests make clear precisely what is being tested.
+    #
+    # This function expects that Q is either C or F contiguous and square.
+    # Economic mode decompositions (Q is (M, N), M != N) do not go through this
+    # function. U may have any positive strides.
+    #
+    # Some of these tests are duplicates, since contiguous 1d arrays are both C
+    # and F.
+
+    q_order = ['F', 'C']
+    q_shape = [(8, 8), ]
+    u_order = ['F', 'C', 'A']  # here 'A' means neither F nor C contiguous
+    u_shape = [1, 3]
+    dtype = ['f', 'd', 'F', 'D']
+
+    for qo, qs, uo, us, d in \
+            itertools.product(q_order, q_shape, u_order, u_shape, dtype):
+        if us == 1:
+            check_form_qTu(qo, qs, uo, us, 1, d)
+            check_form_qTu(qo, qs, uo, us, 2, d)
+        else:
+            check_form_qTu(qo, qs, uo, us, 2, d)
+
+def check_form_qTu(q_order, q_shape, u_order, u_shape, u_ndim, dtype):
+    np.random.seed(47)
+    if u_shape == 1 and u_ndim == 1:
+        u_shape = (q_shape[0],)
+    else:
+        u_shape = (q_shape[0], u_shape)
+    dtype = np.dtype(dtype)
+
+    if dtype.char in 'fd':
+        q = np.random.random(q_shape)
+        u = np.random.random(u_shape)
+    elif dtype.char in 'FD':
+        q = np.random.random(q_shape) + 1j*np.random.random(q_shape)
+        u = np.random.random(u_shape) + 1j*np.random.random(u_shape)
+    else:
+        raise ValueError("form_qTu doesn't support this dtype")
+
+    q = np.require(q, dtype, q_order)
+    if u_order != 'A':
+        u = np.require(u, dtype, u_order)
+    else:
+        u, = make_strided((u.astype(dtype),))
+
+    rtol = 10.0 ** -(np.finfo(dtype).precision-2)
+    atol = 2*np.finfo(dtype).eps
+
+    expected = np.dot(q.T.conj(), u)
+    res = _decomp_update._form_qTu(q, u)
+    assert_allclose(res, expected, rtol=rtol, atol=atol)
+
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_update.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_update.pyc
new file mode 100644
index 0000000..55eaf8c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp_update.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_fblas.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_fblas.py
new file mode 100644
index 0000000..e25e91e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_fblas.py
@@ -0,0 +1,611 @@
+# Test the interfaces to the Fortran BLAS wrappers.
+#
+# These tests exercise the wrapper interfaces more than the underlying BLAS.
+# Only very small matrices are checked -- N=3 or so.
+#
+# !! Complex calculations really aren't checked that carefully.
+# !! Only real-valued complex numbers are used in tests.
+
+from __future__ import division, print_function, absolute_import
+
+from numpy import float32, float64, complex64, complex128, arange, array, \
+    zeros, shape, transpose, newaxis, common_type, conjugate
+
+from scipy.linalg import _fblas as fblas
+
+from scipy._lib.six import xrange
+
+from numpy.testing import assert_array_equal, \
+    assert_allclose, assert_array_almost_equal, assert_
+
+import pytest
+
+# decimal accuracy to require between Python and LAPACK/BLAS calculations
+accuracy = 5
+
+# Since numpy.dot likely uses the same BLAS, use this routine
+# to check.
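+# (The hand-rolled triple loop below is deliberately BLAS-free, so an
+# error in the library under test cannot also corrupt the reference.)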
+ + +def matrixmultiply(a, b): + if len(b.shape) == 1: + b_is_vector = True + b = b[:, newaxis] + else: + b_is_vector = False + assert_(a.shape[1] == b.shape[0]) + c = zeros((a.shape[0], b.shape[1]), common_type(a, b)) + for i in xrange(a.shape[0]): + for j in xrange(b.shape[1]): + s = 0 + for k in xrange(a.shape[1]): + s += a[i, k] * b[k, j] + c[i, j] = s + if b_is_vector: + c = c.reshape((a.shape[0],)) + return c + +################################################## +# Test blas ?axpy + + +class BaseAxpy(object): + ''' Mixin class for axpy tests ''' + + def test_default_a(self): + x = arange(3., dtype=self.dtype) + y = arange(3., dtype=x.dtype) + real_y = x*1.+y + y = self.blas_func(x, y) + assert_array_equal(real_y, y) + + def test_simple(self): + x = arange(3., dtype=self.dtype) + y = arange(3., dtype=x.dtype) + real_y = x*3.+y + y = self.blas_func(x, y, a=3.) + assert_array_equal(real_y, y) + + def test_x_stride(self): + x = arange(6., dtype=self.dtype) + y = zeros(3, x.dtype) + y = arange(3., dtype=x.dtype) + real_y = x[::2]*3.+y + y = self.blas_func(x, y, a=3., n=3, incx=2) + assert_array_equal(real_y, y) + + def test_y_stride(self): + x = arange(3., dtype=self.dtype) + y = zeros(6, x.dtype) + real_y = x*3.+y[::2] + y = self.blas_func(x, y, a=3., n=3, incy=2) + assert_array_equal(real_y, y[::2]) + + def test_x_and_y_stride(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + real_y = x[::4]*3.+y[::2] + y = self.blas_func(x, y, a=3., n=3, incx=4, incy=2) + assert_array_equal(real_y, y[::2]) + + def test_x_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=4, incx=5) + + def test_y_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=3, incy=5) + + +try: + class TestSaxpy(BaseAxpy): + blas_func = fblas.saxpy + dtype = float32 +except AttributeError: + class TestSaxpy: + pass + + +class TestDaxpy(BaseAxpy): + blas_func = fblas.daxpy + dtype = float64 + + +try: + class TestCaxpy(BaseAxpy): + blas_func = fblas.caxpy + dtype = complex64 +except AttributeError: + class TestCaxpy: + pass + + +class TestZaxpy(BaseAxpy): + blas_func = fblas.zaxpy + dtype = complex128 + + +################################################## +# Test blas ?scal + +class BaseScal(object): + ''' Mixin class for scal testing ''' + + def test_simple(self): + x = arange(3., dtype=self.dtype) + real_x = x*3. 
+ x = self.blas_func(3., x) + assert_array_equal(real_x, x) + + def test_x_stride(self): + x = arange(6., dtype=self.dtype) + real_x = x.copy() + real_x[::2] = x[::2]*array(3., self.dtype) + x = self.blas_func(3., x, n=3, incx=2) + assert_array_equal(real_x, x) + + def test_x_bad_size(self): + x = arange(12., dtype=self.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(2., x, n=4, incx=5) + + +try: + class TestSscal(BaseScal): + blas_func = fblas.sscal + dtype = float32 +except AttributeError: + class TestSscal: + pass + + +class TestDscal(BaseScal): + blas_func = fblas.dscal + dtype = float64 + + +try: + class TestCscal(BaseScal): + blas_func = fblas.cscal + dtype = complex64 +except AttributeError: + class TestCscal: + pass + + +class TestZscal(BaseScal): + blas_func = fblas.zscal + dtype = complex128 + + +################################################## +# Test blas ?copy + +class BaseCopy(object): + ''' Mixin class for copy testing ''' + + def test_simple(self): + x = arange(3., dtype=self.dtype) + y = zeros(shape(x), x.dtype) + y = self.blas_func(x, y) + assert_array_equal(x, y) + + def test_x_stride(self): + x = arange(6., dtype=self.dtype) + y = zeros(3, x.dtype) + y = self.blas_func(x, y, n=3, incx=2) + assert_array_equal(x[::2], y) + + def test_y_stride(self): + x = arange(3., dtype=self.dtype) + y = zeros(6, x.dtype) + y = self.blas_func(x, y, n=3, incy=2) + assert_array_equal(x, y[::2]) + + def test_x_and_y_stride(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + y = self.blas_func(x, y, n=3, incx=4, incy=2) + assert_array_equal(x[::4], y[::2]) + + def test_x_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=4, incx=5) + + def test_y_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=3, incy=5) + + # def test_y_bad_type(self): + ## Hmmm. Should this work? What should be the output. 
+ # x = arange(3.,dtype=self.dtype) + # y = zeros(shape(x)) + # self.blas_func(x,y) + # assert_array_equal(x,y) + + +try: + class TestScopy(BaseCopy): + blas_func = fblas.scopy + dtype = float32 +except AttributeError: + class TestScopy: + pass + + +class TestDcopy(BaseCopy): + blas_func = fblas.dcopy + dtype = float64 + + +try: + class TestCcopy(BaseCopy): + blas_func = fblas.ccopy + dtype = complex64 +except AttributeError: + class TestCcopy: + pass + + +class TestZcopy(BaseCopy): + blas_func = fblas.zcopy + dtype = complex128 + + +################################################## +# Test blas ?swap + +class BaseSwap(object): + ''' Mixin class for swap tests ''' + + def test_simple(self): + x = arange(3., dtype=self.dtype) + y = zeros(shape(x), x.dtype) + desired_x = y.copy() + desired_y = x.copy() + x, y = self.blas_func(x, y) + assert_array_equal(desired_x, x) + assert_array_equal(desired_y, y) + + def test_x_stride(self): + x = arange(6., dtype=self.dtype) + y = zeros(3, x.dtype) + desired_x = y.copy() + desired_y = x.copy()[::2] + x, y = self.blas_func(x, y, n=3, incx=2) + assert_array_equal(desired_x, x[::2]) + assert_array_equal(desired_y, y) + + def test_y_stride(self): + x = arange(3., dtype=self.dtype) + y = zeros(6, x.dtype) + desired_x = y.copy()[::2] + desired_y = x.copy() + x, y = self.blas_func(x, y, n=3, incy=2) + assert_array_equal(desired_x, x) + assert_array_equal(desired_y, y[::2]) + + def test_x_and_y_stride(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + desired_x = y.copy()[::2] + desired_y = x.copy()[::4] + x, y = self.blas_func(x, y, n=3, incx=4, incy=2) + assert_array_equal(desired_x, x[::4]) + assert_array_equal(desired_y, y[::2]) + + def test_x_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=4, incx=5) + + def test_y_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=3, incy=5) + + +try: + class TestSswap(BaseSwap): + blas_func = fblas.sswap + dtype = float32 +except AttributeError: + class TestSswap: + pass + + +class TestDswap(BaseSwap): + blas_func = fblas.dswap + dtype = float64 + + +try: + class TestCswap(BaseSwap): + blas_func = fblas.cswap + dtype = complex64 +except AttributeError: + class TestCswap: + pass + + +class TestZswap(BaseSwap): + blas_func = fblas.zswap + dtype = complex128 + +################################################## +# Test blas ?gemv +# This will be a mess to test all cases. 
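+# Editor's note -- an illustrative sketch, not part of the original scipy
+# test file: the ?gemv routines exercised below compute
+#     y <- alpha * op(A).dot(x) + beta * y,
+# where op(A) is A, A.T or A.conj().T depending on the `trans` keyword.
+# A minimal check of that contract against numpy.dot, assuming the
+# double-precision wrapper scipy.linalg.blas.dgemv is available:
+#
+#     import numpy as np
+#     from scipy.linalg import blas
+#     A = np.arange(6., dtype=np.float64).reshape(3, 2)
+#     x = np.array([1., 2.])
+#     y0 = np.ones(3)
+#     y = blas.dgemv(2.0, A, x, beta=3.0, y=y0)  # y0 is not overwritten
+#     assert np.allclose(y, 2.0 * A.dot(x) + 3.0 * y0)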
+ + +class BaseGemv(object): + ''' Mixin class for gemv tests ''' + + def get_data(self, x_stride=1, y_stride=1): + mult = array(1, dtype=self.dtype) + if self.dtype in [complex64, complex128]: + mult = array(1+1j, dtype=self.dtype) + from numpy.random import normal, seed + seed(1234) + alpha = array(1., dtype=self.dtype) * mult + beta = array(1., dtype=self.dtype) * mult + a = normal(0., 1., (3, 3)).astype(self.dtype) * mult + x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult + y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult + return alpha, beta, a, x, y + + def test_simple(self): + alpha, beta, a, x, y = self.get_data() + desired_y = alpha*matrixmultiply(a, x)+beta*y + y = self.blas_func(alpha, a, x, beta, y) + assert_array_almost_equal(desired_y, y) + + def test_default_beta_y(self): + alpha, beta, a, x, y = self.get_data() + desired_y = matrixmultiply(a, x) + y = self.blas_func(1, a, x) + assert_array_almost_equal(desired_y, y) + + def test_simple_transpose(self): + alpha, beta, a, x, y = self.get_data() + desired_y = alpha*matrixmultiply(transpose(a), x)+beta*y + y = self.blas_func(alpha, a, x, beta, y, trans=1) + assert_array_almost_equal(desired_y, y) + + def test_simple_transpose_conj(self): + alpha, beta, a, x, y = self.get_data() + desired_y = alpha*matrixmultiply(transpose(conjugate(a)), x)+beta*y + y = self.blas_func(alpha, a, x, beta, y, trans=2) + assert_array_almost_equal(desired_y, y) + + def test_x_stride(self): + alpha, beta, a, x, y = self.get_data(x_stride=2) + desired_y = alpha*matrixmultiply(a, x[::2])+beta*y + y = self.blas_func(alpha, a, x, beta, y, incx=2) + assert_array_almost_equal(desired_y, y) + + def test_x_stride_transpose(self): + alpha, beta, a, x, y = self.get_data(x_stride=2) + desired_y = alpha*matrixmultiply(transpose(a), x[::2])+beta*y + y = self.blas_func(alpha, a, x, beta, y, trans=1, incx=2) + assert_array_almost_equal(desired_y, y) + + def test_x_stride_assert(self): + # What is the use of this test? + alpha, beta, a, x, y = self.get_data(x_stride=2) + with pytest.raises(Exception, match='failed for 3rd argument'): + y = self.blas_func(1, a, x, 1, y, trans=0, incx=3) + with pytest.raises(Exception, match='failed for 3rd argument'): + y = self.blas_func(1, a, x, 1, y, trans=1, incx=3) + + def test_y_stride(self): + alpha, beta, a, x, y = self.get_data(y_stride=2) + desired_y = y.copy() + desired_y[::2] = alpha*matrixmultiply(a, x)+beta*y[::2] + y = self.blas_func(alpha, a, x, beta, y, incy=2) + assert_array_almost_equal(desired_y, y) + + def test_y_stride_transpose(self): + alpha, beta, a, x, y = self.get_data(y_stride=2) + desired_y = y.copy() + desired_y[::2] = alpha*matrixmultiply(transpose(a), x)+beta*y[::2] + y = self.blas_func(alpha, a, x, beta, y, trans=1, incy=2) + assert_array_almost_equal(desired_y, y) + + def test_y_stride_assert(self): + # What is the use of this test? 
+ alpha, beta, a, x, y = self.get_data(y_stride=2) + with pytest.raises(Exception, match='failed for 2nd keyword'): + y = self.blas_func(1, a, x, 1, y, trans=0, incy=3) + with pytest.raises(Exception, match='failed for 2nd keyword'): + y = self.blas_func(1, a, x, 1, y, trans=1, incy=3) + + +try: + class TestSgemv(BaseGemv): + blas_func = fblas.sgemv + dtype = float32 + + def test_sgemv_on_osx(self): + from itertools import product + import sys + import numpy as np + + if sys.platform != 'darwin': + return + + def aligned_array(shape, align, dtype, order='C'): + # Make array shape `shape` with aligned at `align` bytes + d = dtype() + # Make array of correct size with `align` extra bytes + N = np.prod(shape) + tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) + address = tmp.__array_interface__["data"][0] + # Find offset into array giving desired alignment + for offset in range(align): + if (address + offset) % align == 0: + break + tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) + return tmp.reshape(shape, order=order) + + def as_aligned(arr, align, dtype, order='C'): + # Copy `arr` into an aligned array with same shape + aligned = aligned_array(arr.shape, align, dtype, order) + aligned[:] = arr[:] + return aligned + + def assert_dot_close(A, X, desired): + assert_allclose(self.blas_func(1.0, A, X), desired, + rtol=1e-5, atol=1e-7) + + testdata = product((15, 32), (10000,), (200, 89), ('C', 'F')) + for align, m, n, a_order in testdata: + A_d = np.random.rand(m, n) + X_d = np.random.rand(n) + desired = np.dot(A_d, X_d) + # Calculation with aligned single precision + A_f = as_aligned(A_d, align, np.float32, order=a_order) + X_f = as_aligned(X_d, align, np.float32, order=a_order) + assert_dot_close(A_f, X_f, desired) + +except AttributeError: + class TestSgemv: + pass + + +class TestDgemv(BaseGemv): + blas_func = fblas.dgemv + dtype = float64 + + +try: + class TestCgemv(BaseGemv): + blas_func = fblas.cgemv + dtype = complex64 +except AttributeError: + class TestCgemv: + pass + + +class TestZgemv(BaseGemv): + blas_func = fblas.zgemv + dtype = complex128 + + +""" +################################################## +### Test blas ?ger +### This will be a mess to test all cases. + +class BaseGer(object): + def get_data(self,x_stride=1,y_stride=1): + from numpy.random import normal, seed + seed(1234) + alpha = array(1., dtype = self.dtype) + a = normal(0.,1.,(3,3)).astype(self.dtype) + x = arange(shape(a)[0]*x_stride,dtype=self.dtype) + y = arange(shape(a)[1]*y_stride,dtype=self.dtype) + return alpha,a,x,y + def test_simple(self): + alpha,a,x,y = self.get_data() + # tranpose takes care of Fortran vs. 
C(and Python) memory layout + desired_a = alpha*transpose(x[:,newaxis]*y) + a + self.blas_func(x,y,a) + assert_array_almost_equal(desired_a,a) + def test_x_stride(self): + alpha,a,x,y = self.get_data(x_stride=2) + desired_a = alpha*transpose(x[::2,newaxis]*y) + a + self.blas_func(x,y,a,incx=2) + assert_array_almost_equal(desired_a,a) + def test_x_stride_assert(self): + alpha,a,x,y = self.get_data(x_stride=2) + with pytest.raises(ValueError, match='foo'): + self.blas_func(x,y,a,incx=3) + def test_y_stride(self): + alpha,a,x,y = self.get_data(y_stride=2) + desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a + self.blas_func(x,y,a,incy=2) + assert_array_almost_equal(desired_a,a) + + def test_y_stride_assert(self): + alpha,a,x,y = self.get_data(y_stride=2) + with pytest.raises(ValueError, match='foo'): + self.blas_func(a,x,y,incy=3) + +class TestSger(BaseGer): + blas_func = fblas.sger + dtype = float32 +class TestDger(BaseGer): + blas_func = fblas.dger + dtype = float64 +""" +################################################## +# Test blas ?gerc +# This will be a mess to test all cases. + +""" +class BaseGerComplex(BaseGer): + def get_data(self,x_stride=1,y_stride=1): + from numpy.random import normal, seed + seed(1234) + alpha = array(1+1j, dtype = self.dtype) + a = normal(0.,1.,(3,3)).astype(self.dtype) + a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype) + x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype) + x = x + x * array(1j, dtype = self.dtype) + y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype) + y = y + y * array(1j, dtype = self.dtype) + return alpha,a,x,y + def test_simple(self): + alpha,a,x,y = self.get_data() + # tranpose takes care of Fortran vs. C(and Python) memory layout + a = a * array(0.,dtype = self.dtype) + #desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a + desired_a = alpha*transpose(x[:,newaxis]*y) + a + #self.blas_func(x,y,a,alpha = alpha) + fblas.cgeru(x,y,a,alpha = alpha) + assert_array_almost_equal(desired_a,a) + + #def test_x_stride(self): + # alpha,a,x,y = self.get_data(x_stride=2) + # desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a + # self.blas_func(x,y,a,incx=2) + # assert_array_almost_equal(desired_a,a) + #def test_y_stride(self): + # alpha,a,x,y = self.get_data(y_stride=2) + # desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a + # self.blas_func(x,y,a,incy=2) + # assert_array_almost_equal(desired_a,a) + +class TestCgeru(BaseGerComplex): + blas_func = fblas.cgeru + dtype = complex64 + def transform(self,x): + return x +class TestZgeru(BaseGerComplex): + blas_func = fblas.zgeru + dtype = complex128 + def transform(self,x): + return x + +class TestCgerc(BaseGerComplex): + blas_func = fblas.cgerc + dtype = complex64 + def transform(self,x): + return conjugate(x) + +class TestZgerc(BaseGerComplex): + blas_func = fblas.zgerc + dtype = complex128 + def transform(self,x): + return conjugate(x) +""" diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_fblas.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_fblas.pyc new file mode 100644 index 0000000..bc5c599 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_fblas.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_interpolative.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_interpolative.py new file mode 100644 index 0000000..4deef44 --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_interpolative.py @@ -0,0 +1,278 @@ +#****************************************************************************** +# Copyright (C) 2013 Kenneth L. Ho +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. Redistributions in binary +# form must reproduce the above copyright notice, this list of conditions and +# the following disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# None of the names of the copyright holders may be used to endorse or +# promote products derived from this software without specific prior written +# permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +#****************************************************************************** + +import scipy.linalg.interpolative as pymatrixid +import numpy as np +from scipy.linalg import hilbert, svdvals, norm +from scipy.sparse.linalg import aslinearoperator +import time + +from numpy.testing import assert_, assert_allclose +from pytest import raises as assert_raises + + +def _debug_print(s): + if 0: + print(s) + + +class TestInterpolativeDecomposition(object): + def test_id(self): + for dtype in [np.float64, np.complex128]: + self.check_id(dtype) + + def check_id(self, dtype): + # Test ID routines on a Hilbert matrix. 
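+        # Editor's note -- a hedged sketch, not part of the original file:
+        # a rank-k interpolative decomposition (ID) approximates A by a
+        # "skeleton" of k of its own columns times an interpolation matrix,
+        # A ~= A[:, idx[:k]].dot(P). A tiny self-contained example of the
+        # public API exercised below:
+        #
+        #     import numpy as np
+        #     import scipy.linalg.interpolative as sli
+        #     np.random.seed(0)
+        #     A = np.random.rand(6, 3).dot(np.random.rand(3, 8))  # rank 3
+        #     k, idx, proj = sli.interp_decomp(A, 1e-12, rand=False)
+        #     P = sli.reconstruct_interp_matrix(idx, proj)
+        #     B = sli.reconstruct_skel_matrix(A, k, idx)  # = A[:, idx[:k]]
+        #     assert k == 3 and np.allclose(B.dot(P), A)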
+ + # set parameters + n = 300 + eps = 1e-12 + + # construct Hilbert matrix + A = hilbert(n).astype(dtype) + if np.issubdtype(dtype, np.complexfloating): + A = A * (1 + 1j) + L = aslinearoperator(A) + + # find rank + S = np.linalg.svd(A, compute_uv=False) + try: + rank = np.nonzero(S < eps)[0][0] + except IndexError: + rank = n + + # print input summary + _debug_print("Hilbert matrix dimension: %8i" % n) + _debug_print("Working precision: %8.2e" % eps) + _debug_print("Rank to working precision: %8i" % rank) + + # set print format + fmt = "%8.2e (s) / %5s" + + # test real ID routines + _debug_print("-----------------------------------------") + _debug_print("Real ID routines") + _debug_print("-----------------------------------------") + + # fixed precision + _debug_print("Calling iddp_id / idzp_id ...",) + t0 = time.time() + k, idx, proj = pymatrixid.interp_decomp(A, eps, rand=False) + t = time.time() - t0 + B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) + _debug_print(fmt % (t, np.allclose(A, B, eps))) + assert_(np.allclose(A, B, eps)) + + _debug_print("Calling iddp_aid / idzp_aid ...",) + t0 = time.time() + k, idx, proj = pymatrixid.interp_decomp(A, eps) + t = time.time() - t0 + B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) + _debug_print(fmt % (t, np.allclose(A, B, eps))) + assert_(np.allclose(A, B, eps)) + + _debug_print("Calling iddp_rid / idzp_rid ...",) + t0 = time.time() + k, idx, proj = pymatrixid.interp_decomp(L, eps) + t = time.time() - t0 + B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) + _debug_print(fmt % (t, np.allclose(A, B, eps))) + assert_(np.allclose(A, B, eps)) + + # fixed rank + k = rank + + _debug_print("Calling iddr_id / idzr_id ...",) + t0 = time.time() + idx, proj = pymatrixid.interp_decomp(A, k, rand=False) + t = time.time() - t0 + B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) + _debug_print(fmt % (t, np.allclose(A, B, eps))) + assert_(np.allclose(A, B, eps)) + + _debug_print("Calling iddr_aid / idzr_aid ...",) + t0 = time.time() + idx, proj = pymatrixid.interp_decomp(A, k) + t = time.time() - t0 + B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) + _debug_print(fmt % (t, np.allclose(A, B, eps))) + assert_(np.allclose(A, B, eps)) + + _debug_print("Calling iddr_rid / idzr_rid ...",) + t0 = time.time() + idx, proj = pymatrixid.interp_decomp(L, k) + t = time.time() - t0 + B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) + _debug_print(fmt % (t, np.allclose(A, B, eps))) + assert_(np.allclose(A, B, eps)) + + # check skeleton and interpolation matrices + idx, proj = pymatrixid.interp_decomp(A, k, rand=False) + P = pymatrixid.reconstruct_interp_matrix(idx, proj) + B = pymatrixid.reconstruct_skel_matrix(A, k, idx) + assert_(np.allclose(B, A[:,idx[:k]], eps)) + assert_(np.allclose(B.dot(P), A, eps)) + + # test SVD routines + _debug_print("-----------------------------------------") + _debug_print("SVD routines") + _debug_print("-----------------------------------------") + + # fixed precision + _debug_print("Calling iddp_svd / idzp_svd ...",) + t0 = time.time() + U, S, V = pymatrixid.svd(A, eps, rand=False) + t = time.time() - t0 + B = np.dot(U, np.dot(np.diag(S), V.T.conj())) + _debug_print(fmt % (t, np.allclose(A, B, eps))) + assert_(np.allclose(A, B, eps)) + + _debug_print("Calling iddp_asvd / idzp_asvd...",) + t0 = time.time() + U, S, V = pymatrixid.svd(A, eps) + t = time.time() - t0 + B = np.dot(U, np.dot(np.diag(S), V.T.conj())) + 
_debug_print(fmt % (t, np.allclose(A, B, eps)))
+        assert_(np.allclose(A, B, eps))
+
+        _debug_print("Calling iddp_rsvd / idzp_rsvd...",)
+        t0 = time.time()
+        U, S, V = pymatrixid.svd(L, eps)
+        t = time.time() - t0
+        B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+        _debug_print(fmt % (t, np.allclose(A, B, eps)))
+        assert_(np.allclose(A, B, eps))
+
+        # fixed rank
+        k = rank
+
+        _debug_print("Calling iddr_svd / idzr_svd ...",)
+        t0 = time.time()
+        U, S, V = pymatrixid.svd(A, k, rand=False)
+        t = time.time() - t0
+        B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+        _debug_print(fmt % (t, np.allclose(A, B, eps)))
+        assert_(np.allclose(A, B, eps))
+
+        _debug_print("Calling iddr_asvd / idzr_asvd ...",)
+        t0 = time.time()
+        U, S, V = pymatrixid.svd(A, k)
+        t = time.time() - t0
+        B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+        _debug_print(fmt % (t, np.allclose(A, B, eps)))
+        assert_(np.allclose(A, B, eps))
+
+        _debug_print("Calling iddr_rsvd / idzr_rsvd ...",)
+        t0 = time.time()
+        U, S, V = pymatrixid.svd(L, k)
+        t = time.time() - t0
+        B = np.dot(U, np.dot(np.diag(S), V.T.conj()))
+        _debug_print(fmt % (t, np.allclose(A, B, eps)))
+        assert_(np.allclose(A, B, eps))
+
+        # ID to SVD
+        idx, proj = pymatrixid.interp_decomp(A, k, rand=False)
+        Up, Sp, Vp = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj)
+        B = Up.dot(np.diag(Sp).dot(Vp.T.conj()))
+        assert_(np.allclose(A, B, eps))
+
+        # Norm estimates
+        s = svdvals(A)
+        norm_2_est = pymatrixid.estimate_spectral_norm(A)
+        assert_(np.allclose(norm_2_est, s[0], 1e-6))
+
+        B = A.copy()
+        B[:,0] *= 1.2
+        s = svdvals(A - B)
+        norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B)
+        assert_(np.allclose(norm_2_est, s[0], 1e-6))
+
+        # Rank estimates
+        B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=dtype)
+        for M in [A, B]:
+            ML = aslinearoperator(M)
+
+            rank_tol = 1e-9
+            rank_np = np.linalg.matrix_rank(M, norm(M, 2)*rank_tol)
+            rank_est = pymatrixid.estimate_rank(M, rank_tol)
+            rank_est_2 = pymatrixid.estimate_rank(ML, rank_tol)
+
+            assert_(rank_est >= rank_np)
+            assert_(rank_est <= rank_np + 10)
+
+            assert_(rank_est_2 >= rank_np - 4)
+            assert_(rank_est_2 <= rank_np + 4)
+
+    def test_rand(self):
+        pymatrixid.seed('default')
+        assert_(np.allclose(pymatrixid.rand(2), [0.8932059, 0.64500803], 1e-4))
+
+        pymatrixid.seed(1234)
+        x1 = pymatrixid.rand(2)
+        assert_(np.allclose(x1, [0.7513823, 0.06861718], 1e-4))
+
+        np.random.seed(1234)
+        pymatrixid.seed()
+        x2 = pymatrixid.rand(2)
+
+        np.random.seed(1234)
+        pymatrixid.seed(np.random.rand(55))
+        x3 = pymatrixid.rand(2)
+
+        assert_allclose(x1, x2)
+        assert_allclose(x1, x3)
+
+    def test_badcall(self):
+        A = hilbert(5).astype(np.float32)
+        assert_raises(ValueError, pymatrixid.interp_decomp, A, 1e-6, rand=False)
+
+    def test_rank_too_large(self):
+        # svd(array, k) should not segfault
+        a = np.ones((4, 3))
+        with assert_raises(ValueError):
+            pymatrixid.svd(a, 4)
+
+    def test_full_rank(self):
+        eps = 1.0e-12
+
+        # fixed precision
+        A = np.random.rand(16, 8)
+        k, idx, proj = pymatrixid.interp_decomp(A, eps)
+        assert_(k == A.shape[1])
+
+        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+        assert_allclose(A, B.dot(P))
+
+        # fixed rank
+        idx, proj = pymatrixid.interp_decomp(A, k)
+
+        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+        assert_allclose(A, B.dot(P))
+
diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_interpolative.pyc
b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_interpolative.pyc new file mode 100644 index 0000000..bc8ffb0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_interpolative.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_lapack.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_lapack.py new file mode 100644 index 0000000..a4a8825 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_lapack.py @@ -0,0 +1,1245 @@ +# +# Created by: Pearu Peterson, September 2002 +# + +from __future__ import division, print_function, absolute_import + +import sys +import subprocess +import time +from functools import reduce + +from numpy.testing import (assert_equal, assert_array_almost_equal, assert_, + assert_allclose, assert_almost_equal, + assert_array_equal) +import pytest +from pytest import raises as assert_raises + +import numpy as np +from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices, + triu_indices) + +from numpy.random import rand, seed + +from scipy.linalg import _flapack as flapack +from scipy.linalg import inv, svd, cholesky, solve +from scipy.linalg.lapack import _compute_lwork + +try: + from scipy.linalg import _clapack as clapack +except ImportError: + clapack = None +from scipy.linalg.lapack import get_lapack_funcs +from scipy.linalg.blas import get_blas_funcs + +REAL_DTYPES = [np.float32, np.float64] +COMPLEX_DTYPES = [np.complex64, np.complex128] +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +class TestFlapackSimple(object): + + def test_gebal(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + a1 = [[1, 0, 0, 3e-4], + [4, 0, 0, 2e-3], + [7, 1, 0, 0], + [0, 1, 0, 0]] + for p in 'sdzc': + f = getattr(flapack, p+'gebal', None) + if f is None: + continue + ba, lo, hi, pivscale, info = f(a) + assert_(not info, repr(info)) + assert_array_almost_equal(ba, a) + assert_equal((lo, hi), (0, len(a[0])-1)) + assert_array_almost_equal(pivscale, np.ones(len(a))) + + ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1) + assert_(not info, repr(info)) + # print(a1) + # print(ba, lo, hi, pivscale) + + def test_gehrd(self): + a = [[-149, -50, -154], + [537, 180, 546], + [-27, -9, -25]] + for p in 'd': + f = getattr(flapack, p+'gehrd', None) + if f is None: + continue + ht, tau, info = f(a) + assert_(not info, repr(info)) + + def test_trsyl(self): + a = np.array([[1, 2], [0, 4]]) + b = np.array([[5, 6], [0, 8]]) + c = np.array([[9, 10], [11, 12]]) + trans = 'T' + + # Test single and double implementations, including most + # of the options + for dtype in 'fdFD': + a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype) + trsyl, = get_lapack_funcs(('trsyl',), (a1,)) + if dtype.isupper(): # is complex dtype + a1[0] += 1j + trans = 'C' + + x, scale, info = trsyl(a1, b1, c1) + assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1), + scale * c1) + + x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans) + assert_array_almost_equal( + np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T), + scale * c1, decimal=4) + + x, scale, info = trsyl(a1, b1, c1, isgn=-1) + assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1), + scale * c1, decimal=4) + + def test_lange(self): + a = np.array([ + [-149, -50, -154], + [537, 180, 546], + [-27, -9, -25]]) + + for dtype in 'fdFD': + for norm in 'Mm1OoIiFfEe': + a1 = a.astype(dtype) + if dtype.isupper(): + # is complex dtype + a1[0, 0] += 1j + + lange, = get_lapack_funcs(('lange',), 
(a1,)) + value = lange(norm, a1) + + if norm in 'FfEe': + if dtype in 'Ff': + decimal = 3 + else: + decimal = 7 + ref = np.sqrt(np.sum(np.square(np.abs(a1)))) + assert_almost_equal(value, ref, decimal) + else: + if norm in 'Mm': + ref = np.max(np.abs(a1)) + elif norm in '1Oo': + ref = np.max(np.sum(np.abs(a1), axis=0)) + elif norm in 'Ii': + ref = np.max(np.sum(np.abs(a1), axis=1)) + + assert_equal(value, ref) + + +class TestLapack(object): + + def test_flapack(self): + if hasattr(flapack, 'empty_module'): + # flapack module is empty + pass + + def test_clapack(self): + if hasattr(clapack, 'empty_module'): + # clapack module is empty + pass + + +class TestLeastSquaresSolvers(object): + + def test_gels(self): + seed(1234) + # Test fat/tall matrix argument handling - gh-issue #8329 + for ind, dtype in enumerate(DTYPES): + m = 10 + n = 20 + nrhs = 1 + a1 = rand(m, n).astype(dtype) + b1 = rand(n).astype(dtype) + gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype) + + # Request of sizes + lwork = _compute_lwork(glslw, m, n, nrhs) + _, _, info = gls(a1, b1, lwork=lwork) + assert_(info >= 0) + _, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork) + assert_(info >= 0) + + for dtype in REAL_DTYPES: + a1 = np.array([[1.0, 2.0], + [4.0, 5.0], + [7.0, 8.0]], dtype=dtype) + b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) + gels, gels_lwork, geqrf = get_lapack_funcs( + ('gels', 'gels_lwork', 'geqrf'), (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + lwork = _compute_lwork(gels_lwork, m, n, nrhs) + + lqr, x, info = gels(a1, b1, lwork=lwork) + assert_allclose(x[:-1], np.array([-14.333333333333323, + 14.999999999999991], + dtype=dtype), + rtol=25*np.finfo(dtype).eps) + lqr_truth, _, _, _ = geqrf(a1) + assert_array_equal(lqr, lqr_truth) + + for dtype in COMPLEX_DTYPES: + a1 = np.array([[1.0+4.0j, 2.0], + [4.0+0.5j, 5.0-3.0j], + [7.0-2.0j, 8.0+0.7j]], dtype=dtype) + b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) + gels, gels_lwork, geqrf = get_lapack_funcs( + ('gels', 'gels_lwork', 'geqrf'), (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + lwork = _compute_lwork(gels_lwork, m, n, nrhs) + + lqr, x, info = gels(a1, b1, lwork=lwork) + assert_allclose(x[:-1], + np.array([1.161753632288328-1.901075709391912j, + 1.735882340522193+1.521240901196909j], + dtype=dtype), rtol=25*np.finfo(dtype).eps) + lqr_truth, _, _, _ = geqrf(a1) + assert_array_equal(lqr, lqr_truth) + + def test_gelsd(self): + for dtype in REAL_DTYPES: + a1 = np.array([[1.0, 2.0], + [4.0, 5.0], + [7.0, 8.0]], dtype=dtype) + b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) + gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, iwork, info = gelsd_lwork(m, n, nrhs, -1) + lwork = int(np.real(work)) + iwork_size = iwork + + x, s, rank, info = gelsd(a1, b1, lwork, iwork_size, + -1, False, False) + assert_allclose(x[:-1], np.array([-14.333333333333323, + 14.999999999999991], dtype=dtype), + rtol=25*np.finfo(dtype).eps) + assert_allclose(s, np.array([12.596017180511966, + 0.583396253199685], dtype=dtype), + rtol=25*np.finfo(dtype).eps) + + for dtype in COMPLEX_DTYPES: + a1 = np.array([[1.0+4.0j, 2.0], + [4.0+0.5j, 5.0-3.0j], + [7.0-2.0j, 8.0+0.7j]], dtype=dtype) + b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) + gelsd, gelsd_lwork = 
get_lapack_funcs(('gelsd', 'gelsd_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1) + lwork = int(np.real(work)) + rwork_size = int(rwork) + iwork_size = iwork + + x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size, + -1, False, False) + assert_allclose(x[:-1], + np.array([1.161753632288328-1.901075709391912j, + 1.735882340522193+1.521240901196909j], + dtype=dtype), rtol=25*np.finfo(dtype).eps) + assert_allclose(s, + np.array([13.035514762572043, 4.337666985231382], + dtype=dtype), rtol=25*np.finfo(dtype).eps) + + def test_gelss(self): + + for dtype in REAL_DTYPES: + a1 = np.array([[1.0, 2.0], + [4.0, 5.0], + [7.0, 8.0]], dtype=dtype) + b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) + gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, info = gelss_lwork(m, n, nrhs, -1) + lwork = int(np.real(work)) + + v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False) + assert_allclose(x[:-1], np.array([-14.333333333333323, + 14.999999999999991], dtype=dtype), + rtol=25*np.finfo(dtype).eps) + assert_allclose(s, np.array([12.596017180511966, + 0.583396253199685], dtype=dtype), + rtol=25*np.finfo(dtype).eps) + + for dtype in COMPLEX_DTYPES: + a1 = np.array([[1.0+4.0j, 2.0], + [4.0+0.5j, 5.0-3.0j], + [7.0-2.0j, 8.0+0.7j]], dtype=dtype) + b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) + gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, info = gelss_lwork(m, n, nrhs, -1) + lwork = int(np.real(work)) + + v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False) + assert_allclose(x[:-1], + np.array([1.161753632288328-1.901075709391912j, + 1.735882340522193+1.521240901196909j], + dtype=dtype), + rtol=25*np.finfo(dtype).eps) + assert_allclose(s, np.array([13.035514762572043, + 4.337666985231382], dtype=dtype), + rtol=25*np.finfo(dtype).eps) + + def test_gelsy(self): + + for dtype in REAL_DTYPES: + a1 = np.array([[1.0, 2.0], + [4.0, 5.0], + [7.0, 8.0]], dtype=dtype) + b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) + gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps) + lwork = int(np.real(work)) + + jptv = np.zeros((a1.shape[1], 1), dtype=np.int32) + v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps, + lwork, False, False) + assert_allclose(x[:-1], np.array([-14.333333333333323, + 14.999999999999991], dtype=dtype), + rtol=25*np.finfo(dtype).eps) + + for dtype in COMPLEX_DTYPES: + a1 = np.array([[1.0+4.0j, 2.0], + [4.0+0.5j, 5.0-3.0j], + [7.0-2.0j, 8.0+0.7j]], dtype=dtype) + b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) + gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps) + lwork = int(np.real(work)) + + jptv = np.zeros((a1.shape[1], 1), dtype=np.int32) + v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps, + lwork, False, False) + 
assert_allclose(x[:-1],
+                            np.array([1.161753632288328-1.901075709391912j,
+                                      1.735882340522193+1.521240901196909j],
+                                     dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+
+
+class TestRegression(object):
+
+    def test_ticket_1645(self):
+        # Check that RQ routines have correct lwork
+        for dtype in DTYPES:
+            a = np.zeros((300, 2), dtype=dtype)
+
+            gerqf, = get_lapack_funcs(['gerqf'], [a])
+            assert_raises(Exception, gerqf, a, lwork=2)
+            rq, tau, work, info = gerqf(a)
+
+            if dtype in REAL_DTYPES:
+                orgrq, = get_lapack_funcs(['orgrq'], [a])
+                assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
+                orgrq(rq[-2:], tau, lwork=2)
+            elif dtype in COMPLEX_DTYPES:
+                ungrq, = get_lapack_funcs(['ungrq'], [a])
+                assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
+                ungrq(rq[-2:], tau, lwork=2)
+
+
+class TestDpotr(object):
+    def test_gh_2691(self):
+        # 'lower' argument of dpotrf/dpotri
+        for lower in [True, False]:
+            for clean in [True, False]:
+                np.random.seed(42)
+                x = np.random.normal(size=(3, 3))
+                a = x.dot(x.T)
+
+                dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
+
+                c, info = dpotrf(a, lower, clean=clean)
+                dpt = dpotri(c, lower)[0]
+
+                if lower:
+                    assert_allclose(np.tril(dpt), np.tril(inv(a)))
+                else:
+                    assert_allclose(np.triu(dpt), np.triu(inv(a)))
+
+
+class TestDlasd4(object):
+    def test_sing_val_update(self):
+
+        sigmas = np.array([4., 3., 2., 0])
+        m_vec = np.array([3.12, 5.7, -4.8, -2.2])
+
+        M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
+                       np.zeros((1, len(m_vec) - 1)))), m_vec[:, np.newaxis]))
+        SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
+                 check_finite=False)
+
+        it_len = len(sigmas)
+        sgm = np.concatenate((sigmas[::-1], (sigmas[0] +
+                              it_len*np.sqrt(np.sum(np.power(m_vec, 2))),)))
+        mvc = np.concatenate((m_vec[::-1], (0,)))
+
+        lasd4 = get_lapack_funcs('lasd4', (sigmas,))
+
+        roots = []
+        for i in range(0, it_len):
+            res = lasd4(i, sgm, mvc)
+            roots.append(res[1])
+
+            assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
+the singular value %i" % i)
+        roots = np.array(roots)[::-1]
+
+        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
+        assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
+                        rtol=100*np.finfo(np.float64).eps)
+
+
+def test_lartg():
+    for dtype in 'fdFD':
+        lartg = get_lapack_funcs('lartg', dtype=dtype)
+
+        f = np.array(3, dtype)
+        g = np.array(4, dtype)
+
+        if np.iscomplexobj(g):
+            g *= 1j
+
+        cs, sn, r = lartg(f, g)
+
+        assert_allclose(cs, 3.0/5.0)
+        assert_allclose(r, 5.0)
+
+        if np.iscomplexobj(g):
+            assert_allclose(sn, -4.0j/5.0)
+            assert_(type(r) == complex)
+            assert_(type(cs) == float)
+        else:
+            assert_allclose(sn, 4.0/5.0)
+
+
+def test_rot():
+    # srot, drot from blas and crot and zrot from lapack.
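+    # Editor's note (not part of the original file): ?rot applies a plane
+    # (Givens) rotation to paired entries of the two vectors,
+    #     u'[i] =  c*u[i] + s*v[i]
+    #     v'[i] = -conj(s)*u[i] + c*v[i].
+    # For the real dtypes below, c = 0.6, s = 0.8, u[i] = 3 and v[i] = 4,
+    # so a rotated pair is (0.6*3 + 0.8*4, -0.8*3 + 0.6*4) = (5, 0),
+    # which is exactly what the expected arrays in the assertions encode.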
+ + for dtype in 'fdFD': + c = 0.6 + s = 0.8 + + u = np.ones(4, dtype) * 3 + v = np.ones(4, dtype) * 4 + atol = 10**-(np.finfo(dtype).precision-1) + + if dtype in 'fd': + rot = get_blas_funcs('rot', dtype=dtype) + f = 4 + else: + rot = get_lapack_funcs('rot', dtype=dtype) + s *= -1j + v *= 1j + f = 4j + + assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5], + [0, 0, 0, 0]], atol=atol) + assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3], + [0, 0, f, f]], atol=atol) + assert_allclose(rot(u, v, c, s, offx=2, offy=2), + [[3, 3, 5, 5], [f, f, 0, 0]], atol=atol) + assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2), + [[5, 3, 5, 3], [f, f, 0, 0]], atol=atol) + assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2), + [[3, 3, 5, 5], [0, f, 0, f]], atol=atol) + assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1), + [[3, 3, 5, 3], [f, f, 0, f]], atol=atol) + assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2), + [[5, 3, 5, 3], [0, f, 0, f]], atol=atol) + + a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1) + assert_(a is u) + assert_(b is v) + assert_allclose(a, [5, 5, 5, 5], atol=atol) + assert_allclose(b, [0, 0, 0, 0], atol=atol) + + +def test_larfg_larf(): + np.random.seed(1234) + a0 = np.random.random((4, 4)) + a0 = a0.T.dot(a0) + + a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4)) + a0j = a0j.T.conj().dot(a0j) + + # our test here will be to do one step of reducing a hermetian matrix to + # tridiagonal form using householder transforms. + + for dtype in 'fdFD': + larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype) + + if dtype in 'FD': + a = a0j.copy() + else: + a = a0.copy() + + # generate a householder transform to clear a[2:,0] + alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0]) + + # create expected output + expected = np.zeros_like(a[:, 0]) + expected[0] = a[0, 0] + expected[1] = alpha + + # assemble householder vector + v = np.zeros_like(a[1:, 0]) + v[0] = 1.0 + v[1:] = x + + # apply transform from the left + a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1])) + + # apply transform from the right + a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R') + + assert_allclose(a[:, 0], expected, atol=1e-5) + assert_allclose(a[0, :], expected, atol=1e-5) + + +@pytest.mark.xslow +def test_sgesdd_lwork_bug_workaround(): + # Test that SGESDD lwork is sufficiently large for LAPACK. + # + # This checks that workaround around an apparent LAPACK bug + # actually works. cf. gh-5401 + # + # xslow: requires 1GB+ of memory + + p = subprocess.Popen([sys.executable, '-c', + 'import numpy as np; ' + 'from scipy.linalg import svd; ' + 'a = np.zeros([9537, 9537], dtype=np.float32); ' + 'svd(a)'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + # Check if it an error occurred within 5 sec; the computation can + # take substantially longer, and we will not wait for it to finish + for j in range(50): + time.sleep(0.1) + if p.poll() is not None: + returncode = p.returncode + break + else: + # Didn't exit in time -- probably entered computation. The + # error is raised before entering computation, so things are + # probably OK. 
+ returncode = 0 + p.terminate() + + assert_equal(returncode, 0, + "Code apparently failed: " + p.stdout.read()) + + +class TestSytrd(object): + def test_sytrd(self): + for dtype in REAL_DTYPES: + # Assert that a 0x0 matrix raises an error + A = np.zeros((0, 0), dtype=dtype) + sytrd, sytrd_lwork = \ + get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,)) + assert_raises(ValueError, sytrd, A) + + # Tests for n = 1 currently fail with + # ``` + # ValueError: failed to create intent(cache|hide)|optional array-- + # must have defined dimensions but got (0,) + # ``` + # This is a NumPy issue + # <https://github.com/numpy/numpy/issues/9617>. + # TODO once the issue has been resolved, test for n=1 + + # some upper triangular array + n = 3 + A = np.zeros((n, n), dtype=dtype) + A[np.triu_indices_from(A)] = \ + np.arange(1, n*(n+1)//2+1, dtype=dtype) + + # query lwork + lwork, info = sytrd_lwork(n) + assert_equal(info, 0) + + # check lower=1 behavior (shouldn't do much since the matrix is + # upper triangular) + data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork) + assert_equal(info, 0) + + assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0) + assert_allclose(d, np.diag(A)) + assert_allclose(e, 0.0) + assert_allclose(tau, 0.0) + + # and now for the proper test (lower=0 is the default) + data, d, e, tau, info = sytrd(A, lwork=lwork) + assert_equal(info, 0) + + # assert Q^T*A*Q = tridiag(e, d, e) + + # build tridiagonal matrix + T = np.zeros_like(A, dtype=dtype) + k = np.arange(A.shape[0]) + T[k, k] = d + k2 = np.arange(A.shape[0]-1) + T[k2+1, k2] = e + T[k2, k2+1] = e + + # build Q + Q = np.eye(n, n, dtype=dtype) + for i in range(n-1): + v = np.zeros(n, dtype=dtype) + v[:i] = data[:i, i+1] + v[i] = 1.0 + H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v) + Q = np.dot(H, Q) + + # Make matrix fully symmetric + i_lower = np.tril_indices(n, -1) + A[i_lower] = A.T[i_lower] + + QTAQ = np.dot(Q.T, np.dot(A, Q)) + + # disable rtol here since some values in QTAQ and T are very close + # to 0. + assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0) + + +class TestHetrd(object): + def test_hetrd(self): + for real_dtype, complex_dtype in zip(REAL_DTYPES, COMPLEX_DTYPES): + # Assert that a 0x0 matrix raises an error + A = np.zeros((0, 0), dtype=complex_dtype) + hetrd, hetrd_lwork = \ + get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,)) + assert_raises(ValueError, hetrd, A) + + # Tests for n = 1 currently fail with + # ``` + # ValueError: failed to create intent(cache|hide)|optional array-- + # must have defined dimensions but got (0,) + # ``` + # This is a NumPy issue + # <https://github.com/numpy/numpy/issues/9617>. 
+ # TODO once the issue has been resolved, test for n=1 + + # some upper triangular array + n = 3 + A = np.zeros((n, n), dtype=complex_dtype) + A[np.triu_indices_from(A)] = ( + np.arange(1, n*(n+1)//2+1, dtype=real_dtype) + + 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype) + ) + np.fill_diagonal(A, np.real(np.diag(A))) + + # query lwork + lwork, info = hetrd_lwork(n) + assert_equal(info, 0) + + # check lower=1 behavior (shouldn't do much since the matrix is + # upper triangular) + data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork) + assert_equal(info, 0) + + assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0) + + assert_allclose(d, np.real(np.diag(A))) + assert_allclose(e, 0.0) + assert_allclose(tau, 0.0) + + # and now for the proper test (lower=0 is the default) + data, d, e, tau, info = hetrd(A, lwork=lwork) + assert_equal(info, 0) + + # assert Q^T*A*Q = tridiag(e, d, e) + + # build tridiagonal matrix + T = np.zeros_like(A, dtype=real_dtype) + k = np.arange(A.shape[0], dtype=int) + T[k, k] = d + k2 = np.arange(A.shape[0]-1, dtype=int) + T[k2+1, k2] = e + T[k2, k2+1] = e + + # build Q + Q = np.eye(n, n, dtype=complex_dtype) + for i in range(n-1): + v = np.zeros(n, dtype=complex_dtype) + v[:i] = data[:i, i+1] + v[i] = 1.0 + H = np.eye(n, n, dtype=complex_dtype) \ + - tau[i] * np.outer(v, np.conj(v)) + Q = np.dot(H, Q) + + # Make matrix fully Hermetian + i_lower = np.tril_indices(n, -1) + A[i_lower] = np.conj(A.T[i_lower]) + + QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q)) + + # disable rtol here since some values in QTAQ and T are very close + # to 0. + assert_allclose( + QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0 + ) + + +def test_gglse(): + # Example data taken from NAG manual + for ind, dtype in enumerate(DTYPES): + # DTYPES = <s,d,c,z> gglse + func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'), + dtype=dtype) + lwork = _compute_lwork(func_lwork, m=6, n=4, p=2) + # For <s,d>gglse + if ind < 2: + a = np.array([[-0.57, -1.28, -0.39, 0.25], + [-1.93, 1.08, -0.31, -2.14], + [2.30, 0.24, 0.40, -0.35], + [-1.93, 0.64, -0.66, 0.08], + [0.15, 0.30, 0.15, -2.13], + [-0.02, 1.03, -1.43, 0.50]], dtype=dtype) + c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype) + d = np.array([0., 0.], dtype=dtype) + # For <s,d>gglse + else: + a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j], + [-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j], + [0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j], + [0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j], + [0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j], + [1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]]) + c = np.array([[-2.54+0.09j], + [1.65-2.26j], + [-2.11-3.96j], + [1.82+3.30j], + [-6.41+3.77j], + [2.07+0.66j]]) + d = np.zeros(2, dtype=dtype) + + b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype) + + _, _, _, result, _ = func(a, b, c, d, lwork=lwork) + if ind < 2: + expected = np.array([0.48904455, + 0.99754786, + 0.48904455, + 0.99754786]) + else: + expected = np.array([1.08742917-1.96205783j, + -0.74093902+3.72973919j, + 1.08742917-1.96205759j, + -0.74093896+3.72973895j]) + assert_array_almost_equal(result, expected, decimal=4) + + +def test_sycon_hecon(): + seed(1234) + for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): + # DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon + n = 10 + # For <s,d,c,z>sycon + if ind < 4: + func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype) + funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype) + A = (rand(n, 
n)).astype(dtype) + # For <c,z>hecon + else: + func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype) + funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype) + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + + # Since sycon only refers to upper/lower part, conj() is safe here. + A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype) + + anorm = np.linalg.norm(A, 1) + lwork = _compute_lwork(func_lwork, n) + ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1) + rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1) + # The error is at most 1-fold + assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1) + + +def test_sygst(): + seed(1234) + for ind, dtype in enumerate(REAL_DTYPES): + # DTYPES = <s,d> sygst + n = 10 + + potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst', + 'syevd', 'sygvd'), + dtype=dtype) + + A = rand(n, n).astype(dtype) + A = (A + A.T)/2 + # B must be positive definite + B = rand(n, n).astype(dtype) + B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype) + + # Perform eig (sygvd) + _, eig_gvd, info = sygvd(A, B) + assert_(info == 0) + + # Convert to std problem potrf + b, info = potrf(B) + assert_(info == 0) + a, info = sygst(A, b) + assert_(info == 0) + + eig, _, info = syevd(a) + assert_(info == 0) + assert_allclose(eig, eig_gvd, rtol=1e-4) + + +def test_hegst(): + seed(1234) + for ind, dtype in enumerate(COMPLEX_DTYPES): + # DTYPES = <c,z> hegst + n = 10 + + potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst', + 'heevd', 'hegvd'), + dtype=dtype) + + A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype) + A = (A + A.conj().T)/2 + # B must be positive definite + B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype) + B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype) + + # Perform eig (hegvd) + _, eig_gvd, info = hegvd(A, B) + assert_(info == 0) + + # Convert to std problem potrf + b, info = potrf(B) + assert_(info == 0) + a, info = hegst(A, b) + assert_(info == 0) + + eig, _, info = heevd(a) + assert_(info == 0) + assert_allclose(eig, eig_gvd, rtol=1e-4) + + +def test_tzrzf(): + """ + This test performs an RZ decomposition in which an m x n upper trapezoidal + array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular + and Z is unitary. + """ + seed(1234) + m, n = 10, 15 + for ind, dtype in enumerate(DTYPES): + tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'), + dtype=dtype) + lwork = _compute_lwork(tzrzf_lw, m, n) + + if ind < 2: + A = triu(rand(m, n).astype(dtype)) + else: + A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype)) + + # assert wrong shape arg, f2py returns generic error + assert_raises(Exception, tzrzf, A.T) + rz, tau, info = tzrzf(A, lwork=lwork) + # Check success + assert_(info == 0) + + # Get Z manually for comparison + R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype))) + V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:])) + Id = np.eye(n, dtype=dtype) + ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)] + Z = reduce(np.dot, ref) + assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype), + atol=10*np.spacing(dtype(1.0).real), rtol=0.) + + +def test_tfsm(): + """ + Test for solving a linear system with the coefficient matrix is a + triangular array stored in Full Packed (RFP) format. 
+ """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype) + trans = 'C' + else: + A = triu(rand(n, n) + eye(n)).astype(dtype) + trans = 'T' + + trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'), + dtype=dtype) + + Afp, _ = trttf(A) + B = rand(n, 2).astype(dtype) + soln = tfsm(-1, Afp, B) + assert_array_almost_equal(soln, solve(-A, B), + decimal=4 if ind % 2 == 0 else 6) + + soln = tfsm(-1, Afp, B, trans=trans) + assert_array_almost_equal(soln, solve(-A.conj().T, B), + decimal=4 if ind % 2 == 0 else 6) + + # Make A, unit diagonal + A[np.arange(n), np.arange(n)] = dtype(1.) + soln = tfsm(-1, Afp, B, trans=trans, diag='U') + assert_array_almost_equal(soln, solve(-A.conj().T, B), + decimal=4 if ind % 2 == 0 else 6) + + # Change side + B2 = rand(3, n).astype(dtype) + soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R') + assert_array_almost_equal(soln, solve(-A, B2.T).conj().T, + decimal=4 if ind % 2 == 0 else 6) + + +def test_ormrz_unmrz(): + """ + This test performs a matrix multiplication with an arbitrary m x n matric C + and a unitary matrix Q without explicitly forming the array. The array data + is encoded in the rectangular part of A which is obtained from ?TZRZF. Q + size is inferred by m, n, side keywords. + """ + seed(1234) + qm, qn, cn = 10, 15, 15 + for ind, dtype in enumerate(DTYPES): + tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'), + dtype=dtype) + lwork_rz = _compute_lwork(tzrzf_lw, qm, qn) + + if ind < 2: + A = triu(rand(qm, qn).astype(dtype)) + C = rand(cn, cn).astype(dtype) + orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'), + dtype=dtype) + else: + A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype)) + C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype) + orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'), + dtype=dtype) + + lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn) + rz, tau, info = tzrzf(A, lwork=lwork_rz) + + # Get Q manually for comparison + V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:])) + Id = np.eye(qn, dtype=dtype) + ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)] + Q = reduce(np.dot, ref) + + # Now that we have Q, we can test whether lapack results agree with + # each case of CQ, CQ^H, QC, and QC^H + trans = 'T' if ind < 2 else 'C' + tol = 10*np.spacing(dtype(1.0).real) + + cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz) + assert_(info == 0) + assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.) + + cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz) + assert_(info == 0) + assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol, + rtol=0.) + + cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz) + assert_(info == 0) + assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.) + + cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz) + assert_(info == 0) + assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol, + rtol=0.) 
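+# Editor's note -- a hedged sketch, not part of the original scipy file:
+# Rectangular Full Packed (RFP) storage, used by several tests below, packs
+# the n*(n+1)/2 meaningful entries of a triangular matrix into one dense
+# rectangular block so that level-3 BLAS kernels can operate on it. A
+# minimal round trip through the converters, assuming the double-precision
+# wrappers dtrttf/dtfttr are exposed by scipy.linalg.lapack:
+#
+#     import numpy as np
+#     from scipy.linalg.lapack import dtrttf, dtfttr
+#     n = 4
+#     A = np.triu(np.arange(1., n*n + 1).reshape(n, n))
+#     Afp, info = dtrttf(A)            # triangular -> RFP (info == 0)
+#     assert Afp.shape == (n*(n + 1)//2,)
+#     A_back, info = dtfttr(n, Afp)    # RFP -> triangular (info == 0)
+#     assert np.allclose(A_back, A)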
+ + +def test_tfttr_trttf(): + """ + Test conversion routines between the Rectengular Full Packed (RFP) format + and Standard Triangular Array (TR) + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype) + transr = 'C' + else: + A_full = (rand(n, n)).astype(dtype) + transr = 'T' + + trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype) + A_tf_U, info = trttf(A_full) + assert_(info == 0) + A_tf_L, info = trttf(A_full, uplo='L') + assert_(info == 0) + A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U') + assert_(info == 0) + A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L') + assert_(info == 0) + + # Create the RFP array manually (n is even!) + A_tf_U_m = zeros((n+1, n//2), dtype=dtype) + A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:] + A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T + + A_tf_L_m = zeros((n+1, n//2), dtype=dtype) + A_tf_L_m[1:, :] = tril(A_full)[:, :n//2] + A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T + + assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F')) + assert_array_almost_equal(A_tf_U_T, + A_tf_U_m.conj().T.reshape(-1, order='F')) + + assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F')) + assert_array_almost_equal(A_tf_L_T, + A_tf_L_m.conj().T.reshape(-1, order='F')) + + # Get the original array from RFP + A_tr_U, info = tfttr(n, A_tf_U) + assert_(info == 0) + A_tr_L, info = tfttr(n, A_tf_L, uplo='L') + assert_(info == 0) + A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U') + assert_(info == 0) + A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L') + assert_(info == 0) + + assert_array_almost_equal(A_tr_U, triu(A_full)) + assert_array_almost_equal(A_tr_U_T, triu(A_full)) + assert_array_almost_equal(A_tr_L, tril(A_full)) + assert_array_almost_equal(A_tr_L_T, tril(A_full)) + + +def test_tpttr_trttp(): + """ + Test conversion routines between the Rectengular Full Packed (RFP) format + and Standard Triangular Array (TR) + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype) + else: + A_full = (rand(n, n)).astype(dtype) + + trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype) + A_tp_U, info = trttp(A_full) + assert_(info == 0) + A_tp_L, info = trttp(A_full, uplo='L') + assert_(info == 0) + + # Create the TP array manually + inds = tril_indices(n) + A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype) + A_tp_U_m[:] = (triu(A_full).T)[inds] + + inds = triu_indices(n) + A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype) + A_tp_L_m[:] = (tril(A_full).T)[inds] + + assert_array_almost_equal(A_tp_U, A_tp_U_m) + assert_array_almost_equal(A_tp_L, A_tp_L_m) + + # Get the original array from TP + A_tr_U, info = tpttr(n, A_tp_U) + assert_(info == 0) + A_tr_L, info = tpttr(n, A_tp_L, uplo='L') + assert_(info == 0) + + assert_array_almost_equal(A_tr_U, triu(A_full)) + assert_array_almost_equal(A_tr_L, tril(A_full)) + + +def test_pftrf(): + """ + Test Cholesky factorization of a positive definite Rectengular Full + Packed (RFP) format array + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + A = A + A.conj().T + n*eye(n) + else: + A = (rand(n, n)).astype(dtype) + A = A + A.T + n*eye(n) + + pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'), + dtype=dtype) + + # Get the original array from TP + Afp, info = trttf(A) + Achol_rfp, info = pftrf(n, Afp) + 
assert_(info == 0) + A_chol_r, _ = tfttr(n, Achol_rfp) + Achol = cholesky(A) + assert_array_almost_equal(A_chol_r, Achol) + + +def test_pftri(): + """ + Test Cholesky factorization of a positive definite Rectengular Full + Packed (RFP) format array to find its inverse + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + A = A + A.conj().T + n*eye(n) + else: + A = (rand(n, n)).astype(dtype) + A = A + A.T + n*eye(n) + + pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri', + 'pftrf', + 'trttf', + 'tfttr'), + dtype=dtype) + + # Get the original array from TP + Afp, info = trttf(A) + A_chol_rfp, info = pftrf(n, Afp) + A_inv_rfp, info = pftri(n, A_chol_rfp) + assert_(info == 0) + A_inv_r, _ = tfttr(n, A_inv_rfp) + Ainv = inv(A) + assert_array_almost_equal(A_inv_r, triu(Ainv), + decimal=4 if ind % 2 == 0 else 6) + + +def test_pftrs(): + """ + Test Cholesky factorization of a positive definite Rectengular Full + Packed (RFP) format array and solve a linear system + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + A = A + A.conj().T + n*eye(n) + else: + A = (rand(n, n)).astype(dtype) + A = A + A.T + n*eye(n) + + B = ones((n, 3), dtype=dtype) + Bf1 = ones((n+2, 3), dtype=dtype) + Bf2 = ones((n-2, 3), dtype=dtype) + pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs', + 'pftrf', + 'trttf', + 'tfttr'), + dtype=dtype) + + # Get the original array from TP + Afp, info = trttf(A) + A_chol_rfp, info = pftrf(n, Afp) + # larger B arrays shouldn't segfault + soln, info = pftrs(n, A_chol_rfp, Bf1) + assert_(info == 0) + assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2) + soln, info = pftrs(n, A_chol_rfp, B) + assert_(info == 0) + assert_array_almost_equal(solve(A, B), soln, + decimal=4 if ind % 2 == 0 else 6) + + +def test_sfrk_hfrk(): + """ + Test for performing a symmetric rank-k operation for matrix in RFP format. 
+ """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + A = A + A.conj().T + n*eye(n) + else: + A = (rand(n, n)).astype(dtype) + A = A + A.T + n*eye(n) + + prefix = 's'if ind < 2 else 'h' + trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', '{}frk' + ''.format(prefix)), + dtype=dtype) + + Afp, _ = trttf(A) + C = np.random.rand(n, 2).astype(dtype) + Afp_out = shfrk(n, 2, -1, C, 2, Afp) + A_out, _ = tfttr(n, Afp_out) + assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A), + decimal=4 if ind % 2 == 0 else 6) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_lapack.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_lapack.pyc new file mode 100644 index 0000000..b2df6b5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_lapack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_matfuncs.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_matfuncs.py new file mode 100644 index 0000000..f4fcbee --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_matfuncs.py @@ -0,0 +1,836 @@ +# +# Created by: Pearu Peterson, March 2002 +# +""" Test functions for linalg.matfuncs module + +""" +from __future__ import division, print_function, absolute_import + +import random +import functools + +import numpy as np +from numpy import array, matrix, identity, dot, sqrt, double +from numpy.testing import ( + assert_array_equal, assert_array_less, assert_equal, + assert_array_almost_equal, assert_array_almost_equal_nulp, + assert_allclose, assert_) +import pytest + +from scipy._lib._numpy_compat import _assert_warns, suppress_warnings + +import scipy.linalg +from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power, + expm, expm_frechet, expm_cond, norm) +from scipy.linalg import _matfuncs_inv_ssq +import scipy.linalg._expm_frechet + +from scipy.optimize import minimize + + +def _get_al_mohy_higham_2012_experiment_1(): + """ + Return the test matrix from Experiment (1) of [1]_. + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) + "Improved Inverse Scaling and Squaring Algorithms + for the Matrix Logarithm." + SIAM Journal on Scientific Computing, 34 (4). C152-C169. + ISSN 1095-7197 + + """ + A = np.array([ + [3.2346e-1, 3e4, 3e4, 3e4], + [0, 3.0089e-1, 3e4, 3e4], + [0, 0, 3.2210e-1, 3e4], + [0, 0, 0, 3.0744e-1]], dtype=float) + return A + + +class TestSignM(object): + + def test_nils(self): + a = array([[29.2, -24.2, 69.5, 49.8, 7.], + [-9.2, 5.2, -18., -16.8, -2.], + [-10., 6., -20., -18., -2.], + [-9.6, 9.6, -25.5, -15.4, -2.], + [9.8, -4.8, 18., 18.2, 2.]]) + cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333], + [-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667], + [-4.08,0.56,-4.92,-7.6,0.56], + [-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667], + [4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]]) + r = signm(a) + assert_array_almost_equal(r,cr) + + def test_defective1(self): + a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]]) + r = signm(a, disp=False) + #XXX: what would be the correct result? 
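+        # (The matrix is defective, so the iteration behind signm may not
+        # converge to a meaningful result; with disp=False the defective
+        # tests only check that signm returns without raising.)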
+ + def test_defective2(self): + a = array(( + [29.2,-24.2,69.5,49.8,7.0], + [-9.2,5.2,-18.0,-16.8,-2.0], + [-10.0,6.0,-20.0,-18.0,-2.0], + [-9.6,9.6,-25.5,-15.4,-2.0], + [9.8,-4.8,18.0,18.2,2.0])) + r = signm(a, disp=False) + #XXX: what would be the correct result? + + def test_defective3(self): + a = array([[-2., 25., 0., 0., 0., 0., 0.], + [0., -3., 10., 3., 3., 3., 0.], + [0., 0., 2., 15., 3., 3., 0.], + [0., 0., 0., 0., 15., 3., 0.], + [0., 0., 0., 0., 3., 10., 0.], + [0., 0., 0., 0., 0., -2., 25.], + [0., 0., 0., 0., 0., 0., -3.]]) + r = signm(a, disp=False) + #XXX: what would be the correct result? + + +class TestLogM(object): + + def test_nils(self): + a = array([[-2., 25., 0., 0., 0., 0., 0.], + [0., -3., 10., 3., 3., 3., 0.], + [0., 0., 2., 15., 3., 3., 0.], + [0., 0., 0., 0., 15., 3., 0.], + [0., 0., 0., 0., 3., 10., 0.], + [0., 0., 0., 0., 0., -2., 25.], + [0., 0., 0., 0., 0., 0., -3.]]) + m = (identity(7)*3.1+0j)-a + logm(m, disp=False) + #XXX: what would be the correct result? + + def test_al_mohy_higham_2012_experiment_1_logm(self): + # The logm completes the round trip successfully. + # Note that the expm leg of the round trip is badly conditioned. + A = _get_al_mohy_higham_2012_experiment_1() + A_logm, info = logm(A, disp=False) + A_round_trip = expm(A_logm) + assert_allclose(A_round_trip, A, rtol=1e-5, atol=1e-14) + + def test_al_mohy_higham_2012_experiment_1_funm_log(self): + # The raw funm with np.log does not complete the round trip. + # Note that the expm leg of the round trip is badly conditioned. + A = _get_al_mohy_higham_2012_experiment_1() + A_funm_log, info = funm(A, np.log, disp=False) + A_round_trip = expm(A_funm_log) + assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)) + + def test_round_trip_random_float(self): + np.random.seed(1234) + for n in range(1, 6): + M_unscaled = np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + + # Eigenvalues are related to the branch cut. + W = np.linalg.eigvals(M) + err_msg = 'M:{0} eivals:{1}'.format(M, W) + + # Check sqrtm round trip because it is used within logm. + M_sqrtm, info = sqrtm(M, disp=False) + M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) + assert_allclose(M_sqrtm_round_trip, M) + + # Check logm round trip. + M_logm, info = logm(M, disp=False) + M_logm_round_trip = expm(M_logm) + assert_allclose(M_logm_round_trip, M, err_msg=err_msg) + + def test_round_trip_random_complex(self): + np.random.seed(1234) + for n in range(1, 6): + M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_logm, info = logm(M, disp=False) + M_round_trip = expm(M_logm) + assert_allclose(M_round_trip, M) + + def test_logm_type_preservation_and_conversion(self): + # The logm matrix function should preserve the type of a matrix + # whose eigenvalues are positive with zero imaginary part. + # Test this preservation for variously structured matrices. 
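+        # 'F', 'D' and 'G' are the numpy dtype characters of the complex
+        # floating types (csingle, cdouble and clongdouble respectively).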
+ complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, 1]], + [[1, 0], [1, 1]], + [[2, 1], [1, 1]], + [[2, 3], [1, 2]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(not any(w.imag or w.real < 0 for w in W)) + + # check float type preservation + A = np.array(matrix_as_list, dtype=float) + A_logm, info = logm(A, disp=False) + assert_(A_logm.dtype.char not in complex_dtype_chars) + + # check complex type preservation + A = np.array(matrix_as_list, dtype=complex) + A_logm, info = logm(A, disp=False) + assert_(A_logm.dtype.char in complex_dtype_chars) + + # check float->complex type conversion for the matrix negation + A = -np.array(matrix_as_list, dtype=float) + A_logm, info = logm(A, disp=False) + assert_(A_logm.dtype.char in complex_dtype_chars) + + def test_complex_spectrum_real_logm(self): + # This matrix has complex eigenvalues and real logm. + # Its output dtype depends on its input dtype. + M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]] + for dt in float, complex: + X = np.array(M, dtype=dt) + w = scipy.linalg.eigvals(X) + assert_(1e-2 < np.absolute(w.imag).sum()) + Y, info = logm(X, disp=False) + assert_(np.issubdtype(Y.dtype, np.inexact)) + assert_allclose(expm(Y), X) + + def test_real_mixed_sign_spectrum(self): + # These matrices have real eigenvalues with mixed signs. + # The output logm dtype is complex, regardless of input dtype. + for M in ( + [[1, 0], [0, -1]], + [[0, 1], [1, 0]]): + for dt in float, complex: + A = np.array(M, dtype=dt) + A_logm, info = logm(A, disp=False) + assert_(np.issubdtype(A_logm.dtype, np.complexfloating)) + + def test_exactly_singular(self): + A = np.array([[0, 0], [1j, 1j]]) + B = np.asarray([[1, 1], [0, 0]]) + for M in A, A.T, B, B.T: + expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning + L, info = _assert_warns(expected_warning, logm, M, disp=False) + E = expm(L) + assert_allclose(E, M, atol=1e-14) + + def test_nearly_singular(self): + M = np.array([[1e-100]]) + expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning + L, info = _assert_warns(expected_warning, logm, M, disp=False) + E = expm(L) + assert_allclose(E, M, atol=1e-14) + + def test_opposite_sign_complex_eigenvalues(self): + # See gh-6113 + E = [[0, 1], [-1, 0]] + L = [[0, np.pi*0.5], [-np.pi*0.5, 0]] + assert_allclose(expm(L), E, atol=1e-14) + assert_allclose(logm(E), L, atol=1e-14) + E = [[1j, 4], [0, -1j]] + L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]] + assert_allclose(expm(L), E, atol=1e-14) + assert_allclose(logm(E), L, atol=1e-14) + E = [[1j, 0], [0, -1j]] + L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]] + assert_allclose(expm(L), E, atol=1e-14) + assert_allclose(logm(E), L, atol=1e-14) + + +class TestSqrtM(object): + def test_round_trip_random_float(self): + np.random.seed(1234) + for n in range(1, 6): + M_unscaled = np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_sqrtm, info = sqrtm(M, disp=False) + M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) + assert_allclose(M_sqrtm_round_trip, M) + + def test_round_trip_random_complex(self): + np.random.seed(1234) + for n in range(1, 6): + M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_sqrtm, info = sqrtm(M, disp=False) + M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) + assert_allclose(M_sqrtm_round_trip, M) + + def test_bad(self): + # See 
https://web.archive.org/web/20051220232650/http://www.maths.man.ac.uk/~nareports/narep336.ps.gz + e = 2**-5 + se = sqrt(e) + a = array([[1.0,0,0,1], + [0,e,0,0], + [0,0,e,0], + [0,0,0,1]]) + sa = array([[1,0,0,0.5], + [0,se,0,0], + [0,0,se,0], + [0,0,0,1]]) + n = a.shape[0] + assert_array_almost_equal(dot(sa,sa),a) + # Check default sqrtm. + esa = sqrtm(a, disp=False, blocksize=n)[0] + assert_array_almost_equal(dot(esa,esa),a) + # Check sqrtm with 2x2 blocks. + esa = sqrtm(a, disp=False, blocksize=2)[0] + assert_array_almost_equal(dot(esa,esa),a) + + def test_sqrtm_type_preservation_and_conversion(self): + # The sqrtm matrix function should preserve the type of a matrix + # whose eigenvalues are nonnegative with zero imaginary part. + # Test this preservation for variously structured matrices. + complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, 1]], + [[1, 0], [1, 1]], + [[2, 1], [1, 1]], + [[2, 3], [1, 2]], + [[1, 1], [1, 1]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(not any(w.imag or w.real < 0 for w in W)) + + # check float type preservation + A = np.array(matrix_as_list, dtype=float) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char not in complex_dtype_chars) + + # check complex type preservation + A = np.array(matrix_as_list, dtype=complex) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char in complex_dtype_chars) + + # check float->complex type conversion for the matrix negation + A = -np.array(matrix_as_list, dtype=float) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char in complex_dtype_chars) + + def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self): + complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, -1]], + [[0, 1], [1, 0]], + [[0, 1, 0], [0, 0, 1], [1, 0, 0]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(any(w.imag or w.real < 0 for w in W)) + + # check complex->complex + A = np.array(matrix_as_list, dtype=complex) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char in complex_dtype_chars) + + # check float->complex + A = np.array(matrix_as_list, dtype=float) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char in complex_dtype_chars) + + def test_blocksizes(self): + # Make sure I do not goof up the blocksizes when they do not divide n. + np.random.seed(1234) + for n in range(1, 8): + A = np.random.rand(n, n) + 1j*np.random.randn(n, n) + A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n) + assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2)) + for blocksize in range(1, 10): + A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize) + assert_allclose(A_sqrtm_default, A_sqrtm_new) + + def test_al_mohy_higham_2012_experiment_1(self): + # Matrix square root of a tricky upper triangular matrix. + A = _get_al_mohy_higham_2012_experiment_1() + A_sqrtm, info = sqrtm(A, disp=False) + A_round_trip = A_sqrtm.dot(A_sqrtm) + assert_allclose(A_round_trip, A, rtol=1e-5) + assert_allclose(np.tril(A_round_trip), np.tril(A)) + + def test_strict_upper_triangular(self): + # This matrix has no square root. + for dt in int, float: + A = np.array([ + [0, 3, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 3], + [0, 0, 0, 0]], dtype=dt) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(np.isnan(A_sqrtm).all()) + + def test_weird_matrix(self): + # The square root of matrix B exists. 
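+        # (B below is nilpotent with a defective zero eigenvalue, so it has
+        # no primary square root, i.e. none expressible as a polynomial in
+        # B; the root A constructed below is a non-primary one.)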
+ for dt in int, float: + A = np.array([ + [0, 0, 1], + [0, 0, 0], + [0, 1, 0]], dtype=dt) + B = np.array([ + [0, 1, 0], + [0, 0, 0], + [0, 0, 0]], dtype=dt) + assert_array_equal(B, A.dot(A)) + + # But scipy sqrtm is not clever enough to find it. + B_sqrtm, info = sqrtm(B, disp=False) + assert_(np.isnan(B_sqrtm).all()) + + def test_disp(self): + from io import StringIO + np.random.seed(1234) + + A = np.random.rand(3, 3) + B = sqrtm(A, disp=True) + assert_allclose(B.dot(B), A) + + def test_opposite_sign_complex_eigenvalues(self): + M = [[2j, 4], [0, -2j]] + R = [[1+1j, 2], [0, 1-1j]] + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(sqrtm(M), R, atol=1e-14) + + def test_gh4866(self): + M = np.array([[1, 0, 0, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [1, 0, 0, 1]]) + R = np.array([[sqrt(0.5), 0, 0, sqrt(0.5)], + [0, 0, 0, 0], + [0, 0, 0, 0], + [sqrt(0.5), 0, 0, sqrt(0.5)]]) + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(sqrtm(M), R, atol=1e-14) + + def test_gh5336(self): + M = np.diag([2, 1, 0]) + R = np.diag([sqrt(2), 1, 0]) + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(sqrtm(M), R, atol=1e-14) + + def test_gh7839(self): + M = np.zeros((2, 2)) + R = np.zeros((2, 2)) + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(sqrtm(M), R, atol=1e-14) + + +class TestFractionalMatrixPower(object): + def test_round_trip_random_complex(self): + np.random.seed(1234) + for p in range(1, 5): + for n in range(1, 5): + M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_root = fractional_matrix_power(M, 1/p) + M_round_trip = np.linalg.matrix_power(M_root, p) + assert_allclose(M_round_trip, M) + + def test_round_trip_random_float(self): + # This test is more annoying because it can hit the branch cut; + # this happens when the matrix has an eigenvalue + # with no imaginary component and with a real negative component, + # and it means that the principal branch does not exist. + np.random.seed(1234) + for p in range(1, 5): + for n in range(1, 5): + M_unscaled = np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_root = fractional_matrix_power(M, 1/p) + M_round_trip = np.linalg.matrix_power(M_root, p) + assert_allclose(M_round_trip, M) + + def test_larger_abs_fractional_matrix_powers(self): + np.random.seed(1234) + for n in (2, 3, 5): + for i in range(10): + M = np.random.randn(n, n) + 1j * np.random.randn(n, n) + M_one_fifth = fractional_matrix_power(M, 0.2) + # Test the round trip. + M_round_trip = np.linalg.matrix_power(M_one_fifth, 5) + assert_allclose(M, M_round_trip) + # Test a large abs fractional power. + X = fractional_matrix_power(M, -5.4) + Y = np.linalg.matrix_power(M_one_fifth, -27) + assert_allclose(X, Y) + # Test another large abs fractional power. + X = fractional_matrix_power(M, 3.8) + Y = np.linalg.matrix_power(M_one_fifth, 19) + assert_allclose(X, Y) + + def test_random_matrices_and_powers(self): + # Each independent iteration of this fuzz test picks random parameters. + # It tries to hit some edge cases. + np.random.seed(1234) + nsamples = 20 + for i in range(nsamples): + # Sample a matrix size and a random real power. + n = random.randrange(1, 5) + p = np.random.randn() + + # Sample a random real or complex matrix. 
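+            # The scale spans roughly e**-4 to e**4 so that both small-norm
+            # and large-norm matrices are exercised.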
+ matrix_scale = np.exp(random.randrange(-4, 5)) + A = np.random.randn(n, n) + if random.choice((True, False)): + A = A + 1j * np.random.randn(n, n) + A = A * matrix_scale + + # Check a couple of analytically equivalent ways + # to compute the fractional matrix power. + # These can be compared because they both use the principal branch. + A_power = fractional_matrix_power(A, p) + A_logm, info = logm(A, disp=False) + A_power_expm_logm = expm(A_logm * p) + assert_allclose(A_power, A_power_expm_logm) + + def test_al_mohy_higham_2012_experiment_1(self): + # Fractional powers of a tricky upper triangular matrix. + A = _get_al_mohy_higham_2012_experiment_1() + + # Test remainder matrix power. + A_funm_sqrt, info = funm(A, np.sqrt, disp=False) + A_sqrtm, info = sqrtm(A, disp=False) + A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5) + A_power = fractional_matrix_power(A, 0.5) + assert_array_equal(A_rem_power, A_power) + assert_allclose(A_sqrtm, A_power) + assert_allclose(A_sqrtm, A_funm_sqrt) + + # Test more fractional powers. + for p in (1/2, 5/3): + A_power = fractional_matrix_power(A, p) + A_round_trip = fractional_matrix_power(A_power, 1/p) + assert_allclose(A_round_trip, A, rtol=1e-2) + assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1)) + + def test_briggs_helper_function(self): + np.random.seed(1234) + for a in np.random.randn(10) + 1j * np.random.randn(10): + for k in range(5): + x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k) + x_expected = a ** np.exp2(-k) - 1 + assert_allclose(x_observed, x_expected) + + def test_type_preservation_and_conversion(self): + # The fractional_matrix_power matrix function should preserve + # the type of a matrix whose eigenvalues + # are positive with zero imaginary part. + # Test this preservation for variously structured matrices. + complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, 1]], + [[1, 0], [1, 1]], + [[2, 1], [1, 1]], + [[2, 3], [1, 2]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(not any(w.imag or w.real < 0 for w in W)) + + # Check various positive and negative powers + # with absolute values bigger and smaller than 1. + for p in (-2.4, -0.9, 0.2, 3.3): + + # check float type preservation + A = np.array(matrix_as_list, dtype=float) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char not in complex_dtype_chars) + + # check complex type preservation + A = np.array(matrix_as_list, dtype=complex) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char in complex_dtype_chars) + + # check float->complex for the matrix negation + A = -np.array(matrix_as_list, dtype=float) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char in complex_dtype_chars) + + def test_type_conversion_mixed_sign_or_complex_spectrum(self): + complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, -1]], + [[0, 1], [1, 0]], + [[0, 1, 0], [0, 0, 1], [1, 0, 0]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(any(w.imag or w.real < 0 for w in W)) + + # Check various positive and negative powers + # with absolute values bigger and smaller than 1. 
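+            # A mixed-sign or complex spectrum makes the principal power
+            # genuinely complex, so even float input must come back complex.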
+ for p in (-2.4, -0.9, 0.2, 3.3): + + # check complex->complex + A = np.array(matrix_as_list, dtype=complex) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char in complex_dtype_chars) + + # check float->complex + A = np.array(matrix_as_list, dtype=float) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char in complex_dtype_chars) + + @pytest.mark.xfail(reason='Too unstable across LAPACKs.') + def test_singular(self): + # Negative fractional powers do not work with singular matrices. + for matrix_as_list in ( + [[0, 0], [0, 0]], + [[1, 1], [1, 1]], + [[1, 2], [3, 6]], + [[0, 0, 0], [0, 1, 1], [0, -1, 1]]): + + # Check fractional powers both for float and for complex types. + for newtype in (float, complex): + A = np.array(matrix_as_list, dtype=newtype) + for p in (-0.7, -0.9, -2.4, -1.3): + A_power = fractional_matrix_power(A, p) + assert_(np.isnan(A_power).all()) + for p in (0.2, 1.43): + A_power = fractional_matrix_power(A, p) + A_round_trip = fractional_matrix_power(A_power, 1/p) + assert_allclose(A_round_trip, A) + + def test_opposite_sign_complex_eigenvalues(self): + M = [[2j, 4], [0, -2j]] + R = [[1+1j, 2], [0, 1-1j]] + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14) + + +class TestExpM(object): + def test_zero(self): + a = array([[0.,0],[0,0]]) + assert_array_almost_equal(expm(a),[[1,0],[0,1]]) + + def test_single_elt(self): + # See gh-5853 + from scipy.sparse import csc_matrix + + vOne = -2.02683397006j + vTwo = -2.12817566856j + + mOne = csc_matrix([[vOne]], dtype='complex') + mTwo = csc_matrix([[vTwo]], dtype='complex') + + outOne = expm(mOne) + outTwo = expm(mTwo) + + assert_equal(type(outOne), type(mOne)) + assert_equal(type(outTwo), type(mTwo)) + + assert_allclose(outOne[0, 0], complex(-0.44039415155949196, + -0.8978045395698304)) + assert_allclose(outTwo[0, 0], complex(-0.52896401032626006, + -0.84864425749518878)) + + +class TestExpmFrechet(object): + + def test_expm_frechet(self): + # a test of the basic functionality + M = np.array([ + [1, 2, 3, 4], + [5, 6, 7, 8], + [0, 0, 1, 2], + [0, 0, 5, 6], + ], dtype=float) + A = np.array([ + [1, 2], + [5, 6], + ], dtype=float) + E = np.array([ + [3, 4], + [7, 8], + ], dtype=float) + expected_expm = scipy.linalg.expm(A) + expected_frechet = scipy.linalg.expm(M)[:2, 2:] + for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}): + observed_expm, observed_frechet = expm_frechet(A, E, **kwargs) + assert_allclose(expected_expm, observed_expm) + assert_allclose(expected_frechet, observed_frechet) + + def test_small_norm_expm_frechet(self): + # methodically test matrices with a range of norms, for better coverage + M_original = np.array([ + [1, 2, 3, 4], + [5, 6, 7, 8], + [0, 0, 1, 2], + [0, 0, 5, 6], + ], dtype=float) + A_original = np.array([ + [1, 2], + [5, 6], + ], dtype=float) + E_original = np.array([ + [3, 4], + [7, 8], + ], dtype=float) + A_original_norm_1 = scipy.linalg.norm(A_original, 1) + selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15] + m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:]) + for ma, mb in m_neighbor_pairs: + ell_a = scipy.linalg._expm_frechet.ell_table_61[ma] + ell_b = scipy.linalg._expm_frechet.ell_table_61[mb] + target_norm_1 = 0.5 * (ell_a + ell_b) + scale = target_norm_1 / A_original_norm_1 + M = scale * M_original + A = scale * A_original + E = scale * E_original + expected_expm = scipy.linalg.expm(A) + expected_frechet = scipy.linalg.expm(M)[:2, 2:] + observed_expm, observed_frechet = 
expm_frechet(A, E) + assert_allclose(expected_expm, observed_expm) + assert_allclose(expected_frechet, observed_frechet) + + def test_fuzz(self): + # try a bunch of crazy inputs + rfuncs = ( + np.random.uniform, + np.random.normal, + np.random.standard_cauchy, + np.random.exponential) + ntests = 100 + for i in range(ntests): + rfunc = random.choice(rfuncs) + target_norm_1 = random.expovariate(1.0) + n = random.randrange(2, 16) + A_original = rfunc(size=(n,n)) + E_original = rfunc(size=(n,n)) + A_original_norm_1 = scipy.linalg.norm(A_original, 1) + scale = target_norm_1 / A_original_norm_1 + A = scale * A_original + E = scale * E_original + M = np.vstack([ + np.hstack([A, E]), + np.hstack([np.zeros_like(A), A])]) + expected_expm = scipy.linalg.expm(A) + expected_frechet = scipy.linalg.expm(M)[:n, n:] + observed_expm, observed_frechet = expm_frechet(A, E) + assert_allclose(expected_expm, observed_expm) + assert_allclose(expected_frechet, observed_frechet) + + def test_problematic_matrix(self): + # this test case uncovered a bug which has since been fixed + A = np.array([ + [1.50591997, 1.93537998], + [0.41203263, 0.23443516], + ], dtype=float) + E = np.array([ + [1.87864034, 2.07055038], + [1.34102727, 0.67341123], + ], dtype=float) + A_norm_1 = scipy.linalg.norm(A, 1) + sps_expm, sps_frechet = expm_frechet( + A, E, method='SPS') + blockEnlarge_expm, blockEnlarge_frechet = expm_frechet( + A, E, method='blockEnlarge') + assert_allclose(sps_expm, blockEnlarge_expm) + assert_allclose(sps_frechet, blockEnlarge_frechet) + + @pytest.mark.slow + @pytest.mark.skip(reason='this test is deliberately slow') + def test_medium_matrix(self): + # profile this to see the speed difference + n = 1000 + A = np.random.exponential(size=(n, n)) + E = np.random.exponential(size=(n, n)) + sps_expm, sps_frechet = expm_frechet( + A, E, method='SPS') + blockEnlarge_expm, blockEnlarge_frechet = expm_frechet( + A, E, method='blockEnlarge') + assert_allclose(sps_expm, blockEnlarge_expm) + assert_allclose(sps_frechet, blockEnlarge_frechet) + + +def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p): + p = np.reshape(p, A.shape) + p_norm = norm(p) + perturbation = eps * p * (A_norm / p_norm) + X_prime = expm(A + perturbation) + scaled_relative_error = norm(X_prime - X) / (X_norm * eps) + return -scaled_relative_error + + +def _normalized_like(A, B): + return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A)) + + +def _relative_error(f, A, perturbation): + X = f(A) + X_prime = f(A + perturbation) + return norm(X_prime - X) / norm(X) + + +class TestExpmConditionNumber(object): + def test_expm_cond_smoke(self): + np.random.seed(1234) + for n in range(1, 4): + A = np.random.randn(n, n) + kappa = expm_cond(A) + assert_array_less(0, kappa) + + def test_expm_bad_condition_number(self): + A = np.array([ + [-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14], + [0, -1.201010529, 9.634696872e4, -4.681048289e9], + [0, 0, -1.132893222, 9.532491830e4], + [0, 0, 0, -1.179475332], + ]) + kappa = expm_cond(A) + assert_array_less(1e36, kappa) + + def test_univariate(self): + np.random.seed(12345) + for x in np.linspace(-5, 5, num=11): + A = np.array([[x]]) + assert_allclose(expm_cond(A), abs(x)) + for x in np.logspace(-2, 2, num=11): + A = np.array([[x]]) + assert_allclose(expm_cond(A), abs(x)) + for i in range(10): + A = np.random.randn(1, 1) + assert_allclose(expm_cond(A), np.absolute(A)[0, 0]) + + @pytest.mark.slow + def test_expm_cond_fuzz(self): + np.random.seed(12345) + eps = 1e-5 + nsamples = 10 + for i in range(nsamples): 
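+            # For each sample, L-BFGS-B below searches for the perturbation
+            # direction that maximizes the relative error of expm; the
+            # maximum found should stay below roughly eps * expm_cond(A).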
+ n = np.random.randint(2, 5) + A = np.random.randn(n, n) + A_norm = scipy.linalg.norm(A) + X = expm(A) + X_norm = scipy.linalg.norm(X) + kappa = expm_cond(A) + + # Look for the small perturbation that gives the greatest + # relative error. + f = functools.partial(_help_expm_cond_search, + A, A_norm, X, X_norm, eps) + guess = np.ones(n*n) + out = minimize(f, guess, method='L-BFGS-B') + xopt = out.x + yopt = f(xopt) + p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A) + p_best_relerr = _relative_error(expm, A, p_best) + assert_allclose(p_best_relerr, -yopt * eps) + + # Check that the identified perturbation indeed gives greater + # relative error than random perturbations with similar norms. + for j in range(5): + p_rand = eps * _normalized_like(np.random.randn(*A.shape), A) + assert_allclose(norm(p_best), norm(p_rand)) + p_rand_relerr = _relative_error(expm, A, p_rand) + assert_array_less(p_rand_relerr, p_best_relerr) + + # The greatest relative error should not be much greater than + # eps times the condition number kappa. + # In the limit as eps approaches zero it should never be greater. + assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_matfuncs.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_matfuncs.pyc new file mode 100644 index 0000000..caf5273 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_matfuncs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_procrustes.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_procrustes.py new file mode 100644 index 0000000..fabfe28 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_procrustes.py @@ -0,0 +1,190 @@ +from itertools import product, permutations + +import numpy as np +from numpy.testing import assert_array_less, assert_allclose +from pytest import raises as assert_raises + +from scipy.linalg import inv, eigh, norm +from scipy.linalg import orthogonal_procrustes + + +def test_orthogonal_procrustes_ndim_too_large(): + np.random.seed(1234) + A = np.random.randn(3, 4, 5) + B = np.random.randn(3, 4, 5) + assert_raises(ValueError, orthogonal_procrustes, A, B) + + +def test_orthogonal_procrustes_ndim_too_small(): + np.random.seed(1234) + A = np.random.randn(3) + B = np.random.randn(3) + assert_raises(ValueError, orthogonal_procrustes, A, B) + + +def test_orthogonal_procrustes_shape_mismatch(): + np.random.seed(1234) + shapes = ((3, 3), (3, 4), (4, 3), (4, 4)) + for a, b in permutations(shapes, 2): + A = np.random.randn(*a) + B = np.random.randn(*b) + assert_raises(ValueError, orthogonal_procrustes, A, B) + + +def test_orthogonal_procrustes_checkfinite_exception(): + np.random.seed(1234) + m, n = 2, 3 + A_good = np.random.randn(m, n) + B_good = np.random.randn(m, n) + for bad_value in np.inf, -np.inf, np.nan: + A_bad = A_good.copy() + A_bad[1, 2] = bad_value + B_bad = B_good.copy() + B_bad[1, 2] = bad_value + for A, B in ((A_good, B_bad), (A_bad, B_good), (A_bad, B_bad)): + assert_raises(ValueError, orthogonal_procrustes, A, B) + + +def test_orthogonal_procrustes_scale_invariance(): + np.random.seed(1234) + m, n = 4, 3 + for i in range(3): + A_orig = np.random.randn(m, n) + B_orig = np.random.randn(m, n) + R_orig, s = orthogonal_procrustes(A_orig, B_orig) + for A_scale in np.square(np.random.randn(3)): + for B_scale in np.square(np.random.randn(3)): + R, s = orthogonal_procrustes(A_orig 
* A_scale, B_orig * B_scale) + assert_allclose(R, R_orig) + + +def test_orthogonal_procrustes_array_conversion(): + np.random.seed(1234) + for m, n in ((6, 4), (4, 4), (4, 6)): + A_arr = np.random.randn(m, n) + B_arr = np.random.randn(m, n) + As = (A_arr, A_arr.tolist(), np.matrix(A_arr)) + Bs = (B_arr, B_arr.tolist(), np.matrix(B_arr)) + R_arr, s = orthogonal_procrustes(A_arr, B_arr) + AR_arr = A_arr.dot(R_arr) + for A, B in product(As, Bs): + R, s = orthogonal_procrustes(A, B) + AR = A_arr.dot(R) + assert_allclose(AR, AR_arr) + + +def test_orthogonal_procrustes(): + np.random.seed(1234) + for m, n in ((6, 4), (4, 4), (4, 6)): + # Sample a random target matrix. + B = np.random.randn(m, n) + # Sample a random orthogonal matrix + # by computing eigh of a sampled symmetric matrix. + X = np.random.randn(n, n) + w, V = eigh(X.T + X) + assert_allclose(inv(V), V.T) + # Compute a matrix with a known orthogonal transformation that gives B. + A = np.dot(B, V.T) + # Check that an orthogonal transformation from A to B can be recovered. + R, s = orthogonal_procrustes(A, B) + assert_allclose(inv(R), R.T) + assert_allclose(A.dot(R), B) + # Create a perturbed input matrix. + A_perturbed = A + 1e-2 * np.random.randn(m, n) + # Check that the orthogonal procrustes function can find an orthogonal + # transformation that is better than the orthogonal transformation + # computed from the original input matrix. + R_prime, s = orthogonal_procrustes(A_perturbed, B) + assert_allclose(inv(R_prime), R_prime.T) + # Compute the naive and optimal transformations of the perturbed input. + naive_approx = A_perturbed.dot(R) + optim_approx = A_perturbed.dot(R_prime) + # Compute the Frobenius norm errors of the matrix approximations. + naive_approx_error = norm(naive_approx - B, ord='fro') + optim_approx_error = norm(optim_approx - B, ord='fro') + # Check that the orthogonal Procrustes approximation is better. + assert_array_less(optim_approx_error, naive_approx_error) + + +def _centered(A): + mu = A.mean(axis=0) + return A - mu, mu + + +def test_orthogonal_procrustes_exact_example(): + # Check a small application. + # It uses translation, scaling, reflection, and rotation. + # + # | + # a b | + # | + # d c | w + # | + # --------+--- x ----- z --- + # | + # | y + # | + # + A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float) + B_orig = np.array([[3, 2], [1, 0], [3, -2], [5, 0]], dtype=float) + A, A_mu = _centered(A_orig) + B, B_mu = _centered(B_orig) + R, s = orthogonal_procrustes(A, B) + scale = s / np.square(norm(A)) + B_approx = scale * np.dot(A, R) + B_mu + assert_allclose(B_approx, B_orig, atol=1e-8) + + +def test_orthogonal_procrustes_stretched_example(): + # Try again with a target with a stretched y axis. + A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float) + B_orig = np.array([[3, 40], [1, 0], [3, -40], [5, 0]], dtype=float) + A, A_mu = _centered(A_orig) + B, B_mu = _centered(B_orig) + R, s = orthogonal_procrustes(A, B) + scale = s / np.square(norm(A)) + B_approx = scale * np.dot(A, R) + B_mu + expected = np.array([[3, 21], [-18, 0], [3, -21], [24, 0]], dtype=float) + assert_allclose(B_approx, expected, atol=1e-8) + # Check disparity symmetry. 
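+    # Disparity is the squared Frobenius error of the fit normalized by
+    # the squared norm of the centered data; it should come out the same
+    # whichever of the two point sets is mapped onto the other.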
+ expected_disparity = 0.4501246882793018 + AB_disparity = np.square(norm(B_approx - B_orig) / norm(B)) + assert_allclose(AB_disparity, expected_disparity) + R, s = orthogonal_procrustes(B, A) + scale = s / np.square(norm(B)) + A_approx = scale * np.dot(B, R) + A_mu + BA_disparity = np.square(norm(A_approx - A_orig) / norm(A)) + assert_allclose(BA_disparity, expected_disparity) + + +def test_orthogonal_procrustes_skbio_example(): + # This transformation is also exact. + # It uses translation, scaling, and reflection. + # + # | + # | a + # | b + # | c d + # --+--------- + # | + # | w + # | + # | x + # | + # | z y + # | + # + A_orig = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], dtype=float) + B_orig = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float) + B_standardized = np.array([ + [-0.13363062, 0.6681531], + [-0.13363062, 0.13363062], + [-0.13363062, -0.40089186], + [0.40089186, -0.40089186]]) + A, A_mu = _centered(A_orig) + B, B_mu = _centered(B_orig) + R, s = orthogonal_procrustes(A, B) + scale = s / np.square(norm(A)) + B_approx = scale * np.dot(A, R) + B_mu + assert_allclose(B_approx, B_orig) + assert_allclose(B / norm(B), B_standardized) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_procrustes.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_procrustes.pyc new file mode 100644 index 0000000..3bf1518 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_procrustes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_sketches.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_sketches.py new file mode 100644 index 0000000..804872a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_sketches.py @@ -0,0 +1,61 @@ +"""Tests for _sketches.py.""" + +from __future__ import division, print_function, absolute_import +import numpy as np +from scipy.linalg import clarkson_woodruff_transform + +from numpy.testing import assert_ + + +def make_random_dense_gaussian_matrix(n_rows, n_columns, mu=0, sigma=0.01): + """ + Make some random data with Gaussian distributed values + """ + np.random.seed(142352345) + res = np.random.normal(mu, sigma, n_rows*n_columns) + return np.reshape(res, (n_rows, n_columns)) + + +class TestClarksonWoodruffTransform(object): + """ + Testing the Clarkson Woodruff Transform + """ + # Big dense matrix dimensions + n_matrix_rows = 2000 + n_matrix_columns = 100 + + # Sketch matrix dimensions + n_sketch_rows = 100 + + # Error threshold + threshold = 0.1 + + dense_big_matrix = make_random_dense_gaussian_matrix(n_matrix_rows, + n_matrix_columns) + + def test_sketch_dimensions(self): + sketch = clarkson_woodruff_transform(self.dense_big_matrix, + self.n_sketch_rows) + + assert_(sketch.shape == (self.n_sketch_rows, + self.dense_big_matrix.shape[1])) + + def test_sketch_rows_norm(self): + # Given the probabilistic nature of the sketches + # we run the 'test' multiple times and check that + # we pass all/almost all the tries + n_errors = 0 + + seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431, + 1302443994, 1521083269, 1501189312, 1126232505, 1533465685] + + for seed_ in seeds: + sketch = clarkson_woodruff_transform(self.dense_big_matrix, + self.n_sketch_rows, seed_) + + # We could use other norms (like L2) + err = np.linalg.norm(self.dense_big_matrix) - np.linalg.norm(sketch) + if err > self.threshold: + n_errors += 1 + + assert_(n_errors == 0) diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_sketches.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_sketches.pyc new file mode 100644 index 0000000..91773a6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_sketches.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solve_toeplitz.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solve_toeplitz.py new file mode 100644 index 0000000..2363b1a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solve_toeplitz.py @@ -0,0 +1,123 @@ +"""Test functions for linalg._solve_toeplitz module +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.linalg._solve_toeplitz import levinson +from scipy.linalg import solve, toeplitz, solve_toeplitz +from numpy.testing import assert_equal, assert_allclose + +import pytest +from pytest import raises as assert_raises + + +def test_solve_equivalence(): + # For toeplitz matrices, solve_toeplitz() should be equivalent to solve(). + random = np.random.RandomState(1234) + for n in (1, 2, 3, 10): + c = random.randn(n) + if random.rand() < 0.5: + c = c + 1j * random.randn(n) + r = random.randn(n) + if random.rand() < 0.5: + r = r + 1j * random.randn(n) + y = random.randn(n) + if random.rand() < 0.5: + y = y + 1j * random.randn(n) + + # Check equivalence when both the column and row are provided. + actual = solve_toeplitz((c,r), y) + desired = solve(toeplitz(c, r=r), y) + assert_allclose(actual, desired) + + # Check equivalence when the column is provided but not the row. + actual = solve_toeplitz(c, b=y) + desired = solve(toeplitz(c), y) + assert_allclose(actual, desired) + + +def test_multiple_rhs(): + random = np.random.RandomState(1234) + c = random.randn(4) + r = random.randn(4) + for offset in [0, 1j]: + for yshape in ((4,), (4, 3), (4, 3, 2)): + y = random.randn(*yshape) + offset + actual = solve_toeplitz((c,r), b=y) + desired = solve(toeplitz(c, r=r), y) + assert_equal(actual.shape, yshape) + assert_equal(desired.shape, yshape) + assert_allclose(actual, desired) + + +def test_native_list_arguments(): + c = [1,2,4,7] + r = [1,3,9,12] + y = [5,1,4,2] + actual = solve_toeplitz((c,r), y) + desired = solve(toeplitz(c, r=r), y) + assert_allclose(actual, desired) + + +def test_zero_diag_error(): + # The Levinson-Durbin implementation fails when the diagonal is zero. + random = np.random.RandomState(1234) + n = 4 + c = random.randn(n) + r = random.randn(n) + y = random.randn(n) + c[0] = 0 + assert_raises(np.linalg.LinAlgError, + solve_toeplitz, (c, r), b=y) + + +def test_wikipedia_counterexample(): + # The Levinson-Durbin implementation also fails in other cases. + # This example is from the talk page of the wikipedia article. 
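+    # toeplitz([2, 2, 1]) itself is nonsingular, but its 2x2 leading
+    # principal minor [[2, 2], [2, 2]] is singular, which breaks the
+    # Levinson recursion even though a direct solve would succeed.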
+ random = np.random.RandomState(1234) + c = [2, 2, 1] + y = random.randn(3) + assert_raises(np.linalg.LinAlgError, solve_toeplitz, c, b=y) + + +def test_reflection_coeffs(): + # check that that the partial solutions are given by the reflection + # coefficients + + random = np.random.RandomState(1234) + y_d = random.randn(10) + y_z = random.randn(10) + 1j + reflection_coeffs_d = [1] + reflection_coeffs_z = [1] + for i in range(2, 10): + reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1]) + reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1]) + + y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1])) + y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1])) + _, ref_d = levinson(y_d_concat, b=y_d[1:]) + _, ref_z = levinson(y_z_concat, b=y_z[1:]) + + assert_allclose(reflection_coeffs_d, ref_d[:-1]) + assert_allclose(reflection_coeffs_z, ref_z[:-1]) + + +@pytest.mark.xfail(reason='Instability of Levinson iteration') +def test_unstable(): + # this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of + # I. Gohbert, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with + # Partial Pivoting for Matrices with Displacement Structure" + # Mathematics of Computation, 64, 212 (1995), pp 1557-1576 + # which can be unstable for levinson recursion. + + # other fast toeplitz solvers such as GKO or Burg should be better. + random = np.random.RandomState(1234) + n = 100 + c = 0.9 ** (np.arange(n)**2) + y = random.randn(n) + + solution1 = solve_toeplitz(c, b=y) + solution2 = solve(toeplitz(c), y) + + assert_allclose(solution1, solution2) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solve_toeplitz.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solve_toeplitz.pyc new file mode 100644 index 0000000..c9b368f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solve_toeplitz.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solvers.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solvers.py new file mode 100644 index 0000000..7299770 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solvers.py @@ -0,0 +1,767 @@ +from __future__ import division, print_function, absolute_import + +import os +import numpy as np + +from numpy.testing import assert_array_almost_equal +import pytest +from pytest import raises as assert_raises + +from scipy.linalg import solve_sylvester +from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov +from scipy.linalg import solve_continuous_are, solve_discrete_are +from scipy.linalg import block_diag, solve, LinAlgError + + +def _load_data(name): + """ + Load npz data file under data/ + Returns a copy of the data, rather than keeping the npz file open. + """ + filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'data', name) + with np.load(filename) as f: + return dict(f.items()) + + +class TestSolveLyapunov(object): + + cases = [ + (np.array([[1, 2], [3, 4]]), + np.array([[9, 10], [11, 12]])), + # a, q all complex. + (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), + np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), + # a real; q complex. + (np.array([[1.0, 2.0], [3.0, 5.0]]), + np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), + # a complex; q real. 
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), + np.array([[2.0, 2.0], [-1.0, 2.0]])), + # An example from Kitagawa, 1977 + (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3], + [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]), + np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3], + [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])), + # Companion matrix example. a complex; q real; a.shape[0] = 11 + (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j, + 0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j, + 0.010+0.j], + [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, + 0.000+0.j]]), + np.eye(11)), + # https://github.com/scipy/scipy/issues/4176 + (np.matrix([[0, 1], [-1/2, -1]]), + (np.matrix([0, 3]).T * np.matrix([0, 3]).T.T)), + # https://github.com/scipy/scipy/issues/4176 + (np.matrix([[0, 1], [-1/2, -1]]), + (np.array(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T))), + ] + + def test_continuous_squareness_and_shape(self): + nsq = np.ones((3, 2)) + sq = np.eye(3) + assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq) + assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq) + assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2)) + + def check_continuous_case(self, a, q): + x = solve_continuous_lyapunov(a, q) + assert_array_almost_equal( + np.dot(a, x) + np.dot(x, a.conj().transpose()), q) + + def check_discrete_case(self, a, q, method=None): + x = solve_discrete_lyapunov(a, q, method=method) + assert_array_almost_equal( + np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q) + + def test_cases(self): + for case in self.cases: + self.check_continuous_case(case[0], case[1]) + self.check_discrete_case(case[0], case[1]) + self.check_discrete_case(case[0], case[1], method='direct') + self.check_discrete_case(case[0], case[1], method='bilinear') + + +def test_solve_continuous_are(): + mat6 = _load_data('carex_6_data.npz') + mat15 = _load_data('carex_15_data.npz') + mat18 = _load_data('carex_18_data.npz') + mat19 = _load_data('carex_19_data.npz') + mat20 = _load_data('carex_20_data.npz') + cases = [ + # Carex examples taken from (with default parameters): + # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark + # Examples for the Numerical Solution of Algebraic Riccati + # Equations II: Continuous-Time Case', Tech. Report SPC 95_23, + # Fak. 
f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995. + # + # The format of the data is (a, b, q, r, knownfailure), where + # knownfailure is None if the test passes or a string + # indicating the reason for failure. + # + # Test Case 0: carex #1 + (np.diag([1.], 1), + np.array([[0], [1]]), + block_diag(1., 2.), + 1, + None), + # Test Case 1: carex #2 + (np.array([[4, 3], [-4.5, -3.5]]), + np.array([[1], [-1]]), + np.array([[9, 6], [6, 4.]]), + 1, + None), + # Test Case 2: carex #3 + (np.array([[0, 1, 0, 0], + [0, -1.89, 0.39, -5.53], + [0, -0.034, -2.98, 2.43], + [0.034, -0.0011, -0.99, -0.21]]), + np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]), + np.array([[2.313, 2.727, 0.688, 0.023], + [2.727, 4.271, 1.148, 0.323], + [0.688, 1.148, 0.313, 0.102], + [0.023, 0.323, 0.102, 0.083]]), + np.eye(2), + None), + # Test Case 3: carex #4 + (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0], + [0.522, -1.051, 0.596, 0, 0, 0, 0, 0], + [0, 0.522, -1.118, 0.596, 0, 0, 0, 0], + [0, 0, 0.522, -1.548, 0.718, 0, 0, 0], + [0, 0, 0, 0.922, -1.64, 0.799, 0, 0], + [0, 0, 0, 0, 0.922, -1.721, 0.901, 0], + [0, 0, 0, 0, 0, 0.922, -1.823, 1.021], + [0, 0, 0, 0, 0, 0, 0.922, -1.943]]), + np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00], + [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]] + ).T * 0.001, + np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1], + [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], + [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0], + [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]), + np.eye(2), + None), + # Test Case 4: carex #5 + (np.array( + [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870], + [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970], + [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680], + [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390], + [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400], + [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.], + [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0], + [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0], + [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]), + np.array([[0.010, -0.011, -0.151], + [0.003, -0.021, 0.000], + [0.009, -0.059, 0.000], + [0.024, -0.162, 0.000], + [0.068, -0.445, 0.000], + [0.000, 0.000, 0.000], + [0.000, 0.000, 0.000], + [0.000, 0.000, 0.000], + [0.000, 0.000, 0.000]]), + np.eye(9), + np.eye(3), + None), + # Test Case 5: carex #6 + (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None), + # Test Case 6: carex #7 + (np.array([[1, 0], [0, -2.]]), + np.array([[1e-6], [0]]), + np.ones((2, 2)), + 1., + 'Bad residual accuracy'), + # Test Case 7: carex #8 + (block_diag(-0.1, -0.02), + np.array([[0.100, 0.000], [0.001, 0.010]]), + np.array([[100, 1000], [1000, 10000]]), + np.ones((2, 2)) + block_diag(1e-6, 0), + None), + # Test Case 8: carex #9 + (np.array([[0, 1e6], [0, 0]]), + np.array([[0], [1.]]), + np.eye(2), + 1., + None), + # Test Case 9: carex #10 + (np.array([[1.0000001, 1], [1., 1.0000001]]), + np.eye(2), + np.eye(2), + np.eye(2), + None), + # Test Case 10: carex #11 + (np.array([[3, 1.], [4, 2]]), + np.array([[1], [1]]), + np.array([[-11, -5], [-5, -2.]]), + 1., + None), + # Test Case 11: carex #12 + (np.array([[7000000., 2000000., -0.], + [2000000., 6000000., -2000000.], + [0., -2000000., 5000000.]]) / 3, + np.eye(3), + np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot( + np.diag([1e-6, 1, 1e6])).dot( + 
np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9, + np.eye(3) * 1e6, + 'Bad Residual Accuracy'), + # Test Case 12: carex #13 + (np.array([[0, 0.4, 0, 0], + [0, 0, 0.345, 0], + [0, -0.524e6, -0.465e6, 0.262e6], + [0, 0, 0, -1e6]]), + np.array([[0, 0, 0, 1e6]]).T, + np.diag([1, 0, 1, 0]), + 1., + None), + # Test Case 13: carex #14 + (np.array([[-1e-6, 1, 0, 0], + [-1, -1e-6, 0, 0], + [0, 0, 1e-6, 1], + [0, 0, -1, 1e-6]]), + np.ones((4, 1)), + np.ones((4, 4)), + 1., + None), + # Test Case 14: carex #15 + (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None), + # Test Case 15: carex #16 + (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90( + block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1), + np.eye(64), + np.eye(64), + np.eye(64), + None), + # Test Case 16: carex #17 + (np.diag(np.ones((20, )), 1), + np.flipud(np.eye(21, 1)), + np.eye(21, 1) * np.eye(21, 1).T, + 1, + 'Bad Residual Accuracy'), + # Test Case 17: carex #18 + (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None), + # Test Case 18: carex #19 + (mat19['A'], mat19['B'], mat19['Q'], mat19['R'], + 'Bad Residual Accuracy'), + # Test Case 19: carex #20 + (mat20['A'], mat20['B'], mat20['Q'], mat20['R'], + 'Bad Residual Accuracy') + ] + # Makes the minimum precision requirements customized to the test. + # Here numbers represent the number of decimals that agrees with zero + # matrix when the solution x is plugged in to the equation. + # + # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2 + # + # If the test is failing use "None" for that entry. + # + min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14, + None, 9, 14, 13, 14, None, 12, None, None) + + def _test_factory(case, dec): + """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true""" + a, b, q, r, knownfailure = case + if knownfailure: + pytest.xfail(reason=knownfailure) + + x = solve_continuous_are(a, b, q, r) + res = x.dot(a) + a.conj().T.dot(x) + q + out_fact = x.dot(b) + res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T)) + assert_array_almost_equal(res, np.zeros_like(res), decimal=dec) + + for ind, case in enumerate(cases): + _test_factory(case, min_decimal[ind]) + + +def test_solve_discrete_are(): + + cases = [ + # Darex examples taken from (with default parameters): + # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark + # Examples for the Numerical Solution of Algebraic Riccati + # Equations II: Discrete-Time Case', Tech. Report SPC 95_23, + # Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995. + # [2] T. GUDMUNDSSON, C. KENNEY, A.J. LAUB: 'Scaling of the + # Discrete-Time Algebraic Riccati Equation to Enhance Stability + # of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4) + # + # The format of the data is (a, b, q, r, knownfailure), where + # knownfailure is None if the test passes or a string + # indicating the reason for failure. 
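+        # Cases 0-2 add complex-valued combinations that the darex set does
+        # not cover; case 3 is a user-reported regression (gh-2251).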
+ # + # TEST CASE 0 : Complex a; real b, q, r + (np.array([[2, 1-2j], [0, -3j]]), + np.array([[0], [1]]), + np.array([[1, 0], [0, 2]]), + np.array([[1]]), + None), + # TEST CASE 1 :Real a, q, r; complex b + (np.array([[2, 1], [0, -1]]), + np.array([[-2j], [1j]]), + np.array([[1, 0], [0, 2]]), + np.array([[1]]), + None), + # TEST CASE 2 : Real a, b; complex q, r + (np.array([[3, 1], [0, -1]]), + np.array([[1, 2], [1, 3]]), + np.array([[1, 1+1j], [1-1j, 2]]), + np.array([[2, -2j], [2j, 3]]), + None), + # TEST CASE 3 : User-reported gh-2251 (Trac #1732) + (np.array([[0.63399379, 0.54906824, 0.76253406], + [0.5404729, 0.53745766, 0.08731853], + [0.27524045, 0.84922129, 0.4681622]]), + np.array([[0.96861695], [0.05532739], [0.78934047]]), + np.eye(3), + np.eye(1), + None), + # TEST CASE 4 : darex #1 + (np.array([[4, 3], [-4.5, -3.5]]), + np.array([[1], [-1]]), + np.array([[9, 6], [6, 4]]), + np.array([[1]]), + None), + # TEST CASE 5 : darex #2 + (np.array([[0.9512, 0], [0, 0.9048]]), + np.array([[4.877, 4.877], [-1.1895, 3.569]]), + np.array([[0.005, 0], [0, 0.02]]), + np.array([[1/3, 0], [0, 3]]), + None), + # TEST CASE 6 : darex #3 + (np.array([[2, -1], [1, 0]]), + np.array([[1], [0]]), + np.array([[0, 0], [0, 1]]), + np.array([[0]]), + None), + # TEST CASE 7 : darex #4 (skipped the gen. Ric. term S) + (np.array([[0, 1], [0, -1]]), + np.array([[1, 0], [2, 1]]), + np.array([[-4, -4], [-4, 7]]) * (1/11), + np.array([[9, 3], [3, 1]]), + None), + # TEST CASE 8 : darex #5 + (np.array([[0, 1], [0, 0]]), + np.array([[0], [1]]), + np.array([[1, 2], [2, 4]]), + np.array([[1]]), + None), + # TEST CASE 9 : darex #6 + (np.array([[0.998, 0.067, 0, 0], + [-.067, 0.998, 0, 0], + [0, 0, 0.998, 0.153], + [0, 0, -.153, 0.998]]), + np.array([[0.0033, 0.0200], + [0.1000, -.0007], + [0.0400, 0.0073], + [-.0028, 0.1000]]), + np.array([[1.87, 0, 0, -0.244], + [0, 0.744, 0.205, 0], + [0, 0.205, 0.589, 0], + [-0.244, 0, 0, 1.048]]), + np.eye(2), + None), + # TEST CASE 10 : darex #7 + (np.array([[0.984750, -.079903, 0.0009054, -.0010765], + [0.041588, 0.998990, -.0358550, 0.0126840], + [-.546620, 0.044916, -.3299100, 0.1931800], + [2.662400, -.100450, -.9245500, -.2632500]]), + np.array([[0.0037112, 0.0007361], + [-.0870510, 9.3411e-6], + [-1.198440, -4.1378e-4], + [-3.192700, 9.2535e-4]]), + np.eye(4)*1e-2, + np.eye(2), + None), + # TEST CASE 11 : darex #8 + (np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180], + [1.0000000, 0.6000000, 0.8000000, 3.3999820], + [0.0000000, 1.0000000, 1.8000000, 3.7999820], + [0.0000000, 0.0000000, 0.0000000, -0.9999820]]), + np.array([[1.0, -1.0, -1.0, -1.0], + [0.0, 1.0, -1.0, -1.0], + [0.0, 0.0, 1.0, -1.0], + [0.0, 0.0, 0.0, 1.0]]), + np.array([[2, 1, 3, 6], + [1, 2, 2, 5], + [3, 2, 6, 11], + [6, 5, 11, 22]]), + np.eye(4), + None), + # TEST CASE 12 : darex #9 + (np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190], + [40.8490, 41.3170, 16.0840, 4.4679, 1.1971], + [12.2170, 26.3260, 36.1490, 15.9300, 12.3830], + [4.1118, 12.8580, 27.2090, 21.4420, 40.9760], + [0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01, + np.array([[0.0434, -0.0122], + [2.6606, -1.0453], + [3.7530, -5.5100], + [3.6076, -6.6000], + [0.4617, -0.9148]]) * 0.01, + np.eye(5), + np.eye(2), + None), + # TEST CASE 13 : darex #10 + (np.kron(np.eye(2), np.diag([1, 1], k=1)), + np.kron(np.eye(2), np.array([[0], [0], [1]])), + np.array([[1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, -1, 0], + [0, 0, 0, -1, 1, 0], + [0, 0, 0, 0, 0, 0]]), + np.array([[3, 0], [0, 1]]), + None), + # TEST 
+    (0.001 * np.array(
+        [[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
+         [76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
+         [-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
+         [-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
+         [-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
+         [-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
+         [-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
+         [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
+         [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
+     np.array([[4.7600, -0.5701, -83.6800],
+               [0.8790, -4.7730, -2.7300],
+               [1.4820, -13.1200, 8.8760],
+               [3.8920, -35.1300, 24.8000],
+               [10.3400, -92.7500, 66.8000],
+               [7.2030, -61.5900, 38.3400],
+               [4.4540, -36.8300, 20.2900],
+               [1.9710, -15.5400, 6.9370],
+               [3.7730, -30.2800, 14.6900]]) * 0.001,
+     np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
+     np.eye(3),
+     None),
+    # TEST CASE 15 : darex #12 - numerically least accurate example
+    (np.array([[0, 1e6], [0, 0]]),
+     np.array([[0], [1]]),
+     np.eye(2),
+     np.array([[1]]),
+     None),
+    # TEST CASE 16 : darex #13
+    (np.array([[16, 10, -2],
+               [10, 13, -8],
+               [-2, -8, 7]]) * (1/9),
+     np.eye(3),
+     1e6 * np.eye(3),
+     1e6 * np.eye(3),
+     None),
+    # TEST CASE 17 : darex #14
+    (np.array([[1 - 1/1e8, 0, 0, 0],
+               [1, 0, 0, 0],
+               [0, 1, 0, 0],
+               [0, 0, 1, 0]]),
+     np.array([[1e-08], [0], [0], [0]]),
+     np.diag([0, 0, 0, 1]),
+     np.array([[0.25]]),
+     None),
+    # TEST CASE 18 : darex #15
+    (np.eye(100, k=1),
+     np.flipud(np.eye(100, 1)),
+     np.eye(100),
+     np.array([[1]]),
+     None)
+    ]
+
+    # Minimum precision requirements customized to each test case.
+    # Each number is the number of decimal places to which the residual
+    # agrees with the zero matrix when the solution x is plugged into
+    # the equation, e.g.
+    #
+    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
+    #
+    # If the test is failing, use "None" for that entry.
+    #
+    min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 15, 13,
+                   14, 13, 13, 14, 12, 2, 5, 6, 10)
+
+    def _test_factory(case, dec):
+        """Checks if 0 = A'XA - X - (A'XB)(R+B'XB)^-1(B'XA) + Q is true"""
+        a, b, q, r, knownfailure = case
+        if knownfailure:
+            pytest.xfail(reason=knownfailure)
+
+        x = solve_discrete_are(a, b, q, r)
+        res = a.conj().T.dot(x.dot(a)) - x + q
+        res -= a.conj().T.dot(x.dot(b)).dot(
+                    solve(r+b.conj().T.dot(x.dot(b)), b.conj().T).dot(x.dot(a))
+                )
+        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+    for ind, case in enumerate(cases):
+        _test_factory(case, min_decimal[ind])
+
+    # An infeasible example taken from https://arxiv.org/abs/1505.04861v1
+    A = np.triu(np.ones((3, 3)))
+    A[0, 1] = -1
+    B = np.array([[1, 1, 0], [0, 0, 1]]).T
+    Q = -2*np.ones_like(A) + np.diag([8, -1, -1.9])
+    R = np.diag([-10, 0.1])
+    assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R)
+
+
+def test_solve_generalized_continuous_are():
+    cases = [
+        # Two random examples that differ only by the s term, in the
+        # absence of any literature with demanding examples.
+        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
+                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+         np.array([[3.815585e-01, 1.868726e-01],
+                   [7.655168e-01, 4.897644e-01],
+                   [7.951999e-01, 4.455862e-01]]),
+         np.eye(3),
+         np.eye(2),
+         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
+                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+         np.zeros((3, 2)),
+         None),
+        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
+                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+         np.array([[3.815585e-01, 1.868726e-01],
+                   [7.655168e-01, 4.897644e-01],
+                   [7.951999e-01, 4.455862e-01]]),
+         np.eye(3),
+         np.eye(2),
+         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
+                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+         np.ones((3, 2)),
+         None)
+        ]
+
+    min_decimal = (10, 10)
+
+    def _test_factory(case, dec):
+        """Checks if 0 = A'XE + E'XA - (E'XB+S)(R)^-1(E'XB+S)' + Q is true"""
+        a, b, q, r, e, s, knownfailure = case
+        if knownfailure:
+            pytest.xfail(reason=knownfailure)
+
+        x = solve_continuous_are(a, b, q, r, e, s)
+        res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q
+        out_fact = e.conj().T.dot(x).dot(b) + s
+        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
+        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+    for ind, case in enumerate(cases):
+        _test_factory(case, min_decimal[ind])
+
+
+def test_solve_generalized_discrete_are():
+    mat20170120 = _load_data('gendare_20170120_data.npz')
+
+    cases = [
+        # Two random examples that differ only by the s term, in the
+        # absence of any literature with demanding examples.
+        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
+                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+         np.array([[3.815585e-01, 1.868726e-01],
+                   [7.655168e-01, 4.897644e-01],
+                   [7.951999e-01, 4.455862e-01]]),
+         np.eye(3),
+         np.eye(2),
+         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
+                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+         np.zeros((3, 2)),
+         None),
+        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
+                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+         np.array([[3.815585e-01, 1.868726e-01],
+                   [7.655168e-01, 4.897644e-01],
+                   [7.951999e-01, 4.455862e-01]]),
+         np.eye(3),
+         np.eye(2),
+         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
+                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+         np.ones((3, 2)),
+         None),
+        # user-reported (under PR-6616) 20-Jan-2017
+        # tests against the case where E is None but S is provided
+        (mat20170120['A'],
+         mat20170120['B'],
+         mat20170120['Q'],
+         mat20170120['R'],
+         None,
+         mat20170120['S'],
+         None),
+        ]
+
+    min_decimal = (11, 11, 16)
+
+    def _test_factory(case, dec):
+        """Checks if 0 = A'XA - E'XE - (A'XB+S)(R+B'XB)^-1(B'XA+S') + Q is true"""
+        a, b, q, r, e, s, knownfailure = case
+        if knownfailure:
+            pytest.xfail(reason=knownfailure)
+
+        x = solve_discrete_are(a, b, q, r, e, s)
+        if e is None:
+            e = np.eye(a.shape[0])
+        if s is None:
+            s = np.zeros_like(b)
+        res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q
+        res -= (a.conj().T.dot(x.dot(b)) + s).dot(
+                    solve(r+b.conj().T.dot(x.dot(b)),
+                          (b.conj().T.dot(x.dot(a)) + s.conj().T)
+                          )
+                )
+        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+    for ind, case in enumerate(cases):
+        _test_factory(case, min_decimal[ind])
+
+
+def test_are_validate_args():
+
+    def test_square_shape():
+        nsq = np.ones((3, 2))
+        sq = np.eye(3)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, nsq, 1, 1, 1)
+            assert_raises(ValueError, x, sq, sq, nsq, 1)
+            assert_raises(ValueError, x, sq, sq, sq, nsq)
+            assert_raises(ValueError, x, sq, sq, sq, sq, nsq)
+
+    def test_compatible_sizes():
+        nsq = np.ones((3, 2))
+        sq = np.eye(4)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, sq, nsq, 1, 1)
+            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq)
+            assert_raises(ValueError, x, sq, sq, np.eye(3), sq)
+            assert_raises(ValueError, x, sq, sq, sq, np.eye(3))
+            assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3))
+
+    def test_symmetry():
+        nsym = np.arange(9).reshape(3, 3)
+        sym = np.eye(3)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, sym, sym, nsym, sym)
+            assert_raises(ValueError, x, sym, sym, sym, nsym)
+
+    def test_singularity():
+        sing = 1e12 * np.ones((3, 3))
+        sing[2, 2] -= 1
+        sq = np.eye(3)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, sq, sq, sq, sq, sing)
+
+        assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing)
+
+    def test_finiteness():
+        nm = np.ones((2, 2)) * np.nan
+        sq = np.eye(2)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, nm, sq, sq, sq)
+            assert_raises(ValueError, x, sq, nm, sq, sq)
+            assert_raises(ValueError, x, sq, sq, nm, sq)
+            assert_raises(ValueError, x, sq, sq, sq, nm)
+            assert_raises(ValueError, x, sq, sq, sq, sq, nm)
+            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm)
+
+
+class TestSolveSylvester(object):
+
+    cases = [
+        # a, b, c all real.
+        (np.array([[1, 2], [0, 4]]),
+         np.array([[5, 6], [0, 8]]),
+         np.array([[9, 10], [11, 12]])),
+        # a, b, c all real, 4x4. a and b have non-trivial 2x2 blocks in their
+        # quasi-triangular form.
+        (np.array([[1.0, 0, 0, 0],
+                   [0, 1.0, 2.0, 0.0],
+                   [0, 0, 3.0, -4],
+                   [0, 0, 2, 5]]),
+         np.array([[2.0, 0, 0, 1.0],
+                   [0, 1.0, 0.0, 0.0],
+                   [0, 0, 1.0, -1],
+                   [0, 0, 1, 1]]),
+         np.array([[1.0, 0, 0, 0],
+                   [0, 1.0, 0, 0],
+                   [0, 0, 1.0, 0],
+                   [0, 0, 0, 1.0]])),
+        # a, b, c all complex.
+        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+         np.array([[-1.0, 2j], [3.0, 4.0]]),
+         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+        # a and b real; c complex.
+        (np.array([[1.0, 2.0], [3.0, 5.0]]),
+         np.array([[-1.0, 0], [3.0, 4.0]]),
+         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+        # a and c complex; b real.
+        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+         np.array([[-1.0, 0], [3.0, 4.0]]),
+         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+        # a complex; b and c real.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), + np.array([[-1.0, 0], [3.0, 4.0]]), + np.array([[2.0, 2.0], [-1.0, 2.0]])), + # not square matrices, real + (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]), + np.array([[2, 3], [4, 5]]), + np.array([[1, 2], [3, 4], [5, 6]])), + # not square matrices, complex + (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]), + np.array([[2, 3], [4, 5-1j]]), + np.array([[1, 2j], [3, 4j], [5j, 6+7j]])), + ] + + def check_case(self, a, b, c): + x = solve_sylvester(a, b, c) + assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c) + + def test_cases(self): + for case in self.cases: + self.check_case(case[0], case[1], case[2]) + + def test_trivial(self): + a = np.array([[1.0, 0.0], [0.0, 1.0]]) + b = np.array([[1.0]]) + c = np.array([2.0, 2.0]).reshape(-1, 1) + x = solve_sylvester(a, b, c) + assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solvers.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solvers.pyc new file mode 100644 index 0000000..3da8546 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_solvers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_special_matrices.py b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_special_matrices.py new file mode 100644 index 0000000..e45e66a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_special_matrices.py @@ -0,0 +1,598 @@ +"""Tests for functions in special_matrices.py.""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import arange, add, array, eye, copy, sqrt +from numpy.testing import (assert_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose) +from pytest import raises as assert_raises + +from scipy._lib.six import xrange + +from scipy import fftpack +from scipy.special import comb +from scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie, + companion, tri, triu, tril, kron, block_diag, + helmert, hilbert, invhilbert, pascal, invpascal, dft) +from numpy.linalg import cond + + +def get_mat(n): + data = arange(n) + data = add.outer(data,data) + return data + + +class TestTri(object): + def test_basic(self): + assert_equal(tri(4),array([[1,0,0,0], + [1,1,0,0], + [1,1,1,0], + [1,1,1,1]])) + assert_equal(tri(4,dtype='f'),array([[1,0,0,0], + [1,1,0,0], + [1,1,1,0], + [1,1,1,1]],'f')) + + def test_diag(self): + assert_equal(tri(4,k=1),array([[1,1,0,0], + [1,1,1,0], + [1,1,1,1], + [1,1,1,1]])) + assert_equal(tri(4,k=-1),array([[0,0,0,0], + [1,0,0,0], + [1,1,0,0], + [1,1,1,0]])) + + def test_2d(self): + assert_equal(tri(4,3),array([[1,0,0], + [1,1,0], + [1,1,1], + [1,1,1]])) + assert_equal(tri(3,4),array([[1,0,0,0], + [1,1,0,0], + [1,1,1,0]])) + + def test_diag2d(self): + assert_equal(tri(3,4,k=2),array([[1,1,1,0], + [1,1,1,1], + [1,1,1,1]])) + assert_equal(tri(4,3,k=-2),array([[0,0,0], + [0,0,0], + [1,0,0], + [1,1,0]])) + + +class TestTril(object): + def test_basic(self): + a = (100*get_mat(5)).astype('l') + b = a.copy() + for k in range(5): + for l in range(k+1,5): + b[k,l] = 0 + assert_equal(tril(a),b) + + def test_diag(self): + a = (100*get_mat(5)).astype('f') + b = a.copy() + for k in range(5): + for l in range(k+3,5): + b[k,l] = 0 + assert_equal(tril(a,k=2),b) + b = a.copy() + for k in range(5): + for l in range(max((k-1,0)),5): + b[k,l] = 0 + assert_equal(tril(a,k=-2),b) + + 
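For orientation, here is a minimal sketch of the diagonal conventions that TestTri, TestTril and TestTriu assert, assuming only numpy and the vendored scipy.linalg above are importable (numpy's own tril/triu serve as an independent reference):

import numpy as np
from scipy.linalg import tri, tril, triu

a = np.arange(16).reshape(4, 4)

# tri(N, k=k) is ones on and below the k-th diagonal, zeros above it.
assert np.array_equal(tri(4, k=1), np.tril(np.ones((4, 4)), k=1))

# tril/triu keep entries on and below/above the k-th diagonal and zero
# the rest -- exactly what the hand-written loops in test_diag rebuild.
assert np.array_equal(tril(a, k=-1), np.tril(a, k=-1))
assert np.array_equal(triu(a, k=2), np.triu(a, k=2))

Positive k counts diagonals above the main one, negative k below it, which is why the test loops offset their ranges by k+1 and k+3.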
+class TestTriu(object): + def test_basic(self): + a = (100*get_mat(5)).astype('l') + b = a.copy() + for k in range(5): + for l in range(k+1,5): + b[l,k] = 0 + assert_equal(triu(a),b) + + def test_diag(self): + a = (100*get_mat(5)).astype('f') + b = a.copy() + for k in range(5): + for l in range(max((k-1,0)),5): + b[l,k] = 0 + assert_equal(triu(a,k=2),b) + b = a.copy() + for k in range(5): + for l in range(k+3,5): + b[l,k] = 0 + assert_equal(triu(a,k=-2),b) + + +class TestToeplitz(object): + + def test_basic(self): + y = toeplitz([1,2,3]) + assert_array_equal(y,[[1,2,3],[2,1,2],[3,2,1]]) + y = toeplitz([1,2,3],[1,4,5]) + assert_array_equal(y,[[1,4,5],[2,1,4],[3,2,1]]) + + def test_complex_01(self): + data = (1.0 + arange(3.0)) * (1.0 + 1.0j) + x = copy(data) + t = toeplitz(x) + # Calling toeplitz should not change x. + assert_array_equal(x, data) + # According to the docstring, x should be the first column of t. + col0 = t[:,0] + assert_array_equal(col0, data) + assert_array_equal(t[0,1:], data[1:].conj()) + + def test_scalar_00(self): + """Scalar arguments still produce a 2D array.""" + t = toeplitz(10) + assert_array_equal(t, [[10]]) + t = toeplitz(10, 20) + assert_array_equal(t, [[10]]) + + def test_scalar_01(self): + c = array([1,2,3]) + t = toeplitz(c, 1) + assert_array_equal(t, [[1],[2],[3]]) + + def test_scalar_02(self): + c = array([1,2,3]) + t = toeplitz(c, array(1)) + assert_array_equal(t, [[1],[2],[3]]) + + def test_scalar_03(self): + c = array([1,2,3]) + t = toeplitz(c, array([1])) + assert_array_equal(t, [[1],[2],[3]]) + + def test_scalar_04(self): + r = array([10,2,3]) + t = toeplitz(1, r) + assert_array_equal(t, [[1,2,3]]) + + +class TestHankel(object): + def test_basic(self): + y = hankel([1,2,3]) + assert_array_equal(y, [[1,2,3], [2,3,0], [3,0,0]]) + y = hankel([1,2,3], [3,4,5]) + assert_array_equal(y, [[1,2,3], [2,3,4], [3,4,5]]) + + +class TestCirculant(object): + def test_basic(self): + y = circulant([1,2,3]) + assert_array_equal(y, [[1,3,2], [2,1,3], [3,2,1]]) + + +class TestHadamard(object): + + def test_basic(self): + + y = hadamard(1) + assert_array_equal(y, [[1]]) + + y = hadamard(2, dtype=float) + assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]]) + + y = hadamard(4) + assert_array_equal(y, [[1,1,1,1], [1,-1,1,-1], [1,1,-1,-1], [1,-1,-1,1]]) + + assert_raises(ValueError, hadamard, 0) + assert_raises(ValueError, hadamard, 5) + + +class TestLeslie(object): + + def test_bad_shapes(self): + assert_raises(ValueError, leslie, [[1,1],[2,2]], [3,4,5]) + assert_raises(ValueError, leslie, [3,4,5], [[1,1],[2,2]]) + assert_raises(ValueError, leslie, [1,2], [1,2]) + assert_raises(ValueError, leslie, [1], []) + + def test_basic(self): + a = leslie([1, 2, 3], [0.25, 0.5]) + expected = array([ + [1.0, 2.0, 3.0], + [0.25, 0.0, 0.0], + [0.0, 0.5, 0.0]]) + assert_array_equal(a, expected) + + +class TestCompanion(object): + + def test_bad_shapes(self): + assert_raises(ValueError, companion, [[1,1],[2,2]]) + assert_raises(ValueError, companion, [0,4,5]) + assert_raises(ValueError, companion, [1]) + assert_raises(ValueError, companion, []) + + def test_basic(self): + c = companion([1, 2, 3]) + expected = array([ + [-2.0, -3.0], + [1.0, 0.0]]) + assert_array_equal(c, expected) + + c = companion([2.0, 5.0, -10.0]) + expected = array([ + [-2.5, 5.0], + [1.0, 0.0]]) + assert_array_equal(c, expected) + + +class TestBlockDiag: + def test_basic(self): + x = block_diag(eye(2), [[1,2], [3,4], [5,6]], [[1, 2, 3]]) + assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 
2, 0, 0, 0], + [0, 0, 3, 4, 0, 0, 0], + [0, 0, 5, 6, 0, 0, 0], + [0, 0, 0, 0, 1, 2, 3]]) + + def test_dtype(self): + x = block_diag([[1.5]]) + assert_equal(x.dtype, float) + + x = block_diag([[True]]) + assert_equal(x.dtype, bool) + + def test_mixed_dtypes(self): + actual = block_diag([[1]], [[1j]]) + desired = np.array([[1, 0], [0, 1j]]) + assert_array_equal(actual, desired) + + def test_scalar_and_1d_args(self): + a = block_diag(1) + assert_equal(a.shape, (1,1)) + assert_array_equal(a, [[1]]) + + a = block_diag([2,3], 4) + assert_array_equal(a, [[2, 3, 0], [0, 0, 4]]) + + def test_bad_arg(self): + assert_raises(ValueError, block_diag, [[[1]]]) + + def test_no_args(self): + a = block_diag() + assert_equal(a.ndim, 2) + assert_equal(a.nbytes, 0) + + def test_empty_matrix_arg(self): + # regression test for gh-4596: check the shape of the result + # for empty matrix inputs. Empty matrices are no longer ignored + # (gh-4908) it is viewed as a shape (1, 0) matrix. + a = block_diag([[1, 0], [0, 1]], + [], + [[2, 3], [4, 5], [6, 7]]) + assert_array_equal(a, [[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 0], + [0, 0, 2, 3], + [0, 0, 4, 5], + [0, 0, 6, 7]]) + + def test_zerosized_matrix_arg(self): + # test for gh-4908: check the shape of the result for + # zero-sized matrix inputs, i.e. matrices with shape (0,n) or (n,0). + # note that [[]] takes shape (1,0) + a = block_diag([[1, 0], [0, 1]], + [[]], + [[2, 3], [4, 5], [6, 7]], + np.zeros([0,2],dtype='int32')) + assert_array_equal(a, [[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 2, 3, 0, 0], + [0, 0, 4, 5, 0, 0], + [0, 0, 6, 7, 0, 0]]) + +class TestKron: + + def test_basic(self): + + a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]])) + assert_array_equal(a, array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]])) + + m1 = array([[1, 2], [3, 4]]) + m2 = array([[10], [11]]) + a = kron(m1, m2) + expected = array([[10, 20], + [11, 22], + [30, 40], + [33, 44]]) + assert_array_equal(a, expected) + + +class TestHelmert(object): + + def test_orthogonality(self): + for n in range(1, 7): + H = helmert(n, full=True) + Id = np.eye(n) + assert_allclose(H.dot(H.T), Id, atol=1e-12) + assert_allclose(H.T.dot(H), Id, atol=1e-12) + + def test_subspace(self): + for n in range(2, 7): + H_full = helmert(n, full=True) + H_partial = helmert(n) + for U in H_full[1:, :].T, H_partial.T: + C = np.eye(n) - np.ones((n, n)) / n + assert_allclose(U.dot(U.T), C) + assert_allclose(U.T.dot(U), np.eye(n-1), atol=1e-12) + + +class TestHilbert(object): + + def test_basic(self): + h3 = array([[1.0, 1/2., 1/3.], + [1/2., 1/3., 1/4.], + [1/3., 1/4., 1/5.]]) + assert_array_almost_equal(hilbert(3), h3) + + assert_array_equal(hilbert(1), [[1.0]]) + + h0 = hilbert(0) + assert_equal(h0.shape, (0,0)) + + +class TestInvHilbert(object): + + def test_basic(self): + invh1 = array([[1]]) + assert_array_equal(invhilbert(1, exact=True), invh1) + assert_array_equal(invhilbert(1), invh1) + + invh2 = array([[4, -6], + [-6, 12]]) + assert_array_equal(invhilbert(2, exact=True), invh2) + assert_array_almost_equal(invhilbert(2), invh2) + + invh3 = array([[9, -36, 30], + [-36, 192, -180], + [30, -180, 180]]) + assert_array_equal(invhilbert(3, exact=True), invh3) + assert_array_almost_equal(invhilbert(3), invh3) + + invh4 = array([[16, -120, 240, -140], + [-120, 1200, -2700, 1680], + [240, -2700, 6480, -4200], + [-140, 1680, -4200, 2800]]) + assert_array_equal(invhilbert(4, exact=True), invh4) + assert_array_almost_equal(invhilbert(4), invh4) + + invh5 = array([[25, -300, 1050, -1400, 
630], + [-300, 4800, -18900, 26880, -12600], + [1050, -18900, 79380, -117600, 56700], + [-1400, 26880, -117600, 179200, -88200], + [630, -12600, 56700, -88200, 44100]]) + assert_array_equal(invhilbert(5, exact=True), invh5) + assert_array_almost_equal(invhilbert(5), invh5) + + invh17 = array([ + [289, -41616, 1976760, -46124400, 629598060, -5540462928, + 33374693352, -143034400080, 446982500250, -1033026222800, + 1774926873720, -2258997839280, 2099709530100, -1384423866000, + 613101997800, -163493866080, 19835652870], + [-41616, 7990272, -426980160, 10627061760, -151103534400, 1367702848512, + -8410422724704, 36616806420480, -115857864064800, 270465047424000, + -468580694662080, 600545887119360, -561522320049600, 372133135180800, + -165537539406000, 44316454993920, -5395297580640], + [1976760, -426980160, 24337869120, -630981792000, 9228108708000, + -85267724461920, 532660105897920, -2348052711713280, 7504429831470000, + -17664748409880000, 30818191841236800, -39732544853164800, + 37341234283298400, -24857330514030000, 11100752642520000, + -2982128117299200, 364182586693200], + [-46124400, 10627061760, -630981792000, 16826181120000, + -251209625940000, 2358021022156800, -14914482965141760, + 66409571644416000, -214015221119700000, 507295338950400000, + -890303319857952000, 1153715376477081600, -1089119333262870000, + 727848632044800000, -326170262829600000, 87894302404608000, + -10763618673376800], + [629598060, -151103534400, 9228108708000, + -251209625940000, 3810012660090000, -36210360321495360, + 231343968720664800, -1038687206500944000, 3370739732635275000, + -8037460526495400000, 14178080368737885600, -18454939322943942000, + 17489975175339030000, -11728977435138600000, 5272370630081100000, + -1424711708039692800, 174908803442373000], + [-5540462928, 1367702848512, -85267724461920, 2358021022156800, + -36210360321495360, 347619459086355456, -2239409617216035264, + 10124803292907663360, -33052510749726468000, 79217210949138662400, + -140362995650505067440, 183420385176741672960, -174433352415381259200, + 117339159519533952000, -52892422160973595200, 14328529177999196160, + -1763080738699119840], + [33374693352, -8410422724704, 532660105897920, + -14914482965141760, 231343968720664800, -2239409617216035264, + 14527452132196331328, -66072377044391477760, 216799987176909536400, + -521925895055522958000, 928414062734059661760, -1217424500995626443520, + 1161358898976091015200, -783401860847777371200, 354015418167362952000, + -96120549902411274240, 11851820521255194480], + [-143034400080, 36616806420480, -2348052711713280, 66409571644416000, + -1038687206500944000, 10124803292907663360, -66072377044391477760, + 302045152202932469760, -995510145200094810000, 2405996923185123840000, + -4294704507885446054400, 5649058909023744614400, + -5403874060541811254400, 3654352703663101440000, + -1655137020003255360000, 450325202737117593600, -55630994283442749600], + [446982500250, -115857864064800, 7504429831470000, -214015221119700000, + 3370739732635275000, -33052510749726468000, 216799987176909536400, + -995510145200094810000, 3293967392206196062500, + -7988661659013106500000, 14303908928401362270000, + -18866974090684772052000, 18093328327706957325000, + -12263364009096700500000, 5565847995255512250000, + -1517208935002984080000, 187754605706619279900], + [-1033026222800, 270465047424000, -17664748409880000, + 507295338950400000, -8037460526495400000, 79217210949138662400, + -521925895055522958000, 2405996923185123840000, + -7988661659013106500000, 19434404971634224000000, + 
-34894474126569249192000, 46141453390504792320000, + -44349976506971935800000, 30121928988527376000000, + -13697025107665828500000, 3740200989399948902400, + -463591619028689580000], + [1774926873720, -468580694662080, + 30818191841236800, -890303319857952000, 14178080368737885600, + -140362995650505067440, 928414062734059661760, -4294704507885446054400, + 14303908928401362270000, -34894474126569249192000, + 62810053427824648545600, -83243376594051600326400, + 80177044485212743068000, -54558343880470209780000, + 24851882355348879230400, -6797096028813368678400, 843736746632215035600], + [-2258997839280, 600545887119360, -39732544853164800, + 1153715376477081600, -18454939322943942000, 183420385176741672960, + -1217424500995626443520, 5649058909023744614400, + -18866974090684772052000, 46141453390504792320000, + -83243376594051600326400, 110552468520163390156800, + -106681852579497947388000, 72720410752415168870400, + -33177973900974346080000, 9087761081682520473600, + -1129631016152221783200], + [2099709530100, -561522320049600, 37341234283298400, + -1089119333262870000, 17489975175339030000, -174433352415381259200, + 1161358898976091015200, -5403874060541811254400, + 18093328327706957325000, -44349976506971935800000, + 80177044485212743068000, -106681852579497947388000, + 103125790826848015808400, -70409051543137015800000, + 32171029219823375700000, -8824053728865840192000, + 1098252376814660067000], + [-1384423866000, 372133135180800, + -24857330514030000, 727848632044800000, -11728977435138600000, + 117339159519533952000, -783401860847777371200, 3654352703663101440000, + -12263364009096700500000, 30121928988527376000000, + -54558343880470209780000, 72720410752415168870400, + -70409051543137015800000, 48142941226076592000000, + -22027500987368499000000, 6049545098753157120000, + -753830033789944188000], + [613101997800, -165537539406000, + 11100752642520000, -326170262829600000, 5272370630081100000, + -52892422160973595200, 354015418167362952000, -1655137020003255360000, + 5565847995255512250000, -13697025107665828500000, + 24851882355348879230400, -33177973900974346080000, + 32171029219823375700000, -22027500987368499000000, + 10091416708498869000000, -2774765838662800128000, 346146444087219270000], + [-163493866080, 44316454993920, -2982128117299200, 87894302404608000, + -1424711708039692800, 14328529177999196160, -96120549902411274240, + 450325202737117593600, -1517208935002984080000, 3740200989399948902400, + -6797096028813368678400, 9087761081682520473600, + -8824053728865840192000, 6049545098753157120000, + -2774765838662800128000, 763806510427609497600, -95382575704033754400], + [19835652870, -5395297580640, 364182586693200, -10763618673376800, + 174908803442373000, -1763080738699119840, 11851820521255194480, + -55630994283442749600, 187754605706619279900, -463591619028689580000, + 843736746632215035600, -1129631016152221783200, 1098252376814660067000, + -753830033789944188000, 346146444087219270000, -95382575704033754400, + 11922821963004219300] + ]) + assert_array_equal(invhilbert(17, exact=True), invh17) + assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12) + + def test_inverse(self): + for n in xrange(1, 10): + a = hilbert(n) + b = invhilbert(n) + # The Hilbert matrix is increasingly badly conditioned, + # so take that into account in the test + c = cond(a) + assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c) + + +class TestPascal(object): + + cases = [ + (1, array([[1]]), array([[1]])), + (2, array([[1, 1], + [1, 2]]), + array([[1, 0], + [1, 1]])), + 
(3, array([[1, 1, 1], + [1, 2, 3], + [1, 3, 6]]), + array([[1, 0, 0], + [1, 1, 0], + [1, 2, 1]])), + (4, array([[1, 1, 1, 1], + [1, 2, 3, 4], + [1, 3, 6, 10], + [1, 4, 10, 20]]), + array([[1, 0, 0, 0], + [1, 1, 0, 0], + [1, 2, 1, 0], + [1, 3, 3, 1]])), + ] + + def check_case(self, n, sym, low): + assert_array_equal(pascal(n), sym) + assert_array_equal(pascal(n, kind='lower'), low) + assert_array_equal(pascal(n, kind='upper'), low.T) + assert_array_almost_equal(pascal(n, exact=False), sym) + assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low) + assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T) + + def test_cases(self): + for n, sym, low in self.cases: + self.check_case(n, sym, low) + + def test_big(self): + p = pascal(50) + assert_equal(p[-1, -1], comb(98, 49, exact=True)) + + def test_threshold(self): + # Regression test. An early version of `pascal` returned an + # array of type np.uint64 for n=35, but that data type is too small + # to hold p[-1, -1]. The second assert_equal below would fail + # because p[-1, -1] overflowed. + p = pascal(34) + assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34") + p = pascal(35) + assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 35") + + +def test_invpascal(): + + def check_invpascal(n, kind, exact): + ip = invpascal(n, kind=kind, exact=exact) + p = pascal(n, kind=kind, exact=exact) + # Matrix-multiply ip and p, and check that we get the identity matrix. + # We can't use the simple expression e = ip.dot(p), because when + # n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is + # np.int64. The product of those dtypes is np.float64, which loses + # precision when n is greater than 18. Instead we'll cast both to + # object arrays, and then multiply. + e = ip.astype(object).dot(p.astype(object)) + assert_array_equal(e, eye(n), err_msg="n=%d kind=%r exact=%r" % + (n, kind, exact)) + + kinds = ['symmetric', 'lower', 'upper'] + + ns = [1, 2, 5, 18] + for n in ns: + for kind in kinds: + for exact in [True, False]: + check_invpascal(n, kind, exact) + + ns = [19, 34, 35, 50] + for n in ns: + for kind in kinds: + check_invpascal(n, kind, True) + + +def test_dft(): + m = dft(2) + expected = array([[1.0, 1.0], [1.0, -1.0]]) + assert_array_almost_equal(m, expected) + m = dft(2, scale='n') + assert_array_almost_equal(m, expected/2.0) + m = dft(2, scale='sqrtn') + assert_array_almost_equal(m, expected/sqrt(2.0)) + + x = array([0, 1, 2, 3, 4, 5, 0, 1]) + m = dft(8) + mx = m.dot(x) + fx = fftpack.fft(x) + assert_array_almost_equal(mx, fx) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_special_matrices.pyc b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_special_matrices.pyc new file mode 100644 index 0000000..6299bf8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/linalg/tests/test_special_matrices.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/misc/__init__.py new file mode 100644 index 0000000..5a1cf07 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/misc/__init__.py @@ -0,0 +1,113 @@ +""" +========================================== +Miscellaneous routines (:mod:`scipy.misc`) +========================================== + +.. currentmodule:: scipy.misc + +Various utilities that don't have another home. 
+
+Note that Pillow (https://python-pillow.org/) is not a dependency
+of SciPy, but the image manipulation functions indicated in the list
+below are not available without it.
+
+.. autosummary::
+   :toctree: generated/
+
+   ascent - Get example image for processing
+   central_diff_weights - Weights for an n-point central m-th derivative
+   derivative - Find the n-th derivative of a function at a point
+   face - Get example image for processing
+   electrocardiogram - Load an example of a one-dimensional signal.
+
+Deprecated functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   bytescale - Byte scales an array (image) [requires Pillow]
+   fromimage - Return a copy of a PIL image as a numpy array [requires Pillow]
+   imfilter - Simple filtering of an image [requires Pillow]
+   imread - Read an image file from a filename [requires Pillow]
+   imresize - Resize an image [requires Pillow]
+   imrotate - Rotate an image counter-clockwise [requires Pillow]
+   imsave - Save an array to an image file [requires Pillow]
+   imshow - Simple showing of an image through an external viewer [requires Pillow]
+   toimage - Takes a numpy array and returns a PIL image [requires Pillow]
+
+
+Deprecated aliases:
+
+.. autosummary::
+   :toctree: generated/
+
+   comb - Combinations of N things taken k at a time, "N choose k" (imported from `scipy.special`)
+   factorial - The factorial function, ``n! = special.gamma(n+1)``
+       (imported from `scipy.special`)
+   factorial2 - Double factorial, ``n!!`` (imported from `scipy.special`)
+   factorialk - Multifactorial of order k, ``n(!!...!)`` with k '!' (imported from `scipy.special`)
+   logsumexp - Compute the log of the sum of exponentials of input elements
+       (imported from `scipy.special`)
+   pade - Pade approximation to a function as the ratio of two polynomials.
+       (imported from `scipy.interpolate`)
+   info - Get help information for a function, class, or module. (imported from `numpy`)
+   source - Print function source code. (imported from `numpy`)
+   who - Print the NumPy arrays in the given dictionary. (imported from `numpy`)
+
+"""
+
+from __future__ import division, print_function, absolute_import
+
+__all__ = ['who', 'source', 'info', 'doccer', 'pade',
+           'comb', 'factorial', 'factorial2', 'factorialk', 'logsumexp']
+
+from . import doccer
+from .common import *
+from numpy import who as _who, source as _source, info as _info
+import numpy as np
+from scipy.interpolate._pade import pade as _pade
+from scipy.special import (comb as _comb, logsumexp as _lsm,
+                           factorial as _fact, factorial2 as _fact2,
+                           factorialk as _factk)
+
+import sys
+
+_msg = ("Importing `%(name)s` from scipy.misc is deprecated in scipy 1.0.0. Use "
+        "`scipy.special.%(name)s` instead.")
+comb = np.deprecate(_comb, message=_msg % {"name": _comb.__name__})
+logsumexp = np.deprecate(_lsm, message=_msg % {"name": _lsm.__name__})
+factorial = np.deprecate(_fact, message=_msg % {"name": _fact.__name__})
+factorial2 = np.deprecate(_fact2, message=_msg % {"name": _fact2.__name__})
+factorialk = np.deprecate(_factk, message=_msg % {"name": _factk.__name__})
+
+_msg = ("Importing `pade` from scipy.misc is deprecated in scipy 1.0.0. Use "
+        "`scipy.interpolate.pade` instead.")
+pade = np.deprecate(_pade, message=_msg)
+
+_msg = ("Importing `%(name)s` from scipy.misc is deprecated in scipy 1.0.0. Use "
+        "`numpy.%(name)s` instead.")
Use " + "`numpy.%(name)s` instead.") +who = np.deprecate(_who, message=_msg % {"name": "who"}) +source = np.deprecate(_source, message=_msg % {"name": "source"}) + +@np.deprecate(message=_msg % {"name": "info.(..., toplevel='scipy')"}) +def info(object=None,maxwidth=76,output=sys.stdout,toplevel='scipy'): + return _info(object, maxwidth, output, toplevel) + + +info.__doc__ = _info.__doc__ +del sys + +try: + from .pilutil import * + from . import pilutil + __all__ += pilutil.__all__ + del pilutil +except ImportError: + pass + +from . import common +__all__ += common.__all__ +del common + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/__init__.pyc new file mode 100644 index 0000000..cb665b9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/ascent.dat b/project/venv/lib/python2.7/site-packages/scipy/misc/ascent.dat new file mode 100644 index 0000000..f360246 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/misc/ascent.dat @@ -0,0 +1,749 @@ +]q(]q(KSKSKSKSKSKSKSKRKRKRKRKRKRKRKRKRKRKSKSKSKSKSKSKSKRKRKRKRKRKRKRKRKRKUKVKUKUKUKVKVKVKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKVKTKUKVKUKUKUKUKVKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYK[KZK[KZKZKZKZK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[KZKZKZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K_K_K`K]K\K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKaKaKaKcKeKdKdKdKdKeKbK^KOKQKRKTKRKVKTKVKNKRKMKOKIKPKYKXKRKPKUK`KjK[KSKRKUK9K!K$K%K&K&K'K*K0K K +K KKKK<KHKLKEKDK9KKKK$K>KCKBKAKEK*KKKK!K)K-K(K)K-K+K"KKKK8KBK<K;K=K;K$K K$K&K-K:K,K4K:K6K/KKKK%K4K3K1K4K8K)K'K,K3K<K>K9K2K/K/K+K"KKK!K/K0K$K+K3K5K4K?KGKAK;K9K-K+K+K+K$K8KGKFKFKFKFKFKFKFKFKFKFKGK6KK$KBKIKJKJKHKHKAK9K=K=K=K<K;K=K=K=K=K<K<K<K<K<K=K<K>K<K5K-K,K4K7K5K4K4K3K1K/K(K$K,K2K3K3K3K3K3K3K1K4K3K3K3K3K3K3K3K3K3K3K@KbKsKsKtKsKsKsKsKsKsKsKsKsKsKsKuKuKsKtKvKtKsKsKsKsKtKvKtKsKsKsKuKuKsKtKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKue]q(KRKRKSKSKSKSKSKRKRKRKRKRKRKRKRKRKRKSKSKSKSKSKSKSKSKRKRKRKRKRKRKRKRKUKVKUKUKVKTKSKUKUKUKUKUKUKUKVKUKUKUKUKUKUKVKVKUKUKVKTKUKVKUKVKVKUKVKXKWKWKWKWKWKWKWKWKWKXKXKWKWKWKWKWKWKWKWKWKWKWKWKXKXKXKYKZKZKZKZKZKZKZK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKYKZKZKZKZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K_K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K`K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKcKbKbKbKdKeKdKdKdKdKdKbK^KOKRKRKUKRKVKTKUKOKRKOKSKHKJKYKXKTKOKOKJKWKTKSKSKSK/K!K%K%K%KKKKKK K KKKK<KBKGKJKGK2KKKKK)K+K+K,K0KKKKKK%K+K-K'K*K&KKKKK7K8K4K5K4K,KKKKK'K/K-K3K8K6K*KKKK&K1K0K0K5K5K"K#K'K(K0K;K8K1K.K-K'KKKK&K/K/K.K.K6K-K,K4K9K8K:K8K-K+K*K*K 
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/__init__.pyc new file mode 100644 index 0000000..cb665b9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/ascent.dat b/project/venv/lib/python2.7/site-packages/scipy/misc/ascent.dat new file mode 100644 index 0000000..f360246 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/misc/ascent.dat @@ -0,0 +1,749 @@
[749 lines of serialized (pickled) 8-bit grayscale pixel rows for the 512x512 "ascent" example image]
(KUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKVKUKUKUKUKUKUKUKUKWKWKWKWKWKWKWKVKUKWKWKXKVKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYK[KXKWKZKZK[KYKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\KZKZKZK\K]K\K\K]K\KZK\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K`K_K\K^K`K_K_K\K]K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaKaKaKbK`K_KaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKcKaKaKaKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKcKYKOKQK[K[K^K^KYK[K\KZKXK\K_K`K\K_KZK_KoKfKUKXKYKEK$K&K&K%KKK7KEK'KKKKKK2K7K9K4K5K.K.K&K/KAKDKDKAKGKCK9K;K>K=K;K9K4K4K8K/K#K$K"K4K@K?K?KBKKKOKVKYK[KNK2K.K3K3K6K-K&K"K'K.K2K3K4KHKRKYK^K^KMK@K4K.K2K6K0K%K#K$K-K/K.K,K5KMKTKVKUKSKNK7K.K0K,K(K"K!K K&K*K+K+K-KIKOKQKPKMKOK>K%K%K$K#KKK#K*K+K*K*KAKQKUK[K[KYKRK/K-KAKIKFKHKIKHKHKHKHKHKIKHKEKHKAKKK<KIKGKLKLKKKGKDK=K<K<K<K<K<K>K?K>K>K>K>K>K>K>K>K>K?K>K>K>K;K1K/K3K5K4K7K8K4K4K2K0K.KK KK(K4K5K5K5K5K5K5K5K5K5K5K5K3K3K3K3K3K2K3K0KGKlKuKuKuKuKuKuKuKuKuKwKyKyKyKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKuKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q!(KUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKVKUKTKVKUKUKVKVKUKUKUKWKWKWKWKWKWKWKVKVKWKWKWKWKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKYKYKYKYKYKZKZKZKZKXKZKZKZKZKZKZKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K[K[K[K\K\K\K\K\K\K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K^K^K^K\K^K`K_K_K^K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`KaKaKaKaKaKaK`K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKcKcKcKcKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKeKdKdKdKdKdKdKWKPKRK^K\K^K\KYK[K\KZKXK\K^K`KZK^K[KaKpKfKVKWKYK?K"K'K'K"K&K5K=KCK KKKKK&K5K5K8K7K1K-K.K$K2KAKDKCK>KJK?K:K;K?K=K9K8K5K4K5K-K'K#K!K;K>K:K>KCKIKPKVKYKXKJK0K.K1K5K5K*K'K"K)K1K3K2K6KMKTKYK]KYKJK@K2K0K2K5K.K!KK&K-K/K.K+K;KOKTKUKTKSKLK2K.K.K+K'K"K!K"K(K*K+K(K6KMKQKQKQKLKQK6K%K&K&KKKK'K*K)K(K/KKKSKVKZKZKZKFK-K+K,KCKGKHKIKHKHKHKHKHKIKHKGKHKFK7KKK@KGKHKLKLKKKHKBK=K=K=K=K<K>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K?K=K/K.K4K5K7K8K7K7K3K1K1K0K KKK%K2K5K5K5K5K5K5K5K5K4K4K2K4K4K4K4K3K4K3K1KCKjKxKuKuKuKuKuKuKuKwKxKwKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q"(KVKVKVKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKUKVKXKWKUKUKUKWKWKWKWKWKWKWKWKXKWKWKWKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KYKWKZK[KZKZK[K[K[KZKZKZKZKZKZKZK\KYKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K\K]K]K\K\K\K\K\K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K_K\K\K\K^K`K_K_K`K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbKaKaKaKaKaKaKbKbKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKcKeKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKfKdKdKdKdKdKbKTKNKRK]K\K^KZKXK[K\KZKXK[K^K`KXK]K[KbKqKbKUKXKYK7K!K'K(K K-K<K=K?KKKKKK*K;K5K4K6K0K.K-K%K:KBKBKBK?KJK<K9K;K?K=K7K7K7K2K,K+K)K'K(K?K7K"K9KGKNKSKVKYKVKFK4K0K.K6K4K)K%K$K.K4K3K2K:KRKUKYK\KRKHK>K2K.K1K3K+K#KK*K+K"K(K-KGKOKSKVKTKSK@K.K/K,K,K&K#K!K$K+K+K+K)KCKOKQKQKPKNKLK,K%K&K$KKK!K)K*K)K)K9KPKUKXKZK[KWK9K.K)K"K/KFKIKHKHKHKHKHKHKHKIKHKJKFKEK4KK 
KBKGKHKLKLKKKHKDK<K<K<K<K>K?K>K>K>K>K>K>K>K>K>K>K>K>K?K>K>K:K1K0K2K4K5K8K8K5K4K2K.K/KKKK'K3K2K2K5K5K5K5K5K3K2K2K2K2K3K5K5K5K3K4K2KBKgKwKuKuKuKuKuKuKwKxKuKuKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q#(KVKVKVKUKUKUKUKUKUKUKUKUKUKUKVKUKUKVKUKUKUKVKVKWKVKUKWKWKVKVKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYKZKZKZKYKZKZKZKZKXKYKZKZKZKZKZKZKZKZK[KZKZKZKZKZKZKZKZKZK[K\K[KZKZKZK\K\K\K\K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K_K]K]K\K\K\K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaK`K_KaK`K_K`KaKaKaKaKaKaKaKaKbKbKaKaKaKaKaKaKaKaKaKaKaKaKcKdKcKcKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKfKeKdKeKcK`KRKNKRK]K^K`KZKXK[K]KYKZK\K^KaK[K]K[KiKrK^KUKXKXK0K$K(K'K#K.K9K?K;KKKKKK-K9K5K6K4K*K.K*K$K?KEKAK?KAKHK:K9K;K?K=K7K7K8K6K-K'K*K#K,KAK:K1K@KJKHKRKYKZKUK;K.K3K0K6K1K(K#K$K1K3K4K3K@KTKSKZK[KOKBK5K0K1K5K2K(K$K"K)K$KK&K1KMKQKTKUKQKLK3K.K-K*K)K#K KK%K+K+K*K/KOKPKSKSKOKPKBK%K'K$K"KKK$K)K&K%K)KFKTKUKXKYK\KNK1K,K$K#K#K4KIKHKHKIKHKHKHKHKHKHKIKIKGKFK/KK#KCKGKIKKKLKKKIK?K>K>K<K>K?K@K@K>K>K>K>K>K>K>K>K>K>K>K>K>K@K>K1K/K3K4K5K6K7K8K6K2K/K-KKKK$K2K3K3K4K5K5K5K4K4K5K3K1K2K3K3K3K2K3K4K4K>KfKwKtKvKuKuKuKwKyKxKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q$(KVKVKVKUKUKUKUKUKUKUKUKUKUKUKUKVKUKUKUKUKUKUKVKXKVKUKWKWKUKUKUKVKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKZKZKZK[KZKZK[KZKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K\KZKZKZK\K\K\K\KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K]K\K\K\K\K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KbK`K_KbKaK_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKeKdKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKeKdKeKdK^KNKMKTK\K`K`KZKXK[K]KYK\K]K^KaK\K]K[KkKqKYKVKXKUK*K'K)K&K%K3K9K@K4KKKKKK.K4K5K9K7K,K/K(K*K?K?KCKBKDKDK9K:K;K@KBK8K2K8K8K0K$K*K#K2KAK>K=KAKIKJKVKZK\KVK8K.K1K2K9K.K&K$K&K2K3K2K5KHKTKTKZKVKMKAK3K0K5K7K.K#K K$K*K%K&K*K;KOKQKUKSKPKKK3K/K,K*K*K#K K!K)K+K+K(K7KOKRKSKQKLKOK2K"K!K#KKK K'K*K$KK0KOKUKXKXKYK^KAK(K*K$K$K"K#K:KIKHKHKIKHKHKHKHKHKIKHKIKJKHK*KK(KFKEKHKKKKKJKIKCK=K<K>K>K=K>K?K>K>K>K>K>K>K>K>K>K>K?K>K?K@K>K1K/K2K4K6K8K9K7K5K2K2K.KKKK'K4K2K3K5K5K5K5K5K5K5K3K3K2K2K3K2K3K4K6K5K=KcKwKvKvKuKuKwKyKxKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q%(KUKUKUKUKUKUKUKUKUKUKUKUKUKUKVKVKVKUKUKUKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYKZKZKZKXKXK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K]K\KZK\K\K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K]K\K\K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K_K_K_K_K_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKcKdKbKaKaKbKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKeKdKeKdK^KOKLKRK\K_K`K\K[KZK\KYK[K\K^K`K\K^K^KmKpKVKWKYKPK&K(K)K$K#K4K9K@K.KKKKK 
K3K6K7K5K3K-K,K&K-K>K?KCKBKEKAK9K=K=KBK:K6K/K2K7K/K'K)K%K8K@K;K6KAKLKWK^K_K_KPK;K4K2K4K3K+K'K$K+K3K3K2K8KKKTKWKYKRKNK>K0K2K4K7K*K#K!K&K,K.K2K+KEKTKQKSKSKRKHK/K-K+K*K'K#KK%K*K.K-K'KBKRKRKPKOKNKNK(K#K#K$KKK%K)K)K)K'K:KTKVKYK]K\KVK2K+K(K$K$K$K#K'K?KJKHKHKIKIKHKIKHKIKIKIKHKJKEK&KK.KGKHKGKJKLKKKEK>K=K=K=K<K=K?K>K>K>K>K>K>K>K>K>K>K>K>K>K?K?K:K/K-K4K4K5K6K8K7K3K2K2K,KKKK#K4K4K5K5K5K5K5K5K5K5K4K3K4K5K3K3K3K3K5K3K<K_KuKwKvKuKxKyKxKxKuKvKuKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKuKwKyKxKxKxKxKxKxKxKxKxKxKxe]q&(KUKUKUKUKUKUKUKUKUKUKUKUKVKVKVKVKVKUKUKUKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYK[KZKZKWKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[K[KZKZKZK\K\K\K[K[K\K\K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K^K`K]K]K]K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K`K`K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKbKbKbKbKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKeKeKfKcKYKOKEKOK]K_K`K\K\KZK\KYK\K\K^K_K\K^K_KpKmKVKWKZKIK$K)K)K$K$K3K9K?K&KKKKK!K2K3K4K5K3K,K+K&K3K?KAKCK?KIK>K9K>K>KBK8K5K2K5K5K+K'K(K'K<K@K<K;KDKOKXK]KbK_KLK8K6K3K/K)K'K'K$K/K3K2K5K=KPKUKXKZKPKCK:K3K4K6K3K'K$K#K,K-K1K/K/KLKRKSKTKTKSK=K.K+K+K*K&K#K K%K)K-K+K.KLKQKVKTKSKUK@K#K$K$K%KKK&K)K)K(K-KJKVKYK[K\K]KKK+K-K&K$K#K#K$K$K)KBKIKHKHKHKIKHKIKHKHKIKIKIKLKBK!KK4KHKGKJKKKJKIKGK?K<K<K=K=K?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K=K?K;K-K/K2K5K6K8K7K4K3K2K1K.KKKK%K3K4K4K5K5K5K5K5K5K4K3K4K5K3K3K3K2K/K2K4K9K\KuKwKuKwKxKxKxKvKvKvKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKvKwKyKxKxKxKxKxKxKxKxKxKxKxe]q'(KUKUKUKUKUKUKUKUKUKUKUKUKWKWKVKVKVKUKUKUKVKWKWKUKVKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYK[KXKWKWKWKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K]K\KZKZKZK\K\KZK[K]K\K\K\K\K\K\K]K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K_K\K\K\K^K`K`K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbKbKbKbK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbK`KaKeKeKeKeKeKdKdKdKdKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKgKfKdKVKLK?KOK]K]KbK]K\KYK[KYK\K]K^K^K^K[K_KsKhKVKWKXKAK!K(K)K%K*K6K;K>KKKKKK'K6K8K8K5K1K.K*K&K9KCKBK@K>KKK=K:K>K@KDK8K6K8K9K8K*K&K%K*K<K=K<K?KJKUKYKZK]K[KGK3K1K4K-K,K)K#K%K3K1K1K5K@KTKXK[K\KSKAK6K1K3K:K2K$K!K%K.K.K0K,K8KSKSKVKVKWKOK3K+K+K,K'K$K"K"K(K,K,K&K6KRKRKSKTKQKNK1K!K$K$K KK!K)K)K)K'K3KSKWKYK]KYKZK=K.K-K&K#K K#K$K$K"K-KFKIKIKGKIKHKHKIKKKKKLKIKHKKK>KKK7KHKGKHKKKLKJKDK@K<K>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K?K?K>K9K2K/K1K5K8K8K8K5K5K1K/K/KKKK+K4K4K5K5K5K5K4K4K5K2K1K3K5K4K3K2K1K3K5K3K6KWKqKuKvKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q((KUKUKUKUKUKUKUKUKVKVKVKUKVKVKVKVKVKUKUKUKVKWKWKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKYKWKWKWKYK[KXKWKYKYKYKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[KZK\K\K\K\KZKZKZK\K\KZK[K\K\K\K\K\K\K\K\K\K[K\K\K\K\K\K\K\K]K]K\K\K\K\K\K^K^K^K^K^K^K^K^K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K`KaKaKaKaK`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKcKbKaKaKbKcKcKcKcKcKdKdKdKdKcKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKfKfKeKeKRKIK8KOK]K^K`K[K\KYK[KYK\K\K_K]K\KZKdKvKdKVKXKXK<K$K)K)K#K+K4K;K<KKKKKK+K5K6K7K8K1K,K)K(K:KCK>K?KDKGK;K;K=KAKAK8K9K9K;K/K%K'K%K-K:K9K:KBKPKVK\K\K[KSK;K2K/K6K4K,K(K"K(K5K4K4K7KGKTKWKZK\KQKBK1K0K3K7K.K!KK(K/K/K0K,K?KTKTKVKVKVKKK-K,K,K-K'K$K 
K!K)K.K+K'KBKSKSKRKQKOK?K'K$K#K%KKK%K)K)K)K'K?KVKVKZK[KYKVK.K,K)K%K#K"K"K$K!K!K"K3KHKKKIKIKHKHKJKLKKKJKIKIKGKKK;KKK;KGKGKJKLKIKIKFK?K>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K?K<K.K)K1K8K7K8K5K5K4K1K3K.K"K'K.K0K0K4K5K5K5K4K5K5K4K2K3K4K4K4K4K5K4K3K3K3K5KRKsKuKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q)(KUKUKUKUKUKUKUKUKVKXKVKUKUKUKUKUKUKUKUKUKVKWKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZK[KWKWKWKYK[KXKWKWKXK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK\K\K\K[KZKZKZK\K\KZK[K]K\K\K\K\K\K\K\K\K]K\K\K\K\K\K\K\K_K_K\K\K\K\K\K\K]K`K`K`K^K\K\K\K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbK`K_K_K_K_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKaKaKaKaKaKaKaKaKcKeKdKdKaKbKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKeKdKeKdKMKDK*KNK]K_K_KYK\KYK[KYK\K\K`K^KZKYKhKwK`KVKXKVK5K%K)K(K"K-K8K>K9KKKKKK0K6K8K8K8K-K+K)K*K>KCKBK?KEKHK9K;K=KAKBK=K:K:K8K2K&K&K%K4K;K:K:KBKRKWK\K^K\KVK=K3K5K7K5K,K'K"K+K2K5K8K;KOKUKXKYKZKKK@K6K5K8K4K(K#K!K)K/K1K/K0KHKSKWKVKUKQK>K,K.K-K,K%K$KK$K*K+K'K+KHKQKRKRKSKQK9K$K#K#K%KKK%K)K)K(K,KLKVKUK[KXKZKGK.K/K'K$K$K$K#K#K K!K"K$K<KMKKKIKHKHKHKHKHKHKHKIKHKHKIK8KKK?KHKFKHKHKIKHKDK>K>K?K?K>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K?K?K>K:K.K.K3K4K5K5K5K5K4K2K0K-K,K(K+K/K3K5K5K5K5K5K5K5K3K2K3K3K2K3K5K3K3K3K3K2K3KOKqKwKxKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKxKxKxKxe]q*(KUKUKUKUKUKUKUKUKVKVKVKWKWKWKWKVKUKVKWKWKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKXKYKYKYKZKZKZKYKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[KZK[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K^K_K]K]K_K_K]K]K\K^K_K`K]K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaK`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKaKbKdKcKcKcKcKcKcKeKdKdKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKfKfKcKIKAK"KNK^K]K^KYK[KYK\KYK\K\K_K_KZKYKiKxK_KVKXKTK-K$K(K%K"K.K8K>K1KKKKKK3K5K6K5K5K/K+K&K+K@KCKCK?KGKCK7K;K=KBKDK;K:K8K5K.K%K%K%K7K<K7K;KGKWK\K_KZKVKSK?K7K6K:K7K)K%K#K/K4K6K7K=KTKUKYK\KZKIK;K6K4K7K4K&K#K$K,K0K3K/K4KPKSKTKTKVKMK3K,K-K+K(K"K"K!K(K+K,K$K4KOKRKUKTKRKPK.K$K$K&K"KKK&K)K(K%K4KVKYKYKYKZK[K5K/K+K$K$K$K#K%K"K"K#K$K KLK]KIKIKIKJKKKKKKKKKIKIKKKIKGKHK1KK!KAKGKFKHKIKJKJKCK@K>K>K?K>K?K>K>K>K>K>K>K>K>K>K>K>K?K=K=K>K?K:K/K/K4K3K6K7K8K8K5K2K1K.K*K0K4K2K3K4K5K5K5K5K5K4K4K3K3K1K3K5K3K3K3K3K3K3K4KNKrKxKvKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKvKxKxKxKxe]q+(KVKUKUKUKUKUKUKUKUKUKTKTKWKXKXKWKUKWKXKYKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZK[K[KZKZK[K[K[K[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K\K\K\K\K\K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K]K]K`K^K\K\K\K_K`K`K\K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KbK`K_KaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKcKeKdKeKdKdKeKdKeKdKeKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKcKdKgKgKfK`KFK;KKPK]K\K]KYK[KYK\KYK\K\K_K^KZK\KmKtK\KWKYKOK(K%K'K$K 
K0K7K=K*KKKKK"K6K4K4K4K5K-K+K%K0KAKCKBK>KJK?K9K:K>KFK@K:K?K8K2K-K%K%K(K;K9K5K;KIKXK]K`KXKUKJK9K5K2K7K4K'K$K(K2K5K8K6KCKVKWK[K]K\KIK2K3K3K7K-K%K"K%K.K1K4K,K<KRKWKVKVKVKHK/K.K-K*K&K%K!K"K)K+K+K%K@KRKTKVKUKTKGK%K"K"K(KKK$K*K)K(K%KCKXKXK[K\K[KTK,K.K'K$K$K$K$K%K"K#K$K"K*KkKqKSKHKIKLKLKLKLKLKIKIKLKKKHKIKIK,KK$KDKGKGKIKKKLKIKDK>K=K?K>K?K>K>K>K>K>K>K>K>K>K>K>K?K=K<K?K>K?K8K,K-K4K8K8K7K9K7K4K1K2K/K,K*K/K3K3K5K5K5K5K5K5K5K3K3K3K4K5K3K3K3K3K3K2K4K3KHKoKxKtKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKwKuKwKyKxKxe]q,(KUKUKUKUKUKUKUKUKUKUKUKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKYKWKWKWKWKWKZKZKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\KZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K\K\K\K\K\K\K\K]K`K]K\K_K_K\K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaK_K_K_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKdKdKbKbKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKgKdK]K=KYK'KLK^K]K]KYK\KYK[KYK^K[KaK\K\K]KnKpKZKWKYKJK$K&K'K#K#K5K9K?K$KKKKK(K5K2K8K8K4K*K)K$K4KDK@KBKAKHK<K9K;K?KDK;K:K>K7K6K'K&K$K/K;K8K8K<KJKYK`K`KYKXKDK6K4K4K8K,K%K"K*K4K4K5K8KIKUKYK]K\KSKFK6K1K5K5K'K#K!K(K/K0K0K-KDKSKWKXKWKXKAK,K/K-K*K$K$K"K%K+K-K)K+KJKSKTKTKRKTK<K"KK K$KK K&K*K)K(K.KOKXKNKXK]K]KCK,K.K%K"K#K$K&K$KK#K$KK@KuKsKiKPKJKLKKKKKKKLKIKHKIKIKHKHKJKEK%KK)KGKFKGKGKJKLKIKAK>K?K>K>K>K>K>K>K>K>K>K>K?K>K>K>K>K>K>K>K>K@K6K*K0K6K8K8K8K8K6K2K3K2K0K)K)K1K2K4K4K5K5K5K5K5K5K4K5K4K3K3K3K4K4K3K2K2K5K3KFKkKwKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q-(KVKVKUKVKVKUKUKVKVKUKUKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKYKXKXKWKXKXKZKYKXKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K^K_K^K]K_K_K]K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaK_K_K_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKaKaKaKdKdKaKbKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKfKdK`KAKrKPKKK_K]K]KYK\KYK[KYK^K[KaK\K\K_KrKmKXKWKYKCK$K&K'K#K%K5K:K>KKKKKK*K1K3K8K8K1K*K(K%K;KCK?KAKBKGK:K:K=KCKEK<K8K9K3K4K&K&K%K4K=K;K<K@KTK\K_K`KaKOK3K:K4K4K6K)K%K"K-K7K5K6K;KNKXK]K`K^KOK=K7K5K5K1K#K!K%K,K/K0K.K0KLKWKYKWKVKLK6K+K+K,K)K$K"K!K(K,K-K'K4KPKQKSKQKQKLK+K K"K$K!KK$K&K)K)K'K<KVKVKUK^K_KYK3K/K*K%K"K#K$K&K"K K$K#K!K^KvKrKsKgKMKKKLKKKLKLKJKIKHKIKIKIKHKJKCK%KK-KGKFKFKIKLKKKGKBK>K>K?K>K>K>K?K>K>K>K?K>K>K?K>K>K>K?K>K?K=K<K7K.K0K5K7K8K8K8K6K3K1K0K0K+K-K.K1K1K4K5K6K5K5K5K5K5K4K3K3K3K4K4K3K3K3K3K5K2KAKhKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q.(KXKWKUKWKXKVKVKXKWKUKUKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KYKWKZKZKWKXK[KZKZK[K[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\KZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K[K]K\K\K\K\K\K\K]K`K^K\K^K`K_K_K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKdKaKaKaKdKeKaKaKaKcKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdK^KaKOK^KxKTK^K]K]KYK\KYK\KYK]K[K`K]KYK_KrKlKZKWKYK<K"K&K'K#K*K6K;K9KKKKKK-K2K6K7K5K.K)K%K'K>K?K;K>KBKDK8K=K?KGKFK9K8K:K6K.K$K'K%K8K<K<K=KDKSK^KaKbKcKPK8K:K9K8K3K)K#K$K0K6K8K7K=KRKZK`K_K_KNK8K6K4K3K*K#K"K%K0K0K1K,K:KUKXKYKWKTKCK,K+K+K+K'K$K"K$K*K.K-K(K?KQKRKRKRKRKDK%K K!K%KKK%K&K&K)K(KEKWKXK]K]K\KLK.K/K%K$K#K"K%K#K!K K$K 
K3KqKtKsKsKuK`KIKJKLKKKKKLKKKHKIKLKJKHKHKJK?KKK3KFKCKHKIKJKLKIKAK>K?K>K?K?K>K?K>K>K?K>K>K@K>K>K>K?K=K?K=K<K>K:K-K-K3K7K8K8K8K6K2K3K1K/K-K-K0K2K3K4K6K5K5K5K5K5K4K2K3K3K3K3K3K3K3K3K2K5K2K=KeKwKuKuKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q/(KWKVKUKVKWKVKVKWKWKUKUKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KYKXKZKZKWKXKZKZKZKYKYKZKZKZKZKZKZKZKZKZKZKZKZK[K[KZKZKZKZKZK[K[KZKZKZKZKZKZKZK[K[KZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K^K]K\K\K\K\K]K^K^K]K_K_K_K_K_K_K_K_K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K`K`K_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKdKaKbKcKcKcKcKcKbKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKeKfKUK]K[KPKK`K\K]K]KYK\KYK\KYK]K[KaK]KZKbKtKfKWKXKXK6K$K(K&K#K*K5K:K4KKKKKK.K4K4K2K4K.K(K%K*K<K=K:K=KEKBK7K>KCKJKBK:K=K;K9K.K#K&K'K;K<K=K<KKKNK[KeKgKcKJK6K9K8K:K1K'K K(K3K5K8K6KCKUK]K`K`K[KEK8K6K2K3K)K#K"K'K0K0K2K+KCKWKXKXKYKXK?K)K+K*K)K&K#K"K(K+K-K*K*KKKRKTKTKRKSK;K#K"K&K$KK K(K'K'K%K/KPKSKXK[K[K\K=K/K.K%K$K#K#K'K#K!K"K$K KQKuKsKsKsKrKsK[KJKLKLKKKLKKKHKIKLKJKJKIKHKIK;KKK6KGKFKFKHKKKLKEK?K>K?K>K>K?K>K?K>K?K>K>K?K>K@K@K?K>K?K>K=K?K@K9K.K/K4K7K7K7K8K6K3K3K2K/K,K+K1K3K3K4K4K4K6K6K5K4K4K3K3K3K3K4K3K3K2K3K4K4K2K;KcKvKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q0(KUKUKUKUKUKUKUKWKWKUKUKUKWKWKWKWKWKWKWKUKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZK[K[K[KZKWKXK[KZK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K]K[KZKZKZKZKZKZKZKZKZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K`K_K_K_K_K_K_K`K]K\K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbKbKaK_K`KbK_K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKaKcKeKbKaKeKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKfKhKXKXK`KOKfK^K^K\K\KYK\KYK\KXK^K[KbK\KZKhKuKaKYKYKTK/K'K)K%K"K,K5K9K.KKKKK K3K4K2K6K3K(K'K%K-K>K?K;K<KEK=K7K=KEKLKBK;K:K3K1K)K$K#K,K?K=K=K:KJK[KcKfKiK\K9K1K8K6K8K+K%K$K,K4K7K8K8KIKVK\KbKbKTKCK9K4K1K/K$K#K#K-K0K0K0K/KJKWKXKZKVKVK:K*K)K)K(K$K!K!K'K*K+K%K4KOKSKSKSKRKTK+K"K$K%KKK"K(K(K)K%K;KPKRKWKZK[KWK4K4K)K$K$K$K%K(K"K#K%K!K)KiKtKsKsKsKsKtKqKVKLKLKLKLKKKHKIKLKLKLKIKHKHKJK7KKK=KGKEKHKKKNKKKDK?K>K?K?K>K>K>K>K>K?K?K>K>K>K>K>K?K>K?K?K>K>K?K2K*K0K3K5K5K8K8K5K4K2K/K1K-K.K3K3K2K2K3K5K5K5K5K5K3K2K3K3K5K4K2K3K3K3K3K5K4K>K`KtKtKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q1(KUKUKUKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKXKWKWKXKYKZK[KYKXKZKZKYKYKXKYK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKYKYKZKZKZKZKZK[K[KZK[K\K[KZK[KYK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K^K^K^K]K]K^K^K]K_K`K_K_K_K_K_K_K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaK`K_K`KaK`K`K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKcKcKdKdKdKcKbKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKfKfKeKeKhKcKXKTKcKQKTK`KYK[K^K[K]K[K]K[K^K[KaK]K[KkKsK^KZKYKQK)K(K)K&K$K1K5K:K&KKKKKK0K5K1K4K1K&K#K#K0K?K>K;K=KFK9K8K=KGKLK:K<K7K1K3K%K$K#K2K@K;K=K?KGKLKbKiKiKVK8K:K;K6K6K)K"K$K0K7K8K7K8KMK\KaK]K_KTK;K4K2K1K,K#K#K%K.K1K2K0K6KRKYKZKZKQKJK0K*K*K)K'K#K!K$K*K+K+K(KAKPKRKSKQKRKGK%K&K%K$KKK#K'K(K)K+KDKPKRKVKWKZKIK2K4K'K%K$K$K$K%K K#K%K 
KCKxKsKsKsKsKsKsKuKlKQKIKJKKKKKJKKKLKKKKKHKIKJKKKJK3KKK>KFKGKIKLKLKIKBK>K@K?K>K?K>K>K?K>K=K>K?K>K?K>K>K>K>K>K?K>K>K8K)K,K,K3K5K6K6K7K6K3K1K1K0K-K1K0K1K3K4K2K3K3K4K5K4K4K3K3K5K4K4K3K3K3K3K3K4K3K8K[KtKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q2(KUKUKUKWKXKWKWKWKWKWKXKXKWKWKWKWKWKWKWKWKXKWKXKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KYKWKWKXKZKZK[KYKWKZK[K[KZKWKYK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZK[K[KZKZKZKZKZKZKZKZK[K]K\KZK\KYK\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K_K`K]K]K`K^K\K^K`K_K_K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KbKaK_KaKbK`K_K_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKdKdKeKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKgKgKfKdKfKcK\KVKaKVKTK{KTK[K_K]K]K\K^K]K^KZKaK^K\KnKrK\KZK[KLK&K'K*K'K&K2K4K7K KKKKK%K6K3K2K1K,K'K$K#K7K=K=K<K>KDK8K;KAKLKKK:K5K5K1K.K"K$K%K7K>K;K<KCKWK^KeKiKjKNK<K>K;K9K8K(K"K%K4K8K8K5K=KTK^KaK`K_KMK3K0K0K0K,K#K"K&K0K3K4K.K?KTKYK[KYKWKAK,K*K*K)K&K#K K&K+K,K*K.KLKOKRKSKQKSK7K$K'K&K$KKK$K&K(K'K2KLKQKPKRKUKVK9K6K.K%K&K$K$K$K#KK#K%K#K\KwKsKsKsKsKsKsKsKtKfKPKJKKKLKLKLKKKLKLKHKJKLKKKIKIK/KKK@KCKDKGKLKLKIKDK?K>K?K>K>K>K>K?K?K?K>K>K>K>K>K>K>K>K?K>K=K;K;K6K&K)K5K5K5K8K7K4K3K3K1K/K-K(K2K4K2K3K3K2K4K5K5K5K3K3K5K5K5K3K3K3K3K2K2K3K2K6KWKuKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q3(KUKVKWKWKXKXKXKWKWKWKWKXKXKXKWKWKWKWKVKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKWKWKWKXKZKZK[KYKWKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K[KZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K]K_K_K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K_K`K_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKaKcKeKdKdKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKeKdKdKeKfKfKfKgKdK]K[K\K^KHK~K\KWK]K\K]K\K^K\K^K\KaK]K_KpKoK[KZKZKFK$K)K)K#K%K1K7K8KKKKKK+K6K4K5K5K,K&K%K'K;K=K:K:K=K@K6K=KCKNKHK;K9K0K,K*K#K#K&K<K=K;K<KCK`KiKdKgKeKEK<K;K9K;K3K$K#K(K:K9K5K3K@KYK^KaKbK[KAK5K2K1K/K)K#K"K+K0K4K3K/KFKXKZKWKVKQK7K*K)K(K&K%K"K"K(K*K-K(K6KRKPKUKUKPKQK)K K&K'KKK"K%K$K'K(K<KOKOKPKSKVKOK4K6K(K%K$K$K&K%K"K#K$K!K3KnKtKsKsKsKsKsKsKsKtKtKcKMKLKKKIKJKLKKKLKIKIKIKIKHKIKFK,KK$KDKDKFKKKLKLKIKCK>K?K>K?K>K>K?K?K?K>K>K>K>K>K>K>K>K>K>K>K>K?K?K4K)K-K2K5K5K6K8K5K5K3K3K0K)K&K$K!K-K3K2K3K5K5K5K3K3K5K4K3K4K5K5K4K3K3K3K5K4K5KYKuKvKuKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q4(KVKVKXKXKXKXKXKWKWKWKXKXKXKXKWKWKWKXKYKXKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKXKWKXKZKZKZKYKXKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZKZKZK[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K\K\K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKcKaKcKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKeKdKeKeKfKfKfKdK_KZK[K[KbKHKsKuKTK]K\K]K\K^K]K^K\KbK\K_KrKlKZKZKZK<K&K)K)K%K)K1K6K5KKKKKK,K2K4K5K4K)K%K"K(K<K<K:K9K@K<K5K<KHKOK?K:K=K4K,K&K"K#K*K>K<K<K<KFK\K`KfKiK]K>K6K9K:K<K1K$K#K.K;K9K5K5KJK_K`KdKdKVK<K4K2K0K.K&K$K%K/K0K2K.K4KPKYKWKWKVKIK0K)K)K(K%K$K!K#K*K+K,K'KBKSKQKTKQKOKGKK K'K$KKK'K(K'K&K,KIKLKKKPKSKWKAK1K0K%K%K#K$K'K#K"K$K$KKNKxKrKtKsKsKsKsKsKsKtKuKuK\KJKLKHKIKLKKKLKIKHKHKHKIKHKIKDK%KK*KEKEKHKJKKKKKJKCK?K>K>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K@K=K4K-K0K4K5K6K8K6K5K3K2K/K0K$KK 
KK1K3K2K5K5K5K3K3K5K4K3K4K5K5K4K3K3K3K4K5K2K3KPKrKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q5(KWKWKWKXKXKXKXKWKWKWKXKXKXKXKWKWKWKXKXKXKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KWKWKWKWKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZKZK[K]K\KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K_K\K\K\K\K\K\K]K_K_K`K]K\K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKeKbKaKdKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKeKdKfKfKfKfKeK\KTK]K_KWKaKOK`K}KUK]K\K]K]K\K^K\K]KdK]KbKqKfKZKXKYK5K#K&K&K"K(K2K6K/KKKKKK,K3K4K5K4K(K&K K+K;K9K8K7K?K8K6K@KJKRK=K;K8K1K1K'K"K#K/K=K9K;K;KLKVKQK_KhKVK@K9K:K<K=K+K#K"K2K:K8K5K9KRK`KaKaKcKPK:K3K3K/K,K$K"K%K1K3K2K-K:KSKXKVKTKSKBK,K(K)K)K&K#K K%K+K/K*K*KJKSKSKSKPKQK7KK%K*K!KKK'K&K%K'K7KNKJKKKPKQKSK7K1K*K%K"K$K&K&K"K$K$K"K(KhKuKsKtKsKsKsKsKsKsKsKsKsKpKYKKKHKJKLKLKLKIKHKHKHKIKHKHKJKCK"KK-KGKGKGKJKLKLKHKCK?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K?K>K<K=K?K>K?K>K5K-K2K3K4K5K7K8K4K4K1K0K-KK K K!K2K5K5K5K5K4K1K2K4K5K3K2K5K4K3K3K3K2K2K3K2K3KPKqKvKvKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q6(KWKWKWKWKXKXKXKWKWKWKWKXKXKWKWKWKWKWKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKXKWKXKXKYK[KYKXKYKYKXKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK[K[K\K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K^K]K^K^K^K^K\K]K_K_K_K^K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaK`KaKaKaKaKaKaKaKaKaKaKaKbKaKaKaKaKaKaKaKbKcKcKcKcKcKbKcKdKdKdKcKcKcKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKeKdKfKfKfKgKeK]KVK]KdKWK^KZKOKZKVKZK\K]K]K\K]K^K^KbK]KdKuKeKYKYKWK0K%K(K(K#K*K2K7K)KKKKK"K0K4K4K5K1K&K'K#K/K:K9K8K7K@K:K;KDKOKPK;K9K7K5K0K"K#K#K5K=K9K:K=KLKFKCKSKbKMK;K:K:K>K9K&K K$K5K:K8K6K=KYKcKfKbK`KHK6K3K2K2K*K"K"K)K2K3K1K/KFKSKXKWKTKQK9K)K*K&K%K%K#KK%K+K/K)K4KNKPKQKQKQKPK+K!K)K*KKK#K(K&K'K*K?KJKIKKKMKPKIK0K2K(K$K"K$K&K%K#K#K&K K?KuKsKtKsKsKsKsKsKsKsKsKtKsKuKpKQKIKJKJKJKJKIKIKJKIKHKIKHKIKKK>KKK7KFKFKIKJKKKKKJKCK>K?K?K>K?K?K?K?K>K>K>K>K>K>K>K?K>K=K?K>K>K?K>K5K-K/K2K4K7K8K7K7K5K2K0K/K!K KKK1K5K4K5K4K2K3K4K5K4K3K5K5K3K4K4K3K3K3K2K2K2KLKqKuKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q7(KXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZK[KWKXK[KZKZK[K[K[K[K[KZKZK[K[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK\K\KZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K`K`K`K`K`K]K]K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaK_K`KbKaKaKaKaKaKaKaKaKbKbK`KaKaKaKaKaKaKbKeKeKeKeKeKeKeKdKdKdKeKdKaKbKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKfKcKcK]K`K[KYKaKZK[K`KLKlKcKUK^K]K]K\K\K_K_KaK[KgKtK`KYK\KSK+K(K)K(K$K-K2K5K"KKKKK%K1K1K4K3K.K#K#K$K4K9K7K8K8K@K<KAKIKTKGK7K;K7K0K-KK#K%K8K<K9K;K?KAK5K?KSK^KCK5K:K9K9K2K%K!K)K;K:K8K7KEK\KcKfKcKZK@K7K4K3K1K&K K!K-K3K3K/K4KNKTKWKXKRKNK1K*K+K&K#K$K K!K'K+K+K'K@KOKMKMKMKPKFK)K)K)K(KKK'K*K'K)K.KFKIKJKLKLKQK=K2K,K&K#K"K$K'K$K$K#K&K#K]KwKsKsKsKsKsKsKsKsKsKsKsKsKsKuKkKPKIKHKHKHKHKIKLKKKHKIKIKHKHKLK8K)K7K?KGKFKGKKKLKKKHKAK=K>K?K?K?K?K?K>K>K>K>K>K>K>K>K?K?K>K>K>K>K?K=K4K-K4K2K6K8K8K9K8K5K2K/K.K 
KKK!K1K5K5K5K5K5K5K5K5K5K5K5K5K5K5K5K5K3K3K3K2K2KHKpKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q8(KVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKXKWKXKZKZKZKYKXKZKZKZKZKZKYKXKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K\K]K[KZK\K\KZK[K\K\K\K\K\K\K\K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K^K_K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaKaKaKaKaKaKaK`K`KaKaKaKaKaKaKaKaKaKbKaKaKaKaKaKaKaKbKeKcKbKdKdKdKdKdKdKdKdKdK`KaKeKdKdKdKdKdKdKdKdKdKdKdKdKeKfKfKfKeKdKeKhKjK[KVK`K]KUK\K_KXKaKKKmKlKLK]K[K[K\K_K_K_K^K\KkKsK^KZK[KMK'K)K'K%K#K+K1K7KKKKKK$K.K/K2K3K*K$K#K&K7K9K8K7K9K@K<KEKQKUKAK7K:K4K)K%KK!K'K9K:K<K=KAK:K2K@KWKTK;K9K:K:K5K+K$K!K/K:K8K9K9KLK\KaKbK_KUK:K5K3K1K-K"K K#K0K1K1K,K9KQKSKVKUKRKBK+K)K)K&K#K$KK#K)K+K*K*KIKMKLKLKLKJK3K(K)K)K!KKK&K'K#K&K8KJKIKLKKKJKLK3K3K(K$K$K#K$K%K#K#K%K#K5KrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKfKLKHKIKHKIKIKJKIKHKIKHKJKJKIKHKDKDK:KBKEKFKJKKKKKLKHK>K>K?K?K?K@K?K>K>K>K>K>K>K?K>K>K?K>K>K>K>K>K?K>K2K-K2K4K6K6K7K9K7K3K0K/K.K KKKK2K5K4K5K5K5K5K5K5K5K5K5K5K5K5K5K3K3K3K3K3K0KFKkKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q9(KUKVKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KZK[KXKWKZKZKZKZK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K\K\K[KZK\K\KZK[K]K\K\K\K\K\K]K]K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K`K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K`K^K_K`K_K_K_K_K_K_K_K_K_K_K_K_K`KbKaKaKaKaKaKaKbK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKcKaKdKeKdKdKdKdKdKdKdKbKcKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKgKgKfKfKaK`K^KVKVKaKaKVKYK`KWK`KSKXK|KRK\KZKZK]K`K_K_K\K]KoKsK\KZK[KCK#K)K(K%K%K-K1K4KKKKKK&K0K.K1K4K(K$K"K&K7K8K8K6K9K@K=KIKTKTK;K9K9K5K1K%K K!K,K;K:K=K<K?K5K6KEKZKSK5K7K:K:K3K(K#K"K5K9K7K8K=KQK[K_KaK^KMK7K1K0K0K)K#K!K)K0K/K0K-K?KPKQKRKPKOK8K*K(K(K'K$K$K!K&K+K+K(K6KKKKKLKKKLKLK/K'K)K)KKK#K(K"KK*KBKGKJKMKKKLKBK3K0K&K$K$K$K$K%K"K#K%KKLKwKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKvK\KIKIKHKHKHKHKHKIKIKHKKKLKHKIKGKCKCKAKFKGKEKGKJKLKIKGK@K<K?K>K>K>K?K>K>K>K>K>K>K?K?K>K>K>K>K>K>K>K>K;K3K.K0K1K4K5K8K7K5K2K.K/K0K KKK K1K5K4K5K5K5K5K5K5K5K5K5K5K5K5K3K3K3K3K3K4K2K@KjKvKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q:(KWKWKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKYKWKWKWKWKXKZKYKWKYKZKXKWKWKWKWKWKWKWKXKZKZKZKZKZKZKZKZKZK[KYKWKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K[KZKZKZKZK[K]K\KYK\K\K\K\K\K\K\K\K\K\K\K\K]K]K]K\K\K\K\K\K\K[K]K]K\K\K\K\K\K\K\K\K\K]K`K^K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKaKdKeKdKdKdKdKdKdKdKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKeKaKbK[KSKNKSKTKZK`KUKSK`KTK]K^KIKKgKZK\K\K^K`K_K_K^K`KqKpKYKZK^K>K$K*K(K%K(K-K0K0KKKKKK-K0K.K1K2K&K"KK)K8K8K8K5K;K?K@KJKXKQK:K9K6K2K+K"K!K"K2K;K:K;K<K;K3K8KGKVKMK;K8K9K8K2K%K#K%K7K:K:K8K?KTK\K]K^KWKCK6K2K-K/K'K#K K,K3K2K/K2KGKOKQKOKOKKK3K*K)K)K$K K!K!K&K+K*K&K>KLKKKLKJKOKDK)K)K)K'KKK$K'K K 
K*KFKEKHKKKIKLK7K3K.K&K$K$K%K&K#K#K&K$K%KfKvKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKsK[KIKIKJKIKKKKKKKKKKKKKKKHKIKGKEKJKGKDKGKFKFKHKIKJKLKFK?K>K>K>K>K?K>K>K>K>K>K>K>K>K>K?K>K<K>K?K>K?K?K<K1K/K2K3K4K5K6K8K5K1K2K0K.K!KKK!K2K5K2K3K3K4K5K5K4K3K4K5K3K3K4K4K3K3K3K3K4K1K=KeKxKwKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q;(KXKWKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKYKXKXKWKWKXKZKYKWKYKZKXKWKWKWKWKWKWKXKXKZKZKZKZKZKZKZKZKZK[KYKWKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K[KZK[K[K[K[K\K\KYK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K[K]K]K\K\K\K\K\K\K]K]K]K^K`K]K\K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKbKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKeKbK]KZKVK[KTKRKRKNKDK_KZKRK^KYKYKaKIKiKfK[K\K]K]K`K_K`K^KbKrKiKXKZK\K8K&K)K(K%K(K,K0K+KKKKKK-K/K1K1K-K'K K!K/K8K8K8K7K>K?K@KJKXKKK8K:K5K2K,K!K!K"K2K;K9K7K:K9K4K<KLKUKCK7K7K6K7K-K#K K,K:K9K:K;KDKWK[K]K[KQK=K6K3K/K-K%K#K#K.K3K3K.K7KJKIKMKKKLKCK,K(K)K(K#K"K K$K)K+K)K,KFKHKJKKKHKQK8K&K)K*K KK#K&K'K%K%K9KHKHKIKGKGKGK4K3K(K&K$K$K%K%K!K&K'K!K>KtKsKtKsKsKsKsKsKsKsKsKsKsKtKsKsKtKtKsKuKpKVKIKMKLKKKKKLKKKKKKKKKIKJKKKIKHKBKCKFKGKFKGKHKJKKKIKFK?K?K?K>K>K>K>K>K>K>K>K>K>K>K?K>K=K>K?K>K?K?K=K<K1K-K2K3K4K6K8K8K6K4K0K0K.K KKKK0K4K3K3K4K5K5K4K3K4K5K4K3K5K5K3K3K3K3K3K4K1K;KbKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q<(KXKXKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KYKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZK[KWKXK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K\K\KZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K\K\K\K\K\K\K\K\K\K_K_K_K_K`K]K\K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K^K`K_K_K_K_K_K_K_K_K_K`KbK`K_K_K_K_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKeKaK]K[K]KZKVKZKYKRKPKOK9K`K]KQKYK_KZKaKQKOKWKYKYK^K\K^K^K^KZKcKtKgKYKYKVK.K&K)K(K%K(K,K1K%KKKKK!K,K.K.K/K.K&K!K#K2K5K6K8K6K=K>KAKMKRKAK7K8K6K-K)K K K$K7K;K9K7K<K:K3K?KPKQK>K4K5K2K6K,K"K!K2K<K9K:K;KIKTKWK\K\KMK8K1K3K2K*K$K#K&K/K2K3K.K:KDKEKHKGKHK:K*K+K)K(K$K"KK(K)K+K*K5KKKHKIKIKIKHK*K(K+K+KKK$K&K&K)K-KDKHKGKIKIKNK>K3K0K'K$K$K$K%K"K!K%K'K#K[KwKsKsKsKsKsKsKsKsKsKsKsKtKvKtKsKuKvKvKuKtKkKPKLKJKHKIKLKJKHKKKLKKKKKLKJKIKDKCKFKDKGKFKFKGKJKKKLKFK?K>K?K>K>K>K>K>K>K>K>K>K?K>K>K?K>K>K>K>K>K>K?K<K3K/K1K5K5K5K8K8K4K3K2K1K-K KKK K1K5K5K3K2K5K4K2K4K5K5K5K5K5K5K4K3K3K3K2K3K3K:KaKyKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q=(KXKXKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKYKXKWKWKWKWKWKWKWKWKXKXKWKWKWKWKWKYKYKYKYKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[K[K\K\K\K\K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K[K[K\K\K\K\K\K\K\K\K\K\K\K\K_K_K_K_K_K^K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K_K_K_K_K`K`K`K_K_K`KbKaK`K`K`K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKdKbK^K\K[K[K]K[KWKWK^KYKNKTK:KTK`KTKPKZKZK`KYKJK}KWKWK^K\K_KYKZK[KgKuKbKYK[KUK+K&K)K&K$K(K+K/K!KKKKK(K0K/K,K-K*K$K!K"K2K5K5K6K7K=K<KCKQKUK@K7K8K4K+K#KK K+K9K8K8K6K<K7K4K?KQKKK8K4K6K4K5K*K"K$K7K;K9K9K?KJKPKTKWKXKGK2K1K2K0K)K%K"K)K2K2K2K/K@KAKCKEKDKDK1K*K+K(K&K#K!K!K)K*K)K'KAKJKGKHKHKJK?K'K*K*K%KKK%K&K&K%K1KEKDKFKHKHKMK4K5K+K$K$K$K$K$K"K"K%K"K2KpKtKsKsKsKsKsKsKsKsKtKtKsKtKvKtKsKtKuKuKuKtKuKiKPKJKHKIKLKJKIKKKLKKKKKLKJKIKEKCKIKIKEKFKGKFKIKLKLKHKBK=K>K?K>K>K>K>K>K>K?K?K>K>K>K>K>K>K>K>K>K>K>K>K:K2K.K3K4K4K6K7K7K7K4K1K0K0K KKK 
K1K4K3K4K5K5K3K4K4K4K5K5K5K5K4K3K3K3K3K3K3K2K9K^KuKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q>(KWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZK[KWKWKWKWKWKWKWK[KYKYK\KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K]K]K]K\K\K\K]K\K\K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K_K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbK`K_K_K`KbKaKbKaKbKbKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKdKeKeKeKaK_K^K\K]K^K]K]K[K[KZK[KZKPKTKAKIK`KTKQK_KXK]KaKGK~K^KRK^K]KaKWKXK\KiKrK`KXKZKRK(K&K)K'K!K&K*K-KKKKKK(K/K,K*K-K*K K K&K5K5K4K4K6K=K;KCKNKSK:K6K8K2K,K#KK K/K9K8K7K6K:K5K7KEKPKCK1K6K8K6K2K'K"K'K9K:K<K<K@KJKPKQKSKQK@K5K3K0K-K'K#K#K.K2K3K2K7K@K>KAK@KAKAK.K,K*K*K%K KK#K*K+K'K+KEKHKGKGKEKLK5K&K)K*K"KK"K%K%K&K&K7KEKEKHKFKHKCK3K6K'K$K$K$K$K$K"K$K$KKLKxKrKsKsKsKsKsKsKsKsKuKuKsKtKvKtKsKsKsKsKtKvKuKuKdKJKHKJKLKLKLKLKKKKKKKLKJKHKJKJKIKHKCKDKGKDKHKKKJKHKIKBK>K?K?K?K?K?K>K>K?K@K=K>K>K>K>K>K>K>K>K>K?K=K<K8K/K.K2K2K4K5K8K8K6K1K.K.K.KKKKK.K5K5K5K4K5K4K2K4K5K5K5K5K4K3K3K3K3K3K3K4K3K9KXKuKwKxKyKxKxKxKxKxKxKxKxKxKxKxKxe]q?(KWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYKYKWKWKWKWKWKXKXKWKWKWKWKWKYKYKZKZKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K[KZKZKZKZK[K[K[K[K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K^K]K\K\K]K^K^K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaK`K_KaKaKaK`K_K`K`KaKaKaKaK`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKeKdKeKeKfKeKdK^K[K\K_K_K[KZK]K[K\KZKZKZKXKTKPKJK>KaKWKQK]K]KWKdKJKiKxKRK_K]K`KVKYK[KkKoK\KYK[KGK#K'K)K'KK$K,K/KKKKKK)K.K-K-K-K%K!KK)K5K5K4K4K9K=K;KDKKKGK6K6K4K.K-KKKK1K4K4K2K6K9K4K9KEKMK<K/K4K6K3K,K$K"K/K9K:K<K<KCKJKPKPKQKJK<K8K5K2K.K'K!K%K3K2K3K1K<K?K<K>K>KAK;K,K+K*K)K%K K!K(K*K*K&K4KJKFKEKEKHKHK*K(K)K*KKK%K'K(K(K,K@KCKCKFKEKJK:K9K2K'K%K%K%K$K"K$K%K%K,KfKuKsKsKsKsKsKsKsKtKuKuKuKuKtKtKtKuKuKuKuKuKuKuKuKvK^KJKJKLKKKKKKKKKKKKKLKKKJKIKIKGKFKHKBKEKFKGKGKGKIKLKHK@K>K=K=K=K>K?K>K?K?K>K>K>K>K>K>K>K>K>K>K?K>K>K=K;K1K2K4K4K5K6K6K5K4K3K3K0K/K KKKK0K4K5K7K5K4K3K3K4K3K3K4K3K3K4K4K3K3K3K3K3K2K3KUKtKwKwKyKxKxKxKxKxKxKxKxKxKxKxe]q@(KWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZKZKWKWKWKWKWKWKWKWKWKWKWKWKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZKZKZKZKZKZKZKZKZKZK\K\K\K\K\K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K]K\K\K^K`K`K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KbKaK_KaKaKaKaK_K_K_K`KbKaKaK_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKcKdKeKdKeKbK^K^K\K]K^KXKMK<KPK^KZK\KYKZKZKXKXKNKPK9K]K\KOKWK^KXKaKRKVKK[K]K]K`KVKZK]KpKoKYKZK^K>K"K(K)K&KKK%K-KKKKK K.K-K.K.K-K"K K K,K3K5K3K0K=K;K;KEKOKEK5K7K2K*K'KKK K7K&KK)K:K6K4K<KFKCK7K2K3K5K0K*K$K!K4K;K:K:K:KEKIKMKNKMKFK8K5K3K4K-K#K K*K4K3K3K2K=K=K<K:K>KAK5K,K+K+K'K#K K#K*K(K)K'K=KGKDKCKCKJK@K*K)K'K&KKK%K&K)K(K3KCKAKBKFKFKEK5K9K-K'K#K%K%K$K!K$K'K!K@KuKsKsKsKsKsKsKsKsKuKvKuKuKvKuKsKuKvKuKvKvKuKuKuKuKvKrKZKJKLKLKKKLKLKKKKKKKLKLKIKIKKKGKIKIKFKFKFKFKFKHKLKLKHK>K<K=K<K=K?K>K>K>K?K>K>K>K>K>K>K>K>K>K>K?K?K?K?K<K1K1K5K5K5K5K5K5K5K5K0K.K/K KK 
KK-K2K5K5K4K3K3K3K3K3K3K3K3K4K5K3K3K3K3K3K3K2K2KPKuKxKxKyKxKxKxKxKxKxKxKxKxKxe]qA(KWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKXKWKWKWKWKWKWKWKWKZKZKWKWKWKWKWKWKWKWKWKWKWKWKXKXKZKZKZKZKZKZKZKZKZKZKZKZKZKZKYK[KZKZKZKZKZKZKZK[K[KZKZKZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K^K`K_K_K]K]K]K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaKaK`K`K_K`K`K`K`KaKaK_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKbKcKeKdKdKdKdKdKdKdKdKeKeKdKdKeKcKaK_K\K[K\K^K[KIK?KDKMKRK\K[KZKWKXKXKWKYKMKSK<KRK`KQKTK_KWK]KYKJKkKVKZK\KaKXKYKaKqKlKYKZK[K7K%K)K)K%K KK(K'KKKKK!K/K.K-K.K*K!KK"K1K3K3K2K2K:K8K;KCKKK=K4K5K0K,K"KK!K'K4K%KK+K=K7K3K>KIKAK1K/K5K7K1K'K#K#K7K:K:K<K?KDKIKMKMKIK>K4K1K2K4K*K"K K0K6K4K2K7K>K9K:K:K<K;K0K/K-K*K'K"KK&K)K(K(K+KFKDKCKDKBKHK7K*K)K'K KK"K(K(K)K(K;KBKBKCKCKFK@K5K7K(K'K#K"K&K#K!K$K%K"KXKvKsKsKsKsKsKsKsKsKuKvKuKuKsKtKuKuKuKtKtKuKuKuKuKuKuKuKpKVKKKMKJKLKMKKKKKLKKKKKKKKKLKJKHKLKKKHKFKEKCKGKIKNKKKGK@K>K<K=K?K>K>K>K>K>K>K>K>K>K>K>K>K>K?K=K=K>K>K?K<K/K-K4K5K5K5K6K8K6K3K1K.K.K KKKK.K4K3K4K5K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K2KOKrKwKxKxKyKyKxKxKxKxKxKxKxe]qB(KWKXKXKXKWKWKWKWKWKWKWKWKXKXKWKXKZKYKWKWKWKWKXKXKXKWKZKZKWKWKWKWKWKWKWKWKXKWKWKWKWKXKZKZKZKZKZKZKZKZKZKZKZKZKZKZKYK[KZKZKZKZKZKZKZKZKZKZK[K[K[K[K[K[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K_K_K]K\K]K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaKaK_K_K_K_K_K_K`KaKaK_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKaKbKaKcKeKdKdKdKdKdKdKdKeKdKeKeKeKbK^K_K]K]K^KZKLK>KBKPKKKUKUKYK[KZKWKXKXKWKXKPKQKCKFK`KSKQK_KZKYK_KIKRKVKUK[K`K[K\KdKtKhKZKZKXK0K'K)K)K$K&K(K.K%KKKKK$K,K-K-K.K)K!KK#K4K4K2K2K3K5K4K8K>KBK8K4K4K1K0K$KK K,K5K3K0K4K=K5K4K@KFK>K0K3K5K2K/K&K!K)K;K8K:K=K@KEKHKJKKKIK:K6K3K2K0K&K"K%K2K,K)K2K9K=K9K:K8K9K:K0K0K-K*K"K!KK&K)K)K'K3KFKCKCKDKCKBK-K(K(K'KKK%K)K)K(K,K@KAKBKCKCKGK5K5K/K&K'K$K"K&K#K#K$K"K0KnKtKsKsKsKsKsKtKtKsKtKuKuKuKsKtKvKuKuKtKtKuKuKuKuKuKuKvKwKmKQKMKJKIKJKKKLKKKKKKKLKLKLKKKHKHKLKHK;KBKFKFKGKJKMKMKFK?K<K>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K=K=K>K?K>K?K8K.K.K4K5K4K6K8K7K4K1K.K.K.K!KKKK-K3K3K5K3K3K3K3K3K3K3K3K3K3K3K3K2K2K3K3K1K1K2KKKoKxKyKxKxKxKxKxKxKxKxKxe]qC(KWKXK[KXKWKWKWKWKWKWKWKWKZKZKWKWKWKWKWKWKWKWKXK[KYKWKZKZKWKWKWKWKWKWKWKVKYKWKWKWKWKXKZKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K\K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaKaKaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKcKfKeKeKeKdKdKdKdKdKdKdKeKeKcKcKaK\K\K_KaKZKIK=KEKUKLK+KKRKYKYK[K[K[KXKXKWKYKSKMKLK<K^KWKSK]KYKUK_KKKYKyKMK[K^KYKZKdKuKcKWKXKTK+K(K)K&K"K(K)K.K"KKKKK%K*K*K,K/K)K!K K&K3K3K0K/K3K2K0K6K=K@K5K4K5K2K+K"K K#K0K5K6K2K6K=K2K:KDKDK9K/K2K4K1K)K$K#K0K;K:K:K<K@KAKDKFKFKFK=K8K2K4K-K!KK*K6K0K-K1K;K:K:K:K8K?K6K0K0K.K)K!K K 
K(K)K*K)K;KAK@KCKAKEK<K#K(K*K#KK"K'K'K#K!K3KBK?KBKBKBKBK4K8K)K%K#K$K&K%K#K$K(KKJKyKrKsKsKsKsKtKvKuKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKuKsKuKfKMKLKLKLKLKKKKKKKKKKKKKLKKKHKHKHKKK>K8KEKDKGKGKLKLKIKEK?K?K?K?K>K>K>K>K?K?K?K>K>K?K?K?K>K?K>K>K>K?K=K=K9K/K/K3K5K5K5K7K8K5K3K.K0K/KKKKK/K3K2K4K5K3K3K3K4K5K4K3K3K3K3K2K1K2K3K3K3K2K1KJKpKxKwKyKxKxKxKxKxKxKxe]qD(KXKYK[KYKWKWKXKWKXKYKXKWKZKZKXKXKXKXKXKXKXKXKYK[KYKWKZKZKWKWKWKWKWKWKWKWKXKYKXKWKWKXKZKZKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K^K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K_K_K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKbKdKcKcKcKdKdKdKeKeKdKeKdKbK`K_K]K]K_KZKLKAKFKSKKK0KK#KAK[KWKXK[K[K[KXKXKXKWKQKIKOK5KYK\KUK[K]KVK^KSKKKKKKXK^KYK[KiKtK`KXKZKOK'K(K)K&K#K(K(K.KKKKKK'K*K*K+K-K&K!K K+K3K2K/K/K2K.K,K4K;K<K3K4K3K,K$KK K$K4K5K4K0K6K9K4K>KDKCK4K-K2K4K3K)K#K$K5K:K8K8K=K?K>K@K>K@KAK;K5K1K3K,K"K K.K7K3K2K4K=K6K7K7K;KBK2K0K.K/K(K K!K%K*K)K)K-KAK?K@KBK=KBK0K"K(K+K KK#K(K(K%K&K;K>K>KAKAKCK:K9K5K%K%K$K$K&K#K#K&K%K%KeKvKsKsKsKsKsKtKuKtKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKtKtKuKaKLKKKJKKKLKKKKKKKLKKKKKJKJKJKGKFKAK$K,KCKDKCKFKKKLKJKEK?K=K>K?K>K>K>K>K>K>K?K?K>K>K>K>K?K>K>K>K?K>K=K=K9K-K/K4K5K5K6K7K6K6K2K1K/K-K KKKK.K3K3K5K4K3K3K3K4K2K3K3K3K3K4K5K3K3K3K3K3K4K2KFKnKxKxKxKxKxKxKxKxKxe]qE(K[K[K[KYKWKVKYKWKXK[KYKWKZK[K[K[K[K[K[KXKWK[K[K[KYKWKZKZKWKWKWKWKWKWKWKWKXK[KYKWKWKWKZKZKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K]K]K\KZK\K\K\K\KZKZKZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KbKaK_K_K_KaKaK_K`KbK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKaKaKaKaKaKdKeKeKeKeKbK^K^KZK_K_K]KXKJKBKHKUKLK.KK$KDK^KhK`KWKWK[K\K[KXKVKTKWKRKJKOK8KOK^KUKWK^KYK\K\KCKzKfKUK_KXK]KmKsK^KYK^KIK$K)K(K%K%K*K)K+KKKKKK*K*K*K+K)K"K K!K*K4K1K/K/K1K+K*K3K:K<K3K3K/K,K&KK K&K3K2K2K1K:K7K4K>KFK=K/K0K3K1K0K%K"K'K7K8K6K7K=K;K<K<K9K:K9K5K4K5K4K&KK"K3K5K5K3K7K:K8K7K6K9K>K2K2K,K,K&K K!K'K,K+K)K3KAK=K>K?K>K@K)K$K'K)KKK'K)K)K(K.K=K<K>K?K?KBK6K=K/K$K#K&K%K'K$K"K'K!K@KwKsKtKsKsKsKsKsKsKsKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKtK\KKKIKLKKKKKKKKKLKKKHKIKLKJKHKGKAKK +K/KGKCKDKGKLKMKJKBK=K>K?K>K>K?K=K<K>K?K?K>K<K=K?K>K?K?K>K>K?K?K=K=K9K.K.K4K5K5K5K5K5K3K0K/K,K+K 
KKKK/K5K5K5K4K3K3K3K1K3K3K3K3K2K2K3K3K3K3K3K3K3K3KEKjKwKxKyKxKxKxKxKxe]qF(KXKXKYKXKWKYKZKYKZKZKZKYKZK[KZKZKZKYKXKYKYKXKYK[KYKWKZKZKYKYKWKXKYKXKWKWKXK[KZKYKYKYKZKZKYKZKZKZKZKZKZKZKZKZKZKZKZK[K\K[KZK[K\K[K[K[K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K^K^K^K^K^K^K^K^K\K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaK`K_K_K_K_K_K_K`KaKaKaK_K`KaK`K`KaK`K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKcKcKcKdKdKaKbKcKbKaKbKcKeKbK_K[K[K]K_KbK[KHK?KIKTKIK/KK(KDK`KiKgKgKcKYKTKZKYKYKXKVKUKXKWKLKJKFKKK_KUKSKYK[KVKaKHKiKKUK_KYK\KoKtK[KXK\KAK$K*K)K'K$K%K*K(KKKKKK,K*K*K+K(KKK!K-K3K1K/K/K.K*K+K1K:K9K3K2K/K+K$K!KK*K5K4K2K3K>K6K6K@KCK:K0K3K4K4K0K$K"K+K8K8K7K9K>K:K9K9K7K9K:K6K4K5K1K#KK%K4K5K5K4K9K6K5K8K8K<K6K2K1K-K(K!K"K#K*K+K+K)K:K?K<K@K?KCK:K*K)K(K#KK!K)K(K'K)K5K>K<K>K>K>K?K8K;K*K%K$K$K$K#K!K%K'K"KVKvKsKsKsKsKsKsKtKuKtKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKsKVKKKLKKKLKKKKKLKKKJKKKLKKKJKKKIK4KKK4KFKDKFKIKKKLKJKCK>K<K?K>K=K>K>K>K>K?K>K<K=K?K?K>K=K@K>K>K>K>K=K=K9K+K/K4K4K3K4K6K6K4K1K.K/K/K!KKKK-K5K5K4K3K3K3K2K3K1K1K3K2K1K2K3K1K1K3K3K3K3K1KBKjKxKwKxKyKxKxKxe]qG(KWKWKWKWKXKZK[K[K[KZK[K[K[K[KZKZK[KYKWKZKZKWKXK[KYKWKZKZK[KZKWKYK[KXKWKWKXK[KZK[K[K[KZKZK[KZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK\K]KZKZKZKZKZK\K]K\K\K]K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K`K`K`K`K`K`K]K\K\K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbK`K_K_K_K_K_K_KaKbKaKaK_KaKbK`K_KbKaK_KaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKeKeKdKdKaKbKeKcKaKbKbKbK]K[K\K]K`K[KJK?KHKQKJK+KK'KIK]KhKhKcKcKaK]KTKRKYKXKWKXKXKYKVKVKRKHKPKQK\KVKNKYK`KVKaKQKQKlKWK_K\K_KqKnKXKXKZK;K#K)K(K%K!K#K)K%KKKKK K,K*K+K+K'KKK"K.K/K/K0K.K(K&K(K.K8K4K.K/K/K,K#K"K"K.K7K6K3K5K;K6K8KAKBK6K.K0K2K1K,K#K#K/K:K7K8K;K<K6K:K9K7K;K9K9K8K5K+K#K K+K5K5K4K6K8K4K5K4K3K9K3K2K1K-K'K K K'K+K+K*K-K?K=K=K>K=KDK4K*K)K*K"KK!K(K)K&K+K<K=K<K>K?K?K7K=K2K&K#K$K$K#K K"K'K%K0KlKsKsKsKsKsKsKsKtKvKuKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKpKTKJKLKKKLKKKKKLKLKLKKKLKLKLKLKKK3KKK;KIKEKCKHKLKMKJKBK=K?K>K<K=K?K>K>K?K>K<K=K?K>K?K?K>K>K>K>K?K?K=K=K6K-K.K3K3K3K7K8K7K2K0K0K0K,K KKKK/K5K4K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K2K@KfKvKxKyKxKxKxe]qH(KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[KYKWKZKZKZKYKWKXKXKWKWKWKWKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK[K[KZK[K\K\K\K[K[K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K_K_K_K_K_K^K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K_K_K`KaKaKaK`K`K`K`KaKaKaK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKdKdKdKdKdKcKbKcKcKbK`K]K\K_KaK[KKKAKKKOK@K%KK%KHK`KdK`K]K]K\K]K^K]KYKUKVKXKXKXKXKXKVKUKVKHKNKRK[KYKOKXK`KVK^KYKHKSKRKVK\K`KsKiKXK[KZK4K'K*K(K K K)K)K!KKKKK$K,K*K(K*K'KKK$K/K-K-K.K-K(K&K&K-K5K2K,K/K0K)K!K!K"K0K6K3K2K6K:K6K9KAK?K4K.K/K/K1K(K"K$K4K;K7K6K;K:K8K:K7K6K:K8K<K;K5K&K K"K1K5K5K4K7K7K4K4K3K3K8K0K3K2K-K$K K K)K+K+K)K4K=K:K:K<K<K@K-K*K*K'KK K&K)K(K'K1K=K;K<K=K>K?K5K=K+K'K$K#K$K#K!K%K'KKGKxKsKsKsKsKsKsKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKjKNKKKLKLKLKLKKKLKJKKKLKKKLKKKIKHK.KKK>KHKFKFKGKLKLKJKCK>K?K>K>K>K?K?K>K>K>K>K?K>K?K?K>K>K>K>K>K>K>K>K=K7K.K0K3K5K5K5K6K7K6K3K0K0K-K 
KKKK/K2K2K2K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K1K>KdKxKwKxKxKxe]qI(KZKZK[K[K[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[KYKXKZKZKZKZKXKXKXKWKWKXKXKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK[K[K[K[K]K\K\K[KZK[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K\K\K\K\K]K]K]K]K]K_K_K_K_K_K`K`K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K`KbKaKaK`K_K_K`KbKaKaK`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKdKdKdKdKdKdKdKaKaK`K_K\K]K_KZKOKCKGKNKCK,K%K2KDKZK`K_K]K_KdKhKiKgKiKiKcKVKUKXKXKXKXKXKWKVKVKIKKKJKSK]KPKSK_KZKYK_KEKgKmKHK]KfKuKfKYK\KTK,K'K)K'KKK'K(KKKKKK$K+K*K(K*K%KKK%K0K-K-K-K+K%K%K&K-K3K/K*K,K.K&K K K$K4K5K3K2K7K8K5K;KAK9K3K0K0K0K.K#K K'K7K7K8K6K;K8K5K6K3K5K7K5K9K8K1K#K K&K4K5K4K2K8K3K1K0K1K9K9K/K3K2K,K"K K#K-K+K+K)K7K9K9K:K<K=K8K*K*K*K%KK K'K&K)K(K7K<K=K<K<K>K8K;K7K'K$K#K$K$K#K#K&K%K%KdKvKsKsKsKsKsKsKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKvKfKMKKKLKLKLKKKLKJKKKLKKKLKLKLKLKHK+KK KCKHKEKDKHKKKKKJK@K>K?K>K>K>K>K>K>K?K?K>K>K>K>K?K>K>K>K>K>K>K?K=K<K9K,K.K4K5K4K6K8K7K4K1K0K/K/K KKKK-K4K2K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K4K:K^KyKyKxKxe]qJ(KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[KZKZKZKZKXKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K_K\K\K\K\K\K\K]K_K_K_K_K_K]K]K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbKaKaKbK`K_KaKaK_K`KbK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKaKaKbKaKaKbKdKeKeKcKaKdKeKaK_K`K^K_K_KaKXKKKBKHKNKSKQKRKXKcKiKhKhKiKjKjKjKmKlKkKeKfKgKaKVKTKXKXKWKYKXKXKXKWKNKLK?K@K_KSKNK^K]KWKaKKK\K{KEK_KgKtKbKYKZKOK'K%K&K#KK"K'K&KKKKKK'K+K+K)K&K!KKK'K-K*K*K+K(K"K"K%K+K1K0K-K-K-K&K K K)K5K5K3K3K8K7K4K9K9K8K4K0K0K0K.K#K"K+K8K7K8K8K;K4K3K3K2K8K6K3K6K9K0K#KK'K4K6K4K3K6K0K.K0K1K6K5K0K3K2K'K K K&K.K*K*K-K:K6K8K8K9K<K2K(K)K(KKK$K%K(K(K,K<K:K;K=K>K?K3K@K.K%K#K"K$K%K!K&K'K!K9KuKsKsKsKsKsKsKsKsKtKvKuKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuK_KIKKKLKKKKKKKKKKKKKKKKKKKLKJKJKDK#KK%KBKGKFKFKIKKKLKGK@K>K?K?K=K<K>K?K>K?K?K?K>K>K>K>K>K>K>K>K>K>K?K=K=K6K-K/K3K5K5K4K7K8K6K3K0K.K.K KKKK-K4K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K2K3K0K7K]KvKxKxe]qK(KZK[K[K[K[K[KZKZKZK[KZKZKZKZKZKZKZKZKZKZK[KZKZKZKZKZKZKZKYKZKZKZKZKYKYKZKZKZKZKZKZKZKZKZKZKZKZKZK[KZKZKZKZKZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K^K\K\K\K\K\K]K^K_K_K_K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K_K_K_K_K_K_K`KaKaKaKaK`K`KaKaK_K`KbKaK`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKaKaKbKcKcKcKcKbKcKbK^K^K`K`KbKYKKKBKHKWK_K`KdKiKlKlKlKjKiKkKiKhKgKfKgKeKeKeKfKgKaKXKTKWKXKWKXKWKWKXKWKSKJKJK7K^KWKNKZK_KUKaKUKHK~KSK\KhKuK`KYK[KJK%K'K&K%K$K%K(K#KKKKKK*K*K*K)K&K KKK*K,K*K+K+K%KK"K&K+K1K0K.K-K+K$K K!K,K4K4K3K3K4K3K3K3K3K4K/K0K/K1K+K#K"K/K9K7K7K8K;K0K1K2K2K;K8K6K8K8K.K!KK.K4K4K3K5K4K/K/K/K2K6K2K2K2K0K&K!K!K*K.K,K*K2K:K3K7K8K9K9K-K)K)K'KKK%K'K(K(K4K;K8K;K=K>K=K8K=K'K$K#K$K$K$K#K&K&K#KXKxKsKsKsKsKsKsKsKsKtKuKtKsKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKtK[KJKKKLKKKKKKKKKKKKKKKKKLKKKIKKK@K 
KK(KEKEKGKHKJKLKMKFK?K>K?K=K=K>K>K>K>K>K>K?K>K>K>K>K?K>K>K?K>K>K=K<K<K6K,K/K3K4K6K6K7K6K4K1K/K0K,KKKKK/K2K2K3K3K3K3K3K3K3K3K3K3K2K4K2K2K3K3K3K3K2K1K7K[KyKxe]qL(K^K]K]K]K]K[KZK[K[K[KZKZKZKZKZKZK[KXKWKVKYK[KZKZKZKZKZKZKWKXK[KZKZK[K[KZKZKZKZKZKZKZKZKZKZKZKZKXK[K[KZKZKZKZKZKZKZK[K]K[KZKZKZK\K\K]K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K`K_K_K_K_K_K\K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbK`K_K_K_K_K_K_K_K_K_K_K_KaKbKaKaK_K`KaKaKbKaKaKbKaK_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKaKaKaKbKbK`K_K`KaKaK\KLKBKMKYKOKIKYKjKkKiKfKeKeKeKdKeKfKfKfKgKgKgKgKhKgKhKgKXKPKWKXKXKWKUKVKWKWKTKKKRK8KWK\KRKXK_KTKZK[K@K}KvKXKmKsK]KZK\KBK%K(K&K%K#K%K'K"KKKKKK)K(K)K)K&K KK!K-K.K*K+K,K$KK$K'K,K1K/K0K-K)K#K K!K-K3K3K2K2K1K-K-K-K1K2K.K0K/K/K'K#K"K1K8K8K6K9K4K/K0K/K0K7K9K:K:K6K&KK%K4K5K2K1K7K2K/K0K/K3K;K2K3K1K,K$K"K$K*K+K/K,K4K6K5K8K8K8K3K)K)K)K"KKK'K*K'K(K7K6K6K;K<K@K7K<K4K%K$K$K$K#K"K%K&K$K0KpKuKsKsKsKsKsKsKsKsKsKsKsKsKuKvKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKqKTKIKLKKKLKKKKKKKKKKKKKKKKKLKIKKK?KKK-KFKGKGKFKJKKKLKGK?K>K?K?K?K?K?K=K<K>K?K>K?K?K>K>K>K>K?K>K<K<K=K<K<K5K,K/K3K5K5K5K5K4K3K3K/K-K-K!KKKK+K2K3K3K3K3K3K3K3K3K3K2K1K4K2K2K2K2K2K3K3K3K3K6KXKue]qM(KXK\K_K]K[K\K\KZK[K\K[KZKZKZKZKZK[KYKVKRKXK[KZKZKZKZKZKZKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[KZK[K\K\K\K[KZK[K\K]K[KZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K^K\K]K^K]K\K]K^K_K_K_K_K_K^K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K_K_K_K_K`KaK`K_K_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaK`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKbKdKaK`K`K_K`KaK`KZKJKCKLKWKKK-K%K:KSK`KcKdKdKeKeKeKfKfKgKfKfKgKeKdKcKbKaK^K\KTKQKUKWKXKXKUKUKXKVKRKJKPK;KJK`KRKVK`KSKTK`KEKaKuK\KpKoKYKZK\K;K%K)K'K#K"K!K$KKKKKKK(K&K'K(K#KKK"K-K-K*K+K(K"K K"K%K*K0K0K.K+K&K K!K#K2K4K2K1K2K.K,K*K+K.K/K.K0K/K,K#K!K$K5K6K6K5K8K0K-K-K/K2K7K8K8K9K3K!KK'K7K7K4K4K7K0K0K/K/K8K9K3K1K/K,K#KK(K,K*K,K,K7K3K4K6K6K8K,K'K)K)KKK!K&K'K'K+K9K7K7K9K<K@K2K>K,K$K#K$K$K$K"K%K'KKFKxKrKsKsKsKsKsKsKsKsKsKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKmKRKLKKKKKLKKKKKKKKKKKKKKKLKKKKKKK9KKK0KGKFKFKGKJKLKLKGK>K>K>K=K>K<K>K>K>K?K?K>K=K>K@K?K>K?K>K>K=K<K=K=K=K5K-K0K2K4K3K4K5K4K4K1K-K*K.K!KKKK.K3K2K3K3K3K3K3K3K3K2K2K2K3K3K2K1K1K3K3K3K3K1K2KVe]qN(KKnKYK[K^K^K]K]K]K]K[KZKZKZKZKZKZK\KZKWKYKZKZKZKZKZKZKZK[K[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[KZKZK[K]K\K]K[KZK\K\K\K[KZK\K]K\K\K\K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K_K\K^K`K]K\K\K]K_K_K_K_K_K`K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbK`K_K_K`KbKaKaKaKaKbKaKaKaKaKaKaKaKaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaK`KbKbKbKbK_K_K_KaK`KZKLKEKLKUKJK.K"K/KNKdKgKeKeKeKeKeKeKdKcKdKcK_K_K^K]K\K\K\K]K`KbK]KUKRKWKWKXKVKSKUKTKMKJKKKDKAK_KWKYK^K[KTK`KPKLK]K`KpKkKZK[KZK4K&K)K(K"K K K%KKKKKK K(K%K%K'K!KKK!K*K*K*K*K&K K!K!K%K*K,K,K,K*K&K KK&K3K4K3K1K3K+K)K'K)K.K.K.K/K1K(K KK(K7K4K4K4K5K,K-K.K.K3K8K7K7K;K0K KK-K8K8K5K5K3K.K0K/K/K9K4K4K0K.K(KK K)K+K+K)K/K8K2K3K4K6K9K*K(K*K'KKK$K$K&K&K2K8K6K8K8K=K:K5K9K(K$K"K$K$K#K"K%K&K%KbKwKsKsKsKsKsKsKsKsKsKsKsKsKsKuKvKuKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKyKeKBKKKLKKKKKKKKKLKKKKKKKKKLKKKLKLK6KKK7KGKEKGKGKHKLKKKBK?K>K<K=K;K>K?K>K>K?K>K<K=K>K>K?K>K>K?K>K<K<K<K<K>K6K,K/K3K3K3K6K5K5K1K0K-K,K,K 
KKKKKKKKK!K!K#K&K)K)K)K+K*K,K/K3K;KKKKKKKuKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKzKzKzKzKzKzKzKzKzKzKzKzKyKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K|KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKmKOKLKIKIKIKHKJKKKIKHKIKIKIKHKHKHKHKIKIKHK'KKK.KGKNKOKIKFKDKBe]q(KfKeKdKdKdKdKdKeKdKbKcKeKdKdKdKdKdKdK\KSKLKIKJKJKHKLKgKKKKKKKKKKKKKKKKKKKKKKKKKKK~KjKiKrKuKvKtKrK{KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKuKuKwKtKpKoKnKlKiKfKfKgKhKmKpKrKoKmKmKkKmKmKiKdK`K_K`K_K`K^K\K[K\K]K[KPK=K-K+K+K,K,K*K%KKKKKK#K&K$K'K#K$K%K%K%K$K%K"KK K"K&K*K*K)K)K(K)K)K*K KKKK K KKKK'K3K1K)KKKKKKKKKKKKKK!KKKK K+K/K0K+K(K&K%K%K%K%K$K)K/K*K"KKKKKKKKKKKKKKKKKKK#K*K)K(K%K&K$K.KDKDKCKGK7K:K[KQKJKDKUKRK=K<K=K=K:K9K/KKKKK K)K,K,K-K.K/K0K4K3K7K<K<K3K%KKK!K!K#K$K!K K"K"K%K&K(K)K&K%K,K7KBKJKKKGKHKJKFKBK>KBKEKAK6K,K"KK KK!K$K#K$K$K$K%K"KKKKKKKKK K +K KKK KKKKKKKKK!K!K#K&K)K(K)K+K+K/K/K5K1KdKKKKKKvKzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKyKzKzKzKzKzKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K~K|K{K|K}K}K}K}K}K}K}K}K}K}K}K}K}K|K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KfKLKJKHKIKIKKKLKIKHKIKIKHKIKIKIKIKIKHKJKDK#KKK3KHKNKNKJKFKCe]q(KgKfKeKeKdKdKdKdKdKeKbKeKeKdKdKdKdKeKfKeKaKZKRKKKJKKKGKGKTK}KKKKKKKKKKKKKKKKKKKKKKKKKKKqKgKkKuKzKyKuKtKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKwKxKsKqKqKoKjKeKdKgKjKnKqKrKrKpKpKpKqKpKmKmKjKdK`K_K_K_K`K^K]K^KZKMK;K.K*K*K-K,K+K%KKKKK!K"K%K&K"K#K%K$K$K$K%K&K"KKK K&K)K)K)K'K&K*K.K)K'K)K'KKKKK KKKK$K"KKKK K KKKKKKKKK KKK K"K-K4K4K1K.K+K(K%K$K$K)K,K.K+K KKKKKKKKKKKKKKKKKK K%K*K)K'K)K&K%K%K%K)K?KEKCKEK>K.KXKSKKKFKRKWKBK:K=K<K;K<K8KKK!K$K%K(K(K)K+K,K-K0K5K3K3K+K#KKK!K!K!K!K KK"K#K&K&K&K$K"K#K,K:KDKIKHKGKGKGKEK?K;KDKFK>K2K&K"K#K#K!K!K KK"K%K$K#K#K#K$K#KKKKKKKK K K K KK KKKKKKKK K K"K#K&K&K&K*K+K*K.K.K1K5K8KKKKKKKuKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K~K}K}K~K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K{K_KJKHKJKLKLKLKIKHKIKIKHKJKLKIKIKIKHKHKJK?KKKK8KLKNKMKHKDe]q(KdKhKfKdKdKdKdKdKdKdKcKeKeKdKdKdKdKdKdKdKdKeKfK_KUKNKJKIKHKCKIKbKKKKKKKKKKKKKKKKKKKKKKKKKKKKlKlKrKvKuKsKrKyKKKKKKKKKKKKKKKKKKKKKxKK{K}KKKzKvKpKpKnKiKgKgKkKoKrKtKrKnKmKqKrKpKpKnKlKjKdKbK`K_KaK_K_KaKYKJK6K+K,K-K.K+K*K$KKKKKK!K$K$K!KK K$K#K#K$K!KKKK K$K%K&K*K)K(K(K/K9K8K-K'K)K(K$KKKKKKKKKKKKKKKKKKKKKKKKK#K/K4K5K2K0K0K-K*K&K&K,K.K,K$KKKKKKKKKKKKKKKKKKK$K)K)K(K'K(K(K)K%K&K'K&K%K:KEKCKCKDK*KOKWKMKFKKKXKGK;K=K<K>K=K=K%KK"K#K$K'K)K)K*K,K0K1K/K(K!KKKK!K!KK K K#K%K%K%K%K"K#K&K2K@KFKIKGKAKAKDKGKGKGKAK@K8K-K#K"K#K"K KKK K$K%K'K)K(K%K$K%K$K#K%K"KKKKKKK K +KKK KKKKKKKKK K!K#K&K(K(K+K,K,K-K0K0K4K0KdKKKKKKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K}K}K}K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K}KzKUKGKJKJKJKJKIKHKIKIKHKJKLKIKHKHKJKHKHKIK9KKKK=KJKOKJKEe]q(KKhKbKdKhKfKeKdKdKdKdKdKdKdKdKdKdKeKeKeKeKdKdKfKfKbK[KQKIKGKFKGKFKOKsKKKKKKKKKKKKKKKKKKKKKKKKKKKqKiKlKrKuKtKqKnKKKKKKKKKKKKKKKKKKKsKKK~KKKvKsKkKgKgKgKkKnKrKrKqKpKnKnKoKnKmKnKoKmKjKeKaK_K_K_KbK`KYKIK1K*K*K-K/K.K(K"KKKKKK!K"K$KKKK K"K$K$K!KKKK!K!K#K$K$K&K)K,K3K:K=K;K8K6K-K(K'K&K!KKKKKKKKKKKKKKKKKKKKKK(K1K8K:K6K1K/K/K,K,K-K0K1K-K"KKKKKKKKKKKKKKKKKKK)K+K(K(K(K)K)K)K(K(K*K)K)K)K"K1KCKCKCKGK.KCKYKNKJKHKUKLK=K<K=K<K<K=K.KK K$K%K'K(K)K/K0K*K#K KK KKKK K!K K!K$K%K'K$K"K$K)K6KAKGKGKBK@K?K@KBKHKKKHK=K4K+K"K!K$K$K 
KKK!K(K(K*K)K(K(K)K+K+K'K#K$K$K"K"K KKKKK K +KKK KKKKKKKKK!K K#K&K*K+K+K.K.K-K/K0K3K7K9KKKKKKKwK{KxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K}K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KrKOKFKIKHKHKHKIKHKIKHKKKLKIKHKIKHKIKHKHKJK4KKK!KAKLKNKJe]q(KKKKuKdKbKeKhKgKdKeKfKeKdKdKdKdKdKdKdKdKdKdKdKcKeKfKfKaKWKMKHKGKGKFKIK]KKKKKKKKKKKKKKKKKKKKKKKKKKKKkKhKoKtKtKoKkKqKKKKKKKKKKKKKKKKpKKKKKKjKdKcKhKkKoKsKrKrKrKpKpKoKmKoKmKkKlKmKlKfK`K_K^K_K_KYKDK4K,K+K.K.K-K+K#KKKKKK K#K!KKK K K!K"K"K"KKKK K!K"K!K!K"K#K)K0K;K>K:K:K<K8K:K5K(K&K&K'KKKKKKKKKKKKKKK KKKK K,K7K8K6K5K6K5K4K2K1K0K5K3K+K!KKKKKKKKKKKKKKKKKK&K+K*K)K)K(K*K)K'K'K*K*K*K*K&K$K&K$K+KCKEKBKFK8K5K[KQKLKFKUKRK?K:K=K=K=K>K5KK K!K"K&K*K(K&K"K KKK KKKKK"K#K%K&K$K"K K%K0K:KEKFKDKAK=K>K=KBKFKGKDK:K.K%K#KK$K KK!K K&K)K+K-K,K*K'K*K,K-K0K.K)K&K$K#K#K!KKKKKKK K +K K K KKKKKKKK K!K#K&K)K*K,K+K,K/K0K/K0K8K5KdKKKKKK|KzKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K|K}K}K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KlKMKHKIKIKHKHKIKIKHKIKJKIKHKHKIKHKHKHKIKGK/KKK'KEKNKLe]q(KKKKKKKmKdKeKhKiKgKeKeKdKdKdKdKdKdKdKdKdKdKeKdKdKdKfKgKcK]KRKJKFKGKHKGKLKmKKKKKKKKKKKKKKKKKKKKKKKKKKKtKgKiKnKpKlKfKdKzKKKKKKKKKKKKKrKKK~KKlKMK]KdKiKoKuKsKsKtKtKrKqKpKmKoKmKlKiKhKhKaK_KbK^KUK@K1K*K,K.K/K-K(K"KKKKKK$K$K KK KK K!K!K!K!KKKK K"K K!K K K K)K3K8K=K<K<K>K>K<K>K>K@K0K$K&K&K#KKKKKK K KKKKKKKKK#K-K7K;K9K5K5K5K3K3K6K7K7K4K)KKKKKKKKKKKKKKKKKKK'K-K.K+K(K(K(K)K*K*K)K*K,K)K&K$K%K$K!K#K#K>KEKBKDK?K,KXKTKOKJKUKWKCK9K>K=K=K=K;K$KK"K%K$K!KKKKKKKKKK"K"K$K$K"K KK(K6KCKGKFKEKDKCK<K;KAKFKEK<K4K*K$K$K#K!K K K K"K&K+K,K+K*K)K*K+K.K2K2K1K0K-K*K*K(K$KKK K KKKKK K K +K K KKKKKKKK K"K#K&K)K*K,K*K,K0K1K0K1K3K9K>KKKKKKKvKyKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KeKKKHKIKIKHKHKHKIKHKHKHKIKHKHKHKHKIKIKGKCK(KKK-KEKJe]q(KKKKKKKKKKiKbKeKgKgKgKfKfKfKfKeKdKdKdKdKdKdKdKdKdKdKdKeKgKbKYKPKHKGKGKFKDKXKKKKKKKKKKKKKKKKKKKKKKKKKKKKlKfKfKfKaK]KUKKKKKKKKKKKKvKKKKKVKHKaKsKKKtKyKwKuKsKtKrKpKmKlKkKlKgKbKcKdKaKTK?K/K+K-K/K-K+K&KKKKKKK"K%K KKKK!K!K!KKKKKKK!K!K K!K K!K*K4K8K:K7K:K=K=K<K<KBKAK<K3K/K(K&K&K'KKKKK KK K K KKKKK(K3K9K;K;K9K7K7K3K5K6K8K6K/K$K KKKKKKKKKKKKKKKKK K,K,K-K*K*K(K)K)K)K*K,K-K+K'K%K(K'K&K&K"KKKK5KEKCKCKDK-KMKWKOKJKRKVKIK<K=K=K=K<K?K+KKKKKKKKKKKK!K"K%K#K!K"K!K$K/K>KHKKKIKHKEKCK?K;K=KBKCK:K-K%K$K%K%K#KK!K%K&K,K,K+K*K)K)K,K1K2K3K5K4K3K0K.K+K/K1K-K+KKK$K$KKKKKKK K +K KKKKKKK K!K!K#K&K)K+K+K+K,K0K2K2K3K2K8K5KaKKKKKK{KyKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}KzKzKzKzKzK}K}K~K}KzK|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K{K{K^KIKJKIKHKFKGKIKHKIKIKHKHKHKHKHKIKJKGKIKEK%KKK/KHe]q(KKKKKKKKKKKKwKbK`KfKiKhKfKfKdKdKeKeKdKeKeKdKdKeKeKeKdKdKeKhKgK_KTKKKGKFKHKFKJKhKKKKKKKKKKKKKKKKKKKKKKKKKKKtKbK[KZK_KKKKKKKKKKKK}KKKKKFK9KfKKKKxKwKuKsKsKtKrKoKkKkKnKjKdKaK^KRK>K.K-K-K.K/K,K$KKKKKKK!K!KKKKKKK!K KKKKK K KK K KK"K+K1K5K8K7K:K<K;K=K=K@KAK=K1K+K)K-K.K*K&K&K'KKKKK K +K K 
KKK#K-K6K;K9K9K;K9K7K8K5K:K9K.K'K!KKKKKKKKKKKKKKKKK"K)K-K+K+K*K*K+K*K)K(K'K'K(K)K(K$K&K'K#K"KKKKKK,KFKCKCKGK2KCKZKRKKKOKXKOK=K=K=K=K<K?K3KKKKKKKKKKK"K!K"K!KKK%K4KGKPKNKLKJKCKCK?KAK@K=K9K.K'K"K$K&K%K$K"K#K&K+K,K-K*K)K*K,K.K1K7K:K9K5K5K6K8K4K1K1K4K/K$KKKK$K$K$KKKKKK K +K KKKKKKK K!K K#K&K)K*K+K)K,K0K2K3K3K3K6K9K:KKKKKKKwKzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKxKyKzKzKzK{KzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}KzKzKzKzKzK}K}K}K|K{K|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~KKwKVKIKHKHKGKGKIKHKHKHKHKHKHKIKHKHKIKHKHKIK>KKKK7e]q(KKKKKKKKKKKKKKKpKdKdKgKgKgKfKgKfKdKfKgKeKdKdKdKdKdKdKdKdKgKgKeKeKZKPKHKFKFKDKFKUKzKKKKKKKKKKKKKKKKKKKKKKKKKKK`KXKKKKKKKKKKKKKKKKK7KBKvKtKrKxKtKsKtKsKpKqKqKnKiKiKlKgK^KMK9K/K,K.K/K/K*K"KKKKKK"K#K!KKKKKK K"K!KKKKKKKKKKK"K(K2K6K5K5K4K8K:K9K;KAK@K8K-K,K-K+K,K-K*K'K&K%K&K%KKKKK +K KKKK(K0K4K8K8K7K6K9K=K;K3K(K#K"K$K KKKKKKKKKKKKKKK&K*K*K)K)K*K+K+K)K'K)K'K&K&K&K%K&K(K%K$KKKKKKKKK&KDKCKCKFK9K7K\KTKLKJKUKRK@K>K?K>K<K>K:KKKKKKKKKK$K KK K K)K8KCKNKPKPKLKHK>K@KEKCK>K.K'K!KK K!K!K"KK#K(K,K/K.K*K)K*K-K2K4K6K9K8K9K9K9K8K8K:K7K3K*K KKK K KK#K$K%KKKKKKK +K +K KKKKKKK!K K#K%K&K'K)K(K.K0K0K3K3K2K4K8K8K`KKKKKK|KxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKxKxKxKxKyKzKzK{KyKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K~KzK{K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~KqKOKIKGKIKHKHKHKHKHKHKHKHKIKIKHKIKHKFKGKHK9KKKe]q(KKKKKKKKKKKKKKKKKKkKcKfKiKhKgKdKdKeKeKeKdKeKeKeKdKdKeKeKeKeKdKfKgKaKWKLKEKGKGKDKFKbKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK|KKKKKKrK|KvKwKxKuKtKsKsKrKqKpKpKlKkKkK_KOK8K*K+K0K.K,K(K%KKKKKK"K%K$KKKKKK K KKKKKKKKKKKK$K*K1K2K2K4K6K5K4K9K;K<K9K4K-K*K-K+K,K+K)K$KKK'K&K&K&K!KKKKK KKKK#K.K3K4K3K9K<K;K3K%KKK!K$K$K KKKKKKKKKKKK"K'K+K(K&K'K(K*K+K*K*K$K K%K&K%K$K&K&K$K"KKKKKKKKKKK!KAKEKCKEKBK/KWKVKMKIKUKVKDK=K?K>K=K=KAK$KKKKKKKK KKK"K.K9K@KDKDKFKIKHKHKFKHKCK1K(K"KK K"K!K"K K"K&K,K/K/K-K-K+K-K2K6K8K:K6K3K7K;K<K;K:K9K7K1K(KKK K KKKKK K$K%K&KKKKKK K K K KKKKKK!K!K$K$K&K(K*K.K/K0K1K3K3K3K4K7K;K:KKKKKKKtKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKxKxKxKxKyKzKzKzKzKyKzKzKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K|K|K|KzK{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~KKjKJKGKIKHKHKHKHKHKHKHKHKIKIKHKIKHKGKGKGKHK5KKe]q(K_KnKKKKKKKKKKKKKKKKKK}KeKbKgKgKfKgKgKfKdKeKgKeKdKdKdKdKeKgKgKfKfKhKhKfK[KQKJKCKGKEKBKMKrKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK|KKKKKK}KxKwKzKxKuKqKpKnKnKnKlKmKlKdKNK8K/K-K.K/K.K(K!KKKKKK"K#K!KKKKKKK KKKKKKKKKKKK$K+K3K3K3K3K2K3K6K7K:K>K8K-K)K&K)K-K.K)K&K$KKK#K(K'K&K'K$K&KKKKK K KKK K-K1K8K7K3K-K#KKKKKKKKKKKKKKKKKK'K/K/K+K'K(K)K)K)K(K(K(K)K(K&K#K#K$K&K'K KKKKKKKKKKK K$K(K<KEKCKDKFK2KQK[KQKJKNKYKKK>K>K?K?K=KBK0KKKKKKKKK#K1K:K?K@K@K@K>K>K=KFKRKLK<K#KKKK!K K K K$K&K*K+K-K-K)K,K/K5K>K?K;K6K5K6K8K;K:K7K7K2K(K KKKKKKKKKKKK$K%K&KKKKKKK K +K KKKKKK!K"K$K$K'K*K-K.K.K0K2K3K2K3K5K6K;K8K`KKKKKKKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKxKxKxKxKyKzKzKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K~K~K{KzKzK{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKK}K~K~KaKIKIKIKIKIKHKHKHKHKHKIKHKIKHKHKIKHKFKGKGK0Ke]q(KKtKEKRKzKKKKKKKKKKKKKKKKKKwKgKdKgKhKgKeKdKeKeKfKfKfKfKfKfKfKfKeKeKgKgKhKhKcKWKKKEKEKEK@KDK[KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK|KKKKKKvKyKwKwKvKsKpKnKkKiKlKmKcKLK4K-K/K1K/K,K(K KKKKK"K&K%K!KKKKKK KKKKKKKKKKKK!K'K-K0K.K3K4K4K3K2K4K8K;K3K,K&K$K&K)K,K,K&K KK!K'K,K.K(KK%K%K$K&K$KKKKK 
KKKK1K2K3K'KKKKKKKKKKKK KKKKKK!K-K2K0K-K+K*K,K*K*K*K'K&K&K'K&K#K$K%K%K#K KKKKKKKKKKK%K*K'K'K&K5KEKEKFKGKBKMK[KUKJKEKTKRK?K?K=K<K>K@K:KKKKKKK%K,K<KDK<K;K;K9K3K2K?K>K?K7K(K#KKKKK!K#K$K'K)K#K$K*K)K+K2K;K=K>K9K;K9K;K=K>K;K8K3K-K%KKK KKKKKKKKKKKK#K'K%K#KKKKKK K K KKKKKKK!K"K#K'K(K+K-K/K0K0K3K3K3K4K6K6K;K<KKKKKKKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|K{K|K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K~KzKZKIKHKGKHKIKHKIKHKIKHKIKHKIKHKHKIKHKEKGKDK)e]q(KKKK]KRKDKTKKKKKKKKKKKKKKKKKKKqKdKfKhKiKiKgKfKgKgKfKfKfKfKfKeKdKfKfKfKfKgKhKfK^KRKJKEKDKEK?KIKjKKKKKKKKKKKKKKKKKKKKKKKKKKKKKpKKKyKKKtKzKwKuKuKrKnKjKjKiK\KFK4K,K-K1K/K-K(K KKKKK K&K$KKKKKK!K"K KKKKKKKKKKK!K%K.K1K1K0K4K6K4K4K7K6K2K0K&K!K#K&K%K$K(K'K!KK!K)K.K/K/K.K*K%K0K,K'K&K&K KKKKKKKK(KKKKKKKKKKKKK!K"K!KKKK)K4K8K5K5K0K.K,K*K,K*K'K'K"KK K&K&K$K%K$KKKKKKKKKKKK&K)K)K'K%K&K$K-KDKGKFKGKCKJKZKVKNKEKRKWKDK?K<K:K>K>K?KKKKK#K0KBK@K;K9K8K8K9K9K6K8K3K+K"KK"K#K!KKK"K$K)K+K)K)K&K)K2K9K;K=K9K<K:K7K=K>K<K=K:K/K&KKKKKKKKKKKKKKKKKKK&K%K&KKKKKK K +K K KKKKKK!K K#K'K'K*K,K0K0K0K3K3K2K4K6K4K7K8K^KKKKKKKxKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|KzK}K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KvKTKIKJKIKHKHKHKHKIKHKIKHKIKIKHKIKIKEKFKHKCe]q(K]KKKKgKgK_KEKAKhKKKKKKKKKKKKKKKKKKKiKcKgKiKhKgKfKfKfKfKfKfKfKfKfKfKfKfKfKfKgKhKhKcKYKMKGKHKEKCKBKWKKKKKKKKKKKKKKKKKKKKKKKKKKKoKKK{KKKvKwKvKsKsKrKoKjK[KBK1K.K.K.K.K.K&KKKKKK%K&K"KKKKK!K!K KKKKKKKKKKKKK)K.K0K1K0K2K4K5K4K6K8K2K*K!K K#K%K&K'K&KKKK"K)K-K-K.K.K)K*K4K=KBK=K,K'K&K'KKKKKKKKKK K KKKKKKKK KKKKK%K0K7K>K;K6K3K4K2K.K+K+K)K'K$K$K"KK K#K#K!K KKKKKKKKKK#K'K)K)K(K'K(K(K'K%K+KCKGKFKGKDKGKXKWKRKHKPKYKKK<K>K?K>K>K@K-KKK"K(K2K?K;K3K1K2K6K8K8K.K%KKK!K!K!K!K#K%K(K)K(K)K)K)K,K3K;K?K>K:K3K+K:K?K;K=K9K4K(KKKKKKKKKKKKKKKKKKKKKK$K$K%K%KKKKKK K K KKKKKK K!K$K'K(K)K-K0K0K/K1K3K5K3K5K8K8K;K<KKKKKKKuKzK{KzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzK{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K~KK~K}K}K}K|KKpKPKFKGKIKGKGKIKIKIKIKHKHKHKIKHKGKFKFKFKHe]q(KfK\KKKKkKdKmKgKWKQKxKKKKKKKKKKKKKKKKKKKzKfKfKgKhKgKgKfKfKfKfKgKfKfKfKfKgKfKfKfKfKfKhKgKbKVKJKEKDKDK?KGKfKKKKKKKKKKKKKKKKKKKKKKKKkKKzKxKKKrKvKvKsKtKmKXKCK1K-K0K2K1K,K$KKKKK!K%K#K"KKKKKK!K KKKKKKKKKKKK!K*K.K1K2K2K4K3K4K5K6K5K1K*K#K"K!K"K$K%K%K!KKK"K'K)K)K)K)K,K/K6K?KDKEKAKAK7K&K&K&K%KKKKKK KKKKKKKKKKKKKK!K*K2K<K=K:K;K8K7K5K4K-K'K%K'K#K%K$K$K%K!KKKKKKKKKKKKK'K*K*K)K)K)K)K)K(K)K+K+K'K;KGKFKFKGKEKUKZKRKIKHKXKQK@K@K?K?K?K@K9KKK!K(K+K/K3K2K3K7K4K+K#KKK"K K!K!K$K&K'K(K)K(K(K,K4K8K;K>K;K9K;K>KAKAKCKAK8K/K!KKKKKKKKKKKKKKKKKKKKKKKKK%K&K*KKKKKKK K KKKKKKK!K$K&K)K)K,K/K0K/K1K4K6K6K7K8K8K;K;K]KKKKKKKxK{KzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~KK~K}K}K~KKK~K~K~K~K~KKjKKKGKIKGKGKIKHKHKHKHKHKHKIKHKGKGKFKGKFe]q(KgKiK_K|KKKxKaKkKiKiKcK}KKKKKKKKKKKKKKKKKKKKKqKeKfKiKjKiKiKgKfKfKfKfKfKfKfKfKfKfKfKfKfKgKhKeK]KQKEKDKDKCKBKSKzKKKKKKKKKKKKKKKKKKKKKlKKKtKKKoKtKuKlKXK>K-K.K0K1K.K+K#KKKKK K#K$K!KKKKKKK KKKKK K"KKKKKK#K+K.K.K/K/K3K2K3K4K6K0K+K&K!K 
K#K$K$K%K"KKKK!K$K$K$K#K!K"K-K:K@KFKDKDKBKBKEK@K1K&K&K%K!KKKKKKKKKKKKKKKKKK-K;K@K@K=K<K=K;K:K7K-K'K$K%K%K#K$K#K%K"KKKKKKKKKKKK#K-K,K)K)K(K)K(K*K*K+K/K/K$KKK-KFKEKFKGKFKPKZKTKLKDKUKRKAKAKBKBKBKBKBK%KKK&K)K*K-K/K+K%K KKKK KK K$K%K)K)K'K%K)K0K5K;K>K<K:K>K<K>KAKCKCK=K2K$KKKKKKKKKKKKKKKKKKKKKKKKKKKK*K+K,K%KKKKKK K K KKKKKK!K$K$K(K)K*K-K0K3K3K4K4K4K7K8K8K:K=K<KKKKKKKtKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKK}K}K}K}K~KKKKKKKK~K}K`KKKJKIKHKIKHKFKHKIKHKHKHKIKIKGKFKFKFe]q(KiKgKjK`KmKKKK_KhKhKhKaKmKKKuK[KKKKKKKKKKKKKKKKKKKlKeKhKlKkKhKfKfKfKfKfKfKfKfKfKfKfKfKfKfKgKhKgKcKWKLKCKCKEKBKEKbKKKKKKKKKKKKKKKKKK}KrKKKvKKKsKkKWK=K,K-K/K/K/K)K!KKKKK K#K"KKKKKKK KKKKKK!K KK KKKK#K(K.K0K-K0K2K2K3K2K2K/K'K!K K#K!K!K#K$K#K KKK!K#K&K$K#K!K"K)K3K<K@K@KCKCKCKFKGKJKFK:K%K%K%K&KKKKKK K KKKKKKKK$K2K9K=K@K?K>K=K<K>K8K-K&K"K#K$K%K$K#K%K!K!KKKKKKKKKKK'K,K.K+K)K)K)K(K(K)K,K-K'K"KKKKK%KEKFKGKGKAK?KYKVKOKGKPKTKEK@KBKAK@KBKDK/KKK!K%K%K"KKKKKKKK!K%K(K(K&K(K)K,K1K8K?K=K>K=K@KBKBKCKCK>K:K*KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK$K,K*K*KKKKKKK K KKKKKK!K#K$K'K(K*K,K/K1K3K5K4K5K8K8K8K9K=K;K\KKKKKKKxKyKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K|K|K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K~K~K}K}K}K~KKK~K}K}K}K~K~KKKKKKKK~KKzKZKJKHKIKHKGKFKGKHKHKIKIKHKHKGKGKGKGe]q(KbKiKfKiKcKeKKKKcKgKgKhKdKdKKKKMKHKdKKKKKKKKKKKKKKKKKK~KhKfKgKjKiKfKfKfKfKfKfKfKfKfKfKfKfKfKfKfKhKiKeK^KRKIKCKCKDKBKMKtKKKKKKKKKKKKKKKoKxKK~K|KKuKQK7K1K0K/K/K.K%KKKKKKKKKKKKKKKKKKKKKKKKKKKK%K,K.K*K-K0K.K1K3K4K5K.K&K KK K"K$K$K$K"KKKK!K$K$K$K#K"K&K/K=KEK@K:K:K>K@KDKHKKKHK<K0K.K-K'K&K&K&KKKKK +K KKKKKK'K3K;K>K<K<K;K>K>K?K<K0K&K!K"K$K$K$K$K%K"KKKKKKKKKKK"K(K.K/K+K*K*K+K+K*K+K/K-K&KKKKKKKKK@KFKFKFKAK.KVKXKQKIKNKYKNK?KBKAK>K?KCK7KKKKKKKKKKK!K#K$K&K&K(K'K(K/K7K;KBK@K>K@KCKEKHKIKEKAK/KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK+K,K+K%KKKKKK K K KKKKK!K!K$K%K&K+K*K-K0K3K5K4K7K9K7K8K7K:K@K?KKKKKKKwK{K{KyKxKxKxKxKxKxKxKxKxKxKxKxKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K~K~K~K~K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK}K~KKK}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKK~KuKQKHKIKFKFKFKFKFKHKHKFKFKFKHKIKIKHe]q(KKbKfKhKhKeK_KKKKgKfKhKhKgK^KKKKiKXKKKGKkKKKKKKKKKKKKKKKKKKwKgKfKhKjKiKiKhKgKgKgKgKfKfKfKfKfKfKgKgKgKhKhKdKYKMKGKDKDKCKBK[KKKKKKKKKKKKKiKyK~K|K_KMK<K,K,K-K.K+K#KKKKKKKKKKKKKKKKKKKKKKKKKKK!K'K.K1K0K-K,K/K1K1K1K1K.K'K!K KK K"K"K$K KKK K K!K"K#K"K"K&K/K9K>KDKEK?K:K9K=KCKGKCK8K1K.K.K0K/K*K%K&K(K"KKKK K K KKKK0K8K<K<K>K?K>K>K=KAK;K)K"K"K#K#K#K$K"K!KKKKKKKKKKK$K+K,K-K,K.K-K,K+K+K-K.K)K%KKKKKKKKKKK;KHKFKFKIK,KJK\KSKIKJKYKQK@KBKAKAK?KCK?K%KKKKKKKKK K#K%K$K$K&K,K3K9K<K<K?K?K@KFKJKKKJKFK8K(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK)K.K-K+KKKKKKK K KKKKKK!K$K&K&K*K+K-K0K3K5K5K5K7K8K8K:K9K;K=K[KKKKKKKxK{KzKzKzKyKxKxKxKxKxKxKxKxKxKxKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K{KzK{K|K}K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K~K~K}K}K}K}K}K}K~KKKKKKKKKKKKKKKKKnKNKHKGKGKHKGKFKGKGKEKFKHKHKIKHKHe]q(KKKgKdKhKgKhK\KKKKqKbKhKgKhK^KKKKvKiKeKPKEK]KKKKKKKKKKKKKKKKKKKoKdKiKkKkKhKgKgKfKfKfKfKfKfKfKfKfKfKfKfKgKiKiK_KTKKKCKDKCKCKIKQKKKKKKKKKKcKwKkKOK K"K&K)K&K'KKKK KKKKKKKKKKKKKKKKKKKKKKKK K*K1K0K0K0K0K/K1K3K4K/K)K&K KK K K"K#K"KKKKKKK!K!K K K)K0K9K=K<K?K@K@K>K;K@KCK?K4K+K-K0K0K1K.K&K!K%K&K&K$KKKKK K 
KKKK-K5K6K:K<K?K@KBK:K,K"K#K$K$K$K"K KKKKKKKKKKKK$K+K+K*K,K.K.K.K-K/K/K.K%KKKKKKKKKKKKKK/KHKFKFKJK3K?K^KTKOKFKTKUKDKAKBKBK@KBKDK0KKKKKKKK"K"K"K$K&K+K6K;K=K=K=K<K@KBKHKKKGK@K/KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK!K/K.K/K$KKKKKKK K KKKKK!K$K%K&K*K,K-K0K3K5K5K4K6K8K8K;K:K:K@K>KKKKKKKxK{K{K{K{KzKxKxKxKxKxKxKxKxKxKxKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzKzK{K}K}K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKKKKKKKKKKKKKfKIKFKHKIKGKFKFKFKGKGKIKHKHKHKIe]q(KKKKpKbKhKgKiK^KuKKK~K`KjKjKlK`KtKKK}KbKlKjKbKYKKKKKKKKKKKKKKKKKKKKKlKdKgKkKhKhKgKiKiKgKfKfKfKfKfKfKfKgKhKhKhKiKeK[KNKGKDKJK?KcKKKKKKKKK_KzKtKWKKKK"KKKKK K KKKKKKKKKKKKKKKKKKKKKK K'K,K1K3K3K1K0K-K0K3K2K)K!KKKK K"K KKKKKK"K!K!K KK"K*K0K5K9K9K9K:K=K=K=K@K@K;K3K.K.K0K1K1K.K'K$K#K#K'K&K&K&K#KKKKK K KKK$K7K8K9K<K:K4K'KKKKK K!KKKKKKKKKKKK$K)K*K*K)K(K)K,K+K+K/K2K)K!KKKKKKKKKKKKKKKK%KIKFKFKIK=K3K]KWKPKGKRKYKJKAKBKBK@KAKEK7K$KK KKKKKK"K'K-K4K9K<K;K;K>KAK?KDKFK?K/KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK+K.K/K)KKKKKKKK KKKKKK"K&K&K)K*K.K/K1K3K3K4K7K8K9K:K8K<KAK>K[KKKKKKKvK{K{KyKxKxKyKzKyKxKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K}K}K}K}K|KzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K}KKK}K~KKKKKKKKKKKKKKKKKKKKKKKKK}K^KFKGKGKGKFKGKGKFKGKGKGKGKGKGe]q(K[KoKKKK`KhKgKiKaKiKKKKbKjKjKjKdKiKKKKaKhKkKkKaKKKKKKKKKKKKKKKKKKKKKK|KgKeKjKlKkKiKfKfKgKgKfKgKgKfKgKiKhKfKfKgKiKiKbKUKHK7KKKKKKKKKK]K{KyK]K%KKKKKKKKKKKKKKKKKKKKKKKKKKKKKK"K.K/K+K0K1K2K3K4K3K.K'K#KK K!KKK!K KKKKK KKK K K#K(K4K6K6K5K7K8K9K;K>K@K@K6K/K,K/K0K0K0K-K$KK!K'K.K.K-K(K%K%K&K!KKKKK KKKK7K?K8K-K!KKKKK KKKKKKKKKKKK K+K1K1K.K)K,K-K,K)K.K2K-K%KKKKKKKKKKKKKKKKKKK-KFKGKFKGKDK.KTKXKPKHKKK[KOKEKDKBKAKAKHK?K)KKKKKKK$K+K.K5K6K8K;K?K?KAKCK=K5K&KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK%K/K.K0K KKKKKKK KKKKKK"K%K&K)K)K-K.K0K3K3K4K7K8K9K:K9K;K=K@K?KKKKKKKxK{K{KyKyKyKyKzKyKyKzKzKyKyK{KzKzKzKzKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzKzK{K|K~K}K}K}K}K~K}KzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K~K~KKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKzKVKHKFKFKGKFKFKFKFKFKFKFKFKGe]q(KiK`KeKKKKbKfKgKhKeKbKKKKeKeKhKhKgK`KKKKdKeKjKlKcKqKKKrKOKyKKKKKKKKKKKKKKKKKKtKeKhKiKiKiKjKiKhKjKjKgKfKfKfKfKiKiKiKiKkKlKgK\KKKKKKKKKK\KzKwKYK'KKKKKKKKKKKKKKKK K KKKKKKKKKKKK#K)K-K-K,K0K2K3K2K,K%K!K K"K$K"K!KKKKKKK K K KKK%K0K4K6K5K4K6K8K5K8K<K=K:K2K+K+K.K0K1K1K+K%K K#K(K-K1K1K/K.K.K)K&K&K&KKKKKKKKK/K)KKKKKKKKKKKKKKKKKK)K5K9K5K3K/K-K+K*K+K-K0K-K#KKKKKKKKKKKKKKKKKK$K,K1K1KAKHKFKFKHK.KJK[KRKJKFKXKRKCKCKDKDKCKFKDK,K#KKKKK'K'K,K1K3K8K=K>KCK@K4K&KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK+K/K1K)KKKKKKK K KKKK K!K$K(K)K(K+K.K1K3K3K5K4K7K:K:K9K9K;K?K<KYKKKKKKKxK{KzKzK{KzKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzK}K~K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKsKRKGKGKFKGKFKFKFKGKFKFKEKGe]q(KfKiKdK[KKKKdKgKhKhKhK\KKKKjKdKiKhKhK\KKKKjKdKkKjKfKeKKKKTKLKTK}KKKKKKKKKKKKKKKKKKnKfKhKkKlKkKiKhKgKfKhKhKgKiKiKiKiKiKiKmKlKKKKKKKKKKdKyKxKUK*K$KKKKKKKKKKKKK +K K KKKKKKKKKK"K'K&K)K+K,K-K0K0K,K(K$K K!K K K#K#K KKKKKKKKKKK%K/K4K4K4K4K4K5K7K8K6K9K5K,K'K)K-K-K/K0K+K%K"K'K,K/K0K0K.K,K0K:KEKDK*K%K'K&KKKKKKKKKKKKKKKKKKK!K KKKK!K1K;K>K;K8K5K5K2K0K,K-K/K)KKKKKKKKKKKKKKKKKKK)K1K2K0K/K.K<KGKEKFKJK5K>K_KTKNKFKRKVKEKBKDKDKEKFKLK1K#KKKK!K%K(K.K5K5K;K?K6K)KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK KKKKKKKKKKKKKK(K1K/K/K KKKKKKK 
KKKKK!K#K'K)K)K+K,K0K3K3K4K5K7K9K:K<K:K<K>KBK@KKKKKKKyK{KzKzKzKzKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{K|K~K{KzK|K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKnKMKGKFKGKGKFKGKFKGKGKGKEe]q(KhKgKgKgK\KKKKlKcKhKgKiK]K{KKKtKcKhKgKiK^K|KKKvKbKkKkKhKbKKKKkKaKSKDK[KKKKKKKKKKKKKKKKKK~KkKgKiKjKkKjKhKjKjKjKiKiKiKiKiKiKlKnKKKKKKKKKKqKwKyKSK+K&KKKKKKKKKKK K KK KKKKKKKKKKK K)K,K*K,K0K-K(K$K"K#K#K$K$K#K"K KKKKKKKKKKK#K-K5K7K5K5K5K4K4K8K:K6K/K)K*K*K)K+K,K)K'K%K%K+K1K1K2K/K-K/K3K=KIKOKOKNK;K$K$K&K#KKKKKKKKKKKKKKKK KK KKK)K6K@KAK@K?K;K;K9K6K4K5K-K"KKKKKKKKKKKKKKKKKK"K,K3K4K1K/K/K0K/K8KEKEKFKIK=K4K]KXKQKHKNKWKKKCKDKDKBKJKMK<K&K&KKKK&K,K4K9K9K.K!KKKKKKKKKKKKKKKKKKKKK KKKKKKKKKKKKKK K!KKKK K!KKKKKKKKKKKKKK#K0K/K/K)KKKKKKKKKKKKK!K&K)K)K+K+K.K2K2K4K7K8K7K;K=K=K=K<K?K@K\KKKKKKKxK{KzKzKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K~K~K{KzKzK{K~K|KzK}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKKKKKK~K}K}K~KKKKKKKKKKKKKKKKKKKKKKK~KdKJKHKFKFKFKGKFKHKIKGKGe]q(KaKfKjKiKjK\K|KKKvKbKiKgKhK`KoKKKK`KjKgKiKbKlKKKKcKjKjKkKaKKKKqKjKlK^KTKZKKKKKKKKKKKKKKKKKKKxKhKfKkKlKjKjKjKjKjKiKiKiKjKkKsKKKKKKKKKKrKvKxKMK+K$KKKKKKKKKKKKKK K KKKKKKK#K#KKK&K/K1K-K&K"K!K"K"K"K#K$K#KKKKKKKKKKKK&K.K0K1K4K6K4K6K5K5K6K2K.K)K(K(K)K)K(K&K#K#K&K-K1K1K1K2K-K-K6KCKLKSKPKPKOKPKNK2K$K&K&KKKKKKKKKKKKKKKKKK&K1K>KCK@K>K?K=K=K>K;K:K5K)KKKKKKKKKKKKKKKKKKK(K0K2K4K5K3K2K1K/K0K3K8KEKHKFKGKFK/KXKZKQKIKJKVKNKEKFKDKZKK\KBK+K&KKKK&K+K)K KKKKKKKKKKKKKKKKKKKKKK"K&K,K)KKKKKKK K K K K K K KK K"K K K!K K K K KKKKKKKKKKK,K1K1K1KKKKKKKKKKKKK"K%K(K*K+K+K.K3K3K4K6K9K:K:K:K<K=K;K;KCK@KKKKKKKzK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{K{K{K|K|K|K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK}K^KGKHKHKFKGKFKGKGKGKFe]q(KKiKfKiKfKjK^KnKKKKbKhKgKhKdKhKKKKbKfKgKhKdKdKKKKaKjKkKlKbKuKKK}KdKnKmKiKfKKKKKKKKKKKKKKKKKKKKKrKfKhKkKkKjKiKiKiKiKjKmKKKKKKKKKKKrKwKxKGK/K KKKKKKKKKKKKKKKK KKKKKK"K,K#KK"K'K#KKK K#K$K$K%K#KKKKKK KKKKKK'K0K4K1K3K4K5K2K3K4K5K0K)K'K'K)K(K)K%K#K!KK$K,K/K0K/K/K.K2K9KCKNKNKMKOKPKRKNKRKRK?K%K&K#K&KKKKKK K +KKKKKKKK'K4K@KAK>K=K?K?K>K=K<K9K3K(KKKKKKKKKKKKKKKKKK$K-K7K7K4K0K2K2K2K3K5K4K.K%KK=KIKFKFKIK0KJK[KSKLKIKXKSKEKIKhKKK{KCK4K(K&KKKKK K KKKKKKKKKKKKKKKKKK K!K$K)K,K*K'K&K$K KKKKK K!K!K!K!K!K KKK"K!K!K!K!K!K!K!K"K KKKKKKKKK%K0K2K4K)KKKKKKKKKKKK!K%K'K+K+K+K.K2K3K4K7K6K8K:K9K:K=K<K;K@KAK\KKKKKKKxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K~K~K}K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK~KKyKTKGKIKGKFKGKFKFKFKFe]q(KKKtKbKiKgKiKaKcKKKK`KfKjKiKfK`KKKKgKgKkKiKfK_KKKKgKgKkKlKdKhKKKKdKjKkKjK`KKKKeKKKKKKKKKKKKKKKKKKKlKeKhKlKkKjKkKjKlKKKKKKKKKKKqKvKrK@K1KKKKKKKKKKKKKK KKKK KKKKK K/K)KKKKKKK K"K%K$KKKKKKKKKKKK%K/K8K3K.K0K4K4K4K4K/K-K)K%K&K&K)K'K&K%K K K%K(K*K+K+K-K.K4K<KDKHKLKKKJKMKPKPKOKNKDK5K-K'K%K%K'K!KKKK K K KKKKKK,K:K>K>K?K@K<K;K?K<K;K9K/K#KKKKKKKKKKKKKKKKKK(K2K7K6K6K5K4K3K3K4K6K3K,K"KKKK0KHKFKFKJK7K?KaKXKPKIKVKXKSKzKKKKKJK<K*K)KKK K +KKKKKKKKKKKKKKKKKKK!K(K+K,K*K-K<KNKYKbK*KK!K!KKK!K K!K!K!K!K K K!K!K!K!K!K!K!K!K!K!K K K K K KKKK!K/K3K3K/KKKKKKKKKKKK 
K#K%K(K+K,K.K/K1K4K6K5K7K8K9K:K=K=K>K@KCKAKKKKKKKzK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K~KKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKrKOKFKGKGKFKGKGKFKFe]q(KtKKKK`KhKgKjKgK_KKKKhKgKjKgKhK`KKKKpKfKiKgKjK_KKKKpKdKkKlKeKbKKKKdKiKkKlKcKzKKKiKLKdKKKKKKKKKKKKKKKKKK}KkKhKlKmKnKnKKKKKKKKKKKpKuKpK<K0K KKKK K KKKKKKKKK KKK KKKKKK"K KKKK K K K K KKKK K!KKKKKK"K)K*K,K0K/K*K)K.K1K/K+K)K%K$K(K&K&K$K%K#K"K"K%K*K+K)K(K)K,K6K>KDKDKDKFKIKJKMKOKOKIK?K4K,K)K)K'K&K&K%K&K KKKK K KKKK"K<KCKCK<K=K=K?KAK>K9K4K-KKKKKKKKKKKKKKKKKK#K-K4K5K5K2K4K4K4K5K7K7K3K(KKKKKKK*KIKFKFKIK?K5K^KYKQKHKPKXKxKKKKKKYKFK,K*K'KKKK KKKKKKKKKKKKKKK!K#K)K,K)K*K5KIKaKpKwKxKvKAKK#K KKK!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K KKKKK*K4K2K5K%KKKKKKKKKKK K"K%K(K+K+K.K/K1K4K7K9K8K8K9K:K=K=K>K@KBKAK[KKKKKKKxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzK{K{K{K{KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K~K~KKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKmKJKGKGKFKGKFKFKFe]q(K`KfKKKKbKhKgKgKgK]KKKKpKdKlKjKkK]KuKKK|KcKiKiKlK_KwKKK}KcKlKkKjK_KKKKlKfKkKlKcKlKKKKZKRKKKfKKKKKKKKKKKKKKKKKKwKjKkKpKKKKKKKKKKKoKtKjK9K1K KKKKK!KKKKKKKKKK K KKKKKKKKKKKK!KKKKKK#K$K$K$K"KKK$K+K0K,K'K*K*K)K)K)K*K+K&K!K$K$K'K'K$K$K"K"K"K&K(K*K)K(K'K*K1K<KCKCKDKCKCKCKDKHKNKEK5K-K,K-K,K,K)K%KK$K&K$K#KKKKK +K KKK#K;K?K?K=K>K?K>K;K/K$KKKKKKKKKKKKKKKKKK'K0K4K4K4K5K2K1K5K7K9K7K/K KKKKKKKKK)KEKGKFKGKEK/KUKZKSKKKLKWKyKKKKKKoKIK6K*K,KKKKKKKKKKKKKKKKK$K(K,K)K&K.K?KUKkKsKuKtKrKrKtK[K#K!K KKK K!K K K K K K K!K!K!K K#K$K K!K$K"K K!K!K K K!K KKK#K2K3K3K.KKKKKKKKKKKK!K%K&K(K+K*K.K1K1K5K8K:K9K8K;K=K=K>K=K?KFKCKKKKKKKxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|KzK}K}K~K|KzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK~KaKFKFKGKFKGKFKFe]q(KkKeK`KKKKeKeKiKhKkK_KvKKKKbKjKjKkKcKjKKKKcKkKiKlKdKiKKKKaKkKjKlK`KKKKvKeKlKkKhKbKKKKiKmK_KHKUKKKKKKKKKKKKKKKKKKKwKKKKKKKKKK{KoKtK_K4K2KKKKKK K KK KKKKKKKK KKKKKKKKKKKKKKKK!K"K$K"K"K KK'K,K0K0K-K)K'K*K*K(K&K%K#K$K#K#K!K!K#K"K K"K$K'K'K)K)K&K'K+K-K7KAKBKBKBKBKDKFKFKCK<K4K-K*K1K-K*K)K$KKK#K'K%K%K$K KKKKK KKKK4K>K=K?K<K7K,K%K KKKKKKKKKKKKKKKK K+K1K4K3K3K3K4K4K4K6K7K1K&KKKKKKKKK#K"K#K#K>KHKGKGKIK1KIK_KWKPKJKVKeKKKKKKKKK(K.K'KKKKKKKKKKKKK"K'K+K)K%K)K5KNKeKsKvKtKsKqKsKsKsKrKnK0K!K$K#KK K"K"K"K"K"K"K"K!K K"K"K#K$K"K"K$K#K"K!K K"K"K!K KK!KK,K6K3K5K$KKKKKKKKKKKK%K'K(K+K,K/K0K1K4K6K9K:K9K;K<K>K@K>K?KDKBKYKKKKKKKxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|K{K}K}K}K}K{K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~KKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK{KYKEKFKGKFKFKFe]q(KjKiKgK^KKKKnKdKlKiKlKaKiKKKKbKiKiKkKdKbKKKKcKhKjKlKgKaKKKKeKjKkKkKaKpKKKKbKjKkKiK_KKKKjKlKqKhK]KsKKKKKKKKKKKKKKKKKKKKKKKKKKKpKpKrKRK3K3KKKKKKKKKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKK(K/K.K-K-K,K)K(K)K&K#K#K!K!K$K$K!KKKK!K"K&K&K&K&K#K$K(K/K4K6KGKOKCK>K@KCKCK@K;K1K.K/K.K1K/K)KK K K%K.K-K%K%K%K&K%K!KKKK K KKK,K=K<K6K'K!KK K$K!KKKKKKKKKKKK!K)K4K9K6K3K2K3K2K2K5K7K5K-K!KKKKKKKK#K%K%K#K#K(K-K>KHKIKHKJK7K>K`KXKRKIKVKZKKKKKKKKK:K)K,KKKKKKKKKK K$K(K*K&K&K.KEK`KpKvKsKpKsKsKtKtKsKsKsKsKwKGKK%K#KKK#K$K$K$K$K$K$K"K K#K$K$K$K$K$K$K$K$K#K#K$K#K 
K!K!KKK'K4K4K8K-KKKKKKKKKKKK"K'K)K(K,K-K-K1K4K4K9K:K:K:K=K=K?K?K?KAKCKCKKKKKKKzK|KzKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K~K~K~K~K}K}K~K|KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKwKSKFKGKGKGKFe]q(KjKiKjKjK\K{KKK{KcKlKiKlKfK`KKKKcKgKiKjKiK_KKKKlKfKkKkKgK^KKKKkKfKjKkKfKeKKKKdKjKlKmKaKKKKtKfKmKpKgKmKKKKKKKKKKKKKKKKKKKKKKKKKKjKqKqKJK2K0KKK KKKKK K!K KKKKKKKKKKKKKKKK KKKKKKKKK!K!KK$K(K*K-K-K-K-K&K#K$K#K K"K KK K KKK K!K#K&K%K%K#K#K'K1K5K4K3K:KHKFK@K>K?K=K3K.K.K0K0K.K+K%KKK"K+K0K-K.K)K$K-K(K#K$K#KKKKKKKK$K*K#KKKKKKKKKKKKKKKKK(K1K<K:K9K8K5K4K3K3K2K2K0K%KKKKKKKKK!K#K"K"K#K(K1K9K>KDKGKHKIKHK=K5K^KYKRKIKSKWKKKKKKKKKdK%K.K%KKKKKKK%K(K)K*K'K+K=KUKhKtKwKtKrKrKrKsKsKsKsKsKsKsKsKwKaK$K#K#K"KK#K$K$K$K$K$K$K#K"K#K$K$K$K"K"K$K$K$K$K%K$K#K#K"K!KKK#K1K5K6K4K"KKKKKKKKKKK K$K'K(K*K+K-K1K4K4K8K:K:K:K<K>K?K?K?K>KAKEKXKKKKKKKyK}K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K}K~K|K{K{K|K}K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KK~K}K~KK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKrKLKGKFKFKGe]q(KgKhKjKjKmK_KnKKKKbKjKjKkKgK[KKKKkKfKkKjKlK`KKKKvKdKkKkKlK^KKKKsKdKkKlKgK`KKKKkKjKnKqKeKtKKKKfKhKmKoK{KKKKKKKKKKKKKKKKKKKKKKKKKiKpKnKFK6K0KKK +K +K KKKK K KKKKKKKKKKKKK +K +K K KKKKKKKK#K*K&KK$K#K'K*K,K%K!K K$K#KKK!KKKKKK K"K!K K!K!K%K*K/K3K4K5K5K7K=KAK@K=K7K-K)K-K1K4K5K0K'K#K!K%K-K0K3K.K'K+K5K@KNKEK$K#K$K KKKKKKKKKKKKKKKKKKK KKKKK-K>KKKDK;K8K;K<K8K6K5K4K*KKKKKKKKK"K%K$K#K"K$K,K4K=K@K@K>KAKHKHKIKGKEK/KUK\KTKMKMKXKvKKKKKKKKK(K.K+KKKK#K&K*K+K(K(K8KLKaKrKxKrKqKsKsKsKsKtKsKsKsKsKsKsKsKsKsKtK4KK'K'K!K!K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K#K$K$K$K"K K!K!K K+K6K5K8K,KKKKKKKKKKK K#K&K)K,K,K/K1K5K4K7K;K7K6K;K?K?K?K?K@KDKFKBKKKKKKK}K~K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K~K|KzKzKzK}K}K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK}K~KK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKhKKKIKFKGe]q(KKnKgKkKjKkKeKcKKKKcKjKiKiKlK_KKKKvKbKkKjKlKcKpKKKKdKlKkKlKcKnKKKKcKkKlKmKaKKKKrKeKeKkKhKrKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKdKqKjKBK7K/KKK K KKKKKK#K K"K KKKKKKKK K K K K K KKKKKKKKK#K$KK K K"K&K&K!K"K$K$K#K K KKKKK"K"K"KKK K#K(K0K3K5K5K4K4K6K;K>K:K3K,K)K+K1K4K2K.K&K"K$K)K/K/K.K*K-K,K8KMKUKKKGKNK6K K$K%KKKKKKKKKKKKKKKK K"KKKK&K6KBKNKNKIKFK<K5K?KDK;K.K#KKKKKKKKK#K%K#K!K%K,K1K8K9K:K=K?K?K>K>KFKIKHKHKJK5KLK^KWKOKJKZKbKKKKKKKKK5K0K/K'K%K*K+K)K'K.KCKZKnKvKsKrKrKpKqKtKsKsKsKsKsKsKsKsKsKsKsKtKrKvKMK!K'K&K"KK#K&K%K%K%K%K%K%K%K%K%K%K%K$K#K$K%K%K%K%K%K#K#K#K!K K K#K2K6K9K3K KKKKKKKKKKK#K&K)K+K,K/K.K4K5K7K8K8K9K<K>K?K?K?KAKCKEKEKYKKKKKKKxK|K}K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K}K}K{K{K{KzKzKzKzK{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK`KKKGKIe]q(KKKzKcKkKjKkKhK^KKKKiKeKkKhKlKaKmKKKKaKkKiKkKdKeKKKKcKjKkKmKeKcKKKKgKhKgKiK^KxKKKKKKKKKKKKKKKKKKKKmKKKKKKKKKKKKKKKKKKVKeKmKfK>K7K*KKK K KKKKK"K K K!K"KKKKKKKKK K +K +K K KKKKK K"K#K K!K%K KKK#K&K&K"K#K$K$K"K KKK K K!K"K KKKK&K+K/K4K6K4K4K5K6K7K4K*K#K!K#K)K0K0K(K"K K%K,K0K0K/K-K,K3K?KEKNKYKVKHK<KFKMK/K#K$K%KKKKKKKKKKKKKKKKK"K-K?KJKMKLKHKIKIKFKBK>K=K/K KKKKKKKK!K$K"K!K"K&K.K7K=K>K>K;K<K?K>K>K@KBKEKHKHKIKIKJKOK_K[KSKMKXKZKKKKKKKKKTK(K3K.K.K)K-K;KTKjKsKuKrKpKpKsKsKqKrKsKsKsKsKsKsKsKsKsKsKsKsKtKsKtKgK,K%K%K#KK#K&K&K&K&K&K&K&K&K&K&K&K'K&K$K%K&K&K&K&K%K$K$K$K"K!K!K K-K8K7K8K*KKKKKKKKKKK 
K&K(K+K,K/K.K3K4K6K8K9K:K=K=K>K?K?KAKAKAKGKDKKKKKKK}K|K~K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K}K}K{K{K{K{K{K{K{K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK}KZKHKGe]q(KrKKKKcKjKjKkKjKZKKKKvKfKlKhKkK`KbKKKKdKhKiKkKiK`KKKKlKgKhKiKdK[KKKKKKKKKKKKKKKKKKKKKvKfKhKhKaKxKKKKKKKKKKKKKKKKKK}KXKNKhKlKaK:K5K)KKK K KKKKKKKK K!K"KKKKKKKKK +K +K KKKKKK#K%K%K&K%K$K#K!K"K(K'K$K$K#K"K KKK!K"K KKKKKK"K$K'K,K/K2K4K3K5K3K.K'K KK KKK K$K KK$K*K0K0K0K.K.K6KEKPKUKSKPKSKTKVKOKMKIK/K"K$K$K#KKKKK K K KKKKKKK-K:KIKPKOKKKJKFKAKJKMKEK5K%KKKKKKKKK K K"K!K"K*K/K7KAK>K?KBK@K?K?K@K@K@K8K'K/KJKHKIKHKMKNK_K\KTKNKVKXKKKKKKKKKK&K1K0K8KNKdKrKwKuKsKrKpKrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsK:K"K'K)K$K#K%K%K%K%K%K'K%K&K(K&K%K&K&K%K&K&K%K%K%K%K&K%K#K$K#K$K#K)K8K8K8K2KKKKKKKKKKKK$K&K)K,K/K0K/K2K5K9K9K:K=K=K<K>K?KAKAK@KEKFKWKKKKKKKzKK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKK}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK~KKxKPKFe]q(KeKgKKKKdKiKkKjKlK^KsKKKKeKlKjKkKiK\KKKKkKhKjKjKkK[KKKKwKwKKKKKKKKKKKKKKKKK{KpKlKfK^K~KKK{KhKoKpKgKnKKKKKKKKKKKKKKKKpK[KOKPKeKiK\K8K5K(KKKKKKKKKKKKK K!K!KKKKKKKKK +K K KKKKK%K)K(K%K"K#K%K(K)K(K#K!K!KKK!K%K#K!K KKKK!K'K(K(K'K'K+K0K3K2K/K*K%K!KK K K"KKKK K&K,K+K-K-K-K3K<KFKKKMKMKRKQKUKXKWKNK>K3K/K+K(K$K"K%KKKKK K +K KKKKK-K@KMKQKQKLKIKIKLKIKHK>K1K KKKKKKKKK!K!K"K#K'K-K6K;K>K?K@KAKAKAK=K?K@K;K.KKKK%KGKIKIKHKIKLKYK]KUKOKSKWKmKKKKKKKKK4KIK_KnKwKuKsKsKsKsKsKqKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKrKwKTK"K'K+K(K!K'K'K'K'K%K'K'K%K"K&K'K&K&K'K&K&K&K'K'K&K&K%K$K$K%K#K"K!K1K9K7K8K(KKKKKKKKKKK$K&K(K+K-K/K/K2K5K8K9K:K=K=K<K>K?KAKBKAKEKJKFKKKKKKKK}K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K}K}K}K}K~KKKKKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKpKMe]q(KkKgK`KKKKkKfKkKiKmKeKiKKKKcKiKkKkKkKZKKKKtKfKoKxKKKKKKKKKKKKKKKKKKuKlK^KKKKlKjKnKoKdKnKKKKfKoKqKgKKKKKKKKKKKKKKKjK^KWKOKOKcKhKXK6K6K&KKKKKKKKKKKKKK"K!K KKKKKKKKK K KKKKK$K'K#K#K#K'K&K&K%KKKK!K!K!K$K$K!KKKK#K)K+K)K)K(K)K)K'K*K+K(K#K K K!K!KKKKK#K&K*K,K-K+K-K1K=KGKMKNKOKKKNKSKUKSKIK:K0K/K,K-K*K%K#K$K#K"KKKKK K +K KK"KCKGKMKLKKKJKLKMKIKBK4K(KKKKKKKKKKK K"K$K*K1K8K<K@KAKCKBKAKAKDKDK=K4K%KKKKKK!K@KJKHKHKHKIKSK^KWKQKPKYK\KKKKKKKKKvKuKwKtKsKsKsKsKsKsKsKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKjK,K'K)K(K"K%K)K)K)K&K&K)K'K%K(K)K&K&K)K'K%K(K)K)K(K%K&K&K&K&K$K!K!K+K7K6K9K1KKKKKKKKKKK!K%K&K+K*K,K/K2K4K5K:K:K=K=K<K>K?KAKBKAKBKGKIKWKKKKKKK{K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK}K}K}K}K}K}K}K}K~KKKKKKKKK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKge]q(KkKkKiK_KKKKtKeKlKjKnKiK`KKKKiKdKeKiKrKkKKKKKKKKKKKKKKKKK|KnKlKKKKaKlKmKmK`KKKKtKfKlKnKhKeKKKKgKqKkKKKKKKKKKKKKK~KaK^KZKSKLKQKfKgKTK6K5K!KKKKKKKKKKKKKKK!KK"KKKKKKKK K +KKKKKKK!K%K&K'K%KKKK!K%K$K"K!K!KKKK#K&K+K)K(K*K*K*K(K&K$K#K"K#K!KKKKKK!K#K%K)K(K&K)K/K4K;KCKEKGKLKLKMKLKQKQKCK5K.K-K-K1K.K'K!KKK$K"K#K"KKKKK KKK KCKGKLKJKIKLKHK=K/K!KKKKKKKK K!K"K K 
K$K+K5K9K;K>K<K@KBKAKBKDKEK;K(KKKKKKKKKK6KIKHKHKIKIKPK]KWKQKOKXKWKKKKKKKKKKnKuKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKsKsKsKsKuK@K%K*K*K&K#K*K*K*K(K'K'K'K'K(K)K(K(K)K(K'K(K)K)K(K'K'K(K&K&K&K%K#K%K5K8K8K;K#KKKKKKKKKKK"K%K*K,K.K/K2K4K5K7K:K=K=K=K?K?KAKBK@KAKAKHKEKKKKKKKK|K}K|K|KzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K}K}K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K}K}K}K}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KmKkKjKmK`KwKKKKfKjKfKhKgK[KKKKKKKKKKKKKKKKKKKpKKKKaKhKlKmKiKdKKKKfKkKmKmK`KrKKKKeKmKmKjKbKKKKpKfKKKKKKKKKKaKUKTKNK@KZKXKSKKKRKfKfKOK5K4KKKKKKKKKKKKKKKKK!K K!KKKKKKKK KKKKKKK"K#K"KKKK!K%K$K%K"KKKKKK K$K'K)K)K*K,K*K(K%K KK K K"K KKKK!K&K&K'K'K#K&K*K1K9K=K=K>K=KDKHKIKIKFK>K3K.K.K1K2K.K'K!KKK K'K&K"K#K$KKKKKKKKK6KIKKKHKDK7K(KKKKKKKKK!K!K K#K$K)K/K5K;K=K<K@KAK=K>KAK>K<K2K#KKKKKKKKKKKK.KIKHKIKHKKKMKYK[KUKOKVKYK}KKKKKKKKKoKwKuKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKuKsKsKsKtKwKXK&K+K*K&K%K*K+K+K)K)K)K)K)K)K)K)K)K)K)K)K)K)K)K)K)K)K)K'K%K&K%K$K$K0K9K7K;K-KKKKKKKKKKK K$K)K-K0K/K2K4K5K5K9K=K=K:K>K?KAKBKBKDKEKGKGKYKKKKKKKyKK~K|KzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K}K}K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KfKlKlKjKjK_KhKKKKKKKKKKKKKKKKKKKKKKKdKfKhKiKhKaKKKKoKjKmKmKkK\KKKKkKiKmKmKeKfKKKKeKlKmKmKbKKKKnKKKKKKKKKKZKUKVKPKBKWKTKQKGKTKdKdKIK6K7KKKKKKKKKKKKKKKK+K'KK"K KKKKKKKKKKKKKKKKKKK$K&K'K%K!KKKK"KKK K!K'K(K(K$K$K!K K!K KKKKKKK$K%K'K'KK K'K(K-K8K;K;K9K<KAKBKCKDK?K6K0K-K.K1K1K.K(K"KKK&K-K,K,K(K#K!K"K!KKKKK KKK0KIK=K/K"KKKKKKKK K"K KKK#K+K2K8K=K>K?K=K>K>KAKBK?K8K(KKKKKKKKKKKKKKK)KHKIKHKFKJKBKTK\KWKPKTK[KhKKKKKKKKKuKvKuKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKtKsKtKtKsKsKsKsKuKlK2K(K*K+K&K'K+K+K)K)K)K*K+K+K*K*K*K*K*K*K)K)K)K)K)K)K)K)K'K%K&K&K#K*K9K9K9K8K#KKKKKKKKKKK$K(K+K-K1K3K3K3K7K8K;K:K;K?K?K@KCKBKBKDKDKHKGKKKKKKKK{K~K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}KKKKKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKlKoKxKKKKKKKKKKKKKKKKKKsKoKkKgKaK]KKKKlKiKlKlKiK_KKKKvKgKmKjKkK`KKKKrKfKlKkKiKaKKKKgKiKmKnKdKwKKKKKKKKKKKKYKQKQKHK@KXKSKPKFKUKcKbKGK9K2KKKKK KKKKKKKKK#K/K;K=K&KK K KKKKKKKKKKKKK KKKKKK%K#K!K!K$K'K'K&K"KKK"K#K$K!KKKKK K#KKKKK K$K%K&K&K&K"K#K)K1K6K8K9K:K:K<K@K>K6K1K-K/K0K0K-K)K%K KK$K)K.K.K.K*K)K/K:K(K!K#K!KKKKKKKK#KKKKKKKKKKKK K"K&K+K7K=KAK@K@K?K<K=K@K@K:K-K KKKKKKKKKKKKKKKKK"KAKJKHKFKMK5KFK`KWKPKNKZK\KKKKKKKKKKrKuKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKtKsKsKsKtKtKtKtKsKvKGK&K,K+K)K$K+K+K,K,K,K+K+K*K+K+K+K+K+K+K)K(K)K)K)K)K)K*K'K&K'K&K&K%K4K:K9K;K,KKKKKKKKKKK!K%K(K,K0K2K3K3K7K7K:K9K<K?K>K@KCKAKBKDKDKHKLKYKKKKKKKyKK|KzKzKzK{K{K{K{KzKzKzKzKzK{K{KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K~KKKKKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKKKKKKKKKKKKKKKwKlKdKfKKKKeKjKnKmKlK\KKKKyKfKkKjKlK_KsKKKKdKmKlKnKbKwKKKKcKmKlKjKbKKKKpKhKmKmKgKlKKKKKKKKKKxKVKPKPKEKBKYKRKNKGKXKaK^KCK7K0KKKKK KKKKKKKKK)K5K?KIKAK&K!K!K KKKKKKKKK K +K KKKKKK K#K,K0K.K*K)K)K&K&K&K$K!KKKKKK K KKKKK K#K$K$K#K"K"K$K%K&K'K.K5K9K:K<K<K7K4K.K+K/K1K2K.K&K"KK K&K*K1K1K/K,K-K6KDKOKRK>K!K$K$K KKKKKKKKKKKKKKKKK K$K+K1K6K:KDKEKDKBK>K>K?K9K2K#KKKKKKKKKKKKKKKKKKKK#K=KJKHKHKKK:K:K`KYKRKHKQKWKKKKKKKKKKnKuKvKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKuKvKtKrKwK[K,K-K*K+K&K*K+K+K+K+K+K*K+K&K(K)K+K+K+K*K*K+K+K+K*K(K)K)K)K(K%K%K#K*K8K:K:K6K KKKKKKKKKK 
K$K'K*K.K0K2K3K3K6K:K;K=K>K?KAKAK@KDKDKDKGKLKFKKKKKKKK}K}KzKzKzK}K}K~K|KzKzKzKzK{K}K|KzKzKzK}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKKKKKKKKKlK|KKKxKbKjKjKlKhK^KKKKiKiKmKlKkK]KpKKKKcKlKkKkKeKiKKKKdKkKlKnKfKhKKKKcKkKkKmKaKzKKKzKfKmKoKhKvKKKKKKKKKqKSKQKPKKKKKVKRKMKKK]K`K\K=K7K0KKKKKKKKKKKKK'K1K;KGKKKJK.K K!K!KKKKKKKKKKK KKKKKKK2KAK:K0K)K(K*K%K$K%K"KKKKKKKKKKK!K"K K"K#K"K#K%K%K$K%K&K'K+K0K6K8K3K,K)K,K-K.K/K1K)K KK$K*K.K0K-K-K-K3K=KGKPKPKMKMKJK,K"K#K#KKKKK K KKKKKKKKK"K'K,K4K:K?KCKEKDKDKDKCK?K4K'KKKKKKKKKKKKKKKKKKK K!K$K'K'K3KJKHKHKJKDK0KZKZKSKMKKKZKxKKKKKKK�KKsKvKvKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKtKsKsKuKuKuKtKsKtKoK9K)K,K+K(K(K+K,K+K*K+K,K+K(K)K*K+K*K)K+K,K+K+K+K*K)K)K)K)K(K'K$K!K$K5K:K:K<K)KKKKKKKKKKK#K'K*K.K/K0K1K3K6K:K;K=K=K?KAKAKAKBKCKCKFKHKIKZKKKKKKKyK~K{K{KzK}K}K|K|KzK{K{KzK{K}K|KzKzKzK}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKKKKKcKgKjKhKmKaKrKKKKeKjKjKkKkK\KKKKsKfKkKiKnKaKbKKKKdKlKlKkKhKaKKKKgKiKmKmKhKaKKKKfKhKmKnKbKjKKKKdKpKjKKKKKKKKKKhKPKPKRKMKOKTKPKMKPK\K]KZK;K8K/KKKKKK +K KKKKKK-K9KAK@K7K%KKK!K K!K KKKKKKK K K +K KKKKKK5KBKCK:K.K)K&K"KKKKKKKKKKKK!K"K!K K K"K$K#K#K)K)K(K'K%K(K(K)K-K)K%K'K.K/K,K&K!K K"K%K*K1K0K,K-K0K3K?KKKOKPKJKJKMKKKHK?K%K!K K"KKKKK KKKKKK!K#K'K0K7K8KBKEKEKGKGKEKEK;K0K KKKKKKKKKKKKKKKKKK K!K&K(K)K)K'K/KDKLKIKIKHKJK/KPK^KVKOKLKYKdKKKKKKKKK|KtKvKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKvKuKvKtKsKsKsKsKsKsKsKwKKK)K/K-K+K&K*K-K.K-K.K-K*K+K+K+K*K*K*K*K+K+K+K+K+K+K)K(K)K(K)K&K&K&K-K;K9K:K5KKKKKKKKKKK!K%K'K,K-K1K3K3K6K9K<K=K=K?KBKBKBK@KAKCKDKGKMKIKKKKKKKK}K~K|KzK}K}KzKzKzK|K~K{KzK~K|KzKzKzK}K}KzK{K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK}K~KK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKKyKKKKhKlKlKlKlKcKdKKKKdKhKkKjKmKbKwKKKKcKlKkKmKfK_KKKKjKiKlKmKlK_KKKKpKgKlKlKlK^KKKKmKhKnKmKgKbKKKKiKgKKKKKKKKKKeKOKOKNKKKRKTKPKLKRKZK^KWK8K:K.KKKKKKK KKKKK'K4K8K0KKKKKKK"K!K"KKKKKKKK K +K KKKKKK-K@KIK<K)K KKKKK KKKKKKK K K!KK K!K"K#K%K%K&K(K)K+K(K%K)K%K%K&K)K+K)K)K%KKK$K+K0K1K2K1K.K/K8KFKOKPKKKKKOKKKLKEK>K4K*K&K!K#K#KKKKKKKKKK!K,K4K7K=KEKCKDKFKGKGKBK7K'KKKKKKKKKKKKKKKKKKK K"K#K&K'K&K*K:KRKhKqKQKGKIKHKKK3KDKbKZKSKJKVK\KKKKKKKKKKqKwKuKuKuKtKsKsKsKsKsKsKsKsKtKuKtKsKsKsKtKuKuKuKuKuKtKuKuKuKuKuKtKwKbK,K-K.K*K%K*K.K.K.K0K,K+K-K,K,K,K-K.K+K*K+K+K+K+K+K*K*K*K*K)K(K)K'K'K8K:K;K=K'KKKKKKKKKK K#K%K)K.K/K1K2K5K9K;K;K<K@KAKAKBKBKCKCKDKFKEKHKZKKKKKKK{K~K|K|K}K}KzKzKzK{K|K|K|K{K|K|K|K|K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K}K}K~KKKKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKKKrKKKKkKiKmKjKkKgK`KKKKlKfKkKjKlKbKkKKKKcKlKlKjKlK]KKKKoKgKmKlKmK]K|KKK}KeKmKmKlK_KKKKwKfKmKmKiK[KKKKgKKKKKKKqKlKvKdKKKOKLKJKRKTKOKLKRKZK`KUK8K7K.KKKKKKK +KKKKK*K'KKKKKKKKKK#K K"KKKKKKKK KK KKKKK.K3K/K&K!KKKK#K!KKKK!K"K!K"KKKKK!K 
K#K&K&K%K&K)K(K'K$K"K$K&K&K'K)K'K$KKK#K*K0K1K0K,K.K5K?KHKPKMKGKJKGK@KBKEK>K2K*K'K%KKK%K!K!KKKKKKKKK(K1K=KCKEKCKAKBKCK<K.KKKKKKKKKKKKKKKKKKKK!K&K)K*K%K'K0KHK`KpKuKrKrKXKFKIKHKKK>K:KaK[KSKJKRKWKKKKKKKKKKpKwKvKuKvKuKsKsKsKsKsKsKsKsKtKvKtKsKsKtKuKvKuKuKuKuKvKuKuKvKuKuKuKuKrK:K+K-K+K'K(K.K-K-K/K-K-K.K.K.K.K.K.K+K*K+K+K+K+K+K+K+K+K*K)K)K)K'K$K2K<K=K=K2KKKKKKKKKKKK%K'K*K-K0K0K4K6K7K9K<K@K>K>KBKBKDKCKDKDKBKKKHKKKKKKKK|K}K~K}K}KzKzKzKzKzK}K}KzK{K~K~K~K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K}K}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKKKKpKKKKwKeKmKkKmKmK_KKKKuKeKkKkKnKgKaKKKKhKjKmKkKkK_K{KKKzKeKmKjKlKcKlKKKKbKkKjKlKbKkKKKKbKkKjKmK]KKKKKKnKkKmKkKoKuKxK^KKKNKJKIKPKRKNKKKQK[K^KQK9K8K1KKKKKK +K KKKKKKKKKKKKKKKK!K!K"K"KKKKKKKKK KKKKKKKK K$K!K#K#K!KK!K%K%K$K#K!KKKKK!K!K K#K&K%K'K'K$K$K$K$K%K$K%K%K$K KK K%K,K.K.K.K,K.K4KAKKKOKIKIKHKEKMKHKCK:K3K,K'K,K'K KKK K#K$K#KKKKKKKK'K1K=KDKEKBK;K1K#KKKKKKKKKKKKKKKKKKKK!K%K)K'K'K-K=KWKhKsKtKqKpKpKrK`KFKIKHKJKCK1KYK]KWKMKMKYKnKKKKKKKKKvKvKvKuKtKtKsKsKsKsKsKsKsKsKsKtKsKsKsKsKtKtKtKtKtKtKtKuKuKsKtKuKuKuKwKQK*K/K0K.K'K,K/K/K/K-K-K.K.K.K.K.K.K-K-K-K-K-K,K*K+K+K+K*K(K)K)K)K'K+K;K=K=K:K#KKKKKKKKKKK#K&K(K-K1K1K4K4K7K8K9K>K>K?KBKBKBKCKDKDKGKJKKKYKKKKKKK{KK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKK}K}K}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKKKKKsK|KKKKfKmKlKlKnK`KxKKKKdKlKkKjKjK_KKKKnKhKlKiKnKaKkKKKKeKkKkKmKdK_KKKKeKiKjKmKeK^KKKKiKiKjKmK`KhKKeK\KgKrKqKkKoKrKwKYKKKNKIK=KPKQKNKDKJK]K]KKK8K8K0KKKKKK KKKKKKKKKKKKKKKKKKK K!K KKKKKKKK KKKKKKKK#K$KKKK#K$K KK#K"KKK"K$K#K"K!K!K!K#K%K%K%K$K#K#K#K%K'K$K!KKK!K)K,K+K,K-K-K-K.K7KCKFKEKCKEKGKJKDK;K7K/K,K,K.K)K$KKK!KKK#K#K KKKKKKKK K6K@KAK7K&KKKKKKKKKKKKKKKKKKKKK%K&K(K&K(K3KMKdKrKsKqKnKpKpKpKpKqKkKKKHKIKIKJK/KOK`KXKPKKKZK_KKKKKKKKKKrKvKuKtKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKsKsKtKtKtKsKtKuKuKsKtKuKuKuKuKgK2K.K0K/K)K*K/K0K/K.K.K.K.K-K-K.K.K.K.K.K.K.K,K+K+K+K+K+K)K)K)K)K*K(K5K>K=K=K.KKKKKKKKKKK"K&K)K-K0K1K3K4K7K7K9K=K?K?KAKAKAKCKCKDKGKGKLKIKKKKKKKK}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKK~K~K}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKKKKKKtKlKKKKfKlKlKiKlKcKjKKKKdKjKlKkKkK\KKKKyKeKmKiKmKdKcKKKKfKjKlKmKhK\KKKKjKgKjKlKgKZKKKvKgKjKiKnKcKIKaKbKgKpKoKjKnKsKvKSKJKNKGK;KSKQKOKDKKK\K]KJK8K:K/KKKKKKKKKKKKKKKKKKKKKKKK*K*K K!K!KKKKKKKKKKKKKKKKKKK$K&K%KKK#K&K$K(K(K%K&K%K"K!K!K"K!K K#K'K&K'K&K#KKK!K"K(K*K*K)K)K)K(K-K2K7K9K<K<KCKIKHKDK9K2K-K/K,K,K(K$K!K K K!KKK K%K"K K!KKKKKKKK3K2K KKKKKKKKKKKKKKKKKKKK#K$K)K'K&K0KCKYKnKuKsKpKoKpKqKpKpKpKpKpKoKQKGKIKHKKK5KCK_KZKSKJKVKYKKKKKKKKKKpKwKuKuKuKsKsKsKsKsKsKsKsKsKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKsKsKAK-K0K1K,K(K/K0K/K-K/K0K.K,K+K.K-K-K-K-K.K.K.K.K.K.K.K.K.K+K(K(K'K%K/K?K=K=K9K#KKKKKKKKKKK$K*K+K.K1K4K4K6K7K9K:K?K?K?K>KAKDKDKDKDKHKKKLK\KKKKKKKyK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]q(KKKKKKKKxKaKKKKhKjKkKjKnKfK`KKKKhKkKlKjKlK`KqKKKKeKlKiKmKjK[KKKKlKgKlKkKlK[KKKKmKhKkKlKlKYKOKbKjKiKkKkKmK\KcK`KiKqKpKlKoKtKrKPKJKNKFK@KUKPKOKDKMK\K^KGK8K9K.K"KKKKKKKKKKKKKKKKKKKK K2KCKNKDK(KK#K!KKKKKKKKKKKKK K KKKK"K"K!K$K-K8K6K-K+K+K'K&K%K!KK KK K!K"K&K&K"KKK$K*K,K*K)K(K&K%K&K'K-K0K3K5K6K9K<K?K;K6K0K/K/K.K,K'K"K!KK K K K"K&K.K5K6K'K!K"K"KKKKKKKKK 
KKK%K(K(K)KKbKKKKnKuKsKsKsKsKsKsKvKfKFKDKhKuKsKsKsKsKsKsKsKuK`KCKDKjKvKsKsKtKpKSK@KFKCK=K?K?K3K7KRKKKEKAK8KNKRKTKEK/K1K.K&K&K%K$K#K!KKEKVK,K$KCKVKWKWKWKWKWKYKDK7KUKQKAK1K+K.K2KJK]K8K.K<K3K+K+K<KMKXKYKWKWKOKHK?K<KFKQK\K_K^K]K\K\K]K]K]K]K]K]K]K]K[K]K]K]K[K[K[KZKYK[K]KbKjKbKKK>K6K6K9K2K6K9K4K-K0K/K7K9KGKPKhKwKtKuKxKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|KzKzKzK|KyKXKQKOKRKOKNKPKQKYK\KXKOKHKWKSKKKKKKKKKKK}KKKKK}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKYKBKDKCKGK@K=KGKCKFKGKDKDKGKFK@K=KGKCKCKDKCKDKBKAKCKDKDKCKAKBKBK@K=K=K<K:K9K7K:KLKQKOKNKLK2K*K*K)K'K%K$K#K#K&K,K/K4K:K>KBKEKGKHKJKwKKoKqKtKwKyK|KwK{KKKKK{K{KzKyK}KKKKKKKKKKKwKKqKiKKKKpKKK^KmK[K_KhKjKkK`KKK-KK K K K K K K KK KKKKKK K K K KK K K K K K K K K K K K KKK K'KRKKKKDK8KbKzKKKKKqK`KiK`KKKKKKKwKtKKe]r%��(K#KKKKKKKKKKK0KEKIK-KK KKKKKK#K(K"KyKKKKrKtKsKsKsKsKtKsKvK`KBKEKkKuKsKsKsKsKsKsKsKuK[K?KGKoKuKsKsKtKnKPKCKEK@K>K?K>K0K;KQKHKCK>K9KPKRKTKCK3K4K.K&K&K&K$K"K$K!KGKUK*K(KEKQKUKXKYK[KXKXKBK-K1KFKVKRK5K+K)KGK\K=K5K?KAK?K:K2K/K1KFKSKYK^K\KVKOKFK=K?KEKSK^K]K\K[K[K]K]K]K]K\K[K[K[K[K[K[K[K[K[K[K[KZKYKZKZKSKHK@K;K<K9K2K5K4K-K5K6K6K:K=KEKTKpK|KyKzK|K{KwKtKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K|KzK|K}K{KzK}K}K}K}K}KzK{K]KPKRKTKRKPKOKJKVK[KXKPKGKUKTK~KKKKKKKKKKKKKKKKKKKKKK}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKjKDKFKFKGKEK;KCKCKFKGKFKFKGKFKBKAKDKCKCKCKCKDKCKCKDKCKBKBKBKBKBK?K>K=K;K9K9K8K7KEKNKOKPKOK;K,K-K+K)K&K&K%K#K#K'K,K2K8K:K?KDKGKGKJKvKKpKpKrKuKvK{K{KwK}KKK|K{K{KyKyKzK}KKKKKKKKKKsK|KrKdKoKKKuKuKqKOKgKeK^KaKfKaK\KNK1KK KK K +K +K K K KK +KKKKKK K K KK KKK K K K K K K +K K K K K +KKKKOKKKKmKoKtKiKlKKKKKKKKKKKKKKKe]r&��(K%KKKKKKKKKKK=KnKK=KKKK KKK +KKK"KKKKuKuKsKsKsKsKsKsKsKtKZKCKJKpKtKsKsKsKsKsKsKsKtKTK?KLKrKtKtKsKuKkKNK@KFK?K>K>K>K.K@KOKGKDK=K>KTKQKTKBK4K8K0K&K&K&K%K"K$K"KIKTK*K+KIKTKWKWKYKZKXKWK>K(KKK'K4K0K+K7KPKYK>K0K6K=K>K=K?K@K9KDKTK<K:KMK[K]K[KWKQKDK>K@KHKUK[K^K^K\K\K\K\KZK[K[K[K[K[K[K[K[K[K[K[K[K[K\K\KYKVKPKDK?K2K.K1K7K6K5K5K:K3K0K0K0K7K)K.KHKdKwK|KyKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzKzKzKzKzKzK{K~K}K{K|K}K{K{K}K}K}K}K}K{K}KhKQKSKSKRKQKNK@KVK\KZKSKJKPKXKfKKKKKKKKKKK|KKKKKKKKKKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKKK|KMKEKGKFKFK=K@KEKEKGKGKGKGKFKDKDKDKDKDKDKDKDKDKDKDKCKBKBKBKBKBK?K>K=K<K:K8K8K7K>KMKPKQKQKEK.K,K+K*K*K'K%K$K$K&K+K2K7K:K?KDKFKGKHK`KKrKpKrKsKsKwK}K{KzKKK|K{KzK{K|K{KzKKKKKKKKKKKoKoKhKcK{KK|KlKWKJKXKmKiKaK^KWKUKPK3KK +K K K K +K K K K KK KKKKK K K KK KKK +K K +K +K +K +K +K +K +K +K +K K K KKKKOKKKKyKkK]KiKKKKKKKKKKKKKKe]r'��(K$KKKKKKKKK KKkKKK<KK"KKKKKKK KK_K`K\KuKsKsKsKsKsKsKsKsKsKTKBKNKtKtKsKsKsKsKsKsKtKsKQK?KOKtKtKuKuKuKhKJK@KEK?K>K<K=K,KCKPKGKEK:K>KSKRKSK@K7K9K0K(K(K%K"K K$K$KOKRK)K+KJKUKWKXKWKWKXKXK=K+K0K%KKKKK0KSKXK=K9K7K,K2K:K=K=K8KLK^K/KK2K1KAKRKYK\K\KXKOK4K3KAKJKTK]K`K\KZKZK[K]K\KZK[K[K[K[K[K[K[K[K[K[KZK[KYKYK[KVKPK<K2K)K$K!KKKK KKKKKKKK%KNKpK{KyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}KzKzKzKzKzKzK{K}K}K~K{KzK}K~K}K}K}K}K}K}KKpKQKSKQKPKPKJK*KPK\K[KTKLKJKWKWKKKKKKKKKKKzKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK]KEKGKEKAK;K<KFKFKGKFKFKFKGKGKGKGKDKDKGKEKCKDKDKDKDKDKDKDKBKAKAK@K?K>K>K5K1K;K9KGKQKPKPKNK6K+K*K*K(K&K&K$K#K%K)K.K2K:K<KBKDKEKGKTK~KvKpKpKqKrKwK}K{KzKKKK|KzKzK|K}KyK}KKKKKKKKKKrKfKiKbKpKK}KvKaKNKMKcKmKhK]KUKQKUK:K K K 
K K KK +K K K K KK KKK +K K K K K K K +K K K K +K +K +K +K K K K K K K K K KKKKYKKKwKiKhKiKtKKKKKKKKKKKnK}e]r(��(KKKKKK K&K!KKK#KKKK,K"K%KKKKKKK KKFK>KZKvKsKtKtKtKtKsKsKtKpKMK?KVKuKsKsKsKtKtKtKsKuKpKMK?KTKvKsKuKuKuKcKEK@KEK>K>K<K=K/KFKOKGKEK7K=KSKRKQK>K;K<K-K&K'K%KKK"K$KOKOK(K,KIKVKYKWKZKXKXKXK;K,K5K3KKK%K%K#KGK\K:KPKjKTKCK2K*K0K6KLK^K:K=KpKbKJK;K9KDKUK]K\KPKPKKKBK>KAKKKTK]K]K]K\KZKZK[K[K[K[K[K[K[K[KZKZKZK[KYKXKYKZKXKYKWKIK1KK KKKK +K +KK K K KKKKKKKxKzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKxKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K}K}KzKzKzKzKzK{K|K}K}K|K|K|K}K}K}K}K}K}K}K}K~KwKUKRKRKQKPKOK.KDK\KZKUKOKIKVKSKKKKKKKKKKK|KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKmKFKHKFKBK@K;KDKGKFKFKFKFKFKGKFKEKEKEKGKEKCKDKDKDKDKDKDKDKBKAKCKBK?K>K=K9K8K:K7K?KQKQKPKRKBK.K,K,K)K&K%K$K$K#K'K*K/K6K:K@KBKEKGKMK|KwKpKpKqKsKuKxK|K{K}KKKxK{KwK|KK|K{KKKKKKKKKKKaKjKgKeKzK}KvKcKSKHKZKjKkKeK\KSKUK>KK K K K K K K +K K K K +K K KK K K K K K K K K +K K +K K +K +K K +K K K K K K K K K K K K K4KqKKwKqKkKmKsK{KKKKKKKKKOKwe]r)��(KK K"K"K%K$KKKKK:KKKKK#K%KKKKKKKK$KGK@KaKvKsKvKuKvKtKsKsKuKmKJK?K\KwKsKsKsKtKvKtKsKvKmKIK@K[KvKsKvKuKtK_KEKAKEK>K>K<K=K=KLKNKGKDK=KFKQKRKOK=K;K>K-K$K&K&K#KKK%KSKLK'K/KKKYKDK)KHKZKWKWK:K'K!K!K(K.K2K5K1KHKYK7K;KWKhKsKXK.K%K'KCK\KAKBKwK{KzKqK_KGK:K8KHKWKYKYKYKSKJKAK<KBKOKZK^K]K[KZKZKZK[KZKZKZKZKZKZK[K[K[K[K[KZKWKWKXKZK\KVKIK4KKKKK +K KKKK KKKK2KtKzKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKwKwKzKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K~K}KzKzKzKzKzK}K~K~K|KyK|K~K}K}K}K}K}K}K}K}K~K|K[KQKSKSKRKRK7K7K\K[KWKOKHKQKRKyKKKKKKKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKzKPKFKGKFKFK>KAKHKFKGKFKFKFKGKDKCKFKGKGKEKCKDKCKDKDKDKDKDKBKAKBKAK?K>K=K;K:K8K6K8KLKQKPKQKLK3K-K-K+K)K$K%K%K!K&K*K0K6K:K=K@KEKFKHKuKyKpKpKpKsKuKuK{KK~KKKvKyKwKxKKKK~KKKKKKKKKKsKOKjKYKoK{KoKfKXKJKRKbKgKiKbKZKUKBKK +K K K K K K KK +K K +K +K +K K K K +K +K +K +K +K +K +K +K +K +K K K KKKKKKKKKKKK K +KKK:KPK^KeKmKqKrKuKzKKKKKKjK?Kje]r*��(K)K)K(K(K#KKKKKKQKKKKK&K$K"KKKKKKKGKBKCKjKuKsKvKuKtKuKuKsKuKiKEKAKbKxKrKtKuKuKuKuKtKuKhKEK@K`KvKsKtKsKsK^KBKBKDK<K=K<K>K:KKKMKFKAKCKMKPKSKOK?K>K<K+K%K%K$K$K#KK'KXKLK'K2KMKTK<KK=KUKWKVK8K(K$K(K,K0K4K7K7KMKXK:K0K6KBKOKXKEK1K?KRKZKCK7KYKrKyKzKwKyKqK]KKKNKJKLKYK[K[KXKSKHK?K:KBKNKYK]K\K[KYKZKZKZKZKZK[K[K[KZKXKYKXKWKXKZKXKWKYK[K\KUKEK.KK KKKKKKKK KK5KxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKwKwKzKxKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{K{K{K|K|K|K{KzK}K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}KKdKNKSKPKPKOK?K/KXK[KYKPKGKNKTKaKKKKKKKKKKK{KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK]KDKGKFKHKBK<KFKGKFKFKFKFKGKFKEKFKFKEKDKCKDKCKDKDKDKDKDKBKAKBKBKAK>K>K<K;K:K9K8KGKQKQKPKQK>K-K,K+K*K&K&K$K#K%K)K.K4K8K=K@KDKGKGKqK}KqKqKoKsKtKvKxK~KKKKwKsKuKwKKKKK~KKKKKKKKKKCK`KXKiKtKnKcKYKLKJKZKaKfKgK`KYKHKK K K K K +K K KKK K K +K +K +KKK K +K +K +K K K K K K K K K KKKKKKKKKKKK KK KK KK)K8KHKZKfKkKtK{KKKKK\K:KSe]r+��(K)K*K*K#KKK KK KKaKKKPKK'K$K#K KK"KK"KjKVK>KHKpKtKtKvKtKsKuKuKsKuKbKAKBKhKuKsKtKvKuKuKuKuKvKeKDK?KeKvKsKsKtKtKZK@KCKCK<K=K=K<K=KOKLKEK@KDKMKPKSKOK?K?K;K)K%K$K#K$K#KK*KYKFK$K2KMKUKSKIKPKWKXKUK7K(K%K(K+K0K4K8K6KNKXK8K4K5K2K4K9K9K4K?KVKZKCK7K3K5KIKbKtKyKyK|KoKZK@KK+K:KMKWK[KYKWKRK@K.K;KDKPKWK[K\KZKZKZK[KZKZK[KYKWKWKWKXKYK[KYKWKWKWKWKWKWKZKSKNK4KK KKKK KK K 
KEK|KxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKxKwKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K}K~K|KzK}K}K}K}K~K}K}K}K}K}K}K}K}K}K}K}KKoKPKRKNKLKNKLK+KOK\KZKSKJKLKXKUKKKKKKKKKKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKnKHKGKGKGKFK;KCKGKFKFKFKFKFKGKGKGKFKCKDKDKCKDKDKDKDKDKDKBKAKBKBKBK?K?K>K=K:K:K7K=KOKQKQKNKJK1K-K/K,K'K'K$K#K$K(K+K/K5K<K=KAKGKFKlKKoKqKnKoKsKvKwK}K}KKKxKrKpKsKxKKKKKKKKKKKKKKzKPKUKgKrKlK_KWKQKFKRK]KeKgKeKbKKK'K K K K K +K K KKKK K +K K KKKK +K +K K K K K K K K K KKKKKKKKKKKKKKKKK +KKKK&K7KDKKKRKXK^KhKtKsKbKGK]e]r,��(K+K,K)K"K K KKKKKgKKjK+K"K$K$K#K K KK4KjKvKMK@KNKrKuKuKuKuKuKuKuKuKwK\KAKDKlKvKuKuKuKuKuKuKuKwK^KAKCKhKwKuKsKuKuKWK>KCKCK<K=K=K;K>KOKIKEKAKDKOKQKQKLK>K?K9K(K%K%K"K!K!KK-KZKCK%K4KOKUKXKYKXKXKYKUK7K)K%K*K.K0K4K3K5KRKXK6K8K?K9K4K4K0K,K,KIK[K;KOKqKUK<K/K;KPKgKzKqK[KDK2KcKNK:K5KAKNKXK^KOKFKMKCK=K;KEKOKXK]K\KYKWKXKXKXKXKXKXKYKYKXKXKWKXKXKWKXKWKWKWKZKYKUKIK5KK KKK KKKVK}KwKxKxKxKxKyKzKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKwKVKTKSKPKPKRK1KEK[K[KWKOKHKVKSKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK}KQKEKGKFKGK=K@KGKGKGKGKGKFKFKFKEKEKCKDKCKDKFKDKCKDKDKDKCKCKCKCKBKAK>K<K=K:K9K9K8KGKQKQKPKRK;K.K.K,K(K%K%K%K%K%K)K-K3K8K;KAKEKDKfKKqKoKmKoKtKvKyK|KzKKKwKuKrKpKsKxKKKKKKKKK{KKKKKPKJKcKSKIKRKQKPKHKKKZKaKeKgKfKOK0KK +K K +K +K +K +K +KKKKK K +K +K +K +K +K +K +K K K K K KK K K KKKKKKKK K +KKKKKKKK KKKKKKKK$K*K7KHKQKQKPK\e]r-��(K.K,K)K"K KKK K K%KIKKK8K#K"K$K$K#K K!KKWKyKlKGK@KVKuKuKuKuKuKuKuKuKuKvKUK@KIKqKvKuKuKuKuKuKuKuKuKWK?KDKnKwKuKtKuKqKOK>KFKAK<K=K=K;KBKPKHKEKAKCKOKQKQKKK>K?K8K'K%K%K"K!K"KK1KZKAK&K6KPKUKWKWKXKWKXKTK7K)K%K*K.K0K4K6K7KRKXK5K8K>K<K<K;K:K6K+KEK[K;KEKlKsKxKYK.K%K.K;KSK\KKKAKyK{KuKdKLK:K9KDKPKVKVKXKTKMKCK<K:KCKOKWKZKXKXKWKWKWKWKVKVKWKWKWKWKWKWKWKXKWKWKVKVKXKZKXKVKIK0KKK +KKKkKzKxKxKxKyKyK{KyKyKyKyKxKxKxKxKxKxKxKxKxKxKyKyKyKyKyKyKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{K{K|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K|K\KRKSKQKOKRK8K9K[KZKWKPKFKOKTKsKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK`KCKGKGKGKCK=KDKFKGKGKGKFKFKGKDK>KDKCKDKGKGKDKCKDKDKDKDKDKDKCKBKAK?K=K=K;K:K:K8K@KPKQKPKRKGK0K-K.K+K'K&K&K%K%K'K)K0K4K8K?KDKEK^KKtKoKmKoKtKvKyK|K{KKKzKtKsKpKpKsKxKKKKKKKK}KKKKKtK>KBK K(K8KAKHKIKGKSK[KcKfKgKSKAKKK K K K K K K KKKKKK +K +K K +K K K +K K K K K K K K +KKKKKKKK K +KKKKKKKKKK KK K K KKKKK%K1K?KMKEe]r.��(K0K+K(K$K K K!K"K#K,K@KCK5KK!K$K$K"K K"K"KVKxKeKCKAK]KyKuKuKuKuKuKuKuKtKrKOK?KPKvKvKuKuKuKuKuKuKuKuKRK<KHKsKvKuKuKwKnKLK=KDK?K<K<K=K;KEKPKHKEKCKIKOKQKPKHK>K>K6K'K%K$K$K!K!KK4K\KAK'K:KSKUKVKXKXKWKVKRK8K(K%K)K/K1K4K6K9KQKWK2K7K@K<K<K=K>K;K;KLKXKAK2K:KJKbKiKNK5KDKCK@K[KNKBKpK{KyKyK{KtKaKJKHKKKMKVKZKYKXKSKKKBK=K>KGKQKWKZKYKWKWKWKWKXKXKXKXKXKXKXKXKXKWKXKXKXKWKTKUKXKZKTKGK7K"K KPKKwKxKxKzKzKzKzKzKzKzKxKxKxKxKxKxKxKxKxKxKzK{KzKzK{KyKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKbKPKSKPKPKTKBK/KWKXKYKRKHKKKTK^KKKKKKKKKKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKoKGKGKGKGKEK?KCKGKGKGKGKFKFKGKEKBKGKGKGKGKFKDKCKCKDKDKDKDKDKCKBKBKBK@K>K=K<K:K9K;KKKSKQKQKNK7K/K,K*K*K&K%K%K&K&K)K.K2K6K9K?KCKYKKyKqKoKnKsKzKyK|K}KKKKuKsKoKoKrKrKxKKKKKKKKKKKKK0KKK"K'K3K9K@KDKOK[K_KbKgKSKRKKK K K K K K +K K +K K +KKKK +K K +K K K K K +K K K K K K K KKK K K K K KKKKKKKKKKKKKK +K K +K KKKK&K4KAKKe]r/��(K.K+K(K"K K K!K%K!K/K@K@K.KKKK$K#K K 
K$K`KzK^K@KAKdKyKuKuKuKuKuKuKuKuKmKIK>KUKxKuKuKuKuKuKuKuKvKsKNK<KLKvKuKuKuKwKnKJK<KCK?K=K=K<K8KCKPKHKDK>KAKQKPKQKHK=K=K4K&K%K%K#KK KK5K[K>K'K;KTKVKWKXKWKVKWKQK8K)K&K*K/K1K3K6K9KSKVK4K7K>K<K<K=K<K=K@KMKYK>K7K@K1K6K@KDK?KGKbKcKYKPK;K8KJKdKuK|K{KzKzK^KQK)K#K6KIKWKYKYKUKPKIK?K8K:KFKRKWKZKYKXKVKWKWKWKWKWKWKWKWKXKWKWKXKWKUKUKUKTKUKVKXKVKFKTKpKwKzKzK{KyKyKyKzKzKzKyKyKxKyKyKxKyKyKyKyKzKzKzKzKzKzKyKzKzKzKzKzKzKzKzKzK{K{KzKzKzKzKzK{K|K|K|K|K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKiKPKSKQKPKRKLK,KSKYKYKTKKKGKWKRKKKKKKKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKSKEKHKIKIK@KBKGKFKGKGKFKFKFKGKGKFKGKFKFKFKEKEKDKDKDKDKDKDKCKBKAKBK@K>K=K;K8K9K8KEKQKPKQKSKCK.K-K,K*K(K'K&K&K%K(K*K0K6K9K=KAKTKKyKrKoKnKqKwKzK}K}K~KKKxKsKnKoKqKnKsKyK~KKKKKKyKKKKKKK KKK(K4K6KCKOKYK[KaKdKQKLKKK +KKK K +K K +K +K K +K K K K K K K K +K K K +K K K K K K KKKK K K K KKKKKKKKKKKKKKKKK +K K KKK#K-K/KAe]r0��(K,K,K(K KKKKK!K3K?K@K)KK"KK K#K KK(KkKzKXK>KBKlKvKuKuKuKuKuKuKuKwKjKCK<KZKxKuKuKuKuKuKuKuKwKqKJK<KQKvKuKuKuKwKhKEK;KEK>K?K>K<K+K>KRKHKCK;K6KOKPKRKHK<K:K3K%K%K&K"K K!KK:KZK<K%K=KVKXKXKXKVKUKXKPK8K)K(K+K.K3K3K6K:KVKVK7K7K<K<K=K<K>K?K>KLKZK9KEKuKfKTK=K2K1K1K<KRK\KNKDKYKCK0K;KSKjKuKyK^KTK0KHKVK<K7K<KKKTKVKUKUKFK3K=K9K>KGKTKYKXKVKUKWKWKUKUKUKWKXKXKXKXKWKUKVKVKVKUKUKTKQKWKVKSK[KbKnKvK{KzKyKxKzKzK{KzKxKzK{KxKyK{KzKzKzKzKzKzKzKzK{KzKzKzKzKzKzKzKzKzK}K}KzKzKzKzKzK}K~K~K~K~K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKtKRKSKRKOKPKRK0KHK[KYKWKNKEKUKRKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKcKDKIKHKDK;K?KGKFKFKFKGKFKGKIKHKFKGKFKFKFKGKFKCKCKCKCKDKDKCKAKCKAK>K?K>K;K2K4K:K<KNKNKQKRKMK5K.K.K+K+K)K'K&K#K&K*K0K3K7K?K@KNKKyKtKpKnKnKpKzKK}K}KKK|KwKpKnKoKlKqKvK{K~KKKKK{KKKKK+KKKKKK KKK0K?KPK^KcKPKHKKKKKK K K +K +K +K +K K K K KKK +K +K K K +K +K +K K K K K +K K +K K +KKKKKKKKKKKKKKKKKKKK +K K +K +KKK$K&K1e]r1��(K'K!KKKKKK#K&K6K@K>K&K!K$K%KKKKK*KnKvKOK>KGKrKuKuKuKuKuKuKuKuKxKdK@KAKbKxKuKuKuKuKuKuKuKvKmKGK=KYKxKuKuKuKwKcKDK=KCK<K=K<K:K)KCKPKFKDK;K9KQKPKRKEK<K:K0K&K&K#K!K K!KK<KXK:K)K?KVKWKVKVKVKUKWKNK8K(K&K)K,K1K4K6K:KUKVK6K7K;K<K<K=K<K=KAKNKZK<KDKvKzKzKsKcKJK7K(K9KZKMKEKyK|KjKFK*K-K@KWKYKUK;K]KKwKhKSK?K7KAKNKTKPKLKQKNKFK<K9K=KHKQKXKWKWKVKVKUKVKVKVKVKVKVKUKVKUKUKUKUKUKTKTKSKSKQKPKPKVKbKnKvKzK|KzKyKxKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K{KzK}K}KzKzKzK|K|K}K}K{K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K|KYKQKQKPKPKTK7K=K[KWKWKQKGKQKSKnKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKtKIKIKGKDKCK;KDKGKFKGKFKFKGKGKGKGKGKFKFKFKGKFKFKFKFKDKCKDKDKCKCKBK?K?K>K=K;K:K8K8KIKQKQKQKQK>K.K-K+K*K)K&K&K%K&K&K-K2K6K=KAKLK}K{KtKpKpKnKmKxK}K~KKKKKxKtKpKpKnKnKtKwKxK|KKKK|KKKKhKSKKKKKKKKKKKKK/K5K/K +KKK K +K +K +K +K +K +K +K +K +K +K +K +K K K KK K K K K K K KK K +K K +K +KKKKKKKKKK K K KKKKKKKKKKKK KKKKe]r2��(KK#K%KKKKKKK<KBK6KKKK!K!K KKK4KrKqKHK>KMKuKvKuKuKuKuKuKuKuKyK]K@KCKjKxKuKuKuKuKuKuKuKwKgKCK>K]KxKuKuKuKwK`K@K>KDK;K<K=K:K+KGKNKFKDK9K>KRKPKRKCK<K:K.K'K&K!K 
K!K!KK@KVK9K1K:KJKRKXKWKUKUKWKMK7K(K%K(K+K0K4K6K:KVKTK8K9K:K=K<K=K;K<KBKPKZK<KDKwKyKuKvKxKzKnKXKKKWKMK<KQKcKsKoKKK/K.K.KIKVK?KWK{KxKzK}KwKeKNK8K9KLKPKUKWKSKQKMKDK=K8K>KHKSKVKVKVKUKUKUKUKUKUKVKVKUKUKUKUKUKVKTKRKRKSKSKRKQKQKRKXKbKlKvKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzK}K}KzKzKzK|K~K}K}KzKzKzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K`KPKQKQKPKSKAK0KVKWKXKTKKKKKTK\KKKKKKKKKKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKUKGKHKGKHK?K=KGKFKFKGKGKFKFKFKGKGKFKFKFKGKGKGKGKGKEKCKDKDKDKCKBK@K>K?K?K=K=K;K9KAKQKQKPKQKIK0K-K+K+K)K&K%K&K%K%K+K0K4K<K@KFKxK}KtKqKqKnKmKuKxK{K~KKKKzKyKrKnKnKlKpKrKvKxK|KKK}KKKK]K_K+K�KKKKKKKKKKKKKKKKK +K K +K +K +K K K +K K K K K K K K +KK K K K K K +K K K KKKKKKKKKKKK K K K K KKKKKKKKKKKK K KKKe]r3��(K(K'KKKKKKK K>KAK2KKKKK K#K!KKBKxKkKBK@KUKxKuKuKuKuKuKuKuKuKwKVK=KEKpKwKuKuKuKuKuKuKuKxKbK>K?KeKxKuKuKuKwK\K>K?KCK<K=K=K:K+KIKNKFKBK7KAKRKPKRKBK;K9K.K'K&K"K K K!KKEKVK:K7K9K9K/K8KNKWKXKXKMK8K(K%K)K.K2K3K4K9KUKSK;K9K:K=K<K=K<K?KCKQK\K?KFKwKyKxKxKwKwKyK~KqKYKOK8K5K<KHKYKZKEKBKUKSKSKEK;KKKeKsKzKzKzK{KnKIKHKLK9KDKPKUKTKRKPKKKEK8K7K>KHKUKWKWKWKUKUKVKUKUKVKUKVKVKVKVKTKRKSKSKSKRKSKSKRKQKPKQKVKbKmKuK|K}KyKxKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K|KzK|K}K}K}K}K|KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKiKTKQKOKQKQKKK,KPKWKXKVKLKHKVKQKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKdKFKJKHKHKDK;KEKGKGKGKGKFKHKHKFKGKGKFKGKGKGKFKFKGKEKCKDKDKDKDKCKBKAK?K?K=K:K:K9K:KLKRKRKRKOK8K.K-K-K+K(K(K'K%K(K+K+K3K9K=KAKpKKuKrKpKoKmKrKxK{K~KKKKK{KvKoKnKnKnKpKqKtKvKyKKKKKkKaK[K9KKKKKKKKKKKKKKKKKK +K +KKKK K KKK K K KKKKKKK K K K K K K +KKKKKKKKK K +K K K K K K K K +K +K +K +KK KKKKKKK K K K e]r4��(K%KKKKKKKK)K@K@K,KK K"K!KK!K(K"KVK|KcKBK@K^KxKuKuKuKuKuKuKuKuKuKPK<KKKsKvKuKuKuKuKuKuKuKxK^K;KBKkKwKuKuKuKvKXK<K?KBK<K=K<K6K,KLKMKEKBK6KAKQKPKQKBK;K8K-K'K%K"K K K K!KIKTK:K7K:K8K"KKK1KIKVKKK8K'K%K)K/K2K3K4K9KUKSK<K9K;K<K<K=K=K?K?KNK[K?KHKwKyKxKxKxKxKxKyKnKYKOK?K`KLK<K9K>KAK?KJKSKTKDK@K9K/K:KRKjKtK|KzK4KHKWK$KK!K7KJKSKTKTKSKQKIK?K4K5K>KHKTKWKWKUKUKUKUKUKUKUKUKUKTKRKSKRKRKSKSKRKSKSKSKSKQKPKQKWK_KlKvK{K}K{KzKzK{KzKzKzKzKzKzKzKzK{K}K|KzK|K~K}K}K~K|K{K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K}K~K~K}K~KKrKSKQKPKQKPKQK.KEKXKWKUKNKHKUKPKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKuKJKIKIKHKFK=KAKGKGKGKGKFKHKHKFKGKFKGKFKFKGKGKFKGKEKCKDKDKDKDKDKBKBK?K?K>K:K:K9K9KEKQKRKRKQKDK/K.K.K+K)K)K'K%K%K'K+K2K5K:K>KoKKvKsKqKpKmKrKyK{K~KKK}KK|KzKrKnKnKnKpKpKpKqKuKyKzKsKfKaK^KUKGK KKKKK KKKKKKKKKKKK +K +KKKKKKKKKKKKKKKKKKKKKK +K KK K KKKKKK K +KKK +K KKKK K +K K +K K K K K KKKK K +K K +e]r5��(K!KKKKKK!KK+KBK=K)KKKK#KKKK&K_K{K[K=K@KdKxKuKuKuKuKuKuKuKvKqKKK>KPKwKuKuKuKuKuKuKuKuKxKYK<KCKoKwKuKuKvKtKSK:K@KAK:K<K=K7K*KLKKKCKAK3KEKOKMKNK@K;K7K+K&K#K!K KKK"KKKRK:K:K9K4K#KKKK*KYKMK5K$K&K)K/K1K4K4K8KUKRK<K<K=K<K=K<K<K<K:KPKXK>KJKwKyKxKxKxKxKxKzKnKYKOKDKyKzKpK]KGK2K.K3KIKVKBKRKsK]KEK-K/KAK]KIK KJKWK7KK +KKK-K=KMKSKUKSKPK:K>K@K8K7K?KIKTKWKWKVKUKUKSKRKRKRKRKRKRKRKRKSKRKRKSKRKSKRKQKQKPKOKQKTK_KmKwK|K~K|KzKzKzKzKzKzKzKzKzKzKzKzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKK}K}K}KKK}K}K~KzKUKQKRKQKOKQK4K;KVKVKWKPKGKNKSKjKKKKKKKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKUKGKIKGKGK@K=KGKGKIKGKGKFKFKFKHKHKGKFKFKGKGKFKGKEKCKDKDKDKDKDKBKAK?K?K@K<K9K9K9K<KMKQKPKQKOK5K-K-K*K+K'K&K%K$K'K*K/K6K9K;KkKKxKuKtKrKlKrKwKyK}KK~K}KK{K{KtKoKnKoKkKmKrKsKuKxKyKpKeK`KYKRKHK-K7K?K=K<K6K0K'KKKKKKKKKK +K K K K K +K +K K K K K K K K K KKKKKKKKKKK +K KKKKKKKKKKKKKKKKKKKKKKK K 
KKKKKKe]r6��(KKKKKK"KKK2KCK=K#KKKK"K#KKKK`KyKTK=KCKlKwKuKuKuKuKvKwKuKwKnKDK>KTKyKvKwKuKvKwKvKuKuKvKSK<KFKsKvKuKuKwKpKOK<KCK?K;K<K=K5K.KOKIKCKAK5KIKPKNKNK@K;K7K+K&K#K"K KKK KNKQK9K8K9K5KKKKK,KZKKK5K$K&K)K-K0K4K4K:KVKSK<K<K;K<K<K=K;K=K>KQKYK@KLKwKyKxKxKxKxKxKzKpK[KOKCKtKwKzK}K{KlKRK=KHKVKBKEKdKnKqKVK3K+K+K%K$KMKWKBKKK +K +K KK#K4KAKMKQKMKQKQKPKIK=K6K:KCKMKTKWKVKSKRKRKRKRKRKRKRKSKRKSKRKRKRKRKQKQKQKRKSKQKOKNKPKTK^KkKuK{K}K{K{K{KzKzK{K|K{K|K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKK~K~K~KKK~K~K~KK]KPKSKQKPKTKBK0KUKXKYKRKHKJKVKYKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKfKGKIKHKHKEK;KGKGKIKGKFKGKGKGKFK@KAKHKGKFKGKFKGKEKCKDKDKDKDKDKBKBK@K@K?K=K;K:K:K8KGKSKPKQKRK?K-K/K,K+K(K&K%K%K&K'K+K1K6K8KfKKxKvKsKrKnKrKwKxK}KKK}K~K|K{KtKrKoKoKkKkKjKpKuKzKxKoKeK]KSKMKFK0K@K1K2KIKLKIKKKDK9K0K(KKKKKK K +K +K +K +K +K K K K K K K K KKKKK K K K KKKKK K KKKKKKKKKKKKKKKKKKKKKKKKKKK K +K Ke]r7��(KKKKK%KKKK8KAK:K KKKK#K$K%K K&KkKvKNK>KIKrKvKuKuKuKuKwKyKvKwKhKCK?K\KyKxKyKuKvKyKwKuKuKuKNK>KMKuKuKuKuKyKnKIK;KDK?K<K=K=K0K3KQKGKBKAK6KIKNKRKLK:K;K8K*K%K$K!K K!K K"KQKQK8K7K8K6KKKKK1KZKHK6K$K&K(K+K0K4K5K>KXKQK=K:K:K=K=K=K;K=K>KOKZKCKMKwKyKxKxKxKxKxKzKrK^KPKCKuKyKxKwKxKzK{KxK_KQKCK1K8KFKVK]KMK=KGKMKAKQKVKDK#K KKKKKK KK%K>KIKLKRKTKQKNKMKEK?K8K<KAKKKRKUKTKRKRKRKRKRKSKRKTKRKRKSKQKPKPKQKSKQKPKQKQKQKPKNKOKTK\KlKtK|K}K|K{K}K~K~K~K~K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKfKPKSKQKPKRKKK*KOKYKVKRKKKGKTKPKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKxKKKHKIKIKHK>KBKHKIKGKFKHKHKFKFKCKCKGKGKFKFKGKGKEKCKCKCKDKDKDKBKAKBKAK?K>K=K;K9K8K@KQKQKPKQKKK2K.K/K*K)K'K%K&K%K'K*K/K6K7KaKKxKvKsKqKpKsKwKwK{KKK|K}K~KzKtKpKoKqKoKnKkKkKoKvKvKlKbK[KQKIKBK1K?K&K:KJKPKRKKKDK@K@KBK9K,K6KKKKK K K +K +K +K +K +K +K K K KKKKK K K K K K K +K KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]r8��(K#KK%K$KKKK#K>K=K4KKKKK#K%K%K!K.KrKtKFK=KOKwKxKwKwKwKwKxKxKuKxKaKAKAKcKzKvKwKwKxKxKxKwKwKsKIK<KQKwKuKuKuKyKkKFK;KDK>K<K<K>K.K6KQKFKBK>K4KJKPKRKJK:K;K6K)K%K#K"K KKK&KRKOK8K6K7K3KKKKK2KYKGK4K#K&K(K+K0K4K3K>KXKOK>K:K:K:K;K=K=K;K=KOKZKCKMKwKyKxKxKxKxKxKzKrK]KPKDKtKyKxKxKxKxKwKyKfKTKCK?KFK6K5K?KGKCKGK]KfK[KTKFK5K)KKK KKKKKK0KQK?K,K?KPKUKTKQKQKNKEK;K5K6KBKMKSKUKTKRKRKRKRKSKRKRKRKQKQKRKQKQKQKPKPKPKQKPKOKPKOKLKMKPKZKhKsKzKK~K|K|K}K|K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKK~K}K~KKKKKKKKKKKKKKKKKrKQKSKQKPKPKRK-KFKWKWKUKNKFKTKPK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKXKFKIKIKKKDK>KHKGKGKFKHKHKFKGKIKHKHKGKFKGKGKGKFKEKFKEKDKDKDKCKCKBKAK?K>K>K=K<K;K;KJKQKRKRKQK;K-K.K,K*K(K'K&K%K&K(K0K3K2K[KKyKyKsKqKpKsKwKwK{K~KK|K{K{KzKtKpKoKqKqKoKpKoKlKpKrKkKbKXKMKDK=K8K@K,K@KOKXKRKJKHK=K6K1K2KAKJK<KKKKK K K +K +K K K +K K K KK +K K +K +K K KKK K K KK K +K K K +KKKKKKKKKKKKKKKKKKKKKKKKKK K e]r9��(K:K,K#KKKKK*K@K>K/KKKKK#K&K'K$K5KtKnKAK=KXKyKxKyKyKyKyKxKxKuKxK[K@KBKhKyKuKvKxKyKxKxKxKyKmKEK=KVKxKuKuKtKzKeKBK:KDK=K<K<K>K,K9KOKFKCK<K4KLKOKRKIK=K;K6K'K'K#K!K KKK&KRKLK:K5K6K1KKKKK2KXKFK3K#K&K(K+K0K4K2K=KXKPK>K:K:K9K;K=K<K;K>KOKZKCKLKwKyKxKxKxKxKxKzKrK\KQKDKtKyKxKxKxKxKxKzKhKUKBKOK}KnKXKBK4K1K8KAKOKWKUKDKFKJK6K-K%KKK 
KKK'KXKEKKK#K6KGKQKSKRKPKOKIKAK7K.K:KEKLKSKUKSKRKRKRKRKRKQKQKSKRKPKPKQKPKPKQKOKMKMKMKNKNKMKLKNKQKYKfKrKzKKK{KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKKK}K~KKKKKKKKKKKKKKKKK{KVKRKQKPKPKTK6K8KWKWKVKNKFKOKSKdKKKKKKKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKhKGKIKIKIK@K>KGKJKGKFKHKHKFKGKIKIKIKGKFKGKGKGKGKGKGKFKDKDKDKDKDKBKAK?K?K?K>K6K9K9KCKRKSKTKTKDK.K.K.K+K)K)K&K%K%K%K,K1K1KXKKyK{KwKrKpKsKwKwK{KKK|KzKzKzKtKpKoKqKrKpKpKqKrKrKqKhK`KVKJK@K:K?K=K4KKK[KUKNKJK;K3K4K;KBKAK9K:K'K KKK K K +K +K +K +K +K +K +K K K K +K +K +K KKKKKKKK +K K K K K K K +KK K K KKKKKKKKKKKKKKKK KK KKe]r:��(KCK=KKKKKK-KCK>K*KK!KKK"K$K%K#K<KxKdK?K>K^KyKuKvKuKvKyKwKuKvKvKRK=KCKmKwKwKxKxKxKxKxKxKyKjKAK=K[K{KwKxKwKyKcK?K:KCK=K<K<K>K-K>KNKEKCK<K5KKKMKOKFK;K:K4K'K'K$K"K KKK*KSKIK;K7K7K1KKKKK6KYKEK0K"K&K(K/K1K4K1K>KWKNK<K9K:K<K<K=K;K<K@KPKWKCKNKvKyKxKxKxKxKxKzKrK\KRKGKuK{KzKzKyKxKxK{KhKSKEKLKzK{K~KxKgKQK;K2K>KQKVKCKMKlK`KMK5K*K'K KK K"KSKJK*KK +KKK+K<KIKOKSKPKQKBK:K@K8K4K9KCKNKRKSKSKQKPKPKPKQKQKQKQKPKPKPKQKPKPKNKMKMKMKNKNKNKNKLKKKNKVKeKqKyK|K~K}K}K}K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK~K~K}KKKKKKKKKKKKKKKKKKKKKKK|K^KQKQKPKPKRK>K/KUKVKUKRKFKHKUKUKKKKKKKKKKKKKKKKKKKKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKyKKKIKIKGKEKCKEKKKGKFKGKGKHKHKIKGKGKGKFKFKGKGKFKFKGKFKCKCKDKDKDKCKCKAK@K?K=K1K7K;K=KPKQKRKTKNK6K-K-K*K*K'K(K(K&K%K(K/K/KUKK{KzKyKtKsKsKtKvK{KKKK~KxKvKrKqKpKoKrKrKmKlKpKtKqKfK`KUKIK9K9KDK;KGKYKTKNKTKAK:K=K>K@K?K<K7K6K.KK KK K KKKK K K KKKKKKKKKKKKKKKKKKKK K K +K K K K K K K K K K K K KKKK K K KK K K K K Ke]r;��(KAKBK4KKKKK4KHK@K'K K!KKK!K$K%K!KFK~K\K=K?KhK{KuKvKvKvKyKwKuKwKuKLK<KGKsKvKxKyKxKxKxKxKxKyKdK@K>KcKzKxKxKxKxK]K>K=KCK=K=K<K>K*KAKNKEKCK;K5KKKMKOKEK;K:K3K'K%K!K!K KKK/KUKHK:K8K7K0KKKKK8KYKDK/K#K&K(K/K1K4K2K?KXKMK;K9K:K=K<K=K;K<K@KQKWKDKRKwKyKxKxKyKyKxKzKrK\KRKHKuK{KzKzKyKxKxK{KhKTKFKNK{KxKxKzK|K~KwKWK+KIKWKDK=KOK^KfKHK/K/K3K-K%K2KRKMK4K K K +K KKK K0K=KHKRKMKKKPKMKHKAK8K5K<KGKMKQKRKQKQKQKPKPKPKPKQKPKPKQKQKOKNKNKNKMKNKNKNKNKNKNKLKLKSK]KgKrK|KKK}K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK~K~K}KKKKKKKKKKKKKKKKKKKKKK~KKhKOKPKQKPKQKIK,KPKVKUKRKIKGKTKOKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKZKIKOKNKRKLK@KJKGKGKGKGKIKIKIKGKGKGKFKFKFKFKFKFKGKFKDKDKDKDKDKDKCKBKAK?K?K=K<K9K:KIKRKQKRKSKAK,K-K+K+K'K(K(K&K&K'K,K0KRKK~K{KyKuKtKtKsKvK{KKKKKwKuKyKvKtKqKoKnKmKjKkKqKqKeK]KTKHK.K9KBKHK\KYKIKSK\KPKLKLKHKIKIKIKFK=K7K,KKKKKKKKK +K KKKKKKKKKKKKKKKKKKKKKKKKKK K K K K KKKKKKKKKKK K K +K K +KK#e]r<��(KDK?KEK:K$K'K(K9KDK@K&K!KKKK K#K&K KLK|KRK<KAKoKzKxKyKxKxKxKxKxKyKsKGK>KOKxKxKxKxKxKxKxKxKxKyK\K>K@KjK{KxKxKxKwKYK>K>KCK>K?K=K<K)KDKNKEKCK8K8KLKNKOKCK<K;K1K%K"K K KKKK0KWKDK8K6K8K.KKKKK<KWKBK.K"K%K(K/K2K3K4K?KZKMK<K:K:K=K=K=K;K4K7KRKYKEKPKwKyKxKyKzKzKxK{KrK[KSKKKtKyKyK{KxKxKzK|KjKWKIKNK{KzKzKzKzKyKKdK +K;KZKEK7K<KAKMKLKCKAKJKSKGK@KSKKK9KK K +KKKKK KK%K5KCKKKPKRKPKOKMKGK?K7K5K;KFKNKRKSKQKQKPKQKPKQKQKPKPKQKQKQKPKMKNKMKNKNKNKMKPKSKRKUKRKTK[KfKpKyK~K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKrKRKQKQKQKOKOK0KEKVKWKSKLKGKQKNKyKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKlKPKTKQKQKQK@KEKJKHKIKIKIKIKIKIKIKGKFKGKFKFKFKFKFKFKGKEKCKDKDKDKDKDKCKAK?K?K=K<K8K@KPKRKSKRKLK0K-K.K-K*K(K)K'K$K%K+K.KMKKK|KzKyKuKvKtKvK{K|KKKKtKsKvK|K{KxKtKoKmKiKgKnKlKeK]KUKEK-K=KIKYK]KNKLK[K\KXKSKOKIKMKPKQKOKEK>K7K4K-K"KK KKKKKK K K KKKKKKKKKK KKKKKKKKKKKKKKKKK +K K K KKKKKKKKKKKKKKe]r=��(KHKEKBKEK?K&K!K:KBK8K!KKKKKK#K%K!KSK{KJK>KHKtKyKxKxKxKxKxKxKxKzKkKAK>KVKyKxKxKxKxKxKxKxKxKyKWK=KCKnKyKxKxKyKvKSK=K?KAK=K>K=K:K)KHKKKDKBK6K:KLKMKNKBK<K;K1K&K%K#K"KK 
KK4KVKDK8K7K8K/KKKKKCKVKAK0K'K)K,K0K3K3K0K=K[KKK<K:K:K;K;K<K:K7K;KQKXKEKQKxKyKxKyKzKzKyK{KqK[KRKKKuKzKzKzKyKyKzK|KkKVKKKNK{KzKzK{KzKzK~KVKK3KZKHK/K(K4K;KBKDKEKBKNK[K`KSKNK=K.K#KKKK�KKK +K"K:KEKGK8K5KFKOKRKPKNKKKEK=K5K5K<KFKNKQKQKPKQKOKPKPKQKPKOKOKOKMKNKMKNKNKMKNKPKSKRKUKTKSKOKNKOKYKeKsK{KKK~K}K}K}K~K~K~K~K~K~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKxKSKPKQKQKOKRK6K8KVKVKTKOKFKNKSKeKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK|KYKPKPKPKTKDK@KIKHKIKHKHKHKIKIKIKGKFKGKGKGKGKGKGKGKGKFKEKDKDKDKDKDKCKAK?K?K=K<K9K<KLKRKSKRKTK=K-K.K-K*K(K'K&K$K%K)K+KIKKK~K{KyKtKuKsKuKzK~KKK~KrKqKoKpKvKzKzKwKsKnKiKlKiKaKYKTKEK3KGKXK\KVKHKXK_K\KWKQKOKNKQKRKRKMKFKBK?K;K6K7K7K2K$KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK K K K +K +K K +KK KKKKKKKKKKe]r>��(KIKIKCK@KCK9K,K:KAK4K KKKKKK K"K"KZKvKDK=KRKyKxKxKxKxKxKxKxKxKzKeK@K>K\K{KxKxKxKxKxKxKxKyKzKQK>KGKqKxKxKxKyKrKOK;KAK>K;K<K=K:K*KJKHKBK@K5K;KNKKKLK?K<K9K5K3K3K3K2K1K2K.K>KSKDK8K8K8K1K(K)K.K/KJKSK?K7K6K7K6K6K5K5K.K;K[KKK:K9K:K9K9K:K9K:K=KPKWKCKSKyKyKxKyKzKzKzK|KqK[KQKKKvK{KzKzKzKzKzK}KkKUKMKOK{KzKzKzKzKyK~KTKK/KYKJK*KK KK,K9K=K>KBKHKWKUKNK;KEK?K2K-K$KKK +KK.KGKMKQK2KK)K+K;KJKNKPKPKPKMKEK=K3K/K:KFKNKPKOKQKPKQKNKMKMKMKMKMKNKMKMKMKNKSKSKSKPKPKPKQKQKRKQKPKRKVK`KoK{K~KK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKZKOKQKPKPKTK?K0KTKUKVKQKIKJKUKUKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKcKOKQKPKSKHK=KIKHKIKHKFKGKIKIKIKGKGKFKFKFKFKFKGKGKGKGKGKEKCKDKDKDKCKBK@K>K=K<K:K8KCKQKTKSKTKIK1K-K-K*K)K'K%K&K%K&K(KEKKKK|K{KvKvKuKtKxKKKKKsKpKqKlKoKtKvKvKwKsKnKmKhK_KWKQKDK;KSKZKYKMKQK\K`K^KVKRKSKUKUKUKTKSKJKFKAK=K;K8K8K6K4K1K%KKKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK KK K K KKKKKK"K)e]r?��(KIKHKHKDK?KAK>K=K>K,KKKKKKK K$K#K`KnK?K<KXK{KxKxKxKxKxKxKxKxK{K`K?K?KcK|KwKxKxKxKxKxKxKyKwKKK=KJKuKxKxKxKzKpKKK:K?K>K<K<K;K7K-KMKHKCKAK5K>KMKLKLK>K:K8K1K*K)K)K'K&K&K"K<KVKAK8K7K9K/K K"K$K$KHKSK?K.K K K KKKKK<KZKMK;K8K8K8K9K:K7K9K=KRKXKDKSKyKyKxKyKzKzKzK|KrKZKSKKKuK{KzKzKzKzKzK|KkKUKMKNKyK{KzKzKzKyKKSKK.KYKKK+KKKKKKK,K6K<KDKPKQK<KQKhK]K@K/K.K)K"K'K KKEKSK=KFKgKPK>K4K4K?KLKQKOKNKLK9K3K9K5K5K;KGKNKQKQKNKMKMKNKPKOKMKOKOKNKQKRKSKSKNKMKPKQKPKQKSKSKNKOKNKOKWKaKqKzKKKK~KK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKeKPKQKQKPKOKGK,KQKVKUKRKLKGKUKOKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKqKPKQKPKQKOK<KCKJKHKHKHKHKIKIKIKGKFKHKHKHKHKHKGKGKGKGKGKGKEKDKDKDKCKBKAK>K=K<K<K9K<KNKRKQKRKQK6K+K/K+K+K'K%K%K&K&K%K@KKKK~K~KzKyKwKtKyKKKKKvKsKqKnKlKlKnKsKwKuKsKoKhK`KWKRKDKHKWKYKPKIKZK`KaKaKZKWKYKXKXKZKYKZKVKSKOKLKKKFKFKDK?K=K;K9K6K2K+K"KKKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKK +K +K K +K +K KKK,K<K8K@e]r@��(KIKHKEKGKDK>K?KAK=K%KKKKKKKK!K&KgKcK;K=K_K|KxKxKxKxKxKxKxKxK{KZK>KAKjKzKxKxKxKxKxKxKxKzKsKGK;KOKzKxKxKxKzKoKLK8K@K=K=K<K;K3K/KOKGKCKAK4K@KMKLKLK=K8K9KKK +K +K KK KK5KVK?K8K7K9K&K K +K K KDKUK?KKK K K +K KK$KBKZKJK9K8K8K7K8K6K4K8K>KSKYKEKSKyKyKxKyKzKzKzK|KsKYKTKKKuK{KzKzKzKzKzK|KlKTKMKMKwK{KzKzKzKyKKLKK*KYKLK/KKKKKKKKK K1KLKRK=K?KWKcKaKFK1K4K3K/K)K(KDKVKEKCKeKgKjKnK\KCK7K6KAKJKOKIKIKLKIKAK;K5K6K<KFKNKOKNKNKQKOKMKPKQKRKUKRKSKSKNKMKPKQKPKQKSKPKPKSKSKSKQKMKQKYKbKoKyKKKK}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKmKQKQKQKPKLKMK.KJKWKUKRKMKEKOKOKwKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK|KVKPKQKPKQKBKAKLKHKIKIKIKIKHKIKFK>KHKIKIKIKIKGKGKGKGKGKHKEKCKDKDKCKBKAK>K<K=K=K<K9KHKSKPKPKTKEK-K/K-K*K&K%K&K&K'K%K<KKKKKK}KzKvKtKyKKKKKxKtKqKnKkKkKiKjKpKuKtKoKhKaKVKOKCKLKWKUKIKRK^KcKbKbK`K^K^K[K[K[K[K[KWKVKUKSKSKQKQKNKJKEKAK@KAK=K6K.K,K%KKKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK KKnKKKe]rA��(KIKHKFKGKIKEK=K;K;K"KKKKKKK!K K'KmK\K:K?KjK{KxKxKxKxKxKxKxKxKyKPK<KEKpKyKxKxKxKxKxKxKxKzKpKEK<KUK{KyKxKxKzKjKJK9KAK=K=K<K=K1K0KNKHKEK?K2KCKMKKKJK;K9K8KKK K K KK +KK:KWK=K7K7K:K%K 
+K +K +K KGKRK=KK +KKKK K+K.KBK[KIK7K3K3K3K4K5K6K:K<KQKYKEKSKzKzKzKzKzKzKzK{KrKZKTKKKuK|KzKzKzKzKzK}KnKUKNKMKwK{KzKzKzKyKKHKK&KXKOK4KKKKKKKKKKKEKRK?K;KFKGKPKUKEK?KKKLK@K4KCKUKHK@K\KfKdKoKzKvKoK[KCK4K;KIKLKKKNKLKJKHKBK8K5K9K?KEKOKQKOKMKNKRKUKVKUKTKRKQKPKRKRKRKRKSKRKRKSKRKRKSKRKRKQKRKSKXKbKnKyKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKwKTKPKNKMKMKPK1K=KZKUKTKOKDKLKSKaKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKaKNKQKPKPKJK?KJKHKHKHKIKIKIKIKHKHKIKHKIKHKHKHKHKGKGKGKGKFKFKFKDKCKCKBKAK@K=K=K<K8K?KQKRKRKSKOK6K.K-K,K)K(K&K&K%K%K6KKKKKK}K|KxKvKzK}KKK~KxKtKpKlKlKoKlKgKjKrKsKoKiK]KUKIK=KNKWKMKJK\K_KbKbKaKbK`K_K]K\KZKZK[KYKVKWKTKSKOKQKTKQKJKFKFKCK;K3K1K1K+K&K"KKKKKKKK K KKK KK KKKKKKKKKKKKKKKKKKK K KKK&KHKKe]rB��(KIKHKGKFKDKHKCK1K8K5KKKKKK"K!KK-KqKQK;KBKpKzKxKxKxKxKxKxKxKyKvKJK<KIKsKyKxKyKyKxKxKxKxK{KlK@K>K[K{KyKxKxKzKhKCK8KCK=K=K<K>K/K2KMKGKEK?K2KDKMKKKIK;K9K6K#KKKKK K K K?KUK<K8K7K7K"KKKK +KHKRK<KKKKK#K*K/K.KBKXKGK9K2K3K6K7K9K8K;K;KPKYKEKSKzK{KzKzKzKzKzK{KrKZKTKKKuK|KzKzKzKzKzK}KoKUKNKMKwK{KzKzKzKyKKBKK%KWKPK9K +KKKKKKKKKK@KSKAK,K/K9K:KAKCK>K>KMK[K\KQKQKLK:K8KJK[KgKtKrKuK{KzKpKXKIKEK=K@KMKPKNKKKKKHKAK9K4K5K;KIKNKSKWKUKTKVKUKSKTKTKSKSKRKRKSKSKSKRKSKRKRKSKSKSKRKQKQKPKOKTKaKlKzKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKYKOKNKMKMKOK>K;KWKUKUKPKGKIKUKTKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKrKOKQKQKPKMKAKDKJKIKHKIKIKIKIKIKIKHKIKHKHKHKIKHKGKGKGKGKGKGKFKDKDKDKCKBK@K=K<K=K:K:KLKTKRKRKTK@K.K.K,K*K)K&K&K%K$K3KKKKKKK}KxKwKzK}KKKKxKtKrKqKoKoKoKlKiKkKnKnKiK]KVKEK@KTKUKFKTK]KbKdKbKbKbK`K_K]K\K[K[K[KZKWKVKRKRKTKUKUKQKHKLKOKCK;K8K9K8K5K0K3K,K$K#K(K)K"KKKKKKKK"K KKKK KKKKK K K K KKKKKKKKK'K)K3KdKe]rC��(KHKJKIKHKFKIKIKAK>KAK3K K K!K!K!K!KK7KtKFK<KIKtKzKxKxKxKxKxKxKxKzKrKFK<KLKxKyKxKzKzKxKxKxKxK|KcK<K<KbK|KxKxKxKzKcK@K9KBK>K<K<K=K,K8KNKEKCK<K2KHKLKKKGK9K:K7K%KKKKKK K KCKRK:K9K6K7K!K +KKKKLKQK;K KKK"K*K.K.K+KCKZKEK9K8K8K7K8K:K:K4K9KRKWKCKSKzK{KzKzKzKzKzK|KrKYKTKKKuK|KzKzKzKzKzK{KoKTKOKNKwK{KzKzKzKyKKGK�K"KUKNK<K KKKKKKKKKK=KVKFK KKKK-K:K<K>K=KEKVKYKQKMK=K5K-K1K?KVKkKvKzKvK|K|KRKNK,KK*K0KCKLKOKLKJKHKEK?K8K-K2KAKQKXKWKVKVKUKRKSKRKRKOKQKSKRKRKRKRKRKRKRKRKRKQKPKQKQKQKPKNKMKUK`KmKxKKKKKKKKKKKKKKKKKKKKKKKKKK_KMKNKMKNKMKNKSKTKVKVKRKHKEKTKPKKKKKKKKKKKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKXKPKQKPKDKAKCKLKKKHKIKIKIKIKIKHKIKHKHKHKHKIKHKGKGKGKGKGKGKGKGKFKCKDK@K?K>K;K5K9K;KEKTKRKRKTKIK2K/K+K+K)K&K&K$K#K.KKKKKKKKzKzK{K~KKK~KuKrKwKvKuKqKqKnKjKjKkKlKfK\KSKCKDKXKLKNK\K`KdKeKeKcKaK`K_K`K_K\K\K[KZKVKTKTKVKYKYKSKOKKKTKOK@K?K@KDKBK>K;K:K8K5K6K8K3K-K&K"KKKKKKK1K2K)K"KKKKKKKKKKKK!K%K'K*K,K1K>KGKAK`Ke]rD��(KHKIKHKGKGKHKHKHKDKAK@K7K!KK K K KK7KkKBK<KOKwKxKxKyKyKyKyKxKxK{KnK@K<KVK{KyKyKzKzKyKyKxKwK|K]K<K>KhK|KxKxKxKyK_K?K<KBK=K<K<K:K+K<KMKEKBK<K3KHKLKLKGK:K;K7K&K!KKKKKKKGKQK9K8K7K6K K KK KKOKPK<K#KK"K%K+K,K+K-KFKYKFK9K7K7K8K9K:K9K8K:KQKWKCKSKzK{KzKzKzKzKzK|KsKYKTKKKuK|KzKzKzKzKzK|KpKUKPKMKwK|KzKzKzKyKKTKKKWKQK@KKKKKKKK +KKK;KSKIK%K�KK +K%K2K/K6K<K<KEKNKPKMK?KPKEK5K+K)K6KGK_KsKxKKVKNK5KTKoKKK8K3K:KEKOKLKHKJKGK0K9K4K3K;KDKQKYKWKSKSKSKSKPKRKSKRKRKRKSKRKRKRKRKRKQKQKQKPKQKQKQKPKRKQKOKRK\KkKxKKKKKKKKKKKKKKKKKKKKKKKmKNKOKNKMKMKNKUKSKUKUKRKKKFKNKNKmKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKcKNKQKOKMKJK?KLKKKHKIKIKIKIKIKIKIKIKIKIKHKIKHKGKGKGKGKGKGKGKGKEKCKCKAK@K=K>K4K5K:K;KOKSKRKTKSK9K-K,K*K)K&K&K#K K,KKKKKKKK~K|K{K|K}KK|KrKmKqKuKvKuKuKsKoKnKlKiKeKZKOKAKIKSKIKXKaKdKdKeKeKcKaKaK`K`K_K^K\KZKYKWKWKXK[K[KXKQKOKRKVKLKDKFKEKHKGKDKAK>K>K>K?KAK7K3K.K.K-K,K+K,K'K#K,K?K<K1K%KK K#K(K,K+K*K+K.K/K0K4K9K>K@KDKKKSK{KKe]rE��(KIKHKFKFKFKFKGKIKIKDK@K@K6K 
KKKK!K)K<K?K=KVK|KxKxKzK{K{KzKxKxK{KgK>K;K]K|KyK{KzKzK{KzKxKwK|KXK<KAKmK{KxKxKxKyK\K>K;K@K;K;K=K<K*KAKMKEKBK;K3KHKLKLKGK<K=K8K'K"KKKKKKKJKOK8K7K8K4KK K +K KKPKOK=K#KK"K'K,K0K0K/KEKZKFK:K7K7K9K;K:K8K;K;KPKWKDKSKzK{KzKzKzKzKzK}KuKZKTKKKuK|KzKzKzKzKzK|KrKWKQKLKxK{KyK}K}KyK}K\KKKWKQKBKKKKKKKKKK=KFKOKIK4K%KK#K'K!K$K0K.K2K>KIKNKMK?KMKUKRKCK5K*K&K,K9KKKiKYKOK?KTKKKzKgKNK:K3K;KFKMKIKEKJKHKCK=K6K3K;KDKNKWKXKVKRKRKRKRKSKSKRKRKSKRKRKRKSKRKPKPKPKQKPKNKRKQKQKPKMKMKSK]KjKwKKKKKKKKKKKKKKKKKKKKvKRKQKOKMKMKNKRKUKUKTKRKOKGKKKRKPKQK[KbKuKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKsKRKQKOKRKOKAKFKKKHKIKHKIKHKHKIKHKIKIKIKIKIKHKFKFKGKGKGKGKGKGKFKCKBKAK@K=K=K:K:K:K8KGKSKRKSKUKFK.K,K)K)K&K&K%K"K(K|KKKKKKKKK~K}K|K}K{KrKkKiKnKqKtKvKvKuKpKlKiKcKZKNK@KPKLKQK_KdKeKdKeKeKcKaKbKaK_K`K`K\KYKWKXKZK[K]K[KWKRKTKZKUKMKLKKKKKKKIKHKCKBKCKFKFKCK?K<K;K9K;K;K7K8K7K/K0K@KFK=K0K-K.K.K6K;K;K:K:K;K=K<K@KHKJKMKOKpKKKKe]rF��(KIKHKHKGKFKFKGKIKHKGKEK?KAK8K!KK K!K+K4KAK=KaK}KxKxKzKzKyKyKxKxK{K`K=K=KcK|KyKzKzKzKzKzKxKxK{KRK;KCKtK{KyKxKxKxKYK=K=K@K<K=K;K;K0KEKIKDK@K:K;KIKIKLKFK<K=K6K$KKKKKKKKMKKK9K8K8K4KK K KKKRKMK:K$K!K%K*K/K1K1K0KGKXKFK9K7K8K8K9K8K8K:K=KRKWKDKSKzK{KzKzKzKzKzK}KuKXKRKJKtK|KzKzKzKyKyK|KuKWKRKJKWKLKIK[KoK|KK^KKKQKQKDKKKKKK +KKK(K/K=KRKIKBK:KKK!K*K1K8K0K-K6K?KOKNK=KCKKKKKPKEK5K2K2K-K(K-KFKPK@KPKKK}KKKyKfKKK6K5KBKIKJKLKLKJKHKCK<K5K3K;KGKRKUKTKSKSKRKRKSKSKRKSKRKRKQKQKPKPKPKPKPKPKPKPKQKRKQKPKPKNKOKRK]KkKxKKKKKKKKKKKKKKKKK{KVKOKPKOKNKMKQKVKVKSKSKPKIKJKPKPKXK^KMKPKSKPKcKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKXKPKOKQKQKFK@KIKHKHKIKHKIKIKHKHKIKIKIKIKIKHKHKGKGKGKGKGKFKEKEKCKCKCKAK@K?K<K=K<K8K?KRKSKRKSKPK7K-K+K)K&K$K$K#K%KyKKKKKKKKKKKKK{KrKnKlKhKiKnKtKvKvKtKmKkKcKYKJKAKOKKK[KcKdKeKfKeKeKdKcKbKaKaK`K_K\K[KYKZK]K]K]KZKWKTKVK^KXKOKRKPKNKNKKKJKJKIKLKPKIKBKBKEKGKEKCKBKAKAKAK;K9KBKKKJK=K:K<K?KCKEKCKCKCKDKFKFKGKLK\KpKKKKKKe]rG��(KIKIKIKGKFKGKFKEKFKGKGKDK>KAK:K#KK!K)K:KEK=KhK}KwKxKyKzKxKxKxKyKzKWK=K?KiK{KyKzKzKzKzKzKxKxKxKJK7KHKxK{KyKxKzKvKUK;K>K@K<K>K:K:K<KGKGKCKAK>KEKJKHKJKAK;K=K5K#K!KKKKKK!KOKJK9K8K8K1KKKKK"KTKLK9K%K#K&K*K.K/K0K0KHKVKGK8K7K8K7K7K7K8K:K>KSKWKDKSKzK{KzKzKzKzKzK}KuKWKQKHKsK{KzKzK|K~K|KuKbKUKRKIK>K8K8K8K?KNKjKSK�KKRKQKEKKKKK KK-K5K$KK:KTKMK@K KK)K-K-K1K5K4K3K<KAKOKOK<KBKKKCKEKDK@K6K7KBKDK8KCKPKFK?K^KtKKK~K~KKKuK`KKKAKBK?KHKMKKKKKJKHKCK8K3K3K9KFKLKQKVKSKSKSKQKSKRKRKQKPKPKPKPKPKPKQKQKPKQKSKQKQKQKQKQKPKPKQKRK[KkKuK~KKKKKKKKKKKKKK_KNKQKQKNKMKRKUKVKRKSKRKJKHKPKRKUKVKTKNKZKQKMKKKKKVKlKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKeKNKPKPKQKMK@KHKIKHKIKGKIKIKHKIKHKHKHKIKHKHKIKHKFKGKGKGKFKCKDKDKCKDKBKBK@K=K=K<K9K6KLKTKRKRKTKBK.K+K)K'K#K$K#K%KzKKKKKKKKKKKKKzKrKnKoKhKgKhKmKvKvKuKmKlKcKYKBKDKNKRKaKeKhKhKhKeKeKeKeKeKeKcK`K_K\K\K\K_K`K_KYKXKWKVKZK_KZKSKSKPKMKNKOKOKOKOKRKPKIKDKEKIKJKFKGKGKEKDKCK?K>KIKMKPKJKBKDKIKKKJKGKGKGKHKNKOKbKKKKKKKKKe]rH��(KGKHKIKIKIKHKEKFKEKDKDKGKDK@KAK7K"K K-K8K>K;KJKrK|K~K~K|KzKyKyKyKzKPK=KEKqK|KzKzKzKzKzKzKzK{KuKFK9KJKxK{KzKzK}KtKOK8K?K?K<K=K=K;K:KJKIKBK@K>KFKHKHKJK?K9K:K3K%K#K KKKKK$KQKJK8K7K8K1KKKKK$KVKLK8K&K"K%K*K.K/K1K2KJKWKDK:K6K8K6K9K7K8K:K;KRKWKDKVKzK{KzK{K}K|KzK}KtKWKRKIKsK~K}K|KrKbKTKDKAKSKSKIK@K9K;K=K=K:K<K,KKKQKRKHKKKKKKKK 
+KK%K<KSKNKAK0K+K*K,K*K-K0K3K6K>KBKNKPK=KBKOKJKIKGKBK@K;K@KKKQKQKNKJK8K/K7KIKcKxKK~KK}KKfKIKEKK#K.K@KGKOKLKJKJKJKAK7K1K3K=KKKPKSKPKQKQKRKSKRKRKRKRKRKQKPKPKPKPKQKQKRKRKQKPKQKPKQKQKPKMKOKPKYKiKsKKKKKKKKKKKKiKPKQKQKOKLKOKMKSKSKSKPKLKEKNKQKRKNKXKIKVKOKOKPKOKOKMKMKOK^KxKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKuKQKQKPKPKRKCKEKIKIKIKHKIKIKIKHKIKIKHKIKIKHKGKGKIKGKFKGKGKFKEKCKCKDKCK@K?K>K=K;K:K8KDKSKSKUKTKNK4K,K)K&K&K"K!K$K{KKKKKKKKKKKKK}KuKkKiKjKhKfKiKqKwKwKqKjKdKZK?KBKOK[KdKhKiKfKgKeKdKdKdKfKgKeKaKaK`K\K^KcKaK]KZKYKXKXK\K]K\KXKTKRKOKNKSKPKPKSKTKNKFKGKKKLKKKIKIKIKHKFKEKBKEKNKNKRKRKJKHKKKOKOKKKKKJKKKSKPKKKKKKKKKKe]rI��(KGKGKIKHKIKHKEKGKFKCKAKCKGKDK?K?K9K%K-K8K;K<K4K9KJKWKdKrK{K~K~K{KwKGK<KGKwK{KzKzKzKzKzKzKzK|KrKBK:KRK}KzK{KzK}KoKKK6K@K>K<K<K=K9K:KJKGKBK@K>KEKHKHKJK@K9K:K2K&K$K!KKKKK%KRKEK5K6K8K2KKKKK&KUKKK8K&K"K%K*K.K.K2K3KKKWKCK:K5K8K6K9K7K7K:K;KRKWKDKWK{K{KzK{K}K|KzK|KuKXKRKIKpKoK^KMK?K;K:K=KCKSKSKJK>K7K=K<K;K:K=K(KKKNKSKHKKKKKK KKKK)KEKQKNKBK-K(K*K,K+K,K0K2K5K=KAKNKRKBKDKVKWKXKWKPKKKFKCKHKLKPKOKIK<K@K7K.K.K9KPKgK{KKKkKJKFK7KgKSK<K/K2KBKKKLKKKKKJKGKAK/K*K4K;KHKLKQKUKRKQKRKSKRKSKQKPKQKPKPKPKPKRKRKQKPKPKQKQKPKQKOKQKNKMKMKOKYKeKtK~KKKKKKKKsKNKOKPKOKLKNKKKRK\KUKOKMKGKKKPKRK[K[KHKRKQKMKNKNKPKQKRKRKMKLKJKTKiKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKWKOKQKPKQKGK@KJKHKIKIKIKIKIKIKIKAKGKIKIKHKGKHKIKHKGKGKGKGKFKDKDKDKAKBK?K>K<K:K:K9K<KNKTKUKTKSK>K-K)K'K&K"KK(KKKKKKKKKKKKKKKzKqKhKjKiKfKfKlKrKwKrKjKdKWK;KDKSKaKhKlKmKjKgKdKdKeKeKfKgKfKbKbKcK`K_KbKaK\KZK[K[K\K`K]K[KXKSKRKRKSKTKRKSKVKSKIKHKKKNKLKLKMKMKMKKKIKHKFKIKQKRKSKSKRKLKKKOKTKPKOKQKPKTK]KKKKKKKKKKe]rJ��(KGKGKGKGKGKGKGKGKEKCKDKCKFKGKDK>K<K9K3K8K=K:K4K0K/K.K4K;KGKSKaKpKoKBK;KKKzK{KzKyKzKzKzKzKzK|KlK>K:KXK}KzK{KzK{KmKHK7K@K=K<K=K=K8K;KJKGKCK?KBKHKHKHKIK?K9K;K2K&K$K!KKKKK)KOKDK8K6K4K/KKKKK*KXKHK7K%K$K%K*K.K.K1K2KLKWKBK7K4K5K8K7K7K7K5K8KSKWKCKWK{KzKzKzKzKzK{KKwKXKRKGKGKAK<K<K<K=K=K<K@KSKSKKK=K5K:K9K9K9K;K)KK +KMKSKKKKKKKK.KKKKK.KQKOKBK/K-K/K/K.K.K0K2K5K:K?KOKPKDKCKRKUKYK_K_K]KYKSKUKZKTKNKLK=KPKTKFK;K0K+K.K>KRKnKmKKKKK@KsKK~KmKQK;K2K7KBKMKLKHKGK8K;KAK8K3K5K=KJKRKRKQKSKRKSKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKRKNKMKPKRKQKOKNKOKWKdKrK}KKKKK{KOKNKMKNKMKLKPKSK[KWKQKNKIKIKMKQKUKQKJKQKTKMKNKNKNKNKPKPKNKOKOKOKMKKKQK^KwKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKcKLKQKPKQKNKAKIKIKHKHKHKHKHKIKHK?KEKIKHKIKIKIKIKIKIKGKGKGKGKGKEKCKDKCK?K=K;K9K:K7K6KFKUKRKSKTKIK1K-K(KK K0KIKKKKKKKKKKKKKKK|KvKkKiKjKfKbKgKmKsKuKlKaKRK9KIK\KeKjKoKqKqKeKaKcKfKfKfKfKdKdKdKeKcKaKbKaK\K[K[K[K^K`K\KXKVKPKQKUKUKTKWKWKWKPKJKNKPKOKMKNKOKQKOKJKJKJKJKNKUKTKUKVKSKOKOKQKWKXKUKVKWKRKKKKKKKKKKKe]rK��(KGKGKGKGKGKGKGKGKFKCKDKEKGKFKHKCK?K?K<K<K?K9K3K1K3K4K7K8K5K5K6K:K>K=K;KJKsKyK}K~K|KzKzKyKyK}KeK;K<K^K}KzK{KzK|KhKAK8KAK=K<K<K=K:K>KKKEKCK?KBKJKJKHKIK>K:K:K1K%K$K!KKKKK-KPKCK7K6K6K/KKKKK0KYKGK7K$K#K%K*K.K/K1K2KLKWKBK7K4K5K7K7K8K6K0K7KUKWKCKVK{K{K{K}K}K}KvKhKWKWKRKDK=K=K?K@K=K<K=K;K?KSKSKKK;K7K:K:K9K9K;K.KK 
KMKSKNK"KKKKK.K5KKKKKRKMKBK+K%K(K*K,K-K/K2K8K;K?KNKQKFKAKMKQKUKZK]K`KeKfKdKbK]KNKLK=KGKRKSKOKBK6K2K-K*K2K@KLKPK?KkKK~KKKKjKRK=K5K:KEKIKJKKKJKGKDK?K6K1K5K=KIKPKTKRKQKQKRKQKPKPKPKPKPKPKPKPKPKPKPKQKQKOKNKMKOKOKPKQKPKMKLKLKTKfKrK~KKK\KLKNKNKMKMKNKMKUKWKTKPKIKHKMKQKQKKKQKMKUKNKOKNKNKNKQKPKNKNKNKPKPKPKRKOKKKKKTKgKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKrKLKQKQKPKSKFKEKLKIKJKJKJKJKJKIKIKIKJKIKIKIKIKIKHKHKGKGKGKFKGKEKCKDKCK@K>K>K:K:K:K6K;KSKSKRKSKSK8K0K/K)K3KKKSKKKKKKKKKKKKKKK|KyKnKiKgKgKcKaKhKqKvKkKaKNK:KRKcKjKlKmKpKqKfKbKeKgKhKhKgKfKeKeKfKfKbKaK`K^K_K_K_K`K`K\KXKUKTKVKWKVKTKXKZKUKNKMKPKRKSKOKQKSKRKOKNKNKMKNKTKYKUKVKWKVKRKSKSKVKXKTKVKeKyKKKKKKKKKKKe]rL��(KGKGKGKGKGKGKGKGKFKBKFKGKGKGKFKGKDK?K?K=K<K8K4K2K3K5K7K8K8K8K8K7K6K;K=K:K<KDKOK_KlKuKzKK}KK^K:K>KdK~KzK{KzK|KbK>K8KAK<K<K=K=K7K?KJKFKDK@KCKIKHKIKHK;K:K:K.K%K$K!KKKKK/KQKAK5K6K9K/KKKKK2KYKHK7K"K"K&K*K.K0K/K2KMKVKBK7K4K5K8K8K8K7K9K>KTKWKCKWKK~KzKqKcKRKEK;K@KXKSKBK=K=K<K<K<K=K>K?K@KRKTKJK-K+K8K9K8K:K9K0KK KHKQKMK$K�KKKKK#K*K +KKKPKNKEK-K#K%K)K(K)K/K2K6K8K<KKKRKJKBKMKQKTKWKZK\KaKfKiKmKiKPKKK=KFKQKHKIKIK=K5K5K9K5K6KIKMKBKVKvKKKKKKK|KgKQK9K8KBKCKHKJKGKEKDKEK<K3K0K3K;KHKLKVKSKNKPKPKPKQKPKPKPKPKPKPKPKPKQKQKOKKKNKNKMKPKQKQKPKNKLKNKPKWKcKqK[KKKNKMKMKKKJKJKQKSKRKNKIKFKNKPKXKXKRKHKUKPKNKMKNKNKQKPKMKNKNKMKNKMKNKQKRKRKPKLKJKOKaK~KKKKKKKKKKKKKKKKKKKKKKKKKKK~KUKOKQKQKOKEKCKMKKKLKLKLKLKLKKKHKKKLKIKHKIKIKIKGKFKGKGKGKEKGKEKCKCKDKBK>K<K;K9K;K5K6KJKTKTKVKWKEK1K5K<KDKJKRKKKKKKKKKKKKK~KK{KxKrKlKiKfKbKaKcKlKuKnKaKGK=KYKdKlKmKjKpKqKiKcKfKiKjKjKjKhKeKgKeKdKdK`K_K`K_K`K_KbKaK\KXKXKXKXKWKWKXK\KYKQKPKOKQKSKSKSKSKSKSKOKPKQKPKQKUKVKUKWKXKYKXKXKVKXKKKKKKKKKKKKKKKKe]rM��(KGKGKGKGKGKGKGKGKDKBKFKGKGKGKHKHKHKEK>K?KAK8K5K6K5K6K7K7K7K7K8K8K7K:K<K8K6K5K3K5K9K@KKKYKfKrKSK:K@KnKKyKyKyK|K_K=K6K>K<K=K;K;K7KAKJKEKBK=K7KFKIKIKGK:K:K:K,K%K$K!KKKKK.KQKAK5K5K7K-KKKKK1KVKFK5K#K#K&K*K.K/K0K1KNKWKAK7K4K5K6K6K6K6K8K;KSKVKCKTKnK]KKK?K;K:K>K<KCKVKQKEK=K<K;K<K>K<K7K-K-KPKQKJK5K(K'K+K3K7K8K4K 
KKDKTKOK&K�KKKKKKKKKKNKQKGK6K1K2K3K2K2K3K5K4K6K<KHKOKMKEKKKQKRKSKWK\K_KbKdKjKkKSKMK@KRKeKVKOKLKGK<K9K<KCKEKIKKKHK5K2KJKbKyKKKK}KKK{KdKEKAK6K1KCKLKJKHKGKGKCK=K2K/K4KAKKKNKRKQKQKPKPKQKPKPKPKPKPKPKOKOKLKKKQKPKMKPKPKOKOKMKOKPKQKHKCKPK@KHKOKNKMKJKJKLKLKQKRKQKLKFKJKPKUK[KZKGKSKQKNKPKNKNKOKOKPKOKMKNKMKMKNKOKOKOKNKNKPKPKOKLKLKVKlKKKKKKKKKKKKKKKKKKKKKKKKKgKOKOKOKJKHKBKKKLKKKKKLKLKLKJKHKIKJKIKHKIKIKIKHKHKGKGKGKFKGKEKCKDKDKBK>K>K<K9K.K4K3K>KVKTKUKVKQK5K0K7KDKHKCK}KKKKKKKKKKKKKKzKvKtKlKkKfK`KcKcKfKqKpKaK=KBKaKeKjKkKkKpKpKhKdKfKjKkKjKhKgKfKfKfKfKdK`K_K`KaK`K_KbK`K[KYKYKYKWKXKYK[K^KWKRKQKTKUKTKRKRKSKSKQKPKPKQKTKWK[KWKXKYKVKUK[KYKeKKKKKKKKKKKKKKKKKe]rN��(KGKGKGKGKGKGKGKGKDKCKCKGKGKGKIKIKIKIKDK6K8K?K3K0K9K;K:K9K8K7K7K7K6K8K:K6K7K9K8K7K7K6K4K2K4K9K;K:K<KdKxKzKK~K}K^K=K:K=K:K=K:K:K2K;KLKEK@K=K.KEKJKIKFK9K:K9K*K%K$K!KKKKK4KRK?K6K4K5K+KKKKK6KVKEK3K#K$K%K*K.K/K0K0KPKWKAK7K4K5K4K4K4K5K8K:KRKVKEK?K7K3K:K<K=K=K;K:KBKUKPKGK@K>K=K4K*K#KK"K/KPKOKJK:K5K3K%K(K)K-K6KKKEKVKOK*K�KKKKKKKKKKLKQKKK9K/K2K3K9K=K=K?K>K<K>KHKOKNKHKIKPKPKRKVKZK\K_KbKeKiKUKNKCKYKKtKhK]KUKMKAK;K?KIKLKLKHK;K3K(K*K;KQKiK|KKK~KK}KMKNK2KK&K)K8KEKJKNKIKEKFKBK;K2K1K7K@KKKQKTKQKPKPKPKQKQKPKQKNKNKKKJKPKRKQKQKPKMKNKLKPKQKQKDK<KQKBKBKPKNKMKLKMKBK9KSKRKSKNKFKGKNKRKZK]KIKLKRKOKQKNKMKMKNKQKOKMKOKMKMKNKMKMKMKNKNKMKOKRKQKQKPKLKLKPK`K~KKKKKKKKKKKKKKKKKKKKKwKTKRKPKRKUKEKEKMKKKLKLKLKLKJKHKIKHKHKHKHKIKIKIKIKGKGKGKGKGKEKCKDKDKBK?K?K=K9K3K5K4K5KOKUKRKUKWKAK.K3K:K)KKtKKKKKKKKKKKKKK{KxKpKkKnKiKbKaKeKeKmKoKaK=KMKaKfKjKjKkKqKpKhKeKfKjKlKkKgKfKgKfKfKgKdK`KbKbKaKbKcKaK]KZK[K[K\K[KZK[K]K[KUKQKRKVKVKSKSKVKVKRKPKQKSKTKXK\KXKTKVK]KqKKKKKKKKKKKKKKKKKKKKKe]rO��(KHKGKGKGKGKGKGKGKGKFKFKGKEKHKGKGKGKHKJK<K7K?K7KKK#K*K1K7K9K9K9K6K;K<K8K8K7K7K7K8K8K8K8K8K6K7K=K:K7K=KHKUKeKmKSK6K8K=K:K=K<K>K+K:KKKDKAK:K2KFKHKIKEK9K;K9K-K%K"KKKKKK9KRK>K6K4K6K)KKKKK9KWKDK2K#K$K&K)K,K.K.K2KQKTK?K7K4K5K5K5K5K4K5K:KRKSKCK;K5K5K;K:K<K<K<K:KBKTKQKDK3K(K"K K$K-K2K;KGKRKRKKK;K3K4K2K3K.K)K+KKKAKVKOK-KKKK�KKK 
KKKKJKRKLK3K$K*K*K-K1K:KEKHKEKBKIKPKNKHKMKOKOKQKRKVKZK]K`KcKiKYKMKCKQKKKK}KqKeKUKNKGKCKHKMKGK>KEK;K8K/K*K.K>KVKnKKKKSKKK;KQKyKWK=K-K/K;KFKJKJKGKGKFK?K8K/K+K2KAKLKRKRKOKMKMKOKMKMKNKPKOKPKQKQKOKNKMKNKKKKKOKPKQKNKRKFK>KPKOKMKMKPKEK*KPKRKRKMKHKFKNKQKRKNKPKFKRKOKQKPKPKPKPKNKNKMKNKMKNKNKNKNKNKNKNKNKNKNKNKMKPKQKOKPKNKJKKKTKmKKKKKKKKKKKKKKKKKKK[KTKTKSKRKKKCKLKKKLKKKKKLKJKHKJKKKKKKKKKJKHKIKIKHKHKHKHKGKFKFKDKCKBKAK?K<K9K7K4K3K3KEKUKUKUKWKKK1K.K(KKKtKKKKKKKKKKKKKK~KyKqKjKmKmKeK]KbKfKlKmK_K?KTK`KfKiKhKiKnKoKjKgKiKjKlKjKiKiKhKfKgKdKcKdKeKdKbKdKeK^K\K[K[K]K\K[K]K_K\KYKUKSKWKXKVKUKUKXKXKUKRKRKVKYKZKZKjKKKKKKKKKKKKKKKKKKKKKKKKKe]rP��(KDKEKGKGKGKGKGKDKEKFKFKFKDKGKGKGKGKGKIKEKAK=K=K8K.K$KKKKK$K,K7K=K<K9K7K8K7K7K8K8K7K8K8K9K<K>K:K6K8K4K.K1K0K1K.K:K<K;K=K<K>K(K>KGKCKAK:K0KFKGKIKEK:K<K8K*K#K!KKKKKK>KRK>K6K4K5K(KKKKK;KTKCK2K#K$K&K)K+K-K.K2KQKSK?K7K4K4K4K4K5K3K4K:KRKSKCK<K5K4K9K9K=K=K?K<K@KTKQKDK'K%K3K;KEKMKQKOKFKPKSKKKBKEKCK=K4K2K3K/K"KK?KWKNK6KK"KKK/KKKK#KKKJKQKLK3KK+K,K*K'K$K$K.KAKHKKKNKNKJKRKSKSKTKUKZK\K^KaKcKdKXKMKFKJKKKKKKKyKqKeKXKNKKKJK<KEKIKGKAK5K1K,K,K0K@KYKtK[KHKAKJKKKKtKZK<K0K2K>KFKJKHKDKGK>K/K4K/K1K9KCKLKNKPKQKMKKKPKRKOKKKNKQKNKMKNKNKKKJKNKPKQKPKQKLK<KNKOKNKMKNKOK(KHKTKRKQKLKBKJKQKUKWKXKDKPKPKQKQKQKQKPKNKNKNKNKNKNKNKNKNKMKMKMKMKMKMKMKMKPKPKNKNKRKQKOKNKKKGKLK[K{KKKKKKKKKKKKKKKfKOKTKTKTKRKFKGKLKKKLKLKLKKKIKKKLKLKLKLKKKIKIKIKIKIKIKHKGKGKGKDKCKBKAK>K<K9K5K3K9KBKLKTKVKUKUKRK9K,K,K&KKqKKKKKKKKKKKKKK~K|KtKkKiKkKhK`K^KcKlKnK\K@KVK_KeKeKfKiKnKnKjKhKjKkKlKjKjKjKiKgKgKcKdKeKeKeKfKfKdK]K\K_K^K\K]K^K_K`K\KXKUKWKXKYKWKUKVKWKWKTKRKSKVKYK]KKKKKKKKKKKKKKKKKKKKKKKKKKKe]rQ��(KDKDKEKFKGKGKGKFKFKCKCKDKCKEKGKGKGKGKGKGKGKDK@K@KBKCKCK=K4K)KKK5K<K6K+K0K5K8K8K8K7K5K5K8K8K;K<K9K8K:K3K2K1K)K-K1K;K=K<K=K;K9K'K@KHKDKAK:K;KGKGKIKDK:K=K8K'K#K!KKKKKK?KOK=K6K4K5K&KKKKK=KSKDK3K"K$K%K*K+K-K-K0KPKSKAK6K2K3K5K5K5K4K5K=KUKRKDK=K4K2K<K:K9K6K.K!K.KUKQKDKEKPKWKSKNKBK2K%K"KMKSKJK7K0KAKHKFKAK5K3K+KK;KWKNK=K\KKKKKKKKKKHKRKOK5KKKKK K KKK%K$K9KPKNKJKEKKKUKVKWKZKYKZK\K]KcKZKLKHKHKzKKKKKKKKKzKfKKKMK>K?KCKEKFK=K8K6K:K7K/K+K5KGKKKFKFKKK~KKKKqKXK=K-K2K=KFKIKBK=KDKFKAK:K3K2K6KBKJKPKPKPKNKIKFKNKQKNKNKPKPKNKKKNKNKMKAKIKQK>KLKOKNKMKMKSK-K@KWKQKOKLKBKFKOKSKUKWKKKQKSKQKQKQKQKQKQKQKQKNKMKNKNKNKNKMKMKMKMKMKNKNKMKNKNKMKNKNKNKNKNKNKOKNKKKJKHKRKkKKKKKKKKKKKKKKhKZKOKPKHKIKPKMKNKMKKKLKKKKKLKLKKKHKJKLKJKHKIKIKIKGKGKGKGKDKDKBK?K=K7K7K9KBKMKOKOKSKUKUKUKTKCK.K.K+K"KnKKKKKKKKKKKKKK~KzKvKpKhKeKeKcK^K`KgKmK[KCKTK^KaKbKfKiKoKmKkKlKkKkKkKkKkKlKkKjKhKcKcKgKgKfKgKeKaK]K\KbKaK`K`KbKbK]K[KXKWKYK[KYKVKWKWKVKUKTKTKXKZK`KKKKKKKKKKKKKKKKKKKKKKKKKKKKe]rR��(KDKDKAKDKEKFKFKGKFKCKCKDKCKEKGKGKGKGKGKGKGKGKCK?K=K:K8KAKEKFKFK=K7K;K/KKKKK#K*K/K4K8K8K8K:K<K8K6K9K0K0K-K,K/K1K;K=K<K=K<K7K&KDKHKCK@K;K>KGKGKIKAK;K>K6K'K$K!K KKKKK?KOK=K6K4K4K$KKKKK?KSKAK0K"K$K%K*K+K-K-K1KPKRKAK5K2K3K4K4K4K3K3K;KTKRKCK;K4K0K1K&KKK%K.KAKUKPKEKLKEK9K,K KKKK,KQKQKIKQKHK/K*K;KKKJKEK,KK<KVKLKGKKKK2KKKK +K +KKBKSKPK2KK 
KKKK!KKKKK+KLKOKKK5K6KFKKKPKSKSKSKTKUKZKXKMKKKIKzKKKKKKKKKKxKOKNK@KLKRKIKEKAK<K6K:KAKCK?K5K<KJKKK=K]K{KKKKKKKKoKTK8K.K8K@KGKGKFKIKHKDK@K6K0K2K8KBKIKIKGKIKPKPKMKNKQKPKNKLKNKNKMK?KHKSKAKGKOKMKMKMKSK;K4KWKQKPKLKBKDKOKRK9KK1K?KJKOKQKPKPKPKQKPKPKNKNKNKNKNKNKMKMKMKNKNKNKNKMKMKMKNKMKLKLKMKMKMKNKMKNKPKOKOKJKHKOK^KxKKKKKKKKKKKKKK`KKKLKLKOKPKNKLKLKLKKKKKKKJKJKLKJKHKIKIKIKGKGKFKEKCKCKAK?K:K:KBKNKQKOKQKJKBKRKUKUKSKPK3K*K*K$KjKKKKKKKKKKKKKK~KxKwKtKnKfKaKeKaK]KdKiK\KGKRK\K_K_KdKkKpKmKlKkKkKlKmKlKkKlKkKjKhKcKeKhKhKgKgKeKaK^K]KaKbKcKaK`KcK\KYKXKXKYK[K[KWKWKWKVKWKXKWKYKaKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]rS��(K@KBKAKAKAKAKAKCKCKCKDKDKBKEKGKGKGKGKFKFKFKGKGKDK?K;K5K*K(K1K;K=K9K7K7K7K1K'K#KKKKK!K%K/K<K9K7K:K:K-K2K-K-K0K0K:K=K<K=K<K6K'KHKHKBK?K7K4KHKFKHK>K<K<K5K'K$K!KKKKKKDKOK<K6K4K1K#KKKKKBKTKAK1K!K$K&K*K+K-K-K3KPKPK?K3K2K3K2K3K2K.K.K8KSKRKDK0K KKK)K7KBKKKPKKKSKPKBK)KKKK%K7K.K-K^K_KPKJKfKKgK(K-K)K0KCK5KKBKUKNK=K\KKQKK K KKK KK:KTKPK5KKKKKKKKK$KK'KMKNKKK?K?K@KBKHKOKPKPKSKWKWKUKOKLKJKyKKKKKKKKKK{KRKNKBK`K{KfK[KUKNKFK<K:KCKIKIKGKJKKK;K-K7KNKkK~KKKKKKKKiKPKBK=K=K?KIKKKHKFKGKCK?K:K3K1K9KCKGKMKPKJKLKQKOKMKLKNKNKMKPKQKSKFKBKPKMKMKMKPKDK)KOKRKRKMKFK@KKKPKCKK&K?K@KDKIKLKNKQKQKOKMKMKMKNKNKNKNKMKMKNKNKNKNKNKMKNKNKNKMKOKLKKKMKMKNKNKNKNKNKNKNKOKNKMKKKLKTKlKKKKKKKKKKKKKKpK[KOKLKLKPKPKOKHKEKLKLKLKJKHKIKHKIKHKFKEKCKBKAK>K=KBKNKSKRKUKNK<K$KKIKTKRKRKTK?K*K,K&KaKKKKKKKKKKKKKKKyKwKsKqKmKgKaK_K^KaKgKYKDKQKVKZK\KbKnKqKlKlKjKkKmKoKlKkKlKkKjKhKfKiKjKjKjKgKdKaK^K_KaKcKaKaKaKaK^KZKZKZKZKZKZKZKWKWKXKWKWKXK^KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]rT��(K?KBKAKAKBK@K@KBKCKDKDKDKEKFKGKGKGKGKFKFKGKGKGKGKDK?K<K6K%KKK(K;K9K9K:K8K=K?K<K7K1K+K"KK&K?K9K*K(K0K*K5K/K-K/K2K<K=K<K<K=K4K)KGKEKAK?K7K9KHKFKJK?K:K<K4K%K"K!KKKKKKHKMK;K4K3K2K"KKKKKDKSK@K/K"K$K%K*K+K-K-K2KRKPK?K4K2K3K2K3K2K/K2K9KSKRKDK+K(K9KGKMKMKDK;K-K0KTKQKCK+K2KHK^KqKKLK0KdKaKRKIK`KKxK-K0KQK=K)K%KK=KUKNK=K>KK;KK K +KK KKK5KVKNK;KKKKKKK)K$K K$K0KLKNKMKAK;K?KAKFKPKLKOKVKYK[KXKPKNKGKmKKKKKKKKKKKVKLKEKZKKK~KsKjKcKVKIKDKAKFKIKKKIK?K-K+K*K+K;KSKpKKKKK~KKKQKIK6KK"K2KAKIKKKFKDKEKBK=K8K2K1K8KDKJKLKOKPKMKLKLKMKOKPKFKMKLK>KNKMKMKMKMKIK%KHKSKQKMKIKBKHKMKLK(K+KJKLKJKJKHKGKIKNKOKOKOKOKNKMKNKNKMKNKNKNKNKNKNKNKMKLKNKMKMKLKKKLKLKLKLKLKLKLKMKNKMKLKNKOKLKNKLKIKMK^K~KKKKKKKKKKKKKKhKSKKKKKJKFKMKNKLKLKJKHKJKJKJKHKEKCKAK?KAKIKRKSKUKTKFK)KKKK5KTKSKRKUKJK/K/K&K\KKKKKKKKKKKKKKKxKsKtKoKfKeKiKaK`K^KcKTK>KMKTKVKYKaKnKpKmKlKkKkKmKoKlKkKlKkKiKiKiKiKiKiKiKeKdKaK\KaKbKeKcKdKdKaK^K\K[K\K[KZKYK[KZKYKWKYKYK[KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]rU��(K4K@KBKAKBKBKBKDKDKCKDKDKGKGKGKGKGKGKFKFKGKGKGKGKGKDK>K=K=KIKAK4K=K<K'KKK+K1K5K<K?K=K>K7K5K:K<K%KKKKKK$K)K3K=K=K<K<K>K4K,KIKDKAK?K5K<KHKFKGK>K8K<K4K$K!K!KKKKK KJKKK:K2K2K3K!KKKKKFKRK?K.K"K$K%K*K+K-K,K2KSKOK>K4K2K3K3K3K1K.K2K:KSKSKBKBKEK>K6K-K$KKKK)KUKQKGKfK}KKKKKHK*KdKaKPKHK`KKzK-K,KpKKjK<KK7KSKNKEK'KKGKKKKKKK�K.KTKIK@KKKK#K-K1K,K/K6K>K@KKKNKLKBK=KAKCKIKNKMKMKSKUKTKSKNKNKFKcKKKKKKKKKKK]KLKGKSKKKKKKzKsKeKTKKKGKDKIKKKFK'K*K2K3K+K(K-K=K[KtKKKKKUKGK<K>K\K9K*K)K6KDKGKEKDKEKFKBK>K5K-K2K9KCKOKPKNKJKLKOKLK:KHKOK=KKKNKMKNKKKHK*K>KSKRKMKIKCKFKOKNK3K'K?KOKLKKKPKOKJKCKDKJKLKMKNKNKMKNKNKMKNKNKNKNKNKNKLKKKMKMKKKKKKKKKKKKKKKKKKKKKMKNKNKNKNKMKKKMKNKNKNKKKHKKKTKlKKKKKKKKKKKKKK{K^KMKIKLKQKRKOKKKNKOKOKMKGKBKAKGKPKTKSKVKMK3KKKKKK$KQKUKRKSKTK;K/K&KWKKKKKKKKKKKKKKKsKoKuKpKcK_KeKgKbK^K`KOK?KJKQKTKYKcKnKnKnKlKlKkKmKoKlKkKlKkKiKiKiKiKiKiKiKcKcKeKbKdKdKdKeKfKdK`K_K]K]K]K\KZK[KZK[KZKWKXKZKtKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]rV��(K(K6KAK@K?KAKBKBKDKDKDKDKDKEKGKGKGKGKGKGKGKGKFKDKEKDKAK?K@KSKvKHK9K:K(K%KKKKK!K'K-K3K6K7K:K:K6K5K-K'KKK 
K(KHKlKKKKKKKKKKKsKDKyKKKKKK~KHKGKGKGKEKFKEKgKKKKKKAKHKFKFKGKFKCKFKGKHKDKOKKKKKqK?KEKCKBKAK>K=K<K9K=KEKDKBK0K*K,K.K,K=KGKCKDKAK(KKKKKKKK8KFKBKSKKKKKKKKKKKKKKKKKKKKKK<KAKDK@KKKKKKKKKKKKKK+KKKKKKKKiK>KGKQKEK?K>K>K?K(KKKKKwKKSK>KAK?e]r��(KKKKKKKKKKKKKK#K'K*K1K.K#K%K&K&K&K$K#K$K"KKKKKKKKKKKKKKKKKKKKK6KKKOKXKGK;K:K4K#K<K@K?K=K<K/K7KCKBKCK?K.K+K+K KKKKKKKKKKKK K K KKKK K K K K K K K K K KKKKKKKKKKKKKKKK K K KKKKKKK#K1KFKSKRKOKOKPKQKPKPKPKPKPKPKPKPKPKPKPKQKPKNKPKPKQKPKPKPKPKPKPKPKPKPKPKPKPKPKQKPKOKOKOKOKPKQKNKFK?KAKDKEKFKGKFKGKEKBKHKKKK>K4K5KK +KKK<KoKWKHKGKhKKKKKKKKKK8K:K|KKFKGKAK`KKKKVK3KbKKKKKKKlK@KCKBKGK}KKKKKKKKK{K>KKKKcK@KFKCKKKKKKKKEK{KKKKKKKKKKBKDK?K^KKKKKKKKKKKXKFKpKKIK@K?KVKKKKKKMKEKwKKKKKKrK=KBK@K@K?K?K>K?KCKGKGKGKFKBK<KAKDKFK:K5K9K8K8K6K6K6K<K>K@KGK<K5K8K=KPKKKKMKKKKPKKKKKSKDKlKKKKKKK)KKKEKWKKKKKKKKKKK}KGK[KKKKKK@KGKHKFKGKEKFKIKMKKKKKKLKEKHKHKGKEKCKEKEKGKBKfKKKKKKDKDKCKBKAK>K<K=K;K8K@KDKDK9K+K-K.K.K4KDKDKCKDK@K(KKKKKK K(KCKBK@KKKKKdK$KKKKKKKKKKKKKKKK(KDKBKAKVKKKKKKK5KK K +KK:KAK^KKKKKKKKjK8K@K>K=K@K@K?K7KKKK$KKKKYKKKBe]r��(KKKKKKKKKKKKK K&K*K-K+K$K&K&K&K#KKK"K$K!K#K'K'K'K$K#KKK KKKKKKKK KK K +K9KNKmKsKSK;K:K3K$K>K@K>K=K:K/K8KAKBKBK=K-K*K,KK KKK KKKKKKKK K K KKK K K K +K +K +K K K K K KKKKKKK K K K K K K +K K K +K +KK KKKKK%KKKK0KKKQKMKNKQKQKQKPKPKQKQKQKQKPKPKPKQKOKMKOKQKPKPKPKQKQKQKPKPKPKPKPKPKPKPKPKPKQKQKQKQKQKNKNKQKMKCK=K@KGKFKGKHKGKEKGKKKK>K2K2KKK +KK0K/KCKJKGKWKKKKKeKKKKK>K2KqKKKKDK@KZKKKK`K4KZKKKKKKKtKBKEKDKEKuKKKKKKKKKKDKDKKpK?KFK?K{KKKKKRKAKnKKKKKKKKVK?KDK>KRKKKKKKKKKKKbKCKbKKUK=KAKHKKKKKKYKBKkKKKKKKKBKBKBKAK?K>K?K?K@KHKIKGKFKBK<K>KFKGK=K5K:K9K7K8K8K5K:K?K>KFKAK7K8K=KHKxKKK\KjKKKcKsKKKKfKDKVKKKKKKK+KKK7KLKKKKKKKKKKKRKAKLKKKKKKTKeKFKEKGKEKFKKKDKKKKKKiKCKHKGKHKDKDKDKCKDKFKVKKKKKKZK@KDKBKAK>K=K<K:K9K>KCKDK?K-K-K-K,K1K?KDKCKCKEK5K"KKKK K KK>KCK@K\KKKKIK)KKKKKKKKKKKKKKKKK>K@KBK>KKKKKKK}K +K K +K-KGKBKCKKKKKKKKKLKXKKQK?KAK>K?K+KKK5KKKKKbKZe]r��(KKKKKKKKKKKKK%K)K-K,K$K&K%K&K$KKKKKKKKKK K!K"K!K#K'K(K&K&K KKK K +K +KK K=KTKKKaK=K;K1K"K>K@K?K<K8K-K;KBKBKCK:K-K,K+KKKKKKKKKKKK KK K K +KKK K K K K KKKKKKKKKK K K +K K K K K +K +K K K K K K KKKKK KKKKKK;KPKPKOKOKNKPKPKNKNKNKPKQKPKPKQKPKOKPKQKPKPKQKPKNKPKQKPKPKPKPKPKPKPKPKPKPKPKPKPKQKPKPKQKPKNKKKBK=K@KEKIKDKCKIKTKtKKDK*K*KKK +KK6K>KEKIKHKGKwKKKKOKZKKKKDK/KjKKOKCKAKSKKKKkK3KRKKKKKKK}KCKDKBKAKkKKKKKKKKKKJK>KzK}K@KFK>KnKKKKK\K?KdKKKKKKKKdK@KFKAKJKKKKKKKKKKKpKCKVKKdK<KCK=KKKKKKgKAK\KKKKKKKKK>KBKAK?K>K>K@K>KDKHKGKEKDK=K<KFKGK?K6K8K8K7K9K8K6K9K=K=KEKEK8K7K:KCKhKKKsKRKKKKgKKKK{KGKKKKKKKKK*KKK%KGKsKKKKKKKKKqK$K0KGKzKKKKKKKHKFKGKFKFKIKDKbKKKKKKCKGKGKEKDKDKDKDKDKIKEKKKKKK~K=KCKBKAK>K=K<K:K9K;KCKDKDK4K,K.K-K.K7KDKCKCKDK;K)KKKKK K K-KDKBKEKKKK5K,K%K KKKKKKKKKKKKKKK.KBK>K<K[KKKKKKK0KKKK9KGKCKeKKKKKKKKkK@KKK;KBK@KAK7K!KKEKKKKKuKme]r��(KKKKKKKKKKKK"K&K*K,K&K&K%K&K!KKKKKKKKKKKKKKKKKKKK!KKK +K K K +KKBK[KKKlK=K;K-K$K?K@K?K;K7K+K<KCKAKCK:K-K,K+KKKKKKKKKKKKK K K K +KKKKK K K K K K K K K K KK +KK +K K K K K K +K K K K +K +K K KKKKK#KKKKKKK'KCKPKOKMKPKPKMKMKMKOKQKPKPKPKQKQKQKPKPKPKQKPKMKOKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKQKQKPKMKPKNKHK>K<K@K1K;KGKEKEKZKCK+K'K +KK KK4KDKHKIKGKGKKKdKKmKRKKKKNK.K`KKUKBKBKJKKKKuK3KKKKKKKKKKGKDKBKAK`KKKKKKKKKKSK<KoKKDKEK?K`KKKKKjK@K[KKKKKKKKqK@KGKCKEKzKKKKKKKKKK~KDKKKKuK<KEK;KoKKKKKwKFKNKKKKKKKZK<KCKAK?K>K>K?K9KAKHKGKDKEK?K:KEKGKAK6K7K7K8K8K5K7K9K<K<KAKFK<K7K7K@KYKKKKHKKKKlKKKKKPKFKvKKKKKK'KKKKCKZKKKKKKKKK@K K#KHKYKKKKKKKWKGKGKGKGKGKIKMKKKKKKSKEKGKCKDKDKDKDKDKIKCKkKKKKKKHKBKBKAK>K=K<K:K:K6K?KEKDK=K-K-K.K-K0KBKDKCKDKBK1K"KKKK K 
KK>KCK>KhKKK)K*K,KKKKKKKKKKKKKKKKK>K?K?KAKKKKKKKsKKKK!KFKDKDKKKKKKKKKFK_KKXK?KCK@K>K.KKVKKKKKKie]r��(KKKKKKKKKKKK&K)K)K&K%K%K'K#KK KKKKKKKKKKKKKKKKKKKKKK K K K KKCKfKKKpK:K9K,K)K@K?K?K;K7K-K>KDKCKCK8K+K,K+KKKKK K KKKKKKK K K KKKKKKKKKKKKKKKKKKK +K K K +K K KK +K K +K K K KKKKKK#KKKKKKKKK3KLKNKPKQKPKOKMKNKQKQKPKQKPKNKQKPKQKQKQKQKPKPKPKPKPKPKQKQKQKPKPKPKPKPKQKOKMKPKQKQKPKPKPKPKOKNKGK<K6K>KDKFKHKDKEK?KDKKKKK&K;KDKHKFKFKxKnKxKKKKKKKWK,KYKK]KCKFKFKKKKK6KDKKKKKKKKLKDKCK?KUKKKKKKKKKK]K:KdKKKKFKBKVKKKKKvKBKSKKKKKKKK|K@KEKDKBKmKKKKKKKKKKKJKFK~KK?KCK=K`KKKKKKJKIKKKKKKKlK<KAK?K?K>K?K>K=KBKEKFKFKDK@K:K@KFKCK9K7K8K7K7K7K5K5K:K<K?KHK>K7K7K<KOKKKKQK|KKKrKKKKK`KDK_KKKKKK/KKKK7KLKKKKKK|KK{K4K KKCKJKKKKKKKqKDKFKGKFKHKJKEKKKKKKqKAKHKDKDKDKDKDKBKFKGKNKKKKKKdK<KCK@K>K>K>K=K<K8K<KEKDKCK1K,K.K.K/K;KEKCKDKEK6K(KKKKK K K3KBK@KFKKlK#K+K*K!K KKKKKKKKKKKKKKK2KBKBK?K^KKKKKKK)KKKKAKEKAKdKKKKKKKKjKDKKK=KBKAKBK=KKdKKKKKKXe]r��(KKKKKKKKKKKK+K*K#K%K%K&K#KK KKKKKKKKKKKKKKKKKKKKKKKKKK KKEKwKKKlK:K8K)K,KAK?K?K;K7K.K?KDKCKCK7K+K,K*KK K K +KKKKKKKKK K K KKKKKKKKKKKKKKKKKKKKK +K K K K KKKKKKKKKKKK KKKKKK)K4K=K@KFKOKPKPKQKOKMKMKPKPKPKPKNKIKOKPKPKPKPKPKPKPKPKPKPKQKPKPKPKQKPKPKQKPKPKOKNKOKPKPKPKQKPKPKNKNKOKMKGK?K=KBKFKGKGKFKKK>KKKKKK:KHKEKDKFKbKbKQKvKKKKK`K+KPKKdKBKFKDKKKKK;K<K|KKKKKKKTKBKEK?KPKKKKKKKKKKhK;KYKKTKDKDKLKKKKKKDKJKKKKKKKKKFKDKDK@KbKKKKKKKKKKKUKDKoKKIK@K?KPKKKKKKRKDKsKKKKKKK>K?K?K>K?K>K>K>K?KFKFKFKDK@K;K?KEKEK?K7K8K7K7K7K5K5K8K;K<KFKCK8K8K9KEKzKKKgK[KKKhKeKKKKwKFKMKKKKKK+KKKK&KFKyKKKKKvK`KOKKKK7KEKxKKKKKKKIKGKEKGKGKIKEK]KKKKKKDKGKDKDKDKDKDKBKEKIKCKKKKKKK=KCK@K>K?K?K=K;K9K9K@KCKEK:K-K.K.K/K4KAKEKDKFK@K-K&KKKKK K K@KCK?KgKMK&K,K)K'KKKKKKKKKKKKKKKKK?KBKDKBKKKKKKKmKKKK<KEKFKFKKKKKKKKKGKfKKVK>KBKAKBK,KvKKKKKKye]r��(KKKKKKKKKKK"K+K&K%K%K&K"KKKKKKKKKKKKKKKKKKKKKKKKK KKKK K!KHKKKKbK7K9K&K.KDKAK<K:K7K.KBKCKDKDK6K+K,K*KKKKK K KKKKKKKK K K +KKKKKKKKKKKKKKKKK K K KKKKKKKKKKKKKKKKK)K4KBKNKLKUK\K^K\KWKOKNKPKLKLKKKLKMKMKMKMKNKLKJKKKKKMKNKMKMKMKOKQKPKPKQKPKMKOKQKQKQKQKPKMKOKQKNKMKMKNKQKOKMKNKMKMKNKNKMKCK=KAKDKGKGKFKGKDK/KKKK2KGKDKBKJKLK/K/K;KYK{KKKkK*KKKKnK@KDK@KzKKKKCK8KsKKKKKKK[KAKEKBKIKKKKKKKKKKtK<KPKK`K?KCKFKKKKKKIKEKzKKKKKKKKOKBKDK=KVKKKKKKKKKKKcKCK`KKWK?KAKDKKKKKK_KCK`KKKKKKKGK?K?K>K?K<K<K;K=KEKFKFKDKAK>K=KDKDKBK6K5K8K7K6K8K7K6K;K<KBKDK:K:K7K@KgKKK|KKKKKRKKKKKKKNKFKKKKKK1KKKKKEK]KKKKKvK_K5K KKK&KHKZKKKKKKKXKEKEKGKEKGKHKFKKKKKKXKAKDKCKDKDKDKDKDKHKCK^KKKKKKQK?K@K>K;K<K=K:K:K9K=KCKDK?K0K0K0K.K7KNKFKFKDKCK2K+K!KKKKKK:KAKBK>K-K*K+K)K*KK KKKKKKKKKKKKKKK5KDKBK=KhKKKKKKK!KKK%KEKDK@KdKKKKKKKKjKDKKK=KBKBKAK<KKKKKKKe]r��(KKKKKKKKKKK$K&K#K'K'K$KKKKKKKKKKKKKKKKKKKKKKKKKK KKKK K'KMKKKK\K7K8K$K1KCKAK=K;K5K/KCKCKDKCK4K,K,K)KKKK K KKKKKKKKK K K +KKKKKKKKKKKKKKK K KKKKKKKKKKKKKKKKKKK8KWKaKeKgKlKjKgKeKaK`KYKSKSKGKFKLKRKNKMKNKMKMKNKMKJKKKMKNKMKNKMKNKPKPKPKQKPKMKOKQKOKOKQKPKMKNKOKNKMKNKNKOKNKMKNKMKMKMKMKLKKKIKCK<K@KBKEKFKDKHKBK4KK)KHKFKCK?K)K1K>KEKRKcKKKtK*KBKKwK>KDK=KqKKKKIK3KiKKKKKKKfK?KFKBKDK}KKKKKKKKK~K@KHKKnK>KEK@K}KKKKKSKDKpKKKKKKKK\K@KDK?KMKKKKKKKKKKKrKAKUKKiK=KDK<K}KKKKKqKBKUKKKKKKKUK;K@K?K>K=K=K<K:KDKGKFKEKCK?K=KBKDKCK8K5K8K9K9K8K7K6K:K;K?KFK:K9K8K=KUKKKKJKKKBK>KKKKKWKEKdKKKKKPK KKKK9KMKKKKKK<KKKKKKEKKKKKKKKKmKCKEKGKFKGKIKCKyKKKKKzK@KEKDKDKBKBKBKBKCKEKHKKKKKKtK;KAK>K<K=K=K;K:K9K;KBKDKBK5K-K1K+KKKKGKFKCKEK=K+K&KKKKK K'KDK?KAK1K'K*K)K)K'KKKKKKKKKKKKKKKKKCKAKCKFKKKKKKKaK�K K K8KFKEKHKKKKKKKKKGKhKKXK<KAKCK=KmKKKKKKe]r��(KK KKKKKKKKK$K&K&K&K#KKKKKKKKKKKKKKKKKKKKKKKKKKK KKKK K.KOKKKKWK7K4K#K2KAKCK?K<K3K0KDKCKCKCK0K)K,K)KK K K K +KKKKKKKKKK K +KKKKKKKKKKK K K K +K 
KKKKKKKKKKKKKKKKKKKKSKgKoKpKmKkKjKjKhKdKbK\KWKWKVKNKAKBKKKMKMKNKNKMKJKKKOKNKMKNKMKNKLKNKQKQKQKPKMKOKQKNKMKQKPKMKMKMKMKMKNKMKMKMKNKMKMKMKMKNKLKJKLKMKGK>K;K@KDKDKCKEKDK@K6K?KBKBK2KK3KFKPKKK}KK}K/K;KKK?KEK=KiKKKKQK0KaKKKKKKKoK>KGKGKBKuKKKKKKKKKKEKCKKzK?KEK<KpKKKKK`KAKeKKKKKKKKhK?KEK?KEKKKKKKKKKKKKFKNKKzK?KDK;KlKKKKKKFKMKKKKKKKcK:K@K@K=K?K>K>K*K;KHKFKGKEKAK;KAKCKDK9K6K8K7K7K8K7K7K9K9K=KDK?K8K7K;KJKKKKZKZKlK@K8KqKKKKjKHKSKKKKKfK KKKK*KFK}KKKKK5K KKKKK8KJKxKKKKKKuKDKEKGKGKGKHKHKZKKKKKKHKDKCKDKCK9K9KBKBKHKDKxKKKKKK@K@K>K<K=K=K<K:K9K6K@KFKDK;K.K0K,KdKK^K@KDKDKBK1K(K%KKKKKK<KCKBK<K)K(K)K(K)KKKKKKKKKKKKKKKK K;KCKCK?KlKKKKKKKKKKKDKEKBKiKKKKKKKKhKCKKK=KAKBK@KHKKKKKKe]r��(KKKK"K!KKKKK!K%K$K%K%KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK3KQKKKKQK6K3K!K5K?K@K?K<K3K2KEKCKDKCK/K(K,K)KK K K +K KKKKKKKKKK K KKKKKKKKKK K K K KKKKKKKKKKKKKKKKKKKKKK\KqKqKnKkKeKgKiKgKdKfK^KXKOK9K0K&KK-KDKMKKKLKLKMKMKNKNKMKMKMKNKMKNKNKOKQKPKMKNKOKNKNKOKNKMKOKPKNKMKNKMKNKMKLKMKNKMKMKMKNKNKMKMKNKOKGK?K=KAKBKCKDKDKFKAK>KAK4KKK+K<KjK~KoKKK2K7KKKEKFK>K`KKKK[K.KYKKKKKKKyKAKEKDK?KlKKKKKKKKKKMK>KvKKAKEK=KeKKKKKkK?KYKKKKKKKKwK@KEK@KBKsKKKKKKKKKKKLKGK{KKBKBK<KYKKKKKKKKFKyKKKKKKxK=KAK?K>K?K=K?K&K.KHKGKGKDKAK;K>KEKHK=K3K6K7K8K6K5K5K7K:K:KAKCK9K7K9KBKpKKKpKGKJK=K8KYKKKKKIKFKKKKKKKKKKKDKaKKKKKCKK)KKKKDKMKWKKKKKKZKCKGKEKEKGKGKIKHKKKKKK^KBKFKDKCK;K;KDKCKGKGKVKKKKKK[K=KAK=K=K=K<K:K9K7K=KEKDKBK2K0K.KKKKCKGKEKEK8K(K*KKKKKK-KDK@K@K0K'K)K(K(K%KKKKKKKKKKKKKKKK$KBKAKBKJKKKKKKK]KK K K-KCKAKHKKKKKKKKKEKfKK\K?KCKCK<KnKKKKKe]r��(K?K1K-K*K&K K!K KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK9KVKKKKMK5K0K!K;K?K?K?K:K/K4KDKBKDKCK/K*K,K$KK K K K K K KKKKKKKK KKKKKKKKKKKKK K KKKKKKKKKKKKKKKKKKKKKK_KpKjKlKiKgKgK_K_K]KGK4K2K%KKKKKK#K7KJKNKLKMKNKMKMKMKMKMKMKNKMKMKNKQKPKMKMKMKMKMKMKMKMKOKQKNKMKMKMKMKNKOKNKMKMKMKMKMKMKNKNKMKMKNKLKGK<K=K?KCKDKCKDKCKDK9KK KK?KWKiKKKK:K2KuKKHKFK@KVKKKKjK/KOKKKKKKKKDKDKAK?K`KKKKKKKKKKVK;KiKKHKAK?KVKKKKKxKAKPKKKKKKKKKCKDKDKAKgKKKKKKKKKKKUKCKmKKLK>KAKMKKKKKKYK?KhKKKKKKKCKAK?K>K?K=K@K+KKGKHKGKCKAK;K<KGKGKBK6K4K7K7K5K4K4K7K:K:KAKCK9K7K8K=K^KKKKIK?K=K;KEKKKKKSKEKoKKKKKKKKKK?KPKKKKKnKK:KKK+KGKQKEKKKKKKKeKCKDKDKHK@KFKEKrKKKKKKAKGKDKCKBKBK@KBKFKJKDKKKKKKK>KBK=K=K=K<K:K9K7K9K@KDKEK:K0K8KKKKRKEKEKDK@K,K(K*KKKK KKAKBKAK9K(K(K)K)K)KKKKKKKKKKKKKKKK K9KBKBK@KwKKKKKKKKK KK=KDK?KpKKKKKKKKhKCKKK=KBKBKBKFKKKKKe]r��(KKKK{KoKdKZKNKCK8K.K(K#K KKKKKKKKKKKKKKKKKKKKKKKKKKKKKK KKK;K[KKKKHK4K.K"K:KAKAK=K9K,K4KEKCKEKCK/K*K+K"KK K K K +K K K KK KKKKKKKKKKKKKKK K +K K K K KKKKKKKKKKKKKKKKKKKKK^KlKiKbK^K_KcKtKKKwKNK/K"K%K&K KKKKK*KAKJKLKMKMKMKNKNKMKMKNKMKMKNKQKPKMKNKNKMKMKMKNKMKNKNKNKMKMKNKMKNKPKNKMKMKMKMKNKMKMKMKNKMKKKKKLKJKEK;K:KAKAKCKAKCKCK7K KKFK\KTK|KKK@K,KlKKNKEK@KLKKKKsK.KFKKKKKKKKHKAKCK?KVKKKKKKKKKKaK:K]KKQK?K@KJKKKKKKDKIKKKKKKKKKJKBKDK>K[KKKKKKKKKKKdKDK`KK\K<KCKBKKKKKKfKBK[KKKKKKKMK>K@K>K?K=K>K5KKCKIKFKDKCK>K;KEKCKBK7K4K5K5K7K7K4K5K8K:K=KFK;K7K7K<KOKKKKSK@K;K>K;K}KKKKcKEKWKKKKK;K KKKK.KHKKKKKKMKLK0K0K=KK9KFKyKKKKKKKGKGKEKEK>KAKKKVKKKKKKGKDKDKBKDKCK?KAKBKHKDKkKKKKKKHK>K>K>K=K<K:K9K8K8K=KCKEK@K/KNKKKKvK>KEKCKCK3K)K,K&KKKKK4KDK>K?K.K(K)K(K)K KKKKKKKKKKKKKKKK(KDKBK?KLKKKKKKKSKK KK+KFKAKJKKKKKKKKKDKiKK_K=KAKAK<KmKKKKe]r��(KKKKKKKKKKKKKwKlK_KTKIK=K5K.K&K$KK KKKKKKKKKKKKKKKKKKKKKKK@KaKKKKBK8K6K0K<KBKAK=K8K4K?KEKDKDK?K-K*K+K"KK K K K +K K K KKKKKKKK +KKKKKKKKK K K K K K KKKKKKKKKKKKKKKKKKKKKHKGK<KdKKKKKKKKKKVK/K#K%K&K 
KKKKK1KFKNKMKNKMKMKMKMKNKMKNKNKPKOKMKMKMKMKNKNKMKMKMKMKMKMKMKMKNKMKLKMKNKMKMKNKMKNKNKMKNKMKLKLKKKMKHKDK@K=K<K:K0KBKEKFKDK6KBKgK+K]KiKKJK+KcKKRKBK>KJKKKK~K1K@KKKKKKKKPKCKDK?KNKKKKKKKKKKnK;KTKK^KAKBKBKKKKKKKKEKwKKKKKKKKRKAKDK>KRKKKKKKKKKKKqKCKQKKlK:KCK;KzKKKKKxKDKOKKKKKKK_K:K@K>K?K=K<K>KK:KKKFKEKDK?K:KBKFKEK9K4K5K4K7K7K4K5K7K7K9KFKAK6K7K:KBK|KKKfKCK:K<K:KdKKKKvKDKJKKKKKgK KKKKKEKbKKKKKOKK$K6KAKK,KGK[KKKKKKmKCKGKEKDKDKBKIKEKKKKKKcK>KEKDKDKCK@KAKBKGKGKMKKKKKKjK9K@K>K=K<K:K9K8K6K9KDKDKDK1KiKKKKKFKBKDKEK=K+K)K)K$KKK +K!KBK>KAK8K'K)K)K(K'KKKKKKKKKKKKKKKKK?KAKDK=KzKKKKKKKKKKK=KEK=KsKKKKKKKKeKEKKK?K>K=K?KGKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKyKqKcKVKKKAK8K0K+K'K"KKKKKKKKKKKKKKKKGKlKKK}K?K9K7K7KAKBKAK?K6K5K@KFKCKDK>K,K*K+KKKK K K +K K +KKKK KKKKK KKKKKKKKK K K K KKKKKKKKKKKKKKKKKKKKKKKK&K,KKKKKKKKKKKKKcK5K#K%K(K$KKKK$K;KLKNKJKIKMKKKKKNKNKMKMKMKMKNKMKMKNKNKLKNKMKNKMKMKMKMKNKMKMKNKMKMKMKNKLKNKNKMKNKMKMKMKNKMKIKKKMKIKBK4K4KBKDKEKBKCKBKLK%K=KVKKQK'KYKKXKBKCKDKKKKK6K9K|KKKKKKKWK?KEK>KIKKKKKKKKKKxK?KKKKiK=KCK?KKKKKKTKAKmKKKKKKKK`K?KDK?KHKKKKKKKKKKKKDKHKKK=KDK;KiKKKKKKHKHKKKKKKKqK;K@K>K?K=K<K@K!K-KIKFKFKCK@K<K?KEKEK=K2K5K4K4K4K5K4K8K9K:KCKCK6K5K7K>KkKKK|KEK=K;K:KNKKKKKLKEKxKKKKKKKK KK=KOKKKKKoKK,K`K~KWKQKGKIKKKKKKyKaKDKEKCKDKDKFKDKmKKKKKK@KDKDKCKBKBKBKBKCKGKBKKKKKKK=K=K=K=K<K9K:K6K7K8KAKEKEK=KKKKKKcK@KDKDKDK2K*K+K)KKKKK9KCKBK<K*K+K)K)K*KK KKKKKKKKKKKKKKK)K@KBK?KQKKKKKKK@K�KKK$KDKAKKKKKKKKKKKFKjKK\K9K?K@K>KkKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKK~KsKeK[KPKFK>K6K.K'K#KKKKKKKKHKtKKKwK=K<K6K5K@KAK?K<K7K8KAKEKCKEK<K+K*K+KKKK K K K K +K KKK +KKKKK KKKKKKKK K K K K KKKKKKKKKKKKKKKKKKKKKKK"K1K<KKKKKKKKKKKKKKKiK>K)K*K)K&KKKK*K?KLKKKLKKKLKMKNKMKMKNKNKMKNKMKNKNKLKMKNKMKMKMKMKMKMKNKMKMKNKMKMKNKMKNKNKMKMKMKMKMKNKMKKKLKLKLKMKHK>K;K?KAKAKCKEKDKAK>K>KqK_K#KOKKbK?KGKBKKKKK<K3KtKKKKKKK^K>KFK?KEKKKKKKKKKKKBKCKKvK=KEK=KsKKKKK`K@KaKKKKKKKKmK=KEK@KDKzKKKKKKKKKKKKKBK|KKCKBK<KWKKKKKKPKBKqKKKKKKK>K?K>K?K>K=KAK*KKGKFKFKDKDK>K=KBKDK@K3K3K5K4K4K4K4K6K8K9KAKDK8K5K7K<KZKKKKMK?K;K=K>KKKKK[KEK[KKKKK2K KKKK0KEKKKKKK!KAKYKGKbKZK?KDKyKKKKKuKdKHKDKCKDKCKEKHKRKKKKKKLKBKDKCKBKBKBKBKCKHKDK]KKKKKKPK<K>K=K<K9K:K8K8K7K:KDKDKCKKKKKKKBKEKDKDK8K*K+K)K(KKKK)KCK?KAK3K(K)K)K)K&KKK KKKKKKKKKKKKKK?K@KBK?KKKKKKKJKKKK K7KCK<KvKKKKKKKKeKBKKK<K@K?K@KFKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKuKkK`KUKJK?K(KKKHKKKKqK<K:K6K8K@K?K<K9K8K>KCKCKCKEK:K)K+K,KKKK K K K K +K K +KKKKKKK KKKKKKKK K K +K K KKKKKKKKKKKKKKKKKKKKKKK$K5K@KKKKKKKKKKKKKKKKKqKIK0K-K,K(KKK K2KIKOKKKKKKKMKNKMKNKNKLKNKMKMKNKMKMKOKMKNKNKNKNKMKMKMKNKMKMKMKMKNKMKMKMKMKNKNKNKNKLKKKNKMKKKKKLKLKEK=K;K=KCKDKDKDKFK=KbK`K#KGKKiK=KGK>K{KKKKDK0KkKKKKKKKhK=KDK@K@KxKKKKKKKKKKGKAK|KK?KFK>KeKKKKKnKAKVKKKKKKKK{K@KDK@KBKlKKKKKKKKKKKWKBKjKKNK?K@KIKKKKKK]K?KcKKKKKKKGK=K?K>K?K>K@K3KKCKGKFKGKFK>K;KCKDKBK4K4K5K4K4K4K5K4K7K7K9KEK>K7K8K:KJKKKK[K@K=K>K9KsKKKKoKBKJKKKKKcKKKKK$KCKhKKKKKCKjKKTK:K4KEKIKZKKKKKKhKCKEKCKDKCKEKIKDKKKKKKlK@KEKDKBKBKBKAK@KEKGKGKKKKKKvK:K?K=K<K9K:K:K8K7K5KAKEK?KKKKKKKUKBKEKDKAK0K*K(K)K$KKKK:KAKCK:K)K(K)K(K+KK +K KKKKKKKKKKKKK K1K@K?K=KUKKKKKKBKKKKKKBK?KNKKKKKKKKKCKiKKYK;K>K?K<KjKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKXKK&KKKKKKjK<K;K6K7K@KAK?K:K6K9KBKDKCKEK:K*K*K+KKKK K K K K +K K +K K KKKKK KKKKKKKK +K K K KKKKKKKKKKKKKKKKKKKKKKKK&K9K@KKKKKKKKKKKKKKKKKKKyKQK6K+K)K'K 
KK"K:KKKMKJKLKLKMKNKNKMKNKMKMKNKMKLKMKLKLKLKLKMKNKMKNKNKMKMKMKMKMKMKMKMKNKMKJKKKLKMKMKLKLKMKMKKKKKLKKKEK<K<K?KDKCKDKEKEKEK2K@KKsK=KFK>KsKKKKNK.KbKKKKKKKuK>KBK?K=KmKKKKKKKKKKNK?KpKKDKDK>KWKKKKKyKAKLKKKKKKKKKCKBKAK?K^KKKKKKKKKKKeKAKYKK]K;KAK>KKKKKKoKAKVKKKKKKKVK;K?K?K>K?K?K:KK:KFKEKGKDK?K9KAKDKCK6K2K5K5K4K4K4K4K5K5K7KCK@K5K8K8K@KxKKKpKAK;K:K8KXKKKKKGKDKKKKKK-KKKKKAKRKKKKK{KvKKKOK$KPKKKIKKKKKKKiKAKDKDKCKDKHKFKgKKKKKKCKDKBKBKBKBKAK@KCKFKCKvKKKKKKDK>K=K<K9K:K:K9K5K4K=KEK@KZKKKKKKzK@KEKDKEK6K*K*K*K)KKKK-KAKAK?K0K'K)K(K)K'KKK +K KKKKKKKKKKKKK?K?KCK?KKKKKK6K!K KKK K2KDK?KzKKKKKKKKfKGKKK<K@K?K?KDKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKZK&K+KMKKKKaK9K=K5K7K@KBK@K:K6K;KCKDKCKFK9K+K*K*KKKK K K +K K K +K K K KKKKK KKKKKKKK +K K K KKKKKKKKKKKKKKKKKKKKKKKK(K9K@KKKKKKKKKKKKKKKKKKKKKKZK6K,K.K+K"KK(KAKMKKKKKMKNKNKNKMKMKMKNKMKKKKKKKKKKKKKLKNKNKMKMKMKMKMKMKMKMKMKMKNKMKLKLKKKMKNKKKLKNKNKJKJKLKLKLKHKBK<K?K@K@K@KDKCKBKAKzKzK>KFK>KhKKKKYK,KVKKKKKKKK@KAK>K:KcKKKKKKKKKKYK<KeKKLKBK@KLKKKKKKEKGKKKKKKKKKLK@KBK@KSKKKKKKKKKKKtK@KMKKpK9K@K:KuKKKKKKFKKKKKKKKKjK:K@K?K>K>K>K=KK/KGKDKGKDK@K:K>KDKEK<K2K6K5K4K4K4K4K4K5K5K>KDK6K8K7K9KeKKKKGK=K:K9KCKKKKKSKBKfKKKKKYKKKKK6KHKKKKKKKKKK5KEKHKEKxKKKKKKoKDKCKDKCKCKEKGKNKKKKKKQK?KBKBKBKBKBKBKBKEKEKUKKKKKK_K;K>K<K:K:K:K9K6K6K:KEKGKEKKKKKKKLKCKDKEK=K-K-K+K'K'K KKK@KBK@K8K(K)K(K)K+KK K +K +KKKKKKKKKKKK +K2K?KBK?K[KKKKK-K(KK +KKKK?K@KPKKKKKKKKKFKfKKZK<K?K?K;KfKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKVK)K1KOKKKKZK8K<K4K:KAK?K>K9K8K=KCKDKCKDK7K*K+K)KKKKKK K K K K K +K KKKKK KKKKKKKK +K K KKKKKKKKKKKKKKKKKKKKKKKKK(K:KBKKKKKKKKKKKKKKKKKKKKKKKKdK@K,K*K'KKK3KJKOKLKMKMKMKNKMKMKMKMKMKLKKKMKMKLKLKNKMKMKMKMKNKNKMKMKMKNKNKLKLKLKMKMKMKMKLKLKLKLKKKKKKKKKKKLKLKEKAK=K=KAKAKBKCKDKGKPK;KAK;K^KKKKbK,KOKKKKKKKKCKDKCK;KYKKKKKKKKKKgK:KXKKWK?KAKDKKKKKKKKCKtKKKKKKKKVK?KBK?KKKKKKKKKKKKKKDKFKKK<KAK8KcKKKKKKKKEKzKKKKKK|K=KAK?K>K=K<K@K(K#KIKFKGKEKBK:K;KFKCK@K6K4K5K4K4K4K5K4K5K5K;KEK:K7K6K8KPKKKKPK>K:K=K=KeKKKKgKEKQKKKKKeKKKKK(KFKoKKKKKKKKKzK\KVKEK\KKKKKKHKLKCKDKDKCKEKHKCKKKKKKtK>KCKBKBKBKAK?K?KBKHKEKKKKKKK:K@K>K<K;K:K8K7K8K7K@KIKBKlKKKKKKfKBKGKDKCK4K-K+K)K)K(KKK7KBK@K=K.K'K(K*K,K&KK +K +K KKKKKKKKKKKKKAK?KAKAKKKKuK&K)KKK +KKK-KDK>K{KKKKKKKKcKCKKK<K>K@K>KEKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKNK*K4KSKKKKUK9K<K5K;KAK?K=K9K7K<KCKCKCKDK5K*K+K)KKKKKK K K K K K +KKKKKK KKKKKKKK +K K KKKKKKKKKKKKKKKKKKKKKKKKK)K;KCKKKKKKKKKKKKKKKKKKKKKKKKKKkKAK*K)K%KK!K8KKKJKIKLKNKMKMKMKMKMKLKLKMKNKOKNKMKMKMKMKNKMKMKNKMKMKMKMKKKKKLKLKMKMKMKLKKKKKKKLKLKKKKKLKKKLKJKKKHKBK=K;K>K@KBKBKAKBK?K9KSKKKKoK+KFKKKKKKKKJKCKDK<KOKKKKKKKKKKsK<KLKKgK=K=K2KKKKKKRK=KiKKKKKKKKcK=KCK=KCK|KKKKKKKKKKKJK?KxKKDK>K:KOKKKKKKVK@KhKKKKKKKCK=K?K=K<K<K>K2KKCKGKFKEKCK;K:KCKCKDK8K4K5K4K4K4K5K3K5K6K8KCK>K6K6K6KDKKKK_K>K<K<K<K5KWKKK{KGKGKKKKKyK KKKKK@KWKKKKKKKKKKKKSKFKKKKKK/K<KDKDKDKDKEKHKCK`KKKKKKBKAKAKBKBKAK>K?KBKGKDKmKKKKKKHK<K>K=K;K9K8K8K8K6K<KEKDKKKKKKKKKCKFKDKEK=K,K*K)K)K'K&KK'KBK?K>K6K'K'K*K*K+KK K K K +KKKK K KKKKKK +K6K@K@K>K]KKK]K%K*K#KKKKKK>K?KPKKKKKKKKKDKbKKXK9K@K?K=Kce]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK*K:KWKKKKOK9K<K9K<K@K@K=K6K5K<KCKDKDKCK2K*K+K+KKKKKK K K K K +K +KKKKKK K +K KKKK +K +K K K 
KKKKKKKKKKKKKKKKKKKKKKKKK,K>KEKKKKKKKKKKKKKKKKKKKKKKKKKKKKxKBKKKKK%K@KOKOKMKNKNKNKMKKKMKNKMKMKKKLKNKMKMKMKNKLKJKNKMKNKMKJKKKLKNKMKKKMKNKLKKKLKKKKKKKKKKKKKKKLKLKKKLKIKEK?K<K?KCKBKEKEKFKBKJKKKK{K-K>KKKKKKKKRK>KCK=KJKKKKKKKKKKK=KEKKtK>K?K-KvKKKKK]K<K_KKKKKKKKpK<KCK=K?KoKKKKKKKKKKKVK>KhKKPK=K?KBKKKKKKeK?KYKKKKKKKPK;K?K>K<K<K=K:KK;KGKGKEKCKAK:KAKFKDK;K5K5K4K4K4K5K3K2K4K5KCKAK4K7K8K=KqKKKyKAK?K9K9KAKkKKKmKJKCKkKKKKK*KKKKK8KHKKKKKKKKKKKKkK?K|KKKKK^KNKHKCKDKDKCKFKJKHKKKKKKWK=KCKBKBKBK@KBKBKDKGKMKKKKKKkK8K>K=K;K:K8K7K8K7K8KBKFK@KKKKKKKWKAKDKDKBK0K(K*K+K)K)K$K K:K@K?K=K-K(K)K*K*K"KK K K +K KKK K +KKKKKKK!K@K@KBKAKKKIK&K*K'KK +K K +KK"KCK>KKKKKKKKKdKBKKK<K>K@K@KAe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKHK(K=K[KKKKIK9K;K9K;K@K@K=K6K9K?KBKBKDKCK1K*K+K)KKKKKKK K K K K KKKKKK K +K KKKK +K +K +K K KKKKKKKKKKKKKKKKKKKKKKKKK,K>KEKKKKKKKKKKKKKKKKKKKKKKKKKKKKKyKKKKK KK.KJKPKMKMKNKMKLKMKNKMKMKLKMKNKMKMKMKNKMKKKNKMKMKMKJKKKLKMKLKKKMKLKKKKKKKKKKKKKKKKKKKKKKKKKKKKKLKJKIKEK=K;K>KAK@K>KBKBKMKpKKK/K8K}KKKKKKKYK?KCK@KDKKKKKKKKKKKAKAKKKAKCK.KgKKKKKjK>KTKKKKKKKKK@KCK@K=KbKKKKKKKKKKKbK?KZKK`K<KAK<KKKKKKvK@KMKKKKKKKcK:K@K>K<K<K<K=KK2KHKFKEKCKAK:K>KDKDK<K5K4K5K4K4K5K4K4K4K5K>KBK5K5K7K8K\KKKKIKAK<K9K=KKKK`KIKGKTKKKKKEK KKKK*KDKsKKKKKKKKKKKKDK\KKKKKKKYK@KDKDKDKFKIKCK|KKKKKyK:KCKBKBK@K7K=KAKCKHKBKKKKKKK?K<K=K;K9K9K8K8K7K6K=KDK@KWKKKKKK~K@KEKCKDK8K+K*K+K)K'K%K#K2K@K>K?K3K'K*K+K)K*KK +K K K K K K K K KKKKKKK6KAKAK;KhKK9K'K*K)K"K KK K +K K5KAKSKKKKKKKKKCKgKK]K8K?K?K;e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKDK%KAKbKKKKCK8K:K4K:KAK?K<K8K4K:KDKCKDKCK/K*K+K&KKKKKKK K K K KKKKKKK K +K KKKK +K K K K KKKKKKKKKKKKKKKKKKKKKKKKK-K>KFKKKKKKKKKKKKKKKKKKKKKKKKKKKKKyKKKKKKKKK9KOKOKMKMKNKNKMKMKMKNKNKMKNKNKNKNKNKNKNKLKKKKKLKLKKKKKKKKKMKIKKKLKKKKKKKKKKKKKKKKKLKLKLKLKLKKKHKKKHKAK;K:K5K.K>KBK@K@KTKkK5K1KtKKKKKKKbK>KEKAK@KzKKKKKKKKKKHK=KtKKDKCK+KVKKKKKxKAKNKKKKKKKKKGKBKCK=KUKKKKKKKKKKKsK?KLKKtK9KBK7KpKKKKKKGKEKKKKKKKyK<K?K>K<K=K<K?K'K&KGKFKDKCK@K<K<KEKDK?K2K3K6K4K4K5K3K2K4K4K9KFK:K5K4K8KOKKKKXK?K?K;K5KnKKKlKTKGKGKKKKK|K&KKKKKBKWKKKKKKKKKKKKTKHKKKKKKKZKAKDKDKDKCKGKFK[KKKKKKFKBKBKBKAK7K:K?KBKEKCK]KKKKKKQK9K>K;K9K:K:K8K6K7K:KDKDKDKKKKKKKLKBKCKBK>K,K)K+K*K&K%K$K)K>K>KAK:K)K*K+K+K*K;KK K K +K K KKK K KKKK K K%K@K>K>KHKnK1K'K)K)K*KKKK KKK?K?KKKKKKKKKcKEKKK>K>K>K?e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKxK?K'KDKjKKKK>K:K5K$K:KAK@K<K9K-K3KEKDKEKCK/K*K+K&KKKKKKK K K K K +KKKKKK K +K KK K +K +K K K KKKKKKKKKKKKKKKKKKKKKKKKKK/K@KFKKKKKKKKKKKKKKKKKKKKKKKKKKKKK{KKKKKKKKKK$KAKMKNKMKNKMKOKOKNKMKNKMKLKLKLKMKMKLKLKKKLKKKKKKKLKKKKKLKKKLKJKKKKKKKKKLKMKLKLKKKIKJKJKJKJKKKJKHKGKFK?K4K/K<KBKCKDK?K@KAK8KkKKKKKKKnK<KDKAK<KmKKKKKKKKKKQK9KiKKIKBK<KLKKKKKKBKEKKKKKKKKKLK>KCK?KLKKKKKKKKKKKKCKCKKK?KCK:K]KKKKKKNKAKsKKKKKKK@K>K>K>K=K<K>K+KKEKGKCKBKBK=K<KDKDKAK4K3K5K4K4K5K3K2K5K4K6KEK>K4K5K7KAKKKKhKAK>K;K5KUKKKKpKFKCKxKKKKKUKKKKK:KIKKKKKKKKKKKKlK@K{KKKKKKKGKBKDKCK?K@KHKFKKKKKK]K>KBKBKBKAK?K?K@KDKFKGKKKKKK{K8K>K;K9K:K:K8K6K8K6K>KDK?KjKKKKKKiK?KFKCKBK2K)K+K)K(K(K&K%K7K@K?K@K/K(K+K+K&KKKKK K +K +K +K K +K K KKKK K KK:K>K?K?K;K*K&K'K&K'K!KKKKKK*K?KTKKKKKKKKKEKjKK`K:K@KAe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKsK>K(KEKrKKK}K>K;K1K"K<K@K@K<K9K.K6KEKCKCKAK/K*K+K'KKKKKKK K K K K KKKKKK K +K KK +K K +K K K KKKKKKKKKKKKKKKKKKKKKKKKKK/K@KHKKKKKKKKKKKKKKKKKKKKKKKKKKKKK|KKKKKKKK 
K*K0K>KEKKKNKMKMKMKMKMKMKNKLKKKKKKKMKMKKKKKKKKKKKKKKKKKKKLKKKLKLKIKLKKKKKKKKKJKJKLKJKHKHKHKHKIKLKKKHKFKGKIKEK<K8K=K@KAKBKBKBKBKPKKKKKKKyK>KCK@K9K`KKKKKKKKKK\K7K]KKSK>KBKGKKKKKKGKAKvKKKKKKKK[K=KCKAKFKKKKKKKKKKKKKKAKxKKHKAK?KMKKKKKK\K?KbKKKKKKKKK<K?K?K=K<K<K6K+KEKGKEKDKCK>K=KDKDKCK:K4K4K4K4K4K5K5K5K5K3K>K@K4K5K6K9KjKKKKCK?K9K7KCKaKrKKKPKDK]KKKKKrK3KKKK.KFKyKKKKKKKKKKKKEK[KKKKKKK[K@KDKDK?K7KEKAKsKKKKKK<KCKBKAKBK@K>K?KCKHKAKwKKKKKKCK<K;K9K:K:K8K6K8K5K9KCKDKKKKKKKKKCKEKDKCK;K,K*K)K)K)K(K$K-K>K>K@K:K*K+K*K1KKK K +K K +K +K K K K KKKK +K KK*K?K>K?K=K+K$K&K%K&K(KKKKK +KK5KAKKKKKKKKKbKDKKK>K=K?e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKlK>K*KJK}KKKtK<K<K-K$K=K@K>K:K9K.K9KFKCKCK=K-K*K+K&KKKKKKK K K K K +KKKKKK K KKK +K K +K K KKKKKKKKKKKKKKKKKKKKKKKKKKK1K@KFKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK K,K5K1K=KHKUKUKWKNK?K?KGKHKLKMKMKLKLKNKMKMKLKKKLKLKKKKKKKKKKKKKKKLKJKIKKKLKLKKKKKKKKKLKKKHKIKLKKKKKIKHKGKHKIKIKHKHKHKIKHKHKEK=K7K:KAKAKCKCKBKCK_KKKKKKAKBK?K8KWKKKKKKKKKKhK5KRKKaK=KBK@KKKKKKRK>KgKKKKKKKKjK;KCK@KAKtKKKKKKKKKKKUK<KhKKRK>KBKBKKKKKKlK@KSKKKKKKK]K:K>K=K=K<K=K;K:KEKDKFKGKBK@K;KAKDKDK;K2K3K5K4K4K4K4K3K3K3K;KCK9K6K2K7KSKKKKMK>K7K:K9KXKKKKdKCKKKKKKKKKKKKK"KCK\KKKKKKKKKKKKVKHKKKKKKKkK?KDKCKAK@KDKEKVKKKKKKGK@KBK@KBK@K>KAKBKEKAKVKKKKKK`K:K>K<K:K:K8K6K8K5K6KAKEK?KKKKKKKXK>KEKCK@K0K*K*K*K)K)K(K'K;KBKAK>K0K-K*KDKKK4KK K K K K +KKKKKKK K KK;K?K>K@K3K%K&K%K%K'K K KKK +K KK;KYKKKKKKKKKEKhKK^K:K@e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKcK9K,KKKKKKlK;K=K*K&K>K@K=K:K8K,K;KEKCKCK<K,K+K+K%KKKKKKKK K K K KKKKKK K KKK +K K K K KKKKKKKKKKKKKKKKKKKKKKK KKK K2K@KFKKKKKKKKKKKKKKKKKKKKKKKKKKKKKwKEKKKUKUK_KeKdKcK`K_KUKHKGKNKKKJKMKMKKKLKMKMKNKLKKKKKKKKKKKKKKKKKKKKKLKJKIKKKLKKKLKKKKKKKKKKKIKIKKKKKKKIKHKJKJKHKHKHKIKIKIKIKJKIKEK=K8K;K<K?KAKDKCK@KJKgKKKKDKAK?K8KNKKKKKKKKKKuK6KIKKoK<KCK<KxKKKKK^K;K[KKKKKKKKuK<KCK@K?KeKKKKKKKKKKKdK;KWKKeK<KDK9KKKKKK}KCKJKKKKKKKoK9K>K<K<K=K<K<K9KAKDKEKFKBKAK;K@KDKDK<K2K2K5K4K4K4K4K3K2K3K9KDK;K6K4K5KHKKKSKBKCK8K:K6KWKKKK}KEKFKKKKKKHK$KKKK;KLKKKKKKKKKKKKkKAKzKKKKKKqKDKDKCKBKBKDKIKFKKKKKKcK;KCK@KBK@K>KAKAKCKEKDKKKKKKK;K>K<K:K:K8K6K8K6K2K;KCK@KWKKKKKKK@KEKCKDK8K*K+K*K)K)K)K$K0KCKAK?K8K+K(K\KKKtKK K +K +K K +KKKKKKKK +K K*K>K>K?K;K*K&K%K%K%K&KKKKK KK0KCKKKKKKKKK`KEKKK@K@e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK]K7K/KLKKKKfK;K=K*K*K>K?K>K:K6K+K<KDKCKDK<K,K,K+K!KKKKKKKKK K K KKKKKK K KKK K K K K K KKKKKKKKKKKKKKKKKKKKKKKKKK1K@KGKKKKKKKKKKKKKKKKKKKKKKKKKKKKKZKaKeKhKfKdKjKhKeKbK_KWKRKLKPKMKGKGKOKOKLKKKLKNKLKKKLKKKKKKKKKLKLKKKKKKKKKLKKKKKKKKKKKLKLKIKHKKKKKHKHKHKHKHKIKIKHKHKHKHKHKHKHKIKIKEKCKCK@K;K:K<K@KCKCKCK@KHKoKKHK>KAK;KDKKKKKKKKKKK<K?KKK>KCK:KiKKKKKlK;KRKKKKKKKKK>KBKAK=KZKKKKKKKKKKKrK>KMKKxK=KDK8KoKKKKKKIKBK|KKKKKKK<K=K=K<K<K=K<K:K@KDKDKCKDKCK>K?KDKDK@K5K2K5K4K5K4K1K2K3K2K5KBK>K6K5K5K=K{KKRK<KGK;K:K7KFKKKKKNKBKfKKKKKHK-KKKK2KFKKKKKKKKKKKKKEK]KKKKKKuKJKCKDKBKAKAKHKAKpKKKKKK=KBKBKBK@K>K?K?KBKHKBKlKKKKKKKK9K<K;K8K8K8K8K6K4K7KBKCKBKKKKKKKNKBKCKBK=K-K,K)K)K)K)K%K'K?KAK?K=K/K&KsKKKK)KK +K K +K K +K K K KKKKKKK9K=K=K>K2K#K#K%K%K%KKKKKKK#KAK[KKKKKKKKKEKiKKaK9e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKXK4K4KOKKKK^K9K<K%K+K?K?K<K:K7K*K<KDKCKCK:K+K,K+K!KKKKKKKKKKKKKKKKK K KKK +K K K K KKKKKKKKKKKKKKKKKKKKKKKK KK 
K3K@KFKKKKKKKKKKKKKKKKKKKKKKKKKKKKK_KkKiKhKiKhKeKcKaK_K\K[KUKOKVKVKPKGK6K;KMKMKMKLKLKLKLKKKKKKKLKKKJKKKLKKKKKKKKKKKKKKKLKKKJKIKIKKKKKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKIKGKFKFKHKFK?K;K;K=K>KBKEKEK@KJKBK;K?K;K?KKKKKKKKKKK@K7KzKKAKCK=K\KKKKKyK>KHKKKKKKKKKFKAK@K;KNKKKKKKKKKKKKCKDKKK>KBK<KXKKKKKKTK=KiKKKKKKKEK;K=K<K<K<K=K;KAKEKDKCKDKCK?K>KCKCKAK7K3K4K4K4K4K1K2K3K2K2K>KCK6K5K5K8K^KKK]KFK=K:K9K8KdKpKJK{KXKAKPKKKKK^KIK3KKK#KDKcKKKKKKKKKKKKSKIKKKKKKK^KAKCKBKCK@KEKEKRKKKKKKMK@KBKBK@K>K?K?KBKDKEKNKKKKKKoK9K?K;K9K8K8K8K6K4K5K>KCK@KiKKKKKKlK>KDKAK@K4K+K)K)K)K)K&K#K3KAK?K@K4K+KKKKKeKK K K K +K K +K KKK KKKK +K0K?K<K=K9K'K$K$K&K%K&KKKKKKK=KBKKKKKKKKK`KCKKK=e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKQK0K8KQKKKKUK7K<K$K-K@K>K:K9K7K+K>KDKCKBK7K)K,K,K!KKKKKKKKKKKKKKKKK K KKK +K K K KKKKKKKKKKKKKKKKKKKKKKKKK!K!K K5K@KGKKKKKKKKKKtKCKuKKKKKKKKKKKKKKKKKgKmKiKfKgKfKcKaK_K_K]K[KTKLK=K2K%KKKK*KBKNKKKMKNKLKKKKKKKLKIKHKKKKKKKLKKKKKKKKKKKLKJKHKHKHKKKKKHKIKHKHKHKHKHKHKHKHKHKHKHKHKHKIKIKGKFKFKGKHKGKAK;K<K=KAKCKGKEKDKAK<K9K9K{KKKKKKKKKKHK5KnKKHKAK@KSKKKKKKAKCKKKKKKKKKPK?KAK=KEKKKKKKKKKKKKLK@KxKKDK=K>KIKKKKKKcK>KZKKKKKKKTK:K=K<K=K<K=K=K'K8KEKCKDKCK?K=KCKAKBK;K0K1K4K2K2K2K2K3K3K2K:KCK8K4K4K7KMKHKHK]KNK>K:K;K9KJKyKcK{K`KDKEKKKKKK\KDKKKK@KOKKKKKKKKKKKKfKAKzKKKKKKxKIKBKBKBKBKBKFKBKKKKKKjK<KCKBK@K>K?K?KBKBKGKCKKKKKKK?K;K:K:K8K8K8K6K5K3K:KDKDKLKKKKKKKBKBKAKBK=K+K)K)K)K)K'K$K)K>KBKCK;K8KKKKKKK K K K +K K KKKK K +KKKKK;K:K=K=K0K$K#K&K&K'KKKKKKK,K@K\KKKKKKKKKAKjKKbe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKOK-K<KUKKKKQK8K:K"K2K@K>K;K9K5K-KAKCKCKCK7K)K*K*KKKKKKKKKKKKKKKKKK K K K K +K K K KKKKKKKKKKKKKKKKKKKK K KKKK K!K K5KBKGKKKKKKKKKK|K:KJKKKKKKKKKKKKKKKKKdKbKdKeKcK_KaKbKgKsK{K[K/K#K!K!KKKKKKK2KHKNKKKKKKKKKKKLKIKHKJKLKKKKKKKKKKKKKKKLKKKJKJKJKLKKKHKHKHKHKHKHKHKHKHKHKHKHKHKHKIKHKGKHKHKHKHKGKGKHKFK>K8K:K>KDKHKGKDKDKCK:KpKKKKKKKKKKQK5KbKKQK>K@KGKKKKKKFK<KsKKKKKKKK\K=KAK>K@KxKKKKKKKKKKKUK>KfKKQK:K@K?KKKKKKvK?KOKKKKKKKfK7K?K=K<K<K<K?KK%KFKCKDKCK?K=K@KAKBK?K2K1K4K2K3K4K3K2K1K/K5KDK<K3K4K4KGK9K@KyKNK@K;K9K;K1K\K~KKqKDKCKqKKKKK^KGKKKK6KCKKKKKKKKKpKKKKCKZKKKKKKK\K>KBKBKBKBKGKEKiKKKKKK>KAK@K?K?K?K?K@KAKHKCK`KKKKKKWK7K:K:K8K7K6K5K5K2K4KAKEK?K}KKKKKK[K@KCKBKAK1K*K)K)K)K'K%K%K:KBKAK>KQKKKKKKPKK K K +K K +K KKKK K K +K +K +K/K9K;K?K8K$K%K&K%K$K%KKK KKKKAK@KKKKKKKKK_KDKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKIK-K?KYKKKKKK8K7K"K5K?K>K;K8K3K-K@KCKCKCK7K)K+K(KKKKKKKKKKKKKKKKKK K K K K +K K K KKKKKKKKKKKKKK KKKKKK K KKKKK!K K6KDKHKKKKKKKKKKK[K"KPKKKKKKKKKKKKKKKK`K]K_KkKtK|KKKKKKKKXK3K K!K$K"KKKKK"K<KNKMKKKKKLKLKIKHKIKMKKKKKLKKKKKKKKKKKLKLKLKLKLKKKHKHKHKHKHKHKHKHKHKIKHKHKHKHKIKHKFKHKIKIKHKFKFKFKHKHKCK=K;K:K?KEKCK@KGKFKSKKKKKKKKKK_K3KUKK]K<KAK@KKKKKKPK:KeKKKKKKKKlK;KBK?K>KiKKKKKKKKKKKeK=KUKKbK8KAK8K{KKKKKKDKGKKKKKKK}K;K@K=K<K=K;K=K*KKFKCKDKCK?K<K?KAKBK>K3K1K4K2K4K5K3K2K/K/K0K>K>K4K5K5K;KSKK|KdKCK<K:K:K8KVKKKKSKDKWKKKKKqKFK$KKK&KDKdKKKKKKKKcKKKKQKFKKKKKKKvK>KCKBKBKBKEKEKNKKKKKKNK<K?K?K?K?K?K>K>KCKFKHKKKKKK}K6K;K:K8K8K8K6K4K3K2K:KCKAKYKKKKKKK?KEKBKBK:K*K)K)K)K&K&K&K/K>K@K=K[KKKKKKKK +K K +K +K K KKKKKKK KKK7K8K?K=K.K$K%K$K#K&KKKKKK K1KAKaKKKKKKKKKCKhKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK|KGK/KBK^KKKKGK9K7K!K7K@K>K;K9K1K.KCKAKAKAK6K)K*K)KKKKKKKKKKKKKKKKKK K K K K K KKKKKKKKKKKKKKKKKKKK K K K K KKK!K!K"K7KCKJKKKKKKKKvKKgK9K%KXKoKvKKKKKKKKKKKKKKzKKKKKKKKKKKKKKKgK;K%K K%K$K 
KKKK,KBKMKKKJKLKIKHKIKIKIKIKIKKKLKIKIKLKJKHKKKLKJKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKGKGKGKGKFKGKFKFKGKHKGKBK8K8K;K2K4KEKIKFKHKbKKKKKKKKlK2KIKKiK=KEK=K|KKKKK[K9KZKKKKKKKK{K<KAK?K;KZKKKKKKKKKKKvK=KJKKyK9KBK7KgKKKKKKJK>KsKKKKKKKBK>K>K>K=K<K>K4KK@KGKCKDKAK>K>KAKBKAK6K2K3K2K4K1K2K2K0K/K.K9KAK7K5K4K7K_KKkK{KMK>K:K:K9KHKQKeKKdKCKGKKKKKK]KJKKKK=KOKKKKKKKKKKKKeK=K}KKKKKKKIK?KAKBKBKBKDKAKKKKKKqK9K@K?K?K=K=K=K=KAKEKAKvKKKKKKCK:K9K7K8K:K7K4K4K4K9K?KBKAKKKKKKKMK@KAKBK<K-K*K*K)K&K&K&K'K:KAKAKEKKKKKKK<KK K +K K KK K +KKKKKKKK1K9K<K>K6K%K$K%K%K&K%KKKK KK KCKDKKKKKKKKK]KEKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKvKEK+KDKhKKKKBK9K4K!K7K?K>K:K9K0K/KAKAKAKBK2K)K,K(KKKKKKKKKKKKKKKKKK K K K K K KKKKKKKKKKKKKKKKKKKK K!K K K!K K"K%K%K#K8KCKJKKKKKKKvKKKEKIK.K7KtK|KKKKvKKKKKKKKKKKKKKKKKKKKKKKKKKKKuKKK.K#K&K#KKKKK2KGKMKKKIKHKHKHKIKHKHKJKKKHKIKKKJKHKJKKKIKGKHKIKIKHKHKHKHKHKIKIKIKHKHKHKHKHKIKGKGKFKFKFKFKFKFKFKFKEKGKHK@K5K-K5KBKDKEKHKCKGKeKKKKKKzK2KBKKyK<KBK8KlKKKKKmK8KPKKKKKKKKK?K@K?K<KPKKKKKKKKKKKKAKCKKK=K?K:KTKKKKKKVK;KbKKKKKKKQK:K>K?K=K<K=K;KK3KHKCKDKBK?K=KAKAKBK<K2K3K2K3K1K2K2K0K/K/K6KDK9K6K5K5KIKK~KQKJK?K:K9K9K9K4K3KjKkKDKBK}KKKKKaKeK4KK K5KFKKKKKKKK~KKKKKAK[KKKKKKK\K<KBKBKAKAKDKAKdKKKKKK@K>K?K?K=K=K<K=KAKDKCKVKKKKKKaK5K:K8K7K6K5K5K5K5K4K<KCK?KgKKKKKKoK<KBKBKCK3K)K*K)K'K%K&K$K2K@KBK>KoKKKKKK~K K K K K KK +K +KKKKKKKK!K>K<K<K:K+K&K&K%K%K'KKKKKKK;K@KaKKKKKKKKKCKhe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKoKCK*KEKpKKKK@K:K2K!K9K@K=K9K9K0K2K@KAKAK@K0K)K+K(KKKKKKKKKKKKKKKKKK K K K +K K KKKKKKKKKKKKKKKKKKK!K K K K K K!K K"K$K#K7KCKJKKKKKKKKKKKK~KgKlKKuKK|KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKZK6K#K"K$K!KKK K:KMKKKHKHKIKLKJKHKHKHKHKIKHKJKLKIKHKIKHKHKHKHKHKHKHKHKHKIKJKIKHKHKHKHKIKIKGKGKGKFKFKFKFKFKGKGKFKFKFKGKFK<K7K7K=KAKDKEKDK>KIKkKKKKK7K:KKK;KAK8K]KKKKKzK9KHKKKKKKKKKHK?K?K<KFKKKKKKKKKKKKIK>KuKKGK=K=KFKKKKKKkK=KUKKKKKKKaK9K?K?K<K=K<K=K K+KFKCKDKBK=K<K@KAKBK>K4K3K3K2K2K2K2K0K.K-K/KBK=K5K8K7K?KKKKbKBK;K9K:K3K5KIKeK~KLK@K_KKKKKsKNKKKGK.K$KEKiKKKKKKKKKKKKSKGKKKKKKK|K=KCKBK?K?KCKDKJKKKKKKUK<KAK>K<K=K<K>K?KAKEKCKKKKKKK9K;K:K9K8K6K4K5K5K0K9KCKCKJKKKKKKKBKAKBKAK9K(K(K)K&K%K&K$K)K@KBK?KJKKKKKKK+KK K K K KKKKK K KKKKK8K=K=K=K4K$K&K&K%K&K$KKKKKK#KAKEKKKKKKKKK^KEe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKiKAK,KIKzKKKwK=K<K-K!K;K@K>K:K9K/K3KAKBKBK@K0K*K+K)KKKKKKKKKKKKKKKKKK K K +K +K K KKKKKKKKKKKKKKKKK K K!K K K K!K K!K!K#K$K#K7KDKEKuKKKKKKKKKKKKfKmKxKzKtKkKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKjK>K$K#K'K(KKK)KBKJKHKIKJKIKHKIKJKIKHKHKIKKKJKIKIKHKHKIKJKIKHKHKHKIKHKFKHKIKHKHKIKHKHKFKEKFKGKFKFKFKFKFKFKFKGKFKFKGKGKCK:K8K9K<KBKCKDKDKCKLKpKKK<K2KwKKBKBK>KQKKKKKK<K@KKKKKKKKKTK=K?K>K@KKKKKKKKKKKKVK;KaKKVK:K@K;KKKKKK}K?KHKKKKKKKwK9K?K>K<K=K<K=K)KKEKCKDKCK?K=K@KBKDK?K3K3K2K3K3K3K2K1K/K-K-K=KAK4K7K5K8KpKKKrKEK>K9K9K:K?KlKKKOKCKMKKKKKKZKJK_K{K:K<KQKKKKKK~KKKKKKgK?KzKKKKKKKIK?KBK?K?KCKFK?KKKKKKyK;K@K>K<K;K2K6K>K?KFKCKjKKKKKKMK7K9K8K8K6K4K5K4K1K3K?KDK?K{KKKKKK]K>KBK@K?K/K'K)K'K'K&K%K#K6KDKAK<K|KKKKKKhKK K K K +K KKK K K +KK K KK%K?K=K<K9K)K"K&K&K$K'KKKKKKKK:KhKKKKKKKKKAe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKcK=K0KLKKKKoK:K<K,K$K?K?K>K;K9K-K5KCKCKDKAK/K*K,K)KKKKKKKKKKKKKKKKKK K K K K K KKKKKKKKKKKKKKKKK!K!K K K K!K!K 
K"K$K$K#K#K8KDKFKyKKKKKKKKxK^KKKKK{KKvKrKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKyKMK-K#K+K-KKK0KCKJKFKGKHKKKLKIKHKIKHKHKKKLKIKHKHKIKIKIKIKIKIKIKHKFKHKIKIKIKIKGKFKGKGKFKFKFKFKFKFKFKFKFKFKFKFKFKGKCKDKDK=K:K=K?KBKFKHKFKCKTKpKEK0KfKKKK?KAKEKKKKKKFK:KoKKKKKKKKbK<K@K>K=KqKKKKKKKKKKKeK<KUKKjK9KAK8KxKKKKKKDK@K|KKKKKKK>K=K=K<K<K=K<K2KK@KCKEKDKAK>K@KBKBK?K7K3K2K4K5K3K2K3K0K.K-K8KEK8K4K5K5KVKK{KSKHK@K7K9K9K@KXKpKmKPKBKEKKKKKKyKEKAKdKaK>KHKKKKKKoKyKKKKKKBK\KKKKKKkKLK?KBK?K?K@KEKBK]KKKKKKAK>K?K>K:K3K6K=K<KDKEKMKKKKKKqK4K9K7K8K6K4K6K4K3K2K<KBKAKVKKKKKKK?K@KBKCK7K(K)K)K)K&K&K#K-K@K@K?KXKKKKKKKKKKKK +K K K K +K K K K KKK8K=K<K;K2K%K&K%K%K&K$K KKKKKKKAKKKKKKKKK[e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK\K9K2KMKKKKgK9K<K)K$K>K@K>K9K8K-K5KCKBKCK?K.K(K*K'KKKKKKKKKKKKKK KKKK K K +K K K KKKKKKKKKKKKKKKKK K K K K!K K!K"K"K#K$K#K#K7KBKMKKKKKKKKKKKKKK~KfKKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK\K7K*K'K'K"K"K5KEKHKHKHKJKIKHKHKIKHKIKJKIKHKIKHKGKGKGKGKHKIKHKFKHKIKGKHKIKHKHKGKFKFKFKFKFKFKFKFKFKGKGKGKGKGKGKEKEKFKDKAK>K;K<K@KCKEKGKBK=K?K7KWKKYK=KBK<KKKKKKQK5KaKKKKKKKKoK9K?K<K;KcKKKKKKKKKKKtK?KHKK}K9KBK6KdKKKKKKPK<KkKKKKKKKGK9K=K=K<K;K:K9KK6KDKEKDKBK>K?KBKBKAK:K1K3K3K3K3K2K3K0K,K-K4KCK;K4K5K7KEKKKPKDKBK:K:K9K;KGKVKVKUKDKCKgKKKKKK]K[KgKKcK>KqKKKKKwKcKsKKKKKQKHKKKKKKK`K>KBK?K?K<K=KGKHKKKKKK\K<K?K?K=K<K<K=K<K?KGKAKKKKKKK=K6K8K8K6K4K4K3K3K1K6KAKBK?KKKKKKKOK=KBKBK=K,K)K*K(K&K%K$K&K8KAKBKBKKKKKKKVK�KKK K +K +K K K K +K +K +K +K +K +K*K?K<K=K;K+K#K&K%K%K%KKKKKKKKKaKKKKKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKVK5K4KMKKKK_K8K:K'K'K>K@K=K8K5K,K7KCKBKBK>K,K(K)K&KKKKKKKKKKKKKK!K KKK K +K K +K KKKKKKKKKKKKKKKKKK!K K!K K K K!K$K"K#K%K#K#K7KBKNKKKKKKKKKKKKdKYKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKmKXK<K(K'K&K$K,KAKIKGKHKIKHKHKHKHKHKHKHKHKHKIKIKGKFKFKGKIKHKFKHKIKFKGKIKIKIKGKFKGKFKFKFKFKFKFKFKGKGKGKGKGKFKGKGKGKFKDKEKBK<K;K<KAKCKDKAK?K?KCKK_K7K@K7K{KKKKK\K4KTKKKKKKKK~K<K@K?K;KSKKKKKKKKKKKK@K?KKK@K@K8KOKKKKKKaK;KZKKKKKKKZK8K=K=K<K9K8K?KK)KEKDKDKBK>K9K;KBKAK=K2K2K2K2K2K2K3K0K,K.K.K>K>K4K5K7K9K~KKFKNKDK;K8K:K7K;KFK9K0KBKEKLKKKKKK^KjK{KjKKHKTKKKKKKpKbKwKKKKfK=K}KKKKKKKDKAK?K?K<K6KEKAK{KKKKKK;K?K=K<K=K=K=K=K>KGKCK^KKKKKKXK2K8K8K6K5K3K2K3K2K1K?KCK?KgKKKKKKqK:KCKAK?K5K(K)K)K&K%K$K"K/K?K>K>KaKKKKKKKKKKKKKKK K K K +K +K KKK<K=K=K<K3K$K%K&K%K&K"K KKKKKKKKKKKKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKRK2K8KPKKKK[K8K;K%K+K@K>K;K8K5K+K8KCKCKDK>K-K*K)K$KKKKKKKKKKKKK!K!K KKK K +K K K K KKKKKKKKKKKKKKK K K K!K K"K#K#K#K$K"K#K$K#K&K8KBKMKKKKKKKKKnKKKrKuKkKvKKtKKxKrKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKnKrKKuKHK&K$K#K#K3KIKLKHKHKIKHKHKIKIKHKHKIKHKGKGKFKFKGKGKGKHKGKGKFKDKIKHKGKGKFKFKFKFKGKFKFKFKFKFKGKFKFKGKFKFKFKFKFKGKEKBKBK>K:K8K<K>K?KAKAK?KBKCK7K<K5KkKKKKKjK3KJKKKKKKKKK@KBK@K:KHKKKKKKKKKKKKGK=KtKKIK<K<KAKKKKKKrK=KMKKKKKKKoK8K<K<K<K<K:K=K(K"KDKCKCKCKAK:K6K@KBK@K4K/K2K3K3K2K3K1K/K.K,K=KBK5K8K9K8KeKKtKDKFK;K7K:K;K.KXK=K!K=KFKEKKKKKKKKyKaKKVKCKKKKKKYKbKcKKKKK@K[KKKKKKKZK<K?K?K?K<KDKCK[KKKKKKDK=K>K<K=K<K=K=K=KCKGKIKKKKKKK2K8K8K6K4K4K4K3K1K.K7KCKCKJKKKKKKKDKAKAK@K;K*K)K)K'K$K&K#K&K>K?K@KDKKKKKKKDKKKKKKKKKKK +K K K K +K*K<K<K=K:K*K%K&K%K&K'KKKKKKK +KKFKKKKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKPK/K<KUKKKKTK8K:K#K.K@K>K:K8K4K+K:KCKCKDK>K-K+K)K#KKKKKKKKKKKKK!K!K KKK K +K K K K KKKKKKKKKKKKKKK K K 
K"K!K#K$K$K$K$K"K#K%K#K&K8KBKLKKKKKKKKK{KdK_KhK]KIKnKK|KoK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKIK\K4K/K,K-K;KJKJKHKHKIKHKHKHKHKIKHKFKGKGKGKFKFKGKIKGKGKHKFKIKHKFKFKGKGKGKGKFKFKFKFKFKFKGKEKFKGKFKGKFKFKFKGKDKFKGKEKEK?K8K6K:K=K=KBKAK=K@K>K4K\KKKKK}K5K?KKKKKKKKKJK@K?K;KAKKKKKKKKKKKKWK:KcKK\K:K?K8KKKKKKK@KCKKKKKKKK<K;K<K=K=K;K;K2KK@KCKCKCKAK>K9K?KCKCK6K.K2K3K3K2K3K2K/K.K,K7KDK7K4K6K6KPKKKJKGK@K9K9K<K(KRK_K)K:KCKBKpKKKKKKKvKKKtKAKyKKKKK]KbKmKKKKKKKFKKKKKKKxK;K?K?K@K=K@KDKGKKKKKK_K7K=K<K=K<K=K<K<K?KEKBKvKKKKKKCK6K8K6K4K5K5K3K0K0K2K@KEK>KyKKKKKK_K>KBK>K?K.K(K)K'K$K&K%K"K3KAK@K=KoKKKKKKKKKKKKKKKKK K +K +K +K KK:K<K<K;K2K$K%K&K%K%K#K +KKKKKK K KsKKKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKMK-K@K[KKKKMK9K9K"K1K>K>K;K9K5K*K:KBKBKBK;K+K*K*K#KKKKKKKKKKKKK!K K!KKK K +K K +K K KKKKKKKKKKKKKK!K K!K K"K$K#K#K#K#K$K"K!K!K$K'K8KBKLKKtKKKKKKKKtKeKkKbKnKKKKfKKKKKzKqKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKsK|KKqKKKKK(K@KJKIKHKIKIKHKIKIKIKIKIKHKGKFKGKFKFKFKGKGKGKIKHKGKGKFKGKGKGKFKGKFKFKFKGKGKGKGKGKFKFKFKFKFKFKFKGKGKFKGKGKCK;K8K8K<K@KBK?KAKCK<KJKKKKKK9K;K|KKKKKKKKWK>K?K<K:KvKKKKKKKKKKKeK;KSKKnK9KBK5KrKKKKKKKK?KuKKKKKKKDK9K=K>K=K9K:K:KK7KCKCKBKBKAK9K=KCKCK:K/K2K3K2K2K3K2K/K.K,K0KBK:K3K5K3K@KtKK_KDKBK;K9K<K1K0KVK7KPKIKCKUKKKKKKxKsKKKKNKXKKKKKvKEK\K{KKKKFKBK|KKKKKKKDK>K?K?K>K>KDKAKwKKKKKK:K=K=K=K=K=K;K9K<KCKBKUKKKKKKhK3K9K6K4K5K4K3K2K0K1K:K@K?KUKKKKKKK>K@K>K@K5K(K)K&K%K#K$K#K(K<K@KAKLKKKKKKK4K�KKKKKKKKKKKKK +K K.K>K<K>K9K&K#K&K%K$K&KK KKKKK K K KKKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK{KKK-KDK_KKKKGK9K7K!K4K?K=K:K9K4K*K>KBKBKBK:K+K*K*K#KKKKKKKKKKKKK!K K"KKK K +K K +K K KKKKKKKKKKKK K K!K!K!K"K"K$K#K#K#K#K$K#KK K%K&K9KCKKKKKKKKKKUKXKBKSKbKQKSK|KhKKKKKKK|KLKyKKyKpKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKvKqKlKgKKKKKKK,KDKLKHKGKIKHKHKHKHKHKHKGKFKFKFKFKFKFKFKGKIKHKGKGKFKEKDKDKFKFKGKFKFKGKGKGKGKGKFKFKFKFKFKFKFKFKFKFKFKFKFKGKCK=K7K:K=K>KAKAK@K?KJKpKKKKCK6KmKKKKKKKKeK:K@K=K8KdKKKKKKKKKKKvK;KHKKK;KAK8K\KKKKKKYK=KbKKKKKKKTK5K=K=K=K:K9K>KK+KFKCKCKAK@K9K9K@KCK;K1K2K2K2K2K3K2K/K.K-K.K>K=K4K5K5K;KLKyKHK4KEK;K:K<K;K!K0K]KK^KAKGKKKKKKqKhK{KuKKdKEKKKKKK@KGKbKKKoK4KFK[KKKKKKKQK;K?K>K>K=KBKCKUKKKKKKIK;K=K<K<K<K;K:K9K>KEK@KKKKKKK9K8K6K4K5K4K3K1K.K/K4KBKCKAKKKKKKKRK;K@K>K;K,K'K&K%K$K$K$K K3KAKBK?K~KKKKKKsKKKKKKKKKKKKKK KKK=K<K=K<K/K$K&K'K)K%KaK/KK +KKKK KKAKKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKqKHK,KGKgKKKKCK7K5K K7K@K;K8K:K3K,KBKBKAKBK7K*K*K*K#KKKKKKKKKKKKK!K K!KKK K +K K +K KKKKKKKKKKKKKK K!K K!K K"K$K#K#K$K#K#K#K#K%K&K&K8KCKHKKK[K~KKKZK-K=KWKjKcK`K`KlKfK{KKKKxKKuKXKKKK|K{KKKKKKKKKKKKKKKKKKKKKKKKcKKKKKKKKKyK@KPK!KKKKKKKK7KIKIKHKHKFKFKFKHKIKFKFKFKFKFKFKGKFKGKIKHKGKGKFKGKGKGKEKFKHKFKFKGKGKGKGKGKFKFKFKFKGKGKGKFKFKFKFKGKGKFKGKGKCK=K9K;K:K5K7KCKCK>KMKvKKMK1K`KKKKKKKKrK:K@K?K9KSKKKKKKKKKKKK<K?KKKAK?K9KIKKKKKKjK;KSKKKKKKKhK7K>K=K=K;K9K;K&K!KCKCKDK@K>K:K9K@KBK?K1K1K3K2K2K3K1K/K.K.K-K:KCK5K4K4K4K^KKBK7KAK@K:K9K<K&K5KKKTK@K@KzKKKKKpKgKKKK|K=K|KKKKK?K5KTK|KKoKKCKFKKKKKKKqK<K?K=K=K<K@KEKBKKKKKKgK8K=K9K9K9K:K:K8K;KEK>KhKKKKKKOK3K5K5K5K4K3K2K/K0K1K>KDK=KcKKKKKKuK:K@K=K>K3K%K&K&K&K%K$K K(K@K>K>KVKKKKKKK$KKKKKKKKKKKKK K +K K2K<K<K=K6K'K%K(K)K$KKsKK K KKKK K KmKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKjKBK+KIKqKKKKBK:K4K K7K>K=K;K9K2K+KBKAKAKBK6K)K+K*K!KKKKKKKKKKKKK#K"K#KKK K K +K +K K KKKKKKKKKKKK K K!K"K"KK 
K$K#K$K#K$K%K%K&K&K%K&K:KCKJKKKqKnKKKuKMKmKKK}KKsKKKKKKKK|K8KFK{KK~KiKKKKKKKKKKKKKKKKKKKKKKyK{KKKKqK}KKkKKK}KK^KfK#KKKKKKKKK(K?KJKIKGKHKHKHKHKHKGKFKFKFKFKFKFKGKGKGKGKGKFKFKFKGKFKFKGKFKFKFKGKGKGKGKFKFKFKGKFKDKFKGKFKFKFKFKFKGKFKDKDKAKBKDK6K(K/K>KCKDKAK>KRKFK0KSKKKKKKKKK>K@K?K9KHKKKKKKKKKKKKGK:KsKKLK<K<K=KKKKKKK<KIKKKKKKKK9K=K=K=K;K9K:K,KKAKDKCKBK@K<K8K>KBKBK7K0K1K2K2K1K0K0K-K,K+K6KCK8K4K5K5KHKuKCKIKQK?K9K9K<K0K)KKK\K<KCK_KKKKKvKKKKKKGK]KKKKK^KK8KhKKKNKBKBK|KKKKKKK@K=K>K=K<K>KEK@KoKKKKKK:K>K<K:K:K:K:K9K;KDKBKKKKKKKKwK2K7K4K4K5K3K1K1K2K.K5KBKBKHKKKKKKKEK>K>K=K8K)K&K&K&K%K#K#K#K8KCKBK?KKKKKKKbKKKKKKKKKKKKK K KK K>K<K<K>K1K$K&K(K+KKK%KK K KKK +K KKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKdK@K.KIK}KKKzK=K8K0K"K7K=K=K=K:K1K-K@KBKAKBK5K(K,K*KKKKKKKKKKKKKK$K$K$KK K K K K +K K KKKKKKKKKKKK!K K!K#K#K!K"K$K#K#K#K$K&K&K%K&K%K%K;KCKJKdKKhKRKKKKaKOKmKKKeKxKtKKyKKKvKKvKPKvKKwKRKlK|KKKKKKKKKKKKKKKKKKKKKuKvKKlKXKrKKMKxKKqKbKK}KK'KKKKKKKKKKK-KEKIKHKGKHKIKIKHKFKFKFKFKFKFKFKFKFKFKFKFKFKFKFKGKFKFKFKFKFKFKFKFKFKFKFKFKGKEKCKEKGKFKFKFKFKFKGKGKDKBK?KDKEK@K9K:K;K=K?KAKBK>K=K:KFKKKKKKKKKDKAK?K;K>KKKKKKKKKKKKSK9K`KK_K9K?K8KKKKKKKCKAKKKKKKKKCK<K=K=K;K9K;K6KK;KEKCKDKAK>K8K<KBKBK8K/K/K2K2K/K/K0K.K)K*K.K@K;K3K4K4K:KgKKKgK=K9K9K9K9KKjKK^K5KFKLKKKKKKKKKKK[KGKKKKKKKKKKwKvKKcK@K[KKKKKK:K9K@K?K>K=K=KCKDKRKKKKKKKK8K>K:K:K:K:K:K9K?KDK@KKKKKKK=K5K5K4K5K3K1K3K3K.K2K=KDK>KwKKKKKKdK<KAK;K:K.K%K&K&K&K#K#K#K-K@KBK@K`KKKKKKKKKKKKKKKKKKK K K +K K6K>K<K>K7K&K$K'K9KKKiKK KKKKK K K<KKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK^K>K2KKKKKKpK9K7K-K!K;K=K=K;K7K/K.KBKAKAKBK4K)K)K*KKKKKKKKKKKKKK#K#K$KKK K K K +KKKKKKKKKKKKKK!K!K K#K$K#K$K%K%K%K%K%K%K%K&K%K%K%K:KCKIKdKvKcKtK{KuKcKSKZKKKxKOKQK>KXKtKKxKjKxKrKwKKK|KrKK_KwKKKKKKKKKKKKKKKKKKKKKKKKKKK[KvKuKHK[KKK{K)KKKKKKK!K&K-K1K5KBKEKHKIKGKGKIKHKFKGKFKFKFKFKFKGKGKFKFKFKFKFKGKGKGKGKFKFKFKFKFKFKFKFKFKFKGKFKFKFKGKGKGKFKFKFKGKGKFKFKEKFKFKEKEKBKAK=K<K=K=K?K>KAK?KYKKKKKKKKLK?K?K;K6KxKKKKKKKKKKKbK8KOKKqK:KAK6KoKKKKKKOK=KkKKKKKKKOK:K<K=K<K9K9K<KK1KEK@KBKAK=K6K:KAKDK=K0K/K0K0K/K0K1K0K+K*K*K>KAK3K3K5K6KeKKKqKDK<K9K:K;KK8KKiK/KFKCKKKKKK^KyKKKKpKAKKKKKK.KK0K`KfKKKNKFKKKKKKmKUK?K?K>K=K=K?KEKCKKKKKKpK5K>K:K:K:K:K:K9K:KAKAK^KKKKKK]K1K6K5K5K4K0K0K0K/K.K9KBK@KSKKKKKKK;K=K<K=K4K(K&K&K%K#K"K!K%K;K@KBKDKKKKKKKPKKKKKKKKKKKK +K KKK#K@K=K<K<K.K%K%KFKKKK#KKKKKKK +KKfKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKZK:K3KMKKKKjK7K8K*K"K=K=K=K:K7K.K3KCKAKAKBK3K)K(K*KKKKKKKKKKKKKK"K#K$KKK K K K +KKKKKKKKKKKKKK!K!K!K#K$K#K$K&K&K&K&K&K&K%K&K%K%K%K:KCKHKqKbKfKKKK~KxK7KvKKKKVKPKPKkKKpKKKtKKtKsK~KKfK~KtKKKKKKKKKKKKKKKKKKKKKKKuK\K~KfKcKnKeK!K;K{KxKnK5K"K3K?KEK?KJKMKLKGKFKIKGKAK@KEKGKFKHKHKFKFKFKGKFKFKFKFKFKFKGKGKGKFKFKFKFKFKGKFKFKGKFKFKFKFKFKFKGKGKGKFKFKFKFKFKFKFKGKGKGKGKGKFKFKEKCKEKFKFK@K=K:K:K<K@KBK@KAKbKKKKKKVK:K?K<K5KfKKKKKKKKKKKuK7KDKKK9K@K7KVKKKKKK^K;KXKKKKKKKcK8K<K=K<K9K8K<K&K%KDKAKAK@K>K8K7KAKBK@K3K/K0K/K0K/K.K/K,K*K)K8KDK7K5K5K4KOKKKKPK?K:K9K<K-KKnKwK"KAK@KiKKKKK`KWKKKK{KEKcKKKKKVKKKRKLKOKKlK9K~KKKKKKKAK>K>K=K=K>KEKBKkKKKKKK=K>K:K:K:K9K:K9K9K@KBKDKKKKKKK3K5K5K5K4K0K0K0K0K.K3K@KCKBKKKKKKKQK9K=K=K;K*K&K&K$K$K"K!K!K2K@K?K;KiKKKKKKK KKKKKKKKKKKKKKKK6K>K<K=K6K&K#KTKKKKdKK KKKK +K +KKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKSK4K8KNKKKKcK9K:K(K$K=K=K<K8K9K,K2KBKAKCK@K0K)K(K*KKKKKKKKKKKKK!K#K$K"KK K K +K K KKKKKKKKKKKKKK 
K"K$K#K$K#K$K&K%K%K%K&K&K#K&K&K%K'K9KAKOKKhKmKKKKK;KMKKKK`KCK[KMKOKaKiKKKKKtKmKKvKUKZKqKKKKKKKKKKKKKKKKKKKKKKKYKpKKKKK:K,KhKkKXKdKOKLKUKWKXK]K\KWKSKKKIKFKEKBKCK?KBKFKEKHKFKFKFKGKFKFKFKFKFKFKGKGKGKFKCKDKCKFKGKFKGKGKGKGKGKFKGKGKGKGKGKEKDKDKDKGKGKGKGKGKEKGKGKEKCKDKCKFKFKDKEKGKDK;K9K=K@K?K>K=KFKiKKKKeK9K@K<K4KUKKKKKKKKKKKK:K>KKK?K<K:KHKKKKKKpK:KIKKKKKKKyK7K<K=K;K9K9K;K-KK@KAKBKAK=K9K6K>KBKAK4K.K0K/K0K/K/K/K,K)K(K1KCK;K4K4K5KBKKKKgK?K<K8K:K8KK-KhK$K<K@KOKKKKKKRKyKKKKQKKKKKKKKKKFK^K*KPKlK>K\KKKKKKKTK<K?K<K;K=KBKCKNKKKKKKUK9K;K:K8K8K8K:K8K>KEKAKtKKKKKKFK2K5K5K3K1K3K1K/K0K1K;KBK=KeKKKKKKyK9K=K=K=K1K%K&K$K$K$K#K"K)K?K@K@KIKKKKKKK:KK +KKKKKKKKKKKK +KK$K=K<K=K=K0K#KeKKKKK KK +KKKKKKK'Ke]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKPK2K;KPKKKK\K7K9K)K*K>K=K;K9K8K,K3KBKAKBK?K/K(K)K*KKKKKKKKKKKKK!K#K$K"KKK K +K K KKKKKKKKKKKKK K K"K$K#K$K#K$K%K%K&K%K&K&K$K&K&K%K&K9KAKOKiKiK\KpKKKK^KkKKK{KrKTKmKZK`KJKhKKfK|KKfK{KzKKdKAKKKKKKKKKKKKKKKKKKKKKKKKKoKKKKKLKbKKjKkKQKQKRK[KXKVKZK]K[KYKUKMKLKJKFKEK?KAK@KAKGKGKGKFKFKGKFKFKFKFKFKGKGKGKFKCKDKCKEKGKFKGKGKFKFKFKGKFKFKGKFKFKEKDKEKEKEKEKEKFKEKEKGKFKEKCKDKCKFKFKCKDKEKFKHKEK<K:K=K?K?K@K?KKKmKKzK8K?K=K6KGKKKKKKKKKKKKDK8KsKKKK8K=K9KKKKKKK>K?KKKKKKKK=K:K=K;K9K9K:K5KK9KBKAK@K=K:K5K=KDKBK8K,K/K0K/K0K/K/K,K)K(K-K@K=K4K4K4K7K~KK\KbKBK=K8K9K<K&K,KGK)K7KDKCKKKKKKKKKnKK[KBKKKKKK+K K+KjKwKBKPKGKGKKKKKKKvK:K>K;K;K<K>KGKAKKKKKKyK7K<K:K9K7K/K5K8K;KCKCKRKKKKKKjK0K5K5K4K1K3K1K/K.K-K5KAK?KHKKKKKKKEK;K=K=K7K'K&K$K$K$K$K!K"K7K@KAK>KyKKKKKK}KKK KKKKKKKKKKK +K KK9K<K=K>K;K(KvKKKKK^KK KKKKK +K KK<e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKOK1K?KTKKKKTK4K7K$K-K>K=K:K9K6K+K6KCKAK@K>K-K'K+K(KKKKKKKKKKKKK!K#K$K#KKK K +K +K +KKKKKKKKKKKKK!K K"K$K#K#K$K#K#K%K&K%K%K%K&K%K%K%K%K:KBKKKK|KFKwKKKKK}KKKgKJKUKpKmKvKKnKKwKhKoKKKeKK{KdKpKzKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKPKSKWK]K\K\K]K`K^K\K[KVKUKMKKKJKFKHKGKDK<K@KIKGKFKFKFKFKGKGKFKGKGKGKFKCKDKCKFKGKFKFKGKFKCKEKGKFKFKGKEKCKDKDKGKFKCKCKCKCKCKFKFKCKCKCKCKCKFKFKCKCKCKCKDKIKLKDK<K:K<K@K?K<K<KRKYK3K;K=K7K=KKKKKKKKKKKKQK8KaKK^K8K?K5K~KKKKKKFK;KqKKKKKKKKK9K>K;K9K:K9K9KK0KDKAK?K>K;K6K:KBKBK>K/K/K0K/K0K/K0K+K)K(K)K9K?K4K5K6K5KhKKSKgKLK=K9K:K8KGK'K&KK,KBK?KqKKKKKKKKwKKnKAKfKKKKKKK KK2KMK;KEKGK?K|KKKKKKKBK;K:K:K9K<KCK@KfKKKKKK?K:K:K9K7K.K0K8K7K?KDKAKKKKKKK9K4K5K4K0K/K1K/K-K-K2K?KBK@KzKKKKKKcK8K=K=K:K-K%K$K$K$K#K!KK(K>K?K>KRKKKKKKK*K�KKKKKKKKKKKKK K K,K>K=K=K<K3KKKKKKKKK +K KKKKK +Ke]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK|KLK.KAKYKKKKOK5K6K K.K?K<K;K9K6K*K7KBKAKBK=K,K(K*K&KKKKKKKKKKKKK!K#K$K#KKK K +K K KKKKKKKKKKKKK K K"K$K#K$K#K$K%K%K&K%K%K&K%K'K'K%K%K:KAKPKKKQKKKKKKoKTKK|KgKlKvKwKuKqK~K~KyKaK[KKeKVKhKKKvKoKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKWKYK^K[K]K_K\K^K`K_K[K\K\KVKOKLKCK9K-K&KKK/KCKHKFKGKGKEKEKGKFKFKGKFKDKDKDKDKEKFKGKGKEKCKFKGKFKFKGKEKCKEKEKFKFKCKDKCKEKEKEKEKDKDKDKDKCKEKEKCKCKCKDKBKBKEKMKMKCK<K:K>K?KAK>K=K=K8K7K5K5KKKKKKKKKKKKaK6KRKKwK7K?K5KgKKKKKKSK8K`KKKKKKK[K5K<K:K9K9K8K:K K'KDKAK?K>K<K9K8KAKBK?K0K.K0K/K/K.K.K-K)K(K'K6K@K3K3K5K4KGKKKKHK?K9K:K4KRK=KKK(K=KAKYKKKKKKKKKKKLKNKKKKKzKK%K#K+KKKAKCK^KKKKKKKYK7K;K:K:K3K=KDKJKKKKKKYK5K;K8K8K7K6K8K8K<KDKAKhKKKKKKRK0K5K3K1K.K0K/K.K-K,K:K@KAKWKKKKKKK=K>K<K:K5K%K#K$K$K$K!K!KK7K@K@K>KKKKKKKiK�KKKKKKKKKKKKKKKK:K<K=K=K@KKKKKKKYKKKKKK KK +K +e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKuKGK,KDK`KKKKJK8K7K K0K>K=K<K9K7K*K;KCKAKBK<K+K)K)K&KKKKKKKKKKKKK K#K$K#KK K K +K K KKKKKKKKKKKKKK 
K"K$K#K$K#K$K&K&K%K&K%K%K&K&K%K%K(K>KAKOKKKhKfKKKKKKKkKK_KaK[K_KmKRK|KKKVKMKeK<KgKpKiKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK^K[K]KaK`KaK_KbK`KaKaKUK\KJK1K1K3K)KKKKKKK2KEKGKDKCKEKGKDKBKFKFKDKDKDKDKDKFKGKGKEKCKFKGKFKFKGKEKCKFKGKGKFKCKDKCKFKGKDKCKDKDKDKDKCKCKCKCKCKCKCKDKDKCKCKFKLKJKAK:K;K>K?K?KAK@K=K:K4KpKKKKKKKKKKKtK6KCKKK;K>K7KSKKKKKKhK8KNKKKKKKKsK4K:K9K:K8K7K:K)KKAKBK?K>K>K:K7K?KBK@K3K.K0K0K.K-K-K.K*K'K(K0K?K7K5K5K4K;KhKKOK:KDK;K9K:K>K+KKK K;KCKEKKKKKKKKKKKhK>KKKKKK"K&K,K'K KK.KDKJKKKKKKKmK7K:K:K:K2K5KDK?KKKKKKK5K;K9K7K8K8K8K7K6K@KCKJKKKKKK|K0K6K3K2K0K1K/K-K.K,K5K?KCKCKKKKKKKTK;K;K:K9K*K"K$K$K$K!K"K!K,K?K?K>K\KKKKKKKK�KKKKKKKKKKKKKK +K*K<K=K<K?KKKKKKKK!KKKKKK KKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKmKBK*KEKeKKKKEK6K7KK0K<K=K<K9K5K)K<KCKAKAK:K,K)K)K&KKKKKKKKKKKKK"K$K#K$KK K K +K K K KKKKKKKKKKK K!K K"K$K#K$K%K%K&K%K%K%K&K&K&K K#K)K)K<KAKNKKKnKOK|KKKKKKKK\KWKhK|KfK]KKKqK3K[KLKOKaKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK^KWK_KZK^KcK]KOKUKUKQK_KRKKiK>K2K2K/K'K$KKKKK#K6KFKEKCKDKDKCKFKGKFKFKFKFKFKGKFKDKDKCKDKDKFKFKDKEKFKFKFKGKFKCKCKCKDKDKDKDKDKDKDKEKCKCKDKCKCKCKCKCKDKCKAKAK@KBKKKMK>K7K9K>K?K?K=K?K=K[KKKKKKKKKKKK<K:KKKCK=K;KCKKKKKK|K=KCKKKKKKKK9K9K:K9K9K9K8K6K/K=KBKBKAK>K:K6K;K@KCK7K-K.K0K/K/K.K-K+K)K(K+K<K<K6K5K4K8KOKpKdKHKDK>K7K9K;K-KKKK1K>K?KKKKKKKKKKKxKAKmKKKKKGKK@K(KKKKAKCKKKKKKKfK:K=K;K:K8K8KCK?K_KKKKKKDK8K9K8K8K8K8K6K5K;KDK?KKKKKKK;K1K3K2K/K0K0K.K.K.K.K<KBK>KgKKKKKKyK9K<K:K:K/K$K$K#K K K!K#K$K8K@KBKCKKKKKKKYKKKKKKKKKKKKKKKKK=K<K?K;KeKKKKKKK]KKKKKKKK e]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKfK?K,KGKnKKKK@K8K5KK2K=K=K<K9K4K*K=KAKAK@K9K+K)K)K&KKKKKKKKKKKKK#K$K$K$KK K K +K K K KKKKKKKKKKK K"K!K#K$K$K"KK&K&K&K%K%K'K&K%K$K'K)K)K;KCKHKsKKSKSKdKKKKKKKtKvKbKKKKgKsKfKKKVKIK5KpKEKuKKKKKKKKKKKKKKKKKKKKKKKyKKKKKKKKKKKKKKjK_KsK{KKKbKBK>K_KvKKcKKKKtKWK9K'K/K5K+K KKKK+K=KCKDKDKCKFKFKFKGKGKGKGKGKFKDKDKCKDKDKFKFKCKEKFKFKFKFKFKCKCKCKCKCKCKDKDKDKDKEKCKCKDKCKCKCKCKCKCKCKBKAKBKAK?KIKLKCK9K;K=K:K6K<KBKEK`KKKKKKKKKKKCK8KmKKSK9K=K7KKKKKKKBK>K|KKKKKKKEK8K:K:K:K9K7K9K;K8KBKBKAK>K:K6K9K?KAK:K-K.K0K/K0K.K-K,K)K(K'K7KBK7K4K5K5KKKuKtKKRK=K<K:K:K:KKKK*K<K>KaKKKKKKKKKKKIKRKKKKKwKK;K=K0K+K*KAKDK`KKKKKKKJK;K;K9K8K8K>KCKFKKKKKKaK4K9K8K8K8K7K7K2K7KEKAK\KKKKKKaK,K4K2K0K0K0K.K.K/K*K5KCKBKGKKKKKKKDK9K:K:K5K&K$K#K K!K K KK-KBKCK=KiKKKKKKK!K KKKKKKKKKKKKKK K/K?K<K=KCKKKKKKKK$KKKKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK^K<K/KKKyKKKzK=K9K2KK7K>K=K<K8K1K*K@KAKAKAK9K(K(K)K$KKKKKKKKKKKKK$K$K&K"KK K K +K K KKKKKKKKKKKK K"K$K#K#K$K#KK&K&K%K&K%K%K%K&K%K(K(K*K:KBKEK9K9KEKyKyKTKKKKKKKKZKKyKmKLKHKnKcKKlK~KKtKKKKKKKKKKKKKKKKKKKKKKKyKqKKKKKKKKKKKKKKKKKKKKpKKKKKKKKKvKKKKmKHK7K,K0K.K#KKKK.KAKEKCKCKCKCKEKGKGKGKGKGKGKEKCKFKGKGKFKCKDKCKFKGKDKCKDKCKCKCKDKCKCKCKCKCKDKCKCKCKCKDKDKDKDKBKAKAKAKAKBKBK@K@KFKGK6K4K/K)K7K@K@K>KDKdKKKKKKKKKPK3KZKKcK4K<K3KxKKKKKKKK7KiKKKKKKKUK8K;K=K;K9K:K:K8K5KAKBKAK@K<K7K6K>K@K>K/K.K0K/K0K/K,K+K(K%K&K0K>K8K4K5K5K?KfKZKhKTK?K=K:K:K8K KK KK8KCKMKKKKKKKKKKKaKBKKKKKK,K4K9K?K>K-K/KDKJKKKKKKxKZK7K:K:K:K9K=KFK@K}KKKKKK4K7K8K8K8K5K5K4K6K?KEKEKKKKKKK1K4K0K0K0K0K/K,K.K-K-K?KCK=KvKKKKKKfK2K;K9K8K.K$K!K!K!K!K!KK#K=KAK@KGKKKKKKK`K#KKKKKKKKKKKKKKKK=K=K=K:KiKKKKKKKVKK KKKKKe]r��(KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKYK:K2KKKKKKrK;K8K/K!K8K?K=K;K8K0K/K@K?KBKAK7K)K(K)K#KKKKKKKKKKKKK$K$K&K"KK K K +K K KKKKKKKKKKKK K!K#K#K$K$K%K%K%K&K&K%K&K&K&K%K&K(K(K*K:K?KDKBK9K;KKKXKyKKKKKKKzK`KoKrKRKYKVKIKKKKKKKKKKKKKKKKKKKK|KKKKKKKKKqKKKKKKKKKKKKKKKKKKKKKKKKKKzKKxKtKKKKKKKUK;K+K1K7K)KKK 
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/ascent.dat differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/common.py b/project/venv/lib/python2.7/site-packages/scipy/misc/common.py
new file mode 100644
index 0000000..3cac1f3
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/misc/common.py
@@ -0,0 +1,303 @@
+"""
+Functions which are common and require SciPy Base and Level 1 SciPy
+(special, linalg)
+"""
+
+from __future__ import division, print_function, absolute_import
+
+from numpy import arange, newaxis, hstack, product, array, frombuffer, load
+
+__all__ = ['central_diff_weights', 'derivative', 'ascent', 'face',
+           'electrocardiogram']
+
+
+def central_diff_weights(Np, ndiv=1):
+    """
+    Return weights for an Np-point central derivative.
+
+    Assumes equally-spaced function points.
+
+    If weights are in the vector w, then
+    derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
+
+    Parameters
+    ----------
+    Np : int
+        Number of points for the central derivative.
+    ndiv : int, optional
+        Number of divisions. Default is 1.
+
+    Notes
+    -----
+    Can be inaccurate for a large number of points.
+
+    """
+    if Np < ndiv + 1:
+        raise ValueError("Number of points must be at least the derivative order + 1.")
+    if Np % 2 == 0:
+        raise ValueError("The number of points must be odd.")
+    from scipy import linalg
+    ho = Np >> 1
+    x = arange(-ho, ho + 1.0)
+    x = x[:, newaxis]
+    X = x**0.0
+    for k in range(1, Np):
+        X = hstack([X, x**k])
+    w = product(arange(1, ndiv + 1), axis=0) * linalg.inv(X)[ndiv]
+    return w
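+
+# A quick sanity check of the helper above (illustrative sketch, not part of
+# the upstream module): the classic 3-point first-derivative stencil
+# f'(x) ~ (f(x+dx) - f(x-dx)) / (2*dx) corresponds to the weights below.
+#
+#     >>> central_diff_weights(3)
+#     array([-0.5,  0. ,  0.5])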
+
+
+def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
+    """
+    Find the n-th derivative of a function at a point.
+
+    Given a function, use a central difference formula with spacing `dx` to
+    compute the `n`-th derivative at `x0`.
+
+    Parameters
+    ----------
+    func : function
+        Input function.
+    x0 : float
+        The point at which `n`-th derivative is found.
+    dx : float, optional
+        Spacing.
+    n : int, optional
+        Order of the derivative. Default is 1.
+    args : tuple, optional
+        Arguments
+    order : int, optional
+        Number of points to use, must be odd.
+
+    Notes
+    -----
+    Decreasing the step size too far can result in round-off error.
+
+    Examples
+    --------
+    >>> from scipy.misc import derivative
+    >>> def f(x):
+    ...     return x**3 + x**2
+    >>> derivative(f, 1.0, dx=1e-6)
+    4.9999999999217337
+
+    """
+    if order < n + 1:
+        raise ValueError("'order' (the number of points used to compute the derivative), "
+                         "must be at least the derivative order 'n' + 1.")
+    if order % 2 == 0:
+        raise ValueError("'order' (the number of points used to compute the derivative) "
+                         "must be odd.")
+    # pre-computed for n=1 and 2 and low-order for speed.
+    if n == 1:
+        if order == 3:
+            weights = array([-1, 0, 1]) / 2.0
+        elif order == 5:
+            weights = array([1, -8, 0, 8, -1]) / 12.0
+        elif order == 7:
+            weights = array([-1, 9, -45, 0, 45, -9, 1]) / 60.0
+        elif order == 9:
+            weights = array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0
+        else:
+            weights = central_diff_weights(order, 1)
+    elif n == 2:
+        if order == 3:
+            weights = array([1, -2.0, 1])
+        elif order == 5:
+            weights = array([-1, 16, -30, 16, -1]) / 12.0
+        elif order == 7:
+            weights = array([2, -27, 270, -490, 270, -27, 2]) / 180.0
+        elif order == 9:
+            weights = array([-9, 128, -1008, 8064, -14350, 8064, -1008, 128, -9]) / 5040.0
+        else:
+            weights = central_diff_weights(order, 2)
+    else:
+        weights = central_diff_weights(order, n)
+    val = 0.0
+    ho = order >> 1
+    for k in range(order):
+        val += weights[k] * func(x0 + (k - ho) * dx, *args)
+    return val / product((dx,) * n, axis=0)
+
+
+def ascent():
+    """
+    Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
+
+    The image is derived from accent-to-the-top.jpg at
+    http://www.public-domain-image.com/people-public-domain-images-pictures/
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    ascent : ndarray
+       convenient image to use for testing and demonstration
+
+    Examples
+    --------
+    >>> import scipy.misc
+    >>> ascent = scipy.misc.ascent()
+    >>> ascent.shape
+    (512, 512)
+    >>> ascent.max()
+    255
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.gray()
+    >>> plt.imshow(ascent)
+    >>> plt.show()
+
+    """
+    import pickle
+    import os
+    fname = os.path.join(os.path.dirname(__file__), 'ascent.dat')
+    with open(fname, 'rb') as f:
+        ascent = array(pickle.load(f))
+    return ascent
+
+
+def face(gray=False):
+    """
+    Get a 1024 x 768, color image of a raccoon face.
+
+    raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
+
+    Parameters
+    ----------
+    gray : bool, optional
+        If True return 8-bit grey-scale image, otherwise return a color image
+
+    Returns
+    -------
+    face : ndarray
+        image of a raccoon face
+
+    Examples
+    --------
+    >>> import scipy.misc
+    >>> face = scipy.misc.face()
+    >>> face.shape
+    (768, 1024, 3)
+    >>> face.max()
+    255
+    >>> face.dtype
+    dtype('uint8')
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.gray()
+    >>> plt.imshow(face)
+    >>> plt.show()
+
+    """
+    import bz2
+    import os
+    with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
+        rawdata = f.read()
+    data = bz2.decompress(rawdata)
+    face = frombuffer(data, dtype='uint8')
+    face.shape = (768, 1024, 3)
+    if gray is True:
+        face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] + 0.07 * face[:, :, 2]).astype('uint8')
+    return face
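+
+# Worked note on the grayscale branch above (sketch, not part of the upstream
+# module): the 0.21/0.71/0.07 weights approximate a luma transform, and the
+# gray path drops the colour axis:
+#
+#     >>> face(gray=True).shape
+#     (768, 1024)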
+
+
+def electrocardiogram():
+    """
+    Load an electrocardiogram as an example for a one-dimensional signal.
+
+    The returned signal is a 5 minute long electrocardiogram (ECG), a medical
+    recording of the heart's electrical activity, sampled at 360 Hz.
+
+    Returns
+    -------
+    ecg : ndarray
+        The electrocardiogram in millivolt (mV) sampled at 360 Hz.
+
+    Notes
+    -----
+    The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
+    (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
+    PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
+    heartbeats as well as pathological changes.
+
+    .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208
+
+    .. versionadded:: 1.1.0
+
+    References
+    ----------
+    .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
+           IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
+           (PMID: 11446209); :doi:`10.13026/C2F305`
+    .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
+           Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
+           PhysioToolkit, and PhysioNet: Components of a New Research Resource
+           for Complex Physiologic Signals. Circulation 101(23):e215-e220;
+           :doi:`10.1161/01.CIR.101.23.e215`
+
+    Examples
+    --------
+    >>> from scipy.misc import electrocardiogram
+    >>> ecg = electrocardiogram()
+    >>> ecg
+    array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385])
+    >>> ecg.shape, ecg.mean(), ecg.std()
+    ((108000,), -0.16510875, 0.5992473991177294)
+
+    As stated, the signal features several areas with a different morphology.
+    E.g. the first few seconds show the electrical activity of a heart in
+    normal sinus rhythm as seen below.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fs = 360
+    >>> time = np.arange(ecg.size) / fs
+    >>> plt.plot(time, ecg)
+    >>> plt.xlabel("time in s")
+    >>> plt.ylabel("ECG in mV")
+    >>> plt.xlim(9, 10.2)
+    >>> plt.ylim(-1, 1.5)
+    >>> plt.show()
+
+    After second 16, however, the first premature ventricular contractions,
+    also called extrasystoles, appear. These have a different morphology
+    compared to typical heartbeats. The difference can easily be observed
+    in the following plot.
+
+    >>> plt.plot(time, ecg)
+    >>> plt.xlabel("time in s")
+    >>> plt.ylabel("ECG in mV")
+    >>> plt.xlim(46.5, 50)
+    >>> plt.ylim(-2, 1.5)
+    >>> plt.show()
+
+    At several points large artifacts disturb the recording, e.g.:
+
+    >>> plt.plot(time, ecg)
+    >>> plt.xlabel("time in s")
+    >>> plt.ylabel("ECG in mV")
+    >>> plt.xlim(207, 215)
+    >>> plt.ylim(-2, 3.5)
+    >>> plt.show()
+
+    Finally, examining the power spectrum reveals that most of the biosignal is
+    made up of lower frequencies. At 60 Hz the noise induced by the mains
+    electricity can be clearly observed.
+
+    >>> from scipy.signal import welch
+    >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum")
+    >>> plt.semilogy(f, Pxx)
+    >>> plt.xlabel("Frequency in Hz")
+    >>> plt.ylabel("Power spectrum of the ECG in mV**2")
+    >>> plt.xlim(f[[0, -1]])
+    >>> plt.show()
+    """
+    import os
+    file_path = os.path.join(os.path.dirname(__file__), "ecg.dat")
+    with load(file_path) as file:
+        ecg = file["ecg"].astype(int)  # np.uint16 -> int
+    # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain
+    ecg = (ecg - 1024) / 200.0
+    return ecg
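+
+# Worked check of the numbers quoted in the docstring above (sketch, not part
+# of the upstream module): a 5 minute recording sampled at 360 Hz gives
+# 5 * 60 * 360 = 108000 samples, matching the documented shape (108000,).
+#
+#     >>> electrocardiogram().shape == (5 * 60 * 360,)
+#     True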
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/common.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/common.pyc
new file mode 100644
index 0000000..fde01f7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/common.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/doccer.py b/project/venv/lib/python2.7/site-packages/scipy/misc/doccer.py
new file mode 100644
index 0000000..6ae3870
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/misc/doccer.py
@@ -0,0 +1,250 @@
+''' Utilities to allow inserting docstring fragments for common
+parameters into function and method docstrings'''
+
+from __future__ import division, print_function, absolute_import
+
+import sys
+
+__all__ = ['docformat', 'inherit_docstring_from', 'indentcount_lines',
+           'filldoc', 'unindent_dict', 'unindent_string']
+
+
+def docformat(docstring, docdict=None):
+    ''' Fill a function docstring from variables in dictionary
+
+    Adapt the indent of the inserted docs
+
+    Parameters
+    ----------
+    docstring : string
+        docstring from function, possibly with dict formatting strings
+    docdict : dict, optional
+        dictionary with keys that match the dict formatting strings
+        and values that are docstring fragments to be inserted. The
+        indentation of the inserted docstrings is set to match the
+        minimum indentation of the ``docstring`` by adding this
+        indentation to all lines of the inserted string, except the
+        first
+
+    Returns
+    -------
+    outstring : string
+        string with requested ``docdict`` strings inserted
+
+    Examples
+    --------
+    >>> docformat(' Test string with %(value)s', {'value':'inserted value'})
+    ' Test string with inserted value'
+    >>> docstring = 'First line\\n    Second line\\n    %(value)s'
+    >>> inserted_string = "indented\\nstring"
+    >>> docdict = {'value': inserted_string}
+    >>> docformat(docstring, docdict)
+    'First line\\n    Second line\\n    indented\\n    string'
+    '''
+    if not docstring:
+        return docstring
+    if docdict is None:
+        docdict = {}
+    if not docdict:
+        return docstring
+    lines = docstring.expandtabs().splitlines()
+    # Find the minimum indent of the main docstring, after first line
+    if len(lines) < 2:
+        icount = 0
+    else:
+        icount = indentcount_lines(lines[1:])
+    indent = ' ' * icount
+    # Insert this indent to dictionary docstrings
+    indented = {}
+    for name, dstr in docdict.items():
+        lines = dstr.expandtabs().splitlines()
+        try:
+            newlines = [lines[0]]
+            for line in lines[1:]:
+                newlines.append(indent + line)
+            indented[name] = '\n'.join(newlines)
+        except IndexError:
+            indented[name] = dstr
+    return docstring % indented
+
+
+def inherit_docstring_from(cls):
+    """
+    This decorator modifies the decorated function's docstring by
+    replacing occurrences of '%(super)s' with the docstring of the
+    method of the same name from the class `cls`.
+
+    If the decorated method has no docstring, it is simply given the
+    docstring of `cls`'s method.
+
+    Parameters
+    ----------
+    cls : Python class or instance
+        A class with a method with the same name as the decorated method.
+        The docstring of the method in this class replaces '%(super)s' in the
+        docstring of the decorated method.
+
+    Returns
+    -------
+    f : function
+        The decorator function that modifies the __doc__ attribute
+        of its argument.
+
+    Examples
+    --------
+    In the following, the docstring for Bar.func is created using the
+    docstring of `Foo.func`.
+
+    >>> class Foo(object):
+    ...     def func(self):
+    ...         '''Do something useful.'''
+    ...         return
+    ...
+    >>> class Bar(Foo):
+    ...     @inherit_docstring_from(Foo)
+    ...     def func(self):
+    ...         '''%(super)s
+    ...         Do it fast.
+    ...         '''
+    ...         return
+    ...
+    >>> b = Bar()
+    >>> b.func.__doc__
+    'Do something useful.\n        Do it fast.\n        '
+
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        func_docstring = func.__doc__
+        if func_docstring is None:
+            func.__doc__ = cls_docstring
+        else:
+            new_docstring = func_docstring % dict(super=cls_docstring)
+            func.__doc__ = new_docstring
+        return func
+    return _doc
+
+
+def extend_notes_in_docstring(cls, notes):
+    """
+    This decorator replaces the decorated function's docstring
+    with the docstring from corresponding method in `cls`.
+    It extends the 'Notes' section of that docstring to include
+    the given `notes`.
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        # If python is called with -OO option,
+        # there is no docstring
+        if cls_docstring is None:
+            return func
+        end_of_notes = cls_docstring.find('        References\n')
+        if end_of_notes == -1:
+            end_of_notes = cls_docstring.find('        Examples\n')
+            if end_of_notes == -1:
+                end_of_notes = len(cls_docstring)
+        func.__doc__ = (cls_docstring[:end_of_notes] + notes +
+                        cls_docstring[end_of_notes:])
+        return func
+    return _doc
+
+
+def replace_notes_in_docstring(cls, notes):
+    """
+    This decorator replaces the decorated function's docstring
+    with the docstring from corresponding method in `cls`.
+    It replaces the 'Notes' section of that docstring with
+    the given `notes`.
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        notes_header = '        Notes\n        -----\n'
+        # If python is called with -OO option,
+        # there is no docstring
+        if cls_docstring is None:
+            return func
+        start_of_notes = cls_docstring.find(notes_header)
+        end_of_notes = cls_docstring.find('        References\n')
+        if end_of_notes == -1:
+            end_of_notes = cls_docstring.find('        Examples\n')
+            if end_of_notes == -1:
+                end_of_notes = len(cls_docstring)
+        func.__doc__ = (cls_docstring[:start_of_notes + len(notes_header)] +
+                        notes +
+                        cls_docstring[end_of_notes:])
+        return func
+    return _doc
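+
+# Minimal usage sketch for the two decorators above (hypothetical Base/Child
+# classes, not part of the upstream module). With no 'References' or
+# 'Examples' section in Base.stats, the notes are simply appended:
+#
+#     class Base(object):
+#         def stats(self):
+#             """Compute summary statistics."""
+#
+#     class Child(Base):
+#         @extend_notes_in_docstring(Base, notes="\n    Notes\n    -----\n    Cached.\n")
+#         def stats(self):
+#             pass
+#
+#     # Child.stats.__doc__ is now Base's docstring plus the extra Notes text.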
+ """ + def _doc(func): + cls_docstring = getattr(cls, func.__name__).__doc__ + notes_header = ' Notes\n -----\n' + # If python is called with -OO option, + # there is no docstring + if cls_docstring is None: + return func + start_of_notes = cls_docstring.find(notes_header) + end_of_notes = cls_docstring.find(' References\n') + if end_of_notes == -1: + end_of_notes = cls_docstring.find(' Examples\n') + if end_of_notes == -1: + end_of_notes = len(cls_docstring) + func.__doc__ = (cls_docstring[:start_of_notes + len(notes_header)] + + notes + + cls_docstring[end_of_notes:]) + return func + return _doc + + +def indentcount_lines(lines): + ''' Minimum indent for all lines in line list + + >>> lines = [' one', ' two', ' three'] + >>> indentcount_lines(lines) + 1 + >>> lines = [] + >>> indentcount_lines(lines) + 0 + >>> lines = [' one'] + >>> indentcount_lines(lines) + 1 + >>> indentcount_lines([' ']) + 0 + ''' + indentno = sys.maxsize + for line in lines: + stripped = line.lstrip() + if stripped: + indentno = min(indentno, len(line) - len(stripped)) + if indentno == sys.maxsize: + return 0 + return indentno + + +def filldoc(docdict, unindent_params=True): + ''' Return docstring decorator using docdict variable dictionary + + Parameters + ---------- + docdict : dictionary + dictionary containing name, docstring fragment pairs + unindent_params : {False, True}, boolean, optional + If True, strip common indentation from all parameters in + docdict + + Returns + ------- + decfunc : function + decorator that applies dictionary to input function docstring + + ''' + if unindent_params: + docdict = unindent_dict(docdict) + + def decorate(f): + f.__doc__ = docformat(f.__doc__, docdict) + return f + return decorate + + +def unindent_dict(docdict): + ''' Unindent all strings in a docdict ''' + can_dict = {} + for name, dstr in docdict.items(): + can_dict[name] = unindent_string(dstr) + return can_dict + + +def unindent_string(docstring): + ''' Set docstring to minimum indent for all lines, including first + + >>> unindent_string(' two') + 'two' + >>> unindent_string(' two\\n three') + 'two\\n three' + ''' + lines = docstring.expandtabs().splitlines() + icount = indentcount_lines(lines) + if icount == 0: + return docstring + return '\n'.join([line[icount:] for line in lines]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/doccer.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/doccer.pyc new file mode 100644 index 0000000..751951b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/doccer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/ecg.dat b/project/venv/lib/python2.7/site-packages/scipy/misc/ecg.dat new file mode 100644 index 0000000..37aec48 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/ecg.dat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/face.dat b/project/venv/lib/python2.7/site-packages/scipy/misc/face.dat new file mode 100644 index 0000000..e45c9e0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/face.dat differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/pilutil.py b/project/venv/lib/python2.7/site-packages/scipy/misc/pilutil.py new file mode 100644 index 0000000..4c5d286 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/misc/pilutil.py @@ -0,0 +1,621 @@ +""" +A collection of image utilities using the Python Imaging Library (PIL). 
+
+
+@numpy.deprecate(message="`bytescale` is deprecated in SciPy 1.0.0, "
+                         "and will be removed in 1.2.0.")
+def bytescale(data, cmin=None, cmax=None, high=255, low=0):
+    """
+    Byte scales an array (image).
+
+    Byte scaling means converting the input image to uint8 dtype and scaling
+    the range to ``(low, high)`` (default 0-255).
+    If the input image already has dtype uint8, no scaling is done.
+
+    This function is only available if Python Imaging Library (PIL) is installed.
+
+    Parameters
+    ----------
+    data : ndarray
+        PIL image data array.
+    cmin : scalar, optional
+        Bias scaling of small values. Default is ``data.min()``.
+    cmax : scalar, optional
+        Bias scaling of large values. Default is ``data.max()``.
+    high : scalar, optional
+        Scale max value to `high`. Default is 255.
+    low : scalar, optional
+        Scale min value to `low`. Default is 0.
+
+    Returns
+    -------
+    img_array : uint8 ndarray
+        The byte-scaled array.
+
+    Examples
+    --------
+    >>> from scipy.misc import bytescale
+    >>> img = np.array([[ 91.06794177,   3.39058326,  84.4221549 ],
+    ...                 [ 73.88003259,  80.91433048,   4.88878881],
+    ...                 [ 51.53875334,  34.45808177,  27.5873488 ]])
+    >>> bytescale(img)
+    array([[255,   0, 236],
+           [205, 225,   4],
+           [140,  90,  70]], dtype=uint8)
+    >>> bytescale(img, high=200, low=100)
+    array([[200, 100, 192],
+           [180, 188, 102],
+           [155, 135, 128]], dtype=uint8)
+    >>> bytescale(img, cmin=0, cmax=255)
+    array([[91,  3, 84],
+           [74, 81,  5],
+           [52, 34, 28]], dtype=uint8)
+
+    """
+    if data.dtype == uint8:
+        return data
+
+    if high > 255:
+        raise ValueError("`high` should be less than or equal to 255.")
+    if low < 0:
+        raise ValueError("`low` should be greater than or equal to 0.")
+    if high < low:
+        raise ValueError("`high` should be greater than or equal to `low`.")
+
+    if cmin is None:
+        cmin = data.min()
+    if cmax is None:
+        cmax = data.max()
+
+    cscale = cmax - cmin
+    if cscale < 0:
+        raise ValueError("`cmax` should be larger than `cmin`.")
+    elif cscale == 0:
+        cscale = 1
+
+    scale = float(high - low) / cscale
+    bytedata = (data - cmin) * scale + low
+    return (bytedata.clip(low, high) + 0.5).astype(uint8)
+
+
+@numpy.deprecate(message="`imread` is deprecated in SciPy 1.0.0, "
+                         "and will be removed in 1.2.0.\n"
+                         "Use ``imageio.imread`` instead.")
+def imread(name, flatten=False, mode=None):
+    """
+    Read an image from a file as an array.
+
+    This function is only available if Python Imaging Library (PIL) is installed.
+
+    Parameters
+    ----------
+    name : str or file object
+        The file name or file object to be read.
+    flatten : bool, optional
+        If True, flattens the color layers into a single gray-scale layer.
+    mode : str, optional
+        Mode to convert image to, e.g. ``'RGB'``. See the Notes for more
+        details.
+
+    Returns
+    -------
+    imread : ndarray
+        The array obtained by reading the image.
+
+    Notes
+    -----
+    `imread` uses the Python Imaging Library (PIL) to read an image.
+    The following notes are from the PIL documentation.
+
+    `mode` can be one of the following strings:
+
+    * 'L' (8-bit pixels, black and white)
+    * 'P' (8-bit pixels, mapped to any other mode using a color palette)
+    * 'RGB' (3x8-bit pixels, true color)
+    * 'RGBA' (4x8-bit pixels, true color with transparency mask)
+    * 'CMYK' (4x8-bit pixels, color separation)
+    * 'YCbCr' (3x8-bit pixels, color video format)
+    * 'I' (32-bit signed integer pixels)
+    * 'F' (32-bit floating point pixels)
+
+    PIL also provides limited support for a few special modes, including
+    'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
+    (true color with premultiplied alpha).
+
+    When translating a color image to black and white (mode 'L', 'I' or
+    'F'), the library uses the ITU-R 601-2 luma transform::
+
+        L = R * 299/1000 + G * 587/1000 + B * 114/1000
+
+    When `flatten` is True, the image is converted using mode 'F'.
+    When `mode` is not None and `flatten` is True, the image is first
+    converted according to `mode`, and the result is then flattened using
+    mode 'F'.
+
+    """
+
+    im = Image.open(name)
+    return fromimage(im, flatten=flatten, mode=mode)
+
+
+@numpy.deprecate(message="`imsave` is deprecated in SciPy 1.0.0, "
+                         "and will be removed in 1.2.0.\n"
+                         "Use ``imageio.imwrite`` instead.")
+def imsave(name, arr, format=None):
+    """
+    Save an array as an image.
+
+    This function is only available if Python Imaging Library (PIL) is installed.
+
+    .. warning::
+
+        This function uses `bytescale` under the hood to rescale images to use
+        the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
+        It will also cast data for 2-D images to ``uint32`` for ``mode=None``
+        (which is the default).
+
+    Parameters
+    ----------
+    name : str or file object
+        Output file name or file object.
+    arr : ndarray, MxN or MxNx3 or MxNx4
+        Array containing image values. If the shape is ``MxN``, the array
+        represents a grey-level image. Shape ``MxNx3`` stores the red, green
+        and blue bands along the last dimension. An alpha layer may be
+        included, specified as the last colour band of an ``MxNx4`` array.
+    format : str
+        Image format. If omitted, the format to use is determined from the
+        file name extension. If a file object was used instead of a file name,
+        this parameter should always be used.
+
+    Examples
+    --------
+    Construct an array of gradient intensity values and save to file:
+
+    >>> from scipy.misc import imsave
+    >>> x = np.zeros((255, 255), dtype=np.uint8)
+    >>> x[:] = np.arange(255)
+    >>> imsave('gradient.png', x)
+
+    Construct an array with three colour bands (R, G, B) and store to file:
+
+    >>> rgb = np.zeros((255, 255, 3), dtype=np.uint8)
+    >>> rgb[..., 0] = np.arange(255)
+    >>> rgb[..., 1] = 55
+    >>> rgb[..., 2] = 1 - np.arange(255)
+    >>> imsave('rgb_gradient.png', rgb)
+
+    """
+    im = toimage(arr, channel_axis=2)
+    if format is None:
+        im.save(name)
+    else:
+        im.save(name, format)
+    return
+
+
+@numpy.deprecate(message="`fromimage` is deprecated in SciPy 1.0.0, "
+                         "and will be removed in 1.2.0.\n"
+                         "Use ``np.asarray(im)`` instead.")
+def fromimage(im, flatten=False, mode=None):
+    """
+    Return a copy of a PIL image as a numpy array.
+
+    This function is only available if Python Imaging Library (PIL) is installed.
+
+    Parameters
+    ----------
+    im : PIL image
+        Input image.
+    flatten : bool
+        If true, convert the output to grey-scale.
+    mode : str, optional
+        Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
+        `imread` docstring for more details.
+
+    Returns
+    -------
+    fromimage : ndarray
+        The different colour bands/channels are stored in the
+        third dimension, such that a grey-image is MxN, an
+        RGB-image MxNx3 and an RGBA-image MxNx4.
+
+    """
+    if not Image.isImageType(im):
+        raise TypeError("Input is not a PIL image.")
+
+    if mode is not None:
+        if mode != im.mode:
+            im = im.convert(mode)
+    elif im.mode == 'P':
+        # Mode 'P' means there is an indexed "palette". If we leave the mode
+        # as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
+        # array containing the indices into the palette, and not a 3-D array
+        # containing the RGB or RGBA values.
+        if 'transparency' in im.info:
+            im = im.convert('RGBA')
+        else:
+            im = im.convert('RGB')
+
+    if flatten:
+        im = im.convert('F')
+    elif im.mode == '1':
+        # Workaround for crash in PIL. When im is 1-bit, the call array(im)
+        # can cause a seg. fault, or generate garbage. See
+        # https://github.com/scipy/scipy/issues/2138 and
+        # https://github.com/python-pillow/Pillow/issues/350.
+        #
+        # This converts im from a 1-bit image to an 8-bit image.
+        im = im.convert('L')
+
+    a = array(im)
+    return a
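+
+# Usage sketch for fromimage (assumes Pillow is available, as imported above;
+# not part of the upstream module): round-trip a small PIL image to an array.
+#
+#     >>> im = Image.new('L', (4, 3))   # width 4, height 3, grayscale
+#     >>> fromimage(im).shape           # rows x columns
+#     (3, 4)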
+
+
+_errstr = "Mode is unknown or incompatible with input array shape."
+
+
+@numpy.deprecate(message="`toimage` is deprecated in SciPy 1.0.0, "
+                         "and will be removed in 1.2.0.\n"
+                         "Use Pillow's ``Image.fromarray`` directly instead.")
+def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,
+            mode=None, channel_axis=None):
+    """Takes a numpy array and returns a PIL image.
+
+    This function is only available if Python Imaging Library (PIL) is installed.
+
+    The mode of the PIL image depends on the array shape and the `pal` and
+    `mode` keywords.
+
+    For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
+    (from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
+    is given as 'F' or 'I' in which case a float and/or integer array is made.
+
+    .. warning::
+
+        This function uses `bytescale` under the hood to rescale images to use
+        the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
+        It will also cast data for 2-D images to ``uint32`` for ``mode=None``
+        (which is the default).
+
+    Notes
+    -----
+    For 3-D arrays, the `channel_axis` argument tells which dimension of the
+    array holds the channel data.
+
+    For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
+    by default or 'YCbCr' if selected.
+
+    The numpy array must be either 2 dimensional or 3 dimensional.
+
+    """
+    data = asarray(arr)
+    if iscomplexobj(data):
+        raise ValueError("Cannot convert a complex-valued array.")
+    shape = list(data.shape)
+    valid = len(shape) == 2 or ((len(shape) == 3) and
+                                ((3 in shape) or (4 in shape)))
+    if not valid:
+        raise ValueError("'arr' does not have a suitable array shape for "
+                         "any mode.")
+    if len(shape) == 2:
+        shape = (shape[1], shape[0])  # columns show up first
+        if mode == 'F':
+            data32 = data.astype(numpy.float32)
+            image = Image.frombytes(mode, shape, data32.tostring())
+            return image
+        if mode in [None, 'L', 'P']:
+            bytedata = bytescale(data, high=high, low=low,
+                                 cmin=cmin, cmax=cmax)
+            image = Image.frombytes('L', shape, bytedata.tostring())
+            if pal is not None:
+                image.putpalette(asarray(pal, dtype=uint8).tostring())
+                # Becomes a mode='P' automagically.
+            elif mode == 'P':  # default gray-scale
+                pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] *
+                       ones((3,), dtype=uint8)[newaxis, :])
+                image.putpalette(asarray(pal, dtype=uint8).tostring())
+            return image
+        if mode == '1':  # high input gives threshold for 1
+            bytedata = (data > high)
+            image = Image.frombytes('1', shape, bytedata.tostring())
+            return image
+        if cmin is None:
+            cmin = amin(ravel(data))
+        if cmax is None:
+            cmax = amax(ravel(data))
+        data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low
+        if mode == 'I':
+            data32 = data.astype(numpy.uint32)
+            image = Image.frombytes(mode, shape, data32.tostring())
+        else:
+            raise ValueError(_errstr)
+        return image
+
+    # if here then 3-d array with a 3 or a 4 in the shape length.
+    # Check for 3 in datacube shape --- 'RGB' or 'YCbCr'
+    if channel_axis is None:
+        if (3 in shape):
+            ca = numpy.flatnonzero(asarray(shape) == 3)[0]
+        else:
+            ca = numpy.flatnonzero(asarray(shape) == 4)
+            if len(ca):
+                ca = ca[0]
+            else:
+                raise ValueError("Could not find channel dimension.")
+    else:
+        ca = channel_axis
+
+    numch = shape[ca]
+    if numch not in [3, 4]:
+        raise ValueError("Channel axis dimension is not valid.")
+
+    bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)
+    if ca == 2:
+        strdata = bytedata.tostring()
+        shape = (shape[1], shape[0])
+    elif ca == 1:
+        strdata = transpose(bytedata, (0, 2, 1)).tostring()
+        shape = (shape[2], shape[0])
+    elif ca == 0:
+        strdata = transpose(bytedata, (1, 2, 0)).tostring()
+        shape = (shape[2], shape[1])
+    if mode is None:
+        if numch == 3:
+            mode = 'RGB'
+        else:
+            mode = 'RGBA'
+
+    if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:
+        raise ValueError(_errstr)
+
+    if mode in ['RGB', 'YCbCr']:
+        if numch != 3:
+            raise ValueError("Invalid array shape for mode.")
+    if mode in ['RGBA', 'CMYK']:
+        if numch != 4:
+            raise ValueError("Invalid array shape for mode.")
+
+    # Here we know data and mode is correct
+    image = Image.frombytes(mode, shape, strdata)
+    return image
+
+
+@numpy.deprecate(message="`imrotate` is deprecated in SciPy 1.0.0, "
+                         "and will be removed in 1.2.0.\n"
+                         "Use ``skimage.transform.rotate`` instead.")
+def imrotate(arr, angle, interp='bilinear'):
+    """
+    Rotate an image counter-clockwise by angle degrees.
+
+    This function is only available if Python Imaging Library (PIL) is installed.
+
+    .. warning::
+
+        This function uses `bytescale` under the hood to rescale images to use
+        the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
+        It will also cast data for 2-D images to ``uint32`` for ``mode=None``
+        (which is the default).
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array of image to be rotated.
+    angle : float
+        The angle of rotation.
+    interp : str, optional
+        Interpolation
+
+        - 'nearest' :  for nearest neighbor
+        - 'bilinear' : for bilinear
+        - 'lanczos' : for lanczos
+        - 'cubic' : for bicubic
+        - 'bicubic' : for bicubic
+
+    Returns
+    -------
+    imrotate : ndarray
+        The rotated array of image.
+
+    """
+    arr = asarray(arr)
+    func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
+    im = toimage(arr)
+    im = im.rotate(angle, resample=func[interp])
+    return fromimage(im)
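+
+# Usage sketch for imrotate (illustrative, not part of the upstream module):
+# rotating the 512x512 `ascent` demo image by 90 degrees keeps its shape,
+# since PIL's rotate does not expand the canvas by default.
+#
+#     >>> from scipy.misc import ascent
+#     >>> imrotate(ascent(), 90).shape
+#     (512, 512)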
+
+
+@numpy.deprecate(message="`imshow` is deprecated in SciPy 1.0.0, "
+                         "and will be removed in 1.2.0.\n"
+                         "Use ``matplotlib.pyplot.imshow`` instead.")
+def imshow(arr):
+    """
+    Simple showing of an image through an external viewer.
+
+    This function is only available if Python Imaging Library (PIL) is installed.
+
+    Uses the image viewer specified by the environment variable
+    SCIPY_PIL_IMAGE_VIEWER, or if that is not defined then `see`,
+    to view a temporary file generated from array data.
+
+    .. warning::
+
+        This function uses `bytescale` under the hood to rescale images to use
+        the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
+        It will also cast data for 2-D images to ``uint32`` for ``mode=None``
+        (which is the default).
+
+    Parameters
+    ----------
+    arr : ndarray
+        Array of image data to show.
+
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    >>> a = np.tile(np.arange(255), (255, 1))
+    >>> from scipy import misc
+    >>> misc.imshow(a)
+
+    """
+    im = toimage(arr)
+    fnum, fname = tempfile.mkstemp('.png')
+    try:
+        im.save(fname)
+    except Exception:
+        raise RuntimeError("Error saving temporary image data.")
+
+    import os
+    os.close(fnum)
+
+    cmd = os.environ.get('SCIPY_PIL_IMAGE_VIEWER', 'see')
+    status = os.system("%s %s" % (cmd, fname))
+
+    os.unlink(fname)
+    if status != 0:
+        raise RuntimeError('Could not execute image viewer.')
+
+
+@numpy.deprecate(message="`imresize` is deprecated in SciPy 1.0.0, "
+                         "and will be removed in 1.3.0.\n"
+                         "Use Pillow instead: ``numpy.array(Image.fromarray(arr).resize())``.")
+def imresize(arr, size, interp='bilinear', mode=None):
+    """
+    Resize an image.
+
+    This function is only available if Python Imaging Library (PIL) is installed.
+
+    .. warning::
+
+        This function uses `bytescale` under the hood to rescale images to use
+        the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
+        It will also cast data for 2-D images to ``uint32`` for ``mode=None``
+        (which is the default).
+
+    Parameters
+    ----------
+    arr : ndarray
+        The array of image to be resized.
+    size : int, float or tuple
+        * int   - Percentage of current size.
+        * float - Fraction of current size.
+        * tuple - Size of the output image (height, width).
+
+    interp : str, optional
+        Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear',
+        'bicubic' or 'cubic').
+    mode : str, optional
+        The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing.
+        If ``mode=None`` (the default), 2-D images will be treated like
+        ``mode='L'``, i.e. casting to long integer. For 3-D and 4-D arrays,
+        `mode` will be set to ``'RGB'`` and ``'RGBA'`` respectively.
+
+    Returns
+    -------
+    imresize : ndarray
+        The resized array of image.
+
+    See Also
+    --------
+    toimage : Implicitly used to convert `arr` according to `mode`.
+    scipy.ndimage.zoom : More generic implementation that does not use PIL.
+
+    """
+    im = toimage(arr, mode=mode)
+    ts = type(size)
+    if issubdtype(ts, numpy.signedinteger):
+        percent = size / 100.0
+        size = tuple((array(im.size)*percent).astype(int))
+    elif issubdtype(type(size), numpy.floating):
+        size = tuple((array(im.size)*size).astype(int))
+    else:
+        size = (size[1], size[0])
+    func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
+    imnew = im.resize(size, resample=func[interp])
+    return fromimage(imnew)
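+
+# Usage sketch for imresize (illustrative, not part of the upstream module):
+# an integer `size` is read as a percentage of the current size.
+#
+#     >>> from scipy.misc import ascent
+#     >>> imresize(ascent(), 50).shape   # 50 percent of 512 x 512
+#     (256, 256)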
+
+
+@numpy.deprecate(message="`imfilter` is deprecated in SciPy 1.0.0, "
+                         "and will be removed in 1.2.0.\n"
+                         "Use Pillow filtering functionality directly.")
+def imfilter(arr, ftype):
+    """
+    Simple filtering of an image.
+
+    This function is only available if Python Imaging Library (PIL) is installed.
+
+    .. warning::
+
+        This function uses `bytescale` under the hood to rescale images to use
+        the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
+        It will also cast data for 2-D images to ``uint32`` for ``mode=None``
+        (which is the default).
+
+    Parameters
+    ----------
+    arr : ndarray
+        The array of Image in which the filter is to be applied.
+    ftype : str
+        The filter that has to be applied. Legal values are:
+        'blur', 'contour', 'detail', 'edge_enhance', 'edge_enhance_more',
+        'emboss', 'find_edges', 'smooth', 'smooth_more', 'sharpen'.
+
+    Returns
+    -------
+    imfilter : ndarray
+        The array with filter applied.
+
+    Raises
+    ------
+    ValueError
+        *Unknown filter type.* If the filter you are trying
+        to apply is unsupported.
+
+    """
+    _tdict = {'blur': ImageFilter.BLUR,
+              'contour': ImageFilter.CONTOUR,
+              'detail': ImageFilter.DETAIL,
+              'edge_enhance': ImageFilter.EDGE_ENHANCE,
+              'edge_enhance_more': ImageFilter.EDGE_ENHANCE_MORE,
+              'emboss': ImageFilter.EMBOSS,
+              'find_edges': ImageFilter.FIND_EDGES,
+              'smooth': ImageFilter.SMOOTH,
+              'smooth_more': ImageFilter.SMOOTH_MORE,
+              'sharpen': ImageFilter.SHARPEN
+              }
+
+    im = toimage(arr)
+    if ftype not in _tdict:
+        raise ValueError("Unknown filter type.")
+    return fromimage(im.filter(_tdict[ftype]))
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/pilutil.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/pilutil.pyc
new file mode 100644
index 0000000..062f12d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/pilutil.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/setup.py b/project/venv/lib/python2.7/site-packages/scipy/misc/setup.py
new file mode 100644
index 0000000..c8d9f20
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/misc/setup.py
@@ -0,0 +1,14 @@
+from __future__ import division, print_function, absolute_import
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('misc', parent_package, top_path)
+    config.add_data_files('*.dat')
+    config.add_data_dir('tests')
+    return config
+
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(**configuration(top_path='').todict())
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/setup.pyc
new file mode 100644
index 0000000..7cb52cc
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/setup.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/__init__.pyc
new file mode 100644
index 0000000..c6eeb2e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x3x3.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x3x3.png
new file mode 100644
index 0000000..554b7e6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x3x3.png differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x3x4.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x3x4.png
new file mode 100644
index 0000000..f84f80d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x3x4.png differ
a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x4x3.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x4x3.png new file mode 100644 index 0000000..c8164a0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x4x3.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x4x4.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x4x4.png new file mode 100644 index 0000000..893db6e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x4x4.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x5x3.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x5x3.png new file mode 100644 index 0000000..56fe687 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x5x3.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x5x4.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x5x4.png new file mode 100644 index 0000000..5f5b945 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/3x5x4.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x3x3.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x3x3.png new file mode 100644 index 0000000..14830b4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x3x3.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x3x4.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x3x4.png new file mode 100644 index 0000000..a33aa34 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x3x4.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x4x3.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x4x3.png new file mode 100644 index 0000000..5811d5a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x4x3.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x4x4.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x4x4.png new file mode 100644 index 0000000..337ab9a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x4x4.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x5x3.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x5x3.png new file mode 100644 index 0000000..c2192aa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x5x3.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x5x4.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x5x4.png new file mode 100644 index 0000000..287dcbb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/4x5x4.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x3x3.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x3x3.png new file mode 100644 index 0000000..7be5563 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x3x3.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x3x4.png 
b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x3x4.png new file mode 100644 index 0000000..595b182 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x3x4.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x4x3.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x4x3.png new file mode 100644 index 0000000..91df79a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x4x3.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x4x4.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x4x4.png new file mode 100644 index 0000000..f0d3fff Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x4x4.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x5x3.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x5x3.png new file mode 100644 index 0000000..318fa3b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x5x3.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x5x4.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x5x4.png new file mode 100644 index 0000000..1f24869 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/5x5x4.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/blocks2bit.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/blocks2bit.png new file mode 100644 index 0000000..95cacf5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/blocks2bit.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/box1.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/box1.png new file mode 100644 index 0000000..a5d1e9e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/box1.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/foo3x5x4indexed.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/foo3x5x4indexed.png new file mode 100644 index 0000000..cc969d0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/foo3x5x4indexed.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/icon.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/icon.png new file mode 100644 index 0000000..e9037e2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/icon.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/icon_mono.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/icon_mono.png new file mode 100644 index 0000000..612c9c6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/icon_mono.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/icon_mono_flat.png b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/icon_mono_flat.png new file mode 100644 index 0000000..c42b9a0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/icon_mono_flat.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/pattern4bit.png 
b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/pattern4bit.png new file mode 100644 index 0000000..58411fe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/data/pattern4bit.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_common.py b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_common.py new file mode 100644 index 0000000..feea580 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_common.py @@ -0,0 +1,40 @@ +from __future__ import division, print_function, absolute_import + +import pytest +from numpy.testing import assert_equal, assert_allclose, assert_almost_equal +from scipy._lib._numpy_compat import suppress_warnings + +from scipy.misc import pade, logsumexp, face, ascent, electrocardiogram +from scipy.special import logsumexp as sc_logsumexp + + +def test_logsumexp(): + # make sure logsumexp can be imported from either scipy.misc or + # scipy.special + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "`logsumexp` is deprecated") + assert_allclose(logsumexp([0, 1]), sc_logsumexp([0, 1]), atol=1e-16) + + +def test_pade(): + # make sure scipy.misc.pade exists + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "`pade` is deprecated") + pade([1, 2], 1) + + +def test_face(): + assert_equal(face().shape, (768, 1024, 3)) + + +def test_ascent(): + assert_equal(ascent().shape, (512, 512)) + + +def test_electrocardiogram(): + # Test shape, dtype and stats of signal + ecg = electrocardiogram() + assert ecg.dtype == float + assert_equal(ecg.shape, (108000,)) + assert_almost_equal(ecg.mean(), -0.16510875) + assert_almost_equal(ecg.std(), 0.5992473991177294) diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_common.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_common.pyc new file mode 100644 index 0000000..0d0790f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_common.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_doccer.py b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_doccer.py new file mode 100644 index 0000000..1f6c5a4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_doccer.py @@ -0,0 +1,125 @@ +''' Some tests for the documenting decorator and support functions ''' + +from __future__ import division, print_function, absolute_import + +import sys +import pytest +from numpy.testing import assert_equal + +from scipy.misc import doccer + +# python -OO strips docstrings +DOCSTRINGS_STRIPPED = sys.flags.optimize > 1 + +docstring = \ +"""Docstring + %(strtest1)s + %(strtest2)s + %(strtest3)s +""" +param_doc1 = \ +"""Another test + with some indent""" + +param_doc2 = \ +"""Another test, one line""" + +param_doc3 = \ +""" Another test + with some indent""" + +doc_dict = {'strtest1':param_doc1, + 'strtest2':param_doc2, + 'strtest3':param_doc3} + +filled_docstring = \ +"""Docstring + Another test + with some indent + Another test, one line + Another test + with some indent +""" + + +def test_unindent(): + assert_equal(doccer.unindent_string(param_doc1), param_doc1) + assert_equal(doccer.unindent_string(param_doc2), param_doc2) + assert_equal(doccer.unindent_string(param_doc3), param_doc1) + + +def test_unindent_dict(): + d2 = doccer.unindent_dict(doc_dict) + assert_equal(d2['strtest1'], doc_dict['strtest1']) + assert_equal(d2['strtest2'], 
doc_dict['strtest2']) + assert_equal(d2['strtest3'], doc_dict['strtest1']) + + +def test_docformat(): + udd = doccer.unindent_dict(doc_dict) + formatted = doccer.docformat(docstring, udd) + assert_equal(formatted, filled_docstring) + single_doc = 'Single line doc %(strtest1)s' + formatted = doccer.docformat(single_doc, doc_dict) + # Note - initial indent of format string does not + # affect subsequent indent of inserted parameter + assert_equal(formatted, """Single line doc Another test + with some indent""") + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") +def test_decorator(): + # with unindentation of parameters + decorator = doccer.filldoc(doc_dict, True) + + @decorator + def func(): + """ Docstring + %(strtest3)s + """ + assert_equal(func.__doc__, """ Docstring + Another test + with some indent + """) + + # without unindentation of parameters + decorator = doccer.filldoc(doc_dict, False) + + @decorator + def func(): + """ Docstring + %(strtest3)s + """ + assert_equal(func.__doc__, """ Docstring + Another test + with some indent + """) + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") +def test_inherit_docstring_from(): + + class Foo(object): + def func(self): + '''Do something useful.''' + return + + def func2(self): + '''Something else.''' + + class Bar(Foo): + @doccer.inherit_docstring_from(Foo) + def func(self): + '''%(super)sABC''' + return + + @doccer.inherit_docstring_from(Foo) + def func2(self): + # No docstring. + return + + assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC') + assert_equal(Bar.func2.__doc__, Foo.func2.__doc__) + bar = Bar() + assert_equal(bar.func.__doc__, Foo.func.__doc__ + 'ABC') + assert_equal(bar.func2.__doc__, Foo.func2.__doc__) diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_doccer.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_doccer.pyc new file mode 100644 index 0000000..6d8c216 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_doccer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_pilutil.py b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_pilutil.py new file mode 100644 index 0000000..a5a2153 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_pilutil.py @@ -0,0 +1,283 @@ +from __future__ import division, print_function, absolute_import + +import os.path +import tempfile +import shutil +import numpy as np +import glob + +import pytest +from pytest import raises as assert_raises +from numpy.testing import (assert_equal, assert_allclose, + assert_array_equal, assert_) +from scipy._lib._numpy_compat import suppress_warnings +from scipy import misc +from numpy.ma.testutils import assert_mask_equal + +try: + import PIL.Image +except ImportError: + _have_PIL = False +else: + _have_PIL = True + + +# Function / method decorator for skipping PIL tests on import failure +_pilskip = pytest.mark.skipif(not _have_PIL, reason='Need to import PIL for this test') + +datapath = os.path.dirname(__file__) + +@_pilskip +class TestPILUtil(object): + def test_imresize(self): + im = np.random.random((10, 20)) + for T in np.sctypes['float'] + [float]: + # 1.1 rounds to below 1.1 for float16, 1.101 works + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + im1 = misc.imresize(im, T(1.101)) + assert_equal(im1.shape, (11, 22)) + + def test_imresize2(self): + im = np.random.random((20, 30)) + with suppress_warnings() as sup: + 
sup.filter(DeprecationWarning) + im2 = misc.imresize(im, (30, 40), interp='bicubic') + assert_equal(im2.shape, (30, 40)) + + def test_imresize3(self): + im = np.random.random((15, 30)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + im2 = misc.imresize(im, (30, 60), interp='nearest') + assert_equal(im2.shape, (30, 60)) + + def test_imresize4(self): + im = np.array([[1, 2], + [3, 4]]) + # Check that resizing by target size, float and int are the same + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + im2 = misc.imresize(im, (4, 4), mode='F') # output size + im3 = misc.imresize(im, 2., mode='F') # fraction + im4 = misc.imresize(im, 200, mode='F') # percentage + assert_equal(im2, im3) + assert_equal(im2, im4) + + def test_imresize5(self): + im = np.random.random((25, 15)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + im2 = misc.imresize(im, (30, 60), interp='lanczos') + assert_equal(im2.shape, (30, 60)) + + def test_bytescale(self): + x = np.array([0, 1, 2], np.uint8) + y = np.array([0, 1, 2]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + assert_equal(misc.bytescale(x), x) + assert_equal(misc.bytescale(y), [0, 128, 255]) + + def test_bytescale_keywords(self): + x = np.array([40, 60, 120, 200, 300, 500]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + res_lowhigh = misc.bytescale(x, low=10, high=143) + assert_equal(res_lowhigh, [10, 16, 33, 56, 85, 143]) + res_cmincmax = misc.bytescale(x, cmin=60, cmax=300) + assert_equal(res_cmincmax, [0, 0, 64, 149, 255, 255]) + assert_equal(misc.bytescale(np.array([3, 3, 3]), low=4), [4, 4, 4]) + + def test_bytescale_cscale_lowhigh(self): + a = np.arange(10) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + actual = misc.bytescale(a, cmin=3, cmax=6, low=100, high=200) + expected = [100, 100, 100, 100, 133, 167, 200, 200, 200, 200] + assert_equal(actual, expected) + + def test_bytescale_mask(self): + a = np.ma.MaskedArray(data=[1, 2, 3], mask=[False, False, True]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + actual = misc.bytescale(a) + expected = [0, 255, 3] + assert_equal(expected, actual) + assert_mask_equal(a.mask, actual.mask) + assert_(isinstance(actual, np.ma.MaskedArray)) + + def test_bytescale_rounding(self): + a = np.array([-0.5, 0.5, 1.5, 2.5, 3.5]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + actual = misc.bytescale(a, cmin=0, cmax=10, low=0, high=10) + expected = [0, 1, 2, 3, 4] + assert_equal(actual, expected) + + def test_bytescale_low_greaterthan_high(self): + with assert_raises(ValueError): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + misc.bytescale(np.arange(3), low=10, high=5) + + def test_bytescale_low_lessthan_0(self): + with assert_raises(ValueError): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + misc.bytescale(np.arange(3), low=-1) + + def test_bytescale_high_greaterthan_255(self): + with assert_raises(ValueError): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + misc.bytescale(np.arange(3), high=256) + + def test_bytescale_low_equals_high(self): + a = np.arange(3) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + actual = misc.bytescale(a, low=10, high=10) + expected = [10, 10, 10] + assert_equal(actual, expected) + + def test_imsave(self): + picdir = os.path.join(datapath, "data") + for png in glob.iglob(picdir + "/*.png"): + with suppress_warnings() as sup: + # PIL 
causes a Py3k ResourceWarning + sup.filter(message="unclosed file") + sup.filter(DeprecationWarning) + img = misc.imread(png) + tmpdir = tempfile.mkdtemp() + try: + fn1 = os.path.join(tmpdir, 'test.png') + fn2 = os.path.join(tmpdir, 'testimg') + with suppress_warnings() as sup: + # PIL causes a Py3k ResourceWarning + sup.filter(message="unclosed file") + sup.filter(DeprecationWarning) + misc.imsave(fn1, img) + misc.imsave(fn2, img, 'PNG') + + with suppress_warnings() as sup: + # PIL causes a Py3k ResourceWarning + sup.filter(message="unclosed file") + sup.filter(DeprecationWarning) + data1 = misc.imread(fn1) + data2 = misc.imread(fn2) + assert_allclose(data1, img) + assert_allclose(data2, img) + assert_equal(data1.shape, img.shape) + assert_equal(data2.shape, img.shape) + finally: + shutil.rmtree(tmpdir) + + +def check_fromimage(filename, irange, shape): + fp = open(filename, "rb") + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + img = misc.fromimage(PIL.Image.open(fp)) + fp.close() + imin, imax = irange + assert_equal(img.min(), imin) + assert_equal(img.max(), imax) + assert_equal(img.shape, shape) + + +@_pilskip +def test_fromimage(): + # Test generator for parametric tests + # Tuples in the list are (filename, (datamin, datamax), shape). + files = [('icon.png', (0, 255), (48, 48, 4)), + ('icon_mono.png', (0, 255), (48, 48, 4)), + ('icon_mono_flat.png', (0, 255), (48, 48, 3))] + for fn, irange, shape in files: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + check_fromimage(os.path.join(datapath, 'data', fn), irange, shape) + + +@_pilskip +def test_imread_indexed_png(): + # The file `foo3x5x4indexed.png` was created with this array + # (3x5 is (height)x(width)): + data = np.array([[[127, 0, 255, 255], + [127, 0, 255, 255], + [127, 0, 255, 255], + [127, 0, 255, 255], + [127, 0, 255, 255]], + [[192, 192, 255, 0], + [192, 192, 255, 0], + [0, 0, 255, 0], + [0, 0, 255, 0], + [0, 0, 255, 0]], + [[0, 31, 255, 255], + [0, 31, 255, 255], + [0, 31, 255, 255], + [0, 31, 255, 255], + [0, 31, 255, 255]]], dtype=np.uint8) + + filename = os.path.join(datapath, 'data', 'foo3x5x4indexed.png') + with open(filename, 'rb') as f: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + im = misc.imread(f) + assert_array_equal(im, data) + + +@_pilskip +def test_imread_1bit(): + # box1.png is a 48x48 grayscale image with bit depth 1. + # The border pixels are 1 and the rest are 0. + filename = os.path.join(datapath, 'data', 'box1.png') + with open(filename, 'rb') as f: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + im = misc.imread(f) + assert_equal(im.dtype, np.uint8) + expected = np.zeros((48, 48), dtype=np.uint8) + # When scaled up from 1 bit to 8 bits, 1 becomes 255. + expected[:, 0] = 255 + expected[:, -1] = 255 + expected[0, :] = 255 + expected[-1, :] = 255 + assert_equal(im, expected) + + +@_pilskip +def test_imread_2bit(): + # blocks2bit.png is a 12x12 grayscale image with bit depth 2. + # The pattern is 4 square subblocks of size 6x6. Upper left + # is all 0, upper right is all 1, lower left is all 2, lower + # right is all 3. + # When scaled up to 8 bits, the values become [0, 85, 170, 255]. 
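+    # (A 2-bit sample s lies in 0..3; widening to 8 bits multiplies it by
+    # 255 // 3 == 85, so s maps to 85*s: 0, 85, 170 and 255.)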
+    filename = os.path.join(datapath, 'data', 'blocks2bit.png')
+    with open(filename, 'rb') as f:
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            im = misc.imread(f)
+    assert_equal(im.dtype, np.uint8)
+    expected = np.zeros((12, 12), dtype=np.uint8)
+    expected[:6, 6:] = 85
+    expected[6:, :6] = 170
+    expected[6:, 6:] = 255
+    assert_equal(im, expected)
+
+
+@_pilskip
+def test_imread_4bit():
+    # pattern4bit.png is a 12(h) x 31(w) grayscale image with bit depth 4.
+    # The value in row j and column i is maximum(j, i) % 16.
+    # When scaled up to 8 bits, the values become [0, 17, 34, ..., 255].
+    filename = os.path.join(datapath, 'data', 'pattern4bit.png')
+    with open(filename, 'rb') as f:
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            im = misc.imread(f)
+    assert_equal(im.dtype, np.uint8)
+    j, i = np.meshgrid(np.arange(12), np.arange(31), indexing='ij')
+    expected = 17*(np.maximum(j, i) % 16).astype(np.uint8)
+    assert_equal(im, expected)
+
diff --git a/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_pilutil.pyc b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_pilutil.pyc
new file mode 100644
index 0000000..8277f40
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/misc/tests/test_pilutil.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/__init__.py
new file mode 100644
index 0000000..233fee0
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/__init__.py
@@ -0,0 +1,174 @@
+"""
+=========================================================
+Multi-dimensional image processing (:mod:`scipy.ndimage`)
+=========================================================
+
+.. currentmodule:: scipy.ndimage
+
+This package contains various functions for multi-dimensional image
+processing.
+
+
+Filters
+=======
+
+.. autosummary::
+   :toctree: generated/
+
+   convolve - Multi-dimensional convolution
+   convolve1d - 1-D convolution along the given axis
+   correlate - Multi-dimensional correlation
+   correlate1d - 1-D correlation along the given axis
+   gaussian_filter
+   gaussian_filter1d
+   gaussian_gradient_magnitude
+   gaussian_laplace
+   generic_filter - Multi-dimensional filter using a given function
+   generic_filter1d - 1-D generic filter along the given axis
+   generic_gradient_magnitude
+   generic_laplace
+   laplace - n-D Laplace filter based on approximate second derivatives
+   maximum_filter
+   maximum_filter1d
+   median_filter - Calculates a multi-dimensional median filter
+   minimum_filter
+   minimum_filter1d
+   percentile_filter - Calculates a multi-dimensional percentile filter
+   prewitt
+   rank_filter - Calculates a multi-dimensional rank filter
+   sobel
+   uniform_filter - Multi-dimensional uniform filter
+   uniform_filter1d - 1-D uniform filter along the given axis
+
+Fourier filters
+===============
+
+.. autosummary::
+   :toctree: generated/
+
+   fourier_ellipsoid
+   fourier_gaussian
+   fourier_shift
+   fourier_uniform
+
+Interpolation
+=============
+
+.. autosummary::
+   :toctree: generated/
+
+   affine_transform - Apply an affine transformation
+   geometric_transform - Apply an arbitrary geometric transform
+   map_coordinates - Map input array to new coordinates by interpolation
+   rotate - Rotate an array
+   shift - Shift an array
+   spline_filter
+   spline_filter1d
+   zoom - Zoom an array
+
+Measurements
+============
+
+..
autosummary:: + :toctree: generated/ + + center_of_mass - The center of mass of the values of an array at labels + extrema - Min's and max's of an array at labels, with their positions + find_objects - Find objects in a labeled array + histogram - Histogram of the values of an array, optionally at labels + label - Label features in an array + labeled_comprehension + maximum + maximum_position + mean - Mean of the values of an array at labels + median + minimum + minimum_position + standard_deviation - Standard deviation of an n-D image array + sum - Sum of the values of the array + variance - Variance of the values of an n-D image array + watershed_ift + +Morphology +========== + +.. autosummary:: + :toctree: generated/ + + binary_closing + binary_dilation + binary_erosion + binary_fill_holes + binary_hit_or_miss + binary_opening + binary_propagation + black_tophat + distance_transform_bf + distance_transform_cdt + distance_transform_edt + generate_binary_structure + grey_closing + grey_dilation + grey_erosion + grey_opening + iterate_structure + morphological_gradient + morphological_laplace + white_tophat + +Utility +======= + +.. autosummary:: + :toctree: generated/ + + imread - Load an image from a file + +""" + +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
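+
+# A taste of the API summarized in the docstring above (a minimal,
+# illustrative sketch; `label` and `center_of_mass` are listed under
+# "Measurements"):
+#
+#     import numpy as np
+#     from scipy import ndimage
+#     a = np.array([[0, 1, 1, 0],
+#                   [0, 0, 1, 0],
+#                   [1, 0, 0, 0]])
+#     labeled, n = ndimage.label(a)          # n == 2 connected features
+#     ndimage.center_of_mass(a, labeled, [1, 2])
+#     # -> [(1/3, 5/3), (2.0, 0.0)] : one centroid per labeled feature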
+ +from __future__ import division, print_function, absolute_import + +from .filters import * +from .fourier import * +from .interpolation import * +from .measurements import * +from .morphology import * +from .io import * + +__version__ = '2.0' + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/__init__.pyc new file mode 100644 index 0000000..55305ed Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ctest.so b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ctest.so new file mode 100755 index 0000000..5e76abf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ctest.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ctest_oldapi.so b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ctest_oldapi.so new file mode 100755 index 0000000..4f25b48 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ctest_oldapi.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/_cytest.so b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_cytest.so new file mode 100755 index 0000000..441d89a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_cytest.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/_nd_image.so b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_nd_image.so new file mode 100755 index 0000000..654052e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_nd_image.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_docstrings.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_docstrings.py new file mode 100644 index 0000000..ec7572c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_docstrings.py @@ -0,0 +1,130 @@ +"""Docstring components common to several ndimage functions.""" +from __future__ import division, print_function, absolute_import + +from scipy.misc import doccer + +__all__ = ['docfiller'] + + +_input_doc = ( +"""input : array_like + The input array.""") +_axis_doc = ( +"""axis : int, optional + The axis of `input` along which to calculate. Default is -1.""") +_output_doc = ( +"""output : array or dtype, optional + The array in which to place the output, or the dtype of the + returned array. By default an array of the same dtype as input + will be created.""") +_size_foot_doc = ( +"""size : scalar or tuple, optional + See footprint, below. Ignored if footprint is given. +footprint : array, optional + Either `size` or `footprint` must be defined. `size` gives + the shape that is taken from the input array, at every element + position, to define the input to the filter function. + `footprint` is a boolean array that specifies (implicitly) a + shape, but also which of the elements within this shape will get + passed to the filter function. Thus ``size=(n,m)`` is equivalent + to ``footprint=np.ones((n,m))``. We adjust `size` to the number + of dimensions of the input array, so that, if the input array is + shape (10,10,10), and `size` is 2, then the actual size used is + (2,2,2). 
When `footprint` is given, `size` is ignored.""") +_mode_doc = ( +"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the input array is extended + beyond its boundaries. Default is 'reflect'. Behavior for each valid + value is as follows: + + 'reflect' (`d c b a | a b c d | d c b a`) + The input is extended by reflecting about the edge of the last + pixel. + + 'constant' (`k k k k | a b c d | k k k k`) + The input is extended by filling all values beyond the edge with + the same constant value, defined by the `cval` parameter. + + 'nearest' (`a a a a | a b c d | d d d d`) + The input is extended by replicating the last pixel. + + 'mirror' (`d c b | a b c d | c b a`) + The input is extended by reflecting about the center of the last + pixel. + + 'wrap' (`a b c d | a b c d | a b c d`) + The input is extended by wrapping around to the opposite edge.""") +_mode_multiple_doc = ( +"""mode : str or sequence, optional + The `mode` parameter determines how the input array is extended + when the filter overlaps a border. By passing a sequence of modes + with length equal to the number of dimensions of the input array, + different modes can be specified along each axis. Default value is + 'reflect'. The valid values and their behavior is as follows: + + 'reflect' (`d c b a | a b c d | d c b a`) + The input is extended by reflecting about the edge of the last + pixel. + + 'constant' (`k k k k | a b c d | k k k k`) + The input is extended by filling all values beyond the edge with + the same constant value, defined by the `cval` parameter. + + 'nearest' (`a a a a | a b c d | d d d d`) + The input is extended by replicating the last pixel. + + 'mirror' (`d c b | a b c d | c b a`) + The input is extended by reflecting about the center of the last + pixel. + + 'wrap' (`a b c d | a b c d | a b c d`) + The input is extended by wrapping around to the opposite edge.""") +_cval_doc = ( +"""cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0.""") +_origin_doc = ( +"""origin : int, optional + Controls the placement of the filter on the input array's pixels. + A value of 0 (the default) centers the filter over the pixel, with + positive values shifting the filter to the left, and negative ones + to the right.""") +_origin_multiple_doc = ( +"""origin : int or sequence, optional + Controls the placement of the filter on the input array's pixels. + A value of 0 (the default) centers the filter over the pixel, with + positive values shifting the filter to the left, and negative ones + to the right. By passing a sequence of origins with length equal to + the number of dimensions of the input array, different shifts can + be specified along each axis.""") +_extra_arguments_doc = ( +"""extra_arguments : sequence, optional + Sequence of extra positional arguments to pass to passed function.""") +_extra_keywords_doc = ( +"""extra_keywords : dict, optional + dict of extra keyword arguments to pass to passed function.""") +_prefilter_doc = ( +"""prefilter : bool, optional + Determines if the input array is prefiltered with `spline_filter` + before interpolation. The default is True, which will create a + temporary `float64` array of filtered values if `order > 1`. If + setting this to False, the output will be slightly blurred if + `order > 1`, unless the input is prefiltered, i.e. 
it is the result + of calling `spline_filter` on the original input.""") + +docdict = { + 'input': _input_doc, + 'axis': _axis_doc, + 'output': _output_doc, + 'size_foot': _size_foot_doc, + 'mode': _mode_doc, + 'mode_multiple': _mode_multiple_doc, + 'cval': _cval_doc, + 'origin': _origin_doc, + 'origin_multiple': _origin_multiple_doc, + 'extra_arguments': _extra_arguments_doc, + 'extra_keywords': _extra_keywords_doc, + 'prefilter': _prefilter_doc + } + +docfiller = doccer.filldoc(docdict) diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_docstrings.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_docstrings.pyc new file mode 100644 index 0000000..c3b83ea Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_docstrings.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_label.so b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_label.so new file mode 100755 index 0000000..e05b354 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_label.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_support.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_support.py new file mode 100644 index 0000000..202e386 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_support.py @@ -0,0 +1,91 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import division, print_function, absolute_import + +import numpy + +from scipy._lib.six import string_types + + +def _extend_mode_to_code(mode): + """Convert an extension mode to the corresponding integer code. + """ + if mode == 'nearest': + return 0 + elif mode == 'wrap': + return 1 + elif mode == 'reflect': + return 2 + elif mode == 'mirror': + return 3 + elif mode == 'constant': + return 4 + else: + raise RuntimeError('boundary mode not supported') + + +def _normalize_sequence(input, rank): + """If input is a scalar, create a sequence of length equal to the + rank by duplicating the input. 
If input is a sequence, + check if its length is equal to the length of array. + """ + is_str = isinstance(input, string_types) + if hasattr(input, '__iter__') and not is_str: + normalized = list(input) + if len(normalized) != rank: + err = "sequence argument must have length equal to input rank" + raise RuntimeError(err) + else: + normalized = [input] * rank + return normalized + + +def _get_output(output, input, shape=None): + if shape is None: + shape = input.shape + if output is None: + output = numpy.zeros(shape, dtype=input.dtype.name) + elif type(output) in [type(type), type(numpy.zeros((4,)).dtype)]: + output = numpy.zeros(shape, dtype=output) + elif isinstance(output, string_types): + output = numpy.typeDict[output] + output = numpy.zeros(shape, dtype=output) + elif output.shape != shape: + raise RuntimeError("output shape not correct") + return output + + +def _check_axis(axis, rank): + if axis < 0: + axis += rank + if axis < 0 or axis >= rank: + raise ValueError('invalid axis') + return axis diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_support.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_support.pyc new file mode 100644 index 0000000..663a41e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/_ni_support.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/filters.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/filters.py new file mode 100644 index 0000000..5687218 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/filters.py @@ -0,0 +1,1438 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import division, print_function, absolute_import +import warnings +import numbers +import numpy +import operator +from . import _ni_support +from . import _nd_image +from . 
import _ni_docstrings + +__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', + 'prewitt', 'sobel', 'generic_laplace', 'laplace', + 'gaussian_laplace', 'generic_gradient_magnitude', + 'gaussian_gradient_magnitude', 'correlate', 'convolve', + 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', + 'maximum_filter1d', 'minimum_filter', 'maximum_filter', + 'rank_filter', 'median_filter', 'percentile_filter', + 'generic_filter1d', 'generic_filter'] + + +def _invalid_origin(origin, lenw): + return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2) + + +@_ni_docstrings.docfiller +def correlate1d(input, weights, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Calculate a one-dimensional correlation along the given axis. + + The lines of the array along the given axis are correlated with the + given weights. + + Parameters + ---------- + %(input)s + weights : array + One-dimensional sequence of numbers. + %(axis)s + %(output)s + %(mode)s + %(cval)s + %(origin)s + + Examples + -------- + >>> from scipy.ndimage import correlate1d + >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) + array([ 8, 26, 8, 12, 7, 28, 36, 9]) + """ + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + output = _ni_support._get_output(output, input) + weights = numpy.asarray(weights, dtype=numpy.float64) + if weights.ndim != 1 or weights.shape[0] < 1: + raise RuntimeError('no filter weights given') + if not weights.flags.contiguous: + weights = weights.copy() + axis = _ni_support._check_axis(axis, input.ndim) + if _invalid_origin(origin, len(weights)): + raise ValueError('Invalid origin; origin must satisfy ' + '-(len(weights) // 2) <= origin <= ' + '(len(weights)-1) // 2') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.correlate1d(input, weights, axis, output, mode, cval, + origin) + return output + + +@_ni_docstrings.docfiller +def convolve1d(input, weights, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Calculate a one-dimensional convolution along the given axis. + + The lines of the array along the given axis are convolved with the + given weights. + + Parameters + ---------- + %(input)s + weights : ndarray + One-dimensional sequence of numbers. + %(axis)s + %(output)s + %(mode)s + %(cval)s + %(origin)s + + Returns + ------- + convolve1d : ndarray + Convolved array with same shape as input + + Examples + -------- + >>> from scipy.ndimage import convolve1d + >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) + array([14, 24, 4, 13, 12, 36, 27, 0]) + """ + weights = weights[::-1] + origin = -origin + if not len(weights) & 1: + origin -= 1 + return correlate1d(input, weights, axis, output, mode, cval, origin) + + +def _gaussian_kernel1d(sigma, order, radius): + """ + Computes a 1D Gaussian convolution kernel. 
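+
+    For ``order == 0`` the weights are a normalized Gaussian and sum to
+    one; each higher order multiplies in the polynomial ``q`` built by
+    the recurrence noted in the code below. A minimal sanity check
+    (illustrative only):
+
+    >>> import numpy as np
+    >>> k = _gaussian_kernel1d(1.0, order=0, radius=2)
+    >>> bool(np.isclose(k.sum(), 1.0))
+    True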
+ """ + if order < 0: + raise ValueError('order must be non-negative') + p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)]) + x = numpy.arange(-radius, radius + 1) + phi_x = numpy.exp(p(x), dtype=numpy.double) + phi_x /= phi_x.sum() + if order > 0: + q = numpy.polynomial.Polynomial([1]) + p_deriv = p.deriv() + for _ in range(order): + # f(x) = q(x) * phi(x) = q(x) * exp(p(x)) + # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x) + q = q.deriv() + q * p_deriv + phi_x *= q(x) + return phi_x + + +@_ni_docstrings.docfiller +def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, + mode="reflect", cval=0.0, truncate=4.0): + """One-dimensional Gaussian filter. + + Parameters + ---------- + %(input)s + sigma : scalar + standard deviation for Gaussian kernel + %(axis)s + order : int, optional + An order of 0 corresponds to convolution with a Gaussian + kernel. A positive order corresponds to convolution with + that derivative of a Gaussian. + %(output)s + %(mode)s + %(cval)s + truncate : float, optional + Truncate the filter at this many standard deviations. + Default is 4.0. + + Returns + ------- + gaussian_filter1d : ndarray + + Examples + -------- + >>> from scipy.ndimage import gaussian_filter1d + >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1) + array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905]) + >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4) + array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657]) + >>> import matplotlib.pyplot as plt + >>> np.random.seed(280490) + >>> x = np.random.randn(101).cumsum() + >>> y3 = gaussian_filter1d(x, 3) + >>> y6 = gaussian_filter1d(x, 6) + >>> plt.plot(x, 'k', label='original data') + >>> plt.plot(y3, '--', label='filtered, sigma=3') + >>> plt.plot(y6, ':', label='filtered, sigma=6') + >>> plt.legend() + >>> plt.grid() + >>> plt.show() + """ + sd = float(sigma) + # make the radius of the filter equal to truncate standard deviations + lw = int(truncate * sd + 0.5) + # Since we are calling correlate, not convolve, revert the kernel + weights = _gaussian_kernel1d(sigma, order, lw)[::-1] + return correlate1d(input, weights, axis, output, mode, cval, 0) + + +@_ni_docstrings.docfiller +def gaussian_filter(input, sigma, order=0, output=None, + mode="reflect", cval=0.0, truncate=4.0): + """Multidimensional Gaussian filter. + + Parameters + ---------- + %(input)s + sigma : scalar or sequence of scalars + Standard deviation for Gaussian kernel. The standard + deviations of the Gaussian filter are given for each axis as a + sequence, or as a single number, in which case it is equal for + all axes. + order : int or sequence of ints, optional + The order of the filter along each axis is given as a sequence + of integers, or as a single number. An order of 0 corresponds + to convolution with a Gaussian kernel. A positive order + corresponds to convolution with that derivative of a Gaussian. + %(output)s + %(mode_multiple)s + %(cval)s + truncate : float + Truncate the filter at this many standard deviations. + Default is 4.0. + + Returns + ------- + gaussian_filter : ndarray + Returned array of same shape as `input`. + + Notes + ----- + The multidimensional filter is implemented as a sequence of + one-dimensional convolution filters. The intermediate arrays are + stored in the same data type as the output. Therefore, for output + types with a limited precision, the results may be imprecise + because intermediate results may be stored with insufficient + precision. 
+ + Examples + -------- + >>> from scipy.ndimage import gaussian_filter + >>> a = np.arange(50, step=2).reshape((5,5)) + >>> a + array([[ 0, 2, 4, 6, 8], + [10, 12, 14, 16, 18], + [20, 22, 24, 26, 28], + [30, 32, 34, 36, 38], + [40, 42, 44, 46, 48]]) + >>> gaussian_filter(a, sigma=1) + array([[ 4, 6, 8, 9, 11], + [10, 12, 14, 15, 17], + [20, 22, 24, 25, 27], + [29, 31, 33, 34, 36], + [35, 37, 39, 40, 42]]) + + >>> from scipy import misc + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = misc.ascent() + >>> result = gaussian_filter(ascent, sigma=5) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = numpy.asarray(input) + output = _ni_support._get_output(output, input) + orders = _ni_support._normalize_sequence(order, input.ndim) + sigmas = _ni_support._normalize_sequence(sigma, input.ndim) + modes = _ni_support._normalize_sequence(mode, input.ndim) + axes = list(range(input.ndim)) + axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii]) + for ii in range(len(axes)) if sigmas[ii] > 1e-15] + if len(axes) > 0: + for axis, sigma, order, mode in axes: + gaussian_filter1d(input, sigma, axis, order, output, + mode, cval, truncate) + input = output + else: + output[...] = input[...] + return output + + +@_ni_docstrings.docfiller +def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0): + """Calculate a Prewitt filter. + + Parameters + ---------- + %(input)s + %(axis)s + %(output)s + %(mode_multiple)s + %(cval)s + + Examples + -------- + >>> from scipy import ndimage, misc + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = misc.ascent() + >>> result = ndimage.prewitt(ascent) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = numpy.asarray(input) + axis = _ni_support._check_axis(axis, input.ndim) + output = _ni_support._get_output(output, input) + modes = _ni_support._normalize_sequence(mode, input.ndim) + correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) + axes = [ii for ii in range(input.ndim) if ii != axis] + for ii in axes: + correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,) + return output + + +@_ni_docstrings.docfiller +def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0): + """Calculate a Sobel filter. 
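+
+    The given `axis` is filtered with ``[-1, 0, 1]`` and every other
+    axis is smoothed with ``[1, 2, 1]``, so in 2-D this is the classic
+    3x3 Sobel kernel (a minimal cross-check, assuming float input):
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> x = np.random.rand(8, 8)
+    >>> k = np.outer([-1, 0, 1], [1, 2, 1])
+    >>> bool(np.allclose(ndimage.sobel(x, axis=0, mode='constant'),
+    ...                  ndimage.correlate(x, k, mode='constant')))
+    True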
+ + Parameters + ---------- + %(input)s + %(axis)s + %(output)s + %(mode_multiple)s + %(cval)s + + Examples + -------- + >>> from scipy import ndimage, misc + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = misc.ascent() + >>> result = ndimage.sobel(ascent) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = numpy.asarray(input) + axis = _ni_support._check_axis(axis, input.ndim) + output = _ni_support._get_output(output, input) + modes = _ni_support._normalize_sequence(mode, input.ndim) + correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) + axes = [ii for ii in range(input.ndim) if ii != axis] + for ii in axes: + correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0) + return output + + +@_ni_docstrings.docfiller +def generic_laplace(input, derivative2, output=None, mode="reflect", + cval=0.0, + extra_arguments=(), + extra_keywords=None): + """ + N-dimensional Laplace filter using a provided second derivative function. + + Parameters + ---------- + %(input)s + derivative2 : callable + Callable with the following signature:: + + derivative2(input, axis, output, mode, cval, + *extra_arguments, **extra_keywords) + + See `extra_arguments`, `extra_keywords` below. + %(output)s + %(mode_multiple)s + %(cval)s + %(extra_keywords)s + %(extra_arguments)s + """ + if extra_keywords is None: + extra_keywords = {} + input = numpy.asarray(input) + output = _ni_support._get_output(output, input) + axes = list(range(input.ndim)) + if len(axes) > 0: + modes = _ni_support._normalize_sequence(mode, len(axes)) + derivative2(input, axes[0], output, modes[0], cval, + *extra_arguments, **extra_keywords) + for ii in range(1, len(axes)): + tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval, + *extra_arguments, **extra_keywords) + output += tmp + else: + output[...] = input[...] + return output + + +@_ni_docstrings.docfiller +def laplace(input, output=None, mode="reflect", cval=0.0): + """N-dimensional Laplace filter based on approximate second derivatives. + + Parameters + ---------- + %(input)s + %(output)s + %(mode_multiple)s + %(cval)s + + Examples + -------- + >>> from scipy import ndimage, misc + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = misc.ascent() + >>> result = ndimage.laplace(ascent) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + def derivative2(input, axis, output, mode, cval): + return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) + return generic_laplace(input, derivative2, output, mode, cval) + + +@_ni_docstrings.docfiller +def gaussian_laplace(input, sigma, output=None, mode="reflect", + cval=0.0, **kwargs): + """Multidimensional Laplace filter using gaussian second derivatives. + + Parameters + ---------- + %(input)s + sigma : scalar or sequence of scalars + The standard deviations of the Gaussian filter are given for + each axis as a sequence, or as a single number, in which case + it is equal for all axes. + %(output)s + %(mode_multiple)s + %(cval)s + Extra keyword arguments will be passed to gaussian_filter(). 
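+
+    Per the implementation, this is the sum over axes of
+    second-derivative Gaussian filters (a minimal cross-check, assuming
+    float input):
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> x = np.random.rand(16, 16)
+    >>> lap = ndimage.gaussian_laplace(x, sigma=2)
+    >>> d0 = ndimage.gaussian_filter(x, 2, order=[2, 0])
+    >>> d1 = ndimage.gaussian_filter(x, 2, order=[0, 2])
+    >>> bool(np.allclose(lap, d0 + d1))
+    True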
+ + Examples + -------- + >>> from scipy import ndimage, misc + >>> import matplotlib.pyplot as plt + >>> ascent = misc.ascent() + + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + + >>> result = ndimage.gaussian_laplace(ascent, sigma=1) + >>> ax1.imshow(result) + + >>> result = ndimage.gaussian_laplace(ascent, sigma=3) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = numpy.asarray(input) + + def derivative2(input, axis, output, mode, cval, sigma, **kwargs): + order = [0] * input.ndim + order[axis] = 2 + return gaussian_filter(input, sigma, order, output, mode, cval, + **kwargs) + + return generic_laplace(input, derivative2, output, mode, cval, + extra_arguments=(sigma,), + extra_keywords=kwargs) + + +@_ni_docstrings.docfiller +def generic_gradient_magnitude(input, derivative, output=None, + mode="reflect", cval=0.0, + extra_arguments=(), extra_keywords=None): + """Gradient magnitude using a provided gradient function. + + Parameters + ---------- + %(input)s + derivative : callable + Callable with the following signature:: + + derivative(input, axis, output, mode, cval, + *extra_arguments, **extra_keywords) + + See `extra_arguments`, `extra_keywords` below. + `derivative` can assume that `input` and `output` are ndarrays. + Note that the output from `derivative` is modified inplace; + be careful to copy important inputs before returning them. + %(output)s + %(mode_multiple)s + %(cval)s + %(extra_keywords)s + %(extra_arguments)s + """ + if extra_keywords is None: + extra_keywords = {} + input = numpy.asarray(input) + output = _ni_support._get_output(output, input) + axes = list(range(input.ndim)) + if len(axes) > 0: + modes = _ni_support._normalize_sequence(mode, len(axes)) + derivative(input, axes[0], output, modes[0], cval, + *extra_arguments, **extra_keywords) + numpy.multiply(output, output, output) + for ii in range(1, len(axes)): + tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval, + *extra_arguments, **extra_keywords) + numpy.multiply(tmp, tmp, tmp) + output += tmp + # This allows the sqrt to work with a different default casting + numpy.sqrt(output, output, casting='unsafe') + else: + output[...] = input[...] + return output + + +@_ni_docstrings.docfiller +def gaussian_gradient_magnitude(input, sigma, output=None, + mode="reflect", cval=0.0, **kwargs): + """Multidimensional gradient magnitude using Gaussian derivatives. + + Parameters + ---------- + %(input)s + sigma : scalar or sequence of scalars + The standard deviations of the Gaussian filter are given for + each axis as a sequence, or as a single number, in which case + it is equal for all axes.. + %(output)s + %(mode_multiple)s + %(cval)s + Extra keyword arguments will be passed to gaussian_filter(). + + Returns + ------- + gaussian_gradient_magnitude : ndarray + Filtered array. Has the same shape as `input`. 
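+
+    Equivalently, it is the square root of the sum of the squared
+    first-derivative Gaussian filters (a minimal cross-check, assuming
+    float input):
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> x = np.random.rand(16, 16)
+    >>> g = ndimage.gaussian_gradient_magnitude(x, sigma=2)
+    >>> d0 = ndimage.gaussian_filter(x, 2, order=[1, 0])
+    >>> d1 = ndimage.gaussian_filter(x, 2, order=[0, 1])
+    >>> bool(np.allclose(g, np.sqrt(d0**2 + d1**2)))
+    True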
+ + Examples + -------- + >>> from scipy import ndimage, misc + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = misc.ascent() + >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = numpy.asarray(input) + + def derivative(input, axis, output, mode, cval, sigma, **kwargs): + order = [0] * input.ndim + order[axis] = 1 + return gaussian_filter(input, sigma, order, output, mode, + cval, **kwargs) + + return generic_gradient_magnitude(input, derivative, output, mode, + cval, extra_arguments=(sigma,), + extra_keywords=kwargs) + + +def _correlate_or_convolve(input, weights, output, mode, cval, origin, + convolution): + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + origins = _ni_support._normalize_sequence(origin, input.ndim) + weights = numpy.asarray(weights, dtype=numpy.float64) + wshape = [ii for ii in weights.shape if ii > 0] + if len(wshape) != input.ndim: + raise RuntimeError('filter weights array has incorrect shape.') + if convolution: + weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] + for ii in range(len(origins)): + origins[ii] = -origins[ii] + if not weights.shape[ii] & 1: + origins[ii] -= 1 + for origin, lenw in zip(origins, wshape): + if _invalid_origin(origin, lenw): + raise ValueError('Invalid origin; origin must satisfy ' + '-(weights.shape[k] // 2) <= origin[k] <= ' + '(weights.shape[k]-1) // 2') + + if not weights.flags.contiguous: + weights = weights.copy() + output = _ni_support._get_output(output, input) + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.correlate(input, weights, output, mode, cval, origins) + return output + + +@_ni_docstrings.docfiller +def correlate(input, weights, output=None, mode='reflect', cval=0.0, + origin=0): + """ + Multi-dimensional correlation. + + The array is correlated with the given kernel. + + Parameters + ---------- + %(input)s + weights : ndarray + array of weights, same number of dimensions as input + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + + See Also + -------- + convolve : Convolve an image with a kernel. + """ + return _correlate_or_convolve(input, weights, output, mode, cval, + origin, False) + + +@_ni_docstrings.docfiller +def convolve(input, weights, output=None, mode='reflect', cval=0.0, + origin=0): + """ + Multidimensional convolution. + + The array is convolved with the given kernel. + + Parameters + ---------- + %(input)s + weights : array_like + Array of weights, same number of dimensions as input + %(output)s + %(mode_multiple)s + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0 + %(origin_multiple)s + + Returns + ------- + result : ndarray + The result of convolution of `input` with `weights`. + + See Also + -------- + correlate : Correlate an image with a kernel. + + Notes + ----- + Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where + W is the `weights` kernel, + j is the n-D spatial index over :math:`W`, + I is the `input` and k is the coordinate of the center of + W, specified by `origin` in the input parameters. + + Examples + -------- + Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, + because in this case borders (i.e. 
where the `weights` kernel, centered
+    on any one value, extends beyond an edge of `input`) are treated as zeros.
+
+    >>> a = np.array([[1, 2, 0, 0],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
+    >>> from scipy import ndimage
+    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
+    array([[11, 10,  7,  4],
+           [10,  3, 11, 11],
+           [15, 12, 14,  7],
+           [12,  3,  7,  0]])
+
+    Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
+    with 1.0's (and then extracting only the original region of the result).
+
+    >>> ndimage.convolve(a, k, mode='constant', cval=1.0)
+    array([[13, 11,  8,  7],
+           [11,  3, 11, 14],
+           [16, 12, 14, 10],
+           [15,  6, 10,  5]])
+
+    With ``mode='reflect'`` (the default), outer values are reflected at the
+    edge of `input` to fill in missing values.
+
+    >>> b = np.array([[2, 0, 0],
+    ...               [1, 0, 0],
+    ...               [0, 0, 0]])
+    >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
+    >>> ndimage.convolve(b, k, mode='reflect')
+    array([[5, 0, 0],
+           [3, 0, 0],
+           [1, 0, 0]])
+
+    This includes diagonally at the corners.
+
+    >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
+    >>> ndimage.convolve(b, k)
+    array([[4, 2, 0],
+           [3, 2, 0],
+           [1, 1, 0]])
+
+    With ``mode='nearest'``, the single value nearest to the edge of
+    `input` is repeated as many times as needed to match the overlapping
+    `weights`.
+
+    >>> c = np.array([[2, 0, 1],
+    ...               [1, 0, 0],
+    ...               [0, 0, 0]])
+    >>> k = np.array([[0, 1, 0],
+    ...               [0, 1, 0],
+    ...               [0, 1, 0],
+    ...               [0, 1, 0],
+    ...               [0, 1, 0]])
+    >>> ndimage.convolve(c, k, mode='nearest')
+    array([[7, 0, 3],
+           [5, 0, 2],
+           [3, 0, 1]])
+
+    """
+    return _correlate_or_convolve(input, weights, output, mode, cval,
+                                  origin, True)
+
+
+@_ni_docstrings.docfiller
+def uniform_filter1d(input, size, axis=-1, output=None,
+                     mode="reflect", cval=0.0, origin=0):
+    """Calculate a one-dimensional uniform filter along the given axis.
+
+    The lines of the array along the given axis are filtered with a
+    uniform filter of given size.
+
+    Parameters
+    ----------
+    %(input)s
+    size : int
+        length of uniform filter
+    %(axis)s
+    %(output)s
+    %(mode)s
+    %(cval)s
+    %(origin)s
+
+    Examples
+    --------
+    >>> from scipy.ndimage import uniform_filter1d
+    >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
+    array([4, 3, 4, 1, 4, 6, 6, 3])
+    """
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    axis = _ni_support._check_axis(axis, input.ndim)
+    if size < 1:
+        raise RuntimeError('incorrect filter size')
+    output = _ni_support._get_output(output, input)
+    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
+        raise ValueError('invalid origin')
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
+                               origin)
+    return output
+
+
+@_ni_docstrings.docfiller
+def uniform_filter(input, size=3, output=None, mode="reflect",
+                   cval=0.0, origin=0):
+    """Multi-dimensional uniform filter.
+
+    Parameters
+    ----------
+    %(input)s
+    size : int or sequence of ints, optional
+        The sizes of the uniform filter are given for each axis as a
+        sequence, or as a single number, in which case the size is
+        equal for all axes.
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    uniform_filter : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    Notes
+    -----
+    The multi-dimensional filter is implemented as a sequence of
+    one-dimensional uniform filters.
The intermediate arrays are stored + in the same data type as the output. Therefore, for output types + with a limited precision, the results may be imprecise because + intermediate results may be stored with insufficient precision. + + Examples + -------- + >>> from scipy import ndimage, misc + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = misc.ascent() + >>> result = ndimage.uniform_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = numpy.asarray(input) + output = _ni_support._get_output(output, input) + sizes = _ni_support._normalize_sequence(size, input.ndim) + origins = _ni_support._normalize_sequence(origin, input.ndim) + modes = _ni_support._normalize_sequence(mode, input.ndim) + axes = list(range(input.ndim)) + axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) + for ii in range(len(axes)) if sizes[ii] > 1] + if len(axes) > 0: + for axis, size, origin, mode in axes: + uniform_filter1d(input, int(size), axis, output, mode, + cval, origin) + input = output + else: + output[...] = input[...] + return output + + +@_ni_docstrings.docfiller +def minimum_filter1d(input, size, axis=-1, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a one-dimensional minimum filter along the given axis. + + The lines of the array along the given axis are filtered with a + minimum filter of given size. + + Parameters + ---------- + %(input)s + size : int + length along which to calculate 1D minimum + %(axis)s + %(output)s + %(mode)s + %(cval)s + %(origin)s + + Notes + ----- + This function implements the MINLIST algorithm [1]_, as described by + Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being + the `input` length, regardless of filter size. + + References + ---------- + .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 + .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html + + + Examples + -------- + >>> from scipy.ndimage import minimum_filter1d + >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) + array([2, 0, 0, 0, 1, 1, 0, 0]) + """ + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + axis = _ni_support._check_axis(axis, input.ndim) + if size < 1: + raise RuntimeError('incorrect filter size') + output = _ni_support._get_output(output, input) + if (size // 2 + origin < 0) or (size // 2 + origin >= size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, + origin, 1) + return output + + +@_ni_docstrings.docfiller +def maximum_filter1d(input, size, axis=-1, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a one-dimensional maximum filter along the given axis. + + The lines of the array along the given axis are filtered with a + maximum filter of given size. + + Parameters + ---------- + %(input)s + size : int + Length along which to calculate the 1-D maximum. + %(axis)s + %(output)s + %(mode)s + %(cval)s + %(origin)s + + Returns + ------- + maximum1d : ndarray, None + Maximum-filtered array with same shape as input. 
+ None if `output` is not None + + Notes + ----- + This function implements the MAXLIST algorithm [1]_, as described by + Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being + the `input` length, regardless of filter size. + + References + ---------- + .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 + .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html + + Examples + -------- + >>> from scipy.ndimage import maximum_filter1d + >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) + array([8, 8, 8, 4, 9, 9, 9, 9]) + """ + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + axis = _ni_support._check_axis(axis, input.ndim) + if size < 1: + raise RuntimeError('incorrect filter size') + output = _ni_support._get_output(output, input) + if (size // 2 + origin < 0) or (size // 2 + origin >= size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, + origin, 0) + return output + + +def _min_or_max_filter(input, size, footprint, structure, output, mode, + cval, origin, minimum): + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3) + if structure is None: + if footprint is None: + if size is None: + raise RuntimeError("no footprint provided") + separable = True + else: + footprint = numpy.asarray(footprint, dtype=bool) + if not footprint.any(): + raise ValueError("All-zero footprint is not supported.") + if footprint.all(): + size = footprint.shape + footprint = None + separable = True + else: + separable = False + else: + structure = numpy.asarray(structure, dtype=numpy.float64) + separable = False + if footprint is None: + footprint = numpy.ones(structure.shape, bool) + else: + footprint = numpy.asarray(footprint, dtype=bool) + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + output = _ni_support._get_output(output, input) + origins = _ni_support._normalize_sequence(origin, input.ndim) + if separable: + sizes = _ni_support._normalize_sequence(size, input.ndim) + modes = _ni_support._normalize_sequence(mode, input.ndim) + axes = list(range(input.ndim)) + axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) + for ii in range(len(axes)) if sizes[ii] > 1] + if minimum: + filter_ = minimum_filter1d + else: + filter_ = maximum_filter1d + if len(axes) > 0: + for axis, size, origin, mode in axes: + filter_(input, int(size), axis, output, mode, cval, origin) + input = output + else: + output[...] = input[...] 
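+    # Non-separable case: the full footprint (and optional structure) is
+    # evaluated below in a single pass through the C implementation.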
+ else: + fshape = [ii for ii in footprint.shape if ii > 0] + if len(fshape) != input.ndim: + raise RuntimeError('footprint array has incorrect shape.') + for origin, lenf in zip(origins, fshape): + if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): + raise ValueError('invalid origin') + if not footprint.flags.contiguous: + footprint = footprint.copy() + if structure is not None: + if len(structure.shape) != input.ndim: + raise RuntimeError('structure array has incorrect shape') + if not structure.flags.contiguous: + structure = structure.copy() + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.min_or_max_filter(input, footprint, structure, output, + mode, cval, origins, minimum) + return output + + +@_ni_docstrings.docfiller +def minimum_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a multi-dimensional minimum filter. + + Parameters + ---------- + %(input)s + %(size_foot)s + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + + Returns + ------- + minimum_filter : ndarray + Filtered array. Has the same shape as `input`. + + Examples + -------- + >>> from scipy import ndimage, misc + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = misc.ascent() + >>> result = ndimage.minimum_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _min_or_max_filter(input, size, footprint, None, output, mode, + cval, origin, 1) + + +@_ni_docstrings.docfiller +def maximum_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a multi-dimensional maximum filter. + + Parameters + ---------- + %(input)s + %(size_foot)s + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + + Returns + ------- + maximum_filter : ndarray + Filtered array. Has the same shape as `input`. 
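+
+    Notes
+    -----
+    When `size` (or an all-ones `footprint`) is given, the filter is
+    separable and is applied as a sequence of one-dimensional maximum
+    filters along each axis, so the 1-D case reduces to
+    `maximum_filter1d`:
+
+    >>> from scipy import ndimage
+    >>> ndimage.maximum_filter([2, 8, 0, 4, 1, 9, 9, 0], size=3)
+    array([8, 8, 8, 4, 9, 9, 9, 9])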
+
+    Examples
+    --------
+    >>> from scipy import ndimage, misc
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = misc.ascent()
+    >>> result = ndimage.maximum_filter(ascent, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    return _min_or_max_filter(input, size, footprint, None, output, mode,
+                              cval, origin, 0)
+
+
+@_ni_docstrings.docfiller
+def _rank_filter(input, rank, size=None, footprint=None, output=None,
+                 mode="reflect", cval=0.0, origin=0, operation='rank'):
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    origins = _ni_support._normalize_sequence(origin, input.ndim)
+    if footprint is None:
+        if size is None:
+            raise RuntimeError("no footprint or filter size provided")
+        sizes = _ni_support._normalize_sequence(size, input.ndim)
+        footprint = numpy.ones(sizes, dtype=bool)
+    else:
+        footprint = numpy.asarray(footprint, dtype=bool)
+    fshape = [ii for ii in footprint.shape if ii > 0]
+    if len(fshape) != input.ndim:
+        raise RuntimeError('filter footprint array has incorrect shape.')
+    for origin, lenf in zip(origins, fshape):
+        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
+            raise ValueError('invalid origin')
+    if not footprint.flags.contiguous:
+        footprint = footprint.copy()
+    filter_size = numpy.where(footprint, 1, 0).sum()
+    if operation == 'median':
+        rank = filter_size // 2
+    elif operation == 'percentile':
+        percentile = rank
+        if percentile < 0.0:
+            percentile += 100.0
+        if percentile < 0 or percentile > 100:
+            raise RuntimeError('invalid percentile')
+        if percentile == 100.0:
+            rank = filter_size - 1
+        else:
+            rank = int(float(filter_size) * percentile / 100.0)
+    if rank < 0:
+        rank += filter_size
+    if rank < 0 or rank >= filter_size:
+        raise RuntimeError('rank not within filter footprint size')
+    if rank == 0:
+        return minimum_filter(input, None, footprint, output, mode, cval,
+                              origins)
+    elif rank == filter_size - 1:
+        return maximum_filter(input, None, footprint, output, mode, cval,
+                              origins)
+    else:
+        output = _ni_support._get_output(output, input)
+        mode = _ni_support._extend_mode_to_code(mode)
+        _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
+                              origins)
+        return output
+
+
+@_ni_docstrings.docfiller
+def rank_filter(input, rank, size=None, footprint=None, output=None,
+                mode="reflect", cval=0.0, origin=0):
+    """Calculate a multi-dimensional rank filter.
+
+    Parameters
+    ----------
+    %(input)s
+    rank : int
+        The rank parameter may be less than zero, i.e., rank = -1
+        indicates the largest element.
+    %(size_foot)s
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    rank_filter : ndarray
+        Filtered array. Has the same shape as `input`.
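+
+    Notes
+    -----
+    ``rank=0`` selects the minimum and ``rank=-1`` (equivalently
+    ``rank = filter_size - 1``) the maximum within the footprint; these
+    two cases are dispatched to `minimum_filter` and `maximum_filter`.
+    For instance, mirroring the `minimum_filter1d` example above:
+
+    >>> from scipy import ndimage
+    >>> ndimage.rank_filter([2, 8, 0, 4, 1, 9, 9, 0], rank=0, size=3)
+    array([2, 0, 0, 0, 1, 1, 0, 0])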
+
+    Examples
+    --------
+    >>> from scipy import ndimage, misc
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = misc.ascent()
+    >>> result = ndimage.rank_filter(ascent, rank=42, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    rank = operator.index(rank)
+    return _rank_filter(input, rank, size, footprint, output, mode, cval,
+                        origin, 'rank')
+
+
+@_ni_docstrings.docfiller
+def median_filter(input, size=None, footprint=None, output=None,
+                  mode="reflect", cval=0.0, origin=0):
+    """
+    Calculate a multidimensional median filter.
+
+    Parameters
+    ----------
+    %(input)s
+    %(size_foot)s
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    median_filter : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, misc
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = misc.ascent()
+    >>> result = ndimage.median_filter(ascent, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    return _rank_filter(input, 0, size, footprint, output, mode, cval,
+                        origin, 'median')
+
+
+@_ni_docstrings.docfiller
+def percentile_filter(input, percentile, size=None, footprint=None,
+                      output=None, mode="reflect", cval=0.0, origin=0):
+    """Calculate a multi-dimensional percentile filter.
+
+    Parameters
+    ----------
+    %(input)s
+    percentile : scalar
+        The percentile parameter may be less than zero, i.e.,
+        percentile = -20 equals percentile = 80
+    %(size_foot)s
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    percentile_filter : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, misc
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = misc.ascent()
+    >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    return _rank_filter(input, percentile, size, footprint, output, mode,
+                        cval, origin, 'percentile')
+
+
+@_ni_docstrings.docfiller
+def generic_filter1d(input, function, filter_size, axis=-1,
+                     output=None, mode="reflect", cval=0.0, origin=0,
+                     extra_arguments=(), extra_keywords=None):
+    """Calculate a one-dimensional filter along the given axis.
+
+    `generic_filter1d` iterates over the lines of the array, calling the
+    given function at each line. The arguments of the line are the
+    input line, and the output line. The input and output lines are 1D
+    double arrays. The input line is extended appropriately according
+    to the filter size and origin. The output line must be modified
+    in-place with the result.
+
+    Parameters
+    ----------
+    %(input)s
+    function : {callable, scipy.LowLevelCallable}
+        Function to apply along given axis.
+    filter_size : scalar
+        Length of the filter.
+ %(axis)s + %(output)s + %(mode)s + %(cval)s + %(origin)s + %(extra_arguments)s + %(extra_keywords)s + + Notes + ----- + This function also accepts low-level callback functions with one of + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. code:: c + + int function(double *input_line, npy_intp input_length, + double *output_line, npy_intp output_length, + void *user_data) + int function(double *input_line, intptr_t input_length, + double *output_line, intptr_t output_length, + void *user_data) + + The calling function iterates over the lines of the input and output + arrays, calling the callback function at each line. The current line + is extended according to the border conditions set by the calling + function, and the result is copied into the array that is passed + through ``input_line``. The length of the input line (after extension) + is passed through ``input_length``. The callback function should apply + the filter and store the result in the array passed through + ``output_line``. The length of the output line is passed through + ``output_length``. ``user_data`` is the data pointer provided + to `scipy.LowLevelCallable` as-is. + + The callback function must return an integer error status that is zero + if something went wrong and one otherwise. If an error occurs, you should + normally set the python error status with an informative message + before returning, otherwise a default error message is set by the + calling function. + + In addition, some other low-level function pointer specifications + are accepted, but these are for backward compatibility only and should + not be used in new code. + + """ + if extra_keywords is None: + extra_keywords = {} + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + output = _ni_support._get_output(output, input) + if filter_size < 1: + raise RuntimeError('invalid filter size') + axis = _ni_support._check_axis(axis, input.ndim) + if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= + filter_size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.generic_filter1d(input, function, filter_size, axis, output, + mode, cval, origin, extra_arguments, + extra_keywords) + return output + + +@_ni_docstrings.docfiller +def generic_filter(input, function, size=None, footprint=None, + output=None, mode="reflect", cval=0.0, origin=0, + extra_arguments=(), extra_keywords=None): + """Calculate a multi-dimensional filter using the given function. + + At each element the provided function is called. The input values + within the filter footprint at that element are passed to the function + as a 1D array of double values. + + Parameters + ---------- + %(input)s + function : {callable, scipy.LowLevelCallable} + Function to apply at each element. + %(size_foot)s + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + %(extra_arguments)s + %(extra_keywords)s + + Notes + ----- + This function also accepts low-level callback functions with one of + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. code:: c + + int callback(double *buffer, npy_intp filter_size, + double *return_value, void *user_data) + int callback(double *buffer, intptr_t filter_size, + double *return_value, void *user_data) + + The calling function iterates over the elements of the input and + output arrays, calling the callback function at each element. 
The + elements within the footprint of the filter at the current element are + passed through the ``buffer`` parameter, and the number of elements + within the footprint through ``filter_size``. The calculated value is + returned in ``return_value``. ``user_data`` is the data pointer provided + to `scipy.LowLevelCallable` as-is. + + The callback function must return an integer error status that is zero + if something went wrong and one otherwise. If an error occurs, you should + normally set the python error status with an informative message + before returning, otherwise a default error message is set by the + calling function. + + In addition, some other low-level function pointer specifications + are accepted, but these are for backward compatibility only and should + not be used in new code. + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) + if extra_keywords is None: + extra_keywords = {} + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + origins = _ni_support._normalize_sequence(origin, input.ndim) + if footprint is None: + if size is None: + raise RuntimeError("no footprint or filter size provided") + sizes = _ni_support._normalize_sequence(size, input.ndim) + footprint = numpy.ones(sizes, dtype=bool) + else: + footprint = numpy.asarray(footprint, dtype=bool) + fshape = [ii for ii in footprint.shape if ii > 0] + if len(fshape) != input.ndim: + raise RuntimeError('filter footprint array has incorrect shape.') + for origin, lenf in zip(origins, fshape): + if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): + raise ValueError('invalid origin') + if not footprint.flags.contiguous: + footprint = footprint.copy() + output = _ni_support._get_output(output, input) + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.generic_filter(input, function, footprint, output, mode, + cval, origins, extra_arguments, extra_keywords) + return output diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/filters.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/filters.pyc new file mode 100644 index 0000000..9efc1b2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/filters.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/fourier.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/fourier.py new file mode 100644 index 0000000..3d94347 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/fourier.py @@ -0,0 +1,306 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import division, print_function, absolute_import + +import numpy +from . import _ni_support +from . import _nd_image + +__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid', + 'fourier_shift'] + + +def _get_output_fourier(output, input): + if output is None: + if input.dtype.type in [numpy.complex64, numpy.complex128, + numpy.float32]: + output = numpy.zeros(input.shape, dtype=input.dtype) + else: + output = numpy.zeros(input.shape, dtype=numpy.float64) + elif type(output) is type: + if output not in [numpy.complex64, numpy.complex128, + numpy.float32, numpy.float64]: + raise RuntimeError("output type not supported") + output = numpy.zeros(input.shape, dtype=output) + elif output.shape != input.shape: + raise RuntimeError("output shape not correct") + return output + + +def _get_output_fourier_complex(output, input): + if output is None: + if input.dtype.type in [numpy.complex64, numpy.complex128]: + output = numpy.zeros(input.shape, dtype=input.dtype) + else: + output = numpy.zeros(input.shape, dtype=numpy.complex128) + elif type(output) is type: + if output not in [numpy.complex64, numpy.complex128]: + raise RuntimeError("output type not supported") + output = numpy.zeros(input.shape, dtype=output) + elif output.shape != input.shape: + raise RuntimeError("output shape not correct") + return output + + +def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None): + """ + Multi-dimensional Gaussian fourier filter. + + The array is multiplied with the fourier transform of a Gaussian + kernel. + + Parameters + ---------- + input : array_like + The input array. + sigma : float or sequence + The sigma of the Gaussian kernel. If a float, `sigma` is the same for + all axes. If a sequence, `sigma` has to contain one value for each + axis. + n : int, optional + If `n` is negative (default), then the input is assumed to be the + result of a complex fft. + If `n` is larger than or equal to zero, the input is assumed to be the + result of a real fft, and `n` gives the length of the array before + transformation along the real transform direction. + axis : int, optional + The axis of the real transform. + output : ndarray, optional + If given, the result of filtering the input is placed in this array. + None is returned in this case. + + Returns + ------- + fourier_gaussian : ndarray + The filtered input. 
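+
+    Notes
+    -----
+    By the convolution theorem, multiplying the transform by a Gaussian
+    and inverting is equivalent to convolving the original image with
+    that Gaussian under periodic (wrap-around) boundary conditions.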
+
+    Examples
+    --------
+    >>> from scipy import ndimage, misc
+    >>> import numpy.fft
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ascent = misc.ascent()
+    >>> input_ = numpy.fft.fft2(ascent)
+    >>> result = ndimage.fourier_gaussian(input_, sigma=4)
+    >>> result = numpy.fft.ifft2(result)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+    output = _get_output_fourier(output, input)
+    axis = _ni_support._check_axis(axis, input.ndim)
+    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
+    sigmas = numpy.asarray(sigmas, dtype=numpy.float64)
+    if not sigmas.flags.contiguous:
+        sigmas = sigmas.copy()
+
+    _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
+    return output
+
+
+def fourier_uniform(input, size, n=-1, axis=-1, output=None):
+    """
+    Multi-dimensional uniform fourier filter.
+
+    The array is multiplied with the fourier transform of a box of given
+    size.
+
+    Parameters
+    ----------
+    input : array_like
+        The input array.
+    size : float or sequence
+        The size of the box used for filtering.
+        If a float, `size` is the same for all axes. If a sequence, `size` has
+        to contain one value for each axis.
+    n : int, optional
+        If `n` is negative (default), then the input is assumed to be the
+        result of a complex fft.
+        If `n` is larger than or equal to zero, the input is assumed to be the
+        result of a real fft, and `n` gives the length of the array before
+        transformation along the real transform direction.
+    axis : int, optional
+        The axis of the real transform.
+    output : ndarray, optional
+        If given, the result of filtering the input is placed in this array.
+        None is returned in this case.
+
+    Returns
+    -------
+    fourier_uniform : ndarray
+        The filtered input.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, misc
+    >>> import numpy.fft
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ascent = misc.ascent()
+    >>> input_ = numpy.fft.fft2(ascent)
+    >>> result = ndimage.fourier_uniform(input_, size=20)
+    >>> result = numpy.fft.ifft2(result)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+    output = _get_output_fourier(output, input)
+    axis = _ni_support._check_axis(axis, input.ndim)
+    sizes = _ni_support._normalize_sequence(size, input.ndim)
+    sizes = numpy.asarray(sizes, dtype=numpy.float64)
+    if not sizes.flags.contiguous:
+        sizes = sizes.copy()
+    _nd_image.fourier_filter(input, sizes, n, axis, output, 1)
+    return output
+
+
+def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
+    """
+    Multi-dimensional ellipsoid fourier filter.
+
+    The array is multiplied with the fourier transform of an ellipsoid of
+    given sizes.
+
+    Parameters
+    ----------
+    input : array_like
+        The input array.
+    size : float or sequence
+        The size of the box used for filtering.
+        If a float, `size` is the same for all axes. If a sequence, `size` has
+        to contain one value for each axis.
+    n : int, optional
+        If `n` is negative (default), then the input is assumed to be the
+        result of a complex fft.
+        If `n` is larger than or equal to zero, the input is assumed to be the
+        result of a real fft, and `n` gives the length of the array before
+        transformation along the real transform direction.
+    axis : int, optional
+        The axis of the real transform.
+    output : ndarray, optional
+        If given, the result of filtering the input is placed in this array.
+        None is returned in this case.
+
+    Returns
+    -------
+    fourier_ellipsoid : ndarray
+        The filtered input.
+
+    Notes
+    -----
+    This function is implemented for arrays of rank 1, 2, or 3.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, misc
+    >>> import numpy.fft
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ascent = misc.ascent()
+    >>> input_ = numpy.fft.fft2(ascent)
+    >>> result = ndimage.fourier_ellipsoid(input_, size=20)
+    >>> result = numpy.fft.ifft2(result)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+    output = _get_output_fourier(output, input)
+    axis = _ni_support._check_axis(axis, input.ndim)
+    sizes = _ni_support._normalize_sequence(size, input.ndim)
+    sizes = numpy.asarray(sizes, dtype=numpy.float64)
+    if not sizes.flags.contiguous:
+        sizes = sizes.copy()
+    _nd_image.fourier_filter(input, sizes, n, axis, output, 2)
+    return output
+
+
+def fourier_shift(input, shift, n=-1, axis=-1, output=None):
+    """
+    Multi-dimensional fourier shift filter.
+
+    The array is multiplied with the fourier transform of a shift operation.
+
+    Parameters
+    ----------
+    input : array_like
+        The input array.
+    shift : float or sequence
+        The shift along the axes.
+        If a float, `shift` is the same for all axes. If a sequence, `shift`
+        has to contain one value for each axis.
+    n : int, optional
+        If `n` is negative (default), then the input is assumed to be the
+        result of a complex fft.
+        If `n` is larger than or equal to zero, the input is assumed to be the
+        result of a real fft, and `n` gives the length of the array before
+        transformation along the real transform direction.
+    axis : int, optional
+        The axis of the real transform.
+    output : ndarray, optional
+        If given, the result of shifting the input is placed in this array.
+        None is returned in this case.
+
+    Returns
+    -------
+    fourier_shift : ndarray
+        The shifted input.
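+
+    Notes
+    -----
+    The shift is applied as a linear phase ramp,
+    ``exp(-2j * pi * k * shift / n)`` along each axis, which amounts to a
+    circular (wrap-around) translation in the spatial domain:
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> x = np.arange(4.0)
+    >>> np.fft.ifft(ndimage.fourier_shift(np.fft.fft(x), 1)).real.round()
+    array([ 3.,  0.,  1.,  2.])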
+ + Examples + -------- + >>> from scipy import ndimage, misc + >>> import matplotlib.pyplot as plt + >>> import numpy.fft + >>> fig, (ax1, ax2) = plt.subplots(1, 2) + >>> plt.gray() # show the filtered result in grayscale + >>> ascent = misc.ascent() + >>> input_ = numpy.fft.fft2(ascent) + >>> result = ndimage.fourier_shift(input_, shift=200) + >>> result = numpy.fft.ifft2(result) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result.real) # the imaginary part is an artifact + >>> plt.show() + """ + input = numpy.asarray(input) + output = _get_output_fourier_complex(output, input) + axis = _ni_support._check_axis(axis, input.ndim) + shifts = _ni_support._normalize_sequence(shift, input.ndim) + shifts = numpy.asarray(shifts, dtype=numpy.float64) + if not shifts.flags.contiguous: + shifts = shifts.copy() + _nd_image.fourier_shift(input, shifts, n, axis, output) + return output diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/fourier.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/fourier.pyc new file mode 100644 index 0000000..d497cd3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/fourier.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/interpolation.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/interpolation.py new file mode 100644 index 0000000..3b15e9e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/interpolation.py @@ -0,0 +1,746 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import division, print_function, absolute_import + +import math +import numpy +import warnings + +from . import _ni_support +from . 
import _nd_image +from ._ni_docstrings import docdict +from scipy.misc import doccer + +# Change the default 'reflect' to 'constant' via modifying a copy of docdict +docdict_copy = docdict.copy() +del docdict +docdict_copy['mode'] = docdict_copy['mode'].replace("Default is 'reflect'", + "Default is 'constant'") + +docfiller = doccer.filldoc(docdict_copy) + +__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform', + 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate'] + + +@docfiller +def spline_filter1d(input, order=3, axis=-1, output=numpy.float64, + mode='mirror'): + """ + Calculate a one-dimensional spline filter along the given axis. + + The lines of the array along the given axis are filtered by a + spline filter. The order of the spline must be >= 2 and <= 5. + + Parameters + ---------- + %(input)s + order : int, optional + The order of the spline, default is 3. + axis : int, optional + The axis along which the spline filter is applied. Default is the last + axis. + output : ndarray or dtype, optional + The array in which to place the output, or the dtype of the returned + array. Default is `numpy.float64`. + %(mode)s + + Returns + ------- + spline_filter1d : ndarray + The filtered input. + + Notes + ----- + All functions in `ndimage.interpolation` do spline interpolation of + the input image. If using b-splines of `order > 1`, the input image + values have to be converted to b-spline coefficients first, which is + done by applying this one-dimensional filter sequentially along all + axes of the input. All functions that require b-spline coefficients + will automatically filter their inputs, a behavior controllable with + the `prefilter` keyword argument. For functions that accept a `mode` + parameter, the result will only be correct if it matches the `mode` + used when filtering. + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + output = _ni_support._get_output(output, input) + if order in [0, 1]: + output[...] = numpy.array(input) + else: + mode = _ni_support._extend_mode_to_code(mode) + axis = _ni_support._check_axis(axis, input.ndim) + _nd_image.spline_filter1d(input, order, axis, output, mode) + return output + + +def spline_filter(input, order=3, output=numpy.float64, mode='mirror'): + """ + Multi-dimensional spline filter. + + For more details, see `spline_filter1d`. + + See Also + -------- + spline_filter1d + + Notes + ----- + The multi-dimensional filter is implemented as a sequence of + one-dimensional spline filters. The intermediate arrays are stored + in the same data type as the output. Therefore, for output types + with a limited precision, the results may be imprecise because + intermediate results may be stored with insufficient precision. + + """ + if order < 2 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + output = _ni_support._get_output(output, input) + if order not in [0, 1] and input.ndim > 0: + for axis in range(input.ndim): + spline_filter1d(input, order, axis, output=output, mode=mode) + input = output + else: + output[...] = input[...] 
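+    # For orders 0 and 1 the b-spline coefficients equal the input
+    # samples, so the identity copy above is all that is needed.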
+
+    return output
+
+
+@docfiller
+def geometric_transform(input, mapping, output_shape=None,
+                        output=None, order=3,
+                        mode='constant', cval=0.0, prefilter=True,
+                        extra_arguments=(), extra_keywords={}):
+    """
+    Apply an arbitrary geometric transform.
+
+    The given mapping function is used to find, for each point in the
+    output, the corresponding coordinates in the input. The value of the
+    input at those coordinates is determined by spline interpolation of
+    the requested order.
+
+    Parameters
+    ----------
+    %(input)s
+    mapping : {callable, scipy.LowLevelCallable}
+        A callable object that accepts a tuple of length equal to the output
+        array rank, and returns the corresponding input coordinates as a tuple
+        of length equal to the input array rank.
+    output_shape : tuple of ints, optional
+        Shape tuple.
+    %(output)s
+    order : int, optional
+        The order of the spline interpolation, default is 3.
+        The order has to be in the range 0-5.
+    %(mode)s
+    %(cval)s
+    %(prefilter)s
+    extra_arguments : tuple, optional
+        Extra arguments passed to `mapping`.
+    extra_keywords : dict, optional
+        Extra keywords passed to `mapping`.
+
+    Returns
+    -------
+    output : ndarray
+        The filtered input.
+
+    See Also
+    --------
+    map_coordinates, affine_transform, spline_filter1d
+
+
+    Notes
+    -----
+    This function also accepts low-level callback functions with one of
+    the following signatures and wrapped in `scipy.LowLevelCallable`:
+
+    .. code:: c
+
+       int mapping(npy_intp *output_coordinates, double *input_coordinates,
+                   int output_rank, int input_rank, void *user_data)
+       int mapping(intptr_t *output_coordinates, double *input_coordinates,
+                   int output_rank, int input_rank, void *user_data)
+
+    The calling function iterates over the elements of the output array,
+    calling the callback function at each element. The coordinates of the
+    current output element are passed through ``output_coordinates``. The
+    callback function must return the coordinates at which the input must
+    be interpolated in ``input_coordinates``. The rank of the input and
+    output arrays are given by ``input_rank`` and ``output_rank``
+    respectively. ``user_data`` is the data pointer provided
+    to `scipy.LowLevelCallable` as-is.
+
+    The callback function must return an integer error status that is zero
+    if something went wrong and one otherwise. If an error occurs, you should
+    normally set the python error status with an informative message
+    before returning, otherwise a default error message is set by the
+    calling function.
+
+    In addition, some other low-level function pointer specifications
+    are accepted, but these are for backward compatibility only and should
+    not be used in new code.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.ndimage import geometric_transform
+    >>> a = np.arange(12.).reshape((4, 3))
+    >>> def shift_func(output_coords):
+    ...     return (output_coords[0] - 0.5, output_coords[1] - 0.5)
+    ...
+    >>> geometric_transform(a, shift_func)
+    array([[ 0.   ,  0.   ,  0.   ],
+           [ 0.   ,  1.362,  2.738],
+           [ 0.   ,  4.812,  6.187],
+           [ 0.   ,  8.263,  9.637]])
+
+    >>> b = [1, 2, 3, 4, 5]
+    >>> def shift_func(output_coords):
+    ...     return (output_coords[0] - 3,)
+    ...
+ >>> geometric_transform(b, shift_func, mode='constant') + array([0, 0, 0, 1, 2]) + >>> geometric_transform(b, shift_func, mode='nearest') + array([1, 1, 1, 1, 2]) + >>> geometric_transform(b, shift_func, mode='reflect') + array([3, 2, 1, 1, 2]) + >>> geometric_transform(b, shift_func, mode='wrap') + array([2, 3, 4, 1, 2]) + + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + if output_shape is None: + output_shape = input.shape + if input.ndim < 1 or len(output_shape) < 1: + raise RuntimeError('input and output rank must be > 0') + mode = _ni_support._extend_mode_to_code(mode) + if prefilter and order > 1: + filtered = spline_filter(input, order, output=numpy.float64) + else: + filtered = input + output = _ni_support._get_output(output, input, shape=output_shape) + _nd_image.geometric_transform(filtered, mapping, None, None, None, output, + order, mode, cval, extra_arguments, + extra_keywords) + return output + + +@docfiller +def map_coordinates(input, coordinates, output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """ + Map the input array to new coordinates by interpolation. + + The array of coordinates is used to find, for each point in the output, + the corresponding coordinates in the input. The value of the input at + those coordinates is determined by spline interpolation of the + requested order. + + The shape of the output is derived from that of the coordinate + array by dropping the first axis. The values of the array along + the first axis are the coordinates in the input array at which the + output value is found. + + Parameters + ---------- + %(input)s + coordinates : array_like + The coordinates at which `input` is evaluated. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode)s + %(cval)s + %(prefilter)s + + Returns + ------- + map_coordinates : ndarray + The result of transforming the input. The shape of the output is + derived from that of `coordinates` by dropping the first axis. + + See Also + -------- + spline_filter, geometric_transform, scipy.interpolate + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.arange(12.).reshape((4, 3)) + >>> a + array([[ 0., 1., 2.], + [ 3., 4., 5.], + [ 6., 7., 8.], + [ 9., 10., 11.]]) + >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1) + array([ 2., 7.]) + + Above, the interpolated value of a[0.5, 0.5] gives output[0], while + a[2, 1] is output[1]. + + >>> inds = np.array([[0.5, 2], [0.5, 4]]) + >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3) + array([ 2. 
, -33.3]) + >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest') + array([ 2., 8.]) + >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool) + array([ True, False], dtype=bool) + + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + coordinates = numpy.asarray(coordinates) + if numpy.iscomplexobj(coordinates): + raise TypeError('Complex type not supported') + output_shape = coordinates.shape[1:] + if input.ndim < 1 or len(output_shape) < 1: + raise RuntimeError('input and output rank must be > 0') + if coordinates.shape[0] != input.ndim: + raise RuntimeError('invalid shape for coordinate array') + mode = _ni_support._extend_mode_to_code(mode) + if prefilter and order > 1: + filtered = spline_filter(input, order, output=numpy.float64) + else: + filtered = input + output = _ni_support._get_output(output, input, + shape=output_shape) + _nd_image.geometric_transform(filtered, None, coordinates, None, None, + output, order, mode, cval, None, None) + return output + + +@docfiller +def affine_transform(input, matrix, offset=0.0, output_shape=None, + output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """ + Apply an affine transformation. + + Given an output image pixel index vector ``o``, the pixel value + is determined from the input image at position + ``np.dot(matrix, o) + offset``. + + Parameters + ---------- + %(input)s + matrix : ndarray + The inverse coordinate transformation matrix, mapping output + coordinates to input coordinates. If ``ndim`` is the number of + dimensions of ``input``, the given matrix must have one of the + following shapes: + + - ``(ndim, ndim)``: the linear transformation matrix for each + output coordinate. + - ``(ndim,)``: assume that the 2D transformation matrix is + diagonal, with the diagonal specified by the given value. A more + efficient algorithm is then used that exploits the separability + of the problem. + - ``(ndim + 1, ndim + 1)``: assume that the transformation is + specified using homogeneous coordinates [1]_. In this case, any + value passed to ``offset`` is ignored. + - ``(ndim, ndim + 1)``: as above, but the bottom row of a + homogeneous transformation matrix is always ``[0, 0, ..., 1]``, + and may be omitted. + + offset : float or sequence, optional + The offset into the array where the transform is applied. If a float, + `offset` is the same for each axis. If a sequence, `offset` should + contain one value for each axis. + output_shape : tuple of ints, optional + Shape tuple. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode)s + %(cval)s + %(prefilter)s + + Returns + ------- + affine_transform : ndarray + The transformed input. + + Notes + ----- + The given matrix and offset are used to find for each point in the + output the corresponding coordinates in the input by an affine + transformation. The value of the input at those coordinates is + determined by spline interpolation of the requested order. Points + outside the boundaries of the input are filled according to the given + mode. + + .. versionchanged:: 0.18.0 + Previously, the exact interpretation of the affine transformation + depended on whether the matrix was supplied as a one-dimensional or + two-dimensional array. 
If a one-dimensional array was supplied + to the matrix parameter, the output pixel value at index ``o`` + was determined from the input image at position + ``matrix * (o + offset)``. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + if output_shape is None: + output_shape = input.shape + if input.ndim < 1 or len(output_shape) < 1: + raise RuntimeError('input and output rank must be > 0') + mode = _ni_support._extend_mode_to_code(mode) + if prefilter and order > 1: + filtered = spline_filter(input, order, output=numpy.float64) + else: + filtered = input + output = _ni_support._get_output(output, input, + shape=output_shape) + matrix = numpy.asarray(matrix, dtype=numpy.float64) + if matrix.ndim not in [1, 2] or matrix.shape[0] < 1: + raise RuntimeError('no proper affine matrix provided') + if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and + (matrix.shape[0] in [input.ndim, input.ndim + 1])): + if matrix.shape[0] == input.ndim + 1: + exptd = [0] * input.ndim + [1] + if not numpy.all(matrix[input.ndim] == exptd): + msg = ('Expected homogeneous transformation matrix with ' + 'shape %s for image shape %s, but bottom row was ' + 'not equal to %s' % (matrix.shape, input.shape, exptd)) + raise ValueError(msg) + # assume input is homogeneous coordinate transformation matrix + offset = matrix[:input.ndim, input.ndim] + matrix = matrix[:input.ndim, :input.ndim] + if matrix.shape[0] != input.ndim: + raise RuntimeError('affine matrix has wrong number of rows') + if matrix.ndim == 2 and matrix.shape[1] != output.ndim: + raise RuntimeError('affine matrix has wrong number of columns') + if not matrix.flags.contiguous: + matrix = matrix.copy() + offset = _ni_support._normalize_sequence(offset, input.ndim) + offset = numpy.asarray(offset, dtype=numpy.float64) + if offset.ndim != 1 or offset.shape[0] < 1: + raise RuntimeError('no proper offset provided') + if not offset.flags.contiguous: + offset = offset.copy() + if matrix.ndim == 1: + warnings.warn( + "The behaviour of affine_transform with a one-dimensional " + "array supplied for the matrix parameter has changed in " + "scipy 0.18.0." + ) + _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order, + mode, cval) + else: + _nd_image.geometric_transform(filtered, None, None, matrix, offset, + output, order, mode, cval, None, None) + return output + + +@docfiller +def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, + prefilter=True): + """ + Shift an array. + + The array is shifted using spline interpolation of the requested order. + Points outside the boundaries of the input are filled according to the + given mode. + + Parameters + ---------- + %(input)s + shift : float or sequence + The shift along the axes. If a float, `shift` is the same for each + axis. If a sequence, `shift` should contain one value for each axis. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode)s + %(cval)s + %(prefilter)s + + Returns + ------- + shift : ndarray + The shifted input. 
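+
+    Notes
+    -----
+    A positive `shift` moves the array content towards higher indices;
+    points shifted in from outside the boundaries are filled according
+    to `mode`.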
+ + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + if input.ndim < 1: + raise RuntimeError('input and output rank must be > 0') + mode = _ni_support._extend_mode_to_code(mode) + if prefilter and order > 1: + filtered = spline_filter(input, order, output=numpy.float64) + else: + filtered = input + output = _ni_support._get_output(output, input) + shift = _ni_support._normalize_sequence(shift, input.ndim) + shift = [-ii for ii in shift] + shift = numpy.asarray(shift, dtype=numpy.float64) + if not shift.flags.contiguous: + shift = shift.copy() + _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval) + return output + + +@docfiller +def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, + prefilter=True): + """ + Zoom an array. + + The array is zoomed using spline interpolation of the requested order. + + Parameters + ---------- + %(input)s + zoom : float or sequence + The zoom factor along the axes. If a float, `zoom` is the same for each + axis. If a sequence, `zoom` should contain one value for each axis. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode)s + %(cval)s + %(prefilter)s + + Returns + ------- + zoom : ndarray + The zoomed input. + + Examples + -------- + >>> from scipy import ndimage, misc + >>> import matplotlib.pyplot as plt + + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = misc.ascent() + >>> result = ndimage.zoom(ascent, 3.0) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + + >>> print(ascent.shape) + (512, 512) + + >>> print(result.shape) + (1536, 1536) + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + if input.ndim < 1: + raise RuntimeError('input and output rank must be > 0') + mode = _ni_support._extend_mode_to_code(mode) + if prefilter and order > 1: + filtered = spline_filter(input, order, output=numpy.float64) + else: + filtered = input + zoom = _ni_support._normalize_sequence(zoom, input.ndim) + output_shape = tuple( + [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)]) + + output_shape_old = tuple( + [int(ii * jj) for ii, jj in zip(input.shape, zoom)]) + if output_shape != output_shape_old: + warnings.warn( + "From scipy 0.13.0, the output shape of zoom() is calculated " + "with round() instead of int() - for these inputs the size of " + "the returned array has changed.", UserWarning) + + zoom_div = numpy.array(output_shape, float) - 1 + # Zooming to infinite values is unpredictable, so just choose + # zoom factor 1 instead + zoom = numpy.divide(numpy.array(input.shape) - 1, zoom_div, + out=numpy.ones_like(input.shape, dtype=numpy.float64), + where=zoom_div != 0) + + output = _ni_support._get_output(output, input, + shape=output_shape) + zoom = numpy.ascontiguousarray(zoom) + _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval) + return output + + +def _minmax(coor, minc, maxc): + if coor[0] < minc[0]: + minc[0] = coor[0] + if coor[0] > maxc[0]: + maxc[0] = coor[0] + if coor[1] < minc[1]: + minc[1] = coor[1] + if coor[1] > maxc[1]: + maxc[1] = coor[1] + return minc, maxc + + +@docfiller +def rotate(input, angle, axes=(1, 0), 
reshape=True, output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """ + Rotate an array. + + The array is rotated in the plane defined by the two axes given by the + `axes` parameter using spline interpolation of the requested order. + + Parameters + ---------- + %(input)s + angle : float + The rotation angle in degrees. + axes : tuple of 2 ints, optional + The two axes that define the plane of rotation. Default is the first + two axes. + reshape : bool, optional + If `reshape` is true, the output shape is adapted so that the input + array is contained completely in the output. Default is True. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode)s + %(cval)s + %(prefilter)s + + Returns + ------- + rotate : ndarray + The rotated input. + + """ + input = numpy.asarray(input) + axes = list(axes) + rank = input.ndim + if axes[0] < 0: + axes[0] += rank + if axes[1] < 0: + axes[1] += rank + if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank: + raise RuntimeError('invalid rotation plane specified') + if axes[0] > axes[1]: + axes = axes[1], axes[0] + angle = numpy.pi / 180 * angle + m11 = math.cos(angle) + m12 = math.sin(angle) + m21 = -math.sin(angle) + m22 = math.cos(angle) + matrix = numpy.array([[m11, m12], + [m21, m22]], dtype=numpy.float64) + iy = input.shape[axes[0]] + ix = input.shape[axes[1]] + if reshape: + mtrx = numpy.array([[m11, -m21], + [-m12, m22]], dtype=numpy.float64) + minc = [0, 0] + maxc = [0, 0] + coor = numpy.dot(mtrx, [0, ix]) + minc, maxc = _minmax(coor, minc, maxc) + coor = numpy.dot(mtrx, [iy, 0]) + minc, maxc = _minmax(coor, minc, maxc) + coor = numpy.dot(mtrx, [iy, ix]) + minc, maxc = _minmax(coor, minc, maxc) + oy = int(maxc[0] - minc[0] + 0.5) + ox = int(maxc[1] - minc[1] + 0.5) + else: + oy = input.shape[axes[0]] + ox = input.shape[axes[1]] + offset = numpy.zeros((2,), dtype=numpy.float64) + offset[0] = float(oy) / 2.0 - 0.5 + offset[1] = float(ox) / 2.0 - 0.5 + offset = numpy.dot(matrix, offset) + tmp = numpy.zeros((2,), dtype=numpy.float64) + tmp[0] = float(iy) / 2.0 - 0.5 + tmp[1] = float(ix) / 2.0 - 0.5 + offset = tmp - offset + output_shape = list(input.shape) + output_shape[axes[0]] = oy + output_shape[axes[1]] = ox + output_shape = tuple(output_shape) + output = _ni_support._get_output(output, input, + shape=output_shape) + if input.ndim <= 2: + affine_transform(input, matrix, offset, output_shape, output, + order, mode, cval, prefilter) + else: + coordinates = [] + size = numpy.product(input.shape, axis=0) + size //= input.shape[axes[0]] + size //= input.shape[axes[1]] + for ii in range(input.ndim): + if ii not in axes: + coordinates.append(0) + else: + coordinates.append(slice(None, None, None)) + iter_axes = list(range(input.ndim)) + iter_axes.reverse() + iter_axes.remove(axes[0]) + iter_axes.remove(axes[1]) + os = (output_shape[axes[0]], output_shape[axes[1]]) + for ii in range(size): + ia = input[tuple(coordinates)] + oa = output[tuple(coordinates)] + affine_transform(ia, matrix, offset, os, oa, order, mode, + cval, prefilter) + for jj in iter_axes: + if coordinates[jj] < input.shape[jj] - 1: + coordinates[jj] += 1 + break + else: + coordinates[jj] = 0 + return output diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/interpolation.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/interpolation.pyc new file mode 100644 index 0000000..a24ad25 Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/scipy/ndimage/interpolation.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/io.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/io.py
new file mode 100644
index 0000000..7e30406
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/io.py
@@ -0,0 +1,36 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+
+
+_have_pil = True
+try:
+    from scipy.misc.pilutil import imread as _imread
+except ImportError:
+    _have_pil = False
+
+
+__all__ = ['imread']
+
+
+# Use the implementation of `imread` in `scipy.misc.pilutil.imread`.
+# If it weren't for the different names of the first arguments of
+# ndimage.io.imread and misc.pilutil.imread, we could simplify this file
+# by writing
+#     from scipy.misc.pilutil import imread
+# Unfortunately, because the argument names are different, that
+# introduces a backwards incompatibility.
+
+@np.deprecate(message="`imread` is deprecated in SciPy 1.0.0.\n"
+              "Use ``matplotlib.pyplot.imread`` instead.")
+def imread(fname, flatten=False, mode=None):
+    if _have_pil:
+        return _imread(fname, flatten, mode)
+    raise ImportError("Could not import the Python Imaging Library (PIL)"
+                      " required to load image files. Please refer to"
+                      " http://pillow.readthedocs.org/en/latest/installation.html"
+                      " for installation instructions.")
+
+
+if _have_pil and _imread.__doc__ is not None:
+    imread.__doc__ = _imread.__doc__.replace('name : str', 'fname : str')
diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/io.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/io.pyc
new file mode 100644
index 0000000..e272100
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/io.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/measurements.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/measurements.py
new file mode 100644
index 0000000..38fbe8c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/measurements.py
@@ -0,0 +1,1467 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import division, print_function, absolute_import + +import numpy +import numpy as np +from . import _ni_support +from . import _ni_label +from . import _nd_image +from . import morphology + +__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean', + 'variance', 'standard_deviation', 'minimum', 'maximum', 'median', + 'minimum_position', 'maximum_position', 'extrema', 'center_of_mass', + 'histogram', 'watershed_ift'] + + +def label(input, structure=None, output=None): + """ + Label features in an array. + + Parameters + ---------- + input : array_like + An array-like object to be labeled. Any non-zero values in `input` are + counted as features and zero values are considered the background. + structure : array_like, optional + A structuring element that defines feature connections. + `structure` must be symmetric. If no structuring element is provided, + one is automatically generated with a squared connectivity equal to + one. That is, for a 2-D `input` array, the default structuring element + is:: + + [[0,1,0], + [1,1,1], + [0,1,0]] + + output : (None, data-type, array_like), optional + If `output` is a data type, it specifies the type of the resulting + labeled feature array + If `output` is an array-like object, then `output` will be updated + with the labeled features from this function. This function can + operate in-place, by passing output=input. + Note that the output must be able to store the largest label, or this + function will raise an Exception. + + Returns + ------- + label : ndarray or int + An integer ndarray where each unique feature in `input` has a unique + label in the returned array. + num_features : int + How many objects were found. + + If `output` is None, this function returns a tuple of + (`labeled_array`, `num_features`). + + If `output` is a ndarray, then it will be updated with values in + `labeled_array` and only `num_features` will be returned by this + function. + + See Also + -------- + find_objects : generate a list of slices for the labeled features (or + objects); useful for finding features' position or + dimensions + + Examples + -------- + Create an image with some features, then label it using the default + (cross-shaped) structuring element: + + >>> from scipy.ndimage import label, generate_binary_structure + >>> a = np.array([[0,0,1,1,0,0], + ... [0,0,0,1,0,0], + ... [1,1,0,0,1,0], + ... [0,0,0,1,0,0]]) + >>> labeled_array, num_features = label(a) + + Each of the 4 features are labeled with a different integer: + + >>> num_features + 4 + >>> labeled_array + array([[0, 0, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [2, 2, 0, 0, 3, 0], + [0, 0, 0, 4, 0, 0]]) + + Generate a structuring element that will consider features connected even + if they touch diagonally: + + >>> s = generate_binary_structure(2,2) + + or, + + >>> s = [[1,1,1], + ... [1,1,1], + ... 
[1,1,1]] + + Label the image using the new structuring element: + + >>> labeled_array, num_features = label(a, structure=s) + + Show the 2 labeled features (note that features 1, 3, and 4 from above are + now considered a single feature): + + >>> num_features + 2 + >>> labeled_array + array([[0, 0, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [2, 2, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0]]) + + """ + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + if structure is None: + structure = morphology.generate_binary_structure(input.ndim, 1) + structure = numpy.asarray(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have equal rank') + for ii in structure.shape: + if ii != 3: + raise ValueError('structure dimensions must be equal to 3') + + # Use 32 bits if it's large enough for this image. + # _ni_label.label() needs two entries for background and + # foreground tracking + need_64bits = input.size >= (2**31 - 2) + + if isinstance(output, numpy.ndarray): + if output.shape != input.shape: + raise ValueError("output shape not correct") + caller_provided_output = True + else: + caller_provided_output = False + if output is None: + output = np.empty(input.shape, np.intp if need_64bits else np.int32) + else: + output = np.empty(input.shape, output) + + # handle scalars, 0-dim arrays + if input.ndim == 0 or input.size == 0: + if input.ndim == 0: + # scalar + maxlabel = 1 if (input != 0) else 0 + output[...] = maxlabel + else: + # 0-dim + maxlabel = 0 + if caller_provided_output: + return maxlabel + else: + return output, maxlabel + + try: + max_label = _ni_label._label(input, structure, output) + except _ni_label.NeedMoreBits: + # Make another attempt with enough bits, then try to cast to the + # new type. + tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32) + max_label = _ni_label._label(input, structure, tmp_output) + output[...] = tmp_output[...] + if not np.all(output == tmp_output): + # refuse to return bad results + raise RuntimeError("insufficient bit-depth in requested output type") + + if caller_provided_output: + # result was written in-place + return max_label + else: + return output, max_label + + +def find_objects(input, max_label=0): + """ + Find objects in a labeled array. + + Parameters + ---------- + input : ndarray of ints + Array containing objects defined by different labels. Labels with + value 0 are ignored. + max_label : int, optional + Maximum label to be searched for in `input`. If max_label is not + given, the positions of all objects are returned. + + Returns + ------- + object_slices : list of tuples + A list of tuples, with each tuple containing N slices (with N the + dimension of the input array). Slices correspond to the minimal + parallelepiped that contains the object. If a number is missing, + None is returned instead of a slice. + + See Also + -------- + label, center_of_mass + + Notes + ----- + This function is very useful for isolating a volume of interest inside + a 3-D array, that cannot be "seen through". 
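+    As the last example below shows, the returned slices can be used
+    directly to index `input` and extract each object.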
+ + Examples + -------- + >>> from scipy import ndimage + >>> a = np.zeros((6,6), dtype=int) + >>> a[2:4, 2:4] = 1 + >>> a[4, 4] = 1 + >>> a[:2, :3] = 2 + >>> a[0, 5] = 3 + >>> a + array([[2, 2, 2, 0, 0, 3], + [2, 2, 2, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0]]) + >>> ndimage.find_objects(a) + [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None)), (slice(0, 1, None), slice(5, 6, None))] + >>> ndimage.find_objects(a, max_label=2) + [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))] + >>> ndimage.find_objects(a == 1, max_label=2) + [(slice(2, 5, None), slice(2, 5, None)), None] + + >>> loc = ndimage.find_objects(a)[0] + >>> a[loc] + array([[1, 1, 0], + [1, 1, 0], + [0, 0, 1]]) + + """ + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + + if max_label < 1: + max_label = input.max() + + return _nd_image.find_objects(input, max_label) + + +def labeled_comprehension(input, labels, index, func, out_dtype, default, pass_positions=False): + """ + Roughly equivalent to [func(input[labels == i]) for i in index]. + + Sequentially applies an arbitrary function (that works on array_like input) + to subsets of an n-D image array specified by `labels` and `index`. + The option exists to provide the function with positional parameters as the + second argument. + + Parameters + ---------- + input : array_like + Data from which to select `labels` to process. + labels : array_like or None + Labels to objects in `input`. + If not None, array must be same shape as `input`. + If None, `func` is applied to raveled `input`. + index : int, sequence of ints or None + Subset of `labels` to which to apply `func`. + If a scalar, a single value is returned. + If None, `func` is applied to all non-zero values of `labels`. + func : callable + Python function to apply to `labels` from `input`. + out_dtype : dtype + Dtype to use for `result`. + default : int, float or None + Default return value when a element of `index` does not exist + in `labels`. + pass_positions : bool, optional + If True, pass linear indices to `func` as a second argument. + Default is False. + + Returns + ------- + result : ndarray + Result of applying `func` to each of `labels` to `input` in `index`. + + Examples + -------- + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> lbl, nlbl = ndimage.label(a) + >>> lbls = np.arange(1, nlbl+1) + >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0) + array([ 2.75, 5.5 , 6. ]) + + Falling back to `default`: + + >>> lbls = np.arange(1, nlbl+2) + >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1) + array([ 2.75, 5.5 , 6. , -1. ]) + + Passing positions: + + >>> def fn(val, pos): + ... print("fn says: %s : %s" % (val, pos)) + ... return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum()) + ... 
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True) + fn says: [1 2 5 3] : [0 1 4 5] + fn says: [4 7] : [ 7 11] + fn says: [9 3] : [12 13] + array([ 11., 11., -12., 0.]) + + """ + + as_scalar = numpy.isscalar(index) + input = numpy.asarray(input) + + if pass_positions: + positions = numpy.arange(input.size).reshape(input.shape) + + if labels is None: + if index is not None: + raise ValueError("index without defined labels") + if not pass_positions: + return func(input.ravel()) + else: + return func(input.ravel(), positions.ravel()) + + try: + input, labels = numpy.broadcast_arrays(input, labels) + except ValueError: + raise ValueError("input and labels must have the same shape " + "(excepting dimensions with width 1)") + + if index is None: + if not pass_positions: + return func(input[labels > 0]) + else: + return func(input[labels > 0], positions[labels > 0]) + + index = numpy.atleast_1d(index) + if np.any(index.astype(labels.dtype).astype(index.dtype) != index): + raise ValueError("Cannot convert index values from <%s> to <%s> " + "(labels' type) without loss of precision" % + (index.dtype, labels.dtype)) + + index = index.astype(labels.dtype) + + # optimization: find min/max in index, and select those parts of labels, input, and positions + lo = index.min() + hi = index.max() + mask = (labels >= lo) & (labels <= hi) + + # this also ravels the arrays + labels = labels[mask] + input = input[mask] + if pass_positions: + positions = positions[mask] + + # sort everything by labels + label_order = labels.argsort() + labels = labels[label_order] + input = input[label_order] + if pass_positions: + positions = positions[label_order] + + index_order = index.argsort() + sorted_index = index[index_order] + + def do_map(inputs, output): + """labels must be sorted""" + nidx = sorted_index.size + + # Find boundaries for each stretch of constant labels + # This could be faster, but we already paid N log N to sort labels. + lo = numpy.searchsorted(labels, sorted_index, side='left') + hi = numpy.searchsorted(labels, sorted_index, side='right') + + for i, l, h in zip(range(nidx), lo, hi): + if l == h: + continue + output[i] = func(*[inp[l:h] for inp in inputs]) + + temp = numpy.empty(index.shape, out_dtype) + temp[:] = default + if not pass_positions: + do_map([input], temp) + else: + do_map([input, positions], temp) + + output = numpy.zeros(index.shape, out_dtype) + output[index_order] = temp + if as_scalar: + output = output[0] + + return output + + +def _safely_castable_to_int(dt): + """Test whether the numpy data type `dt` can be safely cast to an int.""" + int_size = np.dtype(int).itemsize + safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or + (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size)) + return safe + + +def _stats(input, labels=None, index=None, centered=False): + """Count, sum, and optionally compute (sum - centre)^2 of input by label + + Parameters + ---------- + input : array_like, n-dimensional + The input data to be analyzed. + labels : array_like (n-dimensional), optional + The labels of the data in `input`. This array must be broadcast + compatible with `input`; typically it is the same shape as `input`. + If `labels` is None, all nonzero values in `input` are treated as + the single labeled group. + index : label or sequence of labels, optional + These are the labels of the groups for which the stats are computed. + If `index` is None, the stats are computed for the single group where + `labels` is greater than 0. 
+ centered : bool, optional + If True, the centered sum of squares for each labeled group is + also returned. Default is False. + + Returns + ------- + counts : int or ndarray of ints + The number of elements in each labeled group. + sums : scalar or ndarray of scalars + The sums of the values in each labeled group. + sums_c : scalar or ndarray of scalars, optional + The sums of mean-centered squares of the values in each labeled group. + This is only returned if `centered` is True. + + """ + def single_group(vals): + if centered: + vals_c = vals - vals.mean() + return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum() + else: + return vals.size, vals.sum() + + if labels is None: + return single_group(input) + + # ensure input and labels match sizes + input, labels = numpy.broadcast_arrays(input, labels) + + if index is None: + return single_group(input[labels > 0]) + + if numpy.isscalar(index): + return single_group(input[labels == index]) + + def _sum_centered(labels): + # `labels` is expected to be an ndarray with the same shape as `input`. + # It must contain the label indices (which are not necessarily the labels + # themselves). + means = sums / counts + centered_input = input - means[labels] + # bincount expects 1d inputs, so we ravel the arguments. + bc = numpy.bincount(labels.ravel(), + weights=(centered_input * + centered_input.conjugate()).ravel()) + return bc + + # Remap labels to unique integers if necessary, or if the largest + # label is larger than the number of values. + + if (not _safely_castable_to_int(labels.dtype) or + labels.min() < 0 or labels.max() > labels.size): + # Use numpy.unique to generate the label indices. `new_labels` will + # be 1-d, but it should be interpreted as the flattened n-d array of + # label indices. + unique_labels, new_labels = numpy.unique(labels, return_inverse=True) + counts = numpy.bincount(new_labels) + sums = numpy.bincount(new_labels, weights=input.ravel()) + if centered: + # Compute the sum of the mean-centered squares. + # We must reshape new_labels to the n-d shape of `input` before + # passing it _sum_centered. + sums_c = _sum_centered(new_labels.reshape(labels.shape)) + idxs = numpy.searchsorted(unique_labels, index) + # make all of idxs valid + idxs[idxs >= unique_labels.size] = 0 + found = (unique_labels[idxs] == index) + else: + # labels are an integer type allowed by bincount, and there aren't too + # many, so call bincount directly. + counts = numpy.bincount(labels.ravel()) + sums = numpy.bincount(labels.ravel(), weights=input.ravel()) + if centered: + sums_c = _sum_centered(labels) + # make sure all index values are valid + idxs = numpy.asanyarray(index, numpy.int).copy() + found = (idxs >= 0) & (idxs < counts.size) + idxs[~found] = 0 + + counts = counts[idxs] + counts[~found] = 0 + sums = sums[idxs] + sums[~found] = 0 + + if not centered: + return (counts, sums) + else: + sums_c = sums_c[idxs] + sums_c[~found] = 0 + return (counts, sums, sums_c) + + +def sum(input, labels=None, index=None): + """ + Calculate the sum of the values of the array. + + Parameters + ---------- + input : array_like + Values of `input` inside the regions defined by `labels` + are summed together. + labels : array_like of ints, optional + Assign labels to the values of the array. Has to have the same shape as + `input`. + index : array_like, optional + A single label number or a sequence of label numbers of + the objects to be measured. 
+ + Returns + ------- + sum : ndarray or scalar + An array of the sums of values of `input` inside the regions defined + by `labels` with the same shape as `index`. If 'index' is None or scalar, + a scalar is returned. + + See also + -------- + mean, median + + Examples + -------- + >>> from scipy import ndimage + >>> input = [0,1,2,3] + >>> labels = [1,1,2,2] + >>> ndimage.sum(input, labels, index=[1,2]) + [1.0, 5.0] + >>> ndimage.sum(input, labels, index=1) + 1 + >>> ndimage.sum(input, labels) + 6 + + + """ + count, sum = _stats(input, labels, index) + return sum + + +def mean(input, labels=None, index=None): + """ + Calculate the mean of the values of an array at labels. + + Parameters + ---------- + input : array_like + Array on which to compute the mean of elements over distinct + regions. + labels : array_like, optional + Array of labels of same shape, or broadcastable to the same shape as + `input`. All elements sharing the same label form one region over + which the mean of the elements is computed. + index : int or sequence of ints, optional + Labels of the objects over which the mean is to be computed. + Default is None, in which case the mean for all values where label is + greater than 0 is calculated. + + Returns + ------- + out : list + Sequence of same length as `index`, with the mean of the different + regions labeled by the labels in `index`. + + See also + -------- + ndimage.variance, ndimage.standard_deviation, ndimage.minimum, + ndimage.maximum, ndimage.sum + ndimage.label + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.arange(25).reshape((5,5)) + >>> labels = np.zeros_like(a) + >>> labels[3:5,3:5] = 1 + >>> index = np.unique(labels) + >>> labels + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 1], + [0, 0, 0, 1, 1]]) + >>> index + array([0, 1]) + >>> ndimage.mean(a, labels=labels, index=index) + [10.285714285714286, 21.0] + + """ + + count, sum = _stats(input, labels, index) + return sum / numpy.asanyarray(count).astype(numpy.float) + + +def variance(input, labels=None, index=None): + """ + Calculate the variance of the values of an n-D image array, optionally at + specified sub-regions. + + Parameters + ---------- + input : array_like + Nd-image data to process. + labels : array_like, optional + Labels defining sub-regions in `input`. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + `labels` to include in output. If None (default), all values where + `labels` is non-zero are used. + + Returns + ------- + variance : float or ndarray + Values of variance, for each sub-region if `labels` and `index` are + specified. + + See Also + -------- + label, standard_deviation, maximum, minimum, extrema + + Examples + -------- + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> ndimage.variance(a) + 7.609375 + + Features to process can be specified using `labels` and `index`: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1)) + array([ 2.1875, 2.25 , 9. ]) + + If no index is given, all non-zero `labels` are processed: + + >>> ndimage.variance(a, lbl) + 6.1875 + + """ + count, sum, sum_c_sq = _stats(input, labels, index, centered=True) + return sum_c_sq / np.asanyarray(count).astype(float) + + +def standard_deviation(input, labels=None, index=None): + """ + Calculate the standard deviation of the values of an n-D image array, + optionally at specified sub-regions. 
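+
+    The standard deviation is computed as the square root of the result
+    of `variance` over the same labels and index.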
+ + Parameters + ---------- + input : array_like + Nd-image data to process. + labels : array_like, optional + Labels to identify sub-regions in `input`. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + `labels` to include in output. If None (default), all values where + `labels` is non-zero are used. + + Returns + ------- + standard_deviation : float or ndarray + Values of standard deviation, for each sub-region if `labels` and + `index` are specified. + + See Also + -------- + label, variance, maximum, minimum, extrema + + Examples + -------- + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> ndimage.standard_deviation(a) + 2.7585095613392387 + + Features to process can be specified using `labels` and `index`: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1)) + array([ 1.479, 1.5 , 3. ]) + + If no index is given, non-zero `labels` are processed: + + >>> ndimage.standard_deviation(a, lbl) + 2.4874685927665499 + + """ + return numpy.sqrt(variance(input, labels, index)) + + +def _select(input, labels=None, index=None, find_min=False, find_max=False, + find_min_positions=False, find_max_positions=False, + find_median=False): + """Returns min, max, or both, plus their positions (if requested), and + median.""" + + input = numpy.asanyarray(input) + + find_positions = find_min_positions or find_max_positions + positions = None + if find_positions: + positions = numpy.arange(input.size).reshape(input.shape) + + def single_group(vals, positions): + result = [] + if find_min: + result += [vals.min()] + if find_min_positions: + result += [positions[vals == vals.min()][0]] + if find_max: + result += [vals.max()] + if find_max_positions: + result += [positions[vals == vals.max()][0]] + if find_median: + result += [numpy.median(vals)] + return result + + if labels is None: + return single_group(input, positions) + + # ensure input and labels match sizes + input, labels = numpy.broadcast_arrays(input, labels) + + if index is None: + mask = (labels > 0) + masked_positions = None + if find_positions: + masked_positions = positions[mask] + return single_group(input[mask], masked_positions) + + if numpy.isscalar(index): + mask = (labels == index) + masked_positions = None + if find_positions: + masked_positions = positions[mask] + return single_group(input[mask], masked_positions) + + # remap labels to unique integers if necessary, or if the largest + # label is larger than the number of values. + if (not _safely_castable_to_int(labels.dtype) or + labels.min() < 0 or labels.max() > labels.size): + # remap labels, and indexes + unique_labels, labels = numpy.unique(labels, return_inverse=True) + idxs = numpy.searchsorted(unique_labels, index) + + # make all of idxs valid + idxs[idxs >= unique_labels.size] = 0 + found = (unique_labels[idxs] == index) + else: + # labels are an integer type, and there aren't too many. 
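+        # Query labels outside the valid range are parked on a sentinel
+        # slot (labels.max() + 1); the per-label result arrays built below
+        # have labels.max() + 2 entries, so gathering through `idxs` stays
+        # in bounds.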
+ idxs = numpy.asanyarray(index, numpy.int).copy() + found = (idxs >= 0) & (idxs <= labels.max()) + + idxs[~ found] = labels.max() + 1 + + if find_median: + order = numpy.lexsort((input.ravel(), labels.ravel())) + else: + order = input.ravel().argsort() + input = input.ravel()[order] + labels = labels.ravel()[order] + if find_positions: + positions = positions.ravel()[order] + + result = [] + if find_min: + mins = numpy.zeros(labels.max() + 2, input.dtype) + mins[labels[::-1]] = input[::-1] + result += [mins[idxs]] + if find_min_positions: + minpos = numpy.zeros(labels.max() + 2, int) + minpos[labels[::-1]] = positions[::-1] + result += [minpos[idxs]] + if find_max: + maxs = numpy.zeros(labels.max() + 2, input.dtype) + maxs[labels] = input + result += [maxs[idxs]] + if find_max_positions: + maxpos = numpy.zeros(labels.max() + 2, int) + maxpos[labels] = positions + result += [maxpos[idxs]] + if find_median: + locs = numpy.arange(len(labels)) + lo = numpy.zeros(labels.max() + 2, numpy.int) + lo[labels[::-1]] = locs[::-1] + hi = numpy.zeros(labels.max() + 2, numpy.int) + hi[labels] = locs + lo = lo[idxs] + hi = hi[idxs] + # lo is an index to the lowest value in input for each label, + # hi is an index to the largest value. + # move them to be either the same ((hi - lo) % 2 == 0) or next + # to each other ((hi - lo) % 2 == 1), then average. + step = (hi - lo) // 2 + lo += step + hi -= step + result += [(input[lo] + input[hi]) / 2.0] + + return result + + +def minimum(input, labels=None, index=None): + """ + Calculate the minimum of the values of an array over labeled regions. + + Parameters + ---------- + input : array_like + Array_like of values. For each region specified by `labels`, the + minimal values of `input` over the region is computed. + labels : array_like, optional + An array_like of integers marking different regions over which the + minimum value of `input` is to be computed. `labels` must have the + same shape as `input`. If `labels` is not specified, the minimum + over the whole array is returned. + index : array_like, optional + A list of region labels that are taken into account for computing the + minima. If index is None, the minimum over all elements where `labels` + is non-zero is returned. + + Returns + ------- + minimum : float or list of floats + List of minima of `input` over the regions determined by `labels` and + whose index is in `index`. If `index` or `labels` are not specified, a + float is returned: the minimal value of `input` if `labels` is None, + and the minimal value of elements where `labels` is greater than zero + if `index` is None. + + See also + -------- + label, maximum, median, minimum_position, extrema, sum, mean, variance, + standard_deviation + + Notes + ----- + The function returns a Python list and not a Numpy array, use + `np.array` to convert the list to an array. + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> labels, labels_nb = ndimage.label(a) + >>> labels + array([[1, 1, 0, 0], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1)) + [1.0, 4.0, 3.0] + >>> ndimage.minimum(a) + 0.0 + >>> ndimage.minimum(a, labels=labels) + 1.0 + + """ + return _select(input, labels, index, find_min=True)[0] + + +def maximum(input, labels=None, index=None): + """ + Calculate the maximum of the values of an array over labeled regions. 
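+
+    For each region specified by `labels`, the maximal value of `input`
+    within the region is returned.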
+ + Parameters + ---------- + input : array_like + Array_like of values. For each region specified by `labels`, the + maximal values of `input` over the region is computed. + labels : array_like, optional + An array of integers marking different regions over which the + maximum value of `input` is to be computed. `labels` must have the + same shape as `input`. If `labels` is not specified, the maximum + over the whole array is returned. + index : array_like, optional + A list of region labels that are taken into account for computing the + maxima. If index is None, the maximum over all elements where `labels` + is non-zero is returned. + + Returns + ------- + output : float or list of floats + List of maxima of `input` over the regions determined by `labels` and + whose index is in `index`. If `index` or `labels` are not specified, a + float is returned: the maximal value of `input` if `labels` is None, + and the maximal value of elements where `labels` is greater than zero + if `index` is None. + + See also + -------- + label, minimum, median, maximum_position, extrema, sum, mean, variance, + standard_deviation + + Notes + ----- + The function returns a Python list and not a Numpy array, use + `np.array` to convert the list to an array. + + Examples + -------- + >>> a = np.arange(16).reshape((4,4)) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> labels = np.zeros_like(a) + >>> labels[:2,:2] = 1 + >>> labels[2:, 1:3] = 2 + >>> labels + array([[1, 1, 0, 0], + [1, 1, 0, 0], + [0, 2, 2, 0], + [0, 2, 2, 0]]) + >>> from scipy import ndimage + >>> ndimage.maximum(a) + 15.0 + >>> ndimage.maximum(a, labels=labels, index=[1,2]) + [5.0, 14.0] + >>> ndimage.maximum(a, labels=labels) + 14.0 + + >>> b = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> labels, labels_nb = ndimage.label(b) + >>> labels + array([[1, 1, 0, 0], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1)) + [5.0, 7.0, 9.0] + + """ + return _select(input, labels, index, find_max=True)[0] + + +def median(input, labels=None, index=None): + """ + Calculate the median of the values of an array over labeled regions. + + Parameters + ---------- + input : array_like + Array_like of values. For each region specified by `labels`, the + median value of `input` over the region is computed. + labels : array_like, optional + An array_like of integers marking different regions over which the + median value of `input` is to be computed. `labels` must have the + same shape as `input`. If `labels` is not specified, the median + over the whole array is returned. + index : array_like, optional + A list of region labels that are taken into account for computing the + medians. If index is None, the median over all elements where `labels` + is non-zero is returned. + + Returns + ------- + median : float or list of floats + List of medians of `input` over the regions determined by `labels` and + whose index is in `index`. If `index` or `labels` are not specified, a + float is returned: the median value of `input` if `labels` is None, + and the median value of elements where `labels` is greater than zero + if `index` is None. + + See also + -------- + label, minimum, maximum, extrema, sum, mean, variance, standard_deviation + + Notes + ----- + The function returns a Python list and not a Numpy array, use + `np.array` to convert the list to an array. 
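+    For a region containing an even number of elements, the median is the
+    average of the two middle values.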
+ + Examples + -------- + >>> from scipy import ndimage + >>> a = np.array([[1, 2, 0, 1], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> labels, labels_nb = ndimage.label(a) + >>> labels + array([[1, 1, 0, 2], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1)) + [2.5, 4.0, 6.0] + >>> ndimage.median(a) + 1.0 + >>> ndimage.median(a, labels=labels) + 3.0 + + """ + return _select(input, labels, index, find_median=True)[0] + + +def minimum_position(input, labels=None, index=None): + """ + Find the positions of the minimums of the values of an array at labels. + + Parameters + ---------- + input : array_like + Array_like of values. + labels : array_like, optional + An array of integers marking different regions over which the + position of the minimum value of `input` is to be computed. + `labels` must have the same shape as `input`. If `labels` is not + specified, the location of the first minimum over the whole + array is returned. + + The `labels` argument only works when `index` is specified. + index : array_like, optional + A list of region labels that are taken into account for finding the + location of the minima. If `index` is None, the ``first`` minimum + over all elements where `labels` is non-zero is returned. + + The `index` argument only works when `labels` is specified. + + Returns + ------- + output : list of tuples of ints + Tuple of ints or list of tuples of ints that specify the location + of minima of `input` over the regions determined by `labels` and + whose index is in `index`. + + If `index` or `labels` are not specified, a tuple of ints is + returned specifying the location of the first minimal value of `input`. + + See also + -------- + label, minimum, median, maximum_position, extrema, sum, mean, variance, + standard_deviation + + Examples + -------- + >>> a = np.array([[10, 20, 30], + ... [40, 80, 100], + ... [1, 100, 200]]) + >>> b = np.array([[1, 2, 0, 1], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + + >>> from scipy import ndimage + + >>> ndimage.minimum_position(a) + (2, 0) + >>> ndimage.minimum_position(b) + (0, 2) + + Features to process can be specified using `labels` and `index`: + + >>> label, pos = ndimage.label(a) + >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1)) + [(2, 0)] + + >>> label, pos = ndimage.label(b) + >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1)) + [(0, 0), (0, 3), (3, 1)] + + """ + dims = numpy.array(numpy.asarray(input).shape) + # see numpy.unravel_index to understand this line. + dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] + + result = _select(input, labels, index, find_min_positions=True)[0] + + if numpy.isscalar(result): + return tuple((result // dim_prod) % dims) + + return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] + + +def maximum_position(input, labels=None, index=None): + """ + Find the positions of the maximums of the values of an array at labels. + + For each region specified by `labels`, the position of the maximum + value of `input` within the region is returned. + + Parameters + ---------- + input : array_like + Array_like of values. + labels : array_like, optional + An array of integers marking different regions over which the + position of the maximum value of `input` is to be computed. + `labels` must have the same shape as `input`. If `labels` is not + specified, the location of the first maximum over the whole + array is returned. 
+ + The `labels` argument only works when `index` is specified. + index : array_like, optional + A list of region labels that are taken into account for finding the + location of the maxima. If `index` is None, the first maximum + over all elements where `labels` is non-zero is returned. + + The `index` argument only works when `labels` is specified. + + Returns + ------- + output : list of tuples of ints + List of tuples of ints that specify the location of maxima of + `input` over the regions determined by `labels` and whose index + is in `index`. + + If `index` or `labels` are not specified, a tuple of ints is + returned specifying the location of the ``first`` maximal value + of `input`. + + See also + -------- + label, minimum, median, maximum_position, extrema, sum, mean, variance, + standard_deviation + + """ + dims = numpy.array(numpy.asarray(input).shape) + # see numpy.unravel_index to understand this line. + dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] + + result = _select(input, labels, index, find_max_positions=True)[0] + + if numpy.isscalar(result): + return tuple((result // dim_prod) % dims) + + return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] + + +def extrema(input, labels=None, index=None): + """ + Calculate the minimums and maximums of the values of an array + at labels, along with their positions. + + Parameters + ---------- + input : ndarray + Nd-image data to process. + labels : ndarray, optional + Labels of features in input. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + Labels to include in output. If None (default), all values where + non-zero `labels` are used. + + Returns + ------- + minimums, maximums : int or ndarray + Values of minimums and maximums in each feature. + min_positions, max_positions : tuple or list of tuples + Each tuple gives the n-D coordinates of the corresponding minimum + or maximum. + + See Also + -------- + maximum, minimum, maximum_position, minimum_position, center_of_mass + + Examples + -------- + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> ndimage.extrema(a) + (0, 9, (0, 2), (3, 0)) + + Features to process can be specified using `labels` and `index`: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1)) + (array([1, 4, 3]), + array([5, 7, 9]), + [(0, 0), (1, 3), (3, 1)], + [(1, 0), (2, 3), (3, 0)]) + + If no index is given, non-zero `labels` are processed: + + >>> ndimage.extrema(a, lbl) + (1, 9, (0, 0), (3, 0)) + + """ + dims = numpy.array(numpy.asarray(input).shape) + # see numpy.unravel_index to understand this line. + dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] + + minimums, min_positions, maximums, max_positions = _select(input, labels, + index, + find_min=True, + find_max=True, + find_min_positions=True, + find_max_positions=True) + + if numpy.isscalar(minimums): + return (minimums, maximums, tuple((min_positions // dim_prod) % dims), + tuple((max_positions // dim_prod) % dims)) + + min_positions = [tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims] + max_positions = [tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims] + + return minimums, maximums, min_positions, max_positions + + +def center_of_mass(input, labels=None, index=None): + """ + Calculate the center of mass of the values of an array at labels. 
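+
+    The center of mass is the average of the grid coordinates of the
+    elements, weighted by the corresponding values of the array.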
+ + Parameters + ---------- + input : ndarray + Data from which to calculate center-of-mass. The masses can either + be positive or negative. + labels : ndarray, optional + Labels for objects in `input`, as generated by `ndimage.label`. + Only used with `index`. Dimensions must be the same as `input`. + index : int or sequence of ints, optional + Labels for which to calculate centers-of-mass. If not specified, + all labels greater than zero are used. Only used with `labels`. + + Returns + ------- + center_of_mass : tuple, or list of tuples + Coordinates of centers-of-mass. + + Examples + -------- + >>> a = np.array(([0,0,0,0], + ... [0,1,1,0], + ... [0,1,1,0], + ... [0,1,1,0])) + >>> from scipy import ndimage + >>> ndimage.measurements.center_of_mass(a) + (2.0, 1.5) + + Calculation of multiple objects in an image + + >>> b = np.array(([0,1,1,0], + ... [0,1,0,0], + ... [0,0,0,0], + ... [0,0,1,1], + ... [0,0,1,1])) + >>> lbl = ndimage.label(b)[0] + >>> ndimage.measurements.center_of_mass(b, lbl, [1,2]) + [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)] + + Negative masses are also accepted, which can occur for example when + bias is removed from measured data due to random noise. + + >>> c = np.array(([-1,0,0,0], + ... [0,-1,-1,0], + ... [0,1,-1,0], + ... [0,1,1,0])) + >>> ndimage.measurements.center_of_mass(c) + (-4.0, 1.0) + + If there are division by zero issues, the function does not raise an + error but rather issues a RuntimeWarning before returning inf and/or NaN. + + >>> d = np.array([-1, 1]) + >>> ndimage.measurements.center_of_mass(d) + (inf,) + """ + normalizer = sum(input, labels, index) + grids = numpy.ogrid[[slice(0, i) for i in input.shape]] + + results = [sum(input * grids[dir].astype(float), labels, index) / normalizer + for dir in range(input.ndim)] + + if numpy.isscalar(results[0]): + return tuple(results) + + return [tuple(v) for v in numpy.array(results).T] + + +def histogram(input, min, max, bins, labels=None, index=None): + """ + Calculate the histogram of the values of an array, optionally at labels. + + Histogram calculates the frequency of values in an array within bins + determined by `min`, `max`, and `bins`. The `labels` and `index` + keywords can limit the scope of the histogram to specified sub-regions + within the array. + + Parameters + ---------- + input : array_like + Data for which to calculate histogram. + min, max : int + Minimum and maximum values of range of histogram bins. + bins : int + Number of bins. + labels : array_like, optional + Labels for objects in `input`. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + Label or labels for which to calculate histogram. If None, all values + where label is greater than zero are used + + Returns + ------- + hist : ndarray + Histogram counts. + + Examples + -------- + >>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ], + ... [ 0. , 0.7778, 0. , 0. ], + ... [ 0. , 0. , 0. , 0. ], + ... [ 0. , 0. , 0.7181, 0.2787], + ... [ 0. , 0. 
, 0.6573, 0.3094]]) + >>> from scipy import ndimage + >>> ndimage.measurements.histogram(a, 0, 1, 10) + array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0]) + + With labels and no indices, non-zero elements are counted: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.measurements.histogram(a, 0, 1, 10, lbl) + array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0]) + + Indices can be used to count only certain objects: + + >>> ndimage.measurements.histogram(a, 0, 1, 10, lbl, 2) + array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0]) + + """ + _bins = numpy.linspace(min, max, bins + 1) + + def _hist(vals): + return numpy.histogram(vals, _bins)[0] + + return labeled_comprehension(input, labels, index, _hist, object, None, + pass_positions=False) + + +def watershed_ift(input, markers, structure=None, output=None): + """ + Apply watershed from markers using image foresting transform algorithm. + + Parameters + ---------- + input : array_like + Input. + markers : array_like + Markers are points within each watershed that form the beginning + of the process. Negative markers are considered background markers + which are processed after the other markers. + structure : structure element, optional + A structuring element defining the connectivity of the object can be + provided. If None, an element is generated with a squared + connectivity equal to one. + output : ndarray, optional + An output array can optionally be provided. The same shape as input. + + Returns + ------- + watershed_ift : ndarray + Output. Same shape as `input`. + + References + ---------- + .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image + foresting transform: theory, algorithms, and applications", + Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004. + + """ + input = numpy.asarray(input) + if input.dtype.type not in [numpy.uint8, numpy.uint16]: + raise TypeError('only 8 and 16 unsigned inputs are supported') + + if structure is None: + structure = morphology.generate_binary_structure(input.ndim, 1) + structure = numpy.asarray(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have equal rank') + for ii in structure.shape: + if ii != 3: + raise RuntimeError('structure dimensions must be equal to 3') + + if not structure.flags.contiguous: + structure = structure.copy() + markers = numpy.asarray(markers) + if input.shape != markers.shape: + raise RuntimeError('input and markers must have equal shape') + + integral_types = [numpy.int0, + numpy.int8, + numpy.int16, + numpy.int32, + numpy.int_, + numpy.int64, + numpy.intc, + numpy.intp] + + if markers.dtype.type not in integral_types: + raise RuntimeError('marker should be of integer type') + + if isinstance(output, numpy.ndarray): + if output.dtype.type not in integral_types: + raise RuntimeError('output should be of integer type') + else: + output = markers.dtype + + output = _ni_support._get_output(output, input) + _nd_image.watershed_ift(input, markers, structure, output) + return output diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/measurements.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/measurements.pyc new file mode 100644 index 0000000..768394a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/measurements.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/morphology.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/morphology.py new file mode 100644 index 0000000..8d29a5b --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/scipy/ndimage/morphology.py @@ -0,0 +1,2223 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import division, print_function, absolute_import +import warnings + +import numpy +from . import _ni_support +from . import _nd_image +from . import filters + +__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion', + 'binary_dilation', 'binary_opening', 'binary_closing', + 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes', + 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing', + 'morphological_gradient', 'morphological_laplace', 'white_tophat', + 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt', + 'distance_transform_edt'] + + +def _center_is_true(structure, origin): + structure = numpy.array(structure) + coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, + origin)]) + return bool(structure[coor]) + + +def iterate_structure(structure, iterations, origin=None): + """ + Iterate a structure by dilating it with itself. + + Parameters + ---------- + structure : array_like + Structuring element (an array of bools, for example), to be dilated with + itself. + iterations : int + number of dilations performed on the structure with itself + origin : optional + If origin is None, only the iterated structure is returned. If + not, a tuple of the iterated structure and the modified origin is + returned. + + Returns + ------- + iterate_structure : ndarray of bools + A new structuring element obtained by dilating `structure` + (`iterations` - 1) times with itself. 
+ + See also + -------- + generate_binary_structure + + Examples + -------- + >>> from scipy import ndimage + >>> struct = ndimage.generate_binary_structure(2, 1) + >>> struct.astype(int) + array([[0, 1, 0], + [1, 1, 1], + [0, 1, 0]]) + >>> ndimage.iterate_structure(struct, 2).astype(int) + array([[0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0]]) + >>> ndimage.iterate_structure(struct, 3).astype(int) + array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]]) + + """ + structure = numpy.asarray(structure) + if iterations < 2: + return structure.copy() + ni = iterations - 1 + shape = [ii + ni * (ii - 1) for ii in structure.shape] + pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))] + slc = tuple(slice(pos[ii], pos[ii] + structure.shape[ii], None) + for ii in range(len(shape))) + out = numpy.zeros(shape, bool) + out[slc] = structure != 0 + out = binary_dilation(out, structure, iterations=ni) + if origin is None: + return out + else: + origin = _ni_support._normalize_sequence(origin, structure.ndim) + origin = [iterations * o for o in origin] + return out, origin + + +def generate_binary_structure(rank, connectivity): + """ + Generate a binary structure for binary morphological operations. + + Parameters + ---------- + rank : int + Number of dimensions of the array to which the structuring element + will be applied, as returned by `np.ndim`. + connectivity : int + `connectivity` determines which elements of the output array belong + to the structure, i.e. are considered as neighbors of the central + element. Elements up to a squared distance of `connectivity` from + the center are considered neighbors. `connectivity` may range from 1 + (no diagonal elements are neighbors) to `rank` (all elements are + neighbors). + + Returns + ------- + output : ndarray of bools + Structuring element which may be used for binary morphological + operations, with `rank` dimensions and all dimensions equal to 3. + + See also + -------- + iterate_structure, binary_dilation, binary_erosion + + Notes + ----- + `generate_binary_structure` can only create structuring elements with + dimensions equal to 3, i.e. minimal dimensions. For larger structuring + elements, that are useful e.g. for eroding large objects, one may either + use `iterate_structure`, or create directly custom arrays with + numpy functions such as `numpy.ones`. 
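+    For instance, `numpy.ones((5, 5))` gives a flat 5x5 structuring
+    element in two dimensions.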
+ + Examples + -------- + >>> from scipy import ndimage + >>> struct = ndimage.generate_binary_structure(2, 1) + >>> struct + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> a = np.zeros((5,5)) + >>> a[2, 2] = 1 + >>> a + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype) + >>> b + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype) + array([[ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 1., 1., 1., 1., 1.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.]]) + >>> struct = ndimage.generate_binary_structure(2, 2) + >>> struct + array([[ True, True, True], + [ True, True, True], + [ True, True, True]], dtype=bool) + >>> struct = ndimage.generate_binary_structure(3, 1) + >>> struct # no diagonal elements + array([[[False, False, False], + [False, True, False], + [False, False, False]], + [[False, True, False], + [ True, True, True], + [False, True, False]], + [[False, False, False], + [False, True, False], + [False, False, False]]], dtype=bool) + + """ + if connectivity < 1: + connectivity = 1 + if rank < 1: + return numpy.array(True, dtype=bool) + output = numpy.fabs(numpy.indices([3] * rank) - 1) + output = numpy.add.reduce(output, 0) + return output <= connectivity + + +def _binary_erosion(input, structure, iterations, mask, output, + border_value, origin, invert, brute_force): + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + if structure is None: + structure = generate_binary_structure(input.ndim, 1) + else: + structure = numpy.asarray(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have same dimensionality') + if not structure.flags.contiguous: + structure = structure.copy() + if numpy.product(structure.shape, axis=0) < 1: + raise RuntimeError('structure must not be empty') + if mask is not None: + mask = numpy.asarray(mask) + if mask.shape != input.shape: + raise RuntimeError('mask and input must have equal sizes') + origin = _ni_support._normalize_sequence(origin, input.ndim) + cit = _center_is_true(structure, origin) + if isinstance(output, numpy.ndarray): + if numpy.iscomplexobj(output): + raise TypeError('Complex output type not supported') + else: + output = bool + output = _ni_support._get_output(output, input) + + if iterations == 1: + _nd_image.binary_erosion(input, structure, mask, output, + border_value, origin, invert, cit, 0) + return output + elif cit and not brute_force: + changed, coordinate_list = _nd_image.binary_erosion( + input, structure, mask, output, + border_value, origin, invert, cit, 1) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if not structure.shape[ii] & 1: + origin[ii] -= 1 + if mask is not None: + mask = numpy.asarray(mask, dtype=numpy.int8) + if not structure.flags.contiguous: + structure = structure.copy() + _nd_image.binary_erosion2(output, structure, mask, iterations - 1, + origin, invert, coordinate_list) + return output + else: + tmp_in = numpy.empty_like(input, dtype=bool) + tmp_out = output + if iterations >= 1 and not iterations & 1: + tmp_in, tmp_out = tmp_out, tmp_in + changed = 
_nd_image.binary_erosion( + input, structure, mask, tmp_out, + border_value, origin, invert, cit, 0) + ii = 1 + while ii < iterations or (iterations < 1 and changed): + tmp_in, tmp_out = tmp_out, tmp_in + changed = _nd_image.binary_erosion( + tmp_in, structure, mask, tmp_out, + border_value, origin, invert, cit, 0) + ii += 1 + return output + + +def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, + border_value=0, origin=0, brute_force=False): + """ + Multi-dimensional binary erosion with a given structuring element. + + Binary erosion is a mathematical morphology operation used for image + processing. + + Parameters + ---------- + input : array_like + Binary image to be eroded. Non-zero (True) elements form + the subset to be eroded. + structure : array_like, optional + Structuring element used for the erosion. Non-zero elements are + considered True. If no structuring element is provided, an element + is generated with a square connectivity equal to one. + iterations : {int, float}, optional + The erosion is repeated `iterations` times (one, by default). + If iterations is less than 1, the erosion is repeated until the + result does not change anymore. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated (eroded) in + the current iteration; if True all pixels are considered as candidates + for erosion, regardless of what happened in the previous iteration. + False by default. + + Returns + ------- + binary_erosion : ndarray of bools + Erosion of the input by the structuring element. + + See also + -------- + grey_erosion, binary_dilation, binary_closing, binary_opening, + generate_binary_structure + + Notes + ----- + Erosion [1]_ is a mathematical morphology operation [2]_ that uses a + structuring element for shrinking the shapes in an image. The binary + erosion of an image by a structuring element is the locus of the points + where a superimposition of the structuring element centered on the point + is entirely contained in the set of non-zero elements of the image. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29 + .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.zeros((7,7), dtype=int) + >>> a[1:6, 2:5] = 1 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_erosion(a).astype(a.dtype) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> #Erosion removes objects smaller than the structure + >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + return _binary_erosion(input, structure, iterations, mask, + output, border_value, origin, 0, brute_force) + + +def binary_dilation(input, structure=None, iterations=1, mask=None, + output=None, border_value=0, origin=0, + brute_force=False): + """ + Multi-dimensional binary dilation with the given structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be dilated. Non-zero (True) elements form + the subset to be dilated. + structure : array_like, optional + Structuring element used for the dilation. Non-zero elements are + considered True. If no structuring element is provided an element + is generated with a square connectivity equal to one. + iterations : {int, float}, optional + The dilation is repeated `iterations` times (one, by default). + If iterations is less than 1, the dilation is repeated until the + result does not change anymore. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated (dilated) + in the current iteration; if True all pixels are considered as + candidates for dilation, regardless of what happened in the previous + iteration. False by default. + + Returns + ------- + binary_dilation : ndarray of bools + Dilation of the input by the structuring element. + + See also + -------- + grey_dilation, binary_erosion, binary_closing, binary_opening, + generate_binary_structure + + Notes + ----- + Dilation [1]_ is a mathematical morphology operation [2]_ that uses a + structuring element for expanding the shapes in an image. The binary + dilation of an image by a structuring element is the locus of the points + covered by the structuring element, when its center lies within the + non-zero points of the image. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29 + .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.zeros((5, 5)) + >>> a[2, 2] = 1 + >>> a + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a) + array([[False, False, False, False, False], + [False, False, True, False, False], + [False, True, True, True, False], + [False, False, True, False, False], + [False, False, False, False, False]], dtype=bool) + >>> ndimage.binary_dilation(a).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> # 3x3 structuring element with connectivity 1, used by default + >>> struct1 = ndimage.generate_binary_structure(2, 1) + >>> struct1 + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> # 3x3 structuring element with connectivity 2 + >>> struct2 = ndimage.generate_binary_structure(2, 2) + >>> struct2 + array([[ True, True, True], + [ True, True, True], + [ True, True, True]], dtype=bool) + >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a, structure=struct1,\\ + ... iterations=2).astype(a.dtype) + array([[ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 1., 1., 1., 1., 1.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.]]) + + """ + input = numpy.asarray(input) + if structure is None: + structure = generate_binary_structure(input.ndim, 1) + origin = _ni_support._normalize_sequence(origin, input.ndim) + structure = numpy.asarray(structure) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if not structure.shape[ii] & 1: + origin[ii] -= 1 + + return _binary_erosion(input, structure, iterations, mask, + output, border_value, origin, 1, brute_force) + + +def binary_opening(input, structure=None, iterations=1, output=None, + origin=0, mask=None, border_value=0, brute_force=False): + """ + Multi-dimensional binary opening with the given structuring element. + + The *opening* of an input image by a structuring element is the + *dilation* of the *erosion* of the image by the structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be opened. Non-zero (True) elements form + the subset to be opened. + structure : array_like, optional + Structuring element used for the opening. Non-zero elements are + considered True. If no structuring element is provided an element + is generated with a square connectivity equal to one (i.e., only + nearest neighbors are connected to the center, diagonally-connected + elements are not considered neighbors). + iterations : {int, float}, optional + The erosion step of the opening, then the dilation step are each + repeated `iterations` times (one, by default). If `iterations` is + less than 1, each operation is repeated until the result does + not change anymore. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. 
+ origin : int or tuple of ints, optional + Placement of the filter, by default 0. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + + .. versionadded:: 1.1.0 + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + + .. versionadded:: 1.1.0 + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated in the + current iteration; if true all pixels are considered as candidates for + update, regardless of what happened in the previous iteration. + False by default. + + .. versionadded:: 1.1.0 + + Returns + ------- + binary_opening : ndarray of bools + Opening of the input by the structuring element. + + See also + -------- + grey_opening, binary_closing, binary_erosion, binary_dilation, + generate_binary_structure + + Notes + ----- + *Opening* [1]_ is a mathematical morphology operation [2]_ that + consists in the succession of an erosion and a dilation of the + input with the same structuring element. Opening therefore removes + objects smaller than the structuring element. + + Together with *closing* (`binary_closing`), opening can be used for + noise removal. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29 + .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.zeros((5,5), dtype=int) + >>> a[1:4, 1:4] = 1; a[4, 4] = 1 + >>> a + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + >>> # Opening removes small objects + >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> # Opening can also smooth corners + >>> ndimage.binary_opening(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + >>> # Opening is the dilation of the erosion of the input + >>> ndimage.binary_erosion(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]) + >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + + """ + input = numpy.asarray(input) + if structure is None: + rank = input.ndim + structure = generate_binary_structure(rank, 1) + + tmp = binary_erosion(input, structure, iterations, mask, None, + border_value, origin, brute_force) + return binary_dilation(tmp, structure, iterations, mask, output, + border_value, origin, brute_force) + + +def binary_closing(input, structure=None, iterations=1, output=None, + origin=0, mask=None, border_value=0, brute_force=False): + """ + Multi-dimensional binary closing with the given structuring element. + + The *closing* of an input image by a structuring element is the + *erosion* of the *dilation* of the image by the structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be closed. Non-zero (True) elements form + the subset to be closed. + structure : array_like, optional + Structuring element used for the closing. Non-zero elements are + considered True. 
If no structuring element is provided an element
+        is generated with a square connectivity equal to one (i.e., only
+        nearest neighbors are connected to the center, diagonally-connected
+        elements are not considered neighbors).
+    iterations : {int, float}, optional
+        The dilation step of the closing, then the erosion step are each
+        repeated `iterations` times (one, by default). If iterations is
+        less than 1, each operation is repeated until the result does
+        not change anymore.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    origin : int or tuple of ints, optional
+        Placement of the filter, by default 0.
+    mask : array_like, optional
+        If a mask is given, only those elements with a True value at
+        the corresponding mask element are modified at each iteration.
+
+        .. versionadded:: 1.1.0
+    border_value : int (cast to 0 or 1), optional
+        Value at the border in the output array.
+
+        .. versionadded:: 1.1.0
+    brute_force : boolean, optional
+        Memory condition: if False, only the pixels whose value was changed in
+        the last iteration are tracked as candidates to be updated in the
+        current iteration; if true all pixels are considered as candidates for
+        update, regardless of what happened in the previous iteration.
+        False by default.
+
+        .. versionadded:: 1.1.0
+
+    Returns
+    -------
+    binary_closing : ndarray of bools
+        Closing of the input by the structuring element.
+
+    See also
+    --------
+    grey_closing, binary_opening, binary_dilation, binary_erosion,
+    generate_binary_structure
+
+    Notes
+    -----
+    *Closing* [1]_ is a mathematical morphology operation [2]_ that
+    consists in the succession of a dilation and an erosion of the
+    input with the same structuring element. Closing therefore fills
+    holes smaller than the structuring element.
+
+    Together with *opening* (`binary_opening`), closing can be used for
+    noise removal.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29
+    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> a = np.zeros((5,5), dtype=int)
+    >>> a[1:-1, 1:-1] = 1; a[2,2] = 0
+    >>> a
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 0, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+    >>> # Closing removes small holes
+    >>> ndimage.binary_closing(a).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+    >>> # Closing is the erosion of the dilation of the input
+    >>> ndimage.binary_dilation(a).astype(int)
+    array([[0, 1, 1, 1, 0],
+           [1, 1, 1, 1, 1],
+           [1, 1, 1, 1, 1],
+           [1, 1, 1, 1, 1],
+           [0, 1, 1, 1, 0]])
+    >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+
+
+    >>> a = np.zeros((7,7), dtype=int)
+    >>> a[1:6, 2:5] = 1; a[1:3,3] = 0
+    >>> a
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 1, 0, 0],
+           [0, 0, 1, 0, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> # In addition to removing holes, closing can also
+    >>> # coarsen boundaries with fine hollows.
+ >>> ndimage.binary_closing(a).astype(int) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + input = numpy.asarray(input) + if structure is None: + rank = input.ndim + structure = generate_binary_structure(rank, 1) + + tmp = binary_dilation(input, structure, iterations, mask, None, + border_value, origin, brute_force) + return binary_erosion(tmp, structure, iterations, mask, output, + border_value, origin, brute_force) + + +def binary_hit_or_miss(input, structure1=None, structure2=None, + output=None, origin1=0, origin2=None): + """ + Multi-dimensional binary hit-or-miss transform. + + The hit-or-miss transform finds the locations of a given pattern + inside the input image. + + Parameters + ---------- + input : array_like (cast to booleans) + Binary image where a pattern is to be detected. + structure1 : array_like (cast to booleans), optional + Part of the structuring element to be fitted to the foreground + (non-zero elements) of `input`. If no value is provided, a + structure of square connectivity 1 is chosen. + structure2 : array_like (cast to booleans), optional + Second part of the structuring element that has to miss completely + the foreground. If no value is provided, the complementary of + `structure1` is taken. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + origin1 : int or tuple of ints, optional + Placement of the first part of the structuring element `structure1`, + by default 0 for a centered structure. + origin2 : int or tuple of ints, optional + Placement of the second part of the structuring element `structure2`, + by default 0 for a centered structure. If a value is provided for + `origin1` and not for `origin2`, then `origin2` is set to `origin1`. + + Returns + ------- + binary_hit_or_miss : ndarray + Hit-or-miss transform of `input` with the given structuring + element (`structure1`, `structure2`). + + See also + -------- + ndimage.morphology, binary_erosion + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.zeros((7,7), dtype=int) + >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]) + >>> structure1 + array([[1, 0, 0], + [0, 1, 1], + [0, 1, 1]]) + >>> # Find the matches of structure1 in the array a + >>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> # Change the origin of the filter + >>> # origin1=1 is equivalent to origin1=(1,1) here + >>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\ + ... 
origin1=1).astype(int)
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 1, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+
+    """
+    input = numpy.asarray(input)
+    if structure1 is None:
+        structure1 = generate_binary_structure(input.ndim, 1)
+    if structure2 is None:
+        structure2 = numpy.logical_not(structure1)
+    origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
+    if origin2 is None:
+        origin2 = origin1
+    else:
+        origin2 = _ni_support._normalize_sequence(origin2, input.ndim)
+
+    tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
+                           0, False)
+    inplace = isinstance(output, numpy.ndarray)
+    result = _binary_erosion(input, structure2, 1, None, output, 0,
+                             origin2, 1, False)
+    if inplace:
+        numpy.logical_not(output, output)
+        numpy.logical_and(tmp1, output, output)
+    else:
+        numpy.logical_not(result, result)
+        return numpy.logical_and(tmp1, result)
+
+
+def binary_propagation(input, structure=None, mask=None,
+                       output=None, border_value=0, origin=0):
+    """
+    Multi-dimensional binary propagation with the given structuring element.
+
+    Parameters
+    ----------
+    input : array_like
+        Binary image to be propagated inside `mask`.
+    structure : array_like, optional
+        Structuring element used in the successive dilations. The output
+        may depend on the structuring element, especially if `mask` has
+        several connected components. If no structuring element is
+        provided, an element is generated with a square connectivity equal
+        to one.
+    mask : array_like, optional
+        Binary mask defining the region into which `input` is allowed to
+        propagate.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    border_value : int (cast to 0 or 1), optional
+        Value at the border in the output array.
+    origin : int or tuple of ints, optional
+        Placement of the filter, by default 0.
+
+    Returns
+    -------
+    binary_propagation : ndarray
+        Binary propagation of `input` inside `mask`.
+
+    Notes
+    -----
+    This function is functionally equivalent to calling binary_dilation
+    with the number of iterations less than one: iterative dilation until
+    the result does not change anymore.
+
+    The succession of an erosion and propagation inside the original image
+    can be used instead of an *opening* for deleting small objects while
+    keeping the contours of larger objects untouched.
+
+    References
+    ----------
+    .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
+    .. [2] I.T. Young, J.J. Gerbrands, and L.J.
van Vliet, "Fundamentals of + image processing", 1998 + ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf + + Examples + -------- + >>> from scipy import ndimage + >>> input = np.zeros((8, 8), dtype=int) + >>> input[2, 2] = 1 + >>> mask = np.zeros((8, 8), dtype=int) + >>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1 + >>> input + array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + >>> mask + array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 1]]) + >>> ndimage.binary_propagation(input, mask=mask).astype(int) + array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_propagation(input, mask=mask,\\ + ... structure=np.ones((3,3))).astype(int) + array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + + >>> # Comparison between opening and erosion+propagation + >>> a = np.zeros((6,6), dtype=int) + >>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1 + >>> a + array([[1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 1]]) + >>> ndimage.binary_opening(a).astype(int) + array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0]]) + >>> b = ndimage.binary_erosion(a) + >>> b.astype(int) + array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_propagation(b, mask=a).astype(int) + array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0]]) + + """ + return binary_dilation(input, structure, -1, mask, output, + border_value, origin) + + +def binary_fill_holes(input, structure=None, output=None, origin=0): + """ + Fill the holes in binary objects. + + + Parameters + ---------- + input : array_like + n-dimensional binary array with holes to be filled + structure : array_like, optional + Structuring element used in the computation; large-size elements + make computations faster but may miss holes separated from the + background by thin regions. The default element (with a square + connectivity equal to one) yields the intuitive result where all + holes in the input have been filled. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + origin : int, tuple of ints, optional + Position of the structuring element. + + Returns + ------- + out : ndarray + Transformation of the initial image `input` where holes have been + filled. + + See also + -------- + binary_dilation, binary_propagation, label + + Notes + ----- + The algorithm used in this function consists in invading the complementary + of the shapes in `input` from the outer boundary of the image, + using binary dilations. 
Holes are not connected to the boundary and are + therefore not invaded. The result is the complementary subset of the + invaded region. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology + + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.zeros((5, 5), dtype=int) + >>> a[1:4, 1:4] = 1 + >>> a[2,2] = 0 + >>> a + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 0, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> ndimage.binary_fill_holes(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> # Too big structuring element + >>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 0, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + + """ + mask = numpy.logical_not(input) + tmp = numpy.zeros(mask.shape, bool) + inplace = isinstance(output, numpy.ndarray) + if inplace: + binary_dilation(tmp, structure, -1, mask, output, 1, origin) + numpy.logical_not(output, output) + else: + output = binary_dilation(tmp, structure, -1, mask, None, 1, + origin) + numpy.logical_not(output, output) + return output + + +def grey_erosion(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Calculate a greyscale erosion, using either a structuring element, + or a footprint corresponding to a flat structuring element. + + Grayscale erosion is a mathematical morphology operation. For the + simple case of a full and flat structuring element, it can be viewed + as a minimum filter over a sliding window. + + Parameters + ---------- + input : array_like + Array over which the grayscale erosion is to be computed. + size : tuple of ints + Shape of a flat and full structuring element used for the grayscale + erosion. Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the grayscale erosion. Non-zero values give the set of + neighbors of the center over which the minimum is chosen. + structure : array of ints, optional + Structuring element used for the grayscale erosion. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the erosion may be provided. + mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + output : ndarray + Grayscale erosion of `input`. + + See also + -------- + binary_erosion, grey_dilation, grey_opening, grey_closing + generate_binary_structure, ndimage.minimum_filter + + Notes + ----- + The grayscale erosion of an image input by a structuring element s defined + over a domain E is given by: + + (input+s)(x) = min {input(y) - s(x-y), for y in E} + + In particular, for structuring elements defined as + s(y) = 0 for y in E, the grayscale erosion computes the minimum of the + input image inside a sliding window defined by E. + + Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_. + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29 + .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.zeros((7,7), dtype=int) + >>> a[1:6, 1:6] = 3 + >>> a[4,4] = 2; a[2,3] = 1 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 3, 3, 3, 3, 3, 0], + [0, 3, 3, 1, 3, 3, 0], + [0, 3, 3, 3, 3, 3, 0], + [0, 3, 3, 3, 2, 3, 0], + [0, 3, 3, 3, 3, 3, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.grey_erosion(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 3, 2, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> footprint = ndimage.generate_binary_structure(2, 1) + >>> footprint + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> # Diagonally-connected elements are not considered neighbors + >>> ndimage.grey_erosion(a, size=(3,3), footprint=footprint) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 3, 1, 2, 0, 0], + [0, 0, 3, 2, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + if size is None and footprint is None and structure is None: + raise ValueError("size, footprint or structure must be specified") + + return filters._min_or_max_filter(input, size, footprint, structure, + output, mode, cval, origin, 1) + + +def grey_dilation(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Calculate a greyscale dilation, using either a structuring element, + or a footprint corresponding to a flat structuring element. + + Grayscale dilation is a mathematical morphology operation. For the + simple case of a full and flat structuring element, it can be viewed + as a maximum filter over a sliding window. + + Parameters + ---------- + input : array_like + Array over which the grayscale dilation is to be computed. + size : tuple of ints + Shape of a flat and full structuring element used for the grayscale + dilation. Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the grayscale dilation. Non-zero values give the set of + neighbors of the center over which the maximum is chosen. + structure : array of ints, optional + Structuring element used for the grayscale dilation. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the dilation may be provided. + mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + grey_dilation : ndarray + Grayscale dilation of `input`. 
+ + See also + -------- + binary_dilation, grey_erosion, grey_closing, grey_opening + generate_binary_structure, ndimage.maximum_filter + + Notes + ----- + The grayscale dilation of an image input by a structuring element s defined + over a domain E is given by: + + (input+s)(x) = max {input(y) + s(x-y), for y in E} + + In particular, for structuring elements defined as + s(y) = 0 for y in E, the grayscale dilation computes the maximum of the + input image inside a sliding window defined by E. + + Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29 + .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.zeros((7,7), dtype=int) + >>> a[2:5, 2:5] = 1 + >>> a[4,4] = 2; a[2,3] = 3 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 3, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.grey_dilation(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.grey_dilation(a, footprint=np.ones((3,3))) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> s = ndimage.generate_binary_structure(2,1) + >>> s + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> ndimage.grey_dilation(a, footprint=s) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 3, 1, 0, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 1, 3, 2, 1, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 0, 1, 1, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3))) + array([[1, 1, 1, 1, 1, 1, 1], + [1, 2, 4, 4, 4, 2, 1], + [1, 2, 4, 4, 4, 2, 1], + [1, 2, 4, 4, 4, 3, 1], + [1, 2, 2, 3, 3, 3, 1], + [1, 2, 2, 3, 3, 3, 1], + [1, 1, 1, 1, 1, 1, 1]]) + + """ + if size is None and footprint is None and structure is None: + raise ValueError("size, footprint or structure must be specified") + if structure is not None: + structure = numpy.asarray(structure) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + if footprint is not None: + footprint = numpy.asarray(footprint) + footprint = footprint[tuple([slice(None, None, -1)] * + footprint.ndim)] + + input = numpy.asarray(input) + origin = _ni_support._normalize_sequence(origin, input.ndim) + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if footprint is not None: + sz = footprint.shape[ii] + elif structure is not None: + sz = structure.shape[ii] + elif numpy.isscalar(size): + sz = size + else: + sz = size[ii] + if not sz & 1: + origin[ii] -= 1 + + return filters._min_or_max_filter(input, size, footprint, structure, + output, mode, cval, origin, 0) + + +def grey_opening(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Multi-dimensional greyscale opening. + + A greyscale opening consists in the succession of a greyscale erosion, + and a greyscale dilation. + + Parameters + ---------- + input : array_like + Array over which the grayscale opening is to be computed. + size : tuple of ints + Shape of a flat and full structuring element used for the grayscale + opening. 
Optional if `footprint` or `structure` is provided.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the grayscale opening.
+    structure : array of ints, optional
+        Structuring element used for the grayscale opening. `structure`
+        may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the opening may be provided.
+    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default 0
+
+    Returns
+    -------
+    grey_opening : ndarray
+        Result of the grayscale opening of `input` with `structure`.
+
+    See also
+    --------
+    binary_opening, grey_dilation, grey_erosion, grey_closing,
+    generate_binary_structure
+
+    Notes
+    -----
+    The action of a grayscale opening with a flat structuring element amounts
+    to smoothing high local maxima, whereas binary opening erases small
+    objects.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> a = np.arange(36).reshape((6,6))
+    >>> a[3, 3] = 50
+    >>> a
+    array([[ 0,  1,  2,  3,  4,  5],
+           [ 6,  7,  8,  9, 10, 11],
+           [12, 13, 14, 15, 16, 17],
+           [18, 19, 20, 50, 22, 23],
+           [24, 25, 26, 27, 28, 29],
+           [30, 31, 32, 33, 34, 35]])
+    >>> ndimage.grey_opening(a, size=(3,3))
+    array([[ 0,  1,  2,  3,  4,  4],
+           [ 6,  7,  8,  9, 10, 10],
+           [12, 13, 14, 15, 16, 16],
+           [18, 19, 20, 22, 22, 22],
+           [24, 25, 26, 27, 28, 28],
+           [24, 25, 26, 27, 28, 28]])
+    >>> # Note that the local maximum a[3,3] has disappeared
+
+    """
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
+    tmp = grey_erosion(input, size, footprint, structure, None, mode,
+                       cval, origin)
+    return grey_dilation(tmp, size, footprint, structure, output, mode,
+                         cval, origin)
+
+
+def grey_closing(input, size=None, footprint=None, structure=None,
+                 output=None, mode="reflect", cval=0.0, origin=0):
+    """
+    Multi-dimensional greyscale closing.
+
+    A greyscale closing consists in the succession of a greyscale dilation
+    and a greyscale erosion.
+
+    Parameters
+    ----------
+    input : array_like
+        Array over which the grayscale closing is to be computed.
+    size : tuple of ints
+        Shape of a flat and full structuring element used for the grayscale
+        closing. Optional if `footprint` or `structure` is provided.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the grayscale closing.
+    structure : array of ints, optional
+        Structuring element used for the grayscale closing. `structure`
+        may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the closing may be provided.
+    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default 0
+
+    Returns
+    -------
+    grey_closing : ndarray
+        Result of the grayscale closing of `input` with `structure`.
+
+    See also
+    --------
+    binary_closing, grey_dilation, grey_erosion, grey_opening,
+    generate_binary_structure
+
+    Notes
+    -----
+    The action of a grayscale closing with a flat structuring element amounts
+    to smoothing deep local minima, whereas binary closing fills small holes.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> a = np.arange(36).reshape((6,6))
+    >>> a[3,3] = 0
+    >>> a
+    array([[ 0,  1,  2,  3,  4,  5],
+           [ 6,  7,  8,  9, 10, 11],
+           [12, 13, 14, 15, 16, 17],
+           [18, 19, 20,  0, 22, 23],
+           [24, 25, 26, 27, 28, 29],
+           [30, 31, 32, 33, 34, 35]])
+    >>> ndimage.grey_closing(a, size=(3,3))
+    array([[ 7,  7,  8,  9, 10, 11],
+           [ 7,  7,  8,  9, 10, 11],
+           [13, 13, 14, 15, 16, 17],
+           [19, 19, 20, 20, 22, 23],
+           [25, 25, 26, 27, 28, 29],
+           [31, 31, 32, 33, 34, 35]])
+    >>> # Note that the local minimum a[3,3] has disappeared
+
+    """
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
+    tmp = grey_dilation(input, size, footprint, structure, None, mode,
+                        cval, origin)
+    return grey_erosion(tmp, size, footprint, structure, output, mode,
+                        cval, origin)
+
+
+def morphological_gradient(input, size=None, footprint=None, structure=None,
+                           output=None, mode="reflect", cval=0.0, origin=0):
+    """
+    Multi-dimensional morphological gradient.
+
+    The morphological gradient is calculated as the difference between a
+    dilation and an erosion of the input with a given structuring element.
+
+    Parameters
+    ----------
+    input : array_like
+        Array over which to compute the morphological gradient.
+    size : tuple of ints
+        Shape of a flat and full structuring element used for the mathematical
+        morphology operations. Optional if `footprint` or `structure` is
+        provided. A larger `size` yields a more blurred gradient.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the morphology operations. Larger footprints
+        give a more blurred morphological gradient.
+    structure : array of ints, optional
+        Structuring element used for the morphology operations.
+        `structure` may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the morphological gradient
+        may be provided.
+    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default 0
+
+    Returns
+    -------
+    morphological_gradient : ndarray
+        Morphological gradient of `input`.
+
+    See also
+    --------
+    grey_dilation, grey_erosion, ndimage.gaussian_gradient_magnitude
+
+    Notes
+    -----
+    For a flat structuring element, the morphological gradient
+    computed at a given point corresponds to the maximal difference
+    between elements of the input among the elements covered by the
+    structuring element centered on the point.
+
+    References
+    ----------
+    ..
[1] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.zeros((7,7), dtype=int) + >>> a[2:5, 2:5] = 1 + >>> ndimage.morphological_gradient(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> # The morphological gradient is computed as the difference + >>> # between a dilation and an erosion + >>> ndimage.grey_dilation(a, size=(3,3)) -\\ + ... ndimage.grey_erosion(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> a = np.zeros((7,7), dtype=int) + >>> a[2:5, 2:5] = 1 + >>> a[4,4] = 2; a[2,3] = 3 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 3, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.morphological_gradient(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 2, 3, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + tmp = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + if isinstance(output, numpy.ndarray): + grey_erosion(input, size, footprint, structure, output, mode, + cval, origin) + return numpy.subtract(tmp, output, output) + else: + return (tmp - grey_erosion(input, size, footprint, structure, + None, mode, cval, origin)) + + +def morphological_laplace(input, size=None, footprint=None, + structure=None, output=None, + mode="reflect", cval=0.0, origin=0): + """ + Multi-dimensional morphological laplace. + + Parameters + ---------- + input : array_like + Input. + size : int or sequence of ints, optional + See `structure`. + footprint : bool or ndarray, optional + See `structure`. + structure : structure, optional + Either `size`, `footprint`, or the `structure` must be provided. + output : ndarray, optional + An output array can optionally be provided. + mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional + The mode parameter determines how the array borders are handled. + For 'constant' mode, values beyond borders are set to be `cval`. + Default is 'reflect'. + cval : scalar, optional + Value to fill past edges of input if mode is 'constant'. + Default is 0.0 + origin : origin, optional + The origin parameter controls the placement of the filter. + + Returns + ------- + morphological_laplace : ndarray + Output + + """ + tmp1 = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + if isinstance(output, numpy.ndarray): + grey_erosion(input, size, footprint, structure, output, mode, + cval, origin) + numpy.add(tmp1, output, output) + numpy.subtract(output, input, output) + return numpy.subtract(output, input, output) + else: + tmp2 = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin) + numpy.add(tmp1, tmp2, tmp2) + numpy.subtract(tmp2, input, tmp2) + numpy.subtract(tmp2, input, tmp2) + return tmp2 + + +def white_tophat(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Multi-dimensional white tophat filter. + + Parameters + ---------- + input : array_like + Input. + size : tuple of ints + Shape of a flat and full structuring element used for the filter. 
+ Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of elements of a flat structuring element + used for the white tophat filter. + structure : array of ints, optional + Structuring element used for the filter. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the filter may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. + Default is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default is 0. + + Returns + ------- + output : ndarray + Result of the filter of `input` with `structure`. + + See also + -------- + black_tophat + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) + tmp = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin) + tmp = grey_dilation(tmp, size, footprint, structure, output, mode, + cval, origin) + if tmp is None: + tmp = output + + if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_: + numpy.bitwise_xor(input, tmp, out=tmp) + else: + numpy.subtract(input, tmp, out=tmp) + return tmp + + +def black_tophat(input, size=None, footprint=None, + structure=None, output=None, mode="reflect", + cval=0.0, origin=0): + """ + Multi-dimensional black tophat filter. + + Parameters + ---------- + input : array_like + Input. + size : tuple of ints, optional + Shape of a flat and full structuring element used for the filter. + Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the black tophat filter. + structure : array of ints, optional + Structuring element used for the filter. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the filter may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + black_tophat : ndarray + Result of the filter of `input` with `structure`. 
+ + See also + -------- + white_tophat, grey_opening, grey_closing + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) + tmp = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + tmp = grey_erosion(tmp, size, footprint, structure, output, mode, + cval, origin) + if tmp is None: + tmp = output + + if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_: + numpy.bitwise_xor(tmp, input, out=tmp) + else: + numpy.subtract(tmp, input, out=tmp) + return tmp + + +def distance_transform_bf(input, metric="euclidean", sampling=None, + return_distances=True, return_indices=False, + distances=None, indices=None): + """ + Distance transform function by a brute force algorithm. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. In this case the index of the closest background + element is returned along the first axis of the result. + + Parameters + ---------- + input : array_like + Input + metric : str, optional + Three types of distance metric are supported: 'euclidean', 'taxicab' + and 'chessboard'. + sampling : {int, sequence of ints}, optional + This parameter is only used in the case of the euclidean `metric` + distance transform. + + The sampling along each axis can be given by the `sampling` parameter + which should be a sequence of length equal to the input rank, or a + single number in which the `sampling` is assumed to be equal along all + axes. + return_distances : bool, optional + The `return_distances` flag can be used to indicate if the distance + transform is returned. + + The default is True. + return_indices : bool, optional + The `return_indices` flags can be used to indicate if the feature + transform is returned. + + The default is False. + distances : float64 ndarray, optional + Optional output array to hold distances (if `return_distances` is + True). + indices : int64 ndarray, optional + Optional output array to hold indices (if `return_indices` is True). + + Returns + ------- + distances : ndarray + Distance array if `return_distances` is True. + indices : ndarray + Indices array if `return_indices` is True. + + Notes + ----- + This function employs a slow brute force algorithm, see also the + function distance_transform_cdt for more efficient taxicab and + chessboard algorithms. 
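+
+    Examples
+    --------
+    A small sketch with the 'taxicab' metric, for orientation; the exact
+    dtype and formatting of the printed array may vary with the NumPy
+    version:
+
+    >>> from scipy import ndimage
+    >>> a = np.zeros((5, 5), dtype=int)
+    >>> a[1:4, 1:4] = 1
+    >>> ndimage.distance_transform_bf(a, metric='taxicab')
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 2, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]], dtype=uint32)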
+ + """ + if (not return_distances) and (not return_indices): + msg = 'at least one of distances/indices must be specified' + raise RuntimeError(msg) + + tmp1 = numpy.asarray(input) != 0 + struct = generate_binary_structure(tmp1.ndim, tmp1.ndim) + tmp2 = binary_dilation(tmp1, struct) + tmp2 = numpy.logical_xor(tmp1, tmp2) + tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8) + metric = metric.lower() + if metric == 'euclidean': + metric = 1 + elif metric in ['taxicab', 'cityblock', 'manhattan']: + metric = 2 + elif metric == 'chessboard': + metric = 3 + else: + raise RuntimeError('distance metric not supported') + if sampling is not None: + sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim) + sampling = numpy.asarray(sampling, dtype=numpy.float64) + if not sampling.flags.contiguous: + sampling = sampling.copy() + if return_indices: + ft = numpy.zeros(tmp1.shape, dtype=numpy.int32) + else: + ft = None + if return_distances: + if distances is None: + if metric == 1: + dt = numpy.zeros(tmp1.shape, dtype=numpy.float64) + else: + dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32) + else: + if distances.shape != tmp1.shape: + raise RuntimeError('distances array has wrong shape') + if metric == 1: + if distances.dtype.type != numpy.float64: + raise RuntimeError('distances array must be float64') + else: + if distances.dtype.type != numpy.uint32: + raise RuntimeError('distances array must be uint32') + dt = distances + else: + dt = None + + _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft) + if return_indices: + if isinstance(indices, numpy.ndarray): + if indices.dtype.type != numpy.int32: + raise RuntimeError('indices must of int32 type') + if indices.shape != (tmp1.ndim,) + tmp1.shape: + raise RuntimeError('indices has wrong shape') + tmp2 = indices + else: + tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32) + ft = numpy.ravel(ft) + for ii in range(tmp2.shape[0]): + rtmp = numpy.ravel(tmp2[ii, ...])[ft] + rtmp.shape = tmp1.shape + tmp2[ii, ...] = rtmp + ft = tmp2 + + # construct and return the result + result = [] + if return_distances and not isinstance(distances, numpy.ndarray): + result.append(dt) + if return_indices and not isinstance(indices, numpy.ndarray): + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def distance_transform_cdt(input, metric='chessboard', return_distances=True, + return_indices=False, distances=None, indices=None): + """ + Distance transform for chamfer type of transforms. + + Parameters + ---------- + input : array_like + Input + metric : {'chessboard', 'taxicab'}, optional + The `metric` determines the type of chamfering that is done. If the + `metric` is equal to 'taxicab' a structure is generated using + generate_binary_structure with a squared distance equal to 1. If + the `metric` is equal to 'chessboard', a `metric` is generated + using generate_binary_structure with a squared distance equal to + the dimensionality of the array. These choices correspond to the + common interpretations of the 'taxicab' and the 'chessboard' + distance metrics in two dimensions. + + The default for `metric` is 'chessboard'. + return_distances, return_indices : bool, optional + The `return_distances`, and `return_indices` flags can be used to + indicate if the distance transform, the feature transform, or both + must be returned. 
+
+    """
+    if (not return_distances) and (not return_indices):
+        msg = 'at least one of distances/indices must be specified'
+        raise RuntimeError(msg)
+
+    ft_inplace = isinstance(indices, numpy.ndarray)
+    dt_inplace = isinstance(distances, numpy.ndarray)
+    input = numpy.asarray(input)
+    if metric in ['taxicab', 'cityblock', 'manhattan']:
+        rank = input.ndim
+        metric = generate_binary_structure(rank, 1)
+    elif metric == 'chessboard':
+        rank = input.ndim
+        metric = generate_binary_structure(rank, rank)
+    else:
+        try:
+            metric = numpy.asarray(metric)
+        except Exception:
+            raise RuntimeError('invalid metric provided')
+        for s in metric.shape:
+            if s != 3:
+                raise RuntimeError('metric sizes must be equal to 3')
+
+    if not metric.flags.contiguous:
+        metric = metric.copy()
+    if dt_inplace:
+        if distances.dtype.type != numpy.int32:
+            raise RuntimeError('distances must be of int32 type')
+        if distances.shape != input.shape:
+            raise RuntimeError('distances has wrong shape')
+        dt = distances
+        dt[...] = numpy.where(input, -1, 0).astype(numpy.int32)
+    else:
+        dt = numpy.where(input, -1, 0).astype(numpy.int32)
+
+    rank = dt.ndim
+    if return_indices:
+        sz = numpy.product(dt.shape, axis=0)
+        ft = numpy.arange(sz, dtype=numpy.int32)
+        ft.shape = dt.shape
+    else:
+        ft = None
+
+    _nd_image.distance_transform_op(metric, dt, ft)
+    dt = dt[tuple([slice(None, None, -1)] * rank)]
+    if return_indices:
+        ft = ft[tuple([slice(None, None, -1)] * rank)]
+    _nd_image.distance_transform_op(metric, dt, ft)
+    dt = dt[tuple([slice(None, None, -1)] * rank)]
+    if return_indices:
+        ft = ft[tuple([slice(None, None, -1)] * rank)]
+        ft = numpy.ravel(ft)
+        if ft_inplace:
+            if indices.dtype.type != numpy.int32:
+                raise RuntimeError('indices must be of int32 type')
+            if indices.shape != (dt.ndim,) + dt.shape:
+                raise RuntimeError('indices has wrong shape')
+            tmp = indices
+        else:
+            tmp = numpy.indices(dt.shape, dtype=numpy.int32)
+        for ii in range(tmp.shape[0]):
+            rtmp = numpy.ravel(tmp[ii, ...])[ft]
+            rtmp.shape = dt.shape
+            tmp[ii, ...] = rtmp
+        ft = tmp
+
+    # construct and return the result
+    result = []
+    if return_distances and not dt_inplace:
+        result.append(dt)
+    if return_indices and not ft_inplace:
+        result.append(ft)
+
+    if len(result) == 2:
+        return tuple(result)
+    elif len(result) == 1:
+        return result[0]
+    else:
+        return None
+
+
+def distance_transform_edt(input, sampling=None, return_distances=True,
+                           return_indices=False, distances=None, indices=None):
+    """
+    Exact euclidean distance transform.
+
+    In addition to the distance transform, the feature transform can
+    be calculated. In this case the index of the closest background
+    element is returned along the first axis of the result.
+
+    Parameters
+    ----------
+    input : array_like
+        Input data to transform. Can be any type but will be converted
+        into binary: 1 wherever input equates to True, 0 elsewhere.
+    sampling : float or int, or sequence of same, optional
+        Spacing of elements along each dimension. If a sequence, must be of
+        length equal to the input rank; if a single number, this is used for
+        all axes.
If not specified, a grid spacing of unity is implied. + return_distances : bool, optional + Whether to return distance matrix. At least one of + return_distances/return_indices must be True. Default is True. + return_indices : bool, optional + Whether to return indices matrix. Default is False. + distances : ndarray, optional + Used for output of distance array, must be of type float64. + indices : ndarray, optional + Used for output of indices, must be of type int32. + + Returns + ------- + distance_transform_edt : ndarray or list of ndarrays + Either distance matrix, index matrix, or a list of the two, + depending on `return_x` flags and `distance` and `indices` + input parameters. + + Notes + ----- + The euclidean distance transform gives values of the euclidean + distance:: + + n + y_i = sqrt(sum (x[i]-b[i])**2) + i + + where b[i] is the background point (value 0) with the smallest + Euclidean distance to input points x[i], and n is the + number of dimensions. + + Examples + -------- + >>> from scipy import ndimage + >>> a = np.array(([0,1,1,1,1], + ... [0,0,1,1,1], + ... [0,1,1,1,1], + ... [0,1,1,1,0], + ... [0,1,1,0,0])) + >>> ndimage.distance_transform_edt(a) + array([[ 0. , 1. , 1.4142, 2.2361, 3. ], + [ 0. , 0. , 1. , 2. , 2. ], + [ 0. , 1. , 1.4142, 1.4142, 1. ], + [ 0. , 1. , 1.4142, 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + With a sampling of 2 units along x, 1 along y: + + >>> ndimage.distance_transform_edt(a, sampling=[2,1]) + array([[ 0. , 1. , 2. , 2.8284, 3.6056], + [ 0. , 0. , 1. , 2. , 3. ], + [ 0. , 1. , 2. , 2.2361, 2. ], + [ 0. , 1. , 2. , 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + Asking for indices as well: + + >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True) + >>> inds + array([[[0, 0, 1, 1, 3], + [1, 1, 1, 1, 3], + [2, 2, 1, 3, 3], + [3, 3, 4, 4, 3], + [4, 4, 4, 4, 4]], + [[0, 0, 1, 1, 4], + [0, 1, 1, 1, 4], + [0, 0, 1, 4, 4], + [0, 0, 3, 3, 4], + [0, 0, 3, 3, 4]]]) + + With arrays provided for inplace outputs: + + >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32) + >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices) + array([[ 0. , 1. , 1.4142, 2.2361, 3. ], + [ 0. , 0. , 1. , 2. , 2. ], + [ 0. , 1. , 1.4142, 1.4142, 1. ], + [ 0. , 1. , 1.4142, 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. 
+    >>> indices
+    array([[[0, 0, 1, 1, 3],
+            [1, 1, 1, 1, 3],
+            [2, 2, 1, 3, 3],
+            [3, 3, 4, 4, 3],
+            [4, 4, 4, 4, 4]],
+           [[0, 0, 1, 1, 4],
+            [0, 1, 1, 1, 4],
+            [0, 0, 1, 4, 4],
+            [0, 0, 3, 3, 4],
+            [0, 0, 3, 3, 4]]])
+
+    """
+    if (not return_distances) and (not return_indices):
+        msg = 'at least one of distances/indices must be specified'
+        raise RuntimeError(msg)
+
+    ft_inplace = isinstance(indices, numpy.ndarray)
+    dt_inplace = isinstance(distances, numpy.ndarray)
+    # calculate the feature transform
+    input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8))
+    if sampling is not None:
+        sampling = _ni_support._normalize_sequence(sampling, input.ndim)
+        sampling = numpy.asarray(sampling, dtype=numpy.float64)
+        if not sampling.flags.contiguous:
+            sampling = sampling.copy()
+
+    if ft_inplace:
+        ft = indices
+        if ft.shape != (input.ndim,) + input.shape:
+            raise RuntimeError('indices has wrong shape')
+        if ft.dtype.type != numpy.int32:
+            raise RuntimeError('indices must be of int32 type')
+    else:
+        ft = numpy.zeros((input.ndim,) + input.shape, dtype=numpy.int32)
+
+    _nd_image.euclidean_feature_transform(input, sampling, ft)
+    # if requested, calculate the distance transform
+    if return_distances:
+        dt = ft - numpy.indices(input.shape, dtype=ft.dtype)
+        dt = dt.astype(numpy.float64)
+        if sampling is not None:
+            for ii in range(len(sampling)):
+                dt[ii, ...] *= sampling[ii]
+        numpy.multiply(dt, dt, dt)
+        if dt_inplace:
+            dt = numpy.add.reduce(dt, axis=0)
+            if distances.shape != dt.shape:
+                raise RuntimeError('distances has wrong shape')
+            if distances.dtype.type != numpy.float64:
+                raise RuntimeError('distances must be of float64 type')
+            numpy.sqrt(dt, distances)
+        else:
+            dt = numpy.add.reduce(dt, axis=0)
+            dt = numpy.sqrt(dt)
+
+    # construct and return the result
+    result = []
+    if return_distances and not dt_inplace:
+        result.append(dt)
+    if return_indices and not ft_inplace:
+        result.append(ft)
+
+    if len(result) == 2:
+        return tuple(result)
+    elif len(result) == 1:
+        return result[0]
+    else:
+        return None
diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/morphology.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/morphology.pyc
new file mode 100644
index 0000000..a3482b1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/morphology.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/setup.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/setup.py
new file mode 100644
index 0000000..f274538
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/setup.py
@@ -0,0 +1,59 @@
+from __future__ import division, print_function, absolute_import
+
+import os
+
+from numpy.distutils.core import setup
+from numpy.distutils.misc_util import Configuration
+from numpy import get_include
+from scipy._build_utils import numpy_nodepr_api
+
+
+def configuration(parent_package='', top_path=None):
+
+    config = Configuration('ndimage', parent_package, top_path)
+
+    include_dirs = ['src',
+                    get_include(),
+                    os.path.join(os.path.dirname(__file__), '..', '_lib', 'src')]
+
+    config.add_extension("_nd_image",
+                         sources=["src/nd_image.c",
+                                  "src/ni_filters.c",
+                                  "src/ni_fourier.c",
+                                  "src/ni_interpolation.c",
+                                  "src/ni_measure.c",
+                                  "src/ni_morphology.c",
+                                  "src/ni_splines.c",
+                                  "src/ni_support.c"],
+                         include_dirs=include_dirs,
+                         **numpy_nodepr_api)
+
+    # Cython wants the .c and .pyx to have the underscore.
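+    # (_ni_label is compiled from src/_ni_label.c, the file Cython generates
+    # from _ni_label.pyx, so the module and its source share the name.)
+    # Every extension here follows the same numpy.distutils recipe: register
+    # it on the Configuration, then pass the result to setup() at the bottom
+    # of the file.  A minimal sketch of that recipe, using placeholder names
+    # ("demo", src/_demo.c) that do not exist in this package:
+    #
+    #     from numpy.distutils.core import setup
+    #     from numpy.distutils.misc_util import Configuration
+    #
+    #     def configuration(parent_package='', top_path=None):
+    #         config = Configuration('demo', parent_package, top_path)
+    #         config.add_extension('_demo', sources=['src/_demo.c'])
+    #         return config
+    #
+    #     if __name__ == '__main__':
+    #         setup(**configuration(top_path='').todict())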
+ config.add_extension("_ni_label", + sources=["src/_ni_label.c",], + include_dirs=['src']+[get_include()]) + + config.add_extension("_ctest", + sources=["src/_ctest.c"], + include_dirs=[get_include()], + **numpy_nodepr_api) + + _define_macros = [("OLDAPI", 1)] + if 'define_macros' in numpy_nodepr_api: + _define_macros.extend(numpy_nodepr_api['define_macros']) + + config.add_extension("_ctest_oldapi", + sources=["src/_ctest.c"], + include_dirs=[get_include()], + define_macros=_define_macros) + + config.add_extension("_cytest", + sources=["src/_cytest.c"]) + + config.add_data_dir('tests') + + return config + + +if __name__ == '__main__': + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/setup.pyc new file mode 100644 index 0000000..504bcb2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/__init__.pyc new file mode 100644 index 0000000..963bd07 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/README.txt b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/README.txt new file mode 100644 index 0000000..da9d4ce --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/README.txt @@ -0,0 +1,4 @@ +label_inputs.txt, label_strels.txt, and label_results.txt are test +vectors generated using ndimage.label from scipy version 0.10.0, and +are used to verify that the cython version behaves as expected. 
The +script to generate them is in ../../utils/generate_label_testvectors.py diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/label_inputs.txt b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/label_inputs.txt new file mode 100644 index 0000000..6c3cff3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/label_inputs.txt @@ -0,0 +1,21 @@ +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 0 1 1 1 +1 1 0 0 0 1 1 +1 0 1 0 1 0 1 +0 0 0 1 0 0 0 +1 0 1 0 1 0 1 +1 1 0 0 0 1 1 +1 1 1 0 1 1 1 +1 0 1 1 1 0 1 +0 0 0 1 0 0 0 +1 0 0 1 0 0 1 +1 1 1 1 1 1 1 +1 0 0 1 0 0 1 +0 0 0 1 0 0 0 +1 0 1 1 1 0 1 diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/label_results.txt b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/label_results.txt new file mode 100644 index 0000000..c239b03 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/label_results.txt @@ -0,0 +1,294 @@ +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +2 2 2 2 2 2 2 +3 3 3 3 3 3 3 +4 4 4 4 4 4 4 +5 5 5 5 5 5 5 +6 6 6 6 6 6 6 +7 7 7 7 7 7 7 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 2 3 4 5 6 7 +8 9 10 11 12 13 14 +15 16 17 18 19 20 21 +22 23 24 25 26 27 28 +29 30 31 32 33 34 35 +36 37 38 39 40 41 42 +43 44 45 46 47 48 49 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 2 3 4 5 6 7 +8 1 2 3 4 5 6 +9 8 1 2 3 4 5 +10 9 8 1 2 3 4 +11 10 9 8 1 2 3 +12 11 10 9 8 1 2 +13 12 11 10 9 8 1 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 2 3 4 5 6 7 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 2 1 2 1 2 1 +2 1 2 1 2 1 2 +1 2 1 2 1 2 1 +2 1 2 1 2 1 2 +1 2 1 2 1 2 1 +2 1 2 1 2 1 2 +1 2 1 2 1 2 1 +1 2 3 4 5 6 7 +2 3 4 5 6 7 8 +3 4 5 6 7 8 9 +4 5 6 7 8 9 10 +5 6 7 8 9 10 11 +6 7 8 9 10 11 12 +7 8 9 10 11 12 13 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 1 1 1 1 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +1 0 3 0 2 0 4 +0 0 0 2 0 0 0 +5 0 2 0 6 0 7 +2 2 0 0 0 7 7 +2 2 2 0 7 7 7 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +3 0 1 0 4 0 2 +0 0 0 1 0 0 0 +5 0 6 0 1 0 7 +5 5 0 0 0 1 1 +5 5 5 0 1 1 1 +1 1 1 0 2 2 2 +3 3 0 0 0 4 4 +5 0 6 0 7 0 8 +0 0 0 9 0 0 0 +10 0 11 0 12 0 13 +14 14 0 0 0 15 15 +16 16 16 0 17 17 17 +1 1 1 0 2 3 3 +1 1 0 0 0 3 3 +1 0 4 0 3 0 3 +0 0 0 3 0 0 0 +3 0 3 0 5 0 6 +3 3 0 0 0 6 6 +3 3 7 0 6 6 6 +1 2 3 0 4 5 6 +7 8 0 0 0 9 10 +11 0 12 0 13 0 14 +0 0 0 15 0 0 0 +16 0 17 0 18 0 19 +20 21 0 0 0 22 23 +24 25 26 0 27 28 29 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +1 0 3 0 2 0 2 +0 0 0 2 0 0 0 +2 0 2 0 4 0 5 +2 2 0 0 0 5 5 +2 2 2 0 5 5 5 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +1 0 3 0 4 0 2 +0 0 0 5 0 0 0 +6 0 7 0 8 0 9 +6 6 0 0 0 9 9 +6 6 6 0 9 9 9 +1 2 3 0 4 5 6 +7 1 0 0 0 4 5 +8 0 1 0 9 0 4 +0 0 0 1 0 0 0 +10 0 11 0 1 0 12 +13 10 0 0 0 1 14 +15 13 10 0 16 17 1 +1 2 3 0 4 5 6 +1 2 0 0 0 5 6 +1 0 7 0 8 0 6 +0 0 0 9 
0 0 0 +10 0 11 0 12 0 13 +10 14 0 0 0 15 13 +10 14 16 0 17 15 13 +1 1 1 0 1 1 1 +1 1 0 0 0 1 1 +1 0 1 0 1 0 1 +0 0 0 1 0 0 0 +1 0 1 0 1 0 1 +1 1 0 0 0 1 1 +1 1 1 0 1 1 1 +1 1 2 0 3 3 3 +1 1 0 0 0 3 3 +1 0 1 0 4 0 3 +0 0 0 1 0 0 0 +5 0 6 0 1 0 1 +5 5 0 0 0 1 1 +5 5 5 0 7 1 1 +1 2 1 0 1 3 1 +2 1 0 0 0 1 3 +1 0 1 0 1 0 1 +0 0 0 1 0 0 0 +1 0 1 0 1 0 1 +4 1 0 0 0 1 5 +1 4 1 0 1 5 1 +1 2 3 0 4 5 6 +2 3 0 0 0 6 7 +3 0 8 0 6 0 9 +0 0 0 6 0 0 0 +10 0 6 0 11 0 12 +13 6 0 0 0 12 14 +6 15 16 0 12 14 17 +1 1 1 0 2 2 2 +1 1 0 0 0 2 2 +1 0 1 0 3 0 2 +0 0 0 1 0 0 0 +4 0 5 0 1 0 1 +4 4 0 0 0 1 1 +4 4 4 0 1 1 1 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +4 0 0 5 0 0 5 +5 5 5 5 5 5 5 +5 0 0 5 0 0 6 +0 0 0 7 0 0 0 +8 0 7 7 7 0 9 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +4 0 0 4 0 0 5 +4 4 4 4 4 4 4 +6 0 0 4 0 0 4 +0 0 0 7 0 0 0 +8 0 7 7 7 0 9 +1 0 2 2 2 0 3 +0 0 0 4 0 0 0 +5 0 0 6 0 0 7 +8 8 8 8 8 8 8 +9 0 0 10 0 0 11 +0 0 0 12 0 0 0 +13 0 14 14 14 0 15 +1 0 2 3 3 0 4 +0 0 0 3 0 0 0 +5 0 0 3 0 0 6 +5 5 3 3 3 6 6 +5 0 0 3 0 0 6 +0 0 0 3 0 0 0 +7 0 3 3 8 0 9 +1 0 2 3 4 0 5 +0 0 0 6 0 0 0 +7 0 0 8 0 0 9 +10 11 12 13 14 15 16 +17 0 0 18 0 0 19 +0 0 0 20 0 0 0 +21 0 22 23 24 0 25 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +2 0 0 2 0 0 2 +2 2 2 2 2 2 2 +2 0 0 2 0 0 2 +0 0 0 2 0 0 0 +4 0 2 2 2 0 5 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +2 0 0 2 0 0 2 +2 2 2 2 2 2 2 +2 0 0 2 0 0 2 +0 0 0 2 0 0 0 +4 0 2 2 2 0 5 +1 0 2 3 4 0 5 +0 0 0 2 0 0 0 +6 0 0 7 0 0 8 +9 6 10 11 7 12 13 +14 0 0 10 0 0 12 +0 0 0 15 0 0 0 +16 0 17 18 15 0 19 +1 0 2 3 4 0 5 +0 0 0 3 0 0 0 +6 0 0 3 0 0 7 +6 8 9 3 10 11 7 +6 0 0 3 0 0 7 +0 0 0 3 0 0 0 +12 0 13 3 14 0 15 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +2 0 0 2 0 0 2 +2 2 2 2 2 2 2 +2 0 0 2 0 0 2 +0 0 0 2 0 0 0 +4 0 2 2 2 0 5 +1 0 2 2 3 0 4 +0 0 0 2 0 0 0 +5 0 0 2 0 0 6 +5 5 2 2 2 6 6 +5 0 0 2 0 0 6 +0 0 0 2 0 0 0 +7 0 8 2 2 0 9 +1 0 2 3 2 0 4 +0 0 0 2 0 0 0 +5 0 0 6 0 0 7 +8 5 6 9 6 7 10 +5 0 0 6 0 0 7 +0 0 0 11 0 0 0 +12 0 11 13 11 0 14 +1 0 2 3 4 0 5 +0 0 0 4 0 0 0 +6 0 0 7 0 0 8 +9 10 7 11 12 8 13 +10 0 0 12 0 0 14 +0 0 0 15 0 0 0 +16 0 15 17 18 0 19 +1 0 2 2 2 0 3 +0 0 0 2 0 0 0 +2 0 0 2 0 0 2 +2 2 2 2 2 2 2 +2 0 0 2 0 0 2 +0 0 0 2 0 0 0 +4 0 2 2 2 0 5 diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/label_strels.txt b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/label_strels.txt new file mode 100644 index 0000000..35ae812 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/data/label_strels.txt @@ -0,0 +1,42 @@ +0 0 1 +1 1 1 +1 0 0 +1 0 0 +1 1 1 +0 0 1 +0 0 0 +1 1 1 +0 0 0 +0 1 1 +0 1 0 +1 1 0 +0 0 0 +0 0 0 +0 0 0 +0 1 1 +1 1 1 +1 1 0 +0 1 0 +1 1 1 +0 1 0 +1 0 0 +0 1 0 +0 0 1 +0 1 0 +0 1 0 +0 1 0 +1 1 1 +1 1 1 +1 1 1 +1 1 0 +0 1 0 +0 1 1 +1 0 1 +0 1 0 +1 0 1 +0 0 1 +0 1 0 +1 0 0 +1 1 0 +1 1 1 +0 1 1 diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/dots.png b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/dots.png new file mode 100644 index 0000000..640030c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/dots.png differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_c_api.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_c_api.py new file mode 100644 index 0000000..318c655 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_c_api.py @@ -0,0 +1,100 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_allclose + +from scipy import 
ndimage +from scipy.ndimage import _ctest +from scipy.ndimage import _ctest_oldapi +from scipy.ndimage import _cytest +from scipy._lib._ccallback import LowLevelCallable + +FILTER1D_FUNCTIONS = [ + lambda filter_size: _ctest.filter1d(filter_size), + lambda filter_size: _ctest_oldapi.filter1d(filter_size), + lambda filter_size: _cytest.filter1d(filter_size, with_signature=False), + lambda filter_size: LowLevelCallable(_cytest.filter1d(filter_size, with_signature=True)), + lambda filter_size: LowLevelCallable.from_cython(_cytest, "_filter1d", + _cytest.filter1d_capsule(filter_size)), +] + +FILTER2D_FUNCTIONS = [ + lambda weights: _ctest.filter2d(weights), + lambda weights: _ctest_oldapi.filter2d(weights), + lambda weights: _cytest.filter2d(weights, with_signature=False), + lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)), + lambda weights: LowLevelCallable.from_cython(_cytest, "_filter2d", _cytest.filter2d_capsule(weights)), +] + +TRANSFORM_FUNCTIONS = [ + lambda shift: _ctest.transform(shift), + lambda shift: _ctest_oldapi.transform(shift), + lambda shift: _cytest.transform(shift, with_signature=False), + lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)), + lambda shift: LowLevelCallable.from_cython(_cytest, "_transform", _cytest.transform_capsule(shift)), +] + + +def test_generic_filter(): + def filter2d(footprint_elements, weights): + return (weights*footprint_elements).sum() + + def check(j): + func = FILTER2D_FUNCTIONS[j] + + im = np.ones((20, 20)) + im[:10,:10] = 0 + footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + footprint_size = np.count_nonzero(footprint) + weights = np.ones(footprint_size)/footprint_size + + res = ndimage.generic_filter(im, func(weights), + footprint=footprint) + std = ndimage.generic_filter(im, filter2d, footprint=footprint, + extra_arguments=(weights,)) + assert_allclose(res, std, err_msg="#{} failed".format(j)) + + for j, func in enumerate(FILTER2D_FUNCTIONS): + check(j) + + +def test_generic_filter1d(): + def filter1d(input_line, output_line, filter_size): + for i in range(output_line.size): + output_line[i] = 0 + for j in range(filter_size): + output_line[i] += input_line[i+j] + output_line /= filter_size + + def check(j): + func = FILTER1D_FUNCTIONS[j] + + im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1)) + filter_size = 3 + + res = ndimage.generic_filter1d(im, func(filter_size), + filter_size) + std = ndimage.generic_filter1d(im, filter1d, filter_size, + extra_arguments=(filter_size,)) + assert_allclose(res, std, err_msg="#{} failed".format(j)) + + for j, func in enumerate(FILTER1D_FUNCTIONS): + check(j) + + +def test_geometric_transform(): + def transform(output_coordinates, shift): + return output_coordinates[0] - shift, output_coordinates[1] - shift + + def check(j): + func = TRANSFORM_FUNCTIONS[j] + + im = np.arange(12).reshape(4, 3).astype(np.float64) + shift = 0.5 + + res = ndimage.geometric_transform(im, func(shift)) + std = ndimage.geometric_transform(im, transform, extra_arguments=(shift,)) + assert_allclose(res, std, err_msg="#{} failed".format(j)) + + for j, func in enumerate(TRANSFORM_FUNCTIONS): + check(j) diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_c_api.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_c_api.pyc new file mode 100644 index 0000000..70dd110 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_c_api.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_datatypes.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_datatypes.py new file mode 100644 index 0000000..a6fea95 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_datatypes.py @@ -0,0 +1,68 @@ +""" Testing data types for ndimage calls +""" +from __future__ import division, print_function, absolute_import + +import sys + +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_ +import pytest + +from scipy import ndimage + + +def test_map_coordinates_dts(): + # check that ndimage accepts different data types for interpolation + data = np.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + shifted_data = np.array([[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + idx = np.indices(data.shape) + dts = (np.uint8, np.uint16, np.uint32, np.uint64, + np.int8, np.int16, np.int32, np.int64, + np.intp, np.uintp, np.float32, np.float64) + for order in range(0, 6): + for data_dt in dts: + these_data = data.astype(data_dt) + for coord_dt in dts: + # affine mapping + mat = np.eye(2, dtype=coord_dt) + off = np.zeros((2,), dtype=coord_dt) + out = ndimage.affine_transform(these_data, mat, off) + assert_array_almost_equal(these_data, out) + # map coordinates + coords_m1 = idx.astype(coord_dt) - 1 + coords_p10 = idx.astype(coord_dt) + 10 + out = ndimage.map_coordinates(these_data, coords_m1, order=order) + assert_array_almost_equal(out, shifted_data) + # check constant fill works + out = ndimage.map_coordinates(these_data, coords_p10, order=order) + assert_array_almost_equal(out, np.zeros((3,4))) + # check shift and zoom + out = ndimage.shift(these_data, 1) + assert_array_almost_equal(out, shifted_data) + out = ndimage.zoom(these_data, 1) + assert_array_almost_equal(these_data, out) + + +@pytest.mark.xfail(not sys.platform == 'darwin', reason="runs only on darwin") +def test_uint64_max(): + # Test interpolation respects uint64 max. Reported to fail at least on + # win32 (due to the 32 bit visual C compiler using signed int64 when + # converting between uint64 to double) and Debian on s390x. + # Interpolation is always done in double precision floating point, so + # we use the largest uint64 value for which int(float(big)) still fits + # in a uint64. 
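+    # The arithmetic behind the choice of ``big`` below: doubles in
+    # [2**63, 2**64) are spaced 2**(63-52) = 2048 apart, so the largest
+    # double strictly below 2**64 is 2**64 - 2048.  float(2**64 - 1025)
+    # rounds down to that value (1023 away, versus 1025 from 2**64), while
+    # float(2**64 - 1024) is an exact tie and rounds-to-even up to 2**64,
+    # which no longer fits in a uint64:
+    #
+    #     >>> int(float(2**64 - 1025)) == 2**64 - 2048
+    #     True
+    #     >>> int(float(2**64 - 1024)) == 2**64
+    #     True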
+ big = 2**64 - 1025 + arr = np.array([big, big, big], dtype=np.uint64) + # Tests geometric transform (map_coordinates, affine_transform) + inds = np.indices(arr.shape) - 0.1 + x = ndimage.map_coordinates(arr, inds) + assert_(x[1] == int(float(big))) + assert_(x[2] == int(float(big))) + # Tests zoom / shift + x = ndimage.shift(arr, 0.1) + assert_(x[1] == int(float(big))) + assert_(x[2] == int(float(big))) diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_datatypes.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_datatypes.pyc new file mode 100644 index 0000000..dc7745e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_datatypes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_filters.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_filters.py new file mode 100644 index 0000000..379167f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_filters.py @@ -0,0 +1,447 @@ +''' Some tests for filters ''' +from __future__ import division, print_function, absolute_import + +import sys +import numpy as np + +from numpy.testing import (assert_equal, assert_allclose, + assert_array_equal, assert_almost_equal) +from pytest import raises as assert_raises + +import scipy.ndimage as sndi +from scipy.ndimage.filters import _gaussian_kernel1d, rank_filter +from scipy._lib._numpy_compat import suppress_warnings + +def test_ticket_701(): + # Test generic filter sizes + arr = np.arange(4).reshape((2,2)) + func = lambda x: np.min(x) + res = sndi.generic_filter(arr, func, size=(1,1)) + # The following raises an error unless ticket 701 is fixed + res2 = sndi.generic_filter(arr, func, size=1) + assert_equal(res, res2) + + +def test_gh_5430(): + # At least one of these raises an error unless gh-5430 is + # fixed. In py2k an int is implemented using a C long, so + # which one fails depends on your system. In py3k there is only + # one arbitrary precision integer type, so both should fail. + sigma = np.int32(1) + out = sndi._ni_support._normalize_sequence(sigma, 1) + assert_equal(out, [sigma]) + sigma = np.int64(1) + out = sndi._ni_support._normalize_sequence(sigma, 1) + assert_equal(out, [sigma]) + # This worked before; make sure it still works + sigma = 1 + out = sndi._ni_support._normalize_sequence(sigma, 1) + assert_equal(out, [sigma]) + # This worked before; make sure it still works + sigma = [1, 1] + out = sndi._ni_support._normalize_sequence(sigma, 2) + assert_equal(out, sigma) + # Also include the OPs original example to make sure we fixed the issue + x = np.random.normal(size=(256, 256)) + perlin = np.zeros_like(x) + for i in 2**np.arange(6): + perlin += sndi.filters.gaussian_filter(x, i, mode="wrap") * i**2 + # This also fixes gh-4106, show that the OPs example now runs. 
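+    # For reference, _normalize_sequence broadcasts a scalar to one entry
+    # per axis and passes sequences through after a length check; roughly:
+    #
+    #     >>> sndi._ni_support._normalize_sequence(3, 2)
+    #     [3, 3]
+    #     >>> sndi._ni_support._normalize_sequence([1, 2], 2)
+    #     [1, 2]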
+ x = np.int64(21) + sndi._ni_support._normalize_sequence(x, 0) + + +def test_gaussian_kernel1d(): + radius = 10 + sigma = 2 + sigma2 = sigma * sigma + x = np.arange(-radius, radius + 1, dtype=np.double) + phi_x = np.exp(-0.5 * x * x / sigma2) + phi_x /= phi_x.sum() + assert_allclose(phi_x, _gaussian_kernel1d(sigma, 0, radius)) + assert_allclose(-phi_x * x / sigma2, _gaussian_kernel1d(sigma, 1, radius)) + assert_allclose(phi_x * (x * x / sigma2 - 1) / sigma2, + _gaussian_kernel1d(sigma, 2, radius)) + assert_allclose(phi_x * (3 - x * x / sigma2) * x / (sigma2 * sigma2), + _gaussian_kernel1d(sigma, 3, radius)) + + +def test_orders_gauss(): + # Check order inputs to Gaussians + arr = np.zeros((1,)) + assert_equal(0, sndi.gaussian_filter(arr, 1, order=0)) + assert_equal(0, sndi.gaussian_filter(arr, 1, order=3)) + assert_raises(ValueError, sndi.gaussian_filter, arr, 1, -1) + assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0)) + assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3)) + assert_raises(ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1) + + +def test_valid_origins(): + """Regression test for #1311.""" + func = lambda x: np.mean(x) + data = np.array([1,2,3,4,5], dtype=np.float64) + assert_raises(ValueError, sndi.generic_filter, data, func, size=3, + origin=2) + func2 = lambda x, y: np.mean(x + y) + assert_raises(ValueError, sndi.generic_filter1d, data, func, + filter_size=3, origin=2) + assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3, + origin=2) + + for filter in [sndi.uniform_filter, sndi.minimum_filter, + sndi.maximum_filter, sndi.maximum_filter1d, + sndi.median_filter, sndi.minimum_filter1d]: + # This should work, since for size == 3, the valid range for origin is + # -1 to 1. + list(filter(data, 3, origin=-1)) + list(filter(data, 3, origin=1)) + # Just check this raises an error instead of silently accepting or + # segfaulting. + assert_raises(ValueError, filter, data, 3, origin=2) + + +def test_bad_convolve_and_correlate_origins(): + """Regression test for gh-822.""" + # Before gh-822 was fixed, these would generate seg. faults or + # other crashes on many system. + assert_raises(ValueError, sndi.correlate1d, + [0, 1, 2, 3, 4, 5], [1, 1, 2, 0], origin=2) + assert_raises(ValueError, sndi.correlate, + [0, 1, 2, 3, 4, 5], [0, 1, 2], origin=[2]) + assert_raises(ValueError, sndi.correlate, + np.ones((3, 5)), np.ones((2, 2)), origin=[0, 1]) + + assert_raises(ValueError, sndi.convolve1d, + np.arange(10), np.ones(3), origin=-2) + assert_raises(ValueError, sndi.convolve, + np.arange(10), np.ones(3), origin=[-2]) + assert_raises(ValueError, sndi.convolve, + np.ones((3, 5)), np.ones((2, 2)), origin=[0, -2]) + + +def test_multiple_modes(): + # Test that the filters with multiple mode cababilities for different + # dimensions give the same result as applying a single mode. 
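+    # A mode given as a sequence supplies one boundary rule per axis:
+    # mode=['reflect', 'wrap'] extends axis 0 by reflection and axis 1
+    # periodically, so
+    #
+    #     sndi.gaussian_filter(arr, 1, mode=['reflect', 'wrap'])
+    #
+    # matches filtering axis 0 with mode='reflect' and then axis 1 with
+    # mode='wrap' (test_multiple_modes_sequentially below checks exactly
+    # that).  Here every call should equal its single-mode counterpart.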
+ arr = np.array([[1., 0., 0.], + [1., 1., 0.], + [0., 0., 0.]]) + + mode1 = 'reflect' + mode2 = ['reflect', 'reflect'] + + assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1), + sndi.gaussian_filter(arr, 1, mode=mode2)) + assert_equal(sndi.prewitt(arr, mode=mode1), + sndi.prewitt(arr, mode=mode2)) + assert_equal(sndi.sobel(arr, mode=mode1), + sndi.sobel(arr, mode=mode2)) + assert_equal(sndi.laplace(arr, mode=mode1), + sndi.laplace(arr, mode=mode2)) + assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1), + sndi.gaussian_laplace(arr, 1, mode=mode2)) + assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1), + sndi.maximum_filter(arr, size=5, mode=mode2)) + assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1), + sndi.minimum_filter(arr, size=5, mode=mode2)) + assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1), + sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2)) + assert_equal(sndi.uniform_filter(arr, 5, mode=mode1), + sndi.uniform_filter(arr, 5, mode=mode2)) + + +def test_multiple_modes_sequentially(): + # Test that the filters with multiple mode cababilities for different + # dimensions give the same result as applying the filters with + # different modes sequentially + arr = np.array([[1., 0., 0.], + [1., 1., 0.], + [0., 0., 0.]]) + + modes = ['reflect', 'wrap'] + + expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0]) + expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1]) + assert_equal(expected, + sndi.gaussian_filter(arr, 1, mode=modes)) + + expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0]) + expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1]) + assert_equal(expected, + sndi.uniform_filter(arr, 5, mode=modes)) + + expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0]) + expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1]) + assert_equal(expected, + sndi.maximum_filter(arr, size=5, mode=modes)) + + expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0]) + expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1]) + assert_equal(expected, + sndi.minimum_filter(arr, size=5, mode=modes)) + + +def test_multiple_modes_prewitt(): + # Test prewitt filter for multiple extrapolation modes + arr = np.array([[1., 0., 0.], + [1., 1., 0.], + [0., 0., 0.]]) + + expected = np.array([[1., -3., 2.], + [1., -2., 1.], + [1., -1., 0.]]) + + modes = ['reflect', 'wrap'] + + assert_equal(expected, + sndi.prewitt(arr, mode=modes)) + + +def test_multiple_modes_sobel(): + # Test sobel filter for multiple extrapolation modes + arr = np.array([[1., 0., 0.], + [1., 1., 0.], + [0., 0., 0.]]) + + expected = np.array([[1., -4., 3.], + [2., -3., 1.], + [1., -1., 0.]]) + + modes = ['reflect', 'wrap'] + + assert_equal(expected, + sndi.sobel(arr, mode=modes)) + + +def test_multiple_modes_laplace(): + # Test laplace filter for multiple extrapolation modes + arr = np.array([[1., 0., 0.], + [1., 1., 0.], + [0., 0., 0.]]) + + expected = np.array([[-2., 2., 1.], + [-2., -3., 2.], + [1., 1., 0.]]) + + modes = ['reflect', 'wrap'] + + assert_equal(expected, + sndi.laplace(arr, mode=modes)) + + +def test_multiple_modes_gaussian_laplace(): + # Test gaussian_laplace filter for multiple extrapolation modes + arr = np.array([[1., 0., 0.], + [1., 1., 0.], + [0., 0., 0.]]) + + expected = np.array([[-0.28438687, 0.01559809, 0.19773499], + [-0.36630503, -0.20069774, 0.07483620], + [0.15849176, 0.18495566, 0.21934094]]) + + modes = ['reflect', 'wrap'] + + assert_almost_equal(expected, + 
sndi.gaussian_laplace(arr, 1, mode=modes)) + + +def test_multiple_modes_gaussian_gradient_magnitude(): + # Test gaussian_gradient_magnitude filter for multiple + # extrapolation modes + arr = np.array([[1., 0., 0.], + [1., 1., 0.], + [0., 0., 0.]]) + + expected = np.array([[0.04928965, 0.09745625, 0.06405368], + [0.23056905, 0.14025305, 0.04550846], + [0.19894369, 0.14950060, 0.06796850]]) + + modes = ['reflect', 'wrap'] + + calculated = sndi.gaussian_gradient_magnitude(arr, 1, mode=modes) + + assert_almost_equal(expected, calculated) + + +def test_multiple_modes_uniform(): + # Test uniform filter for multiple extrapolation modes + arr = np.array([[1., 0., 0.], + [1., 1., 0.], + [0., 0., 0.]]) + + expected = np.array([[0.32, 0.40, 0.48], + [0.20, 0.28, 0.32], + [0.28, 0.32, 0.40]]) + + modes = ['reflect', 'wrap'] + + assert_almost_equal(expected, + sndi.uniform_filter(arr, 5, mode=modes)) + + +def test_gaussian_truncate(): + # Test that Gaussian filters can be truncated at different widths. + # These tests only check that the result has the expected number + # of nonzero elements. + arr = np.zeros((100, 100), float) + arr[50, 50] = 1 + num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum() + assert_equal(num_nonzeros_2, 21**2) + num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum() + assert_equal(num_nonzeros_5, 51**2) + + # Test truncate when sigma is a sequence. + f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5) + fpos = f > 0 + n0 = fpos.any(axis=0).sum() + # n0 should be 2*int(2.5*3.5 + 0.5) + 1 + assert_equal(n0, 19) + n1 = fpos.any(axis=1).sum() + # n1 should be 2*int(0.5*3.5 + 0.5) + 1 + assert_equal(n1, 5) + + # Test gaussian_filter1d. + x = np.zeros(51) + x[25] = 1 + f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5) + n = (f > 0).sum() + assert_equal(n, 15) + + # Test gaussian_laplace + y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5) + nonzero_indices = np.nonzero(y != 0)[0] + n = nonzero_indices.ptp() + 1 + assert_equal(n, 15) + + # Test gaussian_gradient_magnitude + y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5) + nonzero_indices = np.nonzero(y != 0)[0] + n = nonzero_indices.ptp() + 1 + assert_equal(n, 15) + + +class TestThreading(object): + def check_func_thread(self, n, fun, args, out): + from threading import Thread + thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]}) for x in range(n)] + [t.start() for t in thrds] + [t.join() for t in thrds] + + def check_func_serial(self, n, fun, args, out): + for i in range(n): + fun(*args, output=out[i]) + + def test_correlate1d(self): + d = np.random.randn(5000) + os = np.empty((4, d.size)) + ot = np.empty_like(os) + self.check_func_serial(4, sndi.correlate1d, (d, np.arange(5)), os) + self.check_func_thread(4, sndi.correlate1d, (d, np.arange(5)), ot) + assert_array_equal(os, ot) + + def test_correlate(self): + d = np.random.randn(500, 500) + k = np.random.randn(10, 10) + os = np.empty([4] + list(d.shape)) + ot = np.empty_like(os) + self.check_func_serial(4, sndi.correlate, (d, k), os) + self.check_func_thread(4, sndi.correlate, (d, k), ot) + assert_array_equal(os, ot) + + def test_median_filter(self): + d = np.random.randn(500, 500) + os = np.empty([4] + list(d.shape)) + ot = np.empty_like(os) + self.check_func_serial(4, sndi.median_filter, (d, 3), os) + self.check_func_thread(4, sndi.median_filter, (d, 3), ot) + assert_array_equal(os, ot) + + def test_uniform_filter1d(self): + d = np.random.randn(5000) + os = np.empty((4, d.size)) + ot = np.empty_like(os) + 
self.check_func_serial(4, sndi.uniform_filter1d, (d, 5), os) + self.check_func_thread(4, sndi.uniform_filter1d, (d, 5), ot) + assert_array_equal(os, ot) + + def test_minmax_filter(self): + d = np.random.randn(500, 500) + os = np.empty([4] + list(d.shape)) + ot = np.empty_like(os) + self.check_func_serial(4, sndi.maximum_filter, (d, 3), os) + self.check_func_thread(4, sndi.maximum_filter, (d, 3), ot) + assert_array_equal(os, ot) + self.check_func_serial(4, sndi.minimum_filter, (d, 3), os) + self.check_func_thread(4, sndi.minimum_filter, (d, 3), ot) + assert_array_equal(os, ot) + + +def test_minmaximum_filter1d(): + # Regression gh-3898 + in_ = np.arange(10) + out = sndi.minimum_filter1d(in_, 1) + assert_equal(in_, out) + out = sndi.maximum_filter1d(in_, 1) + assert_equal(in_, out) + # Test reflect + out = sndi.minimum_filter1d(in_, 5, mode='reflect') + assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out) + out = sndi.maximum_filter1d(in_, 5, mode='reflect') + assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out) + #Test constant + out = sndi.minimum_filter1d(in_, 5, mode='constant', cval=-1) + assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out) + out = sndi.maximum_filter1d(in_, 5, mode='constant', cval=10) + assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out) + # Test nearest + out = sndi.minimum_filter1d(in_, 5, mode='nearest') + assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out) + out = sndi.maximum_filter1d(in_, 5, mode='nearest') + assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out) + # Test wrap + out = sndi.minimum_filter1d(in_, 5, mode='wrap') + assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out) + out = sndi.maximum_filter1d(in_, 5, mode='wrap') + assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out) + + +def test_uniform_filter1d_roundoff_errors(): + # gh-6930 + in_ = np.repeat([0, 1, 0], [9, 9, 9]) + for filter_size in range(3, 10): + out = sndi.uniform_filter1d(in_, filter_size) + assert_equal(out.sum(), 10 - filter_size) + + +def test_footprint_all_zeros(): + # regression test for gh-6876: footprint of all zeros segfaults + arr = np.random.randint(0, 100, (100, 100)) + kernel = np.zeros((3, 3), bool) + with assert_raises(ValueError): + sndi.maximum_filter(arr, footprint=kernel) + +def test_gaussian_filter(): + # Test gaussian filter with np.float16 + # gh-8207 + data = np.array([1],dtype = np.float16) + sigma = 1.0 + with assert_raises(RuntimeError): + sndi.gaussian_filter(data,sigma) + + +def test_rank_filter_noninteger_rank(): + # regression test for issue 9388: ValueError for + # non integer rank when performing rank_filter + arr = np.random.random((10, 20, 30)) + assert_raises(TypeError, rank_filter, arr, 0.5, + footprint=np.ones((1, 1, 10), dtype=bool)) + + +def test_size_footprint_both_set(): + # test for input validation, expect user warning when + # size and footprint is set + with suppress_warnings() as sup: + sup.filter(UserWarning, + "ignoring size because footprint is set") + arr = np.random.random((10, 20, 30)) + rank_filter(arr, 5, size=2, footprint=np.ones((1, 1, 10), dtype=bool)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_filters.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_filters.pyc new file mode 100644 index 0000000..a58229c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_filters.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_io.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_io.py new file mode 
100644 index 0000000..36966f2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_io.py @@ -0,0 +1,38 @@ +from __future__ import division, print_function, absolute_import + +import pytest +from numpy.testing import assert_array_equal +from scipy._lib._numpy_compat import suppress_warnings +import scipy.ndimage as ndi + +import os + +try: + from PIL import Image + pil_missing = False +except ImportError: + pil_missing = True + + +@pytest.mark.skipif(pil_missing, reason="The Python Image Library could not be found.") +def test_imread(): + lp = os.path.join(os.path.dirname(__file__), 'dots.png') + with suppress_warnings() as sup: + # PIL causes a Py3k ResourceWarning + sup.filter(message="unclosed file") + sup.filter(DeprecationWarning) + img = ndi.imread(lp, mode="RGB") + assert_array_equal(img.shape, (300, 420, 3)) + + with suppress_warnings() as sup: + # PIL causes a Py3k ResourceWarning + sup.filter(message="unclosed file") + sup.filter(DeprecationWarning) + img = ndi.imread(lp, flatten=True) + assert_array_equal(img.shape, (300, 420)) + + with open(lp, 'rb') as fobj: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + img = ndi.imread(fobj, mode="RGB") + assert_array_equal(img.shape, (300, 420, 3)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_io.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_io.pyc new file mode 100644 index 0000000..0bc153f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_io.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_measurements.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_measurements.py new file mode 100644 index 0000000..2a390f9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_measurements.py @@ -0,0 +1,1106 @@ +from __future__ import division, print_function, absolute_import + +import os.path + +import numpy as np +from numpy.testing import (assert_, assert_array_almost_equal, assert_equal, + assert_almost_equal, assert_array_equal) +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +import scipy.ndimage as ndimage + + +types = [np.int8, np.uint8, np.int16, + np.uint16, np.int32, np.uint32, + np.int64, np.uint64, + np.float32, np.float64] + + +np.mod(1., 1) # Silence fmod bug on win-amd64. See #1408 and #1238. + + +class Test_measurements_stats(object): + """ndimage.measurements._stats() is a utility function used by other functions.""" + + def test_a(self): + x = [0,1,2,6] + labels = [0,0,1,1] + index = [0,1] + for shp in [(4,), (2,2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums = ndimage.measurements._stats(x, labels=labels, index=index) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + + def test_b(self): + # Same data as test_a, but different labels. The label 9 exceeds the + # length of 'labels', so this test will follow a different code path. 
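+        # _stats returns per-label element counts and sums; for this data
+        # label 0 covers the values [0, 1] (count 2, sum 1) and label 9
+        # covers [2, 6] (count 2, sum 8), which is what the assertions
+        # below verify:
+        #
+        #     counts, sums = ndimage.measurements._stats(
+        #         [0, 1, 2, 6], labels=[0, 0, 9, 9], index=[0, 9])
+        #     # counts -> [2, 2], sums -> [1., 8.]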
+ x = [0,1,2,6] + labels = [0,0,9,9] + index = [0,9] + for shp in [(4,), (2,2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums = ndimage.measurements._stats(x, labels=labels, index=index) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + + def test_a_centered(self): + x = [0,1,2,6] + labels = [0,0,1,1] + index = [0,1] + for shp in [(4,), (2,2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums, centers = ndimage.measurements._stats(x, labels=labels, + index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + def test_b_centered(self): + x = [0,1,2,6] + labels = [0,0,9,9] + index = [0,9] + for shp in [(4,), (2,2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums, centers = ndimage.measurements._stats(x, labels=labels, + index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + def test_nonint_labels(self): + x = [0,1,2,6] + labels = [0.0, 0.0, 9.0, 9.0] + index = [0.0, 9.0] + for shp in [(4,), (2,2)]: + x = np.array(x).reshape(shp) + labels = np.array(labels).reshape(shp) + counts, sums, centers = ndimage.measurements._stats(x, labels=labels, + index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + +class Test_measurements_select(object): + """ndimage.measurements._select() is a utility function used by other functions.""" + + def test_basic(self): + x = [0,1,6,2] + cases = [ + ([0,0,1,1], [0,1]), # "Small" integer labels + ([0,0,9,9], [0,9]), # A label larger than len(labels) + ([0.0,0.0,7.0,7.0], [0.0, 7.0]), # Non-integer labels + ] + for labels, index in cases: + result = ndimage.measurements._select(x, labels=labels, index=index) + assert_(len(result) == 0) + result = ndimage.measurements._select(x, labels=labels, index=index, find_max=True) + assert_(len(result) == 1) + assert_array_equal(result[0], [1, 6]) + result = ndimage.measurements._select(x, labels=labels, index=index, find_min=True) + assert_(len(result) == 1) + assert_array_equal(result[0], [0, 2]) + result = ndimage.measurements._select(x, labels=labels, index=index, + find_min=True, find_min_positions=True) + assert_(len(result) == 2) + assert_array_equal(result[0], [0, 2]) + assert_array_equal(result[1], [0, 3]) + assert_equal(result[1].dtype.kind, 'i') + result = ndimage.measurements._select(x, labels=labels, index=index, + find_max=True, find_max_positions=True) + assert_(len(result) == 2) + assert_array_equal(result[0], [1, 6]) + assert_array_equal(result[1], [1, 2]) + assert_equal(result[1].dtype.kind, 'i') + + +def test_label01(): + data = np.ones([]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, 1) + assert_equal(n, 1) + + +def test_label02(): + data = np.zeros([]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, 0) + assert_equal(n, 0) + + +def test_label03(): + data = np.ones([1]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [1]) + assert_equal(n, 1) + + +def test_label04(): + data = np.zeros([1]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [0]) + assert_equal(n, 0) + + +def test_label05(): + data = np.ones([5]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [1, 1, 1, 1, 1]) + assert_equal(n, 1) + + +def 
test_label06(): + data = np.array([1, 0, 1, 1, 0, 1]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3]) + assert_equal(n, 3) + + +def test_label07(): + data = np.array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + assert_equal(n, 0) + + +def test_label08(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]]) + out, n = ndimage.label(data) + assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + assert_equal(n, 4) + + +def test_label09(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]]) + struct = ndimage.generate_binary_structure(2, 2) + out, n = ndimage.label(data, struct) + assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [2, 2, 0, 0, 0, 0], + [2, 2, 0, 0, 0, 0], + [0, 0, 0, 3, 3, 0]]) + assert_equal(n, 3) + + +def test_label10(): + data = np.array([[0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0]]) + struct = ndimage.generate_binary_structure(2, 2) + out, n = ndimage.label(data, struct) + assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0]]) + assert_equal(n, 1) + + +def test_label11(): + for type in types: + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]], type) + out, n = ndimage.label(data) + expected = [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]] + assert_array_almost_equal(out, expected) + assert_equal(n, 4) + + +def test_label11_inplace(): + for type in types: + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0]], type) + n = ndimage.label(data, output=data) + expected = [[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]] + assert_array_almost_equal(data, expected) + assert_equal(n, 4) + + +def test_label12(): + for type in types: + data = np.array([[0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 0, 1, 1, 0]], type) + out, n = ndimage.label(data) + expected = [[0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1], + [0, 0, 0, 1, 1, 0]] + assert_array_almost_equal(out, expected) + assert_equal(n, 1) + + +def test_label13(): + for type in types: + data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], + [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], + type) + out, n = ndimage.label(data) + expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1], + [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] + assert_array_almost_equal(out, expected) + assert_equal(n, 1) + + +def 
test_label_output_typed(): + data = np.ones([5]) + for t in types: + output = np.zeros([5], dtype=t) + n = ndimage.label(data, output=output) + assert_array_almost_equal(output, 1) + assert_equal(n, 1) + + +def test_label_output_dtype(): + data = np.ones([5]) + for t in types: + output, n = ndimage.label(data, output=t) + assert_array_almost_equal(output, 1) + assert output.dtype == t + + +def test_label_output_wrong_size(): + data = np.ones([5]) + for t in types: + output = np.zeros([10], t) + assert_raises((RuntimeError, ValueError), ndimage.label, data, output=output) + + +def test_label_structuring_elements(): + data = np.loadtxt(os.path.join(os.path.dirname(__file__), "data", "label_inputs.txt")) + strels = np.loadtxt(os.path.join(os.path.dirname(__file__), "data", "label_strels.txt")) + results = np.loadtxt(os.path.join(os.path.dirname(__file__), "data", "label_results.txt")) + data = data.reshape((-1, 7, 7)) + strels = strels.reshape((-1, 3, 3)) + results = results.reshape((-1, 7, 7)) + r = 0 + for i in range(data.shape[0]): + d = data[i, :, :] + for j in range(strels.shape[0]): + s = strels[j, :, :] + assert_equal(ndimage.label(d, s)[0], results[r, :, :]) + r += 1 + + +def test_label_default_dtype(): + test_array = np.random.rand(10, 10) + label, no_features = ndimage.label(test_array > 0.5) + assert_(label.dtype in (np.int32, np.int64)) + # Shouldn't raise an exception + ndimage.find_objects(label) + + +def test_find_objects01(): + data = np.ones([], dtype=int) + out = ndimage.find_objects(data) + assert_(out == [()]) + + +def test_find_objects02(): + data = np.zeros([], dtype=int) + out = ndimage.find_objects(data) + assert_(out == []) + + +def test_find_objects03(): + data = np.ones([1], dtype=int) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None),)]) + + +def test_find_objects04(): + data = np.zeros([1], dtype=int) + out = ndimage.find_objects(data) + assert_equal(out, []) + + +def test_find_objects05(): + data = np.ones([5], dtype=int) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 5, None),)]) + + +def test_find_objects06(): + data = np.array([1, 0, 2, 2, 0, 3]) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None),), + (slice(2, 4, None),), + (slice(5, 6, None),)]) + + +def test_find_objects07(): + data = np.array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + out = ndimage.find_objects(data) + assert_equal(out, []) + + +def test_find_objects08(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [3, 3, 0, 0, 0, 0], + [3, 3, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), + (slice(1, 3, None), slice(2, 5, None)), + (slice(3, 5, None), slice(0, 2, None)), + (slice(5, 6, None), slice(3, 5, None))]) + + +def test_find_objects09(): + data = np.array([[1, 0, 0, 0, 0, 0], + [0, 0, 2, 2, 0, 0], + [0, 0, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 0]]) + out = ndimage.find_objects(data) + assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)), + (slice(1, 3, None), slice(2, 5, None)), + None, + (slice(5, 6, None), slice(3, 5, None))]) + + +def test_sum01(): + for type in types: + input = np.array([], type) + output = ndimage.sum(input) + assert_equal(output, 0.0) + + +def test_sum02(): + for type in types: + input = np.zeros([0, 4], type) + output = ndimage.sum(input) + 
assert_equal(output, 0.0) + + +def test_sum03(): + for type in types: + input = np.ones([], type) + output = ndimage.sum(input) + assert_almost_equal(output, 1.0) + + +def test_sum04(): + for type in types: + input = np.array([1, 2], type) + output = ndimage.sum(input) + assert_almost_equal(output, 3.0) + + +def test_sum05(): + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input) + assert_almost_equal(output, 10.0) + + +def test_sum06(): + labels = np.array([], bool) + for type in types: + input = np.array([], type) + output = ndimage.sum(input, labels=labels) + assert_equal(output, 0.0) + + +def test_sum07(): + labels = np.ones([0, 4], bool) + for type in types: + input = np.zeros([0, 4], type) + output = ndimage.sum(input, labels=labels) + assert_equal(output, 0.0) + + +def test_sum08(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([1, 2], type) + output = ndimage.sum(input, labels=labels) + assert_equal(output, 1.0) + + +def test_sum09(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input, labels=labels) + assert_almost_equal(output, 4.0) + + +def test_sum10(): + labels = np.array([1, 0], bool) + input = np.array([[1, 2], [3, 4]], bool) + output = ndimage.sum(input, labels=labels) + assert_almost_equal(output, 2.0) + + +def test_sum11(): + labels = np.array([1, 2], np.int8) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input, labels=labels, + index=2) + assert_almost_equal(output, 6.0) + + +def test_sum12(): + labels = np.array([[1, 2], [2, 4]], np.int8) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.sum(input, labels=labels, + index=[4, 8, 2]) + assert_array_almost_equal(output, [4.0, 0.0, 5.0]) + + +def test_mean01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.mean(input, labels=labels) + assert_almost_equal(output, 2.0) + + +def test_mean02(): + labels = np.array([1, 0], bool) + input = np.array([[1, 2], [3, 4]], bool) + output = ndimage.mean(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_mean03(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.mean(input, labels=labels, + index=2) + assert_almost_equal(output, 3.0) + + +def test_mean04(): + labels = np.array([[1, 2], [2, 4]], np.int8) + olderr = np.seterr(all='ignore') + try: + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.mean(input, labels=labels, + index=[4, 8, 2]) + assert_array_almost_equal(output[[0,2]], [4.0, 2.5]) + assert_(np.isnan(output[1])) + finally: + np.seterr(**olderr) + + +def test_minimum01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_minimum02(): + labels = np.array([1, 0], bool) + input = np.array([[2, 2], [2, 4]], bool) + output = ndimage.minimum(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_minimum03(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum(input, labels=labels, + index=2) + assert_almost_equal(output, 2.0) + + +def test_minimum04(): + labels = np.array([[1, 2], [2, 3]]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = 
ndimage.minimum(input, labels=labels, + index=[2, 3, 8]) + assert_array_almost_equal(output, [2.0, 4.0, 0.0]) + + +def test_maximum01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum(input, labels=labels) + assert_almost_equal(output, 3.0) + + +def test_maximum02(): + labels = np.array([1, 0], bool) + input = np.array([[2, 2], [2, 4]], bool) + output = ndimage.maximum(input, labels=labels) + assert_almost_equal(output, 1.0) + + +def test_maximum03(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum(input, labels=labels, + index=2) + assert_almost_equal(output, 4.0) + + +def test_maximum04(): + labels = np.array([[1, 2], [2, 3]]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum(input, labels=labels, + index=[2, 3, 8]) + assert_array_almost_equal(output, [3.0, 4.0, 0.0]) + + +def test_maximum05(): + # Regression test for ticket #501 (Trac) + x = np.array([-3,-2,-1]) + assert_equal(ndimage.maximum(x),-1) + + +def test_median01(): + a = np.array([[1, 2, 0, 1], + [5, 3, 0, 4], + [0, 0, 0, 7], + [9, 3, 0, 0]]) + labels = np.array([[1, 1, 0, 2], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + output = ndimage.median(a, labels=labels, index=[1, 2, 3]) + assert_array_almost_equal(output, [2.5, 4.0, 6.0]) + + +def test_median02(): + a = np.array([[1, 2, 0, 1], + [5, 3, 0, 4], + [0, 0, 0, 7], + [9, 3, 0, 0]]) + output = ndimage.median(a) + assert_almost_equal(output, 1.0) + + +def test_median03(): + a = np.array([[1, 2, 0, 1], + [5, 3, 0, 4], + [0, 0, 0, 7], + [9, 3, 0, 0]]) + labels = np.array([[1, 1, 0, 2], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + output = ndimage.median(a, labels=labels) + assert_almost_equal(output, 3.0) + + +def test_variance01(): + olderr = np.seterr(all='ignore') + try: + for type in types: + input = np.array([], type) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice") + output = ndimage.variance(input) + assert_(np.isnan(output)) + finally: + np.seterr(**olderr) + + +def test_variance02(): + for type in types: + input = np.array([1], type) + output = ndimage.variance(input) + assert_almost_equal(output, 0.0) + + +def test_variance03(): + for type in types: + input = np.array([1, 3], type) + output = ndimage.variance(input) + assert_almost_equal(output, 1.0) + + +def test_variance04(): + input = np.array([1, 0], bool) + output = ndimage.variance(input) + assert_almost_equal(output, 0.25) + + +def test_variance05(): + labels = [2, 2, 3] + for type in types: + input = np.array([1, 3, 8], type) + output = ndimage.variance(input, labels, 2) + assert_almost_equal(output, 1.0) + + +def test_variance06(): + labels = [2, 2, 3, 3, 4] + olderr = np.seterr(all='ignore') + try: + for type in types: + input = np.array([1, 3, 8, 10, 8], type) + output = ndimage.variance(input, labels, [2, 3, 4]) + assert_array_almost_equal(output, [1.0, 1.0, 0.0]) + finally: + np.seterr(**olderr) + + +def test_standard_deviation01(): + olderr = np.seterr(all='ignore') + try: + for type in types: + input = np.array([], type) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice") + output = ndimage.standard_deviation(input) + assert_(np.isnan(output)) + finally: + np.seterr(**olderr) + + +def test_standard_deviation02(): + for type in types: + input = np.array([1], type) + output = ndimage.standard_deviation(input) + 
assert_almost_equal(output, 0.0) + + +def test_standard_deviation03(): + for type in types: + input = np.array([1, 3], type) + output = ndimage.standard_deviation(input) + assert_almost_equal(output, np.sqrt(1.0)) + + +def test_standard_deviation04(): + input = np.array([1, 0], bool) + output = ndimage.standard_deviation(input) + assert_almost_equal(output, 0.5) + + +def test_standard_deviation05(): + labels = [2, 2, 3] + for type in types: + input = np.array([1, 3, 8], type) + output = ndimage.standard_deviation(input, labels, 2) + assert_almost_equal(output, 1.0) + + +def test_standard_deviation06(): + labels = [2, 2, 3, 3, 4] + olderr = np.seterr(all='ignore') + try: + for type in types: + input = np.array([1, 3, 8, 10, 8], type) + output = ndimage.standard_deviation(input, labels, [2, 3, 4]) + assert_array_almost_equal(output, [1.0, 1.0, 0.0]) + finally: + np.seterr(**olderr) + + +def test_standard_deviation07(): + labels = [1] + olderr = np.seterr(all='ignore') + try: + for type in types: + input = np.array([-0.00619519], type) + output = ndimage.standard_deviation(input, labels, [1]) + assert_array_almost_equal(output, [0]) + finally: + np.seterr(**olderr) + + +def test_minimum_position01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.minimum_position(input, labels=labels) + assert_equal(output, (0, 0)) + + +def test_minimum_position02(): + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], type) + output = ndimage.minimum_position(input) + assert_equal(output, (1, 2)) + + +def test_minimum_position03(): + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], bool) + output = ndimage.minimum_position(input) + assert_equal(output, (1, 2)) + + +def test_minimum_position04(): + input = np.array([[5, 4, 2, 5], + [3, 7, 1, 2], + [1, 5, 1, 1]], bool) + output = ndimage.minimum_position(input) + assert_equal(output, (0, 0)) + + +def test_minimum_position05(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 2, 3]], type) + output = ndimage.minimum_position(input, labels) + assert_equal(output, (2, 0)) + + +def test_minimum_position06(): + labels = [1, 2, 3, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], type) + output = ndimage.minimum_position(input, labels, 2) + assert_equal(output, (0, 1)) + + +def test_minimum_position07(): + labels = [1, 2, 3, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 0, 2], + [1, 5, 1, 1]], type) + output = ndimage.minimum_position(input, labels, + [2, 3]) + assert_equal(output[0], (0, 1)) + assert_equal(output[1], (1, 2)) + + +def test_maximum_position01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output = ndimage.maximum_position(input, + labels=labels) + assert_equal(output, (1, 0)) + + +def test_maximum_position02(): + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input) + assert_equal(output, (1, 2)) + + +def test_maximum_position03(): + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], bool) + output = ndimage.maximum_position(input) + assert_equal(output, (0, 0)) + + +def test_maximum_position04(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels) + 
assert_equal(output, (1, 1)) + + +def test_maximum_position05(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels, 1) + assert_equal(output, (0, 0)) + + +def test_maximum_position06(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels, + [1, 2]) + assert_equal(output[0], (0, 0)) + assert_equal(output[1], (1, 1)) + + +def test_maximum_position07(): + # Test float labels + labels = np.array([1.0, 2.5, 0.0, 4.5]) + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output = ndimage.maximum_position(input, labels, + [1.0, 4.5]) + assert_equal(output[0], (0, 0)) + assert_equal(output[1], (0, 3)) + + +def test_extrema01(): + labels = np.array([1, 0], bool) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output1 = ndimage.extrema(input, labels=labels) + output2 = ndimage.minimum(input, labels=labels) + output3 = ndimage.maximum(input, labels=labels) + output4 = ndimage.minimum_position(input, + labels=labels) + output5 = ndimage.maximum_position(input, + labels=labels) + assert_equal(output1, (output2, output3, output4, output5)) + + +def test_extrema02(): + labels = np.array([1, 2]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output1 = ndimage.extrema(input, labels=labels, + index=2) + output2 = ndimage.minimum(input, labels=labels, + index=2) + output3 = ndimage.maximum(input, labels=labels, + index=2) + output4 = ndimage.minimum_position(input, + labels=labels, index=2) + output5 = ndimage.maximum_position(input, + labels=labels, index=2) + assert_equal(output1, (output2, output3, output4, output5)) + + +def test_extrema03(): + labels = np.array([[1, 2], [2, 3]]) + for type in types: + input = np.array([[1, 2], [3, 4]], type) + output1 = ndimage.extrema(input, labels=labels, + index=[2, 3, 8]) + output2 = ndimage.minimum(input, labels=labels, + index=[2, 3, 8]) + output3 = ndimage.maximum(input, labels=labels, + index=[2, 3, 8]) + output4 = ndimage.minimum_position(input, + labels=labels, index=[2, 3, 8]) + output5 = ndimage.maximum_position(input, + labels=labels, index=[2, 3, 8]) + assert_array_almost_equal(output1[0], output2) + assert_array_almost_equal(output1[1], output3) + assert_array_almost_equal(output1[2], output4) + assert_array_almost_equal(output1[3], output5) + + +def test_extrema04(): + labels = [1, 2, 0, 4] + for type in types: + input = np.array([[5, 4, 2, 5], + [3, 7, 8, 2], + [1, 5, 1, 1]], type) + output1 = ndimage.extrema(input, labels, [1, 2]) + output2 = ndimage.minimum(input, labels, [1, 2]) + output3 = ndimage.maximum(input, labels, [1, 2]) + output4 = ndimage.minimum_position(input, labels, + [1, 2]) + output5 = ndimage.maximum_position(input, labels, + [1, 2]) + assert_array_almost_equal(output1[0], output2) + assert_array_almost_equal(output1[1], output3) + assert_array_almost_equal(output1[2], output4) + assert_array_almost_equal(output1[3], output5) + + +def test_center_of_mass01(): + expected = [0.0, 0.0] + for type in types: + input = np.array([[1, 0], [0, 0]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass02(): + expected = [1, 0] + for type in types: + input = np.array([[0, 0], [1, 0]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, 
expected) + + +def test_center_of_mass03(): + expected = [0, 1] + for type in types: + input = np.array([[0, 1], [0, 0]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass04(): + expected = [1, 1] + for type in types: + input = np.array([[0, 0], [0, 1]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass05(): + expected = [0.5, 0.5] + for type in types: + input = np.array([[1, 1], [1, 1]], type) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass06(): + expected = [0.5, 0.5] + input = np.array([[1, 2], [3, 1]], bool) + output = ndimage.center_of_mass(input) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass07(): + labels = [1, 0] + expected = [0.5, 0.0] + input = np.array([[1, 2], [3, 1]], bool) + output = ndimage.center_of_mass(input, labels) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass08(): + labels = [1, 2] + expected = [0.5, 1.0] + input = np.array([[5, 2], [3, 1]], bool) + output = ndimage.center_of_mass(input, labels, 2) + assert_array_almost_equal(output, expected) + + +def test_center_of_mass09(): + labels = [1, 2] + expected = [(0.5, 0.0), (0.5, 1.0)] + input = np.array([[1, 2], [1, 1]], bool) + output = ndimage.center_of_mass(input, labels, [1, 2]) + assert_array_almost_equal(output, expected) + + +def test_histogram01(): + expected = np.ones(10) + input = np.arange(10) + output = ndimage.histogram(input, 0, 10, 10) + assert_array_almost_equal(output, expected) + + +def test_histogram02(): + labels = [1, 1, 1, 1, 2, 2, 2, 2] + expected = [0, 2, 0, 1, 1] + input = np.array([1, 1, 3, 4, 3, 3, 3, 3]) + output = ndimage.histogram(input, 0, 4, 5, labels, 1) + assert_array_almost_equal(output, expected) + + +def test_histogram03(): + labels = [1, 0, 1, 1, 2, 2, 2, 2] + expected1 = [0, 1, 0, 1, 1] + expected2 = [0, 0, 0, 3, 0] + input = np.array([1, 1, 3, 4, 3, 5, 3, 3]) + output = ndimage.histogram(input, 0, 4, 5, labels, (1,2)) + + assert_array_almost_equal(output[0], expected1) + assert_array_almost_equal(output[1], expected2) + + +def test_stat_funcs_2d(): + a = np.array([[5,6,0,0,0], [8,9,0,0,0], [0,0,0,3,5]]) + lbl = np.array([[1,1,0,0,0], [1,1,0,0,0], [0,0,0,2,2]]) + + mean = ndimage.mean(a, labels=lbl, index=[1, 2]) + assert_array_equal(mean, [7.0, 4.0]) + + var = ndimage.variance(a, labels=lbl, index=[1, 2]) + assert_array_equal(var, [2.5, 1.0]) + + std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2]) + assert_array_almost_equal(std, np.sqrt([2.5, 1.0])) + + med = ndimage.median(a, labels=lbl, index=[1, 2]) + assert_array_equal(med, [7.0, 4.0]) + + min = ndimage.minimum(a, labels=lbl, index=[1, 2]) + assert_array_equal(min, [5, 3]) + + max = ndimage.maximum(a, labels=lbl, index=[1, 2]) + assert_array_equal(max, [9, 5]) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_measurements.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_measurements.pyc new file mode 100644 index 0000000..77baaa0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_measurements.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_ndimage.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_ndimage.py new file mode 100644 index 0000000..b5639a5 --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_ndimage.py @@ -0,0 +1,4624 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import division, print_function, absolute_import + +import math +import sys + +import numpy +from numpy import fft +from numpy.testing import (assert_, assert_equal, assert_array_equal, + assert_array_almost_equal, assert_almost_equal) +import pytest +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings +import scipy.ndimage as ndimage + + +eps = 1e-12 + + +def sumsq(a, b): + return math.sqrt(((a - b)**2).sum()) + + +class TestNdimage: + def setup_method(self): + # list of numarray data types + self.integer_types = [ + numpy.int8, numpy.uint8, numpy.int16, numpy.uint16, + numpy.int32, numpy.uint32, numpy.int64, numpy.uint64] + + self.float_types = [numpy.float32, numpy.float64] + + self.types = self.integer_types + self.float_types + + # list of boundary modes: + self.modes = ['nearest', 'wrap', 'reflect', 'mirror', 'constant'] + + def test_correlate01(self): + array = numpy.array([1, 2]) + weights = numpy.array([2]) + expected = [2, 4] + + output = ndimage.correlate(array, weights) + assert_array_almost_equal(output, expected) + + output = ndimage.convolve(array, weights) + assert_array_almost_equal(output, expected) + + output = ndimage.correlate1d(array, weights) + assert_array_almost_equal(output, expected) + + output = ndimage.convolve1d(array, weights) + assert_array_almost_equal(output, expected) + + def test_correlate02(self): + array = numpy.array([1, 2, 3]) + kernel = numpy.array([1]) + + output = ndimage.correlate(array, kernel) + assert_array_almost_equal(array, output) + + output = ndimage.convolve(array, kernel) + assert_array_almost_equal(array, output) + + output = ndimage.correlate1d(array, kernel) + assert_array_almost_equal(array, output) + + output = ndimage.convolve1d(array, kernel) + assert_array_almost_equal(array, output) + + def test_correlate03(self): + array = numpy.array([1]) + weights = numpy.array([1, 1]) + expected = [2] + + output = ndimage.correlate(array, weights) + 
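+
+# --- Illustrative aside, not part of the upstream scipy test suite: the
+# correlate/convolve pairs in these tests track separate expectations
+# because convolution flips the kernel while correlation does not. A
+# minimal sketch (helper name is ours; the values are those of
+# test_correlate06 below):
+def _sketch_correlate_vs_convolve():
+    import numpy as np
+    from scipy import ndimage
+    x = np.array([1, 2, 3])
+    w = np.array([1, 2, 3])             # asymmetric, so the two differ
+    assert list(ndimage.correlate(x, w)) == [9, 14, 17]
+    assert list(ndimage.convolve(x, w)) == [7, 10, 15]
+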
assert_array_almost_equal(output, expected) + + output = ndimage.convolve(array, weights) + assert_array_almost_equal(output, expected) + + output = ndimage.correlate1d(array, weights) + assert_array_almost_equal(output, expected) + + output = ndimage.convolve1d(array, weights) + assert_array_almost_equal(output, expected) + + def test_correlate04(self): + array = numpy.array([1, 2]) + tcor = [2, 3] + tcov = [3, 4] + weights = numpy.array([1, 1]) + output = ndimage.correlate(array, weights) + assert_array_almost_equal(output, tcor) + output = ndimage.convolve(array, weights) + assert_array_almost_equal(output, tcov) + output = ndimage.correlate1d(array, weights) + assert_array_almost_equal(output, tcor) + output = ndimage.convolve1d(array, weights) + assert_array_almost_equal(output, tcov) + + def test_correlate05(self): + array = numpy.array([1, 2, 3]) + tcor = [2, 3, 5] + tcov = [3, 5, 6] + kernel = numpy.array([1, 1]) + output = ndimage.correlate(array, kernel) + assert_array_almost_equal(tcor, output) + output = ndimage.convolve(array, kernel) + assert_array_almost_equal(tcov, output) + output = ndimage.correlate1d(array, kernel) + assert_array_almost_equal(tcor, output) + output = ndimage.convolve1d(array, kernel) + assert_array_almost_equal(tcov, output) + + def test_correlate06(self): + array = numpy.array([1, 2, 3]) + tcor = [9, 14, 17] + tcov = [7, 10, 15] + weights = numpy.array([1, 2, 3]) + output = ndimage.correlate(array, weights) + assert_array_almost_equal(output, tcor) + output = ndimage.convolve(array, weights) + assert_array_almost_equal(output, tcov) + output = ndimage.correlate1d(array, weights) + assert_array_almost_equal(output, tcor) + output = ndimage.convolve1d(array, weights) + assert_array_almost_equal(output, tcov) + + def test_correlate07(self): + array = numpy.array([1, 2, 3]) + expected = [5, 8, 11] + weights = numpy.array([1, 2, 1]) + output = ndimage.correlate(array, weights) + assert_array_almost_equal(output, expected) + output = ndimage.convolve(array, weights) + assert_array_almost_equal(output, expected) + output = ndimage.correlate1d(array, weights) + assert_array_almost_equal(output, expected) + output = ndimage.convolve1d(array, weights) + assert_array_almost_equal(output, expected) + + def test_correlate08(self): + array = numpy.array([1, 2, 3]) + tcor = [1, 2, 5] + tcov = [3, 6, 7] + weights = numpy.array([1, 2, -1]) + output = ndimage.correlate(array, weights) + assert_array_almost_equal(output, tcor) + output = ndimage.convolve(array, weights) + assert_array_almost_equal(output, tcov) + output = ndimage.correlate1d(array, weights) + assert_array_almost_equal(output, tcor) + output = ndimage.convolve1d(array, weights) + assert_array_almost_equal(output, tcov) + + def test_correlate09(self): + array = [] + kernel = numpy.array([1, 1]) + output = ndimage.correlate(array, kernel) + assert_array_almost_equal(array, output) + output = ndimage.convolve(array, kernel) + assert_array_almost_equal(array, output) + output = ndimage.correlate1d(array, kernel) + assert_array_almost_equal(array, output) + output = ndimage.convolve1d(array, kernel) + assert_array_almost_equal(array, output) + + def test_correlate10(self): + array = [[]] + kernel = numpy.array([[1, 1]]) + output = ndimage.correlate(array, kernel) + assert_array_almost_equal(array, output) + output = ndimage.convolve(array, kernel) + assert_array_almost_equal(array, output) + + def test_correlate11(self): + array = numpy.array([[1, 2, 3], + [4, 5, 6]]) + kernel = numpy.array([[1, 1], + [1, 1]]) 
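+
+# --- Illustrative aside, not part of the upstream scipy test suite: in
+# two dimensions the kernel slides over the array under the default
+# 'reflect' boundary. A minimal sketch (helper name is ours) reproducing
+# the 2x2 box-kernel expectation asserted just below:
+def _sketch_correlate_2d():
+    import numpy as np
+    from scipy import ndimage
+    a = np.array([[1, 2, 3],
+                  [4, 5, 6]])
+    k = np.ones((2, 2), dtype=int)      # same kernel as test_correlate11
+    assert ndimage.correlate(a, k).tolist() == [[4, 6, 10], [10, 12, 16]]
+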
+ output = ndimage.correlate(array, kernel) + assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output) + output = ndimage.convolve(array, kernel) + assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output) + + def test_correlate12(self): + array = numpy.array([[1, 2, 3], + [4, 5, 6]]) + kernel = numpy.array([[1, 0], + [0, 1]]) + output = ndimage.correlate(array, kernel) + assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) + output = ndimage.convolve(array, kernel) + assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) + + def test_correlate13(self): + kernel = numpy.array([[1, 0], + [0, 1]]) + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [4, 5, 6]], type1) + for type2 in self.types: + output = ndimage.correlate(array, kernel, output=type2) + assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) + assert_equal(output.dtype.type, type2) + + output = ndimage.convolve(array, kernel, + output=type2) + assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) + assert_equal(output.dtype.type, type2) + + def test_correlate14(self): + kernel = numpy.array([[1, 0], + [0, 1]]) + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [4, 5, 6]], type1) + for type2 in self.types: + output = numpy.zeros(array.shape, type2) + ndimage.correlate(array, kernel, + output=output) + assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) + assert_equal(output.dtype.type, type2) + + ndimage.convolve(array, kernel, output=output) + assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) + assert_equal(output.dtype.type, type2) + + def test_correlate15(self): + kernel = numpy.array([[1, 0], + [0, 1]]) + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [4, 5, 6]], type1) + output = ndimage.correlate(array, kernel, + output=numpy.float32) + assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) + assert_equal(output.dtype.type, numpy.float32) + + output = ndimage.convolve(array, kernel, + output=numpy.float32) + assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) + assert_equal(output.dtype.type, numpy.float32) + + def test_correlate16(self): + kernel = numpy.array([[0.5, 0], + [0, 0.5]]) + for type1 in self.types: + array = numpy.array([[1, 2, 3], [4, 5, 6]], type1) + output = ndimage.correlate(array, kernel, output=numpy.float32) + assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output) + assert_equal(output.dtype.type, numpy.float32) + + output = ndimage.convolve(array, kernel, output=numpy.float32) + assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output) + assert_equal(output.dtype.type, numpy.float32) + + def test_correlate17(self): + array = numpy.array([1, 2, 3]) + tcor = [3, 5, 6] + tcov = [2, 3, 5] + kernel = numpy.array([1, 1]) + output = ndimage.correlate(array, kernel, origin=-1) + assert_array_almost_equal(tcor, output) + output = ndimage.convolve(array, kernel, origin=-1) + assert_array_almost_equal(tcov, output) + output = ndimage.correlate1d(array, kernel, origin=-1) + assert_array_almost_equal(tcor, output) + output = ndimage.convolve1d(array, kernel, origin=-1) + assert_array_almost_equal(tcov, output) + + def test_correlate18(self): + kernel = numpy.array([[1, 0], + [0, 1]]) + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [4, 5, 6]], type1) + output = ndimage.correlate(array, kernel, + output=numpy.float32, + mode='nearest', origin=-1) + assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output) + assert_equal(output.dtype.type, numpy.float32) + + output = ndimage.convolve(array, 
kernel, + output=numpy.float32, + mode='nearest', origin=-1) + assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output) + assert_equal(output.dtype.type, numpy.float32) + + def test_correlate19(self): + kernel = numpy.array([[1, 0], + [0, 1]]) + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [4, 5, 6]], type1) + output = ndimage.correlate(array, kernel, + output=numpy.float32, + mode='nearest', origin=[-1, 0]) + assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output) + assert_equal(output.dtype.type, numpy.float32) + + output = ndimage.convolve(array, kernel, + output=numpy.float32, + mode='nearest', origin=[-1, 0]) + assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output) + assert_equal(output.dtype.type, numpy.float32) + + def test_correlate20(self): + weights = numpy.array([1, 2, 1]) + expected = [[5, 10, 15], [7, 14, 21]] + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [2, 4, 6]], type1) + for type2 in self.types: + output = numpy.zeros((2, 3), type2) + ndimage.correlate1d(array, weights, axis=0, + output=output) + assert_array_almost_equal(output, expected) + ndimage.convolve1d(array, weights, axis=0, + output=output) + assert_array_almost_equal(output, expected) + + def test_correlate21(self): + array = numpy.array([[1, 2, 3], + [2, 4, 6]]) + expected = [[5, 10, 15], [7, 14, 21]] + weights = numpy.array([1, 2, 1]) + output = ndimage.correlate1d(array, weights, axis=0) + assert_array_almost_equal(output, expected) + output = ndimage.convolve1d(array, weights, axis=0) + assert_array_almost_equal(output, expected) + + def test_correlate22(self): + weights = numpy.array([1, 2, 1]) + expected = [[6, 12, 18], [6, 12, 18]] + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [2, 4, 6]], type1) + for type2 in self.types: + output = numpy.zeros((2, 3), type2) + ndimage.correlate1d(array, weights, axis=0, + mode='wrap', output=output) + assert_array_almost_equal(output, expected) + ndimage.convolve1d(array, weights, axis=0, + mode='wrap', output=output) + assert_array_almost_equal(output, expected) + + def test_correlate23(self): + weights = numpy.array([1, 2, 1]) + expected = [[5, 10, 15], [7, 14, 21]] + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [2, 4, 6]], type1) + for type2 in self.types: + output = numpy.zeros((2, 3), type2) + ndimage.correlate1d(array, weights, axis=0, + mode='nearest', output=output) + assert_array_almost_equal(output, expected) + ndimage.convolve1d(array, weights, axis=0, + mode='nearest', output=output) + assert_array_almost_equal(output, expected) + + def test_correlate24(self): + weights = numpy.array([1, 2, 1]) + tcor = [[7, 14, 21], [8, 16, 24]] + tcov = [[4, 8, 12], [5, 10, 15]] + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [2, 4, 6]], type1) + for type2 in self.types: + output = numpy.zeros((2, 3), type2) + ndimage.correlate1d(array, weights, axis=0, + mode='nearest', output=output, origin=-1) + assert_array_almost_equal(output, tcor) + ndimage.convolve1d(array, weights, axis=0, + mode='nearest', output=output, origin=-1) + assert_array_almost_equal(output, tcov) + + def test_correlate25(self): + weights = numpy.array([1, 2, 1]) + tcor = [[4, 8, 12], [5, 10, 15]] + tcov = [[7, 14, 21], [8, 16, 24]] + for type1 in self.types: + array = numpy.array([[1, 2, 3], + [2, 4, 6]], type1) + for type2 in self.types: + output = numpy.zeros((2, 3), type2) + ndimage.correlate1d(array, weights, axis=0, + mode='nearest', output=output, origin=1) + assert_array_almost_equal(output, tcor) + 
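+
+# --- Illustrative aside, not part of the upstream scipy test suite: the
+# correlate1d/convolve1d tests above apply a 1-D kernel along a chosen
+# axis of a 2-D array. A minimal sketch (helper name is ours; array,
+# weights and expectation are those of test_correlate21):
+def _sketch_correlate1d_axis():
+    import numpy as np
+    from scipy import ndimage
+    a = np.array([[1, 2, 3],
+                  [2, 4, 6]])
+    out = ndimage.correlate1d(a, np.array([1, 2, 1]), axis=0)
+    assert out.tolist() == [[5, 10, 15], [7, 14, 21]]
+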
ndimage.convolve1d(array, weights, axis=0, + mode='nearest', output=output, origin=1) + assert_array_almost_equal(output, tcov) + + def test_gauss01(self): + input = numpy.array([[1, 2, 3], + [2, 4, 6]], numpy.float32) + output = ndimage.gaussian_filter(input, 0) + assert_array_almost_equal(output, input) + + def test_gauss02(self): + input = numpy.array([[1, 2, 3], + [2, 4, 6]], numpy.float32) + output = ndimage.gaussian_filter(input, 1.0) + assert_equal(input.dtype, output.dtype) + assert_equal(input.shape, output.shape) + + def test_gauss03(self): + # single precision data" + input = numpy.arange(100 * 100).astype(numpy.float32) + input.shape = (100, 100) + output = ndimage.gaussian_filter(input, [1.0, 1.0]) + + assert_equal(input.dtype, output.dtype) + assert_equal(input.shape, output.shape) + + # input.sum() is 49995000.0. With single precision floats, we can't + # expect more than 8 digits of accuracy, so use decimal=0 in this test. + assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'), + decimal=0) + assert_(sumsq(input, output) > 1.0) + + def test_gauss04(self): + input = numpy.arange(100 * 100).astype(numpy.float32) + input.shape = (100, 100) + otype = numpy.float64 + output = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype) + assert_equal(output.dtype.type, numpy.float64) + assert_equal(input.shape, output.shape) + assert_(sumsq(input, output) > 1.0) + + def test_gauss05(self): + input = numpy.arange(100 * 100).astype(numpy.float32) + input.shape = (100, 100) + otype = numpy.float64 + output = ndimage.gaussian_filter(input, [1.0, 1.0], + order=1, output=otype) + assert_equal(output.dtype.type, numpy.float64) + assert_equal(input.shape, output.shape) + assert_(sumsq(input, output) > 1.0) + + def test_gauss06(self): + input = numpy.arange(100 * 100).astype(numpy.float32) + input.shape = (100, 100) + otype = numpy.float64 + output1 = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype) + output2 = ndimage.gaussian_filter(input, 1.0, output=otype) + assert_array_almost_equal(output1, output2) + + def test_prewitt01(self): + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) + t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1) + output = ndimage.prewitt(array, 0) + assert_array_almost_equal(t, output) + + def test_prewitt02(self): + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) + t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1) + output = numpy.zeros(array.shape, type_) + ndimage.prewitt(array, 0, output) + assert_array_almost_equal(t, output) + + def test_prewitt03(self): + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1) + t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0) + output = ndimage.prewitt(array, 1) + assert_array_almost_equal(t, output) + + def test_prewitt04(self): + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + t = ndimage.prewitt(array, -1) + output = ndimage.prewitt(array, 1) + assert_array_almost_equal(t, output) + + def test_sobel01(self): + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) + t = ndimage.correlate1d(t, [1.0, 2.0, 
1.0], 1) + output = ndimage.sobel(array, 0) + assert_array_almost_equal(t, output) + + def test_sobel02(self): + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0) + t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1) + output = numpy.zeros(array.shape, type_) + ndimage.sobel(array, 0, output) + assert_array_almost_equal(t, output) + + def test_sobel03(self): + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1) + t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0) + output = numpy.zeros(array.shape, type_) + output = ndimage.sobel(array, 1) + assert_array_almost_equal(t, output) + + def test_sobel04(self): + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + t = ndimage.sobel(array, -1) + output = ndimage.sobel(array, 1) + assert_array_almost_equal(t, output) + + def test_laplace01(self): + for type_ in [numpy.int32, numpy.float32, numpy.float64]: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) * 100 + tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0) + tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1) + output = ndimage.laplace(array) + assert_array_almost_equal(tmp1 + tmp2, output) + + def test_laplace02(self): + for type_ in [numpy.int32, numpy.float32, numpy.float64]: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) * 100 + tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0) + tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1) + output = numpy.zeros(array.shape, type_) + ndimage.laplace(array, output=output) + assert_array_almost_equal(tmp1 + tmp2, output) + + def test_gaussian_laplace01(self): + for type_ in [numpy.int32, numpy.float32, numpy.float64]: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) * 100 + tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0]) + tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2]) + output = ndimage.gaussian_laplace(array, 1.0) + assert_array_almost_equal(tmp1 + tmp2, output) + + def test_gaussian_laplace02(self): + for type_ in [numpy.int32, numpy.float32, numpy.float64]: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) * 100 + tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0]) + tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2]) + output = numpy.zeros(array.shape, type_) + ndimage.gaussian_laplace(array, 1.0, output) + assert_array_almost_equal(tmp1 + tmp2, output) + + def test_generic_laplace01(self): + def derivative2(input, axis, output, mode, cval, a, b): + sigma = [a, b / 2.0] + input = numpy.asarray(input) + order = [0] * input.ndim + order[axis] = 2 + return ndimage.gaussian_filter(input, sigma, order, + output, mode, cval) + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + output = numpy.zeros(array.shape, type_) + tmp = ndimage.generic_laplace(array, derivative2, + extra_arguments=(1.0,), + extra_keywords={'b': 2.0}) + ndimage.gaussian_laplace(array, 1.0, output) + assert_array_almost_equal(tmp, output) + + def test_gaussian_gradient_magnitude01(self): + for type_ in [numpy.int32, numpy.float32, numpy.float64]: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) * 100 + tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0]) + tmp2 = 
ndimage.gaussian_filter(array, 1.0, [0, 1]) + output = ndimage.gaussian_gradient_magnitude(array, 1.0) + expected = tmp1 * tmp1 + tmp2 * tmp2 + expected = numpy.sqrt(expected).astype(type_) + assert_array_almost_equal(expected, output) + + def test_gaussian_gradient_magnitude02(self): + for type_ in [numpy.int32, numpy.float32, numpy.float64]: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) * 100 + tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0]) + tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1]) + output = numpy.zeros(array.shape, type_) + ndimage.gaussian_gradient_magnitude(array, 1.0, output) + expected = tmp1 * tmp1 + tmp2 * tmp2 + expected = numpy.sqrt(expected).astype(type_) + assert_array_almost_equal(expected, output) + + def test_generic_gradient_magnitude01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], numpy.float64) + + def derivative(input, axis, output, mode, cval, a, b): + sigma = [a, b / 2.0] + input = numpy.asarray(input) + order = [0] * input.ndim + order[axis] = 1 + return ndimage.gaussian_filter(input, sigma, order, + output, mode, cval) + tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0) + tmp2 = ndimage.generic_gradient_magnitude( + array, derivative, extra_arguments=(1.0,), + extra_keywords={'b': 2.0}) + assert_array_almost_equal(tmp1, tmp2) + + def test_uniform01(self): + array = numpy.array([2, 4, 6]) + size = 2 + output = ndimage.uniform_filter1d(array, size, origin=-1) + assert_array_almost_equal([3, 5, 6], output) + + def test_uniform02(self): + array = numpy.array([1, 2, 3]) + filter_shape = [0] + output = ndimage.uniform_filter(array, filter_shape) + assert_array_almost_equal(array, output) + + def test_uniform03(self): + array = numpy.array([1, 2, 3]) + filter_shape = [1] + output = ndimage.uniform_filter(array, filter_shape) + assert_array_almost_equal(array, output) + + def test_uniform04(self): + array = numpy.array([2, 4, 6]) + filter_shape = [2] + output = ndimage.uniform_filter(array, filter_shape) + assert_array_almost_equal([2, 3, 5], output) + + def test_uniform05(self): + array = [] + filter_shape = [1] + output = ndimage.uniform_filter(array, filter_shape) + assert_array_almost_equal([], output) + + def test_uniform06(self): + filter_shape = [2, 2] + for type1 in self.types: + array = numpy.array([[4, 8, 12], + [16, 20, 24]], type1) + for type2 in self.types: + output = ndimage.uniform_filter( + array, filter_shape, output=type2) + assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output) + assert_equal(output.dtype.type, type2) + + def test_minimum_filter01(self): + array = numpy.array([1, 2, 3, 4, 5]) + filter_shape = numpy.array([2]) + output = ndimage.minimum_filter(array, filter_shape) + assert_array_almost_equal([1, 1, 2, 3, 4], output) + + def test_minimum_filter02(self): + array = numpy.array([1, 2, 3, 4, 5]) + filter_shape = numpy.array([3]) + output = ndimage.minimum_filter(array, filter_shape) + assert_array_almost_equal([1, 1, 2, 3, 4], output) + + def test_minimum_filter03(self): + array = numpy.array([3, 2, 5, 1, 4]) + filter_shape = numpy.array([2]) + output = ndimage.minimum_filter(array, filter_shape) + assert_array_almost_equal([3, 2, 2, 1, 1], output) + + def test_minimum_filter04(self): + array = numpy.array([3, 2, 5, 1, 4]) + filter_shape = numpy.array([3]) + output = ndimage.minimum_filter(array, filter_shape) + assert_array_almost_equal([2, 2, 1, 1, 1], output) + + def test_minimum_filter05(self): + array = numpy.array([[3, 2, 5, 1, 4], 
+ [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + filter_shape = numpy.array([2, 3]) + output = ndimage.minimum_filter(array, filter_shape) + assert_array_almost_equal([[2, 2, 1, 1, 1], + [2, 2, 1, 1, 1], + [5, 3, 3, 1, 1]], output) + + def test_minimum_filter06(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 1, 1], [1, 1, 1]] + output = ndimage.minimum_filter(array, footprint=footprint) + assert_array_almost_equal([[2, 2, 1, 1, 1], + [2, 2, 1, 1, 1], + [5, 3, 3, 1, 1]], output) + + def test_minimum_filter07(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.minimum_filter(array, footprint=footprint) + assert_array_almost_equal([[2, 2, 1, 1, 1], + [2, 3, 1, 3, 1], + [5, 5, 3, 3, 1]], output) + + def test_minimum_filter08(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.minimum_filter(array, footprint=footprint, origin=-1) + assert_array_almost_equal([[3, 1, 3, 1, 1], + [5, 3, 3, 1, 1], + [3, 3, 1, 1, 1]], output) + + def test_minimum_filter09(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.minimum_filter(array, footprint=footprint, + origin=[-1, 0]) + assert_array_almost_equal([[2, 3, 1, 3, 1], + [5, 5, 3, 3, 1], + [5, 3, 3, 1, 1]], output) + + def test_maximum_filter01(self): + array = numpy.array([1, 2, 3, 4, 5]) + filter_shape = numpy.array([2]) + output = ndimage.maximum_filter(array, filter_shape) + assert_array_almost_equal([1, 2, 3, 4, 5], output) + + def test_maximum_filter02(self): + array = numpy.array([1, 2, 3, 4, 5]) + filter_shape = numpy.array([3]) + output = ndimage.maximum_filter(array, filter_shape) + assert_array_almost_equal([2, 3, 4, 5, 5], output) + + def test_maximum_filter03(self): + array = numpy.array([3, 2, 5, 1, 4]) + filter_shape = numpy.array([2]) + output = ndimage.maximum_filter(array, filter_shape) + assert_array_almost_equal([3, 3, 5, 5, 4], output) + + def test_maximum_filter04(self): + array = numpy.array([3, 2, 5, 1, 4]) + filter_shape = numpy.array([3]) + output = ndimage.maximum_filter(array, filter_shape) + assert_array_almost_equal([3, 5, 5, 5, 4], output) + + def test_maximum_filter05(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + filter_shape = numpy.array([2, 3]) + output = ndimage.maximum_filter(array, filter_shape) + assert_array_almost_equal([[3, 5, 5, 5, 4], + [7, 9, 9, 9, 5], + [8, 9, 9, 9, 7]], output) + + def test_maximum_filter06(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 1, 1], [1, 1, 1]] + output = ndimage.maximum_filter(array, footprint=footprint) + assert_array_almost_equal([[3, 5, 5, 5, 4], + [7, 9, 9, 9, 5], + [8, 9, 9, 9, 7]], output) + + def test_maximum_filter07(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.maximum_filter(array, footprint=footprint) + assert_array_almost_equal([[3, 5, 5, 5, 4], + [7, 7, 9, 9, 5], + [7, 9, 8, 9, 7]], output) + + def test_maximum_filter08(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.maximum_filter(array, footprint=footprint, origin=-1) + assert_array_almost_equal([[7, 9, 9, 5, 5], + [9, 8, 
9, 7, 5], + [8, 8, 7, 7, 7]], output) + + def test_maximum_filter09(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.maximum_filter(array, footprint=footprint, + origin=[-1, 0]) + assert_array_almost_equal([[7, 7, 9, 9, 5], + [7, 9, 8, 9, 7], + [8, 8, 8, 7, 7]], output) + + def test_rank01(self): + array = numpy.array([1, 2, 3, 4, 5]) + output = ndimage.rank_filter(array, 1, size=2) + assert_array_almost_equal(array, output) + output = ndimage.percentile_filter(array, 100, size=2) + assert_array_almost_equal(array, output) + output = ndimage.median_filter(array, 2) + assert_array_almost_equal(array, output) + + def test_rank02(self): + array = numpy.array([1, 2, 3, 4, 5]) + output = ndimage.rank_filter(array, 1, size=[3]) + assert_array_almost_equal(array, output) + output = ndimage.percentile_filter(array, 50, size=3) + assert_array_almost_equal(array, output) + output = ndimage.median_filter(array, (3,)) + assert_array_almost_equal(array, output) + + def test_rank03(self): + array = numpy.array([3, 2, 5, 1, 4]) + output = ndimage.rank_filter(array, 1, size=[2]) + assert_array_almost_equal([3, 3, 5, 5, 4], output) + output = ndimage.percentile_filter(array, 100, size=2) + assert_array_almost_equal([3, 3, 5, 5, 4], output) + + def test_rank04(self): + array = numpy.array([3, 2, 5, 1, 4]) + expected = [3, 3, 2, 4, 4] + output = ndimage.rank_filter(array, 1, size=3) + assert_array_almost_equal(expected, output) + output = ndimage.percentile_filter(array, 50, size=3) + assert_array_almost_equal(expected, output) + output = ndimage.median_filter(array, size=3) + assert_array_almost_equal(expected, output) + + def test_rank05(self): + array = numpy.array([3, 2, 5, 1, 4]) + expected = [3, 3, 2, 4, 4] + output = ndimage.rank_filter(array, -2, size=3) + assert_array_almost_equal(expected, output) + + def test_rank06(self): + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]]) + expected = [[2, 2, 1, 1, 1], + [3, 3, 2, 1, 1], + [5, 5, 3, 3, 1]] + output = ndimage.rank_filter(array, 1, size=[2, 3]) + assert_array_almost_equal(expected, output) + output = ndimage.percentile_filter(array, 17, size=(2, 3)) + assert_array_almost_equal(expected, output) + + def test_rank07(self): + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]]) + expected = [[3, 5, 5, 5, 4], + [5, 5, 7, 5, 4], + [6, 8, 8, 7, 5]] + output = ndimage.rank_filter(array, -2, size=[2, 3]) + assert_array_almost_equal(expected, output) + + def test_rank08(self): + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]]) + expected = [[3, 3, 2, 4, 4], + [5, 5, 5, 4, 4], + [5, 6, 7, 5, 5]] + output = ndimage.percentile_filter(array, 50.0, size=(2, 3)) + assert_array_almost_equal(expected, output) + output = ndimage.rank_filter(array, 3, size=(2, 3)) + assert_array_almost_equal(expected, output) + output = ndimage.median_filter(array, size=(2, 3)) + assert_array_almost_equal(expected, output) + + def test_rank09(self): + expected = [[3, 3, 2, 4, 4], + [3, 5, 2, 5, 1], + [5, 5, 8, 3, 5]] + footprint = [[1, 0, 1], [0, 1, 0]] + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + output = ndimage.rank_filter(array, 1, footprint=footprint) + assert_array_almost_equal(expected, output) + output = ndimage.percentile_filter(array, 35, footprint=footprint) + assert_array_almost_equal(expected, output) + + def test_rank10(self): + 
array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + expected = [[2, 2, 1, 1, 1], + [2, 3, 1, 3, 1], + [5, 5, 3, 3, 1]] + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.rank_filter(array, 0, footprint=footprint) + assert_array_almost_equal(expected, output) + output = ndimage.percentile_filter(array, 0.0, footprint=footprint) + assert_array_almost_equal(expected, output) + + def test_rank11(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + expected = [[3, 5, 5, 5, 4], + [7, 7, 9, 9, 5], + [7, 9, 8, 9, 7]] + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.rank_filter(array, -1, footprint=footprint) + assert_array_almost_equal(expected, output) + output = ndimage.percentile_filter(array, 100.0, footprint=footprint) + assert_array_almost_equal(expected, output) + + def test_rank12(self): + expected = [[3, 3, 2, 4, 4], + [3, 5, 2, 5, 1], + [5, 5, 8, 3, 5]] + footprint = [[1, 0, 1], [0, 1, 0]] + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + output = ndimage.rank_filter(array, 1, footprint=footprint) + assert_array_almost_equal(expected, output) + output = ndimage.percentile_filter(array, 50.0, + footprint=footprint) + assert_array_almost_equal(expected, output) + output = ndimage.median_filter(array, footprint=footprint) + assert_array_almost_equal(expected, output) + + def test_rank13(self): + expected = [[5, 2, 5, 1, 1], + [5, 8, 3, 5, 5], + [6, 6, 5, 5, 5]] + footprint = [[1, 0, 1], [0, 1, 0]] + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + output = ndimage.rank_filter(array, 1, footprint=footprint, + origin=-1) + assert_array_almost_equal(expected, output) + + def test_rank14(self): + expected = [[3, 5, 2, 5, 1], + [5, 5, 8, 3, 5], + [5, 6, 6, 5, 5]] + footprint = [[1, 0, 1], [0, 1, 0]] + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + output = ndimage.rank_filter(array, 1, footprint=footprint, + origin=[-1, 0]) + assert_array_almost_equal(expected, output) + + def test_rank15(self): + "rank filter 15" + expected = [[2, 3, 1, 4, 1], + [5, 3, 7, 1, 1], + [5, 5, 3, 3, 3]] + footprint = [[1, 0, 1], [0, 1, 0]] + for type_ in self.types: + array = numpy.array([[3, 2, 5, 1, 4], + [5, 8, 3, 7, 1], + [5, 6, 9, 3, 5]], type_) + output = ndimage.rank_filter(array, 0, footprint=footprint, + origin=[-1, 0]) + assert_array_almost_equal(expected, output) + + def test_generic_filter1d01(self): + weights = numpy.array([1.1, 2.2, 3.3]) + + def _filter_func(input, output, fltr, total): + fltr = fltr / total + for ii in range(input.shape[0] - 2): + output[ii] = input[ii] * fltr[0] + output[ii] += input[ii + 1] * fltr[1] + output[ii] += input[ii + 2] * fltr[2] + for type_ in self.types: + a = numpy.arange(12, dtype=type_) + a.shape = (3, 4) + r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1) + r2 = ndimage.generic_filter1d( + a, _filter_func, 3, axis=0, origin=-1, + extra_arguments=(weights,), + extra_keywords={'total': weights.sum()}) + assert_array_almost_equal(r1, r2) + + def test_generic_filter01(self): + filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]]) + footprint = numpy.array([[1, 0], [0, 1]]) + cf = numpy.array([1., 4.]) + + def _filter_func(buffer, weights, total=1.0): + weights = cf / total + return (buffer * weights).sum() + for type_ in self.types: + a = numpy.arange(12, dtype=type_) + a.shape = (3, 4) + r1 = 
ndimage.correlate(a, filter_ * footprint) + if type_ in self.float_types: + r1 /= 5 + else: + r1 //= 5 + r2 = ndimage.generic_filter( + a, _filter_func, footprint=footprint, extra_arguments=(cf,), + extra_keywords={'total': cf.sum()}) + assert_array_almost_equal(r1, r2) + + def test_extend01(self): + array = numpy.array([1, 2, 3]) + weights = numpy.array([1, 0]) + expected_values = [[1, 1, 2], + [3, 1, 2], + [1, 1, 2], + [2, 1, 2], + [0, 1, 2]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate1d(array, weights, 0, + mode=mode, cval=0) + assert_array_equal(output, expected_value) + + def test_extend02(self): + array = numpy.array([1, 2, 3]) + weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0]) + expected_values = [[1, 1, 1], + [3, 1, 2], + [3, 3, 2], + [1, 2, 3], + [0, 0, 0]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate1d(array, weights, 0, + mode=mode, cval=0) + assert_array_equal(output, expected_value) + + def test_extend03(self): + array = numpy.array([1, 2, 3]) + weights = numpy.array([0, 0, 1]) + expected_values = [[2, 3, 3], + [2, 3, 1], + [2, 3, 3], + [2, 3, 2], + [2, 3, 0]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate1d(array, weights, 0, + mode=mode, cval=0) + assert_array_equal(output, expected_value) + + def test_extend04(self): + array = numpy.array([1, 2, 3]) + weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1]) + expected_values = [[3, 3, 3], + [2, 3, 1], + [2, 1, 1], + [1, 2, 3], + [0, 0, 0]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate1d(array, weights, 0, + mode=mode, cval=0) + assert_array_equal(output, expected_value) + + def test_extend05(self): + array = numpy.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + weights = numpy.array([[1, 0], [0, 0]]) + expected_values = [[[1, 1, 2], [1, 1, 2], [4, 4, 5]], + [[9, 7, 8], [3, 1, 2], [6, 4, 5]], + [[1, 1, 2], [1, 1, 2], [4, 4, 5]], + [[5, 4, 5], [2, 1, 2], [5, 4, 5]], + [[0, 0, 0], [0, 1, 2], [0, 4, 5]]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate(array, weights, + mode=mode, cval=0) + assert_array_equal(output, expected_value) + + def test_extend06(self): + array = numpy.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]]) + expected_values = [[[5, 6, 6], [8, 9, 9], [8, 9, 9]], + [[5, 6, 4], [8, 9, 7], [2, 3, 1]], + [[5, 6, 6], [8, 9, 9], [8, 9, 9]], + [[5, 6, 5], [8, 9, 8], [5, 6, 5]], + [[5, 6, 0], [8, 9, 0], [0, 0, 0]]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate(array, weights, + mode=mode, cval=0) + assert_array_equal(output, expected_value) + + def test_extend07(self): + array = numpy.array([1, 2, 3]) + weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1]) + expected_values = [[3, 3, 3], + [2, 3, 1], + [2, 1, 1], + [1, 2, 3], + [0, 0, 0]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate(array, weights, mode=mode, cval=0) + assert_array_equal(output, expected_value) + + def test_extend08(self): + array = numpy.array([[1], [2], [3]]) + weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]]) + expected_values = [[[3], [3], [3]], + [[2], [3], [1]], + [[2], [1], [1]], + [[1], [2], [3]], + [[0], [0], [0]]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate(array, weights, mode=mode, cval=0) + 
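+
+# --- Illustrative aside, not part of the upstream scipy test suite: the
+# test_extend* cases above pin down how each boundary mode pads the
+# edges. A minimal sketch (helper name is ours; the table restates the
+# expected_values of test_extend01):
+def _sketch_boundary_modes():
+    import numpy as np
+    from scipy import ndimage
+    a = np.array([1, 2, 3])
+    w = np.array([1, 0])                # looks one sample to the left
+    expected = {'nearest': [1, 1, 2], 'wrap': [3, 1, 2],
+                'reflect': [1, 1, 2], 'mirror': [2, 1, 2],
+                'constant': [0, 1, 2]}
+    for mode, exp in expected.items():
+        assert list(ndimage.correlate1d(a, w, mode=mode, cval=0)) == exp
+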
assert_array_equal(output, expected_value) + + def test_extend09(self): + array = numpy.array([1, 2, 3]) + weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1]) + expected_values = [[3, 3, 3], + [2, 3, 1], + [2, 1, 1], + [1, 2, 3], + [0, 0, 0]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate(array, weights, + mode=mode, cval=0) + assert_array_equal(output, expected_value) + + def test_extend10(self): + array = numpy.array([[1], [2], [3]]) + weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]]) + expected_values = [[[3], [3], [3]], + [[2], [3], [1]], + [[2], [1], [1]], + [[1], [2], [3]], + [[0], [0], [0]]] + for mode, expected_value in zip(self.modes, expected_values): + output = ndimage.correlate(array, weights, + mode=mode, cval=0) + assert_array_equal(output, expected_value) + + def test_boundaries(self): + def shift(x): + return (x[0] + 0.5,) + + data = numpy.array([1, 2, 3, 4.]) + expected = {'constant': [1.5, 2.5, 3.5, -1, -1, -1, -1], + 'wrap': [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5], + 'mirror': [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5], + 'nearest': [1.5, 2.5, 3.5, 4, 4, 4, 4]} + + for mode in expected: + assert_array_equal( + expected[mode], + ndimage.geometric_transform(data, shift, cval=-1, mode=mode, + output_shape=(7,), order=1)) + + def test_boundaries2(self): + def shift(x): + return (x[0] - 0.9,) + + data = numpy.array([1, 2, 3, 4]) + expected = {'constant': [-1, 1, 2, 3], + 'wrap': [3, 1, 2, 3], + 'mirror': [2, 1, 2, 3], + 'nearest': [1, 1, 2, 3]} + + for mode in expected: + assert_array_equal( + expected[mode], + ndimage.geometric_transform(data, shift, cval=-1, mode=mode, + output_shape=(4,))) + + def test_fourier_gaussian_real01(self): + for shape in [(32, 16), (31, 15)]: + for type_, dec in zip([numpy.float32, numpy.float64], [6, 14]): + a = numpy.zeros(shape, type_) + a[0, 0] = 1.0 + a = fft.rfft(a, shape[0], 0) + a = fft.fft(a, shape[1], 1) + a = ndimage.fourier_gaussian(a, [5.0, 2.5], shape[0], 0) + a = fft.ifft(a, shape[1], 1) + a = fft.irfft(a, shape[0], 0) + assert_almost_equal(ndimage.sum(a), 1, decimal=dec) + + def test_fourier_gaussian_complex01(self): + for shape in [(32, 16), (31, 15)]: + for type_, dec in zip([numpy.complex64, numpy.complex128], [6, 14]): + a = numpy.zeros(shape, type_) + a[0, 0] = 1.0 + a = fft.fft(a, shape[0], 0) + a = fft.fft(a, shape[1], 1) + a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1, 0) + a = fft.ifft(a, shape[1], 1) + a = fft.ifft(a, shape[0], 0) + assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec) + + def test_fourier_uniform_real01(self): + for shape in [(32, 16), (31, 15)]: + for type_, dec in zip([numpy.float32, numpy.float64], [6, 14]): + a = numpy.zeros(shape, type_) + a[0, 0] = 1.0 + a = fft.rfft(a, shape[0], 0) + a = fft.fft(a, shape[1], 1) + a = ndimage.fourier_uniform(a, [5.0, 2.5], shape[0], 0) + a = fft.ifft(a, shape[1], 1) + a = fft.irfft(a, shape[0], 0) + assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec) + + def test_fourier_uniform_complex01(self): + for shape in [(32, 16), (31, 15)]: + for type_, dec in zip([numpy.complex64, numpy.complex128], [6, 14]): + a = numpy.zeros(shape, type_) + a[0, 0] = 1.0 + a = fft.fft(a, shape[0], 0) + a = fft.fft(a, shape[1], 1) + a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0) + a = fft.ifft(a, shape[1], 1) + a = fft.ifft(a, shape[0], 0) + assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec) + + def test_fourier_shift_real01(self): + for shape in [(32, 16), (31, 15)]: + for type_, dec in zip([numpy.float32, 
numpy.float64], [4, 11]): + expected = numpy.arange(shape[0] * shape[1], dtype=type_) + expected.shape = shape + a = fft.rfft(expected, shape[0], 0) + a = fft.fft(a, shape[1], 1) + a = ndimage.fourier_shift(a, [1, 1], shape[0], 0) + a = fft.ifft(a, shape[1], 1) + a = fft.irfft(a, shape[0], 0) + assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1], + decimal=dec) + assert_array_almost_equal(a.imag, numpy.zeros(shape), + decimal=dec) + + def test_fourier_shift_complex01(self): + for shape in [(32, 16), (31, 15)]: + for type_, dec in zip([numpy.complex64, numpy.complex128], [4, 11]): + expected = numpy.arange(shape[0] * shape[1], dtype=type_) + expected.shape = shape + a = fft.fft(expected, shape[0], 0) + a = fft.fft(a, shape[1], 1) + a = ndimage.fourier_shift(a, [1, 1], -1, 0) + a = fft.ifft(a, shape[1], 1) + a = fft.ifft(a, shape[0], 0) + assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1], + decimal=dec) + assert_array_almost_equal(a.imag, numpy.zeros(shape), + decimal=dec) + + def test_fourier_ellipsoid_real01(self): + for shape in [(32, 16), (31, 15)]: + for type_, dec in zip([numpy.float32, numpy.float64], [5, 14]): + a = numpy.zeros(shape, type_) + a[0, 0] = 1.0 + a = fft.rfft(a, shape[0], 0) + a = fft.fft(a, shape[1], 1) + a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], + shape[0], 0) + a = fft.ifft(a, shape[1], 1) + a = fft.irfft(a, shape[0], 0) + assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec) + + def test_fourier_ellipsoid_complex01(self): + for shape in [(32, 16), (31, 15)]: + for type_, dec in zip([numpy.complex64, numpy.complex128], + [5, 14]): + a = numpy.zeros(shape, type_) + a[0, 0] = 1.0 + a = fft.fft(a, shape[0], 0) + a = fft.fft(a, shape[1], 1) + a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, 0) + a = fft.ifft(a, shape[1], 1) + a = fft.ifft(a, shape[0], 0) + assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec) + + def test_spline01(self): + for type_ in self.types: + data = numpy.ones([], type_) + for order in range(2, 6): + out = ndimage.spline_filter(data, order=order) + assert_array_almost_equal(out, 1) + + def test_spline02(self): + for type_ in self.types: + data = numpy.array([1], type_) + for order in range(2, 6): + out = ndimage.spline_filter(data, order=order) + assert_array_almost_equal(out, [1]) + + def test_spline03(self): + for type_ in self.types: + data = numpy.ones([], type_) + for order in range(2, 6): + out = ndimage.spline_filter(data, order, + output=type_) + assert_array_almost_equal(out, 1) + + def test_spline04(self): + for type_ in self.types: + data = numpy.ones([4], type_) + for order in range(2, 6): + out = ndimage.spline_filter(data, order) + assert_array_almost_equal(out, [1, 1, 1, 1]) + + def test_spline05(self): + for type_ in self.types: + data = numpy.ones([4, 4], type_) + for order in range(2, 6): + out = ndimage.spline_filter(data, order=order) + assert_array_almost_equal(out, [[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]]) + + def test_geometric_transform01(self): + data = numpy.array([1]) + + def mapping(x): + return x + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [1]) + + def test_geometric_transform02(self): + data = numpy.ones([4]) + + def mapping(x): + return x + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [1, 1, 1, 1]) + + def test_geometric_transform03(self): + data = numpy.ones([4]) + + def mapping(x): + 
return (x[0] - 1,) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [0, 1, 1, 1]) + + def test_geometric_transform04(self): + data = numpy.array([4, 1, 3, 2]) + + def mapping(x): + return (x[0] - 1,) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [0, 4, 1, 3]) + + def test_geometric_transform05(self): + data = numpy.array([[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]]) + + def mapping(x): + return (x[0], x[1] - 1) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [[0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1]]) + + def test_geometric_transform06(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + + def mapping(x): + return (x[0], x[1] - 1) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [[0, 4, 1, 3], + [0, 7, 6, 8], + [0, 3, 5, 3]]) + + def test_geometric_transform07(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + + def mapping(x): + return (x[0] - 1, x[1]) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [4, 1, 3, 2], + [7, 6, 8, 5]]) + + def test_geometric_transform08(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + + def mapping(x): + return (x[0] - 1, x[1] - 1) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, data.shape, + order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + def test_geometric_transform10(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + + def mapping(x): + return (x[0] - 1, x[1] - 1) + for order in range(0, 6): + if (order > 1): + filtered = ndimage.spline_filter(data, order=order) + else: + filtered = data + out = ndimage.geometric_transform(filtered, mapping, data.shape, + order=order, prefilter=False) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + def test_geometric_transform13(self): + data = numpy.ones([2], numpy.float64) + + def mapping(x): + return (x[0] // 2,) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, [4], order=order) + assert_array_almost_equal(out, [1, 1, 1, 1]) + + def test_geometric_transform14(self): + data = [1, 5, 2, 6, 3, 7, 4, 4] + + def mapping(x): + return (2 * x[0],) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, [4], order=order) + assert_array_almost_equal(out, [1, 2, 3, 4]) + + def test_geometric_transform15(self): + data = [1, 2, 3, 4] + + def mapping(x): + return (x[0] / 2,) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, [8], order=order) + assert_array_almost_equal(out[::2], [1, 2, 3, 4]) + + def test_geometric_transform16(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9.0, 10, 11, 12]] + + def mapping(x): + return (x[0], x[1] * 2) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, (3, 2), + order=order) + assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]]) + + def test_geometric_transform17(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0] * 2, x[1]) + for order in range(0, 
6): + out = ndimage.geometric_transform(data, mapping, (1, 4), + order=order) + assert_array_almost_equal(out, [[1, 2, 3, 4]]) + + def test_geometric_transform18(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0] * 2, x[1] * 2) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, (1, 2), + order=order) + assert_array_almost_equal(out, [[1, 3]]) + + def test_geometric_transform19(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0], x[1] / 2) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, (3, 8), + order=order) + assert_array_almost_equal(out[..., ::2], data) + + def test_geometric_transform20(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0] / 2, x[1]) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, (6, 4), + order=order) + assert_array_almost_equal(out[::2, ...], data) + + def test_geometric_transform21(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (x[0] / 2, x[1] / 2) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, (6, 8), + order=order) + assert_array_almost_equal(out[::2, ::2], data) + + def test_geometric_transform22(self): + data = numpy.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]], numpy.float64) + + def mapping1(x): + return (x[0] / 2, x[1] / 2) + + def mapping2(x): + return (x[0] * 2, x[1] * 2) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping1, + (6, 8), order=order) + out = ndimage.geometric_transform(out, mapping2, + (3, 4), order=order) + assert_array_almost_equal(out, data) + + def test_geometric_transform23(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x): + return (1, x[0] * 2) + for order in range(0, 6): + out = ndimage.geometric_transform(data, mapping, (2,), order=order) + out = out.astype(numpy.int32) + assert_array_almost_equal(out, [5, 7]) + + def test_geometric_transform24(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + + def mapping(x, a, b): + return (a, x[0] * b) + for order in range(0, 6): + out = ndimage.geometric_transform( + data, mapping, (2,), order=order, extra_arguments=(1,), + extra_keywords={'b': 2}) + assert_array_almost_equal(out, [5, 7]) + + def test_geometric_transform_endianness_with_output_parameter(self): + # geometric transform given output ndarray or dtype with + # non-native endianness. 
see issue #4127 + data = numpy.array([1]) + + def mapping(x): + return x + + for out in [data.dtype, data.dtype.newbyteorder(), + numpy.empty_like(data), + numpy.empty_like(data).astype(data.dtype.newbyteorder())]: + returned = ndimage.geometric_transform(data, mapping, data.shape, + output=out) + result = out if returned is None else returned + assert_array_almost_equal(result, [1]) + + def test_geometric_transform_with_string_output(self): + data = numpy.array([1]) + + def mapping(x): + return x + out = ndimage.geometric_transform(data, mapping, output='f') + assert_(out.dtype is numpy.dtype('f')) + assert_array_almost_equal(out, [1]) + + def test_map_coordinates01(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + idx = numpy.indices(data.shape) + idx -= 1 + for order in range(0, 6): + out = ndimage.map_coordinates(data, idx, order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + def test_map_coordinates02(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + idx = numpy.indices(data.shape, numpy.float64) + idx -= 0.5 + for order in range(0, 6): + out1 = ndimage.shift(data, 0.5, order=order) + out2 = ndimage.map_coordinates(data, idx, order=order) + assert_array_almost_equal(out1, out2) + + def test_map_coordinates03(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]], order='F') + idx = numpy.indices(data.shape) - 1 + out = ndimage.map_coordinates(data, idx) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + assert_array_almost_equal(out, ndimage.shift(data, (1, 1))) + idx = numpy.indices(data[::2].shape) - 1 + out = ndimage.map_coordinates(data[::2], idx) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3]]) + assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1))) + idx = numpy.indices(data[:, ::2].shape) - 1 + out = ndimage.map_coordinates(data[:, ::2], idx) + assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]]) + assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1))) + + def test_map_coordinates_endianness_with_output_parameter(self): + # output parameter given as array or dtype with either endianness + # see issue #4127 + data = numpy.array([[1, 2], [7, 6]]) + expected = numpy.array([[0, 0], [0, 1]]) + idx = numpy.indices(data.shape) + idx -= 1 + for out in [data.dtype, data.dtype.newbyteorder(), numpy.empty_like(expected), + numpy.empty_like(expected).astype(expected.dtype.newbyteorder())]: + returned = ndimage.map_coordinates(data, idx, output=out) + result = out if returned is None else returned + assert_array_almost_equal(result, expected) + + def test_map_coordinates_with_string_output(self): + data = numpy.array([[1]]) + idx = numpy.indices(data.shape) + out = ndimage.map_coordinates(data, idx, output='f') + assert_(out.dtype is numpy.dtype('f')) + assert_array_almost_equal(out, [[1]]) + + @pytest.mark.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8, + reason="do not run on 32 bit or windows (no sparse memory)") + def test_map_coordinates_large_data(self): + # check crash on large data + try: + n = 30000 + a = numpy.empty(n**2, dtype=numpy.float32).reshape(n, n) + # fill the part we might read + a[n-3:, n-3:] = 0 + ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1) + except MemoryError: + raise pytest.skip("Not enough memory available") + + def test_affine_transform01(self): + data = numpy.array([1]) + for order in range(0, 6): + out = ndimage.affine_transform(data, 
[[1]], order=order) + assert_array_almost_equal(out, [1]) + + def test_affine_transform02(self): + data = numpy.ones([4]) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[1]], order=order) + assert_array_almost_equal(out, [1, 1, 1, 1]) + + def test_affine_transform03(self): + data = numpy.ones([4]) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[1]], -1, order=order) + assert_array_almost_equal(out, [0, 1, 1, 1]) + + def test_affine_transform04(self): + data = numpy.array([4, 1, 3, 2]) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[1]], -1, order=order) + assert_array_almost_equal(out, [0, 4, 1, 3]) + + def test_affine_transform05(self): + data = numpy.array([[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]]) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[1, 0], [0, 1]], + [0, -1], order=order) + assert_array_almost_equal(out, [[0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1]]) + + def test_affine_transform06(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[1, 0], [0, 1]], + [0, -1], order=order) + assert_array_almost_equal(out, [[0, 4, 1, 3], + [0, 7, 6, 8], + [0, 3, 5, 3]]) + + def test_affine_transform07(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[1, 0], [0, 1]], + [-1, 0], order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [4, 1, 3, 2], + [7, 6, 8, 5]]) + + def test_affine_transform08(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[1, 0], [0, 1]], + [-1, -1], order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + def test_affine_transform09(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + for order in range(0, 6): + if (order > 1): + filtered = ndimage.spline_filter(data, order=order) + else: + filtered = data + out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]], + [-1, -1], order=order, + prefilter=False) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + def test_affine_transform10(self): + data = numpy.ones([2], numpy.float64) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,), + order=order) + assert_array_almost_equal(out, [1, 1, 1, 0]) + + def test_affine_transform11(self): + data = [1, 5, 2, 6, 3, 7, 4, 4] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order) + assert_array_almost_equal(out, [1, 2, 3, 4]) + + def test_affine_transform12(self): + data = [1, 2, 3, 4] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order) + assert_array_almost_equal(out[::2], [1, 2, 3, 4]) + + def test_affine_transform13(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9.0, 10, 11, 12]] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2), + order=order) + assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]]) + + def test_affine_transform14(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4), + order=order) + assert_array_almost_equal(out, [[1, 2, 3, 4]]) + + def test_affine_transform15(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], 
+ [9, 10, 11, 12]] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2), + order=order) + assert_array_almost_equal(out, [[1, 3]]) + + def test_affine_transform16(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0, + (3, 8), order=order) + assert_array_almost_equal(out[..., ::2], data) + + def test_affine_transform17(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0, + (6, 4), order=order) + assert_array_almost_equal(out[::2, ...], data) + + def test_affine_transform18(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0, + (6, 8), order=order) + assert_array_almost_equal(out[::2, ::2], data) + + def test_affine_transform19(self): + data = numpy.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]], numpy.float64) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0, + (6, 8), order=order) + out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0, + (3, 4), order=order) + assert_array_almost_equal(out, data) + + def test_affine_transform20(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[0], [2]], 0, (2,), + order=order) + assert_array_almost_equal(out, [1, 3]) + + def test_affine_transform21(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + for order in range(0, 6): + out = ndimage.affine_transform(data, [[2], [0]], 0, (2,), + order=order) + assert_array_almost_equal(out, [1, 9]) + + def test_affine_transform22(self): + # shift and offset interaction; see issue #1547 + data = numpy.array([4, 1, 3, 2]) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[2]], [-1], (3,), + order=order) + assert_array_almost_equal(out, [0, 1, 2]) + + def test_affine_transform23(self): + # shift and offset interaction; see issue #1547 + data = numpy.array([4, 1, 3, 2]) + for order in range(0, 6): + out = ndimage.affine_transform(data, [[0.5]], [-1], (8,), + order=order) + assert_array_almost_equal(out[::2], [0, 4, 1, 3]) + + def test_affine_transform24(self): + # consistency between diagonal and non-diagonal case; see issue #1547 + data = numpy.array([4, 1, 3, 2]) + for order in range(0, 6): + with suppress_warnings() as sup: + sup.filter(UserWarning, + "The behaviour of affine_transform with a one-dimensional array .* has changed") + out1 = ndimage.affine_transform(data, [2], -1, order=order) + out2 = ndimage.affine_transform(data, [[2]], -1, order=order) + assert_array_almost_equal(out1, out2) + + def test_affine_transform25(self): + # consistency between diagonal and non-diagonal case; see issue #1547 + data = numpy.array([4, 1, 3, 2]) + for order in range(0, 6): + with suppress_warnings() as sup: + sup.filter(UserWarning, + "The behaviour of affine_transform with a one-dimensional array .* has changed") + out1 = ndimage.affine_transform(data, [0.5], -1, order=order) + out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order) + assert_array_almost_equal(out1, out2) + + def test_affine_transform26(self): + # test homogeneous coordinates + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + for order in range(0, 6): + if (order > 1): + filtered = ndimage.spline_filter(data, 
order=order) + else: + filtered = data + tform_original = numpy.eye(2) + offset_original = -numpy.ones((2, 1)) + tform_h1 = numpy.hstack((tform_original, offset_original)) + tform_h2 = numpy.vstack((tform_h1, [[0, 0, 1]])) + out1 = ndimage.affine_transform(filtered, tform_original, + offset_original.ravel(), + order=order, prefilter=False) + out2 = ndimage.affine_transform(filtered, tform_h1, order=order, + prefilter=False) + out3 = ndimage.affine_transform(filtered, tform_h2, order=order, + prefilter=False) + for out in [out1, out2, out3]: + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + def test_affine_transform27(self): + # test valid homogeneous transformation matrix + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + tform_h1 = numpy.hstack((numpy.eye(2), -numpy.ones((2, 1)))) + tform_h2 = numpy.vstack((tform_h1, [[5, 2, 1]])) + assert_raises(ValueError, ndimage.affine_transform, data, tform_h2) + + def test_affine_transform_1d_endianness_with_output_parameter(self): + # 1d affine transform given output ndarray or dtype with + # either endianness. see issue #7388 + data = numpy.ones((2, 2)) + for out in [numpy.empty_like(data), + numpy.empty_like(data).astype(data.dtype.newbyteorder()), + data.dtype, data.dtype.newbyteorder()]: + with suppress_warnings() as sup: + sup.filter(UserWarning, + "The behaviour of affine_transform with a one-dimensional array .* has changed") + returned = ndimage.affine_transform(data, [1, 1], output=out) + result = out if returned is None else returned + assert_array_almost_equal(result, [[1, 1], [1, 1]]) + + def test_affine_transform_multi_d_endianness_with_output_parameter(self): + # affine transform given output ndarray or dtype with either endianness + # see issue #4127 + data = numpy.array([1]) + for out in [data.dtype, data.dtype.newbyteorder(), + numpy.empty_like(data), + numpy.empty_like(data).astype(data.dtype.newbyteorder())]: + returned = ndimage.affine_transform(data, [[1]], output=out) + result = out if returned is None else returned + assert_array_almost_equal(result, [1]) + + def test_affine_transform_with_string_output(self): + data = numpy.array([1]) + out = ndimage.affine_transform(data, [[1]], output='f') + assert_(out.dtype is numpy.dtype('f')) + assert_array_almost_equal(out, [1]) + + def test_shift01(self): + data = numpy.array([1]) + for order in range(0, 6): + out = ndimage.shift(data, [1], order=order) + assert_array_almost_equal(out, [0]) + + def test_shift02(self): + data = numpy.ones([4]) + for order in range(0, 6): + out = ndimage.shift(data, [1], order=order) + assert_array_almost_equal(out, [0, 1, 1, 1]) + + def test_shift03(self): + data = numpy.ones([4]) + for order in range(0, 6): + out = ndimage.shift(data, -1, order=order) + assert_array_almost_equal(out, [1, 1, 1, 0]) + + def test_shift04(self): + data = numpy.array([4, 1, 3, 2]) + for order in range(0, 6): + out = ndimage.shift(data, 1, order=order) + assert_array_almost_equal(out, [0, 4, 1, 3]) + + def test_shift05(self): + data = numpy.array([[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]]) + for order in range(0, 6): + out = ndimage.shift(data, [0, 1], order=order) + assert_array_almost_equal(out, [[0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1]]) + + def test_shift06(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + for order in range(0, 6): + out = ndimage.shift(data, [0, 1], order=order) + assert_array_almost_equal(out, [[0, 4, 1, 3], + [0, 7, 6, 8], + [0, 3, 5, 3]]) + + def 
test_shift07(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + for order in range(0, 6): + out = ndimage.shift(data, [1, 0], order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [4, 1, 3, 2], + [7, 6, 8, 5]]) + + def test_shift08(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + for order in range(0, 6): + out = ndimage.shift(data, [1, 1], order=order) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + def test_shift09(self): + data = numpy.array([[4, 1, 3, 2], + [7, 6, 8, 5], + [3, 5, 3, 6]]) + for order in range(0, 6): + if (order > 1): + filtered = ndimage.spline_filter(data, order=order) + else: + filtered = data + out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False) + assert_array_almost_equal(out, [[0, 0, 0, 0], + [0, 4, 1, 3], + [0, 7, 6, 8]]) + + def test_zoom1(self): + for order in range(0, 6): + for z in [2, [2, 2]]: + arr = numpy.array(list(range(25))).reshape((5, 5)).astype(float) + arr = ndimage.zoom(arr, z, order=order) + assert_equal(arr.shape, (10, 10)) + assert_(numpy.all(arr[-1, :] != 0)) + assert_(numpy.all(arr[-1, :] >= (20 - eps))) + assert_(numpy.all(arr[0, :] <= (5 + eps))) + assert_(numpy.all(arr >= (0 - eps))) + assert_(numpy.all(arr <= (24 + eps))) + + def test_zoom2(self): + arr = numpy.arange(12).reshape((3, 4)) + out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5) + assert_array_equal(out, arr) + + def test_zoom3(self): + arr = numpy.array([[1, 2]]) + out1 = ndimage.zoom(arr, (2, 1)) + out2 = ndimage.zoom(arr, (1, 2)) + + assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]])) + assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]])) + + def test_zoom_affine01(self): + data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + for order in range(0, 6): + with suppress_warnings() as sup: + sup.filter(UserWarning, + "The behaviour of affine_transform with a one-dimensional array .* has changed") + out = ndimage.affine_transform(data, [0.5, 0.5], 0, + (6, 8), order=order) + assert_array_almost_equal(out[::2, ::2], data) + + def test_zoom_infinity(self): + # Ticket #1419 regression test + dim = 8 + ndimage.zoom(numpy.zeros((dim, dim)), 1./dim, mode='nearest') + + def test_zoom_zoomfactor_one(self): + # Ticket #1122 regression test + arr = numpy.zeros((1, 5, 5)) + zoom = (1.0, 2.0, 2.0) + + out = ndimage.zoom(arr, zoom, cval=7) + ref = numpy.zeros((1, 10, 10)) + assert_array_almost_equal(out, ref) + + def test_zoom_output_shape_roundoff(self): + arr = numpy.zeros((3, 11, 25)) + zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25) + with suppress_warnings() as sup: + sup.filter(UserWarning, + "From scipy 0.13.0, the output shape of zoom.. is calculated with round.. 
instead of int") + out = ndimage.zoom(arr, zoom) + assert_array_equal(out.shape, (4, 15, 29)) + + def test_rotate01(self): + data = numpy.array([[0, 0, 0, 0], + [0, 1, 1, 0], + [0, 0, 0, 0]], dtype=numpy.float64) + for order in range(0, 6): + out = ndimage.rotate(data, 0) + assert_array_almost_equal(out, data) + + def test_rotate02(self): + data = numpy.array([[0, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 0]], dtype=numpy.float64) + expected = numpy.array([[0, 0, 0], + [0, 0, 0], + [0, 1, 0], + [0, 0, 0]], dtype=numpy.float64) + for order in range(0, 6): + out = ndimage.rotate(data, 90) + assert_array_almost_equal(out, expected) + + def test_rotate03(self): + data = numpy.array([[0, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 0, 0, 0]], dtype=numpy.float64) + expected = numpy.array([[0, 0, 0], + [0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0]], dtype=numpy.float64) + for order in range(0, 6): + out = ndimage.rotate(data, 90) + assert_array_almost_equal(out, expected) + + def test_rotate04(self): + data = numpy.array([[0, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 0, 0, 0]], dtype=numpy.float64) + expected = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 1, 0, 0]], dtype=numpy.float64) + for order in range(0, 6): + out = ndimage.rotate(data, 90, reshape=False) + assert_array_almost_equal(out, expected) + + def test_rotate05(self): + data = numpy.empty((4, 3, 3)) + for i in range(3): + data[:, :, i] = numpy.array([[0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0]], dtype=numpy.float64) + + expected = numpy.array([[0, 0, 0, 0], + [0, 1, 1, 0], + [0, 0, 0, 0]], dtype=numpy.float64) + + for order in range(0, 6): + out = ndimage.rotate(data, 90) + for i in range(3): + assert_array_almost_equal(out[:, :, i], expected) + + def test_rotate06(self): + data = numpy.empty((3, 4, 3)) + for i in range(3): + data[:, :, i] = numpy.array([[0, 0, 0, 0], + [0, 1, 1, 0], + [0, 0, 0, 0]], dtype=numpy.float64) + + expected = numpy.array([[0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0]], dtype=numpy.float64) + + for order in range(0, 6): + out = ndimage.rotate(data, 90) + for i in range(3): + assert_array_almost_equal(out[:, :, i], expected) + + def test_rotate07(self): + data = numpy.array([[[0, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64) + data = data.transpose() + expected = numpy.array([[[0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 0]]] * 2, dtype=numpy.float64) + expected = expected.transpose([2, 1, 0]) + + for order in range(0, 6): + out = ndimage.rotate(data, 90, axes=(0, 1)) + assert_array_almost_equal(out, expected) + + def test_rotate08(self): + data = numpy.array([[[0, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64) + data = data.transpose() + expected = numpy.array([[[0, 0, 1, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64) + expected = expected.transpose() + for order in range(0, 6): + out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False) + assert_array_almost_equal(out, expected) + + def test_watershed_ift01(self): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) + markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.int8) + out = 
ndimage.watershed_ift(data, markers, structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift02(self): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) + markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, -1, 1, 1, 1, -1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, 1, 1, 1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift03(self): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) + markers = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 2, 0, 3, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1]], numpy.int8) + out = ndimage.watershed_ift(data, markers) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, -1, 2, -1, 3, -1, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, -1, 2, -1, 3, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift04(self): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) + markers = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 2, 0, 3, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1]], + numpy.int8) + out = ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, 2, 2, 3, 3, 3, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift05(self): + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) + markers = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 3, 0, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1]], + numpy.int8) + out = ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, -1, -1, -1, -1, -1, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, 3, 3, 2, 2, 2, -1], + [-1, -1, -1, -1, -1, -1, -1]] + 
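+ # The marker convention exercised by these watershed tests: positive
+ # markers seed the catchment basins, while negative markers are
+ # background seeds processed after the others, which is why -1 claims
+ # the flat border region in each expected array. A minimal
+ # doctest-style sketch with a hypothetical input:
+ # >>> img = numpy.array([[0, 1, 0]] * 3, numpy.uint8)
+ # >>> seeds = numpy.array([[1, 0, -1]] * 3, numpy.int8)
+ # >>> ndimage.watershed_ift(img, seeds)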
assert_array_almost_equal(out, expected) + + def test_watershed_ift06(self): + data = numpy.array([[0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) + markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.int8) + out = ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + expected = [[-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_watershed_ift07(self): + shape = (7, 6) + data = numpy.zeros(shape, dtype=numpy.uint8) + data = data.transpose() + data[...] = numpy.array([[0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.uint8) + markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], numpy.int8) + out = numpy.zeros(shape, dtype=numpy.int16) + out = out.transpose() + ndimage.watershed_ift(data, markers, + structure=[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]], + output=out) + expected = [[-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, 1, 1, 1, 1, 1, -1], + [-1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1]] + assert_array_almost_equal(out, expected) + + def test_distance_transform_bf01(self): + # brute force (bf) distance transform + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + out, ft = ndimage.distance_transform_bf(data, 'euclidean', + return_indices=True) + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 2, 4, 2, 1, 0, 0], + [0, 0, 1, 4, 8, 4, 1, 0, 0], + [0, 0, 1, 2, 4, 2, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out * out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 1, 2, 2, 2, 2], + [3, 3, 3, 2, 1, 2, 3, 3, 3], + [4, 4, 4, 4, 6, 4, 4, 4, 4], + [5, 5, 6, 6, 7, 6, 6, 5, 5], + [6, 6, 6, 7, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 1, 2, 4, 6, 7, 7, 8], + [0, 1, 1, 1, 6, 7, 7, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + def test_distance_transform_bf02(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + out, ft = 
ndimage.distance_transform_bf(data, 'cityblock', + return_indices=True) + + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 2, 2, 2, 1, 0, 0], + [0, 0, 1, 2, 3, 2, 1, 0, 0], + [0, 0, 1, 2, 2, 2, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 1, 2, 2, 2, 2], + [3, 3, 3, 3, 1, 3, 3, 3, 3], + [4, 4, 4, 4, 7, 4, 4, 4, 4], + [5, 5, 6, 7, 7, 7, 6, 5, 5], + [6, 6, 6, 7, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 1, 1, 4, 7, 7, 7, 8], + [0, 1, 1, 1, 4, 7, 7, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(expected, ft) + + def test_distance_transform_bf03(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + out, ft = ndimage.distance_transform_bf(data, 'chessboard', + return_indices=True) + + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 2, 1, 1, 0, 0], + [0, 0, 1, 2, 2, 2, 1, 0, 0], + [0, 0, 1, 1, 2, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 1, 2, 2, 2, 2], + [3, 3, 4, 2, 2, 2, 4, 3, 3], + [4, 4, 5, 6, 6, 6, 5, 4, 4], + [5, 5, 6, 6, 7, 6, 6, 5, 5], + [6, 6, 6, 7, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 5, 6, 6, 7, 8], + [0, 1, 1, 2, 6, 6, 7, 7, 8], + [0, 1, 1, 2, 6, 7, 7, 7, 8], + [0, 1, 2, 2, 6, 6, 7, 7, 8], + [0, 1, 2, 4, 5, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + def test_distance_transform_bf04(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + tdt, tft = ndimage.distance_transform_bf(data, return_indices=1) + dts = [] + fts = [] + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ndimage.distance_transform_bf(data, distances=dt) + dts.append(dt) + ft = ndimage.distance_transform_bf( + data, return_distances=False, return_indices=1) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_bf( + data, return_distances=False, return_indices=True, indices=ft) + fts.append(ft) + dt, ft = ndimage.distance_transform_bf( + data, return_indices=1) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ft = ndimage.distance_transform_bf( + data, distances=dt, return_indices=True) + dts.append(dt) 
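+ # The permutations in this test walk through every calling convention
+ # of distance_transform_bf: results returned as values, or written in
+ # place into caller-allocated 'distances'/'indices' arrays. A minimal
+ # sketch of the two return styles, with a hypothetical input 'a':
+ # >>> a = numpy.array([[0, 1, 1], [0, 1, 1]])
+ # >>> d = ndimage.distance_transform_bf(a)
+ # >>> d, ix = ndimage.distance_transform_bf(a, return_indices=True)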
+ fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + dt = ndimage.distance_transform_bf( + data, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_bf( + data, distances=dt, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + for dt in dts: + assert_array_almost_equal(tdt, dt) + for ft in fts: + assert_array_almost_equal(tft, ft) + + def test_distance_transform_bf05(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + out, ft = ndimage.distance_transform_bf( + data, 'euclidean', return_indices=True, sampling=[2, 2]) + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 4, 4, 4, 0, 0, 0], + [0, 0, 4, 8, 16, 8, 4, 0, 0], + [0, 0, 4, 16, 32, 16, 4, 0, 0], + [0, 0, 4, 8, 16, 8, 4, 0, 0], + [0, 0, 0, 4, 4, 4, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out * out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 1, 2, 2, 2, 2], + [3, 3, 3, 2, 1, 2, 3, 3, 3], + [4, 4, 4, 4, 6, 4, 4, 4, 4], + [5, 5, 6, 6, 7, 6, 6, 5, 5], + [6, 6, 6, 7, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 1, 2, 4, 6, 7, 7, 8], + [0, 1, 1, 1, 6, 7, 7, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + def test_distance_transform_bf06(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + out, ft = ndimage.distance_transform_bf( + data, 'euclidean', return_indices=True, sampling=[2, 1]) + expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 4, 1, 0, 0, 0], + [0, 0, 1, 4, 8, 4, 1, 0, 0], + [0, 0, 1, 4, 9, 4, 1, 0, 0], + [0, 0, 1, 4, 8, 4, 1, 0, 0], + [0, 0, 0, 1, 4, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]] + assert_array_almost_equal(out * out, expected) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 2, 2, 2, 2, 2, 2], + [3, 3, 3, 3, 2, 3, 3, 3, 3], + [4, 4, 4, 4, 4, 4, 4, 4, 4], + [5, 5, 5, 5, 6, 5, 5, 5, 5], + [6, 6, 6, 6, 7, 6, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 6, 6, 6, 7, 8], + [0, 1, 1, 1, 6, 7, 7, 7, 8], + [0, 1, 1, 1, 7, 7, 7, 7, 8], + [0, 1, 1, 1, 6, 7, 7, 7, 8], + [0, 1, 2, 2, 4, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + def test_distance_transform_cdt01(self): + # chamfer type distance (cdt) transform + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 
0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + out, ft = ndimage.distance_transform_cdt( + data, 'cityblock', return_indices=True) + bf = ndimage.distance_transform_bf(data, 'cityblock') + assert_array_almost_equal(bf, out) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 1, 1, 1, 2, 2, 2], + [3, 3, 2, 1, 1, 1, 2, 3, 3], + [4, 4, 4, 4, 1, 4, 4, 4, 4], + [5, 5, 5, 5, 7, 7, 6, 5, 5], + [6, 6, 6, 6, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 1, 1, 4, 7, 7, 7, 8], + [0, 1, 1, 1, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + def test_distance_transform_cdt02(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + out, ft = ndimage.distance_transform_cdt(data, 'chessboard', + return_indices=True) + bf = ndimage.distance_transform_bf(data, 'chessboard') + assert_array_almost_equal(bf, out) + + expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [2, 2, 2, 1, 1, 1, 2, 2, 2], + [3, 3, 2, 2, 1, 2, 2, 3, 3], + [4, 4, 3, 2, 2, 2, 3, 4, 4], + [5, 5, 4, 6, 7, 6, 4, 5, 5], + [6, 6, 6, 6, 7, 7, 6, 6, 6], + [7, 7, 7, 7, 7, 7, 7, 7, 7], + [8, 8, 8, 8, 8, 8, 8, 8, 8]], + [[0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 2, 3, 4, 6, 7, 8], + [0, 1, 1, 2, 2, 6, 6, 7, 8], + [0, 1, 1, 1, 2, 6, 7, 7, 8], + [0, 1, 1, 2, 6, 6, 7, 7, 8], + [0, 1, 2, 2, 5, 6, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 2, 3, 4, 5, 6, 7, 8]]] + assert_array_almost_equal(ft, expected) + + def test_distance_transform_cdt03(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + tdt, tft = ndimage.distance_transform_cdt(data, return_indices=True) + dts = [] + fts = [] + dt = numpy.zeros(data.shape, dtype=numpy.int32) + ndimage.distance_transform_cdt(data, distances=dt) + dts.append(dt) + ft = ndimage.distance_transform_cdt( + data, return_distances=False, return_indices=True) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_cdt( + data, return_distances=False, return_indices=True, indices=ft) + fts.append(ft) + dt, ft = ndimage.distance_transform_cdt( + data, return_indices=True) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.int32) + ft = ndimage.distance_transform_cdt( + data, distances=dt, return_indices=True) + dts.append(dt) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + dt = ndimage.distance_transform_cdt( + data, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + dt = 
numpy.zeros(data.shape, dtype=numpy.int32) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_cdt(data, distances=dt, + return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + for dt in dts: + assert_array_almost_equal(tdt, dt) + for ft in fts: + assert_array_almost_equal(tft, ft) + + def test_distance_transform_edt01(self): + # euclidean distance transform (edt) + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + out, ft = ndimage.distance_transform_edt(data, return_indices=True) + bf = ndimage.distance_transform_bf(data, 'euclidean') + assert_array_almost_equal(bf, out) + + dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype) + dt = dt.astype(numpy.float64) + numpy.multiply(dt, dt, dt) + dt = numpy.add.reduce(dt, axis=0) + numpy.sqrt(dt, dt) + + assert_array_almost_equal(bf, dt) + + def test_distance_transform_edt02(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + tdt, tft = ndimage.distance_transform_edt(data, return_indices=True) + dts = [] + fts = [] + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ndimage.distance_transform_edt(data, distances=dt) + dts.append(dt) + ft = ndimage.distance_transform_edt( + data, return_distances=0, return_indices=True) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_edt( + data, return_distances=False, return_indices=True, indices=ft) + fts.append(ft) + dt, ft = ndimage.distance_transform_edt( + data, return_indices=True) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ft = ndimage.distance_transform_edt( + data, distances=dt, return_indices=True) + dts.append(dt) + fts.append(ft) + ft = numpy.indices(data.shape, dtype=numpy.int32) + dt = ndimage.distance_transform_edt( + data, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + dt = numpy.zeros(data.shape, dtype=numpy.float64) + ft = numpy.indices(data.shape, dtype=numpy.int32) + ndimage.distance_transform_edt( + data, distances=dt, return_indices=True, indices=ft) + dts.append(dt) + fts.append(ft) + for dt in dts: + assert_array_almost_equal(tdt, dt) + for ft in fts: + assert_array_almost_equal(tft, ft) + + def test_distance_transform_edt03(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 2]) + out = ndimage.distance_transform_edt(data, sampling=[2, 2]) + assert_array_almost_equal(ref, out) + + def test_distance_transform_edt4(self): + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 
0, 0], + [0, 0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]], type_) + ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 1]) + out = ndimage.distance_transform_edt(data, sampling=[2, 1]) + assert_array_almost_equal(ref, out) + + def test_distance_transform_edt5(self): + # Ticket #954 regression test + out = ndimage.distance_transform_edt(False) + assert_array_almost_equal(out, [0.]) + + def test_generate_structure01(self): + struct = ndimage.generate_binary_structure(0, 1) + assert_array_almost_equal(struct, 1) + + def test_generate_structure02(self): + struct = ndimage.generate_binary_structure(1, 1) + assert_array_almost_equal(struct, [1, 1, 1]) + + def test_generate_structure03(self): + struct = ndimage.generate_binary_structure(2, 1) + assert_array_almost_equal(struct, [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]]) + + def test_generate_structure04(self): + struct = ndimage.generate_binary_structure(2, 2) + assert_array_almost_equal(struct, [[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + def test_iterate_structure01(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + out = ndimage.iterate_structure(struct, 2) + assert_array_almost_equal(out, [[0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0]]) + + def test_iterate_structure02(self): + struct = [[0, 1], + [1, 1], + [0, 1]] + out = ndimage.iterate_structure(struct, 2) + assert_array_almost_equal(out, [[0, 0, 1], + [0, 1, 1], + [1, 1, 1], + [0, 1, 1], + [0, 0, 1]]) + + def test_iterate_structure03(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + out = ndimage.iterate_structure(struct, 2, 1) + expected = [[0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0]] + assert_array_almost_equal(out[0], expected) + assert_equal(out[1], [2, 2]) + + def test_binary_erosion01(self): + for type_ in self.types: + data = numpy.ones([], type_) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, 1) + + def test_binary_erosion02(self): + for type_ in self.types: + data = numpy.ones([], type_) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, 1) + + def test_binary_erosion03(self): + for type_ in self.types: + data = numpy.ones([1], type_) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [0]) + + def test_binary_erosion04(self): + for type_ in self.types: + data = numpy.ones([1], type_) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [1]) + + def test_binary_erosion05(self): + for type_ in self.types: + data = numpy.ones([3], type_) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [0, 1, 0]) + + def test_binary_erosion06(self): + for type_ in self.types: + data = numpy.ones([3], type_) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [1, 1, 1]) + + def test_binary_erosion07(self): + for type_ in self.types: + data = numpy.ones([5], type_) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [0, 1, 1, 1, 0]) + + def test_binary_erosion08(self): + for type_ in self.types: + data = numpy.ones([5], type_) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [1, 1, 1, 1, 1]) + + def test_binary_erosion09(self): + for type_ in self.types: + data = numpy.ones([5], type_) + data[2] = 0 + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [0, 0, 0, 0, 0]) + + def 
test_binary_erosion10(self): + for type_ in self.types: + data = numpy.ones([5], type_) + data[2] = 0 + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [1, 0, 0, 0, 1]) + + def test_binary_erosion11(self): + for type_ in self.types: + data = numpy.ones([5], type_) + data[2] = 0 + struct = [1, 0, 1] + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, [1, 0, 1, 0, 1]) + + def test_binary_erosion12(self): + for type_ in self.types: + data = numpy.ones([5], type_) + data[2] = 0 + struct = [1, 0, 1] + out = ndimage.binary_erosion(data, struct, border_value=1, + origin=-1) + assert_array_almost_equal(out, [0, 1, 0, 1, 1]) + + def test_binary_erosion13(self): + for type_ in self.types: + data = numpy.ones([5], type_) + data[2] = 0 + struct = [1, 0, 1] + out = ndimage.binary_erosion(data, struct, border_value=1, + origin=1) + assert_array_almost_equal(out, [1, 1, 0, 1, 0]) + + def test_binary_erosion14(self): + for type_ in self.types: + data = numpy.ones([5], type_) + data[2] = 0 + struct = [1, 1] + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, [1, 1, 0, 0, 1]) + + def test_binary_erosion15(self): + for type_ in self.types: + data = numpy.ones([5], type_) + data[2] = 0 + struct = [1, 1] + out = ndimage.binary_erosion(data, struct, border_value=1, + origin=-1) + assert_array_almost_equal(out, [1, 0, 0, 1, 1]) + + def test_binary_erosion16(self): + for type_ in self.types: + data = numpy.ones([1, 1], type_) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [[1]]) + + def test_binary_erosion17(self): + for type_ in self.types: + data = numpy.ones([1, 1], type_) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [[0]]) + + def test_binary_erosion18(self): + for type_ in self.types: + data = numpy.ones([1, 3], type_) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [[0, 0, 0]]) + + def test_binary_erosion19(self): + for type_ in self.types: + data = numpy.ones([1, 3], type_) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [[1, 1, 1]]) + + def test_binary_erosion20(self): + for type_ in self.types: + data = numpy.ones([3, 3], type_) + out = ndimage.binary_erosion(data) + assert_array_almost_equal(out, [[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + + def test_binary_erosion21(self): + for type_ in self.types: + data = numpy.ones([3, 3], type_) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, [[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + def test_binary_erosion22(self): + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_erosion(data, border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_erosion23(self): + struct = ndimage.generate_binary_structure(2, 2) + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 
0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_erosion24(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_erosion25(self): + struct = [[0, 1, 0], + [1, 0, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 0, 1, 1], + [0, 0, 1, 0, 1, 1, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_erosion(data, struct, border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_erosion26(self): + struct = [[0, 1, 0], + [1, 0, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 1, 0, 0, 1], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1]] + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 0, 1, 1], + [0, 0, 1, 0, 1, 1, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_erosion(data, struct, border_value=1, + origin=(-1, -1)) + assert_array_almost_equal(out, expected) + + def test_binary_erosion27(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, border_value=1, + iterations=2) + assert_array_almost_equal(out, expected) + + def test_binary_erosion28(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], 
+ [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=2, output=out) + assert_array_almost_equal(out, expected) + + def test_binary_erosion29(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, + border_value=1, iterations=3) + assert_array_almost_equal(out, expected) + + def test_binary_erosion30(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=3, output=out) + assert_array_almost_equal(out, expected) + + def test_binary_erosion31(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 1, 0, 1], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 1]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=1, output=out, origin=(-1, -1)) + assert_array_almost_equal(out, expected) + + def test_binary_erosion32(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, + border_value=1, iterations=2) + assert_array_almost_equal(out, expected) + + def test_binary_erosion33(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + mask = [[1, 1, 1, 1, 1, 0, 0], + [1, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1]] + data = numpy.array([[0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 1, 0, 0, 1], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, + border_value=1, mask=mask, iterations=-1) + assert_array_almost_equal(out, expected) + + def test_binary_erosion34(self): + struct = [[0, 1, 
0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + mask = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_erosion(data, struct, + border_value=1, mask=mask) + assert_array_almost_equal(out, expected) + + def test_binary_erosion35(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + mask = [[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]] + data = numpy.array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]], bool) + tmp = [[0, 0, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 1, 0, 1], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 1]] + expected = numpy.logical_and(tmp, mask) + tmp = numpy.logical_and(data, numpy.logical_not(mask)) + expected = numpy.logical_or(expected, tmp) + out = numpy.zeros(data.shape, bool) + ndimage.binary_erosion(data, struct, border_value=1, + iterations=1, output=out, + origin=(-1, -1), mask=mask) + assert_array_almost_equal(out, expected) + + def test_binary_erosion36(self): + struct = [[0, 1, 0], + [1, 0, 1], + [0, 1, 0]] + mask = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + tmp = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 1, 0, 0, 1], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 1, 1, 1, 0, 1, 1], + [0, 0, 1, 0, 1, 1, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + expected = numpy.logical_and(tmp, mask) + tmp = numpy.logical_and(data, numpy.logical_not(mask)) + expected = numpy.logical_or(expected, tmp) + out = ndimage.binary_erosion(data, struct, mask=mask, + border_value=1, origin=(-1, -1)) + assert_array_almost_equal(out, expected) + + def test_binary_erosion37(self): + a = numpy.array([[1, 0, 1], + [0, 1, 0], + [1, 0, 1]], dtype=bool) + b = numpy.zeros_like(a) + out = ndimage.binary_erosion(a, structure=a, output=b, iterations=0, + border_value=True, brute_force=True) + assert_(out is b) + assert_array_equal( + ndimage.binary_erosion(a, structure=a, iterations=0, + border_value=True), + b) + + def test_binary_dilation01(self): + for type_ in self.types: + data = numpy.ones([], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, 1) + + def test_binary_dilation02(self): + for type_ in self.types: + data = numpy.zeros([], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, 0) + + def test_binary_dilation03(self): + for type_ in self.types: 
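+ # (as with the erosion tests above, looping over self.types checks
+ # the boolean result against every supported input dtype, not just
+ # bool)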
+ data = numpy.ones([1], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1]) + + def test_binary_dilation04(self): + for type_ in self.types: + data = numpy.zeros([1], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [0]) + + def test_binary_dilation05(self): + for type_ in self.types: + data = numpy.ones([3], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1, 1, 1]) + + def test_binary_dilation06(self): + for type_ in self.types: + data = numpy.zeros([3], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [0, 0, 0]) + + def test_binary_dilation07(self): + for type_ in self.types: + data = numpy.zeros([3], type_) + data[1] = 1 + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1, 1, 1]) + + def test_binary_dilation08(self): + for type_ in self.types: + data = numpy.zeros([5], type_) + data[1] = 1 + data[3] = 1 + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1, 1, 1, 1, 1]) + + def test_binary_dilation09(self): + for type_ in self.types: + data = numpy.zeros([5], type_) + data[1] = 1 + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [1, 1, 1, 0, 0]) + + def test_binary_dilation10(self): + for type_ in self.types: + data = numpy.zeros([5], type_) + data[1] = 1 + out = ndimage.binary_dilation(data, origin=-1) + assert_array_almost_equal(out, [0, 1, 1, 1, 0]) + + def test_binary_dilation11(self): + for type_ in self.types: + data = numpy.zeros([5], type_) + data[1] = 1 + out = ndimage.binary_dilation(data, origin=1) + assert_array_almost_equal(out, [1, 1, 0, 0, 0]) + + def test_binary_dilation12(self): + for type_ in self.types: + data = numpy.zeros([5], type_) + data[1] = 1 + struct = [1, 0, 1] + out = ndimage.binary_dilation(data, struct) + assert_array_almost_equal(out, [1, 0, 1, 0, 0]) + + def test_binary_dilation13(self): + for type_ in self.types: + data = numpy.zeros([5], type_) + data[1] = 1 + struct = [1, 0, 1] + out = ndimage.binary_dilation(data, struct, border_value=1) + assert_array_almost_equal(out, [1, 0, 1, 0, 1]) + + def test_binary_dilation14(self): + for type_ in self.types: + data = numpy.zeros([5], type_) + data[1] = 1 + struct = [1, 0, 1] + out = ndimage.binary_dilation(data, struct, origin=-1) + assert_array_almost_equal(out, [0, 1, 0, 1, 0]) + + def test_binary_dilation15(self): + for type_ in self.types: + data = numpy.zeros([5], type_) + data[1] = 1 + struct = [1, 0, 1] + out = ndimage.binary_dilation(data, struct, + origin=-1, border_value=1) + assert_array_almost_equal(out, [1, 1, 0, 1, 0]) + + def test_binary_dilation16(self): + for type_ in self.types: + data = numpy.ones([1, 1], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [[1]]) + + def test_binary_dilation17(self): + for type_ in self.types: + data = numpy.zeros([1, 1], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [[0]]) + + def test_binary_dilation18(self): + for type_ in self.types: + data = numpy.ones([1, 3], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [[1, 1, 1]]) + + def test_binary_dilation19(self): + for type_ in self.types: + data = numpy.ones([3, 3], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, [[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + def test_binary_dilation20(self): + for type_ in self.types: + data = numpy.zeros([3, 3], type_) + data[1, 1] = 1 + out = ndimage.binary_dilation(data) + 
assert_array_almost_equal(out, [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]]) + + def test_binary_dilation21(self): + struct = ndimage.generate_binary_structure(2, 2) + for type_ in self.types: + data = numpy.zeros([3, 3], type_) + data[1, 1] = 1 + out = ndimage.binary_dilation(data, struct) + assert_array_almost_equal(out, [[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + def test_binary_dilation22(self): + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_dilation(data) + assert_array_almost_equal(out, expected) + + def test_binary_dilation23(self): + expected = [[1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 0, 0, 0, 0, 1], + [1, 1, 0, 0, 0, 1, 0, 1], + [1, 0, 0, 1, 1, 1, 1, 1], + [1, 0, 1, 1, 1, 1, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [1, 0, 1, 0, 0, 1, 0, 1], + [1, 1, 1, 1, 1, 1, 1, 1]] + + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_dilation(data, border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_dilation24(self): + expected = [[1, 1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 0, 0], + [0, 1, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_dilation(data, origin=(1, 1)) + assert_array_almost_equal(out, expected) + + def test_binary_dilation25(self): + expected = [[1, 1, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 0, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 1, 0, 0, 1, 0, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1]] + + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_dilation(data, origin=(1, 1), border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_dilation26(self): + struct = ndimage.generate_binary_structure(2, 2) + expected = [[1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 
0, 0]], type_) + out = ndimage.binary_dilation(data, struct) + assert_array_almost_equal(out, expected) + + def test_binary_dilation27(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_dilation(data, struct) + assert_array_almost_equal(out, expected) + + def test_binary_dilation28(self): + expected = [[1, 1, 1, 1], + [1, 0, 0, 1], + [1, 0, 0, 1], + [1, 1, 1, 1]] + + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], type_) + out = ndimage.binary_dilation(data, border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_dilation29(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]] + + data = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]], bool) + out = ndimage.binary_dilation(data, struct, iterations=2) + assert_array_almost_equal(out, expected) + + def test_binary_dilation30(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]] + + data = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_dilation(data, struct, iterations=2, output=out) + assert_array_almost_equal(out, expected) + + def test_binary_dilation31(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 0], + [0, 0, 0, 0, 0]] + + data = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]], bool) + out = ndimage.binary_dilation(data, struct, iterations=3) + assert_array_almost_equal(out, expected) + + def test_binary_dilation32(self): + struct = [[0, 1], + [1, 1]] + expected = [[0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 0], + [0, 0, 0, 0, 0]] + + data = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]], bool) + out = numpy.zeros(data.shape, bool) + ndimage.binary_dilation(data, struct, iterations=3, output=out) + assert_array_almost_equal(out, expected) + + def test_binary_dilation33(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 
0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + + out = ndimage.binary_dilation(data, struct, iterations=-1, + mask=mask, border_value=0) + assert_array_almost_equal(out, expected) + + def test_binary_dilation34(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.zeros(mask.shape, bool) + out = ndimage.binary_dilation(data, struct, iterations=-1, + mask=mask, border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_dilation35(self): + tmp = [[1, 1, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 1, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 0, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 1, 0, 0, 1, 0, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1]] + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + mask = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + expected = numpy.logical_and(tmp, mask) + tmp = numpy.logical_and(data, numpy.logical_not(mask)) + expected = numpy.logical_or(expected, tmp) + for type_ in self.types: + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_dilation(data, mask=mask, + origin=(1, 1), border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_propagation01(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + + out = ndimage.binary_propagation(data, struct, + mask=mask, border_value=0) + assert_array_almost_equal(out, expected) + + def test_binary_propagation02(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 
0, 0, 0, 0]] + mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.zeros(mask.shape, bool) + out = ndimage.binary_propagation(data, struct, + mask=mask, border_value=1) + assert_array_almost_equal(out, expected) + + def test_binary_opening01(self): + expected = [[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 0, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_opening(data) + assert_array_almost_equal(out, expected) + + def test_binary_opening02(self): + struct = ndimage.generate_binary_structure(2, 2) + expected = [[1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_opening(data, struct) + assert_array_almost_equal(out, expected) + + def test_binary_closing01(self): + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 0, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_closing(data) + assert_array_almost_equal(out, expected) + + def test_binary_closing02(self): + struct = ndimage.generate_binary_structure(2, 2) + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_closing(data, struct) + assert_array_almost_equal(out, expected) + + def test_binary_fill_holes01(self): + expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], 
bool) + out = ndimage.binary_fill_holes(data) + assert_array_almost_equal(out, expected) + + def test_binary_fill_holes02(self): + expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_fill_holes(data) + assert_array_almost_equal(out, expected) + + def test_binary_fill_holes03(self): + expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 1, 1, 1], + [0, 1, 1, 1, 0, 1, 1, 1], + [0, 1, 1, 1, 0, 1, 1, 1], + [0, 0, 1, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 0, 1, 1, 1], + [0, 1, 0, 1, 0, 1, 0, 1], + [0, 1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0]], bool) + out = ndimage.binary_fill_holes(data) + assert_array_almost_equal(out, expected) + + def test_grey_erosion01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + output = ndimage.grey_erosion(array, footprint=footprint) + assert_array_almost_equal([[2, 2, 1, 1, 1], + [2, 3, 1, 3, 1], + [5, 5, 3, 3, 1]], output) + + def test_grey_erosion02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + output = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + assert_array_almost_equal([[2, 2, 1, 1, 1], + [2, 3, 1, 3, 1], + [5, 5, 3, 3, 1]], output) + + def test_grey_erosion03(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[1, 1, 1], [1, 1, 1]] + output = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + assert_array_almost_equal([[1, 1, 0, 0, 0], + [1, 2, 0, 2, 0], + [4, 4, 2, 2, 0]], output) + + def test_grey_dilation01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[0, 1, 1], [1, 0, 1]] + output = ndimage.grey_dilation(array, footprint=footprint) + assert_array_almost_equal([[7, 7, 9, 9, 5], + [7, 9, 8, 9, 7], + [8, 8, 8, 7, 7]], output) + + def test_grey_dilation02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[0, 1, 1], [1, 0, 1]] + structure = [[0, 0, 0], [0, 0, 0]] + output = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + assert_array_almost_equal([[7, 7, 9, 9, 5], + [7, 9, 8, 9, 7], + [8, 8, 8, 7, 7]], output) + + def test_grey_dilation03(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[0, 1, 1], [1, 0, 1]] + structure = [[1, 1, 1], [1, 1, 1]] + output = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + assert_array_almost_equal([[8, 8, 10, 10, 6], + [8, 10, 9, 10, 8], + [9, 9, 9, 8, 8]], output) + + def test_grey_opening01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + tmp = ndimage.grey_erosion(array, footprint=footprint) + expected = 
ndimage.grey_dilation(tmp, footprint=footprint) + output = ndimage.grey_opening(array, footprint=footprint) + assert_array_almost_equal(expected, output) + + def test_grey_opening02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + expected = ndimage.grey_dilation(tmp, footprint=footprint, + structure=structure) + output = ndimage.grey_opening(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_grey_closing01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + tmp = ndimage.grey_dilation(array, footprint=footprint) + expected = ndimage.grey_erosion(tmp, footprint=footprint) + output = ndimage.grey_closing(array, footprint=footprint) + assert_array_almost_equal(expected, output) + + def test_grey_closing02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + expected = ndimage.grey_erosion(tmp, footprint=footprint, + structure=structure) + output = ndimage.grey_closing(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_morphological_gradient01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp1 = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + tmp2 = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + expected = tmp1 - tmp2 + output = numpy.zeros(array.shape, array.dtype) + ndimage.morphological_gradient(array, footprint=footprint, + structure=structure, output=output) + assert_array_almost_equal(expected, output) + + def test_morphological_gradient02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp1 = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + tmp2 = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + expected = tmp1 - tmp2 + output = ndimage.morphological_gradient(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_morphological_laplace01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp1 = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + tmp2 = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + expected = tmp1 + tmp2 - 2 * array + output = numpy.zeros(array.shape, array.dtype) + ndimage.morphological_laplace(array, footprint=footprint, + structure=structure, output=output) + assert_array_almost_equal(expected, output) + + def test_morphological_laplace02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp1 = ndimage.grey_dilation(array, footprint=footprint, + structure=structure) + tmp2 = ndimage.grey_erosion(array, footprint=footprint, + structure=structure) + 
expected = tmp1 + tmp2 - 2 * array + output = ndimage.morphological_laplace(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_white_tophat01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_opening(array, footprint=footprint, + structure=structure) + expected = array - tmp + output = numpy.zeros(array.shape, array.dtype) + ndimage.white_tophat(array, footprint=footprint, + structure=structure, output=output) + assert_array_almost_equal(expected, output) + + def test_white_tophat02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_opening(array, footprint=footprint, + structure=structure) + expected = array - tmp + output = ndimage.white_tophat(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_white_tophat03(self): + array = numpy.array([[1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_) + structure = numpy.ones((3, 3), dtype=numpy.bool_) + expected = numpy.array([[0, 1, 1, 0, 0, 0, 0], + [1, 0, 0, 1, 1, 1, 0], + [1, 0, 0, 1, 1, 1, 0], + [0, 1, 1, 0, 0, 0, 1], + [0, 1, 1, 0, 1, 0, 1], + [0, 1, 1, 0, 0, 0, 1], + [0, 0, 0, 1, 1, 1, 1]], dtype=numpy.bool_) + + output = ndimage.white_tophat(array, structure=structure) + assert_array_equal(expected, output) + + def test_white_tophat04(self): + array = numpy.eye(5, dtype=numpy.bool_) + structure = numpy.ones((3, 3), dtype=numpy.bool_) + + # Check that type mismatch is properly handled + output = numpy.empty_like(array, dtype=numpy.float) + ndimage.white_tophat(array, structure=structure, output=output) + + def test_black_tophat01(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_closing(array, footprint=footprint, + structure=structure) + expected = tmp - array + output = numpy.zeros(array.shape, array.dtype) + ndimage.black_tophat(array, footprint=footprint, + structure=structure, output=output) + assert_array_almost_equal(expected, output) + + def test_black_tophat02(self): + array = numpy.array([[3, 2, 5, 1, 4], + [7, 6, 9, 3, 5], + [5, 8, 3, 7, 1]]) + footprint = [[1, 0, 1], [1, 1, 0]] + structure = [[0, 0, 0], [0, 0, 0]] + tmp = ndimage.grey_closing(array, footprint=footprint, + structure=structure) + expected = tmp - array + output = ndimage.black_tophat(array, footprint=footprint, + structure=structure) + assert_array_almost_equal(expected, output) + + def test_black_tophat03(self): + array = numpy.array([[1, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_) + structure = numpy.ones((3, 3), dtype=numpy.bool_) + expected = numpy.array([[0, 1, 1, 1, 1, 1, 1], + [1, 0, 0, 0, 0, 0, 1], + [1, 0, 0, 0, 0, 0, 1], + [1, 0, 0, 0, 0, 0, 1], + [1, 0, 0, 0, 1, 0, 1], + [1, 0, 0, 0, 0, 0, 1], + [1, 1, 1, 1, 1, 1, 0]], dtype=numpy.bool_) + + output = ndimage.black_tophat(array, structure=structure) + assert_array_equal(expected, output) + + def test_black_tophat04(self): + 
array = numpy.eye(5, dtype=numpy.bool_) + structure = numpy.ones((3, 3), dtype=numpy.bool_) + + # Check that type mismatch is properly handled + output = numpy.empty_like(array, dtype=numpy.float) + ndimage.black_tophat(array, structure=structure, output=output) + + def test_hit_or_miss01(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[0, 1, 0, 0, 0], + [1, 1, 1, 0, 0], + [0, 1, 0, 1, 1], + [0, 0, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 1], + [0, 1, 1, 1, 1], + [0, 0, 0, 0, 0]], type_) + out = numpy.zeros(data.shape, bool) + ndimage.binary_hit_or_miss(data, struct, output=out) + assert_array_almost_equal(expected, out) + + def test_hit_or_miss02(self): + struct = [[0, 1, 0], + [1, 1, 1], + [0, 1, 0]] + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0], + [1, 1, 1, 0, 0, 1, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_hit_or_miss(data, struct) + assert_array_almost_equal(expected, out) + + def test_hit_or_miss03(self): + struct1 = [[0, 0, 0], + [1, 1, 1], + [0, 0, 0]] + struct2 = [[1, 1, 1], + [0, 0, 0], + [1, 1, 1]] + expected = [[0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + for type_ in self.types: + data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 0, 1, 1, 0], + [0, 0, 0, 0, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], type_) + out = ndimage.binary_hit_or_miss(data, struct1, struct2) + assert_array_almost_equal(expected, out) + + +class TestDilateFix: + + def setup_method(self): + # dilation related setup + self.array = numpy.array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 0, 0, 0]], dtype=numpy.uint8) + + self.sq3x3 = numpy.ones((3, 3)) + dilated3x3 = ndimage.binary_dilation(self.array, structure=self.sq3x3) + self.dilated3x3 = dilated3x3.view(numpy.uint8) + + def test_dilation_square_structure(self): + result = ndimage.grey_dilation(self.array, structure=self.sq3x3) + # +1 accounts for difference between grey and binary dilation + assert_array_almost_equal(result, self.dilated3x3 + 1) + + def test_dilation_scalar_size(self): + result = ndimage.grey_dilation(self.array, size=3) + assert_array_almost_equal(result, self.dilated3x3) + +class TestBinaryOpeningClosing: + + def setup_method(self): + a = numpy.zeros((5,5), dtype=bool) + a[1:4, 1:4] = True + a[4,4] = True + self.array = a + self.sq3x3 = numpy.ones((3,3)) + self.opened_old = ndimage.binary_opening(self.array, self.sq3x3, + 1, None, 0) + self.closed_old = ndimage.binary_closing(self.array, self.sq3x3, + 1, None, 0) + + def test_opening_new_arguments(self): + opened_new = ndimage.binary_opening(self.array, self.sq3x3, 1, None, + 0, None, 0, False) + assert_array_equal(opened_new, self.opened_old) + + def test_closing_new_arguments(self): + closed_new = ndimage.binary_closing(self.array, self.sq3x3, 1, None, + 0, None, 0, False) + assert_array_equal(closed_new, self.closed_old) diff 
--git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_ndimage.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_ndimage.pyc new file mode 100644 index 0000000..a30c9f4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_ndimage.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_regression.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_regression.py new file mode 100644 index 0000000..b5d8975 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_regression.py @@ -0,0 +1,47 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_array_almost_equal + +import scipy.ndimage as ndimage + + +def test_byte_order_median(): + """Regression test for #413: median_filter does not handle bytes orders.""" + a = np.arange(9, dtype='<f4').reshape(3, 3) + ref = ndimage.filters.median_filter(a,(3, 3)) + b = np.arange(9, dtype='>f4').reshape(3, 3) + t = ndimage.filters.median_filter(b, (3, 3)) + assert_array_almost_equal(ref, t) + + +def test_zoom_output_shape(): + """Ticket #643""" + x = np.arange(12).reshape((3,4)) + ndimage.zoom(x, 2, output=np.zeros((6,8))) + + +def test_ticket_742(): + def SE(img, thresh=.7, size=4): + mask = img > thresh + rank = len(mask.shape) + la, co = ndimage.label(mask, + ndimage.generate_binary_structure(rank, rank)) + slices = ndimage.find_objects(la) + + if np.dtype(np.intp) != np.dtype('i'): + shape = (3,1240,1240) + a = np.random.rand(np.product(shape)).reshape(shape) + # shouldn't crash + SE(a) + + +def test_gh_issue_3025(): + """Github issue #3025 - improper merging of labels""" + d = np.zeros((60,320)) + d[:,:257] = 1 + d[:,260:] = 1 + d[36,257] = 1 + d[35,258] = 1 + d[35,259] = 1 + assert ndimage.label(d, np.ones((3,3)))[1] == 1 diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_regression.pyc new file mode 100644 index 0000000..f57a170 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_regression.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_splines.py b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_splines.py new file mode 100644 index 0000000..9c693dc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_splines.py @@ -0,0 +1,67 @@ +"""Tests for spline filtering.""" +from __future__ import division, print_function, absolute_import + +import numpy as np +import pytest + +from numpy.testing import assert_almost_equal + +from scipy import ndimage + + +def get_spline_knot_values(order): + """Knot values to the right of a B-spline's center.""" + knot_values = {0: [1], + 1: [1], + 2: [6, 1], + 3: [4, 1], + 4: [230, 76, 1], + 5: [66, 26, 1]} + + return knot_values[order] + + +def make_spline_knot_matrix(n, order, mode='mirror'): + """Matrix to invert to find the spline coefficients.""" + knot_values = get_spline_knot_values(order) + + matrix = np.zeros((n, n)) + for diag, knot_value in enumerate(knot_values): + indices = np.arange(diag, n) + if diag == 0: + matrix[indices, indices] = knot_value + else: + matrix[indices, indices - diag] = knot_value + matrix[indices - diag, indices] = knot_value + + knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:]) + + if mode == 'mirror': + 
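+        # Boundary handling below folds out-of-range knot weights back into
+        # in-range columns; `start` is the first receiving column and `step`
+        # the direction: mirror reflects about the edge sample (x[-1] == x[1]),
+        # reflect duplicates the edge (x[-1] == x[0]), and wrap is periodic
+        # (x[-1] == x[n-1], hence the negative index).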
start, step = 1, 1 + elif mode == 'reflect': + start, step = 0, 1 + elif mode == 'wrap': + start, step = -1, -1 + else: + raise ValueError('unsupported mode {}'.format(mode)) + + for row in range(len(knot_values) - 1): + for idx, knot_value in enumerate(knot_values[row + 1:]): + matrix[row, start + step*idx] += knot_value + matrix[-row - 1, -start - 1 - step*idx] += knot_value + + return matrix / knot_values_sum + + +@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5]) +@pytest.mark.parametrize('mode', ['mirror', 'wrap', 'reflect']) +def test_spline_filter_vs_matrix_solution(order, mode): + n = 100 + eye = np.eye(n, dtype=float) + spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order, + mode=mode) + spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order, + mode=mode) + matrix = make_spline_knot_matrix(n, order, mode=mode) + assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix)) + assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_splines.pyc b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_splines.pyc new file mode 100644 index 0000000..7e101f7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/ndimage/tests/test_splines.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/odr/__init__.py new file mode 100644 index 0000000..ec52ca3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/odr/__init__.py @@ -0,0 +1,143 @@ +""" +================================================= +Orthogonal distance regression (:mod:`scipy.odr`) +================================================= + +.. currentmodule:: scipy.odr + +Package Content +=============== + +.. autosummary:: + :toctree: generated/ + + Data -- The data to fit. + RealData -- Data with weights as actual std. dev.s and/or covariances. + Model -- Stores information about the function to be fit. + ODR -- Gathers all info & manages the main fitting routine. + Output -- Result from the fit. + odr -- Low-level function for ODR. + + OdrWarning -- Warning about potential problems when running ODR + OdrError -- Error exception. + OdrStop -- Stop exception. + + odr_error -- Same as OdrError (for backwards compatibility) + odr_stop -- Same as OdrStop (for backwards compatibility) + +Prebuilt models: + +.. autosummary:: + :toctree: generated/ + + polynomial + +.. data:: exponential + +.. data:: multilinear + +.. data:: unilinear + +.. data:: quadratic + +.. data:: polynomial + +Usage information +================= + +Introduction +------------ + +Why Orthogonal Distance Regression (ODR)? Sometimes one has +measurement errors in the explanatory (a.k.a., "independent") +variable(s), not just the response (a.k.a., "dependent") variable(s). +Ordinary Least Squares (OLS) fitting procedures treat the data for +explanatory variables as fixed, i.e., not subject to error of any kind. +Furthermore, OLS procedures require that the response variables be an +explicit function of the explanatory variables; sometimes making the +equation explicit is impractical and/or introduces errors. ODR can +handle both of these cases with ease, and can even reduce to the OLS +case if that is sufficient for the problem. + +ODRPACK is a FORTRAN-77 library for performing ODR with possibly +non-linear fitting functions. 
It uses a modified trust-region +Levenberg-Marquardt-type algorithm [1]_ to estimate the function +parameters. The fitting functions are provided by Python functions +operating on NumPy arrays. The required derivatives may be provided +by Python functions as well, or may be estimated numerically. ODRPACK +can do explicit or implicit ODR fits, or it can do OLS. Input and +output variables may be multi-dimensional. Weights can be provided to +account for different variances of the observations, and even +covariances between dimensions of the variables. + +The `scipy.odr` package offers an object-oriented interface to +ODRPACK, in addition to the low-level `odr` function. + +Additional background information about ODRPACK can be found in the +`ODRPACK User's Guide +<https://docs.scipy.org/doc/external/odrpack_guide.pdf>`_, reading +which is recommended. + +Basic usage +----------- + +1. Define the function you want to fit against.:: + + def f(B, x): + '''Linear function y = m*x + b''' + # B is a vector of the parameters. + # x is an array of the current x values. + # x is in the same format as the x passed to Data or RealData. + # + # Return an array in the same format as y passed to Data or RealData. + return B[0]*x + B[1] + +2. Create a Model.:: + + linear = Model(f) + +3. Create a Data or RealData instance.:: + + mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2)) + + or, when the actual covariances are known:: + + mydata = RealData(x, y, sx=sx, sy=sy) + +4. Instantiate ODR with your data, model and initial parameter estimate.:: + + myodr = ODR(mydata, linear, beta0=[1., 2.]) + +5. Run the fit.:: + + myoutput = myodr.run() + +6. Examine output.:: + + myoutput.pprint() + + +References +---------- +.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression," + in "Statistical analysis of measurement error models and + applications: proceedings of the AMS-IMS-SIAM joint summer research + conference held June 10-16, 1989," Contemporary Mathematics, + vol. 112, pg. 186, 1990. + +""" +# version: 0.7 +# author: Robert Kern <robert.kern@gmail.com> +# date: 2006-09-21 + +from __future__ import division, print_function, absolute_import + +from .odrpack import * +from .models import * +from . 
import add_newdocs + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/odr/__init__.pyc new file mode 100644 index 0000000..b819e45 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/odr/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/__odrpack.so b/project/venv/lib/python2.7/site-packages/scipy/odr/__odrpack.so new file mode 100755 index 0000000..7508826 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/odr/__odrpack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/add_newdocs.py b/project/venv/lib/python2.7/site-packages/scipy/odr/add_newdocs.py new file mode 100644 index 0000000..3b819ad --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/odr/add_newdocs.py @@ -0,0 +1,30 @@ +from numpy import add_newdoc + +add_newdoc('scipy.odr', 'odr', + """ + odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None, ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0, taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None, full_output=0) + + Low-level function for ODR. + + See Also + -------- + ODR + Model + Data + RealData + + Notes + ----- + This is a function performing the same operation as the `ODR`, + `Model` and `Data` classes together. The parameters of this + function are explained in the class documentation. + + """) + +add_newdoc('scipy.odr.__odrpack', '_set_exceptions', + """ + _set_exceptions(odr_error, odr_stop) + + Internal function: set exception classes. + + """) diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/add_newdocs.pyc b/project/venv/lib/python2.7/site-packages/scipy/odr/add_newdocs.pyc new file mode 100644 index 0000000..f1e0b5f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/odr/add_newdocs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/models.py b/project/venv/lib/python2.7/site-packages/scipy/odr/models.py new file mode 100644 index 0000000..6743e81 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/odr/models.py @@ -0,0 +1,187 @@ +""" Collection of Model instances for use with the odrpack fitting package. +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.odr.odrpack import Model + +__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic', + 'polynomial'] + + +def _lin_fcn(B, x): + a, b = B[0], B[1:] + b.shape = (b.shape[0], 1) + + return a + (x*b).sum(axis=0) + + +def _lin_fjb(B, x): + a = np.ones(x.shape[-1], float) + res = np.concatenate((a, x.ravel())) + res.shape = (B.shape[-1], x.shape[-1]) + return res + + +def _lin_fjd(B, x): + b = B[1:] + b = np.repeat(b, (x.shape[-1],)*b.shape[-1],axis=0) + b.shape = x.shape + return b + + +def _lin_est(data): + # Eh. The answer is analytical, so just return all ones. + # Don't return zeros since that will interfere with + # ODRPACK's auto-scaling procedures. 
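+    # Concretely: for a 2-D x of shape (m, n) this returns m + 1 ones,
+    # one slope per input dimension plus the intercept, matching the
+    # length-(m + 1) beta that _lin_fcn unpacks as B[0] and B[1:].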
+ + if len(data.x.shape) == 2: + m = data.x.shape[0] + else: + m = 1 + + return np.ones((m + 1,), float) + + +def _poly_fcn(B, x, powers): + a, b = B[0], B[1:] + b.shape = (b.shape[0], 1) + + return a + np.sum(b * np.power(x, powers), axis=0) + + +def _poly_fjacb(B, x, powers): + res = np.concatenate((np.ones(x.shape[-1], float), np.power(x, + powers).flat)) + res.shape = (B.shape[-1], x.shape[-1]) + return res + + +def _poly_fjacd(B, x, powers): + b = B[1:] + b.shape = (b.shape[0], 1) + + b = b * powers + + return np.sum(b * np.power(x, powers-1),axis=0) + + +def _exp_fcn(B, x): + return B[0] + np.exp(B[1] * x) + + +def _exp_fjd(B, x): + return B[1] * np.exp(B[1] * x) + + +def _exp_fjb(B, x): + res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x))) + res.shape = (2, x.shape[-1]) + return res + + +def _exp_est(data): + # Eh. + return np.array([1., 1.]) + + +multilinear = Model(_lin_fcn, fjacb=_lin_fjb, + fjacd=_lin_fjd, estimate=_lin_est, + meta={'name': 'Arbitrary-dimensional Linear', + 'equ':'y = B_0 + Sum[i=1..m, B_i * x_i]', + 'TeXequ':r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'}) + + +def polynomial(order): + """ + Factory function for a general polynomial model. + + Parameters + ---------- + order : int or sequence + If an integer, it becomes the order of the polynomial to fit. If + a sequence of numbers, then these are the explicit powers in the + polynomial. + A constant term (power 0) is always included, so don't include 0. + Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)). + + Returns + ------- + polynomial : Model instance + Model instance. + + """ + + powers = np.asarray(order) + if powers.shape == (): + # Scalar. + powers = np.arange(1, powers + 1) + + powers.shape = (len(powers), 1) + len_beta = len(powers) + 1 + + def _poly_est(data, len_beta=len_beta): + # Eh. Ignore data and return all ones. + return np.ones((len_beta,), float) + + return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb, + estimate=_poly_est, extra_args=(powers,), + meta={'name': 'Sorta-general Polynomial', + 'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1), + 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' % + (len_beta-1)}) + + +exponential = Model(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb, + estimate=_exp_est, meta={'name':'Exponential', + 'equ': 'y= B_0 + exp(B_1 * x)', + 'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'}) + + +def _unilin(B, x): + return x*B[0] + B[1] + + +def _unilin_fjd(B, x): + return np.ones(x.shape, float) * B[0] + + +def _unilin_fjb(B, x): + _ret = np.concatenate((x, np.ones(x.shape, float))) + _ret.shape = (2,) + x.shape + + return _ret + + +def _unilin_est(data): + return (1., 1.) + + +def _quadratic(B, x): + return x*(x*B[0] + B[1]) + B[2] + + +def _quad_fjd(B, x): + return 2*x*B[0] + B[1] + + +def _quad_fjb(B, x): + _ret = np.concatenate((x*x, x, np.ones(x.shape, float))) + _ret.shape = (3,) + x.shape + + return _ret + + +def _quad_est(data): + return (1.,1.,1.) 
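+
+# A minimal usage sketch for the prebuilt instances defined in this module,
+# kept as a comment so importing it stays side-effect free; the x/y values
+# are illustrative only, not from any real dataset:
+#
+#     from scipy.odr import Data, ODR
+#     from scipy.odr.models import quadratic
+#
+#     data = Data([0., 1., 2., 3.], [2., 4., 10., 20.])
+#     fit = ODR(data, quadratic, beta0=[1., 1., 1.])
+#     # (beta0 could also be omitted, since quadratic supplies an
+#     # estimate function)
+#     output = fit.run()
+#     output.pprint()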
+ + +unilinear = Model(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb, + estimate=_unilin_est, meta={'name': 'Univariate Linear', + 'equ': 'y = B_0 * x + B_1', + 'TeXequ': '$y = \\beta_0 x + \\beta_1$'}) + +quadratic = Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, + estimate=_quad_est, meta={'name': 'Quadratic', + 'equ': 'y = B_0*x**2 + B_1*x + B_2', + 'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2'}) diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/models.pyc b/project/venv/lib/python2.7/site-packages/scipy/odr/models.pyc new file mode 100644 index 0000000..766d852 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/odr/models.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/odrpack.py b/project/venv/lib/python2.7/site-packages/scipy/odr/odrpack.py new file mode 100644 index 0000000..beefc5a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/odr/odrpack.py @@ -0,0 +1,1130 @@ +""" +Python wrappers for Orthogonal Distance Regression (ODRPACK). + +Notes +===== + +* Array formats -- FORTRAN stores its arrays in memory column first, i.e. an + array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently, + NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For + efficiency and convenience, the input and output arrays of the fitting + function (and its Jacobians) are passed to FORTRAN without transposition. + Therefore, where the ODRPACK documentation says that the X array is of shape + (N, M), it will be passed to the Python function as an array of shape (M, N). + If M==1, the one-dimensional case, then nothing matters; if M>1, then your + Python functions will be dealing with arrays that are indexed in reverse of + the ODRPACK documentation. No real biggie, but watch out for your indexing of + the Jacobians: the i,j'th elements (@f_i/@x_j) evaluated at the n'th + observation will be returned as jacd[j, i, n]. Except for the Jacobians, it + really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course, + you can always use the transpose() function from scipy explicitly. + +* Examples -- See the accompanying file test/test.py for examples of how to set + up fits of your own. Some are taken from the User's Guide; some are from + other sources. + +* Models -- Some common models are instantiated in the accompanying module + models.py . Contributions are welcome. + +Credits +======= + +* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs. + +Robert Kern +robert.kern@gmail.com + +""" + +from __future__ import division, print_function, absolute_import + +import numpy +from warnings import warn +from scipy.odr import __odrpack + +__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop', + 'Data', 'RealData', 'Model', 'Output', 'ODR', + 'odr_error', 'odr_stop'] + +odr = __odrpack.odr + + +class OdrWarning(UserWarning): + """ + Warning indicating that the data passed into + ODR will cause problems when passed into 'odr' + that the user should be aware of. + """ + pass + + +class OdrError(Exception): + """ + Exception indicating an error in fitting. + + This is raised by `scipy.odr` if an error occurs during fitting. + """ + pass + + +class OdrStop(Exception): + """ + Exception stopping fitting. + + You can raise this exception in your objective function to tell + `scipy.odr` to stop fitting. 
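+
+    For example (a sketch; ``fcn`` is your own objective function)::
+
+        def fcn(beta, x):
+            if numpy.any(numpy.abs(beta) > 1e10):
+                raise OdrStop('parameters are diverging')
+            return beta[0]*x + beta[1]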
+ """ + pass + + +# Backwards compatibility +odr_error = OdrError +odr_stop = OdrStop + +__odrpack._set_exceptions(OdrError, OdrStop) + + +def _conv(obj, dtype=None): + """ Convert an object to the preferred form for input to the odr routine. + """ + + if obj is None: + return obj + else: + if dtype is None: + obj = numpy.asarray(obj) + else: + obj = numpy.asarray(obj, dtype) + if obj.shape == (): + # Scalar. + return obj.dtype.type(obj) + else: + return obj + + +def _report_error(info): + """ Interprets the return code of the odr routine. + + Parameters + ---------- + info : int + The return code of the odr routine. + + Returns + ------- + problems : list(str) + A list of messages about why the odr() routine stopped. + """ + + stopreason = ('Blank', + 'Sum of squares convergence', + 'Parameter convergence', + 'Both sum of squares and parameter convergence', + 'Iteration limit reached')[info % 5] + + if info >= 5: + # questionable results or fatal error + + I = (info//10000 % 10, + info//1000 % 10, + info//100 % 10, + info//10 % 10, + info % 10) + problems = [] + + if I[0] == 0: + if I[1] != 0: + problems.append('Derivatives possibly not correct') + if I[2] != 0: + problems.append('Error occurred in callback') + if I[3] != 0: + problems.append('Problem is not full rank at solution') + problems.append(stopreason) + elif I[0] == 1: + if I[1] != 0: + problems.append('N < 1') + if I[2] != 0: + problems.append('M < 1') + if I[3] != 0: + problems.append('NP < 1 or NP > N') + if I[4] != 0: + problems.append('NQ < 1') + elif I[0] == 2: + if I[1] != 0: + problems.append('LDY and/or LDX incorrect') + if I[2] != 0: + problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect') + if I[3] != 0: + problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect') + if I[4] != 0: + problems.append('LWORK and/or LIWORK too small') + elif I[0] == 3: + if I[1] != 0: + problems.append('STPB and/or STPD incorrect') + if I[2] != 0: + problems.append('SCLB and/or SCLD incorrect') + if I[3] != 0: + problems.append('WE incorrect') + if I[4] != 0: + problems.append('WD incorrect') + elif I[0] == 4: + problems.append('Error in derivatives') + elif I[0] == 5: + problems.append('Error occurred in callback') + elif I[0] == 6: + problems.append('Numerical error detected') + + return problems + + else: + return [stopreason] + + +class Data(object): + """ + The data to fit. + + Parameters + ---------- + x : array_like + Observed data for the independent variable of the regression + y : array_like, optional + If array-like, observed data for the dependent variable of the + regression. A scalar input implies that the model to be used on + the data is implicit. + we : array_like, optional + If `we` is a scalar, then that value is used for all data points (and + all dimensions of the response variable). + If `we` is a rank-1 array of length q (the dimensionality of the + response variable), then this vector is the diagonal of the covariant + weighting matrix for all data points. + If `we` is a rank-1 array of length n (the number of data points), then + the i'th element is the weight for the i'th response variable + observation (single-dimensional only). + If `we` is a rank-2 array of shape (q, q), then this is the full + covariant weighting matrix broadcast to each observation. + If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the + diagonal of the covariant weighting matrix for the i'th observation. 
+ If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the + full specification of the covariant weighting matrix for each + observation. + If the fit is implicit, then only a positive scalar value is used. + wd : array_like, optional + If `wd` is a scalar, then that value is used for all data points + (and all dimensions of the input variable). If `wd` = 0, then the + covariant weighting matrix for each observation is set to the identity + matrix (so each dimension of each observation has the same weight). + If `wd` is a rank-1 array of length m (the dimensionality of the input + variable), then this vector is the diagonal of the covariant weighting + matrix for all data points. + If `wd` is a rank-1 array of length n (the number of data points), then + the i'th element is the weight for the i'th input variable observation + (single-dimensional only). + If `wd` is a rank-2 array of shape (m, m), then this is the full + covariant weighting matrix broadcast to each observation. + If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the + diagonal of the covariant weighting matrix for the i'th observation. + If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the + full specification of the covariant weighting matrix for each + observation. + fix : array_like of ints, optional + The `fix` argument is the same as ifixx in the class ODR. It is an + array of integers with the same shape as data.x that determines which + input observations are treated as fixed. One can use a sequence of + length m (the dimensionality of the input observations) to fix some + dimensions for all observations. A value of 0 fixes the observation, + a value > 0 makes it free. + meta : dict, optional + Free-form dictionary for metadata. + + Notes + ----- + Each argument is attached to the member of the instance of the same name. + The structures of `x` and `y` are described in the Model class docstring. + If `y` is an integer, then the Data instance can only be used to fit with + implicit models where the dimensionality of the response is equal to the + specified value of `y`. + + The `we` argument weights the effect a deviation in the response variable + has on the fit. The `wd` argument weights the effect a deviation in the + input variable has on the fit. To handle multidimensional inputs and + responses easily, the structure of these arguments has the n'th + dimensional axis first. These arguments heavily use the structured + arguments feature of ODRPACK to conveniently and flexibly support all + options. See the ODRPACK User's Guide for a full explanation of how these + weights are used in the algorithm. Basically, a higher value of the weight + for a particular data point makes a deviation at that point more + detrimental to the fit. + + """ + + def __init__(self, x, y=None, we=None, wd=None, fix=None, meta={}): + self.x = _conv(x) + + if not isinstance(self.x, numpy.ndarray): + raise ValueError(("Expected an 'ndarray' of data for 'x', " + "but instead got data of type '{name}'").format( + name=type(self.x).__name__)) + + self.y = _conv(y) + self.we = _conv(we) + self.wd = _conv(wd) + self.fix = _conv(fix) + self.meta = meta + + def set_meta(self, **kwds): + """ Update the metadata dictionary with the keywords and data provided + by keywords. + + Examples + -------- + :: + + data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay") + """ + + self.meta.update(kwds) + + def __getattr__(self, attr): + """ Dispatch attribute access to the metadata dictionary. 
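+
+        For example, after ``data.set_meta(lab="Ph 7; Lab 26")`` the same
+        value is reachable as ``data.lab``.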
+ """ + if attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + + +class RealData(Data): + """ + The data, with weightings as actual standard deviations and/or + covariances. + + Parameters + ---------- + x : array_like + Observed data for the independent variable of the regression + y : array_like, optional + If array-like, observed data for the dependent variable of the + regression. A scalar input implies that the model to be used on + the data is implicit. + sx : array_like, optional + Standard deviations of `x`. + `sx` are standard deviations of `x` and are converted to weights by + dividing 1.0 by their squares. + sy : array_like, optional + Standard deviations of `y`. + `sy` are standard deviations of `y` and are converted to weights by + dividing 1.0 by their squares. + covx : array_like, optional + Covariance of `x` + `covx` is an array of covariance matrices of `x` and are converted to + weights by performing a matrix inversion on each observation's + covariance matrix. + covy : array_like, optional + Covariance of `y` + `covy` is an array of covariance matrices and are converted to + weights by performing a matrix inversion on each observation's + covariance matrix. + fix : array_like, optional + The argument and member fix is the same as Data.fix and ODR.ifixx: + It is an array of integers with the same shape as `x` that + determines which input observations are treated as fixed. One can + use a sequence of length m (the dimensionality of the input + observations) to fix some dimensions for all observations. A value + of 0 fixes the observation, a value > 0 makes it free. + meta : dict, optional + Free-form dictionary for metadata. + + Notes + ----- + The weights `wd` and `we` are computed from provided values as follows: + + `sx` and `sy` are converted to weights by dividing 1.0 by their squares. + For example, ``wd = 1./numpy.power(`sx`, 2)``. + + `covx` and `covy` are arrays of covariance matrices and are converted to + weights by performing a matrix inversion on each observation's covariance + matrix. For example, ``we[i] = numpy.linalg.inv(covy[i])``. + + These arguments follow the same structured argument conventions as wd and + we only restricted by their natures: `sx` and `sy` can't be rank-3, but + `covx` and `covy` can be. + + Only set *either* `sx` or `covx` (not both). Setting both will raise an + exception. Same with `sy` and `covy`. + + """ + + def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None, + fix=None, meta={}): + if (sx is not None) and (covx is not None): + raise ValueError("cannot set both sx and covx") + if (sy is not None) and (covy is not None): + raise ValueError("cannot set both sy and covy") + + # Set flags for __getattr__ + self._ga_flags = {} + if sx is not None: + self._ga_flags['wd'] = 'sx' + else: + self._ga_flags['wd'] = 'covx' + if sy is not None: + self._ga_flags['we'] = 'sy' + else: + self._ga_flags['we'] = 'covy' + + self.x = _conv(x) + + if not isinstance(self.x, numpy.ndarray): + raise ValueError(("Expected an 'ndarray' of data for 'x', " + "but instead got data of type '{name}'").format( + name=type(self.x).__name__)) + + self.y = _conv(y) + self.sx = _conv(sx) + self.sy = _conv(sy) + self.covx = _conv(covx) + self.covy = _conv(covy) + self.fix = _conv(fix) + self.meta = meta + + def _sd2wt(self, sd): + """ Convert standard deviation to weights. + """ + + return 1./numpy.power(sd, 2) + + def _cov2wt(self, cov): + """ Convert covariance matrix(-ices) to weights. 
+ """ + + from numpy.dual import inv + + if len(cov.shape) == 2: + return inv(cov) + else: + weights = numpy.zeros(cov.shape, float) + + for i in range(cov.shape[-1]): # n + weights[:,:,i] = inv(cov[:,:,i]) + + return weights + + def __getattr__(self, attr): + lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx), + ('wd', 'covx'): (self._cov2wt, self.covx), + ('we', 'sy'): (self._sd2wt, self.sy), + ('we', 'covy'): (self._cov2wt, self.covy)} + + if attr not in ('wd', 'we'): + if attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + else: + func, arg = lookup_tbl[(attr, self._ga_flags[attr])] + + if arg is not None: + return func(*(arg,)) + else: + return None + + +class Model(object): + """ + The Model class stores information about the function you wish to fit. + + It stores the function itself, at the least, and optionally stores + functions which compute the Jacobians used during fitting. Also, one + can provide a function that will provide reasonable starting values + for the fit parameters possibly given the set of data. + + Parameters + ---------- + fcn : function + fcn(beta, x) --> y + fjacb : function + Jacobian of fcn wrt the fit parameters beta. + + fjacb(beta, x) --> @f_i(x,B)/@B_j + fjacd : function + Jacobian of fcn wrt the (possibly multidimensional) input + variable. + + fjacd(beta, x) --> @f_i(x,B)/@x_j + extra_args : tuple, optional + If specified, `extra_args` should be a tuple of extra + arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called + by `apply(fcn, (beta, x) + extra_args)` + estimate : array_like of rank-1 + Provides estimates of the fit parameters from the data + + estimate(data) --> estbeta + implicit : boolean + If TRUE, specifies that the model + is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit + against + meta : dict, optional + freeform dictionary of metadata for the model + + Notes + ----- + Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and + return a NumPy array. The `estimate` object takes an instance of the + Data class. + + Here are the rules for the shapes of the argument and return + arrays of the callback functions: + + `x` + if the input data is single-dimensional, then `x` is rank-1 + array; i.e. ``x = array([1, 2, 3, ...]); x.shape = (n,)`` + If the input data is multi-dimensional, then `x` is a rank-2 array; + i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``. + In all cases, it has the same shape as the input data array passed to + `odr`. `m` is the dimensionality of the input data, `n` is the number + of observations. + `y` + if the response variable is single-dimensional, then `y` is a + rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``. + If the response variable is multi-dimensional, then `y` is a rank-2 + array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape = + (q, n)`` where `q` is the dimensionality of the response variable. + `beta` + rank-1 array of length `p` where `p` is the number of parameters; + i.e. ``beta = array([B_1, B_2, ..., B_p])`` + `fjacb` + if the response variable is multi-dimensional, then the + return array's shape is `(q, p, n)` such that ``fjacb(x,beta)[l,k,i] = + d f_l(X,B)/d B_k`` evaluated at the i'th data point. If `q == 1`, then + the return array is only rank-2 and with shape `(p, n)`. + `fjacd` + as with fjacb, only the return array's shape is `(q, m, n)` + such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the i'th data + point. 
If `q == 1`, then the return array's shape is `(m, n)`. If + `m == 1`, the shape is (q, n). If `m == q == 1`, the shape is `(n,)`. + + """ + + def __init__(self, fcn, fjacb=None, fjacd=None, + extra_args=None, estimate=None, implicit=0, meta=None): + + self.fcn = fcn + self.fjacb = fjacb + self.fjacd = fjacd + + if extra_args is not None: + extra_args = tuple(extra_args) + + self.extra_args = extra_args + self.estimate = estimate + self.implicit = implicit + self.meta = meta + + def set_meta(self, **kwds): + """ Update the metadata dictionary with the keywords and data provided + here. + + Examples + -------- + set_meta(name="Exponential", equation="y = a exp(b x) + c") + """ + + self.meta.update(kwds) + + def __getattr__(self, attr): + """ Dispatch attribute access to the metadata. + """ + + if attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + + +class Output(object): + """ + The Output class stores the output of an ODR run. + + Attributes + ---------- + beta : ndarray + Estimated parameter values, of shape (q,). + sd_beta : ndarray + Standard errors of the estimated parameters, of shape (p,). + cov_beta : ndarray + Covariance matrix of the estimated parameters, of shape (p,p). + delta : ndarray, optional + Array of estimated errors in input variables, of same shape as `x`. + eps : ndarray, optional + Array of estimated errors in response variables, of same shape as `y`. + xplus : ndarray, optional + Array of ``x + delta``. + y : ndarray, optional + Array ``y = fcn(x + delta)``. + res_var : float, optional + Residual variance. + sum_square : float, optional + Sum of squares error. + sum_square_delta : float, optional + Sum of squares of delta error. + sum_square_eps : float, optional + Sum of squares of eps error. + inv_condnum : float, optional + Inverse condition number (cf. ODRPACK UG p. 77). + rel_error : float, optional + Relative error in function values computed within fcn. + work : ndarray, optional + Final work array. + work_ind : dict, optional + Indices into work for drawing out values (cf. ODRPACK UG p. 83). + info : int, optional + Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38). + stopreason : list of str, optional + `info` interpreted into English. + + Notes + ----- + Takes one argument for initialization, the return value from the + function `odr`. The attributes listed as "optional" above are only + present if `odr` was run with ``full_output=1``. + + """ + + def __init__(self, output): + self.beta = output[0] + self.sd_beta = output[1] + self.cov_beta = output[2] + + if len(output) == 4: + # full output + self.__dict__.update(output[3]) + self.stopreason = _report_error(self.info) + + def pprint(self): + """ Pretty-print important results. + """ + + print('Beta:', self.beta) + print('Beta Std Error:', self.sd_beta) + print('Beta Covariance:', self.cov_beta) + if hasattr(self, 'info'): + print('Residual Variance:',self.res_var) + print('Inverse Condition #:', self.inv_condnum) + print('Reason(s) for Halting:') + for r in self.stopreason: + print(' %s' % r) + + +class ODR(object): + """ + The ODR class gathers all information and coordinates the running of the + main fitting routine. + + Members of instances of the ODR class have the same names as the arguments + to the initialization routine. 
+ + Parameters + ---------- + data : Data class instance + instance of the Data class + model : Model class instance + instance of the Model class + + Other Parameters + ---------------- + beta0 : array_like of rank-1 + a rank-1 sequence of initial parameter values. Optional if + model provides an "estimate" function to estimate these values. + delta0 : array_like of floats of rank-1, optional + a (double-precision) float array to hold the initial values of + the errors in the input variables. Must be same shape as data.x + ifixb : array_like of ints of rank-1, optional + sequence of integers with the same length as beta0 that determines + which parameters are held fixed. A value of 0 fixes the parameter, + a value > 0 makes the parameter free. + ifixx : array_like of ints with same shape as data.x, optional + an array of integers with the same shape as data.x that determines + which input observations are treated as fixed. One can use a sequence + of length m (the dimensionality of the input observations) to fix some + dimensions for all observations. A value of 0 fixes the observation, + a value > 0 makes it free. + job : int, optional + an integer telling ODRPACK what tasks to perform. See p. 31 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_job post-initialization for a more readable interface. + iprint : int, optional + an integer telling ODRPACK what to print. See pp. 33-34 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_iprint post-initialization for a more readable interface. + errfile : str, optional + string with the filename to print ODRPACK errors to. *Do Not Open + This File Yourself!* + rptfile : str, optional + string with the filename to print ODRPACK summaries to. *Do Not + Open This File Yourself!* + ndigit : int, optional + integer specifying the number of reliable digits in the computation + of the function. + taufac : float, optional + float specifying the initial trust region. The default value is 1. + The initial trust region is equal to taufac times the length of the + first computed Gauss-Newton step. taufac must be less than 1. + sstol : float, optional + float specifying the tolerance for convergence based on the relative + change in the sum-of-squares. The default value is eps**(1/2) where eps + is the smallest value such that 1 + eps > 1 for double precision + computation on the machine. sstol must be less than 1. + partol : float, optional + float specifying the tolerance for convergence based on the relative + change in the estimated parameters. The default value is eps**(2/3) for + explicit models and ``eps**(1/3)`` for implicit models. partol must be less + than 1. + maxit : int, optional + integer specifying the maximum number of iterations to perform. For + first runs, maxit is the total number of iterations performed and + defaults to 50. For restarts, maxit is the number of additional + iterations to perform and defaults to 10. + stpb : array_like, optional + sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute + finite difference derivatives wrt the parameters. + stpd : optional + array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative + step sizes to compute finite difference derivatives wrt the input + variable errors. If stpd is a rank-1 array with length m (the + dimensionality of the input variable), then the values are broadcast to + all observations. 
+ sclb : array_like, optional + sequence (``len(stpb) == len(beta0)``) of scaling factors for the + parameters. The purpose of these scaling factors are to scale all of + the parameters to around unity. Normally appropriate scaling factors + are computed if this argument is not specified. Specify them yourself + if the automatic procedure goes awry. + scld : array_like, optional + array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling + factors for the *errors* in the input variables. Again, these factors + are automatically computed if you do not provide them. If scld.shape == + (m,), then the scaling factors are broadcast to all observations. + work : ndarray, optional + array to hold the double-valued working data for ODRPACK. When + restarting, takes the value of self.output.work. + iwork : ndarray, optional + array to hold the integer-valued working data for ODRPACK. When + restarting, takes the value of self.output.iwork. + + Attributes + ---------- + data : Data + The data for this fit + model : Model + The model used in fit + output : Output + An instance if the Output class containing all of the returned + data from an invocation of ODR.run() or ODR.restart() + + """ + + def __init__(self, data, model, beta0=None, delta0=None, ifixb=None, + ifixx=None, job=None, iprint=None, errfile=None, rptfile=None, + ndigit=None, taufac=None, sstol=None, partol=None, maxit=None, + stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None): + + self.data = data + self.model = model + + if beta0 is None: + if self.model.estimate is not None: + self.beta0 = _conv(self.model.estimate(self.data)) + else: + raise ValueError( + "must specify beta0 or provide an estimater with the model" + ) + else: + self.beta0 = _conv(beta0) + + if ifixx is None and data.fix is not None: + ifixx = data.fix + + self.delta0 = _conv(delta0) + # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit + # platforms. + # XXX: some other FORTRAN compilers may not agree. + self.ifixx = _conv(ifixx, dtype=numpy.int32) + self.ifixb = _conv(ifixb, dtype=numpy.int32) + self.job = job + self.iprint = iprint + self.errfile = errfile + self.rptfile = rptfile + self.ndigit = ndigit + self.taufac = taufac + self.sstol = sstol + self.partol = partol + self.maxit = maxit + self.stpb = _conv(stpb) + self.stpd = _conv(stpd) + self.sclb = _conv(sclb) + self.scld = _conv(scld) + self.work = _conv(work) + self.iwork = _conv(iwork) + + self.output = None + + self._check() + + def _check(self): + """ Check the inputs for consistency, but don't bother checking things + that the builtin function odr will check. 
+ """ + + x_s = list(self.data.x.shape) + + if isinstance(self.data.y, numpy.ndarray): + y_s = list(self.data.y.shape) + if self.model.implicit: + raise OdrError("an implicit model cannot use response data") + else: + # implicit model with q == self.data.y + y_s = [self.data.y, x_s[-1]] + if not self.model.implicit: + raise OdrError("an explicit model needs response data") + self.set_job(fit_type=1) + + if x_s[-1] != y_s[-1]: + raise OdrError("number of observations do not match") + + n = x_s[-1] + + if len(x_s) == 2: + m = x_s[0] + else: + m = 1 + if len(y_s) == 2: + q = y_s[0] + else: + q = 1 + + p = len(self.beta0) + + # permissible output array shapes + + fcn_perms = [(q, n)] + fjacd_perms = [(q, m, n)] + fjacb_perms = [(q, p, n)] + + if q == 1: + fcn_perms.append((n,)) + fjacd_perms.append((m, n)) + fjacb_perms.append((p, n)) + if m == 1: + fjacd_perms.append((q, n)) + if p == 1: + fjacb_perms.append((q, n)) + if m == q == 1: + fjacd_perms.append((n,)) + if p == q == 1: + fjacb_perms.append((n,)) + + # try evaluating the supplied functions to make sure they provide + # sensible outputs + + arglist = (self.beta0, self.data.x) + if self.model.extra_args is not None: + arglist = arglist + self.model.extra_args + res = self.model.fcn(*arglist) + + if res.shape not in fcn_perms: + print(res.shape) + print(fcn_perms) + raise OdrError("fcn does not output %s-shaped array" % y_s) + + if self.model.fjacd is not None: + res = self.model.fjacd(*arglist) + if res.shape not in fjacd_perms: + raise OdrError( + "fjacd does not output %s-shaped array" % repr((q, m, n))) + if self.model.fjacb is not None: + res = self.model.fjacb(*arglist) + if res.shape not in fjacb_perms: + raise OdrError( + "fjacb does not output %s-shaped array" % repr((q, p, n))) + + # check shape of delta0 + + if self.delta0 is not None and self.delta0.shape != self.data.x.shape: + raise OdrError( + "delta0 is not a %s-shaped array" % repr(self.data.x.shape)) + + if self.data.x.size == 0: + warn(("Empty data detected for ODR instance. " + "Do not expect any fitting to occur"), + OdrWarning) + + def _gen_work(self): + """ Generate a suitable work array if one does not already exist. + """ + + n = self.data.x.shape[-1] + p = self.beta0.shape[0] + + if len(self.data.x.shape) == 2: + m = self.data.x.shape[0] + else: + m = 1 + + if self.model.implicit: + q = self.data.y + elif len(self.data.y.shape) == 2: + q = self.data.y.shape[0] + else: + q = 1 + + if self.data.we is None: + ldwe = ld2we = 1 + elif len(self.data.we.shape) == 3: + ld2we, ldwe = self.data.we.shape[1:] + else: + # Okay, this isn't precisely right, but for this calculation, + # it's fine + ldwe = 1 + ld2we = self.data.we.shape[1] + + if self.job % 10 < 2: + # ODR not OLS + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p + + 2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q) + else: + # OLS not ODR + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p + + 5*q + q*(p+m) + ldwe*ld2we*q) + + if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\ + and self.work.dtype.str.endswith('f8'): + # the existing array is fine + return + else: + self.work = numpy.zeros((lwork,), float) + + def set_job(self, fit_type=None, deriv=None, var_calc=None, + del_init=None, restart=None): + """ + Sets the "job" parameter is a hopefully comprehensible way. + + If an argument is not specified, then the value is left as is. The + default value from class initialization is for all of these options set + to 0. 
+ + Parameters + ---------- + fit_type : {0, 1, 2} int + 0 -> explicit ODR + + 1 -> implicit ODR + + 2 -> ordinary least-squares + deriv : {0, 1, 2, 3} int + 0 -> forward finite differences + + 1 -> central finite differences + + 2 -> user-supplied derivatives (Jacobians) with results + checked by ODRPACK + + 3 -> user-supplied derivatives, no checking + var_calc : {0, 1, 2} int + 0 -> calculate asymptotic covariance matrix and fit + parameter uncertainties (V_B, s_B) using derivatives + recomputed at the final solution + + 1 -> calculate V_B and s_B using derivatives from last iteration + + 2 -> do not calculate V_B and s_B + del_init : {0, 1} int + 0 -> initial input variable offsets set to 0 + + 1 -> initial offsets provided by user in variable "work" + restart : {0, 1} int + 0 -> fit is not a restart + + 1 -> fit is a restart + + Notes + ----- + The permissible values are different from those given on pg. 31 of the + ODRPACK User's Guide only in that one cannot specify numbers greater than + the last value for each variable. + + If one does not supply functions to compute the Jacobians, the fitting + procedure will change deriv to 0, finite differences, as a default. To + initialize the input variable offsets by yourself, set del_init to 1 and + put the offsets into the "work" variable correctly. + + """ + + if self.job is None: + job_l = [0, 0, 0, 0, 0] + else: + job_l = [self.job // 10000 % 10, + self.job // 1000 % 10, + self.job // 100 % 10, + self.job // 10 % 10, + self.job % 10] + + if fit_type in (0, 1, 2): + job_l[4] = fit_type + if deriv in (0, 1, 2, 3): + job_l[3] = deriv + if var_calc in (0, 1, 2): + job_l[2] = var_calc + if del_init in (0, 1): + job_l[1] = del_init + if restart in (0, 1): + job_l[0] = restart + + self.job = (job_l[0]*10000 + job_l[1]*1000 + + job_l[2]*100 + job_l[3]*10 + job_l[4]) + + def set_iprint(self, init=None, so_init=None, + iter=None, so_iter=None, iter_step=None, final=None, so_final=None): + """ Set the iprint parameter for the printing of computation reports. + + If any of the arguments are specified here, then they are set in the + iprint member. If iprint is not set manually or with this method, then + ODRPACK defaults to no printing. If no filename is specified with the + member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to + print to stdout in addition to the specified filename by setting the + so_* arguments to this function, but one cannot specify to print to + stdout but not a file since one can do that by not specifying a rptfile + filename. + + There are three reports: initialization, iteration, and final reports. + They are represented by the arguments init, iter, and final + respectively. The permissible values are 0, 1, and 2 representing "no + report", "short report", and "long report" respectively. + + The argument iter_step (0 <= iter_step <= 9) specifies how often to make + the iteration report; the report will be made for every iter_step'th + iteration starting with iteration one. If iter_step == 0, then no + iteration report is made, regardless of the other arguments. + + If the rptfile is None, then any so_* arguments supplied will raise an + exception. 
+ """ + if self.iprint is None: + self.iprint = 0 + + ip = [self.iprint // 1000 % 10, + self.iprint // 100 % 10, + self.iprint // 10 % 10, + self.iprint % 10] + + # make a list to convert iprint digits to/from argument inputs + # rptfile, stdout + ip2arg = [[0, 0], # none, none + [1, 0], # short, none + [2, 0], # long, none + [1, 1], # short, short + [2, 1], # long, short + [1, 2], # short, long + [2, 2]] # long, long + + if (self.rptfile is None and + (so_init is not None or + so_iter is not None or + so_final is not None)): + raise OdrError( + "no rptfile specified, cannot output to stdout twice") + + iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]] + + if init is not None: + iprint_l[0] = init + if so_init is not None: + iprint_l[1] = so_init + if iter is not None: + iprint_l[2] = iter + if so_iter is not None: + iprint_l[3] = so_iter + if final is not None: + iprint_l[4] = final + if so_final is not None: + iprint_l[5] = so_final + + if iter_step in range(10): + # 0..9 + ip[2] = iter_step + + ip[0] = ip2arg.index(iprint_l[0:2]) + ip[1] = ip2arg.index(iprint_l[2:4]) + ip[3] = ip2arg.index(iprint_l[4:6]) + + self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3] + + def run(self): + """ Run the fitting routine with all of the information given and with ``full_output=1``. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . + """ + + args = (self.model.fcn, self.beta0, self.data.y, self.data.x) + kwds = {'full_output': 1} + kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile', + 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb', + 'stpd', 'sclb', 'scld', 'work', 'iwork'] + + if self.delta0 is not None and self.job % 1000 // 10 == 1: + # delta0 provided and fit is not a restart + self._gen_work() + + d0 = numpy.ravel(self.delta0) + + self.work[:len(d0)] = d0 + + # set the kwds from other objects explicitly + if self.model.fjacb is not None: + kwds['fjacb'] = self.model.fjacb + if self.model.fjacd is not None: + kwds['fjacd'] = self.model.fjacd + if self.data.we is not None: + kwds['we'] = self.data.we + if self.data.wd is not None: + kwds['wd'] = self.data.wd + if self.model.extra_args is not None: + kwds['extra_args'] = self.model.extra_args + + # implicitly set kwds from self's members + for attr in kwd_l: + obj = getattr(self, attr) + if obj is not None: + kwds[attr] = obj + + self.output = Output(odr(*args, **kwds)) + + return self.output + + def restart(self, iter=None): + """ Restarts the run with iter more iterations. + + Parameters + ---------- + iter : int, optional + ODRPACK's default for the number of new iterations is 10. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . 
+ """ + + if self.output is None: + raise OdrError("cannot restart: run() has not been called before") + + self.set_job(restart=1) + self.work = self.output.work + self.iwork = self.output.iwork + + self.maxit = iter + + return self.run() diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/odrpack.pyc b/project/venv/lib/python2.7/site-packages/scipy/odr/odrpack.pyc new file mode 100644 index 0000000..2f2a798 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/odr/odrpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/setup.py b/project/venv/lib/python2.7/site-packages/scipy/odr/setup.py new file mode 100644 index 0000000..86df6cb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/odr/setup.py @@ -0,0 +1,43 @@ +from __future__ import division, print_function, absolute_import + +from os.path import join + + +def configuration(parent_package='', top_path=None): + import warnings + from numpy.distutils.misc_util import Configuration + from scipy._build_utils.system_info import get_info, BlasNotFoundError + config = Configuration('odr', parent_package, top_path) + + libodr_files = ['d_odr.f', + 'd_mprec.f', + 'dlunoc.f'] + + blas_info = get_info('blas_opt') + if blas_info: + libodr_files.append('d_lpk.f') + else: + warnings.warn(BlasNotFoundError.__doc__) + libodr_files.append('d_lpkbls.f') + + odrpack_src = [join('odrpack', x) for x in libodr_files] + config.add_library('odrpack', sources=odrpack_src) + + sources = ['__odrpack.c'] + libraries = ['odrpack'] + blas_info.pop('libraries', []) + include_dirs = ['.'] + blas_info.pop('include_dirs', []) + config.add_extension('__odrpack', + sources=sources, + libraries=libraries, + include_dirs=include_dirs, + depends=(['odrpack.h'] + odrpack_src), + **blas_info + ) + + config.add_data_dir('tests') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/odr/setup.pyc new file mode 100644 index 0000000..a44bb15 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/odr/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/odr/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/odr/tests/__init__.pyc new file mode 100644 index 0000000..fdb5cbe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/odr/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/tests/test_odr.py b/project/venv/lib/python2.7/site-packages/scipy/odr/tests/test_odr.py new file mode 100644 index 0000000..3a18a5a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/odr/tests/test_odr.py @@ -0,0 +1,360 @@ +from __future__ import division, print_function, absolute_import + +# Scipy imports. 
+import numpy as np +from numpy import pi +from numpy.testing import (assert_array_almost_equal, + assert_equal, assert_warns) +from pytest import raises as assert_raises +from scipy.odr import Data, Model, ODR, RealData, OdrStop, OdrWarning + + +class TestODR(object): + + # Bad Data for 'x' + + def test_bad_data(self): + assert_raises(ValueError, Data, 2, 1) + assert_raises(ValueError, RealData, 2, 1) + + # Empty Data for 'x' + def empty_data_func(self, B, x): + return B[0]*x + B[1] + + def test_empty_data(self): + beta0 = [0.02, 0.0] + linear = Model(self.empty_data_func) + + empty_dat = Data([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + empty_dat = RealData([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + # Explicit Example + + def explicit_fcn(self, B, x): + ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2) + return ret + + def explicit_fjd(self, B, x): + eBx = np.exp(B[2]*x) + ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx + return ret + + def explicit_fjb(self, B, x): + eBx = np.exp(B[2]*x) + res = np.vstack([np.ones(x.shape[-1]), + np.power(eBx-1.0, 2), + B[1]*2.0*(eBx-1.0)*eBx*x]) + return res + + def test_explicit(self): + explicit_mod = Model( + self.explicit_fcn, + fjacb=self.explicit_fjb, + fjacd=self.explicit_fjd, + meta=dict(name='Sample Explicit Model', + ref='ODRPACK UG, pg. 39'), + ) + explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.], + [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6, + 1213.8,1215.5,1212.]) + explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1], + ifixx=[0,0,1,1,1,1,1,1,1,1,1,0]) + explicit_odr.set_job(deriv=2) + explicit_odr.set_iprint(init=0, iter=0, final=0) + + out = explicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.2646548050648876e+03, -5.4018409956678255e+01, + -8.7849712165253724e-02]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[4.4949592379003039e-01, -3.7421976890364739e-01, + -8.0978217468468912e-04], + [-3.7421976890364739e-01, 1.0529686462751804e+00, + -1.9453521827942002e-03], + [-8.0978217468468912e-04, -1.9453521827942002e-03, + 1.6827336938454476e-05]]), + ) + + # Implicit Example + + def implicit_fcn(self, B, x): + return (B[2]*np.power(x[0]-B[0], 2) + + 2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) + + B[4]*np.power(x[1]-B[1], 2) - 1.0) + + def test_implicit(self): + implicit_mod = Model( + self.implicit_fcn, + implicit=1, + meta=dict(name='Sample Implicit Model', + ref='ODRPACK UG, pg. 
49'), + ) + implicit_dat = Data([ + [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28, + -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44], + [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32, + -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]], + 1, + ) + implicit_odr = ODR(implicit_dat, implicit_mod, + beta0=[-1.0, -3.0, 0.09, 0.02, 0.08]) + + out = implicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354, + 0.0162299708984738, 0.0797537982976416]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314, + 0.0027500347539902, 0.0034962501532468]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[2.1089274602333052e+00, -1.9437686411979040e+00, + 7.0263550868344446e-02, -4.7175267373474862e-02, + 5.2515575927380355e-02], + [-1.9437686411979040e+00, 2.0481509222414456e+00, + -6.1600515853057307e-02, 4.6268827806232933e-02, + -5.8822307501391467e-02], + [7.0263550868344446e-02, -6.1600515853057307e-02, + 2.8659542561579308e-03, -1.4628662260014491e-03, + 1.4528860663055824e-03], + [-4.7175267373474862e-02, 4.6268827806232933e-02, + -1.4628662260014491e-03, 1.2855592885514335e-03, + -1.2692942951415293e-03], + [5.2515575927380355e-02, -5.8822307501391467e-02, + 1.4528860663055824e-03, -1.2692942951415293e-03, + 2.0778813389755596e-03]]), + ) + + # Multi-variable Example + + def multi_fcn(self, B, x): + if (x < 0.0).any(): + raise OdrStop + theta = pi*B[3]/2. + ctheta = np.cos(theta) + stheta = np.sin(theta) + omega = np.power(2.*pi*x*np.exp(-B[2]), B[3]) + phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta)) + r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) + + np.power(omega*stheta, 2)), -B[4]) + ret = np.vstack([B[1] + r*np.cos(B[4]*phi), + r*np.sin(B[4]*phi)]) + return ret + + def test_multi(self): + multi_mod = Model( + self.multi_fcn, + meta=dict(name='Sample Multi-Response Model', + ref='ODRPACK UG, pg. 
56'), + ) + + multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0, + 700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0, + 15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0]) + multi_y = np.array([ + [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713, + 3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984, + 2.934, 2.876, 2.838, 2.798, 2.759], + [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309, + 0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218, + 0.202, 0.182, 0.168, 0.153, 0.139], + ]) + n = len(multi_x) + multi_we = np.zeros((2, 2, n), dtype=float) + multi_ifixx = np.ones(n, dtype=int) + multi_delta = np.zeros(n, dtype=float) + + multi_we[0,0,:] = 559.6 + multi_we[1,0,:] = multi_we[0,1,:] = -1634.0 + multi_we[1,1,:] = 8397.0 + + for i in range(n): + if multi_x[i] < 100.0: + multi_ifixx[i] = 0 + elif multi_x[i] <= 150.0: + pass # defaults are fine + elif multi_x[i] <= 1000.0: + multi_delta[i] = 25.0 + elif multi_x[i] <= 10000.0: + multi_delta[i] = 560.0 + elif multi_x[i] <= 100000.0: + multi_delta[i] = 9500.0 + else: + multi_delta[i] = 144000.0 + if multi_x[i] == 100.0 or multi_x[i] == 150.0: + multi_we[:,:,i] = 0.0 + + multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2), + we=multi_we) + multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5], + delta0=multi_delta, ifixx=multi_ifixx) + multi_odr.set_job(deriv=1, del_init=1) + + out = multi_odr.run() + assert_array_almost_equal( + out.beta, + np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978, + 0.5101147161764654, 0.5173902330489161]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757, + 0.0132642749596149, 0.0288529201353984]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406, + -0.0058700836512467, 0.011281212888768], + [0.0036159705923791, 0.0064793789429006, 0.0517610978353126, + -0.0051181304940204, 0.0130726943624117], + [0.0438637051470406, 0.0517610978353126, 0.5182263323095322, + -0.0563083340093696, 0.1269490939468611], + [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696, + 0.0066939246261263, -0.0140184391377962], + [0.011281212888768, 0.0130726943624117, 0.1269490939468611, + -0.0140184391377962, 0.0316733013820852]]), + ) + + # Pearson's Data + # K. 
Pearson, Philosophical Magazine, 2, 559 (1901) + + def pearson_fcn(self, B, x): + return B[0] + B[1]*x + + def test_pearson(self): + p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]) + p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]) + p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.]) + p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04]) + + p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy) + + # Reverse the data to test invariance of results + pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx) + + p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit')) + + p_odr = ODR(p_dat, p_mod, beta0=[1.,1.]) + pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.]) + + out = p_odr.run() + assert_array_almost_equal( + out.beta, + np.array([5.4767400299231674, -0.4796082367610305]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.3590121690702467, 0.0706291186037444]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0854275622946333, -0.0161807025443155], + [-0.0161807025443155, 0.003306337993922]]), + ) + + rout = pr_odr.run() + assert_array_almost_equal( + rout.beta, + np.array([11.4192022410781231, -2.0850374506165474]), + ) + assert_array_almost_equal( + rout.sd_beta, + np.array([0.9820231665657161, 0.3070515616198911]), + ) + assert_array_almost_equal( + rout.cov_beta, + np.array([[0.6391799462548782, -0.1955657291119177], + [-0.1955657291119177, 0.0624888159223392]]), + ) + + # Lorentz Peak + # The data is taken from one of the undergraduate physics labs I performed. + + def lorentz(self, beta, x): + return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x - + beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0))) + + def test_lorentz(self): + l_sy = np.array([.29]*18) + l_sx = np.array([.000972971,.000948268,.000707632,.000706679, + .000706074, .000703918,.000698955,.000456856, + .000455207,.000662717,.000654619,.000652694, + .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839]) + + l_dat = RealData( + [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608, + 3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982, + 3.6562, 3.62498, 3.55525, 3.41886], + [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122, + 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5], + sx=l_sx, + sy=l_sy, + ) + l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak')) + l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8)) + + out = l_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.4306780846149925e+03, 1.3390509034538309e-01, + 3.7798193600109009e+00]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([7.3621186811330963e-01, 3.5068899941471650e-04, + 2.4451209281408992e-04]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[2.4714409064597873e-01, -6.9067261911110836e-05, + -3.1236953270424990e-05], + [-6.9067261911110836e-05, 5.6077531517333009e-08, + 3.6133261832722601e-08], + [-3.1236953270424990e-05, 3.6133261832722601e-08, + 2.7261220025171730e-08]]), + ) + + def test_ticket_1253(self): + def linear(c, x): + return c[0]*x+c[1] + + c = [2.0, 3.0] + x = np.linspace(0, 10) + y = linear(c, x) + + model = Model(linear) + data = Data(x, y, wd=1.0, we=1.0) + job = ODR(data, model, beta0=[1.0, 1.0]) + result = job.run() + assert_equal(result.info, 2) + + # Verify fix for gh-9140 + + def test_ifixx(self): + x1 = [-2.01, -0.99, -0.001, 1.02, 1.98] + x2 = [3.98, 1.01, 0.001, 0.998, 4.01] + fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int))) + data = Data(np.vstack((x1, x2)), y=1, 
fix=fix) + model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True) + + odr1 = ODR(data, model, beta0=np.array([1.])) + sol1 = odr1.run() + odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix) + sol2 = odr2.run() + assert_equal(sol1.beta, sol2.beta) diff --git a/project/venv/lib/python2.7/site-packages/scipy/odr/tests/test_odr.pyc b/project/venv/lib/python2.7/site-packages/scipy/odr/tests/test_odr.pyc new file mode 100644 index 0000000..36eeeb5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/odr/tests/test_odr.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/__init__.py new file mode 100644 index 0000000..a0b51a7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/__init__.py @@ -0,0 +1,414 @@ +""" +===================================================== +Optimization and Root Finding (:mod:`scipy.optimize`) +===================================================== + +.. currentmodule:: scipy.optimize + +SciPy ``optimize`` provides functions for minimizing (or maximizing) +objective functions, possibly subject to constraints. It includes +solvers for nonlinear problems (with support for both local and global +optimization algorithms), linear programing, constrained +and nonlinear least-squares, root finding and curve fitting. + +Common functions and objects, shared across different solvers, are: + +.. autosummary:: + :toctree: generated/ + + show_options - Show specific options optimization solvers. + OptimizeResult - The optimization result returned by some optimizers. + OptimizeWarning - The optimization encountered problems. + + +Optimization +============ + +Scalar Functions Optimization +----------------------------- + +.. autosummary:: + :toctree: generated/ + + minimize_scalar - Interface for minimizers of univariate functions + +The `minimize_scalar` function supports the following methods: + +.. toctree:: + + optimize.minimize_scalar-brent + optimize.minimize_scalar-bounded + optimize.minimize_scalar-golden + +Local (Multivariate) Optimization +--------------------------------- + +.. autosummary:: + :toctree: generated/ + + minimize - Interface for minimizers of multivariate functions. + +The `minimize` function supports the following methods: + +.. toctree:: + + optimize.minimize-neldermead + optimize.minimize-powell + optimize.minimize-cg + optimize.minimize-bfgs + optimize.minimize-newtoncg + optimize.minimize-lbfgsb + optimize.minimize-tnc + optimize.minimize-cobyla + optimize.minimize-slsqp + optimize.minimize-trustconstr + optimize.minimize-dogleg + optimize.minimize-trustncg + optimize.minimize-trustkrylov + optimize.minimize-trustexact + +Constraints are passed to `minimize` function as a single object or +as a list of objects from the following classes: + +.. autosummary:: + :toctree: generated/ + + NonlinearConstraint - Class defining general nonlinear constraints. + LinearConstraint - Class defining general linear constraints. + +Simple bound constraints are handled separately and there is a special class +for them: + +.. autosummary:: + :toctree: generated/ + + Bounds - Bound constraints. + +Quasi-Newton strategies implementing `HessianUpdateStrategy` +interface can be used to approximate the Hessian in `minimize` +function (available only for the 'trust-constr' method). Available +quasi-Newton methods implementing this interface are: + +.. 
autosummary:: + :toctree: generated/ + + BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. + SR1 - Symmetric-rank-1 Hessian update strategy. + +Global Optimization +------------------- + +.. autosummary:: + :toctree: generated/ + + basinhopping - Basinhopping stochastic optimizer. + brute - Brute force searching optimizer. + differential_evolution - stochastic minimization using differential evolution. + + shgo - simplicial homology global optimisation + dual_annealing - Dual annealing stochastic optimizer. + + +Least-squares and Curve Fitting +=============================== + +Nonlinear Least-Squares +----------------------- + +.. autosummary:: + :toctree: generated/ + + least_squares - Solve a nonlinear least-squares problem with bounds on the variables. + +Linear Least-Squares +-------------------- + +.. autosummary:: + :toctree: generated/ + + nnls - Linear least-squares problem with non-negativity constraint. + lsq_linear - Linear least-squares problem with bound constraints. + +Curve Fitting +------------- + +.. autosummary:: + :toctree: generated/ + + curve_fit -- Fit curve to a set of points. + +Root finding +============ + +Scalar functions +---------------- +.. autosummary:: + :toctree: generated/ + + root_scalar - Unified interface for nonlinear solvers of scalar functions. + brentq - quadratic interpolation Brent method. + brenth - Brent method, modified by Harris with hyperbolic extrapolation. + ridder - Ridder's method. + bisect - Bisection method. + newton - Newton's method (also Secant and Halley's methods). + toms748 - Alefeld, Potra & Shi Algorithm 748 + RootResults - The root finding result returned by some root finders. + +The `root_scalar` function supports the following methods: + +.. toctree:: + + optimize.root_scalar-brentq + optimize.root_scalar-brenth + optimize.root_scalar-bisect + optimize.root_scalar-ridder + optimize.root_scalar-newton + optimize.root_scalar-toms748 + optimize.root_scalar-secant + optimize.root_scalar-halley + + + +The table below lists situations and appropriate methods, along with +*asymptotic* convergence rates per iteration (and per function evaluation) +for successful convergence to a simple root(*). +Bisection is the slowest of them all, adding one bit of accuracy for each +function evaluation, but is guaranteed to converge. +The other bracketing methods all (eventually) increase the number of accurate +bits by about 50% for every function evaluation. +The derivative-based methods, all built on `newton`, can converge quite quickly +if the initial value is close to the root. They can also be applied to +functions defined on (a subset of) the complex plane. + ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| Domain of f | Bracket? | Derivatives? | Solvers | Convergence | ++ + +----------+-----------+ +-------------+----------------+ +| | | `fprime` | `fprime2` | | Guaranteed? 
| Rate(s)(*) | ++=============+==========+==========+===========+=============+=============+================+ +| `R` | Yes | N/A | N/A | - bisection | - Yes | - 1 "Linear" | +| | | | | - brentq | - Yes | - >=1, <= 1.62 | +| | | | | - brenth | - Yes | - >=1, <= 1.62 | +| | | | | - ridder | - Yes | - 2.0 (1.41) | +| | | | | - toms748 | - Yes | - 2.7 (1.65) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| `R` or `C` | No | No | No | secant | No | 1.62 (1.62) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| `R` or `C` | No | Yes | No | newton | No | 2.00 (1.41) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| `R` or `C` | No | Yes | Yes | halley | No | 3.00 (1.44) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ + + +Fixed point finding: + +.. autosummary:: + :toctree: generated/ + + fixed_point - Single-variable fixed-point solver. + +Multidimensional +---------------- + +.. autosummary:: + :toctree: generated/ + + root - Unified interface for nonlinear solvers of multivariate functions. + +The `root` function supports the following methods: + +.. toctree:: + + optimize.root-hybr + optimize.root-lm + optimize.root-broyden1 + optimize.root-broyden2 + optimize.root-anderson + optimize.root-linearmixing + optimize.root-diagbroyden + optimize.root-excitingmixing + optimize.root-krylov + optimize.root-dfsane + +Linear Programming +================== + +.. autosummary:: + :toctree: generated/ + + linprog -- Unified interface for minimizers of linear programming problems. + +The `linprog` function supports the following methods: + +.. toctree:: + + optimize.linprog-simplex + optimize.linprog-interior-point + +The simplex method supports callback functions, such as: + +.. autosummary:: + :toctree: generated/ + + linprog_verbose_callback -- Sample callback function for linprog (simplex). + +Assignment problems: + +.. autosummary:: + :toctree: generated/ + + linear_sum_assignment -- Solves the linear-sum assignment problem. + +Utilities +========= + +Finite-Difference Approximation +------------------------------- + +.. autosummary:: + :toctree: generated/ + + approx_fprime - Approximate the gradient of a scalar function. + check_grad - Check the supplied derivative using finite differences. + + +Line Search +----------- + +.. autosummary:: + :toctree: generated/ + + bracket - Bracket a minimum, given two starting points. + line_search - Return a step that satisfies the strong Wolfe conditions. + +Hessian Approximation +--------------------- + +.. autosummary:: + :toctree: generated/ + + LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian. + HessianUpdateStrategy - Interface for implementing Hessian update strategies + +Benchmark Problems +------------------ + +.. autosummary:: + :toctree: generated/ + + rosen - The Rosenbrock function. + rosen_der - The derivative of the Rosenbrock function. + rosen_hess - The Hessian matrix of the Rosenbrock function. + rosen_hess_prod - Product of the Rosenbrock Hessian with a vector. + +Legacy Functions +================ + +The functions below are not recommended for use in new scripts; +all of these methods are accessible via a newer, more consistent +interfaces, provided by the interfaces above. + +Optimization +------------ + +General-purpose multivariate methods: + +.. 
autosummary:: + :toctree: generated/ + + fmin - Nelder-Mead Simplex algorithm. + fmin_powell - Powell's (modified) level set method. + fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm. + fmin_bfgs - Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno). + fmin_ncg - Line-search Newton Conjugate Gradient. + +Constrained multivariate methods: + +.. autosummary:: + :toctree: generated/ + + fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer. + fmin_tnc - Truncated Newton code. + fmin_cobyla - Constrained optimization by linear approximation. + fmin_slsqp - Minimization using sequential least-squares programming. + differential_evolution - stochastic minimization using differential evolution. + +Univariate (scalar) minimization methods: + +.. autosummary:: + :toctree: generated/ + + fminbound - Bounded minimization of a scalar function. + brent - 1-D function minimization using Brent method. + golden - 1-D function minimization using Golden Section method. + +Least-Squares +------------- + +.. autosummary:: + :toctree: generated/ + + leastsq - Minimize the sum of squares of M equations in N unknowns. + +Root Finding +------------ + +General nonlinear solvers: + +.. autosummary:: + :toctree: generated/ + + fsolve - Non-linear multi-variable equation solver. + broyden1 - Broyden's first method. + broyden2 - Broyden's second method. + +Large-scale nonlinear solvers: + +.. autosummary:: + :toctree: generated/ + + newton_krylov + anderson + +Simple iteration solvers: + +.. autosummary:: + :toctree: generated/ + + excitingmixing + linearmixing + diagbroyden + +:mod:`Additional information on the nonlinear solvers <scipy.optimize.nonlin>` +""" + +from __future__ import division, print_function, absolute_import + +from .optimize import * +from ._minimize import * +from ._root import * +from ._root_scalar import * +from .minpack import * +from .zeros import * +from .lbfgsb import fmin_l_bfgs_b, LbfgsInvHessProduct +from .tnc import fmin_tnc +from .cobyla import fmin_cobyla +from .nonlin import * +from .slsqp import fmin_slsqp +from .nnls import nnls +from ._basinhopping import basinhopping +from ._linprog import linprog, linprog_verbose_callback +from ._hungarian import linear_sum_assignment +from ._differentialevolution import differential_evolution +from ._lsq import least_squares, lsq_linear +from ._constraints import (NonlinearConstraint, + LinearConstraint, + Bounds) +from ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1 +from ._shgo import shgo +from ._dual_annealing import dual_annealing + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/__init__.pyc new file mode 100644 index 0000000..f294a7b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_basinhopping.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_basinhopping.py new file mode 100644 index 0000000..89cfe95 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_basinhopping.py @@ -0,0 +1,736 @@ +""" +basinhopping: The basinhopping global optimization algorithm +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +import math +from numpy import cos, sin 
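+# ---------------------------------------------------------------------------
+# Editor's sketch (not part of the upstream scipy source): minimal usage of
+# the public entry point defined below. The one-dimensional objective is the
+# standard scipy documentation example; x0 and seed are illustrative.
+#
+#     import numpy as np
+#     from scipy.optimize import basinhopping
+#
+#     func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
+#     ret = basinhopping(func, x0=[1.0], niter=200,
+#                        minimizer_kwargs={"method": "BFGS"}, seed=1)
+#     print(ret.x, ret.fun)   # best coordinates and function value found
+# ---------------------------------------------------------------------------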
+import scipy.optimize +from scipy._lib._util import check_random_state + +__all__ = ['basinhopping'] + + +class Storage(object): + """ + Class used to store the lowest energy structure + """ + def __init__(self, minres): + self._add(minres) + + def _add(self, minres): + self.minres = minres + self.minres.x = np.copy(minres.x) + + def update(self, minres): + if minres.fun < self.minres.fun: + self._add(minres) + return True + else: + return False + + def get_lowest(self): + return self.minres + + +class BasinHoppingRunner(object): + """This class implements the core of the basinhopping algorithm. + + x0 : ndarray + The starting coordinates. + minimizer : callable + The local minimizer, with signature ``result = minimizer(x)``. + The return value is an `optimize.OptimizeResult` object. + step_taking : callable + This function displaces the coordinates randomly. Signature should + be ``x_new = step_taking(x)``. Note that `x` may be modified in-place. + accept_tests : list of callables + Each test is passed the kwargs `f_new`, `x_new`, `f_old` and + `x_old`. These tests will be used to judge whether or not to accept + the step. The acceptable return values are True, False, or ``"force + accept"``. If any of the tests return False then the step is rejected. + If ``"force accept"``, then this will override any other tests in + order to accept the step. This can be used, for example, to forcefully + escape from a local minimum that ``basinhopping`` is trapped in. + disp : bool, optional + Display status messages. + + """ + def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False): + self.x = np.copy(x0) + self.minimizer = minimizer + self.step_taking = step_taking + self.accept_tests = accept_tests + self.disp = disp + + self.nstep = 0 + + # initialize return object + self.res = scipy.optimize.OptimizeResult() + self.res.minimization_failures = 0 + + # do initial minimization + minres = minimizer(self.x) + if not minres.success: + self.res.minimization_failures += 1 + if self.disp: + print("warning: basinhopping: local minimization failure") + self.x = np.copy(minres.x) + self.energy = minres.fun + if self.disp: + print("basinhopping step %d: f %g" % (self.nstep, self.energy)) + + # initialize storage class + self.storage = Storage(minres) + + if hasattr(minres, "nfev"): + self.res.nfev = minres.nfev + if hasattr(minres, "njev"): + self.res.njev = minres.njev + if hasattr(minres, "nhev"): + self.res.nhev = minres.nhev + + def _monte_carlo_step(self): + """Do one Monte Carlo iteration + + Randomly displace the coordinates, minimize, and decide whether + or not to accept the new coordinates. + """ + # Take a random step. Make a copy of x because the step_taking + # algorithm might change x in place + x_after_step = np.copy(self.x) + x_after_step = self.step_taking(x_after_step) + + # do a local minimization + minres = self.minimizer(x_after_step) + x_after_quench = minres.x + energy_after_quench = minres.fun + if not minres.success: + self.res.minimization_failures += 1 + if self.disp: + print("warning: basinhopping: local minimization failure") + + if hasattr(minres, "nfev"): + self.res.nfev += minres.nfev + if hasattr(minres, "njev"): + self.res.njev += minres.njev + if hasattr(minres, "nhev"): + self.res.nhev += minres.nhev + + # accept the move based on self.accept_tests. If any test is False, + # then reject the step. If any test returns the special string + # 'force accept', then accept the step regardless. 
This can be used + # to forcefully escape from a local minimum if normal basin hopping + # steps are not sufficient. + accept = True + for test in self.accept_tests: + testres = test(f_new=energy_after_quench, x_new=x_after_quench, + f_old=self.energy, x_old=self.x) + if testres == 'force accept': + accept = True + break + elif testres is None: + raise ValueError("accept_tests must return True, False, or " + "'force accept'") + elif not testres: + accept = False + + # Report the result of the acceptance test to the take step class. + # This is for adaptive step taking + if hasattr(self.step_taking, "report"): + self.step_taking.report(accept, f_new=energy_after_quench, + x_new=x_after_quench, f_old=self.energy, + x_old=self.x) + + return accept, minres + + def one_cycle(self): + """Do one cycle of the basinhopping algorithm + """ + self.nstep += 1 + new_global_min = False + + accept, minres = self._monte_carlo_step() + + if accept: + self.energy = minres.fun + self.x = np.copy(minres.x) + new_global_min = self.storage.update(minres) + + # print some information + if self.disp: + self.print_report(minres.fun, accept) + if new_global_min: + print("found new global minimum on step %d with function" + " value %g" % (self.nstep, self.energy)) + + # save some variables as BasinHoppingRunner attributes + self.xtrial = minres.x + self.energy_trial = minres.fun + self.accept = accept + + return new_global_min + + def print_report(self, energy_trial, accept): + """print a status update""" + minres = self.storage.get_lowest() + print("basinhopping step %d: f %g trial_f %g accepted %d " + " lowest_f %g" % (self.nstep, self.energy, energy_trial, + accept, minres.fun)) + + +class AdaptiveStepsize(object): + """ + Class to implement adaptive stepsize. + + This class wraps the step taking class and modifies the stepsize to + ensure the true acceptance rate is as close as possible to the target. + + Parameters + ---------- + takestep : callable + The step taking routine. Must contain modifiable attribute + takestep.stepsize + accept_rate : float, optional + The target step acceptance rate + interval : int, optional + Interval for how often to update the stepsize + factor : float, optional + The step size is multiplied or divided by this factor upon each + update. + verbose : bool, optional + Print information about each update + + """ + def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9, + verbose=True): + self.takestep = takestep + self.target_accept_rate = accept_rate + self.interval = interval + self.factor = factor + self.verbose = verbose + + self.nstep = 0 + self.nstep_tot = 0 + self.naccept = 0 + + def __call__(self, x): + return self.take_step(x) + + def _adjust_step_size(self): + old_stepsize = self.takestep.stepsize + accept_rate = float(self.naccept) / self.nstep + if accept_rate > self.target_accept_rate: + # We're accepting too many steps. This generally means we're + # trapped in a basin. Take bigger steps + self.takestep.stepsize /= self.factor + else: + # We're not accepting enough steps. 
Take smaller steps + self.takestep.stepsize *= self.factor + if self.verbose: + print("adaptive stepsize: acceptance rate %f target %f new " + "stepsize %g old stepsize %g" % (accept_rate, + self.target_accept_rate, self.takestep.stepsize, + old_stepsize)) + + def take_step(self, x): + self.nstep += 1 + self.nstep_tot += 1 + if self.nstep % self.interval == 0: + self._adjust_step_size() + return self.takestep(x) + + def report(self, accept, **kwargs): + "called by basinhopping to report the result of the step" + if accept: + self.naccept += 1 + + +class RandomDisplacement(object): + """ + Add a random displacement of maximum size `stepsize` to each coordinate + + Calling this updates `x` in-place. + + Parameters + ---------- + stepsize : float, optional + Maximum stepsize in any dimension + random_state : None or `np.random.RandomState` instance, optional + The random number generator that generates the displacements + """ + def __init__(self, stepsize=0.5, random_state=None): + self.stepsize = stepsize + self.random_state = check_random_state(random_state) + + def __call__(self, x): + x += self.random_state.uniform(-self.stepsize, self.stepsize, + np.shape(x)) + return x + + +class MinimizerWrapper(object): + """ + wrap a minimizer function as a minimizer class + """ + def __init__(self, minimizer, func=None, **kwargs): + self.minimizer = minimizer + self.func = func + self.kwargs = kwargs + + def __call__(self, x0): + if self.func is None: + return self.minimizer(x0, **self.kwargs) + else: + return self.minimizer(self.func, x0, **self.kwargs) + + +class Metropolis(object): + """ + Metropolis acceptance criterion + + Parameters + ---------- + T : float + The "temperature" parameter for the accept or reject criterion. + random_state : None or `np.random.RandomState` object + Random number generator used for acceptance test + """ + def __init__(self, T, random_state=None): + # Avoid ZeroDivisionError since "MBH can be regarded as a special case + # of the BH framework with the Metropolis criterion, where temperature + # T = 0." (Reject all steps that increase energy.) + self.beta = 1.0 / T if T != 0 else float('inf') + self.random_state = check_random_state(random_state) + + def accept_reject(self, energy_new, energy_old): + """ + If new energy is lower than old, it will always be accepted. + If new is higher than old, there is a chance it will be accepted, + less likely for larger differences. + """ + w = math.exp(min(0, -float(energy_new - energy_old) * self.beta)) + rand = self.random_state.rand() + return w >= rand + + def __call__(self, **kwargs): + """ + f_new and f_old are mandatory in kwargs + """ + return bool(self.accept_reject(kwargs["f_new"], + kwargs["f_old"])) + + +def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5, + minimizer_kwargs=None, take_step=None, accept_test=None, + callback=None, interval=50, disp=False, niter_success=None, + seed=None): + """ + Find the global minimum of a function using the basin-hopping algorithm + + Basin-hopping is a two-phase method that combines a global stepping + algorithm with local minimization at each step. Designed to mimic + the natural process of energy minimization of clusters of atoms, it works + well for similar problems with "funnel-like, but rugged" energy landscapes + [5]_. + + As the step-taking, step acceptance, and minimization methods are all + customizable, this function can also be used to implement other two-phase + methods. + + Parameters + ---------- + func : callable ``f(x, *args)`` + Function to be optimized. 
``args`` can be passed as an optional item
+        in the dict ``minimizer_kwargs``
+    x0 : array_like
+        Initial guess.
+    niter : integer, optional
+        The number of basin-hopping iterations
+    T : float, optional
+        The "temperature" parameter for the accept or reject criterion. Higher
+        "temperatures" mean that larger jumps in function value will be
+        accepted. For best results ``T`` should be comparable to the
+        separation (in function value) between local minima.
+    stepsize : float, optional
+        Maximum step size for use in the random displacement.
+    minimizer_kwargs : dict, optional
+        Extra keyword arguments to be passed to the local minimizer
+        ``scipy.optimize.minimize()``. Some important options could be:
+
+            method : str
+                The minimization method (e.g. ``"L-BFGS-B"``)
+            args : tuple
+                Extra arguments passed to the objective function (``func``) and
+                its derivatives (Jacobian, Hessian).
+
+    take_step : callable ``take_step(x)``, optional
+        Replace the default step-taking routine with this routine. The default
+        step-taking routine is a random displacement of the coordinates, but
+        other step-taking algorithms may be better for some systems.
+        ``take_step`` can optionally have the attribute ``take_step.stepsize``.
+        If this attribute exists, then ``basinhopping`` will adjust
+        ``take_step.stepsize`` in order to try to optimize the global minimum
+        search.
+    accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=f_old, x_old=x_old)``, optional
+        Define a test which will be used to judge whether or not to accept the
+        step. This will be used in addition to the Metropolis test based on
+        "temperature" ``T``. The acceptable return values are True,
+        False, or ``"force accept"``. If any of the tests return False
+        then the step is rejected. If ``"force accept"`` is returned, it will
+        override any other tests in order to accept the step. This can be used,
+        for example, to forcefully escape from a local minimum that
+        ``basinhopping`` is trapped in.
+    callback : callable, ``callback(x, f, accept)``, optional
+        A callback function which will be called for all minima found. ``x``
+        and ``f`` are the coordinates and function value of the trial minimum,
+        and ``accept`` is whether or not that minimum was accepted. This can
+        be used, for example, to save the lowest N minima found. Also,
+        ``callback`` can be used to specify a user defined stop criterion by
+        optionally returning True to stop the ``basinhopping`` routine.
+    interval : integer, optional
+        Interval for how often to update the ``stepsize``
+    disp : bool, optional
+        Set to True to print status messages
+    niter_success : integer, optional
+        Stop the run if the global minimum candidate remains the same for this
+        number of iterations.
+    seed : int or `np.random.RandomState`, optional
+        If `seed` is not specified the `np.random.RandomState` singleton is
+        used.
+        If `seed` is an int, a new `np.random.RandomState` instance is used,
+        seeded with `seed`.
+        If `seed` is already a `np.random.RandomState` instance, then that
+        `np.random.RandomState` instance is used.
+        Specify `seed` for repeatable minimizations. The random numbers
+        generated with this seed only affect the default Metropolis
+        `accept_test` and the default `take_step`. If you supply your own
+        `take_step` and `accept_test`, and these functions use random
+        number generation, then those functions are responsible for the state
+        of their random number generator.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as an ``OptimizeResult`` object.
+        Important attributes are: ``x`` the solution array, ``fun`` the value
+        of the function at the solution, and ``message`` which describes the
+        cause of the termination. The ``OptimizeResult`` object returned by the
+        selected minimizer at the lowest minimum is also contained within this
+        object and can be accessed through the ``lowest_optimization_result``
+        attribute. See `OptimizeResult` for a description of other attributes.
+
+    See Also
+    --------
+    minimize :
+        The local minimization function called once for each basinhopping step.
+        ``minimizer_kwargs`` is passed to this routine.
+
+    Notes
+    -----
+    Basin-hopping is a stochastic algorithm which attempts to find the global
+    minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_
+    [4]_. The algorithm in its current form was described by David Wales and
+    Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/.
+
+    The algorithm is iterative, with each cycle composed of the following
+    steps:
+
+    1) random perturbation of the coordinates
+
+    2) local minimization
+
+    3) accept or reject the new coordinates based on the minimized function
+       value
+
+    The acceptance test used here is the Metropolis criterion of standard Monte
+    Carlo algorithms, although there are many other possibilities [3]_.
+
+    This global minimization method has been shown to be extremely efficient
+    for a wide variety of problems in physics and chemistry. It is
+    particularly useful when the function has many minima separated by large
+    barriers. See the Cambridge Cluster Database
+    http://www-wales.ch.cam.ac.uk/CCD.html for databases of molecular systems
+    that have been optimized primarily using basin-hopping. This database
+    includes minimization problems exceeding 300 degrees of freedom.
+
+    See the free software program GMIN (http://www-wales.ch.cam.ac.uk/GMIN) for
+    a Fortran implementation of basin-hopping. This implementation has many
+    different variations of the procedure described above, including more
+    advanced step taking algorithms and alternative acceptance criteria.
+
+    For stochastic global optimization there is no way to determine if the true
+    global minimum has actually been found. Instead, as a consistency check,
+    the algorithm can be run from a number of different random starting points
+    to ensure the lowest minimum found in each example has converged to the
+    global minimum. For this reason ``basinhopping`` will by default simply
+    run for the number of iterations ``niter`` and return the lowest minimum
+    found. It is left to the user to ensure that this is in fact the global
+    minimum.
+
+    Choosing ``stepsize``: This is a crucial parameter in ``basinhopping`` and
+    depends on the problem being solved. The step is chosen uniformly in the
+    region from x0-stepsize to x0+stepsize, in each dimension. Ideally it
+    should be comparable to the typical separation (in argument values) between
+    local minima of the function being optimized. ``basinhopping`` will, by
+    default, adjust ``stepsize`` to find an optimal value, but this may take
+    many iterations. You will get quicker results if you set a sensible
+    initial value for ``stepsize``.
+
+    Choosing ``T``: The parameter ``T`` is the "temperature" used in the
+    Metropolis criterion. Basinhopping steps are always accepted if
+    ``func(xnew) < func(xold)``. Otherwise, they are accepted with
+    probability::
+
+        exp( -(func(xnew) - func(xold)) / T )
+
+    So, for best results, ``T`` should be comparable to the typical
+    difference (in function values) between local minima. (The height of
+    "walls" between local minima is irrelevant.)
+
+    If ``T`` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all
+    steps that increase energy are rejected.
+
+    .. versionadded:: 0.12.0
+
+    References
+    ----------
+    .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press,
+        Cambridge, UK.
+    .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and
+        the Lowest Energy Structures of Lennard-Jones Clusters Containing up to
+        110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111.
+    .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the
+        multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA,
+        1987, 84, 6611.
+    .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters,
+        crystals, and biomolecules, Science, 1999, 285, 1368.
+    .. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu, A., Basin Hopping as
+        a General and Versatile Optimization Framework for the Characterization
+        of Biological Macromolecules, Advances in Artificial Intelligence,
+        Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832`
+
+    Examples
+    --------
+    The following example is a one-dimensional minimization problem, with many
+    local minima superimposed on a parabola.
+
+    >>> from scipy.optimize import basinhopping
+    >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
+    >>> x0 = [1.]
+
+    Basinhopping, internally, uses a local minimization algorithm. We will use
+    the parameter ``minimizer_kwargs`` to tell basinhopping which algorithm to
+    use and how to set up that minimizer. This parameter will be passed to
+    ``scipy.optimize.minimize()``.
+
+    >>> minimizer_kwargs = {"method": "BFGS"}
+    >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs,
+    ...                    niter=200)
+    >>> print("global minimum: x = %.4f, f(x0) = %.4f" % (ret.x, ret.fun))
+    global minimum: x = -0.1951, f(x0) = -1.0009
+
+    Next consider a two-dimensional minimization problem. Also, this time we
+    will use gradient information to significantly speed up the search.
+
+    >>> def func2d(x):
+    ...     f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] +
+    ...                                                            0.2) * x[0]
+    ...     df = np.zeros(2)
+    ...     df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
+    ...     df[1] = 2. * x[1] + 0.2
+    ...     return f, df
+
+    We'll also use a different local minimization algorithm. Also, we must tell
+    the minimizer that our function returns both energy and gradient (Jacobian).
+
+    >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True}
+    >>> x0 = [1.0, 1.0]
+    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
+    ...                    niter=200)
+    >>> print("global minimum: x = [%.4f, %.4f], f(x0) = %.4f" % (ret.x[0],
+    ...                                                           ret.x[1],
+    ...                                                           ret.fun))
+    global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109
+
+    Here is an example using a custom step-taking routine. Imagine you want
+    the first coordinate to take larger steps than the rest of the coordinates.
+    This can be implemented like so:
+
+    >>> class MyTakeStep(object):
+    ...    def __init__(self, stepsize=0.5):
+    ...        self.stepsize = stepsize
+    ...    def __call__(self, x):
+    ...        s = self.stepsize
+    ...        x[0] += np.random.uniform(-2.*s, 2.*s)
+    ...        x[1:] += np.random.uniform(-s, s, x[1:].shape)
+    ...        return x
+
+    Since ``MyTakeStep.stepsize`` exists, basinhopping will adjust the magnitude
+    of ``stepsize`` to optimize the search. We'll use the same 2-D function as
+    before:
+
+    >>> mytakestep = MyTakeStep()
+    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
+    ...                    
niter=200, take_step=mytakestep) + >>> print("global minimum: x = [%.4f, %.4f], f(x0) = %.4f" % (ret.x[0], + ... ret.x[1], + ... ret.fun)) + global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109 + + + Now let's do an example using a custom callback function which prints the + value of every minimum found + + >>> def print_fun(x, f, accepted): + ... print("at minimum %.4f accepted %d" % (f, int(accepted))) + + We'll run it for only 10 basinhopping steps this time. + + >>> np.random.seed(1) + >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=10, callback=print_fun) + at minimum 0.4159 accepted 1 + at minimum -0.9073 accepted 1 + at minimum -0.1021 accepted 1 + at minimum -0.1021 accepted 1 + at minimum 0.9102 accepted 1 + at minimum 0.9102 accepted 1 + at minimum 2.2945 accepted 0 + at minimum -0.1021 accepted 1 + at minimum -1.0109 accepted 1 + at minimum -1.0109 accepted 1 + + + The minimum at -1.0109 is actually the global minimum, found already on the + 8th iteration. + + Now let's implement bounds on the problem using a custom ``accept_test``: + + >>> class MyBounds(object): + ... def __init__(self, xmax=[1.1,1.1], xmin=[-1.1,-1.1] ): + ... self.xmax = np.array(xmax) + ... self.xmin = np.array(xmin) + ... def __call__(self, **kwargs): + ... x = kwargs["x_new"] + ... tmax = bool(np.all(x <= self.xmax)) + ... tmin = bool(np.all(x >= self.xmin)) + ... return tmax and tmin + + >>> mybounds = MyBounds() + >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=10, accept_test=mybounds) + + """ + x0 = np.array(x0) + + # set up the np.random.RandomState generator + rng = check_random_state(seed) + + # set up minimizer + if minimizer_kwargs is None: + minimizer_kwargs = dict() + wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func, + **minimizer_kwargs) + + # set up step-taking algorithm + if take_step is not None: + if not callable(take_step): + raise TypeError("take_step must be callable") + # if take_step.stepsize exists then use AdaptiveStepsize to control + # take_step.stepsize + if hasattr(take_step, "stepsize"): + take_step_wrapped = AdaptiveStepsize(take_step, interval=interval, + verbose=disp) + else: + take_step_wrapped = take_step + else: + # use default + displace = RandomDisplacement(stepsize=stepsize, random_state=rng) + take_step_wrapped = AdaptiveStepsize(displace, interval=interval, + verbose=disp) + + # set up accept tests + accept_tests = [] + if accept_test is not None: + if not callable(accept_test): + raise TypeError("accept_test must be callable") + accept_tests = [accept_test] + + # use default + metropolis = Metropolis(T, random_state=rng) + accept_tests.append(metropolis) + + if niter_success is None: + niter_success = niter + 2 + + bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped, + accept_tests, disp=disp) + + # start main iteration loop + count, i = 0, 0 + message = ["requested number of basinhopping iterations completed" + " successfully"] + for i in range(niter): + new_global_min = bh.one_cycle() + + if callable(callback): + # should we pass a copy of x? 
+            val = callback(bh.xtrial, bh.energy_trial, bh.accept)
+            if val is not None:
+                if val:
+                    message = ["callback function requested stop early by "
+                               "returning True"]
+                    break
+
+        count += 1
+        if new_global_min:
+            count = 0
+        elif count > niter_success:
+            message = ["success condition satisfied"]
+            break
+
+    # prepare return object
+    res = bh.res
+    res.lowest_optimization_result = bh.storage.get_lowest()
+    res.x = np.copy(res.lowest_optimization_result.x)
+    res.fun = res.lowest_optimization_result.fun
+    res.message = message
+    res.nit = i + 1
+    return res
+
+
+def _test_func2d_nograd(x):
+    f = (cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0] +
+         1.010876184442655)
+    return f
+
+
+def _test_func2d(x):
+    f = (cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0] + cos(14.5 * x[1] -
+         0.3) + (x[1] + 0.2) * x[1] + x[0] * x[1] + 1.963879482144252)
+    df = np.zeros(2)
+    df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 + x[1]
+    df[1] = -14.5 * sin(14.5 * x[1] - 0.3) + 2. * x[1] + 0.2 + x[0]
+    return f, df
+
+
+if __name__ == "__main__":
+    print("\n\nminimize a 2d function without gradient")
+    # minimum expected at ~[-0.195, -0.1]
+    kwargs = {"method": "L-BFGS-B"}
+    x0 = np.array([1.0, 1.])
+    scipy.optimize.minimize(_test_func2d_nograd, x0, **kwargs)
+    ret = basinhopping(_test_func2d_nograd, x0, minimizer_kwargs=kwargs,
+                       niter=200, disp=False)
+    print("minimum expected at func([-0.195, -0.1]) = 0.0")
+    print(ret)
+
+    print("\n\ntry a harder 2d problem")
+    kwargs = {"method": "L-BFGS-B", "jac": True}
+    x0 = np.array([1.0, 1.0])
+    ret = basinhopping(_test_func2d, x0, minimizer_kwargs=kwargs, niter=200,
+                       disp=False)
+    print("minimum expected at ~ func([-0.19415263, -0.19415263]) = 0")
+    print(ret)
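As a sketch of the callback-based early stop handled above (names here are illustrative; the objective is the 1-D test function from the docstring examples):

    import numpy as np
    from scipy.optimize import basinhopping

    def func(x):
        return np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]

    def stop_when_low(x, f, accept):
        # Returning True asks basinhopping to stop before niter is exhausted;
        # res.message then reports that the callback requested the stop.
        return f < -1.0

    res = basinhopping(func, [1.0], minimizer_kwargs={"method": "BFGS"},
                       niter=200, callback=stop_when_low, seed=123)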
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_basinhopping.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_basinhopping.pyc
new file mode 100644
index 0000000..88bdbea
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_basinhopping.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_cobyla.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/_cobyla.so
new file mode 100755
index 0000000..8f1d89d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_cobyla.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_constraints.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_constraints.py
new file mode 100644
index 0000000..7dd2947
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_constraints.py
@@ -0,0 +1,420 @@
+"""Constraints definition for minimize."""
+from __future__ import division, print_function, absolute_import
+import numpy as np
+from ._hessian_update_strategy import BFGS
+from ._differentiable_functions import (
+    VectorFunction, LinearVectorFunction, IdentityVectorFunction)
+from .optimize import OptimizeWarning
+from warnings import warn
+from scipy.sparse import issparse
+
+class NonlinearConstraint(object):
+    """Nonlinear constraint on the variables.
+
+    The constraint has the general inequality form::
+
+        lb <= fun(x) <= ub
+
+    Here the vector of independent variables x is passed as ndarray of shape
+    (n,) and ``fun`` returns a vector with m components.
+
+    It is possible to use equal bounds to represent an equality constraint or
+    infinite bounds to represent a one-sided constraint.
+
+    Parameters
+    ----------
+    fun : callable
+        The function defining the constraint.
+        The signature is ``fun(x) -> array_like, shape (m,)``.
+    lb, ub : array_like
+        Lower and upper bounds on the constraint. Each array must have the
+        shape (m,) or be a scalar, in the latter case a bound will be the same
+        for all components of the constraint. Use ``np.inf`` with an
+        appropriate sign to specify a one-sided constraint.
+        Set components of `lb` and `ub` equal to represent an equality
+        constraint. Note that you can mix constraints of different types:
+        interval, one-sided or equality, by setting different components of
+        `lb` and `ub` as necessary.
+    jac : {callable, '2-point', '3-point', 'cs'}, optional
+        Method of computing the Jacobian matrix (an m-by-n matrix,
+        where element (i, j) is the partial derivative of f[i] with
+        respect to x[j]). The keywords {'2-point', '3-point',
+        'cs'} select a finite difference scheme for the numerical estimation.
+        A callable must have the following signature:
+        ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
+        Default is '2-point'.
+    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
+        Method for computing the Hessian matrix. The keywords
+        {'2-point', '3-point', 'cs'} select a finite difference scheme for
+        numerical estimation. Alternatively, objects implementing the
+        `HessianUpdateStrategy` interface can be used to approximate the
+        Hessian. Currently available implementations are:
+
+            - `BFGS` (default option)
+            - `SR1`
+
+        A callable must return the Hessian matrix of ``dot(fun, v)`` and
+        must have the following signature:
+        ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``.
+        Here ``v`` is an ndarray with shape (m,) containing Lagrange multipliers.
+    keep_feasible : array_like of bool, optional
+        Whether to keep the constraint components feasible throughout
+        iterations. A single value sets this property for all components.
+        Default is False. Has no effect for equality constraints.
+    finite_diff_rel_step : None or array_like, optional
+        Relative step size for the finite difference approximation. Default is
+        None, which will select a reasonable value automatically depending
+        on a finite difference scheme.
+    finite_diff_jac_sparsity : {None, array_like, sparse matrix}, optional
+        Defines the sparsity structure of the Jacobian matrix for finite
+        difference estimation, its shape must be (m, n). If the Jacobian has
+        only a few non-zero elements in *each* row, providing the sparsity
+        structure will greatly speed up the computations. A zero entry means
+        that a corresponding element in the Jacobian is identically zero.
+        If provided, forces the use of the 'lsmr' trust-region solver.
+        If None (default) then dense differencing will be used.
+
+    Notes
+    -----
+    Finite difference schemes {'2-point', '3-point', 'cs'} may be used for
+    approximating either the Jacobian or the Hessian. We, however, do not allow
+    its use for approximating both simultaneously. Hence whenever the Jacobian
+    is estimated via finite-differences, we require the Hessian to be estimated
+    using one of the quasi-Newton strategies.
+
+    The scheme 'cs' is potentially the most accurate, but requires the function
+    to correctly handle complex inputs and be analytically continuable to the
+    complex plane. The scheme '3-point' is more accurate than '2-point' but
+    requires twice as many operations.
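+
+    Examples
+    --------
+    As a minimal sketch, the two-sided condition ``-1 <= x[0] - x[1] <= 1``
+    can be expressed with the parameters described above:
+
+    >>> from scipy.optimize import NonlinearConstraint
+    >>> con = lambda x: x[0] - x[1]
+    >>> nlc = NonlinearConstraint(con, -1, 1)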
+    """
+    def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(),
+                 keep_feasible=False, finite_diff_rel_step=None,
+                 finite_diff_jac_sparsity=None):
+        self.fun = fun
+        self.lb = lb
+        self.ub = ub
+        self.finite_diff_rel_step = finite_diff_rel_step
+        self.finite_diff_jac_sparsity = finite_diff_jac_sparsity
+        self.jac = jac
+        self.hess = hess
+        self.keep_feasible = keep_feasible
+
+
+class LinearConstraint(object):
+    """Linear constraint on the variables.
+
+    The constraint has the general inequality form::
+
+        lb <= A.dot(x) <= ub
+
+    Here the vector of independent variables x is passed as ndarray of shape
+    (n,) and the matrix A has shape (m, n).
+
+    It is possible to use equal bounds to represent an equality constraint or
+    infinite bounds to represent a one-sided constraint.
+
+    Parameters
+    ----------
+    A : {array_like, sparse matrix}, shape (m, n)
+        Matrix defining the constraint.
+    lb, ub : array_like
+        Lower and upper bounds on the constraint. Each array must have the
+        shape (m,) or be a scalar, in the latter case a bound will be the same
+        for all components of the constraint. Use ``np.inf`` with an
+        appropriate sign to specify a one-sided constraint.
+        Set components of `lb` and `ub` equal to represent an equality
+        constraint. Note that you can mix constraints of different types:
+        interval, one-sided or equality, by setting different components of
+        `lb` and `ub` as necessary.
+    keep_feasible : array_like of bool, optional
+        Whether to keep the constraint components feasible throughout
+        iterations. A single value sets this property for all components.
+        Default is False. Has no effect for equality constraints.
+    """
+    def __init__(self, A, lb, ub, keep_feasible=False):
+        self.A = A
+        self.lb = lb
+        self.ub = ub
+        self.keep_feasible = keep_feasible
+
+
+class Bounds(object):
+    """Bounds constraint on the variables.
+
+    The constraint has the general inequality form::
+
+        lb <= x <= ub
+
+    It is possible to use equal bounds to represent an equality constraint or
+    infinite bounds to represent a one-sided constraint.
+
+    Parameters
+    ----------
+    lb, ub : array_like, optional
+        Lower and upper bounds on independent variables. Each array must
+        have the same size as x or be a scalar, in which case a bound will be
+        the same for all the variables. Set components of `lb` and `ub` equal
+        to fix a variable. Use ``np.inf`` with an appropriate sign to disable
+        bounds on all or some variables. Note that you can mix constraints of
+        different types: interval, one-sided or equality, by setting different
+        components of `lb` and `ub` as necessary.
+    keep_feasible : array_like of bool, optional
+        Whether to keep the constraint components feasible throughout
+        iterations. A single value sets this property for all components.
+        Default is False. Has no effect for equality constraints.
+    """
+    def __init__(self, lb, ub, keep_feasible=False):
+        self.lb = lb
+        self.ub = ub
+        self.keep_feasible = keep_feasible
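+
+# A hedged usage sketch for the constraint classes above (illustrative values
+# only): box bounds 0 <= x_i <= 1 combined with the linear relation
+# x[0] + 2*x[1] <= 3.
+#
+#     import numpy as np
+#     from scipy.optimize import Bounds, LinearConstraint
+#
+#     bounds = Bounds(0, 1)                         # scalar lb/ub broadcast
+#     lin = LinearConstraint([[1, 2]], -np.inf, 3)  # one-sided via -np.inf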
+
+
+class PreparedConstraint(object):
+    """Constraint prepared from a user defined constraint.
+
+    On creation it will check whether a constraint definition is valid and
+    the initial point is feasible. If created successfully, it will contain
+    the attributes listed below.
+
+    Parameters
+    ----------
+    constraint : {NonlinearConstraint, LinearConstraint, Bounds}
+        Constraint to check and prepare.
+    x0 : array_like
+        Initial vector of independent variables.
+    sparse_jacobian : bool or None, optional
+        If bool, then the Jacobian of the constraint will be converted
+        to the corresponding format if necessary. If None (default), such
+        conversion is not made.
+    finite_diff_bounds : 2-tuple, optional
+        Lower and upper bounds on the independent variables for the finite
+        difference approximation, if applicable. Defaults to no bounds.
+
+    Attributes
+    ----------
+    fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
+        Function defining the constraint wrapped by one of the convenience
+        classes.
+    bounds : 2-tuple
+        Contains lower and upper bounds for the constraints --- lb and ub.
+        These are converted to ndarray and have a size equal to the number of
+        the constraints.
+    keep_feasible : ndarray
+        Array indicating which components must be kept feasible with a size
+        equal to the number of the constraints.
+    """
+    def __init__(self, constraint, x0, sparse_jacobian=None,
+                 finite_diff_bounds=(-np.inf, np.inf)):
+        if isinstance(constraint, NonlinearConstraint):
+            fun = VectorFunction(constraint.fun, x0,
+                                 constraint.jac, constraint.hess,
+                                 constraint.finite_diff_rel_step,
+                                 constraint.finite_diff_jac_sparsity,
+                                 finite_diff_bounds, sparse_jacobian)
+        elif isinstance(constraint, LinearConstraint):
+            fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian)
+        elif isinstance(constraint, Bounds):
+            fun = IdentityVectorFunction(x0, sparse_jacobian)
+        else:
+            raise ValueError("`constraint` of an unknown type is passed.")
+
+        m = fun.m
+        lb = np.asarray(constraint.lb, dtype=float)
+        ub = np.asarray(constraint.ub, dtype=float)
+        if lb.ndim == 0:
+            lb = np.resize(lb, m)
+        if ub.ndim == 0:
+            ub = np.resize(ub, m)
+
+        keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool)
+        if keep_feasible.ndim == 0:
+            keep_feasible = np.resize(keep_feasible, m)
+        if keep_feasible.shape != (m,):
+            raise ValueError("`keep_feasible` has a wrong shape.")
+
+        mask = keep_feasible & (lb != ub)
+        f0 = fun.f
+        if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]):
+            raise ValueError("`x0` is infeasible with respect to some "
+                             "inequality constraint with `keep_feasible` "
+                             "set to True.")
+
+        self.fun = fun
+        self.bounds = (lb, ub)
+        self.keep_feasible = keep_feasible
+
+
+def new_bounds_to_old(lb, ub, n):
+    """Convert the new bounds representation to the old one.
+
+    The new representation is a tuple (lb, ub) and the old one is a list
+    containing n tuples, the i-th containing the lower and upper bound on
+    the i-th variable.
+    """
+    lb = np.asarray(lb)
+    ub = np.asarray(ub)
+    if lb.ndim == 0:
+        lb = np.resize(lb, n)
+    if ub.ndim == 0:
+        ub = np.resize(ub, n)
+
+    lb = [x if x > -np.inf else None for x in lb]
+    ub = [x if x < np.inf else None for x in ub]
+
+    return list(zip(lb, ub))
+
+
+def old_bound_to_new(bounds):
+    """Convert the old bounds representation to the new one.
+
+    The new representation is a tuple (lb, ub) and the old one is a list
+    containing n tuples, the i-th containing the lower and upper bound on
+    the i-th variable.
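+
+    For example::
+
+        lb, ub = old_bound_to_new([(0, None), (-1, 1)])
+        # lb -> array([ 0., -1.]),  ub -> array([inf,  1.])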
+    """
+    lb, ub = zip(*bounds)
+    lb = np.array([x if x is not None else -np.inf for x in lb])
+    ub = np.array([x if x is not None else np.inf for x in ub])
+    return lb, ub
+
+
+def strict_bounds(lb, ub, keep_feasible, n_vars):
+    """Remove bounds which are not asked to be kept feasible."""
+    strict_lb = np.resize(lb, n_vars).astype(float)
+    strict_ub = np.resize(ub, n_vars).astype(float)
+    keep_feasible = np.resize(keep_feasible, n_vars)
+    strict_lb[~keep_feasible] = -np.inf
+    strict_ub[~keep_feasible] = np.inf
+    return strict_lb, strict_ub
+
+
+def new_constraint_to_old(con, x0):
+    """
+    Converts new-style constraint objects to old-style constraint dictionaries.
+    """
+    if isinstance(con, NonlinearConstraint):
+        if (con.finite_diff_jac_sparsity is not None or
+                con.finite_diff_rel_step is not None or
+                not isinstance(con.hess, BFGS) or  # misses user specified BFGS
+                con.keep_feasible):
+            warn("Constraint options `finite_diff_jac_sparsity`, "
+                 "`finite_diff_rel_step`, `keep_feasible`, and `hess` "
+                 "are ignored by this method.", OptimizeWarning)
+
+        fun = con.fun
+        if callable(con.jac):
+            jac = con.jac
+        else:
+            jac = None
+
+    else:  # LinearConstraint
+        if con.keep_feasible:
+            warn("Constraint option `keep_feasible` is ignored by this "
+                 "method.", OptimizeWarning)
+
+        A = con.A
+        if issparse(A):
+            A = A.todense()
+        fun = lambda x: np.dot(A, x)
+        jac = lambda x: A
+
+    # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
+    # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
+    pcon = PreparedConstraint(con, x0)
+    lb, ub = pcon.bounds
+
+    i_eq = lb == ub
+    i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
+    i_bound_above = np.logical_xor(ub != np.inf, i_eq)
+    i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)
+
+    if np.any(i_unbounded):
+        warn("At least one constraint is unbounded above and below. Such "
+             "constraints are ignored.", OptimizeWarning)
+
+    ceq = []
+    if np.any(i_eq):
+        def f_eq(x):
+            y = np.array(fun(x)).flatten()
+            return y[i_eq] - lb[i_eq]
+        ceq = [{"type": "eq", "fun": f_eq}]
+
+        if jac is not None:
+            def j_eq(x):
+                dy = jac(x)
+                if issparse(dy):
+                    dy = dy.todense()
+                dy = np.atleast_2d(dy)
+                return dy[i_eq, :]
+            ceq[0]["jac"] = j_eq
+
+    cineq = []
+    n_bound_below = np.sum(i_bound_below)
+    n_bound_above = np.sum(i_bound_above)
+    if n_bound_below + n_bound_above:
+        def f_ineq(x):
+            y = np.zeros(n_bound_below + n_bound_above)
+            y_all = np.array(fun(x)).flatten()
+            y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
+            y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
+            return y
+        cineq = [{"type": "ineq", "fun": f_ineq}]
+
+        if jac is not None:
+            def j_ineq(x):
+                dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
+                dy_all = jac(x)
+                if issparse(dy_all):
+                    dy_all = dy_all.todense()
+                dy_all = np.atleast_2d(dy_all)
+                dy[:n_bound_below, :] = dy_all[i_bound_below]
+                dy[n_bound_below:, :] = -dy_all[i_bound_above]
+                return dy
+            cineq[0]["jac"] = j_ineq
+
+    old_constraints = ceq + cineq
+
+    if len(old_constraints) > 1:
+        warn("Equality and inequality constraints are specified in the same "
+             "element of the constraint list. For efficient use with this "
+             "method, equality and inequality constraints should be specified "
+             "in separate elements of the constraint list.", OptimizeWarning)
+    return old_constraints
+
+
+def old_constraint_to_new(ic, con):
+    """
+    Converts old-style constraint dictionaries to new-style constraint objects.
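+
+    For example, ``{'type': 'ineq', 'fun': f}`` (with the 'ineq' convention
+    ``f(x) >= 0``) becomes ``NonlinearConstraint(f, 0, np.inf, jac='2-point')``.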
+    """
+    # check type
+    try:
+        ctype = con['type'].lower()
+    except KeyError:
+        raise KeyError('Constraint %d has no type defined.' % ic)
+    except TypeError:
+        raise TypeError('Constraints must be a sequence of dictionaries.')
+    except AttributeError:
+        raise TypeError("Constraint's type must be a string.")
+    else:
+        if ctype not in ['eq', 'ineq']:
+            raise ValueError("Unknown constraint type '%s'." % con['type'])
+    if 'fun' not in con:
+        raise ValueError('Constraint %d has no function defined.' % ic)
+
+    lb = 0
+    if ctype == 'eq':
+        ub = 0
+    else:
+        ub = np.inf
+
+    jac = '2-point'
+    if 'args' in con:
+        args = con['args']
+        fun = lambda x: con['fun'](x, *args)
+        if 'jac' in con:
+            jac = lambda x: con['jac'](x, *args)
+    else:
+        fun = con['fun']
+        if 'jac' in con:
+            jac = con['jac']
+
+    return NonlinearConstraint(fun, lb, ub, jac)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_constraints.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_constraints.pyc
new file mode 100644
index 0000000..2bcc55a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_constraints.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentiable_functions.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentiable_functions.py
new file mode 100644
index 0000000..d4568b4
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentiable_functions.py
@@ -0,0 +1,521 @@
+from __future__ import division, print_function, absolute_import
+import numpy as np
+import scipy.sparse as sps
+from ._numdiff import approx_derivative, group_columns
+from ._hessian_update_strategy import HessianUpdateStrategy
+from scipy.sparse.linalg import LinearOperator
+
+
+FD_METHODS = ('2-point', '3-point', 'cs')
+
+
+class ScalarFunction(object):
+    """Scalar function and its derivatives.
+
+    This class defines a scalar function F: R^n->R and methods for
+    computing or approximating its first and second derivatives.
+
+    Notes
+    -----
+    This class implements a memoization logic. There are methods `fun`,
+    `grad`, `hess` and corresponding attributes `f`, `g` and `H`. The following
+    things should be considered:
+
+    1. Use only public methods `fun`, `grad` and `hess`.
+    2. After one of the methods is called, the corresponding attribute
+       will be set. However, a subsequent call with a different argument
+       of *any* of the methods may overwrite the attribute.
+    """
+    def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step,
+                 finite_diff_bounds):
+        if not callable(grad) and grad not in FD_METHODS:
+            raise ValueError("`grad` must be either callable or one of {}."
+                             .format(FD_METHODS))
+
+        if not (callable(hess) or hess in FD_METHODS
+                or isinstance(hess, HessianUpdateStrategy)):
+            raise ValueError("`hess` must be either callable, "
+                             "HessianUpdateStrategy or one of {}."
+                             
+ .format(FD_METHODS)) + + if grad in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the gradient is estimated via " + "finite-differences, we require the Hessian " + "to be estimated using one of the " + "quasi-Newton strategies.") + + self.x = np.atleast_1d(x0).astype(float) + self.n = self.x.size + self.nfev = 0 + self.ngev = 0 + self.nhev = 0 + self.f_updated = False + self.g_updated = False + self.H_updated = False + + finite_diff_options = {} + if grad in FD_METHODS: + finite_diff_options["method"] = grad + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["bounds"] = finite_diff_bounds + if hess in FD_METHODS: + finite_diff_options["method"] = hess + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["as_linear_operator"] = True + + # Function evaluation + def fun_wrapped(x): + self.nfev += 1 + return fun(x, *args) + + def update_fun(): + self.f = fun_wrapped(self.x) + + self._update_fun_impl = update_fun + self._update_fun() + + # Gradient evaluation + if callable(grad): + def grad_wrapped(x): + self.ngev += 1 + return np.atleast_1d(grad(x, *args)) + + def update_grad(): + self.g = grad_wrapped(self.x) + + elif grad in FD_METHODS: + def update_grad(): + self._update_fun() + self.g = approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options) + + self._update_grad_impl = update_grad + self._update_grad() + + # Hessian Evaluation + if callable(hess): + self.H = hess(x0, *args) + self.H_updated = True + self.nhev += 1 + + if sps.issparse(self.H): + def hess_wrapped(x): + self.nhev += 1 + return sps.csr_matrix(hess(x, *args)) + self.H = sps.csr_matrix(self.H) + + elif isinstance(self.H, LinearOperator): + def hess_wrapped(x): + self.nhev += 1 + return hess(x, *args) + + else: + def hess_wrapped(x): + self.nhev += 1 + return np.atleast_2d(np.asarray(hess(x, *args))) + self.H = np.atleast_2d(np.asarray(self.H)) + + def update_hess(): + self.H = hess_wrapped(self.x) + + elif hess in FD_METHODS: + def update_hess(): + self._update_grad() + self.H = approx_derivative(grad_wrapped, self.x, f0=self.g, + **finite_diff_options) + return self.H + + update_hess() + self.H_updated = True + elif isinstance(hess, HessianUpdateStrategy): + self.H = hess + self.H.initialize(self.n, 'hess') + self.H_updated = True + self.x_prev = None + self.g_prev = None + + def update_hess(): + self._update_grad() + self.H.update(self.x - self.x_prev, self.g - self.g_prev) + + self._update_hess_impl = update_hess + + if isinstance(hess, HessianUpdateStrategy): + def update_x(x): + self._update_grad() + self.x_prev = self.x + self.g_prev = self.g + + self.x = x + self.f_updated = False + self.g_updated = False + self.H_updated = False + self._update_hess() + else: + def update_x(x): + self.x = x + self.f_updated = False + self.g_updated = False + self.H_updated = False + self._update_x_impl = update_x + + def _update_fun(self): + if not self.f_updated: + self._update_fun_impl() + self.f_updated = True + + def _update_grad(self): + if not self.g_updated: + self._update_grad_impl() + self.g_updated = True + + def _update_hess(self): + if not self.H_updated: + self._update_hess_impl() + self.H_updated = True + + def fun(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_fun() + return self.f + + def grad(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_grad() + return self.g + + def hess(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + 
self._update_hess()
+        return self.H
+
+
+class VectorFunction(object):
+    """Vector function and its derivatives.
+
+    This class defines a vector function F: R^n->R^m and methods for
+    computing or approximating its first and second derivatives.
+
+    Notes
+    -----
+    This class implements a memoization logic. There are methods `fun`,
+    `jac`, `hess` and corresponding attributes `f`, `J` and `H`. The following
+    things should be considered:
+
+    1. Use only public methods `fun`, `jac` and `hess`.
+    2. After one of the methods is called, the corresponding attribute
+       will be set. However, a subsequent call with a different argument
+       of *any* of the methods may overwrite the attribute.
+    """
+    def __init__(self, fun, x0, jac, hess,
+                 finite_diff_rel_step, finite_diff_jac_sparsity,
+                 finite_diff_bounds, sparse_jacobian):
+        if not callable(jac) and jac not in FD_METHODS:
+            raise ValueError("`jac` must be either callable or one of {}."
+                             .format(FD_METHODS))
+
+        if not (callable(hess) or hess in FD_METHODS
+                or isinstance(hess, HessianUpdateStrategy)):
+            raise ValueError("`hess` must be either callable, "
+                             "HessianUpdateStrategy or one of {}."
+                             .format(FD_METHODS))
+
+        if jac in FD_METHODS and hess in FD_METHODS:
+            raise ValueError("Whenever the Jacobian is estimated via "
+                             "finite-differences, we require the Hessian to "
+                             "be estimated using one of the quasi-Newton "
+                             "strategies.")
+
+        self.x = np.atleast_1d(x0).astype(float)
+        self.n = self.x.size
+        self.nfev = 0
+        self.njev = 0
+        self.nhev = 0
+        self.f_updated = False
+        self.J_updated = False
+        self.H_updated = False
+
+        finite_diff_options = {}
+        if jac in FD_METHODS:
+            finite_diff_options["method"] = jac
+            finite_diff_options["rel_step"] = finite_diff_rel_step
+            if finite_diff_jac_sparsity is not None:
+                sparsity_groups = group_columns(finite_diff_jac_sparsity)
+                finite_diff_options["sparsity"] = (finite_diff_jac_sparsity,
+                                                   sparsity_groups)
+            finite_diff_options["bounds"] = finite_diff_bounds
+            self.x_diff = np.copy(self.x)
+        if hess in FD_METHODS:
+            finite_diff_options["method"] = hess
+            finite_diff_options["rel_step"] = finite_diff_rel_step
+            finite_diff_options["as_linear_operator"] = True
+            self.x_diff = np.copy(self.x)
+
+        # Function evaluation
+        def fun_wrapped(x):
+            self.nfev += 1
+            return np.atleast_1d(fun(x))
+
+        def update_fun():
+            self.f = fun_wrapped(self.x)
+
+        self._update_fun_impl = update_fun
+        update_fun()
+
+        self.v = np.zeros_like(self.f)
+        self.m = self.v.size
+
+        # Jacobian Evaluation
+        if callable(jac):
+            self.J = jac(self.x)
+            self.J_updated = True
+            self.njev += 1
+
+            if (sparse_jacobian or
+                    sparse_jacobian is None and sps.issparse(self.J)):
+                def jac_wrapped(x):
+                    self.njev += 1
+                    return sps.csr_matrix(jac(x))
+                self.J = sps.csr_matrix(self.J)
+                self.sparse_jacobian = True
+
+            elif sps.issparse(self.J):
+                def jac_wrapped(x):
+                    self.njev += 1
+                    return jac(x).toarray()
+                self.J = self.J.toarray()
+                self.sparse_jacobian = False
+
+            else:
+                def jac_wrapped(x):
+                    self.njev += 1
+                    return np.atleast_2d(jac(x))
+                self.J = np.atleast_2d(self.J)
+                self.sparse_jacobian = False
+
+            def update_jac():
+                self.J = jac_wrapped(self.x)
+
+        elif jac in FD_METHODS:
+            self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
+                                       **finite_diff_options)
+            self.J_updated = True
+
+            if (sparse_jacobian or
+                    sparse_jacobian is None and sps.issparse(self.J)):
+                def update_jac():
+                    self._update_fun()
+                    self.J = sps.csr_matrix(
+                        approx_derivative(fun_wrapped, self.x, f0=self.f,
+                                          **finite_diff_options))
+                self.J = sps.csr_matrix(self.J)
+                self.sparse_jacobian = True
+
+            elif sps.issparse(self.J):
+                def update_jac():
+                    self._update_fun()
+                    self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
+                                               **finite_diff_options).toarray()
+                self.J = self.J.toarray()
+                self.sparse_jacobian = False
+
+            else:
+                def update_jac():
+                    self._update_fun()
+                    self.J = np.atleast_2d(
+                        approx_derivative(fun_wrapped, self.x, f0=self.f,
+                                          **finite_diff_options))
+                self.J = np.atleast_2d(self.J)
+                self.sparse_jacobian = False
+
+        self._update_jac_impl = update_jac
+
+        # Define Hessian
+        if callable(hess):
+            self.H = hess(self.x, self.v)
+            self.H_updated = True
+            self.nhev += 1
+
+            if sps.issparse(self.H):
+                def hess_wrapped(x, v):
+                    self.nhev += 1
+                    return sps.csr_matrix(hess(x, v))
+                self.H = sps.csr_matrix(self.H)
+
+            elif isinstance(self.H, LinearOperator):
+                def hess_wrapped(x, v):
+                    self.nhev += 1
+                    return hess(x, v)
+
+            else:
+                def hess_wrapped(x, v):
+                    self.nhev += 1
+                    return np.atleast_2d(np.asarray(hess(x, v)))
+                self.H = np.atleast_2d(np.asarray(self.H))
+
+            def update_hess():
+                self.H = hess_wrapped(self.x, self.v)
+        elif hess in FD_METHODS:
+            def jac_dot_v(x, v):
+                return jac_wrapped(x).T.dot(v)
+
+            def update_hess():
+                self._update_jac()
+                self.H = approx_derivative(jac_dot_v, self.x,
+                                           f0=self.J.T.dot(self.v),
+                                           args=(self.v,),
+                                           **finite_diff_options)
+            update_hess()
+            self.H_updated = True
+        elif isinstance(hess, HessianUpdateStrategy):
+            self.H = hess
+            self.H.initialize(self.n, 'hess')
+            self.H_updated = True
+            self.x_prev = None
+            self.J_prev = None
+
+            def update_hess():
+                self._update_jac()
+                # When v is updated before x was updated, then x_prev and
+                # J_prev are None and we need this check.
+                if self.x_prev is not None and self.J_prev is not None:
+                    delta_x = self.x - self.x_prev
+                    delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v)
+                    self.H.update(delta_x, delta_g)
+
+        self._update_hess_impl = update_hess
+
+        if isinstance(hess, HessianUpdateStrategy):
+            def update_x(x):
+                self._update_jac()
+                self.x_prev = self.x
+                self.J_prev = self.J
+                self.x = x
+                self.f_updated = False
+                self.J_updated = False
+                self.H_updated = False
+                self._update_hess()
+        else:
+            def update_x(x):
+                self.x = x
+                self.f_updated = False
+                self.J_updated = False
+                self.H_updated = False
+
+        self._update_x_impl = update_x
+
+    def _update_v(self, v):
+        if not np.array_equal(v, self.v):
+            self.v = v
+            self.H_updated = False
+
+    def _update_x(self, x):
+        if not np.array_equal(x, self.x):
+            self._update_x_impl(x)
+
+    def _update_fun(self):
+        if not self.f_updated:
+            self._update_fun_impl()
+            self.f_updated = True
+
+    def _update_jac(self):
+        if not self.J_updated:
+            self._update_jac_impl()
+            self.J_updated = True
+
+    def _update_hess(self):
+        if not self.H_updated:
+            self._update_hess_impl()
+            self.H_updated = True
+
+    def fun(self, x):
+        self._update_x(x)
+        self._update_fun()
+        return self.f
+
+    def jac(self, x):
+        self._update_x(x)
+        self._update_jac()
+        return self.J
+
+    def hess(self, x, v):
+        # v should be updated before x.
+        self._update_v(v)
+        self._update_x(x)
+        self._update_hess()
+        return self.H
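+
+# A hedged sketch of the memoization above (the module is private, so this
+# import path only matches the vendored copy; arguments follow the positional
+# signature of VectorFunction.__init__):
+#
+#     import numpy as np
+#     from scipy.optimize import BFGS
+#     from scipy.optimize._differentiable_functions import VectorFunction
+#
+#     vf = VectorFunction(lambda x: np.array([x[0] ** 2, x[0] * x[1]]),
+#                         np.array([1.0, 2.0]), '2-point', BFGS(),
+#                         None, None, (-np.inf, np.inf), None)
+#     x = np.array([1.0, 2.0])
+#     vf.fun(x); vf.fun(x)  # the second call is served from the cache
+#     vf.nfev               # counts actual evaluations of the wrapped fun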
+
+
+class LinearVectorFunction(object):
+    """Linear vector function and its derivatives.
+
+    Defines a linear function F = A x, where x is an n-dimensional vector and
+    A is an m-by-n matrix. The Jacobian is constant and equal to A. The
+    Hessian is identically zero and it is returned as a csr matrix.
+    """
+    def __init__(self, A, x0, sparse_jacobian):
+        if sparse_jacobian or sparse_jacobian is None and sps.issparse(A):
+            self.J = sps.csr_matrix(A)
+            self.sparse_jacobian = True
+        elif sps.issparse(A):
+            self.J = A.toarray()
+            self.sparse_jacobian = False
+        else:
+            self.J = np.atleast_2d(A)
+            self.sparse_jacobian = False
+
+        self.m, self.n = self.J.shape
+
+        self.x = np.atleast_1d(x0).astype(float)
+        self.f = self.J.dot(self.x)
+        self.f_updated = True
+
+        self.v = np.zeros(self.m, dtype=float)
+        self.H = sps.csr_matrix((self.n, self.n))
+
+    def _update_x(self, x):
+        if not np.array_equal(x, self.x):
+            self.x = x
+            self.f_updated = False
+
+    def fun(self, x):
+        self._update_x(x)
+        if not self.f_updated:
+            self.f = self.J.dot(x)
+            self.f_updated = True
+        return self.f
+
+    def jac(self, x):
+        self._update_x(x)
+        return self.J
+
+    def hess(self, x, v):
+        self._update_x(x)
+        self.v = v
+        return self.H
+
+
+class IdentityVectorFunction(LinearVectorFunction):
+    """Identity vector function and its derivatives.
+
+    The Jacobian is the identity matrix, returned as a dense array when
+    `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is
+    identically zero and it is returned as a csr matrix.
+    """
+    def __init__(self, x0, sparse_jacobian):
+        n = len(x0)
+        if sparse_jacobian or sparse_jacobian is None:
+            A = sps.eye(n, format='csr')
+            sparse_jacobian = True
+        else:
+            A = np.eye(n)
+            sparse_jacobian = False
+        super(IdentityVectorFunction, self).__init__(A, x0, sparse_jacobian)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentiable_functions.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentiable_functions.pyc
new file mode 100644
index 0000000..4bce2f6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentiable_functions.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentialevolution.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentialevolution.py
new file mode 100644
index 0000000..ea2404c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentialevolution.py
@@ -0,0 +1,1012 @@
+"""
+differential_evolution: The differential evolution global optimization algorithm
+Added by Andrew Nelson 2014
+"""
+from __future__ import division, print_function, absolute_import
+import warnings
+
+import numpy as np
+from scipy.optimize import OptimizeResult, minimize
+from scipy.optimize.optimize import _status_message
+from scipy._lib._util import check_random_state, MapWrapper
+from scipy._lib.six import xrange, string_types
+
+
+__all__ = ['differential_evolution']
+
+_MACHEPS = np.finfo(np.float64).eps
+
+
+def differential_evolution(func, bounds, args=(), strategy='best1bin',
+                           maxiter=1000, popsize=15, tol=0.01,
+                           mutation=(0.5, 1), recombination=0.7, seed=None,
+                           callback=None, disp=False, polish=True,
+                           init='latinhypercube', atol=0, updating='immediate',
+                           workers=1):
+    """Finds the global minimum of a multivariate function.
+
+    Differential Evolution is stochastic in nature (it does not use gradient
+    methods) to find the minimum, and can search large areas of candidate
+    space, but it often requires more function evaluations than conventional
+    gradient-based techniques.
+
+    The algorithm is due to Storn and Price [1]_.
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized. Must be in the form
+        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+        and ``args`` is a tuple of any additional fixed parameters needed to
+        completely specify the function.
+    bounds : sequence
+        Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
+        defining the lower and upper bounds for the optimizing argument of
+        `func`. It is required to have ``len(bounds) == len(x)``.
+        ``len(bounds)`` is used to determine the number of parameters in ``x``.
+    args : tuple, optional
+        Any additional fixed parameters needed to
+        completely specify the objective function.
+    strategy : str, optional
+        The differential evolution strategy to use. Should be one of:
+
+            - 'best1bin'
+            - 'best1exp'
+            - 'rand1exp'
+            - 'randtobest1exp'
+            - 'currenttobest1exp'
+            - 'best2exp'
+            - 'rand2exp'
+            - 'randtobest1bin'
+            - 'currenttobest1bin'
+            - 'best2bin'
+            - 'rand2bin'
+            - 'rand1bin'
+
+        The default is 'best1bin'.
+    maxiter : int, optional
+        The maximum number of generations over which the entire population is
+        evolved. The maximum number of function evaluations (with no polishing)
+        is: ``(maxiter + 1) * popsize * len(x)``
+    popsize : int, optional
+        A multiplier for setting the total population size. The population has
+        ``popsize * len(x)`` individuals (unless the initial population is
+        supplied via the `init` keyword).
+    tol : float, optional
+        Relative tolerance for convergence, the solving stops when
+        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+        where `atol` and `tol` are the absolute and relative tolerance
+        respectively.
+    mutation : float or tuple(float, float), optional
+        The mutation constant. In the literature this is also known as
+        differential weight, being denoted by F.
+        If specified as a float it should be in the range [0, 2].
+        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
+        randomly changes the mutation constant on a generation by generation
+        basis. The mutation constant for that generation is taken from
+        ``U[min, max)``. Dithering can help speed convergence significantly.
+        Increasing the mutation constant increases the search radius, but will
+        slow down convergence.
+    recombination : float, optional
+        The recombination constant, should be in the range [0, 1]. In the
+        literature this is also known as the crossover probability, being
+        denoted by CR. Increasing this value allows a larger number of mutants
+        to progress into the next generation, but at the risk of population
+        stability.
+    seed : int or `np.random.RandomState`, optional
+        If `seed` is not specified the `np.random.RandomState` singleton is
+        used.
+        If `seed` is an int, a new `np.random.RandomState` instance is used,
+        seeded with `seed`.
+        If `seed` is already a `np.random.RandomState` instance, then that
+        `np.random.RandomState` instance is used.
+        Specify `seed` for repeatable minimizations.
+    disp : bool, optional
+        Display status messages
+    callback : callable, `callback(xk, convergence=val)`, optional
+        A function to follow the progress of the minimization. ``xk`` is
+        the current value of ``x0``. ``val`` represents the fractional
+        value of the population convergence. When ``val`` is greater than one
+        the function halts. If callback returns `True`, then the minimization
+        is halted (any polishing is still carried out).
+    polish : bool, optional
+        If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
+        method is used to polish the best population member at the end, which
+        can improve the minimization slightly.
+    init : str or array-like, optional
+        Specify which type of population initialization is performed. Should be
+        one of:
+
+            - 'latinhypercube'
+            - 'random'
+            - array specifying the initial population. The array should have
+              shape ``(M, len(x))``, where len(x) is the number of parameters.
+              `init` is clipped to `bounds` before use.
+
+        The default is 'latinhypercube'. Latin Hypercube sampling tries to
+        maximize coverage of the available parameter space. 'random'
+        initializes the population randomly - this has the drawback that
+        clustering can occur, preventing the whole of parameter space being
+        covered. An array can be used to specify a population subset,
+        for example, to create a tight bunch of initial guesses in a location
+        where the solution is known to exist, thereby reducing time for
+        convergence.
+    atol : float, optional
+        Absolute tolerance for convergence, the solving stops when
+        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+        where `atol` and `tol` are the absolute and relative tolerance
+        respectively.
+    updating : {'immediate', 'deferred'}, optional
+        If ``'immediate'``, the best solution vector is continuously updated
+        within a single generation [4]_. This can lead to faster convergence as
+        trial vectors can take advantage of continuous improvements in the best
+        solution.
+        With ``'deferred'``, the best solution vector is updated once per
+        generation. Only ``'deferred'`` is compatible with parallelization, and
+        the `workers` keyword can override this option.
+
+        .. versionadded:: 1.2.0
+
+    workers : int or map-like callable, optional
+        If `workers` is an int the population is subdivided into `workers`
+        sections and evaluated in parallel (uses `multiprocessing.Pool`).
+        Supply -1 to use all available CPU cores.
+        Alternatively supply a map-like callable, such as
+        `multiprocessing.Pool.map` for evaluating the population in parallel.
+        This evaluation is carried out as ``workers(func, iterable)``.
+        This option will override the `updating` keyword to
+        ``updating='deferred'`` if ``workers != 1``.
+        Requires that `func` be pickleable.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as an `OptimizeResult` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes. If `polish`
+        was employed, and a lower minimum was obtained by the polishing, then
+        OptimizeResult also contains the ``jac`` attribute.
+
+    Notes
+    -----
+    Differential evolution is a stochastic population based method that is
+    useful for global optimization problems. At each pass through the population
+    the algorithm mutates each candidate solution by mixing with other candidate
+    solutions to create a trial candidate. There are several strategies [2]_ for
+    creating trial candidates, which suit some problems more than others. The
+    'best1bin' strategy is a good starting point for many systems. In this
+    strategy two members of the population are randomly chosen. Their difference
+    is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`,
+    so far:
+
math:: + + b' = b_0 + mutation * (population[rand0] - population[rand1]) + + A trial vector is then constructed. Starting with a randomly chosen 'i'th + parameter the trial is sequentially filled (in modulo) with parameters from + ``b'`` or the original candidate. The choice of whether to use ``b'`` or the + original candidate is made with a binomial distribution (the 'bin' in + 'best1bin') - a random number in [0, 1) is generated. If this number is + less than the `recombination` constant then the parameter is loaded from + ``b'``, otherwise it is loaded from the original candidate. The final + parameter is always loaded from ``b'``. Once the trial candidate is built + its fitness is assessed. If the trial is better than the original candidate + then it takes its place. If it is also better than the best overall + candidate it also replaces that. + To improve your chances of finding a global minimum use higher `popsize` + values, with higher `mutation` and (dithering), but lower `recombination` + values. This has the effect of widening the search radius, but slowing + convergence. + By default the best solution vector is updated continuously within a single + iteration (``updating='immediate'``). This is a modification [4]_ of the + original differential evolution algorithm which can lead to faster + convergence as trial vectors can immediately benefit from improved + solutions. To use the original Storn and Price behaviour, updating the best + solution once per iteration, set ``updating='deferred'``. + + .. versionadded:: 0.15.0 + + Examples + -------- + Let us consider the problem of minimizing the Rosenbrock function. This + function is implemented in `rosen` in `scipy.optimize`. + + >>> from scipy.optimize import rosen, differential_evolution + >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)] + >>> result = differential_evolution(rosen, bounds) + >>> result.x, result.fun + (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19) + + Now repeat, but with parallelization. + + >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)] + >>> result = differential_evolution(rosen, bounds, updating='deferred', + ... workers=2) + >>> result.x, result.fun + (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19) + + Next find the minimum of the Ackley function + (https://en.wikipedia.org/wiki/Test_functions_for_optimization). + + >>> from scipy.optimize import differential_evolution + >>> import numpy as np + >>> def ackley(x): + ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2)) + ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1])) + ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e + >>> bounds = [(-5, 5), (-5, 5)] + >>> result = differential_evolution(ackley, bounds) + >>> result.x, result.fun + (array([ 0., 0.]), 4.4408920985006262e-16) + + References + ---------- + .. [1] Storn, R and Price, K, Differential Evolution - a Simple and + Efficient Heuristic for Global Optimization over Continuous Spaces, + Journal of Global Optimization, 1997, 11, 341 - 359. + .. [2] http://www1.icsi.berkeley.edu/~storn/code.html + .. [3] http://en.wikipedia.org/wiki/Differential_evolution + .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K., - + Characterization of structures from X-ray scattering data using + genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357, + 2827-2848 + """ + + # using a context manager means that any created Pool objects are + # cleared up. 
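+ # For orientation, a rough sketch of how a single 'best1bin' trial
+ # vector is assembled (illustrative only -- the real loop lives in
+ # DifferentialEvolutionSolver._mutate; `pop`, `F`, `CR`, `rng` and
+ # `candidate` are hypothetical names):
+ #
+ # best, r0, r1 = pop[0], pop[rand0], pop[rand1]
+ # bprime = best + F * (r0 - r1) # mutation
+ # cross = rng.rand(len(candidate)) < CR # binomial crossover mask
+ # cross[rng.randint(len(candidate))] = True # one entry always from bprime
+ # trial = np.where(cross, bprime, candidate)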
+ with DifferentialEvolutionSolver(func, bounds, args=args,
+ strategy=strategy,
+ maxiter=maxiter,
+ popsize=popsize, tol=tol,
+ mutation=mutation,
+ recombination=recombination,
+ seed=seed, polish=polish,
+ callback=callback,
+ disp=disp, init=init, atol=atol,
+ updating=updating,
+ workers=workers) as solver:
+ ret = solver.solve()
+
+ return ret
+
+
+class DifferentialEvolutionSolver(object):
+
+ """This class implements the differential evolution solver
+
+ Parameters
+ ----------
+ func : callable
+ The objective function to be minimized. Must be in the form
+ ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+ and ``args`` is a tuple of any additional fixed parameters needed to
+ completely specify the function.
+ bounds : sequence
+ Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
+ defining the lower and upper bounds for the optimizing argument of
+ `func`. It is required to have ``len(bounds) == len(x)``.
+ ``len(bounds)`` is used to determine the number of parameters in ``x``.
+ args : tuple, optional
+ Any additional fixed parameters needed to
+ completely specify the objective function.
+ strategy : str, optional
+ The differential evolution strategy to use. Should be one of:
+
+ - 'best1bin'
+ - 'best1exp'
+ - 'rand1exp'
+ - 'randtobest1exp'
+ - 'currenttobest1exp'
+ - 'best2exp'
+ - 'rand2exp'
+ - 'randtobest1bin'
+ - 'currenttobest1bin'
+ - 'best2bin'
+ - 'rand2bin'
+ - 'rand1bin'
+
+ The default is 'best1bin'.
+
+ maxiter : int, optional
+ The maximum number of generations over which the entire population is
+ evolved. The maximum number of function evaluations (with no polishing)
+ is: ``(maxiter + 1) * popsize * len(x)``
+ popsize : int, optional
+ A multiplier for setting the total population size. The population has
+ ``popsize * len(x)`` individuals (unless the initial population is
+ supplied via the `init` keyword).
+ tol : float, optional
+ Relative tolerance for convergence, the solving stops when
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+ where `atol` and `tol` are the absolute and relative tolerances
+ respectively.
+ mutation : float or tuple(float, float), optional
+ The mutation constant. In the literature this is also known as
+ differential weight, being denoted by F.
+ If specified as a float it should be in the range [0, 2].
+ If specified as a tuple ``(min, max)`` dithering is employed. Dithering
+ randomly changes the mutation constant on a generation by generation
+ basis. The mutation constant for that generation is taken from
+ U[min, max). Dithering can help speed convergence significantly.
+ Increasing the mutation constant increases the search radius, but will
+ slow down convergence.
+ recombination : float, optional
+ The recombination constant, should be in the range [0, 1]. In the
+ literature this is also known as the crossover probability, being
+ denoted by CR. Increasing this value allows a larger number of mutants
+ to progress into the next generation, but at the risk of population
+ stability.
+ seed : int or `np.random.RandomState`, optional
+ If `seed` is not specified the `np.random.RandomState` singleton is
+ used.
+ If `seed` is an int, a new `np.random.RandomState` instance is used,
+ seeded with `seed`.
+ If `seed` is already a `np.random.RandomState` instance, then that
+ `np.random.RandomState` instance is used.
+ Specify `seed` for repeatable minimizations.
+ disp : bool, optional
+ Display status messages.
+ callback : callable, `callback(xk, convergence=val)`, optional
+ A function to follow the progress of the minimization. ``xk`` is
+ the current value of ``x0``. ``val`` represents the fractional
+ value of the population convergence. When ``val`` is greater than one
+ the function halts. If callback returns `True`, then the minimization
+ is halted (any polishing is still carried out).
+ polish : bool, optional
+ If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method
+ is used to polish the best population member at the end. This requires
+ a few more function evaluations.
+ maxfun : int, optional
+ Set the maximum number of function evaluations. However, it probably
+ makes more sense to set `maxiter` instead.
+ init : str or array-like, optional
+ Specify which type of population initialization is performed. Should be
+ one of:
+
+ - 'latinhypercube'
+ - 'random'
+ - array specifying the initial population. The array should have
+ shape ``(M, len(x))``, where len(x) is the number of parameters.
+ `init` is clipped to `bounds` before use.
+
+ The default is 'latinhypercube'. Latin Hypercube sampling tries to
+ maximize coverage of the available parameter space. 'random'
+ initializes the population randomly - this has the drawback that
+ clustering can occur, preventing the whole of parameter space being
+ covered. An array can be used to specify a population, for
+ example, to create a tight bunch of initial guesses in a location
+ where the solution is known to exist, thereby reducing time for
+ convergence.
+ atol : float, optional
+ Absolute tolerance for convergence, the solving stops when
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+ where `atol` and `tol` are the absolute and relative tolerances
+ respectively.
+ updating : {'immediate', 'deferred'}, optional
+ If `immediate` the best solution vector is continuously updated within
+ a single generation. This can lead to faster convergence as trial
+ vectors can take advantage of continuous improvements in the best
+ solution.
+ With `deferred` the best solution vector is updated once per
+ generation. Only `deferred` is compatible with parallelization, and the
+ `workers` keyword can override this option.
+ workers : int or map-like callable, optional
+ If `workers` is an int the population is subdivided into `workers`
+ sections and evaluated in parallel (uses `multiprocessing.Pool`).
+ Supply `-1` to use all cores available to the Process.
+ Alternatively supply a map-like callable, such as
+ `multiprocessing.Pool.map` for evaluating the population in parallel.
+ This evaluation is carried out as ``workers(func, iterable)``.
+ This option will override the `updating` keyword to
+ `updating='deferred'` if `workers != 1`.
+ Requires that `func` be pickleable.
+
+ """
+
+ # Dispatch of mutation strategy method (binomial or exponential).
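+ # 'bin' strategies draw an independent uniform number for each parameter
+ # and copy from the mutant when it falls below the recombination
+ # constant; 'exp' strategies copy a contiguous (modulo) run of
+ # parameters, stopping at the first failure. Both variants share the
+ # same underlying mutation functions, hence the two lookup tables below.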
+ _binomial = {'best1bin': '_best1', + 'randtobest1bin': '_randtobest1', + 'currenttobest1bin': '_currenttobest1', + 'best2bin': '_best2', + 'rand2bin': '_rand2', + 'rand1bin': '_rand1'} + _exponential = {'best1exp': '_best1', + 'rand1exp': '_rand1', + 'randtobest1exp': '_randtobest1', + 'currenttobest1exp': '_currenttobest1', + 'best2exp': '_best2', + 'rand2exp': '_rand2'} + + __init_error_msg = ("The population initialization method must be one of " + "'latinhypercube' or 'random', or an array of shape " + "(M, N) where N is the number of parameters and M>5") + + def __init__(self, func, bounds, args=(), + strategy='best1bin', maxiter=1000, popsize=15, + tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None, + maxfun=np.inf, callback=None, disp=False, polish=True, + init='latinhypercube', atol=0, updating='immediate', + workers=1): + + if strategy in self._binomial: + self.mutation_func = getattr(self, self._binomial[strategy]) + elif strategy in self._exponential: + self.mutation_func = getattr(self, self._exponential[strategy]) + else: + raise ValueError("Please select a valid mutation strategy") + self.strategy = strategy + + self.callback = callback + self.polish = polish + + # set the updating / parallelisation options + if updating in ['immediate', 'deferred']: + self._updating = updating + + # want to use parallelisation, but updating is immediate + if workers != 1 and updating == 'immediate': + warnings.warn("differential_evolution: the 'workers' keyword has" + " overridden updating='immediate' to" + " updating='deferred'", UserWarning) + self._updating = 'deferred' + + # an object with a map method. + self._mapwrapper = MapWrapper(workers) + + # relative and absolute tolerances for convergence + self.tol, self.atol = tol, atol + + # Mutation constant should be in [0, 2). If specified as a sequence + # then dithering is performed. + self.scale = mutation + if (not np.all(np.isfinite(mutation)) or + np.any(np.array(mutation) >= 2) or + np.any(np.array(mutation) < 0)): + raise ValueError('The mutation constant must be a float in ' + 'U[0, 2), or specified as a tuple(min, max)' + ' where min < max and min, max are in U[0, 2).') + + self.dither = None + if hasattr(mutation, '__iter__') and len(mutation) > 1: + self.dither = [mutation[0], mutation[1]] + self.dither.sort() + + self.cross_over_probability = recombination + + # we create a wrapped function to allow the use of map (and Pool.map + # in the future) + self.func = _FunctionWrapper(func, args) + self.args = args + + # convert tuple of lower and upper bounds to limits + # [(low_0, high_0), ..., (low_n, high_n] + # -> [[low_0, ..., low_n], [high_0, ..., high_n]] + self.limits = np.array(bounds, dtype='float').T + if (np.size(self.limits, 0) != 2 or not + np.all(np.isfinite(self.limits))): + raise ValueError('bounds should be a sequence containing ' + 'real valued (min, max) pairs for each value' + ' in x') + + if maxiter is None: # the default used to be None + maxiter = 1000 + self.maxiter = maxiter + if maxfun is None: # the default used to be None + maxfun = np.inf + self.maxfun = maxfun + + # population is scaled to between [0, 1]. + # We have to scale between parameter <-> population + # save these arguments for _scale_parameter and + # _unscale_parameter. 
This is an optimization + self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1]) + self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1]) + + self.parameter_count = np.size(self.limits, 1) + + self.random_number_generator = check_random_state(seed) + + # default population initialization is a latin hypercube design, but + # there are other population initializations possible. + # the minimum is 5 because 'best2bin' requires a population that's at + # least 5 long + self.num_population_members = max(5, popsize * self.parameter_count) + + self.population_shape = (self.num_population_members, + self.parameter_count) + + self._nfev = 0 + if isinstance(init, string_types): + if init == 'latinhypercube': + self.init_population_lhs() + elif init == 'random': + self.init_population_random() + else: + raise ValueError(self.__init_error_msg) + else: + self.init_population_array(init) + + self.disp = disp + + def init_population_lhs(self): + """ + Initializes the population with Latin Hypercube Sampling. + Latin Hypercube Sampling ensures that each parameter is uniformly + sampled over its range. + """ + rng = self.random_number_generator + + # Each parameter range needs to be sampled uniformly. The scaled + # parameter range ([0, 1)) needs to be split into + # `self.num_population_members` segments, each of which has the following + # size: + segsize = 1.0 / self.num_population_members + + # Within each segment we sample from a uniform random distribution. + # We need to do this sampling for each parameter. + samples = (segsize * rng.random_sample(self.population_shape) + + # Offset each segment to cover the entire parameter range [0, 1) + + np.linspace(0., 1., self.num_population_members, + endpoint=False)[:, np.newaxis]) + + # Create an array for population of candidate solutions. + self.population = np.zeros_like(samples) + + # Initialize population of candidate solutions by permutation of the + # random samples. + for j in range(self.parameter_count): + order = rng.permutation(range(self.num_population_members)) + self.population[:, j] = samples[order, j] + + # reset population energies + self.population_energies = np.full(self.num_population_members, + np.inf) + + # reset number of function evaluations counter + self._nfev = 0 + + def init_population_random(self): + """ + Initialises the population at random. This type of initialization + can possess clustering, Latin Hypercube sampling is generally better. + """ + rng = self.random_number_generator + self.population = rng.random_sample(self.population_shape) + + # reset population energies + self.population_energies = np.full(self.num_population_members, + np.inf) + + # reset number of function evaluations counter + self._nfev = 0 + + def init_population_array(self, init): + """ + Initialises the population with a user specified population. + + Parameters + ---------- + init : np.ndarray + Array specifying subset of the initial population. The array should + have shape (M, len(x)), where len(x) is the number of parameters. + The population is clipped to the lower and upper bounds. 
+ """ + # make sure you're using a float array + popn = np.asfarray(init) + + if (np.size(popn, 0) < 5 or + popn.shape[1] != self.parameter_count or + len(popn.shape) != 2): + raise ValueError("The population supplied needs to have shape" + " (M, len(x)), where M > 4.") + + # scale values and clip to bounds, assigning to population + self.population = np.clip(self._unscale_parameters(popn), 0, 1) + + self.num_population_members = np.size(self.population, 0) + + self.population_shape = (self.num_population_members, + self.parameter_count) + + # reset population energies + self.population_energies = (np.ones(self.num_population_members) * + np.inf) + + # reset number of function evaluations counter + self._nfev = 0 + + @property + def x(self): + """ + The best solution from the solver + """ + return self._scale_parameters(self.population[0]) + + @property + def convergence(self): + """ + The standard deviation of the population energies divided by their + mean. + """ + if np.any(np.isinf(self.population_energies)): + return np.inf + return (np.std(self.population_energies) / + np.abs(np.mean(self.population_energies) + _MACHEPS)) + + def converged(self): + """ + Return True if the solver has converged. + """ + return (np.std(self.population_energies) <= + self.atol + + self.tol * np.abs(np.mean(self.population_energies))) + + def solve(self): + """ + Runs the DifferentialEvolutionSolver. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. If `polish` + was employed, and a lower minimum was obtained by the polishing, + then OptimizeResult also contains the ``jac`` attribute. + """ + nit, warning_flag = 0, False + status_message = _status_message['success'] + + # The population may have just been initialized (all entries are + # np.inf). If it has you have to calculate the initial energies. + # Although this is also done in the evolve generator it's possible + # that someone can set maxiter=0, at which point we still want the + # initial energies to be calculated (the following loop isn't run). + if np.all(np.isinf(self.population_energies)): + self.population_energies[:] = self._calculate_population_energies( + self.population) + self._promote_lowest_energy() + + # do the optimisation. + for nit in xrange(1, self.maxiter + 1): + # evolve the population by a generation + try: + next(self) + except StopIteration: + warning_flag = True + if self._nfev > self.maxfun: + status_message = _status_message['maxfev'] + elif self._nfev == self.maxfun: + status_message = ('Maximum number of function evaluations' + ' has been reached.') + break + + if self.disp: + print("differential_evolution step %d: f(x)= %g" + % (nit, + self.population_energies[0])) + + # should the solver terminate? 
+ convergence = self.convergence + + if (self.callback and + self.callback(self._scale_parameters(self.population[0]), + convergence=self.tol / convergence) is True): + + warning_flag = True + status_message = ('callback function requested stop early ' + 'by returning True') + break + + if np.any(np.isinf(self.population_energies)): + intol = False + else: + intol = (np.std(self.population_energies) <= + self.atol + + self.tol * np.abs(np.mean(self.population_energies))) + if warning_flag or intol: + break + + else: + status_message = _status_message['maxiter'] + warning_flag = True + + DE_result = OptimizeResult( + x=self.x, + fun=self.population_energies[0], + nfev=self._nfev, + nit=nit, + message=status_message, + success=(warning_flag is not True)) + + if self.polish: + result = minimize(self.func, + np.copy(DE_result.x), + method='L-BFGS-B', + bounds=self.limits.T) + + self._nfev += result.nfev + DE_result.nfev = self._nfev + + if result.fun < DE_result.fun: + DE_result.fun = result.fun + DE_result.x = result.x + DE_result.jac = result.jac + # to keep internal state consistent + self.population_energies[0] = result.fun + self.population[0] = self._unscale_parameters(result.x) + + return DE_result + + def _calculate_population_energies(self, population): + """ + Calculate the energies of all the population members at the same time. + + Parameters + ---------- + population : ndarray + An array of parameter vectors normalised to [0, 1] using lower + and upper limits. Has shape ``(np.size(population, 0), len(x))``. + + Returns + ------- + energies : ndarray + An array of energies corresponding to each population member. If + maxfun will be exceeded during this call, then the number of + function evaluations will be reduced and energies will be + right-padded with np.inf. Has shape ``(np.size(population, 0),)`` + """ + num_members = np.size(population, 0) + nfevs = min(num_members, + self.maxfun - num_members) + + energies = np.full(num_members, np.inf) + + parameters_pop = self._scale_parameters(population) + try: + calc_energies = list(self._mapwrapper(self.func, + parameters_pop[0:nfevs])) + energies[0:nfevs] = calc_energies + except (TypeError, ValueError): + # wrong number of arguments for _mapwrapper + # or wrong length returned from the mapper + raise RuntimeError("The map-like callable must be of the" + " form f(func, iterable), returning a sequence" + " of numbers the same length as 'iterable'") + + self._nfev += nfevs + + return energies + + def _promote_lowest_energy(self): + # promotes the lowest energy to the first entry in the population + l = np.argmin(self.population_energies) + + # put the lowest energy into the best solution position. + self.population_energies[[0, l]] = self.population_energies[[l, 0]] + self.population[[0, l], :] = self.population[[l, 0], :] + + def __iter__(self): + return self + + def __enter__(self): + return self + + def __exit__(self, *args): + # to make sure resources are closed down + self._mapwrapper.close() + + def __del__(self): + # to make sure resources are closed down + self._mapwrapper.close() + + def __next__(self): + """ + Evolve the population by a single generation + + Returns + ------- + x : ndarray + The best solution from the solver. + fun : float + Value of objective function obtained from the best solution. + """ + # the population may have just been initialized (all entries are + # np.inf). 
If it has you have to calculate the initial energies + if np.all(np.isinf(self.population_energies)): + self.population_energies[:] = self._calculate_population_energies( + self.population) + self._promote_lowest_energy() + + if self.dither is not None: + self.scale = (self.random_number_generator.rand() + * (self.dither[1] - self.dither[0]) + self.dither[0]) + + if self._updating == 'immediate': + # update best solution immediately + for candidate in range(self.num_population_members): + if self._nfev > self.maxfun: + raise StopIteration + + # create a trial solution + trial = self._mutate(candidate) + + # ensuring that it's in the range [0, 1) + self._ensure_constraint(trial) + + # scale from [0, 1) to the actual parameter value + parameters = self._scale_parameters(trial) + + # determine the energy of the objective function + energy = self.func(parameters) + self._nfev += 1 + + # if the energy of the trial candidate is lower than the + # original population member then replace it + if energy < self.population_energies[candidate]: + self.population[candidate] = trial + self.population_energies[candidate] = energy + + # if the trial candidate also has a lower energy than the + # best solution then promote it to the best solution. + if energy < self.population_energies[0]: + self._promote_lowest_energy() + + elif self._updating == 'deferred': + # update best solution once per generation + if self._nfev >= self.maxfun: + raise StopIteration + + # 'deferred' approach, vectorised form. + # create trial solutions + trial_pop = np.array( + [self._mutate(i) for i in range(self.num_population_members)]) + + # enforce bounds + self._ensure_constraint(trial_pop) + + # determine the energies of the objective function + trial_energies = self._calculate_population_energies(trial_pop) + + # which solutions are improved? + loc = trial_energies < self.population_energies + self.population = np.where(loc[:, np.newaxis], + trial_pop, + self.population) + self.population_energies = np.where(loc, + trial_energies, + self.population_energies) + + # make sure the best solution is updated if updating='deferred'. + # put the lowest energy into the best solution position. 
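+ # (promotion is a swap of row 0 with the argmin row, so population[0]
+ # and population_energies[0] always hold the current best solution)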
+ self._promote_lowest_energy() + + return self.x, self.population_energies[0] + + next = __next__ + + def _scale_parameters(self, trial): + """Scale from a number between 0 and 1 to parameters.""" + return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2 + + def _unscale_parameters(self, parameters): + """Scale from parameters to a number between 0 and 1.""" + return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5 + + def _ensure_constraint(self, trial): + """Make sure the parameters lie between the limits.""" + mask = np.where((trial > 1) | (trial < 0)) + trial[mask] = self.random_number_generator.rand(mask[0].size) + + def _mutate(self, candidate): + """Create a trial vector based on a mutation strategy.""" + trial = np.copy(self.population[candidate]) + + rng = self.random_number_generator + + fill_point = rng.randint(0, self.parameter_count) + + if self.strategy in ['currenttobest1exp', 'currenttobest1bin']: + bprime = self.mutation_func(candidate, + self._select_samples(candidate, 5)) + else: + bprime = self.mutation_func(self._select_samples(candidate, 5)) + + if self.strategy in self._binomial: + crossovers = rng.rand(self.parameter_count) + crossovers = crossovers < self.cross_over_probability + # the last one is always from the bprime vector for binomial + # If you fill in modulo with a loop you have to set the last one to + # true. If you don't use a loop then you can have any random entry + # be True. + crossovers[fill_point] = True + trial = np.where(crossovers, bprime, trial) + return trial + + elif self.strategy in self._exponential: + i = 0 + while (i < self.parameter_count and + rng.rand() < self.cross_over_probability): + + trial[fill_point] = bprime[fill_point] + fill_point = (fill_point + 1) % self.parameter_count + i += 1 + + return trial + + def _best1(self, samples): + """best1bin, best1exp""" + r0, r1 = samples[:2] + return (self.population[0] + self.scale * + (self.population[r0] - self.population[r1])) + + def _rand1(self, samples): + """rand1bin, rand1exp""" + r0, r1, r2 = samples[:3] + return (self.population[r0] + self.scale * + (self.population[r1] - self.population[r2])) + + def _randtobest1(self, samples): + """randtobest1bin, randtobest1exp""" + r0, r1, r2 = samples[:3] + bprime = np.copy(self.population[r0]) + bprime += self.scale * (self.population[0] - bprime) + bprime += self.scale * (self.population[r1] - + self.population[r2]) + return bprime + + def _currenttobest1(self, candidate, samples): + """currenttobest1bin, currenttobest1exp""" + r0, r1 = samples[:2] + bprime = (self.population[candidate] + self.scale * + (self.population[0] - self.population[candidate] + + self.population[r0] - self.population[r1])) + return bprime + + def _best2(self, samples): + """best2bin, best2exp""" + r0, r1, r2, r3 = samples[:4] + bprime = (self.population[0] + self.scale * + (self.population[r0] + self.population[r1] - + self.population[r2] - self.population[r3])) + + return bprime + + def _rand2(self, samples): + """rand2bin, rand2exp""" + r0, r1, r2, r3, r4 = samples + bprime = (self.population[r0] + self.scale * + (self.population[r1] + self.population[r2] - + self.population[r3] - self.population[r4])) + + return bprime + + def _select_samples(self, candidate, number_samples): + """ + obtain random integers from range(self.num_population_members), + without replacement. You can't have the original candidate either. 
+ """ + idxs = list(range(self.num_population_members)) + idxs.remove(candidate) + self.random_number_generator.shuffle(idxs) + idxs = idxs[:number_samples] + return idxs + + +class _FunctionWrapper(object): + """ + Object to wrap user cost function, allowing picklability + """ + def __init__(self, f, args): + self.f = f + self.args = [] if args is None else args + + def __call__(self, x): + return self.f(x, *self.args) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentialevolution.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentialevolution.pyc new file mode 100644 index 0000000..4b76139 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_differentialevolution.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_dual_annealing.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_dual_annealing.py new file mode 100644 index 0000000..ca9e940 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_dual_annealing.py @@ -0,0 +1,672 @@ +# Dual Annealing implementation. +# Copyright (c) 2018 Sylvain Gubian <sylvain.gubian@pmi.com>, +# Yang Xiang <yang.xiang@pmi.com> +# Author: Sylvain Gubian, Yang Xiang, PMP S.A. + +""" +A Dual Annealing global optimization algorithm +""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.optimize import OptimizeResult +from scipy.optimize import minimize +from scipy.special import gammaln +from scipy._lib._util import check_random_state + + +__all__ = ['dual_annealing'] + + +class VisitingDistribution(object): + """ + Class used to generate new coordinates based on the distorted + Cauchy-Lorentz distribution. Depending on the steps within the strategy + chain, the class implements the strategy for generating new location + changes. + + Parameters + ---------- + lb : array_like + A 1-D numpy ndarray containing lower bounds of the generated + components. Neither NaN or inf are allowed. + ub : array_like + A 1-D numpy ndarray containing upper bounds for the generated + components. Neither NaN or inf are allowed. + visiting_param : float + Parameter for visiting distribution. Default value is 2.62. + Higher values give the visiting distribution a heavier tail, this + makes the algorithm jump to a more distant region. + The value range is (0, 3]. + rand_state : RandomState object + A numpy.random.RandomState object for using the current state of the + created random generator container. 
+ """ + TAIL_LIMIT = 1.e8 + MIN_VISIT_BOUND = 1.e-10 + + def __init__(self, lb, ub, visiting_param, rand_state): + self.visiting_param = visiting_param + self.rand_state = rand_state + self.lower = lb + self.upper = ub + self.bound_range = ub - lb + + def visiting(self, x, step, temperature): + """ Based on the step in the strategy chain, new coordinated are + generated by changing all components is the same time or only + one of them, the new values are computed with visit_fn method + """ + dim = x.size + if step < dim: + # Changing all coordinates with a new visting value + visits = np.array([self.visit_fn( + temperature) for _ in range(dim)]) + upper_sample = self.rand_state.random_sample() + lower_sample = self.rand_state.random_sample() + visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample + visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample + x_visit = visits + x + a = x_visit - self.lower + b = np.fmod(a, self.bound_range) + self.bound_range + x_visit = np.fmod(b, self.bound_range) + self.lower + x_visit[np.fabs( + x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10 + else: + # Changing only one coordinate at a time based on strategy + # chain step + x_visit = np.copy(x) + visit = self.visit_fn(temperature) + if visit > self.TAIL_LIMIT: + visit = self.TAIL_LIMIT * self.rand_state.random_sample() + elif visit < -self.TAIL_LIMIT: + visit = -self.TAIL_LIMIT * self.rand_state.random_sample() + index = step - dim + x_visit[index] = visit + x[index] + a = x_visit[index] - self.lower[index] + b = np.fmod(a, self.bound_range[index]) + self.bound_range[index] + x_visit[index] = np.fmod(b, self.bound_range[ + index]) + self.lower[index] + if np.fabs(x_visit[index] - self.lower[ + index]) < self.MIN_VISIT_BOUND: + x_visit[index] += self.MIN_VISIT_BOUND + return x_visit + + def visit_fn(self, temperature): + """ Formula Visita from p. 405 of reference [2] """ + factor1 = np.exp(np.log(temperature) / (self.visiting_param - 1.0)) + factor2 = np.exp((4.0 - self.visiting_param) * np.log( + self.visiting_param - 1.0)) + factor3 = np.exp((2.0 - self.visiting_param) * np.log(2.0) / ( + self.visiting_param - 1.0)) + factor4 = np.sqrt(np.pi) * factor1 * factor2 / (factor3 * ( + 3.0 - self.visiting_param)) + factor5 = 1.0 / (self.visiting_param - 1.0) - 0.5 + d1 = 2.0 - factor5 + factor6 = np.pi * (1.0 - factor5) / np.sin( + np.pi * (1.0 - factor5)) / np.exp(gammaln(d1)) + sigmax = np.exp(-(self.visiting_param - 1.0) * np.log( + factor6 / factor4) / (3.0 - self.visiting_param)) + x = sigmax * self.rand_state.normal() + y = self.rand_state.normal() + den = np.exp( + (self.visiting_param - 1.0) * np.log((np.fabs(y))) / ( + 3.0 - self.visiting_param)) + return x / den + + +class EnergyState(object): + """ + Class used to record the energy state. At any time, it knows what is the + currently used coordinates and the most recent best location. + + Parameters + ---------- + lower : array_like + A 1-D numpy ndarray containing lower bounds for generating an initial + random components in the `reset` method. + upper : array_like + A 1-D numpy ndarray containing upper bounds for generating an initial + random components in the `reset` method + components. Neither NaN or inf are allowed. + callback : callable, ``callback(x, f, context)``, optional + A callback function which will be called for all minima found. 
+ ``x`` and ``f`` are the coordinates and function value of the
+ latest minimum found, and `context` has value in [0, 1, 2]
+ """
+ # Maximum number of trials for generating a valid starting point
+ MAX_REINIT_COUNT = 1000
+
+ def __init__(self, lower, upper, callback=None):
+ self.ebest = None
+ self.current_energy = None
+ self.current_location = None
+ self.xbest = None
+ self.lower = lower
+ self.upper = upper
+ self.callback = callback
+
+ def reset(self, func_wrapper, rand_state, x0=None):
+ """
+ Initialize the current location in the search domain. If `x0` is not
+ provided, a random location within the bounds is generated.
+ """
+ if x0 is None:
+ self.current_location = self.lower + rand_state.random_sample(
+ len(self.lower)) * (self.upper - self.lower)
+ else:
+ self.current_location = np.copy(x0)
+ init_error = True
+ reinit_counter = 0
+ while init_error:
+ self.current_energy = func_wrapper.fun(self.current_location)
+ if self.current_energy is None:
+ raise ValueError('Objective function is returning None')
+ if (not np.isfinite(self.current_energy) or np.isnan(
+ self.current_energy)):
+ if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
+ init_error = False
+ message = (
+ 'Stopping algorithm because the function '
+ 'creates NaN or (+/-) infinity values even when '
+ 'trying new random parameters'
+ )
+ raise ValueError(message)
+ self.current_location = self.lower + rand_state.random_sample(
+ self.lower.size) * (self.upper - self.lower)
+ reinit_counter += 1
+ else:
+ init_error = False
+ # If first time reset, initialize ebest and xbest
+ if self.ebest is None and self.xbest is None:
+ self.ebest = self.current_energy
+ self.xbest = np.copy(self.current_location)
+ # Otherwise, we keep them in case of reannealing reset
+
+ def update_best(self, e, x, context):
+ self.ebest = e
+ self.xbest = np.copy(x)
+ if self.callback is not None:
+ val = self.callback(x, e, context)
+ if val is not None:
+ if val:
+ return('Callback function requested to stop early by '
+ 'returning True')
+
+ def update_current(self, e, x):
+ self.current_energy = e
+ self.current_location = np.copy(x)
+
+
+class StrategyChain(object):
+ """
+ Class that implements, within a Markov chain, the strategy for location
+ acceptance and local search decision making.
+
+ Parameters
+ ----------
+ acceptance_param : float
+ Parameter for acceptance distribution. It is used to control the
+ probability of acceptance. The lower the acceptance parameter, the
+ smaller the probability of acceptance. Default value is -5.0 with
+ a range (-1e4, -5].
+ visit_dist : VisitingDistribution
+ Instance of `VisitingDistribution` class.
+ func_wrapper : ObjectiveFunWrapper
+ Instance of `ObjectiveFunWrapper` class.
+ minimizer_wrapper: LocalSearchWrapper
+ Instance of `LocalSearchWrapper` class.
+ rand_state : RandomState object
+ A numpy.random.RandomState object for using the current state of the
+ created random generator container.
+ energy_state: EnergyState
+ Instance of `EnergyState` class.
+ """ + def __init__(self, acceptance_param, visit_dist, func_wrapper, + minimizer_wrapper, rand_state, energy_state): + # Local strategy chain minimum energy and location + self.emin = energy_state.current_energy + self.xmin = np.array(energy_state.current_location) + # Global optimizer state + self.energy_state = energy_state + # Acceptance parameter + self.acceptance_param = acceptance_param + # Visiting distribution instance + self.visit_dist = visit_dist + # Wrapper to objective function + self.func_wrapper = func_wrapper + # Wrapper to the local minimizer + self.minimizer_wrapper = minimizer_wrapper + self.not_improved_idx = 0 + self.not_improved_max_idx = 1000 + self._rand_state = rand_state + self.temperature_step = 0 + self.K = 100 * len(energy_state.current_location) + + def accept_reject(self, j, e, x_visit): + r = self._rand_state.random_sample() + pqv_temp = (self.acceptance_param - 1.0) * ( + e - self.energy_state.current_energy) / ( + self.temperature_step + 1.) + if pqv_temp <= 0.: + pqv = 0. + else: + pqv = np.exp(np.log(pqv_temp) / ( + 1. - self.acceptance_param)) + if r <= pqv: + # We accept the new location and update state + self.energy_state.update_current(e, x_visit) + self.xmin = np.copy(self.energy_state.current_location) + + # No improvement for a long time + if self.not_improved_idx >= self.not_improved_max_idx: + if j == 0 or self.energy_state.current_energy < self.emin: + self.emin = self.energy_state.current_energy + self.xmin = np.copy(self.energy_state.current_location) + + def run(self, step, temperature): + self.temperature_step = temperature / float(step + 1) + self.not_improved_idx += 1 + for j in range(self.energy_state.current_location.size * 2): + if j == 0: + if step == 0: + self.energy_state_improved = True + else: + self.energy_state_improved = False + x_visit = self.visit_dist.visiting( + self.energy_state.current_location, j, temperature) + # Calling the objective function + e = self.func_wrapper.fun(x_visit) + if e < self.energy_state.current_energy: + # We have got a better energy value + self.energy_state.update_current(e, x_visit) + if e < self.energy_state.ebest: + val = self.energy_state.update_best(e, x_visit, 0) + if val is not None: + if val: + return val + self.energy_state_improved = True + self.not_improved_idx = 0 + else: + # We have not improved but do we accept the new location? 
+ self.accept_reject(j, e, x_visit)
+ if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
+ return ('Maximum number of function calls reached '
+ 'during annealing')
+ # End of StrategyChain loop
+
+ def local_search(self):
+ # Decision making for performing a local search
+ # based on strategy chain results
+ # If energy has improved, or there has been no improvement for too
+ # long, perform a local search from the best strategy chain location
+ if self.energy_state_improved:
+ # Global energy has improved, let's see if LS improves further
+ e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
+ self.energy_state.ebest)
+ if e < self.energy_state.ebest:
+ self.not_improved_idx = 0
+ val = self.energy_state.update_best(e, x, 1)
+ if val is not None:
+ if val:
+ return val
+ self.energy_state.update_current(e, x)
+ if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
+ return ('Maximum number of function calls reached '
+ 'during local search')
+ # Check probability of a need to perform a LS even if no improvement
+ do_ls = False
+ if self.K < 90 * len(self.energy_state.current_location):
+ pls = np.exp(self.K * (
+ self.energy_state.ebest - self.energy_state.current_energy
+ ) / self.temperature_step)
+ if pls >= self._rand_state.random_sample():
+ do_ls = True
+ # Global energy not improved, let's see what LS gives
+ # on the best strategy chain location
+ if self.not_improved_idx >= self.not_improved_max_idx:
+ do_ls = True
+ if do_ls:
+ e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
+ self.xmin = np.copy(x)
+ self.emin = e
+ self.not_improved_idx = 0
+ self.not_improved_max_idx = self.energy_state.current_location.size
+ if e < self.energy_state.ebest:
+ val = self.energy_state.update_best(
+ self.emin, self.xmin, 2)
+ if val is not None:
+ if val:
+ return val
+ self.energy_state.update_current(e, x)
+ if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
+ return ('Maximum number of function calls reached '
+ 'during dual annealing')
+
+
+class ObjectiveFunWrapper(object):
+
+ def __init__(self, func, maxfun=1e7, *args):
+ self.func = func
+ self.args = args
+ # Number of objective function evaluations
+ self.nfev = 0
+ # Number of gradient function evaluations if used
+ self.ngev = 0
+ # Number of Hessian evaluations of the objective function if used
+ self.nhev = 0
+ self.maxfun = maxfun
+
+ def fun(self, x):
+ self.nfev += 1
+ return self.func(x, *self.args)
+
+
+class LocalSearchWrapper(object):
+ """
+ Class used to wrap around the minimizer used for local search.
+ The default local minimizer is SciPy's L-BFGS-B.
+ """
+
+ LS_MAXITER_RATIO = 6
+ LS_MAXITER_MIN = 100
+ LS_MAXITER_MAX = 1000
+
+ def __init__(self, bounds, func_wrapper, **kwargs):
+ self.func_wrapper = func_wrapper
+ self.kwargs = kwargs
+ self.minimizer = minimize
+ bounds_list = list(zip(*bounds))
+ self.lower = np.array(bounds_list[0])
+ self.upper = np.array(bounds_list[1])
+
+ # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
+ if not self.kwargs:
+ n = len(self.lower)
+ ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
+ self.LS_MAXITER_MIN),
+ self.LS_MAXITER_MAX)
+ self.kwargs['method'] = 'L-BFGS-B'
+ self.kwargs['options'] = {
+ 'maxiter': ls_max_iter,
+ }
+ self.kwargs['bounds'] = list(zip(self.lower, self.upper))
+
+ def local_search(self, x, e):
+ # Run local search from the given x location where energy value is e
+ x_tmp = np.copy(x)
+ mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
+ if 'njev' in mres.keys():
+ self.func_wrapper.ngev += mres.njev
+ if 'nhev' in mres.keys():
+ self.func_wrapper.nhev += mres.nhev
+ # Check if the new value is valid
+ is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
+ in_bounds = np.all(mres.x >= self.lower) and np.all(
+ mres.x <= self.upper)
+ is_valid = is_finite and in_bounds
+
+ # Use the new point only if it is valid and returns a better result
+ if is_valid and mres.fun < e:
+ return mres.fun, mres.x
+ else:
+ return e, x_tmp
+
+
+def dual_annealing(func, bounds, args=(), maxiter=1000,
+ local_search_options={}, initial_temp=5230.,
+ restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
+ maxfun=1e7, seed=None, no_local_search=False,
+ callback=None, x0=None):
+ """
+ Find the global minimum of a function using Dual Annealing.
+
+ Parameters
+ ----------
+ func : callable
+ The objective function to be minimized. Must be in the form
+ ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+ and ``args`` is a tuple of any additional fixed parameters needed to
+ completely specify the function.
+ bounds : sequence, shape (n, 2)
+ Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
+ defining bounds for the objective function parameter.
+ args : tuple, optional
+ Any additional fixed parameters needed to completely specify the
+ objective function.
+ maxiter : int, optional
+ The maximum number of global search iterations. Default value is 1000.
+ local_search_options : dict, optional
+ Extra keyword arguments to be passed to the local minimizer
+ (`minimize`). Some important options could be:
+ ``method`` for the minimizer method to use and ``args`` for
+ objective function additional arguments.
+ initial_temp : float, optional
+ The initial temperature, use higher values to facilitate a wider
+ search of the energy landscape, allowing dual_annealing to escape
+ local minima that it is trapped in. Default value is 5230. Range is
+ (0.01, 5.e4].
+ restart_temp_ratio : float, optional
+ During the annealing process, the temperature decreases; when it
+ reaches ``initial_temp * restart_temp_ratio``, the reannealing process
+ is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
+ visit : float, optional
+ Parameter for visiting distribution. Default value is 2.62. Higher
+ values give the visiting distribution a heavier tail, which makes
+ the algorithm jump to a more distant region. The value range is (0, 3].
+ accept : float, optional
+ Parameter for acceptance distribution. It is used to control the
+ probability of acceptance. The lower the acceptance parameter, the
+ smaller the probability of acceptance. Default value is -5.0 with
+ a range (-1e4, -5].
+ maxfun : int, optional
+ Soft limit for the number of objective function calls. If the
+ algorithm is in the middle of a local search, this number will be
+ exceeded, and the algorithm will stop just after the local search is
+ done. Default value is 1e7.
+ seed : {int or `numpy.random.RandomState` instance}, optional
+ If `seed` is not specified the `numpy.random.RandomState` singleton is
+ used.
+ If `seed` is an int, a new ``RandomState`` instance is used,
+ seeded with `seed`.
+ If `seed` is already a ``RandomState`` instance, then that
+ instance is used.
+ Specify `seed` for repeatable minimizations. The random numbers
+ generated with this seed only affect the visiting distribution
+ function and new coordinate generation.
+ no_local_search : bool, optional
+ If `no_local_search` is set to True, a traditional Generalized
+ Simulated Annealing will be performed with no local search
+ strategy applied.
+ callback : callable, optional
+ A callback function with signature ``callback(x, f, context)``,
+ which will be called for all minima found.
+ ``x`` and ``f`` are the coordinates and function value of the
+ latest minimum found, and ``context`` has value in [0, 1, 2], with the
+ following meaning:
+
+ - 0: minimum detected in the annealing process.
+ - 1: detection occurred in the local search process.
+ - 2: detection done in the dual annealing process.
+
+ If the callback implementation returns True, the algorithm will stop.
+ x0 : ndarray, shape(n,), optional
+ Coordinates of a single n-dimensional starting point.
+
+ Returns
+ -------
+ res : OptimizeResult
+ The optimization result represented as a `OptimizeResult` object.
+ Important attributes are: ``x`` the solution array, ``fun`` the value
+ of the function at the solution, and ``message`` which describes the
+ cause of the termination.
+ See `OptimizeResult` for a description of other attributes.
+
+ Notes
+ -----
+ This function implements the Dual Annealing optimization. This stochastic
+ approach derived from [3]_ combines the generalization of CSA (Classical
+ Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
+ to a strategy for applying a local search on accepted locations [4]_.
+ An alternative implementation of this same algorithm is described in [5]_
+ and benchmarks are presented in [6]_. This approach introduces an advanced
+ method to refine the solution found by the generalized annealing
+ process. This algorithm uses a distorted Cauchy-Lorentz visiting
+ distribution, with its shape controlled by the parameter :math:`q_{v}`
+
+ .. math::
+
+ g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
+ \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
+ \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
+ \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
+ \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
+
+ Where :math:`t` is the artificial time. This visiting distribution is used
+ to generate a trial jump distance :math:`\\Delta x(t)` of variable
+ :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
+
+ From the starting point, after calling the visiting distribution
+ function, the acceptance probability is computed as follows:
+
+ .. math::
+
+ p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
+ \\frac{1}{1-q_{a}}}\\}}
+
+ Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
+ acceptance probability is assigned to the cases where
+
+ .. math::
+
+ [1-(1-q_{a}) \\beta \\Delta E] < 0
+
+ The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
+
+ .. math::
+
+ T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
+ 1 + t\\right)^{q_{v}-1}-1}
+
+ Where :math:`q_{v}` is the visiting parameter.
+
+ .. versionadded:: 1.2.0
+
+ References
+ ----------
+ .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
+ statistics. Journal of Statistical Physics, 52, 479-487 (1998).
+ .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
+ Physica A, 233, 395-406 (1996).
+ .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
+ Annealing Algorithm and Its Application to the Thomson Model.
+ Physics Letters A, 233, 216-220 (1997).
+ .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
+ Annealing. Physical Review E, 62, 4473 (2000).
+ .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
+ Simulated Annealing for Efficient Global Optimization: the GenSA
+ Package for R. The R Journal, Volume 5/1 (2013).
+ .. [6] Mullen, K. Continuous Global Optimization in R. Journal of
+ Statistical Software, 60(6), 1 - 45, (2014). DOI:10.18637/jss.v060.i06
+
+ Examples
+ --------
+ The following example is a 10-dimensional problem, with many local minima.
+ The function involved is called Rastrigin
+ (https://en.wikipedia.org/wiki/Rastrigin_function)
+
+ >>> import numpy as np
+ >>> from scipy.optimize import dual_annealing
+ >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
+ >>> lw = [-5.12] * 10
+ >>> up = [5.12] * 10
+ >>> ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
+ >>> print("global minimum: xmin = {0}, f(xmin) = {1:.6f}".format(
+ ... ret.x, ret.fun))
+ global minimum: xmin = [-4.26437714e-09 -3.91699361e-09 -1.86149218e-09 -3.97165720e-09
+ -6.29151648e-09 -6.53145322e-09 -3.93616815e-09 -6.55623025e-09
+ -6.05775280e-09 -5.00668935e-09], f(xmin) = 0.000000
+
+ """
+ if x0 is not None and not len(x0) == len(bounds):
+ raise ValueError('Bounds size does not match x0')
+
+ lu = list(zip(*bounds))
+ lower = np.array(lu[0])
+ upper = np.array(lu[1])
+ # Check that restart temperature ratio is correct
+ if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
+ raise ValueError('Restart temperature ratio has to be in range (0, 1)')
+ # Checking bounds are valid
+ if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
+ np.isnan(lower)) or np.any(np.isnan(upper))):
+ raise ValueError('Some bounds values are inf or nan')
+ # Checking that bounds are consistent
+ if not np.all(lower < upper):
+ raise ValueError('Bounds are not consistent min < max')
+ # Checking that bounds are the same length
+ if not len(lower) == len(upper):
+ raise ValueError('Bounds do not have the same dimensions')
+
+ # Wrapper for the objective function
+ func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
+ # Wrapper for the minimizer
+ minimizer_wrapper = LocalSearchWrapper(
+ bounds, func_wrapper, **local_search_options)
+ # Initialization of RandomState for reproducible runs if seed provided
+ rand_state = check_random_state(seed)
+ # Initialization of the energy state
+ energy_state = EnergyState(lower, upper, callback)
+ energy_state.reset(func_wrapper, rand_state, x0)
+ # Minimum value of annealing temperature reached to perform
+ # re-annealing
+ temperature_restart = initial_temp * restart_temp_ratio
+ # VisitingDistribution instance
+ visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
+ # Strategy chain instance
+ strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
+ minimizer_wrapper, rand_state, energy_state)
+ # Run the search loop
+ need_to_stop = False
+ iteration = 0
+ message = []
+ t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
+ while(not need_to_stop):
+ for i in range(maxiter):
+ # Compute temperature for this step
+ s = float(i) + 2.0
+ t2 = np.exp((visit - 1) * np.log(s)) - 1.0
+ temperature = initial_temp * t1 / t2
+ if iteration >= maxiter:
+ message.append("Maximum number of iterations reached")
+ need_to_stop = True
+ break
+ # Need a re-annealing process?
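+ # Reannealing: once the temperature has decayed below
+ # initial_temp * restart_temp_ratio, the energy state is re-seeded at
+ # a fresh random location (the best-so-far solution is kept) and the
+ # temperature schedule starts over.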
+ if temperature < temperature_restart:
+ energy_state.reset(func_wrapper, rand_state)
+ break
+ # starting strategy chain
+ val = strategy_chain.run(i, temperature)
+ if val is not None:
+ message.append(val)
+ need_to_stop = True
+ break
+ # Possible local search at the end of the strategy chain
+ if not no_local_search:
+ val = strategy_chain.local_search()
+ if val is not None:
+ message.append(val)
+ need_to_stop = True
+ break
+ iteration += 1
+
+ # Return the OptimizeResult
+ res = OptimizeResult()
+ res.x = energy_state.xbest
+ res.fun = energy_state.ebest
+ res.nit = iteration
+ res.nfev = func_wrapper.nfev
+ res.njev = func_wrapper.ngev
+ res.nhev = func_wrapper.nhev
+ res.message = message
+ return res
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_dual_annealing.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_dual_annealing.pyc
new file mode 100644
index 0000000..50f81e5
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_dual_annealing.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_group_columns.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/_group_columns.so
new file mode 100755
index 0000000..23c8fac
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_group_columns.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_hessian_update_strategy.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_hessian_update_strategy.py
new file mode 100644
index 0000000..b61fa40
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_hessian_update_strategy.py
@@ -0,0 +1,430 @@
+"""Hessian update strategies for quasi-Newton optimization methods."""
+from __future__ import division, print_function, absolute_import
+import numpy as np
+from numpy.linalg import norm
+from scipy.linalg import get_blas_funcs
+from warnings import warn
+
+
+__all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1']
+
+
+class HessianUpdateStrategy(object):
+ """Interface for implementing Hessian update strategies.
+
+ Many optimization methods make use of Hessian (or inverse Hessian)
+ approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS.
+ Some of these approximations, however, do not actually need to store
+ the entire matrix or can compute the internal matrix product with a
+ given vector in a very efficient manner. This class serves as an
+ abstract interface between the optimization algorithm and the
+ quasi-Newton update strategies, giving freedom of implementation
+ to store and update the internal matrix as efficiently as possible.
+ Different choices of initialization and update procedure will result
+ in different quasi-Newton strategies.
+
+ Four methods should be implemented in derived classes: ``initialize``,
+ ``update``, ``dot`` and ``get_matrix``.
+
+ Notes
+ -----
+ Any instance of a class that implements this interface
+ can be accepted by the method ``minimize`` and used by
+ the compatible solvers to approximate the Hessian (or
+ inverse Hessian) used by the optimization algorithms.
+ """
+
+ def initialize(self, n, approx_type):
+ """Initialize internal matrix.
+
+ Allocate internal memory for storing and updating
+ the Hessian or its inverse.
+
+ Parameters
+ ----------
+ n : int
+ Problem dimension.
+ approx_type : {'hess', 'inv_hess'}
+ Selects either the Hessian or the inverse Hessian.
+ When set to 'hess' the Hessian will be stored and updated.
+ When set to 'inv_hess' its inverse will be used instead. + """ + raise NotImplementedError("The method ``initialize(n, approx_type)``" + " is not implemented.") + + def update(self, delta_x, delta_grad): + """Update internal matrix. + + Update Hessian matrix or its inverse (depending on how 'approx_type' + is defined) using information about the last evaluated points. + + Parameters + ---------- + delta_x : ndarray + The difference between two points the gradient + function have been evaluated at: ``delta_x = x2 - x1``. + delta_grad : ndarray + The difference between the gradients: + ``delta_grad = grad(x2) - grad(x1)``. + """ + raise NotImplementedError("The method ``update(delta_x, delta_grad)``" + " is not implemented.") + + def dot(self, p): + """Compute the product of the internal matrix with the given vector. + + Parameters + ---------- + p : array_like + 1-d array representing a vector. + + Returns + ------- + Hp : array + 1-d represents the result of multiplying the approximation matrix + by vector p. + """ + raise NotImplementedError("The method ``dot(p)``" + " is not implemented.") + + def get_matrix(self): + """Return current internal matrix. + + Returns + ------- + H : ndarray, shape (n, n) + Dense matrix containing either the Hessian + or its inverse (depending on how 'approx_type' + is defined). + """ + raise NotImplementedError("The method ``get_matrix(p)``" + " is not implemented.") + + +class FullHessianUpdateStrategy(HessianUpdateStrategy): + """Hessian update strategy with full dimensional internal representation. + """ + _syr = get_blas_funcs('syr', dtype='d') # Symmetric rank 1 update + _syr2 = get_blas_funcs('syr2', dtype='d') # Symmetric rank 2 update + # Symmetric matrix-vector product + _symv = get_blas_funcs('symv', dtype='d') + + def __init__(self, init_scale='auto'): + self.init_scale = init_scale + # Until initialize is called we can't really use the class, + # so it makes sense to set everything to None. + self.first_iteration = None + self.approx_type = None + self.B = None + self.H = None + + def initialize(self, n, approx_type): + """Initialize internal matrix. + + Allocate internal memory for storing and updating + the Hessian or its inverse. + + Parameters + ---------- + n : int + Problem dimension. + approx_type : {'hess', 'inv_hess'} + Selects either the Hessian or the inverse Hessian. + When set to 'hess' the Hessian will be stored and updated. + When set to 'inv_hess' its inverse will be used instead. + """ + self.first_iteration = True + self.n = n + self.approx_type = approx_type + if approx_type not in ('hess', 'inv_hess'): + raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.") + # Create matrix + if self.approx_type == 'hess': + self.B = np.eye(n, dtype=float) + else: + self.H = np.eye(n, dtype=float) + + def _auto_scale(self, delta_x, delta_grad): + # Heuristic to scale matrix at first iteration. + # Described in Nocedal and Wright "Numerical Optimization" + # p.143 formula (6.20). + s_norm2 = np.dot(delta_x, delta_x) + y_norm2 = np.dot(delta_grad, delta_grad) + ys = np.abs(np.dot(delta_grad, delta_x)) + if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0: + return 1 + if self.approx_type == 'hess': + return y_norm2 / ys + else: + return ys / y_norm2 + + def _update_implementation(self, delta_x, delta_grad): + raise NotImplementedError("The method ``_update_implementation``" + " is not implemented.") + + def update(self, delta_x, delta_grad): + """Update internal matrix. 
+ + Update Hessian matrix or its inverse (depending on how 'approx_type' + is defined) using information about the last evaluated points. + + Parameters + ---------- + delta_x : ndarray + The difference between two points the gradient + function have been evaluated at: ``delta_x = x2 - x1``. + delta_grad : ndarray + The difference between the gradients: + ``delta_grad = grad(x2) - grad(x1)``. + """ + if np.all(delta_x == 0.0): + return + if np.all(delta_grad == 0.0): + warn('delta_grad == 0.0. Check if the approximated ' + 'function is linear. If the function is linear ' + 'better results can be obtained by defining the ' + 'Hessian as zero instead of using quasi-Newton ' + 'approximations.', UserWarning) + return + if self.first_iteration: + # Get user specific scale + if self.init_scale == "auto": + scale = self._auto_scale(delta_x, delta_grad) + else: + scale = float(self.init_scale) + # Scale initial matrix with ``scale * np.eye(n)`` + if self.approx_type == 'hess': + self.B *= scale + else: + self.H *= scale + self.first_iteration = False + self._update_implementation(delta_x, delta_grad) + + def dot(self, p): + """Compute the product of the internal matrix with the given vector. + + Parameters + ---------- + p : array_like + 1-d array representing a vector. + + Returns + ------- + Hp : array + 1-d represents the result of multiplying the approximation matrix + by vector p. + """ + if self.approx_type == 'hess': + return self._symv(1, self.B, p) + else: + return self._symv(1, self.H, p) + + def get_matrix(self): + """Return the current internal matrix. + + Returns + ------- + M : ndarray, shape (n, n) + Dense matrix containing either the Hessian or its inverse + (depending on how `approx_type` was defined). + """ + if self.approx_type == 'hess': + M = np.copy(self.B) + else: + M = np.copy(self.H) + li = np.tril_indices_from(M, k=-1) + M[li] = M.T[li] + return M + + +class BFGS(FullHessianUpdateStrategy): + """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. + + Parameters + ---------- + exception_strategy : {'skip_update', 'damp_update'}, optional + Define how to proceed when the curvature condition is violated. + Set it to 'skip_update' to just skip the update. Or, alternatively, + set it to 'damp_update' to interpolate between the actual BFGS + result and the unmodified matrix. Both exceptions strategies + are explained in [1]_, p.536-537. + min_curvature : float + This number, scaled by a normalization factor, defines the + minimum curvature ``dot(delta_grad, delta_x)`` allowed to go + unaffected by the exception strategy. By default is equal to + 1e-8 when ``exception_strategy = 'skip_update'`` and equal + to 0.2 when ``exception_strategy = 'damp_update'``. + init_scale : {float, 'auto'} + Matrix scale at first iteration. At the first + iteration the Hessian matrix or its inverse will be initialized + with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. + Set it to 'auto' in order to use an automatic heuristic for choosing + the initial scale. The heuristic is described in [1]_, p.143. + By default uses 'auto'. + + Notes + ----- + The update is based on the description in [1]_, p.140. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). 
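+
+    Examples
+    --------
+    A minimal sketch of driving the update by hand on the quadratic
+    ``f(x) = x[0]**2 + 5*x[1]**2`` (gradient ``[2*x[0], 10*x[1]]``),
+    assuming ``BFGS`` is re-exported as ``scipy.optimize.BFGS``:
+
+    >>> import numpy as np
+    >>> from scipy.optimize import BFGS
+    >>> hess = BFGS()
+    >>> hess.initialize(n=2, approx_type='hess')
+    >>> grad = lambda x: np.array([2.0*x[0], 10.0*x[1]])
+    >>> x_old, x_new = np.array([1.0, 1.0]), np.array([0.6, 0.4])
+    >>> hess.update(x_new - x_old, grad(x_new) - grad(x_old))
+    >>> hess.get_matrix().shape   # dense 2x2 Hessian approximation
+    (2, 2)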
+ """ + + def __init__(self, exception_strategy='skip_update', min_curvature=None, + init_scale='auto'): + if exception_strategy == 'skip_update': + if min_curvature is not None: + self.min_curvature = min_curvature + else: + self.min_curvature = 1e-8 + elif exception_strategy == 'damp_update': + if min_curvature is not None: + self.min_curvature = min_curvature + else: + self.min_curvature = 0.2 + else: + raise ValueError("`exception_strategy` must be 'skip_update' " + "or 'damp_update'.") + + super(BFGS, self).__init__(init_scale) + self.exception_strategy = exception_strategy + + def _update_inverse_hessian(self, ys, Hy, yHy, s): + """Update the inverse Hessian matrix. + + BFGS update using the formula: + + ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T) + - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)`` + + where ``s = delta_x`` and ``y = delta_grad``. This formula is + equivalent to (6.17) in [1]_ written in a more efficient way + for implementation. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H) + self.H = self._syr((ys+yHy)/ys**2, s, a=self.H) + + def _update_hessian(self, ys, Bs, sBs, y): + """Update the Hessian matrix. + + BFGS update using the formula: + + ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y`` + + where ``s`` is short for ``delta_x`` and ``y`` is short + for ``delta_grad``. Formula (6.19) in [1]_. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + self.B = self._syr(1.0 / ys, y, a=self.B) + self.B = self._syr(-1.0 / sBs, Bs, a=self.B) + + def _update_implementation(self, delta_x, delta_grad): + # Auxiliary variables w and z + if self.approx_type == 'hess': + w = delta_x + z = delta_grad + else: + w = delta_grad + z = delta_x + # Do some common operations + wz = np.dot(w, z) + Mw = self.dot(w) + wMw = Mw.dot(w) + # Guarantee that wMw > 0 by reinitializing matrix. + # While this is always true in exact arithmetics, + # indefinite matrix may appear due to roundoff errors. + if wMw <= 0.0: + scale = self._auto_scale(delta_x, delta_grad) + # Reinitialize matrix + if self.approx_type == 'hess': + self.B = scale * np.eye(self.n, dtype=float) + else: + self.H = scale * np.eye(self.n, dtype=float) + # Do common operations for new matrix + Mw = self.dot(w) + wMw = Mw.dot(w) + # Check if curvature condition is violated + if wz <= self.min_curvature * wMw: + # If the option 'skip_update' is set + # we just skip the update when the condion + # is violated. + if self.exception_strategy == 'skip_update': + return + # If the option 'damp_update' is set we + # interpolate between the actual BFGS + # result and the unmodified matrix. + elif self.exception_strategy == 'damp_update': + update_factor = (1-self.min_curvature) / (1 - wz/wMw) + z = update_factor*z + (1-update_factor)*Mw + wz = np.dot(w, z) + # Update matrix + if self.approx_type == 'hess': + self._update_hessian(wz, Mw, wMw, z) + else: + self._update_inverse_hessian(wz, Mw, wMw, z) + + +class SR1(FullHessianUpdateStrategy): + """Symmetric-rank-1 Hessian update strategy. + + Parameters + ---------- + min_denominator : float + This number, scaled by a normalization factor, + defines the minimum denominator magnitude allowed + in the update. When the condition is violated we skip + the update. By default uses ``1e-8``. + init_scale : {float, 'auto'}, optional + Matrix scale at first iteration. 
At the first + iteration the Hessian matrix or its inverse will be initialized + with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. + Set it to 'auto' in order to use an automatic heuristic for choosing + the initial scale. The heuristic is described in [1]_, p.143. + By default uses 'auto'. + + Notes + ----- + The update is based on the description in [1]_, p.144-146. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + + def __init__(self, min_denominator=1e-8, init_scale='auto'): + self.min_denominator = min_denominator + super(SR1, self).__init__(init_scale) + + def _update_implementation(self, delta_x, delta_grad): + # Auxiliary variables w and z + if self.approx_type == 'hess': + w = delta_x + z = delta_grad + else: + w = delta_grad + z = delta_x + # Do some common operations + Mw = self.dot(w) + z_minus_Mw = z - Mw + denominator = np.dot(w, z_minus_Mw) + # If the denominator is too small + # we just skip the update. + if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw): + return + # Update matrix + if self.approx_type == 'hess': + self.B = self._syr(1/denominator, z_minus_Mw, a=self.B) + else: + self.H = self._syr(1/denominator, z_minus_Mw, a=self.H) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_hessian_update_strategy.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_hessian_update_strategy.pyc new file mode 100644 index 0000000..f942356 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_hessian_update_strategy.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_hungarian.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_hungarian.py new file mode 100644 index 0000000..49b8b50 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_hungarian.py @@ -0,0 +1,282 @@ +# Hungarian algorithm (Kuhn-Munkres) for solving the linear sum assignment +# problem. Taken from scikit-learn. Based on original code by Brian Clapper, +# adapted to NumPy by Gael Varoquaux. +# Further improvements by Ben Root, Vlad Niculae and Lars Buitinck. +# +# Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux +# Author: Brian M. Clapper, Gael Varoquaux +# License: 3-clause BSD + +import numpy as np + + +def linear_sum_assignment(cost_matrix): + """Solve the linear sum assignment problem. + + The linear sum assignment problem is also known as minimum weight matching + in bipartite graphs. A problem instance is described by a matrix C, where + each C[i,j] is the cost of matching vertex i of the first partite set + (a "worker") and vertex j of the second set (a "job"). The goal is to find + a complete assignment of workers to jobs of minimal cost. + + Formally, let X be a boolean matrix where :math:`X[i,j] = 1` iff row i is + assigned to column j. Then the optimal assignment has cost + + .. math:: + \\min \\sum_i \\sum_j C_{i,j} X_{i,j} + + s.t. each row is assignment to at most one column, and each column to at + most one row. + + This function can also solve a generalization of the classic assignment + problem where the cost matrix is rectangular. If it has more rows than + columns, then not every row needs to be assigned to a column, and vice + versa. + + The method used is the Hungarian algorithm, also known as the Munkres or + Kuhn-Munkres algorithm. + + Parameters + ---------- + cost_matrix : array + The cost matrix of the bipartite graph. 
+ + Returns + ------- + row_ind, col_ind : array + An array of row indices and one of corresponding column indices giving + the optimal assignment. The cost of the assignment can be computed + as ``cost_matrix[row_ind, col_ind].sum()``. The row indices will be + sorted; in the case of a square cost matrix they will be equal to + ``numpy.arange(cost_matrix.shape[0])``. + + Notes + ----- + .. versionadded:: 0.17.0 + + Examples + -------- + >>> cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]]) + >>> from scipy.optimize import linear_sum_assignment + >>> row_ind, col_ind = linear_sum_assignment(cost) + >>> col_ind + array([1, 0, 2]) + >>> cost[row_ind, col_ind].sum() + 5 + + References + ---------- + 1. http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html + + 2. Harold W. Kuhn. The Hungarian Method for the assignment problem. + *Naval Research Logistics Quarterly*, 2:83-97, 1955. + + 3. Harold W. Kuhn. Variants of the Hungarian method for assignment + problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956. + + 4. Munkres, J. Algorithms for the Assignment and Transportation Problems. + *J. SIAM*, 5(1):32-38, March, 1957. + + 5. https://en.wikipedia.org/wiki/Hungarian_algorithm + """ + cost_matrix = np.asarray(cost_matrix) + if len(cost_matrix.shape) != 2: + raise ValueError("expected a matrix (2-d array), got a %r array" + % (cost_matrix.shape,)) + + if not (np.issubdtype(cost_matrix.dtype, np.number) or + cost_matrix.dtype == np.dtype(np.bool)): + raise ValueError("expected a matrix containing numerical entries, got %s" + % (cost_matrix.dtype,)) + + if np.any(np.isinf(cost_matrix) | np.isnan(cost_matrix)): + raise ValueError("matrix contains invalid numeric entries") + + if cost_matrix.dtype == np.dtype(np.bool): + cost_matrix = cost_matrix.astype(np.int) + + # The algorithm expects more columns than rows in the cost matrix. + if cost_matrix.shape[1] < cost_matrix.shape[0]: + cost_matrix = cost_matrix.T + transposed = True + else: + transposed = False + + state = _Hungary(cost_matrix) + + # No need to bother with assignments if one of the dimensions + # of the cost matrix is zero-length. + step = None if 0 in cost_matrix.shape else _step1 + + while step is not None: + step = step(state) + + if transposed: + marked = state.marked.T + else: + marked = state.marked + return np.nonzero(marked == 1) + + +class _Hungary(object): + """State of the Hungarian algorithm. + + Parameters + ---------- + cost_matrix : 2D matrix + The cost matrix. Must have shape[1] >= shape[0]. + """ + + def __init__(self, cost_matrix): + self.C = cost_matrix.copy() + + n, m = self.C.shape + self.row_uncovered = np.ones(n, dtype=bool) + self.col_uncovered = np.ones(m, dtype=bool) + self.Z0_r = 0 + self.Z0_c = 0 + self.path = np.zeros((n + m, 2), dtype=int) + self.marked = np.zeros((n, m), dtype=int) + + def _clear_covers(self): + """Clear all covered matrix cells""" + self.row_uncovered[:] = True + self.col_uncovered[:] = True + + +# Individual steps of the algorithm follow, as a state machine: they return +# the next step to be taken (function to be called), if any. + +def _step1(state): + """Steps 1 and 2 in the Wikipedia page.""" + + # Step 1: For each row of the matrix, find the smallest element and + # subtract it from every element in its row. + state.C -= state.C.min(axis=1)[:, np.newaxis] + # Step 2: Find a zero (Z) in the resulting matrix. If there is no + # starred zero in its row or column, star Z. Repeat for each element + # in the matrix. 
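+    # Worked on the docstring example of linear_sum_assignment above,
+    #     C = [[4, 1, 3],
+    #          [2, 0, 5],
+    #          [3, 2, 2]],
+    # step 1 subtracts the row minima (1, 0, 2), leaving a zero in every
+    # row; step 2 below then stars at most one zero per row and column.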
+ for i, j in zip(*np.nonzero(state.C == 0)): + if state.col_uncovered[j] and state.row_uncovered[i]: + state.marked[i, j] = 1 + state.col_uncovered[j] = False + state.row_uncovered[i] = False + + state._clear_covers() + return _step3 + + +def _step3(state): + """ + Cover each column containing a starred zero. If n columns are covered, + the starred zeros describe a complete set of unique assignments. + In this case, Go to DONE, otherwise, Go to Step 4. + """ + marked = (state.marked == 1) + state.col_uncovered[np.any(marked, axis=0)] = False + + if marked.sum() < state.C.shape[0]: + return _step4 + + +def _step4(state): + """ + Find a noncovered zero and prime it. If there is no starred zero + in the row containing this primed zero, Go to Step 5. Otherwise, + cover this row and uncover the column containing the starred + zero. Continue in this manner until there are no uncovered zeros + left. Save the smallest uncovered value and Go to Step 6. + """ + # We convert to int as numpy operations are faster on int + C = (state.C == 0).astype(int) + covered_C = C * state.row_uncovered[:, np.newaxis] + covered_C *= np.asarray(state.col_uncovered, dtype=int) + n = state.C.shape[0] + m = state.C.shape[1] + + while True: + # Find an uncovered zero + row, col = np.unravel_index(np.argmax(covered_C), (n, m)) + if covered_C[row, col] == 0: + return _step6 + else: + state.marked[row, col] = 2 + # Find the first starred element in the row + star_col = np.argmax(state.marked[row] == 1) + if state.marked[row, star_col] != 1: + # Could not find one + state.Z0_r = row + state.Z0_c = col + return _step5 + else: + col = star_col + state.row_uncovered[row] = False + state.col_uncovered[col] = True + covered_C[:, col] = C[:, col] * ( + np.asarray(state.row_uncovered, dtype=int)) + covered_C[row] = 0 + + +def _step5(state): + """ + Construct a series of alternating primed and starred zeros as follows. + Let Z0 represent the uncovered primed zero found in Step 4. + Let Z1 denote the starred zero in the column of Z0 (if any). + Let Z2 denote the primed zero in the row of Z1 (there will always be one). + Continue until the series terminates at a primed zero that has no starred + zero in its column. Unstar each starred zero of the series, star each + primed zero of the series, erase all primes and uncover every line in the + matrix. Return to Step 3 + """ + count = 0 + path = state.path + path[count, 0] = state.Z0_r + path[count, 1] = state.Z0_c + + while True: + # Find the first starred element in the col defined by + # the path. + row = np.argmax(state.marked[:, path[count, 1]] == 1) + if state.marked[row, path[count, 1]] != 1: + # Could not find one + break + else: + count += 1 + path[count, 0] = row + path[count, 1] = path[count - 1, 1] + + # Find the first prime element in the row defined by the + # first path step + col = np.argmax(state.marked[path[count, 0]] == 2) + if state.marked[row, col] != 2: + col = -1 + count += 1 + path[count, 0] = path[count - 1, 0] + path[count, 1] = col + + # Convert paths + for i in range(count + 1): + if state.marked[path[i, 0], path[i, 1]] == 1: + state.marked[path[i, 0], path[i, 1]] = 0 + else: + state.marked[path[i, 0], path[i, 1]] = 1 + + state._clear_covers() + # Erase all prime markings + state.marked[state.marked == 2] = 0 + return _step3 + + +def _step6(state): + """ + Add the value found in Step 4 to every element of each covered row, + and subtract it from every element of each uncovered column. + Return to Step 4 without altering any stars, primes, or covered lines. 
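+    Equivalently: the smallest uncovered value is subtracted from every
+    uncovered element and added to every element covered by both a row
+    and a column line.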
+ """ + # the smallest uncovered value in the matrix + if np.any(state.row_uncovered) and np.any(state.col_uncovered): + minval = np.min(state.C[state.row_uncovered], axis=0) + minval = np.min(minval[state.col_uncovered]) + state.C[~state.row_uncovered] += minval + state.C[:, state.col_uncovered] -= minval + return _step4 diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_hungarian.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_hungarian.pyc new file mode 100644 index 0000000..a1b5317 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_hungarian.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lbfgsb.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lbfgsb.so new file mode 100755 index 0000000..fa1c6e4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lbfgsb.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog.py new file mode 100644 index 0000000..a3fa97c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog.py @@ -0,0 +1,490 @@ +""" +A top-level linear programming interface. Currently this interface solves +linear programming problems via the Simplex and Interior-Point methods. + +.. versionadded:: 0.15.0 + +Functions +--------- +.. autosummary:: + :toctree: generated/ + + linprog + linprog_verbose_callback + linprog_terse_callback + +""" + +from __future__ import division, print_function, absolute_import + +import numpy as np + +from .optimize import OptimizeResult +from ._linprog_ip import _linprog_ip +from ._linprog_simplex import _linprog_simplex +from ._linprog_util import ( + _parse_linprog, _presolve, _get_Abc, _postprocess + ) + +__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback'] + +__docformat__ = "restructuredtext en" + + +def linprog_verbose_callback(res): + """ + A sample callback function demonstrating the linprog callback interface. + This callback produces detailed output to sys.stdout before each iteration + and after the final iteration of the simplex algorithm. + + Parameters + ---------- + res : A `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + success : bool + True if the algorithm succeeded in finding an optimal solution. + slack : 1D array + The values of the slack variables. Each slack variable corresponds + to an inequality constraint. If the slack is zero, then the + corresponding constraint is active. + con : 1D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. 
+ """ + x = res['x'] + fun = res['fun'] + success = res['success'] + phase = res['phase'] + status = res['status'] + nit = res['nit'] + message = res['message'] + complete = res['complete'] + + saved_printoptions = np.get_printoptions() + np.set_printoptions(linewidth=500, + formatter={'float': lambda x: "{0: 12.4f}".format(x)}) + if status: + print('--------- Simplex Early Exit -------\n'.format(nit)) + print('The simplex method exited early with status {0:d}'.format(status)) + print(message) + elif complete: + print('--------- Simplex Complete --------\n') + print('Iterations required: {}'.format(nit)) + else: + print('--------- Iteration {0:d} ---------\n'.format(nit)) + + if nit > 0: + if phase == 1: + print('Current Pseudo-Objective Value:') + else: + print('Current Objective Value:') + print('f = ', fun) + print() + print('Current Solution Vector:') + print('x = ', x) + print() + + np.set_printoptions(**saved_printoptions) + + +def linprog_terse_callback(res): + """ + A sample callback function demonstrating the linprog callback interface. + This callback produces brief output to sys.stdout before each iteration + and after the final iteration of the simplex algorithm. + + Parameters + ---------- + res : A `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + success : bool + True if the algorithm succeeded in finding an optimal solution. + slack : 1D array + The values of the slack variables. Each slack variable corresponds + to an inequality constraint. If the slack is zero, then the + corresponding constraint is active. + con : 1D array + The (nominally zero) residuals of the equality constraints, that is, + ``b - A_eq @ x`` + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + """ + nit = res['nit'] + x = res['x'] + + if nit == 0: + print("Iter: X:") + print("{0: <5d} ".format(nit), end="") + print(x) + + +def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, + bounds=None, method='simplex', callback=None, + options=None): + """ + Minimize a linear objective function subject to linear + equality and inequality constraints. Linear Programming is intended to + solve the following problem form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. + + Parameters + ---------- + c : 1D array + Coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + 2D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + b_ub : 1D array, optional + 1D array of values representing the upper-bound of each inequality + constraint (row) in ``A_ub``. + A_eq : 2D, optional + 2D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. 
+ b_eq : 1D array, optional + 1D array of values representing the RHS of each equality constraint + (row) in ``A_eq``. + bounds : sequence, optional + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None for one of ``min`` or + ``max`` when there is no bound in that direction. By default + bounds are ``(0, None)`` (non-negative). + If a sequence containing a single tuple is provided, then ``min`` and + ``max`` will be applied to all variables in the problem. + method : str, optional + Type of solver. :ref:`'simplex' <optimize.linprog-simplex>` + and :ref:`'interior-point' <optimize.linprog-interior-point>` + are supported. + callback : callable, optional (simplex only) + If a callback function is provided, it will be called within each + iteration of the simplex algorithm. The callback must require a + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + success : bool + True if the algorithm succeeded in finding an optimal solution. + slack : 1D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1D array + The (nominally zero) residuals of the equality constraints + that is, ``b - A_eq @ x`` + phase : int + The phase of the optimization being executed. In phase 1 a basic + feasible solution is sought and the T has an additional row + representing an alternate objective function. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + + options : dict, optional + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + + Returns + ------- + res : OptimizeResult + A :class:`scipy.optimize.OptimizeResult` consisting of the fields: + + x : 1D array + The independent variable vector which optimizes the linear + programming problem. + fun : float + Value of the objective function. + slack : 1D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + then the corresponding constraint is active. + con : 1D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x`` + success : bool + Returns True if the algorithm succeeded in finding an optimal + solution. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. 
+ + See Also + -------- + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method + is :ref:`Simplex <optimize.linprog-simplex>`. + :ref:`Interior point <optimize.linprog-interior-point>` is also available. + + Method *simplex* uses the simplex algorithm (as it relates to linear + programming, NOT the Nelder-Mead simplex) [1]_, [2]_. This algorithm + should be reasonably reliable and fast for small problems. + + .. versionadded:: 0.15.0 + + Method *interior-point* uses the primal-dual path following algorithm + as outlined in [4]_. This algorithm is intended to provide a faster + and more reliable alternative to *simplex*, especially for large, + sparse problems. Note, however, that the solution returned may be slightly + less accurate than that of the simplex method and may not correspond with a + vertex of the polytope defined by the constraints. + + Before applying either method a presolve procedure based on [8]_ attempts to + identify trivial infeasibilities, trivial unboundedness, and potential + problem simplifications. Specifically, it checks for: + + - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints; + - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained + variables; + - column singletons in ``A_eq``, representing fixed variables; and + - column singletons in ``A_ub``, representing simple bounds. + + If presolve reveals that the problem is unbounded (e.g. an unconstrained + and unbounded variable has negative cost) or infeasible (e.g. a row of + zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver + terminates with the appropriate status code. Note that presolve terminates + as soon as any sign of unboundedness is detected; consequently, a problem + may be reported as unbounded when in reality the problem is infeasible + (but infeasibility has not been detected yet). Therefore, if the output + message states that unboundedness is detected in presolve and it is + necessary to know whether the problem is actually infeasible, set option + ``presolve=False``. + + If neither infeasibility nor unboundedness are detected in a single pass + of the presolve check, bounds are tightened where possible and fixed + variables are removed from the problem. Then, linearly dependent rows + of the ``A_eq`` matrix are removed, (unless they represent an + infeasibility) to avoid numerical difficulties in the primary solve + routine. Note that rows that are nearly linearly dependent (within a + prescribed tolerance) may also be removed, which can change the optimal + solution in rare cases. If this is a concern, eliminate redundancy from + your problem formulation and run with option ``rr=False`` or + ``presolve=False``. + + Several potential improvements can be made here: additional presolve + checks outlined in [8]_ should be implemented, the presolve routine should + be run multiple times (until no further simplifications can be made), and + more of the efficiency improvements from [5]_ should be implemented in the + redundancy removal routines. + + After presolve, the problem is transformed to standard form by converting + the (tightened) simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + + References + ---------- + .. 
[1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 + .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. + .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. + Mathematics of Operations Research (2), 1977: pp. 103-107. + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [5] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods." + Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at + http://www.4er.org/CourseNotes/Book%20B/B-III.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point + methods for large scale linear programming. HEC/Universite de + Geneve, 1996. + + Examples + -------- + Consider the following problem: + + Minimize:: + + f = -1x[0] + 4x[1] + + Subject to:: + + -3x[0] + 1x[1] <= 6 + 1x[0] + 2x[1] <= 4 + x[1] >= -3 + -inf <= x[0] <= inf + + This problem deviates from the standard linear programming problem. + In standard form, linear programming problems assume the variables x are + non-negative. Since the problem variables don't have the standard bounds of + ``(0, None)``, the variable bounds must be set using ``bounds`` explicitly. + + There are two upper-bound constraints, which can be expressed as + + dot(A_ub, x) <= b_ub + + The input for this problem is as follows: + + >>> c = [-1, 4] + >>> A = [[-3, 1], [1, 2]] + >>> b = [6, 4] + >>> x0_bounds = (None, None) + >>> x1_bounds = (-3, None) + >>> from scipy.optimize import linprog + >>> res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), + ... options={"disp": True}) + Optimization terminated successfully. + Current function value: -22.000000 + Iterations: 5 # may vary + >>> print(res) + con: array([], dtype=float64) + fun: -22.0 + message: 'Optimization terminated successfully.' + nit: 5 # may vary + slack: array([39., 0.]) # may vary + status: 0 + success: True + x: array([10., -3.]) + + """ + meth = method.lower() + default_tol = 1e-12 if meth == 'simplex' else 1e-9 + + c, A_ub, b_ub, A_eq, b_eq, bounds, solver_options = _parse_linprog( + c, A_ub, b_ub, A_eq, b_eq, bounds, options) + tol = solver_options.get('tol', default_tol) + + iteration = 0 + complete = False # will become True if solved in presolve + undo = [] + + # Keep the original arrays to calculate slack/residuals for original + # problem. + c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o = c.copy( + ), A_ub.copy(), b_ub.copy(), A_eq.copy(), b_eq.copy() + + # Solve trivial problem, eliminate variables, tighten bounds, etc... 
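+    # From here: _presolve may solve the problem outright (complete=True);
+    # otherwise _get_Abc converts the reduced problem to standard form
+    # (min c @ x  s.t.  A @ x == b, x >= 0) for the simplex or
+    # interior-point core, and _postprocess finally undoes the
+    # transformations to recover x, fun, slack and the residuals.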
+ c0 = 0 # we might get a constant term in the objective + if solver_options.pop('presolve', True): + rr = solver_options.pop('rr', True) + (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status, + message) = _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, rr, tol) + + if not complete: + A, b, c, c0 = _get_Abc(c, c0, A_ub, b_ub, A_eq, b_eq, bounds, undo) + T_o = (c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds, undo) + if meth == 'simplex': + x, status, message, iteration = _linprog_simplex( + c, c0=c0, A=A, b=b, callback=callback, _T_o=T_o, **solver_options) + elif meth == 'interior-point': + x, status, message, iteration = _linprog_ip( + c, c0=c0, A=A, b=b, callback=callback, **solver_options) + else: + raise ValueError('Unknown solver %s' % method) + + # Eliminate artificial variables, re-introduce presolved variables, etc... + # need modified bounds here to translate variables appropriately + disp = solver_options.get('disp', False) + x, fun, slack, con, status, message = _postprocess( + x, c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds, + complete, undo, status, message, tol, iteration, disp) + + sol = { + 'x': x, + 'fun': fun, + 'slack': slack, + 'con': con, + 'status': status, + 'message': message, + 'nit': iteration, + 'success': status == 0} + + return OptimizeResult(sol) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog.pyc new file mode 100644 index 0000000..cb1d998 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_ip.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_ip.py new file mode 100644 index 0000000..9ed32c9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_ip.py @@ -0,0 +1,1069 @@ +""" +An interior-point method for linear programming. +""" +# Author: Matt Haberland + +from __future__ import print_function, division, absolute_import +import numpy as np +import scipy as sp +import scipy.sparse as sps +from warnings import warn +from scipy.linalg import LinAlgError +from .optimize import OptimizeWarning, _check_unknown_options + + +def _get_solver(sparse=False, lstsq=False, sym_pos=True, cholesky=True): + """ + Given solver options, return a handle to the appropriate linear system + solver. + + Parameters + ---------- + sparse : bool + True if the system to be solved is sparse. This is typically set + True when the original ``A_ub`` and ``A_eq`` arrays are sparse. + lstsq : bool + True if the system is ill-conditioned and/or (nearly) singular and + thus a more robust least-squares solver is desired. This is sometimes + needed as the solution is approached. + sym_pos : bool + True if the system matrix is symmetric positive definite + Sometimes this needs to be set false as the solution is approached, + even when the system should be symmetric positive definite, due to + numerical difficulties. + cholesky : bool + True if the system is to be solved by Cholesky, rather than LU, + decomposition. This is typically faster unless the problem is very + small or prone to numerical difficulties. 
+ + Returns + ------- + solve : function + Handle to the appropriate solver function + + """ + if sparse: + if lstsq or not(sym_pos): + def solve(M, r, sym_pos=False): + return sps.linalg.lsqr(M, r)[0] + else: + # this is not currently used; it is replaced by splu solve + # TODO: expose use of this as an option + def solve(M, r): + return sps.linalg.spsolve(M, r, permc_spec="MMD_AT_PLUS_A") + + else: + if lstsq: # sometimes necessary as solution is approached + def solve(M, r): + return sp.linalg.lstsq(M, r)[0] + elif cholesky: + solve = sp.linalg.cho_solve + else: + # this seems to cache the matrix factorization, so solving + # with multiple right hand sides is much faster + def solve(M, r, sym_pos=sym_pos): + return sp.linalg.solve(M, r, sym_pos=sym_pos) + + return solve + + +def _get_delta( + A, + b, + c, + x, + y, + z, + tau, + kappa, + gamma, + eta, + sparse=False, + lstsq=False, + sym_pos=True, + cholesky=True, + pc=True, + ip=False, + permc_spec='MMD_AT_PLUS_A' + ): + """ + Given standard form problem defined by ``A``, ``b``, and ``c``; + current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``; + algorithmic parameters ``gamma and ``eta; + and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc`` + (predictor-corrector), and ``ip`` (initial point improvement), + get the search direction for increments to the variable estimates. + + Parameters + ---------- + As defined in [4], except: + sparse : bool + True if the system to be solved is sparse. This is typically set + True when the original ``A_ub`` and ``A_eq`` arrays are sparse. + lstsq : bool + True if the system is ill-conditioned and/or (nearly) singular and + thus a more robust least-squares solver is desired. This is sometimes + needed as the solution is approached. + sym_pos : bool + True if the system matrix is symmetric positive definite + Sometimes this needs to be set false as the solution is approached, + even when the system should be symmetric positive definite, due to + numerical difficulties. + cholesky : bool + True if the system is to be solved by Cholesky, rather than LU, + decomposition. This is typically faster unless the problem is very + small or prone to numerical difficulties. + pc : bool + True if the predictor-corrector method of Mehrota is to be used. This + is almost always (if not always) beneficial. Even though it requires + the solution of an additional linear system, the factorization + is typically (implicitly) reused so solution is efficient, and the + number of algorithm iterations is typically reduced. + ip : bool + True if the improved initial point suggestion due to [4] section 4.3 + is desired. It's unclear whether this is beneficial. + permc_spec : str (default = 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``.) A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. 
+ + Returns + ------- + Search directions as defined in [4] + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + + if A.shape[0] == 0: + # If there are no constraints, some solvers fail (understandably) + # rather than returning empty solution. This gets the job done. + sparse, lstsq, sym_pos, cholesky = False, False, True, False + solve = _get_solver(sparse, lstsq, sym_pos, cholesky) + n_x = len(x) + + # [4] Equation 8.8 + r_P = b * tau - A.dot(x) + r_D = c * tau - A.T.dot(y) - z + r_G = c.dot(x) - b.transpose().dot(y) + kappa + mu = (x.dot(z) + tau * kappa) / (n_x + 1) + + # Assemble M from [4] Equation 8.31 + Dinv = x / z + splu = False + if sparse and not lstsq: + # sparse requires Dinv to be diag matrix + M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T)) + try: + # TODO: should use linalg.factorized instead, but I don't have + # umfpack and therefore cannot test its performance + solve = sps.linalg.splu(M, permc_spec=permc_spec).solve + splu = True + except Exception: + lstsq = True + solve = _get_solver(sparse, lstsq, sym_pos, cholesky) + else: + # dense does not; use broadcasting + M = A.dot(Dinv.reshape(-1, 1) * A.T) + + # For some small problems, calling sp.linalg.solve w/ sym_pos = True + # may be faster. I am pretty certain it caches the factorization for + # multiple uses and checks the incoming matrix to see if it's the same as + # the one it already factorized. (I can't explain the speed otherwise.) + if cholesky: + try: + L = sp.linalg.cho_factor(M) + except Exception: + cholesky = False + solve = _get_solver(sparse, lstsq, sym_pos, cholesky) + + # pc: "predictor-corrector" [4] Section 4.1 + # In development this option could be turned off + # but it always seems to improve performance substantially + n_corrections = 1 if pc else 0 + + i = 0 + alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0 + while i <= n_corrections: + # Reference [4] Eq. 8.6 + rhatp = eta(gamma) * r_P + rhatd = eta(gamma) * r_D + rhatg = np.array(eta(gamma) * r_G).reshape((1,)) + + # Reference [4] Eq. 8.7 + rhatxs = gamma * mu - x * z + rhattk = np.array(gamma * mu - tau * kappa).reshape((1,)) + + if i == 1: + if ip: # if the correction is to get "initial point" + # Reference [4] Eq. 8.23 + rhatxs = ((1 - alpha) * gamma * mu - + x * z - alpha**2 * d_x * d_z) + rhattk = np.array( + (1 - + alpha) * + gamma * + mu - + tau * + kappa - + alpha**2 * + d_tau * + d_kappa).reshape( + (1, + )) + else: # if the correction is for "predictor-corrector" + # Reference [4] Eq. 8.13 + rhatxs -= d_x * d_z + rhattk -= d_tau * d_kappa + + # sometimes numerical difficulties arise as the solution is approached + # this loop tries to solve the equations using a sequence of functions + # for solve. For dense systems, the order is: + # 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve, + # 2. scipy.linalg.solve w/ sym_pos = True, + # 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails + # 4. scipy.linalg.lstsq + # For sparse systems, the order is: + # 1. scipy.sparse.linalg.splu + # 2. scipy.sparse.linalg.lsqr + # TODO: if umfpack is installed, use factorized instead of splu. + # Can't do that now because factorized doesn't pass permc_spec + # to splu if umfpack isn't installed. Also, umfpack not tested. 
+ solved = False + while(not solved): + try: + solve_this = L if cholesky else M + # [4] Equation 8.28 + p, q = _sym_solve(Dinv, solve_this, A, c, b, solve, splu) + # [4] Equation 8.29 + u, v = _sym_solve(Dinv, solve_this, A, rhatd - + (1 / x) * rhatxs, rhatp, solve, splu) + if np.any(np.isnan(p)) or np.any(np.isnan(q)): + raise LinAlgError + solved = True + except (LinAlgError, ValueError) as e: + # Usually this doesn't happen. If it does, it happens when + # there are redundant constraints or when approaching the + # solution. If so, change solver. + cholesky = False + if not lstsq: + if sym_pos: + warn( + "Solving system with option 'sym_pos':True " + "failed. It is normal for this to happen " + "occasionally, especially as the solution is " + "approached. However, if you see this frequently, " + "consider setting option 'sym_pos' to False.", + OptimizeWarning) + sym_pos = False + else: + warn( + "Solving system with option 'sym_pos':False " + "failed. This may happen occasionally, " + "especially as the solution is " + "approached. However, if you see this frequently, " + "your problem may be numerically challenging. " + "If you cannot improve the formulation, consider " + "setting 'lstsq' to True. Consider also setting " + "`presolve` to True, if it is not already.", + OptimizeWarning) + lstsq = True + else: + raise e + solve = _get_solver(sparse, lstsq, sym_pos) + # [4] Results after 8.29 + d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) / + (1 / tau * kappa + (-c.dot(p) + b.dot(q)))) + d_x = u + p * d_tau + d_y = v + q * d_tau + + # [4] Relations between after 8.25 and 8.26 + d_z = (1 / x) * (rhatxs - z * d_x) + d_kappa = 1 / tau * (rhattk - kappa * d_tau) + + # [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23 + alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1) + if ip: # initial point - see [4] 4.4 + gamma = 10 + else: # predictor-corrector, [4] definition after 8.12 + beta1 = 0.1 # [4] pg. 220 (Table 8.1) + gamma = (1 - alpha)**2 * min(beta1, (1 - alpha)) + i += 1 + + return d_x, d_y, d_z, d_tau, d_kappa + + +def _sym_solve(Dinv, M, A, r1, r2, solve, splu=False): + """ + An implementation of [4] equation 8.31 and 8.32 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + # [4] 8.31 + r = r2 + A.dot(Dinv * r1) + if splu: + v = solve(r) + else: + v = solve(M, r) + # [4] 8.32 + u = Dinv * (A.T.dot(v) - r1) + return u, v + + +def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0): + """ + An implementation of [4] equation 8.21 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. 
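+
+    Examples
+    --------
+    A hand-checked sketch: with ``x = [1, 1]`` and ``d_x = [-2, 1]``, the
+    primal ratio test allows at most half a unit step, while none of the
+    other variables bind:
+
+    >>> import numpy as np
+    >>> _get_step(np.array([1., 1.]), np.array([-2., 1.]),
+    ...           np.array([1., 1.]), np.array([1., 1.]),
+    ...           1.0, 1.0, 1.0, 1.0, 1.0)
+    0.5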
+ + """ + # [4] 4.3 Equation 8.21, ignoring 8.20 requirement + # same step is taken in primal and dual spaces + # alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3 + # the value 1 is used in Mehrota corrector and initial point correction + i_x = d_x < 0 + i_z = d_z < 0 + alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1 + alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1 + alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1 + alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1 + alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa]) + return alpha + + +def _get_message(status): + """ + Given problem status code, return a more detailed message. + + Parameters + ---------- + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + Returns + ------- + message : str + A string descriptor of the exit status of the optimization. + + """ + messages = ( + ["Optimization terminated successfully.", + "The iteration limit was reached before the algorithm converged.", + "The algorithm terminated successfully and determined that the " + "problem is infeasible.", + "The algorithm terminated successfully and determined that the " + "problem is unbounded.", + "Numerical difficulties were encountered before the problem " + "converged. Please check your problem formulation for errors, " + "independence of linear equality constraints, and reasonable " + "scaling and matrix condition numbers. If you continue to " + "encounter this error, please submit a bug report." + ]) + return messages[status] + + +def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha): + """ + An implementation of [4] Equation 8.9 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + x = x + alpha * d_x + tau = tau + alpha * d_tau + z = z + alpha * d_z + kappa = kappa + alpha * d_kappa + y = y + alpha * d_y + return x, y, z, tau, kappa + + +def _get_blind_start(shape): + """ + Return the starting point from [4] 4.4 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + m, n = shape + x0 = np.ones(n) + y0 = np.zeros(m) + z0 = np.ones(n) + tau0 = 1 + kappa0 = 1 + return x0, y0, z0, tau0, kappa0 + + +def _indicators(A, b, c, c0, x, y, z, tau, kappa): + """ + Implementation of several equations from [4] used as indicators of + the status of optimization. + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. 
+ + """ + + # residuals for termination are relative to initial values + x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape) + + # See [4], Section 4 - The Homogeneous Algorithm, Equation 8.8 + def r_p(x, tau): + return b * tau - A.dot(x) + + def r_d(y, z, tau): + return c * tau - A.T.dot(y) - z + + def r_g(x, y, kappa): + return kappa + c.dot(x) - b.dot(y) + + # np.dot unpacks if they are arrays of size one + def mu(x, tau, z, kappa): + return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1) + + obj = c.dot(x / tau) + c0 + + def norm(a): + return np.linalg.norm(a) + + # See [4], Section 4.5 - The Stopping Criteria + r_p0 = r_p(x0, tau0) + r_d0 = r_d(y0, z0, tau0) + r_g0 = r_g(x0, y0, kappa0) + mu_0 = mu(x0, tau0, z0, kappa0) + rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y))) + rho_p = norm(r_p(x, tau)) / max(1, norm(r_p0)) + rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d0)) + rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g0)) + rho_mu = mu(x, tau, z, kappa) / mu_0 + return rho_p, rho_d, rho_A, rho_g, rho_mu, obj + + +def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False): + """ + Print indicators of optimization status to the console. + + Parameters + ---------- + rho_p : float + The (normalized) primal feasibility, see [4] 4.5 + rho_d : float + The (normalized) dual feasibility, see [4] 4.5 + rho_g : float + The (normalized) duality gap, see [4] 4.5 + alpha : float + The step size, see [4] 4.3 + rho_mu : float + The (normalized) path parameter, see [4] 4.5 + obj : float + The objective function value of the current iterate + header : bool + True if a header is to be printed + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + if header: + print("Primal Feasibility ", + "Dual Feasibility ", + "Duality Gap ", + "Step ", + "Path Parameter ", + "Objective ") + + # no clue why this works + fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}' + print(fmt.format( + rho_p, + rho_d, + rho_g, + alpha, + rho_mu, + obj)) + + +def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, + sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec): + r""" + Solve a linear programming problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + using the interior point method of [4]. + + Parameters + ---------- + A : 2D array + 2D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. + b : 1D array + 1D array of values representing the RHS of each equality constraint + (row) in ``A`` (for standard form problem). + c : 1D array + Coefficients of the linear objective function to be minimized (for + standard form problem). + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) + alpha0 : float + The maximal step size for Mehrota's predictor-corrector search + direction; see :math:`\beta_3`of [4] Table 8.1 + beta : float + The desired reduction of the path parameter :math:`\mu` (see [6]_) + maxiter : int + The maximum number of iterations of the algorithm. + disp : bool + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + tol : float + Termination tolerance; see [4]_ Section 4.5. + sparse : bool + Set to ``True`` if the problem is to be treated as sparse. 
However, the
+        inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as
+        (dense) arrays rather than sparse matrices.
+    lstsq : bool
+        Set to ``True`` if the problem is expected to be very poorly
+        conditioned. This should always be left as ``False`` unless severe
+        numerical difficulties are frequently encountered; a better option
+        is usually to improve the formulation of the problem.
+    sym_pos : bool
+        Leave ``True`` if the problem is expected to yield a well conditioned
+        symmetric positive definite normal equation matrix (almost always).
+    cholesky : bool
+        Set to ``True`` if the normal equations are to be solved by explicit
+        Cholesky decomposition followed by explicit forward/backward
+        substitution. This is typically faster for moderate, dense problems
+        that are numerically well-behaved.
+    pc : bool
+        Leave ``True`` if the predictor-corrector method of Mehrotra is to be
+        used. This is almost always (if not always) beneficial.
+    ip : bool
+        Set to ``True`` if the improved initial point suggestion due to [4]_
+        Section 4.3 is desired. It's unclear whether this is beneficial.
+    permc_spec : str (default = 'MMD_AT_PLUS_A')
+        (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
+        True``.) A matrix is factorized in each iteration of the algorithm.
+        This option specifies how to permute the columns of the matrix for
+        sparsity preservation. Acceptable values are:
+
+        - ``NATURAL``: natural ordering.
+        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+        - ``COLAMD``: approximate minimum degree column ordering.
+
+        This option can impact the convergence of the
+        interior point algorithm; test different values to determine which
+        performs best for your problem. For more information, refer to
+        ``scipy.sparse.linalg.splu``.
+
+    Returns
+    -------
+    x_hat : 1D array
+        Solution vector (for standard form problem).
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    iteration : int
+        The number of iterations taken to solve the problem
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
+           Programming based on Newton's Method." Unpublished Course Notes,
+           March 2004. Available 2/25/2017 at:
+           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
+
+    """
+
+    iteration = 0
+
+    # default initial point
+    x, y, z, tau, kappa = _get_blind_start(A.shape)
+
+    # first iteration is special improvement of initial point
+    ip = ip if pc else False
+
+    # [4] 4.5
+    rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
+        A, b, c, c0, x, y, z, tau, kappa)
+    go = rho_p > tol or rho_d > tol or rho_A > tol  # we might get lucky : )
+
+    if disp:
+        _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True)
+
+    status = 0
+    message = "Optimization terminated successfully."
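+
+    # Main loop below: optionally cache a fast transpose for sparse A, then
+    # repeat predictor-corrector (or plain path-following) steps until the
+    # stopping criteria of [4] Section 4.5 are met or maxiter is reached.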
+ + if sparse: + A = sps.csc_matrix(A) + A.T = A.transpose() # A.T is defined for sparse matrices but is slow + # Redefine it to avoid calculating again + # This is fine as long as A doesn't change + + while go: + + iteration += 1 + + if ip: # initial point + # [4] Section 4.4 + gamma = 1 + + def eta(g): + return 1 + else: + # gamma = 0 in predictor step according to [4] 4.1 + # if predictor/corrector is off, use mean of complementarity [6] + # 5.1 / [4] Below Figure 10-4 + gamma = 0 if pc else beta * np.mean(z * x) + # [4] Section 4.1 + + def eta(g=gamma): + return 1 - g + + try: + # Solve [4] 8.6 and 8.7/8.13/8.23 + d_x, d_y, d_z, d_tau, d_kappa = _get_delta( + A, b, c, x, y, z, tau, kappa, gamma, eta, + sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec) + + if ip: # initial point + # [4] 4.4 + # Formula after 8.23 takes a full step regardless if this will + # take it negative + alpha = 1.0 + x, y, z, tau, kappa = _do_step( + x, y, z, tau, kappa, d_x, d_y, + d_z, d_tau, d_kappa, alpha) + x[x < 1] = 1 + z[z < 1] = 1 + tau = max(1, tau) + kappa = max(1, kappa) + ip = False # done with initial point + else: + # [4] Section 4.3 + alpha = _get_step(x, d_x, z, d_z, tau, + d_tau, kappa, d_kappa, alpha0) + # [4] Equation 8.9 + x, y, z, tau, kappa = _do_step( + x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha) + + except (LinAlgError, FloatingPointError, + ValueError, ZeroDivisionError): + # this can happen when sparse solver is used and presolve + # is turned off. Also observed ValueError in AppVeyor Python 3.6 + # Win32 build (PR #8676). I've never seen it otherwise. + status = 4 + message = _get_message(status) + break + + # [4] 4.5 + rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( + A, b, c, c0, x, y, z, tau, kappa) + go = rho_p > tol or rho_d > tol or rho_A > tol + + if disp: + _display_iter(rho_p, rho_d, rho_g, alpha, float(rho_mu), obj) + + # [4] 4.5 + inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol * + max(1, kappa)) + inf2 = rho_mu < tol and tau < tol * min(1, kappa) + if inf1 or inf2: + # [4] Lemma 8.4 / Theorem 8.3 + if b.transpose().dot(y) > tol: + status = 2 + else: # elif c.T.dot(x) < tol: ? Probably not necessary. + status = 3 + message = _get_message(status) + break + elif iteration >= maxiter: + status = 1 + message = _get_message(status) + break + + x_hat = x / tau + # [4] Statement after Theorem 8.2 + return x_hat, status, message, iteration + + +def _linprog_ip( + c, + c0=0, + A=None, + b=None, + callback=None, + alpha0=.99995, + beta=0.1, + maxiter=1000, + disp=False, + tol=1e-8, + sparse=False, + lstsq=False, + sym_pos=True, + cholesky=None, + pc=True, + ip=False, + permc_spec='MMD_AT_PLUS_A', + **unknown_options): + r""" + Minimize a linear objective function subject to linear + equality and non-negativity constraints using the interior point method + of [4]_. Linear programming is intended to solve problems + of the following form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Parameters + ---------- + c : 1D array + Coefficients of the linear objective function to be minimized. + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) + A : 2D array + 2D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. + b : 1D array + 1D array of values representing the right hand side of each equality + constraint (row) in ``A``. + + Options + ------- + maxiter : int (default = 1000) + The maximum number of iterations of the algorithm. 
+    disp : bool (default = False)
+        Set to ``True`` if indicators of optimization status are to be printed
+        to the console each iteration.
+    tol : float (default = 1e-8)
+        Termination tolerance to be used for all termination criteria;
+        see [4]_ Section 4.5.
+    alpha0 : float (default = 0.99995)
+        The maximal step size for Mehrotra's predictor-corrector search
+        direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
+    beta : float (default = 0.1)
+        The desired reduction of the path parameter :math:`\mu` (see [6]_)
+        when Mehrotra's predictor-corrector is not in use (uncommon).
+    sparse : bool (default = False)
+        Set to ``True`` if the problem is to be treated as sparse after
+        presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
+        this option will automatically be set ``True``, and the problem
+        will be treated as sparse even during presolve. If your constraint
+        matrices contain mostly zeros and the problem is not very small (less
+        than about 100 constraints or variables), consider setting ``True``
+        or providing ``A_eq`` and ``A_ub`` as sparse matrices.
+    lstsq : bool (default = False)
+        Set to ``True`` if the problem is expected to be very poorly
+        conditioned. This should always be left ``False`` unless severe
+        numerical difficulties are encountered. Leave this at the default
+        unless you receive a warning message suggesting otherwise.
+    sym_pos : bool (default = True)
+        Leave ``True`` if the problem is expected to yield a well conditioned
+        symmetric positive definite normal equation matrix
+        (almost always). Leave this at the default unless you receive
+        a warning message suggesting otherwise.
+    cholesky : bool (default = True)
+        Set to ``True`` if the normal equations are to be solved by explicit
+        Cholesky decomposition followed by explicit forward/backward
+        substitution. This is typically faster for moderate, dense problems
+        that are numerically well-behaved.
+    pc : bool (default = True)
+        Leave ``True`` if the predictor-corrector method of Mehrotra is to be
+        used. This is almost always (if not always) beneficial.
+    ip : bool (default = False)
+        Set to ``True`` if the improved initial point suggestion due to [4]_
+        Section 4.3 is desired. Whether this is beneficial or not
+        depends on the problem.
+    permc_spec : str (default = 'MMD_AT_PLUS_A')
+        (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
+        True``.) A matrix is factorized in each iteration of the algorithm.
+        This option specifies how to permute the columns of the matrix for
+        sparsity preservation. Acceptable values are:
+
+        - ``NATURAL``: natural ordering.
+        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+        - ``COLAMD``: approximate minimum degree column ordering.
+
+        This option can impact the convergence of the
+        interior point algorithm; test different values to determine which
+        performs best for your problem. For more information, refer to
+        ``scipy.sparse.linalg.splu``.
+
+    Returns
+    -------
+    x : 1D array
+        Solution vector.
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    iteration : int
+        The number of iterations taken to solve the problem.
+
+    Notes
+    -----
+    This method implements the algorithm outlined in [4]_ with ideas from [8]_
+    and a structure inspired by the simpler methods of [6]_ and [4]_.
+
+    The primal-dual path following method begins with initial 'guesses' of
+    the primal and dual variables of the standard form problem and iteratively
+    attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
+    problem with a gradually reduced logarithmic barrier term added to the
+    objective. This particular implementation uses a homogeneous self-dual
+    formulation, which provides certificates of infeasibility or unboundedness
+    where applicable.
+
+    The default initial point for the primal and dual variables is that
+    defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
+    point option ``ip=True``), an alternate (potentially improved) starting
+    point can be calculated according to the additional recommendations of
+    [4]_ Section 4.4.
+
+    A search direction is calculated using the predictor-corrector method
+    (single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
+    (A potential improvement would be to implement the method of multiple
+    corrections described in [4]_ Section 4.2.) In practice, this is
+    accomplished by solving the normal equations, [4]_ Section 5.1 Equations
+    8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
+    8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
+    solving the normal equations rather than 8.25 directly is that the
+    matrices involved are symmetric positive definite, so Cholesky
+    decomposition can be used rather than the more expensive LU factorization.
+
+    With the default ``cholesky=True``, this is accomplished using
+    ``scipy.linalg.cho_factor`` followed by forward/backward substitutions
+    via ``scipy.linalg.cho_solve``. With ``cholesky=False`` and
+    ``sym_pos=True``, Cholesky decomposition is performed instead by
+    ``scipy.linalg.solve``. Based on speed tests, this also appears to retain
+    the Cholesky decomposition of the matrix for later use, which is beneficial
+    as the same system is solved four times with different right hand sides
+    in each iteration of the algorithm.
+
+    In problems with redundancy (e.g. if presolve is turned off with option
+    ``presolve=False``) or if the matrices become ill-conditioned (e.g. as the
+    solution is approached and some decision variables approach zero),
+    Cholesky decomposition can fail. Should this occur, successively more
+    robust solvers (``scipy.linalg.solve`` with ``sym_pos=False`` then
+    ``scipy.linalg.lstsq``) are tried, at the cost of computational efficiency.
+    These solvers can be used from the outset by setting the options
+    ``sym_pos=False`` and ``lstsq=True``, respectively.
+
+    Note that with the option ``sparse=True``, the normal equations are solved
+    using ``scipy.sparse.linalg.spsolve``. Unfortunately, this uses the more
+    expensive LU decomposition from the outset, but for large, sparse problems,
+    the use of sparse linear algebra techniques improves the solve speed
+    despite the use of LU rather than Cholesky decomposition. A simple
+    improvement would be to use the sparse Cholesky decomposition of
+    ``CHOLMOD`` via ``scikit-sparse`` when available.
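+
+    As a minimal illustration of the dense pattern described above (the
+    matrix ``M`` below is an arbitrary symmetric positive definite stand-in
+    for the normal equation matrix, not data taken from the algorithm)::
+
+        import numpy as np
+        from scipy.linalg import cho_factor, cho_solve
+        M = np.array([[4., 1.], [1., 3.]])
+        factor = cho_factor(M)  # factorize once per iteration
+        d1 = cho_solve(factor, np.array([1., 0.]))  # then reuse the factor
+        d2 = cho_solve(factor, np.array([0., 1.]))  # for several right sides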
+ + Other potential improvements for combatting issues associated with dense + columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and + [10]_ Section 4.1-4.2; the latter also discusses the alleviation of + accuracy issues associated with the substitution approach to free + variables. + + After calculating the search direction, the maximum possible step size + that does not activate the non-negativity constraints is calculated, and + the smaller of this step size and unity is applied (as in [4]_ Section + 4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size. + + The new point is tested according to the termination conditions of [4]_ + Section 4.5. The same tolerance, which can be set using the ``tol`` option, + is used for all checks. (A potential improvement would be to expose + the different tolerances to be set independently.) If optimality, + unboundedness, or infeasibility is detected, the solve procedure + terminates; otherwise it repeats. + + The expected problem formulation differs between the top level ``linprog`` + module and the method specific solvers. The method specific solvers expect a + problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Whereas the top level ``linprog`` module expects a problem of form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. + + The original problem contains equality, upper-bound and variable constraints + whereas the method specific solver requires equality constraints and + variable non-negativity. + + ``linprog`` module converts the original problem to standard form by + converting the simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point methods + for large scale linear programming. HEC/Universite de Geneve, 1996. 
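+
+    Examples
+    --------
+    A rough usage sketch on a tiny standard-form problem (illustrative only;
+    in normal use ``linprog`` constructs these standard-form inputs itself)::
+
+        import numpy as np
+        c = np.array([1., 2.])
+        A = np.array([[1., 1.]])
+        b = np.array([1.])
+        x, status, message, niter = _linprog_ip(c, c0=0, A=A, b=b)
+        # x is approximately [1, 0]: all weight goes to the variable with
+        # the smaller cost, minimizing c @ x subject to x0 + x1 == 1, x >= 0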
+
+    """
+
+    _check_unknown_options(unknown_options)
+    if callback is not None:
+        raise NotImplementedError("method 'interior-point' does not support "
+                                  "callback functions.")
+
+    # These should be warnings, not errors
+    if sparse and lstsq:
+        warn("Invalid option combination 'sparse':True "
+             "and 'lstsq':True; Sparse least squares is not recommended.",
+             OptimizeWarning)
+
+    if sparse and not sym_pos:
+        warn("Invalid option combination 'sparse':True "
+             "and 'sym_pos':False; the effect is the same as sparse least "
+             "squares, which is not recommended.",
+             OptimizeWarning)
+
+    if sparse and cholesky:
+        # Cholesky decomposition is not available for sparse problems
+        warn("Invalid option combination 'sparse':True "
+             "and 'cholesky':True; sparse Cholesky decomposition is not "
+             "available.",
+             OptimizeWarning)
+
+    if lstsq and cholesky:
+        warn("Invalid option combination 'lstsq':True "
+             "and 'cholesky':True; option 'cholesky' has no effect when "
+             "'lstsq' is set True.",
+             OptimizeWarning)
+
+    valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD')
+    if permc_spec.upper() not in valid_permc_spec:
+        warn("Invalid permc_spec option: '" + str(permc_spec) + "'. "
+             "Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', "
+             "and 'COLAMD'. Reverting to default.",
+             OptimizeWarning)
+        permc_spec = 'MMD_AT_PLUS_A'
+
+    # This can be an error
+    if not sym_pos and cholesky:
+        raise ValueError(
+            "Invalid option combination 'sym_pos':False "
+            "and 'cholesky':True: Cholesky decomposition is only possible "
+            "for symmetric positive definite matrices.")
+
+    cholesky = cholesky is None and sym_pos and not sparse and not lstsq
+
+    x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta,
+                                            maxiter, disp, tol, sparse,
+                                            lstsq, sym_pos, cholesky,
+                                            pc, ip, permc_spec)
+
+    return x, status, message, iteration
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_ip.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_ip.pyc
new file mode 100644
index 0000000..12ce664
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_ip.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_simplex.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_simplex.py
new file mode 100644
index 0000000..41e3b3f
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_simplex.py
@@ -0,0 +1,616 @@
+"""
+Simplex method for solving linear programming problems
+"""
+
+import numpy as np
+from warnings import warn
+from .optimize import OptimizeResult, OptimizeWarning, _check_unknown_options
+from ._linprog_util import _postsolve
+
+def _pivot_col(T, tol=1.0E-12, bland=False):
+    """
+    Given a linear programming simplex tableau, determine the column
+    of the variable to enter the basis.
+
+    Parameters
+    ----------
+    T : 2D array
+        A 2D array representing the simplex tableau, T, corresponding to the
+        linear programming problem. It should have the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0], c[1], ..., c[n_total], 0]]
+
+        for a Phase 2 problem, or the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0], c[1], ..., c[n_total], 0],
+         [c'[0], c'[1], ..., c'[n_total], 0]]
+
+        for a Phase 1 problem (a problem in which a basic feasible solution is
+        sought prior to maximizing the actual objective). ``T`` is modified in
+        place by ``_solve_simplex``.
+    tol : float
+        Elements in the objective row larger than -tol will not be considered
+        for pivoting. Nominally this value is zero, but numerical issues
+        cause a tolerance about zero to be necessary.
+    bland : bool
+        If True, use Bland's rule for selection of the column (select the
+        first column with a negative coefficient in the objective row,
+        regardless of magnitude).
+
+    Returns
+    -------
+    status: bool
+        True if a suitable pivot column was found, otherwise False.
+        A return of False indicates that the linear programming simplex
+        algorithm is complete.
+    col: int
+        The index of the column of the pivot element.
+        If status is False, col will be returned as nan.
+    """
+    ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
+    if ma.count() == 0:
+        return False, np.nan
+    if bland:
+        return True, np.nonzero(ma.mask == False)[0][0]
+    return True, np.ma.nonzero(ma == ma.min())[0][0]
+
+
+def _pivot_row(T, basis, pivcol, phase, tol=1.0E-12, bland=False):
+    """
+    Given a linear programming simplex tableau, determine the row for the
+    pivot operation.
+
+    Parameters
+    ----------
+    T : 2D array
+        A 2D array representing the simplex tableau, T, corresponding to the
+        linear programming problem. It should have the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0], c[1], ..., c[n_total], 0]]
+
+        for a Phase 2 problem, or the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0], c[1], ..., c[n_total], 0],
+         [c'[0], c'[1], ..., c'[n_total], 0]]
+
+        for a Phase 1 problem (a problem in which a basic feasible solution is
+        sought prior to maximizing the actual objective). ``T`` is modified in
+        place by ``_solve_simplex``.
+    basis : array
+        A list of the current basic variables.
+    pivcol : int
+        The index of the pivot column.
+    phase : int
+        The phase of the simplex algorithm (1 or 2).
+    tol : float
+        Elements in the pivot column smaller than tol will not be considered
+        for pivoting. Nominally this value is zero, but numerical issues
+        cause a tolerance about zero to be necessary.
+    bland : bool
+        If True, use Bland's rule for selection of the row (if more than one
+        row can be used, choose the one with the lowest variable index).
+
+    Returns
+    -------
+    status: bool
+        True if a suitable pivot row was found, otherwise False. A return
+        of False indicates that the linear programming problem is unbounded.
+    row: int
+        The index of the row of the pivot element. If status is False, row
+        will be returned as nan.
+    """
+    if phase == 1:
+        k = 2
+    else:
+        k = 1
+    ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
+    if ma.count() == 0:
+        return False, np.nan
+    mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
+    q = mb / ma
+    min_rows = np.ma.nonzero(q == q.min())[0]
+    if bland:
+        return True, min_rows[np.argmin(np.take(basis, min_rows))]
+    return True, min_rows[0]
+
+
+def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-12):
+    """
+    Pivot the simplex tableau inplace on the element given by (pivrow, pivcol).
+ The entering variable corresponds to the column given by pivcol forcing + the variable basis[pivrow] to leave the basis. + + Parameters + ---------- + T : 2D array + A 2D array representing the simplex tableau, T, corresponding to the + linear programming problem. It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + basis : 1D array + An array of the indices of the basic variables, such that basis[i] + contains the column corresponding to the basic variable for row i. + Basis is modified in place by _apply_pivot. + pivrow : int + Row index of the pivot. + pivcol : int + Column index of the pivot. + """ + basis[pivrow] = pivcol + pivval = T[pivrow, pivcol] + T[pivrow] = T[pivrow] / pivval + for irow in range(T.shape[0]): + if irow != pivrow: + T[irow] = T[irow] - T[pivrow] * T[irow, pivcol] + + # The selected pivot should never lead to a pivot value less than the tol. + if np.isclose(pivval, tol, atol=0, rtol=1e4): + message = ( + "The pivot operation produces a pivot value of:{0: .1e}, " + "which is only slightly greater than the specified " + "tolerance{1: .1e}. This may lead to issues regarding the " + "numerical stability of the simplex method. " + "Removing redundant constraints, changing the pivot strategy " + "via Bland's rule or increasing the tolerance may " + "help reduce the issue.".format(pivval, tol)) + warn(message, OptimizeWarning) + + +def _solve_simplex(T, n, basis, maxiter=1000, phase=2, status=0, message='', + callback=None, tol=1.0E-12, nit0=0, bland=False, _T_o=None): + """ + Solve a linear programming problem in "standard form" using the Simplex + Method. Linear Programming is intended to solve the following problem form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Parameters + ---------- + T : 2D array + A 2D array representing the simplex tableau, T, corresponding to the + linear programming problem. It should have the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0]] + + for a Phase 2 problem, or the form: + + [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], + [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], + . + . + . + [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], + [c[0], c[1], ..., c[n_total], 0], + [c'[0], c'[1], ..., c'[n_total], 0]] + + for a Phase 1 problem (a problem in which a basic feasible solution is + sought prior to maximizing the actual objective. ``T`` is modified in + place by ``_solve_simplex``. + n : int + The number of true variables in the problem. + basis : 1D array + An array of the indices of the basic variables, such that basis[i] + contains the column corresponding to the basic variable for row i. + Basis is modified in place by _solve_simplex + maxiter : int + The maximum number of iterations to perform before aborting the + optimization. 
+    phase : int
+        The phase of the optimization being executed. In phase 1 a basic
+        feasible solution is sought and the tableau ``T`` has an additional
+        row representing an alternate objective function.
+    callback : callable, optional (simplex only)
+        If a callback function is provided, it will be called within each
+        iteration of the simplex algorithm. The callback must accept a
+        `scipy.optimize.OptimizeResult` consisting of the following fields:
+
+            x : 1D array
+                The independent variable vector which optimizes the linear
+                programming problem.
+            fun : float
+                Value of the objective function.
+            success : bool
+                True if the algorithm succeeded in finding an optimal solution.
+            slack : 1D array
+                The values of the slack variables. Each slack variable
+                corresponds to an inequality constraint. If the slack is zero,
+                the corresponding constraint is active.
+            con : 1D array
+                The (nominally zero) residuals of the equality constraints,
+                that is, ``b - A_eq @ x``
+            phase : int
+                The phase of the optimization being executed. In phase 1 a
+                basic feasible solution is sought and the tableau ``T`` has an
+                additional row representing an alternate objective function.
+            status : int
+                An integer representing the exit status of the optimization::
+
+                 0 : Optimization terminated successfully
+                 1 : Iteration limit reached
+                 2 : Problem appears to be infeasible
+                 3 : Problem appears to be unbounded
+                 4 : Serious numerical difficulties encountered
+
+            nit : int
+                The number of iterations performed.
+            message : str
+                A string descriptor of the exit status of the optimization.
+    tol : float
+        The tolerance which determines when a solution is "close enough" to
+        zero in Phase 1 to be considered a basic feasible solution or close
+        enough to positive to serve as an optimal solution.
+    nit0 : int
+        The initial iteration number used to keep an accurate iteration total
+        in a two-phase problem.
+    bland : bool
+        If True, choose pivots using Bland's rule [3]_. In problems which
+        fail to converge due to cycling, using Bland's rule can provide
+        convergence at the expense of a less optimal path about the simplex.
+
+    Returns
+    -------
+    nit : int
+        The number of iterations. Used to keep an accurate iteration total
+        in the two-phase problem.
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Serious numerical difficulties encountered
+
+    """
+    nit = nit0
+    complete = False
+
+    if phase == 1:
+        m = T.shape[0]-2
+    elif phase == 2:
+        m = T.shape[0]-1
+    else:
+        raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")
+
+    if phase == 2:
+        # Check if any artificial variables are still in the basis.
+        # If yes, check if any coefficients from this row and a column
+        # corresponding to one of the non-artificial variables is non-zero.
+        # If found, pivot at this term. If not, start phase 2.
+        # Do this for all artificial variables in the basis.
+        # Ref: "An Introduction to Linear Programming and Game Theory"
+        # by Paul R. Thie, Gerard E. Keough, 3rd Ed,
+        # Chapter 3.7 Redundant Systems (page 102)
+        for pivrow in [row for row in range(basis.size)
+                       if basis[row] > T.shape[1] - 2]:
+            non_zero_row = [col for col in range(T.shape[1] - 1)
+                            if abs(T[pivrow, col]) > tol]
+            if len(non_zero_row) > 0:
+                pivcol = non_zero_row[0]
+                _apply_pivot(T, basis, pivrow, pivcol)
+                nit += 1
+
+    if len(basis[:m]) == 0:
+        solution = np.zeros(T.shape[1] - 1, dtype=np.float64)
+    else:
+        solution = np.zeros(max(T.shape[1] - 1, max(basis[:m]) + 1),
+                            dtype=np.float64)
+
+    while not complete:
+        # Find the pivot column
+        pivcol_found, pivcol = _pivot_col(T, tol, bland)
+        if not pivcol_found:
+            pivcol = np.nan
+            pivrow = np.nan
+            status = 0
+            complete = True
+        else:
+            # Find the pivot row
+            pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland)
+            if not pivrow_found:
+                status = 3
+                complete = True
+
+        if callback is not None:
+            solution[basis[:n]] = T[:n, -1]
+            x = solution[:m]
+            c, A_ub, b_ub, A_eq, b_eq, bounds, undo = _T_o
+            x, fun, slack, con, _, _ = _postsolve(
+                x, c, A_ub, b_ub, A_eq, b_eq, bounds, undo=undo, tol=tol
+            )
+            res = OptimizeResult({
+                'x': x,
+                'fun': fun,
+                'slack': slack,
+                'con': con,
+                'status': status,
+                'message': message,
+                'nit': nit,
+                'success': status == 0 and complete,
+                'phase': phase,
+                'complete': complete,
+            })
+            callback(res)
+
+        if not complete:
+            if nit >= maxiter:
+                # Iteration limit exceeded
+                status = 1
+                complete = True
+            else:
+                _apply_pivot(T, basis, pivrow, pivcol)
+                nit += 1
+    return nit, status
+
+
+def _linprog_simplex(c, c0, A, b, maxiter=1000, disp=False, callback=None,
+                     tol=1.0E-12, bland=False, _T_o=None, **unknown_options):
+    """
+    Minimize a linear objective function subject to linear equality and
+    non-negativity constraints using the two phase simplex method.
+    Linear programming is intended to solve problems of the following form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+        x >= 0
+
+    Parameters
+    ----------
+    c : 1D array
+        Coefficients of the linear objective function to be minimized.
+    c0 : float
+        Constant term in objective function due to fixed (and eliminated)
+        variables. (Purely for display.)
+    A : 2D array
+        2D array such that ``A @ x`` gives the values of the equality
+        constraints at ``x``.
+    b : 1D array
+        1D array of values representing the right hand side of each equality
+        constraint (row) in ``A``.
+    callback : callable, optional (simplex only)
+        If a callback function is provided, it will be called within each
+        iteration of the simplex algorithm. The callback must accept a
+        `scipy.optimize.OptimizeResult` consisting of the following fields:
+
+            x : 1D array
+                The independent variable vector which optimizes the linear
+                programming problem.
+            fun : float
+                Value of the objective function.
+            success : bool
+                True if the algorithm succeeded in finding an optimal solution.
+            slack : 1D array
+                The values of the slack variables. Each slack variable
+                corresponds to an inequality constraint. If the slack is zero,
+                the corresponding constraint is active.
+            con : 1D array
+                The (nominally zero) residuals of the equality constraints,
+                that is, ``b - A_eq @ x``
+            phase : int
+                The phase of the optimization being executed. In phase 1 a
+                basic feasible solution is sought and the tableau ``T`` has an
+                additional row representing an alternate objective function.
+ status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + Options + ------- + maxiter : int + The maximum number of iterations to perform. + disp : bool + If True, print exit status message to sys.stdout + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + bland : bool + If True, use Bland's anti-cycling rule [3]_ to choose pivots to + prevent cycling. If False, choose pivots which should lead to a + converged solution more quickly. The latter method is subject to + cycling (non-convergence) in rare instances. + + Returns + ------- + x : 1D array + Solution vector. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + iteration : int + The number of iterations taken to solve the problem. + + References + ---------- + .. [1] Dantzig, George B., Linear programming and extensions. Rand + Corporation Research Study Princeton Univ. Press, Princeton, NJ, + 1963 + .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to + Mathematical Programming", McGraw-Hill, Chapter 4. + .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. + Mathematics of Operations Research (2), 1977: pp. 103-107. + + + Notes + ----- + The expected problem formulation differs between the top level ``linprog`` + module and the method specific solvers. The method specific solvers expect a + problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Whereas the top level ``linprog`` module expects a problem of form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. + + The original problem contains equality, upper-bound and variable constraints + whereas the method specific solver requires equality constraints and + variable non-negativity. + + ``linprog`` module converts the original problem to standard form by + converting the simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + """ + _check_unknown_options(unknown_options) + + status = 0 + messages = {0: "Optimization terminated successfully.", + 1: "Iteration limit reached.", + 2: "Optimization failed. Unable to find a feasible" + " starting point.", + 3: "Optimization failed. The problem appears to be unbounded.", + 4: "Optimization failed. Singular matrix encountered."} + + n, m = A.shape + + # All constraints must have b >= 0. + is_negative_constraint = np.less(b, 0) + A[is_negative_constraint] *= -1 + b[is_negative_constraint] *= -1 + + # As all constraints are equality constraints the artificial variables + # will also be basic variables. 
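+    # Column indices of the artificial variables: one per constraint row
+    # (n of them here, since this routine uses n, m = A.shape), placed
+    # after the m structural columns of the tableau.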
+    av = np.arange(n) + m
+    basis = av.copy()
+
+    # Format the phase one tableau by adding artificial variables and stacking
+    # the constraints, the objective row and pseudo-objective row.
+    row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis]))
+    row_objective = np.hstack((c, np.zeros(n), c0))
+    row_pseudo_objective = -row_constraints.sum(axis=0)
+    row_pseudo_objective[av] = 0
+    T = np.vstack((row_constraints, row_objective, row_pseudo_objective))
+
+    nit1, status = _solve_simplex(T, n, basis, phase=1, callback=callback,
+                                  maxiter=maxiter, tol=tol, bland=bland, _T_o=_T_o)
+    # if pseudo objective is zero, remove the last row from the tableau and
+    # proceed to phase 2
+    if abs(T[-1, -1]) < tol:
+        # Remove the pseudo-objective row from the tableau
+        T = T[:-1, :]
+        # Remove the artificial variable columns from the tableau
+        T = np.delete(T, av, 1)
+    else:
+        # Failure to find a feasible starting point
+        status = 2
+        nit2 = nit1
+        messages[status] = (
+            "Phase 1 of the simplex method failed to find a feasible "
+            "solution. The pseudo-objective function evaluates to {0:.1e} "
+            "which exceeds the required tolerance of {1} for a solution to be "
+            "considered 'close enough' to zero to be a basic solution. "
+            "Consider increasing the tolerance to be greater than {0:.1e}. "
+            "If this tolerance is unacceptably large the problem may be "
+            "infeasible.".format(abs(T[-1, -1]), tol)
+        )
+
+    if status == 0:
+        # Phase 2
+        nit2, status = _solve_simplex(T, n, basis, maxiter=maxiter,
+                                      phase=2, callback=callback, tol=tol,
+                                      nit0=nit1, bland=bland, _T_o=_T_o)
+
+    solution = np.zeros(n + m)
+    solution[basis[:n]] = T[:n, -1]
+    x = solution[:m]
+
+    return x, status, messages[status], int(nit2)
+
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_simplex.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_simplex.pyc
new file mode 100644
index 0000000..e06b0a8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_simplex.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_util.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_util.py
new file mode 100644
index 0000000..702975b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_util.py
@@ -0,0 +1,1328 @@
+"""
+Method agnostic utility functions for linear programming
+"""
+
+import numpy as np
+import scipy.sparse as sps
+from warnings import warn
+from .optimize import OptimizeWarning
+from scipy.optimize._remove_redundancy import (
+    _remove_redundancy, _remove_redundancy_sparse, _remove_redundancy_dense
+    )
+
+
+def _check_sparse_inputs(options, A_ub, A_eq):
+    """
+    Check that the provided ``A_ub`` and ``A_eq`` matrices conform to the
+    specified optional sparsity variables.
+
+    Parameters
+    ----------
+    A_ub : 2D array, optional
+        2D array such that ``A_ub @ x`` gives the values of the upper-bound
+        inequality constraints at ``x``.
+    A_eq : 2D array, optional
+        2D array such that ``A_eq @ x`` gives the values of the equality
+        constraints at ``x``.
+    options : dict
+        A dictionary of solver options. All methods accept the following
+        generic options:
+
+            maxiter : int
+                Maximum number of iterations to perform.
+            disp : bool
+                Set to True to print convergence messages.
+
+        For method-specific options, see :func:`show_options('linprog')`.
+ + Returns + ------- + A_ub : 2D array, optional + 2D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + A_eq : 2D array, optional + 2D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + """ + # This is an undocumented option for unit testing sparse presolve + _sparse_presolve = options.pop('_sparse_presolve', False) + if _sparse_presolve and A_eq is not None: + A_eq = sps.coo_matrix(A_eq) + if _sparse_presolve and A_ub is not None: + A_ub = sps.coo_matrix(A_ub) + + sparse = options.get('sparse', False) + if not sparse and (sps.issparse(A_eq) or sps.issparse(A_ub)): + options['sparse'] = True + warn("Sparse constraint matrix detected; setting 'sparse':True.", + OptimizeWarning) + return options, A_ub, A_eq + + +def _clean_inputs(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): + """ + Given user inputs for a linear programming problem, return the + objective vector, upper bound constraints, equality constraints, + and simple bounds in a preferred format. + + Parameters + ---------- + c : 1D array + Coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + 2D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + b_ub : 1D array, optional + 1D array of values representing the upper-bound of each inequality + constraint (row) in ``A_ub``. + A_eq : 2D array, optional + 2D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + b_eq : 1D array, optional + 1D array of values representing the RHS of each equality constraint + (row) in ``A_eq``. + bounds : sequence, optional + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None for one of ``min`` or + ``max`` when there is no bound in that direction. By default + bounds are ``(0, None)`` (non-negative). + If a sequence containing a single tuple is provided, then ``min`` and + ``max`` will be applied to all variables in the problem. + + Returns + ------- + c : 1D array + Coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + 2D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + b_ub : 1D array, optional + 1D array of values representing the upper-bound of each inequality + constraint (row) in ``A_ub``. + A_eq : 2D array, optional + 2D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + b_eq : 1D array, optional + 1D array of values representing the RHS of each equality constraint + (row) in ``A_eq``. + bounds : sequence of tuples + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None for each of ``min`` or + ``max`` when there is no bound in that direction. By default + bounds are ``(0, None)`` (non-negative). 
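+
+    Examples
+    --------
+    A hedged sketch of the bounds broadcasting (inputs illustrative)::
+
+        c, A_ub, b_ub, A_eq, b_eq, bounds = _clean_inputs(
+            c=[1, 2], bounds=[(0, 10)])
+        # a single (min, max) pair is applied to every variable:
+        # bounds == [(0.0, 10.0), (0.0, 10.0)]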
+ + """ + + try: + if c is None: + raise TypeError + try: + c = np.asarray(c, dtype=float).copy().squeeze() + except BaseException: # typically a ValueError and shouldn't be, IMO + raise TypeError + if c.size == 1: + c = c.reshape((-1)) + n_x = len(c) + if n_x == 0 or len(c.shape) != 1: + raise ValueError( + "Invalid input for linprog: c should be a 1D array; it must " + "not have more than one non-singleton dimension") + if not(np.isfinite(c).all()): + raise ValueError( + "Invalid input for linprog: c must not contain values " + "inf, nan, or None") + except TypeError: + raise TypeError( + "Invalid input for linprog: c must be a 1D array of numerical " + "coefficients") + + try: + try: + if sps.issparse(A_eq) or sps.issparse(A_ub): + A_ub = sps.coo_matrix( + (0, n_x), dtype=float) if A_ub is None else sps.coo_matrix( + A_ub, dtype=float).copy() + else: + A_ub = np.zeros( + (0, n_x), dtype=float) if A_ub is None else np.asarray( + A_ub, dtype=float).copy() + except BaseException: + raise TypeError + n_ub = A_ub.shape[0] + if len(A_ub.shape) != 2 or A_ub.shape[1] != len(c): + raise ValueError( + "Invalid input for linprog: A_ub must have exactly two " + "dimensions, and the number of columns in A_ub must be " + "equal to the size of c ") + if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all() + or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()): + raise ValueError( + "Invalid input for linprog: A_ub must not contain values " + "inf, nan, or None") + except TypeError: + raise TypeError( + "Invalid input for linprog: A_ub must be a numerical 2D array " + "with each row representing an upper bound inequality constraint") + + try: + try: + b_ub = np.array( + [], dtype=float) if b_ub is None else np.asarray( + b_ub, dtype=float).copy().squeeze() + except BaseException: + raise TypeError + if b_ub.size == 1: + b_ub = b_ub.reshape((-1)) + if len(b_ub.shape) != 1: + raise ValueError( + "Invalid input for linprog: b_ub should be a 1D array; it " + "must not have more than one non-singleton dimension") + if len(b_ub) != n_ub: + raise ValueError( + "Invalid input for linprog: The number of rows in A_ub must " + "be equal to the number of values in b_ub") + if not(np.isfinite(b_ub).all()): + raise ValueError( + "Invalid input for linprog: b_ub must not contain values " + "inf, nan, or None") + except TypeError: + raise TypeError( + "Invalid input for linprog: b_ub must be a 1D array of " + "numerical values, each representing the upper bound of an " + "inequality constraint (row) in A_ub") + + try: + try: + if sps.issparse(A_eq) or sps.issparse(A_ub): + A_eq = sps.coo_matrix( + (0, n_x), dtype=float) if A_eq is None else sps.coo_matrix( + A_eq, dtype=float).copy() + else: + A_eq = np.zeros( + (0, n_x), dtype=float) if A_eq is None else np.asarray( + A_eq, dtype=float).copy() + except BaseException: + raise TypeError + n_eq = A_eq.shape[0] + if len(A_eq.shape) != 2 or A_eq.shape[1] != len(c): + raise ValueError( + "Invalid input for linprog: A_eq must have exactly two " + "dimensions, and the number of columns in A_eq must be " + "equal to the size of c ") + + if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all() + or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()): + raise ValueError( + "Invalid input for linprog: A_eq must not contain values " + "inf, nan, or None") + except TypeError: + raise TypeError( + "Invalid input for linprog: A_eq must be a 2D array with each " + "row representing an equality constraint") + + try: + try: + b_eq = np.array( + [], dtype=float) if b_eq 
is None else np.asarray(
+                b_eq, dtype=float).copy().squeeze()
+        except BaseException:
+            raise TypeError
+        if b_eq.size == 1:
+            b_eq = b_eq.reshape((-1))
+        if len(b_eq.shape) != 1:
+            raise ValueError(
+                "Invalid input for linprog: b_eq should be a 1D array; it "
+                "must not have more than one non-singleton dimension")
+        if len(b_eq) != n_eq:
+            raise ValueError(
+                "Invalid input for linprog: the number of rows in A_eq "
+                "must be equal to the number of values in b_eq")
+        if not(np.isfinite(b_eq).all()):
+            raise ValueError(
+                "Invalid input for linprog: b_eq must not contain values "
+                "inf, nan, or None")
+    except TypeError:
+        raise TypeError(
+            "Invalid input for linprog: b_eq must be a 1D array of "
+            "numerical values, each representing the right hand side of an "
+            "equality constraint (row) in A_eq")
+
+    # "If a sequence containing a single tuple is provided, then min and max
+    # will be applied to all variables in the problem."
+    # Earlier versions of linprog didn't handle this correctly: a list
+    # containing a single tuple was rejected.
+    try:
+        if isinstance(bounds, str):
+            raise TypeError
+        if bounds is None or len(bounds) == 0:
+            bounds = [(0, None)] * n_x
+        elif len(bounds) == 1:
+            b = bounds[0]
+            if len(b) != 2:
+                raise ValueError(
+                    "Invalid input for linprog: exactly one lower bound and "
+                    "one upper bound must be specified for each element of x")
+            bounds = [b] * n_x
+        elif len(bounds) == n_x:
+            try:
+                len(bounds[0])
+            except BaseException:
+                bounds = [(bounds[0], bounds[1])] * n_x
+            for i, b in enumerate(bounds):
+                if len(b) != 2:
+                    raise ValueError(
+                        "Invalid input for linprog, bound " +
+                        str(i) +
+                        " " +
+                        str(b) +
+                        ": exactly one lower bound and one upper bound must "
+                        "be specified for each element of x")
+        elif (len(bounds) == 2 and np.isreal(bounds[0])
+              and np.isreal(bounds[1])):
+            bounds = [(bounds[0], bounds[1])] * n_x
+        else:
+            raise ValueError(
+                "Invalid input for linprog: exactly one lower bound and one "
+                "upper bound must be specified for each element of x")
+
+        clean_bounds = []  # also creates a copy so user's object isn't changed
+        for i, b in enumerate(bounds):
+            if b[0] is not None and b[1] is not None and b[0] > b[1]:
+                raise ValueError(
+                    "Invalid input for linprog, bound " +
+                    str(i) +
+                    " " +
+                    str(b) +
+                    ": a lower bound must be less than or equal to the "
+                    "corresponding upper bound")
+            if b[0] == np.inf:
+                raise ValueError(
+                    "Invalid input for linprog, bound " +
+                    str(i) +
+                    " " +
+                    str(b) +
+                    ": infinity is not a valid lower bound")
+            if b[1] == -np.inf:
+                raise ValueError(
+                    "Invalid input for linprog, bound " +
+                    str(i) +
+                    " " +
+                    str(b) +
+                    ": negative infinity is not a valid upper bound")
+            lb = float(b[0]) if b[0] is not None and b[0] != -np.inf else None
+            ub = float(b[1]) if b[1] is not None and b[1] != np.inf else None
+            clean_bounds.append((lb, ub))
+        bounds = clean_bounds
+    except ValueError as e:
+        if "could not convert string to float" in e.args[0]:
+            raise TypeError
+        else:
+            raise e
+    except TypeError:
+        raise TypeError(
+            "Invalid input for linprog: bounds must be a sequence of "
+            "(min,max) pairs, each defining bounds on an element of x ")
+
+    return c, A_ub, b_ub, A_eq, b_eq, bounds
+
+
+def _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, rr, tol=1e-9):
+    """
+    Given inputs for a linear programming problem in preferred format,
+    presolve the problem: identify trivial infeasibilities, redundancies,
+    and unboundedness, tighten bounds where possible, and eliminate fixed
+    variables.
+ + Parameters + ---------- + c : 1D array + Coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + 2D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + b_ub : 1D array, optional + 1D array of values representing the upper-bound of each inequality + constraint (row) in ``A_ub``. + A_eq : 2D array, optional + 2D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + b_eq : 1D array, optional + 1D array of values representing the RHS of each equality constraint + (row) in ``A_eq``. + bounds : sequence of tuples + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None for each of ``min`` or + ``max`` when there is no bound in that direction. + rr : bool + If ``True`` attempts to eliminate any redundant rows in ``A_eq``. + Set False if ``A_eq`` is known to be of full row rank, or if you are + looking for a potential speedup (at the expense of reliability). + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + + Returns + ------- + c : 1D array + Coefficients of the linear objective function to be minimized. + c0 : 1D array + Constant term in objective function due to fixed (and eliminated) + variables. + A_ub : 2D array, optional + 2D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + b_ub : 1D array, optional + 1D array of values representing the upper-bound of each inequality + constraint (row) in ``A_ub``. + A_eq : 2D array, optional + 2D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + b_eq : 1D array, optional + 1D array of values representing the RHS of each equality constraint + (row) in ``A_eq``. + bounds : sequence of tuples + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None for each of ``min`` or + ``max`` when there is no bound in that direction. Bounds have been + tightened where possible. + x : 1D array + Solution vector (when the solution is trivial and can be determined + in presolve) + undo: list of tuples + (index, value) pairs that record the original index and fixed value + for each variable removed from the problem + complete: bool + Whether the solution is complete (solved or determined to be infeasible + or unbounded in presolve) + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [5] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + + """ + # ideas from Reference [5] by Andersen and Andersen + # however, unlike the reference, this is performed before converting + # problem to standard form + # There are a few advantages: + # * artificial variables have not been added, so matrices are smaller + # * bounds have not been converted to constraints yet. 
(It is better to + # do that after presolve because presolve may adjust the simple bounds.) + # There are many improvements that can be made, namely: + # * implement remaining checks from [5] + # * loop presolve until no additional changes are made + # * implement additional efficiency improvements in redundancy removal [2] + + undo = [] # record of variables eliminated from problem + # constant term in cost function may be added if variables are eliminated + c0 = 0 + complete = False # complete is True if detected infeasible/unbounded + x = np.zeros(c.shape) # this is solution vector if completed in presolve + + status = 0 # all OK unless determined otherwise + message = "" + + # Standard form for bounds (from _clean_inputs) is list of tuples + # but numpy array is more convenient here + # In retrospect, numpy array should have been the standard + bounds = np.array(bounds) + lb = bounds[:, 0] + ub = bounds[:, 1] + lb[np.equal(lb, None)] = -np.inf + ub[np.equal(ub, None)] = np.inf + bounds = bounds.astype(float) + lb = lb.astype(float) + ub = ub.astype(float) + + m_eq, n = A_eq.shape + m_ub, n = A_ub.shape + + if (sps.issparse(A_eq)): + A_eq = A_eq.tolil() + A_ub = A_ub.tolil() + + def where(A): + return A.nonzero() + + vstack = sps.vstack + else: + where = np.where + vstack = np.vstack + + # zero row in equality constraints + zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten() + if np.any(zero_row): + if np.any( + np.logical_and( + zero_row, + np.abs(b_eq) > tol)): # test_zero_row_1 + # infeasible if RHS is not zero + status = 2 + message = ("The problem is (trivially) infeasible due to a row " + "of zeros in the equality constraint matrix with a " + "nonzero corresponding constraint value.") + complete = True + return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, + x, undo, complete, status, message) + else: # test_zero_row_2 + # if RHS is zero, we can eliminate this equation entirely + A_eq = A_eq[np.logical_not(zero_row), :] + b_eq = b_eq[np.logical_not(zero_row)] + + # zero row in inequality constraints + zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten() + if np.any(zero_row): + if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1 + # infeasible if RHS is less than zero (because LHS is zero) + status = 2 + message = ("The problem is (trivially) infeasible due to a row " + "of zeros in the equality constraint matrix with a " + "nonzero corresponding constraint value.") + complete = True + return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, + x, undo, complete, status, message) + else: # test_zero_row_2 + # if LHS is >= 0, we can eliminate this constraint entirely + A_ub = A_ub[np.logical_not(zero_row), :] + b_ub = b_ub[np.logical_not(zero_row)] + + # zero column in (both) constraints + # this indicates that a variable isn't constrained and can be removed + A = vstack((A_eq, A_ub)) + if A.shape[0] > 0: + zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten() + # variable will be at upper or lower bound, depending on objective + x[np.logical_and(zero_col, c < 0)] = ub[ + np.logical_and(zero_col, c < 0)] + x[np.logical_and(zero_col, c > 0)] = lb[ + np.logical_and(zero_col, c > 0)] + if np.any(np.isinf(x)): # if an unconstrained variable has no bound + status = 3 + message = ("If feasible, the problem is (trivially) unbounded " + "due to a zero column in the constraint matrices. 
If " + "you wish to check whether the problem is infeasible, " + "turn presolve off.") + complete = True + return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, + x, undo, complete, status, message) + # variables will equal upper/lower bounds will be removed later + lb[np.logical_and(zero_col, c < 0)] = ub[ + np.logical_and(zero_col, c < 0)] + ub[np.logical_and(zero_col, c > 0)] = lb[ + np.logical_and(zero_col, c > 0)] + + # row singleton in equality constraints + # this fixes a variable and removes the constraint + singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten() + rows = where(singleton_row)[0] + cols = where(A_eq[rows, :])[1] + if len(rows) > 0: + for row, col in zip(rows, cols): + val = b_eq[row] / A_eq[row, col] + if not lb[col] - tol <= val <= ub[col] + tol: + # infeasible if fixed value is not within bounds + status = 2 + message = ("The problem is (trivially) infeasible because a " + "singleton row in the equality constraints is " + "inconsistent with the bounds.") + complete = True + return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, + x, undo, complete, status, message) + else: + # sets upper and lower bounds at that fixed value - variable + # will be removed later + lb[col] = val + ub[col] = val + A_eq = A_eq[np.logical_not(singleton_row), :] + b_eq = b_eq[np.logical_not(singleton_row)] + + # row singleton in inequality constraints + # this indicates a simple bound and the constraint can be removed + # simple bounds may be adjusted here + # After all of the simple bound information is combined here, get_Abc will + # turn the simple bounds into constraints + singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten() + cols = where(A_ub[singleton_row, :])[1] + rows = where(singleton_row)[0] + if len(rows) > 0: + for row, col in zip(rows, cols): + val = b_ub[row] / A_ub[row, col] + if A_ub[row, col] > 0: # upper bound + if val < lb[col] - tol: # infeasible + complete = True + elif val < ub[col]: # new upper bound + ub[col] = val + else: # lower bound + if val > ub[col] + tol: # infeasible + complete = True + elif val > lb[col]: # new lower bound + lb[col] = val + if complete: + status = 2 + message = ("The problem is (trivially) infeasible because a " + "singleton row in the upper bound constraints is " + "inconsistent with the bounds.") + return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, + x, undo, complete, status, message) + A_ub = A_ub[np.logical_not(singleton_row), :] + b_ub = b_ub[np.logical_not(singleton_row)] + + # identical bounds indicate that variable can be removed + i_f = np.abs(lb - ub) < tol # indices of "fixed" variables + i_nf = np.logical_not(i_f) # indices of "not fixed" variables + + # test_bounds_equal_but_infeasible + if np.all(i_f): # if bounds define solution, check for consistency + residual = b_eq - A_eq.dot(lb) + slack = b_ub - A_ub.dot(lb) + if ((A_ub.size > 0 and np.any(slack < 0)) or + (A_eq.size > 0 and not np.allclose(residual, 0))): + status = 2 + message = ("The problem is (trivially) infeasible because the " + "bounds fix all variables to values inconsistent with " + "the constraints") + complete = True + return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, + x, undo, complete, status, message) + + ub_mod = ub + lb_mod = lb + if np.any(i_f): + c0 += c[i_f].dot(lb[i_f]) + b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f]) + b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f]) + c = c[i_nf] + x = x[i_nf] + A_eq = A_eq[:, i_nf] + A_ub = A_ub[:, i_nf] + # record of variables to be added back in + undo = [np.nonzero(i_f)[0], lb[i_f]] + # don't remove these entries from 
+        # but we _also_ need a version of the bounds with these removed
+        lb_mod = lb[i_nf]
+        ub_mod = ub[i_nf]
+
+    # no constraints indicates that problem is trivial
+    if A_eq.size == 0 and A_ub.size == 0:
+        b_eq = np.array([])
+        b_ub = np.array([])
+        # test_empty_constraint_1
+        if c.size == 0:
+            status = 0
+            message = ("The solution was determined in presolve as there are "
+                       "no non-trivial constraints.")
+        elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or
+              np.any(np.logical_and(c > 0, lb_mod == -np.inf))):
+            # test_no_constraints()
+            # test_unbounded_no_nontrivial_constraints_1
+            # test_unbounded_no_nontrivial_constraints_2
+            status = 3
+            message = ("The problem is (trivially) unbounded "
+                       "because there are no non-trivial constraints and "
+                       "a) at least one decision variable is unbounded "
+                       "above and its corresponding cost is negative, or "
+                       "b) at least one decision variable is unbounded below "
+                       "and its corresponding cost is positive. ")
+        else:  # test_empty_constraint_2
+            status = 0
+            message = ("The solution was determined in presolve as there are "
+                       "no non-trivial constraints.")
+        complete = True
+        x[c < 0] = ub_mod[c < 0]
+        x[c > 0] = lb_mod[c > 0]
+        # where c is zero, set x to a finite bound or zero
+        # (fall back to a finite lower bound where the upper bound is
+        # infinite; previously this line reassigned ub_mod to itself)
+        x_zero_c = ub_mod[c == 0]
+        x_zero_c[np.isinf(x_zero_c)] = lb_mod[c == 0][np.isinf(x_zero_c)]
+        x_zero_c[np.isinf(x_zero_c)] = 0
+        x[c == 0] = x_zero_c
+        # if this is not the last step of presolve, should convert bounds back
+        # to array and return here
+
+    # *sigh* - convert bounds back to their standard form (list of tuples)
+    # again, in retrospect, numpy array would be standard form
+    lb[np.equal(lb, -np.inf)] = None
+    ub[np.equal(ub, np.inf)] = None
+    bounds = np.hstack((lb[:, np.newaxis], ub[:, np.newaxis]))
+    bounds = bounds.tolist()
+    for i, row in enumerate(bounds):
+        for j, col in enumerate(row):
+            if str(col) == "nan":
+                # comparing col to float("nan") and np.nan doesn't work.
+                # should use np.isnan
+                bounds[i][j] = None
+
+    # remove redundant (linearly dependent) rows from equality constraints
+    n_rows_A = A_eq.shape[0]
+    redundancy_warning = ("A_eq does not appear to be of full row rank. To "
+                          "improve performance, check the problem formulation "
+                          "for redundant equality constraints.")
+    if (sps.issparse(A_eq)):
+        if rr and A_eq.size > 0:  # TODO: Fast sparse rank check?
+            A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq)
+            if A_eq.shape[0] < n_rows_A:
+                warn(redundancy_warning, OptimizeWarning)
+            if status != 0:
+                complete = True
+        return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,
+                x, undo, complete, status, message)
+
+    # This is a wild guess for which redundancy removal algorithm will be
+    # faster. More testing would be good.
+    small_nullspace = 5
+    if rr and A_eq.size > 0:
+        try:  # TODO: instead use results of first SVD in _remove_redundancy
+            rank = np.linalg.matrix_rank(A_eq)
+        except Exception:  # oh well, we'll have to go with _remove_redundancy_dense
+            rank = 0
+    if rr and A_eq.size > 0 and rank < A_eq.shape[0]:
+        warn(redundancy_warning, OptimizeWarning)
+        dim_row_nullspace = A_eq.shape[0]-rank
+        if dim_row_nullspace <= small_nullspace:
+            A_eq, b_eq, status, message = _remove_redundancy(A_eq, b_eq)
+        if dim_row_nullspace > small_nullspace or status == 4:
+            A_eq, b_eq, status, message = _remove_redundancy_dense(A_eq, b_eq)
+        if A_eq.shape[0] < rank:
+            message = ("Due to numerical issues, redundant equality "
+                       "constraints could not be removed automatically. "
" + "Try providing your constraint matrices as sparse " + "matrices to activate sparse presolve, try turning " + "off redundancy removal, or try turning off presolve " + "altogether.") + status = 4 + if status != 0: + complete = True + return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, + x, undo, complete, status, message) + + +def _parse_linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, options): + """ + Parse the provided linear programming problem + + ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and + ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the + provided constraints (``A_ub`` and ``A_eq) and if these match the provided + sparsity optional values. + + ``_clean inputs`` checks of the provided inputs. If no violations are + identified the objective vector, upper bound constraints, equality + constraints, and simple bounds are returned in the expected format. + + Parameters + ---------- + c : 1D array + Coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + 2D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + b_ub : 1D array, optional + 1D array of values representing the upper-bound of each inequality + constraint (row) in ``A_ub``. + A_eq : 2D array, optional + 2D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + b_eq : 1D array, optional + 1D array of values representing the RHS of each equality constraint + (row) in ``A_eq``. + bounds : sequence + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None for one of ``min`` or + ``max`` when there is no bound in that direction. By default + bounds are ``(0, None)`` (non-negative). If a sequence containing a + single tuple is provided, then ``min`` and ``max`` will be applied to + all variables in the problem. + options : dict + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. + + Returns + ------- + c : 1D array + Coefficients of the linear objective function to be minimized. + A_ub : 2D array, optional + 2D array such that ``A_ub @ x`` gives the values of the upper-bound + inequality constraints at ``x``. + b_ub : 1D array, optional + 1D array of values representing the upper-bound of each inequality + constraint (row) in ``A_ub``. + A_eq : 2D array, optional + 2D array such that ``A_eq @ x`` gives the values of the equality + constraints at ``x``. + b_eq : 1D array, optional + 1D array of values representing the RHS of each equality constraint + (row) in ``A_eq``. + bounds : sequence, optional + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None for one of ``min`` or + ``max`` when there is no bound in that direction. By default + bounds are ``(0, None)`` (non-negative). + If a sequence containing a single tuple is provided, then ``min`` and + ``max`` will be applied to all variables in the problem. + options : dict, optional + A dictionary of solver options. All methods accept the following + generic options: + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + For method-specific options, see :func:`show_options('linprog')`. 
+
+    """
+    if options is None:
+        options = {}
+
+    solver_options = {k: v for k, v in options.items()}
+    solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, A_ub, A_eq)
+    # Convert lists to numpy arrays, etc...
+    c, A_ub, b_ub, A_eq, b_eq, bounds = _clean_inputs(
+        c, A_ub, b_ub, A_eq, b_eq, bounds)
+    return c, A_ub, b_ub, A_eq, b_eq, bounds, solver_options
+
+
+def _get_Abc(c, c0=0, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,
+             undo=[]):
+    """
+    Given a linear programming problem of the form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+         lb <= x <= ub
+
+    where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
+
+    Return the problem in standard form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+            x >= 0
+
+    by adding slack variables and making variable substitutions as necessary.
+
+    Parameters
+    ----------
+    c : 1D array
+        Coefficients of the linear objective function to be minimized.
+        Components corresponding with fixed variables have been eliminated.
+    c0 : float
+        Constant term in objective function due to fixed (and eliminated)
+        variables.
+    A_ub : 2D array, optional
+        2D array such that ``A_ub @ x`` gives the values of the upper-bound
+        inequality constraints at ``x``.
+    b_ub : 1D array, optional
+        1D array of values representing the upper-bound of each inequality
+        constraint (row) in ``A_ub``.
+    A_eq : 2D array, optional
+        2D array such that ``A_eq @ x`` gives the values of the equality
+        constraints at ``x``.
+    b_eq : 1D array, optional
+        1D array of values representing the RHS of each equality constraint
+        (row) in ``A_eq``.
+    bounds : sequence of tuples
+        ``(min, max)`` pairs for each element in ``x``, defining
+        the bounds on that parameter. Use None for one of ``min`` or
+        ``max`` when there is no bound in that direction. Bounds have been
+        tightened where possible.
+    undo : list of tuples
+        (`index`, `value`) pairs that record the original index and fixed
+        value for each variable removed from the problem
+
+    Returns
+    -------
+    A : 2D array
+        2D array such that ``A @ x`` gives the values of the equality
+        constraints at ``x``.
+    b : 1D array
+        1D array of values representing the RHS of each equality constraint
+        (row) in A (for standard form problem).
+    c : 1D array
+        Coefficients of the linear objective function to be minimized (for
+        standard form problem).
+    c0 : float
+        Constant term in objective function due to fixed (and eliminated)
+        variables.
+
+    References
+    ----------
+    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
+           programming." Athena Scientific 1 (1997): 997.
+
+    """
+
+    if sps.issparse(A_eq):
+        sparse = True
+        A_eq = sps.lil_matrix(A_eq)
+        A_ub = sps.lil_matrix(A_ub)
+
+        def hstack(blocks):
+            return sps.hstack(blocks, format="lil")
+
+        def vstack(blocks):
+            return sps.vstack(blocks, format="lil")
+
+        zeros = sps.lil_matrix
+        eye = sps.eye
+    else:
+        sparse = False
+        hstack = np.hstack
+        vstack = np.vstack
+        zeros = np.zeros
+        eye = np.eye
+
+    fixed_x = set()
+    if len(undo) > 0:
+        # these are indices of variables removed from the problem
+        # however, their bounds are still part of the bounds list
+        fixed_x = set(undo[0])
+    # they are needed elsewhere, but not here
+    bounds = [bounds[i] for i in range(len(bounds)) if i not in fixed_x]
+    # in retrospect, the standard form of bounds should have been an n x 2
+    # array. maybe change it someday.
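+
+    # Editor's sketch (hypothetical one-variable problem, not part of the
+    # original source) of the transformation performed below: to minimize
+    # c*x with -1 <= x <= 4, the upper bound first becomes the A_ub row
+    # x <= 4 plus a slack variable s >= 0; the lower-bound shift x = x' - 1
+    # then gives x' >= 0, adds lb*c = -c to c0, and updates the RHS so the
+    # constraint reads x' + s == 5.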
+
+    # modify problem such that all variables have only non-negativity bounds
+
+    bounds = np.array(bounds)
+    lbs = bounds[:, 0]
+    ubs = bounds[:, 1]
+    m_ub, n_ub = A_ub.shape
+
+    lb_none = np.equal(lbs, None)
+    ub_none = np.equal(ubs, None)
+    lb_some = np.logical_not(lb_none)
+    ub_some = np.logical_not(ub_none)
+
+    # if preprocessing is on, lb == ub can't happen
+    # if preprocessing is off, then it would be best to convert that
+    # to an equality constraint, but it's tricky to make the other
+    # required modifications from inside here.
+
+    # unbounded below: substitute xi = -xi' (unbounded above)
+    l_nolb_someub = np.logical_and(lb_none, ub_some)
+    i_nolb = np.nonzero(l_nolb_someub)[0]
+    lbs[l_nolb_someub], ubs[l_nolb_someub] = (
+        -ubs[l_nolb_someub], lbs[l_nolb_someub])
+    lb_none = np.equal(lbs, None)
+    ub_none = np.equal(ubs, None)
+    lb_some = np.logical_not(lb_none)
+    ub_some = np.logical_not(ub_none)
+    c[i_nolb] *= -1
+    if len(i_nolb) > 0:
+        if A_ub.shape[0] > 0:  # sometimes needed for sparse arrays... weird
+            A_ub[:, i_nolb] *= -1
+        if A_eq.shape[0] > 0:
+            A_eq[:, i_nolb] *= -1
+
+    # upper bound: add inequality constraint
+    i_newub = np.nonzero(ub_some)[0]
+    ub_newub = ubs[ub_some]
+    n_bounds = np.count_nonzero(ub_some)
+    A_ub = vstack((A_ub, zeros((n_bounds, A_ub.shape[1]))))
+    b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))
+    A_ub[range(m_ub, A_ub.shape[0]), i_newub] = 1
+    b_ub[m_ub:] = ub_newub
+
+    A1 = vstack((A_ub, A_eq))
+    b = np.concatenate((b_ub, b_eq))
+    c = np.concatenate((c, np.zeros((A_ub.shape[0],))))
+
+    # unbounded: substitute xi = xi+ - xi-
+    l_free = np.logical_and(lb_none, ub_none)
+    i_free = np.nonzero(l_free)[0]
+    n_free = len(i_free)
+    A1 = hstack((A1, zeros((A1.shape[0], n_free))))
+    c = np.concatenate((c, np.zeros(n_free)))
+    A1[:, range(n_ub, A1.shape[1])] = -A1[:, i_free]
+    c[np.arange(n_ub, A1.shape[1])] = -c[i_free]
+
+    # add slack variables
+    A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])
+    A = hstack([A1, A2])
+
+    # lower bound: substitute xi = xi' + lb
+    # now there is a constant term in objective
+    i_shift = np.nonzero(lb_some)[0]
+    lb_shift = lbs[lb_some].astype(float)
+    c0 += np.sum(lb_shift * c[i_shift])
+    if sparse:
+        b = b.reshape(-1, 1)
+        A = A.tocsc()
+        b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)
+        b = b.ravel()
+    else:
+        b -= (A[:, i_shift] * lb_shift).sum(axis=1)
+
+    return A, b, c, c0
+
+
+def _display_summary(message, status, fun, iteration):
+    """
+    Print the termination summary of the linear program
+
+    Parameters
+    ----------
+    message : str
+        A string descriptor of the exit status of the optimization.
+    status : int
+        An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+    fun : float
+        Value of the objective function.
+    iteration : int
+        The number of iterations performed.
+    """
+    print(message)
+    if status in (0, 1):
+        print("         Current function value: {0: <12.6f}".format(fun))
+    print("         Iterations: {0:d}".format(iteration))
+
+
+def _postsolve(x, c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,
+               complete=False, undo=[], tol=1e-8):
+    """
+    Given a solution ``x`` to the presolved, standard-form linear program,
+    add fixed variables back into the problem and undo the variable
+    substitutions to recover a solution to the original linear program.
+    Also, calculate the objective function value, slack in the original
+    upper bound constraints, and residuals in the original equality
+    constraints.
+
+    Parameters
+    ----------
+    x : 1D array
+        Solution vector to the standard-form problem.
+    c : 1D array
+        Original coefficients of the linear objective function to be
+        minimized.
+    A_ub : 2D array, optional
+        2D array such that ``A_ub @ x`` gives the values of the upper-bound
+        inequality constraints at ``x``.
+    b_ub : 1D array, optional
+        1D array of values representing the upper-bound of each inequality
+        constraint (row) in ``A_ub``.
+    A_eq : 2D array, optional
+        2D array such that ``A_eq @ x`` gives the values of the equality
+        constraints at ``x``.
+    b_eq : 1D array, optional
+        1D array of values representing the RHS of each equality constraint
+        (row) in ``A_eq``.
+    bounds : sequence of tuples
+        Bounds, as modified in presolve
+    complete : bool
+        Whether the solution was determined in presolve (``True`` if so)
+    undo : list of tuples
+        (`index`, `value`) pairs that record the original index and fixed
+        value for each variable removed from the problem
+    tol : float
+        Termination tolerance; see [1]_ Section 4.5.
+
+    Returns
+    -------
+    x : 1D array
+        Solution vector to original linear programming problem
+    fun : float
+        Optimal objective value for original problem
+    slack : 1D array
+        The (non-negative) slack in the upper bound constraints, that is,
+        ``b_ub - A_ub @ x``
+    con : 1D array
+        The (nominally zero) residuals of the equality constraints, that is,
+        ``b - A_eq @ x``
+    lb : 1D array
+        The lower bound constraints on the original variables
+    ub : 1D array
+        The upper bound constraints on the original variables
+    """
+    # note that all the inputs are the ORIGINAL, unmodified versions
+    # no rows, columns have been removed
+    # the only exception is bounds; it has been modified
+    # we need these modified values to undo the variable substitutions
+    # in retrospect, perhaps this could have been simplified if the "undo"
+    # variable also contained information for undoing variable substitutions
+
+    n_x = len(c)
+
+    # we don't have to undo variable substitutions for fixed variables that
+    # were removed from the problem
+    no_adjust = set()
+
+    # if there were variables removed from the problem, add them back into the
+    # solution vector
+    if len(undo) > 0:
+        no_adjust = set(undo[0])
+        x = x.tolist()
+        for i, val in zip(undo[0], undo[1]):
+            x.insert(i, val)
+        x = np.array(x)
+
+    # now undo variable substitutions
+    # if "complete", problem was solved in presolve; don't do anything here
+    if not complete and bounds is not None:  # bounds are never none, probably
+        n_unbounded = 0
+        for i, b in enumerate(bounds):
+            if i in no_adjust:
+                continue
+            lb, ub = b
+            if lb is None and ub is None:
+                n_unbounded += 1
+                x[i] = x[i] - x[n_x + n_unbounded - 1]
+            else:
+                if lb is None:
+                    x[i] = ub - x[i]
+                else:
+                    x[i] += lb
+
+    n_x = len(c)
+    x = x[:n_x]  # all the rest of the variables were artificial
+    fun = x.dot(c)
+    slack = b_ub - A_ub.dot(x)  # report slack for ORIGINAL UB constraints
+    # report residuals of ORIGINAL EQ constraints
+    con = b_eq - A_eq.dot(x)
+
+    # Patch for bug #8664. Detecting this sort of issue earlier
+    # (via abnormalities in the indicators) would be better.
+    bounds = np.array(bounds)  # again, this should have been the standard form
+    lb = bounds[:, 0]
+    ub = bounds[:, 1]
+    lb[np.equal(lb, None)] = -np.inf
+    ub[np.equal(ub, None)] = np.inf
+
+    return x, fun, slack, con, lb, ub
+
+
+def _check_result(x, fun, status, slack, con, lb, ub, tol, message):
+    """
+    Check the validity of the provided solution.
+
+    A valid (optimal) solution satisfies all bounds, all slack variables are
+    non-negative, and all equality constraint residuals are (nominally) zero.
+    Further, the lower-bounds, upper-bounds, slack and residuals contain
+    no nan values.
+
+    Parameters
+    ----------
+    x : 1D array
+        Solution vector to original linear programming problem
+    fun : float
+        Optimal objective value for original problem
+    status : int
+        An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+    slack : 1D array
+        The (non-negative) slack in the upper bound constraints, that is,
+        ``b_ub - A_ub @ x``
+    con : 1D array
+        The (nominally zero) residuals of the equality constraints, that is,
+        ``b - A_eq @ x``
+    lb : 1D array
+        The lower bound constraints on the original variables
+    ub : 1D array
+        The upper bound constraints on the original variables
+    message : str
+        A string descriptor of the exit status of the optimization.
+    tol : float
+        Termination tolerance; see [1]_ Section 4.5.
+
+    Returns
+    -------
+    status : int
+        An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    """
+    # Somewhat arbitrary, but status 5 is very unusual
+    tol = np.sqrt(tol) * 10
+
+    contains_nans = (
+        np.isnan(x).any()
+        or np.isnan(fun)
+        or np.isnan(slack).any()
+        or np.isnan(con).any()
+    )
+
+    if contains_nans:
+        is_feasible = False
+    else:
+        invalid_bounds = (x < lb - tol).any() or (x > ub + tol).any()
+        invalid_slack = status != 3 and (slack < -tol).any()
+        invalid_con = status != 3 and (np.abs(con) > tol).any()
+        is_feasible = not (invalid_bounds or invalid_slack or invalid_con)
+
+    if status == 0 and not is_feasible:
+        status = 4
+        message = ("The solution does not satisfy the constraints, yet "
+                   "no errors were raised and there is no certificate of "
+                   "infeasibility or unboundedness. This is known to occur "
+                   "if the `presolve` option is False and the problem is "
+                   "infeasible. If you encounter this under different "
+                   "circumstances, please submit a bug report. Otherwise, "
+                   "please enable presolve.")
+    elif status == 0 and contains_nans:
+        status = 4
+        message = ("Numerical difficulties were encountered but no errors "
+                   "were raised. This is known to occur if the 'presolve' "
+                   "option is False, 'sparse' is True, and A_eq includes "
+                   "redundant rows. If you encounter this under different "
+                   "circumstances, please submit a bug report. Otherwise, "
+                   "remove linearly dependent equations from your equality "
+                   "constraints or enable presolve.")
+    elif status == 2 and is_feasible:
+        # Occurs if the simplex method exits after phase one with a very
+        # nearly basic feasible solution. Postsolving can make the solution
+        # basic; however, this solution is NOT optimal
+        raise ValueError(message)
+
+    return status, message
+
+
+def _postprocess(x, c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,
+                 complete=False, undo=[], status=0, message="", tol=1e-8,
+                 iteration=None, disp=False):
+    """
+    Given a solution ``x`` to the presolved, standard-form linear program,
+    add fixed variables back into the problem and undo the variable
+    substitutions to recover a solution to the original linear program. Also,
+    calculate the objective function value, slack in the original upper bound
+    constraints, and residuals in the original equality constraints.
+
+    Parameters
+    ----------
+    x : 1D array
+        Solution vector to the standard-form problem.
+    c : 1D array
+        Original coefficients of the linear objective function to be
+        minimized.
+    A_ub : 2D array, optional
+        2D array such that ``A_ub @ x`` gives the values of the upper-bound
+        inequality constraints at ``x``.
+    b_ub : 1D array, optional
+        1D array of values representing the upper-bound of each inequality
+        constraint (row) in ``A_ub``.
+    A_eq : 2D array, optional
+        2D array such that ``A_eq @ x`` gives the values of the equality
+        constraints at ``x``.
+    b_eq : 1D array, optional
+        1D array of values representing the RHS of each equality constraint
+        (row) in ``A_eq``.
+    bounds : sequence of tuples
+        Bounds, as modified in presolve
+    complete : bool
+        Whether the solution was determined in presolve (``True`` if so)
+    undo : list of tuples
+        (`index`, `value`) pairs that record the original index and fixed
+        value for each variable removed from the problem
+    status : int
+        An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    tol : float
+        Termination tolerance; see [1]_ Section 4.5.
+
+    Returns
+    -------
+    x : 1D array
+        Solution vector to original linear programming problem
+    fun : float
+        Optimal objective value for original problem
+    slack : 1D array
+        The (non-negative) slack in the upper bound constraints, that is,
+        ``b_ub - A_ub @ x``
+    con : 1D array
+        The (nominally zero) residuals of the equality constraints, that is,
+        ``b - A_eq @ x``
+    status : int
+        An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
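+
+    Notes
+    -----
+    Editor's note (derived from the function body): this helper simply chains
+    ``_postsolve`` and ``_check_result``, printing a summary via
+    ``_display_summary`` when ``disp`` is True.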
+
+    """
+
+    x, fun, slack, con, lb, ub = _postsolve(
+        x, c, A_ub, b_ub, A_eq, b_eq,
+        bounds, complete, undo, tol
+    )
+
+    status, message = _check_result(
+        x, fun, status, slack, con,
+        lb, ub, tol, message
+    )
+
+    if disp:
+        _display_summary(message, status, fun, iteration)
+
+    return x, fun, slack, con, status, message
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_util.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_util.pyc
new file mode 100644
index 0000000..1acbb01
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_linprog_util.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/__init__.py
new file mode 100644
index 0000000..2068938
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/__init__.py
@@ -0,0 +1,7 @@
+"""This module contains least-squares algorithms."""
+from __future__ import division, print_function, absolute_import
+
+from .least_squares import least_squares
+from .lsq_linear import lsq_linear
+
+__all__ = ['least_squares', 'lsq_linear']
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/__init__.pyc
new file mode 100644
index 0000000..ef9f81b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/bvls.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/bvls.py
new file mode 100644
index 0000000..ede19ac
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/bvls.py
@@ -0,0 +1,180 @@
+"""Bounded-Variable Least-Squares algorithm."""
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.linalg import norm, lstsq
+from scipy.optimize import OptimizeResult
+
+from .common import print_header_linear, print_iteration_linear
+
+
+def compute_kkt_optimality(g, on_bound):
+    """Compute the maximum violation of KKT conditions."""
+    g_kkt = g * on_bound
+    free_set = on_bound == 0
+    g_kkt[free_set] = np.abs(g[free_set])
+    return np.max(g_kkt)
+
+
+def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose):
+    m, n = A.shape
+
+    x = x_lsq.copy()
+    on_bound = np.zeros(n)
+
+    mask = x < lb
+    x[mask] = lb[mask]
+    on_bound[mask] = -1
+
+    mask = x > ub
+    x[mask] = ub[mask]
+    on_bound[mask] = 1
+
+    free_set = on_bound == 0
+    active_set = ~free_set
+    free_set, = np.nonzero(free_set)
+
+    r = A.dot(x) - b
+    cost = 0.5 * np.dot(r, r)
+    initial_cost = cost
+    g = A.T.dot(r)
+
+    cost_change = None
+    step_norm = None
+    iteration = 0
+
+    if verbose == 2:
+        print_header_linear()
+
+    # This is the initialization loop. The requirement is that the
+    # least-squares solution on free variables is feasible before BVLS starts.
+    # One possible initialization is to set all variables to lower or upper
+    # bounds, but many iterations may be required from this state later on.
+    # The ad hoc procedure implemented here should intuitively give a better
+    # initial state: find the least-squares solution on the current free
+    # variables; if it is feasible then stop, otherwise set the violating
+    # variables to the corresponding bounds and continue on the reduced set
+    # of free variables.
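+    # Editor's illustration (hypothetical numbers, not part of scipy): with
+    # lb = [0, 0], ub = [1, 1] and x_lsq = [-0.5, 0.3], the clipping above
+    # gives x = [0, 0.3] and on_bound = [-1, 0], so only the second variable
+    # starts in the free set.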
+ + while free_set.size > 0: + if verbose == 2: + optimality = compute_kkt_optimality(g, on_bound) + print_iteration_linear(iteration, cost, cost_change, step_norm, + optimality) + + iteration += 1 + x_free_old = x[free_set].copy() + + A_free = A[:, free_set] + b_free = b - A.dot(x * active_set) + z = lstsq(A_free, b_free, rcond=-1)[0] + + lbv = z < lb[free_set] + ubv = z > ub[free_set] + v = lbv | ubv + + if np.any(lbv): + ind = free_set[lbv] + x[ind] = lb[ind] + active_set[ind] = True + on_bound[ind] = -1 + + if np.any(ubv): + ind = free_set[ubv] + x[ind] = ub[ind] + active_set[ind] = True + on_bound[ind] = 1 + + ind = free_set[~v] + x[ind] = z[~v] + + r = A.dot(x) - b + cost_new = 0.5 * np.dot(r, r) + cost_change = cost - cost_new + cost = cost_new + g = A.T.dot(r) + step_norm = norm(x[free_set] - x_free_old) + + if np.any(v): + free_set = free_set[~v] + else: + break + + if max_iter is None: + max_iter = n + max_iter += iteration + + termination_status = None + + # Main BVLS loop. + + optimality = compute_kkt_optimality(g, on_bound) + for iteration in range(iteration, max_iter): + if verbose == 2: + print_iteration_linear(iteration, cost, cost_change, + step_norm, optimality) + + if optimality < tol: + termination_status = 1 + + if termination_status is not None: + break + + move_to_free = np.argmax(g * on_bound) + on_bound[move_to_free] = 0 + free_set = on_bound == 0 + active_set = ~free_set + free_set, = np.nonzero(free_set) + + x_free = x[free_set] + x_free_old = x_free.copy() + lb_free = lb[free_set] + ub_free = ub[free_set] + + A_free = A[:, free_set] + b_free = b - A.dot(x * active_set) + z = lstsq(A_free, b_free, rcond=-1)[0] + + lbv, = np.nonzero(z < lb_free) + ubv, = np.nonzero(z > ub_free) + v = np.hstack((lbv, ubv)) + + if v.size > 0: + alphas = np.hstack(( + lb_free[lbv] - x_free[lbv], + ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v]) + + i = np.argmin(alphas) + i_free = v[i] + alpha = alphas[i] + + x_free *= 1 - alpha + x_free += alpha * z + + if i < lbv.size: + on_bound[free_set[i_free]] = -1 + else: + on_bound[free_set[i_free]] = 1 + else: + x_free = z + + x[free_set] = x_free + step_norm = norm(x_free - x_free_old) + + r = A.dot(x) - b + cost_new = 0.5 * np.dot(r, r) + cost_change = cost - cost_new + + if cost_change < tol * cost: + termination_status = 2 + cost = cost_new + + g = A.T.dot(r) + optimality = compute_kkt_optimality(g, on_bound) + + if termination_status is None: + termination_status = 0 + + return OptimizeResult( + x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound, + nit=iteration + 1, status=termination_status, + initial_cost=initial_cost) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/bvls.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/bvls.pyc new file mode 100644 index 0000000..c81bb36 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/bvls.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/common.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/common.py new file mode 100644 index 0000000..2327d61 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/common.py @@ -0,0 +1,735 @@ +"""Functions used by least-squares algorithms.""" +from __future__ import division, print_function, absolute_import + +from math import copysign + +import numpy as np +from numpy.linalg import norm + +from scipy.linalg import cho_factor, cho_solve, LinAlgError +from scipy.sparse import issparse 
+from scipy.sparse.linalg import LinearOperator, aslinearoperator
+
+
+EPS = np.finfo(float).eps
+
+
+# Functions related to a trust-region problem.
+
+
+def intersect_trust_region(x, s, Delta):
+    """Find the intersection of a line with the boundary of a trust region.
+
+    This function solves the quadratic equation with respect to t
+    ||(x + s*t)||**2 = Delta**2.
+
+    Returns
+    -------
+    t_neg, t_pos : tuple of float
+        Negative and positive roots.
+
+    Raises
+    ------
+    ValueError
+        If `s` is zero or `x` is not within the trust region.
+    """
+    a = np.dot(s, s)
+    if a == 0:
+        raise ValueError("`s` is zero.")
+
+    b = np.dot(x, s)
+
+    c = np.dot(x, x) - Delta**2
+    if c > 0:
+        raise ValueError("`x` is not within the trust region.")
+
+    d = np.sqrt(b*b - a*c)  # Root from one fourth of the discriminant.
+
+    # Computations below avoid loss of significance, see "Numerical Recipes".
+    q = -(b + copysign(d, b))
+    t1 = q / a
+    t2 = c / q
+
+    if t1 < t2:
+        return t1, t2
+    else:
+        return t2, t1
+
+
+def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
+                           rtol=0.01, max_iter=10):
+    """Solve a trust-region problem arising in least-squares minimization.
+
+    This function implements a method described by J. J. More [1]_ and used
+    in MINPACK, but it relies on a single SVD of the Jacobian instead of a
+    series of Cholesky decompositions. Before running this function, compute:
+    ``U, s, VT = svd(J, full_matrices=False)``.
+
+    Parameters
+    ----------
+    n : int
+        Number of variables.
+    m : int
+        Number of residuals.
+    uf : ndarray
+        Computed as U.T.dot(f).
+    s : ndarray
+        Singular values of J.
+    V : ndarray
+        Transpose of VT.
+    Delta : float
+        Radius of a trust region.
+    initial_alpha : float, optional
+        Initial guess for alpha, which might be available from a previous
+        iteration. If None, determined automatically.
+    rtol : float, optional
+        Stopping tolerance for the root-finding procedure. Namely, the
+        solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
+    max_iter : int, optional
+        Maximum allowed number of iterations for the root-finding procedure.
+
+    Returns
+    -------
+    p : ndarray, shape (n,)
+        Found solution of a trust-region problem.
+    alpha : float
+        Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
+        Sometimes called the Levenberg-Marquardt parameter.
+    n_iter : int
+        Number of iterations made by the root-finding procedure. Zero means
+        that the Gauss-Newton step was selected as the solution.
+
+    References
+    ----------
+    .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
+           and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
+           in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
+    """
+    def phi_and_derivative(alpha, suf, s, Delta):
+        """Function of which to find zero.
+
+        It is defined as "norm of regularized (by alpha) least-squares
+        solution minus `Delta`". Refer to [1]_.
+        """
+        denom = s**2 + alpha
+        p_norm = norm(suf / denom)
+        phi = p_norm - Delta
+        phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
+        return phi, phi_prime
+
+    suf = s * uf
+
+    # Check if J has full rank and try Gauss-Newton step.
+    if m >= n:
+        threshold = EPS * m * s[0]
+        full_rank = s[-1] > threshold
+    else:
+        full_rank = False
+
+    if full_rank:
+        p = -V.dot(uf / s)
+        if norm(p) <= Delta:
+            return p, 0.0, 0
+
+    alpha_upper = norm(suf) / Delta
+
+    if full_rank:
+        phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
+        alpha_lower = -phi / phi_prime
+    else:
+        alpha_lower = 0.0
+
+    if initial_alpha is None or not full_rank and initial_alpha == 0:
+        alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
+    else:
+        alpha = initial_alpha
+
+    for it in range(max_iter):
+        if alpha < alpha_lower or alpha > alpha_upper:
+            alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
+
+        phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
+
+        if phi < 0:
+            alpha_upper = alpha
+
+        ratio = phi / phi_prime
+        alpha_lower = max(alpha_lower, alpha - ratio)
+        alpha -= (phi + Delta) * ratio / Delta
+
+        if np.abs(phi) < rtol * Delta:
+            break
+
+    p = -V.dot(suf / (s**2 + alpha))
+
+    # Make the norm of p equal to Delta; p is changed only slightly during
+    # this. It is done to prevent p from lying outside the trust region
+    # (which can cause problems later).
+    p *= Delta / norm(p)
+
+    return p, alpha, it + 1
+
+
+def solve_trust_region_2d(B, g, Delta):
+    """Solve a general trust-region problem in 2 dimensions.
+
+    The problem is reformulated as a 4-th order algebraic equation,
+    the solution of which is found by numpy.roots.
+
+    Parameters
+    ----------
+    B : ndarray, shape (2, 2)
+        Symmetric matrix, defines a quadratic term of the function.
+    g : ndarray, shape (2,)
+        Defines a linear term of the function.
+    Delta : float
+        Radius of a trust region.
+
+    Returns
+    -------
+    p : ndarray, shape (2,)
+        Found solution.
+    newton_step : bool
+        Whether the returned solution is the Newton step which lies within
+        the trust region.
+    """
+    try:
+        R, lower = cho_factor(B)
+        p = -cho_solve((R, lower), g)
+        if np.dot(p, p) <= Delta**2:
+            return p, True
+    except LinAlgError:
+        pass
+
+    a = B[0, 0] * Delta**2
+    b = B[0, 1] * Delta**2
+    c = B[1, 1] * Delta**2
+
+    d = g[0] * Delta
+    f = g[1] * Delta
+
+    coeffs = np.array(
+        [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
+    t = np.roots(coeffs)  # Can handle leading zeros.
+    t = np.real(t[np.isreal(t)])
+
+    p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
+    value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
+    i = np.argmin(value)
+    p = p[:, i]
+
+    return p, False
+
+
+def update_tr_radius(Delta, actual_reduction, predicted_reduction,
+                     step_norm, bound_hit):
+    """Update the radius of a trust region based on the cost reduction.
+
+    Returns
+    -------
+    Delta : float
+        New radius.
+    ratio : float
+        Ratio between actual and predicted reductions. Zero if predicted
+        reduction is zero.
+    """
+    if predicted_reduction > 0:
+        ratio = actual_reduction / predicted_reduction
+    else:
+        ratio = 0
+
+    if ratio < 0.25:
+        Delta = 0.25 * step_norm
+    elif ratio > 0.75 and bound_hit:
+        Delta *= 2.0
+
+    return Delta, ratio
+
+
+# Construction and minimization of quadratic functions.
+
+
+def build_quadratic_1d(J, g, s, diag=None, s0=None):
+    """Parameterize a multivariate quadratic function along a line.
+
+    The resulting univariate quadratic function is given as follows:
+    ::
+        f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
+               g.T * (s0 + s*t)
+
+    Parameters
+    ----------
+    J : ndarray, sparse matrix or LinearOperator shape (m, n)
+        Jacobian matrix, affects the quadratic term.
+    g : ndarray, shape (n,)
+        Gradient, defines the linear term.
+    s : ndarray, shape (n,)
+        Direction vector of a line.
+    diag : None or ndarray with shape (n,), optional
+        Additional diagonal part, affects the quadratic term.
+        If None, assumed to be 0.
+    s0 : None or ndarray with shape (n,), optional
+        Initial point. If None, assumed to be 0.
+
+    Returns
+    -------
+    a : float
+        Coefficient for t**2.
+    b : float
+        Coefficient for t.
+    c : float
+        Free term. Returned only if `s0` is provided.
+    """
+    v = J.dot(s)
+    a = np.dot(v, v)
+    if diag is not None:
+        a += np.dot(s * diag, s)
+    a *= 0.5
+
+    b = np.dot(g, s)
+
+    if s0 is not None:
+        u = J.dot(s0)
+        b += np.dot(u, v)
+        c = 0.5 * np.dot(u, u) + np.dot(g, s0)
+        if diag is not None:
+            b += np.dot(s0 * diag, s)
+            c += 0.5 * np.dot(s0 * diag, s0)
+        return a, b, c
+    else:
+        return a, b
+
+
+def minimize_quadratic_1d(a, b, lb, ub, c=0):
+    """Minimize a 1-d quadratic function subject to bounds.
+
+    The free term `c` is 0 by default. Bounds must be finite.
+
+    Returns
+    -------
+    t : float
+        Minimum point.
+    y : float
+        Minimum value.
+    """
+    t = [lb, ub]
+    if a != 0:
+        extremum = -0.5 * b / a
+        if lb < extremum < ub:
+            t.append(extremum)
+    t = np.asarray(t)
+    y = a * t**2 + b * t + c
+    min_index = np.argmin(y)
+    return t[min_index], y[min_index]
+
+
+def evaluate_quadratic(J, g, s, diag=None):
+    """Compute values of a quadratic function arising in least squares.
+
+    The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.
+
+    Parameters
+    ----------
+    J : ndarray, sparse matrix or LinearOperator, shape (m, n)
+        Jacobian matrix, affects the quadratic term.
+    g : ndarray, shape (n,)
+        Gradient, defines the linear term.
+    s : ndarray, shape (k, n) or (n,)
+        Array containing steps as rows.
+    diag : ndarray, shape (n,), optional
+        Additional diagonal part, affects the quadratic term.
+        If None, assumed to be 0.
+
+    Returns
+    -------
+    values : ndarray with shape (k,) or float
+        Values of the function. If `s` was 2-dimensional then ndarray is
+        returned, otherwise float is returned.
+    """
+    if s.ndim == 1:
+        Js = J.dot(s)
+        q = np.dot(Js, Js)
+        if diag is not None:
+            q += np.dot(s * diag, s)
+    else:
+        Js = J.dot(s.T)
+        q = np.sum(Js**2, axis=0)
+        if diag is not None:
+            q += np.sum(diag * s**2, axis=1)
+
+    l = np.dot(s, g)
+
+    return 0.5 * q + l
+
+
+# Utility functions to work with bound constraints.
+
+
+def in_bounds(x, lb, ub):
+    """Check if a point lies within bounds."""
+    return np.all((x >= lb) & (x <= ub))
+
+
+def step_size_to_bound(x, s, lb, ub):
+    """Compute the minimum step size required to reach a bound.
+
+    The function computes a positive scalar t, such that x + s * t is on
+    the bound.
+
+    Returns
+    -------
+    step : float
+        Computed step. Non-negative value.
+    hits : ndarray of int with shape of x
+        Each element indicates whether a corresponding variable reaches the
+        bound:
+
+        * 0 - the bound was not hit.
+        * -1 - the lower bound was hit.
+        * 1 - the upper bound was hit.
+    """
+    non_zero = np.nonzero(s)
+    s_non_zero = s[non_zero]
+    steps = np.empty_like(x)
+    steps.fill(np.inf)
+    with np.errstate(over='ignore'):
+        steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,
+                                     (ub - x)[non_zero] / s_non_zero)
+    min_step = np.min(steps)
+    return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)
+
+
+def find_active_constraints(x, lb, ub, rtol=1e-10):
+    """Determine which constraints are active in a given point.
+
+    The threshold is computed using `rtol` and the absolute value of the
+    closest bound.
+
+    Returns
+    -------
+    active : ndarray of int with shape of x
+        Each component shows whether the corresponding constraint is active:
+
+        * 0 - a constraint is not active.
+        * -1 - a lower bound is active.
+        * 1 - an upper bound is active.
+    """
+    active = np.zeros_like(x, dtype=int)
+
+    if rtol == 0:
+        active[x <= lb] = -1
+        active[x >= ub] = 1
+        return active
+
+    lower_dist = x - lb
+    upper_dist = ub - x
+
+    lower_threshold = rtol * np.maximum(1, np.abs(lb))
+    upper_threshold = rtol * np.maximum(1, np.abs(ub))
+
+    lower_active = (np.isfinite(lb) &
+                    (lower_dist <= np.minimum(upper_dist, lower_threshold)))
+    active[lower_active] = -1
+
+    upper_active = (np.isfinite(ub) &
+                    (upper_dist <= np.minimum(lower_dist, upper_threshold)))
+    active[upper_active] = 1
+
+    return active
+
+
+def make_strictly_feasible(x, lb, ub, rstep=1e-10):
+    """Shift a point to the interior of a feasible region.
+
+    Each element of the returned vector is at least at a relative distance
+    `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is
+    used.
+    """
+    x_new = x.copy()
+
+    active = find_active_constraints(x, lb, ub, rstep)
+    lower_mask = np.equal(active, -1)
+    upper_mask = np.equal(active, 1)
+
+    if rstep == 0:
+        x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
+        x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
+    else:
+        x_new[lower_mask] = (lb[lower_mask] +
+                             rstep * np.maximum(1, np.abs(lb[lower_mask])))
+        x_new[upper_mask] = (ub[upper_mask] -
+                             rstep * np.maximum(1, np.abs(ub[upper_mask])))
+
+    tight_bounds = (x_new < lb) | (x_new > ub)
+    x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])
+
+    return x_new
+
+
+def CL_scaling_vector(x, g, lb, ub):
+    """Compute Coleman-Li scaling vector and its derivatives.
+
+    Components of a vector v are defined as follows:
+    ::
+               | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
+        v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
+               | 1,            otherwise
+
+    According to this definition v[i] >= 0 for all i. It differs from the
+    definition in paper [1]_ (eq. (2.2)), where the absolute value of v is
+    used. Both definitions are equivalent down the line.
+    Derivatives of v with respect to x take value 1, -1 or 0 depending on a
+    case.
+
+    Returns
+    -------
+    v : ndarray with shape of x
+        Scaling vector.
+    dv : ndarray with shape of x
+        Derivatives of v[i] with respect to x[i], diagonal elements of v's
+        Jacobian.
+
+    References
+    ----------
+    .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior,
+           and Conjugate Gradient Method for Large-Scale Bound-Constrained
+           Minimization Problems," SIAM Journal on Scientific Computing,
+           Vol. 21, Number 1, pp 1-23, 1999.
+ """ + v = np.ones_like(x) + dv = np.zeros_like(x) + + mask = (g < 0) & np.isfinite(ub) + v[mask] = ub[mask] - x[mask] + dv[mask] = -1 + + mask = (g > 0) & np.isfinite(lb) + v[mask] = x[mask] - lb[mask] + dv[mask] = 1 + + return v, dv + + +def reflective_transformation(y, lb, ub): + """Compute reflective transformation and its gradient.""" + if in_bounds(y, lb, ub): + return y, np.ones_like(y) + + lb_finite = np.isfinite(lb) + ub_finite = np.isfinite(ub) + + x = y.copy() + g_negative = np.zeros_like(y, dtype=bool) + + mask = lb_finite & ~ub_finite + x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask]) + g_negative[mask] = y[mask] < lb[mask] + + mask = ~lb_finite & ub_finite + x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask]) + g_negative[mask] = y[mask] > ub[mask] + + mask = lb_finite & ub_finite + d = ub - lb + t = np.remainder(y[mask] - lb[mask], 2 * d[mask]) + x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t) + g_negative[mask] = t > d[mask] + + g = np.ones_like(y) + g[g_negative] = -1 + + return x, g + + +# Functions to display algorithm's progress. + + +def print_header_nonlinear(): + print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}" + .format("Iteration", "Total nfev", "Cost", "Cost reduction", + "Step norm", "Optimality")) + + +def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction, + step_norm, optimality): + if cost_reduction is None: + cost_reduction = " " * 15 + else: + cost_reduction = "{0:^15.2e}".format(cost_reduction) + + if step_norm is None: + step_norm = " " * 15 + else: + step_norm = "{0:^15.2e}".format(step_norm) + + print("{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}" + .format(iteration, nfev, cost, cost_reduction, + step_norm, optimality)) + + +def print_header_linear(): + print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}" + .format("Iteration", "Cost", "Cost reduction", "Step norm", + "Optimality")) + + +def print_iteration_linear(iteration, cost, cost_reduction, step_norm, + optimality): + if cost_reduction is None: + cost_reduction = " " * 15 + else: + cost_reduction = "{0:^15.2e}".format(cost_reduction) + + if step_norm is None: + step_norm = " " * 15 + else: + step_norm = "{0:^15.2e}".format(step_norm) + + print("{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}".format( + iteration, cost, cost_reduction, step_norm, optimality)) + + +# Simple helper functions. 
+
+
+def compute_grad(J, f):
+    """Compute gradient of the least-squares cost function."""
+    if isinstance(J, LinearOperator):
+        return J.rmatvec(f)
+    else:
+        return J.T.dot(f)
+
+
+def compute_jac_scale(J, scale_inv_old=None):
+    """Compute variables scale based on the Jacobian matrix."""
+    if issparse(J):
+        scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5
+    else:
+        scale_inv = np.sum(J**2, axis=0)**0.5
+
+    if scale_inv_old is None:
+        scale_inv[scale_inv == 0] = 1
+    else:
+        scale_inv = np.maximum(scale_inv, scale_inv_old)
+
+    return 1 / scale_inv, scale_inv
+
+
+def left_multiplied_operator(J, d):
+    """Return diag(d) J as LinearOperator."""
+    J = aslinearoperator(J)
+
+    def matvec(x):
+        return d * J.matvec(x)
+
+    def matmat(X):
+        return d[:, np.newaxis] * J.matmat(X)
+
+    def rmatvec(x):
+        return J.rmatvec(x.ravel() * d)
+
+    return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
+                          rmatvec=rmatvec)
+
+
+def right_multiplied_operator(J, d):
+    """Return J diag(d) as LinearOperator."""
+    J = aslinearoperator(J)
+
+    def matvec(x):
+        return J.matvec(np.ravel(x) * d)
+
+    def matmat(X):
+        return J.matmat(X * d[:, np.newaxis])
+
+    def rmatvec(x):
+        return d * J.rmatvec(x)
+
+    return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
+                          rmatvec=rmatvec)
+
+
+def regularized_lsq_operator(J, diag):
+    """Return a matrix arising in regularized least squares as LinearOperator.
+
+    The matrix is
+        [ J ]
+        [ D ]
+    where D is diagonal matrix with elements from `diag`.
+    """
+    J = aslinearoperator(J)
+    m, n = J.shape
+
+    def matvec(x):
+        return np.hstack((J.matvec(x), diag * x))
+
+    def rmatvec(x):
+        x1 = x[:m]
+        x2 = x[m:]
+        return J.rmatvec(x1) + diag * x2
+
+    return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
+
+
+def right_multiply(J, d, copy=True):
+    """Compute J diag(d).
+
+    If `copy` is False, `J` is modified in place (unless it is a
+    LinearOperator).
+    """
+    if copy and not isinstance(J, LinearOperator):
+        J = J.copy()
+
+    if issparse(J):
+        J.data *= d.take(J.indices, mode='clip')  # scikit-learn recipe.
+    elif isinstance(J, LinearOperator):
+        J = right_multiplied_operator(J, d)
+    else:
+        J *= d
+
+    return J
+
+
+def left_multiply(J, d, copy=True):
+    """Compute diag(d) J.
+
+    If `copy` is False, `J` is modified in place (unless it is a
+    LinearOperator).
+    """
+    if copy and not isinstance(J, LinearOperator):
+        J = J.copy()
+
+    if issparse(J):
+        J.data *= np.repeat(d, np.diff(J.indptr))  # scikit-learn recipe.
+    elif isinstance(J, LinearOperator):
+        J = left_multiplied_operator(J, d)
+    else:
+        J *= d[:, np.newaxis]
+
+    return J
+
+
+def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
+    """Check termination condition for nonlinear least squares."""
+    ftol_satisfied = dF < ftol * F and ratio > 0.25
+    xtol_satisfied = dx_norm < xtol * (xtol + x_norm)
+
+    if ftol_satisfied and xtol_satisfied:
+        return 4
+    elif ftol_satisfied:
+        return 2
+    elif xtol_satisfied:
+        return 3
+    else:
+        return None
+
+
+def scale_for_robust_loss_function(J, f, rho):
+    """Scale Jacobian and residuals for a robust loss function.
+
+    Arrays are modified in place.
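+
+    Editor's note (an assumption based on how this helper is called elsewhere
+    in this package): ``rho`` is expected to hold the robust loss and its
+    first and second derivatives with respect to the squared residuals, as
+    ``rho[0]``, ``rho[1]`` and ``rho[2]``.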
+ """ + J_scale = rho[1] + 2 * rho[2] * f**2 + J_scale[J_scale < EPS] = EPS + J_scale **= 0.5 + + f *= rho[1] / J_scale + + return left_multiply(J, J_scale, copy=False), f diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/common.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/common.pyc new file mode 100644 index 0000000..48875fa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/common.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/dogbox.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/dogbox.py new file mode 100644 index 0000000..05981ef --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/dogbox.py @@ -0,0 +1,332 @@ +""" +dogleg algorithm with rectangular trust regions for least-squares minimization. + +The description of the algorithm can be found in [Voglis]_. The algorithm does +trust-region iterations, but the shape of trust regions is rectangular as +opposed to conventional elliptical. The intersection of a trust region and +an initial feasible region is again some rectangle. Thus on each iteration a +bound-constrained quadratic optimization problem is solved. + +A quadratic problem is solved by well-known dogleg approach, where the +function is minimized along piecewise-linear "dogleg" path [NumOpt]_, +Chapter 4. If Jacobian is not rank-deficient then the function is decreasing +along this path, and optimization amounts to simply following along this +path as long as a point stays within the bounds. A constrained Cauchy step +(along the anti-gradient) is considered for safety in rank deficient cases, +in this situations the convergence might be slow. + +If during iterations some variable hit the initial bound and the component +of anti-gradient points outside the feasible region, then a next dogleg step +won't make any progress. At this state such variables satisfy first-order +optimality conditions and they are excluded before computing a next dogleg +step. + +Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense +Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for +dense and sparse matrices, or Jacobian being LinearOperator). The second +option allows to solve very large problems (up to couple of millions of +residuals on a regular PC), provided the Jacobian matrix is sufficiently +sparse. But note that dogbox is not very good for solving problems with +large number of constraints, because of variables exclusion-inclusion on each +iteration (a required number of function evaluations might be high or accuracy +of a solution will be poor), thus its large-scale usage is probably limited +to unconstrained problems. + +References +---------- +.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg + Approach for Unconstrained and Bound Constrained Nonlinear + Optimization", WSEAS International Conference on Applied + Mathematics, Corfu, Greece, 2004. +.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition". 
+""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.linalg import lstsq, norm + +from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr +from scipy.optimize import OptimizeResult +from scipy._lib.six import string_types + +from .common import ( + step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic, + build_quadratic_1d, minimize_quadratic_1d, compute_grad, + compute_jac_scale, check_termination, scale_for_robust_loss_function, + print_header_nonlinear, print_iteration_nonlinear) + + +def lsmr_operator(Jop, d, active_set): + """Compute LinearOperator to use in LSMR by dogbox algorithm. + + `active_set` mask is used to excluded active variables from computations + of matrix-vector products. + """ + m, n = Jop.shape + + def matvec(x): + x_free = x.ravel().copy() + x_free[active_set] = 0 + return Jop.matvec(x * d) + + def rmatvec(x): + r = d * Jop.rmatvec(x) + r[active_set] = 0 + return r + + return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float) + + +def find_intersection(x, tr_bounds, lb, ub): + """Find intersection of trust-region bounds and initial bounds. + + Returns + ------- + lb_total, ub_total : ndarray with shape of x + Lower and upper bounds of the intersection region. + orig_l, orig_u : ndarray of bool with shape of x + True means that an original bound is taken as a corresponding bound + in the intersection region. + tr_l, tr_u : ndarray of bool with shape of x + True means that a trust-region bound is taken as a corresponding bound + in the intersection region. + """ + lb_centered = lb - x + ub_centered = ub - x + + lb_total = np.maximum(lb_centered, -tr_bounds) + ub_total = np.minimum(ub_centered, tr_bounds) + + orig_l = np.equal(lb_total, lb_centered) + orig_u = np.equal(ub_total, ub_centered) + + tr_l = np.equal(lb_total, -tr_bounds) + tr_u = np.equal(ub_total, tr_bounds) + + return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u + + +def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub): + """Find dogleg step in a rectangular region. + + Returns + ------- + step : ndarray, shape (n,) + Computed dogleg step. + bound_hits : ndarray of int, shape (n,) + Each component shows whether a corresponding variable hits the + initial bound after the step is taken: + * 0 - a variable doesn't hit the bound. + * -1 - lower bound is hit. + * 1 - upper bound is hit. + tr_hit : bool + Whether the step hit the boundary of the trust-region. + """ + lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection( + x, tr_bounds, lb, ub + ) + bound_hits = np.zeros_like(x, dtype=int) + + if in_bounds(newton_step, lb_total, ub_total): + return newton_step, bound_hits, False + + to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total) + + # The classical dogleg algorithm would check if Cauchy step fits into + # the bounds, and just return it constrained version if not. But in a + # rectangular trust region it makes sense to try to improve constrained + # Cauchy step too. Thus we don't distinguish these two cases. 
+ + cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g + + step_diff = newton_step - cauchy_step + step_size, hits = step_size_to_bound(cauchy_step, step_diff, + lb_total, ub_total) + bound_hits[(hits < 0) & orig_l] = -1 + bound_hits[(hits > 0) & orig_u] = 1 + tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u) + + return cauchy_step + step_size * step_diff, bound_hits, tr_hit + + +def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, + loss_function, tr_solver, tr_options, verbose): + f = f0 + f_true = f.copy() + nfev = 1 + + J = J0 + njev = 1 + + if loss_function is not None: + rho = loss_function(f) + cost = 0.5 * np.sum(rho[0]) + J, f = scale_for_robust_loss_function(J, f, rho) + else: + cost = 0.5 * np.dot(f, f) + + g = compute_grad(J, f) + + jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac' + if jac_scale: + scale, scale_inv = compute_jac_scale(J) + else: + scale, scale_inv = x_scale, 1 / x_scale + + Delta = norm(x0 * scale_inv, ord=np.inf) + if Delta == 0: + Delta = 1.0 + + on_bound = np.zeros_like(x0, dtype=int) + on_bound[np.equal(x0, lb)] = -1 + on_bound[np.equal(x0, ub)] = 1 + + x = x0 + step = np.empty_like(x0) + + if max_nfev is None: + max_nfev = x0.size * 100 + + termination_status = None + iteration = 0 + step_norm = None + actual_reduction = None + + if verbose == 2: + print_header_nonlinear() + + while True: + active_set = on_bound * g < 0 + free_set = ~active_set + + g_free = g[free_set] + g_full = g.copy() + g[active_set] = 0 + + g_norm = norm(g, ord=np.inf) + if g_norm < gtol: + termination_status = 1 + + if verbose == 2: + print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, + step_norm, g_norm) + + if termination_status is not None or nfev == max_nfev: + break + + x_free = x[free_set] + lb_free = lb[free_set] + ub_free = ub[free_set] + scale_free = scale[free_set] + + # Compute (Gauss-)Newton and build quadratic model for Cauchy step. + if tr_solver == 'exact': + J_free = J[:, free_set] + newton_step = lstsq(J_free, -f, rcond=-1)[0] + + # Coefficients for the quadratic model along the anti-gradient. + a, b = build_quadratic_1d(J_free, g_free, -g_free) + elif tr_solver == 'lsmr': + Jop = aslinearoperator(J) + + # We compute lsmr step in scaled variables and then + # transform back to normal variables, if lsmr would give exact lsq + # solution this would be equivalent to not doing any + # transformations, but from experience it's better this way. + + # We pass active_set to make computations as if we selected + # the free subset of J columns, but without actually doing any + # slicing, which is expensive for sparse matrices and impossible + # for LinearOperator. + + lsmr_op = lsmr_operator(Jop, scale, active_set) + newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set] + newton_step *= scale_free + + # Components of g for active variables were zeroed, so this call + # is correct and equivalent to using J_free and g_free. 
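+            # Note: with s = -g, build_quadratic_1d returns a = ||J s||**2
+            # and b = g.dot(s) = -||g||**2, the coefficients of the 1-d
+            # model 0.5 * a * t**2 + b * t that dogleg_step minimizes for
+            # the constrained Cauchy step.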
+ a, b = build_quadratic_1d(Jop, g, -g) + + actual_reduction = -1.0 + while actual_reduction <= 0 and nfev < max_nfev: + tr_bounds = Delta * scale_free + + step_free, on_bound_free, tr_hit = dogleg_step( + x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free) + + step.fill(0.0) + step[free_set] = step_free + + if tr_solver == 'exact': + predicted_reduction = -evaluate_quadratic(J_free, g_free, + step_free) + elif tr_solver == 'lsmr': + predicted_reduction = -evaluate_quadratic(Jop, g, step) + + x_new = x + step + f_new = fun(x_new) + nfev += 1 + + step_h_norm = norm(step * scale_inv, ord=np.inf) + + if not np.all(np.isfinite(f_new)): + Delta = 0.25 * step_h_norm + continue + + # Usual trust-region step quality estimation. + if loss_function is not None: + cost_new = loss_function(f_new, cost_only=True) + else: + cost_new = 0.5 * np.dot(f_new, f_new) + actual_reduction = cost - cost_new + + Delta, ratio = update_tr_radius( + Delta, actual_reduction, predicted_reduction, + step_h_norm, tr_hit + ) + + step_norm = norm(step) + termination_status = check_termination( + actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) + + if termination_status is not None: + break + + if actual_reduction > 0: + on_bound[free_set] = on_bound_free + + x = x_new + # Set variables exactly at the boundary. + mask = on_bound == -1 + x[mask] = lb[mask] + mask = on_bound == 1 + x[mask] = ub[mask] + + f = f_new + f_true = f.copy() + + cost = cost_new + + J = jac(x, f) + njev += 1 + + if loss_function is not None: + rho = loss_function(f) + J, f = scale_for_robust_loss_function(J, f, rho) + + g = compute_grad(J, f) + + if jac_scale: + scale, scale_inv = compute_jac_scale(J, scale_inv) + else: + step_norm = 0 + actual_reduction = 0 + + iteration += 1 + + if termination_status is None: + termination_status = 0 + + return OptimizeResult( + x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm, + active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/dogbox.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/dogbox.pyc new file mode 100644 index 0000000..3ce0cac Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/dogbox.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/givens_elimination.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/givens_elimination.so new file mode 100755 index 0000000..d2a6b74 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/givens_elimination.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.py new file mode 100644 index 0000000..6433a1e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.py @@ -0,0 +1,930 @@ +"""Generic interface for least-square minimization.""" +from __future__ import division, print_function, absolute_import + +from warnings import warn + +import numpy as np +from numpy.linalg import norm + +from scipy.sparse import issparse, csr_matrix +from scipy.sparse.linalg import LinearOperator +from scipy.optimize import _minpack, OptimizeResult +from scipy.optimize._numdiff import approx_derivative, group_columns +from scipy._lib.six import string_types + +from .trf import trf +from .dogbox import dogbox +from .common import EPS, in_bounds, 
make_strictly_feasible + + +TERMINATION_MESSAGES = { + -1: "Improper input parameters status returned from `leastsq`", + 0: "The maximum number of function evaluations is exceeded.", + 1: "`gtol` termination condition is satisfied.", + 2: "`ftol` termination condition is satisfied.", + 3: "`xtol` termination condition is satisfied.", + 4: "Both `ftol` and `xtol` termination conditions are satisfied." +} + + +FROM_MINPACK_TO_COMMON = { + 0: -1, # Improper input parameters from MINPACK. + 1: 2, + 2: 3, + 3: 4, + 4: 1, + 5: 0 + # There are 6, 7, 8 for too small tolerance parameters, + # but we guard against it by checking ftol, xtol, gtol beforehand. +} + + +def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step): + n = x0.size + + if diff_step is None: + epsfcn = EPS + else: + epsfcn = diff_step**2 + + # Compute MINPACK's `diag`, which is inverse of our `x_scale` and + # ``x_scale='jac'`` corresponds to ``diag=None``. + if isinstance(x_scale, string_types) and x_scale == 'jac': + diag = None + else: + diag = 1 / x_scale + + full_output = True + col_deriv = False + factor = 100.0 + + if jac is None: + if max_nfev is None: + # n squared to account for Jacobian evaluations. + max_nfev = 100 * n * (n + 1) + x, info, status = _minpack._lmdif( + fun, x0, (), full_output, ftol, xtol, gtol, + max_nfev, epsfcn, factor, diag) + else: + if max_nfev is None: + max_nfev = 100 * n + x, info, status = _minpack._lmder( + fun, jac, x0, (), full_output, col_deriv, + ftol, xtol, gtol, max_nfev, factor, diag) + + f = info['fvec'] + + if callable(jac): + J = jac(x) + else: + J = np.atleast_2d(approx_derivative(fun, x)) + + cost = 0.5 * np.dot(f, f) + g = J.T.dot(f) + g_norm = norm(g, ord=np.inf) + + nfev = info['nfev'] + njev = info.get('njev', None) + + status = FROM_MINPACK_TO_COMMON[status] + active_mask = np.zeros_like(x0, dtype=int) + + return OptimizeResult( + x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm, + active_mask=active_mask, nfev=nfev, njev=njev, status=status) + + +def prepare_bounds(bounds, n): + lb, ub = [np.asarray(b, dtype=float) for b in bounds] + if lb.ndim == 0: + lb = np.resize(lb, n) + + if ub.ndim == 0: + ub = np.resize(ub, n) + + return lb, ub + + +def check_tolerance(ftol, xtol, gtol): + message = "{} is too low, setting to machine epsilon {}." + if ftol < EPS: + warn(message.format("`ftol`", EPS)) + ftol = EPS + if xtol < EPS: + warn(message.format("`xtol`", EPS)) + xtol = EPS + if gtol < EPS: + warn(message.format("`gtol`", EPS)) + gtol = EPS + + return ftol, xtol, gtol + + +def check_x_scale(x_scale, x0): + if isinstance(x_scale, string_types) and x_scale == 'jac': + return x_scale + + try: + x_scale = np.asarray(x_scale, dtype=float) + valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0) + except (ValueError, TypeError): + valid = False + + if not valid: + raise ValueError("`x_scale` must be 'jac' or array_like with " + "positive numbers.") + + if x_scale.ndim == 0: + x_scale = np.resize(x_scale, x0.shape) + + if x_scale.shape != x0.shape: + raise ValueError("Inconsistent shapes between `x_scale` and `x0`.") + + return x_scale + + +def check_jac_sparsity(jac_sparsity, m, n): + if jac_sparsity is None: + return None + + if not issparse(jac_sparsity): + jac_sparsity = np.atleast_2d(jac_sparsity) + + if jac_sparsity.shape != (m, n): + raise ValueError("`jac_sparsity` has wrong shape.") + + return jac_sparsity, group_columns(jac_sparsity) + + +# Loss functions. 
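+#
+# Each built-in loss below fills a (3, m) array `rho` in place for z = f**2:
+# rho[0] holds rho(z), rho[1] its first derivative and rho[2] its second
+# derivative, matching the convention documented for the `loss` parameter of
+# `least_squares`. With ``cost_only=True`` only rho[0] is needed, so the
+# derivative rows are skipped.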
+ + +def huber(z, rho, cost_only): + mask = z <= 1 + rho[0, mask] = z[mask] + rho[0, ~mask] = 2 * z[~mask]**0.5 - 1 + if cost_only: + return + rho[1, mask] = 1 + rho[1, ~mask] = z[~mask]**-0.5 + rho[2, mask] = 0 + rho[2, ~mask] = -0.5 * z[~mask]**-1.5 + + +def soft_l1(z, rho, cost_only): + t = 1 + z + rho[0] = 2 * (t**0.5 - 1) + if cost_only: + return + rho[1] = t**-0.5 + rho[2] = -0.5 * t**-1.5 + + +def cauchy(z, rho, cost_only): + rho[0] = np.log1p(z) + if cost_only: + return + t = 1 + z + rho[1] = 1 / t + rho[2] = -1 / t**2 + + +def arctan(z, rho, cost_only): + rho[0] = np.arctan(z) + if cost_only: + return + t = 1 + z**2 + rho[1] = 1 / t + rho[2] = -2 * z / t**2 + + +IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1, + cauchy=cauchy, arctan=arctan) + + +def construct_loss_function(m, loss, f_scale): + if loss == 'linear': + return None + + if not callable(loss): + loss = IMPLEMENTED_LOSSES[loss] + rho = np.empty((3, m)) + + def loss_function(f, cost_only=False): + z = (f / f_scale) ** 2 + loss(z, rho, cost_only=cost_only) + if cost_only: + return 0.5 * f_scale ** 2 * np.sum(rho[0]) + rho[0] *= f_scale ** 2 + rho[2] /= f_scale ** 2 + return rho + else: + def loss_function(f, cost_only=False): + z = (f / f_scale) ** 2 + rho = loss(z) + if cost_only: + return 0.5 * f_scale ** 2 * np.sum(rho[0]) + rho[0] *= f_scale ** 2 + rho[2] /= f_scale ** 2 + return rho + + return loss_function + + +def least_squares( + fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf', + ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear', + f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, + jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}): + """Solve a nonlinear least-squares problem with bounds on the variables. + + Given the residuals f(x) (an m-dimensional real function of n real + variables) and the loss function rho(s) (a scalar function), `least_squares` + finds a local minimum of the cost function F(x):: + + minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1) + subject to lb <= x <= ub + + The purpose of the loss function rho(s) is to reduce the influence of + outliers on the solution. + + Parameters + ---------- + fun : callable + Function which computes the vector of residuals, with the signature + ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with + respect to its first argument. The argument ``x`` passed to this + function is an ndarray of shape (n,) (never a scalar, even for n=1). + It must return a 1-d array_like of shape (m,) or a scalar. If the + argument ``x`` is complex or the function ``fun`` returns complex + residuals, it must be wrapped in a real function of real arguments, + as shown at the end of the Examples section. + x0 : array_like with shape (n,) or float + Initial guess on independent variables. If float, it will be treated + as a 1-d array with one element. + jac : {'2-point', '3-point', 'cs', callable}, optional + Method of computing the Jacobian matrix (an m-by-n matrix, where + element (i, j) is the partial derivative of f[i] with respect to + x[j]). The keywords select a finite difference scheme for numerical + estimation. The scheme '3-point' is more accurate, but requires + twice as many operations as '2-point' (default). The scheme 'cs' + uses complex steps, and while potentially the most accurate, it is + applicable only when `fun` correctly handles complex inputs and + can be analytically continued to the complex plane. Method 'lm' + always uses the '2-point' scheme. 
If callable, it is used as
+        ``jac(x, *args, **kwargs)`` and should return a good approximation
+        (or the exact value) for the Jacobian as an array_like (np.atleast_2d
+        is applied), a sparse matrix or a
+        `scipy.sparse.linalg.LinearOperator`.
+    bounds : 2-tuple of array_like, optional
+        Lower and upper bounds on independent variables. Defaults to no
+        bounds. Each array must match the size of `x0` or be a scalar; in the
+        latter case a bound will be the same for all variables. Use
+        ``np.inf`` with an appropriate sign to disable bounds on all or some
+        variables.
+    method : {'trf', 'dogbox', 'lm'}, optional
+        Algorithm to perform minimization.
+
+        * 'trf' : Trust Region Reflective algorithm, particularly suitable
+          for large sparse problems with bounds. A generally robust method.
+        * 'dogbox' : dogleg algorithm with rectangular trust regions; the
+          typical use case is small problems with bounds. Not recommended
+          for problems with rank-deficient Jacobian.
+        * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
+          Doesn't handle bounds or sparse Jacobians. Usually the most
+          efficient method for small unconstrained problems.
+
+        Default is 'trf'. See Notes for more information.
+    ftol : float, optional
+        Tolerance for termination by the change of the cost function. Default
+        is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
+        and there was an adequate agreement between a local quadratic model
+        and the true model in the last step.
+    xtol : float, optional
+        Tolerance for termination by the change of the independent variables.
+        Default is 1e-8. The exact condition depends on the `method` used:
+
+        * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
+        * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
+          a trust-region radius and ``xs`` is the value of ``x``
+          scaled according to the `x_scale` parameter (see below).
+
+    gtol : float, optional
+        Tolerance for termination by the norm of the gradient. Default is
+        1e-8. The exact condition depends on the `method` used:
+
+        * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
+          ``g_scaled`` is the value of the gradient scaled to account for
+          the presence of the bounds [STIR]_.
+        * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
+          ``g_free`` is the gradient with respect to the variables which
+          are not in the optimal state on the boundary.
+        * For 'lm' : the maximum absolute value of the cosine of angles
+          between columns of the Jacobian and the residual vector is less
+          than `gtol`, or the residual vector is zero.
+
+    x_scale : array_like or 'jac', optional
+        Characteristic scale of each variable. Setting `x_scale` is equivalent
+        to reformulating the problem in scaled variables ``xs = x / x_scale``.
+        An alternative view is that the size of a trust region along the j-th
+        dimension is proportional to ``x_scale[j]``. Improved convergence may
+        be achieved by setting `x_scale` such that a step of a given size
+        along any of the scaled variables has a similar effect on the cost
+        function. If set to 'jac', the scale is iteratively updated using the
+        inverse norms of the columns of the Jacobian matrix (as described in
+        [JJMore]_).
+    loss : str or callable, optional
+        Determines the loss function. The following keyword values are
+        allowed:
+
+        * 'linear' (default) : ``rho(z) = z``. Gives a standard
+          least-squares problem.
+        * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
+          approximation of l1 (absolute value) loss. Usually a good
+          choice for robust least squares.
+        * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
+          similarly to 'soft_l1'.
+        * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens the influence
+          of outliers, but may cause difficulties in the optimization
+          process.
+        * 'arctan' : ``rho(z) = arctan(z)``. Limits the maximum loss from
+          a single residual; has properties similar to 'cauchy'.
+
+        If callable, it must take a 1-d ndarray ``z=f**2`` and return an
+        array_like with shape (3, m) where row 0 contains function values,
+        row 1 contains first derivatives and row 2 contains second
+        derivatives. Method 'lm' supports only 'linear' loss.
+    f_scale : float, optional
+        Value of the soft margin between inlier and outlier residuals,
+        default is 1.0. The loss function is evaluated as
+        ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
+        and ``rho`` is determined by the `loss` parameter. This parameter has
+        no effect with ``loss='linear'``, but for other `loss` values it is
+        of crucial importance.
+    max_nfev : None or int, optional
+        Maximum number of function evaluations before the termination.
+        If None (default), the value is chosen automatically:
+
+        * For 'trf' and 'dogbox' : 100 * n.
+        * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
+          otherwise (because 'lm' counts function calls in Jacobian
+          estimation).
+
+    diff_step : None or array_like, optional
+        Determines the relative step size for the finite difference
+        approximation of the Jacobian. The actual step is computed as
+        ``x * diff_step``. If None (default), then `diff_step` is taken to be
+        a conventional "optimal" power of machine epsilon for the finite
+        difference scheme used [NR]_.
+    tr_solver : {None, 'exact', 'lsmr'}, optional
+        Method for solving trust-region subproblems, relevant only for 'trf'
+        and 'dogbox' methods.
+
+        * 'exact' is suitable for not very large problems with dense
+          Jacobian matrices. The computational complexity per iteration is
+          comparable to a singular value decomposition of the Jacobian
+          matrix.
+        * 'lsmr' is suitable for problems with sparse and large Jacobian
+          matrices. It uses the iterative procedure
+          `scipy.sparse.linalg.lsmr` for finding a solution of a linear
+          least-squares problem and only requires matrix-vector product
+          evaluations.
+
+        If None (default) the solver is chosen based on the type of Jacobian
+        returned on the first iteration.
+    tr_options : dict, optional
+        Keyword options passed to the trust-region solver.
+
+        * ``tr_solver='exact'``: `tr_options` are ignored.
+        * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
+          Additionally ``method='trf'`` supports the 'regularize' option
+          (bool, default is True), which adds a regularization term to the
+          normal equation and improves convergence if the Jacobian is
+          rank-deficient [Byrd]_ (eq. 3.4).
+
+    jac_sparsity : {None, array_like, sparse matrix}, optional
+        Defines the sparsity structure of the Jacobian matrix for finite
+        difference estimation; its shape must be (m, n). If the Jacobian has
+        only a few non-zero elements in *each* row, providing the sparsity
+        structure will greatly speed up the computations [Curtis]_. A zero
+        entry means that the corresponding element of the Jacobian is
+        identically zero. If provided, forces the use of the 'lsmr'
+        trust-region solver. If None (default) then dense differencing will
+        be used. Has no effect for the 'lm' method.
+    verbose : {0, 1, 2}, optional
+        Level of algorithm's verbosity:
+
+        * 0 (default) : work silently.
+        * 1 : display a termination report.
+        * 2 : display progress during iterations (not supported by the 'lm'
+          method).
+
+    args, kwargs : tuple and dict, optional
+        Additional arguments passed to `fun` and `jac`. Both empty by default.
+        The calling signature is ``fun(x, *args, **kwargs)`` and the same for
+        `jac`.
+
+    Returns
+    -------
+    `OptimizeResult` with the following fields defined:
+    x : ndarray, shape (n,)
+        Solution found.
+    cost : float
+        Value of the cost function at the solution.
+    fun : ndarray, shape (m,)
+        Vector of residuals at the solution.
+    jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
+        Modified Jacobian matrix at the solution, in the sense that J^T J
+        is a Gauss-Newton approximation of the Hessian of the cost function.
+        The type is the same as the one used by the algorithm.
+    grad : ndarray, shape (n,)
+        Gradient of the cost function at the solution.
+    optimality : float
+        First-order optimality measure. In unconstrained problems, it is
+        always the uniform norm of the gradient. In constrained problems, it
+        is the quantity which was compared with `gtol` during iterations.
+    active_mask : ndarray of int, shape (n,)
+        Each component shows whether a corresponding constraint is active
+        (that is, whether a variable is at the bound):
+
+        * 0 : a constraint is not active.
+        * -1 : a lower bound is active.
+        * 1 : an upper bound is active.
+
+        Might be somewhat arbitrary for the 'trf' method as it generates a
+        sequence of strictly feasible iterates and `active_mask` is determined
+        within a tolerance threshold.
+    nfev : int
+        Number of function evaluations done. Methods 'trf' and 'dogbox' do
+        not count function calls for numerical Jacobian approximation, as
+        opposed to the 'lm' method.
+    njev : int or None
+        Number of Jacobian evaluations done. If numerical Jacobian
+        approximation is used in the 'lm' method, it is set to None.
+    status : int
+        The reason for algorithm termination:
+
+        * -1 : improper input parameters status returned from MINPACK.
+        * 0 : the maximum number of function evaluations is exceeded.
+        * 1 : `gtol` termination condition is satisfied.
+        * 2 : `ftol` termination condition is satisfied.
+        * 3 : `xtol` termination condition is satisfied.
+        * 4 : both `ftol` and `xtol` termination conditions are satisfied.
+
+    message : str
+        Verbal description of the termination reason.
+    success : bool
+        True if one of the convergence criteria is satisfied (`status` > 0).
+
+    See Also
+    --------
+    leastsq : A legacy wrapper for the MINPACK implementation of the
+        Levenberg-Marquardt algorithm.
+    curve_fit : Least-squares minimization applied to a curve fitting problem.
+
+    Notes
+    -----
+    Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
+    algorithms implemented in MINPACK (lmder, lmdif). It runs the
+    Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
+    The implementation is based on paper [JJMore]_; it is very robust and
+    efficient, with a lot of smart tricks. It should be your first choice
+    for unconstrained problems. Note that it doesn't support bounds. It also
+    doesn't work when m < n.
+
+    Method 'trf' (Trust Region Reflective) is motivated by the process of
+    solving a system of equations, which constitutes the first-order
+    optimality condition for a bound-constrained minimization problem as
+    formulated in [STIR]_. The algorithm iteratively solves trust-region
+    subproblems augmented by a special diagonal quadratic term and with
+    trust-region shape determined by the distance from the bounds and the
+    direction of the gradient.
These enhancements help to avoid making
+    steps directly into the bounds and to explore the whole space of
+    variables efficiently. To further improve convergence, the algorithm
+    considers search directions reflected from the bounds. To obey
+    theoretical requirements, the algorithm keeps iterates strictly feasible.
+    With dense Jacobians trust-region subproblems are solved by an exact
+    method very similar to the one described in [JJMore]_ (and implemented in
+    MINPACK). The difference from the MINPACK implementation is that a
+    singular value decomposition of the Jacobian matrix is done once per
+    iteration, instead of a QR decomposition and a series of Givens rotation
+    eliminations. For large sparse Jacobians a 2-d subspace approach to
+    solving trust-region subproblems is used [STIR]_, [Byrd]_.
+    The subspace is spanned by a scaled gradient and an approximate
+    Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
+    constraints are imposed the algorithm is very similar to MINPACK and has
+    generally comparable performance. The algorithm is quite robust in both
+    unbounded and bounded problems, and is therefore chosen as the default.
+
+    Method 'dogbox' operates in a trust-region framework, but considers
+    rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
+    The intersection of a current trust region and initial bounds is again
+    rectangular, so on each iteration a quadratic minimization problem subject
+    to bound constraints is solved approximately by Powell's dogleg method
+    [NumOpt]_. The required Gauss-Newton step can be computed exactly for
+    dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
+    sparse Jacobians. The algorithm is likely to exhibit slow convergence
+    when the rank of the Jacobian is less than the number of variables. The
+    algorithm often outperforms 'trf' in bounded problems with a small number
+    of variables.
+
+    Robust loss functions are implemented as described in [BA]_. The idea
+    is to modify a residual vector and a Jacobian matrix on each iteration
+    such that the computed gradient and Gauss-Newton Hessian approximation
+    match the true gradient and Hessian approximation of the cost function.
+    Then the algorithm proceeds in a normal way, i.e., robust loss functions
+    are implemented as a simple wrapper over standard least-squares
+    algorithms.
+
+    .. versionadded:: 0.17.0
+
+    References
+    ----------
+    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
+              and Conjugate Gradient Method for Large-Scale Bound-Constrained
+              Minimization Problems," SIAM Journal on Scientific Computing,
+              Vol. 21, Number 1, pp 1-23, 1999.
+    .. [NR] William H. Press et al., "Numerical Recipes. The Art of Scientific
+            Computing. 3rd edition", Sec. 5.7.
+    .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
+              solution of the trust region problem by minimization over
+              two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
+              1988.
+    .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+                sparse Jacobian matrices", Journal of the Institute of
+                Mathematics and its Applications, 13, pp. 117-120, 1974.
+    .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
+                and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
+                Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
+    .. [Voglis] C. Voglis and I. E.
Lagaris, "A Rectangular Trust Region + Dogleg Approach for Unconstrained and Bound Constrained + Nonlinear Optimization", WSEAS International Conference on + Applied Mathematics, Corfu, Greece, 2004. + .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, + 2nd edition", Chapter 4. + .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis", + Proceedings of the International Workshop on Vision Algorithms: + Theory and Practice, pp. 298-372, 1999. + + Examples + -------- + In this example we find a minimum of the Rosenbrock function without bounds + on independent variables. + + >>> def fun_rosenbrock(x): + ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) + + Notice that we only provide the vector of the residuals. The algorithm + constructs the cost function as a sum of squares of the residuals, which + gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``. + + >>> from scipy.optimize import least_squares + >>> x0_rosenbrock = np.array([2, 2]) + >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock) + >>> res_1.x + array([ 1., 1.]) + >>> res_1.cost + 9.8669242910846867e-30 + >>> res_1.optimality + 8.8928864934219529e-14 + + We now constrain the variables, in such a way that the previous solution + becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and + ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter + to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``. + + We also provide the analytic Jacobian: + + >>> def jac_rosenbrock(x): + ... return np.array([ + ... [-20 * x[0], 10], + ... [-1, 0]]) + + Putting this all together, we see that the new solution lies on the bound: + + >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock, + ... bounds=([-np.inf, 1.5], np.inf)) + >>> res_2.x + array([ 1.22437075, 1.5 ]) + >>> res_2.cost + 0.025213093946805685 + >>> res_2.optimality + 1.5885401433157753e-07 + + Now we solve a system of equations (i.e., the cost function should be zero + at a minimum) for a Broyden tridiagonal vector-valued function of 100000 + variables: + + >>> def fun_broyden(x): + ... f = (3 - x) * x + 1 + ... f[1:] -= x[:-1] + ... f[:-1] -= 2 * x[1:] + ... return f + + The corresponding Jacobian matrix is sparse. We tell the algorithm to + estimate it by finite differences and provide the sparsity structure of + Jacobian to significantly speed up this process. + + >>> from scipy.sparse import lil_matrix + >>> def sparsity_broyden(n): + ... sparsity = lil_matrix((n, n), dtype=int) + ... i = np.arange(n) + ... sparsity[i, i] = 1 + ... i = np.arange(1, n) + ... sparsity[i, i - 1] = 1 + ... i = np.arange(n - 1) + ... sparsity[i, i + 1] = 1 + ... return sparsity + ... + >>> n = 100000 + >>> x0_broyden = -np.ones(n) + ... + >>> res_3 = least_squares(fun_broyden, x0_broyden, + ... jac_sparsity=sparsity_broyden(n)) + >>> res_3.cost + 4.5687069299604613e-23 + >>> res_3.optimality + 1.1650454296851518e-11 + + Let's also solve a curve fitting problem using robust loss function to + take care of outliers in the data. Define the model function as + ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an + observation and a, b, c are parameters to estimate. + + First, define the function which generates the data with noise and + outliers, define the model parameters, and generate data: + + >>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0): + ... y = a + b * np.exp(t * c) + ... + ... rnd = np.random.RandomState(random_state) + ... 
error = noise * rnd.randn(t.size) + ... outliers = rnd.randint(0, t.size, n_outliers) + ... error[outliers] *= 10 + ... + ... return y + error + ... + >>> a = 0.5 + >>> b = 2.0 + >>> c = -1 + >>> t_min = 0 + >>> t_max = 10 + >>> n_points = 15 + ... + >>> t_train = np.linspace(t_min, t_max, n_points) + >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3) + + Define function for computing residuals and initial estimate of + parameters. + + >>> def fun(x, t, y): + ... return x[0] + x[1] * np.exp(x[2] * t) - y + ... + >>> x0 = np.array([1.0, 1.0, 0.0]) + + Compute a standard least-squares solution: + + >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train)) + + Now compute two solutions with two different robust loss functions. The + parameter `f_scale` is set to 0.1, meaning that inlier residuals should + not significantly exceed 0.1 (the noise level used). + + >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1, + ... args=(t_train, y_train)) + >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1, + ... args=(t_train, y_train)) + + And finally plot all the curves. We see that by selecting an appropriate + `loss` we can get estimates close to optimal even in the presence of + strong outliers. But keep in mind that generally it is recommended to try + 'soft_l1' or 'huber' losses first (if at all necessary) as the other two + options may cause difficulties in optimization process. + + >>> t_test = np.linspace(t_min, t_max, n_points * 10) + >>> y_true = gen_data(t_test, a, b, c) + >>> y_lsq = gen_data(t_test, *res_lsq.x) + >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x) + >>> y_log = gen_data(t_test, *res_log.x) + ... + >>> import matplotlib.pyplot as plt + >>> plt.plot(t_train, y_train, 'o') + >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true') + >>> plt.plot(t_test, y_lsq, label='linear loss') + >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss') + >>> plt.plot(t_test, y_log, label='cauchy loss') + >>> plt.xlabel("t") + >>> plt.ylabel("y") + >>> plt.legend() + >>> plt.show() + + In the next example, we show how complex-valued residual functions of + complex variables can be optimized with ``least_squares()``. Consider the + following function: + + >>> def f(z): + ... return z - (0.5 + 0.5j) + + We wrap it into a function of real variables that returns real residuals + by simply handling the real and imaginary parts as independent variables: + + >>> def f_wrap(x): + ... fx = f(x[0] + 1j*x[1]) + ... return np.array([fx.real, fx.imag]) + + Thus, instead of the original m-dimensional complex function of n complex + variables we optimize a 2m-dimensional real function of 2n real variables: + + >>> from scipy.optimize import least_squares + >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1])) + >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j + >>> z + (0.49999999999925893+0.49999999999925893j) + + """ + if method not in ['trf', 'dogbox', 'lm']: + raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.") + + if jac not in ['2-point', '3-point', 'cs'] and not callable(jac): + raise ValueError("`jac` must be '2-point', '3-point', 'cs' or " + "callable.") + + if tr_solver not in [None, 'exact', 'lsmr']: + raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.") + + if loss not in IMPLEMENTED_LOSSES and not callable(loss): + raise ValueError("`loss` must be one of {0} or a callable." 
+ .format(IMPLEMENTED_LOSSES.keys())) + + if method == 'lm' and loss != 'linear': + raise ValueError("method='lm' supports only 'linear' loss function.") + + if verbose not in [0, 1, 2]: + raise ValueError("`verbose` must be in [0, 1, 2].") + + if len(bounds) != 2: + raise ValueError("`bounds` must contain 2 elements.") + + if max_nfev is not None and max_nfev <= 0: + raise ValueError("`max_nfev` must be None or positive integer.") + + if np.iscomplexobj(x0): + raise ValueError("`x0` must be real.") + + x0 = np.atleast_1d(x0).astype(float) + + if x0.ndim > 1: + raise ValueError("`x0` must have at most 1 dimension.") + + lb, ub = prepare_bounds(bounds, x0.shape[0]) + + if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)): + raise ValueError("Method 'lm' doesn't support bounds.") + + if lb.shape != x0.shape or ub.shape != x0.shape: + raise ValueError("Inconsistent shapes between bounds and `x0`.") + + if np.any(lb >= ub): + raise ValueError("Each lower bound must be strictly less than each " + "upper bound.") + + if not in_bounds(x0, lb, ub): + raise ValueError("`x0` is infeasible.") + + x_scale = check_x_scale(x_scale, x0) + + ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol) + + def fun_wrapped(x): + return np.atleast_1d(fun(x, *args, **kwargs)) + + if method == 'trf': + x0 = make_strictly_feasible(x0, lb, ub) + + f0 = fun_wrapped(x0) + + if f0.ndim != 1: + raise ValueError("`fun` must return at most 1-d array_like.") + + if not np.all(np.isfinite(f0)): + raise ValueError("Residuals are not finite in the initial point.") + + n = x0.size + m = f0.size + + if method == 'lm' and m < n: + raise ValueError("Method 'lm' doesn't work when the number of " + "residuals is less than the number of variables.") + + loss_function = construct_loss_function(m, loss, f_scale) + if callable(loss): + rho = loss_function(f0) + if rho.shape != (3, m): + raise ValueError("The return value of `loss` callable has wrong " + "shape.") + initial_cost = 0.5 * np.sum(rho[0]) + elif loss_function is not None: + initial_cost = loss_function(f0, cost_only=True) + else: + initial_cost = 0.5 * np.dot(f0, f0) + + if callable(jac): + J0 = jac(x0, *args, **kwargs) + + if issparse(J0): + J0 = csr_matrix(J0) + + def jac_wrapped(x, _=None): + return csr_matrix(jac(x, *args, **kwargs)) + + elif isinstance(J0, LinearOperator): + def jac_wrapped(x, _=None): + return jac(x, *args, **kwargs) + + else: + J0 = np.atleast_2d(J0) + + def jac_wrapped(x, _=None): + return np.atleast_2d(jac(x, *args, **kwargs)) + + else: # Estimate Jacobian by finite differences. + if method == 'lm': + if jac_sparsity is not None: + raise ValueError("method='lm' does not support " + "`jac_sparsity`.") + + if jac != '2-point': + warn("jac='{0}' works equivalently to '2-point' " + "for method='lm'.".format(jac)) + + J0 = jac_wrapped = None + else: + if jac_sparsity is not None and tr_solver == 'exact': + raise ValueError("tr_solver='exact' is incompatible " + "with `jac_sparsity`.") + + jac_sparsity = check_jac_sparsity(jac_sparsity, m, n) + + def jac_wrapped(x, f): + J = approx_derivative(fun, x, rel_step=diff_step, method=jac, + f0=f, bounds=bounds, args=args, + kwargs=kwargs, sparsity=jac_sparsity) + if J.ndim != 2: # J is guaranteed not sparse. 
+ J = np.atleast_2d(J) + + return J + + J0 = jac_wrapped(x0, f0) + + if J0 is not None: + if J0.shape != (m, n): + raise ValueError( + "The return value of `jac` has wrong shape: expected {0}, " + "actual {1}.".format((m, n), J0.shape)) + + if not isinstance(J0, np.ndarray): + if method == 'lm': + raise ValueError("method='lm' works only with dense " + "Jacobian matrices.") + + if tr_solver == 'exact': + raise ValueError( + "tr_solver='exact' works only with dense " + "Jacobian matrices.") + + jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac' + if isinstance(J0, LinearOperator) and jac_scale: + raise ValueError("x_scale='jac' can't be used when `jac` " + "returns LinearOperator.") + + if tr_solver is None: + if isinstance(J0, np.ndarray): + tr_solver = 'exact' + else: + tr_solver = 'lsmr' + + if method == 'lm': + result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol, + max_nfev, x_scale, diff_step) + + elif method == 'trf': + result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol, + gtol, max_nfev, x_scale, loss_function, tr_solver, + tr_options.copy(), verbose) + + elif method == 'dogbox': + if tr_solver == 'lsmr' and 'regularize' in tr_options: + warn("The keyword 'regularize' in `tr_options` is not relevant " + "for 'dogbox' method.") + tr_options = tr_options.copy() + del tr_options['regularize'] + + result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, + xtol, gtol, max_nfev, x_scale, loss_function, + tr_solver, tr_options, verbose) + + result.message = TERMINATION_MESSAGES[result.status] + result.success = result.status > 0 + + if verbose >= 1: + print(result.message) + print("Function evaluations {0}, initial cost {1:.4e}, final cost " + "{2:.4e}, first-order optimality {3:.2e}." + .format(result.nfev, initial_cost, result.cost, + result.optimality)) + + return result diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.pyc new file mode 100644 index 0000000..36d6626 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/lsq_linear.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/lsq_linear.py new file mode 100644 index 0000000..c2ed220 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/lsq_linear.py @@ -0,0 +1,317 @@ +"""Linear least squares with bound constraints on independent variables.""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.linalg import norm +from scipy.sparse import issparse, csr_matrix +from scipy.sparse.linalg import LinearOperator, lsmr +from scipy.optimize import OptimizeResult + +from .common import in_bounds, compute_grad +from .trf_linear import trf_linear +from .bvls import bvls + + +def prepare_bounds(bounds, n): + lb, ub = [np.asarray(b, dtype=float) for b in bounds] + + if lb.ndim == 0: + lb = np.resize(lb, n) + + if ub.ndim == 0: + ub = np.resize(ub, n) + + return lb, ub + + +TERMINATION_MESSAGES = { + -1: "The algorithm was not able to make progress on the last iteration.", + 0: "The maximum number of iterations is exceeded.", + 1: "The first-order optimality measure is less than `tol`.", + 2: "The relative change of the cost function is less than `tol`.", + 3: "The unconstrained solution is optimal." 
+}
+
+
+def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10,
+               lsq_solver=None, lsmr_tol=None, max_iter=None, verbose=0):
+    r"""Solve a linear least-squares problem with bounds on the variables.
+
+    Given an m-by-n design matrix A and a target vector b with m elements,
+    `lsq_linear` solves the following optimization problem::
+
+        minimize 0.5 * ||A x - b||**2
+        subject to lb <= x <= ub
+
+    This optimization problem is convex; hence a minimum found (if the
+    iterations have converged) is guaranteed to be global.
+
+    Parameters
+    ----------
+    A : array_like, sparse matrix or LinearOperator, shape (m, n)
+        Design matrix. Can be `scipy.sparse.linalg.LinearOperator`.
+    b : array_like, shape (m,)
+        Target vector.
+    bounds : 2-tuple of array_like, optional
+        Lower and upper bounds on independent variables. Defaults to no
+        bounds. Each array must have shape (n,) or be a scalar; in the latter
+        case a bound will be the same for all variables. Use ``np.inf`` with
+        an appropriate sign to disable bounds on all or some variables.
+    method : 'trf' or 'bvls', optional
+        Method to perform minimization.
+
+        * 'trf' : Trust Region Reflective algorithm adapted for a linear
+          least-squares problem. This is an interior-point-like method
+          and the required number of iterations is weakly correlated with
+          the number of variables.
+        * 'bvls' : Bounded-Variable Least-Squares algorithm. This is
+          an active set method, which requires a number of iterations
+          comparable to the number of variables. Can't be used when `A` is
+          sparse or LinearOperator.
+
+        Default is 'trf'.
+    tol : float, optional
+        Tolerance parameter. The algorithm terminates if a relative change
+        of the cost function is less than `tol` on the last iteration.
+        Additionally the first-order optimality measure is considered:
+
+        * ``method='trf'`` terminates if the uniform norm of the gradient,
+          scaled to account for the presence of the bounds, is less than
+          `tol`.
+        * ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions
+          are satisfied within `tol` tolerance.
+
+    lsq_solver : {None, 'exact', 'lsmr'}, optional
+        Method of solving unbounded least-squares problems throughout
+        iterations:
+
+        * 'exact' : Use dense QR or SVD decomposition approach. Can't be
+          used when `A` is sparse or LinearOperator.
+        * 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure
+          which requires only matrix-vector product evaluations. Can't
+          be used with ``method='bvls'``.
+
+        If None (default) the solver is chosen based on the type of `A`.
+    lsmr_tol : None, float or 'auto', optional
+        Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr`.
+        If None (default), it is set to ``1e-2 * tol``. If 'auto', the
+        tolerance will be adjusted based on the optimality of the current
+        iterate, which can speed up the optimization process, but is not
+        always reliable.
+    max_iter : None or int, optional
+        Maximum number of iterations before termination. If None (default),
+        it is set to 100 for ``method='trf'`` or to the number of variables
+        for ``method='bvls'`` (not counting iterations for 'bvls'
+        initialization).
+    verbose : {0, 1, 2}, optional
+        Level of algorithm's verbosity:
+
+        * 0 : work silently (default).
+        * 1 : display a termination report.
+        * 2 : display progress during iterations.
+
+    Returns
+    -------
+    OptimizeResult with the following fields defined:
+    x : ndarray, shape (n,)
+        Solution found.
+    cost : float
+        Value of the cost function at the solution.
+    fun : ndarray, shape (m,)
+        Vector of residuals at the solution.
+    optimality : float
+        First-order optimality measure. The exact meaning depends on
+        `method`; refer to the description of the `tol` parameter.
+    active_mask : ndarray of int, shape (n,)
+        Each component shows whether a corresponding constraint is active
+        (that is, whether a variable is at the bound):
+
+        * 0 : a constraint is not active.
+        * -1 : a lower bound is active.
+        * 1 : an upper bound is active.
+
+        Might be somewhat arbitrary for the `trf` method as it generates a
+        sequence of strictly feasible iterates and active_mask is determined
+        within a tolerance threshold.
+    nit : int
+        Number of iterations. Zero if the unconstrained solution is optimal.
+    status : int
+        Reason for algorithm termination:
+
+        * -1 : the algorithm was not able to make progress on the last
+          iteration.
+        * 0 : the maximum number of iterations is exceeded.
+        * 1 : the first-order optimality measure is less than `tol`.
+        * 2 : the relative change of the cost function is less than `tol`.
+        * 3 : the unconstrained solution is optimal.
+
+    message : str
+        Verbal description of the termination reason.
+    success : bool
+        True if one of the convergence criteria is satisfied (`status` > 0).
+
+    See Also
+    --------
+    nnls : Linear least squares with non-negativity constraint.
+    least_squares : Nonlinear least squares with bounds on the variables.
+
+    Notes
+    -----
+    The algorithm first computes the unconstrained least-squares solution by
+    `numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on
+    `lsq_solver`. This solution is returned as optimal if it lies within the
+    bounds.
+
+    Method 'trf' runs the adaptation of the algorithm described in [STIR]_
+    for a linear least-squares problem. The iterations are essentially the
+    same as in the nonlinear least-squares algorithm, but as the quadratic
+    function model is always accurate, we don't need to track or modify the
+    radius of a trust region. The line search (backtracking) is used as a
+    safety net when a selected step does not decrease the cost function. A
+    more detailed description of the algorithm can be found in
+    `scipy.optimize.least_squares`.
+
+    Method 'bvls' runs a Python implementation of the algorithm described in
+    [BVLS]_. The algorithm maintains active and free sets of variables, on
+    each iteration chooses a new variable to move from the active set to the
+    free set and then solves the unconstrained least-squares problem on free
+    variables. This algorithm is guaranteed to give an accurate solution
+    eventually, but may require up to n iterations for a problem with n
+    variables. Additionally, an ad-hoc initialization procedure is
+    implemented that determines which variables to set free or active
+    initially. It takes some number of iterations before actual BVLS starts,
+    but can significantly reduce the number of further iterations.
+
+    References
+    ----------
+    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
+              and Conjugate Gradient Method for Large-Scale Bound-Constrained
+              Minimization Problems," SIAM Journal on Scientific Computing,
+              Vol. 21, Number 1, pp 1-23, 1999.
+    .. [BVLS] P. B. Stark and R. L. Parker, "Bounded-Variable Least-Squares:
+              an Algorithm and Applications", Computational Statistics, 10,
+              129-141, 1995.
+
+    Examples
+    --------
+    In this example a problem with a large sparse matrix and bounds on the
+    variables is solved.
+
+    >>> from scipy.sparse import rand
+    >>> from scipy.optimize import lsq_linear
+    ...
+    >>> np.random.seed(0)
+    ...
+    >>> m = 20000
+    >>> n = 10000
+    ...
+ >>> A = rand(m, n, density=1e-4) + >>> b = np.random.randn(m) + ... + >>> lb = np.random.randn(n) + >>> ub = lb + 1 + ... + >>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1) + # may vary + The relative change of the cost function is less than `tol`. + Number of iterations 16, initial cost 1.5039e+04, final cost 1.1112e+04, + first-order optimality 4.66e-08. + """ + if method not in ['trf', 'bvls']: + raise ValueError("`method` must be 'trf' or 'bvls'") + + if lsq_solver not in [None, 'exact', 'lsmr']: + raise ValueError("`solver` must be None, 'exact' or 'lsmr'.") + + if verbose not in [0, 1, 2]: + raise ValueError("`verbose` must be in [0, 1, 2].") + + if issparse(A): + A = csr_matrix(A) + elif not isinstance(A, LinearOperator): + A = np.atleast_2d(A) + + if method == 'bvls': + if lsq_solver == 'lsmr': + raise ValueError("method='bvls' can't be used with " + "lsq_solver='lsmr'") + + if not isinstance(A, np.ndarray): + raise ValueError("method='bvls' can't be used with `A` being " + "sparse or LinearOperator.") + + if lsq_solver is None: + if isinstance(A, np.ndarray): + lsq_solver = 'exact' + else: + lsq_solver = 'lsmr' + elif lsq_solver == 'exact' and not isinstance(A, np.ndarray): + raise ValueError("`exact` solver can't be used when `A` is " + "sparse or LinearOperator.") + + if len(A.shape) != 2: # No ndim for LinearOperator. + raise ValueError("`A` must have at most 2 dimensions.") + + if len(bounds) != 2: + raise ValueError("`bounds` must contain 2 elements.") + + if max_iter is not None and max_iter <= 0: + raise ValueError("`max_iter` must be None or positive integer.") + + m, n = A.shape + + b = np.atleast_1d(b) + if b.ndim != 1: + raise ValueError("`b` must have at most 1 dimension.") + + if b.size != m: + raise ValueError("Inconsistent shapes between `A` and `b`.") + + lb, ub = prepare_bounds(bounds, n) + + if lb.shape != (n,) and ub.shape != (n,): + raise ValueError("Bounds have wrong shape.") + + if np.any(lb >= ub): + raise ValueError("Each lower bound must be strictly less than each " + "upper bound.") + + if lsq_solver == 'exact': + x_lsq = np.linalg.lstsq(A, b, rcond=-1)[0] + elif lsq_solver == 'lsmr': + x_lsq = lsmr(A, b, atol=tol, btol=tol)[0] + + if in_bounds(x_lsq, lb, ub): + r = A.dot(x_lsq) - b + cost = 0.5 * np.dot(r, r) + termination_status = 3 + termination_message = TERMINATION_MESSAGES[termination_status] + g = compute_grad(A, r) + g_norm = norm(g, ord=np.inf) + + if verbose > 0: + print(termination_message) + print("Final cost {0:.4e}, first-order optimality {1:.2e}" + .format(cost, g_norm)) + + return OptimizeResult( + x=x_lsq, fun=r, cost=cost, optimality=g_norm, + active_mask=np.zeros(n), nit=0, status=termination_status, + message=termination_message, success=True) + + if method == 'trf': + res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol, + max_iter, verbose) + elif method == 'bvls': + res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose) + + res.message = TERMINATION_MESSAGES[res.status] + res.success = res.status > 0 + + if verbose > 0: + print(res.message) + print("Number of iterations {0}, initial cost {1:.4e}, " + "final cost {2:.4e}, first-order optimality {3:.2e}." 
+ .format(res.nit, res.initial_cost, res.cost, res.optimality)) + + del res.initial_cost + + return res diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/lsq_linear.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/lsq_linear.pyc new file mode 100644 index 0000000..e03baaa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/lsq_linear.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/setup.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/setup.py new file mode 100644 index 0000000..b9222a0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/setup.py @@ -0,0 +1,14 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('_lsq', parent_package, top_path) + config.add_extension('givens_elimination', + sources=['givens_elimination.c']) + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/setup.pyc new file mode 100644 index 0000000..ed3e402 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf.py new file mode 100644 index 0000000..7ec62f4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf.py @@ -0,0 +1,568 @@ +"""Trust Region Reflective algorithm for least-squares optimization. + +The algorithm is based on ideas from paper [STIR]_. The main idea is to +account for presence of the bounds by appropriate scaling of the variables (or +equivalently changing a trust-region shape). Let's introduce a vector v: + + | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf + v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf + | 1, otherwise + +where g is the gradient of a cost function and lb, ub are the bounds. Its +components are distances to the bounds at which the anti-gradient points (if +this distance is finite). Define a scaling matrix D = diag(v**0.5). +First-order optimality conditions can be stated as + + D^2 g(x) = 0. + +Meaning that components of the gradient should be zero for strictly interior +variables, and components must point inside the feasible region for variables +on the bound. + +Now consider this system of equations as a new optimization problem. If the +point x is strictly interior (not on the bound) then the left-hand side is +differentiable and the Newton step for it satisfies + + (D^2 H + diag(g) Jv) p = -D^2 g + +where H is the Hessian matrix (or its J^T J approximation in least squares), +Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all +elements of matrix C = diag(g) Jv are non-negative. Introduce the change +of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables +we have a Newton step satisfying + + B_h p_h = -g_h, + +where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where +J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect +to "hat" variables. 
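+
+As a small worked illustration of the scaling (numbers chosen purely for
+exposition): take x = [0.5, 2.0], lb = [0, -inf], ub = [1, inf] and
+g = [-3, 4]. Then v = [0.5, 1]: g[0] < 0 with a finite upper bound gives
+v[0] = ub[0] - x[0] = 0.5, while lb[1] = -inf gives v[1] = 1, so
+D = diag([0.5**0.5, 1]) and the variable near its bound along the
+anti-gradient gets a correspondingly smaller trust-region extent.
+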
To guarantee global convergence we formulate a
+trust-region problem based on the Newton step in the new variables:
+
+    0.5 * p_h^T B_h p_h + g_h^T p_h -> min, ||p_h|| <= Delta
+
+In the original space B = H + D^{-1} C D^{-1}, and the equivalent
+trust-region problem is
+
+    0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
+
+Here the meaning of the matrix D becomes clearer: it alters the shape
+of the trust region, such that large steps towards the bounds are not
+allowed. In the implementation the trust-region problem is solved in "hat"
+space, but handling of the bounds is done in the original space (see below
+and read the code).
+
+Introducing the matrix D does not make it possible to ignore the bounds: the
+algorithm must keep its iterates strictly feasible (to satisfy the
+aforementioned differentiability), and the parameter theta controls the step
+back from the boundary (see the code for details).
+
+The algorithm does another important trick. If the trust-region solution
+doesn't fit into the bounds, then a search direction reflected from the
+first encountered bound is considered. For motivation and analysis refer to
+the [STIR]_ paper (and other papers of the authors). In practice it doesn't
+need much justification: the algorithm simply chooses the best step among
+three: a constrained trust-region step, a reflected step and a constrained
+Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
+space).
+
+Another feature is that the trust-region radius control strategy is modified
+to account for the appearance of the diagonal matrix C (called diag_h in the
+code).
+
+Note that all the described peculiarities disappear when we consider
+problems without bounds (the algorithm becomes a standard trust-region type
+algorithm, very similar to ones implemented in MINPACK).
+
+The implementation supports two methods of solving the trust-region problem.
+The first, called 'exact', applies an SVD to the Jacobian and then solves
+the problem very accurately using the algorithm described in [JJMore]_. It
+is not applicable to large problems. The second, called 'lsmr', uses the 2-D
+subspace approach (sometimes called "indefinite dogleg"), where the problem
+is solved in a subspace spanned by the gradient and the approximate
+Gauss-Newton step found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region
+problem is reformulated as a fourth-order algebraic equation and solved very
+accurately by ``numpy.roots``. The subspace approach makes it possible to
+solve very large problems (up to a couple of million residuals on a regular
+PC), provided the Jacobian matrix is sufficiently sparse.
+
+References
+----------
+.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior,
+          and Conjugate Gradient Method for Large-Scale Bound-Constrained
+          Minimization Problems," SIAM Journal on Scientific Computing,
+          Vol. 21, Number 1, pp 1-23, 1999.
+.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
+            and Theory," Numerical Analysis, ed. G. A.
+    Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
+"""
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.linalg import norm
+from scipy.linalg import svd, qr
+from scipy.sparse.linalg import lsmr
+from scipy.optimize import OptimizeResult
+from scipy._lib.six import string_types
+
+from .common import (
+    step_size_to_bound, find_active_constraints, in_bounds,
+    make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region,
+    solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d,
+    evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator,
+    CL_scaling_vector, compute_grad, compute_jac_scale, check_termination,
+    update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear,
+    print_iteration_nonlinear)
+
+
+def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
+        loss_function, tr_solver, tr_options, verbose):
+    # For efficiency it makes sense to run the simplified version of the
+    # algorithm when no bounds are imposed. We decided to write two separate
+    # functions. It violates the DRY principle, but keeps the individual
+    # functions as readable as possible.
+    if np.all(lb == -np.inf) and np.all(ub == np.inf):
+        return trf_no_bounds(
+            fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
+            loss_function, tr_solver, tr_options, verbose)
+    else:
+        return trf_bounds(
+            fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
+            loss_function, tr_solver, tr_options, verbose)
+
+
+def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
+    """Select the best step according to Trust Region Reflective algorithm."""
+    if in_bounds(x + p, lb, ub):
+        p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
+        return p, p_h, -p_value
+
+    p_stride, hits = step_size_to_bound(x, p, lb, ub)
+
+    # Compute the reflected direction.
+    r_h = np.copy(p_h)
+    r_h[hits.astype(bool)] *= -1
+    r = d * r_h
+
+    # Restrict trust-region step, such that it hits the bound.
+    p *= p_stride
+    p_h *= p_stride
+    x_on_bound = x + p
+
+    # The reflected direction will first hit either the feasible-region
+    # boundary or the trust-region boundary.
+    _, to_tr = intersect_trust_region(p_h, r_h, Delta)
+    to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub)
+
+    # Find lower and upper bounds on a step size along the reflected
+    # direction, considering the strict feasibility requirement. There is no
+    # single correct way to do that; the chosen approach seems to work best
+    # on test problems.
+    r_stride = min(to_bound, to_tr)
+    if r_stride > 0:
+        r_stride_l = (1 - theta) * p_stride / r_stride
+        if r_stride == to_bound:
+            r_stride_u = theta * to_bound
+        else:
+            r_stride_u = to_tr
+    else:
+        r_stride_l = 0
+        r_stride_u = -1
+
+    # Check if reflection step is available.
+    if r_stride_l <= r_stride_u:
+        a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
+        r_stride, r_value = minimize_quadratic_1d(
+            a, b, r_stride_l, r_stride_u, c=c)
+        r_h *= r_stride
+        r_h += p_h
+        r = r_h * d
+    else:
+        r_value = np.inf
+
+    # Now correct p_h to make it strictly interior.
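+    # Stepping back by theta < 1 keeps the iterate strictly feasible, which
+    # the Coleman-Li scaling requires for differentiability (see the module
+    # docstring).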
+ p *= theta + p_h *= theta + p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h) + + ag_h = -g_h + ag = d * ag_h + + to_tr = Delta / norm(ag_h) + to_bound, _ = step_size_to_bound(x, ag, lb, ub) + if to_bound < to_tr: + ag_stride = theta * to_bound + else: + ag_stride = to_tr + + a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h) + ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride) + ag_h *= ag_stride + ag *= ag_stride + + if p_value < r_value and p_value < ag_value: + return p, p_h, -p_value + elif r_value < p_value and r_value < ag_value: + return r, r_h, -r_value + else: + return ag, ag_h, -ag_value + + +def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, + x_scale, loss_function, tr_solver, tr_options, verbose): + x = x0.copy() + + f = f0 + f_true = f.copy() + nfev = 1 + + J = J0 + njev = 1 + m, n = J.shape + + if loss_function is not None: + rho = loss_function(f) + cost = 0.5 * np.sum(rho[0]) + J, f = scale_for_robust_loss_function(J, f, rho) + else: + cost = 0.5 * np.dot(f, f) + + g = compute_grad(J, f) + + jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac' + if jac_scale: + scale, scale_inv = compute_jac_scale(J) + else: + scale, scale_inv = x_scale, 1 / x_scale + + v, dv = CL_scaling_vector(x, g, lb, ub) + v[dv != 0] *= scale_inv[dv != 0] + Delta = norm(x0 * scale_inv / v**0.5) + if Delta == 0: + Delta = 1.0 + + g_norm = norm(g * v, ord=np.inf) + + f_augmented = np.zeros((m + n)) + if tr_solver == 'exact': + J_augmented = np.empty((m + n, n)) + elif tr_solver == 'lsmr': + reg_term = 0.0 + regularize = tr_options.pop('regularize', True) + + if max_nfev is None: + max_nfev = x0.size * 100 + + alpha = 0.0 # "Levenberg-Marquardt" parameter + + termination_status = None + iteration = 0 + step_norm = None + actual_reduction = None + + if verbose == 2: + print_header_nonlinear() + + while True: + v, dv = CL_scaling_vector(x, g, lb, ub) + + g_norm = norm(g * v, ord=np.inf) + if g_norm < gtol: + termination_status = 1 + + if verbose == 2: + print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, + step_norm, g_norm) + + if termination_status is not None or nfev == max_nfev: + break + + # Now compute variables in "hat" space. Here we also account for + # scaling introduced by `x_scale` parameter. This part is a bit tricky, + # you have to write down the formulas and see how the trust-region + # problem is formulated when the two types of scaling are applied. + # The idea is that first we apply `x_scale` and then apply Coleman-Li + # approach in the new variables. + + # v is recomputed in the variables after applying `x_scale`, note that + # components which were identically 1 not affected. + v[dv != 0] *= scale_inv[dv != 0] + + # Here we apply two types of scaling. + d = v**0.5 * scale + + # C = diag(g * scale) Jv + diag_h = g * dv * scale + + # After all this were done, we continue normally. + + # "hat" gradient. + g_h = d * g + + f_augmented[:m] = f + if tr_solver == 'exact': + J_augmented[:m] = J * d + J_h = J_augmented[:m] # Memory view. 
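+            # Appending rows diag(diag_h**0.5) with a zero right-hand side
+            # makes the SVD-based trust-region solver below handle the
+            # regularized problem min ||J_h p + f||^2 + p^T diag(diag_h) p.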
+ J_augmented[m:] = np.diag(diag_h**0.5) + U, s, V = svd(J_augmented, full_matrices=False) + V = V.T + uf = U.T.dot(f_augmented) + elif tr_solver == 'lsmr': + J_h = right_multiplied_operator(J, d) + + if regularize: + a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h) + to_tr = Delta / norm(g_h) + ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1] + reg_term = -ag_value / Delta**2 + + lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5) + gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0] + S = np.vstack((g_h, gn_h)).T + S, _ = qr(S, mode='economic') + JS = J_h.dot(S) # LinearOperator does dot too. + B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S) + g_S = S.T.dot(g_h) + + # theta controls step back step ratio from the bounds. + theta = max(0.995, 1 - g_norm) + + actual_reduction = -1 + while actual_reduction <= 0 and nfev < max_nfev: + if tr_solver == 'exact': + p_h, alpha, n_iter = solve_lsq_trust_region( + n, m, uf, s, V, Delta, initial_alpha=alpha) + elif tr_solver == 'lsmr': + p_S, _ = solve_trust_region_2d(B_S, g_S, Delta) + p_h = S.dot(p_S) + + p = d * p_h # Trust-region solution in the original space. + step, step_h, predicted_reduction = select_step( + x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta) + + x_new = make_strictly_feasible(x + step, lb, ub, rstep=0) + f_new = fun(x_new) + nfev += 1 + + step_h_norm = norm(step_h) + + if not np.all(np.isfinite(f_new)): + Delta = 0.25 * step_h_norm + continue + + # Usual trust-region step quality estimation. + if loss_function is not None: + cost_new = loss_function(f_new, cost_only=True) + else: + cost_new = 0.5 * np.dot(f_new, f_new) + actual_reduction = cost - cost_new + # Correction term is specific to the algorithm, + # vanishes in unbounded case. + correction = 0.5 * np.dot(step_h * diag_h, step_h) + + Delta_new, ratio = update_tr_radius( + Delta, actual_reduction - correction, predicted_reduction, + step_h_norm, step_h_norm > 0.95 * Delta + ) + alpha *= Delta / Delta_new + Delta = Delta_new + + step_norm = norm(step) + termination_status = check_termination( + actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) + + if termination_status is not None: + break + + if actual_reduction > 0: + x = x_new + + f = f_new + f_true = f.copy() + + cost = cost_new + + J = jac(x, f) + njev += 1 + + if loss_function is not None: + rho = loss_function(f) + J, f = scale_for_robust_loss_function(J, f, rho) + + g = compute_grad(J, f) + + if jac_scale: + scale, scale_inv = compute_jac_scale(J, scale_inv) + else: + step_norm = 0 + actual_reduction = 0 + + iteration += 1 + + if termination_status is None: + termination_status = 0 + + active_mask = find_active_constraints(x, lb, ub, rtol=xtol) + return OptimizeResult( + x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm, + active_mask=active_mask, nfev=nfev, njev=njev, + status=termination_status) + + +def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, + x_scale, loss_function, tr_solver, tr_options, verbose): + x = x0.copy() + + f = f0 + f_true = f.copy() + nfev = 1 + + J = J0 + njev = 1 + m, n = J.shape + + if loss_function is not None: + rho = loss_function(f) + cost = 0.5 * np.sum(rho[0]) + J, f = scale_for_robust_loss_function(J, f, rho) + else: + cost = 0.5 * np.dot(f, f) + + g = compute_grad(J, f) + + jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac' + if jac_scale: + scale, scale_inv = compute_jac_scale(J) + else: + scale, scale_inv = x_scale, 1 / x_scale + + Delta = norm(x0 * scale_inv) + if Delta == 0: + Delta = 1.0 + 
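+    # The initial trust-region radius is the norm of the scaled x0; when x0
+    # is at the origin the radius falls back to 1.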
+ if tr_solver == 'lsmr': + reg_term = 0 + damp = tr_options.pop('damp', 0.0) + regularize = tr_options.pop('regularize', True) + + if max_nfev is None: + max_nfev = x0.size * 100 + + alpha = 0.0 # "Levenberg-Marquardt" parameter + + termination_status = None + iteration = 0 + step_norm = None + actual_reduction = None + + if verbose == 2: + print_header_nonlinear() + + while True: + g_norm = norm(g, ord=np.inf) + if g_norm < gtol: + termination_status = 1 + + if verbose == 2: + print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, + step_norm, g_norm) + + if termination_status is not None or nfev == max_nfev: + break + + d = scale + g_h = d * g + + if tr_solver == 'exact': + J_h = J * d + U, s, V = svd(J_h, full_matrices=False) + V = V.T + uf = U.T.dot(f) + elif tr_solver == 'lsmr': + J_h = right_multiplied_operator(J, d) + + if regularize: + a, b = build_quadratic_1d(J_h, g_h, -g_h) + to_tr = Delta / norm(g_h) + ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1] + reg_term = -ag_value / Delta**2 + + damp_full = (damp**2 + reg_term)**0.5 + gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0] + S = np.vstack((g_h, gn_h)).T + S, _ = qr(S, mode='economic') + JS = J_h.dot(S) + B_S = np.dot(JS.T, JS) + g_S = S.T.dot(g_h) + + actual_reduction = -1 + while actual_reduction <= 0 and nfev < max_nfev: + if tr_solver == 'exact': + step_h, alpha, n_iter = solve_lsq_trust_region( + n, m, uf, s, V, Delta, initial_alpha=alpha) + elif tr_solver == 'lsmr': + p_S, _ = solve_trust_region_2d(B_S, g_S, Delta) + step_h = S.dot(p_S) + + predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h) + step = d * step_h + x_new = x + step + f_new = fun(x_new) + nfev += 1 + + step_h_norm = norm(step_h) + + if not np.all(np.isfinite(f_new)): + Delta = 0.25 * step_h_norm + continue + + # Usual trust-region step quality estimation. 
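+            # The robust loss (when given) is applied here as well, so the
+            # actual reduction is measured on the same cost scale as the
+            # predicted reduction.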
+ if loss_function is not None: + cost_new = loss_function(f_new, cost_only=True) + else: + cost_new = 0.5 * np.dot(f_new, f_new) + actual_reduction = cost - cost_new + + Delta_new, ratio = update_tr_radius( + Delta, actual_reduction, predicted_reduction, + step_h_norm, step_h_norm > 0.95 * Delta) + alpha *= Delta / Delta_new + Delta = Delta_new + + step_norm = norm(step) + termination_status = check_termination( + actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) + + if termination_status is not None: + break + + if actual_reduction > 0: + x = x_new + + f = f_new + f_true = f.copy() + + cost = cost_new + + J = jac(x, f) + njev += 1 + + if loss_function is not None: + rho = loss_function(f) + J, f = scale_for_robust_loss_function(J, f, rho) + + g = compute_grad(J, f) + + if jac_scale: + scale, scale_inv = compute_jac_scale(J, scale_inv) + else: + step_norm = 0 + actual_reduction = 0 + + iteration += 1 + + if termination_status is None: + termination_status = 0 + + active_mask = np.zeros_like(x) + return OptimizeResult( + x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm, + active_mask=active_mask, nfev=nfev, njev=njev, + status=termination_status) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf.pyc new file mode 100644 index 0000000..1115737 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf_linear.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf_linear.py new file mode 100644 index 0000000..849ef3a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf_linear.py @@ -0,0 +1,251 @@ +"""The adaptation of Trust Region Reflective algorithm for a linear +least-squares problem.""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.linalg import norm +from scipy.linalg import qr, solve_triangular +from scipy.sparse.linalg import lsmr +from scipy.optimize import OptimizeResult + +from .givens_elimination import givens_elimination +from .common import ( + EPS, step_size_to_bound, find_active_constraints, in_bounds, + make_strictly_feasible, build_quadratic_1d, evaluate_quadratic, + minimize_quadratic_1d, CL_scaling_vector, reflective_transformation, + print_header_linear, print_iteration_linear, compute_grad, + regularized_lsq_operator, right_multiplied_operator) + + +def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True): + """Solve regularized least squares using information from QR-decomposition. + + The initial problem is to solve the following system in a least-squares + sense: + :: + + A x = b + D x = 0 + + Where D is diagonal matrix. The method is based on QR decomposition + of the form A P = Q R, where P is a column permutation matrix, Q is an + orthogonal matrix and R is an upper triangular matrix. + + Parameters + ---------- + m, n : int + Initial shape of A. + R : ndarray, shape (n, n) + Upper triangular matrix from QR decomposition of A. + QTb : ndarray, shape (n,) + First n components of Q^T b. + perm : ndarray, shape (n,) + Array defining column permutation of A, such that i-th column of + P is perm[i]-th column of identity matrix. + diag : ndarray, shape (n,) + Array containing diagonal elements of D. + + Returns + ------- + x : ndarray, shape (n,) + Found least-squares solution. 
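+
+    Notes
+    -----
+    The rows corresponding to the diagonal matrix D are eliminated one by
+    one with Givens rotations applied to R (see `givens_elimination`), so
+    the augmented matrix is never formed explicitly.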
+ """ + if copy_R: + R = R.copy() + v = QTb.copy() + + givens_elimination(R, v, diag[perm]) + + abs_diag_R = np.abs(np.diag(R)) + threshold = EPS * max(m, n) * np.max(abs_diag_R) + nns, = np.nonzero(abs_diag_R > threshold) + + R = R[np.ix_(nns, nns)] + v = v[nns] + + x = np.zeros(n) + x[perm[nns]] = solve_triangular(R, v) + + return x + + +def backtracking(A, g, x, p, theta, p_dot_g, lb, ub): + """Find an appropriate step size using backtracking line search.""" + alpha = 1 + while True: + x_new, _ = reflective_transformation(x + alpha * p, lb, ub) + step = x_new - x + cost_change = -evaluate_quadratic(A, g, step) + if cost_change > -0.1 * alpha * p_dot_g: + break + alpha *= 0.5 + + active = find_active_constraints(x_new, lb, ub) + if np.any(active != 0): + x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub) + x_new = make_strictly_feasible(x_new, lb, ub, rstep=0) + step = x_new - x + cost_change = -evaluate_quadratic(A, g, step) + + return x, step, cost_change + + +def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta): + """Select the best step according to Trust Region Reflective algorithm.""" + if in_bounds(x + p, lb, ub): + return p + + p_stride, hits = step_size_to_bound(x, p, lb, ub) + r_h = np.copy(p_h) + r_h[hits.astype(bool)] *= -1 + r = d * r_h + + # Restrict step, such that it hits the bound. + p *= p_stride + p_h *= p_stride + x_on_bound = x + p + + # Find the step size along reflected direction. + r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub) + + # Stay interior. + r_stride_l = (1 - theta) * r_stride_u + r_stride_u *= theta + + if r_stride_u > 0: + a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h) + r_stride, r_value = minimize_quadratic_1d( + a, b, r_stride_l, r_stride_u, c=c) + r_h = p_h + r_h * r_stride + r = d * r_h + else: + r_value = np.inf + + # Now correct p_h to make it strictly interior. 
+ p_h *= theta + p *= theta + p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h) + + ag_h = -g_h + ag = d * ag_h + ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub) + ag_stride_u *= theta + a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h) + ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u) + ag *= ag_stride + + if p_value < r_value and p_value < ag_value: + return p + elif r_value < p_value and r_value < ag_value: + return r + else: + return ag + + +def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol, max_iter, + verbose): + m, n = A.shape + x, _ = reflective_transformation(x_lsq, lb, ub) + x = make_strictly_feasible(x, lb, ub, rstep=0.1) + + if lsq_solver == 'exact': + QT, R, perm = qr(A, mode='economic', pivoting=True) + QT = QT.T + + if m < n: + R = np.vstack((R, np.zeros((n - m, n)))) + + QTr = np.zeros(n) + k = min(m, n) + elif lsq_solver == 'lsmr': + r_aug = np.zeros(m + n) + auto_lsmr_tol = False + if lsmr_tol is None: + lsmr_tol = 1e-2 * tol + elif lsmr_tol == 'auto': + auto_lsmr_tol = True + + r = A.dot(x) - b + g = compute_grad(A, r) + cost = 0.5 * np.dot(r, r) + initial_cost = cost + + termination_status = None + step_norm = None + cost_change = None + + if max_iter is None: + max_iter = 100 + + if verbose == 2: + print_header_linear() + + for iteration in range(max_iter): + v, dv = CL_scaling_vector(x, g, lb, ub) + g_scaled = g * v + g_norm = norm(g_scaled, ord=np.inf) + if g_norm < tol: + termination_status = 1 + + if verbose == 2: + print_iteration_linear(iteration, cost, cost_change, + step_norm, g_norm) + + if termination_status is not None: + break + + diag_h = g * dv + diag_root_h = diag_h ** 0.5 + d = v ** 0.5 + g_h = d * g + + A_h = right_multiplied_operator(A, d) + if lsq_solver == 'exact': + QTr[:k] = QT.dot(r) + p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm, + diag_root_h, copy_R=False) + elif lsq_solver == 'lsmr': + lsmr_op = regularized_lsq_operator(A_h, diag_root_h) + r_aug[:m] = r + if auto_lsmr_tol: + eta = 1e-2 * min(0.5, g_norm) + lsmr_tol = max(EPS, min(0.1, eta * g_norm)) + p_h = -lsmr(lsmr_op, r_aug, atol=lsmr_tol, btol=lsmr_tol)[0] + + p = d * p_h + + p_dot_g = np.dot(p, g) + if p_dot_g > 0: + termination_status = -1 + + theta = 1 - min(0.005, g_norm) + step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta) + cost_change = -evaluate_quadratic(A, g, step) + + # Perhaps almost never executed, the idea is that `p` is descent + # direction thus we must find acceptable cost decrease using simple + # "backtracking", otherwise algorithm's logic would break. 
+ if cost_change < 0: + x, step, cost_change = backtracking( + A, g, x, p, theta, p_dot_g, lb, ub) + else: + x = make_strictly_feasible(x + step, lb, ub, rstep=0) + + step_norm = norm(step) + r = A.dot(x) - b + g = compute_grad(A, r) + + if cost_change < tol * cost: + termination_status = 2 + + cost = 0.5 * np.dot(r, r) + + if termination_status is None: + termination_status = 0 + + active_mask = find_active_constraints(x, lb, ub, rtol=tol) + + return OptimizeResult( + x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask, + nit=iteration + 1, status=termination_status, + initial_cost=initial_cost) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf_linear.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf_linear.pyc new file mode 100644 index 0000000..8af5ca0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_lsq/trf_linear.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_minimize.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_minimize.py new file mode 100644 index 0000000..748a74e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_minimize.py @@ -0,0 +1,820 @@ +""" +Unified interfaces to minimization algorithms. + +Functions +--------- +- minimize : minimization of a function of several variables. +- minimize_scalar : minimization of a function of one variable. +""" +from __future__ import division, print_function, absolute_import + + +__all__ = ['minimize', 'minimize_scalar'] + + +from warnings import warn + +import numpy as np + +from scipy._lib.six import callable + +# unconstrained minimization +from .optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg, + _minimize_bfgs, _minimize_newtoncg, + _minimize_scalar_brent, _minimize_scalar_bounded, + _minimize_scalar_golden, MemoizeJac) +from ._trustregion_dogleg import _minimize_dogleg +from ._trustregion_ncg import _minimize_trust_ncg +from ._trustregion_krylov import _minimize_trust_krylov +from ._trustregion_exact import _minimize_trustregion_exact +from ._trustregion_constr import _minimize_trustregion_constr + +# constrained minimization +from .lbfgsb import _minimize_lbfgsb +from .tnc import _minimize_tnc +from .cobyla import _minimize_cobyla +from .slsqp import _minimize_slsqp +from ._constraints import (old_bound_to_new, new_bounds_to_old, + old_constraint_to_new, new_constraint_to_old, + NonlinearConstraint, LinearConstraint, Bounds) + + +def minimize(fun, x0, args=(), method=None, jac=None, hess=None, + hessp=None, bounds=None, constraints=(), tol=None, + callback=None, options=None): + """Minimization of scalar function of one or more variables. + + Parameters + ---------- + fun : callable + The objective function to be minimized. + + ``fun(x, *args) -> float`` + + where x is an 1-D array with shape (n,) and `args` + is a tuple of the fixed parameters needed to completely + specify the function. + x0 : ndarray, shape (n,) + Initial guess. Array of real elements of size (n,), + where 'n' is the number of independent variables. + args : tuple, optional + Extra arguments passed to the objective function and its + derivatives (`fun`, `jac` and `hess` functions). + method : str or callable, optional + Type of solver. 
Should be one of + + - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>` + - 'Powell' :ref:`(see here) <optimize.minimize-powell>` + - 'CG' :ref:`(see here) <optimize.minimize-cg>` + - 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>` + - 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>` + - 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>` + - 'TNC' :ref:`(see here) <optimize.minimize-tnc>` + - 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>` + - 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>` + - 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>` + - 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>` + - 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>` + - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>` + - 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>` + - custom - a callable object (added in version 0.14.0), + see below for description. + + If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``, + depending if the problem has constraints or bounds. + jac : {callable, '2-point', '3-point', 'cs', bool}, optional + Method for computing the gradient vector. Only for CG, BFGS, + Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, + trust-exact and trust-constr. If it is a callable, it should be a + function that returns the gradient vector: + + ``jac(x, *args) -> array_like, shape (n,)`` + + where x is an array with shape (n,) and `args` is a tuple with + the fixed parameters. Alternatively, the keywords + {'2-point', '3-point', 'cs'} select a finite + difference scheme for numerical estimation of the gradient. Options + '3-point' and 'cs' are available only to 'trust-constr'. + If `jac` is a Boolean and is True, `fun` is assumed to return the + gradient along with the objective function. If False, the gradient + will be estimated using '2-point' finite difference estimation. + hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional + Method for computing the Hessian matrix. Only for Newton-CG, dogleg, + trust-ncg, trust-krylov, trust-exact and trust-constr. If it is + callable, it should return the Hessian matrix: + + ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` + + where x is a (n,) ndarray and `args` is a tuple with the fixed + parameters. LinearOperator and sparse matrix returns are + allowed only for 'trust-constr' method. Alternatively, the keywords + {'2-point', '3-point', 'cs'} select a finite difference scheme + for numerical estimation. Or, objects implementing + `HessianUpdateStrategy` interface can be used to approximate + the Hessian. Available quasi-Newton methods implementing + this interface are: + + - `BFGS`; + - `SR1`. + + Whenever the gradient is estimated via finite-differences, + the Hessian cannot be estimated with options + {'2-point', '3-point', 'cs'} and needs to be + estimated using one of the quasi-Newton strategies. + Finite-difference options {'2-point', '3-point', 'cs'} and + `HessianUpdateStrategy` are available only for 'trust-constr' method. + hessp : callable, optional + Hessian of objective function times an arbitrary vector p. Only for + Newton-CG, trust-ncg, trust-krylov, trust-constr. + Only one of `hessp` or `hess` needs to be given. If `hess` is + provided, then `hessp` will be ignored. 
`hessp` must compute the
+        Hessian times an arbitrary vector:
+
+            ``hessp(x, p, *args) ->  ndarray shape (n,)``
+
+        where x is a (n,) ndarray, p is an arbitrary vector with
+        dimension (n,) and `args` is a tuple with the fixed
+        parameters.
+    bounds : sequence or `Bounds`, optional
+        Bounds on variables for L-BFGS-B, TNC, SLSQP and
+        trust-constr methods. There are two ways to specify the bounds:
+
+            1. Instance of `Bounds` class.
+            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
+               is used to specify no bound.
+
+    constraints : {Constraint, dict} or List of {Constraint, dict}, optional
+        Constraints definition (only for COBYLA, SLSQP and trust-constr).
+        Constraints for 'trust-constr' are defined as a single object or a
+        list of objects specifying constraints to the optimization problem.
+        Available constraints are:
+
+            - `LinearConstraint`
+            - `NonlinearConstraint`
+
+        Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
+        Each dictionary has fields:
+
+            type : str
+                Constraint type: 'eq' for equality, 'ineq' for inequality.
+            fun : callable
+                The function defining the constraint.
+            jac : callable, optional
+                The Jacobian of `fun` (only for SLSQP).
+            args : sequence, optional
+                Extra arguments to be passed to the function and Jacobian.
+
+        An equality constraint means that the constraint function result is
+        to be zero, whereas an inequality constraint means that it is to be
+        non-negative. Note that COBYLA only supports inequality constraints.
+    tol : float, optional
+        Tolerance for termination. For detailed control, use solver-specific
+        options.
+    options : dict, optional
+        A dictionary of solver options. All methods accept the following
+        generic options:
+
+            maxiter : int
+                Maximum number of iterations to perform.
+            disp : bool
+                Set to True to print convergence messages.
+
+        For method-specific options, see :func:`show_options()`.
+    callback : callable, optional
+        Called after each iteration. For 'trust-constr' it is a callable with
+        the signature:
+
+            ``callback(xk, OptimizeResult state) -> bool``
+
+        where ``xk`` is the current parameter vector and ``state``
+        is an `OptimizeResult` object, with the same fields
+        as the ones from the return. If callback returns True
+        the algorithm execution is terminated.
+        For all the other methods, the signature is:
+
+            ``callback(xk)``
+
+        where ``xk`` is the current parameter vector.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as an ``OptimizeResult`` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes.
+
+
+    See also
+    --------
+    minimize_scalar : Interface to minimization algorithms for scalar
+        univariate functions
+    show_options : Additional options accepted by the solvers
+
+    Notes
+    -----
+    This section describes the available solvers that can be selected by the
+    'method' parameter. The default method is *BFGS*.
+
+    **Unconstrained minimization**
+
+    Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
+    Simplex algorithm [1]_, [2]_. This algorithm is robust in many
+    applications. However, if numerical computation of derivatives can be
+    trusted, other algorithms using the first and/or second derivatives
+    information might be preferred for their better performance in
+    general.
+
+    Method :ref:`Powell <optimize.minimize-powell>` is a modification
+    of Powell's method [3]_, [4]_ which is a conjugate direction
+    method. It performs sequential one-dimensional minimizations along
+    each vector of the directions set (`direc` field in `options` and
+    `info`), which is updated at each iteration of the main
+    minimization loop. The function need not be differentiable, and no
+    derivatives are taken.
+
+    Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
+    gradient algorithm by Polak and Ribiere, a variant of the
+    Fletcher-Reeves method described in [5]_ pp. 120-122. Only the
+    first derivatives are used.
+
+    Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
+    method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
+    pp. 136. It uses the first derivatives only. BFGS has proven good
+    performance even for non-smooth optimizations. This method also
+    returns an approximation of the Hessian inverse, stored as
+    `hess_inv` in the OptimizeResult object.
+
+    Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
+    Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
+    Newton method). It uses a CG method to compute the search
+    direction. See also the *TNC* method for a box-constrained
+    minimization with a similar algorithm. Suitable for large-scale
+    problems.
+
+    Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
+    trust-region algorithm [5]_ for unconstrained minimization. This
+    algorithm requires the gradient and Hessian; furthermore the
+    Hessian is required to be positive definite.
+
+    Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
+    Newton conjugate gradient trust-region algorithm [5]_ for
+    unconstrained minimization. This algorithm requires the gradient
+    and either the Hessian or a function that computes the product of
+    the Hessian with a given vector. Suitable for large-scale problems.
+
+    Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
+    the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
+    minimization. This algorithm requires the gradient
+    and either the Hessian or a function that computes the product of
+    the Hessian with a given vector. Suitable for large-scale problems.
+    On indefinite problems it usually requires fewer iterations than the
+    `trust-ncg` method and is recommended for medium and large-scale problems.
+
+    Method :ref:`trust-exact <optimize.minimize-trustexact>`
+    is a trust-region method for unconstrained minimization in which
+    quadratic subproblems are solved almost exactly [13]_. This
+    algorithm requires the gradient and the Hessian (which is
+    *not* required to be positive definite). In many situations it is
+    the Newton method that converges in the fewest iterations, and it
+    is the most recommended for small and medium-size problems.
+
+    **Bound-Constrained minimization**
+
+    Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
+    algorithm [6]_, [7]_ for bound constrained minimization.
+
+    Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
+    algorithm [5]_, [8]_ to minimize a function with variables subject
+    to bounds. This algorithm uses gradient information; it is also
+    called Newton Conjugate-Gradient. It differs from the *Newton-CG*
+    method described above as it wraps a C implementation and allows
+    each variable to be given upper and lower bounds.
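+
+    As a minimal sketch (reusing the `rosen` test function from the examples
+    below), bounds for these methods are supplied as ``(min, max)`` pairs:
+
+    >>> from scipy.optimize import minimize, rosen
+    >>> res = minimize(rosen, [1.3, 0.7, 0.8, 1.9, 1.2],
+    ...                method='L-BFGS-B', bounds=[(0, 2)] * 5)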
+
+    **Constrained Minimization**
+
+    Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
+    Constrained Optimization BY Linear Approximation (COBYLA) method
+    [9]_, [10]_, [11]_. The algorithm is based on linear
+    approximations to the objective function and each constraint. The
+    method wraps a FORTRAN implementation of the algorithm. The
+    constraint functions 'fun' may return either a single number
+    or an array or list of numbers.
+
+    Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
+    Least SQuares Programming to minimize a function of several
+    variables with any combination of bounds, equality and inequality
+    constraints. The method wraps the SLSQP Optimization subroutine
+    originally implemented by Dieter Kraft [12]_. Note that the
+    wrapper handles infinite values in bounds by converting them into
+    large floating values.
+
+    Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
+    trust-region algorithm for constrained optimization. It switches
+    between two implementations depending on the problem definition.
+    It is the most versatile constrained minimization algorithm
+    implemented in SciPy and the most appropriate for large-scale problems.
+    For equality constrained problems it is an implementation of the
+    Byrd-Omojokun Trust-Region SQP method described in [17]_ and in [5]_,
+    p. 549. When inequality constraints are imposed as well, it switches
+    to the trust-region interior point method described in [16]_. This
+    interior point algorithm, in turn, solves inequality constraints by
+    introducing slack variables and solving a sequence of
+    equality-constrained barrier problems
+    for progressively smaller values of the barrier parameter.
+    The previously described equality constrained SQP method is
+    used to solve the subproblems with increasing levels of accuracy
+    as the iterate gets closer to a solution.
+
+    **Finite-Difference Options**
+
+    For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
+    the gradient and the Hessian may be approximated using
+    three finite-difference schemes: {'2-point', '3-point', 'cs'}.
+    The scheme 'cs' is, potentially, the most accurate but it
+    requires the function to correctly handle complex inputs and to
+    be differentiable in the complex plane. The scheme '3-point' is more
+    accurate than '2-point' but requires twice as many operations.
+
+    **Custom minimizers**
+
+    It may be useful to pass a custom minimization method, for example
+    when using a frontend to this method such as `scipy.optimize.basinhopping`
+    or a different library. You can simply pass a callable as the ``method``
+    parameter.
+
+    The callable is called as ``method(fun, x0, args, **kwargs, **options)``
+    where ``kwargs`` corresponds to any other parameters passed to `minimize`
+    (such as `callback`, `hess`, etc.), except the `options` dict, which has
+    its contents also passed as `method` parameters pair by pair. Also, if
+    `jac` has been passed as a bool type, `jac` and `fun` are mangled so that
+    `fun` returns just the function values and `jac` is converted to a function
+    returning the Jacobian. The method shall return an ``OptimizeResult``
+    object.
+
+    The provided `method` callable must be able to accept (and possibly ignore)
+    arbitrary parameters; the set of parameters accepted by `minimize` may
+    expand in future versions and then these parameters will be passed to
+    the method. You can find an example in the scipy.optimize tutorial.
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1] Nelder, J A, and R Mead. 1965.
A Simplex Method for Function + Minimization. The Computer Journal 7: 308-13. + .. [2] Wright M H. 1996. Direct search methods: Once scorned, now + respectable, in Numerical Analysis 1995: Proceedings of the 1995 + Dundee Biennial Conference in Numerical Analysis (Eds. D F + Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK. + 191-208. + .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of + a function of several variables without calculating derivatives. The + Computer Journal 7: 155-162. + .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery. + Numerical Recipes (any edition), Cambridge University Press. + .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization. + Springer New York. + .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory + Algorithm for Bound Constrained Optimization. SIAM Journal on + Scientific and Statistical Computing 16 (5): 1190-1208. + .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm + 778: L-BFGS-B, FORTRAN routines for large scale bound constrained + optimization. ACM Transactions on Mathematical Software 23 (4): + 550-560. + .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method. + 1984. SIAM Journal of Numerical Analysis 21: 770-778. + .. [9] Powell, M J D. A direct search optimization method that models + the objective and constraint functions by linear interpolation. + 1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez + and J-P Hennart, Kluwer Academic (Dordrecht), 51-67. + .. [10] Powell M J D. Direct search algorithms for optimization + calculations. 1998. Acta Numerica 7: 287-336. + .. [11] Powell M J D. A view of algorithms for optimization without + derivatives. 2007.Cambridge University Technical Report DAMTP + 2007/NA03 + .. [12] Kraft, D. A software package for sequential quadratic + programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace + Center -- Institute for Flight Mechanics, Koln, Germany. + .. [13] Conn, A. R., Gould, N. I., and Toint, P. L. + Trust region methods. 2000. Siam. pp. 169-200. + .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free + implementation of the GLTR method for iterative solution of + the trust region problem", https://arxiv.org/abs/1611.04718 + .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the + Trust-Region Subproblem using the Lanczos Method", + SIAM J. Optim., 9(2), 504--525, (1999). + .. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999. + An interior point algorithm for large-scale nonlinear programming. + SIAM Journal on Optimization 9.4: 877-900. + .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the + implementation of an algorithm for large-scale equality constrained + optimization. SIAM Journal on Optimization 8.3: 682-706. + + Examples + -------- + Let us consider the problem of minimizing the Rosenbrock function. This + function (and its respective derivatives) is implemented in `rosen` + (resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`. + + >>> from scipy.optimize import minimize, rosen, rosen_der + + A simple application of the *Nelder-Mead* method is: + + >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6) + >>> res.x + array([ 1., 1., 1., 1., 1.]) + + Now using the *BFGS* algorithm, using the first derivative and a few + options: + + >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der, + ... 
options={'gtol': 1e-6, 'disp': True}) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 26 + Function evaluations: 31 + Gradient evaluations: 31 + >>> res.x + array([ 1., 1., 1., 1., 1.]) + >>> print(res.message) + Optimization terminated successfully. + >>> res.hess_inv + array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary + [ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269], + [ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151], + [ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ], + [ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]]) + + + Next, consider a minimization problem with several constraints (namely + Example 16.4 from [5]_). The objective function is: + + >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + + There are three constraints defined as: + + >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + ... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, + ... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) + + And variables must be positive, hence the following bounds: + + >>> bnds = ((0, None), (0, None)) + + The optimization problem is solved using the SLSQP method as: + + >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds, + ... constraints=cons) + + It should converge to the theoretical solution (1.4 ,1.7). + + """ + x0 = np.asarray(x0) + if x0.dtype.kind in np.typecodes["AllInteger"]: + x0 = np.asarray(x0, dtype=float) + + if not isinstance(args, tuple): + args = (args,) + + if method is None: + # Select automatically + if constraints: + method = 'SLSQP' + elif bounds is not None: + method = 'L-BFGS-B' + else: + method = 'BFGS' + + if callable(method): + meth = "_custom" + else: + meth = method.lower() + + if options is None: + options = {} + # check if optional parameters are supported by the selected method + # - jac + if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac): + warn('Method %s does not use gradient information (jac).' % method, + RuntimeWarning) + # - hess + if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr', + 'trust-krylov', 'trust-exact', '_custom') and hess is not None: + warn('Method %s does not use Hessian information (hess).' % method, + RuntimeWarning) + # - hessp + if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr', + 'trust-krylov', '_custom') \ + and hessp is not None: + warn('Method %s does not use Hessian-vector product ' + 'information (hessp).' % method, RuntimeWarning) + # - constraints or bounds + if (meth in ('nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'dogleg', + 'trust-ncg') and (bounds is not None or np.any(constraints))): + warn('Method %s cannot handle constraints nor bounds.' % method, + RuntimeWarning) + if meth in ('l-bfgs-b', 'tnc') and np.any(constraints): + warn('Method %s cannot handle constraints.' % method, + RuntimeWarning) + if meth == 'cobyla' and bounds is not None: + warn('Method %s cannot handle bounds.' % method, + RuntimeWarning) + # - callback + if (meth in ('cobyla',) and callback is not None): + warn('Method %s does not support callback.' % method, RuntimeWarning) + # - return_all + if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and + options.get('return_all', False)): + warn('Method %s does not support the return_all option.' 
% method, + RuntimeWarning) + + # check gradient vector + if meth == 'trust-constr': + if type(jac) is bool: + if jac: + fun = MemoizeJac(fun) + jac = fun.derivative + else: + jac = '2-point' + elif jac is None: + jac = '2-point' + elif not callable(jac) and jac not in ('2-point', '3-point', 'cs'): + raise ValueError("Unsupported jac definition.") + else: + if jac in ('2-point', '3-point', 'cs'): + if jac in ('3-point', 'cs'): + warn("Only 'trust-constr' method accept %s " + "options for 'jac'. Using '2-point' instead." % jac) + jac = None + elif not callable(jac): + if bool(jac): + fun = MemoizeJac(fun) + jac = fun.derivative + else: + jac = None + + # set default tolerances + if tol is not None: + options = dict(options) + if meth == 'nelder-mead': + options.setdefault('xatol', tol) + options.setdefault('fatol', tol) + if meth in ('newton-cg', 'powell', 'tnc'): + options.setdefault('xtol', tol) + if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'): + options.setdefault('ftol', tol) + if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg', + 'trust-ncg', 'trust-exact', 'trust-krylov'): + options.setdefault('gtol', tol) + if meth in ('cobyla', '_custom'): + options.setdefault('tol', tol) + if meth == 'trust-constr': + options.setdefault('xtol', tol) + options.setdefault('gtol', tol) + options.setdefault('barrier_tol', tol) + + if bounds is not None: + bounds = standardize_bounds(bounds, x0, meth) + + if constraints is not None: + constraints = standardize_constraints(constraints, x0, meth) + + if meth == '_custom': + return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, + bounds=bounds, constraints=constraints, + callback=callback, **options) + elif meth == 'nelder-mead': + return _minimize_neldermead(fun, x0, args, callback, **options) + elif meth == 'powell': + return _minimize_powell(fun, x0, args, callback, **options) + elif meth == 'cg': + return _minimize_cg(fun, x0, args, jac, callback, **options) + elif meth == 'bfgs': + return _minimize_bfgs(fun, x0, args, jac, callback, **options) + elif meth == 'newton-cg': + return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback, + **options) + elif meth == 'l-bfgs-b': + return _minimize_lbfgsb(fun, x0, args, jac, bounds, + callback=callback, **options) + elif meth == 'tnc': + return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, + **options) + elif meth == 'cobyla': + return _minimize_cobyla(fun, x0, args, constraints, **options) + elif meth == 'slsqp': + return _minimize_slsqp(fun, x0, args, jac, bounds, + constraints, callback=callback, **options) + elif meth == 'trust-constr': + return _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp, + bounds, constraints, + callback=callback, **options) + elif meth == 'dogleg': + return _minimize_dogleg(fun, x0, args, jac, hess, + callback=callback, **options) + elif meth == 'trust-ncg': + return _minimize_trust_ncg(fun, x0, args, jac, hess, hessp, + callback=callback, **options) + elif meth == 'trust-krylov': + return _minimize_trust_krylov(fun, x0, args, jac, hess, hessp, + callback=callback, **options) + elif meth == 'trust-exact': + return _minimize_trustregion_exact(fun, x0, args, jac, hess, + callback=callback, **options) + else: + raise ValueError('Unknown solver %s' % method) + + +def minimize_scalar(fun, bracket=None, bounds=None, args=(), + method='brent', tol=None, options=None): + """Minimization of scalar function of one variable. + + Parameters + ---------- + fun : callable + Objective function. + Scalar function, must return a scalar. 
+ bracket : sequence, optional + For methods 'brent' and 'golden', `bracket` defines the bracketing + interval and can either have three items ``(a, b, c)`` so that + ``a < b < c`` and ``fun(b) < fun(a), fun(c)`` or two items ``a`` and + ``c`` which are assumed to be a starting interval for a downhill + bracket search (see `bracket`); it doesn't always mean that the + obtained solution will satisfy ``a <= x <= c``. + bounds : sequence, optional + For method 'bounded', `bounds` is mandatory and must have two items + corresponding to the optimization bounds. + args : tuple, optional + Extra arguments passed to the objective function. + method : str or callable, optional + Type of solver. Should be one of: + + - 'Brent' :ref:`(see here) <optimize.minimize_scalar-brent>` + - 'Bounded' :ref:`(see here) <optimize.minimize_scalar-bounded>` + - 'Golden' :ref:`(see here) <optimize.minimize_scalar-golden>` + - custom - a callable object (added in version 0.14.0), see below + + tol : float, optional + Tolerance for termination. For detailed control, use solver-specific + options. + options : dict, optional + A dictionary of solver options. + + maxiter : int + Maximum number of iterations to perform. + disp : bool + Set to True to print convergence messages. + + See :func:`show_options()` for solver-specific options. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + See also + -------- + minimize : Interface to minimization algorithms for scalar multivariate + functions + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is *Brent*. + + Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's + algorithm to find a local minimum. The algorithm uses inverse + parabolic interpolation when possible to speed up convergence of + the golden section method. + + Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the + golden section search technique. It uses analog of the bisection + method to decrease the bracketed interval. It is usually + preferable to use the *Brent* method. + + Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can + perform bounded minimization. It uses the Brent method to find a + local minimum in the interval x1 < xopt < x2. + + **Custom minimizers** + + It may be useful to pass a custom minimization method, for example + when using some library frontend to minimize_scalar. You can simply + pass a callable as the ``method`` parameter. + + The callable is called as ``method(fun, args, **kwargs, **options)`` + where ``kwargs`` corresponds to any other parameters passed to `minimize` + (such as `bracket`, `tol`, etc.), except the `options` dict, which has + its contents also passed as `method` parameters pair by pair. The method + shall return an ``OptimizeResult`` object. + + The provided `method` callable must be able to accept (and possibly ignore) + arbitrary parameters; the set of parameters accepted by `minimize` may + expand in future versions and then these parameters will be passed to + the method. You can find an example in the scipy.optimize tutorial. + + .. 
versionadded:: 0.11.0 + + Examples + -------- + Consider the problem of minimizing the following function. + + >>> def f(x): + ... return (x - 2) * x * (x + 2)**2 + + Using the *Brent* method, we find the local minimum as: + + >>> from scipy.optimize import minimize_scalar + >>> res = minimize_scalar(f) + >>> res.x + 1.28077640403 + + Using the *Bounded* method, we find a local minimum with specified + bounds as: + + >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded') + >>> res.x + -2.0000002026 + + """ + if not isinstance(args, tuple): + args = (args,) + + if callable(method): + meth = "_custom" + else: + meth = method.lower() + if options is None: + options = {} + + if tol is not None: + options = dict(options) + if meth == 'bounded' and 'xatol' not in options: + warn("Method 'bounded' does not support relative tolerance in x; " + "defaulting to absolute tolerance.", RuntimeWarning) + options['xatol'] = tol + elif meth == '_custom': + options.setdefault('tol', tol) + else: + options.setdefault('xtol', tol) + + if meth == '_custom': + return method(fun, args=args, bracket=bracket, bounds=bounds, **options) + elif meth == 'brent': + return _minimize_scalar_brent(fun, bracket, args, **options) + elif meth == 'bounded': + if bounds is None: + raise ValueError('The `bounds` parameter is mandatory for ' + 'method `bounded`.') + # replace boolean "disp" option, if specified, by an integer value, as + # expected by _minimize_scalar_bounded() + disp = options.get('disp') + if isinstance(disp, bool): + options['disp'] = 2 * int(disp) + return _minimize_scalar_bounded(fun, bounds, args, **options) + elif meth == 'golden': + return _minimize_scalar_golden(fun, bracket, args, **options) + else: + raise ValueError('Unknown solver %s' % method) + + +def standardize_bounds(bounds, x0, meth): + """Converts bounds to the form required by the solver.""" + if meth == 'trust-constr': + if not isinstance(bounds, Bounds): + lb, ub = old_bound_to_new(bounds) + bounds = Bounds(lb, ub) + elif meth in ('l-bfgs-b', 'tnc', 'slsqp'): + if isinstance(bounds, Bounds): + bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0]) + return bounds + + +def standardize_constraints(constraints, x0, meth): + """Converts constraints to the form required by the solver.""" + all_constraint_types = (NonlinearConstraint, LinearConstraint, dict) + new_constraint_types = all_constraint_types[:-1] + if isinstance(constraints, all_constraint_types): + constraints = [constraints] + constraints = list(constraints) # ensure it's a mutable sequence + + if meth == 'trust-constr': + for i, con in enumerate(constraints): + if not isinstance(con, new_constraint_types): + constraints[i] = old_constraint_to_new(i, con) + else: + # iterate over copy, changing original + for i, con in enumerate(list(constraints)): + if isinstance(con, new_constraint_types): + old_constraints = new_constraint_to_old(con, x0) + constraints[i] = old_constraints[0] + constraints.extend(old_constraints[1:]) # appends 1 if present + + return constraints diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_minimize.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_minimize.pyc new file mode 100644 index 0000000..4516bac Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_minimize.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_minpack.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/_minpack.so new file mode 100755 index 0000000..cb54dbc Binary 
files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_minpack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_nnls.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/_nnls.so new file mode 100755 index 0000000..f1013f5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_nnls.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_numdiff.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_numdiff.py new file mode 100644 index 0000000..01a3157 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_numdiff.py @@ -0,0 +1,639 @@ +"""Routines for numerical differentiation.""" + +from __future__ import division + +import numpy as np +from numpy.linalg import norm + +from scipy.sparse.linalg import LinearOperator +from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find +from ._group_columns import group_dense, group_sparse + +EPS = np.finfo(np.float64).eps + + +def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): + """Adjust final difference scheme to the presence of bounds. + + Parameters + ---------- + x0 : ndarray, shape (n,) + Point at which we wish to estimate derivative. + h : ndarray, shape (n,) + Desired finite difference steps. + num_steps : int + Number of `h` steps in one direction required to implement finite + difference scheme. For example, 2 means that we need to evaluate + f(x0 + 2 * h) or f(x0 - 2 * h) + scheme : {'1-sided', '2-sided'} + Whether steps in one or both directions are required. In other + words '1-sided' applies to forward and backward schemes, '2-sided' + applies to center schemes. + lb : ndarray, shape (n,) + Lower bounds on independent variables. + ub : ndarray, shape (n,) + Upper bounds on independent variables. + + Returns + ------- + h_adjusted : ndarray, shape (n,) + Adjusted step sizes. Step size decreases only if a sign flip or + switching to one-sided scheme doesn't allow to take a full step. + use_one_sided : ndarray of bool, shape (n,) + Whether to switch to one-sided scheme. Informative only for + ``scheme='2-sided'``. 
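+
+    Notes
+    -----
+    For a '1-sided' scheme the step sign is flipped when the default step
+    would leave the feasible region but a step in the opposite direction
+    fits; otherwise the step is shrunk to the available distance divided
+    by `num_steps`.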
+ """ + if scheme == '1-sided': + use_one_sided = np.ones_like(h, dtype=bool) + elif scheme == '2-sided': + h = np.abs(h) + use_one_sided = np.zeros_like(h, dtype=bool) + else: + raise ValueError("`scheme` must be '1-sided' or '2-sided'.") + + if np.all((lb == -np.inf) & (ub == np.inf)): + return h, use_one_sided + + h_total = h * num_steps + h_adjusted = h.copy() + + lower_dist = x0 - lb + upper_dist = ub - x0 + + if scheme == '1-sided': + x = x0 + h_total + violated = (x < lb) | (x > ub) + fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) + h_adjusted[violated & fitting] *= -1 + + forward = (upper_dist >= lower_dist) & ~fitting + h_adjusted[forward] = upper_dist[forward] / num_steps + backward = (upper_dist < lower_dist) & ~fitting + h_adjusted[backward] = -lower_dist[backward] / num_steps + elif scheme == '2-sided': + central = (lower_dist >= h_total) & (upper_dist >= h_total) + + forward = (upper_dist >= lower_dist) & ~central + h_adjusted[forward] = np.minimum( + h[forward], 0.5 * upper_dist[forward] / num_steps) + use_one_sided[forward] = True + + backward = (upper_dist < lower_dist) & ~central + h_adjusted[backward] = -np.minimum( + h[backward], 0.5 * lower_dist[backward] / num_steps) + use_one_sided[backward] = True + + min_dist = np.minimum(upper_dist, lower_dist) / num_steps + adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) + h_adjusted[adjusted_central] = min_dist[adjusted_central] + use_one_sided[adjusted_central] = False + + return h_adjusted, use_one_sided + + +relative_step = {"2-point": EPS**0.5, + "3-point": EPS**(1/3), + "cs": EPS**0.5} + + +def _compute_absolute_step(rel_step, x0, method): + if rel_step is None: + rel_step = relative_step[method] + sign_x0 = (x0 >= 0).astype(float) * 2 - 1 + return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0)) + + +def _prepare_bounds(bounds, x0): + lb, ub = [np.asarray(b, dtype=float) for b in bounds] + if lb.ndim == 0: + lb = np.resize(lb, x0.shape) + + if ub.ndim == 0: + ub = np.resize(ub, x0.shape) + + return lb, ub + + +def group_columns(A, order=0): + """Group columns of a 2-d matrix for sparse finite differencing [1]_. + + Two columns are in the same group if in each row at least one of them + has zero. A greedy sequential algorithm is used to construct groups. + + Parameters + ---------- + A : array_like or sparse matrix, shape (m, n) + Matrix of which to group columns. + order : int, iterable of int with shape (n,) or None + Permutation array which defines the order of columns enumeration. + If int or None, a random permutation is used with `order` used as + a random seed. Default is 0, that is use a random permutation but + guarantee repeatability. + + Returns + ------- + groups : ndarray of int, shape (n,) + Contains values from 0 to n_groups-1, where n_groups is the number + of found groups. Each value ``groups[i]`` is an index of a group to + which i-th column assigned. The procedure was helpful only if + n_groups is significantly less than n. + + References + ---------- + .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13 (1974), pp. 117-120. 
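+
+    Examples
+    --------
+    A minimal sketch: columns 0 and 1 below never have a non-zero in the
+    same row and may share a group, while columns 1 and 2 conflict in the
+    second row (the exact labels depend on the `order` permutation):
+
+    >>> import numpy as np
+    >>> from scipy.optimize._numdiff import group_columns
+    >>> A = np.array([[1, 0, 0],
+    ...               [0, 1, 1]])
+    >>> groups = group_columns(A)  # e.g. array([0, 0, 1])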
+ """ + if issparse(A): + A = csc_matrix(A) + else: + A = np.atleast_2d(A) + A = (A != 0).astype(np.int32) + + if A.ndim != 2: + raise ValueError("`A` must be 2-dimensional.") + + m, n = A.shape + + if order is None or np.isscalar(order): + rng = np.random.RandomState(order) + order = rng.permutation(n) + else: + order = np.asarray(order) + if order.shape != (n,): + raise ValueError("`order` has incorrect shape.") + + A = A[:, order] + + if issparse(A): + groups = group_sparse(m, n, A.indices, A.indptr) + else: + groups = group_dense(m, n, A) + + groups[order] = groups.copy() + + return groups + + +def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None, + bounds=(-np.inf, np.inf), sparsity=None, + as_linear_operator=False, args=(), kwargs={}): + """Compute finite difference approximation of the derivatives of a + vector-valued function. + + If a function maps from R^n to R^m, its derivatives form m-by-n matrix + called the Jacobian, where an element (i, j) is a partial derivative of + f[i] with respect to x[j]. + + Parameters + ---------- + fun : callable + Function of which to estimate the derivatives. The argument x + passed to this function is ndarray of shape (n,) (never a scalar + even if n=1). It must return 1-d array_like of shape (m,) or a scalar. + x0 : array_like of shape (n,) or float + Point at which to estimate the derivatives. Float will be converted + to a 1-d array. + method : {'3-point', '2-point', 'cs'}, optional + Finite difference method to use: + - '2-point' - use the first order accuracy forward or backward + difference. + - '3-point' - use central difference in interior points and the + second order accuracy forward or backward difference + near the boundary. + - 'cs' - use a complex-step finite difference scheme. This assumes + that the user function is real-valued and can be + analytically continued to the complex plane. Otherwise, + produces bogus results. + rel_step : None or array_like, optional + Relative step size to use. The absolute step size is computed as + ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to + fit into the bounds. For ``method='3-point'`` the sign of `h` is + ignored. If None (default) then step is selected automatically, + see Notes. + f0 : None or array_like, optional + If not None it is assumed to be equal to ``fun(x0)``, in this case + the ``fun(x0)`` is not called. Default is None. + bounds : tuple of array_like, optional + Lower and upper bounds on independent variables. Defaults to no bounds. + Each bound must match the size of `x0` or be a scalar, in the latter + case the bound will be the same for all variables. Use it to limit the + range of function evaluation. Bounds checking is not implemented + when `as_linear_operator` is True. + sparsity : {None, array_like, sparse matrix, 2-tuple}, optional + Defines a sparsity structure of the Jacobian matrix. If the Jacobian + matrix is known to have only few non-zero elements in each row, then + it's possible to estimate its several columns by a single function + evaluation [3]_. To perform such economic computations two ingredients + are required: + + * structure : array_like or sparse matrix of shape (m, n). A zero + element means that a corresponding element of the Jacobian + identically equals to zero. + * groups : array_like of shape (n,). A column grouping for a given + sparsity structure, use `group_columns` to obtain it. + + A single array or a sparse matrix is interpreted as a sparsity + structure, and groups are computed inside the function. 
A tuple is + interpreted as (structure, groups). If None (default), a standard + dense differencing will be used. + + Note, that sparse differencing makes sense only for large Jacobian + matrices where each row contains few non-zero elements. + as_linear_operator : bool, optional + When True the function returns an `scipy.sparse.linalg.LinearOperator`. + Otherwise it returns a dense array or a sparse matrix depending on + `sparsity`. The linear operator provides an efficient way of computing + ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow + direct access to individual elements of the matrix. By default + `as_linear_operator` is False. + args, kwargs : tuple and dict, optional + Additional arguments passed to `fun`. Both empty by default. + The calling signature is ``fun(x, *args, **kwargs)``. + + Returns + ------- + J : {ndarray, sparse matrix, LinearOperator} + Finite difference approximation of the Jacobian matrix. + If `as_linear_operator` is True returns a LinearOperator + with shape (m, n). Otherwise it returns a dense array or sparse + matrix depending on how `sparsity` is defined. If `sparsity` + is None then a ndarray with shape (m, n) is returned. If + `sparsity` is not None returns a csr_matrix with shape (m, n). + For sparse matrices and linear operators it is always returned as + a 2-dimensional structure, for ndarrays, if m=1 it is returned + as a 1-dimensional gradient array with shape (n,). + + See Also + -------- + check_derivative : Check correctness of a function computing derivatives. + + Notes + ----- + If `rel_step` is not provided, it assigned to ``EPS**(1/s)``, where EPS is + machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for + '3-point' method. Such relative step approximately minimizes a sum of + truncation and round-off errors, see [1]_. + + A finite difference scheme for '3-point' method is selected automatically. + The well-known central difference scheme is used for points sufficiently + far from the boundary, and 3-point forward or backward scheme is used for + points near the boundary. Both schemes have the second-order accuracy in + terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point + forward and backward difference schemes. + + For dense differencing when m=1 Jacobian is returned with a shape (n,), + on the other hand when n=1 Jacobian is returned with a shape (m, 1). + Our motivation is the following: a) It handles a case of gradient + computation (m=1) in a conventional way. b) It clearly separates these two + different cases. b) In all cases np.atleast_2d can be called to get 2-d + Jacobian with correct dimensions. + + References + ---------- + .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific + Computing. 3rd edition", sec. 5.7. + + .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13 (1974), pp. 117-120. + + .. [3] B. Fornberg, "Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import approx_derivative + >>> + >>> def f(x, c1, c2): + ... return np.array([x[0] * np.sin(c1 * x[1]), + ... x[0] * np.cos(c2 * x[1])]) + ... + >>> x0 = np.array([1.0, 0.5 * np.pi]) + >>> approx_derivative(f, x0, args=(1, 2)) + array([[ 1., 0.], + [-1., 0.]]) + + Bounds can be used to limit the region of function evaluation. 
+ In the example below we compute left and right derivative at point 1.0. + + >>> def g(x): + ... return x**2 if x >= 1 else x + ... + >>> x0 = 1.0 + >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) + array([ 1.]) + >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) + array([ 2.]) + """ + if method not in ['2-point', '3-point', 'cs']: + raise ValueError("Unknown method '%s'. " % method) + + x0 = np.atleast_1d(x0) + if x0.ndim > 1: + raise ValueError("`x0` must have at most 1 dimension.") + + lb, ub = _prepare_bounds(bounds, x0) + + if lb.shape != x0.shape or ub.shape != x0.shape: + raise ValueError("Inconsistent shapes between bounds and `x0`.") + + if as_linear_operator and not (np.all(np.isinf(lb)) + and np.all(np.isinf(ub))): + raise ValueError("Bounds not supported when " + "`as_linear_operator` is True.") + + def fun_wrapped(x): + f = np.atleast_1d(fun(x, *args, **kwargs)) + if f.ndim > 1: + raise RuntimeError("`fun` return value has " + "more than 1 dimension.") + return f + + if f0 is None: + f0 = fun_wrapped(x0) + else: + f0 = np.atleast_1d(f0) + if f0.ndim > 1: + raise ValueError("`f0` passed has more than 1 dimension.") + + if np.any((x0 < lb) | (x0 > ub)): + raise ValueError("`x0` violates bound constraints.") + + if as_linear_operator: + if rel_step is None: + rel_step = relative_step[method] + + return _linear_operator_difference(fun_wrapped, x0, + f0, rel_step, method) + else: + h = _compute_absolute_step(rel_step, x0, method) + + if method == '2-point': + h, use_one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '1-sided', lb, ub) + elif method == '3-point': + h, use_one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + elif method == 'cs': + use_one_sided = False + + if sparsity is None: + return _dense_difference(fun_wrapped, x0, f0, h, + use_one_sided, method) + else: + if not issparse(sparsity) and len(sparsity) == 2: + structure, groups = sparsity + else: + structure = sparsity + groups = group_columns(sparsity) + + if issparse(structure): + structure = csc_matrix(structure) + else: + structure = np.atleast_2d(structure) + + groups = np.atleast_1d(groups) + return _sparse_difference(fun_wrapped, x0, f0, h, + use_one_sided, structure, + groups, method) + + +def _linear_operator_difference(fun, x0, f0, h, method): + m = f0.size + n = x0.size + + if method == '2-point': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = h / norm(p) + x = x0 + dx*p + df = fun(x) - f0 + return df / dx + + elif method == '3-point': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = 2*h / norm(p) + x1 = x0 - (dx/2)*p + x2 = x0 + (dx/2)*p + f1 = fun(x1) + f2 = fun(x2) + df = f2 - f1 + return df / dx + + elif method == 'cs': + def matvec(p): + if np.array_equal(p, np.zeros_like(p)): + return np.zeros(m) + dx = h / norm(p) + x = x0 + dx*p*1.j + f1 = fun(x) + df = f1.imag + return df / dx + + else: + raise RuntimeError("Never be here.") + + return LinearOperator((m, n), matvec) + + +def _dense_difference(fun, x0, f0, h, use_one_sided, method): + m = f0.size + n = x0.size + J_transposed = np.empty((n, m)) + h_vecs = np.diag(h) + + for i in range(h.size): + if method == '2-point': + x = x0 + h_vecs[i] + dx = x[i] - x0[i] # Recompute dx as exactly representable number. 
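+            # Annotation (added in this copy): first-order difference
+            # quotient; J[:, i] is approximated by
+            # (f(x0 + h[i] * e_i) - f(x0)) / h[i], where h[i] may be
+            # negative when a backward step was chosen near a bound.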
+ df = fun(x) - f0 + elif method == '3-point' and use_one_sided[i]: + x1 = x0 + h_vecs[i] + x2 = x0 + 2 * h_vecs[i] + dx = x2[i] - x0[i] + f1 = fun(x1) + f2 = fun(x2) + df = -3.0 * f0 + 4 * f1 - f2 + elif method == '3-point' and not use_one_sided[i]: + x1 = x0 - h_vecs[i] + x2 = x0 + h_vecs[i] + dx = x2[i] - x1[i] + f1 = fun(x1) + f2 = fun(x2) + df = f2 - f1 + elif method == 'cs': + f1 = fun(x0 + h_vecs[i]*1.j) + df = f1.imag + dx = h_vecs[i, i] + else: + raise RuntimeError("Never be here.") + + J_transposed[i] = df / dx + + if m == 1: + J_transposed = np.ravel(J_transposed) + + return J_transposed.T + + +def _sparse_difference(fun, x0, f0, h, use_one_sided, + structure, groups, method): + m = f0.size + n = x0.size + row_indices = [] + col_indices = [] + fractions = [] + + n_groups = np.max(groups) + 1 + for group in range(n_groups): + # Perturb variables which are in the same group simultaneously. + e = np.equal(group, groups) + h_vec = h * e + if method == '2-point': + x = x0 + h_vec + dx = x - x0 + df = fun(x) - f0 + # The result is written to columns which correspond to perturbed + # variables. + cols, = np.nonzero(e) + # Find all non-zero elements in selected columns of Jacobian. + i, j, _ = find(structure[:, cols]) + # Restore column indices in the full array. + j = cols[j] + elif method == '3-point': + # Here we do conceptually the same but separate one-sided + # and two-sided schemes. + x1 = x0.copy() + x2 = x0.copy() + + mask_1 = use_one_sided & e + x1[mask_1] += h_vec[mask_1] + x2[mask_1] += 2 * h_vec[mask_1] + + mask_2 = ~use_one_sided & e + x1[mask_2] -= h_vec[mask_2] + x2[mask_2] += h_vec[mask_2] + + dx = np.zeros(n) + dx[mask_1] = x2[mask_1] - x0[mask_1] + dx[mask_2] = x2[mask_2] - x1[mask_2] + + f1 = fun(x1) + f2 = fun(x2) + + cols, = np.nonzero(e) + i, j, _ = find(structure[:, cols]) + j = cols[j] + + mask = use_one_sided[j] + df = np.empty(m) + + rows = i[mask] + df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows] + + rows = i[~mask] + df[rows] = f2[rows] - f1[rows] + elif method == 'cs': + f1 = fun(x0 + h_vec*1.j) + df = f1.imag + dx = h_vec + cols, = np.nonzero(e) + i, j, _ = find(structure[:, cols]) + j = cols[j] + else: + raise ValueError("Never be here.") + + # All that's left is to compute the fraction. We store i, j and + # fractions as separate arrays and later construct coo_matrix. + row_indices.append(i) + col_indices.append(j) + fractions.append(df[i] / dx[j]) + + row_indices = np.hstack(row_indices) + col_indices = np.hstack(col_indices) + fractions = np.hstack(fractions) + J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n)) + return csr_matrix(J) + + +def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(), + kwargs={}): + """Check correctness of a function computing derivatives (Jacobian or + gradient) by comparison with a finite difference approximation. + + Parameters + ---------- + fun : callable + Function of which to estimate the derivatives. The argument x + passed to this function is ndarray of shape (n,) (never a scalar + even if n=1). It must return 1-d array_like of shape (m,) or a scalar. + jac : callable + Function which computes Jacobian matrix of `fun`. It must work with + argument x the same way as `fun`. The return value must be array_like + or sparse matrix with an appropriate shape. + x0 : array_like of shape (n,) or float + Point at which to estimate the derivatives. Float will be converted + to 1-d array. + bounds : 2-tuple of array_like, optional + Lower and upper bounds on independent variables. 
Defaults to no bounds. + Each bound must match the size of `x0` or be a scalar, in the latter + case the bound will be the same for all variables. Use it to limit the + range of function evaluation. + args, kwargs : tuple and dict, optional + Additional arguments passed to `fun` and `jac`. Both empty by default. + The calling signature is ``fun(x, *args, **kwargs)`` and the same + for `jac`. + + Returns + ------- + accuracy : float + The maximum among all relative errors for elements with absolute values + higher than 1 and absolute errors for elements with absolute values + less or equal than 1. If `accuracy` is on the order of 1e-6 or lower, + then it is likely that your `jac` implementation is correct. + + See Also + -------- + approx_derivative : Compute finite difference approximation of derivative. + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import check_derivative + >>> + >>> + >>> def f(x, c1, c2): + ... return np.array([x[0] * np.sin(c1 * x[1]), + ... x[0] * np.cos(c2 * x[1])]) + ... + >>> def jac(x, c1, c2): + ... return np.array([ + ... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])], + ... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])] + ... ]) + ... + >>> + >>> x0 = np.array([1.0, 0.5 * np.pi]) + >>> check_derivative(f, jac, x0, args=(1, 2)) + 2.4492935982947064e-16 + """ + J_to_test = jac(x0, *args, **kwargs) + if issparse(J_to_test): + J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test, + args=args, kwargs=kwargs) + J_to_test = csr_matrix(J_to_test) + abs_err = J_to_test - J_diff + i, j, abs_err_data = find(abs_err) + J_diff_data = np.asarray(J_diff[i, j]).ravel() + return np.max(np.abs(abs_err_data) / + np.maximum(1, np.abs(J_diff_data))) + else: + J_diff = approx_derivative(fun, x0, bounds=bounds, + args=args, kwargs=kwargs) + abs_err = np.abs(J_to_test - J_diff) + return np.max(abs_err / np.maximum(1, np.abs(J_diff))) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_numdiff.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_numdiff.pyc new file mode 100644 index 0000000..1c6bf46 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_numdiff.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_remove_redundancy.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_remove_redundancy.py new file mode 100644 index 0000000..0fcc07d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_remove_redundancy.py @@ -0,0 +1,451 @@ +""" +Routines for removing redundant (linearly dependent) equations from linear +programming equality constraints. +""" +# Author: Matt Haberland + +from __future__ import division, print_function, absolute_import +import numpy as np +from scipy.linalg import svd +import scipy + + +def _row_count(A): + """ + Counts the number of nonzeros in each row of input array A. + Nonzeros are defined as any element with absolute value greater than + tol = 1e-13. This value should probably be an input to the function. + + Parameters + ---------- + A : 2-D array + An array representing a matrix + + Returns + ------- + rowcount : 1-D array + Number of nonzeros in each row of A + + """ + tol = 1e-13 + return np.array((abs(A) > tol).sum(axis=1)).flatten() + + +def _get_densest(A, eligibleRows): + """ + Returns the index of the densest row of A. Ignores rows that are not + eligible for consideration. 
+ + Parameters + ---------- + A : 2-D array + An array representing a matrix + eligibleRows : 1-D logical array + Values indicate whether the corresponding row of A is eligible + to be considered + + Returns + ------- + i_densest : int + Index of the densest row in A eligible for consideration + + """ + rowCounts = _row_count(A) + return np.argmax(rowCounts * eligibleRows) + + +def _remove_zero_rows(A, b): + """ + Eliminates trivial equations from system of equations defined by Ax = b + and identifies trivial infeasibilities + + Parameters + ---------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the removal operation + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + """ + status = 0 + message = "" + i_zero = _row_count(A) == 0 + A = A[np.logical_not(i_zero), :] + if not(np.allclose(b[i_zero], 0)): + status = 2 + message = "There is a zero row in A_eq with a nonzero corresponding " \ + "entry in b_eq. The problem is infeasible." + b = b[np.logical_not(i_zero)] + return A, b, status, message + + +def bg_update_dense(plu, perm_r, v, j): + LU, p = plu + + u = scipy.linalg.solve_triangular(LU, v[perm_r], lower=True, + unit_diagonal=True) + LU[:j+1, j] = u[:j+1] + l = u[j+1:] + piv = LU[j, j] + LU[j+1:, j] += (l/piv) + return LU, p + + +def _remove_redundancy_dense(A, rhs): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D sparse matrix + An matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ---------- + A : 2-D sparse matrix + A matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + + """ + tolapiv = 1e-8 + tolprimal = 1e-8 + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + v = list(range(m)) # Artificial column indices. + b = list(v) # Basis column indices. + # This is better as a list than a set because column order of basis matrix + # needs to be consistent. + k = set(range(m, m+n)) # Structural column indices. 
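+    # Annotation (added in this copy): columns 0..m-1 refer to the
+    # artificial identity block prepended below via
+    # np.hstack((np.eye(m), A)); columns m..m+n-1 are the columns of
+    # the original constraint matrix A.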
+ d = [] # Indices of dependent rows + lu = None + perm_r = None + + A_orig = A + A = np.hstack((np.eye(m), A)) + e = np.zeros(m) + + # Implements basic algorithm from [2] + # Uses some of the suggested improvements (removing zero rows and + # Bartels-Golub update idea). + # Removing column singletons would be easy, but it is not as important + # because the procedure is performed only on the equality constraint + # matrix from the original problem - not on the canonical form matrix, + # which would have many more column singletons due to slack variables + # from the inequality constraints. + # The thoughts on "crashing" the initial basis sound useful, but the + # description of the procedure seems to assume a lot of familiarity with + # the subject; it is not very explicit. I already went through enough + # trouble getting the basic algorithm working, so I was not interested in + # trying to decipher this, too. (Overall, the paper is fraught with + # mistakes and ambiguities - which is strange, because the rest of + # Andersen's papers are quite good.) + + B = A[:, b] + for i in v: + + e[i] = 1 + if i > 0: + e[i-1] = 0 + + try: # fails for i==0 and any time it gets ill-conditioned + j = b[i-1] + lu = bg_update_dense(lu, perm_r, A[:, j], i-1) + except Exception: + lu = scipy.linalg.lu_factor(B) + LU, p = lu + perm_r = list(range(m)) + for i1, i2 in enumerate(p): + perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1] + + pi = scipy.linalg.lu_solve(lu, e, trans=1) + + # not efficient, but this is not the time sink... + js = np.array(list(k-set(b))) + batch = 50 + dependent = True + + # This is a tiny bit faster than looping over columns indivually, + # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv: + for j_index in range(0, len(js), batch): + j_indices = js[np.arange(j_index, min(j_index+batch, len(js)))] + + c = abs(A[:, j_indices].transpose().dot(pi)) + if (c > tolapiv).any(): + j = js[j_index + np.argmax(c)] # very independent column + B[:, i] = A[:, j] + b[i] = j + dependent = False + break + if dependent: + bibar = pi.T.dot(rhs.reshape(-1, 1)) + bnorm = np.linalg.norm(rhs) + if abs(bibar)/(1+bnorm) > tolprimal: # inconsistent + status = 2 + message = inconsistent + return A_orig, rhs, status, message + else: # dependent + d.append(i) + + keep = set(range(m)) + keep = list(keep - set(d)) + return A_orig[keep, :], rhs[keep], status, message + + +def _remove_redundancy_sparse(A, rhs): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D sparse matrix + An matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D sparse matrix + A matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + + """ + + tolapiv = 1e-8 + tolprimal = 1e-8 + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. 
" + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + v = list(range(m)) # Artificial column indices. + b = list(v) # Basis column indices. + # This is better as a list than a set because column order of basis matrix + # needs to be consistent. + k = set(range(m, m+n)) # Structural column indices. + d = [] # Indices of dependent rows + + A_orig = A + A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc() + e = np.zeros(m) + + # Implements basic algorithm from [2] + # Uses only one of the suggested improvements (removing zero rows). + # Removing column singletons would be easy, but it is not as important + # because the procedure is performed only on the equality constraint + # matrix from the original problem - not on the canonical form matrix, + # which would have many more column singletons due to slack variables + # from the inequality constraints. + # The thoughts on "crashing" the initial basis sound useful, but the + # description of the procedure seems to assume a lot of familiarity with + # the subject; it is not very explicit. I already went through enough + # trouble getting the basic algorithm working, so I was not interested in + # trying to decipher this, too. (Overall, the paper is fraught with + # mistakes and ambiguities - which is strange, because the rest of + # Andersen's papers are quite good.) + # I tried and tried and tried to improve performance using the + # Bartels-Golub update. It works, but it's only practical if the LU + # factorization can be specialized as described, and that is not possible + # until the Scipy SuperLU interface permits control over column + # permutation - see issue #7700. + + for i in v: + B = A[:, b] + + e[i] = 1 + if i > 0: + e[i-1] = 0 + + pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1) + + js = list(k-set(b)) # not efficient, but this is not the time sink... + + # Due to overhead, it tends to be faster (for problems tested) to + # compute the full matrix-vector product rather than individual + # vector-vector products (with the chance of terminating as soon + # as any are nonzero). For very large matrices, it might be worth + # it to compute, say, 100 or 1000 at a time and stop when a nonzero + # is found. + + c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0] + if len(c) > 0: # independent + j = js[c[0]] + # in a previous commit, the previous line was changed to choose + # index j corresponding with the maximum dot product. + # While this avoided issues with almost + # singular matrices, it slowed the routine in most NETLIB tests. + # I think this is because these columns were denser than the + # first column with nonzero dot product (c[0]). + # It would be nice to have a heuristic that balances sparsity with + # high dot product, but I don't think it's worth the time to + # develop one right now. Bartels-Golub update is a much higher + # priority. 
+ b[i] = j # replace artificial column + else: + bibar = pi.T.dot(rhs.reshape(-1, 1)) + bnorm = np.linalg.norm(rhs) + if abs(bibar)/(1 + bnorm) > tolprimal: + status = 2 + message = inconsistent + return A_orig, rhs, status, message + else: # dependent + d.append(i) + + keep = set(range(m)) + keep = list(keep - set(d)) + return A_orig[keep, :], rhs[keep], status, message + + +def _remove_redundancy(A, b): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + + """ + + A, b, status, message = _remove_zero_rows(A, b) + + if status != 0: + return A, b, status, message + + U, s, Vh = svd(A) + eps = np.finfo(float).eps + tol = s.max() * max(A.shape) * eps + + m, n = A.shape + s_min = s[-1] if m <= n else 0 + + # this algorithm is faster than that of [2] when the nullspace is small + # but it could probably be improvement by randomized algorithms and with + # a sparse implementation. + # it relies on repeated singular value decomposition to find linearly + # dependent rows (as identified by columns of U that correspond with zero + # singular values). Unfortunately, only one row can be removed per + # decomposition (I tried otherwise; doing so can cause problems.) + # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds + # but that function is unreliable at finding singular values near zero. + # Finding max eigenvalue L of A A^T, then largest eigenvalue (and + # associated eigenvector) of -A A^T + L I (I is identity) via power + # iteration would also work in theory, but is only efficient if the + # smallest nonzero eigenvalue of A A^T is close to the largest nonzero + # eigenvalue. + + while abs(s_min) < tol: + v = U[:, -1] # TODO: return these so user can eliminate from problem? + # rows need to be represented in significant amount + eligibleRows = np.abs(v) > tol * 10e6 + if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol): + status = 4 + message = ("Due to numerical issues, redundant equality " + "constraints could not be removed automatically. " + "Try providing your constraint matrices as sparse " + "matrices to activate sparse presolve, try turning " + "off redundancy removal, or try turning off presolve " + "altogether.") + break + if np.any(np.abs(v.dot(b)) > tol): + status = 2 + message = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. 
" + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + break + + i_remove = _get_densest(A, eligibleRows) + A = np.delete(A, i_remove, axis=0) + b = np.delete(b, i_remove) + U, s, Vh = svd(A) + m, n = A.shape + s_min = s[-1] if m <= n else 0 + + return A, b, status, message diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_remove_redundancy.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_remove_redundancy.pyc new file mode 100644 index 0000000..0540761 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_remove_redundancy.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_root.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_root.py new file mode 100644 index 0000000..c85f74a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_root.py @@ -0,0 +1,640 @@ +""" +Unified interfaces to root finding algorithms. + +Functions +--------- +- root : find a root of a vector function. +""" +from __future__ import division, print_function, absolute_import + +__all__ = ['root'] + +import numpy as np + +from scipy._lib.six import callable + +from warnings import warn + +from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options +from .minpack import _root_hybr, leastsq +from ._spectral import _root_df_sane +from . import nonlin + + +def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, + options=None): + """ + Find a root of a vector function. + + Parameters + ---------- + fun : callable + A vector function to find a root of. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to the objective function and its Jacobian. + method : str, optional + Type of solver. Should be one of + + - 'hybr' :ref:`(see here) <optimize.root-hybr>` + - 'lm' :ref:`(see here) <optimize.root-lm>` + - 'broyden1' :ref:`(see here) <optimize.root-broyden1>` + - 'broyden2' :ref:`(see here) <optimize.root-broyden2>` + - 'anderson' :ref:`(see here) <optimize.root-anderson>` + - 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>` + - 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>` + - 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>` + - 'krylov' :ref:`(see here) <optimize.root-krylov>` + - 'df-sane' :ref:`(see here) <optimize.root-dfsane>` + + jac : bool or callable, optional + If `jac` is a Boolean and is True, `fun` is assumed to return the + value of Jacobian along with the objective function. If False, the + Jacobian will be estimated numerically. + `jac` can also be a callable returning the Jacobian of `fun`. In + this case, it must accept the same arguments as `fun`. + tol : float, optional + Tolerance for termination. For detailed control, use solver-specific + options. + callback : function, optional + Optional callback function. It is called on every iteration as + ``callback(x, f)`` where `x` is the current solution and `f` + the corresponding residual. For all methods but 'hybr' and 'lm'. + options : dict, optional + A dictionary of solver options. E.g. `xtol` or `maxiter`, see + :obj:`show_options()` for details. + + Returns + ------- + sol : OptimizeResult + The solution represented as a ``OptimizeResult`` object. 
+ Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the algorithm exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + See also + -------- + show_options : Additional options accepted by the solvers + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. The default method is *hybr*. + + Method *hybr* uses a modification of the Powell hybrid method as + implemented in MINPACK [1]_. + + Method *lm* solves the system of nonlinear equations in a least squares + sense using a modification of the Levenberg-Marquardt algorithm as + implemented in MINPACK [1]_. + + Method *df-sane* is a derivative-free spectral method. [3]_ + + Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*, + *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods, + with backtracking or full line searches [2]_. Each method corresponds + to a particular Jacobian approximations. See `nonlin` for details. + + - Method *broyden1* uses Broyden's first Jacobian approximation, it is + known as Broyden's good method. + - Method *broyden2* uses Broyden's second Jacobian approximation, it + is known as Broyden's bad method. + - Method *anderson* uses (extended) Anderson mixing. + - Method *Krylov* uses Krylov approximation for inverse Jacobian. It + is suitable for large-scale problem. + - Method *diagbroyden* uses diagonal Broyden Jacobian approximation. + - Method *linearmixing* uses a scalar Jacobian approximation. + - Method *excitingmixing* uses a tuned diagonal Jacobian + approximation. + + .. warning:: + + The algorithms implemented for methods *diagbroyden*, + *linearmixing* and *excitingmixing* may be useful for specific + problems, but whether they will work may depend strongly on the + problem. + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom. + 1980. User Guide for MINPACK-1. + .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear + Equations. Society for Industrial and Applied Mathematics. + <https://archive.siam.org/books/kelley/fr16/> + .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006). + + Examples + -------- + The following functions define a system of nonlinear equations and its + jacobian. + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + >>> def jac(x): + ... return np.array([[1 + 1.5 * (x[0] - x[1])**2, + ... -1.5 * (x[0] - x[1])**2], + ... [-1.5 * (x[1] - x[0])**2, + ... 1 + 1.5 * (x[1] - x[0])**2]]) + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr') + >>> sol.x + array([ 0.8411639, 0.1588361]) + + """ + if not isinstance(args, tuple): + args = (args,) + + meth = method.lower() + if options is None: + options = {} + + if callback is not None and meth in ('hybr', 'lm'): + warn('Method %s does not accept callback.' 
% method, + RuntimeWarning) + + # fun also returns the jacobian + if not callable(jac) and meth in ('hybr', 'lm'): + if bool(jac): + fun = MemoizeJac(fun) + jac = fun.derivative + else: + jac = None + + # set default tolerances + if tol is not None: + options = dict(options) + if meth in ('hybr', 'lm'): + options.setdefault('xtol', tol) + elif meth in ('df-sane',): + options.setdefault('ftol', tol) + elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'krylov'): + options.setdefault('xtol', tol) + options.setdefault('xatol', np.inf) + options.setdefault('ftol', np.inf) + options.setdefault('fatol', np.inf) + + if meth == 'hybr': + sol = _root_hybr(fun, x0, args=args, jac=jac, **options) + elif meth == 'lm': + sol = _root_leastsq(fun, x0, args=args, jac=jac, **options) + elif meth == 'df-sane': + _warn_jac_unused(jac, method) + sol = _root_df_sane(fun, x0, args=args, callback=callback, + **options) + elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'krylov'): + _warn_jac_unused(jac, method) + sol = _root_nonlin_solve(fun, x0, args=args, jac=jac, + _method=meth, _callback=callback, + **options) + else: + raise ValueError('Unknown solver %s' % method) + + return sol + + +def _warn_jac_unused(jac, method): + if jac is not None: + warn('Method %s does not use the jacobian (jac).' % (method,), + RuntimeWarning) + + +def _root_leastsq(func, x0, args=(), jac=None, + col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08, + gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None, + **unknown_options): + """ + Solve for least squares with Levenberg-Marquardt + + Options + ------- + col_deriv : bool + non-zero to specify that the Jacobian function computes derivatives + down the columns (faster, because there is no transpose operation). + ftol : float + Relative error desired in the sum of squares. + xtol : float + Relative error desired in the approximate solution. + gtol : float + Orthogonality desired between the function vector and the columns + of the Jacobian. + maxiter : int + The maximum number of calls to the function. If zero, then + 100*(N+1) is the maximum where N is the number of elements in x0. + epsfcn : float + A suitable step length for the forward-difference approximation of + the Jacobian (for Dfun=None). If epsfcn is less than the machine + precision, it is assumed that the relative errors in the functions + are of the order of the machine precision. + factor : float + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. + diag : sequence + N positive entries that serve as a scale factors for the variables. 
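+
+    Examples
+    --------
+    A hedged sketch added in this vendored copy (not part of upstream
+    SciPy): a small square system solved in a least-squares sense via
+    ``root(..., method='lm')``.
+
+    >>> import numpy as np
+    >>> from scipy import optimize
+    >>> sol = optimize.root(lambda x: [x[0]**2 - 1.0, x[1] - 2.0],
+    ...                     [2.0, 0.0], method='lm')
+    >>> np.allclose(sol.x, [1.0, 2.0])
+    True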
+ """ + + _check_unknown_options(unknown_options) + x, cov_x, info, msg, ier = leastsq(func, x0, args=args, Dfun=jac, + full_output=True, + col_deriv=col_deriv, xtol=xtol, + ftol=ftol, gtol=gtol, + maxfev=maxiter, epsfcn=eps, + factor=factor, diag=diag) + sol = OptimizeResult(x=x, message=msg, status=ier, + success=ier in (1, 2, 3, 4), cov_x=cov_x, + fun=info.pop('fvec')) + sol.update(info) + return sol + + +def _root_nonlin_solve(func, x0, args=(), jac=None, + _callback=None, _method=None, + nit=None, disp=False, maxiter=None, + ftol=None, fatol=None, xtol=None, xatol=None, + tol_norm=None, line_search='armijo', jac_options=None, + **unknown_options): + _check_unknown_options(unknown_options) + + f_tol = fatol + f_rtol = ftol + x_tol = xatol + x_rtol = xtol + verbose = disp + if jac_options is None: + jac_options = dict() + + jacobian = {'broyden1': nonlin.BroydenFirst, + 'broyden2': nonlin.BroydenSecond, + 'anderson': nonlin.Anderson, + 'linearmixing': nonlin.LinearMixing, + 'diagbroyden': nonlin.DiagBroyden, + 'excitingmixing': nonlin.ExcitingMixing, + 'krylov': nonlin.KrylovJacobian + }[_method] + + if args: + if jac: + def f(x): + return func(x, *args)[0] + else: + def f(x): + return func(x, *args) + else: + f = func + + x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options), + iter=nit, verbose=verbose, + maxiter=maxiter, f_tol=f_tol, + f_rtol=f_rtol, x_tol=x_tol, + x_rtol=x_rtol, tol_norm=tol_norm, + line_search=line_search, + callback=_callback, full_output=True, + raise_exception=False) + sol = OptimizeResult(x=x) + sol.update(info) + return sol + +def _root_broyden1_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden + matrix stays low. Can either be a string giving the + name of the method, or a tuple of the form ``(method, + param1, param2, ...)`` that gives the name of the + method and values for additional parameters. + + Methods available: + - ``restart``: drop all matrix columns. Has no + extra parameters. + - ``simple``: drop oldest matrix column. Has no + extra parameters. + - ``svd``: keep only the most significant SVD + components. 
+ Extra parameters: + - ``to_retain``: number of SVD components to + retain when rank reduction is done. + Default is ``max_rank - 2``. + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (ie., no rank reduction). + """ + pass + +def _root_broyden2_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden + matrix stays low. Can either be a string giving the + name of the method, or a tuple of the form ``(method, + param1, param2, ...)`` that gives the name of the + method and values for additional parameters. + + Methods available: + - ``restart``: drop all matrix columns. Has no + extra parameters. + - ``simple``: drop oldest matrix column. Has no + extra parameters. + - ``svd``: keep only the most significant SVD + components. + Extra parameters: + - ``to_retain``: number of SVD components to + retain when rank reduction is done. + Default is ``max_rank - 2``. + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (ie., no rank reduction). + """ + pass + +def _root_anderson_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. 
+ line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + M : float, optional + Number of previous vectors to retain. Defaults to 5. + w0 : float, optional + Regularization parameter for numerical stability. + Compared to unity, good values of the order of 0.01. + """ + pass + +def _root_linearmixing_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, ``NoConvergence`` is raised. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + initial guess for the jacobian is (-1/alpha). + """ + pass + +def _root_diagbroyden_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + initial guess for the jacobian is (-1/alpha). + """ + pass + +def _root_excitingmixing_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. 
+ disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + alpha : float, optional + Initial Jacobian approximation is (-1/alpha). + alphamax : float, optional + The entries of the diagonal Jacobian are kept in the range + ``[alpha, alphamax]``. + """ + pass + +def _root_krylov_doc(): + """ + Options + ------- + nit : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + disp : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + ftol : float, optional + Relative tolerance for the residual. If omitted, not used. + fatol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + xtol : float, optional + Relative minimum step size. If omitted, not used. + xatol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in + the direction given by the Jacobian approximation. Defaults to + 'armijo'. + jac_options : dict, optional + Options for the respective Jacobian approximation. + + rdiff : float, optional + Relative step size to use in numerical differentiation. + method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function + Krylov method to use to approximate the Jacobian. + Can be a string, or a function implementing the same + interface as the iterative solvers in + `scipy.sparse.linalg`. + + The default is `scipy.sparse.linalg.lgmres`. + inner_M : LinearOperator or InverseJacobian + Preconditioner for the inner Krylov iteration. + Note that you can use also inverse Jacobians as (adaptive) + preconditioners. For example, + + >>> jac = BroydenFirst() + >>> kjac = KrylovJacobian(inner_M=jac.inverse). + + If the preconditioner has a method named 'update', it will + be called as ``update(x, f)`` after each nonlinear step, + with ``x`` giving the current point, and ``f`` the current + function value. + inner_tol, inner_maxiter, ... + Parameters to pass on to the "inner" Krylov solver. + See `scipy.sparse.linalg.gmres` for details. 
+ outer_k : int, optional + Size of the subspace kept across LGMRES nonlinear + iterations. + + See `scipy.sparse.linalg.lgmres` for details. + """ + pass diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_root.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_root.pyc new file mode 100644 index 0000000..d7435c2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_root.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_root_scalar.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_root_scalar.py new file mode 100644 index 0000000..fe62676 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_root_scalar.py @@ -0,0 +1,460 @@ +""" +Unified interfaces to root finding algorithms for real or complex +scalar functions. + +Functions +--------- +- root : find a root of a scalar function. +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy._lib.six import callable + +from . import zeros as optzeros + +__all__ = ['root_scalar'] + + +class MemoizeDer(object): + """Decorator that caches the value and derivative(s) of function each + time it is called. + + This is a simplistic memoizer that calls and caches a single value + of `f(x, *args)`. + It assumes that `args` does not change between invocations. + It supports the use case of a root-finder where `args` is fixed, + `x` changes, and only rarely, if at all, does x assume the same value + more than once.""" + def __init__(self, fun): + self.fun = fun + self.vals = None + self.x = None + self.n_calls = 0 + + def __call__(self, x, *args): + r"""Calculate f or use cached value if available""" + # Derivative may be requested before the function itself, always check + if self.vals is None or x != self.x: + fg = self.fun(x, *args) + self.x = x + self.n_calls += 1 + self.vals = fg[:] + return self.vals[0] + + def fprime(self, x, *args): + r"""Calculate f' or use a cached value if available""" + if self.vals is None or x != self.x: + self(x, *args) + return self.vals[1] + + def fprime2(self, x, *args): + r"""Calculate f'' or use a cached value if available""" + if self.vals is None or x != self.x: + self(x, *args) + return self.vals[2] + + def ncalls(self): + return self.n_calls + + +def root_scalar(f, args=(), method=None, bracket=None, + fprime=None, fprime2=None, + x0=None, x1=None, + xtol=None, rtol=None, maxiter=None, + options=None): + """ + Find a root of a scalar function. + + Parameters + ---------- + f : callable + A function to find a root of. + args : tuple, optional + Extra arguments passed to the objective function and its derivative(s). + method : str, optional + Type of solver. Should be one of + + - 'bisect' :ref:`(see here) <optimize.root_scalar-bisect>` + - 'brentq' :ref:`(see here) <optimize.root_scalar-brentq>` + - 'brenth' :ref:`(see here) <optimize.root_scalar-brenth>` + - 'ridder' :ref:`(see here) <optimize.root_scalar-ridder>` + - 'toms748' :ref:`(see here) <optimize.root_scalar-toms748>` + - 'newton' :ref:`(see here) <optimize.root_scalar-newton>` + - 'secant' :ref:`(see here) <optimize.root_scalar-secant>` + - 'halley' :ref:`(see here) <optimize.root_scalar-halley>` + + bracket: A sequence of 2 floats, optional + An interval bracketing a root. `f(x, *args)` must have different + signs at the two endpoints. + x0 : float, optional + Initial guess. + x1 : float, optional + A second guess. 
+ fprime : bool or callable, optional + If `fprime` is a boolean and is True, `f` is assumed to return the + value of derivative along with the objective function. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + fprime2 : bool or callable, optional + If `fprime2` is a boolean and is True, `f` is assumed to return the + value of 1st and 2nd derivatives along with the objective function. + `fprime2` can also be a callable returning the 2nd derivative of `f`. + In this case, it must accept the same arguments as `f`. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options : dict, optional + A dictionary of solver options. E.g. ``k``, see + :obj:`show_options()` for details. + + Returns + ------- + sol : RootResults + The solution represented as a ``RootResults`` object. + Important attributes are: ``root`` the solution , ``converged`` a + boolean flag indicating if the algorithm exited successfully and + ``flag`` which describes the cause of the termination. See + `RootResults` for a description of other attributes. + + See also + -------- + show_options : Additional options accepted by the solvers + root : Find a root of a vector function. + + Notes + ----- + This section describes the available solvers that can be selected by the + 'method' parameter. + + The default is to use the best method available for the situation + presented. + If a bracket is provided, it may use one of the bracketing methods. + If a derivative and an initial value are specified, it may + select one of the derivative-based methods. + If no method is judged applicable, it will raise an Exception. + + + Examples + -------- + + Find the root of a simple cubic + + >>> from scipy import optimize + >>> def f(x): + ... return (x**3 - 1) # only one real root at x = 1 + + >>> def fprime(x): + ... return 3*x**2 + + The `brentq` method takes as input a bracket + + >>> sol = optimize.root_scalar(f, bracket=[0, 3], method='brentq') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 10, 11) + + The `newton` method takes as input a single point and uses the derivative(s) + + >>> sol = optimize.root_scalar(f, x0=0.2, fprime=fprime, method='newton') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 11, 22) + + The function can provide the value and derivative(s) in a single call. + + >>> def f_p_pp(x): + ... 
return (x**3 - 1), 3*x**2, 6*x + + >>> sol = optimize.root_scalar(f_p_pp, x0=0.2, fprime=True, method='newton') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 11, 11) + + >>> sol = optimize.root_scalar(f_p_pp, x0=0.2, fprime=True, fprime2=True, method='halley') + >>> sol.root, sol.iterations, sol.function_calls + (1.0, 7, 8) + + + """ + if not isinstance(args, tuple): + args = (args,) + + if options is None: + options = {} + + # fun also returns the derivative(s) + is_memoized = False + if fprime2 is not None and not callable(fprime2): + if bool(fprime2): + f = MemoizeDer(f) + is_memoized = True + fprime2 = f.fprime2 + fprime = f.fprime + else: + fprime2 = None + if fprime is not None and not callable(fprime): + if bool(fprime): + f = MemoizeDer(f) + is_memoized = True + fprime = f.fprime + else: + fprime = None + + # respect solver-specific default tolerances - only pass in if actually set + kwargs = {} + for k in ['xtol', 'rtol', 'maxiter']: + v = locals().get(k) + if v is not None: + kwargs[k] = v + + # Set any solver-specific options + if options: + kwargs.update(options) + # Always request full_output from the underlying method as _root_scalar + # always returns a RootResults object + kwargs.update(full_output=True, disp=False) + + # Pick a method if not specified. + # Use the "best" method available for the situation. + if not method: + if bracket: + method = 'brentq' + elif x0 is not None: + if fprime: + if fprime2: + method = 'halley' + else: + method = 'newton' + else: + method = 'secant' + if not method: + raise ValueError('Unable to select a solver as neither bracket ' + 'nor starting point provided.') + + meth = method.lower() + map2underlying = {'halley': 'newton', 'secant': 'newton'} + + try: + methodc = getattr(optzeros, map2underlying.get(meth, meth)) + except AttributeError: + raise ValueError('Unknown solver %s' % meth) + + if meth in ['bisect', 'ridder', 'brentq', 'brenth', 'toms748']: + if not isinstance(bracket, (list, tuple, np.ndarray)): + raise ValueError('Bracket needed for %s' % method) + + a, b = bracket[:2] + r, sol = methodc(f, a, b, args=args, **kwargs) + elif meth in ['secant']: + if x0 is None: + raise ValueError('x0 must not be None for %s' % method) + if x1 is None: + raise ValueError('x1 must not be None for %s' % method) + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=None, fprime2=None, + x1=x1, **kwargs) + elif meth in ['newton']: + if x0 is None: + raise ValueError('x0 must not be None for %s' % method) + if not fprime: + raise ValueError('fprime must be specified for %s' % method) + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=None, + **kwargs) + elif meth in ['halley']: + if x0 is None: + raise ValueError('x0 must not be None for %s' % method) + if not fprime: + raise ValueError('fprime must be specified for %s' % method) + if not fprime2: + raise ValueError('fprime2 must be specified for %s' % method) + if 'xtol' in kwargs: + kwargs['tol'] = kwargs.pop('xtol') + r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=fprime2, **kwargs) + else: + raise ValueError('Unknown solver %s' % method) + + if is_memoized: + # Replace the function_calls count with the memoized count. + # Avoids double and triple-counting. 
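+        # The memoizer increments n_calls only when f is evaluated at a new x,
+        # so this count reflects unique objective evaluations.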
+ n_calls = f.n_calls + sol.function_calls = n_calls + + return sol + + +def _root_scalar_brentq_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + + +def _root_scalar_brenth_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + +def _root_scalar_toms748_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + + +def _root_scalar_secant_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + x1 : float, required + A second guess. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + + +def _root_scalar_newton_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function and its derivative. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + fprime : bool or callable, optional + If `fprime` is a boolean and is True, `f` is assumed to return the + value of derivative along with the objective function. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + + +def _root_scalar_halley_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function and its derivatives. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + x0 : float, required + Initial guess. + fprime : bool or callable, required + If `fprime` is a boolean and is True, `f` is assumed to return the + value of derivative along with the objective function. + `fprime` can also be a callable returning the derivative of `f`. In + this case, it must accept the same arguments as `f`. + fprime2 : bool or callable, required + If `fprime2` is a boolean and is True, `f` is assumed to return the + value of 1st and 2nd derivatives along with the objective function. 
+ `fprime2` can also be a callable returning the 2nd derivative of `f`. + In this case, it must accept the same arguments as `f`. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + + +def _root_scalar_ridder_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass + + +def _root_scalar_bisect_doc(): + r""" + Options + ------- + args : tuple, optional + Extra arguments passed to the objective function. + xtol : float, optional + Tolerance (absolute) for termination. + rtol : float, optional + Tolerance (relative) for termination. + maxiter : int, optional + Maximum number of iterations. + options: dict, optional + Specifies any method-specific options not covered above + + """ + pass diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_root_scalar.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_root_scalar.pyc new file mode 100644 index 0000000..eecd535 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_root_scalar.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo.py new file mode 100644 index 0000000..35b7ee5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo.py @@ -0,0 +1,1655 @@ +""" +shgo: The simplicial homology global optimisation algorithm +""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +import time +import logging +import warnings +from scipy import spatial +from scipy.optimize import OptimizeResult, minimize +from scipy.optimize._shgo_lib import sobol_seq +from scipy.optimize._shgo_lib.triangulation import Complex + + +__all__ = ['shgo'] + + +def shgo(func, bounds, args=(), constraints=None, n=100, iters=1, callback=None, + minimizer_kwargs=None, options=None, sampling_method='simplicial'): + """ + Finds the global minimum of a function using SHG optimization. + + SHGO stands for "simplicial homology global optimization". + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. + bounds : sequence + Bounds for variables. ``(min, max)`` pairs for each element in ``x``, + defining the lower and upper bounds for the optimizing argument of + `func`. It is required to have ``len(bounds) == len(x)``. + ``len(bounds)`` is used to determine the number of parameters in ``x``. + Use ``None`` for one of min or max when there is no bound in that + direction. By default bounds are ``(None, None)``. + args : tuple, optional + Any additional fixed parameters needed to completely specify the + objective function. + constraints : dict or sequence of dict, optional + Constraints definition. + Function(s) ``R**n`` in the form:: + + g(x) <= 0 applied as g : R^n -> R^m + h(x) == 0 applied as h : R^n -> R^p + + Each constraint is defined in a dictionary with fields: + + type : str + Constraint type: 'eq' for equality, 'ineq' for inequality. 
+            fun : callable
+                The function defining the constraint.
+            jac : callable, optional
+                The Jacobian of `fun` (only for SLSQP).
+            args : sequence, optional
+                Extra arguments to be passed to the function and Jacobian.
+
+        Equality constraint means that the constraint function result is to
+        be zero whereas inequality means that it is to be non-negative.
+        Note that COBYLA only supports inequality constraints.
+
+        .. note::
+
+           Only the COBYLA and SLSQP local minimize methods currently
+           support constraint arguments. If the ``constraints`` sequence
+           used in the local optimization problem is not defined in
+           ``minimizer_kwargs`` and a constrained method is used then the
+           global ``constraints`` will be used.
+           (Defining a ``constraints`` sequence in ``minimizer_kwargs``
+           means that ``constraints`` will not be added so if equality
+           constraints and so forth need to be added then the inequality
+           functions in ``constraints`` need to be added to
+           ``minimizer_kwargs`` too).
+
+    n : int, optional
+        Number of sampling points used in the construction of the simplicial
+        complex. Note that this argument is only used for ``sobol`` and other
+        arbitrary `sampling_methods`.
+    iters : int, optional
+        Number of iterations used in the construction of the simplicial
+        complex.
+    callback : callable, optional
+        Called after each iteration, as ``callback(xk)``, where ``xk`` is the
+        current parameter vector.
+    minimizer_kwargs : dict, optional
+        Extra keyword arguments to be passed to the minimizer
+        ``scipy.optimize.minimize``. Some important options could be:
+
+            * method : str
+                The minimization method (e.g. ``SLSQP``).
+            * args : tuple
+                Extra arguments passed to the objective function (``func``) and
+                its derivatives (Jacobian, Hessian).
+            * options : dict, optional
+                Note that by default the tolerance is specified as
+                ``{ftol: 1e-12}``
+
+    options : dict, optional
+        A dictionary of solver options. Many of the options specified for the
+        global routine are also passed to the scipy.optimize.minimize routine.
+        The options that are also passed to the local routine are marked with
+        "(L)".
+
+        Stopping criteria: the algorithm will terminate if any of the
+        specified criteria are met. However, the default algorithm does not
+        require any to be specified:
+
+        * maxfev : int (L)
+            Maximum number of function evaluations in the feasible domain.
+            (Note only methods that support this option will terminate
+            the routine at precisely the specified value. Otherwise the
+            criterion will only terminate during a global iteration)
+        * f_min
+            Specify the minimum objective function value, if it is known.
+        * f_tol : float
+            Precision goal for the value of f in the stopping
+            criterion. Note that the global routine will also
+            terminate if a sampling point in the global routine is
+            within this tolerance.
+        * maxiter : int
+            Maximum number of iterations to perform.
+        * maxev : int
+            Maximum number of sampling evaluations to perform (includes
+            searching in infeasible points).
+        * maxtime : float
+            Maximum processing runtime allowed.
+        * minhgrd : int
+            Minimum homology group rank differential. The homology group of the
+            objective function is calculated (approximately) during every
+            iteration. The rank of this group has a one-to-one correspondence
+            with the number of locally convex subdomains in the objective
+            function (after adequate sampling points each of these subdomains
+            contains a unique global minimum). If the homology group rank
+            differential between iterations is ``minhgrd`` or lower, the
+            algorithm will terminate.
+
+        Objective function knowledge:
+
+        * symmetry : bool
+            Specify True if the objective function contains symmetric variables.
+            The search space (and therefore performance) is decreased by O(n!).
+
+        * jac : bool or callable, optional
+            Jacobian (gradient) of objective function. Only for CG, BFGS,
+            Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If ``jac`` is a
+            boolean and is True, ``fun`` is assumed to return the gradient along
+            with the objective function. If False, the gradient will be
+            estimated numerically. ``jac`` can also be a callable returning the
+            gradient of the objective. In this case, it must accept the same
+            arguments as ``fun``. (Passed to `scipy.optimize.minimize`
+            automatically)
+
+        * hess, hessp : callable, optional
+            Hessian (matrix of second-order derivatives) of objective function
+            or Hessian of objective function times an arbitrary vector p.
+            Only for Newton-CG, dogleg, trust-ncg. Only one of ``hessp`` or
+            ``hess`` needs to be given. If ``hess`` is provided, then
+            ``hessp`` will be ignored. If neither ``hess`` nor ``hessp`` is
+            provided, then the Hessian product will be approximated using
+            finite differences on ``jac``. ``hessp`` must compute the Hessian
+            times an arbitrary vector. (Passed to `scipy.optimize.minimize`
+            automatically)
+
+        Algorithm settings:
+
+        * minimize_every_iter : bool
+            If True then promising global sampling points will be passed to a
+            local minimisation routine every iteration. If False then only the
+            final minimiser pool will be run. Defaults to False.
+        * local_iter : int
+            Only evaluate a few of the best minimiser pool candidates every
+            iteration. If False all potential points are passed to the local
+            minimisation routine.
+        * infty_constraints : bool
+            If True then any sampling points generated which are outside the
+            feasible domain will be saved and given an objective function
+            value of ``inf``. If False then these points will be discarded.
+            Using this functionality could lead to higher performance with
+            respect to function evaluations before the global minimum is
+            found; specifying False will use less memory at the cost of a
+            slight decrease in performance. Defaults to True.
+
+        Feedback:
+
+        * disp : bool (L)
+            Set to True to print convergence messages.
+
+    sampling_method : str or function, optional
+        Current built-in sampling method options are ``sobol`` and
+        ``simplicial``. The default ``simplicial`` uses less memory and provides
+        the theoretical guarantee of convergence to the global minimum in finite
+        time. The ``sobol`` method is faster in terms of sampling point
+        generation at the cost of higher memory resources and the loss of
+        guaranteed convergence. It is more appropriate for most "easier"
+        problems where the convergence is relatively fast.
+        User defined sampling functions must accept two arguments of ``n``
+        sampling points of dimension ``dim`` per call and output an array of
+        sampling points with shape `n x dim`.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as a `OptimizeResult` object.
+        Important attributes are:
+        ``x`` the solution array corresponding to the global minimum,
+        ``fun`` the function output at the global solution,
+        ``xl`` an ordered list of local minima solutions,
+        ``funl`` the function output at the corresponding local solutions,
+        ``success`` a Boolean flag indicating if the optimizer exited
+        successfully,
+        ``message`` which describes the cause of the termination,
+        ``nfev`` the total number of objective function evaluations including
+        the sampling calls,
+        ``nlfev`` the total number of objective function evaluations
+        culminating from all local search optimisations,
+        ``nit`` number of iterations performed by the global routine.
+
+    Notes
+    -----
+    Global optimization using simplicial homology global optimisation [1]_.
+    Appropriate for solving general purpose NLP and blackbox optimisation
+    problems to global optimality (low dimensional problems).
+
+    In general, the optimization problems are of the form::
+
+        minimize f(x) subject to
+
+        g_i(x) >= 0,  i = 1,...,m
+        h_j(x)  = 0,  j = 1,...,p
+
+    where x is a vector of one or more variables. ``f(x)`` is the objective
+    function ``R^n -> R``, ``g_i(x)`` are the inequality constraints, and
+    ``h_j(x)`` are the equality constraints.
+
+    Optionally, the lower and upper bounds for each element in x can also be
+    specified using the `bounds` argument.
+
+    While most of the theoretical advantages of SHGO are only proven for when
+    ``f(x)`` is a Lipschitz smooth function, the algorithm is also proven to
+    converge to the global optimum for the more general case where ``f(x)`` is
+    non-continuous, non-convex and non-smooth, if the default sampling method
+    is used [1]_.
+
+    The local search method may be specified using the ``minimizer_kwargs``
+    parameter which is passed on to ``scipy.optimize.minimize``. By default
+    the ``SLSQP`` method is used. In general it is recommended to use the
+    ``SLSQP`` or ``COBYLA`` local minimization if inequality constraints
+    are defined for the problem since the other methods do not use constraints.
+
+    The ``sobol`` method points are generated using the Sobol (1967) [2]_
+    sequence. The primitive polynomials and various sets of initial direction
+    numbers for generating Sobol sequences are provided in [3]_ by Frances Kuo
+    and Stephen Joe. The original program sobol.cc (MIT) is available and
+    described at http://web.maths.unsw.edu.au/~fkuo/sobol/ and was translated
+    to Python 3 by Carl Sandrock 2016-03-31.
+
+    References
+    ----------
+    .. [1] Endres, SC, Sandrock, C, Focke, WW (2018) "A simplicial homology
+           algorithm for lipschitz optimisation", Journal of Global
+           Optimization.
+    .. [2] Sobol, IM (1967) "The distribution of points in a cube and the
+           approximate evaluation of integrals", USSR Comput. Math. Math. Phys.
+           7, 86-112.
+    .. [3] Joe, SW and Kuo, FY (2008) "Constructing Sobol sequences with
+           better two-dimensional projections", SIAM J. Sci. Comput. 30,
+           2635-2654.
+    .. [4] Hock, W and Schittkowski, K (1981) "Test examples for nonlinear
+           programming codes", Lecture Notes in Economics and Mathematical
+           Systems, 187. Springer-Verlag, New York.
+           http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
+    .. [5] Wales, DJ (2015) "Perspective: Insight into reaction coordinates and
+           dynamics from the potential energy landscape",
+           Journal of Chemical Physics, 142(13), 2015.
+
+    Examples
+    --------
+    First consider the problem of minimizing the Rosenbrock function, `rosen`:
+
+    >>> from scipy.optimize import rosen, shgo
+    >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
+    >>> result = shgo(rosen, bounds)
+    >>> result.x, result.fun
+    (array([ 1.,  1.,  1.,  1.,  1.]), 2.9203923741900809e-18)
+
+    Note that bounds determine the dimensionality of the objective
+    function and are therefore a required input, however you can specify
+    empty bounds using ``None`` or objects like ``np.inf`` which will be
+    converted to large float numbers.
+
+    >>> bounds = [(None, None), ]*4
+    >>> result = shgo(rosen, bounds)
+    >>> result.x
+    array([ 0.99999851,  0.99999704,  0.99999411,  0.9999882 ])
+
+    Next we consider the Eggholder function, a problem with several local
+    minima and one global minimum. We will demonstrate the use of arguments and
+    the capabilities of `shgo`.
+    (https://en.wikipedia.org/wiki/Test_functions_for_optimization)
+
+    >>> def eggholder(x):
+    ...     return (-(x[1] + 47.0)
+    ...             * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
+    ...             - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))
+    ...             )
+    ...
+    >>> bounds = [(-512, 512), (-512, 512)]
+
+    `shgo` has two built-in low discrepancy sampling sequences. First we will
+    input 30 initial sampling points of the Sobol sequence:
+
+    >>> result = shgo(eggholder, bounds, n=30, sampling_method='sobol')
+    >>> result.x, result.fun
+    (array([ 512.        ,  404.23180542]), -959.64066272085051)
+
+    `shgo` also returns any other local minima that were found; these can be
+    accessed using:
+
+    >>> result.xl
+    array([[ 512.        ,  404.23180542],
+           [ 283.07593402, -487.12566542],
+           [-294.66820039, -462.01964031],
+           [-105.87688985,  423.15324143],
+           [-242.97923629,  274.38032063],
+           [-506.25823477,    6.3131022 ],
+           [-408.71981195, -156.10117154],
+           [ 150.23210485,  301.31378508],
+           [  91.00922754, -391.28375925],
+           [ 202.8966344 , -269.38042147],
+           [ 361.66625957, -106.96490692],
+           [-219.40615102, -244.06022436],
+           [ 151.59603137, -100.61082677]])
+
+    >>> result.funl
+    array([-959.64066272, -718.16745962, -704.80659592, -565.99778097,
+           -559.78685655, -557.36868733, -507.87385942, -493.9605115 ,
+           -426.48799655, -421.15571437, -419.31194957, -410.98477763,
+           -202.53912972])
+
+    These results are useful in applications where there are many global minima
+    and the values of other global minima are desired or where the local minima
+    can provide insight into the system (for example morphologies
+    in physical chemistry [5]_).
+
+    If we want to find a larger number of local minima, we can increase the
+    number of sampling points or the number of iterations. We'll increase the
+    number of sampling points to 60 and the number of iterations from the
+    default of 1 to 5. This gives us 60 x 5 = 300 initial sampling points.
+
+    >>> result_2 = shgo(eggholder, bounds, n=60, iters=5, sampling_method='sobol')
+    >>> len(result.xl), len(result_2.xl)
+    (13, 39)
+
+    Note the difference between, e.g., ``n=180, iters=1`` and ``n=60, iters=3``.
+    In the first case the promising points contained in the minimiser pool
+    are processed only once. In the latter case they are processed every 60
+    sampling points for a total of 3 times.
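+
+    As a minimal sketch of the ``minimize_every_iter`` option described above
+    (the exact minima recovered can vary between runs and versions), promising
+    points can also be minimised during every iteration:
+
+    >>> result_3 = shgo(eggholder, bounds, n=60, iters=5,
+    ...                 options={'minimize_every_iter': True},
+    ...                 sampling_method='sobol')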
+ + To demonstrate solving problems with non-linear constraints consider the + following example from Hock and Schittkowski problem 73 (cattle-feed) [4]_:: + + minimize: f = 24.55 * x_1 + 26.75 * x_2 + 39 * x_3 + 40.50 * x_4 + + subject to: 2.3 * x_1 + 5.6 * x_2 + 11.1 * x_3 + 1.3 * x_4 - 5 >= 0, + + 12 * x_1 + 11.9 * x_2 + 41.8 * x_3 + 52.1 * x_4 - 21 + -1.645 * sqrt(0.28 * x_1**2 + 0.19 * x_2**2 + + 20.5 * x_3**2 + 0.62 * x_4**2) >= 0, + + x_1 + x_2 + x_3 + x_4 - 1 == 0, + + 1 >= x_i >= 0 for all i + + The approximate answer given in [4]_ is:: + + f([0.6355216, -0.12e-11, 0.3127019, 0.05177655]) = 29.894378 + + >>> def f(x): # (cattle-feed) + ... return 24.55*x[0] + 26.75*x[1] + 39*x[2] + 40.50*x[3] + ... + >>> def g1(x): + ... return 2.3*x[0] + 5.6*x[1] + 11.1*x[2] + 1.3*x[3] - 5 # >=0 + ... + >>> def g2(x): + ... return (12*x[0] + 11.9*x[1] +41.8*x[2] + 52.1*x[3] - 21 + ... - 1.645 * np.sqrt(0.28*x[0]**2 + 0.19*x[1]**2 + ... + 20.5*x[2]**2 + 0.62*x[3]**2) + ... ) # >=0 + ... + >>> def h1(x): + ... return x[0] + x[1] + x[2] + x[3] - 1 # == 0 + ... + >>> cons = ({'type': 'ineq', 'fun': g1}, + ... {'type': 'ineq', 'fun': g2}, + ... {'type': 'eq', 'fun': h1}) + >>> bounds = [(0, 1.0),]*4 + >>> res = shgo(f, bounds, iters=3, constraints=cons) + >>> res + fun: 29.894378159142136 + funl: array([29.89437816]) + message: 'Optimization terminated successfully.' + nfev: 119 + nit: 3 + nlfev: 40 + nlhev: 0 + nljev: 5 + success: True + x: array([6.35521569e-01, 1.13700270e-13, 3.12701881e-01, 5.17765506e-02]) + xl: array([[6.35521569e-01, 1.13700270e-13, 3.12701881e-01, 5.17765506e-02]]) + + >>> g1(res.x), g2(res.x), h1(res.x) + (-5.0626169922907138e-14, -2.9594104944408173e-12, 0.0) + + """ + # Initiate SHGO class + shc = SHGO(func, bounds, args=args, constraints=constraints, n=n, + iters=iters, callback=callback, + minimizer_kwargs=minimizer_kwargs, + options=options, sampling_method=sampling_method) + + # Run the algorithm, process results and test success + shc.construct_complex() + + if not shc.break_routine: + if shc.disp: + print("Successfully completed construction of complex.") + + # Test post iterations success + if len(shc.LMC.xl_maps) == 0: + # If sampling failed to find pool, return lowest sampled point + # with a warning + shc.find_lowest_vertex() + shc.break_routine = True + shc.fail_routine(mes="Failed to find a feasible minimiser point. " + "Lowest sampling point = {}".format(shc.f_lowest)) + shc.res.fun = shc.f_lowest + shc.res.x = shc.x_lowest + shc.res.nfev = shc.fn + + # Confirm the routine ran successfully + if not shc.break_routine: + shc.res.message = 'Optimization terminated successfully.' + shc.res.success = True + + # Return the final results + return shc.res + + +class SHGO(object): + def __init__(self, func, bounds, args=(), constraints=None, n=None, + iters=None, callback=None, minimizer_kwargs=None, + options=None, sampling_method='sobol'): + + # Input checks + methods = ['sobol', 'simplicial'] + if sampling_method not in methods: + raise ValueError(("Unknown sampling_method specified." 
+                          " Valid methods: {}").format(', '.join(methods)))
+
+        # Initiate class
+        self.func = func
+        self.bounds = bounds
+        self.args = args
+        self.callback = callback
+
+        # Bounds
+        abound = np.array(bounds, float)
+        self.dim = np.shape(abound)[0]  # Dimensionality of problem
+
+        # Set non-finite values to large floats
+        infind = ~np.isfinite(abound)
+        abound[infind[:, 0], 0] = -1e50
+        abound[infind[:, 1], 1] = 1e50
+
+        # Check if bounds are correctly specified
+        bnderr = abound[:, 0] > abound[:, 1]
+        if bnderr.any():
+            raise ValueError('Error: lb > ub in bounds {}.'
+                             .format(', '.join(str(b) for b in bnderr)))
+
+        self.bounds = abound
+
+        # Constraints
+        # Process constraint dict sequence:
+        if constraints is not None:
+            self.min_cons = constraints
+            self.g_cons = []
+            self.g_args = []
+            if (type(constraints) is not tuple) and (type(constraints)
+                                                     is not list):
+                constraints = (constraints,)
+
+            for cons in constraints:
+                if cons['type'] == 'ineq':
+                    self.g_cons.append(cons['fun'])
+                    try:
+                        self.g_args.append(cons['args'])
+                    except KeyError:
+                        self.g_args.append(())
+            self.g_cons = tuple(self.g_cons)
+            self.g_args = tuple(self.g_args)
+        else:
+            self.g_cons = None
+            self.g_args = None
+
+        # Define local minimization keyword arguments
+        # Start with defaults
+        self.minimizer_kwargs = {'args': self.args,
+                                 'method': 'SLSQP',
+                                 'bounds': self.bounds,
+                                 'options': {},
+                                 'callback': self.callback
+                                 }
+        if minimizer_kwargs is not None:
+            # Overwrite with supplied values
+            self.minimizer_kwargs.update(minimizer_kwargs)
+
+        else:
+            self.minimizer_kwargs['options'] = {'ftol': 1e-12}
+
+        if (self.minimizer_kwargs['method'] in ('SLSQP', 'COBYLA') and
+                (minimizer_kwargs is not None and
+                 'constraints' not in minimizer_kwargs and
+                 constraints is not None) or
+                (self.g_cons is not None)):
+            self.minimizer_kwargs['constraints'] = self.min_cons
+
+        # Process options dict
+        if options is not None:
+            self.init_options(options)
+        else:  # Default settings:
+            self.f_min_true = None
+            self.minimize_every_iter = False
+
+            # Algorithm limits
+            self.maxiter = None
+            self.maxfev = None
+            self.maxev = None
+            self.maxtime = None
+            self.f_min_true = None
+            self.minhgrd = None
+
+            # Objective function knowledge
+            self.symmetry = False
+
+            # Algorithm functionality
+            self.local_iter = False
+            self.infty_cons_sampl = True
+
+            # Feedback
+            self.disp = False
+
+        # Remove unknown arguments in self.minimizer_kwargs
+        # Start with arguments all the solvers have in common
+        self.min_solver_args = ['fun', 'x0', 'args',
+                                'callback', 'options', 'method']
+        # then add the ones unique to specific solvers
+        solver_args = {
+            '_custom': ['jac', 'hess', 'hessp', 'bounds', 'constraints'],
+            'nelder-mead': [],
+            'powell': [],
+            'cg': ['jac'],
+            'bfgs': ['jac'],
+            'newton-cg': ['jac', 'hess', 'hessp'],
+            'l-bfgs-b': ['jac', 'bounds'],
+            'tnc': ['jac', 'bounds'],
+            'cobyla': ['constraints'],
+            'slsqp': ['jac', 'bounds', 'constraints'],
+            'dogleg': ['jac', 'hess'],
+            'trust-ncg': ['jac', 'hess', 'hessp'],
+            'trust-krylov': ['jac', 'hess', 'hessp'],
+            'trust-exact': ['jac', 'hess'],
+        }
+        method = self.minimizer_kwargs['method']
+        self.min_solver_args += solver_args[method.lower()]
+
+        # Only retain the known arguments
+        def _restrict_to_keys(dictionary, goodkeys):
+            """Remove keys from dictionary if not in goodkeys - inplace"""
+            existingkeys = set(dictionary)
+            for key in existingkeys - set(goodkeys):
+                dictionary.pop(key, None)
+
+        _restrict_to_keys(self.minimizer_kwargs, self.min_solver_args)
+        _restrict_to_keys(self.minimizer_kwargs['options'],
+                          self.min_solver_args + ['ftol'])
+
+        # Algorithm controls
+        # Global controls
+        self.stop_global = False  # Used in the stopping_criteria method
+        self.break_routine = False  # Break the algorithm globally
+        self.iters = iters  # Iterations to be run
+        self.iters_done = 0  # Iterations completed
+        self.n = n  # Sampling points per iteration
+        self.nc = n  # Sampling points to sample in current iteration
+        self.n_prc = 0  # Processed points (used to track Delaunay iters)
+        self.n_sampled = 0  # To track no. of sampling points already generated
+        self.fn = 0  # Number of feasible sampling points evaluations performed
+        self.hgr = 0  # Homology group rank
+
+        # Default settings if no sampling criteria.
+        if self.iters is None:
+            self.iters = 1
+        if self.n is None:
+            self.n = 100
+            self.nc = self.n
+
+        if not ((self.maxiter is None) and (self.maxfev is None) and (
+                self.maxev is None)
+                and (self.minhgrd is None) and (self.f_min_true is None)):
+            self.iters = None
+
+        # Set complex construction mode based on a provided stopping criteria:
+        # Choose complex constructor
+        if sampling_method == 'simplicial':
+            self.iterate_complex = self.iterate_hypercube
+            self.minimizers = self.simplex_minimizers
+            self.sampling_method = sampling_method
+
+        elif (sampling_method == 'sobol') or (type(sampling_method) is not str):
+            self.iterate_complex = self.iterate_delaunay
+            self.minimizers = self.delaunay_complex_minimisers
+            # Sampling method used
+            if sampling_method == 'sobol':
+                self.sampling_method = sampling_method
+                self.sampling = self.sampling_sobol
+                self.Sobol = sobol_seq.Sobol()  # Init Sobol class
+                if self.dim < 40:
+                    self.sobol_points = self.sobol_points_40
+                else:
+                    self.sobol_points = self.sobol_points_10k
+            else:
+                # A user defined sampling method:
+                # self.sampling_points = sampling_method
+                self.sampling = sampling_method
+
+        # Local controls
+        self.stop_l_iter = False  # Local minimisation iterations
+        self.stop_complex_iter = False  # Sampling iterations
+
+        # Initiate storage objects used in algorithm classes
+        self.minimizer_pool = []
+
+        # Cache of local minimizers mapped
+        self.LMC = LMapCache()
+
+        # Initialize return object
+        self.res = OptimizeResult()  # scipy.optimize.OptimizeResult object
+        self.res.nfev = 0  # Includes each sampling point as func evaluation
+        self.res.nlfev = 0  # Local function evals for all minimisers
+        self.res.nljev = 0  # Local Jacobian evals for all minimisers
+        self.res.nlhev = 0  # Local Hessian evals for all minimisers
+
+    # Initiation aids
+    def init_options(self, options):
+        """
+        Initiates the options.
+
+        Can also be useful to change parameters after class initiation.
+
+        Parameters
+        ----------
+        options : dict
+
+        Returns
+        -------
+        None
+
+        """
+        self.minimizer_kwargs['options'].update(options)
+        # Default settings:
+        self.minimize_every_iter = options.get('minimize_every_iter', False)
+
+        # Algorithm limits
+        # Maximum number of iterations to perform.
+        self.maxiter = options.get('maxiter', None)
+        # Maximum number of function evaluations in the feasible domain
+        self.maxfev = options.get('maxfev', None)
+        # Maximum number of sampling evaluations (includes searching in
+        # infeasible points)
+        self.maxev = options.get('maxev', None)
+        # Maximum processing runtime allowed
+        self.init = time.time()
+        self.maxtime = options.get('maxtime', None)
+        if 'f_min' in options:
+            # Specify the minimum objective function value, if it is known.
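+            # The matching absolute tolerance ``f_tol`` defaults to 1e-4 below
+            # and is applied in finite_precision().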
+            self.f_min_true = options['f_min']
+            self.f_tol = options.get('f_tol', 1e-4)
+        else:
+            self.f_min_true = None
+
+        self.minhgrd = options.get('minhgrd', None)
+
+        # Objective function knowledge
+        self.symmetry = 'symmetry' in options
+
+        # Algorithm functionality
+        # Only evaluate a few of the best candidates
+        self.local_iter = options.get('local_iter', False)
+
+        self.infty_cons_sampl = options.get('infty_constraints', True)
+
+        # Feedback
+        self.disp = options.get('disp', False)
+
+    # Iteration properties
+    # Main construction loop:
+    def construct_complex(self):
+        """
+        Construct for `iters` iterations.
+
+        If uniform sampling is used, every iteration adds 'n' sampling points.
+
+        Iteration stops if a stopping criterion (e.g. number of sampling
+        points or processing time) has been met.
+
+        """
+        if self.disp:
+            print('Splitting first generation')
+
+        while not self.stop_global:
+            if self.break_routine:
+                break
+            # Iterate complex, process minimisers
+            self.iterate()
+            self.stopping_criteria()
+
+        # Build minimiser pool
+        # Final iteration only needed if pools weren't minimised every iteration
+        if not self.minimize_every_iter:
+            if not self.break_routine:
+                self.find_minima()
+
+        self.res.nit = self.iters_done + 1
+
+    def find_minima(self):
+        """
+        Construct the minimiser pool, map the minimisers to local minima
+        and sort the results into a global return object.
+        """
+        self.minimizers()
+        if len(self.X_min) != 0:
+            # Minimise the pool of minimisers with local minimisation methods
+            # Note that if Options['local_iter'] is an `int` instead of default
+            # value False then only that number of candidates will be minimised
+            self.minimise_pool(self.local_iter)
+            # Sort results and build the global return object
+            self.sort_result()
+
+            # Lowest values used to report in case of failures
+            self.f_lowest = self.res.fun
+            self.x_lowest = self.res.x
+        else:
+            self.find_lowest_vertex()
+
+    def find_lowest_vertex(self):
+        # Find the lowest objective function value on one of
+        # the vertices of the simplicial complex
+        if self.sampling_method == 'simplicial':
+            self.f_lowest = np.inf
+            for x in self.HC.V.cache:
+                if self.HC.V[x].f < self.f_lowest:
+                    self.f_lowest = self.HC.V[x].f
+                    self.x_lowest = self.HC.V[x].x_a
+            if self.f_lowest == np.inf:  # no feasible point
+                self.f_lowest = None
+                self.x_lowest = None
+        else:
+            if self.fn == 0:
+                self.f_lowest = None
+                self.x_lowest = None
+            else:
+                self.f_I = np.argsort(self.F, axis=-1)
+                self.f_lowest = self.F[self.f_I[0]]
+                self.x_lowest = self.C[self.f_I[0]]
+
+    # Stopping criteria functions:
+    def finite_iterations(self):
+        if self.iters is not None:
+            if self.iters_done >= (self.iters - 1):
+                self.stop_global = True
+
+        if self.maxiter is not None:  # Stop for infeasible sampling
+            if self.iters_done >= (self.maxiter - 1):
+                self.stop_global = True
+        return self.stop_global
+
+    def finite_fev(self):
+        # Finite function evals in the feasible domain
+        if self.fn >= self.maxfev:
+            self.stop_global = True
+        return self.stop_global
+
+    def finite_ev(self):
+        # Finite evaluations including infeasible sampling points
+        if self.n_sampled >= self.maxev:
+            self.stop_global = True
+
+    def finite_time(self):
+        if (time.time() - self.init) >= self.maxtime:
+            self.stop_global = True
+
+    def finite_precision(self):
+        """
+        Stop the algorithm if the final function value is known
+
+        Specify in options (with ``self.f_min_true = options['f_min']``)
+        and the tolerance with ``f_tol = options['f_tol']``
+        """
+        # If no minimiser has been found use the lowest sampling value
+        if len(self.LMC.xl_maps) == 0:
+            self.find_lowest_vertex()
+
+        # Function to stop algorithm at specified percentage error:
+        if self.f_lowest == 0.0:
+            if self.f_min_true == 0.0:
+                if self.f_lowest <= self.f_tol:
+                    self.stop_global = True
+        else:
+            pe = (self.f_lowest - self.f_min_true) / abs(self.f_min_true)
+            if self.f_lowest <= self.f_min_true:
+                self.stop_global = True
+                # 2if (pe - self.f_tol) <= abs(1.0 / abs(self.f_min_true)):
+                if abs(pe) >= 2 * self.f_tol:
+                    warnings.warn("A much lower value than the expected "
+                                  "f* = {} was found: "
+                                  "f_lowest = {}".format(self.f_min_true,
+                                                         self.f_lowest))
+            if pe <= self.f_tol:
+                self.stop_global = True
+
+        return self.stop_global
+
+    def finite_homology_growth(self):
+        if self.LMC.size == 0:
+            return  # no local minimisers yet; no reason to stop.
+        self.hgrd = self.LMC.size - self.hgr
+
+        self.hgr = self.LMC.size
+        if self.hgrd <= self.minhgrd:
+            self.stop_global = True
+        return self.stop_global
+
+    def stopping_criteria(self):
+        """
+        Various stopping criteria checked every iteration
+
+        Returns
+        -------
+        stop : bool
+        """
+        if self.maxiter is not None:
+            self.finite_iterations()
+        if self.iters is not None:
+            self.finite_iterations()
+        if self.maxfev is not None:
+            self.finite_fev()
+        if self.maxev is not None:
+            self.finite_ev()
+        if self.maxtime is not None:
+            self.finite_time()
+        if self.f_min_true is not None:
+            self.finite_precision()
+        if self.minhgrd is not None:
+            self.finite_homology_growth()
+
+    def iterate(self):
+        self.iterate_complex()
+
+        # Build minimiser pool
+        if self.minimize_every_iter:
+            if not self.break_routine:
+                self.find_minima()  # Process minimiser pool
+
+        # Algorithm updates
+        self.iters_done += 1
+
+    def iterate_hypercube(self):
+        """
+        Iterate a subdivision of the complex
+
+        Note: called with ``self.iterate_complex()`` after class initiation
+        """
+        # Iterate the complex
+        if self.n_sampled == 0:
+            # Initial triangulation of the hyper-rectangle
+            self.HC = Complex(self.dim, self.func, self.args,
+                              self.symmetry, self.bounds, self.g_cons,
+                              self.g_args)
+        else:
+            self.HC.split_generation()
+
+        # feasible sampling points counted by the triangulation.py routines
+        self.fn = self.HC.V.nfev
+        self.n_sampled = self.HC.V.size  # nevs counted in triangulation.py
+        return
+
+    def iterate_delaunay(self):
+        """
+        Build a complex of Delaunay triangulated points
+
+        Note: called with ``self.iterate_complex()`` after class initiation
+        """
+        self.nc += self.n
+        self.sampled_surface(infty_cons_sampl=self.infty_cons_sampl)
+        self.n_sampled = self.nc
+        return
+
+    # Hypercube minimizers
+    def simplex_minimizers(self):
+        """
+        Returns the indexes of all minimizers
+        """
+        self.minimizer_pool = []
+        # Note: Can implement parallelization here
+        for x in self.HC.V.cache:
+            if self.HC.V[x].minimiser():
+                if self.disp:
+                    logging.info('=' * 60)
+                    logging.info(
+                        'v.x = {} is minimiser'.format(self.HC.V[x].x_a))
+                    logging.info('v.f = {} is minimiser'.format(self.HC.V[x].f))
+                    logging.info('=' * 30)
+
+                if self.HC.V[x] not in self.minimizer_pool:
+                    self.minimizer_pool.append(self.HC.V[x])
+
+                if self.disp:
+                    logging.info('Neighbours:')
+                    logging.info('=' * 30)
+                    for vn in self.HC.V[x].nn:
+                        logging.info('x = {} || f = {}'.format(vn.x, vn.f))
+
+                    logging.info('=' * 60)
+
+        self.minimizer_pool_F = []
+        self.X_min = []
+        # normalized tuple in the Vertex cache
+        self.X_min_cache = {}  # Cache used in hypercube sampling
+
+        for v in self.minimizer_pool:
+            self.X_min.append(v.x_a)
+            self.minimizer_pool_F.append(v.f)
+            self.X_min_cache[tuple(v.x_a)] = v.x
+
+        self.minimizer_pool_F = np.array(self.minimizer_pool_F)
+        self.X_min = np.array(self.X_min)
+
+        # TODO: Only do this if global mode
+        self.sort_min_pool()
+
+        return self.X_min
+
+    # Local minimisation
+    # Minimiser pool processing
+    def minimise_pool(self, force_iter=False):
+        """
+        This processing method can optionally minimise only the best candidate
+        solutions in the minimiser pool
+
+        Parameters
+        ----------
+        force_iter : int
+            Number of starting minimisers to process (can be specified
+            globally or locally)
+
+        """
+        # Find first local minimum
+        # NOTE: Since we always minimize this value regardless it is a waste to
+        # build the topograph first before minimizing
+        lres_f_min = self.minimize(self.X_min[0], ind=self.minimizer_pool[0])
+
+        # Trim minimised point from current minimiser set
+        self.trim_min_pool(0)
+
+        # Force processing to only the given number of starting minimisers
+        if force_iter:
+            self.local_iter = force_iter
+
+        while not self.stop_l_iter:
+            # Global stopping criteria:
+            if self.f_min_true is not None:
+                if (lres_f_min.fun - self.f_min_true) / abs(
+                        self.f_min_true) <= self.f_tol:
+                    self.stop_l_iter = True
+                    break
+            # Note first iteration is outside loop:
+            if self.local_iter is not None:
+                if self.disp:
+                    logging.info(
+                        'SHGO.iters in function minimise_pool = {}'.format(
+                            self.local_iter))
+                self.local_iter -= 1
+                if self.local_iter == 0:
+                    self.stop_l_iter = True
+                    break
+
+            if np.shape(self.X_min)[0] == 0:
+                self.stop_l_iter = True
+                break
+
+            # Construct topograph from current minimiser set
+            # (NOTE: This is a very small topograph using only the minimiser
+            # pool; it might be worth using some graph theory tools instead.)
+            self.g_topograph(lres_f_min.x, self.X_min)
+
+            # Find local minimum at the minimiser with the greatest Euclidean
+            # distance from the current solution
+            ind_xmin_l = self.Z[:, -1]
+            lres_f_min = self.minimize(self.Ss[-1, :], self.minimizer_pool[-1])
+
+            # Trim minimised point from current minimiser set
+            self.trim_min_pool(ind_xmin_l)
+
+        # Reset controls
+        self.stop_l_iter = False
+        return
+
+    def sort_min_pool(self):
+        # Sort to find minimum func value in min_pool
+        self.ind_f_min = np.argsort(self.minimizer_pool_F)
+        self.minimizer_pool = np.array(self.minimizer_pool)[self.ind_f_min]
+        self.minimizer_pool_F = np.array(self.minimizer_pool_F)[
+            self.ind_f_min]
+        return
+
+    def trim_min_pool(self, trim_ind):
+        self.X_min = np.delete(self.X_min, trim_ind, axis=0)
+        self.minimizer_pool_F = np.delete(self.minimizer_pool_F, trim_ind)
+        self.minimizer_pool = np.delete(self.minimizer_pool, trim_ind)
+        return
+
+    def g_topograph(self, x_min, X_min):
+        """
+        Returns the topographical vector stemming from the specified value
+        ``x_min`` for the current feasible set ``X_min`` with True boolean
+        values indicating positive entries and False values indicating
+        negative entries.
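+
+        In this implementation the candidate minimisers are also returned
+        sorted by Euclidean distance from ``x_min``, nearest first.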
+ + """ + x_min = np.array([x_min]) + self.Y = spatial.distance.cdist(x_min, X_min, 'euclidean') + # Find sorted indexes of spatial distances: + self.Z = np.argsort(self.Y, axis=-1) + + self.Ss = X_min[self.Z][0] + self.minimizer_pool = self.minimizer_pool[self.Z] + self.minimizer_pool = self.minimizer_pool[0] + return self.Ss + + # Local bound functions + def construct_lcb_simplicial(self, v_min): + """ + Construct locally (approximately) convex bounds + + Parameters + ---------- + v_min : Vertex object + The minimiser vertex + + Returns + ------- + cbounds : list of lists + List of size dim with length-2 list of bounds for each dimension + + """ + cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] + # Loop over all bounds + for vn in v_min.nn: + for i, x_i in enumerate(vn.x_a): + # Lower bound + if (x_i < v_min.x_a[i]) and (x_i > cbounds[i][0]): + cbounds[i][0] = x_i + + # Upper bound + if (x_i > v_min.x_a[i]) and (x_i < cbounds[i][1]): + cbounds[i][1] = x_i + + if self.disp: + logging.info('cbounds found for v_min.x_a = {}'.format(v_min.x_a)) + logging.info('cbounds = {}'.format(cbounds)) + + return cbounds + + def construct_lcb_delaunay(self, v_min, ind=None): + """ + Construct locally (approximately) convex bounds + + Parameters + ---------- + v_min : Vertex object + The minimiser vertex + + Returns + ------- + cbounds : list of lists + List of size dim with length-2 list of bounds for each dimension + """ + cbounds = [] + for x_b_i in self.bounds: + cbounds.append([x_b_i[0], x_b_i[1]]) + + return cbounds + + # Minimize a starting point locally + def minimize(self, x_min, ind=None): + """ + This function is used to calculate the local minima using the specified + sampling point as a starting value. + + Parameters + ---------- + x_min : vector of floats + Current starting point to minimise. + + Returns + ------- + lres : OptimizeResult + The local optimization result represented as a `OptimizeResult` + object. 
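+
+        Notes
+        -----
+        Starting points that have already been minimised are returned from
+        the local-minima cache (``self.LMC``) without re-running the local
+        solver.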
+        """
+        # Use minima maps if vertex was already run
+        if self.disp:
+            logging.info('Vertex minimiser maps = {}'.format(self.LMC.v_maps))
+
+        if self.LMC[x_min].lres is not None:
+            return self.LMC[x_min].lres
+
+        # TODO: Check discarded bound rules
+
+        if self.callback is not None:
+            print('Callback for '
+                  'minimizer starting at {}:'.format(x_min))
+
+        if self.disp:
+            print('Starting '
+                  'minimization at {}...'.format(x_min))
+
+        if self.sampling_method == 'simplicial':
+            x_min_t = tuple(x_min)
+            # Find the normalized tuple in the Vertex cache:
+            x_min_t_norm = self.X_min_cache[tuple(x_min_t)]
+
+            x_min_t_norm = tuple(x_min_t_norm)
+
+            g_bounds = self.construct_lcb_simplicial(self.HC.V[x_min_t_norm])
+            if 'bounds' in self.min_solver_args:
+                self.minimizer_kwargs['bounds'] = g_bounds
+
+            if self.disp:
+                print('bounds in kwarg:')
+                print(self.minimizer_kwargs['bounds'])
+        else:
+            g_bounds = self.construct_lcb_delaunay(x_min, ind=ind)
+            if 'bounds' in self.min_solver_args:
+                self.minimizer_kwargs['bounds'] = g_bounds
+
+        # Local minimization using scipy.optimize.minimize:
+        lres = minimize(self.func, x_min, **self.minimizer_kwargs)
+
+        if self.disp:
+            print('lres = {}'.format(lres))
+
+        # Local function evals for all minimisers
+        self.res.nlfev += lres.nfev
+        if 'njev' in lres:
+            self.res.nljev += lres.njev
+        if 'nhev' in lres:
+            self.res.nlhev += lres.nhev
+
+        try:  # Needed because of the brain dead 1x1 numpy arrays
+            lres.fun = lres.fun[0]
+        except (IndexError, TypeError):
+            pass  # lres.fun is already a scalar
+
+        # Append minima maps
+        self.LMC[x_min]  # Ensure a cache entry exists for this starting point
+        self.LMC.add_res(x_min, lres, bounds=g_bounds)
+
+        return lres
+
+    # Post local minimisation processing
+    def sort_result(self):
+        """
+        Sort results and build the global return object
+        """
+        # Sort results in local minima cache
+        results = self.LMC.sort_cache_result()
+        self.res.xl = results['xl']
+        self.res.funl = results['funl']
+        self.res.x = results['x']
+        self.res.fun = results['fun']
+
+        # Add local func evals to sampling func evals
+        # Count the number of feasible vertices and add to local func evals:
+        self.res.nfev = self.fn + self.res.nlfev
+        return self.res
+
+    # Algorithm controls
+    def fail_routine(self, mes=("Failed to converge")):
+        self.break_routine = True
+        self.res.success = False
+        self.X_min = [None]
+        self.res.message = mes
+
+    def sampled_surface(self, infty_cons_sampl=False):
+        """
+        Sample the function surface.
+
+        There are 2 modes: if ``infty_cons_sampl`` is True then the sampled
+        points that are generated outside the feasible domain will be
+        assigned an ``inf`` value in accordance with SHGO rules.
+        This guarantees convergence and usually requires fewer objective
+        function evaluations at the computational cost of more Delaunay
+        triangulation points.
+
+        If ``infty_cons_sampl`` is False then the infeasible points are
+        discarded and only a subspace of the sampled points are used. This
+        comes at the cost of the loss of guaranteed convergence and usually
+        requires more objective function evaluations.
+        """
+        # Generate sampling points
+        if self.disp:
+            print('Generating sampling points')
+        self.sampling(self.nc, self.dim)
+
+        if not infty_cons_sampl:
+            # Find subspace of feasible points
+            if self.g_cons is not None:
+                self.sampling_subspace()
+
+        # Sort remaining samples
+        self.sorted_samples()
+
+        # Find objective function references
+        self.fun_ref()
+
+        self.n_sampled = self.nc
+
+    def delaunay_complex_minimisers(self):
+        # Construct complex minimisers on the current sampling set.
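+        # Qhull appears to need more than dim + 1 points before an initial
+        # simplex can be built (see the TODO below), hence dim + 2 is required.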
+        # if self.fn >= (self.dim + 1):
+        if self.fn >= (self.dim + 2):
+            # TODO: Check on strange Qhull error where the number of vertices
+            # required for an initial simplex is higher than n + 1?
+            if self.dim < 2:  # Scalar objective functions
+                if self.disp:
+                    print('Constructing 1D minimizer pool')
+
+                self.ax_subspace()
+                self.surface_topo_ref()
+                self.minimizers_1D()
+
+            else:  # Multivariate functions.
+                if self.disp:
+                    print('Constructing Gabriel graph and minimizer pool')
+
+                if self.iters == 1:
+                    self.delaunay_triangulation(grow=False)
+                else:
+                    self.delaunay_triangulation(grow=True, n_prc=self.n_prc)
+                    self.n_prc = self.C.shape[0]
+
+                if self.disp:
+                    print('Triangulation completed, building minimizer pool')
+
+                self.delaunay_minimizers()
+
+            if self.disp:
+                logging.info(
+                    "Minimiser pool = SHGO.X_min = {}".format(self.X_min))
+        else:
+            if self.disp:
+                print(
+                    'Not enough sampling points found in the feasible domain.')
+            self.minimizer_pool = [None]
+            try:
+                self.X_min
+            except AttributeError:
+                self.X_min = []
+
+    def sobol_points_40(self, n, d, skip=0):
+        """
+        Wrapper for ``sobol_seq.i4_sobol_generate``
+
+        Generate N sampling points in D dimensions
+        """
+        # Pass the caller's skip through so that later iterations continue the
+        # sequence instead of regenerating the same points.
+        points = self.Sobol.i4_sobol_generate(d, n, skip=skip)
+
+        return points
+
+    def sobol_points_10k(self, N, D):
+        """
+        sobol.cc by Frances Kuo and Stephen Joe translated to Python 3 by
+        Carl Sandrock 2016-03-31
+
+        The original program is available and described at
+        http://web.maths.unsw.edu.au/~fkuo/sobol/
+        """
+        import gzip
+        import os
+        path = os.path.join(os.path.dirname(__file__), '_shgo_lib',
+                            'sobol_vec.gz')
+        f = gzip.open(path, 'rb')
+        unsigned = "uint64"
+        # swallow header
+        next(f)
+
+        L = int(np.log(N) // np.log(2.0)) + 1
+
+        C = np.ones(N, dtype=unsigned)
+        for i in range(1, N):
+            value = i
+            while value & 1:
+                value >>= 1
+                C[i] += 1
+
+        points = np.zeros((N, D), dtype='double')
+
+        # XXX: This appears not to set the first element of V
+        V = np.empty(L + 1, dtype=unsigned)
+        for i in range(1, L + 1):
+            V[i] = 1 << (32 - i)
+
+        X = np.empty(N, dtype=unsigned)
+        X[0] = 0
+        for i in range(1, N):
+            X[i] = X[i - 1] ^ V[C[i - 1]]
+            points[i, 0] = X[i] / 2 ** 32
+
+        for j in range(1, D):
+            F_int = [int(item) for item in next(f).strip().split()]
+            (d, s, a), m = F_int[:3], [0] + F_int[3:]
+
+            if L <= s:
+                for i in range(1, L + 1):
+                    V[i] = m[i] << (32 - i)
+            else:
+                for i in range(1, s + 1):
+                    V[i] = m[i] << (32 - i)
+                for i in range(s + 1, L + 1):
+                    V[i] = V[i - s] ^ (
+                            V[i - s] >> np.array(s, dtype=unsigned))
+                    for k in range(1, s):
+                        V[i] ^= np.array(
+                            (((a >> (s - 1 - k)) & 1) * V[i - k]),
+                            dtype=unsigned)
+
+            X[0] = 0
+            for i in range(1, N):
+                X[i] = X[i - 1] ^ V[C[i - 1]]
+                points[i, j] = X[i] / 2 ** 32  # *** the actual points
+
+        f.close()
+        return points
+
+    def sampling_sobol(self, n, dim):
+        """
+        Generates uniform sampling points in a hypercube and scales the points
+        to the bound limits.
+        """
+        # Generate sampling points.
+        # Generate uniform sample points in [0, 1]^m \subset R^m
+        if self.n_sampled == 0:
+            self.C = self.sobol_points(n, dim)
+        else:
+            self.C = self.sobol_points(n, dim, skip=self.n_sampled)
+        # Distribute over bounds
+        for i in range(len(self.bounds)):
+            self.C[:, i] = (self.C[:, i] *
+                            (self.bounds[i][1] - self.bounds[i][0]) +
+                            self.bounds[i][0])
+        return self.C
+
+    def sampling_subspace(self):
+        """Find subspace of feasible points from g_func definition"""
+        # Subspace of feasible points.
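+        # Keep only the points where every inequality constraint evaluates
+        # to >= 0; points violating any constraint are discarded.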
+ for ind, g in enumerate(self.g_cons): + self.C = self.C[g(self.C.T, *self.g_args[ind]) >= 0.0] + if self.C.size == 0: + self.res.message = ('No sampling point found within the ' + + 'feasible set. Increasing sampling ' + + 'size.') + # sampling correctly for both 1D and >1D cases + if self.disp: + print(self.res.message) + + def sorted_samples(self): # Validated + """Find indexes of the sorted sampling points""" + self.Ind_sorted = np.argsort(self.C, axis=0) + self.Xs = self.C[self.Ind_sorted] + return self.Ind_sorted, self.Xs + + def ax_subspace(self): # Validated + """ + Finds the subspace vectors along each component axis. + """ + self.Ci = [] + self.Xs_i = [] + self.Ii = [] + for i in range(self.dim): + self.Ci.append(self.C[:, i]) + self.Ii.append(self.Ind_sorted[:, i]) + self.Xs_i.append(self.Xs[:, i]) + + def fun_ref(self): + """ + Find the objective function output reference table + """ + # TODO: Replace with cached wrapper + + # Note: This process can be pooled easily + # Obj. function returns to be used as reference table.: + f_cache_bool = False + if self.fn > 0: # Store old function evaluations + Ftemp = self.F + fn_old = self.fn + f_cache_bool = True + + self.F = np.zeros(np.shape(self.C)[0]) + # NOTE: It might be easier to replace this with a cached + # objective function + for i in range(self.fn, np.shape(self.C)[0]): + eval_f = True + if self.g_cons is not None: + for g in self.g_cons: + if g(self.C[i, :], *self.args) < 0.0: + eval_f = False + break # Breaks the g loop + + if eval_f: + self.F[i] = self.func(self.C[i, :], *self.args) + self.fn += 1 + elif self.infty_cons_sampl: + self.F[i] = np.inf + self.fn += 1 + if f_cache_bool: + if fn_old > 0: # Restore saved function evaluations + self.F[0:fn_old] = Ftemp + + return self.F + + def surface_topo_ref(self): # Validated + """ + Find the BD and FD finite differences along each component vector. 
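+        Here FD refers to the forward differences stored in ``Ftp`` and BD to
+        the backward differences stored in ``Ftm``.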
+ """ + # Replace numpy inf, -inf and nan objects with floating point numbers + # nan --> float + self.F[np.isnan(self.F)] = np.inf + # inf, -inf --> floats + self.F = np.nan_to_num(self.F) + + self.Ft = self.F[self.Ind_sorted] + self.Ftp = np.diff(self.Ft, axis=0) # FD + self.Ftm = np.diff(self.Ft[::-1], axis=0)[::-1] # BD + + def sample_topo(self, ind): + # Find the position of the sample in the component axial directions + self.Xi_ind_pos = [] + self.Xi_ind_topo_i = [] + + for i in range(self.dim): + for x, I_ind in zip(self.Ii[i], range(len(self.Ii[i]))): + if x == ind: + self.Xi_ind_pos.append(I_ind) + + # Use the topo reference tables to find if point is a minimizer on + # the current axis + + # First check if index is on the boundary of the sampling points: + if self.Xi_ind_pos[i] == 0: + # if boundary is in basin + self.Xi_ind_topo_i.append(self.Ftp[:, i][0] > 0) + + elif self.Xi_ind_pos[i] == self.fn - 1: + # Largest value at sample size + self.Xi_ind_topo_i.append(self.Ftp[:, i][self.fn - 2] < 0) + + # Find axial reference for other points + else: + Xi_ind_top_p = self.Ftp[:, i][self.Xi_ind_pos[i]] > 0 + Xi_ind_top_m = self.Ftm[:, i][self.Xi_ind_pos[i] - 1] > 0 + self.Xi_ind_topo_i.append(Xi_ind_top_p and Xi_ind_top_m) + + if np.array(self.Xi_ind_topo_i).all(): + self.Xi_ind_topo = True + else: + self.Xi_ind_topo = False + self.Xi_ind_topo = np.array(self.Xi_ind_topo_i).all() + + return self.Xi_ind_topo + + def minimizers_1D(self): + """ + Returns the indexes of all minimizers + """ + self.minimizer_pool = [] + # Note: Can implement parallelization here + for ind in range(self.fn): + min_bool = self.sample_topo(ind) + if min_bool: + self.minimizer_pool.append(ind) + + self.minimizer_pool_F = self.F[self.minimizer_pool] + + # Sort to find minimum func value in min_pool + self.sort_min_pool() + if not len(self.minimizer_pool) == 0: + self.X_min = self.C[self.minimizer_pool] + # If function is called again and pool is found unbreak: + else: + self.X_min = [] + + return self.X_min + + def delaunay_triangulation(self, grow=False, n_prc=0): + if not grow: + self.Tri = spatial.Delaunay(self.C) + else: + if hasattr(self, 'Tri'): + self.Tri.add_points(self.C[n_prc:, :]) + else: + self.Tri = spatial.Delaunay(self.C, incremental=True) + + return self.Tri + + @staticmethod + def find_neighbors_delaunay(pindex, triang): + """ + Returns the indexes of points connected to ``pindex`` on the Gabriel + chain subgraph of the Delaunay triangulation. 
+ """ + return triang.vertex_neighbor_vertices[1][ + triang.vertex_neighbor_vertices[0][pindex]: + triang.vertex_neighbor_vertices[0][pindex + 1]] + + def sample_delaunay_topo(self, ind): + self.Xi_ind_topo_i = [] + + # Find the position of the sample in the component Gabrial chain + G_ind = self.find_neighbors_delaunay(ind, self.Tri) + + # Find finite deference between each point + for g_i in G_ind: + rel_topo_bool = self.F[ind] < self.F[g_i] + self.Xi_ind_topo_i.append(rel_topo_bool) + + # Check if minimizer + self.Xi_ind_topo = np.array(self.Xi_ind_topo_i).all() + + return self.Xi_ind_topo + + def delaunay_minimizers(self): + """ + Returns the indexes of all minimizers + """ + self.minimizer_pool = [] + # Note: Can easily be parralized + if self.disp: + logging.info('self.fn = {}'.format(self.fn)) + logging.info('self.nc = {}'.format(self.nc)) + logging.info('np.shape(self.C)' + ' = {}'.format(np.shape(self.C))) + for ind in range(self.fn): + min_bool = self.sample_delaunay_topo(ind) + if min_bool: + self.minimizer_pool.append(ind) + + self.minimizer_pool_F = self.F[self.minimizer_pool] + + # Sort to find minimum func value in min_pool + self.sort_min_pool() + if self.disp: + logging.info('self.minimizer_pool = {}'.format(self.minimizer_pool)) + if not len(self.minimizer_pool) == 0: + self.X_min = self.C[self.minimizer_pool] + else: + self.X_min = [] # Empty pool breaks main routine + return self.X_min + + +class LMap: + def __init__(self, v): + self.v = v + self.x_l = None + self.lres = None + self.f_min = None + self.lbounds = [] + + +class LMapCache: + def __init__(self): + self.cache = {} + + # Lists for search queries + self.v_maps = [] + self.xl_maps = [] + self.f_maps = [] + self.lbound_maps = [] + self.size = 0 + + def __getitem__(self, v): + v = np.ndarray.tolist(v) + v = tuple(v) + try: + return self.cache[v] + except KeyError: + xval = LMap(v) + self.cache[v] = xval + + return self.cache[v] + + def add_res(self, v, lres, bounds=None): + v = np.ndarray.tolist(v) + v = tuple(v) + self.cache[v].x_l = lres.x + self.cache[v].lres = lres + self.cache[v].f_min = lres.fun + self.cache[v].lbounds = bounds + + # Update cache size + self.size += 1 + + # Cache lists for search queries + self.v_maps.append(v) + self.xl_maps.append(lres.x) + self.f_maps.append(lres.fun) + self.lbound_maps.append(bounds) + + def sort_cache_result(self): + """ + Sort results and build the global return object + """ + results = {} + # Sort results and save + self.xl_maps = np.array(self.xl_maps) + self.f_maps = np.array(self.f_maps) + + # Sorted indexes in Func_min + ind_sorted = np.argsort(self.f_maps) + + # Save ordered list of minima + results['xl'] = self.xl_maps[ind_sorted] # Ordered x vals + self.f_maps = np.array(self.f_maps) + results['funl'] = self.f_maps[ind_sorted] + results['funl'] = results['funl'].T + + # Find global of all minimisers + results['x'] = self.xl_maps[ind_sorted[0]] # Save global minima + results['fun'] = self.f_maps[ind_sorted[0]] # Save global fun value + + self.xl_maps = np.ndarray.tolist(self.xl_maps) + self.f_maps = np.ndarray.tolist(self.f_maps) + return results diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo.pyc new file mode 100644 index 0000000..87e34f7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/__init__.py 
b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/__init__.pyc new file mode 100644 index 0000000..9b0bf95 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/sobol_seq.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/sobol_seq.py new file mode 100644 index 0000000..9db8211 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/sobol_seq.py @@ -0,0 +1,372 @@ +""" + Licensing: + This code is distributed under the MIT license. + + + Authors: + Original FORTRAN77 version of i4_sobol by Bennett Fox. + MATLAB version by John Burkardt. + PYTHON version by Corrado Chisari + + Original Python version of is_prime by Corrado Chisari + + Original MATLAB versions of other functions by John Burkardt. + PYTHON versions by Corrado Chisari + + Original code is available from + http://people.sc.fsu.edu/~jburkardt/py_src/sobol/sobol.html + + Modifications: + Wrapped into Python class [30.10.2017] +""" +import numpy as np + +__all__ = ['Sobol'] + + +class Sobol: + def __init__(self): + # Init class variables + self.atmost = None + self.dim_max = None + self.dim_num_save = None + self.initialized = None + self.lastq = None + self.log_max = None + self.maxcol = None + self.poly = None + self.recipd = None + self.seed_save = None + self.v = None + + def i4_sobol_generate(self, dim_num, n, skip=1): + """ + i4_sobol_generate generates a Sobol dataset. + + Parameters: + Input, integer dim_num, the spatial dimension. + Input, integer N, the number of points to generate. + Input, integer SKIP, the number of initial points to skip. + + Output, real R(M,N), the points. + """ + r = np.full((n, dim_num), np.nan) + for j in range(n): + seed = j + skip + r[j, 0:dim_num], next_seed = self.i4_sobol(dim_num, seed) + + return r + + def i4_bit_hi1(self, n): + """ + i4_bit_hi1 returns the position of the high 1 bit base 2 in an integer. + + Example: + +------+-------------+----- + | N | Binary | BIT + +------|-------------+----- + | 0 | 0 | 0 + | 1 | 1 | 1 + | 2 | 10 | 2 + | 3 | 11 | 2 + | 4 | 100 | 3 + | 5 | 101 | 3 + | 6 | 110 | 3 + | 7 | 111 | 3 + | 8 | 1000 | 4 + | 9 | 1001 | 4 + | 10 | 1010 | 4 + | 11 | 1011 | 4 + | 12 | 1100 | 4 + | 13 | 1101 | 4 + | 14 | 1110 | 4 + | 15 | 1111 | 4 + | 16 | 10000 | 5 + | 17 | 10001 | 5 + | 1023 | 1111111111 | 10 + | 1024 | 10000000000 | 11 + | 1025 | 10000000001 | 11 + + Parameters: + Input, integer N, the integer to be measured. + N should be nonnegative. If N is nonpositive, + the value will always be 0. + + Output, integer BIT, the number of bits base 2. + """ + i = np.floor(n) + bit = 0 + while i > 0: + bit += 1 + i //= 2 + return bit + + def i4_bit_lo0(self, n): + """ + I4_BIT_LO0 returns the position of the low 0 bit base 2 in an integer. 
+ + Example: + +------+------------+---- + | N | Binary | BIT + +------+------------+---- + | 0 | 0 | 1 + | 1 | 1 | 2 + | 2 | 10 | 1 + | 3 | 11 | 3 + | 4 | 100 | 1 + | 5 | 101 | 2 + | 6 | 110 | 1 + | 7 | 111 | 4 + | 8 | 1000 | 1 + | 9 | 1001 | 2 + | 10 | 1010 | 1 + | 11 | 1011 | 3 + | 12 | 1100 | 1 + | 13 | 1101 | 2 + | 14 | 1110 | 1 + | 15 | 1111 | 5 + | 16 | 10000 | 1 + | 17 | 10001 | 2 + | 1023 | 1111111111 | 1 + | 1024 | 0000000000 | 1 + | 1025 | 0000000001 | 1 + + Parameters: + Input, integer N, the integer to be measured. + N should be nonnegative. + + Output, integer BIT, the position of the low 1 bit. + """ + bit = 1 + i = np.floor(n) + while i != 2 * (i // 2): + bit += 1 + i //= 2 + return bit + + def i4_sobol(self, dim_num, seed): + """ + i4_sobol generates a new quasirandom Sobol vector with each call. + + Discussion: + The routine adapts the ideas of Antonov and Saleev. + + Reference: + Antonov, Saleev, + USSR Computational Mathematics and Mathematical Physics, + Volume 19, 1980, pages 252 - 256. + + Paul Bratley, Bennett Fox, + Algorithm 659: + Implementing Sobol's Quasirandom Sequence Generator, + ACM Transactions on Mathematical Software, + Volume 14, Number 1, pages 88-100, 1988. + + Bennett Fox, + Algorithm 647: + Implementation and Relative Efficiency of Quasirandom + Sequence Generators, + ACM Transactions on Mathematical Software, + Volume 12, Number 4, pages 362-376, 1986. + + Ilya Sobol, + USSR Computational Mathematics and Mathematical Physics, + Volume 16, pages 236-242, 1977. + + Ilya Sobol, Levitan, + The Production of Points Uniformly Distributed in a Multidimensional + Cube (in Russian), + Preprint IPM Akad. Nauk SSSR, + Number 40, Moscow 1976. + + Parameters: + Input, integer DIM_NUM, the number of spatial dimensions. + DIM_NUM must satisfy 1 <= DIM_NUM <= 40. + + Input/output, integer SEED, the "seed" for the sequence. + This is essentially the index in the sequence of the quasirandom + value to be generated. On output, SEED has been set to the + appropriate next value, usually simply SEED+1. + If SEED is less than 0 on input, it is treated as though it were 0. + An input value of 0 requests the first (0-th) element of the sequence. + + Output, real QUASI(DIM_NUM), the next quasirandom vector. + """ + + # if 'self.initialized' not in list(globals().keys()): + if self.initialized is None: + self.initialized = 0 + self.dim_num_save = -1 + + if not self.initialized or dim_num != self.dim_num_save: + self.initialized = 1 + self.dim_max = 40 + self.dim_num_save = -1 + self.log_max = 30 + self.seed_save = -1 + + # Initialize (part of) V. 
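# Aside: on Python ints the two bit scans above reduce to one-liners; a
# sketch (not the vendored implementation), checked against the tables:
def bit_hi1(n):
    # position of the highest set bit; 0 for nonpositive n
    return max(int(n), 0).bit_length()

def bit_lo0(n):
    # 1-based position of the lowest zero bit
    n = int(n)
    return (~n & (n + 1)).bit_length()

assert bit_hi1(1023) == 10 and bit_hi1(1024) == 11
assert bit_lo0(7) == 4 and bit_lo0(2) == 1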
+ self.v = np.zeros((self.dim_max, self.log_max)) + self.v[0:40, 0] = np.transpose([ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + + self.v[2:40, 1] = np.transpose([ + 1, 3, 1, 3, 1, 3, 3, 1, + 3, 1, 3, 1, 3, 1, 1, 3, 1, 3, + 1, 3, 1, 3, 3, 1, 3, 1, 3, 1, + 3, 1, 1, 3, 1, 3, 1, 3, 1, 3]) + + self.v[3:40, 2] = np.transpose([ + 7, 5, 1, 3, 3, 7, 5, + 5, 7, 7, 1, 3, 3, 7, 5, 1, 1, + 5, 3, 3, 1, 7, 5, 1, 3, 3, 7, + 5, 1, 1, 5, 7, 7, 5, 1, 3, 3]) + + self.v[5:40, 3] = np.transpose([ + 1, 7, 9, 13, 11, + 1, 3, 7, 9, 5, 13, 13, 11, 3, 15, + 5, 3, 15, 7, 9, 13, 9, 1, 11, 7, + 5, 15, 1, 15, 11, 5, 3, 1, 7, 9]) + + self.v[7:40, 4] = np.transpose([ + 9, 3, 27, + 15, 29, 21, 23, 19, 11, 25, 7, 13, 17, + 1, 25, 29, 3, 31, 11, 5, 23, 27, 19, + 21, 5, 1, 17, 13, 7, 15, 9, 31, 9]) + + self.v[13:40, 5] = np.transpose([ + 37, 33, 7, 5, 11, 39, 63, + 27, 17, 15, 23, 29, 3, 21, 13, 31, 25, + 9, 49, 33, 19, 29, 11, 19, 27, 15, 25]) + + self.v[19:40, 6] = np.transpose([ + 13, + 33, 115, 41, 79, 17, 29, 119, 75, 73, 105, + 7, 59, 65, 21, 3, 113, 61, 89, 45, 107]) + + self.v[37:40, 7] = np.transpose([ + 7, 23, 39]) + + # Set POLY. + self.poly = [ + 1, 3, 7, 11, 13, 19, 25, 37, 59, 47, + 61, 55, 41, 67, 97, 91, 109, 103, 115, 131, + 193, 137, 145, 143, 241, 157, 185, 167, 229, 171, + 213, 191, 253, 203, 211, 239, 247, 285, 369, 299] + + self.atmost = 2 ** self.log_max - 1 + + # Find the number of bits in ATMOST. + self.maxcol = self.i4_bit_hi1(self.atmost) + + # Initialize row 1 of V. + self.v[0, 0:self.maxcol] = 1 + + # Things to do only if the dimension changed. + if dim_num != self.dim_num_save: + self.dim_num_save = dim_num + + # Initialize the remaining rows of V. + for i in range(2, dim_num + 1): + + # The bits of the integer POLY(I) gives the form of + # self.polynomial I. + # Find the degree of self.polynomial I from binary encoding. + j = self.poly[i - 1] + m = 0 + j //= 2 + while j > 0: + j //= 2 + m += 1 + + # Expand this bit pattern to separate + # components of the logical array INCLUD. + j = self.poly[i - 1] + includ = np.zeros(m) + for k in range(m, 0, -1): + j2 = j // 2 + includ[k - 1] = (j != 2 * j2) + j = j2 + + # Calculate the remaining elements of row I as explained + # in Bratley and Fox, section 2. + for j in range(m + 1, self.maxcol + 1): + newv = self.v[i - 1, j - m - 1] + lseed = 1 + for k in range(1, m + 1): + lseed *= 2 + if includ[k - 1]: + newv = np.bitwise_xor( + int(newv), + int(lseed * self.v[i - 1, j - k - 1])) + self.v[i - 1, j - 1] = newv + + # Multiply columns of V by appropriate power of 2. + lseed = 1 + for j in range(self.maxcol - 1, 0, -1): + lseed *= 2 + self.v[0:dim_num, j - 1] = self.v[0:dim_num, j - 1] * lseed + + # RECIPD is 1/(common denominator of the elements in V). + self.recipd = 1.0 / (2 * lseed) + self.lastq = np.zeros(dim_num) + + seed = int(np.floor(seed)) + + if seed < 0: + seed = 0 + + lseed = 1 + if seed == 0: + self.lastq = np.zeros(dim_num) + + elif seed == self.seed_save + 1: + + # Find the position of the right-hand zero in SEED. 
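# Aside: the heart of i4_sobol is the Antonov-Saleev Gray-code recurrence
# X_i = X_{i-1} XOR V[lowbit0(i - 1)]. In one dimension every direction
# number reduces to V[j] = 2**(32 - j), which is enough to reproduce the
# familiar sequence 0, 1/2, 3/4, 1/4, ... A sketch under that assumption:
def sobol_1d(n, nbits=32):
    x, out = 0, []
    for i in range(n):
        out.append(x / float(2 ** nbits))
        j = (~i & (i + 1)).bit_length()       # lowest zero bit of i, 1-based
        x ^= 1 << (nbits - j)                 # fold in direction number V[j]
    return out

print(sobol_1d(6))    # [0.0, 0.5, 0.75, 0.25, 0.375, 0.875]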
+ lseed = self.i4_bit_lo0(seed) + + elif seed <= self.seed_save: + + self.seed_save = 0 + self.lastq = np.zeros(dim_num) + + for seed_temp in range(int(self.seed_save), int(seed)): + lseed = self.i4_bit_lo0(seed_temp) + for i in range(1, dim_num + 1): + self.lastq[i - 1] = np.bitwise_xor( + int(self.lastq[i - 1]), int(self.v[i - 1, lseed - 1])) + + lseed = self.i4_bit_lo0(seed) + + elif self.seed_save + 1 < seed: + + for seed_temp in range(int(self.seed_save + 1), int(seed)): + lseed = self.i4_bit_lo0(seed_temp) + for i in range(1, dim_num + 1): + self.lastq[i - 1] = np.bitwise_xor( + int(self.lastq[i - 1]), int(self.v[i - 1, lseed - 1])) + + lseed = self.i4_bit_lo0(seed) + + # Check that the user is not calling too many times! + if self.maxcol < lseed: + print('I4_SOBOL - Fatal error!') + print(' Too many calls!') + print(' MAXCOL = %d\n' % self.maxcol) + print(' L = %d\n' % lseed) + return + + # Calculate the new components of QUASI. + quasi = np.zeros(dim_num) + for i in range(1, dim_num + 1): + quasi[i - 1] = self.lastq[i - 1] * self.recipd + self.lastq[i - 1] = np.bitwise_xor( + int(self.lastq[i - 1]), int(self.v[i - 1, lseed - 1])) + + self.seed_save = seed + seed += 1 + + return [quasi, seed] diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/sobol_seq.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/sobol_seq.pyc new file mode 100644 index 0000000..8233268 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/sobol_seq.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/sobol_vec.gz b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/sobol_vec.gz new file mode 100755 index 0000000..b277dd4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/sobol_vec.gz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/triangulation.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/triangulation.py new file mode 100644 index 0000000..2a7ac0a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/triangulation.py @@ -0,0 +1,790 @@ +import numpy as np +import copy + +try: + from functools import lru_cache # For Python 3 only +except ImportError: # Python 2: + import time + import functools + import collections + + # Note to avoid using external packages such as functools32 we use this code + # only using the standard library + def lru_cache(maxsize=255, timeout=None): + """ + Thanks to ilialuk @ https://stackoverflow.com/users/2121105/ilialuk for + this code snippet. Modifications by S. Endres + """ + + class LruCacheClass(object): + def __init__(self, input_func, max_size, timeout): + self._input_func = input_func + self._max_size = max_size + self._timeout = timeout + + # This will store the cache for this function, + # format - {caller1 : [OrderedDict1, last_refresh_time1], + # caller2 : [OrderedDict2, last_refresh_time2]}. + # In case of an instance method - the caller is the instance, + # in case called from a regular function - the caller is None. 
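# Aside: on Python 3 this whole fallback is functools.lru_cache; the class
# below re-implements it for the Python 2.7 interpreter this venv targets.
# A sketch of the behaviour being emulated (Python 3 semantics):
from functools import lru_cache

@lru_cache(maxsize=255)
def slow_square(x):
    print('computing %r' % (x,))   # printed only on a cache miss
    return x * x

slow_square(3)
slow_square(3)                     # served from the cache, no print
print(slow_square.cache_info())    # hits=1, misses=1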
+ self._caches_dict = {} + + def cache_clear(self, caller=None): + # Remove the cache for the caller, only if exists: + if caller in self._caches_dict: + del self._caches_dict[caller] + self._caches_dict[caller] = [collections.OrderedDict(), + time.time()] + + def __get__(self, obj, objtype): + """ Called for instance methods """ + return_func = functools.partial(self._cache_wrapper, obj) + return_func.cache_clear = functools.partial(self.cache_clear, + obj) + # Return the wrapped function and wraps it to maintain the + # docstring and the name of the original function: + return functools.wraps(self._input_func)(return_func) + + def __call__(self, *args, **kwargs): + """ Called for regular functions """ + return self._cache_wrapper(None, *args, **kwargs) + + # Set the cache_clear function in the __call__ operator: + __call__.cache_clear = cache_clear + + def _cache_wrapper(self, caller, *args, **kwargs): + # Create a unique key including the types (in order to + # differentiate between 1 and '1'): + kwargs_key = "".join(map( + lambda x: str(x) + str(type(kwargs[x])) + str(kwargs[x]), + sorted(kwargs))) + key = "".join( + map(lambda x: str(type(x)) + str(x), args)) + kwargs_key + + # Check if caller exists, if not create one: + if caller not in self._caches_dict: + self._caches_dict[caller] = [collections.OrderedDict(), + time.time()] + else: + # Validate in case the refresh time has passed: + if self._timeout is not None: + if (time.time() - self._caches_dict[caller][1] + > self._timeout): + self.cache_clear(caller) + + # Check if the key exists, if so - return it: + cur_caller_cache_dict = self._caches_dict[caller][0] + if key in cur_caller_cache_dict: + return cur_caller_cache_dict[key] + + # Validate we didn't exceed the max_size: + if len(cur_caller_cache_dict) >= self._max_size: + # Delete the first item in the dict: + try: + cur_caller_cache_dict.popitem(False) + except KeyError: + pass + # Call the function and store the data in the cache (call it + # with the caller in case it's an instance function) + if caller is not None: + args = (caller,) + args + cur_caller_cache_dict[key] = self._input_func(*args, **kwargs) + + return cur_caller_cache_dict[key] + + # Return the decorator wrapping the class (also wraps the instance to + # maintain the docstring and the name of the original function): + return (lambda input_func: functools.wraps(input_func)( + LruCacheClass(input_func, maxsize, timeout))) + + +class Complex: + def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None, + g_cons=None, g_args=()): + self.dim = dim + self.bounds = bounds + self.symmetry = symmetry # TODO: Define the functions to be used + # here in init to avoid if checks + self.gen = 0 + self.perm_cycle = 0 + + # Every cell is stored in a list of its generation, + # ex. the initial cell is stored in self.H[0] + # 1st get new cells are stored in self.H[1] etc. 
+ # When a cell is subgenerated it is removed from this list + + self.H = [] # Storage structure of cells + # Cache of all vertices + self.V = VertexCache(func, func_args, bounds, g_cons, g_args) + + # Generate n-cube here: + self.n_cube(dim, symmetry=symmetry) + + # TODO: Assign functions to a the complex instead + if symmetry: + self.generation_cycle = 1 + # self.centroid = self.C0()[-1].x + # self.C0.centroid = self.centroid + else: + self.add_centroid() + + self.H.append([]) + self.H[0].append(self.C0) + self.hgr = self.C0.homology_group_rank() + self.hgrd = 0 # Complex group rank differential + # self.hgr = self.C0.hg_n + + # Build initial graph + self.graph_map() + + self.performance = [] + self.performance.append(0) + self.performance.append(0) + + def __call__(self): + return self.H + + def n_cube(self, dim, symmetry=False, printout=False): + """ + Generate the simplicial triangulation of the n dimensional hypercube + containing 2**n vertices + """ + origin = list(np.zeros(dim, dtype=int)) + self.origin = origin + supremum = list(np.ones(dim, dtype=int)) + self.supremum = supremum + + # tuple versions for indexing + origintuple = tuple(origin) + supremumtuple = tuple(supremum) + + x_parents = [origintuple] + + if symmetry: + self.C0 = Simplex(0, 0, 0, self.dim) # Initial cell object + self.C0.add_vertex(self.V[origintuple]) + + i_s = 0 + self.perm_symmetry(i_s, x_parents, origin) + self.C0.add_vertex(self.V[supremumtuple]) + else: + self.C0 = Cell(0, 0, origin, supremum) # Initial cell object + self.C0.add_vertex(self.V[origintuple]) + self.C0.add_vertex(self.V[supremumtuple]) + + i_parents = [] + self.perm(i_parents, x_parents, origin) + + if printout: + print("Initial hyper cube:") + for v in self.C0(): + v.print_out() + + def perm(self, i_parents, x_parents, xi): + # TODO: Cut out of for if outside linear constraint cutting planes + xi_t = tuple(xi) + + # Construct required iterator + iter_range = [x for x in range(self.dim) if x not in i_parents] + + for i in iter_range: + i2_parents = copy.copy(i_parents) + i2_parents.append(i) + xi2 = copy.copy(xi) + xi2[i] = 1 + # Make new vertex list a hashable tuple + xi2_t = tuple(xi2) + # Append to cell + self.C0.add_vertex(self.V[xi2_t]) + # Connect neighbours and vice versa + # Parent point + self.V[xi2_t].connect(self.V[xi_t]) + + # Connect all family of simplices in parent containers + for x_ip in x_parents: + self.V[xi2_t].connect(self.V[x_ip]) + + x_parents2 = copy.copy(x_parents) + x_parents2.append(xi_t) + + # Permutate + self.perm(i2_parents, x_parents2, xi2) + + def perm_symmetry(self, i_s, x_parents, xi): + # TODO: Cut out of for if outside linear constraint cutting planes + xi_t = tuple(xi) + xi2 = copy.copy(xi) + xi2[i_s] = 1 + # Make new vertex list a hashable tuple + xi2_t = tuple(xi2) + # Append to cell + self.C0.add_vertex(self.V[xi2_t]) + # Connect neighbours and vice versa + # Parent point + self.V[xi2_t].connect(self.V[xi_t]) + + # Connect all family of simplices in parent containers + for x_ip in x_parents: + self.V[xi2_t].connect(self.V[x_ip]) + + x_parents2 = copy.copy(x_parents) + x_parents2.append(xi_t) + + i_s += 1 + if i_s == self.dim: + return + # Permutate + self.perm_symmetry(i_s, x_parents2, xi2) + + def add_centroid(self): + """Split the central edge between the origin and supremum of + a cell and add the new vertex to the complex""" + self.centroid = list( + (np.array(self.origin) + np.array(self.supremum)) / 2.0) + self.C0.add_vertex(self.V[tuple(self.centroid)]) + self.C0.centroid = self.centroid + + # 
Disconnect origin and supremum + self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)]) + + # Connect centroid to all other vertices + for v in self.C0(): + self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)]) + + self.centroid_added = True + return + + # Construct incidence array: + def incidence(self): + if self.centroid_added: + self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1], + dtype=int) + else: + self.structure = np.zeros([2 ** self.dim, 2 ** self.dim], + dtype=int) + + for v in self.HC.C0(): + for v2 in v.nn: + self.structure[v.index, v2.index] = 1 + + return + + # A more sparse incidence generator: + def graph_map(self): + """ Make a list of size 2**n + 1 where an entry is a vertex + incidence, each list element contains a list of indexes + corresponding to that entries neighbours""" + + self.graph = [[v2.index for v2 in v.nn] for v in self.C0()] + + # Graph structure method: + # 0. Capture the indices of the initial cell. + # 1. Generate new origin and supremum scalars based on current generation + # 2. Generate a new set of vertices corresponding to a new + # "origin" and "supremum" + # 3. Connected based on the indices of the previous graph structure + # 4. Disconnect the edges in the original cell + + def sub_generate_cell(self, C_i, gen): + """Subgenerate a cell `C_i` of generation `gen` and + homology group rank `hgr`.""" + origin_new = tuple(C_i.centroid) + centroid_index = len(C_i()) - 1 + + # If not gen append + try: + self.H[gen] + except IndexError: + self.H.append([]) + + # Generate subcubes using every extreme vertex in C_i as a supremum + # and the centroid of C_i as the origin + H_new = [] # list storing all the new cubes split from C_i + for i, v in enumerate(C_i()[:-1]): + supremum = tuple(v.x) + H_new.append( + self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n)) + + for i, connections in enumerate(self.graph): + # Present vertex V_new[i]; connect to all connections: + if i == centroid_index: # Break out of centroid + break + + for j in connections: + C_i()[i].disconnect(C_i()[j]) + + # Destroy the old cell + if C_i is not self.C0: # Garbage collector does this anyway; not needed + del C_i + + # TODO: Recalculate all the homology group ranks of each cell + return H_new + + def split_generation(self): + """ + Run sub_generate_cell for every cell in the current complex self.gen + """ + no_splits = False # USED IN SHGO + try: + for c in self.H[self.gen]: + if self.symmetry: + # self.sub_generate_cell_symmetry(c, self.gen + 1) + self.split_simplex_symmetry(c, self.gen + 1) + else: + self.sub_generate_cell(c, self.gen + 1) + except IndexError: + no_splits = True # USED IN SHGO + + self.gen += 1 + return no_splits # USED IN SHGO + + # @lru_cache(maxsize=None) + def construct_hypercube(self, origin, supremum, gen, hgr, + printout=False): + """ + Build a hypercube with triangulations symmetric to C0. 
+ + Parameters + ---------- + origin : vec + supremum : vec (tuple) + gen : generation + hgr : parent homology group rank + """ + + # Initiate new cell + C_new = Cell(gen, hgr, origin, supremum) + C_new.centroid = tuple( + (np.array(origin) + np.array(supremum)) / 2.0) + + # Build new indexed vertex list + V_new = [] + + # Cached calculation + for i, v in enumerate(self.C0()[:-1]): + t1 = self.generate_sub_cell_t1(origin, v.x) + t2 = self.generate_sub_cell_t2(supremum, v.x) + + vec = t1 + t2 + + vec = tuple(vec) + C_new.add_vertex(self.V[vec]) + V_new.append(vec) + + # Add new centroid + C_new.add_vertex(self.V[C_new.centroid]) + V_new.append(C_new.centroid) + + # Connect new vertices #TODO: Thread into other loop; no need for V_new + for i, connections in enumerate(self.graph): + # Present vertex V_new[i]; connect to all connections: + for j in connections: + self.V[V_new[i]].connect(self.V[V_new[j]]) + + if printout: + print("A sub hyper cube with:") + print("origin: {}".format(origin)) + print("supremum: {}".format(supremum)) + for v in C_new(): + v.print_out() + + # Append the new cell to the to complex + self.H[gen].append(C_new) + + return C_new + + def split_simplex_symmetry(self, S, gen): + """ + Split a hypersimplex S into two sub simplices by building a hyperplane + which connects to a new vertex on an edge (the longest edge in + dim = {2, 3}) and every other vertex in the simplex that is not + connected to the edge being split. + + This function utilizes the knowledge that the problem is specified + with symmetric constraints + + The longest edge is tracked by an ordering of the + vertices in every simplices, the edge between first and second + vertex is the longest edge to be split in the next iteration. + """ + # If not gen append + try: + self.H[gen] + except IndexError: + self.H.append([]) + + # Find new vertex. + # V_new_x = tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0) + s = S() + firstx = s[0].x + lastx = s[-1].x + V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)] + + # Disconnect old longest edge + self.V[firstx].disconnect(self.V[lastx]) + + # Connect new vertices to all other vertices + for v in s[:]: + v.connect(self.V[V_new.x]) + + # New "lower" simplex + S_new_l = Simplex(gen, S.hg_n, self.generation_cycle, + self.dim) + S_new_l.add_vertex(s[0]) + S_new_l.add_vertex(V_new) # Add new vertex + for v in s[1:-1]: # Add all other vertices + S_new_l.add_vertex(v) + + # New "upper" simplex + S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim) + + # First vertex on new long edge + S_new_u.add_vertex(s[S_new_u.generation_cycle + 1]) + + for v in s[1:-1]: # Remaining vertices + S_new_u.add_vertex(v) + + for k, v in enumerate(s[1:-1]): # iterate through inner vertices + if k == S.generation_cycle: + S_new_u.add_vertex(V_new) + else: + S_new_u.add_vertex(v) + + S_new_u.add_vertex(s[-1]) # Second vertex on new long edge + + self.H[gen].append(S_new_l) + self.H[gen].append(S_new_u) + + return + + @lru_cache(maxsize=None) + def generate_sub_cell_2(self, origin, supremum, v_x_t): # No hits + """ + Use the origin and supremum vectors to find a new cell in that + subspace direction + + NOTE: NOT CURRENTLY IN USE! 
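# Aside: generate_sub_cell_t1/_t2 below split the affine map that places a
# child cell: vec = origin * (1 - v) + supremum * v for each vertex v of the
# unit cube. A sketch of the combined map (values are made up):
import numpy as np

origin, supremum = np.array([0.0, 0.0]), np.array([0.5, 1.0])
for v in [(0, 0), (1, 0), (0, 1), (1, 1)]:
    v = np.array(v)
    vec = origin - origin * v + supremum * v   # t1 + t2 in the code below
    print(tuple(vec))    # corners of the sub-cell spanned by the two points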
+ + Parameters + ---------- + origin : tuple vector (hashable) + supremum : tuple vector (hashable) + + Returns + ------- + + """ + t1 = self.generate_sub_cell_t1(origin, v_x_t) + t2 = self.generate_sub_cell_t2(supremum, v_x_t) + vec = t1 + t2 + return tuple(vec) + + @lru_cache(maxsize=None) + def generate_sub_cell_t1(self, origin, v_x): + # TODO: Calc these arrays outside + v_o = np.array(origin) + return v_o - v_o * np.array(v_x) + + @lru_cache(maxsize=None) + def generate_sub_cell_t2(self, supremum, v_x): + v_s = np.array(supremum) + return v_s * np.array(v_x) + + # Plots + def plot_complex(self): + """ + Here C is the LIST of simplexes S in the + 2 or 3 dimensional complex + + To plot a single simplex S in a set C, use ex. [C[0]] + """ + from matplotlib import pyplot + if self.dim == 2: + pyplot.figure() + for C in self.H: + for c in C: + for v in c(): + if self.bounds is None: + x_a = np.array(v.x, dtype=float) + else: + x_a = np.array(v.x, dtype=float) + for i in range(len(self.bounds)): + x_a[i] = (x_a[i] * (self.bounds[i][1] + - self.bounds[i][0]) + + self.bounds[i][0]) + + # logging.info('v.x_a = {}'.format(x_a)) + + pyplot.plot([x_a[0]], [x_a[1]], 'o') + + xlines = [] + ylines = [] + for vn in v.nn: + if self.bounds is None: + xn_a = np.array(vn.x, dtype=float) + else: + xn_a = np.array(vn.x, dtype=float) + for i in range(len(self.bounds)): + xn_a[i] = (xn_a[i] * (self.bounds[i][1] + - self.bounds[i][0]) + + self.bounds[i][0]) + + # logging.info('vn.x = {}'.format(vn.x)) + + xlines.append(xn_a[0]) + ylines.append(xn_a[1]) + xlines.append(x_a[0]) + ylines.append(x_a[1]) + + pyplot.plot(xlines, ylines) + + if self.bounds is None: + pyplot.ylim([-1e-2, 1 + 1e-2]) + pyplot.xlim([-1e-2, 1 + 1e-2]) + else: + pyplot.ylim( + [self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2]) + pyplot.xlim( + [self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2]) + + pyplot.show() + + elif self.dim == 3: + fig = pyplot.figure() + ax = fig.add_subplot(111, projection='3d') + + for C in self.H: + for c in C: + for v in c(): + x = [] + y = [] + z = [] + # logging.info('v.x = {}'.format(v.x)) + x.append(v.x[0]) + y.append(v.x[1]) + z.append(v.x[2]) + for vn in v.nn: + x.append(vn.x[0]) + y.append(vn.x[1]) + z.append(vn.x[2]) + x.append(v.x[0]) + y.append(v.x[1]) + z.append(v.x[2]) + # logging.info('vn.x = {}'.format(vn.x)) + + ax.plot(x, y, z, label='simplex') + + pyplot.show() + else: + print("dimension higher than 3 or wrong complex format") + return + + +class VertexGroup(object): + def __init__(self, p_gen, p_hgr): + self.p_gen = p_gen # parent generation + self.p_hgr = p_hgr # parent homology group rank + self.hg_n = None + self.hg_d = None + + # Maybe add parent homology group rank total history + # This is the sum off all previously split cells + # cumulatively throughout its entire history + self.C = [] + + def __call__(self): + return self.C + + def add_vertex(self, V): + if V not in self.C: + self.C.append(V) + + def homology_group_rank(self): + """ + Returns the homology group order of the current cell + """ + if self.hg_n is None: + self.hg_n = sum(1 for v in self.C if v.minimiser()) + + return self.hg_n + + def homology_group_differential(self): + """ + Returns the difference between the current homology group of the + cell and it's parent group + """ + if self.hg_d is None: + self.hgd = self.hg_n - self.p_hgr + + return self.hgd + + def polytopial_sperner_lemma(self): + """ + Returns the number of stationary points theoretically contained in the + cell based information currently known about 
the cell + """ + pass + + def print_out(self): + """ + Print the current cell to console + """ + for v in self(): + v.print_out() + + +class Cell(VertexGroup): + """ + Contains a cell that is symmetric to the initial hypercube triangulation + """ + + def __init__(self, p_gen, p_hgr, origin, supremum): + super(Cell, self).__init__(p_gen, p_hgr) + + self.origin = origin + self.supremum = supremum + self.centroid = None # (Not always used) + # TODO: self.bounds + + +class Simplex(VertexGroup): + """ + Contains a simplex that is symmetric to the initial symmetry constrained + hypersimplex triangulation + """ + + def __init__(self, p_gen, p_hgr, generation_cycle, dim): + super(Simplex, self).__init__(p_gen, p_hgr) + + self.generation_cycle = (generation_cycle + 1) % (dim - 1) + + +class Vertex: + def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None, + g_cons_args=(), nn=None, index=None): + self.x = x + self.order = sum(x) + x_a = np.array(x, dtype=float) + if bounds is not None: + for i, (lb, ub) in enumerate(bounds): + x_a[i] = x_a[i] * (ub - lb) + lb + + # TODO: Make saving the array structure optional + self.x_a = x_a + + # Note Vertex is only initiated once for all x so only + # evaluated once + if func is not None: + self.feasible = True + if g_cons is not None: + for g, args in zip(g_cons, g_cons_args): + if g(self.x_a, *args) < 0.0: + self.f = np.inf + self.feasible = False + break + if self.feasible: + self.f = func(x_a, *func_args) + + if nn is not None: + self.nn = nn + else: + self.nn = set() + + self.fval = None + self.check_min = True + + # Index: + if index is not None: + self.index = index + + def __hash__(self): + return hash(self.x) + + def connect(self, v): + if v is not self and v not in self.nn: + self.nn.add(v) + v.nn.add(self) + + if self.minimiser(): + v._min = False + v.check_min = False + + # TEMPORARY + self.check_min = True + v.check_min = True + + def disconnect(self, v): + if v in self.nn: + self.nn.remove(v) + v.nn.remove(self) + self.check_min = True + v.check_min = True + + def minimiser(self): + """Check whether this vertex is strictly less than all its neighbours""" + if self.check_min: + self._min = all(self.f < v.f for v in self.nn) + self.check_min = False + + return self._min + + def print_out(self): + print("Vertex: {}".format(self.x)) + constr = 'Connections: ' + for vc in self.nn: + constr += '{} '.format(vc.x) + + print(constr) + print('Order = {}'.format(self.order)) + + +class VertexCache: + def __init__(self, func, func_args=(), bounds=None, g_cons=None, + g_cons_args=(), indexed=True): + + self.cache = {} + self.func = func + self.g_cons = g_cons + self.g_cons_args = g_cons_args + self.func_args = func_args + self.bounds = bounds + self.nfev = 0 + self.size = 0 + + if indexed: + self.index = -1 + + def __getitem__(self, x, indexed=True): + try: + return self.cache[x] + except KeyError: + if indexed: + self.index += 1 + xval = Vertex(x, bounds=self.bounds, + func=self.func, func_args=self.func_args, + g_cons=self.g_cons, + g_cons_args=self.g_cons_args, + index=self.index) + else: + xval = Vertex(x, bounds=self.bounds, + func=self.func, func_args=self.func_args, + g_cons=self.g_cons, + g_cons_args=self.g_cons_args) + + # logging.info("New generated vertex at x = {}".format(x)) + # NOTE: Surprisingly high performance increase if logging is commented out + self.cache[x] = xval + + # TODO: Check + if self.func is not None: + if self.g_cons is not None: + if xval.feasible: + self.nfev += 1 + self.size += 1 + else: + self.size += 1 + 
else: + self.nfev += 1 + self.size += 1 + + return self.cache[x] diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/triangulation.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/triangulation.pyc new file mode 100644 index 0000000..f1b678e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_shgo_lib/triangulation.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_slsqp.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/_slsqp.so new file mode 100755 index 0000000..3f65eac Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_slsqp.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_spectral.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_spectral.py new file mode 100644 index 0000000..f903fe1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_spectral.py @@ -0,0 +1,259 @@ +""" +Spectral Algorithm for Nonlinear Equations +""" +from __future__ import division, absolute_import, print_function + +import collections + +import numpy as np +from scipy.optimize import OptimizeResult +from scipy.optimize.optimize import _check_unknown_options +from .linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng + +class _NoConvergence(Exception): + pass + + +def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000, + fnorm=None, callback=None, disp=False, M=10, eta_strategy=None, + sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options): + r""" + Solve nonlinear equation with the DF-SANE method + + Options + ------- + ftol : float, optional + Relative norm tolerance. + fatol : float, optional + Absolute norm tolerance. + Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``. + fnorm : callable, optional + Norm to use in the convergence check. If None, 2-norm is used. + maxfev : int, optional + Maximum number of function evaluations. + disp : bool, optional + Whether to print convergence process to stdout. + eta_strategy : callable, optional + Choice of the ``eta_k`` parameter, which gives slack for growth + of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with + `k` the iteration number, `x` the current iterate and `F` the current + residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``. + Default: ``||F||**2 / (1 + k)**2``. + sigma_eps : float, optional + The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``. + Default: 1e-10 + sigma_0 : float, optional + Initial spectral coefficient. + Default: 1.0 + M : int, optional + Number of iterates to include in the nonmonotonic line search. + Default: 10 + line_search : {'cruz', 'cheng'} + Type of line search to employ. 'cruz' is the original one defined in + [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is + a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)]. + Default: 'cruz' + + References + ---------- + .. [1] "Spectral residual method without gradient information for solving + large-scale nonlinear systems of equations." W. La Cruz, + J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). + .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014). + .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009). 
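# Aside: this module is reached through the public front end
# scipy.optimize.root(..., method='df-sane'). A runnable sketch on a toy
# system (the system itself is invented for illustration):
import numpy as np
from scipy.optimize import root

def F(x):
    return np.array([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0,
                     0.5 * (x[1] - x[0]) ** 3 + x[1]])

sol = root(F, x0=[0.0, 0.0], method='df-sane')
print(sol.success, sol.x)    # converges with no Jacobian information at all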
+ + """ + _check_unknown_options(unknown_options) + + if line_search not in ('cheng', 'cruz'): + raise ValueError("Invalid value %r for 'line_search'" % (line_search,)) + + nexp = 2 + + if eta_strategy is None: + # Different choice from [1], as their eta is not invariant + # vs. scaling of F. + def eta_strategy(k, x, F): + # Obtain squared 2-norm of the initial residual from the outer scope + return f_0 / (1 + k)**2 + + if fnorm is None: + def fnorm(F): + # Obtain squared 2-norm of the current residual from the outer scope + return f_k**(1.0/nexp) + + def fmerit(F): + return np.linalg.norm(F)**nexp + + nfev = [0] + f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit, nfev, maxfev, args) + + k = 0 + f_0 = f_k + sigma_k = sigma_0 + + F_0_norm = fnorm(F_k) + + # For the 'cruz' line search + prev_fs = collections.deque([f_k], M) + + # For the 'cheng' line search + Q = 1.0 + C = f_0 + + converged = False + message = "too many function evaluations required" + + while True: + F_k_norm = fnorm(F_k) + + if disp: + print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k)) + + if callback is not None: + callback(x_k, F_k) + + if F_k_norm < ftol * F_0_norm + fatol: + # Converged! + message = "successful convergence" + converged = True + break + + # Control spectral parameter, from [2] + if abs(sigma_k) > 1/sigma_eps: + sigma_k = 1/sigma_eps * np.sign(sigma_k) + elif abs(sigma_k) < sigma_eps: + sigma_k = sigma_eps + + # Line search direction + d = -sigma_k * F_k + + # Nonmonotone line search + eta = eta_strategy(k, x_k, F_k) + try: + if line_search == 'cruz': + alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta=eta) + elif line_search == 'cheng': + alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta=eta) + except _NoConvergence: + break + + # Update spectral parameter + s_k = xp - x_k + y_k = Fp - F_k + sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k) + + # Take step + x_k = xp + F_k = Fp + f_k = fp + + # Store function value + if line_search == 'cruz': + prev_fs.append(fp) + + k += 1 + + x = _wrap_result(x_k, is_complex, shape=x_shape) + F = _wrap_result(F_k, is_complex) + + result = OptimizeResult(x=x, success=converged, + message=message, + fun=F, nfev=nfev[0], nit=k) + + return result + + +def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()): + """ + Wrap a function and an initial value so that (i) complex values + are wrapped to reals, and (ii) value for a merit function + fmerit(x, f) is computed at the same time, (iii) iteration count + is maintained and an exception is raised if it is exceeded. + + Parameters + ---------- + func : callable + Function to wrap + x0 : ndarray + Initial value + fmerit : callable + Merit function fmerit(f) for computing merit value from residual. + nfev_list : list + List to store number of evaluations in. Should be [0] in the beginning. + maxfev : int + Maximum number of evaluations before _NoConvergence is raised. + args : tuple + Extra arguments to func + + Returns + ------- + wrap_func : callable + Wrapped function, to be called as + ``F, fp = wrap_func(x0)`` + x0_wrap : ndarray of float + Wrapped initial value; raveled to 1D and complex + values mapped to reals. 
+ x0_shape : tuple + Shape of the initial value array + f : float + Merit function at F + F : ndarray of float + Residual at x0_wrap + is_complex : bool + Whether complex values were mapped to reals + + """ + x0 = np.asarray(x0) + x0_shape = x0.shape + F = np.asarray(func(x0, *args)).ravel() + is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F) + x0 = x0.ravel() + + nfev_list[0] = 1 + + if is_complex: + def wrap_func(x): + if nfev_list[0] >= maxfev: + raise _NoConvergence() + nfev_list[0] += 1 + z = _real2complex(x).reshape(x0_shape) + v = np.asarray(func(z, *args)).ravel() + F = _complex2real(v) + f = fmerit(F) + return f, F + + x0 = _complex2real(x0) + F = _complex2real(F) + else: + def wrap_func(x): + if nfev_list[0] >= maxfev: + raise _NoConvergence() + nfev_list[0] += 1 + x = x.reshape(x0_shape) + F = np.asarray(func(x, *args)).ravel() + f = fmerit(F) + return f, F + + return wrap_func, x0, x0_shape, fmerit(F), F, is_complex + + +def _wrap_result(result, is_complex, shape=None): + """ + Convert from real to complex and reshape result arrays. + """ + if is_complex: + z = _real2complex(result) + else: + z = result + if shape is not None: + z = z.reshape(shape) + return z + + +def _real2complex(x): + return np.ascontiguousarray(x, dtype=float).view(np.complex128) + + +def _complex2real(z): + return np.ascontiguousarray(z, dtype=complex).view(np.float64) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_spectral.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_spectral.pyc new file mode 100644 index 0000000..75d393a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_spectral.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/__init__.py new file mode 100644 index 0000000..537b73b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/__init__.py @@ -0,0 +1,12 @@ +from ._trlib import TRLIBQuadraticSubproblem + +__all__ = ['TRLIBQuadraticSubproblem', 'get_trlib_quadratic_subproblem'] + + +def get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0, disp=False): + def subproblem_factory(x, fun, jac, hess, hessp): + return TRLIBQuadraticSubproblem(x, fun, jac, hess, hessp, + tol_rel_i=tol_rel_i, + tol_rel_b=tol_rel_b, + disp=disp) + return subproblem_factory diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/__init__.pyc new file mode 100644 index 0000000..5de6d33 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/_trlib.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/_trlib.so new file mode 100755 index 0000000..0bd1683 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/_trlib.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/setup.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/setup.py new file mode 100644 index 0000000..0c00c93 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/setup.py @@ -0,0 +1,26 @@ +from __future__ import division, print_function, absolute_import + +def configuration(parent_package='', top_path=None): + from numpy import get_include + from scipy._build_utils.system_info 
import get_info, NotFoundError
+ from numpy.distutils.misc_util import Configuration
+
+ from os.path import join, dirname
+
+ lapack_opt = get_info('lapack_opt')
+ lib_inc = join(dirname(dirname(dirname(__file__))), '_lib')
+
+ config = Configuration('_trlib', parent_package, top_path)
+ config.add_extension('_trlib',
+ sources=['_trlib.c', 'trlib_krylov.c',
+ 'trlib_eigen_inverse.c', 'trlib_leftmost.c',
+ 'trlib_quadratic_zero.c', 'trlib_tri_factor.c'],
+ include_dirs=[get_include(), lib_inc, 'trlib'],
+ extra_info=lapack_opt,
+ )
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(**configuration(top_path='').todict())
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/setup.pyc
new file mode 100644
index 0000000..8d8af08
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trlib/setup.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion.py
new file mode 100644
index 0000000..9ac1e97
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion.py
@@ -0,0 +1,266 @@
+"""Trust-region optimization."""
+from __future__ import division, print_function, absolute_import
+
+import math
+
+import numpy as np
+import scipy.linalg
+from .optimize import (_check_unknown_options, wrap_function, _status_message,
+ OptimizeResult)
+
+__all__ = []
+
+
+class BaseQuadraticSubproblem(object):
+ """
+ Base/abstract class defining the quadratic model for trust-region
+ minimization. Child classes must implement the ``solve`` method.
+
+ Values of the objective function, jacobian and hessian (if provided) at
+ the current iterate ``x`` are evaluated on demand and then stored as
+ attributes ``fun``, ``jac``, ``hess``.
+ """
+
+ def __init__(self, x, fun, jac, hess=None, hessp=None):
+ self._x = x
+ self._f = None
+ self._g = None
+ self._h = None
+ self._g_mag = None
+ self._cauchy_point = None
+ self._newton_point = None
+ self._fun = fun
+ self._jac = jac
+ self._hess = hess
+ self._hessp = hessp
+
+ def __call__(self, p):
+ return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p))
+
+ @property
+ def fun(self):
+ """Value of objective function at current iteration."""
+ if self._f is None:
+ self._f = self._fun(self._x)
+ return self._f
+
+ @property
+ def jac(self):
+ """Value of jacobian of objective function at current iteration."""
+ if self._g is None:
+ self._g = self._jac(self._x)
+ return self._g
+
+ @property
+ def hess(self):
+ """Value of hessian of objective function at current iteration."""
+ if self._h is None:
+ self._h = self._hess(self._x)
+ return self._h
+
+ def hessp(self, p):
+ if self._hessp is not None:
+ return self._hessp(self._x, p)
+ else:
+ return np.dot(self.hess, p)
+
+ @property
+ def jac_mag(self):
+ """Magnitude of jacobian of objective function at current iteration."""
+ if self._g_mag is None:
+ self._g_mag = scipy.linalg.norm(self.jac)
+ return self._g_mag
+
+ def get_boundaries_intersections(self, z, d, trust_radius):
+ """
+ Solve the scalar quadratic equation ||z + t d|| == trust_radius.
+ This is like a line-sphere intersection.
+ Return the two values of t, sorted from low to high.
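# Aside: the method below dodges catastrophic cancellation by taking the
# large-magnitude root from -(b + sign(b) * sqrt(disc)) / (2a) and the other
# from the product identity ta * tb = c / a. The same trick in isolation:
import math

def stable_roots(a, b, c):
    sqrt_disc = math.sqrt(b * b - 4 * a * c)
    aux = b + math.copysign(sqrt_disc, b)   # no subtraction of near-equals
    return sorted([-aux / (2 * a), -2 * c / aux])

print(stable_roots(1.0, -4.0, 3.0))         # [1.0, 3.0]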
+ """ + a = np.dot(d, d) + b = 2 * np.dot(z, d) + c = np.dot(z, z) - trust_radius**2 + sqrt_discriminant = math.sqrt(b*b - 4*a*c) + + # The following calculation is mathematically + # equivalent to: + # ta = (-b - sqrt_discriminant) / (2*a) + # tb = (-b + sqrt_discriminant) / (2*a) + # but produce smaller round off errors. + # Look at Matrix Computation p.97 + # for a better justification. + aux = b + math.copysign(sqrt_discriminant, b) + ta = -aux / (2*a) + tb = -2*c / aux + return sorted([ta, tb]) + + def solve(self, trust_radius): + raise NotImplementedError('The solve method should be implemented by ' + 'the child class') + + +def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None, + subproblem=None, initial_trust_radius=1.0, + max_trust_radius=1000.0, eta=0.15, gtol=1e-4, + maxiter=None, disp=False, return_all=False, + callback=None, inexact=True, **unknown_options): + """ + Minimization of scalar function of one or more variables using a + trust-region algorithm. + + Options for the trust-region algorithm are: + initial_trust_radius : float + Initial trust radius. + max_trust_radius : float + Never propose steps that are longer than this value. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` + before successful termination. + maxiter : int + Maximum number of iterations to perform. + disp : bool + If True, print convergence message. + inexact : bool + Accuracy to solve subproblems. If True requires less nonlinear + iterations, but more vector products. Only effective for method + trust-krylov. + + This function is called by the `minimize` function. + It is not supposed to be called directly. + """ + _check_unknown_options(unknown_options) + + if jac is None: + raise ValueError('Jacobian is currently required for trust-region ' + 'methods') + if hess is None and hessp is None: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is currently required for trust-region methods') + if subproblem is None: + raise ValueError('A subproblem solving strategy is required for ' + 'trust-region methods') + if not (0 <= eta < 0.25): + raise Exception('invalid acceptance stringency') + if max_trust_radius <= 0: + raise Exception('the max trust radius must be positive') + if initial_trust_radius <= 0: + raise ValueError('the initial trust radius must be positive') + if initial_trust_radius >= max_trust_radius: + raise ValueError('the initial trust radius must be less than the ' + 'max trust radius') + + # force the initial guess into a nice format + x0 = np.asarray(x0).flatten() + + # Wrap the functions, for a couple reasons. + # This tracks how many times they have been called + # and it automatically passes the args. + nfun, fun = wrap_function(fun, args) + njac, jac = wrap_function(jac, args) + nhess, hess = wrap_function(hess, args) + nhessp, hessp = wrap_function(hessp, args) + + # limit the number of iterations + if maxiter is None: + maxiter = len(x0)*200 + + # init the search status + warnflag = 0 + + # initialize the search + trust_radius = initial_trust_radius + x = x0 + if return_all: + allvecs = [x] + m = subproblem(x, fun, jac, hess, hessp) + k = 0 + + # search for the function min + # do not even start if the gradient is small enough + while m.jac_mag >= gtol: + + # Solve the sub-problem. + # This gives us the proposed step relative to the current position + # and it tells us whether the proposed step + # has reached the trust region boundary or not. 
+ try: + p, hits_boundary = m.solve(trust_radius) + except np.linalg.linalg.LinAlgError as e: + warnflag = 3 + break + + # calculate the predicted value at the proposed point + predicted_value = m(p) + + # define the local approximation at the proposed point + x_proposed = x + p + m_proposed = subproblem(x_proposed, fun, jac, hess, hessp) + + # evaluate the ratio defined in equation (4.4) + actual_reduction = m.fun - m_proposed.fun + predicted_reduction = m.fun - predicted_value + if predicted_reduction <= 0: + warnflag = 2 + break + rho = actual_reduction / predicted_reduction + + # update the trust radius according to the actual/predicted ratio + if rho < 0.25: + trust_radius *= 0.25 + elif rho > 0.75 and hits_boundary: + trust_radius = min(2*trust_radius, max_trust_radius) + + # if the ratio is high enough then accept the proposed step + if rho > eta: + x = x_proposed + m = m_proposed + + # append the best guess, call back, increment the iteration count + if return_all: + allvecs.append(np.copy(x)) + if callback is not None: + callback(np.copy(x)) + k += 1 + + # check if the gradient is small enough to stop + if m.jac_mag < gtol: + warnflag = 0 + break + + # check if we have looked at enough iterations + if k >= maxiter: + warnflag = 1 + break + + # print some stuff if requested + status_messages = ( + _status_message['success'], + _status_message['maxiter'], + 'A bad approximation caused failure to predict improvement.', + 'A linalg error occurred, such as a non-psd Hessian.', + ) + if disp: + if warnflag == 0: + print(status_messages[warnflag]) + else: + print('Warning: ' + status_messages[warnflag]) + print(" Current function value: %f" % m.fun) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % nfun[0]) + print(" Gradient evaluations: %d" % njac[0]) + print(" Hessian evaluations: %d" % nhess[0]) + + result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag, + fun=m.fun, jac=m.jac, nfev=nfun[0], njev=njac[0], + nhev=nhess[0], nit=k, + message=status_messages[warnflag]) + + if hess is not None: + result['hess'] = m.hess + + if return_all: + result['allvecs'] = allvecs + + return result diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion.pyc new file mode 100644 index 0000000..9eae373 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/__init__.py new file mode 100644 index 0000000..549cfb9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/__init__.py @@ -0,0 +1,6 @@ +"""This module contains the equality constrained SQP solver.""" + + +from .minimize_trustregion_constr import _minimize_trustregion_constr + +__all__ = ['_minimize_trustregion_constr'] diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/__init__.pyc new file mode 100644 index 0000000..dee11e1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py 
new file mode 100644
index 0000000..74c36cb
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py
@@ -0,0 +1,391 @@
+import numpy as np
+import scipy.sparse as sps
+
+
+class CanonicalConstraint(object):
+    """Canonical constraint to use with trust-constr algorithm.
+
+    It represents the set of constraints of the form::
+
+        f_eq(x) = 0
+        f_ineq(x) <= 0
+
+    Where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see
+    below.
+
+    The class is supposed to be instantiated by factory methods, which
+    should prepare the parameters listed below.
+
+    Parameters
+    ----------
+    n_eq, n_ineq : int
+        Number of equality and inequality constraints respectively.
+    fun : callable
+        Function defining the constraints. The signature is
+        ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq`
+        components and ``c_ineq`` is ndarray with `n_ineq` components.
+    jac : callable
+        Function to evaluate the Jacobian of the constraint. The signature
+        is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are
+        either ndarray or csr_matrix of shapes (n_eq, n) and (n_ineq, n)
+        respectively.
+    hess : callable
+        Function to evaluate the Hessian of the constraints multiplied
+        by Lagrange multipliers, that is
+        ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is
+        ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied
+        shape (n, n) and provides a matrix-vector product operation
+        ``H.dot(p)``.
+    keep_feasible : ndarray, shape (n_ineq,)
+        Mask indicating which inequality constraints should be kept feasible.
+    """
+    def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible):
+        self.n_eq = n_eq
+        self.n_ineq = n_ineq
+        self.fun = fun
+        self.jac = jac
+        self.hess = hess
+        self.keep_feasible = keep_feasible
+
+    @classmethod
+    def from_PreparedConstraint(cls, constraint):
+        """Create an instance from a `PreparedConstraint` object."""
+        lb, ub = constraint.bounds
+        cfun = constraint.fun
+        keep_feasible = constraint.keep_feasible
+
+        if np.all(lb == -np.inf) and np.all(ub == np.inf):
+            return cls.empty(cfun.n)
+
+        if np.all(lb == ub):
+            return cls._equal_to_canonical(cfun, lb)
+        elif np.all(lb == -np.inf):
+            return cls._less_to_canonical(cfun, ub, keep_feasible)
+        elif np.all(ub == np.inf):
+            return cls._greater_to_canonical(cfun, lb, keep_feasible)
+        else:
+            return cls._interval_to_canonical(cfun, lb, ub, keep_feasible)
+
+    @classmethod
+    def empty(cls, n):
+        """Create an "empty" instance.
+
+        This "empty" instance is required to allow working with unconstrained
+        problems as if they have some constraints.
+        """
+        empty_fun = np.empty(0)
+        empty_jac = np.empty((0, n))
+        empty_hess = sps.csr_matrix((n, n))
+
+        def fun(x):
+            return empty_fun, empty_fun
+
+        def jac(x):
+            return empty_jac, empty_jac
+
+        def hess(x, v_eq, v_ineq):
+            return empty_hess
+
+        return cls(0, 0, fun, jac, hess, np.empty(0))
+
+    @classmethod
+    def concatenate(cls, canonical_constraints, sparse_jacobian):
+        """Concatenate multiple `CanonicalConstraint` into one.
+
+        `sparse_jacobian` (bool) determines the Jacobian format of the
+        concatenated constraint. Note that items in `canonical_constraints`
+        must have their Jacobians in the same format.
+ """ + def fun(x): + eq_all = [] + ineq_all = [] + for c in canonical_constraints: + eq, ineq = c.fun(x) + eq_all.append(eq) + ineq_all.append(ineq) + + return np.hstack(eq_all), np.hstack(ineq_all) + + if sparse_jacobian: + vstack = sps.vstack + else: + vstack = np.vstack + + def jac(x): + eq_all = [] + ineq_all = [] + for c in canonical_constraints: + eq, ineq = c.jac(x) + eq_all.append(eq) + ineq_all.append(ineq) + return vstack(eq_all), vstack(ineq_all) + + def hess(x, v_eq, v_ineq): + hess_all = [] + index_eq = 0 + index_ineq = 0 + for c in canonical_constraints: + vc_eq = v_eq[index_eq:index_eq + c.n_eq] + vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq] + hess_all.append(c.hess(x, vc_eq, vc_ineq)) + index_eq += c.n_eq + index_ineq += c.n_ineq + + def matvec(p): + result = np.zeros_like(p) + for h in hess_all: + result += h.dot(p) + return result + + n = x.shape[0] + return sps.linalg.LinearOperator((n, n), matvec, dtype=float) + + n_eq = sum(c.n_eq for c in canonical_constraints) + n_ineq = sum(c.n_ineq for c in canonical_constraints) + keep_feasible = np.hstack([c.keep_feasible for c in + canonical_constraints]) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _equal_to_canonical(cls, cfun, value): + empty_fun = np.empty(0) + n = cfun.n + + n_eq = value.shape[0] + n_ineq = 0 + keep_feasible = np.empty(0, dtype=bool) + + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + def fun(x): + return cfun.fun(x) - value, empty_fun + + def jac(x): + return cfun.jac(x), empty_jac + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, v_eq) + + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _less_to_canonical(cls, cfun, ub, keep_feasible): + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + finite_ub = ub < np.inf + n_eq = 0 + n_ineq = np.sum(finite_ub) + + if np.all(finite_ub): + def fun(x): + return empty_fun, cfun.fun(x) - ub + + def jac(x): + return empty_jac, cfun.jac(x) + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, v_ineq) + else: + finite_ub = np.nonzero(finite_ub)[0] + keep_feasible = keep_feasible[finite_ub] + ub = ub[finite_ub] + + def fun(x): + return empty_fun, cfun.fun(x)[finite_ub] - ub + + def jac(x): + return empty_jac, cfun.jac(x)[finite_ub] + + def hess(x, v_eq, v_ineq): + v = np.zeros(cfun.m) + v[finite_ub] = v_ineq + return cfun.hess(x, v) + + return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) + + @classmethod + def _greater_to_canonical(cls, cfun, lb, keep_feasible): + empty_fun = np.empty(0) + n = cfun.n + if cfun.sparse_jacobian: + empty_jac = sps.csr_matrix((0, n)) + else: + empty_jac = np.empty((0, n)) + + finite_lb = lb > -np.inf + n_eq = 0 + n_ineq = np.sum(finite_lb) + + if np.all(finite_lb): + def fun(x): + return empty_fun, lb - cfun.fun(x) + + def jac(x): + return empty_jac, -cfun.jac(x) + + def hess(x, v_eq, v_ineq): + return cfun.hess(x, -v_ineq) + else: + finite_lb = np.nonzero(finite_lb)[0] + keep_feasible = keep_feasible[finite_lb] + lb = lb[finite_lb] + + def fun(x): + return empty_fun, lb - cfun.fun(x)[finite_lb] + + def jac(x): + return empty_jac, -cfun.jac(x)[finite_lb] + + def hess(x, v_eq, v_ineq): + v = np.zeros(cfun.m) + v[finite_lb] = -v_ineq + return cfun.hess(x, v) + + return 
+
+    @classmethod
+    def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible):
+        lb_inf = lb == -np.inf
+        ub_inf = ub == np.inf
+        equal = lb == ub
+        less = lb_inf & ~ub_inf
+        greater = ub_inf & ~lb_inf
+        interval = ~equal & ~lb_inf & ~ub_inf
+
+        equal = np.nonzero(equal)[0]
+        less = np.nonzero(less)[0]
+        greater = np.nonzero(greater)[0]
+        interval = np.nonzero(interval)[0]
+        n_less = less.shape[0]
+        n_greater = greater.shape[0]
+        n_interval = interval.shape[0]
+        n_ineq = n_less + n_greater + 2 * n_interval
+        n_eq = equal.shape[0]
+
+        keep_feasible = np.hstack((keep_feasible[less],
+                                   keep_feasible[greater],
+                                   keep_feasible[interval],
+                                   keep_feasible[interval]))
+
+        def fun(x):
+            f = cfun.fun(x)
+            eq = f[equal] - lb[equal]
+            le = f[less] - ub[less]
+            ge = lb[greater] - f[greater]
+            il = f[interval] - ub[interval]
+            ig = lb[interval] - f[interval]
+            return eq, np.hstack((le, ge, il, ig))
+
+        def jac(x):
+            J = cfun.jac(x)
+            eq = J[equal]
+            le = J[less]
+            ge = -J[greater]
+            il = J[interval]
+            ig = -il
+            if sps.issparse(J):
+                ineq = sps.vstack((le, ge, il, ig))
+            else:
+                ineq = np.vstack((le, ge, il, ig))
+            return eq, ineq
+
+        def hess(x, v_eq, v_ineq):
+            n_start = 0
+            v_l = v_ineq[n_start:n_start + n_less]
+            n_start += n_less
+            v_g = v_ineq[n_start:n_start + n_greater]
+            n_start += n_greater
+            v_il = v_ineq[n_start:n_start + n_interval]
+            n_start += n_interval
+            v_ig = v_ineq[n_start:n_start + n_interval]
+
+            v = np.zeros_like(lb)
+            v[equal] = v_eq
+            v[less] = v_l
+            v[greater] = -v_g
+            v[interval] = v_il - v_ig
+
+            return cfun.hess(x, v)
+
+        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
+
+
+def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian):
+    """Convert initial values of the constraints to the canonical format.
+
+    The purpose is to avoid one additional call to the constraints at the
+    initial point. It takes the values saved in `PreparedConstraint`,
+    modifies them and concatenates them into the canonical constraint
+    format.
+ """ + c_eq = [] + c_ineq = [] + J_eq = [] + J_ineq = [] + + for c in prepared_constraints: + f = c.fun.f + J = c.fun.J + lb, ub = c.bounds + if np.all(lb == ub): + c_eq.append(f - lb) + J_eq.append(J) + elif np.all(lb == -np.inf): + finite_ub = ub < np.inf + c_ineq.append(f[finite_ub] - ub[finite_ub]) + J_ineq.append(J[finite_ub]) + elif np.all(ub == np.inf): + finite_lb = lb > -np.inf + c_ineq.append(lb[finite_lb] - f[finite_lb]) + J_ineq.append(-J[finite_lb]) + else: + lb_inf = lb == -np.inf + ub_inf = ub == np.inf + equal = lb == ub + less = lb_inf & ~ub_inf + greater = ub_inf & ~lb_inf + interval = ~equal & ~lb_inf & ~ub_inf + + c_eq.append(f[equal] - lb[equal]) + c_ineq.append(f[less] - ub[less]) + c_ineq.append(lb[greater] - f[greater]) + c_ineq.append(f[interval] - ub[interval]) + c_ineq.append(lb[interval] - f[interval]) + + J_eq.append(J[equal]) + J_ineq.append(J[less]) + J_ineq.append(-J[greater]) + J_ineq.append(J[interval]) + J_ineq.append(-J[interval]) + + c_eq = np.hstack(c_eq) if c_eq else np.empty(0) + c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0) + + if sparse_jacobian: + vstack = sps.vstack + empty = sps.csr_matrix((0, n)) + else: + vstack = np.vstack + empty = np.empty((0, n)) + + J_eq = vstack(J_eq) if J_eq else empty + J_ineq = vstack(J_ineq) if J_ineq else empty + + return c_eq, c_ineq, J_eq, J_ineq diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.pyc new file mode 100644 index 0000000..9347849 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py new file mode 100644 index 0000000..9117044 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py @@ -0,0 +1,218 @@ +"""Byrd-Omojokun Trust-Region SQP method.""" + +from __future__ import division, print_function, absolute_import +from scipy.sparse import eye as speye +from .projections import projections +from .qp_subproblem import modified_dogleg, projected_cg, box_intersections +import numpy as np +from numpy.linalg import norm + +__all__ = ['equality_constrained_sqp'] + + +def default_scaling(x): + n, = np.shape(x) + return speye(n) + + +def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess, + x0, fun0, grad0, constr0, + jac0, stop_criteria, + state, + initial_penalty, + initial_trust_radius, + factorization_method, + trust_lb=None, + trust_ub=None, + scaling=default_scaling): + """Solve nonlinear equality-constrained problem using trust-region SQP. + + Solve optimization problem: + + minimize fun(x) + subject to: constr(x) = 0 + + using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several + implementation details are based on [2]_ and [3]_, p. 549. + + References + ---------- + .. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the + implementation of an algorithm for large-scale equality + constrained optimization." SIAM Journal on + Optimization 8.3 (1998): 682-706. + .. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." 
SIAM Journal on Optimization 9.4 (1999): 877-900. + .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + PENALTY_FACTOR = 0.3 # Rho from formula (3.51), reference [2]_, p.891. + LARGE_REDUCTION_RATIO = 0.9 + INTERMEDIARY_REDUCTION_RATIO = 0.3 + SUFFICIENT_REDUCTION_RATIO = 1e-8 # Eta from reference [2]_, p.892. + TRUST_ENLARGEMENT_FACTOR_L = 7.0 + TRUST_ENLARGEMENT_FACTOR_S = 2.0 + MAX_TRUST_REDUCTION = 0.5 + MIN_TRUST_REDUCTION = 0.1 + SOC_THRESHOLD = 0.1 + TR_FACTOR = 0.8 # Zeta from formula (3.21), reference [2]_, p.885. + BOX_FACTOR = 0.5 + + n, = np.shape(x0) # Number of parameters + + # Set default lower and upper bounds. + if trust_lb is None: + trust_lb = np.full(n, -np.inf) + if trust_ub is None: + trust_ub = np.full(n, np.inf) + + # Initial values + x = np.copy(x0) + trust_radius = initial_trust_radius + penalty = initial_penalty + # Compute Values + f = fun0 + c = grad0 + b = constr0 + A = jac0 + S = scaling(x) + # Get projections + Z, LS, Y = projections(A, factorization_method) + # Compute least-square lagrange multipliers + v = -LS.dot(c) + # Compute Hessian + H = lagr_hess(x, v) + + # Update state parameters + optimality = norm(c + A.T.dot(v), np.inf) + constr_violation = norm(b, np.inf) if len(b) > 0 else 0 + cg_info = {'niter': 0, 'stop_cond': 0, + 'hits_boundary': False} + + last_iteration_failed = False + while not stop_criteria(state, x, last_iteration_failed, + optimality, constr_violation, + trust_radius, penalty, cg_info): + # Normal Step - `dn` + # minimize 1/2*||A dn + b||^2 + # subject to: + # ||dn|| <= TR_FACTOR * trust_radius + # BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub. + dn = modified_dogleg(A, Y, b, + TR_FACTOR*trust_radius, + BOX_FACTOR*trust_lb, + BOX_FACTOR*trust_ub) + + # Tangential Step - `dt` + # Solve the QP problem: + # minimize 1/2 dt.T H dt + dt.T (H dn + c) + # subject to: + # A dt = 0 + # ||dt|| <= sqrt(trust_radius**2 - ||dn||**2) + # lb - dn <= dt <= ub - dn + c_t = H.dot(dn) + c + b_t = np.zeros_like(b) + trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2) + lb_t = trust_lb - dn + ub_t = trust_ub - dn + dt, cg_info = projected_cg(H, c_t, Z, Y, b_t, + trust_radius_t, + lb_t, ub_t) + + # Compute update (normal + tangential steps). + d = dn + dt + + # Compute second order model: 1/2 d H d + c.T d + f. + quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d) + # Compute linearized constraint: l = A d + b. + linearized_constr = A.dot(d)+b + # Compute new penalty parameter according to formula (3.52), + # reference [2]_, p.891. + vpred = norm(b) - norm(linearized_constr) + # Guarantee `vpred` always positive, + # regardless of roundoff errors. + vpred = max(1e-16, vpred) + previous_penalty = penalty + if quadratic_model > 0: + new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred) + penalty = max(penalty, new_penalty) + # Compute predicted reduction according to formula (3.52), + # reference [2]_, p.891. + predicted_reduction = -quadratic_model + penalty*vpred + + # Compute merit function at current point + merit_function = f + penalty*norm(b) + # Evaluate function and constraints at trial point + x_next = x + S.dot(d) + f_next, b_next = fun_and_constr(x_next) + # Compute merit function at trial point + merit_function_next = f_next + penalty*norm(b_next) + # Compute actual reduction according to formula (3.54), + # reference [2]_, p.892. 
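+        # actual_reduction is measured on the merit function
+        # phi(x) = f(x) + penalty*||c(x)||, evaluated above at the current
+        # and trial points; reduction_ratio then compares it against the
+        # model-predicted reduction to decide whether to accept the step.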
+        actual_reduction = merit_function - merit_function_next
+        # Compute reduction ratio
+        reduction_ratio = actual_reduction / predicted_reduction
+
+        # Second order correction (SOC), reference [2]_, p.892.
+        if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \
+                norm(dn) <= SOC_THRESHOLD * norm(dt):
+            # Compute second order correction
+            y = -Y.dot(b_next)
+            # Make sure increment is inside box constraints
+            _, t, intersect = box_intersections(d, y, trust_lb, trust_ub)
+            # Compute tentative point
+            x_soc = x + S.dot(d + t*y)
+            f_soc, b_soc = fun_and_constr(x_soc)
+            # Recompute actual reduction
+            merit_function_soc = f_soc + penalty*norm(b_soc)
+            actual_reduction_soc = merit_function - merit_function_soc
+            # Recompute reduction ratio
+            reduction_ratio_soc = actual_reduction_soc / predicted_reduction
+            if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO:
+                x_next = x_soc
+                f_next = f_soc
+                b_next = b_soc
+                reduction_ratio = reduction_ratio_soc
+
+        # Readjust trust region step, formula (3.55), reference [2]_, p.892.
+        if reduction_ratio >= LARGE_REDUCTION_RATIO:
+            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d),
+                               trust_radius)
+        elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO:
+            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d),
+                               trust_radius)
+        # Reduce trust region step, according to reference [3]_, p.696.
+        elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO:
+            trust_reduction \
+                = (1-SUFFICIENT_REDUCTION_RATIO)/(1-reduction_ratio)
+            new_trust_radius = trust_reduction * norm(d)
+            if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius:
+                trust_radius *= MAX_TRUST_REDUCTION
+            elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius:
+                trust_radius = new_trust_radius
+            else:
+                trust_radius *= MIN_TRUST_REDUCTION
+
+        # Update iteration
+        if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO:
+            x = x_next
+            f, b = f_next, b_next
+            c, A = grad_and_jac(x)
+            S = scaling(x)
+            # Get projections
+            Z, LS, Y = projections(A, factorization_method)
+            # Compute least-squares Lagrange multipliers
+            v = -LS.dot(c)
+            # Compute Hessian
+            H = lagr_hess(x, v)
+            # Set Flag
+            last_iteration_failed = False
+            # Optimality values
+            optimality = norm(c + A.T.dot(v), np.inf)
+            constr_violation = norm(b, np.inf) if len(b) > 0 else 0
+        else:
+            penalty = previous_penalty
+            last_iteration_failed = True
+
+    return x, state
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.pyc
new file mode 100644
index 0000000..b6a1531
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py
new file mode 100644
index 0000000..20b0219
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py
@@ -0,0 +1,539 @@
+from __future__ import division, print_function, absolute_import
+import time
+import numpy as np
+from scipy.sparse.linalg import LinearOperator
+from .._differentiable_functions import VectorFunction
+from .._constraints import (
+    NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds)
+from .._hessian_update_strategy import BFGS
+from
..optimize import OptimizeResult +from .._differentiable_functions import ScalarFunction +from .equality_constrained_sqp import equality_constrained_sqp +from .canonical_constraint import (CanonicalConstraint, + initial_constraints_as_canonical) +from .tr_interior_point import tr_interior_point +from .report import BasicReport, SQPReport, IPReport + + +TERMINATION_MESSAGES = { + 0: "The maximum number of function evaluations is exceeded.", + 1: "`gtol` termination condition is satisfied.", + 2: "`xtol` termination condition is satisfied.", + 3: "`callback` function requested termination" +} + + +class HessianLinearOperator(object): + """Build LinearOperator from hessp""" + def __init__(self, hessp, n): + self.hessp = hessp + self.n = n + + def __call__(self, x, *args): + def matvec(p): + return self.hessp(x, p, *args) + + return LinearOperator((self.n, self.n), matvec=matvec) + + +class LagrangianHessian(object): + """The Hessian of the Lagrangian as LinearOperator. + + The Lagrangian is computed as the objective function plus all the + constraints multiplied with some numbers (Lagrange multipliers). + """ + def __init__(self, n, objective_hess, constraints_hess): + self.n = n + self.objective_hess = objective_hess + self.constraints_hess = constraints_hess + + def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)): + H_objective = self.objective_hess(x) + H_constraints = self.constraints_hess(x, v_eq, v_ineq) + + def matvec(p): + return H_objective.dot(p) + H_constraints.dot(p) + + return LinearOperator((self.n, self.n), matvec) + + +def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, + start_time, tr_radius, constr_penalty, cg_info): + state.nit += 1 + state.nfev = objective.nfev + state.njev = objective.ngev + state.nhev = objective.nhev + state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0 + for c in prepared_constraints] + + if not last_iteration_failed: + state.x = x + state.fun = objective.f + state.grad = objective.g + state.v = [c.fun.v for c in prepared_constraints] + state.constr = [c.fun.f for c in prepared_constraints] + state.jac = [c.fun.J for c in prepared_constraints] + # Compute Lagrangian Gradient + state.lagrangian_grad = np.copy(state.grad) + for c in prepared_constraints: + state.lagrangian_grad += c.fun.J.T.dot(c.fun.v) + state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf) + # Compute maximum constraint violation + state.constr_violation = 0 + for i in range(len(prepared_constraints)): + lb, ub = prepared_constraints[i].bounds + c = state.constr[i] + state.constr_violation = np.max([state.constr_violation, + np.max(lb - c), + np.max(c - ub)]) + + state.execution_time = time.time() - start_time + state.tr_radius = tr_radius + state.constr_penalty = constr_penalty + state.cg_niter += cg_info["niter"] + state.cg_stop_cond = cg_info["stop_cond"] + + return state + + +def update_state_ip(state, x, last_iteration_failed, objective, + prepared_constraints, start_time, + tr_radius, constr_penalty, cg_info, + barrier_parameter, barrier_tolerance): + state = update_state_sqp(state, x, last_iteration_failed, objective, + prepared_constraints, start_time, tr_radius, + constr_penalty, cg_info) + state.barrier_parameter = barrier_parameter + state.barrier_tolerance = barrier_tolerance + 
return state
+
+
+def _minimize_trustregion_constr(fun, x0, args, grad,
+                                 hess, hessp, bounds, constraints,
+                                 xtol=1e-8, gtol=1e-8,
+                                 barrier_tol=1e-8,
+                                 sparse_jacobian=None,
+                                 callback=None, maxiter=1000,
+                                 verbose=0, finite_diff_rel_step=None,
+                                 initial_constr_penalty=1.0, initial_tr_radius=1.0,
+                                 initial_barrier_parameter=0.1,
+                                 initial_barrier_tolerance=0.1,
+                                 factorization_method=None,
+                                 disp=False):
+    """Minimize a scalar function subject to constraints.
+
+    Parameters
+    ----------
+    gtol : float, optional
+        Tolerance for termination by the norm of the Lagrangian gradient.
+        The algorithm will terminate when both the infinity norm (i.e. max
+        abs value) of the Lagrangian gradient and the constraint violation
+        are smaller than ``gtol``. Default is 1e-8.
+    xtol : float, optional
+        Tolerance for termination by the change of the independent variable.
+        The algorithm will terminate when ``tr_radius < xtol``, where
+        ``tr_radius`` is the radius of the trust region used in the algorithm.
+        Default is 1e-8.
+    barrier_tol : float, optional
+        Threshold on the barrier parameter for the algorithm termination.
+        When inequality constraints are present the algorithm will terminate
+        only when the barrier parameter is less than `barrier_tol`.
+        Default is 1e-8.
+    sparse_jacobian : {bool, None}, optional
+        Determines how to represent Jacobians of the constraints. If bool,
+        then Jacobians of all the constraints will be converted to the
+        corresponding format. If None (default), then Jacobians won't be
+        converted, but the algorithm can proceed only if they all have the
+        same format.
+    initial_tr_radius : float, optional
+        Initial trust radius. The trust radius gives the maximum distance
+        between solution points in consecutive iterations. It reflects the
+        trust the algorithm puts in the local approximation of the
+        optimization problem. For an accurate local approximation the
+        trust-region should be large and for an approximation valid only
+        close to the current point it should be a small one. The trust radius
+        is automatically updated throughout the optimization process, with
+        ``initial_tr_radius`` being its initial value. Default is 1
+        (recommended in [1]_, p. 19).
+    initial_constr_penalty : float, optional
+        Initial constraints penalty parameter. The penalty parameter is used
+        for balancing the requirements of decreasing the objective function
+        and satisfying the constraints. It is used for defining the merit
+        function:
+        ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
+        where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
+        the constraints. The merit function is used for accepting or rejecting
+        trial points and ``constr_penalty`` weights the two conflicting goals
+        of reducing objective function and constraints. The penalty is
+        automatically updated throughout the optimization process, with
+        ``initial_constr_penalty`` being its initial value. Default is 1
+        (recommended in [1]_, p 19).
+    initial_barrier_parameter, initial_barrier_tolerance : float, optional
+        Initial barrier parameter and initial tolerance for the barrier
+        subproblem. Both are used only when inequality constraints are
+        present. For dealing with optimization problems ``min_x f(x)``
+        subject to inequality constraints ``c(x) <= 0`` the algorithm
+        introduces slack variables, solving the problem
+        ``min_(x,s) f(x) - barrier_parameter*sum(ln(s))`` subject to the
+        equality constraints ``c(x) + s = 0`` instead of the original
+        problem.
+        This subproblem is solved for decreasing values of
+        ``barrier_parameter`` and with decreasing tolerances for the
+        termination, starting with ``initial_barrier_parameter`` for the
+        barrier parameter and ``initial_barrier_tolerance`` for the barrier
+        tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19).
+    factorization_method : string or None, optional
+        Method to factorize the Jacobian of the constraints. Use None
+        (default) for the auto selection or one of:
+
+            - 'NormalEquation' (requires scikit-sparse)
+            - 'AugmentedSystem'
+            - 'QRFactorization'
+            - 'SVDFactorization'
+
+        The methods 'NormalEquation' and 'AugmentedSystem' can be used only
+        with sparse constraints. The projections required by the algorithm
+        will be computed using, respectively, the normal equation and the
+        augmented system approaches explained in [1]_. 'NormalEquation'
+        computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
+        performs the LU factorization of an augmented system. They usually
+        provide similar results. 'AugmentedSystem' is used by default for
+        sparse matrices.
+
+        The methods 'QRFactorization' and 'SVDFactorization' can be used
+        only with dense constraints. They compute the required projections
+        using, respectively, QR and SVD factorizations. The 'SVDFactorization'
+        method can cope with Jacobian matrices with deficient row rank and
+        will be used whenever other factorization methods fail (which may
+        imply the conversion of sparse matrices to a dense format when
+        required). By default 'QRFactorization' is used for dense matrices.
+    finite_diff_rel_step : None or array_like, optional
+        Relative step size for the finite difference approximation.
+    maxiter : int, optional
+        Maximum number of algorithm iterations. Default is 1000.
+    verbose : {0, 1, 2, 3}, optional
+        Level of algorithm's verbosity:
+
+            * 0 (default) : work silently.
+            * 1 : display a termination report.
+            * 2 : display progress during iterations.
+            * 3 : display progress during iterations (more complete report).
+
+    disp : bool, optional
+        If True, then `verbose` will be set to 1 if it was 0.
+        Default is False.
+
+    Returns
+    -------
+    `OptimizeResult` with the fields documented below. Note the following:
+
+        1. All values corresponding to the constraints are ordered as they
+           were passed to the solver. Values corresponding to `bounds`
+           constraints are put *after* other constraints.
+        2. All numbers of function, Jacobian or Hessian evaluations correspond
+           to numbers of actual Python function calls. It means, for example,
+           that if a Jacobian is estimated by finite differences then the
+           number of Jacobian evaluations will be zero and the number of
+           function evaluations will be incremented by all calls during the
+           finite difference estimation.
+
+    x : ndarray, shape (n,)
+        Solution found.
+    optimality : float
+        Infinity norm of the Lagrangian gradient at the solution.
+    constr_violation : float
+        Maximum constraint violation at the solution.
+    fun : float
+        Objective function at the solution.
+    grad : ndarray, shape (n,)
+        Gradient of the objective function at the solution.
+    lagrangian_grad : ndarray, shape (n,)
+        Gradient of the Lagrangian function at the solution.
+    nit : int
+        Total number of iterations.
+    nfev : integer
+        Number of the objective function evaluations.
+    ngev : integer
+        Number of the objective function gradient evaluations.
+    nhev : integer
+        Number of the objective function Hessian evaluations.
+    cg_niter : int
+        Total number of the conjugate gradient method iterations.
+    method : {'equality_constrained_sqp', 'tr_interior_point'}
+        Optimization method used.
+    constr : list of ndarray
+        List of constraint values at the solution.
+    jac : list of {ndarray, sparse matrix}
+        List of the Jacobian matrices of the constraints at the solution.
+    v : list of ndarray
+        List of the Lagrange multipliers for the constraints at the solution.
+        For an inequality constraint a positive multiplier means that the
+        upper bound is active, a negative multiplier means that the lower
+        bound is active and if a multiplier is zero it means the constraint
+        is not active.
+    constr_nfev : list of int
+        Number of constraint evaluations for each of the constraints.
+    constr_njev : list of int
+        Number of Jacobian matrix evaluations for each of the constraints.
+    constr_nhev : list of int
+        Number of Hessian evaluations for each of the constraints.
+    tr_radius : float
+        Radius of the trust region at the last iteration.
+    constr_penalty : float
+        Penalty parameter at the last iteration, see `initial_constr_penalty`.
+    barrier_tolerance : float
+        Tolerance for the barrier subproblem at the last iteration.
+        Only for problems with inequality constraints.
+    barrier_parameter : float
+        Barrier parameter at the last iteration. Only for problems
+        with inequality constraints.
+    execution_time : float
+        Total execution time.
+    message : str
+        Termination message.
+    status : {0, 1, 2, 3}
+        Termination status:
+
+            * 0 : The maximum number of function evaluations is exceeded.
+            * 1 : `gtol` termination condition is satisfied.
+            * 2 : `xtol` termination condition is satisfied.
+            * 3 : `callback` function requested termination.
+
+    cg_stop_cond : int
+        Reason for CG subproblem termination at the last iteration:
+
+            * 0 : CG subproblem not evaluated.
+            * 1 : Iteration limit was reached.
+            * 2 : Reached the trust-region boundary.
+            * 3 : Negative curvature detected.
+            * 4 : Tolerance was satisfied.
+    """
+    x0 = np.atleast_1d(x0).astype(float)
+    n_vars = np.size(x0)
+    if hess is None:
+        if callable(hessp):
+            hess = HessianLinearOperator(hessp, n_vars)
+        else:
+            hess = BFGS()
+    if disp and verbose == 0:
+        verbose = 1
+
+    if bounds is not None:
+        finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
+                                           bounds.keep_feasible, n_vars)
+    else:
+        finite_diff_bounds = (-np.inf, np.inf)
+
+    # Define Objective Function
+    objective = ScalarFunction(fun, x0, args, grad, hess,
+                               finite_diff_rel_step, finite_diff_bounds)
+
+    # Put constraints in list format when needed
+    if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
+        constraints = [constraints]
+
+    # Prepare constraints.
+    prepared_constraints = [
+        PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
+        for c in constraints]
+
+    # Check that all constraints are either sparse or dense.
+    n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
+    if 0 < n_sparse < len(prepared_constraints):
+        raise ValueError("All constraints must have the same kind of "
+                         "Jacobian --- either all sparse or all dense. "
+                         "You can set the sparsity globally by setting "
+                         "`sparse_jacobian` to either True or False.")
+    if prepared_constraints:
+        sparse_jacobian = n_sparse > 0
+
+    if bounds is not None:
+        if sparse_jacobian is None:
+            sparse_jacobian = True
+        prepared_constraints.append(PreparedConstraint(bounds, x0,
+                                                       sparse_jacobian))
+
+    # Concatenate initial constraints to the canonical form.
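+    # (This reuses the function and Jacobian values already saved in each
+    # PreparedConstraint, so no extra constraint evaluations are spent at
+    # the initial point.)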
+ c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical( + n_vars, prepared_constraints, sparse_jacobian) + + # Prepare all canonical constraints and concatenate it into one. + canonical_all = [CanonicalConstraint.from_PreparedConstraint(c) + for c in prepared_constraints] + + if len(canonical_all) == 0: + canonical = CanonicalConstraint.empty(n_vars) + elif len(canonical_all) == 1: + canonical = canonical_all[0] + else: + canonical = CanonicalConstraint.concatenate(canonical_all, + sparse_jacobian) + + # Generate the Hessian of the Lagrangian. + lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess) + + # Choose appropriate method + if canonical.n_ineq == 0: + method = 'equality_constrained_sqp' + else: + method = 'tr_interior_point' + + # Construct OptimizeResult + state = OptimizeResult( + nit=0, nfev=0, njev=0, nhev=0, + cg_niter=0, cg_stop_cond=0, + fun=objective.f, grad=objective.g, + lagrangian_grad=np.copy(objective.g), + constr=[c.fun.f for c in prepared_constraints], + jac=[c.fun.J for c in prepared_constraints], + constr_nfev=[0 for c in prepared_constraints], + constr_njev=[0 for c in prepared_constraints], + constr_nhev=[0 for c in prepared_constraints], + v=[c.fun.v for c in prepared_constraints], + method=method) + + # Start counting + start_time = time.time() + + # Define stop criteria + if method == 'equality_constrained_sqp': + def stop_criteria(state, x, last_iteration_failed, + optimality, constr_violation, + tr_radius, constr_penalty, cg_info): + state = update_state_sqp(state, x, last_iteration_failed, + objective, prepared_constraints, + start_time, tr_radius, constr_penalty, + cg_info) + if verbose == 2: + BasicReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation) + elif verbose > 2: + SQPReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation, + state.constr_penalty, + state.cg_stop_cond) + state.status = None + state.niter = state.nit # Alias for callback (backward-compatibility) + if callback is not None and callback(np.copy(state.x), state): + state.status = 3 + elif state.optimality < gtol and state.constr_violation < gtol: + state.status = 1 + elif state.tr_radius < xtol: + state.status = 2 + elif state.nit > maxiter: + state.status = 0 + return state.status in (0, 1, 2, 3) + elif method == 'tr_interior_point': + def stop_criteria(state, x, last_iteration_failed, tr_radius, + constr_penalty, cg_info, barrier_parameter, + barrier_tolerance): + state = update_state_ip(state, x, last_iteration_failed, + objective, prepared_constraints, + start_time, tr_radius, constr_penalty, + cg_info, barrier_parameter, barrier_tolerance) + if verbose == 2: + BasicReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation) + elif verbose > 2: + IPReport.print_iteration(state.nit, + state.nfev, + state.cg_niter, + state.fun, + state.tr_radius, + state.optimality, + state.constr_violation, + state.constr_penalty, + state.barrier_parameter, + state.cg_stop_cond) + state.status = None + state.niter = state.nit # Alias for callback (backward-compatibility) + if callback is not None and callback(np.copy(state.x), state): + state.status = 3 + elif state.optimality < gtol and state.constr_violation < gtol: + state.status = 1 + elif (state.tr_radius < xtol + and state.barrier_parameter < 
barrier_tol): + state.status = 2 + elif state.nit > maxiter: + state.status = 0 + return state.status in (0, 1, 2, 3) + + if verbose == 2: + BasicReport.print_header() + elif verbose > 2: + if method == 'equality_constrained_sqp': + SQPReport.print_header() + elif method == 'tr_interior_point': + IPReport.print_header() + + # Call inferior function to do the optimization + if method == 'equality_constrained_sqp': + def fun_and_constr(x): + f = objective.fun(x) + c_eq, _ = canonical.fun(x) + return f, c_eq + + def grad_and_jac(x): + g = objective.grad(x) + J_eq, _ = canonical.jac(x) + return g, J_eq + + _, result = equality_constrained_sqp( + fun_and_constr, grad_and_jac, lagrangian_hess, + x0, objective.f, objective.g, + c_eq0, J_eq0, + stop_criteria, state, + initial_constr_penalty, initial_tr_radius, + factorization_method) + + elif method == 'tr_interior_point': + _, result = tr_interior_point( + objective.fun, objective.grad, lagrangian_hess, + n_vars, canonical.n_ineq, canonical.n_eq, + canonical.fun, canonical.jac, + x0, objective.f, objective.g, + c_ineq0, J_ineq0, c_eq0, J_eq0, + stop_criteria, + canonical.keep_feasible, + xtol, state, initial_barrier_parameter, + initial_barrier_tolerance, + initial_constr_penalty, initial_tr_radius, + factorization_method) + + # Status 3 occurs when the callback function requests termination, + # this is assumed to not be a success. + result.success = True if result.status in (1, 2) else False + result.message = TERMINATION_MESSAGES[result.status] + + # Alias (for backward compatibility with 1.1.0) + result.niter = result.nit + + if verbose == 2: + BasicReport.print_footer() + elif verbose > 2: + if method == 'equality_constrained_sqp': + SQPReport.print_footer() + elif method == 'tr_interior_point': + IPReport.print_footer() + if verbose >= 1: + print(result.message) + print("Number of iterations: {}, function evaluations: {}, " + "CG iterations: {}, optimality: {:.2e}, " + "constraint violation: {:.2e}, execution time: {:4.2} s." 
+ .format(result.nit, result.nfev, result.cg_niter, + result.optimality, result.constr_violation, + result.execution_time)) + return result diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.pyc new file mode 100644 index 0000000..e05a039 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/projections.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/projections.py new file mode 100644 index 0000000..8aa6eab --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/projections.py @@ -0,0 +1,406 @@ +"""Basic linear factorizations needed by the solver.""" + +from __future__ import division, print_function, absolute_import +from scipy.sparse import (bmat, csc_matrix, eye, issparse) +from scipy.sparse.linalg import LinearOperator +import scipy.linalg +import scipy.sparse.linalg +try: + from sksparse.cholmod import cholesky_AAt + sksparse_available = True +except ImportError: + import warnings + sksparse_available = False +import numpy as np +from warnings import warn + +__all__ = [ + 'orthogonality', + 'projections', +] + + +def orthogonality(A, g): + """Measure orthogonality between a vector and the null space of a matrix. + + Compute a measure of orthogonality between the null space + of the (possibly sparse) matrix ``A`` and a given vector ``g``. + + The formula is a simplified (and cheaper) version of formula (3.13) + from [1]_. + ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``. + + References + ---------- + .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. + "On the solution of equality constrained quadratic + programming problems arising in optimization." + SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. + """ + # Compute vector norms + norm_g = np.linalg.norm(g) + # Compute Frobenius norm of the matrix A + if issparse(A): + norm_A = scipy.sparse.linalg.norm(A, ord='fro') + else: + norm_A = np.linalg.norm(A, ord='fro') + + # Check if norms are zero + if norm_g == 0 or norm_A == 0: + return 0 + + norm_A_g = np.linalg.norm(A.dot(g)) + # Orthogonality measure + orth = norm_A_g / (norm_A*norm_g) + return orth + + +def normal_equation_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``NormalEquation`` approach. + """ + # Cholesky factorization + factor = cholesky_AAt(A) + + # z = x - A.T inv(A A.T) A x + def null_space(x): + v = factor(A.dot(x)) + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. 
+ k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # z_next = z - A.T inv(A A.T) A z + v = factor(A.dot(z)) + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + return factor(A.dot(x)) + + # z = A.T inv(A A.T) x + def row_space(x): + return A.T.dot(factor(x)) + + return null_space, least_squares, row_space + + +def augmented_system_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A - ``AugmentedSystem``.""" + # Form augmented system + K = csc_matrix(bmat([[eye(n), A.T], [A, None]])) + # LU factorization + # TODO: Use a symmetric indefinite factorization + # to solve the system twice as fast (because + # of the symmetry). + try: + solve = scipy.sparse.linalg.factorized(K) + except RuntimeError: + warn("Singular Jacobian matrix. Using dense SVD decomposition to " + "perform the factorizations.") + return svd_factorization_projections(A.toarray(), + m, n, orth_tol, + max_refin, tol) + + # z = x - A.T inv(A A.T) A x + # is computed solving the extended system: + # [I A.T] * [ z ] = [x] + # [A O ] [aux] [0] + def null_space(x): + # v = [x] + # [0] + v = np.hstack([x, np.zeros(m)]) + # lu_sol = [ z ] + # [aux] + lu_sol = solve(v) + z = lu_sol[:n] + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.2. + k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # new_v = [x] - [I A.T] * [ z ] + # [0] [A O ] [aux] + new_v = v - K.dot(lu_sol) + # [I A.T] * [delta z ] = new_v + # [A O ] [delta aux] + lu_update = solve(new_v) + # [ z ] += [delta z ] + # [aux] [delta aux] + lu_sol += lu_update + z = lu_sol[:n] + k += 1 + + # return z = x - A.T inv(A A.T) A x + return z + + # z = inv(A A.T) A x + # is computed solving the extended system: + # [I A.T] * [aux] = [x] + # [A O ] [ z ] [0] + def least_squares(x): + # v = [x] + # [0] + v = np.hstack([x, np.zeros(m)]) + # lu_sol = [aux] + # [ z ] + lu_sol = solve(v) + # return z = inv(A A.T) A x + return lu_sol[n:m+n] + + # z = A.T inv(A A.T) x + # is computed solving the extended system: + # [I A.T] * [ z ] = [0] + # [A O ] [aux] [x] + def row_space(x): + # v = [0] + # [x] + v = np.hstack([np.zeros(n), x]) + # lu_sol = [ z ] + # [aux] + lu_sol = solve(v) + # return z = A.T inv(A A.T) x + return lu_sol[:n] + + return null_space, least_squares, row_space + + +def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``QRFactorization`` approach. + """ + # QRFactorization + Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic') + + if np.linalg.norm(R[-1, :], np.inf) < tol: + warn('Singular Jacobian matrix. Using SVD decomposition to ' + + 'perform the factorizations.') + return svd_factorization_projections(A, m, n, + orth_tol, + max_refin, + tol) + + # z = x - A.T inv(A A.T) A x + def null_space(x): + # v = P inv(R) Q.T x + aux1 = Q.T.dot(x) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + v = np.zeros(m) + v[P] = aux2 + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. 
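+        # The refinement passes below reuse the already-computed QR factors
+        # of A.T, so each extra pass costs roughly one triangular solve and
+        # two matrix-vector products, not a new factorization.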
+ k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # v = P inv(R) Q.T x + aux1 = Q.T.dot(z) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + v[P] = aux2 + # z_next = z - A.T v + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + # z = P inv(R) Q.T x + aux1 = Q.T.dot(x) + aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) + z = np.zeros(m) + z[P] = aux2 + return z + + # z = A.T inv(A A.T) x + def row_space(x): + # z = Q inv(R.T) P.T x + aux1 = x[P] + aux2 = scipy.linalg.solve_triangular(R, aux1, + lower=False, + trans='T') + z = Q.dot(aux2) + return z + + return null_space, least_squares, row_space + + +def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol): + """Return linear operators for matrix A using ``SVDFactorization`` approach. + """ + # SVD Factorization + U, s, Vt = scipy.linalg.svd(A, full_matrices=False) + + # Remove dimensions related with very small singular values + U = U[:, s > tol] + Vt = Vt[s > tol, :] + s = s[s > tol] + + # z = x - A.T inv(A A.T) A x + def null_space(x): + # v = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(x) + aux2 = 1/s*aux1 + v = U.dot(aux2) + z = x - A.T.dot(v) + + # Iterative refinement to improve roundoff + # errors described in [2]_, algorithm 5.1. + k = 0 + while orthogonality(A, z) > orth_tol: + if k >= max_refin: + break + # v = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(z) + aux2 = 1/s*aux1 + v = U.dot(aux2) + # z_next = z - A.T v + z = z - A.T.dot(v) + k += 1 + + return z + + # z = inv(A A.T) A x + def least_squares(x): + # z = U 1/s V.T x = inv(A A.T) A x + aux1 = Vt.dot(x) + aux2 = 1/s*aux1 + z = U.dot(aux2) + return z + + # z = A.T inv(A A.T) x + def row_space(x): + # z = V 1/s U.T x + aux1 = U.T.dot(x) + aux2 = 1/s*aux1 + z = Vt.T.dot(aux2) + return z + + return null_space, least_squares, row_space + + +def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15): + """Return three linear operators related with a given matrix A. + + Parameters + ---------- + A : sparse matrix (or ndarray), shape (m, n) + Matrix ``A`` used in the projection. + method : string, optional + Method used for compute the given linear + operators. Should be one of: + + - 'NormalEquation': The operators + will be computed using the + so-called normal equation approach + explained in [1]_. In order to do + so the Cholesky factorization of + ``(A A.T)`` is computed. Exclusive + for sparse matrices. + - 'AugmentedSystem': The operators + will be computed using the + so-called augmented system approach + explained in [1]_. Exclusive + for sparse matrices. + - 'QRFactorization': Compute projections + using QR factorization. Exclusive for + dense matrices. + - 'SVDFactorization': Compute projections + using SVD factorization. Exclusive for + dense matrices. + + orth_tol : float, optional + Tolerance for iterative refinements. + max_refin : int, optional + Maximum number of iterative refinements + tol : float, optional + Tolerance for singular values + + Returns + ------- + Z : LinearOperator, shape (n, n) + Null-space operator. For a given vector ``x``, + the null space operator is equivalent to apply + a projection matrix ``P = I - A.T inv(A A.T) A`` + to the vector. It can be shown that this is + equivalent to project ``x`` into the null space + of A. + LS : LinearOperator, shape (m, n) + Least-Square operator. 
For a given vector ``x``, + the least-square operator is equivalent to apply a + pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A`` + to the vector. It can be shown that this vector + ``pinv(A.T) x`` is the least_square solution to + ``A.T y = x``. + Y : LinearOperator, shape (n, m) + Row-space operator. For a given vector ``x``, + the row-space operator is equivalent to apply a + projection matrix ``Q = A.T inv(A A.T)`` + to the vector. It can be shown that this + vector ``y = Q x`` the minimum norm solution + of ``A y = x``. + + Notes + ----- + Uses iterative refinements described in [1] + during the computation of ``Z`` in order to + cope with the possibility of large roundoff errors. + + References + ---------- + .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. + "On the solution of equality constrained quadratic + programming problems arising in optimization." + SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. + """ + m, n = np.shape(A) + + # The factorization of an empty matrix + # only works for the sparse representation. + if m*n == 0: + A = csc_matrix(A) + + # Check Argument + if issparse(A): + if method is None: + method = "AugmentedSystem" + if method not in ("NormalEquation", "AugmentedSystem"): + raise ValueError("Method not allowed for sparse matrix.") + if method == "NormalEquation" and not sksparse_available: + warnings.warn(("Only accepts 'NormalEquation' option when" + " scikit-sparse is available. Using " + "'AugmentedSystem' option instead."), + ImportWarning) + method = 'AugmentedSystem' + else: + if method is None: + method = "QRFactorization" + if method not in ("QRFactorization", "SVDFactorization"): + raise ValueError("Method not allowed for dense array.") + + if method == 'NormalEquation': + null_space, least_squares, row_space \ + = normal_equation_projections(A, m, n, orth_tol, max_refin, tol) + elif method == 'AugmentedSystem': + null_space, least_squares, row_space \ + = augmented_system_projections(A, m, n, orth_tol, max_refin, tol) + elif method == "QRFactorization": + null_space, least_squares, row_space \ + = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol) + elif method == "SVDFactorization": + null_space, least_squares, row_space \ + = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol) + + Z = LinearOperator((n, n), null_space) + LS = LinearOperator((m, n), least_squares) + Y = LinearOperator((n, m), row_space) + + return Z, LS, Y diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/projections.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/projections.pyc new file mode 100644 index 0000000..f66975e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/projections.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py new file mode 100644 index 0000000..0b030ad --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py @@ -0,0 +1,639 @@ +"""Equality-constrained quadratic programming solvers.""" + +from __future__ import division, print_function, absolute_import +from scipy.sparse import (linalg, bmat, csc_matrix) +from math import copysign +import numpy as np +from numpy.linalg import norm + +__all__ = [ + 'eqp_kktfact', + 'sphere_intersections', + 'box_intersections', + 
'box_sphere_intersections', + 'inside_box_boundaries', + 'modified_dogleg', + 'projected_cg' +] + + +# For comparison with the projected CG +def eqp_kktfact(H, c, A, b): + """Solve equality-constrained quadratic programming (EQP) problem. + + Solve ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` + using direct factorization of the KKT system. + + Parameters + ---------- + H : sparse matrix, shape (n, n) + Hessian matrix of the EQP problem. + c : array_like, shape (n,) + Gradient of the quadratic objective function. + A : sparse matrix + Jacobian matrix of the EQP problem. + b : array_like, shape (m,) + Right-hand side of the constraint equation. + + Returns + ------- + x : array_like, shape (n,) + Solution of the KKT problem. + lagrange_multipliers : ndarray, shape (m,) + Lagrange multipliers of the KKT problem. + """ + n, = np.shape(c) # Number of parameters + m, = np.shape(b) # Number of constraints + + # Karush-Kuhn-Tucker matrix of coefficients. + # Defined as in Nocedal/Wright "Numerical + # Optimization" p.452 in Eq. (16.4). + kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]])) + # Vector of coefficients. + kkt_vec = np.hstack([-c, -b]) + + # TODO: Use a symmetric indefinite factorization + # to solve the system twice as fast (because + # of the symmetry). + lu = linalg.splu(kkt_matrix) + kkt_sol = lu.solve(kkt_vec) + x = kkt_sol[:n] + lagrange_multipliers = -kkt_sol[n:n+m] + + return x, lagrange_multipliers + + +def sphere_intersections(z, d, trust_radius, + entire_line=False): + """Find the intersection between segment (or line) and spherical constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d`` and the ball + ``||x|| <= trust_radius``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + trust_radius : float + Ball radius. + entire_line : bool, optional + When ``True`` the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the ball + ``||x|| <= trust_radius``. When ``False`` returns the intersection + between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the ball. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the ball for + for ``ta <= t <= tb``. + intersect : bool + When ``True`` there is a intersection between the line/segment + and the sphere. On the other hand, when ``False``, there is no + intersection. + """ + # Special case when d=0 + if norm(d) == 0: + return 0, 0, False + # Check for inf trust_radius + if np.isinf(trust_radius): + if entire_line: + ta = -np.inf + tb = np.inf + else: + ta = 0 + tb = 1 + intersect = True + return ta, tb, intersect + + a = np.dot(d, d) + b = 2 * np.dot(z, d) + c = np.dot(z, z) - trust_radius**2 + discriminant = b*b - 4*a*c + if discriminant < 0: + intersect = False + return 0, 0, intersect + sqrt_discriminant = np.sqrt(discriminant) + + # The following calculation is mathematically + # equivalent to: + # ta = (-b - sqrt_discriminant) / (2*a) + # tb = (-b + sqrt_discriminant) / (2*a) + # but produce smaller round off errors. + # Look at Matrix Computation p.97 + # for a better justification. + aux = b + copysign(sqrt_discriminant, b) + ta = -aux / (2*a) + tb = -2*c / aux + ta, tb = sorted([ta, tb]) + + if entire_line: + intersect = True + else: + # Checks to see if intersection happens + # within vectors length. 
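+        # ta <= tb are the roots of ||z + t*d||^2 = trust_radius**2; the
+        # segment case keeps only the part of that interval inside [0, 1].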
+ if tb < 0 or ta > 1: + intersect = False + ta = 0 + tb = 0 + else: + intersect = True + # Restrict intersection interval + # between 0 and 1. + ta = max(0, ta) + tb = min(1, tb) + + return ta, tb, intersect + + +def box_intersections(z, d, lb, ub, + entire_line=False): + """Find the intersection between segment (or line) and box constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d`` and the rectangular box + ``lb <= x <= ub``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + entire_line : bool, optional + When ``True`` the function returns the intersection between the line + ``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular + box. When ``False`` returns the intersection between the segment + ``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box. + + Returns + ------- + ta, tb : float + The line/segment ``x(t) = z + t*d`` is inside the box for + for ``ta <= t <= tb``. + intersect : bool + When ``True`` there is a intersection between the line (or segment) + and the rectangular box. On the other hand, when ``False``, there is no + intersection. + """ + # Make sure it is a numpy array + z = np.asarray(z) + d = np.asarray(d) + lb = np.asarray(lb) + ub = np.asarray(ub) + # Special case when d=0 + if norm(d) == 0: + return 0, 0, False + + # Get values for which d==0 + zero_d = (d == 0) + # If the boundaries are not satisfied for some coordinate + # for which "d" is zero, there is no box-line intersection. + if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any(): + intersect = False + return 0, 0, intersect + # Remove values for which d is zero + not_zero_d = np.logical_not(zero_d) + z = z[not_zero_d] + d = d[not_zero_d] + lb = lb[not_zero_d] + ub = ub[not_zero_d] + + # Find a series of intervals (t_lb[i], t_ub[i]). + t_lb = (lb-z) / d + t_ub = (ub-z) / d + # Get the intersection of all those intervals. + ta = max(np.minimum(t_lb, t_ub)) + tb = min(np.maximum(t_lb, t_ub)) + + # Check if intersection is feasible + if ta <= tb: + intersect = True + else: + intersect = False + # Checks to see if intersection happens within vectors length. + if not entire_line: + if tb < 0 or ta > 1: + intersect = False + ta = 0 + tb = 0 + else: + # Restrict intersection interval between 0 and 1. + ta = max(0, ta) + tb = min(1, tb) + + return ta, tb, intersect + + +def box_sphere_intersections(z, d, lb, ub, trust_radius, + entire_line=False, + extra_info=False): + """Find the intersection between segment (or line) and box/sphere constraints. + + Find the intersection between the segment (or line) defined by the + parametric equation ``x(t) = z + t*d``, the rectangular box + ``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``. + + Parameters + ---------- + z : array_like, shape (n,) + Initial point. + d : array_like, shape (n,) + Direction. + lb : array_like, shape (n,) + Lower bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + ub : array_like, shape (n, ) + Upper bounds to each one of the components of ``x``. Used + to delimit the rectangular box. + trust_radius : float + Ball radius. 
+    entire_line : bool, optional
+        When ``True``, the function returns the intersection between the line
+        ``x(t) = z + t*d`` (``t`` can assume any value) and the constraints.
+        When ``False``, returns the intersection between the segment
+        ``x(t) = z + t*d``, ``0 <= t <= 1``, and the constraints.
+    extra_info : bool, optional
+        When ``True``, returns ``intersect_sphere`` and ``intersect_box``.
+
+    Returns
+    -------
+    ta, tb : float
+        The line/segment ``x(t) = z + t*d`` is inside the rectangular box and
+        inside the ball for ``ta <= t <= tb``.
+    intersect : bool
+        When ``True``, there is an intersection between the line (or segment)
+        and both constraints. On the other hand, when ``False``, there is no
+        intersection.
+    sphere_info : dict, optional
+        Dictionary ``{ta, tb, intersect}`` containing the interval
+        ``[ta, tb]`` for which the line intercepts the ball, and a boolean
+        value indicating whether the sphere is intersected by the line.
+    box_info : dict, optional
+        Dictionary ``{ta, tb, intersect}`` containing the interval
+        ``[ta, tb]`` for which the line intercepts the box, and a boolean
+        value indicating whether the box is intersected by the line.
+    """
+    ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub,
+                                                entire_line)
+    ta_s, tb_s, intersect_s = sphere_intersections(z, d,
+                                                   trust_radius,
+                                                   entire_line)
+    ta = np.maximum(ta_b, ta_s)
+    tb = np.minimum(tb_b, tb_s)
+    if intersect_b and intersect_s and ta <= tb:
+        intersect = True
+    else:
+        intersect = False
+
+    if extra_info:
+        sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s}
+        box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b}
+        return ta, tb, intersect, sphere_info, box_info
+    else:
+        return ta, tb, intersect
+
+
+def inside_box_boundaries(x, lb, ub):
+    """Check if lb <= x <= ub."""
+    return (lb <= x).all() and (x <= ub).all()
+
+
+def reinforce_box_boundaries(x, lb, ub):
+    """Return the value of x clipped to the box [lb, ub]."""
+    return np.minimum(np.maximum(x, lb), ub)
+
+
+def modified_dogleg(A, Y, b, trust_radius, lb, ub):
+    """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region.
+
+    Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2``
+    subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification
+    of the classical dogleg approach.
+
+    Parameters
+    ----------
+    A : LinearOperator (or sparse matrix or ndarray), shape (m, n)
+        Matrix ``A`` in the minimization problem. It should have
+        dimension ``(m, n)`` such that ``m < n``.
+    Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
+        LinearOperator that applies the projection matrix
+        ``Q = A.T inv(A A.T)`` to a vector. The obtained vector
+        ``y = Q x`` is the minimum-norm solution of ``A y = x``.
+    b : array_like, shape (m,)
+        Vector ``b`` in the minimization problem.
+    trust_radius : float
+        Trust radius to be considered. Delimits a sphere boundary
+        to the problem.
+    lb : array_like, shape (n,)
+        Lower bounds to each one of the components of ``x``.
+        It is expected that ``lb <= 0``, otherwise the algorithm
+        may fail. If ``lb[i] = -Inf``, the lower
+        bound for the i-th component is just ignored.
+    ub : array_like, shape (n,)
+        Upper bounds to each one of the components of ``x``.
+        It is expected that ``ub >= 0``, otherwise the algorithm
+        may fail. If ``ub[i] = Inf``, the upper bound for the i-th
+        component is just ignored.
+
+    Returns
+    -------
+    x : array_like, shape (n,)
+        Solution to the problem.
+
+    Notes
+    -----
+    Based on implementations described on pp. 885-886 of [1]_.
+
+    References
+    ----------
+    ..
[1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
+           "An interior point algorithm for large-scale nonlinear
+           programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
+    """
+    # Compute minimum norm minimizer of 1/2*|| A x + b ||^2.
+    newton_point = -Y.dot(b)
+    # Check for interior point
+    if inside_box_boundaries(newton_point, lb, ub) \
+            and norm(newton_point) <= trust_radius:
+        x = newton_point
+        return x
+
+    # Compute gradient vector ``g = A.T b``
+    g = A.T.dot(b)
+    # Compute Cauchy point
+    # ``cauchy_point = -(g.T g / (g.T A.T A g)) g``.
+    A_g = A.dot(g)
+    cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g
+    # Origin
+    origin_point = np.zeros_like(cauchy_point)
+
+    # Check the segment between cauchy_point and newton_point
+    # for a possible solution.
+    z = cauchy_point
+    p = newton_point - cauchy_point
+    _, alpha, intersect = box_sphere_intersections(z, p, lb, ub,
+                                                   trust_radius)
+    if intersect:
+        x1 = z + alpha*p
+    else:
+        # Check the segment between the origin and cauchy_point
+        # for a possible solution.
+        z = origin_point
+        p = cauchy_point
+        _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
+                                               trust_radius)
+        x1 = z + alpha*p
+
+    # Check the segment between origin and newton_point
+    # for a possible solution.
+    z = origin_point
+    p = newton_point
+    _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
+                                           trust_radius)
+    x2 = z + alpha*p
+
+    # Return the best solution among x1 and x2.
+    if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):
+        return x1
+    else:
+        return x2
+
+
+def projected_cg(H, c, Z, Y, b, trust_radius=np.inf,
+                 lb=None, ub=None, tol=None,
+                 max_iter=None, max_infeasible_iter=None,
+                 return_all=False):
+    """Solve EQP problem with projected CG method.
+
+    Solve equality-constrained quadratic programming problem
+    ``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0`` and,
+    possibly, to trust region constraints ``||x|| < trust_radius``
+    and box constraints ``lb <= x <= ub``.
+
+    Parameters
+    ----------
+    H : LinearOperator (or sparse matrix or ndarray), shape (n, n)
+        Operator for computing ``H v``.
+    c : array_like, shape (n,)
+        Gradient of the quadratic objective function.
+    Z : LinearOperator (or sparse matrix or ndarray), shape (n, n)
+        Operator for projecting ``x`` into the null space of A.
+    Y : LinearOperator, sparse matrix, ndarray, shape (n, m)
+        Operator that, for a given vector ``b``, computes the smallest
+        norm solution of ``A x + b = 0``.
+    b : array_like, shape (m,)
+        Right-hand side of the constraint equation.
+    trust_radius : float, optional
+        Trust radius to be considered. By default uses ``trust_radius=inf``,
+        which means no trust radius at all.
+    lb : array_like, shape (n,), optional
+        Lower bounds to each one of the components of ``x``.
+        If ``lb[i] = -Inf``, the lower bound for the i-th
+        component is just ignored (default).
+    ub : array_like, shape (n,), optional
+        Upper bounds to each one of the components of ``x``.
+        If ``ub[i] = Inf``, the upper bound for the i-th
+        component is just ignored (default).
+    tol : float, optional
+        Tolerance used to interrupt the algorithm.
+    max_iter : int, optional
+        Maximum algorithm iterations, where ``max_iter <= n-m``.
+        By default uses ``max_iter = n-m``.
+    max_infeasible_iter : int, optional
+        Maximum infeasible (regarding box constraints) iterations the
+        algorithm is allowed to take.
+        By default uses ``max_infeasible_iter = n-m``.
+    return_all : bool, optional
+        When ``True``, returns the list of all vectors through the iterations.
+
+    Returns
+    -------
+    x : array_like, shape (n,)
+        Solution of the EQP problem.
+    info : dict
+        Dictionary containing the following:
+
+            - niter : Number of iterations.
+            - stop_cond : Reason for algorithm termination:
+                1. Iteration limit was reached;
+                2. Reached the trust-region boundary;
+                3. Negative curvature detected;
+                4. Tolerance was satisfied.
+            - allvecs : List containing all intermediary vectors (optional).
+            - hits_boundary : True if the proposed step is on the boundary
+              of the trust region.
+
+    Notes
+    -----
+    Implementation of Algorithm 6.2 of [1]_.
+
+    In the absence of spherical and box constraints, given sufficiently
+    many iterations, the method returns a truly optimal result.
+    In the presence of those constraints, the value returned is only
+    an inexpensive approximation of the optimal value.
+
+    References
+    ----------
+    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
+           "On the solution of equality constrained quadratic
+           programming problems arising in optimization."
+           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
+    """
+    CLOSE_TO_ZERO = 1e-25
+
+    n, = np.shape(c)  # Number of parameters
+    m, = np.shape(b)  # Number of constraints
+
+    # Initial Values
+    x = Y.dot(-b)
+    r = Z.dot(H.dot(x) + c)
+    g = Z.dot(r)
+    p = -g
+
+    # Store ``x`` value
+    if return_all:
+        allvecs = [x]
+    # Values for the first iteration
+    H_p = H.dot(p)
+    rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
+
+    # If ||x|| > trust_radius, the problem does not have a solution.
+    tr_distance = trust_radius - norm(x)
+    if tr_distance < 0:
+        raise ValueError("Trust region problem does not have a solution.")
+    # If ||x|| == trust_radius, then x is the solution
+    # to the optimization problem, since x is the
+    # minimum norm solution to Ax=b.
+    elif tr_distance < CLOSE_TO_ZERO:
+        info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True}
+        if return_all:
+            allvecs.append(x)
+            info['allvecs'] = allvecs
+        return x, info
+
+    # Set default tolerance
+    if tol is None:
+        tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO)
+    # Set default lower and upper bounds
+    if lb is None:
+        lb = np.full(n, -np.inf)
+    if ub is None:
+        ub = np.full(n, np.inf)
+    # Set maximum iterations
+    if max_iter is None:
+        max_iter = n-m
+    max_iter = min(max_iter, n-m)
+    # Set maximum infeasible iterations
+    if max_infeasible_iter is None:
+        max_infeasible_iter = n-m
+
+    hits_boundary = False
+    stop_cond = 1
+    counter = 0
+    last_feasible_x = np.zeros_like(x)
+    k = 0
+    for i in range(max_iter):
+        # Stop criteria - Tolerance : r.T g < tol
+        if rt_g < tol:
+            stop_cond = 4
+            break
+        k += 1
+        # Compute curvature
+        pt_H_p = H_p.dot(p)
+        # Stop criteria - Negative curvature
+        if pt_H_p <= 0:
+            if np.isinf(trust_radius):
+                raise ValueError("Negative curvature not "
+                                 "allowed for unrestricted "
+                                 "problems.")
+            else:
+                # Find intersection with constraints
+                _, alpha, intersect = box_sphere_intersections(
+                    x, p, lb, ub, trust_radius, entire_line=True)
+                # Update solution
+                if intersect:
+                    x = x + alpha*p
+                # Reinforce variables are inside box constraints.
+                # This is only necessary because of roundoff errors.
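+                # (For example, a component that lands at lb[i] - 1e-16
+                # after the update above is snapped back to exactly lb[i]
+                # by reinforce_box_boundaries.)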
+                x = reinforce_box_boundaries(x, lb, ub)
+                # Attribute information
+                stop_cond = 3
+                hits_boundary = True
+                break
+
+        # Get next step
+        alpha = rt_g / pt_H_p
+        x_next = x + alpha*p
+
+        # Stop criteria - Hits boundary
+        if np.linalg.norm(x_next) >= trust_radius:
+            # Find intersection with box constraints
+            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
+                                                           trust_radius)
+            # Update solution
+            if intersect:
+                x = x + theta*alpha*p
+                # Reinforce variables are inside box constraints.
+                # This is only necessary because of roundoff errors.
+                x = reinforce_box_boundaries(x, lb, ub)
+            # Attribute information
+            stop_cond = 2
+            hits_boundary = True
+            break
+
+        # Check if ``x`` is inside the box and start counter if it is not.
+        if inside_box_boundaries(x_next, lb, ub):
+            counter = 0
+        else:
+            counter += 1
+        # Whenever outside box constraints keep looking for intersections.
+        if counter > 0:
+            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
+                                                           trust_radius)
+            if intersect:
+                last_feasible_x = x + theta*alpha*p
+                # Reinforce variables are inside box constraints.
+                # This is only necessary because of roundoff errors.
+                last_feasible_x = reinforce_box_boundaries(last_feasible_x,
+                                                           lb, ub)
+                counter = 0
+        # Stop after too many infeasible (regarding box constraints)
+        # iterations.
+        if counter > max_infeasible_iter:
+            break
+        # Store ``x_next`` value
+        if return_all:
+            allvecs.append(x_next)
+
+        # Update residual
+        r_next = r + alpha*H_p
+        # Project residual g+ = Z r+
+        g_next = Z.dot(r_next)
+        # Compute conjugate direction step d
+        rt_g_next = norm(g_next)**2  # g.T g = r.T g (ref [1]_ p.1389)
+        beta = rt_g_next / rt_g
+        p = - g_next + beta*p
+        # Prepare for next iteration
+        x = x_next
+        g = g_next
+        r = g_next
+        rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
+        H_p = H.dot(p)
+
+    if not inside_box_boundaries(x, lb, ub):
+        x = last_feasible_x
+        hits_boundary = True
+    info = {'niter': k, 'stop_cond': stop_cond,
+            'hits_boundary': hits_boundary}
+    if return_all:
+        info['allvecs'] = allvecs
+    return x, info
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.pyc
new file mode 100644
index 0000000..6d6016c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/report.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/report.py
new file mode 100644
index 0000000..70b36dc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/report.py
@@ -0,0 +1,50 @@
+"""Progress report printers."""
+
+
+class ReportBase(object):
+    COLUMN_NAMES = NotImplemented
+    COLUMN_WIDTHS = NotImplemented
+    ITERATION_FORMATS = NotImplemented
+
+    @classmethod
+    def print_header(cls):
+        fmt = ("|"
+               + "|".join(["{{:^{}}}".format(x) for x in cls.COLUMN_WIDTHS])
+               + "|")
+        separators = ['-' * x for x in cls.COLUMN_WIDTHS]
+        print(fmt.format(*cls.COLUMN_NAMES))
+        print(fmt.format(*separators))
+
+    @classmethod
+    def print_iteration(cls, *args):
+        iteration_format = ["{{:{}}}".format(x) for x in cls.ITERATION_FORMATS]
+        fmt = "|" + "|".join(iteration_format) + "|"
+        print(fmt.format(*args))
+
+    @classmethod
+    def print_footer(cls):
+        print()
+
+
+class BasicReport(ReportBase):
+    COLUMN_NAMES = ["niter", "f evals", "CG
iter", "obj func", "tr radius", + "opt", "c viol"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", + "^10.2e", "^10.2e", "^10.2e"] + + +class SQPReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol", "penalty", "CG stop"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", + "^10.2e", "^10.2e", "^7"] + + +class IPReport(ReportBase): + COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", + "opt", "c viol", "penalty", "barrier param", "CG stop"] + COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7] + ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", + "^10.2e", "^10.2e", "^13.2e", "^7"] diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/report.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/report.pyc new file mode 100644 index 0000000..68bded6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/report.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/setup.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/setup.py new file mode 100644 index 0000000..e4c4a24 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/setup.py @@ -0,0 +1,13 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('_trustregion_constr', parent_package, top_path) + config.add_data_dir('tests') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/setup.pyc new file mode 100644 index 0000000..7700020 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.pyc new file mode 100644 index 0000000..b137c66 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py new file mode 100644 index 0000000..3ac51b6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py @@ -0,0 +1,294 @@ +from __future__ import division, print_function, absolute_import +import numpy as np +from numpy.testing import assert_array_equal, assert_equal +from scipy.optimize._constraints import (NonlinearConstraint, 
Bounds, + PreparedConstraint) +from scipy.optimize._trustregion_constr.canonical_constraint \ + import CanonicalConstraint, initial_constraints_as_canonical + + +def create_quadratic_function(n, m, rng): + a = rng.rand(m) + A = rng.rand(m, n) + H = rng.rand(m, n, n) + HT = np.transpose(H, (1, 2, 0)) + + def fun(x): + return a + A.dot(x) + 0.5 * H.dot(x).dot(x) + + def jac(x): + return A + H.dot(x) + + def hess(x, v): + return HT.dot(v) + + return fun, jac, hess + + +def test_bounds_cases(): + # Test 1: no constraints. + user_constraint = Bounds(-np.inf, np.inf) + x0 = np.array([-1, 2]) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 0) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 2))) + assert_array_equal(J_ineq, np.empty((0, 2))) + + assert_array_equal(c.keep_feasible, []) + + # Test 2: infinite lower bound. + user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True]) + x0 = np.array([-1, -2, -3], dtype=float) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 2) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, [-1, -4]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]])) + + assert_array_equal(c.keep_feasible, [False, True]) + + # Test 3: infinite upper bound. + user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True]) + x0 = np.array([1, 2, 3], dtype=float) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 2) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, [-1, -1]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]])) + + assert_array_equal(c.keep_feasible, [True, False]) + + # Test 4: interval constraint. 
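+    # (A component with lb[i] == ub[i], like the last one here, becomes an
+    # equality constraint; each finite two-sided bound contributes two
+    # inequalities, hence n_eq == 1 and n_ineq == 4 below.)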
+ user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3], + [False, True, True, True]) + x0 = np.array([0, 10, 8, 5]) + prepared_constraint = PreparedConstraint(user_constraint, x0, False) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_equal(c.n_eq, 1) + assert_equal(c.n_ineq, 4) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [2]) + assert_array_equal(c_ineq, [-1, -2, -1, -6]) + + J_eq, J_ineq = c.jac(x0) + assert_array_equal(J_eq, [[0, 0, 0, 1]]) + assert_array_equal(J_ineq, [[1, 0, 0, 0], + [0, 0, 1, 0], + [-1, 0, 0, 0], + [0, 0, -1, 0]]) + + assert_array_equal(c.keep_feasible, [False, True, False, True]) + + +def test_nonlinear_constraint(): + n = 3 + m = 5 + rng = np.random.RandomState(0) + x0 = rng.rand(n) + + fun, jac, hess = create_quadratic_function(n, m, rng) + f = fun(x0) + J = jac(x0) + + lb = [-10, 3, -np.inf, -np.inf, -5] + ub = [10, 3, np.inf, 3, np.inf] + user_constraint = NonlinearConstraint( + fun, lb, ub, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + prepared_constraint = PreparedConstraint(user_constraint, x0, + sparse_jacobian) + c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) + + assert_array_equal(c.n_eq, 1) + assert_array_equal(c.n_ineq, 4) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [f[1] - lb[1]]) + assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4], + f[0] - ub[0], lb[0] - f[0]]) + + J_eq, J_ineq = c.jac(x0) + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, J[1, None]) + assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0]))) + + v_eq = rng.rand(c.n_eq) + v_ineq = rng.rand(c.n_ineq) + v = np.zeros(m) + v[1] = v_eq[0] + v[3] = v_ineq[0] + v[4] = -v_ineq[1] + v[0] = v_ineq[2] - v_ineq[3] + assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v)) + + assert_array_equal(c.keep_feasible, [True, False, True, True]) + + +def test_concatenation(): + rng = np.random.RandomState(0) + n = 4 + x0 = np.random.rand(n) + + f1 = x0 + J1 = np.eye(n) + lb1 = [-1, -np.inf, -2, 3] + ub1 = [1, np.inf, np.inf, 3] + bounds = Bounds(lb1, ub1, [False, False, True, False]) + + fun, jac, hess = create_quadratic_function(n, 5, rng) + f2 = fun(x0) + J2 = jac(x0) + lb2 = [-10, 3, -np.inf, -np.inf, -5] + ub2 = [10, 3, np.inf, 5, np.inf] + nonlinear = NonlinearConstraint( + fun, lb2, ub2, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) + nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) + + c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared) + c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared) + c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian) + + assert_equal(c.n_eq, 2) + assert_equal(c.n_ineq, 7) + + c_eq, c_ineq = c.fun(x0) + assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) + assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], + lb1[0] - f1[0], f2[3] - ub2[3], + lb2[4] - f2[4], f2[0] - ub2[0], + lb2[0] - f2[0]]) + + J_eq, J_ineq = c.jac(x0) + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) + assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], + -J2[4], J2[0], -J2[0]))) + + v_eq = rng.rand(c.n_eq) + v_ineq = rng.rand(c.n_ineq) + v = np.zeros(5) + v[1] = v_eq[1] + v[3] = v_ineq[3] + v[4] = -v_ineq[4] + v[0] = 
v_ineq[5] - v_ineq[6] + H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n)) + assert_array_equal(H, hess(x0, v)) + + assert_array_equal(c.keep_feasible, + [True, False, False, True, False, True, True]) + + +def test_empty(): + x = np.array([1, 2, 3]) + c = CanonicalConstraint.empty(3) + assert_equal(c.n_eq, 0) + assert_equal(c.n_ineq, 0) + + c_eq, c_ineq = c.fun(x) + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + J_eq, J_ineq = c.jac(x) + assert_array_equal(J_eq, np.empty((0, 3))) + assert_array_equal(J_ineq, np.empty((0, 3))) + + H = c.hess(x, None, None).toarray() + assert_array_equal(H, np.zeros((3, 3))) + + +def test_initial_constraints_as_canonical(): + rng = np.random.RandomState(0) + n = 4 + x0 = np.random.rand(n) + + lb1 = [-1, -np.inf, -2, 3] + ub1 = [1, np.inf, np.inf, 3] + bounds = Bounds(lb1, ub1, [False, False, True, False]) + + fun, jac, hess = create_quadratic_function(n, 5, rng) + lb2 = [-10, 3, -np.inf, -np.inf, -5] + ub2 = [10, 3, np.inf, 5, np.inf] + nonlinear = NonlinearConstraint( + fun, lb2, ub2, jac, hess, [True, False, False, True, False]) + + for sparse_jacobian in [False, True]: + bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) + nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) + + f1 = bounds_prepared.fun.f + J1 = bounds_prepared.fun.J + f2 = nonlinear_prepared.fun.f + J2 = nonlinear_prepared.fun.J + + c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( + n, [bounds_prepared, nonlinear_prepared], sparse_jacobian) + + assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) + assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], + lb1[0] - f1[0], f2[3] - ub2[3], + lb2[4] - f2[4], f2[0] - ub2[0], + lb2[0] - f2[0]]) + + if sparse_jacobian: + J1 = J1.toarray() + J2 = J2.toarray() + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) + assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], + -J2[4], J2[0], -J2[0]))) + + +def test_initial_constraints_as_canonical_empty(): + n = 3 + for sparse_jacobian in [False, True]: + c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( + n, [], sparse_jacobian) + + assert_array_equal(c_eq, []) + assert_array_equal(c_ineq, []) + + if sparse_jacobian: + J_eq = J_eq.toarray() + J_ineq = J_ineq.toarray() + + assert_array_equal(J_eq, np.empty((0, n))) + assert_array_equal(J_ineq, np.empty((0, n))) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.pyc new file mode 100644 index 0000000..fdb064e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py new file mode 100644 index 0000000..75b7583 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.py @@ -0,0 +1,223 @@ +from __future__ import division, print_function, absolute_import +import numpy as np +import scipy.linalg +from scipy.sparse import csc_matrix +from scipy.optimize._trustregion_constr.projections \ + import projections, orthogonality +from numpy.testing import (TestCase, 
assert_array_almost_equal, + assert_array_equal, assert_array_less, + assert_raises, assert_equal, assert_, + run_module_suite, assert_allclose, assert_warns, + dec) +import pytest +import sys +import platform + +try: + from sksparse.cholmod import cholesky_AAt + sksparse_available = True + available_sparse_methods = ("NormalEquation", "AugmentedSystem") +except ImportError: + import warnings + sksparse_available = False + available_sparse_methods = ("AugmentedSystem",) +available_dense_methods = ('QRFactorization', 'SVDFactorization') + + +class TestProjections(TestCase): + + def test_nullspace_and_least_squares_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + At_dense = A_dense.T + A = csc_matrix(A_dense) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) + + for method in available_sparse_methods: + Z, LS, _ = projections(A, method) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + assert_array_almost_equal(A.dot(x), 0) + # Test orthogonality + assert_array_almost_equal(orthogonality(A, x), 0) + # Test if x is the least square solution + x = LS.matvec(z) + x2 = scipy.linalg.lstsq(At_dense, z)[0] + assert_array_almost_equal(x, x2) + + def test_iterative_refinements_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A_dense) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8], + [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) + + for method in available_sparse_methods: + Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + atol = 1e-13 * abs(x).max() + err = abs(A.dot(x)).max() + assert_allclose(A.dot(x), 0, atol=atol) + # Test orthogonality + assert_allclose(orthogonality(A, x), 0, atol=1e-13) + + def test_rowspace_sparse(self): + A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A_dense) + test_points = ([1, 2, 3], + [1, 10, 3], + [1.12, 10, 0]) + + for method in available_sparse_methods: + _, _, Y = projections(A, method) + for z in test_points: + # Test if x is solution of A x = z + x = Y.matvec(z) + assert_array_almost_equal(A.dot(x), z) + # Test if x is in the return row space of A + A_ext = np.vstack((A_dense, x)) + assert_equal(np.linalg.matrix_rank(A_dense), + np.linalg.matrix_rank(A_ext)) + + def test_nullspace_and_least_squares_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + At = A.T + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) + + for method in available_dense_methods: + Z, LS, _ = projections(A, method) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + assert_array_almost_equal(A.dot(x), 0) + # Test orthogonality + assert_array_almost_equal(orthogonality(A, x), 0) + # Test if x is the least square solution + x = LS.matvec(z) + x2 = scipy.linalg.lstsq(At, z)[0] + assert_array_almost_equal(x, x2) + + def test_compare_dense_and_sparse(self): + D = np.diag(range(1, 101)) + A = np.hstack([D, D, D, D]) + A_sparse = csc_matrix(A) + np.random.seed(0) + + Z, LS, Y = projections(A) + Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) + for k in range(20): + z = 
np.random.normal(size=(400,)) + assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) + assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) + x = np.random.normal(size=(100,)) + assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x)) + + def test_compare_dense_and_sparse2(self): + D1 = np.diag([-1.7, 1, 0.5]) + D2 = np.diag([1, -0.6, -0.3]) + D3 = np.diag([-0.3, -1.5, 2]) + A = np.hstack([D1, D2, D3]) + A_sparse = csc_matrix(A) + np.random.seed(0) + + Z, LS, Y = projections(A) + Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) + for k in range(1): + z = np.random.normal(size=(9,)) + assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) + assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) + x = np.random.normal(size=(3,)) + assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x)) + + def test_iterative_refinements_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_points = ([1, 2, 3, 4, 5, 6, 7, 8], + [1, 10, 3, 0, 1, 6, 7, 8], + [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) + + for method in available_dense_methods: + Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10) + for z in test_points: + # Test if x is in the null_space + x = Z.matvec(z) + assert_array_almost_equal(A.dot(x), 0, decimal=14) + # Test orthogonality + assert_array_almost_equal(orthogonality(A, x), 0, decimal=16) + + def test_rowspace_dense(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_points = ([1, 2, 3], + [1, 10, 3], + [1.12, 10, 0]) + + for method in available_dense_methods: + _, _, Y = projections(A, method) + for z in test_points: + # Test if x is solution of A x = z + x = Y.matvec(z) + assert_array_almost_equal(A.dot(x), z) + # Test if x is in the return row space of A + A_ext = np.vstack((A, x)) + assert_equal(np.linalg.matrix_rank(A), + np.linalg.matrix_rank(A_ext)) + + +class TestOrthogonality(TestCase): + + def test_dense_matrix(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + test_vectors = ([-1.98931144, -1.56363389, + -0.84115584, 2.2864762, + 5.599141, 0.09286976, + 1.37040802, -0.28145812], + [697.92794044, -4091.65114008, + -3327.42316335, 836.86906951, + 99434.98929065, -1285.37653682, + -4109.21503806, 2935.29289083]) + test_expected_orth = (0, 0) + + for i in range(len(test_vectors)): + x = test_vectors[i] + orth = test_expected_orth[i] + assert_array_almost_equal(orthogonality(A, x), orth) + + def test_sparse_matrix(self): + A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], + [0, 8, 7, 0, 1, 5, 9, 0], + [1, 0, 0, 0, 0, 1, 2, 3]]) + A = csc_matrix(A) + test_vectors = ([-1.98931144, -1.56363389, + -0.84115584, 2.2864762, + 5.599141, 0.09286976, + 1.37040802, -0.28145812], + [697.92794044, -4091.65114008, + -3327.42316335, 836.86906951, + 99434.98929065, -1285.37653682, + -4109.21503806, 2935.29289083]) + test_expected_orth = (0, 0) + + for i in range(len(test_vectors)): + x = test_vectors[i] + orth = test_expected_orth[i] + assert_array_almost_equal(orthogonality(A, x), orth) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.pyc new file mode 100644 index 0000000..d610f1f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_projections.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
new file mode 100644
index 0000000..f94cc32
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
@@ -0,0 +1,649 @@
+import numpy as np
+from scipy.sparse import csc_matrix
+from scipy.optimize._trustregion_constr.qp_subproblem \
+    import (eqp_kktfact,
+            projected_cg,
+            box_intersections,
+            sphere_intersections,
+            box_sphere_intersections,
+            modified_dogleg)
+from scipy.optimize._trustregion_constr.projections \
+    import projections
+from numpy.testing import (TestCase, assert_array_almost_equal,
+                           assert_array_equal, assert_array_less,
+                           assert_equal, assert_,
+                           run_module_suite, assert_allclose, assert_warns,
+                           dec)
+import pytest
+
+
+class TestEQPDirectFactorization(TestCase):
+
+    # From Example 16.2 Nocedal/Wright "Numerical
+    # Optimization" p.452.
+    def test_nocedal_example(self):
+        H = csc_matrix([[6, 2, 1],
+                        [2, 5, 2],
+                        [1, 2, 4]])
+        A = csc_matrix([[1, 0, 1],
+                        [0, 1, 1]])
+        c = np.array([-8, -3, -3])
+        b = -np.array([3, 0])
+        x, lagrange_multipliers = eqp_kktfact(H, c, A, b)
+        assert_array_almost_equal(x, [2, -1, 1])
+        assert_array_almost_equal(lagrange_multipliers, [3, -2])
+
+
+class TestSphericalBoundariesIntersections(TestCase):
+
+    def test_2d_sphere_constraints(self):
+        # Interior initial point
+        ta, tb, intersect = sphere_intersections([0, 0],
+                                                 [1, 0], 0.5)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # No intersection between line and circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [0, 1], 1)
+        assert_equal(intersect, False)
+
+        # Outside initial point pointing toward outside the circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [1, 0], 1)
+        assert_equal(intersect, False)
+
+        # Outside initial point pointing toward inside the circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [-1, 0], 1.5)
+        assert_array_almost_equal([ta, tb], [0.5, 1])
+        assert_equal(intersect, True)
+
+        # Initial point on the boundary
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [1, 0], 2)
+        assert_array_almost_equal([ta, tb], [0, 0])
+        assert_equal(intersect, True)
+
+    def test_2d_sphere_constraints_line_intersections(self):
+        # Interior initial point
+        ta, tb, intersect = sphere_intersections([0, 0],
+                                                 [1, 0], 0.5,
+                                                 entire_line=True)
+        assert_array_almost_equal([ta, tb], [-0.5, 0.5])
+        assert_equal(intersect, True)
+
+        # No intersection between line and circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [0, 1], 1,
+                                                 entire_line=True)
+        assert_equal(intersect, False)
+
+        # Outside initial point pointing toward outside the circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [1, 0], 1,
+                                                 entire_line=True)
+        assert_array_almost_equal([ta, tb], [-3, -1])
+        assert_equal(intersect, True)
+
+        # Outside initial point pointing toward inside the circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [-1, 0], 1.5,
+                                                 entire_line=True)
+        assert_array_almost_equal([ta, tb], [0.5, 3.5])
+        assert_equal(intersect, True)
+
+        # Initial point on the boundary
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [1, 0], 2,
+                                                 entire_line=True)
+        assert_array_almost_equal([ta, tb], [-4, 0])
+        assert_equal(intersect, True)
+
+
+class TestBoxBoundariesIntersections(TestCase):
+
+    def test_2d_box_constraints(self):
+        # Box constraint in the direction of vector d
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [1, 1], [3, 3])
+        assert_array_almost_equal([ta, tb], [0.5, 1])
+        assert_equal(intersect, True)
+
+        # Negative direction
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [1, -3], [3, -1])
+        assert_equal(intersect, False)
+
+        # Some constraints are absent (set to +/- inf)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-np.inf, 1],
+                                              [np.inf, np.inf])
+        assert_array_almost_equal([ta, tb], [0.5, 1])
+        assert_equal(intersect, True)
+
+        # Intersect on the face of the box
+        ta, tb, intersect = box_intersections([1, 0], [0, 1],
+                                              [1, 1], [3, 3])
+        assert_array_almost_equal([ta, tb], [1, 1])
+        assert_equal(intersect, True)
+
+        # Interior initial point
+        ta, tb, intersect = box_intersections([0, 0], [4, 4],
+                                              [-2, -3], [3, 2])
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # No intersection between line and box constraints
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, -3], [-1, -1])
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, 3], [-1, 1])
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, -np.inf],
+                                              [-1, np.inf])
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([0, 0], [1, 100],
+                                              [1, 1], [3, 3])
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
+                                              [1, 1], [3, 3])
+        assert_equal(intersect, False)
+
+        # Initial point on the boundary
+        ta, tb, intersect = box_intersections([2, 2], [0, 1],
+                                              [-2, -2], [2, 2])
+        assert_array_almost_equal([ta, tb], [0, 0])
+        assert_equal(intersect, True)
+
+    def test_2d_box_constraints_entire_line(self):
+        # Box constraint in the direction of vector d
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [1, 1], [3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [0.5, 1.5])
+        assert_equal(intersect, True)
+
+        # Negative direction
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [1, -3], [3, -1],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-1.5, -0.5])
+        assert_equal(intersect, True)
+
+        # Some constraints are absent (set to +/- inf)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-np.inf, 1],
+                                              [np.inf, np.inf],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [0.5, np.inf])
+        assert_equal(intersect, True)
+
+        # Intersect on the face of the box
+        ta, tb, intersect = box_intersections([1, 0], [0, 1],
+                                              [1, 1], [3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [1, 3])
+        assert_equal(intersect, True)
+
+        # Interior initial point
+        ta, tb, intersect = box_intersections([0, 0], [4, 4],
+                                              [-2, -3], [3, 2],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-0.5, 0.5])
+        assert_equal(intersect, True)
+
+        # No intersection between line and box constraints
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, -3], [-1, -1],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, 3], [-1, 1],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, -np.inf],
+                                              [-1, np.inf],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([0, 0], [1, 100],
+                                              [1, 1], [3, 3],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
+                                              [1, 1], [3, 3],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+
+        # Initial point on the boundary
+        ta, tb, intersect = box_intersections([2, 2], [0, 1],
+                                              [-2, -2], [2, 2],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-4, 0])
+        assert_equal(intersect, True)
+
+    def test_3d_box_constraints(self):
+        # Simple case
+        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
+                                              [1, 1, 1], [3, 3, 3])
+        assert_array_almost_equal([ta, tb], [1, 1])
+        assert_equal(intersect, True)
+
+        # Negative direction
+        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
+                                              [1, 1, 1], [3, 3, 3])
+        assert_equal(intersect, False)
+
+        # Interior point
+        ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
+                                              [1, 1, 1], [3, 3, 3])
+        assert_array_almost_equal([ta, tb], [0, 1])
+        assert_equal(intersect, True)
+
+    def test_3d_box_constraints_entire_line(self):
+        # Simple case
+        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
+                                              [1, 1, 1], [3, 3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [1, 3])
+        assert_equal(intersect, True)
+
+        # Negative direction
+        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
+                                              [1, 1, 1], [3, 3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-3, -1])
+        assert_equal(intersect, True)
+
+        # Interior point
+        ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
+                                              [1, 1, 1], [3, 3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-1, 1])
+        assert_equal(intersect, True)
+
+
+class TestBoxSphereBoundariesIntersections(TestCase):
+
+    def test_2d_box_constraints(self):
+        # Both constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
+                                                     [-1, -2], [1, 2], 2,
+                                                     entire_line=False)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # None of the constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
+                                                     [-1, -3], [1, 3], 10,
+                                                     entire_line=False)
+        assert_array_almost_equal([ta, tb], [0, 1])
+        assert_equal(intersect, True)
+
+        # Box constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [-1, -3], [1, 3], 10,
+                                                     entire_line=False)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # Spherical constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [-1, -3], [1, 3], 2,
+                                                     entire_line=False)
+        assert_array_almost_equal([ta, tb], [0, 0.25])
+        assert_equal(intersect, True)
+
+        # Infeasible problems
+        ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
+                                                     [-1, -3], [1, 3], 2,
+                                                     entire_line=False)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [2, 4], [2, 4], 2,
+                                                     entire_line=False)
+        assert_equal(intersect, False)
+
+    def test_2d_box_constraints_entire_line(self):
+        # Both constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
+                                                     [-1, -2], [1, 2], 2,
+                                                     entire_line=True)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # None of the constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
+                                                     [-1, -3], [1, 3], 10,
+                                                     entire_line=True)
+        assert_array_almost_equal([ta, tb], [0, 2])
+        assert_equal(intersect, True)
+
+        # Box constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [-1, -3], [1, 3], 10,
+                                                     entire_line=True)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # Spherical constraints are active
+        ta, tb, intersect =
box_sphere_intersections([1, 1], [-4, 4],
+                                                     [-1, -3], [1, 3], 2,
+                                                     entire_line=True)
+        assert_array_almost_equal([ta, tb], [0, 0.25])
+        assert_equal(intersect, True)
+
+        # Infeasible problems
+        ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
+                                                     [-1, -3], [1, 3], 2,
+                                                     entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [2, 4], [2, 4], 2,
+                                                     entire_line=True)
+        assert_equal(intersect, False)
+
+
+class TestModifiedDogleg(TestCase):
+
+    def test_cauchypoint_equalsto_newtonpoint(self):
+        A = np.array([[1, 8]])
+        b = np.array([-16])
+        _, _, Y = projections(A)
+        newton_point = np.array([0.24615385, 1.96923077])
+
+        # Newton point inside boundaries
+        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf])
+        assert_array_almost_equal(x, newton_point)
+
+        # Spherical constraint active
+        x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf])
+        assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point))
+
+        # Box constraints active
+        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf])
+        assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1)
+
+    def test_3d_example(self):
+        A = np.array([[1, 8, 1],
+                      [4, 2, 2]])
+        b = np.array([-16, 2])
+        Z, LS, Y = projections(A)
+
+        newton_point = np.array([-1.37090909, 2.23272727, -0.49090909])
+        cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585])
+        origin = np.zeros_like(newton_point)
+
+        # newton_point inside boundaries
+        x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf],
+                            [np.inf, np.inf, np.inf])
+        assert_array_almost_equal(x, newton_point)
+
+        # line between cauchy_point and newton_point contains best point
+        # (spherical constraint is active).
+        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
+                            [np.inf, np.inf, np.inf])
+        z = cauchy_point
+        d = newton_point-cauchy_point
+        t = ((x-z)/(d))
+        assert_array_almost_equal(t, 0.40807330*np.ones(3))
+        assert_array_almost_equal(np.linalg.norm(x), 2)
+
+        # line between cauchy_point and newton_point contains best point
+        # (box constraint is active).
+        x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf],
+                            [np.inf, np.inf, np.inf])
+        z = cauchy_point
+        d = newton_point-cauchy_point
+        t = ((x-z)/(d))
+        assert_array_almost_equal(t, 0.7498195*np.ones(3))
+        assert_array_almost_equal(x[0], -1)
+
+        # line between origin and cauchy_point contains best point
+        # (spherical constraint is active).
+        x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf],
+                            [np.inf, np.inf, np.inf])
+        z = origin
+        d = cauchy_point
+        t = ((x-z)/(d))
+        assert_array_almost_equal(t, 0.573936265*np.ones(3))
+        assert_array_almost_equal(np.linalg.norm(x), 1)
+
+        # line between origin and newton_point contains best point
+        # (box constraint is active).
+        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
+                            [np.inf, 1, np.inf])
+        z = origin
+        d = newton_point
+        t = ((x-z)/(d))
+        assert_array_almost_equal(t, 0.4478827364*np.ones(3))
+        assert_array_almost_equal(x[1], 1)
+
+
+class TestProjectCG(TestCase):
+
+    # From Example 16.2 Nocedal/Wright "Numerical
+    # Optimization" p.452.
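+    # (The example minimizes 1/2 x.T H x + c.T x subject to x0 + x2 = 3
+    # and x1 + x2 = 0; the known solution is x = (2, -1, 1).)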
+ def test_nocedal_example(self): + H = csc_matrix([[6, 2, 1], + [2, 5, 2], + [1, 2, 4]]) + A = csc_matrix([[1, 0, 1], + [0, 1, 1]]) + c = np.array([-8, -3, -3]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b) + assert_equal(info["stop_cond"], 4) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, [2, -1, 1]) + + def test_compare_with_direct_fact(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, tol=0) + x_kkt, _ = eqp_kktfact(H, c, A, b) + assert_equal(info["stop_cond"], 1) + assert_equal(info["hits_boundary"], False) + assert_array_almost_equal(x, x_kkt) + + def test_trust_region_infeasible(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 1 + Z, _, Y = projections(A) + with pytest.raises(ValueError): + projected_cg(H, c, Z, Y, b, trust_radius=trust_radius) + + def test_trust_region_barely_feasible(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 2.32379000772445021283 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + assert_array_almost_equal(x, -Y.dot(b)) + + def test_hits_boundary(self): + H = csc_matrix([[6, 2, 1, 3], + [2, 5, 2, 4], + [1, 2, 4, 5], + [3, 4, 5, 7]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 1, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + trust_radius = 3 + Z, _, Y = projections(A) + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 2) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + + def test_negative_curvature_unconstrained(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + with pytest.raises(ValueError): + projected_cg(H, c, Z, Y, b, tol=0) + + def test_negative_curvature(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + trust_radius = 1000 + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 3) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(np.linalg.norm(x), trust_radius) + + # The box constraints are inactive at the solution but + # are active during the iterations. 
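+    # (With lb[0] = 0.5 some intermediate iterates violate the bound, but
+    # the final iterate coincides with the unconstrained KKT solution.)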
+    def test_inactive_box_constraints(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               lb=[0.5, -np.inf,
+                                   -np.inf, -np.inf],
+                               return_all=True)
+        x_kkt, _ = eqp_kktfact(H, c, A, b)
+        assert_equal(info["stop_cond"], 1)
+        assert_equal(info["hits_boundary"], False)
+        assert_array_almost_equal(x, x_kkt)
+
+    # The box constraints are active and the termination is
+    # by maximum iterations (infeasible iteration).
+    def test_active_box_constraints_maximum_iterations_reached(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               lb=[0.8, -np.inf,
+                                   -np.inf, -np.inf],
+                               return_all=True)
+        assert_equal(info["stop_cond"], 1)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(A.dot(x), -b)
+        assert_array_almost_equal(x[0], 0.8)
+
+    # The box constraints are active and the termination is
+    # because it hits boundary (without infeasible iteration).
+    def test_active_box_constraints_hits_boundaries(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        trust_radius = 3
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               ub=[np.inf, np.inf, 1.6, np.inf],
+                               trust_radius=trust_radius,
+                               return_all=True)
+        assert_equal(info["stop_cond"], 2)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(x[2], 1.6)
+
+    # The box constraints are active and the termination is
+    # because it hits boundary (with an infeasible iteration).
+    def test_active_box_constraints_hits_boundaries_infeasible_iter(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        trust_radius = 4
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               ub=[np.inf, 0.1, np.inf, np.inf],
+                               trust_radius=trust_radius,
+                               return_all=True)
+        assert_equal(info["stop_cond"], 2)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(x[1], 0.1)
+
+    # The box constraints are active and the termination is
+    # because of negative curvature.
+ def test_active_box_constraints_negative_curvature(self): + H = csc_matrix([[1, 2, 1, 3], + [2, 0, 2, 4], + [1, 2, 0, 2], + [3, 4, 2, 0]]) + A = csc_matrix([[1, 0, 1, 0], + [0, 1, 0, 1]]) + c = np.array([-2, -3, -3, 1]) + b = -np.array([3, 0]) + Z, _, Y = projections(A) + trust_radius = 1000 + x, info = projected_cg(H, c, Z, Y, b, + tol=0, + ub=[np.inf, np.inf, 100, np.inf], + trust_radius=trust_radius) + assert_equal(info["stop_cond"], 3) + assert_equal(info["hits_boundary"], True) + assert_array_almost_equal(x[2], 100) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.pyc new file mode 100644 index 0000000..7d078ae Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py new file mode 100644 index 0000000..b30c398 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py @@ -0,0 +1,347 @@ +"""Trust-region interior point method. + +References +---------- +.. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. + "An interior point algorithm for large-scale nonlinear + programming." SIAM Journal on Optimization 9.4 (1999): 877-900. +.. [2] Byrd, Richard H., Guanghui Liu, and Jorge Nocedal. + "On the local behavior of an interior point method for + nonlinear programming." Numerical analysis 1997 (1997): 37-56. +.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). 
+""" + +from __future__ import division, print_function, absolute_import +import scipy.sparse as sps +import numpy as np +from .equality_constrained_sqp import equality_constrained_sqp +from scipy.sparse.linalg import LinearOperator + +__all__ = ['tr_interior_point'] + + +class BarrierSubproblem: + """ + Barrier optimization problem: + minimize fun(x) - barrier_parameter*sum(log(s)) + subject to: constr_eq(x) = 0 + constr_ineq(x) + s = 0 + """ + + def __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, + constr, jac, barrier_parameter, tolerance, + enforce_feasibility, global_stop_criteria, + xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, + jac_eq0): + # Store parameters + self.n_vars = n_vars + self.x0 = x0 + self.s0 = s0 + self.fun = fun + self.grad = grad + self.lagr_hess = lagr_hess + self.constr = constr + self.jac = jac + self.barrier_parameter = barrier_parameter + self.tolerance = tolerance + self.n_eq = n_eq + self.n_ineq = n_ineq + self.enforce_feasibility = enforce_feasibility + self.global_stop_criteria = global_stop_criteria + self.xtol = xtol + self.fun0 = self._compute_function(fun0, constr_ineq0, s0) + self.grad0 = self._compute_gradient(grad0) + self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0) + self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0) + self.terminate = False + + def update(self, barrier_parameter, tolerance): + self.barrier_parameter = barrier_parameter + self.tolerance = tolerance + + def get_slack(self, z): + return z[self.n_vars:self.n_vars+self.n_ineq] + + def get_variables(self, z): + return z[:self.n_vars] + + def function_and_constraints(self, z): + """Returns barrier function and constraints at given point. + + For z = [x, s], returns barrier function: + function(z) = fun(x) - barrier_parameter*sum(log(s)) + and barrier constraints: + constraints(z) = [ constr_eq(x) ] + [ constr_ineq(x) + s ] + + """ + # Get variables and slack variables + x = self.get_variables(z) + s = self.get_slack(z) + # Compute function and constraints + f = self.fun(x) + c_eq, c_ineq = self.constr(x) + # Return objective function and constraints + return (self._compute_function(f, c_ineq, s), + self._compute_constr(c_ineq, c_eq, s)) + + def _compute_function(self, f, c_ineq, s): + # Use technique from Nocedal and Wright book, ref [3]_, p.576, + # to guarantee constraints from `enforce_feasibility` + # stay feasible along iterations. + s[self.enforce_feasibility] = -c_ineq[self.enforce_feasibility] + log_s = [np.log(s_i) if s_i > 0 else -np.inf for s_i in s] + # Compute barrier objective function + return f - self.barrier_parameter*np.sum(log_s) + + def _compute_constr(self, c_ineq, c_eq, s): + # Compute barrier constraint + return np.hstack((c_eq, + c_ineq + s)) + + def scaling(self, z): + """Returns scaling vector. + Given by: + scaling = [ones(n_vars), s] + """ + s = self.get_slack(z) + diag_elements = np.hstack((np.ones(self.n_vars), s)) + + # Diagonal Matrix + def matvec(vec): + return diag_elements*vec + return LinearOperator((self.n_vars+self.n_ineq, + self.n_vars+self.n_ineq), + matvec) + + def gradient_and_jacobian(self, z): + """Returns scaled gradient. + + Return scaled gradient: + gradient = [ grad(x) ] + [ -barrier_parameter*ones(n_ineq) ] + and scaled Jacobian Matrix: + jacobian = [ jac_eq(x) 0 ] + [ jac_ineq(x) S ] + Both of them scaled by the previously defined scaling factor. 
+ """ + # Get variables and slack variables + x = self.get_variables(z) + s = self.get_slack(z) + # Compute first derivatives + g = self.grad(x) + J_eq, J_ineq = self.jac(x) + # Return gradient and jacobian + return (self._compute_gradient(g), + self._compute_jacobian(J_eq, J_ineq, s)) + + def _compute_gradient(self, g): + return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq))) + + def _compute_jacobian(self, J_eq, J_ineq, s): + if self.n_ineq == 0: + return J_eq + else: + if sps.issparse(J_eq) or sps.issparse(J_ineq): + # It is expected that J_eq and J_ineq + # are already `csr_matrix` because of + # the way ``BoxConstraint``, ``NonlinearConstraint`` + # and ``LinearConstraint`` are defined. + J_eq = sps.csr_matrix(J_eq) + J_ineq = sps.csr_matrix(J_ineq) + return self._assemble_sparse_jacobian(J_eq, J_ineq, s) + else: + S = np.diag(s) + zeros = np.zeros((self.n_eq, self.n_ineq)) + # Convert to matrix + if sps.issparse(J_ineq): + J_ineq = J_ineq.toarray() + if sps.issparse(J_eq): + J_eq = J_eq.toarray() + # Concatenate matrices + return np.asarray(np.bmat([[J_eq, zeros], + [J_ineq, S]])) + + def _assemble_sparse_jacobian(self, J_eq, J_ineq, s): + """Assemble sparse jacobian given its components. + + Given ``J_eq``, ``J_ineq`` and ``s`` returns: + jacobian = [ J_eq, 0 ] + [ J_ineq, diag(s) ] + + It is equivalent to: + sps.bmat([[ J_eq, None ], + [ J_ineq, diag(s) ]], "csr") + but significantly more efficient for this + given structure. + """ + n_vars, n_ineq, n_eq = self.n_vars, self.n_ineq, self.n_eq + J_aux = sps.vstack([J_eq, J_ineq], "csr") + indptr, indices, data = J_aux.indptr, J_aux.indices, J_aux.data + new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int), + np.arange(n_ineq+1, dtype=int))) + size = indices.size+n_ineq + new_indices = np.empty(size) + new_data = np.empty(size) + mask = np.full(size, False, bool) + mask[new_indptr[-n_ineq:]-1] = True + new_indices[mask] = n_vars+np.arange(n_ineq) + new_indices[~mask] = indices + new_data[mask] = s + new_data[~mask] = data + J = sps.csr_matrix((new_data, new_indices, new_indptr), + (n_eq + n_ineq, n_vars + n_ineq)) + return J + + def lagrangian_hessian_x(self, z, v): + """Returns Lagrangian Hessian (in relation to `x`) -> Hx""" + x = self.get_variables(z) + # Get lagrange multipliers relatated to nonlinear equality constraints + v_eq = v[:self.n_eq] + # Get lagrange multipliers relatated to nonlinear ineq. constraints + v_ineq = v[self.n_eq:self.n_eq+self.n_ineq] + lagr_hess = self.lagr_hess + return lagr_hess(x, v_eq, v_ineq) + + def lagrangian_hessian_s(self, z, v): + """Returns scaled Lagrangian Hessian (in relation to`s`) -> S Hs S""" + s = self.get_slack(z) + # Using the primal formulation: + # S Hs S = diag(s)*diag(barrier_parameter/s**2)*diag(s). + # Reference [1]_ p. 882, formula (3.1) + primal = self.barrier_parameter + # Using the primal-dual formulation + # S Hs S = diag(s)*diag(v/s)*diag(s) + # Reference [1]_ p. 883, formula (3.11) + primal_dual = v[-self.n_ineq:]*s + # Uses the primal-dual formulation for + # positives values of v_ineq, and primal + # formulation for the remaining ones. 
+        return np.where(v[-self.n_ineq:] > 0, primal_dual, primal)
+
+    def lagrangian_hessian(self, z, v):
+        """Returns scaled Lagrangian Hessian"""
+        # Compute Hessian in relation to x and s
+        Hx = self.lagrangian_hessian_x(z, v)
+        if self.n_ineq > 0:
+            S_Hs_S = self.lagrangian_hessian_s(z, v)
+
+        # The scaled Lagrangian Hessian is:
+        #     [ Hx    0    ]
+        #     [ 0   S Hs S ]
+        def matvec(vec):
+            vec_x = self.get_variables(vec)
+            vec_s = self.get_slack(vec)
+            if self.n_ineq > 0:
+                return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s))
+            else:
+                return Hx.dot(vec_x)
+        return LinearOperator((self.n_vars+self.n_ineq,
+                               self.n_vars+self.n_ineq),
+                              matvec)
+
+    def stop_criteria(self, state, z, last_iteration_failed,
+                      optimality, constr_violation,
+                      trust_radius, penalty, cg_info):
+        """Stop criteria for the barrier problem.
+        The criteria proposed here are similar to formula (2.3)
+        from [1]_, p.879.
+        """
+        x = self.get_variables(z)
+        if self.global_stop_criteria(state, x,
+                                     last_iteration_failed,
+                                     trust_radius, penalty,
+                                     cg_info,
+                                     self.barrier_parameter,
+                                     self.tolerance):
+            self.terminate = True
+            return True
+        else:
+            g_cond = (optimality < self.tolerance and
+                      constr_violation < self.tolerance)
+            x_cond = trust_radius < self.xtol
+            return g_cond or x_cond
+
+
+def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq,
+                      constr, jac, x0, fun0, grad0,
+                      constr_ineq0, jac_ineq0, constr_eq0,
+                      jac_eq0, stop_criteria,
+                      enforce_feasibility, xtol, state,
+                      initial_barrier_parameter,
+                      initial_tolerance,
+                      initial_penalty,
+                      initial_trust_radius,
+                      factorization_method):
+    """Trust-region interior point method.
+
+    Solve problem:
+        minimize fun(x)
+        subject to: constr_ineq(x) <= 0
+                    constr_eq(x) = 0
+    using the trust-region interior point method described in [1]_.
+    """
+    # BOUNDARY_PARAMETER controls the decrease on the slack
+    # variables. Represents ``tau`` from [1]_ p.885, formula (3.18).
+    BOUNDARY_PARAMETER = 0.995
+    # BARRIER_DECAY_RATIO controls the decay of the barrier parameter
+    # and of the subproblem tolerance. Represents ``theta`` from [1]_ p.879.
+    BARRIER_DECAY_RATIO = 0.2
+    # TRUST_ENLARGEMENT controls the enlargement of the trust radius
+    # after each iteration
+    TRUST_ENLARGEMENT = 5
+
+    # Default enforce_feasibility
+    if enforce_feasibility is None:
+        enforce_feasibility = np.zeros(n_ineq, bool)
+    # Initial Values
+    barrier_parameter = initial_barrier_parameter
+    tolerance = initial_tolerance
+    trust_radius = initial_trust_radius
+    # Define initial value for the slack variables
+    s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq))
+    # Define barrier subproblem
+    subprob = BarrierSubproblem(
+        x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac,
+        barrier_parameter, tolerance, enforce_feasibility,
+        stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0,
+        constr_eq0, jac_eq0)
+    # Define initial parameter for the first iteration.
+    z = np.hstack((x0, s0))
+    fun0_subprob, constr0_subprob = subprob.fun0, subprob.constr0
+    grad0_subprob, jac0_subprob = subprob.grad0, subprob.jac0
+    # Define trust region bounds
+    trust_lb = np.hstack((np.full(subprob.n_vars, -np.inf),
+                          np.full(subprob.n_ineq, -BOUNDARY_PARAMETER)))
+    trust_ub = np.full(subprob.n_vars+subprob.n_ineq, np.inf)
+
+    # Solves a sequence of barrier problems
+    while True:
+        # Solve SQP subproblem
+        z, state = equality_constrained_sqp(
+            subprob.function_and_constraints,
+            subprob.gradient_and_jacobian,
+            subprob.lagrangian_hessian,
+            z, fun0_subprob, grad0_subprob,
+            constr0_subprob, jac0_subprob, subprob.stop_criteria,
+            state, initial_penalty, trust_radius,
+            factorization_method, trust_lb, trust_ub, subprob.scaling)
+        if subprob.terminate:
+            break
+        # Update parameters
+        trust_radius = max(initial_trust_radius,
+                           TRUST_ENLARGEMENT*state.tr_radius)
+        # TODO: Use more advanced strategies from [2]_
+        # to update these parameters.
+        barrier_parameter *= BARRIER_DECAY_RATIO
+        tolerance *= BARRIER_DECAY_RATIO
+        # Update Barrier Problem
+        subprob.update(barrier_parameter, tolerance)
+        # Compute initial values for next iteration
+        fun0_subprob, constr0_subprob = subprob.function_and_constraints(z)
+        grad0_subprob, jac0_subprob = subprob.gradient_and_jacobian(z)
+
+    # Get x and s
+    x = subprob.get_variables(z)
+    return x, state
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.pyc
new file mode 100644
index 0000000..0d3103a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_dogleg.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_dogleg.py
new file mode 100644
index 0000000..3d76558
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_dogleg.py
@@ -0,0 +1,124 @@
+"""Dog-leg trust-region optimization."""
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+import scipy.linalg
+from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
+
+__all__ = []
+
+
+def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None,
+                     **trust_region_options):
+    """
+    Minimization of a scalar function of one or more variables using
+    the dog-leg trust-region algorithm.
+
+    Options
+    -------
+    initial_trust_radius : float
+        Initial trust-region radius.
+    max_trust_radius : float
+        Maximum value of the trust-region radius. No steps that are longer
+        than this value will be proposed.
+    eta : float
+        Trust region related acceptance stringency for proposed steps.
+    gtol : float
+        Gradient norm must be less than `gtol` before successful
+        termination.
+
+    """
+    if jac is None:
+        raise ValueError('Jacobian is required for dogleg minimization')
+    if hess is None:
+        raise ValueError('Hessian is required for dogleg minimization')
+    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
+                                  subproblem=DoglegSubproblem,
+                                  **trust_region_options)
+
+
+class DoglegSubproblem(BaseQuadraticSubproblem):
+    """Quadratic subproblem solved by the dogleg method"""
+
+    def cauchy_point(self):
+        """
+        The Cauchy point is minimal along the direction of steepest descent.
+ """ + if self._cauchy_point is None: + g = self.jac + Bg = self.hessp(g) + self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g + return self._cauchy_point + + def newton_point(self): + """ + The Newton point is a global minimum of the approximate function. + """ + if self._newton_point is None: + g = self.jac + B = self.hess + cho_info = scipy.linalg.cho_factor(B) + self._newton_point = -scipy.linalg.cho_solve(cho_info, g) + return self._newton_point + + def solve(self, trust_radius): + """ + Minimize a function using the dog-leg trust-region algorithm. + + This algorithm requires function values and first and second derivatives. + It also performs a costly Hessian decomposition for most iterations, + and the Hessian is required to be positive definite. + + Parameters + ---------- + trust_radius : float + We are allowed to wander only this far away from the origin. + + Returns + ------- + p : ndarray + The proposed step. + hits_boundary : bool + True if the proposed step is on the boundary of the trust region. + + Notes + ----- + The Hessian is required to be positive definite. + + References + ---------- + .. [1] Jorge Nocedal and Stephen Wright, + Numerical Optimization, second edition, + Springer-Verlag, 2006, page 73. + """ + + # Compute the Newton point. + # This is the optimum for the quadratic model function. + # If it is inside the trust radius then return this point. + p_best = self.newton_point() + if scipy.linalg.norm(p_best) < trust_radius: + hits_boundary = False + return p_best, hits_boundary + + # Compute the Cauchy point. + # This is the predicted optimum along the direction of steepest descent. + p_u = self.cauchy_point() + + # If the Cauchy point is outside the trust region, + # then return the point where the path intersects the boundary. + p_u_norm = scipy.linalg.norm(p_u) + if p_u_norm >= trust_radius: + p_boundary = p_u * (trust_radius / p_u_norm) + hits_boundary = True + return p_boundary, hits_boundary + + # Compute the intersection of the trust region boundary + # and the line segment connecting the Cauchy and Newton points. + # This requires solving a quadratic equation. + # ||p_u + t*(p_best - p_u)||**2 == trust_radius**2 + # Solve this for positive time t using the quadratic formula. 
+        _, tb = self.get_boundaries_intersections(p_u, p_best - p_u,
+                                                  trust_radius)
+        p_boundary = p_u + tb * (p_best - p_u)
+        hits_boundary = True
+        return p_boundary, hits_boundary
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_dogleg.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_dogleg.pyc
new file mode 100644
index 0000000..3c8ed03
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_dogleg.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_exact.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_exact.py
new file mode 100644
index 0000000..2fd1b09
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_exact.py
@@ -0,0 +1,432 @@
+"""Nearly exact trust-region optimization subproblem."""
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from scipy.linalg import (norm, get_lapack_funcs, solve_triangular,
+                          cho_solve)
+from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
+
+__all__ = ['_minimize_trustregion_exact',
+           'estimate_smallest_singular_value',
+           'singular_leading_submatrix',
+           'IterativeSubproblem']
+
+
+def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None,
+                                **trust_region_options):
+    """
+    Minimization of a scalar function of one or more variables using
+    a nearly exact trust-region algorithm.
+
+    Options
+    -------
+    initial_tr_radius : float
+        Initial trust-region radius.
+    max_tr_radius : float
+        Maximum value of the trust-region radius. No steps that are longer
+        than this value will be proposed.
+    eta : float
+        Trust region related acceptance stringency for proposed steps.
+    gtol : float
+        Gradient norm must be less than ``gtol`` before successful
+        termination.
+    """
+
+    if jac is None:
+        raise ValueError('Jacobian is required for trust region '
+                         'exact minimization.')
+    if hess is None:
+        raise ValueError('Hessian matrix is required for trust region '
+                         'exact minimization.')
+    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
+                                  subproblem=IterativeSubproblem,
+                                  **trust_region_options)
+
+
+def estimate_smallest_singular_value(U):
+    """Given upper triangular matrix ``U`` estimate the smallest singular
+    value and the corresponding right singular vector in O(n**2) operations.
+
+    Parameters
+    ----------
+    U : ndarray
+        Square upper triangular matrix.
+
+    Returns
+    -------
+    s_min : float
+        Estimated smallest singular value of the provided matrix.
+    z_min : ndarray
+        Estimated right singular vector.
+
+    Notes
+    -----
+    The procedure is based on [1]_ and is done in two steps. First it finds
+    a vector ``e`` with components selected from {+1, -1} such that the
+    solution ``w`` from the system ``U.T w = e`` is as large as possible.
+    Next it estimates ``U v = w``. The smallest singular value is close
+    to ``norm(w)/norm(v)`` and the right singular vector is close
+    to ``v/norm(v)``.
+
+    The more ill-conditioned the matrix is, the better the estimate becomes.
+
+    References
+    ----------
+    .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H.
+           An estimate for the condition number of a matrix.  1979.
+           SIAM Journal on Numerical Analysis, 16(2), 368-375.
+ """ + + U = np.atleast_2d(U) + m, n = U.shape + + if m != n: + raise ValueError("A square triangular matrix should be provided.") + + # A vector `e` with components selected from {+1, -1} + # is selected so that the solution `w` to the system + # `U.T w = e` is as large as possible. Implementation + # based on algorithm 3.5.1, p. 142, from reference [2] + # adapted for lower triangular matrix. + + p = np.zeros(n) + w = np.empty(n) + + # Implemented according to: Golub, G. H., Van Loan, C. F. (2013). + # "Matrix computations". Forth Edition. JHU press. pp. 140-142. + for k in range(n): + wp = (1-p[k]) / U.T[k, k] + wm = (-1-p[k]) / U.T[k, k] + pp = p[k+1:] + U.T[k+1:, k]*wp + pm = p[k+1:] + U.T[k+1:, k]*wm + + if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1): + w[k] = wp + p[k+1:] = pp + else: + w[k] = wm + p[k+1:] = pm + + # The system `U v = w` is solved using backward substitution. + v = solve_triangular(U, w) + + v_norm = norm(v) + w_norm = norm(w) + + # Smallest singular value + s_min = w_norm / v_norm + + # Associated vector + z_min = v / v_norm + + return s_min, z_min + + +def gershgorin_bounds(H): + """ + Given a square matrix ``H`` compute upper + and lower bounds for its eigenvalues (Gregoshgorin Bounds). + Defined ref. [1]. + + References + ---------- + .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. + Trust region methods. 2000. Siam. pp. 19. + """ + + H_diag = np.diag(H) + H_diag_abs = np.abs(H_diag) + H_row_sums = np.sum(np.abs(H), axis=1) + lb = np.min(H_diag + H_diag_abs - H_row_sums) + ub = np.max(H_diag - H_diag_abs + H_row_sums) + + return lb, ub + + +def singular_leading_submatrix(A, U, k): + """ + Compute term that makes the leading ``k`` by ``k`` + submatrix from ``A`` singular. + + Parameters + ---------- + A : ndarray + Symmetric matrix that is not positive definite. + U : ndarray + Upper triangular matrix resulting of an incomplete + Cholesky decomposition of matrix ``A``. + k : int + Positive integer such that the leading k by k submatrix from + `A` is the first non-positive definite leading submatrix. + + Returns + ------- + delta : float + Amount that should be added to the element (k, k) of the + leading k by k submatrix of ``A`` to make it singular. + v : ndarray + A vector such that ``v.T B v = 0``. Where B is the matrix A after + ``delta`` is added to its element (k, k). + """ + + # Compute delta + delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1] + + n = len(A) + + # Inicialize v + v = np.zeros(n) + v[k-1] = 1 + + # Compute the remaining values of v by solving a triangular system. + if k != 1: + v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1]) + + return delta, v + + +class IterativeSubproblem(BaseQuadraticSubproblem): + """Quadratic subproblem solved by nearly exact iterative method. + + Notes + ----- + This subproblem solver was based on [1]_, [2]_ and [3]_, + which implement similar algorithms. The algorithm is basically + that of [1]_ but ideas from [2]_ and [3]_ were also used. + + References + ---------- + .. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods", + Siam, pp. 169-200, 2000. + .. [2] J. Nocedal and S. Wright, "Numerical optimization", + Springer Science & Business Media. pp. 83-91, 2006. + .. [3] J.J. More and D.C. Sorensen, "Computing a trust region step", + SIAM Journal on Scientific and Statistical Computing, vol. 4(3), + pp. 553-572, 1983. + """ + + # UPDATE_COEFF appears in reference [1]_ + # in formula 7.3.14 (p. 190) named as "theta". + # As recommended there it value is fixed in 0.01. 
+    UPDATE_COEFF = 0.01
+
+    EPS = np.finfo(float).eps
+
+    def __init__(self, x, fun, jac, hess, hessp=None,
+                 k_easy=0.1, k_hard=0.2):
+
+        super(IterativeSubproblem, self).__init__(x, fun, jac, hess)
+
+        # When the trust-region shrinks in two consecutive
+        # calculations (``tr_radius < previous_tr_radius``)
+        # the lower bound ``lambda_lb`` may be reused,
+        # facilitating the convergence. To indicate no
+        # previous value is known at first ``previous_tr_radius``
+        # is set to -1 and ``lambda_lb`` to None.
+        self.previous_tr_radius = -1
+        self.lambda_lb = None
+
+        self.niter = 0
+
+        # ``k_easy`` and ``k_hard`` are parameters used
+        # to determine the stop criteria of the iterative
+        # subproblem solver. Take a look at pp. 194-197
+        # from reference [1]_ for a more detailed description.
+        self.k_easy = k_easy
+        self.k_hard = k_hard
+
+        # Get LAPACK function for Cholesky decomposition.
+        # The implemented SciPy wrapper does not return
+        # the incomplete factorization needed by the method.
+        self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))
+
+        # Get info about Hessian
+        self.dimension = len(self.hess)
+        self.hess_gershgorin_lb,\
+            self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
+        self.hess_inf = norm(self.hess, np.Inf)
+        self.hess_fro = norm(self.hess, 'fro')
+
+        # A constant such that for vectors smaller than that
+        # backward substitution is not reliable. It was established
+        # based on Golub, G. H., Van Loan, C. F. (2013).
+        # "Matrix computations". Fourth Edition. JHU press., p.165.
+        self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf
+
+    def _initial_values(self, tr_radius):
+        """Given a trust radius, return a good initial guess for
+        the damping factor, the lower bound and the upper bound.
+        The values were chosen according to the guidelines on
+        section 7.3.8 (p. 192) from [1]_.
+        """
+
+        # Upper bound for the damping factor
+        lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
+                                                        self.hess_fro,
+                                                        self.hess_inf))
+
+        # Lower bound for the damping factor
+        lambda_lb = max(0, -min(self.hess.diagonal()),
+                        self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
+                                                     self.hess_fro,
+                                                     self.hess_inf))
+
+        # Improve bounds with previous info
+        if tr_radius < self.previous_tr_radius:
+            lambda_lb = max(self.lambda_lb, lambda_lb)
+
+        # Initial guess for the damping factor
+        if lambda_lb == 0:
+            lambda_initial = 0
+        else:
+            lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
+                                 lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
+
+        return lambda_initial, lambda_lb, lambda_ub
+
+    def solve(self, tr_radius):
+        """Solve quadratic subproblem"""
+
+        lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
+        n = self.dimension
+        hits_boundary = True
+        already_factorized = False
+        self.niter = 0
+
+        while True:
+
+            # Compute Cholesky factorization
+            if already_factorized:
+                already_factorized = False
+            else:
+                H = self.hess+lambda_current*np.eye(n)
+                U, info = self.cholesky(H, lower=False,
+                                        overwrite_a=False,
+                                        clean=True)
+
+            self.niter += 1
+
+            # Check if factorization succeeded
+            if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
+                # Successful factorization
+
+                # Solve `U.T U p = s`
+                p = cho_solve((U, False), -self.jac)
+
+                p_norm = norm(p)
+
+                # Check for interior convergence
+                if p_norm <= tr_radius and lambda_current == 0:
+                    hits_boundary = False
+                    break
+
+                # Solve `U.T w = p`
+                w = solve_triangular(U, p, trans='T')
+
+                w_norm = norm(w)
+
+                # Compute Newton step according to
+                # formula (4.44) p.87 from ref [2]_.
+                delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
+                lambda_new = lambda_current + delta_lambda
+
+                if p_norm < tr_radius:  # Inside boundary
+                    s_min, z_min = estimate_smallest_singular_value(U)
+
+                    ta, tb = self.get_boundaries_intersections(p, z_min,
+                                                               tr_radius)
+
+                    # Choose `step_len` with the smallest magnitude.
+                    # The reason for this choice is explained at
+                    # ref [3]_, p. 6 (Immediately before the formula
+                    # for `tau`).
+                    step_len = min([ta, tb], key=abs)
+
+                    # Compute the quadratic term (p.T*H*p)
+                    quadratic_term = np.dot(p, np.dot(H, p))
+
+                    # Check stop criteria
+                    relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambda_current*tr_radius**2)
+                    if relative_error <= self.k_hard:
+                        p += step_len * z_min
+                        break
+
+                    # Update uncertainty bounds
+                    lambda_ub = lambda_current
+                    lambda_lb = max(lambda_lb, lambda_current - s_min**2)
+
+                    # Compute Cholesky factorization
+                    H = self.hess + lambda_new*np.eye(n)
+                    c, info = self.cholesky(H, lower=False,
+                                            overwrite_a=False,
+                                            clean=True)
+
+                    # Check if the factorization has succeeded
+                    #
+                    if info == 0:  # Successful factorization
+                        # Update damping factor
+                        lambda_current = lambda_new
+                        already_factorized = True
+                    else:  # Unsuccessful factorization
+                        # Update uncertainty bounds
+                        lambda_lb = max(lambda_lb, lambda_new)
+
+                        # Update damping factor
+                        lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
+                                             lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
+
+                else:  # Outside boundary
+                    # Check stop criteria
+                    relative_error = abs(p_norm - tr_radius) / tr_radius
+                    if relative_error <= self.k_easy:
+                        break
+
+                    # Update uncertainty bounds
+                    lambda_lb = lambda_current
+
+                    # Update damping factor
+                    lambda_current = lambda_new
+
+            elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
+                # jac_mag very close to zero
+
+                # Check for interior convergence
+                if lambda_current == 0:
+                    p = np.zeros(n)
+                    hits_boundary = False
+                    break
+
+                s_min, z_min = estimate_smallest_singular_value(U)
+                step_len = tr_radius
+
+                # Check stop criteria
+                if step_len**2 * s_min**2 <= self.k_hard * lambda_current * tr_radius**2:
+                    p = step_len * z_min
+                    break
+
+                # Update uncertainty bounds
+                lambda_ub = lambda_current
+                lambda_lb = max(lambda_lb, lambda_current - s_min**2)
+
+                # Update damping factor
+                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
+                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
+
+            else:  # Unsuccessful factorization
+
+                # Compute auxiliary terms
+                delta, v = singular_leading_submatrix(H, U, info)
+                v_norm = norm(v)
+
+                # Update uncertainty interval
+                lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)
+
+                # Update damping factor
+                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
+                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
+
+        self.lambda_lb = lambda_lb
+        self.lambda_current = lambda_current
+        self.previous_tr_radius = tr_radius
+
+        return p, hits_boundary
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_exact.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_exact.pyc
new file mode 100644
index 0000000..b6e9a47
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_exact.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_krylov.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_krylov.py
new file mode 100644
index 0000000..13696ba
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_krylov.py
@@ -0,0 +1,65 @@
+from ._trustregion import (_minimize_trust_region)
+from ._trlib import (get_trlib_quadratic_subproblem)
+
+__all__ = ['_minimize_trust_krylov']
+
+def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None,
+                           inexact=True, **trust_region_options):
+    """
+    Minimization of a scalar function of one or more variables using
+    a nearly exact trust-region algorithm that only requires matrix-vector
+    products with the Hessian matrix.
+
+    .. versionadded:: 1.0.0
+
+    Options
+    -------
+    inexact : bool, optional
+        Accuracy to solve subproblems. If True requires fewer nonlinear
+        iterations, but more vector products.
+    """
+
+    if jac is None:
+        raise ValueError('Jacobian is required for trust region '
+                         'exact minimization.')
+    if hess is None and hessp is None:
+        raise ValueError('Either the Hessian or the Hessian-vector product '
+                         'is required for Krylov trust-region minimization')
+
+    # tol_rel specifies the termination tolerance relative to the initial
+    # gradient norm in the Krylov subspace iteration.
+
+    # - tol_rel_i specifies the tolerance for interior convergence.
+    # - tol_rel_b specifies the tolerance for boundary convergence.
+    #   In nonlinear programming applications it is not necessary to solve
+    #   the boundary case as exactly as the interior case.
+
+    # - setting tol_rel_i=-2 leads to a forcing sequence in the Krylov
+    #   subspace iteration leading to quadratic convergence if eventually
+    #   the trust region stays inactive.
+    # - setting tol_rel_b=-3 leads to a forcing sequence in the Krylov
+    #   subspace iteration leading to superlinear convergence as long
+    #   as the iterates hit the trust region boundary.
+
+    # For details consult the documentation of trlib_krylov_min
+    # in _trlib/trlib_krylov.h
+    #
+    # Optimality of this choice of parameters among a range of possibilities
+    # has been tested on the unconstrained subset of the CUTEst library.
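+    # A minimal usage sketch (illustrative only, not part of the solver
+    # logic): this code path is normally reached through
+    # ``scipy.optimize.minimize`` with ``method='trust-krylov'`` and a
+    # Hessian-vector product, e.g. with the standard Rosenbrock helpers:
+    #
+    #     from scipy.optimize import minimize, rosen, rosen_der, rosen_hess_prod
+    #     res = minimize(rosen, [0.5, -0.5], method='trust-krylov',
+    #                    jac=rosen_der, hessp=rosen_hess_prod)
+    #     # res.x is expected to approach [1., 1.], the Rosenbrock minimum
+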
+ + if inexact: + return _minimize_trust_region(fun, x0, args=args, jac=jac, + hess=hess, hessp=hessp, + subproblem=get_trlib_quadratic_subproblem( + tol_rel_i=-2.0, tol_rel_b=-3.0, + disp=trust_region_options.get('disp', False) + ), + **trust_region_options) + else: + return _minimize_trust_region(fun, x0, args=args, jac=jac, + hess=hess, hessp=hessp, + subproblem=get_trlib_quadratic_subproblem( + tol_rel_i=1e-8, tol_rel_b=1e-6, + disp=trust_region_options.get('disp', False) + ), + **trust_region_options) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_krylov.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_krylov.pyc new file mode 100644 index 0000000..8bfd75d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_krylov.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_ncg.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_ncg.py new file mode 100644 index 0000000..a281ddd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_ncg.py @@ -0,0 +1,128 @@ +"""Newton-CG trust-region optimization.""" +from __future__ import division, print_function, absolute_import + +import math + +import numpy as np +import scipy.linalg +from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) + +__all__ = [] + + +def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None, + **trust_region_options): + """ + Minimization of scalar function of one or more variables using + the Newton conjugate gradient trust-region algorithm. + + Options + ------- + initial_trust_radius : float + Initial trust-region radius. + max_trust_radius : float + Maximum value of the trust-region radius. No steps that are longer + than this value will be proposed. + eta : float + Trust region related acceptance stringency for proposed steps. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + + """ + if jac is None: + raise ValueError('Jacobian is required for Newton-CG trust-region ' + 'minimization') + if hess is None and hessp is None: + raise ValueError('Either the Hessian or the Hessian-vector product ' + 'is required for Newton-CG trust-region minimization') + return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, + hessp=hessp, subproblem=CGSteihaugSubproblem, + **trust_region_options) + + +class CGSteihaugSubproblem(BaseQuadraticSubproblem): + """Quadratic subproblem solved by a conjugate gradient method""" + def solve(self, trust_radius): + """ + Solve the subproblem using a conjugate gradient method. + + Parameters + ---------- + trust_radius : float + We are allowed to wander only this far away from the origin. + + Returns + ------- + p : ndarray + The proposed step. + hits_boundary : bool + True if the proposed step is on the boundary of the trust region. + + Notes + ----- + This is algorithm (7.2) of Nocedal and Wright 2nd edition. + Only the function that computes the Hessian-vector product is required. + The Hessian itself is not required, and the Hessian does + not need to be positive semidefinite. + """ + + # get the norm of jacobian and define the origin + p_origin = np.zeros_like(self.jac) + + # define a default tolerance + tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag + + # Stop the method if the search direction + # is a direction of nonpositive curvature. 
+ if self.jac_mag < tolerance: + hits_boundary = False + return p_origin, hits_boundary + + # init the state for the first iteration + z = p_origin + r = self.jac + d = -r + + # Search for the min of the approximation of the objective function. + while True: + + # do an iteration + Bd = self.hessp(d) + dBd = np.dot(d, Bd) + if dBd <= 0: + # Look at the two boundary points. + # Find both values of t to get the boundary points such that + # ||z + t d|| == trust_radius + # and then choose the one with the predicted min value. + ta, tb = self.get_boundaries_intersections(z, d, trust_radius) + pa = z + ta * d + pb = z + tb * d + if self(pa) < self(pb): + p_boundary = pa + else: + p_boundary = pb + hits_boundary = True + return p_boundary, hits_boundary + r_squared = np.dot(r, r) + alpha = r_squared / dBd + z_next = z + alpha * d + if scipy.linalg.norm(z_next) >= trust_radius: + # Find t >= 0 to get the boundary point such that + # ||z + t d|| == trust_radius + ta, tb = self.get_boundaries_intersections(z, d, trust_radius) + p_boundary = z + tb * d + hits_boundary = True + return p_boundary, hits_boundary + r_next = r + alpha * Bd + r_next_squared = np.dot(r_next, r_next) + if math.sqrt(r_next_squared) < tolerance: + hits_boundary = False + return z_next, hits_boundary + beta_next = r_next_squared / r_squared + d_next = -r_next + beta_next * d + + # update the state for the next iteration + z = z_next + r = r_next + d = d_next diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_ncg.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_ncg.pyc new file mode 100644 index 0000000..f2b4f19 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_trustregion_ncg.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_tstutils.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/_tstutils.py new file mode 100644 index 0000000..27b39c5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/_tstutils.py @@ -0,0 +1,678 @@ +from __future__ import division, print_function, absolute_import + +r""" +Parameters used in test and benchmark methods. + +Collections of test cases suitable for testing 1-dimensional root-finders + 'original': The original benchmarking functions. + Real-valued functions of real-valued inputs on an interval + with a zero. + f1, .., f3 are continuous and infinitely differentiable + f4 has a left- and right- discontinuity at the root + f5 has a root at 1 replacing a 1st order pole + f6 is randomly positive on one side of the root, + randomly negative on the other. + f4 - f6 are not continuous at the root. + + 'aps': The test problems in the 1995 paper + TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" + by Alefeld, Potra and Shi. Real-valued functions of + real-valued inputs on an interval with a zero. + Suitable for methods which start with an enclosing interval, and + derivatives up to 2nd order. + + 'complex': Some complex-valued functions of complex-valued inputs. + No enclosing bracket is provided. + Suitable for methods which use one or more starting values, and + derivatives up to 2nd order. + + The test cases are provided as a list of dictionaries. The dictionary + keys will be a subset of: + ["f", "fprime", "fprime2", "args", "bracket", "smoothness", + "a", "b", "x0", "x1", "root", "ID"] +""" + +# Sources: +# [1] Alefeld, G. E. and Potra, F. A. and Shi, Yixun, +# "Algorithm 748: Enclosing Zeros of Continuous Functions", +# ACM Trans. 
Math. Softw. Volume 221(1995) +# doi = {10.1145/210089.210111}, + +from random import random + +import numpy as np + +from scipy.optimize import zeros as cc + +# "description" refers to the original functions +description = """ +f2 is a symmetric parabola, x**2 - 1 +f3 is a quartic polynomial with large hump in interval +f4 is step function with a discontinuity at 1 +f5 is a hyperbola with vertical asymptote at 1 +f6 has random values positive to left of 1, negative to right + +of course these are not real problems. They just test how the +'good' solvers behave in bad circumstances where bisection is +really the best. A good solver should not be much worse than +bisection in such circumstance, while being faster for smooth +monotone sorts of functions. +""" + + +def f1(x): + r"""f1 is a quadratic with roots at 0 and 1""" + return x * (x - 1.) + + +def f1_fp(x): + return 2 * x - 1 + + +def f1_fpp(x): + return 2 + + +def f2(x): + r"""f2 is a symmetric parabola, x**2 - 1""" + return x**2 - 1 + + +def f2_fp(x): + return 2 * x + + +def f2_fpp(x): + return 2 + + +def f3(x): + r"""A quartic with roots at 0, 1, 2 and 3""" + return x * (x - 1.) * (x - 2.) * (x - 3.) # x**4 - 6x**3 + 11x**2 - 6x + + +def f3_fp(x): + return 4 * x**3 - 18 * x**2 + 22 * x - 6 + + +def f3_fpp(x): + return 12 * x**2 - 36 * x + 22 + + +def f4(x): + r"""Piecewise linear, left and right discontinuous at x=1, the root.""" + if x > 1: + return 1.0 + .1 * x + if x < 1: + return -1.0 + .1 * x + return 0 + + +def f5(x): + r"""Hyperbola with a pole at x=1, but pole replaced with 0. Not continuous at root.""" + if x != 1: + return 1.0 / (1. - x) + return 0 + + +# f6(x) returns random value. Without memoization, calling twice with the +# same x returns different values, hence a "random value", not a +# "function with random values" +_f6_cache = {} +def f6(x): + v = _f6_cache.get(x, None) + if v is None: + if x > 1: + v = random() + elif x < 1: + v = -random() + else: + v = 0 + _f6_cache[x] = v + return v + + +# Each Original test case has +# - a function and its two derivatives, +# - additional arguments, +# - a bracket enclosing a root, +# - the order of differentiability (smoothness) on this interval +# - a starting value for methods which don't require a bracket +# - the root (inside the bracket) +# - an Identifier of the test case + +_ORIGINAL_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID"] +_ORIGINAL_TESTS = [ + [f1, f1_fp, f1_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.01.00"], + [f2, f2_fp, f2_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.02.00"], + [f3, f3_fp, f3_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.03.00"], + [f4, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.04.00"], + [f5, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.05.00"], + [f6, None, None, (), [0.5, np.sqrt(3)], -np.inf, 0.6, 1.0, "original.05.00"] +] + +_ORIGINAL_TESTS_DICTS = [dict(zip(_ORIGINAL_TESTS_KEYS, testcase)) for testcase in _ORIGINAL_TESTS] + +# ################## +# "APS" test cases +# Functions and test cases that appear in [1] + + +def aps01_f(x): + r"""Straight forward sum of trigonometric function and polynomial""" + return np.sin(x) - x / 2 + + +def aps01_fp(x): + return np.cos(x) - 1.0 / 2 + + +def aps01_fpp(x): + return -np.sin(x) + + +def aps02_f(x): + r"""poles at x=n**2, 1st and 2nd derivatives at root are also close to 0""" + ii = np.arange(1, 21) + return -2 * np.sum((2 * ii - 5)**2 / (x - ii**2)**3) + + +def aps02_fp(x): + ii = 
np.arange(1, 21) + return 6 * np.sum((2 * ii - 5)**2 / (x - ii**2)**4) + + +def aps02_fpp(x): + ii = np.arange(1, 21) + return 24 * np.sum((2 * ii - 5)**2 / (x - ii**2)**5) + + +def aps03_f(x, a, b): + r"""Rapidly changing at the root""" + return a * x * np.exp(b * x) + + +def aps03_fp(x, a, b): + return a * (b * x + 1) * np.exp(b * x) + + +def aps03_fpp(x, a, b): + return a * (b * (b * x + 1) + b) * np.exp(b * x) + + +def aps04_f(x, n, a): + r"""Medium-degree polynomial""" + return x**n - a + + +def aps04_fp(x, n, a): + return n * x**(n - 1) + + +def aps04_fpp(x, n, a): + return n * (n - 1) * x**(n - 2) + + +def aps05_f(x): + r"""Simple Trigonometric function""" + return np.sin(x) - 1.0 / 2 + + +def aps05_fp(x): + return np.cos(x) + + +def aps05_fpp(x): + return -np.sin(x) + + +def aps06_f(x, n): + r"""Exponential rapidly changing from -1 to 1 at x=0""" + return 2 * x * np.exp(-n) - 2 * np.exp(-n * x) + 1 + + +def aps06_fp(x, n): + return 2 * np.exp(-n) + 2 * n * np.exp(-n * x) + + +def aps06_fpp(x, n): + return -2 * n * n * np.exp(-n * x) + + +def aps07_f(x, n): + r"""Upside down parabola with parametrizable height""" + return (1 + (1 - n)**2) * x - (1 - n * x)**2 + + +def aps07_fp(x, n): + return (1 + (1 - n)**2) + 2 * n * (1 - n * x) + + +def aps07_fpp(x, n): + return -2 * n * n + + +def aps08_f(x, n): + r"""Degree n polynomial""" + return x * x - (1 - x)**n + + +def aps08_fp(x, n): + return 2 * x + n * (1 - x)**(n - 1) + + +def aps08_fpp(x, n): + return 2 - n * (n - 1) * (1 - x)**(n - 2) + + +def aps09_f(x, n): + r"""Upside down quartic with parametrizable height""" + return (1 + (1 - n)**4) * x - (1 - n * x)**4 + + +def aps09_fp(x, n): + return (1 + (1 - n)**4) + 4 * n * (1 - n * x)**3 + + +def aps09_fpp(x, n): + return -12 * n * (1 - n * x)**2 + + +def aps10_f(x, n): + r"""Exponential plus a polynomial""" + return np.exp(-n * x) * (x - 1) + x**n + + +def aps10_fp(x, n): + return np.exp(-n * x) * (-n * (x - 1) + 1) + n * x**(n - 1) + + +def aps10_fpp(x, n): + return np.exp(-n * x) * (-n * (-n * (x - 1) + 1) + -n * x) + n * (n - 1) * x**(n - 2) + + +def aps11_f(x, n): + r"""Rational function with a zero at x=1/n and a pole at x=0""" + return (n * x - 1) / ((n - 1) * x) + + +def aps11_fp(x, n): + return 1 / (n - 1) / x**2 + + +def aps11_fpp(x, n): + return -2 / (n - 1) / x**3 + + +def aps12_f(x, n): + r"""n-th root of x, with a zero at x=n""" + return np.power(x, 1.0 / n) - np.power(n, 1.0 / n) + + +def aps12_fp(x, n): + return np.power(x, (1.0 - n) / n) / n + + +def aps12_fpp(x, n): + return np.power(x, (1.0 - 2 * n) / n) * (1.0 / n) * (1.0 - n) / n + + +_MAX_EXPABLE = np.log(np.finfo(float).max) + + +def aps13_f(x): + r"""Function with *all* derivatives 0 at the root""" + if x == 0: + return 0 + # x2 = 1.0/x**2 + # if x2 > 708: + # return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return x / np.exp(y) + + +def aps13_fp(x): + if x == 0: + return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return (1 + 2 / x**2) / np.exp(y) + + +def aps13_fpp(x): + if x == 0: + return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return 2 * (2 - x**2) / x**5 / np.exp(y) + + +def aps14_f(x, n): + r"""0 for negative x-values, trigonometric+linear for x positive""" + if x <= 0: + return -n / 20.0 + return n / 20.0 * (x / 1.5 + np.sin(x) - 1) + + +def aps14_fp(x, n): + if x <= 0: + return 0 + return n / 20.0 * (1.0 / 1.5 + np.cos(x)) + + +def aps14_fpp(x, n): + if x <= 0: + return 0 + return -n / 20.0 * (np.sin(x)) + + +def aps15_f(x, n): + r"""piecewise linear, constant outside of 
[0, 0.002/(1+n)]""" + if x < 0: + return -0.859 + if x > 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) - 1.859 + + +def aps15_fp(x, n): + if not 0 <= x <= 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 + + +def aps15_fpp(x, n): + if not 0 <= x <= 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 * (n + 1) / 2 * 1000 + + +# Each APS test case has +# - a function and its two derivatives, +# - additional arguments, +# - a bracket enclosing a root, +# - the order of differentiability of the the function on this interval +# - a starting value for methods which don't require a bracket +# - the root (inside the bracket) +# - an Identifier of the test case +# +# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided +# in [1] for each test case. Newton and Halley methods need a single +# starting point x0, which was chosen to be near the middle of the interval, +# unless that would have made the problem too easy. + +_APS_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID"] +_APS_TESTS = [ + [aps01_f, aps01_fp, aps01_fpp, (), [np.pi / 2, np.pi], np.inf, 3, 1.89549426703398094e+00, "aps.01.00"], + [aps02_f, aps02_fp, aps02_fpp, (), [1 + 1e-9, 4 - 1e-9], np.inf, 2, 3.02291534727305677e+00, "aps.02.00"], + [aps02_f, aps02_fp, aps02_fpp, (), [4 + 1e-9, 9 - 1e-9], np.inf, 5, 6.68375356080807848e+00, "aps.02.01"], + [aps02_f, aps02_fp, aps02_fpp, (), [9 + 1e-9, 16 - 1e-9], np.inf, 10, 1.12387016550022114e+01, "aps.02.02"], + [aps02_f, aps02_fp, aps02_fpp, (), [16 + 1e-9, 25 - 1e-9], np.inf, 17, 1.96760000806234103e+01, "aps.02.03"], + [aps02_f, aps02_fp, aps02_fpp, (), [25 + 1e-9, 36 - 1e-9], np.inf, 26, 2.98282273265047557e+01, "aps.02.04"], + [aps02_f, aps02_fp, aps02_fpp, (), [36 + 1e-9, 49 - 1e-9], np.inf, 37, 4.19061161952894139e+01, "aps.02.05"], + [aps02_f, aps02_fp, aps02_fpp, (), [49 + 1e-9, 64 - 1e-9], np.inf, 50, 5.59535958001430913e+01, "aps.02.06"], + [aps02_f, aps02_fp, aps02_fpp, (), [64 + 1e-9, 81 - 1e-9], np.inf, 65, 7.19856655865877997e+01, "aps.02.07"], + [aps02_f, aps02_fp, aps02_fpp, (), [81 + 1e-9, 100 - 1e-9], np.inf, 82, 9.00088685391666701e+01, "aps.02.08"], + [aps02_f, aps02_fp, aps02_fpp, (), [100 + 1e-9, 121 - 1e-9], np.inf, 101, 1.10026532748330197e+02, "aps.02.09"], + [aps03_f, aps03_fp, aps03_fpp, (-40, -1), [-9, 31], np.inf, -2, 0, "aps.03.00"], + [aps03_f, aps03_fp, aps03_fpp, (-100, -2), [-9, 31], np.inf, -2, 0, "aps.03.01"], + [aps03_f, aps03_fp, aps03_fpp, (-200, -3), [-9, 31], np.inf, -2, 0, "aps.03.02"], + [aps04_f, aps04_fp, aps04_fpp, (4, 0.2), [0, 5], np.inf, 2.5, 6.68740304976422006e-01, "aps.04.00"], + [aps04_f, aps04_fp, aps04_fpp, (6, 0.2), [0, 5], np.inf, 2.5, 7.64724491331730039e-01, "aps.04.01"], + [aps04_f, aps04_fp, aps04_fpp, (8, 0.2), [0, 5], np.inf, 2.5, 8.17765433957942545e-01, "aps.04.02"], + [aps04_f, aps04_fp, aps04_fpp, (10, 0.2), [0, 5], np.inf, 2.5, 8.51339922520784609e-01, "aps.04.03"], + [aps04_f, aps04_fp, aps04_fpp, (12, 0.2), [0, 5], np.inf, 2.5, 8.74485272221167897e-01, "aps.04.04"], + [aps04_f, aps04_fp, aps04_fpp, (4, 1), [0, 5], np.inf, 2.5, 1, "aps.04.05"], + [aps04_f, aps04_fp, aps04_fpp, (6, 1), [0, 5], np.inf, 2.5, 1, "aps.04.06"], + [aps04_f, aps04_fp, aps04_fpp, (8, 1), [0, 5], np.inf, 2.5, 1, "aps.04.07"], + [aps04_f, aps04_fp, aps04_fpp, (10, 1), [0, 5], np.inf, 2.5, 1, "aps.04.08"], + [aps04_f, aps04_fp, aps04_fpp, (12, 1), 
[0, 5], np.inf, 2.5, 1, "aps.04.09"], + [aps04_f, aps04_fp, aps04_fpp, (8, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.10"], + [aps04_f, aps04_fp, aps04_fpp, (10, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.11"], + [aps04_f, aps04_fp, aps04_fpp, (12, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.12"], + [aps04_f, aps04_fp, aps04_fpp, (14, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.13"], + [aps05_f, aps05_fp, aps05_fpp, (), [0, 1.5], np.inf, 1.3, np.pi / 6, "aps.05.00"], + [aps06_f, aps06_fp, aps06_fpp, (1,), [0, 1], np.inf, 0.5, 4.22477709641236709e-01, "aps.06.00"], + [aps06_f, aps06_fp, aps06_fpp, (2,), [0, 1], np.inf, 0.5, 3.06699410483203705e-01, "aps.06.01"], + [aps06_f, aps06_fp, aps06_fpp, (3,), [0, 1], np.inf, 0.5, 2.23705457654662959e-01, "aps.06.02"], + [aps06_f, aps06_fp, aps06_fpp, (4,), [0, 1], np.inf, 0.5, 1.71719147519508369e-01, "aps.06.03"], + [aps06_f, aps06_fp, aps06_fpp, (5,), [0, 1], np.inf, 0.4, 1.38257155056824066e-01, "aps.06.04"], + [aps06_f, aps06_fp, aps06_fpp, (20,), [0, 1], np.inf, 0.1, 3.46573590208538521e-02, "aps.06.05"], + [aps06_f, aps06_fp, aps06_fpp, (40,), [0, 1], np.inf, 5e-02, 1.73286795139986315e-02, "aps.06.06"], + [aps06_f, aps06_fp, aps06_fpp, (60,), [0, 1], np.inf, 1.0 / 30, 1.15524530093324210e-02, "aps.06.07"], + [aps06_f, aps06_fp, aps06_fpp, (80,), [0, 1], np.inf, 2.5e-02, 8.66433975699931573e-03, "aps.06.08"], + [aps06_f, aps06_fp, aps06_fpp, (100,), [0, 1], np.inf, 2e-02, 6.93147180559945415e-03, "aps.06.09"], + [aps07_f, aps07_fp, aps07_fpp, (5,), [0, 1], np.inf, 0.4, 3.84025518406218985e-02, "aps.07.00"], + [aps07_f, aps07_fp, aps07_fpp, (10,), [0, 1], np.inf, 0.4, 9.90000999800049949e-03, "aps.07.01"], + [aps07_f, aps07_fp, aps07_fpp, (20,), [0, 1], np.inf, 0.4, 2.49375003906201174e-03, "aps.07.02"], + [aps08_f, aps08_fp, aps08_fpp, (2,), [0, 1], np.inf, 0.9, 0.5, "aps.08.00"], + [aps08_f, aps08_fp, aps08_fpp, (5,), [0, 1], np.inf, 0.9, 3.45954815848242059e-01, "aps.08.01"], + [aps08_f, aps08_fp, aps08_fpp, (10,), [0, 1], np.inf, 0.9, 2.45122333753307220e-01, "aps.08.02"], + [aps08_f, aps08_fp, aps08_fpp, (15,), [0, 1], np.inf, 0.9, 1.95547623536565629e-01, "aps.08.03"], + [aps08_f, aps08_fp, aps08_fpp, (20,), [0, 1], np.inf, 0.9, 1.64920957276440960e-01, "aps.08.04"], + [aps09_f, aps09_fp, aps09_fpp, (1,), [0, 1], np.inf, 0.5, 2.75508040999484394e-01, "aps.09.00"], + [aps09_f, aps09_fp, aps09_fpp, (2,), [0, 1], np.inf, 0.5, 1.37754020499742197e-01, "aps.09.01"], + [aps09_f, aps09_fp, aps09_fpp, (4,), [0, 1], np.inf, 0.5, 1.03052837781564422e-02, "aps.09.02"], + [aps09_f, aps09_fp, aps09_fpp, (5,), [0, 1], np.inf, 0.5, 3.61710817890406339e-03, "aps.09.03"], + [aps09_f, aps09_fp, aps09_fpp, (8,), [0, 1], np.inf, 0.5, 4.10872918496395375e-04, "aps.09.04"], + [aps09_f, aps09_fp, aps09_fpp, (15,), [0, 1], np.inf, 0.5, 2.59895758929076292e-05, "aps.09.05"], + [aps09_f, aps09_fp, aps09_fpp, (20,), [0, 1], np.inf, 0.5, 7.66859512218533719e-06, "aps.09.06"], + [aps10_f, aps10_fp, aps10_fpp, (1,), [0, 1], np.inf, 0.9, 4.01058137541547011e-01, "aps.10.00"], + [aps10_f, aps10_fp, aps10_fpp, (5,), [0, 1], np.inf, 0.9, 5.16153518757933583e-01, "aps.10.01"], + [aps10_f, aps10_fp, aps10_fpp, (10,), [0, 1], np.inf, 0.9, 5.39522226908415781e-01, "aps.10.02"], + [aps10_f, aps10_fp, aps10_fpp, (15,), [0, 1], np.inf, 0.9, 5.48182294340655241e-01, "aps.10.03"], + [aps10_f, aps10_fp, aps10_fpp, (20,), [0, 1], np.inf, 0.9, 5.52704666678487833e-01, "aps.10.04"], + [aps11_f, aps11_fp, aps11_fpp, (2,), [0.01, 1], np.inf, 1e-02, 1.0 / 2, "aps.11.00"], + [aps11_f, 
aps11_fp, aps11_fpp, (5,), [0.01, 1], np.inf, 1e-02, 1.0 / 5, "aps.11.01"], + [aps11_f, aps11_fp, aps11_fpp, (15,), [0.01, 1], np.inf, 1e-02, 1.0 / 15, "aps.11.02"], + [aps11_f, aps11_fp, aps11_fpp, (20,), [0.01, 1], np.inf, 1e-02, 1.0 / 20, "aps.11.03"], + [aps12_f, aps12_fp, aps12_fpp, (2,), [1, 100], np.inf, 1.1, 2, "aps.12.00"], + [aps12_f, aps12_fp, aps12_fpp, (3,), [1, 100], np.inf, 1.1, 3, "aps.12.01"], + [aps12_f, aps12_fp, aps12_fpp, (4,), [1, 100], np.inf, 1.1, 4, "aps.12.02"], + [aps12_f, aps12_fp, aps12_fpp, (5,), [1, 100], np.inf, 1.1, 5, "aps.12.03"], + [aps12_f, aps12_fp, aps12_fpp, (6,), [1, 100], np.inf, 1.1, 6, "aps.12.04"], + [aps12_f, aps12_fp, aps12_fpp, (7,), [1, 100], np.inf, 1.1, 7, "aps.12.05"], + [aps12_f, aps12_fp, aps12_fpp, (9,), [1, 100], np.inf, 1.1, 9, "aps.12.06"], + [aps12_f, aps12_fp, aps12_fpp, (11,), [1, 100], np.inf, 1.1, 11, "aps.12.07"], + [aps12_f, aps12_fp, aps12_fpp, (13,), [1, 100], np.inf, 1.1, 13, "aps.12.08"], + [aps12_f, aps12_fp, aps12_fpp, (15,), [1, 100], np.inf, 1.1, 15, "aps.12.09"], + [aps12_f, aps12_fp, aps12_fpp, (17,), [1, 100], np.inf, 1.1, 17, "aps.12.10"], + [aps12_f, aps12_fp, aps12_fpp, (19,), [1, 100], np.inf, 1.1, 19, "aps.12.11"], + [aps12_f, aps12_fp, aps12_fpp, (21,), [1, 100], np.inf, 1.1, 21, "aps.12.12"], + [aps12_f, aps12_fp, aps12_fpp, (23,), [1, 100], np.inf, 1.1, 23, "aps.12.13"], + [aps12_f, aps12_fp, aps12_fpp, (25,), [1, 100], np.inf, 1.1, 25, "aps.12.14"], + [aps12_f, aps12_fp, aps12_fpp, (27,), [1, 100], np.inf, 1.1, 27, "aps.12.15"], + [aps12_f, aps12_fp, aps12_fpp, (29,), [1, 100], np.inf, 1.1, 29, "aps.12.16"], + [aps12_f, aps12_fp, aps12_fpp, (31,), [1, 100], np.inf, 1.1, 31, "aps.12.17"], + [aps12_f, aps12_fp, aps12_fpp, (33,), [1, 100], np.inf, 1.1, 33, "aps.12.18"], + [aps13_f, aps13_fp, aps13_fpp, (), [-1, 4], np.inf, 1.5, 1.54720911915117165e-02, "aps.13.00"], + [aps14_f, aps14_fp, aps14_fpp, (1,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.00"], + [aps14_f, aps14_fp, aps14_fpp, (2,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.01"], + [aps14_f, aps14_fp, aps14_fpp, (3,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.02"], + [aps14_f, aps14_fp, aps14_fpp, (4,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.03"], + [aps14_f, aps14_fp, aps14_fpp, (5,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.04"], + [aps14_f, aps14_fp, aps14_fpp, (6,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.05"], + [aps14_f, aps14_fp, aps14_fpp, (7,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.06"], + [aps14_f, aps14_fp, aps14_fpp, (8,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.07"], + [aps14_f, aps14_fp, aps14_fpp, (9,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.08"], + [aps14_f, aps14_fp, aps14_fpp, (10,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.09"], + [aps14_f, aps14_fp, aps14_fpp, (11,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.10"], + [aps14_f, aps14_fp, aps14_fpp, (12,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.11"], + [aps14_f, aps14_fp, aps14_fpp, (13,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.12"], + [aps14_f, aps14_fp, aps14_fpp, (14,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.13"], + [aps14_f, aps14_fp, aps14_fpp, (15,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.14"], + [aps14_f, aps14_fp, aps14_fpp, (16,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.15"], + 
[aps14_f, aps14_fp, aps14_fpp, (17,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.16"], + [aps14_f, aps14_fp, aps14_fpp, (18,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.17"], + [aps14_f, aps14_fp, aps14_fpp, (19,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.18"], + [aps14_f, aps14_fp, aps14_fpp, (20,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.19"], + [aps14_f, aps14_fp, aps14_fpp, (21,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.20"], + [aps14_f, aps14_fp, aps14_fpp, (22,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.21"], + [aps14_f, aps14_fp, aps14_fpp, (23,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.22"], + [aps14_f, aps14_fp, aps14_fpp, (24,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.23"], + [aps14_f, aps14_fp, aps14_fpp, (25,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.24"], + [aps14_f, aps14_fp, aps14_fpp, (26,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.25"], + [aps14_f, aps14_fp, aps14_fpp, (27,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.26"], + [aps14_f, aps14_fp, aps14_fpp, (28,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.27"], + [aps14_f, aps14_fp, aps14_fpp, (29,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.28"], + [aps14_f, aps14_fp, aps14_fpp, (30,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.29"], + [aps14_f, aps14_fp, aps14_fpp, (31,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.30"], + [aps14_f, aps14_fp, aps14_fpp, (32,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.31"], + [aps14_f, aps14_fp, aps14_fpp, (33,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.32"], + [aps14_f, aps14_fp, aps14_fpp, (34,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.33"], + [aps14_f, aps14_fp, aps14_fpp, (35,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.34"], + [aps14_f, aps14_fp, aps14_fpp, (36,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.35"], + [aps14_f, aps14_fp, aps14_fpp, (37,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.36"], + [aps14_f, aps14_fp, aps14_fpp, (38,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.37"], + [aps14_f, aps14_fp, aps14_fpp, (39,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.38"], + [aps14_f, aps14_fp, aps14_fpp, (40,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.39"], + [aps15_f, aps15_fp, aps15_fpp, (20,), [-1000, 1e-4], 0, -2, 5.90513055942197166e-05, "aps.15.00"], + [aps15_f, aps15_fp, aps15_fpp, (21,), [-1000, 1e-4], 0, -2, 5.63671553399369967e-05, "aps.15.01"], + [aps15_f, aps15_fp, aps15_fpp, (22,), [-1000, 1e-4], 0, -2, 5.39164094555919196e-05, "aps.15.02"], + [aps15_f, aps15_fp, aps15_fpp, (23,), [-1000, 1e-4], 0, -2, 5.16698923949422470e-05, "aps.15.03"], + [aps15_f, aps15_fp, aps15_fpp, (24,), [-1000, 1e-4], 0, -2, 4.96030966991445609e-05, "aps.15.04"], + [aps15_f, aps15_fp, aps15_fpp, (25,), [-1000, 1e-4], 0, -2, 4.76952852876389951e-05, "aps.15.05"], + [aps15_f, aps15_fp, aps15_fpp, (26,), [-1000, 1e-4], 0, -2, 4.59287932399486662e-05, "aps.15.06"], + [aps15_f, aps15_fp, aps15_fpp, (27,), [-1000, 1e-4], 0, -2, 4.42884791956647841e-05, "aps.15.07"], + [aps15_f, aps15_fp, aps15_fpp, (28,), [-1000, 1e-4], 0, -2, 4.27612902578832391e-05, "aps.15.08"], + [aps15_f, aps15_fp, aps15_fpp, (29,), [-1000, 1e-4], 0, -2, 4.13359139159538030e-05, "aps.15.09"], + [aps15_f, aps15_fp, 
aps15_fpp, (30,), [-1000, 1e-4], 0, -2, 4.00024973380198076e-05, "aps.15.10"], + [aps15_f, aps15_fp, aps15_fpp, (31,), [-1000, 1e-4], 0, -2, 3.87524192962066869e-05, "aps.15.11"], + [aps15_f, aps15_fp, aps15_fpp, (32,), [-1000, 1e-4], 0, -2, 3.75781035599579910e-05, "aps.15.12"], + [aps15_f, aps15_fp, aps15_fpp, (33,), [-1000, 1e-4], 0, -2, 3.64728652199592355e-05, "aps.15.13"], + [aps15_f, aps15_fp, aps15_fpp, (34,), [-1000, 1e-4], 0, -2, 3.54307833565318273e-05, "aps.15.14"], + [aps15_f, aps15_fp, aps15_fpp, (35,), [-1000, 1e-4], 0, -2, 3.44465949299614980e-05, "aps.15.15"], + [aps15_f, aps15_fp, aps15_fpp, (36,), [-1000, 1e-4], 0, -2, 3.35156058778003705e-05, "aps.15.16"], + [aps15_f, aps15_fp, aps15_fpp, (37,), [-1000, 1e-4], 0, -2, 3.26336162494372125e-05, "aps.15.17"], + [aps15_f, aps15_fp, aps15_fpp, (38,), [-1000, 1e-4], 0, -2, 3.17968568584260013e-05, "aps.15.18"], + [aps15_f, aps15_fp, aps15_fpp, (39,), [-1000, 1e-4], 0, -2, 3.10019354369653455e-05, "aps.15.19"], + [aps15_f, aps15_fp, aps15_fpp, (40,), [-1000, 1e-4], 0, -2, 3.02457906702100968e-05, "aps.15.20"], + [aps15_f, aps15_fp, aps15_fpp, (100,), [-1000, 1e-4], 0, -2, 1.22779942324615231e-05, "aps.15.21"], + [aps15_f, aps15_fp, aps15_fpp, (200,), [-1000, 1e-4], 0, -2, 6.16953939044086617e-06, "aps.15.22"], + [aps15_f, aps15_fp, aps15_fpp, (300,), [-1000, 1e-4], 0, -2, 4.11985852982928163e-06, "aps.15.23"], + [aps15_f, aps15_fp, aps15_fpp, (400,), [-1000, 1e-4], 0, -2, 3.09246238772721682e-06, "aps.15.24"], + [aps15_f, aps15_fp, aps15_fpp, (500,), [-1000, 1e-4], 0, -2, 2.47520442610501789e-06, "aps.15.25"], + [aps15_f, aps15_fp, aps15_fpp, (600,), [-1000, 1e-4], 0, -2, 2.06335676785127107e-06, "aps.15.26"], + [aps15_f, aps15_fp, aps15_fpp, (700,), [-1000, 1e-4], 0, -2, 1.76901200781542651e-06, "aps.15.27"], + [aps15_f, aps15_fp, aps15_fpp, (800,), [-1000, 1e-4], 0, -2, 1.54816156988591016e-06, "aps.15.28"], + [aps15_f, aps15_fp, aps15_fpp, (900,), [-1000, 1e-4], 0, -2, 1.37633453660223511e-06, "aps.15.29"], + [aps15_f, aps15_fp, aps15_fpp, (1000,), [-1000, 1e-4], 0, -2, 1.23883857889971403e-06, "aps.15.30"] +] + +_APS_TESTS_DICTS = [dict(zip(_APS_TESTS_KEYS, testcase)) for testcase in _APS_TESTS] + + +# ################## +# "complex" test cases +# A few simple, complex-valued, functions, defined on the complex plane. + + +def cplx01_f(z, n, a): + r"""z**n-a: Use to find the n-th root of a""" + return z**n - a + + +def cplx01_fp(z, n, a): + return n * z**(n - 1) + + +def cplx01_fpp(z, n, a): + return n * (n - 1) * z**(n - 2) + + +def cplx02_f(z, a): + r"""e**z - a: Use to find the log of a""" + return np.exp(z) - a + + +def cplx02_fp(z, a): + return np.exp(z) + + +def cplx02_fpp(z, a): + return np.exp(z) + + +# Each "complex" test case has +# - a function and its two derivatives, +# - additional arguments, +# - the order of differentiability of the the function on this interval +# - two starting values x0 and x1 +# - the root +# - an Identifier of the test case +# +# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided +# in [1] for each test case. Newton and Halley need a single starting point +# x0, which was chosen to be near the middle of the interval, unless that +# would make the problem too easy. 
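
+# A worked illustration (a sketch added for clarity, not part of the original
+# test tables): the first complex case, ``cplx01_f`` with args ``(2, -1)``,
+# asks for a square root of -1. Newton's method iterates complex values
+# directly when started from a complex ``x0``:
+#
+#     from scipy.optimize import newton
+#     root = newton(cplx01_f, 1 + 1j, fprime=cplx01_fp, args=(2, -1))
+#     # root should be close to 1j, the tabulated root of "complex.01.00"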
+ + +_COMPLEX_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "smoothness", "x0", "x1", "root", "ID"] +_COMPLEX_TESTS = [ + [cplx01_f, cplx01_fp, cplx01_fpp, (2, -1), np.inf, (1 + 1j), (0.5 + 0.5j), 1j, "complex.01.00"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, 1), np.inf, (-1 + 1j), (-0.5 + 2.0j), (-0.5 + np.sqrt(3) / 2 * 1.0j), + "complex.01.01"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, -1), np.inf, 1j, (0.5 + 0.5j), (0.5 + np.sqrt(3) / 2 * 1.0j), + "complex.01.02"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, 8), np.inf, 5, 4, 2, "complex.01.03"], + [cplx02_f, cplx02_fp, cplx02_fpp, (-1,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 1.0j, "complex.02.00"], + [cplx02_f, cplx02_fp, cplx02_fpp, (1j,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 0.5j, "complex.02.01"], +] + +_COMPLEX_TESTS_DICTS = [dict(zip(_COMPLEX_TESTS_KEYS, testcase)) for testcase in _COMPLEX_TESTS] + + +def _add_a_b(tests): + r"""Add "a" and "b" keys to each test from the "bracket" value""" + for d in tests: + for k, v in zip(['a', 'b'], d.get('bracket', [])): + d[k] = v + + +_add_a_b(_ORIGINAL_TESTS_DICTS) +_add_a_b(_APS_TESTS_DICTS) +_add_a_b(_COMPLEX_TESTS_DICTS) + + +def get_tests(collection='original', smoothness=None): + r"""Return the requested collection of test cases, as an array of dicts with subset-specific keys + + Allowed values of collection: + 'original': The original benchmarking functions. + Real-valued functions of real-valued inputs on an interval with a zero. + f1, .., f3 are continuous and infinitely differentiable + f4 has a single discontinuity at the root + f5 has a root at 1 replacing a 1st order pole + f6 is randomly positive on one side of the root, randomly negative on the other + 'aps': The test problems in the TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" + paper by Alefeld, Potra and Shi. Real-valued functions of + real-valued inputs on an interval with a zero. + Suitable for methods which start with an enclosing interval, and + derivatives up to 2nd order. + 'complex': Some complex-valued functions of complex-valued inputs. + No enclosing bracket is provided. + Suitable for methods which use one or more starting values, and + derivatives up to 2nd order. 
+
+    The dictionary keys will be a subset of
+    ["f", "fprime", "fprime2", "args", "bracket", "a", "b", "smoothness", "x0", "x1", "root", "ID"]
+    """
+    collection = collection or "original"
+    subsets = {"aps": _APS_TESTS_DICTS,
+               "complex": _COMPLEX_TESTS_DICTS,
+               "original": _ORIGINAL_TESTS_DICTS}
+    tests = subsets.get(collection, [])
+    if smoothness is not None:
+        tests = [tc for tc in tests if tc['smoothness'] >= smoothness]
+    return tests
+
+
+# Backwards compatibility
+methods = [cc.bisect, cc.ridder, cc.brenth, cc.brentq]
+mstrings = ['cc.bisect', 'cc.ridder', 'cc.brenth', 'cc.brentq']
+functions = [f2, f3, f4, f5, f6]
+fstrings = ['f2', 'f3', 'f4', 'f5', 'f6']
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_tstutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/_tstutils.pyc
new file mode 100644
index 0000000..5d474c9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_tstutils.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/_zeros.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/_zeros.so
new file mode 100755
index 0000000..fb7198f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/_zeros.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/cobyla.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/cobyla.py
new file mode 100644
index 0000000..0a9d621
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/cobyla.py
@@ -0,0 +1,272 @@
+"""
+Interface to Constrained Optimization By Linear Approximation
+
+Functions
+---------
+.. autosummary::
+   :toctree: generated/
+
+   fmin_cobyla
+
+"""
+
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from scipy._lib.six import callable
+from scipy.optimize import _cobyla
+from .optimize import OptimizeResult, _check_unknown_options
+try:
+    from itertools import izip
+except ImportError:
+    izip = zip
+
+
+__all__ = ['fmin_cobyla']
+
+
+def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
+                rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4):
+    """
+    Minimize a function using the Constrained Optimization BY Linear
+    Approximation (COBYLA) method. This method wraps a FORTRAN
+    implementation of the algorithm.
+
+    Parameters
+    ----------
+    func : callable
+        Function to minimize. In the form func(x, \\*args).
+    x0 : ndarray
+        Initial guess.
+    cons : sequence
+        Constraint functions; must all be ``>=0`` (a single function
+        if only 1 constraint). Each function takes the parameters `x`
+        as its first argument, and it can return either a single number or
+        an array or list of numbers.
+    args : tuple, optional
+        Extra arguments to pass to function.
+    consargs : tuple, optional
+        Extra arguments to pass to constraint functions (default of None means
+        use same extra arguments as those passed to func).
+        Use ``()`` for no extra arguments.
+    rhobeg : float, optional
+        Reasonable initial changes to the variables.
+    rhoend : float, optional
+        Final accuracy in the optimization (not precisely guaranteed). This
+        is a lower bound on the size of the trust region.
+    disp : {0, 1, 2, 3}, optional
+        Controls the frequency of output; 0 implies no output.
+    maxfun : int, optional
+        Maximum number of function evaluations.
+    catol : float, optional
+        Absolute tolerance for constraint violations.
+
+    Returns
+    -------
+    x : ndarray
+        The argument that minimises `f`.
+
+    See also
+    --------
+    minimize: Interface to minimization algorithms for multivariate
+        functions. See the 'COBYLA' `method` in particular.
+
+    Notes
+    -----
+    This algorithm is based on linear approximations to the objective
+    function and each constraint. We briefly describe the algorithm.
+
+    Suppose the function is being minimized over k variables. At the
+    jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
+    an approximate solution x_j, and a radius RHO_j. It maintains linear
+    (i.e. linear plus a constant) approximations to the objective
+    function and constraint functions such that their function values
+    agree with the linear approximation on the k+1 points v_1,.., v_(k+1).
+    This gives a linear program to solve (where the linear approximations
+    of the constraint functions are constrained to be non-negative).
+
+    However the linear approximations are likely only good
+    approximations near the current simplex, so the linear program is
+    given the further requirement that the solution, which
+    will become x_(j+1), must be within RHO_j from x_j. RHO_j only
+    decreases, never increases. The initial RHO_j is rhobeg and the
+    final RHO_j is rhoend. In this way COBYLA's iterations behave
+    like a trust region algorithm.
+
+    Additionally, the linear program may be inconsistent, or the
+    approximation may give poor improvement. For details about
+    how these issues are resolved, as well as how the points v_i are
+    updated, refer to the source code or the references below.
+
+
+    References
+    ----------
+    Powell M.J.D. (1994), "A direct search optimization method that models
+    the objective and constraint functions by linear interpolation.", in
+    Advances in Optimization and Numerical Analysis, eds. S. Gomez and
+    J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67
+
+    Powell M.J.D. (1998), "Direct search algorithms for optimization
+    calculations", Acta Numerica 7, 287-336
+
+    Powell M.J.D. (2007), "A view of algorithms for optimization without
+    derivatives", Cambridge University Technical Report DAMTP 2007/NA03
+
+
+    Examples
+    --------
+    Minimize the objective function f(x,y) = x*y subject
+    to the constraints x**2 + y**2 < 1 and y > 0::
+
+        >>> def objective(x):
+        ...     return x[0]*x[1]
+        ...
+        >>> def constr1(x):
+        ...     return 1 - (x[0]**2 + x[1]**2)
+        ...
+        >>> def constr2(x):
+        ...     return x[1]
+        ...
+        >>> from scipy.optimize import fmin_cobyla
+        >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
+        array([-0.70710685,  0.70710671])
+
+    The exact solution is (-sqrt(2)/2, sqrt(2)/2).
+
+
+
+    """
+    err = "cons must be a sequence of callable functions or a single"\
+          " callable function."
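+    # Normalize `cons`: accept a single callable or any sequence of
+    # callables; anything else raises the TypeError built above.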
+    try:
+        len(cons)
+    except TypeError:
+        if callable(cons):
+            cons = [cons]
+        else:
+            raise TypeError(err)
+    else:
+        for thisfunc in cons:
+            if not callable(thisfunc):
+                raise TypeError(err)
+
+    if consargs is None:
+        consargs = args
+
+    # build constraints
+    con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)
+
+    # options
+    opts = {'rhobeg': rhobeg,
+            'tol': rhoend,
+            'disp': disp,
+            'maxiter': maxfun,
+            'catol': catol}
+
+    sol = _minimize_cobyla(func, x0, args, constraints=con,
+                           **opts)
+    if disp and not sol['success']:
+        print("COBYLA failed to find a solution: %s" % (sol.message,))
+    return sol['x']
+
+
+def _minimize_cobyla(fun, x0, args=(), constraints=(),
+                     rhobeg=1.0, tol=1e-4, maxiter=1000,
+                     disp=False, catol=2e-4, **unknown_options):
+    """
+    Minimize a scalar function of one or more variables using the
+    Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
+
+    Options
+    -------
+    rhobeg : float
+        Reasonable initial changes to the variables.
+    tol : float
+        Final accuracy in the optimization (not precisely guaranteed).
+        This is a lower bound on the size of the trust region.
+    disp : bool
+        Set to True to print convergence messages. If False,
+        `verbosity` is ignored and set to 0.
+    maxiter : int
+        Maximum number of function evaluations.
+    catol : float
+        Tolerance (absolute) for constraint violations.
+
+    """
+    _check_unknown_options(unknown_options)
+    maxfun = maxiter
+    rhoend = tol
+    iprint = int(bool(disp))
+
+    # check constraints
+    if isinstance(constraints, dict):
+        constraints = (constraints, )
+
+    for ic, con in enumerate(constraints):
+        # check type
+        try:
+            ctype = con['type'].lower()
+        except KeyError:
+            raise KeyError('Constraint %d has no type defined.' % ic)
+        except TypeError:
+            raise TypeError('Constraints must be defined using a '
+                            'dictionary.')
+        except AttributeError:
+            raise TypeError("Constraint's type must be a string.")
+        else:
+            if ctype != 'ineq':
+                raise ValueError("Constraints of type '%s' not handled by "
+                                 "COBYLA." % con['type'])
+
+        # check function
+        if 'fun' not in con:
+            raise KeyError('Constraint %d has no function defined.' % ic)
+
+        # check extra arguments
+        if 'args' not in con:
+            con['args'] = ()
+
+    # m is the total number of constraint values
+    # it takes into account that some constraints may be vector-valued
+    cons_lengths = []
+    for c in constraints:
+        f = c['fun'](x0, *c['args'])
+        try:
+            cons_length = len(f)
+        except TypeError:
+            cons_length = 1
+        cons_lengths.append(cons_length)
+    m = sum(cons_lengths)
+
+    def calcfc(x, con):
+        f = fun(x, *args)
+        i = 0
+        for size, c in izip(cons_lengths, constraints):
+            con[i: i + size] = c['fun'](x, *c['args'])
+            i += size
+        return f
+
+    info = np.zeros(4, np.float64)
+    xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
+                                  rhoend=rhoend, iprint=iprint, maxfun=maxfun,
+                                  dinfo=info)
+
+    if info[3] > catol:
+        # Check constraint violation
+        info[0] = 4
+
+    return OptimizeResult(x=xopt,
+                          status=int(info[0]),
+                          success=info[0] == 1,
+                          message={1: 'Optimization terminated successfully.',
+                                   2: 'Maximum number of function evaluations '
+                                      'has been exceeded.',
+                                   3: 'Rounding errors are becoming damaging '
+                                      'in COBYLA subroutine.',
+                                   4: 'Did not converge to a solution '
+                                      'satisfying the constraints. See '
+                                      '`maxcv` for magnitude of violation.'
+ }.get(info[0], 'Unknown exit status.'), + nfev=int(info[1]), + fun=info[2], + maxcv=info[3]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/cobyla.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/cobyla.pyc new file mode 100644 index 0000000..bbb537c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/cobyla.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/lbfgsb.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/lbfgsb.py new file mode 100644 index 0000000..5425fff --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/lbfgsb.py @@ -0,0 +1,468 @@ +""" +Functions +--------- +.. autosummary:: + :toctree: generated/ + + fmin_l_bfgs_b + +""" + +## License for the Python wrapper +## ============================== + +## Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca> + +## Permission is hereby granted, free of charge, to any person obtaining a +## copy of this software and associated documentation files (the "Software"), +## to deal in the Software without restriction, including without limitation +## the rights to use, copy, modify, merge, publish, distribute, sublicense, +## and/or sell copies of the Software, and to permit persons to whom the +## Software is furnished to do so, subject to the following conditions: + +## The above copyright notice and this permission notice shall be included in +## all copies or substantial portions of the Software. + +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +## DEALINGS IN THE SOFTWARE. + +## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy + +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import array, asarray, float64, int32, zeros +from . import _lbfgsb +from .optimize import (MemoizeJac, OptimizeResult, + _check_unknown_options, wrap_function, + _approx_fprime_helper) +from scipy.sparse.linalg import LinearOperator + +__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct'] + + +def fmin_l_bfgs_b(func, x0, fprime=None, args=(), + approx_grad=0, + bounds=None, m=10, factr=1e7, pgtol=1e-5, + epsilon=1e-8, + iprint=-1, maxfun=15000, maxiter=15000, disp=None, + callback=None, maxls=20): + """ + Minimize a function func using the L-BFGS-B algorithm. + + Parameters + ---------- + func : callable f(x,*args) + Function to minimise. + x0 : ndarray + Initial guess. + fprime : callable fprime(x,*args), optional + The gradient of `func`. If None, then `func` returns the function + value and the gradient (``f, g = func(x, *args)``), unless + `approx_grad` is True in which case `func` returns only ``f``. + args : sequence, optional + Arguments to pass to `func` and `fprime`. + approx_grad : bool, optional + Whether to approximate the gradient numerically (in which case + `func` returns only the function value). + bounds : list, optional + ``(min, max)`` pairs for each element in ``x``, defining + the bounds on that parameter. Use None or +-inf for one of ``min`` or + ``max`` when there is no bound in that direction. 
+ m : int, optional + The maximum number of variable metric corrections + used to define the limited memory matrix. (The limited memory BFGS + method does not store the full hessian but uses this many terms in an + approximation to it.) + factr : float, optional + The iteration stops when + ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, + where ``eps`` is the machine precision, which is automatically + generated by the code. Typical values for `factr` are: 1e12 for + low accuracy; 1e7 for moderate accuracy; 10.0 for extremely + high accuracy. See Notes for relationship to `ftol`, which is exposed + (instead of `factr`) by the `scipy.optimize.minimize` interface to + L-BFGS-B. + pgtol : float, optional + The iteration will stop when + ``max{|proj g_i | i = 1, ..., n} <= pgtol`` + where ``pg_i`` is the i-th component of the projected gradient. + epsilon : float, optional + Step size used when `approx_grad` is True, for numerically + calculating the gradient + iprint : int, optional + Controls the frequency of output. ``iprint < 0`` means no output; + ``iprint = 0`` print only one line at the last iteration; + ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; + ``iprint = 99`` print details of every iteration except n-vectors; + ``iprint = 100`` print also the changes of active set and final x; + ``iprint > 100`` print details of every iteration including x and g. + disp : int, optional + If zero, then no output. If a positive number, then this over-rides + `iprint` (i.e., `iprint` gets the value of `disp`). + maxfun : int, optional + Maximum number of function evaluations. + maxiter : int, optional + Maximum number of iterations. + callback : callable, optional + Called after each iteration, as ``callback(xk)``, where ``xk`` is the + current parameter vector. + maxls : int, optional + Maximum number of line search steps (per iteration). Default is 20. + + Returns + ------- + x : array_like + Estimated position of the minimum. + f : float + Value of `func` at the minimum. + d : dict + Information dictionary. + + * d['warnflag'] is + + - 0 if converged, + - 1 if too many function evaluations or too many iterations, + - 2 if stopped for another reason, given in d['task'] + + * d['grad'] is the gradient at the minimum (should be 0 ish) + * d['funcalls'] is the number of function calls made. + * d['nit'] is the number of iterations. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'L-BFGS-B' `method` in particular. Note that the + `ftol` option is made available via that interface, while `factr` is + provided via this interface, where `factr` is the factor multiplying + the default machine floating-point precision to arrive at `ftol`: + ``ftol = factr * numpy.finfo(float).eps``. + + Notes + ----- + License of L-BFGS-B (FORTRAN code): + + The version included here (in fortran code) is 3.0 + (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd, + and Jorge Nocedal <nocedal@ece.nwu.edu>. It carries the following + condition for use: + + This software is freely available, but we expect that all publications + describing work using this software, or all commercial products using it, + quote at least one of the references given below. This software is released + under the BSD License. + + References + ---------- + * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound + Constrained Optimization, (1995), SIAM Journal on Scientific and + Statistical Computing, 16, 5, pp. 
1190-1208.
+    * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
+      FORTRAN routines for large scale bound constrained optimization (1997),
+      ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
+    * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
+      FORTRAN routines for large scale bound constrained optimization (2011),
+      ACM Transactions on Mathematical Software, 38, 1.
+
+    """
+    # handle fprime/approx_grad
+    if approx_grad:
+        fun = func
+        jac = None
+    elif fprime is None:
+        fun = MemoizeJac(func)
+        jac = fun.derivative
+    else:
+        fun = func
+        jac = fprime
+
+    # build options
+    if disp is None:
+        disp = iprint
+    opts = {'disp': disp,
+            'iprint': iprint,
+            'maxcor': m,
+            'ftol': factr * np.finfo(float).eps,
+            'gtol': pgtol,
+            'eps': epsilon,
+            'maxfun': maxfun,
+            'maxiter': maxiter,
+            'callback': callback,
+            'maxls': maxls}
+
+    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
+                           **opts)
+    d = {'grad': res['jac'],
+         'task': res['message'],
+         'funcalls': res['nfev'],
+         'nit': res['nit'],
+         'warnflag': res['status']}
+    f = res['fun']
+    x = res['x']
+
+    return x, f, d
+
+
+def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
+                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
+                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
+                     iprint=-1, callback=None, maxls=20, **unknown_options):
+    """
+    Minimize a scalar function of one or more variables using the L-BFGS-B
+    algorithm.
+
+    Options
+    -------
+    disp : None or int
+        If `disp is None` (the default), then the supplied version of `iprint`
+        is used. If `disp is not None`, then it overrides the supplied version
+        of `iprint`: a value of 0 suppresses output, and any positive value is
+        passed on as `iprint`.
+    maxcor : int
+        The maximum number of variable metric corrections used to
+        define the limited memory matrix. (The limited memory BFGS
+        method does not store the full hessian but uses this many terms
+        in an approximation to it.)
+    ftol : float
+        The iteration stops when ``(f^k -
+        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
+    gtol : float
+        The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
+        <= gtol`` where ``pg_i`` is the i-th component of the
+        projected gradient.
+    eps : float
+        Step size used for numerical approximation of the jacobian.
+    maxfun : int
+        Maximum number of function evaluations.
+    maxiter : int
+        Maximum number of iterations.
+    maxls : int, optional
+        Maximum number of line search steps (per iteration). Default is 20.
+
+    Notes
+    -----
+    The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
+    but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
+    relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
+    I.e., `factr` multiplies the default machine floating-point precision to
+    arrive at `ftol`.
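+
+    As a quick sanity check (illustrative sketch, not part of the original
+    file): the moderate-accuracy default ``factr=1e7`` of `fmin_l_bfgs_b`
+    maps onto the ``ftol`` default used here.
+
+    >>> import numpy as np
+    >>> bool(np.isclose(1e7 * np.finfo(float).eps, 2.2204460492503131e-09))
+    True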
+ + """ + _check_unknown_options(unknown_options) + m = maxcor + epsilon = eps + pgtol = gtol + factr = ftol / np.finfo(float).eps + + x0 = asarray(x0).ravel() + n, = x0.shape + + if bounds is None: + bounds = [(None, None)] * n + if len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + # unbounded variables must use None, not +-inf, for optimizer to work properly + bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds] + + if disp is not None: + if disp == 0: + iprint = -1 + else: + iprint = disp + + n_function_evals, fun = wrap_function(fun, ()) + if jac is None: + def func_and_grad(x): + f = fun(x, *args) + g = _approx_fprime_helper(x, fun, epsilon, args=args, f0=f) + return f, g + else: + def func_and_grad(x): + f = fun(x, *args) + g = jac(x, *args) + return f, g + + nbd = zeros(n, int32) + low_bnd = zeros(n, float64) + upper_bnd = zeros(n, float64) + bounds_map = {(None, None): 0, + (1, None): 1, + (1, 1): 2, + (None, 1): 3} + for i in range(0, n): + l, u = bounds[i] + if l is not None: + low_bnd[i] = l + l = 1 + if u is not None: + upper_bnd[i] = u + u = 1 + nbd[i] = bounds_map[l, u] + + if not maxls > 0: + raise ValueError('maxls must be positive.') + + x = array(x0, float64) + f = array(0.0, float64) + g = zeros((n,), float64) + wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64) + iwa = zeros(3*n, int32) + task = zeros(1, 'S60') + csave = zeros(1, 'S60') + lsave = zeros(4, int32) + isave = zeros(44, int32) + dsave = zeros(29, float64) + + task[:] = 'START' + + n_iterations = 0 + + while 1: + # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \ + _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, + pgtol, wa, iwa, task, iprint, csave, lsave, + isave, dsave, maxls) + task_str = task.tostring() + if task_str.startswith(b'FG'): + # The minimization routine wants f and g at the current x. + # Note that interruptions due to maxfun are postponed + # until the completion of the current minimization iteration. + # Overwrite f and g: + f, g = func_and_grad(x) + elif task_str.startswith(b'NEW_X'): + # new iteration + n_iterations += 1 + if callback is not None: + callback(np.copy(x)) + + if n_iterations >= maxiter: + task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT' + elif n_function_evals[0] > maxfun: + task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS ' + 'EXCEEDS LIMIT') + else: + break + + task_str = task.tostring().strip(b'\x00').strip() + if task_str.startswith(b'CONV'): + warnflag = 0 + elif n_function_evals[0] > maxfun or n_iterations >= maxiter: + warnflag = 1 + else: + warnflag = 2 + + # These two portions of the workspace are described in the mainlb + # subroutine in lbfgsb.f. See line 363. + s = wa[0: m*n].reshape(m, n) + y = wa[m*n: 2*m*n].reshape(m, n) + + # See lbfgsb.f line 160 for this portion of the workspace. + # isave(31) = the total number of BFGS updates prior the current iteration; + n_bfgs_updates = isave[30] + + n_corrs = min(n_bfgs_updates, maxcor) + hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs]) + + return OptimizeResult(fun=f, jac=g, nfev=n_function_evals[0], + nit=n_iterations, status=warnflag, message=task_str, + x=x, success=(warnflag == 0), hess_inv=hess_inv) + + +class LbfgsInvHessProduct(LinearOperator): + """Linear operator for the L-BFGS approximate inverse Hessian. 
+
+    This operator computes the product of a vector with the approximate inverse
+    of the Hessian of the objective function, using the L-BFGS limited
+    memory approximation to the inverse Hessian, accumulated during the
+    optimization.
+
+    Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
+    interface.
+
+    Parameters
+    ----------
+    sk : array_like, shape=(n_corr, n)
+        Array of `n_corr` most recent updates to the solution vector.
+        (See [1]).
+    yk : array_like, shape=(n_corr, n)
+        Array of `n_corr` most recent updates to the gradient. (See [1]).
+
+    References
+    ----------
+    .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
+       storage." Mathematics of computation 35.151 (1980): 773-782.
+
+    """
+    def __init__(self, sk, yk):
+        """Construct the operator."""
+        if sk.shape != yk.shape or sk.ndim != 2:
+            raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
+        n_corrs, n = sk.shape
+
+        super(LbfgsInvHessProduct, self).__init__(
+            dtype=np.float64, shape=(n, n))
+
+        self.sk = sk
+        self.yk = yk
+        self.n_corrs = n_corrs
+        self.rho = 1 / np.einsum('ij,ij->i', sk, yk)
+
+    def _matvec(self, x):
+        """Efficient matrix-vector multiply with the BFGS matrices.
+
+        This calculation is described in Section (4) of [1].
+
+        Parameters
+        ----------
+        x : ndarray
+            An array with shape (n,) or (n,1).
+
+        Returns
+        -------
+        y : ndarray
+            The matrix-vector product
+
+        """
+        s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
+        q = np.array(x, dtype=self.dtype, copy=True)
+        if q.ndim == 2 and q.shape[1] == 1:
+            q = q.reshape(-1)
+
+        alpha = np.zeros(n_corrs)
+
+        # two-loop recursion over the stored (s, y) correction pairs
+        for i in range(n_corrs-1, -1, -1):
+            alpha[i] = rho[i] * np.dot(s[i], q)
+            q = q - alpha[i]*y[i]
+
+        r = q
+        for i in range(n_corrs):
+            beta = rho[i] * np.dot(y[i], r)
+            r = r + s[i] * (alpha[i] - beta)
+
+        return r
+
+    def todense(self):
+        """Return a dense array representation of this operator.
+
+        Returns
+        -------
+        arr : ndarray, shape=(n, n)
+            An array with the same shape and containing
+            the same data represented by this `LinearOperator`.
+
+        """
+        s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
+        I = np.eye(*self.shape, dtype=self.dtype)
+        Hk = I
+
+        for i in range(n_corrs):
+            A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
+            A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]
+
+            Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
+                                               s[i][np.newaxis, :])
+        return Hk
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/lbfgsb.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/lbfgsb.pyc
new file mode 100644
index 0000000..0eb2a93
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/lbfgsb.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/lbfgsb_src/README b/project/venv/lib/python2.7/site-packages/scipy/optimize/lbfgsb_src/README
new file mode 100644
index 0000000..ff3b10c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/lbfgsb_src/README
@@ -0,0 +1,87 @@
+From the website for the L-BFGS-B code (at
+http://www.ece.northwestern.edu/~nocedal/lbfgsb.html):
+
+"""
+L-BFGS-B is a limited-memory quasi-Newton code for bound-constrained
+optimization, i.e. for problems where the only constraints are of the
+form l<= x <= u.
+"""
+
+This is a Python wrapper (using F2PY) written by David M. Cooke
+<cookedm@physics.mcmaster.ca> and released as version 0.9 on April 9, 2004.
+The wrapper was slightly modified by Joonas Paalasmaa for the 3.0 version +in March 2012. + +License of L-BFGS-B (Fortran code) +================================== + +The version included here (in lbfgsb.f) is 3.0 (released April 25, 2011). It was +written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal <nocedal@ece.nwu.edu>. It +carries the following condition for use: + + """ + This software is freely available, but we expect that all publications + describing work using this software, or all commercial products using it, + quote at least one of the references given below. This software is released + under the BSD License. + + References + * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound + Constrained Optimization, (1995), SIAM Journal on Scientific and + Statistical Computing, 16, 5, pp. 1190-1208. + * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (1997), + ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. + * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (2011), + ACM Transactions on Mathematical Software, 38, 1. + """ + +The Python wrapper +================== + +This code uses F2PY (http://cens.ioc.ee/projects/f2py2e/) to generate +the wrapper around the Fortran code. + +The Python code and wrapper are copyrighted 2004 by David M. Cooke +<cookedm@physics.mcmaster.ca>. + +Installation +============ + +Make sure you have F2PY, scipy_distutils, and a BLAS library that +scipy_distutils can find. Then, + +$ python setup.py build +$ python setup.py install + +and you're done. + +Example usage +============= + +An example of the usage is given at the bottom of the lbfgsb.py file. +Run it with 'python lbfgsb.py'. + +License for the Python wrapper +============================== + +Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/linesearch.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/linesearch.py new file mode 100644 index 0000000..d9072f9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/linesearch.py @@ -0,0 +1,879 @@ +""" +Functions +--------- +.. 
autosummary::
+   :toctree: generated/
+
+   line_search_armijo
+   line_search_wolfe1
+   line_search_wolfe2
+   scalar_search_wolfe1
+   scalar_search_wolfe2
+
+"""
+from __future__ import division, print_function, absolute_import
+
+from warnings import warn
+
+from scipy.optimize import minpack2
+import numpy as np
+from scipy._lib.six import xrange
+
+__all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2',
+           'scalar_search_wolfe1', 'scalar_search_wolfe2',
+           'line_search_armijo']
+
+class LineSearchWarning(RuntimeWarning):
+    pass
+
+
+#------------------------------------------------------------------------------
+# Minpack's Wolfe line and scalar searches
+#------------------------------------------------------------------------------
+
+def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
+                       old_fval=None, old_old_fval=None,
+                       args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
+                       xtol=1e-14):
+    """
+    Same as `scalar_search_wolfe1`, but performs the line search along the
+    direction `pk`
+
+    Parameters
+    ----------
+    f : callable
+        Function `f(x)`
+    fprime : callable
+        Gradient of `f`
+    xk : array_like
+        Current point
+    pk : array_like
+        Search direction
+
+    gfk : array_like, optional
+        Gradient of `f` at point `xk`
+    old_fval : float, optional
+        Value of `f` at point `xk`
+    old_old_fval : float, optional
+        Value of `f` at point preceding `xk`
+
+    The rest of the parameters are the same as for `scalar_search_wolfe1`.
+
+    Returns
+    -------
+    stp, f_count, g_count, fval, old_fval
+        As in `scalar_search_wolfe1`
+    gval : array
+        Gradient of `f` at the final point
+
+    """
+    if gfk is None:
+        gfk = fprime(xk)
+
+    if isinstance(fprime, tuple):
+        eps = fprime[1]
+        fprime = fprime[0]
+        newargs = (f, eps) + args
+        gradient = False
+    else:
+        newargs = args
+        gradient = True
+
+    gval = [gfk]
+    gc = [0]
+    fc = [0]
+
+    def phi(s):
+        fc[0] += 1
+        return f(xk + s*pk, *args)
+
+    def derphi(s):
+        gval[0] = fprime(xk + s*pk, *newargs)
+        if gradient:
+            gc[0] += 1
+        else:
+            fc[0] += len(xk) + 1
+        return np.dot(gval[0], pk)
+
+    derphi0 = np.dot(gfk, pk)
+
+    stp, fval, old_fval = scalar_search_wolfe1(
+            phi, derphi, old_fval, old_old_fval, derphi0,
+            c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
+
+    return stp, fc[0], gc[0], fval, old_fval, gval[0]
+
+
+def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
+                         c1=1e-4, c2=0.9,
+                         amax=50, amin=1e-8, xtol=1e-14):
+    """
+    Scalar function search for alpha that satisfies strong Wolfe conditions
+
+    The search direction is assumed to be a descent direction, so only
+    alpha > 0 is considered.
+
+    Parameters
+    ----------
+    phi : callable phi(alpha)
+        Function at point `alpha`
+    derphi : callable dphi(alpha)
+        Derivative `d phi(alpha)/d alpha`. Returns a scalar.
+
+    phi0 : float, optional
+        Value of `f` at 0
+    old_phi0 : float, optional
+        Value of `f` at the previous point
+    derphi0 : float, optional
+        Value `derphi` at 0
+    c1, c2 : float, optional
+        Wolfe parameters
+    amax, amin : float, optional
+        Maximum and minimum step size
+    xtol : float, optional
+        Relative tolerance for an acceptable step.
+
+    Returns
+    -------
+    alpha : float
+        Step size, or None if no suitable step was found
+    phi : float
+        Value of `phi` at the new point `alpha`
+    phi0 : float
+        Value of `phi` at `alpha=0`
+
+    Notes
+    -----
+    Uses routine DCSRCH from MINPACK.
+
+    """
+
+    if phi0 is None:
+        phi0 = phi(0.)
+    if derphi0 is None:
+        derphi0 = derphi(0.)
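+    # Initial step guess (comment added for clarity): start from 1.0, or from
+    # the step that a first-order model predicts from the previous decrease
+    # (phi0 - old_phi0), capped at 1.0.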
+ + if old_phi0 is not None and derphi0 != 0: + alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) + if alpha1 < 0: + alpha1 = 1.0 + else: + alpha1 = 1.0 + + phi1 = phi0 + derphi1 = derphi0 + isave = np.zeros((2,), np.intc) + dsave = np.zeros((13,), float) + task = b'START' + + maxiter = 100 + for i in xrange(maxiter): + stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1, + c1, c2, xtol, task, + amin, amax, isave, dsave) + if task[:2] == b'FG': + alpha1 = stp + phi1 = phi(stp) + derphi1 = derphi(stp) + else: + break + else: + # maxiter reached, the line search did not converge + stp = None + + if task[:5] == b'ERROR' or task[:4] == b'WARN': + stp = None # failed + + return stp, phi1, phi0 + + +line_search = line_search_wolfe1 + + +#------------------------------------------------------------------------------ +# Pure-Python Wolfe line and scalar searches +#------------------------------------------------------------------------------ + +def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None, + old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None, + extra_condition=None, maxiter=10): + """Find alpha that satisfies strong Wolfe conditions. + + Parameters + ---------- + f : callable f(x,*args) + Objective function. + myfprime : callable f'(x,*args) + Objective function gradient. + xk : ndarray + Starting point. + pk : ndarray + Search direction. + gfk : ndarray, optional + Gradient value for x=xk (xk being the current parameter + estimate). Will be recomputed if omitted. + old_fval : float, optional + Function value for x=xk. Will be recomputed if omitted. + old_old_fval : float, optional + Function value for the point preceding x=xk + args : tuple, optional + Additional arguments passed to objective function. + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax : float, optional + Maximum step size + extra_condition : callable, optional + A callable of the form ``extra_condition(alpha, x, f, g)`` + returning a boolean. Arguments are the proposed step ``alpha`` + and the corresponding ``x``, ``f`` and ``g`` values. The line search + accepts the value of ``alpha`` only if this + callable returns ``True``. If the callable returns ``False`` + for the step length, the algorithm will continue with + new iterates. The callable is only called for iterates + satisfying the strong Wolfe conditions. + maxiter : int, optional + Maximum number of iterations to perform + + Returns + ------- + alpha : float or None + Alpha for which ``x_new = x0 + alpha * pk``, + or None if the line search algorithm did not converge. + fc : int + Number of function evaluations made. + gc : int + Number of gradient evaluations made. + new_fval : float or None + New function value ``f(x_new)=f(x0+alpha*pk)``, + or None if the line search algorithm did not converge. + old_fval : float + Old function value ``f(x0)``. + new_slope : float or None + The local slope along the search direction at the + new value ``<myfprime(x_new), pk>``, + or None if the line search algorithm did not converge. + + + Notes + ----- + Uses the line search algorithm to enforce strong Wolfe + conditions. See Wright and Nocedal, 'Numerical Optimization', + 1999, pg. 59-60. + + For the zoom phase it uses an algorithm by [...]. 
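+
+    Examples
+    --------
+    A minimal sketch (illustrative, not part of the original file): on the
+    quadratic bowl ``f(x) = x . x`` the strong Wolfe step along the negative
+    gradient from ``(1, 1)`` is exactly 0.5, landing on the minimizer.
+
+    >>> import numpy as np
+    >>> f = lambda x: np.dot(x, x)
+    >>> grad = lambda x: 2 * x
+    >>> x0 = np.array([1.0, 1.0])
+    >>> pk = -grad(x0)
+    >>> alpha = line_search_wolfe2(f, grad, x0, pk)[0]
+    >>> float(alpha)
+    0.5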
+ + """ + fc = [0] + gc = [0] + gval = [None] + gval_alpha = [None] + + def phi(alpha): + fc[0] += 1 + return f(xk + alpha * pk, *args) + + if isinstance(myfprime, tuple): + def derphi(alpha): + fc[0] += len(xk) + 1 + eps = myfprime[1] + fprime = myfprime[0] + newargs = (f, eps) + args + gval[0] = fprime(xk + alpha * pk, *newargs) # store for later use + gval_alpha[0] = alpha + return np.dot(gval[0], pk) + else: + fprime = myfprime + + def derphi(alpha): + gc[0] += 1 + gval[0] = fprime(xk + alpha * pk, *args) # store for later use + gval_alpha[0] = alpha + return np.dot(gval[0], pk) + + if gfk is None: + gfk = fprime(xk, *args) + derphi0 = np.dot(gfk, pk) + + if extra_condition is not None: + # Add the current gradient as argument, to avoid needless + # re-evaluation + def extra_condition2(alpha, phi): + if gval_alpha[0] != alpha: + derphi(alpha) + x = xk + alpha * pk + return extra_condition(alpha, x, phi, gval[0]) + else: + extra_condition2 = None + + alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2( + phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax, + extra_condition2, maxiter=maxiter) + + if derphi_star is None: + warn('The line search algorithm did not converge', LineSearchWarning) + else: + # derphi_star is a number (derphi) -- so use the most recently + # calculated gradient used in computing it derphi = gfk*pk + # this is the gradient at the next step no need to compute it + # again in the outer loop. + derphi_star = gval[0] + + return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star + + +def scalar_search_wolfe2(phi, derphi=None, phi0=None, + old_phi0=None, derphi0=None, + c1=1e-4, c2=0.9, amax=None, + extra_condition=None, maxiter=10): + """Find alpha that satisfies strong Wolfe conditions. + + alpha > 0 is assumed to be a descent direction. + + Parameters + ---------- + phi : callable f(x) + Objective scalar function. + derphi : callable f'(x), optional + Objective function derivative (can be None) + phi0 : float, optional + Value of phi at s=0 + old_phi0 : float, optional + Value of phi at previous point + derphi0 : float, optional + Value of derphi at s=0 + c1 : float, optional + Parameter for Armijo condition rule. + c2 : float, optional + Parameter for curvature condition rule. + amax : float, optional + Maximum step size + extra_condition : callable, optional + A callable of the form ``extra_condition(alpha, phi_value)`` + returning a boolean. The line search accepts the value + of ``alpha`` only if this callable returns ``True``. + If the callable returns ``False`` for the step length, + the algorithm will continue with new iterates. + The callable is only called for iterates satisfying + the strong Wolfe conditions. + maxiter : int, optional + Maximum number of iterations to perform + + Returns + ------- + alpha_star : float or None + Best alpha, or None if the line search algorithm did not converge. + phi_star : float + phi at alpha_star + phi0 : float + phi at 0 + derphi_star : float or None + derphi at alpha_star, or None if the line search algorithm + did not converge. + + Notes + ----- + Uses the line search algorithm to enforce strong Wolfe + conditions. See Wright and Nocedal, 'Numerical Optimization', + 1999, pg. 59-60. + + For the zoom phase it uses an algorithm by [...]. + + """ + + if phi0 is None: + phi0 = phi(0.) + + if derphi0 is None and derphi is not None: + derphi0 = derphi(0.) 
+ + alpha0 = 0 + if old_phi0 is not None and derphi0 != 0: + alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) + else: + alpha1 = 1.0 + + if alpha1 < 0: + alpha1 = 1.0 + + phi_a1 = phi(alpha1) + #derphi_a1 = derphi(alpha1) evaluated below + + phi_a0 = phi0 + derphi_a0 = derphi0 + + if extra_condition is None: + extra_condition = lambda alpha, phi: True + + for i in xrange(maxiter): + if alpha1 == 0 or (amax is not None and alpha0 == amax): + # alpha1 == 0: This shouldn't happen. Perhaps the increment has + # slipped below machine precision? + alpha_star = None + phi_star = phi0 + phi0 = old_phi0 + derphi_star = None + + if alpha1 == 0: + msg = 'Rounding errors prevent the line search from converging' + else: + msg = "The line search algorithm could not find a solution " + \ + "less than or equal to amax: %s" % amax + + warn(msg, LineSearchWarning) + break + + if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \ + ((phi_a1 >= phi_a0) and (i > 1)): + alpha_star, phi_star, derphi_star = \ + _zoom(alpha0, alpha1, phi_a0, + phi_a1, derphi_a0, phi, derphi, + phi0, derphi0, c1, c2, extra_condition) + break + + derphi_a1 = derphi(alpha1) + if (abs(derphi_a1) <= -c2*derphi0): + if extra_condition(alpha1, phi_a1): + alpha_star = alpha1 + phi_star = phi_a1 + derphi_star = derphi_a1 + break + + if (derphi_a1 >= 0): + alpha_star, phi_star, derphi_star = \ + _zoom(alpha1, alpha0, phi_a1, + phi_a0, derphi_a1, phi, derphi, + phi0, derphi0, c1, c2, extra_condition) + break + + alpha2 = 2 * alpha1 # increase by factor of two on each iteration + if amax is not None: + alpha2 = min(alpha2, amax) + alpha0 = alpha1 + alpha1 = alpha2 + phi_a0 = phi_a1 + phi_a1 = phi(alpha1) + derphi_a0 = derphi_a1 + + else: + # stopping test maxiter reached + alpha_star = alpha1 + phi_star = phi_a1 + derphi_star = None + warn('The line search algorithm did not converge', LineSearchWarning) + + return alpha_star, phi_star, phi0, derphi_star + + +def _cubicmin(a, fa, fpa, b, fb, c, fc): + """ + Finds the minimizer for a cubic polynomial that goes through the + points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa. + + If no minimizer can be found return None + + """ + # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D + + with np.errstate(divide='raise', over='raise', invalid='raise'): + try: + C = fpa + db = b - a + dc = c - a + denom = (db * dc) ** 2 * (db - dc) + d1 = np.empty((2, 2)) + d1[0, 0] = dc ** 2 + d1[0, 1] = -db ** 2 + d1[1, 0] = -dc ** 3 + d1[1, 1] = db ** 3 + [A, B] = np.dot(d1, np.asarray([fb - fa - C * db, + fc - fa - C * dc]).flatten()) + A /= denom + B /= denom + radical = B * B - 3 * A * C + xmin = a + (-B + np.sqrt(radical)) / (3 * A) + except ArithmeticError: + return None + if not np.isfinite(xmin): + return None + return xmin + + +def _quadmin(a, fa, fpa, b, fb): + """ + Finds the minimizer for a quadratic polynomial that goes through + the points (a,fa), (b,fb) with derivative at a of fpa, + + """ + # f(x) = B*(x-a)^2 + C*(x-a) + D + with np.errstate(divide='raise', over='raise', invalid='raise'): + try: + D = fa + C = fpa + db = b - a * 1.0 + B = (fb - D - C * db) / (db * db) + xmin = a - C / (2.0 * B) + except ArithmeticError: + return None + if not np.isfinite(xmin): + return None + return xmin + + +def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, + phi, derphi, phi0, derphi0, c1, c2, extra_condition): + """ + Part of the optimization algorithm in `scalar_search_wolfe2`. 
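+
+    Given a bracket [a_lo, a_hi] known to contain acceptable step lengths,
+    repeatedly shrink it by cubic/quadratic interpolation (falling back to
+    bisection) until a step satisfying the strong Wolfe conditions is found
+    or ``maxiter`` is exhausted.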
+ """ + + maxiter = 10 + i = 0 + delta1 = 0.2 # cubic interpolant check + delta2 = 0.1 # quadratic interpolant check + phi_rec = phi0 + a_rec = 0 + while True: + # interpolate to find a trial step length between a_lo and + # a_hi Need to choose interpolation here. Use cubic + # interpolation and then if the result is within delta * + # dalpha or outside of the interval bounded by a_lo or a_hi + # then use quadratic interpolation, if the result is still too + # close, then use bisection + + dalpha = a_hi - a_lo + if dalpha < 0: + a, b = a_hi, a_lo + else: + a, b = a_lo, a_hi + + # minimizer of cubic interpolant + # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) + # + # if the result is too close to the end points (or out of the + # interval) then use quadratic interpolation with phi_lo, + # derphi_lo and phi_hi if the result is still too close to the + # end points (or out of the interval) then use bisection + + if (i > 0): + cchk = delta1 * dalpha + a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, + a_rec, phi_rec) + if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk): + qchk = delta2 * dalpha + a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) + if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk): + a_j = a_lo + 0.5*dalpha + + # Check new value of a_j + + phi_aj = phi(a_j) + if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo): + phi_rec = phi_hi + a_rec = a_hi + a_hi = a_j + phi_hi = phi_aj + else: + derphi_aj = derphi(a_j) + if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj): + a_star = a_j + val_star = phi_aj + valprime_star = derphi_aj + break + if derphi_aj*(a_hi - a_lo) >= 0: + phi_rec = phi_hi + a_rec = a_hi + a_hi = a_lo + phi_hi = phi_lo + else: + phi_rec = phi_lo + a_rec = a_lo + a_lo = a_j + phi_lo = phi_aj + derphi_lo = derphi_aj + i += 1 + if (i > maxiter): + # Failed to find a conforming step size + a_star = None + val_star = None + valprime_star = None + break + return a_star, val_star, valprime_star + + +#------------------------------------------------------------------------------ +# Armijo line and scalar searches +#------------------------------------------------------------------------------ + +def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): + """Minimize over alpha, the function ``f(xk+alpha pk)``. + + Parameters + ---------- + f : callable + Function to be minimized. + xk : array_like + Current point. + pk : array_like + Search direction. + gfk : array_like + Gradient of `f` at point `xk`. + old_fval : float + Value of `f` at point `xk`. + args : tuple, optional + Optional arguments. + c1 : float, optional + Value to control stopping criterion. + alpha0 : scalar, optional + Value of `alpha` at start of the optimization. + + Returns + ------- + alpha + f_count + f_val_at_alpha + + Notes + ----- + Uses the interpolation algorithm (Armijo backtracking) as suggested by + Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 + + """ + xk = np.atleast_1d(xk) + fc = [0] + + def phi(alpha1): + fc[0] += 1 + return f(xk + alpha1*pk, *args) + + if old_fval is None: + phi0 = phi(0.) 
+    else:
+        phi0 = old_fval  # compute f(xk) -- done in past loop
+
+    derphi0 = np.dot(gfk, pk)
+    alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
+                                       alpha0=alpha0)
+    return alpha, fc[0], phi1
+
+
+def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
+    """
+    Compatibility wrapper for `line_search_armijo`
+    """
+    r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,
+                           alpha0=alpha0)
+    return r[0], r[1], 0, r[2]
+
+
+def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
+    """Minimize over alpha, the function ``phi(alpha)``.
+
+    Uses the interpolation algorithm (Armijo backtracking) as suggested by
+    Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57
+
+    The descent condition ``derphi0 < 0`` is assumed, and only alpha > 0
+    is considered.
+
+    Returns
+    -------
+    alpha
+    phi1
+
+    """
+    phi_a0 = phi(alpha0)
+    if phi_a0 <= phi0 + c1*alpha0*derphi0:
+        return alpha0, phi_a0
+
+    # Otherwise compute the minimizer of a quadratic interpolant:
+
+    alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
+    phi_a1 = phi(alpha1)
+
+    if (phi_a1 <= phi0 + c1*alpha1*derphi0):
+        return alpha1, phi_a1
+
+    # Otherwise loop with cubic interpolation until we find an alpha which
+    # satisfies the first Wolfe condition (since we are backtracking, we will
+    # assume that the value of alpha is not too small and satisfies the second
+    # condition).
+
+    while alpha1 > amin:       # we are assuming derphi0 < 0, i.e. descent
+        factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
+        a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
+            alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
+        a = a / factor
+        b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
+            alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
+        b = b / factor
+
+        alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
+        phi_a2 = phi(alpha2)
+
+        if (phi_a2 <= phi0 + c1*alpha2*derphi0):
+            return alpha2, phi_a2
+
+        if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
+            alpha2 = alpha1 / 2.0
+
+        alpha0 = alpha1
+        alpha1 = alpha2
+        phi_a0 = phi_a1
+        phi_a1 = phi_a2
+
+    # Failed to find a suitable step length
+    return None, phi_a1
+
+
+#------------------------------------------------------------------------------
+# Non-monotone line search for DF-SANE
+#------------------------------------------------------------------------------
+
+def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,
+                                  gamma=1e-4, tau_min=0.1, tau_max=0.5):
+    """
+    Nonmonotone backtracking line search as described in [1]_
+
+    Parameters
+    ----------
+    f : callable
+        Function returning a tuple ``(f, F)`` where ``f`` is the value
+        of a merit function and ``F`` the residual.
+    x_k : ndarray
+        Initial position
+    d : ndarray
+        Search direction
+    prev_fs : list of float
+        List of previous merit function values. Should have ``len(prev_fs) <= M``
+        where ``M`` is the nonmonotonicity window parameter.
+    eta : float
+        Allowed merit function increase, see [1]_
+    gamma, tau_min, tau_max : float, optional
+        Search parameters, see [1]_
+
+    Returns
+    -------
+    alpha : float
+        Step length
+    xp : ndarray
+        Next position
+    fp : float
+        Merit function value at next position
+    Fp : ndarray
+        Residual at next position
+
+    References
+    ----------
+    [1] "Spectral residual method without gradient information for solving
+        large-scale nonlinear systems of equations." W. La Cruz,
+        J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
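+
+    Notes
+    -----
+    A step is accepted once the sufficient-decrease test relative to the
+    worst recent value holds, i.e.
+    ``f(x_k + alpha*d) <= max(prev_fs) + eta - gamma*alpha**2*f_k``,
+    trying ``+alpha_p`` and ``-alpha_m`` alternately and shrinking both
+    with safeguarded interpolation.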
+ + """ + f_k = prev_fs[-1] + f_bar = max(prev_fs) + + alpha_p = 1 + alpha_m = 1 + alpha = 1 + + while True: + xp = x_k + alpha_p * d + fp, Fp = f(xp) + + if fp <= f_bar + eta - gamma * alpha_p**2 * f_k: + alpha = alpha_p + break + + alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) + + xp = x_k - alpha_m * d + fp, Fp = f(xp) + + if fp <= f_bar + eta - gamma * alpha_m**2 * f_k: + alpha = -alpha_m + break + + alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) + + alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) + alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) + + return alpha, xp, fp, Fp + + +def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta, + gamma=1e-4, tau_min=0.1, tau_max=0.5, + nu=0.85): + """ + Nonmonotone line search from [1] + + Parameters + ---------- + f : callable + Function returning a tuple ``(f, F)`` where ``f`` is the value + of a merit function and ``F`` the residual. + x_k : ndarray + Initial position + d : ndarray + Search direction + f_k : float + Initial merit function value + C, Q : float + Control parameters. On the first iteration, give values + Q=1.0, C=f_k + eta : float + Allowed merit function increase, see [1]_ + nu, gamma, tau_min, tau_max : float, optional + Search parameters, see [1]_ + + Returns + ------- + alpha : float + Step length + xp : ndarray + Next position + fp : float + Merit function value at next position + Fp : ndarray + Residual at next position + C : float + New value for the control parameter C + Q : float + New value for the control parameter Q + + References + ---------- + .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line + search and its application to the spectral residual + method'', IMA J. Numer. Anal. 29, 814 (2009). + + """ + alpha_p = 1 + alpha_m = 1 + alpha = 1 + + while True: + xp = x_k + alpha_p * d + fp, Fp = f(xp) + + if fp <= C + eta - gamma * alpha_p**2 * f_k: + alpha = alpha_p + break + + alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) + + xp = x_k - alpha_m * d + fp, Fp = f(xp) + + if fp <= C + eta - gamma * alpha_m**2 * f_k: + alpha = -alpha_m + break + + alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) + + alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) + alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) + + # Update C and Q + Q_next = nu * Q + 1 + C = (nu * Q * (C + eta) + fp) / Q_next + Q = Q_next + + return alpha, xp, fp, Fp, C, Q diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/linesearch.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/linesearch.pyc new file mode 100644 index 0000000..4e802b9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/linesearch.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/minpack.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/minpack.py new file mode 100644 index 0000000..18abc4e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/minpack.py @@ -0,0 +1,895 @@ +from __future__ import division, print_function, absolute_import + +import threading +import warnings +from . 
import _minpack
+
+import numpy as np
+from numpy import (atleast_1d, dot, take, triu, shape, eye,
+                   transpose, zeros, product, greater, array,
+                   all, where, isscalar, asarray, inf, abs,
+                   finfo, inexact, issubdtype, dtype)
+from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
+from scipy._lib._util import _asarray_validated, _lazywhere
+from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
+from ._lsq import least_squares
+from ._lsq.common import make_strictly_feasible
+from ._lsq.least_squares import prepare_bounds
+
+error = _minpack.error
+
+__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
+
+
+def _check_func(checker, argname, thefunc, x0, args, numinputs,
+                output_shape=None):
+    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
+    if (output_shape is not None) and (shape(res) != output_shape):
+        if (output_shape[0] != 1):
+            if len(output_shape) > 1:
+                if output_shape[1] == 1:
+                    return shape(res)
+            msg = "%s: there is a mismatch between the input and output " \
+                  "shape of the '%s' argument" % (checker, argname)
+            func_name = getattr(thefunc, '__name__', None)
+            if func_name:
+                msg += " '%s'." % func_name
+            else:
+                msg += "."
+            msg += ' Shape should be %s but it is %s.' % (output_shape, shape(res))
+            raise TypeError(msg)
+    if issubdtype(res.dtype, inexact):
+        dt = res.dtype
+    else:
+        dt = dtype(float)
+    return shape(res), dt
+
+
+def fsolve(func, x0, args=(), fprime=None, full_output=0,
+           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
+           epsfcn=None, factor=100, diag=None):
+    """
+    Find the roots of a function.
+
+    Return the roots of the (non-linear) equations defined by
+    ``func(x) = 0`` given a starting estimate.
+
+    Parameters
+    ----------
+    func : callable ``f(x, *args)``
+        A function that takes at least one (possibly vector) argument,
+        and returns a value of the same length.
+    x0 : ndarray
+        The starting estimate for the roots of ``func(x) = 0``.
+    args : tuple, optional
+        Any extra arguments to `func`.
+    fprime : callable ``f(x, *args)``, optional
+        A function to compute the Jacobian of `func` with derivatives
+        across the rows. By default, the Jacobian will be estimated.
+    full_output : bool, optional
+        If True, return optional outputs.
+    col_deriv : bool, optional
+        Specify whether the Jacobian function computes derivatives down
+        the columns (faster, because there is no transpose operation).
+    xtol : float, optional
+        The calculation will terminate if the relative error between two
+        consecutive iterates is at most `xtol`.
+    maxfev : int, optional
+        The maximum number of calls to the function. If zero, then
+        ``100*(N+1)`` is the maximum where N is the number of elements
+        in `x0`.
+    band : tuple, optional
+        If set to a two-sequence containing the number of sub- and
+        super-diagonals within the band of the Jacobi matrix, the
+        Jacobi matrix is considered banded (only for ``fprime=None``).
+    epsfcn : float, optional
+        A suitable step length for the forward-difference
+        approximation of the Jacobian (for ``fprime=None``). If
+        `epsfcn` is less than the machine precision, it is assumed
+        that the relative errors in the functions are of the order of
+        the machine precision.
+    factor : float, optional
+        A parameter determining the initial step bound
+        (``factor * || diag * x||``). Should be in the interval
+        ``(0.1, 100)``.
+    diag : sequence, optional
+        N positive entries that serve as scale factors for the
+        variables.
+
+    Returns
+    -------
+    x : ndarray
+        The solution (or the result of the last iteration for
+        an unsuccessful call).
+    infodict : dict
+        A dictionary of optional outputs with the keys:
+
+        ``nfev``
+            number of function calls
+        ``njev``
+            number of Jacobian calls
+        ``fvec``
+            function evaluated at the output
+        ``fjac``
+            the orthogonal matrix, q, produced by the QR
+            factorization of the final approximate Jacobian
+            matrix, stored column wise
+        ``r``
+            upper triangular matrix produced by QR factorization
+            of the same matrix
+        ``qtf``
+            the vector ``(transpose(q) * fvec)``
+
+    ier : int
+        An integer flag. Set to 1 if a solution was found, otherwise refer
+        to `mesg` for more information.
+    mesg : str
+        If no solution is found, `mesg` details the cause of failure.
+
+    See Also
+    --------
+    root : Interface to root finding algorithms for multivariate
+           functions. See the 'hybr' `method` in particular.
+
+    Notes
+    -----
+    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
+
+    """
+    options = {'col_deriv': col_deriv,
+               'xtol': xtol,
+               'maxfev': maxfev,
+               'band': band,
+               'eps': epsfcn,
+               'factor': factor,
+               'diag': diag}
+
+    res = _root_hybr(func, x0, args, jac=fprime, **options)
+    if full_output:
+        x = res['x']
+        info = dict((k, res.get(k))
+                    for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
+        info['fvec'] = res['fun']
+        return x, info, res['status'], res['message']
+    else:
+        status = res['status']
+        msg = res['message']
+        if status == 0:
+            raise TypeError(msg)
+        elif status == 1:
+            pass
+        elif status in [2, 3, 4, 5]:
+            warnings.warn(msg, RuntimeWarning)
+        else:
+            raise TypeError(msg)
+        return res['x']
+
+
+def _root_hybr(func, x0, args=(), jac=None,
+               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
+               factor=100, diag=None, **unknown_options):
+    """
+    Find the roots of a multivariate function using MINPACK's hybrd and
+    hybrj routines (modified Powell method).
+
+    Options
+    -------
+    col_deriv : bool
+        Specify whether the Jacobian function computes derivatives down
+        the columns (faster, because there is no transpose operation).
+    xtol : float
+        The calculation will terminate if the relative error between two
+        consecutive iterates is at most `xtol`.
+    maxfev : int
+        The maximum number of calls to the function. If zero, then
+        ``100*(N+1)`` is the maximum where N is the number of elements
+        in `x0`.
+    band : tuple
+        If set to a two-sequence containing the number of sub- and
+        super-diagonals within the band of the Jacobi matrix, the
+        Jacobi matrix is considered banded (only for ``fprime=None``).
+    eps : float
+        A suitable step length for the forward-difference
+        approximation of the Jacobian (for ``fprime=None``). If
+        `eps` is less than the machine precision, it is assumed
+        that the relative errors in the functions are of the order of
+        the machine precision.
+    factor : float
+        A parameter determining the initial step bound
+        (``factor * || diag * x||``). Should be in the interval
+        ``(0.1, 100)``.
+    diag : sequence
+        N positive entries that serve as scale factors for the
+        variables.
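+
+    Examples
+    --------
+    A minimal sketch of how these options are reached in practice
+    (illustrative values; normally one calls `fsolve` or
+    ``root(method='hybr')`` rather than this helper directly):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import fsolve
+    >>> def func(x):
+    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+    >>> sol = fsolve(func, [0, 0], xtol=1.49012e-08)
+    >>> np.allclose(func(sol), [0.0, 0.0])
+    True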
+ + """ + _check_unknown_options(unknown_options) + epsfcn = eps + + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,)) + if epsfcn is None: + epsfcn = finfo(dtype).eps + Dfun = jac + if Dfun is None: + if band is None: + ml, mu = -10, -10 + else: + ml, mu = band[:2] + if maxfev == 0: + maxfev = 200 * (n + 1) + retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev, + ml, mu, epsfcn, factor, diag) + else: + _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n)) + if (maxfev == 0): + maxfev = 100 * (n + 1) + retval = _minpack._hybrj(func, Dfun, x0, args, 1, + col_deriv, xtol, maxfev, factor, diag) + + x, status = retval[0], retval[-1] + + errors = {0: "Improper input parameters were entered.", + 1: "The solution converged.", + 2: "The number of calls to function has " + "reached maxfev = %d." % maxfev, + 3: "xtol=%f is too small, no further improvement " + "in the approximate\n solution " + "is possible." % xtol, + 4: "The iteration is not making good progress, as measured " + "by the \n improvement from the last five " + "Jacobian evaluations.", + 5: "The iteration is not making good progress, " + "as measured by the \n improvement from the last " + "ten iterations.", + 'unknown': "An error occurred."} + + info = retval[1] + info['fun'] = info.pop('fvec') + sol = OptimizeResult(x=x, success=(status == 1), status=status) + sol.update(info) + try: + sol['message'] = errors[status] + except KeyError: + sol['message'] = errors['unknown'] + + return sol + + +LEASTSQ_SUCCESS = [1, 2, 3, 4] +LEASTSQ_FAILURE = [5, 6, 7, 8] + + +def leastsq(func, x0, args=(), Dfun=None, full_output=0, + col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, + gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): + """ + Minimize the sum of squares of a set of equations. + + :: + + x = arg min(sum(func(y)**2,axis=0)) + y + + Parameters + ---------- + func : callable + should take at least one (possibly length N vector) argument and + returns M floating point numbers. It must not return NaNs or + fitting might fail. + x0 : ndarray + The starting estimate for the minimization. + args : tuple, optional + Any extra arguments to func are placed in this tuple. + Dfun : callable, optional + A function or method to compute the Jacobian of func with derivatives + across the rows. If this is None, the Jacobian will be estimated. + full_output : bool, optional + non-zero to return all optional outputs. + col_deriv : bool, optional + non-zero to specify that the Jacobian function computes derivatives + down the columns (faster, because there is no transpose operation). + ftol : float, optional + Relative error desired in the sum of squares. + xtol : float, optional + Relative error desired in the approximate solution. + gtol : float, optional + Orthogonality desired between the function vector and the columns of + the Jacobian. + maxfev : int, optional + The maximum number of calls to the function. If `Dfun` is provided + then the default `maxfev` is 100*(N+1) where N is the number of elements + in x0, otherwise the default `maxfev` is 200*(N+1). + epsfcn : float, optional + A variable used in determining a suitable step length for the forward- + difference approximation of the Jacobian (for Dfun=None). + Normally the actual step length will be sqrt(epsfcn)*x + If epsfcn is less than the machine precision, it is assumed that the + relative errors are of the order of the machine precision. 
+    factor : float, optional
+        A parameter determining the initial step bound
+        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
+    diag : sequence, optional
+        N positive entries that serve as scale factors for the variables.
+
+    Returns
+    -------
+    x : ndarray
+        The solution (or the result of the last iteration for an unsuccessful
+        call).
+    cov_x : ndarray
+        Uses the fjac and ipvt optional outputs to construct an
+        estimate of the Jacobian around the solution. None if a
+        singular matrix is encountered (indicates very flat curvature in
+        some direction). This matrix must be multiplied by the
+        residual variance to get the covariance of the
+        parameter estimates -- see curve_fit.
+    infodict : dict
+        a dictionary of optional outputs with the keys:
+
+        ``nfev``
+            The number of function calls
+        ``fvec``
+            The function evaluated at the output
+        ``fjac``
+            A permutation of the R matrix of a QR
+            factorization of the final approximate
+            Jacobian matrix, stored column wise.
+            Together with ipvt, the covariance of the
+            estimate can be approximated.
+        ``ipvt``
+            An integer array of length N which defines
+            a permutation matrix, p, such that
+            fjac*p = q*r, where r is upper triangular
+            with diagonal elements of nonincreasing
+            magnitude. Column j of p is column ipvt(j)
+            of the identity matrix.
+        ``qtf``
+            The vector (transpose(q) * fvec).
+
+    mesg : str
+        A string message giving information about the cause of failure.
+    ier : int
+        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
+        found. Otherwise, the solution was not found. In either case, the
+        optional output variable 'mesg' gives more information.
+
+    Notes
+    -----
+    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
+
+    cov_x is a Jacobian approximation to the Hessian of the least squares
+    objective function.
+    This approximation assumes that the objective function is based on the
+    difference between some observed target data (ydata) and a (non-linear)
+    function of the parameters `f(xdata, params)` ::
+
+        func(params) = ydata - f(xdata, params)
+
+    so that the objective function is ::
+
+        min   sum((ydata - f(xdata, params))**2, axis=0)
+       params
+
+    The solution, `x`, is always a 1D array, regardless of the shape of `x0`,
+    or whether `x0` is a scalar.
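+
+    Examples
+    --------
+    A minimal sketch with made-up data (fit a line through four points;
+    the values here are illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import leastsq
+    >>> xdata = np.array([0.0, 1.0, 2.0, 3.0])
+    >>> ydata = 2.0 * xdata + 1.0
+    >>> def residuals(p):
+    ...     return ydata - (p[0] * xdata + p[1])
+    >>> p_opt, ier = leastsq(residuals, [1.0, 0.0])
+    >>> np.allclose(p_opt, [2.0, 1.0])
+    True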
+ """ + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) + m = shape[0] + if n > m: + raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m)) + if epsfcn is None: + epsfcn = finfo(dtype).eps + if Dfun is None: + if maxfev == 0: + maxfev = 200*(n + 1) + retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, + gtol, maxfev, epsfcn, factor, diag) + else: + if col_deriv: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) + else: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) + if maxfev == 0: + maxfev = 100 * (n + 1) + retval = _minpack._lmder(func, Dfun, x0, args, full_output, + col_deriv, ftol, xtol, gtol, maxfev, + factor, diag) + + errors = {0: ["Improper input parameters.", TypeError], + 1: ["Both actual and predicted relative reductions " + "in the sum of squares\n are at most %f" % ftol, None], + 2: ["The relative error between two consecutive " + "iterates is at most %f" % xtol, None], + 3: ["Both actual and predicted relative reductions in " + "the sum of squares\n are at most %f and the " + "relative error between two consecutive " + "iterates is at \n most %f" % (ftol, xtol), None], + 4: ["The cosine of the angle between func(x) and any " + "column of the\n Jacobian is at most %f in " + "absolute value" % gtol, None], + 5: ["Number of calls to function has reached " + "maxfev = %d." % maxfev, ValueError], + 6: ["ftol=%f is too small, no further reduction " + "in the sum of squares\n is possible.""" % ftol, + ValueError], + 7: ["xtol=%f is too small, no further improvement in " + "the approximate\n solution is possible." % xtol, + ValueError], + 8: ["gtol=%f is too small, func(x) is orthogonal to the " + "columns of\n the Jacobian to machine " + "precision." 
% gtol, ValueError]} + + # The FORTRAN return value (possible return values are >= 0 and <= 8) + info = retval[-1] + + if full_output: + cov_x = None + if info in LEASTSQ_SUCCESS: + from numpy.dual import inv + perm = take(eye(n), retval[1]['ipvt'] - 1, 0) + r = triu(transpose(retval[1]['fjac'])[:n, :]) + R = dot(r, perm) + try: + cov_x = inv(dot(transpose(R), R)) + except (LinAlgError, ValueError): + pass + return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info) + else: + if info in LEASTSQ_FAILURE: + warnings.warn(errors[info][0], RuntimeWarning) + elif info == 0: + raise errors[info][1](errors[info][0]) + return retval[0], info + + +def _wrap_func(func, xdata, ydata, transform): + if transform is None: + def func_wrapped(params): + return func(xdata, *params) - ydata + elif transform.ndim == 1: + def func_wrapped(params): + return transform * (func(xdata, *params) - ydata) + else: + # Chisq = (y - yd)^T C^{-1} (y-yd) + # transform = L such that C = L L^T + # C^{-1} = L^{-T} L^{-1} + # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd) + # Define (y-yd)' = L^{-1} (y-yd) + # by solving + # L (y-yd)' = (y-yd) + # and minimize (y-yd)'^T (y-yd)' + def func_wrapped(params): + return solve_triangular(transform, func(xdata, *params) - ydata, lower=True) + return func_wrapped + + +def _wrap_jac(jac, xdata, transform): + if transform is None: + def jac_wrapped(params): + return jac(xdata, *params) + elif transform.ndim == 1: + def jac_wrapped(params): + return transform[:, np.newaxis] * np.asarray(jac(xdata, *params)) + else: + def jac_wrapped(params): + return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True) + return jac_wrapped + + +def _initialize_feasible(lb, ub): + p0 = np.ones_like(lb) + lb_finite = np.isfinite(lb) + ub_finite = np.isfinite(ub) + + mask = lb_finite & ub_finite + p0[mask] = 0.5 * (lb[mask] + ub[mask]) + + mask = lb_finite & ~ub_finite + p0[mask] = lb[mask] + 1 + + mask = ~lb_finite & ub_finite + p0[mask] = ub[mask] - 1 + + return p0 + + +def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, + check_finite=True, bounds=(-np.inf, np.inf), method=None, + jac=None, **kwargs): + """ + Use non-linear least squares to fit a function, f, to data. + + Assumes ``ydata = f(xdata, *params) + eps`` + + Parameters + ---------- + f : callable + The model function, f(x, ...). It must take the independent + variable as the first argument and the parameters to fit as + separate remaining arguments. + xdata : An M-length sequence or an (k,M)-shaped array for functions with k predictors + The independent variable where the data is measured. + ydata : M-length sequence + The dependent data --- nominally f(xdata, ...) + p0 : None, scalar, or N-length sequence, optional + Initial guess for the parameters. If None, then the initial + values will all be 1 (if the number of parameters for the function + can be determined using introspection, otherwise a ValueError + is raised). + sigma : None or M-length sequence or MxM array, optional + Determines the uncertainty in `ydata`. If we define residuals as + ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` + depends on its number of dimensions: + + - A 1-d `sigma` should contain values of standard deviations of + errors in `ydata`. In this case, the optimized function is + ``chisq = sum((r / sigma) ** 2)``. + + - A 2-d `sigma` should contain the covariance matrix of + errors in `ydata`. In this case, the optimized function is + ``chisq = r.T @ inv(sigma) @ r``. + + .. 
versionadded:: 0.19
+
+        None (default) is equivalent to a 1-d `sigma` filled with ones.
+    absolute_sigma : bool, optional
+        If True, `sigma` is used in an absolute sense and the estimated parameter
+        covariance `pcov` reflects these absolute values.
+
+        If False, only the relative magnitudes of the `sigma` values matter.
+        The returned parameter covariance matrix `pcov` is based on scaling
+        `sigma` by a constant factor. This constant is set by demanding that the
+        reduced `chisq` for the optimal parameters `popt` when using the
+        *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
+        match the sample variance of the residuals after the fit.
+        Mathematically,
+        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
+    check_finite : bool, optional
+        If True, check that the input arrays do not contain nans or infs,
+        and raise a ValueError if they do. Setting this parameter to
+        False may silently produce nonsensical results if the input arrays
+        do contain nans. Default is True.
+    bounds : 2-tuple of array_like, optional
+        Lower and upper bounds on parameters. Defaults to no bounds.
+        Each element of the tuple must be either an array with the length equal
+        to the number of parameters, or a scalar (in which case the bound is
+        taken to be the same for all parameters.) Use ``np.inf`` with an
+        appropriate sign to disable bounds on all or some parameters.
+
+        .. versionadded:: 0.17
+    method : {'lm', 'trf', 'dogbox'}, optional
+        Method to use for optimization. See `least_squares` for more details.
+        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
+        provided. The method 'lm' won't work when the number of observations
+        is less than the number of variables; use 'trf' or 'dogbox' in this
+        case.
+
+        .. versionadded:: 0.17
+    jac : callable, string or None, optional
+        Function with signature ``jac(x, ...)`` which computes the Jacobian
+        matrix of the model function with respect to parameters as a dense
+        array_like structure. It will be scaled according to provided `sigma`.
+        If None (default), the Jacobian will be estimated numerically.
+        String keywords for 'trf' and 'dogbox' methods can be used to select
+        a finite difference scheme, see `least_squares`.
+
+        .. versionadded:: 0.18
+    kwargs
+        Keyword arguments passed to `leastsq` for ``method='lm'`` or
+        `least_squares` otherwise.
+
+    Returns
+    -------
+    popt : array
+        Optimal values for the parameters so that the sum of the squared
+        residuals of ``f(xdata, *popt) - ydata`` is minimized
+    pcov : 2d array
+        The estimated covariance of popt. The diagonals provide the variance
+        of the parameter estimate. To compute one standard deviation errors
+        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
+
+        How the `sigma` parameter affects the estimated covariance
+        depends on `absolute_sigma` argument, as described above.
+
+        If the Jacobian matrix at the solution doesn't have a full rank, then
+        the 'lm' method returns a matrix filled with ``np.inf``; on the other
+        hand, the 'trf' and 'dogbox' methods use the Moore-Penrose
+        pseudoinverse to compute the covariance matrix.
+
+    Raises
+    ------
+    ValueError
+        if either `ydata` or `xdata` contain NaNs, or if incompatible options
+        are used.
+
+    RuntimeError
+        if the least-squares minimization fails.
+
+    OptimizeWarning
+        if covariance of the parameters cannot be estimated.
+
+    See Also
+    --------
+    least_squares : Minimize the sum of squares of nonlinear functions.
+ scipy.stats.linregress : Calculate a linear least squares regression for + two sets of measurements. + + Notes + ----- + With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm + through `leastsq`. Note that this algorithm can only deal with + unconstrained problems. + + Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to + the docstring of `least_squares` for more information. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.optimize import curve_fit + + >>> def func(x, a, b, c): + ... return a * np.exp(-b * x) + c + + Define the data to be fit with some noise: + + >>> xdata = np.linspace(0, 4, 50) + >>> y = func(xdata, 2.5, 1.3, 0.5) + >>> np.random.seed(1729) + >>> y_noise = 0.2 * np.random.normal(size=xdata.size) + >>> ydata = y + y_noise + >>> plt.plot(xdata, ydata, 'b-', label='data') + + Fit for the parameters a, b, c of the function `func`: + + >>> popt, pcov = curve_fit(func, xdata, ydata) + >>> popt + array([ 2.55423706, 1.35190947, 0.47450618]) + >>> plt.plot(xdata, func(xdata, *popt), 'r-', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + Constrain the optimization to the region of ``0 <= a <= 3``, + ``0 <= b <= 1`` and ``0 <= c <= 0.5``: + + >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) + >>> popt + array([ 2.43708906, 1. , 0.35015434]) + >>> plt.plot(xdata, func(xdata, *popt), 'g--', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + >>> plt.xlabel('x') + >>> plt.ylabel('y') + >>> plt.legend() + >>> plt.show() + + """ + if p0 is None: + # determine number of parameters by inspecting the function + from scipy._lib._util import getargspec_no_self as _getargspec + args, varargs, varkw, defaults = _getargspec(f) + if len(args) < 2: + raise ValueError("Unable to determine number of fit parameters.") + n = len(args) - 1 + else: + p0 = np.atleast_1d(p0) + n = p0.size + + lb, ub = prepare_bounds(bounds, n) + if p0 is None: + p0 = _initialize_feasible(lb, ub) + + bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) + if method is None: + if bounded_problem: + method = 'trf' + else: + method = 'lm' + + if method == 'lm' and bounded_problem: + raise ValueError("Method 'lm' only works for unconstrained problems. " + "Use 'trf' or 'dogbox' instead.") + + # NaNs can not be handled + if check_finite: + ydata = np.asarray_chkfinite(ydata) + else: + ydata = np.asarray(ydata) + + if isinstance(xdata, (list, tuple, np.ndarray)): + # `xdata` is passed straight to the user-defined `f`, so allow + # non-array_like `xdata`. 
+ if check_finite: + xdata = np.asarray_chkfinite(xdata) + else: + xdata = np.asarray(xdata) + + # Determine type of sigma + if sigma is not None: + sigma = np.asarray(sigma) + + # if 1-d, sigma are errors, define transform = 1/sigma + if sigma.shape == (ydata.size, ): + transform = 1.0 / sigma + # if 2-d, sigma is the covariance matrix, + # define transform = L such that L L^T = C + elif sigma.shape == (ydata.size, ydata.size): + try: + # scipy.linalg.cholesky requires lower=True to return L L^T = A + transform = cholesky(sigma, lower=True) + except LinAlgError: + raise ValueError("`sigma` must be positive definite.") + else: + raise ValueError("`sigma` has incorrect shape.") + else: + transform = None + + func = _wrap_func(f, xdata, ydata, transform) + if callable(jac): + jac = _wrap_jac(jac, xdata, transform) + elif jac is None and method != 'lm': + jac = '2-point' + + if method == 'lm': + # Remove full_output from kwargs, otherwise we're passing it in twice. + return_full = kwargs.pop('full_output', False) + res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) + popt, pcov, infodict, errmsg, ier = res + cost = np.sum(infodict['fvec'] ** 2) + if ier not in [1, 2, 3, 4]: + raise RuntimeError("Optimal parameters not found: " + errmsg) + else: + # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. + if 'max_nfev' not in kwargs: + kwargs['max_nfev'] = kwargs.pop('maxfev', None) + + res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, + **kwargs) + + if not res.success: + raise RuntimeError("Optimal parameters not found: " + res.message) + + cost = 2 * res.cost # res.cost is half sum of squares! + popt = res.x + + # Do Moore-Penrose inverse discarding zero singular values. + _, s, VT = svd(res.jac, full_matrices=False) + threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] + s = s[s > threshold] + VT = VT[:s.size] + pcov = np.dot(VT.T / s**2, VT) + return_full = False + + warn_cov = False + if pcov is None: + # indeterminate covariance + pcov = zeros((len(popt), len(popt)), dtype=float) + pcov.fill(inf) + warn_cov = True + elif not absolute_sigma: + if ydata.size > p0.size: + s_sq = cost / (ydata.size - p0.size) + pcov = pcov * s_sq + else: + pcov.fill(inf) + warn_cov = True + + if warn_cov: + warnings.warn('Covariance of the parameters could not be estimated', + category=OptimizeWarning) + + if return_full: + return popt, pcov, infodict, errmsg, ier + else: + return popt, pcov + + +def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): + """Perform a simple check on the gradient for correctness. 
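+
+    A minimal sketch (illustrative functions; ``good`` is nonzero when the
+    analytic Jacobian is consistent with `fcn`):
+
+    >>> import numpy as np
+    >>> from scipy.optimize.minpack import check_gradient
+    >>> good, err = check_gradient(lambda x: x**2,
+    ...                            lambda x: np.diag(2 * x),
+    ...                            np.array([1.0, 2.0]))
+    >>> bool(np.all(good))
+    True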
+ + """ + + x = atleast_1d(x0) + n = len(x) + x = x.reshape((n,)) + fvec = atleast_1d(fcn(x, *args)) + m = len(fvec) + fvec = fvec.reshape((m,)) + ldfjac = m + fjac = atleast_1d(Dfcn(x, *args)) + fjac = fjac.reshape((m, n)) + if col_deriv == 0: + fjac = transpose(fjac) + + xp = zeros((n,), float) + err = zeros((m,), float) + fvecp = None + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) + + fvecp = atleast_1d(fcn(xp, *args)) + fvecp = fvecp.reshape((m,)) + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) + + good = (product(greater(err, 0.5), axis=0)) + + return (good, err) + + +def _del2(p0, p1, d): + return p0 - np.square(p1 - p0) / d + + +def _relerr(actual, desired): + return (actual - desired) / desired + + +def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel): + p0 = x0 + for i in range(maxiter): + p1 = func(p0, *args) + if use_accel: + p2 = func(p1, *args) + d = p2 - 2.0 * p1 + p0 + p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2) + else: + p = p1 + relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p) + if np.all(np.abs(relerr) < xtol): + return p + p0 = p + msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) + raise RuntimeError(msg) + + +def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'): + """ + Find a fixed point of the function. + + Given a function of one or more variables and a starting point, find a + fixed-point of the function: i.e. where ``func(x0) == x0``. + + Parameters + ---------- + func : function + Function to evaluate. + x0 : array_like + Fixed point of function. + args : tuple, optional + Extra arguments to `func`. + xtol : float, optional + Convergence tolerance, defaults to 1e-08. + maxiter : int, optional + Maximum number of iterations, defaults to 500. + method : {"del2", "iteration"}, optional + Method of finding the fixed-point, defaults to "del2" + which uses Steffensen's Method with Aitken's ``Del^2`` + convergence acceleration [1]_. The "iteration" method simply iterates + the function until convergence is detected, without attempting to + accelerate the convergence. + + References + ---------- + .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80 + + Examples + -------- + >>> from scipy import optimize + >>> def func(x, c1, c2): + ... 
return np.sqrt(c1/(x+c2)) + >>> c1 = np.array([10,12.]) + >>> c2 = np.array([3, 5.]) + >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) + array([ 1.4920333 , 1.37228132]) + + """ + use_accel = {'del2': True, 'iteration': False}[method] + x0 = _asarray_validated(x0, as_inexact=True) + return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/minpack.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/minpack.pyc new file mode 100644 index 0000000..684c4ca Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/minpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/minpack2.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/minpack2.so new file mode 100755 index 0000000..4701b8e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/minpack2.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/moduleTNC.so b/project/venv/lib/python2.7/site-packages/scipy/optimize/moduleTNC.so new file mode 100755 index 0000000..7b2ce4a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/moduleTNC.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/nnls.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/nnls.py new file mode 100644 index 0000000..d0b8987 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/nnls.py @@ -0,0 +1,65 @@ +from __future__ import division, print_function, absolute_import + +from . import _nnls +from numpy import asarray_chkfinite, zeros, double + +__all__ = ['nnls'] + + +def nnls(A, b, maxiter=None): + """ + Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper + for a FORTRAN non-negative least squares solver. + + Parameters + ---------- + A : ndarray + Matrix ``A`` as shown above. + b : ndarray + Right-hand side vector. + maxiter: int, optional + Maximum number of iterations, optional. + Default is ``3 * A.shape[1]``. + + Returns + ------- + x : ndarray + Solution vector. + rnorm : float + The residual, ``|| Ax-b ||_2``. + + Notes + ----- + The FORTRAN code was published in the book below. The algorithm + is an active set method. It solves the KKT (Karush-Kuhn-Tucker) + conditions for the non-negative least squares problem. 
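+
+    A minimal sketch (illustrative values): here the unconstrained
+    least-squares solution is already non-negative, so ``nnls`` reproduces it.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import nnls
+    >>> A = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
+    >>> b = np.array([2.0, 1.0, 1.0])
+    >>> x, rnorm = nnls(A, b)
+    >>> np.allclose(x, [1.5, 1.0])
+    True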
+ + References + ---------- + Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM + + """ + + A, b = map(asarray_chkfinite, (A, b)) + + if len(A.shape) != 2: + raise ValueError("expected matrix") + if len(b.shape) != 1: + raise ValueError("expected vector") + + m, n = A.shape + + if m != b.shape[0]: + raise ValueError("incompatible dimensions") + + maxiter = -1 if maxiter is None else int(maxiter) + + w = zeros((n,), dtype=double) + zz = zeros((m,), dtype=double) + index = zeros((n,), dtype=int) + + x, rnorm, mode = _nnls.nnls(A, m, n, b, w, zz, index, maxiter) + if mode != 1: + raise RuntimeError("too many iterations") + + return x, rnorm diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/nnls.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/nnls.pyc new file mode 100644 index 0000000..c1387b7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/nnls.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/nonlin.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/nonlin.py new file mode 100644 index 0000000..eaa6a11 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/nonlin.py @@ -0,0 +1,1545 @@ +r""" + +Nonlinear solvers +----------------- + +.. currentmodule:: scipy.optimize + +This is a collection of general-purpose nonlinear multidimensional +solvers. These solvers find *x* for which *F(x) = 0*. Both *x* +and *F* can be multidimensional. + +Routines +~~~~~~~~ + +Large-scale nonlinear solvers: + +.. autosummary:: + + newton_krylov + anderson + +General nonlinear solvers: + +.. autosummary:: + + broyden1 + broyden2 + +Simple iterations: + +.. autosummary:: + + excitingmixing + linearmixing + diagbroyden + + +Examples +~~~~~~~~ + +**Small problem** + +>>> def F(x): +... return np.cos(x) + x[::-1] - [1, 2, 3, 4] +>>> import scipy.optimize +>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14) +>>> x +array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251]) +>>> np.cos(x) + x[::-1] +array([ 1., 2., 3., 4.]) + + +**Large problem** + +Suppose that we needed to solve the following integrodifferential +equation on the square :math:`[0,1]\times[0,1]`: + +.. math:: + + \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2 + +with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of +the square. + +The solution can be found using the `newton_krylov` solver: + +.. plot:: + + import numpy as np + from scipy.optimize import newton_krylov + from numpy import cosh, zeros_like, mgrid, zeros + + # parameters + nx, ny = 75, 75 + hx, hy = 1./(nx-1), 1./(ny-1) + + P_left, P_right = 0, 0 + P_top, P_bottom = 1, 0 + + def residual(P): + d2x = zeros_like(P) + d2y = zeros_like(P) + + d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx + d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx + d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx + + d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy + d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy + d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy + + return d2x + d2y - 10*cosh(P).mean()**2 + + # solve + guess = zeros((nx, ny), float) + sol = newton_krylov(residual, guess, method='lgmres', verbose=1) + print('Residual: %g' % abs(residual(sol)).max()) + + # visualize + import matplotlib.pyplot as plt + x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)] + plt.pcolor(x, y, sol) + plt.colorbar() + plt.show() + +""" +# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi> +# Distributed under the same license as Scipy. 
+ +from __future__ import division, print_function, absolute_import + +import sys +import numpy as np +from scipy._lib.six import callable, exec_, xrange +from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError +from numpy import asarray, dot, vdot +import scipy.sparse.linalg +import scipy.sparse +from scipy.linalg import get_blas_funcs +import inspect +from scipy._lib._util import getargspec_no_self as _getargspec +from .linesearch import scalar_search_wolfe1, scalar_search_armijo + + +__all__ = [ + 'broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'newton_krylov'] + +#------------------------------------------------------------------------------ +# Utility functions +#------------------------------------------------------------------------------ + + +class NoConvergence(Exception): + pass + + +def maxnorm(x): + return np.absolute(x).max() + + +def _as_inexact(x): + """Return `x` as an array, of either floats or complex floats""" + x = asarray(x) + if not np.issubdtype(x.dtype, np.inexact): + return asarray(x, dtype=np.float_) + return x + + +def _array_like(x, x0): + """Return ndarray `x` as same array subclass and shape as `x0`""" + x = np.reshape(x, np.shape(x0)) + wrap = getattr(x0, '__array_wrap__', x.__array_wrap__) + return wrap(x) + + +def _safe_norm(v): + if not np.isfinite(v).all(): + return np.array(np.inf) + return norm(v) + +#------------------------------------------------------------------------------ +# Generic nonlinear solver machinery +#------------------------------------------------------------------------------ + + +_doc_parts = dict( + params_basic=""" + F : function(x) -> f + Function whose root to find; should take and return an array-like + object. + xin : array_like + Initial guess for the solution + """.strip(), + params_extra=""" + iter : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + verbose : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + f_tol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + f_rtol : float, optional + Relative tolerance for the residual. If omitted, not used. + x_tol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + x_rtol : float, optional + Relative minimum step size. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of a line search to use to determine the step size in the + direction given by the Jacobian approximation. Defaults to 'armijo'. + callback : function, optional + Optional callback function. It is called on every iteration as + ``callback(x, f)`` where `x` is the current solution and `f` + the corresponding residual. + + Returns + ------- + sol : ndarray + An array (of similar array type as `x0`) containing the final solution. + + Raises + ------ + NoConvergence + When a solution was not found. 
+ + """.strip() +) + + +def _set_doc(obj): + if obj.__doc__: + obj.__doc__ = obj.__doc__ % _doc_parts + + +def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False, + maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + tol_norm=None, line_search='armijo', callback=None, + full_output=False, raise_exception=True): + """ + Find a root of a function, in a way suitable for large-scale problems. + + Parameters + ---------- + %(params_basic)s + jacobian : Jacobian + A Jacobian approximation: `Jacobian` object or something that + `asjacobian` can transform to one. Alternatively, a string specifying + which of the builtin Jacobian approximations to use: + + krylov, broyden1, broyden2, anderson + diagbroyden, linearmixing, excitingmixing + + %(params_extra)s + full_output : bool + If true, returns a dictionary `info` containing convergence + information. + raise_exception : bool + If True, a `NoConvergence` exception is raise if no solution is found. + + See Also + -------- + asjacobian, Jacobian + + Notes + ----- + This algorithm implements the inexact Newton method, with + backtracking or full line searches. Several Jacobian + approximations are available, including Krylov and Quasi-Newton + methods. + + References + ---------- + .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear + Equations\". Society for Industrial and Applied Mathematics. (1995) + https://archive.siam.org/books/kelley/fr16/ + + """ + # Can't use default parameters because it's being explicitly passed as None + # from the calling function, so we need to set it here. + tol_norm = maxnorm if tol_norm is None else tol_norm + condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol, + x_tol=x_tol, x_rtol=x_rtol, + iter=iter, norm=tol_norm) + + x0 = _as_inexact(x0) + func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten() + x = x0.flatten() + + dx = np.inf + Fx = func(x) + Fx_norm = norm(Fx) + + jacobian = asjacobian(jacobian) + jacobian.setup(x.copy(), Fx, func) + + if maxiter is None: + if iter is not None: + maxiter = iter + 1 + else: + maxiter = 100*(x.size+1) + + if line_search is True: + line_search = 'armijo' + elif line_search is False: + line_search = None + + if line_search not in (None, 'armijo', 'wolfe'): + raise ValueError("Invalid line search") + + # Solver tolerance selection + gamma = 0.9 + eta_max = 0.9999 + eta_treshold = 0.1 + eta = 1e-3 + + for n in xrange(maxiter): + status = condition.check(Fx, x, dx) + if status: + break + + # The tolerance, as computed for scipy.sparse.linalg.* routines + tol = min(eta, eta*Fx_norm) + dx = -jacobian.solve(Fx, tol=tol) + + if norm(dx) == 0: + raise ValueError("Jacobian inversion yielded zero vector. 
" + "This indicates a bug in the Jacobian " + "approximation.") + + # Line search, or Newton step + if line_search: + s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx, + line_search) + else: + s = 1.0 + x = x + dx + Fx = func(x) + Fx_norm_new = norm(Fx) + + jacobian.update(x.copy(), Fx) + + if callback: + callback(x, Fx) + + # Adjust forcing parameters for inexact methods + eta_A = gamma * Fx_norm_new**2 / Fx_norm**2 + if gamma * eta**2 < eta_treshold: + eta = min(eta_max, eta_A) + else: + eta = min(eta_max, max(eta_A, gamma*eta**2)) + + Fx_norm = Fx_norm_new + + # Print status + if verbose: + sys.stdout.write("%d: |F(x)| = %g; step %g\n" % ( + n, tol_norm(Fx), s)) + sys.stdout.flush() + else: + if raise_exception: + raise NoConvergence(_array_like(x, x0)) + else: + status = 2 + + if full_output: + info = {'nit': condition.iteration, + 'fun': Fx, + 'status': status, + 'success': status == 1, + 'message': {1: 'A solution was found at the specified ' + 'tolerance.', + 2: 'The maximum number of iterations allowed ' + 'has been reached.' + }[status] + } + return _array_like(x, x0), info + else: + return _array_like(x, x0) + + +_set_doc(nonlin_solve) + + +def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8, + smin=1e-2): + tmp_s = [0] + tmp_Fx = [Fx] + tmp_phi = [norm(Fx)**2] + s_norm = norm(x) / norm(dx) + + def phi(s, store=True): + if s == tmp_s[0]: + return tmp_phi[0] + xt = x + s*dx + v = func(xt) + p = _safe_norm(v)**2 + if store: + tmp_s[0] = s + tmp_phi[0] = p + tmp_Fx[0] = v + return p + + def derphi(s): + ds = (abs(s) + s_norm + 1) * rdiff + return (phi(s+ds, store=False) - phi(s)) / ds + + if search_type == 'wolfe': + s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0], + xtol=1e-2, amin=smin) + elif search_type == 'armijo': + s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], + amin=smin) + + if s is None: + # XXX: No suitable step length found. Take the full Newton step, + # and hope for the best. + s = 1.0 + + x = x + s*dx + if s == tmp_s[0]: + Fx = tmp_Fx[0] + else: + Fx = func(x) + Fx_norm = norm(Fx) + + return s, x, Fx, Fx_norm + + +class TerminationCondition(object): + """ + Termination condition for an iteration. 
It is terminated if + + - |F| < f_rtol*|F_0|, AND + - |F| < f_tol + + AND + + - |dx| < x_rtol*|x|, AND + - |dx| < x_tol + + """ + def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + iter=None, norm=maxnorm): + + if f_tol is None: + f_tol = np.finfo(np.float_).eps ** (1./3) + if f_rtol is None: + f_rtol = np.inf + if x_tol is None: + x_tol = np.inf + if x_rtol is None: + x_rtol = np.inf + + self.x_tol = x_tol + self.x_rtol = x_rtol + self.f_tol = f_tol + self.f_rtol = f_rtol + + self.norm = norm + + self.iter = iter + + self.f0_norm = None + self.iteration = 0 + + def check(self, f, x, dx): + self.iteration += 1 + f_norm = self.norm(f) + x_norm = self.norm(x) + dx_norm = self.norm(dx) + + if self.f0_norm is None: + self.f0_norm = f_norm + + if f_norm == 0: + return 1 + + if self.iter is not None: + # backwards compatibility with Scipy 0.6.0 + return 2 * (self.iteration > self.iter) + + # NB: condition must succeed for rtol=inf even if norm == 0 + return int((f_norm <= self.f_tol + and f_norm/self.f_rtol <= self.f0_norm) + and (dx_norm <= self.x_tol + and dx_norm/self.x_rtol <= x_norm)) + + +#------------------------------------------------------------------------------ +# Generic Jacobian approximation +#------------------------------------------------------------------------------ + +class Jacobian(object): + """ + Common interface for Jacobians or Jacobian approximations. + + The optional methods come useful when implementing trust region + etc. algorithms that often require evaluating transposes of the + Jacobian. + + Methods + ------- + solve + Returns J^-1 * v + update + Updates Jacobian to point `x` (where the function has residual `Fx`) + + matvec : optional + Returns J * v + rmatvec : optional + Returns A^H * v + rsolve : optional + Returns A^-H * v + matmat : optional + Returns A * V, where V is a dense matrix with dimensions (N,K). + todense : optional + Form the dense Jacobian matrix. Necessary for dense trust region + algorithms, and useful for testing. + + Attributes + ---------- + shape + Matrix dimensions (M, N) + dtype + Data type of the matrix. + func : callable, optional + Function the Jacobian corresponds to + + """ + + def __init__(self, **kw): + names = ["solve", "update", "matvec", "rmatvec", "rsolve", + "matmat", "todense", "shape", "dtype"] + for name, value in kw.items(): + if name not in names: + raise ValueError("Unknown keyword argument %s" % name) + if value is not None: + setattr(self, name, kw[name]) + + if hasattr(self, 'todense'): + self.__array__ = lambda: self.todense() + + def aspreconditioner(self): + return InverseJacobian(self) + + def solve(self, v, tol=0): + raise NotImplementedError + + def update(self, x, F): + pass + + def setup(self, x, F, func): + self.func = func + self.shape = (F.size, x.size) + self.dtype = F.dtype + if self.__class__.setup is Jacobian.setup: + # Call on the first point unless overridden + self.update(x, F) + + +class InverseJacobian(object): + def __init__(self, jacobian): + self.jacobian = jacobian + self.matvec = jacobian.solve + self.update = jacobian.update + if hasattr(jacobian, 'setup'): + self.setup = jacobian.setup + if hasattr(jacobian, 'rsolve'): + self.rmatvec = jacobian.rsolve + + @property + def shape(self): + return self.jacobian.shape + + @property + def dtype(self): + return self.jacobian.dtype + + +def asjacobian(J): + """ + Convert given object to one suitable for use as a Jacobian. 
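+
+    A minimal sketch (illustrative): an ndarray is wrapped so that
+    ``solve`` applies the inverse of the given matrix.
+
+    >>> import numpy as np
+    >>> from scipy.optimize.nonlin import asjacobian
+    >>> J = asjacobian(2.0 * np.eye(2))
+    >>> np.allclose(J.solve(np.array([2.0, 4.0])), [1.0, 2.0])
+    True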
+ """ + spsolve = scipy.sparse.linalg.spsolve + if isinstance(J, Jacobian): + return J + elif inspect.isclass(J) and issubclass(J, Jacobian): + return J() + elif isinstance(J, np.ndarray): + if J.ndim > 2: + raise ValueError('array must have rank <= 2') + J = np.atleast_2d(np.asarray(J)) + if J.shape[0] != J.shape[1]: + raise ValueError('array must be square') + + return Jacobian(matvec=lambda v: dot(J, v), + rmatvec=lambda v: dot(J.conj().T, v), + solve=lambda v: solve(J, v), + rsolve=lambda v: solve(J.conj().T, v), + dtype=J.dtype, shape=J.shape) + elif scipy.sparse.isspmatrix(J): + if J.shape[0] != J.shape[1]: + raise ValueError('matrix must be square') + return Jacobian(matvec=lambda v: J*v, + rmatvec=lambda v: J.conj().T * v, + solve=lambda v: spsolve(J, v), + rsolve=lambda v: spsolve(J.conj().T, v), + dtype=J.dtype, shape=J.shape) + elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'): + return Jacobian(matvec=getattr(J, 'matvec'), + rmatvec=getattr(J, 'rmatvec'), + solve=J.solve, + rsolve=getattr(J, 'rsolve'), + update=getattr(J, 'update'), + setup=getattr(J, 'setup'), + dtype=J.dtype, + shape=J.shape) + elif callable(J): + # Assume it's a function J(x) that returns the Jacobian + class Jac(Jacobian): + def update(self, x, F): + self.x = x + + def solve(self, v, tol=0): + m = J(self.x) + if isinstance(m, np.ndarray): + return solve(m, v) + elif scipy.sparse.isspmatrix(m): + return spsolve(m, v) + else: + raise ValueError("Unknown matrix type") + + def matvec(self, v): + m = J(self.x) + if isinstance(m, np.ndarray): + return dot(m, v) + elif scipy.sparse.isspmatrix(m): + return m*v + else: + raise ValueError("Unknown matrix type") + + def rsolve(self, v, tol=0): + m = J(self.x) + if isinstance(m, np.ndarray): + return solve(m.conj().T, v) + elif scipy.sparse.isspmatrix(m): + return spsolve(m.conj().T, v) + else: + raise ValueError("Unknown matrix type") + + def rmatvec(self, v): + m = J(self.x) + if isinstance(m, np.ndarray): + return dot(m.conj().T, v) + elif scipy.sparse.isspmatrix(m): + return m.conj().T * v + else: + raise ValueError("Unknown matrix type") + return Jac() + elif isinstance(J, str): + return dict(broyden1=BroydenFirst, + broyden2=BroydenSecond, + anderson=Anderson, + diagbroyden=DiagBroyden, + linearmixing=LinearMixing, + excitingmixing=ExcitingMixing, + krylov=KrylovJacobian)[J]() + else: + raise TypeError('Cannot convert object to a Jacobian') + + +#------------------------------------------------------------------------------ +# Broyden +#------------------------------------------------------------------------------ + +class GenericBroyden(Jacobian): + def setup(self, x0, f0, func): + Jacobian.setup(self, x0, f0, func) + self.last_f = f0 + self.last_x = x0 + + if hasattr(self, 'alpha') and self.alpha is None: + # Autoscale the initial Jacobian parameter + # unless we have already guessed the solution. + normf0 = norm(f0) + if normf0: + self.alpha = 0.5*max(norm(x0), 1) / normf0 + else: + self.alpha = 1.0 + + def _update(self, x, f, dx, df, dx_norm, df_norm): + raise NotImplementedError + + def update(self, x, f): + df = f - self.last_f + dx = x - self.last_x + self._update(x, f, dx, df, norm(dx), norm(df)) + self.last_f = f + self.last_x = x + + +class LowRankMatrix(object): + r""" + A matrix represented as + + .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger + + However, if the rank of the matrix reaches the dimension of the vectors, + full matrix representation will be used thereon. 
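+
+    A minimal sketch (illustrative): start from ``2*I`` and append one
+    rank-one term ``c d^H`` with ``c = d = (1, 1, 1)``.
+
+    >>> import numpy as np
+    >>> from scipy.optimize.nonlin import LowRankMatrix
+    >>> M = LowRankMatrix(2.0, 3, float)
+    >>> M.append(np.ones(3), np.ones(3))
+    >>> np.allclose(M.matvec(np.array([1.0, 0.0, 0.0])), [3.0, 1.0, 1.0])
+    True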
+ + """ + + def __init__(self, alpha, n, dtype): + self.alpha = alpha + self.cs = [] + self.ds = [] + self.n = n + self.dtype = dtype + self.collapsed = None + + @staticmethod + def _matvec(v, alpha, cs, ds): + axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'], + cs[:1] + [v]) + w = alpha * v + for c, d in zip(cs, ds): + a = dotc(d, v) + w = axpy(c, w, w.size, a) + return w + + @staticmethod + def _solve(v, alpha, cs, ds): + """Evaluate w = M^-1 v""" + if len(cs) == 0: + return v/alpha + + # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 + + axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) + + c0 = cs[0] + A = alpha * np.identity(len(cs), dtype=c0.dtype) + for i, d in enumerate(ds): + for j, c in enumerate(cs): + A[i,j] += dotc(d, c) + + q = np.zeros(len(cs), dtype=c0.dtype) + for j, d in enumerate(ds): + q[j] = dotc(d, v) + q /= alpha + q = solve(A, q) + + w = v/alpha + for c, qc in zip(cs, q): + w = axpy(c, w, w.size, -qc) + + return w + + def matvec(self, v): + """Evaluate w = M v""" + if self.collapsed is not None: + return np.dot(self.collapsed, v) + return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) + + def rmatvec(self, v): + """Evaluate w = M^H v""" + if self.collapsed is not None: + return np.dot(self.collapsed.T.conj(), v) + return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) + + def solve(self, v, tol=0): + """Evaluate w = M^-1 v""" + if self.collapsed is not None: + return solve(self.collapsed, v) + return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) + + def rsolve(self, v, tol=0): + """Evaluate w = M^-H v""" + if self.collapsed is not None: + return solve(self.collapsed.T.conj(), v) + return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) + + def append(self, c, d): + if self.collapsed is not None: + self.collapsed += c[:,None] * d[None,:].conj() + return + + self.cs.append(c) + self.ds.append(d) + + if len(self.cs) > c.size: + self.collapse() + + def __array__(self): + if self.collapsed is not None: + return self.collapsed + + Gm = self.alpha*np.identity(self.n, dtype=self.dtype) + for c, d in zip(self.cs, self.ds): + Gm += c[:,None]*d[None,:].conj() + return Gm + + def collapse(self): + """Collapse the low-rank matrix to a full-rank one.""" + self.collapsed = np.array(self) + self.cs = None + self.ds = None + self.alpha = None + + def restart_reduce(self, rank): + """ + Reduce the rank of the matrix by dropping all vectors. + """ + if self.collapsed is not None: + return + assert rank > 0 + if len(self.cs) > rank: + del self.cs[:] + del self.ds[:] + + def simple_reduce(self, rank): + """ + Reduce the rank of the matrix by dropping oldest vectors. + """ + if self.collapsed is not None: + return + assert rank > 0 + while len(self.cs) > rank: + del self.cs[0] + del self.ds[0] + + def svd_reduce(self, max_rank, to_retain=None): + """ + Reduce the rank of the matrix by retaining some SVD components. + + This corresponds to the \"Broyden Rank Reduction Inverse\" + algorithm described in [1]_. + + Note that the SVD decomposition can be done by solving only a + problem whose size is the effective rank of this matrix, which + is viable even for large problems. + + Parameters + ---------- + max_rank : int + Maximum rank of this matrix after reduction. + to_retain : int, optional + Number of SVD components to retain when reduction is done + (ie. rank > max_rank). Default is ``max_rank - 2``. + + References + ---------- + .. [1] B.A. 
van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + """ + if self.collapsed is not None: + return + + p = max_rank + if to_retain is not None: + q = to_retain + else: + q = p - 2 + + if self.cs: + p = min(p, len(self.cs[0])) + q = max(0, min(q, p-1)) + + m = len(self.cs) + if m < p: + # nothing to do + return + + C = np.array(self.cs).T + D = np.array(self.ds).T + + D, R = qr(D, mode='economic') + C = dot(C, R.T.conj()) + + U, S, WH = svd(C, full_matrices=False, compute_uv=True) + + C = dot(C, inv(WH)) + D = dot(D, WH.T.conj()) + + for k in xrange(q): + self.cs[k] = C[:,k].copy() + self.ds[k] = D[:,k].copy() + + del self.cs[q:] + del self.ds[q:] + + +_doc_parts['broyden_params'] = """ + alpha : float, optional + Initial guess for the Jacobian is ``(-1/alpha)``. + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden matrix + stays low. Can either be a string giving the name of the method, + or a tuple of the form ``(method, param1, param2, ...)`` + that gives the name of the method and values for additional parameters. + + Methods available: + + - ``restart``: drop all matrix columns. Has no extra parameters. + - ``simple``: drop oldest matrix column. Has no extra parameters. + - ``svd``: keep only the most significant SVD components. + Takes an extra parameter, ``to_retain``, which determines the + number of SVD components to retain when rank reduction is done. + Default is ``max_rank - 2``. + + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (ie., no rank reduction). + """.strip() + + +class BroydenFirst(GenericBroyden): + r""" + Find a root of a function, using Broyden's first Jacobian approximation. + + This method is also known as \"Broyden's good method\". + + Parameters + ---------- + %(params_basic)s + %(broyden_params)s + %(params_extra)s + + Notes + ----- + This algorithm implements the inverse Jacobian Quasi-Newton update + + .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df) + + which corresponds to Broyden's first Jacobian update + + .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx + + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). 
+ + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + """ + + def __init__(self, alpha=None, reduction_method='restart', max_rank=None): + GenericBroyden.__init__(self) + self.alpha = alpha + self.Gm = None + + if max_rank is None: + max_rank = np.inf + self.max_rank = max_rank + + if isinstance(reduction_method, str): + reduce_params = () + else: + reduce_params = reduction_method[1:] + reduction_method = reduction_method[0] + reduce_params = (max_rank - 1,) + reduce_params + + if reduction_method == 'svd': + self._reduce = lambda: self.Gm.svd_reduce(*reduce_params) + elif reduction_method == 'simple': + self._reduce = lambda: self.Gm.simple_reduce(*reduce_params) + elif reduction_method == 'restart': + self._reduce = lambda: self.Gm.restart_reduce(*reduce_params) + else: + raise ValueError("Unknown rank reduction method '%s'" % + reduction_method) + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype) + + def todense(self): + return inv(self.Gm) + + def solve(self, f, tol=0): + r = self.Gm.matvec(f) + if not np.isfinite(r).all(): + # singular; reset the Jacobian approximation + self.setup(self.last_x, self.last_f, self.func) + return self.Gm.matvec(f) + + def matvec(self, f): + return self.Gm.solve(f) + + def rsolve(self, f, tol=0): + return self.Gm.rmatvec(f) + + def rmatvec(self, f): + return self.Gm.rsolve(f) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self._reduce() # reduce first to preserve secant condition + + v = self.Gm.rmatvec(dx) + c = dx - self.Gm.matvec(df) + d = v / vdot(df, v) + + self.Gm.append(c, d) + + +class BroydenSecond(BroydenFirst): + """ + Find a root of a function, using Broyden\'s second Jacobian approximation. + + This method is also known as \"Broyden's bad method\". + + Parameters + ---------- + %(params_basic)s + %(broyden_params)s + %(params_extra)s + + Notes + ----- + This algorithm implements the inverse Jacobian Quasi-Newton update + + .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df) + + corresponding to Broyden's second method. + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + """ + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self._reduce() # reduce first to preserve secant condition + + v = df + c = dx - self.Gm.matvec(df) + d = v / df_norm**2 + self.Gm.append(c, d) + + +#------------------------------------------------------------------------------ +# Broyden-like (restricted memory) +#------------------------------------------------------------------------------ + +class Anderson(GenericBroyden): + """ + Find a root of a function, using (extended) Anderson mixing. + + The Jacobian is formed by for a 'best' solution in the space + spanned by last `M` vectors. As a result, only a MxM matrix + inversions and MxN multiplications are required. [Ey]_ + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + M : float, optional + Number of previous vectors to retain. Defaults to 5. + w0 : float, optional + Regularization parameter for numerical stability. + Compared to unity, good values of the order of 0.01. 
+ %(params_extra)s + + References + ---------- + .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). + + """ + + # Note: + # + # Anderson method maintains a rank M approximation of the inverse Jacobian, + # + # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v + # A = W + dF^H dF + # W = w0^2 diag(dF^H dF) + # + # so that for w0 = 0 the secant condition applies for last M iterates, ie., + # + # J^-1 df_j = dx_j + # + # for all j = 0 ... M-1. + # + # Moreover, (from Sherman-Morrison-Woodbury formula) + # + # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v + # C = (dX + alpha dF) A^-1 + # b = -1/alpha + # + # and after simplification + # + # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v + # + + def __init__(self, alpha=None, w0=0.01, M=5): + GenericBroyden.__init__(self) + self.alpha = alpha + self.M = M + self.dx = [] + self.df = [] + self.gamma = None + self.w0 = w0 + + def solve(self, f, tol=0): + dx = -self.alpha*f + + n = len(self.dx) + if n == 0: + return dx + + df_f = np.empty(n, dtype=f.dtype) + for k in xrange(n): + df_f[k] = vdot(self.df[k], f) + + try: + gamma = solve(self.a, df_f) + except LinAlgError: + # singular; reset the Jacobian approximation + del self.dx[:] + del self.df[:] + return dx + + for m in xrange(n): + dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m]) + return dx + + def matvec(self, f): + dx = -f/self.alpha + + n = len(self.dx) + if n == 0: + return dx + + df_f = np.empty(n, dtype=f.dtype) + for k in xrange(n): + df_f[k] = vdot(self.df[k], f) + + b = np.empty((n, n), dtype=f.dtype) + for i in xrange(n): + for j in xrange(n): + b[i,j] = vdot(self.df[i], self.dx[j]) + if i == j and self.w0 != 0: + b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha + gamma = solve(b, df_f) + + for m in xrange(n): + dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha) + return dx + + def _update(self, x, f, dx, df, dx_norm, df_norm): + if self.M == 0: + return + + self.dx.append(dx) + self.df.append(df) + + while len(self.dx) > self.M: + self.dx.pop(0) + self.df.pop(0) + + n = len(self.dx) + a = np.zeros((n, n), dtype=f.dtype) + + for i in xrange(n): + for j in xrange(i, n): + if i == j: + wd = self.w0**2 + else: + wd = 0 + a[i,j] = (1+wd)*vdot(self.df[i], self.df[j]) + + a += np.triu(a, 1).T.conj() + self.a = a + +#------------------------------------------------------------------------------ +# Simple iterations +#------------------------------------------------------------------------------ + + +class DiagBroyden(GenericBroyden): + """ + Find a root of a function, using diagonal Broyden Jacobian approximation. + + The Jacobian approximation is derived from previous iterations, by + retaining only the diagonal of Broyden matrices. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). 
+ %(params_extra)s + """ + + def __init__(self, alpha=None): + GenericBroyden.__init__(self) + self.alpha = alpha + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha + + def solve(self, f, tol=0): + return -f / self.d + + def matvec(self, f): + return -f * self.d + + def rsolve(self, f, tol=0): + return -f / self.d.conj() + + def rmatvec(self, f): + return -f * self.d.conj() + + def todense(self): + return np.diag(-self.d) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self.d -= (df + self.d*dx)*dx/dx_norm**2 + + +class LinearMixing(GenericBroyden): + """ + Find a root of a function, using a scalar Jacobian approximation. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + The Jacobian approximation is (-1/alpha). + %(params_extra)s + """ + + def __init__(self, alpha=None): + GenericBroyden.__init__(self) + self.alpha = alpha + + def solve(self, f, tol=0): + return -f*self.alpha + + def matvec(self, f): + return -f/self.alpha + + def rsolve(self, f, tol=0): + return -f*np.conj(self.alpha) + + def rmatvec(self, f): + return -f/np.conj(self.alpha) + + def todense(self): + return np.diag(np.full(self.shape[0], -1/self.alpha)) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + pass + + +class ExcitingMixing(GenericBroyden): + """ + Find a root of a function, using a tuned diagonal Jacobian approximation. + + The Jacobian matrix is diagonal and is tuned on each iteration. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial Jacobian approximation is (-1/alpha). + alphamax : float, optional + The entries of the diagonal Jacobian are kept in the range + ``[alpha, alphamax]``. + %(params_extra)s + """ + + def __init__(self, alpha=None, alphamax=1.0): + GenericBroyden.__init__(self) + self.alpha = alpha + self.alphamax = alphamax + self.beta = None + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype) + + def solve(self, f, tol=0): + return -f*self.beta + + def matvec(self, f): + return -f/self.beta + + def rsolve(self, f, tol=0): + return -f*self.beta.conj() + + def rmatvec(self, f): + return -f/self.beta.conj() + + def todense(self): + return np.diag(-1/self.beta) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + incr = f*self.last_f > 0 + self.beta[incr] += self.alpha + self.beta[~incr] = self.alpha + np.clip(self.beta, 0, self.alphamax, out=self.beta) + + +#------------------------------------------------------------------------------ +# Iterative/Krylov approximated Jacobians +#------------------------------------------------------------------------------ + +class KrylovJacobian(Jacobian): + r""" + Find a root of a function, using Krylov approximation for inverse Jacobian. + + This method is suitable for solving large-scale problems. + + Parameters + ---------- + %(params_basic)s + rdiff : float, optional + Relative step size to use in numerical differentiation. + method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function + Krylov method to use to approximate the Jacobian. 
+ Can be a string, or a function implementing the same interface as + the iterative solvers in `scipy.sparse.linalg`. + + The default is `scipy.sparse.linalg.lgmres`. + inner_M : LinearOperator or InverseJacobian + Preconditioner for the inner Krylov iteration. + Note that you can use also inverse Jacobians as (adaptive) + preconditioners. For example, + + >>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian + >>> from scipy.optimize.nonlin import InverseJacobian + >>> jac = BroydenFirst() + >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac)) + + If the preconditioner has a method named 'update', it will be called + as ``update(x, f)`` after each nonlinear step, with ``x`` giving + the current point, and ``f`` the current function value. + inner_tol, inner_maxiter, ... + Parameters to pass on to the \"inner\" Krylov solver. + See `scipy.sparse.linalg.gmres` for details. + outer_k : int, optional + Size of the subspace kept across LGMRES nonlinear iterations. + See `scipy.sparse.linalg.lgmres` for details. + %(params_extra)s + + See Also + -------- + scipy.sparse.linalg.gmres + scipy.sparse.linalg.lgmres + + Notes + ----- + This function implements a Newton-Krylov solver. The basic idea is + to compute the inverse of the Jacobian with an iterative Krylov + method. These methods require only evaluating the Jacobian-vector + products, which are conveniently approximated by a finite difference: + + .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega + + Due to the use of iterative matrix inverses, these methods can + deal with large nonlinear problems. + + Scipy's `scipy.sparse.linalg` module offers a selection of Krylov + solvers to choose from. The default here is `lgmres`, which is a + variant of restarted GMRES iteration that reuses some of the + information obtained in the previous Newton steps to invert + Jacobians in subsequent steps. + + For a review on Newton-Krylov methods, see for example [1]_, + and for the LGMRES sparse inverse method, see [2]_. + + References + ---------- + .. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004). + :doi:`10.1016/j.jcp.2003.08.010` + .. [2] A.H. Baker and E.R. Jessup and T. Manteuffel, + SIAM J. Matrix Anal. Appl. 26, 962 (2005). 
+ :doi:`10.1137/S0895479803422014` + + """ + + def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20, + inner_M=None, outer_k=10, **kw): + self.preconditioner = inner_M + self.rdiff = rdiff + self.method = dict( + bicgstab=scipy.sparse.linalg.bicgstab, + gmres=scipy.sparse.linalg.gmres, + lgmres=scipy.sparse.linalg.lgmres, + cgs=scipy.sparse.linalg.cgs, + minres=scipy.sparse.linalg.minres, + ).get(method, method) + + self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner) + + if self.method is scipy.sparse.linalg.gmres: + # Replace GMRES's outer iteration with Newton steps + self.method_kw['restrt'] = inner_maxiter + self.method_kw['maxiter'] = 1 + self.method_kw.setdefault('atol', 0) + elif self.method is scipy.sparse.linalg.gcrotmk: + self.method_kw.setdefault('atol', 0) + elif self.method is scipy.sparse.linalg.lgmres: + self.method_kw['outer_k'] = outer_k + # Replace LGMRES's outer iteration with Newton steps + self.method_kw['maxiter'] = 1 + # Carry LGMRES's `outer_v` vectors across nonlinear iterations + self.method_kw.setdefault('outer_v', []) + self.method_kw.setdefault('prepend_outer_v', True) + # But don't carry the corresponding Jacobian*v products, in case + # the Jacobian changes a lot in the nonlinear step + # + # XXX: some trust-region inspired ideas might be more efficient... + # See eg. Brown & Saad. But needs to be implemented separately + # since it's not an inexact Newton method. + self.method_kw.setdefault('store_outer_Av', False) + self.method_kw.setdefault('atol', 0) + + for key, value in kw.items(): + if not key.startswith('inner_'): + raise ValueError("Unknown parameter %s" % key) + self.method_kw[key[6:]] = value + + def _update_diff_step(self): + mx = abs(self.x0).max() + mf = abs(self.f0).max() + self.omega = self.rdiff * max(1, mx) / max(1, mf) + + def matvec(self, v): + nv = norm(v) + if nv == 0: + return 0*v + sc = self.omega / nv + r = (self.func(self.x0 + sc*v) - self.f0) / sc + if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)): + raise ValueError('Function returned non-finite results') + return r + + def solve(self, rhs, tol=0): + if 'tol' in self.method_kw: + sol, info = self.method(self.op, rhs, **self.method_kw) + else: + sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw) + return sol + + def update(self, x, f): + self.x0 = x + self.f0 = f + self._update_diff_step() + + # Update also the preconditioner, if possible + if self.preconditioner is not None: + if hasattr(self.preconditioner, 'update'): + self.preconditioner.update(x, f) + + def setup(self, x, f, func): + Jacobian.setup(self, x, f, func) + self.x0 = x + self.f0 = f + self.op = scipy.sparse.linalg.aslinearoperator(self) + + if self.rdiff is None: + self.rdiff = np.finfo(x.dtype).eps ** (1./2) + + self._update_diff_step() + + # Setup also the preconditioner, if possible + if self.preconditioner is not None: + if hasattr(self.preconditioner, 'setup'): + self.preconditioner.setup(x, f, func) + + +#------------------------------------------------------------------------------ +# Wrapper functions +#------------------------------------------------------------------------------ + +def _nonlin_wrapper(name, jac): + """ + Construct a solver wrapper with given name and jacobian approx. 
+ + It inspects the keyword arguments of ``jac.__init__``, and allows to + use the same arguments in the wrapper function, in addition to the + keyword arguments of `nonlin_solve` + + """ + args, varargs, varkw, defaults = _getargspec(jac.__init__) + kwargs = list(zip(args[-len(defaults):], defaults)) + kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs]) + if kw_str: + kw_str = ", " + kw_str + kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs]) + if kwkw_str: + kwkw_str = kwkw_str + ", " + + # Construct the wrapper function so that its keyword arguments + # are visible in pydoc.help etc. + wrapper = """ +def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, + f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + tol_norm=None, line_search='armijo', callback=None, **kw): + jac = %(jac)s(%(kwkw)s **kw) + return nonlin_solve(F, xin, jac, iter, verbose, maxiter, + f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search, + callback) +""" + + wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, + kwkw=kwkw_str) + ns = {} + ns.update(globals()) + exec_(wrapper, ns) + func = ns[name] + func.__doc__ = jac.__doc__ + _set_doc(func) + return func + + +broyden1 = _nonlin_wrapper('broyden1', BroydenFirst) +broyden2 = _nonlin_wrapper('broyden2', BroydenSecond) +anderson = _nonlin_wrapper('anderson', Anderson) +linearmixing = _nonlin_wrapper('linearmixing', LinearMixing) +diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden) +excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing) +newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/nonlin.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/nonlin.pyc new file mode 100644 index 0000000..1d6f520 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/nonlin.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/optimize.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/optimize.py new file mode 100644 index 0000000..3e50513 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/optimize.py @@ -0,0 +1,3135 @@ +#__docformat__ = "restructuredtext en" +# ******NOTICE*************** +# optimize.py module by Travis E. Oliphant +# +# You may copy and use this module as you see fit with no +# guarantee implied provided you keep this notice in all copies. +# *****END NOTICE************ + +# A collection of optimization algorithms. Version 0.5 +# CHANGES +# Added fminbound (July 2001) +# Added brute (Aug. 2002) +# Finished line search satisfying strong Wolfe conditions (Mar. 2004) +# Updated strong Wolfe conditions line search to use +# cubic-interpolation (Mar. 
2004) + +from __future__ import division, print_function, absolute_import + + +# Minimization routines + +__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg', + 'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der', + 'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', + 'line_search', 'check_grad', 'OptimizeResult', 'show_options', + 'OptimizeWarning'] + +__docformat__ = "restructuredtext en" + +import warnings +import sys +import numpy +from scipy._lib.six import callable, xrange +from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze, + vectorize, asarray, sqrt, Inf, asfarray, isinf) +import numpy as np +from .linesearch import (line_search_wolfe1, line_search_wolfe2, + line_search_wolfe2 as line_search, + LineSearchWarning) +from scipy._lib._util import getargspec_no_self as _getargspec + + +# standard status messages of optimizers +_status_message = {'success': 'Optimization terminated successfully.', + 'maxfev': 'Maximum number of function evaluations has ' + 'been exceeded.', + 'maxiter': 'Maximum number of iterations has been ' + 'exceeded.', + 'pr_loss': 'Desired error not necessarily achieved due ' + 'to precision loss.'} + + +class MemoizeJac(object): + """ Decorator that caches the value gradient of function each time it + is called. """ + def __init__(self, fun): + self.fun = fun + self.jac = None + self.x = None + + def __call__(self, x, *args): + self.x = numpy.asarray(x).copy() + fg = self.fun(x, *args) + self.jac = fg[1] + return fg[0] + + def derivative(self, x, *args): + if self.jac is not None and numpy.alltrue(x == self.x): + return self.jac + else: + self(x, *args) + return self.jac + + +class OptimizeResult(dict): + """ Represents the optimization result. + + Attributes + ---------- + x : ndarray + The solution of the optimization. + success : bool + Whether or not the optimizer exited successfully. + status : int + Termination status of the optimizer. Its value depends on the + underlying solver. Refer to `message` for details. + message : str + Description of the cause of the termination. + fun, jac, hess: ndarray + Values of objective function, its Jacobian and its Hessian (if + available). The Hessians may be approximations, see the documentation + of the function in question. + hess_inv : object + Inverse of the objective function's Hessian; may be an approximation. + Not available for all solvers. The type of this attribute may be + either np.ndarray or scipy.sparse.linalg.LinearOperator. + nfev, njev, nhev : int + Number of evaluations of the objective functions and of its + Jacobian and Hessian. + nit : int + Number of iterations performed by the optimizer. + maxcv : float + The maximum constraint violation. + + Notes + ----- + There may be additional attributes not listed above depending of the + specific solver. Since this class is essentially a subclass of dict + with attribute accessors, one can see which attributes are available + using the `keys()` method. 
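+
+    Examples
+    --------
+    A minimal sketch (`rosen` is the Rosenbrock test function from
+    `scipy.optimize`); mapping-style and attribute-style access are
+    equivalent:
+
+    >>> from scipy.optimize import minimize, rosen
+    >>> res = minimize(rosen, [1.3, 0.7], method='Nelder-Mead')
+    >>> x_by_key = res['x']   # mapping-style access
+    >>> x_by_attr = res.x     # the same array, attribute-style
+    >>> names = sorted(res.keys())  # which attributes this solver filled in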
+ """ + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ + + def __repr__(self): + if self.keys(): + m = max(map(len, list(self.keys()))) + 1 + return '\n'.join([k.rjust(m) + ': ' + repr(v) + for k, v in sorted(self.items())]) + else: + return self.__class__.__name__ + "()" + + def __dir__(self): + return list(self.keys()) + + +class OptimizeWarning(UserWarning): + pass + + +def _check_unknown_options(unknown_options): + if unknown_options: + msg = ", ".join(map(str, unknown_options.keys())) + # Stack level 4: this is called from _minimize_*, which is + # called from another function in Scipy. Level 4 is the first + # level in user code. + warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4) + + +def is_array_scalar(x): + """Test whether `x` is either a scalar or an array scalar. + + """ + return np.size(x) == 1 + + +_epsilon = sqrt(numpy.finfo(float).eps) + + +def vecnorm(x, ord=2): + if ord == Inf: + return numpy.amax(numpy.abs(x)) + elif ord == -Inf: + return numpy.amin(numpy.abs(x)) + else: + return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord) + + +def rosen(x): + """ + The Rosenbrock function. + + The function computed is:: + + sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + + Parameters + ---------- + x : array_like + 1-D array of points at which the Rosenbrock function is to be computed. + + Returns + ------- + f : float + The value of the Rosenbrock function. + + See Also + -------- + rosen_der, rosen_hess, rosen_hess_prod + + Examples + -------- + >>> from scipy.optimize import rosen + >>> X = 0.1 * np.arange(10) + >>> rosen(X) + 76.56 + + """ + x = asarray(x) + r = numpy.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0) + return r + + +def rosen_der(x): + """ + The derivative (i.e. gradient) of the Rosenbrock function. + + Parameters + ---------- + x : array_like + 1-D array of points at which the derivative is to be computed. + + Returns + ------- + rosen_der : (N,) ndarray + The gradient of the Rosenbrock function at `x`. + + See Also + -------- + rosen, rosen_hess, rosen_hess_prod + + """ + x = asarray(x) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = numpy.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + +def rosen_hess(x): + """ + The Hessian matrix of the Rosenbrock function. + + Parameters + ---------- + x : array_like + 1-D array of points at which the Hessian matrix is to be computed. + + Returns + ------- + rosen_hess : ndarray + The Hessian matrix of the Rosenbrock function at `x`. + + See Also + -------- + rosen, rosen_der, rosen_hess_prod + + """ + x = atleast_1d(x) + H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1) + diagonal = numpy.zeros(len(x), dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + H = H + numpy.diag(diagonal) + return H + + +def rosen_hess_prod(x, p): + """ + Product of the Hessian matrix of the Rosenbrock function with a vector. + + Parameters + ---------- + x : array_like + 1-D array of points at which the Hessian matrix is to be computed. + p : array_like + 1-D array, the vector to be multiplied by the Hessian matrix. 
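+        Must have the same length as `x`.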
+ + Returns + ------- + rosen_hess_prod : ndarray + The Hessian matrix of the Rosenbrock function at `x` multiplied + by the vector `p`. + + See Also + -------- + rosen, rosen_der, rosen_hess + + """ + x = atleast_1d(x) + Hp = numpy.zeros(len(x), dtype=x.dtype) + Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1] + Hp[1:-1] = (-400 * x[:-2] * p[:-2] + + (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] - + 400 * x[1:-1] * p[2:]) + Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1] + return Hp + + +def wrap_function(function, args): + ncalls = [0] + if function is None: + return ncalls, None + + def function_wrapper(*wrapper_args): + ncalls[0] += 1 + return function(*(wrapper_args + args)) + + return ncalls, function_wrapper + + +def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, + full_output=0, disp=1, retall=0, callback=None, initial_simplex=None): + """ + Minimize a function using the downhill simplex algorithm. + + This algorithm only uses function values, not derivatives or second + derivatives. + + Parameters + ---------- + func : callable func(x,*args) + The objective function to be minimized. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to func, i.e. ``f(x,*args)``. + xtol : float, optional + Absolute error in xopt between iterations that is acceptable for + convergence. + ftol : number, optional + Absolute error in func(xopt) between iterations that is acceptable for + convergence. + maxiter : int, optional + Maximum number of iterations to perform. + maxfun : number, optional + Maximum number of function evaluations to make. + full_output : bool, optional + Set to True if fopt and warnflag outputs are desired. + disp : bool, optional + Set to True to print convergence messages. + retall : bool, optional + Set to True to return list of solutions at each iteration. + callback : callable, optional + Called after each iteration, as callback(xk), where xk is the + current parameter vector. + initial_simplex : array_like of shape (N + 1, N), optional + Initial simplex. If given, overrides `x0`. + ``initial_simplex[j,:]`` should contain the coordinates of + the j-th vertex of the ``N+1`` vertices in the simplex, where + ``N`` is the dimension. + + Returns + ------- + xopt : ndarray + Parameter that minimizes function. + fopt : float + Value of function at minimum: ``fopt = func(xopt)``. + iter : int + Number of iterations performed. + funcalls : int + Number of function calls made. + warnflag : int + 1 : Maximum number of function evaluations made. + 2 : Maximum number of iterations reached. + allvecs : list + Solution at each iteration. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'Nelder-Mead' `method` in particular. + + Notes + ----- + Uses a Nelder-Mead simplex algorithm to find the minimum of function of + one or more variables. + + This algorithm has a long history of successful use in applications. + But it will usually be slower than an algorithm that uses first or + second derivative information. In practice it can have poor + performance in high-dimensional problems and is not robust to + minimizing complicated functions. Additionally, there currently is no + complete theory describing when the algorithm will successfully + converge to the minimum, or how fast it will if it does. Both the ftol and + xtol criteria must be met for convergence. + + Examples + -------- + >>> def f(x): + ... 
return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.fmin(f, 1) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 17 + Function evaluations: 34 + >>> minimum[0] + -8.8817841970012523e-16 + + References + ---------- + .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function + minimization", The Computer Journal, 7, pp. 308-313 + + .. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now + Respectable", in Numerical Analysis 1995, Proceedings of the + 1995 Dundee Biennial Conference in Numerical Analysis, D.F. + Griffiths and G.A. Watson (Eds.), Addison Wesley Longman, + Harlow, UK, pp. 191-208. + + """ + opts = {'xatol': xtol, + 'fatol': ftol, + 'maxiter': maxiter, + 'maxfev': maxfun, + 'disp': disp, + 'return_all': retall, + 'initial_simplex': initial_simplex} + + res = _minimize_neldermead(func, x0, args, callback=callback, **opts) + if full_output: + retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status'] + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_neldermead(func, x0, args=(), callback=None, + maxiter=None, maxfev=None, disp=False, + return_all=False, initial_simplex=None, + xatol=1e-4, fatol=1e-4, adaptive=False, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + Nelder-Mead algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter, maxfev : int + Maximum allowed number of iterations and function evaluations. + Will default to ``N*200``, where ``N`` is the number of + variables, if neither `maxiter` or `maxfev` is set. If both + `maxiter` and `maxfev` are set, minimization will stop at the + first reached. + initial_simplex : array_like of shape (N + 1, N) + Initial simplex. If given, overrides `x0`. + ``initial_simplex[j,:]`` should contain the coordinates of + the j-th vertex of the ``N+1`` vertices in the simplex, where + ``N`` is the dimension. + xatol : float, optional + Absolute error in xopt between iterations that is acceptable for + convergence. + fatol : number, optional + Absolute error in func(xopt) between iterations that is acceptable for + convergence. + adaptive : bool, optional + Adapt algorithm parameters to dimensionality of problem. Useful for + high-dimensional minimization [1]_. + + References + ---------- + .. [1] Gao, F. and Han, L. + Implementing the Nelder-Mead simplex algorithm with adaptive + parameters. 2012. Computational Optimization and Applications. + 51:1, pp. 259-277 + + """ + if 'ftol' in unknown_options: + warnings.warn("ftol is deprecated for Nelder-Mead," + " use fatol instead. If you specified both, only" + " fatol is used.", + DeprecationWarning) + if (np.isclose(fatol, 1e-4) and + not np.isclose(unknown_options['ftol'], 1e-4)): + # only ftol was probably specified, use it. + fatol = unknown_options['ftol'] + unknown_options.pop('ftol') + if 'xtol' in unknown_options: + warnings.warn("xtol is deprecated for Nelder-Mead," + " use xatol instead. If you specified both, only" + " xatol is used.", + DeprecationWarning) + if (np.isclose(xatol, 1e-4) and + not np.isclose(unknown_options['xtol'], 1e-4)): + # only xtol was probably specified, use it. 
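+            # (this mirrors the ftol -> fatol migration handled above)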
+ xatol = unknown_options['xtol'] + unknown_options.pop('xtol') + + _check_unknown_options(unknown_options) + maxfun = maxfev + retall = return_all + + fcalls, func = wrap_function(func, args) + + if adaptive: + dim = float(len(x0)) + rho = 1 + chi = 1 + 2/dim + psi = 0.75 - 1/(2*dim) + sigma = 1 - 1/dim + else: + rho = 1 + chi = 2 + psi = 0.5 + sigma = 0.5 + + nonzdelt = 0.05 + zdelt = 0.00025 + + x0 = asfarray(x0).flatten() + + if initial_simplex is None: + N = len(x0) + + sim = numpy.zeros((N + 1, N), dtype=x0.dtype) + sim[0] = x0 + for k in range(N): + y = numpy.array(x0, copy=True) + if y[k] != 0: + y[k] = (1 + nonzdelt)*y[k] + else: + y[k] = zdelt + sim[k + 1] = y + else: + sim = np.asfarray(initial_simplex).copy() + if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1: + raise ValueError("`initial_simplex` should be an array of shape (N+1,N)") + if len(x0) != sim.shape[1]: + raise ValueError("Size of `initial_simplex` is not consistent with `x0`") + N = sim.shape[1] + + if retall: + allvecs = [sim[0]] + + # If neither are set, then set both to default + if maxiter is None and maxfun is None: + maxiter = N * 200 + maxfun = N * 200 + elif maxiter is None: + # Convert remaining Nones, to np.inf, unless the other is np.inf, in + # which case use the default to avoid unbounded iteration + if maxfun == np.inf: + maxiter = N * 200 + else: + maxiter = np.inf + elif maxfun is None: + if maxiter == np.inf: + maxfun = N * 200 + else: + maxfun = np.inf + + one2np1 = list(range(1, N + 1)) + fsim = numpy.zeros((N + 1,), float) + + for k in range(N + 1): + fsim[k] = func(sim[k]) + + ind = numpy.argsort(fsim) + fsim = numpy.take(fsim, ind, 0) + # sort so sim[0,:] has the lowest function value + sim = numpy.take(sim, ind, 0) + + iterations = 1 + + while (fcalls[0] < maxfun and iterations < maxiter): + if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and + numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol): + break + + xbar = numpy.add.reduce(sim[:-1], 0) / N + xr = (1 + rho) * xbar - rho * sim[-1] + fxr = func(xr) + doshrink = 0 + + if fxr < fsim[0]: + xe = (1 + rho * chi) * xbar - rho * chi * sim[-1] + fxe = func(xe) + + if fxe < fxr: + sim[-1] = xe + fsim[-1] = fxe + else: + sim[-1] = xr + fsim[-1] = fxr + else: # fsim[0] <= fxr + if fxr < fsim[-2]: + sim[-1] = xr + fsim[-1] = fxr + else: # fxr >= fsim[-2] + # Perform contraction + if fxr < fsim[-1]: + xc = (1 + psi * rho) * xbar - psi * rho * sim[-1] + fxc = func(xc) + + if fxc <= fxr: + sim[-1] = xc + fsim[-1] = fxc + else: + doshrink = 1 + else: + # Perform an inside contraction + xcc = (1 - psi) * xbar + psi * sim[-1] + fxcc = func(xcc) + + if fxcc < fsim[-1]: + sim[-1] = xcc + fsim[-1] = fxcc + else: + doshrink = 1 + + if doshrink: + for j in one2np1: + sim[j] = sim[0] + sigma * (sim[j] - sim[0]) + fsim[j] = func(sim[j]) + + ind = numpy.argsort(fsim) + sim = numpy.take(sim, ind, 0) + fsim = numpy.take(fsim, ind, 0) + if callback is not None: + callback(sim[0]) + iterations += 1 + if retall: + allvecs.append(sim[0]) + + x = sim[0] + fval = numpy.min(fsim) + warnflag = 0 + + if fcalls[0] >= maxfun: + warnflag = 1 + msg = _status_message['maxfev'] + if disp: + print('Warning: ' + msg) + elif iterations >= maxiter: + warnflag = 2 + msg = _status_message['maxiter'] + if disp: + print('Warning: ' + msg) + else: + msg = _status_message['success'] + if disp: + print(msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % iterations) + print(" Function evaluations: %d" % fcalls[0]) + + result = 
OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0], + status=warnflag, success=(warnflag == 0), + message=msg, x=x, final_simplex=(sim, fsim)) + if retall: + result['allvecs'] = allvecs + return result + + +def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None): + """ + See ``approx_fprime``. An optional initial function value arg is added. + + """ + if f0 is None: + f0 = f(*((xk,) + args)) + grad = numpy.zeros((len(xk),), float) + ei = numpy.zeros((len(xk),), float) + for k in range(len(xk)): + ei[k] = 1.0 + d = epsilon * ei + grad[k] = (f(*((xk + d,) + args)) - f0) / d[k] + ei[k] = 0.0 + return grad + + +def approx_fprime(xk, f, epsilon, *args): + """Finite-difference approximation of the gradient of a scalar function. + + Parameters + ---------- + xk : array_like + The coordinate vector at which to determine the gradient of `f`. + f : callable + The function of which to determine the gradient (partial derivatives). + Should take `xk` as first argument, other arguments to `f` can be + supplied in ``*args``. Should return a scalar, the value of the + function at `xk`. + epsilon : array_like + Increment to `xk` to use for determining the function gradient. + If a scalar, uses the same finite difference delta for all partial + derivatives. If an array, should contain one value per element of + `xk`. + \\*args : args, optional + Any other arguments that are to be passed to `f`. + + Returns + ------- + grad : ndarray + The partial derivatives of `f` to `xk`. + + See Also + -------- + check_grad : Check correctness of gradient function against approx_fprime. + + Notes + ----- + The function gradient is determined by the forward finite difference + formula:: + + f(xk[i] + epsilon[i]) - f(xk[i]) + f'[i] = --------------------------------- + epsilon[i] + + The main use of `approx_fprime` is in scalar function optimizers like + `fmin_bfgs`, to determine numerically the Jacobian of a function. + + Examples + -------- + >>> from scipy import optimize + >>> def func(x, c0, c1): + ... "Coordinate vector `x` should be an array of size two." + ... return c0 * x[0]**2 + c1*x[1]**2 + + >>> x = np.ones(2) + >>> c0, c1 = (1, 200) + >>> eps = np.sqrt(np.finfo(float).eps) + >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1) + array([ 2. , 400.00004198]) + + """ + return _approx_fprime_helper(xk, f, epsilon, args=args) + + +def check_grad(func, grad, x0, *args, **kwargs): + """Check the correctness of a gradient function by comparing it against a + (forward) finite-difference approximation of the gradient. + + Parameters + ---------- + func : callable ``func(x0, *args)`` + Function whose derivative is to be checked. + grad : callable ``grad(x0, *args)`` + Gradient of `func`. + x0 : ndarray + Points to check `grad` against forward difference approximation of grad + using `func`. + args : \\*args, optional + Extra arguments passed to `func` and `grad`. + epsilon : float, optional + Step size used for the finite difference approximation. It defaults to + ``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08. + + Returns + ------- + err : float + The square root of the sum of squares (i.e. the 2-norm) of the + difference between ``grad(x0, *args)`` and the finite difference + approximation of `grad` using func at the points `x0`. + + See Also + -------- + approx_fprime + + Examples + -------- + >>> def func(x): + ... return x[0]**2 - 0.5 * x[1]**3 + >>> def grad(x): + ... 
return [2 * x[0], -1.5 * x[1]**2] + >>> from scipy.optimize import check_grad + >>> check_grad(func, grad, [1.5, -1.5]) + 2.9802322387695312e-08 + + """ + step = kwargs.pop('epsilon', _epsilon) + if kwargs: + raise ValueError("Unknown keyword arguments: %r" % + (list(kwargs.keys()),)) + return sqrt(sum((grad(x0, *args) - + approx_fprime(x0, func, step, *args))**2)) + + +def approx_fhess_p(x0, p, fprime, epsilon, *args): + f2 = fprime(*((x0 + epsilon*p,) + args)) + f1 = fprime(*((x0,) + args)) + return (f2 - f1) / epsilon + + +class _LineSearchError(RuntimeError): + pass + + +def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, + **kwargs): + """ + Same as line_search_wolfe1, but fall back to line_search_wolfe2 if + suitable step length is not found, and raise an exception if a + suitable step length is not found. + + Raises + ------ + _LineSearchError + If no suitable step size is found + + """ + + extra_condition = kwargs.pop('extra_condition', None) + + ret = line_search_wolfe1(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, + **kwargs) + + if ret[0] is not None and extra_condition is not None: + xp1 = xk + ret[0] * pk + if not extra_condition(ret[0], xp1, ret[3], ret[5]): + # Reject step if extra_condition fails + ret = (None,) + + if ret[0] is None: + # line search failed: try different one. + with warnings.catch_warnings(): + warnings.simplefilter('ignore', LineSearchWarning) + kwargs2 = {} + for key in ('c1', 'c2', 'amax'): + if key in kwargs: + kwargs2[key] = kwargs[key] + ret = line_search_wolfe2(f, fprime, xk, pk, gfk, + old_fval, old_old_fval, + extra_condition=extra_condition, + **kwargs2) + + if ret[0] is None: + raise _LineSearchError() + + return ret + + +def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, + retall=0, callback=None): + """ + Minimize a function using the BFGS algorithm. + + Parameters + ---------- + f : callable f(x,*args) + Objective function to be minimized. + x0 : ndarray + Initial guess. + fprime : callable f'(x,*args), optional + Gradient of f. + args : tuple, optional + Extra arguments passed to f and fprime. + gtol : float, optional + Gradient norm must be less than gtol before successful termination. + norm : float, optional + Order of norm (Inf is max, -Inf is min) + epsilon : int or ndarray, optional + If fprime is approximated, use this value for the step size. + callback : callable, optional + An optional user-supplied function to call after each + iteration. Called as callback(xk), where xk is the + current parameter vector. + maxiter : int, optional + Maximum number of iterations to perform. + full_output : bool, optional + If True,return fopt, func_calls, grad_calls, and warnflag + in addition to xopt. + disp : bool, optional + Print convergence message if True. + retall : bool, optional + Return a list of results at each iteration if True. + + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e. f(xopt) == fopt. + fopt : float + Minimum value. + gopt : ndarray + Value of gradient at minimum, f'(xopt), which should be near 0. + Bopt : ndarray + Value of 1/f''(xopt), i.e. the inverse hessian matrix. + func_calls : int + Number of function_calls made. + grad_calls : int + Number of gradient calls made. + warnflag : integer + 1 : Maximum number of iterations exceeded. + 2 : Gradient and/or function calls not changing. + allvecs : list + The value of xopt at each iteration. Only returned if retall is True. 
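+
+    Examples
+    --------
+    A minimal sketch on a smooth 2-D quadratic; `quad` is an
+    illustrative name, and ``disp=0`` silences the convergence report:
+
+    >>> from scipy.optimize import fmin_bfgs
+    >>> def quad(x):
+    ...     return (x[0] - 1.0)**2 + (x[1] + 2.5)**2
+    >>> xopt = fmin_bfgs(quad, [0.0, 0.0], disp=0)
+    >>> # xopt lies near [1.0, -2.5], the unique minimizer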
+ + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'BFGS' `method` in particular. + + Notes + ----- + Optimize the function, f, whose gradient is given by fprime + using the quasi-Newton method of Broyden, Fletcher, Goldfarb, + and Shanno (BFGS) + + References + ---------- + Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. + + """ + opts = {'gtol': gtol, + 'norm': norm, + 'eps': epsilon, + 'disp': disp, + 'maxiter': maxiter, + 'return_all': retall} + + res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'], + res['nfev'], res['njev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None, + gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None, + disp=False, return_all=False, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + BFGS algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter : int + Maximum number of iterations to perform. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + norm : float + Order of norm (Inf is max, -Inf is min). + eps : float or ndarray + If `jac` is approximated, use this value for the step size. + + """ + _check_unknown_options(unknown_options) + f = fun + fprime = jac + epsilon = eps + retall = return_all + + x0 = asarray(x0).flatten() + if x0.ndim == 0: + x0.shape = (1,) + if maxiter is None: + maxiter = len(x0) * 200 + func_calls, f = wrap_function(f, args) + if fprime is None: + grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) + else: + grad_calls, myfprime = wrap_function(fprime, args) + gfk = myfprime(x0) + k = 0 + N = len(x0) + I = numpy.eye(N, dtype=int) + Hk = I + + # Sets the initial step guess to dx ~ 1 + old_fval = f(x0) + old_old_fval = old_fval + np.linalg.norm(gfk) / 2 + + xk = x0 + if retall: + allvecs = [x0] + warnflag = 0 + gnorm = vecnorm(gfk, ord=norm) + while (gnorm > gtol) and (k < maxiter): + pk = -numpy.dot(Hk, gfk) + try: + alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, myfprime, xk, pk, gfk, + old_fval, old_old_fval, amin=1e-100, amax=1e100) + except _LineSearchError: + # Line search failed to find a better solution. + warnflag = 2 + break + + xkp1 = xk + alpha_k * pk + if retall: + allvecs.append(xkp1) + sk = xkp1 - xk + xk = xkp1 + if gfkp1 is None: + gfkp1 = myfprime(xkp1) + + yk = gfkp1 - gfk + gfk = gfkp1 + if callback is not None: + callback(xk) + k += 1 + gnorm = vecnorm(gfk, ord=norm) + if (gnorm <= gtol): + break + + if not numpy.isfinite(old_fval): + # We correctly found +-Inf as optimal value, or something went + # wrong. 
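+            # Either way, stop and report the result as precision loss.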
+ warnflag = 2 + break + + try: # this was handled in numeric, let it remaines for more safety + rhok = 1.0 / (numpy.dot(yk, sk)) + except ZeroDivisionError: + rhok = 1000.0 + if disp: + print("Divide-by-zero encountered: rhok assumed large") + if isinf(rhok): # this is patch for numpy + rhok = 1000.0 + if disp: + print("Divide-by-zero encountered: rhok assumed large") + A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok + A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok + Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] * + sk[numpy.newaxis, :]) + + fval = old_fval + if np.isnan(fval): + # This can happen if the first call to f returned NaN; + # the loop is then never entered. + warnflag = 2 + + if warnflag == 2: + msg = _status_message['pr_loss'] + elif k >= maxiter: + warnflag = 1 + msg = _status_message['maxiter'] + else: + msg = _status_message['success'] + + if disp: + print("%s%s" % ("Warning: " if warnflag != 0 else "", msg)) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % func_calls[0]) + print(" Gradient evaluations: %d" % grad_calls[0]) + + result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0], + njev=grad_calls[0], status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + +def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, + maxiter=None, full_output=0, disp=1, retall=0, callback=None): + """ + Minimize a function using a nonlinear conjugate gradient algorithm. + + Parameters + ---------- + f : callable, ``f(x, *args)`` + Objective function to be minimized. Here `x` must be a 1-D array of + the variables that are to be changed in the search for a minimum, and + `args` are the other (fixed) parameters of `f`. + x0 : ndarray + A user-supplied initial estimate of `xopt`, the optimal value of `x`. + It must be a 1-D array of values. + fprime : callable, ``fprime(x, *args)``, optional + A function that returns the gradient of `f` at `x`. Here `x` and `args` + are as described above for `f`. The returned value must be a 1-D array. + Defaults to None, in which case the gradient is approximated + numerically (see `epsilon`, below). + args : tuple, optional + Parameter values passed to `f` and `fprime`. Must be supplied whenever + additional fixed parameters are needed to completely specify the + functions `f` and `fprime`. + gtol : float, optional + Stop when the norm of the gradient is less than `gtol`. + norm : float, optional + Order to use for the norm of the gradient + (``-np.Inf`` is min, ``np.Inf`` is max). + epsilon : float or ndarray, optional + Step size(s) to use when `fprime` is approximated numerically. Can be a + scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the + floating point machine precision. Usually ``sqrt(eps)`` is about + 1.5e-8. + maxiter : int, optional + Maximum number of iterations to perform. Default is ``200 * len(x0)``. + full_output : bool, optional + If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in + addition to `xopt`. See the Returns section below for additional + information on optional return values. + disp : bool, optional + If True, return a convergence message, followed by `xopt`. + retall : bool, optional + If True, add to the returned values the results of each iteration. + callback : callable, optional + An optional user-supplied function, called after each iteration. 
+ Called as ``callback(xk)``, where ``xk`` is the current value of `x0`. + + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e. ``f(xopt) == fopt``. + fopt : float, optional + Minimum value found, f(xopt). Only returned if `full_output` is True. + func_calls : int, optional + The number of function_calls made. Only returned if `full_output` + is True. + grad_calls : int, optional + The number of gradient calls made. Only returned if `full_output` is + True. + warnflag : int, optional + Integer value with warning status, only returned if `full_output` is + True. + + 0 : Success. + + 1 : The maximum number of iterations was exceeded. + + 2 : Gradient and/or function calls were not changing. May indicate + that precision was lost, i.e., the routine did not converge. + + allvecs : list of ndarray, optional + List of arrays, containing the results at each iteration. + Only returned if `retall` is True. + + See Also + -------- + minimize : common interface to all `scipy.optimize` algorithms for + unconstrained and constrained minimization of multivariate + functions. It provides an alternative way to call + ``fmin_cg``, by specifying ``method='CG'``. + + Notes + ----- + This conjugate gradient algorithm is based on that of Polak and Ribiere + [1]_. + + Conjugate gradient methods tend to work better when: + + 1. `f` has a unique global minimizing point, and no local minima or + other stationary points, + 2. `f` is, at least locally, reasonably well approximated by a + quadratic function of the variables, + 3. `f` is continuous and has a continuous gradient, + 4. `fprime` is not too large, e.g., has a norm less than 1000, + 5. The initial guess, `x0`, is reasonably close to `f` 's global + minimizing point, `xopt`. + + References + ---------- + .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122. + + Examples + -------- + Example 1: seek the minimum value of the expression + ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values + of the parameters and an initial guess ``(u, v) = (0, 0)``. + + >>> args = (2, 3, 7, 8, 9, 10) # parameter values + >>> def f(x, *args): + ... u, v = x + ... a, b, c, d, e, f = args + ... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f + >>> def gradf(x, *args): + ... u, v = x + ... a, b, c, d, e, f = args + ... gu = 2*a*u + b*v + d # u-component of the gradient + ... gv = b*u + 2*c*v + e # v-component of the gradient + ... return np.asarray((gu, gv)) + >>> x0 = np.asarray((0, 0)) # Initial guess. + >>> from scipy import optimize + >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args) + Optimization terminated successfully. + Current function value: 1.617021 + Iterations: 4 + Function evaluations: 8 + Gradient evaluations: 8 + >>> res1 + array([-1.80851064, -0.25531915]) + + Example 2: solve the same problem using the `minimize` function. + (This `myopts` dictionary shows all of the available options, + although in practice only non-default values would be needed. + The returned value will be a dictionary.) + + >>> opts = {'maxiter' : None, # default value. + ... 'disp' : True, # non-default value. + ... 'gtol' : 1e-5, # default value. + ... 'norm' : np.inf, # default value. + ... 'eps' : 1.4901161193847656e-08} # default value. + >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args, + ... method='CG', options=opts) + Optimization terminated successfully. 
+ Current function value: 1.617021 + Iterations: 4 + Function evaluations: 8 + Gradient evaluations: 8 + >>> res2.x # minimum found + array([-1.80851064, -0.25531915]) + + """ + opts = {'gtol': gtol, + 'norm': norm, + 'eps': epsilon, + 'disp': disp, + 'maxiter': maxiter, + 'return_all': retall} + + res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts) + + if full_output: + retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status'] + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_cg(fun, x0, args=(), jac=None, callback=None, + gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None, + disp=False, return_all=False, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + conjugate gradient algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + maxiter : int + Maximum number of iterations to perform. + gtol : float + Gradient norm must be less than `gtol` before successful + termination. + norm : float + Order of norm (Inf is max, -Inf is min). + eps : float or ndarray + If `jac` is approximated, use this value for the step size. + + """ + _check_unknown_options(unknown_options) + f = fun + fprime = jac + epsilon = eps + retall = return_all + + x0 = asarray(x0).flatten() + if maxiter is None: + maxiter = len(x0) * 200 + func_calls, f = wrap_function(f, args) + if fprime is None: + grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) + else: + grad_calls, myfprime = wrap_function(fprime, args) + gfk = myfprime(x0) + k = 0 + xk = x0 + + # Sets the initial step guess to dx ~ 1 + old_fval = f(xk) + old_old_fval = old_fval + np.linalg.norm(gfk) / 2 + + if retall: + allvecs = [xk] + warnflag = 0 + pk = -gfk + gnorm = vecnorm(gfk, ord=norm) + + sigma_3 = 0.01 + + while (gnorm > gtol) and (k < maxiter): + deltak = numpy.dot(gfk, gfk) + + cached_step = [None] + + def polak_ribiere_powell_step(alpha, gfkp1=None): + xkp1 = xk + alpha * pk + if gfkp1 is None: + gfkp1 = myfprime(xkp1) + yk = gfkp1 - gfk + beta_k = max(0, numpy.dot(yk, gfkp1) / deltak) + pkp1 = -gfkp1 + beta_k * pk + gnorm = vecnorm(gfkp1, ord=norm) + return (alpha, xkp1, pkp1, gfkp1, gnorm) + + def descent_condition(alpha, xkp1, fp1, gfkp1): + # Polak-Ribiere+ needs an explicit check of a sufficient + # descent condition, which is not guaranteed by strong Wolfe. + # + # See Gilbert & Nocedal, "Global convergence properties of + # conjugate gradient methods for optimization", + # SIAM J. Optimization 2, 21 (1992). + cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1) + alpha, xk, pk, gfk, gnorm = cached_step + + # Accept step if it leads to convergence. + if gnorm <= gtol: + return True + + # Accept step if sufficient descent condition applies. + return numpy.dot(pk, gfk) <= -sigma_3 * numpy.dot(gfk, gfk) + + try: + alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval, + old_old_fval, c2=0.4, amin=1e-100, amax=1e100, + extra_condition=descent_condition) + except _LineSearchError: + # Line search failed to find a better solution. 
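+            # Mark the run as precision loss and leave the outer loop.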
+ warnflag = 2 + break + + # Reuse already computed results if possible + if alpha_k == cached_step[0]: + alpha_k, xk, pk, gfk, gnorm = cached_step + else: + alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1) + + if retall: + allvecs.append(xk) + if callback is not None: + callback(xk) + k += 1 + + fval = old_fval + if warnflag == 2: + msg = _status_message['pr_loss'] + elif k >= maxiter: + warnflag = 1 + msg = _status_message['maxiter'] + else: + msg = _status_message['success'] + + if disp: + print("%s%s" % ("Warning: " if warnflag != 0 else "", msg)) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % func_calls[0]) + print(" Gradient evaluations: %d" % grad_calls[0]) + + result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0], + njev=grad_calls[0], status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + +def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, + epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, + callback=None): + """ + Unconstrained minimization of a function using the Newton-CG method. + + Parameters + ---------- + f : callable ``f(x, *args)`` + Objective function to be minimized. + x0 : ndarray + Initial guess. + fprime : callable ``f'(x, *args)`` + Gradient of f. + fhess_p : callable ``fhess_p(x, p, *args)``, optional + Function which computes the Hessian of f times an + arbitrary vector, p. + fhess : callable ``fhess(x, *args)``, optional + Function to compute the Hessian matrix of f. + args : tuple, optional + Extra arguments passed to f, fprime, fhess_p, and fhess + (the same set of extra arguments is supplied to all of + these functions). + epsilon : float or ndarray, optional + If fhess is approximated, use this value for the step size. + callback : callable, optional + An optional user-supplied function which is called after + each iteration. Called as callback(xk), where xk is the + current parameter vector. + avextol : float, optional + Convergence is assumed when the average relative error in + the minimizer falls below this amount. + maxiter : int, optional + Maximum number of iterations to perform. + full_output : bool, optional + If True, return the optional outputs. + disp : bool, optional + If True, print convergence message. + retall : bool, optional + If True, return a list of results at each iteration. + + Returns + ------- + xopt : ndarray + Parameters which minimize f, i.e. ``f(xopt) == fopt``. + fopt : float + Value of the function at xopt, i.e. ``fopt = f(xopt)``. + fcalls : int + Number of function calls made. + gcalls : int + Number of gradient calls made. + hcalls : int + Number of hessian calls made. + warnflag : int + Warnings generated by the algorithm. + 1 : Maximum number of iterations exceeded. + allvecs : list + The result at each iteration, if retall is True (see below). + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'Newton-CG' `method` in particular. + + Notes + ----- + Only one of `fhess_p` or `fhess` need to be given. If `fhess` + is provided, then `fhess_p` will be ignored. If neither `fhess` + nor `fhess_p` is provided, then the hessian product will be + approximated using finite differences on `fprime`. `fhess_p` + must compute the hessian times an arbitrary vector. If it is not + given, finite-differences on `fprime` are used to compute + it. 
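+
+    As a sketch (the names are illustrative, not part of the API): for
+    ``f(x) = x.dot(x)`` the Hessian is ``2*I``, so a matching `fhess_p`
+    needs no matrix at all::
+
+        def fhess_p(x, p):
+            # Hessian of x.dot(x) is 2*I, hence H.dot(p) == 2*p
+            return 2 * p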
+ + Newton-CG methods are also called truncated Newton methods. This + function differs from scipy.optimize.fmin_tnc because + + 1. scipy.optimize.fmin_ncg is written purely in python using numpy + and scipy while scipy.optimize.fmin_tnc calls a C function. + 2. scipy.optimize.fmin_ncg is only for unconstrained minimization + while scipy.optimize.fmin_tnc is for unconstrained minimization + or box constrained minimization. (Box constraints give + lower and upper bounds for each variable separately.) + + References + ---------- + Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140. + + """ + opts = {'xtol': avextol, + 'eps': epsilon, + 'maxiter': maxiter, + 'disp': disp, + 'return_all': retall} + + res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, + callback=callback, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['nfev'], res['njev'], + res['nhev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None, + callback=None, xtol=1e-5, eps=_epsilon, maxiter=None, + disp=False, return_all=False, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + Newton-CG algorithm. + + Note that the `jac` parameter (Jacobian) is required. + + Options + ------- + disp : bool + Set to True to print convergence messages. + xtol : float + Average relative error in solution `xopt` acceptable for + convergence. + maxiter : int + Maximum number of iterations to perform. + eps : float or ndarray + If `jac` is approximated, use this value for the step size. + + """ + _check_unknown_options(unknown_options) + if jac is None: + raise ValueError('Jacobian is required for Newton-CG method') + f = fun + fprime = jac + fhess_p = hessp + fhess = hess + avextol = xtol + epsilon = eps + retall = return_all + + def terminate(warnflag, msg): + if disp: + print(msg) + print(" Current function value: %f" % old_fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % fcalls[0]) + print(" Gradient evaluations: %d" % gcalls[0]) + print(" Hessian evaluations: %d" % hcalls) + fval = old_fval + result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0], + njev=gcalls[0], nhev=hcalls, status=warnflag, + success=(warnflag == 0), message=msg, x=xk, + nit=k) + if retall: + result['allvecs'] = allvecs + return result + + x0 = asarray(x0).flatten() + fcalls, f = wrap_function(f, args) + gcalls, fprime = wrap_function(fprime, args) + hcalls = 0 + if maxiter is None: + maxiter = len(x0)*200 + cg_maxiter = 20*len(x0) + + xtol = len(x0) * avextol + update = [2 * xtol] + xk = x0 + if retall: + allvecs = [xk] + k = 0 + gfk = None + old_fval = f(x0) + old_old_fval = None + float64eps = numpy.finfo(numpy.float64).eps + while numpy.add.reduce(numpy.abs(update)) > xtol: + if k >= maxiter: + msg = "Warning: " + _status_message['maxiter'] + return terminate(1, msg) + # Compute a search direction pk by applying the CG method to + # del2 f(xk) p = - grad f(xk) starting from 0. + b = -fprime(xk) + maggrad = numpy.add.reduce(numpy.abs(b)) + eta = numpy.min([0.5, numpy.sqrt(maggrad)]) + termcond = eta * maggrad + xsupi = zeros(len(x0), dtype=x0.dtype) + ri = -b + psupi = -ri + i = 0 + dri0 = numpy.dot(ri, ri) + + if fhess is not None: # you want to compute hessian once. 
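+            # The dense Hessian A is formed once per outer Newton iteration;
+            # the inner CG loop below only needs products numpy.dot(A, psupi).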
+ A = fhess(*(xk,) + args) + hcalls = hcalls + 1 + + for k2 in xrange(cg_maxiter): + if numpy.add.reduce(numpy.abs(ri)) <= termcond: + break + if fhess is None: + if fhess_p is None: + Ap = approx_fhess_p(xk, psupi, fprime, epsilon) + else: + Ap = fhess_p(xk, psupi, *args) + hcalls = hcalls + 1 + else: + Ap = numpy.dot(A, psupi) + # check curvature + Ap = asarray(Ap).squeeze() # get rid of matrices... + curv = numpy.dot(psupi, Ap) + if 0 <= curv <= 3 * float64eps: + break + elif curv < 0: + if (i > 0): + break + else: + # fall back to steepest descent direction + xsupi = dri0 / (-curv) * b + break + alphai = dri0 / curv + xsupi = xsupi + alphai * psupi + ri = ri + alphai * Ap + dri1 = numpy.dot(ri, ri) + betai = dri1 / dri0 + psupi = -ri + betai * psupi + i = i + 1 + dri0 = dri1 # update numpy.dot(ri,ri) for next time. + else: + # curvature keeps increasing, bail out + msg = ("Warning: CG iterations didn't converge. The Hessian is not " + "positive definite.") + return terminate(3, msg) + + pk = xsupi # search direction is solution to system. + gfk = -b # gradient at xk + + try: + alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \ + _line_search_wolfe12(f, fprime, xk, pk, gfk, + old_fval, old_old_fval) + except _LineSearchError: + # Line search failed to find a better solution. + msg = "Warning: " + _status_message['pr_loss'] + return terminate(2, msg) + + update = alphak * pk + xk = xk + update # upcast if necessary + if callback is not None: + callback(xk) + if retall: + allvecs.append(xk) + k += 1 + else: + msg = _status_message['success'] + return terminate(0, msg) + + +def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, + full_output=0, disp=1): + """Bounded minimization for scalar functions. + + Parameters + ---------- + func : callable f(x,*args) + Objective function to be minimized (must accept and return scalars). + x1, x2 : float or array scalar + The optimization bounds. + args : tuple, optional + Extra arguments passed to function. + xtol : float, optional + The convergence tolerance. + maxfun : int, optional + Maximum number of function evaluations allowed. + full_output : bool, optional + If True, return optional outputs. + disp : int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. + + + Returns + ------- + xopt : ndarray + Parameters (over given interval) which minimize the + objective function. + fval : number + The function value at the minimum point. + ierr : int + An error flag (0 if converged, 1 if maximum number of + function calls reached). + numfunc : int + The number of function calls made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Bounded' `method` in particular. + + Notes + ----- + Finds a local minimizer of the scalar function `func` in the + interval x1 < xopt < x2 using Brent's method. (See `brent` + for auto-bracketing). + + Examples + -------- + `fminbound` finds the minimum of the function in the given range. + The following examples illustrate the same + + >>> def f(x): + ... 
return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.fminbound(f, -1, 2) + >>> minimum + 0.0 + >>> minimum = optimize.fminbound(f, 1, 2) + >>> minimum + 1.0000059608609866 + """ + options = {'xatol': xtol, + 'maxiter': maxfun, + 'disp': disp} + + res = _minimize_scalar_bounded(func, (x1, x2), args, **options) + if full_output: + return res['x'], res['fun'], res['status'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_bounded(func, bounds, args=(), + xatol=1e-5, maxiter=500, disp=0, + **unknown_options): + """ + Options + ------- + maxiter : int + Maximum number of iterations to perform. + disp: int, optional + If non-zero, print messages. + 0 : no message printing. + 1 : non-convergence notification messages only. + 2 : print a message on convergence too. + 3 : print iteration results. + xatol : float + Absolute error in solution `xopt` acceptable for convergence. + + """ + _check_unknown_options(unknown_options) + maxfun = maxiter + # Test bounds are of correct form + if len(bounds) != 2: + raise ValueError('bounds must have two elements.') + x1, x2 = bounds + + if not (is_array_scalar(x1) and is_array_scalar(x2)): + raise ValueError("Optimisation bounds must be scalars" + " or array scalars.") + if x1 > x2: + raise ValueError("The lower bound exceeds the upper bound.") + + flag = 0 + header = ' Func-count x f(x) Procedure' + step = ' initial' + + sqrt_eps = sqrt(2.2e-16) + golden_mean = 0.5 * (3.0 - sqrt(5.0)) + a, b = x1, x2 + fulc = a + golden_mean * (b - a) + nfc, xf = fulc, fulc + rat = e = 0.0 + x = xf + fx = func(x, *args) + num = 1 + fmin_data = (1, xf, fx) + + ffulc = fnfc = fx + xm = 0.5 * (a + b) + tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0 + tol2 = 2.0 * tol1 + + if disp > 2: + print(" ") + print(header) + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) + + while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))): + golden = 1 + # Check for parabolic fit + if numpy.abs(e) > tol1: + golden = 0 + r = (xf - nfc) * (fx - ffulc) + q = (xf - fulc) * (fx - fnfc) + p = (xf - fulc) * q - (xf - nfc) * r + q = 2.0 * (q - r) + if q > 0.0: + p = -p + q = numpy.abs(q) + r = e + e = rat + + # Check for acceptability of parabola + if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and + (p < q * (b - xf))): + rat = (p + 0.0) / q + x = xf + rat + step = ' parabolic' + + if ((x - a) < tol2) or ((b - x) < tol2): + si = numpy.sign(xm - xf) + ((xm - xf) == 0) + rat = tol1 * si + else: # do a golden section step + golden = 1 + + if golden: # Do a golden-section step + if xf >= xm: + e = a - xf + else: + e = b - xf + rat = golden_mean*e + step = ' golden' + + si = numpy.sign(rat) + (rat == 0) + x = xf + si * numpy.max([numpy.abs(rat), tol1]) + fu = func(x, *args) + num += 1 + fmin_data = (num, x, fu) + if disp > 2: + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) + + if fu <= fx: + if x >= xf: + a = xf + else: + b = xf + fulc, ffulc = nfc, fnfc + nfc, fnfc = xf, fx + xf, fx = x, fu + else: + if x < xf: + a = x + else: + b = x + if (fu <= fnfc) or (nfc == xf): + fulc, ffulc = nfc, fnfc + nfc, fnfc = x, fu + elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): + fulc, ffulc = x, fu + + xm = 0.5 * (a + b) + tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0 + tol2 = 2.0 * tol1 + + if num >= maxfun: + flag = 1 + break + + fval = fx + if disp > 0: + _endprint(x, flag, fval, maxfun, xatol, disp) + + result = OptimizeResult(fun=fval, status=flag, success=(flag == 0), + message={0: 'Solution found.', + 1: 'Maximum number of function calls ' + 
'reached.'}.get(flag, ''), + x=xf, nfev=num) + + return result + + +class Brent: + #need to rethink design of __init__ + def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, + full_output=0): + self.func = func + self.args = args + self.tol = tol + self.maxiter = maxiter + self._mintol = 1.0e-11 + self._cg = 0.3819660 + self.xmin = None + self.fval = None + self.iter = 0 + self.funcalls = 0 + + # need to rethink design of set_bracket (new options, etc) + def set_bracket(self, brack=None): + self.brack = brack + + def get_bracket_info(self): + #set up + func = self.func + args = self.args + brack = self.brack + ### BEGIN core bracket_info code ### + ### carefully DOCUMENT any CHANGES in core ## + if brack is None: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) + elif len(brack) == 2: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], + xb=brack[1], args=args) + elif len(brack) == 3: + xa, xb, xc = brack + if (xa > xc): # swap so xa < xc can be assumed + xc, xa = xa, xc + if not ((xa < xb) and (xb < xc)): + raise ValueError("Not a bracketing interval.") + fa = func(*((xa,) + args)) + fb = func(*((xb,) + args)) + fc = func(*((xc,) + args)) + if not ((fb < fa) and (fb < fc)): + raise ValueError("Not a bracketing interval.") + funcalls = 3 + else: + raise ValueError("Bracketing interval must be " + "length 2 or 3 sequence.") + ### END core bracket_info code ### + + return xa, xb, xc, fa, fb, fc, funcalls + + def optimize(self): + # set up for optimization + func = self.func + xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info() + _mintol = self._mintol + _cg = self._cg + ################################# + #BEGIN CORE ALGORITHM + ################################# + x = w = v = xb + fw = fv = fx = func(*((x,) + self.args)) + if (xa < xc): + a = xa + b = xc + else: + a = xc + b = xa + deltax = 0.0 + funcalls += 1 + iter = 0 + while (iter < self.maxiter): + tol1 = self.tol * numpy.abs(x) + _mintol + tol2 = 2.0 * tol1 + xmid = 0.5 * (a + b) + # check for convergence + if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)): + break + # XXX In the first iteration, rat is only bound in the true case + # of this conditional. This used to cause an UnboundLocalError + # (gh-4140). It should be set before the if (but to what?). + if (numpy.abs(deltax) <= tol1): + if (x >= xmid): + deltax = a - x # do a golden section step + else: + deltax = b - x + rat = _cg * deltax + else: # do a parabolic step + tmp1 = (x - w) * (fx - fv) + tmp2 = (x - v) * (fx - fw) + p = (x - v) * tmp2 - (x - w) * tmp1 + tmp2 = 2.0 * (tmp2 - tmp1) + if (tmp2 > 0.0): + p = -p + tmp2 = numpy.abs(tmp2) + dx_temp = deltax + deltax = rat + # check parabolic fit + if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and + (numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))): + rat = p * 1.0 / tmp2 # if parabolic step is useful. 
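+                    # If the parabolic candidate x + rat would fall within
+                    # tol2 of either bracket end, step by tol1 toward the
+                    # midpoint instead, so progress never stalls at a
+                    # boundary.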
+ u = x + rat + if ((u - a) < tol2 or (b - u) < tol2): + if xmid - x >= 0: + rat = tol1 + else: + rat = -tol1 + else: + if (x >= xmid): + deltax = a - x # if it's not do a golden section step + else: + deltax = b - x + rat = _cg * deltax + + if (numpy.abs(rat) < tol1): # update by at least tol1 + if rat >= 0: + u = x + tol1 + else: + u = x - tol1 + else: + u = x + rat + fu = func(*((u,) + self.args)) # calculate new output value + funcalls += 1 + + if (fu > fx): # if it's bigger than current + if (u < x): + a = u + else: + b = u + if (fu <= fw) or (w == x): + v = w + w = u + fv = fw + fw = fu + elif (fu <= fv) or (v == x) or (v == w): + v = u + fv = fu + else: + if (u >= x): + a = x + else: + b = x + v = w + w = x + x = u + fv = fw + fw = fx + fx = fu + + iter += 1 + ################################# + #END CORE ALGORITHM + ################################# + + self.xmin = x + self.fval = fx + self.iter = iter + self.funcalls = funcalls + + def get_result(self, full_output=False): + if full_output: + return self.xmin, self.fval, self.iter, self.funcalls + else: + return self.xmin + + +def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): + """ + Given a function of one-variable and a possible bracket, return + the local minimum of the function isolated to a fractional precision + of tol. + + Parameters + ---------- + func : callable f(x,*args) + Objective function. + args : tuple, optional + Additional arguments (if present). + brack : tuple, optional + Either a triple (xa,xb,xc) where xa<xb<xc and func(xb) < + func(xa), func(xc) or a pair (xa,xb) which are used as a + starting interval for a downhill bracket search (see + `bracket`). Providing the pair (xa,xb) does not always mean + the obtained solution will satisfy xa<=x<=xb. + tol : float, optional + Stop if between iteration change is less than `tol`. + full_output : bool, optional + If True, return all output args (xmin, fval, iter, + funcalls). + maxiter : int, optional + Maximum number of iterations in solution. + + Returns + ------- + xmin : ndarray + Optimum point. + fval : float + Optimum value. + iter : int + Number of iterations. + funcalls : int + Number of objective function evaluations made. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Brent' `method` in particular. + + Notes + ----- + Uses inverse parabolic interpolation when possible to speed up + convergence of golden section method. + + Does not ensure that the minimum lies in the range specified by + `brack`. See `fminbound`. + + Examples + -------- + We illustrate the behaviour of the function when `brack` is of + size 2 and 3 respectively. In the case where `brack` is of the + form (xa,xb), we can see for the given values, the output need + not necessarily lie in the range (xa,xb). + + >>> def f(x): + ... return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.brent(f,brack=(1,2)) + >>> minimum + 0.0 + >>> minimum = optimize.brent(f,brack=(-1,0.5,2)) + >>> minimum + -2.7755575615628914e-17 + + """ + options = {'xtol': tol, + 'maxiter': maxiter} + res = _minimize_scalar_brent(func, brack, args, **options) + if full_output: + return res['x'], res['fun'], res['nit'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_brent(func, brack=None, args=(), + xtol=1.48e-8, maxiter=500, + **unknown_options): + """ + Options + ------- + maxiter : int + Maximum number of iterations to perform. 
+ xtol : float + Relative error in solution `xopt` acceptable for convergence. + + Notes + ----- + Uses inverse parabolic interpolation when possible to speed up + convergence of golden section method. + + """ + _check_unknown_options(unknown_options) + tol = xtol + if tol < 0: + raise ValueError('tolerance should be >= 0, got %r' % tol) + + brent = Brent(func=func, args=args, tol=tol, + full_output=True, maxiter=maxiter) + brent.set_bracket(brack) + brent.optimize() + x, fval, nit, nfev = brent.get_result(full_output=True) + return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev, + success=nit < maxiter) + + +def golden(func, args=(), brack=None, tol=_epsilon, + full_output=0, maxiter=5000): + """ + Return the minimum of a function of one variable using golden section + method. + + Given a function of one variable and a possible bracketing interval, + return the minimum of the function isolated to a fractional precision of + tol. + + Parameters + ---------- + func : callable func(x,*args) + Objective function to minimize. + args : tuple, optional + Additional arguments (if present), passed to func. + brack : tuple, optional + Triple (a,b,c), where (a<b<c) and func(b) < + func(a),func(c). If bracket consists of two numbers (a, + c), then they are assumed to be a starting interval for a + downhill bracket search (see `bracket`); it doesn't always + mean that obtained solution will satisfy a<=x<=c. + tol : float, optional + x tolerance stop criterion + full_output : bool, optional + If True, return optional outputs. + maxiter : int + Maximum number of iterations to perform. + + See also + -------- + minimize_scalar: Interface to minimization algorithms for scalar + univariate functions. See the 'Golden' `method` in particular. + + Notes + ----- + Uses analog of bisection method to decrease the bracketed + interval. + + Examples + -------- + We illustrate the behaviour of the function when `brack` is of + size 2 and 3 respectively. In the case where `brack` is of the + form (xa,xb), we can see for the given values, the output need + not necessarily lie in the range ``(xa, xb)``. + + >>> def f(x): + ... return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.golden(f, brack=(1, 2)) + >>> minimum + 1.5717277788484873e-162 + >>> minimum = optimize.golden(f, brack=(-1, 0.5, 2)) + >>> minimum + -1.5717277788484873e-162 + + """ + options = {'xtol': tol, 'maxiter': maxiter} + res = _minimize_scalar_golden(func, brack, args, **options) + if full_output: + return res['x'], res['fun'], res['nfev'] + else: + return res['x'] + + +def _minimize_scalar_golden(func, brack=None, args=(), + xtol=_epsilon, maxiter=5000, **unknown_options): + """ + Options + ------- + maxiter : int + Maximum number of iterations to perform. + xtol : float + Relative error in solution `xopt` acceptable for convergence. 
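+
+    Notes
+    -----
+    `xtol` is applied to the width of the bracket relative to the
+    magnitude of the two interior points: iteration stops once
+    ``abs(x3 - x0) <= xtol * (abs(x1) + abs(x2))``.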
+ + """ + _check_unknown_options(unknown_options) + tol = xtol + if brack is None: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) + elif len(brack) == 2: + xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], + xb=brack[1], args=args) + elif len(brack) == 3: + xa, xb, xc = brack + if (xa > xc): # swap so xa < xc can be assumed + xc, xa = xa, xc + if not ((xa < xb) and (xb < xc)): + raise ValueError("Not a bracketing interval.") + fa = func(*((xa,) + args)) + fb = func(*((xb,) + args)) + fc = func(*((xc,) + args)) + if not ((fb < fa) and (fb < fc)): + raise ValueError("Not a bracketing interval.") + funcalls = 3 + else: + raise ValueError("Bracketing interval must be length 2 or 3 sequence.") + + _gR = 0.61803399 # golden ratio conjugate: 2.0/(1.0+sqrt(5.0)) + _gC = 1.0 - _gR + x3 = xc + x0 = xa + if (numpy.abs(xc - xb) > numpy.abs(xb - xa)): + x1 = xb + x2 = xb + _gC * (xc - xb) + else: + x2 = xb + x1 = xb - _gC * (xb - xa) + f1 = func(*((x1,) + args)) + f2 = func(*((x2,) + args)) + funcalls += 2 + nit = 0 + for i in xrange(maxiter): + if numpy.abs(x3 - x0) <= tol * (numpy.abs(x1) + numpy.abs(x2)): + break + if (f2 < f1): + x0 = x1 + x1 = x2 + x2 = _gR * x1 + _gC * x3 + f1 = f2 + f2 = func(*((x2,) + args)) + else: + x3 = x2 + x2 = x1 + x1 = _gR * x2 + _gC * x0 + f2 = f1 + f1 = func(*((x1,) + args)) + funcalls += 1 + nit += 1 + if (f1 < f2): + xmin = x1 + fval = f1 + else: + xmin = x2 + fval = f2 + + return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit, + success=nit < maxiter) + + +def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000): + """ + Bracket the minimum of the function. + + Given a function and distinct initial points, search in the + downhill direction (as defined by the initital points) and return + new points xa, xb, xc that bracket the minimum of the function + f(xa) > f(xb) < f(xc). It doesn't always mean that obtained + solution will satisfy xa<=x<=xb + + Parameters + ---------- + func : callable f(x,*args) + Objective function to minimize. + xa, xb : float, optional + Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0. + args : tuple, optional + Additional arguments (if present), passed to `func`. + grow_limit : float, optional + Maximum grow limit. Defaults to 110.0 + maxiter : int, optional + Maximum number of iterations to perform. Defaults to 1000. + + Returns + ------- + xa, xb, xc : float + Bracket. + fa, fb, fc : float + Objective function values in bracket. + funcalls : int + Number of function evaluations made. 
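+
+    Examples
+    --------
+    A minimal sketch (the exact points returned depend on the search
+    path, so only the bracketing property is shown):
+
+    >>> from scipy import optimize
+    >>> def f(x):
+    ...     return 10*x**2 + 3*x + 5
+    >>> xa, xb, xc, fa, fb, fc, funcalls = optimize.bracket(f, xa=-1, xb=0)
+    >>> (fa > fb) and (fb < fc)
+    True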
+ + """ + _gold = 1.618034 # golden ratio: (1.0+sqrt(5.0))/2.0 + _verysmall_num = 1e-21 + fa = func(*(xa,) + args) + fb = func(*(xb,) + args) + if (fa < fb): # Switch so fa > fb + xa, xb = xb, xa + fa, fb = fb, fa + xc = xb + _gold * (xb - xa) + fc = func(*((xc,) + args)) + funcalls = 3 + iter = 0 + while (fc < fb): + tmp1 = (xb - xa) * (fb - fc) + tmp2 = (xb - xc) * (fb - fa) + val = tmp2 - tmp1 + if numpy.abs(val) < _verysmall_num: + denom = 2.0 * _verysmall_num + else: + denom = 2.0 * val + w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom + wlim = xb + grow_limit * (xc - xb) + if iter > maxiter: + raise RuntimeError("Too many iterations.") + iter += 1 + if (w - xc) * (xb - w) > 0.0: + fw = func(*((w,) + args)) + funcalls += 1 + if (fw < fc): + xa = xb + xb = w + fa = fb + fb = fw + return xa, xb, xc, fa, fb, fc, funcalls + elif (fw > fb): + xc = w + fc = fw + return xa, xb, xc, fa, fb, fc, funcalls + w = xc + _gold * (xc - xb) + fw = func(*((w,) + args)) + funcalls += 1 + elif (w - wlim)*(wlim - xc) >= 0.0: + w = wlim + fw = func(*((w,) + args)) + funcalls += 1 + elif (w - wlim)*(xc - w) > 0.0: + fw = func(*((w,) + args)) + funcalls += 1 + if (fw < fc): + xb = xc + xc = w + w = xc + _gold * (xc - xb) + fb = fc + fc = fw + fw = func(*((w,) + args)) + funcalls += 1 + else: + w = xc + _gold * (xc - xb) + fw = func(*((w,) + args)) + funcalls += 1 + xa = xb + xb = xc + xc = w + fa = fb + fb = fc + fc = fw + return xa, xb, xc, fa, fb, fc, funcalls + + +def _linesearch_powell(func, p, xi, tol=1e-3): + """Line-search algorithm using fminbound. + + Find the minimium of the function ``func(x0+ alpha*direc)``. + + """ + def myfunc(alpha): + return func(p + alpha*xi) + alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol) + xi = alpha_min*xi + return squeeze(fret), p + xi, xi + + +def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, + maxfun=None, full_output=0, disp=1, retall=0, callback=None, + direc=None): + """ + Minimize a function using modified Powell's method. This method + only uses function values, not derivatives. + + Parameters + ---------- + func : callable f(x,*args) + Objective function to be minimized. + x0 : ndarray + Initial guess. + args : tuple, optional + Extra arguments passed to func. + callback : callable, optional + An optional user-supplied function, called after each + iteration. Called as ``callback(xk)``, where ``xk`` is the + current parameter vector. + direc : ndarray, optional + Initial direction set. + xtol : float, optional + Line-search error tolerance. + ftol : float, optional + Relative error in ``func(xopt)`` acceptable for convergence. + maxiter : int, optional + Maximum number of iterations to perform. + maxfun : int, optional + Maximum number of function evaluations to make. + full_output : bool, optional + If True, fopt, xi, direc, iter, funcalls, and + warnflag are returned. + disp : bool, optional + If True, print convergence messages. + retall : bool, optional + If True, return a list of the solution at each iteration. + + Returns + ------- + xopt : ndarray + Parameter which minimizes `func`. + fopt : number + Value of function at minimum: ``fopt = func(xopt)``. + direc : ndarray + Current direction set. + iter : int + Number of iterations. + funcalls : int + Number of function calls made. + warnflag : int + Integer warning flag: + 1 : Maximum number of function evaluations. + 2 : Maximum number of iterations. + allvecs : list + List of solutions at each iteration. 
+ + See also + -------- + minimize: Interface to unconstrained minimization algorithms for + multivariate functions. See the 'Powell' `method` in particular. + + Notes + ----- + Uses a modification of Powell's method to find the minimum of + a function of N variables. Powell's method is a conjugate + direction method. + + The algorithm has two loops. The outer loop + merely iterates over the inner loop. The inner loop minimizes + over each current direction in the direction set. At the end + of the inner loop, if certain conditions are met, the direction + that gave the largest decrease is dropped and replaced with + the difference between the current estimated x and the estimated + x from the beginning of the inner-loop. + + The technical conditions for replacing the direction of greatest + increase amount to checking that + + 1. No further gain can be made along the direction of greatest increase + from that iteration. + 2. The direction of greatest increase accounted for a large sufficient + fraction of the decrease in the function value from that iteration of + the inner loop. + + Examples + -------- + >>> def f(x): + ... return x**2 + + >>> from scipy import optimize + + >>> minimum = optimize.fmin_powell(f, -1) + Optimization terminated successfully. + Current function value: 0.000000 + Iterations: 2 + Function evaluations: 18 + >>> minimum + array(0.0) + + References + ---------- + Powell M.J.D. (1964) An efficient method for finding the minimum of a + function of several variables without calculating derivatives, + Computer Journal, 7 (2):155-162. + + Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.: + Numerical Recipes (any edition), Cambridge University Press + + """ + opts = {'xtol': xtol, + 'ftol': ftol, + 'maxiter': maxiter, + 'maxfev': maxfun, + 'disp': disp, + 'direc': direc, + 'return_all': retall} + + res = _minimize_powell(func, x0, args, callback=callback, **opts) + + if full_output: + retlist = (res['x'], res['fun'], res['direc'], res['nit'], + res['nfev'], res['status']) + if retall: + retlist += (res['allvecs'], ) + return retlist + else: + if retall: + return res['x'], res['allvecs'] + else: + return res['x'] + + +def _minimize_powell(func, x0, args=(), callback=None, + xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None, + disp=False, direc=None, return_all=False, + **unknown_options): + """ + Minimization of scalar function of one or more variables using the + modified Powell algorithm. + + Options + ------- + disp : bool + Set to True to print convergence messages. + xtol : float + Relative error in solution `xopt` acceptable for convergence. + ftol : float + Relative error in ``fun(xopt)`` acceptable for convergence. + maxiter, maxfev : int + Maximum allowed number of iterations and function evaluations. + Will default to ``N*1000``, where ``N`` is the number of + variables, if neither `maxiter` or `maxfev` is set. If both + `maxiter` and `maxfev` are set, minimization will stop at the + first reached. + direc : ndarray + Initial set of direction vectors for the Powell method. 
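+        If not provided, the identity matrix is used, so the first sweep
+        minimizes along each coordinate axis in turn.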
+ + """ + _check_unknown_options(unknown_options) + maxfun = maxfev + retall = return_all + # we need to use a mutable object here that we can update in the + # wrapper function + fcalls, func = wrap_function(func, args) + x = asarray(x0).flatten() + if retall: + allvecs = [x] + N = len(x) + # If neither are set, then set both to default + if maxiter is None and maxfun is None: + maxiter = N * 1000 + maxfun = N * 1000 + elif maxiter is None: + # Convert remaining Nones, to np.inf, unless the other is np.inf, in + # which case use the default to avoid unbounded iteration + if maxfun == np.inf: + maxiter = N * 1000 + else: + maxiter = np.inf + elif maxfun is None: + if maxiter == np.inf: + maxfun = N * 1000 + else: + maxfun = np.inf + + if direc is None: + direc = eye(N, dtype=float) + else: + direc = asarray(direc, dtype=float) + + fval = squeeze(func(x)) + x1 = x.copy() + iter = 0 + ilist = list(range(N)) + while True: + fx = fval + bigind = 0 + delta = 0.0 + for i in ilist: + direc1 = direc[i] + fx2 = fval + fval, x, direc1 = _linesearch_powell(func, x, direc1, + tol=xtol * 100) + if (fx2 - fval) > delta: + delta = fx2 - fval + bigind = i + iter += 1 + if callback is not None: + callback(x) + if retall: + allvecs.append(x) + bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20 + if 2.0 * (fx - fval) <= bnd: + break + if fcalls[0] >= maxfun: + break + if iter >= maxiter: + break + + # Construct the extrapolated point + direc1 = x - x1 + x2 = 2*x - x1 + x1 = x.copy() + fx2 = squeeze(func(x2)) + + if (fx > fx2): + t = 2.0*(fx + fx2 - 2.0*fval) + temp = (fx - fval - delta) + t *= temp*temp + temp = fx - fx2 + t -= delta*temp*temp + if t < 0.0: + fval, x, direc1 = _linesearch_powell(func, x, direc1, + tol=xtol*100) + direc[bigind] = direc[-1] + direc[-1] = direc1 + + warnflag = 0 + if fcalls[0] >= maxfun: + warnflag = 1 + msg = _status_message['maxfev'] + if disp: + print("Warning: " + msg) + elif iter >= maxiter: + warnflag = 2 + msg = _status_message['maxiter'] + if disp: + print("Warning: " + msg) + else: + msg = _status_message['success'] + if disp: + print(msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % iter) + print(" Function evaluations: %d" % fcalls[0]) + + x = squeeze(x) + + result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0], + status=warnflag, success=(warnflag == 0), + message=msg, x=x) + if retall: + result['allvecs'] = allvecs + return result + + +def _endprint(x, flag, fval, maxfun, xtol, disp): + if flag == 0: + if disp > 1: + print("\nOptimization terminated successfully;\n" + "The returned value satisfies the termination criteria\n" + "(using xtol = ", xtol, ")") + if flag == 1: + if disp: + print("\nMaximum number of function evaluations exceeded --- " + "increase maxfun argument.\n") + return + + +def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, + disp=False): + """Minimize a function over a given range by brute force. + + Uses the "brute force" method, i.e. computes the function's value + at each point of a multidimensional grid of points, to find the global + minimum of the function. + + The function is evaluated everywhere in the range with the datatype of the + first call to the function, as enforced by the ``vectorize`` NumPy + function. The value and type of the function evaluation returned when + ``full_output=True`` are affected in addition by the ``finish`` argument + (see Notes). 
+ + The brute force approach is inefficient because the number of grid points + increases exponentially - the number of grid points to evaluate is + ``Ns ** len(x)``. Consequently, even with coarse grid spacing, even + moderately sized problems can take a long time to run, and/or run into + memory limitations. + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the + form ``f(x, *args)``, where ``x`` is the argument in + the form of a 1-D array and ``args`` is a tuple of any + additional fixed parameters needed to completely specify + the function. + ranges : tuple + Each component of the `ranges` tuple must be either a + "slice object" or a range tuple of the form ``(low, high)``. + The program uses these to create the grid of points on which + the objective function will be computed. See `Note 2` for + more detail. + args : tuple, optional + Any additional fixed parameters needed to completely specify + the function. + Ns : int, optional + Number of grid points along the axes, if not otherwise + specified. See `Note2`. + full_output : bool, optional + If True, return the evaluation grid and the objective function's + values on it. + finish : callable, optional + An optimization function that is called with the result of brute force + minimization as initial guess. `finish` should take `func` and + the initial guess as positional arguments, and take `args` as + keyword arguments. It may additionally take `full_output` + and/or `disp` as keyword arguments. Use None if no "polishing" + function is to be used. See Notes for more details. + disp : bool, optional + Set to True to print convergence messages. + + Returns + ------- + x0 : ndarray + A 1-D array containing the coordinates of a point at which the + objective function had its minimum value. (See `Note 1` for + which point is returned.) + fval : float + Function value at the point `x0`. (Returned when `full_output` is + True.) + grid : tuple + Representation of the evaluation grid. It has the same + length as `x0`. (Returned when `full_output` is True.) + Jout : ndarray + Function values at each point of the evaluation + grid, `i.e.`, ``Jout = func(*grid)``. (Returned + when `full_output` is True.) + + See Also + -------- + basinhopping, differential_evolution + + Notes + ----- + *Note 1*: The program finds the gridpoint at which the lowest value + of the objective function occurs. If `finish` is None, that is the + point returned. When the global minimum occurs within (or not very far + outside) the grid's boundaries, and the grid is fine enough, that + point will be in the neighborhood of the global minimum. + + However, users often employ some other optimization program to + "polish" the gridpoint values, `i.e.`, to seek a more precise + (local) minimum near `brute's` best gridpoint. + The `brute` function's `finish` option provides a convenient way to do + that. Any polishing program used must take `brute's` output as its + initial guess as a positional argument, and take `brute's` input values + for `args` as keyword arguments, otherwise an error will be raised. + It may additionally take `full_output` and/or `disp` as keyword arguments. + + `brute` assumes that the `finish` function returns either an + `OptimizeResult` object or a tuple in the form: + ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing + value of the argument, ``Jmin`` is the minimum value of the objective + function, "..." 
may be some other returned values (which are not used + by `brute`), and ``statuscode`` is the status code of the `finish` program. + + Note that when `finish` is not None, the values returned are those + of the `finish` program, *not* the gridpoint ones. Consequently, + while `brute` confines its search to the input grid points, + the `finish` program's results usually will not coincide with any + gridpoint, and may fall outside the grid's boundary. Thus, if a + minimum only needs to be found over the provided grid points, make + sure to pass in `finish=None`. + + *Note 2*: The grid of points is a `numpy.mgrid` object. + For `brute` the `ranges` and `Ns` inputs have the following effect. + Each component of the `ranges` tuple can be either a slice object or a + two-tuple giving a range of values, such as (0, 5). If the component is a + slice object, `brute` uses it directly. If the component is a two-tuple + range, `brute` internally converts it to a slice object that interpolates + `Ns` points from its low-value to its high-value, inclusive. + + Examples + -------- + We illustrate the use of `brute` to seek the global minimum of a function + of two variables that is given as the sum of a positive-definite + quadratic and two deep "Gaussian-shaped" craters. Specifically, define + the objective function `f` as the sum of three other functions, + ``f = f1 + f2 + f3``. We suppose each of these has a signature + ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions + are as defined below. + + >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) + >>> def f1(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) + + >>> def f2(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) + + >>> def f3(z, *params): + ... x, y = z + ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params + ... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) + + >>> def f(z, *params): + ... return f1(z, *params) + f2(z, *params) + f3(z, *params) + + Thus, the objective function may have local minima near the minimum + of each of the three functions of which it is composed. To + use `fmin` to polish its gridpoint result, we may then continue as + follows: + + >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) + >>> from scipy import optimize + >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True, + ... finish=optimize.fmin) + >>> resbrute[0] # global minimum + array([-1.05665192, 1.80834843]) + >>> resbrute[1] # function value at global minimum + -3.4085818767 + + Note that if `finish` had been set to None, we would have gotten the + gridpoint [-1.0 1.75] where the rounded function value is -2.892. 
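+
+    To search over the grid alone, switch the polisher off (a short
+    sketch reusing the objects defined above); this returns the best
+    gridpoint, near [-1.0, 1.75] as noted:
+
+    >>> xmin_grid = optimize.brute(f, rranges, args=params, finish=None)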
+ + """ + N = len(ranges) + if N > 40: + raise ValueError("Brute Force not possible with more " + "than 40 variables.") + lrange = list(ranges) + for k in range(N): + if type(lrange[k]) is not type(slice(None)): + if len(lrange[k]) < 3: + lrange[k] = tuple(lrange[k]) + (complex(Ns),) + lrange[k] = slice(*lrange[k]) + if (N == 1): + lrange = lrange[0] + + def _scalarfunc(*params): + params = asarray(params).flatten() + return func(params, *args) + + vecfunc = vectorize(_scalarfunc) + grid = mgrid[lrange] + if (N == 1): + grid = (grid,) + Jout = vecfunc(*grid) + Nshape = shape(Jout) + indx = argmin(Jout.ravel(), axis=-1) + Nindx = zeros(N, int) + xmin = zeros(N, float) + for k in range(N - 1, -1, -1): + thisN = Nshape[k] + Nindx[k] = indx % Nshape[k] + indx = indx // thisN + for k in range(N): + xmin[k] = grid[k][tuple(Nindx)] + + Jmin = Jout[tuple(Nindx)] + if (N == 1): + grid = grid[0] + xmin = xmin[0] + if callable(finish): + # set up kwargs for `finish` function + finish_args = _getargspec(finish).args + finish_kwargs = dict() + if 'full_output' in finish_args: + finish_kwargs['full_output'] = 1 + if 'disp' in finish_args: + finish_kwargs['disp'] = disp + elif 'options' in finish_args: + # pass 'disp' as `options` + # (e.g. if `finish` is `minimize`) + finish_kwargs['options'] = {'disp': disp} + + # run minimizer + res = finish(func, xmin, args=args, **finish_kwargs) + + if isinstance(res, OptimizeResult): + xmin = res.x + Jmin = res.fun + success = res.success + else: + xmin = res[0] + Jmin = res[1] + success = res[-1] == 0 + if not success: + if disp: + print("Warning: Either final optimization did not succeed " + "or `finish` does not return `statuscode` as its last " + "argument.") + + if full_output: + return xmin, Jmin, grid, Jout + else: + return xmin + + +def show_options(solver=None, method=None, disp=True): + """ + Show documentation for additional options of optimization solvers. + + These are method-specific options that can be supplied through the + ``options`` dict. + + Parameters + ---------- + solver : str + Type of optimization solver. One of 'minimize', 'minimize_scalar', + 'root', or 'linprog'. + method : str, optional + If not given, shows all methods of the specified solver. Otherwise, + show only the options for the specified method. Valid values + corresponds to methods' names of respective solver (e.g. 'BFGS' for + 'minimize'). + disp : bool, optional + Whether to print the result rather than returning it. 
+ + Returns + ------- + text + Either None (for disp=False) or the text string (disp=True) + + Notes + ----- + The solver-specific methods are: + + `scipy.optimize.minimize` + + - :ref:`Nelder-Mead <optimize.minimize-neldermead>` + - :ref:`Powell <optimize.minimize-powell>` + - :ref:`CG <optimize.minimize-cg>` + - :ref:`BFGS <optimize.minimize-bfgs>` + - :ref:`Newton-CG <optimize.minimize-newtoncg>` + - :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` + - :ref:`TNC <optimize.minimize-tnc>` + - :ref:`COBYLA <optimize.minimize-cobyla>` + - :ref:`SLSQP <optimize.minimize-slsqp>` + - :ref:`dogleg <optimize.minimize-dogleg>` + - :ref:`trust-ncg <optimize.minimize-trustncg>` + + `scipy.optimize.root` + + - :ref:`hybr <optimize.root-hybr>` + - :ref:`lm <optimize.root-lm>` + - :ref:`broyden1 <optimize.root-broyden1>` + - :ref:`broyden2 <optimize.root-broyden2>` + - :ref:`anderson <optimize.root-anderson>` + - :ref:`linearmixing <optimize.root-linearmixing>` + - :ref:`diagbroyden <optimize.root-diagbroyden>` + - :ref:`excitingmixing <optimize.root-excitingmixing>` + - :ref:`krylov <optimize.root-krylov>` + - :ref:`df-sane <optimize.root-dfsane>` + + `scipy.optimize.minimize_scalar` + + - :ref:`brent <optimize.minimize_scalar-brent>` + - :ref:`golden <optimize.minimize_scalar-golden>` + - :ref:`bounded <optimize.minimize_scalar-bounded>` + + `scipy.optimize.linprog` + + - :ref:`simplex <optimize.linprog-simplex>` + - :ref:`interior-point <optimize.linprog-interior-point>` + + """ + import textwrap + + doc_routines = { + 'minimize': ( + ('bfgs', 'scipy.optimize.optimize._minimize_bfgs'), + ('cg', 'scipy.optimize.optimize._minimize_cg'), + ('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'), + ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'), + ('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'), + ('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'), + ('newton-cg', 'scipy.optimize.optimize._minimize_newtoncg'), + ('powell', 'scipy.optimize.optimize._minimize_powell'), + ('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'), + ('tnc', 'scipy.optimize.tnc._minimize_tnc'), + ('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'), + ), + 'root': ( + ('hybr', 'scipy.optimize.minpack._root_hybr'), + ('lm', 'scipy.optimize._root._root_leastsq'), + ('broyden1', 'scipy.optimize._root._root_broyden1_doc'), + ('broyden2', 'scipy.optimize._root._root_broyden2_doc'), + ('anderson', 'scipy.optimize._root._root_anderson_doc'), + ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'), + ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'), + ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'), + ('krylov', 'scipy.optimize._root._root_krylov_doc'), + ('df-sane', 'scipy.optimize._spectral._root_df_sane'), + ), + 'root_scalar': ( + ('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'), + ('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'), + ('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'), + ('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'), + ('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'), + ('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'), + ('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'), + ('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'), + ), + 'linprog': ( + ('simplex', 'scipy.optimize._linprog._linprog_simplex'), + ('interior-point', 'scipy.optimize._linprog._linprog_ip'), + ), + 'minimize_scalar': ( + ('brent', 
'scipy.optimize.optimize._minimize_scalar_brent'), + ('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'), + ('golden', 'scipy.optimize.optimize._minimize_scalar_golden'), + ), + } + + if solver is None: + text = ["\n\n\n========\n", "minimize\n", "========\n"] + text.append(show_options('minimize', disp=False)) + text.extend(["\n\n===============\n", "minimize_scalar\n", + "===============\n"]) + text.append(show_options('minimize_scalar', disp=False)) + text.extend(["\n\n\n====\n", "root\n", + "====\n"]) + text.append(show_options('root', disp=False)) + text.extend(['\n\n\n=======\n', 'linprog\n', + '=======\n']) + text.append(show_options('linprog', disp=False)) + text = "".join(text) + else: + solver = solver.lower() + if solver not in doc_routines: + raise ValueError('Unknown solver %r' % (solver,)) + + if method is None: + text = [] + for name, _ in doc_routines[solver]: + text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"]) + text.append(show_options(solver, name, disp=False)) + text = "".join(text) + else: + method = method.lower() + methods = dict(doc_routines[solver]) + if method not in methods: + raise ValueError("Unknown method %r" % (method,)) + name = methods[method] + + # Import function object + parts = name.split('.') + mod_name = ".".join(parts[:-1]) + __import__(mod_name) + obj = getattr(sys.modules[mod_name], parts[-1]) + + # Get doc + doc = obj.__doc__ + if doc is not None: + text = textwrap.dedent(doc).strip() + else: + text = "" + + if disp: + print(text) + return + else: + return text + + +def main(): + import time + + times = [] + algor = [] + x0 = [0.8, 1.2, 0.7] + print("Nelder-Mead Simplex") + print("===================") + start = time.time() + x = fmin(rosen, x0) + print(x) + times.append(time.time() - start) + algor.append('Nelder-Mead Simplex\t') + + print() + print("Powell Direction Set Method") + print("===========================") + start = time.time() + x = fmin_powell(rosen, x0) + print(x) + times.append(time.time() - start) + algor.append('Powell Direction Set Method.') + + print() + print("Nonlinear CG") + print("============") + start = time.time() + x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200) + print(x) + times.append(time.time() - start) + algor.append('Nonlinear CG \t') + + print() + print("BFGS Quasi-Newton") + print("=================") + start = time.time() + x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80) + print(x) + times.append(time.time() - start) + algor.append('BFGS Quasi-Newton\t') + + print() + print("BFGS approximate gradient") + print("=========================") + start = time.time() + x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100) + print(x) + times.append(time.time() - start) + algor.append('BFGS without gradient\t') + + print() + print("Newton-CG with Hessian product") + print("==============================") + start = time.time() + x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80) + print(x) + times.append(time.time() - start) + algor.append('Newton-CG with hessian product') + + print() + print("Newton-CG with full Hessian") + print("===========================") + start = time.time() + x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80) + print(x) + times.append(time.time() - start) + algor.append('Newton-CG with full hessian') + + print() + print("\nMinimizing the Rosenbrock function of order 3\n") + print(" Algorithm \t\t\t Seconds") + print("===========\t\t\t =========") + for k in range(len(algor)): + print(algor[k], "\t -- ", times[k]) + + +if 
__name__ == "__main__": + main() diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/optimize.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/optimize.pyc new file mode 100644 index 0000000..49b81a9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/optimize.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/setup.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/setup.py new file mode 100644 index 0000000..0ecd7e0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/setup.py @@ -0,0 +1,96 @@ +from __future__ import division, print_function, absolute_import + +import os.path +from os.path import join + +from scipy._build_utils import numpy_nodepr_api + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + from scipy._build_utils.system_info import get_info + config = Configuration('optimize',parent_package, top_path) + + include_dirs = [join(os.path.dirname(__file__), '..', '_lib', 'src')] + + minpack_src = [join('minpack','*f')] + config.add_library('minpack',sources=minpack_src) + config.add_extension('_minpack', + sources=['_minpackmodule.c'], + libraries=['minpack'], + depends=(["minpack.h","__minpack.h"] + + minpack_src), + include_dirs=include_dirs, + **numpy_nodepr_api) + + rootfind_src = [join('Zeros','*.c')] + rootfind_hdr = [join('Zeros','zeros.h')] + config.add_library('rootfind', + sources=rootfind_src, + headers=rootfind_hdr, + **numpy_nodepr_api) + + config.add_extension('_zeros', + sources=['zeros.c'], + libraries=['rootfind'], + depends=(rootfind_src + rootfind_hdr), + **numpy_nodepr_api) + + lapack = get_info('lapack_opt') + if 'define_macros' in numpy_nodepr_api: + if ('define_macros' in lapack) and (lapack['define_macros'] is not None): + lapack['define_macros'] = (lapack['define_macros'] + + numpy_nodepr_api['define_macros']) + else: + lapack['define_macros'] = numpy_nodepr_api['define_macros'] + sources = ['lbfgsb.pyf', 'lbfgsb.f', 'linpack.f', 'timer.f'] + config.add_extension('_lbfgsb', + sources=[join('lbfgsb_src',x) for x in sources], + **lapack) + + sources = ['moduleTNC.c','tnc.c'] + config.add_extension('moduleTNC', + sources=[join('tnc',x) for x in sources], + depends=[join('tnc','tnc.h')], + **numpy_nodepr_api) + + config.add_extension('_cobyla', + sources=[join('cobyla',x) for x in ['cobyla.pyf', + 'cobyla2.f', + 'trstlp.f']], + **numpy_nodepr_api) + + sources = ['minpack2.pyf', 'dcsrch.f', 'dcstep.f'] + config.add_extension('minpack2', + sources=[join('minpack2',x) for x in sources], + **numpy_nodepr_api) + + sources = ['slsqp.pyf', 'slsqp_optmz.f'] + config.add_extension('_slsqp', sources=[join('slsqp', x) for x in sources], + **numpy_nodepr_api) + + config.add_extension('_nnls', sources=[join('nnls', x) + for x in ["nnls.f","nnls.pyf"]], + **numpy_nodepr_api) + + config.add_extension('_group_columns', sources=['_group_columns.c'],) + + config.add_subpackage('_lsq') + + config.add_subpackage('_trlib') + + config.add_subpackage('_trustregion_constr') + + config.add_subpackage('_shgo_lib') + config.add_data_dir('_shgo_lib') + + config.add_data_dir('tests') + + # Add license files + config.add_data_files('lbfgsb_src/README') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/setup.pyc 
b/project/venv/lib/python2.7/site-packages/scipy/optimize/setup.pyc new file mode 100644 index 0000000..652df25 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/slsqp.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/slsqp.py new file mode 100644 index 0000000..18828f3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/slsqp.py @@ -0,0 +1,532 @@ +""" +This module implements the Sequential Least SQuares Programming optimization +algorithm (SLSQP), originally developed by Dieter Kraft. +See http://www.netlib.org/toms/733 + +Functions +--------- +.. autosummary:: + :toctree: generated/ + + approx_jacobian + fmin_slsqp + +""" + +from __future__ import division, print_function, absolute_import + +__all__ = ['approx_jacobian', 'fmin_slsqp'] + +import numpy as np +from scipy.optimize._slsqp import slsqp +from numpy import (zeros, array, linalg, append, asfarray, concatenate, finfo, + sqrt, vstack, exp, inf, isfinite, atleast_1d) +from .optimize import wrap_function, OptimizeResult, _check_unknown_options + +__docformat__ = "restructuredtext en" + +_epsilon = sqrt(finfo(float).eps) + + +def approx_jacobian(x, func, epsilon, *args): + """ + Approximate the Jacobian matrix of a callable function. + + Parameters + ---------- + x : array_like + The state vector at which to compute the Jacobian matrix. + func : callable f(x,*args) + The vector-valued function. + epsilon : float + The perturbation used to determine the partial derivatives. + args : sequence + Additional arguments passed to func. + + Returns + ------- + An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length + of the outputs of `func`, and ``lenx`` is the number of elements in + `x`. + + Notes + ----- + The approximation is done using forward differences. + + """ + x0 = asfarray(x) + f0 = atleast_1d(func(*((x0,)+args))) + jac = zeros([len(x0), len(f0)]) + dx = zeros(len(x0)) + for i in range(len(x0)): + dx[i] = epsilon + jac[i] = (func(*((x0+dx,)+args)) - f0)/epsilon + dx[i] = 0.0 + + return jac.transpose() + + +def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None, + bounds=(), fprime=None, fprime_eqcons=None, + fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6, + iprint=1, disp=None, full_output=0, epsilon=_epsilon, + callback=None): + """ + Minimize a function using Sequential Least SQuares Programming + + Python interface function for the SLSQP Optimization subroutine + originally implemented by Dieter Kraft. + + Parameters + ---------- + func : callable f(x,*args) + Objective function. Must return a scalar. + x0 : 1-D ndarray of float + Initial guess for the independent variable(s). + eqcons : list, optional + A list of functions of length n such that + eqcons[j](x,*args) == 0.0 in a successfully optimized + problem. + f_eqcons : callable f(x,*args), optional + Returns a 1-D array in which each element must equal 0.0 in a + successfully optimized problem. If f_eqcons is specified, + eqcons is ignored. + ieqcons : list, optional + A list of functions of length n such that + ieqcons[j](x,*args) >= 0.0 in a successfully optimized + problem. + f_ieqcons : callable f(x,*args), optional + Returns a 1-D ndarray in which each element must be greater or + equal to 0.0 in a successfully optimized problem. If + f_ieqcons is specified, ieqcons is ignored. 
+ bounds : list, optional + A list of tuples specifying the lower and upper bound + for each independent variable [(xl0, xu0),(xl1, xu1),...] + Infinite values will be interpreted as large floating values. + fprime : callable `f(x,*args)`, optional + A function that evaluates the partial derivatives of func. + fprime_eqcons : callable `f(x,*args)`, optional + A function of the form `f(x, *args)` that returns the m by n + array of equality constraint normals. If not provided, + the normals will be approximated. The array returned by + fprime_eqcons should be sized as ( len(eqcons), len(x0) ). + fprime_ieqcons : callable `f(x,*args)`, optional + A function of the form `f(x, *args)` that returns the m by n + array of inequality constraint normals. If not provided, + the normals will be approximated. The array returned by + fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ). + args : sequence, optional + Additional arguments passed to func and fprime. + iter : int, optional + The maximum number of iterations. + acc : float, optional + Requested accuracy. + iprint : int, optional + The verbosity of fmin_slsqp : + + * iprint <= 0 : Silent operation + * iprint == 1 : Print summary upon completion (default) + * iprint >= 2 : Print status of each iterate and summary + disp : int, optional + Over-rides the iprint interface (preferred). + full_output : bool, optional + If False, return only the minimizer of func (default). + Otherwise, output final objective function and summary + information. + epsilon : float, optional + The step size for finite-difference derivative estimates. + callback : callable, optional + Called after each iteration, as ``callback(x)``, where ``x`` is the + current parameter vector. + + Returns + ------- + out : ndarray of float + The final minimizer of func. + fx : ndarray of float, if full_output is true + The final value of the objective function. + its : int, if full_output is true + The number of iterations. + imode : int, if full_output is true + The exit mode from the optimizer (see below). + smode : string, if full_output is true + Message describing the exit mode from the optimizer. + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'SLSQP' `method` in particular. + + Notes + ----- + Exit modes are defined as follows :: + + -1 : Gradient evaluation required (g & a) + 0 : Optimization terminated successfully. + 1 : Function evaluation required (f & c) + 2 : More equality constraints than independent variables + 3 : More than 3*n iterations in LSQ subproblem + 4 : Inequality constraints incompatible + 5 : Singular matrix E in LSQ subproblem + 6 : Singular matrix C in LSQ subproblem + 7 : Rank-deficient equality constraint subproblem HFTI + 8 : Positive directional derivative for linesearch + 9 : Iteration limit exceeded + + Examples + -------- + Examples are given :ref:`in the tutorial <tutorial-sqlsp>`. + + """ + if disp is not None: + iprint = disp + + opts = {'maxiter': iter, + 'ftol': acc, + 'iprint': iprint, + 'disp': iprint != 0, + 'eps': epsilon, + 'callback': callback} + + # Build the constraints as a tuple of dictionaries + cons = () + # 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take + # the same extra arguments as the objective function. + cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons) + cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons) + # 2. 
constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian + # (fprime_eqcons, fprime_ieqcons); also take the same extra arguments + # as the objective function. + if f_eqcons: + cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons, + 'args': args}, ) + if f_ieqcons: + cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons, + 'args': args}, ) + + res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds, + constraints=cons, **opts) + if full_output: + return res['x'], res['fun'], res['nit'], res['status'], res['message'] + else: + return res['x'] + + +def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None, + constraints=(), + maxiter=100, ftol=1.0E-6, iprint=1, disp=False, + eps=_epsilon, callback=None, + **unknown_options): + """ + Minimize a scalar function of one or more variables using Sequential + Least SQuares Programming (SLSQP). + + Options + ------- + ftol : float + Precision goal for the value of f in the stopping criterion. + eps : float + Step size used for numerical approximation of the Jacobian. + disp : bool + Set to True to print convergence messages. If False, + `verbosity` is ignored and set to 0. + maxiter : int + Maximum number of iterations. + + """ + _check_unknown_options(unknown_options) + fprime = jac + iter = maxiter + acc = ftol + epsilon = eps + + if not disp: + iprint = 0 + + # Constraints are triaged per type into a dictionary of tuples + if isinstance(constraints, dict): + constraints = (constraints, ) + + cons = {'eq': (), 'ineq': ()} + for ic, con in enumerate(constraints): + # check type + try: + ctype = con['type'].lower() + except KeyError: + raise KeyError('Constraint %d has no type defined.' % ic) + except TypeError: + raise TypeError('Constraints must be defined using a ' + 'dictionary.') + except AttributeError: + raise TypeError("Constraint's type must be a string.") + else: + if ctype not in ['eq', 'ineq']: + raise ValueError("Unknown constraint type '%s'." % con['type']) + + # check function + if 'fun' not in con: + raise ValueError('Constraint %d has no function defined.' % ic) + + # check Jacobian + cjac = con.get('jac') + if cjac is None: + # approximate Jacobian function. The factory function is needed + # to keep a reference to `fun`, see gh-4240. + def cjac_factory(fun): + def cjac(x, *args): + return approx_jacobian(x, fun, epsilon, *args) + return cjac + cjac = cjac_factory(con['fun']) + + # update constraints' dictionary + cons[ctype] += ({'fun': con['fun'], + 'jac': cjac, + 'args': con.get('args', ())}, ) + + exit_modes = {-1: "Gradient evaluation required (g & a)", + 0: "Optimization terminated successfully.", + 1: "Function evaluation required (f & c)", + 2: "More equality constraints than independent variables", + 3: "More than 3*n iterations in LSQ subproblem", + 4: "Inequality constraints incompatible", + 5: "Singular matrix E in LSQ subproblem", + 6: "Singular matrix C in LSQ subproblem", + 7: "Rank-deficient equality constraint subproblem HFTI", + 8: "Positive directional derivative for linesearch", + 9: "Iteration limit exceeded"} + + # Wrap func + feval, func = wrap_function(func, args) + + # Wrap fprime, if provided, or approx_jacobian if not + if fprime: + geval, fprime = wrap_function(fprime, args) + else: + geval, fprime = wrap_function(approx_jacobian, (func, epsilon)) + + # Transform x0 into an array. 
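+    # (asfarray also guarantees a float dtype, which the Fortran routine
+    # expects and updates in place.)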
+ x = asfarray(x0).flatten() + + # Set the parameters that SLSQP will need + # meq, mieq: number of equality and inequality constraints + meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) + for c in cons['eq']])) + mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) + for c in cons['ineq']])) + # m = The total number of constraints + m = meq + mieq + # la = The number of constraints, or 1 if there are no constraints + la = array([1, m]).max() + # n = The number of independent variables + n = len(x) + + # Define the workspaces for SLSQP + n1 = n + 1 + mineq = m - meq + n1 + n1 + len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \ + + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1 + len_jw = mineq + w = zeros(len_w) + jw = zeros(len_jw) + + # Decompose bounds into xl and xu + if bounds is None or len(bounds) == 0: + xl = np.empty(n, dtype=float) + xu = np.empty(n, dtype=float) + xl.fill(np.nan) + xu.fill(np.nan) + else: + bnds = array(bounds, float) + if bnds.shape[0] != n: + raise IndexError('SLSQP Error: the length of bounds is not ' + 'compatible with that of x0.') + + with np.errstate(invalid='ignore'): + bnderr = bnds[:, 0] > bnds[:, 1] + + if bnderr.any(): + raise ValueError('SLSQP Error: lb > ub in bounds %s.' % + ', '.join(str(b) for b in bnderr)) + xl, xu = bnds[:, 0], bnds[:, 1] + + # Mark infinite bounds with nans; the Fortran code understands this + infbnd = ~isfinite(bnds) + xl[infbnd[:, 0]] = np.nan + xu[infbnd[:, 1]] = np.nan + + # Clip initial guess to bounds (SLSQP may fail with bounds-infeasible + # initial point) + have_bound = np.isfinite(xl) + x[have_bound] = np.clip(x[have_bound], xl[have_bound], np.inf) + have_bound = np.isfinite(xu) + x[have_bound] = np.clip(x[have_bound], -np.inf, xu[have_bound]) + + # Initialize the iteration counter and the mode value + mode = array(0, int) + acc = array(acc, float) + majiter = array(iter, int) + majiter_prev = 0 + + # Initialize internal SLSQP state variables + alpha = array(0, float) + f0 = array(0, float) + gs = array(0, float) + h1 = array(0, float) + h2 = array(0, float) + h3 = array(0, float) + h4 = array(0, float) + t = array(0, float) + t0 = array(0, float) + tol = array(0, float) + iexact = array(0, int) + incons = array(0, int) + ireset = array(0, int) + itermx = array(0, int) + line = array(0, int) + n1 = array(0, int) + n2 = array(0, int) + n3 = array(0, int) + + # Print the header if iprint >= 2 + if iprint >= 2: + print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM")) + + while 1: + + if mode == 0 or mode == 1: # objective and constraint evaluation required + + # Compute objective function + fx = func(x) + try: + fx = float(np.asarray(fx)) + except (TypeError, ValueError): + raise ValueError("Objective function must return a scalar") + # Compute the constraints + if cons['eq']: + c_eq = concatenate([atleast_1d(con['fun'](x, *con['args'])) + for con in cons['eq']]) + else: + c_eq = zeros(0) + if cons['ineq']: + c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args'])) + for con in cons['ineq']]) + else: + c_ieq = zeros(0) + + # Now combine c_eq and c_ieq into a single matrix + c = concatenate((c_eq, c_ieq)) + + if mode == 0 or mode == -1: # gradient evaluation required + + # Compute the derivatives of the objective function + # For some reason SLSQP wants g dimensioned to n+1 + g = append(fprime(x), 0.0) + + # Compute the normals of the constraints + if cons['eq']: + a_eq = vstack([con['jac'](x, *con['args']) + for con in cons['eq']]) + else: # no equality constraint + 
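+                # meq is 0 in this branch, so this creates an empty (0, n)
+                # block that keeps the vstack/concatenate shapes below
+                # consistent.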
a_eq = zeros((meq, n)) + + if cons['ineq']: + a_ieq = vstack([con['jac'](x, *con['args']) + for con in cons['ineq']]) + else: # no inequality constraint + a_ieq = zeros((mieq, n)) + + # Now combine a_eq and a_ieq into a single a matrix + if m == 0: # no constraints + a = zeros((la, n)) + else: + a = vstack((a_eq, a_ieq)) + a = concatenate((a, zeros([la, 1])), 1) + + # Call SLSQP + slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw, + alpha, f0, gs, h1, h2, h3, h4, t, t0, tol, + iexact, incons, ireset, itermx, line, + n1, n2, n3) + + # call callback if major iteration has incremented + if callback is not None and majiter > majiter_prev: + callback(np.copy(x)) + + # Print the status of the current iterate if iprint > 2 and the + # major iteration has incremented + if iprint >= 2 and majiter > majiter_prev: + print("%5i %5i % 16.6E % 16.6E" % (majiter, feval[0], + fx, linalg.norm(g))) + + # If exit mode is not -1 or 1, slsqp has completed + if abs(mode) != 1: + break + + majiter_prev = int(majiter) + + # Optimization loop complete. Print status if requested + if iprint >= 1: + print(exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')') + print(" Current function value:", fx) + print(" Iterations:", majiter) + print(" Function evaluations:", feval[0]) + print(" Gradient evaluations:", geval[0]) + + return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter), + nfev=feval[0], njev=geval[0], status=int(mode), + message=exit_modes[int(mode)], success=(mode == 0)) + + +if __name__ == '__main__': + + # objective function + def fun(x, r=[4, 2, 4, 2, 1]): + """ Objective function """ + return exp(x[0]) * (r[0] * x[0]**2 + r[1] * x[1]**2 + + r[2] * x[0] * x[1] + r[3] * x[1] + + r[4]) + + # bounds + bnds = array([[-inf]*2, [inf]*2]).T + bnds[:, 0] = [0.1, 0.2] + + # constraints + def feqcon(x, b=1): + """ Equality constraint """ + return array([x[0]**2 + x[1] - b]) + + def jeqcon(x, b=1): + """ Jacobian of equality constraint """ + return array([[2*x[0], 1]]) + + def fieqcon(x, c=10): + """ Inequality constraint """ + return array([x[0] * x[1] + c]) + + def jieqcon(x, c=10): + """ Jacobian of Inequality constraint """ + return array([[1, 1]]) + + # constraints dictionaries + cons = ({'type': 'eq', 'fun': feqcon, 'jac': jeqcon, 'args': (1, )}, + {'type': 'ineq', 'fun': fieqcon, 'jac': jieqcon, 'args': (10,)}) + + # Bounds constraint problem + print(' Bounds constraints '.center(72, '-')) + print(' * fmin_slsqp') + x, f = fmin_slsqp(fun, array([-1, 1]), bounds=bnds, disp=1, + full_output=True)[:2] + print(' * _minimize_slsqp') + res = _minimize_slsqp(fun, array([-1, 1]), bounds=bnds, + **{'disp': True}) + + # Equality and inequality constraints problem + print(' Equality and inequality constraints '.center(72, '-')) + print(' * fmin_slsqp') + x, f = fmin_slsqp(fun, array([-1, 1]), + f_eqcons=feqcon, fprime_eqcons=jeqcon, + f_ieqcons=fieqcon, fprime_ieqcons=jieqcon, + disp=1, full_output=True)[:2] + print(' * _minimize_slsqp') + res = _minimize_slsqp(fun, array([-1, 1]), constraints=cons, + **{'disp': True}) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/slsqp.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/slsqp.pyc new file mode 100644 index 0000000..8430de3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/slsqp.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/__init__.py new file mode 100644 index 
0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/__init__.pyc new file mode 100644 index 0000000..96027ad Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__basinhopping.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__basinhopping.py new file mode 100644 index 0000000..84deeb8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__basinhopping.py @@ -0,0 +1,447 @@ +""" +Unit tests for the basin hopping global minimization algorithm. +""" +from __future__ import division, print_function, absolute_import +import copy + +from numpy.testing import assert_almost_equal, assert_equal, assert_ +from pytest import raises as assert_raises +import numpy as np +from numpy import cos, sin + +from scipy.optimize import basinhopping, OptimizeResult +from scipy.optimize._basinhopping import ( + Storage, RandomDisplacement, Metropolis, AdaptiveStepsize) + + +def func1d(x): + f = cos(14.5 * x - 0.3) + (x + 0.2) * x + df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2) + return f, df + + +def func2d_nograd(x): + f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0] + return f + + +def func2d(x): + f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0] + df = np.zeros(2) + df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 + df[1] = 2. * x[1] + 0.2 + return f, df + + +def func2d_easyderiv(x): + f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0] + df = np.zeros(2) + df[0] = 4.0*x[0] + 2.0*x[1] - 6.0 + df[1] = 2.0*x[0] + 4.0*x[1] + + return f, df + + +class MyTakeStep1(RandomDisplacement): + """use a copy of displace, but have it set a special parameter to + make sure it's actually being used.""" + def __init__(self): + self.been_called = False + super(MyTakeStep1, self).__init__() + + def __call__(self, x): + self.been_called = True + return super(MyTakeStep1, self).__call__(x) + + +def myTakeStep2(x): + """redo RandomDisplacement in function form without the attribute stepsize + to make sure everything still works ok + """ + s = 0.5 + x += np.random.uniform(-s, s, np.shape(x)) + return x + + +class MyAcceptTest(object): + """pass a custom accept test + + This does nothing but make sure it's being used and ensure all the + possible return values are accepted + """ + def __init__(self): + self.been_called = False + self.ncalls = 0 + self.testres = [False, 'force accept', True, np.bool_(True), + np.bool_(False), [], {}, 0, 1] + + def __call__(self, **kwargs): + self.been_called = True + self.ncalls += 1 + if self.ncalls - 1 < len(self.testres): + return self.testres[self.ncalls - 1] + else: + return True + + +class MyCallBack(object): + """pass a custom callback function + + This makes sure it's being used. It also returns True after 10 + steps to ensure that it's stopping early. + + """ + def __init__(self): + self.been_called = False + self.ncalls = 0 + + def __call__(self, x, f, accepted): + self.been_called = True + self.ncalls += 1 + if self.ncalls == 10: + return True + + +class TestBasinHopping(object): + + def setup_method(self): + """ Tests setup. + + Run tests based on the 1-D and 2-D functions described above. 
+ """ + self.x0 = (1.0, [1.0, 1.0]) + self.sol = (-0.195, np.array([-0.195, -0.1])) + + self.tol = 3 # number of decimal places + + self.niter = 100 + self.disp = False + + # fix random seed + np.random.seed(1234) + + self.kwargs = {"method": "L-BFGS-B", "jac": True} + self.kwargs_nograd = {"method": "L-BFGS-B"} + + def test_TypeError(self): + # test the TypeErrors are raised on bad input + i = 1 + # if take_step is passed, it must be callable + assert_raises(TypeError, basinhopping, func2d, self.x0[i], + take_step=1) + # if accept_test is passed, it must be callable + assert_raises(TypeError, basinhopping, func2d, self.x0[i], + accept_test=1) + + def test_1d_grad(self): + # test 1d minimizations with gradient + i = 0 + res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_2d(self): + # test 2d minimizations with gradient + i = 1 + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + assert_(res.nfev > 0) + + def test_njev(self): + # test njev is returned correctly + i = 1 + minimizer_kwargs = self.kwargs.copy() + # L-BFGS-B doesn't use njev, but BFGS does + minimizer_kwargs["method"] = "BFGS" + res = basinhopping(func2d, self.x0[i], + minimizer_kwargs=minimizer_kwargs, niter=self.niter, + disp=self.disp) + assert_(res.nfev > 0) + assert_equal(res.nfev, res.njev) + + def test_jac(self): + # test jacobian returned + minimizer_kwargs = self.kwargs.copy() + # BFGS returns a Jacobian + minimizer_kwargs["method"] = "BFGS" + + res = basinhopping(func2d_easyderiv, [0.0, 0.0], + minimizer_kwargs=minimizer_kwargs, niter=self.niter, + disp=self.disp) + + assert_(hasattr(res.lowest_optimization_result, "jac")) + + # in this case, the jacobian is just [df/dx, df/dy] + _, jacobian = func2d_easyderiv(res.x) + assert_almost_equal(res.lowest_optimization_result.jac, jacobian, + self.tol) + + def test_2d_nograd(self): + # test 2d minimizations without gradient + i = 1 + res = basinhopping(func2d_nograd, self.x0[i], + minimizer_kwargs=self.kwargs_nograd, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_all_minimizers(self): + # test 2d minimizations with gradient. Nelder-Mead, Powell and COBYLA + # don't accept jac=True, so aren't included here. + i = 1 + methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP'] + minimizer_kwargs = copy.copy(self.kwargs) + for method in methods: + minimizer_kwargs["method"] = method + res = basinhopping(func2d, self.x0[i], + minimizer_kwargs=minimizer_kwargs, + niter=self.niter, disp=self.disp) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_all_nograd_minimizers(self): + # test 2d minimizations without gradient. Newton-CG requires jac=True, + # so not included here. 
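+        # [Editor's note - illustrative sketch, not part of the upstream
+        # file.] A minimal gradient-free call mirrors what this test loops
+        # over:
+        #
+        #     from scipy.optimize import basinhopping
+        #     res = basinhopping(func2d_nograd, [1.0, 1.0],
+        #                        minimizer_kwargs={"method": "Nelder-Mead"})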
+ i = 1 + methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP', + 'Nelder-Mead', 'Powell', 'COBYLA'] + minimizer_kwargs = copy.copy(self.kwargs_nograd) + for method in methods: + minimizer_kwargs["method"] = method + res = basinhopping(func2d_nograd, self.x0[i], + minimizer_kwargs=minimizer_kwargs, + niter=self.niter, disp=self.disp) + tol = self.tol + if method == 'COBYLA': + tol = 2 + assert_almost_equal(res.x, self.sol[i], decimal=tol) + + def test_pass_takestep(self): + # test that passing a custom takestep works + # also test that the stepsize is being adjusted + takestep = MyTakeStep1() + initial_step_size = takestep.stepsize + i = 1 + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp, + take_step=takestep) + assert_almost_equal(res.x, self.sol[i], self.tol) + assert_(takestep.been_called) + # make sure that the built in adaptive step size has been used + assert_(initial_step_size != takestep.stepsize) + + def test_pass_simple_takestep(self): + # test that passing a custom takestep without attribute stepsize + takestep = myTakeStep2 + i = 1 + res = basinhopping(func2d_nograd, self.x0[i], + minimizer_kwargs=self.kwargs_nograd, + niter=self.niter, disp=self.disp, + take_step=takestep) + assert_almost_equal(res.x, self.sol[i], self.tol) + + def test_pass_accept_test(self): + # test passing a custom accept test + # makes sure it's being used and ensures all the possible return values + # are accepted. + accept_test = MyAcceptTest() + i = 1 + # there's no point in running it more than a few steps. + basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=10, disp=self.disp, accept_test=accept_test) + assert_(accept_test.been_called) + + def test_pass_callback(self): + # test passing a custom callback function + # This makes sure it's being used. It also returns True after 10 steps + # to ensure that it's stopping early. + callback = MyCallBack() + i = 1 + # there's no point in running it more than a few steps. 
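+        # [Editor's note] niter=30 below, but MyCallBack returns True on its
+        # 10th call, so basinhopping should stop early with res.nit == 10
+        # (asserted after the call).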
+ res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=30, disp=self.disp, callback=callback) + assert_(callback.been_called) + assert_("callback" in res.message[0]) + assert_equal(res.nit, 10) + + def test_minimizer_fail(self): + # test if a minimizer fails + i = 1 + self.kwargs["options"] = dict(maxiter=0) + self.niter = 10 + res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp) + # the number of failed minimizations should be the number of + # iterations + 1 + assert_equal(res.nit + 1, res.minimization_failures) + + def test_niter_zero(self): + # gh5915, what happens if you call basinhopping with niter=0 + i = 0 + basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=0, disp=self.disp) + + def test_seed_reproducibility(self): + # seed should ensure reproducibility between runs + minimizer_kwargs = {"method": "L-BFGS-B", "jac": True} + + f_1 = [] + + def callback(x, f, accepted): + f_1.append(f) + + basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, + niter=10, callback=callback, seed=10) + + f_2 = [] + + def callback2(x, f, accepted): + f_2.append(f) + + basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, + niter=10, callback=callback2, seed=10) + assert_equal(np.array(f_1), np.array(f_2)) + + def test_monotonic_basin_hopping(self): + # test 1d minimizations with gradient and T=0 + i = 0 + res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp, T=0) + assert_almost_equal(res.x, self.sol[i], self.tol) + + +class Test_Storage(object): + def setup_method(self): + self.x0 = np.array(1) + self.f0 = 0 + + minres = OptimizeResult() + minres.x = self.x0 + minres.fun = self.f0 + + self.storage = Storage(minres) + + def test_higher_f_rejected(self): + new_minres = OptimizeResult() + new_minres.x = self.x0 + 1 + new_minres.fun = self.f0 + 1 + + ret = self.storage.update(new_minres) + minres = self.storage.get_lowest() + assert_equal(self.x0, minres.x) + assert_equal(self.f0, minres.fun) + assert_(not ret) + + def test_lower_f_accepted(self): + new_minres = OptimizeResult() + new_minres.x = self.x0 + 1 + new_minres.fun = self.f0 - 1 + + ret = self.storage.update(new_minres) + minres = self.storage.get_lowest() + assert_(self.x0 != minres.x) + assert_(self.f0 != minres.fun) + assert_(ret) + + +class Test_RandomDisplacement(object): + def setup_method(self): + self.stepsize = 1.0 + self.displace = RandomDisplacement(stepsize=self.stepsize) + self.N = 300000 + self.x0 = np.zeros([self.N]) + + def test_random(self): + # the mean should be 0 + # the variance should be (2*stepsize)**2 / 12 + # note these tests are random, they will fail from time to time + x = self.displace(self.x0) + v = (2. * self.stepsize) ** 2 / 12 + assert_almost_equal(np.mean(x), 0., 1) + assert_almost_equal(np.var(x), v, 1) + + +class Test_Metropolis(object): + def setup_method(self): + self.T = 2. + self.met = Metropolis(self.T) + + def test_boolean_return(self): + # the return must be a bool. else an error will be raised in + # basinhopping + ret = self.met(f_new=0., f_old=1.) + assert isinstance(ret, bool) + + def test_lower_f_accepted(self): + assert_(self.met(f_new=0., f_old=1.)) + + def test_KeyError(self): + # should raise KeyError if kwargs f_old or f_new is not passed + assert_raises(KeyError, self.met, f_old=1.) + assert_raises(KeyError, self.met, f_new=1.) 
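+    # [Editor's note - illustrative sketch, not part of the upstream file.]
+    # The Metropolis criterion exercised by these tests accepts an uphill
+    # step with probability exp(-(f_new - f_old) / T); assuming T > 0, a
+    # minimal standalone version would look like:
+    #
+    #     import numpy as np
+    #     def metropolis_accept(f_new, f_old, T, rng=np.random):
+    #         # always accept downhill moves; accept uphill moves with
+    #         # Boltzmann probability
+    #         w = np.exp(min(0.0, -(f_new - f_old) / T))
+    #         return w >= rng.uniform()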
+
+    def test_accept(self):
+        # test that steps are randomly accepted for f_new > f_old
+        one_accept = False
+        one_reject = False
+        for i in range(1000):
+            if one_accept and one_reject:
+                break
+            ret = self.met(f_new=1., f_old=0.5)
+            if ret:
+                one_accept = True
+            else:
+                one_reject = True
+        assert_(one_accept)
+        assert_(one_reject)
+
+    def test_GH7495(self):
+        # an overflow in exp was producing a RuntimeWarning
+        # create own object here in case someone changes self.T
+        met = Metropolis(2)
+        with np.errstate(over='raise'):
+            met.accept_reject(0, 2000)
+
+
+class Test_AdaptiveStepsize(object):
+    def setup_method(self):
+        self.stepsize = 1.
+        self.ts = RandomDisplacement(stepsize=self.stepsize)
+        self.target_accept_rate = 0.5
+        self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
+                                         accept_rate=self.target_accept_rate)
+
+    def test_adaptive_increase(self):
+        # if few steps are rejected, the stepsize should increase
+        x = 0.
+        self.takestep(x)
+        self.takestep.report(False)
+        for i in range(self.takestep.interval):
+            self.takestep(x)
+            self.takestep.report(True)
+        assert_(self.ts.stepsize > self.stepsize)
+
+    def test_adaptive_decrease(self):
+        # if few steps are accepted, the stepsize should decrease
+        x = 0.
+        self.takestep(x)
+        self.takestep.report(True)
+        for i in range(self.takestep.interval):
+            self.takestep(x)
+            self.takestep.report(False)
+        assert_(self.ts.stepsize < self.stepsize)
+
+    def test_all_accepted(self):
+        # test that everything works OK if all steps were accepted
+        x = 0.
+        for i in range(self.takestep.interval + 1):
+            self.takestep(x)
+            self.takestep.report(True)
+        assert_(self.ts.stepsize > self.stepsize)
+
+    def test_all_rejected(self):
+        # test that everything works OK if all steps were rejected
+        x = 0.
+        for i in range(self.takestep.interval + 1):
+            self.takestep(x)
+            self.takestep.report(False)
+        assert_(self.ts.stepsize < self.stepsize)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__basinhopping.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__basinhopping.pyc
new file mode 100644
index 0000000..5cb9ae9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__basinhopping.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__differential_evolution.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__differential_evolution.py
new file mode 100644
index 0000000..832dcb4
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__differential_evolution.py
@@ -0,0 +1,540 @@
+"""
+Unit tests for the differential evolution global minimization algorithm.
+""" +from scipy.optimize import _differentialevolution +from scipy.optimize._differentialevolution import DifferentialEvolutionSolver +from scipy.optimize import differential_evolution +import numpy as np +from scipy.optimize import rosen +from numpy.testing import (assert_equal, assert_allclose, + assert_almost_equal, + assert_string_equal, assert_) +from pytest import raises as assert_raises, warns + + +class TestDifferentialEvolutionSolver(object): + + def setup_method(self): + self.old_seterr = np.seterr(invalid='raise') + self.limits = np.array([[0., 0.], + [2., 2.]]) + self.bounds = [(0., 2.), (0., 2.)] + + self.dummy_solver = DifferentialEvolutionSolver(self.quadratic, + [(0, 100)]) + + # dummy_solver2 will be used to test mutation strategies + self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic, + [(0, 1)], + popsize=7, + mutation=0.5) + # create a population that's only 7 members long + # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] + population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T + self.dummy_solver2.population = population + + def teardown_method(self): + np.seterr(**self.old_seterr) + + def quadratic(self, x): + return x[0]**2 + + def test__strategy_resolves(self): + # test that the correct mutation function is resolved by + # different requested strategy arguments + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best1exp') + assert_equal(solver.strategy, 'best1exp') + assert_equal(solver.mutation_func.__name__, '_best1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best1bin') + assert_equal(solver.strategy, 'best1bin') + assert_equal(solver.mutation_func.__name__, '_best1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand1bin') + assert_equal(solver.strategy, 'rand1bin') + assert_equal(solver.mutation_func.__name__, '_rand1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand1exp') + assert_equal(solver.strategy, 'rand1exp') + assert_equal(solver.mutation_func.__name__, '_rand1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand2exp') + assert_equal(solver.strategy, 'rand2exp') + assert_equal(solver.mutation_func.__name__, '_rand2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best2bin') + assert_equal(solver.strategy, 'best2bin') + assert_equal(solver.mutation_func.__name__, '_best2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand2bin') + assert_equal(solver.strategy, 'rand2bin') + assert_equal(solver.mutation_func.__name__, '_rand2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='rand2exp') + assert_equal(solver.strategy, 'rand2exp') + assert_equal(solver.mutation_func.__name__, '_rand2') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='randtobest1bin') + assert_equal(solver.strategy, 'randtobest1bin') + assert_equal(solver.mutation_func.__name__, '_randtobest1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='randtobest1exp') + assert_equal(solver.strategy, 'randtobest1exp') + assert_equal(solver.mutation_func.__name__, '_randtobest1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='currenttobest1bin') + assert_equal(solver.strategy, 'currenttobest1bin') + assert_equal(solver.mutation_func.__name__, '_currenttobest1') + + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='currenttobest1exp') + assert_equal(solver.strategy, 
'currenttobest1exp') + assert_equal(solver.mutation_func.__name__, '_currenttobest1') + + def test__mutate1(self): + # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc. + result = np.array([0.05]) + trial = self.dummy_solver2._best1((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + result = np.array([0.25]) + trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + def test__mutate2(self): + # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc. + # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] + + result = np.array([-0.1]) + trial = self.dummy_solver2._best2((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + result = np.array([0.1]) + trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + def test__randtobest1(self): + # strategies randtobest/1/* + result = np.array([0.15]) + trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + def test__currenttobest1(self): + # strategies currenttobest/1/* + result = np.array([0.1]) + trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6)) + assert_allclose(trial, result) + + def test_can_init_with_dithering(self): + mutation = (0.5, 1) + solver = DifferentialEvolutionSolver(self.quadratic, + self.bounds, + mutation=mutation) + + assert_equal(solver.dither, list(mutation)) + + def test_invalid_mutation_values_arent_accepted(self): + func = rosen + mutation = (0.5, 3) + assert_raises(ValueError, + DifferentialEvolutionSolver, + func, + self.bounds, + mutation=mutation) + + mutation = (-1, 1) + assert_raises(ValueError, + DifferentialEvolutionSolver, + func, + self.bounds, + mutation=mutation) + + mutation = (0.1, np.nan) + assert_raises(ValueError, + DifferentialEvolutionSolver, + func, + self.bounds, + mutation=mutation) + + mutation = 0.5 + solver = DifferentialEvolutionSolver(func, + self.bounds, + mutation=mutation) + assert_equal(0.5, solver.scale) + assert_equal(None, solver.dither) + + def test__scale_parameters(self): + trial = np.array([0.3]) + assert_equal(30, self.dummy_solver._scale_parameters(trial)) + + # it should also work with the limits reversed + self.dummy_solver.limits = np.array([[100], [0.]]) + assert_equal(30, self.dummy_solver._scale_parameters(trial)) + + def test__unscale_parameters(self): + trial = np.array([30]) + assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) + + # it should also work with the limits reversed + self.dummy_solver.limits = np.array([[100], [0.]]) + assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) + + def test__ensure_constraint(self): + trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001]) + self.dummy_solver._ensure_constraint(trial) + + assert_equal(trial[2], 0.9) + assert_(np.logical_and(trial >= 0, trial <= 1).all()) + + def test_differential_evolution(self): + # test that the Jmin of DifferentialEvolutionSolver + # is the same as the function evaluation + solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)]) + result = solver.solve() + assert_almost_equal(result.fun, self.quadratic(result.x)) + + def test_best_solution_retrieval(self): + # test that the getter property method for the best solution works. 
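+        # [Editor's note] `solver.x` is a getter property that should return
+        # the best solution found so far, so it must match `result.x`
+        # returned by solve() (asserted below).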
+ solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)]) + result = solver.solve() + assert_almost_equal(result.x, solver.x) + + def test_callback_terminates(self): + # test that if the callback returns true, then the minimization halts + bounds = [(0, 2), (0, 2)] + + def callback(param, convergence=0.): + return True + + result = differential_evolution(rosen, bounds, callback=callback) + + assert_string_equal(result.message, + 'callback function requested stop early ' + 'by returning True') + + def test_args_tuple_is_passed(self): + # test that the args tuple is passed to the cost function properly. + bounds = [(-10, 10)] + args = (1., 2., 3.) + + def quadratic(x, *args): + if type(args) != tuple: + raise ValueError('args should be a tuple') + return args[0] + args[1] * x + args[2] * x**2. + + result = differential_evolution(quadratic, + bounds, + args=args, + polish=True) + assert_almost_equal(result.fun, 2 / 3.) + + def test_init_with_invalid_strategy(self): + # test that passing an invalid strategy raises ValueError + func = rosen + bounds = [(-3, 3)] + assert_raises(ValueError, + differential_evolution, + func, + bounds, + strategy='abc') + + def test_bounds_checking(self): + # test that the bounds checking works + func = rosen + bounds = [(-3, None)] + assert_raises(ValueError, + differential_evolution, + func, + bounds) + bounds = [(-3)] + assert_raises(ValueError, + differential_evolution, + func, + bounds) + bounds = [(-3, 3), (3, 4, 5)] + assert_raises(ValueError, + differential_evolution, + func, + bounds) + + def test_select_samples(self): + # select_samples should return 5 separate random numbers. + limits = np.arange(12., dtype='float64').reshape(2, 6) + bounds = list(zip(limits[0, :], limits[1, :])) + solver = DifferentialEvolutionSolver(None, bounds, popsize=1) + candidate = 0 + r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5) + assert_equal( + len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6) + + def test_maxiter_stops_solve(self): + # test that if the maximum number of iterations is exceeded + # the solver stops. + solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1) + result = solver.solve() + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of iterations has been exceeded.') + + def test_maxfun_stops_solve(self): + # test that if the maximum number of function evaluations is exceeded + # during initialisation the solver stops + solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1, + polish=False) + result = solver.solve() + + assert_equal(result.nfev, 2) + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of function evaluations has ' + 'been exceeded.') + + # test that if the maximum number of function evaluations is exceeded + # during the actual minimisation, then the solver stops. + # Have to turn polishing off, as this will still occur even if maxfun + # is reached. For popsize=5 and len(bounds)=2, then there are only 10 + # function evaluations during initialisation. 
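+        # [Editor's note] worked numbers: num_population_members =
+        # popsize * len(bounds) = 5 * 2 = 10, so initialisation costs 10
+        # evaluations and maxfun=40 is first exceeded on evaluation 41,
+        # matching the assert_equal(result.nfev, 41) below.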
+ solver = DifferentialEvolutionSolver(rosen, + self.bounds, + popsize=5, + polish=False, + maxfun=40) + result = solver.solve() + + assert_equal(result.nfev, 41) + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of function evaluations has ' + 'been exceeded.') + + # now repeat for updating='deferred version + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + popsize=5, + polish=False, + maxfun=40, + updating='deferred') + result = solver.solve() + + assert_equal(result.nfev, 40) + assert_equal(result.success, False) + assert_equal(result.message, + 'Maximum number of function evaluations has ' + 'been reached.') + + def test_quadratic(self): + # test the quadratic function from object + solver = DifferentialEvolutionSolver(self.quadratic, + [(-100, 100)], + tol=0.02) + solver.solve() + assert_equal(np.argmin(solver.population_energies), 0) + + def test_quadratic_from_diff_ev(self): + # test the quadratic function from differential_evolution function + differential_evolution(self.quadratic, + [(-100, 100)], + tol=0.02) + + def test_seed_gives_repeatability(self): + result = differential_evolution(self.quadratic, + [(-100, 100)], + polish=False, + seed=1, + tol=0.5) + result2 = differential_evolution(self.quadratic, + [(-100, 100)], + polish=False, + seed=1, + tol=0.5) + assert_equal(result.x, result2.x) + assert_equal(result.nfev, result2.nfev) + + def test_exp_runs(self): + # test whether exponential mutation loop runs + solver = DifferentialEvolutionSolver(rosen, + self.bounds, + strategy='best1exp', + maxiter=1) + + solver.solve() + + def test_gh_4511_regression(self): + # This modification of the differential evolution docstring example + # uses a custom popsize that had triggered an off-by-one error. + # Because we do not care about solving the optimization problem in + # this test, we use maxiter=1 to reduce the testing time. + bounds = [(-5, 5), (-5, 5)] + result = differential_evolution(rosen, bounds, popsize=1815, maxiter=1) + + def test_calculate_population_energies(self): + # if popsize is 3 then the overall generation has size (6,) + solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3) + solver._calculate_population_energies(solver.population) + solver._promote_lowest_energy() + assert_equal(np.argmin(solver.population_energies), 0) + + # initial calculation of the energies should require 6 nfev. + assert_equal(solver._nfev, 6) + + def test_iteration(self): + # test that DifferentialEvolutionSolver is iterable + # if popsize is 3 then the overall generation has size (6,) + solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3, + maxfun=12) + x, fun = next(solver) + assert_equal(np.size(x, 0), 2) + + # 6 nfev are required for initial calculation of energies, 6 nfev are + # required for the evolution of the 6 population members. + assert_equal(solver._nfev, 12) + + # the next generation should halt because it exceeds maxfun + assert_raises(StopIteration, next, solver) + + # check a proper minimisation can be done by an iterable solver + solver = DifferentialEvolutionSolver(rosen, self.bounds) + for i, soln in enumerate(solver): + x_current, fun_current = soln + # need to have this otherwise the solver would never stop. 
+            if i == 1000:
+                break
+
+        assert_almost_equal(fun_current, 0)
+
+    def test_convergence(self):
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
+                                             polish=False)
+        solver.solve()
+        assert_(solver.convergence < 0.2)
+
+    def test_maxiter_none_GH5731(self):
+        # Pre 0.17 the default for maxiter and maxfun was None; the
+        # numerical defaults are now 1000 and np.inf. However, some scripts
+        # will still supply None for both of those; without special handling
+        # this would raise a TypeError in the solve method.
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,
+                                             maxfun=None)
+        solver.solve()
+
+    def test_population_initiation(self):
+        # test the different modes of population initiation
+
+        # init must be either 'latinhypercube' or 'random'
+        # raising ValueError if something else is passed in
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      *(rosen, self.bounds),
+                      **{'init': 'rubbish'})
+
+        solver = DifferentialEvolutionSolver(rosen, self.bounds)
+
+        # check that population initiation:
+        # 1) resets _nfev to 0
+        # 2) all population energies are np.inf
+        solver.init_population_random()
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        solver.init_population_lhs()
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        # we should be able to initialise with our own array
+        population = np.linspace(-1, 3, 10).reshape(5, 2)
+        solver = DifferentialEvolutionSolver(rosen, self.bounds,
+                                             init=population,
+                                             strategy='best2bin',
+                                             atol=0.01, seed=1, popsize=5)
+
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+        assert_(solver.num_population_members == 5)
+        assert_(solver.population_shape == (5, 2))
+
+        # check that the population was initialised correctly
+        unscaled_population = np.clip(solver._unscale_parameters(population),
+                                      0, 1)
+        assert_almost_equal(solver.population[:5], unscaled_population)
+
+        # population values need to be clipped to bounds
+        assert_almost_equal(np.min(solver.population[:5]), 0)
+        assert_almost_equal(np.max(solver.population[:5]), 1)
+
+        # shouldn't be able to initialise with an array if it's the wrong shape
+        # this would have too many parameters
+        population = np.linspace(-1, 3, 15).reshape(5, 3)
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      *(rosen, self.bounds),
+                      **{'init': population})
+
+    def test_infinite_objective_function(self):
+        # Test that there are no problems if the objective function
+        # returns inf on some runs
+        def sometimes_inf(x):
+            if x[0] < .5:
+                return np.inf
+            return x[1]
+        bounds = [(0, 1), (0, 1)]
+        x_fit = differential_evolution(sometimes_inf,
+                                       bounds=[(0, 1), (0, 1)],
+                                       disp=False)
+
+    def test_deferred_updating(self):
+        # check setting of deferred updating, with default workers
+        bounds = [(0., 2.), (0., 2.), (0, 2), (0, 2)]
+        solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred')
+        assert_(solver._updating == 'deferred')
+        assert_(solver._mapwrapper._mapfunc is map)
+        solver.solve()
+
+    def test_immediate_updating(self):
+        # check setting of immediate updating, with default workers
+        bounds = [(0., 2.), (0., 2.)]
+        solver = DifferentialEvolutionSolver(rosen, bounds)
+        assert_(solver._updating == 'immediate')
+
+        # should raise a UserWarning because the updating='immediate'
+        # is being overridden by the workers keyword
+        with warns(UserWarning):
+            solver = DifferentialEvolutionSolver(rosen, bounds, workers=2)
+            assert_(solver._updating == 'deferred')
+
+    def
test_parallel(self): + # smoke test for parallelisation with deferred updating + bounds = [(0., 2.), (0., 2.)] + with DifferentialEvolutionSolver(rosen, bounds, + updating='deferred', + workers=2) as solver: + assert_(solver._mapwrapper.pool is not None) + assert_(solver._updating == 'deferred') + solver.solve() + + def test_converged(self): + solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)]) + solver.solve() + assert_(solver.converged()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__differential_evolution.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__differential_evolution.pyc new file mode 100644 index 0000000..a2ecd5c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__differential_evolution.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__dual_annealing.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__dual_annealing.py new file mode 100644 index 0000000..1e7b645 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__dual_annealing.py @@ -0,0 +1,266 @@ +# Dual annealing unit tests implementation. +# Copyright (c) 2018 Sylvain Gubian <sylvain.gubian@pmi.com>, +# Yang Xiang <yang.xiang@pmi.com> +# Author: Sylvain Gubian, PMP S.A. +""" +Unit tests for the dual annealing global optimizer +""" +from scipy.optimize import dual_annealing +from scipy.optimize._dual_annealing import VisitingDistribution +from scipy.optimize._dual_annealing import ObjectiveFunWrapper +from scipy.optimize._dual_annealing import EnergyState +from scipy.optimize._dual_annealing import LocalSearchWrapper +from scipy.optimize import rosen, rosen_der +import numpy as np +from numpy.testing import (assert_equal, TestCase, assert_allclose, + assert_array_less) +from pytest import raises as assert_raises +from scipy._lib._util import check_random_state + + +class TestDualAnnealing(TestCase): + + def setUp(self): + # A function that returns always infinity for initialization tests + self.weirdfunc = lambda x: np.inf + # 2-D bounds for testing function + self.ld_bounds = [(-5.12, 5.12)] * 2 + # 4-D bounds for testing function + self.hd_bounds = self.ld_bounds * 4 + # Number of values to be generated for testing visit function + self.nbtestvalues = 5000 + self.high_temperature = 5230 + self.low_temperature = 0.1 + self.qv = 2.62 + self.seed = 1234 + self.rs = check_random_state(self.seed) + self.nb_fun_call = 0 + self.ngev = 0 + + def tearDown(self): + pass + + def callback(self, x, f, context): + # For testing callback mechanism. 
Should stop for e <= 1 as
+        # the callback function returns True
+        if f <= 1.0:
+            return True
+
+    def func(self, x, args=()):
+        # Using the Rastrigin function for performing tests
+        if args:
+            shift = args
+        else:
+            shift = 0
+        y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * (
+            x - shift))) + 10 * np.size(x) + shift
+        self.nb_fun_call += 1
+        return y
+
+    def rosen_der_wrapper(self, x, args=()):
+        self.ngev += 1
+        return rosen_der(x, *args)
+
+    def test_visiting_stepping(self):
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        dim = lower.size
+        vd = VisitingDistribution(lower, upper, self.qv, self.rs)
+        values = np.zeros(dim)
+        x_step_low = vd.visiting(values, 0, self.high_temperature)
+        # Make sure that only the first component is changed
+        assert_equal(np.not_equal(x_step_low, 0), True)
+        values = np.zeros(dim)
+        x_step_high = vd.visiting(values, dim, self.high_temperature)
+        # Make sure that a component other than the one at dim has changed
+        assert_equal(np.not_equal(x_step_high[0], 0), True)
+
+    def test_visiting_dist_high_temperature(self):
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        vd = VisitingDistribution(lower, upper, self.qv, self.rs)
+        values = np.zeros(self.nbtestvalues)
+        for i in np.arange(self.nbtestvalues):
+            values[i] = vd.visit_fn(self.high_temperature)
+        # The visiting distribution is a distorted version of the
+        # Cauchy-Lorentz distribution, and has no 1st or higher moments
+        # (no mean defined, no variance defined).
+        # Check that heavy-tailed values are generated
+        assert_array_less(np.min(values), 1e-10)
+        assert_array_less(1e+10, np.max(values))
+
+    def test_reset(self):
+        owf = ObjectiveFunWrapper(self.weirdfunc)
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        es = EnergyState(lower, upper)
+        assert_raises(ValueError, es.reset, owf, check_random_state(None))
+
+    def test_low_dim(self):
+        ret = dual_annealing(
+            self.func, self.ld_bounds, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-12)
+
+    def test_high_dim(self):
+        ret = dual_annealing(self.func, self.hd_bounds)
+        assert_allclose(ret.fun, 0., atol=1e-12)
+
+    def test_low_dim_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True)
+        assert_allclose(ret.fun, 0., atol=1e-4)
+
+    def test_high_dim_no_ls(self):
+        ret = dual_annealing(self.func, self.hd_bounds,
+                             no_local_search=True)
+        assert_allclose(ret.fun, 0., atol=1e-4)
+
+    def test_nb_fun_call(self):
+        ret = dual_annealing(self.func, self.ld_bounds)
+        assert_equal(self.nb_fun_call, ret.nfev)
+
+    def test_nb_fun_call_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True)
+        assert_equal(self.nb_fun_call, ret.nfev)
+
+    def test_max_reinit(self):
+        assert_raises(ValueError, dual_annealing, self.weirdfunc,
+                      self.ld_bounds)
+
+    def test_reproduce(self):
+        seed = 1234
+        res1 = dual_annealing(self.func, self.ld_bounds, seed=seed)
+        res2 = dual_annealing(self.func, self.ld_bounds, seed=seed)
+        res3 = dual_annealing(self.func, self.ld_bounds, seed=seed)
+        # If we have reproducible results, the x components found have to
+        # be exactly the same, which is not the case without seeding
+        assert_equal(res1.x, res2.x)
+        assert_equal(res1.x, res3.x)
+
+    def test_bounds_integrity(self):
+        wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      wrong_bounds)
+
+    def test_bound_validity(self):
+        invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)]
+
assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+        invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+        invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+
+    def test_max_fun_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds, maxfun=100)
+
+        ls_max_iter = min(max(
+            len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO,
+            LocalSearchWrapper.LS_MAXITER_MIN),
+            LocalSearchWrapper.LS_MAXITER_MAX)
+        assert ret.nfev <= 100 + ls_max_iter
+
+    def test_max_fun_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True, maxfun=500)
+        assert ret.nfev <= 500
+
+    def test_maxiter(self):
+        ret = dual_annealing(self.func, self.ld_bounds, maxiter=700)
+        assert ret.nit <= 700
+
+    # Testing that args are passed correctly for dual_annealing
+    def test_fun_args_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             args=((3.14159, )))
+        assert_allclose(ret.fun, 3.14159, atol=1e-6)
+
+    # Testing that args are passed correctly for pure simulated annealing
+    def test_fun_args_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             args=((3.14159, )), no_local_search=True)
+        assert_allclose(ret.fun, 3.14159, atol=1e-4)
+
+    def test_callback_stop(self):
+        # Testing that the callback makes the algorithm stop for
+        # fun value <= 1.0 (see callback method)
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             callback=self.callback)
+        assert ret.fun <= 1.0
+        assert 'stop early' in ret.message[0]
+
+    def test_neldermead_ls_minimizer(self):
+        minimizer_opts = {
+            'method': 'Nelder-Mead',
+        }
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             local_search_options=minimizer_opts)
+        assert_allclose(ret.fun, 0., atol=1e-6)
+
+    def test_powell_ls_minimizer(self):
+        minimizer_opts = {
+            'method': 'Powell',
+        }
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             local_search_options=minimizer_opts)
+        assert_allclose(ret.fun, 0., atol=1e-8)
+
+    def test_cg_ls_minimizer(self):
+        minimizer_opts = {
+            'method': 'CG',
+        }
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             local_search_options=minimizer_opts)
+        assert_allclose(ret.fun, 0., atol=1e-8)
+
+    def test_bfgs_ls_minimizer(self):
+        minimizer_opts = {
+            'method': 'BFGS',
+        }
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             local_search_options=minimizer_opts)
+        assert_allclose(ret.fun, 0., atol=1e-8)
+
+    def test_tnc_ls_minimizer(self):
+        minimizer_opts = {
+            'method': 'TNC',
+        }
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             local_search_options=minimizer_opts)
+        assert_allclose(ret.fun, 0., atol=1e-8)
+
+    def test_cobyla_ls_minimizer(self):
+        minimizer_opts = {
+            'method': 'COBYLA',
+        }
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             local_search_options=minimizer_opts)
+        assert_allclose(ret.fun, 0., atol=1e-5)
+
+    def test_slsqp_ls_minimizer(self):
+        minimizer_opts = {
+            'method': 'SLSQP',
+        }
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             local_search_options=minimizer_opts)
+        assert_allclose(ret.fun, 0., atol=1e-7)
+
+    def test_wrong_restart_temp(self):
+        assert_raises(ValueError, dual_annealing, self.func,
+                      self.ld_bounds, restart_temp_ratio=1)
+        assert_raises(ValueError, dual_annealing, self.func,
+                      self.ld_bounds, restart_temp_ratio=0)
+
+    def test_gradient_ngev(self):
+        minimizer_opts = {
+            'jac': self.rosen_der_wrapper,
+        }
+        ret = dual_annealing(rosen, self.ld_bounds,
+                             local_search_options=minimizer_opts)
+        assert ret.njev ==
self.ngev diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__dual_annealing.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__dual_annealing.pyc new file mode 100644 index 0000000..35bee15 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__dual_annealing.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py new file mode 100644 index 0000000..b7a517e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py @@ -0,0 +1,365 @@ +""" +Unit test for Linear Programming via Simplex Algorithm. +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_, assert_allclose +from pytest import raises as assert_raises +from scipy.optimize._linprog_util import _clean_inputs +from copy import deepcopy + + +def test_aliasing(): + c = 1 + A_ub = [[1]] + b_ub = [1] + A_eq = [[1]] + b_eq = [1] + bounds = (-np.inf, np.inf) + + c_copy = deepcopy(c) + A_ub_copy = deepcopy(A_ub) + b_ub_copy = deepcopy(b_ub) + A_eq_copy = deepcopy(A_eq) + b_eq_copy = deepcopy(b_eq) + bounds_copy = deepcopy(bounds) + + _clean_inputs(c, A_ub, b_ub, A_eq, b_eq, bounds) + + assert_(c == c_copy, "c modified by _clean_inputs") + assert_(A_ub == A_ub_copy, "A_ub modified by _clean_inputs") + assert_(b_ub == b_ub_copy, "b_ub modified by _clean_inputs") + assert_(A_eq == A_eq_copy, "A_eq modified by _clean_inputs") + assert_(b_eq == b_eq_copy, "b_eq modified by _clean_inputs") + assert_(bounds == bounds_copy, "bounds modified by _clean_inputs") + + +def test_aliasing2(): + c = np.array([1, 1]) + A_ub = np.array([[1, 1], [2, 2]]) + b_ub = np.array([[1], [1]]) + A_eq = np.array([[1, 1]]) + b_eq = np.array([1]) + bounds = [(-np.inf, np.inf), (None, 1)] + + c_copy = c.copy() + A_ub_copy = A_ub.copy() + b_ub_copy = b_ub.copy() + A_eq_copy = A_eq.copy() + b_eq_copy = b_eq.copy() + bounds_copy = deepcopy(bounds) + + _clean_inputs(c, A_ub, b_ub, A_eq, b_eq, bounds) + + assert_allclose(c, c_copy, err_msg="c modified by _clean_inputs") + assert_allclose(A_ub, A_ub_copy, err_msg="A_ub modified by _clean_inputs") + assert_allclose(b_ub, b_ub_copy, err_msg="b_ub modified by _clean_inputs") + assert_allclose(A_eq, A_eq_copy, err_msg="A_eq modified by _clean_inputs") + assert_allclose(b_eq, b_eq_copy, err_msg="b_eq modified by _clean_inputs") + assert_(bounds == bounds_copy, "bounds modified by _clean_inputs") + + +def test_missing_inputs(): + c = [1, 2] + A_ub = np.array([[1, 1], [2, 2]]) + b_ub = np.array([1, 1]) + A_eq = np.array([[1, 1], [2, 2]]) + b_eq = np.array([1, 1]) + + assert_raises(TypeError, _clean_inputs) + assert_raises(TypeError, _clean_inputs, c=None) + assert_raises(ValueError, _clean_inputs, c=c, A_ub=A_ub) + assert_raises(ValueError, _clean_inputs, c=c, A_ub=A_ub, b_ub=None) + assert_raises(ValueError, _clean_inputs, c=c, b_ub=b_ub) + assert_raises(ValueError, _clean_inputs, c=c, A_ub=None, b_ub=b_ub) + assert_raises(ValueError, _clean_inputs, c=c, A_eq=A_eq) + assert_raises(ValueError, _clean_inputs, c=c, A_eq=A_eq, b_eq=None) + assert_raises(ValueError, _clean_inputs, c=c, b_eq=b_eq) + assert_raises(ValueError, _clean_inputs, c=c, A_eq=None, b_eq=b_eq) + + +def test_too_many_dimensions(): + cb = [1, 2, 3, 4] + A = np.random.rand(4, 4) + bad2D = [[1, 2], [3, 4]] 
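+    # [Editor's note - illustrative sketch, not part of the upstream file.]
+    # _clean_inputs expects c to broadcast to 1-D and each A_* to 2-D, e.g.
+    # (assuming the private scipy.optimize._linprog_util API shown in these
+    # tests):
+    #
+    #     c, A_ub, b_ub, A_eq, b_eq, bounds = _clean_inputs(
+    #         c=[1, 2], A_ub=[[1, 1]], b_ub=[1])
+    #
+    # anything higher-dimensional (bad2D above, bad3D below) must raise
+    # ValueError.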
+ bad3D = np.random.rand(4, 4, 4) + assert_raises(ValueError, _clean_inputs, c=bad2D, A_ub=A, b_ub=cb) + assert_raises(ValueError, _clean_inputs, c=cb, A_ub=bad3D, b_ub=cb) + assert_raises(ValueError, _clean_inputs, c=cb, A_ub=A, b_ub=bad2D) + assert_raises(ValueError, _clean_inputs, c=cb, A_eq=bad3D, b_eq=cb) + assert_raises(ValueError, _clean_inputs, c=cb, A_eq=A, b_eq=bad2D) + + +def test_too_few_dimensions(): + bad = np.random.rand(4, 4).ravel() + cb = np.random.rand(4) + assert_raises(ValueError, _clean_inputs, c=cb, A_ub=bad, b_ub=cb) + assert_raises(ValueError, _clean_inputs, c=cb, A_eq=bad, b_eq=cb) + + +def test_inconsistent_dimensions(): + m = 2 + n = 4 + c = [1, 2, 3, 4] + + Agood = np.random.rand(m, n) + Abad = np.random.rand(m, n + 1) + bgood = np.random.rand(m) + bbad = np.random.rand(m + 1) + boundsbad = [(0, 1)] * (n + 1) + assert_raises(ValueError, _clean_inputs, c=c, A_ub=Abad, b_ub=bgood) + assert_raises(ValueError, _clean_inputs, c=c, A_ub=Agood, b_ub=bbad) + assert_raises(ValueError, _clean_inputs, c=c, A_eq=Abad, b_eq=bgood) + assert_raises(ValueError, _clean_inputs, c=c, A_eq=Agood, b_eq=bbad) + assert_raises(ValueError, _clean_inputs, c=c, bounds=boundsbad) + + +def test_type_errors(): + bad = "hello" + c = [1, 2] + A_ub = np.array([[1, 1], [2, 2]]) + b_ub = np.array([1, 1]) + A_eq = np.array([[1, 1], [2, 2]]) + b_eq = np.array([1, 1]) + bounds = [(0, 1)] + assert_raises( + TypeError, + _clean_inputs, + c=bad, + A_ub=A_ub, + b_ub=b_ub, + A_eq=A_eq, + b_eq=b_eq, + bounds=bounds) + assert_raises( + TypeError, + _clean_inputs, + c=c, + A_ub=bad, + b_ub=b_ub, + A_eq=A_eq, + b_eq=b_eq, + bounds=bounds) + assert_raises( + TypeError, + _clean_inputs, + c=c, + A_ub=A_ub, + b_ub=bad, + A_eq=A_eq, + b_eq=b_eq, + bounds=bounds) + assert_raises( + TypeError, + _clean_inputs, + c=c, + A_ub=A_ub, + b_ub=b_ub, + A_eq=bad, + b_eq=b_eq, + bounds=bounds) + + assert_raises( + TypeError, + _clean_inputs, + c=c, + A_ub=A_ub, + b_ub=b_ub, + A_eq=A_eq, + b_eq=b_eq, + bounds=bad) + assert_raises( + TypeError, + _clean_inputs, + c=c, + A_ub=A_ub, + b_ub=b_ub, + A_eq=A_eq, + b_eq=b_eq, + bounds="hi") + assert_raises( + TypeError, + _clean_inputs, + c=c, + A_ub=A_ub, + b_ub=b_ub, + A_eq=A_eq, + b_eq=b_eq, + bounds=["hi"]) + assert_raises( + TypeError, + _clean_inputs, + c=c, + A_ub=A_ub, + b_ub=b_ub, + A_eq=A_eq, + b_eq=b_eq, + bounds=[ + ("hi")]) + assert_raises(TypeError, _clean_inputs, c=c, A_ub=A_ub, + b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=[(1, "")]) + assert_raises(TypeError, _clean_inputs, c=c, A_ub=A_ub, + b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=[(1, 2), (1, "")]) + + +def test_non_finite_errors(): + c = [1, 2] + A_ub = np.array([[1, 1], [2, 2]]) + b_ub = np.array([1, 1]) + A_eq = np.array([[1, 1], [2, 2]]) + b_eq = np.array([1, 1]) + bounds = [(0, 1)] + assert_raises( + ValueError, _clean_inputs, c=[0, None], A_ub=A_ub, b_ub=b_ub, + A_eq=A_eq, b_eq=b_eq, bounds=bounds) + assert_raises( + ValueError, _clean_inputs, c=[np.inf, 0], A_ub=A_ub, b_ub=b_ub, + A_eq=A_eq, b_eq=b_eq, bounds=bounds) + assert_raises( + ValueError, _clean_inputs, c=[0, -np.inf], A_ub=A_ub, b_ub=b_ub, + A_eq=A_eq, b_eq=b_eq, bounds=bounds) + assert_raises( + ValueError, _clean_inputs, c=[np.nan, 0], A_ub=A_ub, b_ub=b_ub, + A_eq=A_eq, b_eq=b_eq, bounds=bounds) + + assert_raises(ValueError, _clean_inputs, c=c, A_ub=[[1, 2], [None, 1]], + b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds) + assert_raises( + ValueError, + _clean_inputs, + c=c, + A_ub=A_ub, + b_ub=[ + np.inf, + 1], + A_eq=A_eq, + b_eq=b_eq, + 
bounds=bounds) + assert_raises(ValueError, _clean_inputs, c=c, A_ub=A_ub, b_ub=b_ub, A_eq=[ + [1, 2], [1, -np.inf]], b_eq=b_eq, bounds=bounds) + assert_raises( + ValueError, + _clean_inputs, + c=c, + A_ub=A_ub, + b_ub=b_ub, + A_eq=A_eq, + b_eq=[ + 1, + np.nan], + bounds=bounds) + + +def test__clean_inputs1(): + c = [1, 2] + A_ub = [[1, 1], [2, 2]] + b_ub = [1, 1] + A_eq = [[1, 1], [2, 2]] + b_eq = [1, 1] + bounds = None + outputs = _clean_inputs( + c=c, + A_ub=A_ub, + b_ub=b_ub, + A_eq=A_eq, + b_eq=b_eq, + bounds=bounds) + assert_allclose(outputs[0], np.array(c)) + assert_allclose(outputs[1], np.array(A_ub)) + assert_allclose(outputs[2], np.array(b_ub)) + assert_allclose(outputs[3], np.array(A_eq)) + assert_allclose(outputs[4], np.array(b_eq)) + assert_(outputs[5] == [(0, None)] * 2, "") + + assert_(outputs[0].shape == (2,), "") + assert_(outputs[1].shape == (2, 2), "") + assert_(outputs[2].shape == (2,), "") + assert_(outputs[3].shape == (2, 2), "") + assert_(outputs[4].shape == (2,), "") + + +def test__clean_inputs2(): + c = 1 + A_ub = [[1]] + b_ub = 1 + A_eq = [[1]] + b_eq = 1 + bounds = (0, 1) + outputs = _clean_inputs( + c=c, + A_ub=A_ub, + b_ub=b_ub, + A_eq=A_eq, + b_eq=b_eq, + bounds=bounds) + assert_allclose(outputs[0], np.array(c)) + assert_allclose(outputs[1], np.array(A_ub)) + assert_allclose(outputs[2], np.array(b_ub)) + assert_allclose(outputs[3], np.array(A_eq)) + assert_allclose(outputs[4], np.array(b_eq)) + assert_(outputs[5] == [(0, 1)], "") + + assert_(outputs[0].shape == (1,), "") + assert_(outputs[1].shape == (1, 1), "") + assert_(outputs[2].shape == (1,), "") + assert_(outputs[3].shape == (1, 1), "") + assert_(outputs[4].shape == (1,), "") + + +def test__clean_inputs3(): + c = [[1, 2]] + A_ub = np.random.rand(2, 2) + b_ub = [[1], [2]] + A_eq = np.random.rand(2, 2) + b_eq = [[1], [2]] + bounds = [(0, 1)] + outputs = _clean_inputs( + c=c, + A_ub=A_ub, + b_ub=b_ub, + A_eq=A_eq, + b_eq=b_eq, + bounds=bounds) + assert_allclose(outputs[0], np.array([1, 2])) + assert_allclose(outputs[2], np.array([1, 2])) + assert_allclose(outputs[4], np.array([1, 2])) + assert_(outputs[5] == [(0, 1)] * 2, "") + + assert_(outputs[0].shape == (2,), "") + assert_(outputs[2].shape == (2,), "") + assert_(outputs[4].shape == (2,), "") + + +def test_bad_bounds(): + c = [1, 2] + assert_raises(ValueError, _clean_inputs, c=c, bounds=(1, -2)) + assert_raises(ValueError, _clean_inputs, c=c, bounds=[(1, -2)]) + assert_raises(ValueError, _clean_inputs, c=c, bounds=[(1, -2), (1, 2)]) + + assert_raises(ValueError, _clean_inputs, c=c, bounds=(1, 2, 2)) + assert_raises(ValueError, _clean_inputs, c=c, bounds=[(1, 2, 2)]) + assert_raises(ValueError, _clean_inputs, c=c, bounds=[(1, 2), (1, 2, 2)]) + assert_raises(ValueError, _clean_inputs, c=c, + bounds=[(1, 2), (1, 2), (1, 2)]) + + +def test_good_bounds(): + c = [1, 2] + outputs = _clean_inputs(c=c, bounds=None) + assert_(outputs[5] == [(0, None)] * 2, "") + + outputs = _clean_inputs(c=c, bounds=(1, 2)) + assert_(outputs[5] == [(1, 2)] * 2, "") + + outputs = _clean_inputs(c=c, bounds=[(1, 2)]) + assert_(outputs[5] == [(1, 2)] * 2, "") + + outputs = _clean_inputs(c=c, bounds=[(1, np.inf)]) + assert_(outputs[5] == [(1, None)] * 2, "") + + outputs = _clean_inputs(c=c, bounds=[(-np.inf, 1)]) + assert_(outputs[5] == [(None, 1)] * 2, "") + + outputs = _clean_inputs(c=c, bounds=[(-np.inf, np.inf), (-np.inf, np.inf)]) + assert_(outputs[5] == [(None, None)] * 2, "") diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.pyc 
b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.pyc new file mode 100644 index 0000000..0fbdad0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__numdiff.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__numdiff.py new file mode 100644 index 0000000..5d340b9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__numdiff.py @@ -0,0 +1,598 @@ +from __future__ import division + +import math +from itertools import product + +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_ +from pytest import raises as assert_raises + +from scipy.sparse import csr_matrix, csc_matrix, lil_matrix + +from scipy.optimize._numdiff import ( + _adjust_scheme_to_bounds, approx_derivative, check_derivative, + group_columns) + + +def test_group_columns(): + structure = [ + [1, 1, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0] + ] + for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]: + A = transform(structure) + order = np.arange(6) + groups_true = np.array([0, 1, 2, 0, 1, 2]) + groups = group_columns(A, order) + assert_equal(groups, groups_true) + + order = [1, 2, 4, 3, 5, 0] + groups_true = np.array([2, 0, 1, 2, 0, 1]) + groups = group_columns(A, order) + assert_equal(groups, groups_true) + + # Test repeatability. + groups_1 = group_columns(A) + groups_2 = group_columns(A) + assert_equal(groups_1, groups_2) + + +class TestAdjustSchemeToBounds(object): + def test_no_bounds(self): + x0 = np.zeros(3) + h = np.ones(3) * 1e-2 + inf_lower = np.empty_like(x0) + inf_upper = np.empty_like(x0) + inf_lower.fill(-np.inf) + inf_upper.fill(np.inf) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '1-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '1-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(~one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', inf_lower, inf_upper) + assert_allclose(h_adjusted, h) + assert_(np.all(~one_sided)) + + def test_with_bound(self): + x0 = np.array([0.0, 0.85, -0.85]) + lb = -np.ones(3) + ub = np.ones(3) + h = np.array([1, 1, -1]) * 1e-1 + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub) + assert_allclose(h_adjusted, h) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub) + assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.abs(h)) + assert_(np.all(~one_sided)) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1) + assert_equal(one_sided, np.array([False, True, True])) + + def test_tight_bounds(self): + lb = np.array([-0.03, -0.03]) + ub = np.array([0.05, 0.05]) + x0 = np.array([0.0, 0.03]) + h = np.array([-0.1, -0.1]) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub) + 
assert_allclose(h_adjusted, np.array([0.05, -0.06])) + + h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.025, -0.03])) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 1, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.03, -0.03])) + assert_equal(one_sided, np.array([False, True])) + + h_adjusted, one_sided = _adjust_scheme_to_bounds( + x0, h, 2, '2-sided', lb, ub) + assert_allclose(h_adjusted, np.array([0.015, -0.015])) + assert_equal(one_sided, np.array([False, True])) + + +class TestApproxDerivativesDense(object): + def fun_scalar_scalar(self, x): + return np.sinh(x) + + def jac_scalar_scalar(self, x): + return np.cosh(x) + + def fun_scalar_vector(self, x): + return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) + + def jac_scalar_vector(self, x): + return np.array( + [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) + + def fun_vector_scalar(self, x): + return np.sin(x[0] * x[1]) * np.log(x[0]) + + def wrong_dimensions_fun(self, x): + return np.array([x**2, np.tan(x), np.exp(x)]) + + def jac_vector_scalar(self, x): + return np.array([ + x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + + np.sin(x[0] * x[1]) / x[0], + x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) + ]) + + def fun_vector_vector(self, x): + return np.array([ + x[0] * np.sin(x[1]), + x[1] * np.cos(x[0]), + x[0] ** 3 * x[1] ** -0.5 + ]) + + def jac_vector_vector(self, x): + return np.array([ + [np.sin(x[1]), x[0] * np.cos(x[1])], + [-x[1] * np.sin(x[0]), np.cos(x[0])], + [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] + ]) + + def fun_parametrized(self, x, c0, c1=1.0): + return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])]) + + def jac_parametrized(self, x, c0, c1=0.1): + return np.array([ + [c0 * np.exp(c0 * x[0]), 0], + [0, c1 * np.exp(c1 * x[1])] + ]) + + def fun_with_nan(self, x): + return x if np.abs(x) <= 1e-8 else np.nan + + def jac_with_nan(self, x): + return 1.0 if np.abs(x) <= 1e-8 else np.nan + + def fun_zero_jacobian(self, x): + return np.array([x[0] * x[1], np.cos(x[0] * x[1])]) + + def jac_zero_jacobian(self, x): + return np.array([ + [x[1], x[0]], + [-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])] + ]) + + def fun_non_numpy(self, x): + return math.exp(x) + + def jac_non_numpy(self, x): + return math.exp(x) + + def test_scalar_scalar(self): + x0 = 1.0 + jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs') + jac_true = self.jac_scalar_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_scalar_vector(self): + x0 = 0.5 + jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0) + jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, + method='cs') + jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_scalar(self): + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + 
method='cs') + jac_true = self.jac_vector_scalar(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-7) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_vector_vector(self): + x0 = np.array([-100.0, 0.2]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0) + jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, + method='cs') + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-5) + assert_allclose(jac_diff_3, jac_true, rtol=1e-6) + assert_allclose(jac_diff_4, jac_true, rtol=1e-12) + + def test_wrong_dimensions(self): + x0 = 1.0 + assert_raises(RuntimeError, approx_derivative, + self.wrong_dimensions_fun, x0) + f0 = self.wrong_dimensions_fun(np.atleast_1d(x0)) + assert_raises(ValueError, approx_derivative, + self.wrong_dimensions_fun, x0, f0=f0) + + def test_custom_rel_step(self): + x0 = np.array([-0.1, 0.1]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point', rel_step=1e-4) + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, + rel_step=1e-4) + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-2) + assert_allclose(jac_diff_3, jac_true, rtol=1e-4) + + def test_options(self): + x0 = np.array([1.0, 1.0]) + c0 = -1.0 + c1 = 1.0 + lb = 0.0 + ub = 2.0 + f0 = self.fun_parametrized(x0, c0, c1=c1) + rel_step = np.array([-1e-6, 1e-7]) + jac_true = self.jac_parametrized(x0, c0, c1) + jac_diff_2 = approx_derivative( + self.fun_parametrized, x0, method='2-point', rel_step=rel_step, + f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) + jac_diff_3 = approx_derivative( + self.fun_parametrized, x0, rel_step=rel_step, + f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + def test_with_bounds_2_point(self): + lb = -np.ones(2) + ub = np.ones(2) + + x0 = np.array([-2.0, 0.2]) + assert_raises(ValueError, approx_derivative, + self.fun_vector_vector, x0, bounds=(lb, ub)) + + x0 = np.array([-1.0, 1.0]) + jac_diff = approx_derivative(self.fun_vector_vector, x0, + method='2-point', bounds=(lb, ub)) + jac_true = self.jac_vector_vector(x0) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + + def test_with_bounds_3_point(self): + lb = np.array([1.0, 1.0]) + ub = np.array([2.0, 2.0]) + + x0 = np.array([1.0, 2.0]) + jac_true = self.jac_vector_vector(x0) + + jac_diff = approx_derivative(self.fun_vector_vector, x0) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(lb, np.inf)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(-np.inf, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + jac_diff = approx_derivative(self.fun_vector_vector, x0, + bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-9) + + def test_tight_bounds(self): + x0 = np.array([10.0, 10.0]) + lb = x0 - 3e-9 + ub = x0 + 2e-9 + jac_true = self.jac_vector_vector(x0) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, method='2-point', + rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + + jac_diff = approx_derivative( + 
self.fun_vector_vector, x0, bounds=(lb, ub)) + assert_allclose(jac_diff, jac_true, rtol=1e-6) + jac_diff = approx_derivative( + self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_true, jac_diff, rtol=1e-6) + + def test_bound_switches(self): + lb = -1e-8 + ub = 1e-8 + x0 = 0.0 + jac_true = self.jac_with_nan(x0) + jac_diff_2 = approx_derivative( + self.fun_with_nan, x0, method='2-point', rel_step=1e-6, + bounds=(lb, ub)) + jac_diff_3 = approx_derivative( + self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + x0 = 1e-8 + jac_true = self.jac_with_nan(x0) + jac_diff_2 = approx_derivative( + self.fun_with_nan, x0, method='2-point', rel_step=1e-6, + bounds=(lb, ub)) + jac_diff_3 = approx_derivative( + self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-9) + + def test_non_numpy(self): + x0 = 1.0 + jac_true = self.jac_non_numpy(x0) + jac_diff_2 = approx_derivative(self.jac_non_numpy, x0, + method='2-point') + jac_diff_3 = approx_derivative(self.jac_non_numpy, x0) + assert_allclose(jac_diff_2, jac_true, rtol=1e-6) + assert_allclose(jac_diff_3, jac_true, rtol=1e-8) + + # math.exp cannot handle complex arguments, hence this raises + assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0, + **dict(method='cs')) + + def test_check_derivative(self): + x0 = np.array([-10.0, 10]) + accuracy = check_derivative(self.fun_vector_vector, + self.jac_vector_vector, x0) + assert_(accuracy < 1e-9) + accuracy = check_derivative(self.fun_vector_vector, + self.jac_vector_vector, x0) + assert_(accuracy < 1e-6) + + x0 = np.array([0.0, 0.0]) + accuracy = check_derivative(self.fun_zero_jacobian, + self.jac_zero_jacobian, x0) + assert_(accuracy == 0) + accuracy = check_derivative(self.fun_zero_jacobian, + self.jac_zero_jacobian, x0) + assert_(accuracy == 0) + + +class TestApproxDerivativeSparse(object): + # Example from Numerical Optimization 2nd edition, p. 198. 
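+ # The Jacobian of this system is tridiagonal, so its columns split into a
+ # few structurally-orthogonal groups (cf. test_group_columns above, where
+ # a similar banded pattern yields groups [0, 1, 2, 0, 1, 2]): columns that
+ # share no nonzero row can be perturbed simultaneously, letting
+ # approx_derivative recover the full 50 x 50 Jacobian from a number of
+ # function evaluations proportional to the bandwidth rather than to n.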
+ def setup_method(self): + np.random.seed(0) + self.n = 50 + self.lb = -0.1 * (1 + np.arange(self.n)) + self.ub = 0.1 * (1 + np.arange(self.n)) + self.x0 = np.empty(self.n) + self.x0[::2] = (1 - 1e-7) * self.lb[::2] + self.x0[1::2] = (1 - 1e-7) * self.ub[1::2] + + self.J_true = self.jac(self.x0) + + def fun(self, x): + e = x[1:]**3 - x[:-1]**2 + return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0)) + + def jac(self, x): + n = x.size + J = np.zeros((n, n)) + J[0, 0] = -4 * x[0] + J[0, 1] = 6 * x[1]**2 + for i in range(1, n - 1): + J[i, i - 1] = -6 * x[i-1] + J[i, i] = 9 * x[i]**2 - 4 * x[i] + J[i, i + 1] = 6 * x[i+1]**2 + J[-1, -1] = 9 * x[-1]**2 + J[-1, -2] = -6 * x[-2] + + return J + + def structure(self, n): + A = np.zeros((n, n), dtype=int) + A[0, 0] = 1 + A[0, 1] = 1 + for i in range(1, n - 1): + A[i, i - 1: i + 2] = 1 + A[-1, -1] = 1 + A[-1, -2] = 1 + + return A + + def test_all(self): + A = self.structure(self.n) + order = np.arange(self.n) + groups_1 = group_columns(A, order) + np.random.shuffle(order) + groups_2 = group_columns(A, order) + + for method, groups, l, u in product( + ['2-point', '3-point', 'cs'], [groups_1, groups_2], + [-np.inf, self.lb], [np.inf, self.ub]): + J = approx_derivative(self.fun, self.x0, method=method, + bounds=(l, u), sparsity=(A, groups)) + assert_(isinstance(J, csr_matrix)) + assert_allclose(J.toarray(), self.J_true, rtol=1e-6) + + rel_step = 1e-8 * np.ones_like(self.x0) + rel_step[::2] *= -1 + J = approx_derivative(self.fun, self.x0, method=method, + rel_step=rel_step, sparsity=(A, groups)) + assert_allclose(J.toarray(), self.J_true, rtol=1e-5) + + def test_no_precomputed_groups(self): + A = self.structure(self.n) + J = approx_derivative(self.fun, self.x0, sparsity=A) + assert_allclose(J.toarray(), self.J_true, rtol=1e-6) + + def test_equivalence(self): + structure = np.ones((self.n, self.n), dtype=int) + groups = np.arange(self.n) + for method in ['2-point', '3-point', 'cs']: + J_dense = approx_derivative(self.fun, self.x0, method=method) + J_sparse = approx_derivative( + self.fun, self.x0, sparsity=(structure, groups), method=method) + assert_equal(J_dense, J_sparse.toarray()) + + def test_check_derivative(self): + def jac(x): + return csr_matrix(self.jac(x)) + + accuracy = check_derivative(self.fun, jac, self.x0, + bounds=(self.lb, self.ub)) + assert_(accuracy < 1e-9) + + accuracy = check_derivative(self.fun, jac, self.x0, + bounds=(self.lb, self.ub)) + assert_(accuracy < 1e-9) + + +class TestApproxDerivativeLinearOperator(object): + + def fun_scalar_scalar(self, x): + return np.sinh(x) + + def jac_scalar_scalar(self, x): + return np.cosh(x) + + def fun_scalar_vector(self, x): + return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) + + def jac_scalar_vector(self, x): + return np.array( + [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) + + def fun_vector_scalar(self, x): + return np.sin(x[0] * x[1]) * np.log(x[0]) + + def jac_vector_scalar(self, x): + return np.array([ + x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + + np.sin(x[0] * x[1]) / x[0], + x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) + ]) + + def fun_vector_vector(self, x): + return np.array([ + x[0] * np.sin(x[1]), + x[1] * np.cos(x[0]), + x[0] ** 3 * x[1] ** -0.5 + ]) + + def jac_vector_vector(self, x): + return np.array([ + [np.sin(x[1]), x[0] * np.cos(x[1])], + [-x[1] * np.sin(x[0]), np.cos(x[0])], + [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] + ]) + + def test_scalar_scalar(self): + x0 = 1.0 + jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, + 
method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_scalar_scalar(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=(1,)) + assert_allclose(jac_diff_2.dot(p), jac_true*p, + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true*p, + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), jac_true*p, + rtol=5e-6) + + def test_scalar_vector(self): + x0 = 0.5 + jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=(1,)) + assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), + rtol=5e-6) + + def test_vector_scalar(self): + x0 = np.array([100.0, -0.5]) + jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_vector_scalar(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=x0.shape) + assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=5e-6) + assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)), + rtol=1e-7) + + def test_vector_vector(self): + x0 = np.array([-100.0, 0.2]) + jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, + method='2-point', + as_linear_operator=True) + jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, + as_linear_operator=True) + jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, + method='cs', + as_linear_operator=True) + jac_true = self.jac_vector_vector(x0) + np.random.seed(1) + for i in range(10): + p = np.random.uniform(-10, 10, size=x0.shape) + assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5) + assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6) + assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7) + + def test_exception(self): + x0 = np.array([-100.0, 0.2]) + assert_raises(ValueError, approx_derivative, + self.fun_vector_vector, x0, + method='2-point', bounds=(1, np.inf)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__numdiff.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__numdiff.pyc new file mode 100644 index 0000000..bb11a49 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__numdiff.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__remove_redundancy.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__remove_redundancy.py new file mode 100644 index 0000000..34f3c92 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__remove_redundancy.py @@ -0,0 +1,239 @@ +""" +Unit 
test for Linear Programming via Simplex Algorithm. +""" + +# TODO: add tests for: +# https://github.com/scipy/scipy/issues/5400 +# https://github.com/scipy/scipy/issues/6690 + +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_equal) + +from .test_linprog import magic_square +from scipy.optimize._remove_redundancy import _remove_redundancy + + +def setup_module(): + np.random.seed(2017) + + +def _assert_success( + res, + desired_fun=None, + desired_x=None, + rtol=1e-7, + atol=1e-7): + # res: linprog result object + # desired_fun: desired objective function value or None + # desired_x: desired solution or None + assert_(res.success) + assert_equal(res.status, 0) + if desired_fun is not None: + assert_allclose( + res.fun, + desired_fun, + err_msg="converged to an unexpected objective value", + rtol=rtol, + atol=atol) + if desired_x is not None: + assert_allclose( + res.x, + desired_x, + err_msg="converged to an unexpected solution", + rtol=rtol, + atol=atol) + + +def test_no_redundancy(): + m, n = 10, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A1, b1, status, message = _remove_redundancy(A0, b0) + assert_allclose(A0, A1) + assert_allclose(b0, b1) + assert_equal(status, 0) + + +def test_infeasible_zero_row(): + A = np.eye(3) + A[1, :] = 0 + b = np.random.rand(3) + A1, b1, status, message = _remove_redundancy(A, b) + assert_equal(status, 2) + + +def test_remove_zero_row(): + A = np.eye(3) + A[1, :] = 0 + b = np.random.rand(3) + b[1] = 0 + A1, b1, status, message = _remove_redundancy(A, b) + assert_equal(status, 0) + assert_allclose(A1, A[[0, 2], :]) + assert_allclose(b1, b[[0, 2]]) + + +def test_infeasible_m_gt_n(): + m, n = 20, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A1, b1, status, message = _remove_redundancy(A0, b0) + assert_equal(status, 2) + + +def test_infeasible_m_eq_n(): + m, n = 10, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = 2 * A0[-2, :] + A1, b1, status, message = _remove_redundancy(A0, b0) + assert_equal(status, 2) + + +def test_infeasible_m_lt_n(): + m, n = 9, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) + A1, b1, status, message = _remove_redundancy(A0, b0) + assert_equal(status, 2) + + +def test_m_gt_n(): + np.random.seed(2032) + m, n = 20, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + x = np.linalg.solve(A0[:n, :], b0[:n]) + b0[n:] = A0[n:, :].dot(x) + A1, b1, status, message = _remove_redundancy(A0, b0) + assert_equal(status, 0) + assert_equal(A1.shape[0], n) + assert_equal(np.linalg.matrix_rank(A1), n) + + +def test_m_gt_n_rank_deficient(): + m, n = 20, 10 + A0 = np.zeros((m, n)) + A0[:, 0] = 1 + b0 = np.ones(m) + A1, b1, status, message = _remove_redundancy(A0, b0) + assert_equal(status, 0) + assert_allclose(A1, A0[0:1, :]) + assert_allclose(b1, b0[0]) + + +def test_m_lt_n_rank_deficient(): + m, n = 9, 10 + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) + b0[-1] = np.arange(m - 1).dot(b0[:-1]) + A1, b1, status, message = _remove_redundancy(A0, b0) + assert_equal(status, 0) + assert_equal(A1.shape[0], 8) + assert_equal(np.linalg.matrix_rank(A1), 8) + + +def test_dense1(): + A = np.ones((6, 6)) + A[0, :3] = 0 + A[1, 3:] = 0 + A[3:, ::2] = -1 + A[3, :2] = 0 + A[4, 2:] = 0 + b = np.zeros(A.shape[0]) + + A2 = A[[0, 1, 3, 4], :] + b2 = np.zeros(4) + + A1, b1, status, message = _remove_redundancy(A, b) + 
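+ # As exercised throughout these tests, _remove_redundancy returns a
+ # 4-tuple (A, b, status, message): status 0 means any linearly dependent
+ # rows of [A | b] were dropped consistently, while status 2 (see the
+ # test_infeasible_* cases above) means a dependent row contradicted its
+ # right-hand side. Here rows 2 and 5 are combinations of the kept rows
+ # 0, 1, 3 and 4, so a 4 x 6 system should remain.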
assert_allclose(A1, A2) + assert_allclose(b1, b2) + assert_equal(status, 0) + + +def test_dense2(): + A = np.eye(6) + A[-2, -1] = 1 + A[-1, :] = 1 + b = np.zeros(A.shape[0]) + A1, b1, status, message = _remove_redundancy(A, b) + assert_allclose(A1, A[:-1, :]) + assert_allclose(b1, b[:-1]) + assert_equal(status, 0) + + +def test_dense3(): + A = np.eye(6) + A[-2, -1] = 1 + A[-1, :] = 1 + b = np.random.rand(A.shape[0]) + b[-1] = np.sum(b[:-1]) + A1, b1, status, message = _remove_redundancy(A, b) + assert_allclose(A1, A[:-1, :]) + assert_allclose(b1, b[:-1]) + assert_equal(status, 0) + + +def test_m_gt_n_sparse(): + np.random.seed(2013) + m, n = 20, 5 + p = 0.1 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = _remove_redundancy(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + +def test_m_lt_n_sparse(): + np.random.seed(2017) + m, n = 20, 50 + p = 0.05 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = _remove_redundancy(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + +def test_m_eq_n_sparse(): + np.random.seed(2017) + m, n = 100, 100 + p = 0.01 + A = np.random.rand(m, n) + A[np.random.rand(m, n) > p] = 0 + rank = np.linalg.matrix_rank(A) + b = np.zeros(A.shape[0]) + A1, b1, status, message = _remove_redundancy(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], rank) + assert_equal(np.linalg.matrix_rank(A1), rank) + + +def test_magic_square(): + A, b, c, numbers = magic_square(3) + A1, b1, status, message = _remove_redundancy(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], 23) + assert_equal(np.linalg.matrix_rank(A1), 23) + + +def test_magic_square2(): + A, b, c, numbers = magic_square(4) + A1, b1, status, message = _remove_redundancy(A, b) + assert_equal(status, 0) + assert_equal(A1.shape[0], 39) + assert_equal(np.linalg.matrix_rank(A1), 39) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__remove_redundancy.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__remove_redundancy.pyc new file mode 100644 index 0000000..c808f44 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__remove_redundancy.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__root.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__root.py new file mode 100644 index 0000000..f90721a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__root.py @@ -0,0 +1,71 @@ +""" +Unit tests for optimization routines from _root.py. 
+""" +from __future__ import division, print_function, absolute_import + +from numpy.testing import assert_ +from pytest import raises as assert_raises +import numpy as np + +from scipy.optimize import root + + +class TestRoot(object): + def test_tol_parameter(self): + # Check that the minimize() tol= argument does something + def func(z): + x, y = z + return np.array([x**3 - 1, y**3 - 1]) + + def dfunc(z): + x, y = z + return np.array([[3*x**2, 0], [0, 3*y**2]]) + + for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson', + 'diagbroyden', 'krylov']: + if method in ('linearmixing', 'excitingmixing'): + # doesn't converge + continue + + if method in ('hybr', 'lm'): + jac = dfunc + else: + jac = None + + sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method) + sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method) + msg = "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)) + assert_(sol1.success, msg) + assert_(sol2.success, msg) + assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(), + msg) + + def test_minimize_scalar_coerce_args_param(self): + # github issue #3503 + def func(z, f=1): + x, y = z + return np.array([x**3 - 1, y**3 - f]) + root(func, [1.1, 1.1], args=1.5) + + def test_f_size(self): + # gh8320 + # check that decreasing the size of the returned array raises an error + # and doesn't segfault + class fun(object): + def __init__(self): + self.count = 0 + + def __call__(self, x): + self.count += 1 + + if not (self.count % 5): + ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0 + else: + ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0, + 0.5 * (x[1] - x[0]) ** 3 + x[1]]) + + return ret + + F = fun() + with assert_raises(ValueError): + sol = root(F, [0.1, 0.0], method='lm') diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__root.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__root.pyc new file mode 100644 index 0000000..25a5d19 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__root.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__shgo.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__shgo.py new file mode 100644 index 0000000..bbaa57d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__shgo.py @@ -0,0 +1,748 @@ +import logging +import numpy +import pytest +from pytest import raises as assert_raises, warns +from scipy.optimize import shgo +from scipy.optimize._shgo import SHGO + + +class StructTestFunction(object): + def __init__(self, bounds, expected_x, expected_fun=None, + expected_xl=None, expected_funl=None): + self.bounds = bounds + self.expected_x = expected_x + self.expected_fun = expected_fun + self.expected_xl = expected_xl + self.expected_funl = expected_funl + + +def wrap_constraints(g): + cons = [] + if g is not None: + if (type(g) is not tuple) and (type(g) is not list): + g = (g,) + else: + pass + for g in g: + cons.append({'type': 'ineq', + 'fun': g}) + cons = tuple(cons) + else: + cons = None + return cons + + +class StructTest1(StructTestFunction): + def f(self, x): + return x[0] ** 2 + x[1] ** 2 + + def g(x): + return -(numpy.sum(x, axis=0) - 6.0) + + cons = wrap_constraints(g) + + +test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)], + expected_x=[0, 0]) +test1_2 = StructTest1(bounds=[(0, 1), (0, 1)], + expected_x=[0, 0]) +test1_3 = StructTest1(bounds=[(None, None), (None, None)], + expected_x=[0, 0]) + + +class StructTest2(StructTestFunction): 
+ """ + Scalar function with several minima to test all minimiser retrievals + """ + + def f(self, x): + return (x - 30) * numpy.sin(x) + + def g(x): + return 58 - numpy.sum(x, axis=0) + + cons = wrap_constraints(g) + + +test2_1 = StructTest2(bounds=[(0, 60)], + expected_x=[1.53567906], + expected_fun=-28.44677132, + # Important: test that funl return is in the correct order + expected_xl=numpy.array([[1.53567906], + [55.01782167], + [7.80894889], + [48.74797493], + [14.07445705], + [42.4913859], + [20.31743841], + [36.28607535], + [26.43039605], + [30.76371366]]), + + expected_funl=numpy.array([-28.44677132, -24.99785984, + -22.16855376, -18.72136195, + -15.89423937, -12.45154942, + -9.63133158, -6.20801301, + -3.43727232, -0.46353338]) + ) + +test2_2 = StructTest2(bounds=[(0, 4.5)], + expected_x=[1.53567906], + expected_fun=[-28.44677132], + expected_xl=numpy.array([[1.53567906]]), + expected_funl=numpy.array([-28.44677132]) + ) + + +class StructTest3(StructTestFunction): + """ + Hock and Schittkowski 18 problem (HS18). Hoch and Schittkowski (1981) + http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf + Minimize: f = 0.01 * (x_1)**2 + (x_2)**2 + + Subject to: x_1 * x_2 - 25.0 >= 0, + (x_1)**2 + (x_2)**2 - 25.0 >= 0, + 2 <= x_1 <= 50, + 0 <= x_2 <= 50. + + Approx. Answer: + f([(250)**0.5 , (2.5)**0.5]) = 5.0 + + + """ + + def f(self, x): + return 0.01 * (x[0]) ** 2 + (x[1]) ** 2 + + def g1(x): + return x[0] * x[1] - 25.0 + + def g2(x): + return x[0] ** 2 + x[1] ** 2 - 25.0 + + g = (g1, g2) + + cons = wrap_constraints(g) + + +test3_1 = StructTest3(bounds=[(2, 50), (0, 50)], + expected_x=[250 ** 0.5, 2.5 ** 0.5], + expected_fun=5.0 + ) + + +class StructTest4(StructTestFunction): + """ + Hock and Schittkowski 11 problem (HS11). Hoch and Schittkowski (1981) + + NOTE: Did not find in original reference to HS collection, refer to + Henderson (2015) problem 7 instead. 02.03.2016 + """ + + def f(self, x): + return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4 + + 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2 + x[ + 6] ** 4 + - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6] + ) + + def g1(x): + return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2 + + 5 * x[4] - 127) + + def g2(x): + return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0) + + def g3(x): + return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196) + + def g4(x): + return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2 + + 5 * x[5] - 11 * x[6]) + + g = (g1, g2, g3, g4) + + cons = wrap_constraints(g) + + +test4_1 = StructTest4(bounds=[(-10, 10), ] * 7, + expected_x=[2.330499, 1.951372, -0.4775414, + 4.365726, -0.6244870, 1.038131, 1.594227], + expected_fun=680.6300573 + ) + + +class StructTest5(StructTestFunction): + def f(self, x): + return (-(x[1] + 47.0) + * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0)))) + - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0)))) + ) + + g = None + cons = wrap_constraints(g) + + +test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)], + expected_fun=[-959.64066272085051], + expected_x=[512., 404.23180542]) + + +class StructTestLJ(StructTestFunction): + """ + LennardJones objective function. Used to test symmetry constraints settings. 
+ """ + + def f(self, x, *args): + self.N = args[0] + k = int(self.N / 3) + s = 0.0 + + for i in range(k - 1): + for j in range(i + 1, k): + a = 3 * i + b = 3 * j + xd = x[a] - x[b] + yd = x[a + 1] - x[b + 1] + zd = x[a + 2] - x[b + 2] + ed = xd * xd + yd * yd + zd * zd + ud = ed * ed * ed + if ed > 0.0: + s += (1.0 / ud - 2.0) / ud + + return s + + g = None + cons = wrap_constraints(g) + + +N = 6 +boundsLJ = list(zip([-4.0] * 6, [4.0] * 6)) + +testLJ = StructTestLJ(bounds=boundsLJ, + expected_fun=[-1.0], + expected_x=[-2.71247337e-08, + -2.71247337e-08, + -2.50000222e+00, + -2.71247337e-08, + -2.71247337e-08, + -1.50000222e+00] + ) + + +class StructTestTable(StructTestFunction): + def f(self, x): + if x[0] == 3.0 and x[1] == 3.0: + return 50 + else: + return 100 + + g = None + cons = wrap_constraints(g) + + +test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)], + expected_fun=[50], + expected_x=[3.0, 3.0]) + + +class StructTestInfeasible(StructTestFunction): + """ + Test function with no feasible domain. + """ + + def f(self, x, *args): + return x[0] ** 2 + x[1] ** 2 + + def g1(x): + return x[0] + x[1] - 1 + + def g2(x): + return -(x[0] + x[1] - 1) + + def g3(x): + return -x[0] + x[1] - 1 + + def g4(x): + return -(-x[0] + x[1] - 1) + + g = (g1, g2, g3, g4) + cons = wrap_constraints(g) + + +test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)], + expected_fun=None, + expected_x=None + ) + + +def run_test(test, args=(), test_atol=1e-5, n=100, iters=None, + callback=None, minimizer_kwargs=None, options=None, + sampling_method='sobol'): + res = shgo(test.f, test.bounds, args=args, constraints=test.cons, + n=n, iters=iters, callback=callback, + minimizer_kwargs=minimizer_kwargs, options=options, + sampling_method=sampling_method) + + logging.info(res) + + if test.expected_x is not None: + numpy.testing.assert_allclose(res.x, test.expected_x, + rtol=test_atol, + atol=test_atol) + + # (Optional tests) + if test.expected_fun is not None: + numpy.testing.assert_allclose(res.fun, + test.expected_fun, + atol=test_atol) + + if test.expected_xl is not None: + numpy.testing.assert_allclose(res.xl, + test.expected_xl, + atol=test_atol) + + if test.expected_funl is not None: + numpy.testing.assert_allclose(res.funl, + test.expected_funl, + atol=test_atol) + return + + +# Base test functions: +class TestShgoSobolTestFunctions(object): + """ + Global optimisation tests with Sobol sampling: + """ + + # Sobol algorithm + def test_f1_1_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" + run_test(test1_1) + + def test_f1_2_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" + run_test(test1_2) + + def test_f1_3_sobol(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]""" + run_test(test1_3) + + def test_f2_1_sobol(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]""" + run_test(test2_1) + + def test_f2_2_sobol(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]""" + run_test(test2_2) + + def test_f3_sobol(self): + """NLP: Hock and Schittkowski problem 18""" + run_test(test3_1) + + @pytest.mark.slow + def test_f4_sobol(self): + """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)""" + # run_test(test4_1, n=500) + # run_test(test4_1, n=800) + options = {'infty_constraints': False} + run_test(test4_1, n=990, options=options) + + def test_f5_1_sobol(self): + """NLP: 
Eggholder, multimodal""" + run_test(test5_1, n=30) + + def test_f5_2_sobol(self): + """NLP: Eggholder, multimodal""" + # run_test(test5_1, n=60, iters=5) + run_test(test5_1, n=60, iters=5) + + # def test_t911(self): + # """1D tabletop function""" + # run_test(test11_1) + + +class TestShgoSimplicialTestFunctions(object): + """ + Global optimisation tests with Simplicial sampling: + """ + + def test_f1_1_simplicial(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" + run_test(test1_1, n=1, sampling_method='simplicial') + + def test_f1_2_simplicial(self): + """Multivariate test function 1: + x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" + run_test(test1_2, n=1, sampling_method='simplicial') + + def test_f1_3_simplicial(self): + """Multivariate test function 1: x[0]**2 + x[1]**2 + with bounds=[(None, None),(None, None)]""" + run_test(test1_3, n=1, sampling_method='simplicial') + + def test_f2_1_simplicial(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]""" + options = {'minimize_every_iter': False} + run_test(test2_1, iters=7, options=options, + sampling_method='simplicial') + + def test_f2_2_simplicial(self): + """Univariate test function on + f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]""" + run_test(test2_2, n=1, sampling_method='simplicial') + + def test_f3_simplicial(self): + """NLP: Hock and Schittkowski problem 18""" + run_test(test3_1, n=1, sampling_method='simplicial') + + @pytest.mark.slow + def test_f4_simplicial(self): + """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)""" + run_test(test4_1, n=1, sampling_method='simplicial') + + def test_lj_symmetry(self): + """LJ: Symmetry constrained test function""" + options = {'symmetry': True, + 'disp': True} + args = (6,) # No. 
of atoms + run_test(testLJ, args=args, n=None, + options=options, iters=4, + sampling_method='simplicial') + + +# Argument test functions +class TestShgoArguments(object): + def test_1_1_simpl_iter(self): + """Iterative simplicial sampling on TestFunction 1 (multivariate)""" + run_test(test1_2, n=None, iters=2, sampling_method='simplicial') + + def test_1_2_simpl_iter(self): + """Iterative simplicial on TestFunction 2 (univariate)""" + options = {'minimize_every_iter': False} + run_test(test2_1, n=None, iters=7, options=options, + sampling_method='simplicial') + + def test_2_1_sobol_iter(self): + """Iterative Sobol sampling on TestFunction 1 (multivariate)""" + run_test(test1_2, n=None, iters=1, sampling_method='sobol') + + def test_2_2_sobol_iter(self): + """Iterative Sobol sampling on TestFunction 2 (univariate)""" + res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons, + n=None, iters=1, sampling_method='sobol') + + numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, + atol=1e-5) + numpy.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5) + + def test_3_1_disp_simplicial(self): + """Iterative sampling on TestFunction 1 and 2 (multi and univariate)""" + + def callback_func(x): + print("Local minimization callback test") + + for test in [test1_1, test2_1]: + res = shgo(test.f, test.bounds, iters=1, + sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + res = shgo(test.f, test.bounds, n=1, sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + + def test_3_2_disp_sobol(self): + """Iterative sampling on TestFunction 1 and 2 (multi and univariate)""" + + def callback_func(x): + print("Local minimization callback test") + + for test in [test1_1, test2_1]: + res = shgo(test.f, test.bounds, iters=1, sampling_method='sobol', + callback=callback_func, options={'disp': True}) + + res = shgo(test.f, test.bounds, n=1, sampling_method='simplicial', + callback=callback_func, options={'disp': True}) + + @pytest.mark.slow + def test_4_1_known_f_min(self): + """Test known function minima stopping criteria""" + # Specify known function value + options = {'f_min': test4_1.expected_fun, + 'f_tol': 1e-6, + 'minimize_every_iter': True} + # TODO: Make default n higher for faster tests + run_test(test4_1, n=None, test_atol=1e-5, options=options, + sampling_method='simplicial') + + @pytest.mark.slow + def test_4_2_known_f_min(self): + """Test Global mode limiting local evalutions""" + options = { # Specify known function value + 'f_min': test4_1.expected_fun, + 'f_tol': 1e-6, + # Specify number of local iterations to perform + 'minimize_every_iter': True, + 'local_iter': 1} + + run_test(test4_1, n=None, test_atol=1e-5, options=options, + sampling_method='simplicial') + + @pytest.mark.slow + def test_4_3_known_f_min(self): + """Test Global mode limiting local evalutions""" + options = { # Specify known function value + 'f_min': test4_1.expected_fun, + 'f_tol': 1e-6, + # Specify number of local iterations to perform+ + 'minimize_every_iter': True, + 'local_iter': 1, + 'infty_constraints': False} + + run_test(test4_1, n=300, test_atol=1e-5, options=options, + sampling_method='sobol') + + def test_4_4_known_f_min(self): + """Test Global mode limiting local evalutions for 1D funcs""" + options = { # Specify known function value + 'f_min': test2_1.expected_fun, + 'f_tol': 1e-6, + # Specify number of local iterations to perform+ + 'minimize_every_iter': True, + 'local_iter': 1, + 'infty_constraints': False} + + res = 
shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons, + n=None, iters=None, options=options, + sampling_method='sobol') + numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, + atol=1e-5) + + def test_5_1_simplicial_argless(self): + """Test Default simplicial sampling settings on TestFunction 1""" + res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons) + numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, + atol=1e-5) + + def test_5_2_sobol_argless(self): + """Test Default sobol sampling settings on TestFunction 1""" + res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons, + sampling_method='sobol') + numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, + atol=1e-5) + + def test_6_1_simplicial_max_iter(self): + """Test that maximum iteration option works on TestFunction 3""" + options = {'max_iter': 2} + res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons, + options=options, sampling_method='simplicial') + numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, + atol=1e-5) + numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5) + + def test_6_2_simplicial_min_iter(self): + """Test that maximum iteration option works on TestFunction 3""" + options = {'min_iter': 2} + res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons, + options=options, sampling_method='simplicial') + numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, + atol=1e-5) + numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5) + + def test_7_1_minkwargs(self): + """Test the minimizer_kwargs arguments for solvers with constraints""" + # Test solvers + for solver in ['COBYLA', 'SLSQP']: + # Note that passing global constraints to SLSQP is tested in other + # unittests which run test4_1 normally + minimizer_kwargs = {'method': solver, + 'constraints': test3_1.cons} + print("Solver = {}".format(solver)) + print("=" * 100) + run_test(test3_1, n=100, test_atol=1e-3, + minimizer_kwargs=minimizer_kwargs, sampling_method='sobol') + + def test_7_2_minkwargs(self): + """Test the minimizer_kwargs default inits""" + minimizer_kwargs = {'ftol': 1e-5} + options = {'disp': True} # For coverage purposes + SHGOc = SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0], + minimizer_kwargs=minimizer_kwargs, options=options) + + def test_7_3_minkwargs(self): + """Test minimizer_kwargs arguments for solvers without constraints""" + for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', + 'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact', + 'trust-krylov']: + def jac(x): + return numpy.array([2 * x[0], 2 * x[1]]).T + + def hess(x): + return numpy.array([[2, 0], [0, 2]]) + + minimizer_kwargs = {'method': solver, + 'jac': jac, + 'hess': hess} + logging.info("Solver = {}".format(solver)) + logging.info("=" * 100) + run_test(test1_1, n=100, test_atol=1e-3, + minimizer_kwargs=minimizer_kwargs, sampling_method='sobol') + + def test_8_homology_group_diff(self): + options = {'minhgrd': 1, + 'minimize_every_iter': True} + + run_test(test1_1, n=None, iters=None, options=options, + sampling_method='simplicial') + + def test_9_cons_g(self): + """Test single function constraint passing""" + SHGOc = SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0]) + + def test_10_finite_time(self): + """Test single function constraint passing""" + options = {'maxtime': 1e-15} + res = shgo(test1_1.f, test1_1.bounds, n=1, iters=None, + options=options, sampling_method='sobol') + + def 
test_11_f_min_time(self): + """Test to cover the case where f_lowest == 0""" + options = {'maxtime': 1e-15, + 'f_min': 0.0} + res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None, + options=options, sampling_method='sobol') + + def test_12_sobol_inf_cons(self): + """Test to cover the case where f_lowest == 0""" + options = {'maxtime': 1e-15, + 'f_min': 0.0} + res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None, + options=options, sampling_method='sobol') + + def test_13_high_sobol(self): + """Test init of high-dimensional sobol sequences""" + + def f(x): + return 0 + + bounds = [(None, None), ] * 41 + SHGOc = SHGO(f, bounds) + SHGOc.sobol_points(2, 50) + + def test_14_local_iter(self): + """Test limited local iterations for a pseudo-global mode""" + options = {'local_iter': 4} + run_test(test5_1, n=30, options=options) + + def test_15_min_every_iter(self): + """Test minimize every iter options and cover function cache""" + options = {'minimize_every_iter': True} + run_test(test1_1, n=1, iters=7, options=options, + sampling_method='sobol') + + +# Failure test functions +class TestShgoFailures(object): + def test_1_maxiter(self): + """Test failure on insufficient iterations""" + options = {'maxiter': 2} + res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None, + options=options, sampling_method='sobol') + + numpy.testing.assert_equal(False, res.success) + numpy.testing.assert_equal(4, res.nfev) + + def test_2_sampling(self): + """Rejection of unknown sampling method""" + assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds, + sampling_method='not_Sobol') + + def test_3_1_no_min_pool_sobol(self): + """Check that the routine stops when no minimiser is found + after maximum specified function evaluations""" + options = {'maxfev': 10, + 'disp': True} + res = shgo(test_table.f, test_table.bounds, n=3, options=options, + sampling_method='sobol') + numpy.testing.assert_equal(False, res.success) + # numpy.testing.assert_equal(9, res.nfev) + numpy.testing.assert_equal(12, res.nfev) + + def test_3_2_no_min_pool_simplicial(self): + """Check that the routine stops when no minimiser is found + after maximum specified sampling evaluations""" + options = {'maxev': 10, + 'disp': True} + res = shgo(test_table.f, test_table.bounds, n=3, options=options, + sampling_method='simplicial') + numpy.testing.assert_equal(False, res.success) + + def test_4_1_bound_err(self): + """Specified bounds ub > lb""" + bounds = [(6, 3), (3, 5)] + assert_raises(ValueError, shgo, test1_1.f, bounds) + + def test_4_2_bound_err(self): + """Specified bounds are of the form (lb, ub)""" + bounds = [(3, 5, 5), (3, 5)] + assert_raises(ValueError, shgo, test1_1.f, bounds) + + def test_5_1_1_infeasible_sobol(self): + """Ensures the algorithm terminates on infeasible problems + after maxev is exceeded. Use infty constraints option""" + options = {'maxev': 100, + 'disp': True} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='sobol') + + numpy.testing.assert_equal(False, res.success) + + def test_5_1_2_infeasible_sobol(self): + """Ensures the algorithm terminates on infeasible problems + after maxev is exceeded. 
Do not use infty constraints option""" + options = {'maxev': 100, + 'disp': True, + 'infty_constraints': False} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='sobol') + + numpy.testing.assert_equal(False, res.success) + + def test_5_2_infeasible_simplicial(self): + """Ensures the algorithm terminates on infeasible problems + after maxev is exceeded.""" + options = {'maxev': 1000, + 'disp': False} + + res = shgo(test_infeasible.f, test_infeasible.bounds, + constraints=test_infeasible.cons, n=100, options=options, + sampling_method='simplicial') + + numpy.testing.assert_equal(False, res.success) + + def test_6_1_lower_known_f_min(self): + """Test Global mode limiting local evalutions with f* too high""" + options = { # Specify known function value + 'f_min': test2_1.expected_fun + 2.0, + 'f_tol': 1e-6, + # Specify number of local iterations to perform+ + 'minimize_every_iter': True, + 'local_iter': 1, + 'infty_constraints': False} + args = (test2_1.f, test2_1.bounds) + kwargs = {'constraints': test2_1.cons, + 'n': None, + 'iters': None, + 'options': options, + 'sampling_method': 'sobol' + } + warns(UserWarning, shgo, *args, **kwargs) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__shgo.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__shgo.pyc new file mode 100644 index 0000000..bcfdb00 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__shgo.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__spectral.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__spectral.py new file mode 100644 index 0000000..f709755 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__spectral.py @@ -0,0 +1,210 @@ +from __future__ import division, absolute_import, print_function + +import itertools + +import numpy as np +from numpy import exp +from numpy.testing import assert_, assert_equal + +from scipy.optimize import root + + +def test_performance(): + # Compare performance results to those listed in + # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)] + # and + # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)]. + # and those produced by dfsane.f from M. Raydan's website. + # + # Where the results disagree, the largest limits are taken. + + e_a = 1e-5 + e_r = 1e-4 + + table_1 = [ + dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5), + dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2), + dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11), + dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11), + # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors + dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3 + dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers? + dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers? + dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6? 
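+ # In each entry, F and x0 name a benchmark problem from the references
+ # above, n is the problem size, and nit/nfev are the iteration and
+ # function-evaluation budgets the DF-SANE solver is required to meet
+ # (the assertions below allow nfev + 1 because dfsane.f does not count
+ # the initial evaluation).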
+ dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18), + dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12 + ] + + # Check also scaling invariance + for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10], + ['cruz', 'cheng']): + for problem in table_1: + n = problem['n'] + func = lambda x, n: yscale*problem['F'](x/xscale, n) + args = (n,) + x0 = problem['x0'](n) * xscale + + fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n)) + + sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale) + sigma_0 = xscale/yscale + + with np.errstate(over='ignore'): + sol = root(func, x0, args=args, + options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1, + sigma_0=sigma_0, sigma_eps=sigma_eps, + line_search=line_search), + method='DF-SANE') + + err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)), + fatol, sol.success, sol.nit, sol.nfev]) + assert_(sol.success, err_msg) + assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval + assert_(sol.nit <= problem['nit'], err_msg) + assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg) + + +def test_complex(): + def func(z): + return z**2 - 1 + 2j + x0 = 2.0j + + ftol = 1e-4 + sol = root(func, x0, tol=ftol, method='DF-SANE') + + assert_(sol.success) + + f0 = np.linalg.norm(func(x0)) + fx = np.linalg.norm(func(sol.x)) + assert_(fx <= ftol*f0) + + +def test_linear_definite(): + # The DF-SANE paper proves convergence for "strongly isolated" + # solutions. + # + # For linear systems F(x) = A x - b = 0, with A positive or + # negative definite, the solution is strongly isolated. + + def check_solvability(A, b, line_search='cruz'): + func = lambda x: A.dot(x) - b + xp = np.linalg.solve(A, b) + eps = np.linalg.norm(func(xp)) * 1e3 + sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search), + method='DF-SANE') + assert_(sol.success) + assert_(np.linalg.norm(func(sol.x)) <= eps) + + n = 90 + + # Test linear pos.def. system + np.random.seed(1234) + A = np.arange(n*n).reshape(n, n) + A = A + n*n * np.diag(1 + np.arange(n)) + assert_(np.linalg.eigvals(A).min() > 0) + b = np.arange(n) * 1.0 + check_solvability(A, b, 'cruz') + check_solvability(A, b, 'cheng') + + # Test linear neg.def. system + check_solvability(-A, b, 'cruz') + check_solvability(-A, b, 'cheng') + + +def test_shape(): + def f(x, arg): + return x - arg + + for dt in [float, complex]: + x = np.zeros([2,2]) + arg = np.ones([2,2], dtype=dt) + + sol = root(f, x, args=(arg,), method='DF-SANE') + assert_(sol.success) + assert_equal(sol.x.shape, x.shape) + + +# Some of the test functions and initial guesses listed in +# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)] + +def F_1(x, n): + g = np.zeros([n]) + i = np.arange(2, n+1) + g[0] = exp(x[0] - 1) - 1 + g[1:] = i*(exp(x[1:] - 1) - x[1:]) + return g + +def x0_1(n): + x0 = np.empty([n]) + x0.fill(n/(n-1)) + return x0 + +def F_2(x, n): + g = np.zeros([n]) + i = np.arange(2, n+1) + g[0] = exp(x[0]) - 1 + g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1) + return g + +def x0_2(n): + x0 = np.empty([n]) + x0.fill(1/n**2) + return x0 + +def F_4(x, n): + assert_equal(n % 3, 0) + g = np.zeros([n]) + # Note: the first line is typoed in some of the references; + # correct in original [Gasparo, Optimization Meth. 
13, 79 (2000)] + g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8 + g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16 + g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3 + return g + +def x0_4(n): + assert_equal(n % 3, 0) + x0 = np.array([-1, 1/2, -1] * (n//3)) + return x0 + +def F_6(x, n): + c = 0.9 + mu = (np.arange(1, n+1) - 0.5)/n + return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1)) + +def x0_6(n): + return np.ones([n]) + +def F_7(x, n): + assert_equal(n % 3, 0) + + def phi(t): + v = 0.5*t - 2 + v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1] + v[t >= 2] = (0.5*t + 2)[t >= 2] + return v + g = np.zeros([n]) + g[::3] = 1e4 * x[1::3]**2 - 1 + g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001 + g[2::3] = phi(x[2::3]) + return g + +def x0_7(n): + assert_equal(n % 3, 0) + return np.array([1e-3, 18, 1] * (n//3)) + +def F_9(x, n): + g = np.zeros([n]) + i = np.arange(2, n) + g[0] = x[0]**3/3 + x[1]**2/2 + g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2 + g[-1] = -x[-1]**2/2 + n*x[-1]**3/3 + return g + +def x0_9(n): + return np.ones([n]) + +def F_10(x, n): + return np.log(1 + x) - x/n + +def x0_10(n): + return np.ones([n]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__spectral.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__spectral.pyc new file mode 100644 index 0000000..bd5b355 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test__spectral.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_cobyla.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_cobyla.py new file mode 100644 index 0000000..f969929 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_cobyla.py @@ -0,0 +1,115 @@ +from __future__ import division, print_function, absolute_import + +import math +import numpy as np + +from numpy.testing import assert_allclose, assert_ + +from scipy.optimize import fmin_cobyla, minimize + + +class TestCobyla(object): + def setup_method(self): + self.x0 = [4.95, 0.66] + self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3] + self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5, + 'maxiter': 100} + + def fun(self, x): + return x[0]**2 + abs(x[1])**3 + + def con1(self, x): + return x[0]**2 + x[1]**2 - 25 + + def con2(self, x): + return -self.con1(x) + + def test_simple(self): + # use disp=True as smoke test for gh-8118 + x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1, + rhoend=1e-5, maxfun=100, disp=True) + assert_allclose(x, self.solution, atol=1e-4) + + def test_minimize_simple(self): + # Minimize with method='COBYLA' + cons = ({'type': 'ineq', 'fun': self.con1}, + {'type': 'ineq', 'fun': self.con2}) + sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons, + options=self.opts) + assert_allclose(sol.x, self.solution, atol=1e-4) + assert_(sol.success, sol.message) + assert_(sol.maxcv < 1e-5, sol) + assert_(sol.nfev < 70, sol) + assert_(sol.fun < self.fun(self.solution) + 1e-3, sol) + + def test_minimize_constraint_violation(self): + np.random.seed(1234) + pb = np.random.rand(10, 10) + spread = np.random.rand(10) + + def p(w): + return pb.dot(w) + + def f(w): + return -(w * spread).sum() + + def c1(w): + return 500 - abs(p(w)).sum() + + def c2(w): + return 5 - abs(p(w).sum()) + + def c3(w): + return 5 - abs(p(w)).max() + + cons = 
({'type': 'ineq', 'fun': c1}, + {'type': 'ineq', 'fun': c2}, + {'type': 'ineq', 'fun': c3}) + w0 = np.zeros((10, 1)) + sol = minimize(f, w0, method='cobyla', constraints=cons, + options={'catol': 1e-6}) + assert_(sol.maxcv > 1e-6) + assert_(not sol.success) + + +def test_vector_constraints(): + # test that fmin_cobyla and minimize can take a combination + # of constraints, some returning a number and others an array + def fun(x): + return (x[0] - 1)**2 + (x[1] - 2.5)**2 + + def fmin(x): + return fun(x) - 1 + + def cons1(x): + a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]]) + return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] + + a[i, 2] for i in range(len(a))]) + + def cons2(x): + return x # identity, acts as bounds x > 0 + + x0 = np.array([2, 0]) + cons_list = [fun, cons1, cons2] + + xsol = [1.4, 1.7] + fsol = 0.8 + + # testing fmin_cobyla + sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5) + assert_allclose(sol, xsol, atol=1e-4) + + sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5) + assert_allclose(fun(sol), 1, atol=1e-4) + + # testing minimize + constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list] + sol = minimize(fun, x0, constraints=constraints, tol=1e-5) + assert_allclose(sol.x, xsol, atol=1e-4) + assert_(sol.success, sol.message) + assert_allclose(sol.fun, fsol, atol=1e-4) + + constraints = {'type': 'ineq', 'fun': fmin} + sol = minimize(fun, x0, constraints=constraints, tol=1e-5) + assert_allclose(sol.fun, 1, atol=1e-4) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_cobyla.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_cobyla.pyc new file mode 100644 index 0000000..0f094ce Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_cobyla.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraint_conversion.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraint_conversion.py new file mode 100644 index 0000000..d29408f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraint_conversion.py @@ -0,0 +1,270 @@ +from __future__ import division, print_function, absolute_import +""" +Unit test for constraint conversion +""" + +import numpy as np +from numpy.testing import (assert_, assert_array_almost_equal, + assert_allclose, assert_equal, TestCase) +import pytest +from scipy._lib._numpy_compat import suppress_warnings +from scipy.optimize import (NonlinearConstraint, LinearConstraint, Bounds, + OptimizeWarning, minimize, BFGS) +from .test_minimize_constrained import (Maratos, HyperbolicIneq, Rosenbrock, + IneqRosenbrock, EqIneqRosenbrock, + BoundedRosenbrock, Elec) +from scipy._lib._numpy_compat import _assert_warns, suppress_warnings + + +class TestOldToNew(object): + x0 = (2, 0) + bnds = ((0, None), (0, None)) + method = "trust-constr" + + def test_constraint_dictionary_1(self): + fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, + {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.4, 1.7], rtol=1e-4) + assert_allclose(res.fun, 0.8, rtol=1e-4) + + def test_constraint_dictionary_2(self): + fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + cons 
= {'type': 'eq', + 'fun': lambda x, p1, p2: p1*x[0] - p2*x[1], + 'args': (1, 1.1), + 'jac': lambda x, p1, p2: np.array([[p1, -p2]])} + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.7918552, 1.62895927]) + assert_allclose(res.fun, 1.3857466063348418) + + def test_constraint_dictionary_3(self): + fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + cons = [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)] + + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + res = minimize(fun, self.x0, method=self.method, + bounds=self.bnds, constraints=cons) + assert_allclose(res.x, [1.75, 1.75], rtol=1e-4) + assert_allclose(res.fun, 1.125, rtol=1e-4) + + +class TestNewToOld(object): + + def test_multiple_constraint_objects(self): + fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2 + x0 = [2, 0, 1] + coni = [] # only inequality constraints (can use cobyla) + methods = ["slsqp", "cobyla", "trust-constr"] + + # mixed old and new + coni.append([{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + coni.append([LinearConstraint([1, -2, 0], -2, np.inf), + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + coni.append([NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf), + NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) + + for con in coni: + funs = {} + for method in methods: + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4) + assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4) + + def test_individual_constraint_objects(self): + fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2 + x0 = [2, 0, 1] + + cone = [] # with equality constraints (can't use cobyla) + coni = [] # only inequality constraints (can use cobyla) + methods = ["slsqp", "cobyla", "trust-constr"] + + # nonstandard data types for constraint equality bounds + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1)) + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21])) + cone.append(NonlinearConstraint(lambda x: x[0] - x[1], + 1.21, np.array([1.21]))) + + # multiple equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + 1.21, 1.21)) # two same equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, 1.4], [1.21, 1.4])) # two different equalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, 1.21], 1.21)) # equality specified two ways + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, -np.inf], [1.21, np.inf])) # equality + unbounded + + # nonstandard data types for constraint inequality bounds + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], + 1.21, np.array([np.inf]))) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3)) + coni.append(NonlinearConstraint(lambda x: x[0] - x[1], + np.array(-np.inf), -3)) + + # multiple inequalities/equalities + coni.append(NonlinearConstraint( + lambda x: [x[0] - 
x[1], x[1] - x[2]], + 1.21, np.inf)) # two same inequalities + cone.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.21, -np.inf], [1.21, 1.4])) # mixed equality/inequality + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [1.1, .8], [1.2, 1.4])) # bounded above and below + coni.append(NonlinearConstraint( + lambda x: [x[0] - x[1], x[1] - x[2]], + [-1.2, -1.4], [-1.1, -.8])) # - bounded above and below + + # quick check of LinearConstraint class (very little new code to test) + cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21)) + cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21)) + cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], + [1.21, -np.inf], [1.21, 1.4])) + + for con in coni: + funs = {} + for method in methods: + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) + assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3) + + for con in cone: + funs = {} + for method in methods[::2]: # skip cobyla + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(fun, x0, method=method, constraints=con) + funs[method] = result.fun + assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) + + +class TestNewToOldSLSQP(object): + method = 'slsqp' + elec = Elec(n_electrons=2) + elec.x_opt = np.array([-0.58438468, 0.58438466, 0.73597047, + -0.73597044, 0.34180668, -0.34180667]) + brock = BoundedRosenbrock() + brock.x_opt = [0, 0] + list_of_problems = [Maratos(), + HyperbolicIneq(), + Rosenbrock(), + IneqRosenbrock(), + EqIneqRosenbrock(), + elec, + brock + ] + + def test_list_of_problems(self): + + for prob in self.list_of_problems: + + with suppress_warnings() as sup: + sup.filter(UserWarning) + result = minimize(prob.fun, prob.x0, + method=self.method, + bounds=prob.bounds, + constraints=prob.constr) + + assert_array_almost_equal(result.x, prob.x_opt, decimal=3) + + def test_warn_mixed_constraints(self): + # warns about inefficiency of mixed equality/inequality constraints + fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2 + cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]], + [1.1, .8], [1.1, 1.4]) + bnds = ((0, None), (0, None), (0, None)) + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + _assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1), + method=self.method, bounds=bnds, constraints=cons) + + def test_warn_ignored_options(self): + # warns about constraint options being ignored + fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2 + x0 = (2, 0, 1) + + if self.method == "slsqp": + bnds = ((0, None), (0, None), (0, None)) + else: + bnds = None + + cons = NonlinearConstraint(lambda x: x[0], 2, np.inf) + res = minimize(fun, x0, method=self.method, + bounds=bnds, constraints=cons) + # no warnings without constraint options + assert_allclose(res.fun, 1) + + cons = LinearConstraint([1, 0, 0], 2, np.inf) + res = minimize(fun, x0, method=self.method, + bounds=bnds, constraints=cons) + # no warnings without constraint options + assert_allclose(res.fun, 1) + + cons = [] + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + keep_feasible=True)) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + hess=BFGS())) + cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, + finite_diff_jac_sparsity=42)) + 
cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
+                                        finite_diff_rel_step=42))
+        cons.append(LinearConstraint([1, 0, 0], 2, np.inf,
+                                     keep_feasible=True))
+        for con in cons:
+            _assert_warns(OptimizeWarning, minimize, fun, x0,
+                          method=self.method, bounds=bnds, constraints=con)
+
+
+class TestNewToOldCobyla(object):
+    method = 'cobyla'
+
+    list_of_problems = [
+        Elec(n_electrons=2),
+        Elec(n_electrons=4),
+    ]
+
+    @pytest.mark.slow
+    def test_list_of_problems(self):
+
+        for prob in self.list_of_problems:
+
+            with suppress_warnings() as sup:
+                sup.filter(UserWarning)
+                truth = minimize(prob.fun, prob.x0,
+                                 method='trust-constr',
+                                 bounds=prob.bounds,
+                                 constraints=prob.constr)
+                result = minimize(prob.fun, prob.x0,
+                                  method=self.method,
+                                  bounds=prob.bounds,
+                                  constraints=prob.constr)
+
+            assert_allclose(result.fun, truth.fun, rtol=1e-3)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraint_conversion.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraint_conversion.pyc
new file mode 100644
index 0000000..315cd6c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraint_conversion.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraints.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraints.py
new file mode 100644
index 0000000..78b5637
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraints.py
@@ -0,0 +1,132 @@
+from __future__ import division, print_function, absolute_import
+import pytest
+import numpy as np
+from numpy.testing import TestCase, assert_array_equal
+import scipy.sparse as sps
+from scipy.optimize._constraints import (
+    Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint,
+    new_bounds_to_old, old_bound_to_new, strict_bounds)
+
+
+class TestStrictBounds(TestCase):
+    def test_scalarvalue_unique_enforce_feasibility(self):
+        m = 3
+        lb = 2
+        ub = 4
+        enforce_feasibility = False
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
+        assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
+
+        enforce_feasibility = True
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [2, 2, 2])
+        assert_array_equal(strict_ub, [4, 4, 4])
+
+    def test_vectorvalue_unique_enforce_feasibility(self):
+        m = 3
+        lb = [1, 2, 3]
+        ub = [4, 5, 6]
+        enforce_feasibility = False
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
+        assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
+
+        enforce_feasibility = True
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [1, 2, 3])
+        assert_array_equal(strict_ub, [4, 5, 6])
+
+    def test_scalarvalue_vector_enforce_feasibility(self):
+        m = 3
+        lb = 2
+        ub = 4
+        enforce_feasibility = [False, True, False]
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [-np.inf, 2, -np.inf])
+        assert_array_equal(strict_ub, [np.inf, 4, np.inf])
+
+    def test_vectorvalue_vector_enforce_feasibility(self):
+        m = 3
+        lb = [1, 2, 3]
+        ub = [4, 6, np.inf]
+        enforce_feasibility = [True, False, True]
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb,
[1, -np.inf, 3]) + assert_array_equal(strict_ub, [4, np.inf, np.inf]) + + +def test_prepare_constraint_infeasible_x0(): + lb = np.array([0, 20, 30]) + ub = np.array([0.5, np.inf, 70]) + x0 = np.array([1, 2, 3]) + enforce_feasibility = np.array([False, True, True], dtype=bool) + bounds = Bounds(lb, ub, enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, bounds, x0) + + x0 = np.array([1, 2, 3, 4]) + A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]]) + enforce_feasibility = np.array([True, True, True], dtype=bool) + linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, linear, x0) + + def fun(x): + return A.dot(x) + + def jac(x): + return A + + def hess(x, v): + return sps.csr_matrix((4, 4)) + + nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess, + enforce_feasibility) + pytest.raises(ValueError, PreparedConstraint, nonlinear, x0) + + +def test_new_bounds_to_old(): + lb = np.array([-np.inf, 2, 3]) + ub = np.array([3, np.inf, 10]) + + bounds = [(None, 3), (2, None), (3, 10)] + assert_array_equal(new_bounds_to_old(lb, ub, 3), bounds) + + bounds_single_lb = [(-1, 3), (-1, None), (-1, 10)] + assert_array_equal(new_bounds_to_old(-1, ub, 3), bounds_single_lb) + + bounds_no_lb = [(None, 3), (None, None), (None, 10)] + assert_array_equal(new_bounds_to_old(-np.inf, ub, 3), bounds_no_lb) + + bounds_single_ub = [(None, 20), (2, 20), (3, 20)] + assert_array_equal(new_bounds_to_old(lb, 20, 3), bounds_single_ub) + + bounds_no_ub = [(None, None), (2, None), (3, None)] + assert_array_equal(new_bounds_to_old(lb, np.inf, 3), bounds_no_ub) + + bounds_single_both = [(1, 2), (1, 2), (1, 2)] + assert_array_equal(new_bounds_to_old(1, 2, 3), bounds_single_both) + + bounds_no_both = [(None, None), (None, None), (None, None)] + assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3), bounds_no_both) + + +def test_old_bounds_to_new(): + bounds = ([1, 2], (None, 3), (-1, None)) + lb_true = np.array([1, -np.inf, -1]) + ub_true = np.array([2, 3, np.inf]) + + lb, ub = old_bound_to_new(bounds) + assert_array_equal(lb, lb_true) + assert_array_equal(ub, ub_true) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraints.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraints.pyc new file mode 100644 index 0000000..ef300fe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_constraints.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_differentiable_functions.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_differentiable_functions.py new file mode 100644 index 0000000..482cd7e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_differentiable_functions.py @@ -0,0 +1,580 @@ +from __future__ import division, print_function, absolute_import +import numpy as np +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_array_equal, assert_) +from scipy.sparse import csr_matrix +from scipy.sparse.linalg import LinearOperator +from scipy.optimize._differentiable_functions import (ScalarFunction, + VectorFunction, + LinearVectorFunction, + IdentityVectorFunction) + + +class ExScalarFunction: + + def __init__(self): + self.nfev = 0 + self.ngev = 0 + self.nhev = 0 + + def fun(self, x): + self.nfev += 1 + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x): + self.ngev += 1 + return np.array([4*x[0]-1, 4*x[1]]) + + def 
hess(self, x): + self.nhev += 1 + return 4*np.eye(2) + + +class TestScalarFunction(TestCase): + + def test_finite_difference_grad(self): + ex = ExScalarFunction() + nfev = 0 + ngev = 0 + + x0 = [1.0, 0.0] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev, nfev) + approx = ScalarFunction(ex.fun, x0, (), '2-point', + ex.hess, None, (-np.inf, np.inf)) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.g, approx.g) + + x = [10, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + x = [2.0, 1.0] + g_analit = analit.grad(x) + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + + g_approx = approx.grad(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(g_analit, g_approx) + + x = [2.5, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + x = [2, 0.3] + f_analit = analit.fun(x) + g_analit = analit.grad(x) + nfev += 1 + ngev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + f_approx = approx.fun(x) + g_approx = approx.grad(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(g_analit, g_approx) + + def test_finite_difference_hess_linear_operator(self): + ex = ExScalarFunction() + nfev = 0 + ngev = 0 + nhev = 0 + + x0 = [1.0, 0.0] + analit = ScalarFunction(ex.fun, x0, (), ex.grad, + ex.hess, None, (-np.inf, np.inf)) + nfev += 1 + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + 
assert_array_equal(analit.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev, nhev) + approx = ScalarFunction(ex.fun, x0, (), ex.grad, + '2-point', None, (-np.inf, np.inf)) + assert_(isinstance(approx.H, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.g, approx.g) + assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v)) + nfev += 1 + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.0, 1.0] + H_analit = analit.hess(x) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.1, 1.2] + H_analit = analit.hess(x) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.5, 0.3] + _ = analit.grad(x) + H_analit = analit.hess(x) + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.grad(x) + H_approx = approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [5.2, 2.3] + _ = analit.grad(x) + H_analit = analit.hess(x) + ngev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.grad(x) + H_approx = 
approx.hess(x) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + ngev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.ngev, ngev) + assert_array_equal(analit.ngev+approx.ngev, ngev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + +class ExVectorialFunction: + + def __init__(self): + self.nfev = 0 + self.njev = 0 + self.nhev = 0 + + def fun(self, x): + self.nfev += 1 + return np.array([2*(x[0]**2 + x[1]**2 - 1) - x[0], + 4*(x[0]**3 + x[1]**2 - 4) - 3*x[0]]) + + def jac(self, x): + self.njev += 1 + return np.array([[4*x[0]-1, 4*x[1]], + [12*x[0]**2-3, 8*x[1]]]) + + def hess(self, x, v): + self.nhev += 1 + return v[0]*4*np.eye(2) + v[1]*np.array([[24*x[0], 0], + [0, 8]]) + + +class TestVectorialFunction(TestCase): + + def test_finite_difference_jac(self): + ex = ExVectorialFunction() + nfev = 0 + njev = 0 + + x0 = [1.0, 0.0] + v0 = [0.0, 1.0] + analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev, njev) + approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.J, approx.J) + + x = [10, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx, decimal=4) + + x = [2.0, 1.0] + J_analit = analit.jac(x) + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(J_analit, J_approx) + + x = [2.5, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx) + + x = [2, 0.3] + f_analit = analit.fun(x) + J_analit = analit.jac(x) + nfev += 1 + njev += 1 + 
assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + f_approx = approx.fun(x) + J_approx = approx.jac(x) + nfev += 3 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_almost_equal(f_analit, f_approx) + assert_array_almost_equal(J_analit, J_approx) + + def test_finite_difference_hess_linear_operator(self): + ex = ExVectorialFunction() + nfev = 0 + njev = 0 + nhev = 0 + + x0 = [1.0, 0.0] + v0 = [1.0, 2.0] + analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, + (-np.inf, np.inf), None) + nfev += 1 + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev, nhev) + approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None, + (-np.inf, np.inf), None) + assert_(isinstance(approx.H, LinearOperator)) + for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_equal(analit.f, approx.f) + assert_array_almost_equal(analit.J, approx.J) + assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p)) + nfev += 1 + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.0, 1.0] + H_analit = analit.hess(x, v0) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x, v0) + assert_(isinstance(H_approx, LinearOperator)) + for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(p), H_approx.dot(p), + decimal=5) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.1, 1.2] + v = [1.0, 1.0] + H_analit = analit.hess(x, v) + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + H_approx = approx.hess(x, v) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [2.5, 0.3] + _ = analit.jac(x) + H_analit = analit.hess(x, v0) + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + 
assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.jac(x) + H_approx = approx.hess(x, v0) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + x = [5.2, 2.3] + v = [2.3, 5.2] + _ = analit.jac(x) + H_analit = analit.hess(x, v) + njev += 1 + nhev += 1 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + _ = approx.jac(x) + H_approx = approx.hess(x, v) + assert_(isinstance(H_approx, LinearOperator)) + for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): + assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4) + njev += 4 + assert_array_equal(ex.nfev, nfev) + assert_array_equal(analit.nfev+approx.nfev, nfev) + assert_array_equal(ex.njev, njev) + assert_array_equal(analit.njev+approx.njev, njev) + assert_array_equal(ex.nhev, nhev) + assert_array_equal(analit.nhev+approx.nhev, nhev) + + +def test_LinearVectorFunction(): + A_dense = np.array([ + [-1, 2, 0], + [0, 4, 2] + ]) + x0 = np.zeros(3) + A_sparse = csr_matrix(A_dense) + x = np.array([1, -1, 0]) + v = np.array([-1, 1]) + Ax = np.array([-3, -4]) + + f1 = LinearVectorFunction(A_dense, x0, None) + assert_(not f1.sparse_jacobian) + + f2 = LinearVectorFunction(A_dense, x0, True) + assert_(f2.sparse_jacobian) + + f3 = LinearVectorFunction(A_dense, x0, False) + assert_(not f3.sparse_jacobian) + + f4 = LinearVectorFunction(A_sparse, x0, None) + assert_(f4.sparse_jacobian) + + f5 = LinearVectorFunction(A_sparse, x0, True) + assert_(f5.sparse_jacobian) + + f6 = LinearVectorFunction(A_sparse, x0, False) + assert_(not f6.sparse_jacobian) + + assert_array_equal(f1.fun(x), Ax) + assert_array_equal(f2.fun(x), Ax) + assert_array_equal(f1.jac(x), A_dense) + assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray()) + assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3))) + + +def test_LinearVectorFunction_memoization(): + A = np.array([[-1, 2, 0], [0, 4, 2]]) + x0 = np.array([1, 2, -1]) + fun = LinearVectorFunction(A, x0, False) + + assert_array_equal(x0, fun.x) + assert_array_equal(A.dot(x0), fun.f) + + x1 = np.array([-1, 3, 10]) + assert_array_equal(A, fun.jac(x1)) + assert_array_equal(x1, fun.x) + assert_array_equal(A.dot(x0), fun.f) + assert_array_equal(A.dot(x1), fun.fun(x1)) + assert_array_equal(A.dot(x1), fun.f) + + +def test_IdentityVectorFunction(): + x0 = np.zeros(3) + + f1 = IdentityVectorFunction(x0, None) + f2 = IdentityVectorFunction(x0, False) + f3 = IdentityVectorFunction(x0, True) + + assert_(f1.sparse_jacobian) + assert_(not f2.sparse_jacobian) + assert_(f3.sparse_jacobian) + + x = np.array([-1, 2, 1]) + v = np.array([-2, 3, 0]) + + assert_array_equal(f1.fun(x), x) + assert_array_equal(f2.fun(x), x) + + assert_array_equal(f1.jac(x).toarray(), np.eye(3)) + assert_array_equal(f2.jac(x), np.eye(3)) + + assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3))) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_differentiable_functions.pyc 
b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_differentiable_functions.pyc
new file mode 100644
index 0000000..437dc64
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_differentiable_functions.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py
new file mode 100644
index 0000000..d92bc82
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py
@@ -0,0 +1,219 @@
+from __future__ import division, print_function, absolute_import
+import numpy as np
+from copy import deepcopy
+from numpy.linalg import norm
+from numpy.testing import (TestCase, assert_array_almost_equal,
+                           assert_array_equal, assert_array_less,
+                           assert_raises, assert_equal, assert_,
+                           run_module_suite, assert_allclose, assert_warns,
+                           dec)
+from scipy.optimize import (BFGS,
+                            SR1,
+                            HessianUpdateStrategy,
+                            minimize)
+
+
+class Rosenbrock:
+    """Rosenbrock function.
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
+    """
+
+    def __init__(self, n=2, random_state=0):
+        rng = np.random.RandomState(random_state)
+        self.x0 = rng.uniform(-1, 1, n)
+        self.x_opt = np.ones(n)
+
+    def fun(self, x):
+        x = np.asarray(x)
+        r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
+                   axis=0)
+        return r
+
+    def grad(self, x):
+        x = np.asarray(x)
+        xm = x[1:-1]
+        xm_m1 = x[:-2]
+        xm_p1 = x[2:]
+        der = np.zeros_like(x)
+        der[1:-1] = (200 * (xm - xm_m1**2) -
+                     400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
+        der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
+        der[-1] = 200 * (x[-1] - x[-2]**2)
+        return der
+
+    def hess(self, x):
+        x = np.atleast_1d(x)
+        H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
+        diagonal = np.zeros(len(x), dtype=x.dtype)
+        diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
+        diagonal[-1] = 200
+        diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
+        H = H + np.diag(diagonal)
+        return H
+
+
+class TestHessianUpdateStrategy(TestCase):
+
+    def test_hessian_initialization(self):
+        quasi_newton = (BFGS(), SR1())
+
+        for qn in quasi_newton:
+            qn.initialize(5, 'hess')
+            B = qn.get_matrix()
+
+            assert_array_equal(B, np.eye(5))
+
+    # For this list of points it is known
+    # that no exceptions occur during the
+    # Hessian update. Hence no update is
+    # skipped or damped.
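+    # (Background: the BFGS update is well defined only when the curvature
+    # condition dot(s, y) > 0 holds for each step s = x_{k+1} - x_k and
+    # gradient difference y = g_{k+1} - g_k; the test below re-checks this
+    # for every consecutive pair of points before applying the update.)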
+ def test_rosenbrock_with_no_exception(self): + # Define auxiliar problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], + [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], + [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], + [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], + [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], + [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], + [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], + [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], + [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], + [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], + [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], + [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], + [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338], + [0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691], + [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041], + [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744], + [0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623], + [0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448], + [0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437], + [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581], + [0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553], + [0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149], + [0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663], + [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288], + [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356], + [1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912], + [0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305], + [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047], + [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297], + [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032], + [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786], + [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + # Check curvature condition + for i in range(len(delta_x)): + s = delta_x[i] + y = delta_grad[i] + if np.dot(s, y) <= 0: + raise ArithmeticError() + # Define QuasiNewton update + for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4), + SR1(init_scale=1)): + hess = deepcopy(quasi_newton) + inv_hess = deepcopy(quasi_newton) + hess.initialize(len(x_list[0]), 'hess') + inv_hess.initialize(len(x_list[0]), 'inv_hess') + # Compare the hessian and its inverse + for i in range(len(delta_x)): + s = delta_x[i] + y = delta_grad[i] + hess.update(s, y) + inv_hess.update(s, y) + B = hess.get_matrix() + H = inv_hess.get_matrix() + assert_array_almost_equal(np.linalg.inv(B), H, decimal=10) + B_true = prob.hess(x_list[i+1]) + assert_array_less(norm(B - B_true)/norm(B_true), 0.1) + + def test_SR1_skip_update(self): + # Define auxiliar problem + prob = Rosenbrock(n=5) + # Define 
iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], + [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], + [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], + [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], + [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], + [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], + [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], + [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], + [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], + [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], + [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], + [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], + [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + hess = SR1(init_scale=1, min_denominator=1e-2) + hess.initialize(len(x_list[0]), 'hess') + # Compare the hessian and its inverse + for i in range(len(delta_x)-1): + s = delta_x[i] + y = delta_grad[i] + hess.update(s, y) + # Test skip update + B = np.copy(hess.get_matrix()) + s = delta_x[17] + y = delta_grad[17] + hess.update(s, y) + B_updated = np.copy(hess.get_matrix()) + assert_array_equal(B, B_updated) + + def test_BFGS_skip_update(self): + # Define auxiliar problem + prob = Rosenbrock(n=5) + # Define iteration points + x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], + [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], + [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], + [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], + [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], + [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], + [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]] + # Get iteration points + grad_list = [prob.grad(x) for x in x_list] + delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) + for i in range(len(x_list)-1)] + delta_grad = [grad_list[i+1]-grad_list[i] + for i in range(len(grad_list)-1)] + hess = BFGS(init_scale=1, min_curvature=10) + hess.initialize(len(x_list[0]), 'hess') + # Compare the hessian and its inverse + for i in range(len(delta_x)-1): + s = delta_x[i] + y = delta_grad[i] + hess.update(s, y) + # Test skip update + B = np.copy(hess.get_matrix()) + s = delta_x[5] + y = delta_grad[5] + hess.update(s, y) + B_updated = np.copy(hess.get_matrix()) + assert_array_equal(B, B_updated) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hessian_update_strategy.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hessian_update_strategy.pyc new file mode 100644 index 0000000..98f1f5d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hessian_update_strategy.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hungarian.py 
b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hungarian.py new file mode 100644 index 0000000..69ae037 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hungarian.py @@ -0,0 +1,74 @@ +# Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck +# License: BSD + +from numpy.testing import assert_array_equal +from pytest import raises as assert_raises + +import numpy as np + +from scipy.optimize import linear_sum_assignment + + +def test_linear_sum_assignment(): + for cost_matrix, expected_cost in [ + # Square + ([[400, 150, 400], + [400, 450, 600], + [300, 225, 300]], + [150, 400, 300] + ), + + # Rectangular variant + ([[400, 150, 400, 1], + [400, 450, 600, 2], + [300, 225, 300, 3]], + [150, 2, 300]), + + # Square + ([[10, 10, 8], + [9, 8, 1], + [9, 7, 4]], + [10, 1, 7]), + + # Rectangular variant + ([[10, 10, 8, 11], + [9, 8, 1, 1], + [9, 7, 4, 10]], + [10, 1, 4]), + + # n == 2, m == 0 matrix + ([[], []], + []), + ]: + cost_matrix = np.array(cost_matrix) + row_ind, col_ind = linear_sum_assignment(cost_matrix) + assert_array_equal(row_ind, np.sort(row_ind)) + assert_array_equal(expected_cost, cost_matrix[row_ind, col_ind]) + + cost_matrix = cost_matrix.T + row_ind, col_ind = linear_sum_assignment(cost_matrix) + assert_array_equal(row_ind, np.sort(row_ind)) + assert_array_equal(np.sort(expected_cost), + np.sort(cost_matrix[row_ind, col_ind])) + + +def test_linear_sum_assignment_input_validation(): + assert_raises(ValueError, linear_sum_assignment, [1, 2, 3]) + + C = [[1, 2, 3], [4, 5, 6]] + assert_array_equal(linear_sum_assignment(C), + linear_sum_assignment(np.asarray(C))) + assert_array_equal(linear_sum_assignment(C), + linear_sum_assignment(np.matrix(C))) + + I = np.identity(3) + assert_array_equal(linear_sum_assignment(I.astype(np.bool)), + linear_sum_assignment(I)) + assert_raises(ValueError, linear_sum_assignment, I.astype(str)) + + I[0][0] = np.nan + assert_raises(ValueError, linear_sum_assignment, I) + + I = np.identity(3) + I[1][1] = np.inf + assert_raises(ValueError, linear_sum_assignment, I) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hungarian.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hungarian.pyc new file mode 100644 index 0000000..163b844 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_hungarian.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py new file mode 100644 index 0000000..3bf88f2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py @@ -0,0 +1,45 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_, assert_allclose +import scipy.linalg +from scipy.optimize import minimize + + +def test_1(): + def f(x): + return x**4, 4*x**3 + + for gtol in [1e-8, 1e-12, 1e-20]: + for maxcor in range(20, 35): + result = minimize(fun=f, jac=True, method='L-BFGS-B', x0=20, + options={'gtol': gtol, 'maxcor': maxcor}) + + H1 = result.hess_inv(np.array([1])).reshape(1,1) + H2 = result.hess_inv.todense() + + assert_allclose(H1, H2) + + +def test_2(): + H0 = [[3, 0], [1, 2]] + + def f(x): + return np.dot(x, np.dot(scipy.linalg.inv(H0), x)) + + result1 = minimize(fun=f, method='L-BFGS-B', x0=[10, 20]) + result2 = minimize(fun=f, method='BFGS', x0=[10, 20]) + + 
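+    # For method='L-BFGS-B', result.hess_inv is not a dense array but a
+    # scipy.optimize.LbfgsInvHessProduct, a LinearOperator built from the
+    # stored correction pairs. It can be applied to a vector directly,
+    # e.g. result1.hess_inv(np.array([1, 0])), or expanded with .todense(),
+    # which is what the assertions below exercise.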
H1 = result1.hess_inv.todense() + + H2 = np.vstack(( + result1.hess_inv(np.array([1, 0])), + result1.hess_inv(np.array([0, 1])))) + + assert_allclose( + result1.hess_inv(np.array([1, 0]).reshape(2,1)).reshape(-1), + result1.hess_inv(np.array([1, 0]))) + assert_allclose(H1, H2) + assert_allclose(H1, result2.hess_inv, rtol=1e-2, atol=0.03) + + diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.pyc new file mode 100644 index 0000000..e002642 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_least_squares.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_least_squares.py new file mode 100644 index 0000000..42c6929 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_least_squares.py @@ -0,0 +1,735 @@ +from __future__ import division + +from itertools import product + +import numpy as np +from numpy.linalg import norm +from numpy.testing import (assert_, assert_allclose, + assert_equal) +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +from scipy.sparse import issparse, lil_matrix +from scipy.sparse.linalg import aslinearoperator + +from scipy.optimize import least_squares +from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES +from scipy.optimize._lsq.common import EPS, make_strictly_feasible + + +def fun_trivial(x, a=0): + return (x - a)**2 + 5.0 + + +def jac_trivial(x, a=0.0): + return 2 * (x - a) + + +def fun_2d_trivial(x): + return np.array([x[0], x[1]]) + + +def jac_2d_trivial(x): + return np.identity(2) + + +def fun_rosenbrock(x): + return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) + + +def jac_rosenbrock(x): + return np.array([ + [-20 * x[0], 10], + [-1, 0] + ]) + + +def jac_rosenbrock_bad_dim(x): + return np.array([ + [-20 * x[0], 10], + [-1, 0], + [0.0, 0.0] + ]) + + +def fun_rosenbrock_cropped(x): + return fun_rosenbrock(x)[0] + + +def jac_rosenbrock_cropped(x): + return jac_rosenbrock(x)[0] + + +# When x is 1-d array, return is 2-d array. 
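+# least_squares requires the residual function to return an array of at
+# most one dimension, so the two helpers below exist only to trigger the
+# ValueError paths checked by test_fun_wrong_dimensions and
+# test_jac_wrong_dimensions.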
+def fun_wrong_dimensions(x): + return np.array([x, x**2, x**3]) + + +def jac_wrong_dimensions(x, a=0.0): + return np.atleast_3d(jac_trivial(x, a=a)) + + +def fun_bvp(x): + n = int(np.sqrt(x.shape[0])) + u = np.zeros((n + 2, n + 2)) + x = x.reshape((n, n)) + u[1:-1, 1:-1] = x + y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3 + return y.ravel() + + +class BroydenTridiagonal(object): + def __init__(self, n=100, mode='sparse'): + np.random.seed(0) + + self.n = n + + self.x0 = -np.ones(n) + self.lb = np.linspace(-2, -1.5, n) + self.ub = np.linspace(-0.8, 0.0, n) + + self.lb += 0.1 * np.random.randn(n) + self.ub += 0.1 * np.random.randn(n) + + self.x0 += 0.1 * np.random.randn(n) + self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub) + + if mode == 'sparse': + self.sparsity = lil_matrix((n, n), dtype=int) + i = np.arange(n) + self.sparsity[i, i] = 1 + i = np.arange(1, n) + self.sparsity[i, i - 1] = 1 + i = np.arange(n - 1) + self.sparsity[i, i + 1] = 1 + + self.jac = self._jac + elif mode == 'operator': + self.jac = lambda x: aslinearoperator(self._jac(x)) + elif mode == 'dense': + self.sparsity = None + self.jac = lambda x: self._jac(x).toarray() + else: + assert_(False) + + def fun(self, x): + f = (3 - x) * x + 1 + f[1:] -= x[:-1] + f[:-1] -= 2 * x[1:] + return f + + def _jac(self, x): + J = lil_matrix((self.n, self.n)) + i = np.arange(self.n) + J[i, i] = 3 - 2 * x + i = np.arange(1, self.n) + J[i, i - 1] = -1 + i = np.arange(self.n - 1) + J[i, i + 1] = -2 + return J + + +class ExponentialFittingProblem(object): + """Provide data and function for exponential fitting in the form + y = a + exp(b * x) + noise.""" + + def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1), + n_points=11, random_seed=None): + np.random.seed(random_seed) + self.m = n_points + self.n = 2 + + self.p0 = np.zeros(2) + self.x = np.linspace(x_range[0], x_range[1], n_points) + + self.y = a + np.exp(b * self.x) + self.y += noise * np.random.randn(self.m) + + outliers = np.random.randint(0, self.m, n_outliers) + self.y[outliers] += 50 * noise * np.random.rand(n_outliers) + + self.p_opt = np.array([a, b]) + + def fun(self, p): + return p[0] + np.exp(p[1] * self.x) - self.y + + def jac(self, p): + J = np.empty((self.m, self.n)) + J[:, 0] = 1 + J[:, 1] = self.x * np.exp(p[1] * self.x) + return J + + +def cubic_soft_l1(z): + rho = np.empty((3, z.size)) + + t = 1 + z + rho[0] = 3 * (t**(1/3) - 1) + rho[1] = t ** (-2/3) + rho[2] = -2/3 * t**(-5/3) + + return rho + + +LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1] + + +class BaseMixin(object): + def test_basic(self): + # Test that the basic calling sequence works. + res = least_squares(fun_trivial, 2., method=self.method) + assert_allclose(res.x, 0, atol=1e-4) + assert_allclose(res.fun, fun_trivial(res.x)) + + def test_args_kwargs(self): + # Test that args and kwargs are passed correctly to the functions. 
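+        # They are forwarded as fun(x, *args, **kwargs) (and likewise to a
+        # callable jac), so fun_trivial(x, a=3.0) has its minimum shifted
+        # from 0 to a, which the assertions below rely on.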
+        a = 3.0
+        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
+            with suppress_warnings() as sup:
+                sup.filter(UserWarning,
+                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
+                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
+                                    method=self.method)
+                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
+                                     method=self.method)
+
+            assert_allclose(res.x, a, rtol=1e-4)
+            assert_allclose(res1.x, a, rtol=1e-4)
+
+            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                          args=(3, 4,), method=self.method)
+            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                          kwargs={'kaboom': 3}, method=self.method)
+
+    def test_jac_options(self):
+        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
+            with suppress_warnings() as sup:
+                sup.filter(UserWarning,
+                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
+                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
+            assert_allclose(res.x, 0, atol=1e-4)
+
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
+                      method=self.method)
+
+    def test_nfev_options(self):
+        for max_nfev in [None, 20]:
+            res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
+                                method=self.method)
+            assert_allclose(res.x, 0, atol=1e-4)
+
+    def test_x_scale_options(self):
+        for x_scale in [1.0, np.array([0.5]), 'jac']:
+            res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
+            assert_allclose(res.x, 0)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale='auto', method=self.method)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale=-1.0, method=self.method)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale=None, method=self.method)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale=1.0+2.0j, method=self.method)
+
+    def test_diff_step(self):
+        # res1 and res2 should be equivalent.
+        # res2 and res3 should be different.
+        res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
+                             method=self.method)
+        res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
+                             method=self.method)
+        res3 = least_squares(fun_trivial, 2.0,
+                             diff_step=None, method=self.method)
+        assert_allclose(res1.x, 0, atol=1e-4)
+        assert_allclose(res2.x, 0, atol=1e-4)
+        assert_allclose(res3.x, 0, atol=1e-4)
+        assert_equal(res1.x, res2.x)
+        assert_equal(res1.nfev, res2.nfev)
+        assert_(res2.nfev != res3.nfev)
+
+    def test_incorrect_options_usage(self):
+        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                      method=self.method, options={'no_such_option': 100})
+        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                      method=self.method, options={'max_nfev': 100})
+
+    def test_full_result(self):
+        # MINPACK doesn't work very well with factor=100 on this problem,
+        # thus using low 'atol'.
+        res = least_squares(fun_trivial, 2.0, method=self.method)
+        assert_allclose(res.x, 0, atol=1e-4)
+        assert_allclose(res.cost, 12.5)
+        assert_allclose(res.fun, 5)
+        assert_allclose(res.jac, 0, atol=1e-4)
+        assert_allclose(res.grad, 0, atol=1e-2)
+        assert_allclose(res.optimality, 0, atol=1e-2)
+        assert_equal(res.active_mask, 0)
+        if self.method == 'lm':
+            assert_(res.nfev < 30)
+            assert_(res.njev is None)
+        else:
+            assert_(res.nfev < 10)
+            assert_(res.njev < 10)
+        assert_(res.status > 0)
+        assert_(res.success)
+
+    def test_full_result_single_fev(self):
+        # MINPACK checks the number of nfev after the iteration,
+        # so it's hard to tell what it is going to compute.
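+        # (Since the counts reported for 'lm' with max_nfev=1 are therefore
+        # implementation-defined, the single-evaluation checks below are
+        # skipped for that method.)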
+ if self.method == 'lm': + return + + res = least_squares(fun_trivial, 2.0, method=self.method, + max_nfev=1) + assert_equal(res.x, np.array([2])) + assert_equal(res.cost, 40.5) + assert_equal(res.fun, np.array([9])) + assert_equal(res.jac, np.array([[4]])) + assert_equal(res.grad, np.array([36])) + assert_equal(res.optimality, 36) + assert_equal(res.active_mask, np.array([0])) + assert_equal(res.nfev, 1) + assert_equal(res.njev, 1) + assert_equal(res.status, 0) + assert_equal(res.success, 0) + + def test_rosenbrock(self): + x0 = [-2, 1] + x_opt = [1, 1] + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock], + [1.0, np.array([1.0, 0.2]), 'jac'], + ['exact', 'lsmr']): + with suppress_warnings() as sup: + sup.filter(UserWarning, + "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'") + res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale, + tr_solver=tr_solver, method=self.method) + assert_allclose(res.x, x_opt) + + def test_rosenbrock_cropped(self): + x0 = [-2, 1] + if self.method == 'lm': + assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, + x0, method='lm') + else: + for jac, x_scale, tr_solver in product( + ['2-point', '3-point', 'cs', jac_rosenbrock_cropped], + [1.0, np.array([1.0, 0.2]), 'jac'], + ['exact', 'lsmr']): + res = least_squares( + fun_rosenbrock_cropped, x0, jac, x_scale=x_scale, + tr_solver=tr_solver, method=self.method) + assert_allclose(res.cost, 0, atol=1e-14) + + def test_fun_wrong_dimensions(self): + assert_raises(ValueError, least_squares, fun_wrong_dimensions, + 2.0, method=self.method) + + def test_jac_wrong_dimensions(self): + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, jac_wrong_dimensions, method=self.method) + + def test_fun_and_jac_inconsistent_dimensions(self): + x0 = [1, 2] + assert_raises(ValueError, least_squares, fun_rosenbrock, x0, + jac_rosenbrock_bad_dim, method=self.method) + + def test_x0_multidimensional(self): + x0 = np.ones(4).reshape(2, 2) + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_x0_complex_scalar(self): + x0 = 2.0 + 0.0*1j + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_x0_complex_array(self): + x0 = [1.0, 2.0 + 0.0*1j] + assert_raises(ValueError, least_squares, fun_trivial, x0, + method=self.method) + + def test_bvp(self): + # This test was introduced with fix #5556. It turned out that + # dogbox solver had a bug with trust-region radius update, which + # could block its progress and create an infinite loop. And this + # discrete boundary value problem is the one which triggers it. + n = 10 + x0 = np.ones(n**2) + if self.method == 'lm': + max_nfev = 5000 # To account for Jacobian estimation. 
+        else:
+            max_nfev = 100
+        res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method,
+                            max_nfev=max_nfev)
+
+        assert_(res.nfev < max_nfev)
+        assert_(res.cost < 0.5)
+
+
+class BoundsMixin(object):
+    def test_inconsistent(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      bounds=(10.0, 0.0), method=self.method)
+
+    def test_infeasible(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      bounds=(3., 4), method=self.method)
+
+    def test_wrong_number(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.,
+                      bounds=(1., 2, 3), method=self.method)
+
+    def test_inconsistent_shape(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      bounds=(1.0, [2.0, 3.0]), method=self.method)
+        # 1-D array won't be broadcast
+        assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
+                      bounds=([0.0], [3.0, 4.0]), method=self.method)
+
+    def test_in_bounds(self):
+        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
+            res = least_squares(fun_trivial, 2.0, jac=jac,
+                                bounds=(-1.0, 3.0), method=self.method)
+            assert_allclose(res.x, 0.0, atol=1e-4)
+            assert_equal(res.active_mask, [0])
+            assert_(-1 <= res.x <= 3)
+            res = least_squares(fun_trivial, 2.0, jac=jac,
+                                bounds=(0.5, 3.0), method=self.method)
+            assert_allclose(res.x, 0.5, atol=1e-4)
+            assert_equal(res.active_mask, [-1])
+            assert_(0.5 <= res.x <= 3)
+
+    def test_bounds_shape(self):
+        for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
+            x0 = [1.0, 1.0]
+            res = least_squares(fun_2d_trivial, x0, jac=jac)
+            assert_allclose(res.x, [0.0, 0.0])
+            res = least_squares(fun_2d_trivial, x0, jac=jac,
+                                bounds=(0.5, [2.0, 2.0]), method=self.method)
+            assert_allclose(res.x, [0.5, 0.5])
+            res = least_squares(fun_2d_trivial, x0, jac=jac,
+                                bounds=([0.3, 0.2], 3.0), method=self.method)
+            assert_allclose(res.x, [0.3, 0.2])
+            res = least_squares(
+                fun_2d_trivial, x0, jac=jac, bounds=([-1, 0.5], [1.0, 3.0]),
+                method=self.method)
+            assert_allclose(res.x, [0.0, 0.5], atol=1e-5)
+
+    def test_rosenbrock_bounds(self):
+        x0_1 = np.array([-2.0, 1.0])
+        x0_2 = np.array([2.0, 2.0])
+        x0_3 = np.array([-2.0, 2.0])
+        x0_4 = np.array([0.0, 2.0])
+        x0_5 = np.array([-1.2, 1.0])
+        problems = [
+            (x0_1, ([-np.inf, -1.5], np.inf)),
+            (x0_2, ([-np.inf, 1.5], np.inf)),
+            (x0_3, ([-np.inf, 1.5], np.inf)),
+            (x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
+            (x0_2, ([1.0, 1.5], [3.0, 3.0])),
+            (x0_5, ([-50.0, 0.0], [0.5, 100]))
+        ]
+        for x0, bounds in problems:
+            for jac, x_scale, tr_solver in product(
+                    ['2-point', '3-point', 'cs', jac_rosenbrock],
+                    [1.0, [1.0, 0.5], 'jac'],
+                    ['exact', 'lsmr']):
+                res = least_squares(fun_rosenbrock, x0, jac, bounds,
+                                    x_scale=x_scale, tr_solver=tr_solver,
+                                    method=self.method)
+                assert_allclose(res.optimality, 0.0, atol=1e-5)
+
+
+class SparseMixin(object):
+    def test_exact_tr_solver(self):
+        p = BroydenTridiagonal()
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      tr_solver='exact', method=self.method)
+        assert_raises(ValueError, least_squares, p.fun, p.x0,
+                      tr_solver='exact', jac_sparsity=p.sparsity,
+                      method=self.method)
+
+    def test_equivalence(self):
+        sparse = BroydenTridiagonal(mode='sparse')
+        dense = BroydenTridiagonal(mode='dense')
+        res_sparse = least_squares(
+            sparse.fun, sparse.x0, jac=sparse.jac,
+            method=self.method)
+        res_dense = least_squares(
+            dense.fun, dense.x0, jac=sparse.jac,
+            method=self.method)
+        assert_equal(res_sparse.nfev, res_dense.nfev)
+        assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
+        assert_allclose(res_sparse.cost, 0,
                        atol=1e-20)
+        assert_allclose(res_dense.cost, 0, atol=1e-20)
+
+    def test_tr_options(self):
+        p = BroydenTridiagonal()
+        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
+                            tr_options={'btol': 1e-10})
+        assert_allclose(res.cost, 0, atol=1e-20)
+
+    def test_wrong_parameters(self):
+        p = BroydenTridiagonal()
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      tr_solver='best', method=self.method)
+        assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
+                      tr_solver='lsmr', tr_options={'tol': 1e-10})
+
+    def test_solver_selection(self):
+        sparse = BroydenTridiagonal(mode='sparse')
+        dense = BroydenTridiagonal(mode='dense')
+        res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
+                                   method=self.method)
+        res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
+                                  method=self.method)
+        assert_allclose(res_sparse.cost, 0, atol=1e-20)
+        assert_allclose(res_dense.cost, 0, atol=1e-20)
+        assert_(issparse(res_sparse.jac))
+        assert_(isinstance(res_dense.jac, np.ndarray))
+
+    def test_numerical_jac(self):
+        p = BroydenTridiagonal()
+        for jac in ['2-point', '3-point', 'cs']:
+            res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
+            res_sparse = least_squares(
+                p.fun, p.x0, jac, method=self.method,
+                jac_sparsity=p.sparsity)
+            assert_equal(res_dense.nfev, res_sparse.nfev)
+            assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
+            assert_allclose(res_dense.cost, 0, atol=1e-20)
+            assert_allclose(res_sparse.cost, 0, atol=1e-20)
+
+    def test_with_bounds(self):
+        p = BroydenTridiagonal()
+        for jac, jac_sparsity in product(
+                [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
+            res_1 = least_squares(
+                p.fun, p.x0, jac, bounds=(p.lb, np.inf),
+                method=self.method, jac_sparsity=jac_sparsity)
+            res_2 = least_squares(
+                p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
+                method=self.method, jac_sparsity=jac_sparsity)
+            res_3 = least_squares(
+                p.fun, p.x0, jac, bounds=(p.lb, p.ub),
+                method=self.method, jac_sparsity=jac_sparsity)
+            assert_allclose(res_1.optimality, 0, atol=1e-10)
+            assert_allclose(res_2.optimality, 0, atol=1e-10)
+            assert_allclose(res_3.optimality, 0, atol=1e-10)
+
+    def test_wrong_jac_sparsity(self):
+        p = BroydenTridiagonal()
+        sparsity = p.sparsity[:-1]
+        assert_raises(ValueError, least_squares, p.fun, p.x0,
+                      jac_sparsity=sparsity, method=self.method)
+
+    def test_linear_operator(self):
+        p = BroydenTridiagonal(mode='operator')
+        res = least_squares(p.fun, p.x0, p.jac, method=self.method)
+        assert_allclose(res.cost, 0.0, atol=1e-20)
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method=self.method, tr_solver='exact')
+
+    def test_x_scale_jac_scale(self):
+        p = BroydenTridiagonal()
+        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
+                            x_scale='jac')
+        assert_allclose(res.cost, 0.0, atol=1e-20)
+
+        p = BroydenTridiagonal(mode='operator')
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method=self.method, x_scale='jac')
+
+
+class LossFunctionMixin(object):
+    def test_options(self):
+        for loss in LOSSES:
+            res = least_squares(fun_trivial, 2.0, loss=loss,
+                                method=self.method)
+            assert_allclose(res.x, 0, atol=1e-15)
+
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      loss='hinge', method=self.method)
+
+    def test_fun(self):
+        # Test that res.fun is actual residuals, and not modified by loss
+        # function stuff.
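+        # (res.cost applies the loss function; res.fun must stay the raw
+        # residual vector f(x).)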
+        for loss in LOSSES:
+            res = least_squares(fun_trivial, 2.0, loss=loss,
+                                method=self.method)
+            assert_equal(res.fun, fun_trivial(res.x))
+
+    def test_grad(self):
+        # Test that res.grad is the true gradient of the loss function at
+        # the solution. Use max_nfev = 1 to avoid reaching the minimum.
+        x = np.array([2.0])  # res.x will be this.
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.grad, 2 * x * (x**2 + 5))
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.grad, 2 * x)
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad,
+                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad,
+                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))
+
+    def test_jac(self):
+        # Test that res.jac.T.dot(res.jac) gives the Gauss-Newton
+        # approximation of the Hessian. This approximation is computed by
+        # differentiating the cost function twice and dropping the part
+        # containing the second derivative of f. For a scalar function it is
+        # computed as H = (rho' + 2 * rho'' * f**2) * f'**2; if the
+        # expression inside the brackets is less than EPS, it is replaced by
+        # EPS. Here we check against the square root of H.
+
+        x = 2.0  # res.x will be this.
+        f = x**2 + 5  # res.fun will be this.
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.jac, 2 * x)
+
+        # For `huber` loss the Jacobian correction is identically zero in
+        # the outlier region; in such cases it is modified to equal EPS**0.5.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.jac, 2 * x * EPS**0.5)
+
+        # Now let's apply `f_scale` to turn the residual into an inlier.
+        # The loss function becomes linear.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
+                            f_scale=10, max_nfev=1)
+        assert_equal(res.jac, 2 * x)
+
+        # 'soft_l1' always gives a positive scaling.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)
+
+        # For 'cauchy' the correction term turns out to be negative, and it
+        # is replaced by EPS**0.5.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.jac, 2 * x * EPS**0.5)
+
+        # Now use scaling to turn the residual into an inlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
+                            f_scale=10, max_nfev=1, method=self.method)
+        fs = f / 10
+        assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))
+
+        # 'arctan' gives an outlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.jac, 2 * x * EPS**0.5)
+
+        # Turn to inlier.
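+        # (f_scale=20 makes fs = 9/20 = 0.45, so the correction term
+        # 1 - 3 * fs**4 stays positive.)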
+ res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', + f_scale=20.0, max_nfev=1, method=self.method) + fs = f / 20 + assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4)) + + # cubic_soft_l1 will give an outlier. + res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1, + max_nfev=1) + assert_allclose(res.jac, 2 * x * EPS**0.5) + + # Turn to inlier. + res = least_squares(fun_trivial, x, jac_trivial, + loss=cubic_soft_l1, f_scale=6, max_nfev=1) + fs = f / 6 + assert_allclose(res.jac, + 2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6)) + + def test_robustness(self): + for noise in [0.1, 1.0]: + p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0) + + for jac in ['2-point', '3-point', 'cs', p.jac]: + res_lsq = least_squares(p.fun, p.p0, jac=jac, + method=self.method) + assert_allclose(res_lsq.optimality, 0, atol=1e-2) + for loss in LOSSES: + if loss == 'linear': + continue + res_robust = least_squares( + p.fun, p.p0, jac=jac, loss=loss, f_scale=noise, + method=self.method) + assert_allclose(res_robust.optimality, 0, atol=1e-2) + assert_(norm(res_robust.x - p.p_opt) < + norm(res_lsq.x - p.p_opt)) + + +class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin): + method = 'dogbox' + + +class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin): + method = 'trf' + + def test_lsmr_regularization(self): + p = BroydenTridiagonal() + for regularize in [True, False]: + res = least_squares(p.fun, p.x0, p.jac, method='trf', + tr_options={'regularize': regularize}) + assert_allclose(res.cost, 0, atol=1e-20) + + +class TestLM(BaseMixin): + method = 'lm' + + def test_bounds_not_supported(self): + assert_raises(ValueError, least_squares, fun_trivial, + 2.0, bounds=(-3.0, 3.0), method='lm') + + def test_m_less_n_not_supported(self): + x0 = [-2, 1] + assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0, + method='lm') + + def test_sparse_not_supported(self): + p = BroydenTridiagonal() + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method='lm') + + def test_jac_sparsity_not_supported(self): + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + jac_sparsity=[1], method='lm') + + def test_LinearOperator_not_supported(self): + p = BroydenTridiagonal(mode="operator") + assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, + method='lm') + + def test_loss(self): + res = least_squares(fun_trivial, 2.0, loss='linear', method='lm') + assert_allclose(res.x, 0.0, atol=1e-4) + + assert_raises(ValueError, least_squares, fun_trivial, 2.0, + method='lm', loss='huber') + + +def test_basic(): + # test that 'method' arg is really optional + res = least_squares(fun_trivial, 2.0) + assert_allclose(res.x, 0, atol=1e-10) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_least_squares.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_least_squares.pyc new file mode 100644 index 0000000..a0ff441 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_least_squares.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linesearch.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linesearch.py new file mode 100644 index 0000000..faf3c8a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linesearch.py @@ -0,0 +1,283 @@ +""" +Tests for line search routines +""" +from __future__ import division, print_function, absolute_import + 
+from numpy.testing import assert_, assert_equal, \ + assert_array_almost_equal, assert_array_almost_equal_nulp, assert_warns +from scipy._lib._numpy_compat import suppress_warnings +import scipy.optimize.linesearch as ls +from scipy.optimize.linesearch import LineSearchWarning +import numpy as np + + +def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""): + """ + Check that strong Wolfe conditions apply + """ + phi1 = phi(s) + phi0 = phi(0) + derphi0 = derphi(0) + derphi1 = derphi(s) + msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % ( + s, phi0, phi1, derphi0, derphi1, err_msg) + + assert_(phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg) + assert_(abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg) + + +def assert_armijo(s, phi, c1=1e-4, err_msg=""): + """ + Check that Armijo condition applies + """ + phi1 = phi(s) + phi0 = phi(0) + msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (s, phi0, phi1, err_msg) + assert_(phi1 <= (1 - c1*s)*phi0, msg) + + +def assert_line_wolfe(x, p, s, f, fprime, **kw): + assert_wolfe(s, phi=lambda sp: f(x + p*sp), + derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw) + + +def assert_line_armijo(x, p, s, f, **kw): + assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw) + + +def assert_fp_equal(x, y, err_msg="", nulp=50): + """Assert two arrays are equal, up to some floating-point rounding error""" + try: + assert_array_almost_equal_nulp(x, y, nulp) + except AssertionError as e: + raise AssertionError("%s\n%s" % (e, err_msg)) + + +class TestLineSearch(object): + # -- scalar functions; must have dphi(0.) < 0 + def _scalar_func_1(self, s): + self.fcount += 1 + p = -s - s**3 + s**4 + dp = -1 - 3*s**2 + 4*s**3 + return p, dp + + def _scalar_func_2(self, s): + self.fcount += 1 + p = np.exp(-4*s) + s**2 + dp = -4*np.exp(-4*s) + 2*s + return p, dp + + def _scalar_func_3(self, s): + self.fcount += 1 + p = -np.sin(10*s) + dp = -10*np.cos(10*s) + return p, dp + + # -- n-d functions + + def _line_func_1(self, x): + self.fcount += 1 + f = np.dot(x, x) + df = 2*x + return f, df + + def _line_func_2(self, x): + self.fcount += 1 + f = np.dot(x, np.dot(self.A, x)) + 1 + df = np.dot(self.A + self.A.T, x) + return f, df + + # -- + + def setup_method(self): + self.scalar_funcs = [] + self.line_funcs = [] + self.N = 20 + self.fcount = 0 + + def bind_index(func, idx): + # Remember Python's closure semantics! 
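+            # Passing `func` in as an argument gives each returned lambda
+            # its own binding; a lambda written directly in the loop below
+            # would close over the loop variable and every entry would end
+            # up calling the last function found.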
+ return lambda *a, **kw: func(*a, **kw)[idx] + + for name in sorted(dir(self)): + if name.startswith('_scalar_func_'): + value = getattr(self, name) + self.scalar_funcs.append( + (name, bind_index(value, 0), bind_index(value, 1))) + elif name.startswith('_line_func_'): + value = getattr(self, name) + self.line_funcs.append( + (name, bind_index(value, 0), bind_index(value, 1))) + + np.random.seed(1234) + self.A = np.random.randn(self.N, self.N) + + def scalar_iter(self): + for name, phi, derphi in self.scalar_funcs: + for old_phi0 in np.random.randn(3): + yield name, phi, derphi, old_phi0 + + def line_iter(self): + for name, f, fprime in self.line_funcs: + k = 0 + while k < 9: + x = np.random.randn(self.N) + p = np.random.randn(self.N) + if np.dot(p, fprime(x)) >= 0: + # always pick a descent direction + continue + k += 1 + old_fv = float(np.random.randn()) + yield name, f, fprime, x, p, old_fv + + # -- Generic scalar searches + + def test_scalar_search_wolfe1(self): + c = 0 + for name, phi, derphi, old_phi0 in self.scalar_iter(): + c += 1 + s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0), + old_phi0, derphi(0)) + assert_fp_equal(phi0, phi(0), name) + assert_fp_equal(phi1, phi(s), name) + assert_wolfe(s, phi, derphi, err_msg=name) + + assert_(c > 3) # check that the iterator really works... + + def test_scalar_search_wolfe2(self): + for name, phi, derphi, old_phi0 in self.scalar_iter(): + s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2( + phi, derphi, phi(0), old_phi0, derphi(0)) + assert_fp_equal(phi0, phi(0), name) + assert_fp_equal(phi1, phi(s), name) + if derphi1 is not None: + assert_fp_equal(derphi1, derphi(s), name) + assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0)) + + def test_scalar_search_armijo(self): + for name, phi, derphi, old_phi0 in self.scalar_iter(): + s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0)) + assert_fp_equal(phi1, phi(s), name) + assert_armijo(s, phi, err_msg="%s %g" % (name, old_phi0)) + + # -- Generic line searches + + def test_line_search_wolfe1(self): + c = 0 + smax = 100 + for name, f, fprime, x, p, old_f in self.line_iter(): + f0 = f(x) + g0 = fprime(x) + self.fcount = 0 + s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p, + g0, f0, old_f, + amax=smax) + assert_equal(self.fcount, fc+gc) + assert_fp_equal(ofv, f(x)) + if s is None: + continue + assert_fp_equal(fv, f(x + s*p)) + assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) + if s < smax: + c += 1 + assert_line_wolfe(x, p, s, f, fprime, err_msg=name) + + assert_(c > 3) # check that the iterator really works... + + def test_line_search_wolfe2(self): + c = 0 + smax = 512 + for name, f, fprime, x, p, old_f in self.line_iter(): + f0 = f(x) + g0 = fprime(x) + self.fcount = 0 + with suppress_warnings() as sup: + sup.filter(LineSearchWarning, + "The line search algorithm could not find a solution") + sup.filter(LineSearchWarning, + "The line search algorithm did not converge") + s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p, + g0, f0, old_f, + amax=smax) + assert_equal(self.fcount, fc+gc) + assert_fp_equal(ofv, f(x)) + assert_fp_equal(fv, f(x + s*p)) + if gv is not None: + assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) + if s < smax: + c += 1 + assert_line_wolfe(x, p, s, f, fprime, err_msg=name) + assert_(c > 3) # check that the iterator really works... 
+ + def test_line_search_wolfe2_bounds(self): + # See gh-7475 + + # For this f and p, starting at a point on axis 0, the strong Wolfe + # condition 2 is met if and only if the step length s satisfies + # |x + s| <= c2 * |x| + f = lambda x: np.dot(x, x) + fp = lambda x: 2 * x + p = np.array([1, 0]) + + # Smallest s satisfying strong Wolfe conditions for these arguments is 30 + x = -60 * p + c2 = 0.5 + + s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2) + assert_line_wolfe(x, p, s, f, fp) + + s, _, _, _, _, _ = assert_warns(LineSearchWarning, + ls.line_search_wolfe2, f, fp, x, p, + amax=29, c2=c2) + assert_(s is None) + + # s=30 will only be tried on the 6th iteration, so this won't converge + assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p, + c2=c2, maxiter=5) + + def test_line_search_armijo(self): + c = 0 + for name, f, fprime, x, p, old_f in self.line_iter(): + f0 = f(x) + g0 = fprime(x) + self.fcount = 0 + s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0) + c += 1 + assert_equal(self.fcount, fc) + assert_fp_equal(fv, f(x + s*p)) + assert_line_armijo(x, p, s, f, err_msg=name) + assert_(c >= 9) + + # -- More specific tests + + def test_armijo_terminate_1(self): + # Armijo should evaluate the function only once if the trial step + # is already suitable + count = [0] + + def phi(s): + count[0] += 1 + return -s + 0.01*s**2 + s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1) + assert_equal(s, 1) + assert_equal(count[0], 2) + assert_armijo(s, phi) + + def test_wolfe_terminate(self): + # wolfe1 and wolfe2 should also evaluate the function only a few + # times if the trial step is already suitable + + def phi(s): + count[0] += 1 + return -s + 0.05*s**2 + + def derphi(s): + count[0] += 1 + return -1 + 0.05*2*s + + for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]: + count = [0] + r = func(phi, derphi, phi(0), None, derphi(0)) + assert_(r[0] is not None, (r, func)) + assert_(count[0] <= 2 + 2, (count, func)) + assert_wolfe(r[0], phi, derphi, err_msg=str(func)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linesearch.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linesearch.pyc new file mode 100644 index 0000000..6bfd1b4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linesearch.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linprog.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linprog.py new file mode 100644 index 0000000..674128b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linprog.py @@ -0,0 +1,1375 @@ +""" +Unit test for Linear Programming +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_, assert_allclose, assert_equal, + assert_array_less) +from pytest import raises as assert_raises +from scipy.optimize import linprog, OptimizeWarning +from scipy._lib._numpy_compat import _assert_warns, suppress_warnings +from scipy.sparse.linalg import MatrixRankWarning + +import pytest + + +def magic_square(n): + np.random.seed(0) + M = n * (n**2 + 1) / 2 + + numbers = np.arange(n**4) // n**2 + 1 + + numbers = numbers.reshape(n**2, n, n) + + zeros = np.zeros((n**2, n, n)) + + A_list = [] + b_list = [] + + # Rule 1: use every number exactly once + for i in range(n**2): + A_row = zeros.copy() + A_row[i, :, :] = 1 + A_list.append(A_row.flatten()) + 
b_list.append(1)
+
+    # Rule 2: Only one number per square
+    for i in range(n):
+        for j in range(n):
+            A_row = zeros.copy()
+            A_row[:, i, j] = 1
+            A_list.append(A_row.flatten())
+            b_list.append(1)
+
+    # Rule 3: sum of rows is M
+    for i in range(n):
+        A_row = zeros.copy()
+        A_row[:, i, :] = numbers[:, i, :]
+        A_list.append(A_row.flatten())
+        b_list.append(M)
+
+    # Rule 4: sum of columns is M
+    for i in range(n):
+        A_row = zeros.copy()
+        A_row[:, :, i] = numbers[:, :, i]
+        A_list.append(A_row.flatten())
+        b_list.append(M)
+
+    # Rule 5: sum of diagonals is M
+    A_row = zeros.copy()
+    A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)]
+    A_list.append(A_row.flatten())
+    b_list.append(M)
+    A_row = zeros.copy()
+    A_row[:, range(n), range(-1, -n - 1, -1)] = \
+        numbers[:, range(n), range(-1, -n - 1, -1)]
+    A_list.append(A_row.flatten())
+    b_list.append(M)
+
+    A = np.array(np.vstack(A_list), dtype=float)
+    b = np.array(b_list, dtype=float)
+    c = np.random.rand(A.shape[1])
+
+    return A, b, c, numbers
+
+
+def lpgen_2d(m, n):
+    """ -> A b c LP test: m*n vars, m+n constraints
+        row sums == n/m, col sums == 1
+        https://gist.github.com/denis-bz/8647461
+    """
+    np.random.seed(0)
+    c = - np.random.exponential(size=(m, n))
+    Arow = np.zeros((m, m * n))
+    brow = np.zeros(m)
+    for j in range(m):
+        j1 = j + 1
+        Arow[j, j * n:j1 * n] = 1
+        brow[j] = n / m
+
+    Acol = np.zeros((n, m * n))
+    bcol = np.zeros(n)
+    for j in range(n):
+        j1 = j + 1
+        Acol[j, j::n] = 1
+        bcol[j] = 1
+
+    A = np.vstack((Arow, Acol))
+    b = np.hstack((brow, bcol))
+
+    return A, b, c.ravel()
+
+
+def _assert_iteration_limit_reached(res, maxiter):
+    assert_(not res.success, "Incorrectly reported success")
+    # res.success is a bool; the iteration count lives in res.nit.
+    assert_(res.nit <= maxiter, "Incorrectly reported number of iterations")
+    assert_equal(res.status, 1, "Failed to report iteration limit reached")
+
+
+def _assert_infeasible(res):
+    # res: linprog result object
+    assert_(not res.success, "incorrectly reported success")
+    assert_equal(res.status, 2, "failed to report infeasible status")
+
+
+def _assert_unbounded(res):
+    # res: linprog result object
+    assert_(not res.success, "incorrectly reported success")
+    assert_equal(res.status, 3, "failed to report unbounded status")
+
+
+def _assert_unable_to_find_basic_feasible_sol(res):
+    # res: linprog result object
+    assert_(not res.success, "incorrectly reported success")
+    assert_equal(res.status, 2, "failed to report optimization failure")
+
+
+def _assert_success(res, desired_fun=None, desired_x=None,
+                    rtol=1e-8, atol=1e-8):
+    # res: linprog result object
+    # desired_fun: desired objective function value or None
+    # desired_x: desired solution or None
+    if not res.success:
+        msg = "linprog status {0}, message: {1}".format(res.status,
+                                                        res.message)
+        raise AssertionError(msg)
+
+    assert_equal(res.status, 0)
+    if desired_fun is not None:
+        assert_allclose(res.fun, desired_fun,
+                        err_msg="converged to an unexpected objective value",
+                        rtol=rtol, atol=atol)
+    if desired_x is not None:
+        assert_allclose(res.x, desired_x,
+                        err_msg="converged to an unexpected solution",
+                        rtol=rtol, atol=atol)
+
+
+class LinprogCommonTests(object):
+
+    def test_docstring_example(self):
+        # Example from linprog docstring.
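+        # minimize -x0 + 4*x1  s.t.  -3*x0 + x1 <= 6,  x0 + 2*x1 <= 4,
+        # x1 >= -3; the optimum is x = [10, -3] with objective -22.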
+ c = [-1, 4] + A = [[-3, 1], [1, 2]] + b = [6, 4] + x0_bounds = (None, None) + x1_bounds = (-3, None) + + res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), + options=self.options, method=self.method) + _assert_success(res, desired_fun=-22) + + def test_aliasing_b_ub(self): + c = np.array([1.0]) + A_ub = np.array([[1.0]]) + b_ub_orig = np.array([3.0]) + b_ub = b_ub_orig.copy() + bounds = (-4.0, np.inf) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-4, desired_x=[-4]) + assert_allclose(b_ub_orig, b_ub) + + def test_aliasing_b_eq(self): + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq_orig = np.array([3.0]) + b_eq = b_eq_orig.copy() + bounds = (-4.0, np.inf) + res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + assert_allclose(b_eq_orig, b_eq) + + def test_bounds_second_form_unbounded_below(self): + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq = np.array([3.0]) + bounds = (None, 10.0) + res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + + def test_bounds_second_form_unbounded_above(self): + c = np.array([1.0]) + A_eq = np.array([[1.0]]) + b_eq = np.array([3.0]) + bounds = (1.0, None) + res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3, desired_x=[3]) + + def test_non_ndarray_args(self): + c = [1.0] + A_ub = [[1.0]] + b_ub = [3.0] + A_eq = [[1.0]] + b_eq = [2.0] + bounds = (-1.0, 10.0) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options) + _assert_success(res, desired_fun=2, desired_x=[2]) + + def test_linprog_upper_bound_constraints(self): + # Maximize a linear function subject to only linear upper bound + # constraints. + # http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf + c = np.array([3, 2]) * -1 # maximize + A_ub = [[2, 1], + [1, 1], + [1, 0]] + b_ub = [10, 8, 4] + res = (linprog(c, A_ub=A_ub, b_ub=b_ub, + method=self.method, options=self.options)) + _assert_success(res, desired_fun=-18, desired_x=[2, 6]) + + def test_linprog_mixed_constraints(self): + # Minimize linear function subject to non-negative variables. 
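+        # (the >= rows are encoded by negating both the A_ub row and the
+        # b_ub entry, hence "mixed" constraints.)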
+        # http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf
+        # (dead link)
+        c = [6, 3]
+        A_ub = [[0, 3],
+                [-1, -1],
+                [-2, 1]]
+        b_ub = [2, -1, -1]
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3])
+
+    def test_linprog_cyclic_recovery(self):
+        # Test linprog's recovery from cycling using the Klee-Minty problem
+        # Klee-Minty  https://www.math.ubc.ca/~israel/m340/kleemin3.pdf
+        c = np.array([100, 10, 1]) * -1  # maximize
+        A_ub = [[1, 0, 0],
+                [20, 1, 0],
+                [200, 20, 1]]
+        b_ub = [1, 100, 10000]
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7)
+
+    def test_linprog_cyclic_bland(self):
+        # Test the effect of Bland's rule on a cycling problem
+        c = np.array([-10, 57, 9, 24.])
+        A_ub = np.array([[0.5, -5.5, -2.5, 9],
+                         [0.5, -1.5, -0.5, 1],
+                         [1, 0, 0, 0]])
+        b_ub = [0, 0, 1]
+
+        maxiter = 100
+        o = {key: val for key, val in self.options.items()}
+        o['maxiter'] = maxiter
+
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=o,
+                      method=self.method)
+
+        if self.method == 'simplex' and not self.options.get('bland'):
+            _assert_iteration_limit_reached(res, o['maxiter'])
+        else:
+            _assert_success(res, desired_x=[1, 0, 1, 0])
+
+    def test_linprog_cyclic_bland_bug_8561(self):
+        # Test that pivot row is chosen correctly when using Bland's rule
+        c = np.array([7, 0, -4, 1.5, 1.5])
+        A_ub = np.array([
+            [4, 5.5, 1.5, 1.0, -3.5],
+            [1, -2.5, -2, 2.5, 0.5],
+            [3, -0.5, 4, -12.5, -7],
+            [-1, 4.5, 2, -3.5, -2],
+            [5.5, 2, -4.5, -1, 9.5]])
+        b_ub = np.array([0, 0, 0, 0, 1])
+        if self.method == "simplex":
+            res = linprog(c, A_ub=A_ub, b_ub=b_ub,
+                          options=dict(maxiter=100, bland=True),
+                          method=self.method)
+        else:
+            res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options,
+                          method=self.method)
+        _assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3])
+
+    def test_linprog_unbounded(self):
+        # Test linprog response to an unbounded problem
+        c = np.array([1, 1]) * -1  # maximize
+        A_ub = [[-1, 1],
+                [-1, -1]]
+        b_ub = [-1, -2]
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub,
+                      method=self.method, options=self.options)
+        _assert_unbounded(res)
+
+    def test_linprog_infeasible(self):
+        # Test linprog response to an infeasible problem
+        c = [-1, -1]
+        A_ub = [[1, 0],
+                [0, 1],
+                [-1, -1]]
+        b_ub = [2, 2, -5]
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+    def test_nontrivial_problem(self):
+        # Test linprog for a problem involving all constraint types,
+        # negative resource limits, and rounding issues.
+        c = [-1, 8, 4, -6]
+        A_ub = [[-7, -7, 6, 9],
+                [1, -1, -3, 0],
+                [10, -10, -7, 7],
+                [6, -1, 3, 4]]
+        b_ub = [-3, 6, -6, 6]
+        A_eq = [[-10, 1, 1, -8]]
+        b_eq = [-4]
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=7083 / 1391,
+                        desired_x=[101 / 1391, 1462 / 1391, 0, 752 / 1391])
+
+    def test_negative_variable(self):
+        # Test linprog with a problem with one unbounded variable and
+        # another with a negative lower bound.
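+        # (same constraint set as the docstring example, with the objective
+        # negated so that -x0 + 4*x1 is maximized instead.)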
+ c = np.array([-1, 4]) * -1 # maximize + A_ub = np.array([[-3, 1], + [1, 2]], dtype=np.float64) + A_ub_orig = A_ub.copy() + b_ub = [6, 4] + x0_bounds = (-np.inf, np.inf) + x1_bounds = (-3, np.inf) + + res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=(x0_bounds, x1_bounds), + method=self.method, options=self.options) + + assert_equal(A_ub, A_ub_orig) # user input not overwritten + _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7]) + + def test_large_problem(self): + # Test linprog simplex with a rather large problem (400 variables, + # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 + A, b, c = lpgen_2d(20, 20) + res = linprog(c, A_ub=A, b_ub=b, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-64.049494229) + + def test_network_flow(self): + # A network flow problem with supply and demand at nodes + # and with costs along directed edges. + # https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf + c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18] + n, p = -1, 1 + A_eq = [ + [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0], + [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0], + [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0], + [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p], + [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]] + b_eq = [0, 19, -16, 33, 0, 0, -36] + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + _assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7) + + def test_network_flow_limited_capacity(self): + # A network flow problem with supply and demand at nodes + # and with costs and capacities along directed edges. + # http://blog.sommer-forst.de/2013/04/10/ + cost = [2, 2, 1, 3, 1] + bounds = [ + [0, 4], + [0, 2], + [0, 2], + [0, 3], + [0, 5]] + n, p = -1, 1 + A_eq = [ + [n, n, 0, 0, 0], + [p, 0, n, n, 0], + [0, p, p, 0, n], + [0, 0, 0, p, p]] + b_eq = [-4, 0, 0, 4] + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(OptimizeWarning, "A_eq does not appear...") + sup.filter(OptimizeWarning, "Solving system with option...") + res = linprog(c=cost, A_eq=A_eq, b_eq=b_eq, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=14) + + def test_simplex_algorithm_wikipedia_example(self): + # https://en.wikipedia.org/wiki/Simplex_algorithm#Example + Z = [-2, -3, -4] + A_ub = [ + [3, 2, 1], + [2, 5, 3]] + b_ub = [10, 15] + res = linprog(c=Z, A_ub=A_ub, b_ub=b_ub, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-20) + + def test_enzo_example(self): + # https://github.com/scipy/scipy/issues/1779 lp2.py + # + # Translated from Octave code at: + # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm + # and placed under MIT licence by Enzo Michelangeli + # with permission explicitly granted by the original author, + # Prof. 
Kazunobu Yoshida + c = [4, 8, 3, 0, 0, 0] + A_eq = [ + [2, 5, 3, -1, 0, 0], + [3, 2.5, 8, 0, -1, 0], + [8, 10, 4, 0, 0, -1]] + b_eq = [185, 155, 600] + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + _assert_success(res, desired_fun=317.5, + desired_x=[66.25, 0, 17.5, 0, 183.75, 0], + atol=6e-6, rtol=1e-7) + + def test_enzo_example_b(self): + # rescued from https://github.com/scipy/scipy/pull/218 + c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8] + A_eq = [[-1, -1, -1, 0, 0, 0], + [0, 0, 0, 1, 1, 1], + [1, 0, 0, 1, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 1]] + b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3] + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-1.77, + desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3]) + + def test_enzo_example_c_with_degeneracy(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 20 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1) + A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) + b_eq = [0, 0] + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0, desired_x=np.zeros(m)) + + def test_enzo_example_c_with_unboundedness(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 50 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(m) / (m + 1) + A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) + b_eq = [0, 0] + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + _assert_unbounded(res) + + def test_enzo_example_c_with_infeasibility(self): + # rescued from https://github.com/scipy/scipy/pull/218 + m = 50 + c = -np.ones(m) + tmp = 2 * np.pi * np.arange(m) / (m + 1) + A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) + b_eq = [1, 1] + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, method=self.method, + options=o) + _assert_infeasible(res) + + def test_unknown_options(self): + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + + def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, + options={}): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, + options=options) + + o = {key: self.options[key] for key in self.options} + o['spam'] = 42 + _assert_warns(OptimizeWarning, f, + c, A_ub=A_ub, b_ub=b_ub, options=o) + + def test_no_constraints(self): + res = linprog([-1, -2], method=self.method, options=self.options) + _assert_unbounded(res) + + def test_simple_bounds(self): + res = linprog([1, 2], bounds=(1, 2), + method=self.method, options=self.options) + _assert_success(res, desired_x=[1, 1]) + res = linprog([1, 2], bounds=[(1, 2), (1, 2)], + method=self.method, options=self.options) + _assert_success(res, desired_x=[1, 1]) + + def test_invalid_inputs(self): + + def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): + linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, + method=self.method, options=self.options) + + for bad_bound in [[(5, 0), (1, 2), (3, 4)], + [(1, 2), (3, 4)], + [(1, 2), (3, 4), (3, 4, 5)], + [(1, 2), (np.inf, np.inf), (3, 4)], + [(1, 2), (-np.inf, -np.inf), (3, 4)], + ]: + assert_raises(ValueError, f, [1, 2, 3], bounds=bad_bound) + + assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2]) + assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1]) + assert_raises(ValueError, f, [1, 2], 
A_eq=[[1, 2]], b_eq=[1, 2]) + assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1]) + assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1) + + if ("_sparse_presolve" in self.options and + self.options["_sparse_presolve"]): + return + # this test doesn't make sense for sparse presolve + # there aren't 3D sparse matrices + assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1) + + def test_basic_artificial_vars(self): + # Test if linprog succeeds when at the end of Phase 1 some artificial + # variables remain basic, and the row in T corresponding to the + # artificial variables is not all zero. + c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004]) + A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0], + [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0], + [1.0, 1.0, 0, 0, 0, 0]]) + b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0]) + A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]]) + b_eq = np.array([0, 0]) + res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c), + atol=2e-6) + + def test_empty_constraint_2(self): + res = linprog([1, -1, 1, -1], + bounds=[(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)], + method=self.method, options=self.options) + _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2) + + def test_zero_row_2(self): + A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + b_eq = [0, 3, 0] + c = [1, 2, 3] + res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + _assert_success(res, desired_fun=3) + + def test_zero_row_4(self): + A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + b_ub = [0, 3, 0] + c = [1, 2, 3] + res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, + method=self.method, options=self.options) + _assert_success(res, desired_fun=0) + + def test_zero_column_1(self): + m, n = 3, 4 + np.random.seed(0) + c = np.random.rand(n) + c[1] = 1 + A_eq = np.random.rand(m, n) + A_eq[:, 1] = 0 + b_eq = np.random.rand(m) + A_ub = [[1, 0, 1, 1]] + b_ub = 3 + res = linprog(c, A_ub, b_ub, A_eq, b_eq, + bounds=[(-10, 10), (-10, 10), + (-10, None), (None, None)], + method=self.method, options=self.options) + _assert_success(res, desired_fun=-9.7087836730413404) + + def test_singleton_row_eq_2(self): + c = [1, 1, 1, 2] + A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] + b_eq = [1, 2, 1, 4] + res = linprog(c, A_eq=A_eq, b_eq=b_eq, + method=self.method, options=self.options) + _assert_success(res, desired_fun=4) + + def test_singleton_row_ub_2(self): + c = [1, 1, 1, 2] + A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] + b_ub = [1, 2, -0.5, 4] + res = linprog(c, A_ub=A_ub, b_ub=b_ub, + bounds=[(None, None), (0, None), (0, None), (0, None)], + method=self.method, options=self.options) + _assert_success(res, desired_fun=0.5) + + def test_remove_redundancy_infeasibility(self): + m, n = 10, 10 + c = np.random.rand(n) + A0 = np.random.rand(m, n) + b0 = np.random.rand(m) + A0[-1, :] = 2 * A0[-2, :] + b0[-1] *= -1 + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_eq=A0, b_eq=b0, + method=self.method, options=self.options) + _assert_infeasible(res) + + def test_bounded_below_only(self): + A = np.eye(3) + b = np.array([1, 2, 3]) + c = np.ones(3) + res = linprog(c, A_eq=A, b_eq=b, bounds=(0.5, np.inf), + method=self.method, options=self.options) + _assert_success(res, desired_x=b, desired_fun=np.sum(b)) + + def test_bounded_above_only(self): + 
A = np.eye(3)
+        b = np.array([1, 2, 3])
+        c = np.ones(3)
+        # A_eq = I pins x to b exactly; the upper bound of 4 stays slack.
+        res = linprog(c, A_eq=A, b_eq=b, bounds=(-np.inf, 4),
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=b, desired_fun=np.sum(b))
+
+    def test_unbounded_below_and_above(self):
+        A = np.eye(3)
+        b = np.array([1, 2, 3])
+        c = np.ones(3)
+        res = linprog(c, A_eq=A, b_eq=b, bounds=(-np.inf, np.inf),
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=b, desired_fun=np.sum(b))
+
+    def test_bounds_equal_but_infeasible(self):
+        c = [-4, 1]
+        A_ub = [[7, -2], [0, 1], [2, -2]]
+        b_ub = [14, 0, 3]
+        bounds = [(2, 2), (0, None)]
+        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+    def test_bounds_equal_but_infeasible2(self):
+        c = [-4, 1]
+        A_eq = [[7, -2], [0, 1], [2, -2]]
+        b_eq = [14, 0, 3]
+        bounds = [(2, 2), (0, None)]
+        res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+    def test_empty_constraint_1(self):
+        res = linprog([-1, 1, -1, 1],
+                      bounds=[(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)],
+                      method=self.method, options=self.options)
+        _assert_unbounded(res)
+        # Unboundedness detected in presolve, requiring no iterations;
+        # if presolve is not used, nit > 0 is expected.
+        n = 0 if self.options.get('presolve', True) else 2
+        assert_equal(res.nit, n)
+
+    def test_singleton_row_eq_1(self):
+        c = [1, 1, 1, 2]
+        A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
+        b_eq = [1, 2, 2, 4]
+        res = linprog(c, A_eq=A_eq, b_eq=b_eq,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+        # Infeasibility detected in presolve, requiring no iterations;
+        # if presolve is not used, nit > 0 is expected.
+        n = 0 if self.options.get('presolve', True) else 3
+        assert_equal(res.nit, n)
+
+    def test_singleton_row_ub_1(self):
+        c = [1, 1, 1, 2]
+        A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
+        b_ub = [1, 2, -2, 4]
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub,
+                      bounds=[(None, None), (0, None), (0, None), (0, None)],
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+        # Infeasibility detected in presolve, requiring no iterations;
+        # if presolve is not used, nit > 0 is expected.
+        n = 0 if self.options.get('presolve', True) else 3
+        assert_equal(res.nit, n)
+
+    def test_zero_column_2(self):
+        np.random.seed(0)
+        m, n = 2, 4
+        c = np.random.rand(n)
+        c[1] = -1
+        A_eq = np.random.rand(m, n)
+        A_eq[:, 1] = 0
+        b_eq = np.random.rand(m)
+
+        A_ub = np.random.rand(m, n)
+        A_ub[:, 1] = 0
+        b_ub = np.random.rand(m)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds=(None, None),
+                      method=self.method, options=self.options)
+        _assert_unbounded(res)
+        # Unboundedness detected in presolve, requiring no iterations;
+        # if presolve is not used, nit > 0 is expected.
+        n = 0 if self.options.get('presolve', True) else 5
+        assert_equal(res.nit, n)
+
+    def test_zero_row_1(self):
+        m, n = 2, 4
+        c = np.random.rand(n)
+        A_eq = np.random.rand(m, n)
+        A_eq[0, :] = 0
+        b_eq = np.random.rand(m)
+        res = linprog(c=c, A_eq=A_eq, b_eq=b_eq,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+        # Infeasibility detected in presolve, requiring no iterations;
+        # if presolve is not used, nit > 0 is expected.
+        n = 0 if self.options.get('presolve', True) else 1
+        assert_equal(res.nit, n)
+
+    def test_zero_row_3(self):
+        # detected in presolve?
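+        # A zero row in A_ub with a negative b_ub entry demands 0 <= b < 0,
+        # so presolve can report infeasibility with nit == 0.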
+        m, n = 2, 4
+        c = np.random.rand(n)
+        A_ub = np.random.rand(m, n)
+        A_ub[0, :] = 0
+        b_ub = -np.random.rand(m)
+        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+        assert_equal(res.nit, 0)
+
+    def test_infeasible_ub(self):
+        c = [1]
+        A_ub = [[2]]
+        b_ub = 4
+        bounds = (5, 6)
+        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
+                      method=self.method, options=self.options)
+
+        _assert_infeasible(res)
+        # Infeasibility detected in presolve, requiring no iterations;
+        # if presolve is not used, nit > 0 is expected.
+        n = 0 if self.options.get('presolve', True) else 1
+        assert_equal(res.nit, n)
+
+    def test_type_error(self):
+        c = [1]
+        A_eq = [[1]]
+        b_eq = "hello"
+        assert_raises(TypeError, linprog,
+                      c, A_eq=A_eq, b_eq=b_eq,
+                      method=self.method, options=self.options)
+
+    def test_equal_bounds_no_presolve(self):
+        # There was a bug when a lower and upper bound were equal but
+        # presolve was not used to eliminate the variable. The bound
+        # was being converted to an equality constraint, but the bound
+        # was not eliminated, leading to issues in postprocessing.
+        c = [1, 2]
+        A_ub = [[1, 2], [1.1, 2.2]]
+        b_ub = [4, 8]
+        bounds = [(1, 2), (2, 2)]
+        o = {key: self.options[key] for key in self.options}
+        o["presolve"] = False
+        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
+                      method=self.method, options=o)
+        _assert_infeasible(res)
+
+    def test_unbounded_below_no_presolve_corrected(self):
+        c = [1]
+        bounds = [(None, 1)]
+        o = {key: self.options[key] for key in self.options}
+        o["presolve"] = False
+        res = linprog(c=c, bounds=bounds,
+                      method=self.method,
+                      options=o)
+        _assert_unbounded(res)
+
+    def test_unbounded_no_nontrivial_constraints_1(self):
+        """
+        Test whether presolve pathway for detecting unboundedness after
+        constraint elimination is working.
+        """
+        c = np.array([0, 0, 0, 1, -1, -1])
+        A = np.array([[1, 0, 0, 0, 0, 0],
+                      [0, 1, 0, 0, 0, 0],
+                      [0, 0, 0, 0, 0, -1]])
+        b = np.array([2, -2, 0])
+        bounds = [(None, None), (None, None), (None, None),
+                  (-1, 1), (-1, 1), (0, None)]
+        res = linprog(c, A, b, None, None, bounds, method=self.method,
+                      options=self.options)
+        _assert_unbounded(res)
+        assert_equal(res.x[-1], np.inf)
+        assert_equal(res.message[:36], "The problem is (trivially) unbounded")
+
+    def test_unbounded_no_nontrivial_constraints_2(self):
+        """
+        Test whether presolve pathway for detecting unboundedness after
+        constraint elimination is working.
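+        The last variable has cost +1 and is only bounded above, so the
+        objective is driven to -inf (res.x[-1] -> -inf).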
+ """ + c = np.array([0, 0, 0, 1, -1, 1]) + A = np.array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1]]) + b = np.array([2, -2, 0]) + bounds = [(None, None), (None, None), (None, None), + (-1, 1), (-1, 1), (None, 0)] + res = linprog(c, A, b, None, None, bounds, method=self.method, + options=self.options) + _assert_unbounded(res) + assert_equal(res.x[-1], -np.inf) + assert_equal(res.message[:36], "The problem is (trivially) unbounded") + + def test_bug_5400(self): + # https://github.com/scipy/scipy/issues/5400 + bounds = [ + (0, None), + (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), + (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), + (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)] + + f = 1 / 9 + g = -1e4 + h = -3.1 + A_ub = np.array([ + [1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0], + [1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0], + [1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], + [0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0], + [0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0], + [0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0], + [0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0], + [0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]]) + + b_ub = np.array([ + 0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900, + 900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + + c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]) + + if self.method == 'simplex': + with pytest.warns(OptimizeWarning): + res = linprog(c, A_ub, b_ub, bounds=bounds, + method=self.method, options=self.options) + else: + res = linprog(c, A_ub, b_ub, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, desired_fun=-106.63507541835018) + + def test_issue_6139(self): + # Linprog(method='simplex') fails to find a basic feasible solution + # if phase 1 pseudo-objective function is outside the provided tol. + # https://github.com/scipy/scipy/issues/6139 + + # Note: This is not strictly a bug as the default tolerance determines + # if a result is "close enough" to zero and should not be expected + # to work for all cases. 
+ + c = np.array([1, 1, 1]) + A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]]) + b_eq = np.array([5.00000000e+00, -1.00000000e+04]) + A_ub = -np.array([[0., 1000000., 1010000.]]) + b_ub = -np.array([10000000.]) + bounds = (None, None) + + res = linprog( + c, A_ub, b_ub, A_eq, b_eq, method=self.method, + bounds=bounds, options=self.options + ) + + _assert_success( + res, desired_fun=14.95, desired_x=np.array([5, 4.95, 5]) + ) + + def test_bug_6690(self): + # SciPy violates bound constraint despite result status being success + # when the simplex method is used. + # https://github.com/scipy/scipy/issues/6690 + + A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]]) + b_eq = np.array([0.9626]) + A_ub = np.array([ + [0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0], + [0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37], + [0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0] + ]) + b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022]) + bounds = np.array([ + [-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73], + [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15] + ]).T + c = np.array([ + -1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28 + ]) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") + res = linprog( + c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, + bounds=bounds, method=self.method, options=self.options + ) + + desired_fun = -1.19099999999 + desired_x = np.array([ + 0.3700, -0.9700, 0.3400, 0.4000, 1.1800, + 0.5000, 0.4700, 0.0900, 0.3200, -0.7300 + ]) + _assert_success( + res, + desired_fun=desired_fun, + desired_x=desired_x + ) + + # Add small tol value to ensure arrays are less than or equal. + atol = 1e-6 + assert_array_less(bounds[:, 0] - atol, res.x) + assert_array_less(res.x, bounds[:, 1] + atol) + + def test_bug_7044(self): + # linprog fails to identify correct constraints with simplex method + # leading to a non-optimal solution if A is rank-deficient. + # https://github.com/scipy/scipy/issues/7044 + + A, b, c, N = magic_square(3) + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, "A_eq does not appear...") + res = linprog(c, A_eq=A, b_eq=b, + method=self.method, options=self.options) + + desired_fun = 1.730550597 + _assert_success(res, desired_fun=desired_fun) + assert_allclose(A.dot(res.x), b) + assert_array_less(np.zeros(res.x.size) - 1e-5, res.x) + + def test_issue_7237(self): + # https://github.com/scipy/scipy/issues/7237 + # The simplex method sometimes "explodes" if the pivot value is very + # close to zero. 
+
+        c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0])
+        A_ub = np.array([
+            [1., -724., 911., -551., -555., -896., 478., -80., -293.],
+            [1., 566., 42., 937., 233., 883., 392., -909., 57.],
+            [1., -208., -894., 539., 321., 532., -924., 942., 55.],
+            [1., 857., -859., 83., 462., -265., -971., 826., 482.],
+            [1., 314., -424., 245., -424., 194., -443., -104., -429.],
+            [1., 540., 679., 361., 149., -827., 876., 633., 302.],
+            [0., -1., -0., -0., -0., -0., -0., -0., -0.],
+            [0., -0., -1., -0., -0., -0., -0., -0., -0.],
+            [0., -0., -0., -1., -0., -0., -0., -0., -0.],
+            [0., -0., -0., -0., -1., -0., -0., -0., -0.],
+            [0., -0., -0., -0., -0., -1., -0., -0., -0.],
+            [0., -0., -0., -0., -0., -0., -1., -0., -0.],
+            [0., -0., -0., -0., -0., -0., -0., -1., -0.],
+            [0., -0., -0., -0., -0., -0., -0., -0., -1.],
+            [0., 1., 0., 0., 0., 0., 0., 0., 0.],
+            [0., 0., 1., 0., 0., 0., 0., 0., 0.],
+            [0., 0., 0., 1., 0., 0., 0., 0., 0.],
+            [0., 0., 0., 0., 1., 0., 0., 0., 0.],
+            [0., 0., 0., 0., 0., 1., 0., 0., 0.],
+            [0., 0., 0., 0., 0., 0., 1., 0., 0.],
+            [0., 0., 0., 0., 0., 0., 0., 1., 0.],
+            [0., 0., 0., 0., 0., 0., 0., 0., 1.]
+            ])
+        b_ub = np.array([
+            0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
+            0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.])
+        A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]])
+        b_eq = np.array([[1.]])
+        bounds = [(None, None)] * 9
+
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
+                      bounds=bounds, method=self.method, options=self.options)
+        _assert_success(res, desired_fun=108.568535, atol=1e-6)
+
+    def test_issue_8174(self):
+        # https://github.com/scipy/scipy/issues/8174
+        # The simplex method sometimes "explodes" if the pivot value is very
+        # close to zero.
+        A_ub = np.array([
+            [22714, 1008, 13380, -2713.5, -1116],
+            [-4986, -1092, -31220, 17386.5, 684],
+            [-4986, 0, 0, -2713.5, 0],
+            [22714, 0, 0, 17386.5, 0]])
+        b_ub = np.zeros(A_ub.shape[0])
+        c = -np.ones(A_ub.shape[1])
+        bounds = [(0, 1)] * A_ub.shape[1]
+
+        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
+                      options=self.options, method=self.method)
+
+    def test_issue_8174_stackoverflow(self):
+        # Test supplementary example from issue 8174.
+        # https://github.com/scipy/scipy/issues/8174
+        # https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution
+        c = np.array([1, 0, 0, 0, 0, 0, 0])
+        A_ub = -np.identity(7)
+        b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]])
+        A_eq = np.array([
+            [1, 1, 1, 1, 1, 1, 0],
+            [0.3, 1.3, 0.9, 0, 0, 0, -1],
+            [0.3, 0, 0, 0, 0, 0, -2/3],
+            [0, 0.65, 0, 0, 0, 0, -1/15],
+            [0, 0, 0.3, 0, 0, 0, -1/15]
+        ])
+        b_eq = np.array([[100], [0], [0], [0], [0]])
+
+        with pytest.warns(OptimizeWarning):
+            res = linprog(
+                c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
+                method=self.method, options=self.options
+            )
+        _assert_success(res, desired_fun=43.3333333331385)
+
+    def test_bug_8662(self):
+        # With default bounds, scipy.linprog returned an incorrect optimal
+        # result for these constraints; supplying the bound as an explicit
+        # constraint gave the correct result.
+ # https://github.com/scipy/scipy/issues/8662 + c = [-10, 10, 6, 3] + A = [ + [8, -8, -4, 6], + [-8, 8, 4, -6], + [-4, 4, 8, -4], + [3, -3, -3, -10] + ] + b = [9, -9, -9, -4] + bounds = [(0, None), (0, None), (0, None), (0, None)] + desired_fun = 36.0000000000 + + res1 = linprog(c, A, b, bounds=bounds, + method=self.method, options=self.options) + + # Set boundary condition as a constraint + A.append([0, 0, -1, 0]) + b.append(0) + bounds[2] = (None, None) + + res2 = linprog(c, A, b, bounds=bounds, method=self.method, + options=self.options) + rtol = 1e-5 + _assert_success(res1, desired_fun=desired_fun, rtol=rtol) + _assert_success(res2, desired_fun=desired_fun, rtol=rtol) + + def test_bug_8663(self): + A = [[0, -7]] + b = [-6] + c = [1, 5] + bounds = [(0, None), (None, None)] + res = linprog(c, A_eq=A, b_eq=b, bounds=bounds, + method=self.method, options=self.options) + _assert_success(res, + desired_x=[0, 6./7], + desired_fun=5*6./7) + + def test_bug_8664(self): + # Weak test. Ideally should _detect infeasibility_ for all options. + c = [4] + A_ub = [[2], [5]] + b_ub = [4, 4] + A_eq = [[0], [-8], [9]] + b_eq = [3, 2, 10] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sup.filter(OptimizeWarning, "Solving system with option...") + o = {key: self.options[key] for key in self.options} + o["presolve"] = False + res = linprog(c, A_ub, b_ub, A_eq, b_eq, options=o, + method=self.method) + assert_(not res.success, "incorrectly reported success") + + def test_bug_8973(self): + """ + Test whether bug described at: + https://github.com/scipy/scipy/issues/8973 + was fixed. + """ + c = np.array([0, 0, 0, 1, -1]) + A = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]]) + b = np.array([2, -2]) + bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)] + res = linprog(c, A, b, None, None, bounds, method=self.method, + options=self.options) + _assert_success(res, + desired_x=[2, -2, 0, -1, 1], + desired_fun=-2) + + def test_bug_8973_2(self): + """ + Additional test for: + https://github.com/scipy/scipy/issues/8973 + suggested in + https://github.com/scipy/scipy/pull/8985 + review by @antonior92 + """ + c = np.zeros(1) + A = np.array([[1]]) + b = np.array([-2]) + bounds = (None, None) + res = linprog(c, A, b, None, None, bounds, method=self.method, + options=self.options) + _assert_success(res) # would not pass if solution is infeasible + + +class BaseTestLinprogSimplex(LinprogCommonTests): + method = "simplex" + + +class TestLinprogSimplexCommon(BaseTestLinprogSimplex): + options = {} + + def test_callback(self): + # Check that callback is as advertised + last_cb = {} + + def cb(res): + message = res.pop('message') + complete = res.pop('complete') + + assert_(res.pop('phase') in (1, 2)) + assert_(res.pop('status') in range(4)) + assert_(isinstance(res.pop('nit'), int)) + assert_(isinstance(complete, bool)) + assert_(isinstance(message, str)) + + if complete: + last_cb['x'] = res['x'] + last_cb['fun'] = res['fun'] + last_cb['slack'] = res['slack'] + last_cb['con'] = res['con'] + + c = np.array([-3, -2]) + A_ub = [[2, 1], [1, 1], [1, 0]] + b_ub = [10, 8, 4] + res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) + + _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + assert_allclose(last_cb['fun'], res['fun']) + assert_allclose(last_cb['x'], res['x']) + assert_allclose(last_cb['con'], res['con']) + assert_allclose(last_cb['slack'], res['slack']) + + def test_issue_7237(self): + with pytest.raises(ValueError): + super(TestLinprogSimplexCommon, 
self).test_issue_7237() + + def test_issue_8174(self): + with pytest.warns(OptimizeWarning): + super(TestLinprogSimplexCommon, self).test_issue_8174() + + +class TestLinprogSimplexBland(BaseTestLinprogSimplex): + options = {'bland': True} + + def test_bug_5400(self): + with pytest.raises(ValueError): + super(TestLinprogSimplexBland, self).test_bug_5400() + + def test_issue_8174(self): + with pytest.warns(OptimizeWarning): + super(TestLinprogSimplexBland, self).test_issue_8174() + + +class TestLinprogSimplexNoPresolve(BaseTestLinprogSimplex): + options = {'presolve': False} + + def test_issue_6139(self): + # Linprog(method='simplex') fails to find a basic feasible solution + # if phase 1 pseudo-objective function is outside the provided tol. + # https://github.com/scipy/scipy/issues/6139 + # Without ``presolve`` eliminating such rows the result is incorrect. + with pytest.raises(ValueError): + return super(TestLinprogSimplexNoPresolve, self).test_issue_6139() + + def test_issue_7237(self): + with pytest.raises(ValueError): + super(TestLinprogSimplexNoPresolve, self).test_issue_7237() + + def test_issue_8174(self): + with pytest.warns(OptimizeWarning): + super(TestLinprogSimplexNoPresolve, self).test_issue_8174() + + def test_issue_8174_stackoverflow(self): + # Test expects linprog to raise a warning during presolve. + # As ``'presolve'=False`` no warning should be raised. + # Despite not presolving the result is still correct. + with pytest.warns(OptimizeWarning) as redundant_warning: + super(TestLinprogSimplexNoPresolve, self).test_issue_8174() + + def test_unbounded_no_nontrivial_constraints_1(self): + pytest.skip("Tests behavior specific to presolve") + + def test_unbounded_no_nontrivial_constraints_2(self): + pytest.skip("Tests behavior specific to presolve") + + +class BaseTestLinprogIP(LinprogCommonTests): + method = "interior-point" + + +class TestLinprogIPSpecific(object): + method = "interior-point" + # the following tests don't need to be performed separately for + # sparse presolve, sparse after presolve, and dense + + def test_unbounded_below_no_presolve_original(self): + # formerly caused segfault in TravisCI w/ "cholesky":True + c = [-1] + bounds = [(None, 1)] + res = linprog(c=c, bounds=bounds, + method=self.method, + options={"presolve": False, "cholesky": True}) + _assert_success(res, desired_fun=-1) + + def test_cholesky(self): + # Test with a rather large problem (400 variables, + # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 + # use cholesky factorization and triangular solves + A, b, c = lpgen_2d(20, 20) + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"cholesky": True}) # only for dense + _assert_success(res, desired_fun=-64.049494229) + + def test_alternate_initial_point(self): + # Test with a rather large problem (400 variables, + # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 + # use "improved" initial point + A, b, c = lpgen_2d(20, 20) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") + sup.filter(OptimizeWarning, "Solving system with option...") + res = linprog(c, A_ub=A, b_ub=b, method=self.method, + options={"ip": True, "disp": True}) + # ip code is independent of sparse/dense + _assert_success(res, desired_fun=-64.049494229) + + def test_maxiter(self): + # Test with a rather large problem (400 variables, + # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 + # test iteration limit + A, b, c = lpgen_2d(20, 20) + maxiter = 
np.random.randint(6) + 1 # problem takes 7 iterations
+        res = linprog(c, A_ub=A, b_ub=b, method=self.method,
+                      options={"maxiter": maxiter})
+        # maxiter is independent of sparse/dense
+        assert_equal(res.status, 1)
+        assert_equal(res.nit, maxiter)
+
+    def test_disp(self):
+        # Test with a rather large problem (400 variables,
+        # 40 constraints) generated by https://gist.github.com/denis-bz/8647461
+        # test that display option does not break anything.
+        A, b, c = lpgen_2d(20, 20)
+        res = linprog(c, A_ub=A, b_ub=b, method=self.method,
+                      options={"disp": True})
+        # disp is independent of sparse/dense
+        _assert_success(res, desired_fun=-64.049494229)
+
+    def test_callback(self):
+        def f():
+            pass
+
+        A = [[0, -7]]
+        b = [-6]
+        c = [1, 5]
+        bounds = [(0, None), (None, None)]
+
+        # Linprog should solve in presolve. As the interior-point method is
+        # not used, the callback should never be needed and no error should
+        # be returned.
+        res = linprog(c, A_eq=A, b_eq=b, bounds=bounds,
+                      method=self.method, callback=f)
+        _assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7)
+
+        # Without presolve the solver reverts to the interior-point method.
+        # Interior-point currently does not implement callback functions.
+        with pytest.raises(NotImplementedError):
+            res = linprog(c, A_eq=A, b_eq=b, bounds=bounds, method=self.method,
+                          callback=f, options={'presolve': False})
+
+
+class TestLinprogIPSparse(BaseTestLinprogIP):
+    options = {"sparse": True}
+
+    @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
+    def test_bug_6690(self):
+        # Test defined in base class, but can't mark as xfail there
+        super(TestLinprogIPSparse, self).test_bug_6690()
+
+    def test_magic_square_sparse_no_presolve(self):
+        # test linprog with a problem with a rank-deficient A_eq matrix
+        A, b, c, N = magic_square(3)
+        with suppress_warnings() as sup:
+            sup.filter(MatrixRankWarning, "Matrix is exactly singular")
+            sup.filter(OptimizeWarning, "Solving system with option...")
+            o = {key: self.options[key] for key in self.options}
+            o["presolve"] = False
+            res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1),
+                          options=o, method=self.method)
+        _assert_success(res, desired_fun=1.730550597)
+
+    def test_sparse_solve_options(self):
+        A, b, c, N = magic_square(3)
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "A_eq does not appear...")
+            sup.filter(OptimizeWarning, "Invalid permc_spec option")
+            o = {key: self.options[key] for key in self.options}
+            permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A',
+                           'COLAMD', 'ekki-ekki-ekki')
+            for permc_spec in permc_specs:
+                o["permc_spec"] = permc_spec
+                res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1),
+                              method=self.method, options=o)
+                _assert_success(res, desired_fun=1.730550597)
+
+
+class TestLinprogIPDense(BaseTestLinprogIP):
+    options = {"sparse": False}
+
+
+class TestLinprogIPSparsePresolve(BaseTestLinprogIP):
+    options = {"sparse": True, "_sparse_presolve": True}
+
+    def test_enzo_example_c_with_infeasibility(self):
+        pytest.skip('_sparse_presolve=True incompatible with presolve=False')
+
+    @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
+    def test_bug_6690(self):
+        # Test defined in base class, but can't mark as xfail there
+        super(TestLinprogIPSparsePresolve, self).test_bug_6690()
+
+
+def test_unknown_solver():
+    c = np.array([-3, -2])
+    A_ub = [[2, 1], [1, 1], [1, 0]]
+    b_ub = [10, 8, 4]
+
+    assert_raises(ValueError, linprog,
+                  c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki')
diff --git
a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linprog.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linprog.pyc new file mode 100644 index 0000000..235a7ea Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_linprog.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_common.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_common.py new file mode 100644 index 0000000..bda69c9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_common.py @@ -0,0 +1,275 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import assert_, assert_allclose, assert_equal +from pytest import raises as assert_raises +import numpy as np + +from scipy.sparse.linalg import LinearOperator +from scipy.optimize._lsq.common import ( + step_size_to_bound, find_active_constraints, make_strictly_feasible, + CL_scaling_vector, intersect_trust_region, build_quadratic_1d, + minimize_quadratic_1d, evaluate_quadratic, reflective_transformation, + left_multiplied_operator, right_multiplied_operator) + + +class TestBounds(object): + def test_step_size_to_bounds(self): + lb = np.array([-1.0, 2.5, 10.0]) + ub = np.array([1.0, 5.0, 100.0]) + x = np.array([0.0, 2.5, 12.0]) + + s = np.array([0.1, 0.0, 0.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 10) + assert_equal(hits, [1, 0, 0]) + + s = np.array([0.01, 0.05, -1.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 2) + assert_equal(hits, [0, 0, -1]) + + s = np.array([10.0, -0.0001, 100.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, np.array(-0)) + assert_equal(hits, [0, -1, 0]) + + s = np.array([1.0, 0.5, -2.0]) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, 1.0) + assert_equal(hits, [1, 0, -1]) + + s = np.zeros(3) + step, hits = step_size_to_bound(x, s, lb, ub) + assert_equal(step, np.inf) + assert_equal(hits, [0, 0, 0]) + + def test_find_active_constraints(self): + lb = np.array([0.0, -10.0, 1.0]) + ub = np.array([1.0, 0.0, 100.0]) + + x = np.array([0.5, -5.0, 2.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [0, 0, 0]) + + x = np.array([0.0, 0.0, 10.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 1, 0]) + + active = find_active_constraints(x, lb, ub, rtol=0) + assert_equal(active, [-1, 1, 0]) + + x = np.array([1e-9, -1e-8, 100 - 1e-9]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [0, 0, 1]) + + active = find_active_constraints(x, lb, ub, rtol=1.5e-9) + assert_equal(active, [-1, 0, 1]) + + lb = np.array([1.0, -np.inf, -np.inf]) + ub = np.array([np.inf, 10.0, np.inf]) + + x = np.ones(3) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 0, 0]) + + # Handles out-of-bound cases. 
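+        # A component above ub (or below lb) is flagged active on that side.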
+ x = np.array([0.0, 11.0, 0.0]) + active = find_active_constraints(x, lb, ub) + assert_equal(active, [-1, 1, 0]) + + active = find_active_constraints(x, lb, ub, rtol=0) + assert_equal(active, [-1, 1, 0]) + + def test_make_strictly_feasible(self): + lb = np.array([-0.5, -0.8, 2.0]) + ub = np.array([0.8, 1.0, 3.0]) + + x = np.array([-0.5, 0.0, 2 + 1e-10]) + + x_new = make_strictly_feasible(x, lb, ub, rstep=0) + assert_(x_new[0] > -0.5) + assert_equal(x_new[1:], x[1:]) + + x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4) + assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)]) + + x = np.array([-0.5, -1, 3.1]) + x_new = make_strictly_feasible(x, lb, ub) + assert_(np.all((x_new >= lb) & (x_new <= ub))) + + x_new = make_strictly_feasible(x, lb, ub, rstep=0) + assert_(np.all((x_new >= lb) & (x_new <= ub))) + + lb = np.array([-1, 100.0]) + ub = np.array([1, 100.0 + 1e-10]) + x = np.array([0, 100.0]) + x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8) + assert_equal(x_new, [0, 100.0 + 0.5e-10]) + + def test_scaling_vector(self): + lb = np.array([-np.inf, -5.0, 1.0, -np.inf]) + ub = np.array([1.0, np.inf, 10.0, np.inf]) + x = np.array([0.5, 2.0, 5.0, 0.0]) + g = np.array([1.0, 0.1, -10.0, 0.0]) + v, dv = CL_scaling_vector(x, g, lb, ub) + assert_equal(v, [1.0, 7.0, 5.0, 1.0]) + assert_equal(dv, [0.0, 1.0, -1.0, 0.0]) + + +class TestQuadraticFunction(object): + def setup_method(self): + self.J = np.array([ + [0.1, 0.2], + [-1.0, 1.0], + [0.5, 0.2]]) + self.g = np.array([0.8, -2.0]) + self.diag = np.array([1.0, 2.0]) + + def test_build_quadratic_1d(self): + s = np.zeros(2) + a, b = build_quadratic_1d(self.J, self.g, s) + assert_equal(a, 0) + assert_equal(b, 0) + + a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) + assert_equal(a, 0) + assert_equal(b, 0) + + s = np.array([1.0, -1.0]) + a, b = build_quadratic_1d(self.J, self.g, s) + assert_equal(a, 2.05) + assert_equal(b, 2.8) + + a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) + assert_equal(a, 3.55) + assert_equal(b, 2.8) + + s0 = np.array([0.5, 0.5]) + a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0) + assert_equal(a, 3.55) + assert_allclose(b, 2.39) + assert_allclose(c, -0.1525) + + def test_minimize_quadratic_1d(self): + a = 5 + b = -1 + + t, y = minimize_quadratic_1d(a, b, 1, 2) + assert_equal(t, 1) + assert_equal(y, a * t**2 + b * t) + + t, y = minimize_quadratic_1d(a, b, -2, -1) + assert_equal(t, -1) + assert_equal(y, a * t**2 + b * t) + + t, y = minimize_quadratic_1d(a, b, -1, 1) + assert_equal(t, 0.1) + assert_equal(y, a * t**2 + b * t) + + c = 10 + t, y = minimize_quadratic_1d(a, b, -1, 1, c=c) + assert_equal(t, 0.1) + assert_equal(y, a * t**2 + b * t + c) + + def test_evaluate_quadratic(self): + s = np.array([1.0, -1.0]) + + value = evaluate_quadratic(self.J, self.g, s) + assert_equal(value, 4.85) + + value = evaluate_quadratic(self.J, self.g, s, diag=self.diag) + assert_equal(value, 6.35) + + s = np.array([[1.0, -1.0], + [1.0, 1.0], + [0.0, 0.0]]) + + values = evaluate_quadratic(self.J, self.g, s) + assert_allclose(values, [4.85, -0.91, 0.0]) + + values = evaluate_quadratic(self.J, self.g, s, diag=self.diag) + assert_allclose(values, [6.35, 0.59, 0.0]) + + +class TestTrustRegion(object): + def test_intersect(self): + Delta = 1.0 + + x = np.zeros(3) + s = np.array([1.0, 0.0, 0.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_equal(t_neg, -1) + assert_equal(t_pos, 1) + + s = np.array([-1.0, 1.0, -1.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + 
assert_allclose(t_neg, -3**-0.5) + assert_allclose(t_pos, 3**-0.5) + + x = np.array([0.5, -0.5, 0]) + s = np.array([0, 0, 1.0]) + t_neg, t_pos = intersect_trust_region(x, s, Delta) + assert_allclose(t_neg, -2**-0.5) + assert_allclose(t_pos, 2**-0.5) + + x = np.ones(3) + assert_raises(ValueError, intersect_trust_region, x, s, Delta) + + x = np.zeros(3) + s = np.zeros(3) + assert_raises(ValueError, intersect_trust_region, x, s, Delta) + + +def test_reflective_transformation(): + lb = np.array([-1, -2], dtype=float) + ub = np.array([5, 3], dtype=float) + + y = np.array([0, 0]) + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, y) + assert_equal(g, np.ones(2)) + + y = np.array([-4, 4], dtype=float) + + x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf])) + assert_equal(x, [2, 4]) + assert_equal(g, [-1, 1]) + + x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub) + assert_equal(x, [-4, 2]) + assert_equal(g, [1, -1]) + + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, [2, 2]) + assert_equal(g, [-1, -1]) + + lb = np.array([-np.inf, -2]) + ub = np.array([5, np.inf]) + y = np.array([10, 10], dtype=float) + x, g = reflective_transformation(y, lb, ub) + assert_equal(x, [0, 10]) + assert_equal(g, [-1, 1]) + + +def test_linear_operators(): + A = np.arange(6).reshape((3, 2)) + + d_left = np.array([-1, 2, 5]) + DA = np.diag(d_left).dot(A) + J_left = left_multiplied_operator(A, d_left) + + d_right = np.array([5, 10]) + AD = A.dot(np.diag(d_right)) + J_right = right_multiplied_operator(A, d_right) + + x = np.array([-2, 3]) + X = -2 * np.arange(2, 8).reshape((2, 3)) + xt = np.array([0, -2, 15]) + + assert_allclose(DA.dot(x), J_left.matvec(x)) + assert_allclose(DA.dot(X), J_left.matmat(X)) + assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt)) + + assert_allclose(AD.dot(x), J_right.matvec(x)) + assert_allclose(AD.dot(X), J_right.matmat(X)) + assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_common.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_common.pyc new file mode 100644 index 0000000..0783014 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_common.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_linear.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_linear.py new file mode 100644 index 0000000..f093a44 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_linear.py @@ -0,0 +1,150 @@ +import numpy as np +from numpy.linalg import lstsq +from numpy.testing import assert_allclose, assert_equal, assert_ +from pytest import raises as assert_raises + +from scipy.sparse import rand +from scipy.sparse.linalg import aslinearoperator +from scipy.optimize import lsq_linear + + +A = np.array([ + [0.171, -0.057], + [-0.049, -0.248], + [-0.166, 0.054], +]) +b = np.array([0.074, 1.014, -0.383]) + + +class BaseMixin(object): + def setup_method(self): + self.rnd = np.random.RandomState(0) + + def test_dense_no_bounds(self): + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver) + assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) + + def test_dense_bounds(self): + # Solutions for comparison are taken from MATLAB. 
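+        # In the first case the bounds are inactive, so the constrained
+        # result must coincide with the plain lstsq solution.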
+ lb = np.array([-1, -10]) + ub = np.array([1, 0]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) + + lb = np.array([0.0, -np.inf]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, np.inf), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.0, -4.084174437334673]), + atol=1e-6) + + lb = np.array([-1, 0]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, np.inf), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.448427311733504, 0]), + atol=1e-15) + + ub = np.array([np.inf, -5]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (-np.inf, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([-0.105560998682388, -5])) + + ub = np.array([-1, np.inf]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (-np.inf, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([-1, -4.181102129483254])) + + lb = np.array([0, -4]) + ub = np.array([1, 0]) + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, np.array([0.005236663400791, -4])) + + def test_dense_rank_deficient(self): + A = np.array([[-0.307, -0.184]]) + b = np.array([0.773]) + lb = [-0.1, -0.1] + ub = [0.1, 0.1] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.x, [-0.1, -0.1]) + + A = np.array([ + [0.334, 0.668], + [-0.516, -1.032], + [0.192, 0.384], + ]) + b = np.array([-1.436, 0.135, 0.909]) + lb = [0, -1] + ub = [1, -0.5] + for lsq_solver in self.lsq_solvers: + res = lsq_linear(A, b, (lb, ub), method=self.method, + lsq_solver=lsq_solver) + assert_allclose(res.optimality, 0, atol=1e-11) + + def test_full_result(self): + lb = np.array([0, -4]) + ub = np.array([1, 0]) + res = lsq_linear(A, b, (lb, ub), method=self.method) + + assert_allclose(res.x, [0.005236663400791, -4]) + + r = A.dot(res.x) - b + assert_allclose(res.cost, 0.5 * np.dot(r, r)) + assert_allclose(res.fun, r) + + assert_allclose(res.optimality, 0.0, atol=1e-12) + assert_equal(res.active_mask, [0, -1]) + assert_(res.nit < 15) + assert_(res.status == 1 or res.status == 3) + assert_(isinstance(res.message, str)) + assert_(res.success) + + +class SparseMixin(object): + def test_sparse_and_LinearOperator(self): + m = 5000 + n = 1000 + A = rand(m, n, random_state=0) + b = self.rnd.randn(m) + res = lsq_linear(A, b) + assert_allclose(res.optimality, 0, atol=1e-6) + + A = aslinearoperator(A) + res = lsq_linear(A, b) + assert_allclose(res.optimality, 0, atol=1e-6) + + def test_sparse_bounds(self): + m = 5000 + n = 1000 + A = rand(m, n, random_state=0) + b = self.rnd.randn(m) + lb = self.rnd.randn(n) + ub = lb + 1 + res = lsq_linear(A, b, (lb, ub)) + assert_allclose(res.optimality, 0.0, atol=1e-6) + + res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13) + assert_allclose(res.optimality, 0.0, atol=1e-6) + + res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto') + assert_allclose(res.optimality, 0.0, atol=1e-6) + + +class TestTRF(BaseMixin, SparseMixin): + method = 'trf' + lsq_solvers = ['exact', 'lsmr'] + + +class TestBVLS(BaseMixin): + method = 'bvls' + lsq_solvers = ['exact'] + diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_linear.pyc 
b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_linear.pyc new file mode 100644 index 0000000..a952fb9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_linear.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minimize_constrained.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minimize_constrained.py new file mode 100644 index 0000000..358bcc1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minimize_constrained.py @@ -0,0 +1,617 @@ +from __future__ import division, print_function, absolute_import +import numpy as np +import pytest +from scipy.linalg import block_diag +from scipy.sparse import csc_matrix +from numpy.testing import (TestCase, assert_array_almost_equal, + assert_array_less, assert_allclose, assert_) +from pytest import raises +from scipy.optimize import (NonlinearConstraint, + LinearConstraint, + Bounds, + minimize, + BFGS, + SR1) +from scipy._lib._numpy_compat import suppress_warnings + + +class Maratos: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def fun(self, x): + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x): + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x): + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[2*x[0], 2*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class MaratosTestArgs: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, a, b, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = [np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.a = a + self.b = b + self.bounds = None + + def _test_args(self, a, b): + if self.a != a or self.b != b: + raise ValueError() + + def fun(self, x, a, b): + self._test_args(a, b) + return 2*(x[0]**2 + x[1]**2 - 1) - x[0] + + def grad(self, x, a, b): + self._test_args(a, b) + return np.array([4*x[0]-1, 4*x[1]]) + + def hess(self, x, a, b): + self._test_args(a, b) + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[4*x[0], 4*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class MaratosGradInFunc: + """Problem 15.4 from Nocedal and Wright + + The following optimization problem: + minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] + Subject to: x[0]**2 + x[1]**2 - 1 = 0 + """ + + def __init__(self, degrees=60, constr_jac=None, constr_hess=None): + rads = degrees/180*np.pi + self.x0 = 
[np.cos(rads), np.sin(rads)] + self.x_opt = np.array([1.0, 0.0]) + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = None + + def fun(self, x): + return (2*(x[0]**2 + x[1]**2 - 1) - x[0], + np.array([4*x[0]-1, 4*x[1]])) + + @property + def grad(self): + return True + + def hess(self, x): + return 4*np.eye(2) + + @property + def constr(self): + def fun(x): + return x[0]**2 + x[1]**2 + + if self.constr_jac is None: + def jac(x): + return [[4*x[0], 4*x[1]]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.eye(2) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 1, 1, jac, hess) + + +class HyperbolicIneq: + """Problem 15.1 from Nocedal and Wright + + The following optimization problem: + minimize 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 + Subject to: 1/(x[0] + 1) - x[1] >= 1/4 + x[0] >= 0 + x[1] >= 0 + """ + def __init__(self, constr_jac=None, constr_hess=None): + self.x0 = [0, 0] + self.x_opt = [1.952823, 0.088659] + self.constr_jac = constr_jac + self.constr_hess = constr_hess + self.bounds = Bounds(0, np.inf) + + def fun(self, x): + return 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 + + def grad(self, x): + return [x[0] - 2, x[1] - 1/2] + + def hess(self, x): + return np.eye(2) + + @property + def constr(self): + def fun(x): + return 1/(x[0] + 1) - x[1] + + if self.constr_jac is None: + def jac(x): + return [[-1/(x[0] + 1)**2, -1]] + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + return 2*v[0]*np.array([[1/(x[0] + 1)**3, 0], + [0, 0]]) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, 0.25, np.inf, jac, hess) + + +class Rosenbrock: + """Rosenbrock function. + + The following optimization problem: + minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) + """ + + def __init__(self, n=2, random_state=0): + rng = np.random.RandomState(random_state) + self.x0 = rng.uniform(-1, 1, n) + self.x_opt = np.ones(n) + self.bounds = None + + def fun(self, x): + x = np.asarray(x) + r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, + axis=0) + return r + + def grad(self, x): + x = np.asarray(x) + xm = x[1:-1] + xm_m1 = x[:-2] + xm_p1 = x[2:] + der = np.zeros_like(x) + der[1:-1] = (200 * (xm - xm_m1**2) - + 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) + der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) + der[-1] = 200 * (x[-1] - x[-2]**2) + return der + + def hess(self, x): + x = np.atleast_1d(x) + H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) + diagonal = np.zeros(len(x), dtype=x.dtype) + diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 + diagonal[-1] = 200 + diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] + H = H + np.diag(diagonal) + return H + + @property + def constr(self): + return () + + +class IneqRosenbrock(Rosenbrock): + """Rosenbrock subject to inequality constraints. + + The following optimization problem: + minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) + subject to: x[0] + 2 x[1] <= 1 + + Taken from matlab ``fmincon`` documentation. + """ + def __init__(self, random_state=0): + Rosenbrock.__init__(self, 2, random_state) + self.x0 = [-1, -0.5] + self.x_opt = [0.5022, 0.2489] + self.bounds = None + + @property + def constr(self): + A = [[1, 2]] + b = 1 + return LinearConstraint(A, -np.inf, b) + + +class BoundedRosenbrock(Rosenbrock): + """Rosenbrock subject to inequality constraints. 
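+    (Here the constraints are simple bounds on the two variables.)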
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
+        subject to: -2 <= x[0] <= 0
+                     0 <= x[1] <= 2
+
+    Taken from matlab ``fmincon`` documentation.
+    """
+    def __init__(self, random_state=0):
+        Rosenbrock.__init__(self, 2, random_state)
+        self.x0 = [-0.2, 0.2]
+        self.x_opt = None
+        self.bounds = Bounds([-2, 0], [0, 2])
+
+
+class EqIneqRosenbrock(Rosenbrock):
+    """Rosenbrock subject to equality and inequality constraints.
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
+        subject to: x[0] + 2 x[1] <= 1
+                    2 x[0] + x[1] = 1
+
+    Taken from matlab ``fmincon`` documentation.
+    """
+    def __init__(self, random_state=0):
+        Rosenbrock.__init__(self, 2, random_state)
+        self.x0 = [-1, -0.5]
+        self.x_opt = [0.41494, 0.17011]
+        self.bounds = None
+
+    @property
+    def constr(self):
+        A_ineq = [[1, 2]]
+        b_ineq = 1
+        A_eq = [[2, 1]]
+        b_eq = 1
+        return (LinearConstraint(A_ineq, -np.inf, b_ineq),
+                LinearConstraint(A_eq, b_eq, b_eq))
+
+
+class Elec:
+    """Distribution of electrons on a sphere.
+
+    Problem no. 2 from the COPS collection [1]_. Find
+    the equilibrium state distribution (of minimal
+    potential) of the electrons positioned on a
+    conducting sphere.
+
+    References
+    ----------
+    .. [1] E. D. Dolan, J. J. Mor\'{e}, and T. S. Munson,
+           "Benchmarking optimization software with COPS 3.0.",
+           Argonne National Lab., Argonne, IL (US), 2004.
+    """
+    def __init__(self, n_electrons=200, random_state=0,
+                 constr_jac=None, constr_hess=None):
+        self.n_electrons = n_electrons
+        self.rng = np.random.RandomState(random_state)
+        # Initial Guess
+        phi = self.rng.uniform(0, 2 * np.pi, self.n_electrons)
+        theta = self.rng.uniform(-np.pi, np.pi, self.n_electrons)
+        x = np.cos(theta) * np.cos(phi)
+        y = np.cos(theta) * np.sin(phi)
+        z = np.sin(theta)
+        self.x0 = np.hstack((x, y, z))
+        self.x_opt = None
+        self.constr_jac = constr_jac
+        self.constr_hess = constr_hess
+        self.bounds = None
+
+    def _get_cordinates(self, x):
+        x_coord = x[:self.n_electrons]
+        y_coord = x[self.n_electrons:2 * self.n_electrons]
+        z_coord = x[2 * self.n_electrons:]
+        return x_coord, y_coord, z_coord
+
+    def _compute_coordinate_deltas(self, x):
+        x_coord, y_coord, z_coord = self._get_cordinates(x)
+        dx = x_coord[:, None] - x_coord
+        dy = y_coord[:, None] - y_coord
+        dz = z_coord[:, None] - z_coord
+        return dx, dy, dz
+
+    def fun(self, x):
+        dx, dy, dz = self._compute_coordinate_deltas(x)
+        with np.errstate(divide='ignore'):
+            dm1 = (dx**2 + dy**2 + dz**2) ** -0.5
+        dm1[np.diag_indices_from(dm1)] = 0
+        return 0.5 * np.sum(dm1)
+
+    def grad(self, x):
+        dx, dy, dz = self._compute_coordinate_deltas(x)
+
+        with np.errstate(divide='ignore'):
+            dm3 = (dx**2 + dy**2 + dz**2) ** -1.5
+        dm3[np.diag_indices_from(dm3)] = 0
+
+        grad_x = -np.sum(dx * dm3, axis=1)
+        grad_y = -np.sum(dy * dm3, axis=1)
+        grad_z = -np.sum(dz * dm3, axis=1)
+
+        return np.hstack((grad_x, grad_y, grad_z))
+
+    def hess(self, x):
+        dx, dy, dz = self._compute_coordinate_deltas(x)
+        d = (dx**2 + dy**2 + dz**2) ** 0.5
+
+        with np.errstate(divide='ignore'):
+            dm3 = d ** -3
+            dm5 = d ** -5
+
+        i = np.arange(self.n_electrons)
+        dm3[i, i] = 0
+        dm5[i, i] = 0
+
+        Hxx = dm3 - 3 * dx**2 * dm5
+        Hxx[i, i] = -np.sum(Hxx, axis=1)
+
+        Hxy = -3 * dx * dy * dm5
+        Hxy[i, i] = -np.sum(Hxy, axis=1)
+
+        Hxz = -3 * dx * dz * dm5
+        Hxz[i, i] = -np.sum(Hxz, axis=1)
+
+        Hyy = dm3 - 3 * dy**2 * dm5
+        Hyy[i, i] = -np.sum(Hyy, axis=1)
+
+        Hyz = -3 * dy * dz * dm5
+        Hyz[i, i] = -np.sum(Hyz, axis=1)
+
+        Hzz = dm3 - 3
* dz**2 * dm5 + Hzz[i, i] = -np.sum(Hzz, axis=1) + + H = np.vstack(( + np.hstack((Hxx, Hxy, Hxz)), + np.hstack((Hxy, Hyy, Hyz)), + np.hstack((Hxz, Hyz, Hzz)) + )) + + return H + + @property + def constr(self): + def fun(x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + return x_coord**2 + y_coord**2 + z_coord**2 - 1 + + if self.constr_jac is None: + def jac(x): + x_coord, y_coord, z_coord = self._get_cordinates(x) + Jx = 2 * np.diag(x_coord) + Jy = 2 * np.diag(y_coord) + Jz = 2 * np.diag(z_coord) + return csc_matrix(np.hstack((Jx, Jy, Jz))) + else: + jac = self.constr_jac + + if self.constr_hess is None: + def hess(x, v): + D = 2 * np.diag(v) + return block_diag(D, D, D) + else: + hess = self.constr_hess + + return NonlinearConstraint(fun, -np.inf, 0, jac, hess) + + +class TestTrustRegionConstr(TestCase): + + @pytest.mark.slow + def test_list_of_problems(self): + list_of_problems = [Maratos(), + Maratos(constr_hess='2-point'), + Maratos(constr_hess=SR1()), + Maratos(constr_jac='2-point', constr_hess=SR1()), + MaratosGradInFunc(), + HyperbolicIneq(), + HyperbolicIneq(constr_hess='3-point'), + HyperbolicIneq(constr_hess=BFGS()), + HyperbolicIneq(constr_jac='3-point', + constr_hess=BFGS()), + Rosenbrock(), + IneqRosenbrock(), + EqIneqRosenbrock(), + BoundedRosenbrock(), + Elec(n_electrons=2), + Elec(n_electrons=2, constr_hess='2-point'), + Elec(n_electrons=2, constr_hess=SR1()), + Elec(n_electrons=2, constr_jac='3-point', + constr_hess=SR1())] + + for prob in list_of_problems: + for grad in (prob.grad, '3-point', False): + for hess in (prob.hess, + '3-point', + SR1(), + BFGS(exception_strategy='damp_update'), + BFGS(exception_strategy='skip_update')): + + # Remove exceptions + if grad in ('2-point', '3-point', 'cs', False) and \ + hess in ('2-point', '3-point', 'cs'): + continue + if prob.grad is True and grad in ('3-point', False): + continue + with suppress_warnings() as sup: + sup.filter(UserWarning, "delta_grad == 0.0") + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=grad, hess=hess, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, + decimal=5) + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_default_jac_and_hess(self): + def fun(x): + return (x - 1) ** 2 + bounds = [(-2, 2)] + res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr') + assert_array_almost_equal(res.x, 1, decimal=5) + + def test_default_hess(self): + def fun(x): + return (x - 1) ** 2 + bounds = [(-2, 2)] + res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr', + jac='2-point') + assert_array_almost_equal(res.x, 1, decimal=5) + + def test_no_constraints(self): + prob = Rosenbrock() + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=prob.grad, hess=prob.hess) + result1 = minimize(prob.fun, prob.x0, + method='L-BFGS-B', + jac='2-point') + with pytest.warns(UserWarning): + result2 = minimize(prob.fun, prob.x0, + method='L-BFGS-B', + jac='3-point') + assert_array_almost_equal(result.x, prob.x_opt, decimal=5) + assert_array_almost_equal(result1.x, prob.x_opt, decimal=5) + assert_array_almost_equal(result2.x, prob.x_opt, decimal=5) + + def test_hessp(self): + prob 
= Maratos() + + def hessp(x, p): + H = prob.hess(x) + return H.dot(p) + + result = minimize(prob.fun, prob.x0, + method='trust-constr', + jac=prob.grad, hessp=hessp, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, decimal=2) + + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_args(self): + prob = MaratosTestArgs("a", 234) + + result = minimize(prob.fun, prob.x0, ("a", 234), + method='trust-constr', + jac=prob.grad, hess=prob.hess, + bounds=prob.bounds, + constraints=prob.constr) + + if prob.x_opt is not None: + assert_array_almost_equal(result.x, prob.x_opt, decimal=2) + + # gtol + if result.status == 1: + assert_array_less(result.optimality, 1e-8) + # xtol + if result.status == 2: + assert_array_less(result.tr_radius, 1e-8) + if result.method == "tr_interior_point": + assert_array_less(result.barrier_parameter, 1e-8) + # max iter + if result.status in (0, 3): + raise RuntimeError("Invalid termination condition.") + + def test_raise_exception(self): + prob = Maratos() + + raises(ValueError, minimize, prob.fun, prob.x0, method='trust-constr', + jac='2-point', hess='2-point', constraints=prob.constr) + + def test_issue_9044(self): + # https://github.com/scipy/scipy/issues/9044 + # Test the returned `OptimizeResult` contains keys consistent with + # other solvers. + + def callback(x, info): + assert_('nit' in info) + assert_('niter' in info) + + result = minimize(lambda x: x**2, [0], jac=lambda x: 2*x, + hess=lambda x: 2, callback=callback, + method='trust-constr') + assert_(result.get('success')) + assert_(result.get('nit', -1) == 1) + + # Also check existence of the 'niter' attribute, for backward + # compatibility + assert_(result.get('niter', -1) == 1) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minimize_constrained.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minimize_constrained.pyc new file mode 100644 index 0000000..19ccd13 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minimize_constrained.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minpack.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minpack.py new file mode 100644 index 0000000..8ea0fec --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minpack.py @@ -0,0 +1,790 @@ +""" +Unit tests for optimization routines from minpack.py. 
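+Covers fsolve, root, leastsq, curve_fit and fixed_point.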
+""" +from __future__ import division, print_function, absolute_import + +from numpy.testing import (assert_, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose) +from pytest import raises as assert_raises +import numpy as np +from numpy import array, float64, matrix +from multiprocessing.pool import ThreadPool + +from scipy import optimize +from scipy.special import lambertw +from scipy.optimize.minpack import leastsq, curve_fit, fixed_point +from scipy._lib._numpy_compat import _assert_warns, suppress_warnings +from scipy.optimize import OptimizeWarning + + +class ReturnShape(object): + """This class exists to create a callable that does not have a '__name__' attribute. + + __init__ takes the argument 'shape', which should be a tuple of ints. When an instance + it called with a single argument 'x', it returns numpy.ones(shape). + """ + def __init__(self, shape): + self.shape = shape + + def __call__(self, x): + return np.ones(self.shape) + + +def dummy_func(x, shape): + """A function that returns an array of ones of the given shape. + `x` is ignored. + """ + return np.ones(shape) + + +def sequence_parallel(fs): + pool = ThreadPool(len(fs)) + try: + return pool.map(lambda f: f(), fs) + finally: + pool.terminate() + + +# Function and jacobian for tests of solvers for systems of nonlinear +# equations + + +def pressure_network(flow_rates, Qtot, k): + """Evaluate non-linear equation system representing + the pressures and flows in a system of n parallel pipes:: + + f_i = P_i - P_0, for i = 1..n + f_0 = sum(Q_i) - Qtot + + Where Q_i is the flow rate in pipe i and P_i the pressure in that pipe. + Pressure is modeled as a P=kQ**2 where k is a valve coefficient and + Q is the flow rate. + + Parameters + ---------- + flow_rates : float + A 1D array of n flow rates [kg/s]. + k : float + A 1D array of n valve coefficients [1/kg m]. + Qtot : float + A scalar, the total input flow rate [kg/s]. + + Returns + ------- + F : float + A 1D array, F[i] == f_i. + + """ + P = k * flow_rates**2 + F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot)) + return F + + +def pressure_network_jacobian(flow_rates, Qtot, k): + """Return the jacobian of the equation system F(flow_rates) + computed by `pressure_network` with respect to + *flow_rates*. See `pressure_network` for the detailed + description of parrameters. + + Returns + ------- + jac : float + *n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)`` + and *f_i* and *Q_i* are described in the doc for `pressure_network` + """ + n = len(flow_rates) + pdiff = np.diag(flow_rates[1:] * 2 * k[1:] - 2 * flow_rates[0] * k[0]) + + jac = np.empty((n, n)) + jac[:n-1, :n-1] = pdiff * 0 + jac[:n-1, n-1] = 0 + jac[n-1, :] = np.ones(n) + + return jac + + +def pressure_network_fun_and_grad(flow_rates, Qtot, k): + return (pressure_network(flow_rates, Qtot, k), + pressure_network_jacobian(flow_rates, Qtot, k)) + + +class TestFSolve(object): + def test_pressure_network_no_gradient(self): + # fsolve without gradient, equal pipes -> equal flows. 
+ k = np.ones(4) * 0.5 + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows, info, ier, mesg = optimize.fsolve( + pressure_network, initial_guess, args=(Qtot, k), + full_output=True) + assert_array_almost_equal(final_flows, np.ones(4)) + assert_(ier == 1, mesg) + + def test_pressure_network_with_gradient(self): + # fsolve with gradient, equal pipes -> equal flows + k = np.ones(4) * 0.5 + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.fsolve( + pressure_network, initial_guess, args=(Qtot, k), + fprime=pressure_network_jacobian) + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_wrong_shape_func_callable(self): + func = ReturnShape(1) + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.fsolve, func, x0) + + def test_wrong_shape_func_function(self): + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),)) + + def test_wrong_shape_fprime_callable(self): + func = ReturnShape(1) + deriv_func = ReturnShape((2,2)) + assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func) + + def test_wrong_shape_fprime_function(self): + func = lambda x: dummy_func(x, (2,)) + deriv_func = lambda x: dummy_func(x, (3,3)) + assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func) + + def test_func_can_raise(self): + def func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.fsolve(func, x0=[0]) + + def test_Dfun_can_raise(self): + func = lambda x: x - np.array([10]) + + def deriv_func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.fsolve(func, x0=[0], fprime=deriv_func) + + def test_float32(self): + func = lambda x: np.array([x[0] - 100, x[1] - 1000], dtype=np.float32)**2 + p = optimize.fsolve(func, np.array([1, 1], np.float32)) + assert_allclose(func(p), [0, 0], atol=1e-3) + + def test_reentrant_func(self): + def func(*args): + self.test_pressure_network_no_gradient() + return pressure_network(*args) + + # fsolve without gradient, equal pipes -> equal flows. 
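+        # Re-entering fsolve from within ``func`` must not corrupt the result.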
+ k = np.ones(4) * 0.5 + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows, info, ier, mesg = optimize.fsolve( + func, initial_guess, args=(Qtot, k), + full_output=True) + assert_array_almost_equal(final_flows, np.ones(4)) + assert_(ier == 1, mesg) + + def test_reentrant_Dfunc(self): + def deriv_func(*args): + self.test_pressure_network_with_gradient() + return pressure_network_jacobian(*args) + + # fsolve with gradient, equal pipes -> equal flows + k = np.ones(4) * 0.5 + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.fsolve( + pressure_network, initial_guess, args=(Qtot, k), + fprime=deriv_func) + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_concurrent_no_gradient(self): + return sequence_parallel([self.test_pressure_network_no_gradient] * 10) + + def test_concurrent_with_gradient(self): + return sequence_parallel([self.test_pressure_network_with_gradient] * 10) + + +class TestRootHybr(object): + def test_pressure_network_no_gradient(self): + # root/hybr without gradient, equal pipes -> equal flows + k = np.ones(4) * 0.5 + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network, initial_guess, + method='hybr', args=(Qtot, k)).x + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_pressure_network_with_gradient(self): + # root/hybr with gradient, equal pipes -> equal flows + k = np.ones(4) * 0.5 + Qtot = 4 + initial_guess = matrix([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network, initial_guess, + args=(Qtot, k), method='hybr', + jac=pressure_network_jacobian).x + assert_array_almost_equal(final_flows, np.ones(4)) + + def test_pressure_network_with_gradient_combined(self): + # root/hybr with gradient and function combined, equal pipes -> equal + # flows + k = np.ones(4) * 0.5 + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network_fun_and_grad, + initial_guess, args=(Qtot, k), + method='hybr', jac=True).x + assert_array_almost_equal(final_flows, np.ones(4)) + + +class TestRootLM(object): + def test_pressure_network_no_gradient(self): + # root/lm without gradient, equal pipes -> equal flows + k = np.ones(4) * 0.5 + Qtot = 4 + initial_guess = array([2., 0., 2., 0.]) + final_flows = optimize.root(pressure_network, initial_guess, + method='lm', args=(Qtot, k)).x + assert_array_almost_equal(final_flows, np.ones(4)) + + +class TestLeastSq(object): + def setup_method(self): + x = np.linspace(0, 10, 40) + a,b,c = 3.1, 42, -304.2 + self.x = x + self.abc = a,b,c + y_true = a*x**2 + b*x + c + np.random.seed(0) + self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape) + + def residuals(self, p, y, x): + a,b,c = p + err = y-(a*x**2 + b*x + c) + return err + + def residuals_jacobian(self, _p, _y, x): + return -np.vstack([x**2, x, np.ones_like(x)]).T + + def test_basic(self): + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x)) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_basic_with_gradient(self): + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + Dfun=self.residuals_jacobian) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_full_output(self): + p0 = matrix([0,0,0]) + 
full_output = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + full_output=True) + params_fit, cov_x, infodict, mesg, ier = full_output + assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg) + + def test_input_untouched(self): + p0 = array([0,0,0],dtype=float64) + p0_copy = array(p0, copy=True) + full_output = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + full_output=True) + params_fit, cov_x, infodict, mesg, ier = full_output + assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg) + assert_array_equal(p0, p0_copy) + + def test_wrong_shape_func_callable(self): + func = ReturnShape(1) + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.leastsq, func, x0) + + def test_wrong_shape_func_function(self): + # x0 is a list of two elements, but func will return an array with + # length 1, so this should result in a TypeError. + x0 = [1.5, 2.0] + assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),)) + + def test_wrong_shape_Dfun_callable(self): + func = ReturnShape(1) + deriv_func = ReturnShape((2,2)) + assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) + + def test_wrong_shape_Dfun_function(self): + func = lambda x: dummy_func(x, (2,)) + deriv_func = lambda x: dummy_func(x, (3,3)) + assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) + + def test_float32(self): + # Regression test for gh-1447 + def func(p,x,y): + q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3] + return q - y + + x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286, + 1.231], dtype=np.float32) + y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258, + 0.034,0.0396], dtype=np.float32) + p0 = np.array([1.0,1.0,1.0,1.0]) + p1, success = optimize.leastsq(func, p0, args=(x,y)) + + assert_(success in [1,2,3,4]) + assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum()) + + def test_func_can_raise(self): + def func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.leastsq(func, x0=[0]) + + def test_Dfun_can_raise(self): + func = lambda x: x - np.array([10]) + + def deriv_func(*args): + raise ValueError('I raised') + + with assert_raises(ValueError, match='I raised'): + optimize.leastsq(func, x0=[0], Dfun=deriv_func) + + def test_reentrant_func(self): + def func(*args): + self.test_basic() + return self.residuals(*args) + + p0 = array([0,0,0]) + params_fit, ier = leastsq(func, p0, + args=(self.y_meas, self.x)) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_reentrant_Dfun(self): + def deriv_func(*args): + self.test_basic() + return self.residuals_jacobian(*args) + + p0 = array([0,0,0]) + params_fit, ier = leastsq(self.residuals, p0, + args=(self.y_meas, self.x), + Dfun=deriv_func) + assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) + # low precision due to random + assert_array_almost_equal(params_fit, self.abc, decimal=2) + + def test_concurrent_no_gradient(self): + return sequence_parallel([self.test_basic] * 10) + + def test_concurrent_with_gradient(self): + return sequence_parallel([self.test_basic_with_gradient] * 10) + + +class TestCurveFit(object): + def setup_method(self): + self.y = array([1.0, 3.2, 9.5, 13.7]) + self.x = array([1.0, 2.0, 3.0, 4.0]) + + def test_one_argument(self): 
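+        # A single free parameter gives popt of length 1 and a 1x1 pcov.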
+ def func(x,a): + return x**a + popt, pcov = curve_fit(func, self.x, self.y) + assert_(len(popt) == 1) + assert_(pcov.shape == (1,1)) + assert_almost_equal(popt[0], 1.9149, decimal=4) + assert_almost_equal(pcov[0,0], 0.0016, decimal=4) + + # Test if we get the same with full_output. Regression test for #1415. + res = curve_fit(func, self.x, self.y, full_output=1) + (popt2, pcov2, infodict, errmsg, ier) = res + assert_array_almost_equal(popt, popt2) + + def test_two_argument(self): + def func(x, a, b): + return b*x**a + popt, pcov = curve_fit(func, self.x, self.y) + assert_(len(popt) == 2) + assert_(pcov.shape == (2,2)) + assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) + assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], + decimal=4) + + def test_func_is_classmethod(self): + class test_self(object): + """This class tests if curve_fit passes the correct number of + arguments when the model function is a class instance method. + """ + def func(self, x, a, b): + return b * x**a + + test_self_inst = test_self() + popt, pcov = curve_fit(test_self_inst.func, self.x, self.y) + assert_(pcov.shape == (2,2)) + assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) + assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], + decimal=4) + + def test_regression_2639(self): + # This test fails if epsfcn in leastsq is too large. + x = [574.14200000000005, 574.154, 574.16499999999996, + 574.17700000000002, 574.18799999999999, 574.19899999999996, + 574.21100000000001, 574.22199999999998, 574.23400000000004, + 574.245] + y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0, + 1550.0, 949.0, 841.0] + guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0, + 0.0035019999999983615, 859.0] + good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03, + 1.0068462e-02, 8.57450661e+02] + + def f_double_gauss(x, x0, x1, A0, A1, sigma, c): + return (A0*np.exp(-(x-x0)**2/(2.*sigma**2)) + + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c) + popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000) + assert_allclose(popt, good, rtol=1e-5) + + def test_pcov(self): + xdata = np.array([0, 1, 2, 3, 4, 5]) + ydata = np.array([1, 1, 5, 7, 8, 12]) + sigma = np.array([1, 2, 1, 2, 1, 2]) + + def f(x, a, b): + return a*x + b + + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, + method=method) + perr_scaled = np.sqrt(np.diag(pcov)) + assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, + method=method) + perr_scaled = np.sqrt(np.diag(pcov)) + assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, + absolute_sigma=True, method=method) + perr = np.sqrt(np.diag(pcov)) + assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3) + + popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, + absolute_sigma=True, method=method) + perr = np.sqrt(np.diag(pcov)) + assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3) + + # infinite variances + + def f_flat(x, a, b): + return a*x + + pcov_expected = np.array([np.inf]*4).reshape(2, 2) + + with suppress_warnings() as sup: + sup.filter(OptimizeWarning, + "Covariance of the parameters could not be estimated") + popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma) + popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0]) + + assert_(pcov.shape == (2, 2)) + 
assert_array_equal(pcov, pcov_expected) + + assert_(pcov1.shape == (2, 2)) + assert_array_equal(pcov1, pcov_expected) + + def test_array_like(self): + # Test sequence input. Regression test for gh-3037. + def f_linear(x, a, b): + return a*x + b + + x = [1, 2, 3, 4] + y = [3, 5, 7, 9] + assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10) + + def test_indeterminate_covariance(self): + # Test that a warning is returned when pcov is indeterminate + xdata = np.array([1, 2, 3, 4, 5, 6]) + ydata = np.array([1, 2, 3, 4, 5.5, 6]) + _assert_warns(OptimizeWarning, curve_fit, + lambda x, a, b: a*x, xdata, ydata) + + def test_NaN_handling(self): + # Test for correct handling of NaNs in input data: gh-3422 + + # create input with NaNs + xdata = np.array([1, np.nan, 3]) + ydata = np.array([1, 2, 3]) + + assert_raises(ValueError, curve_fit, + lambda x, a, b: a*x + b, xdata, ydata) + assert_raises(ValueError, curve_fit, + lambda x, a, b: a*x + b, ydata, xdata) + + assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b, + xdata, ydata, **{"check_finite": True}) + + def test_method_argument(self): + def f(x, a, b): + return a * np.exp(-b*x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + for method in ['trf', 'dogbox', 'lm', None]: + popt, pcov = curve_fit(f, xdata, ydata, method=method) + assert_allclose(popt, [2., 2.]) + + assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown') + + def test_bounds(self): + def f(x, a, b): + return a * np.exp(-b*x) + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + # The minimum w/out bounds is at [2., 2.], + # and with bounds it's at [1.5, smth]. + bounds = ([1., 0], [1.5, 3.]) + for method in [None, 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds, + method=method) + assert_allclose(popt[0], 1.5) + + # With bounds, the starting estimate is feasible. + popt, pcov = curve_fit(f, xdata, ydata, method='trf', + bounds=([0., 0], [0.6, np.inf])) + assert_allclose(popt[0], 0.6) + + # method='lm' doesn't support bounds. + assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds, + method='lm') + + def test_bounds_p0(self): + # This test is for issue #5719. The problem was that an initial guess + # was ignored when 'trf' or 'dogbox' methods were invoked. + def f(x, a): + return np.sin(x + a) + + xdata = np.linspace(-2*np.pi, 2*np.pi, 40) + ydata = np.sin(xdata) + bounds = (-3 * np.pi, 3 * np.pi) + for method in ['trf', 'dogbox']: + popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi) + popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi, + bounds=bounds, method=method) + + # If the initial guess is ignored, then popt_2 would be close 0. + assert_allclose(popt_1, popt_2) + + def test_jac(self): + # Test that Jacobian callable is handled correctly and + # weighted if sigma is provided. + def f(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + xdata = np.linspace(0, 1, 11) + ydata = f(xdata, 2., 2.) + + # Test numerical options for least_squares backend. + for method in ['trf', 'dogbox']: + for scheme in ['2-point', '3-point', 'cs']: + popt, pcov = curve_fit(f, xdata, ydata, jac=scheme, + method=method) + assert_allclose(popt, [2, 2]) + + # Test the analytic option. + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac) + assert_allclose(popt, [2, 2]) + + # Now add an outlier and provide sigma. 
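+        # The large sigma entry down-weights the outlier, so the fit should
+        # stay close to the true parameters [2, 2].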
+ ydata[5] = 100 + sigma = np.ones(xdata.shape[0]) + sigma[5] = 200 + for method in ['lm', 'trf', 'dogbox']: + popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method, + jac=jac) + # Still the optimization process is influenced somehow, + # have to set rtol=1e-3. + assert_allclose(popt, [2, 2], rtol=1e-3) + + def test_maxfev_and_bounds(self): + # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq) + # but with bounds, the parameter is `max_nfev` (via least_squares) + x = np.arange(0, 10) + y = 2*x + popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100) + popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100) + + assert_allclose(popt1, 2, atol=1e-14) + assert_allclose(popt2, 2, atol=1e-14) + + def test_curvefit_simplecovariance(self): + + def func(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + np.random.seed(0) + xdata = np.linspace(0, 4, 50) + y = func(xdata, 2.5, 1.3) + ydata = y + 0.2 * np.random.normal(size=len(xdata)) + + sigma = np.zeros(len(xdata)) + 0.2 + covar = np.diag(sigma**2) + + for jac1, jac2 in [(jac, jac), (None, None)]: + for absolute_sigma in [False, True]: + popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, + jac=jac1, absolute_sigma=absolute_sigma) + popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar, + jac=jac2, absolute_sigma=absolute_sigma) + + assert_allclose(popt1, popt2, atol=1e-14) + assert_allclose(pcov1, pcov2, atol=1e-14) + + def test_curvefit_covariance(self): + + def funcp(x, a, b): + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]]) + return rotn.dot(a * np.exp(-b*x)) + + def jacp(x, a, b): + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]]) + e = np.exp(-b*x) + return rotn.dot(np.vstack((e, -a * x * e)).T) + + def func(x, a, b): + return a * np.exp(-b*x) + + def jac(x, a, b): + e = np.exp(-b*x) + return np.vstack((e, -a * x * e)).T + + np.random.seed(0) + xdata = np.arange(1, 4) + y = func(xdata, 2.5, 1.0) + ydata = y + 0.2 * np.random.normal(size=len(xdata)) + sigma = np.zeros(len(xdata)) + 0.2 + covar = np.diag(sigma**2) + # Get a rotation matrix, and obtain ydatap = R ydata + # Chisq = ydata^T C^{-1} ydata + # = ydata^T R^T R C^{-1} R^T R ydata + # = ydatap^T Cp^{-1} ydatap + # Cp^{-1} = R C^{-1} R^T + # Cp = R C R^T, since R^-1 = R^T + rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]]) + ydatap = rotn.dot(ydata) + covarp = rotn.dot(covar).dot(rotn.T) + + for jac1, jac2 in [(jac, jacp), (None, None)]: + for absolute_sigma in [False, True]: + popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, + jac=jac1, absolute_sigma=absolute_sigma) + popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp, + jac=jac2, absolute_sigma=absolute_sigma) + + assert_allclose(popt1, popt2, atol=1e-14) + assert_allclose(pcov1, pcov2, atol=1e-14) + + +class TestFixedPoint(object): + + def test_scalar_trivial(self): + # f(x) = 2x; fixed point should be x=0 + def func(x): + return 2.0*x + x0 = 1.0 + x = fixed_point(func, x0) + assert_almost_equal(x, 0.0) + + def test_scalar_basic1(self): + # f(x) = x**2; x0=1.05; fixed point should be x=1 + def func(x): + return x**2 + x0 = 1.05 + x = fixed_point(func, x0) + assert_almost_equal(x, 1.0) + + def test_scalar_basic2(self): + # f(x) = x**0.5; x0=1.05; fixed point should be x=1 + def func(x): + return x**0.5 + x0 = 1.05 + 
x = fixed_point(func, x0) + assert_almost_equal(x, 1.0) + + def test_array_trivial(self): + def func(x): + return 2.0*x + x0 = [0.3, 0.15] + olderr = np.seterr(all='ignore') + try: + x = fixed_point(func, x0) + finally: + np.seterr(**olderr) + assert_almost_equal(x, [0.0, 0.0]) + + def test_array_basic1(self): + # f(x) = c * x**2; fixed point should be x=1/c + def func(x, c): + return c * x**2 + c = array([0.75, 1.0, 1.25]) + x0 = [1.1, 1.15, 0.9] + olderr = np.seterr(all='ignore') + try: + x = fixed_point(func, x0, args=(c,)) + finally: + np.seterr(**olderr) + assert_almost_equal(x, 1.0/c) + + def test_array_basic2(self): + # f(x) = c * x**0.5; fixed point should be x=c**2 + def func(x, c): + return c * x**0.5 + c = array([0.75, 1.0, 1.25]) + x0 = [0.8, 1.1, 1.1] + x = fixed_point(func, x0, args=(c,)) + assert_almost_equal(x, c**2) + + def test_lambertw(self): + # python-list/2010-December/594592.html + xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0, + args=(), xtol=1e-12, maxiter=500) + assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0) + assert_allclose(xxroot, lambertw(1)/2) + + def test_no_acceleration(self): + # github issue 5460 + ks = 2 + kl = 6 + m = 1.3 + n0 = 1.001 + i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1)) + + def func(n): + return np.log(kl/ks/n) / np.log((i0*n/(n - 1))) + 1 + + n = fixed_point(func, n0, method='iteration') + assert_allclose(n, m) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minpack.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minpack.pyc new file mode 100644 index 0000000..55d656c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_minpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nnls.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nnls.py new file mode 100644 index 0000000..a02e95f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nnls.py @@ -0,0 +1,36 @@ +""" Unit tests for nonnegative least squares +Author: Uwe Schmitt +Sep 2008 +""" +from __future__ import division, print_function, absolute_import + +import numpy as np + +from numpy.testing import assert_ +from pytest import raises as assert_raises + +from scipy.optimize import nnls +from numpy import arange, dot +from numpy.linalg import norm + + +class TestNNLS(object): + + def test_nnls(self): + a = arange(25.0).reshape(-1,5) + x = arange(5.0) + y = dot(a,x) + x, res = nnls(a,y) + assert_(res < 1e-7) + assert_(norm(dot(a,x)-y) < 1e-7) + + def test_maxiter(self): + # test that maxiter argument does stop iterations + # NB: did not manage to find a test case where the default value + # of maxiter is not sufficient, so use a too-small value + rndm = np.random.RandomState(1234) + a = rndm.uniform(size=(100, 100)) + b = rndm.uniform(size=100) + with assert_raises(RuntimeError): + nnls(a, b, maxiter=1) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nnls.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nnls.pyc new file mode 100644 index 0000000..df52773 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nnls.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nonlin.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nonlin.py new file mode 100644 index 0000000..ac9f4b8 --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nonlin.py @@ -0,0 +1,448 @@ +""" Unit tests for nonlinear solvers +Author: Ondrej Certik +May 2007 +""" +from __future__ import division, print_function, absolute_import + +from numpy.testing import assert_ +import pytest + +from scipy._lib.six import xrange +from scipy.optimize import nonlin, root +from numpy import matrix, diag, dot +from numpy.linalg import inv +import numpy as np + +from .test_minpack import pressure_network + +SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden, + 'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing, + 'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2, + 'krylov': nonlin.newton_krylov} +MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1, + 'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov} + +#------------------------------------------------------------------------------- +# Test problems +#------------------------------------------------------------------------------- + + +def F(x): + x = np.asmatrix(x).T + d = matrix(diag([3,2,1.5,1,0.5])) + c = 0.01 + f = -d*x - c*float(x.T*x)*x + return f + + +F.xin = [1,1,1,1,1] +F.KNOWN_BAD = {} + + +def F2(x): + return x + + +F2.xin = [1,2,3,4,5,6] +F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing, + 'excitingmixing': nonlin.excitingmixing} + + +def F2_lucky(x): + return x + + +F2_lucky.xin = [0,0,0,0,0,0] +F2_lucky.KNOWN_BAD = {} + + +def F3(x): + A = np.mat('-2 1 0; 1 -2 1; 0 1 -2') + b = np.mat('1 2 3') + return np.dot(A, x) - b + + +F3.xin = [1,2,3] +F3.KNOWN_BAD = {} + + +def F4_powell(x): + A = 1e4 + return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)] + + +F4_powell.xin = [-1, -2] +F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing, + 'excitingmixing': nonlin.excitingmixing, + 'diagbroyden': nonlin.diagbroyden} + + +def F5(x): + return pressure_network(x, 4, np.array([.5, .5, .5, .5])) + + +F5.xin = [2., 0, 2, 0] +F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing, + 'linearmixing': nonlin.linearmixing, + 'diagbroyden': nonlin.diagbroyden} + + +def F6(x): + x1, x2 = x + J0 = np.array([[-4.256, 14.7], + [0.8394989, 0.59964207]]) + v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6, + np.sin(x2 * np.exp(x1) - 1)]) + return -np.linalg.solve(J0, v) + + +F6.xin = [-0.5, 1.4] +F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing, + 'linearmixing': nonlin.linearmixing, + 'diagbroyden': nonlin.diagbroyden} + + +#------------------------------------------------------------------------------- +# Tests +#------------------------------------------------------------------------------- + + +class TestNonlin(object): + """ + Check the Broyden methods for a few test problems. + + broyden1, broyden2, and newton_krylov must succeed for + all functions. Some of the others don't -- tests in KNOWN_BAD are skipped. 
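+
+    Each test problem carries its own starting point (``f.xin``) and a
+    ``KNOWN_BAD`` dict of the solvers expected to fail on it.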
+ + """ + + def _check_nonlin_func(self, f, func, f_tol=1e-2): + x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0) + assert_(np.absolute(f(x)).max() < f_tol) + + def _check_root(self, f, method, f_tol=1e-2): + res = root(f, f.xin, method=method, + options={'ftol': f_tol, 'maxiter': 200, 'disp': 0}) + assert_(np.absolute(res.fun).max() < f_tol) + + @pytest.mark.xfail + def _check_func_fail(self, *a, **kw): + pass + + def test_problem_nonlin(self): + for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]: + for func in SOLVERS.values(): + if func in f.KNOWN_BAD.values(): + if func in MUST_WORK.values(): + self._check_func_fail(f, func) + continue + self._check_nonlin_func(f, func) + + def test_tol_norm_called(self): + # Check that supplying tol_norm keyword to nonlin_solve works + self._tol_norm_used = False + + def local_norm_func(x): + self._tol_norm_used = True + return np.absolute(x).max() + + nonlin.newton_krylov(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0, + tol_norm=local_norm_func) + assert_(self._tol_norm_used) + + def test_problem_root(self): + for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]: + for meth in SOLVERS: + if meth in f.KNOWN_BAD: + if meth in MUST_WORK: + self._check_func_fail(f, meth) + continue + self._check_root(f, meth) + + +class TestSecant(object): + """Check that some Jacobian approximations satisfy the secant condition""" + + xs = [np.array([1,2,3,4,5], float), + np.array([2,3,4,5,1], float), + np.array([3,4,5,1,2], float), + np.array([4,5,1,2,3], float), + np.array([9,1,9,1,3], float), + np.array([0,1,9,1,3], float), + np.array([5,5,7,1,1], float), + np.array([1,2,7,5,1], float),] + fs = [x**2 - 1 for x in xs] + + def _check_secant(self, jac_cls, npoints=1, **kw): + """ + Check that the given Jacobian approximation satisfies secant + conditions for last `npoints` points. 
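+
+        Concretely, for each recent step the update must satisfy
+        jac.solve(df) == dx, i.e. the approximation B obeys B @ dx == df.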
+ """ + jac = jac_cls(**kw) + jac.setup(self.xs[0], self.fs[0], None) + for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): + jac.update(x, f) + + for k in xrange(min(npoints, j+1)): + dx = self.xs[j-k+1] - self.xs[j-k] + df = self.fs[j-k+1] - self.fs[j-k] + assert_(np.allclose(dx, jac.solve(df))) + + # Check that the `npoints` secant bound is strict + if j >= npoints: + dx = self.xs[j-npoints+1] - self.xs[j-npoints] + df = self.fs[j-npoints+1] - self.fs[j-npoints] + assert_(not np.allclose(dx, jac.solve(df))) + + def test_broyden1(self): + self._check_secant(nonlin.BroydenFirst) + + def test_broyden2(self): + self._check_secant(nonlin.BroydenSecond) + + def test_broyden1_update(self): + # Check that BroydenFirst update works as for a dense matrix + jac = nonlin.BroydenFirst(alpha=0.1) + jac.setup(self.xs[0], self.fs[0], None) + + B = np.identity(5) * (-1/0.1) + + for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): + df = f - self.fs[last_j] + dx = x - self.xs[last_j] + B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx) + jac.update(x, f) + assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13)) + + def test_broyden2_update(self): + # Check that BroydenSecond update works as for a dense matrix + jac = nonlin.BroydenSecond(alpha=0.1) + jac.setup(self.xs[0], self.fs[0], None) + + H = np.identity(5) * (-0.1) + + for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): + df = f - self.fs[last_j] + dx = x - self.xs[last_j] + H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df) + jac.update(x, f) + assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13)) + + def test_anderson(self): + # Anderson mixing (with w0=0) satisfies secant conditions + # for the last M iterates, see [Ey]_ + # + # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). 
+ self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3) + + +class TestLinear(object): + """Solve a linear equation; + some methods find the exact solution in a finite number of steps""" + + def _check(self, jac, N, maxiter, complex=False, **kw): + np.random.seed(123) + + A = np.random.randn(N, N) + if complex: + A = A + 1j*np.random.randn(N, N) + b = np.random.randn(N) + if complex: + b = b + 1j*np.random.randn(N) + + def func(x): + return dot(A, x) - b + + sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter, + f_tol=1e-6, line_search=None, verbose=0) + assert_(np.allclose(dot(A, sol), b, atol=1e-6)) + + def test_broyden1(self): + # Broyden methods solve linear systems exactly in 2*N steps + self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False) + self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True) + + def test_broyden2(self): + # Broyden methods solve linear systems exactly in 2*N steps + self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False) + self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True) + + def test_anderson(self): + # Anderson is rather similar to Broyden, if given enough storage space + self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False) + self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True) + + def test_krylov(self): + # Krylov methods solve linear systems exactly in N inner steps + self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10) + self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10) + + +class TestJacobianDotSolve(object): + """Check that solve/dot methods in Jacobian approximations are consistent""" + + def _func(self, x): + return x**2 - 1 + np.dot(self.A, x) + + def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw): + np.random.seed(123) + + N = 7 + + def rand(*a): + q = np.random.rand(*a) + if complex: + q = q + 1j*np.random.rand(*a) + return q + + def assert_close(a, b, msg): + d = abs(a - b).max() + f = tol + abs(b).max()*tol + if d > f: + raise AssertionError('%s: err %g' % (msg, d)) + + self.A = rand(N, N) + + # initialize + x0 = np.random.rand(N) + jac = jac_cls(**kw) + jac.setup(x0, self._func(x0), self._func) + + # check consistency + for k in xrange(2*N): + v = rand(N) + + if hasattr(jac, '__array__'): + Jd = np.array(jac) + if hasattr(jac, 'solve'): + Gv = jac.solve(v) + Gv2 = np.linalg.solve(Jd, v) + assert_close(Gv, Gv2, 'solve vs array') + if hasattr(jac, 'rsolve'): + Gv = jac.rsolve(v) + Gv2 = np.linalg.solve(Jd.T.conj(), v) + assert_close(Gv, Gv2, 'rsolve vs array') + if hasattr(jac, 'matvec'): + Jv = jac.matvec(v) + Jv2 = np.dot(Jd, v) + assert_close(Jv, Jv2, 'dot vs array') + if hasattr(jac, 'rmatvec'): + Jv = jac.rmatvec(v) + Jv2 = np.dot(Jd.T.conj(), v) + assert_close(Jv, Jv2, 'rmatvec vs array') + + if hasattr(jac, 'matvec') and hasattr(jac, 'solve'): + Jv = jac.matvec(v) + Jv2 = jac.solve(jac.matvec(Jv)) + assert_close(Jv, Jv2, 'dot vs solve') + + if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'): + Jv = jac.rmatvec(v) + Jv2 = jac.rmatvec(jac.rsolve(Jv)) + assert_close(Jv, Jv2, 'rmatvec vs rsolve') + + x = rand(N) + jac.update(x, self._func(x)) + + def test_broyden1(self): + self._check_dot(nonlin.BroydenFirst, complex=False) + self._check_dot(nonlin.BroydenFirst, complex=True) + + def test_broyden2(self): + self._check_dot(nonlin.BroydenSecond, complex=False) + self._check_dot(nonlin.BroydenSecond, complex=True) + + def test_anderson(self): + self._check_dot(nonlin.Anderson, complex=False) + self._check_dot(nonlin.Anderson, complex=True) + + def 
test_diagbroyden(self): + self._check_dot(nonlin.DiagBroyden, complex=False) + self._check_dot(nonlin.DiagBroyden, complex=True) + + def test_linearmixing(self): + self._check_dot(nonlin.LinearMixing, complex=False) + self._check_dot(nonlin.LinearMixing, complex=True) + + def test_excitingmixing(self): + self._check_dot(nonlin.ExcitingMixing, complex=False) + self._check_dot(nonlin.ExcitingMixing, complex=True) + + def test_krylov(self): + self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3) + self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3) + + +class TestNonlinOldTests(object): + """ Test case for a simple constrained entropy maximization problem + (the machine translation example of Berger et al in + Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) + """ + + def test_broyden1(self): + x = nonlin.broyden1(F,F.xin,iter=12,alpha=1) + assert_(nonlin.norm(x) < 1e-9) + assert_(nonlin.norm(F(x)) < 1e-9) + + def test_broyden2(self): + x = nonlin.broyden2(F,F.xin,iter=12,alpha=1) + assert_(nonlin.norm(x) < 1e-9) + assert_(nonlin.norm(F(x)) < 1e-9) + + def test_anderson(self): + x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5) + assert_(nonlin.norm(x) < 0.33) + + def test_linearmixing(self): + x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5) + assert_(nonlin.norm(x) < 1e-7) + assert_(nonlin.norm(F(x)) < 1e-7) + + def test_exciting(self): + x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5) + assert_(nonlin.norm(x) < 1e-5) + assert_(nonlin.norm(F(x)) < 1e-5) + + def test_diagbroyden(self): + x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1) + assert_(nonlin.norm(x) < 1e-8) + assert_(nonlin.norm(F(x)) < 1e-8) + + def test_root_broyden1(self): + res = root(F, F.xin, method='broyden1', + options={'nit': 12, 'jac_options': {'alpha': 1}}) + assert_(nonlin.norm(res.x) < 1e-9) + assert_(nonlin.norm(res.fun) < 1e-9) + + def test_root_broyden2(self): + res = root(F, F.xin, method='broyden2', + options={'nit': 12, 'jac_options': {'alpha': 1}}) + assert_(nonlin.norm(res.x) < 1e-9) + assert_(nonlin.norm(res.fun) < 1e-9) + + def test_root_anderson(self): + res = root(F, F.xin, method='anderson', + options={'nit': 12, + 'jac_options': {'alpha': 0.03, 'M': 5}}) + assert_(nonlin.norm(res.x) < 0.33) + + def test_root_linearmixing(self): + res = root(F, F.xin, method='linearmixing', + options={'nit': 60, + 'jac_options': {'alpha': 0.5}}) + assert_(nonlin.norm(res.x) < 1e-7) + assert_(nonlin.norm(res.fun) < 1e-7) + + def test_root_excitingmixing(self): + res = root(F, F.xin, method='excitingmixing', + options={'nit': 20, + 'jac_options': {'alpha': 0.5}}) + assert_(nonlin.norm(res.x) < 1e-5) + assert_(nonlin.norm(res.fun) < 1e-5) + + def test_root_diagbroyden(self): + res = root(F, F.xin, method='diagbroyden', + options={'nit': 11, + 'jac_options': {'alpha': 1}}) + assert_(nonlin.norm(res.x) < 1e-8) + assert_(nonlin.norm(res.fun) < 1e-8) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nonlin.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nonlin.pyc new file mode 100644 index 0000000..c978d83 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_nonlin.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_optimize.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_optimize.py new file mode 100644 index 0000000..e50c153 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_optimize.py @@ 
-0,0 +1,1345 @@ +""" +Unit tests for optimization routines from optimize.py + +Authors: + Ed Schofield, Nov 2005 + Andrew Straw, April 2008 + +To run it in its simplest form:: + nosetests test_optimize.py + +""" +from __future__ import division, print_function, absolute_import + +import itertools + +import numpy as np +from numpy.testing import (assert_allclose, assert_equal, + assert_, + assert_almost_equal, assert_warns, + assert_array_less) +import pytest +from pytest import raises as assert_raises + +from scipy._lib._numpy_compat import suppress_warnings +from scipy import optimize + + +def test_check_grad(): + # Verify if check_grad is able to estimate the derivative of the + # logistic function. + + def logit(x): + return 1 / (1 + np.exp(-x)) + + def der_logit(x): + return np.exp(-x) / (1 + np.exp(-x))**2 + + x0 = np.array([1.5]) + + r = optimize.check_grad(logit, der_logit, x0) + assert_almost_equal(r, 0) + + r = optimize.check_grad(logit, der_logit, x0, epsilon=1e-6) + assert_almost_equal(r, 0) + + # Check if the epsilon parameter is being considered. + r = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0) + assert_(r > 1e-7) + + +class CheckOptimize(object): + """ Base test case for a simple constrained entropy maximization problem + (the machine translation example of Berger et al in + Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) + """ + def setup_method(self): + self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]]) + self.K = np.array([1., 0.3, 0.5]) + self.startparams = np.zeros(3, np.float64) + self.solution = np.array([0., -0.524869316, 0.487525860]) + self.maxiter = 1000 + self.funccalls = 0 + self.gradcalls = 0 + self.trace = [] + + def func(self, x): + self.funccalls += 1 + if self.funccalls > 6000: + raise RuntimeError("too many iterations in optimization routine") + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + f = logZ - np.dot(self.K, x) + self.trace.append(x) + return f + + def grad(self, x): + self.gradcalls += 1 + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + p = np.exp(log_pdot - logZ) + return np.dot(self.F.transpose(), p) - self.K + + def hess(self, x): + log_pdot = np.dot(self.F, x) + logZ = np.log(sum(np.exp(log_pdot))) + p = np.exp(log_pdot - logZ) + return np.dot(self.F.T, + np.dot(np.diag(p), self.F - np.dot(self.F.T, p))) + + def hessp(self, x, p): + return np.dot(self.hess(x), p) + + +class CheckOptimizeParameterized(CheckOptimize): + + def test_cg(self): + # conjugate gradient optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='CG', jac=self.grad, + options=opts) + params, fopt, func_calls, grad_calls, warnflag = \ + res['x'], res['fun'], res['nfev'], res['njev'], res['status'] + else: + retval = optimize.fmin_cg(self.func, self.startparams, + self.grad, (), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, func_calls, grad_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # Scipy 0.7.0. Don't allow them to increase. 
+ assert_(self.funccalls == 9, self.funccalls) + assert_(self.gradcalls == 7, self.gradcalls) + + # Ensure that the function behaves the same; this is from Scipy 0.7.0 + assert_allclose(self.trace[2:4], + [[0, -0.5, 0.5], + [0, -5.05700028e-01, 4.95985862e-01]], + atol=1e-14, rtol=1e-7) + + def test_cg_cornercase(self): + def f(r): + return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2 + + # Check several initial guesses. (Too far away from the + # minimum, the function ends up in the flat region of exp.) + for x0 in np.linspace(-0.75, 3, 71): + sol = optimize.minimize(f, [x0], method='CG') + assert_(sol.success) + assert_allclose(sol.x, [0.5], rtol=1e-5) + + def test_bfgs(self): + # Broyden-Fletcher-Goldfarb-Shanno optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, + jac=self.grad, method='BFGS', args=(), + options=opts) + + params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = ( + res['x'], res['fun'], res['jac'], res['hess_inv'], + res['nfev'], res['njev'], res['status']) + else: + retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # Scipy 0.7.0. Don't allow them to increase. + assert_(self.funccalls == 10, self.funccalls) + assert_(self.gradcalls == 8, self.gradcalls) + + # Ensure that the function behaves the same; this is from Scipy 0.7.0 + assert_allclose(self.trace[6:8], + [[0, -5.25060743e-01, 4.87748473e-01], + [0, -5.24885582e-01, 4.87530347e-01]], + atol=1e-14, rtol=1e-7) + + def test_bfgs_infinite(self): + # Test corner case where -Inf is the minimum. See gh-2019. + func = lambda x: -np.e**-x + fprime = lambda x: -func(x) + x0 = [0] + olderr = np.seterr(over='ignore') + try: + if self.use_wrapper: + opts = {'disp': self.disp} + x = optimize.minimize(func, x0, jac=fprime, method='BFGS', + args=(), options=opts)['x'] + else: + x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp) + assert_(not np.isfinite(func(x))) + finally: + np.seterr(**olderr) + + def test_powell(self): + # Powell (direction set) optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Powell', options=opts) + params, fopt, direc, numiter, func_calls, warnflag = ( + res['x'], res['fun'], res['direc'], res['nit'], + res['nfev'], res['status']) + else: + retval = optimize.fmin_powell(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, direc, numiter, func_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # Scipy 0.7.0. Don't allow them to increase. + # + # However, some leeway must be added: the exact evaluation + # count is sensitive to numerical error, and floating-point + # computations are not bit-for-bit reproducible across + # machines, and when using e.g. MKL, data alignment + # etc. affect the rounding error. 
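+        # The +20 slack in the assertion below encodes that leeway.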
+ # + assert_(self.funccalls <= 116 + 20, self.funccalls) + assert_(self.gradcalls == 0, self.gradcalls) + + # Ensure that the function behaves the same; this is from Scipy 0.7.0 + assert_allclose(self.trace[34:39], + [[0.72949016, -0.44156936, 0.47100962], + [0.72949016, -0.44156936, 0.48052496], + [1.45898031, -0.88313872, 0.95153458], + [0.72949016, -0.44156936, 0.47576729], + [1.72949016, -0.44156936, 0.47576729]], + atol=1e-14, rtol=1e-7) + + def test_neldermead(self): + # Nelder-Mead simplex algorithm + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Nelder-mead', options=opts) + params, fopt, numiter, func_calls, warnflag, final_simplex = ( + res['x'], res['fun'], res['nit'], res['nfev'], + res['status'], res['final_simplex']) + else: + retval = optimize.fmin(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=self.disp, + retall=False) + (params, fopt, numiter, func_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # Scipy 0.7.0. Don't allow them to increase. + assert_(self.funccalls == 167, self.funccalls) + assert_(self.gradcalls == 0, self.gradcalls) + + # Ensure that the function behaves the same; this is from Scipy 0.7.0 + assert_allclose(self.trace[76:78], + [[0.1928968, -0.62780447, 0.35166118], + [0.19572515, -0.63648426, 0.35838135]], + atol=1e-14, rtol=1e-7) + + def test_neldermead_initial_simplex(self): + # Nelder-Mead simplex algorithm + simplex = np.zeros((4, 3)) + simplex[...] = self.startparams + for j in range(3): + simplex[j+1,j] += 0.1 + + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': False, + 'return_all': True, 'initial_simplex': simplex} + res = optimize.minimize(self.func, self.startparams, args=(), + method='Nelder-mead', options=opts) + params, fopt, numiter, func_calls, warnflag = \ + res['x'], res['fun'], res['nit'], res['nfev'], \ + res['status'] + assert_allclose(res['allvecs'][0], simplex[0]) + else: + retval = optimize.fmin(self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=False, retall=False, + initial_simplex=simplex) + + (params, fopt, numiter, func_calls, warnflag) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # Scipy 0.17.0. Don't allow them to increase. + assert_(self.funccalls == 100, self.funccalls) + assert_(self.gradcalls == 0, self.gradcalls) + + # Ensure that the function behaves the same; this is from Scipy 0.15.0 + assert_allclose(self.trace[50:52], + [[0.14687474, -0.5103282, 0.48252111], + [0.14474003, -0.5282084, 0.48743951]], + atol=1e-14, rtol=1e-7) + + def test_neldermead_initial_simplex_bad(self): + # Check it fails with a bad simplices + bad_simplices = [] + + simplex = np.zeros((3, 2)) + simplex[...] 
= self.startparams[:2] + for j in range(2): + simplex[j+1,j] += 0.1 + bad_simplices.append(simplex) + + simplex = np.zeros((3, 3)) + bad_simplices.append(simplex) + + for simplex in bad_simplices: + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': False, + 'return_all': False, 'initial_simplex': simplex} + assert_raises(ValueError, + optimize.minimize, self.func, self.startparams, args=(), + method='Nelder-mead', options=opts) + else: + assert_raises(ValueError, optimize.fmin, self.func, self.startparams, + args=(), maxiter=self.maxiter, + full_output=True, disp=False, retall=False, + initial_simplex=simplex) + + def test_ncg_negative_maxiter(self): + # Regression test for gh-8241 + opts = {'maxiter': -1} + result = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + args=(), options=opts) + assert_(result.status == 1) + + def test_ncg(self): + # line-search Newton conjugate gradient optimization routine + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # Scipy 0.7.0. Don't allow them to increase. + assert_(self.funccalls == 7, self.funccalls) + assert_(self.gradcalls <= 22, self.gradcalls) # 0.13.0 + #assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0 + #assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0 + #assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0 + + # Ensure that the function behaves the same; this is from Scipy 0.7.0 + assert_allclose(self.trace[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + def test_ncg_hess(self): + # Newton conjugate gradient with Hessian + if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + hess=self.hess, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + fhess=self.hess, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # Scipy 0.7.0. Don't allow them to increase. + assert_(self.funccalls == 7, self.funccalls) + assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0 + # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0 + # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0 + + # Ensure that the function behaves the same; this is from Scipy 0.7.0 + assert_allclose(self.trace[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + def test_ncg_hessp(self): + # Newton conjugate gradient with Hessian times a vector p. 
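+        # hessp(x, p) returns the Hessian-vector product H(x) @ p (see
+        # CheckOptimize.hessp), so the full Hessian is never formed or stored.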
+ if self.use_wrapper: + opts = {'maxiter': self.maxiter, 'disp': self.disp, + 'return_all': False} + retval = optimize.minimize(self.func, self.startparams, + method='Newton-CG', jac=self.grad, + hessp=self.hessp, + args=(), options=opts)['x'] + else: + retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, + fhess_p=self.hessp, + args=(), maxiter=self.maxiter, + full_output=False, disp=self.disp, + retall=False) + + params = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # Scipy 0.7.0. Don't allow them to increase. + assert_(self.funccalls == 7, self.funccalls) + assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0 + # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0 + # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0 + + # Ensure that the function behaves the same; this is from Scipy 0.7.0 + assert_allclose(self.trace[3:5], + [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], + [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], + atol=1e-6, rtol=1e-7) + + +def test_neldermead_xatol_fatol(): + # gh4484 + # test we can call with fatol, xatol specified + func = lambda x: x[0]**2 + x[1]**2 + + optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2, + xatol=1e-3, fatol=1e-3) + assert_warns(DeprecationWarning, + optimize._minimize._minimize_neldermead, + func, [1, 1], xtol=1e-3, ftol=1e-3, maxiter=2) + +def test_neldermead_adaptive(): + func = lambda x: np.sum(x**2) + p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159, 0.32308456, + 0.9692297, 0.4471682, 0.77411992, 0.80441652, 0.35994957, 0.75487856, + 0.99973421, 0.65063887, 0.09626474] + + res = optimize.minimize(func, p0, method='Nelder-Mead') + assert_equal(res.success, False) + + res = optimize.minimize(func, p0, method='Nelder-Mead', + options={'adaptive':True}) + assert_equal(res.success, True) + +class TestOptimizeWrapperDisp(CheckOptimizeParameterized): + use_wrapper = True + disp = True + + +class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized): + use_wrapper = True + disp = False + + +class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized): + use_wrapper = False + disp = True + + +class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized): + use_wrapper = False + disp = False + + +class TestOptimizeSimple(CheckOptimize): + + def test_bfgs_nan(self): + # Test corner case where nan is fed to optimizer. See gh-2067. + func = lambda x: x + fprime = lambda x: np.ones_like(x) + x0 = [np.nan] + with np.errstate(over='ignore', invalid='ignore'): + x = optimize.fmin_bfgs(func, x0, fprime, disp=False) + assert_(np.isnan(func(x))) + + def test_bfgs_nan_return(self): + # Test corner cases where fun returns NaN. See gh-4793. + + # First case: NaN from first call. + func = lambda x: np.nan + with np.errstate(invalid='ignore'): + result = optimize.minimize(func, 0) + + assert_(np.isnan(result['fun'])) + assert_(result['success'] is False) + + # Second case: NaN from second call. + func = lambda x: 0 if x == 0 else np.nan + fprime = lambda x: np.ones_like(x) # Steer away from zero. + with np.errstate(invalid='ignore'): + result = optimize.minimize(func, 0, jac=fprime) + + assert_(np.isnan(result['fun'])) + assert_(result['success'] is False) + + def test_bfgs_numerical_jacobian(self): + # BFGS with numerical jacobian and a vector epsilon parameter. 
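+        # With a vector epsilon, gradient component i is approximated by a
+        # forward difference with its own step size:
+        #     (f(x + epsilon[i]*e_i) - f(x)) / epsilon[i]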
+ # define the epsilon parameter using a random vector + epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution)) + + params = optimize.fmin_bfgs(self.func, self.startparams, + epsilon=epsilon, args=(), + maxiter=self.maxiter, disp=False) + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_bfgs_gh_2169(self): + def f(x): + if x < 0: + return 1.79769313e+308 + else: + return x + 1./x + xs = optimize.fmin_bfgs(f, [10.], disp=False) + assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4) + + def test_l_bfgs_b(self): + # limited-memory bound-constrained BFGS algorithm + retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, + self.grad, args=(), + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + # Ensure that function call counts are 'known good'; these are from + # Scipy 0.7.0. Don't allow them to increase. + assert_(self.funccalls == 7, self.funccalls) + assert_(self.gradcalls == 5, self.gradcalls) + + # Ensure that the function behaves the same; this is from Scipy 0.7.0 + assert_allclose(self.trace[3:5], + [[0., -0.52489628, 0.48753042], + [0., -0.52489628, 0.48753042]], + atol=1e-14, rtol=1e-7) + + def test_l_bfgs_b_numjac(self): + # L-BFGS-B with numerical jacobian + retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, + approx_grad=True, + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_l_bfgs_b_funjac(self): + # L-BFGS-B with combined objective function and jacobian + def fun(x): + return self.func(x), self.grad(x) + + retval = optimize.fmin_l_bfgs_b(fun, self.startparams, + maxiter=self.maxiter) + + (params, fopt, d) = retval + + assert_allclose(self.func(params), self.func(self.solution), + atol=1e-6) + + def test_l_bfgs_b_maxiter(self): + # gh7854 + # Ensure that not more than maxiters are ever run. + class Callback(object): + def __init__(self): + self.nit = 0 + self.fun = None + self.x = None + + def __call__(self, x): + self.x = x + self.fun = optimize.rosen(x) + self.nit += 1 + + c = Callback() + res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b', + callback=c, options={'maxiter': 5}) + + assert_equal(res.nit, 5) + assert_almost_equal(res.x, c.x) + assert_almost_equal(res.fun, c.fun) + assert_equal(res.status, 1) + assert_(res.success is False) + assert_equal(res.message.decode(), 'STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT') + + def test_minimize_l_bfgs_b(self): + # Minimize with L-BFGS-B method + opts = {'disp': False, 'maxiter': self.maxiter} + r = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', jac=self.grad, + options=opts) + assert_allclose(self.func(r.x), self.func(self.solution), + atol=1e-6) + # approximate jacobian + ra = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', options=opts) + assert_allclose(self.func(ra.x), self.func(self.solution), + atol=1e-6) + # check that function evaluations in approximate jacobian are counted + assert_(ra.nfev > r.nfev) + + def test_minimize_l_bfgs_b_ftol(self): + # Check that the `ftol` parameter in l_bfgs_b works as expected + v0 = None + for tol in [1e-1, 1e-4, 1e-7, 1e-10]: + opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol} + sol = optimize.minimize(self.func, self.startparams, + method='L-BFGS-B', jac=self.grad, + options=opts) + v = self.func(sol.x) + + if v0 is None: + v0 = v + else: + assert_(v < v0) + + assert_allclose(v, self.func(self.solution), rtol=tol) + + def test_minimize_l_bfgs_maxls(self): + # check that the maxls is passed down to the Fortran routine + sol = optimize.minimize(optimize.rosen, np.array([-1.2,1.0]), + method='L-BFGS-B', jac=optimize.rosen_der, + options={'disp': False, 'maxls': 1}) + assert_(not sol.success) + + def test_minimize_l_bfgs_b_maxfun_interruption(self): + # gh-6162 + f = optimize.rosen + g = optimize.rosen_der + values = [] + x0 = np.ones(7) * 1000 + + def objfun(x): + value = f(x) + values.append(value) + return value + + # Look for an interesting test case. + # Request a maxfun that stops at a particularly bad function + # evaluation somewhere between 100 and 300 evaluations. + low, medium, high = 30, 100, 300 + optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high) + v, k = max((y, i) for i, y in enumerate(values[medium:])) + maxfun = medium + k + # If the minimization strategy is reasonable, + # the minimize() result should not be worse than the best + # of the first 30 function evaluations. + target = min(values[:low]) + xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun) + assert_array_less(fmin, target) + + def test_custom(self): + # This function comes from the documentation example. 
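+        # custmin is a naive coordinate search: starting from x0 it tries
+        # +/- stepsize along each coordinate, keeps any improvement, and stops
+        # once a full sweep yields none (or maxiter/maxfev is reached).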
+ def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1, + maxiter=100, callback=None, **options): + bestx = x0 + besty = fun(x0) + funcalls = 1 + niter = 0 + improved = True + stop = False + + while improved and not stop and niter < maxiter: + improved = False + niter += 1 + for dim in range(np.size(x0)): + for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]: + testx = np.copy(bestx) + testx[dim] = s + testy = fun(testx, *args) + funcalls += 1 + if testy < besty: + besty = testy + bestx = testx + improved = True + if callback is not None: + callback(bestx) + if maxfev is not None and funcalls >= maxfev: + stop = True + break + + return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, + nfev=funcalls, success=(niter > 1)) + + x0 = [1.35, 0.9, 0.8, 1.1, 1.2] + res = optimize.minimize(optimize.rosen, x0, method=custmin, + options=dict(stepsize=0.05)) + assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4) + + def test_minimize_tol_parameter(self): + # Check that the minimize() tol= argument does something + def func(z): + x, y = z + return x**2*y**2 + x**4 + 1 + + def dfunc(z): + x, y = z + return np.array([2*x*y**2 + 4*x**3, 2*x**2*y]) + + for method in ['nelder-mead', 'powell', 'cg', 'bfgs', + 'newton-cg', 'l-bfgs-b', 'tnc', + 'cobyla', 'slsqp']: + if method in ('nelder-mead', 'powell', 'cobyla'): + jac = None + else: + jac = dfunc + + sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10, + method=method) + sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0, + method=method) + assert_(func(sol1.x) < func(sol2.x), + "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x))) + + @pytest.mark.parametrize('method', ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs', + 'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc', + 'fmin_slsqp', + 'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', + 'TNC', 'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg', + 'trust-exact', 'trust-krylov']) + def test_minimize_callback_copies_array(self, method): + # Check that arrays passed to callbacks are not modified + # inplace by the optimizer afterward + + if method in ('fmin_tnc', 'fmin_l_bfgs_b'): + func = lambda x: (optimize.rosen(x), optimize.rosen_der(x)) + else: + func = optimize.rosen + jac = optimize.rosen_der + hess = optimize.rosen_hess + + x0 = np.zeros(10) + + # Set options + kwargs = {} + if method.startswith('fmin'): + routine = getattr(optimize, method) + if method == 'fmin_slsqp': + kwargs['iter'] = 5 + elif method == 'fmin_tnc': + kwargs['maxfun'] = 100 + else: + kwargs['maxiter'] = 5 + else: + def routine(*a, **kw): + kw['method'] = method + return optimize.minimize(*a, **kw) + + if method == 'TNC': + kwargs['options'] = dict(maxiter=100) + else: + kwargs['options'] = dict(maxiter=5) + + if method in ('fmin_ncg',): + kwargs['fprime'] = jac + elif method in ('Newton-CG',): + kwargs['jac'] = jac + elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg', + 'trust-constr'): + kwargs['jac'] = jac + kwargs['hess'] = hess + + # Run with callback + results = [] + + def callback(x, *args, **kwargs): + results.append((x, np.copy(x))) + + sol = routine(func, x0, callback=callback, **kwargs) + + # Check returned arrays coincide with their copies and have no memory overlap + assert_(len(results) > 2) + assert_(all(np.all(x == y) for x, y in results)) + assert_(not any(np.may_share_memory(x[0], y[0]) for x, y in itertools.combinations(results, 2))) + + @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', + 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp']) + def 
test_no_increase(self, method): + # Check that the solver doesn't return a value worse than the + # initial point. + + def func(x): + return (x - 1)**2 + + def bad_grad(x): + # purposefully invalid gradient function, simulates a case + # where line searches start failing + return 2*(x - 1) * (-1) - 2 + + x0 = np.array([2.0]) + f0 = func(x0) + jac = bad_grad + if method in ['nelder-mead', 'powell', 'cobyla']: + jac = None + sol = optimize.minimize(func, x0, jac=jac, method=method, + options=dict(maxiter=20)) + assert_equal(func(sol.x), sol.fun) + + if method == 'slsqp': + pytest.xfail("SLSQP returns slightly worse") + assert_(func(sol.x) <= f0) + + def test_slsqp_respect_bounds(self): + # Regression test for gh-3108 + def f(x): + return sum((x - np.array([1., 2., 3., 4.]))**2) + + def cons(x): + a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]]) + return np.concatenate([np.dot(a, x) + np.array([5, 10]), x]) + + x0 = np.array([0.5, 1., 1.5, 2.]) + res = optimize.minimize(f, x0, method='slsqp', + constraints={'type': 'ineq', 'fun': cons}) + assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12) + + def test_minimize_automethod(self): + def f(x): + return x**2 + + def cons(x): + return x - 2 + + x0 = np.array([10.]) + sol_0 = optimize.minimize(f, x0) + sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}]) + sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)]) + sol_3 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(5, 10)]) + sol_4 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(1, 10)]) + for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]: + assert_(sol.success) + assert_allclose(sol_0.x, 0, atol=1e-7) + assert_allclose(sol_1.x, 2, atol=1e-7) + assert_allclose(sol_2.x, 5, atol=1e-7) + assert_allclose(sol_3.x, 5, atol=1e-7) + assert_allclose(sol_4.x, 2, atol=1e-7) + + def test_minimize_coerce_args_param(self): + # Regression test for gh-3503 + def Y(x, c): + return np.sum((x-c)**2) + + def dY_dx(x, c=None): + return 2*(x-c) + + c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5]) + xinit = np.random.randn(len(c)) + optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS") + + def test_initial_step_scaling(self): + # Check that optimizer initial step is not huge even if the + # function and gradients are + + scales = [1e-50, 1, 1e50] + methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG'] + + def f(x): + if first_step_size[0] is None and x[0] != x0[0]: + first_step_size[0] = abs(x[0] - x0[0]) + if abs(x).max() > 1e4: + raise AssertionError("Optimization stepped far away!") + return scale*(x[0] - 1)**2 + + def g(x): + return np.array([scale*(x[0] - 1)]) + + for scale, method in itertools.product(scales, methods): + if method in ('CG', 'BFGS'): + options = dict(gtol=scale*1e-8) + else: + options = dict() + + if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'): + # XXX: return initial point if they see small gradient + continue + + x0 = [-1.0] + first_step_size = [None] + res = optimize.minimize(f, x0, jac=g, method=method, + options=options) + + err_msg = "{0} {1}: {2}: {3}".format(method, scale, first_step_size, + res) + + assert_(res.success, err_msg) + assert_allclose(res.x, [1.0], err_msg=err_msg) + assert_(res.nit <= 3, err_msg) + + if scale > 1e-10: + if method in ('CG', 'BFGS'): + assert_allclose(first_step_size[0], 1.01, err_msg=err_msg) + else: + # Newton-CG and L-BFGS-B use different logic for the first step, + # but are both scaling invariant with step sizes ~ 1 + assert_(first_step_size[0] > 
0.5 and first_step_size[0] < 3, + err_msg) + else: + # step size has upper bound of ||grad||, so line + # search makes many small steps + pass + + +class TestLBFGSBBounds(object): + def setup_method(self): + self.bounds = ((1, None), (None, None)) + self.solution = (1, 0) + + def fun(self, x, p=2.0): + return 1.0 / p * (x[0]**p + x[1]**p) + + def jac(self, x, p=2.0): + return x**(p - 1) + + def fj(self, x, p=2.0): + return self.fun(x, p), self.jac(x, p) + + def test_l_bfgs_b_bounds(self): + x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1], + fprime=self.jac, + bounds=self.bounds) + assert_(d['warnflag'] == 0, d['task']) + assert_allclose(x, self.solution, atol=1e-6) + + def test_l_bfgs_b_funjac(self): + # L-BFGS-B with fun and jac combined and extra arguments + x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ), + bounds=self.bounds) + assert_(d['warnflag'] == 0, d['task']) + assert_allclose(x, self.solution, atol=1e-6) + + def test_minimize_l_bfgs_b_bounds(self): + # Minimize with method='L-BFGS-B' with bounds + res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B', + jac=self.jac, bounds=self.bounds) + assert_(res['success'], res['message']) + assert_allclose(res.x, self.solution, atol=1e-6) + + +class TestOptimizeScalar(object): + def setup_method(self): + self.solution = 1.5 + + def fun(self, x, a=1.5): + """Objective function""" + return (x - a)**2 - 0.8 + + def test_brent(self): + x = optimize.brent(self.fun) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.brent(self.fun, brack=(-3, -2)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.brent(self.fun, full_output=True) + assert_allclose(x[0], self.solution, atol=1e-6) + + x = optimize.brent(self.fun, brack=(-15, -1, 15)) + assert_allclose(x, self.solution, atol=1e-6) + + def test_golden(self): + x = optimize.golden(self.fun) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, brack=(-3, -2)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, full_output=True) + assert_allclose(x[0], self.solution, atol=1e-6) + + x = optimize.golden(self.fun, brack=(-15, -1, 15)) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.golden(self.fun, tol=0) + assert_allclose(x, self.solution) + + maxiter_test_cases = [0, 1, 5] + for maxiter in maxiter_test_cases: + x0 = optimize.golden(self.fun, maxiter=0, full_output=True) + x = optimize.golden(self.fun, maxiter=maxiter, full_output=True) + nfev0, nfev = x0[2], x[2] + assert_equal(nfev - nfev0, maxiter) + + def test_fminbound(self): + x = optimize.fminbound(self.fun, 0, 1) + assert_allclose(x, 1, atol=1e-4) + + x = optimize.fminbound(self.fun, 1, 5) + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.fminbound(self.fun, np.array([1]), np.array([5])) + assert_allclose(x, self.solution, atol=1e-6) + assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1) + + def test_fminbound_scalar(self): + with pytest.raises(ValueError, match='.*must be scalar.*'): + optimize.fminbound(self.fun, np.zeros((1, 2)), 1) + + x = optimize.fminbound(self.fun, 1, np.array(5)) + assert_allclose(x, self.solution, atol=1e-6) + + def test_minimize_scalar(self): + # combine all tests above for the minimize_scalar wrapper + x = optimize.minimize_scalar(self.fun).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='Brent') + assert_(x.success) + + x = optimize.minimize_scalar(self.fun, method='Brent', + options=dict(maxiter=3)) + assert_(not 
x.success) + + x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), + args=(1.5, ), method='Brent').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='Brent', + args=(1.5,)).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), + args=(1.5, ), method='Brent').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), + args=(1.5, ), method='golden').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, method='golden', + args=(1.5,)).x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), + args=(1.5, ), method='golden').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,), + method='Bounded').x + assert_allclose(x, 1, atol=1e-4) + + x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]), + np.array([5])), + args=(np.array([1.5]), ), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + assert_raises(ValueError, optimize.minimize_scalar, self.fun, + bounds=(5, 1), method='bounded', args=(1.5, )) + + assert_raises(ValueError, optimize.minimize_scalar, self.fun, + bounds=(np.zeros(2), 1), method='bounded', args=(1.5, )) + + x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)), + method='bounded').x + assert_allclose(x, self.solution, atol=1e-6) + + def test_minimize_scalar_custom(self): + # This function comes from the documentation example. + def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1, + maxiter=100, callback=None, **options): + bestx = (bracket[1] + bracket[0]) / 2.0 + besty = fun(bestx) + funcalls = 1 + niter = 0 + improved = True + stop = False + + while improved and not stop and niter < maxiter: + improved = False + niter += 1 + for testx in [bestx - stepsize, bestx + stepsize]: + testy = fun(testx, *args) + funcalls += 1 + if testy < besty: + besty = testy + bestx = testx + improved = True + if callback is not None: + callback(bestx) + if maxfev is not None and funcalls >= maxfev: + stop = True + break + + return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, + nfev=funcalls, success=(niter > 1)) + + res = optimize.minimize_scalar(self.fun, bracket=(0, 4), method=custmin, + options=dict(stepsize=0.05)) + assert_allclose(res.x, self.solution, atol=1e-6) + + def test_minimize_scalar_coerce_args_param(self): + # Regression test for gh-3503 + optimize.minimize_scalar(self.fun, args=1.5) + + +def test_brent_negative_tolerance(): + assert_raises(ValueError, optimize.brent, np.cos, tol=-.01) + + +class TestNewtonCg(object): + def test_rosenbrock(self): + x0 = np.array([-1.2, 1.0]) + sol = optimize.minimize(optimize.rosen, x0, + jac=optimize.rosen_der, + hess=optimize.rosen_hess, + tol=1e-5, + method='Newton-CG') + assert_(sol.success, sol.message) + assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) + + def test_himmelblau(self): + x0 = np.array(himmelblau_x0) + sol = optimize.minimize(himmelblau, + x0, + jac=himmelblau_grad, + hess=himmelblau_hess, + method='Newton-CG', + tol=1e-6) + assert_(sol.success, sol.message) + assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4) + assert_allclose(sol.fun, himmelblau_min, atol=1e-4) + + +class TestRosen(object): + + def 
test_hess(self): + # Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775 + x = np.array([3, 4, 5]) + p = np.array([2, 2, 2]) + hp = optimize.rosen_hess_prod(x, p) + dothp = np.dot(optimize.rosen_hess(x), p) + assert_equal(hp, dothp) + + +def himmelblau(p): + """ + R^2 -> R^1 test function for optimization. The function has four local + minima where himmelblau(xopt) == 0. + """ + x, y = p + a = x*x + y - 11 + b = x + y*y - 7 + return a*a + b*b + + +def himmelblau_grad(p): + x, y = p + return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14, + 2*x**2 + 4*x*y + 4*y**3 - 26*y - 22]) + + +def himmelblau_hess(p): + x, y = p + return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y], + [4*x + 4*y, 4*x + 12*y**2 - 26]]) + + +himmelblau_x0 = [-0.27, -0.9] +himmelblau_xopt = [3, 2] +himmelblau_min = 0.0 + + +def test_minimize_multiple_constraints(): + # Regression test for gh-4240. + def func(x): + return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]]) + + def func1(x): + return np.array([x[1]]) + + def func2(x): + return np.array([x[2]]) + + cons = ({'type': 'ineq', 'fun': func}, + {'type': 'ineq', 'fun': func1}, + {'type': 'ineq', 'fun': func2}) + + f = lambda x: -1 * (x[0] + x[1] + x[2]) + + res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons) + assert_allclose(res.x, [125, 0, 0], atol=1e-10) + + +class TestOptimizeResultAttributes(object): + # Test that all minimizers return an OptimizeResult containing + # all the OptimizeResult attributes + def setup_method(self): + self.x0 = [5, 5] + self.func = optimize.rosen + self.jac = optimize.rosen_der + self.hess = optimize.rosen_hess + self.hessp = optimize.rosen_hess_prod + self.bounds = [(0., 10.), (0., 10.)] + + def test_attributes_present(self): + methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', + 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'dogleg', + 'trust-ncg'] + attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun', + 'message'] + skip = {'COBYLA': ['nit']} + for method in methods: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "Method .+ does not use (gradient|Hessian.*) information") + res = optimize.minimize(self.func, self.x0, method=method, + jac=self.jac, hess=self.hess, + hessp=self.hessp) + for attribute in attributes: + if method in skip and attribute in skip[method]: + continue + + assert_(hasattr(res, attribute)) + assert_(attribute in dir(res)) + + +class TestBrute: + # Test the "brute force" method + def setup_method(self): + self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) + self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) + self.solution = np.array([-1.05665192, 1.80834843]) + + def f1(self, z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) + + def f2(self, z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) + + def f3(self, z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) + + def func(self, z, *params): + return self.f1(z, *params) + self.f2(z, *params) + self.f3(z, *params) + + def test_brute(self): + # test fmin + resbrute = optimize.brute(self.func, self.rranges, args=self.params, + full_output=True, finish=optimize.fmin) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + assert_allclose(resbrute[1], self.func(self.solution, *self.params), + atol=1e-3) + + # test minimize + resbrute = 
optimize.brute(self.func, self.rranges, args=self.params, + full_output=True, + finish=optimize.minimize) + assert_allclose(resbrute[0], self.solution, atol=1e-3) + assert_allclose(resbrute[1], self.func(self.solution, *self.params), + atol=1e-3) + + def test_1D(self): + # test that for a 1D problem the test function is passed an array, + # not a scalar. + def f(x): + assert_(len(x.shape) == 1) + assert_(x.shape[0] == 1) + return x ** 2 + + optimize.brute(f, [(-1, 1)], Ns=3, finish=None) + + +class TestIterationLimits(object): + # Tests that optimisation does not give up before trying requested + # number of iterations or evaluations. And that it does not succeed + # by exceeding the limits. + def setup_method(self): + self.funcalls = 0 + + def slow_func(self, v): + self.funcalls += 1 + r,t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0],v[1]) + return np.sin(r*20 + t)+r*0.5 + + def test_neldermead_limit(self): + self.check_limits("Nelder-Mead", 200) + + def test_powell_limit(self): + self.check_limits("powell", 1000) + + def check_limits(self, method, default_iters): + for start_v in [[0.1,0.1], [1,1], [2,2]]: + for mfev in [50, 500, 5000]: + self.funcalls = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, options={"maxfev":mfev}) + assert_(self.funcalls == res["nfev"]) + if res["success"]: + assert_(res["nfev"] < mfev) + else: + assert_(res["nfev"] >= mfev) + for mit in [50, 500,5000]: + res = optimize.minimize(self.slow_func, start_v, + method=method, options={"maxiter":mit}) + if res["success"]: + assert_(res["nit"] <= mit) + else: + assert_(res["nit"] >= mit) + for mfev,mit in [[50,50], [5000,5000],[5000,np.inf]]: + self.funcalls = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, options={"maxiter":mit, "maxfev":mfev}) + assert_(self.funcalls == res["nfev"]) + if res["success"]: + assert_(res["nfev"] < mfev and res["nit"] <= mit) + else: + assert_(res["nfev"] >= mfev or res["nit"] >= mit) + for mfev,mit in [[np.inf,None], [None,np.inf]]: + self.funcalls = 0 + res = optimize.minimize(self.slow_func, start_v, + method=method, options={"maxiter":mit, "maxfev":mfev}) + assert_(self.funcalls == res["nfev"]) + if res["success"]: + if mfev is None: + assert_(res["nfev"] < default_iters*2) + else: + assert_(res["nit"] <= default_iters*2) + else: + assert_(res["nfev"] >= default_iters*2 or + res["nit"] >= default_iters*2) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_optimize.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_optimize.pyc new file mode 100644 index 0000000..dd5210f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_optimize.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_regression.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_regression.py new file mode 100644 index 0000000..0d82462 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_regression.py @@ -0,0 +1,42 @@ +"""Regression tests for optimize. 
+
+"""
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.testing import assert_almost_equal
+from pytest import raises as assert_raises
+
+import scipy.optimize
+
+
+class TestRegression(object):
+
+    def test_newton_x0_is_0(self):
+        # Regression test for gh-1601
+        tgt = 1
+        res = scipy.optimize.newton(lambda x: x - 1, 0)
+        assert_almost_equal(res, tgt)
+
+    def test_newton_integers(self):
+        # Regression test for gh-1741
+        root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2,
+                                     fprime=lambda x: 2*x)
+        assert_almost_equal(root, 1.0)
+
+    def test_lmdif_errmsg(self):
+        # This shouldn't cause a crash on Python 3
+        class SomeError(Exception):
+            pass
+        counter = [0]
+
+        def func(x):
+            counter[0] += 1
+            if counter[0] < 3:
+                return x**2 - np.array([9, 10, 11])
+            else:
+                raise SomeError()
+        assert_raises(SomeError,
+                      scipy.optimize.leastsq,
+                      func, [1, 2, 3])
+
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_regression.pyc
new file mode 100644
index 0000000..5bdc0c6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_regression.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_slsqp.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_slsqp.py
new file mode 100644
index 0000000..2afbad5
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_slsqp.py
@@ -0,0 +1,512 @@
+"""
+Unit tests for SLSQP optimization.
+"""
+from __future__ import division, print_function, absolute_import
+
+import pytest
+from numpy.testing import (assert_, assert_array_almost_equal,
+                           assert_allclose, assert_equal)
+from pytest import raises as assert_raises
+import numpy as np
+
+from scipy.optimize import fmin_slsqp, minimize, NonlinearConstraint, Bounds
+
+
+class MyCallBack(object):
+    """Pass a custom callback function.
+
+    This makes sure the optimizer is actually calling it.
+    """
+    def __init__(self):
+        self.been_called = False
+        self.ncalls = 0
+
+    def __call__(self, x):
+        self.been_called = True
+        self.ncalls += 1
+
+
+class TestSLSQP(object):
+    """
+    Test SLSQP algorithm using Example 14.4 from Numerical Methods for
+    Engineers by Steven Chapra and Raymond Canale.
+    This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2,
+    which has a maximum at x=2, y=1.
+    """
+    def setup_method(self):
+        self.opts = {'disp': False}
+
+    def fun(self, d, sign=1.0):
+        """
+        Arguments:
+        d    - A list of two elements, where d[0] represents x and d[1]
+               represents y in the following equation.
+        sign - A multiplier for f. Since we want to maximize f, and the scipy
+               optimizers can only minimize functions, we multiply it by -1
+               to achieve the desired solution.
+        Returns:
+        2*x*y + 2*x - x**2 - 2*y**2
+
+        """
+        x = d[0]
+        y = d[1]
+        return sign*(2*x*y + 2*x - x**2 - 2*y**2)
+
+    def jac(self, d, sign=1.0):
+        """
+        This is the derivative of fun, returning a numpy array
+        representing df/dx and df/dy.
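+        For f = sign*(2*x*y + 2*x - x**2 - 2*y**2), the partial derivatives
+        are df/dx = sign*(-2*x + 2*y + 2) and df/dy = sign*(2*x - 4*y),
+        matching the expressions computed below.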
+ + """ + x = d[0] + y = d[1] + dfdx = sign*(-2*x + 2*y + 2) + dfdy = sign*(2*x - 4*y) + return np.array([dfdx, dfdy], float) + + def fun_and_jac(self, d, sign=1.0): + return self.fun(d, sign), self.jac(d, sign) + + def f_eqcon(self, x, sign=1.0): + """ Equality constraint """ + return np.array([x[0] - x[1]]) + + def fprime_eqcon(self, x, sign=1.0): + """ Equality constraint, derivative """ + return np.array([[1, -1]]) + + def f_eqcon_scalar(self, x, sign=1.0): + """ Scalar equality constraint """ + return self.f_eqcon(x, sign)[0] + + def fprime_eqcon_scalar(self, x, sign=1.0): + """ Scalar equality constraint, derivative """ + return self.fprime_eqcon(x, sign)[0].tolist() + + def f_ieqcon(self, x, sign=1.0): + """ Inequality constraint """ + return np.array([x[0] - x[1] - 1.0]) + + def fprime_ieqcon(self, x, sign=1.0): + """ Inequality constraint, derivative """ + return np.array([[1, -1]]) + + def f_ieqcon2(self, x): + """ Vector inequality constraint """ + return np.asarray(x) + + def fprime_ieqcon2(self, x): + """ Vector inequality constraint, derivative """ + return np.identity(x.shape[0]) + + # minimize + def test_minimize_unbounded_approximated(self): + # Minimize, method='SLSQP': unbounded, approximated jacobian. + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_unbounded_given(self): + # Minimize, method='SLSQP': unbounded, given jacobian. + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + jac=self.jac, method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_bounded_approximated(self): + # Minimize, method='SLSQP': bounded, approximated jacobian. + with np.errstate(invalid='ignore'): + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + bounds=((2.5, None), (None, 0.5)), + method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2.5, 0.5]) + assert_(2.5 <= res.x[0]) + assert_(res.x[1] <= 0.5) + + def test_minimize_unbounded_combined(self): + # Minimize, method='SLSQP': unbounded, combined function and jacobian. + res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ), + jac=True, method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_equality_approximated(self): + # Minimize with method='SLSQP': equality constraint, approx. jacobian. + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, )}, + method='SLSQP', options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given(self): + # Minimize with method='SLSQP': equality constraint, given jacobian. + res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, + method='SLSQP', args=(-1.0,), + constraints={'type': 'eq', 'fun':self.f_eqcon, + 'args': (-1.0, )}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given2(self): + # Minimize with method='SLSQP': equality constraint, given jacobian + # for fun and const. 
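+        # Supplying 'jac' inside the constraint dict gives SLSQP the
+        # constraint Jacobian analytically instead of approximating it by
+        # finite differences.  A minimal standalone sketch of the same
+        # pattern (hypothetical objective and constraint, not part of this
+        # suite; the minimizer is (0.5, 0.5)):
+        #
+        #   minimize(lambda x: x[0]**2 + x[1]**2, [2.0, 0.0], method='SLSQP',
+        #            constraints={'type': 'eq',
+        #                         'fun': lambda x: x[0] + x[1] - 1,
+        #                         'jac': lambda x: np.array([[1.0, 1.0]])})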
+ res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0,), + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_equality_given_cons_scalar(self): + # Minimize with method='SLSQP': scalar equality constraint, given + # jacobian for fun and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0,), + constraints={'type': 'eq', + 'fun': self.f_eqcon_scalar, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon_scalar}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [1, 1]) + + def test_minimize_inequality_given(self): + # Minimize with method='SLSQP': inequality constraint, given jacobian. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0, ), + constraints={'type': 'ineq', + 'fun': self.f_ieqcon, + 'args': (-1.0, )}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1], atol=1e-3) + + def test_minimize_inequality_given_vector_constraints(self): + # Minimize with method='SLSQP': vector inequality constraint, given + # jacobian. + res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, + method='SLSQP', args=(-1.0,), + constraints={'type': 'ineq', + 'fun': self.f_ieqcon2, + 'jac': self.fprime_ieqcon2}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [2, 1]) + + def test_minimize_bound_equality_given2(self): + # Minimize with method='SLSQP': bounds, eq. const., given jac. for + # fun. and const. + res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', + jac=self.jac, args=(-1.0, ), + bounds=[(-0.8, 1.), (-1, 0.8)], + constraints={'type': 'eq', + 'fun': self.f_eqcon, + 'args': (-1.0, ), + 'jac': self.fprime_eqcon}, + options=self.opts) + assert_(res['success'], res['message']) + assert_allclose(res.x, [0.8, 0.8], atol=1e-3) + assert_(-0.8 <= res.x[0] <= 1) + assert_(-1 <= res.x[1] <= 0.8) + + # fmin_slsqp + def test_unbounded_approximated(self): + # SLSQP: unbounded, approximated jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1]) + + def test_unbounded_given(self): + # SLSQP: unbounded, given jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), + fprime = self.jac, iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1]) + + def test_equality_approximated(self): + # SLSQP: equality constraint, approximated jacobian. + res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,), + eqcons = [self.f_eqcon], + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_equality_given(self): + # SLSQP: equality constraint, given jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0,), + eqcons = [self.f_eqcon], iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_equality_given2(self): + # SLSQP: equality constraint, given jacobian for fun and const. 
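+        # Interface note: unlike minimize's constraint dicts, fmin_slsqp
+        # takes `eqcons` (a list of scalar constraint functions) or
+        # `f_eqcons` (one vector-valued constraint function), with
+        # `fprime_eqcons` supplying the Jacobian of the latter, as below.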
+ res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0,), + f_eqcons = self.f_eqcon, + fprime_eqcons = self.fprime_eqcon, + iprint = 0, + full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [1, 1]) + + def test_inequality_given(self): + # SLSQP: inequality constraint, given jacobian. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0, ), + ieqcons = [self.f_ieqcon], + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [2, 1], decimal=3) + + def test_bound_equality_given2(self): + # SLSQP: bounds, eq. const., given jac. for fun. and const. + res = fmin_slsqp(self.fun, [-1.0, 1.0], + fprime=self.jac, args=(-1.0, ), + bounds = [(-0.8, 1.), (-1, 0.8)], + f_eqcons = self.f_eqcon, + fprime_eqcons = self.fprime_eqcon, + iprint = 0, full_output = 1) + x, fx, its, imode, smode = res + assert_(imode == 0, imode) + assert_array_almost_equal(x, [0.8, 0.8], decimal=3) + assert_(-0.8 <= x[0] <= 1) + assert_(-1 <= x[1] <= 0.8) + + def test_scalar_constraints(self): + # Regression test for gh-2182 + x = fmin_slsqp(lambda z: z**2, [3.], + ieqcons=[lambda z: z[0] - 1], + iprint=0) + assert_array_almost_equal(x, [1.]) + + x = fmin_slsqp(lambda z: z**2, [3.], + f_ieqcons=lambda z: [z[0] - 1], + iprint=0) + assert_array_almost_equal(x, [1.]) + + def test_integer_bounds(self): + # This should not raise an exception + fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0) + + def test_obj_must_return_scalar(self): + # Regression test for Github Issue #5433 + # If objective function does not return a scalar, raises ValueError + with assert_raises(ValueError): + fmin_slsqp(lambda x: [0, 1], [1, 2, 3]) + + def test_obj_returns_scalar_in_list(self): + # Test for Github Issue #5433 and PR #6691 + # Objective function should be able to return length-1 Python list + # containing the scalar + fmin_slsqp(lambda x: [0], [1, 2, 3], iprint=0) + + def test_callback(self): + # Minimize, method='SLSQP': unbounded, approximated jacobian. Check for callback + callback = MyCallBack() + res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), + method='SLSQP', callback=callback, options=self.opts) + assert_(res['success'], res['message']) + assert_(callback.been_called) + assert_equal(callback.ncalls, res['nit']) + + def test_inconsistent_linearization(self): + # SLSQP must be able to solve this problem, even if the + # linearized problem at the starting point is infeasible. + + # Linearized constraints are + # + # 2*x0[0]*x[0] >= 1 + # + # At x0 = [0, 1], the second constraint is clearly infeasible. + # This triggers a call with n2==1 in the LSQ subroutine. + x = [0, 1] + f1 = lambda x: x[0] + x[1] - 2 + f2 = lambda x: x[0]**2 - 1 + sol = minimize( + lambda x: x[0]**2 + x[1]**2, + x, + constraints=({'type':'eq','fun': f1}, + {'type':'ineq','fun': f2}), + bounds=((0,None), (0,None)), + method='SLSQP') + x = sol.x + + assert_allclose(f1(x), 0, atol=1e-8) + assert_(f2(x) >= -1e-8) + assert_(sol.success, sol) + + def test_regression_5743(self): + # SLSQP must not indicate success for this problem, + # which is infeasible. 
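+        # Why it is infeasible: the equality constraint forces
+        # x[0] + x[1] = 1 while the inequality demands x[0] >= 2, so
+        # x[1] = 1 - x[0] <= -1, violating the bound x[1] >= 0.
+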
+ x = [1, 2] + sol = minimize( + lambda x: x[0]**2 + x[1]**2, + x, + constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1}, + {'type':'ineq','fun': lambda x: x[0]-2}), + bounds=((0,None), (0,None)), + method='SLSQP') + assert_(not sol.success, sol) + + def test_gh_6676(self): + def func(x): + return (x[0] - 1)**2 + 2*(x[1] - 1)**2 + 0.5*(x[2] - 1)**2 + + sol = minimize(func, [0, 0, 0], method='SLSQP') + assert_(sol.jac.shape == (3,)) + + def test_invalid_bounds(self): + # Raise correct error when lower bound is greater than upper bound. + # See Github issue 6875. + bounds_list = [ + ((1, 2), (2, 1)), + ((2, 1), (1, 2)), + ((2, 1), (2, 1)), + ((np.inf, 0), (np.inf, 0)), + ((1, -np.inf), (0, 1)), + ] + for bounds in bounds_list: + with assert_raises(ValueError): + minimize(self.fun, [-1.0, 1.0], bounds=bounds, method='SLSQP') + + def test_bounds_clipping(self): + # + # SLSQP returns bogus results for initial guess out of bounds, gh-6859 + # + def f(x): + return (x[0] - 1)**2 + + sol = minimize(f, [10], method='slsqp', bounds=[(None, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', bounds=[(2, None)]) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', bounds=[(None, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', bounds=[(2, None)]) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-0.5], method='slsqp', bounds=[(-1, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', bounds=[(-1, 0)]) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + def test_infeasible_initial(self): + # Check SLSQP behavior with infeasible initial point + def f(x): + x, = x + return x*x - 2*x + 1 + + cons_u = [{'type': 'ineq', 'fun': lambda x: 0 - x}] + cons_l = [{'type': 'ineq', 'fun': lambda x: x - 2}] + cons_ul = [{'type': 'ineq', 'fun': lambda x: 0 - x}, + {'type': 'ineq', 'fun': lambda x: x + 1}] + + sol = minimize(f, [10], method='slsqp', constraints=cons_u) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', constraints=cons_l) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-10], method='slsqp', constraints=cons_u) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', constraints=cons_l) + assert_(sol.success) + assert_allclose(sol.x, 2, atol=1e-10) + + sol = minimize(f, [-0.5], method='slsqp', constraints=cons_ul) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + sol = minimize(f, [10], method='slsqp', constraints=cons_ul) + assert_(sol.success) + assert_allclose(sol.x, 0, atol=1e-10) + + def test_inconsistent_inequalities(self): + # gh-7618 + + def cost(x): + return -1 * x[0] + 4 * x[1] + + def ineqcons1(x): + return x[1] - x[0] - 1 + + def ineqcons2(x): + return x[0] - x[1] + + # The inequalities are inconsistent, so no solution can exist: + # + # x1 >= x0 + 1 + # x0 >= x1 + + x0 = (1,5) + bounds = ((-5, 5), (-5, 5)) + cons = (dict(type='ineq', fun=ineqcons1), dict(type='ineq', fun=ineqcons2)) + res = minimize(cost, x0, method='SLSQP', bounds=bounds, constraints=cons) + + assert_(not res.success) + + def test_new_bounds_type(self): + f = lambda x: x[0]**2 + x[1]**2 + bounds = Bounds([1, 0], [np.inf, np.inf]) + sol = minimize(f, [0, 0], method='slsqp', 
bounds=bounds) + assert_(sol.success) + assert_allclose(sol.x, [1, 0]) + + def test_nested_minimization(self): + + class NestedProblem(): + + def __init__(self): + self.F_outer_count = 0 + + def F_outer(self, x): + self.F_outer_count += 1 + if self.F_outer_count > 1000: + raise Exception("Nested minimization failed to terminate.") + inner_res = minimize(self.F_inner, (3, 4), method="SLSQP") + assert_(inner_res.success) + assert_allclose(inner_res.x, [1, 1]) + return x[0]**2 + x[1]**2 + x[2]**2 + + def F_inner(self, x): + return (x[0] - 1)**2 + (x[1] - 1)**2 + + def solve(self): + outer_res = minimize(self.F_outer, (5, 5, 5), method="SLSQP") + assert_(outer_res.success) + assert_allclose(outer_res.x, [0, 0, 0]) + + problem = NestedProblem() + problem.solve() diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_slsqp.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_slsqp.pyc new file mode 100644 index 0000000..7428b2d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_slsqp.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_tnc.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_tnc.py new file mode 100644 index 0000000..dd769ea --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_tnc.py @@ -0,0 +1,302 @@ +""" +Unit tests for TNC optimization routine from tnc.py +""" + +from numpy.testing import assert_allclose, assert_equal + +from scipy import optimize +import numpy as np +from math import pow + + +class TestTnc(object): + """TNC non-linear optimization. + + These tests are taken from Prof. K. Schittkowski's test examples + for constrained non-linear programming. 
+ + http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm + + """ + def setup_method(self): + # options for minimize + self.opts = {'disp': False, 'maxiter': 200} + + # objective functions and jacobian for each test + def f1(self, x, a=100.0): + return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2) + + def g1(self, x, a=100.0): + dif = [0, 0] + dif[1] = 2 * a * (x[1] - pow(x[0], 2)) + dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0) + return dif + + def fg1(self, x, a=100.0): + return self.f1(x, a), self.g1(x, a) + + def f3(self, x): + return x[1] + pow(x[1] - x[0], 2) * 1.0e-5 + + def g3(self, x): + dif = [0, 0] + dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5 + dif[1] = 1.0 - dif[0] + return dif + + def fg3(self, x): + return self.f3(x), self.g3(x) + + def f4(self, x): + return pow(x[0] + 1.0, 3) / 3.0 + x[1] + + def g4(self, x): + dif = [0, 0] + dif[0] = pow(x[0] + 1.0, 2) + dif[1] = 1.0 + return dif + + def fg4(self, x): + return self.f4(x), self.g4(x) + + def f5(self, x): + return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \ + 1.5 * x[0] + 2.5 * x[1] + 1.0 + + def g5(self, x): + dif = [0, 0] + v1 = np.cos(x[0] + x[1]) + v2 = 2.0*(x[0] - x[1]) + + dif[0] = v1 + v2 - 1.5 + dif[1] = v1 - v2 + 2.5 + return dif + + def fg5(self, x): + return self.f5(x), self.g5(x) + + def f38(self, x): + return (100.0 * pow(x[1] - pow(x[0], 2), 2) + + pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) + + pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) + + pow(x[3] - 1.0, 2)) + + 19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5 + + def g38(self, x): + dif = [0, 0, 0, 0] + dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) - + 2.0 * (1.0 - x[0])) * 1.0e-5 + dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) + + 19.8 * (x[3] - 1.0)) * 1.0e-5 + dif[2] = (- 360.0 * x[2] * (x[3] - pow(x[2], 2)) - + 2.0 * (1.0 - x[2])) * 1.0e-5 + dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) + + 19.8 * (x[1] - 1.0)) * 1.0e-5 + return dif + + def fg38(self, x): + return self.f38(x), self.g38(x) + + def f45(self, x): + return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0 + + def g45(self, x): + dif = [0] * 5 + dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0 + dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0 + dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0 + dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0 + dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0 + return dif + + def fg45(self, x): + return self.f45(x), self.g45(x) + + # tests + # minimize with method=TNC + def test_minimize_tnc1(self): + x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + iterx = [] # to test callback + + res = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1, + bounds=bnds, options=self.opts, + callback=iterx.append) + assert_allclose(res.fun, self.f1(xopt), atol=1e-8) + assert_equal(len(iterx), res.nit) + + def test_minimize_tnc1b(self): + x0, bnds = np.matrix([-2, 1]), ([-np.inf, None],[-1.5, None]) + xopt = [1, 1] + x = optimize.minimize(self.f1, x0, method='TNC', + bounds=bnds, options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4) + + def test_minimize_tnc1c(self): + x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None]) + xopt = [1, 1] + x = optimize.minimize(self.fg1, x0, method='TNC', + jac=True, bounds=bnds, + options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) + + def test_minimize_tnc2(self): + x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None]) + xopt = [-1.2210262419616387, 1.5] + x = optimize.minimize(self.f1, x0, method='TNC', + jac=self.g1, 
bounds=bnds, + options=self.opts).x + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) + + def test_minimize_tnc3(self): + x0, bnds = [10, 1], ([-np.inf, None], [0.0, None]) + xopt = [0, 0] + x = optimize.minimize(self.f3, x0, method='TNC', + jac=self.g3, bounds=bnds, + options=self.opts).x + assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8) + + def test_minimize_tnc4(self): + x0,bnds = [1.125, 0.125], [(1, None), (0, None)] + xopt = [1, 0] + x = optimize.minimize(self.f4, x0, method='TNC', + jac=self.g4, bounds=bnds, + options=self.opts).x + assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8) + + def test_minimize_tnc5(self): + x0, bnds = [0, 0], [(-1.5, 4),(-3, 3)] + xopt = [-0.54719755119659763, -1.5471975511965976] + x = optimize.minimize(self.f5, x0, method='TNC', + jac=self.g5, bounds=bnds, + options=self.opts).x + assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8) + + def test_minimize_tnc38(self): + x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4 + xopt = [1]*4 + x = optimize.minimize(self.f38, x0, method='TNC', + jac=self.g38, bounds=bnds, + options=self.opts).x + assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8) + + def test_minimize_tnc45(self): + x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] + xopt = [1, 2, 3, 4, 5] + x = optimize.minimize(self.f45, x0, method='TNC', + jac=self.g45, bounds=bnds, + options=self.opts).x + assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8) + + # fmin_tnc + def test_tnc1(self): + fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ), + messages=optimize.tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize.tnc.RCSTRINGS[rc]) + + def test_tnc1b(self): + x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True, + bounds=bounds, + messages=optimize.tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4, + err_msg="TNC failed with status: " + + optimize.tnc.RCSTRINGS[rc]) + + def test_tnc1c(self): + x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) + xopt = [1, 1] + + x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1, + bounds=bounds, + messages=optimize.tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize.tnc.RCSTRINGS[rc]) + + def test_tnc2(self): + fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None]) + xopt = [-1.2210262419616387, 1.5] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize.tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize.tnc.RCSTRINGS[rc]) + + def test_tnc3(self): + fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None]) + xopt = [0, 0] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize.tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize.tnc.RCSTRINGS[rc]) + + def test_tnc4(self): + fg, x, bounds = self.fg4, [1.125, 0.125], [(1, None), (0, None)] + xopt = [1, 0] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize.tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + 
optimize.tnc.RCSTRINGS[rc]) + + def test_tnc5(self): + fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)] + xopt = [-0.54719755119659763, -1.5471975511965976] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize.tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize.tnc.RCSTRINGS[rc]) + + def test_tnc38(self): + fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4 + xopt = [1]*4 + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize.tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize.tnc.RCSTRINGS[rc]) + + def test_tnc45(self): + fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3), + (0, 4), (0, 5)] + xopt = [1, 2, 3, 4, 5] + + x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, + messages=optimize.tnc.MSG_NONE, + maxfun=200) + + assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8, + err_msg="TNC failed with status: " + + optimize.tnc.RCSTRINGS[rc]) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_tnc.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_tnc.pyc new file mode 100644 index 0000000..3d8fe8c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_tnc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion.py new file mode 100644 index 0000000..5c84b1e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion.py @@ -0,0 +1,106 @@ +""" +Unit tests for trust-region optimization routines. 
+ +To run it in its simplest form:: + nosetests test_optimize.py + +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess, + rosen_hess_prod) +from numpy.testing import assert_, assert_equal, assert_allclose + + +class Accumulator: + """ This is for testing callbacks.""" + def __init__(self): + self.count = 0 + self.accum = None + + def __call__(self, x): + self.count += 1 + if self.accum is None: + self.accum = np.array(x) + else: + self.accum += x + + +class TestTrustRegionSolvers(object): + + def setup_method(self): + self.x_opt = [1.0, 1.0] + self.easy_guess = [2.0, 2.0] + self.hard_guess = [-1.2, 1.0] + + def test_dogleg_accuracy(self): + # test the accuracy and the return_all option + x0 = self.hard_guess + r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8, + method='dogleg', options={'return_all': True},) + assert_allclose(x0, r['allvecs'][0]) + assert_allclose(r['x'], r['allvecs'][-1]) + assert_allclose(r['x'], self.x_opt) + + def test_dogleg_callback(self): + # test the callback mechanism and the maxiter and return_all options + accumulator = Accumulator() + maxiter = 5 + r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess, + callback=accumulator, method='dogleg', + options={'return_all': True, 'maxiter': maxiter},) + assert_equal(accumulator.count, maxiter) + assert_equal(len(r['allvecs']), maxiter+1) + assert_allclose(r['x'], r['allvecs'][-1]) + assert_allclose(sum(r['allvecs'][1:]), accumulator.accum) + + def test_solver_concordance(self): + # Assert that dogleg uses fewer iterations than ncg on the Rosenbrock + # test function, although this does not necessarily mean + # that dogleg is faster or better than ncg even for this function + # and especially not for other test functions. 
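+        # (Iteration counts are compared via the recorded iterates: as
+        # test_dogleg_callback above shows, len(r['allvecs']) == nit + 1.)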
+        f = rosen
+        g = rosen_der
+        h = rosen_hess
+        for x0 in (self.easy_guess, self.hard_guess):
+            r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                                method='dogleg', options={'return_all': True})
+            r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                                   method='trust-ncg',
+                                   options={'return_all': True})
+            r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                                      method='trust-krylov',
+                                      options={'return_all': True})
+            r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                             method='newton-cg', options={'return_all': True})
+            r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                                   method='trust-exact',
+                                   options={'return_all': True})
+            assert_allclose(self.x_opt, r_dogleg['x'])
+            assert_allclose(self.x_opt, r_trust_ncg['x'])
+            assert_allclose(self.x_opt, r_trust_krylov['x'])
+            assert_allclose(self.x_opt, r_ncg['x'])
+            assert_allclose(self.x_opt, r_iterative['x'])
+            assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs']))
+
+    def test_trust_ncg_hessp(self):
+        for x0 in (self.easy_guess, self.hard_guess, self.x_opt):
+            r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod,
+                         tol=1e-8, method='trust-ncg')
+            assert_allclose(self.x_opt, r['x'])
+
+    def test_trust_ncg_start_in_optimum(self):
+        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
+                     tol=1e-8, method='trust-ncg')
+        assert_allclose(self.x_opt, r['x'])
+
+    def test_trust_krylov_start_in_optimum(self):
+        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
+                     tol=1e-8, method='trust-krylov')
+        assert_allclose(self.x_opt, r['x'])
+
+    def test_trust_exact_start_in_optimum(self):
+        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
+                     tol=1e-8, method='trust-exact')
+        assert_allclose(self.x_opt, r['x'])
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion.pyc
new file mode 100644
index 0000000..b478794
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_exact.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_exact.py
new file mode 100644
index 0000000..44b5be9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_exact.py
@@ -0,0 +1,357 @@
+"""
+Unit tests for trust-region iterative subproblem.
+
+To run it in its simplest form::
+  nosetests test_optimize.py
+
+"""
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from scipy.optimize._trustregion_exact import (
+    estimate_smallest_singular_value,
+    singular_leading_submatrix,
+    IterativeSubproblem)
+from scipy.linalg import (svd, get_lapack_funcs, det,
+                          cho_factor, cho_solve, qr,
+                          eigvalsh, eig, norm)
+from numpy.testing import (assert_, assert_array_equal,
+                           assert_equal, assert_array_almost_equal,
+                           assert_array_less)
+
+
+def random_entry(n, min_eig, max_eig, case):
+
+    # Generate random matrix
+    rand = np.random.uniform(-1, 1, (n, n))
+
+    # QR decomposition
+    Q, _, _ = qr(rand, pivoting=True)
+
+    # Generate random eigenvalues
+    eigvalues = np.random.uniform(min_eig, max_eig, n)
+    eigvalues = np.sort(eigvalues)[::-1]
+
+    # Generate matrix
+    Qaux = np.multiply(eigvalues, Q)
+    A = np.dot(Qaux, Q.T)
+
+    # Generate the gradient vector according
+    # to the case being tested.
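+    # Since the eigenvalues are sorted in decreasing order, the last column
+    # of Q is the eigenvector of A for its smallest eigenvalue.  The 'hard'
+    # case zeroes g's component along that direction (g[-1] = 0 in the
+    # eigenbasis) before rotating by Q, making g orthogonal to it.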
+    if case == 'hard':
+        g = np.zeros(n)
+        g[:-1] = np.random.uniform(-1, 1, n-1)
+        g = np.dot(Q, g)
+    elif case == 'jac_equal_zero':
+        g = np.zeros(n)
+    else:
+        g = np.random.uniform(-1, 1, n)
+
+    return A, g
+
+
+class TestEstimateSmallestSingularValue(object):
+
+    def test_for_ill_conditioned_matrix(self):
+
+        # Ill-conditioned triangular matrix
+        C = np.array([[1, 2, 3, 4],
+                      [0, 0.05, 60, 7],
+                      [0, 0, 0.8, 9],
+                      [0, 0, 0, 10]])
+
+        # Get the SVD decomposition
+        U, s, Vt = svd(C)
+
+        # Get the smallest singular value and the corresponding right
+        # singular vector.
+        smin_svd = s[-1]
+        zmin_svd = Vt[-1, :]
+
+        # Estimate the smallest singular value
+        smin, zmin = estimate_smallest_singular_value(C)
+
+        # Check the estimate
+        assert_array_almost_equal(smin, smin_svd, decimal=8)
+        assert_array_almost_equal(abs(zmin), abs(zmin_svd), decimal=8)
+
+
+class TestSingularLeadingSubmatrix(object):
+
+    def test_for_already_singular_leading_submatrix(self):
+
+        # Define test matrix A.
+        # Note that the leading 2x2 submatrix is singular.
+        A = np.array([[1, 2, 3],
+                      [2, 4, 5],
+                      [3, 5, 6]])
+
+        # Get Cholesky from lapack functions
+        cholesky, = get_lapack_funcs(('potrf',), (A,))
+
+        # Compute the Cholesky decomposition
+        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
+
+        delta, v = singular_leading_submatrix(A, c, k)
+
+        A[k-1, k-1] += delta
+
+        # Check that the updated leading submatrix is singular.
+        assert_array_almost_equal(det(A[:k, :k]), 0)
+
+        # Check that `v` fulfills the specified properties
+        quadratic_term = np.dot(v, np.dot(A, v))
+        assert_array_almost_equal(quadratic_term, 0)
+
+    def test_for_symmetric_indefinite_matrix(self):
+
+        # Define test matrix A.
+        # Note that the leading 5x5 submatrix is indefinite.
+        A = np.asarray([[1, 2, 3, 7, 8],
+                        [2, 5, 5, 9, 0],
+                        [3, 5, 11, 1, 2],
+                        [7, 9, 1, 7, 5],
+                        [8, 0, 2, 5, 8]])
+
+        # Get Cholesky from lapack functions
+        cholesky, = get_lapack_funcs(('potrf',), (A,))
+
+        # Compute the Cholesky decomposition
+        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
+
+        delta, v = singular_leading_submatrix(A, c, k)
+
+        A[k-1, k-1] += delta
+
+        # Check that the updated leading submatrix is singular.
+        assert_array_almost_equal(det(A[:k, :k]), 0)
+
+        # Check that `v` fulfills the specified properties
+        quadratic_term = np.dot(v, np.dot(A, v))
+        assert_array_almost_equal(quadratic_term, 0)
+
+    def test_for_first_element_equal_to_zero(self):
+
+        # Define test matrix A.
+        # Note that the leading 2x2 submatrix is singular.
+        A = np.array([[0, 3, 11],
+                      [3, 12, 5],
+                      [11, 5, 6]])
+
+        # Get Cholesky from lapack functions
+        cholesky, = get_lapack_funcs(('potrf',), (A,))
+
+        # Compute the Cholesky decomposition
+        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
+
+        delta, v = singular_leading_submatrix(A, c, k)
+
+        A[k-1, k-1] += delta
+
+        # Check that the updated leading submatrix is singular
+        assert_array_almost_equal(det(A[:k, :k]), 0)
+
+        # Check that `v` fulfills the specified properties
+        quadratic_term = np.dot(v, np.dot(A, v))
+        assert_array_almost_equal(quadratic_term, 0)
+
+
+class TestIterativeSubproblem(object):
+
+    def test_for_the_easy_case(self):
+
+        # `H` is chosen such that `g` is not orthogonal to the
+        # eigenvector associated with the smallest eigenvalue `s`.
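+        # Background (a sketch of standard trust-region theory, cf. the
+        # Conn, Gould & Toint reference cited later in this file): in the
+        # "easy" case the secular equation ||p(lambda)|| = trust_radius has
+        # a solution with H + lambda*I positive definite, so a Newton
+        # iteration on lambda suffices; the "hard" case (next test) arises
+        # when g is orthogonal to the eigenspace of the smallest eigenvalue.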
+ H = [[10, 2, 3, 4], + [2, 1, 7, 1], + [3, 7, 1, 7], + [4, 1, 7, 2]] + g = [1, 1, 1, 1] + + # Trust Radius + trust_radius = 1 + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, [0.00393332, -0.55260862, + 0.67065477, -0.49480341]) + assert_array_almost_equal(hits_boundary, True) + + def test_for_the_hard_case(self): + + # `H` is chosen such that `g` is orthogonal to the + # eigenvector associated with the smallest eigenvalue `s`. + H = [[10, 2, 3, 4], + [2, 1, 7, 1], + [3, 7, 1, 7], + [4, 1, 7, 2]] + g = [6.4852641521327437, 1, 1, 1] + s = -8.2151519874416614 + + # Trust Radius + trust_radius = 1 + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(-s, subprob.lambda_current) + + def test_for_interior_convergence(self): + + H = [[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988], + [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588], + [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867], + [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166], + [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]] + + g = [0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534] + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H)) + p, hits_boundary = subprob.solve(1.1) + + assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999, + -0.67005053, 0.31586769]) + assert_array_almost_equal(hits_boundary, False) + assert_array_almost_equal(subprob.lambda_current, 0) + assert_array_almost_equal(subprob.niter, 1) + + def test_for_jac_equal_zero(self): + + H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]] + + g = [0, 0, 0, 0, 0] + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(1.1) + + assert_array_almost_equal(p, [0.06910534, -0.01432721, + -0.65311947, -0.23815972, + -0.84954934]) + assert_array_almost_equal(hits_boundary, True) + + def test_for_jac_very_close_to_zero(self): + + H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]] + + g = [0, 0, 0, 0, 1e-15] + + # Solve Subproblem + subprob = IterativeSubproblem(x=0, + fun=lambda x: 0, + jac=lambda x: np.array(g), + hess=lambda x: np.array(H), + k_easy=1e-10, + k_hard=1e-10) + p, hits_boundary = subprob.solve(1.1) + + assert_array_almost_equal(p, [0.06910534, -0.01432721, + -0.65311947, -0.23815972, + -0.84954934]) + assert_array_almost_equal(hits_boundary, True) + + def test_for_random_entries(self): + # Seed + 
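        # (a fixed seed keeps the randomly generated H and g reproducible)
+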
np.random.seed(1) + + # Dimension + n = 5 + + for case in ('easy', 'hard', 'jac_equal_zero'): + + eig_limits = [(-20, -15), + (-10, -5), + (-10, 0), + (-5, 5), + (-10, 10), + (0, 10), + (5, 10), + (15, 20)] + + for min_eig, max_eig in eig_limits: + # Generate random symmetric matrix H with + # eigenvalues between min_eig and max_eig. + H, g = random_entry(n, min_eig, max_eig, case) + + # Trust radius + trust_radius_list = [0.1, 0.3, 0.6, 0.8, 1, 1.2, 3.3, 5.5, 10] + + for trust_radius in trust_radius_list: + # Solve subproblem with very high accuracy + subprob_ac = IterativeSubproblem(0, + lambda x: 0, + lambda x: g, + lambda x: H, + k_easy=1e-10, + k_hard=1e-10) + + p_ac, hits_boundary_ac = subprob_ac.solve(trust_radius) + + # Compute objective function value + J_ac = 1/2*np.dot(p_ac, np.dot(H, p_ac))+np.dot(g, p_ac) + + stop_criteria = [(0.1, 2), + (0.5, 1.1), + (0.9, 1.01)] + + for k_opt, k_trf in stop_criteria: + + # k_easy and k_hard computed in function + # of k_opt and k_trf accordingly to + # Conn, A. R., Gould, N. I., & Toint, P. L. (2000). + # "Trust region methods". Siam. p. 197. + k_easy = min(k_trf-1, + 1-np.sqrt(k_opt)) + k_hard = 1-k_opt + + # Solve subproblem + subprob = IterativeSubproblem(0, + lambda x: 0, + lambda x: g, + lambda x: H, + k_easy=k_easy, + k_hard=k_hard) + p, hits_boundary = subprob.solve(trust_radius) + + # Compute objective function value + J = 1/2*np.dot(p, np.dot(H, p))+np.dot(g, p) + + # Check if it respect k_trf + if hits_boundary: + assert_array_equal(np.abs(norm(p)-trust_radius) <= + (k_trf-1)*trust_radius, True) + else: + assert_equal(norm(p) <= trust_radius, True) + + # Check if it respect k_opt + assert_equal(J <= k_opt*J_ac, True) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_exact.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_exact.pyc new file mode 100644 index 0000000..0b6da24 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_exact.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_krylov.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_krylov.py new file mode 100644 index 0000000..f8cabd7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_krylov.py @@ -0,0 +1,173 @@ +""" +Unit tests for Krylov space trust-region subproblem solver. + +To run it in its simplest form:: + nosetests test_optimize.py + +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.optimize._trlib import (get_trlib_quadratic_subproblem) +from numpy.testing import (assert_, assert_array_equal, + assert_almost_equal, + assert_equal, assert_array_almost_equal, + assert_array_less) + +KrylovQP = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6) +KrylovQP_disp = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6, disp=True) + +class TestKrylovQuadraticSubproblem(object): + + def test_for_the_easy_case(self): + + # `H` is chosen such that `g` is not orthogonal to the + # eigenvector associated with the smallest eigenvalue. 
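+        # The assertions below verify the KKT conditions of the trust-region
+        # subproblem: at a boundary solution p satisfies
+        # (H + lam*I).dot(p) = -g with lam >= 0, so the residual norm
+        # ||H.dot(p) + lam*p + g|| must vanish and ||p|| must equal the
+        # trust radius.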
+ H = np.array([[1.0, 0.0, 4.0], + [0.0, 2.0, 0.0], + [4.0, 0.0, 3.0]]) + g = np.array([5.0, 0.0, 4.0]) + + # Trust Radius + trust_radius = 1.0 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([-1.0, 0.0, 0.0])) + assert_equal(hits_boundary, True) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + trust_radius = 0.5 + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, + np.array([-0.46125446, 0., -0.19298788])) + assert_equal(hits_boundary, True) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + def test_for_the_hard_case(self): + + # `H` is chosen such that `g` is orthogonal to the + # eigenvector associated with the smallest eigenvalue. + H = np.array([[1.0, 0.0, 4.0], + [0.0, 2.0, 0.0], + [4.0, 0.0, 3.0]]) + g = np.array([0.0, 2.0, 0.0]) + + # Trust Radius + trust_radius = 1.0 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([0.0, -1.0, 0.0])) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + trust_radius = 0.5 + p, hits_boundary = subprob.solve(trust_radius) + + assert_array_almost_equal(p, np.array([0.0, -0.5, 0.0])) + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + # check trust region constraint + assert_almost_equal(np.linalg.norm(p), trust_radius) + + def test_for_interior_convergence(self): + + H = np.array([[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988], + [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588], + [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867], + [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166], + [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]) + g = np.array([0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]) + trust_radius = 1.1 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + hess=lambda x: None, + hessp=lambda x, y: H.dot(y)) + p, hits_boundary = subprob.solve(trust_radius) + + # check kkt satisfaction + assert_almost_equal( + np.linalg.norm(H.dot(p) + subprob.lam * p + g), + 0.0) + + assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999, + -0.67005053, 0.31586769]) + assert_array_almost_equal(hits_boundary, False) + + def test_for_very_close_to_zero(self): + + H = np.array([[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], + [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], + [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], + [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], + [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]) + g = np.array([0, 0, 0, 0, 1e-6]) + trust_radius = 1.1 + + # Solve Subproblem + subprob = KrylovQP(x=0, + fun=lambda x: 0, + jac=lambda x: g, + 
                           hess=lambda x: None,
+                           hessp=lambda x, y: H.dot(y))
+        p, hits_boundary = subprob.solve(trust_radius)
+
+        # check kkt satisfaction
+        assert_almost_equal(
+                np.linalg.norm(H.dot(p) + subprob.lam * p + g),
+                0.0)
+        # check trust region constraint
+        assert_almost_equal(np.linalg.norm(p), trust_radius)
+
+        assert_array_almost_equal(p, [0.06910534, -0.01432721,
+                                      -0.65311947, -0.23815972,
+                                      -0.84954934])
+        assert_array_almost_equal(hits_boundary, True)
+
+    def test_disp(self, capsys):
+        H = -np.eye(5)
+        g = np.array([0, 0, 0, 0, 1e-6])
+        trust_radius = 1.1
+
+        subprob = KrylovQP_disp(x=0,
+                                fun=lambda x: 0,
+                                jac=lambda x: g,
+                                hess=lambda x: None,
+                                hessp=lambda x, y: H.dot(y))
+        p, hits_boundary = subprob.solve(trust_radius)
+        out, err = capsys.readouterr()
+        assert_(out.startswith(' TR Solving trust region problem'), repr(out))
+
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_krylov.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_krylov.pyc
new file mode 100644
index 0000000..9629e08
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_trustregion_krylov.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_zeros.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_zeros.py
new file mode 100644
index 0000000..73d0dd4
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_zeros.py
@@ -0,0 +1,655 @@
+from __future__ import division, print_function, absolute_import
+import pytest
+
+from math import sqrt, exp, sin, cos
+
+from numpy.testing import (assert_warns, assert_,
+                           assert_allclose,
+                           assert_equal)
+import numpy as np
+from numpy import finfo, power, nan, isclose
+
+
+from scipy.optimize import zeros, newton, root_scalar
+
+from scipy._lib._util import getargspec_no_self as _getargspec
+
+# Import testing parameters
+from scipy.optimize._tstutils import get_tests, functions as tstutils_functions, fstrings as tstutils_fstrings
+from scipy._lib._numpy_compat import suppress_warnings
+
+TOL = 4*np.finfo(float).eps  # tolerance
+
+_FLOAT_EPS = finfo(float).eps
+
+# A few test functions used frequently:
+# A simple quadratic, (x-1)^2 - 2
+def f1(x):
+    return x ** 2 - 2 * x - 1
+
+
+def f1_1(x):
+    return 2 * x - 2
+
+
+def f1_2(x):
+    return 2.0 + 0 * x
+
+
+def f1_and_p_and_pp(x):
+    return f1(x), f1_1(x), f1_2(x)
+
+
+# Simple transcendental function
+def f2(x):
+    return exp(x) - cos(x)
+
+
+def f2_1(x):
+    return exp(x) + sin(x)
+
+
+def f2_2(x):
+    return exp(x) + cos(x)
+
+
+class TestBasic(object):
+
+    def run_check_by_name(self, name, smoothness=0, **kwargs):
+        a = .5
+        b = sqrt(3)
+        xtol = 4*np.finfo(float).eps
+        rtol = 4*np.finfo(float).eps
+        for function, fname in zip(tstutils_functions, tstutils_fstrings):
+            if smoothness > 0 and fname in ['f4', 'f5', 'f6']:
+                continue
+            r = root_scalar(function, method=name, bracket=[a, b], x0=a,
+                            xtol=xtol, rtol=rtol, **kwargs)
+            zero = r.root
+            assert_(r.converged)
+            assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
+                            err_msg='method %s, function %s' % (name, fname))
+
+    def run_check(self, method, name):
+        a = .5
+        b = sqrt(3)
+        xtol = 4 * _FLOAT_EPS
+        rtol = 4 * _FLOAT_EPS
+        for function, fname in zip(tstutils_functions, tstutils_fstrings):
+            zero, r = method(function, a, b, xtol=xtol, rtol=rtol,
+                             full_output=True)
+            assert_(r.converged)
+            assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
+                            err_msg='method %s, function %s' % (name, fname))
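+
+    # For orientation: run_check drives the functional interface
+    # (bisect/brentq/... called directly) and run_check_by_name the
+    # root_scalar interface.  A minimal standalone sketch of the pattern
+    # being exercised (illustrative only, not part of the original suite;
+    # printed values rounded):
+    #
+    #   >>> from scipy.optimize import zeros, root_scalar
+    #   >>> zero, r = zeros.bisect(lambda x: x**2 - 2, 0, 2, full_output=True)
+    #   >>> r.converged, round(zero, 6)
+    #   (True, 1.414214)
+    #   >>> sol = root_scalar(lambda x: x**2 - 2, bracket=[0, 2], method='bisect')
+    #   >>> sol.converged, round(sol.root, 6)
+    #   (True, 1.414214)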
+
+    def _run_one_test(self, tc, method, sig_args_keys=None,
+                      sig_kwargs_keys=None, **kwargs):
+        method_args = []
+        for k in sig_args_keys or []:
+            if k not in tc:
+                # If a, b not present use x0, x1.  Similarly for f and func.
+                k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k)
+            method_args.append(tc[k])
+
+        method_kwargs = dict(**kwargs)
+        method_kwargs.update({'full_output': True, 'disp': False})
+        for k in sig_kwargs_keys or []:
+            method_kwargs[k] = tc[k]
+
+        root = tc.get('root')
+        func_args = tc.get('args', ())
+
+        try:
+            r, rr = method(*method_args, args=func_args, **method_kwargs)
+            return root, rr, tc
+        except Exception:
+            return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR), tc
+
+    def run_tests(self, tests, method, name,
+                  xtol=4 * _FLOAT_EPS, rtol=4 * _FLOAT_EPS,
+                  known_fail=None, **kwargs):
+        r"""Run test-cases using the specified method and the supplied signature.
+
+        Extract the arguments for the method call from the test case
+        dictionary using the supplied keys for the method's signature."""
+        # The methods have one of two base signatures:
+        # (func, x0, **kwargs)  # newton
+        # (f, a, b, **kwargs)   # bisect/brentq/...
+        sig = _getargspec(method)  # ArgSpec with args, varargs, varkw, defaults
+        nDefaults = len(sig[3])
+        nRequired = len(sig[0]) - nDefaults
+        sig_args_keys = sig[0][:nRequired]
+        sig_kwargs_keys = []
+        if name in ['secant', 'newton', 'halley']:
+            if name in ['newton', 'halley']:
+                sig_kwargs_keys.append('fprime')
+            if name in ['halley']:
+                sig_kwargs_keys.append('fprime2')
+            kwargs['tol'] = xtol
+        else:
+            kwargs['xtol'] = xtol
+            kwargs['rtol'] = rtol
+
+        results = [list(self._run_one_test(
+            tc, method, sig_args_keys=sig_args_keys,
+            sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests]
+        # results = [[true root, full output, tc], ...]
+
+        known_fail = known_fail or []
+        notcvgd = [elt for elt in results if not elt[1].converged]
+        notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail]
+        notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd]
+        assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []])
+
+        # The usable xtol and rtol depend on the test
+        tols = {'xtol': 4 * _FLOAT_EPS, 'rtol': 4 * _FLOAT_EPS}
+        tols.update(**kwargs)
+        rtol = tols['rtol']
+        atol = tols.get('tol', tols['xtol'])
+
+        cvgd = [elt for elt in results if elt[1].converged]
+        approx = [elt[1].root for elt in cvgd]
+        correct = [elt[0] for elt in cvgd]
+        notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if
+                    not isclose(a, c, rtol=rtol, atol=atol)
+                    and elt[-1]['ID'] not in known_fail]
+        # Evaluate the function and see if it is 0 at the purported root
+        fvs = [tc['f'](aroot, *(tc['args'])) for aroot, c, fullout, tc in notclose]
+        notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0]
+        assert_equal([notclose, len(notclose)], [[], 0])
+
+    def run_collection(self, collection, method, name, smoothness=None,
+                       known_fail=None,
+                       xtol=4 * _FLOAT_EPS, rtol=4 * _FLOAT_EPS,
+                       **kwargs):
+        r"""Run a collection of tests using the specified method.
+ + The name is used to determine some optional arguments.""" + tests = get_tests(collection, smoothness=smoothness) + self.run_tests(tests, method, name, xtol=xtol, rtol=rtol, + known_fail=known_fail, **kwargs) + + def test_bisect(self): + self.run_check(zeros.bisect, 'bisect') + self.run_check_by_name('bisect') + self.run_collection('aps', zeros.bisect, 'bisect', smoothness=1) + + def test_ridder(self): + self.run_check(zeros.ridder, 'ridder') + self.run_check_by_name('ridder') + self.run_collection('aps', zeros.ridder, 'ridder', smoothness=1) + + def test_brentq(self): + self.run_check(zeros.brentq, 'brentq') + self.run_check_by_name('brentq') + # Brentq/h needs a lower tolerance to be specified + self.run_collection('aps', zeros.brentq, 'brentq', smoothness=1, + xtol=1e-14, rtol=1e-14) + + def test_brenth(self): + self.run_check(zeros.brenth, 'brenth') + self.run_check_by_name('brenth') + self.run_collection('aps', zeros.brenth, 'brenth', smoothness=1, + xtol=1e-14, rtol=1e-14) + + def test_toms748(self): + self.run_check(zeros.toms748, 'toms748') + self.run_check_by_name('toms748') + self.run_collection('aps', zeros.toms748, 'toms748', smoothness=1) + + def test_newton_collections(self): + known_fail = ['aps.13.00'] + known_fail += ['aps.12.05', 'aps.12.17'] # fails under Windows Py27 + for collection in ['aps', 'complex']: + self.run_collection(collection, zeros.newton, 'newton', + smoothness=2, known_fail=known_fail) + + def test_halley_collections(self): + known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09', + 'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13', + 'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17', + 'aps.12.18', 'aps.13.00'] + for collection in ['aps', 'complex']: + self.run_collection(collection, zeros.newton, 'halley', + smoothness=2, known_fail=known_fail) + + @staticmethod + def f1(x): + return x**2 - 2*x - 1 # == (x-1)**2 - 2 + + @staticmethod + def f1_1(x): + return 2*x - 2 + + @staticmethod + def f1_2(x): + return 2.0 + 0*x + + @staticmethod + def f2(x): + return exp(x) - cos(x) + + @staticmethod + def f2_1(x): + return exp(x) + sin(x) + + @staticmethod + def f2_2(x): + return exp(x) + cos(x) + + def test_newton(self): + for f, f_1, f_2 in [(self.f1, self.f1_1, self.f1_2), + (self.f2, self.f2_1, self.f2_2)]: + x = zeros.newton(f, 3, tol=1e-6) + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, x1=5, tol=1e-6) # secant, x0 and x1 + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, fprime=f_1, tol=1e-6) # newton + assert_allclose(f(x), 0, atol=1e-6) + x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6) # halley + assert_allclose(f(x), 0, atol=1e-6) + + def test_newton_by_name(self): + r"""Invoke newton through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_secant_by_name(self): + r"""Invoke secant through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + + def test_halley_by_name(self): + r"""Invoke halley through root_scalar()""" + for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: + r = root_scalar(f, method='halley', x0=3, + fprime=f_1, fprime2=f_2, xtol=1e-6) + assert_allclose(f(r.root), 0, atol=1e-6) + + def 
test_root_scalar_fail(self): + with pytest.raises(ValueError): + root_scalar(f1, method='secant', x0=3, xtol=1e-6) # no x1 + with pytest.raises(ValueError): + root_scalar(f1, method='newton', x0=3, xtol=1e-6) # no fprime + with pytest.raises(ValueError): + root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6) # no fprime2 + with pytest.raises(ValueError): + root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6) # no fprime + + def test_array_newton(self): + """test newton with array""" + + def f1(x, *a): + b = a[0] + x * a[3] + return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x + + def f1_1(x, *a): + b = a[3] / a[5] + return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1 + + def f1_2(x, *a): + b = a[3] / a[5] + return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2 + + a0 = np.array([ + 5.32725221, 5.48673747, 5.49539973, + 5.36387202, 4.80237316, 1.43764452, + 5.23063958, 5.46094772, 5.50512718, + 5.42046290 + ]) + a1 = (np.sin(range(10)) + 1.0) * 7.0 + args = (a0, a1, 1e-09, 0.004, 10, 0.27456) + x0 = [7.0] * 10 + x = zeros.newton(f1, x0, f1_1, args) + x_expected = ( + 6.17264965, 11.7702805, 12.2219954, + 7.11017681, 1.18151293, 0.143707955, + 4.31928228, 10.5419107, 12.7552490, + 8.91225749 + ) + assert_allclose(x, x_expected) + # test halley's + x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2) + assert_allclose(x, x_expected) + # test secant + x = zeros.newton(f1, x0, args=args) + assert_allclose(x, x_expected) + + def test_array_secant_active_zero_der(self): + """test secant doesn't continue to iterate zero derivatives""" + x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5], + args=[np.array([17, 25])]) + assert_allclose(x, (4.123105625617661, 5.0)) + + def test_array_newton_integers(self): + # test secant with float + x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2, + args=([15.0, 17.0],)) + assert_allclose(x, (3.872983346207417, 4.123105625617661)) + # test integer becomes float + x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],)) + assert_allclose(x, (3.872983346207417, 4.123105625617661)) + + def test_array_newton_zero_der_failures(self): + # test derivative zero warning + assert_warns(RuntimeWarning, zeros.newton, + lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y) + # test failures and zero_der + with pytest.warns(RuntimeWarning): + results = zeros.newton(lambda y: y**2 - 2, [0., 0.], + lambda y: 2*y, full_output=True) + assert_allclose(results.root, 0) + assert results.zero_der.all() + assert not results.converged.any() + + def test_newton_combined(self): + f1 = lambda x: x**2 - 2*x - 1 + f1_1 = lambda x: 2*x - 2 + f1_2 = lambda x: 2.0 + 0*x + + def f1_and_p_and_pp(x): + return x**2 - 2*x-1, 2*x-2, 2.0 + + sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1) + sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True) + assert_allclose(sol0.root, sol.root, atol=1e-8) + assert_equal(2*sol.function_calls, sol0.function_calls) + + sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2) + sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True) + assert_allclose(sol0.root, sol.root, atol=1e-8) + assert_equal(3*sol.function_calls, sol0.function_calls) + + def test_newton_full_output(self): + # Test the full_output capability, both when converging and not. + # Use simple polynomials, to avoid hitting platform dependencies + # (e.g. 
exp & trig) in number of iterations + + x0 = 3 + expected_counts = [(6, 7), (5, 10), (3, 9)] + + for derivs in range(3): + kwargs = {'tol': 1e-6, 'full_output': True, } + for k, v in [['fprime', self.f1_1], ['fprime2', self.f1_2]][:derivs]: + kwargs[k] = v + + x, r = zeros.newton(self.f1, x0, disp=False, **kwargs) + assert_(r.converged) + assert_equal(x, r.root) + assert_equal((r.iterations, r.function_calls), expected_counts[derivs]) + if derivs == 0: + assert(r.function_calls <= r.iterations + 1) + else: + assert_equal(r.function_calls, (derivs + 1) * r.iterations) + + # Now repeat, allowing one fewer iteration to force convergence failure + iters = r.iterations - 1 + x, r = zeros.newton(self.f1, x0, maxiter=iters, disp=False, **kwargs) + assert_(not r.converged) + assert_equal(x, r.root) + assert_equal(r.iterations, iters) + + if derivs == 1: + # Check that the correct Exception is raised and + # validate the start of the message. + with pytest.raises( + RuntimeError, + match='Failed to converge after %d iterations, value is .*' % (iters)): + x, r = zeros.newton(self.f1, x0, maxiter=iters, disp=True, **kwargs) + + def test_deriv_zero_warning(self): + func = lambda x: x**2 - 2.0 + dfunc = lambda x: 2*x + assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc) + + +def test_gh_5555(): + root = 0.1 + + def f(x): + return x - root + + methods = [zeros.bisect, zeros.ridder] + xtol = rtol = TOL + for method in methods: + res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol) + assert_allclose(root, res, atol=xtol, rtol=rtol, + err_msg='method %s' % method.__name__) + + +def test_gh_5557(): + # Show that without the changes in 5557 brentq and brenth might + # only achieve a tolerance of 2*(xtol + rtol*|res|). + + # f linearly interpolates (0, -0.1), (0.5, -0.1), and (1, + # 0.4). The important parts are that |f(0)| < |f(1)| (so that + # brent takes 0 as the initial guess), |f(0)| < atol (so that + # brent accepts 0 as the root), and that the exact root of f lies + # more than atol away from 0 (so that brent doesn't achieve the + # desired tolerance). 
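+    # Worked numbers for the comment above (an added illustration, not part
+    # of the upstream test): with the f defined below, f(0) = -0.1 and
+    # f(1) = 0.4, so 0 becomes the initial guess; |f(0)| = 0.1 < atol = 0.51,
+    # so 0 is acceptable as a root; yet the exact root, 0.6, lies further
+    # than atol from 0.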
+ def f(x): + if x < 0.5: + return -0.1 + else: + return x - 0.6 + + atol = 0.51 + rtol = 4 * _FLOAT_EPS + methods = [zeros.brentq, zeros.brenth] + for method in methods: + res = method(f, 0, 1, xtol=atol, rtol=rtol) + assert_allclose(0.6, res, atol=atol, rtol=rtol) + + +class TestRootResults: + def test_repr(self): + r = zeros.RootResults(root=1.0, + iterations=44, + function_calls=46, + flag=0) + expected_repr = (" converged: True\n flag: 'converged'" + "\n function_calls: 46\n iterations: 44\n" + " root: 1.0") + assert_equal(repr(r), expected_repr) + + +def test_complex_halley(): + """Test Halley's works with complex roots""" + def f(x, *a): + return a[0] * x**2 + a[1] * x + a[2] + + def f_1(x, *a): + return 2 * a[0] * x + a[1] + + def f_2(x, *a): + retval = 2 * a[0] + try: + size = len(x) + except TypeError: + return retval + else: + return [retval] * size + + z = complex(1.0, 2.0) + coeffs = (2.0, 3.0, 4.0) + y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6) + # (-0.75000000000000078+1.1989578808281789j) + assert_allclose(f(y, *coeffs), 0, atol=1e-6) + z = [z] * 10 + coeffs = (2.0, 3.0, 4.0) + y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6) + assert_allclose(f(y, *coeffs), 0, atol=1e-6) + + +def test_zero_der_nz_dp(): + """Test secant method with a non-zero dp, but an infinite newton step""" + # pick a symmetrical functions and choose a point on the side that with dx + # makes a secant that is a flat line with zero slope, EG: f = (x - 100)**2, + # which has a root at x = 100 and is symmetrical around the line x = 100 + # we have to pick a really big number so that it is consistently true + # now find a point on each side so that the secant has a zero slope + dx = np.finfo(float).eps ** 0.33 + # 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100 + # -> 200 = p0 * (2 + dx) + dx + p0 = (200.0 - dx) / (2.0 + dx) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "RMS of") + x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10) + assert_allclose(x, [100] * 10) + # test scalar cases too + p0 = (2.0 - 1e-4) / (2.0 + 1e-4) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Tolerance of") + x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0) + assert_allclose(x, 1) + p0 = (-2.0 + 1e-4) / (2.0 + 1e-4) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Tolerance of") + x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0) + assert_allclose(x, -1) + + +def test_array_newton_failures(): + """Test that array newton fails as expected""" + # p = 0.68 # [MPa] + # dp = -0.068 * 1e6 # [Pa] + # T = 323 # [K] + diameter = 0.10 # [m] + # L = 100 # [m] + roughness = 0.00015 # [m] + rho = 988.1 # [kg/m**3] + mu = 5.4790e-04 # [Pa*s] + u = 2.488 # [m/s] + reynolds_number = rho * u * diameter / mu # Reynolds number + + def colebrook_eqn(darcy_friction, re, dia): + return (1 / np.sqrt(darcy_friction) + + 2 * np.log10(roughness / 3.7 / dia + + 2.51 / re / np.sqrt(darcy_friction))) + + # only some failures + with pytest.warns(RuntimeWarning): + result = zeros.newton( + colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2, + args=[reynolds_number, diameter], full_output=True + ) + assert not result.converged.all() + # they all fail + with pytest.raises(RuntimeError): + result = zeros.newton( + colebrook_eqn, x0=[0.01] * 2, maxiter=2, + args=[reynolds_number, diameter], full_output=True + ) + + +# this test should **not** raise a RuntimeWarning +def test_gh8904_zeroder_at_root_fails(): + """Test that Newton or Halley don't warn if zero 
derivative at root"""
+
+    # a function that has a zero derivative at its root
+    def f_zeroder_root(x):
+        return x**3 - x**2
+
+    # should work with secant
+    r = zeros.newton(f_zeroder_root, x0=0)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # test again with array
+    r = zeros.newton(f_zeroder_root, x0=[0]*10)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+
+    # 1st derivative
+    def fder(x):
+        return 3 * x**2 - 2 * x
+
+    # 2nd derivative
+    def fder2(x):
+        return 6*x - 2
+
+    # should work with newton and halley
+    r = zeros.newton(f_zeroder_root, x0=0, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    r = zeros.newton(f_zeroder_root, x0=0, fprime=fder,
+                     fprime2=fder2)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # test again with array
+    r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder,
+                     fprime2=fder2)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+
+    # also test that if a root is found we do not raise a RuntimeWarning even
+    # if the derivative is zero, e.g. at x = 0.5, fval = -0.125 and
+    # fder = -0.25, so the next guess is 0.5 - (-0.125/-0.25) = 0, which is
+    # the root; if the solver continued from that guess it would compute a
+    # zero derivative, so it should return the root without a RuntimeWarning
+    r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # test again with array
+    r = zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # doesn't apply to halley
+
+
+def test_gh_8881():
+    r"""Test that Halley's method realizes that the 2nd order adjustment
+    is too big and drops off to the 1st order adjustment."""
+    n = 9
+
+    def f(x):
+        return power(x, 1.0/n) - power(n, 1.0/n)
+
+    def fp(x):
+        return power(x, (1.0-n)/n)/n
+
+    def fpp(x):
+        return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n
+
+    x0 = 0.1
+    # The root is at x=9.
+    # The function has positive slope, x0 < root.
+    # Newton succeeds in 8 iterations
+    rt, r = newton(f, x0, fprime=fp, full_output=True)
+    assert(r.converged)
+    # Before the Issue 8881/PR 8882, halley would send x in the wrong direction.
+    # Check that it now succeeds.
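+    # Why it used to fail (a sketch inferred from the |adj| < 1 guard in
+    # zeros.newton, not an upstream comment): near x0 = 0.1 the quantity
+    # adj = fval*fpp/(2*fp**2) is roughly 2.6, so dividing the Newton step
+    # by (1 - adj) would flip its sign; the guard now skips the 2nd order
+    # correction there and takes the plain Newton step instead.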
+ rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) + assert(r.converged) + + +def test_gh_9608_preserve_array_shape(): + """ + Test that shape is preserved for array inputs even if fprime or fprime2 is + scalar + """ + def f(x): + return x**2 + + def fp(x): + return 2 * x + + def fpp(x): + return 2 + + x0 = np.array([-2], dtype=np.float32) + rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) + assert(r.converged) + + x0_array = np.array([-2, -3], dtype=np.float32) + # This next invocation should fail + with pytest.raises(IndexError): + result = zeros.newton( + f, x0_array, fprime=fp, fprime2=fpp, full_output=True + ) + + def fpp_array(x): + return 2*np.ones(np.shape(x), dtype=np.float32) + + result = zeros.newton( + f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True + ) + assert result.converged.all() diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_zeros.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_zeros.pyc new file mode 100644 index 0000000..5638691 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tests/test_zeros.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tnc.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/tnc.py new file mode 100644 index 0000000..a3f4762 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/tnc.py @@ -0,0 +1,441 @@ +# TNC Python interface +# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $ + +# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) + +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: + +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +""" +TNC: A python interface to the TNC non-linear optimizer + +TNC is a non-linear optimizer. To use it, you must provide a function to +minimize. The function must take one argument: the list of coordinates where to +evaluate the function; and it must return either a tuple, whose first element is the +value of the function, and whose second argument is the gradient of the function +(as a list of values); or None, to abort the minimization. 
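+
+For example, a conforming objective (an illustrative sketch added here, not
+part of the original TNC docstring) might be::
+
+    def objective(x):
+        f = x[0]**2 + x[1]**2           # value of the function
+        g = [2.0 * x[0], 2.0 * x[1]]    # its gradient, as a list
+        return f, g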
+""" + +from __future__ import division, print_function, absolute_import + +from scipy.optimize import moduleTNC, approx_fprime +from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options +from numpy import inf, array, zeros, asfarray + +__all__ = ['fmin_tnc'] + + +MSG_NONE = 0 # No messages +MSG_ITER = 1 # One line per iteration +MSG_INFO = 2 # Informational messages +MSG_VERS = 4 # Version info +MSG_EXIT = 8 # Exit reasons +MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT + +MSGS = { + MSG_NONE: "No messages", + MSG_ITER: "One line per iteration", + MSG_INFO: "Informational messages", + MSG_VERS: "Version info", + MSG_EXIT: "Exit reasons", + MSG_ALL: "All messages" +} + +INFEASIBLE = -1 # Infeasible (lower bound > upper bound) +LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0) +FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0) +XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0) +MAXFUN = 3 # Max. number of function evaluations reached +LSFAIL = 4 # Linear search failed +CONSTANT = 5 # All lower bounds are equal to the upper bounds +NOPROGRESS = 6 # Unable to progress +USERABORT = 7 # User requested end of minimization + +RCSTRINGS = { + INFEASIBLE: "Infeasible (lower bound > upper bound)", + LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)", + FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)", + XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)", + MAXFUN: "Max. number of function evaluations reached", + LSFAIL: "Linear search failed", + CONSTANT: "All lower bounds are equal to the upper bounds", + NOPROGRESS: "Unable to progress", + USERABORT: "User requested end of minimization" +} + +# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in +# SciPy + + +def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, + bounds=None, epsilon=1e-8, scale=None, offset=None, + messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, + stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1, + rescale=-1, disp=None, callback=None): + """ + Minimize a function with variables subject to bounds, using + gradient information in a truncated Newton algorithm. This + method wraps a C implementation of the algorithm. + + Parameters + ---------- + func : callable ``func(x, *args)`` + Function to minimize. Must do one of: + + 1. Return f and g, where f is the value of the function and g its + gradient (a list of floats). + + 2. Return the function value but supply gradient function + separately as `fprime`. + + 3. Return the function value and set ``approx_grad=True``. + + If the function returns None, the minimization + is aborted. + x0 : array_like + Initial estimate of minimum. + fprime : callable ``fprime(x, *args)``, optional + Gradient of `func`. If None, then either `func` must return the + function value and the gradient (``f,g = func(x, *args)``) + or `approx_grad` must be True. + args : tuple, optional + Arguments to pass to function. + approx_grad : bool, optional + If true, approximate the gradient numerically. + bounds : list, optional + (min, max) pairs for each element in x0, defining the + bounds on that parameter. Use None or +/-inf for one of + min or max when there is no bound in that direction. + epsilon : float, optional + Used if approx_grad is True. The stepsize in a finite + difference approximation for fprime. + scale : array_like, optional + Scaling factors to apply to each variable. If None, the + factors are up-low for interval bounded variables and + 1+|x| for the others. Defaults to None. + offset : array_like, optional + Value to subtract from each variable. 
If None, the + offsets are (up+low)/2 for interval bounded variables + and x for the others. + messages : int, optional + Bit mask used to select messages display during + minimization values defined in the MSGS dict. Defaults to + MGS_ALL. + disp : int, optional + Integer interface to messages. 0 = no message, 5 = all messages + maxCGit : int, optional + Maximum number of hessian*vector evaluations per main + iteration. If maxCGit == 0, the direction chosen is + -gradient if maxCGit < 0, maxCGit is set to + max(1,min(50,n/2)). Defaults to -1. + maxfun : int, optional + Maximum number of function evaluation. if None, maxfun is + set to max(100, 10*len(x0)). Defaults to None. + eta : float, optional + Severity of the line search. if < 0 or > 1, set to 0.25. + Defaults to -1. + stepmx : float, optional + Maximum step for the line search. May be increased during + call. If too small, it will be set to 10.0. Defaults to 0. + accuracy : float, optional + Relative precision for finite difference calculations. If + <= machine_precision, set to sqrt(machine_precision). + Defaults to 0. + fmin : float, optional + Minimum function value estimate. Defaults to 0. + ftol : float, optional + Precision goal for the value of f in the stopping criterion. + If ftol < 0.0, ftol is set to 0.0 defaults to -1. + xtol : float, optional + Precision goal for the value of x in the stopping + criterion (after applying x scaling factors). If xtol < + 0.0, xtol is set to sqrt(machine_precision). Defaults to + -1. + pgtol : float, optional + Precision goal for the value of the projected gradient in + the stopping criterion (after applying x scaling factors). + If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). + Setting it to 0.0 is not recommended. Defaults to -1. + rescale : float, optional + Scaling factor (in log10) used to trigger f value + rescaling. If 0, rescale at each iteration. If a large + value, never rescale. If < 0, rescale is set to 1.3. + callback : callable, optional + Called after each iteration, as callback(xk), where xk is the + current parameter vector. + + Returns + ------- + x : ndarray + The solution. + nfeval : int + The number of function evaluations. + rc : int + Return code, see below + + See also + -------- + minimize: Interface to minimization algorithms for multivariate + functions. See the 'TNC' `method` in particular. + + Notes + ----- + The underlying algorithm is truncated Newton, also called + Newton Conjugate-Gradient. This method differs from + scipy.optimize.fmin_ncg in that + + 1. It wraps a C implementation of the algorithm + 2. It allows each variable to be given an upper and lower bound. + + The algorithm incorporates the bound constraints by determining + the descent direction as in an unconstrained truncated Newton, + but never taking a step-size large enough to leave the space + of feasible x's. The algorithm keeps track of a set of + currently active constraints, and ignores them when computing + the minimum allowable step size. (The x's associated with the + active constraint are kept fixed.) If the maximum allowable + step size is zero then a new constraint is added. At the end + of each iteration one of the constraints may be deemed no + longer active and removed. A constraint is considered + no longer active is if it is currently active + but the gradient for that variable points inward from the + constraint. The specific constraint removed is the one + associated with the variable of largest index whose + constraint is no longer active. 
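+
+    A minimal call sketch (an illustration added here; the quadratic
+    objective, starting point and bounds are arbitrary, not from the
+    original docs)::
+
+        x, nfeval, rc = fmin_tnc(lambda x: (x[0]**2, [2.0 * x[0]]),
+                                 [3.0], bounds=[(-10, 10)])
+        # rc indexes RCSTRINGS; see the return codes below.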
+ + Return codes are defined as follows:: + + -1 : Infeasible (lower bound > upper bound) + 0 : Local minimum reached (|pg| ~= 0) + 1 : Converged (|f_n-f_(n-1)| ~= 0) + 2 : Converged (|x_n-x_(n-1)| ~= 0) + 3 : Max. number of function evaluations reached + 4 : Linear search failed + 5 : All lower bounds are equal to the upper bounds + 6 : Unable to progress + 7 : User requested end of minimization + + References + ---------- + Wright S., Nocedal J. (2006), 'Numerical Optimization' + + Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method", + SIAM Journal of Numerical Analysis 21, pp. 770-778 + + """ + # handle fprime/approx_grad + if approx_grad: + fun = func + jac = None + elif fprime is None: + fun = MemoizeJac(func) + jac = fun.derivative + else: + fun = func + jac = fprime + + if disp is not None: # disp takes precedence over messages + mesg_num = disp + else: + mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, + 4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL) + # build options + opts = {'eps': epsilon, + 'scale': scale, + 'offset': offset, + 'mesg_num': mesg_num, + 'maxCGit': maxCGit, + 'maxiter': maxfun, + 'eta': eta, + 'stepmx': stepmx, + 'accuracy': accuracy, + 'minfev': fmin, + 'ftol': ftol, + 'xtol': xtol, + 'gtol': pgtol, + 'rescale': rescale, + 'disp': False} + + res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts) + + return res['x'], res['nfev'], res['status'] + + +def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None, + eps=1e-8, scale=None, offset=None, mesg_num=None, + maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0, + minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False, + callback=None, **unknown_options): + """ + Minimize a scalar function of one or more variables using a truncated + Newton (TNC) algorithm. + + Options + ------- + eps : float + Step size used for numerical approximation of the jacobian. + scale : list of floats + Scaling factors to apply to each variable. If None, the + factors are up-low for interval bounded variables and + 1+|x] fo the others. Defaults to None + offset : float + Value to subtract from each variable. If None, the + offsets are (up+low)/2 for interval bounded variables + and x for the others. + disp : bool + Set to True to print convergence messages. + maxCGit : int + Maximum number of hessian*vector evaluations per main + iteration. If maxCGit == 0, the direction chosen is + -gradient if maxCGit < 0, maxCGit is set to + max(1,min(50,n/2)). Defaults to -1. + maxiter : int + Maximum number of function evaluation. if None, `maxiter` is + set to max(100, 10*len(x0)). Defaults to None. + eta : float + Severity of the line search. if < 0 or > 1, set to 0.25. + Defaults to -1. + stepmx : float + Maximum step for the line search. May be increased during + call. If too small, it will be set to 10.0. Defaults to 0. + accuracy : float + Relative precision for finite difference calculations. If + <= machine_precision, set to sqrt(machine_precision). + Defaults to 0. + minfev : float + Minimum function value estimate. Defaults to 0. + ftol : float + Precision goal for the value of f in the stopping criterion. + If ftol < 0.0, ftol is set to 0.0 defaults to -1. + xtol : float + Precision goal for the value of x in the stopping + criterion (after applying x scaling factors). If xtol < + 0.0, xtol is set to sqrt(machine_precision). Defaults to + -1. + gtol : float + Precision goal for the value of the projected gradient in + the stopping criterion (after applying x scaling factors). 
+ If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy). + Setting it to 0.0 is not recommended. Defaults to -1. + rescale : float + Scaling factor (in log10) used to trigger f value + rescaling. If 0, rescale at each iteration. If a large + value, never rescale. If < 0, rescale is set to 1.3. + + """ + _check_unknown_options(unknown_options) + epsilon = eps + maxfun = maxiter + fmin = minfev + pgtol = gtol + + x0 = asfarray(x0).flatten() + n = len(x0) + + if bounds is None: + bounds = [(None,None)] * n + if len(bounds) != n: + raise ValueError('length of x0 != length of bounds') + + if mesg_num is not None: + messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, + 4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL) + elif disp: + messages = MSG_ALL + else: + messages = MSG_NONE + + if jac is None: + def func_and_grad(x): + f = fun(x, *args) + g = approx_fprime(x, fun, epsilon, *args) + return f, g + else: + def func_and_grad(x): + f = fun(x, *args) + g = jac(x, *args) + return f, g + + """ + low, up : the bounds (lists of floats) + if low is None, the lower bounds are removed. + if up is None, the upper bounds are removed. + low and up defaults to None + """ + low = zeros(n) + up = zeros(n) + for i in range(n): + if bounds[i] is None: + l, u = -inf, inf + else: + l,u = bounds[i] + if l is None: + low[i] = -inf + else: + low[i] = l + if u is None: + up[i] = inf + else: + up[i] = u + + if scale is None: + scale = array([]) + + if offset is None: + offset = array([]) + + if maxfun is None: + maxfun = max(100, 10*len(x0)) + + rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale, + offset, messages, maxCGit, maxfun, + eta, stepmx, accuracy, fmin, ftol, + xtol, pgtol, rescale, callback) + + funv, jacv = func_and_grad(x) + + return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=nf, nit=nit, status=rc, + message=RCSTRINGS[rc], success=(-1 < rc < 3)) + + +if __name__ == '__main__': + # Examples for TNC + + def example(): + print("Example") + + # A function to minimize + def function(x): + f = pow(x[0],2.0)+pow(abs(x[1]),3.0) + g = [0,0] + g[0] = 2.0*x[0] + g[1] = 3.0*pow(abs(x[1]),2.0) + if x[1] < 0: + g[1] = -g[1] + return f, g + + # Optimizer call + x, nf, rc = fmin_tnc(function, [-7, 3], bounds=([-10, 1], [10, 10])) + + print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc]) + print("x =", x) + print("exact value = [0, 1]") + print() + + example() diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/tnc.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/tnc.pyc new file mode 100644 index 0000000..519b6f4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/tnc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/zeros.py b/project/venv/lib/python2.7/site-packages/scipy/optimize/zeros.py new file mode 100644 index 0000000..e2e454e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/optimize/zeros.py @@ -0,0 +1,1342 @@ +from __future__ import division, print_function, absolute_import + +import warnings +from collections import namedtuple +from . 
import _zeros +import numpy as np + + +_iter = 100 +_xtol = 2e-12 +_rtol = 4 * np.finfo(float).eps + +__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748', 'RootResults'] + +_ECONVERGED = 0 +_ESIGNERR = -1 +_ECONVERR = -2 +_EVALUEERR = -3 +_EINPROGRESS = 1 + +flag_map = {_ECONVERGED: 'converged', + _ESIGNERR: 'sign error', + _ECONVERR: 'convergence error', + _EVALUEERR: 'value error', + _EINPROGRESS: 'in progress'} + + +class RootResults(object): + """Represents the root finding result. + + Attributes + ---------- + root : float + Estimated root location. + iterations : int + Number of iterations needed to find the root. + function_calls : int + Number of times the function was called. + converged : bool + True if the routine converged. + flag : str + Description of the cause of termination. + + """ + + def __init__(self, root, iterations, function_calls, flag): + self.root = root + self.iterations = iterations + self.function_calls = function_calls + self.converged = flag == _ECONVERGED + self.flag = None + try: + self.flag = flag_map[flag] + except KeyError: + self.flag = 'unknown error %d' % (flag,) + + def __repr__(self): + attrs = ['converged', 'flag', 'function_calls', + 'iterations', 'root'] + m = max(map(len, attrs)) + 1 + return '\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a)) + for a in attrs]) + + +def results_c(full_output, r): + if full_output: + x, funcalls, iterations, flag = r + results = RootResults(root=x, + iterations=iterations, + function_calls=funcalls, + flag=flag) + return x, results + else: + return r + + +def _results_select(full_output, r): + """Select from a tuple of (root, funccalls, iterations, flag)""" + x, funcalls, iterations, flag = r + if full_output: + results = RootResults(root=x, + iterations=iterations, + function_calls=funcalls, + flag=flag) + return x, results + return x + + +def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50, + fprime2=None, x1=None, rtol=0.0, + full_output=False, disp=True): + """ + Find a zero of a real or complex function using the Newton-Raphson + (or secant or Halley's) method. + + Find a zero of the function `func` given a nearby starting point `x0`. + The Newton-Raphson method is used if the derivative `fprime` of `func` + is provided, otherwise the secant method is used. If the second order + derivative `fprime2` of `func` is also provided, then Halley's method is + used. + + If `x0` is a sequence with more than one item, then `newton` returns an + array, and `func` must be vectorized and return a sequence or array of the + same shape as its first argument. If `fprime` or `fprime2` is given then + its return must also have the same shape. + + Parameters + ---------- + func : callable + The function whose zero is wanted. It must be a function of a + single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...`` + are extra arguments that can be passed in the `args` parameter. + x0 : float, sequence, or ndarray + An initial estimate of the zero that should be somewhere near the + actual zero. If not scalar, then `func` must be vectorized and return + a sequence or array of the same shape as its first argument. + fprime : callable, optional + The derivative of the function when available and convenient. If it + is None (default), then the secant method is used. + args : tuple, optional + Extra arguments to be used in the function call. + tol : float, optional + The allowable error of the zero value. 
If `func` is complex-valued, + a larger `tol` is recommended as both the real and imaginary parts + of `x` contribute to ``|x - x0|``. + maxiter : int, optional + Maximum number of iterations. + fprime2 : callable, optional + The second order derivative of the function when available and + convenient. If it is None (default), then the normal Newton-Raphson + or the secant method is used. If it is not None, then Halley's method + is used. + x1 : float, optional + Another estimate of the zero that should be somewhere near the + actual zero. Used if `fprime` is not provided. + rtol : float, optional + Tolerance (relative) for termination. + full_output : bool, optional + If `full_output` is False (default), the root is returned. + If True and `x0` is scalar, the return value is ``(x, r)``, where ``x`` + is the root and ``r`` is a `RootResults` object. + If True and `x0` is non-scalar, the return value is ``(x, converged, + zero_der)`` (see Returns section for details). + disp : bool, optional + If True, raise a RuntimeError if the algorithm didn't converge, with + the error message containing the number of iterations and current + function value. Otherwise the convergence status is recorded in a + `RootResults` return object. + Ignored if `x0` is not scalar. + *Note: this has little to do with displaying, however + the `disp` keyword cannot be renamed for backwards compatibility.* + + Returns + ------- + root : float, sequence, or ndarray + Estimated location where function is zero. + r : `RootResults`, optional + Present if ``full_output=True`` and `x0` is scalar. + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + converged : ndarray of bool, optional + Present if ``full_output=True`` and `x0` is non-scalar. + For vector functions, indicates which elements converged successfully. + zero_der : ndarray of bool, optional + Present if ``full_output=True`` and `x0` is non-scalar. + For vector functions, indicates which elements had a zero derivative. + + See Also + -------- + brentq, brenth, ridder, bisect + fsolve : find zeros in n dimensions. + + Notes + ----- + The convergence rate of the Newton-Raphson method is quadratic, + the Halley method is cubic, and the secant method is + sub-quadratic. This means that if the function is well behaved + the actual error in the estimated zero after the n-th iteration + is approximately the square (cube for Halley) of the error + after the (n-1)-th step. However, the stopping criterion used + here is the step size and there is no guarantee that a zero + has been found. Consequently the result should be verified. + Safer algorithms are brentq, brenth, ridder, and bisect, + but they all require that the root first be bracketed in an + interval where the function changes sign. The brentq algorithm + is recommended for general use in one dimensional problems + when such an interval has been found. + + When `newton` is used with arrays, it is best suited for the following + types of problems: + + * The initial guesses, `x0`, are all relatively the same distance from + the roots. + * Some or all of the extra arguments, `args`, are also arrays so that a + class of similar problems can be solved together. + * The size of the initial guesses, `x0`, is larger than O(100) elements. + Otherwise, a naive loop may perform as well or better than a vector. + + Examples + -------- + >>> from scipy import optimize + >>> import matplotlib.pyplot as plt + + >>> def f(x): + ... 
return (x**3 - 1) # only one real root at x = 1 + + ``fprime`` is not provided, use the secant method: + + >>> root = optimize.newton(f, 1.5) + >>> root + 1.0000000000000016 + >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x) + >>> root + 1.0000000000000016 + + Only ``fprime`` is provided, use the Newton-Raphson method: + + >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2) + >>> root + 1.0 + + Both ``fprime2`` and ``fprime`` are provided, use Halley's method: + + >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2, + ... fprime2=lambda x: 6 * x) + >>> root + 1.0 + + When we want to find zeros for a set of related starting values and/or + function parameters, we can provide both of those as an array of inputs: + + >>> f = lambda x, a: x**3 - a + >>> fder = lambda x, a: 3 * x**2 + >>> x = np.random.randn(100) + >>> a = np.arange(-50, 50) + >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, )) + + The above is the equivalent of solving for each value in ``(x, a)`` + separately in a for-loop, just faster: + + >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,)) + ... for x0, a0 in zip(x, a)] + >>> np.allclose(vec_res, loop_res) + True + + Plot the results found for all values of ``a``: + + >>> analytical_result = np.sign(a) * np.abs(a)**(1/3) + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(a, analytical_result, 'o') + >>> ax.plot(a, vec_res, '.') + >>> ax.set_xlabel('$a$') + >>> ax.set_ylabel('$x$ where $f(x, a)=0$') + >>> plt.show() + + """ + if tol <= 0: + raise ValueError("tol too small (%g <= 0)" % tol) + if maxiter < 1: + raise ValueError("maxiter must be greater than 0") + if np.size(x0) > 1: + return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, + full_output) + + # Convert to float (don't use float(x0); this works also for complex x0) + p0 = 1.0 * x0 + funcalls = 0 + if fprime is not None: + # Newton-Raphson method + for itr in range(maxiter): + # first evaluate fval + fval = func(p0, *args) + funcalls += 1 + # If fval is 0, a root has been found, then terminate + if fval == 0: + return _results_select( + full_output, (p0, funcalls, itr, _ECONVERGED)) + fder = fprime(p0, *args) + funcalls += 1 + if fder == 0: + msg = "derivative was zero." + warnings.warn(msg, RuntimeWarning) + return _results_select( + full_output, (p0, funcalls, itr + 1, _ECONVERR)) + newton_step = fval / fder + if fprime2: + fder2 = fprime2(p0, *args) + funcalls += 1 + # Halley's method: + # newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder) + # Only do it if denominator stays close enough to 1 + # Rationale: If 1-adj < 0, then Halley sends x in the + # opposite direction to Newton. Doesn't happen if x is close + # enough to root. 
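+                # Numeric illustration (added note, not upstream): with
+                # fval = 1, fder = 1, fder2 = 10 the raw Newton step is 1
+                # and adj = 1 * 10 / 1 / 2 = 5, so |adj| >= 1 and the
+                # Halley correction below is skipped in favour of the
+                # plain Newton step.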
+ adj = newton_step * fder2 / fder / 2 + if np.abs(adj) < 1: + newton_step /= 1.0 - adj + p = p0 - newton_step + if np.isclose(p, p0, rtol=rtol, atol=tol): + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERGED)) + p0 = p + else: + # Secant method + if x1 is not None: + if x1 == x0: + raise ValueError("x1 and x0 must be different") + p1 = x1 + else: + eps = 1e-4 + p1 = x0 * (1 + eps) + p1 += (eps if p1 >= 0 else -eps) + q0 = func(p0, *args) + funcalls += 1 + q1 = func(p1, *args) + funcalls += 1 + if abs(q1) < abs(q0): + p0, p1, q0, q1 = p1, p0, q1, q0 + for itr in range(maxiter): + if q1 == q0: + if p1 != p0: + msg = "Tolerance of %s reached" % (p1 - p0) + warnings.warn(msg, RuntimeWarning) + p = (p1 + p0) / 2.0 + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERGED)) + else: + if abs(q1) > abs(q0): + p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1) + else: + p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0) + if np.isclose(p, p1, rtol=rtol, atol=tol): + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERGED)) + p0, q0 = p1, q1 + p1 = p + q1 = func(p1, *args) + funcalls += 1 + + if disp: + msg = "Failed to converge after %d iterations, value is %s" % (itr + 1, p) + raise RuntimeError(msg) + + return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR)) + + +def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output): + """ + A vectorized version of Newton, Halley, and secant methods for arrays. + + Do not use this method directly. This method is called from `newton` + when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`. + """ + try: + p = np.asarray(x0, dtype=float) + except TypeError: + # can't convert complex to float + p = np.asarray(x0) + + failures = np.ones_like(p, dtype=bool) + nz_der = np.ones_like(failures) + if fprime is not None: + # Newton-Raphson method + for iteration in range(maxiter): + # first evaluate fval + fval = np.asarray(func(p, *args)) + # If all fval are 0, all roots have been found, then terminate + if not fval.any(): + failures = fval.astype(bool) + break + fder = np.asarray(fprime(p, *args)) + nz_der = (fder != 0) + # stop iterating if all derivatives are zero + if not nz_der.any(): + break + # Newton step + dp = fval[nz_der] / fder[nz_der] + if fprime2 is not None: + fder2 = np.asarray(fprime2(p, *args)) + dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der]) + # only update nonzero derivatives + p[nz_der] -= dp + failures[nz_der] = np.abs(dp) >= tol # items not yet converged + # stop iterating if there aren't any failures, not incl zero der + if not failures[nz_der].any(): + break + else: + # Secant method + dx = np.finfo(float).eps**0.33 + p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx) + q0 = np.asarray(func(p, *args)) + q1 = np.asarray(func(p1, *args)) + active = np.ones_like(p, dtype=bool) + for iteration in range(maxiter): + nz_der = (q1 != q0) + # stop iterating if all derivatives are zero + if not nz_der.any(): + p = (p1 + p) / 2.0 + break + # Secant Step + dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der] + # only update nonzero derivatives + p[nz_der] = p1[nz_der] - dp + active_zero_der = ~nz_der & active + p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0 + active &= nz_der # don't assign zero derivatives again + failures[nz_der] = np.abs(dp) >= tol # not yet converged + # stop iterating if there aren't any failures, not incl zero der + if not failures[nz_der].any(): + break + p1, p = p, p1 + q0 = q1 + q1 = np.asarray(func(p1, *args)) + + zero_der = ~nz_der & 
failures # don't include converged with zero-ders + if zero_der.any(): + # Secant warnings + if fprime is None: + nonzero_dp = (p1 != p) + # non-zero dp, but infinite newton step + zero_der_nz_dp = (zero_der & nonzero_dp) + if zero_der_nz_dp.any(): + rms = np.sqrt( + sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2) + ) + warnings.warn('RMS of {:g} reached'.format(rms), RuntimeWarning) + # Newton or Halley warnings + else: + all_or_some = 'all' if zero_der.all() else 'some' + msg = '{:s} derivatives were zero'.format(all_or_some) + warnings.warn(msg, RuntimeWarning) + elif failures.any(): + all_or_some = 'all' if failures.all() else 'some' + msg = '{0:s} failed to converge after {1:d} iterations'.format( + all_or_some, maxiter + ) + if failures.all(): + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning) + + if full_output: + result = namedtuple('result', ('root', 'converged', 'zero_der')) + p = result(p, ~failures, zero_der) + + return p + + +def bisect(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find root of a function within an interval using bisection. + + Basic bisection routine to find a zero of the function `f` between the + arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs. + Slow but sure. + + Parameters + ---------- + f : function + Python function returning a number. `f` must be continuous, and + f(a) and f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be nonnegative. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. + maxiter : int, optional + if convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where x is the root, and r is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise the convergence status is recorded in a `RootResults` + return object. + + Returns + ------- + x0 : float + Zero of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + Examples + -------- + + >>> def f(x): + ... 
return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.bisect(f, 0, 2) + >>> root + 1.0 + + >>> root = optimize.bisect(f, -2, 0) + >>> root + -1.0 + + See Also + -------- + brentq, brenth, bisect, newton + fixed_point : scalar fixed-point finder + fsolve : n-dimensional root-finding + + """ + if not isinstance(args, tuple): + args = (args,) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol)) + r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r) + + +def ridder(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in an interval using Ridder's method. + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be nonnegative. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. + maxiter : int, optional + if convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise the convergence status is recorded in any `RootResults` + return object. + + Returns + ------- + x0 : float + Zero of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. + In particular, ``r.converged`` is True if the routine converged. + + See Also + -------- + brentq, brenth, bisect, newton : one-dimensional root-finding + fixed_point : scalar fixed-point finder + + Notes + ----- + Uses [Ridders1979]_ method to find a zero of the function `f` between the + arguments `a` and `b`. Ridders' method is faster than bisection, but not + generally as fast as the Brent routines. [Ridders1979]_ provides the + classic description and source of the algorithm. A description can also be + found in any recent edition of Numerical Recipes. + + The routine used here diverges slightly from standard presentations in + order to be a bit more careful of tolerance. + + References + ---------- + .. [Ridders1979] + Ridders, C. F. J. "A New Algorithm for Computing a + Single Root of a Real Continuous Function." + IEEE Trans. Circuits Systems 26, 979-980, 1979. + + Examples + -------- + + >>> def f(x): + ... 
return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.ridder(f, 0, 2) + >>> root + 1.0 + + >>> root = optimize.ridder(f, -2, 0) + >>> root + -1.0 + """ + if not isinstance(args, tuple): + args = (args,) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol)) + r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r) + + +def brentq(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in a bracketing interval using Brent's method. + + Uses the classic Brent's method to find a zero of the function `f` on + the sign changing interval [a , b]. Generally considered the best of the + rootfinding routines here. It is a safe version of the secant method that + uses inverse quadratic extrapolation. Brent's method combines root + bracketing, interval bisection, and inverse quadratic interpolation. It is + sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973) + claims convergence is guaranteed for functions computable within [a,b]. + + [Brent1973]_ provides the classic description of the algorithm. Another + description can be found in a recent edition of Numerical Recipes, including + [PressEtal1992]_. Another description is at + http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to + understand the algorithm just by reading our code. Our code diverges a bit + from standard presentations: we choose a different formula for the + extrapolation step. + + Parameters + ---------- + f : function + Python function returning a number. The function :math:`f` + must be continuous, and :math:`f(a)` and :math:`f(b)` must + have opposite signs. + a : scalar + One end of the bracketing interval :math:`[a, b]`. + b : scalar + The other end of the bracketing interval :math:`[a, b]`. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be nonnegative. For nice functions, Brent's + method will often satisfy the above condition with ``xtol/2`` + and ``rtol/2``. [Brent1973]_ + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. For nice functions, Brent's + method will often satisfy the above condition with ``xtol/2`` + and ``rtol/2``. [Brent1973]_ + maxiter : int, optional + if convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise the convergence status is recorded in any `RootResults` + return object. + + Returns + ------- + x0 : float + Zero of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. 
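+
+    A ``full_output`` sketch (an added illustration, not part of the
+    upstream docstring; `f`, `a`, `b` as described above)::
+
+        x0, r = brentq(f, a, b, full_output=True)
+        # r.converged, r.iterations, r.function_calls summarize the run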
+ + See Also + -------- + multivariate local optimizers + `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg` + nonlinear least squares minimizer + `leastsq` + constrained multivariate optimizers + `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla` + global optimizers + `basinhopping`, `brute`, `differential_evolution` + local scalar minimizers + `fminbound`, `brent`, `golden`, `bracket` + n-dimensional root-finding + `fsolve` + one-dimensional root-finding + `brenth`, `ridder`, `bisect`, `newton` + scalar fixed-point finder + `fixed_point` + + Notes + ----- + `f` must be continuous. f(a) and f(b) must have opposite signs. + + References + ---------- + .. [Brent1973] + Brent, R. P., + *Algorithms for Minimization Without Derivatives*. + Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4. + + .. [PressEtal1992] + Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T. + *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed. + Cambridge, England: Cambridge University Press, pp. 352-355, 1992. + Section 9.3: "Van Wijngaarden-Dekker-Brent Method." + + Examples + -------- + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.brentq(f, -2, 0) + >>> root + -1.0 + + >>> root = optimize.brentq(f, 0, 2) + >>> root + 1.0 + """ + if not isinstance(args, tuple): + args = (args,) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol)) + r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r) + + +def brenth(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """Find a root of a function in a bracketing interval using Brent's + method with hyperbolic extrapolation. + + A variation on the classic Brent routine to find a zero of the function f + between the arguments a and b that uses hyperbolic extrapolation instead of + inverse quadratic extrapolation. There was a paper back in the 1980's ... + f(a) and f(b) cannot have the same signs. Generally on a par with the + brent routine, but not as heavily tested. It is a safe version of the + secant method that uses hyperbolic extrapolation. The version here is by + Chuck Harris. + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be nonnegative. As with `brentq`, for nice + functions the method will often satisfy the above condition + with ``xtol/2`` and ``rtol/2``. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. As with `brentq`, for nice functions + the method will often satisfy the above condition with + ``xtol/2`` and ``rtol/2``. + maxiter : int, optional + if convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. 
+ full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise the convergence status is recorded in any `RootResults` + return object. + + Returns + ------- + x0 : float + Zero of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + Examples + -------- + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.brenth(f, -2, 0) + >>> root + -1.0 + + >>> root = optimize.brenth(f, 0, 2) + >>> root + 1.0 + + See Also + -------- + fmin, fmin_powell, fmin_cg, + fmin_bfgs, fmin_ncg : multivariate local optimizers + + leastsq : nonlinear least squares minimizer + + fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers + + basinhopping, differential_evolution, brute : global optimizers + + fminbound, brent, golden, bracket : local scalar minimizers + + fsolve : n-dimensional root-finding + + brentq, brenth, ridder, bisect, newton : one-dimensional root-finding + + fixed_point : scalar fixed-point finder + + """ + if not isinstance(args, tuple): + args = (args,) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol)) + r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r) + + +################################ +# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by +# Alefeld, G. E. and Potra, F. A. and Shi, Yixun, +# See [1] + + +def _within_tolerance(x, y, rtol, atol): + diff = np.abs(x - y) + z = np.abs(y) + result = (diff <= (atol + rtol * z)) + return result + + +def _notclose(fs, rtol=_rtol, atol=_xtol): + # Ensure not None, not 0, all finite, and not very close to each other + notclosefvals = all(fs) and all(np.isfinite(fs)) and \ + not any(any( + np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol)) + for i, _f in enumerate(fs[:-1])) + return notclosefvals + + +def _secant(xvals, fvals): + """Perform a secant step, taking a little care""" + # Secant has many "mathematically" equivalent formulations + # x2 = x0 - (x1 - x0)/(f1 - f0) * f0 + # = x1 - (x1 - x0)/(f1 - f0) * f1 + # = (-x1 * f0 + x0 * f1) / (f1 - f0) + # = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) + # = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) + x0, x1 = xvals[:2] + f0, f1 = fvals[:2] + if f0 == f1: + return np.nan + if np.abs(f1) > np.abs(f0): + x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) + else: + x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) + return x2 + + +def _update_bracket(ab, fab, c, fc): + """Update a bracket given (c, fc) with a < c < b. Return the discarded endpoints""" + fa, fb = fab + idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1) + rx, rfx = ab[idx], fab[idx] + fab[idx] = fc + ab[idx] = c + return rx, rfx + + +def _compute_divided_differences(xvals, fvals, N=None, full=True, forward=True): + """Return a matrix of divided differences for the xvals, fvals pairs + + DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i + + If full is False, just return the main diagonal(or last row): + f[a], f[a, b] and f[a, b, c]. 
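+    (Illustrative check, an added note: for xvals = [0, 1, 2] and
+    fvals = [0, 1, 4], i.e. f(x) = x**2, this gives f[a] = 0,
+    f[a, b] = 1, f[a, b, c] = 1.)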
+    If forward is False, return f[c], f[b, c], f[a, b, c]."""
+    if full:
+        if forward:
+            xvals = np.asarray(xvals)
+        else:
+            xvals = np.array(xvals)[::-1]
+        M = len(xvals)
+        N = M if N is None else min(N, M)
+        DD = np.zeros([M, N])
+        DD[:, 0] = fvals[:]
+        for i in range(1, N):
+            DD[i:, i] = np.diff(DD[i - 1:, i - 1]) / (xvals[i:] - xvals[:M - i])
+        return DD
+
+    xvals = np.asarray(xvals)
+    dd = np.array(fvals)
+    row = np.array(fvals)
+    idx2Use = (0 if forward else -1)
+    dd[0] = fvals[idx2Use]
+    for i in range(1, len(xvals)):
+        denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
+        row = np.diff(row)[:] / denom
+        dd[i] = row[idx2Use]
+    return dd
+
+
+def _interpolated_poly(xvals, fvals, x):
+    """Compute p(x) for the polynomial passing through the specified locations.
+
+    Use Neville's algorithm to compute p(x) where p is the minimal degree
+    polynomial passing through the points xvals, fvals"""
+    xvals = np.asarray(xvals)
+    N = len(xvals)
+    Q = np.zeros([N, N])
+    D = np.zeros([N, N])
+    Q[:, 0] = fvals[:]
+    D[:, 0] = fvals[:]
+    for k in range(1, N):
+        alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]
+        diffik = xvals[0:N - k] - xvals[k:N]
+        Q[k:, k] = (xvals[k:] - x) / diffik * alpha
+        D[k:, k] = (xvals[:N - k] - x) / diffik * alpha
+    # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root
+    return np.sum(Q[-1, 1:]) + Q[-1, 0]
+
+
+def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):
+    """Inverse cubic interpolation f-values -> x-values
+
+    Given four points (fa, a), (fb, b), (fc, c), (fd, d) with
+    fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points
+    and compute x=IP(0).
+    """
+    return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)
+
+
+def _newton_quadratic(ab, fab, d, fd, k):
+    """Apply Newton-Raphson like steps, using divided differences to approximate f'
+
+    ab is a real interval [a, b] containing a root,
+    fab holds the real values of f(a), f(b)
+    d is a real number outside [a, b]
+    k is the number of steps to apply
+    """
+    a, b = ab
+    fa, fb = fab
+    _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],
+                                           forward=True, full=False)
+
+    # _P is the quadratic polynomial through the 3 points
+    def _P(x):
+        # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b)
+        return (A * (x - b) + B) * (x - a) + fa
+
+    if A == 0:
+        r = a - fa / B
+    else:
+        r = (a if np.sign(A) * np.sign(fa) > 0 else b)
+    # Apply k Newton-Raphson steps to _P(x), starting from x=r
+    for i in range(k):
+        r1 = r - _P(r) / (B + A * (2 * r - a - b))
+        if not (ab[0] < r1 < ab[1]):
+            if (ab[0] < r < ab[1]):
+                return r
+            r = sum(ab) / 2.0
+            break
+        r = r1
+
+    return r
+
+
+class TOMS748Solver(object):
+    """Solve f(x, *args) == 0 using Algorithm 748 of Alefeld, Potra & Shi.
+    """
+    _MU = 0.5
+    _K_MIN = 1
+    _K_MAX = 100  # A very high value for real usage. Expect 1, 2, maybe 3.
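+
+    # Rough usage sketch (all names below are defined in this module): the
+    # solver is normally driven through the `toms748` wrapper further down,
+    # which does essentially
+    #
+    #     solver = TOMS748Solver()
+    #     x, function_calls, iterations, flag = solver.solve(f, a, b,
+    #                                                        args=args)
+    #
+    # `solve` configures the tolerances, brackets the root in `start`, then
+    # calls `iterate` until `get_status` reports convergence; `get_result`
+    # packages the tuple above.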
+ + def __init__(self): + self.f = None + self.args = None + self.function_calls = 0 + self.iterations = 0 + self.k = 2 + self.ab = [np.nan, np.nan] # ab=[a,b] is a global interval containing a root + self.fab = [np.nan, np.nan] # fab is function values at a, b + self.d = None + self.fd = None + self.e = None + self.fe = None + self.disp = False + self.xtol = _xtol + self.rtol = _rtol + self.maxiter = _iter + + def configure(self, xtol, rtol, maxiter, disp, k): + self.disp = disp + self.xtol = xtol + self.rtol = rtol + self.maxiter = maxiter + # Silently replace a low value of k with 1 + self.k = max(k, self._K_MIN) + # Noisily replace a high value of k with self._K_MAX + if self.k > self._K_MAX: + msg = "toms748: Overriding k: ->%d" % self._K_MAX + warnings.warn(msg, RuntimeWarning) + self.k = self._K_MAX + + def _callf(self, x, error=True): + """Call the user-supplied function, update book-keeping""" + fx = self.f(x, *self.args) + self.function_calls += 1 + if not np.isfinite(fx) and error: + raise ValueError("Invalid function value: f(%f) -> %s " % (x, fx)) + return fx + + def get_result(self, x, flag=_ECONVERGED): + r"""Package the result and statistics into a tuple.""" + return (x, self.function_calls, self.iterations, flag) + + def _update_bracket(self, c, fc): + return _update_bracket(self.ab, self.fab, c, fc) + + def start(self, f, a, b, args=()): + r"""Prepare for the iterations.""" + self.function_calls = 0 + self.iterations = 0 + + self.f = f + self.args = args + self.ab[:] = [a, b] + if not np.isfinite(a) or np.imag(a) != 0: + raise ValueError("Invalid x value: %s " % (a)) + if not np.isfinite(b) or np.imag(b) != 0: + raise ValueError("Invalid x value: %s " % (b)) + + fa = self._callf(a) + if not np.isfinite(fa) or np.imag(fa) != 0: + raise ValueError("Invalid function value: f(%f) -> %s " % (a, fa)) + if fa == 0: + return _ECONVERGED, a + fb = self._callf(b) + if not np.isfinite(fb) or np.imag(fb) != 0: + raise ValueError("Invalid function value: f(%f) -> %s " % (b, fb)) + if fb == 0: + return _ECONVERGED, b + + if np.sign(fb) * np.sign(fa) > 0: + raise ValueError("a, b must bracket a root f(%e)=%e, f(%e)=%e " % + (a, fa, b, fb)) + self.fab[:] = [fa, fb] + + return _EINPROGRESS, sum(self.ab) / 2.0 + + def get_status(self): + """Determine the current status.""" + a, b = self.ab[:2] + if _within_tolerance(a, b, self.rtol, self.xtol): + return _ECONVERGED, sum(self.ab) / 2.0 + if self.iterations >= self.maxiter: + return _ECONVERR, sum(self.ab) / 2.0 + return _EINPROGRESS, sum(self.ab) / 2.0 + + def iterate(self): + """Perform one step in the algorithm. + + Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995] + """ + self.iterations += 1 + eps = np.finfo(float).eps + d, fd, e, fe = self.d, self.fd, self.e, self.fe + ab_width = self.ab[1] - self.ab[0] # Need the start width below + c = None + + for nsteps in range(2, self.k+2): + # If the f-values are sufficiently separated, perform an inverse + # polynomial interpolation step. Otherwise nsteps repeats of + # an approximate Newton-Raphson step. 
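+            # The inverse step interpolates x as a polynomial in f through
+            # the four known (f, x) pairs and evaluates it at f == 0, so it
+            # is only well-conditioned when the four f-values are pairwise
+            # distinct; that is what the _notclose test checks.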
+            if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps):
+                c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
+                                        self.fab[0], self.fab[1], fd, fe)
+                if self.ab[0] < c0 < self.ab[1]:
+                    c = c0
+            if c is None:
+                c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)
+
+            fc = self._callf(c)
+            if fc == 0:
+                return _ECONVERGED, c
+
+            # re-bracket
+            e, fe = d, fd
+            d, fd = self._update_bracket(c, fc)
+
+        # u is the endpoint with the smallest f-value
+        uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
+        u, fu = self.ab[uix], self.fab[uix]
+
+        _, A = _compute_divided_differences(self.ab, self.fab,
+                                            forward=(uix == 0), full=False)
+        c = u - 2 * fu / A
+        if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
+            c = sum(self.ab) / 2.0
+        else:
+            if np.isclose(c, u, rtol=eps, atol=0):
+                # c didn't change (much).
+                # Either because the f-values at the endpoints have vastly
+                # differing magnitudes, or because the root is very close to
+                # that endpoint
+                frs = np.frexp(self.fab)[1]
+                if frs[uix] < frs[1 - uix] - 50:  # Differ by more than 2**50
+                    c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
+                else:
+                    # Make a bigger adjustment, about the
+                    # size of the requested tolerance.
+                    mm = (1 if uix == 0 else -1)
+                    adj = mm * np.abs(c) * self.rtol + mm * self.xtol
+                    c = u + adj
+                if not self.ab[0] < c < self.ab[1]:
+                    c = sum(self.ab) / 2.0
+
+        fc = self._callf(c)
+        if fc == 0:
+            return _ECONVERGED, c
+
+        e, fe = d, fd
+        d, fd = self._update_bracket(c, fc)
+
+        # If the width of the new interval did not decrease enough, bisect
+        if self.ab[1] - self.ab[0] > self._MU * ab_width:
+            e, fe = d, fd
+            z = sum(self.ab) / 2.0
+            fz = self._callf(z)
+            if fz == 0:
+                return _ECONVERGED, z
+            d, fd = self._update_bracket(z, fz)
+
+        # Record d and e for next iteration
+        self.d, self.fd = d, fd
+        self.e, self.fe = e, fe
+
+        status, xn = self.get_status()
+        return status, xn
+
+    def solve(self, f, a, b, args=(),
+              xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):
+        r"""Solve f(x) = 0 given an interval containing a zero."""
+        self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)
+        status, xn = self.start(f, a, b, args)
+        if status == _ECONVERGED:
+            return self.get_result(xn)
+
+        # The first step only has two x-values.
+        c = _secant(self.ab, self.fab)
+        if not self.ab[0] < c < self.ab[1]:
+            c = sum(self.ab) / 2.0
+        fc = self._callf(c)
+        if fc == 0:
+            return self.get_result(c)
+
+        self.d, self.fd = self._update_bracket(c, fc)
+        self.e, self.fe = None, None
+        self.iterations += 1
+
+        while True:
+            status, xn = self.iterate()
+            if status == _ECONVERGED:
+                return self.get_result(xn)
+            if status == _ECONVERR:
+                fmt = "Failed to converge after %d iterations, bracket is %s"
+                if disp:
+                    msg = fmt % (self.iterations + 1, self.ab)
+                    raise RuntimeError(msg)
+                return self.get_result(xn, _ECONVERR)
+
+
+def toms748(f, a, b, args=(), k=1,
+            xtol=_xtol, rtol=_rtol, maxiter=_iter,
+            full_output=False, disp=True):
+    """
+    Find a zero using TOMS Algorithm 748.
+
+    Implements the Algorithm 748 method of Alefeld, Potra and Shi to find a
+    zero of the function `f` on the interval `[a, b]`, where `f(a)` and
+    `f(b)` must have opposite signs.
+
+    It uses a mixture of inverse cubic interpolation and
+    "Newton-quadratic" steps [APS1995]_.
+
+    Parameters
+    ----------
+    f : function
+        Python function returning a scalar. The function :math:`f`
+        must be continuous, and :math:`f(a)` and :math:`f(b)`
+        have opposite signs.
+    a : scalar
+        Lower boundary of the search interval.
+    b : scalar
+        Upper boundary of the search interval.
+    args : tuple, optional
+        Extra arguments for the function `f`.
+        `f` is called by ``f(x, *args)``.
+    k : int, optional
+        The number of Newton quadratic steps to perform each iteration. ``k>=1``.
+    xtol : scalar, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter must be nonnegative.
+    rtol : scalar, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
+    maxiter : int, optional
+        If convergence is not achieved in `maxiter` iterations, an error is
+        raised. Must be >= 0.
+    full_output : bool, optional
+        If `full_output` is False, the root is returned. If `full_output` is
+        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
+        a `RootResults` object.
+    disp : bool, optional
+        If True, raise RuntimeError if the algorithm didn't converge.
+        Otherwise the convergence status is recorded in the `RootResults`
+        return object.
+
+    Returns
+    -------
+    x0 : float
+        Approximate zero of `f`.
+    r : `RootResults` (present if ``full_output = True``)
+        Object containing information about the convergence. In particular,
+        ``r.converged`` is True if the routine converged.
+
+    See Also
+    --------
+    brentq, brenth, ridder, bisect, newton
+    fsolve : find zeroes in n dimensions.
+
+    Notes
+    -----
+    `f` must be continuous.
+    Algorithm 748 with ``k=2`` is asymptotically the most efficient
+    algorithm known for finding roots of a four times continuously
+    differentiable function.
+    In contrast with Brent's algorithm, which may only decrease the length of
+    the enclosing bracket on the last step, Algorithm 748 decreases it each
+    iteration with the same asymptotic efficiency as it finds the root.
+
+    For easy statement of efficiency indices, assume that `f` has 4
+    continuous derivatives.
+    For ``k=1``, the convergence order is at least 2.7, and with about
+    asymptotically 2 function evaluations per iteration, the efficiency
+    index is approximately 1.65.
+    For ``k=2``, the order is about 4.6 with asymptotically 3 function
+    evaluations per iteration, and the efficiency index 1.66.
+    For higher values of `k`, the efficiency index approaches
+    the `k`-th root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are
+    usually appropriate.
+
+    References
+    ----------
+    .. [APS1995]
+       Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
+       *Algorithm 748: Enclosing Zeros of Continuous Functions*,
+       ACM Trans. Math. Softw. Volume 21(1995)
+       doi = {10.1145/210089.210111}
+
+    Examples
+    --------
+    >>> def f(x):
+    ...
return (x**3 - 1) # only one real root at x = 1 + + >>> from scipy import optimize + >>> root, results = optimize.toms748(f, 0, 2, full_output=True) + >>> root + 1.0 + >>> results + converged: True + flag: 'converged' + function_calls: 11 + iterations: 5 + root: 1.0 + """ + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol / 4: + raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol)) + if maxiter < 1: + raise ValueError("maxiter must be greater than 0") + if not np.isfinite(a): + raise ValueError("a is not finite %s" % a) + if not np.isfinite(b): + raise ValueError("b is not finite %s" % b) + if a >= b: + raise ValueError("a and b are not an interval [%d, %d]" % (a, b)) + if not k >= 1: + raise ValueError("k too small (%s < 1)" % k) + + if not isinstance(args, tuple): + args = (args,) + solver = TOMS748Solver() + result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol, + maxiter=maxiter, disp=disp) + x, function_calls, iterations, flag = result + return _results_select(full_output, (x, function_calls, iterations, flag)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/optimize/zeros.pyc b/project/venv/lib/python2.7/site-packages/scipy/optimize/zeros.pyc new file mode 100644 index 0000000..f962e46 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/optimize/zeros.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/setup.py b/project/venv/lib/python2.7/site-packages/scipy/setup.py new file mode 100644 index 0000000..0a17f94 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/setup.py @@ -0,0 +1,34 @@ +from __future__ import division, print_function, absolute_import + +def configuration(parent_package='',top_path=None): + from scipy._build_utils.system_info import get_info, NotFoundError + lapack_opt = get_info("lapack_opt") + + from numpy.distutils.misc_util import Configuration + config = Configuration('scipy',parent_package,top_path) + config.add_subpackage('cluster') + config.add_subpackage('constants') + config.add_subpackage('fftpack') + config.add_subpackage('integrate') + config.add_subpackage('interpolate') + config.add_subpackage('io') + config.add_subpackage('linalg') + config.add_data_files('*.pxd') + config.add_subpackage('misc') + config.add_subpackage('odr') + config.add_subpackage('optimize') + config.add_subpackage('signal') + config.add_subpackage('sparse') + config.add_subpackage('spatial') + config.add_subpackage('special') + config.add_subpackage('stats') + config.add_subpackage('ndimage') + config.add_subpackage('_build_utils') + config.add_subpackage('_lib') + config.make_config_py() + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/setup.pyc new file mode 100644 index 0000000..093123e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/signal/__init__.py new file mode 100644 index 0000000..b053d74 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/__init__.py @@ -0,0 +1,384 @@ +""" +======================================= +Signal processing (:mod:`scipy.signal`) +======================================= + +Convolution +=========== + +.. 
autosummary:: + :toctree: generated/ + + convolve -- N-dimensional convolution. + correlate -- N-dimensional correlation. + fftconvolve -- N-dimensional convolution using the FFT. + convolve2d -- 2-dimensional convolution (more options). + correlate2d -- 2-dimensional correlation (more options). + sepfir2d -- Convolve with a 2-D separable FIR filter. + choose_conv_method -- Chooses faster of FFT and direct convolution methods. + +B-splines +========= + +.. autosummary:: + :toctree: generated/ + + bspline -- B-spline basis function of order n. + cubic -- B-spline basis function of order 3. + quadratic -- B-spline basis function of order 2. + gauss_spline -- Gaussian approximation to the B-spline basis function. + cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline. + qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline. + cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline. + qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline. + cspline1d_eval -- Evaluate a cubic spline at the given points. + qspline1d_eval -- Evaluate a quadratic spline at the given points. + spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array. + +Filtering +========= + +.. autosummary:: + :toctree: generated/ + + order_filter -- N-dimensional order filter. + medfilt -- N-dimensional median filter. + medfilt2d -- 2-dimensional median filter (faster). + wiener -- N-dimensional wiener filter. + + symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems). + symiirorder2 -- 4th-order IIR filter (cascade of second-order systems). + lfilter -- 1-dimensional FIR and IIR digital linear filtering. + lfiltic -- Construct initial conditions for `lfilter`. + lfilter_zi -- Compute an initial state zi for the lfilter function that + -- corresponds to the steady state of the step response. + filtfilt -- A forward-backward filter. + savgol_filter -- Filter a signal using the Savitzky-Golay filter. + + deconvolve -- 1-d deconvolution using lfilter. + + sosfilt -- 1-dimensional IIR digital linear filtering using + -- a second-order sections filter representation. + sosfilt_zi -- Compute an initial state zi for the sosfilt function that + -- corresponds to the steady state of the step response. + sosfiltfilt -- A forward-backward filter for second-order sections. + hilbert -- Compute 1-D analytic signal, using the Hilbert transform. + hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform. + + decimate -- Downsample a signal. + detrend -- Remove linear and/or constant trends from data. + resample -- Resample using Fourier method. + resample_poly -- Resample using polyphase filtering method. + upfirdn -- Upsample, apply FIR filter, downsample. + +Filter design +============= + +.. autosummary:: + :toctree: generated/ + + bilinear -- Digital filter from an analog filter using + -- the bilinear transform. + bilinear_zpk -- Digital filter from an analog filter using + -- the bilinear transform. + findfreqs -- Find array of frequencies for computing filter response. + firls -- FIR filter design using least-squares error minimization. + firwin -- Windowed FIR filter design, with frequency response + -- defined as pass and stop bands. + firwin2 -- Windowed FIR filter design, with arbitrary frequency + -- response. + freqs -- Analog filter frequency response from TF coefficients. + freqs_zpk -- Analog filter frequency response from ZPK coefficients. + freqz -- Digital filter frequency response from TF coefficients. 
+   freqz_zpk -- Digital filter frequency response from ZPK coefficients.
+   sosfreqz -- Digital filter frequency response for SOS format filter.
+   group_delay -- Digital filter group delay.
+   iirdesign -- IIR filter design given bands and gains.
+   iirfilter -- IIR filter design given order and critical frequencies.
+   kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
+                -- the number of taps and the transition width at
+                -- discontinuities in the frequency response.
+   kaiser_beta -- Compute the Kaiser parameter beta, given the desired
+               -- FIR filter attenuation.
+   kaiserord -- Design a Kaiser window to limit ripple and width of
+             -- transition region.
+   minimum_phase -- Convert a linear phase FIR filter to minimum phase.
+   savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
+                 -- filter.
+   remez -- Optimal FIR filter design.
+
+   unique_roots -- Unique roots and their multiplicities.
+   residue -- Partial fraction expansion of b(s) / a(s).
+   residuez -- Partial fraction expansion of b(z) / a(z).
+   invres -- Inverse partial fraction expansion for analog filter.
+   invresz -- Inverse partial fraction expansion for digital filter.
+   BadCoefficients -- Warning on badly conditioned filter coefficients
+
+Lower-level filter design functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   abcd_normalize -- Check state-space matrices and ensure they are rank-2.
+   band_stop_obj -- Band Stop Objective Function for order minimization.
+   besselap -- Return (z,p,k) for analog prototype of Bessel filter.
+   buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
+   cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
+   cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
+   cmplx_sort -- Sort roots based on magnitude.
+   ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
+   lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
+   lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter.
+   lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
+   lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter.
+   lp2hp -- Transform a lowpass filter prototype to a highpass filter.
+   lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter.
+   lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
+   lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter.
+   normalize -- Normalize polynomial representation of a transfer function.
+
+
+
+Matlab-style IIR filter design
+==============================
+
+.. autosummary::
+   :toctree: generated/
+
+   butter -- Butterworth
+   buttord
+   cheby1 -- Chebyshev Type I
+   cheb1ord
+   cheby2 -- Chebyshev Type II
+   cheb2ord
+   ellip -- Elliptic (Cauer)
+   ellipord
+   bessel -- Bessel (no order selection available -- try buttord)
+   iirnotch -- Design second-order IIR notch digital filter.
+   iirpeak -- Design second-order IIR peak (resonant) digital filter.
+
+Continuous-Time Linear Systems
+==============================
+
+.. autosummary::
+   :toctree: generated/
+
+   lti -- Continuous-time linear time invariant system base class.
+   StateSpace -- Linear time invariant system in state space form.
+   TransferFunction -- Linear time invariant system in transfer function form.
+   ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
+   lsim -- continuous-time simulation of output to linear system.
+   lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
+   impulse -- impulse response of linear, time-invariant (LTI) system.
+   impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
+   step -- step response of continuous-time LTI system.
+   step2 -- like step, but `scipy.integrate.odeint` is used.
+   freqresp -- frequency response of a continuous-time LTI system.
+   bode -- Bode magnitude and phase data (continuous-time LTI).
+
+Discrete-Time Linear Systems
+============================
+
+.. autosummary::
+   :toctree: generated/
+
+   dlti -- Discrete-time linear time invariant system base class.
+   StateSpace -- Linear time invariant system in state space form.
+   TransferFunction -- Linear time invariant system in transfer function form.
+   ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
+   dlsim -- simulation of output to a discrete-time linear system.
+   dimpulse -- impulse response of a discrete-time LTI system.
+   dstep -- step response of a discrete-time LTI system.
+   dfreqresp -- frequency response of a discrete-time LTI system.
+   dbode -- Bode magnitude and phase data (discrete-time LTI).
+
+LTI Representations
+===================
+
+.. autosummary::
+   :toctree: generated/
+
+   tf2zpk -- transfer function to zero-pole-gain.
+   tf2sos -- transfer function to second-order sections.
+   tf2ss -- transfer function to state-space.
+   zpk2tf -- zero-pole-gain to transfer function.
+   zpk2sos -- zero-pole-gain to second-order sections.
+   zpk2ss -- zero-pole-gain to state-space.
+   ss2tf -- state-space to transfer function.
+   ss2zpk -- state-space to pole-zero-gain.
+   sos2zpk -- second-order sections to zero-pole-gain.
+   sos2tf -- second-order sections to transfer function.
+   cont2discrete -- continuous-time to discrete-time LTI conversion.
+   place_poles -- pole placement.
+
+Waveforms
+=========
+
+.. autosummary::
+   :toctree: generated/
+
+   chirp -- Frequency swept cosine signal, with several freq functions.
+   gausspulse -- Gaussian modulated sinusoid
+   max_len_seq -- Maximum length sequence
+   sawtooth -- Periodic sawtooth
+   square -- Square wave
+   sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
+   unit_impulse -- Discrete unit impulse
+
+Window functions
+================
+
+Most window functions are available in the `scipy.signal.windows` namespace,
+but we list them here for convenience:
+
+.. autosummary::
+   :toctree: generated/
+
+   get_window -- Return a window of a given length and type.
+
+   windows.barthann -- Bartlett-Hann window
+   windows.bartlett -- Bartlett window
+   windows.blackman -- Blackman window
+   windows.blackmanharris -- Minimum 4-term Blackman-Harris window
+   windows.bohman -- Bohman window
+   windows.boxcar -- Boxcar window
+   windows.chebwin -- Dolph-Chebyshev window
+   windows.cosine -- Cosine window
+   windows.dpss -- Discrete prolate spheroidal sequences
+   windows.exponential -- Exponential window
+   windows.flattop -- Flat top window
+   windows.gaussian -- Gaussian window
+   windows.general_cosine -- Generalized Cosine window
+   windows.general_gaussian -- Generalized Gaussian window
+   windows.general_hamming -- Generalized Hamming window
+   windows.hamming -- Hamming window
+   windows.hann -- Hann window
+   windows.hanning -- Hann window
+   windows.kaiser -- Kaiser window
+   windows.nuttall -- Nuttall's minimum 4-term Blackman-Harris window
+   windows.parzen -- Parzen window
+   windows.slepian -- Slepian window
+   windows.triang -- Triangular window
+   windows.tukey -- Tukey window
+
+Wavelets
+========
+
+..
autosummary:: + :toctree: generated/ + + cascade -- compute scaling function and wavelet from coefficients + daub -- return low-pass + morlet -- Complex Morlet wavelet. + qmf -- return quadrature mirror filter from low-pass + ricker -- return ricker wavelet + cwt -- perform continuous wavelet transform + +Peak finding +============ + +.. autosummary:: + :toctree: generated/ + + argrelmin -- Calculate the relative minima of data + argrelmax -- Calculate the relative maxima of data + argrelextrema -- Calculate the relative extrema of data + find_peaks -- Find a subset of peaks inside a signal. + find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation. + peak_prominences -- Calculate the prominence of each peak in a signal. + peak_widths -- Calculate the width of each peak in a signal. + +Spectral Analysis +================= + +.. autosummary:: + :toctree: generated/ + + periodogram -- Compute a (modified) periodogram + welch -- Compute a periodogram using Welch's method + csd -- Compute the cross spectral density, using Welch's method + coherence -- Compute the magnitude squared coherence, using Welch's method + spectrogram -- Compute the spectrogram + lombscargle -- Computes the Lomb-Scargle periodogram + vectorstrength -- Computes the vector strength + stft -- Compute the Short Time Fourier Transform + istft -- Compute the Inverse Short Time Fourier Transform + check_COLA -- Check the COLA constraint for iSTFT reconstruction + check_NOLA -- Check the NOLA constraint for iSTFT reconstruction + +""" +from __future__ import division, print_function, absolute_import + +from . import sigtools, windows +from .waveforms import * +from ._max_len_seq import max_len_seq +from ._upfirdn import upfirdn + +# The spline module (a C extension) provides: +# cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2 +from .spline import * + +from .bsplines import * +from .filter_design import * +from .fir_filter_design import * +from .ltisys import * +from .lti_conversion import * +from .signaltools import * +from ._savitzky_golay import savgol_coeffs, savgol_filter +from .spectral import * +from .wavelets import * +from ._peak_finding import * +from .windows import get_window # keep this one in signal namespace + + +# deal with * -> windows.* doc-only soft-deprecation +deprecated_windows = ('boxcar', 'triang', 'parzen', 'bohman', 'blackman', + 'nuttall', 'blackmanharris', 'flattop', 'bartlett', + 'barthann', 'hamming', 'kaiser', 'gaussian', + 'general_gaussian', 'chebwin', 'slepian', 'cosine', + 'hann', 'exponential', 'tukey') + +# backward compatibility imports for actually deprecated windows not +# in the above list +from .windows import hanning + + +def deco(name): + f = getattr(windows, name) + # Add deprecation to docstring + + def wrapped(*args, **kwargs): + return f(*args, **kwargs) + + wrapped.__name__ = name + wrapped.__module__ = 'scipy.signal' + if hasattr(f, '__qualname__'): + wrapped.__qualname__ = f.__qualname__ + + if f.__doc__ is not None: + lines = f.__doc__.splitlines() + for li, line in enumerate(lines): + if line.strip() == 'Parameters': + break + else: + raise RuntimeError('dev error: badly formatted doc') + spacing = ' ' * line.find('P') + lines.insert(li, ('{0}.. 
warning:: scipy.signal.{1} is deprecated,\n' + '{0} use scipy.signal.windows.{1} ' + 'instead.\n'.format(spacing, name))) + wrapped.__doc__ = '\n'.join(lines) + + return wrapped + + +for name in deprecated_windows: + locals()[name] = deco(name) + +del deprecated_windows, name, deco + + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/__init__.pyc new file mode 100644 index 0000000..99cd935 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_arraytools.py b/project/venv/lib/python2.7/site-packages/scipy/signal/_arraytools.py new file mode 100644 index 0000000..2661420 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/_arraytools.py @@ -0,0 +1,243 @@ +""" +Functions for acting on a axis of an array. +""" +from __future__ import division, print_function, absolute_import + +import numpy as np + + +def axis_slice(a, start=None, stop=None, step=None, axis=-1): + """Take a slice along axis 'axis' from 'a'. + + Parameters + ---------- + a : numpy.ndarray + The array to be sliced. + start, stop, step : int or None + The slice parameters. + axis : int, optional + The axis of `a` to be sliced. + + Examples + -------- + >>> a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> axis_slice(a, start=0, stop=1, axis=1) + array([[1], + [4], + [7]]) + >>> axis_slice(a, start=1, axis=0) + array([[4, 5, 6], + [7, 8, 9]]) + + Notes + ----- + The keyword arguments start, stop and step are used by calling + slice(start, stop, step). This implies axis_slice() does not + handle its arguments the exactly the same as indexing. To select + a single index k, for example, use + axis_slice(a, start=k, stop=k+1) + In this case, the length of the axis 'axis' in the result will + be 1; the trivial dimension is not removed. (Use numpy.squeeze() + to remove trivial axes.) + """ + a_slice = [slice(None)] * a.ndim + a_slice[axis] = slice(start, stop, step) + b = a[tuple(a_slice)] + return b + + +def axis_reverse(a, axis=-1): + """Reverse the 1-d slices of `a` along axis `axis`. + + Returns axis_slice(a, step=-1, axis=axis). + """ + return axis_slice(a, step=-1, axis=axis) + + +def odd_ext(x, n, axis=-1): + """ + Odd extension at the boundaries of an array + + Generate a new ndarray by making an odd extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. 
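+
+    Notes
+    -----
+    Each appended sample is the point reflection of an input sample through
+    the nearest boundary value: ``2 * x[0] - x[k]`` on the left end and
+    ``2 * x[-1] - x[-1 - k]`` on the right, for ``k = 1, ..., n`` along
+    `axis`, matching the concatenation in the implementation below.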
+ + Examples + -------- + >>> from scipy.signal._arraytools import odd_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> odd_ext(a, 2) + array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], + [-4, -1, 0, 1, 4, 9, 16, 23, 28]]) + + Odd extension is a "180 degree rotation" at the endpoints of the original + array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = odd_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(arange(-40, 140), b, 'b', lw=1, label='odd extension') + >>> plt.plot(arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. " + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_end = axis_slice(x, start=0, stop=1, axis=axis) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = np.concatenate((2 * left_end - left_ext, + x, + 2 * right_end - right_ext), + axis=axis) + return ext + + +def even_ext(x, n, axis=-1): + """ + Even extension at the boundaries of an array + + Generate a new ndarray by making an even extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> from scipy.signal._arraytools import even_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> even_ext(a, 2) + array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3], + [ 4, 1, 0, 1, 4, 9, 16, 9, 4]]) + + Even extension is a "mirror image" at the boundaries of the original array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = even_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(arange(-40, 140), b, 'b', lw=1, label='even extension') + >>> plt.plot(arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. " + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = np.concatenate((left_ext, + x, + right_ext), + axis=axis) + return ext + + +def const_ext(x, n, axis=-1): + """ + Constant extension at the boundaries of an array + + Generate a new ndarray that is a constant extension of `x` along an axis. + + The extension repeats the values at the first and last element of + the axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. 
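+
+    Notes
+    -----
+    For a 1-D input this produces, up to the axis handling, the same result
+    as ``numpy.pad`` in ``'edge'`` mode, which gives a quick cross-check:
+
+    >>> import numpy as np
+    >>> from scipy.signal._arraytools import const_ext
+    >>> a = np.array([1, 2, 3])
+    >>> (const_ext(a, 2) == np.pad(a, 2, mode='edge')).all()
+    True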
+ + Examples + -------- + >>> from scipy.signal._arraytools import const_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> const_ext(a, 2) + array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5], + [ 0, 0, 0, 1, 4, 9, 16, 16, 16]]) + + Constant extension continues with the same values as the endpoints of the + array: + + >>> t = np.linspace(0, 1.5, 100) + >>> a = 0.9 * np.sin(2 * np.pi * t**2) + >>> b = const_ext(a, 40) + >>> import matplotlib.pyplot as plt + >>> plt.plot(arange(-40, 140), b, 'b', lw=1, label='constant extension') + >>> plt.plot(arange(100), a, 'r', lw=2, label='original') + >>> plt.legend(loc='best') + >>> plt.show() + """ + if n < 1: + return x + left_end = axis_slice(x, start=0, stop=1, axis=axis) + ones_shape = [1] * x.ndim + ones_shape[axis] = n + ones = np.ones(ones_shape, dtype=x.dtype) + left_ext = ones * left_end + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = ones * right_end + ext = np.concatenate((left_ext, + x, + right_ext), + axis=axis) + return ext + + +def zero_ext(x, n, axis=-1): + """ + Zero padding at the boundaries of an array + + Generate a new ndarray that is a zero padded extension of `x` along + an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the + axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> from scipy.signal._arraytools import zero_ext + >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> zero_ext(a, 2) + array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0], + [ 0, 0, 0, 1, 4, 9, 16, 0, 0]]) + """ + if n < 1: + return x + zeros_shape = list(x.shape) + zeros_shape[axis] = n + zeros = np.zeros(zeros_shape, dtype=x.dtype) + ext = np.concatenate((zeros, x, zeros), axis=axis) + return ext diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_arraytools.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/_arraytools.pyc new file mode 100644 index 0000000..3f0e8db Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/_arraytools.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_max_len_seq.py b/project/venv/lib/python2.7/site-packages/scipy/signal/_max_len_seq.py new file mode 100644 index 0000000..3386998 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/_max_len_seq.py @@ -0,0 +1,137 @@ +# Author: Eric Larson +# 2014 + +"""Tools for MLS generation""" + +import numpy as np + +from ._max_len_seq_inner import _max_len_seq_inner + +__all__ = ['max_len_seq'] + + +# These are definitions of linear shift register taps for use in max_len_seq() +_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1], + 9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8], + 14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14], + 18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21], + 23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20], + 27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7], + 31: [28], 32: [31, 30, 10]} + +def max_len_seq(nbits, state=None, length=None, taps=None): + """ + Maximum length sequence (MLS) generator. + + Parameters + ---------- + nbits : int + Number of bits to use. Length of the resulting sequence will + be ``(2**nbits) - 1``. Note that generating long sequences + (e.g., greater than ``nbits == 16``) can take a long time. 
+ state : array_like, optional + If array, must be of length ``nbits``, and will be cast to binary + (bool) representation. If None, a seed of ones will be used, + producing a repeatable representation. If ``state`` is all + zeros, an error is raised as this is invalid. Default: None. + length : int, optional + Number of samples to compute. If None, the entire length + ``(2**nbits) - 1`` is computed. + taps : array_like, optional + Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence). + If None, taps will be automatically selected (for up to + ``nbits == 32``). + + Returns + ------- + seq : array + Resulting MLS sequence of 0's and 1's. + state : array + The final state of the shift register. + + Notes + ----- + The algorithm for MLS generation is generically described in: + + https://en.wikipedia.org/wiki/Maximum_length_sequence + + The default values for taps are specifically taken from the first + option listed for each value of ``nbits`` in: + + http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm + + .. versionadded:: 0.15.0 + + Examples + -------- + MLS uses binary convention: + + >>> from scipy.signal import max_len_seq + >>> max_len_seq(4)[0] + array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8) + + MLS has a white spectrum (except for DC): + + >>> import matplotlib.pyplot as plt + >>> from numpy.fft import fft, ifft, fftshift, fftfreq + >>> seq = max_len_seq(6)[0]*2-1 # +1 and -1 + >>> spec = fft(seq) + >>> N = len(seq) + >>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-') + >>> plt.margins(0.1, 0.1) + >>> plt.grid(True) + >>> plt.show() + + Circular autocorrelation of MLS is an impulse: + + >>> acorrcirc = ifft(spec * np.conj(spec)).real + >>> plt.figure() + >>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-') + >>> plt.margins(0.1, 0.1) + >>> plt.grid(True) + >>> plt.show() + + Linear autocorrelation of MLS is approximately an impulse: + + >>> acorr = np.correlate(seq, seq, 'full') + >>> plt.figure() + >>> plt.plot(np.arange(-N+1, N), acorr, '.-') + >>> plt.margins(0.1, 0.1) + >>> plt.grid(True) + >>> plt.show() + + """ + if taps is None: + if nbits not in _mls_taps: + known_taps = np.array(list(_mls_taps.keys())) + raise ValueError('nbits must be between %s and %s if taps is None' + % (known_taps.min(), known_taps.max())) + taps = np.array(_mls_taps[nbits], np.intp) + else: + taps = np.unique(np.array(taps, np.intp))[::-1] + if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1: + raise ValueError('taps must be non-empty with values between ' + 'zero and nbits (inclusive)') + taps = np.ascontiguousarray(taps) # needed for Cython + n_max = (2**nbits) - 1 + if length is None: + length = n_max + else: + length = int(length) + if length < 0: + raise ValueError('length must be greater than or equal to 0') + # We use int8 instead of bool here because numpy arrays of bools + # don't seem to work nicely with Cython + if state is None: + state = np.ones(nbits, dtype=np.int8, order='c') + else: + # makes a copy if need be, ensuring it's 0's and 1's + state = np.array(state, dtype=bool, order='c').astype(np.int8) + if state.ndim != 1 or state.size != nbits: + raise ValueError('state must be a 1-dimensional array of size nbits') + if np.all(state == 0): + raise ValueError('state must not be all zeros') + + seq = np.empty(length, dtype=np.int8, order='c') + state = _max_len_seq_inner(taps, state, nbits, length, seq) + return seq, state diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/signal/_max_len_seq.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/_max_len_seq.pyc new file mode 100644 index 0000000..f840506 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/_max_len_seq.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_max_len_seq_inner.so b/project/venv/lib/python2.7/site-packages/scipy/signal/_max_len_seq_inner.so new file mode 100755 index 0000000..6043606 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/_max_len_seq_inner.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_peak_finding.py b/project/venv/lib/python2.7/site-packages/scipy/signal/_peak_finding.py new file mode 100644 index 0000000..e009dee --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/_peak_finding.py @@ -0,0 +1,1299 @@ +""" +Functions for identifying peaks in signals. +""" +from __future__ import division, print_function, absolute_import + +import math +import numpy as np + +from scipy._lib.six import xrange +from scipy.signal.wavelets import cwt, ricker +from scipy.stats import scoreatpercentile + +from ._peak_finding_utils import ( + _local_maxima_1d, + _select_by_peak_distance, + _peak_prominences, + _peak_widths +) + + +__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences', + 'peak_widths', 'find_peaks', 'find_peaks_cwt'] + + +def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'): + """ + Calculate the relative extrema of `data`. + + Relative extrema are calculated by finding locations where + ``comparator(data[n], data[n+1:n+order+1])`` is True. + + Parameters + ---------- + data : ndarray + Array in which to find the relative extrema. + comparator : callable + Function to use to compare two data points. + Should take two arrays as arguments. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n,n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. 'wrap' (wrap around) or + 'clip' (treat overflow as the same as the last (or first) element). + Default 'clip'. See numpy.take + + Returns + ------- + extrema : ndarray + Boolean array of the same shape as `data` that is True at an extrema, + False otherwise. + + See also + -------- + argrelmax, argrelmin + + Examples + -------- + >>> testdata = np.array([1,2,3,2,1]) + >>> _boolrelextrema(testdata, np.greater, axis=0) + array([False, False, True, False, False], dtype=bool) + + """ + if((int(order) != order) or (order < 1)): + raise ValueError('Order must be an int >= 1') + + datalen = data.shape[axis] + locs = np.arange(0, datalen) + + results = np.ones(data.shape, dtype=bool) + main = data.take(locs, axis=axis, mode=mode) + for shift in xrange(1, order + 1): + plus = data.take(locs + shift, axis=axis, mode=mode) + minus = data.take(locs - shift, axis=axis, mode=mode) + results &= comparator(main, plus) + results &= comparator(main, minus) + if(~results.any()): + return results + return results + + +def argrelmin(data, axis=0, order=1, mode='clip'): + """ + Calculate the relative minima of `data`. + + Parameters + ---------- + data : ndarray + Array in which to find the relative minima. + axis : int, optional + Axis over which to select from `data`. Default is 0. 
+ order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n, n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. + Available options are 'wrap' (wrap around) or 'clip' (treat overflow + as the same as the last (or first) element). + Default 'clip'. See numpy.take + + Returns + ------- + extrema : tuple of ndarrays + Indices of the minima in arrays of integers. ``extrema[k]`` is + the array of indices of axis `k` of `data`. Note that the + return value is a tuple even when `data` is one-dimensional. + + See Also + -------- + argrelextrema, argrelmax, find_peaks + + Notes + ----- + This function uses `argrelextrema` with np.less as comparator. Therefore it + requires a strict inequality on both sides of a value to consider it a + minimum. This means flat minima (more than one sample wide) are not detected. + In case of one-dimensional `data` `find_peaks` can be used to detect all + local minima, including flat ones, by calling it with negated `data`. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy.signal import argrelmin + >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) + >>> argrelmin(x) + (array([1, 5]),) + >>> y = np.array([[1, 2, 1, 2], + ... [2, 2, 0, 0], + ... [5, 3, 4, 4]]) + ... + >>> argrelmin(y, axis=1) + (array([0, 2]), array([2, 1])) + + """ + return argrelextrema(data, np.less, axis, order, mode) + + +def argrelmax(data, axis=0, order=1, mode='clip'): + """ + Calculate the relative maxima of `data`. + + Parameters + ---------- + data : ndarray + Array in which to find the relative maxima. + axis : int, optional + Axis over which to select from `data`. Default is 0. + order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n, n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. + Available options are 'wrap' (wrap around) or 'clip' (treat overflow + as the same as the last (or first) element). + Default 'clip'. See `numpy.take`. + + Returns + ------- + extrema : tuple of ndarrays + Indices of the maxima in arrays of integers. ``extrema[k]`` is + the array of indices of axis `k` of `data`. Note that the + return value is a tuple even when `data` is one-dimensional. + + See Also + -------- + argrelextrema, argrelmin, find_peaks + + Notes + ----- + This function uses `argrelextrema` with np.greater as comparator. Therefore + it requires a strict inequality on both sides of a value to consider it a + maximum. This means flat maxima (more than one sample wide) are not detected. + In case of one-dimensional `data` `find_peaks` can be used to detect all + local maxima, including flat ones. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy.signal import argrelmax + >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) + >>> argrelmax(x) + (array([3, 6]),) + >>> y = np.array([[1, 2, 1, 2], + ... [2, 2, 0, 0], + ... [5, 3, 4, 4]]) + ... + >>> argrelmax(y, axis=1) + (array([0]), array([1])) + """ + return argrelextrema(data, np.greater, axis, order, mode) + + +def argrelextrema(data, comparator, axis=0, order=1, mode='clip'): + """ + Calculate the relative extrema of `data`. + + Parameters + ---------- + data : ndarray + Array in which to find the relative extrema. + comparator : callable + Function to use to compare two data points. + Should take two arrays as arguments. + axis : int, optional + Axis over which to select from `data`. Default is 0. 
+ order : int, optional + How many points on each side to use for the comparison + to consider ``comparator(n, n+x)`` to be True. + mode : str, optional + How the edges of the vector are treated. 'wrap' (wrap around) or + 'clip' (treat overflow as the same as the last (or first) element). + Default is 'clip'. See `numpy.take`. + + Returns + ------- + extrema : tuple of ndarrays + Indices of the maxima in arrays of integers. ``extrema[k]`` is + the array of indices of axis `k` of `data`. Note that the + return value is a tuple even when `data` is one-dimensional. + + See Also + -------- + argrelmin, argrelmax + + Notes + ----- + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy.signal import argrelextrema + >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) + >>> argrelextrema(x, np.greater) + (array([3, 6]),) + >>> y = np.array([[1, 2, 1, 2], + ... [2, 2, 0, 0], + ... [5, 3, 4, 4]]) + ... + >>> argrelextrema(y, np.less, axis=1) + (array([0, 2]), array([2, 1])) + + """ + results = _boolrelextrema(data, comparator, + axis, order, mode) + return np.nonzero(results) + + +def _arg_x_as_expected(value): + """Ensure argument `x` is a 1D C-contiguous array of dtype('float64'). + + Used in `find_peaks`, `peak_prominences` and `peak_widths` to make `x` + compatible with the signature of the wrapped Cython functions. + + Returns + ------- + value : ndarray + A one-dimensional C-contiguous array with dtype('float64'). + """ + value = np.asarray(value, order='C', dtype=np.float64) + if value.ndim != 1: + raise ValueError('`x` must be a 1D array') + return value + + +def _arg_peaks_as_expected(value): + """Ensure argument `peaks` is a 1D C-contiguous array of dtype('intp'). + + Used in `peak_prominences` and `peak_widths` to make `peaks` compatible + with the signature of the wrapped Cython functions. + + Returns + ------- + value : ndarray + A one-dimensional C-contiguous array with dtype('intp'). + """ + value = np.asarray(value) + if value.size == 0: + # Empty arrays default to np.float64 but are valid input + value = np.array([], dtype=np.intp) + try: + # Safely convert to C-contiguous array of type np.intp + value = value.astype(np.intp, order='C', casting='safe', + subok=False, copy=False) + except TypeError: + raise TypeError("cannot safely cast `peaks` to dtype('intp')") + if value.ndim != 1: + raise ValueError('`peaks` must be a 1D array') + return value + + +def _arg_wlen_as_expected(value): + """Ensure argument `wlen` is of type `np.intp` and larger than 1. + + Used in `peak_prominences` and `peak_widths`. + + Returns + ------- + value : np.intp + The original `value` rounded up to an integer or -1 if `value` was + None. + """ + if value is None: + # _peak_prominences expects an intp; -1 signals that no value was + # supplied by the user + value = -1 + elif 1 < value: + # Round up to a positive integer + if not np.can_cast(value, np.intp, "safe"): + value = math.ceil(value) + value = np.intp(value) + else: + raise ValueError('`wlen` must be larger than 1, was {}' + .format(value)) + return value + + +def peak_prominences(x, peaks, wlen=None): + """ + Calculate the prominence of each peak in a signal. + + The prominence of a peak measures how much a peak stands out from the + surrounding baseline of the signal and is defined as the vertical distance + between the peak and its lowest contour line. + + Parameters + ---------- + x : sequence + A signal with peaks. + peaks : sequence + Indices of peaks in `x`. 
+ wlen : int, optional + A window length in samples that optionally limits the evaluated area for + each peak to a subset of `x`. The peak is always placed in the middle of + the window therefore the given length is rounded up to the next odd + integer. This parameter can speed up the calculation (see Notes). + + Returns + ------- + prominences : ndarray + The calculated prominences for each peak in `peaks`. + left_bases, right_bases : ndarray + The peaks' bases as indices in `x` to the left and right of each peak. + The higher base of each pair is a peak's lowest contour line. + + Raises + ------ + ValueError + If a value in `peaks` is an invalid index for `x`. + + Warns + ----- + PeakPropertyWarning + For indices in `peaks` that don't point to valid local maxima in `x` + the returned prominence will be 0 and this warning is raised. This + also happens if `wlen` is smaller than the plateau size of a peak. + + Warnings + -------- + This function may return unexpected results for data containing NaNs. To + avoid this, NaNs should either be removed or replaced. + + See Also + -------- + find_peaks + Find peaks inside a signal based on peak properties. + peak_widths + Calculate the width of peaks. + + Notes + ----- + Strategy to compute a peak's prominence: + + 1. Extend a horizontal line from the current peak to the left and right + until the line either reaches the window border (see `wlen`) or + intersects the signal again at the slope of a higher peak. An + intersection with a peak of the same height is ignored. + 2. On each side find the minimal signal value within the interval defined + above. These points are the peak's bases. + 3. The higher one of the two bases marks the peak's lowest contour line. The + prominence can then be calculated as the vertical difference between the + peaks height itself and its lowest contour line. + + Searching for the peak's bases can be slow for large `x` with periodic + behavior because large chunks or even the full signal need to be evaluated + for the first algorithmic step. This evaluation area can be limited with the + parameter `wlen` which restricts the algorithm to a window around the + current peak and can shorten the calculation time if the window length is + short in relation to `x`. + However this may stop the algorithm from finding the true global contour + line if the peak's true bases are outside this window. Instead a higher + contour line is found within the restricted window leading to a smaller + calculated prominence. In practice this is only relevant for the highest set + of peaks in `x`. This behavior may even be used intentionally to calculate + "local" prominences. + + .. versionadded:: 1.1.0 + + References + ---------- + .. 
[1] Wikipedia Article for Topographic Prominence: + https://en.wikipedia.org/wiki/Topographic_prominence + + Examples + -------- + >>> from scipy.signal import find_peaks, peak_prominences + >>> import matplotlib.pyplot as plt + + Create a test signal with two overlayed harmonics + + >>> x = np.linspace(0, 6 * np.pi, 1000) + >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x) + + Find all peaks and calculate prominences + + >>> peaks, _ = find_peaks(x) + >>> prominences = peak_prominences(x, peaks)[0] + >>> prominences + array([1.24159486, 0.47840168, 0.28470524, 3.10716793, 0.284603 , + 0.47822491, 2.48340261, 0.47822491]) + + Calculate the height of each peak's contour line and plot the results + + >>> contour_heights = x[peaks] - prominences + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.vlines(x=peaks, ymin=contour_heights, ymax=x[peaks]) + >>> plt.show() + + Let's evaluate a second example that demonstrates several edge cases for + one peak at index 5. + + >>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0]) + >>> peaks = np.array([5]) + >>> plt.plot(x) + >>> plt.plot(peaks, x[peaks], "x") + >>> plt.show() + >>> peak_prominences(x, peaks) # -> (prominences, left_bases, right_bases) + (array([3.]), array([2]), array([6])) + + Note how the peak at index 3 of the same height is not considered as a + border while searching for the left base. Instead two minima at 0 and 2 + are found in which case the one closer to the evaluated peak is always + chosen. On the right side however the base must be placed at 6 because the + higher peak represents the right border to the evaluated area. + + >>> peak_prominences(x, peaks, wlen=3.1) + (array([2.]), array([4]), array([6])) + + Here we restricted the algorithm to a window from 3 to 7 (the length is 5 + samples because `wlen` was rounded up to the next odd integer). Thus the + only two candidates in the evaluated area are the two neighbouring samples + and a smaller prominence is calculated. + """ + x = _arg_x_as_expected(x) + peaks = _arg_peaks_as_expected(peaks) + wlen = _arg_wlen_as_expected(wlen) + return _peak_prominences(x, peaks, wlen) + + +def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None): + """ + Calculate the width of each peak in a signal. + + This function calculates the width of a peak in samples at a relative + distance to the peak's height and prominence. + + Parameters + ---------- + x : sequence + A signal with peaks. + peaks : sequence + Indices of peaks in `x`. + rel_height : float, optional + Chooses the relative height at which the peak width is measured as a + percentage of its prominence. 1.0 calculates the width of the peak at + its lowest contour line while 0.5 evaluates at half the prominence + height. Must be at least 0. See notes for further explanation. + prominence_data : tuple, optional + A tuple of three arrays matching the output of `peak_prominences` when + called with the same arguments `x` and `peaks`. This data is calculated + internally if not provided. + wlen : int, optional + A window length in samples passed to `peak_prominences` as an optional + argument for internal calculation of `prominence_data`. This argument + is ignored if `prominence_data` is given. + + Returns + ------- + widths : ndarray + The widths for each peak in samples. + width_heights : ndarray + The height of the contour lines at which the `widths` where evaluated. 
+    left_ips, right_ips : ndarray
+        Interpolated positions of left and right intersection points of a
+        horizontal line at the respective evaluation height.
+
+    Raises
+    ------
+    ValueError
+        If `prominence_data` is supplied but doesn't satisfy the condition
+        ``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak,
+        has the wrong dtype, is not C-contiguous or does not have the same
+        shape.
+
+    Warns
+    -----
+    PeakPropertyWarning
+        Raised if any calculated width is 0. This may stem from the supplied
+        `prominence_data` or if `rel_height` is set to 0.
+
+    Warnings
+    --------
+    This function may return unexpected results for data containing NaNs. To
+    avoid this, NaNs should either be removed or replaced.
+
+    See Also
+    --------
+    find_peaks
+        Find peaks inside a signal based on peak properties.
+    peak_prominences
+        Calculate the prominence of peaks.
+
+    Notes
+    -----
+    The basic algorithm to calculate a peak's width is as follows:
+
+    * Calculate the evaluation height :math:`h_{eval}` with the formula
+      :math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is
+      the height of the peak itself, :math:`P` is the peak's prominence and
+      :math:`R` a positive ratio specified with the argument `rel_height`.
+    * Draw a horizontal line at the evaluation height to both sides,
+      starting at the peak's current vertical position until the lines
+      either intersect a slope, the signal border or cross the vertical
+      position of the peak's base (see `peak_prominences` for a definition).
+      For the first case, intersection with the signal, the true
+      intersection point is estimated with linear interpolation.
+    * Calculate the width as the horizontal distance between the chosen
+      endpoints on both sides. As a consequence of this the maximal possible
+      width for each peak is the horizontal distance between its bases.
+
+    As shown above, to calculate a peak's width its prominence and bases
+    must be known. You can supply these yourself with the argument
+    `prominence_data`. Otherwise they are internally calculated (see
+    `peak_prominences`).
+
+    .. versionadded:: 1.1.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import chirp, find_peaks, peak_widths
+    >>> import matplotlib.pyplot as plt
+
+    Create a test signal with two overlaid harmonics
+
+    >>> x = np.linspace(0, 6 * np.pi, 1000)
+    >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
+
+    Find all peaks and calculate their widths at the relative height of 0.5
+    (contour line at half the prominence height) and 1 (at the lowest
+    contour line at full prominence height).
+
+    >>> peaks, _ = find_peaks(x)
+    >>> results_half = peak_widths(x, peaks, rel_height=0.5)
+    >>> results_half[0]  # widths
+    array([ 64.25172825,  41.29465463,  35.46943289, 104.71586081,
+            35.46729324,  41.30429622, 181.93835853,  45.37078546])
+    >>> results_full = peak_widths(x, peaks, rel_height=1)
+    >>> results_full[0]  # widths
+    array([181.9396084 ,  72.99284945,  61.28657872, 373.84622694,
+            61.78404617,  72.48822812, 253.09161876,  79.36860878])
+
+    Plot signal, peaks and contour lines at which the widths were calculated
+
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.hlines(*results_half[1:], color="C2")
+    >>> plt.hlines(*results_full[1:], color="C3")
+    >>> plt.show()
+    """
+    x = _arg_x_as_expected(x)
+    peaks = _arg_peaks_as_expected(peaks)
+    if prominence_data is None:
+        # Calculate prominence if not supplied and use wlen if supplied.
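+        # (Equivalent to computing the data up front via the public API,
+        # e.g. ``prominence_data = peak_prominences(x, peaks, wlen=wlen)``,
+        # and passing it in -- noted here for clarity.)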
+        wlen = _arg_wlen_as_expected(wlen)
+        prominence_data = _peak_prominences(x, peaks, wlen)
+    return _peak_widths(x, peaks, rel_height, *prominence_data)
+
+
+def _unpack_condition_args(interval, x, peaks):
+    """
+    Parse condition arguments for `find_peaks`.
+
+    Parameters
+    ----------
+    interval : number or ndarray or sequence
+        Either a number or ndarray or a 2-element sequence of the former.
+        The first value is always interpreted as `imin` and the second, if
+        supplied, as `imax`.
+    x : ndarray
+        The signal with `peaks`.
+    peaks : ndarray
+        An array with indices used to reduce `imin` and / or `imax` if those
+        are arrays.
+
+    Returns
+    -------
+    imin, imax : number or ndarray or None
+        Minimal and maximal value in `interval`.
+
+    Raises
+    ------
+    ValueError
+        If an interval border is given as an array and its size does not
+        match the size of `x`.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.1.0
+    """
+    try:
+        imin, imax = interval
+    except (TypeError, ValueError):
+        imin, imax = (interval, None)
+
+    # Reduce arrays if arrays
+    if isinstance(imin, np.ndarray):
+        if imin.size != x.size:
+            raise ValueError('array size of lower interval border must match x')
+        imin = imin[peaks]
+    if isinstance(imax, np.ndarray):
+        if imax.size != x.size:
+            raise ValueError('array size of upper interval border must match x')
+        imax = imax[peaks]
+
+    return imin, imax
+
+
+def _select_by_property(peak_properties, pmin, pmax):
+    """
+    Evaluate where the generic property of peaks conforms to an interval.
+
+    Parameters
+    ----------
+    peak_properties : ndarray
+        An array with properties for each peak.
+    pmin : None or number or ndarray
+        Lower interval boundary for `peak_properties`. ``None`` is
+        interpreted as an open border.
+    pmax : None or number or ndarray
+        Upper interval boundary for `peak_properties`. ``None`` is
+        interpreted as an open border.
+
+    Returns
+    -------
+    keep : bool
+        A boolean mask evaluating to true where `peak_properties` conforms
+        to the interval.
+
+    See Also
+    --------
+    find_peaks
+
+    Notes
+    -----
+
+    .. versionadded:: 1.1.0
+    """
+    keep = np.ones(peak_properties.size, dtype=bool)
+    if pmin is not None:
+        keep &= (pmin <= peak_properties)
+    if pmax is not None:
+        keep &= (peak_properties <= pmax)
+    return keep
+
+
+def _select_by_peak_threshold(x, peaks, tmin, tmax):
+    """
+    Evaluate which peaks fulfill the threshold condition.
+
+    Parameters
+    ----------
+    x : ndarray
+        A one-dimensional array which is indexable by `peaks`.
+    peaks : ndarray
+        Indices of peaks in `x`.
+    tmin, tmax : scalar or ndarray or None
+        Minimal and / or maximal required thresholds. If supplied as
+        ndarrays their size must match `peaks`. ``None`` is interpreted as
+        an open border.
+
+    Returns
+    -------
+    keep : bool
+        A boolean mask evaluating to true where `peaks` fulfill the
+        threshold condition.
+    left_thresholds, right_thresholds : ndarray
+        Array matching `peaks` containing the thresholds of each peak on
+        both sides.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.1.0
+    """
+    # Stack thresholds on both sides to make min / max operations easier:
+    # tmin is compared with the smaller, and tmax with the greater threshold
+    # to each peak's side
+    stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1],
+                                    x[peaks] - x[peaks + 1]])
+    keep = np.ones(peaks.size, dtype=bool)
+    if tmin is not None:
+        min_thresholds = np.min(stacked_thresholds, axis=0)
+        keep &= (tmin <= min_thresholds)
+    if tmax is not None:
+        max_thresholds = np.max(stacked_thresholds, axis=0)
+        keep &= (max_thresholds <= tmax)
+
+    return keep, stacked_thresholds[0], stacked_thresholds[1]
+
+
+def find_peaks(x, height=None, threshold=None, distance=None,
+               prominence=None, width=None, wlen=None, rel_height=0.5,
+               plateau_size=None):
+    """
+    Find peaks inside a signal based on peak properties.
+
+    This function takes a one-dimensional array and finds all local maxima
+    by simple comparison of neighbouring values. Optionally, a subset of
+    these peaks can be selected by specifying conditions for a peak's
+    properties.
+
+    Parameters
+    ----------
+    x : sequence
+        A signal with peaks.
+    height : number or ndarray or sequence, optional
+        Required height of peaks. Either a number, ``None``, an array
+        matching `x` or a 2-element sequence of the former. The first
+        element is always interpreted as the minimal and the second, if
+        supplied, as the maximal required height.
+    threshold : number or ndarray or sequence, optional
+        Required threshold of peaks, the vertical distance to its
+        neighbouring samples. Either a number, ``None``, an array matching
+        `x` or a 2-element sequence of the former. The first element is
+        always interpreted as the minimal and the second, if supplied, as
+        the maximal required threshold.
+    distance : number, optional
+        Required minimal horizontal distance (>= 1) in samples between
+        neighbouring peaks. The removal order is explained in the notes
+        section.
+    prominence : number or ndarray or sequence, optional
+        Required prominence of peaks. Either a number, ``None``, an array
+        matching `x` or a 2-element sequence of the former. The first
+        element is always interpreted as the minimal and the second, if
+        supplied, as the maximal required prominence.
+    width : number or ndarray or sequence, optional
+        Required width of peaks in samples. Either a number, ``None``, an
+        array matching `x` or a 2-element sequence of the former. The first
+        element is always interpreted as the minimal and the second, if
+        supplied, as the maximal required width.
+    wlen : int, optional
+        Used for calculation of the peaks' prominences, thus it is only
+        used if one of the arguments `prominence` or `width` is given. See
+        argument `wlen` in `peak_prominences` for a full description of its
+        effects.
+    rel_height : float, optional
+        Used for calculation of the peaks' width, thus it is only used if
+        `width` is given. See argument `rel_height` in `peak_widths` for a
+        full description of its effects.
+    plateau_size : number or ndarray or sequence, optional
+        Required size of the flat top of peaks in samples. Either a number,
+        ``None``, an array matching `x` or a 2-element sequence of the
+        former. The first element is always interpreted as the minimal and
+        the second, if supplied, as the maximal required plateau size.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    peaks : ndarray
+        Indices of peaks in `x` that satisfy all given conditions.
+    properties : dict
+        A dictionary containing properties of the returned peaks which were
+        calculated as intermediate results during evaluation of the
+        specified conditions:
+
+        * 'peak_heights'
+              If `height` is given, the height of each peak in `x`.
+        * 'left_thresholds', 'right_thresholds'
+              If `threshold` is given, these keys contain a peak's vertical
+              distance to its neighbouring samples.
+        * 'prominences', 'right_bases', 'left_bases'
+              If `prominence` is given, these keys are accessible. See
+              `peak_prominences` for a description of their content.
+        * 'width_heights', 'left_ips', 'right_ips'
+              If `width` is given, these keys are accessible. See
+              `peak_widths` for a description of their content.
+        * 'plateau_sizes', 'left_edges', 'right_edges'
+              If `plateau_size` is given, these keys are accessible and
+              contain the indices of a peak's edges (edges are still part
+              of the plateau) and the calculated plateau sizes.
+
+              .. versionadded:: 1.2.0
+
+        To calculate and return properties without excluding peaks, provide
+        the open interval ``(None, None)`` as a value to the appropriate
+        argument (excluding `distance`).
+
+    Warns
+    -----
+    PeakPropertyWarning
+        Raised if a peak's properties have unexpected values (see
+        `peak_prominences` and `peak_widths`).
+
+    Warnings
+    --------
+    This function may return unexpected results for data containing NaNs. To
+    avoid this, NaNs should either be removed or replaced.
+
+    See Also
+    --------
+    find_peaks_cwt
+        Find peaks using the wavelet transformation.
+    peak_prominences
+        Directly calculate the prominence of peaks.
+    peak_widths
+        Directly calculate the width of peaks.
+
+    Notes
+    -----
+    In the context of this function, a peak or local maximum is defined as
+    any sample whose two direct neighbours have a smaller amplitude. For
+    flat peaks (more than one sample of equal amplitude wide) the index of
+    the middle sample is returned (rounded down in case the number of
+    samples is even). For noisy signals the peak locations can be off
+    because the noise might change the position of local maxima. In those
+    cases consider smoothing the signal before searching for peaks or use
+    other peak finding and fitting methods (like `find_peaks_cwt`).
+
+    Some additional comments on specifying conditions:
+
+    * Almost all conditions (excluding `distance`) can be given as half-open
+      or closed intervals, e.g. ``1`` or ``(1, None)`` defines the half-open
+      interval :math:`[1, \\infty)` while ``(None, 1)`` defines the interval
+      :math:`(-\\infty, 1]`. The open interval ``(None, None)`` can be
+      specified as well, which returns the matching properties without
+      exclusion of peaks.
+    * The border is always included in the interval used to select valid
+      peaks.
+    * For several conditions the interval borders can be specified with
+      arrays matching `x` in shape which enables dynamic constraints based
+      on the sample position.
+    * The conditions are evaluated in the following order: `plateau_size`,
+      `height`, `threshold`, `distance`, `prominence`, `width`. In most
+      cases this order is the fastest one because faster operations are
+      applied first to reduce the number of peaks that need to be evaluated
+      later.
+    * Satisfying the distance condition is accomplished by iterating over
+      all peaks in descending order based on their height and removing all
+      lower peaks that are too close.
+    * Use `wlen` to reduce the time it takes to evaluate the conditions for
+      `prominence` or `width` if `x` is large or has many local maxima
+      (see `peak_prominences`).
+
+    .. versionadded:: 1.1.0
+
+    Examples
+    --------
+    To demonstrate this function's usage we use a signal `x` supplied with
+    SciPy (see `scipy.misc.electrocardiogram`). Let's find all peaks (local
+    maxima) in `x` whose amplitude lies above 0.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.misc import electrocardiogram
+    >>> from scipy.signal import find_peaks
+    >>> x = electrocardiogram()[2000:4000]
+    >>> peaks, _ = find_peaks(x, height=0)
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.plot(np.zeros_like(x), "--", color="gray")
+    >>> plt.show()
+
+    We can select peaks below 0 with ``height=(None, 0)`` or use arrays
+    matching `x` in size to reflect a changing condition for different
+    parts of the signal.
+
+    >>> border = np.sin(np.linspace(0, 3 * np.pi, x.size))
+    >>> peaks, _ = find_peaks(x, height=(-border, border))
+    >>> plt.plot(x)
+    >>> plt.plot(-border, "--", color="gray")
+    >>> plt.plot(border, ":", color="gray")
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.show()
+
+    Another useful condition for periodic signals can be given with the
+    `distance` argument. In this case we can easily select the positions of
+    QRS complexes within the electrocardiogram (ECG) by demanding a distance
+    of at least 150 samples.
+
+    >>> peaks, _ = find_peaks(x, distance=150)
+    >>> np.diff(peaks)
+    array([186, 180, 177, 171, 177, 169, 167, 164, 158, 162, 172])
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.show()
+
+    Especially for noisy signals, peaks can be easily grouped by their
+    prominence (see `peak_prominences`). E.g. we can select all peaks except
+    for the mentioned QRS complexes by limiting the allowed prominence to
+    0.6.
+
+    >>> peaks, properties = find_peaks(x, prominence=(None, 0.6))
+    >>> properties["prominences"].max()
+    0.5049999999999999
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.show()
+
+    And finally let's examine a different section of the ECG which contains
+    beat forms of different shape. To select only the atypical heart beats
+    we combine two conditions: a minimal prominence of 1 and width of at
+    least 20 samples.
+
+    >>> x = electrocardiogram()[17000:18000]
+    >>> peaks, properties = find_peaks(x, prominence=1, width=20)
+    >>> properties["prominences"], properties["widths"]
+    (array([1.495, 2.3  ]), array([36.93773946, 39.32723577]))
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"],
+    ...            ymax=x[peaks], color="C1")
+    >>> plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"],
+    ...            xmax=properties["right_ips"], color="C1")
+    >>> plt.show()
+    """
+    # _argmaxima1d expects array of dtype 'float64'
+    x = _arg_x_as_expected(x)
+    if distance is not None and distance < 1:
+        raise ValueError('`distance` must be greater than or equal to 1')
+
+    peaks, left_edges, right_edges = _local_maxima_1d(x)
+    properties = {}
+
+    if plateau_size is not None:
+        # Evaluate plateau size
+        plateau_sizes = right_edges - left_edges + 1
+        pmin, pmax = _unpack_condition_args(plateau_size, x, peaks)
+        keep = _select_by_property(plateau_sizes, pmin, pmax)
+        peaks = peaks[keep]
+        properties["plateau_sizes"] = plateau_sizes
+        properties["left_edges"] = left_edges
+        properties["right_edges"] = right_edges
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if height is not None:
+        # Evaluate height condition
+        peak_heights = x[peaks]
+        hmin, hmax = _unpack_condition_args(height, x, peaks)
+        keep = _select_by_property(peak_heights, hmin, hmax)
+        peaks = peaks[keep]
+        properties["peak_heights"] = peak_heights
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if threshold is not None:
+        # Evaluate threshold condition
+        tmin, tmax = _unpack_condition_args(threshold, x, peaks)
+        keep, left_thresholds, right_thresholds = _select_by_peak_threshold(
+            x, peaks, tmin, tmax)
+        peaks = peaks[keep]
+        properties["left_thresholds"] = left_thresholds
+        properties["right_thresholds"] = right_thresholds
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if distance is not None:
+        # Evaluate distance condition
+        keep = _select_by_peak_distance(peaks, x[peaks], distance)
+        peaks = peaks[keep]
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if prominence is not None or width is not None:
+        # Calculate prominence (required for both conditions)
+        wlen = _arg_wlen_as_expected(wlen)
+        properties.update(zip(
+            ['prominences', 'left_bases', 'right_bases'],
+            _peak_prominences(x, peaks, wlen=wlen)
+        ))
+
+    if prominence is not None:
+        # Evaluate prominence condition
+        pmin, pmax = _unpack_condition_args(prominence, x, peaks)
+        keep = _select_by_property(properties['prominences'], pmin, pmax)
+        peaks = peaks[keep]
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if width is not None:
+        # Calculate widths
+        properties.update(zip(
+            ['widths', 'width_heights', 'left_ips', 'right_ips'],
+            _peak_widths(x, peaks, rel_height, properties['prominences'],
+                         properties['left_bases'], properties['right_bases'])
+        ))
+        # Evaluate width condition
+        wmin, wmax = _unpack_condition_args(width, x, peaks)
+        keep = _select_by_property(properties['widths'], wmin, wmax)
+        peaks = peaks[keep]
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    return peaks, properties
+
+
+def _identify_ridge_lines(matr, max_distances, gap_thresh):
+    """
+    Identify ridges in the 2-D matrix.
+
+    Expect that the width of the wavelet feature increases with increasing
+    row number.
+
+    Parameters
+    ----------
+    matr : 2-D ndarray
+        Matrix in which to identify ridge lines.
+    max_distances : 1-D sequence
+        At each row, a ridge line is only connected
+        if the relative max at row[n] is within
+        `max_distances`[n] from the relative max at row[n+1].
+    gap_thresh : int
+        If a relative maximum is not found within `max_distances`,
+        there will be a gap. A ridge line is discontinued if
+        there are more than `gap_thresh` points without connecting
+        a new relative maximum.
+
+    Returns
+    -------
+    ridge_lines : tuple
+        Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
+        ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if
+        none found. Each ridge-line will be sorted by row (increasing), but
+        the order of the ridge lines is not specified.
+
+    References
+    ----------
+    Bioinformatics (2006) 22 (17): 2059-2065.
+    :doi:`10.1093/bioinformatics/btl355`
+    http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
+
+    Examples
+    --------
+    >>> data = np.random.rand(5, 5)
+    >>> max_distances = np.ones(5)
+    >>> ridge_lines = _identify_ridge_lines(data, max_distances, 1)
+
+    Notes
+    -----
+    This function is intended to be used in conjunction with `cwt`
+    as part of `find_peaks_cwt`.
+
+    """
+    if len(max_distances) < matr.shape[0]:
+        raise ValueError('Max_distances must have at least as many rows '
+                         'as matr')
+
+    all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
+    # Highest row for which there are any relative maxima
+    has_relmax = np.nonzero(all_max_cols.any(axis=1))[0]
+    if len(has_relmax) == 0:
+        return []
+    start_row = has_relmax[-1]
+    # Each ridge line is a 3-tuple:
+    # rows, cols, gap number
+    ridge_lines = [[[start_row],
+                    [col],
+                    0] for col in np.nonzero(all_max_cols[start_row])[0]]
+    final_lines = []
+    rows = np.arange(start_row - 1, -1, -1)
+    cols = np.arange(0, matr.shape[1])
+    for row in rows:
+        this_max_cols = cols[all_max_cols[row]]
+
+        # Increment gap number of each line,
+        # set it to zero later if appropriate
+        for line in ridge_lines:
+            line[2] += 1
+
+        # XXX These should always be all_max_cols[row]
+        # But the order might be different. Might be an efficiency gain
+        # to make sure the order is the same and avoid this iteration
+        prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
+        # Look through every relative maximum found at current row
+        # Attempt to connect them with existing ridge lines.
+        for ind, col in enumerate(this_max_cols):
+            # If there is a previous ridge line within
+            # the max_distance to connect to, do so.
+            # Otherwise start a new one.
+            line = None
+            if len(prev_ridge_cols) > 0:
+                diffs = np.abs(col - prev_ridge_cols)
+                closest = np.argmin(diffs)
+                if diffs[closest] <= max_distances[row]:
+                    line = ridge_lines[closest]
+            if line is not None:
+                # Found a point close enough, extend current ridge line
+                line[1].append(col)
+                line[0].append(row)
+                line[2] = 0
+            else:
+                new_line = [[row],
+                            [col],
+                            0]
+                ridge_lines.append(new_line)
+
+        # Remove the ridge lines with gap_number too high
+        # XXX Modifying a list while iterating over it.
+        # Should be safe, since we iterate backwards, but
+        # still tacky.
+        for ind in xrange(len(ridge_lines) - 1, -1, -1):
+            line = ridge_lines[ind]
+            if line[2] > gap_thresh:
+                final_lines.append(line)
+                del ridge_lines[ind]
+
+    out_lines = []
+    for line in (final_lines + ridge_lines):
+        sortargs = np.array(np.argsort(line[0]))
+        rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
+        rows[sortargs] = line[0]
+        cols[sortargs] = line[1]
+        out_lines.append([rows, cols])
+
+    return out_lines
+
+
+def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
+                        min_snr=1, noise_perc=10):
+    """
+    Filter ridge lines according to prescribed criteria. Intended
+    to be used for finding relative maxima.
+
+    Parameters
+    ----------
+    cwt : 2-D ndarray
+        Continuous wavelet transform from which the `ridge_lines` were
+        defined.
+    ridge_lines : 1-D sequence
+        Each element should contain 2 sequences, the rows and columns
+        of the ridge line (respectively).
+    window_size : int, optional
+        Size of window to use to calculate noise floor.
+        Default is ``cwt.shape[1] / 20``.
+    min_length : int, optional
+        Minimum length a ridge line needs to be acceptable.
+        Default is ``cwt.shape[0] / 4``, i.e. 1/4th the number of widths.
+    min_snr : float, optional
+        Minimum SNR ratio. Default 1. The signal is the value of
+        the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
+        noise is the `noise_perc`th percentile of datapoints contained
+        within a window of `window_size` around ``cwt[0, loc]``.
+    noise_perc : float, optional
+        When calculating the noise floor, percentile of data points
+        examined below which to consider noise. Calculated using
+        scipy.stats.scoreatpercentile.
+
+    References
+    ----------
+    Bioinformatics (2006) 22 (17): 2059-2065.
+    :doi:`10.1093/bioinformatics/btl355`
+    http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
+
+    """
+    num_points = cwt.shape[1]
+    if min_length is None:
+        min_length = np.ceil(cwt.shape[0] / 4)
+    if window_size is None:
+        window_size = np.ceil(num_points / 20)
+
+    window_size = int(window_size)
+    hf_window, odd = divmod(window_size, 2)
+
+    # Filter based on SNR
+    row_one = cwt[0, :]
+    noises = np.zeros_like(row_one)
+    for ind, val in enumerate(row_one):
+        window_start = max(ind - hf_window, 0)
+        window_end = min(ind + hf_window + odd, num_points)
+        noises[ind] = scoreatpercentile(row_one[window_start:window_end],
+                                        per=noise_perc)
+
+    def filt_func(line):
+        if len(line[0]) < min_length:
+            return False
+        snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
+        if snr < min_snr:
+            return False
+        return True
+
+    return list(filter(filt_func, ridge_lines))
+
+
+def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
+                   gap_thresh=None, min_length=None, min_snr=1,
+                   noise_perc=10):
+    """
+    Find peaks in a 1-D array with wavelet transformation.
+
+    The general approach is to smooth `vector` by convolving it with
+    `wavelet(width)` for each width in `widths`. Relative maxima which
+    appear at enough length scales, and with sufficiently high SNR, are
+    accepted.
+
+    Parameters
+    ----------
+    vector : ndarray
+        1-D array in which to find the peaks.
+    widths : sequence
+        1-D array of widths to use for calculating the CWT matrix. In
+        general, this range should cover the expected width of peaks of
+        interest.
+    wavelet : callable, optional
+        Should take two parameters and return a 1-D array to convolve
+        with `vector`. The first parameter determines the number of points
+        of the returned wavelet array, the second parameter is the scale
+        (`width`) of the wavelet. Should be normalized and symmetric.
+        Default is the ricker wavelet.
+    max_distances : ndarray, optional
+        At each row, a ridge line is only connected if the relative max at
+        row[n] is within ``max_distances[n]`` from the relative max at
+        ``row[n+1]``. Default value is ``widths/4``.
+    gap_thresh : float, optional
+        If a relative maximum is not found within `max_distances`,
+        there will be a gap. A ridge line is discontinued if there are more
+        than `gap_thresh` points without connecting a new relative maximum.
+        Default is the first value of the widths array, i.e. ``widths[0]``.
+    min_length : int, optional
+        Minimum length a ridge line needs to be acceptable.
+        Default is ``cwt.shape[0] / 4``, i.e. 1/4th the number of widths.
+    min_snr : float, optional
+        Minimum SNR ratio. Default 1.
+        The signal is the value of
+        the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
+        noise is the `noise_perc`th percentile of datapoints contained
+        within a window of `window_size` around ``cwt[0, loc]``.
+    noise_perc : float, optional
+        When calculating the noise floor, percentile of data points
+        examined below which to consider noise. Calculated using
+        `stats.scoreatpercentile`. Default is 10.
+
+    Returns
+    -------
+    peaks_indices : ndarray
+        Indices of the locations in the `vector` where peaks were found.
+        The list is sorted.
+
+    See Also
+    --------
+    cwt
+        Continuous wavelet transform.
+    find_peaks
+        Find peaks inside a signal based on peak properties.
+
+    Notes
+    -----
+    This approach was designed for finding sharp peaks among noisy data;
+    however, with proper parameter selection it should function well for
+    different peak shapes.
+
+    The algorithm is as follows:
+
+    1. Perform a continuous wavelet transform on `vector`, for the supplied
+       `widths`. This is a convolution of `vector` with `wavelet(width)`
+       for each width in `widths`. See `cwt`.
+    2. Identify "ridge lines" in the cwt matrix. These are relative maxima
+       at each row, connected across adjacent rows. See
+       `_identify_ridge_lines`.
+    3. Filter the ridge lines using `_filter_ridge_lines`.
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
+       :doi:`10.1093/bioinformatics/btl355`
+       http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import numpy as np
+    >>> xs = np.arange(0, np.pi, 0.05)
+    >>> data = np.sin(xs)
+    >>> peakind = signal.find_peaks_cwt(data, np.arange(1, 10))
+    >>> peakind, xs[peakind], data[peakind]
+    ([32], array([ 1.6]), array([ 0.9995736]))
+
+    """
+    widths = np.asarray(widths)
+
+    if gap_thresh is None:
+        gap_thresh = np.ceil(widths[0])
+    if max_distances is None:
+        max_distances = widths / 4.0
+    if wavelet is None:
+        wavelet = ricker
+
+    cwt_dat = cwt(vector, wavelet, widths)
+    ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
+    filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
+                                   min_snr=min_snr, noise_perc=noise_perc)
+    max_locs = np.asarray([x[1][0] for x in filtered])
+    max_locs.sort()
+
+    return max_locs
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_peak_finding.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/_peak_finding.pyc
new file mode 100644
index 0000000..22ef7c1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/_peak_finding.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_peak_finding_utils.so b/project/venv/lib/python2.7/site-packages/scipy/signal/_peak_finding_utils.so
new file mode 100755
index 0000000..2882a50
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/_peak_finding_utils.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_savitzky_golay.py b/project/venv/lib/python2.7/site-packages/scipy/signal/_savitzky_golay.py
new file mode 100644
index 0000000..b388bc4
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/signal/_savitzky_golay.py
@@ -0,0 +1,349 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from scipy.linalg import lstsq
+from math import factorial
+from scipy.ndimage import convolve1d
+from ._arraytools import axis_slice
+
+
+def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None,
use="conv"): + """Compute the coefficients for a 1-d Savitzky-Golay FIR filter. + + Parameters + ---------- + window_length : int + The length of the filter window (i.e. the number of coefficients). + `window_length` must be an odd positive integer. + polyorder : int + The order of the polynomial used to fit the samples. + `polyorder` must be less than `window_length`. + deriv : int, optional + The order of the derivative to compute. This must be a + nonnegative integer. The default is 0, which means to filter + the data without differentiating. + delta : float, optional + The spacing of the samples to which the filter will be applied. + This is only used if deriv > 0. + pos : int or None, optional + If pos is not None, it specifies evaluation position within the + window. The default is the middle of the window. + use : str, optional + Either 'conv' or 'dot'. This argument chooses the order of the + coefficients. The default is 'conv', which means that the + coefficients are ordered to be used in a convolution. With + use='dot', the order is reversed, so the filter is applied by + dotting the coefficients with the data set. + + Returns + ------- + coeffs : 1-d ndarray + The filter coefficients. + + References + ---------- + A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by + Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8), + pp 1627-1639. + + See Also + -------- + savgol_filter + + Notes + ----- + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> from scipy.signal import savgol_coeffs + >>> savgol_coeffs(5, 2) + array([-0.08571429, 0.34285714, 0.48571429, 0.34285714, -0.08571429]) + >>> savgol_coeffs(5, 2, deriv=1) + array([ 2.00000000e-01, 1.00000000e-01, 2.00607895e-16, + -1.00000000e-01, -2.00000000e-01]) + + Note that use='dot' simply reverses the coefficients. + + >>> savgol_coeffs(5, 2, pos=3) + array([ 0.25714286, 0.37142857, 0.34285714, 0.17142857, -0.14285714]) + >>> savgol_coeffs(5, 2, pos=3, use='dot') + array([-0.14285714, 0.17142857, 0.34285714, 0.37142857, 0.25714286]) + + `x` contains data from the parabola x = t**2, sampled at + t = -1, 0, 1, 2, 3. `c` holds the coefficients that will compute the + derivative at the last position. When dotted with `x` the result should + be 6. + + >>> x = np.array([1, 0, 1, 4, 9]) + >>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot') + >>> c.dot(x) + 6.0000000000000018 + """ + + # An alternative method for finding the coefficients when deriv=0 is + # t = np.arange(window_length) + # unit = (t == pos).astype(int) + # coeffs = np.polyval(np.polyfit(t, unit, polyorder), t) + # The method implemented here is faster. + + # To recreate the table of sample coefficients shown in the chapter on + # the Savitzy-Golay filter in the Numerical Recipes book, use + # window_length = nL + nR + 1 + # pos = nL + 1 + # c = savgol_coeffs(window_length, M, pos=pos, use='dot') + + if polyorder >= window_length: + raise ValueError("polyorder must be less than window_length.") + + halflen, rem = divmod(window_length, 2) + + if rem == 0: + raise ValueError("window_length must be odd.") + + if pos is None: + pos = halflen + + if not (0 <= pos < window_length): + raise ValueError("pos must be nonnegative and less than " + "window_length.") + + if use not in ['conv', 'dot']: + raise ValueError("`use` must be 'conv' or 'dot'") + + # Form the design matrix A. The columns of A are powers of the integers + # from -pos to window_length - pos - 1. The powers (i.e. rows) range + # from 0 to polyorder. 
+    # (That is, A is a Vandermonde matrix, but not necessarily square.)
+    x = np.arange(-pos, window_length - pos, dtype=float)
+    if use == "conv":
+        # Reverse so that result can be used in a convolution.
+        x = x[::-1]
+
+    order = np.arange(polyorder + 1).reshape(-1, 1)
+    A = x ** order
+
+    # y determines which order derivative is returned.
+    y = np.zeros(polyorder + 1)
+    # The coefficient assigned to y[deriv] scales the result to take into
+    # account the order of the derivative and the sample spacing.
+    y[deriv] = factorial(deriv) / (delta ** deriv)
+
+    # Find the least-squares solution of A*c = y
+    coeffs, _, _, _ = lstsq(A, y)
+
+    return coeffs
+
+
+def _polyder(p, m):
+    """Differentiate polynomials represented with coefficients.
+
+    p must be a 1D or 2D array. In the 2D case, each column gives
+    the coefficients of a polynomial; the first row holds the coefficients
+    associated with the highest power. m must be a nonnegative integer.
+    (numpy.polyder doesn't handle the 2D case.)
+    """
+
+    if m == 0:
+        result = p
+    else:
+        n = len(p)
+        if n <= m:
+            result = np.zeros_like(p[:1, ...])
+        else:
+            dp = p[:-m].copy()
+            for k in range(m):
+                rng = np.arange(n - k - 1, m - k - 1, -1)
+                dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1))
+            result = dp
+    return result
+
+
+def _fit_edge(x, window_start, window_stop, interp_start, interp_stop,
+              axis, polyorder, deriv, delta, y):
+    """
+    Given an n-d array `x` and the specification of a slice of `x` from
+    `window_start` to `window_stop` along `axis`, create an interpolating
+    polynomial of each 1-d slice, and evaluate that polynomial in the slice
+    from `interp_start` to `interp_stop`. Put the result into the
+    corresponding slice of `y`.
+    """
+
+    # Get the edge into a (window_length, -1) array.
+    x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)
+    if axis == 0 or axis == -x.ndim:
+        xx_edge = x_edge
+        swapped = False
+    else:
+        xx_edge = x_edge.swapaxes(axis, 0)
+        swapped = True
+    xx_edge = xx_edge.reshape(xx_edge.shape[0], -1)
+
+    # Fit the edges. poly_coeffs has shape (polyorder + 1, -1),
+    # where '-1' is the same as in xx_edge.
+    poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start),
+                             xx_edge, polyorder)
+
+    if deriv > 0:
+        poly_coeffs = _polyder(poly_coeffs, deriv)
+
+    # Compute the interpolated values for the edge.
+    i = np.arange(interp_start - window_start, interp_stop - window_start)
+    values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv)
+
+    # Now put the values into the appropriate slice of y.
+    # First reshape values to match y.
+    shp = list(y.shape)
+    shp[0], shp[axis] = shp[axis], shp[0]
+    values = values.reshape(interp_stop - interp_start, *shp[1:])
+    if swapped:
+        values = values.swapaxes(0, axis)
+    # Get a view of the data to be replaced by values.
+    y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)
+    y_edge[...] = values
+
+
+def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):
+    """
+    Use polynomial interpolation of x at the low and high ends of the axis
+    to fill in the halflen values in y.
+
+    This function just calls _fit_edge twice, once for each end of the axis.
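+
+    A minimal doctest-style sketch (hypothetical values; with exactly linear
+    data the fitted edge values reproduce the input, so only the first and
+    last ``window_length // 2`` samples of `y` are written):
+
+    >>> x = np.arange(10.0)
+    >>> y = np.zeros_like(x)
+    >>> _fit_edges_polyfit(x, 5, 2, 0, 1.0, -1, y)
+    >>> bool(np.allclose(y[:2], x[:2]) and np.allclose(y[-2:], x[-2:]))
+    True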
+ """ + halflen = window_length // 2 + _fit_edge(x, 0, window_length, 0, halflen, axis, + polyorder, deriv, delta, y) + n = x.shape[axis] + _fit_edge(x, n - window_length, n, n - halflen, n, axis, + polyorder, deriv, delta, y) + + +def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0, + axis=-1, mode='interp', cval=0.0): + """ Apply a Savitzky-Golay filter to an array. + + This is a 1-d filter. If `x` has dimension greater than 1, `axis` + determines the axis along which the filter is applied. + + Parameters + ---------- + x : array_like + The data to be filtered. If `x` is not a single or double precision + floating point array, it will be converted to type `numpy.float64` + before filtering. + window_length : int + The length of the filter window (i.e. the number of coefficients). + `window_length` must be a positive odd integer. If `mode` is 'interp', + `window_length` must be less than or equal to the size of `x`. + polyorder : int + The order of the polynomial used to fit the samples. + `polyorder` must be less than `window_length`. + deriv : int, optional + The order of the derivative to compute. This must be a + nonnegative integer. The default is 0, which means to filter + the data without differentiating. + delta : float, optional + The spacing of the samples to which the filter will be applied. + This is only used if deriv > 0. Default is 1.0. + axis : int, optional + The axis of the array `x` along which the filter is to be applied. + Default is -1. + mode : str, optional + Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This + determines the type of extension to use for the padded signal to + which the filter is applied. When `mode` is 'constant', the padding + value is given by `cval`. See the Notes for more details on 'mirror', + 'constant', 'wrap', and 'nearest'. + When the 'interp' mode is selected (the default), no extension + is used. Instead, a degree `polyorder` polynomial is fit to the + last `window_length` values of the edges, and this polynomial is + used to evaluate the last `window_length // 2` output values. + cval : scalar, optional + Value to fill past the edges of the input if `mode` is 'constant'. + Default is 0.0. + + Returns + ------- + y : ndarray, same shape as `x` + The filtered data. + + See Also + -------- + savgol_coeffs + + Notes + ----- + Details on the `mode` options: + + 'mirror': + Repeats the values at the edges in reverse order. The value + closest to the edge is not included. + 'nearest': + The extension contains the nearest input value. + 'constant': + The extension contains the value given by the `cval` argument. + 'wrap': + The extension contains the values from the other end of the array. + + For example, if the input is [1, 2, 3, 4, 5, 6, 7, 8], and + `window_length` is 7, the following shows the extended data for + the various `mode` options (assuming `cval` is 0):: + + mode | Ext | Input | Ext + -----------+---------+------------------------+--------- + 'mirror' | 4 3 2 | 1 2 3 4 5 6 7 8 | 7 6 5 + 'nearest' | 1 1 1 | 1 2 3 4 5 6 7 8 | 8 8 8 + 'constant' | 0 0 0 | 1 2 3 4 5 6 7 8 | 0 0 0 + 'wrap' | 6 7 8 | 1 2 3 4 5 6 7 8 | 1 2 3 + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> from scipy.signal import savgol_filter + >>> np.set_printoptions(precision=2) # For compact display. + >>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9]) + + Filter with a window length of 5 and a degree 2 polynomial. Use + the defaults for all other parameters. 
+
+    >>> savgol_filter(x, 5, 2)
+    array([ 1.66,  3.17,  3.54,  2.86,  0.66,  0.17,  1.  ,  4.  ,  9.  ])
+
+    Note that the last five values in x are samples of a parabola, so
+    when mode='interp' (the default) is used with polyorder=2, the last
+    three values are unchanged. Compare that to, for example,
+    `mode='nearest'`:
+
+    >>> savgol_filter(x, 5, 2, mode='nearest')
+    array([ 1.74,  3.03,  3.54,  2.86,  0.66,  0.17,  1.  ,  4.6 ,  7.97])
+
+    """
+    if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]:
+        raise ValueError("mode must be 'mirror', 'constant', 'nearest', "
+                         "'wrap' or 'interp'.")
+
+    x = np.asarray(x)
+    # Ensure that x is either single or double precision floating point.
+    if x.dtype != np.float64 and x.dtype != np.float32:
+        x = x.astype(np.float64)
+
+    coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)
+
+    if mode == "interp":
+        if window_length > x.size:
+            raise ValueError("If mode is 'interp', window_length must be less "
+                             "than or equal to the size of x.")
+
+        # Do not pad. Instead, for the elements within `window_length // 2`
+        # of the ends of the sequence, use the polynomial that is fitted to
+        # the last `window_length` elements.
+        y = convolve1d(x, coeffs, axis=axis, mode="constant")
+        _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)
+    else:
+        # Any mode other than 'interp' is passed on to ndimage.convolve1d.
+        y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)
+
+    return y
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_savitzky_golay.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/_savitzky_golay.pyc
new file mode 100644
index 0000000..00ba5d5
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/_savitzky_golay.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_spectral.so b/project/venv/lib/python2.7/site-packages/scipy/signal/_spectral.so
new file mode 100755
index 0000000..135cef9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/_spectral.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_upfirdn.py b/project/venv/lib/python2.7/site-packages/scipy/signal/_upfirdn.py
new file mode 100644
index 0000000..ea3616f
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/signal/_upfirdn.py
@@ -0,0 +1,183 @@
+# Code adapted from "upfirdn" python library with permission:
+#
+# Copyright (c) 2009, Motorola, Inc
+#
+# All Rights Reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the name of Motorola nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np + +from ._upfirdn_apply import _output_len, _apply + +__all__ = ['upfirdn', '_output_len'] + + +def _pad_h(h, up): + """Store coefficients in a transposed, flipped arrangement. + + For example, suppose upRate is 3, and the + input number of coefficients is 10, represented as h[0], ..., h[9]. + + Then the internal buffer will look like this:: + + h[9], h[6], h[3], h[0], // flipped phase 0 coefs + 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded) + 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded) + + """ + h_padlen = len(h) + (-len(h) % up) + h_full = np.zeros(h_padlen, h.dtype) + h_full[:len(h)] = h + h_full = h_full.reshape(-1, up).T[:, ::-1].ravel() + return h_full + + +class _UpFIRDn(object): + def __init__(self, h, x_dtype, up, down): + """Helper for resampling""" + h = np.asarray(h) + if h.ndim != 1 or h.size == 0: + raise ValueError('h must be 1D with non-zero length') + self._output_type = np.result_type(h.dtype, x_dtype, np.float32) + h = np.asarray(h, self._output_type) + self._up = int(up) + self._down = int(down) + if self._up < 1 or self._down < 1: + raise ValueError('Both up and down must be >= 1') + # This both transposes, and "flips" each phase for filtering + self._h_trans_flip = _pad_h(h, self._up) + self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip) + + def apply_filter(self, x, axis=-1): + """Apply the prepared filter to the specified axis of a nD signal x""" + output_len = _output_len(len(self._h_trans_flip), x.shape[axis], + self._up, self._down) + output_shape = np.asarray(x.shape) + output_shape[axis] = output_len + out = np.zeros(output_shape, dtype=self._output_type, order='C') + axis = axis % x.ndim + _apply(np.asarray(x, self._output_type), + self._h_trans_flip, out, + self._up, self._down, axis) + return out + + +def upfirdn(h, x, up=1, down=1, axis=-1): + """Upsample, FIR filter, and downsample + + Parameters + ---------- + h : array_like + 1-dimensional FIR (finite-impulse response) filter coefficients. + x : array_like + Input signal array. + up : int, optional + Upsampling rate. Default is 1. + down : int, optional + Downsampling rate. Default is 1. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + + Returns + ------- + y : ndarray + The output signal array. Dimensions will be the same as `x` except + for along `axis`, which will change size according to the `h`, + `up`, and `down` parameters. + + Notes + ----- + The algorithm is an implementation of the block diagram shown on page 129 + of the Vaidyanathan text [1]_ (Figure 4.3-8d). + + .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks, + Prentice Hall, 1993. + + The direct approach of upsampling by factor of P with zero insertion, + FIR filtering of length ``N``, and downsampling by factor of Q is + O(N*Q) per output sample. The polyphase implementation used here is + O(N/P). + + .. 
versionadded:: 0.18 + + Examples + -------- + Simple operations: + + >>> from scipy.signal import upfirdn + >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter + array([ 1., 2., 3., 2., 1.]) + >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion + array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.]) + >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold + array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.]) + >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation + array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ]) + >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3 + array([ 0., 3., 6., 9.]) + >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3 + array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ]) + + Apply a single filter to multiple signals: + + >>> x = np.reshape(np.arange(8), (4, 2)) + >>> x + array([[0, 1], + [2, 3], + [4, 5], + [6, 7]]) + + Apply along the last dimension of ``x``: + + >>> h = [1, 1] + >>> upfirdn(h, x, 2) + array([[ 0., 0., 1., 1.], + [ 2., 2., 3., 3.], + [ 4., 4., 5., 5.], + [ 6., 6., 7., 7.]]) + + Apply along the 0th dimension of ``x``: + + >>> upfirdn(h, x, 2, axis=0) + array([[ 0., 1.], + [ 0., 1.], + [ 2., 3.], + [ 2., 3.], + [ 4., 5.], + [ 4., 5.], + [ 6., 7.], + [ 6., 7.]]) + + """ + x = np.asarray(x) + ufd = _UpFIRDn(h, x.dtype, up, down) + # This is equivalent to (but faster than) using np.apply_along_axis + return ufd.apply_filter(x, axis) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_upfirdn.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/_upfirdn.pyc new file mode 100644 index 0000000..cb232f4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/_upfirdn.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/_upfirdn_apply.so b/project/venv/lib/python2.7/site-packages/scipy/signal/_upfirdn_apply.so new file mode 100755 index 0000000..8143eb9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/_upfirdn_apply.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/bsplines.py b/project/venv/lib/python2.7/site-packages/scipy/signal/bsplines.py new file mode 100644 index 0000000..fd0067a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/bsplines.py @@ -0,0 +1,394 @@ +from __future__ import division, print_function, absolute_import + +from scipy._lib.six import xrange +from numpy import (logical_and, asarray, pi, zeros_like, + piecewise, array, arctan2, tan, zeros, arange, floor) +from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin, + less_equal, greater_equal) + +# From splinemodule.c +from .spline import cspline2d, sepfir2d + +from scipy.special import comb, gamma + +__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic', + 'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval'] + + +def factorial(n): + return gamma(n + 1) + + +def spline_filter(Iin, lmbda=5.0): + """Smoothing spline (cubic) filtering of a rank-2 array. + + Filter an input data set, `Iin`, using a (cubic) smoothing spline of + fall-off `lmbda`. 
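+
+    For example (a minimal sketch with an assumed toy input; the filter
+    preserves the array shape):
+
+    >>> import numpy as np
+    >>> from scipy.signal import spline_filter
+    >>> out = spline_filter(np.eye(8), lmbda=5.0)
+    >>> out.shape
+    (8, 8)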
+ """ + intype = Iin.dtype.char + hcol = array([1.0, 4.0, 1.0], 'f') / 6.0 + if intype in ['F', 'D']: + Iin = Iin.astype('F') + ckr = cspline2d(Iin.real, lmbda) + cki = cspline2d(Iin.imag, lmbda) + outr = sepfir2d(ckr, hcol, hcol) + outi = sepfir2d(cki, hcol, hcol) + out = (outr + 1j * outi).astype(intype) + elif intype in ['f', 'd']: + ckr = cspline2d(Iin, lmbda) + out = sepfir2d(ckr, hcol, hcol) + out = out.astype(intype) + else: + raise TypeError("Invalid data type for Iin") + return out + + +_splinefunc_cache = {} + + +def _bspline_piecefunctions(order): + """Returns the function defined over the left-side pieces for a bspline of + a given order. + + The 0th piece is the first one less than 0. The last piece is a function + identical to 0 (returned as the constant 0). (There are order//2 + 2 total + pieces). + + Also returns the condition functions that when evaluated return boolean + arrays for use with `numpy.piecewise`. + """ + try: + return _splinefunc_cache[order] + except KeyError: + pass + + def condfuncgen(num, val1, val2): + if num == 0: + return lambda x: logical_and(less_equal(x, val1), + greater_equal(x, val2)) + elif num == 2: + return lambda x: less_equal(x, val2) + else: + return lambda x: logical_and(less(x, val1), + greater_equal(x, val2)) + + last = order // 2 + 2 + if order % 2: + startbound = -1.0 + else: + startbound = -0.5 + condfuncs = [condfuncgen(0, 0, startbound)] + bound = startbound + for num in xrange(1, last - 1): + condfuncs.append(condfuncgen(1, bound, bound - 1)) + bound = bound - 1 + condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0)) + + # final value of bound is used in piecefuncgen below + + # the functions to evaluate are taken from the left-hand-side + # in the general expression derived from the central difference + # operator (because they involve fewer terms). + + fval = factorial(order) + + def piecefuncgen(num): + Mk = order // 2 - num + if (Mk < 0): + return 0 # final function is 0 + coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval + for k in xrange(Mk + 1)] + shifts = [-bound - k for k in xrange(Mk + 1)] + + def thefunc(x): + res = 0.0 + for k in range(Mk + 1): + res += coeffs[k] * (x + shifts[k]) ** order + return res + return thefunc + + funclist = [piecefuncgen(k) for k in xrange(last)] + + _splinefunc_cache[order] = (funclist, condfuncs) + + return funclist, condfuncs + + +def bspline(x, n): + """B-spline basis function of order n. + + Notes + ----- + Uses numpy.piecewise and automatic function-generator. + + """ + ax = -abs(asarray(x)) + # number of pieces on the left-side is (n+1)/2 + funclist, condfuncs = _bspline_piecefunctions(n) + condlist = [func(ax) for func in condfuncs] + return piecewise(ax, condlist, funclist) + + +def gauss_spline(x, n): + """Gaussian approximation to B-spline basis function of order n. + + Parameters + ---------- + n : int + The order of the spline. Must be nonnegative, i.e. n >= 0 + + References + ---------- + .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen + F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In: + Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational + Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer + Science, vol 4485. Springer, Berlin, Heidelberg + """ + signsq = (n + 1) / 12.0 + return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq) + + +def cubic(x): + """A cubic B-spline. + + This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``. 
+ """ + ax = abs(asarray(x)) + res = zeros_like(ax) + cond1 = less(ax, 1) + if cond1.any(): + ax1 = ax[cond1] + res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1) + cond2 = ~cond1 & less(ax, 2) + if cond2.any(): + ax2 = ax[cond2] + res[cond2] = 1.0 / 6 * (2 - ax2) ** 3 + return res + + +def quadratic(x): + """A quadratic B-spline. + + This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``. + """ + ax = abs(asarray(x)) + res = zeros_like(ax) + cond1 = less(ax, 0.5) + if cond1.any(): + ax1 = ax[cond1] + res[cond1] = 0.75 - ax1 ** 2 + cond2 = ~cond1 & less(ax, 1.5) + if cond2.any(): + ax2 = ax[cond2] + res[cond2] = (ax2 - 1.5) ** 2 / 2.0 + return res + + +def _coeff_smooth(lam): + xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam) + omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi)) + rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam) + rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi) + return rho, omeg + + +def _hc(k, cs, rho, omega): + return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) * + greater(k, -1)) + + +def _hs(k, cs, rho, omega): + c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) / + (1 - 2 * rho * rho * cos(2 * omega) + rho ** 4)) + gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega) + ak = abs(k) + return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak)) + + +def _cubic_smooth_coeff(signal, lamb): + rho, omega = _coeff_smooth(lamb) + cs = 1 - 2 * rho * cos(omega) + rho * rho + K = len(signal) + yp = zeros((K,), signal.dtype.char) + k = arange(K) + yp[0] = (_hc(0, cs, rho, omega) * signal[0] + + add.reduce(_hc(k + 1, cs, rho, omega) * signal)) + + yp[1] = (_hc(0, cs, rho, omega) * signal[0] + + _hc(1, cs, rho, omega) * signal[1] + + add.reduce(_hc(k + 2, cs, rho, omega) * signal)) + + for n in range(2, K): + yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] - + rho * rho * yp[n - 2]) + + y = zeros((K,), signal.dtype.char) + + y[K - 1] = add.reduce((_hs(k, cs, rho, omega) + + _hs(k + 1, cs, rho, omega)) * signal[::-1]) + y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) + + _hs(k + 2, cs, rho, omega)) * signal[::-1]) + + for n in range(K - 3, -1, -1): + y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] - + rho * rho * y[n + 2]) + + return y + + +def _cubic_coeff(signal): + zi = -2 + sqrt(3) + K = len(signal) + yplus = zeros((K,), signal.dtype.char) + powers = zi ** arange(K) + yplus[0] = signal[0] + zi * add.reduce(powers * signal) + for k in range(1, K): + yplus[k] = signal[k] + zi * yplus[k - 1] + output = zeros((K,), signal.dtype) + output[K - 1] = zi / (zi - 1) * yplus[K - 1] + for k in range(K - 2, -1, -1): + output[k] = zi * (output[k + 1] - yplus[k]) + return output * 6.0 + + +def _quadratic_coeff(signal): + zi = -3 + 2 * sqrt(2.0) + K = len(signal) + yplus = zeros((K,), signal.dtype.char) + powers = zi ** arange(K) + yplus[0] = signal[0] + zi * add.reduce(powers * signal) + for k in range(1, K): + yplus[k] = signal[k] + zi * yplus[k - 1] + output = zeros((K,), signal.dtype.char) + output[K - 1] = zi / (zi - 1) * yplus[K - 1] + for k in range(K - 2, -1, -1): + output[k] = zi * (output[k + 1] - yplus[k]) + return output * 8.0 + + +def cspline1d(signal, lamb=0.0): + """ + Compute cubic spline coefficients for rank-1 array. + + Find the cubic spline coefficients for a 1-D signal assuming + mirror-symmetric boundary conditions. To obtain the signal back from the + spline representation mirror-symmetric-convolve these coefficients with a + length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 . 
+ + Parameters + ---------- + signal : ndarray + A rank-1 array representing samples of a signal. + lamb : float, optional + Smoothing coefficient, default is 0.0. + + Returns + ------- + c : ndarray + Cubic spline coefficients. + + """ + if lamb != 0.0: + return _cubic_smooth_coeff(signal, lamb) + else: + return _cubic_coeff(signal) + + +def qspline1d(signal, lamb=0.0): + """Compute quadratic spline coefficients for rank-1 array. + + Find the quadratic spline coefficients for a 1-D signal assuming + mirror-symmetric boundary conditions. To obtain the signal back from the + spline representation mirror-symmetric-convolve these coefficients with a + length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 . + + Parameters + ---------- + signal : ndarray + A rank-1 array representing samples of a signal. + lamb : float, optional + Smoothing coefficient (must be zero for now). + + Returns + ------- + c : ndarray + Cubic spline coefficients. + + """ + if lamb != 0.0: + raise ValueError("Smoothing quadratic splines not supported yet.") + else: + return _quadratic_coeff(signal) + + +def cspline1d_eval(cj, newx, dx=1.0, x0=0): + """Evaluate a spline at the new set of points. + + `dx` is the old sample-spacing while `x0` was the old origin. In + other-words the old-sample points (knot-points) for which the `cj` + represent spline coefficients were at equally-spaced points of: + + oldx = x0 + j*dx j=0...N-1, with N=len(cj) + + Edges are handled using mirror-symmetric boundary conditions. + + """ + newx = (asarray(newx) - x0) / float(dx) + res = zeros_like(newx, dtype=cj.dtype) + if res.size == 0: + return res + N = len(cj) + cond1 = newx < 0 + cond2 = newx > (N - 1) + cond3 = ~(cond1 | cond2) + # handle general mirror-symmetry + res[cond1] = cspline1d_eval(cj, -newx[cond1]) + res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) + newx = newx[cond3] + if newx.size == 0: + return res + result = zeros_like(newx, dtype=cj.dtype) + jlower = floor(newx - 2).astype(int) + 1 + for i in range(4): + thisj = jlower + i + indj = thisj.clip(0, N - 1) # handle edge cases + result += cj[indj] * cubic(newx - thisj) + res[cond3] = result + return res + + +def qspline1d_eval(cj, newx, dx=1.0, x0=0): + """Evaluate a quadratic spline at the new set of points. + + `dx` is the old sample-spacing while `x0` was the old origin. In + other-words the old-sample points (knot-points) for which the `cj` + represent spline coefficients were at equally-spaced points of:: + + oldx = x0 + j*dx j=0...N-1, with N=len(cj) + + Edges are handled using mirror-symmetric boundary conditions. 
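+
+    Examples
+    --------
+    (Editor's sketch, not part of the upstream file: fit quadratic-spline
+    coefficients to a signal sampled with ``dx=1, x0=0`` and evaluate the
+    spline on a twice-as-fine grid.)
+
+    >>> import numpy as np
+    >>> from scipy.signal import qspline1d, qspline1d_eval
+    >>> y = np.sin(np.linspace(0, 2 * np.pi, 21))
+    >>> cj = qspline1d(y)                       # knot coefficients
+    >>> ynew = qspline1d_eval(cj, np.linspace(0, 20, 41))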
+ + """ + newx = (asarray(newx) - x0) / dx + res = zeros_like(newx) + if res.size == 0: + return res + N = len(cj) + cond1 = newx < 0 + cond2 = newx > (N - 1) + cond3 = ~(cond1 | cond2) + # handle general mirror-symmetry + res[cond1] = qspline1d_eval(cj, -newx[cond1]) + res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) + newx = newx[cond3] + if newx.size == 0: + return res + result = zeros_like(newx) + jlower = floor(newx - 1.5).astype(int) + 1 + for i in range(3): + thisj = jlower + i + indj = thisj.clip(0, N - 1) # handle edge cases + result += cj[indj] * quadratic(newx - thisj) + res[cond3] = result + return res diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/bsplines.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/bsplines.pyc new file mode 100644 index 0000000..e1b564e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/bsplines.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/filter_design.py b/project/venv/lib/python2.7/site-packages/scipy/signal/filter_design.py new file mode 100644 index 0000000..c28936a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/filter_design.py @@ -0,0 +1,4574 @@ +"""Filter design. +""" +from __future__ import division, print_function, absolute_import + +import math +import operator +import warnings + +import numpy +import numpy as np +from numpy import (atleast_1d, poly, polyval, roots, real, asarray, + resize, pi, absolute, logspace, r_, sqrt, tan, log10, + arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate, + zeros, sinh, append, concatenate, prod, ones, array, + mintypecode) +from numpy.polynomial.polynomial import polyval as npp_polyval + +from scipy import special, optimize, fftpack +from scipy.special import comb, factorial +from scipy._lib._numpy_compat import polyvalfromroots + + +__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize', + 'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign', + 'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel', + 'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord', + 'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap', + 'BadCoefficients', 'freqs_zpk', 'freqz_zpk', + 'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay', + 'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk', + 'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk'] + + +class BadCoefficients(UserWarning): + """Warning about badly conditioned filter coefficients""" + pass + + +abs = absolute + + +def _is_int_type(x): + """ + Check if input is of a scalar integer type (so ``5`` and ``array(5)`` will + pass, while ``5.0`` and ``array([5])`` will fail. + """ + if np.ndim(x) != 0: + # Older versions of numpy did not raise for np.array([1]).__index__() + # This is safe to remove when support for those versions is dropped + return False + try: + operator.index(x) + except TypeError: + return False + else: + return True + + +def findfreqs(num, den, N, kind='ba'): + """ + Find array of frequencies for computing the response of an analog filter. + + Parameters + ---------- + num, den : array_like, 1-D + The polynomial coefficients of the numerator and denominator of the + transfer function of the filter or LTI system, where the coefficients + are ordered from highest to lowest degree. Or, the roots of the + transfer function numerator and denominator (i.e. zeroes and poles). + N : int + The length of the array to be computed. 
+ kind : str {'ba', 'zp'}, optional + Specifies whether the numerator and denominator are specified by their + polynomial coefficients ('ba'), or their roots ('zp'). + + Returns + ------- + w : (N,) ndarray + A 1-D array of frequencies, logarithmically spaced. + + Examples + -------- + Find a set of nine frequencies that span the "interesting part" of the + frequency response for the filter with the transfer function + + H(s) = s / (s^2 + 8s + 25) + + >>> from scipy import signal + >>> signal.findfreqs([1, 0], [1, 8, 25], N=9) + array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01, + 3.16227766e-01, 1.00000000e+00, 3.16227766e+00, + 1.00000000e+01, 3.16227766e+01, 1.00000000e+02]) + """ + if kind == 'ba': + ep = atleast_1d(roots(den)) + 0j + tz = atleast_1d(roots(num)) + 0j + elif kind == 'zp': + ep = atleast_1d(den) + 0j + tz = atleast_1d(num) + 0j + else: + raise ValueError("input must be one of {'ba', 'zp'}") + + if len(ep) == 0: + ep = atleast_1d(-1000) + 0j + + ez = r_['-1', + numpy.compress(ep.imag >= 0, ep, axis=-1), + numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)] + + integ = abs(ez) < 1e-10 + hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) + + 1.5 * ez.imag)) + 0.5) + lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) + + 2 * ez.imag)) - 0.5) + + w = logspace(lfreq, hfreq, N) + return w + + +def freqs(b, a, worN=200, plot=None): + """ + Compute frequency response of analog filter. + + Given the M-order numerator `b` and N-order denominator `a` of an analog + filter, compute its frequency response:: + + b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M] + H(w) = ---------------------------------------------- + a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N] + + Parameters + ---------- + b : array_like + Numerator of a linear filter. + a : array_like + Denominator of a linear filter. + worN : {None, int, array_like}, optional + If None, then compute at 200 frequencies around the interesting parts + of the response curve (determined by pole-zero locations). If a single + integer, then compute at that many frequencies. Otherwise, compute the + response at the angular frequencies (e.g. rad/s) given in `worN`. + plot : callable, optional + A callable that takes two arguments. If given, the return parameters + `w` and `h` are passed to plot. Useful for plotting the frequency + response inside `freqs`. + + Returns + ------- + w : ndarray + The angular frequencies at which `h` was computed. + h : ndarray + The frequency response. + + See Also + -------- + freqz : Compute the frequency response of a digital filter. + + Notes + ----- + Using Matplotlib's "plot" function as the callable for `plot` produces + unexpected results, this plots the real part of the complex transfer + function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``. 
+ + Examples + -------- + >>> from scipy.signal import freqs, iirfilter + + >>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1') + + >>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000)) + + >>> import matplotlib.pyplot as plt + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.xlabel('Frequency') + >>> plt.ylabel('Amplitude response [dB]') + >>> plt.grid() + >>> plt.show() + + """ + if worN is None: + # For backwards compatibility + w = findfreqs(b, a, 200) + elif _is_int_type(worN): + w = findfreqs(b, a, worN) + else: + w = atleast_1d(worN) + + s = 1j * w + h = polyval(b, s) / polyval(a, s) + if plot is not None: + plot(w, h) + + return w, h + + +def freqs_zpk(z, p, k, worN=200): + """ + Compute frequency response of analog filter. + + Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its + frequency response:: + + (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1]) + H(w) = k * ---------------------------------------- + (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1]) + + Parameters + ---------- + z : array_like + Zeroes of a linear filter + p : array_like + Poles of a linear filter + k : scalar + Gain of a linear filter + worN : {None, int, array_like}, optional + If None, then compute at 200 frequencies around the interesting parts + of the response curve (determined by pole-zero locations). If a single + integer, then compute at that many frequencies. Otherwise, compute the + response at the angular frequencies (e.g. rad/s) given in `worN`. + + Returns + ------- + w : ndarray + The angular frequencies at which `h` was computed. + h : ndarray + The frequency response. + + See Also + -------- + freqs : Compute the frequency response of an analog filter in TF form + freqz : Compute the frequency response of a digital filter in TF form + freqz_zpk : Compute the frequency response of a digital filter in ZPK form + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + >>> from scipy.signal import freqs_zpk, iirfilter + + >>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1', + ... output='zpk') + + >>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000)) + + >>> import matplotlib.pyplot as plt + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.xlabel('Frequency') + >>> plt.ylabel('Amplitude response [dB]') + >>> plt.grid() + >>> plt.show() + + """ + k = np.asarray(k) + if k.size > 1: + raise ValueError('k must be a single scalar gain') + + if worN is None: + # For backwards compatibility + w = findfreqs(z, p, 200, kind='zp') + elif _is_int_type(worN): + w = findfreqs(z, p, worN, kind='zp') + else: + w = worN + + w = atleast_1d(w) + s = 1j * w + num = polyvalfromroots(s, z) + den = polyvalfromroots(s, p) + h = k * num/den + return w, h + + +def freqz(b, a=1, worN=512, whole=False, plot=None, fs=2*pi): + """ + Compute the frequency response of a digital filter. + + Given the M-order numerator `b` and N-order denominator `a` of a digital + filter, compute its frequency response:: + + jw -jw -jwM + jw B(e ) b[0] + b[1]e + ... + b[M]e + H(e ) = ------ = ----------------------------------- + jw -jw -jwN + A(e ) a[0] + a[1]e + ... + a[N]e + + Parameters + ---------- + b : array_like + Numerator of a linear filter. If `b` has dimension greater than 1, + it is assumed that the coefficients are stored in the first dimension, + and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies + array must be compatible for broadcasting. + a : array_like + Denominator of a linear filter. 
If `b` has dimension greater than 1, + it is assumed that the coefficients are stored in the first dimension, + and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies + array must be compatible for broadcasting. + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). This is a convenient alternative to:: + + np.linspace(0, fs if whole else fs/2, N, endpoint=False) + + Using a number that is fast for FFT computations can result in + faster computations (see Notes). + + If an array_like, compute the response at the frequencies given. + These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if w is array_like. + plot : callable + A callable that takes two arguments. If given, the return parameters + `w` and `h` are passed to plot. Useful for plotting the frequency + response inside `freqz`. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. + + See Also + -------- + freqz_zpk + sosfreqz + + Notes + ----- + Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable + for `plot` produces unexpected results, as this plots the real part of the + complex transfer function, not the magnitude. + Try ``lambda w, h: plot(w, np.abs(h))``. + + A direct computation via (R)FFT is used to compute the frequency response + when the following conditions are met: + + 1. An integer value is given for `worN`. + 2. `worN` is fast to compute via FFT (i.e., + `next_fast_len(worN) <scipy.fftpack.next_fast_len>` equals `worN`). + 3. The denominator coefficients are a single value (``a.shape[0] == 1``). + 4. `worN` is at least as long as the numerator coefficients + (``worN >= b.shape[0]``). + 5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``. + + For long FIR filters, the FFT approach can have lower error and be much + faster than the equivalent direct polynomial calculation. + + Examples + -------- + >>> from scipy import signal + >>> b = signal.firwin(80, 0.5, window=('kaiser', 8)) + >>> w, h = signal.freqz(b) + + >>> import matplotlib.pyplot as plt + >>> fig, ax1 = plt.subplots() + >>> ax1.set_title('Digital filter frequency response') + + >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') + >>> ax1.set_ylabel('Amplitude [dB]', color='b') + >>> ax1.set_xlabel('Frequency [rad/sample]') + + >>> ax2 = ax1.twinx() + >>> angles = np.unwrap(np.angle(h)) + >>> ax2.plot(w, angles, 'g') + >>> ax2.set_ylabel('Angle (radians)', color='g') + >>> ax2.grid() + >>> ax2.axis('tight') + >>> plt.show() + + Broadcasting Examples + + Suppose we have two FIR filters whose coefficients are stored in the + rows of an array with shape (2, 25). For this demonstration we'll + use random data: + + >>> np.random.seed(42) + >>> b = np.random.rand(2, 25) + + To compute the frequency response for these two filters with one call + to `freqz`, we must pass in ``b.T``, because `freqz` expects the first + axis to hold the coefficients. 
We must then extend the shape with a + trivial dimension of length 1 to allow broadcasting with the array + of frequencies. That is, we pass in ``b.T[..., np.newaxis]``, which has + shape (25, 2, 1): + + >>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024) + >>> w.shape + (1024,) + >>> h.shape + (2, 1024) + + Now suppose we have two transfer functions, with the same numerator + coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators + are stored in the first dimension of the two-dimensional array `a`:: + + a = [ 1 1 ] + [ -0.25, -0.5 ] + + >>> b = np.array([0.5, 0.5]) + >>> a = np.array([[1, 1], [-0.25, -0.5]]) + + Only `a` is more than one-dimensional. To make it compatible for + broadcasting with the frequencies, we extend it with a trivial dimension + in the call to `freqz`: + + >>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024) + >>> w.shape + (1024,) + >>> h.shape + (2, 1024) + + """ + b = atleast_1d(b) + a = atleast_1d(a) + + if worN is None: + # For backwards compatibility + worN = 512 + + h = None + + if _is_int_type(worN): + N = operator.index(worN) + del worN + if N < 0: + raise ValueError('worN must be nonnegative, got %s' % (N,)) + lastpoint = 2 * pi if whole else pi + w = np.linspace(0, lastpoint, N, endpoint=False) + if (a.size == 1 and N >= b.shape[0] and + fftpack.next_fast_len(N) == N and + (b.ndim == 1 or (b.shape[-1] == 1))): + # if N is fast, 2 * N will be fast, too, so no need to check + n_fft = N if whole else N * 2 + if np.isrealobj(b) and np.isrealobj(a): + fft_func = np.fft.rfft + else: + fft_func = fftpack.fft + h = fft_func(b, n=n_fft, axis=0)[:N] + h /= a + if fft_func is np.fft.rfft and whole: + # exclude DC and maybe Nyquist (no need to use axis_reverse + # here because we can build reversal with the truncation) + stop = -1 if n_fft % 2 == 1 else -2 + h_flip = slice(stop, 0, -1) + h = np.concatenate((h, h[h_flip].conj())) + if b.ndim > 1: + # Last axis of h has length 1, so drop it. + h = h[..., 0] + # Rotate the first axis of h to the end. + h = np.rollaxis(h, 0, h.ndim) + else: + w = atleast_1d(worN) + del worN + w = 2*pi*w/fs + + if h is None: # still need to compute using freqs w + zm1 = exp(-1j * w) + h = (npp_polyval(zm1, b, tensor=False) / + npp_polyval(zm1, a, tensor=False)) + + w = w*fs/(2*pi) + + if plot is not None: + plot(w, h) + + return w, h + + +def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi): + r""" + Compute the frequency response of a digital filter in ZPK form. + + Given the Zeros, Poles and Gain of a digital filter, compute its frequency + response: + + :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])` + + where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are + the `poles`. + + Parameters + ---------- + z : array_like + Zeroes of a linear filter + p : array_like + Poles of a linear filter + k : scalar + Gain of a linear filter + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). + + If an array_like, compute the response at the frequencies given. + These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if w is array_like. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. 
versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. + + See Also + -------- + freqs : Compute the frequency response of an analog filter in TF form + freqs_zpk : Compute the frequency response of an analog filter in ZPK form + freqz : Compute the frequency response of a digital filter in TF form + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + Design a 4th-order digital Butterworth filter with cut-off of 100 Hz in a + system with sample rate of 1000 Hz, and plot the frequency response: + + >>> from scipy import signal + >>> z, p, k = signal.butter(4, 100, output='zpk', fs=1000) + >>> w, h = signal.freqz_zpk(z, p, k, fs=1000) + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(1, 1, 1) + >>> ax1.set_title('Digital filter frequency response') + + >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') + >>> ax1.set_ylabel('Amplitude [dB]', color='b') + >>> ax1.set_xlabel('Frequency [Hz]') + >>> ax1.grid() + + >>> ax2 = ax1.twinx() + >>> angles = np.unwrap(np.angle(h)) + >>> ax2.plot(w, angles, 'g') + >>> ax2.set_ylabel('Angle [radians]', color='g') + + >>> plt.axis('tight') + >>> plt.show() + + """ + z, p = map(atleast_1d, (z, p)) + + if whole: + lastpoint = 2 * pi + else: + lastpoint = pi + + if worN is None: + # For backwards compatibility + w = numpy.linspace(0, lastpoint, 512, endpoint=False) + elif _is_int_type(worN): + w = numpy.linspace(0, lastpoint, worN, endpoint=False) + else: + w = atleast_1d(worN) + w = 2*pi*w/fs + + zm1 = exp(1j * w) + h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p) + + w = w*fs/(2*pi) + + return w, h + + +def group_delay(system, w=512, whole=False, fs=2*pi): + r"""Compute the group delay of a digital filter. + + The group delay measures by how many samples amplitude envelopes of + various spectral components of a signal are delayed by a filter. + It is formally defined as the derivative of continuous (unwrapped) phase:: + + d jw + D(w) = - -- arg H(e) + dw + + Parameters + ---------- + system : tuple of array_like (b, a) + Numerator and denominator coefficients of a filter transfer function. + w : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). + + If an array_like, compute the delay at the frequencies given. These + are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if w is array_like. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which group delay was computed, in the same units + as `fs`. By default, `w` is normalized to the range [0, pi) + (radians/sample). + gd : ndarray + The group delay. + + Notes + ----- + The similar function in MATLAB is called `grpdelay`. + + If the transfer function :math:`H(z)` has zeros or poles on the unit + circle, the group delay at corresponding frequencies is undefined. + When such a case arises the warning is raised and the group delay + is set to 0 at those frequencies. + + For the details of numerical computation of the group delay refer to [1]_. + + .. 
versionadded:: 0.16.0 + + See Also + -------- + freqz : Frequency response of a digital filter + + References + ---------- + .. [1] Richard G. Lyons, "Understanding Digital Signal Processing, + 3rd edition", p. 830. + + Examples + -------- + >>> from scipy import signal + >>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1') + >>> w, gd = signal.group_delay((b, a)) + + >>> import matplotlib.pyplot as plt + >>> plt.title('Digital filter group delay') + >>> plt.plot(w, gd) + >>> plt.ylabel('Group delay [samples]') + >>> plt.xlabel('Frequency [rad/sample]') + >>> plt.show() + + """ + if w is None: + # For backwards compatibility + w = 512 + + if _is_int_type(w): + if whole: + w = np.linspace(0, 2 * pi, w, endpoint=False) + else: + w = np.linspace(0, pi, w, endpoint=False) + else: + w = np.atleast_1d(w) + w = 2*pi*w/fs + + b, a = map(np.atleast_1d, system) + c = np.convolve(b, a[::-1]) + cr = c * np.arange(c.size) + z = np.exp(-1j * w) + num = np.polyval(cr[::-1], z) + den = np.polyval(c[::-1], z) + singular = np.absolute(den) < 10 * EPSILON + if np.any(singular): + warnings.warn( + "The group delay is singular at frequencies [{0}], setting to 0". + format(", ".join("{0:.3f}".format(ws) for ws in w[singular])) + ) + + gd = np.zeros_like(w) + gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1 + + w = w*fs/(2*pi) + + return w, gd + + +def _validate_sos(sos): + """Helper to validate a SOS input""" + sos = np.atleast_2d(sos) + if sos.ndim != 2: + raise ValueError('sos array must be 2D') + n_sections, m = sos.shape + if m != 6: + raise ValueError('sos array must be shape (n_sections, 6)') + if not (sos[:, 3] == 1).all(): + raise ValueError('sos[:, 3] should be all ones') + return sos, n_sections + + +def sosfreqz(sos, worN=512, whole=False, fs=2*pi): + r""" + Compute the frequency response of a digital filter in SOS format. + + Given `sos`, an array with shape (n, 6) of second order sections of + a digital filter, compute the frequency response of the system function:: + + B0(z) B1(z) B{n-1}(z) + H(z) = ----- * ----- * ... * --------- + A0(z) A1(z) A{n-1}(z) + + for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and + denominator of the transfer function of the k-th second order section. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). Using a number that is fast for FFT computations can result + in faster computations (see Notes of `freqz`). + + If an array_like, compute the response at the frequencies given (must + be 1D). These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. 
+ + See Also + -------- + freqz, sosfilt + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + Design a 15th-order bandpass filter in SOS format. + + >>> from scipy import signal + >>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', + ... output='sos') + + Compute the frequency response at 1500 points from DC to Nyquist. + + >>> w, h = signal.sosfreqz(sos, worN=1500) + + Plot the response. + + >>> import matplotlib.pyplot as plt + >>> plt.subplot(2, 1, 1) + >>> db = 20*np.log10(np.abs(h)) + >>> plt.plot(w/np.pi, db) + >>> plt.ylim(-75, 5) + >>> plt.grid(True) + >>> plt.yticks([0, -20, -40, -60]) + >>> plt.ylabel('Gain [dB]') + >>> plt.title('Frequency Response') + >>> plt.subplot(2, 1, 2) + >>> plt.plot(w/np.pi, np.angle(h)) + >>> plt.grid(True) + >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi], + ... [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$']) + >>> plt.ylabel('Phase [rad]') + >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)') + >>> plt.show() + + If the same filter is implemented as a single transfer function, + numerical error corrupts the frequency response: + + >>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', + ... output='ba') + >>> w, h = signal.freqz(b, a, worN=1500) + >>> plt.subplot(2, 1, 1) + >>> db = 20*np.log10(np.abs(h)) + >>> plt.plot(w/np.pi, db) + >>> plt.ylim(-75, 5) + >>> plt.grid(True) + >>> plt.yticks([0, -20, -40, -60]) + >>> plt.ylabel('Gain [dB]') + >>> plt.title('Frequency Response') + >>> plt.subplot(2, 1, 2) + >>> plt.plot(w/np.pi, np.angle(h)) + >>> plt.grid(True) + >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi], + ... [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$']) + >>> plt.ylabel('Phase [rad]') + >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)') + >>> plt.show() + + """ + + sos, n_sections = _validate_sos(sos) + if n_sections == 0: + raise ValueError('Cannot compute frequencies with no sections') + h = 1. + for row in sos: + w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole, fs=fs) + h *= rowh + return w, h + + +def _cplxreal(z, tol=None): + """ + Split into complex and real parts, combining conjugate pairs. + + The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`) + elements. Every complex element must be part of a complex-conjugate pair, + which are combined into a single number (with positive imaginary part) in + the output. Two complex numbers are considered a conjugate pair if their + real and imaginary parts differ in magnitude by less than ``tol * abs(z)``. + + Parameters + ---------- + z : array_like + Vector of complex numbers to be sorted and split + tol : float, optional + Relative tolerance for testing realness and conjugate equality. + Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for + float64) + + Returns + ------- + zc : ndarray + Complex elements of `z`, with each pair represented by a single value + having positive imaginary part, sorted first by real part, and then + by magnitude of imaginary part. The pairs are averaged when combined + to reduce error. + zr : ndarray + Real elements of `z` (those having imaginary part less than + `tol` times their magnitude), sorted by value. + + Raises + ------ + ValueError + If there are any complex numbers in `z` for which a conjugate + cannot be found. 
+ + See Also + -------- + _cplxpair + + Examples + -------- + >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] + >>> zc, zr = _cplxreal(a) + >>> print(zc) + [ 1.+1.j 2.+1.j 2.+1.j 2.+2.j] + >>> print(zr) + [ 1. 3. 4.] + """ + + z = atleast_1d(z) + if z.size == 0: + return z, z + elif z.ndim != 1: + raise ValueError('_cplxreal only accepts 1D input') + + if tol is None: + # Get tolerance from dtype of input + tol = 100 * np.finfo((1.0 * z).dtype).eps + + # Sort by real part, magnitude of imaginary part (speed up further sorting) + z = z[np.lexsort((abs(z.imag), z.real))] + + # Split reals from conjugate pairs + real_indices = abs(z.imag) <= tol * abs(z) + zr = z[real_indices].real + + if len(zr) == len(z): + # Input is entirely real + return array([]), zr + + # Split positive and negative halves of conjugates + z = z[~real_indices] + zp = z[z.imag > 0] + zn = z[z.imag < 0] + + if len(zp) != len(zn): + raise ValueError('Array contains complex value with no matching ' + 'conjugate.') + + # Find runs of (approximately) the same real part + same_real = np.diff(zp.real) <= tol * abs(zp[:-1]) + diffs = numpy.diff(concatenate(([0], same_real, [0]))) + run_starts = numpy.nonzero(diffs > 0)[0] + run_stops = numpy.nonzero(diffs < 0)[0] + + # Sort each run by their imaginary parts + for i in range(len(run_starts)): + start = run_starts[i] + stop = run_stops[i] + 1 + for chunk in (zp[start:stop], zn[start:stop]): + chunk[...] = chunk[np.lexsort([abs(chunk.imag)])] + + # Check that negatives match positives + if any(abs(zp - zn.conj()) > tol * abs(zn)): + raise ValueError('Array contains complex value with no matching ' + 'conjugate.') + + # Average out numerical inaccuracy in real vs imag parts of pairs + zc = (zp + zn.conj()) / 2 + + return zc, zr + + +def _cplxpair(z, tol=None): + """ + Sort into pairs of complex conjugates. + + Complex conjugates in `z` are sorted by increasing real part. In each + pair, the number with negative imaginary part appears first. + + If pairs have identical real parts, they are sorted by increasing + imaginary magnitude. + + Two complex numbers are considered a conjugate pair if their real and + imaginary parts differ in magnitude by less than ``tol * abs(z)``. The + pairs are forced to be exact complex conjugates by averaging the positive + and negative values. + + Purely real numbers are also sorted, but placed after the complex + conjugate pairs. A number is considered real if its imaginary part is + smaller than `tol` times the magnitude of the number. + + Parameters + ---------- + z : array_like + 1-dimensional input array to be sorted. + tol : float, optional + Relative tolerance for testing realness and conjugate equality. + Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for + float64) + + Returns + ------- + y : ndarray + Complex conjugate pairs followed by real numbers. + + Raises + ------ + ValueError + If there are any complex numbers in `z` for which a conjugate + cannot be found. 
+ + See Also + -------- + _cplxreal + + Examples + -------- + >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] + >>> z = _cplxpair(a) + >>> print(z) + [ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j + 3.+0.j 4.+0.j] + """ + + z = atleast_1d(z) + if z.size == 0 or np.isrealobj(z): + return np.sort(z) + + if z.ndim != 1: + raise ValueError('z must be 1-dimensional') + + zc, zr = _cplxreal(z, tol) + + # Interleave complex values and their conjugates, with negative imaginary + # parts first in each pair + zc = np.dstack((zc.conj(), zc)).flatten() + z = np.append(zc, zr) + return z + + +def tf2zpk(b, a): + r"""Return zero, pole, gain (z, p, k) representation from a numerator, + denominator representation of a linear filter. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + + Returns + ------- + z : ndarray + Zeros of the transfer function. + p : ndarray + Poles of the transfer function. + k : float + System gain. + + Notes + ----- + If some values of `b` are too close to 0, they are removed. In that case, + a BadCoefficients warning is emitted. + + The `b` and `a` arrays are interpreted as coefficients for positive, + descending powers of the transfer function variable. So the inputs + :math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]` + can represent an analog filter of the form: + + .. math:: + + H(s) = \frac + {b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M} + {a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N} + + or a discrete-time filter of the form: + + .. math:: + + H(z) = \frac + {b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M} + {a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N} + + This "positive powers" form is found more commonly in controls + engineering. If `M` and `N` are equal (which is true for all filters + generated by the bilinear transform), then this happens to be equivalent + to the "negative powers" discrete-time form preferred in DSP: + + .. math:: + + H(z) = \frac + {b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}} + {a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}} + + Although this is true for common filters, remember that this is not true + in the general case. If `M` and `N` are not equal, the discrete-time + transfer function coefficients must first be converted to the "positive + powers" form before finding the poles and zeros. + + """ + b, a = normalize(b, a) + b = (b + 0.0) / a[0] + a = (a + 0.0) / a[0] + k = b[0] + b /= b[0] + z = roots(b) + p = roots(a) + return z, p, k + + +def zpk2tf(z, p, k): + """ + Return polynomial transfer function representation from zeros and poles + + Parameters + ---------- + z : array_like + Zeros of the transfer function. + p : array_like + Poles of the transfer function. + k : float + System gain. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + """ + z = atleast_1d(z) + k = atleast_1d(k) + if len(z.shape) > 1: + temp = poly(z[0]) + b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char) + if len(k) == 1: + k = [k[0]] * z.shape[0] + for i in range(z.shape[0]): + b[i] = k[i] * poly(z[i]) + else: + b = k * poly(z) + a = atleast_1d(poly(p)) + + # Use real output if possible. Copied from numpy.poly, since + # we can't depend on a specific version of numpy. + if issubclass(b.dtype.type, numpy.complexfloating): + # if complex roots are all complex conjugates, the roots are real. 
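+        # (Editorial note, not upstream: "the roots are real" above is
+        # shorthand for "the coefficients are real": when every root with
+        # positive imaginary part has a matching conjugate below the axis,
+        # any imaginary residue in the polynomial coefficients is rounding
+        # noise and is safely discarded.)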
+ roots = numpy.asarray(z, complex) + pos_roots = numpy.compress(roots.imag > 0, roots) + neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots)) + if len(pos_roots) == len(neg_roots): + if numpy.all(numpy.sort_complex(neg_roots) == + numpy.sort_complex(pos_roots)): + b = b.real.copy() + + if issubclass(a.dtype.type, numpy.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = numpy.asarray(p, complex) + pos_roots = numpy.compress(roots.imag > 0, roots) + neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots)) + if len(pos_roots) == len(neg_roots): + if numpy.all(numpy.sort_complex(neg_roots) == + numpy.sort_complex(pos_roots)): + a = a.real.copy() + + return b, a + + +def tf2sos(b, a, pairing='nearest'): + """ + Return second-order sections from transfer function representation + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + pairing : {'nearest', 'keep_odd'}, optional + The method to use to combine pairs of poles and zeros into sections. + See `zpk2sos`. + + Returns + ------- + sos : ndarray + Array of second-order filter coefficients, with shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + See Also + -------- + zpk2sos, sosfilt + + Notes + ----- + It is generally discouraged to convert from TF to SOS format, since doing + so usually will not improve numerical precision errors. Instead, consider + designing filters in ZPK format and converting directly to SOS. TF is + converted to SOS by first converting to ZPK format, then converting + ZPK to SOS. + + .. versionadded:: 0.16.0 + """ + return zpk2sos(*tf2zpk(b, a), pairing=pairing) + + +def sos2tf(sos): + """ + Return a single transfer function from a series of second-order sections + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + Notes + ----- + .. versionadded:: 0.16.0 + """ + sos = np.asarray(sos) + b = [1.] + a = [1.] + n_sections = sos.shape[0] + for section in range(n_sections): + b = np.polymul(b, sos[section, :3]) + a = np.polymul(a, sos[section, 3:]) + return b, a + + +def sos2zpk(sos): + """ + Return zeros, poles, and gain of a series of second-order sections + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + z : ndarray + Zeros of the transfer function. + p : ndarray + Poles of the transfer function. + k : float + System gain. + + Notes + ----- + .. versionadded:: 0.16.0 + """ + sos = np.asarray(sos) + n_sections = sos.shape[0] + z = np.empty(n_sections*2, np.complex128) + p = np.empty(n_sections*2, np.complex128) + k = 1. 
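+    # (Editorial note, not upstream: each 6-coefficient row is one biquad.
+    # tf2zpk on the row's numerator/denominator halves yields two zeros
+    # and two poles, and the per-section gains multiply into the overall
+    # system gain k.)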
+ for section in range(n_sections): + zpk = tf2zpk(sos[section, :3], sos[section, 3:]) + z[2*section:2*(section+1)] = zpk[0] + p[2*section:2*(section+1)] = zpk[1] + k *= zpk[2] + return z, p, k + + +def _nearest_real_complex_idx(fro, to, which): + """Get the next closest real or complex element based on distance""" + assert which in ('real', 'complex') + order = np.argsort(np.abs(fro - to)) + mask = np.isreal(fro[order]) + if which == 'complex': + mask = ~mask + return order[np.nonzero(mask)[0][0]] + + +def zpk2sos(z, p, k, pairing='nearest'): + """ + Return second-order sections from zeros, poles, and gain of a system + + Parameters + ---------- + z : array_like + Zeros of the transfer function. + p : array_like + Poles of the transfer function. + k : float + System gain. + pairing : {'nearest', 'keep_odd'}, optional + The method to use to combine pairs of poles and zeros into sections. + See Notes below. + + Returns + ------- + sos : ndarray + Array of second-order filter coefficients, with shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + See Also + -------- + sosfilt + + Notes + ----- + The algorithm used to convert ZPK to SOS format is designed to + minimize errors due to numerical precision issues. The pairing + algorithm attempts to minimize the peak gain of each biquadratic + section. This is done by pairing poles with the nearest zeros, starting + with the poles closest to the unit circle. + + *Algorithms* + + The current algorithms are designed specifically for use with digital + filters. (The output coefficients are not correct for analog filters.) + + The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'`` + algorithms are mostly shared. The ``nearest`` algorithm attempts to + minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under + the constraint that odd-order systems should retain one section + as first order. The algorithm steps and are as follows: + + As a pre-processing step, add poles or zeros to the origin as + necessary to obtain the same number of poles and zeros for pairing. + If ``pairing == 'nearest'`` and there are an odd number of poles, + add an additional pole and a zero at the origin. + + The following steps are then iterated over until no more poles or + zeros remain: + + 1. Take the (next remaining) pole (complex or real) closest to the + unit circle to begin a new filter section. + + 2. If the pole is real and there are no other remaining real poles [#]_, + add the closest real zero to the section and leave it as a first + order section. Note that after this step we are guaranteed to be + left with an even number of real poles, complex poles, real zeros, + and complex zeros for subsequent pairing iterations. + + 3. Else: + + 1. If the pole is complex and the zero is the only remaining real + zero*, then pair the pole with the *next* closest zero + (guaranteed to be complex). This is necessary to ensure that + there will be a real zero remaining to eventually create a + first-order section (thus keeping the odd order). + + 2. Else pair the pole with the closest remaining zero (complex or + real). + + 3. Proceed to complete the second-order section by adding another + pole and zero to the current pole and zero in the section: + + 1. If the current pole and zero are both complex, add their + conjugates. + + 2. Else if the pole is complex and the zero is real, add the + conjugate pole and the next closest real zero. + + 3. 
Else if the pole is real and the zero is complex, add the + conjugate zero and the real pole closest to those zeros. + + 4. Else (we must have a real pole and real zero) add the next + real pole closest to the unit circle, and then add the real + zero closest to that pole. + + .. [#] This conditional can only be met for specific odd-order inputs + with the ``pairing == 'keep_odd'`` method. + + .. versionadded:: 0.16.0 + + Examples + -------- + + Design a 6th order low-pass elliptic digital filter for a system with a + sampling rate of 8000 Hz that has a pass-band corner frequency of + 1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and + the attenuation in the stop-band should be at least 90 dB. + + In the following call to `signal.ellip`, we could use ``output='sos'``, + but for this example, we'll use ``output='zpk'``, and then convert to SOS + format with `zpk2sos`: + + >>> from scipy import signal + >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk') + + Now convert to SOS format. + + >>> sos = signal.zpk2sos(z, p, k) + + The coefficients of the numerators of the sections: + + >>> sos[:, :3] + array([[ 0.0014154 , 0.00248707, 0.0014154 ], + [ 1. , 0.72965193, 1. ], + [ 1. , 0.17594966, 1. ]]) + + The symmetry in the coefficients occurs because all the zeros are on the + unit circle. + + The coefficients of the denominators of the sections: + + >>> sos[:, 3:] + array([[ 1. , -1.32543251, 0.46989499], + [ 1. , -1.26117915, 0.6262586 ], + [ 1. , -1.25707217, 0.86199667]]) + + The next example shows the effect of the `pairing` option. We have a + system with three poles and three zeros, so the SOS array will have + shape (2, 6). The means there is, in effect, an extra pole and an extra + zero at the origin in the SOS representation. + + >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j]) + >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j]) + + With ``pairing='nearest'`` (the default), we obtain + + >>> signal.zpk2sos(z1, p1, 1) + array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ], + [ 1. , 1. , 0. , 1. , -1.6 , 0.65]]) + + The first section has the zeros {-0.5-0.05j, -0.5+0.5j} and the poles + {0, 0.75}, and the second section has the zeros {-1, 0} and poles + {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin + have been assigned to different sections. + + With ``pairing='keep_odd'``, we obtain: + + >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd') + array([[ 1. , 1. , 0. , 1. , -0.75, 0. ], + [ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]]) + + The extra pole and zero at the origin are in the same section. + The first section is, in effect, a first-order section. + + """ + # TODO in the near future: + # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259). + # 2. Make `decimate` use `sosfilt` instead of `lfilter`. + # 3. Make sosfilt automatically simplify sections to first order + # when possible. Note this might make `sosfiltfilt` a bit harder (ICs). + # 4. Further optimizations of the section ordering / pole-zero pairing. + # See the wiki for other potential issues. 
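+    # (Editorial sketch, not upstream: the code below first validates the
+    # pairing mode and pads poles/zeros to equal, even counts; the main
+    # loop then repeatedly takes the remaining pole closest to the unit
+    # circle, pairs it with a zero per the docstring rules, and emits one
+    # second-order section per iteration via zpk2tf at the end.)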
+ + valid_pairings = ['nearest', 'keep_odd'] + if pairing not in valid_pairings: + raise ValueError('pairing must be one of %s, not %s' + % (valid_pairings, pairing)) + if len(z) == len(p) == 0: + return array([[k, 0., 0., 1., 0., 0.]]) + + # ensure we have the same number of poles and zeros, and make copies + p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0)))) + z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0)))) + n_sections = (max(len(p), len(z)) + 1) // 2 + sos = zeros((n_sections, 6)) + + if len(p) % 2 == 1 and pairing == 'nearest': + p = np.concatenate((p, [0.])) + z = np.concatenate((z, [0.])) + assert len(p) == len(z) + + # Ensure we have complex conjugate pairs + # (note that _cplxreal only gives us one element of each complex pair): + z = np.concatenate(_cplxreal(z)) + p = np.concatenate(_cplxreal(p)) + + p_sos = np.zeros((n_sections, 2), np.complex128) + z_sos = np.zeros_like(p_sos) + for si in range(n_sections): + # Select the next "worst" pole + p1_idx = np.argmin(np.abs(1 - np.abs(p))) + p1 = p[p1_idx] + p = np.delete(p, p1_idx) + + # Pair that pole with a zero + + if np.isreal(p1) and np.isreal(p).sum() == 0: + # Special case to set a first-order section + z1_idx = _nearest_real_complex_idx(z, p1, 'real') + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + p2 = z2 = 0 + else: + if not np.isreal(p1) and np.isreal(z).sum() == 1: + # Special case to ensure we choose a complex zero to pair + # with so later (setting up a first-order section) + z1_idx = _nearest_real_complex_idx(z, p1, 'complex') + assert not np.isreal(z[z1_idx]) + else: + # Pair the pole with the closest zero (real or complex) + z1_idx = np.argmin(np.abs(p1 - z)) + z1 = z[z1_idx] + z = np.delete(z, z1_idx) + + # Now that we have p1 and z1, figure out what p2 and z2 need to be + if not np.isreal(p1): + if not np.isreal(z1): # complex pole, complex zero + p2 = p1.conj() + z2 = z1.conj() + else: # complex pole, real zero + p2 = p1.conj() + z2_idx = _nearest_real_complex_idx(z, p1, 'real') + z2 = z[z2_idx] + assert np.isreal(z2) + z = np.delete(z, z2_idx) + else: + if not np.isreal(z1): # real pole, complex zero + z2 = z1.conj() + p2_idx = _nearest_real_complex_idx(p, z1, 'real') + p2 = p[p2_idx] + assert np.isreal(p2) + else: # real pole, real zero + # pick the next "worst" pole to use + idx = np.nonzero(np.isreal(p))[0] + assert len(idx) > 0 + p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))] + p2 = p[p2_idx] + # find a real zero to match the added pole + assert np.isreal(p2) + z2_idx = _nearest_real_complex_idx(z, p2, 'real') + z2 = z[z2_idx] + assert np.isreal(z2) + z = np.delete(z, z2_idx) + p = np.delete(p, p2_idx) + p_sos[si] = [p1, p2] + z_sos[si] = [z1, z2] + assert len(p) == len(z) == 0 # we've consumed all poles and zeros + del p, z + + # Construct the system, reversing order so the "worst" are last + p_sos = np.reshape(p_sos[::-1], (n_sections, 2)) + z_sos = np.reshape(z_sos[::-1], (n_sections, 2)) + gains = np.ones(n_sections) + gains[0] = k + for si in range(n_sections): + x = zpk2tf(z_sos[si], p_sos[si], gains[si]) + sos[si] = np.concatenate(x) + return sos + + +def _align_nums(nums): + """Aligns the shapes of multiple numerators. + + Given an array of numerator coefficient arrays [[a_1, a_2,..., + a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator + arrays with zero's so that all numerators have the same length. Such + alignment is necessary for functions like 'tf2ss', which needs the + alignment when dealing with SIMO transfer functions. 
+ + Parameters + ---------- + nums: array_like + Numerator or list of numerators. Not necessarily with same length. + + Returns + ------- + nums: array + The numerator. If `nums` input was a list of numerators then a 2d + array with padded zeros for shorter numerators is returned. Otherwise + returns ``np.asarray(nums)``. + """ + try: + # The statement can throw a ValueError if one + # of the numerators is a single digit and another + # is array-like e.g. if nums = [5, [1, 2, 3]] + nums = asarray(nums) + + if not np.issubdtype(nums.dtype, np.number): + raise ValueError("dtype of numerator is non-numeric") + + return nums + + except ValueError: + nums = [np.atleast_1d(num) for num in nums] + max_width = max(num.size for num in nums) + + # pre-allocate + aligned_nums = np.zeros((len(nums), max_width)) + + # Create numerators with padded zeros + for index, num in enumerate(nums): + aligned_nums[index, -num.size:] = num + + return aligned_nums + + +def normalize(b, a): + """Normalize numerator/denominator of a continuous-time transfer function. + + If values of `b` are too close to 0, they are removed. In that case, a + BadCoefficients warning is emitted. + + Parameters + ---------- + b: array_like + Numerator of the transfer function. Can be a 2d array to normalize + multiple transfer functions. + a: array_like + Denominator of the transfer function. At most 1d. + + Returns + ------- + num: array + The numerator of the normalized transfer function. At least a 1d + array. A 2d-array if the input `num` is a 2d array. + den: 1d-array + The denominator of the normalized transfer function. + + Notes + ----- + Coefficients for both the numerator and denominator should be specified in + descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as + ``[1, 3, 5]``). + """ + num, den = b, a + + den = np.atleast_1d(den) + num = np.atleast_2d(_align_nums(num)) + + if den.ndim != 1: + raise ValueError("Denominator polynomial must be rank-1 array.") + if num.ndim > 2: + raise ValueError("Numerator polynomial must be rank-1 or" + " rank-2 array.") + if np.all(den == 0): + raise ValueError("Denominator must have at least on nonzero element.") + + # Trim leading zeros in denominator, leave at least one. + den = np.trim_zeros(den, 'f') + + # Normalize transfer function + num, den = num / den[0], den / den[0] + + # Count numerator columns that are all zero + leading_zeros = 0 + for col in num.T: + if np.allclose(col, 0, atol=1e-14): + leading_zeros += 1 + else: + break + + # Trim leading zeros of numerator + if leading_zeros > 0: + warnings.warn("Badly conditioned filter coefficients (numerator): the " + "results may be meaningless", BadCoefficients) + # Make sure at least one column remains + if leading_zeros == num.shape[1]: + leading_zeros -= 1 + num = num[:, leading_zeros:] + + # Squeeze first dimension if singular + if num.shape[0] == 1: + num = num[0, :] + + return num, den + + +def lp2lp(b, a, wo=1.0): + """ + Transform a lowpass filter prototype to a different frequency. + + Return an analog low-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, in + transfer function ('ba') representation. 
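+
+    (Editorial note: the transformation amounts to substituting ``s/wo``
+    for ``s`` in the prototype's transfer function.)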
+ + See Also + -------- + lp2hp, lp2bp, lp2bs, bilinear + lp2lp_zpk + + """ + a, b = map(atleast_1d, (a, b)) + try: + wo = float(wo) + except TypeError: + wo = float(wo[0]) + d = len(a) + n = len(b) + M = max((d, n)) + pwo = pow(wo, numpy.arange(M - 1, -1, -1)) + start1 = max((n - d, 0)) + start2 = max((d - n, 0)) + b = b * pwo[start1] / pwo[start2:] + a = a * pwo[start1] / pwo[start1:] + return normalize(b, a) + + +def lp2hp(b, a, wo=1.0): + """ + Transform a lowpass filter prototype to a highpass filter. + + Return an analog high-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, in + transfer function ('ba') representation. + + See Also + -------- + lp2lp, lp2bp, lp2bs, bilinear + lp2hp_zpk + + """ + a, b = map(atleast_1d, (a, b)) + try: + wo = float(wo) + except TypeError: + wo = float(wo[0]) + d = len(a) + n = len(b) + if wo != 1: + pwo = pow(wo, numpy.arange(max((d, n)))) + else: + pwo = numpy.ones(max((d, n)), b.dtype.char) + if d >= n: + outa = a[::-1] * pwo + outb = resize(b, (d,)) + outb[n:] = 0.0 + outb[:n] = b[::-1] * pwo[:n] + else: + outb = b[::-1] * pwo + outa = resize(a, (n,)) + outa[d:] = 0.0 + outa[:d] = a[::-1] * pwo[:d] + + return normalize(outb, outa) + + +def lp2bp(b, a, wo=1.0, bw=1.0): + """ + Transform a lowpass filter prototype to a bandpass filter. + + Return an analog band-pass filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, in transfer function ('ba') representation. + + See Also + -------- + lp2lp, lp2hp, lp2bs, bilinear + lp2bp_zpk + + """ + a, b = map(atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = mintypecode((a, b)) + ma = max([N, D]) + Np = N + ma + Dp = D + ma + bprime = numpy.zeros(Np + 1, artype) + aprime = numpy.zeros(Dp + 1, artype) + wosq = wo * wo + for j in range(Np + 1): + val = 0.0 + for i in range(0, N + 1): + for k in range(0, i + 1): + if ma - i + 2 * k == j: + val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i + bprime[Np - j] = val + for j in range(Dp + 1): + val = 0.0 + for i in range(0, D + 1): + for k in range(0, i + 1): + if ma - i + 2 * k == j: + val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i + aprime[Dp - j] = val + + return normalize(bprime, aprime) + + +def lp2bs(b, a, wo=1.0, bw=1.0): + """ + Transform a lowpass filter prototype to a bandstop filter. + + Return an analog band-stop filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, in transfer function ('ba') representation. + + See Also + -------- + lp2lp, lp2hp, lp2bp, bilinear + lp2bs_zpk + + """ + a, b = map(atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = mintypecode((a, b)) + M = max([N, D]) + Np = M + M + Dp = M + M + bprime = numpy.zeros(Np + 1, artype) + aprime = numpy.zeros(Dp + 1, artype) + wosq = wo * wo + for j in range(Np + 1): + val = 0.0 + for i in range(0, N + 1): + for k in range(0, M - i + 1): + if i + 2 * k == j: + val += (comb(M - i, k) * b[N - i] * + (wosq) ** (M - i - k) * bw ** i) + bprime[Np - j] = val + for j in range(Dp + 1): + val = 0.0 + for i in range(0, D + 1): + for k in range(0, M - i + 1): + if i + 2 * k == j: + val += (comb(M - i, k) * a[D - i] * + (wosq) ** (M - i - k) * bw ** i) + aprime[Dp - j] = val + + return normalize(bprime, aprime) + + +def bilinear(b, a, fs=1.0): + """Return a digital filter from an analog one using a bilinear transform. 
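+
+    (Editorial note: with sample rate `fs` the full Tustin substitution is
+    ``s = 2*fs*(z-1) / (z+1)``; the normalized form is quoted below.)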
+ + The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``. + + See Also + -------- + lp2lp, lp2hp, lp2bp, lp2bs + bilinear_zpk + + """ + fs = float(fs) + a, b = map(atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = float + M = max([N, D]) + Np = M + Dp = M + bprime = numpy.zeros(Np + 1, artype) + aprime = numpy.zeros(Dp + 1, artype) + for j in range(Np + 1): + val = 0.0 + for i in range(N + 1): + for k in range(i + 1): + for l in range(M - i + 1): + if k + l == j: + val += (comb(i, k) * comb(M - i, l) * b[N - i] * + pow(2 * fs, i) * (-1) ** k) + bprime[j] = real(val) + for j in range(Dp + 1): + val = 0.0 + for i in range(D + 1): + for k in range(i + 1): + for l in range(M - i + 1): + if k + l == j: + val += (comb(i, k) * comb(M - i, l) * a[D - i] * + pow(2 * fs, i) * (-1) ** k) + aprime[j] = real(val) + + return normalize(bprime, aprime) + + +def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba', + fs=None): + """Complete IIR digital and analog filter design. + + Given passband and stopband frequencies and gains, construct an analog or + digital IIR filter of minimum order for a given basic type. Return the + output in numerator, denominator ('ba'), pole-zero ('zpk') or second order + sections ('sos') form. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + ftype : str, optional + The type of IIR filter to design: + + - Butterworth : 'butter' + - Chebyshev I : 'cheby1' + - Chebyshev II : 'cheby2' + - Cauer/elliptic: 'ellip' + - Bessel/Thomson: 'bessel' + + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba'. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output=='sos'``. + + See Also + -------- + butter : Filter design using order and critical points + cheby1, cheby2, ellip, bessel + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + + Notes + ----- + The ``'sos'`` output parameter was added in 0.16.0. + """ + try: + ordfunc = filter_dict[ftype][1] + except KeyError: + raise ValueError("Invalid IIR filter type: %s" % ftype) + except IndexError: + raise ValueError(("%s does not have order selection. 
Use " + "iirfilter function.") % ftype) + + wp = atleast_1d(wp) + ws = atleast_1d(ws) + + band_type = 2 * (len(wp) - 1) + band_type += 1 + if wp[0] >= ws[0]: + band_type += 1 + + btype = {1: 'lowpass', 2: 'highpass', + 3: 'bandstop', 4: 'bandpass'}[band_type] + + N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs) + return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, + ftype=ftype, output=output, fs=fs) + + +def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False, + ftype='butter', output='ba', fs=None): + """ + IIR digital and analog filter design given order and critical points. + + Design an Nth-order digital or analog filter and return the filter + coefficients. + + Parameters + ---------- + N : int + The order of the filter. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g. rad/s). + rp : float, optional + For Chebyshev and elliptic filters, provides the maximum ripple + in the passband. (dB) + rs : float, optional + For Chebyshev and elliptic filters, provides the minimum attenuation + in the stop band. (dB) + btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional + The type of filter. Default is 'bandpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + ftype : str, optional + The type of IIR filter to design: + + - Butterworth : 'butter' + - Chebyshev I : 'cheby1' + - Chebyshev II : 'cheby2' + - Cauer/elliptic: 'ellip' + - Bessel/Thomson: 'bessel' + + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba'. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output=='sos'``. + + See Also + -------- + butter : Filter design using order and critical points + cheby1, cheby2, ellip, bessel + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord, ellipord + iirdesign : General filter design using passband and stopband spec + + Notes + ----- + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to + 200 Hz and plot the frequency response: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60, + ... 
btype='band', analog=True, ftype='cheby2')
+    >>> w, h = signal.freqs(b, a, 1000)
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(1, 1, 1)
+    >>> ax.semilogx(w / (2*np.pi), 20 * np.log10(abs(h)))
+    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
+    >>> ax.set_xlabel('Frequency [Hz]')
+    >>> ax.set_ylabel('Amplitude [dB]')
+    >>> ax.axis((10, 1000, -100, 10))
+    >>> ax.grid(which='both', axis='both')
+    >>> plt.show()
+
+    Create a digital filter with the same properties, in a system with
+    sampling rate of 2000 Hz, and plot the frequency response. (Second-order
+    sections implementation is required to ensure stability of a filter of
+    this order):
+
+    >>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band',
+    ...                        analog=False, ftype='cheby2', fs=2000,
+    ...                        output='sos')
+    >>> w, h = signal.sosfreqz(sos, 2000, fs=2000)
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(1, 1, 1)
+    >>> ax.semilogx(w, 20 * np.log10(abs(h)))
+    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
+    >>> ax.set_xlabel('Frequency [Hz]')
+    >>> ax.set_ylabel('Amplitude [dB]')
+    >>> ax.axis((10, 1000, -100, 10))
+    >>> ax.grid(which='both', axis='both')
+    >>> plt.show()
+
+    """
+    ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
+    Wn = asarray(Wn)
+    if fs is not None:
+        if analog:
+            raise ValueError("fs cannot be specified for an analog filter")
+        Wn = 2*Wn/fs
+
+    try:
+        btype = band_dict[btype]
+    except KeyError:
+        raise ValueError("'%s' is an invalid bandtype for filter." % btype)
+
+    try:
+        typefunc = filter_dict[ftype][0]
+    except KeyError:
+        raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
+
+    if output not in ['ba', 'zpk', 'sos']:
+        raise ValueError("'%s' is not a valid output form." % output)
+
+    if rp is not None and rp < 0:
+        raise ValueError("passband ripple (rp) must be positive")
+
+    if rs is not None and rs < 0:
+        raise ValueError("stopband attenuation (rs) must be positive")
+
+    # Get analog lowpass prototype
+    if typefunc == buttap:
+        z, p, k = typefunc(N)
+    elif typefunc == besselap:
+        z, p, k = typefunc(N, norm=bessel_norms[ftype])
+    elif typefunc == cheb1ap:
+        if rp is None:
+            raise ValueError("passband ripple (rp) must be provided to "
+                             "design a Chebyshev I filter.")
+        z, p, k = typefunc(N, rp)
+    elif typefunc == cheb2ap:
+        if rs is None:
+            raise ValueError("stopband attenuation (rs) must be provided to "
+                             "design a Chebyshev II filter.")
+        z, p, k = typefunc(N, rs)
+    elif typefunc == ellipap:
+        if rs is None or rp is None:
+            raise ValueError("Both rp and rs must be provided to design an "
+                             "elliptic filter.")
+        z, p, k = typefunc(N, rp, rs)
+    else:
+        raise NotImplementedError("'%s' not implemented in iirfilter."
% ftype) + + # Pre-warp frequencies for digital filter design + if not analog: + if numpy.any(Wn <= 0) or numpy.any(Wn >= 1): + raise ValueError("Digital filter critical frequencies " + "must be 0 < Wn < 1") + fs = 2.0 + warped = 2 * fs * tan(pi * Wn / fs) + else: + warped = Wn + + # transform to lowpass, bandpass, highpass, or bandstop + if btype in ('lowpass', 'highpass'): + if numpy.size(Wn) != 1: + raise ValueError('Must specify a single critical frequency Wn') + + if btype == 'lowpass': + z, p, k = lp2lp_zpk(z, p, k, wo=warped) + elif btype == 'highpass': + z, p, k = lp2hp_zpk(z, p, k, wo=warped) + elif btype in ('bandpass', 'bandstop'): + try: + bw = warped[1] - warped[0] + wo = sqrt(warped[0] * warped[1]) + except IndexError: + raise ValueError('Wn must specify start and stop frequencies') + + if btype == 'bandpass': + z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw) + elif btype == 'bandstop': + z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw) + else: + raise NotImplementedError("'%s' not implemented in iirfilter." % btype) + + # Find discrete equivalent if necessary + if not analog: + z, p, k = bilinear_zpk(z, p, k, fs=fs) + + # Transform to proper out type (pole-zero, state-space, numer-denom) + if output == 'zpk': + return z, p, k + elif output == 'ba': + return zpk2tf(z, p, k) + elif output == 'sos': + return zpk2sos(z, p, k) + + +def _relative_degree(z, p): + """ + Return relative degree of transfer function from zeros and poles + """ + degree = len(p) - len(z) + if degree < 0: + raise ValueError("Improper transfer function. " + "Must have at least as many poles as zeros.") + else: + return degree + + +def bilinear_zpk(z, p, k, fs): + """ + Return a digital IIR filter from an analog one using a bilinear transform. + + Transform a set of poles and zeros from the analog s-plane to the digital + z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for + ``s``, maintaining the shape of the frequency response. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + fs : float + Sample rate, as ordinary frequency (e.g. hertz). No prewarping is + done in this function. + + Returns + ------- + z : ndarray + Zeros of the transformed digital filter transfer function. + p : ndarray + Poles of the transformed digital filter transfer function. + k : float + System gain of the transformed digital filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk + bilinear + + Notes + ----- + .. versionadded:: 1.1.0 + + """ + z = atleast_1d(z) + p = atleast_1d(p) + + degree = _relative_degree(z, p) + + fs2 = 2.0*fs + + # Bilinear transform the poles and zeros + z_z = (fs2 + z) / (fs2 - z) + p_z = (fs2 + p) / (fs2 - p) + + # Any zeros that were at infinity get moved to the Nyquist frequency + z_z = append(z_z, -ones(degree)) + + # Compensate for gain change + k_z = k * real(prod(fs2 - z) / prod(fs2 - p)) + + return z_z, p_z, k_z + + +def lp2lp_zpk(z, p, k, wo=1.0): + r""" + Transform a lowpass filter prototype to a different frequency. + + Return an analog low-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, + using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. 
+ k : float + System gain of the analog filter transfer function. + wo : float + Desired cutoff, as angular frequency (e.g. rad/s). + Defaults to no change. + + Returns + ------- + z : ndarray + Zeros of the transformed low-pass filter transfer function. + p : ndarray + Poles of the transformed low-pass filter transfer function. + k : float + System gain of the transformed low-pass filter. + + See Also + -------- + lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear + lp2lp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s}{\omega_0} + + .. versionadded:: 1.1.0 + + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) # Avoid int wraparound + + degree = _relative_degree(z, p) + + # Scale all points radially from origin to shift cutoff frequency + z_lp = wo * z + p_lp = wo * p + + # Each shifted pole decreases gain by wo, each shifted zero increases it. + # Cancel out the net change to keep overall gain the same + k_lp = k * wo**degree + + return z_lp, p_lp, k_lp + + +def lp2hp_zpk(z, p, k, wo=1.0): + r""" + Transform a lowpass filter prototype to a highpass filter. + + Return an analog high-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, + using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired cutoff, as angular frequency (e.g. rad/s). + Defaults to no change. + + Returns + ------- + z : ndarray + Zeros of the transformed high-pass filter transfer function. + p : ndarray + Poles of the transformed high-pass filter transfer function. + k : float + System gain of the transformed high-pass filter. + + See Also + -------- + lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear + lp2hp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{\omega_0}{s} + + This maintains symmetry of the lowpass and highpass responses on a + logarithmic scale. + + .. versionadded:: 1.1.0 + + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) + + degree = _relative_degree(z, p) + + # Invert positions radially about unit circle to convert LPF to HPF + # Scale all points radially from origin to shift cutoff frequency + z_hp = wo / z + p_hp = wo / p + + # If lowpass had zeros at infinity, inverting moves them to origin. + z_hp = append(z_hp, zeros(degree)) + + # Cancel out gain change caused by inversion + k_hp = k * real(prod(-z) / prod(-p)) + + return z_hp, p_hp, k_hp + + +def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandpass filter. + + Return an analog band-pass filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired passband center, as angular frequency (e.g. rad/s). + Defaults to no change. + bw : float + Desired passband width, as angular frequency (e.g. rad/s). + Defaults to 1. + + Returns + ------- + z : ndarray + Zeros of the transformed band-pass filter transfer function. 
+ p : ndarray + Poles of the transformed band-pass filter transfer function. + k : float + System gain of the transformed band-pass filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear + lp2bp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} + + This is the "wideband" transformation, producing a passband with + geometric (log frequency) symmetry about `wo`. + + .. versionadded:: 1.1.0 + + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) + bw = float(bw) + + degree = _relative_degree(z, p) + + # Scale poles and zeros to desired bandwidth + z_lp = z * bw/2 + p_lp = p * bw/2 + + # Square root needs to produce complex result, not NaN + z_lp = z_lp.astype(complex) + p_lp = p_lp.astype(complex) + + # Duplicate poles and zeros and shift from baseband to +wo and -wo + z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2), + z_lp - sqrt(z_lp**2 - wo**2))) + p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2), + p_lp - sqrt(p_lp**2 - wo**2))) + + # Move degree zeros to origin, leaving degree zeros at infinity for BPF + z_bp = append(z_bp, zeros(degree)) + + # Cancel out gain change from frequency scaling + k_bp = k * bw**degree + + return z_bp, p_bp, k_bp + + +def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandstop filter. + + Return an analog band-stop filter with center frequency `wo` and + stopband width `bw` from an analog low-pass filter prototype with unity + cutoff frequency, using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired stopband center, as angular frequency (e.g. rad/s). + Defaults to no change. + bw : float + Desired stopband width, as angular frequency (e.g. rad/s). + Defaults to 1. + + Returns + ------- + z : ndarray + Zeros of the transformed band-stop filter transfer function. + p : ndarray + Poles of the transformed band-stop filter transfer function. + k : float + System gain of the transformed band-stop filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear + lp2bs + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} + + This is the "wideband" transformation, producing a stopband with + geometric (log frequency) symmetry about `wo`. + + .. 
versionadded:: 1.1.0 + + """ + z = atleast_1d(z) + p = atleast_1d(p) + wo = float(wo) + bw = float(bw) + + degree = _relative_degree(z, p) + + # Invert to a highpass filter with desired bandwidth + z_hp = (bw/2) / z + p_hp = (bw/2) / p + + # Square root needs to produce complex result, not NaN + z_hp = z_hp.astype(complex) + p_hp = p_hp.astype(complex) + + # Duplicate poles and zeros and shift from baseband to +wo and -wo + z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2), + z_hp - sqrt(z_hp**2 - wo**2))) + p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2), + p_hp - sqrt(p_hp**2 - wo**2))) + + # Move any zeros that were at infinity to the center of the stopband + z_bs = append(z_bs, +1j*wo * ones(degree)) + z_bs = append(z_bs, -1j*wo * ones(degree)) + + # Cancel out gain change caused by inversion + k_bs = k * real(prod(-z) / prod(-p)) + + return z_bs, p_bs, k_bs + + +def butter(N, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Butterworth digital and analog filter design. + + Design an Nth-order digital or analog Butterworth filter and return + the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For a Butterworth filter, this is the point at which the gain + drops to 1/sqrt(2) that of the passband (the "-3 dB point"). + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g. rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba'. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output=='sos'``. + + See Also + -------- + buttord, buttap + + Notes + ----- + The Butterworth filter has maximally flat frequency response in the + passband. + + The ``'sos'`` output parameter was added in 0.16.0. 
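+
+    As a quick numerical check that the gain at `Wn` is indeed the
+    -3 dB point (a minimal sketch; any order and cutoff behave the
+    same way), evaluate the response right at the cutoff:
+
+    >>> from scipy import signal
+    >>> b, a = signal.butter(4, 100, 'low', analog=True)
+    >>> w, h = signal.freqs(b, a, worN=[100])
+    >>> round(float(20 * np.log10(abs(h[0]))), 2)
+    -3.01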
+ + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> b, a = signal.butter(4, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Butterworth filter frequency response') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.butter(10, 15, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 15 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [seconds]') + >>> plt.tight_layout() + >>> plt.show() + """ + return iirfilter(N, Wn, btype=btype, analog=analog, + output=output, ftype='butter', fs=fs) + + +def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Chebyshev type I digital and analog filter design. + + Design an Nth-order digital or analog Chebyshev type I filter and + return the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rp : float + The maximum ripple allowed below unity gain in the passband. + Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For Type I filters, this is the point in the transition band at which + the gain first drops below -`rp`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g. rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba'. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output=='sos'``. 
+ + See Also + -------- + cheb1ord, cheb1ap + + Notes + ----- + The Chebyshev type I filter maximizes the rate of cutoff between the + frequency response's passband and stopband, at the expense of ripple in + the passband and increased ringing in the step response. + + Type I filters roll off faster than Type II (`cheby2`), but Type II + filters do not have any ripple in the passband. + + The equiripple passband has N maxima or minima (for example, a + 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is + unity for odd-order filters, or -rp dB for even-order filters. + + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev Type I frequency response (rp=5)') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-5, color='green') # rp + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.cheby1(10, 1, 15, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 15 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [seconds]') + >>> plt.tight_layout() + >>> plt.show() + """ + return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, + output=output, ftype='cheby1', fs=fs) + + +def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Chebyshev type II digital and analog filter design. + + Design an Nth-order digital or analog Chebyshev type II filter and + return the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rs : float + The minimum attenuation required in the stop band. + Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For Type II filters, this is the point in the transition band at which + the gain first reaches -`rs`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g. rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). 
Default is 'ba'. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output=='sos'``. + + See Also + -------- + cheb2ord, cheb2ap + + Notes + ----- + The Chebyshev type II filter maximizes the rate of cutoff between the + frequency response's passband and stopband, at the expense of ripple in + the stopband and increased ringing in the step response. + + Type II filters do not roll off as fast as Type I (`cheby1`). + + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Chebyshev Type II frequency response (rs=40)') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-40, color='green') # rs + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.cheby2(12, 20, 17, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 17 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [seconds]') + >>> plt.show() + """ + return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, + output=output, ftype='cheby2', fs=fs) + + +def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Elliptic (Cauer) digital and analog filter design. + + Design an Nth-order digital or analog elliptic filter and return + the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rp : float + The maximum ripple allowed below unity gain in the passband. + Specified in decibels, as a positive number. + rs : float + The minimum attenuation required in the stop band. + Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For elliptic filters, this is the point in the transition band at + which the gain first drops below -`rp`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g. rad/s). 
+ btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba'. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output=='sos'``. + + See Also + -------- + ellipord, ellipap + + Notes + ----- + Also known as Cauer or Zolotarev filters, the elliptical filter maximizes + the rate of transition between the frequency response's passband and + stopband, at the expense of ripple in both, and increased ringing in the + step response. + + As `rp` approaches 0, the elliptical filter becomes a Chebyshev + type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev + type I filter (`cheby1`). As both approach 0, it becomes a Butterworth + filter (`butter`). + + The equiripple passband has N maxima or minima (for example, a + 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is + unity for odd-order filters, or -rp dB for even-order filters. + + The ``'sos'`` output parameter was added in 0.16.0. + + Examples + -------- + Design an analog filter and plot its frequency response, showing the + critical points: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Elliptic filter frequency response (rp=5, rs=40)') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-40, color='green') # rs + >>> plt.axhline(-5, color='green') # rp + >>> plt.show() + + Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz + + >>> t = np.linspace(0, 1, 1000, False) # 1 second + >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) + >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) + >>> ax1.plot(t, sig) + >>> ax1.set_title('10 Hz and 20 Hz sinusoids') + >>> ax1.axis([0, 1, -2, 2]) + + Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and + apply it to the signal. (It's recommended to use second-order sections + format when filtering, to avoid numerical error with transfer function + (``ba``) format): + + >>> sos = signal.ellip(8, 1, 100, 17, 'hp', fs=1000, output='sos') + >>> filtered = signal.sosfilt(sos, sig) + >>> ax2.plot(t, filtered) + >>> ax2.set_title('After 17 Hz high-pass filter') + >>> ax2.axis([0, 1, -2, 2]) + >>> ax2.set_xlabel('Time [seconds]') + >>> plt.tight_layout() + >>> plt.show() + """ + return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, + output=output, ftype='elliptic', fs=fs) + + +def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase', + fs=None): + """ + Bessel/Thomson digital and analog filter design. 
+ + Design an Nth-order digital or analog Bessel filter and return the + filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies (defined + by the `norm` parameter). + For analog filters, `Wn` is an angular frequency (e.g. rad/s). + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. (See Notes.) + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba'. + norm : {'phase', 'delay', 'mag'}, optional + Critical frequency normalization: + + ``phase`` + The filter is normalized such that the phase response reaches its + midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for + both low-pass and high-pass filters, so this is the + "phase-matched" case. + + The magnitude response asymptotes are the same as a Butterworth + filter of the same order with a cutoff of `Wn`. + + This is the default, and matches MATLAB's implementation. + + ``delay`` + The filter is normalized such that the group delay in the passband + is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by + solving Bessel polynomials. + + ``mag`` + The filter is normalized such that the gain magnitude is -3 dB at + angular frequency `Wn`. + + .. versionadded:: 0.18.0 + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output=='sos'``. + + Notes + ----- + Also known as a Thomson filter, the analog Bessel filter has maximally + flat group delay and maximally linear phase response, with very little + ringing in the step response. [1]_ + + The Bessel is inherently an analog filter. This function generates digital + Bessel filters using the bilinear transform, which does not preserve the + phase response of the analog filter. As such, it is only approximately + correct at frequencies below about fs/4. To get maximally-flat group + delay at higher frequencies, the analog Bessel filter must be transformed + using phase-preserving techniques. + + See `besselap` for implementation details and references. + + The ``'sos'`` output parameter was added in 0.16.0. 
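+
+    As a quick numerical check of the ``norm='mag'`` convention described
+    above (a minimal sketch; other orders and cutoffs behave the same
+    way), evaluate the gain right at `Wn`:
+
+    >>> from scipy import signal
+    >>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='mag')
+    >>> w, h = signal.freqs(b, a, worN=[100])
+    >>> round(float(20 * np.log10(abs(h[0]))), 2)
+    -3.01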
+ + Examples + -------- + Plot the phase-normalized frequency response, showing the relationship + to the Butterworth's cutoff frequency (green): + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> b, a = signal.butter(4, 100, 'low', analog=True) + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed') + >>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase') + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) + >>> plt.title('Bessel filter magnitude response (with Butterworth)') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.show() + + and the phase midpoint: + + >>> plt.figure() + >>> plt.semilogx(w, np.unwrap(np.angle(h))) + >>> plt.axvline(100, color='green') # cutoff frequency + >>> plt.axhline(-np.pi, color='red') # phase midpoint + >>> plt.title('Bessel filter phase response') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Phase [radians]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.show() + + Plot the magnitude-normalized frequency response, showing the -3 dB cutoff: + + >>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag') + >>> w, h = signal.freqs(b, a) + >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) + >>> plt.axhline(-3, color='red') # -3 dB magnitude + >>> plt.axvline(10, color='green') # cutoff frequency + >>> plt.title('Magnitude-normalized Bessel filter frequency response') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.show() + + Plot the delay-normalized filter, showing the maximally-flat group delay + at 0.1 seconds: + + >>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay') + >>> w, h = signal.freqs(b, a) + >>> plt.figure() + >>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w)) + >>> plt.axhline(0.1, color='red') # 0.1 seconds group delay + >>> plt.title('Bessel filter group delay') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Group delay [seconds]') + >>> plt.margins(0, 0.1) + >>> plt.grid(which='both', axis='both') + >>> plt.show() + + References + ---------- + .. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency + Characteristics", Proceedings of the Institution of Electrical + Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. + + """ + return iirfilter(N, Wn, btype=btype, analog=analog, + output=output, ftype='bessel_'+norm, fs=fs) + + +def maxflat(): + pass + + +def yulewalk(): + pass + + +def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type): + """ + Band Stop Objective Function for order minimization. + + Returns the non-integer order for an analog band stop filter. + + Parameters + ---------- + wp : scalar + Edge of passband `passb`. + ind : int, {0, 1} + Index specifying which `passb` edge to vary (0 or 1). + passb : ndarray + Two element sequence of fixed passband edges. + stopb : ndarray + Two element sequence of fixed stopband edges. + gstop : float + Amount of attenuation in stopband in dB. + gpass : float + Amount of ripple in the passband in dB. + type : {'butter', 'cheby', 'ellip'} + Type of filter. + + Returns + ------- + n : scalar + Filter order (possibly non-integer). 
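+
+    Notes
+    -----
+    This function is used as the objective for `scipy.optimize.fminbound`
+    by `buttord`, `cheb1ord`, `cheb2ord` and `ellipord` when optimizing
+    the passband edges of band-stop filters.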
+ + """ + passbC = passb.copy() + passbC[ind] = wp + nat = (stopb * (passbC[0] - passbC[1]) / + (stopb ** 2 - passbC[0] * passbC[1])) + nat = min(abs(nat)) + + if type == 'butter': + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))) + elif type == 'cheby': + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat) + elif type == 'ellip': + GSTOP = 10 ** (0.1 * gstop) + GPASS = 10 ** (0.1 * gpass) + arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) + arg0 = 1.0 / nat + d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) + d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) + n = (d0[0] * d1[1] / (d0[1] * d1[0])) + else: + raise ValueError("Incorrect type: %s" % type) + return n + + +def buttord(wp, ws, gpass, gstop, analog=False, fs=None): + """Butterworth filter order selection. + + Return the order of the lowest order digital or analog Butterworth filter + that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for a Butterworth filter which meets specs. + wn : ndarray or float + The Butterworth natural frequency (i.e. the "3dB frequency"). Should + be used with `butter` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `butter`. + + See Also + -------- + butter : Filter design using order and critical points + cheb1ord : Find order and critical points from passband and stopband spec + cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design an analog bandpass filter with passband within 3 dB from 20 to + 50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s. + Plot its frequency response, showing the passband and stopband + constraints in gray. 
+ + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True) + >>> b, a = signal.butter(N, Wn, 'band', True) + >>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500)) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Butterworth bandpass filter fit to constraints') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop + >>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass + >>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop + >>> plt.axis([10, 100, -60, 3]) + >>> plt.show() + + """ + wp = atleast_1d(wp) + ws = atleast_1d(ws) + if fs is not None: + if analog: + raise ValueError("fs cannot be specified for an analog filter") + wp = 2*wp/fs + ws = 2*ws/fs + + filter_type = 2 * (len(wp) - 1) + filter_type += 1 + if wp[0] >= ws[0]: + filter_type += 1 + + # Pre-warp frequencies for digital filter design + if not analog: + passb = tan(pi * wp / 2.0) + stopb = tan(pi * ws / 2.0) + else: + passb = wp * 1.0 + stopb = ws * 1.0 + + if filter_type == 1: # low + nat = stopb / passb + elif filter_type == 2: # high + nat = passb / stopb + elif filter_type == 3: # stop + wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, + args=(0, passb, stopb, gpass, gstop, + 'butter'), + disp=0) + passb[0] = wp0 + wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], + args=(1, passb, stopb, gpass, gstop, + 'butter'), + disp=0) + passb[1] = wp1 + nat = ((stopb * (passb[0] - passb[1])) / + (stopb ** 2 - passb[0] * passb[1])) + elif filter_type == 4: # pass + nat = ((stopb ** 2 - passb[0] * passb[1]) / + (stopb * (passb[0] - passb[1]))) + + nat = min(abs(nat)) + + GSTOP = 10 ** (0.1 * abs(gstop)) + GPASS = 10 ** (0.1 * abs(gpass)) + ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))) + + # Find the Butterworth natural frequency WN (or the "3dB" frequency") + # to give exactly gpass at passb. + try: + W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord)) + except ZeroDivisionError: + W0 = 1.0 + print("Warning, order is zero...check input parameters.") + + # now convert this frequency back from lowpass prototype + # to the original analog filter + + if filter_type == 1: # low + WN = W0 * passb + elif filter_type == 2: # high + WN = passb / W0 + elif filter_type == 3: # stop + WN = numpy.zeros(2, float) + discr = sqrt((passb[1] - passb[0]) ** 2 + + 4 * W0 ** 2 * passb[0] * passb[1]) + WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0) + WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0) + WN = numpy.sort(abs(WN)) + elif filter_type == 4: # pass + W0 = numpy.array([-W0, W0], float) + WN = (-W0 * (passb[1] - passb[0]) / 2.0 + + sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 + + passb[0] * passb[1])) + WN = numpy.sort(abs(WN)) + else: + raise ValueError("Bad type: %s" % filter_type) + + if not analog: + wn = (2.0 / pi) * arctan(WN) + else: + wn = WN + + if len(wn) == 1: + wn = wn[0] + + if fs is not None: + wn = wn*fs/2 + + return ord, wn + + +def cheb1ord(wp, ws, gpass, gstop, analog=False, fs=None): + """Chebyshev type I filter order selection. + + Return the order of the lowest order digital or analog Chebyshev Type I + filter that loses no more than `gpass` dB in the passband and has at + least `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. 
+
+        For digital filters, these are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
+        half-cycles / sample.) For example:
+
+            - Lowpass: wp = 0.2, ws = 0.3
+            - Highpass: wp = 0.3, ws = 0.2
+            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
+            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
+
+        For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
+    gpass : float
+        The maximum loss in the passband (dB).
+    gstop : float
+        The minimum attenuation in the stopband (dB).
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    ord : int
+        The lowest order for a Chebyshev type I filter that meets specs.
+    wn : ndarray or float
+        The Chebyshev natural frequency (the "3dB frequency") for use with
+        `cheby1` to give filter results. If `fs` is specified,
+        this is in the same units, and `fs` must also be passed to `cheby1`.
+
+    See Also
+    --------
+    cheby1 : Filter design using order and critical points
+    buttord : Find order and critical points from passband and stopband spec
+    cheb2ord, ellipord
+    iirfilter : General filter design using order and critical frequencies
+    iirdesign : General filter design using passband and stopband spec
+
+    Examples
+    --------
+    Design a digital lowpass filter such that the passband is within 3 dB up
+    to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its
+    frequency response, showing the passband and stopband constraints in gray.
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
+    >>> b, a = signal.cheby1(N, 3, Wn, 'low')
+    >>> w, h = signal.freqz(b, a)
+    >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
+    >>> plt.title('Chebyshev I lowpass filter fit to constraints')
+    >>> plt.xlabel('Normalized frequency')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
+    >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
+    >>> plt.axis([0.08, 1, -60, 3])
+    >>> plt.show()
+
+    """
+    wp = atleast_1d(wp)
+    ws = atleast_1d(ws)
+    if fs is not None:
+        if analog:
+            raise ValueError("fs cannot be specified for an analog filter")
+        wp = 2*wp/fs
+        ws = 2*ws/fs
+
+    filter_type = 2 * (len(wp) - 1)
+    if wp[0] < ws[0]:
+        filter_type += 1
+    else:
+        filter_type += 2
+
+    # Pre-warp frequencies for digital filter design
+    if not analog:
+        passb = tan(pi * wp / 2.0)
+        stopb = tan(pi * ws / 2.0)
+    else:
+        passb = wp * 1.0
+        stopb = ws * 1.0
+
+    if filter_type == 1:  # low
+        nat = stopb / passb
+    elif filter_type == 2:  # high
+        nat = passb / stopb
+    elif filter_type == 3:  # stop
+        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
+                                 args=(0, passb, stopb, gpass, gstop, 'cheby'),
+                                 disp=0)
+        passb[0] = wp0
+        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
+                                 args=(1, passb, stopb, gpass, gstop, 'cheby'),
+                                 disp=0)
+        passb[1] = wp1
+        nat = ((stopb * (passb[0] - passb[1])) /
+               (stopb ** 2 - passb[0] * passb[1]))
+    elif filter_type == 4:  # pass
+        nat = ((stopb ** 2 - passb[0] * passb[1]) /
+               (stopb * (passb[0] - passb[1])))
+
+    nat = min(abs(nat))
+
+    GSTOP = 10 ** (0.1 * abs(gstop))
+    GPASS = 10 ** (0.1 * abs(gpass))
+    ord = int(ceil(arccosh(sqrt((GSTOP - 1.0)
/ (GPASS - 1.0))) / + arccosh(nat))) + + # Natural frequencies are just the passband edges + if not analog: + wn = (2.0 / pi) * arctan(passb) + else: + wn = passb + + if len(wn) == 1: + wn = wn[0] + + if fs is not None: + wn = wn*fs/2 + + return ord, wn + + +def cheb2ord(wp, ws, gpass, gstop, analog=False, fs=None): + """Chebyshev type II filter order selection. + + Return the order of the lowest order digital or analog Chebyshev Type II + filter that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for a Chebyshev type II filter that meets specs. + wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `cheby2` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `cheby2`. + + See Also + -------- + cheby2 : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb1ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to + 0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above + 0.6*(fs/2). Plot its frequency response, showing the passband and + stopband constraints in gray. 
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
+    >>> b, a = signal.cheby2(N, 60, Wn, 'stop')
+    >>> w, h = signal.freqz(b, a)
+    >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
+    >>> plt.title('Chebyshev II bandstop filter fit to constraints')
+    >>> plt.xlabel('Normalized frequency')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
+    >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
+    >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
+    >>> plt.axis([0.06, 1, -80, 3])
+    >>> plt.show()
+
+    """
+    wp = atleast_1d(wp)
+    ws = atleast_1d(ws)
+    if fs is not None:
+        if analog:
+            raise ValueError("fs cannot be specified for an analog filter")
+        wp = 2*wp/fs
+        ws = 2*ws/fs
+
+    filter_type = 2 * (len(wp) - 1)
+    if wp[0] < ws[0]:
+        filter_type += 1
+    else:
+        filter_type += 2
+
+    # Pre-warp frequencies for digital filter design
+    if not analog:
+        passb = tan(pi * wp / 2.0)
+        stopb = tan(pi * ws / 2.0)
+    else:
+        passb = wp * 1.0
+        stopb = ws * 1.0
+
+    if filter_type == 1:  # low
+        nat = stopb / passb
+    elif filter_type == 2:  # high
+        nat = passb / stopb
+    elif filter_type == 3:  # stop
+        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
+                                 args=(0, passb, stopb, gpass, gstop, 'cheby'),
+                                 disp=0)
+        passb[0] = wp0
+        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
+                                 args=(1, passb, stopb, gpass, gstop, 'cheby'),
+                                 disp=0)
+        passb[1] = wp1
+        nat = ((stopb * (passb[0] - passb[1])) /
+               (stopb ** 2 - passb[0] * passb[1]))
+    elif filter_type == 4:  # pass
+        nat = ((stopb ** 2 - passb[0] * passb[1]) /
+               (stopb * (passb[0] - passb[1])))
+
+    nat = min(abs(nat))
+
+    GSTOP = 10 ** (0.1 * abs(gstop))
+    GPASS = 10 ** (0.1 * abs(gpass))
+    ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
+                   arccosh(nat)))
+
+    # Find frequency where analog response is -gpass dB.
+    # Then convert back from low-pass prototype to the original filter.
+
+    new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
+    new_freq = 1.0 / new_freq
+
+    if filter_type == 1:
+        nat = passb / new_freq
+    elif filter_type == 2:
+        nat = passb * new_freq
+    elif filter_type == 3:
+        nat = numpy.zeros(2, float)
+        nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
+                  sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
+                       passb[1] * passb[0]))
+        nat[1] = passb[1] * passb[0] / nat[0]
+    elif filter_type == 4:
+        nat = numpy.zeros(2, float)
+        nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
+                  sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
+                       passb[1] * passb[0]))
+        nat[1] = passb[0] * passb[1] / nat[0]
+
+    if not analog:
+        wn = (2.0 / pi) * arctan(nat)
+    else:
+        wn = nat
+
+    if len(wn) == 1:
+        wn = wn[0]
+
+    if fs is not None:
+        wn = wn*fs/2
+
+    return ord, wn
+
+
+def ellipord(wp, ws, gpass, gstop, analog=False, fs=None):
+    """Elliptic (Cauer) filter order selection.
+
+    Return the order of the lowest order digital or analog elliptic filter
+    that loses no more than `gpass` dB in the passband and has at least
+    `gstop` dB attenuation in the stopband.
+
+    Parameters
+    ----------
+    wp, ws : float
+        Passband and stopband edge frequencies.
+
+        For digital filters, these are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency.
(`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for an Elliptic (Cauer) filter that meets specs. + wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `ellip` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `ellip`. + + See Also + -------- + ellip : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + Examples + -------- + Design an analog highpass filter such that the passband is within 3 dB + above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its + frequency response, showing the passband and stopband constraints in gray. + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> N, Wn = signal.ellipord(30, 10, 3, 60, True) + >>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True) + >>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500)) + >>> plt.semilogx(w, 20 * np.log10(abs(h))) + >>> plt.title('Elliptical highpass filter fit to constraints') + >>> plt.xlabel('Frequency [radians / second]') + >>> plt.ylabel('Amplitude [dB]') + >>> plt.grid(which='both', axis='both') + >>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop + >>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass + >>> plt.axis([1, 300, -80, 3]) + >>> plt.show() + + """ + wp = atleast_1d(wp) + ws = atleast_1d(ws) + if fs is not None: + if analog: + raise ValueError("fs cannot be specified for an analog filter") + wp = 2*wp/fs + ws = 2*ws/fs + + filter_type = 2 * (len(wp) - 1) + filter_type += 1 + if wp[0] >= ws[0]: + filter_type += 1 + + # Pre-warp frequencies for digital filter design + if not analog: + passb = tan(pi * wp / 2.0) + stopb = tan(pi * ws / 2.0) + else: + passb = wp * 1.0 + stopb = ws * 1.0 + + if filter_type == 1: # low + nat = stopb / passb + elif filter_type == 2: # high + nat = passb / stopb + elif filter_type == 3: # stop + wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, + args=(0, passb, stopb, gpass, gstop, 'ellip'), + disp=0) + passb[0] = wp0 + wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], + args=(1, passb, stopb, gpass, gstop, 'ellip'), + disp=0) + passb[1] = wp1 + nat = ((stopb * (passb[0] - passb[1])) / + (stopb ** 2 - passb[0] * passb[1])) + elif filter_type == 4: # pass + nat = ((stopb ** 2 - passb[0] * passb[1]) / + (stopb * (passb[0] - passb[1]))) + + nat = min(abs(nat)) + + GSTOP = 10 ** (0.1 * gstop) + GPASS = 10 ** (0.1 * gpass) + arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) + arg0 = 1.0 / nat + d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) + d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) + ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0]))) + + 
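+    # The natural frequencies are just the passband edges; for digital
+    # filters, invert the tan() prewarping applied above.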
if not analog:
+        wn = arctan(passb) * 2.0 / pi
+    else:
+        wn = passb
+
+    if len(wn) == 1:
+        wn = wn[0]
+
+    if fs is not None:
+        wn = wn*fs/2
+
+    return ord, wn
+
+
+def buttap(N):
+    """Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
+
+    The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
+
+    See Also
+    --------
+    butter : Filter design function using this prototype
+
+    """
+    if abs(int(N)) != N:
+        raise ValueError("Filter order must be a nonnegative integer")
+    z = numpy.array([])
+    m = numpy.arange(-N+1, N, 2)
+    # Middle value is 0 to ensure an exactly real pole
+    p = -numpy.exp(1j * pi * m / (2 * N))
+    k = 1
+    return z, p, k
+
+
+def cheb1ap(N, rp):
+    """
+    Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
+
+    The returned filter prototype has `rp` decibels of ripple in the passband.
+
+    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
+    defined as the point at which the gain first drops below ``-rp``.
+
+    See Also
+    --------
+    cheby1 : Filter design function using this prototype
+
+    """
+    if abs(int(N)) != N:
+        raise ValueError("Filter order must be a nonnegative integer")
+    elif N == 0:
+        # Avoid divide-by-zero error
+        # Even order filters have DC gain of -rp dB
+        return numpy.array([]), numpy.array([]), 10**(-rp/20)
+    z = numpy.array([])
+
+    # Ripple factor (epsilon)
+    eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
+    mu = 1.0 / N * arcsinh(1 / eps)
+
+    # Arrange poles in an ellipse on the left half of the S-plane
+    m = numpy.arange(-N+1, N, 2)
+    theta = pi * m / (2*N)
+    p = -sinh(mu + 1j*theta)
+
+    k = numpy.prod(-p, axis=0).real
+    if N % 2 == 0:
+        k = k / sqrt((1 + eps * eps))
+
+    return z, p, k
+
+
+def cheb2ap(N, rs):
+    """
+    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
+
+    The returned filter prototype has `rs` decibels of ripple in the stopband.
+
+    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
+    defined as the point at which the gain first reaches ``-rs``.
+
+    See Also
+    --------
+    cheby2 : Filter design function using this prototype
+
+    """
+    if abs(int(N)) != N:
+        raise ValueError("Filter order must be a nonnegative integer")
+    elif N == 0:
+        # Avoid divide-by-zero warning
+        return numpy.array([]), numpy.array([]), 1
+
+    # Ripple factor (epsilon)
+    de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
+    mu = arcsinh(1.0 / de) / N
+
+    if N % 2:
+        m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
+                               numpy.arange(2, N, 2)))
+    else:
+        m = numpy.arange(-N+1, N, 2)
+
+    z = -conjugate(1j / sin(m * pi / (2.0 * N)))
+
+    # Poles around the unit circle like Butterworth
+    p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
+    # Warp into Chebyshev II
+    p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
+    p = 1.0 / p
+
+    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
+    return z, p, k
+
+
+EPSILON = 2e-16
+
+
+def _vratio(u, ineps, mp):
+    [s, c, d, phi] = special.ellipj(u, mp)
+    ret = abs(ineps - s / c)
+    return ret
+
+
+def _kratio(m, k_ratio):
+    m = float(m)
+    if m < 0:
+        m = 0.0
+    if m > 1:
+        m = 1.0
+    if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
+        k = special.ellipk([m, 1 - m])
+        r = k[0] / k[1] - k_ratio
+    elif abs(m) > EPSILON:
+        r = -k_ratio
+    else:
+        r = 1e20
+    return abs(r)
+
+
+def ellipap(N, rp, rs):
+    """Return (z,p,k) of Nth-order elliptic analog lowpass filter.
+
+    The filter is a normalized prototype that has `rp` decibels of ripple
+    in the passband and a stopband `rs` decibels down.
+
+    The filter's angular (e.g.
rad/s) cutoff frequency is normalized to 1, + defined as the point at which the gain first drops below ``-rp``. + + See Also + -------- + ellip : Filter design function using this prototype + + References + ---------- + .. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing", + Chapters 5 and 12. + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero warning + # Even order filters have DC gain of -rp dB + return numpy.array([]), numpy.array([]), 10**(-rp/20) + elif N == 1: + p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0)) + k = -p + z = [] + return asarray(z), asarray(p), k + + eps = numpy.sqrt(10 ** (0.1 * rp) - 1) + ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1) + ck1p = numpy.sqrt(1 - ck1 * ck1) + if ck1p == 1: + raise ValueError("Cannot design a filter with given rp and rs" + " specifications.") + + val = special.ellipk([ck1 * ck1, ck1p * ck1p]) + if abs(1 - ck1p * ck1p) < EPSILON: + krat = 0 + else: + krat = N * val[0] / val[1] + + m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250, + disp=0) + if m < 0 or m > 1: + m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250, + disp=0) + + capk = special.ellipk(m) + + j = numpy.arange(1 - N % 2, N, 2) + jj = len(j) + + [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj)) + snew = numpy.compress(abs(s) > EPSILON, s, axis=-1) + z = 1.0 / (sqrt(m) * snew) + z = 1j * z + z = numpy.concatenate((z, conjugate(z))) + + r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p), + maxfun=250, maxiter=250, disp=0) + v0 = capk * r / (N * val[0]) + + [sv, cv, dv, phi] = special.ellipj(v0, 1 - m) + p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) + + if N % 2: + newp = numpy.compress(abs(p.imag) > EPSILON * + numpy.sqrt(numpy.sum(p * numpy.conjugate(p), + axis=0).real), + p, axis=-1) + p = numpy.concatenate((p, conjugate(newp))) + else: + p = numpy.concatenate((p, conjugate(p))) + + k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real + if N % 2 == 0: + k = k / numpy.sqrt((1 + eps * eps)) + + return z, p, k + + +# TODO: Make this a real public function scipy.misc.ff +def _falling_factorial(x, n): + r""" + Return the factorial of `x` to the `n` falling. + + This is defined as: + + .. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1) + + This can more efficiently calculate ratios of factorials, since: + + n!/m! == falling_factorial(n, n-m) + + where n >= m + + skipping the factors that cancel out + + the usual factorial n! == ff(n, n) + """ + val = 1 + for k in range(x - n + 1, x + 1): + val *= k + return val + + +def _bessel_poly(n, reverse=False): + """ + Return the coefficients of Bessel polynomial of degree `n` + + If `reverse` is true, a reverse Bessel polynomial is output. + + Output is a list of coefficients: + [1] = 1 + [1, 1] = 1*s + 1 + [1, 3, 3] = 1*s^2 + 3*s + 3 + [1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15 + [1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105 + etc. + + Output is a Python list of arbitrary precision long ints, so n is only + limited by your hardware's memory. + + Sequence is http://oeis.org/A001498 , and output can be confirmed to + match http://oeis.org/A001498/b001498.txt : + + >>> i = 0 + >>> for n in range(51): + ... for x in _bessel_poly(n, reverse=True): + ... print(i, x) + ... 
i += 1 + + """ + if abs(int(n)) != n: + raise ValueError("Polynomial order must be a nonnegative integer") + else: + n = int(n) # np.int32 doesn't work, for instance + + out = [] + for k in range(n + 1): + num = _falling_factorial(2*n - k, n) + den = 2**(n - k) * factorial(k, exact=True) + out.append(num // den) + + if reverse: + return out[::-1] + else: + return out + + +def _campos_zeros(n): + """ + Return approximate zero locations of Bessel polynomials y_n(x) for order + `n` using polynomial fit (Campos-Calderon 2011) + """ + if n == 1: + return asarray([-1+0j]) + + s = npp_polyval(n, [0, 0, 2, 0, -3, 1]) + b3 = npp_polyval(n, [16, -8]) / s + b2 = npp_polyval(n, [-24, -12, 12]) / s + b1 = npp_polyval(n, [8, 24, -12, -2]) / s + b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s + + r = npp_polyval(n, [0, 0, 2, 1]) + a1 = npp_polyval(n, [-6, -6]) / r + a2 = 6 / r + + k = np.arange(1, n+1) + x = npp_polyval(k, [0, a1, a2]) + y = npp_polyval(k, [b0, b1, b2, b3]) + + return x + 1j*y + + +def _aberth(f, fp, x0, tol=1e-15, maxiter=50): + """ + Given a function `f`, its first derivative `fp`, and a set of initial + guesses `x0`, simultaneously find the roots of the polynomial using the + Aberth-Ehrlich method. + + ``len(x0)`` should equal the number of roots of `f`. + + (This is not a complete implementation of Bini's algorithm.) + """ + + N = len(x0) + + x = array(x0, complex) + beta = np.empty_like(x0) + + for iteration in range(maxiter): + alpha = -f(x) / fp(x) # Newton's method + + # Model "repulsion" between zeros + for k in range(N): + beta[k] = np.sum(1/(x[k] - x[k+1:])) + beta[k] += np.sum(1/(x[k] - x[:k])) + + x += alpha / (1 + alpha * beta) + + if not all(np.isfinite(x)): + raise RuntimeError('Root-finding calculation failed') + + # Mekwi: The iterative process can be stopped when |hn| has become + # less than the largest error one is willing to permit in the root. + if all(abs(alpha) <= tol): + break + else: + raise Exception('Zeros failed to converge') + + return x + + +def _bessel_zeros(N): + """ + Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of + modified Bessel function of the second kind + """ + if N == 0: + return asarray([]) + + # Generate starting points + x0 = _campos_zeros(N) + + # Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary + # Bessel polynomial y_N(x) + def f(x): + return special.kve(N+0.5, 1/x) + + # First derivative of above + def fp(x): + return (special.kve(N-0.5, 1/x)/(2*x**2) - + special.kve(N+0.5, 1/x)/(x**2) + + special.kve(N+1.5, 1/x)/(2*x**2)) + + # Starting points converge to true zeros + x = _aberth(f, fp, x0) + + # Improve precision using Newton's method on each + for i in range(len(x)): + x[i] = optimize.newton(f, x[i], fp, tol=1e-15) + + # Average complex conjugates to make them exactly symmetrical + x = np.mean((x, x[::-1].conj()), 0) + + # Zeros should sum to -1 + if abs(np.sum(x) + 1) > 1e-15: + raise RuntimeError('Generated zeros are inaccurate') + + return x + + +def _norm_factor(p, k): + """ + Numerically find frequency shift to apply to delay-normalized filter such + that -3 dB point is at 1 rad/sec. + + `p` is an array_like of polynomial poles + `k` is a float gain + + First 10 values are listed in "Bessel Scale Factors" table, + "Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond." 
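+
+    (Editor's note, not part of the original docstring.) For orientation, the
+    caller `besselap` below applies the returned value for ``norm='mag'`` as::
+
+        norm_factor = _norm_factor(p, k)
+        p /= norm_factor
+        k = norm_factor**-N * a_last
+
+    i.e. the delay-normalized poles are scaled by this factor so that the
+    -3 dB point lands at 1 rad/s.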
+ """ + p = asarray(p, dtype=complex) + + def G(w): + """ + Gain of filter + """ + return abs(k / prod(1j*w - p)) + + def cutoff(w): + """ + When gain = -3 dB, return 0 + """ + return G(w) - 1/np.sqrt(2) + + return optimize.newton(cutoff, 1.5) + + +def besselap(N, norm='phase'): + """ + Return (z,p,k) for analog prototype of an Nth-order Bessel filter. + + Parameters + ---------- + N : int + The order of the filter. + norm : {'phase', 'delay', 'mag'}, optional + Frequency normalization: + + ``phase`` + The filter is normalized such that the phase response reaches its + midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This + happens for both low-pass and high-pass filters, so this is the + "phase-matched" case. [6]_ + + The magnitude response asymptotes are the same as a Butterworth + filter of the same order with a cutoff of `Wn`. + + This is the default, and matches MATLAB's implementation. + + ``delay`` + The filter is normalized such that the group delay in the passband + is 1 (e.g. 1 second). This is the "natural" type obtained by + solving Bessel polynomials + + ``mag`` + The filter is normalized such that the gain magnitude is -3 dB at + angular frequency 1. This is called "frequency normalization" by + Bond. [1]_ + + .. versionadded:: 0.18.0 + + Returns + ------- + z : ndarray + Zeros of the transfer function. Is always an empty array. + p : ndarray + Poles of the transfer function. + k : scalar + Gain of the transfer function. For phase-normalized, this is always 1. + + See Also + -------- + bessel : Filter design function using this prototype + + Notes + ----- + To find the pole locations, approximate starting points are generated [2]_ + for the zeros of the ordinary Bessel polynomial [3]_, then the + Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to + calculate more accurate zeros, and these locations are then inverted about + the unit circle. + + References + ---------- + .. [1] C.R. Bond, "Bessel Filter Constants", + http://www.crbond.com/papers/bsf.pdf + .. [2] Campos and Calderon, "Approximate closed-form formulas for the + zeros of the Bessel Polynomials", :arXiv:`1105.0957`. + .. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency + Characteristics", Proceedings of the Institution of Electrical + Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. + .. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial + Simultaneously", Mathematics of Computation, Vol. 27, No. 122, + April 1973 + .. [5] Ehrlich, "A modified Newton method for polynomials", Communications + of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967, + :DOI:`10.1145/363067.363115` + .. 
[6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to + Others", RaneNote 147, 1998, http://www.rane.com/note147.html + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + if N == 0: + p = [] + k = 1 + else: + # Find roots of reverse Bessel polynomial + p = 1/_bessel_zeros(N) + + a_last = _falling_factorial(2*N, N) // 2**N + + # Shift them to a different normalization if required + if norm in ('delay', 'mag'): + # Normalized for group delay of 1 + k = a_last + if norm == 'mag': + # -3 dB magnitude point is at 1 rad/sec + norm_factor = _norm_factor(p, k) + p /= norm_factor + k = norm_factor**-N * a_last + elif norm == 'phase': + # Phase-matched (1/2 max phase shift at 1 rad/sec) + # Asymptotes are same as Butterworth filter + p *= 10**(-math.log10(a_last)/N) + k = 1 + else: + raise ValueError('normalization not understood') + + return asarray([]), asarray(p, dtype=complex), float(k) + + +def iirnotch(w0, Q, fs=2.0): + """ + Design second-order IIR notch digital filter. + + A notch filter is a band-stop filter with a narrow bandwidth + (high quality factor). It rejects a narrow frequency band and + leaves the rest of the spectrum little changed. + + Parameters + ---------- + w0 : float + Frequency to remove from a signal. If `fs` is specified, this is in + the same units as `fs`. By default, it is a normalized scalar that must + satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the + sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + See Also + -------- + iirpeak + + Notes + ----- + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996 + + Examples + -------- + Design and plot filter to remove the 60 Hz component from a + signal sampled at 200 Hz, using a quality factor Q = 30 + + >>> from scipy import signal + >>> import numpy as np + >>> import matplotlib.pyplot as plt + + >>> fs = 200.0 # Sample frequency (Hz) + >>> f0 = 60.0 # Frequency to be removed from signal (Hz) + >>> Q = 30.0 # Quality factor + >>> # Design notch filter + >>> b, a = signal.iirnotch(f0, Q, fs) + + >>> # Frequency response + >>> freq, h = signal.freqz(b, a, fs=fs) + >>> # Plot + >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) + >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue') + >>> ax[0].set_title("Frequency Response") + >>> ax[0].set_ylabel("Amplitude (dB)", color='blue') + >>> ax[0].set_xlim([0, 100]) + >>> ax[0].set_ylim([-25, 10]) + >>> ax[0].grid() + >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') + >>> ax[1].set_ylabel("Angle (degrees)", color='green') + >>> ax[1].set_xlabel("Frequency (Hz)") + >>> ax[1].set_xlim([0, 100]) + >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) + >>> ax[1].set_ylim([-90, 90]) + >>> ax[1].grid() + >>> plt.show() + """ + + return _design_notch_peak_filter(w0, Q, "notch", fs) + + +def iirpeak(w0, Q, fs=2.0): + """ + Design second-order IIR peak (resonant) digital filter. + + A peak filter is a band-pass filter with a narrow bandwidth + (high quality factor). It rejects components outside a narrow + frequency band. 
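+
+    (Editor's note - an observation added for clarity, not part of the
+    original docstring.) `iirpeak` is the complement of `iirnotch`: for the
+    same `w0` and `Q` the two designs share the denominator ``a`` and their
+    numerators sum to it, ``b_notch + b_peak == a``, as can be verified from
+    the formulas in `_design_notch_peak_filter` below
+    (``b_notch = gain*[1, -2*cos(w0), 1]``, ``b_peak = (1-gain)*[1, 0, -1]``,
+    ``a = [1, -2*gain*cos(w0), 2*gain-1]``).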
+
+    Parameters
+    ----------
+    w0 : float
+        Frequency to be retained in a signal. If `fs` is specified, this is in
+        the same units as `fs`. By default, it is a normalized scalar that must
+        satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the
+        sampling frequency.
+    Q : float
+        Quality factor. Dimensionless parameter that characterizes
+        peak filter -3 dB bandwidth ``bw`` relative to its center
+        frequency, ``Q = w0/bw``.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (``b``) and denominator (``a``) polynomials
+        of the IIR filter.
+
+    See Also
+    --------
+    iirnotch
+
+    Notes
+    -----
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
+           Prentice-Hall, 1996
+
+    Examples
+    --------
+    Design and plot filter to remove the frequencies other than the 300 Hz
+    component from a signal sampled at 1000 Hz, using a quality factor Q = 30
+
+    >>> from scipy import signal
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+
+    >>> fs = 1000.0  # Sample frequency (Hz)
+    >>> f0 = 300.0  # Frequency to be retained (Hz)
+    >>> Q = 30.0  # Quality factor
+    >>> # Design peak filter
+    >>> b, a = signal.iirpeak(f0, Q, fs)
+
+    >>> # Frequency response
+    >>> freq, h = signal.freqz(b, a, fs=fs)
+    >>> # Plot
+    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
+    >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
+    >>> ax[0].set_title("Frequency Response")
+    >>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
+    >>> ax[0].set_xlim([0, 500])
+    >>> ax[0].set_ylim([-50, 10])
+    >>> ax[0].grid()
+    >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
+    >>> ax[1].set_ylabel("Angle (degrees)", color='green')
+    >>> ax[1].set_xlabel("Frequency (Hz)")
+    >>> ax[1].set_xlim([0, 500])
+    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
+    >>> ax[1].set_ylim([-90, 90])
+    >>> ax[1].grid()
+    >>> plt.show()
+    """
+
+    return _design_notch_peak_filter(w0, Q, "peak", fs)
+
+
+def _design_notch_peak_filter(w0, Q, ftype, fs=2.0):
+    """
+    Design notch or peak digital filter.
+
+    Parameters
+    ----------
+    w0 : float
+        Normalized frequency to remove from a signal. If `fs` is specified,
+        this is in the same units as `fs`. By default, it is a normalized
+        scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
+        corresponding to half of the sampling frequency.
+    Q : float
+        Quality factor. Dimensionless parameter that characterizes
+        notch filter -3 dB bandwidth ``bw`` relative to its center
+        frequency, ``Q = w0/bw``.
+    ftype : str
+        The type of IIR filter to design:
+
+            - notch filter : ``notch``
+            - peak filter : ``peak``
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (``b``) and denominator (``a``) polynomials
+        of the IIR filter.
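+
+    (Editor's note, not part of the original docstring.) The public wrappers
+    above are thin: ``iirnotch(w0, Q, fs)`` returns
+    ``_design_notch_peak_filter(w0, Q, "notch", fs)`` and ``iirpeak`` does
+    the same with ``"peak"``, so e.g.
+    ``_design_notch_peak_filter(0.3, 30.0, "notch")`` is equivalent to
+    ``iirnotch(0.3, 30.0)``.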
+ """ + + # Guarantee that the inputs are floats + w0 = float(w0) + Q = float(Q) + w0 = 2*w0/fs + + # Checks if w0 is within the range + if w0 > 1.0 or w0 < 0.0: + raise ValueError("w0 should be such that 0 < w0 < 1") + + # Get bandwidth + bw = w0/Q + + # Normalize inputs + bw = bw*np.pi + w0 = w0*np.pi + + # Compute -3dB atenuation + gb = 1/np.sqrt(2) + + if ftype == "notch": + # Compute beta: formula 11.3.4 (p.575) from reference [1] + beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0) + elif ftype == "peak": + # Compute beta: formula 11.3.19 (p.579) from reference [1] + beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0) + else: + raise ValueError("Unknown ftype.") + + # Compute gain: formula 11.3.6 (p.575) from reference [1] + gain = 1.0/(1.0+beta) + + # Compute numerator b and denominator a + # formulas 11.3.7 (p.575) and 11.3.21 (p.579) + # from reference [1] + if ftype == "notch": + b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0]) + else: + b = (1.0-gain)*np.array([1.0, 0.0, -1.0]) + a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)]) + + return b, a + + +filter_dict = {'butter': [buttap, buttord], + 'butterworth': [buttap, buttord], + + 'cauer': [ellipap, ellipord], + 'elliptic': [ellipap, ellipord], + 'ellip': [ellipap, ellipord], + + 'bessel': [besselap], + 'bessel_phase': [besselap], + 'bessel_delay': [besselap], + 'bessel_mag': [besselap], + + 'cheby1': [cheb1ap, cheb1ord], + 'chebyshev1': [cheb1ap, cheb1ord], + 'chebyshevi': [cheb1ap, cheb1ord], + + 'cheby2': [cheb2ap, cheb2ord], + 'chebyshev2': [cheb2ap, cheb2ord], + 'chebyshevii': [cheb2ap, cheb2ord], + } + +band_dict = {'band': 'bandpass', + 'bandpass': 'bandpass', + 'pass': 'bandpass', + 'bp': 'bandpass', + + 'bs': 'bandstop', + 'bandstop': 'bandstop', + 'bands': 'bandstop', + 'stop': 'bandstop', + + 'l': 'lowpass', + 'low': 'lowpass', + 'lowpass': 'lowpass', + 'lp': 'lowpass', + + 'high': 'highpass', + 'highpass': 'highpass', + 'h': 'highpass', + 'hp': 'highpass', + } + +bessel_norms = {'bessel': 'phase', + 'bessel_phase': 'phase', + 'bessel_delay': 'delay', + 'bessel_mag': 'mag'} diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/filter_design.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/filter_design.pyc new file mode 100644 index 0000000..54a1e2e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/filter_design.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/fir_filter_design.py b/project/venv/lib/python2.7/site-packages/scipy/signal/fir_filter_design.py new file mode 100644 index 0000000..01f4989 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/fir_filter_design.py @@ -0,0 +1,1144 @@ +# -*- coding: utf-8 -*- +"""Functions for FIR filter design.""" +from __future__ import division, print_function, absolute_import + +from math import ceil, log +import warnings + +import numpy as np +from numpy.fft import irfft, fft, ifft +from scipy.special import sinc +from scipy.linalg import toeplitz, hankel, pinv +from scipy._lib.six import string_types + +from . import sigtools + +__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord', + 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase'] + + +def _get_fs(fs, nyq): + """ + Utility for replacing the argument 'nyq' (with default 1) with 'fs'. 
+ """ + if nyq is None and fs is None: + fs = 2 + elif nyq is not None: + if fs is not None: + raise ValueError("Values cannot be given for both 'nyq' and 'fs'.") + fs = 2*nyq + return fs + + +# Some notes on function parameters: +# +# `cutoff` and `width` are given as numbers between 0 and 1. These are +# relative frequencies, expressed as a fraction of the Nyquist frequency. +# For example, if the Nyquist frequency is 2 KHz, then width=0.15 is a width +# of 300 Hz. +# +# The `order` of a FIR filter is one less than the number of taps. +# This is a potential source of confusion, so in the following code, +# we will always use the number of taps as the parameterization of +# the 'size' of the filter. The "number of taps" means the number +# of coefficients, which is the same as the length of the impulse +# response of the filter. + + +def kaiser_beta(a): + """Compute the Kaiser parameter `beta`, given the attenuation `a`. + + Parameters + ---------- + a : float + The desired attenuation in the stopband and maximum ripple in + the passband, in dB. This should be a *positive* number. + + Returns + ------- + beta : float + The `beta` parameter to be used in the formula for a Kaiser window. + + References + ---------- + Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476. + + Examples + -------- + Suppose we want to design a lowpass filter, with 65 dB attenuation + in the stop band. The Kaiser window parameter to be used in the + window method is computed by `kaiser_beta(65)`: + + >>> from scipy.signal import kaiser_beta + >>> kaiser_beta(65) + 6.20426 + + """ + if a > 50: + beta = 0.1102 * (a - 8.7) + elif a > 21: + beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21) + else: + beta = 0.0 + return beta + + +def kaiser_atten(numtaps, width): + """Compute the attenuation of a Kaiser FIR filter. + + Given the number of taps `N` and the transition width `width`, compute the + attenuation `a` in dB, given by Kaiser's formula: + + a = 2.285 * (N - 1) * pi * width + 7.95 + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. + width : float + The desired width of the transition region between passband and + stopband (or, in general, at any discontinuity) for the filter, + expressed as a fraction of the Nyquist frequency. + + Returns + ------- + a : float + The attenuation of the ripple, in dB. + + See Also + -------- + kaiserord, kaiser_beta + + Examples + -------- + Suppose we want to design a FIR filter using the Kaiser window method + that will have 211 taps and a transition width of 9 Hz for a signal that + is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency, + the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB) + is computed as follows: + + >>> from scipy.signal import kaiser_atten + >>> kaiser_atten(211, 0.0375) + 64.48099630593983 + + """ + a = 2.285 * (numtaps - 1) * np.pi * width + 7.95 + return a + + +def kaiserord(ripple, width): + """ + Determine the filter window parameters for the Kaiser window method. + + The parameters returned by this function are generally used to create + a finite impulse response filter using the window method, with either + `firwin` or `firwin2`. + + Parameters + ---------- + ripple : float + Upper bound for the deviation (in dB) of the magnitude of the + filter's frequency response from that of the desired filter (not + including frequencies in any transition intervals). 
That is, if w
+        is the frequency expressed as a fraction of the Nyquist frequency,
+        A(w) is the actual frequency response of the filter and D(w) is the
+        desired frequency response, the design requirement is that::
+
+            abs(A(w) - D(w)) < 10**(-ripple/20)
+
+        for 0 <= w <= 1 and w not in a transition interval.
+    width : float
+        Width of transition region, normalized so that 1 corresponds to pi
+        radians / sample. That is, the frequency is expressed as a fraction
+        of the Nyquist frequency.
+
+    Returns
+    -------
+    numtaps : int
+        The length of the Kaiser window.
+    beta : float
+        The beta parameter for the Kaiser window.
+
+    See Also
+    --------
+    kaiser_beta, kaiser_atten
+
+    Notes
+    -----
+    There are several ways to obtain the Kaiser window:
+
+    - ``signal.kaiser(numtaps, beta, sym=True)``
+    - ``signal.get_window(beta, numtaps)``
+    - ``signal.get_window(('kaiser', beta), numtaps)``
+
+    The empirical equations discovered by Kaiser are used.
+
+    References
+    ----------
+    Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
+
+    Examples
+    --------
+    We will use the Kaiser window method to design a lowpass FIR filter
+    for a signal that is sampled at 1000 Hz.
+
+    We want at least 65 dB rejection in the stop band, and in the pass
+    band the gain should vary no more than 0.5%.
+
+    We want a cutoff frequency of 175 Hz, with a transition between the
+    pass band and the stop band of 24 Hz. That is, in the band [0, 163],
+    the gain varies no more than 0.5%, and in the band [187, 500], the
+    signal is attenuated by at least 65 dB.
+
+    >>> from scipy.signal import kaiserord, firwin, freqz
+    >>> import matplotlib.pyplot as plt
+    >>> fs = 1000.0
+    >>> cutoff = 175
+    >>> width = 24
+
+    The Kaiser method accepts just a single parameter to control the pass
+    band ripple and the stop band rejection, so we use the more restrictive
+    of the two. In this case, the pass band ripple is 0.005, or 46.02 dB,
+    so we will use 65 dB as the design parameter.
+
+    Use `kaiserord` to determine the length of the filter and the
+    parameter for the Kaiser window.
+
+    >>> numtaps, beta = kaiserord(65, width/(0.5*fs))
+    >>> numtaps
+    167
+    >>> beta
+    6.20426
+
+    Use `firwin` to create the FIR filter.
+
+    >>> taps = firwin(numtaps, cutoff, window=('kaiser', beta),
+    ...               scale=False, nyq=0.5*fs)
+
+    Compute the frequency response of the filter. ``w`` is the array of
+    frequencies, and ``h`` is the corresponding complex array of frequency
+    responses.
+
+    >>> w, h = freqz(taps, worN=8000)
+    >>> w *= 0.5*fs/np.pi  # Convert w to Hz.
+
+    Compute the deviation of the magnitude of the filter's response from
+    that of the ideal lowpass filter. Values in the transition region are
+    set to ``nan``, so they won't appear in the plot.
+
+    >>> ideal = w < cutoff  # The "ideal" frequency response.
+    >>> deviation = np.abs(np.abs(h) - ideal)
+    >>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan
+
+    Plot the deviation. A close look at the left end of the stop band shows
+    that the requirement for 65 dB attenuation is violated in the first lobe
+    by about 0.125 dB. This is not unusual for the Kaiser window method.
+
+    >>> plt.plot(w, 20*np.log10(np.abs(deviation)))
+    >>> plt.xlim(0, 0.5*fs)
+    >>> plt.ylim(-90, -60)
+    >>> plt.grid(alpha=0.25)
+    >>> plt.axhline(-65, color='r', ls='--', alpha=0.3)
+    >>> plt.xlabel('Frequency (Hz)')
+    >>> plt.ylabel('Deviation from ideal (dB)')
+    >>> plt.title('Lowpass Filter Frequency Response')
+    >>> plt.show()
+
+    """
+    A = abs(ripple)  # in case somebody is confused as to what's meant
+    if A < 8:
+        # Formula for N is not valid in this range.
+        raise ValueError("Requested maximum ripple attenuation %f is too "
+                         "small for the Kaiser formula." % A)
+    beta = kaiser_beta(A)
+
+    # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
+    # order, so we have to add 1 to get the number of taps.
+    numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
+
+    return int(ceil(numtaps)), beta
+
+
+def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
+           scale=True, nyq=None, fs=None):
+    """
+    FIR filter design using the window method.
+
+    This function computes the coefficients of a finite impulse response
+    filter. The filter will have linear phase; it will be Type I if
+    `numtaps` is odd and Type II if `numtaps` is even.
+
+    Type II filters always have zero response at the Nyquist frequency, so a
+    ValueError exception is raised if firwin is called with `numtaps` even and
+    having a passband whose right end is at the Nyquist frequency.
+
+    Parameters
+    ----------
+    numtaps : int
+        Length of the filter (number of coefficients, i.e. the filter
+        order + 1). `numtaps` must be odd if a passband includes the
+        Nyquist frequency.
+    cutoff : float or 1D array_like
+        Cutoff frequency of filter (expressed in the same units as `fs`)
+        OR an array of cutoff frequencies (that is, band edges). In the
+        latter case, the frequencies in `cutoff` should be positive and
+        monotonically increasing between 0 and `fs/2`. The values 0 and
+        `fs/2` must not be included in `cutoff`.
+    width : float or None, optional
+        If `width` is not None, then assume it is the approximate width
+        of the transition region (expressed in the same units as `fs`)
+        for use in Kaiser FIR filter design. In this case, the `window`
+        argument is ignored.
+    window : string or tuple of string and parameter values, optional
+        Desired window to use. See `scipy.signal.get_window` for a list
+        of windows and required parameters.
+    pass_zero : bool, optional
+        If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
+        Otherwise the DC gain is 0.
+    scale : bool, optional
+        Set to True to scale the coefficients so that the frequency
+        response is exactly unity at a certain frequency.
+        That frequency is either:
+
+        - 0 (DC) if the first passband starts at 0 (i.e. pass_zero
+          is True)
+        - `fs/2` (the Nyquist frequency) if the first passband ends at
+          `fs/2` (i.e. the filter is a single band highpass filter);
+          center of first passband otherwise
+
+    nyq : float, optional
+        *Deprecated. Use `fs` instead.* This is the Nyquist frequency.
+        Each frequency in `cutoff` must be between 0 and `nyq`. Default
+        is 1.
+    fs : float, optional
+        The sampling frequency of the signal. Each frequency in `cutoff`
+        must be between 0 and ``fs/2``. Default is 2.
+
+    Returns
+    -------
+    h : (numtaps,) ndarray
+        Coefficients of length `numtaps` FIR filter.
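+
+    (Editor's note - an illustrative failure case added for clarity, not part
+    of the original docstring. It exercises the Type II constraint checked in
+    the code below: an even-length filter whose passband reaches the Nyquist
+    frequency is rejected.)
+
+    >>> from scipy.signal import firwin
+    >>> firwin(4, 0.5, pass_zero=False)
+    Traceback (most recent call last):
+        ...
+    ValueError: A filter with an even number of coefficients must have zero response at the Nyquist frequency.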
+ + Raises + ------ + ValueError + If any value in `cutoff` is less than or equal to 0 or greater + than or equal to ``fs/2``, if the values in `cutoff` are not strictly + monotonically increasing, or if `numtaps` is even but a passband + includes the Nyquist frequency. + + See Also + -------- + firwin2 + firls + minimum_phase + remez + + Examples + -------- + Low-pass from 0 to f: + + >>> from scipy import signal + >>> numtaps = 3 + >>> f = 0.1 + >>> signal.firwin(numtaps, f) + array([ 0.06799017, 0.86401967, 0.06799017]) + + Use a specific window function: + + >>> signal.firwin(numtaps, f, window='nuttall') + array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04]) + + High-pass ('stop' from 0 to f): + + >>> signal.firwin(numtaps, f, pass_zero=False) + array([-0.00859313, 0.98281375, -0.00859313]) + + Band-pass: + + >>> f1, f2 = 0.1, 0.2 + >>> signal.firwin(numtaps, [f1, f2], pass_zero=False) + array([ 0.06301614, 0.88770441, 0.06301614]) + + Band-stop: + + >>> signal.firwin(numtaps, [f1, f2]) + array([-0.00801395, 1.0160279 , -0.00801395]) + + Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]): + + >>> f3, f4 = 0.3, 0.4 + >>> signal.firwin(numtaps, [f1, f2, f3, f4]) + array([-0.01376344, 1.02752689, -0.01376344]) + + Multi-band (passbands are [f1, f2] and [f3,f4]): + + >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False) + array([ 0.04890915, 0.91284326, 0.04890915]) + + """ + # The major enhancements to this function added in November 2010 were + # developed by Tom Krauss (see ticket #902). + + nyq = 0.5 * _get_fs(fs, nyq) + + cutoff = np.atleast_1d(cutoff) / float(nyq) + + # Check for invalid input. + if cutoff.ndim > 1: + raise ValueError("The cutoff argument must be at most " + "one-dimensional.") + if cutoff.size == 0: + raise ValueError("At least one cutoff frequency must be given.") + if cutoff.min() <= 0 or cutoff.max() >= 1: + raise ValueError("Invalid cutoff frequency: frequencies must be " + "greater than 0 and less than fs/2.") + if np.any(np.diff(cutoff) <= 0): + raise ValueError("Invalid cutoff frequencies: the frequencies " + "must be strictly increasing.") + + if width is not None: + # A width was given. Find the beta parameter of the Kaiser window + # and set `window`. This overrides the value of `window` passed in. + atten = kaiser_atten(numtaps, float(width) / nyq) + beta = kaiser_beta(atten) + window = ('kaiser', beta) + + pass_nyquist = bool(cutoff.size & 1) ^ pass_zero + if pass_nyquist and numtaps % 2 == 0: + raise ValueError("A filter with an even number of coefficients must " + "have zero response at the Nyquist frequency.") + + # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff + # is even, and each pair in cutoff corresponds to passband. + cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist)) + + # `bands` is a 2D array; each row gives the left and right edges of + # a passband. + bands = cutoff.reshape(-1, 2) + + # Build up the coefficients. + alpha = 0.5 * (numtaps - 1) + m = np.arange(0, numtaps) - alpha + h = 0 + for left, right in bands: + h += right * sinc(right * m) + h -= left * sinc(left * m) + + # Get and apply the window function. + from .signaltools import get_window + win = get_window(window, numtaps, fftbins=False) + h *= win + + # Now handle scaling if desired. + if scale: + # Get the first passband. 
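+        # (Editor's note - explanatory comment added for clarity; not in the
+        # scipy source.) `scale_frequency` below is the point where the gain
+        # is forced to exactly 1: DC when pass_zero is set, Nyquist when the
+        # first band ends there, and the band midpoint otherwise. The taps
+        # are then divided by the filter's response at that frequency,
+        # sum(h * cos(pi * m * scale_frequency)).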
+        left, right = bands[0]
+        if left == 0:
+            scale_frequency = 0.0
+        elif right == 1:
+            scale_frequency = 1.0
+        else:
+            scale_frequency = 0.5 * (left + right)
+        c = np.cos(np.pi * m * scale_frequency)
+        s = np.sum(h * c)
+        h /= s
+
+    return h
+
+
+# Original version of firwin2 from scipy ticket #457, submitted by "tash".
+#
+# Rewritten by Warren Weckesser, 2010.
+
+def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=None,
+            antisymmetric=False, fs=None):
+    """
+    FIR filter design using the window method.
+
+    From the given frequencies `freq` and corresponding gains `gain`,
+    this function constructs an FIR filter with linear phase and
+    (approximately) the given frequency response.
+
+    Parameters
+    ----------
+    numtaps : int
+        The number of taps in the FIR filter. `numtaps` must be less than
+        `nfreqs`.
+    freq : array_like, 1D
+        The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
+        Nyquist. The Nyquist frequency is half `fs`.
+        The values in `freq` must be nondecreasing. A value can be repeated
+        once to implement a discontinuity. The first value in `freq` must
+        be 0, and the last value must be ``fs/2``.
+    gain : array_like
+        The filter gains at the frequency sampling points. Certain
+        constraints on gain values, depending on the filter type, are applied,
+        see Notes for details.
+    nfreqs : int, optional
+        The size of the interpolation mesh used to construct the filter.
+        For most efficient behavior, this should be a power of 2 plus 1
+        (e.g, 129, 257, etc). The default is one more than the smallest
+        power of 2 that is not less than `numtaps`. `nfreqs` must be greater
+        than `numtaps`.
+    window : string or (string, float) or float, or None, optional
+        Window function to use. Default is "hamming". See
+        `scipy.signal.get_window` for the complete list of possible values.
+        If None, no window function is applied.
+    nyq : float, optional
+        *Deprecated. Use `fs` instead.* This is the Nyquist frequency.
+        Each frequency in `freq` must be between 0 and `nyq`. Default is 1.
+    antisymmetric : bool, optional
+        Whether resulting impulse response is symmetric/antisymmetric.
+        See Notes for more details.
+    fs : float, optional
+        The sampling frequency of the signal. Each frequency in `freq`
+        must be between 0 and ``fs/2``. Default is 2.
+
+    Returns
+    -------
+    taps : ndarray
+        The filter coefficients of the FIR filter, as a 1-D array of length
+        `numtaps`.
+
+    See also
+    --------
+    firls
+    firwin
+    minimum_phase
+    remez
+
+    Notes
+    -----
+    From the given set of frequencies and gains, the desired response is
+    constructed in the frequency domain. The inverse FFT is applied to the
+    desired response to create the associated convolution kernel, and the
+    first `numtaps` coefficients of this kernel, scaled by `window`, are
+    returned.
+
+    The FIR filter will have linear phase. The type of filter is determined
+    by the value of `numtaps` and the `antisymmetric` flag.
+    There are four possible combinations:
+
+       - odd `numtaps`, `antisymmetric` is False, type I filter is produced
+       - even `numtaps`, `antisymmetric` is False, type II filter is produced
+       - odd `numtaps`, `antisymmetric` is True, type III filter is produced
+       - even `numtaps`, `antisymmetric` is True, type IV filter is produced
+
+    The magnitude response of all but type I filters is subject to the
+    following constraints:
+
+       - type II -- zero at the Nyquist frequency
+       - type III -- zero at zero and Nyquist frequencies
+       - type IV -- zero at zero frequency
+
+    .. versionadded:: 0.9.0
+
+    References
+    ----------
+    ..
[1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal + Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989). + (See, for example, Section 7.4.) + + .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital + Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm + + Examples + -------- + A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and + that decreases linearly on [0.5, 1.0] from 1 to 0: + + >>> from scipy import signal + >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) + >>> print(taps[72:78]) + [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961] + + """ + nyq = 0.5 * _get_fs(fs, nyq) + + if len(freq) != len(gain): + raise ValueError('freq and gain must be of same length.') + + if nfreqs is not None and numtaps >= nfreqs: + raise ValueError(('ntaps must be less than nfreqs, but firwin2 was ' + 'called with ntaps=%d and nfreqs=%s') % + (numtaps, nfreqs)) + + if freq[0] != 0 or freq[-1] != nyq: + raise ValueError('freq must start with 0 and end with fs/2.') + d = np.diff(freq) + if (d < 0).any(): + raise ValueError('The values in freq must be nondecreasing.') + d2 = d[:-1] + d[1:] + if (d2 == 0).any(): + raise ValueError('A value in freq must not occur more than twice.') + + if antisymmetric: + if numtaps % 2 == 0: + ftype = 4 + else: + ftype = 3 + else: + if numtaps % 2 == 0: + ftype = 2 + else: + ftype = 1 + + if ftype == 2 and gain[-1] != 0.0: + raise ValueError("A Type II filter must have zero gain at the " + "Nyquist frequency.") + elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0): + raise ValueError("A Type III filter must have zero gain at zero " + "and Nyquist frequencies.") + elif ftype == 4 and gain[0] != 0.0: + raise ValueError("A Type IV filter must have zero gain at zero " + "frequency.") + + if nfreqs is None: + nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2))) + + # Tweak any repeated values in freq so that interp works. + eps = np.finfo(float).eps + for k in range(len(freq)): + if k < len(freq) - 1 and freq[k] == freq[k + 1]: + freq[k] = freq[k] - eps + freq[k + 1] = freq[k + 1] + eps + + # Linearly interpolate the desired response on a uniform mesh `x`. + x = np.linspace(0.0, nyq, nfreqs) + fx = np.interp(x, freq, gain) + + # Adjust the phases of the coefficients so that the first `ntaps` of the + # inverse FFT are the desired filter coefficients. + shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq) + if ftype > 2: + shift *= 1j + + fx2 = fx * shift + + # Use irfft to compute the inverse FFT. + out_full = irfft(fx2) + + if window is not None: + # Create the window to apply to the filter coefficients. + from .signaltools import get_window + wind = get_window(window, numtaps, fftbins=False) + else: + wind = 1 + + # Keep only the first `numtaps` coefficients in `out`, and multiply by + # the window. + out = out_full[:numtaps] * wind + + if ftype == 3: + out[out.size // 2] = 0.0 + + return out + + +def remez(numtaps, bands, desired, weight=None, Hz=None, type='bandpass', + maxiter=25, grid_density=16, fs=None): + """ + Calculate the minimax optimal filter using the Remez exchange algorithm. + + Calculate the filter-coefficients for the finite impulse response + (FIR) filter whose transfer function minimizes the maximum error + between the desired gain and the realized gain in the specified + frequency bands using the Remez exchange algorithm. + + Parameters + ---------- + numtaps : int + The desired number of taps in the filter. 
The number of taps is + the number of terms in the filter, or the filter order plus one. + bands : array_like + A monotonic sequence containing the band edges. + All elements must be non-negative and less than half the sampling + frequency as given by `fs`. + desired : array_like + A sequence half the size of bands containing the desired gain + in each of the specified bands. + weight : array_like, optional + A relative weighting to give to each band region. The length of + `weight` has to be half the length of `bands`. + Hz : scalar, optional + *Deprecated. Use `fs` instead.* + The sampling frequency in Hz. Default is 1. + type : {'bandpass', 'differentiator', 'hilbert'}, optional + The type of filter: + + * 'bandpass' : flat response in bands. This is the default. + + * 'differentiator' : frequency proportional response in bands. + + * 'hilbert' : filter with odd symmetry, that is, type III + (for even order) or type IV (for odd order) + linear phase filters. + + maxiter : int, optional + Maximum number of iterations of the algorithm. Default is 25. + grid_density : int, optional + Grid density. The dense grid used in `remez` is of size + ``(numtaps + 1) * grid_density``. Default is 16. + fs : float, optional + The sampling frequency of the signal. Default is 1. + + Returns + ------- + out : ndarray + A rank-1 array containing the coefficients of the optimal + (in a minimax sense) filter. + + See Also + -------- + firls + firwin + firwin2 + minimum_phase + + References + ---------- + .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the + design of optimum FIR linear phase digital filters", + IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973. + .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer + Program for Designing Optimum FIR Linear Phase Digital + Filters", IEEE Trans. Audio Electroacoust., vol. AU-21, + pp. 506-525, 1973. + + Examples + -------- + For a signal sampled at 100 Hz, we want to construct a filter with a + passband at 20-40 Hz, and stop bands at 0-10 Hz and 45-50 Hz. Note that + this means that the behavior in the frequency ranges between those bands + is unspecified and may overshoot. + + >>> from scipy import signal + >>> fs = 100 + >>> bpass = signal.remez(72, [0, 10, 20, 40, 45, 50], [0, 1, 0], fs=fs) + >>> freq, response = signal.freqz(bpass) + + >>> import matplotlib.pyplot as plt + >>> plt.semilogy(0.5*fs*freq/np.pi, np.abs(response), 'b-') + >>> plt.grid(alpha=0.25) + >>> plt.xlabel('Frequency (Hz)') + >>> plt.ylabel('Gain') + >>> plt.show() + + """ + if Hz is None and fs is None: + fs = 1.0 + elif Hz is not None: + if fs is not None: + raise ValueError("Values cannot be given for both 'Hz' and 'fs'.") + fs = Hz + + # Convert type + try: + tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type] + except KeyError: + raise ValueError("Type must be 'bandpass', 'differentiator', " + "or 'hilbert'") + + # Convert weight + if weight is None: + weight = [1] * len(desired) + + bands = np.asarray(bands).copy() + return sigtools._remez(numtaps, bands, desired, weight, tnum, fs, + maxiter, grid_density) + + +def firls(numtaps, bands, desired, weight=None, nyq=None, fs=None): + """ + FIR filter design using least-squares error minimization. 
+
+    Calculate the filter coefficients for the linear-phase finite
+    impulse response (FIR) filter which has the best approximation
+    to the desired frequency response described by `bands` and
+    `desired` in the least squares sense (i.e., the integral of the
+    weighted mean-squared error within the specified bands is
+    minimized).
+
+    Parameters
+    ----------
+    numtaps : int
+        The number of taps in the FIR filter. `numtaps` must be odd.
+    bands : array_like
+        A monotonic nondecreasing sequence containing the band edges in
+        Hz. All elements must be non-negative and less than or equal to
+        the Nyquist frequency given by `nyq`.
+    desired : array_like
+        A sequence the same size as `bands` containing the desired gain
+        at the start and end point of each band.
+    weight : array_like, optional
+        A relative weighting to give to each band region when solving
+        the least squares problem. `weight` has to be half the size of
+        `bands`.
+    nyq : float, optional
+        *Deprecated. Use `fs` instead.*
+        Nyquist frequency. Each frequency in `bands` must be between 0
+        and `nyq` (inclusive). Default is 1.
+    fs : float, optional
+        The sampling frequency of the signal. Each frequency in `bands`
+        must be between 0 and ``fs/2`` (inclusive). Default is 2.
+
+    Returns
+    -------
+    coeffs : ndarray
+        Coefficients of the optimal (in a least squares sense) FIR filter.
+
+    See also
+    --------
+    firwin
+    firwin2
+    minimum_phase
+    remez
+
+    Notes
+    -----
+    This implementation follows the algorithm given in [1]_.
+    As noted there, least squares design has multiple advantages:
+
+        1. Optimal in a least-squares sense.
+        2. Simple, non-iterative method.
+        3. The general solution can be obtained by solving a linear
+           system of equations.
+        4. Allows the use of a frequency dependent weighting function.
+
+    This function constructs a Type I linear phase FIR filter, which
+    contains an odd number of `coeffs` satisfying for :math:`n < numtaps`:
+
+    .. math:: coeffs(n) = coeffs(numtaps - 1 - n)
+
+    The odd number of coefficients and filter symmetry avoid boundary
+    conditions that could otherwise occur at the Nyquist and 0 frequencies
+    (e.g., for Type II, III, or IV variants).
+
+    .. versionadded:: 0.18
+
+    References
+    ----------
+    .. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares.
+           OpenStax CNX. Aug 9, 2005.
+           http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7
+
+    Examples
+    --------
+    We want to construct a band-pass filter. Note that the behavior in the
+    frequency ranges between our stop bands and pass bands is unspecified,
+    and thus may overshoot depending on the parameters of our filter:
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> fig, axs = plt.subplots(2)
+    >>> fs = 10.0  # Hz
+    >>> desired = (0, 0, 1, 1, 0, 0)
+    >>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))):
+    ...     fir_firls = signal.firls(73, bands, desired, fs=fs)
+    ...     fir_remez = signal.remez(73, bands, desired[::2], fs=fs)
+    ...     fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs)
+    ...     hs = list()
+    ...     ax = axs[bi]
+    ...     for fir in (fir_firls, fir_remez, fir_firwin2):
+    ...         freq, response = signal.freqz(fir)
+    ...         hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0])
+    ...     for band, gains in zip(zip(bands[::2], bands[1::2]),
+    ...                            zip(desired[::2], desired[1::2])):
+    ...         ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2)
+    ...     if bi == 0:
+    ...         ax.legend(hs, ('firls', 'remez', 'firwin2'),
+    ...                   loc='lower center', frameon=False)
+    ...     else:
+    ...         ax.set_xlabel('Frequency (Hz)')
+    ...     ax.grid(True)
+    ...     ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude')
+    ...
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    """  # noqa
+    nyq = 0.5 * _get_fs(fs, nyq)
+
+    numtaps = int(numtaps)
+    if numtaps % 2 == 0 or numtaps < 1:
+        raise ValueError("numtaps must be odd and >= 1")
+    M = (numtaps-1) // 2
+
+    # normalize bands 0->1 and make it 2 columns
+    nyq = float(nyq)
+    if nyq <= 0:
+        raise ValueError('nyq must be positive, got %s <= 0.' % nyq)
+    bands = np.asarray(bands).flatten() / nyq
+    if len(bands) % 2 != 0:
+        raise ValueError("bands must contain frequency pairs.")
+    bands.shape = (-1, 2)
+
+    # check remaining params
+    desired = np.asarray(desired).flatten()
+    if bands.size != desired.size:
+        raise ValueError("desired must have one entry per frequency, got %s "
+                         "gains for %s frequencies."
+                         % (desired.size, bands.size))
+    desired.shape = (-1, 2)
+    if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():
+        raise ValueError("bands must be monotonically nondecreasing and have "
+                         "width > 0.")
+    if (bands[:-1, 1] > bands[1:, 0]).any():
+        raise ValueError("bands must not overlap.")
+    if (desired < 0).any():
+        raise ValueError("desired must be non-negative.")
+    if weight is None:
+        weight = np.ones(len(desired))
+    weight = np.asarray(weight).flatten()
+    if len(weight) != len(desired):
+        raise ValueError("weight must be the same size as the number of "
+                         "band pairs (%s)." % (len(bands),))
+    if (weight < 0).any():
+        raise ValueError("weight must be non-negative.")
+
+    # Set up the linear matrix equation to be solved, Qa = b
+
+    # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)
+    # where Q1(k,n)=q(k−n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel.
+
+    # We omit the factor of 0.5 above, instead adding it during coefficient
+    # calculation.
+
+    # We also omit the 1/π from both Q and b equations, as they cancel
+    # during solving.
+
+    # We have that:
+    #     q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)
+    # Using our normalization ω=πf and with a constant weight W over each
+    # interval f1->f2 we get:
+    #     q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf
+    # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
+    n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
+    q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight)
+
+    # Now we assemble our sum of Toeplitz and Hankel
+    Q1 = toeplitz(q[:M+1])
+    Q2 = hankel(q[:M+1], q[M:])
+    Q = Q1 + Q2
+
+    # Now for b(n) we have that:
+    #     b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π)
+    # Using our normalization ω=πf and with a constant weight W over each
+    # interval and a linear term for D(ω) we get (over each f1->f2 interval):
+    #     b(n) = W ∫ (mf+c)cos(πnf)df
+    #          = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2
+    # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
+    n = n[:M + 1]  # only need this many coefficients here
+    # Choose m and c such that we are at the start and end weights
+    m = (np.diff(desired, axis=1) / np.diff(bands, axis=1))
+    c = desired[:, [0]] - bands[:, [0]] * m
+    b = bands * (m*bands + c) * np.sinc(bands * n)
+    # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0
+    b[0] -= m * bands * bands / 2.
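+    # (Editor's note - explanatory comment added for clarity; not in the
+    # scipy source.) The n = 0 row above uses the limit of the second term;
+    # the n >= 1 rows below add that term explicitly, since
+    # m*f**2 * cos(n*pi*f) / (pi*n*f)**2 simplifies to
+    # m * cos(n*pi*f) / (pi*n)**2, evaluated at the band edges.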
+    b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2
+    b = np.dot(np.diff(b, axis=2)[:, :, 0], weight)
+
+    # Now we can solve the equation (use pinv because Q can be rank deficient)
+    a = np.dot(pinv(Q), b)
+
+    # make coefficients symmetric (linear phase)
+    coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:]))
+    return coeffs
+
+
+def _dhtm(mag):
+    """Compute the modified 1D discrete Hilbert transform
+
+    Parameters
+    ----------
+    mag : ndarray
+        The magnitude spectrum. Should be 1D with an even length, and
+        preferably a fast length for FFT/IFFT.
+    """
+    # Adapted based on code by Niranjan Damera-Venkata,
+    # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`)
+    sig = np.zeros(len(mag))
+    # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5
+    midpt = len(mag) // 2
+    sig[1:midpt] = 1
+    sig[midpt+1:] = -1
+    # eventually if we want to support complex filters, we will need a
+    # np.abs() on the mag inside the log, and should remove the .real
+    recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real
+    return recon
+
+
+def minimum_phase(h, method='homomorphic', n_fft=None):
+    """Convert a linear-phase FIR filter to minimum phase
+
+    Parameters
+    ----------
+    h : array
+        Linear-phase FIR filter coefficients.
+    method : {'hilbert', 'homomorphic'}
+        The method to use:
+
+            'homomorphic' (default)
+                This method [4]_ [5]_ works best with filters with an
+                odd number of taps, and the resulting minimum phase filter
+                will have a magnitude response that approximates the square
+                root of the original filter's magnitude response.
+
+            'hilbert'
+                This method [1]_ is designed to be used with equiripple
+                filters (e.g., from `remez`) with unity or zero gain
+                regions.
+
+    n_fft : int
+        The number of points to use for the FFT. Should be at least a
+        few times larger than the signal length (see Notes).
+
+    Returns
+    -------
+    h_minimum : array
+        The minimum-phase version of the filter, with length
+        ``(length(h) + 1) // 2``.
+
+    See Also
+    --------
+    firwin
+    firwin2
+    remez
+
+    Notes
+    -----
+    Both the Hilbert [1]_ or homomorphic [4]_ [5]_ methods require selection
+    of an FFT length to estimate the complex cepstrum of the filter.
+
+    In the case of the Hilbert method, the deviation from the ideal
+    spectrum ``epsilon`` is related to the number of stopband zeros
+    ``n_stop`` and FFT length ``n_fft`` as::
+
+        epsilon = 2. * n_stop / n_fft
+
+    For example, with 100 stopband zeros and a FFT length of 2048,
+    ``epsilon = 0.0976``. If we conservatively assume that the number of
+    stopband zeros is one less than the filter length, we can take the FFT
+    length to be the next power of 2 that satisfies ``epsilon=0.01`` as::
+
+        n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
+
+    This gives reasonable results for both the Hilbert and homomorphic
+    methods, and gives the value used when ``n_fft=None``.
+
+    Alternative implementations exist for creating minimum-phase filters,
+    including zero inversion [2]_ and spectral factorization [3]_ [4]_.
+    For more information, see:
+
+        http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters
+
+    Examples
+    --------
+    Create an optimal linear-phase filter, then convert it to minimum phase:
+
+    >>> from scipy.signal import remez, minimum_phase, freqz, group_delay
+    >>> import matplotlib.pyplot as plt
+    >>> freq = [0, 0.2, 0.3, 1.0]
+    >>> desired = [1, 0]
+    >>> h_linear = remez(151, freq, desired, Hz=2.)
+
+    Convert it to minimum phase:
+
+    >>> h_min_hom = minimum_phase(h_linear, method='homomorphic')
+    >>> h_min_hil = minimum_phase(h_linear, method='hilbert')
+
+    Compare the three filters:
+
+    >>> fig, axs = plt.subplots(4, figsize=(4, 8))
+    >>> for h, style, color in zip((h_linear, h_min_hom, h_min_hil),
+    ...                            ('-', '-', '--'), ('k', 'r', 'c')):
+    ...     w, H = freqz(h)
+    ...     w, gd = group_delay((h, 1))
+    ...     w /= np.pi
+    ...     axs[0].plot(h, color=color, linestyle=style)
+    ...     axs[1].plot(w, np.abs(H), color=color, linestyle=style)
+    ...     axs[2].plot(w, 20 * np.log10(np.abs(H)), color=color, linestyle=style)
+    ...     axs[3].plot(w, gd, color=color, linestyle=style)
+    >>> for ax in axs:
+    ...     ax.grid(True, color='0.5')
+    ...     ax.fill_between(freq[1:3], *ax.get_ylim(), color='#ffeeaa', zorder=1)
+    >>> axs[0].set(xlim=[0, len(h_linear) - 1], ylabel='Amplitude', xlabel='Samples')
+    >>> axs[1].legend(['Linear', 'Min-Hom', 'Min-Hil'], title='Phase')
+    >>> for ax, ylim in zip(axs[1:], ([0, 1.1], [-150, 10], [-60, 60])):
+    ...     ax.set(xlim=[0, 1], ylim=ylim, xlabel='Frequency')
+    >>> axs[1].set(ylabel='Magnitude')
+    >>> axs[2].set(ylabel='Magnitude (dB)')
+    >>> axs[3].set(ylabel='Group delay')
+    >>> plt.tight_layout()
+
+    References
+    ----------
+    .. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and
+           complex minimum phase digital FIR filters," Acoustics, Speech,
+           and Signal Processing, 1999. Proceedings., 1999 IEEE International
+           Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3.
+           doi: 10.1109/ICASSP.1999.756179
+    .. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR
+           filters by direct factorization," Signal Processing,
+           vol. 10, no. 4, pp. 369-383, Jun. 1986.
+    .. [3] T. Saramaki, "Finite Impulse Response Filter Design," in
+           Handbook for Digital Signal Processing, chapter 4,
+           New York: Wiley-Interscience, 1993.
+    .. [4] J. S. Lim, Advanced Topics in Signal Processing.
+           Englewood Cliffs, N.J.: Prentice Hall, 1988.
+    .. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck,
+           "Discrete-Time Signal Processing," 2nd edition.
+           Upper Saddle River, N.J.: Prentice Hall, 1999.
+    """  # noqa
+    h = np.asarray(h)
+    if np.iscomplexobj(h):
+        raise ValueError('Complex filters not supported')
+    if h.ndim != 1 or h.size <= 2:
+        raise ValueError('h must be 1D and at least 2 samples long')
+    n_half = len(h) // 2
+    if not np.allclose(h[-n_half:][::-1], h[:n_half]):
+        warnings.warn('h does not appear to be symmetric, conversion may '
+                      'fail', RuntimeWarning)
+    if not isinstance(method, string_types) or method not in \
+            ('homomorphic', 'hilbert',):
+        raise ValueError('method must be "homomorphic" or "hilbert", got %r'
+                         % (method,))
+    if n_fft is None:
+        n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
+    n_fft = int(n_fft)
+    if n_fft < len(h):
+        raise ValueError('n_fft must be at least len(h)==%s' % len(h))
+    if method == 'hilbert':
+        w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half)
+        H = np.real(fft(h, n_fft) * np.exp(1j * w))
+        dp = max(H) - 1
+        ds = 0 - min(H)
+        S = 4.
/ (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2 + H += ds + H *= S + H = np.sqrt(H, out=H) + H += 1e-10 # ensure that the log does not explode + h_minimum = _dhtm(H) + else: # method == 'homomorphic' + # zero-pad; calculate the DFT + h_temp = np.abs(fft(h, n_fft)) + # take 0.25*log(|H|**2) = 0.5*log(|H|) + h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up + np.log(h_temp, out=h_temp) + h_temp *= 0.5 + # IDFT + h_temp = ifft(h_temp).real + # multiply pointwise by the homomorphic filter + # lmin[n] = 2u[n] - d[n] + win = np.zeros(n_fft) + win[0] = 1 + stop = (len(h) + 1) // 2 + win[1:stop] = 2 + if len(h) % 2: + win[stop] = 1 + h_temp *= win + h_temp = ifft(np.exp(fft(h_temp))) + h_minimum = h_temp.real + n_out = n_half + len(h) % 2 + return h_minimum[:n_out] diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/fir_filter_design.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/fir_filter_design.pyc new file mode 100644 index 0000000..55f32da Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/fir_filter_design.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/lti_conversion.py b/project/venv/lib/python2.7/site-packages/scipy/signal/lti_conversion.py new file mode 100644 index 0000000..4f686c0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/lti_conversion.py @@ -0,0 +1,465 @@ +""" +ltisys -- a collection of functions to convert linear time invariant systems +from one representation to another. +""" +from __future__ import division, print_function, absolute_import + +import numpy +import numpy as np +from numpy import (r_, eye, atleast_2d, poly, dot, + asarray, product, zeros, array, outer) +from scipy import linalg + +from .filter_design import tf2zpk, zpk2tf, normalize + + +__all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk', + 'cont2discrete'] + + +def tf2ss(num, den): + r"""Transfer function to state-space representation. + + Parameters + ---------- + num, den : array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree. The + denominator needs to be at least as long as the numerator. + + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + Examples + -------- + Convert the transfer function: + + .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + to the state-space representation: + + .. math:: + + \dot{\textbf{x}}(t) = + \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ + + \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) + + >>> from scipy.signal import tf2ss + >>> A, B, C, D = tf2ss(num, den) + >>> A + array([[-2., -1.], + [ 1., 0.]]) + >>> B + array([[ 1.], + [ 0.]]) + >>> C + array([[ 1., 2.]]) + >>> D + array([[ 1.]]) + """ + # Controller canonical state-space representation. + # if M+1 = len(num) and K+1 = len(den) then we must have M <= K + # states are found by asserting that X(s) = U(s) / D(s) + # then Y(s) = N(s) * X(s) + # + # A, B, C, and D follow quite naturally. + # + num, den = normalize(num, den) # Strips zeros, checks arrays + nn = len(num.shape) + if nn == 1: + num = asarray([num], num.dtype) + M = num.shape[1] + K = len(den) + if M > K: + msg = "Improper transfer function. `num` is longer than `den`." 
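A usage sketch for the `minimum_phase` routine completed above, assuming the vendored scipy (or any compatible release) is importable and using `firwin` from this same module; the length check follows the ``(len(h) + 1) // 2`` rule in the docstring:

>>> import numpy as np
>>> from scipy.signal import firwin, minimum_phase
>>> h_lin = firwin(101, 0.3)        # 101-tap linear-phase lowpass
>>> h_min = minimum_phase(h_lin)    # homomorphic method by default
>>> len(h_min)
51
>>> np.argmax(np.abs(h_min)) < np.argmax(np.abs(h_lin))  # energy shifts toward n=0
True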
+ raise ValueError(msg) + if M == 0 or K == 0: # Null system + return (array([], float), array([], float), array([], float), + array([], float)) + + # pad numerator to have same number of columns has denominator + num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num] + + if num.shape[-1] > 0: + D = atleast_2d(num[:, 0]) + + else: + # We don't assign it an empty array because this system + # is not 'null'. It just doesn't have a non-zero D + # matrix. Thus, it should have a non-zero shape so that + # it can be operated on by functions like 'ss2tf' + D = array([[0]], float) + + if K == 1: + D = D.reshape(num.shape) + + return (zeros((1, 1)), zeros((1, D.shape[1])), + zeros((D.shape[0], 1)), D) + + frow = -array([den[1:]]) + A = r_[frow, eye(K - 2, K - 1)] + B = eye(K - 1, 1) + C = num[:, 1:] - outer(num[:, 0], den[1:]) + D = D.reshape((C.shape[0], B.shape[1])) + + return A, B, C, D + + +def _none_to_empty_2d(arg): + if arg is None: + return zeros((0, 0)) + else: + return arg + + +def _atleast_2d_or_none(arg): + if arg is not None: + return atleast_2d(arg) + + +def _shape_or_none(M): + if M is not None: + return M.shape + else: + return (None,) * 2 + + +def _choice_not_none(*args): + for arg in args: + if arg is not None: + return arg + + +def _restore(M, shape): + if M.shape == (0, 0): + return zeros(shape) + else: + if M.shape != shape: + raise ValueError("The input arrays have incompatible shapes.") + return M + + +def abcd_normalize(A=None, B=None, C=None, D=None): + """Check state-space matrices and ensure they are two-dimensional. + + If enough information on the system is provided, that is, enough + properly-shaped arrays are passed to the function, the missing ones + are built from this information, ensuring the correct number of + rows and columns. Otherwise a ValueError is raised. + + Parameters + ---------- + A, B, C, D : array_like, optional + State-space matrices. All of them are None (missing) by default. + See `ss2tf` for format. + + Returns + ------- + A, B, C, D : array + Properly shaped state-space matrices. + + Raises + ------ + ValueError + If not enough information on the system was provided. + + """ + A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D)) + + MA, NA = _shape_or_none(A) + MB, NB = _shape_or_none(B) + MC, NC = _shape_or_none(C) + MD, ND = _shape_or_none(D) + + p = _choice_not_none(MA, MB, NC) + q = _choice_not_none(NB, ND) + r = _choice_not_none(MC, MD) + if p is None or q is None or r is None: + raise ValueError("Not enough information on the system.") + + A, B, C, D = map(_none_to_empty_2d, (A, B, C, D)) + A = _restore(A, (p, p)) + B = _restore(B, (p, q)) + C = _restore(C, (r, p)) + D = _restore(D, (r, q)) + + return A, B, C, D + + +def ss2tf(A, B, C, D, input=0): + r"""State-space to transfer function. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + num : 2-D ndarray + Numerator(s) of the resulting transfer function(s). `num` has one row + for each of the system's outputs. Each row is a sequence representation + of the numerator polynomial. + den : 1-D ndarray + Denominator of the resulting transfer function(s). 
`den` is a sequence + representation of the denominator polynomial. + + Examples + -------- + Convert the state-space representation: + + .. math:: + + \dot{\textbf{x}}(t) = + \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ + + \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) + + >>> A = [[-2, -1], [1, 0]] + >>> B = [[1], [0]] # 2-dimensional column vector + >>> C = [[1, 2]] # 2-dimensional row vector + >>> D = 1 + + to the transfer function: + + .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> from scipy.signal import ss2tf + >>> ss2tf(A, B, C, D) + (array([[1, 3, 3]]), array([ 1., 2., 1.])) + """ + # transfer function is C (sI - A)**(-1) B + D + + # Check consistency and make them all rank-2 arrays + A, B, C, D = abcd_normalize(A, B, C, D) + + nout, nin = D.shape + if input >= nin: + raise ValueError("System does not have the input specified.") + + # make SIMO from possibly MIMO system. + B = B[:, input:input + 1] + D = D[:, input:input + 1] + + try: + den = poly(A) + except ValueError: + den = 1 + + if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0): + num = numpy.ravel(D) + if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0): + den = [] + return num, den + + num_states = A.shape[0] + type_test = A[:, 0] + B[:, 0] + C[0, :] + D + num = numpy.zeros((nout, num_states + 1), type_test.dtype) + for k in range(nout): + Ck = atleast_2d(C[k, :]) + num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den + + return num, den + + +def zpk2ss(z, p, k): + """Zero-pole-gain representation to state-space representation + + Parameters + ---------- + z, p : sequence + Zeros and poles. + k : float + System gain. + + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + """ + return tf2ss(*zpk2tf(z, p, k)) + + +def ss2zpk(A, B, C, D, input=0): + """State-space representation to zero-pole-gain representation. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + z, p : sequence + Zeros and poles. + k : float + System gain. + + """ + return tf2zpk(*ss2tf(A, B, C, D, input=input)) + + +def cont2discrete(system, dt, method="zoh", alpha=None): + """ + Transform a continuous to a discrete state-space system. + + Parameters + ---------- + system : a tuple describing the system or an instance of `lti` + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + dt : float + The discretization time step. 
+ method : {"gbt", "bilinear", "euler", "backward_diff", "zoh"}, optional + Which method to use: + + * gbt: generalized bilinear transformation + * bilinear: Tustin's approximation ("gbt" with alpha=0.5) + * euler: Euler (or forward differencing) method ("gbt" with alpha=0) + * backward_diff: Backwards differencing ("gbt" with alpha=1.0) + * zoh: zero-order hold (default) + + alpha : float within [0, 1], optional + The generalized bilinear transformation weighting parameter, which + should only be specified with method="gbt", and is ignored otherwise + + Returns + ------- + sysd : tuple containing the discrete system + Based on the input type, the output will be of the form + + * (num, den, dt) for transfer function input + * (zeros, poles, gain, dt) for zeros-poles-gain input + * (A, B, C, D, dt) for state-space system input + + Notes + ----- + By default, the routine uses a Zero-Order Hold (zoh) method to perform + the transformation. Alternatively, a generalized bilinear transformation + may be used, which includes the common Tustin's bilinear approximation, + an Euler's method technique, or a backwards differencing technique. + + The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear + approximation is based on [2]_ and [3]_. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models + + .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf + + .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized + bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754, + 2009. + (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf) + + """ + if len(system) == 1: + return system.to_discrete() + if len(system) == 2: + sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method, + alpha=alpha) + return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 3: + sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt, + method=method, alpha=alpha) + return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 4: + a, b, c, d = system + else: + raise ValueError("First argument must either be a tuple of 2 (tf), " + "3 (zpk), or 4 (ss) arrays.") + + if method == 'gbt': + if alpha is None: + raise ValueError("Alpha parameter must be specified for the " + "generalized bilinear transform (gbt) method") + elif alpha < 0 or alpha > 1: + raise ValueError("Alpha parameter must be within the interval " + "[0,1] for the gbt method") + + if method == 'gbt': + # This parameter is used repeatedly - compute once here + ima = np.eye(a.shape[0]) - alpha*dt*a + ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a) + bd = linalg.solve(ima, dt*b) + + # Similarly solve for the output equation matrices + cd = linalg.solve(ima.transpose(), c.transpose()) + cd = cd.transpose() + dd = d + alpha*np.dot(c, bd) + + elif method == 'bilinear' or method == 'tustin': + return cont2discrete(system, dt, method="gbt", alpha=0.5) + + elif method == 'euler' or method == 'forward_diff': + return cont2discrete(system, dt, method="gbt", alpha=0.0) + + elif method == 'backward_diff': + return cont2discrete(system, dt, method="gbt", alpha=1.0) + + elif method == 'zoh': + # Build an exponential matrix + em_upper = np.hstack((a, b)) + + # Need to stack zeros under the a and b matrices + em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])), + np.zeros((b.shape[1], b.shape[1])))) + + em = np.vstack((em_upper, em_lower)) + ms = 
linalg.expm(dt * em) + + # Dispose of the lower rows + ms = ms[:a.shape[0], :] + + ad = ms[:, 0:a.shape[1]] + bd = ms[:, a.shape[1]:] + + cd = c + dd = d + + else: + raise ValueError("Unknown transformation method '%s'" % method) + + return ad, bd, cd, dd, dt diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/lti_conversion.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/lti_conversion.pyc new file mode 100644 index 0000000..2aeb69b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/lti_conversion.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/ltisys.py b/project/venv/lib/python2.7/site-packages/scipy/signal/ltisys.py new file mode 100644 index 0000000..d6485b0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/ltisys.py @@ -0,0 +1,3664 @@ +""" +ltisys -- a collection of classes and functions for modeling linear +time invariant systems. +""" +from __future__ import division, print_function, absolute_import + +# +# Author: Travis Oliphant 2001 +# +# Feb 2010: Warren Weckesser +# Rewrote lsim2 and added impulse2. +# Apr 2011: Jeffrey Armstrong <jeff@approximatrix.com> +# Added dlsim, dstep, dimpulse, cont2discrete +# Aug 2013: Juan Luis Cano +# Rewrote abcd_normalize. +# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr +# Added pole placement +# Mar 2015: Clancy Rowley +# Rewrote lsim +# May 2015: Felix Berkenkamp +# Split lti class into subclasses +# Merged discrete systems and added dlti + +import warnings + +# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7 +# use scipy's qr until this is solved + +import scipy._lib.six as six +from scipy.linalg import qr as s_qr +from scipy import integrate, interpolate, linalg +from scipy.interpolate import interp1d +from scipy._lib.six import xrange +from .filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk, + freqz_zpk) +from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk, + cont2discrete) + +import numpy +import numpy as np +from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros, + dot, transpose, ones, zeros_like, linspace, nan_to_num) +import copy + +__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace', + 'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode', + 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse', + 'dfreqresp', 'dbode'] + + +class LinearTimeInvariant(object): + def __new__(cls, *system, **kwargs): + """Create a new object, don't allow direct instances.""" + if cls is LinearTimeInvariant: + raise NotImplementedError('The LinearTimeInvariant class is not ' + 'meant to be used directly, use `lti` ' + 'or `dlti` instead.') + return super(LinearTimeInvariant, cls).__new__(cls) + + def __init__(self): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. + """ + super(LinearTimeInvariant, self).__init__() + + self.inputs = None + self.outputs = None + self._dt = None + + @property + def dt(self): + """Return the sampling time of the system, `None` for `lti` systems.""" + return self._dt + + @property + def _dt_dict(self): + if self.dt is None: + return {} + else: + return {'dt': self.dt} + + @property + def zeros(self): + """Zeros of the system.""" + return self.to_zpk().zeros + + @property + def poles(self): + """Poles of the system.""" + return self.to_zpk().poles + + def _as_ss(self): + """Convert to `StateSpace` system, without copying. 
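A quick check of the `cont2discrete` routine above on a first-order lag ``H(s) = 1/(s + 1)``: under zero-order hold the discrete pole should land at ``exp(-dt)``, which is easy to verify by hand (a minimal sketch, assuming this version's tuple-in/tuple-out convention):

>>> import numpy as np
>>> from scipy.signal import cont2discrete
>>> numd, dend, dt = cont2discrete(([1.], [1., 1.]), dt=0.1, method='zoh')
>>> np.allclose(dend, [1., -np.exp(-0.1)])
True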
+ + Returns + ------- + sys: StateSpace + The `StateSpace` system. If the class is already an instance of + `StateSpace` then this instance is returned. + """ + if isinstance(self, StateSpace): + return self + else: + return self.to_ss() + + def _as_zpk(self): + """Convert to `ZerosPolesGain` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `ZerosPolesGain` system. If the class is already an instance of + `ZerosPolesGain` then this instance is returned. + """ + if isinstance(self, ZerosPolesGain): + return self + else: + return self.to_zpk() + + def _as_tf(self): + """Convert to `TransferFunction` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `TransferFunction` system. If the class is already an instance of + `TransferFunction` then this instance is returned. + """ + if isinstance(self, TransferFunction): + return self + else: + return self.to_tf() + + +class lti(LinearTimeInvariant): + """ + Continuous-time linear time invariant system base class. + + Parameters + ---------- + *system : arguments + The `lti` class can be instantiated with either 2, 3 or 4 arguments. + The following gives the number of arguments and the corresponding + continuous-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. + + See Also + -------- + ZerosPolesGain, StateSpace, TransferFunction, dlti + + Notes + ----- + `lti` instances do not exist directly. Instead, `lti` creates an instance + of one of its subclasses: `StateSpace`, `TransferFunction` or + `ZerosPolesGain`. + + If (numerator, denominator) is passed in for ``*system``, coefficients for + both the numerator and denominator should be specified in descending + exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3, + 5]``). + + Changing the value of properties that are not directly part of the current + system representation (such as the `zeros` of a `StateSpace` system) is + very inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> from scipy import signal + + >>> signal.lti(1, 2, 3, 4) + StateSpaceContinuous( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: None + ) + + >>> signal.lti([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + >>> signal.lti([3, 4], [1, 2]) + TransferFunctionContinuous( + array([3., 4.]), + array([1., 2.]), + dt: None + ) + + """ + def __new__(cls, *system): + """Create an instance of the appropriate subclass.""" + if cls is lti: + N = len(system) + if N == 2: + return TransferFunctionContinuous.__new__( + TransferFunctionContinuous, *system) + elif N == 3: + return ZerosPolesGainContinuous.__new__( + ZerosPolesGainContinuous, *system) + elif N == 4: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system) + else: + raise ValueError("`system` needs to be an instance of `lti` " + "or have 2, 3 or 4 arguments.") + # __new__ was called from a subclass, let it call its own functions + return super(lti, cls).__new__(cls) + + def __init__(self, *system): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. 
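The `__new__` dispatch defined above means the `lti` factory hands back the matching subclass directly; a minimal sketch:

>>> from scipy import signal
>>> sys = signal.lti([1.], [1., 2., 1.])   # two arguments -> transfer function form
>>> type(sys).__name__
'TransferFunctionContinuous'
>>> sys.poles
array([-1., -1.])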
+ """ + super(lti, self).__init__(*system) + + def impulse(self, X0=None, T=None, N=None): + """ + Return the impulse response of a continuous-time system. + See `impulse` for details. + """ + return impulse(self, X0=X0, T=T, N=N) + + def step(self, X0=None, T=None, N=None): + """ + Return the step response of a continuous-time system. + See `step` for details. + """ + return step(self, X0=X0, T=T, N=N) + + def output(self, U, T, X0=None): + """ + Return the response of a continuous-time system to input `U`. + See `lsim` for details. + """ + return lsim(self, U, T, X0=X0) + + def bode(self, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a continuous-time system. + + Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude + [dB] and phase [deg]. See `bode` for details. + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sys = signal.TransferFunction([1], [1, 1]) + >>> w, mag, phase = sys.bode() + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + return bode(self, w=w, n=n) + + def freqresp(self, w=None, n=10000): + """ + Calculate the frequency response of a continuous-time system. + + Returns a 2-tuple containing arrays of frequencies [rad/s] and + complex magnitude. + See `freqresp` for details. + """ + return freqresp(self, w=w, n=n) + + def to_discrete(self, dt, method='zoh', alpha=None): + """Return a discretized version of the current system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` + """ + raise NotImplementedError('to_discrete is not implemented for this ' + 'system class.') + + +class dlti(LinearTimeInvariant): + """ + Discrete-time linear time invariant system base class. + + Parameters + ---------- + *system: arguments + The `dlti` class can be instantiated with either 2, 3 or 4 arguments. + The following gives the number of arguments and the corresponding + discrete-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to ``True`` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + ZerosPolesGain, StateSpace, TransferFunction, lti + + Notes + ----- + `dlti` instances do not exist directly. Instead, `dlti` creates an instance + of one of its subclasses: `StateSpace`, `TransferFunction` or + `ZerosPolesGain`. + + Changing the value of properties that are not directly part of the current + system representation (such as the `zeros` of a `StateSpace` system) is + very inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + If (numerator, denominator) is passed in for ``*system``, coefficients for + both the numerator and denominator should be specified in descending + exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3, + 5]``). + + .. 
versionadded:: 0.18.0 + + Examples + -------- + >>> from scipy import signal + + >>> signal.dlti(1, 2, 3, 4) + StateSpaceDiscrete( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: True + ) + + >>> signal.dlti(1, 2, 3, 4, dt=0.1) + StateSpaceDiscrete( + array([[1]]), + array([[2]]), + array([[3]]), + array([[4]]), + dt: 0.1 + ) + + >>> signal.dlti([1, 2], [3, 4], 5, dt=0.1) + ZerosPolesGainDiscrete( + array([1, 2]), + array([3, 4]), + 5, + dt: 0.1 + ) + + >>> signal.dlti([3, 4], [1, 2], dt=0.1) + TransferFunctionDiscrete( + array([3., 4.]), + array([1., 2.]), + dt: 0.1 + ) + + """ + def __new__(cls, *system, **kwargs): + """Create an instance of the appropriate subclass.""" + if cls is dlti: + N = len(system) + if N == 2: + return TransferFunctionDiscrete.__new__( + TransferFunctionDiscrete, *system, **kwargs) + elif N == 3: + return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete, + *system, **kwargs) + elif N == 4: + return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, + **kwargs) + else: + raise ValueError("`system` needs to be an instance of `dlti` " + "or have 2, 3 or 4 arguments.") + # __new__ was called from a subclass, let it call its own functions + return super(dlti, cls).__new__(cls) + + def __init__(self, *system, **kwargs): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. + """ + dt = kwargs.pop('dt', True) + super(dlti, self).__init__(*system, **kwargs) + + self.dt = dt + + @property + def dt(self): + """Return the sampling time of the system.""" + return self._dt + + @dt.setter + def dt(self, dt): + self._dt = dt + + def impulse(self, x0=None, t=None, n=None): + """ + Return the impulse response of the discrete-time `dlti` system. + See `dimpulse` for details. + """ + return dimpulse(self, x0=x0, t=t, n=n) + + def step(self, x0=None, t=None, n=None): + """ + Return the step response of the discrete-time `dlti` system. + See `dstep` for details. + """ + return dstep(self, x0=x0, t=t, n=n) + + def output(self, u, t, x0=None): + """ + Return the response of the discrete-time system to input `u`. + See `dlsim` for details. + """ + return dlsim(self, u, t, x0=x0) + + def bode(self, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a discrete-time system. + + Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude + [dB] and phase [deg]. See `dbode` for details. + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Transfer function: H(z) = 1 / (z^2 + 2z + 3) with sampling time 0.5s + + >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5) + + Equivalent: signal.dbode(sys) + + >>> w, mag, phase = sys.bode() + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + return dbode(self, w=w, n=n) + + def freqresp(self, w=None, n=10000, whole=False): + """ + Calculate the frequency response of a discrete-time system. + + Returns a 2-tuple containing arrays of frequencies [rad/s] and + complex magnitude. + See `dfreqresp` for details. + + """ + return dfreqresp(self, w=w, n=n, whole=whole) + + +class TransferFunction(LinearTimeInvariant): + r"""Linear Time Invariant system class in transfer function form. 
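A discrete-time counterpart, exercising the `dlti` factory and its `step` wrapper around `dstep`; the values can be checked by iterating ``y[k+1] = 0.5*y[k] + u[k]`` by hand (a minimal sketch):

>>> import numpy as np
>>> from scipy import signal
>>> sys = signal.dlti([1.], [1., -0.5], dt=0.1)   # H(z) = 1 / (z - 0.5)
>>> t, y = sys.step(n=4)
>>> np.allclose(t, [0., 0.1, 0.2, 0.3])
True
>>> np.allclose(np.squeeze(y), [0., 1., 1.5, 1.75])
True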
+ + Represents the system as the continuous-time transfer function + :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the + discrete-time transfer function + :math:`H(s)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + `TransferFunction` systems inherit additional + functionality from the `lti`, respectively the `dlti` classes, depending on + which system representation is used. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + See Also + -------- + ZerosPolesGain, StateSpace, lti, dlti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be + represented as ``[1, 3, 5]``) + + Examples + -------- + Construct the transfer function: + + .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> from scipy import signal + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + >>> signal.TransferFunction(num, den) + TransferFunctionContinuous( + array([1., 3., 3.]), + array([1., 2., 1.]), + dt: None + ) + + Construct the transfer function with a sampling time of 0.1 seconds: + + .. 
math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1} + + >>> signal.TransferFunction(num, den, dt=0.1) + TransferFunctionDiscrete( + array([1., 3., 3.]), + array([1., 2., 1.]), + dt: 0.1 + ) + + """ + def __new__(cls, *system, **kwargs): + """Handle object conversion if input is an instance of lti.""" + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_tf() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is TransferFunction: + if kwargs.get('dt') is None: + return TransferFunctionContinuous.__new__( + TransferFunctionContinuous, + *system, + **kwargs) + else: + return TransferFunctionDiscrete.__new__( + TransferFunctionDiscrete, + *system, + **kwargs) + + # No special conversion needed + return super(TransferFunction, cls).__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the state space LTI system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + # Remove system arguments, not needed by parents anymore + super(TransferFunction, self).__init__(**kwargs) + + self._num = None + self._den = None + + self.num, self.den = normalize(*system) + + def __repr__(self): + """Return representation of the system's transfer function""" + return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format( + self.__class__.__name__, + repr(self.num), + repr(self.den), + repr(self.dt), + ) + + @property + def num(self): + """Numerator of the `TransferFunction` system.""" + return self._num + + @num.setter + def num(self, num): + self._num = atleast_1d(num) + + # Update dimensions + if len(self.num.shape) > 1: + self.outputs, self.inputs = self.num.shape + else: + self.outputs = 1 + self.inputs = 1 + + @property + def den(self): + """Denominator of the `TransferFunction` system.""" + return self._den + + @den.setter + def den(self, den): + self._den = atleast_1d(den) + + def _copy(self, system): + """ + Copy the parameters of another `TransferFunction` object + + Parameters + ---------- + system : `TransferFunction` + The `StateSpace` system that is to be copied + + """ + self.num = system.num + self.den = system.den + + def to_tf(self): + """ + Return a copy of the current `TransferFunction` system. + + Returns + ------- + sys : instance of `TransferFunction` + The current system (copy) + + """ + return copy.deepcopy(self) + + def to_zpk(self): + """ + Convert system representation to `ZerosPolesGain`. + + Returns + ------- + sys : instance of `ZerosPolesGain` + Zeros, poles, gain representation of the current system + + """ + return ZerosPolesGain(*tf2zpk(self.num, self.den), + **self._dt_dict) + + def to_ss(self): + """ + Convert system representation to `StateSpace`. + + Returns + ------- + sys : instance of `StateSpace` + State space model of the current system + + """ + return StateSpace(*tf2ss(self.num, self.den), + **self._dt_dict) + + @staticmethod + def _z_to_zinv(num, den): + """Change a transfer function from the variable `z` to `z**-1`. + + Parameters + ---------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree of 'z'. + That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. + + Returns + ------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of ascending degree of 'z**-1'. + That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. 
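Tying the conversion methods above together on the docstring's own coefficients (a minimal sketch; `to_zpk` simply defers to `tf2zpk`):

>>> from scipy import signal
>>> sys = signal.TransferFunction([1., 3., 3.], [1., 2., 1.])
>>> zpk = sys.to_zpk()
>>> zpk.poles
array([-1., -1.])
>>> zpk.gain
1.0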
+ """ + diff = len(num) - len(den) + if diff > 0: + den = np.hstack((np.zeros(diff), den)) + elif diff < 0: + num = np.hstack((np.zeros(-diff), num)) + return num, den + + @staticmethod + def _zinv_to_z(num, den): + """Change a transfer function from the variable `z` to `z**-1`. + + Parameters + ---------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of ascending degree of 'z**-1'. + That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. + + Returns + ------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree of 'z'. + That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. + """ + diff = len(num) - len(den) + if diff > 0: + den = np.hstack((den, np.zeros(diff))) + elif diff < 0: + num = np.hstack((num, np.zeros(-diff))) + return num, den + + +class TransferFunctionContinuous(TransferFunction, lti): + r""" + Continuous-time Linear Time Invariant system in transfer function form. + + Represents the system as the transfer function + :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + Continuous-time `TransferFunction` systems inherit additional + functionality from the `lti` class. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + + See Also + -------- + ZerosPolesGain, StateSpace, lti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g. ``s^2 + 3s + 5`` would be represented as + ``[1, 3, 5]``) + + Examples + -------- + Construct the transfer function: + + .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} + + >>> from scipy import signal + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + >>> signal.TransferFunction(num, den) + TransferFunctionContinuous( + array([ 1., 3., 3.]), + array([ 1., 2., 1.]), + dt: None + ) + + """ + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `TransferFunction` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `StateSpace` + """ + return TransferFunction(*cont2discrete((self.num, self.den), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class TransferFunctionDiscrete(TransferFunction, dlti): + r""" + Discrete-time Linear Time Invariant system in transfer function form. 
+ + Represents the system as the transfer function + :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + Discrete-time `TransferFunction` systems inherit additional functionality + from the `dlti` class. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + ZerosPolesGain, StateSpace, dlti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g., ``z^2 + 3z + 5`` would be represented as + ``[1, 3, 5]``). + + Examples + -------- + Construct the transfer function with a sampling time of 0.5 seconds: + + .. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1} + + >>> from scipy import signal + + >>> num = [1, 3, 3] + >>> den = [1, 2, 1] + + >>> signal.TransferFunction(num, den, 0.5) + TransferFunctionDiscrete( + array([ 1., 3., 3.]), + array([ 1., 2., 1.]), + dt: 0.5 + ) + + """ + pass + + +class ZerosPolesGain(LinearTimeInvariant): + r""" + Linear Time Invariant system class in zeros, poles, gain form. + + Represents the system as the continuous- or discrete-time transfer function + :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is + the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. + `ZerosPolesGain` systems inherit additional functionality from the `lti`, + respectively the `dlti` classes, depending on which system representation + is used. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + + See Also + -------- + TransferFunction, StateSpace, lti, dlti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. 
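A minimal sketch of `to_discrete` as defined above, using Tustin's method (``'bilinear'``); the result is a `TransferFunctionDiscrete` carrying the requested sampling time:

>>> from scipy import signal
>>> sys_c = signal.TransferFunction([1.], [1., 1.])   # H(s) = 1/(s + 1)
>>> sys_d = sys_c.to_discrete(dt=0.1, method='bilinear')
>>> type(sys_d).__name__
'TransferFunctionDiscrete'
>>> sys_d.dt
0.1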
+ + Examples + -------- + >>> from scipy import signal + + Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4) + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4) + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) + ZerosPolesGainDiscrete( + array([1, 2]), + array([3, 4]), + 5, + dt: 0.1 + ) + + """ + def __new__(cls, *system, **kwargs): + """Handle object conversion if input is an instance of `lti`""" + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_zpk() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is ZerosPolesGain: + if kwargs.get('dt') is None: + return ZerosPolesGainContinuous.__new__( + ZerosPolesGainContinuous, + *system, + **kwargs) + else: + return ZerosPolesGainDiscrete.__new__( + ZerosPolesGainDiscrete, + *system, + **kwargs + ) + + # No special conversion needed + return super(ZerosPolesGain, cls).__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the zeros, poles, gain system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + super(ZerosPolesGain, self).__init__(**kwargs) + + self._zeros = None + self._poles = None + self._gain = None + + self.zeros, self.poles, self.gain = system + + def __repr__(self): + """Return representation of the `ZerosPolesGain` system.""" + return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format( + self.__class__.__name__, + repr(self.zeros), + repr(self.poles), + repr(self.gain), + repr(self.dt), + ) + + @property + def zeros(self): + """Zeros of the `ZerosPolesGain` system.""" + return self._zeros + + @zeros.setter + def zeros(self, zeros): + self._zeros = atleast_1d(zeros) + + # Update dimensions + if len(self.zeros.shape) > 1: + self.outputs, self.inputs = self.zeros.shape + else: + self.outputs = 1 + self.inputs = 1 + + @property + def poles(self): + """Poles of the `ZerosPolesGain` system.""" + return self._poles + + @poles.setter + def poles(self, poles): + self._poles = atleast_1d(poles) + + @property + def gain(self): + """Gain of the `ZerosPolesGain` system.""" + return self._gain + + @gain.setter + def gain(self, gain): + self._gain = gain + + def _copy(self, system): + """ + Copy the parameters of another `ZerosPolesGain` system. + + Parameters + ---------- + system : instance of `ZerosPolesGain` + The zeros, poles gain system that is to be copied + + """ + self.poles = system.poles + self.zeros = system.zeros + self.gain = system.gain + + def to_tf(self): + """ + Convert system representation to `TransferFunction`. + + Returns + ------- + sys : instance of `TransferFunction` + Transfer function of the current system + + """ + return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain), + **self._dt_dict) + + def to_zpk(self): + """ + Return a copy of the current 'ZerosPolesGain' system. + + Returns + ------- + sys : instance of `ZerosPolesGain` + The current system (copy) + + """ + return copy.deepcopy(self) + + def to_ss(self): + """ + Convert system representation to `StateSpace`. + + Returns + ------- + sys : instance of `StateSpace` + State space model of the current system + + """ + return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain), + **self._dt_dict) + + +class ZerosPolesGainContinuous(ZerosPolesGain, lti): + r""" + Continuous-time Linear Time Invariant system in zeros, poles, gain form. 
+ + Represents the system as the continuous time transfer function + :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is + the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. + Continuous-time `ZerosPolesGain` systems inherit additional functionality + from the `lti` class. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + + See Also + -------- + TransferFunction, StateSpace, lti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + Examples + -------- + >>> from scipy import signal + + Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4) + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + """ + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `ZerosPolesGain` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `ZerosPolesGain` + """ + return ZerosPolesGain( + *cont2discrete((self.zeros, self.poles, self.gain), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class ZerosPolesGainDiscrete(ZerosPolesGain, dlti): + r""" + Discrete-time Linear Time Invariant system in zeros, poles, gain form. + + Represents the system as the discrete-time transfer function + :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is + the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. + Discrete-time `ZerosPolesGain` systems inherit additional functionality + from the `dlti` class. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + TransferFunction, StateSpace, dlti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. 
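A minimal sketch of the zpk-to-polynomial path above: with gain 2, one zero at -1 and poles at -5 and -10, the expected polynomials follow from expanding the products:

>>> import numpy as np
>>> from scipy import signal
>>> sys = signal.ZerosPolesGain([-1.], [-5., -10.], 2.)
>>> tf = sys.to_tf()
>>> np.allclose(tf.num, [2., 2.])
True
>>> np.allclose(tf.den, [1., 15., 50.])
True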
+ + Examples + -------- + >>> from scipy import signal + + Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4) + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4) + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) + ZerosPolesGainDiscrete( + array([1, 2]), + array([3, 4]), + 5, + dt: 0.1 + ) + + """ + pass + + +def _atleast_2d_or_none(arg): + if arg is not None: + return atleast_2d(arg) + + +class StateSpace(LinearTimeInvariant): + r""" + Linear Time Invariant system in state-space form. + + Represents the system as the continuous-time, first order differential + equation :math:`\dot{x} = A x + B u` or the discrete-time difference + equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems + inherit additional functionality from the `lti`, respectively the `dlti` + classes, depending on which system representation is used. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 3 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + See Also + -------- + TransferFunction, ZerosPolesGain, lti, dlti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> from scipy import signal + + >>> a = np.array([[0, 1], [0, 0]]) + >>> b = np.array([[0], [1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> sys = signal.StateSpace(a, b, c, d) + >>> print(sys) + StateSpaceContinuous( + array([[0, 1], + [0, 0]]), + array([[0], + [1]]), + array([[1, 0]]), + array([[0]]), + dt: None + ) + + >>> sys.to_discrete(0.1) + StateSpaceDiscrete( + array([[1. , 0.1], + [0. , 1. ]]), + array([[0.005], + [0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + >>> a = np.array([[1, 0.1], [0, 1]]) + >>> b = np.array([[0.005], [0.1]]) + + >>> signal.StateSpace(a, b, c, d, dt=0.1) + StateSpaceDiscrete( + array([[1. , 0.1], + [0. , 1. 
]]), + array([[0.005], + [0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + """ + + # Override Numpy binary operations and ufuncs + __array_priority__ = 100.0 + __array_ufunc__ = None + + def __new__(cls, *system, **kwargs): + """Create new StateSpace object and settle inheritance.""" + # Handle object conversion if input is an instance of `lti` + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_ss() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is StateSpace: + if kwargs.get('dt') is None: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system, **kwargs) + else: + return StateSpaceDiscrete.__new__(StateSpaceDiscrete, + *system, **kwargs) + + # No special conversion needed + return super(StateSpace, cls).__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the state space lti/dlti system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + # Remove system arguments, not needed by parents anymore + super(StateSpace, self).__init__(**kwargs) + + self._A = None + self._B = None + self._C = None + self._D = None + + self.A, self.B, self.C, self.D = abcd_normalize(*system) + + def __repr__(self): + """Return representation of the `StateSpace` system.""" + return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format( + self.__class__.__name__, + repr(self.A), + repr(self.B), + repr(self.C), + repr(self.D), + repr(self.dt), + ) + + def _check_binop_other(self, other): + return isinstance(other, (StateSpace, np.ndarray, float, complex, + np.number) + six.integer_types) + + def __mul__(self, other): + """ + Post-multiply another system or a scalar + + Handles multiplication of systems in the sense of a frequency domain + multiplication. That means, given two systems E1(s) and E2(s), their + multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s) + is equivalent to first applying E2(s), and then E1(s). + + Notes + ----- + For SISO systems the order of system application does not matter. + However, for MIMO systems, where the two systems are matrices, the + order above ensures standard Matrix multiplication rules apply. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. 
+ if type(other) is not type(self): + return NotImplemented + + if self.dt != other.dt: + raise TypeError('Cannot multiply systems with different `dt`.') + + n1 = self.A.shape[0] + n2 = other.A.shape[0] + + # Interconnection of systems + # x1' = A1 x1 + B1 u1 + # y1 = C1 x1 + D1 u1 + # x2' = A2 x2 + B2 y1 + # y2 = C2 x2 + D2 y1 + # + # Plugging in with u1 = y2 yields + # [x1'] [A1 B1*C2 ] [x1] [B1*D2] + # [x2'] = [0 A2 ] [x2] + [B2 ] u2 + # [x1] + # y2 = [C1 D1*C2] [x2] + D1*D2 u2 + a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))), + np.hstack((zeros((n2, n1)), other.A)))) + b = np.vstack((np.dot(self.B, other.D), other.B)) + c = np.hstack((self.C, np.dot(self.D, other.C))) + d = np.dot(self.D, other.D) + else: + # Assume that other is a scalar / matrix + # For post multiplication the input gets scaled + a = self.A + b = np.dot(self.B, other) + c = self.C + d = np.dot(self.D, other) + + common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ()) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype)) + + def __rmul__(self, other): + """Pre-multiply a scalar or matrix (but not StateSpace)""" + if not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + # For pre-multiplication only the output gets scaled + a = self.A + b = self.B + c = np.dot(other, self.C) + d = np.dot(other, self.D) + + common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ()) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype)) + + def __neg__(self): + """Negate the system (equivalent to pre-multiplying by -1).""" + return StateSpace(self.A, self.B, -self.C, -self.D) + + def __add__(self, other): + """ + Adds two systems in the sense of frequency domain addition. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. 
+ if type(other) is not type(self): + raise TypeError('Cannot add {} and {}'.format(type(self), + type(other))) + + if self.dt != other.dt: + raise TypeError('Cannot add systems with different `dt`.') + # Interconnection of systems + # x1' = A1 x1 + B1 u + # y1 = C1 x1 + D1 u + # x2' = A2 x2 + B2 u + # y2 = C2 x2 + D2 u + # y = y1 + y2 + # + # Plugging in yields + # [x1'] [A1 0 ] [x1] [B1] + # [x2'] = [0 A2] [x2] + [B2] u + # [x1] + # y = [C1 C2] [x2] + [D1 + D2] u + a = linalg.block_diag(self.A, other.A) + b = np.vstack((self.B, other.B)) + c = np.hstack((self.C, other.C)) + d = self.D + other.D + else: + other = np.atleast_2d(other) + if self.D.shape == other.shape: + # A scalar/matrix is really just a static system (A=0, B=0, C=0) + a = self.A + b = self.B + c = self.C + d = self.D + other + else: + raise ValueError("Cannot add systems with incompatible dimensions") + + common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ()) + return StateSpace(np.asarray(a, dtype=common_dtype), + np.asarray(b, dtype=common_dtype), + np.asarray(c, dtype=common_dtype), + np.asarray(d, dtype=common_dtype)) + + def __sub__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(-other) + + def __radd__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(other) + + def __rsub__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return (-self).__add__(other) + + def __truediv__(self, other): + """ + Divide by a scalar + """ + # Division by non-StateSpace scalars + if not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + if isinstance(other, np.ndarray) and other.ndim > 0: + # It's ambiguous what this means, so disallow it + raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays") + + return self.__mul__(1/other) + + @property + def A(self): + """State matrix of the `StateSpace` system.""" + return self._A + + @A.setter + def A(self, A): + self._A = _atleast_2d_or_none(A) + + @property + def B(self): + """Input matrix of the `StateSpace` system.""" + return self._B + + @B.setter + def B(self, B): + self._B = _atleast_2d_or_none(B) + self.inputs = self.B.shape[-1] + + @property + def C(self): + """Output matrix of the `StateSpace` system.""" + return self._C + + @C.setter + def C(self, C): + self._C = _atleast_2d_or_none(C) + self.outputs = self.C.shape[0] + + @property + def D(self): + """Feedthrough matrix of the `StateSpace` system.""" + return self._D + + @D.setter + def D(self, D): + self._D = _atleast_2d_or_none(D) + + def _copy(self, system): + """ + Copy the parameters of another `StateSpace` system. + + Parameters + ---------- + system : instance of `StateSpace` + The state-space system that is to be copied + + """ + self.A = system.A + self.B = system.B + self.C = system.C + self.D = system.D + + def to_tf(self, **kwargs): + """ + Convert system representation to `TransferFunction`. + + Parameters + ---------- + kwargs : dict, optional + Additional keywords passed to `ss2zpk` + + Returns + ------- + sys : instance of `TransferFunction` + Transfer function of the current system + + """ + return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D, + **kwargs), **self._dt_dict) + + def to_zpk(self, **kwargs): + """ + Convert system representation to `ZerosPolesGain`. 
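The operator overloads above compose systems in the frequency domain; with the lag ``G(s) = 1/(s + 1)``, a series connection squares the denominator while a parallel connection doubles the numerator (a minimal sketch):

>>> import numpy as np
>>> from scipy import signal
>>> G = signal.StateSpace([[-1.]], [[1.]], [[1.]], [[0.]])
>>> np.allclose((G * G).to_tf().den, [1., 2., 1.])    # 1/(s + 1)**2
True
>>> np.allclose((G + G).to_tf().num, [[0., 2., 2.]])  # 2/(s + 1)
True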
+
+        Parameters
+        ----------
+        kwargs : dict, optional
+            Additional keywords passed to `ss2zpk`
+
+        Returns
+        -------
+        sys : instance of `ZerosPolesGain`
+            Zeros, poles, gain representation of the current system
+
+        """
+        return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
+                                      **kwargs), **self._dt_dict)
+
+    def to_ss(self):
+        """
+        Return a copy of the current `StateSpace` system.
+
+        Returns
+        -------
+        sys : instance of `StateSpace`
+            The current system (copy)
+
+        """
+        return copy.deepcopy(self)
+
+
+class StateSpaceContinuous(StateSpace, lti):
+    r"""
+    Continuous-time Linear Time Invariant system in state-space form.
+
+    Represents the system as the continuous-time, first order differential
+    equation :math:`\dot{x} = A x + B u`.
+    Continuous-time `StateSpace` systems inherit additional functionality
+    from the `lti` class.
+
+    Parameters
+    ----------
+    *system: arguments
+        The `StateSpace` class can be instantiated with 1 or 4 arguments.
+        The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 4: array_like: (A, B, C, D)
+
+    See Also
+    --------
+    TransferFunction, ZerosPolesGain, lti
+    ss2zpk, ss2tf, zpk2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `StateSpace` system representation (such as `zeros` or `poles`) is very
+    inefficient and may lead to numerical inaccuracies. It is better to
+    convert to the specific system representation first. For example, call
+    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
+
+    Examples
+    --------
+    >>> from scipy import signal
+
+    >>> a = np.array([[0, 1], [0, 0]])
+    >>> b = np.array([[0], [1]])
+    >>> c = np.array([[1, 0]])
+    >>> d = np.array([[0]])
+
+    >>> sys = signal.StateSpace(a, b, c, d)
+    >>> print(sys)
+    StateSpaceContinuous(
+    array([[0, 1],
+           [0, 0]]),
+    array([[0],
+           [1]]),
+    array([[1, 0]]),
+    array([[0]]),
+    dt: None
+    )
+
+    """
+    def to_discrete(self, dt, method='zoh', alpha=None):
+        """
+        Returns the discretized `StateSpace` system.
+
+        Parameters: See `cont2discrete` for details.
+
+        Returns
+        -------
+        sys: instance of `dlti` and `StateSpace`
+        """
+        return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D),
+                                         dt,
+                                         method=method,
+                                         alpha=alpha)[:-1],
+                          dt=dt)
+
+
+class StateSpaceDiscrete(StateSpace, dlti):
+    r"""
+    Discrete-time Linear Time Invariant system in state-space form.
+
+    Represents the system as the discrete-time difference equation
+    :math:`x[k+1] = A x[k] + B u[k]`.
+    `StateSpace` systems inherit additional functionality from the `dlti`
+    class.
+
+    Parameters
+    ----------
+    *system: arguments
+        The `StateSpace` class can be instantiated with 1 or 4 arguments.
+        The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 4: array_like: (A, B, C, D)
+    dt: float, optional
+        Sampling time [s] of the discrete-time systems. Defaults to `True`
+        (unspecified sampling time). Must be specified as a keyword argument,
+        for example, ``dt=0.1``.
+
+    See Also
+    --------
+    TransferFunction, ZerosPolesGain, dlti
+    ss2zpk, ss2tf, zpk2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `StateSpace` system representation (such as `zeros` or `poles`) is very
+    inefficient and may lead to numerical inaccuracies. It is better to
+    convert to the specific system representation first.
For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + Examples + -------- + >>> from scipy import signal + + >>> a = np.array([[1, 0.1], [0, 1]]) + >>> b = np.array([[0.005], [0.1]]) + >>> c = np.array([[1, 0]]) + >>> d = np.array([[0]]) + + >>> signal.StateSpace(a, b, c, d, dt=0.1) + StateSpaceDiscrete( + array([[ 1. , 0.1], + [ 0. , 1. ]]), + array([[ 0.005], + [ 0.1 ]]), + array([[1, 0]]), + array([[0]]), + dt: 0.1 + ) + + """ + pass + + +def lsim2(system, U=None, T=None, X0=None, **kwargs): + """ + Simulate output of a continuous-time linear system, by using + the ODE solver `scipy.integrate.odeint`. + + Parameters + ---------- + system : an instance of the `lti` class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + U : array_like (1D or 2D), optional + An input array describing the input at each time T. Linear + interpolation is used between given times. If there are + multiple inputs, then each column of the rank-2 array + represents an input. If U is not given, the input is assumed + to be zero. + T : array_like (1D or 2D), optional + The time steps at which the input is defined and at which the + output is desired. The default is 101 evenly spaced points on + the interval [0,10.0]. + X0 : array_like (1D), optional + The initial condition of the state vector. If `X0` is not + given, the initial conditions are assumed to be 0. + kwargs : dict + Additional keyword arguments are passed on to the function + `odeint`. See the notes below for more details. + + Returns + ------- + T : 1D ndarray + The time values for the output. + yout : ndarray + The response of the system. + xout : ndarray + The time-evolution of the state-vector. + + Notes + ----- + This function uses `scipy.integrate.odeint` to solve the + system's differential equations. Additional keyword arguments + given to `lsim2` are passed on to `odeint`. See the documentation + for `scipy.integrate.odeint` for the full list of arguments. + + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('lsim2 can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + + if X0 is None: + X0 = zeros(sys.B.shape[0], sys.A.dtype) + + if T is None: + # XXX T should really be a required argument, but U was + # changed from a required positional argument to a keyword, + # and T is after U in the argument list. So we either: change + # the API and move T in front of U; check here for T being + # None and raise an exception; or assign a default value to T + # here. This code implements the latter. 
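+        # The default grid below matches the docstring: 101 evenly spaced
+        # samples on the interval [0, 10].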
+ T = linspace(0, 10.0, 101) + + T = atleast_1d(T) + if len(T.shape) != 1: + raise ValueError("T must be a rank-1 array.") + + if U is not None: + U = atleast_1d(U) + if len(U.shape) == 1: + U = U.reshape(-1, 1) + sU = U.shape + if sU[0] != len(T): + raise ValueError("U must have the same number of rows " + "as elements in T.") + + if sU[1] != sys.inputs: + raise ValueError("The number of inputs in U (%d) is not " + "compatible with the number of system " + "inputs (%d)" % (sU[1], sys.inputs)) + # Create a callable that uses linear interpolation to + # calculate the input at any time. + ufunc = interpolate.interp1d(T, U, kind='linear', + axis=0, bounds_error=False) + + def fprime(x, t, sys, ufunc): + """The vector field of the linear system.""" + return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t])))) + xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs) + yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U)) + else: + def fprime(x, t, sys): + """The vector field of the linear system.""" + return dot(sys.A, x) + xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs) + yout = dot(sys.C, transpose(xout)) + + return T, squeeze(transpose(yout)), xout + + +def _cast_to_array_dtype(in1, in2): + """Cast array to dtype of other array, while avoiding ComplexWarning. + + Those can be raised when casting complex to real. + """ + if numpy.issubdtype(in2.dtype, numpy.float): + # dtype to cast to is not complex, so use .real + in1 = in1.real.astype(in2.dtype) + else: + in1 = in1.astype(in2.dtype) + + return in1 + + +def lsim(system, U, T, X0=None, interp=True): + """ + Simulate output of a continuous-time linear system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + U : array_like + An input array describing the input at each time `T` + (interpolation is assumed between given times). If there are + multiple inputs, then each column of the rank-2 array + represents an input. If U = 0 or None, a zero input is used. + T : array_like + The time steps at which the input is defined and at which the + output is desired. Must be nonnegative, increasing, and equally spaced. + X0 : array_like, optional + The initial conditions on the state vector (zero by default). + interp : bool, optional + Whether to use linear (True, the default) or zero-order-hold (False) + interpolation for the input array. + + Returns + ------- + T : 1D ndarray + Time values for the output. + yout : 1D ndarray + System response. + xout : ndarray + Time evolution of the state vector. + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + Examples + -------- + Simulate a double integrator y'' = u, with a constant input u = 1 + + >>> from scipy import signal + >>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.) 
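+
+    (The quadruple above encodes x1' = x2, x2' = u, y = x1, a double
+    integrator, so with u = 1 and zero initial state the response should
+    follow y(t) = t**2 / 2.)
+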
+ >>> t = np.linspace(0, 5) + >>> u = np.ones_like(t) + >>> tout, y, x = signal.lsim(system, u, t) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, y) + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('lsim can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + T = atleast_1d(T) + if len(T.shape) != 1: + raise ValueError("T must be a rank-1 array.") + + A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D)) + n_states = A.shape[0] + n_inputs = B.shape[1] + + n_steps = T.size + if X0 is None: + X0 = zeros(n_states, sys.A.dtype) + xout = zeros((n_steps, n_states), sys.A.dtype) + + if T[0] == 0: + xout[0] = X0 + elif T[0] > 0: + # step forward to initial time, with zero input + xout[0] = dot(X0, linalg.expm(transpose(A) * T[0])) + else: + raise ValueError("Initial time must be nonnegative") + + no_input = (U is None or + (isinstance(U, (int, float)) and U == 0.) or + not np.any(U)) + + if n_steps == 1: + yout = squeeze(dot(xout, transpose(C))) + if not no_input: + yout += squeeze(dot(U, transpose(D))) + return T, squeeze(yout), squeeze(xout) + + dt = T[1] - T[0] + if not np.allclose((T[1:] - T[:-1]) / dt, 1.0): + warnings.warn("Non-uniform timesteps are deprecated. Results may be " + "slow and/or inaccurate.", DeprecationWarning) + return lsim2(system, U, T, X0) + + if no_input: + # Zero input: just use matrix exponential + # take transpose because state is a row vector + expAT_dt = linalg.expm(transpose(A) * dt) + for i in xrange(1, n_steps): + xout[i] = dot(xout[i-1], expAT_dt) + yout = squeeze(dot(xout, transpose(C))) + return T, squeeze(yout), squeeze(xout) + + # Nonzero input + U = atleast_1d(U) + if U.ndim == 1: + U = U[:, np.newaxis] + + if U.shape[0] != n_steps: + raise ValueError("U must have the same number of rows " + "as elements in T.") + + if U.shape[1] != n_inputs: + raise ValueError("System does not define that many inputs.") + + if not interp: + # Zero-order hold + # Algorithm: to integrate from time 0 to time dt, we solve + # xdot = A x + B u, x(0) = x0 + # udot = 0, u(0) = u0. + # + # Solution is + # [ x(dt) ] [ A*dt B*dt ] [ x0 ] + # [ u(dt) ] = exp [ 0 0 ] [ u0 ] + M = np.vstack([np.hstack([A * dt, B * dt]), + np.zeros((n_inputs, n_states + n_inputs))]) + # transpose everything because the state and input are row vectors + expMT = linalg.expm(transpose(M)) + Ad = expMT[:n_states, :n_states] + Bd = expMT[n_states:, :n_states] + for i in xrange(1, n_steps): + xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd) + else: + # Linear interpolation between steps + # Algorithm: to integrate from time 0 to time dt, with linear + # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve + # xdot = A x + B u, x(0) = x0 + # udot = (u1 - u0) / dt, u(0) = u0. 
+ # + # Solution is + # [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ] + # [ u(dt) ] = exp [ 0 0 I ] [ u0 ] + # [u1 - u0] [ 0 0 0 ] [u1 - u0] + M = np.vstack([np.hstack([A * dt, B * dt, + np.zeros((n_states, n_inputs))]), + np.hstack([np.zeros((n_inputs, n_states + n_inputs)), + np.identity(n_inputs)]), + np.zeros((n_inputs, n_states + 2 * n_inputs))]) + expMT = linalg.expm(transpose(M)) + Ad = expMT[:n_states, :n_states] + Bd1 = expMT[n_states+n_inputs:, :n_states] + Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1 + for i in xrange(1, n_steps): + xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1)) + + yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D)))) + return T, squeeze(yout), squeeze(xout) + + +def _default_response_times(A, n): + """Compute a reasonable set of time samples for the response time. + + This function is used by `impulse`, `impulse2`, `step` and `step2` + to compute the response time when the `T` argument to the function + is None. + + Parameters + ---------- + A : array_like + The system matrix, which is square. + n : int + The number of time samples to generate. + + Returns + ------- + t : ndarray + The 1-D array of length `n` of time samples at which the response + is to be computed. + """ + # Create a reasonable time interval. + # TODO: This could use some more work. + # For example, what is expected when the system is unstable? + vals = linalg.eigvals(A) + r = min(abs(real(vals))) + if r == 0.0: + r = 1.0 + tc = 1.0 / r + t = linspace(0.0, 7 * tc, n) + return t + + +def impulse(system, X0=None, T=None, N=None): + """Impulse response of continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : array_like, optional + Initial state-vector. Defaults to zero. + T : array_like, optional + Time points. Computed if not given. + N : int, optional + The number of time points to compute (if `T` is not given). + + Returns + ------- + T : ndarray + A 1-D array of time points. + yout : ndarray + A 1-D array containing the impulse response of the system (except for + singularities at zero). + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('impulse can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + if X0 is None: + X = squeeze(sys.B) + else: + X = squeeze(sys.B + X0) + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + else: + T = asarray(T) + + _, h, _ = lsim(sys, 0., T, X, interp=False) + return T, h + + +def impulse2(system, X0=None, T=None, N=None, **kwargs): + """ + Impulse response of a single-input, continuous-time linear system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : 1-D array_like, optional + The initial condition of the state vector. 
Default: 0 (the + zero vector). + T : 1-D array_like, optional + The time steps at which the input is defined and at which the + output is desired. If `T` is not given, the function will + generate a set of time samples automatically. + N : int, optional + Number of time points to compute. Default: 100. + kwargs : various types + Additional keyword arguments are passed on to the function + `scipy.signal.lsim2`, which in turn passes them on to + `scipy.integrate.odeint`; see the latter's documentation for + information about these arguments. + + Returns + ------- + T : ndarray + The time values for the output. + yout : ndarray + The output response of the system. + + See Also + -------- + impulse, lsim2, integrate.odeint + + Notes + ----- + The solution is generated by calling `scipy.signal.lsim2`, which uses + the differential equation solver `scipy.integrate.odeint`. + + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + .. versionadded:: 0.8.0 + + Examples + -------- + Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t) + + >>> from scipy import signal + >>> system = ([1.0], [1.0, 2.0, 1.0]) + >>> t, y = signal.impulse2(system) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, y) + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('impulse2 can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + B = sys.B + if B.shape[-1] != 1: + raise ValueError("impulse2() requires a single-input system.") + B = B.squeeze() + if X0 is None: + X0 = zeros_like(B) + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + + # Move the impulse in the input to the initial conditions, and then + # solve using lsim2(). + ic = B + X0 + Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs) + return Tr, Yr + + +def step(system, X0=None, T=None, N=None): + """Step response of continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : array_like, optional + Initial state-vector (default is zero). + T : array_like, optional + Time points (computed if not given). + N : int, optional + Number of time points to compute if `T` is not given. + + Returns + ------- + T : 1D ndarray + Output time points. + yout : 1D ndarray + Step response of system. + + See also + -------- + scipy.signal.step2 + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('step can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + else: + T = asarray(T) + U = ones(T.shape, sys.A.dtype) + vals = lsim(sys, U, T, X0=X0, interp=False) + return vals[0], vals[1] + + +def step2(system, X0=None, T=None, N=None, **kwargs): + """Step response of continuous-time system. 
+ + This function is functionally the same as `scipy.signal.step`, but + it uses the function `scipy.signal.lsim2` to compute the step + response. + + Parameters + ---------- + system : an instance of the LTI class or a tuple of array_like + describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + X0 : array_like, optional + Initial state-vector (default is zero). + T : array_like, optional + Time points (computed if not given). + N : int, optional + Number of time points to compute if `T` is not given. + kwargs : various types + Additional keyword arguments are passed on the function + `scipy.signal.lsim2`, which in turn passes them on to + `scipy.integrate.odeint`. See the documentation for + `scipy.integrate.odeint` for information about these arguments. + + Returns + ------- + T : 1D ndarray + Output time points. + yout : 1D ndarray + Step response of system. + + See also + -------- + scipy.signal.step + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + .. versionadded:: 0.8.0 + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('step2 can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + if N is None: + N = 100 + if T is None: + T = _default_response_times(sys.A, N) + else: + T = asarray(T) + U = ones(T.shape, sys.A.dtype) + vals = lsim2(sys, U, T, X0=X0, **kwargs) + return vals[0], vals[1] + + +def bode(system, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a continuous-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `lti`) + * 2 (num, den) + * 3 (zeros, poles, gain) + * 4 (A, B, C, D) + + w : array_like, optional + Array of frequencies (in rad/s). Magnitude and phase data is calculated + for every value in this array. If not given a reasonable set will be + calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Frequency array [rad/s] + mag : 1D ndarray + Magnitude array [dB] + phase : 1D ndarray + Phase array [deg] + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). + + .. 
versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> sys = signal.TransferFunction([1], [1, 1])
+    >>> w, mag, phase = signal.bode(sys)
+
+    >>> plt.figure()
+    >>> plt.semilogx(w, mag)    # Bode magnitude plot
+    >>> plt.figure()
+    >>> plt.semilogx(w, phase)  # Bode phase plot
+    >>> plt.show()
+
+    """
+    w, y = freqresp(system, w=w, n=n)
+
+    mag = 20.0 * numpy.log10(abs(y))
+    phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
+
+    return w, mag, phase
+
+
+def freqresp(system, w=None, n=10000):
+    """Calculate the frequency response of a continuous-time system.
+
+    Parameters
+    ----------
+    system : an instance of the `lti` class or a tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `lti`)
+            * 2 (num, den)
+            * 3 (zeros, poles, gain)
+            * 4 (A, B, C, D)
+
+    w : array_like, optional
+        Array of frequencies (in rad/s). Magnitude and phase data is
+        calculated for every value in this array. If not given, a reasonable
+        set will be calculated.
+    n : int, optional
+        Number of frequency points to compute if `w` is not given. The `n`
+        frequencies are logarithmically spaced in an interval chosen to
+        include the influence of the poles and zeros of the system.
+
+    Returns
+    -------
+    w : 1D ndarray
+        Frequency array [rad/s]
+    H : 1D ndarray
+        Array of complex magnitude values
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
+
+    Examples
+    --------
+    Generating the Nyquist plot of a transfer function
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    Transfer function: H(s) = 5 / (s-1)^3
+
+    >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])
+
+    >>> w, H = signal.freqresp(s1)
+
+    >>> plt.figure()
+    >>> plt.plot(H.real, H.imag, "b")
+    >>> plt.plot(H.real, -H.imag, "r")
+    >>> plt.show()
+    """
+    if isinstance(system, lti):
+        if isinstance(system, (TransferFunction, ZerosPolesGain)):
+            sys = system
+        else:
+            sys = system._as_zpk()
+    elif isinstance(system, dlti):
+        raise AttributeError('freqresp can only be used with continuous-time '
+                             'systems.')
+    else:
+        sys = lti(*system)._as_zpk()
+
+    if sys.inputs != 1 or sys.outputs != 1:
+        raise ValueError("freqresp() requires a SISO (single input, single "
+                         "output) system.")
+
+    if w is not None:
+        worN = w
+    else:
+        worN = n
+
+    if isinstance(sys, TransferFunction):
+        # In the call to freqs(), sys.num.ravel() is used because there are
+        # cases where sys.num is a 2-D array with a single row.
+        w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
+
+    elif isinstance(sys, ZerosPolesGain):
+        w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)
+
+    return w, h
+
+
+# This class will be used by place_poles to return its results
+# see https://code.activestate.com/recipes/52308/
+class Bunch:
+    def __init__(self, **kwds):
+        self.__dict__.update(kwds)
+
+
+def _valid_inputs(A, B, poles, method, rtol, maxiter):
+    """
+    Check the poles come in complex conjugate pairs
+    Check shapes of A, B and poles are compatible.
+    Check the method chosen is compatible with provided poles
+    Return update method to use and ordered poles
+
+    """
+    poles = np.asarray(poles)
+    if poles.ndim > 1:
+        raise ValueError("Poles must be a 1-D array_like.")
+    # Will raise ValueError if poles do not come in complex conjugate pairs
+    poles = _order_complex_poles(poles)
+    if A.ndim > 2:
+        raise ValueError("A must be a 2D array/matrix.")
+    if B.ndim > 2:
+        raise ValueError("B must be a 2D array/matrix")
+    if A.shape[0] != A.shape[1]:
+        raise ValueError("A must be square")
+    if len(poles) > A.shape[0]:
+        raise ValueError("maximum number of poles is %d but you asked for %d" %
+                         (A.shape[0], len(poles)))
+    if len(poles) < A.shape[0]:
+        raise ValueError("number of poles is %d but you should provide %d" %
+                         (len(poles), A.shape[0]))
+    r = np.linalg.matrix_rank(B)
+    for p in poles:
+        if sum(p == poles) > r:
+            raise ValueError("at least one of the requested poles is repeated "
+                             "more than rank(B) times")
+    # Choose update method
+    update_loop = _YT_loop
+    if method not in ('KNV0', 'YT'):
+        raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
+
+    if method == "KNV0":
+        update_loop = _KNV0_loop
+        if not all(np.isreal(poles)):
+            raise ValueError("Complex poles are not supported by KNV0")
+
+    if maxiter < 1:
+        raise ValueError("maxiter must be at least equal to 1")
+
+    # We do not check rtol <= 0 as the user can use a negative rtol to
+    # force maxiter iterations
+    if rtol > 1:
+        raise ValueError("rtol cannot be greater than 1")
+
+    return update_loop, poles
+
+
+def _order_complex_poles(poles):
+    """
+    Check we have complex conjugate pairs and reorder P according to YT, i.e.
+    real_poles, complex_i, conjugate complex_i, ....
+    The lexicographic sort on the complex poles is added to help the user to
+    compare sets of poles.
+    """
+    ordered_poles = np.sort(poles[np.isreal(poles)])
+    im_poles = []
+    for p in np.sort(poles[np.imag(poles) < 0]):
+        if np.conj(p) in poles:
+            im_poles.extend((p, np.conj(p)))
+
+    ordered_poles = np.hstack((ordered_poles, im_poles))
+
+    if poles.shape[0] != len(ordered_poles):
+        raise ValueError("Complex poles must come with their conjugates")
+    return ordered_poles
+
+
+def _KNV0(B, ker_pole, transfer_matrix, j, poles):
+    """
+    Algorithm "KNV0" from Kautsky et al., "Robust pole assignment in linear
+    state feedback", International Journal of Control, 1985, vol. 41,
+    pp. 1129-1155
+    https://la.epfl.ch/files/content/sites/la/files/
+        users/105941/public/KautskyNicholsDooren
+
+    """
+    # Remove xj from the basis
+    transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
+    # If we QR this matrix in full mode Q=Q0|Q1
+    # then Q1 will be a single column orthogonal to
+    # Q0, that's what we are looking for!
+
+    # After merge of gh-4249 great speed improvements could be achieved
+    # using QR updates instead of full QR in the line below
+
+    # To debug with numpy qr uncomment the line below
+    # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
+    Q, R = s_qr(transfer_matrix_not_j, mode="full")
+
+    mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
+    yj = np.dot(mat_ker_pj, Q[:, -1])
+
+    # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
+    # projection into ker_pole[j] will yield a vector
+    # close to 0. As we are looking for a vector in ker_pole[j]
+    # simply stick with transfer_matrix[:, j] (unless someone provides me
+    # with a better choice?)
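+    # Note: the columns of ker_pole[j] are orthonormal (they come out of a
+    # QR decomposition), so mat_ker_pj = K * K.T above is the orthogonal
+    # projector onto span(ker_pole[j]); yj is therefore the projection of
+    # Q[:, -1] onto that subspace.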
+ + if not np.allclose(yj, 0): + xj = yj/np.linalg.norm(yj) + transfer_matrix[:, j] = xj + + # KNV does not support complex poles, using YT technique the two lines + # below seem to work 9 out of 10 times but it is not reliable enough: + # transfer_matrix[:, j]=real(xj) + # transfer_matrix[:, j+1]=imag(xj) + + # Add this at the beginning of this function if you wish to test + # complex support: + # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])): + # return + # Problems arise when imag(xj)=>0 I have no idea on how to fix this + + +def _YT_real(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.1 page 19 related to real pairs + """ + # step 1 page 19 + u = Q[:, -2, np.newaxis] + v = Q[:, -1, np.newaxis] + + # step 2 page 19 + m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) - + np.dot(v, u.T)), ker_pole[j]) + + # step 3 page 19 + um, sm, vm = np.linalg.svd(m) + # mu1, mu2 two first columns of U => 2 first lines of U.T + mu1, mu2 = um.T[:2, :, np.newaxis] + # VM is V.T with numpy we want the first two lines of V.T + nu1, nu2 = vm[:2, :, np.newaxis] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + transfer_matrix_j_mo_transfer_matrix_j = np.vstack(( + transfer_matrix[:, i, np.newaxis], + transfer_matrix[:, j, np.newaxis])) + + if not np.allclose(sm[0], sm[1]): + ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1) + ker_pole_i_nu1 = np.dot(ker_pole[j], nu1) + ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1)) + else: + ker_pole_ij = np.vstack(( + np.hstack((ker_pole[i], + np.zeros(ker_pole[i].shape))), + np.hstack((np.zeros(ker_pole[j].shape), + ker_pole[j])) + )) + mu_nu_matrix = np.vstack( + (np.hstack((mu1, mu2)), np.hstack((nu1, nu2))) + ) + ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix) + transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T), + transfer_matrix_j_mo_transfer_matrix_j) + if not np.allclose(transfer_matrix_ij, 0): + transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij / + np.linalg.norm(transfer_matrix_ij)) + transfer_matrix[:, i] = transfer_matrix_ij[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = transfer_matrix_ij[ + transfer_matrix[:, i].shape[0]:, 0 + ] + else: + # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to + # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to + # ker_pole_mu_nu and iterate. As we are looking for a vector in + # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help + # (that's a guess, not a claim !) 
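+        # ker_pole_mu_nu stacks the i-block on top of the j-block, so it is
+        # split at row transfer_matrix[:, i].shape[0] in the two assignments
+        # below.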
+ transfer_matrix[:, i] = ker_pole_mu_nu[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = ker_pole_mu_nu[ + transfer_matrix[:, i].shape[0]:, 0 + ] + + +def _YT_complex(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.2 page 20 related to complex pairs + """ + # step 1 page 20 + ur = np.sqrt(2)*Q[:, -2, np.newaxis] + ui = np.sqrt(2)*Q[:, -1, np.newaxis] + u = ur + 1j*ui + + # step 2 page 20 + ker_pole_ij = ker_pole[i] + m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) - + np.dot(np.conj(u), u.T)), ker_pole_ij) + + # step 3 page 20 + e_val, e_vec = np.linalg.eig(m) + # sort eigenvalues according to their module + e_val_idx = np.argsort(np.abs(e_val)) + mu1 = e_vec[:, e_val_idx[-1], np.newaxis] + mu2 = e_vec[:, e_val_idx[-2], np.newaxis] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + + # remember transfer_matrix_i has been split as + # transfer_matrix[i]=real(transfer_matrix_i) and + # transfer_matrix[j]=imag(transfer_matrix_i) + transfer_matrix_j_mo_transfer_matrix_j = ( + transfer_matrix[:, i, np.newaxis] + + 1j*transfer_matrix[:, j, np.newaxis] + ) + if not np.allclose(np.abs(e_val[e_val_idx[-1]]), + np.abs(e_val[e_val_idx[-2]])): + ker_pole_mu = np.dot(ker_pole_ij, mu1) + else: + mu1_mu2_matrix = np.hstack((mu1, mu2)) + ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix) + transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)), + transfer_matrix_j_mo_transfer_matrix_j) + + if not np.allclose(transfer_matrix_i_j, 0): + transfer_matrix_i_j = (transfer_matrix_i_j / + np.linalg.norm(transfer_matrix_i_j)) + transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0]) + transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0]) + else: + # same idea as in YT_real + transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0]) + transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0]) + + +def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): + """ + Algorithm "YT" Tits, Yang. Globally Convergent + Algorithms for Robust Pole Assignment by State Feedback + https://hdl.handle.net/1903/5598 + The poles P have to be sorted accordingly to section 6.2 page 20 + + """ + # The IEEE edition of the YT paper gives useful information on the + # optimal update order for the real poles in order to minimize the number + # of times we have to loop over all poles, see page 1442 + nb_real = poles[np.isreal(poles)].shape[0] + # hnb => Half Nb Real + hnb = nb_real // 2 + + # Stick to the indices in the paper and then remove one to get numpy array + # index it is a bit easier to link the code to the paper this way even if it + # is not very clean. 
The paper is unclear about what should be done when + # there is only one real pole => use KNV0 on this real pole seem to work + if nb_real > 0: + #update the biggest real pole with the smallest one + update_order = [[nb_real], [1]] + else: + update_order = [[],[]] + + r_comp = np.arange(nb_real+1, len(poles)+1, 2) + # step 1.a + r_p = np.arange(1, hnb+nb_real % 2) + update_order[0].extend(2*r_p) + update_order[1].extend(2*r_p+1) + # step 1.b + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 1.c + r_p = np.arange(1, hnb+1) + update_order[0].extend(2*r_p-1) + update_order[1].extend(2*r_p) + # step 1.d + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 2.a + r_j = np.arange(2, hnb+nb_real % 2) + for j in r_j: + for i in range(1, hnb+1): + update_order[0].append(i) + update_order[1].append(i+j) + # step 2.b + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 2.c + r_j = np.arange(2, hnb+nb_real % 2) + for j in r_j: + for i in range(hnb+1, nb_real+1): + idx_1 = i+j + if idx_1 > nb_real: + idx_1 = i+j-nb_real + update_order[0].append(i) + update_order[1].append(idx_1) + # step 2.d + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 3.a + for i in range(1, hnb+1): + update_order[0].append(i) + update_order[1].append(i+hnb) + # step 3.b + if hnb == 0 and np.isreal(poles[0]): + update_order[0].append(1) + update_order[1].append(1) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + + update_order = np.array(update_order).T-1 + stop = False + nb_try = 0 + while nb_try < maxiter and not stop: + det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix)) + for i, j in update_order: + if i == j: + assert i == 0, "i!=0 for KNV call in YT" + assert np.isreal(poles[i]), "calling KNV on a complex pole" + _KNV0(B, ker_pole, transfer_matrix, i, poles) + else: + transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j), + axis=1) + # after merge of gh-4249 great speed improvements could be + # achieved using QR updates instead of full QR in the line below + + #to debug with numpy qr uncomment the line below + #Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete") + Q, _ = s_qr(transfer_matrix_not_i_j, mode="full") + + if np.isreal(poles[i]): + assert np.isreal(poles[j]), "mixing real and complex " + \ + "in YT_real" + str(poles) + _YT_real(ker_pole, Q, transfer_matrix, i, j) + else: + assert ~np.isreal(poles[i]), "mixing real and complex " + \ + "in YT_real" + str(poles) + _YT_complex(ker_pole, Q, transfer_matrix, i, j) + + det_transfer_matrix = np.max((np.sqrt(np.spacing(1)), + np.abs(np.linalg.det(transfer_matrix)))) + cur_rtol = np.abs( + (det_transfer_matrix - + det_transfer_matrixb) / + det_transfer_matrix) + if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)): + # Convergence test from YT page 21 + stop = True + nb_try += 1 + return stop, cur_rtol, nb_try + + +def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): + """ + Loop over all poles one by one and apply KNV method 0 algorithm + """ + # This method is useful only because we need to be able to call + # _KNV0 from YT without looping over all poles, otherwise it would + # have been fine to mix _KNV0_loop and 
_KNV0 in a single function
+    stop = False
+    nb_try = 0
+    while nb_try < maxiter and not stop:
+        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
+        for j in range(B.shape[0]):
+            _KNV0(B, ker_pole, transfer_matrix, j, poles)
+
+        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
+                                      np.abs(np.linalg.det(transfer_matrix))))
+        cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
+                          det_transfer_matrix)
+        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
+            # Convergence test from YT page 21
+            stop = True
+
+        nb_try += 1
+    return stop, cur_rtol, nb_try
+
+
+def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
+    """
+    Compute K such that eigenvalues(A - dot(B, K)) = poles.
+
+    K is the gain matrix such that the plant described by the linear system
+    ``AX+BU`` will have its closed-loop poles, i.e. the eigenvalues of
+    ``A - B*K``, as close as possible to those asked for in poles.
+
+    SISO, MISO and MIMO systems are supported.
+
+    Parameters
+    ----------
+    A, B : ndarray
+        State-space representation of linear system ``AX + BU``.
+    poles : array_like
+        Desired real poles and/or complex conjugate poles.
+        Complex poles are only supported with ``method="YT"`` (default).
+    method: {'YT', 'KNV0'}, optional
+        Which method to choose to find the gain matrix K. One of:
+
+            - 'YT': Yang Tits
+            - 'KNV0': Kautsky, Nichols, Van Dooren update method 0
+
+        See References and Notes for details on the algorithms.
+    rtol: float, optional
+        After each iteration the determinant of the eigenvectors of
+        ``A - B*K`` is compared to its previous value, when the relative
+        error between these two values becomes lower than `rtol` the algorithm
+        stops. Default is 1e-3.
+    maxiter: int, optional
+        Maximum number of iterations to compute the gain matrix.
+        Default is 30.
+
+    Returns
+    -------
+    full_state_feedback : Bunch object
+        full_state_feedback is composed of:
+            gain_matrix : 1-D ndarray
+                The closed-loop matrix K such that the eigenvalues of ``A-BK``
+                are as close as possible to the requested poles.
+            computed_poles : 1-D ndarray
+                The poles corresponding to ``A-BK`` sorted as first the real
+                poles in increasing order, then the complex conjugates in
+                lexicographic order.
+            requested_poles : 1-D ndarray
+                The poles the algorithm was asked to place sorted as above,
+                they may differ from what was achieved.
+            X : 2-D ndarray
+                The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
+                (see Notes)
+            rtol : float
+                The relative tolerance achieved on ``det(X)`` (see Notes).
+                `rtol` will be NaN if it is possible to solve the system
+                ``diag(poles) = (A - B*K)``, or 0 when the optimization
+                algorithms can't do anything i.e. when ``B.shape[1] == 1``.
+            nb_iter : int
+                The number of iterations performed before converging.
+                `nb_iter` will be NaN if it is possible to solve the system
+                ``diag(poles) = (A - B*K)``, or 0 when the optimization
+                algorithms can't do anything i.e. when ``B.shape[1] == 1``.
+
+    Notes
+    -----
+    The Tits and Yang (YT) paper [2]_ is an update of the original Kautsky et
+    al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
+    matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
+    rank-2 updates. This yields on average more robust solutions (see [2]_
+    pp 21-22); furthermore the YT algorithm supports complex poles whereas KNV
+    does not in its original version. Only update method 0 proposed by KNV has
+    been implemented here, hence the name ``'KNV0'``.
+
+    KNV extended to complex poles is used in Matlab's ``place`` function, YT is
+    distributed under a non-free licence by Slicot under the name ``robpole``.
+    It is unclear and undocumented how KNV0 has been extended to complex poles
+    (Tits and Yang claim on page 14 of their paper that their method cannot be
+    used to extend KNV to complex poles), therefore only YT supports them in
+    this implementation.
+
+    As the solution to the problem of pole placement is not unique for MIMO
+    systems, both methods start with a tentative transfer matrix which is
+    altered in various ways to increase its determinant. Both methods have been
+    proven to converge to a stable solution; however, depending on the way the
+    initial transfer matrix is chosen, they will converge to different
+    solutions, and therefore there is absolutely no guarantee that using
+    ``'KNV0'`` will yield results similar to Matlab's or any other
+    implementation of these algorithms.
+
+    Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
+    is only provided because it is needed by ``'YT'`` in some specific cases.
+    Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
+    when ``abs(det(X))`` is used as a robustness indicator.
+
+    [2]_ is available as a technical report on the following URL:
+    https://hdl.handle.net/1903/5598
+
+    References
+    ----------
+    .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
+           in linear state feedback", International Journal of Control, Vol. 41,
+           pp. 1129-1155, 1985.
+    .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
+           pole assignment by state feedback", IEEE Transactions on Automatic
+           Control, Vol. 41, pp. 1432-1452, 1996.
+
+    Examples
+    --------
+    A simple example demonstrating real pole placement using both KNV and YT
+    algorithms. This is example number 1 from section 4 of the reference KNV
+    publication ([1]_):
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> A = np.array([[ 1.380,  -0.2077,  6.715, -5.676  ],
+    ...               [-0.5814, -4.290,   0,      0.6750 ],
+    ...               [ 1.067,   4.273,  -6.654,  5.893  ],
+    ...               [ 0.0480,  4.273,   1.343, -2.104  ]])
+    >>> B = np.array([[ 0,      5.679 ],
+    ...               [ 1.136,  1.136 ],
+    ...               [ 0,      0     ],
+    ...               [-3.146,  0     ]])
+    >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
+
+    Now compute K with KNV method 0, with the default YT method and with the YT
+    method while forcing 100 iterations of the algorithm and print some results
+    after each call.
+
+    >>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
+    >>> fsf1.gain_matrix
+    array([[ 0.20071427, -0.96665799,  0.24066128, -0.10279785],
+           [ 0.50587268,  0.57779091,  0.51795763, -0.41991442]])
+
+    >>> fsf2 = signal.place_poles(A, B, P)  # uses YT method
+    >>> fsf2.computed_poles
+    array([-8.6659, -5.0566, -0.5   , -0.2   ])
+
+    >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
+    >>> fsf3.X
+    array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j,  0.74823657+0.j],
+           [-0.04977751+0.j, -0.80872954+0.j,  0.13566234+0.j, -0.29322906+0.j],
+           [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
+           [ 0.22267347+0.j,  0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
+
+    The absolute value of the determinant of X is a good indicator to check the
+    robustness of the results; both ``'KNV0'`` and ``'YT'`` aim at maximizing
+    it.
Below a comparison of the robustness of the results above: + + >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X)) + True + >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X)) + True + + Now a simple example for complex poles: + + >>> A = np.array([[ 0, 7/3., 0, 0 ], + ... [ 0, 0, 0, 7/9. ], + ... [ 0, 0, 0, 0 ], + ... [ 0, 0, 0, 0 ]]) + >>> B = np.array([[ 0, 0 ], + ... [ 0, 0 ], + ... [ 1, 0 ], + ... [ 0, 1 ]]) + >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3. + >>> fsf = signal.place_poles(A, B, P, method='YT') + + We can plot the desired and computed poles in the complex plane: + + >>> t = np.linspace(0, 2*np.pi, 401) + >>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle + >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag, + ... 'wo', label='Desired') + >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx', + ... label='Placed') + >>> plt.grid() + >>> plt.axis('image') + >>> plt.axis([-1.1, 1.1, -1.1, 1.1]) + >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1) + + """ + # Move away all the inputs checking, it only adds noise to the code + update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter) + + # The current value of the relative tolerance we achieved + cur_rtol = 0 + # The number of iterations needed before converging + nb_iter = 0 + + # Step A: QR decomposition of B page 1132 KN + # to debug with numpy qr uncomment the line below + # u, z = np.linalg.qr(B, mode="complete") + u, z = s_qr(B, mode="full") + rankB = np.linalg.matrix_rank(B) + u0 = u[:, :rankB] + u1 = u[:, rankB:] + z = z[:rankB, :] + + # If we can use the identity matrix as X the solution is obvious + if B.shape[0] == rankB: + # if B is square and full rank there is only one solution + # such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0]) + # i.e K=inv(B)*(diag(P)-A) + # if B has as many lines as its rank (but not square) there are many + # solutions and we can choose one using least squares + # => use lstsq in both cases. + # In both cases the transfer matrix X will be eye(A.shape[0]) and I + # can hardly think of a better one so there is nothing to optimize + # + # for complex poles we use the following trick + # + # |a -b| has for eigenvalues a+b and a-b + # |b a| + # + # |a+bi 0| has the obvious eigenvalues a+bi and a-bi + # |0 a-bi| + # + # e.g solving the first one in R gives the solution + # for the second one in C + diag_poles = np.zeros(A.shape) + idx = 0 + while idx < poles.shape[0]: + p = poles[idx] + diag_poles[idx, idx] = np.real(p) + if ~np.isreal(p): + diag_poles[idx, idx+1] = -np.imag(p) + diag_poles[idx+1, idx+1] = np.real(p) + diag_poles[idx+1, idx] = np.imag(p) + idx += 1 # skip next one + idx += 1 + gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0] + transfer_matrix = np.eye(A.shape[0]) + cur_rtol = np.nan + nb_iter = np.nan + else: + # step A (p1144 KNV) and beginning of step F: decompose + # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors + # in the same loop + ker_pole = [] + + # flag to skip the conjugate of a complex pole + skip_conjugate = False + # select orthonormal base ker_pole for each Pole and vectors for + # transfer_matrix + for j in range(B.shape[0]): + if skip_conjugate: + skip_conjugate = False + continue + pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T + + # after QR Q=Q0|Q1 + # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix. + # Q1 is orthogonnal to Q0 and will be multiplied by the zeros in + # R when using mode "complete". 
In default mode Q1 and the zeros
+            # in R are not computed
+
+            # To debug with numpy qr uncomment the line below
+            # Q, _ = np.linalg.qr(pole_space_j, mode="complete")
+            Q, _ = s_qr(pole_space_j, mode="full")
+
+            ker_pole_j = Q[:, pole_space_j.shape[1]:]
+
+            # We want to select one vector in ker_pole_j to build the transfer
+            # matrix, however qr returns sometimes vectors with zeros on the
+            # same line for each pole and this yields very long convergence
+            # times.
+            # Or some other times a set of vectors, one with zero imaginary
+            # part and one (or several) with imaginary parts. After trying
+            # many ways to select the best possible one (eg ditch vectors
+            # with zero imaginary part for complex poles) I ended up summing
+            # all vectors in ker_pole_j, this solves 100% of the problems and
+            # is a valid choice for transfer_matrix.
+            # This way, for complex poles, we are sure to have a non-zero
+            # imaginary part, and the problem of lines full of zeros in
+            # transfer_matrix is solved too: when a vector from ker_pole_j
+            # has a zero, the other one(s) (when ker_pole_j.shape[1] > 1)
+            # for sure won't have a zero there.
+
+            transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
+            transfer_matrix_j = (transfer_matrix_j /
+                                 np.linalg.norm(transfer_matrix_j))
+            if ~np.isreal(poles[j]):  # complex pole
+                transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
+                                               np.imag(transfer_matrix_j)])
+                ker_pole.extend([ker_pole_j, ker_pole_j])
+
+                # Skip next pole as it is the conjugate
+                skip_conjugate = True
+            else:  # real pole, nothing to do
+                ker_pole.append(ker_pole_j)
+
+            if j == 0:
+                transfer_matrix = transfer_matrix_j
+            else:
+                transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
+
+        if rankB > 1:  # otherwise there is nothing we can optimize
+            stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
+                                                  poles, B, maxiter, rtol)
+            if not stop and rtol > 0:
+                # if rtol<=0 the user has probably done that on purpose,
+                # don't annoy him
+                err_msg = (
+                    "Convergence was not reached after maxiter iterations.\n"
+                    "You asked for a relative tolerance of %f we got %f" %
+                    (rtol, cur_rtol)
+                    )
+                warnings.warn(err_msg)
+
+        # reconstruct transfer_matrix to match complex conjugate pairs,
+        # ie transfer_matrix_j/transfer_matrix_j+1 are
+        # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
+        transfer_matrix = transfer_matrix.astype(complex)
+        idx = 0
+        while idx < poles.shape[0]-1:
+            if ~np.isreal(poles[idx]):
+                rel = transfer_matrix[:, idx].copy()
+                img = transfer_matrix[:, idx+1]
+                # rel is an array referencing a column of transfer_matrix;
+                # if we don't copy() it, it will change after the next line,
+                # and the line after that will not yield the correct value
+                transfer_matrix[:, idx] = rel-1j*img
+                transfer_matrix[:, idx+1] = rel+1j*img
+                idx += 1  # skip next one
+            idx += 1
+
+        try:
+            m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
+                                                          transfer_matrix.T)).T
+            gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
+        except np.linalg.LinAlgError:
+            raise ValueError("The poles you've chosen can't be placed. "
" + "Check the controllability matrix and try " + "another set of poles") + + # Beware: Kautsky solves A+BK but the usual form is A-BK + gain_matrix = -gain_matrix + # K still contains complex with ~=0j imaginary parts, get rid of them + gain_matrix = np.real(gain_matrix) + + full_state_feedback = Bunch() + full_state_feedback.gain_matrix = gain_matrix + full_state_feedback.computed_poles = _order_complex_poles( + np.linalg.eig(A - np.dot(B, gain_matrix))[0] + ) + full_state_feedback.requested_poles = poles + full_state_feedback.X = transfer_matrix + full_state_feedback.rtol = cur_rtol + full_state_feedback.nb_iter = nb_iter + + return full_state_feedback + + +def dlsim(system, u, t=None, x0=None): + """ + Simulate output of a discrete-time linear system. + + Parameters + ---------- + system : tuple of array_like or instance of `dlti` + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + u : array_like + An input array describing the input at each time `t` (interpolation is + assumed between given times). If there are multiple inputs, then each + column of the rank-2 array represents an input. + t : array_like, optional + The time steps at which the input is defined. If `t` is given, it + must be the same length as `u`, and the final value in `t` determines + the number of steps returned in the output. + x0 : array_like, optional + The initial conditions on the state vector (zero by default). + + Returns + ------- + tout : ndarray + Time values for the output, as a 1-D array. + yout : ndarray + System response, as a 1-D array. + xout : ndarray, optional + Time-evolution of the state-vector. Only generated if the input is a + `StateSpace` system. 
+ + See Also + -------- + lsim, dstep, dimpulse, cont2discrete + + Examples + -------- + A simple integrator transfer function with a discrete time step of 1.0 + could be implemented as: + + >>> from scipy import signal + >>> tf = ([1.0,], [1.0, -1.0], 1.0) + >>> t_in = [0.0, 1.0, 2.0, 3.0] + >>> u = np.asarray([0.0, 0.0, 1.0, 1.0]) + >>> t_out, y = signal.dlsim(tf, u, t=t_in) + >>> y.T + array([[ 0., 0., 0., 1.]]) + + """ + # Convert system to dlti-StateSpace + if isinstance(system, lti): + raise AttributeError('dlsim can only be used with discrete-time dlti ' + 'systems.') + elif not isinstance(system, dlti): + system = dlti(*system[:-1], dt=system[-1]) + + # Condition needed to ensure output remains compatible + is_ss_input = isinstance(system, StateSpace) + system = system._as_ss() + + u = np.atleast_1d(u) + + if u.ndim == 1: + u = np.atleast_2d(u).T + + if t is None: + out_samples = len(u) + stoptime = (out_samples - 1) * system.dt + else: + stoptime = t[-1] + out_samples = int(np.floor(stoptime / system.dt)) + 1 + + # Pre-build output arrays + xout = np.zeros((out_samples, system.A.shape[0])) + yout = np.zeros((out_samples, system.C.shape[0])) + tout = np.linspace(0.0, stoptime, num=out_samples) + + # Check initial condition + if x0 is None: + xout[0, :] = np.zeros((system.A.shape[1],)) + else: + xout[0, :] = np.asarray(x0) + + # Pre-interpolate inputs into the desired time steps + if t is None: + u_dt = u + else: + if len(u.shape) == 1: + u = u[:, np.newaxis] + + u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True) + u_dt = u_dt_interp(tout).transpose() + + # Simulate the system + for i in range(0, out_samples - 1): + xout[i+1, :] = (np.dot(system.A, xout[i, :]) + + np.dot(system.B, u_dt[i, :])) + yout[i, :] = (np.dot(system.C, xout[i, :]) + + np.dot(system.D, u_dt[i, :])) + + # Last point + yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) + + np.dot(system.D, u_dt[out_samples-1, :])) + + if is_ss_input: + return tout, yout, xout + else: + return tout, yout + + +def dimpulse(system, x0=None, t=None, n=None): + """ + Impulse response of discrete-time system. + + Parameters + ---------- + system : tuple of array_like or instance of `dlti` + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + x0 : array_like, optional + Initial state-vector. Defaults to zero. + t : array_like, optional + Time points. Computed if not given. + n : int, optional + The number of time points to compute (if `t` is not given). + + Returns + ------- + tout : ndarray + Time values for the output, as a 1-D array. + yout : tuple of ndarray + Impulse response of system. Each element of the tuple represents + the output of the system based on an impulse in each input. 
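+        For a system with ``p`` inputs the tuple thus holds ``p`` arrays,
+        each of shape ``(num_time_points, num_outputs)``.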
+ + See Also + -------- + impulse, dstep, dlsim, cont2discrete + + """ + # Convert system to dlti-StateSpace + if isinstance(system, dlti): + system = system._as_ss() + elif isinstance(system, lti): + raise AttributeError('dimpulse can only be used with discrete-time ' + 'dlti systems.') + else: + system = dlti(*system[:-1], dt=system[-1])._as_ss() + + # Default to 100 samples if unspecified + if n is None: + n = 100 + + # If time is not specified, use the number of samples + # and system dt + if t is None: + t = np.linspace(0, n * system.dt, n, endpoint=False) + else: + t = np.asarray(t) + + # For each input, implement a step change + yout = None + for i in range(0, system.inputs): + u = np.zeros((t.shape[0], system.inputs)) + u[0, i] = 1.0 + + one_output = dlsim(system, u, t=t, x0=x0) + + if yout is None: + yout = (one_output[1],) + else: + yout = yout + (one_output[1],) + + tout = one_output[0] + + return tout, yout + + +def dstep(system, x0=None, t=None, n=None): + """ + Step response of discrete-time system. + + Parameters + ---------- + system : tuple of array_like + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + x0 : array_like, optional + Initial state-vector. Defaults to zero. + t : array_like, optional + Time points. Computed if not given. + n : int, optional + The number of time points to compute (if `t` is not given). + + Returns + ------- + tout : ndarray + Output time points, as a 1-D array. + yout : tuple of ndarray + Step response of system. Each element of the tuple represents + the output of the system based on a step response to each input. + + See Also + -------- + step, dimpulse, dlsim, cont2discrete + + """ + # Convert system to dlti-StateSpace + if isinstance(system, dlti): + system = system._as_ss() + elif isinstance(system, lti): + raise AttributeError('dstep can only be used with discrete-time dlti ' + 'systems.') + else: + system = dlti(*system[:-1], dt=system[-1])._as_ss() + + # Default to 100 samples if unspecified + if n is None: + n = 100 + + # If time is not specified, use the number of samples + # and system dt + if t is None: + t = np.linspace(0, n * system.dt, n, endpoint=False) + else: + t = np.asarray(t) + + # For each input, implement a step change + yout = None + for i in range(0, system.inputs): + u = np.zeros((t.shape[0], system.inputs)) + u[:, i] = np.ones((t.shape[0],)) + + one_output = dlsim(system, u, t=t, x0=x0) + + if yout is None: + yout = (one_output[1],) + else: + yout = yout + (one_output[1],) + + tout = one_output[0] + + return tout, yout + + +def dfreqresp(system, w=None, n=10000, whole=False): + """ + Calculate the frequency response of a discrete-time system. + + Parameters + ---------- + system : an instance of the `dlti` class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `dlti`) + * 2 (numerator, denominator, dt) + * 3 (zeros, poles, gain, dt) + * 4 (A, B, C, D, dt) + + w : array_like, optional + Array of frequencies (in radians/sample). Magnitude and phase data is + calculated for every value in this array. If not given a reasonable + set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. 
The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + whole : bool, optional + Normally, if 'w' is not given, frequencies are computed from 0 to the + Nyquist frequency, pi radians/sample (upper-half of unit-circle). If + `whole` is True, compute frequencies from 0 to 2*pi radians/sample. + + Returns + ------- + w : 1D ndarray + Frequency array [radians/sample] + H : 1D ndarray + Array of complex magnitude values + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). + + .. versionadded:: 0.18.0 + + Examples + -------- + Generating the Nyquist plot of a transfer function + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Transfer function: H(z) = 1 / (z^2 + 2z + 3) + + >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05) + + >>> w, H = signal.dfreqresp(sys) + + >>> plt.figure() + >>> plt.plot(H.real, H.imag, "b") + >>> plt.plot(H.real, -H.imag, "r") + >>> plt.show() + + """ + if not isinstance(system, dlti): + if isinstance(system, lti): + raise AttributeError('dfreqresp can only be used with ' + 'discrete-time systems.') + + system = dlti(*system[:-1], dt=system[-1]) + + if isinstance(system, StateSpace): + # No SS->ZPK code exists right now, just SS->TF->ZPK + system = system._as_tf() + + if not isinstance(system, (TransferFunction, ZerosPolesGain)): + raise ValueError('Unknown system type') + + if system.inputs != 1 or system.outputs != 1: + raise ValueError("dfreqresp requires a SISO (single input, single " + "output) system.") + + if w is not None: + worN = w + else: + worN = n + + if isinstance(system, TransferFunction): + # Convert numerator and denominator from polynomials in the variable + # 'z' to polynomials in the variable 'z^-1', as freqz expects. + num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den) + w, h = freqz(num, den, worN=worN, whole=whole) + + elif isinstance(system, ZerosPolesGain): + w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN, + whole=whole) + + return w, h + + +def dbode(system, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a discrete-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `dlti`) + * 2 (num, den, dt) + * 3 (zeros, poles, gain, dt) + * 4 (A, B, C, D, dt) + + w : array_like, optional + Array of frequencies (in radians/sample). Magnitude and phase data is + calculated for every value in this array. If not given a reasonable + set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + + Returns + ------- + w : 1D ndarray + Frequency array [rad/time_unit] + mag : 1D ndarray + Magnitude array [dB] + phase : 1D ndarray + Phase array [deg] + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). + + .. 
versionadded:: 0.18.0 + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Transfer function: H(z) = 1 / (z^2 + 2z + 3) + + >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05) + + Equivalent: sys.bode() + + >>> w, mag, phase = signal.dbode(sys) + + >>> plt.figure() + >>> plt.semilogx(w, mag) # Bode magnitude plot + >>> plt.figure() + >>> plt.semilogx(w, phase) # Bode phase plot + >>> plt.show() + + """ + w, y = dfreqresp(system, w=w, n=n) + + if isinstance(system, dlti): + dt = system.dt + else: + dt = system[-1] + + mag = 20.0 * numpy.log10(abs(y)) + phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y))) + + return w / dt, mag, phase diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/ltisys.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/ltisys.pyc new file mode 100644 index 0000000..c58f6df Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/ltisys.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/setup.py b/project/venv/lib/python2.7/site-packages/scipy/signal/setup.py new file mode 100644 index 0000000..6011bb9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/setup.py @@ -0,0 +1,37 @@ +from __future__ import division, print_function, absolute_import + +from scipy._build_utils import numpy_nodepr_api + + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('signal', parent_package, top_path) + + config.add_data_dir('tests') + + config.add_subpackage('windows') + + config.add_extension('sigtools', + sources=['sigtoolsmodule.c', 'firfilter.c', + 'medianfilter.c', 'lfilter.c.src', + 'correlate_nd.c.src'], + depends=['sigtools.h'], + include_dirs=['.'], + **numpy_nodepr_api) + + config.add_extension('_spectral', sources=['_spectral.c']) + config.add_extension('_max_len_seq_inner', sources=['_max_len_seq_inner.c']) + config.add_extension('_peak_finding_utils', + sources=['_peak_finding_utils.c']) + config.add_extension('_upfirdn_apply', sources=['_upfirdn_apply.c']) + spline_src = ['splinemodule.c', 'S_bspline_util.c', 'D_bspline_util.c', + 'C_bspline_util.c', 'Z_bspline_util.c', 'bspline_util.c'] + config.add_extension('spline', sources=spline_src, **numpy_nodepr_api) + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/setup.pyc new file mode 100644 index 0000000..11fe452 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/signaltools.py b/project/venv/lib/python2.7/site-packages/scipy/signal/signaltools.py new file mode 100644 index 0000000..4b460f9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/signaltools.py @@ -0,0 +1,3490 @@ +# Author: Travis Oliphant +# 1999 -- 2002 + +from __future__ import division, print_function, absolute_import + +import operator +import threading +import sys +import timeit + +from . 
import sigtools, dlti +from ._upfirdn import upfirdn, _output_len +from scipy._lib.six import callable +from scipy._lib._version import NumpyVersion +from scipy import fftpack, linalg +from scipy.fftpack.helper import _init_nd_shape_and_axes_sorted +from numpy import (allclose, angle, arange, argsort, array, asarray, + atleast_1d, atleast_2d, cast, dot, exp, expand_dims, + iscomplexobj, mean, ndarray, newaxis, ones, pi, + poly, polyadd, polyder, polydiv, polymul, polysub, polyval, + product, r_, ravel, real_if_close, reshape, + roots, sort, take, transpose, unique, where, zeros, + zeros_like) +import numpy as np +import math +from scipy.special import factorial +from .windows import get_window +from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext +from .filter_design import cheby1, _validate_sos +from .fir_filter_design import firwin + +if sys.version_info.major >= 3 and sys.version_info.minor >= 5: + from math import gcd +else: + from fractions import gcd + + +__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d', + 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', + 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2', + 'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue', + 'residuez', 'resample', 'resample_poly', 'detrend', + 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method', + 'filtfilt', 'decimate', 'vectorstrength'] + + +_modedict = {'valid': 0, 'same': 1, 'full': 2} + +_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1, + 'symmetric': 1, 'reflect': 4} + + +_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e') + +_rfft_lock = threading.Lock() + + +def _valfrommode(mode): + try: + return _modedict[mode] + except KeyError: + raise ValueError("Acceptable mode flags are 'valid'," + " 'same', or 'full'.") + + +def _bvalfromboundary(boundary): + try: + return _boundarydict[boundary] << 2 + except KeyError: + raise ValueError("Acceptable boundary flags are 'fill', 'circular' " + "(or 'wrap'), and 'symmetric' (or 'symm').") + + +def _inputs_swap_needed(mode, shape1, shape2): + """ + If in 'valid' mode, returns whether or not the input arrays need to be + swapped depending on whether `shape1` is at least as large as `shape2` in + every dimension. + + This is important for some of the correlation and convolution + implementations in this module, where the larger array input needs to come + before the smaller array input when operating in this mode. + + Note that if the mode provided is not 'valid', False is immediately + returned. + """ + if mode == 'valid': + ok1, ok2 = True, True + + for d1, d2 in zip(shape1, shape2): + if not d1 >= d2: + ok1 = False + if not d2 >= d1: + ok2 = False + + if not (ok1 or ok2): + raise ValueError("For 'valid' mode, one must be at least " + "as large as the other in every dimension") + + return not ok1 + + return False + + +def correlate(in1, in2, mode='full', method='auto'): + r""" + Cross-correlate two N-dimensional arrays. + + Cross-correlate `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear cross-correlation + of the inputs. 
(Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + method : str {'auto', 'direct', 'fft'}, optional + A string indicating which method to use to calculate the correlation. + + ``direct`` + The correlation is determined directly from sums, the definition of + correlation. + ``fft`` + The Fast Fourier Transform is used to perform the correlation more + quickly (only available for numerical arrays.) + ``auto`` + Automatically chooses direct or Fourier method based on an estimate + of which is faster (default). See `convolve` Notes for more detail. + + .. versionadded:: 0.19.0 + + Returns + ------- + correlate : array + An N-dimensional array containing a subset of the discrete linear + cross-correlation of `in1` with `in2`. + + See Also + -------- + choose_conv_method : contains more documentation on `method`. + + Notes + ----- + The correlation z of two d-dimensional arrays x and y is defined as:: + + z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...]) + + This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` + then + + .. math:: + + z[k] = (x * y)(k - N + 1) + = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*} + + for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2` + + where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`, + and :math:`y_m` is 0 when m is outside the range of y. + + ``method='fft'`` only works for numerical arrays as it relies on + `fftconvolve`. In certain cases (i.e., arrays of objects or when + rounding integers can lose precision), ``method='direct'`` is always used. + + Examples + -------- + Implement a matched filter using cross-correlation, to recover a signal + that has passed through a noisy channel. 
+ + >>> from scipy import signal + >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128) + >>> sig_noise = sig + np.random.randn(len(sig)) + >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128 + + >>> import matplotlib.pyplot as plt + >>> clock = np.arange(64, len(sig), 128) + >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True) + >>> ax_orig.plot(sig) + >>> ax_orig.plot(clock, sig[clock], 'ro') + >>> ax_orig.set_title('Original signal') + >>> ax_noise.plot(sig_noise) + >>> ax_noise.set_title('Signal with noise') + >>> ax_corr.plot(corr) + >>> ax_corr.plot(clock, corr[clock], 'ro') + >>> ax_corr.axhline(0.5, ls=':') + >>> ax_corr.set_title('Cross-correlated with rectangular pulse') + >>> ax_orig.margins(0, 0.1) + >>> fig.tight_layout() + >>> fig.show() + + """ + in1 = asarray(in1) + in2 = asarray(in2) + + if in1.ndim == in2.ndim == 0: + return in1 * in2.conj() + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + + # Don't use _valfrommode, since correlate should not accept numeric modes + try: + val = _modedict[mode] + except KeyError: + raise ValueError("Acceptable mode flags are 'valid'," + " 'same', or 'full'.") + + # this either calls fftconvolve or this function with method=='direct' + if method in ('fft', 'auto'): + return convolve(in1, _reverse_and_conj(in2), mode, method) + + elif method == 'direct': + # fastpath to faster numpy.correlate for 1d inputs when possible + if _np_conv_ok(in1, in2, mode): + return np.correlate(in1, in2, mode) + + # _correlateND is far slower when in2.size > in1.size, so swap them + # and then undo the effect afterward if mode == 'full'. Also, it fails + # with 'valid' mode if in2 is larger than in1, so swap those, too. + # Don't swap inputs for 'same' mode, since shape of in1 matters. + swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or + _inputs_swap_needed(mode, in1.shape, in2.shape)) + + if swapped_inputs: + in1, in2 = in2, in1 + + if mode == 'valid': + ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)] + out = np.empty(ps, in1.dtype) + + z = sigtools._correlateND(in1, in2, out, val) + + else: + ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)] + + # zero pad input + in1zpadded = np.zeros(ps, in1.dtype) + sc = tuple(slice(0, i) for i in in1.shape) + in1zpadded[sc] = in1.copy() + + if mode == 'full': + out = np.empty(ps, in1.dtype) + elif mode == 'same': + out = np.empty(in1.shape, in1.dtype) + + z = sigtools._correlateND(in1zpadded, in2, out, val) + + if swapped_inputs: + # Reverse and conjugate to undo the effect of swapping inputs + z = _reverse_and_conj(z) + + return z + + else: + raise ValueError("Acceptable method flags are 'auto'," + " 'direct', or 'fft'.") + + +def _centered(arr, newshape): + # Return the center newshape portion of the array. + newshape = asarray(newshape) + currshape = array(arr.shape) + startind = (currshape - newshape) // 2 + endind = startind + newshape + myslice = [slice(startind[k], endind[k]) for k in range(len(endind))] + return arr[tuple(myslice)] + + +def fftconvolve(in1, in2, mode="full", axes=None): + """Convolve two N-dimensional arrays using FFT. + + Convolve `in1` and `in2` using the fast Fourier transform method, with + the output size determined by the `mode` argument. + + This is generally much faster than `convolve` for large arrays (n > ~500), + but can be slower when only a few output values are needed, and can only + output float arrays (int or object array inputs will be cast to float). 
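+
+    For instance, a small sketch of that casting behavior:
+
+    >>> from scipy import signal
+    >>> signal.fftconvolve([1, 2], [3, 4]).dtype
+    dtype('float64')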
+
+    As of v0.19, `convolve` automatically chooses this method or the direct
+    method based on an estimation of which is faster.
+
+    Parameters
+    ----------
+    in1 : array_like
+        First input.
+    in2 : array_like
+        Second input. Should have the same number of dimensions as `in1`.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output:
+
+        ``full``
+           The output is the full discrete linear convolution
+           of the inputs. (Default)
+        ``valid``
+           The output consists only of those elements that do not
+           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
+           must be at least as large as the other in every dimension.
+        ``same``
+           The output is the same size as `in1`, centered
+           with respect to the 'full' output.
+    axes : int or array_like of ints or None, optional
+        Axes over which to compute the convolution.
+        The default is over all axes.
+
+    Returns
+    -------
+    out : array
+        An N-dimensional array containing a subset of the discrete linear
+        convolution of `in1` with `in2`.
+
+    Examples
+    --------
+    Autocorrelation of white noise is an impulse.
+
+    >>> from scipy import signal
+    >>> sig = np.random.randn(1000)
+    >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
+    >>> ax_orig.plot(sig)
+    >>> ax_orig.set_title('White noise')
+    >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
+    >>> ax_mag.set_title('Autocorrelation')
+    >>> fig.tight_layout()
+    >>> fig.show()
+
+    Gaussian blur implemented using FFT convolution. Notice the dark borders
+    around the image, due to the zero-padding beyond its boundaries.
+    The `convolve2d` function allows for other types of image boundaries,
+    but is far slower.
+
+    >>> from scipy import misc
+    >>> face = misc.face(gray=True)
+    >>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
+    >>> blurred = signal.fftconvolve(face, kernel, mode='same')
+
+    >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
+    ... 
figsize=(6, 15)) + >>> ax_orig.imshow(face, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_kernel.imshow(kernel, cmap='gray') + >>> ax_kernel.set_title('Gaussian kernel') + >>> ax_kernel.set_axis_off() + >>> ax_blurred.imshow(blurred, cmap='gray') + >>> ax_blurred.set_title('Blurred') + >>> ax_blurred.set_axis_off() + >>> fig.show() + + """ + in1 = asarray(in1) + in2 = asarray(in2) + noaxes = axes is None + + if in1.ndim == in2.ndim == 0: # scalar inputs + return in1 * in2 + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + elif in1.size == 0 or in2.size == 0: # empty arrays + return array([]) + + _, axes = _init_nd_shape_and_axes_sorted(in1, shape=None, axes=axes) + + if not noaxes and not axes.size: + raise ValueError("when provided, axes cannot be empty") + + if noaxes: + other_axes = array([], dtype=np.intc) + else: + other_axes = np.setdiff1d(np.arange(in1.ndim), axes) + + s1 = array(in1.shape) + s2 = array(in2.shape) + + if not np.all((s1[other_axes] == s2[other_axes]) + | (s1[other_axes] == 1) | (s2[other_axes] == 1)): + raise ValueError("incompatible shapes for in1 and in2:" + " {0} and {1}".format(in1.shape, in2.shape)) + + complex_result = (np.issubdtype(in1.dtype, np.complexfloating) + or np.issubdtype(in2.dtype, np.complexfloating)) + shape = np.maximum(s1, s2) + shape[axes] = s1[axes] + s2[axes] - 1 + + # Check that input sizes are compatible with 'valid' mode + if _inputs_swap_needed(mode, s1, s2): + # Convolution is commutative; order doesn't have any effect on output + in1, s1, in2, s2 = in2, s2, in1, s1 + + # Speed up FFT by padding to optimal size for FFTPACK + fshape = [fftpack.helper.next_fast_len(d) for d in shape[axes]] + fslice = tuple([slice(sz) for sz in shape]) + # Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make + # sure we only call rfftn/irfftn from one thread at a time. + if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)): + try: + sp1 = np.fft.rfftn(in1, fshape, axes=axes) + sp2 = np.fft.rfftn(in2, fshape, axes=axes) + ret = np.fft.irfftn(sp1 * sp2, fshape, axes=axes)[fslice].copy() + finally: + if not _rfft_mt_safe: + _rfft_lock.release() + else: + # If we're here, it's either because we need a complex result, or we + # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and + # is already in use by another thread). In either case, use the + # (threadsafe but slower) SciPy complex-FFT routines instead. + sp1 = fftpack.fftn(in1, fshape, axes=axes) + sp2 = fftpack.fftn(in2, fshape, axes=axes) + ret = fftpack.ifftn(sp1 * sp2, axes=axes)[fslice].copy() + if not complex_result: + ret = ret.real + + if mode == "full": + return ret + elif mode == "same": + return _centered(ret, s1) + elif mode == "valid": + shape_valid = shape.copy() + shape_valid[axes] = s1[axes] - s2[axes] + 1 + return _centered(ret, shape_valid) + else: + raise ValueError("acceptable mode flags are 'valid'," + " 'same', or 'full'") + + +def _numeric_arrays(arrays, kinds='buifc'): + """ + See if a list of arrays are all numeric. + + Parameters + ---------- + ndarrays : array or list of arrays + arrays to check if numeric. + numeric_kinds : string-like + The dtypes of the arrays to be checked. If the dtype.kind of + the ndarrays are not in this string the function returns False and + otherwise returns True. 
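+
+    For instance (an illustrative sketch; ``np`` is numpy):
+
+    >>> _numeric_arrays([np.array([1.0, 2.0]), np.array([3, 4])])
+    True
+    >>> _numeric_arrays([np.array(['a', 'b'])])
+    False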
+ """ + if type(arrays) == ndarray: + return arrays.dtype.kind in kinds + for array_ in arrays: + if array_.dtype.kind not in kinds: + return False + return True + + +def _prod(iterable): + """ + Product of a list of numbers. + Faster than np.prod for short lists like array shapes. + """ + product = 1 + for x in iterable: + product *= x + return product + + +def _fftconv_faster(x, h, mode): + """ + See if using `fftconvolve` or `_correlateND` is faster. The boolean value + returned depends on the sizes and shapes of the input values. + + The big O ratios were found to hold across different machines, which makes + sense as it's the ratio that matters (the effective speed of the computer + is found in both big O constants). Regardless, this had been tuned on an + early 2015 MacBook Pro with 8GB RAM and an Intel i5 processor. + """ + if mode == 'full': + out_shape = [n + k - 1 for n, k in zip(x.shape, h.shape)] + big_O_constant = 10963.92823819 if x.ndim == 1 else 8899.1104874 + elif mode == 'same': + out_shape = x.shape + if x.ndim == 1: + if h.size <= x.size: + big_O_constant = 7183.41306773 + else: + big_O_constant = 856.78174111 + else: + big_O_constant = 34519.21021589 + elif mode == 'valid': + out_shape = [n - k + 1 for n, k in zip(x.shape, h.shape)] + big_O_constant = 41954.28006344 if x.ndim == 1 else 66453.24316434 + else: + raise ValueError("Acceptable mode flags are 'valid'," + " 'same', or 'full'.") + + # see whether the Fourier transform convolution method or the direct + # convolution method is faster (discussed in scikit-image PR #1792) + direct_time = (x.size * h.size * _prod(out_shape)) + fft_time = sum(n * math.log(n) for n in (x.shape + h.shape + + tuple(out_shape))) + return big_O_constant * fft_time < direct_time + + +def _reverse_and_conj(x): + """ + Reverse array `x` in all dimensions and perform the complex conjugate + """ + reverse = (slice(None, None, -1),) * x.ndim + return x[reverse].conj() + + +def _np_conv_ok(volume, kernel, mode): + """ + See if numpy supports convolution of `volume` and `kernel` (i.e. both are + 1D ndarrays and of the appropriate shape). Numpy's 'same' mode uses the + size of the larger input, while Scipy's uses the size of the first input. + + Invalid mode strings will return False and be caught by the calling func. + """ + if volume.ndim == kernel.ndim == 1: + if mode in ('full', 'valid'): + return True + elif mode == 'same': + return volume.size >= kernel.size + else: + return False + + +def _timeit_fast(stmt="pass", setup="pass", repeat=3): + """ + Returns the time the statement/function took, in seconds. + + Faster, less precise version of IPython's timeit. `stmt` can be a statement + written as a string or a callable. + + Will do only 1 loop (like IPython's timeit) with no repetitions + (unlike IPython) for very slow functions. For fast functions, only does + enough loops to take 5 ms, which seems to produce similar results (on + Windows at least), and avoids doing an extraneous cycle that isn't + measured. 
+ + """ + timer = timeit.Timer(stmt, setup) + + # determine number of calls per rep so total time for 1 rep >= 5 ms + x = 0 + for p in range(0, 10): + number = 10**p + x = timer.timeit(number) # seconds + if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one + break + if x > 1: # second + # If it's macroscopic, don't bother with repetitions + best = x + else: + number *= 10 + r = timer.repeat(repeat, number) + best = min(r) + + sec = best / number + return sec + + +def choose_conv_method(in1, in2, mode='full', measure=False): + """ + Find the fastest convolution/correlation method. + + This primarily exists to be called during the ``method='auto'`` option in + `convolve` and `correlate`, but can also be used when performing many + convolutions of the same input shapes and dtypes, determining + which method to use for all of them, either to avoid the overhead of the + 'auto' option or to use accurate real-world measurements. + + Parameters + ---------- + in1 : array_like + The first argument passed into the convolution function. + in2 : array_like + The second argument passed into the convolution function. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + measure : bool, optional + If True, run and time the convolution of `in1` and `in2` with both + methods and return the fastest. If False (default), predict the fastest + method using precomputed values. + + Returns + ------- + method : str + A string indicating which convolution method is fastest, either + 'direct' or 'fft' + times : dict, optional + A dictionary containing the times (in seconds) needed for each method. + This value is only returned if ``measure=True``. + + See Also + -------- + convolve + correlate + + Notes + ----- + For large n, ``measure=False`` is accurate and can quickly determine the + fastest method to perform the convolution. However, this is not as + accurate for small n (when any dimension in the input or output is small). + + In practice, we found that this function estimates the faster method up to + a multiplicative factor of 5 (i.e., the estimated method is *at most* 5 + times slower than the fastest method). The estimation values were tuned on + an early 2015 MacBook Pro with 8GB RAM but we found that the prediction + held *fairly* accurately across different machines. + + If ``measure=True``, time the convolutions. Because this function uses + `fftconvolve`, an error will be thrown if it does not support the inputs. + There are cases when `fftconvolve` supports the inputs but this function + returns `direct` (e.g., to protect against floating point integer + precision). + + .. 
versionadded:: 0.19 + + Examples + -------- + Estimate the fastest method for a given input: + + >>> from scipy import signal + >>> a = np.random.randn(1000) + >>> b = np.random.randn(1000000) + >>> method = signal.choose_conv_method(a, b, mode='same') + >>> method + 'fft' + + This can then be applied to other arrays of the same dtype and shape: + + >>> c = np.random.randn(1000) + >>> d = np.random.randn(1000000) + >>> # `method` works with correlate and convolve + >>> corr1 = signal.correlate(a, b, mode='same', method=method) + >>> corr2 = signal.correlate(c, d, mode='same', method=method) + >>> conv1 = signal.convolve(a, b, mode='same', method=method) + >>> conv2 = signal.convolve(c, d, mode='same', method=method) + + """ + volume = asarray(in1) + kernel = asarray(in2) + + if measure: + times = {} + for method in ['fft', 'direct']: + times[method] = _timeit_fast(lambda: convolve(volume, kernel, + mode=mode, method=method)) + + chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct' + return chosen_method, times + + # fftconvolve doesn't support complex256 + fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192" + if hasattr(np, fftconv_unsup): + if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup: + return 'direct' + + # for integer input, + # catch when more precision required than float provides (representing an + # integer as float can lose precision in fftconvolve if larger than 2**52) + if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]): + max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max()) + max_value *= int(min(volume.size, kernel.size)) + if max_value > 2**np.finfo('float').nmant - 1: + return 'direct' + + if _numeric_arrays([volume, kernel], kinds='b'): + return 'direct' + + if _numeric_arrays([volume, kernel]): + if _fftconv_faster(volume, kernel, mode): + return 'fft' + + return 'direct' + + +def convolve(in1, in2, mode='full', method='auto'): + """ + Convolve two N-dimensional arrays. + + Convolve `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + method : str {'auto', 'direct', 'fft'}, optional + A string indicating which method to use to calculate the convolution. + + ``direct`` + The convolution is determined directly from sums, the definition of + convolution. + ``fft`` + The Fourier Transform is used to perform the convolution by calling + `fftconvolve`. + ``auto`` + Automatically chooses direct or Fourier method based on an estimate + of which is faster (default). See Notes for more detail. + + .. versionadded:: 0.19.0 + + Returns + ------- + convolve : array + An N-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. 
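+
+    For instance, a quick numeric sketch (``.tolist()`` is used here only
+    for a printing-stable display):
+
+    >>> from scipy import signal
+    >>> signal.convolve([1, 2, 3], [0, 1, 0.5]).tolist()
+    [0.0, 1.0, 2.5, 4.0, 1.5]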
+ + See Also + -------- + numpy.polymul : performs polynomial multiplication (same operation, but + also accepts poly1d objects) + choose_conv_method : chooses the fastest appropriate convolution method + fftconvolve + + Notes + ----- + By default, `convolve` and `correlate` use ``method='auto'``, which calls + `choose_conv_method` to choose the fastest method using pre-computed + values (`choose_conv_method` can also measure real-world timing with a + keyword argument). Because `fftconvolve` relies on floating point numbers, + there are certain constraints that may force `method=direct` (more detail + in `choose_conv_method` docstring). + + Examples + -------- + Smooth a square pulse using a Hann window: + + >>> from scipy import signal + >>> sig = np.repeat([0., 1., 0.], 100) + >>> win = signal.hann(50) + >>> filtered = signal.convolve(sig, win, mode='same') / sum(win) + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True) + >>> ax_orig.plot(sig) + >>> ax_orig.set_title('Original pulse') + >>> ax_orig.margins(0, 0.1) + >>> ax_win.plot(win) + >>> ax_win.set_title('Filter impulse response') + >>> ax_win.margins(0, 0.1) + >>> ax_filt.plot(filtered) + >>> ax_filt.set_title('Filtered signal') + >>> ax_filt.margins(0, 0.1) + >>> fig.tight_layout() + >>> fig.show() + + """ + volume = asarray(in1) + kernel = asarray(in2) + + if volume.ndim == kernel.ndim == 0: + return volume * kernel + elif volume.ndim != kernel.ndim: + raise ValueError("volume and kernel should have the same " + "dimensionality") + + if _inputs_swap_needed(mode, volume.shape, kernel.shape): + # Convolution is commutative; order doesn't have any effect on output + volume, kernel = kernel, volume + + if method == 'auto': + method = choose_conv_method(volume, kernel, mode=mode) + + if method == 'fft': + out = fftconvolve(volume, kernel, mode=mode) + result_type = np.result_type(volume, kernel) + if result_type.kind in {'u', 'i'}: + out = np.around(out) + return out.astype(result_type) + elif method == 'direct': + # fastpath to faster numpy.convolve for 1d inputs when possible + if _np_conv_ok(volume, kernel, mode): + return np.convolve(volume, kernel, mode) + + return correlate(volume, _reverse_and_conj(kernel), mode, 'direct') + else: + raise ValueError("Acceptable method flags are 'auto'," + " 'direct', or 'fft'.") + + +def order_filter(a, domain, rank): + """ + Perform an order filter on an N-dimensional array. + + Perform an order filter on the array in. The domain argument acts as a + mask centered over each pixel. The non-zero elements of domain are + used to select elements surrounding each input pixel which are placed + in a list. The list is sorted, and the output for that pixel is the + element corresponding to rank in the sorted list. + + Parameters + ---------- + a : ndarray + The N-dimensional input array. + domain : array_like + A mask array with the same number of dimensions as `a`. + Each dimension should have an odd number of elements. + rank : int + A non-negative integer which selects the element from the + sorted list (0 corresponds to the smallest element, 1 is the + next smallest element, etc.). + + Returns + ------- + out : ndarray + The results of the order filter in an array with the same + shape as `a`. 
+ + Examples + -------- + >>> from scipy import signal + >>> x = np.arange(25).reshape(5, 5) + >>> domain = np.identity(3) + >>> x + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19], + [20, 21, 22, 23, 24]]) + >>> signal.order_filter(x, domain, 0) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 2., 0.], + [ 0., 5., 6., 7., 0.], + [ 0., 10., 11., 12., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> signal.order_filter(x, domain, 2) + array([[ 6., 7., 8., 9., 4.], + [ 11., 12., 13., 14., 9.], + [ 16., 17., 18., 19., 14.], + [ 21., 22., 23., 24., 19.], + [ 20., 21., 22., 23., 24.]]) + + """ + domain = asarray(domain) + size = domain.shape + for k in range(len(size)): + if (size[k] % 2) != 1: + raise ValueError("Each dimension of domain argument " + " should have an odd number of elements.") + return sigtools._order_filterND(a, domain, rank) + + +def medfilt(volume, kernel_size=None): + """ + Perform a median filter on an N-dimensional array. + + Apply a median filter to the input array using a local window-size + given by `kernel_size`. + + Parameters + ---------- + volume : array_like + An N-dimensional input array. + kernel_size : array_like, optional + A scalar or an N-length list giving the size of the median filter + window in each dimension. Elements of `kernel_size` should be odd. + If `kernel_size` is a scalar, then this scalar is used as the size in + each dimension. Default size is 3 for each dimension. + + Returns + ------- + out : ndarray + An array the same size as input containing the median filtered + result. + + """ + volume = atleast_1d(volume) + if kernel_size is None: + kernel_size = [3] * volume.ndim + kernel_size = asarray(kernel_size) + if kernel_size.shape == (): + kernel_size = np.repeat(kernel_size.item(), volume.ndim) + + for k in range(volume.ndim): + if (kernel_size[k] % 2) != 1: + raise ValueError("Each element of kernel_size should be odd.") + + domain = ones(kernel_size) + + numels = product(kernel_size, axis=0) + order = numels // 2 + return sigtools._order_filterND(volume, domain, order) + + +def wiener(im, mysize=None, noise=None): + """ + Perform a Wiener filter on an N-dimensional array. + + Apply a Wiener filter to the N-dimensional array `im`. + + Parameters + ---------- + im : ndarray + An N-dimensional array. + mysize : int or array_like, optional + A scalar or an N-length list giving the size of the Wiener filter + window in each dimension. Elements of mysize should be odd. + If mysize is a scalar, then this scalar is used as the size + in each dimension. + noise : float, optional + The noise-power to use. If None, then noise is estimated as the + average of the local variance of the input. + + Returns + ------- + out : ndarray + Wiener filtered result with the same shape as `im`. + + """ + im = asarray(im) + if mysize is None: + mysize = [3] * im.ndim + mysize = asarray(mysize) + if mysize.shape == (): + mysize = np.repeat(mysize.item(), im.ndim) + + # Estimate the local mean + lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0) + + # Estimate the local variance + lVar = (correlate(im ** 2, ones(mysize), 'same') / + product(mysize, axis=0) - lMean ** 2) + + # Estimate the noise power if needed. + if noise is None: + noise = mean(ravel(lVar), axis=0) + + res = (im - lMean) + res *= (1 - noise / lVar) + res += lMean + out = where(lVar < noise, lMean, res) + + return out + + +def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0): + """ + Convolve two 2-dimensional arrays. 
+ + Convolve `in1` and `in2` with output size determined by `mode`, and + boundary conditions determined by `boundary` and `fillvalue`. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + boundary : str {'fill', 'wrap', 'symm'}, optional + A flag indicating how to handle boundaries: + + ``fill`` + pad input arrays with fillvalue. (default) + ``wrap`` + circular boundary conditions. + ``symm`` + symmetrical boundary conditions. + + fillvalue : scalar, optional + Value to fill pad input arrays with. Default is 0. + + Returns + ------- + out : ndarray + A 2-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + Examples + -------- + Compute the gradient of an image by 2D convolution with a complex Scharr + operator. (Horizontal operator is real, vertical is imaginary.) Use + symmetric boundary condition to avoid creating edges at the image + boundaries. + + >>> from scipy import signal + >>> from scipy import misc + >>> ascent = misc.ascent() + >>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j], + ... [-10+0j, 0+ 0j, +10 +0j], + ... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy + >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same') + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15)) + >>> ax_orig.imshow(ascent, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_mag.imshow(np.absolute(grad), cmap='gray') + >>> ax_mag.set_title('Gradient magnitude') + >>> ax_mag.set_axis_off() + >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles + >>> ax_ang.set_title('Gradient orientation') + >>> ax_ang.set_axis_off() + >>> fig.show() + + """ + in1 = asarray(in1) + in2 = asarray(in2) + + if not in1.ndim == in2.ndim == 2: + raise ValueError('convolve2d inputs must both be 2D arrays') + + if _inputs_swap_needed(mode, in1.shape, in2.shape): + in1, in2 = in2, in1 + + val = _valfrommode(mode) + bval = _bvalfromboundary(boundary) + out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue) + return out + + +def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0): + """ + Cross-correlate two 2-dimensional arrays. + + Cross correlate `in1` and `in2` with output size determined by `mode`, and + boundary conditions determined by `boundary` and `fillvalue`. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear cross-correlation + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. 
+ ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + boundary : str {'fill', 'wrap', 'symm'}, optional + A flag indicating how to handle boundaries: + + ``fill`` + pad input arrays with fillvalue. (default) + ``wrap`` + circular boundary conditions. + ``symm`` + symmetrical boundary conditions. + + fillvalue : scalar, optional + Value to fill pad input arrays with. Default is 0. + + Returns + ------- + correlate2d : ndarray + A 2-dimensional array containing a subset of the discrete linear + cross-correlation of `in1` with `in2`. + + Examples + -------- + Use 2D cross-correlation to find the location of a template in a noisy + image: + + >>> from scipy import signal + >>> from scipy import misc + >>> face = misc.face(gray=True) - misc.face(gray=True).mean() + >>> template = np.copy(face[300:365, 670:750]) # right eye + >>> template -= template.mean() + >>> face = face + np.random.randn(*face.shape) * 50 # add noise + >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same') + >>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match + + >>> import matplotlib.pyplot as plt + >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1, + ... figsize=(6, 15)) + >>> ax_orig.imshow(face, cmap='gray') + >>> ax_orig.set_title('Original') + >>> ax_orig.set_axis_off() + >>> ax_template.imshow(template, cmap='gray') + >>> ax_template.set_title('Template') + >>> ax_template.set_axis_off() + >>> ax_corr.imshow(corr, cmap='gray') + >>> ax_corr.set_title('Cross-correlation') + >>> ax_corr.set_axis_off() + >>> ax_orig.plot(x, y, 'ro') + >>> fig.show() + + """ + in1 = asarray(in1) + in2 = asarray(in2) + + if not in1.ndim == in2.ndim == 2: + raise ValueError('correlate2d inputs must both be 2D arrays') + + swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape) + if swapped_inputs: + in1, in2 = in2, in1 + + val = _valfrommode(mode) + bval = _bvalfromboundary(boundary) + out = sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue) + + if swapped_inputs: + out = out[::-1, ::-1] + + return out + + +def medfilt2d(input, kernel_size=3): + """ + Median filter a 2-dimensional array. + + Apply a median filter to the `input` array using a local window-size + given by `kernel_size` (must be odd). + + Parameters + ---------- + input : array_like + A 2-dimensional input array. + kernel_size : array_like, optional + A scalar or a list of length 2, giving the size of the + median filter window in each dimension. Elements of + `kernel_size` should be odd. If `kernel_size` is a scalar, + then this scalar is used as the size in each dimension. + Default is a kernel of size (3, 3). + + Returns + ------- + out : ndarray + An array the same size as input containing the median filtered + result. + + """ + image = asarray(input) + if kernel_size is None: + kernel_size = [3] * 2 + kernel_size = asarray(kernel_size) + if kernel_size.shape == (): + kernel_size = np.repeat(kernel_size.item(), 2) + + for size in kernel_size: + if (size % 2) != 1: + raise ValueError("Each element of kernel_size should be odd.") + + return sigtools._medfilt2d(image, kernel_size) + + +def lfilter(b, a, x, axis=-1, zi=None): + """ + Filter data along one-dimension with an IIR or FIR filter. + + Filter a data sequence, `x`, using a digital filter. This works for many + fundamental data types (including Object type). The filter is a direct + form II transposed implementation of the standard difference equation + (see Notes). 
+ + Parameters + ---------- + b : array_like + The numerator coefficient vector in a 1-D sequence. + a : array_like + The denominator coefficient vector in a 1-D sequence. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the filter delays. It is a vector + (or array of vectors for an N-dimensional input) of length + ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then + initial rest is assumed. See `lfiltic` for more information. + + Returns + ------- + y : array + The output of the digital filter. + zf : array, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + lfiltic : Construct initial conditions for `lfilter`. + lfilter_zi : Compute initial state (steady state of step response) for + `lfilter`. + filtfilt : A forward-backward filter, to obtain a filter with linear phase. + savgol_filter : A Savitzky-Golay filter. + sosfilt: Filter data using cascaded second-order sections. + sosfiltfilt: A forward-backward filter using second-order sections. + + Notes + ----- + The filter function is implemented as a direct II transposed structure. + This means that the filter implements:: + + a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M] + - a[1]*y[n-1] - ... - a[N]*y[n-N] + + where `M` is the degree of the numerator, `N` is the degree of the + denominator, and `n` is the sample number. It is implemented using + the following difference equations (assuming M = N):: + + a[0]*y[n] = b[0] * x[n] + d[0][n-1] + d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1] + d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1] + ... + d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1] + d[N-1][n] = b[N] * x[n] - a[N] * y[n] + + where `d` are the state variables. + + The rational transfer function describing this filter in the + z-transform domain is:: + + -1 -M + b[0] + b[1]z + ... + b[M] z + Y(z) = -------------------------------- X(z) + -1 -N + a[0] + a[1]z + ... + a[N] z + + Examples + -------- + Generate a noisy signal to be filtered: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(-1, 1, 201) + >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) + + ... 0.1*np.sin(2*np.pi*1.25*t + 1) + + ... 0.18*np.cos(2*np.pi*3.85*t)) + >>> xn = x + np.random.randn(len(t)) * 0.08 + + Create an order 3 lowpass butterworth filter: + + >>> b, a = signal.butter(3, 0.05) + + Apply the filter to xn. Use lfilter_zi to choose the initial condition of + the filter: + + >>> zi = signal.lfilter_zi(b, a) + >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0]) + + Apply the filter again, to have a result filtered at an order the same as + filtfilt: + + >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0]) + + Use filtfilt to apply the filter: + + >>> y = signal.filtfilt(b, a, xn) + + Plot the original signal and the various filtered versions: + + >>> plt.figure + >>> plt.plot(t, xn, 'b', alpha=0.75) + >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k') + >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice', + ... 'filtfilt'), loc='best') + >>> plt.grid(True) + >>> plt.show() + + """ + a = np.atleast_1d(a) + if len(a) == 1: + # This path only supports types fdgFDGO to mirror _linear_filter below. 
+ # Any of b, a, x, or zi can set the dtype, but there is no default + # casting of other types; instead a NotImplementedError is raised. + b = np.asarray(b) + a = np.asarray(a) + if b.ndim != 1 and a.ndim != 1: + raise ValueError('object of too small depth for desired array') + x = np.asarray(x) + inputs = [b, a, x] + if zi is not None: + # _linear_filter does not broadcast zi, but does do expansion of + # singleton dims. + zi = np.asarray(zi) + if zi.ndim != x.ndim: + raise ValueError('object of too small depth for desired array') + expected_shape = list(x.shape) + expected_shape[axis] = b.shape[0] - 1 + expected_shape = tuple(expected_shape) + # check the trivial case where zi is the right shape first + if zi.shape != expected_shape: + strides = zi.ndim * [None] + if axis < 0: + axis += zi.ndim + for k in range(zi.ndim): + if k == axis and zi.shape[k] == expected_shape[k]: + strides[k] = zi.strides[k] + elif k != axis and zi.shape[k] == expected_shape[k]: + strides[k] = zi.strides[k] + elif k != axis and zi.shape[k] == 1: + strides[k] = 0 + else: + raise ValueError('Unexpected shape for zi: expected ' + '%s, found %s.' % + (expected_shape, zi.shape)) + zi = np.lib.stride_tricks.as_strided(zi, expected_shape, + strides) + inputs.append(zi) + dtype = np.result_type(*inputs) + + if dtype.char not in 'fdgFDGO': + raise NotImplementedError("input type '%s' not supported" % dtype) + + b = np.array(b, dtype=dtype) + a = np.array(a, dtype=dtype, copy=False) + b /= a[0] + x = np.array(x, dtype=dtype, copy=False) + + out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x) + ind = out_full.ndim * [slice(None)] + if zi is not None: + ind[axis] = slice(zi.shape[axis]) + out_full[tuple(ind)] += zi + + ind[axis] = slice(out_full.shape[axis] - len(b) + 1) + out = out_full[tuple(ind)] + + if zi is None: + return out + else: + ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None) + zf = out_full[tuple(ind)] + return out, zf + else: + if zi is None: + return sigtools._linear_filter(b, a, x, axis) + else: + return sigtools._linear_filter(b, a, x, axis, zi) + + +def lfiltic(b, a, y, x=None): + """ + Construct initial conditions for lfilter given input and output vectors. + + Given a linear filter (b, a) and initial conditions on the output `y` + and the input `x`, return the initial conditions on the state vector zi + which is used by `lfilter` to generate the output given the input. + + Parameters + ---------- + b : array_like + Linear filter term. + a : array_like + Linear filter term. + y : array_like + Initial conditions. + + If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``. + + If `y` is too short, it is padded with zeros. + x : array_like, optional + Initial conditions. + + If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``. + + If `x` is not given, its initial conditions are assumed zero. + + If `x` is too short, it is padded with zeros. + + Returns + ------- + zi : ndarray + The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, + where ``K = max(M, N)``. 
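+
+    A minimal sketch (values assume this example first-order filter, which
+    settles at a constant output of 2.0 for a constant unit input):
+
+    >>> from scipy import signal
+    >>> b, a = [0.5, 0.5], [1.0, -0.5]
+    >>> zi = signal.lfiltic(b, a, y=[2.0], x=[1.0])
+    >>> zi.tolist()
+    [1.5]
+
+    Restarting `lfilter` from this state keeps the output at steady state:
+
+    >>> y, zf = signal.lfilter(b, a, np.ones(5), zi=zi)
+    >>> y.tolist()
+    [2.0, 2.0, 2.0, 2.0, 2.0]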
+ + See Also + -------- + lfilter, lfilter_zi + + """ + N = np.size(a) - 1 + M = np.size(b) - 1 + K = max(M, N) + y = asarray(y) + if y.dtype.kind in 'bui': + # ensure calculations are floating point + y = y.astype(np.float64) + zi = zeros(K, y.dtype) + if x is None: + x = zeros(M, y.dtype) + else: + x = asarray(x) + L = np.size(x) + if L < M: + x = r_[x, zeros(M - L)] + L = np.size(y) + if L < N: + y = r_[y, zeros(N - L)] + + for m in range(M): + zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0) + + for m in range(N): + zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0) + + return zi + + +def deconvolve(signal, divisor): + """Deconvolves ``divisor`` out of ``signal`` using inverse filtering. + + Returns the quotient and remainder such that + ``signal = convolve(divisor, quotient) + remainder`` + + Parameters + ---------- + signal : array_like + Signal data, typically a recorded signal + divisor : array_like + Divisor data, typically an impulse response or filter that was + applied to the original signal + + Returns + ------- + quotient : ndarray + Quotient, typically the recovered original signal + remainder : ndarray + Remainder + + Examples + -------- + Deconvolve a signal that's been filtered: + + >>> from scipy import signal + >>> original = [0, 1, 0, 0, 1, 1, 0, 0] + >>> impulse_response = [2, 1] + >>> recorded = signal.convolve(impulse_response, original) + >>> recorded + array([0, 2, 1, 0, 2, 3, 1, 0, 0]) + >>> recovered, remainder = signal.deconvolve(recorded, impulse_response) + >>> recovered + array([ 0., 1., 0., 0., 1., 1., 0., 0.]) + + See Also + -------- + numpy.polydiv : performs polynomial division (same operation, but + also accepts poly1d objects) + + """ + num = atleast_1d(signal) + den = atleast_1d(divisor) + N = len(num) + D = len(den) + if D > N: + quot = [] + rem = num + else: + input = zeros(N - D + 1, float) + input[0] = 1 + quot = lfilter(num, den, input) + rem = num - convolve(den, quot, mode='full') + return quot, rem + + +def hilbert(x, N=None, axis=-1): + """ + Compute the analytic signal, using the Hilbert transform. + + The transformation is done along the last axis by default. + + Parameters + ---------- + x : array_like + Signal data. Must be real. + N : int, optional + Number of Fourier components. Default: ``x.shape[axis]`` + axis : int, optional + Axis along which to do the transformation. Default: -1. + + Returns + ------- + xa : ndarray + Analytic signal of `x`, of each 1-D array along `axis` + + See Also + -------- + scipy.fftpack.hilbert : Return Hilbert transform of a periodic sequence x. + + Notes + ----- + The analytic signal ``x_a(t)`` of signal ``x(t)`` is: + + .. math:: x_a = F^{-1}(F(x) 2U) = x + i y + + where `F` is the Fourier transform, `U` the unit step function, + and `y` the Hilbert transform of `x`. [1]_ + + In other words, the negative half of the frequency spectrum is zeroed + out, turning the real-valued signal into a complex signal. The Hilbert + transformed signal can be obtained from ``np.imag(hilbert(x))``, and the + original signal from ``np.real(hilbert(x))``. + + Examples + --------- + In this example we use the Hilbert transform to determine the amplitude + envelope and instantaneous frequency of an amplitude-modulated signal. 
+ + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import hilbert, chirp + + >>> duration = 1.0 + >>> fs = 400.0 + >>> samples = int(fs*duration) + >>> t = np.arange(samples) / fs + + We create a chirp of which the frequency increases from 20 Hz to 100 Hz and + apply an amplitude modulation. + + >>> signal = chirp(t, 20.0, t[-1], 100.0) + >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) ) + + The amplitude envelope is given by magnitude of the analytic signal. The + instantaneous frequency can be obtained by differentiating the + instantaneous phase in respect to time. The instantaneous phase corresponds + to the phase angle of the analytic signal. + + >>> analytic_signal = hilbert(signal) + >>> amplitude_envelope = np.abs(analytic_signal) + >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal)) + >>> instantaneous_frequency = (np.diff(instantaneous_phase) / + ... (2.0*np.pi) * fs) + + >>> fig = plt.figure() + >>> ax0 = fig.add_subplot(211) + >>> ax0.plot(t, signal, label='signal') + >>> ax0.plot(t, amplitude_envelope, label='envelope') + >>> ax0.set_xlabel("time in seconds") + >>> ax0.legend() + >>> ax1 = fig.add_subplot(212) + >>> ax1.plot(t[1:], instantaneous_frequency) + >>> ax1.set_xlabel("time in seconds") + >>> ax1.set_ylim(0.0, 120.0) + + References + ---------- + .. [1] Wikipedia, "Analytic signal". + https://en.wikipedia.org/wiki/Analytic_signal + .. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2. + .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal + Processing, Third Edition, 2009. Chapter 12. + ISBN 13: 978-1292-02572-8 + + """ + x = asarray(x) + if iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape[axis] + if N <= 0: + raise ValueError("N must be positive.") + + Xf = fftpack.fft(x, N, axis=axis) + h = zeros(N) + if N % 2 == 0: + h[0] = h[N // 2] = 1 + h[1:N // 2] = 2 + else: + h[0] = 1 + h[1:(N + 1) // 2] = 2 + + if x.ndim > 1: + ind = [newaxis] * x.ndim + ind[axis] = slice(None) + h = h[tuple(ind)] + x = fftpack.ifft(Xf * h, axis=axis) + return x + + +def hilbert2(x, N=None): + """ + Compute the '2-D' analytic signal of `x` + + Parameters + ---------- + x : array_like + 2-D signal data. + N : int or tuple of two ints, optional + Number of Fourier components. Default is ``x.shape`` + + Returns + ------- + xa : ndarray + Analytic signal of `x` taken along axes (0,1). + + References + ---------- + .. [1] Wikipedia, "Analytic signal", + https://en.wikipedia.org/wiki/Analytic_signal + + """ + x = atleast_2d(x) + if x.ndim > 2: + raise ValueError("x must be 2-D.") + if iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape + elif isinstance(N, int): + if N <= 0: + raise ValueError("N must be positive.") + N = (N, N) + elif len(N) != 2 or np.any(np.asarray(N) <= 0): + raise ValueError("When given as a tuple, N must hold exactly " + "two positive integers") + + Xf = fftpack.fft2(x, N, axes=(0, 1)) + h1 = zeros(N[0], 'd') + h2 = zeros(N[1], 'd') + for p in range(2): + h = eval("h%d" % (p + 1)) + N1 = N[p] + if N1 % 2 == 0: + h[0] = h[N1 // 2] = 1 + h[1:N1 // 2] = 2 + else: + h[0] = 1 + h[1:(N1 + 1) // 2] = 2 + exec("h%d = h" % (p + 1), globals(), locals()) + + h = h1[:, newaxis] * h2[newaxis, :] + k = x.ndim + while k > 2: + h = h[:, newaxis] + k -= 1 + x = fftpack.ifft2(Xf * h, axes=(0, 1)) + return x + + +def cmplx_sort(p): + """Sort roots based on magnitude. + + Parameters + ---------- + p : array_like + The roots to sort, as a 1-D array. 
+ + Returns + ------- + p_sorted : ndarray + Sorted roots. + indx : ndarray + Array of indices needed to sort the input `p`. + + Examples + -------- + >>> from scipy import signal + >>> vals = [1, 4, 1+1.j, 3] + >>> p_sorted, indx = signal.cmplx_sort(vals) + >>> p_sorted + array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j]) + >>> indx + array([0, 2, 3, 1]) + + """ + p = asarray(p) + if iscomplexobj(p): + indx = argsort(abs(p)) + else: + indx = argsort(p) + return take(p, indx, 0), indx + + +def unique_roots(p, tol=1e-3, rtype='min'): + """ + Determine unique roots and their multiplicities from a list of roots. + + Parameters + ---------- + p : array_like + The list of roots. + tol : float, optional + The tolerance for two roots to be considered equal. Default is 1e-3. + rtype : {'max', 'min, 'avg'}, optional + How to determine the returned root if multiple roots are within + `tol` of each other. + + - 'max': pick the maximum of those roots. + - 'min': pick the minimum of those roots. + - 'avg': take the average of those roots. + + Returns + ------- + pout : ndarray + The list of unique roots, sorted from low to high. + mult : ndarray + The multiplicity of each root. + + Notes + ----- + This utility function is not specific to roots but can be used for any + sequence of values for which uniqueness and multiplicity has to be + determined. For a more general routine, see `numpy.unique`. + + Examples + -------- + >>> from scipy import signal + >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3] + >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg') + + Check which roots have multiplicity larger than 1: + + >>> uniq[mult > 1] + array([ 1.305]) + + """ + if rtype in ['max', 'maximum']: + comproot = np.max + elif rtype in ['min', 'minimum']: + comproot = np.min + elif rtype in ['avg', 'mean']: + comproot = np.mean + else: + raise ValueError("`rtype` must be one of " + "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") + p = asarray(p) * 1.0 + tol = abs(tol) + p, indx = cmplx_sort(p) + pout = [] + mult = [] + indx = -1 + curp = p[0] + 5 * tol + sameroots = [] + for k in range(len(p)): + tr = p[k] + if abs(tr - curp) < tol: + sameroots.append(tr) + curp = comproot(sameroots) + pout[indx] = curp + mult[indx] += 1 + else: + pout.append(tr) + curp = tr + sameroots = [tr] + indx += 1 + mult.append(1) + return array(pout), array(mult) + + +def invres(r, p, k, tol=1e-3, rtype='avg'): + """ + Compute b(s) and a(s) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] + H(s) = ------ = ------------------------------------------ + a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] + + then the partial-fraction expansion H(s) is defined as:: + + r[0] r[1] r[-1] + = -------- + -------- + ... + --------- + k(s) + (s-p[0]) (s-p[1]) (s-p[-1]) + + If there are any repeated roots (closer together than `tol`), then H(s) + has terms like:: + + r[i] r[i+1] r[i+n-1] + -------- + ----------- + ... + ----------- + (s-p[i]) (s-p[i])**2 (s-p[i])**n + + This function is used for polynomials in positive powers of s or z, + such as analog filters or digital filters in controls engineering. For + negative powers of z (typical for digital filters in DSP), use `invresz`. + + Parameters + ---------- + r : array_like + Residues. + p : array_like + Poles. + k : array_like + Coefficients of the direct polynomial term. + tol : float, optional + The tolerance for two roots to be considered equal. Default is 1e-3. 
+ rtype : {'max', 'min, 'avg'}, optional + How to determine the returned root if multiple roots are within + `tol` of each other. + + - 'max': pick the maximum of those roots. + - 'min': pick the minimum of those roots. + - 'avg': take the average of those roots. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + residue, invresz, unique_roots + + """ + extra = k + p, indx = cmplx_sort(p) + r = take(r, indx, 0) + pout, mult = unique_roots(p, tol=tol, rtype=rtype) + p = [] + for k in range(len(pout)): + p.extend([pout[k]] * mult[k]) + a = atleast_1d(poly(p)) + if len(extra) > 0: + b = polymul(extra, a) + else: + b = [0] + indx = 0 + for k in range(len(pout)): + temp = [] + for l in range(len(pout)): + if l != k: + temp.extend([pout[l]] * mult[l]) + for m in range(mult[k]): + t2 = temp[:] + t2.extend([pout[k]] * (mult[k] - m - 1)) + b = polyadd(b, r[indx] * atleast_1d(poly(t2))) + indx += 1 + b = real_if_close(b) + while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1): + b = b[1:] + return b, a + + +def residue(b, a, tol=1e-3, rtype='avg'): + """ + Compute partial-fraction expansion of b(s) / a(s). + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] + H(s) = ------ = ------------------------------------------ + a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] + + then the partial-fraction expansion H(s) is defined as:: + + r[0] r[1] r[-1] + = -------- + -------- + ... + --------- + k(s) + (s-p[0]) (s-p[1]) (s-p[-1]) + + If there are any repeated roots (closer together than `tol`), then H(s) + has terms like:: + + r[i] r[i+1] r[i+n-1] + -------- + ----------- + ... + ----------- + (s-p[i]) (s-p[i])**2 (s-p[i])**n + + This function is used for polynomials in positive powers of s or z, + such as analog filters or digital filters in controls engineering. For + negative powers of z (typical for digital filters in DSP), use `residuez`. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + + Returns + ------- + r : ndarray + Residues. + p : ndarray + Poles. + k : ndarray + Coefficients of the direct polynomial term. + + See Also + -------- + invres, residuez, numpy.poly, unique_roots + + """ + + b, a = map(asarray, (b, a)) + rscale = a[0] + k, b = polydiv(b, a) + p = roots(a) + r = p * 0.0 + pout, mult = unique_roots(p, tol=tol, rtype=rtype) + p = [] + for n in range(len(pout)): + p.extend([pout[n]] * mult[n]) + p = asarray(p) + # Compute the residue from the general formula + indx = 0 + for n in range(len(pout)): + bn = b.copy() + pn = [] + for l in range(len(pout)): + if l != n: + pn.extend([pout[l]] * mult[l]) + an = atleast_1d(poly(pn)) + # bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is + # multiplicity of pole at po[n] + sig = mult[n] + for m in range(sig, 0, -1): + if sig > m: + # compute next derivative of bn(s) / an(s) + term1 = polymul(polyder(bn, 1), an) + term2 = polymul(bn, polyder(an, 1)) + bn = polysub(term1, term2) + an = polymul(an, an) + r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n]) / + factorial(sig - m)) + indx += sig + return r / rscale, p, k + + +def residuez(b, a, tol=1e-3, rtype='avg'): + """ + Compute partial-fraction expansion of b(z) / a(z). + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(z) b[0] + b[1] z**(-1) + ... 
+ b[M] z**(-M) + H(z) = ------ = ------------------------------------------ + a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) + + then the partial-fraction expansion H(z) is defined as:: + + r[0] r[-1] + = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... + (1-p[0]z**(-1)) (1-p[-1]z**(-1)) + + If there are any repeated roots (closer than `tol`), then the partial + fraction expansion has terms like:: + + r[i] r[i+1] r[i+n-1] + -------------- + ------------------ + ... + ------------------ + (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n + + This function is used for polynomials in negative powers of z, + such as digital filters in DSP. For positive powers, use `residue`. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + + Returns + ------- + r : ndarray + Residues. + p : ndarray + Poles. + k : ndarray + Coefficients of the direct polynomial term. + + See Also + -------- + invresz, residue, unique_roots + + """ + b, a = map(asarray, (b, a)) + gain = a[0] + brev, arev = b[::-1], a[::-1] + krev, brev = polydiv(brev, arev) + if krev == []: + k = [] + else: + k = krev[::-1] + b = brev[::-1] + p = roots(a) + r = p * 0.0 + pout, mult = unique_roots(p, tol=tol, rtype=rtype) + p = [] + for n in range(len(pout)): + p.extend([pout[n]] * mult[n]) + p = asarray(p) + # Compute the residue from the general formula (for discrete-time) + # the polynomial is in z**(-1) and the multiplication is by terms + # like this (1-p[i] z**(-1))**mult[i]. After differentiation, + # we must divide by (-p[i])**(m-k) as well as (m-k)! + indx = 0 + for n in range(len(pout)): + bn = brev.copy() + pn = [] + for l in range(len(pout)): + if l != n: + pn.extend([pout[l]] * mult[l]) + an = atleast_1d(poly(pn))[::-1] + # bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is + # multiplicity of pole at po[n] and b(z) and a(z) are polynomials. + sig = mult[n] + for m in range(sig, 0, -1): + if sig > m: + # compute next derivative of bn(s) / an(s) + term1 = polymul(polyder(bn, 1), an) + term2 = polymul(bn, polyder(an, 1)) + bn = polysub(term1, term2) + an = polymul(an, an) + r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) / + polyval(an, 1.0 / pout[n]) / + factorial(sig - m) / (-pout[n]) ** (sig - m)) + indx += sig + return r / gain, p, k + + +def invresz(r, p, k, tol=1e-3, rtype='avg'): + """ + Compute b(z) and a(z) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) + H(z) = ------ = ------------------------------------------ + a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) + + then the partial-fraction expansion H(z) is defined as:: + + r[0] r[-1] + = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... + (1-p[0]z**(-1)) (1-p[-1]z**(-1)) + + If there are any repeated roots (closer than `tol`), then the partial + fraction expansion has terms like:: + + r[i] r[i+1] r[i+n-1] + -------------- + ------------------ + ... + ------------------ + (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n + + This function is used for polynomials in negative powers of z, + such as digital filters in DSP. For positive powers, use `invres`. + + Parameters + ---------- + r : array_like + Residues. + p : array_like + Poles. + k : array_like + Coefficients of the direct polynomial term. + tol : float, optional + The tolerance for two roots to be considered equal. Default is 1e-3. 
+ rtype : {'max', 'min, 'avg'}, optional + How to determine the returned root if multiple roots are within + `tol` of each other. + + - 'max': pick the maximum of those roots. + - 'min': pick the minimum of those roots. + - 'avg': take the average of those roots. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + residuez, unique_roots, invres + + """ + extra = asarray(k) + p, indx = cmplx_sort(p) + r = take(r, indx, 0) + pout, mult = unique_roots(p, tol=tol, rtype=rtype) + p = [] + for k in range(len(pout)): + p.extend([pout[k]] * mult[k]) + a = atleast_1d(poly(p)) + if len(extra) > 0: + b = polymul(extra, a) + else: + b = [0] + indx = 0 + brev = asarray(b)[::-1] + for k in range(len(pout)): + temp = [] + # Construct polynomial which does not include any of this root + for l in range(len(pout)): + if l != k: + temp.extend([pout[l]] * mult[l]) + for m in range(mult[k]): + t2 = temp[:] + t2.extend([pout[k]] * (mult[k] - m - 1)) + brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1]) + indx += 1 + b = real_if_close(brev[::-1]) + return b, a + + +def resample(x, num, t=None, axis=0, window=None): + """ + Resample `x` to `num` samples using Fourier method along the given axis. + + The resampled signal starts at the same value as `x` but is sampled + with a spacing of ``len(x) / num * (spacing of x)``. Because a + Fourier method is used, the signal is assumed to be periodic. + + Parameters + ---------- + x : array_like + The data to be resampled. + num : int + The number of samples in the resampled signal. + t : array_like, optional + If `t` is given, it is assumed to be the sample positions + associated with the signal data in `x`. + axis : int, optional + The axis of `x` that is resampled. Default is 0. + window : array_like, callable, string, float, or tuple, optional + Specifies the window applied to the signal in the Fourier + domain. See below for details. + + Returns + ------- + resampled_x or (resampled_x, resampled_t) + Either the resampled array, or, if `t` was given, a tuple + containing the resampled array and the corresponding resampled + positions. + + See Also + -------- + decimate : Downsample the signal after applying an FIR or IIR filter. + resample_poly : Resample using polyphase filtering and an FIR filter. + + Notes + ----- + The argument `window` controls a Fourier-domain window that tapers + the Fourier spectrum before zero-padding to alleviate ringing in + the resampled values for sampled signals you didn't intend to be + interpreted as band-limited. + + If `window` is a function, then it is called with a vector of inputs + indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ). + + If `window` is an array of the same length as `x.shape[axis]` it is + assumed to be the window to be applied directly in the Fourier + domain (with dc and low-frequency first). + + For any other type of `window`, the function `scipy.signal.get_window` + is called to generate the window. + + The first sample of the returned vector is the same as the first + sample of the input vector. The spacing between samples is changed + from ``dx`` to ``dx * len(x) / num``. + + If `t` is not None, then it represents the old sample positions, + and the new sample positions will be returned as well as the new + samples. + + As noted, `resample` uses FFT transformations, which can be very + slow if the number of input or output samples is large and prime; + see `scipy.fftpack.fft`. 
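+
+    As a quick check of these conventions (a minimal sketch; the input
+    values are illustrative), the output always has exactly `num`
+    samples:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> y = signal.resample(np.sin(2 * np.pi * np.arange(20) / 20), 100)
+    >>> y.shape
+    (100,)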
+ + Examples + -------- + Note that the end of the resampled data rises to meet the first + sample of the next cycle: + + >>> from scipy import signal + + >>> x = np.linspace(0, 10, 20, endpoint=False) + >>> y = np.cos(-x**2/6.0) + >>> f = signal.resample(y, 100) + >>> xnew = np.linspace(0, 10, 100, endpoint=False) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro') + >>> plt.legend(['data', 'resampled'], loc='best') + >>> plt.show() + """ + x = asarray(x) + X = fftpack.fft(x, axis=axis) + Nx = x.shape[axis] + if window is not None: + if callable(window): + W = window(fftpack.fftfreq(Nx)) + elif isinstance(window, ndarray): + if window.shape != (Nx,): + raise ValueError('window must have the same length as data') + W = window + else: + W = fftpack.ifftshift(get_window(window, Nx)) + newshape = [1] * x.ndim + newshape[axis] = len(W) + W.shape = newshape + X = X * W + W.shape = (Nx,) + sl = [slice(None)] * x.ndim + newshape = list(x.shape) + newshape[axis] = num + N = int(np.minimum(num, Nx)) + Y = zeros(newshape, 'D') + sl[axis] = slice(0, (N + 1) // 2) + Y[tuple(sl)] = X[tuple(sl)] + sl[axis] = slice(-(N - 1) // 2, None) + Y[tuple(sl)] = X[tuple(sl)] + + if N % 2 == 0: # special treatment if low number of points is even. So far we have set Y[-N/2]=X[-N/2] + if N < Nx: # if downsampling + sl[axis] = slice(N//2,N//2+1,None) # select the component at frequency N/2 + Y[tuple(sl)] += X[tuple(sl)] # add the component of X at N/2 + elif N < num: # if upsampling + sl[axis] = slice(num-N//2,num-N//2+1,None) # select the component at frequency -N/2 + Y[tuple(sl)] /= 2 # halve the component at -N/2 + temp = Y[tuple(sl)] + sl[axis] = slice(N//2,N//2+1,None) # select the component at +N/2 + Y[tuple(sl)] = temp # set that equal to the component at -N/2 + + y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx)) + + if x.dtype.char not in ['F', 'D']: + y = y.real + + if t is None: + return y + else: + new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0] + return y, new_t + + +def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)): + """ + Resample `x` along the given axis using polyphase filtering. + + The signal `x` is upsampled by the factor `up`, a zero-phase low-pass + FIR filter is applied, and then it is downsampled by the factor `down`. + The resulting sample rate is ``up / down`` times the original sample + rate. Values beyond the boundary of the signal are assumed to be zero + during the filtering step. + + Parameters + ---------- + x : array_like + The data to be resampled. + up : int + The upsampling factor. + down : int + The downsampling factor. + axis : int, optional + The axis of `x` that is resampled. Default is 0. + window : string, tuple, or array_like, optional + Desired window to use to design the low-pass filter, or the FIR filter + coefficients to employ. See below for details. + + Returns + ------- + resampled_x : array + The resampled array. + + See Also + -------- + decimate : Downsample the signal after applying an FIR or IIR filter. + resample : Resample up or down using the FFT method. + + Notes + ----- + This polyphase method will likely be faster than the Fourier method + in `scipy.signal.resample` when the number of samples is large and + prime, or when the number of samples is large and `up` and `down` + share a large greatest common denominator. 
The length of the FIR + filter used will depend on ``max(up, down) // gcd(up, down)``, and + the number of operations during polyphase filtering will depend on + the filter length and `down` (see `scipy.signal.upfirdn` for details). + + The argument `window` specifies the FIR low-pass filter design. + + If `window` is an array_like it is assumed to be the FIR filter + coefficients. Note that the FIR filter is applied after the upsampling + step, so it should be designed to operate on a signal at a sampling + frequency higher than the original by a factor of `up//gcd(up, down)`. + This function's output will be centered with respect to this array, so it + is best to pass a symmetric filter with an odd number of samples if, as + is usually the case, a zero-phase filter is desired. + + For any other type of `window`, the functions `scipy.signal.get_window` + and `scipy.signal.firwin` are called to generate the appropriate filter + coefficients. + + The first sample of the returned vector is the same as the first + sample of the input vector. The spacing between samples is changed + from ``dx`` to ``dx * down / float(up)``. + + Examples + -------- + Note that the end of the resampled data rises to meet the first + sample of the next cycle for the FFT method, and gets closer to zero + for the polyphase method: + + >>> from scipy import signal + + >>> x = np.linspace(0, 10, 20, endpoint=False) + >>> y = np.cos(-x**2/6.0) + >>> f_fft = signal.resample(y, 100) + >>> f_poly = signal.resample_poly(y, 100, 20) + >>> xnew = np.linspace(0, 10, 100, endpoint=False) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-') + >>> plt.plot(x, y, 'ko-') + >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries + >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best') + >>> plt.show() + """ + x = asarray(x) + if up != int(up): + raise ValueError("up must be an integer") + if down != int(down): + raise ValueError("down must be an integer") + up = int(up) + down = int(down) + if up < 1 or down < 1: + raise ValueError('up and down must be >= 1') + + # Determine our up and down factors + # Use a rational approximation to save computation time on really long + # signals + g_ = gcd(up, down) + up //= g_ + down //= g_ + if up == down == 1: + return x.copy() + n_out = x.shape[axis] * up + n_out = n_out // down + bool(n_out % down) + + if isinstance(window, (list, np.ndarray)): + window = array(window) # use array to force a copy (we modify it) + if window.ndim > 1: + raise ValueError('window must be 1-D') + half_len = (window.size - 1) // 2 + h = window + else: + # Design a linear-phase low-pass FIR filter + max_rate = max(up, down) + f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist) + half_len = 10 * max_rate # reasonable cutoff for our sinc-like function + h = firwin(2 * half_len + 1, f_c, window=window) + h *= up + + # Zero-pad our filter to put the output samples at the center + n_pre_pad = (down - half_len % down) + n_post_pad = 0 + n_pre_remove = (half_len + n_pre_pad) // down + # We should rarely need to do this given our filter lengths... 
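+    # For reference, a worked instance of the bookkeeping above
+    # (illustrative numbers: a user-supplied 21-tap window with up=2,
+    # down=3 after the gcd reduction): half_len = (21 - 1) // 2 = 10,
+    # n_pre_pad = 3 - 10 % 3 = 2, and n_pre_remove = (10 + 2) // 3 = 4.
+    # The loop below post-pads h only if the output would otherwise be
+    # too short: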
+ while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis], + up, down) < n_out + n_pre_remove: + n_post_pad += 1 + h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h, + np.zeros(n_post_pad, dtype=h.dtype))) + n_pre_remove_end = n_pre_remove + n_out + + # filter then remove excess + y = upfirdn(h, x, up, down, axis=axis) + keep = [slice(None), ]*x.ndim + keep[axis] = slice(n_pre_remove, n_pre_remove_end) + return y[tuple(keep)] + + +def vectorstrength(events, period): + ''' + Determine the vector strength of the events corresponding to the given + period. + + The vector strength is a measure of phase synchrony, how well the + timing of the events is synchronized to a single period of a periodic + signal. + + If multiple periods are used, calculate the vector strength of each. + This is called the "resonating vector strength". + + Parameters + ---------- + events : 1D array_like + An array of time points containing the timing of the events. + period : float or array_like + The period of the signal that the events should synchronize to. + The period is in the same units as `events`. It can also be an array + of periods, in which case the outputs are arrays of the same length. + + Returns + ------- + strength : float or 1D array + The strength of the synchronization. 1.0 is perfect synchronization + and 0.0 is no synchronization. If `period` is an array, this is also + an array with each element containing the vector strength at the + corresponding period. + phase : float or array + The phase that the events are most strongly synchronized to in radians. + If `period` is an array, this is also an array with each element + containing the phase for the corresponding period. + + References + ---------- + van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector + strength: Auditory system, electric fish, and noise. + Chaos 21, 047508 (2011); + :doi:`10.1063/1.3670512`. + van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises: + biological and mathematical perspectives. Biol Cybern. + 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`. + van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens + when we vary the "probing" frequency while keeping the spike times + fixed. Biol Cybern. 2013 Aug;107(4):491-94. + :doi:`10.1007/s00422-013-0560-8`. + ''' + events = asarray(events) + period = asarray(period) + if events.ndim > 1: + raise ValueError('events cannot have dimensions more than 1') + if period.ndim > 1: + raise ValueError('period cannot have dimensions more than 1') + + # we need to know later if period was originally a scalar + scalarperiod = not period.ndim + + events = atleast_2d(events) + period = atleast_2d(period) + if (period <= 0).any(): + raise ValueError('periods must be positive') + + # this converts the times to vectors + vectors = exp(dot(2j*pi/period.T, events)) + + # the vector strength is just the magnitude of the mean of the vectors + # the vector phase is the angle of the mean of the vectors + vectormean = mean(vectors, axis=1) + strength = abs(vectormean) + phase = angle(vectormean) + + # if the original period was a scalar, return scalars + if scalarperiod: + strength = strength[0] + phase = phase[0] + return strength, phase + + +def detrend(data, axis=-1, type='linear', bp=0): + """ + Remove linear trend along axis from data. + + Parameters + ---------- + data : array_like + The input data. + axis : int, optional + The axis along which to detrend the data. By default this is the + last axis (-1). 
+ type : {'linear', 'constant'}, optional + The type of detrending. If ``type == 'linear'`` (default), + the result of a linear least-squares fit to `data` is subtracted + from `data`. + If ``type == 'constant'``, only the mean of `data` is subtracted. + bp : array_like of ints, optional + A sequence of break points. If given, an individual linear fit is + performed for each part of `data` between two break points. + Break points are specified as indices into `data`. + + Returns + ------- + ret : ndarray + The detrended input data. + + Examples + -------- + >>> from scipy import signal + >>> randgen = np.random.RandomState(9) + >>> npoints = 1000 + >>> noise = randgen.randn(npoints) + >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise + >>> (signal.detrend(x) - noise).max() < 0.01 + True + + """ + if type not in ['linear', 'l', 'constant', 'c']: + raise ValueError("Trend type must be 'linear' or 'constant'.") + data = asarray(data) + dtype = data.dtype.char + if dtype not in 'dfDF': + dtype = 'd' + if type in ['constant', 'c']: + ret = data - expand_dims(mean(data, axis), axis) + return ret + else: + dshape = data.shape + N = dshape[axis] + bp = sort(unique(r_[0, bp, N])) + if np.any(bp > N): + raise ValueError("Breakpoints must be less than length " + "of data along given axis.") + Nreg = len(bp) - 1 + # Restructure data so that axis is along first dimension and + # all other dimensions are collapsed into second dimension + rnk = len(dshape) + if axis < 0: + axis = axis + rnk + newdims = r_[axis, 0:axis, axis + 1:rnk] + newdata = reshape(transpose(data, tuple(newdims)), + (N, _prod(dshape) // N)) + newdata = newdata.copy() # make sure we have a copy + if newdata.dtype.char not in 'dfDF': + newdata = newdata.astype(dtype) + # Find leastsq fit and remove it for each piece + for m in range(Nreg): + Npts = bp[m + 1] - bp[m] + A = ones((Npts, 2), dtype) + A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts) + sl = slice(bp[m], bp[m + 1]) + coef, resids, rank, s = linalg.lstsq(A, newdata[sl]) + newdata[sl] = newdata[sl] - dot(A, coef) + # Put data back in original shape. + tdshape = take(dshape, newdims, 0) + ret = reshape(newdata, tuple(tdshape)) + vals = list(range(1, rnk)) + olddims = vals[:axis] + [0] + vals[axis:] + ret = transpose(ret, tuple(olddims)) + return ret + + +def lfilter_zi(b, a): + """ + Construct initial conditions for lfilter for step response steady-state. + + Compute an initial state `zi` for the `lfilter` function that corresponds + to the steady state of the step response. + + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + b, a : array_like (1-D) + The IIR filter coefficients. See `lfilter` for more + information. + + Returns + ------- + zi : 1-D ndarray + The initial state for the filter. + + See Also + -------- + lfilter, lfiltic, filtfilt + + Notes + ----- + A linear filter with order m has a state space representation (A, B, C, D), + for which the output y of the filter can be expressed as:: + + z(n+1) = A*z(n) + B*x(n) + y(n) = C*z(n) + D*x(n) + + where z(n) is a vector of length m, A has shape (m, m), B has shape + (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is + a scalar). lfilter_zi solves:: + + zi = A*zi + B + + In other words, it finds the initial condition for which the response + to an input of all ones is a constant. 
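+
+    Equivalently, because the relation ``zi = A*zi + B`` is linear in
+    `zi`, it can be rewritten as the linear system::
+
+        (I - A)*zi = B
+
+    which is what the implementation solves (with ``numpy.linalg.solve``).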
+ + Given the filter coefficients `a` and `b`, the state space matrices + for the transposed direct form II implementation of the linear filter, + which is the implementation used by scipy.signal.lfilter, are:: + + A = scipy.linalg.companion(a).T + B = b[1:] - a[1:]*b[0] + + assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first + divided by a[0]. + + Examples + -------- + The following code creates a lowpass Butterworth filter. Then it + applies that filter to an array whose values are all 1.0; the + output is also all 1.0, as expected for a lowpass filter. If the + `zi` argument of `lfilter` had not been given, the output would have + shown the transient signal. + + >>> from numpy import array, ones + >>> from scipy.signal import lfilter, lfilter_zi, butter + >>> b, a = butter(5, 0.25) + >>> zi = lfilter_zi(b, a) + >>> y, zo = lfilter(b, a, ones(10), zi=zi) + >>> y + array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) + + Another example: + + >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]) + >>> y, zf = lfilter(b, a, x, zi=zi*x[0]) + >>> y + array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528, + 0.44399389, 0.35505241]) + + Note that the `zi` argument to `lfilter` was computed using + `lfilter_zi` and scaled by `x[0]`. Then the output `y` has no + transient until the input drops from 0.5 to 0.0. + + """ + + # FIXME: Can this function be replaced with an appropriate + # use of lfiltic? For example, when b,a = butter(N,Wn), + # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)). + # + + # We could use scipy.signal.normalize, but it uses warnings in + # cases where a ValueError is more appropriate, and it allows + # b to be 2D. + b = np.atleast_1d(b) + if b.ndim != 1: + raise ValueError("Numerator b must be 1-D.") + a = np.atleast_1d(a) + if a.ndim != 1: + raise ValueError("Denominator a must be 1-D.") + + while len(a) > 1 and a[0] == 0.0: + a = a[1:] + if a.size < 1: + raise ValueError("There must be at least one nonzero `a` coefficient.") + + if a[0] != 1.0: + # Normalize the coefficients so a[0] == 1. + b = b / a[0] + a = a / a[0] + + n = max(len(a), len(b)) + + # Pad a or b with zeros so they are the same length. + if len(a) < n: + a = np.r_[a, np.zeros(n - len(a))] + elif len(b) < n: + b = np.r_[b, np.zeros(n - len(b))] + + IminusA = np.eye(n - 1) - linalg.companion(a).T + B = b[1:] - a[1:] * b[0] + # Solve zi = A*zi + B + zi = np.linalg.solve(IminusA, B) + + # For future reference: we could also use the following + # explicit formulas to solve the linear system: + # + # zi = np.zeros(n - 1) + # zi[0] = B.sum() / IminusA[:,0].sum() + # asum = 1.0 + # csum = 0.0 + # for k in range(1,n-1): + # asum += a[k] + # csum += b[k] - a[k]*b[0] + # zi[k] = asum*zi[0] - csum + + return zi + + +def sosfilt_zi(sos): + """ + Construct initial conditions for sosfilt for step response steady-state. + + Compute an initial state `zi` for the `sosfilt` function that corresponds + to the steady state of the step response. + + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + zi : ndarray + Initial conditions suitable for use with ``sosfilt``, shape + ``(n_sections, 2)``. + + See Also + -------- + sosfilt, zpk2sos + + Notes + ----- + .. 
versionadded:: 0.16.0 + + Examples + -------- + Filter a rectangular pulse that begins at time 0, with and without + the use of the `zi` argument of `scipy.signal.sosfilt`. + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> sos = signal.butter(9, 0.125, output='sos') + >>> zi = signal.sosfilt_zi(sos) + >>> x = (np.arange(250) < 100).astype(int) + >>> f1 = signal.sosfilt(sos, x) + >>> f2, zo = signal.sosfilt(sos, x, zi=zi) + + >>> plt.plot(x, 'k--', label='x') + >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered') + >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + sos = np.asarray(sos) + if sos.ndim != 2 or sos.shape[1] != 6: + raise ValueError('sos must be shape (n_sections, 6)') + + n_sections = sos.shape[0] + zi = np.empty((n_sections, 2)) + scale = 1.0 + for section in range(n_sections): + b = sos[section, :3] + a = sos[section, 3:] + zi[section] = scale * lfilter_zi(b, a) + # If H(z) = B(z)/A(z) is this section's transfer function, then + # b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady + # state value of this section's step response. + scale *= b.sum() / a.sum() + + return zi + + +def _filtfilt_gust(b, a, x, axis=-1, irlen=None): + """Forward-backward IIR filter that uses Gustafsson's method. + + Apply the IIR filter defined by `(b,a)` to `x` twice, first forward + then backward, using Gustafsson's initial conditions [1]_. + + Let ``y_fb`` be the result of filtering first forward and then backward, + and let ``y_bf`` be the result of filtering first backward then forward. + Gustafsson's method is to compute initial conditions for the forward + pass and the backward pass such that ``y_fb == y_bf``. + + Parameters + ---------- + b : scalar or 1-D ndarray + Numerator coefficients of the filter. + a : scalar or 1-D ndarray + Denominator coefficients of the filter. + x : ndarray + Data to be filtered. + axis : int, optional + Axis of `x` to be filtered. Default is -1. + irlen : int or None, optional + The length of the nonnegligible part of the impulse response. + If `irlen` is None, or if the length of the signal is less than + ``2 * irlen``, then no part of the impulse response is ignored. + + Returns + ------- + y : ndarray + The filtered data. + x0 : ndarray + Initial condition for the forward filter. + x1 : ndarray + Initial condition for the backward filter. + + Notes + ----- + Typically the return values `x0` and `x1` are not needed by the + caller. The intended use of these return values is in unit tests. + + References + ---------- + .. [1] F. Gustaffson. Determining the initial states in forward-backward + filtering. Transactions on Signal Processing, 46(4):988-992, 1996. + + """ + # In the comments, "Gustafsson's paper" and [1] refer to the + # paper referenced in the docstring. + + b = np.atleast_1d(b) + a = np.atleast_1d(a) + + order = max(len(b), len(a)) - 1 + if order == 0: + # The filter is just scalar multiplication, with no state. + scale = (b[0] / a[0])**2 + y = scale * x + return y, np.array([]), np.array([]) + + if axis != -1 or axis != x.ndim - 1: + # Move the axis containing the data to the end. + x = np.swapaxes(x, axis, x.ndim - 1) + + # n is the number of samples in the data to be filtered. + n = x.shape[-1] + + if irlen is None or n <= 2*irlen: + m = n + else: + m = irlen + + # Create Obs, the observability matrix (called O in the paper). 
+ # This matrix can be interpreted as the operator that propagates + # an arbitrary initial state to the output, assuming the input is + # zero. + # In Gustafsson's paper, the forward and backward filters are not + # necessarily the same, so he has both O_f and O_b. We use the same + # filter in both directions, so we only need O. The same comment + # applies to S below. + Obs = np.zeros((m, order)) + zi = np.zeros(order) + zi[0] = 1 + Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0] + for k in range(1, order): + Obs[k:, k] = Obs[:-k, 0] + + # Obsr is O^R (Gustafsson's notation for row-reversed O) + Obsr = Obs[::-1] + + # Create S. S is the matrix that applies the filter to the reversed + # propagated initial conditions. That is, + # out = S.dot(zi) + # is the same as + # tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs. + # out = lfilter(b, a, tmp[::-1]) # Reverse and filter. + + # Equations (5) & (6) of [1] + S = lfilter(b, a, Obs[::-1], axis=0) + + # Sr is S^R (row-reversed S) + Sr = S[::-1] + + # M is [(S^R - O), (O^R - S)] + if m == n: + M = np.hstack((Sr - Obs, Obsr - S)) + else: + # Matrix described in section IV of [1]. + M = np.zeros((2*m, 2*order)) + M[:m, :order] = Sr - Obs + M[m:, order:] = Obsr - S + + # Naive forward-backward and backward-forward filters. + # These have large transients because the filters use zero initial + # conditions. + y_f = lfilter(b, a, x) + y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1] + + y_b = lfilter(b, a, x[..., ::-1])[..., ::-1] + y_bf = lfilter(b, a, y_b) + + delta_y_bf_fb = y_bf - y_fb + if m == n: + delta = delta_y_bf_fb + else: + start_m = delta_y_bf_fb[..., :m] + end_m = delta_y_bf_fb[..., -m:] + delta = np.concatenate((start_m, end_m), axis=-1) + + # ic_opt holds the "optimal" initial conditions. + # The following code computes the result shown in the formula + # of the paper between equations (6) and (7). + if delta.ndim == 1: + ic_opt = linalg.lstsq(M, delta)[0] + else: + # Reshape delta so it can be used as an array of multiple + # right-hand-sides in linalg.lstsq. + delta2d = delta.reshape(-1, delta.shape[-1]).T + ic_opt0 = linalg.lstsq(M, delta2d)[0].T + ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],)) + + # Now compute the filtered signal using equation (7) of [1]. + # First, form [S^R, O^R] and call it W. + if m == n: + W = np.hstack((Sr, Obsr)) + else: + W = np.zeros((2*m, 2*order)) + W[:m, :order] = Sr + W[m:, order:] = Obsr + + # Equation (7) of [1] says + # Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt] + # `wic` is (almost) the product on the right. + # W has shape (m, 2*order), and ic_opt has shape (..., 2*order), + # so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T, + # so wic has shape (..., m). + wic = ic_opt.dot(W.T) + + # `wic` is "almost" the product of W and the optimal ICs in equation + # (7)--if we're using a truncated impulse response (m < n), `wic` + # contains only the adjustments required for the ends of the signal. + # Here we form y_opt, taking this into account if necessary. + y_opt = y_fb + if m == n: + y_opt += wic + else: + y_opt[..., :m] += wic[..., :m] + y_opt[..., -m:] += wic[..., -m:] + + x0 = ic_opt[..., :order] + x1 = ic_opt[..., -order:] + if axis != -1 or axis != x.ndim - 1: + # Restore the data axis to its original position. 
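+        # (As written, the `or` makes this branch run for every value of
+        # `axis`; when `axis` already refers to the last axis the swaps
+        # below are no-ops, so the result is still correct.)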
+ x0 = np.swapaxes(x0, axis, x.ndim - 1) + x1 = np.swapaxes(x1, axis, x.ndim - 1) + y_opt = np.swapaxes(y_opt, axis, x.ndim - 1) + + return y_opt, x0, x1 + + +def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad', + irlen=None): + """ + Apply a digital filter forward and backward to a signal. + + This function applies a linear digital filter twice, once forward and + once backwards. The combined filter has zero phase and a filter order + twice that of the original. + + The function provides options for handling the edges of the signal. + + Parameters + ---------- + b : (N,) array_like + The numerator coefficient vector of the filter. + a : (N,) array_like + The denominator coefficient vector of the filter. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. + x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. + padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. + padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. + The default value is ``3 * max(len(a), len(b))``. + method : str, optional + Determines the method for handling the edges of the signal, either + "pad" or "gust". When `method` is "pad", the signal is padded; the + type of padding is determined by `padtype` and `padlen`, and `irlen` + is ignored. When `method` is "gust", Gustafsson's method is used, + and `padtype` and `padlen` are ignored. + irlen : int or None, optional + When `method` is "gust", `irlen` specifies the length of the + impulse response of the filter. If `irlen` is None, no part + of the impulse response is ignored. For a long signal, specifying + `irlen` can significantly improve the performance of the filter. + + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt + + Notes + ----- + When `method` is "pad", the function pads the data along the given axis + in one of three ways: odd, even or constant. The odd and even extensions + have the corresponding symmetry about the end point of the data. The + constant extension extends the data with the values at the end points. On + both the forward and backward passes, the initial condition of the + filter is found by using `lfilter_zi` and scaling it by the end point of + the extended data. + + When `method` is "gust", Gustafsson's method [1]_ is used. Initial + conditions are chosen for the forward and backward passes so that the + forward-backward filter gives the same result as the backward-forward + filter. + + The option to use Gustaffson's method was added in scipy version 0.16.0. + + References + ---------- + .. [1] F. Gustaffson, "Determining the initial states in forward-backward + filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992, + 1996. + + Examples + -------- + The examples will use several functions from `scipy.signal`. + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + First we create a one second signal that is the sum of two pure sine + waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz. 
+ + >>> t = np.linspace(0, 1.0, 2001) + >>> xlow = np.sin(2 * np.pi * 5 * t) + >>> xhigh = np.sin(2 * np.pi * 250 * t) + >>> x = xlow + xhigh + + Now create a lowpass Butterworth filter with a cutoff of 0.125 times + the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`. + The result should be approximately ``xlow``, with no phase shift. + + >>> b, a = signal.butter(8, 0.125) + >>> y = signal.filtfilt(b, a, x, padlen=150) + >>> np.abs(y - xlow).max() + 9.1086182074789912e-06 + + We get a fairly clean result for this artificial example because + the odd extension is exact, and with the moderately long padding, + the filter's transients have dissipated by the time the actual data + is reached. In general, transient effects at the edges are + unavoidable. + + The following example demonstrates the option ``method="gust"``. + + First, create a filter. + + >>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied. + >>> np.random.seed(123456) + + `sig` is a random input signal to be filtered. + + >>> n = 60 + >>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum() + + Apply `filtfilt` to `sig`, once using the Gustafsson method, and + once using padding, and plot the results for comparison. + + >>> fgust = signal.filtfilt(b, a, sig, method="gust") + >>> fpad = signal.filtfilt(b, a, sig, padlen=50) + >>> plt.plot(sig, 'k-', label='input') + >>> plt.plot(fgust, 'b-', linewidth=4, label='gust') + >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad') + >>> plt.legend(loc='best') + >>> plt.show() + + The `irlen` argument can be used to improve the performance + of Gustafsson's method. + + Estimate the impulse response length of the filter. + + >>> z, p, k = signal.tf2zpk(b, a) + >>> eps = 1e-9 + >>> r = np.max(np.abs(p)) + >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) + >>> approx_impulse_len + 137 + + Apply the filter to a longer signal, with and without the `irlen` + argument. The difference between `y1` and `y2` is small. For long + signals, using `irlen` gives a significant performance improvement. + + >>> x = np.random.randn(5000) + >>> y1 = signal.filtfilt(b, a, x, method='gust') + >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len) + >>> print(np.max(np.abs(y1 - y2))) + 1.80056858312e-10 + + """ + b = np.atleast_1d(b) + a = np.atleast_1d(a) + x = np.asarray(x) + + if method not in ["pad", "gust"]: + raise ValueError("method must be 'pad' or 'gust'.") + + if method == "gust": + y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) + return y + + # method == "pad" + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=max(len(a), len(b))) + + # Get the steady state of the filter's step response. + zi = lfilter_zi(b, a) + + # Reshape zi and create x0 so that zi*x0 broadcasts + # to the correct value for the 'zi' keyword argument + # to lfilter. + zi_shape = [1] * x.ndim + zi_shape[axis] = zi.size + zi = np.reshape(zi, zi_shape) + x0 = axis_slice(ext, stop=1, axis=axis) + + # Forward filter. + (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0) + + # Backward filter. + # Create y0 so zi*y0 broadcasts appropriately. + y0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0) + + # Reverse y. + y = axis_reverse(y, axis=axis) + + if edge > 0: + # Slice the actual signal from the extended signal. 
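+        # `ext` was extended by `edge` samples at each end, so dropping
+        # the first and last `edge` samples restores the original length
+        # of `x` along `axis`.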
+ y = axis_slice(y, start=edge, stop=-edge, axis=axis) + + return y + + +def _validate_pad(padtype, padlen, x, axis, ntaps): + """Helper to validate padding for filtfilt""" + if padtype not in ['even', 'odd', 'constant', None]: + raise ValueError(("Unknown value '%s' given to padtype. padtype " + "must be 'even', 'odd', 'constant', or None.") % + padtype) + + if padtype is None: + padlen = 0 + + if padlen is None: + # Original padding; preserved for backwards compatibility. + edge = ntaps * 3 + else: + edge = padlen + + # x's 'axis' dimension must be bigger than edge. + if x.shape[axis] <= edge: + raise ValueError("The length of the input vector x must be at least " + "padlen, which is %d." % edge) + + if padtype is not None and edge > 0: + # Make an extension of length `edge` at each + # end of the input array. + if padtype == 'even': + ext = even_ext(x, edge, axis=axis) + elif padtype == 'odd': + ext = odd_ext(x, edge, axis=axis) + else: + ext = const_ext(x, edge, axis=axis) + else: + ext = x + return edge, ext + + +def sosfilt(sos, x, axis=-1, zi=None): + """ + Filter data along one dimension using cascaded second-order sections. + + Filter a data sequence, `x`, using a digital IIR filter defined by + `sos`. This is implemented by performing `lfilter` for each + second-order section. See `lfilter` for details. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the cascaded filter delays. It is a (at + least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where + ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]`` + replaced by 2. If `zi` is None or is not given then initial rest + (i.e. all zeros) is assumed. + Note that these initial conditions are *not* the same as the initial + conditions given by `lfiltic` or `lfilter_zi`. + + Returns + ------- + y : ndarray + The output of the digital filter. + zf : ndarray, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz + + Notes + ----- + The filter function is implemented as a series of second-order filters + with direct-form II transposed structure. It is designed to minimize + numerical precision errors for high-order filters. + + .. 
versionadded:: 0.16.0 + + Examples + -------- + Plot a 13th-order filter's impulse response using both `lfilter` and + `sosfilt`, showing the instability that results from trying to do a + 13th-order filter in a single stage (the numerical error pushes some poles + outside of the unit circle): + + >>> import matplotlib.pyplot as plt + >>> from scipy import signal + >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba') + >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos') + >>> x = signal.unit_impulse(700) + >>> y_tf = signal.lfilter(b, a, x) + >>> y_sos = signal.sosfilt(sos, x) + >>> plt.plot(y_tf, 'r', label='TF') + >>> plt.plot(y_sos, 'k', label='SOS') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + x = np.asarray(x) + sos, n_sections = _validate_sos(sos) + use_zi = zi is not None + if use_zi: + zi = np.asarray(zi) + x_zi_shape = list(x.shape) + x_zi_shape[axis] = 2 + x_zi_shape = tuple([n_sections] + x_zi_shape) + if zi.shape != x_zi_shape: + raise ValueError('Invalid zi shape. With axis=%r, an input with ' + 'shape %r, and an sos array with %d sections, zi ' + 'must have shape %r, got %r.' % + (axis, x.shape, n_sections, x_zi_shape, zi.shape)) + zf = zeros_like(zi) + + for section in range(n_sections): + if use_zi: + x, zf[section] = lfilter(sos[section, :3], sos[section, 3:], + x, axis, zi=zi[section]) + else: + x = lfilter(sos[section, :3], sos[section, 3:], x, axis) + out = (x, zf) if use_zi else x + return out + + +def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None): + """ + A forward-backward digital filter using cascaded second-order sections. + + See `filtfilt` for more complete information about this method. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. + padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. + padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. + The default value is:: + + 3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(), + (sos[:, 5] == 0).sum())) + + The extra subtraction at the end attempts to compensate for poles + and zeros at the origin (e.g. for odd-order filters) to yield + equivalent estimates of `padlen` to those of `filtfilt` for + second-order section filters built with `scipy.signal` functions. + + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + filtfilt, sosfilt, sosfilt_zi, sosfreqz + + Notes + ----- + .. versionadded:: 0.18.0 + + Examples + -------- + >>> from scipy.signal import sosfiltfilt, butter + >>> import matplotlib.pyplot as plt + + Create an interesting signal to filter. + + >>> n = 201 + >>> t = np.linspace(0, 1, n) + >>> np.random.seed(123) + >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*np.random.randn(n) + + Create a lowpass Butterworth filter, and use it to filter `x`. 
+ + >>> sos = butter(4, 0.125, output='sos') + >>> y = sosfiltfilt(sos, x) + + For comparison, apply an 8th order filter using `sosfilt`. The filter + is initialized using the mean of the first four values of `x`. + + >>> from scipy.signal import sosfilt, sosfilt_zi + >>> sos8 = butter(8, 0.125, output='sos') + >>> zi = x[:4].mean() * sosfilt_zi(sos8) + >>> y2, zo = sosfilt(sos8, x, zi=zi) + + Plot the results. Note that the phase of `y` matches the input, while + `y2` has a significant phase delay. + + >>> plt.plot(t, x, alpha=0.5, label='x(t)') + >>> plt.plot(t, y, label='y(t)') + >>> plt.plot(t, y2, label='y2(t)') + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.grid(alpha=0.25) + >>> plt.xlabel('t') + >>> plt.show() + + """ + sos, n_sections = _validate_sos(sos) + + # `method` is "pad"... + ntaps = 2 * n_sections + 1 + ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum()) + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=ntaps) + + # These steps follow the same form as filtfilt with modifications + zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...) + zi_shape = [1] * x.ndim + zi_shape[axis] = 2 + zi.shape = [n_sections] + zi_shape + x_0 = axis_slice(ext, stop=1, axis=axis) + (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0) + y_0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0) + y = axis_reverse(y, axis=axis) + if edge > 0: + y = axis_slice(y, start=edge, stop=-edge, axis=axis) + return y + + +def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True): + """ + Downsample the signal after applying an anti-aliasing filter. + + By default, an order 8 Chebyshev type I filter is used. A 30 point FIR + filter with Hamming window is used if `ftype` is 'fir'. + + Parameters + ---------- + x : array_like + The signal to be downsampled, as an N-dimensional array. + q : int + The downsampling factor. When using IIR downsampling, it is recommended + to call `decimate` multiple times for downsampling factors higher than + 13. + n : int, optional + The order of the filter (1 less than the length for 'fir'). Defaults to + 8 for 'iir' and 20 times the downsampling factor for 'fir'. + ftype : str {'iir', 'fir'} or ``dlti`` instance, optional + If 'iir' or 'fir', specifies the type of lowpass filter. If an instance + of an `dlti` object, uses that object to filter before downsampling. + axis : int, optional + The axis along which to decimate. + zero_phase : bool, optional + Prevent phase shift by filtering with `filtfilt` instead of `lfilter` + when using an IIR filter, and shifting the outputs back by the filter's + group delay when using an FIR filter. The default value of ``True`` is + recommended, since a phase shift is generally not desired. + + .. versionadded:: 0.18.0 + + Returns + ------- + y : ndarray + The down-sampled signal. + + See Also + -------- + resample : Resample up or down using the FFT method. + resample_poly : Resample using polyphase filtering and an FIR filter. + + Notes + ----- + The ``zero_phase`` keyword was added in 0.18.0. + The possibility to use instances of ``dlti`` as ``ftype`` was added in + 0.18.0. + """ + + x = asarray(x) + q = operator.index(q) + + if n is not None: + n = operator.index(n) + + if ftype == 'fir': + if n is None: + half_len = 10 * q # reasonable cutoff for our sinc-like function + n = 2 * half_len + b, a = firwin(n+1, 1. / q, window='hamming'), 1. 
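+        # `a` is left as the scalar 1., so the `a.size == 1` check below
+        # routes this filter to the FIR path (`resample_poly` when
+        # `zero_phase`, `upfirdn` otherwise) instead of a full `lfilter`.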
+ elif ftype == 'iir': + if n is None: + n = 8 + system = dlti(*cheby1(n, 0.05, 0.8 / q)) + b, a = system.num, system.den + elif isinstance(ftype, dlti): + system = ftype._as_tf() # Avoids copying if already in TF form + b, a = system.num, system.den + else: + raise ValueError('invalid ftype') + + sl = [slice(None)] * x.ndim + a = np.asarray(a) + + if a.size == 1: # FIR case + b = b / a + if zero_phase: + y = resample_poly(x, 1, q, axis=axis, window=b) + else: + # upfirdn is generally faster than lfilter by a factor equal to the + # downsampling factor, since it only calculates the needed outputs + n_out = x.shape[axis] // q + bool(x.shape[axis] % q) + y = upfirdn(b, x, up=1, down=q, axis=axis) + sl[axis] = slice(None, n_out, None) + + else: # IIR case + if zero_phase: + y = filtfilt(b, a, x, axis=axis) + else: + y = lfilter(b, a, x, axis=axis) + sl[axis] = slice(None, None, q) + + return y[tuple(sl)] diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/signaltools.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/signaltools.pyc new file mode 100644 index 0000000..1eaccbe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/signaltools.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/sigtools.so b/project/venv/lib/python2.7/site-packages/scipy/signal/sigtools.so new file mode 100755 index 0000000..ac220e8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/sigtools.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/spectral.py b/project/venv/lib/python2.7/site-packages/scipy/signal/spectral.py new file mode 100644 index 0000000..3d31f13 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/spectral.py @@ -0,0 +1,2006 @@ +"""Tools for spectral analysis. +""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy import fftpack +from . import signaltools +from .windows import get_window +from ._spectral import _lombscargle +from ._arraytools import const_ext, even_ext, odd_ext, zero_ext +import warnings + +from scipy._lib.six import string_types + +__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence', + 'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA'] + + +def lombscargle(x, + y, + freqs, + precenter=False, + normalize=False): + """ + lombscargle(x, y, freqs) + + Computes the Lomb-Scargle periodogram. + + The Lomb-Scargle periodogram was developed by Lomb [1]_ and further + extended by Scargle [2]_ to find, and test the significance of weak + periodic signals with uneven temporal sampling. + + When *normalize* is False (default) the computed periodogram + is unnormalized, it takes the value ``(A**2) * N/4`` for a harmonic + signal with amplitude A for sufficiently large N. + + When *normalize* is True the computed periodogram is normalized by + the residuals of the data around a constant reference model (at zero). + + Input arrays should be one-dimensional and will be cast to float64. + + Parameters + ---------- + x : array_like + Sample times. + y : array_like + Measurement values. + freqs : array_like + Angular frequencies for output periodogram. + precenter : bool, optional + Pre-center amplitudes by subtracting the mean. + normalize : bool, optional + Compute normalized periodogram. + + Returns + ------- + pgram : array_like + Lomb-Scargle periodogram. + + Raises + ------ + ValueError + If the input arrays `x` and `y` do not have the same shape. 
+ + Notes + ----- + This subroutine calculates the periodogram using a slightly + modified algorithm due to Townsend [3]_ which allows the + periodogram to be calculated using only a single pass through + the input arrays for each frequency. + + The algorithm running time scales roughly as O(x * freqs) or O(N^2) + for a large number of samples and frequencies. + + References + ---------- + .. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced + data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976 + + .. [2] J.D. Scargle "Studies in astronomical time series analysis. II - + Statistical aspects of spectral analysis of unevenly spaced data", + The Astrophysical Journal, vol 263, pp. 835-853, 1982 + + .. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle + periodogram using graphics processing units.", The Astrophysical + Journal Supplement Series, vol 191, pp. 247-253, 2010 + + See Also + -------- + istft: Inverse Short Time Fourier Transform + check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met + welch: Power spectral density by Welch's method + spectrogram: Spectrogram by Welch's method + csd: Cross spectral density by Welch's method + + Examples + -------- + >>> import matplotlib.pyplot as plt + + First define some input parameters for the signal: + + >>> A = 2. + >>> w = 1. + >>> phi = 0.5 * np.pi + >>> nin = 1000 + >>> nout = 100000 + >>> frac_points = 0.9 # Fraction of points to select + + Randomly select a fraction of an array with timesteps: + + >>> r = np.random.rand(nin) + >>> x = np.linspace(0.01, 10*np.pi, nin) + >>> x = x[r >= frac_points] + + Plot a sine wave for the selected times: + + >>> y = A * np.sin(w*x+phi) + + Define the array of frequencies for which to compute the periodogram: + + >>> f = np.linspace(0.01, 10, nout) + + Calculate Lomb-Scargle periodogram: + + >>> import scipy.signal as signal + >>> pgram = signal.lombscargle(x, y, f, normalize=True) + + Now make a plot of the input data: + + >>> plt.subplot(2, 1, 1) + >>> plt.plot(x, y, 'b+') + + Then plot the normalized periodogram: + + >>> plt.subplot(2, 1, 2) + >>> plt.plot(f, pgram) + >>> plt.show() + + """ + + x = np.asarray(x, dtype=np.float64) + y = np.asarray(y, dtype=np.float64) + freqs = np.asarray(freqs, dtype=np.float64) + + assert x.ndim == 1 + assert y.ndim == 1 + assert freqs.ndim == 1 + + if precenter: + pgram = _lombscargle(x, y - y.mean(), freqs) + else: + pgram = _lombscargle(x, y, freqs) + + if normalize: + pgram *= 2 / np.dot(y, y) + + return pgram + + +def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant', + return_onesided=True, scaling='density', axis=-1): + """ + Estimate power spectral density using a periodogram. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to 'boxcar'. + nfft : int, optional + Length of the FFT used. If `None` the length of `x` will be + used. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. 
If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Note that for complex + data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Pxx` has units of V**2/Hz and computing the power + spectrum ('spectrum') where `Pxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density' + axis : int, optional + Axis along which the periodogram is computed; the default is + over the last axis (i.e. ``axis=-1``). + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxx : ndarray + Power spectral density or power spectrum of `x`. + + Notes + ----- + .. versionadded:: 0.12.0 + + See Also + -------- + welch: Estimate power spectral density using Welch's method + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> np.random.seed(1234) + + Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by + 0.001 V**2/Hz of white noise sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2*np.sqrt(2) + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> x = amp*np.sin(2*np.pi*freq*time) + >>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape) + + Compute and plot the power spectral density. + + >>> f, Pxx_den = signal.periodogram(x, fs) + >>> plt.semilogy(f, Pxx_den) + >>> plt.ylim([1e-7, 1e2]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('PSD [V**2/Hz]') + >>> plt.show() + + If we average the last half of the spectral density, to exclude the + peak, we can recover the noise power on the signal. + + >>> np.mean(Pxx_den[25000:]) + 0.00099728892368242854 + + Now compute and plot the power spectrum. + + >>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum') + >>> plt.figure() + >>> plt.semilogy(f, np.sqrt(Pxx_spec)) + >>> plt.ylim([1e-4, 1e1]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Linear spectrum [V RMS]') + >>> plt.show() + + The peak height in the power spectrum is an estimate of the RMS + amplitude. + + >>> np.sqrt(Pxx_spec.max()) + 2.0077340678640727 + + """ + x = np.asarray(x) + + if x.size == 0: + return np.empty(x.shape), np.empty(x.shape) + + if window is None: + window = 'boxcar' + + if nfft is None: + nperseg = x.shape[axis] + elif nfft == x.shape[axis]: + nperseg = nfft + elif nfft > x.shape[axis]: + nperseg = x.shape[axis] + elif nfft < x.shape[axis]: + s = [np.s_[:]]*len(x.shape) + s[axis] = np.s_[:nfft] + x = x[tuple(s)] + nperseg = nfft + nfft = None + + return welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=0, + nfft=nfft, detrend=detrend, return_onesided=return_onesided, + scaling=scaling, axis=axis) + + +def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, + detrend='constant', return_onesided=True, scaling='density', + axis=-1, average='mean'): + r""" + Estimate power spectral density using Welch's method. 
+ + Welch's method [1]_ computes an estimate of the power spectral + density by dividing the data into overlapping segments, computing a + modified periodogram for each segment and averaging the + periodograms. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Note that for complex + data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Pxx` has units of V**2/Hz and computing the power + spectrum ('spectrum') where `Pxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density' + axis : int, optional + Axis along which the periodogram is computed; the default is + over the last axis (i.e. ``axis=-1``). + average : { 'mean', 'median' }, optional + Method to use when averaging periodograms. Defaults to 'mean'. + + .. versionadded:: 1.2.0 + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxx : ndarray + Power spectral density or power spectrum of x. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + If `noverlap` is 0, this method is equivalent to Bartlett's method + [2]_. + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika, vol. 37, pp. 1-16, 1950. 
+ + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> np.random.seed(1234) + + Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by + 0.001 V**2/Hz of white noise sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2*np.sqrt(2) + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> x = amp*np.sin(2*np.pi*freq*time) + >>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape) + + Compute and plot the power spectral density. + + >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024) + >>> plt.semilogy(f, Pxx_den) + >>> plt.ylim([0.5e-3, 1]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('PSD [V**2/Hz]') + >>> plt.show() + + If we average the last half of the spectral density, to exclude the + peak, we can recover the noise power on the signal. + + >>> np.mean(Pxx_den[256:]) + 0.0009924865443739191 + + Now compute and plot the power spectrum. + + >>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum') + >>> plt.figure() + >>> plt.semilogy(f, np.sqrt(Pxx_spec)) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Linear spectrum [V RMS]') + >>> plt.show() + + The peak height in the power spectrum is an estimate of the RMS + amplitude. + + >>> np.sqrt(Pxx_spec.max()) + 2.0077340678640727 + + If we now introduce a discontinuity in the signal, by increasing the + amplitude of a small portion of the signal by 50, we can see the + corruption of the mean average power spectral density, but using a + median average better estimates the normal behaviour. + + >>> x[int(N//2):int(N//2)+10] *= 50. + >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024) + >>> f_med, Pxx_den_med = signal.welch(x, fs, nperseg=1024, average='median') + >>> plt.semilogy(f, Pxx_den, label='mean') + >>> plt.semilogy(f_med, Pxx_den_med, label='median') + >>> plt.ylim([0.5e-3, 1]) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('PSD [V**2/Hz]') + >>> plt.legend() + >>> plt.show() + + """ + + freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, + return_onesided=return_onesided, scaling=scaling, + axis=axis, average=average) + + return freqs, Pxx.real + + +def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, + detrend='constant', return_onesided=True, scaling='density', + axis=-1, average='mean'): + r""" + Estimate the cross power spectral density, Pxy, using Welch's + method. + + Parameters + ---------- + x : array_like + Time series of measurement values + y : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` and `y` time series. Defaults + to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap: int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. 
If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Note that for complex + data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the cross spectral density ('density') + where `Pxy` has units of V**2/Hz and computing the cross spectrum + ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are + measured in V and `fs` is measured in Hz. Defaults to 'density' + axis : int, optional + Axis along which the CSD is computed for both inputs; the + default is over the last axis (i.e. ``axis=-1``). + average : { 'mean', 'median' }, optional + Method to use when averaging periodograms. Defaults to 'mean'. + + .. versionadded:: 1.2.0 + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxy : ndarray + Cross spectral density or cross power spectrum of x,y. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. [Equivalent to + csd(x,x)] + coherence: Magnitude squared coherence by Welch's method. + + Notes + -------- + By convention, Pxy is computed with the conjugate FFT of X + multiplied by the FFT of Y. + + If the input series differ in length, the shorter series will be + zero-padded to match. + + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of + Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975 + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Generate two test signals with some common features. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 20 + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> b, a = signal.butter(2, 0.25, 'low') + >>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape) + >>> y = signal.lfilter(b, a, x) + >>> x += amp*np.sin(2*np.pi*freq*time) + >>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape) + + Compute and plot the magnitude of the cross spectral density. + + >>> f, Pxy = signal.csd(x, y, fs, nperseg=1024) + >>> plt.semilogy(f, np.abs(Pxy)) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('CSD [V**2/Hz]') + >>> plt.show() + """ + + freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft, + detrend, return_onesided, scaling, axis, + mode='psd') + + # Average over windows. 
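+    # (For noise-like data the median of a set of periodogram estimates is
+    # biased low relative to their mean, so the 'median' branch below divides
+    # by the correction factor computed in `_median_bias`.)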
+ if len(Pxy.shape) >= 2 and Pxy.size > 0: + if Pxy.shape[-1] > 1: + if average == 'median': + Pxy = np.median(Pxy, axis=-1) / _median_bias(Pxy.shape[-1]) + elif average == 'mean': + Pxy = Pxy.mean(axis=-1) + else: + raise ValueError('average must be "median" or "mean", got %s' + % (average,)) + else: + Pxy = np.reshape(Pxy, Pxy.shape[:-1]) + + return freqs, Pxy + + +def spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='density', axis=-1, mode='psd'): + """ + Compute a spectrogram with consecutive Fourier transforms. + + Spectrograms can be used as a way of visualizing the change of a + nonstationary signal's frequency content over time. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + Defaults to a Tukey window with shape parameter of 0.25. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 8``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Note that for complex + data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Sxx` has units of V**2/Hz and computing the power + spectrum ('spectrum') where `Sxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density'. + axis : int, optional + Axis along which the spectrogram is computed; the default is over + the last axis (i.e. ``axis=-1``). + mode : str, optional + Defines what kind of return values are expected. Options are + ['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is + equivalent to the output of `stft` with no padding or boundary + extension. 'magnitude' returns the absolute magnitude of the + STFT. 'angle' and 'phase' return the complex angle of the STFT, + with and without unwrapping, respectively. + + Returns + ------- + f : ndarray + Array of sample frequencies. + t : ndarray + Array of segment times. + Sxx : ndarray + Spectrogram of x. By default, the last axis of Sxx corresponds + to the segment times. 
+
+    See Also
+    --------
+    periodogram: Simple, optionally modified periodogram
+    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
+    welch: Power spectral density by Welch's method.
+    csd: Cross spectral density by Welch's method.
+
+    Notes
+    -----
+    An appropriate amount of overlap will depend on the choice of window
+    and on your requirements. In contrast to Welch's method, where the
+    entire data stream is averaged over, one may wish to use a smaller
+    overlap (or perhaps none at all) when computing a spectrogram, to
+    maintain some statistical independence between individual segments.
+    It is for this reason that the default window is a Tukey window with
+    1/8th of a window's length overlap at each end.
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
+           "Discrete-Time Signal Processing", Prentice Hall, 1999.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
+    modulated around 3 kHz, corrupted by white noise of exponentially
+    decreasing magnitude sampled at 10 kHz.
+
+    >>> fs = 10e3
+    >>> N = 1e5
+    >>> amp = 2 * np.sqrt(2)
+    >>> noise_power = 0.01 * fs / 2
+    >>> time = np.arange(N) / float(fs)
+    >>> mod = 500*np.cos(2*np.pi*0.25*time)
+    >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
+    >>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
+    >>> noise *= np.exp(-time/5)
+    >>> x = carrier + noise
+
+    Compute and plot the spectrogram.
+
+    >>> f, t, Sxx = signal.spectrogram(x, fs)
+    >>> plt.pcolormesh(t, f, Sxx)
+    >>> plt.ylabel('Frequency [Hz]')
+    >>> plt.xlabel('Time [sec]')
+    >>> plt.show()
+
+    Note: if the output is not one-sided, shift the frequency axis before
+    plotting:
+
+    >>> f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False)
+    >>> plt.pcolormesh(t, np.fft.fftshift(f), np.fft.fftshift(Sxx, axes=0))
+    >>> plt.ylabel('Frequency [Hz]')
+    >>> plt.xlabel('Time [sec]')
+    >>> plt.show()
+    """
+    modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
+    if mode not in modelist:
+        raise ValueError('unknown value for mode {}, must be one of {}'
+                         .format(mode, modelist))
+
+    # need to set default for nperseg before setting default for noverlap below
+    window, nperseg = _triage_segments(window, nperseg,
+                                       input_length=x.shape[axis])
+
+    # Less overlap than in Welch's method, so samples are more statistically
+    # independent
+    if noverlap is None:
+        noverlap = nperseg // 8
+
+    if mode == 'psd':
+        freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
+                                            noverlap, nfft, detrend,
+                                            return_onesided, scaling, axis,
+                                            mode='psd')
+
+    else:
+        freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
+                                            noverlap, nfft, detrend,
+                                            return_onesided, scaling, axis,
+                                            mode='stft')
+
+        if mode == 'magnitude':
+            Sxx = np.abs(Sxx)
+        elif mode in ['angle', 'phase']:
+            Sxx = np.angle(Sxx)
+            if mode == 'phase':
+                # Sxx has one additional dimension for time strides
+                if axis < 0:
+                    axis -= 1
+                Sxx = np.unwrap(Sxx, axis=axis)
+
+        # mode =='complex' is same as `stft`, doesn't need modification
+
+    return freqs, time, Sxx
+
+
+def check_COLA(window, nperseg, noverlap, tol=1e-10):
+    r"""
+    Check whether the Constant OverLap Add (COLA) constraint is met
+
+    Parameters
+    ----------
+    window : str or tuple or array_like
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default.
See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + nperseg : int + Length of each segment. + noverlap : int + Number of points to overlap between segments. + tol : float, optional + The allowed variance of a bin's weighted sum from the median bin + sum. + + Returns + ------- + verdict : bool + `True` if chosen combination satisfies COLA within `tol`, + `False` otherwise + + See Also + -------- + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + stft: Short Time Fourier Transform + istft: Inverse Short Time Fourier Transform + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, it is sufficient that the signal windowing obeys the constraint of + "Constant OverLap Add" (COLA). This ensures that every point in the input + data is equally weighted, thereby avoiding aliasing and allowing full + reconstruction. + + Some examples of windows that satisfy COLA: + - Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ... + - Bartlett window at overlap of 1/2, 3/4, 5/6, ... + - Hann window at 1/2, 2/3, 3/4, ... + - Any Blackman family window at 2/3 overlap + - Any window with ``noverlap = nperseg-1`` + + A very comprehensive list of other windows may be found in [2]_, + wherein the COLA condition is satisfied when the "Amplitude + Flatness" is unity. + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K + Publishing, 2011,ISBN 978-0-9745607-3-1. + .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and + spectral density estimation by the Discrete Fourier transform + (DFT), including a comprehensive list of window functions and + some new at-top windows", 2002, + http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 + + Examples + -------- + >>> from scipy import signal + + Confirm COLA condition for rectangular window of 75% (3/4) overlap: + + >>> signal.check_COLA(signal.boxcar(100), 100, 75) + True + + COLA is not true for 25% (1/4) overlap, though: + + >>> signal.check_COLA(signal.boxcar(100), 100, 25) + False + + "Symmetrical" Hann window (for filter design) is not COLA: + + >>> signal.check_COLA(signal.hann(120, sym=True), 120, 60) + False + + "Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for + overlap of 1/2, 2/3, 3/4, etc.: + + >>> signal.check_COLA(signal.hann(120, sym=False), 120, 60) + True + + >>> signal.check_COLA(signal.hann(120, sym=False), 120, 80) + True + + >>> signal.check_COLA(signal.hann(120, sym=False), 120, 90) + True + + """ + + nperseg = int(nperseg) + + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + noverlap = int(noverlap) + + if isinstance(window, string_types) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError('window must have length of nperseg') + + step = nperseg - noverlap + binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step)) + + if nperseg % step != 0: + binsums[:nperseg % step] += win[-(nperseg % step):] + + deviation = binsums - np.median(binsums) + return np.max(np.abs(deviation)) < tol + + +def check_NOLA(window, nperseg, noverlap, tol=1e-10): + r""" + Check whether the Nonzero Overlap Add (NOLA) 
constraint is met + + Parameters + ---------- + window : str or tuple or array_like + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + nperseg : int + Length of each segment. + noverlap : int + Number of points to overlap between segments. + tol : float, optional + The allowed variance of a bin's weighted sum from the median bin + sum. + + Returns + ------- + verdict : bool + `True` if chosen combination satisfies the NOLA constraint within + `tol`, `False` otherwise + + See Also + -------- + check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met + stft: Short Time Fourier Transform + istft: Inverse Short Time Fourier Transform + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, the signal windowing must obey the constraint of "nonzero + overlap add" (NOLA): + + .. math:: \sum_{t}w^{2}[n-tH] \ne 0 + + for all :math:`n`, where :math:`w` is the window function, :math:`t` is the + frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` - + `noverlap`). + + This ensures that the normalization factors in the denominator of the + overlap-add inversion equation are not zero. Only very pathological windows + will fail the NOLA constraint. + + .. versionadded:: 1.2.0 + + References + ---------- + .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K + Publishing, 2011,ISBN 978-0-9745607-3-1. + .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and + spectral density estimation by the Discrete Fourier transform + (DFT), including a comprehensive list of window functions and + some new at-top windows", 2002, + http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 + + Examples + -------- + >>> from scipy import signal + + Confirm NOLA condition for rectangular window of 75% (3/4) overlap: + + >>> signal.check_NOLA(signal.boxcar(100), 100, 75) + True + + NOLA is also true for 25% (1/4) overlap: + + >>> signal.check_NOLA(signal.boxcar(100), 100, 25) + True + + "Symmetrical" Hann window (for filter design) is also NOLA: + + >>> signal.check_NOLA(signal.hann(120, sym=True), 120, 60) + True + + As long as there is overlap, it takes quite a pathological window to fail + NOLA: + + >>> w = np.ones(64, dtype="float") + >>> w[::2] = 0 + >>> signal.check_NOLA(w, 64, 32) + False + + If there is not enough overlap, a window with zeros at the ends will not + work: + + >>> signal.check_NOLA(signal.hann(64), 64, 0) + False + >>> signal.check_NOLA(signal.hann(64), 64, 1) + False + >>> signal.check_NOLA(signal.hann(64), 64, 2) + True + """ + + nperseg = int(nperseg) + + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg') + if noverlap < 0: + raise ValueError('noverlap must be a nonnegative integer') + noverlap = int(noverlap) + + if isinstance(window, string_types) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = np.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError('window must have length of nperseg') + + step = nperseg - noverlap + binsums = sum(win[ii*step:(ii+1)*step]**2 for ii in range(nperseg//step)) + + if nperseg % step != 0: + 
binsums[:nperseg % step] += win[-(nperseg % step):]**2 + + return np.min(binsums) > tol + + +def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None, + detrend=False, return_onesided=True, boundary='zeros', padded=True, + axis=-1): + r""" + Compute the Short Time Fourier Transform (STFT). + + STFTs can be used as a way of quantifying the change of a + nonstationary signal's frequency and phase content over time. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to 256. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. When + specified, the COLA constraint must be met (see Notes below). + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to `False`. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Note that for complex + data, a two-sided spectrum is always returned. Defaults to + `True`. + boundary : str or None, optional + Specifies whether the input signal is extended at both ends, and + how to generate the new values, in order to center the first + windowed segment on the first input point. This has the benefit + of enabling reconstruction of the first input point when the + employed window function starts at zero. Valid options are + ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to + 'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is + extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``. + padded : bool, optional + Specifies whether the input signal is zero-padded at the end to + make the signal fit exactly into an integer number of window + segments, so that all of the signal is included in the output. + Defaults to `True`. Padding occurs after boundary extension, if + `boundary` is not `None`, and `padded` is `True`, as is the + default. + axis : int, optional + Axis along which the STFT is computed; the default is over the + last axis (i.e. ``axis=-1``). + + Returns + ------- + f : ndarray + Array of sample frequencies. + t : ndarray + Array of segment times. + Zxx : ndarray + STFT of `x`. By default, the last axis of `Zxx` corresponds + to the segment times. + + See Also + -------- + istft: Inverse Short Time Fourier Transform + check_COLA: Check whether the Constant OverLap Add (COLA) constraint + is met + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + welch: Power spectral density by Welch's method. + spectrogram: Spectrogram by Welch's method. 
+    csd: Cross spectral density by Welch's method.
+    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
+
+    Notes
+    -----
+    In order to enable inversion of an STFT via the inverse STFT in
+    `istft`, the signal windowing must obey the constraint of "Nonzero
+    OverLap Add" (NOLA), and the input signal must have complete
+    windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
+    (nperseg-noverlap) == 0``). The `padded` argument may be used to
+    accomplish this.
+
+    Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop
+    size :math:`H` = `nperseg - noverlap`, the windowed frame at time index
+    :math:`t` is given by
+
+    .. math:: x_{t}[n]=x[n]w[n-tH]
+
+    The overlap-add (OLA) reconstruction equation is given by
+
+    .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
+
+    The NOLA constraint ensures that every normalization term that appears
+    in the denominator of the OLA reconstruction equation is nonzero. Whether
+    a choice of `window`, `nperseg`, and `noverlap` satisfies this constraint
+    can be tested with `check_NOLA`.
+
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
+           "Discrete-Time Signal Processing", Prentice Hall, 1999.
+    .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
+           Modified Short-Time Fourier Transform", IEEE 1984,
+           10.1109/TASSP.1984.1164317
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
+    modulated around 3 kHz, corrupted by white noise of exponentially
+    decreasing magnitude sampled at 10 kHz.
+
+    >>> fs = 10e3
+    >>> N = 1e5
+    >>> amp = 2 * np.sqrt(2)
+    >>> noise_power = 0.01 * fs / 2
+    >>> time = np.arange(N) / float(fs)
+    >>> mod = 500*np.cos(2*np.pi*0.25*time)
+    >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
+    >>> noise = np.random.normal(scale=np.sqrt(noise_power),
+    ...                          size=time.shape)
+    >>> noise *= np.exp(-time/5)
+    >>> x = carrier + noise
+
+    Compute and plot the STFT's magnitude.
+
+    >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
+    >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
+    >>> plt.title('STFT Magnitude')
+    >>> plt.ylabel('Frequency [Hz]')
+    >>> plt.xlabel('Time [sec]')
+    >>> plt.show()
+    """
+
+    freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
+                                        nfft, detrend, return_onesided,
+                                        scaling='spectrum', axis=axis,
+                                        mode='stft', boundary=boundary,
+                                        padded=padded)
+
+    return freqs, time, Zxx
+
+
+def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
+          input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
+    r"""
+    Perform the inverse Short Time Fourier transform (iSTFT).
+
+    Parameters
+    ----------
+    Zxx : array_like
+        STFT of the signal to be reconstructed. If a purely real array
+        is passed, it will be cast to a complex data type.
+    fs : float, optional
+        Sampling frequency of the time series. Defaults to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg. Defaults
+        to a Hann window. Must match the window used to generate the
+        STFT for faithful inversion.
+    nperseg : int, optional
+        Number of data points corresponding to each STFT segment. This
+        parameter must be specified if the number of data points per
+        segment is odd, or if the STFT was padded via ``nfft >
+        nperseg``. If `None`, the value depends on the shape of
+        `Zxx` and `input_onesided`. If `input_onesided` is True,
+        ``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
+        ``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
+    noverlap : int, optional
+        Number of points to overlap between segments. If `None`, half
+        of the segment length. Defaults to `None`. When specified, the
+        COLA constraint must be met (see Notes below), and should match
+        the parameter used to generate the STFT. Defaults to `None`.
+    nfft : int, optional
+        Number of FFT points corresponding to each STFT segment. This
+        parameter must be specified if the STFT was padded via ``nfft >
+        nperseg``. If `None`, the default values are the same as for
+        `nperseg`, detailed above, with one exception: if
+        `input_onesided` is True and
+        ``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
+        that value. This case allows the proper inversion of an
+        odd-length unpadded STFT using ``nfft=None``. Defaults to
+        `None`.
+    input_onesided : bool, optional
+        If `True`, interpret the input array as one-sided FFTs, such
+        as is returned by `stft` with ``return_onesided=True`` and
+        `numpy.fft.rfft`. If `False`, interpret the input as a
+        two-sided FFT. Defaults to `True`.
+    boundary : bool, optional
+        Specifies whether the input signal was extended at its
+        boundaries by supplying a non-`None` ``boundary`` argument to
+        `stft`. Defaults to `True`.
+    time_axis : int, optional
+        Where the time segments of the STFT are located; the default is
+        the last axis (i.e. ``axis=-1``).
+    freq_axis : int, optional
+        Where the frequency axis of the STFT is located; the default is
+        the penultimate axis (i.e. ``axis=-2``).
+
+    Returns
+    -------
+    t : ndarray
+        Array of output data times.
+    x : ndarray
+        iSTFT of `Zxx`.
+
+    See Also
+    --------
+    stft: Short Time Fourier Transform
+    check_COLA: Check whether the Constant OverLap Add (COLA) constraint
+                is met
+    check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
+
+    Notes
+    -----
+    In order to enable inversion of an STFT via the inverse STFT with
+    `istft`, the signal windowing must obey the constraint of "nonzero
+    overlap add" (NOLA):
+
+    .. math:: \sum_{t}w^{2}[n-tH] \ne 0
+
+    This ensures that the normalization factors that appear in the denominator
+    of the overlap-add reconstruction equation
+
+    .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
+
+    are not zero. The NOLA constraint can be checked with the `check_NOLA`
+    function.
+
+    An STFT which has been modified (via masking or otherwise) is not
+    guaranteed to correspond to an exactly realizable signal. This
+    function implements the iSTFT via the least-squares estimation
+    algorithm detailed in [2]_, which produces a signal that minimizes
+    the mean squared error between the STFT of the returned signal and
+    the modified STFT.
+
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
+           "Discrete-Time Signal Processing", Prentice Hall, 1999.
+    .. [2] Daniel W. Griffin, Jae S.
Lim "Signal Estimation from + Modified Short-Time Fourier Transform", IEEE 1984, + 10.1109/TASSP.1984.1164317 + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by + 0.001 V**2/Hz of white noise sampled at 1024 Hz. + + >>> fs = 1024 + >>> N = 10*fs + >>> nperseg = 512 + >>> amp = 2 * np.sqrt(2) + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / float(fs) + >>> carrier = amp * np.sin(2*np.pi*50*time) + >>> noise = np.random.normal(scale=np.sqrt(noise_power), + ... size=time.shape) + >>> x = carrier + noise + + Compute the STFT, and plot its magnitude + + >>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg) + >>> plt.figure() + >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp) + >>> plt.ylim([f[1], f[-1]]) + >>> plt.title('STFT Magnitude') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.yscale('log') + >>> plt.show() + + Zero the components that are 10% or less of the carrier magnitude, + then convert back to a time series via inverse STFT + + >>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0) + >>> _, xrec = signal.istft(Zxx, fs) + + Compare the cleaned signal with the original and true carrier signals. + + >>> plt.figure() + >>> plt.plot(time, x, time, xrec, time, carrier) + >>> plt.xlim([2, 2.1]) + >>> plt.xlabel('Time [sec]') + >>> plt.ylabel('Signal') + >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) + >>> plt.show() + + Note that the cleaned signal does not start as abruptly as the original, + since some of the coefficients of the transient were also removed: + + >>> plt.figure() + >>> plt.plot(time, x, time, xrec, time, carrier) + >>> plt.xlim([0, 0.1]) + >>> plt.xlabel('Time [sec]') + >>> plt.ylabel('Signal') + >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) + >>> plt.show() + + """ + + # Make sure input is an ndarray of appropriate complex dtype + Zxx = np.asarray(Zxx) + 0j + freq_axis = int(freq_axis) + time_axis = int(time_axis) + + if Zxx.ndim < 2: + raise ValueError('Input stft must be at least 2d!') + + if freq_axis == time_axis: + raise ValueError('Must specify differing time and frequency axes!') + + nseg = Zxx.shape[time_axis] + + if input_onesided: + # Assume even segment length + n_default = 2*(Zxx.shape[freq_axis] - 1) + else: + n_default = Zxx.shape[freq_axis] + + # Check windowing parameters + if nperseg is None: + nperseg = n_default + else: + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if nfft is None: + if (input_onesided) and (nperseg == n_default + 1): + # Odd nperseg, no FFT padding + nfft = nperseg + else: + nfft = n_default + elif nfft < nperseg: + raise ValueError('nfft must be greater than or equal to nperseg.') + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg//2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + nstep = nperseg - noverlap + + # Rearrange axes if necessary + if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2: + # Turn negative indices to positive for the call to transpose + if freq_axis < 0: + freq_axis = Zxx.ndim + freq_axis + if time_axis < 0: + time_axis = Zxx.ndim + time_axis + zouter = list(range(Zxx.ndim)) + for ax in sorted([time_axis, freq_axis], reverse=True): + zouter.pop(ax) + Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis]) + + # Get window as array + if isinstance(window, 
string_types) or type(window) is tuple:
+        win = get_window(window, nperseg)
+    else:
+        win = np.asarray(window)
+        if len(win.shape) != 1:
+            raise ValueError('window must be 1-D')
+        if win.shape[0] != nperseg:
+            raise ValueError('window must have length of {0}'.format(nperseg))
+
+    if input_onesided:
+        ifunc = np.fft.irfft
+    else:
+        ifunc = fftpack.ifft
+
+    xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
+
+    # Initialize output and normalization arrays
+    outputlength = nperseg + (nseg-1)*nstep
+    x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
+    norm = np.zeros(outputlength, dtype=xsubs.dtype)
+
+    if np.result_type(win, xsubs) != xsubs.dtype:
+        win = win.astype(xsubs.dtype)
+
+    xsubs *= win.sum()  # This takes care of the 'spectrum' scaling
+
+    # Construct the output from the ifft segments
+    # This loop could perhaps be vectorized/strided somehow...
+    for ii in range(nseg):
+        # Window the ifft
+        x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
+        norm[..., ii*nstep:ii*nstep+nperseg] += win**2
+
+    # Remove extension points
+    if boundary:
+        x = x[..., nperseg//2:-(nperseg//2)]
+        norm = norm[..., nperseg//2:-(nperseg//2)]
+
+    # Divide out normalization where non-tiny
+    if np.sum(norm > 1e-10) != len(norm):
+        warnings.warn("NOLA condition failed, STFT may not be invertible")
+    x /= np.where(norm > 1e-10, norm, 1.0)
+
+    if input_onesided:
+        x = x.real
+
+    # Put axes back
+    if x.ndim > 1:
+        if time_axis != Zxx.ndim-1:
+            if freq_axis < time_axis:
+                time_axis -= 1
+            x = np.rollaxis(x, -1, time_axis)
+
+    time = np.arange(x.shape[0])/float(fs)
+    return time, x
+
+
+def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
+              nfft=None, detrend='constant', axis=-1):
+    r"""
+    Estimate the magnitude squared coherence, Cxy, of discrete-time
+    signals X and Y using Welch's method.
+
+    ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
+    spectral density estimates of X and Y, and `Pxy` is the cross
+    spectral density estimate of X and Y.
+
+    Parameters
+    ----------
+    x : array_like
+        Time series of measurement values
+    y : array_like
+        Time series of measurement values
+    fs : float, optional
+        Sampling frequency of the `x` and `y` time series. Defaults
+        to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg. Defaults
+        to a Hann window.
+    nperseg : int, optional
+        Length of each segment. Defaults to None, but if window is str or
+        tuple, is set to 256, and if window is array_like, is set to the
+        length of the window.
+    noverlap: int, optional
+        Number of points to overlap between segments. If `None`,
+        ``noverlap = nperseg // 2``. Defaults to `None`.
+    nfft : int, optional
+        Length of the FFT used, if a zero padded FFT is desired. If
+        `None`, the FFT length is `nperseg`. Defaults to `None`.
+    detrend : str or function or `False`, optional
+        Specifies how to detrend each segment. If `detrend` is a
+        string, it is passed as the `type` argument to the `detrend`
+        function. If it is a function, it takes a segment and returns a
+        detrended segment. If `detrend` is `False`, no detrending is
+        done. Defaults to 'constant'.
+ axis : int, optional + Axis along which the coherence is computed for both inputs; the + default is over the last axis (i.e. ``axis=-1``). + + Returns + ------- + f : ndarray + Array of sample frequencies. + Cxy : ndarray + Magnitude squared coherence of x and y. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. + csd: Cross spectral density by Welch's method. + + Notes + -------- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of + Signals" Prentice Hall, 2005 + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + Generate two test signals with some common features. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 20 + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = np.arange(N) / fs + >>> b, a = signal.butter(2, 0.25, 'low') + >>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape) + >>> y = signal.lfilter(b, a, x) + >>> x += amp*np.sin(2*np.pi*freq*time) + >>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape) + + Compute and plot the coherence. + + >>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024) + >>> plt.semilogy(f, Cxy) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Coherence') + >>> plt.show() + """ + + freqs, Pxx = welch(x, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, + axis=axis) + _, Pyy = welch(y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, + nfft=nfft, detrend=detrend, axis=axis) + _, Pxy = csd(x, y, fs=fs, window=window, nperseg=nperseg, + noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis) + + Cxy = np.abs(Pxy)**2 / Pxx / Pyy + + return freqs, Cxy + + +def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, + nfft=None, detrend='constant', return_onesided=True, + scaling='spectrum', axis=-1, mode='psd', boundary=None, + padded=False): + """ + Calculate various forms of windowed FFTs for PSD, CSD, etc. + + This is a helper function that implements the commonality between + the stft, psd, csd, and spectrogram functions. It is not designed to + be called externally. The windows are not averaged over; the result + from each window is returned. + + Parameters + --------- + x : array_like + Array or sequence containing the data to be analyzed. + y : array_like + Array or sequence containing the data to be analyzed. If this is + the same object in memory as `x` (i.e. ``_spectral_helper(x, + x, ...)``), the extra computations are spared. + fs : float, optional + Sampling frequency of the time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. 
See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Note that for complex + data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the cross spectral density ('density') + where `Pxy` has units of V**2/Hz and computing the cross + spectrum ('spectrum') where `Pxy` has units of V**2, if `x` + and `y` are measured in V and `fs` is measured in Hz. + Defaults to 'density' + axis : int, optional + Axis along which the FFTs are computed; the default is over the + last axis (i.e. ``axis=-1``). + mode: str {'psd', 'stft'}, optional + Defines what kind of return values are expected. Defaults to + 'psd'. + boundary : str or None, optional + Specifies whether the input signal is extended at both ends, and + how to generate the new values, in order to center the first + windowed segment on the first input point. This has the benefit + of enabling reconstruction of the first input point when the + employed window function starts at zero. Valid options are + ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to + `None`. + padded : bool, optional + Specifies whether the input signal is zero-padded at the end to + make the signal fit exactly into an integer number of window + segments, so that all of the signal is included in the output. + Defaults to `False`. Padding occurs after boundary extension, if + `boundary` is not `None`, and `padded` is `True`. + Returns + ------- + freqs : ndarray + Array of sample frequencies. + t : ndarray + Array of times corresponding to each data segment + result : ndarray + Array of output data, contents dependent on *mode* kwarg. + + Notes + ----- + Adapted from matplotlib.mlab + + .. versionadded:: 0.16.0 + """ + if mode not in ['psd', 'stft']: + raise ValueError("Unknown value for mode %s, must be one of: " + "{'psd', 'stft'}" % mode) + + boundary_funcs = {'even': even_ext, + 'odd': odd_ext, + 'constant': const_ext, + 'zeros': zero_ext, + None: None} + + if boundary not in boundary_funcs: + raise ValueError("Unknown boundary option '{0}', must be one of: {1}" + .format(boundary, list(boundary_funcs.keys()))) + + # If x and y are the same object we can save ourselves some computation. 
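+    # (`y is x` tests object identity rather than equality; `welch` relies on
+    # this by calling ``csd(x, x, ...)`` with the very same array object.)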
+ same_data = y is x + + if not same_data and mode != 'psd': + raise ValueError("x and y must be equal if mode is 'stft'") + + axis = int(axis) + + # Ensure we have np.arrays, get outdtype + x = np.asarray(x) + if not same_data: + y = np.asarray(y) + outdtype = np.result_type(x, y, np.complex64) + else: + outdtype = np.result_type(x, np.complex64) + + if not same_data: + # Check if we can broadcast the outer axes together + xouter = list(x.shape) + youter = list(y.shape) + xouter.pop(axis) + youter.pop(axis) + try: + outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape + except ValueError: + raise ValueError('x and y cannot be broadcast together.') + + if same_data: + if x.size == 0: + return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape) + else: + if x.size == 0 or y.size == 0: + outshape = outershape + (min([x.shape[axis], y.shape[axis]]),) + emptyout = np.rollaxis(np.empty(outshape), -1, axis) + return emptyout, emptyout, emptyout + + if x.ndim > 1: + if axis != -1: + x = np.rollaxis(x, axis, len(x.shape)) + if not same_data and y.ndim > 1: + y = np.rollaxis(y, axis, len(y.shape)) + + # Check if x and y are the same length, zero-pad if necessary + if not same_data: + if x.shape[-1] != y.shape[-1]: + if x.shape[-1] < y.shape[-1]: + pad_shape = list(x.shape) + pad_shape[-1] = y.shape[-1] - x.shape[-1] + x = np.concatenate((x, np.zeros(pad_shape)), -1) + else: + pad_shape = list(y.shape) + pad_shape[-1] = x.shape[-1] - y.shape[-1] + y = np.concatenate((y, np.zeros(pad_shape)), -1) + + if nperseg is not None: # if specified by user + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + # parse window; if array like, then set nperseg = win.shape + win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1]) + + if nfft is None: + nfft = nperseg + elif nfft < nperseg: + raise ValueError('nfft must be greater than or equal to nperseg.') + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg//2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + nstep = nperseg - noverlap + + # Padding occurs after boundary extension, so that the extended signal ends + # in zeros, instead of introducing an impulse at the end. + # I.e. if x = [..., 3, 2] + # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0] + # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3] + + if boundary is not None: + ext_func = boundary_funcs[boundary] + x = ext_func(x, nperseg//2, axis=-1) + if not same_data: + y = ext_func(y, nperseg//2, axis=-1) + + if padded: + # Pad to integer number of windowed segments + # I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg + nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg + zeros_shape = list(x.shape[:-1]) + [nadd] + x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1) + if not same_data: + zeros_shape = list(y.shape[:-1]) + [nadd] + y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1) + + # Handle detrending and window functions + if not detrend: + def detrend_func(d): + return d + elif not hasattr(detrend, '__call__'): + def detrend_func(d): + return signaltools.detrend(d, type=detrend, axis=-1) + elif axis != -1: + # Wrap this function so that it receives a shape that it could + # reasonably expect to receive. 
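+        # (Segments arrive with the data axis rolled to the end, so roll it
+        # back before calling the user-supplied detrend, then restore the
+        # original layout.)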
+        def detrend_func(d):
+            d = np.rollaxis(d, -1, axis)
+            d = detrend(d)
+            return np.rollaxis(d, axis, len(d.shape))
+    else:
+        detrend_func = detrend
+
+    if np.result_type(win, np.complex64) != outdtype:
+        win = win.astype(outdtype)
+
+    if scaling == 'density':
+        scale = 1.0 / (fs * (win*win).sum())
+    elif scaling == 'spectrum':
+        scale = 1.0 / win.sum()**2
+    else:
+        raise ValueError('Unknown scaling: %r' % scaling)
+
+    if mode == 'stft':
+        scale = np.sqrt(scale)
+
+    if return_onesided:
+        if np.iscomplexobj(x):
+            sides = 'twosided'
+            warnings.warn('Input data is complex, switching to '
+                          'return_onesided=False')
+        else:
+            sides = 'onesided'
+            if not same_data:
+                if np.iscomplexobj(y):
+                    sides = 'twosided'
+                    warnings.warn('Input data is complex, switching to '
+                                  'return_onesided=False')
+    else:
+        sides = 'twosided'
+
+    if sides == 'twosided':
+        freqs = fftpack.fftfreq(nfft, 1/fs)
+    elif sides == 'onesided':
+        freqs = np.fft.rfftfreq(nfft, 1/fs)
+
+    # Perform the windowed FFTs
+    result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
+
+    if not same_data:
+        # All the same operations on the y data
+        result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
+                               sides)
+        result = np.conjugate(result) * result_y
+    elif mode == 'psd':
+        result = np.conjugate(result) * result
+
+    result *= scale
+    if sides == 'onesided' and mode == 'psd':
+        if nfft % 2:
+            result[..., 1:] *= 2
+        else:
+            # Last point is unpaired Nyquist freq point, don't double
+            result[..., 1:-1] *= 2
+
+    time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
+                     nperseg - noverlap)/float(fs)
+    if boundary is not None:
+        time -= (nperseg/2) / fs
+
+    result = result.astype(outdtype)
+
+    # All imaginary parts are zero anyways
+    if same_data and mode != 'stft':
+        result = result.real
+
+    # Output is going to have new last axis for time/window index, so a
+    # negative axis index shifts down one
+    if axis < 0:
+        axis -= 1
+
+    # Roll frequency axis back to axis where the data came from
+    result = np.rollaxis(result, -1, axis)
+
+    return freqs, time, result
+
+
+def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
+    """
+    Calculate windowed FFT, for internal use by
+    scipy.signal._spectral_helper
+
+    This is a helper function that does the main FFT calculation for
+    `_spectral_helper`. All input validation is performed there, and the
+    data axis is assumed to be the last axis of x. It is not designed to
+    be called externally. The windows are not averaged over; the result
+    from each window is returned.
+
+    Returns
+    -------
+    result : ndarray
+        Array of FFT data
+
+    Notes
+    -----
+    Adapted from matplotlib.mlab
+
+    .. versionadded:: 0.16.0
+    """
+    # Create strided array of data segments
+    if nperseg == 1 and noverlap == 0:
+        result = x[..., np.newaxis]
+    else:
+        # https://stackoverflow.com/a/5568169
+        step = nperseg - noverlap
+        shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
+        strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
+        result = np.lib.stride_tricks.as_strided(x, shape=shape,
+                                                 strides=strides)
+
+    # Detrend each data segment individually
+    result = detrend_func(result)
+
+    # Apply window by multiplication
+    result = win * result
+
+    # Perform the fft. Acts on last axis by default. Zero-pads automatically
+    if sides == 'twosided':
+        func = fftpack.fft
+    else:
+        result = result.real
+        func = np.fft.rfft
+    result = func(result, n=nfft)
+
+    return result
+
+
+def _triage_segments(window, nperseg, input_length):
+    """
+    Parses window and nperseg arguments for spectrogram and _spectral_helper.
+    This is a helper function, not meant to be called externally.
+
+    Parameters
+    ----------
+    window : string, tuple, or ndarray
+        If window is specified by a string or tuple and nperseg is not
+        specified, nperseg is set to the default of 256 and returns a window of
+        that length.
+        If instead the window is array_like and nperseg is not specified, then
+        nperseg is set to the length of the window. A ValueError is raised if
+        the user supplies both an array_like window and a value for nperseg but
+        nperseg does not equal the length of the window.
+
+    nperseg : int
+        Length of each segment
+
+    input_length: int
+        Length of input signal, i.e. x.shape[-1]. Used to test for errors.
+
+    Returns
+    -------
+    win : ndarray
+        window. If function was called with string or tuple then this will hold
+        the actual array used as a window.
+
+    nperseg : int
+        Length of each segment. If window is str or tuple, nperseg is set to
+        256. If window is array_like, nperseg is set to the length of the
+        window.
+    """
+
+    # parse window; if array like, then set nperseg = win.shape
+    if isinstance(window, string_types) or isinstance(window, tuple):
+        # if nperseg not specified
+        if nperseg is None:
+            nperseg = 256  # then change to default
+        if nperseg > input_length:
+            warnings.warn('nperseg = {0:d} is greater than input length '
+                          ' = {1:d}, using nperseg = {1:d}'
+                          .format(nperseg, input_length))
+            nperseg = input_length
+        win = get_window(window, nperseg)
+    else:
+        win = np.asarray(window)
+        if len(win.shape) != 1:
+            raise ValueError('window must be 1-D')
+        if input_length < win.shape[-1]:
+            raise ValueError('window is longer than input signal')
+        if nperseg is None:
+            nperseg = win.shape[0]
+        elif nperseg is not None:
+            if nperseg != win.shape[0]:
+                raise ValueError("value specified for nperseg is different"
+                                 " from length of window")
+    return win, nperseg
+
+
+def _median_bias(n):
+    """
+    Returns the bias of the median of a set of periodograms relative to
+    the mean.
+
+    See arXiv:gr-qc/0509116 Appendix B for details.
+
+    Parameters
+    ----------
+    n : int
+        Numbers of periodograms being averaged.
+
+    Returns
+    -------
+    bias : float
+        Calculated bias.
+    """
+    ii_2 = 2 * np.arange(1., (n-1) // 2 + 1)
+    return 1 + np.sum(1. / (ii_2 + 1) - 1.
/ ii_2) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/spectral.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/spectral.pyc new file mode 100644 index 0000000..7990733 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/spectral.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/spline.so b/project/venv/lib/python2.7/site-packages/scipy/signal/spline.so new file mode 100755 index 0000000..f5ff156 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/spline.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/__init__.pyc new file mode 100644 index 0000000..1344660 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/mpsig.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/mpsig.py new file mode 100644 index 0000000..237557d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/mpsig.py @@ -0,0 +1,126 @@ +""" +Some signal functions implemented using mpmath. +""" + +from __future__ import division + +try: + import mpmath +except ImportError: + mpmath = None + + +def _prod(seq): + """Returns the product of the elements in the sequence `seq`.""" + p = 1 + for elem in seq: + p *= elem + return p + + +def _relative_degree(z, p): + """ + Return relative degree of transfer function from zeros and poles. + + This is simply len(p) - len(z), which must be nonnegative. + A ValueError is raised if len(p) < len(z). + """ + degree = len(p) - len(z) + if degree < 0: + raise ValueError("Improper transfer function. " + "Must have at least as many poles as zeros.") + return degree + + +def _zpkbilinear(z, p, k, fs): + """Bilinear transformation to convert a filter from analog to digital.""" + + degree = _relative_degree(z, p) + + fs2 = 2*fs + + # Bilinear transform the poles and zeros + z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z] + p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p] + + # Any zeros that were at infinity get moved to the Nyquist frequency + z_z.extend([-1] * degree) + + # Compensate for gain change + numer = _prod(fs2 - z1 for z1 in z) + denom = _prod(fs2 - p1 for p1 in p) + k_z = k * numer / denom + + return z_z, p_z, k_z.real + + +def _zpklp2lp(z, p, k, wo=1): + """Transform a lowpass filter to a different cutoff frequency.""" + + degree = _relative_degree(z, p) + + # Scale all points radially from origin to shift cutoff frequency + z_lp = [wo * z1 for z1 in z] + p_lp = [wo * p1 for p1 in p] + + # Each shifted pole decreases gain by wo, each shifted zero increases it. + # Cancel out the net change to keep overall gain the same + k_lp = k * wo**degree + + return z_lp, p_lp, k_lp + + +def _butter_analog_poles(n): + """ + Poles of an analog Butterworth lowpass filter. + + This is the same calculation as scipy.signal.buttap(n) or + scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used, + and only the poles are returned. 
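+    (All of the returned poles lie in the left half of the complex plane.)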
+ """ + poles = [] + for k in range(-n+1, n, 2): + poles.append(-mpmath.exp(1j*mpmath.pi*k/(2*n))) + return poles + + +def butter_lp(n, Wn): + """ + Lowpass Butterworth digital filter design. + + This computes the same result as scipy.signal.butter(n, Wn, output='zpk'), + but it uses mpmath, and the results are returned in lists instead of numpy + arrays. + """ + zeros = [] + poles = _butter_analog_poles(n) + k = 1 + fs = 2 + warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs) + z, p, k = _zpklp2lp(zeros, poles, k, wo=warped) + z, p, k = _zpkbilinear(z, p, k, fs=fs) + return z, p, k + + +def zpkfreqz(z, p, k, worN=None): + """ + Frequency response of a filter in zpk format, using mpmath. + + This is the same calculation as scipy.signal.freqz, but the input is in + zpk format, the calculation is performed using mpath, and the results are + returned in lists instead of numpy arrays. + """ + if worN is None or isinstance(worN, int): + N = worN or 512 + ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)] + else: + ws = worN + + h = [] + for wk in ws: + zm1 = mpmath.exp(1j * wk) + numer = _prod([zm1 - t for t in z]) + denom = _prod([zm1 - t for t in p]) + hk = k * numer / denom + h.append(hk) + return ws, h diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/mpsig.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/mpsig.pyc new file mode 100644 index 0000000..24a8a3a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/mpsig.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_array_tools.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_array_tools.py new file mode 100644 index 0000000..14dd54f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_array_tools.py @@ -0,0 +1,113 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np + +from numpy.testing import assert_array_equal +from pytest import raises as assert_raises + +from scipy.signal._arraytools import (axis_slice, axis_reverse, + odd_ext, even_ext, const_ext, zero_ext) + + +class TestArrayTools(object): + + def test_axis_slice(self): + a = np.arange(12).reshape(3, 4) + + s = axis_slice(a, start=0, stop=1, axis=0) + assert_array_equal(s, a[0:1, :]) + + s = axis_slice(a, start=-1, axis=0) + assert_array_equal(s, a[-1:, :]) + + s = axis_slice(a, start=0, stop=1, axis=1) + assert_array_equal(s, a[:, 0:1]) + + s = axis_slice(a, start=-1, axis=1) + assert_array_equal(s, a[:, -1:]) + + s = axis_slice(a, start=0, step=2, axis=0) + assert_array_equal(s, a[::2, :]) + + s = axis_slice(a, start=0, step=2, axis=1) + assert_array_equal(s, a[:, ::2]) + + def test_axis_reverse(self): + a = np.arange(12).reshape(3, 4) + + r = axis_reverse(a, axis=0) + assert_array_equal(r, a[::-1, :]) + + r = axis_reverse(a, axis=1) + assert_array_equal(r, a[:, ::-1]) + + def test_odd_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + odd = odd_ext(a, 2, axis=1) + expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], + [11, 10, 9, 8, 7, 6, 5, 4, 3]]) + assert_array_equal(odd, expected) + + odd = odd_ext(a, 1, axis=0) + expected = np.array([[-7, -4, -1, 2, 5], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [17, 14, 11, 8, 5]]) + assert_array_equal(odd, expected) + + assert_raises(ValueError, odd_ext, a, 2, axis=0) + assert_raises(ValueError, odd_ext, a, 5, axis=1) + + def test_even_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + even = even_ext(a, 2, 
axis=1) + expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3], + [7, 8, 9, 8, 7, 6, 5, 6, 7]]) + assert_array_equal(even, expected) + + even = even_ext(a, 1, axis=0) + expected = np.array([[9, 8, 7, 6, 5], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [1, 2, 3, 4, 5]]) + assert_array_equal(even, expected) + + assert_raises(ValueError, even_ext, a, 2, axis=0) + assert_raises(ValueError, even_ext, a, 5, axis=1) + + def test_const_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + const = const_ext(a, 2, axis=1) + expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5], + [9, 9, 9, 8, 7, 6, 5, 5, 5]]) + assert_array_equal(const, expected) + + const = const_ext(a, 1, axis=0) + expected = np.array([[1, 2, 3, 4, 5], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [9, 8, 7, 6, 5]]) + assert_array_equal(const, expected) + + def test_zero_ext(self): + a = np.array([[1, 2, 3, 4, 5], + [9, 8, 7, 6, 5]]) + + zero = zero_ext(a, 2, axis=1) + expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0], + [0, 0, 9, 8, 7, 6, 5, 0, 0]]) + assert_array_equal(zero, expected) + + zero = zero_ext(a, 1, axis=0) + expected = np.array([[0, 0, 0, 0, 0], + [1, 2, 3, 4, 5], + [9, 8, 7, 6, 5], + [0, 0, 0, 0, 0]]) + assert_array_equal(zero, expected) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_array_tools.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_array_tools.pyc new file mode 100644 index 0000000..46402ac Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_array_tools.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_bsplines.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_bsplines.py new file mode 100644 index 0000000..77753f3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_bsplines.py @@ -0,0 +1,224 @@ +# pylint: disable=missing-docstring +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import array +from numpy.testing import (TestCase, assert_equal, run_module_suite, + assert_allclose, assert_array_equal, + assert_almost_equal) +from pytest import raises + +import scipy.signal.bsplines as bsp + + +class TestBSplines(object): + """Test behaviors of bsplines. 
The values tested against were returned as of + scipy 1.1.0 and are included for regression testing purposes""" + + def test_factorial(self): + # can't all be zero state + assert_equal(bsp.factorial(1), 1) + + def test_spline_filter(self): + np.random.seed(12457) + # Test the type-error branch + raises(TypeError, bsp.spline_filter, array([0]), 0) + # Test the complex branch + data_array_complex = np.random.rand(7, 7) + np.random.rand(7, 7)*1j + # make the magnitude exceed 1, and make some negative + data_array_complex = 10*(1+1j-2*data_array_complex) + result_array_complex = array( + [[-4.61489230e-01-1.92994022j, 8.33332443+6.25519943j, + 6.96300745e-01-9.05576038j, 5.28294849+3.97541356j, + 5.92165565+7.68240595j, 6.59493160-1.04542804j, + 9.84503460-5.85946894j], + [-8.78262329-8.4295969j, 7.20675516+5.47528982j, + -8.17223072+2.06330729j, -4.38633347-8.65968037j, + 9.89916801-8.91720295j, 2.67755103+8.8706522j, + 6.24192142+3.76879835j], + [-3.15627527+2.56303072j, 9.87658501-0.82838702j, + -9.96930313+8.72288895j, 3.17193985+6.42474651j, + -4.50919819-6.84576082j, 5.75423431+9.94723988j, + 9.65979767+6.90665293j], + [-8.28993416-6.61064005j, 9.71416473e-01-9.44907284j, + -2.38331890+9.25196648j, -7.08868170-0.77403212j, + 4.89887714+7.05371094j, -1.37062311-2.73505688j, + 7.70705748+2.5395329j], + [2.51528406-1.82964492j, 3.65885472+2.95454836j, + 5.16786575-1.66362023j, -8.77737999e-03+5.72478867j, + 4.10533333-3.10287571j, 9.04761887+1.54017115j, + -5.77960968e-01-7.87758923j], + [9.86398506-3.98528528j, -4.71444130-2.44316983j, + -1.68038976-1.12708664j, 2.84695053+1.01725709j, + 1.14315915-8.89294529j, -3.17127085-5.42145538j, + 1.91830420-6.16370344j], + [7.13875294+2.91851187j, -5.35737514+9.64132309j, + -9.66586399+0.70250005j, -9.87717438-2.0262239j, + 9.93160629+1.5630846j, 4.71948051-2.22050714j, + 9.49550819+7.8995142j]]) + # FIXME: for complex types, the computations are done in + # single precision (reason unclear). When this is changed, + # this test needs updating. 
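+        # (hence the loose rtol=1e-6 below; the real-valued branch further
+        # down is compared at assert_allclose's default tolerance)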
+ assert_allclose(bsp.spline_filter(data_array_complex, 0), + result_array_complex, rtol=1e-6) + # Test the real branch + np.random.seed(12457) + data_array_real = np.random.rand(12, 12) + # make the magnitude exceed 1, and make some negative + data_array_real = 10*(1-2*data_array_real) + result_array_real = array( + [[-.463312621, 8.33391222, .697290949, 5.28390836, + 5.92066474, 6.59452137, 9.84406950, -8.78324188, + 7.20675750, -8.17222994, -4.38633345, 9.89917069], + [2.67755154, 6.24192170, -3.15730578, 9.87658581, + -9.96930425, 3.17194115, -4.50919947, 5.75423446, + 9.65979824, -8.29066885, .971416087, -2.38331897], + [-7.08868346, 4.89887705, -1.37062289, 7.70705838, + 2.51526461, 3.65885497, 5.16786604, -8.77715342e-03, + 4.10533325, 9.04761993, -.577960351, 9.86382519], + [-4.71444301, -1.68038985, 2.84695116, 1.14315938, + -3.17127091, 1.91830461, 7.13779687, -5.35737482, + -9.66586425, -9.87717456, 9.93160672, 4.71948144], + [9.49551194, -1.92958436, 6.25427993, -9.05582911, + 3.97562282, 7.68232426, -1.04514824, -5.86021443, + -8.43007451, 5.47528997, 2.06330736, -8.65968112], + [-8.91720100, 8.87065356, 3.76879937, 2.56222894, + -.828387146, 8.72288903, 6.42474741, -6.84576083, + 9.94724115, 6.90665380, -6.61084494, -9.44907391], + [9.25196790, -.774032030, 7.05371046, -2.73505725, + 2.53953305, -1.82889155, 2.95454824, -1.66362046, + 5.72478916, -3.10287679, 1.54017123, -7.87759020], + [-3.98464539, -2.44316992, -1.12708657, 1.01725672, + -8.89294671, -5.42145629, -6.16370321, 2.91775492, + 9.64132208, .702499998, -2.02622392, 1.56308431], + [-2.22050773, 7.89951554, 5.98970713, -7.35861835, + 5.45459283, -7.76427957, 3.67280490, -4.05521315, + 4.51967507, -3.22738749, -3.65080177, 3.05630155], + [-6.21240584, -.296796126, -8.34800163, 9.21564563, + -3.61958784, -4.77120006, -3.99454057, 1.05021988e-03, + -6.95982829, 6.04380797, 8.43181250, -2.71653339], + [1.19638037, 6.99718842e-02, 6.72020394, -2.13963198, + 3.75309875, -5.70076744, 5.92143551, -7.22150575, + -3.77114594, -1.11903194, -5.39151466, 3.06620093], + [9.86326886, 1.05134482, -7.75950607, -3.64429655, + 7.81848957, -9.02270373, 3.73399754, -4.71962549, + -7.71144306, 3.78263161, 6.46034818, -4.43444731]]) + assert_allclose(bsp.spline_filter(data_array_real, 0), + result_array_real) + + def test_bspline(self): + np.random.seed(12458) + assert_allclose(bsp.bspline(np.random.rand(1, 1), 2), + array([[0.73694695]])) + data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j + data_array_complex = 0.1*data_array_complex + result_array_complex = array( + [[0.40882362, 0.41021151, 0.40886708, 0.40905103], + [0.40829477, 0.41021230, 0.40966097, 0.40939871], + [0.41036803, 0.40901724, 0.40965331, 0.40879513], + [0.41032862, 0.40925287, 0.41037754, 0.41027477]]) + assert_allclose(bsp.bspline(data_array_complex, 10), + result_array_complex) + + def test_gauss_spline(self): + np.random.seed(12459) + assert_almost_equal(bsp.gauss_spline(0, 0), 1.381976597885342) + assert_allclose(bsp.gauss_spline(array([1.]), 1), array([0.04865217])) + + def test_cubic(self): + np.random.seed(12460) + assert_array_equal(bsp.cubic([0]), array([0])) + data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j + data_array_complex = 1+1j-2*data_array_complex + # scaling the magnitude by 10 makes the results close enough to zero, + # that the assertion fails, so just make the elements have a mix of + # positive and negative imaginary components... 
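+        # (the expected values were generated with scipy 1.1.0, per the
+        # class docstring above)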
+ result_array_complex = array( + [[0.23056563, 0.38414406, 0.08342987, 0.06904847], + [0.17240848, 0.47055447, 0.63896278, 0.39756424], + [0.12672571, 0.65862632, 0.1116695, 0.09700386], + [0.3544116, 0.17856518, 0.1528841, 0.17285762]]) + assert_allclose(bsp.cubic(data_array_complex), result_array_complex) + + def test_quadratic(self): + np.random.seed(12461) + assert_array_equal(bsp.quadratic([0]), array([0])) + data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j + # scaling the magnitude by 10 makes the results all zero, + # so just make the elements have a mix of positive and negative + # imaginary components... + data_array_complex = (1+1j-2*data_array_complex) + result_array_complex = array( + [[0.23062746, 0.06338176, 0.34902312, 0.31944105], + [0.14701256, 0.13277773, 0.29428615, 0.09814697], + [0.52873842, 0.06484157, 0.09517566, 0.46420389], + [0.09286829, 0.09371954, 0.1422526, 0.16007024]]) + assert_allclose(bsp.quadratic(data_array_complex), + result_array_complex) + + def test_cspline1d(self): + np.random.seed(12462) + assert_array_equal(bsp.cspline1d(array([0])), [0.]) + c1d = array([1.21037185, 1.86293902, 2.98834059, 4.11660378, + 4.78893826]) + # test lamda != 0 + assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5]), 1), c1d) + c1d0 = array([0.78683946, 2.05333735, 2.99981113, 3.94741812, + 5.21051638]) + assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5])), c1d0) + + def test_qspline1d(self): + np.random.seed(12463) + assert_array_equal(bsp.qspline1d(array([0])), [0.]) + # test lamda != 0 + raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), 1.) + raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), -1.) + q1d0 = array([0.85350007, 2.02441743, 2.99999534, 3.97561055, + 5.14634135]) + assert_allclose(bsp.qspline1d(array([1., 2, 3, 4, 5])), q1d0) + + def test_cspline1d_eval(self): + np.random.seed(12464) + assert_allclose(bsp.cspline1d_eval(array([0., 0]), [0.]), array([0.])) + assert_array_equal(bsp.cspline1d_eval(array([1., 0, 1]), []), + array([])) + x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6] + dx = x[1]-x[0] + newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., + -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6., + 6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12., + 12.5] + y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879, + 1.396, 4.094]) + cj = bsp.cspline1d(y) + newy = array([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068, + 4.21600281, 6.04643068, 6.864, 5.16924703, 3.514, + 4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433, + 7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396, + 2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879, + 7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759, + 6.80717667, 6.203, 4.41570658]) + assert_allclose(bsp.cspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy) + + def test_qspline1d_eval(self): + np.random.seed(12465) + assert_allclose(bsp.qspline1d_eval(array([0., 0]), [0.]), array([0.])) + assert_array_equal(bsp.qspline1d_eval(array([1., 0, 1]), []), + array([])) + x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6] + dx = x[1]-x[0] + newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., + -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6., + 6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12., + 12.5] + y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879, + 1.396, 4.094]) + cj = bsp.qspline1d(y) + newy = array([6.203, 4.49418159, 3.514, 5.18390821, 6.864, 5.91436915, + 4.21600002, 5.91436915, 
6.864, 5.18390821, 3.514, + 4.49418159, 6.203, 6.71900226, 6.759, 7.03980488, 7.433, + 7.81016848, 7.874, 7.32718426, 5.879, 3.23872593, 1.396, + 2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879, + 7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759, + 6.71900226, 6.203, 4.49418159]) + assert_allclose(bsp.qspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_bsplines.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_bsplines.pyc new file mode 100644 index 0000000..d33d875 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_bsplines.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_cont2discrete.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_cont2discrete.py new file mode 100644 index 0000000..9ec1d87 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_cont2discrete.py @@ -0,0 +1,370 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import \ + assert_array_almost_equal, assert_almost_equal, \ + assert_allclose, assert_equal + +import warnings +from scipy.signal import cont2discrete as c2d +from scipy.signal import dlsim, ss2tf, ss2zpk, lsim2, lti + +# Author: Jeffrey Armstrong <jeff@approximatrix.com> +# March 29, 2011 + + +class TestC2D(object): + def test_zoh(self): + ac = np.eye(2) + bc = 0.5 * np.ones((2, 1)) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + ad_truth = 1.648721270700128 * np.eye(2) + bd_truth = 0.324360635350064 * np.ones((2, 1)) + # c and d in discrete should be equal to their continuous counterparts + dt_requested = 0.5 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cc, cd) + assert_array_almost_equal(dc, dd) + assert_almost_equal(dt_requested, dt) + + def test_gbt(self): + ac = np.eye(2) + bc = 0.5 * np.ones((2, 1)) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + alpha = 1.0 / 3.0 + + ad_truth = 1.6 * np.eye(2) + bd_truth = 0.3 * np.ones((2, 1)) + cd_truth = np.array([[0.9, 1.2], + [1.2, 1.2], + [1.2, 0.3]]) + dd_truth = np.array([[0.175], + [0.2], + [-0.205]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='gbt', alpha=alpha) + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + + def test_euler(self): + ac = np.eye(2) + bc = 0.5 * np.ones((2, 1)) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = 1.5 * np.eye(2) + bd_truth = 0.25 * np.ones((2, 1)) + cd_truth = np.array([[0.75, 1.0], + [1.0, 1.0], + [1.0, 0.25]]) + dd_truth = dc + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='euler') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_backward_diff(self): + ac = np.eye(2) + bc = 0.5 * np.ones((2, 1)) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + 
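+        # same continuous-time system as in the tests above; only the
+        # discretization method (backward differencing) changes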
+ dt_requested = 0.5 + + ad_truth = 2.0 * np.eye(2) + bd_truth = 0.5 * np.ones((2, 1)) + cd_truth = np.array([[1.5, 2.0], + [2.0, 2.0], + [2.0, 0.5]]) + dd_truth = np.array([[0.875], + [1.0], + [0.295]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='backward_diff') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + + def test_bilinear(self): + ac = np.eye(2) + bc = 0.5 * np.ones((2, 1)) + cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) + dc = np.array([[0.0], [0.0], [-0.33]]) + + dt_requested = 0.5 + + ad_truth = (5.0 / 3.0) * np.eye(2) + bd_truth = (1.0 / 3.0) * np.ones((2, 1)) + cd_truth = np.array([[1.0, 4.0 / 3.0], + [4.0 / 3.0, 4.0 / 3.0], + [4.0 / 3.0, 1.0 / 3.0]]) + dd_truth = np.array([[0.291666666666667], + [1.0 / 3.0], + [-0.121666666666667]]) + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='bilinear') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + # Same continuous system again, but change sampling rate + + ad_truth = 1.4 * np.eye(2) + bd_truth = 0.2 * np.ones((2, 1)) + cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]]) + dd_truth = np.array([[0.175], [0.2], [-0.205]]) + + dt_requested = 1.0 / 3.0 + + ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, + method='bilinear') + + assert_array_almost_equal(ad_truth, ad) + assert_array_almost_equal(bd_truth, bd) + assert_array_almost_equal(cd_truth, cd) + assert_array_almost_equal(dd_truth, dd) + assert_almost_equal(dt_requested, dt) + + def test_transferfunction(self): + numc = np.array([0.25, 0.25, 0.5]) + denc = np.array([0.75, 0.75, 1.0]) + + numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]]) + dend = np.array([1.0, -1.351394049721225, 0.606530659712634]) + + dt_requested = 0.5 + + num, den, dt = c2d((numc, denc), dt_requested, method='zoh') + + assert_array_almost_equal(numd, num) + assert_array_almost_equal(dend, den) + assert_almost_equal(dt_requested, dt) + + def test_zerospolesgain(self): + zeros_c = np.array([0.5, -0.5]) + poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) + k_c = 1.0 + + zeros_d = [1.23371727305860, 0.735356894461267] + polls_d = [0.938148335039729 + 0.346233593780536j, + 0.938148335039729 - 0.346233593780536j] + k_d = 1.0 + + dt_requested = 0.5 + + zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested, + method='zoh') + + assert_array_almost_equal(zeros_d, zeros) + assert_array_almost_equal(polls_d, poles) + assert_almost_equal(k_d, k) + assert_almost_equal(dt_requested, dt) + + def test_gbt_with_sio_tf_and_zpk(self): + """Test method='gbt' with alpha=0.25 for tf and zpk cases.""" + # State space coefficients for the continuous SIO system. + A = -1.0 + B = 1.0 + C = 1.0 + D = 0.5 + + # The continuous transfer function coefficients. + cnum, cden = ss2tf(A, B, C, D) + + # Continuous zpk representation + cz, cp, ck = ss2zpk(A, B, C, D) + + h = 1.0 + alpha = 0.25 + + # Explicit formulas, in the scalar case. + Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A) + Bd = h * B / (1 - alpha * h * A) + Cd = C / (1 - alpha * h * A) + Dd = D + alpha * C * Bd + + # Convert the explicit solution to tf + dnum, dden = ss2tf(Ad, Bd, Cd, Dd) + + # Compute the discrete tf using cont2discrete. 
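+        # (alpha=0.25 lies between forward Euler, alpha=0, and the
+        # bilinear/Tustin rule, alpha=0.5)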
+        c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha)
+
+        assert_allclose(dnum, c2dnum)
+        assert_allclose(dden, c2dden)
+
+        # Convert explicit solution to zpk.
+        dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd)
+
+        # Compute the discrete zpk using cont2discrete.
+        c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha)
+
+        assert_allclose(dz, c2dz)
+        assert_allclose(dp, c2dp)
+        assert_allclose(dk, c2dk)
+
+    def test_discrete_approx(self):
+        """
+        Test that the solution to the discrete approximation of a continuous
+        system actually approximates the solution to the continuous system.
+        This is an indirect test of the correctness of the implementation
+        of cont2discrete.
+        """
+
+        def u(t):
+            return np.sin(2.5 * t)
+
+        a = np.array([[-0.01]])
+        b = np.array([[1.0]])
+        c = np.array([[1.0]])
+        d = np.array([[0.2]])
+        x0 = 1.0
+
+        t = np.linspace(0, 10.0, 101)
+        dt = t[1] - t[0]
+        u1 = u(t)
+
+        # Use lsim2 to compute the solution to the continuous system.
+        t, yout, xout = lsim2((a, b, c, d), T=t, U=u1, X0=x0,
+                              rtol=1e-9, atol=1e-11)
+
+        # Convert the continuous system to a discrete approximation.
+        dsys = c2d((a, b, c, d), dt, method='bilinear')
+
+        # Use dlsim with the pairwise averaged input to compute the output
+        # of the discrete system.
+        u2 = 0.5 * (u1[:-1] + u1[1:])
+        t2 = t[:-1]
+        td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0)
+
+        # ymid is the average of consecutive terms of the "exact" output
+        # computed by lsim2. This is what the discrete approximation
+        # actually approximates.
+        ymid = 0.5 * (yout[:-1] + yout[1:])
+
+        assert_allclose(yd2.ravel(), ymid, rtol=1e-4)
+
+    def test_simo_tf(self):
+        # See gh-5753
+        tf = ([[1, 0], [1, 1]], [1, 1])
+        num, den, dt = c2d(tf, 0.01)
+
+        assert_equal(dt, 0.01)  # sanity check
+        assert_allclose(den, [1, -0.990404983], rtol=1e-3)
+        assert_allclose(num, [[1, -1], [1, -0.99004983]], rtol=1e-3)
+
+    def test_multioutput(self):
+        ts = 0.01  # time step
+
+        tf = ([[1, -3], [1, 5]], [1, 1])
+        num, den, dt = c2d(tf, ts)
+
+        tf1 = (tf[0][0], tf[1])
+        num1, den1, dt1 = c2d(tf1, ts)
+
+        tf2 = (tf[0][1], tf[1])
+        num2, den2, dt2 = c2d(tf2, ts)
+
+        # Sanity checks
+        assert_equal(dt, dt1)
+        assert_equal(dt, dt2)
+
+        # Check that we get the same results
+        assert_allclose(num, np.vstack((num1, num2)), rtol=1e-13)
+
+        # Single input, so the denominator should
+        # not be multidimensional like the numerator
+        assert_allclose(den, den1, rtol=1e-13)
+        assert_allclose(den, den2, rtol=1e-13)
+
+class TestC2dLti(object):
+    def test_c2d_ss(self):
+        # StateSpace
+        A = np.array([[-0.3, 0.1], [0.2, -0.7]])
+        B = np.array([[0], [1]])
+        C = np.array([[1, 0]])
+        D = 0
+
+        A_res = np.array([[0.985136404135682, 0.004876671474795],
+                          [0.009753342949590, 0.965629718236502]])
+        B_res = np.array([[0.000122937599964], [0.049135527547844]])
+
+        sys_ssc = lti(A, B, C, D)
+        sys_ssd = sys_ssc.to_discrete(0.05)
+
+        assert_allclose(sys_ssd.A, A_res)
+        assert_allclose(sys_ssd.B, B_res)
+        assert_allclose(sys_ssd.C, C)
+        assert_allclose(sys_ssd.D, D)
+
+    def test_c2d_tf(self):
+
+        sys = lti([0.5, 0.3], [1.0, 0.4])
+        sys = sys.to_discrete(0.005)
+
+        # Matlab results
+        num_res = np.array([0.5, -0.485149004980066])
+        den_res = np.array([1.0, -0.980198673306755])
+
+        # Somehow a lot of numerical errors
+        assert_allclose(sys.den, den_res, atol=0.02)
+        assert_allclose(sys.num, num_res, atol=0.02)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_cont2discrete.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_cont2discrete.pyc
new file mode 100644
index 0000000..3170e27
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_cont2discrete.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_dltisys.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_dltisys.py
new file mode 100644
index 0000000..72b28e6
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_dltisys.py
@@ -0,0 +1,600 @@
+# Author: Jeffrey Armstrong <jeff@approximatrix.com>
+# April 4, 2011
+
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.testing import (assert_equal,
+                           assert_array_almost_equal, assert_array_equal,
+                           assert_allclose, assert_, assert_almost_equal)
+from pytest import raises as assert_raises
+from scipy._lib._numpy_compat import suppress_warnings
+from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti,
+                          StateSpace, TransferFunction, ZerosPolesGain,
+                          dfreqresp, dbode, BadCoefficients)
+
+
+class TestDLTI(object):
+
+    def test_dlsim(self):
+
+        a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
+        b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
+        c = np.asarray([[0.1, 0.3]])
+        d = np.asarray([[0.0, -0.1, 0.0]])
+        dt = 0.5
+
+        # Create an input matrix with inputs down the columns (3 cols) and its
+        # respective time input vector
+        u = np.hstack((np.asmatrix(np.linspace(0, 4.0, num=5)).transpose(),
+                       0.01 * np.ones((5, 1)),
+                       -0.002 * np.ones((5, 1))))
+        t_in = np.linspace(0, 2.0, num=5)
+
+        # Define the known result
+        yout_truth = np.asmatrix([-0.001,
+                                  -0.00073,
+                                  0.039446,
+                                  0.0915387,
+                                  0.13195948]).transpose()
+        xout_truth = np.asarray([[0, 0],
+                                 [0.0012, 0.0005],
+                                 [0.40233, 0.00071],
+                                 [1.163368, -0.079327],
+                                 [2.2402985, -0.3035679]])
+
+        tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in)
+
+        assert_array_almost_equal(yout_truth, yout)
+        assert_array_almost_equal(xout_truth, xout)
+        assert_array_almost_equal(t_in, tout)
+
+        # Make sure input with single-dimension doesn't raise error
+        dlsim((1, 2, 3), 4)
+
+        # Interpolated control - inputs should have different time steps
+        # than the discrete model uses internally
+        u_sparse = u[[0, 4], :]
+        t_sparse = np.asarray([0.0, 2.0])
+
+        tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse)
+
+        assert_array_almost_equal(yout_truth, yout)
+        assert_array_almost_equal(xout_truth, xout)
+        assert_equal(len(tout), yout.shape[0])
+
+        # Transfer functions (assume dt = 0.5)
+        num = np.asarray([1.0, -0.1])
+        den = np.asarray([0.3, 1.0, 0.2])
+        yout_truth = np.asmatrix([0.0,
+                                  0.0,
+                                  3.33333333333333,
-4.77777777777778, + 23.0370370370370]).transpose() + + # Assume use of the first column of the control input built earlier + tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in) + + assert_array_almost_equal(yout, yout_truth) + assert_array_almost_equal(t_in, tout) + + # Retest the same with a 1-D input vector + uflat = np.asarray(u[:, 0]) + uflat = uflat.reshape((5,)) + tout, yout = dlsim((num, den, 0.5), uflat, t_in) + + assert_array_almost_equal(yout, yout_truth) + assert_array_almost_equal(t_in, tout) + + # zeros-poles-gain representation + zd = np.array([0.5, -0.5]) + pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) + k = 1.0 + yout_truth = np.asmatrix([0.0, 1.0, 2.0, 2.25, 2.5]).transpose() + + tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in) + + assert_array_almost_equal(yout, yout_truth) + assert_array_almost_equal(t_in, tout) + + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dlsim, system, u) + + def test_dstep(self): + + a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) + b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) + c = np.asarray([[0.1, 0.3]]) + d = np.asarray([[0.0, -0.1, 0.0]]) + dt = 0.5 + + # Because b.shape[1] == 3, dstep should result in a tuple of three + # result vectors + yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956, + -0.036324, -0.093318, -0.15782348, + -0.226628324, -0.2969374948]), + np.asarray([-0.1, -0.075, -0.058, -0.04815, + -0.04453, -0.0461895, -0.0521812, + -0.061588875, -0.073549579, + -0.08727047595]), + np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239, + 0.009081, 0.0233295, 0.03945587, + 0.056657081, 0.0742343737])) + + tout, yout = dstep((a, b, c, d, dt), n=10) + + assert_equal(len(yout), 3) + + for i in range(0, len(yout)): + assert_equal(yout[i].shape[0], 10) + assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i]) + + # Check that the other two inputs (tf, zpk) will work as well + tfin = ([1.0], [1.0, 1.0], 0.5) + yout_tfstep = np.asarray([0.0, 1.0, 0.0]) + tout, yout = dstep(tfin, n=3) + assert_equal(len(yout), 1) + assert_array_almost_equal(yout[0].flatten(), yout_tfstep) + + zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,) + tout, yout = dstep(zpkin, n=3) + assert_equal(len(yout), 1) + assert_array_almost_equal(yout[0].flatten(), yout_tfstep) + + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dstep, system) + + def test_dimpulse(self): + + a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) + b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) + c = np.asarray([[0.1, 0.3]]) + d = np.asarray([[0.0, -0.1, 0.0]]) + dt = 0.5 + + # Because b.shape[1] == 3, dimpulse should result in a tuple of three + # result vectors + yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084, + -0.045884, -0.056994, -0.06450548, + -0.068804844, -0.0703091708]), + np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362, + -0.0016595, -0.0059917, -0.009407675, + -0.011960704, -0.01372089695]), + np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771, + 0.011471, 0.0142485, 0.01612637, + 0.017201211, 0.0175772927])) + + tout, yout = dimpulse((a, b, c, d, dt), n=10) + + assert_equal(len(yout), 3) + + for i in range(0, len(yout)): + assert_equal(yout[i].shape[0], 10) + assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i]) + + # Check that the other two inputs (tf, zpk) will work as well + tfin = ([1.0], [1.0, 1.0], 0.5) + yout_tfimpulse = np.asarray([0.0, 1.0, -1.0]) + tout, yout = dimpulse(tfin, n=3) + assert_equal(len(yout), 1) + 
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse) + + zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,) + tout, yout = dimpulse(zpkin, n=3) + assert_equal(len(yout), 1) + assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse) + + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dimpulse, system) + + def test_dlsim_trivial(self): + a = np.array([[0.0]]) + b = np.array([[0.0]]) + c = np.array([[0.0]]) + d = np.array([[0.0]]) + n = 5 + u = np.zeros(n).reshape(-1, 1) + tout, yout, xout = dlsim((a, b, c, d, 1), u) + assert_array_equal(tout, np.arange(float(n))) + assert_array_equal(yout, np.zeros((n, 1))) + assert_array_equal(xout, np.zeros((n, 1))) + + def test_dlsim_simple1d(self): + a = np.array([[0.5]]) + b = np.array([[0.0]]) + c = np.array([[1.0]]) + d = np.array([[0.0]]) + n = 5 + u = np.zeros(n).reshape(-1, 1) + tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1) + assert_array_equal(tout, np.arange(float(n))) + expected = (0.5 ** np.arange(float(n))).reshape(-1, 1) + assert_array_equal(yout, expected) + assert_array_equal(xout, expected) + + def test_dlsim_simple2d(self): + lambda1 = 0.5 + lambda2 = 0.25 + a = np.array([[lambda1, 0.0], + [0.0, lambda2]]) + b = np.array([[0.0], + [0.0]]) + c = np.array([[1.0, 0.0], + [0.0, 1.0]]) + d = np.array([[0.0], + [0.0]]) + n = 5 + u = np.zeros(n).reshape(-1, 1) + tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1) + assert_array_equal(tout, np.arange(float(n))) + # The analytical solution: + expected = (np.array([lambda1, lambda2]) ** + np.arange(float(n)).reshape(-1, 1)) + assert_array_equal(yout, expected) + assert_array_equal(xout, expected) + + def test_more_step_and_impulse(self): + lambda1 = 0.5 + lambda2 = 0.75 + a = np.array([[lambda1, 0.0], + [0.0, lambda2]]) + b = np.array([[1.0, 0.0], + [0.0, 1.0]]) + c = np.array([[1.0, 1.0]]) + d = np.array([[0.0, 0.0]]) + + n = 10 + + # Check a step response. + ts, ys = dstep((a, b, c, d, 1), n=n) + + # Create the exact step response. + stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n)) + stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n)) + + assert_allclose(ys[0][:, 0], stp0) + assert_allclose(ys[1][:, 0], stp1) + + # Check an impulse response with an initial condition. + x0 = np.array([1.0, 1.0]) + ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0) + + # Create the exact impulse response. + imp = (np.array([lambda1, lambda2]) ** + np.arange(-1, n + 1).reshape(-1, 1)) + imp[0, :] = 0.0 + # Analytical solution to impulse response + y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0) + y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0) + + assert_allclose(yi[0][:, 0], y0) + assert_allclose(yi[1][:, 0], y1) + + # Check that dt=0.1, n=3 gives 3 time values. + system = ([1.0], [1.0, -0.5], 0.1) + t, (y,) = dstep(system, n=3) + assert_allclose(t, [0, 0.1, 0.2]) + assert_array_equal(y.T, [[0, 1.0, 1.5]]) + t, (y,) = dimpulse(system, n=3) + assert_allclose(t, [0, 0.1, 0.2]) + assert_array_equal(y.T, [[0, 1, 0.5]]) + + +class TestDlti(object): + def test_dlti_instantiation(self): + # Test that lti can be instantiated. 
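+        # (more precisely: that dlti, the discrete-time counterpart of lti,
+        # can be instantiated for each system representation)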
+ + dt = 0.05 + # TransferFunction + s = dlti([1], [-1], dt=dt) + assert_(isinstance(s, TransferFunction)) + assert_(isinstance(s, dlti)) + assert_(not isinstance(s, lti)) + assert_equal(s.dt, dt) + + # ZerosPolesGain + s = dlti(np.array([]), np.array([-1]), 1, dt=dt) + assert_(isinstance(s, ZerosPolesGain)) + assert_(isinstance(s, dlti)) + assert_(not isinstance(s, lti)) + assert_equal(s.dt, dt) + + # StateSpace + s = dlti([1], [-1], 1, 3, dt=dt) + assert_(isinstance(s, StateSpace)) + assert_(isinstance(s, dlti)) + assert_(not isinstance(s, lti)) + assert_equal(s.dt, dt) + + # Number of inputs + assert_raises(ValueError, dlti, 1) + assert_raises(ValueError, dlti, 1, 1, 1, 1, 1) + + +class TestStateSpaceDisc(object): + def test_initialization(self): + # Check that all initializations work + dt = 0.05 + s = StateSpace(1, 1, 1, 1, dt=dt) + s = StateSpace([1], [2], [3], [4], dt=dt) + s = StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]), + np.array([[1, 0]]), np.array([[0]]), dt=dt) + s = StateSpace(1, 1, 1, 1, dt=True) + + def test_conversion(self): + # Check the conversion functions + s = StateSpace(1, 2, 3, 4, dt=0.05) + assert_(isinstance(s.to_ss(), StateSpace)) + assert_(isinstance(s.to_tf(), TransferFunction)) + assert_(isinstance(s.to_zpk(), ZerosPolesGain)) + + # Make sure copies work + assert_(StateSpace(s) is not s) + assert_(s.to_ss() is not s) + + def test_properties(self): + # Test setters/getters for cross class properties. + # This implicitly tests to_tf() and to_zpk() + + # Getters + s = StateSpace(1, 1, 1, 1, dt=0.05) + assert_equal(s.poles, [1]) + assert_equal(s.zeros, [0]) + + +class TestTransferFunction(object): + def test_initialization(self): + # Check that all initializations work + dt = 0.05 + s = TransferFunction(1, 1, dt=dt) + s = TransferFunction([1], [2], dt=dt) + s = TransferFunction(np.array([1]), np.array([2]), dt=dt) + s = TransferFunction(1, 1, dt=True) + + def test_conversion(self): + # Check the conversion functions + s = TransferFunction([1, 0], [1, -1], dt=0.05) + assert_(isinstance(s.to_ss(), StateSpace)) + assert_(isinstance(s.to_tf(), TransferFunction)) + assert_(isinstance(s.to_zpk(), ZerosPolesGain)) + + # Make sure copies work + assert_(TransferFunction(s) is not s) + assert_(s.to_tf() is not s) + + def test_properties(self): + # Test setters/getters for cross class properties. + # This implicitly tests to_ss() and to_zpk() + + # Getters + s = TransferFunction([1, 0], [1, -1], dt=0.05) + assert_equal(s.poles, [1]) + assert_equal(s.zeros, [0]) + + +class TestZerosPolesGain(object): + def test_initialization(self): + # Check that all initializations work + dt = 0.05 + s = ZerosPolesGain(1, 1, 1, dt=dt) + s = ZerosPolesGain([1], [2], 1, dt=dt) + s = ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt) + s = ZerosPolesGain(1, 1, 1, dt=True) + + def test_conversion(self): + # Check the conversion functions + s = ZerosPolesGain(1, 2, 3, dt=0.05) + assert_(isinstance(s.to_ss(), StateSpace)) + assert_(isinstance(s.to_tf(), TransferFunction)) + assert_(isinstance(s.to_zpk(), ZerosPolesGain)) + + # Make sure copies work + assert_(ZerosPolesGain(s) is not s) + assert_(s.to_zpk() is not s) + + +class Test_dfreqresp(object): + + def test_manual(self): + # Test dfreqresp() real part calculation (manual sanity check). 
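+        # (the expected values below are H(exp(1j*w)) evaluated at
+        # w = 0.1, 1 and 10 rad/sample)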
+ # 1st order low-pass filter: H(z) = 1 / (z - 0.2), + system = TransferFunction(1, [1, -0.2], dt=0.1) + w = [0.1, 1, 10] + w, H = dfreqresp(system, w=w) + + # test real + expected_re = [1.2383, 0.4130, -0.7553] + assert_almost_equal(H.real, expected_re, decimal=4) + + # test imag + expected_im = [-0.1555, -1.0214, 0.3955] + assert_almost_equal(H.imag, expected_im, decimal=4) + + def test_auto(self): + # Test dfreqresp() real part calculation. + # 1st order low-pass filter: H(z) = 1 / (z - 0.2), + system = TransferFunction(1, [1, -0.2], dt=0.1) + w = [0.1, 1, 10, 100] + w, H = dfreqresp(system, w=w) + jw = np.exp(w * 1j) + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + + # test real + expected_re = y.real + assert_almost_equal(H.real, expected_re) + + # test imag + expected_im = y.imag + assert_almost_equal(H.imag, expected_im) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(z) = 1 / (z - 0.2), + # Expected range is from 0.01 to 10. + system = TransferFunction(1, [1, -0.2], dt=0.1) + n = 10 + expected_w = np.linspace(0, np.pi, 10, endpoint=False) + w, H = dfreqresp(system, n=n) + assert_almost_equal(w, expected_w) + + def test_pole_one(self): + # Test that freqresp() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = TransferFunction([1], [1, -1], dt=0.1) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, message="divide by zero") + sup.filter(RuntimeWarning, message="invalid value encountered") + w, H = dfreqresp(system, n=2) + assert_equal(w[0], 0.) # a fail would give not-a-number + + def test_error(self): + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dfreqresp, system) + + def test_from_state_space(self): + # H(z) = 2 / z^3 - 0.5 * z^2 + + system_TF = dlti([2], [1, -0.5, 0, 0]) + + A = np.array([[0.5, 0, 0], + [1, 0, 0], + [0, 1, 0]]) + B = np.array([[1, 0, 0]]).T + C = np.array([[0, 0, 2]]) + D = 0 + + system_SS = dlti(A, B, C, D) + w = 10.0**np.arange(-3,0,.5) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + w1, H1 = dfreqresp(system_TF, w=w) + w2, H2 = dfreqresp(system_SS, w=w) + + assert_almost_equal(H1, H2) + + def test_from_zpk(self): + # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), + system_ZPK = dlti([],[0.2],0.3) + system_TF = dlti(0.3, [1, -0.2]) + w = [0.1, 1, 10, 100] + w1, H1 = dfreqresp(system_ZPK, w=w) + w2, H2 = dfreqresp(system_TF, w=w) + assert_almost_equal(H1, H2) + + +class Test_bode(object): + + def test_manual(self): + # Test bode() magnitude calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), + dt = 0.1 + system = TransferFunction(0.3, [1, -0.2], dt=dt) + w = [0.1, 0.5, 1, np.pi] + w2, mag, phase = dbode(system, w=w) + + # Test mag + expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412] + assert_almost_equal(mag, expected_mag, decimal=4) + + # Test phase + expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000] + assert_almost_equal(phase, expected_phase, decimal=4) + + # Test frequency + assert_equal(np.array(w) / dt, w2) + + def test_auto(self): + # Test bode() magnitude calculation. 
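+        # (unlike test_manual above, the expected curve is computed inline
+        # from the transfer function rather than hard-coded)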
+ # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), + system = TransferFunction(0.3, [1, -0.2], dt=0.1) + w = np.array([0.1, 0.5, 1, np.pi]) + w2, mag, phase = dbode(system, w=w) + jw = np.exp(w * 1j) + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + + # Test mag + expected_mag = 20.0 * np.log10(abs(y)) + assert_almost_equal(mag, expected_mag) + + # Test phase + expected_phase = np.rad2deg(np.angle(y)) + assert_almost_equal(phase, expected_phase) + + def test_range(self): + # Test that bode() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), + dt = 0.1 + system = TransferFunction(0.3, [1, -0.2], dt=0.1) + n = 10 + # Expected range is from 0.01 to 10. + expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt + w, mag, phase = dbode(system, n=n) + assert_almost_equal(w, expected_w) + + def test_pole_one(self): + # Test that freqresp() doesn't fail on a system with a pole at 0. + # integrator, pole at zero: H(s) = 1 / s + system = TransferFunction([1], [1, -1], dt=0.1) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, message="divide by zero") + sup.filter(RuntimeWarning, message="invalid value encountered") + w, mag, phase = dbode(system, n=2) + assert_equal(w[0], 0.) # a fail would give not-a-number + + def test_imaginary(self): + # bode() should not fail on a system with pure imaginary poles. + # The test passes if bode doesn't raise an exception. + system = TransferFunction([1], [1, 0, 100], dt=0.1) + dbode(system, n=2) + + def test_error(self): + # Raise an error for continuous-time systems + system = lti([1], [1, 1]) + assert_raises(AttributeError, dbode, system) + + +class TestTransferFunctionZConversion(object): + """Test private conversions between 'z' and 'z**-1' polynomials.""" + + def test_full(self): + # Numerator and denominator same order + num = [2, 3, 4] + den = [5, 6, 7] + num2, den2 = TransferFunction._z_to_zinv(num, den) + assert_equal(num, num2) + assert_equal(den, den2) + + num2, den2 = TransferFunction._zinv_to_z(num, den) + assert_equal(num, num2) + assert_equal(den, den2) + + def test_numerator(self): + # Numerator lower order than denominator + num = [2, 3] + den = [5, 6, 7] + num2, den2 = TransferFunction._z_to_zinv(num, den) + assert_equal([0, 2, 3], num2) + assert_equal(den, den2) + + num2, den2 = TransferFunction._zinv_to_z(num, den) + assert_equal([2, 3, 0], num2) + assert_equal(den, den2) + + def test_denominator(self): + # Numerator higher order than denominator + num = [2, 3, 4] + den = [5, 6] + num2, den2 = TransferFunction._z_to_zinv(num, den) + assert_equal(num, num2) + assert_equal([0, 5, 6], den2) + + num2, den2 = TransferFunction._zinv_to_z(num, den) + assert_equal(num, num2) + assert_equal([5, 6, 0], den2) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_dltisys.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_dltisys.pyc new file mode 100644 index 0000000..80d72ef Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_dltisys.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_filter_design.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_filter_design.py new file mode 100644 index 0000000..d080b55 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_filter_design.py @@ -0,0 +1,3638 @@ +from __future__ import division, print_function, absolute_import + +import warnings + +from distutils.version 
import LooseVersion +import numpy as np +from numpy.testing import (assert_array_almost_equal, + assert_array_equal, assert_array_less, + assert_equal, assert_, + assert_allclose, assert_warns) +import pytest +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +from numpy import array, spacing, sin, pi, sort, sqrt +from scipy.signal import (BadCoefficients, bessel, besselap, bilinear, buttap, + butter, buttord, cheb1ap, cheb1ord, cheb2ap, + cheb2ord, cheby1, cheby2, ellip, ellipap, ellipord, + firwin, freqs_zpk, freqs, freqz, freqz_zpk, + group_delay, iirfilter, iirnotch, iirpeak, lp2bp, + lp2bs, lp2hp, lp2lp, normalize, sos2tf, sos2zpk, + sosfreqz, tf2sos, tf2zpk, zpk2sos, zpk2tf, + bilinear_zpk, lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, + lp2bs_zpk) +from scipy.signal.filter_design import (_cplxreal, _cplxpair, _norm_factor, + _bessel_poly, _bessel_zeros) + +try: + import mpmath +except ImportError: + mpmath = None + + +def mpmath_check(min_ver): + return pytest.mark.skipif(mpmath is None or + LooseVersion(mpmath.__version__) < LooseVersion(min_ver), + reason="mpmath version >= %s required" % min_ver) + + +class TestCplxPair(object): + + def test_trivial_input(self): + assert_equal(_cplxpair([]).size, 0) + assert_equal(_cplxpair(1), 1) + + def test_output_order(self): + assert_allclose(_cplxpair([1+1j, 1-1j]), [1-1j, 1+1j]) + + a = [1+1j, 1+1j, 1, 1-1j, 1-1j, 2] + b = [1-1j, 1+1j, 1-1j, 1+1j, 1, 2] + assert_allclose(_cplxpair(a), b) + + # points spaced around the unit circle + z = np.exp(2j*pi*array([4, 3, 5, 2, 6, 1, 0])/7) + z1 = np.copy(z) + np.random.shuffle(z) + assert_allclose(_cplxpair(z), z1) + np.random.shuffle(z) + assert_allclose(_cplxpair(z), z1) + np.random.shuffle(z) + assert_allclose(_cplxpair(z), z1) + + # Should be able to pair up all the conjugates + x = np.random.rand(10000) + 1j * np.random.rand(10000) + y = x.conj() + z = np.random.rand(10000) + x = np.concatenate((x, y, z)) + np.random.shuffle(x) + c = _cplxpair(x) + + # Every other element of head should be conjugates: + assert_allclose(c[0:20000:2], np.conj(c[1:20000:2])) + # Real parts of head should be in sorted order: + assert_allclose(c[0:20000:2].real, np.sort(c[0:20000:2].real)) + # Tail should be sorted real numbers: + assert_allclose(c[20000:], np.sort(c[20000:])) + + def test_real_integer_input(self): + assert_array_equal(_cplxpair([2, 0, 1]), [0, 1, 2]) + + def test_tolerances(self): + eps = spacing(1) + assert_allclose(_cplxpair([1j, -1j, 1+1j*eps], tol=2*eps), + [-1j, 1j, 1+1j*eps]) + + # sorting close to 0 + assert_allclose(_cplxpair([-eps+1j, +eps-1j]), [-1j, +1j]) + assert_allclose(_cplxpair([+eps+1j, -eps-1j]), [-1j, +1j]) + assert_allclose(_cplxpair([+1j, -1j]), [-1j, +1j]) + + def test_unmatched_conjugates(self): + # 1+2j is unmatched + assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j]) + + # 1+2j and 1-3j are unmatched + assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j, 1-3j]) + + # 1+3j is unmatched + assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+3j]) + + # Not conjugates + assert_raises(ValueError, _cplxpair, [4+5j, 4+5j]) + assert_raises(ValueError, _cplxpair, [1-7j, 1-7j]) + + # No pairs + assert_raises(ValueError, _cplxpair, [1+3j]) + assert_raises(ValueError, _cplxpair, [1-3j]) + + +class TestCplxReal(object): + + def test_trivial_input(self): + assert_equal(_cplxreal([]), ([], [])) + assert_equal(_cplxreal(1), ([], [1])) + + def test_output_order(self): + zc, zr = _cplxreal(np.roots(array([1, 0, 0, 1]))) + 
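+        # roots of z**3 + 1: a conjugate pair plus the real root -1;
+        # _cplxreal keeps the upper-half-plane member of each pair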
assert_allclose(np.append(zc, zr), [1/2 + 1j*sin(pi/3), -1]) + + eps = spacing(1) + + a = [0+1j, 0-1j, eps + 1j, eps - 1j, -eps + 1j, -eps - 1j, + 1, 4, 2, 3, 0, 0, + 2+3j, 2-3j, + 1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, # sorts out of order + 3+1j, 3+1j, 3+1j, 3-1j, 3-1j, 3-1j, + 2-3j, 2+3j] + zc, zr = _cplxreal(a) + assert_allclose(zc, [1j, 1j, 1j, 1+1j, 1+2j, 2+3j, 2+3j, 3+1j, 3+1j, + 3+1j]) + assert_allclose(zr, [0, 0, 1, 2, 3, 4]) + + z = array([1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, 1+eps+3j, 1-2*eps-3j, + 0+1j, 0-1j, 2+4j, 2-4j, 2+3j, 2-3j, 3+7j, 3-7j, 4-eps+1j, + 4+eps-2j, 4-1j, 4-eps+2j]) + + zc, zr = _cplxreal(z) + assert_allclose(zc, [1j, 1+1j, 1+2j, 1+3j, 2+3j, 2+4j, 3+7j, 4+1j, + 4+2j]) + assert_equal(zr, []) + + def test_unmatched_conjugates(self): + # 1+2j is unmatched + assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j]) + + # 1+2j and 1-3j are unmatched + assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j, 1-3j]) + + # 1+3j is unmatched + assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+3j]) + + # No pairs + assert_raises(ValueError, _cplxreal, [1+3j]) + assert_raises(ValueError, _cplxreal, [1-3j]) + + def test_real_integer_input(self): + zc, zr = _cplxreal([2, 0, 1, 4]) + assert_array_equal(zc, []) + assert_array_equal(zr, [0, 1, 2, 4]) + + +class TestTf2zpk(object): + + def test_simple(self): + z_r = np.array([0.5, -0.5]) + p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) + # Sort the zeros/poles so that we don't fail the test if the order + # changes + z_r.sort() + p_r.sort() + b = np.poly(z_r) + a = np.poly(p_r) + + z, p, k = tf2zpk(b, a) + z.sort() + p.sort() + assert_array_almost_equal(z, z_r) + assert_array_almost_equal(p, p_r) + + def test_bad_filter(self): + # Regression test for #651: better handling of badly conditioned + # filter coefficients. + with suppress_warnings(): + warnings.simplefilter("error", BadCoefficients) + assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0]) + + +class TestZpk2Tf(object): + + def test_identity(self): + """Test the identity transfer function.""" + z = [] + p = [] + k = 1. + b, a = zpk2tf(z, p, k) + b_r = np.array([1.]) # desired result + a_r = np.array([1.]) # desired result + # The test for the *type* of the return values is a regression + # test for ticket #1095. In the case p=[], zpk2tf used to + # return the scalar 1.0 instead of array([1.0]). 
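+        # A minimal standalone reproduction of that ticket (a sketch, assuming
+        # scipy.signal is importable) would be:
+        #     from scipy.signal import zpk2tf
+        #     b, a = zpk2tf([], [], 1.)
+        #     assert isinstance(b, np.ndarray) and isinstance(a, np.ndarray)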
+ assert_array_equal(b, b_r) + assert_(isinstance(b, np.ndarray)) + assert_array_equal(a, a_r) + assert_(isinstance(a, np.ndarray)) + + +class TestSos2Zpk(object): + + def test_basic(self): + sos = [[1, 0, 1, 1, 0, -0.81], + [1, 0, 0, 1, 0, +0.49]] + z, p, k = sos2zpk(sos) + z2 = [1j, -1j, 0, 0] + p2 = [0.9, -0.9, 0.7j, -0.7j] + k2 = 1 + assert_array_almost_equal(sort(z), sort(z2), decimal=4) + assert_array_almost_equal(sort(p), sort(p2), decimal=4) + assert_array_almost_equal(k, k2) + + sos = [[1.00000, +0.61803, 1.0000, 1.00000, +0.60515, 0.95873], + [1.00000, -1.61803, 1.0000, 1.00000, -1.58430, 0.95873], + [1.00000, +1.00000, 0.0000, 1.00000, +0.97915, 0.00000]] + z, p, k = sos2zpk(sos) + z2 = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j, + 0.8090 - 0.5878j, -1.0000 + 0.0000j, 0] + p2 = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j, + 0.7922 - 0.5755j, -0.9791 + 0.0000j, 0] + k2 = 1 + assert_array_almost_equal(sort(z), sort(z2), decimal=4) + assert_array_almost_equal(sort(p), sort(p2), decimal=4) + + sos = array([[1, 2, 3, 1, 0.2, 0.3], + [4, 5, 6, 1, 0.4, 0.5]]) + z = array([-1 - 1.41421356237310j, -1 + 1.41421356237310j, + -0.625 - 1.05326872164704j, -0.625 + 1.05326872164704j]) + p = array([-0.2 - 0.678232998312527j, -0.2 + 0.678232998312527j, + -0.1 - 0.538516480713450j, -0.1 + 0.538516480713450j]) + k = 4 + z2, p2, k2 = sos2zpk(sos) + assert_allclose(_cplxpair(z2), z) + assert_allclose(_cplxpair(p2), p) + assert_allclose(k2, k) + + +class TestSos2Tf(object): + + def test_basic(self): + sos = [[1, 1, 1, 1, 0, -1], + [-2, 3, 1, 1, 10, 1]] + b, a = sos2tf(sos) + assert_array_almost_equal(b, [-2, 1, 2, 4, 1]) + assert_array_almost_equal(a, [1, 10, 0, -10, -1]) + + +class TestTf2Sos(object): + + def test_basic(self): + num = [2, 16, 44, 56, 32] + den = [3, 3, -15, 18, -12] + sos = tf2sos(num, den) + sos2 = [[0.6667, 4.0000, 5.3333, 1.0000, +2.0000, -4.0000], + [1.0000, 2.0000, 2.0000, 1.0000, -1.0000, +1.0000]] + assert_array_almost_equal(sos, sos2, decimal=4) + + b = [1, -3, 11, -27, 18] + a = [16, 12, 2, -4, -1] + sos = tf2sos(b, a) + sos2 = [[0.0625, -0.1875, 0.1250, 1.0000, -0.2500, -0.1250], + [1.0000, +0.0000, 9.0000, 1.0000, +1.0000, +0.5000]] + # assert_array_almost_equal(sos, sos2, decimal=4) + + +class TestZpk2Sos(object): + + def test_basic(self): + for pairing in ('nearest', 'keep_odd'): + # + # Cases that match octave + # + + z = [-1, -1] + p = [0.57149 + 0.29360j, 0.57149 - 0.29360j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]] # octave & MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [1j, -1j] + p = [0.9, -0.9, 0.7j, -0.7j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1, 0, 1, 1, 0, +0.49], + [1, 0, 0, 1, 0, -0.81]] # octave + # sos2 = [[0, 0, 1, 1, -0.9, 0], + # [1, 0, 1, 1, 0.9, 0]] # MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [] + p = [0.8, -0.5+0.25j, -0.5-0.25j] + k = 1. 
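+            # Three poles and no zeros cannot fill two full biquads, so one
+            # section below degenerates to first order (note the trailing zero
+            # coefficients in its row).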
+ sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1., 0., 0., 1., 1., 0.3125], + [1., 0., 0., 1., -0.8, 0.]] # octave, MATLAB fails + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [1., 1., 0.9j, -0.9j] + p = [0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1, 0, 0.81, 1, -0.2, 0.82], + [1, -2, 1, 1, -1.98, 0.9802]] # octave + # sos2 = [[1, -2, 1, 1, -0.2, 0.82], + # [1, 0, 0.81, 1, -1.98, 0.9802]] # MATLAB + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [0.9+0.1j, 0.9-0.1j, -0.9] + p = [0.75+0.25j, 0.75-0.25j, 0.9] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + if pairing == 'keep_odd': + sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625], + [1, 0.9, 0, 1, -0.9, 0]] # octave; MATLAB fails + assert_array_almost_equal(sos, sos2, decimal=4) + else: # pairing == 'nearest' + sos2 = [[1, 0.9, 0, 1, -1.5, 0.625], + [1, -1.8, 0.82, 1, -0.9, 0]] # our algorithm + assert_array_almost_equal(sos, sos2, decimal=4) + + # + # Cases that differ from octave: + # + + z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j, + +0.8090 - 0.5878j, -1.0000 + 0.0000j] + p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j, + +0.7922 - 0.5755j, -0.9791 + 0.0000j] + k = 1 + sos = zpk2sos(z, p, k, pairing=pairing) + # sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870], + # [1, -1.618, 1, 1, -1.5844, 0.95878], + # [1, 1, 0, 1, 0.9791, 0]] # octave, MATLAB fails + sos2 = [[1, 1, 0, 1, +0.97915, 0], + [1, 0.61803, 1, 1, +0.60515, 0.95873], + [1, -1.61803, 1, 1, -1.58430, 0.95873]] + assert_array_almost_equal(sos, sos2, decimal=4) + + z = [-1 - 1.4142j, -1 + 1.4142j, + -0.625 - 1.0533j, -0.625 + 1.0533j] + p = [-0.2 - 0.6782j, -0.2 + 0.6782j, + -0.1 - 0.5385j, -0.1 + 0.5385j] + k = 4 + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[4, 8, 12, 1, 0.2, 0.3], + [1, 1.25, 1.5, 1, 0.4, 0.5]] # MATLAB + # sos2 = [[4, 8, 12, 1, 0.4, 0.5], + # [1, 1.25, 1.5, 1, 0.2, 0.3]] # octave + assert_allclose(sos, sos2, rtol=1e-4, atol=1e-4) + + z = [] + p = [0.2, -0.5+0.25j, -0.5-0.25j] + k = 1. + sos = zpk2sos(z, p, k, pairing=pairing) + sos2 = [[1., 0., 0., 1., -0.2, 0.], + [1., 0., 0., 1., 1., 0.3125]] + # sos2 = [[1., 0., 0., 1., 1., 0.3125], + # [1., 0., 0., 1., -0.2, 0]] # octave, MATLAB fails + assert_array_almost_equal(sos, sos2, decimal=4) + + # The next two examples are adapted from Leland B. Jackson, + # "Digital Filters and Signal Processing (1995) p.400: + # http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false + + deg2rad = np.pi / 180. + k = 1. 
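+            # The 'pairing' keyword is what the Jackson examples below
+            # exercise. A minimal sketch of the two modes (hypothetical
+            # z, p, k):
+            #     sos_near = zpk2sos(z, p, k, pairing='nearest')
+            #     sos_odd = zpk2sos(z, p, k, pairing='keep_odd')
+            # Both orderings realize the same transfer function; they differ
+            # in how zeros are matched to poles to limit per-section
+            # roundoff noise.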
+ + # first example + thetas = [22.5, 45, 77.5] + mags = [0.8, 0.6, 0.9] + z = np.array([np.exp(theta * deg2rad * 1j) for theta in thetas]) + z = np.concatenate((z, np.conj(z))) + p = np.array([mag * np.exp(theta * deg2rad * 1j) + for theta, mag in zip(thetas, mags)]) + p = np.concatenate((p, np.conj(p))) + sos = zpk2sos(z, p, k) + # sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81], # octave, + # [1, -1.41421, 1, 1, -0.84853, 0.36], # MATLAB fails + # [1, -1.84776, 1, 1, -1.47821, 0.64]] + # Note that pole-zero pairing matches, but ordering is different + sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36], + [1, -1.84776, 1, 1, -1.47821, 0.64], + [1, -0.43288, 1, 1, -0.38959, 0.81]] + assert_array_almost_equal(sos, sos2, decimal=4) + + # second example + z = np.array([np.exp(theta * deg2rad * 1j) + for theta in (85., 10.)]) + z = np.concatenate((z, np.conj(z), [1, -1])) + sos = zpk2sos(z, p, k) + + # sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81], # octave "wrong", + # [1, -1.96962, 1, 1, -0.84853, 0.36], # MATLAB fails + # [1, 0, -1, 1, -1.47821, 0.64000]] + # Our pole-zero pairing matches the text, Octave does not + sos2 = [[1, 0, -1, 1, -0.84853, 0.36], + [1, -1.96962, 1, 1, -1.47821, 0.64], + [1, -0.17431, 1, 1, -0.38959, 0.81]] + assert_array_almost_equal(sos, sos2, decimal=4) + + +class TestFreqs(object): + + def test_basic(self): + _, h = freqs([1.0], [1.0], worN=8) + assert_array_almost_equal(h, np.ones(8)) + + def test_output(self): + # 1st order low-pass filter: H(s) = 1 / (s + 1) + w = [0.1, 1, 10, 100] + num = [1] + den = [1, 1] + w, H = freqs(num, den, worN=w) + s = w * 1j + expected = 1 / (s + 1) + assert_array_almost_equal(H.real, expected.real) + assert_array_almost_equal(H.imag, expected.imag) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + # Expected range is from 0.01 to 10. + num = [1] + den = [1, 1] + n = 10 + expected_w = np.logspace(-2, 1, n) + w, H = freqs(num, den, worN=n) + assert_array_almost_equal(w, expected_w) + + def test_plot(self): + + def plot(w, h): + assert_array_almost_equal(h, np.ones(8)) + + assert_raises(ZeroDivisionError, freqs, [1.0], [1.0], worN=8, + plot=lambda w, h: 1 / 0) + freqs([1.0], [1.0], worN=8, plot=plot) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqs([1.0], [1.0]) + w2, h2 = freqs([1.0], [1.0], None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + w, h = freqs([1.0], [1.0], worN=N) + assert_equal(len(w), 8) + assert_array_almost_equal(h, np.ones(8)) + + # Measure at frequency 8 rad/sec + for w in (8.0, 8.0+0j): + w_out, h = freqs([1.0], [1.0], worN=w) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + +class TestFreqs_zpk(object): + + def test_basic(self): + _, h = freqs_zpk([1.0], [1.0], [1.0], worN=8) + assert_array_almost_equal(h, np.ones(8)) + + def test_output(self): + # 1st order low-pass filter: H(s) = 1 / (s + 1) + w = [0.1, 1, 10, 100] + z = [] + p = [-1] + k = 1 + w, H = freqs_zpk(z, p, k, worN=w) + s = w * 1j + expected = 1 / (s + 1) + assert_array_almost_equal(H.real, expected.real) + assert_array_almost_equal(H.imag, expected.imag) + + def test_freq_range(self): + # Test that freqresp() finds a reasonable frequency range. 
+ # 1st order low-pass filter: H(s) = 1 / (s + 1) + # Expected range is from 0.01 to 10. + z = [] + p = [-1] + k = 1 + n = 10 + expected_w = np.logspace(-2, 1, n) + w, H = freqs_zpk(z, p, k, worN=n) + assert_array_almost_equal(w, expected_w) + + def test_vs_freqs(self): + b, a = cheby1(4, 5, 100, analog=True, output='ba') + z, p, k = cheby1(4, 5, 100, analog=True, output='zpk') + + w1, h1 = freqs(b, a) + w2, h2 = freqs_zpk(z, p, k) + assert_allclose(w1, w2) + assert_allclose(h1, h2, rtol=1e-6) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqs_zpk([1.0], [1.0], [1.0]) + w2, h2 = freqs_zpk([1.0], [1.0], [1.0], None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + w, h = freqs_zpk([], [], 1, worN=N) + assert_equal(len(w), 8) + assert_array_almost_equal(h, np.ones(8)) + + # Measure at frequency 8 rad/sec + for w in (8.0, 8.0+0j): + w_out, h = freqs_zpk([], [], 1, worN=w) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + +class TestFreqz(object): + + def test_ticket1441(self): + """Regression test for ticket 1441.""" + # Because freqz previously used arange instead of linspace, + # when N was large, it would return one more point than + # requested. + N = 100000 + w, h = freqz([1.0], worN=N) + assert_equal(w.shape, (N,)) + + def test_basic(self): + w, h = freqz([1.0], worN=8) + assert_array_almost_equal(w, np.pi * np.arange(8) / 8.) + assert_array_almost_equal(h, np.ones(8)) + w, h = freqz([1.0], worN=9) + assert_array_almost_equal(w, np.pi * np.arange(9) / 9.) + assert_array_almost_equal(h, np.ones(9)) + + for a in [1, np.ones(2)]: + w, h = freqz(np.ones(2), a, worN=0) + assert_equal(w.shape, (0,)) + assert_equal(h.shape, (0,)) + assert_equal(h.dtype, np.dtype('complex128')) + + t = np.linspace(0, 1, 4, endpoint=False) + for b, a, h_whole in zip( + ([1., 0, 0, 0], np.sin(2 * np.pi * t)), + ([1., 0, 0, 0], [0.5, 0, 0, 0]), + ([1., 1., 1., 1.], [0, -4j, 0, 4j])): + w, h = freqz(b, a, worN=4, whole=True) + expected_w = np.linspace(0, 2 * np.pi, 4, endpoint=False) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, h_whole) + # simultaneously check int-like support + w, h = freqz(b, a, worN=np.int32(4), whole=True) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, h_whole) + w, h = freqz(b, a, worN=w, whole=True) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, h_whole) + + def test_basic_whole(self): + w, h = freqz([1.0], worN=8, whole=True) + assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + def test_plot(self): + + def plot(w, h): + assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + assert_raises(ZeroDivisionError, freqz, [1.0], worN=8, + plot=lambda w, h: 1 / 0) + freqz([1.0], worN=8, plot=plot) + + def test_fft_wrapping(self): + # Some simple real FIR filters + bs = list() # filters + as_ = list() + hs_whole = list() + hs_half = list() + # 3 taps + t = np.linspace(0, 1, 3, endpoint=False) + bs.append(np.sin(2 * np.pi * t)) + as_.append(3.) 
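+        # For b = sin(2*pi*t) with t = [0, 1/3, 2/3] and a = 3, evaluating
+        # H(w) = (b[0] + b[1]*exp(-1j*w) + b[2]*exp(-2j*w)) / 3 on the
+        # whole-circle grid w = 2*pi*k/3 gives [0, -0.5j, 0.5j], the response
+        # recorded next.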
+ hs_whole.append([0, -0.5j, 0.5j]) + hs_half.append([0, np.sqrt(1./12.), -0.5j]) + # 4 taps + t = np.linspace(0, 1, 4, endpoint=False) + bs.append(np.sin(2 * np.pi * t)) + as_.append(0.5) + hs_whole.append([0, -4j, 0, 4j]) + hs_half.append([0, np.sqrt(8), -4j, -np.sqrt(8)]) + del t + for ii, b in enumerate(bs): + # whole + a = as_[ii] + expected_w = np.linspace(0, 2 * np.pi, len(b), endpoint=False) + w, h = freqz(b, a, worN=expected_w, whole=True) # polyval + err_msg = 'b = %s, a=%s' % (b, a) + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) + w, h = freqz(b, a, worN=len(b), whole=True) # FFT + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) + # non-whole + expected_w = np.linspace(0, np.pi, len(b), endpoint=False) + w, h = freqz(b, a, worN=expected_w, whole=False) # polyval + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) + w, h = freqz(b, a, worN=len(b), whole=False) # FFT + assert_array_almost_equal(w, expected_w, err_msg=err_msg) + assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) + + # some random FIR filters (real + complex) + # assume polyval is accurate + rng = np.random.RandomState(0) + for ii in range(2, 10): # number of taps + b = rng.randn(ii) + for kk in range(2): + a = rng.randn(1) if kk == 0 else rng.randn(3) + for jj in range(2): + if jj == 1: + b = b + rng.randn(ii) * 1j + # whole + expected_w = np.linspace(0, 2 * np.pi, ii, endpoint=False) + w, expected_h = freqz(b, a, worN=expected_w, whole=True) + assert_array_almost_equal(w, expected_w) + w, h = freqz(b, a, worN=ii, whole=True) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, expected_h) + # half + expected_w = np.linspace(0, np.pi, ii, endpoint=False) + w, expected_h = freqz(b, a, worN=expected_w, whole=False) + assert_array_almost_equal(w, expected_w) + w, h = freqz(b, a, worN=ii, whole=False) + assert_array_almost_equal(w, expected_w) + assert_array_almost_equal(h, expected_h) + + def test_broadcasting1(self): + # Test broadcasting with worN an integer or a 1-D array, + # b and a are n-dimensional arrays. + np.random.seed(123) + b = np.random.rand(3, 5, 1) + a = np.random.rand(2, 1) + for whole in [False, True]: + # Test with worN being integers (one fast for FFT and one not), + # a 1-D array, and an empty array. + for worN in [16, 17, np.linspace(0, 1, 10), np.array([])]: + w, h = freqz(b, a, worN=worN, whole=whole) + for k in range(b.shape[1]): + bk = b[:, k, 0] + ak = a[:, 0] + ww, hh = freqz(bk, ak, worN=worN, whole=whole) + assert_allclose(ww, w) + assert_allclose(hh, h[k]) + + def test_broadcasting2(self): + # Test broadcasting with worN an integer or a 1-D array, + # b is an n-dimensional array, and a is left at the default value. + np.random.seed(123) + b = np.random.rand(3, 5, 1) + for whole in [False, True]: + for worN in [16, 17, np.linspace(0, 1, 10)]: + w, h = freqz(b, worN=worN, whole=whole) + for k in range(b.shape[1]): + bk = b[:, k, 0] + ww, hh = freqz(bk, worN=worN, whole=whole) + assert_allclose(ww, w) + assert_allclose(hh, h[k]) + + def test_broadcasting3(self): + # Test broadcasting where b.shape[-1] is the same length + # as worN, and a is left at the default value. 
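+        # freqz stores coefficients along the first axis, so b with shape
+        # (3, N) represents N independent 3-tap filters; each one is checked
+        # below at a single frequency w[k].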
+ np.random.seed(123) + N = 16 + b = np.random.rand(3, N) + for whole in [False, True]: + for worN in [N, np.linspace(0, 1, N)]: + w, h = freqz(b, worN=worN, whole=whole) + assert_equal(w.size, N) + for k in range(N): + bk = b[:, k] + ww, hh = freqz(bk, worN=w[k], whole=whole) + assert_allclose(ww, w[k]) + assert_allclose(hh, h[k]) + + def test_broadcasting4(self): + # Test broadcasting with worN a 2-D array. + np.random.seed(123) + b = np.random.rand(4, 2, 1, 1) + a = np.random.rand(5, 2, 1, 1) + for whole in [False, True]: + for worN in [np.random.rand(6, 7), np.empty((6, 0))]: + w, h = freqz(b, a, worN=worN, whole=whole) + assert_allclose(w, worN, rtol=1e-14) + assert_equal(h.shape, (2,) + worN.shape) + for k in range(2): + ww, hh = freqz(b[:, k, 0, 0], a[:, k, 0, 0], + worN=worN.ravel(), + whole=whole) + assert_allclose(ww, worN.ravel(), rtol=1e-14) + assert_allclose(hh, h[k, :, :].ravel()) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqz([1.0], 1) + w2, h2 = freqz([1.0], 1, None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_fs_param(self): + fs = 900 + b = [0.039479155677484369, 0.11843746703245311, 0.11843746703245311, + 0.039479155677484369] + a = [1.0, -1.3199152021838287, 0.80341991081938424, + -0.16767146321568049] + + # N = None, whole=False + w1, h1 = freqz(b, a, fs=fs) + w2, h2 = freqz(b, a) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False)) + + # N = None, whole=True + w1, h1 = freqz(b, a, whole=True, fs=fs) + w2, h2 = freqz(b, a, whole=True) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False)) + + # N = 5, whole=False + w1, h1 = freqz(b, a, 5, fs=fs) + w2, h2 = freqz(b, a, 5) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False)) + + # N = 5, whole=True + w1, h1 = freqz(b, a, 5, whole=True, fs=fs) + w2, h2 = freqz(b, a, 5, whole=True) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False)) + + # w is an array_like + for w in ([123], (123,), np.array([123]), (50, 123, 230), + np.array([50, 123, 230])): + w1, h1 = freqz(b, a, w, fs=fs) + w2, h2 = freqz(b, a, 2*pi*np.array(w)/fs) + assert_allclose(h1, h2) + assert_allclose(w, w1) + + def test_w_or_N_types(self): + # Measure at 7 (polyval) or 8 (fft) equally-spaced points + for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7), + np.array(7), + 8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + + w, h = freqz([1.0], worN=N) + assert_array_almost_equal(w, np.pi * np.arange(N) / N) + assert_array_almost_equal(h, np.ones(N)) + + w, h = freqz([1.0], worN=N, fs=100) + assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False)) + assert_array_almost_equal(h, np.ones(N)) + + # Measure at frequency 8 Hz + for w in (8.0, 8.0+0j): + # Only makes sense when fs is specified + w_out, h = freqz([1.0], worN=w, fs=100) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + +class TestSOSFreqz(object): + + def test_sosfreqz_basic(self): + # Compare the results of freqz and sosfreqz for a low order + # Butterworth filter. 
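+        # A minimal standalone version of this comparison (a sketch, assuming
+        # scipy.signal is importable):
+        #     from scipy.signal import butter, freqz, sosfreqz
+        #     b, a = butter(4, 0.2)
+        #     sos = butter(4, 0.2, output='sos')
+        #     w, h = freqz(b, a, worN=500)
+        #     w2, h2 = sosfreqz(sos, worN=500)
+        #     # h and h2 agree to ~1e-10 relative tolerance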
+ + N = 500 + + b, a = butter(4, 0.2) + sos = butter(4, 0.2, output='sos') + w, h = freqz(b, a, worN=N) + w2, h2 = sosfreqz(sos, worN=N) + assert_equal(w2, w) + assert_allclose(h2, h, rtol=1e-10, atol=1e-14) + + b, a = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass') + sos = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass', output='sos') + w, h = freqz(b, a, worN=N) + w2, h2 = sosfreqz(sos, worN=N) + assert_equal(w2, w) + assert_allclose(h2, h, rtol=1e-10, atol=1e-14) + # must have at least one section + assert_raises(ValueError, sosfreqz, sos[:0]) + + def test_sosfrez_design(self): + # Compare sosfreqz output against expected values for different + # filter types + + # from cheb2ord + N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) + sos = cheby2(N, 60, Wn, 'stop', output='sos') + w, h = sosfreqz(sos) + h = np.abs(h) + w /= np.pi + assert_allclose(20 * np.log10(h[w <= 0.1]), 0, atol=3.01) + assert_allclose(20 * np.log10(h[w >= 0.6]), 0., atol=3.01) + assert_allclose(h[(w >= 0.2) & (w <= 0.5)], 0., atol=1e-3) # <= -60 dB + + N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 150) + sos = cheby2(N, 150, Wn, 'stop', output='sos') + w, h = sosfreqz(sos) + dB = 20*np.log10(np.abs(h)) + w /= np.pi + assert_allclose(dB[w <= 0.1], 0, atol=3.01) + assert_allclose(dB[w >= 0.6], 0., atol=3.01) + assert_array_less(dB[(w >= 0.2) & (w <= 0.5)], -149.9) + + # from cheb1ord + N, Wn = cheb1ord(0.2, 0.3, 3, 40) + sos = cheby1(N, 3, Wn, 'low', output='sos') + w, h = sosfreqz(sos) + h = np.abs(h) + w /= np.pi + assert_allclose(20 * np.log10(h[w <= 0.2]), 0, atol=3.01) + assert_allclose(h[w >= 0.3], 0., atol=1e-2) # <= -40 dB + + N, Wn = cheb1ord(0.2, 0.3, 1, 150) + sos = cheby1(N, 1, Wn, 'low', output='sos') + w, h = sosfreqz(sos) + dB = 20*np.log10(np.abs(h)) + w /= np.pi + assert_allclose(dB[w <= 0.2], 0, atol=1.01) + assert_array_less(dB[w >= 0.3], -149.9) + + # adapted from ellipord + N, Wn = ellipord(0.3, 0.2, 3, 60) + sos = ellip(N, 0.3, 60, Wn, 'high', output='sos') + w, h = sosfreqz(sos) + h = np.abs(h) + w /= np.pi + assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01) + assert_allclose(h[w <= 0.1], 0., atol=1.5e-3) # <= -60 dB (approx) + + # adapted from buttord + N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 40) + sos = butter(N, Wn, 'band', output='sos') + w, h = sosfreqz(sos) + h = np.abs(h) + w /= np.pi + assert_allclose(h[w <= 0.14], 0., atol=1e-2) # <= -40 dB + assert_allclose(h[w >= 0.6], 0., atol=1e-2) # <= -40 dB + assert_allclose(20 * np.log10(h[(w >= 0.2) & (w <= 0.5)]), + 0, atol=3.01) + + N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 100) + sos = butter(N, Wn, 'band', output='sos') + w, h = sosfreqz(sos) + dB = 20*np.log10(np.maximum(np.abs(h), 1e-10)) + w /= np.pi + assert_array_less(dB[(w > 0) & (w <= 0.14)], -99.9) + assert_array_less(dB[w >= 0.6], -99.9) + assert_allclose(dB[(w >= 0.2) & (w <= 0.5)], 0, atol=3.01) + + @pytest.mark.xfail + def test_sosfreqz_design_ellip(self): + N, Wn = ellipord(0.3, 0.1, 3, 60) + sos = ellip(N, 0.3, 60, Wn, 'high', output='sos') + w, h = sosfreqz(sos) + h = np.abs(h) + w /= np.pi + assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01) + assert_allclose(h[w <= 0.1], 0., atol=1.5e-3) # <= -60 dB (approx) + + N, Wn = ellipord(0.3, 0.2, .5, 150) + sos = ellip(N, .5, 150, Wn, 'high', output='sos') + w, h = sosfreqz(sos) + dB = 20*np.log10(np.maximum(np.abs(h), 1e-10)) + w /= np.pi + assert_allclose(dB[w >= 0.3], 0, atol=.55) + # this is not great (147 instead of 150, could be ellip[ord] problem?) 
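+        # That is, the realized stopband misses the requested 150 dB by about
+        # 3 dB, hence the relaxed -147 dB bound here (this method is marked
+        # xfail above).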
+ assert_array_less(dB[(w > 0) & (w <= 0.25)], -147) + + @mpmath_check("0.10") + def test_sos_freqz_against_mp(self): + # Compare the result of sosfreqz applied to a high order Butterworth + # filter against the result computed using mpmath. (signal.freqz fails + # miserably with such high order filters.) + from . import mpsig + N = 500 + order = 25 + Wn = 0.15 + with mpmath.workdps(80): + z_mp, p_mp, k_mp = mpsig.butter_lp(order, Wn) + w_mp, h_mp = mpsig.zpkfreqz(z_mp, p_mp, k_mp, N) + w_mp = np.array([float(x) for x in w_mp]) + h_mp = np.array([complex(x) for x in h_mp]) + + sos = butter(order, Wn, output='sos') + w, h = sosfreqz(sos, worN=N) + assert_allclose(w, w_mp, rtol=1e-12, atol=1e-14) + assert_allclose(h, h_mp, rtol=1e-12, atol=1e-14) + + def test_fs_param(self): + fs = 900 + sos = [[0.03934683014103762, 0.07869366028207524, 0.03934683014103762, + 1.0, -0.37256600288916636, 0.0], + [1.0, 1.0, 0.0, 1.0, -0.9495739996946778, 0.45125966317124144]] + + # N = None, whole=False + w1, h1 = sosfreqz(sos, fs=fs) + w2, h2 = sosfreqz(sos) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False)) + + # N = None, whole=True + w1, h1 = sosfreqz(sos, whole=True, fs=fs) + w2, h2 = sosfreqz(sos, whole=True) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False)) + + # N = 5, whole=False + w1, h1 = sosfreqz(sos, 5, fs=fs) + w2, h2 = sosfreqz(sos, 5) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False)) + + # N = 5, whole=True + w1, h1 = sosfreqz(sos, 5, whole=True, fs=fs) + w2, h2 = sosfreqz(sos, 5, whole=True) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False)) + + # w is an array_like + for w in ([123], (123,), np.array([123]), (50, 123, 230), + np.array([50, 123, 230])): + w1, h1 = sosfreqz(sos, w, fs=fs) + w2, h2 = sosfreqz(sos, 2*pi*np.array(w)/fs) + assert_allclose(h1, h2) + assert_allclose(w, w1) + + def test_w_or_N_types(self): + # Measure at 7 (polyval) or 8 (fft) equally-spaced points + for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7), + np.array(7), + 8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + + w, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=N) + assert_array_almost_equal(w, np.pi * np.arange(N) / N) + assert_array_almost_equal(h, np.ones(N)) + + w, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=N, fs=100) + assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False)) + assert_array_almost_equal(h, np.ones(N)) + + # Measure at frequency 8 Hz + for w in (8.0, 8.0+0j): + # Only makes sense when fs is specified + w_out, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=w, fs=100) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + +class TestFreqz_zpk(object): + + def test_ticket1441(self): + """Regression test for ticket 1441.""" + # Because freqz previously used arange instead of linspace, + # when N was large, it would return one more point than + # requested. 
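+        # With a float step, np.arange(0, np.pi, np.pi/N) can accumulate
+        # rounding error and yield N+1 points for large N, whereas
+        # np.linspace(0, np.pi, N, endpoint=False) returns exactly N points,
+        # which the shape assertion below relies on.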
+ N = 100000 + w, h = freqz_zpk([0.5], [0.5], 1.0, worN=N) + assert_equal(w.shape, (N,)) + + def test_basic(self): + w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8) + assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + def test_basic_whole(self): + w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8, whole=True) + assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8) + assert_array_almost_equal(h, np.ones(8)) + + def test_vs_freqz(self): + b, a = cheby1(4, 5, 0.5, analog=False, output='ba') + z, p, k = cheby1(4, 5, 0.5, analog=False, output='zpk') + + w1, h1 = freqz(b, a) + w2, h2 = freqz_zpk(z, p, k) + assert_allclose(w1, w2) + assert_allclose(h1, h2, rtol=1e-6) + + def test_backward_compat(self): + # For backward compatibility, test if None act as a wrapper for default + w1, h1 = freqz_zpk([0.5], [0.5], 1.0) + w2, h2 = freqz_zpk([0.5], [0.5], 1.0, None) + assert_array_almost_equal(w1, w2) + assert_array_almost_equal(h1, h2) + + def test_fs_param(self): + fs = 900 + z = [-1, -1, -1] + p = [0.4747869998473389+0.4752230717749344j, 0.37256600288916636, + 0.4747869998473389-0.4752230717749344j] + k = 0.03934683014103762 + + # N = None, whole=False + w1, h1 = freqz_zpk(z, p, k, whole=False, fs=fs) + w2, h2 = freqz_zpk(z, p, k, whole=False) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False)) + + # N = None, whole=True + w1, h1 = freqz_zpk(z, p, k, whole=True, fs=fs) + w2, h2 = freqz_zpk(z, p, k, whole=True) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False)) + + # N = 5, whole=False + w1, h1 = freqz_zpk(z, p, k, 5, fs=fs) + w2, h2 = freqz_zpk(z, p, k, 5) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False)) + + # N = 5, whole=True + w1, h1 = freqz_zpk(z, p, k, 5, whole=True, fs=fs) + w2, h2 = freqz_zpk(z, p, k, 5, whole=True) + assert_allclose(h1, h2) + assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False)) + + # w is an array_like + for w in ([123], (123,), np.array([123]), (50, 123, 230), + np.array([50, 123, 230])): + w1, h1 = freqz_zpk(z, p, k, w, fs=fs) + w2, h2 = freqz_zpk(z, p, k, 2*pi*np.array(w)/fs) + assert_allclose(h1, h2) + assert_allclose(w, w1) + + def test_w_or_N_types(self): + # Measure at 8 equally-spaced points + for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), + np.array(8)): + + w, h = freqz_zpk([], [], 1, worN=N) + assert_array_almost_equal(w, np.pi * np.arange(8) / 8.) + assert_array_almost_equal(h, np.ones(8)) + + w, h = freqz_zpk([], [], 1, worN=N, fs=100) + assert_array_almost_equal(w, np.linspace(0, 50, 8, endpoint=False)) + assert_array_almost_equal(h, np.ones(8)) + + # Measure at frequency 8 Hz + for w in (8.0, 8.0+0j): + # Only makes sense when fs is specified + w_out, h = freqz_zpk([], [], 1, worN=w, fs=100) + assert_array_almost_equal(w_out, [8]) + assert_array_almost_equal(h, [1]) + + +class TestNormalize(object): + + def test_allclose(self): + """Test for false positive on allclose in normalize() in + filter_design.py""" + # Test to make sure the allclose call within signal.normalize does not + # choose false positives. Then check against a known output from MATLAB + # to make sure the fix doesn't break anything. + + # These are the coefficients returned from + # `[b,a] = cheby1(8, 0.5, 0.048)' + # in MATLAB. 
There are at least 15 significant figures in each + # coefficient, so it makes sense to test for errors on the order of + # 1e-13 (this can always be relaxed if different platforms have + # different rounding errors) + b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10, + 6.022052805239190e-10, 1.204410561047838e-09, + 1.505513201309798e-09, 1.204410561047838e-09, + 6.022052805239190e-10, 1.720586515782626e-10, + 2.150733144728282e-11]) + a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00, + 2.654354569747454e+01, -5.182182531666387e+01, + 6.334127355102684e+01, -4.963358186631157e+01, + 2.434862182949389e+01, -6.836925348604676e+00, + 8.412934944449140e-01]) + + # This is the input to signal.normalize after passing through the + # equivalent steps in signal.iirfilter as was done for MATLAB + b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05, + 4.3520780422820447e-05, 8.7041560845640893e-05, + 1.0880195105705122e-04, 8.7041560845640975e-05, + 4.3520780422820447e-05, 1.2434508692234413e-05, + 1.5543135865293012e-06]) + a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05, + 1.9182761917308895e+06, -3.7451128364682454e+06, + 4.5776121393762771e+06, -3.5869706138592605e+06, + 1.7596511818472347e+06, -4.9409793515707983e+05, + 6.0799461347219651e+04]) + + b_output, a_output = normalize(b_norm_in, a_norm_in) + + # The test on b works for decimal=14 but the one for a does not. For + # the sake of consistency, both of these are decimal=13. If something + # breaks on another platform, it is probably fine to relax this lower. + assert_array_almost_equal(b_matlab, b_output, decimal=13) + assert_array_almost_equal(a_matlab, a_output, decimal=13) + + def test_errors(self): + """Test the error cases.""" + # all zero denominator + assert_raises(ValueError, normalize, [1, 2], 0) + + # denominator not 1 dimensional + assert_raises(ValueError, normalize, [1, 2], [[1]]) + + # numerator too many dimensions + assert_raises(ValueError, normalize, [[[1, 2]]], 1) + + +class TestLp2lp(object): + + def test_basic(self): + b = [1] + a = [1, np.sqrt(2), 1] + b_lp, a_lp = lp2lp(b, a, 0.38574256627112119) + assert_array_almost_equal(b_lp, [0.1488], decimal=4) + assert_array_almost_equal(a_lp, [1, 0.5455, 0.1488], decimal=4) + + +class TestLp2hp(object): + + def test_basic(self): + b = [0.25059432325190018] + a = [1, 0.59724041654134863, 0.92834805757524175, 0.25059432325190018] + b_hp, a_hp = lp2hp(b, a, 2*np.pi*5000) + assert_allclose(b_hp, [1, 0, 0, 0]) + assert_allclose(a_hp, [1, 1.1638e5, 2.3522e9, 1.2373e14], rtol=1e-4) + + +class TestLp2bp(object): + + def test_basic(self): + b = [1] + a = [1, 2, 2, 1] + b_bp, a_bp = lp2bp(b, a, 2*np.pi*4000, 2*np.pi*2000) + assert_allclose(b_bp, [1.9844e12, 0, 0, 0], rtol=1e-6) + assert_allclose(a_bp, [1, 2.5133e4, 2.2108e9, 3.3735e13, + 1.3965e18, 1.0028e22, 2.5202e26], rtol=1e-4) + + +class TestLp2bs(object): + + def test_basic(self): + b = [1] + a = [1, 1] + b_bs, a_bs = lp2bs(b, a, 0.41722257286366754, 0.18460575326152251) + assert_array_almost_equal(b_bs, [1, 0, 0.17407], decimal=5) + assert_array_almost_equal(a_bs, [1, 0.18461, 0.17407], decimal=5) + + +class TestBilinear(object): + + def test_basic(self): + b = [0.14879732743343033] + a = [1, 0.54552236880522209, 0.14879732743343033] + b_z, a_z = bilinear(b, a, 0.5) + assert_array_almost_equal(b_z, [0.087821, 0.17564, 0.087821], + decimal=5) + assert_array_almost_equal(a_z, [1, -1.0048, 0.35606], decimal=4) + + b = [1, 0, 0.17407467530697837] + a = [1, 
0.18460575326152251, 0.17407467530697837] + b_z, a_z = bilinear(b, a, 0.5) + assert_array_almost_equal(b_z, [0.86413, -1.2158, 0.86413], + decimal=4) + assert_array_almost_equal(a_z, [1, -1.2158, 0.72826], + decimal=4) + + +class TestLp2lp_zpk(object): + + def test_basic(self): + z = [] + p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)] + k = 1 + z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 5) + assert_array_equal(z_lp, []) + assert_allclose(sort(p_lp), sort(p)*5) + assert_allclose(k_lp, 25) + + # Pseudo-Chebyshev with both poles and zeros + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 20) + assert_allclose(sort(z_lp), sort([-40j, +40j])) + assert_allclose(sort(p_lp), sort([-15, -10-10j, -10+10j])) + assert_allclose(k_lp, 60) + + +class TestLp2hp_zpk(object): + + def test_basic(self): + z = [] + p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)] + k = 1 + + z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 5) + assert_array_equal(z_hp, [0, 0]) + assert_allclose(sort(p_hp), sort(p)*5) + assert_allclose(k_hp, 1) + + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 6) + assert_allclose(sort(z_hp), sort([-3j, 0, +3j])) + assert_allclose(sort(p_hp), sort([-8, -6-6j, -6+6j])) + assert_allclose(k_hp, 32) + + +class TestLp2bp_zpk(object): + + def test_basic(self): + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + z_bp, p_bp, k_bp = lp2bp_zpk(z, p, k, 15, 8) + assert_allclose(sort(z_bp), sort([-25j, -9j, 0, +9j, +25j])) + assert_allclose(sort(p_bp), sort([-3 + 6j*sqrt(6), + -3 - 6j*sqrt(6), + +2j+sqrt(-8j-225)-2, + -2j+sqrt(+8j-225)-2, + +2j-sqrt(-8j-225)-2, + -2j-sqrt(+8j-225)-2, ])) + assert_allclose(k_bp, 24) + + +class TestLp2bs_zpk(object): + + def test_basic(self): + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + + z_bs, p_bs, k_bs = lp2bs_zpk(z, p, k, 35, 12) + + assert_allclose(sort(z_bs), sort([+35j, -35j, + +3j+sqrt(1234)*1j, + -3j+sqrt(1234)*1j, + +3j-sqrt(1234)*1j, + -3j-sqrt(1234)*1j])) + assert_allclose(sort(p_bs), sort([+3j*sqrt(129) - 8, + -3j*sqrt(129) - 8, + (-6 + 6j) - sqrt(-1225 - 72j), + (-6 - 6j) - sqrt(-1225 + 72j), + (-6 + 6j) + sqrt(-1225 - 72j), + (-6 - 6j) + sqrt(-1225 + 72j), ])) + assert_allclose(k_bs, 32) + + +class TestBilinear_zpk(object): + + def test_basic(self): + z = [-2j, +2j] + p = [-0.75, -0.5-0.5j, -0.5+0.5j] + k = 3 + + z_d, p_d, k_d = bilinear_zpk(z, p, k, 10) + + assert_allclose(sort(z_d), sort([(20-2j)/(20+2j), (20+2j)/(20-2j), + -1])) + assert_allclose(sort(p_d), sort([77/83, + (1j/2 + 39/2) / (41/2 - 1j/2), + (39/2 - 1j/2) / (1j/2 + 41/2), ])) + assert_allclose(k_d, 9696/69803) + + +class TestPrototypeType(object): + + def test_output_type(self): + # Prototypes should consistently output arrays, not lists + # https://github.com/scipy/scipy/pull/441 + for func in (buttap, + besselap, + lambda N: cheb1ap(N, 1), + lambda N: cheb2ap(N, 20), + lambda N: ellipap(N, 1, 20)): + for N in range(7): + z, p, k = func(N) + assert_(isinstance(z, np.ndarray)) + assert_(isinstance(p, np.ndarray)) + + +def dB(x): + # Return magnitude in decibels, avoiding divide-by-zero warnings + # (and deal with some "not less-ordered" errors when -inf shows up) + return 20 * np.log10(np.maximum(np.abs(x), np.finfo(np.float64).tiny)) + + +class TestButtord(object): + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'lowpass', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp, dB(h[w <= 
wp])) + assert_array_less(dB(h[ws <= w]), -rs) + + assert_equal(N, 16) + assert_allclose(Wn, 2.0002776782743284e-01, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'highpass', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp, dB(h[wp <= w])) + assert_array_less(dB(h[w <= ws]), -rs) + + assert_equal(N, 18) + assert_allclose(Wn, 2.9996603079132672e-01, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'bandpass', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, + dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), + -rs + 0.1) + + assert_equal(N, 18) + assert_allclose(Wn, [1.9998742411409134e-01, 5.0002139595676276e-01], + rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = buttord(wp, ws, rp, rs, False) + b, a = butter(N, Wn, 'bandstop', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp, + dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), + -rs) + + assert_equal(N, 20) + assert_allclose(Wn, [1.4759432329294042e-01, 5.9997365985276407e-01], + rtol=1e-6) + + def test_analog(self): + wp = 200 + ws = 600 + rp = 3 + rs = 60 + N, Wn = buttord(wp, ws, rp, rs, True) + b, a = butter(N, Wn, 'lowpass', True) + w, h = freqs(b, a) + assert_array_less(-rp, dB(h[w <= wp])) + assert_array_less(dB(h[ws <= w]), -rs) + + assert_equal(N, 7) + assert_allclose(Wn, 2.0006785355671877e+02, rtol=1e-15) + + n, Wn = buttord(1, 550/450, 1, 26, analog=True) + assert_equal(n, 19) + assert_allclose(Wn, 1.0361980524629517, rtol=1e-15) + + assert_equal(buttord(1, 1.2, 1, 80, analog=True)[0], 55) + + def test_fs_param(self): + wp = [4410, 11025] + ws = [2205, 13230] + rp = 3 + rs = 80 + fs = 44100 + N, Wn = buttord(wp, ws, rp, rs, False, fs=fs) + b, a = butter(N, Wn, 'bandpass', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert_array_less(-rp - 0.1, + dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), + -rs + 0.1) + + assert_equal(N, 18) + assert_allclose(Wn, [4409.722701715714, 11025.47178084662], + rtol=1e-15) + + +class TestCheb1ord(object): + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'low', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, dB(h[w <= wp])) + assert_array_less(dB(h[ws <= w]), -rs + 0.1) + + assert_equal(N, 8) + assert_allclose(Wn, 0.2, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'high', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, dB(h[wp <= w])) + assert_array_less(dB(h[w <= ws]), -rs + 0.1) + + assert_equal(N, 9) + assert_allclose(Wn, 0.3, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'band', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, + dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), + -rs + 0.1) + + assert_equal(N, 9) + assert_allclose(Wn, [0.2, 0.5], 
rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = cheb1ord(wp, ws, rp, rs, False) + b, a = cheby1(N, rp, Wn, 'stop', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, + dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), + -rs + 0.1) + + assert_equal(N, 10) + assert_allclose(Wn, [0.14758232569947785, 0.6], rtol=1e-5) + + def test_analog(self): + wp = 700 + ws = 100 + rp = 3 + rs = 70 + N, Wn = cheb1ord(wp, ws, rp, rs, True) + b, a = cheby1(N, rp, Wn, 'high', True) + w, h = freqs(b, a) + assert_array_less(-rp - 0.1, dB(h[wp <= w])) + assert_array_less(dB(h[w <= ws]), -rs + 0.1) + + assert_equal(N, 4) + assert_allclose(Wn, 700, rtol=1e-15) + + assert_equal(cheb1ord(1, 1.2, 1, 80, analog=True)[0], 17) + + def test_fs_param(self): + wp = 4800 + ws = 7200 + rp = 3 + rs = 60 + fs = 48000 + N, Wn = cheb1ord(wp, ws, rp, rs, False, fs=fs) + b, a = cheby1(N, rp, Wn, 'low', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert_array_less(-rp - 0.1, dB(h[w <= wp])) + assert_array_less(dB(h[ws <= w]), -rs + 0.1) + + assert_equal(N, 8) + assert_allclose(Wn, 4800, rtol=1e-15) + + +class TestCheb2ord(object): + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'lp', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, dB(h[w <= wp])) + assert_array_less(dB(h[ws <= w]), -rs + 0.1) + + assert_equal(N, 8) + assert_allclose(Wn, 0.28647639976553163, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'hp', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, dB(h[wp <= w])) + assert_array_less(dB(h[w <= ws]), -rs + 0.1) + + assert_equal(N, 9) + assert_allclose(Wn, 0.20697492182903282, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'bp', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, + dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), + -rs + 0.1) + + assert_equal(N, 9) + assert_allclose(Wn, [0.14876937565923479, 0.59748447842351482], + rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = cheb2ord(wp, ws, rp, rs, False) + b, a = cheby2(N, rs, Wn, 'bs', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, + dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), + -rs + 0.1) + + assert_equal(N, 10) + assert_allclose(Wn, [0.19926249974781743, 0.50125246585567362], + rtol=1e-6) + + def test_analog(self): + wp = [20, 50] + ws = [10, 60] + rp = 3 + rs = 80 + N, Wn = cheb2ord(wp, ws, rp, rs, True) + b, a = cheby2(N, rs, Wn, 'bp', True) + w, h = freqs(b, a) + assert_array_less(-rp - 0.1, + dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), + -rs + 0.1) + + assert_equal(N, 11) + assert_allclose(Wn, [1.673740595370124e+01, 5.974641487254268e+01], + rtol=1e-15) + + def test_fs_param(self): + wp = 150 + ws = 100 + rp = 3 + rs = 70 + fs = 1000 + N, Wn = cheb2ord(wp, ws, rp, rs, False, fs=fs) + b, a = cheby2(N, rs, Wn, 'hp', False, fs=fs) + w, h = freqz(b, a, fs=fs) + 
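+        # With fs given, wp and ws are in Hz: wp / (fs / 2) = 150 / 500 = 0.3,
+        # so this is the same design as test_highpass above, expressed in Hz.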
assert_array_less(-rp - 0.1, dB(h[wp <= w])) + assert_array_less(dB(h[w <= ws]), -rs + 0.1) + + assert_equal(N, 9) + assert_allclose(Wn, 103.4874609145164, rtol=1e-15) + + +class TestEllipord(object): + + def test_lowpass(self): + wp = 0.2 + ws = 0.3 + rp = 3 + rs = 60 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'lp', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, dB(h[w <= wp])) + assert_array_less(dB(h[ws <= w]), -rs + 0.1) + + assert_equal(N, 5) + assert_allclose(Wn, 0.2, rtol=1e-15) + + def test_highpass(self): + wp = 0.3 + ws = 0.2 + rp = 3 + rs = 70 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'hp', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, dB(h[wp <= w])) + assert_array_less(dB(h[w <= ws]), -rs + 0.1) + + assert_equal(N, 6) + assert_allclose(Wn, 0.3, rtol=1e-15) + + def test_bandpass(self): + wp = [0.2, 0.5] + ws = [0.1, 0.6] + rp = 3 + rs = 80 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'bp', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, + dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) + assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), + -rs + 0.1) + + assert_equal(N, 6) + assert_allclose(Wn, [0.2, 0.5], rtol=1e-15) + + def test_bandstop(self): + wp = [0.1, 0.6] + ws = [0.2, 0.5] + rp = 3 + rs = 90 + N, Wn = ellipord(wp, ws, rp, rs, False) + b, a = ellip(N, rp, rs, Wn, 'bs', False) + w, h = freqz(b, a) + w /= np.pi + assert_array_less(-rp - 0.1, + dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), + -rs + 0.1) + + assert_equal(N, 7) + assert_allclose(Wn, [0.14758232794342988, 0.6], rtol=1e-5) + + def test_analog(self): + wp = [1000, 6000] + ws = [2000, 5000] + rp = 3 + rs = 90 + N, Wn = ellipord(wp, ws, rp, rs, True) + b, a = ellip(N, rp, rs, Wn, 'bs', True) + w, h = freqs(b, a) + assert_array_less(-rp - 0.1, + dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), + -rs + 0.1) + + assert_equal(N, 8) + assert_allclose(Wn, [1666.6666, 6000]) + + assert_equal(ellipord(1, 1.2, 1, 80, analog=True)[0], 9) + + def test_fs_param(self): + wp = [400, 2400] + ws = [800, 2000] + rp = 3 + rs = 90 + fs = 8000 + N, Wn = ellipord(wp, ws, rp, rs, False, fs=fs) + b, a = ellip(N, rp, rs, Wn, 'bs', False, fs=fs) + w, h = freqz(b, a, fs=fs) + assert_array_less(-rp - 0.1, + dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) + assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), + -rs + 0.1) + + assert_equal(N, 7) + assert_allclose(Wn, [590.3293117737195, 2400], rtol=1e-5) + + +class TestBessel(object): + + def test_degenerate(self): + for norm in ('delay', 'phase', 'mag'): + # 0-order filter is just a passthrough + b, a = bessel(0, 1, analog=True, norm=norm) + assert_array_equal(b, [1]) + assert_array_equal(a, [1]) + + # 1-order filter is same for all types + b, a = bessel(1, 1, analog=True, norm=norm) + assert_allclose(b, [1], rtol=1e-15) + assert_allclose(a, [1, 1], rtol=1e-15) + + z, p, k = bessel(1, 0.3, analog=True, output='zpk', norm=norm) + assert_array_equal(z, []) + assert_allclose(p, [-0.3], rtol=1e-14) + assert_allclose(k, 0.3, rtol=1e-14) + + def test_high_order(self): + # high even order, 'phase' + z, p, k = bessel(24, 100, analog=True, output='zpk') + z2 = [] + p2 = [ + -9.055312334014323e+01 + 4.844005815403969e+00j, + -8.983105162681878e+01 + 1.454056170018573e+01j, + 
-8.837357994162065e+01 + 2.426335240122282e+01j, + -8.615278316179575e+01 + 3.403202098404543e+01j, + -8.312326467067703e+01 + 4.386985940217900e+01j, + -7.921695461084202e+01 + 5.380628489700191e+01j, + -7.433392285433246e+01 + 6.388084216250878e+01j, + -6.832565803501586e+01 + 7.415032695116071e+01j, + -6.096221567378025e+01 + 8.470292433074425e+01j, + -5.185914574820616e+01 + 9.569048385258847e+01j, + -4.027853855197555e+01 + 1.074195196518679e+02j, + -2.433481337524861e+01 + 1.207298683731973e+02j, + ] + k2 = 9.999999999999989e+47 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + assert_allclose(k, k2, rtol=1e-14) + + # high odd order, 'phase' + z, p, k = bessel(23, 1000, analog=True, output='zpk') + z2 = [] + p2 = [ + -2.497697202208956e+02 + 1.202813187870698e+03j, + -4.126986617510172e+02 + 1.065328794475509e+03j, + -5.304922463809596e+02 + 9.439760364018479e+02j, + -9.027564978975828e+02 + 1.010534334242318e+02j, + -8.909283244406079e+02 + 2.023024699647598e+02j, + -8.709469394347836e+02 + 3.039581994804637e+02j, + -8.423805948131370e+02 + 4.062657947488952e+02j, + -8.045561642249877e+02 + 5.095305912401127e+02j, + -7.564660146766259e+02 + 6.141594859516342e+02j, + -6.965966033906477e+02 + 7.207341374730186e+02j, + -6.225903228776276e+02 + 8.301558302815096e+02j, + -9.066732476324988e+02] + k2 = 9.999999999999983e+68 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + assert_allclose(k, k2, rtol=1e-14) + + # high even order, 'delay' (Orchard 1965 "The Roots of the + # Maximally Flat-Delay Polynomials" Table 1) + z, p, k = bessel(31, 1, analog=True, output='zpk', norm='delay') + p2 = [-20.876706, + -20.826543 + 1.735732j, + -20.675502 + 3.473320j, + -20.421895 + 5.214702j, + -20.062802 + 6.961982j, + -19.593895 + 8.717546j, + -19.009148 + 10.484195j, + -18.300400 + 12.265351j, + -17.456663 + 14.065350j, + -16.463032 + 15.889910j, + -15.298849 + 17.746914j, + -13.934466 + 19.647827j, + -12.324914 + 21.610519j, + -10.395893 + 23.665701j, + - 8.005600 + 25.875019j, + - 4.792045 + 28.406037j, + ] + assert_allclose(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + + # high odd order, 'delay' + z, p, k = bessel(30, 1, analog=True, output='zpk', norm='delay') + p2 = [-20.201029 + 0.867750j, + -20.097257 + 2.604235j, + -19.888485 + 4.343721j, + -19.572188 + 6.088363j, + -19.144380 + 7.840570j, + -18.599342 + 9.603147j, + -17.929195 + 11.379494j, + -17.123228 + 13.173901j, + -16.166808 + 14.992008j, + -15.039580 + 16.841580j, + -13.712245 + 18.733902j, + -12.140295 + 20.686563j, + -10.250119 + 22.729808j, + - 7.901170 + 24.924391j, + - 4.734679 + 27.435615j, + ] + assert_allclose(sorted(p, key=np.imag), + sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) + + def test_refs(self): + # Compare to http://www.crbond.com/papers/bsf2.pdf + # "Delay Normalized Bessel Polynomial Coefficients" + bond_b = 10395 + bond_a = [1, 21, 210, 1260, 4725, 10395, 10395] + b, a = bessel(6, 1, norm='delay', analog=True) + assert_allclose(bond_b, b) + assert_allclose(bond_a, a) + + # "Delay Normalized Bessel Pole Locations" + bond_poles = { + 1: [-1.0000000000], + 2: [-1.5000000000 + 0.8660254038j], + 3: [-1.8389073227 + 1.7543809598j, -2.3221853546], + 4: [-2.1037893972 + 2.6574180419j, -2.8962106028 + 0.8672341289j], + 5: [-2.3246743032 + 3.5710229203j, -3.3519563992 + 1.7426614162j, + -3.6467385953], + 6: [-2.5159322478 + 4.4926729537j, 
-3.7357083563 + 2.6262723114j, + -4.2483593959 + 0.8675096732j], + 7: [-2.6856768789 + 5.4206941307j, -4.0701391636 + 3.5171740477j, + -4.7582905282 + 1.7392860611j, -4.9717868585], + 8: [-2.8389839489 + 6.3539112986j, -4.3682892172 + 4.4144425005j, + -5.2048407906 + 2.6161751526j, -5.5878860433 + 0.8676144454j], + 9: [-2.9792607982 + 7.2914636883j, -4.6384398872 + 5.3172716754j, + -5.6044218195 + 3.4981569179j, -6.1293679043 + 1.7378483835j, + -6.2970191817], + 10: [-3.1089162336 + 8.2326994591j, -4.8862195669 + 6.2249854825j, + -5.9675283286 + 4.3849471889j, -6.6152909655 + 2.6115679208j, + -6.9220449054 + 0.8676651955j] + } + + for N in range(1, 11): + p1 = np.sort(bond_poles[N]) + p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'delay')[1]))) + assert_array_almost_equal(p1, p2, decimal=10) + + # "Frequency Normalized Bessel Pole Locations" + bond_poles = { + 1: [-1.0000000000], + 2: [-1.1016013306 + 0.6360098248j], + 3: [-1.0474091610 + 0.9992644363j, -1.3226757999], + 4: [-0.9952087644 + 1.2571057395j, -1.3700678306 + 0.4102497175j], + 5: [-0.9576765486 + 1.4711243207j, -1.3808773259 + 0.7179095876j, + -1.5023162714], + 6: [-0.9306565229 + 1.6618632689j, -1.3818580976 + 0.9714718907j, + -1.5714904036 + 0.3208963742j], + 7: [-0.9098677806 + 1.8364513530j, -1.3789032168 + 1.1915667778j, + -1.6120387662 + 0.5892445069j, -1.6843681793], + 8: [-0.8928697188 + 1.9983258436j, -1.3738412176 + 1.3883565759j, + -1.6369394181 + 0.8227956251j, -1.7574084004 + 0.2728675751j], + 9: [-0.8783992762 + 2.1498005243j, -1.3675883098 + 1.5677337122j, + -1.6523964846 + 1.0313895670j, -1.8071705350 + 0.5123837306j, + -1.8566005012], + 10: [-0.8657569017 + 2.2926048310j, -1.3606922784 + 1.7335057427j, + -1.6618102414 + 1.2211002186j, -1.8421962445 + 0.7272575978j, + -1.9276196914 + 0.2416234710j] + } + + for N in range(1, 11): + p1 = np.sort(bond_poles[N]) + p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'mag')[1]))) + assert_array_almost_equal(p1, p2, decimal=10) + + # Compare to http://www.rane.com/note147.html + # "Table 1 - Bessel Crossovers of Second, Third, and Fourth-Order" + a = [1, 1, 1/3] + b2, a2 = bessel(2, 1, norm='delay', analog=True) + assert_allclose(a[::-1], a2/b2) + + a = [1, 1, 2/5, 1/15] + b2, a2 = bessel(3, 1, norm='delay', analog=True) + assert_allclose(a[::-1], a2/b2) + + a = [1, 1, 9/21, 2/21, 1/105] + b2, a2 = bessel(4, 1, norm='delay', analog=True) + assert_allclose(a[::-1], a2/b2) + + a = [1, np.sqrt(3), 1] + b2, a2 = bessel(2, 1, norm='phase', analog=True) + assert_allclose(a[::-1], a2/b2) + + # TODO: Why so inaccurate? Is reference flawed? + a = [1, 2.481, 2.463, 1.018] + b2, a2 = bessel(3, 1, norm='phase', analog=True) + assert_array_almost_equal(a[::-1], a2/b2, decimal=1) + + # TODO: Why so inaccurate? Is reference flawed? + a = [1, 3.240, 4.5, 3.240, 1.050] + b2, a2 = bessel(4, 1, norm='phase', analog=True) + assert_array_almost_equal(a[::-1], a2/b2, decimal=1) + + # Table of -3 dB factors: + N, scale = 2, 1.272 + scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] + assert_array_almost_equal(scale, scale2, decimal=3) + + # TODO: Why so inaccurate? Is reference flawed? + N, scale = 3, 1.413 + scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] + assert_array_almost_equal(scale, scale2, decimal=2) + + # TODO: Why so inaccurate? Is reference flawed? 
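+        # The tabulated factors are ratios of -3 dB frequencies: 'mag' places
+        # the -3 dB point at w = 1 while 'phase' places the phase midpoint
+        # there, so the two pole arrays differ by a uniform scale, e.g.:
+        #     scale2 = besselap(4, 'mag')[1] / besselap(4, 'phase')[1]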
+ N, scale = 4, 1.533 + scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] + assert_array_almost_equal(scale, scale2, decimal=1) + + def test_hardcoded(self): + # Compare to values from original hardcoded implementation + originals = { + 0: [], + 1: [-1], + 2: [-.8660254037844386467637229 + .4999999999999999999999996j], + 3: [-.9416000265332067855971980, + -.7456403858480766441810907 + .7113666249728352680992154j], + 4: [-.6572111716718829545787788 + .8301614350048733772399715j, + -.9047587967882449459642624 + .2709187330038746636700926j], + 5: [-.9264420773877602247196260, + -.8515536193688395541722677 + .4427174639443327209850002j, + -.5905759446119191779319432 + .9072067564574549539291747j], + 6: [-.9093906830472271808050953 + .1856964396793046769246397j, + -.7996541858328288520243325 + .5621717346937317988594118j, + -.5385526816693109683073792 + .9616876881954277199245657j], + 7: [-.9194871556490290014311619, + -.8800029341523374639772340 + .3216652762307739398381830j, + -.7527355434093214462291616 + .6504696305522550699212995j, + -.4966917256672316755024763 + 1.002508508454420401230220j], + 8: [-.9096831546652910216327629 + .1412437976671422927888150j, + -.8473250802359334320103023 + .4259017538272934994996429j, + -.7111381808485399250796172 + .7186517314108401705762571j, + -.4621740412532122027072175 + 1.034388681126901058116589j], + 9: [-.9154957797499037686769223, + -.8911217017079759323183848 + .2526580934582164192308115j, + -.8148021112269012975514135 + .5085815689631499483745341j, + -.6743622686854761980403401 + .7730546212691183706919682j, + -.4331415561553618854685942 + 1.060073670135929666774323j], + 10: [-.9091347320900502436826431 + .1139583137335511169927714j, + -.8688459641284764527921864 + .3430008233766309973110589j, + -.7837694413101441082655890 + .5759147538499947070009852j, + -.6417513866988316136190854 + .8175836167191017226233947j, + -.4083220732868861566219785 + 1.081274842819124562037210j], + 11: [-.9129067244518981934637318, + -.8963656705721166099815744 + .2080480375071031919692341j, + -.8453044014712962954184557 + .4178696917801248292797448j, + -.7546938934722303128102142 + .6319150050721846494520941j, + -.6126871554915194054182909 + .8547813893314764631518509j, + -.3868149510055090879155425 + 1.099117466763120928733632j], + 12: [-.9084478234140682638817772 + 95506365213450398415258360e-27j, + -.8802534342016826507901575 + .2871779503524226723615457j, + -.8217296939939077285792834 + .4810212115100676440620548j, + -.7276681615395159454547013 + .6792961178764694160048987j, + -.5866369321861477207528215 + .8863772751320727026622149j, + -.3679640085526312839425808 + 1.114373575641546257595657j], + 13: [-.9110914665984182781070663, + -.8991314665475196220910718 + .1768342956161043620980863j, + -.8625094198260548711573628 + .3547413731172988997754038j, + -.7987460692470972510394686 + .5350752120696801938272504j, + -.7026234675721275653944062 + .7199611890171304131266374j, + -.5631559842430199266325818 + .9135900338325109684927731j, + -.3512792323389821669401925 + 1.127591548317705678613239j], + 14: [-.9077932138396487614720659 + 82196399419401501888968130e-27j, + -.8869506674916445312089167 + .2470079178765333183201435j, + -.8441199160909851197897667 + .4131653825102692595237260j, + -.7766591387063623897344648 + .5819170677377608590492434j, + -.6794256425119233117869491 + .7552857305042033418417492j, + -.5418766775112297376541293 + .9373043683516919569183099j, + -.3363868224902037330610040 + 1.139172297839859991370924j], + 15: [-.9097482363849064167228581, + 
-.9006981694176978324932918 + .1537681197278439351298882j, + -.8731264620834984978337843 + .3082352470564267657715883j, + -.8256631452587146506294553 + .4642348752734325631275134j, + -.7556027168970728127850416 + .6229396358758267198938604j, + -.6579196593110998676999362 + .7862895503722515897065645j, + -.5224954069658330616875186 + .9581787261092526478889345j, + -.3229963059766444287113517 + 1.149416154583629539665297j], + 16: [-.9072099595087001356491337 + 72142113041117326028823950e-27j, + -.8911723070323647674780132 + .2167089659900576449410059j, + -.8584264231521330481755780 + .3621697271802065647661080j, + -.8074790293236003885306146 + .5092933751171800179676218j, + -.7356166304713115980927279 + .6591950877860393745845254j, + -.6379502514039066715773828 + .8137453537108761895522580j, + -.5047606444424766743309967 + .9767137477799090692947061j, + -.3108782755645387813283867 + 1.158552841199330479412225j], + 17: [-.9087141161336397432860029, + -.9016273850787285964692844 + .1360267995173024591237303j, + -.8801100704438627158492165 + .2725347156478803885651973j, + -.8433414495836129204455491 + .4100759282910021624185986j, + -.7897644147799708220288138 + .5493724405281088674296232j, + -.7166893842372349049842743 + .6914936286393609433305754j, + -.6193710717342144521602448 + .8382497252826992979368621j, + -.4884629337672704194973683 + .9932971956316781632345466j, + -.2998489459990082015466971 + 1.166761272925668786676672j], + 18: [-.9067004324162775554189031 + 64279241063930693839360680e-27j, + -.8939764278132455733032155 + .1930374640894758606940586j, + -.8681095503628830078317207 + .3224204925163257604931634j, + -.8281885016242836608829018 + .4529385697815916950149364j, + -.7726285030739558780127746 + .5852778162086640620016316j, + -.6987821445005273020051878 + .7204696509726630531663123j, + -.6020482668090644386627299 + .8602708961893664447167418j, + -.4734268069916151511140032 + 1.008234300314801077034158j, + -.2897592029880489845789953 + 1.174183010600059128532230j], + 19: [-.9078934217899404528985092, + -.9021937639390660668922536 + .1219568381872026517578164j, + -.8849290585034385274001112 + .2442590757549818229026280j, + -.8555768765618421591093993 + .3672925896399872304734923j, + -.8131725551578197705476160 + .4915365035562459055630005j, + -.7561260971541629355231897 + .6176483917970178919174173j, + -.6818424412912442033411634 + .7466272357947761283262338j, + -.5858613321217832644813602 + .8801817131014566284786759j, + -.4595043449730988600785456 + 1.021768776912671221830298j, + -.2804866851439370027628724 + 1.180931628453291873626003j], + 20: [-.9062570115576771146523497 + 57961780277849516990208850e-27j, + -.8959150941925768608568248 + .1740317175918705058595844j, + -.8749560316673332850673214 + .2905559296567908031706902j, + -.8427907479956670633544106 + .4078917326291934082132821j, + -.7984251191290606875799876 + .5264942388817132427317659j, + -.7402780309646768991232610 + .6469975237605228320268752j, + -.6658120544829934193890626 + .7703721701100763015154510j, + -.5707026806915714094398061 + .8982829066468255593407161j, + -.4465700698205149555701841 + 1.034097702560842962315411j, + -.2719299580251652601727704 + 1.187099379810885886139638j], + 21: [-.9072262653142957028884077, + -.9025428073192696303995083 + .1105252572789856480992275j, + -.8883808106664449854431605 + .2213069215084350419975358j, + -.8643915813643204553970169 + .3326258512522187083009453j, + -.8299435470674444100273463 + .4448177739407956609694059j, + -.7840287980408341576100581 + .5583186348022854707564856j, 
+ -.7250839687106612822281339 + .6737426063024382240549898j, + -.6506315378609463397807996 + .7920349342629491368548074j, + -.5564766488918562465935297 + .9148198405846724121600860j, + -.4345168906815271799687308 + 1.045382255856986531461592j, + -.2640041595834031147954813 + 1.192762031948052470183960j], + 22: [-.9058702269930872551848625 + 52774908289999045189007100e-27j, + -.8972983138153530955952835 + .1584351912289865608659759j, + -.8799661455640176154025352 + .2644363039201535049656450j, + -.8534754036851687233084587 + .3710389319482319823405321j, + -.8171682088462720394344996 + .4785619492202780899653575j, + -.7700332930556816872932937 + .5874255426351153211965601j, + -.7105305456418785989070935 + .6982266265924524000098548j, + -.6362427683267827226840153 + .8118875040246347267248508j, + -.5430983056306302779658129 + .9299947824439872998916657j, + -.4232528745642628461715044 + 1.055755605227545931204656j, + -.2566376987939318038016012 + 1.197982433555213008346532j], + 23: [-.9066732476324988168207439, + -.9027564979912504609412993 + .1010534335314045013252480j, + -.8909283242471251458653994 + .2023024699381223418195228j, + -.8709469395587416239596874 + .3039581993950041588888925j, + -.8423805948021127057054288 + .4062657948237602726779246j, + -.8045561642053176205623187 + .5095305912227258268309528j, + -.7564660146829880581478138 + .6141594859476032127216463j, + -.6965966033912705387505040 + .7207341374753046970247055j, + -.6225903228771341778273152 + .8301558302812980678845563j, + -.5304922463810191698502226 + .9439760364018300083750242j, + -.4126986617510148836149955 + 1.065328794475513585531053j, + -.2497697202208956030229911 + 1.202813187870697831365338j], + 24: [-.9055312363372773709269407 + 48440066540478700874836350e-27j, + -.8983105104397872954053307 + .1454056133873610120105857j, + -.8837358034555706623131950 + .2426335234401383076544239j, + -.8615278304016353651120610 + .3403202112618624773397257j, + -.8312326466813240652679563 + .4386985933597305434577492j, + -.7921695462343492518845446 + .5380628490968016700338001j, + -.7433392285088529449175873 + .6388084216222567930378296j, + -.6832565803536521302816011 + .7415032695091650806797753j, + -.6096221567378335562589532 + .8470292433077202380020454j, + -.5185914574820317343536707 + .9569048385259054576937721j, + -.4027853855197518014786978 + 1.074195196518674765143729j, + -.2433481337524869675825448 + 1.207298683731972524975429j], + 25: [-.9062073871811708652496104, + -.9028833390228020537142561 + 93077131185102967450643820e-27j, + -.8928551459883548836774529 + .1863068969804300712287138j, + -.8759497989677857803656239 + .2798521321771408719327250j, + -.8518616886554019782346493 + .3738977875907595009446142j, + -.8201226043936880253962552 + .4686668574656966589020580j, + -.7800496278186497225905443 + .5644441210349710332887354j, + -.7306549271849967721596735 + .6616149647357748681460822j, + -.6704827128029559528610523 + .7607348858167839877987008j, + -.5972898661335557242320528 + .8626676330388028512598538j, + -.5073362861078468845461362 + .9689006305344868494672405j, + -.3934529878191079606023847 + 1.082433927173831581956863j, + -.2373280669322028974199184 + 1.211476658382565356579418j], + } + for N in originals: + p1 = sorted(np.union1d(originals[N], + np.conj(originals[N])), key=np.imag) + p2 = sorted(besselap(N)[1], key=np.imag) + assert_allclose(p1, p2, rtol=1e-14) + + def test_norm_phase(self): + # Test some orders and frequencies and see that they have the right + # phase at w0 + for N in (1, 2, 3, 4, 5, 51, 72): + for 
w0 in (1, 100): + b, a = bessel(N, w0, analog=True, norm='phase') + w = np.linspace(0, w0, 100) + w, h = freqs(b, a, w) + phase = np.unwrap(np.angle(h)) + assert_allclose(phase[[0, -1]], (0, -N*pi/4), rtol=1e-1) + + def test_norm_mag(self): + # Test some orders and frequencies and see that they have the right + # mag at w0 + for N in (1, 2, 3, 4, 5, 51, 72): + for w0 in (1, 100): + b, a = bessel(N, w0, analog=True, norm='mag') + w = (0, w0) + w, h = freqs(b, a, w) + mag = abs(h) + assert_allclose(mag, (1, 1/np.sqrt(2))) + + def test_norm_delay(self): + # Test some orders and frequencies and see that they have the right + # delay at DC + for N in (1, 2, 3, 4, 5, 51, 72): + for w0 in (1, 100): + b, a = bessel(N, w0, analog=True, norm='delay') + w = np.linspace(0, 10*w0, 1000) + w, h = freqs(b, a, w) + delay = -np.diff(np.unwrap(np.angle(h)))/np.diff(w) + assert_allclose(delay[0], 1/w0, rtol=1e-4) + + def test_norm_factor(self): + mpmath_values = { + 1: 1, 2: 1.361654128716130520, 3: 1.755672368681210649, + 4: 2.113917674904215843, 5: 2.427410702152628137, + 6: 2.703395061202921876, 7: 2.951722147038722771, + 8: 3.179617237510651330, 9: 3.391693138911660101, + 10: 3.590980594569163482, 11: 3.779607416439620092, + 12: 3.959150821144285315, 13: 4.130825499383535980, + 14: 4.295593409533637564, 15: 4.454233021624377494, + 16: 4.607385465472647917, 17: 4.755586548961147727, + 18: 4.899289677284488007, 19: 5.038882681488207605, + 20: 5.174700441742707423, 21: 5.307034531360917274, + 22: 5.436140703250035999, 23: 5.562244783787878196, + 24: 5.685547371295963521, 25: 5.806227623775418541, + 50: 8.268963160013226298, 51: 8.352374541546012058, + } + for N in mpmath_values: + z, p, k = besselap(N, 'delay') + assert_allclose(mpmath_values[N], _norm_factor(p, k), rtol=1e-13) + + def test_bessel_poly(self): + assert_array_equal(_bessel_poly(5), [945, 945, 420, 105, 15, 1]) + assert_array_equal(_bessel_poly(4, True), [1, 10, 45, 105, 105]) + + def test_bessel_zeros(self): + assert_array_equal(_bessel_zeros(0), []) + + def test_invalid(self): + assert_raises(ValueError, besselap, 5, 'nonsense') + assert_raises(ValueError, besselap, -5) + assert_raises(ValueError, besselap, 3.2) + assert_raises(ValueError, _bessel_poly, -3) + assert_raises(ValueError, _bessel_poly, 3.3) + + def test_fs_param(self): + for norm in ('phase', 'mag', 'delay'): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = bessel(N, fc, btype, fs=fs) + ba2 = bessel(N, fc/(fs/2), btype) + assert_allclose(ba1, ba2) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = bessel(N, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = bessel(N, fcnorm, btype) + assert_allclose(ba1, ba2) + + +class TestButter(object): + + def test_degenerate(self): + # 0-order filter is just a passthrough + b, a = butter(0, 1, analog=True) + assert_array_equal(b, [1]) + assert_array_equal(a, [1]) + + # 1-order filter is same for all types + b, a = butter(1, 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = butter(1, 0.3, output='zpk') + assert_array_equal(z, [-1]) + assert_allclose(p, [3.249196962329063e-01], rtol=1e-14) + assert_allclose(k, 3.375401518835469e-01, rtol=1e-14) + + def test_basic(self): + # analog s-plane + for N in range(25): + wn = 0.01 + z, p, k = butter(N, wn, 'low', analog=True, output='zpk') + 
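+            # Editor's aside, a hedged illustration (not part of the original
+            # test): analog Butterworth poles are equally spaced on the circle
+            # |s| = wn in the left half plane, so for N > 1 adjacent pole
+            # angles differ by exactly pi/N; the radius and stability checks
+            # below verify the other two properties of that geometry.
+            if N > 1:
+                assert_allclose(np.diff(np.sort(np.angle(-p))), pi/N,
+                                rtol=1e-7)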
assert_array_almost_equal([], z) + assert_(len(p) == N) + # All poles should be at distance wn from origin + assert_array_almost_equal(wn, abs(p)) + assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane + assert_array_almost_equal(wn**N, k) + + # digital z-plane + for N in range(25): + wn = 0.01 + z, p, k = butter(N, wn, 'high', analog=False, output='zpk') + assert_array_equal(np.ones(N), z) # All zeros exactly at DC + assert_(all(np.abs(p) <= 1)) # No poles outside unit circle + + b1, a1 = butter(2, 1, analog=True) + assert_array_almost_equal(b1, [1]) + assert_array_almost_equal(a1, [1, np.sqrt(2), 1]) + + b2, a2 = butter(5, 1, analog=True) + assert_array_almost_equal(b2, [1]) + assert_array_almost_equal(a2, [1, 3.2361, 5.2361, + 5.2361, 3.2361, 1], decimal=4) + + b3, a3 = butter(10, 1, analog=True) + assert_array_almost_equal(b3, [1]) + assert_array_almost_equal(a3, [1, 6.3925, 20.4317, 42.8021, 64.8824, + 74.2334, 64.8824, 42.8021, 20.4317, + 6.3925, 1], decimal=4) + + b2, a2 = butter(19, 1.0441379169150726, analog=True) + assert_array_almost_equal(b2, [2.2720], decimal=4) + assert_array_almost_equal(a2, 1.0e+004 * np.array([ + 0.0001, 0.0013, 0.0080, 0.0335, 0.1045, 0.2570, + 0.5164, 0.8669, 1.2338, 1.5010, 1.5672, 1.4044, + 1.0759, 0.6986, 0.3791, 0.1681, 0.0588, 0.0153, + 0.0026, 0.0002]), decimal=0) + + b, a = butter(5, 0.4) + assert_array_almost_equal(b, [0.0219, 0.1097, 0.2194, + 0.2194, 0.1097, 0.0219], decimal=4) + assert_array_almost_equal(a, [1.0000, -0.9853, 0.9738, + -0.3864, 0.1112, -0.0113], decimal=4) + + def test_highpass(self): + # highpass, high even order + z, p, k = butter(28, 0.43, 'high', output='zpk') + z2 = np.ones(28) + p2 = [ + 2.068257195514592e-01 + 9.238294351481734e-01j, + 2.068257195514592e-01 - 9.238294351481734e-01j, + 1.874933103892023e-01 + 8.269455076775277e-01j, + 1.874933103892023e-01 - 8.269455076775277e-01j, + 1.717435567330153e-01 + 7.383078571194629e-01j, + 1.717435567330153e-01 - 7.383078571194629e-01j, + 1.588266870755982e-01 + 6.564623730651094e-01j, + 1.588266870755982e-01 - 6.564623730651094e-01j, + 1.481881532502603e-01 + 5.802343458081779e-01j, + 1.481881532502603e-01 - 5.802343458081779e-01j, + 1.394122576319697e-01 + 5.086609000582009e-01j, + 1.394122576319697e-01 - 5.086609000582009e-01j, + 1.321840881809715e-01 + 4.409411734716436e-01j, + 1.321840881809715e-01 - 4.409411734716436e-01j, + 1.262633413354405e-01 + 3.763990035551881e-01j, + 1.262633413354405e-01 - 3.763990035551881e-01j, + 1.214660449478046e-01 + 3.144545234797277e-01j, + 1.214660449478046e-01 - 3.144545234797277e-01j, + 1.104868766650320e-01 + 2.771505404367791e-02j, + 1.104868766650320e-01 - 2.771505404367791e-02j, + 1.111768629525075e-01 + 8.331369153155753e-02j, + 1.111768629525075e-01 - 8.331369153155753e-02j, + 1.125740630842972e-01 + 1.394219509611784e-01j, + 1.125740630842972e-01 - 1.394219509611784e-01j, + 1.147138487992747e-01 + 1.963932363793666e-01j, + 1.147138487992747e-01 - 1.963932363793666e-01j, + 1.176516491045901e-01 + 2.546021573417188e-01j, + 1.176516491045901e-01 - 2.546021573417188e-01j, + ] + k2 = 1.446671081817286e-06 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-7) + assert_allclose(k, k2, rtol=1e-10) + + # highpass, high odd order + z, p, k = butter(27, 0.56, 'high', output='zpk') + z2 = np.ones(27) + p2 = [ + -1.772572785680147e-01 + 9.276431102995948e-01j, + -1.772572785680147e-01 - 9.276431102995948e-01j, + -1.600766565322114e-01 + 8.264026279893268e-01j, + 
-1.600766565322114e-01 - 8.264026279893268e-01j, + -1.461948419016121e-01 + 7.341841939120078e-01j, + -1.461948419016121e-01 - 7.341841939120078e-01j, + -1.348975284762046e-01 + 6.493235066053785e-01j, + -1.348975284762046e-01 - 6.493235066053785e-01j, + -1.256628210712206e-01 + 5.704921366889227e-01j, + -1.256628210712206e-01 - 5.704921366889227e-01j, + -1.181038235962314e-01 + 4.966120551231630e-01j, + -1.181038235962314e-01 - 4.966120551231630e-01j, + -1.119304913239356e-01 + 4.267938916403775e-01j, + -1.119304913239356e-01 - 4.267938916403775e-01j, + -1.069237739782691e-01 + 3.602914879527338e-01j, + -1.069237739782691e-01 - 3.602914879527338e-01j, + -1.029178030691416e-01 + 2.964677964142126e-01j, + -1.029178030691416e-01 - 2.964677964142126e-01j, + -9.978747500816100e-02 + 2.347687643085738e-01j, + -9.978747500816100e-02 - 2.347687643085738e-01j, + -9.743974496324025e-02 + 1.747028739092479e-01j, + -9.743974496324025e-02 - 1.747028739092479e-01j, + -9.580754551625957e-02 + 1.158246860771989e-01j, + -9.580754551625957e-02 - 1.158246860771989e-01j, + -9.484562207782568e-02 + 5.772118357151691e-02j, + -9.484562207782568e-02 - 5.772118357151691e-02j, + -9.452783117928215e-02 + ] + k2 = 9.585686688851069e-09 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-8) + assert_allclose(k, k2) + + def test_bandpass(self): + z, p, k = butter(8, [0.25, 0.33], 'band', output='zpk') + z2 = [1, 1, 1, 1, 1, 1, 1, 1, + -1, -1, -1, -1, -1, -1, -1, -1] + p2 = [ + 4.979909925436156e-01 + 8.367609424799387e-01j, + 4.979909925436156e-01 - 8.367609424799387e-01j, + 4.913338722555539e-01 + 7.866774509868817e-01j, + 4.913338722555539e-01 - 7.866774509868817e-01j, + 5.035229361778706e-01 + 7.401147376726750e-01j, + 5.035229361778706e-01 - 7.401147376726750e-01j, + 5.307617160406101e-01 + 7.029184459442954e-01j, + 5.307617160406101e-01 - 7.029184459442954e-01j, + 5.680556159453138e-01 + 6.788228792952775e-01j, + 5.680556159453138e-01 - 6.788228792952775e-01j, + 6.100962560818854e-01 + 6.693849403338664e-01j, + 6.100962560818854e-01 - 6.693849403338664e-01j, + 6.904694312740631e-01 + 6.930501690145245e-01j, + 6.904694312740631e-01 - 6.930501690145245e-01j, + 6.521767004237027e-01 + 6.744414640183752e-01j, + 6.521767004237027e-01 - 6.744414640183752e-01j, + ] + k2 = 3.398854055800844e-08 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + assert_allclose(k, k2, rtol=1e-13) + + # bandpass analog + z, p, k = butter(4, [90.5, 110.5], 'bp', analog=True, output='zpk') + z2 = np.zeros(4) + p2 = [ + -4.179137760733086e+00 + 1.095935899082837e+02j, + -4.179137760733086e+00 - 1.095935899082837e+02j, + -9.593598668443835e+00 + 1.034745398029734e+02j, + -9.593598668443835e+00 - 1.034745398029734e+02j, + -8.883991981781929e+00 + 9.582087115567160e+01j, + -8.883991981781929e+00 - 9.582087115567160e+01j, + -3.474530886568715e+00 + 9.111599925805801e+01j, + -3.474530886568715e+00 - 9.111599925805801e+01j, + ] + k2 = 1.600000000000001e+05 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag)) + assert_allclose(k, k2, rtol=1e-15) + + def test_bandstop(self): + z, p, k = butter(7, [0.45, 0.56], 'stop', output='zpk') + z2 = [-1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + 
-1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j, + -1.594474531383421e-02 + 9.998728744679880e-01j, + -1.594474531383421e-02 - 9.998728744679880e-01j] + p2 = [-1.766850742887729e-01 + 9.466951258673900e-01j, + -1.766850742887729e-01 - 9.466951258673900e-01j, + 1.467897662432886e-01 + 9.515917126462422e-01j, + 1.467897662432886e-01 - 9.515917126462422e-01j, + -1.370083529426906e-01 + 8.880376681273993e-01j, + -1.370083529426906e-01 - 8.880376681273993e-01j, + 1.086774544701390e-01 + 8.915240810704319e-01j, + 1.086774544701390e-01 - 8.915240810704319e-01j, + -7.982704457700891e-02 + 8.506056315273435e-01j, + -7.982704457700891e-02 - 8.506056315273435e-01j, + 5.238812787110331e-02 + 8.524011102699969e-01j, + 5.238812787110331e-02 - 8.524011102699969e-01j, + -1.357545000491310e-02 + 8.382287744986582e-01j, + -1.357545000491310e-02 - 8.382287744986582e-01j] + k2 = 4.577122512960063e-01 + assert_allclose(sorted(z, key=np.imag), sorted(z2, key=np.imag)) + assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag)) + assert_allclose(k, k2, rtol=1e-14) + + def test_ba_output(self): + b, a = butter(4, [100, 300], 'bandpass', analog=True) + b2 = [1.6e+09, 0, 0, 0, 0] + a2 = [1.000000000000000e+00, 5.226251859505511e+02, + 2.565685424949238e+05, 6.794127417357160e+07, + 1.519411254969542e+10, 2.038238225207147e+12, + 2.309116882454312e+14, 1.411088002066486e+16, + 8.099999999999991e+17] + assert_allclose(b, b2, rtol=1e-14) + assert_allclose(a, a2, rtol=1e-14) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = butter(N, fc, btype, fs=fs) + ba2 = butter(N, fc/(fs/2), btype) + assert_allclose(ba1, ba2) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = butter(N, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = butter(N, fcnorm, btype) + assert_allclose(ba1, ba2) + + +class TestCheby1(object): + + def test_degenerate(self): + # 0-order filter is just a passthrough + # Even-order filters have DC gain of -rp dB + b, a = cheby1(0, 10*np.log10(2), 1, analog=True) + assert_array_almost_equal(b, [1/np.sqrt(2)]) + assert_array_equal(a, [1]) + + # 1-order filter is same for all types + b, a = cheby1(1, 10*np.log10(2), 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = cheby1(1, 0.1, 0.3, output='zpk') + assert_array_equal(z, [-1]) + assert_allclose(p, [-5.390126972799615e-01], rtol=1e-14) + assert_allclose(k, 7.695063486399808e-01, rtol=1e-14) + + def test_basic(self): + for N in range(25): + wn = 0.01 + z, p, k = cheby1(N, 1, wn, 'low', analog=True, output='zpk') + assert_array_almost_equal([], z) + assert_(len(p) == N) + assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane + + for N in range(25): + wn = 0.01 + z, p, k = cheby1(N, 1, wn, 'high', analog=False, output='zpk') + assert_array_equal(np.ones(N), z) # All zeros exactly at DC + assert_(all(np.abs(p) <= 1)) # No poles outside unit circle + + # Same test as TestNormalize + b, a = cheby1(8, 0.5, 0.048) + assert_array_almost_equal(b, [ + 2.150733144728282e-11, 
1.720586515782626e-10, + 6.022052805239190e-10, 1.204410561047838e-09, + 1.505513201309798e-09, 1.204410561047838e-09, + 6.022052805239190e-10, 1.720586515782626e-10, + 2.150733144728282e-11], decimal=14) + assert_array_almost_equal(a, [ + 1.000000000000000e+00, -7.782402035027959e+00, + 2.654354569747454e+01, -5.182182531666387e+01, + 6.334127355102684e+01, -4.963358186631157e+01, + 2.434862182949389e+01, -6.836925348604676e+00, + 8.412934944449140e-01], decimal=14) + + b, a = cheby1(4, 1, [0.4, 0.7], btype='band') + assert_array_almost_equal(b, [0.0084, 0, -0.0335, 0, 0.0502, 0, + -0.0335, 0, 0.0084], decimal=4) + assert_array_almost_equal(a, [1.0, 1.1191, 2.862, 2.2986, 3.4137, + 1.8653, 1.8982, 0.5676, 0.4103], + decimal=4) + + b2, a2 = cheby1(5, 3, 1, analog=True) + assert_array_almost_equal(b2, [0.0626], decimal=4) + assert_array_almost_equal(a2, [1, 0.5745, 1.4150, 0.5489, 0.4080, + 0.0626], decimal=4) + + b, a = cheby1(8, 0.5, 0.1) + assert_array_almost_equal(b, 1.0e-006 * np.array([ + 0.00703924326028, 0.05631394608227, 0.19709881128793, + 0.39419762257586, 0.49274702821983, 0.39419762257586, + 0.19709881128793, 0.05631394608227, 0.00703924326028]), + decimal=13) + assert_array_almost_equal(a, [ + 1.00000000000000, -7.44912258934158, 24.46749067762108, + -46.27560200466141, 55.11160187999928, -42.31640010161038, + 20.45543300484147, -5.69110270561444, 0.69770374759022], + decimal=13) + + b, a = cheby1(8, 0.5, 0.25) + assert_array_almost_equal(b, 1.0e-003 * np.array([ + 0.00895261138923, 0.07162089111382, 0.25067311889837, + 0.50134623779673, 0.62668279724591, 0.50134623779673, + 0.25067311889837, 0.07162089111382, 0.00895261138923]), + decimal=13) + assert_array_almost_equal(a, [1.00000000000000, -5.97529229188545, + 16.58122329202101, -27.71423273542923, + 30.39509758355313, -22.34729670426879, + 10.74509800434910, -3.08924633697497, + 0.40707685889802], decimal=13) + + def test_highpass(self): + # high even order + z, p, k = cheby1(24, 0.7, 0.2, 'high', output='zpk') + z2 = np.ones(24) + p2 = [-6.136558509657073e-01 + 2.700091504942893e-01j, + -6.136558509657073e-01 - 2.700091504942893e-01j, + -3.303348340927516e-01 + 6.659400861114254e-01j, + -3.303348340927516e-01 - 6.659400861114254e-01j, + 8.779713780557169e-03 + 8.223108447483040e-01j, + 8.779713780557169e-03 - 8.223108447483040e-01j, + 2.742361123006911e-01 + 8.356666951611864e-01j, + 2.742361123006911e-01 - 8.356666951611864e-01j, + 4.562984557158206e-01 + 7.954276912303594e-01j, + 4.562984557158206e-01 - 7.954276912303594e-01j, + 5.777335494123628e-01 + 7.435821817961783e-01j, + 5.777335494123628e-01 - 7.435821817961783e-01j, + 6.593260977749194e-01 + 6.955390907990932e-01j, + 6.593260977749194e-01 - 6.955390907990932e-01j, + 7.149590948466562e-01 + 6.559437858502012e-01j, + 7.149590948466562e-01 - 6.559437858502012e-01j, + 7.532432388188739e-01 + 6.256158042292060e-01j, + 7.532432388188739e-01 - 6.256158042292060e-01j, + 7.794365244268271e-01 + 6.042099234813333e-01j, + 7.794365244268271e-01 - 6.042099234813333e-01j, + 7.967253874772997e-01 + 5.911966597313203e-01j, + 7.967253874772997e-01 - 5.911966597313203e-01j, + 8.069756417293870e-01 + 5.862214589217275e-01j, + 8.069756417293870e-01 - 5.862214589217275e-01j] + k2 = 6.190427617192018e-04 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-10) + assert_allclose(k, k2, rtol=1e-10) + + # high odd order + z, p, k = cheby1(23, 0.8, 0.3, 'high', output='zpk') + z2 = np.ones(23) + p2 = [-7.676400532011010e-01, + 
-6.754621070166477e-01 + 3.970502605619561e-01j, + -6.754621070166477e-01 - 3.970502605619561e-01j, + -4.528880018446727e-01 + 6.844061483786332e-01j, + -4.528880018446727e-01 - 6.844061483786332e-01j, + -1.986009130216447e-01 + 8.382285942941594e-01j, + -1.986009130216447e-01 - 8.382285942941594e-01j, + 2.504673931532608e-02 + 8.958137635794080e-01j, + 2.504673931532608e-02 - 8.958137635794080e-01j, + 2.001089429976469e-01 + 9.010678290791480e-01j, + 2.001089429976469e-01 - 9.010678290791480e-01j, + 3.302410157191755e-01 + 8.835444665962544e-01j, + 3.302410157191755e-01 - 8.835444665962544e-01j, + 4.246662537333661e-01 + 8.594054226449009e-01j, + 4.246662537333661e-01 - 8.594054226449009e-01j, + 4.919620928120296e-01 + 8.366772762965786e-01j, + 4.919620928120296e-01 - 8.366772762965786e-01j, + 5.385746917494749e-01 + 8.191616180796720e-01j, + 5.385746917494749e-01 - 8.191616180796720e-01j, + 5.855636993537203e-01 + 8.060680937701062e-01j, + 5.855636993537203e-01 - 8.060680937701062e-01j, + 5.688812849391721e-01 + 8.086497795114683e-01j, + 5.688812849391721e-01 - 8.086497795114683e-01j] + k2 = 1.941697029206324e-05 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-10) + assert_allclose(k, k2, rtol=1e-10) + + z, p, k = cheby1(10, 1, 1000, 'high', analog=True, output='zpk') + z2 = np.zeros(10) + p2 = [-3.144743169501551e+03 + 3.511680029092744e+03j, + -3.144743169501551e+03 - 3.511680029092744e+03j, + -5.633065604514602e+02 + 2.023615191183945e+03j, + -5.633065604514602e+02 - 2.023615191183945e+03j, + -1.946412183352025e+02 + 1.372309454274755e+03j, + -1.946412183352025e+02 - 1.372309454274755e+03j, + -7.987162953085479e+01 + 1.105207708045358e+03j, + -7.987162953085479e+01 - 1.105207708045358e+03j, + -2.250315039031946e+01 + 1.001723931471477e+03j, + -2.250315039031946e+01 - 1.001723931471477e+03j] + k2 = 8.912509381337453e-01 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + assert_allclose(k, k2, rtol=1e-15) + + def test_bandpass(self): + z, p, k = cheby1(8, 1, [0.3, 0.4], 'bp', output='zpk') + z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1] + p2 = [3.077784854851463e-01 + 9.453307017592942e-01j, + 3.077784854851463e-01 - 9.453307017592942e-01j, + 3.280567400654425e-01 + 9.272377218689016e-01j, + 3.280567400654425e-01 - 9.272377218689016e-01j, + 3.677912763284301e-01 + 9.038008865279966e-01j, + 3.677912763284301e-01 - 9.038008865279966e-01j, + 4.194425632520948e-01 + 8.769407159656157e-01j, + 4.194425632520948e-01 - 8.769407159656157e-01j, + 4.740921994669189e-01 + 8.496508528630974e-01j, + 4.740921994669189e-01 - 8.496508528630974e-01j, + 5.234866481897429e-01 + 8.259608422808477e-01j, + 5.234866481897429e-01 - 8.259608422808477e-01j, + 5.844717632289875e-01 + 8.052901363500210e-01j, + 5.844717632289875e-01 - 8.052901363500210e-01j, + 5.615189063336070e-01 + 8.100667803850766e-01j, + 5.615189063336070e-01 - 8.100667803850766e-01j] + k2 = 5.007028718074307e-09 + assert_array_equal(z, z2) + assert_allclose(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + assert_allclose(k, k2, rtol=1e-13) + + def test_bandstop(self): + z, p, k = cheby1(7, 1, [0.5, 0.6], 'stop', output='zpk') + z2 = [-1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + 
-1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j, + -1.583844403245361e-01 + 9.873775210440450e-01j, + -1.583844403245361e-01 - 9.873775210440450e-01j] + p2 = [-8.942974551472813e-02 + 3.482480481185926e-01j, + -8.942974551472813e-02 - 3.482480481185926e-01j, + 1.293775154041798e-01 + 8.753499858081858e-01j, + 1.293775154041798e-01 - 8.753499858081858e-01j, + 3.399741945062013e-02 + 9.690316022705607e-01j, + 3.399741945062013e-02 - 9.690316022705607e-01j, + 4.167225522796539e-04 + 9.927338161087488e-01j, + 4.167225522796539e-04 - 9.927338161087488e-01j, + -3.912966549550960e-01 + 8.046122859255742e-01j, + -3.912966549550960e-01 - 8.046122859255742e-01j, + -3.307805547127368e-01 + 9.133455018206508e-01j, + -3.307805547127368e-01 - 9.133455018206508e-01j, + -3.072658345097743e-01 + 9.443589759799366e-01j, + -3.072658345097743e-01 - 9.443589759799366e-01j] + k2 = 3.619438310405028e-01 + assert_allclose(sorted(z, key=np.imag), + sorted(z2, key=np.imag), rtol=1e-13) + assert_allclose(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-13) + assert_allclose(k, k2, rtol=1e-15) + + def test_ba_output(self): + # with transfer function conversion, without digital conversion + b, a = cheby1(5, 0.9, [210, 310], 'stop', analog=True) + b2 = [1.000000000000006e+00, 0, + 3.255000000000020e+05, 0, + 4.238010000000026e+10, 0, + 2.758944510000017e+15, 0, + 8.980364380050052e+19, 0, + 1.169243442282517e+24 + ] + a2 = [1.000000000000000e+00, 4.630555945694342e+02, + 4.039266454794788e+05, 1.338060988610237e+08, + 5.844333551294591e+10, 1.357346371637638e+13, + 3.804661141892782e+15, 5.670715850340080e+17, + 1.114411200988328e+20, 8.316815934908471e+21, + 1.169243442282517e+24 + ] + assert_allclose(b, b2, rtol=1e-14) + assert_allclose(a, a2, rtol=1e-14) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = cheby1(N, 1, fc, btype, fs=fs) + ba2 = cheby1(N, 1, fc/(fs/2), btype) + assert_allclose(ba1, ba2) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = cheby1(N, 1, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = cheby1(N, 1, fcnorm, btype) + assert_allclose(ba1, ba2) + + +class TestCheby2(object): + + def test_degenerate(self): + # 0-order filter is just a passthrough + # Stopband ripple factor doesn't matter + b, a = cheby2(0, 123.456, 1, analog=True) + assert_array_equal(b, [1]) + assert_array_equal(a, [1]) + + # 1-order filter is same for all types + b, a = cheby2(1, 10*np.log10(2), 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = cheby2(1, 50, 0.3, output='zpk') + assert_array_equal(z, [-1]) + assert_allclose(p, [9.967826460175649e-01], rtol=1e-14) + assert_allclose(k, 1.608676991217512e-03, rtol=1e-14) + + def test_basic(self): + for N in range(25): + wn = 0.01 + z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk') + assert_(len(p) == N) + assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane + + for N in range(25): + wn = 0.01 + z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk') + 
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle + + B, A = cheby2(18, 100, 0.5) + assert_array_almost_equal(B, [ + 0.00167583914216, 0.01249479541868, 0.05282702120282, + 0.15939804265706, 0.37690207631117, 0.73227013789108, + 1.20191856962356, 1.69522872823393, 2.07598674519837, + 2.21972389625291, 2.07598674519838, 1.69522872823395, + 1.20191856962359, 0.73227013789110, 0.37690207631118, + 0.15939804265707, 0.05282702120282, 0.01249479541868, + 0.00167583914216], decimal=13) + assert_array_almost_equal(A, [ + 1.00000000000000, -0.27631970006174, 3.19751214254060, + -0.15685969461355, 4.13926117356269, 0.60689917820044, + 2.95082770636540, 0.89016501910416, 1.32135245849798, + 0.51502467236824, 0.38906643866660, 0.15367372690642, + 0.07255803834919, 0.02422454070134, 0.00756108751837, + 0.00179848550988, 0.00033713574499, 0.00004258794833, + 0.00000281030149], decimal=13) + + def test_highpass(self): + # high even order + z, p, k = cheby2(26, 60, 0.3, 'high', output='zpk') + z2 = [9.981088955489852e-01 + 6.147058341984388e-02j, + 9.981088955489852e-01 - 6.147058341984388e-02j, + 9.832702870387426e-01 + 1.821525257215483e-01j, + 9.832702870387426e-01 - 1.821525257215483e-01j, + 9.550760158089112e-01 + 2.963609353922882e-01j, + 9.550760158089112e-01 - 2.963609353922882e-01j, + 9.162054748821922e-01 + 4.007087817803773e-01j, + 9.162054748821922e-01 - 4.007087817803773e-01j, + 8.700619897368064e-01 + 4.929423232136168e-01j, + 8.700619897368064e-01 - 4.929423232136168e-01j, + 5.889791753434985e-01 + 8.081482110427953e-01j, + 5.889791753434985e-01 - 8.081482110427953e-01j, + 5.984900456570295e-01 + 8.011302423760501e-01j, + 5.984900456570295e-01 - 8.011302423760501e-01j, + 6.172880888914629e-01 + 7.867371958365343e-01j, + 6.172880888914629e-01 - 7.867371958365343e-01j, + 6.448899971038180e-01 + 7.642754030030161e-01j, + 6.448899971038180e-01 - 7.642754030030161e-01j, + 6.804845629637927e-01 + 7.327624168637228e-01j, + 6.804845629637927e-01 - 7.327624168637228e-01j, + 8.202619107108660e-01 + 5.719881098737678e-01j, + 8.202619107108660e-01 - 5.719881098737678e-01j, + 7.228410452536148e-01 + 6.910143437705678e-01j, + 7.228410452536148e-01 - 6.910143437705678e-01j, + 7.702121399578629e-01 + 6.377877856007792e-01j, + 7.702121399578629e-01 - 6.377877856007792e-01j] + p2 = [7.365546198286450e-01 + 4.842085129329526e-02j, + 7.365546198286450e-01 - 4.842085129329526e-02j, + 7.292038510962885e-01 + 1.442201672097581e-01j, + 7.292038510962885e-01 - 1.442201672097581e-01j, + 7.151293788040354e-01 + 2.369925800458584e-01j, + 7.151293788040354e-01 - 2.369925800458584e-01j, + 6.955051820787286e-01 + 3.250341363856910e-01j, + 6.955051820787286e-01 - 3.250341363856910e-01j, + 6.719122956045220e-01 + 4.070475750638047e-01j, + 6.719122956045220e-01 - 4.070475750638047e-01j, + 6.461722130611300e-01 + 4.821965916689270e-01j, + 6.461722130611300e-01 - 4.821965916689270e-01j, + 5.528045062872224e-01 + 8.162920513838372e-01j, + 5.528045062872224e-01 - 8.162920513838372e-01j, + 5.464847782492791e-01 + 7.869899955967304e-01j, + 5.464847782492791e-01 - 7.869899955967304e-01j, + 5.488033111260949e-01 + 7.520442354055579e-01j, + 5.488033111260949e-01 - 7.520442354055579e-01j, + 6.201874719022955e-01 + 5.500894392527353e-01j, + 6.201874719022955e-01 - 5.500894392527353e-01j, + 5.586478152536709e-01 + 7.112676877332921e-01j, + 5.586478152536709e-01 - 7.112676877332921e-01j, + 5.958145844148228e-01 + 6.107074340842115e-01j, + 5.958145844148228e-01 - 6.107074340842115e-01j, + 5.747812938519067e-01 + 
6.643001536914696e-01j, + 5.747812938519067e-01 - 6.643001536914696e-01j] + k2 = 9.932997786497189e-02 + assert_allclose(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + assert_allclose(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-12) + assert_allclose(k, k2, rtol=1e-11) + + # high odd order + z, p, k = cheby2(25, 80, 0.5, 'high', output='zpk') + z2 = [9.690690376586687e-01 + 2.467897896011971e-01j, + 9.690690376586687e-01 - 2.467897896011971e-01j, + 9.999999999999492e-01, + 8.835111277191199e-01 + 4.684101698261429e-01j, + 8.835111277191199e-01 - 4.684101698261429e-01j, + 7.613142857900539e-01 + 6.483830335935022e-01j, + 7.613142857900539e-01 - 6.483830335935022e-01j, + 6.232625173626231e-01 + 7.820126817709752e-01j, + 6.232625173626231e-01 - 7.820126817709752e-01j, + 4.864456563413621e-01 + 8.737108351316745e-01j, + 4.864456563413621e-01 - 8.737108351316745e-01j, + 3.618368136816749e-01 + 9.322414495530347e-01j, + 3.618368136816749e-01 - 9.322414495530347e-01j, + 2.549486883466794e-01 + 9.669545833752675e-01j, + 2.549486883466794e-01 - 9.669545833752675e-01j, + 1.676175432109457e-01 + 9.858520980390212e-01j, + 1.676175432109457e-01 - 9.858520980390212e-01j, + 1.975218468277521e-03 + 9.999980492540941e-01j, + 1.975218468277521e-03 - 9.999980492540941e-01j, + 1.786959496651858e-02 + 9.998403260399917e-01j, + 1.786959496651858e-02 - 9.998403260399917e-01j, + 9.967933660557139e-02 + 9.950196127985684e-01j, + 9.967933660557139e-02 - 9.950196127985684e-01j, + 5.013970951219547e-02 + 9.987422137518890e-01j, + 5.013970951219547e-02 - 9.987422137518890e-01j] + p2 = [4.218866331906864e-01, + 4.120110200127552e-01 + 1.361290593621978e-01j, + 4.120110200127552e-01 - 1.361290593621978e-01j, + 3.835890113632530e-01 + 2.664910809911026e-01j, + 3.835890113632530e-01 - 2.664910809911026e-01j, + 3.399195570456499e-01 + 3.863983538639875e-01j, + 3.399195570456499e-01 - 3.863983538639875e-01j, + 2.855977834508353e-01 + 4.929444399540688e-01j, + 2.855977834508353e-01 - 4.929444399540688e-01j, + 2.255765441339322e-01 + 5.851631870205766e-01j, + 2.255765441339322e-01 - 5.851631870205766e-01j, + 1.644087535815792e-01 + 6.637356937277153e-01j, + 1.644087535815792e-01 - 6.637356937277153e-01j, + -7.293633845273095e-02 + 9.739218252516307e-01j, + -7.293633845273095e-02 - 9.739218252516307e-01j, + 1.058259206358626e-01 + 7.304739464862978e-01j, + 1.058259206358626e-01 - 7.304739464862978e-01j, + -5.703971947785402e-02 + 9.291057542169088e-01j, + -5.703971947785402e-02 - 9.291057542169088e-01j, + 5.263875132656864e-02 + 7.877974334424453e-01j, + 5.263875132656864e-02 - 7.877974334424453e-01j, + -3.007943405982616e-02 + 8.846331716180016e-01j, + -3.007943405982616e-02 - 8.846331716180016e-01j, + 6.857277464483946e-03 + 8.383275456264492e-01j, + 6.857277464483946e-03 - 8.383275456264492e-01j] + k2 = 6.507068761705037e-03 + assert_allclose(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + assert_allclose(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-12) + assert_allclose(k, k2, rtol=1e-11) + + def test_bandpass(self): + z, p, k = cheby2(9, 40, [0.07, 0.2], 'pass', output='zpk') + z2 = [-9.999999999999999e-01, + 3.676588029658514e-01 + 9.299607543341383e-01j, + 3.676588029658514e-01 - 9.299607543341383e-01j, + 7.009689684982283e-01 + 7.131917730894889e-01j, + 7.009689684982283e-01 - 7.131917730894889e-01j, + 7.815697973765858e-01 + 6.238178033919218e-01j, + 7.815697973765858e-01 - 6.238178033919218e-01j, + 8.063793628819866e-01 + 5.913986160941200e-01j, + 
8.063793628819866e-01 - 5.913986160941200e-01j, + 1.000000000000001e+00, + 9.944493019920448e-01 + 1.052168511576739e-01j, + 9.944493019920448e-01 - 1.052168511576739e-01j, + 9.854674703367308e-01 + 1.698642543566085e-01j, + 9.854674703367308e-01 - 1.698642543566085e-01j, + 9.762751735919308e-01 + 2.165335665157851e-01j, + 9.762751735919308e-01 - 2.165335665157851e-01j, + 9.792277171575134e-01 + 2.027636011479496e-01j, + 9.792277171575134e-01 - 2.027636011479496e-01j] + p2 = [8.143803410489621e-01 + 5.411056063397541e-01j, + 8.143803410489621e-01 - 5.411056063397541e-01j, + 7.650769827887418e-01 + 5.195412242095543e-01j, + 7.650769827887418e-01 - 5.195412242095543e-01j, + 6.096241204063443e-01 + 3.568440484659796e-01j, + 6.096241204063443e-01 - 3.568440484659796e-01j, + 6.918192770246239e-01 + 4.770463577106911e-01j, + 6.918192770246239e-01 - 4.770463577106911e-01j, + 6.986241085779207e-01 + 1.146512226180060e-01j, + 6.986241085779207e-01 - 1.146512226180060e-01j, + 8.654645923909734e-01 + 1.604208797063147e-01j, + 8.654645923909734e-01 - 1.604208797063147e-01j, + 9.164831670444591e-01 + 1.969181049384918e-01j, + 9.164831670444591e-01 - 1.969181049384918e-01j, + 9.630425777594550e-01 + 2.317513360702271e-01j, + 9.630425777594550e-01 - 2.317513360702271e-01j, + 9.438104703725529e-01 + 2.193509900269860e-01j, + 9.438104703725529e-01 - 2.193509900269860e-01j] + k2 = 9.345352824659604e-03 + assert_allclose(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + assert_allclose(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-13) + assert_allclose(k, k2, rtol=1e-11) + + def test_bandstop(self): + z, p, k = cheby2(6, 55, [0.1, 0.9], 'stop', output='zpk') + z2 = [6.230544895101009e-01 + 7.821784343111114e-01j, + 6.230544895101009e-01 - 7.821784343111114e-01j, + 9.086608545660115e-01 + 4.175349702471991e-01j, + 9.086608545660115e-01 - 4.175349702471991e-01j, + 9.478129721465802e-01 + 3.188268649763867e-01j, + 9.478129721465802e-01 - 3.188268649763867e-01j, + -6.230544895100982e-01 + 7.821784343111109e-01j, + -6.230544895100982e-01 - 7.821784343111109e-01j, + -9.086608545660116e-01 + 4.175349702472088e-01j, + -9.086608545660116e-01 - 4.175349702472088e-01j, + -9.478129721465784e-01 + 3.188268649763897e-01j, + -9.478129721465784e-01 - 3.188268649763897e-01j] + p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j, + -9.464094036167638e-01 - 1.720048695084344e-01j, + -8.715844103386737e-01 + 1.370665039509297e-01j, + -8.715844103386737e-01 - 1.370665039509297e-01j, + -8.078751204586425e-01 + 5.729329866682983e-02j, + -8.078751204586425e-01 - 5.729329866682983e-02j, + 9.464094036167665e-01 + 1.720048695084332e-01j, + 9.464094036167665e-01 - 1.720048695084332e-01j, + 8.078751204586447e-01 + 5.729329866683007e-02j, + 8.078751204586447e-01 - 5.729329866683007e-02j, + 8.715844103386721e-01 + 1.370665039509331e-01j, + 8.715844103386721e-01 - 1.370665039509331e-01j] + k2 = 2.917823332763358e-03 + assert_allclose(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-13) + assert_allclose(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-13) + assert_allclose(k, k2, rtol=1e-11) + + def test_ba_output(self): + # with transfer function conversion, without digital conversion + b, a = cheby2(5, 20, [2010, 2100], 'stop', True) + b2 = [1.000000000000000e+00, 0, # Matlab: 6.683253076978249e-12, + 2.111512500000000e+07, 0, # Matlab: 1.134325604589552e-04, + 1.782966433781250e+14, 0, # Matlab: 7.216787944356781e+02, + 7.525901316990656e+20, 0, # Matlab: 2.039829265789886e+09, + 
1.587960565565748e+27, 0, # Matlab: 2.161236218626134e+15, + 1.339913493808585e+33] + a2 = [1.000000000000000e+00, 1.849550755473371e+02, + 2.113222918998538e+07, 3.125114149732283e+09, + 1.785133457155609e+14, 1.979158697776348e+16, + 7.535048322653831e+20, 5.567966191263037e+22, + 1.589246884221346e+27, 5.871210648525566e+28, + 1.339913493808590e+33] + assert_allclose(b, b2, rtol=1e-14) + assert_allclose(a, a2, rtol=1e-14) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = cheby2(N, 20, fc, btype, fs=fs) + ba2 = cheby2(N, 20, fc/(fs/2), btype) + assert_allclose(ba1, ba2) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = cheby2(N, 20, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = cheby2(N, 20, fcnorm, btype) + assert_allclose(ba1, ba2) + +class TestEllip(object): + + def test_degenerate(self): + # 0-order filter is just a passthrough + # Even-order filters have DC gain of -rp dB + # Stopband ripple factor doesn't matter + b, a = ellip(0, 10*np.log10(2), 123.456, 1, analog=True) + assert_array_almost_equal(b, [1/np.sqrt(2)]) + assert_array_equal(a, [1]) + + # 1-order filter is same for all types + b, a = ellip(1, 10*np.log10(2), 1, 1, analog=True) + assert_array_almost_equal(b, [1]) + assert_array_almost_equal(a, [1, 1]) + + z, p, k = ellip(1, 1, 55, 0.3, output='zpk') + assert_allclose(z, [-9.999999999999998e-01], rtol=1e-14) + assert_allclose(p, [-6.660721153525525e-04], rtol=1e-10) + assert_allclose(k, 5.003330360576763e-01, rtol=1e-14) + + def test_basic(self): + for N in range(25): + wn = 0.01 + z, p, k = ellip(N, 1, 40, wn, 'low', analog=True, output='zpk') + assert_(len(p) == N) + assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane + + for N in range(25): + wn = 0.01 + z, p, k = ellip(N, 1, 40, wn, 'high', analog=False, output='zpk') + assert_(all(np.abs(p) <= 1)) # No poles outside unit circle + + b3, a3 = ellip(5, 3, 26, 1, analog=True) + assert_array_almost_equal(b3, [0.1420, 0, 0.3764, 0, + 0.2409], decimal=4) + assert_array_almost_equal(a3, [1, 0.5686, 1.8061, 0.8017, 0.8012, + 0.2409], decimal=4) + + b, a = ellip(3, 1, 60, [0.4, 0.7], 'stop') + assert_array_almost_equal(b, [0.3310, 0.3469, 1.1042, 0.7044, 1.1042, + 0.3469, 0.3310], decimal=4) + assert_array_almost_equal(a, [1.0000, 0.6973, 1.1441, 0.5878, 0.7323, + 0.1131, -0.0060], decimal=4) + + def test_highpass(self): + # high even order + z, p, k = ellip(24, 1, 80, 0.3, 'high', output='zpk') + z2 = [9.761875332501075e-01 + 2.169283290099910e-01j, + 9.761875332501075e-01 - 2.169283290099910e-01j, + 8.413503353963494e-01 + 5.404901600661900e-01j, + 8.413503353963494e-01 - 5.404901600661900e-01j, + 7.160082576305009e-01 + 6.980918098681732e-01j, + 7.160082576305009e-01 - 6.980918098681732e-01j, + 6.456533638965329e-01 + 7.636306264739803e-01j, + 6.456533638965329e-01 - 7.636306264739803e-01j, + 6.127321820971366e-01 + 7.902906256703928e-01j, + 6.127321820971366e-01 - 7.902906256703928e-01j, + 5.983607817490196e-01 + 8.012267936512676e-01j, + 5.983607817490196e-01 - 8.012267936512676e-01j, + 5.922577552594799e-01 + 8.057485658286990e-01j, + 5.922577552594799e-01 - 8.057485658286990e-01j, + 5.896952092563588e-01 + 8.076258788449631e-01j, + 5.896952092563588e-01 - 8.076258788449631e-01j, + 5.886248765538837e-01 + 8.084063054565607e-01j, + 5.886248765538837e-01 - 8.084063054565607e-01j, + 
5.881802711123132e-01 + 8.087298490066037e-01j, + 5.881802711123132e-01 - 8.087298490066037e-01j, + 5.879995719101164e-01 + 8.088612386766461e-01j, + 5.879995719101164e-01 - 8.088612386766461e-01j, + 5.879354086709576e-01 + 8.089078780868164e-01j, + 5.879354086709576e-01 - 8.089078780868164e-01j] + p2 = [-3.184805259081650e-01 + 4.206951906775851e-01j, + -3.184805259081650e-01 - 4.206951906775851e-01j, + 1.417279173459985e-01 + 7.903955262836452e-01j, + 1.417279173459985e-01 - 7.903955262836452e-01j, + 4.042881216964651e-01 + 8.309042239116594e-01j, + 4.042881216964651e-01 - 8.309042239116594e-01j, + 5.128964442789670e-01 + 8.229563236799665e-01j, + 5.128964442789670e-01 - 8.229563236799665e-01j, + 5.569614712822724e-01 + 8.155957702908510e-01j, + 5.569614712822724e-01 - 8.155957702908510e-01j, + 5.750478870161392e-01 + 8.118633973883931e-01j, + 5.750478870161392e-01 - 8.118633973883931e-01j, + 5.825314018170804e-01 + 8.101960910679270e-01j, + 5.825314018170804e-01 - 8.101960910679270e-01j, + 5.856397379751872e-01 + 8.094825218722543e-01j, + 5.856397379751872e-01 - 8.094825218722543e-01j, + 5.869326035251949e-01 + 8.091827531557583e-01j, + 5.869326035251949e-01 - 8.091827531557583e-01j, + 5.874697218855733e-01 + 8.090593298213502e-01j, + 5.874697218855733e-01 - 8.090593298213502e-01j, + 5.876904783532237e-01 + 8.090127161018823e-01j, + 5.876904783532237e-01 - 8.090127161018823e-01j, + 5.877753105317594e-01 + 8.090050577978136e-01j, + 5.877753105317594e-01 - 8.090050577978136e-01j] + k2 = 4.918081266957108e-02 + assert_allclose(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-4) + assert_allclose(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-4) + assert_allclose(k, k2, rtol=1e-3) + + # high odd order + z, p, k = ellip(23, 1, 70, 0.5, 'high', output='zpk') + z2 = [9.999999999998661e-01, + 6.603717261750994e-01 + 7.509388678638675e-01j, + 6.603717261750994e-01 - 7.509388678638675e-01j, + 2.788635267510325e-01 + 9.603307416968041e-01j, + 2.788635267510325e-01 - 9.603307416968041e-01j, + 1.070215532544218e-01 + 9.942567008268131e-01j, + 1.070215532544218e-01 - 9.942567008268131e-01j, + 4.049427369978163e-02 + 9.991797705105507e-01j, + 4.049427369978163e-02 - 9.991797705105507e-01j, + 1.531059368627931e-02 + 9.998827859909265e-01j, + 1.531059368627931e-02 - 9.998827859909265e-01j, + 5.808061438534933e-03 + 9.999831330689181e-01j, + 5.808061438534933e-03 - 9.999831330689181e-01j, + 2.224277847754599e-03 + 9.999975262909676e-01j, + 2.224277847754599e-03 - 9.999975262909676e-01j, + 8.731857107534554e-04 + 9.999996187732845e-01j, + 8.731857107534554e-04 - 9.999996187732845e-01j, + 3.649057346914968e-04 + 9.999999334218996e-01j, + 3.649057346914968e-04 - 9.999999334218996e-01j, + 1.765538109802615e-04 + 9.999999844143768e-01j, + 1.765538109802615e-04 - 9.999999844143768e-01j, + 1.143655290967426e-04 + 9.999999934602630e-01j, + 1.143655290967426e-04 - 9.999999934602630e-01j] + p2 = [-6.322017026545028e-01, + -4.648423756662754e-01 + 5.852407464440732e-01j, + -4.648423756662754e-01 - 5.852407464440732e-01j, + -2.249233374627773e-01 + 8.577853017985717e-01j, + -2.249233374627773e-01 - 8.577853017985717e-01j, + -9.234137570557621e-02 + 9.506548198678851e-01j, + -9.234137570557621e-02 - 9.506548198678851e-01j, + -3.585663561241373e-02 + 9.821494736043981e-01j, + -3.585663561241373e-02 - 9.821494736043981e-01j, + -1.363917242312723e-02 + 9.933844128330656e-01j, + -1.363917242312723e-02 - 9.933844128330656e-01j, + -5.131505238923029e-03 + 9.975221173308673e-01j, + 
-5.131505238923029e-03 - 9.975221173308673e-01j, + -1.904937999259502e-03 + 9.990680819857982e-01j, + -1.904937999259502e-03 - 9.990680819857982e-01j, + -6.859439885466834e-04 + 9.996492201426826e-01j, + -6.859439885466834e-04 - 9.996492201426826e-01j, + -2.269936267937089e-04 + 9.998686250679161e-01j, + -2.269936267937089e-04 - 9.998686250679161e-01j, + -5.687071588789117e-05 + 9.999527573294513e-01j, + -5.687071588789117e-05 - 9.999527573294513e-01j, + -6.948417068525226e-07 + 9.999882737700173e-01j, + -6.948417068525226e-07 - 9.999882737700173e-01j] + k2 = 1.220910020289434e-02 + assert_allclose(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-4) + assert_allclose(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-4) + assert_allclose(k, k2, rtol=1e-3) + + def test_bandpass(self): + z, p, k = ellip(7, 1, 40, [0.07, 0.2], 'pass', output='zpk') + z2 = [-9.999999999999991e-01, + 6.856610961780020e-01 + 7.279209168501619e-01j, + 6.856610961780020e-01 - 7.279209168501619e-01j, + 7.850346167691289e-01 + 6.194518952058737e-01j, + 7.850346167691289e-01 - 6.194518952058737e-01j, + 7.999038743173071e-01 + 6.001281461922627e-01j, + 7.999038743173071e-01 - 6.001281461922627e-01j, + 9.999999999999999e-01, + 9.862938983554124e-01 + 1.649980183725925e-01j, + 9.862938983554124e-01 - 1.649980183725925e-01j, + 9.788558330548762e-01 + 2.045513580850601e-01j, + 9.788558330548762e-01 - 2.045513580850601e-01j, + 9.771155231720003e-01 + 2.127093189691258e-01j, + 9.771155231720003e-01 - 2.127093189691258e-01j] + p2 = [8.063992755498643e-01 + 5.858071374778874e-01j, + 8.063992755498643e-01 - 5.858071374778874e-01j, + 8.050395347071724e-01 + 5.639097428109795e-01j, + 8.050395347071724e-01 - 5.639097428109795e-01j, + 8.113124936559144e-01 + 4.855241143973142e-01j, + 8.113124936559144e-01 - 4.855241143973142e-01j, + 8.665595314082394e-01 + 3.334049560919331e-01j, + 8.665595314082394e-01 - 3.334049560919331e-01j, + 9.412369011968871e-01 + 2.457616651325908e-01j, + 9.412369011968871e-01 - 2.457616651325908e-01j, + 9.679465190411238e-01 + 2.228772501848216e-01j, + 9.679465190411238e-01 - 2.228772501848216e-01j, + 9.747235066273385e-01 + 2.178937926146544e-01j, + 9.747235066273385e-01 - 2.178937926146544e-01j] + k2 = 8.354782670263239e-03 + assert_allclose(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-4) + assert_allclose(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-4) + assert_allclose(k, k2, rtol=1e-3) + + z, p, k = ellip(5, 1, 75, [90.5, 110.5], 'pass', True, 'zpk') + z2 = [-5.583607317695175e-14 + 1.433755965989225e+02j, + -5.583607317695175e-14 - 1.433755965989225e+02j, + 5.740106416459296e-14 + 1.261678754570291e+02j, + 5.740106416459296e-14 - 1.261678754570291e+02j, + -2.199676239638652e-14 + 6.974861996895196e+01j, + -2.199676239638652e-14 - 6.974861996895196e+01j, + -3.372595657044283e-14 + 7.926145989044531e+01j, + -3.372595657044283e-14 - 7.926145989044531e+01j, + 0] + p2 = [-8.814960004852743e-01 + 1.104124501436066e+02j, + -8.814960004852743e-01 - 1.104124501436066e+02j, + -2.477372459140184e+00 + 1.065638954516534e+02j, + -2.477372459140184e+00 - 1.065638954516534e+02j, + -3.072156842945799e+00 + 9.995404870405324e+01j, + -3.072156842945799e+00 - 9.995404870405324e+01j, + -2.180456023925693e+00 + 9.379206865455268e+01j, + -2.180456023925693e+00 - 9.379206865455268e+01j, + -7.230484977485752e-01 + 9.056598800801140e+01j, + -7.230484977485752e-01 - 9.056598800801140e+01j] + k2 = 3.774571622827070e-02 + assert_allclose(sorted(z, key=np.imag), + sorted(z2, 
key=np.imag), rtol=1e-4) + assert_allclose(sorted(p, key=np.imag), + sorted(p2, key=np.imag), rtol=1e-6) + assert_allclose(k, k2, rtol=1e-3) + + def test_bandstop(self): + z, p, k = ellip(8, 1, 65, [0.2, 0.4], 'stop', output='zpk') + z2 = [3.528578094286510e-01 + 9.356769561794296e-01j, + 3.528578094286510e-01 - 9.356769561794296e-01j, + 3.769716042264783e-01 + 9.262248159096587e-01j, + 3.769716042264783e-01 - 9.262248159096587e-01j, + 4.406101783111199e-01 + 8.976985411420985e-01j, + 4.406101783111199e-01 - 8.976985411420985e-01j, + 5.539386470258847e-01 + 8.325574907062760e-01j, + 5.539386470258847e-01 - 8.325574907062760e-01j, + 6.748464963023645e-01 + 7.379581332490555e-01j, + 6.748464963023645e-01 - 7.379581332490555e-01j, + 7.489887970285254e-01 + 6.625826604475596e-01j, + 7.489887970285254e-01 - 6.625826604475596e-01j, + 7.913118471618432e-01 + 6.114127579150699e-01j, + 7.913118471618432e-01 - 6.114127579150699e-01j, + 7.806804740916381e-01 + 6.249303940216475e-01j, + 7.806804740916381e-01 - 6.249303940216475e-01j] + + p2 = [-1.025299146693730e-01 + 5.662682444754943e-01j, + -1.025299146693730e-01 - 5.662682444754943e-01j, + 1.698463595163031e-01 + 8.926678667070186e-01j, + 1.698463595163031e-01 - 8.926678667070186e-01j, + 2.750532687820631e-01 + 9.351020170094005e-01j, + 2.750532687820631e-01 - 9.351020170094005e-01j, + 3.070095178909486e-01 + 9.457373499553291e-01j, + 3.070095178909486e-01 - 9.457373499553291e-01j, + 7.695332312152288e-01 + 2.792567212705257e-01j, + 7.695332312152288e-01 - 2.792567212705257e-01j, + 8.083818999225620e-01 + 4.990723496863960e-01j, + 8.083818999225620e-01 - 4.990723496863960e-01j, + 8.066158014414928e-01 + 5.649811440393374e-01j, + 8.066158014414928e-01 - 5.649811440393374e-01j, + 8.062787978834571e-01 + 5.855780880424964e-01j, + 8.062787978834571e-01 - 5.855780880424964e-01j] + k2 = 2.068622545291259e-01 + assert_allclose(sorted(z, key=np.angle), + sorted(z2, key=np.angle), rtol=1e-6) + assert_allclose(sorted(p, key=np.angle), + sorted(p2, key=np.angle), rtol=1e-5) + assert_allclose(k, k2, rtol=1e-5) + + def test_ba_output(self): + # with transfer function conversion, without digital conversion + b, a = ellip(5, 1, 40, [201, 240], 'stop', True) + b2 = [ + 1.000000000000000e+00, 0, # Matlab: 1.743506051190569e-13, + 2.426561778314366e+05, 0, # Matlab: 3.459426536825722e-08, + 2.348218683400168e+10, 0, # Matlab: 2.559179747299313e-03, + 1.132780692872241e+15, 0, # Matlab: 8.363229375535731e+01, + 2.724038554089566e+19, 0, # Matlab: 1.018700994113120e+06, + 2.612380874940186e+23 + ] + a2 = [ + 1.000000000000000e+00, 1.337266601804649e+02, + 2.486725353510667e+05, 2.628059713728125e+07, + 2.436169536928770e+10, 1.913554568577315e+12, + 1.175208184614438e+15, 6.115751452473410e+16, + 2.791577695211466e+19, 7.241811142725384e+20, + 2.612380874940182e+23 + ] + assert_allclose(b, b2, rtol=1e-6) + assert_allclose(a, a2, rtol=1e-4) + + def test_fs_param(self): + for fs in (900, 900.1, 1234.567): + for N in (0, 1, 2, 3, 10): + for fc in (100, 100.1, 432.12345): + for btype in ('lp', 'hp'): + ba1 = ellip(N, 1, 20, fc, btype, fs=fs) + ba2 = ellip(N, 1, 20, fc/(fs/2), btype) + assert_allclose(ba1, ba2) + for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): + for btype in ('bp', 'bs'): + ba1 = ellip(N, 1, 20, fc, btype, fs=fs) + for seq in (list, tuple, array): + fcnorm = seq([f/(fs/2) for f in fc]) + ba2 = ellip(N, 1, 20, fcnorm, btype) + assert_allclose(ba1, ba2) + + +def test_sos_consistency(): + # Consistency checks of output='sos' for the specialized 
IIR filter
+    # design functions.
+    design_funcs = [(bessel, (0.1,)),
+                    (butter, (0.1,)),
+                    (cheby1, (45.0, 0.1)),
+                    (cheby2, (0.087, 0.1)),
+                    (ellip, (0.087, 45, 0.1))]
+    for func, args in design_funcs:
+        name = func.__name__
+
+        b, a = func(2, *args, output='ba')
+        sos = func(2, *args, output='sos')
+        assert_allclose(sos, [np.hstack((b, a))], err_msg="%s(2,...)" % name)
+
+        zpk = func(3, *args, output='zpk')
+        sos = func(3, *args, output='sos')
+        assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(3,...)" % name)
+
+        zpk = func(4, *args, output='zpk')
+        sos = func(4, *args, output='sos')
+        assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(4,...)" % name)
+
+
+class TestIIRNotch(object):
+
+    def test_ba_output(self):
+        # Compare coefficients with Matlab ones
+        # for the equivalent input:
+        b, a = iirnotch(0.06, 30)
+        b2 = [
+            9.9686824e-01, -1.9584219e+00,
+            9.9686824e-01
+        ]
+        a2 = [
+            1.0000000e+00, -1.9584219e+00,
+            9.9373647e-01
+        ]
+
+        assert_allclose(b, b2, rtol=1e-8)
+        assert_allclose(a, a2, rtol=1e-8)
+
+    def test_frequency_response(self):
+        # Get filter coefficients
+        b, a = iirnotch(0.3, 30)
+
+        # Get frequency response
+        w, h = freqz(b, a, 1000)
+
+        # Pick 5 points
+        p = [200,  # w0 = 0.200
+             295,  # w0 = 0.295
+             300,  # w0 = 0.300
+             305,  # w0 = 0.305
+             400]  # w0 = 0.400
+
+        # Get the frequency response corresponding to each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 0.3 and should be close to 1
+        assert_allclose(abs(hp[0]), 1, rtol=1e-2)
+        assert_allclose(abs(hp[4]), 1, rtol=1e-2)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to -3 dB
+        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be removed;
+        # the frequency response there should be very close to 0
+        assert_allclose(abs(hp[2]), 0, atol=1e-10)
+
+    def test_errors(self):
+        # Exception should be raised if w0 > 1 or w0 < 0
+        assert_raises(ValueError, iirnotch, w0=2, Q=30)
+        assert_raises(ValueError, iirnotch, w0=-1, Q=30)
+
+        # Exception should be raised if any of the parameters
+        # are not float (or cannot be converted to one)
+        assert_raises(ValueError, iirnotch, w0="blabla", Q=30)
+        assert_raises(TypeError, iirnotch, w0=-1, Q=[1, 2, 3])
+
+    def test_fs_param(self):
+        # Get filter coefficients
+        b, a = iirnotch(1500, 30, fs=10000)
+
+        # Get frequency response
+        w, h = freqz(b, a, 1000, fs=10000)
+
+        # Pick 5 points
+        p = [200,  # w0 = 1000
+             295,  # w0 = 1475
+             300,  # w0 = 1500
+             305,  # w0 = 1525
+             400]  # w0 = 2000
+
+        # Get the frequency response corresponding to each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 1500 and should be close to 1
+        assert_allclose(abs(hp[0]), 1, rtol=1e-2)
+        assert_allclose(abs(hp[4]), 1, rtol=1e-2)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to -3 dB
+        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be removed;
+        # the frequency response there should be very close to 0
+        assert_allclose(abs(hp[2]), 0, atol=1e-10)
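+
+
+# Editor's aside, a minimal hedged sketch (the _sketch_* helper is an
+# editorial illustration, not part of the original test suite): the -3 dB
+# bandwidth of iirnotch is w0/Q, which is why the tests above sample the
+# response at 0.295 and 0.305 for w0 = 0.3, Q = 30 (bandwidth 0.3/30 = 0.01,
+# i.e. band edges at w0 -/+ bw/2).
+def _sketch_notch_bandwidth():
+    import numpy as np
+    from numpy.testing import assert_allclose
+    from scipy.signal import iirnotch, freqz
+    w0, Q = 0.3, 30.0
+    bw = w0 / Q
+    b, a = iirnotch(w0, Q)
+    # Evaluate |H| exactly at the two band edges, in rad/sample
+    edges = np.pi * np.array([w0 - bw/2, w0 + bw/2])
+    _, h = freqz(b, a, worN=edges)
+    # Both edges should sit at -3 dB, i.e. 1/sqrt(2)
+    assert_allclose(np.abs(h), 1/np.sqrt(2), rtol=1e-2)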
+
+
+class TestIIRPeak(object):
+
+    def test_ba_output(self):
+        # Compare coefficients with MATLAB ones
+        # for the equivalent input:
+        b, a = iirpeak(0.06, 30)
+        b2 = [
+            3.131764229e-03, 0,
+            -3.131764229e-03
+        ]
+        a2 = [
+            1.0000000e+00, -1.958421917e+00,
+            9.9373647e-01
+        ]
+        assert_allclose(b, b2, rtol=1e-8)
+        assert_allclose(a, a2, rtol=1e-8)
+
+    def test_frequency_response(self):
+        # Get the filter coefficients
+        b, a = iirpeak(0.3, 30)
+
+        # Get the frequency response
+        w, h = freqz(b, a, 1000)
+
+        # Pick 5 points
+        p = [30,   # w0 = 0.030
+             295,  # w0 = 0.295
+             300,  # w0 = 0.300
+             305,  # w0 = 0.305
+             800]  # w0 = 0.800
+
+        # Get the frequency response corresponding to each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 0.3 and should be close to 0
+        assert_allclose(abs(hp[0]), 0, atol=1e-2)
+        assert_allclose(abs(hp[4]), 0, atol=1e-2)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to 10**(-3/20)
+        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be retained;
+        # the frequency response there should be very close to 1
+        assert_allclose(abs(hp[2]), 1, rtol=1e-10)
+
+    def test_errors(self):
+        # An exception should be raised if w0 > 1 or w0 < 0
+        assert_raises(ValueError, iirpeak, w0=2, Q=30)
+        assert_raises(ValueError, iirpeak, w0=-1, Q=30)
+
+        # An exception should be raised if any of the parameters
+        # are not float (or cannot be converted to one)
+        assert_raises(ValueError, iirpeak, w0="blabla", Q=30)
+        assert_raises(TypeError, iirpeak, w0=-1, Q=[1, 2, 3])
+
+    def test_fs_param(self):
+        # Get the filter coefficients
+        b, a = iirpeak(1200, 30, fs=8000)
+
+        # Get the frequency response
+        w, h = freqz(b, a, 1000, fs=8000)
+
+        # Pick 5 points
+        p = [30,   # w0 = 120
+             295,  # w0 = 1180
+             300,  # w0 = 1200
+             305,  # w0 = 1220
+             800]  # w0 = 3200
+
+        # Get the frequency response corresponding to each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 1200 and should be close to 0
+        assert_allclose(abs(hp[0]), 0, atol=1e-2)
+        assert_allclose(abs(hp[4]), 0, atol=1e-2)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to 10**(-3/20)
+        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be retained;
+        # the frequency response there should be very close to 1
+        assert_allclose(abs(hp[2]), 1, rtol=1e-10)
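# A standalone aside restating the property that test_symmetry (below) relies
# on: a real-coefficient IIR design has a conjugate-symmetric pole set, so the
# sorted poles equal the sorted conjugated poles. butter(5, 0.25) is just an
# illustrative choice.
import numpy as np
from scipy.signal import butter

z_s, p_s, k_s = butter(5, 0.25, output='zpk')
print(np.allclose(np.sort_complex(p_s), np.sort_complex(p_s.conj())))  # True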
+
+
+class TestIIRFilter(object):
+
+    def test_symmetry(self):
+        # All built-in IIR filters are real, so they should have perfectly
+        # symmetrical poles and zeros. Then the ba representation (using
+        # numpy.poly) will be purely real instead of having negligible
+        # imaginary parts.
+        for N in np.arange(1, 26):
+            for ftype in ('butter', 'bessel', 'cheby1', 'cheby2', 'ellip'):
+                z, p, k = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
+                                    ftype=ftype, output='zpk')
+                assert_array_equal(sorted(z), sorted(z.conj()))
+                assert_array_equal(sorted(p), sorted(p.conj()))
+                assert_equal(k, np.real(k))
+
+                b, a = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
+                                 ftype=ftype, output='ba')
+                assert_(issubclass(b.dtype.type, np.floating))
+                assert_(issubclass(a.dtype.type, np.floating))
+
+    def test_int_inputs(self):
+        # Using integer frequency arguments and large N should not produce
+        # np.ints that wrap around to negative numbers
+        k = iirfilter(24, 100, btype='low', analog=True, ftype='bessel',
+                      output='zpk')[2]
+        k2 = 9.999999999999989e+47
+        assert_allclose(k, k2)
+
+    def test_invalid_wn_size(self):
+        # low and high have 1 Wn, band and stop have 2 Wn
+        assert_raises(ValueError, iirfilter, 1, [0.1, 0.9], btype='low')
+        assert_raises(ValueError, iirfilter, 1, [0.2, 0.5], btype='high')
+        assert_raises(ValueError, iirfilter, 1, 0.2, btype='bp')
+        assert_raises(ValueError, iirfilter, 1, 400, btype='bs', analog=True)
+
+    def test_invalid_wn_range(self):
+        # For digital filters, 0 <= Wn <= 1
+        assert_raises(ValueError, iirfilter, 1, 2, btype='low')
+        assert_raises(ValueError, iirfilter, 1, [0.5, 1], btype='band')
+        assert_raises(ValueError, iirfilter, 1, [0., 0.5], btype='band')
+        assert_raises(ValueError, iirfilter, 1, -1, btype='high')
+        assert_raises(ValueError, iirfilter, 1, [1, 2], btype='band')
+        assert_raises(ValueError, iirfilter, 1, [10, 20], btype='stop')
+
+
+class TestGroupDelay(object):
+    def test_identity_filter(self):
+        w, gd = group_delay((1, 1))
+        assert_array_almost_equal(w, pi * np.arange(512) / 512)
+        assert_array_almost_equal(gd, np.zeros(512))
+        w, gd = group_delay((1, 1), whole=True)
+        assert_array_almost_equal(w, 2 * pi * np.arange(512) / 512)
+        assert_array_almost_equal(gd, np.zeros(512))
+
+    def test_fir(self):
+        # Let's design a linear-phase FIR filter and check that its group
+        # delay is constant.
+        N = 100
+        b = firwin(N + 1, 0.1)
+        w, gd = group_delay((b, 1))
+        assert_allclose(gd, 0.5 * N)
+
+    def test_iir(self):
+        # Let's design a Butterworth filter and test its group delay at
+        # some points against the MATLAB answer.
+        b, a = butter(4, 0.1)
+        w = np.linspace(0, pi, num=10, endpoint=False)
+        w, gd = group_delay((b, a), w=w)
+        matlab_gd = np.array([8.249313898506037, 11.958947880907104,
+                              2.452325615326005, 1.048918665702008,
+                              0.611382575635897, 0.418293269460578,
+                              0.317932917836572, 0.261371844762525,
+                              0.229038045801298, 0.212185774208521])
+        assert_array_almost_equal(gd, matlab_gd)
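# A standalone aside restating test_fir above: a symmetric FIR filter with
# N+1 taps is linear-phase, so it delays every frequency by N/2 samples and
# its group delay is constant. The N=100, cutoff=0.1 values mirror the test.
import numpy as np
from scipy.signal import firwin, group_delay

b_f = firwin(101, 0.1)            # 101 symmetric taps
w_f, gd_f = group_delay((b_f, 1))
print(np.allclose(gd_f, 50.0))    # True: a constant 50-sample delay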
+
+    def test_singular(self):
+        # Let's create a filter with zeros and poles on the unit circle and
+        # check that a warning is raised and that the group delay is set to
+        # zero at these frequencies.
+        z1 = np.exp(1j * 0.1 * pi)
+        z2 = np.exp(1j * 0.25 * pi)
+        p1 = np.exp(1j * 0.5 * pi)
+        p2 = np.exp(1j * 0.8 * pi)
+        b = np.convolve([1, -z1], [1, -z2])
+        a = np.convolve([1, -p1], [1, -p2])
+        w = np.array([0.1 * pi, 0.25 * pi, -0.5 * pi, -0.8 * pi])
+
+        w, gd = assert_warns(UserWarning, group_delay, (b, a), w=w)
+        assert_allclose(gd, 0)
+
+    def test_backward_compat(self):
+        # For backward compatibility, test that None acts as a wrapper for
+        # the default
+        w1, gd1 = group_delay((1, 1))
+        w2, gd2 = group_delay((1, 1), None)
+        assert_array_almost_equal(w1, w2)
+        assert_array_almost_equal(gd1, gd2)
+
+    def test_fs_param(self):
+        # Let's design a Butterworth filter and test its group delay at
+        # some points against the normalized frequency answer.
+        b, a = butter(4, 4800, fs=96000)
+        w = np.linspace(0, 96000/2, num=10, endpoint=False)
+        w, gd = group_delay((b, a), w=w, fs=96000)
+        norm_gd = np.array([8.249313898506037, 11.958947880907104,
+                            2.452325615326005, 1.048918665702008,
+                            0.611382575635897, 0.418293269460578,
+                            0.317932917836572, 0.261371844762525,
+                            0.229038045801298, 0.212185774208521])
+        assert_array_almost_equal(gd, norm_gd)
+
+    def test_w_or_N_types(self):
+        # Measure at 8 equally-spaced points
+        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
+                  np.array(8)):
+            w, gd = group_delay((1, 1), N)
+            assert_array_almost_equal(w, pi * np.arange(8) / 8)
+            assert_array_almost_equal(gd, np.zeros(8))
+
+        # Measure at frequency 8 rad/sec
+        for w in (8.0, 8.0+0j):
+            w_out, gd = group_delay((1, 1), w)
+            assert_array_almost_equal(w_out, [8])
+            assert_array_almost_equal(gd, [0])
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_filter_design.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_filter_design.pyc
new file mode 100644
index 0000000..50e5d02
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_filter_design.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_fir_filter_design.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_fir_filter_design.py
new file mode 100644
index 0000000..8150f45
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_fir_filter_design.py
@@ -0,0 +1,563 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
+                           assert_equal, assert_,
+                           assert_allclose, assert_warns)
+from pytest import raises as assert_raises
+
+from scipy.special import sinc
+
+from scipy.signal import kaiser_beta, kaiser_atten, kaiserord, \
+    firwin, firwin2, freqz, remez, firls, minimum_phase
+
+
+def test_kaiser_beta():
+    b = kaiser_beta(58.7)
+    assert_almost_equal(b, 0.1102 * 50.0)
+    b = kaiser_beta(22.0)
+    assert_almost_equal(b, 0.5842 + 0.07886)
+    b = kaiser_beta(21.0)
+    assert_equal(b, 0.0)
+    b = kaiser_beta(10.0)
+    assert_equal(b, 0.0)
+
+
+def test_kaiser_atten():
+    a = kaiser_atten(1, 1.0)
+    assert_equal(a, 7.95)
+    a = kaiser_atten(2, 1/np.pi)
+    assert_equal(a, 2.285 + 7.95)
+
+
+def test_kaiserord():
+    assert_raises(ValueError, kaiserord, 1.0, 1.0)
+    numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi)
+    assert_equal((numtaps, beta), (2, 0.0))
+
+
+class TestFirwin(object):
+
+    def check_response(self, h, expected_response, tol=.05):
+        N = len(h)
+        alpha = 0.5 * (N-1)
+        m = np.arange(0, N) - alpha   # time indices of taps
+        for freq, expected in expected_response:
+            actual =
abs(np.sum(h*np.exp(-1.j*np.pi*m*freq))) + mse = abs(actual-expected)**2 + assert_(mse < tol, 'response not as expected, mse=%g > %g' + % (mse, tol)) + + def test_response(self): + N = 51 + f = .5 + # increase length just to try even/odd + h = firwin(N, f) # low-pass from 0 to f + self.check_response(h, [(.25,1), (.75,0)]) + + h = firwin(N+1, f, window='nuttall') # specific window + self.check_response(h, [(.25,1), (.75,0)]) + + h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass + self.check_response(h, [(.25,0), (.75,1)]) + + f1, f2, f3, f4 = .2, .4, .6, .8 + h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter + self.check_response(h, [(.1,0), (.3,1), (.5,0)]) + + h = firwin(N+4, [f1, f2]) # band-stop filter + self.check_response(h, [(.1,1), (.3,0), (.5,1)]) + + h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False) + self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)]) + + h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter + self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)]) + + h = firwin(N+7, 0.1, width=.03) # low-pass + self.check_response(h, [(.05,1), (.75,0)]) + + h = firwin(N+8, 0.1, pass_zero=False) # high-pass + self.check_response(h, [(.05,0), (.75,1)]) + + def mse(self, h, bands): + """Compute mean squared error versus ideal response across frequency + band. + h -- coefficients + bands -- list of (left, right) tuples relative to 1==Nyquist of + passbands + """ + w, H = freqz(h, worN=1024) + f = w/np.pi + passIndicator = np.zeros(len(w), bool) + for left, right in bands: + passIndicator |= (f >= left) & (f < right) + Hideal = np.where(passIndicator, 1, 0) + mse = np.mean(abs(abs(H)-Hideal)**2) + return mse + + def test_scaling(self): + """ + For one lowpass, bandpass, and highpass example filter, this test + checks two things: + - the mean squared error over the frequency domain of the unscaled + filter is smaller than the scaled filter (true for rectangular + window) + - the response of the scaled filter is exactly unity at the center + of the first passband + """ + N = 11 + cases = [ + ([.5], True, (0, 1)), + ([0.2, .6], False, (.4, 1)), + ([.5], False, (1, 1)), + ] + for cutoff, pass_zero, expected_response in cases: + h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones') + hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones') + if len(cutoff) == 1: + if pass_zero: + cutoff = [0] + cutoff + else: + cutoff = cutoff + [1] + assert_(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]), + 'least squares violation') + self.check_response(hs, [expected_response], 1e-12) + + +class TestFirWinMore(object): + """Different author, different style, different tests...""" + + def test_lowpass(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + taps = firwin(ntaps, cutoff=0.5, window=('kaiser', beta), scale=False) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) + + def test_highpass(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + + # Ensure that ntaps is odd. + ntaps |= 1 + + taps = firwin(ntaps, cutoff=0.5, window=('kaiser', beta), + pass_zero=False, scale=False) + + # Check the symmetry of taps. 
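# A standalone aside on the Kaiser-window design helpers exercised above:
# kaiserord maps a desired attenuation (dB) and transition width (as a
# fraction of the Nyquist rate) to a tap count and Kaiser beta, which firwin
# then consumes. The 120 dB / 0.04 numbers mirror the tests.
from scipy.signal import kaiserord, firwin

ntaps_k, beta_k = kaiserord(120, 0.04)
taps_k = firwin(ntaps_k, cutoff=0.5, window=('kaiser', beta_k), scale=False)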
+ assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) + + def test_bandpass(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + taps = firwin(ntaps, cutoff=[0.3, 0.7], window=('kaiser', beta), + pass_zero=False, scale=False) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5, + 0.7-width/2, 0.7+width/2, 0.8, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) + + def test_multi(self): + width = 0.04 + ntaps, beta = kaiserord(120, width) + taps = firwin(ntaps, cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta), + pass_zero=True, scale=False) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35, + 0.5-width/2, 0.5+width/2, 0.65, + 0.8-width/2, 0.8+width/2, 0.9, 1.0]) + freqs, response = freqz(taps, worN=np.pi*freq_samples) + assert_array_almost_equal(np.abs(response), + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], + decimal=5) + + def test_fs_nyq(self): + """Test the fs and nyq keywords.""" + nyquist = 1000 + width = 40.0 + relative_width = width/nyquist + ntaps, beta = kaiserord(120, relative_width) + taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta), + pass_zero=False, scale=False, fs=2*nyquist) + + # Check the symmetry of taps. + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) + + # Check the gain at a few samples where we know it should be approximately 0 or 1. + freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500, + 700-width/2, 700+width/2, 800, 1000]) + freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist) + assert_array_almost_equal(np.abs(response), + [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) + + taps2 = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta), + pass_zero=False, scale=False, nyq=nyquist) + assert_allclose(taps2, taps) + + def test_bad_cutoff(self): + """Test that invalid cutoff argument raises ValueError.""" + # cutoff values must be greater than 0 and less than 1. + assert_raises(ValueError, firwin, 99, -0.5) + assert_raises(ValueError, firwin, 99, 1.5) + # Don't allow 0 or 1 in cutoff. + assert_raises(ValueError, firwin, 99, [0, 0.5]) + assert_raises(ValueError, firwin, 99, [0.5, 1]) + # cutoff values must be strictly increasing. + assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2]) + assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5]) + # Must have at least one cutoff value. + assert_raises(ValueError, firwin, 99, []) + # 2D array not allowed. + assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]]) + # cutoff values must be less than nyq. 
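# A standalone aside mirroring test_fs_nyq above: with the fs keyword the
# cutoffs are given in the same units as the sampling rate rather than
# normalized to Nyquist. The 300-700 Hz band at fs=2000 Hz is illustrative.
from scipy.signal import firwin

taps_bp = firwin(147, [300, 700], pass_zero=False, fs=2000)  # band-pass in Hz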
+        assert_raises(ValueError, firwin, 99, 50.0, nyq=40)
+        assert_raises(ValueError, firwin, 99, [10, 20, 30], nyq=25)
+        assert_raises(ValueError, firwin, 99, 50.0, fs=80)
+        assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50)
+
+    def test_even_highpass_raises_value_error(self):
+        """Test that attempting to create a highpass filter with an even
+        number of taps raises a ValueError exception."""
+        assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
+        assert_raises(ValueError, firwin, 40, [.25, 0.5])
+
+
+class TestFirwin2(object):
+
+    def test_invalid_args(self):
+        # `freq` and `gain` have different lengths.
+        assert_raises(ValueError, firwin2, 50, [0, 0.5, 1], [0.0, 1.0])
+        # `nfreqs` is less than `ntaps`.
+        assert_raises(ValueError, firwin2, 50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
+        # Decreasing value in `freq`
+        assert_raises(ValueError, firwin2, 50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
+        # Value in `freq` repeated more than once.
+        assert_raises(ValueError, firwin2, 50, [0, .1, .1, .1, 1.0],
+                      [0.0, 0.5, 0.75, 1.0, 1.0])
+        # `freq` does not start at 0.0.
+        assert_raises(ValueError, firwin2, 50, [0.5, 1.0], [0.0, 1.0])
+
+        # Type II filter, but the gain at the Nyquist frequency is not zero.
+        assert_raises(ValueError, firwin2, 16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0])
+
+        # Type III filter, but the gains at the Nyquist and zero frequencies
+        # are not zero.
+        assert_raises(ValueError, firwin2, 17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0],
+                      antisymmetric=True)
+        assert_raises(ValueError, firwin2, 17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0],
+                      antisymmetric=True)
+        assert_raises(ValueError, firwin2, 17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0],
+                      antisymmetric=True)
+
+        # Type IV filter, but the gain at zero frequency is not zero.
+        assert_raises(ValueError, firwin2, 16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0],
+                      antisymmetric=True)
+
+    def test01(self):
+        width = 0.04
+        beta = 12.0
+        ntaps = 400
+        # The filter is 1 from w=0 to w=0.5, then decreases linearly from 1
+        # to 0 as w increases from w=0.5 to w=1 (w=1 is the Nyquist
+        # frequency).
+        freq = [0.0, 0.5, 1.0]
+        gain = [1.0, 1.0, 0.0]
+        taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
+        freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2,
+                                 0.75, 1.0-width/2])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                                  [1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5)
+
+    def test02(self):
+        width = 0.04
+        beta = 12.0
+        # ntaps must be odd for positive gain at Nyquist.
+        ntaps = 401
+        # An ideal highpass filter.
+        freq = [0.0, 0.5, 0.5, 1.0]
+        gain = [0.0, 0.0, 1.0, 1.0]
+        taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
+        freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
+
+    def test03(self):
+        width = 0.02
+        ntaps, beta = kaiserord(120, width)
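# A standalone aside restating what test01 above exercises: firwin2 samples
# an arbitrary piecewise-linear magnitude response (here flat up to 0.5,
# then rolling off to zero at Nyquist) and derives the taps from it.
from scipy.signal import firwin2

taps_f2 = firwin2(400, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])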
+        # ntaps must be odd for positive gain at Nyquist.
+        ntaps = int(ntaps) | 1
+        freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0]
+        gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
+        taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
+        freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45,
+                                 0.5-width, 0.5+width, 0.75, 1.0])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                                  [1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
+
+    def test04(self):
+        """Test firwin2 when window=None."""
+        ntaps = 5
+        # Ideal lowpass: gain is 1 on [0, 0.5] and 0 on [0.5, 1.0]
+        freq = [0.0, 0.5, 0.5, 1.0]
+        gain = [1.0, 1.0, 0.0, 0.0]
+        taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193)
+        alpha = 0.5 * (ntaps - 1)
+        m = np.arange(0, ntaps) - alpha
+        h = 0.5 * sinc(0.5 * m)
+        assert_array_almost_equal(h, taps)
+
+    def test05(self):
+        """Test firwin2 for calculating Type IV filters."""
+        ntaps = 1500
+
+        freq = [0.0, 1.0]
+        gain = [0.0, 1.0]
+        taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
+        assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1])
+
+        freqs, response = freqz(taps, worN=2048)
+        assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4)
+
+    def test06(self):
+        """Test firwin2 for calculating Type III filters."""
+        ntaps = 1501
+
+        freq = [0.0, 0.5, 0.55, 1.0]
+        gain = [0.0, 0.5, 0.0, 0.0]
+        taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
+        assert_equal(taps[ntaps // 2], 0.0)
+        assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1])
+
+        freqs, response1 = freqz(taps, worN=2048)
+        response2 = np.interp(freqs / np.pi, freq, gain)
+        assert_array_almost_equal(abs(response1), response2, decimal=3)
+
+    def test_fs_nyq(self):
+        taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
+        taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0)
+        assert_array_almost_equal(taps1, taps2)
+        taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], nyq=60.0)
+        assert_array_almost_equal(taps1, taps2)
+
+
+class TestRemez(object):
+
+    def test_bad_args(self):
+        assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
+
+    def test_hilbert(self):
+        N = 11  # number of taps in the filter
+        a = 0.1  # width of the transition band
+
+        # design a unity-gain Hilbert bandpass filter from a to 0.5-a
+        h = remez(11, [a, 0.5-a], [1], type='hilbert')
+
+        # make sure the filter has the correct # of taps
+        assert_(len(h) == N, "Number of Taps")
+
+        # make sure it is type III (anti-symmetric tap coefficients)
+        assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
+
+        # Since the requested response is symmetric, all even coefficients
+        # should be zero (or in this case really small)
+        assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero")
+
+        # now check the frequency response
+        w, H = freqz(h, 1)
+        f = w/2/np.pi
+        Hmag = abs(H)
+
+        # it should have a zero at 0 and pi (in this case close to zero)
+        assert_((Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi")
+
+        # check that the pass band is close to unity
+        idx = np.logical_and(f > a, f < 0.5-a)
+        assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity")
+
+    def test_compare(self):
+        # test comparison to MATLAB
+        k = [0.024590270518440, -0.041314581814658, -0.075943803756711,
+             -0.003530911231040, 0.193140296954975, 0.373400753484939,
+             0.373400753484939, 0.193140296954975, -0.003530911231040,
+             -0.075943803756711, -0.041314581814658, 0.024590270518440]
+        h = remez(12, [0, 0.3, 0.5, 1], [1, 0], Hz=2.)
+ assert_allclose(h, k) + h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.) + assert_allclose(h, k) + + h = [-0.038976016082299, 0.018704846485491, -0.014644062687875, + 0.002879152556419, 0.016849978528150, -0.043276706138248, + 0.073641298245579, -0.103908158578635, 0.129770906801075, + -0.147163447297124, 0.153302248456347, -0.147163447297124, + 0.129770906801075, -0.103908158578635, 0.073641298245579, + -0.043276706138248, 0.016849978528150, 0.002879152556419, + -0.014644062687875, 0.018704846485491, -0.038976016082299] + assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], Hz=2.), h) + assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h) + + +class TestFirls(object): + + def test_bad_args(self): + # even numtaps + assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0]) + # odd bands + assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0]) + # len(bands) != len(desired) + assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0]) + # non-monotonic bands + assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0]) + assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4) + assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4) + assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4) + # negative desired + assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1]) + # len(weight) != len(pairs) + assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [1, 2]) + # negative weight + assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [-1]) + + def test_firls(self): + N = 11 # number of taps in the filter + a = 0.1 # width of the transition band + + # design a halfband symmetric low-pass filter + h = firls(11, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0) + + # make sure the filter has correct # of taps + assert_equal(len(h), N) + + # make sure it is symmetric + midx = (N-1) // 2 + assert_array_almost_equal(h[:midx], h[:-midx-1:-1]) + + # make sure the center tap is 0.5 + assert_almost_equal(h[midx], 0.5) + + # For halfband symmetric, odd coefficients (except the center) + # should be zero (really small) + hodd = np.hstack((h[1:midx:2], h[-midx+1::2])) + assert_array_almost_equal(hodd, 0) + + # now check the frequency response + w, H = freqz(h, 1) + f = w/2/np.pi + Hmag = np.abs(H) + + # check that the pass band is close to unity + idx = np.logical_and(f > 0, f < a) + assert_array_almost_equal(Hmag[idx], 1, decimal=3) + + # check that the stop band is close to zero + idx = np.logical_and(f > 0.5-a, f < 0.5) + assert_array_almost_equal(Hmag[idx], 0, decimal=3) + + def test_compare(self): + # compare to OCTAVE output + taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], [1, 2]) + # >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]); + known_taps = [-6.26930101730182e-04, -1.03354450635036e-01, + -9.81576747564301e-03, 3.17271686090449e-01, + 5.11409425599933e-01, 3.17271686090449e-01, + -9.81576747564301e-03, -1.03354450635036e-01, + -6.26930101730182e-04] + assert_allclose(taps, known_taps) + + # compare to MATLAB output + taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], [1, 2]) + # >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]); + known_taps = [ + 0.058545300496815, -0.014233383714318, -0.104688258464392, + 0.012403323025279, 0.317930861136062, 0.488047220029700, + 0.317930861136062, 0.012403323025279, -0.104688258464392, + -0.014233383714318, 0.058545300496815] + assert_allclose(taps, known_taps) + + # With linear changes: + taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20) + # >> taps = firls(6, [0, 0.1, 
0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0]) + known_taps = [ + 1.156090832768218, -4.1385894727395849, 7.5288619164321826, + -8.5530572592947856, 7.5288619164321826, -4.1385894727395849, + 1.156090832768218] + assert_allclose(taps, known_taps) + + taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], nyq=10) + assert_allclose(taps, known_taps) + + +class TestMinimumPhase(object): + + def test_bad_args(self): + # not enough taps + assert_raises(ValueError, minimum_phase, [1.]) + assert_raises(ValueError, minimum_phase, [1., 1.]) + assert_raises(ValueError, minimum_phase, np.ones(10) * 1j) + assert_raises(ValueError, minimum_phase, 'foo') + assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8) + assert_raises(ValueError, minimum_phase, np.ones(10), method='foo') + assert_warns(RuntimeWarning, minimum_phase, np.arange(3)) + + def test_homomorphic(self): + # check that it can recover frequency responses of arbitrary + # linear-phase filters + + # for some cases we can get the actual filter back + h = [1, -1] + h_new = minimum_phase(np.convolve(h, h[::-1])) + assert_allclose(h_new, h, rtol=0.05) + + # but in general we only guarantee we get the magnitude back + rng = np.random.RandomState(0) + for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101): + h = rng.randn(n) + h_new = minimum_phase(np.convolve(h, h[::-1])) + assert_allclose(np.abs(np.fft.fft(h_new)), + np.abs(np.fft.fft(h)), rtol=1e-4) + + def test_hilbert(self): + # compare to MATLAB output of reference implementation + + # f=[0 0.3 0.5 1]; + # a=[1 1 0 0]; + # h=remez(11,f,a); + h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.) + k = [0.349585548646686, 0.373552164395447, 0.326082685363438, + 0.077152207480935, -0.129943946349364, -0.059355880509749] + m = minimum_phase(h, 'hilbert') + assert_allclose(m, k, rtol=2e-3) + + # f=[0 0.8 0.9 1]; + # a=[0 0 1 1]; + # h=remez(20,f,a); + h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.) 
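# A standalone aside on minimum_phase, tested above: it converts a
# linear-phase FIR filter into a minimum-phase one with (len(h) + 1) // 2
# taps and approximately the same magnitude response. The 81-tap low-pass
# prototype is illustrative.
import numpy as np
from scipy.signal import firwin, minimum_phase

h_lin = firwin(81, 0.3)        # linear-phase prototype
h_min = minimum_phase(h_lin)   # homomorphic method by default
print(len(h_lin), len(h_min))  # 81 41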
+ k = [0.232486803906329, -0.133551833687071, 0.151871456867244, + -0.157957283165866, 0.151739294892963, -0.129293146705090, + 0.100787844523204, -0.065832656741252, 0.035361328741024, + -0.014977068692269, -0.158416139047557] + m = minimum_phase(h, 'hilbert', n_fft=2**19) + assert_allclose(m, k, rtol=2e-3) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_fir_filter_design.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_fir_filter_design.pyc new file mode 100644 index 0000000..6e5e7eb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_fir_filter_design.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_ltisys.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_ltisys.py new file mode 100644 index 0000000..68a099b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_ltisys.py @@ -0,0 +1,1262 @@ +from __future__ import division, print_function, absolute_import + +import warnings + +import numpy as np +from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose, + assert_) +from pytest import raises as assert_raises + +from scipy._lib._numpy_compat import suppress_warnings +from scipy.signal import (ss2tf, tf2ss, lsim2, impulse2, step2, lti, + dlti, bode, freqresp, lsim, impulse, step, + abcd_normalize, place_poles, + TransferFunction, StateSpace, ZerosPolesGain) +from scipy.signal.filter_design import BadCoefficients +import scipy.linalg as linalg + +import scipy._lib.six as six + + +def _assert_poles_close(P1,P2, rtol=1e-8, atol=1e-8): + """ + Check each pole in P1 is close to a pole in P2 with a 1e-8 + relative tolerance or 1e-8 absolute tolerance (useful for zero poles). + These tolerances are very strict but the systems tested are known to + accept these poles so we should not be far from what is requested. + """ + P2 = P2.copy() + for p1 in P1: + found = False + for p2_idx in range(P2.shape[0]): + if np.allclose([np.real(p1), np.imag(p1)], + [np.real(P2[p2_idx]), np.imag(P2[p2_idx])], + rtol, atol): + found = True + np.delete(P2, p2_idx) + break + if not found: + raise ValueError("Can't find pole " + str(p1) + " in " + str(P2)) + + +class TestPlacePoles(object): + + def _check(self, A, B, P, **kwargs): + """ + Perform the most common tests on the poles computed by place_poles + and return the Bunch object for further specific tests + """ + fsf = place_poles(A, B, P, **kwargs) + expected, _ = np.linalg.eig(A - np.dot(B, fsf.gain_matrix)) + _assert_poles_close(expected,fsf.requested_poles) + _assert_poles_close(expected,fsf.computed_poles) + _assert_poles_close(P,fsf.requested_poles) + return fsf + + def test_real(self): + # Test real pole placement using KNV and YT0 algorithm and example 1 in + # section 4 of the reference publication (see place_poles docstring) + A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, + 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, + 1.343, -2.104]).reshape(4, 4) + B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146,0]).reshape(4, 2) + P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + + # Check that both KNV and YT compute correct K matrix + self._check(A, B, P, method='KNV0') + self._check(A, B, P, method='YT') + + # Try to reach the specific case in _YT_real where two singular + # values are almost equal. 
This is to improve code coverage but I + # have no way to be sure this code is really reached + + # on some architectures this can lead to a RuntimeWarning invalid + # value in divide (see gh-7590), so suppress it for now + with np.errstate(invalid='ignore'): + self._check(A, B, (2,2,3,3)) + + def test_complex(self): + # Test complex pole placement on a linearized car model, taken from L. + # Jaulin, Automatique pour la robotique, Cours et Exercices, iSTE + # editions p 184/185 + A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4) + B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2) + # Test complex poles on YT + P = np.array([-3, -1, -2-1j, -2+1j]) + self._check(A, B, P) + + # Try to reach the specific case in _YT_complex where two singular + # values are almost equal. This is to improve code coverage but I + # have no way to be sure this code is really reached + + P = [0-1e-6j,0+1e-6j,-10,10] + self._check(A, B, P, maxiter=1000) + + # Try to reach the specific case in _YT_complex where the rank two + # update yields two null vectors. This test was found via Monte Carlo. + + A = np.array( + [-2148,-2902, -2267, -598, -1722, -1829, -165, -283, -2546, + -167, -754, -2285, -543, -1700, -584, -2978, -925, -1300, + -1583, -984, -386, -2650, -764, -897, -517, -1598, 2, -1709, + -291, -338, -153, -1804, -1106, -1168, -867, -2297] + ).reshape(6,6) + + B = np.array( + [-108, -374, -524, -1285, -1232, -161, -1204, -672, -637, + -15, -483, -23, -931, -780, -1245, -1129, -1290, -1502, + -952, -1374, -62, -964, -930, -939, -792, -756, -1437, + -491, -1543, -686] + ).reshape(6,5) + P = [-25.-29.j, -25.+29.j, 31.-42.j, 31.+42.j, 33.-41.j, 33.+41.j] + self._check(A, B, P) + + # Use a lot of poles to go through all cases for update_order + # in _YT_loop + + big_A = np.ones((11,11))-np.eye(11) + big_B = np.ones((11,10))-np.diag([1]*10,1)[:,1:] + big_A[:6,:6] = A + big_B[:6,:5] = B + + P = [-10,-20,-30,40,50,60,70,-20-5j,-20+5j,5+3j,5-3j] + self._check(big_A, big_B, P) + + #check with only complex poles and only real poles + P = [-10,-20,-30,-40,-50,-60,-70,-80,-90,-100] + self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) + P = [-10+10j,-20+20j,-30+30j,-40+40j,-50+50j, + -10-10j,-20-20j,-30-30j,-40-40j,-50-50j] + self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) + + # need a 5x5 array to ensure YT handles properly when there + # is only one real pole and several complex + A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0, + 0,0,0,5,0,0,0,0,9]).reshape(5,5) + B = np.array([0,0,0,0,1,0,0,1,2,3]).reshape(5,2) + P = np.array([-2, -3+1j, -3-1j, -1+1j, -1-1j]) + place_poles(A, B, P) + + # same test with an odd number of real poles > 1 + # this is another specific case of YT + P = np.array([-2, -3, -4, -1+1j, -1-1j]) + self._check(A, B, P) + + def test_tricky_B(self): + # check we handle as we should the 1 column B matrices and + # n column B matrices (with n such as shape(A)=(n, n)) + A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, + 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, + 1.343, -2.104]).reshape(4, 4) + B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146, 0, 1, 2, 3, 4, + 5, 6, 7, 8]).reshape(4, 4) + + # KNV or YT are not called here, it's a specific case with only + # one unique solution + P = np.array([-0.2, -0.5, -5.0566, -8.6659]) + fsf = self._check(A, B, P) + # rtol and nb_iter should be set to np.nan as the identity can be + # used as transfer matrix + assert_equal(fsf.rtol, np.nan) + assert_equal(fsf.nb_iter, np.nan) + + # check with complex poles too as they trigger a 
specific case in
+        # the specific case :-)
+        P = np.array((-2+1j,-2-1j,-3,-2))
+        fsf = self._check(A, B, P)
+        assert_equal(fsf.rtol, np.nan)
+        assert_equal(fsf.nb_iter, np.nan)
+
+        # now test with a B matrix that has only one column (no optimisation)
+        B = B[:,0].reshape(4,1)
+        P = np.array((-2+1j,-2-1j,-3,-2))
+        fsf = self._check(A, B, P)
+
+        # we can't optimize anything, so check that rtol and nb_iter are set
+        # to 0 as expected
+        assert_equal(fsf.rtol, 0)
+        assert_equal(fsf.nb_iter, 0)
+
+    def test_errors(self):
+        # Test input mistakes from the user
+        A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4)
+        B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2)
+
+        # should fail as the method keyword is invalid
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
+                      method="foo")
+
+        # should fail as the poles are not a 1D array
+        assert_raises(ValueError, place_poles, A, B,
+                      np.array((-2.1,-2.2,-2.3,-2.4)).reshape(4,1))
+
+        # should fail as A is not a 2D array
+        assert_raises(ValueError, place_poles, A[:,:,np.newaxis], B,
+                      (-2.1,-2.2,-2.3,-2.4))
+
+        # should fail as B is not a 2D array
+        assert_raises(ValueError, place_poles, A, B[:,:,np.newaxis],
+                      (-2.1,-2.2,-2.3,-2.4))
+
+        # should fail as there are too many poles
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4,-3))
+
+        # should fail as there are not enough poles
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3))
+
+        # should fail as the rtol is greater than 1
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
+                      rtol=42)
+
+        # should fail as maxiter is smaller than 1
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
+                      maxiter=-42)
+
+        # should fail as rank(B) is two
+        assert_raises(ValueError, place_poles, A, B, (-2,-2,-2,-2))
+
+        # should fail as the system is uncontrollable
+        assert_raises(ValueError, place_poles, np.ones((4,4)),
+                      np.ones((4,2)), (1,2,3,4))
+
+        # Should not raise ValueError as the poles can be placed, but should
+        # raise a warning as the convergence is not reached
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            fsf = place_poles(A, B, (-1,-2,-3,-4), rtol=1e-16, maxiter=42)
+            assert_(len(w) == 1)
+            assert_(issubclass(w[-1].category, UserWarning))
+            assert_("Convergence was not reached after maxiter iterations"
+                    in str(w[-1].message))
+            assert_equal(fsf.nb_iter, 42)
+
+        # should fail as a complex pole is missing its conjugate
+        assert_raises(ValueError, place_poles, A, B, (-2+1j,-2-1j,-2+3j,-2))
+
+        # should fail as A is not square
+        assert_raises(ValueError, place_poles, A[:,:3], B, (-2,-3,-4,-5))
+
+        # should fail as B does not have the same number of rows as A
+        assert_raises(ValueError, place_poles, A, B[:3,:], (-2,-3,-4,-5))
+
+        # should fail as KNV0 does not support complex poles
+        assert_raises(ValueError, place_poles, A, B,
+                      (-2+1j,-2-1j,-2+3j,-2-3j), method="KNV0")
+
+
+class TestSS2TF:
+
+    def check_matrix_shapes(self, p, q, r):
+        ss2tf(np.zeros((p, p)),
+              np.zeros((p, q)),
+              np.zeros((r, p)),
+              np.zeros((r, q)), 0)
+
+    def test_shapes(self):
+        # Each tuple holds:
+        # number of states, number of inputs, number of outputs
+        for p, q, r in [(3, 3, 3), (1, 3, 3), (1, 1, 1)]:
+            self.check_matrix_shapes(p, q, r)
+
+    def test_basic(self):
+        # Test a round trip through tf2ss and ss2tf.
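# A standalone aside summarizing what TestPlacePoles verifies above:
# place_poles returns a gain matrix K such that the eigenvalues of A - B.K
# are the requested poles. The double-integrator A, B and the poles -2, -3
# are illustrative values only.
import numpy as np
from scipy.signal import place_poles

A_pp = np.array([[0.0, 1.0], [0.0, 0.0]])
B_pp = np.array([[0.0], [1.0]])
fsf_pp = place_poles(A_pp, B_pp, (-2.0, -3.0))
print(np.linalg.eigvals(A_pp - np.dot(B_pp, fsf_pp.gain_matrix)))  # ~[-2, -3]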
+ b = np.array([1.0, 3.0, 5.0]) + a = np.array([1.0, 2.0, 3.0]) + + A, B, C, D = tf2ss(b, a) + assert_allclose(A, [[-2, -3], [1, 0]], rtol=1e-13) + assert_allclose(B, [[1], [0]], rtol=1e-13) + assert_allclose(C, [[1, 2]], rtol=1e-13) + assert_allclose(D, [[1]], rtol=1e-14) + + bb, aa = ss2tf(A, B, C, D) + assert_allclose(bb[0], b, rtol=1e-13) + assert_allclose(aa, a, rtol=1e-13) + + def test_zero_order_round_trip(self): + # See gh-5760 + tf = (2, 1) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[0]], rtol=1e-13) + assert_allclose(B, [[0]], rtol=1e-13) + assert_allclose(C, [[0]], rtol=1e-13) + assert_allclose(D, [[2]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[2, 0]], rtol=1e-13) + assert_allclose(den, [1, 0], rtol=1e-13) + + tf = ([[5], [2]], 1) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[0]], rtol=1e-13) + assert_allclose(B, [[0]], rtol=1e-13) + assert_allclose(C, [[0], [0]], rtol=1e-13) + assert_allclose(D, [[5], [2]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[5, 0], [2, 0]], rtol=1e-13) + assert_allclose(den, [1, 0], rtol=1e-13) + + def test_simo_round_trip(self): + # See gh-5753 + tf = ([[1, 2], [1, 1]], [1, 2]) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[-2]], rtol=1e-13) + assert_allclose(B, [[1]], rtol=1e-13) + assert_allclose(C, [[0], [-1]], rtol=1e-13) + assert_allclose(D, [[1], [1]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[1, 2], [1, 1]], rtol=1e-13) + assert_allclose(den, [1, 2], rtol=1e-13) + + tf = ([[1, 0, 1], [1, 1, 1]], [1, 1, 1]) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[-1, -1], [1, 0]], rtol=1e-13) + assert_allclose(B, [[1], [0]], rtol=1e-13) + assert_allclose(C, [[-1, 0], [0, 0]], rtol=1e-13) + assert_allclose(D, [[1], [1]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[1, 0, 1], [1, 1, 1]], rtol=1e-13) + assert_allclose(den, [1, 1, 1], rtol=1e-13) + + tf = ([[1, 2, 3], [1, 2, 3]], [1, 2, 3, 4]) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[-2, -3, -4], [1, 0, 0], [0, 1, 0]], rtol=1e-13) + assert_allclose(B, [[1], [0], [0]], rtol=1e-13) + assert_allclose(C, [[1, 2, 3], [1, 2, 3]], rtol=1e-13) + assert_allclose(D, [[0], [0]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[0, 1, 2, 3], [0, 1, 2, 3]], rtol=1e-13) + assert_allclose(den, [1, 2, 3, 4], rtol=1e-13) + + tf = ([1, [2, 3]], [1, 6]) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[-6]], rtol=1e-31) + assert_allclose(B, [[1]], rtol=1e-31) + assert_allclose(C, [[1], [-9]], rtol=1e-31) + assert_allclose(D, [[0], [2]], rtol=1e-31) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[0, 1], [2, 3]], rtol=1e-13) + assert_allclose(den, [1, 6], rtol=1e-13) + + tf = ([[1, -3], [1, 2, 3]], [1, 6, 5]) + A, B, C, D = tf2ss(*tf) + assert_allclose(A, [[-6, -5], [1, 0]], rtol=1e-13) + assert_allclose(B, [[1], [0]], rtol=1e-13) + assert_allclose(C, [[1, -3], [-4, -2]], rtol=1e-13) + assert_allclose(D, [[0], [1]], rtol=1e-13) + + num, den = ss2tf(A, B, C, D) + assert_allclose(num, [[0, 1, -3], [1, 2, 3]], rtol=1e-13) + assert_allclose(den, [1, 6, 5], rtol=1e-13) + + def test_multioutput(self): + # Regression test for gh-2669. 
+ + # 4 states + A = np.array([[-1.0, 0.0, 1.0, 0.0], + [-1.0, 0.0, 2.0, 0.0], + [-4.0, 0.0, 3.0, 0.0], + [-8.0, 8.0, 0.0, 4.0]]) + + # 1 input + B = np.array([[0.3], + [0.0], + [7.0], + [0.0]]) + + # 3 outputs + C = np.array([[0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [8.0, 8.0, 0.0, 0.0]]) + + D = np.array([[0.0], + [0.0], + [1.0]]) + + # Get the transfer functions for all the outputs in one call. + b_all, a = ss2tf(A, B, C, D) + + # Get the transfer functions for each output separately. + b0, a0 = ss2tf(A, B, C[0], D[0]) + b1, a1 = ss2tf(A, B, C[1], D[1]) + b2, a2 = ss2tf(A, B, C[2], D[2]) + + # Check that we got the same results. + assert_allclose(a0, a, rtol=1e-13) + assert_allclose(a1, a, rtol=1e-13) + assert_allclose(a2, a, rtol=1e-13) + assert_allclose(b_all, np.vstack((b0, b1, b2)), rtol=1e-13, atol=1e-14) + + +class TestLsim(object): + def lti_nowarn(self, *args): + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(*args) + return system + + def test_first_order(self): + # y' = -y + # exact solution is y(t) = exp(-t) + system = self.lti_nowarn(-1.,1.,1.,0.) + t = np.linspace(0,5) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[1.0]) + expected_x = np.exp(-tout) + assert_almost_equal(x, expected_x) + assert_almost_equal(y, expected_x) + + def test_integrator(self): + # integrator: y' = u + system = self.lti_nowarn(0., 1., 1., 0.) + t = np.linspace(0,5) + u = t + tout, y, x = lsim(system, u, t) + expected_x = 0.5 * tout**2 + assert_almost_equal(x, expected_x) + assert_almost_equal(y, expected_x) + + def test_double_integrator(self): + # double integrator: y'' = 2u + A = np.mat("0. 1.; 0. 0.") + B = np.mat("0.; 1.") + C = np.mat("2. 0.") + system = self.lti_nowarn(A, B, C, 0.) + t = np.linspace(0,5) + u = np.ones_like(t) + tout, y, x = lsim(system, u, t) + expected_x = np.transpose(np.array([0.5 * tout**2, tout])) + expected_y = tout**2 + assert_almost_equal(x, expected_x) + assert_almost_equal(y, expected_y) + + def test_jordan_block(self): + # Non-diagonalizable A matrix + # x1' + x1 = x2 + # x2' + x2 = u + # y = x1 + # Exact solution with u = 0 is y(t) = t exp(-t) + A = np.mat("-1. 1.; 0. -1.") + B = np.mat("0.; 1.") + C = np.mat("1. 0.") + system = self.lti_nowarn(A, B, C, 0.) + t = np.linspace(0,5) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[0.0, 1.0]) + expected_y = tout * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_miso(self): + # A system with two state variables, two inputs, and one output. + A = np.array([[-1.0, 0.0], [0.0, -2.0]]) + B = np.array([[1.0, 0.0], [0.0, 1.0]]) + C = np.array([1.0, 0.0]) + D = np.zeros((1,2)) + system = self.lti_nowarn(A, B, C, D) + + t = np.linspace(0, 5.0, 101) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[1.0, 1.0]) + expected_y = np.exp(-tout) + expected_x0 = np.exp(-tout) + expected_x1 = np.exp(-2.0*tout) + assert_almost_equal(y, expected_y) + assert_almost_equal(x[:,0], expected_x0) + assert_almost_equal(x[:,1], expected_x1) + + def test_nonzero_initial_time(self): + system = self.lti_nowarn(-1.,1.,1.,0.) + t = np.linspace(1,2) + u = np.zeros_like(t) + tout, y, x = lsim(system, u, t, X0=[1.0]) + expected_y = np.exp(-tout) + assert_almost_equal(y, expected_y) + + +class Test_lsim2(object): + + def test_01(self): + t = np.linspace(0,10,1001) + u = np.zeros_like(t) + # First order system: x'(t) + x(t) = u(t), x(0) = 1. + # Exact solution is x(t) = exp(-t). 
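# A standalone aside restating TestLsim.test_first_order above in transfer-
# function form: simulating x' = -x (i.e. H(s) = 1/(s + 1)) with zero input
# and x(0) = 1 should reproduce exp(-t). The time grid is illustrative.
import numpy as np
from scipy.signal import lsim

t_l = np.linspace(0, 5, 101)
u_l = np.zeros_like(t_l)
tout_l, y_l, x_l = lsim(([1.0], [1.0, 1.0]), u_l, t_l, X0=[1.0])
print(np.allclose(y_l, np.exp(-tout_l)))  # True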
+ system = ([1.0],[1.0,1.0]) + tout, y, x = lsim2(system, u, t, X0=[1.0]) + expected_x = np.exp(-tout) + assert_almost_equal(x[:,0], expected_x) + + def test_02(self): + t = np.array([0.0, 1.0, 1.0, 3.0]) + u = np.array([0.0, 0.0, 1.0, 1.0]) + # Simple integrator: x'(t) = u(t) + system = ([1.0],[1.0,0.0]) + tout, y, x = lsim2(system, u, t, X0=[1.0]) + expected_x = np.maximum(1.0, tout) + assert_almost_equal(x[:,0], expected_x) + + def test_03(self): + t = np.array([0.0, 1.0, 1.0, 1.1, 1.1, 2.0]) + u = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0]) + # Simple integrator: x'(t) = u(t) + system = ([1.0],[1.0, 0.0]) + tout, y, x = lsim2(system, u, t, hmax=0.01) + expected_x = np.array([0.0, 0.0, 0.0, 0.1, 0.1, 0.1]) + assert_almost_equal(x[:,0], expected_x) + + def test_04(self): + t = np.linspace(0, 10, 1001) + u = np.zeros_like(t) + # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = 0. + # With initial conditions x(0)=1.0 and x'(t)=0.0, the exact solution + # is (1-t)*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y, x = lsim2(system, u, t, X0=[1.0, 0.0]) + expected_x = (1.0 - tout) * np.exp(-tout) + assert_almost_equal(x[:,0], expected_x) + + def test_05(self): + # The call to lsim2 triggers a "BadCoefficients" warning from + # scipy.signal.filter_design, but the test passes. I think the warning + # is related to the incomplete handling of multi-input systems in + # scipy.signal. + + # A system with two state variables, two inputs, and one output. + A = np.array([[-1.0, 0.0], [0.0, -2.0]]) + B = np.array([[1.0, 0.0], [0.0, 1.0]]) + C = np.array([1.0, 0.0]) + D = np.zeros((1, 2)) + + t = np.linspace(0, 10.0, 101) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + tout, y, x = lsim2((A,B,C,D), T=t, X0=[1.0, 1.0]) + expected_y = np.exp(-tout) + expected_x0 = np.exp(-tout) + expected_x1 = np.exp(-2.0 * tout) + assert_almost_equal(y, expected_y) + assert_almost_equal(x[:,0], expected_x0) + assert_almost_equal(x[:,1], expected_x1) + + def test_06(self): + # Test use of the default values of the arguments `T` and `U`. + # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = 0. + # With initial conditions x(0)=1.0 and x'(t)=0.0, the exact solution + # is (1-t)*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y, x = lsim2(system, X0=[1.0, 0.0]) + expected_x = (1.0 - tout) * np.exp(-tout) + assert_almost_equal(x[:,0], expected_x) + + +class _TestImpulseFuncs(object): + # Common tests for impulse/impulse2 (= self.func) + + def test_01(self): + # First order system: x'(t) + x(t) = u(t) + # Exact impulse response is x(t) = exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = self.func(system) + expected_y = np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_02(self): + # Specify the desired time values for the output. + + # First order system: x'(t) + x(t) = u(t) + # Exact impulse response is x(t) = exp(-t). + system = ([1.0], [1.0,1.0]) + n = 21 + t = np.linspace(0, 2.0, n) + tout, y = self.func(system, T=t) + assert_equal(tout.shape, (n,)) + assert_almost_equal(tout, t) + expected_y = np.exp(-t) + assert_almost_equal(y, expected_y) + + def test_03(self): + # Specify an initial condition as a scalar. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact impulse response is x(t) = 4*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = self.func(system, X0=3.0) + expected_y = 4.0 * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_04(self): + # Specify an initial condition as a list. 
+ + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact impulse response is x(t) = 4*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = self.func(system, X0=[3.0]) + expected_y = 4.0 * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_05(self): + # Simple integrator: x'(t) = u(t) + system = ([1.0], [1.0,0.0]) + tout, y = self.func(system) + expected_y = np.ones_like(tout) + assert_almost_equal(y, expected_y) + + def test_06(self): + # Second order system with a repeated root: + # x''(t) + 2*x(t) + x(t) = u(t) + # The exact impulse response is t*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = self.func(system) + expected_y = tout * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_array_like(self): + # Test that function can accept sequences, scalars. + system = ([1.0], [1.0, 2.0, 1.0]) + # TODO: add meaningful test where X0 is a list + tout, y = self.func(system, X0=[3], T=[5, 6]) + tout, y = self.func(system, X0=[3], T=[5]) + + def test_array_like2(self): + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = self.func(system, X0=3, T=5) + + +class TestImpulse2(_TestImpulseFuncs): + def setup_method(self): + self.func = impulse2 + + +class TestImpulse(_TestImpulseFuncs): + def setup_method(self): + self.func = impulse + + +class _TestStepFuncs(object): + def test_01(self): + # First order system: x'(t) + x(t) = u(t) + # Exact step response is x(t) = 1 - exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = self.func(system) + expected_y = 1.0 - np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_02(self): + # Specify the desired time values for the output. + + # First order system: x'(t) + x(t) = u(t) + # Exact step response is x(t) = 1 - exp(-t). + system = ([1.0], [1.0,1.0]) + n = 21 + t = np.linspace(0, 2.0, n) + tout, y = self.func(system, T=t) + assert_equal(tout.shape, (n,)) + assert_almost_equal(tout, t) + expected_y = 1 - np.exp(-t) + assert_almost_equal(y, expected_y) + + def test_03(self): + # Specify an initial condition as a scalar. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact step response is x(t) = 1 + 2*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = self.func(system, X0=3.0) + expected_y = 1 + 2.0*np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_04(self): + # Specify an initial condition as a list. + + # First order system: x'(t) + x(t) = u(t), x(0)=3.0 + # Exact step response is x(t) = 1 + 2*exp(-t). + system = ([1.0], [1.0,1.0]) + tout, y = self.func(system, X0=[3.0]) + expected_y = 1 + 2.0*np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_05(self): + # Simple integrator: x'(t) = u(t) + # Exact step response is x(t) = t. + system = ([1.0],[1.0,0.0]) + tout, y = self.func(system) + expected_y = tout + assert_almost_equal(y, expected_y) + + def test_06(self): + # Second order system with a repeated root: + # x''(t) + 2*x(t) + x(t) = u(t) + # The exact step response is 1 - (1 + t)*exp(-t). + system = ([1.0], [1.0, 2.0, 1.0]) + tout, y = self.func(system) + expected_y = 1 - (1 + tout) * np.exp(-tout) + assert_almost_equal(y, expected_y) + + def test_array_like(self): + # Test that function can accept sequences, scalars. + system = ([1.0], [1.0, 2.0, 1.0]) + # TODO: add meaningful test where X0 is a list + tout, y = self.func(system, T=[5, 6]) + + +class TestStep2(_TestStepFuncs): + def setup_method(self): + self.func = step2 + + def test_05(self): + # This test is almost the same as the one it overwrites in the base + # class. 
The only difference is the tolerances passed to step2: + # the default tolerances are not accurate enough for this test + + # Simple integrator: x'(t) = u(t) + # Exact step response is x(t) = t. + system = ([1.0], [1.0,0.0]) + tout, y = self.func(system, atol=1e-10, rtol=1e-8) + expected_y = tout + assert_almost_equal(y, expected_y) + + +class TestStep(_TestStepFuncs): + def setup_method(self): + self.func = step + + def test_complex_input(self): + # Test that complex input doesn't raise an error. + # `step` doesn't seem to have been designed for complex input, but this + # works and may be used, so add regression test. See gh-2654. + step(([], [-1], 1+0j)) + + +class TestLti(object): + def test_lti_instantiation(self): + # Test that lti can be instantiated with sequences, scalars. + # See PR-225. + + # TransferFunction + s = lti([1], [-1]) + assert_(isinstance(s, TransferFunction)) + assert_(isinstance(s, lti)) + assert_(not isinstance(s, dlti)) + assert_(s.dt is None) + + # ZerosPolesGain + s = lti(np.array([]), np.array([-1]), 1) + assert_(isinstance(s, ZerosPolesGain)) + assert_(isinstance(s, lti)) + assert_(not isinstance(s, dlti)) + assert_(s.dt is None) + + # StateSpace + s = lti([], [-1], 1) + s = lti([1], [-1], 1, 3) + assert_(isinstance(s, StateSpace)) + assert_(isinstance(s, lti)) + assert_(not isinstance(s, dlti)) + assert_(s.dt is None) + + +class TestStateSpace(object): + def test_initialization(self): + # Check that all initializations work + s = StateSpace(1, 1, 1, 1) + s = StateSpace([1], [2], [3], [4]) + s = StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]), + np.array([[1, 0]]), np.array([[0]])) + + def test_conversion(self): + # Check the conversion functions + s = StateSpace(1, 2, 3, 4) + assert_(isinstance(s.to_ss(), StateSpace)) + assert_(isinstance(s.to_tf(), TransferFunction)) + assert_(isinstance(s.to_zpk(), ZerosPolesGain)) + + # Make sure copies work + assert_(StateSpace(s) is not s) + assert_(s.to_ss() is not s) + + def test_properties(self): + # Test setters/getters for cross class properties. 
# This implicitly tests to_tf() and to_zpk()
+
+        # Getters
+        s = StateSpace(1, 1, 1, 1)
+        assert_equal(s.poles, [1])
+        assert_equal(s.zeros, [0])
+        assert_(s.dt is None)
+
+    def test_operators(self):
+        # Test the +/-/* operators on systems
+
+        class BadType(object):
+            pass
+
+        s1 = StateSpace(np.array([[-0.5, 0.7], [0.3, -0.8]]),
+                        np.array([[1], [0]]),
+                        np.array([[1, 0]]),
+                        np.array([[0]]),
+                        )
+
+        s2 = StateSpace(np.array([[-0.2, -0.1], [0.4, -0.1]]),
+                        np.array([[1], [0]]),
+                        np.array([[1, 0]]),
+                        np.array([[0]])
+                        )
+
+        s_discrete = s1.to_discrete(0.1)
+        s2_discrete = s2.to_discrete(0.2)
+
+        # Impulse response
+        t = np.linspace(0, 1, 100)
+        u = np.zeros_like(t)
+        u[0] = 1
+
+        # Test multiplication
+        for typ in six.integer_types + (float, complex, np.float32,
+                                        np.complex128, np.array):
+            assert_allclose(lsim(typ(2) * s1, U=u, T=t)[1],
+                            typ(2) * lsim(s1, U=u, T=t)[1])
+
+            assert_allclose(lsim(s1 * typ(2), U=u, T=t)[1],
+                            lsim(s1, U=u, T=t)[1] * typ(2))
+
+            assert_allclose(lsim(s1 / typ(2), U=u, T=t)[1],
+                            lsim(s1, U=u, T=t)[1] / typ(2))
+
+            with assert_raises(TypeError):
+                typ(2) / s1
+
+        assert_allclose(lsim(s1 * 2, U=u, T=t)[1],
+                        lsim(s1, U=2 * u, T=t)[1])
+
+        assert_allclose(lsim(s1 * s2, U=u, T=t)[1],
+                        lsim(s1, U=lsim(s2, U=u, T=t)[1], T=t)[1],
+                        atol=1e-5)
+
+        with assert_raises(TypeError):
+            s1 / s1
+
+        with assert_raises(TypeError):
+            s1 * s_discrete
+
+        with assert_raises(TypeError):
+            # Check different discretization constants
+            s_discrete * s2_discrete
+
+        with assert_raises(TypeError):
+            s1 * BadType()
+
+        with assert_raises(TypeError):
+            BadType() * s1
+
+        with assert_raises(TypeError):
+            s1 / BadType()
+
+        with assert_raises(TypeError):
+            BadType() / s1
+
+        # Test addition
+        assert_allclose(lsim(s1 + 2, U=u, T=t)[1],
+                        2 * u + lsim(s1, U=u, T=t)[1])
+
+        # Check for dimension mismatch
+        with assert_raises(ValueError):
+            s1 + np.array([1, 2])
+
+        with assert_raises(ValueError):
+            np.array([1, 2]) + s1
+
+        with assert_raises(TypeError):
+            s1 + s_discrete
+
+        with assert_raises(ValueError):
+            s1 / np.array([[1, 2], [3, 4]])
+
+        with assert_raises(TypeError):
+            # Check different discretization constants
+            s_discrete + s2_discrete
+
+        with assert_raises(TypeError):
+            s1 + BadType()
+
+        with assert_raises(TypeError):
+            BadType() + s1
+
+        assert_allclose(lsim(s1 + s2, U=u, T=t)[1],
+                        lsim(s1, U=u, T=t)[1] + lsim(s2, U=u, T=t)[1])
+
+        # Test subtraction
+        assert_allclose(lsim(s1 - 2, U=u, T=t)[1],
+                        -2 * u + lsim(s1, U=u, T=t)[1])
+
+        assert_allclose(lsim(2 - s1, U=u, T=t)[1],
+                        2 * u + lsim(-s1, U=u, T=t)[1])
+
+        assert_allclose(lsim(s1 - s2, U=u, T=t)[1],
+                        lsim(s1, U=u, T=t)[1] - lsim(s2, U=u, T=t)[1])
+
+        with assert_raises(TypeError):
+            s1 - BadType()
+
+        with assert_raises(TypeError):
+            BadType() - s1
+
+
+class TestTransferFunction(object):
+    def test_initialization(self):
+        # Check that all initializations work
+        s = TransferFunction(1, 1)
+        s = TransferFunction([1], [2])
+        s = TransferFunction(np.array([1]), np.array([2]))
+
+    def test_conversion(self):
+        # Check the conversion functions
+        s = TransferFunction([1, 0], [1, -1])
+        assert_(isinstance(s.to_ss(), StateSpace))
+        assert_(isinstance(s.to_tf(), TransferFunction))
+        assert_(isinstance(s.to_zpk(), ZerosPolesGain))
+
+        # Make sure copies work
+        assert_(TransferFunction(s) is not s)
+        assert_(s.to_tf() is not s)
+
+    def test_properties(self):
+        # Test setters/getters for cross class properties.
+ # This implicitly tests to_ss() and to_zpk() + + # Getters + s = TransferFunction([1, 0], [1, -1]) + assert_equal(s.poles, [1]) + assert_equal(s.zeros, [0]) + + +class TestZerosPolesGain(object): + def test_initialization(self): + # Check that all initializations work + s = ZerosPolesGain(1, 1, 1) + s = ZerosPolesGain([1], [2], 1) + s = ZerosPolesGain(np.array([1]), np.array([2]), 1) + + def test_conversion(self): + #Check the conversion functions + s = ZerosPolesGain(1, 2, 3) + assert_(isinstance(s.to_ss(), StateSpace)) + assert_(isinstance(s.to_tf(), TransferFunction)) + assert_(isinstance(s.to_zpk(), ZerosPolesGain)) + + # Make sure copies work + assert_(ZerosPolesGain(s) is not s) + assert_(s.to_zpk() is not s) + + +class Test_abcd_normalize(object): + def setup_method(self): + self.A = np.array([[1.0, 2.0], [3.0, 4.0]]) + self.B = np.array([[-1.0], [5.0]]) + self.C = np.array([[4.0, 5.0]]) + self.D = np.array([[2.5]]) + + def test_no_matrix_fails(self): + assert_raises(ValueError, abcd_normalize) + + def test_A_nosquare_fails(self): + assert_raises(ValueError, abcd_normalize, [1, -1], + self.B, self.C, self.D) + + def test_AB_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], + self.C, self.D) + + def test_AC_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, self.B, + [[4.0], [5.0]], self.D) + + def test_CD_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, self.B, + self.C, [2.5, 0]) + + def test_BD_mismatch_fails(self): + assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], + self.C, self.D) + + def test_normalized_matrices_unchanged(self): + A, B, C, D = abcd_normalize(self.A, self.B, self.C, self.D) + assert_equal(A, self.A) + assert_equal(B, self.B) + assert_equal(C, self.C) + assert_equal(D, self.D) + + def test_shapes(self): + A, B, C, D = abcd_normalize(self.A, self.B, [1, 0], 0) + assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(A.shape[0], C.shape[1]) + assert_equal(C.shape[0], D.shape[0]) + assert_equal(B.shape[1], D.shape[1]) + + def test_zero_dimension_is_not_none1(self): + B_ = np.zeros((2, 0)) + D_ = np.zeros((0, 0)) + A, B, C, D = abcd_normalize(A=self.A, B=B_, D=D_) + assert_equal(A, self.A) + assert_equal(B, B_) + assert_equal(D, D_) + assert_equal(C.shape[0], D_.shape[0]) + assert_equal(C.shape[1], self.A.shape[0]) + + def test_zero_dimension_is_not_none2(self): + B_ = np.zeros((2, 0)) + C_ = np.zeros((0, 2)) + A, B, C, D = abcd_normalize(A=self.A, B=B_, C=C_) + assert_equal(A, self.A) + assert_equal(B, B_) + assert_equal(C, C_) + assert_equal(D.shape[0], C_.shape[0]) + assert_equal(D.shape[1], B_.shape[1]) + + def test_missing_A(self): + A, B, C, D = abcd_normalize(B=self.B, C=self.C, D=self.D) + assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(A.shape, (self.B.shape[0], self.B.shape[0])) + + def test_missing_B(self): + A, B, C, D = abcd_normalize(A=self.A, C=self.C, D=self.D) + assert_equal(B.shape[0], A.shape[0]) + assert_equal(B.shape[1], D.shape[1]) + assert_equal(B.shape, (self.A.shape[0], self.D.shape[1])) + + def test_missing_C(self): + A, B, C, D = abcd_normalize(A=self.A, B=self.B, D=self.D) + assert_equal(C.shape[0], D.shape[0]) + assert_equal(C.shape[1], A.shape[0]) + assert_equal(C.shape, (self.D.shape[0], self.A.shape[0])) + + def test_missing_D(self): + A, B, C, D = abcd_normalize(A=self.A, B=self.B, C=self.C) + assert_equal(D.shape[0], C.shape[0]) + assert_equal(D.shape[1], 
B.shape[1]) + assert_equal(D.shape, (self.C.shape[0], self.B.shape[1])) + + def test_missing_AB(self): + A, B, C, D = abcd_normalize(C=self.C, D=self.D) + assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(B.shape[1], D.shape[1]) + assert_equal(A.shape, (self.C.shape[1], self.C.shape[1])) + assert_equal(B.shape, (self.C.shape[1], self.D.shape[1])) + + def test_missing_AC(self): + A, B, C, D = abcd_normalize(B=self.B, D=self.D) + assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(C.shape[0], D.shape[0]) + assert_equal(C.shape[1], A.shape[0]) + assert_equal(A.shape, (self.B.shape[0], self.B.shape[0])) + assert_equal(C.shape, (self.D.shape[0], self.B.shape[0])) + + def test_missing_AD(self): + A, B, C, D = abcd_normalize(B=self.B, C=self.C) + assert_equal(A.shape[0], A.shape[1]) + assert_equal(A.shape[0], B.shape[0]) + assert_equal(D.shape[0], C.shape[0]) + assert_equal(D.shape[1], B.shape[1]) + assert_equal(A.shape, (self.B.shape[0], self.B.shape[0])) + assert_equal(D.shape, (self.C.shape[0], self.B.shape[1])) + + def test_missing_BC(self): + A, B, C, D = abcd_normalize(A=self.A, D=self.D) + assert_equal(B.shape[0], A.shape[0]) + assert_equal(B.shape[1], D.shape[1]) + assert_equal(C.shape[0], D.shape[0]) + assert_equal(C.shape[1], A.shape[0]) + assert_equal(B.shape, (self.A.shape[0], self.D.shape[1])) + assert_equal(C.shape, (self.D.shape[0], self.A.shape[0])) + + def test_missing_ABC_fails(self): + assert_raises(ValueError, abcd_normalize, D=self.D) + + def test_missing_BD_fails(self): + assert_raises(ValueError, abcd_normalize, A=self.A, C=self.C) + + def test_missing_CD_fails(self): + assert_raises(ValueError, abcd_normalize, A=self.A, B=self.B) + + +class Test_bode(object): + + def test_01(self): + # Test bode() magnitude calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 1 / (s + 1), + # cutoff: 1 rad/s, slope: -20 dB/decade + # H(s=0.1) ~= 0 dB + # H(s=1) ~= -3 dB + # H(s=10) ~= -20 dB + # H(s=100) ~= -40 dB + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + expected_mag = [0, -3, -20, -40] + assert_almost_equal(mag, expected_mag, decimal=1) + + def test_02(self): + # Test bode() phase calculation (manual sanity check). + # 1st order low-pass filter: H(s) = 1 / (s + 1), + # angle(H(s=0.1)) ~= -5.7 deg + # angle(H(s=1)) ~= -45 deg + # angle(H(s=10)) ~= -84.3 deg + system = lti([1], [1, 1]) + w = [0.1, 1, 10] + w, mag, phase = bode(system, w=w) + expected_phase = [-5.7, -45, -84.3] + assert_almost_equal(phase, expected_phase, decimal=1) + + def test_03(self): + # Test bode() magnitude calculation. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + jw = w * 1j + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + expected_mag = 20.0 * np.log10(abs(y)) + assert_almost_equal(mag, expected_mag) + + def test_04(self): + # Test bode() phase calculation. + # 1st order low-pass filter: H(s) = 1 / (s + 1) + system = lti([1], [1, 1]) + w = [0.1, 1, 10, 100] + w, mag, phase = bode(system, w=w) + jw = w * 1j + y = np.polyval(system.num, jw) / np.polyval(system.den, jw) + expected_phase = np.arctan2(y.imag, y.real) * 180.0 / np.pi + assert_almost_equal(phase, expected_phase) + + def test_05(self): + # Test that bode() finds a reasonable frequency range. 
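+        # (When no frequency vector is given, bode() picks a log-spaced grid
+        # around the system's pole/zero magnitudes; for the single pole at -1
+        # that works out to the decade span 0.01..10 asserted below.)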
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        system = lti([1], [1, 1])
+        n = 10
+        # Expected range is from 0.01 to 10.
+        expected_w = np.logspace(-2, 1, n)
+        w, mag, phase = bode(system, n=n)
+        assert_almost_equal(w, expected_w)
+
+    def test_06(self):
+        # Test that bode() doesn't fail on a system with a pole at 0.
+        # integrator, pole at zero: H(s) = 1 / s
+        system = lti([1], [1, 0])
+        w, mag, phase = bode(system, n=2)
+        assert_equal(w[0], 0.01)  # a fail would give not-a-number
+
+    def test_07(self):
+        # bode() should not fail on a system with pure imaginary poles.
+        # The test passes if bode doesn't raise an exception.
+        system = lti([1], [1, 0, 100])
+        w, mag, phase = bode(system, n=2)
+
+    def test_08(self):
+        # Test that bode() returns continuous phase; see issues/2331.
+        system = lti([], [-10, -30, -40, -60, -70], 1)
+        w, mag, phase = system.bode(w=np.logspace(-3, 40, 100))
+        assert_almost_equal(min(phase), -450, decimal=15)
+
+    def test_from_state_space(self):
+        # Ensure that bode works with a system that was created from the
+        # state space representation matrices A, B, C, D. In this case,
+        # system.num will be a 2-D array with shape (1, n+1), where (n, n)
+        # is the shape of A.
+        # A Butterworth lowpass filter is used, so we know the exact
+        # frequency response.
+        a = np.array([1.0, 2.0, 2.0, 1.0])
+        A = linalg.companion(a).T
+        B = np.array([[0.0], [0.0], [1.0]])
+        C = np.array([[1.0, 0.0, 0.0]])
+        D = np.array([[0.0]])
+        with suppress_warnings() as sup:
+            sup.filter(BadCoefficients)
+            system = lti(A, B, C, D)
+        w, mag, phase = bode(system, n=100)
+
+        expected_magnitude = 20 * np.log10(np.sqrt(1.0 / (1.0 + w**6)))
+        assert_almost_equal(mag, expected_magnitude)
+
+
+class Test_freqresp(object):
+
+    def test_output_manual(self):
+        # Test freqresp() output calculation (manual sanity check).
+        # 1st order low-pass filter: H(s) = 1 / (s + 1),
+        # re(H(s=0.1)) ~= 0.99
+        # re(H(s=1)) ~= 0.5
+        # re(H(s=10)) ~= 0.0099
+        system = lti([1], [1, 1])
+        w = [0.1, 1, 10]
+        w, H = freqresp(system, w=w)
+        expected_re = [0.99, 0.5, 0.0099]
+        expected_im = [-0.099, -0.5, -0.099]
+        assert_almost_equal(H.real, expected_re, decimal=1)
+        assert_almost_equal(H.imag, expected_im, decimal=1)
+
+    def test_output(self):
+        # Test freqresp() output calculation.
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        system = lti([1], [1, 1])
+        w = [0.1, 1, 10, 100]
+        w, H = freqresp(system, w=w)
+        s = w * 1j
+        expected = np.polyval(system.num, s) / np.polyval(system.den, s)
+        assert_almost_equal(H.real, expected.real)
+        assert_almost_equal(H.imag, expected.imag)
+
+    def test_freq_range(self):
+        # Test that freqresp() finds a reasonable frequency range.
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        # Expected range is from 0.01 to 10.
+        system = lti([1], [1, 1])
+        n = 10
+        expected_w = np.logspace(-2, 1, n)
+        w, H = freqresp(system, n=n)
+        assert_almost_equal(w, expected_w)
+
+    def test_pole_zero(self):
+        # Test that freqresp() doesn't fail on a system with a pole at 0.
+        # integrator, pole at zero: H(s) = 1 / s
+        system = lti([1], [1, 0])
+        w, H = freqresp(system, n=2)
+        assert_equal(w[0], 0.01)  # a fail would give not-a-number
+
+    def test_from_state_space(self):
+        # Ensure that freqresp works with a system that was created from the
+        # state space representation matrices A, B, C, D. In this case,
+        # system.num will be a 2-D array with shape (1, n+1), where (n, n) is
+        # the shape of A.
+        # A Butterworth lowpass filter is used, so we know the exact
+        # frequency response.
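+        # (a = [1, 2, 2, 1] below encodes the normalized 3rd-order
+        # Butterworth polynomial s**3 + 2*s**2 + 2*s + 1, whose magnitude
+        # satisfies |H(jw)|**2 = 1/(1 + w**6).)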
+ a = np.array([1.0, 2.0, 2.0, 1.0]) + A = linalg.companion(a).T + B = np.array([[0.0],[0.0],[1.0]]) + C = np.array([[1.0, 0.0, 0.0]]) + D = np.array([[0.0]]) + with suppress_warnings() as sup: + sup.filter(BadCoefficients) + system = lti(A, B, C, D) + w, H = freqresp(system, n=100) + s = w * 1j + expected = (1.0 / (1.0 + 2*s + 2*s**2 + s**3)) + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) + + def test_from_zpk(self): + # 4th order low-pass filter: H(s) = 1 / (s + 1) + system = lti([],[-1]*4,[1]) + w = [0.1, 1, 10, 100] + w, H = freqresp(system, w=w) + s = w * 1j + expected = 1 / (s + 1)**4 + assert_almost_equal(H.real, expected.real) + assert_almost_equal(H.imag, expected.imag) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_ltisys.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_ltisys.pyc new file mode 100644 index 0000000..7fdbe9b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_ltisys.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_max_len_seq.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_max_len_seq.py new file mode 100644 index 0000000..02c286f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_max_len_seq.py @@ -0,0 +1,67 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_allclose, assert_array_equal +from pytest import raises as assert_raises + +from numpy.fft import fft, ifft + +from scipy.signal import max_len_seq + + +class TestMLS(object): + + def test_mls_inputs(self): + # can't all be zero state + assert_raises(ValueError, max_len_seq, + 10, state=np.zeros(10)) + # wrong size state + assert_raises(ValueError, max_len_seq, 10, + state=np.ones(3)) + # wrong length + assert_raises(ValueError, max_len_seq, 10, length=-1) + assert_array_equal(max_len_seq(10, length=0)[0], []) + # unknown taps + assert_raises(ValueError, max_len_seq, 64) + # bad taps + assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1]) + + def test_mls_output(self): + # define some alternate working taps + alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4], + 8: [7, 5, 3]} + # assume the other bit levels work, too slow to test higher orders... + for nbits in range(2, 8): + for state in [None, np.round(np.random.rand(nbits))]: + for taps in [None, alt_taps[nbits]]: + if state is not None and np.all(state == 0): + state[0] = 1 # they can't all be zero + orig_m = max_len_seq(nbits, state=state, + taps=taps)[0] + m = 2. * orig_m - 1. # convert to +/- 1 representation + # First, make sure we got all 1's or -1 + err_msg = "mls had non binary terms" + assert_array_equal(np.abs(m), np.ones_like(m), + err_msg=err_msg) + # Test via circular cross-correlation, which is just mult. 
+                    # in the frequency domain with one signal conjugated
+                    tester = np.real(ifft(fft(m) * np.conj(fft(m))))
+                    out_len = 2**nbits - 1
+                    # impulse amplitude == out_len
+                    err_msg = "mls impulse has incorrect value"
+                    assert_allclose(tester[0], out_len, err_msg=err_msg)
+                    # steady-state is -1
+                    err_msg = "mls steady-state has incorrect value"
+                    assert_allclose(tester[1:], -1 * np.ones(out_len - 1),
+                                    err_msg=err_msg)
+                    # let's do the split thing using a couple options
+                    for n in (1, 2**(nbits - 1)):
+                        m1, s1 = max_len_seq(nbits, state=state, taps=taps,
+                                             length=n)
+                        m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
+                                             length=1)
+                        m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
+                                             length=out_len - n - 1)
+                        new_m = np.concatenate((m1, m2, m3))
+                        assert_array_equal(orig_m, new_m)
+
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_max_len_seq.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_max_len_seq.pyc
new file mode 100644
index 0000000..e7bf40d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_max_len_seq.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_peak_finding.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_peak_finding.py
new file mode 100644
index 0000000..1f88a1c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_peak_finding.py
@@ -0,0 +1,824 @@
+from __future__ import division, print_function, absolute_import
+
+import copy
+
+import numpy as np
+from numpy.testing import (
+    assert_,
+    assert_equal,
+    assert_allclose,
+    assert_array_equal
+)
+import pytest
+from pytest import raises, warns
+
+from scipy._lib.six import xrange
+from scipy.signal._peak_finding import (
+    argrelmax,
+    argrelmin,
+    peak_prominences,
+    peak_widths,
+    _unpack_condition_args,
+    find_peaks,
+    find_peaks_cwt,
+    _identify_ridge_lines
+)
+from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning
+
+
+def _gen_gaussians(center_locs, sigmas, total_length):
+    xdata = np.arange(0, total_length).astype(float)
+    out_data = np.zeros(total_length, dtype=float)
+    for ind, sigma in enumerate(sigmas):
+        tmp = (xdata - center_locs[ind]) / sigma
+        out_data += np.exp(-(tmp**2))
+    return out_data
+
+
+def _gen_gaussians_even(sigmas, total_length):
+    num_peaks = len(sigmas)
+    delta = total_length / (num_peaks + 1)
+    center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int)
+    out_data = _gen_gaussians(center_locs, sigmas, total_length)
+    return out_data, center_locs
+
+
+def _gen_ridge_line(start_locs, max_locs, length, distances, gaps):
+    """
+    Generate coordinates for a ridge line.
+
+    Will be a series of coordinates, starting at `start_locs` (length 2).
+    The maximum distance between any adjacent columns is bounded by the
+    values in `distances`; the maximum gap between adjacent rows by the
+    values in `gaps`.
+
+    `max_locs` should be the size of the intended matrix. The
+    ending coordinates are guaranteed to be less than `max_locs`,
+    although they may not approach `max_locs` at all.
+ """ + + def keep_bounds(num, max_val): + out = max(num, 0) + out = min(out, max_val) + return out + + gaps = copy.deepcopy(gaps) + distances = copy.deepcopy(distances) + + locs = np.zeros([length, 2], dtype=int) + locs[0, :] = start_locs + total_length = max_locs[0] - start_locs[0] - sum(gaps) + if total_length < length: + raise ValueError('Cannot generate ridge line according to constraints') + dist_int = length / len(distances) - 1 + gap_int = length / len(gaps) - 1 + for ind in xrange(1, length): + nextcol = locs[ind - 1, 1] + nextrow = locs[ind - 1, 0] + 1 + if (ind % dist_int == 0) and (len(distances) > 0): + nextcol += ((-1)**ind)*distances.pop() + if (ind % gap_int == 0) and (len(gaps) > 0): + nextrow += gaps.pop() + nextrow = keep_bounds(nextrow, max_locs[0]) + nextcol = keep_bounds(nextcol, max_locs[1]) + locs[ind, :] = [nextrow, nextcol] + + return [locs[:, 0], locs[:, 1]] + + +class TestLocalMaxima1d(object): + + def test_empty(self): + """Test with empty signal.""" + x = np.array([], dtype=np.float64) + for array in _local_maxima_1d(x): + assert_equal(array, np.array([])) + assert_(array.base is None) + + def test_linear(self): + """Test with linear signal.""" + x = np.linspace(0, 100) + for array in _local_maxima_1d(x): + assert_equal(array, np.array([])) + assert_(array.base is None) + + def test_simple(self): + """Test with simple signal.""" + x = np.linspace(-10, 10, 50) + x[2::3] += 1 + expected = np.arange(2, 50, 3) + for array in _local_maxima_1d(x): + # For plateaus of size 1, the edges are identical with the + # midpoints + assert_equal(array, expected) + assert_(array.base is None) + + def test_flat_maxima(self): + """Test if flat maxima are detected correctly.""" + x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10, + -5, -5, -5, -5, -5, -10]) + midpoints, left_edges, right_edges = _local_maxima_1d(x) + assert_equal(midpoints, np.array([2, 4, 8, 12, 18])) + assert_equal(left_edges, np.array([2, 4, 7, 11, 16])) + assert_equal(right_edges, np.array([2, 5, 9, 14, 20])) + + @pytest.mark.parametrize('x', [ + np.array([1., 0, 2]), + np.array([3., 3, 0, 4, 4]), + np.array([5., 5, 5, 0, 6, 6, 6]), + ]) + def test_signal_edges(self, x): + """Test if behavior on signal edges is correct.""" + for array in _local_maxima_1d(x): + assert_equal(array, np.array([])) + assert_(array.base is None) + + def test_exceptions(self): + """Test input validation and raised exceptions.""" + with raises(ValueError, match="wrong number of dimensions"): + _local_maxima_1d(np.ones((1, 1))) + with raises(ValueError, match="expected 'float64_t'"): + _local_maxima_1d(np.ones(1, dtype=int)) + with raises(TypeError, match="list"): + _local_maxima_1d([1., 2.]) + with raises(TypeError, match="'x' must not be None"): + _local_maxima_1d(None) + + +class TestRidgeLines(object): + + def test_empty(self): + test_matr = np.zeros([20, 100]) + lines = _identify_ridge_lines(test_matr, 2*np.ones(20), 1) + assert_(len(lines) == 0) + + def test_minimal(self): + test_matr = np.zeros([20, 100]) + test_matr[0, 10] = 1 + lines = _identify_ridge_lines(test_matr, 2*np.ones(20), 1) + assert_(len(lines) == 1) + + test_matr = np.zeros([20, 100]) + test_matr[0:2, 10] = 1 + lines = _identify_ridge_lines(test_matr, 2*np.ones(20), 1) + assert_(len(lines) == 1) + + def test_single_pass(self): + distances = [0, 1, 2, 5] + gaps = [0, 1, 2, 0, 1] + test_matr = np.zeros([20, 50]) + 1e-12 + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + 
max_distances = max(distances)*np.ones(20) + identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1) + assert_array_equal(identified_lines, [line]) + + def test_single_bigdist(self): + distances = [0, 1, 2, 5] + gaps = [0, 1, 2, 4] + test_matr = np.zeros([20, 50]) + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 3 + max_distances = max_dist*np.ones(20) + #This should get 2 lines, since the distance is too large + identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1) + assert_(len(identified_lines) == 2) + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + def test_single_biggap(self): + distances = [0, 1, 2, 5] + max_gap = 3 + gaps = [0, 4, 2, 1] + test_matr = np.zeros([20, 50]) + length = 12 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 6 + max_distances = max_dist*np.ones(20) + #This should get 2 lines, since the gap is too large + identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) + assert_(len(identified_lines) == 2) + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + def test_single_biggaps(self): + distances = [0] + max_gap = 1 + gaps = [3, 6] + test_matr = np.zeros([50, 50]) + length = 30 + line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) + test_matr[line[0], line[1]] = 1 + max_dist = 1 + max_distances = max_dist*np.ones(50) + #This should get 3 lines, since the gaps are too large + identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) + assert_(len(identified_lines) == 3) + + for iline in identified_lines: + adists = np.diff(iline[1]) + np.testing.assert_array_less(np.abs(adists), max_dist) + + agaps = np.diff(iline[0]) + np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) + + +class TestArgrel(object): + + def test_empty(self): + # Regression test for gh-2832. + # When there are no relative extrema, make sure that + # the number of empty arrays returned matches the + # dimension of the input. + + empty_array = np.array([], dtype=int) + + z1 = np.zeros(5) + + i = argrelmin(z1) + assert_equal(len(i), 1) + assert_array_equal(i[0], empty_array) + + z2 = np.zeros((3,5)) + + row, col = argrelmin(z2, axis=0) + assert_array_equal(row, empty_array) + assert_array_equal(col, empty_array) + + row, col = argrelmin(z2, axis=1) + assert_array_equal(row, empty_array) + assert_array_equal(col, empty_array) + + def test_basic(self): + # Note: the docstrings for the argrel{min,max,extrema} functions + # do not give a guarantee of the order of the indices, so we'll + # sort them before testing. 
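+        # (argrel{min,max} return (row, col) index arrays; np.argsort(row)
+        # below imposes a deterministic order before comparing.)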
+ + x = np.array([[1, 2, 2, 3, 2], + [2, 1, 2, 2, 3], + [3, 2, 1, 2, 2], + [2, 3, 2, 1, 2], + [1, 2, 3, 2, 1]]) + + row, col = argrelmax(x, axis=0) + order = np.argsort(row) + assert_equal(row[order], [1, 2, 3]) + assert_equal(col[order], [4, 0, 1]) + + row, col = argrelmax(x, axis=1) + order = np.argsort(row) + assert_equal(row[order], [0, 3, 4]) + assert_equal(col[order], [3, 1, 2]) + + row, col = argrelmin(x, axis=0) + order = np.argsort(row) + assert_equal(row[order], [1, 2, 3]) + assert_equal(col[order], [1, 2, 3]) + + row, col = argrelmin(x, axis=1) + order = np.argsort(row) + assert_equal(row[order], [1, 2, 3]) + assert_equal(col[order], [1, 2, 3]) + + def test_highorder(self): + order = 2 + sigmas = [1.0, 2.0, 10.0, 5.0, 15.0] + test_data, act_locs = _gen_gaussians_even(sigmas, 500) + test_data[act_locs + order] = test_data[act_locs]*0.99999 + test_data[act_locs - order] = test_data[act_locs]*0.99999 + rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0] + + assert_(len(rel_max_locs) == len(act_locs)) + assert_((rel_max_locs == act_locs).all()) + + def test_2d_gaussians(self): + sigmas = [1.0, 2.0, 10.0] + test_data, act_locs = _gen_gaussians_even(sigmas, 100) + rot_factor = 20 + rot_range = np.arange(0, len(test_data)) - rot_factor + test_data_2 = np.vstack([test_data, test_data[rot_range]]) + rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1) + + for rw in xrange(0, test_data_2.shape[0]): + inds = (rel_max_rows == rw) + + assert_(len(rel_max_cols[inds]) == len(act_locs)) + assert_((act_locs == (rel_max_cols[inds] - rot_factor*rw)).all()) + + +class TestPeakProminences(object): + + def test_empty(self): + """ + Test if an empty array is returned if no peaks are provided. + """ + out = peak_prominences([1, 2, 3], []) + for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): + assert_(arr.size == 0) + assert_(arr.dtype == dtype) + + out = peak_prominences([], []) + for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): + assert_(arr.size == 0) + assert_(arr.dtype == dtype) + + def test_basic(self): + """ + Test if height of prominences is correctly calculated in signal with + rising baseline (peak widths are 1 sample). + """ + # Prepare basic signal + x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1]) + peaks = np.array([1, 2, 4, 6]) + lbases = np.array([0, 0, 0, 5]) + rbases = np.array([3, 3, 5, 7]) + proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0) + # Test if calculation matches handcrafted result + out = peak_prominences(x, peaks) + assert_equal(out[0], proms) + assert_equal(out[1], lbases) + assert_equal(out[2], rbases) + + def test_edge_cases(self): + """ + Test edge cases. + """ + # Peaks have same height, prominence and bases + x = [0, 2, 1, 2, 1, 2, 0] + peaks = [1, 3, 5] + proms, lbases, rbases = peak_prominences(x, peaks) + assert_equal(proms, [2, 2, 2]) + assert_equal(lbases, [0, 0, 0]) + assert_equal(rbases, [6, 6, 6]) + + # Peaks have same height & prominence but different bases + x = [0, 1, 0, 1, 0, 1, 0] + peaks = np.array([1, 3, 5]) + proms, lbases, rbases = peak_prominences(x, peaks) + assert_equal(proms, [1, 1, 1]) + assert_equal(lbases, peaks - 1) + assert_equal(rbases, peaks + 1) + + def test_non_contiguous(self): + """ + Test with non-C-contiguous input arrays. 
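+        (Slicing with a step, x[::2], yields a strided, non-contiguous view.)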
+ """ + x = np.repeat([-9, 9, 9, 0, 3, 1], 2) + peaks = np.repeat([1, 2, 4], 2) + proms, lbases, rbases = peak_prominences(x[::2], peaks[::2]) + assert_equal(proms, [9, 9, 2]) + assert_equal(lbases, [0, 0, 3]) + assert_equal(rbases, [3, 3, 5]) + + def test_wlen(self): + """ + Test if wlen actually shrinks the evaluation range correctly. + """ + x = [0, 1, 2, 3, 1, 0, -1] + peak = [3] + # Test rounding behavior of wlen + assert_equal(peak_prominences(x, peak), [3., 0, 6]) + for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]: + assert_equal(peak_prominences(x, peak, wlen), [3. - i, 0 + i, 6 - i]) + + def test_exceptions(self): + """ + Verify that exceptions and warnings are raised. + """ + # x with dimension > 1 + with raises(ValueError, match='1D array'): + peak_prominences([[0, 1, 1, 0]], [1, 2]) + # peaks with dimension > 1 + with raises(ValueError, match='1D array'): + peak_prominences([0, 1, 1, 0], [[1, 2]]) + # x with dimension < 1 + with raises(ValueError, match='1D array'): + peak_prominences(3, [0,]) + + # empty x with supplied + with raises(ValueError, match='not a valid index'): + peak_prominences([], [0]) + # invalid indices with non-empty x + for p in [-100, -1, 3, 1000]: + with raises(ValueError, match='not a valid index'): + peak_prominences([1, 0, 2], [p]) + + # peaks is not cast-able to np.intp + with raises(TypeError, match='cannot safely cast'): + peak_prominences([0, 1, 1, 0], [1.1, 2.3]) + + # wlen < 3 + with raises(ValueError, match='wlen'): + peak_prominences(np.arange(10), [3, 5], wlen=1) + + def test_warnings(self): + """ + Verify that appropriate warnings are raised. + """ + msg = "some peaks have a prominence of 0" + for p in [0, 1, 2]: + with warns(PeakPropertyWarning, match=msg): + peak_prominences([1, 0, 2], [p,]) + with warns(PeakPropertyWarning, match=msg): + peak_prominences([0, 1, 1, 1, 0], [2], wlen=2) + + +class TestPeakWidths(object): + + def test_empty(self): + """ + Test if an empty array is returned if no peaks are provided. + """ + widths = peak_widths([], [])[0] + assert_(isinstance(widths, np.ndarray)) + assert_equal(widths.size, 0) + widths = peak_widths([1, 2, 3], [])[0] + assert_(isinstance(widths, np.ndarray)) + assert_equal(widths.size, 0) + out = peak_widths([], []) + for arr in out: + assert_(isinstance(arr, np.ndarray)) + assert_equal(arr.size, 0) + + @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") + def test_basic(self): + """ + Test a simple use case with easy to verify results at different relative + heights. + """ + x = np.array([1, 0, 1, 2, 1, 0, -1]) + prominence = 2 + for rel_height, width_true, lip_true, rip_true in [ + (0., 0., 3., 3.), # raises warning + (0.25, 1., 2.5, 3.5), + (0.5, 2., 2., 4.), + (0.75, 3., 1.5, 4.5), + (1., 4., 1., 5.), + (2., 5., 1., 6.), + (3., 5., 1., 6.) + ]: + width_calc, height, lip_calc, rip_calc = peak_widths( + x, [3], rel_height) + assert_allclose(width_calc, width_true) + assert_allclose(height, 2 - rel_height * prominence) + assert_allclose(lip_calc, lip_true) + assert_allclose(rip_calc, rip_true) + + def test_non_contiguous(self): + """ + Test with non-C-contiguous input arrays. + """ + x = np.repeat([0, 100, 50], 4) + peaks = np.repeat([1], 3) + result = peak_widths(x[::4], peaks[::3]) + assert_equal(result, [0.75, 75, 0.75, 1.5]) + + def test_exceptions(self): + """ + Verify that argument validation works as intended. 
+ """ + with raises(ValueError, match='1D array'): + # x with dimension > 1 + peak_widths(np.zeros((3, 4)), np.ones(3)) + with raises(ValueError, match='1D array'): + # x with dimension < 1 + peak_widths(3, [0]) + with raises(ValueError, match='1D array'): + # peaks with dimension > 1 + peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp)) + with raises(ValueError, match='1D array'): + # peaks with dimension < 1 + peak_widths(np.arange(10), 3) + with raises(ValueError, match='not a valid index'): + # peak pos exceeds x.size + peak_widths(np.arange(10), [8, 11]) + with raises(ValueError, match='not a valid index'): + # empty x with peaks supplied + peak_widths([], [1, 2]) + with raises(TypeError, match='cannot safely cast'): + # peak cannot be safely casted to intp + peak_widths(np.arange(10), [1.1, 2.3]) + with raises(ValueError, match='rel_height'): + # rel_height is < 0 + peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1) + with raises(TypeError, match='None'): + # prominence data contains None + peak_widths([1, 2, 1], [1], prominence_data=(None, None, None)) + + def test_warnings(self): + """ + Verify that appropriate warnings are raised. + """ + msg = "some peaks have a width of 0" + with warns(PeakPropertyWarning, match=msg): + # Case: rel_height is 0 + peak_widths([0, 1, 0], [1], rel_height=0) + with warns(PeakPropertyWarning, match=msg): + # Case: prominence is 0 and bases are identical + peak_widths( + [0, 1, 1, 1, 0], [2], + prominence_data=(np.array([0.], np.float64), + np.array([2], np.intp), + np.array([2], np.intp)) + ) + + def test_mismatching_prominence_data(self): + """Test with mismatching peak and / or prominence data.""" + x = [0, 1, 0] + peak = [1] + for i, (prominences, left_bases, right_bases) in enumerate([ + ((1.,), (-1,), (2,)), # left base not in x + ((1.,), (0,), (3,)), # right base not in x + ((1.,), (2,), (0,)), # swapped bases same as peak + ((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks + ((1., 1.), (0,), (2,)), # arrays with different shapes + ((1.,), (0, 0), (2,)), # arrays with different shapes + ((1.,), (0,), (2, 2)) # arrays with different shapes + ]): + # Make sure input is matches output of signal.peak_prominences + prominence_data = (np.array(prominences, dtype=np.float64), + np.array(left_bases, dtype=np.intp), + np.array(right_bases, dtype=np.intp)) + # Test for correct exception + if i < 3: + match = "prominence data is invalid for peak" + else: + match = "arrays in `prominence_data` must have the same shape" + with raises(ValueError, match=match): + peak_widths(x, peak, prominence_data=prominence_data) + + @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") + def test_intersection_rules(self): + """Test if x == eval_height counts as an intersection.""" + # Flatt peak with two possible intersection points if evaluated at 1 + x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0] + # relative height is 0 -> width is 0 as well, raises warning + assert_allclose(peak_widths(x, peaks=[5], rel_height=0), + [(0.,), (3.,), (5.,), (5.,)]) + # width_height == x counts as intersection -> nearest 1 is chosen + assert_allclose(peak_widths(x, peaks=[5], rel_height=2/3), + [(4.,), (1.,), (3.,), (7.,)]) + + +def test_unpack_condition_args(): + """ + Verify parsing of condition arguments for `scipy.signal.find_peaks` function. 
+ """ + x = np.arange(10) + amin_true = x + amax_true = amin_true + 10 + peaks = amin_true[1::2] + + # Test unpacking with None or interval + assert_((None, None) == _unpack_condition_args((None, None), x, peaks)) + assert_((1, None) == _unpack_condition_args(1, x, peaks)) + assert_((1, None) == _unpack_condition_args((1, None), x, peaks)) + assert_((None, 2) == _unpack_condition_args((None, 2), x, peaks)) + assert_((3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks)) + + # Test if borders are correctly reduced with `peaks` + amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks) + assert_equal(amin_calc, amin_true[peaks]) + assert_equal(amax_calc, amax_true[peaks]) + + # Test raises if array borders don't match x + with raises(ValueError, match="array size of lower"): + _unpack_condition_args(amin_true, np.arange(11), peaks) + with raises(ValueError, match="array size of upper"): + _unpack_condition_args((None, amin_true), np.arange(11), peaks) + + +class TestFindPeaks(object): + + # Keys of optionally returned properties + property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds', + 'prominences', 'left_bases', 'right_bases', 'widths', + 'width_heights', 'left_ips', 'right_ips'} + + def test_constant(self): + """ + Test behavior for signal without local maxima. + """ + open_interval = (None, None) + peaks, props = find_peaks(np.ones(10), + height=open_interval, threshold=open_interval, + prominence=open_interval, width=open_interval) + assert_(peaks.size == 0) + for key in self.property_keys: + assert_(props[key].size == 0) + + def test_plateau_size(self): + """ + Test plateau size condition for peaks. + """ + # Prepare signal with peaks with peak_height == plateau_size + plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111]) + x = np.zeros(plateau_sizes.size * 2 + 1) + x[1::2] = plateau_sizes + repeats = np.ones(x.size, dtype=int) + repeats[1::2] = x[1::2] + x = np.repeat(x, repeats) + + # Test full output + peaks, props = find_peaks(x, plateau_size=(None, None)) + assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100]) + assert_equal(props["plateau_sizes"], plateau_sizes) + assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2) + assert_equal(props["right_edges"], peaks + plateau_sizes // 2) + + # Test conditions + assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100]) + assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7]) + assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33]) + + def test_height_condition(self): + """ + Test height condition for peaks. + """ + x = (0., 1/3, 0., 2.5, 0, 4., 0) + peaks, props = find_peaks(x, height=(None, None)) + assert_equal(peaks, np.array([1, 3, 5])) + assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.])) + assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5])) + assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3])) + assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3])) + + def test_threshold_condition(self): + """ + Test threshold condition for peaks. 
+ """ + x = (0, 2, 1, 4, -1) + peaks, props = find_peaks(x, threshold=(None, None)) + assert_equal(peaks, np.array([1, 3])) + assert_equal(props['left_thresholds'], np.array([2, 3])) + assert_equal(props['right_thresholds'], np.array([1, 5])) + assert_equal(find_peaks(x, threshold=2)[0], np.array([3])) + assert_equal(find_peaks(x, threshold=3.5)[0], np.array([])) + assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3])) + assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1])) + assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([])) + + def test_distance_condition(self): + """ + Test distance condition for peaks. + """ + # Peaks of different height with constant distance 3 + peaks_all = np.arange(1, 21, 3) + x = np.zeros(21) + x[peaks_all] += np.linspace(1, 2, peaks_all.size) + + # Test if peaks with "minimal" distance are still selected (distance = 3) + assert_equal(find_peaks(x, distance=3)[0], peaks_all) + + # Select every second peak (distance > 3) + peaks_subset = find_peaks(x, distance=3.0001)[0] + # Test if peaks_subset is subset of peaks_all + assert_( + np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0 + ) + # Test if every second peak was removed + assert_equal(np.diff(peaks_subset), 6) + + # Test priority of peak removal + x = [-2, 1, -1, 0, -3] + peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size + assert_(peaks_subset.size == 1 and peaks_subset[0] == 1) + + def test_prominence_condition(self): + """ + Test prominence condition for peaks. + """ + x = np.linspace(0, 10, 100) + peaks_true = np.arange(1, 99, 2) + offset = np.linspace(1, 10, peaks_true.size) + x[peaks_true] += offset + prominences = x[peaks_true] - x[peaks_true + 1] + interval = (3, 9) + keep = np.nonzero( + (interval[0] <= prominences) & (prominences <= interval[1])) + + peaks_calc, properties = find_peaks(x, prominence=interval) + assert_equal(peaks_calc, peaks_true[keep]) + assert_equal(properties['prominences'], prominences[keep]) + assert_equal(properties['left_bases'], 0) + assert_equal(properties['right_bases'], peaks_true[keep] + 1) + + def test_width_condition(self): + """ + Test width condition for peaks. + """ + x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0]) + peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75) + assert_equal(peaks.size, 1) + assert_equal(peaks, 7) + assert_allclose(props['widths'], 1.35) + assert_allclose(props['width_heights'], 1.) + assert_allclose(props['left_ips'], 6.4) + assert_allclose(props['right_ips'], 7.75) + + def test_properties(self): + """ + Test returned properties. + """ + open_interval = (None, None) + x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9] + peaks, props = find_peaks(x, + height=open_interval, threshold=open_interval, + prominence=open_interval, width=open_interval) + assert_(len(props) == len(self.property_keys)) + for key in self.property_keys: + assert_(peaks.size == props[key].size) + + def test_raises(self): + """ + Test exceptions raised by function. + """ + with raises(ValueError, match="1D array"): + find_peaks(np.array(1)) + with raises(ValueError, match="1D array"): + find_peaks(np.ones((2, 2))) + with raises(ValueError, match="distance"): + find_peaks(np.arange(10), distance=-1) + + @pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0", + "ignore:some peaks have a width of 0") + def test_wlen_smaller_plateau(self): + """ + Test behavior of prominence and width calculation if the given window + length is smaller than a peak's plateau size. 
+
+        Regression test for gh-9110.
+        """
+        peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None),
+                                  width=(None, None), wlen=2)
+        assert_equal(peaks, 2)
+        assert_equal(props["prominences"], 0)
+        assert_equal(props["widths"], 0)
+        assert_equal(props["width_heights"], 1)
+        for key in ("left_bases", "right_bases", "left_ips", "right_ips"):
+            assert_equal(props[key], peaks)
+
+
+class TestFindPeaksCwt(object):
+
+    def test_find_peaks_exact(self):
+        """
+        Generate a series of gaussians and attempt to find the peak locations.
+        """
+        sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
+        num_points = 500
+        test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
+        widths = np.arange(0.1, max(sigmas))
+        found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,
+                                    min_length=None)
+        np.testing.assert_array_equal(found_locs, act_locs,
+                                      "Found maximum locations did not equal those expected")
+
+    def test_find_peaks_withnoise(self):
+        """
+        Verify that peak locations are (approximately) found
+        for a series of gaussians with added noise.
+        """
+        sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
+        num_points = 500
+        test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
+        widths = np.arange(0.1, max(sigmas))
+        noise_amp = 0.07
+        np.random.seed(18181911)
+        test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
+        found_locs = find_peaks_cwt(test_data, widths, min_length=15,
+                                    gap_thresh=1, min_snr=noise_amp / 5)
+
+        np.testing.assert_equal(len(found_locs), len(act_locs),
+                                'Different number of peaks found than expected')
+        diffs = np.abs(found_locs - act_locs)
+        max_diffs = np.array(sigmas) / 5
+        np.testing.assert_array_less(diffs, max_diffs,
+                                     'Maximum location differed by more than %s' % (max_diffs))
+
+    def test_find_peaks_nopeak(self):
+        """
+        Verify that no peak is found in data that's just noise.
+ """ + noise_amp = 1.0 + num_points = 100 + np.random.seed(181819141) + test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp) + widths = np.arange(10, 50) + found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30) + np.testing.assert_equal(len(found_locs), 0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_peak_finding.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_peak_finding.pyc new file mode 100644 index 0000000..bcfa05a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_peak_finding.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_savitzky_golay.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_savitzky_golay.py new file mode 100644 index 0000000..868f67a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_savitzky_golay.py @@ -0,0 +1,291 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_allclose, assert_equal, + assert_almost_equal, assert_array_equal, + assert_array_almost_equal) + +from scipy.ndimage import convolve1d + +from scipy.signal import savgol_coeffs, savgol_filter +from scipy.signal._savitzky_golay import _polyder + + +def check_polyder(p, m, expected): + dp = _polyder(p, m) + assert_array_equal(dp, expected) + + +def test_polyder(): + cases = [ + ([5], 0, [5]), + ([5], 1, [0]), + ([3, 2, 1], 0, [3, 2, 1]), + ([3, 2, 1], 1, [6, 2]), + ([3, 2, 1], 2, [6]), + ([3, 2, 1], 3, [0]), + ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]), + ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]), + ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]), + ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]), + ] + for p, m, expected in cases: + check_polyder(np.array(p).T, m, np.array(expected).T) + + +#-------------------------------------------------------------------- +# savgol_coeffs tests +#-------------------------------------------------------------------- + +def alt_sg_coeffs(window_length, polyorder, pos): + """This is an alternative implementation of the SG coefficients. + + It uses numpy.polyfit and numpy.polyval. The results should be + equivalent to those of savgol_coeffs(), but this implementation + is slower. + + window_length should be odd. + + """ + if pos is None: + pos = window_length // 2 + t = np.arange(window_length) + unit = (t == pos).astype(int) + h = np.polyval(np.polyfit(t, unit, polyorder), t) + return h + + +def test_sg_coeffs_trivial(): + # Test a trivial case of savgol_coeffs: polyorder = window_length - 1 + h = savgol_coeffs(1, 0) + assert_allclose(h, [1]) + + h = savgol_coeffs(3, 2) + assert_allclose(h, [0, 1, 0], atol=1e-10) + + h = savgol_coeffs(5, 4) + assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10) + + h = savgol_coeffs(5, 4, pos=1) + assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10) + + h = savgol_coeffs(5, 4, pos=1, use='dot') + assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10) + + +def compare_coeffs_to_alt(window_length, order): + # For the given window_length and order, compare the results + # of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1. + # Also include pos=None. 
+ for pos in [None] + list(range(window_length)): + h1 = savgol_coeffs(window_length, order, pos=pos, use='dot') + h2 = alt_sg_coeffs(window_length, order, pos=pos) + assert_allclose(h1, h2, atol=1e-10, + err_msg=("window_length = %d, order = %d, pos = %s" % + (window_length, order, pos))) + + +def test_sg_coeffs_compare(): + # Compare savgol_coeffs() to alt_sg_coeffs(). + for window_length in range(1, 8, 2): + for order in range(window_length): + compare_coeffs_to_alt(window_length, order) + + +def test_sg_coeffs_exact(): + polyorder = 4 + window_length = 9 + halflen = window_length // 2 + + x = np.linspace(0, 21, 43) + delta = x[1] - x[0] + + # The data is a cubic polynomial. We'll use an order 4 + # SG filter, so the filtered values should equal the input data + # (except within half window_length of the edges). + y = 0.5 * x ** 3 - x + h = savgol_coeffs(window_length, polyorder) + y0 = convolve1d(y, h) + assert_allclose(y0[halflen:-halflen], y[halflen:-halflen]) + + # Check the same input, but use deriv=1. dy is the exact result. + dy = 1.5 * x ** 2 - 1 + h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta) + y1 = convolve1d(y, h) + assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen]) + + # Check the same input, but use deriv=2. d2y is the exact result. + d2y = 3.0 * x + h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta) + y2 = convolve1d(y, h) + assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen]) + + +def test_sg_coeffs_deriv(): + # The data in `x` is a sampled parabola, so using savgol_coeffs with an + # order 2 or higher polynomial should give exact results. + i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0]) + x = i ** 2 / 4 + dx = i / 2 + d2x = 0.5 * np.ones_like(i) + for pos in range(x.size): + coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot') + assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10) + coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1) + assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10) + coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2) + assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10) + + +def test_sg_coeffs_large(): + # Test that for large values of window_length and polyorder the array of + # coefficients returned is symmetric. The aim is to ensure that + # no potential numeric overflow occurs. + coeffs0 = savgol_coeffs(31, 9) + assert_array_almost_equal(coeffs0, coeffs0[::-1]) + coeffs1 = savgol_coeffs(31, 9, deriv=1) + assert_array_almost_equal(coeffs1, -coeffs1[::-1]) + + +#-------------------------------------------------------------------- +# savgol_filter tests +#-------------------------------------------------------------------- + + +def test_sg_filter_trivial(): + """ Test some trivial edge cases for savgol_filter().""" + x = np.array([1.0]) + y = savgol_filter(x, 1, 0) + assert_equal(y, [1.0]) + + # Input is a single value. With a window length of 3 and polyorder 1, + # the value in y is from the straight-line fit of (-1,0), (0,3) and + # (1, 0) at 0. This is just the average of the three values, hence 1.0. + x = np.array([3.0]) + y = savgol_filter(x, 3, 1, mode='constant') + assert_almost_equal(y, [1.0], decimal=15) + + x = np.array([3.0]) + y = savgol_filter(x, 3, 1, mode='nearest') + assert_almost_equal(y, [3.0], decimal=15) + + x = np.array([1.0] * 3) + y = savgol_filter(x, 3, 1, mode='wrap') + assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15) + + +def test_sg_filter_basic(): + # Some basic test cases for savgol_filter(). 
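+    # (A worked value: with mode='constant' the ends are zero-padded, and an
+    # order-1 fit over a length-3 window equals the window mean, so the
+    # centre sample of [1, 2, 1] filters to (1 + 2 + 1)/3 = 4/3.)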
+ x = np.array([1.0, 2.0, 1.0]) + y = savgol_filter(x, 3, 1, mode='constant') + assert_allclose(y, [1.0, 4.0 / 3, 1.0]) + + y = savgol_filter(x, 3, 1, mode='mirror') + assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3]) + + y = savgol_filter(x, 3, 1, mode='wrap') + assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3]) + + +def test_sg_filter_2d(): + x = np.array([[1.0, 2.0, 1.0], + [2.0, 4.0, 2.0]]) + expected = np.array([[1.0, 4.0 / 3, 1.0], + [2.0, 8.0 / 3, 2.0]]) + y = savgol_filter(x, 3, 1, mode='constant') + assert_allclose(y, expected) + + y = savgol_filter(x.T, 3, 1, mode='constant', axis=0) + assert_allclose(y, expected.T) + + +def test_sg_filter_interp_edges(): + # Another test with low degree polynomial data, for which we can easily + # give the exact results. In this test, we use mode='interp', so + # savgol_filter should match the exact solution for the entire data set, + # including the edges. + t = np.linspace(-5, 5, 21) + delta = t[1] - t[0] + # Polynomial test data. + x = np.array([t, + 3 * t ** 2, + t ** 3 - t]) + dx = np.array([np.ones_like(t), + 6 * t, + 3 * t ** 2 - 1.0]) + d2x = np.array([np.zeros_like(t), + 6 * np.ones_like(t), + 6 * t]) + + window_length = 7 + + y = savgol_filter(x, window_length, 3, axis=-1, mode='interp') + assert_allclose(y, x, atol=1e-12) + + y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', + deriv=1, delta=delta) + assert_allclose(y1, dx, atol=1e-12) + + y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', + deriv=2, delta=delta) + assert_allclose(y2, d2x, atol=1e-12) + + # Transpose everything, and test again with axis=0. + + x = x.T + dx = dx.T + d2x = d2x.T + + y = savgol_filter(x, window_length, 3, axis=0, mode='interp') + assert_allclose(y, x, atol=1e-12) + + y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp', + deriv=1, delta=delta) + assert_allclose(y1, dx, atol=1e-12) + + y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp', + deriv=2, delta=delta) + assert_allclose(y2, d2x, atol=1e-12) + + +def test_sg_filter_interp_edges_3d(): + # Test mode='interp' with a 3-D array. 
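+    # (mode='interp' fits a least-squares polynomial to the window_length
+    # edge samples instead of padding, so data that is itself polynomial of
+    # degree <= polyorder is reproduced exactly, edges included.)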
+ t = np.linspace(-5, 5, 21) + delta = t[1] - t[0] + x1 = np.array([t, -t]) + x2 = np.array([t ** 2, 3 * t ** 2 + 5]) + x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t]) + dx1 = np.array([np.ones_like(t), -np.ones_like(t)]) + dx2 = np.array([2 * t, 6 * t]) + dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5]) + + # z has shape (3, 2, 21) + z = np.array([x1, x2, x3]) + dz = np.array([dx1, dx2, dx3]) + + y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta) + assert_allclose(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta) + assert_allclose(dy, dz, atol=1e-10) + + # z has shape (3, 21, 2) + z = np.array([x1.T, x2.T, x3.T]) + dz = np.array([dx1.T, dx2.T, dx3.T]) + + y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta) + assert_allclose(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta) + assert_allclose(dy, dz, atol=1e-10) + + # z has shape (21, 3, 2) + z = z.swapaxes(0, 1).copy() + dz = dz.swapaxes(0, 1).copy() + + y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta) + assert_allclose(y, z, atol=1e-10) + + dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta) + assert_allclose(dy, dz, atol=1e-10) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_savitzky_golay.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_savitzky_golay.pyc new file mode 100644 index 0000000..1e37d62 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_savitzky_golay.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_signaltools.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_signaltools.py new file mode 100644 index 0000000..d40fc95 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_signaltools.py @@ -0,0 +1,2654 @@ +# -*- coding: utf-8 -*- +from __future__ import division, print_function, absolute_import + +import sys + +from decimal import Decimal +from itertools import product +import warnings + +import pytest +from pytest import raises as assert_raises +from numpy.testing import ( + assert_equal, + assert_almost_equal, assert_array_equal, assert_array_almost_equal, + assert_allclose, assert_, assert_warns, assert_array_less) +from scipy._lib._numpy_compat import suppress_warnings +from numpy import array, arange +import numpy as np + +from scipy.ndimage.filters import correlate1d +from scipy.optimize import fmin +from scipy import signal +from scipy.signal import ( + correlate, convolve, convolve2d, fftconvolve, choose_conv_method, + hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, + invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, + sosfilt_zi, tf2zpk, BadCoefficients) +from scipy.signal.windows import hann +from scipy.signal.signaltools import _filtfilt_gust + + +if sys.version_info.major >= 3 and sys.version_info.minor >= 5: + from math import gcd +else: + from fractions import gcd + + +class _TestConvolve(object): + + def test_basic(self): + a = [3, 4, 5, 6, 5, 4] + b = [1, 2, 3] + c = convolve(a, b) + assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) + + def test_same(self): + a = [3, 4, 5] + b = [1, 2, 3, 4] + c = convolve(a, b, mode="same") + assert_array_equal(c, array([10, 22, 34])) + + def test_same_eq(self): + a = [3, 4, 5] + b = [1, 2, 3] + c = convolve(a, b, mode="same") + assert_array_equal(c, array([10, 22, 22])) 
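+        # (the full convolution of [3, 4, 5] with [1, 2, 3] is
+        # [3, 10, 22, 22, 15]; mode='same' keeps the centre len(a) == 3
+        # samples, hence [10, 22, 22])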
+ + def test_complex(self): + x = array([1 + 1j, 2 + 1j, 3 + 1j]) + y = array([1 + 1j, 2 + 1j]) + z = convolve(x, y) + assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) + + def test_zero_rank(self): + a = 1289 + b = 4567 + c = convolve(a, b) + assert_equal(c, a * b) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + c = convolve(a, b) + assert_equal(c, a * b) + + def test_2d_arrays(self): + a = [[1, 2, 3], [3, 4, 5]] + b = [[2, 3, 4], [4, 5, 6]] + c = convolve(a, b) + d = array([[2, 7, 16, 17, 12], + [10, 30, 62, 58, 38], + [12, 31, 58, 49, 30]]) + assert_array_equal(c, d) + + def test_input_swapping(self): + small = arange(8).reshape(2, 2, 2) + big = 1j * arange(27).reshape(3, 3, 3) + big += arange(27)[::-1].reshape(3, 3, 3) + + out_array = array( + [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], + [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], + [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j], + [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], + + [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], + [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], + [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], + [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]], + + [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], + [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], + [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], + [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], + + [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], + [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], + [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], + [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) + + assert_array_equal(convolve(small, big, 'full'), out_array) + assert_array_equal(convolve(big, small, 'full'), out_array) + assert_array_equal(convolve(small, big, 'same'), + out_array[1:3, 1:3, 1:3]) + assert_array_equal(convolve(big, small, 'same'), + out_array[0:3, 0:3, 0:3]) + assert_array_equal(convolve(small, big, 'valid'), + out_array[1:3, 1:3, 1:3]) + assert_array_equal(convolve(big, small, 'valid'), + out_array[1:3, 1:3, 1:3]) + + def test_invalid_params(self): + a = [3, 4, 5] + b = [1, 2, 3] + assert_raises(ValueError, convolve, a, b, mode='spam') + assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft') + assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') + assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') + assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') + + +class TestConvolve(_TestConvolve): + + def test_valid_mode2(self): + # See gh-5897 + a = [1, 2, 3, 6, 5, 3] + b = [2, 3, 4, 5, 3, 4, 2, 2, 1] + expected = [70, 78, 73, 65] + + out = convolve(a, b, 'valid') + assert_array_equal(out, expected) + + out = convolve(b, a, 'valid') + assert_array_equal(out, expected) + + a = [1 + 5j, 2 - 1j, 3 + 0j] + b = [2 - 3j, 1 + 0j] + expected = [2 - 3j, 8 - 10j] + + out = convolve(a, b, 'valid') + assert_array_equal(out, expected) + + out = convolve(b, a, 'valid') + assert_array_equal(out, expected) + + def test_same_mode(self): + a = [1, 2, 3, 3, 1, 2] + b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] + c = convolve(a, b, 'same') + d = array([57, 61, 63, 57, 45, 36]) + assert_array_equal(c, d) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. 
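+        # (e.g. the (2, 3) and (3, 2) arrays below: neither shape dominates
+        # the other in every dimension, so 'valid' has no fully overlapping
+        # placement.)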
+        a = np.arange(1, 7).reshape((2, 3))
+        b = np.arange(-6, 0).reshape((3, 2))
+
+        assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'})
+        assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'})
+
+    def test_convolve_method(self, n=100):
+        types = sum([t for _, t in np.sctypes.items()], [])
+        types = {np.dtype(t).name for t in types}
+
+        # These types include 'bool' and all precisions (int8, float32, etc)
+        # The removed types throw errors in correlate or fftconvolve
+        for dtype in ['complex256', 'complex192', 'float128', 'float96',
+                      'str', 'void', 'bytes', 'object', 'unicode', 'string']:
+            if dtype in types:
+                types.remove(dtype)
+
+        args = [(t1, t2, mode) for t1 in types for t2 in types
+                for mode in ['valid', 'full', 'same']]
+
+        # These are random arrays, which makes the test much stronger than
+        # simply convolving two np.ones arrays
+        np.random.seed(42)
+        array_types = {'i': np.random.choice([0, 1], size=n),
+                       'f': np.random.randn(n)}
+        array_types['b'] = array_types['u'] = array_types['i']
+        array_types['c'] = array_types['f'] + 0.5j*array_types['f']
+
+        for t1, t2, mode in args:
+            x1 = array_types[np.dtype(t1).kind].astype(t1)
+            x2 = array_types[np.dtype(t2).kind].astype(t2)
+
+            results = {key: convolve(x1, x2, method=key, mode=mode)
+                       for key in ['fft', 'direct']}
+
+            assert_equal(results['fft'].dtype, results['direct'].dtype)
+
+            if 'bool' in t1 and 'bool' in t2:
+                assert_equal(choose_conv_method(x1, x2), 'direct')
+                continue
+
+            # Found by experiment: approximately the smallest (rtol, atol)
+            # thresholds for which the tests pass.
+            if any([t in {'complex64', 'float32'} for t in [t1, t2]]):
+                kwargs = {'rtol': 1.0e-4, 'atol': 1e-6}
+            elif 'float16' in [t1, t2]:
+                # atol is default for np.allclose
+                kwargs = {'rtol': 1e-3, 'atol': 1e-8}
+            else:
+                # defaults for np.allclose (different from assert_allclose)
+                kwargs = {'rtol': 1e-5, 'atol': 1e-8}
+
+            assert_allclose(results['fft'], results['direct'], **kwargs)
+
+    def test_convolve_method_large_input(self):
+        # This is really a test that convolving two large integers agrees
+        # between the 'fft' and 'direct' methods only while the values
+        # remain exactly representable.
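+        # (Editorial aside, not from upstream scipy: the fft path computes
+        # in float64, whose 53-bit significand cannot represent every
+        # 64-bit integer exactly, so exact agreement with the direct
+        # method is only asserted for n < 50 below; see gh-6076.)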
+        for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]:
+            z = np.array([2**n], dtype=np.int64)
+            fft = convolve(z, z, method='fft')
+            direct = convolve(z, z, method='direct')
+
+            # This is where integer precision starts to limit the result;
+            # issue #6076 has more detail, and more tests may follow once
+            # it is resolved.
+            if n < 50:
+                assert_equal(fft, direct)
+                assert_equal(fft, 2**(2*n))
+                assert_equal(direct, 2**(2*n))
+
+    def test_mismatched_dims(self):
+        # Input arrays should have the same number of dimensions
+        assert_raises(ValueError, convolve, [1], 2, method='direct')
+        assert_raises(ValueError, convolve, 1, [2], method='direct')
+        assert_raises(ValueError, convolve, [1], 2, method='fft')
+        assert_raises(ValueError, convolve, 1, [2], method='fft')
+        assert_raises(ValueError, convolve, [1], [[2]])
+        assert_raises(ValueError, convolve, [3], 2)
+
+
+class _TestConvolve2d(object):
+
+    def test_2d_arrays(self):
+        a = [[1, 2, 3], [3, 4, 5]]
+        b = [[2, 3, 4], [4, 5, 6]]
+        d = array([[2, 7, 16, 17, 12],
+                   [10, 30, 62, 58, 38],
+                   [12, 31, 58, 49, 30]])
+        e = convolve2d(a, b)
+        assert_array_equal(e, d)
+
+    def test_valid_mode(self):
+        e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
+        f = [[1, 2, 3], [3, 4, 5]]
+        h = array([[62, 80, 98, 116, 134]])
+
+        g = convolve2d(e, f, 'valid')
+        assert_array_equal(g, h)
+
+        # See gh-5897
+        g = convolve2d(f, e, 'valid')
+        assert_array_equal(g, h)
+
+    def test_valid_mode_complex(self):
+        e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
+        f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j
+        h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]])
+
+        g = convolve2d(e, f, 'valid')
+        assert_array_almost_equal(g, h)
+
+        # See gh-5897
+        g = convolve2d(f, e, 'valid')
+        assert_array_equal(g, h)
+
+    def test_fillvalue(self):
+        a = [[1, 2, 3], [3, 4, 5]]
+        b = [[2, 3, 4], [4, 5, 6]]
+        fillval = 1
+        c = convolve2d(a, b, 'full', 'fill', fillval)
+        d = array([[24, 26, 31, 34, 32],
+                   [28, 40, 62, 64, 52],
+                   [32, 46, 67, 62, 48]])
+        assert_array_equal(c, d)
+
+    def test_fillvalue_deprecations(self):
+        # Deprecated 2017-07, scipy version 1.0.0
+        with suppress_warnings() as sup:
+            sup.filter(np.ComplexWarning, "Casting complex values to real")
+            r = sup.record(DeprecationWarning, "could not cast `fillvalue`")
+            convolve2d([[1]], [[1, 2]], fillvalue=1j)
+            assert_(len(r) == 1)
+            warnings.filterwarnings(
+                "error", message="could not cast `fillvalue`",
+                category=DeprecationWarning)
+            assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
+                          fillvalue=1j)
+
+        with suppress_warnings():
+            warnings.filterwarnings(
+                "always", message="`fillvalue` must be scalar or an array ",
+                category=DeprecationWarning)
+            assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
+                         fillvalue=[1, 2])
+            warnings.filterwarnings(
+                "error", message="`fillvalue` must be scalar or an array ",
+                category=DeprecationWarning)
+            assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
+                          fillvalue=[1, 2])
+
+    def test_fillvalue_empty(self):
+        # Check that fillvalue being empty raises an error:
+        assert_raises(ValueError, convolve2d, [[1]], [[1, 2]],
+                      fillvalue=[])
+
+    def test_wrap_boundary(self):
+        a = [[1, 2, 3], [3, 4, 5]]
+        b = [[2, 3, 4], [4, 5, 6]]
+        c = convolve2d(a, b, 'full', 'wrap')
+        d = array([[80, 80, 74, 80, 80],
+                   [68, 68, 62, 68, 68],
+                   [80, 80, 74, 80, 80]])
+        assert_array_equal(c, d)
+
+    def test_sym_boundary(self):
+        a = [[1, 2, 3], [3, 4, 5]]
+        b = [[2, 3, 4], [4, 5, 6]]
+        c = convolve2d(a, b, 'full', 'symm')
+        d = array([[34, 30, 44, 62, 66],
+                   [52, 48, 62, 80, 84],
+                   
[82, 78, 92, 110, 114]]) + assert_array_equal(c, d) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) + + +class TestConvolve2d(_TestConvolve2d): + + def test_same_mode(self): + e = [[1, 2, 3], [3, 4, 5]] + f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + g = convolve2d(e, f, 'same') + h = array([[22, 28, 34], + [80, 98, 116]]) + assert_array_equal(g, h) + + def test_valid_mode2(self): + # See gh-5897 + e = [[1, 2, 3], [3, 4, 5]] + f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] + expected = [[62, 80, 98, 116, 134]] + + out = convolve2d(e, f, 'valid') + assert_array_equal(out, expected) + + out = convolve2d(f, e, 'valid') + assert_array_equal(out, expected) + + e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] + f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] + expected = [[27 - 1j, 46. + 2j]] + + out = convolve2d(e, f, 'valid') + assert_array_equal(out, expected) + + # See gh-5897 + out = convolve2d(f, e, 'valid') + assert_array_equal(out, expected) + + def test_consistency_convolve_funcs(self): + # Compare np.convolve, signal.convolve, signal.convolve2d + a = np.arange(5) + b = np.array([3.2, 1.4, 3]) + for mode in ['full', 'valid', 'same']: + assert_almost_equal(np.convolve(a, b, mode=mode), + signal.convolve(a, b, mode=mode)) + assert_almost_equal(np.squeeze( + signal.convolve2d([a], [b], mode=mode)), + signal.convolve(a, b, mode=mode)) + + def test_invalid_dims(self): + assert_raises(ValueError, convolve2d, 3, 4) + assert_raises(ValueError, convolve2d, [3], [4]) + assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) + + +class TestFFTConvolve(object): + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_real(self, axes): + a = array([1, 2, 3]) + expected = array([1, 4, 10, 12, 9.]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_real_axes(self, axes): + a = array([1, 2, 3]) + expected = array([1, 4, 10, 12, 9.]) + + a = np.tile(a, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_complex(self, axes): + a = array([1 + 1j, 2 + 2j, 3 + 3j]) + expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_complex_axes(self, axes): + a = array([1 + 1j, 2 + 2j, 3 + 3j]) + expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) + + a = np.tile(a, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', + None, + [0, 1], + [1, 0], + [0, -1], + [-1, 0], + [-2, 1], + [1, -2], + [-2, -1], + [-1, -2]]) + def test_2d_real_same(self, axes): + a = array([[1, 2, 3], + [4, 5, 6]]) + expected = array([[1, 4, 10, 12, 9], + [8, 26, 56, 54, 36], + [16, 40, 
73, 60, 36]]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[1, 2], + [2, 1], + [1, -1], + [-1, 1], + [-2, 2], + [2, -2], + [-2, -1], + [-1, -2]]) + def test_2d_real_same_axes(self, axes): + a = array([[1, 2, 3], + [4, 5, 6]]) + expected = array([[1, 4, 10, 12, 9], + [8, 26, 56, 54, 36], + [16, 40, 73, 60, 36]]) + + a = np.tile(a, [2, 1, 1]) + expected = np.tile(expected, [2, 1, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', + None, + [0, 1], + [1, 0], + [0, -1], + [-1, 0], + [-2, 1], + [1, -2], + [-2, -1], + [-1, -2]]) + def test_2d_complex_same(self, axes): + a = array([[1 + 2j, 3 + 4j, 5 + 6j], + [2 + 1j, 4 + 3j, 6 + 5j]]) + expected = array([ + [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], + [10j, 44j, 118j, 156j, 122j], + [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] + ]) + + if axes == '': + out = fftconvolve(a, a) + else: + out = fftconvolve(a, a, axes=axes) + + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [[1, 2], + [2, 1], + [1, -1], + [-1, 1], + [-2, 2], + [2, -2], + [-2, -1], + [-1, -2]]) + def test_2d_complex_same_axes(self, axes): + a = array([[1 + 2j, 3 + 4j, 5 + 6j], + [2 + 1j, 4 + 3j, 6 + 5j]]) + expected = array([ + [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], + [10j, 44j, 118j, 156j, 122j], + [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] + ]) + + a = np.tile(a, [2, 1, 1]) + expected = np.tile(expected, [2, 1, 1]) + + out = fftconvolve(a, a, axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_real_same_mode(self, axes): + a = array([1, 2, 3]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected_1 = array([35., 41., 47.]) + expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) + + if axes == '': + out = fftconvolve(a, b, 'same') + else: + out = fftconvolve(a, b, 'same', axes=axes) + assert_array_almost_equal(out, expected_1) + + if axes == '': + out = fftconvolve(b, a, 'same') + else: + out = fftconvolve(b, a, 'same', axes=axes) + assert_array_almost_equal(out, expected_2) + + @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) + def test_real_same_mode_axes(self, axes): + a = array([1, 2, 3]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected_1 = array([35., 41., 47.]) + expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected_1 = np.tile(expected_1, [2, 1]) + expected_2 = np.tile(expected_2, [2, 1]) + + out = fftconvolve(a, b, 'same', axes=axes) + assert_array_almost_equal(out, expected_1) + + out = fftconvolve(b, a, 'same', axes=axes) + assert_array_almost_equal(out, expected_2) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_valid_mode_real(self, axes): + # See gh-5897 + a = array([3, 2, 1]) + b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + if axes == '': + out = fftconvolve(a, b, 'valid') + else: + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + if axes == '': + out = fftconvolve(b, a, 'valid') + else: + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1]]) + def test_valid_mode_real_axes(self, axes): + # See gh-5897 + a = array([3, 2, 1]) + b = 
array([3, 3, 5, 6, 8, 7, 9, 0, 1]) + expected = array([24., 31., 41., 43., 49., 25., 12.]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_valid_mode_complex(self, axes): + a = array([3 - 1j, 2 + 7j, 1 + 0j]) + b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) + expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) + + if axes == '': + out = fftconvolve(a, b, 'valid') + else: + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + if axes == '': + out = fftconvolve(b, a, 'valid') + else: + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_valid_mode_complex_axes(self, axes): + a = array([3 - 1j, 2 + 7j, 1 + 0j]) + b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) + expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + out = fftconvolve(b, a, 'valid', axes=axes) + assert_array_almost_equal(out, expected) + + def test_empty(self): + # Regression test for #1745: crashes with 0-length input. + assert_(fftconvolve([], []).size == 0) + assert_(fftconvolve([5, 6], []).size == 0) + assert_(fftconvolve([], [7]).size == 0) + + def test_zero_rank(self): + a = array(4967) + b = array(3920) + out = fftconvolve(a, b) + assert_equal(out, a * b) + + def test_single_element(self): + a = array([4967]) + b = array([3920]) + out = fftconvolve(a, b) + assert_equal(out, a * b) + + @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) + def test_random_data(self, axes): + np.random.seed(1234) + a = np.random.rand(1233) + 1j * np.random.rand(1233) + b = np.random.rand(1321) + 1j * np.random.rand(1321) + expected = np.convolve(a, b, 'full') + + if axes == '': + out = fftconvolve(a, b, 'full') + else: + out = fftconvolve(a, b, 'full', axes=axes) + assert_(np.allclose(out, expected, rtol=1e-10)) + + @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) + def test_random_data_axes(self, axes): + np.random.seed(1234) + a = np.random.rand(1233) + 1j * np.random.rand(1233) + b = np.random.rand(1321) + 1j * np.random.rand(1321) + expected = np.convolve(a, b, 'full') + + a = np.tile(a, [2, 1]) + b = np.tile(b, [2, 1]) + expected = np.tile(expected, [2, 1]) + + out = fftconvolve(a, b, 'full', axes=axes) + assert_(np.allclose(out, expected, rtol=1e-10)) + + @pytest.mark.parametrize('axes', [[1, 4], + [4, 1], + [1, -1], + [-1, 1], + [-4, 4], + [4, -4], + [-4, -1], + [-1, -4]]) + def test_random_data_multidim_axes(self, axes): + np.random.seed(1234) + a = np.random.rand(123, 222) + 1j * np.random.rand(123, 222) + b = np.random.rand(132, 111) + 1j * np.random.rand(132, 111) + expected = convolve2d(a, b, 'full') + + a = a[:, :, None, None, None] + b = b[:, :, None, None, None] + expected = expected[:, :, None, None, None] + + a = np.rollaxis(a.swapaxes(0, 2), 1, 5) + b = np.rollaxis(b.swapaxes(0, 2), 1, 5) + expected = np.rollaxis(expected.swapaxes(0, 2), 1, 5) + + # use 1 for dimension 2 in a and 3 in b to test broadcasting + a = np.tile(a, [2, 1, 3, 1, 1]) + b = np.tile(b, [2, 1, 1, 4, 1]) + expected = np.tile(expected, [2, 1, 3, 4, 1]) + + out = fftconvolve(a, b, 'full', axes=axes) + 
assert_(np.allclose(out, expected, rtol=1e-10)) + + @pytest.mark.slow + @pytest.mark.parametrize( + 'n', + list(range(1, 100)) + + list(range(1000, 1500)) + + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) + def test_many_sizes(self, n): + a = np.random.rand(n) + 1j * np.random.rand(n) + b = np.random.rand(n) + 1j * np.random.rand(n) + expected = np.convolve(a, b, 'full') + + out = fftconvolve(a, b, 'full') + assert_allclose(out, expected, atol=1e-10) + + out = fftconvolve(a, b, 'full', axes=[0]) + assert_allclose(out, expected, atol=1e-10) + + def test_invalid_shapes(self): + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + with assert_raises(ValueError, + match="For 'valid' mode, one must be at least " + "as large as the other in every dimension"): + fftconvolve(a, b, mode='valid') + + def test_invalid_shapes_axes(self): + a = np.zeros([5, 6, 2, 1]) + b = np.zeros([5, 6, 3, 1]) + with assert_raises(ValueError, + match=r"incompatible shapes for in1 and in2:" + r" \(5L?, 6L?, 2L?, 1L?\) and" + r" \(5L?, 6L?, 3L?, 1L?\)"): + fftconvolve(a, b, axes=[0, 1]) + + @pytest.mark.parametrize('a,b', + [([1], 2), + (1, [2]), + ([3], [[2]])]) + def test_mismatched_dims(self, a, b): + with assert_raises(ValueError, + match="in1 and in2 should have the same" + " dimensionality"): + fftconvolve(a, b) + + def test_invalid_flags(self): + with assert_raises(ValueError, + match="acceptable mode flags are 'valid'," + " 'same', or 'full'"): + fftconvolve([1], [2], mode='chips') + + with assert_raises(ValueError, + match="when provided, axes cannot be empty"): + fftconvolve([1], [2], axes=[]) + + with assert_raises(ValueError, + match="when given, axes values must be a scalar" + " or vector"): + fftconvolve([1], [2], axes=[[1, 2], [3, 4]]) + + with assert_raises(ValueError, + match="when given, axes values must be integers"): + fftconvolve([1], [2], axes=[1., 2., 3., 4.]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + fftconvolve([1], [2], axes=[1]) + + with assert_raises(ValueError, + match="axes exceeds dimensionality of input"): + fftconvolve([1], [2], axes=[-2]) + + with assert_raises(ValueError, + match="all axes must be unique"): + fftconvolve([1], [2], axes=[0, 0]) + + +class TestMedFilt(object): + + def test_basic(self): + f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], + [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], + [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], + [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], + [50, 50, 50, 50, 50, 46, 34, 9, 21, 66], + [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], + [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], + [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], + [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], + [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] + + d = signal.medfilt(f, [7, 3]) + e = signal.medfilt2d(np.array(f, float), [7, 3]) + assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], + [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], + [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], + [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], + [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], + [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], + [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], + [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], + [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], + [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]) + assert_array_equal(d, e) + + def test_none(self): + # Ticket #1124. Ensure this does not segfault. + signal.medfilt(None) + # Expand on this test to avoid a regression with possible contiguous + # numpy arrays that have odd strides. 
The stride value below gets + # us into wrong memory if used (but it does not need to be used) + dummy = np.arange(10, dtype=np.float64) + a = dummy[5:6] + a.strides = 16 + assert_(signal.medfilt(a, 1) == 5.) + + def test_refcounting(self): + # Check a refcounting-related crash + a = Decimal(123) + x = np.array([a, a], dtype=object) + if hasattr(sys, 'getrefcount'): + n = 2 * sys.getrefcount(a) + else: + n = 10 + # Shouldn't segfault: + for j in range(n): + signal.medfilt(x) + if hasattr(sys, 'getrefcount'): + assert_(sys.getrefcount(a) < n) + assert_equal(x, [a, a]) + + +class TestWiener(object): + + def test_basic(self): + g = array([[5, 6, 4, 3], + [3, 5, 6, 2], + [2, 3, 5, 6], + [1, 6, 9, 7]], 'd') + h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], + [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], + [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], + [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) + assert_array_almost_equal(signal.wiener(g), h, decimal=6) + assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) + + +class TestResample(object): + + def test_basic(self): + # Some basic tests + + # Regression test for issue #3603. + # window.shape must equal to sig.shape[0] + sig = np.arange(128) + num = 256 + win = signal.get_window(('kaiser', 8.0), 160) + assert_raises(ValueError, signal.resample, sig, num, window=win) + + # Other degenerate conditions + assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) + assert_raises(ValueError, signal.resample_poly, sig, 1, 0) + + # test for issue #6505 - should not modify window.shape when axis ≠ 0 + sig2 = np.tile(np.arange(160), (2,1)) + signal.resample(sig2, num, axis=-1, window=win) + assert_(win.shape == (160,)) + + def test_fft(self): + # Test FFT-based resampling + self._test_data(method='fft') + + def test_polyphase(self): + # Test polyphase resampling + self._test_data(method='polyphase') + + def test_polyphase_extfilter(self): + # Test external specification of downsampling filter + self._test_data(method='polyphase', ext=True) + + def test_mutable_window(self): + # Test that a mutable window is not modified + impulse = np.zeros(3) + window = np.random.RandomState(0).randn(2) + window_orig = window.copy() + signal.resample_poly(impulse, 5, 1, window=window) + assert_array_equal(window, window_orig) + + def test_output_float32(self): + # Test that float32 inputs yield a float32 output + x = np.arange(10, dtype=np.float32) + h = np.array([1,1,1], dtype=np.float32) + y = signal.resample_poly(x, 1, 2, window=h) + assert_(y.dtype == np.float32) + + def _test_data(self, method, ext=False): + # Test resampling of sinusoids and random noise (1-sec) + rate = 100 + rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] + + # Sinusoids, windowed to avoid edge artifacts + t = np.arange(rate) / float(rate) + freqs = np.array((1., 10., 40.))[:, np.newaxis] + x = np.sin(2 * np.pi * freqs * t) * hann(rate) + + for rate_to in rates_to: + t_to = np.arange(rate_to) / float(rate_to) + y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) + if method == 'fft': + y_resamps = signal.resample(x, rate_to, axis=-1) + else: + if ext and rate_to != rate: + # Match default window design + g = gcd(rate_to, rate) + up = rate_to // g + down = rate // g + max_rate = max(up, down) + f_c = 1. 
/ max_rate
+                    half_len = 10 * max_rate
+                    window = signal.firwin(2 * half_len + 1, f_c,
+                                           window=('kaiser', 5.0))
+                    polyargs = {'window': window}
+                else:
+                    polyargs = {}
+
+                y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1,
+                                                 **polyargs)
+
+            for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs):
+                if freq >= 0.5 * rate_to:
+                    y_to.fill(0.)  # mostly low-passed away
+                    assert_allclose(y_resamp, y_to, atol=1e-3)
+                else:
+                    assert_array_equal(y_to.shape, y_resamp.shape)
+                    corr = np.corrcoef(y_to, y_resamp)[0, 1]
+                    assert_(corr > 0.99, msg=(corr, rate, rate_to))
+
+        # Random data
+        rng = np.random.RandomState(0)
+        x = hann(rate) * np.cumsum(rng.randn(rate))  # low-pass, windowed
+        for rate_to in rates_to:
+            # random data
+            t_to = np.arange(rate_to) / float(rate_to)
+            y_to = np.interp(t_to, t, x)
+            if method == 'fft':
+                y_resamp = signal.resample(x, rate_to)
+            else:
+                y_resamp = signal.resample_poly(x, rate_to, rate)
+            assert_array_equal(y_to.shape, y_resamp.shape)
+            corr = np.corrcoef(y_to, y_resamp)[0, 1]
+            assert_(corr > 0.99, msg=corr)
+
+        # More tests of fft method (Master 0.18.1 fails these)
+        if method == 'fft':
+            x1 = np.array([1.+0.j,0.+0.j])
+            y1_test = signal.resample(x1,4)
+            y1_true = np.array([1.+0.j,0.5+0.j,0.+0.j,0.5+0.j])  # upsampling a complex array
+            assert_allclose(y1_test, y1_true, atol=1e-12)
+            x2 = np.array([1.,0.5,0.,0.5])
+            y2_test = signal.resample(x2,2)  # downsampling a real array
+            y2_true = np.array([1.,0.])
+            assert_allclose(y2_test, y2_true, atol=1e-12)
+
+    def test_poly_vs_filtfilt(self):
+        # Check that up=1.0 gives same answer as filtfilt + slicing
+        random_state = np.random.RandomState(17)
+        try_types = (int, np.float32, np.complex64, float, complex)
+        size = 10000
+        down_factors = [2, 11, 79]
+
+        for dtype in try_types:
+            x = random_state.randn(size).astype(dtype)
+            if dtype in (np.complex64, np.complex128):
+                x += 1j * random_state.randn(size)
+
+            # resample_poly assumes zeros outside of the signal, whereas
+            # filtfilt can only constant-pad.  Make them equivalent:
+            x[0] = 0
+            x[-1] = 0
+
+            for down in down_factors:
+                h = signal.firwin(31, 1. 
/ down, window='hamming') + yf = filtfilt(h, 1.0, x, padtype='constant')[::down] + + # Need to pass convolved version of filter to resample_poly, + # since filtfilt does forward and backward, but resample_poly + # only goes forward + hc = convolve(h, h[::-1]) + y = signal.resample_poly(x, 1, down, window=hc) + assert_allclose(yf, y, atol=1e-7, rtol=1e-7) + + def test_correlate1d(self): + for down in [2, 4]: + for nx in range(1, 40, down): + for nweights in (32, 33): + x = np.random.random((nx,)) + weights = np.random.random((nweights,)) + y_g = correlate1d(x, weights[::-1], mode='constant') + y_s = signal.resample_poly(x, up=1, down=down, window=weights) + assert_allclose(y_g[::down], y_s) + + +class TestCSpline1DEval(object): + + def test_basic(self): + y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0]) + x = arange(len(y)) + dx = x[1] - x[0] + cj = signal.cspline1d(y) + + x2 = arange(len(y) * 10.0) / 10.0 + y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0]) + + # make sure interpolated values are on knot points + assert_array_almost_equal(y2[::10], y, decimal=5) + + def test_complex(self): + # create some smoothly varying complex signal to interpolate + x = np.arange(2) + y = np.zeros(x.shape, dtype=np.complex64) + T = 10.0 + f = 1.0 / T + y = np.exp(2.0J * np.pi * f * x) + + # get the cspline transform + cy = signal.cspline1d(y) + + # determine new test x value and interpolate + xnew = np.array([0.5]) + ynew = signal.cspline1d_eval(cy, xnew) + + assert_equal(ynew.dtype, y.dtype) + +class TestOrderFilt(object): + + def test_basic(self): + assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1), + [2, 3, 2]) + + +class _TestLinearFilter(object): + def generate(self, shape): + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + return self.convert_dtype(x) + + def convert_dtype(self, arr): + if self.dtype == np.dtype('O'): + arr = np.asarray(arr) + out = np.empty(arr.shape, self.dtype) + iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'], + [['readonly'],['writeonly']]) + for x, y in iter: + y[...] 
= self.type(x[()]) + return out + else: + return np.array(arr, self.dtype, copy=False) + + def test_rank_1_IIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, -0.5]) + y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) + assert_array_almost_equal(lfilter(b, a, x), y_r) + + def test_rank_1_FIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 1]) + a = self.convert_dtype([1]) + y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) + assert_array_almost_equal(lfilter(b, a, x), y_r) + + def test_rank_1_IIR_init_cond(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([0.5, -0.5]) + zi = self.convert_dtype([1, 2]) + y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) + zf_r = self.convert_dtype([13, -10]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_1_FIR_init_cond(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 1, 1]) + a = self.convert_dtype([1]) + zi = self.convert_dtype([1, 1]) + y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) + zf_r = self.convert_dtype([9, 5]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_2_IIR_axis_0(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], + [6, 4, 2]]) + y = lfilter(b, a, x, axis=0) + assert_array_almost_equal(y_r2_a0, y) + + def test_rank_2_IIR_axis_1(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], + [18, -16, 18]]) + y = lfilter(b, a, x, axis=1) + assert_array_almost_equal(y_r2_a1, y) + + def test_rank_2_IIR_axis_0_init_cond(self): + x = self.generate((4, 3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + zi = self.convert_dtype(np.ones((4,1))) + + y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], + [19, -17, 19]]) + zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] + y, zf = lfilter(b, a, x, axis=1, zi=zi) + assert_array_almost_equal(y_r2_a0_1, y) + assert_array_almost_equal(zf, zf_r) + + def test_rank_2_IIR_axis_1_init_cond(self): + x = self.generate((4,3)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + zi = self.convert_dtype(np.ones((1,3))) + + y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], + [1, 3, 5], [5, 3, 1]]) + zf_r = self.convert_dtype([[-23, -23, -23]]) + y, zf = lfilter(b, a, x, axis=0, zi=zi) + assert_array_almost_equal(y_r2_a0_0, y) + assert_array_almost_equal(zf, zf_r) + + def test_rank_3_IIR(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + + for axis in range(x.ndim): + y = lfilter(b, a, x, axis) + y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) + assert_array_almost_equal(y, y_r) + + def test_rank_3_IIR_init_cond(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, -1]) + a = self.convert_dtype([0.5, 0.5]) + + for axis in range(x.ndim): + zi_shape = list(x.shape) + zi_shape[axis] = 1 + zi = self.convert_dtype(np.ones(zi_shape)) + zi1 = self.convert_dtype([1]) + y, zf = lfilter(b, a, x, axis, zi) + lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] + lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] + y_r = np.apply_along_axis(lf0, axis, x) + zf_r = 
np.apply_along_axis(lf1, axis, x) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_rank_3_FIR(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + + for axis in range(x.ndim): + y = lfilter(b, a, x, axis) + y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) + assert_array_almost_equal(y, y_r) + + def test_rank_3_FIR_init_cond(self): + x = self.generate((4, 3, 2)) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + + for axis in range(x.ndim): + zi_shape = list(x.shape) + zi_shape[axis] = 2 + zi = self.convert_dtype(np.ones(zi_shape)) + zi1 = self.convert_dtype([1, 1]) + y, zf = lfilter(b, a, x, axis, zi) + lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0] + lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1] + y_r = np.apply_along_axis(lf0, axis, x) + zf_r = np.apply_along_axis(lf1, axis, x) + assert_array_almost_equal(y, y_r) + assert_array_almost_equal(zf, zf_r) + + def test_zi_pseudobroadcast(self): + x = self.generate((4, 5, 20)) + b,a = signal.butter(8, 0.2, output='ba') + b = self.convert_dtype(b) + a = self.convert_dtype(a) + zi_size = b.shape[0] - 1 + + # lfilter requires x.ndim == zi.ndim exactly. However, zi can have + # length 1 dimensions. + zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) + zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) + + y_full, zf_full = lfilter(b, a, x, zi=zi_full) + y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) + + assert_array_almost_equal(y_sing, y_full) + assert_array_almost_equal(zf_full, zf_sing) + + # lfilter does not prepend ones + assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) + + def test_scalar_a(self): + # a can be a scalar. + x = self.generate(6) + b = self.convert_dtype([1, 0, -1]) + a = self.convert_dtype([1]) + y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) + + y = lfilter(b, a[0], x) + assert_array_almost_equal(y, y_r) + + def test_zi_some_singleton_dims(self): + # lfilter doesn't really broadcast (no prepending of 1's). But does + # do singleton expansion if x and zi have the same ndim. This was + # broken only if a subset of the axes were singletons (gh-4681). 
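+        # (Editorial note: below, x has shape (3, 2, 5) while zi has shape
+        # (3, 1, 4); the length-1 axis of zi is expanded to match x, as in
+        # NumPy broadcasting, but no new leading axes are prepended.)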
+ x = self.convert_dtype(np.zeros((3,2,5), 'l')) + b = self.convert_dtype(np.ones(5, 'l')) + a = self.convert_dtype(np.array([1,0,0])) + zi = np.ones((3,1,4), 'l') + zi[1,:,:] *= 2 + zi[2,:,:] *= 3 + zi = self.convert_dtype(zi) + + zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) + y_expected = np.zeros((3,2,5), 'l') + y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] + y_expected = self.convert_dtype(y_expected) + + # IIR + y_iir, zf_iir = lfilter(b, a, x, -1, zi) + assert_array_almost_equal(y_iir, y_expected) + assert_array_almost_equal(zf_iir, zf_expected) + + # FIR + y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) + assert_array_almost_equal(y_fir, y_expected) + assert_array_almost_equal(zf_fir, zf_expected) + + def base_bad_size_zi(self, b, a, x, axis, zi): + b = self.convert_dtype(b) + a = self.convert_dtype(a) + x = self.convert_dtype(x) + zi = self.convert_dtype(zi) + assert_raises(ValueError, lfilter, b, a, x, axis, zi) + + def test_bad_size_zi(self): + # rank 1 + x1 = np.arange(6) + self.base_bad_size_zi([1], [1], x1, -1, [1]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) + self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) + self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) + self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) + self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) + + # rank 2 + x2 = np.arange(12).reshape((4,3)) + # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) + self.base_bad_size_zi([1], [1], x2, 0, [0]) + + # for each of these there are 5 cases tested (in this order): + # 1. not deep enough, right # elements + # 2. too deep, right # elements + # 3. right depth, right # elements, transposed + # 4. right depth, too few elements + # 5. 
right depth, too many elements + + self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) + self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) + + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) + self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) + + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) + + # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) + self.base_bad_size_zi([1], [1], x2, 1, [0]) + + self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) + self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]]) + + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) + self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) + + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) + self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) + + def test_empty_zi(self): + # Regression test for #880: empty array for zi crashes. 
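+        # (Editorial note: with b == a == [1] the filter is the identity
+        # and needs max(len(a), len(b)) - 1 == 0 initial conditions, so an
+        # empty zi is legal; the output must equal x and zf stays empty.)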
+ x = self.generate((5,)) + a = self.convert_dtype([1]) + b = self.convert_dtype([1]) + zi = self.convert_dtype([]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, x) + assert_equal(zf.dtype, self.dtype) + assert_equal(zf.size, 0) + + def test_lfiltic_bad_zi(self): + # Regression test for #3699: bad initial conditions + a = self.convert_dtype([1]) + b = self.convert_dtype([1]) + # "y" sets the datatype of zi, so it truncates if int + zi = lfiltic(b, a, [1., 0]) + zi_1 = lfiltic(b, a, [1, 0]) + zi_2 = lfiltic(b, a, [True, False]) + assert_array_equal(zi, zi_1) + assert_array_equal(zi, zi_2) + + def test_short_x_FIR(self): + # regression test for #5116 + # x shorter than b, with non None zi fails + a = self.convert_dtype([1]) + b = self.convert_dtype([1, 0, -1]) + zi = self.convert_dtype([2, 7]) + x = self.convert_dtype([72]) + ye = self.convert_dtype([74]) + zfe = self.convert_dtype([7, -72]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, ye) + assert_array_almost_equal(zf, zfe) + + def test_short_x_IIR(self): + # regression test for #5116 + # x shorter than b, with non None zi fails + a = self.convert_dtype([1, 1]) + b = self.convert_dtype([1, 0, -1]) + zi = self.convert_dtype([2, 7]) + x = self.convert_dtype([72]) + ye = self.convert_dtype([74]) + zfe = self.convert_dtype([-67, -72]) + y, zf = lfilter(b, a, x, zi=zi) + assert_array_almost_equal(y, ye) + assert_array_almost_equal(zf, zfe) + + def test_do_not_modify_a_b_IIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, -1]) + b0 = b.copy() + a = self.convert_dtype([0.5, -0.5]) + a0 = a.copy() + y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) + y_f = lfilter(b, a, x) + assert_array_almost_equal(y_f, y_r) + assert_equal(b, b0) + assert_equal(a, a0) + + def test_do_not_modify_a_b_FIR(self): + x = self.generate((6,)) + b = self.convert_dtype([1, 0, 1]) + b0 = b.copy() + a = self.convert_dtype([2]) + a0 = a.copy() + y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) + y_f = lfilter(b, a, x) + assert_array_almost_equal(y_f, y_r) + assert_equal(b, b0) + assert_equal(a, a0) + + +class TestLinearFilterFloat32(_TestLinearFilter): + dtype = np.dtype('f') + + +class TestLinearFilterFloat64(_TestLinearFilter): + dtype = np.dtype('d') + + +class TestLinearFilterFloatExtended(_TestLinearFilter): + dtype = np.dtype('g') + + +class TestLinearFilterComplex64(_TestLinearFilter): + dtype = np.dtype('F') + + +class TestLinearFilterComplex128(_TestLinearFilter): + dtype = np.dtype('D') + + +class TestLinearFilterComplexExtended(_TestLinearFilter): + dtype = np.dtype('G') + +class TestLinearFilterDecimal(_TestLinearFilter): + dtype = np.dtype('O') + + def type(self, x): + return Decimal(str(x)) + + +class TestLinearFilterObject(_TestLinearFilter): + dtype = np.dtype('O') + type = float + + +def test_lfilter_bad_object(): + # lfilter: object arrays with non-numeric objects raise TypeError. + # Regression test for ticket #1452. 
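+    # (Editorial note: object arrays are filtered with Python-level
+    # arithmetic, so a non-numeric entry such as None has no defined
+    # multiply/add and should raise TypeError rather than crash.)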
+    assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0])
+    assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0])
+    assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0])
+
+
+def test_lfilter_notimplemented_input():
+    # Should not crash, gh-7991
+    assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5])
+
+
+@pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short,
+                                np.uint, int, np.ulonglong, np.longlong,
+                                np.float32, np.float64, np.longdouble,
+                                Decimal])
+class TestCorrelateReal(object):
+    def _setup_rank1(self, dt):
+        a = np.linspace(0, 3, 4).astype(dt)
+        b = np.linspace(1, 2, 2).astype(dt)
+
+        y_r = np.array([0, 2, 5, 8, 3]).astype(dt)
+        return a, b, y_r
+
+    def equal_tolerance(self, res_dt):
+        # default value of keyword
+        decimal = 6
+        try:
+            dt_info = np.finfo(res_dt)
+            if hasattr(dt_info, 'resolution'):
+                decimal = int(-0.5*np.log10(dt_info.resolution))
+        except Exception:
+            pass
+        return decimal
+
+    def equal_tolerance_fft(self, res_dt):
+        # FFT implementations convert longdouble arguments down to
+        # double so don't expect better precision, see gh-9520
+        if res_dt == np.longdouble:
+            return self.equal_tolerance(np.double)
+        else:
+            return self.equal_tolerance(res_dt)
+
+    def test_method(self, dt):
+        if dt == Decimal:
+            method = choose_conv_method([Decimal(4)], [Decimal(3)])
+            assert_equal(method, 'direct')
+        else:
+            a, b, y_r = self._setup_rank3(dt)
+            y_fft = correlate(a, b, method='fft')
+            y_direct = correlate(a, b, method='direct')
+
+            assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype))
+            assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype))
+            assert_equal(y_fft.dtype, dt)
+            assert_equal(y_direct.dtype, dt)
+
+    def test_rank1_valid(self, dt):
+        a, b, y_r = self._setup_rank1(dt)
+        y = correlate(a, b, 'valid')
+        assert_array_almost_equal(y, y_r[1:4])
+        assert_equal(y.dtype, dt)
+
+        # See gh-5897
+        y = correlate(b, a, 'valid')
+        assert_array_almost_equal(y, y_r[1:4][::-1])
+        assert_equal(y.dtype, dt)
+
+    def test_rank1_same(self, dt):
+        a, b, y_r = self._setup_rank1(dt)
+        y = correlate(a, b, 'same')
+        assert_array_almost_equal(y, y_r[:-1])
+        assert_equal(y.dtype, dt)
+
+    def test_rank1_full(self, dt):
+        a, b, y_r = self._setup_rank1(dt)
+        y = correlate(a, b, 'full')
+        assert_array_almost_equal(y, y_r)
+        assert_equal(y.dtype, dt)
+
+    def _setup_rank3(self, dt):
+        a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(
+            dt)
+        b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(
+            dt)
+
+        y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.],
+                      [46., 432., 1062., 1840., 2672., 1698., 864., 266.],
+                      [134., 736., 1662., 2768., 3920., 2418., 1168., 314.],
+                      [260., 952., 1932., 3056., 4208., 2580., 1240., 332.],
+                      [202., 664., 1290., 1984., 2688., 1590., 712., 150.],
+                      [114., 344., 642., 960., 1280., 726., 296., 38.]],
+
+                     [[23., 400., 1035., 1832., 2696., 1737., 904., 293.],
+                      [134., 920., 2166., 3680., 5280., 3306., 1640., 474.],
+                      [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],
+                      [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],
+                      [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],
+                      [241., 700., 1281., 1888., 2496., 1383., 532., 39.]],
+
+                     [[22., 214., 528., 916., 1332., 846., 430., 132.],
+                      [86., 484., 1098., 1832., 2600., 1602., 772., 206.],
+                      [188., 802., 1698., 2732., 3788., 2256., 1018., 218.],
+                      [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],
+                      [230., 692., 1290., 1928., 2568., 1458., 596., 
78.],
+                      [126., 354., 636., 924., 1212., 654., 234., 0.]]],
+                    dtype=dt)
+
+        return a, b, y_r
+
+    def test_rank3_valid(self, dt):
+        a, b, y_r = self._setup_rank3(dt)
+        y = correlate(a, b, "valid")
+        assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
+        assert_equal(y.dtype, dt)
+
+        # See gh-5897
+        y = correlate(b, a, "valid")
+        assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1])
+        assert_equal(y.dtype, dt)
+
+    def test_rank3_same(self, dt):
+        a, b, y_r = self._setup_rank3(dt)
+        y = correlate(a, b, "same")
+        assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
+        assert_equal(y.dtype, dt)
+
+    def test_rank3_all(self, dt):
+        a, b, y_r = self._setup_rank3(dt)
+        y = correlate(a, b)
+        assert_array_almost_equal(y, y_r)
+        assert_equal(y.dtype, dt)
+
+
+class TestCorrelate(object):
+    # Tests that don't depend on dtype
+
+    def test_invalid_shapes(self):
+        # By "invalid," we mean that no one
+        # array has dimensions that are all at
+        # least as large as the corresponding
+        # dimensions of the other array. This
+        # setup should throw a ValueError.
+        a = np.arange(1, 7).reshape((2, 3))
+        b = np.arange(-6, 0).reshape((3, 2))
+
+        assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'})
+        assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'})
+
+    def test_invalid_params(self):
+        a = [3, 4, 5]
+        b = [1, 2, 3]
+        assert_raises(ValueError, correlate, a, b, mode='spam')
+        assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft')
+        assert_raises(ValueError, correlate, a, b, mode='ham', method='direct')
+        assert_raises(ValueError, correlate, a, b, mode='full', method='bacon')
+        assert_raises(ValueError, correlate, a, b, mode='same', method='bacon')
+
+    def test_mismatched_dims(self):
+        # Input arrays should have the same number of dimensions
+        assert_raises(ValueError, correlate, [1], 2, method='direct')
+        assert_raises(ValueError, correlate, 1, [2], method='direct')
+        assert_raises(ValueError, correlate, [1], 2, method='fft')
+        assert_raises(ValueError, correlate, 1, [2], method='fft')
+        assert_raises(ValueError, correlate, [1], [[2]])
+        assert_raises(ValueError, correlate, [3], 2)
+
+    def test_numpy_fastpath(self):
+        a = [1, 2, 3]
+        b = [4, 5]
+        assert_allclose(correlate(a, b, mode='same'), [5, 14, 23])
+
+        a = [1, 2, 3]
+        b = [4, 5, 6]
+        assert_allclose(correlate(a, b, mode='same'), [17, 32, 23])
+        assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12])
+        assert_allclose(correlate(a, b, mode='valid'), [32])
+
+
+@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble])
+class TestCorrelateComplex(object):
+    # The decimal precision to be used for comparing results.
+    # This value will be passed as the 'decimal' keyword argument of
+    # assert_array_almost_equal().
+    # Since correlate may choose to use the FFT method, which converts
+    # longdoubles to doubles internally, don't expect better precision
+    # for longdouble than for double (see gh-9520).
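+    # (Editorial note: np.finfo(dt).precision is the number of reliable
+    # decimal digits for the dtype (15 for cdouble), so the method below
+    # compares to roughly two-thirds of the available digits.)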
+ + def decimal(self, dt): + if dt == np.clongdouble: + dt = np.cdouble + return int(2 * np.finfo(dt).precision / 3) + + def _setup_rank1(self, dt, mode): + np.random.seed(9) + a = np.random.randn(10).astype(dt) + a += 1j * np.random.randn(10).astype(dt) + b = np.random.randn(8).astype(dt) + b += 1j * np.random.randn(8).astype(dt) + + y_r = (correlate(a.real, b.real, mode=mode) + + correlate(a.imag, b.imag, mode=mode)).astype(dt) + y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + + correlate(a.imag, b.real, mode=mode)) + return a, b, y_r + + def test_rank1_valid(self, dt): + a, b, y_r = self._setup_rank1(dt, 'valid') + y = correlate(a, b, 'valid') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + # See gh-5897 + y = correlate(b, a, 'valid') + assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_rank1_same(self, dt): + a, b, y_r = self._setup_rank1(dt, 'same') + y = correlate(a, b, 'same') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_rank1_full(self, dt): + a, b, y_r = self._setup_rank1(dt, 'full') + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) + assert_equal(y.dtype, dt) + + def test_swap_full(self, dt): + d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) + k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) + y = correlate(d, k) + assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) + + def test_swap_same(self, dt): + d = [0.+0.j, 1.+1.j, 2.+2.j] + k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] + y = correlate(d, k, mode="same") + assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) + + def test_rank3(self, dt): + a = np.random.randn(10, 8, 6).astype(dt) + a += 1j * np.random.randn(10, 8, 6).astype(dt) + b = np.random.randn(8, 6, 4).astype(dt) + b += 1j * np.random.randn(8, 6, 4).astype(dt) + + y_r = (correlate(a.real, b.real) + + correlate(a.imag, b.imag)).astype(dt) + y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) + + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) + assert_equal(y.dtype, dt) + + def test_rank0(self, dt): + a = np.array(np.random.randn()).astype(dt) + a += 1j * np.array(np.random.randn()).astype(dt) + b = np.array(np.random.randn()).astype(dt) + b += 1j * np.array(np.random.randn()).astype(dt) + + y_r = (correlate(a.real, b.real) + + correlate(a.imag, b.imag)).astype(dt) + y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) + + y = correlate(a, b, 'full') + assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) + assert_equal(y.dtype, dt) + + assert_equal(correlate([1], [2j]), correlate(1, 2j)) + assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) + assert_equal(correlate([3j], [4]), correlate(3j, 4)) + + +class TestCorrelate2d(object): + + def test_consistency_correlate_funcs(self): + # Compare np.correlate, signal.correlate, signal.correlate2d + a = np.arange(5) + b = np.array([3.2, 1.4, 3]) + for mode in ['full', 'valid', 'same']: + assert_almost_equal(np.correlate(a, b, mode=mode), + signal.correlate(a, b, mode=mode)) + assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], + mode=mode)), + signal.correlate(a, b, mode=mode)) + + # See gh-5897 + if mode == 'valid': + assert_almost_equal(np.correlate(b, a, mode=mode), + signal.correlate(b, a, mode=mode)) + assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], + mode=mode)), + signal.correlate(b, a, 
mode=mode)) + + def test_invalid_shapes(self): + # By "invalid," we mean that no one + # array has dimensions that are all at + # least as large as the corresponding + # dimensions of the other array. This + # setup should throw a ValueError. + a = np.arange(1, 7).reshape((2, 3)) + b = np.arange(-6, 0).reshape((3, 2)) + + assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) + assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) + + def test_complex_input(self): + assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) + assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) + assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) + + +class TestLFilterZI(object): + + def test_basic(self): + a = np.array([1.0, -1.0, 0.5]) + b = np.array([1.0, 0.0, 2.0]) + zi_expected = np.array([5.0, -1.0]) + zi = lfilter_zi(b, a) + assert_array_almost_equal(zi, zi_expected) + + def test_scale_invariance(self): + # Regression test. There was a bug in which b was not correctly + # rescaled when a[0] was nonzero. + b = np.array([2, 8, 5]) + a = np.array([1, 1, 8]) + zi1 = lfilter_zi(b, a) + zi2 = lfilter_zi(2*b, 2*a) + assert_allclose(zi2, zi1, rtol=1e-12) + + +class TestFiltFilt(object): + filtfilt_kind = 'tf' + + def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, + method='pad', irlen=None): + if self.filtfilt_kind == 'tf': + b, a = zpk2tf(*zpk) + return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) + elif self.filtfilt_kind == 'sos': + sos = zpk2sos(*zpk) + return sosfiltfilt(sos, x, axis, padtype, padlen) + + def test_basic(self): + zpk = tf2zpk([1, 2, 3], [1, 2, 3]) + out = self.filtfilt(zpk, np.arange(12)) + assert_allclose(out, arange(12), atol=1e-11) + + def test_sine(self): + rate = 2000 + t = np.linspace(0, 1.0, rate + 1) + # A signal with low frequency and a high frequency. + xlow = np.sin(5 * 2 * np.pi * t) + xhigh = np.sin(250 * 2 * np.pi * t) + x = xlow + xhigh + + zpk = butter(8, 0.125, output='zpk') + # r is the magnitude of the largest pole. + r = np.abs(zpk[1]).max() + eps = 1e-5 + # n estimates the number of steps for the + # transient to decay by a factor of eps. + n = int(np.ceil(np.log(eps) / np.log(r))) + + # High order lowpass filter... + y = self.filtfilt(zpk, x, padlen=n) + # Result should be just xlow. + err = np.abs(y - xlow).max() + assert_(err < 1e-4) + + # A 2D case. + x2d = np.vstack([xlow, xlow + xhigh]) + y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) + assert_equal(y2d.shape, x2d.shape) + err = np.abs(y2d - xlow).max() + assert_(err < 1e-4) + + # Use the previous result to check the use of the axis keyword. + # (Regression test for ticket #1620) + y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) + assert_equal(y2d, y2dt.T) + + def test_axis(self): + # Test the 'axis' keyword on a 3D array. 
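+        # (Editorial note: filtering along axis k of x should match
+        # filtering along axis 0 after swapping axes 0 and k, then
+        # swapping back; that is exactly what the assertions below check.)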
+        x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)
+        zpk = butter(3, 0.125, output='zpk')
+        y0 = self.filtfilt(zpk, x, padlen=0, axis=0)
+        y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1)
+        assert_array_equal(y0, np.swapaxes(y1, 0, 1))
+        y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2)
+        assert_array_equal(y0, np.swapaxes(y2, 0, 2))
+
+    def test_acoeff(self):
+        if self.filtfilt_kind != 'tf':
+            return  # only necessary for TF
+        # test for 'a' coefficient as single number
+        out = signal.filtfilt([.5, .5], 1, np.arange(10))
+        assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14)
+
+    def test_gust_simple(self):
+        if self.filtfilt_kind != 'tf':
+            pytest.skip('gust only implemented for TF systems')
+        # The input array has length 2.  The exact solution for this case
+        # was computed "by hand".
+        x = np.array([1.0, 2.0])
+        b = np.array([0.5])
+        a = np.array([1.0, -0.5])
+        y, z1, z2 = _filtfilt_gust(b, a, x)
+        assert_allclose([z1[0], z2[0]],
+                        [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]])
+        assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1],
+                            0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]])
+
+    def test_gust_scalars(self):
+        if self.filtfilt_kind != 'tf':
+            pytest.skip('gust only implemented for TF systems')
+        # The filter coefficients are both scalars, so the filter simply
+        # multiplies its input by b/a.  When it is used in filtfilt, the
+        # factor is (b/a)**2.
+        x = np.arange(12)
+        b = 3.0
+        a = 2.0
+        y = filtfilt(b, a, x, method="gust")
+        expected = (b/a)**2 * x
+        assert_allclose(y, expected)
+
+
+class TestSOSFiltFilt(TestFiltFilt):
+    filtfilt_kind = 'sos'
+
+    def test_equivalence(self):
+        """Test equivalence between sosfiltfilt and filtfilt"""
+        x = np.random.RandomState(0).randn(1000)
+        for order in range(1, 6):
+            zpk = signal.butter(order, 0.35, output='zpk')
+            b, a = zpk2tf(*zpk)
+            sos = zpk2sos(*zpk)
+            y = filtfilt(b, a, x)
+            y_sos = sosfiltfilt(sos, x)
+            assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order)
+
+
+def filtfilt_gust_opt(b, a, x):
+    """
+    An alternative implementation of filtfilt with Gustafsson edges.
+
+    This function computes the same result as
+    `scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays
+    are accepted.  The problem is solved using `fmin` from `scipy.optimize`.
+    `_filtfilt_gust` is significantly faster than this implementation.
+    """
+    def filtfilt_gust_opt_func(ics, b, a, x):
+        """Objective function used in filtfilt_gust_opt."""
+        m = max(len(a), len(b)) - 1
+        z0f = ics[:m]
+        z0b = ics[m:]
+        y_f = lfilter(b, a, x, zi=z0f)[0]
+        y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1]
+
+        y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
+        y_bf = lfilter(b, a, y_b, zi=z0f)[0]
+        value = np.sum((y_fb - y_bf)**2)
+        return value
+
+    m = max(len(a), len(b)) - 1
+    zi = lfilter_zi(b, a)
+    ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi))
+    result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x),
+                  xtol=1e-10, ftol=1e-12,
+                  maxfun=10000, maxiter=10000,
+                  full_output=True, disp=False)
+    opt, fopt, niter, funcalls, warnflag = result
+    if warnflag > 0:
+        raise RuntimeError("minimization failed in filtfilt_gust_opt: "
+                           "warnflag=%d" % warnflag)
+    z0f = opt[:m]
+    z0b = opt[m:]
+
+    # Apply the forward-backward filter using the computed initial
+    # conditions.
+    y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
+    y = lfilter(b, a, y_b, zi=z0f)[0]
+
+    return y, z0f, z0b
+
+
+def check_filtfilt_gust(b, a, shape, axis, irlen=None):
+    # Generate x, the data to be filtered.
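+    # (Editorial note: the strategy is to compare the fast _filtfilt_gust
+    # result against filtfilt_gust_opt above, an independent, slower,
+    # optimisation-based implementation, one 1-d slice at a time.)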
+ np.random.seed(123) + x = np.random.randn(*shape) + + # Apply filtfilt to x. This is the main calculation to be checked. + y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen) + + # Also call the private function so we can test the ICs. + yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) + + # filtfilt_gust_opt is an independent implementation that gives the + # expected result, but it only handles 1-d arrays, so use some looping + # and reshaping shenanigans to create the expected output arrays. + xx = np.swapaxes(x, axis, -1) + out_shape = xx.shape[:-1] + yo = np.empty_like(xx) + m = max(len(a), len(b)) - 1 + zo1 = np.empty(out_shape + (m,)) + zo2 = np.empty(out_shape + (m,)) + for indx in product(*[range(d) for d in out_shape]): + yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx]) + yo = np.swapaxes(yo, -1, axis) + zo1 = np.swapaxes(zo1, -1, axis) + zo2 = np.swapaxes(zo2, -1, axis) + + assert_allclose(y, yo, rtol=1e-9, atol=1e-10) + assert_allclose(yg, yo, rtol=1e-9, atol=1e-10) + assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10) + assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10) + + +def test_choose_conv_method(): + for mode in ['valid', 'same', 'full']: + for ndims in [1, 2]: + n, k, true_method = 8, 6, 'direct' + x = np.random.randn(*((n,) * ndims)) + h = np.random.randn(*((k,) * ndims)) + + method = choose_conv_method(x, h, mode=mode) + assert_equal(method, true_method) + + method_try, times = choose_conv_method(x, h, mode=mode, measure=True) + assert_(method_try in {'fft', 'direct'}) + assert_(type(times) is dict) + assert_('fft' in times.keys() and 'direct' in times.keys()) + + n = 10 + for not_fft_conv_supp in ["complex256", "complex192"]: + if hasattr(np, not_fft_conv_supp): + x = np.ones(n, dtype=not_fft_conv_supp) + h = x.copy() + assert_equal(choose_conv_method(x, h, mode=mode), 'direct') + + x = np.array([2**51], dtype=np.int64) + h = x.copy() + assert_equal(choose_conv_method(x, h, mode=mode), 'direct') + + x = [Decimal(3), Decimal(2)] + h = [Decimal(1), Decimal(4)] + assert_equal(choose_conv_method(x, h, mode=mode), 'direct') + + +def test_filtfilt_gust(): + # Design a filter. + z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk') + + # Find the approximate impulse response length of the filter. + eps = 1e-10 + r = np.max(np.abs(p)) + approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) + + np.random.seed(123) + + b, a = zpk2tf(z, p, k) + for irlen in [None, approx_impulse_len]: + signal_len = 5 * approx_impulse_len + + # 1-d test case + check_filtfilt_gust(b, a, (signal_len,), 0, irlen) + + # 3-d test case; test each axis. + for axis in range(3): + shape = [2, 2, 2] + shape[axis] = signal_len + check_filtfilt_gust(b, a, shape, axis, irlen) + + # Test case with length less than 2*approx_impulse_len. + # In this case, `filtfilt_gust` should behave the same as if + # `irlen=None` was given. 
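+    # (For instance, a largest pole magnitude of r = 0.95 would give
+    # approx_impulse_len = ceil(log(1e-10)/log(0.95)) = 449, so the
+    # signal below would be 2*449 - 50 = 848 samples long.)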
+ length = 2*approx_impulse_len - 50 + check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) + + +class TestDecimate(object): + def test_bad_args(self): + x = np.arange(12) + assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) + assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) + + def test_basic_IIR(self): + x = np.arange(12) + y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() + assert_array_equal(y, x[::2]) + + def test_basic_FIR(self): + x = np.arange(12) + y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() + assert_array_equal(y, x[::2]) + + def test_shape(self): + # Regression test for ticket #1480. + z = np.zeros((30, 30)) + d0 = signal.decimate(z, 2, axis=0, zero_phase=False) + assert_equal(d0.shape, (15, 30)) + d1 = signal.decimate(z, 2, axis=1, zero_phase=False) + assert_equal(d1.shape, (30, 15)) + + def test_phaseshift_FIR(self): + with suppress_warnings() as sup: + sup.filter(BadCoefficients, "Badly conditioned filter") + self._test_phaseshift(method='fir', zero_phase=False) + + def test_zero_phase_FIR(self): + with suppress_warnings() as sup: + sup.filter(BadCoefficients, "Badly conditioned filter") + self._test_phaseshift(method='fir', zero_phase=True) + + def test_phaseshift_IIR(self): + self._test_phaseshift(method='iir', zero_phase=False) + + def test_zero_phase_IIR(self): + self._test_phaseshift(method='iir', zero_phase=True) + + def _test_phaseshift(self, method, zero_phase): + rate = 120 + rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 + + t_tot = int(100) # Need to let antialiasing filters settle + t = np.arange(rate*t_tot+1) / float(rate) + + # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts + freqs = np.array(rates_to) * 0.8 / 2 + d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) + * signal.windows.tukey(t.size, 0.1)) + + for rate_to in rates_to: + q = rate // rate_to + t_to = np.arange(rate_to*t_tot+1) / float(rate_to) + d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) + * signal.windows.tukey(t_to.size, 0.1)) + + # Set up downsampling filters, match v0.17 defaults + if method == 'fir': + n = 30 + system = signal.dlti(signal.firwin(n + 1, 1. / q, + window='hamming'), 1.) + elif method == 'iir': + n = 8 + wc = 0.8*np.pi/q + system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) + + # Calculate expected phase response, as unit complex vector + if zero_phase is False: + _, h_resps = signal.freqz(system.num, system.den, + freqs/rate*2*np.pi) + h_resps /= np.abs(h_resps) + else: + h_resps = np.ones_like(freqs) + + y_resamps = signal.decimate(d.real, q, n, ftype=system, + zero_phase=zero_phase) + + # Get phase from complex inner product, like CSD + h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) + h_resamps /= np.abs(h_resamps) + subnyq = freqs < 0.5*rate_to + + # Complex vectors should be aligned, only compare below nyquist + assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, + atol=1e-3, rtol=1e-3) + + def test_auto_n(self): + # Test that our value of n is a reasonable choice (depends on + # the downsampling factor) + sfreq = 100. + n = 1000 + t = np.arange(n) / sfreq + # will alias for decimations (>= 15) + x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) 
* t) + assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) + x_out = signal.decimate(x, 30, ftype='fir') + assert_array_less(np.linalg.norm(x_out), 0.01) + + +class TestHilbert(object): + + def test_bad_args(self): + x = np.array([1.0 + 0.0j]) + assert_raises(ValueError, hilbert, x) + x = np.arange(8.0) + assert_raises(ValueError, hilbert, x, N=0) + + def test_hilbert_theoretical(self): + # test cases by Ariel Rokem + decimal = 14 + + pi = np.pi + t = np.arange(0, 2 * pi, pi / 256) + a0 = np.sin(t) + a1 = np.cos(t) + a2 = np.sin(2 * t) + a3 = np.cos(2 * t) + a = np.vstack([a0, a1, a2, a3]) + + h = hilbert(a) + h_abs = np.abs(h) + h_angle = np.angle(h) + h_real = np.real(h) + + # The real part should be equal to the original signals: + assert_almost_equal(h_real, a, decimal) + # The absolute value should be one everywhere, for this input: + assert_almost_equal(h_abs, np.ones(a.shape), decimal) + # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in + # the first 256 bins: + assert_almost_equal(h_angle[0, :256], + np.arange(-pi / 2, pi / 2, pi / 256), + decimal) + # For the 'slow' cosine - the phase should go from 0 to pi in the + # same interval: + assert_almost_equal( + h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) + # The 'fast' sine should make this phase transition in half the time: + assert_almost_equal(h_angle[2, :128], + np.arange(-pi / 2, pi / 2, pi / 128), + decimal) + # Ditto for the 'fast' cosine: + assert_almost_equal( + h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) + + # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia + assert_almost_equal(h[1].imag, a0, decimal) + + def test_hilbert_axisN(self): + # tests for axis and N arguments + a = np.arange(18).reshape(3, 6) + # test axis + aa = hilbert(a, axis=-1) + assert_equal(hilbert(a.T, axis=0), aa.T) + # test 1d + assert_almost_equal(hilbert(a[0]), aa[0], 14) + + # test N + aan = hilbert(a, N=20, axis=-1) + assert_equal(aan.shape, [3, 20]) + assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3]) + # the next test is just a regression test, + # no idea whether numbers make sense + a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, + 1.000000000000000e+00 - 2.047794505137069j, + 1.999999999999999e+00 - 2.244055555687583j, + 3.000000000000000e+00 - 1.262750302935009j, + 4.000000000000000e+00 - 1.066489252384493j, + 5.000000000000000e+00 + 2.918022706971047j, + 8.881784197001253e-17 + 3.845658908989067j, + -9.444121133484362e-17 + 0.985044202202061j, + -1.776356839400251e-16 + 1.332257797702019j, + -3.996802888650564e-16 + 0.501905089898885j, + 1.332267629550188e-16 + 0.668696078880782j, + -1.192678053963799e-16 + 0.235487067862679j, + -1.776356839400251e-16 + 0.286439612812121j, + 3.108624468950438e-16 + 0.031676888064907j, + 1.332267629550188e-16 - 0.019275656884536j, + -2.360035624836702e-16 - 0.1652588660287j, + 0.000000000000000e+00 - 0.332049855010597j, + 3.552713678800501e-16 - 0.403810179797771j, + 8.881784197001253e-17 - 0.751023775297729j, + 9.444121133484362e-17 - 0.79252210110103j]) + assert_almost_equal(aan[0], a0hilb, 14, 'N regression') + + +class TestHilbert2(object): + + def test_bad_args(self): + # x must be real. + x = np.array([[1.0 + 0.0j]]) + assert_raises(ValueError, hilbert2, x) + + # x must be rank 2. + x = np.arange(24).reshape(2, 3, 4) + assert_raises(ValueError, hilbert2, x) + + # Bad value for N. 
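+        # (N must be a positive integer or a tuple of two positive
+        # integers, so 0, (2, 0) and (2,) should each raise ValueError.)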
+ x = np.arange(16).reshape(4, 4) + assert_raises(ValueError, hilbert2, x, N=0) + assert_raises(ValueError, hilbert2, x, N=(2, 0)) + assert_raises(ValueError, hilbert2, x, N=(2,)) + + +class TestPartialFractionExpansion(object): + def test_invresz_one_coefficient_bug(self): + # Regression test for issue in gh-4646. + r = [1] + p = [2] + k = [0] + a_expected = [1.0, 0.0] + b_expected = [1.0, -2.0] + a_observed, b_observed = invresz(r, p, k) + + assert_allclose(a_observed, a_expected) + assert_allclose(b_observed, b_expected) + + def test_invres_distinct_roots(self): + # This test was inspired by github issue 2496. + r = [3 / 10, -1 / 6, -2 / 15] + p = [0, -2, -5] + k = [] + a_expected = [1, 3] + b_expected = [1, 7, 10, 0] + a_observed, b_observed = invres(r, p, k) + assert_allclose(a_observed, a_expected) + assert_allclose(b_observed, b_expected) + rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') + + # With the default tolerance, the rtype does not matter + # for this example. + for rtype in rtypes: + a_observed, b_observed = invres(r, p, k, rtype=rtype) + assert_allclose(a_observed, a_expected) + assert_allclose(b_observed, b_expected) + + # With unrealistically large tolerances, repeated roots may be inferred + # and the rtype comes into play. + ridiculous_tolerance = 1e10 + for rtype in rtypes: + a, b = invres(r, p, k, tol=ridiculous_tolerance, rtype=rtype) + + def test_invres_repeated_roots(self): + r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] + p = [0, -2, -2, -5] + k = [] + a_expected = [1, 3] + b_expected = [1, 9, 24, 20, 0] + rtypes = ('avg', 'mean', 'min', 'minimum', 'max', 'maximum') + for rtype in rtypes: + a_observed, b_observed = invres(r, p, k, rtype=rtype) + assert_allclose(a_observed, a_expected) + assert_allclose(b_observed, b_expected) + + def test_invres_bad_rtype(self): + r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] + p = [0, -2, -2, -5] + k = [] + assert_raises(ValueError, invres, r, p, k, rtype='median') + + +class TestVectorstrength(object): + + def test_single_1dperiod(self): + events = np.array([.5]) + period = 5. + targ_strength = 1. + targ_phase = .1 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_single_2dperiod(self): + events = np.array([.5]) + period = [1, 2, 5.] + targ_strength = [1.] * 3 + targ_phase = np.array([.5, .25, .1]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_array_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_equal_1dperiod(self): + events = np.array([.25, .25, .25, .25, .25, .25]) + period = 2 + targ_strength = 1. + targ_phase = .125 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_equal_2dperiod(self): + events = np.array([.25, .25, .25, .25, .25, .25]) + period = [1, 2, ] + targ_strength = [1.] 
* 2 + targ_phase = np.array([.25, .125]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_spaced_1dperiod(self): + events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) + period = 1 + targ_strength = 1. + targ_phase = .1 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_spaced_2dperiod(self): + events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) + period = [1, .5] + targ_strength = [1.] * 2 + targ_phase = np.array([.1, .2]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_partial_1dperiod(self): + events = np.array([.25, .5, .75]) + period = 1 + targ_strength = 1. / 3. + targ_phase = .5 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_partial_2dperiod(self): + events = np.array([.25, .5, .75]) + period = [1., 1., 1., 1.] + targ_strength = [1. / 3.] * 4 + targ_phase = np.array([.5, .5, .5, .5]) + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + assert_almost_equal(phase, 2 * np.pi * targ_phase) + + def test_opposite_1dperiod(self): + events = np.array([0, .25, .5, .75]) + period = 1. + targ_strength = 0 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 0) + assert_equal(phase.ndim, 0) + assert_almost_equal(strength, targ_strength) + + def test_opposite_2dperiod(self): + events = np.array([0, .25, .5, .75]) + period = [1.] * 10 + targ_strength = [0.] * 10 + + strength, phase = vectorstrength(events, period) + + assert_equal(strength.ndim, 1) + assert_equal(phase.ndim, 1) + assert_almost_equal(strength, targ_strength) + + def test_2d_events_ValueError(self): + events = np.array([[1, 2]]) + period = 1. + assert_raises(ValueError, vectorstrength, events, period) + + def test_2d_period_ValueError(self): + events = 1. + period = np.array([[1]]) + assert_raises(ValueError, vectorstrength, events, period) + + def test_zero_period_ValueError(self): + events = 1. + period = 0 + assert_raises(ValueError, vectorstrength, events, period) + + def test_negative_period_ValueError(self): + events = 1. + period = -1 + assert_raises(ValueError, vectorstrength, events, period) + + +class TestSOSFilt(object): + + # For sosfilt we only test a single datatype. Since sosfilt wraps + # to lfilter under the hood, it's hopefully good enough to ensure + # lfilter is extensively tested. + dt = np.float64 + + # The test_rank* tests are pulled from _TestLinearFilter + def test_rank1(self): + x = np.linspace(0, 5, 6).astype(self.dt) + b = np.array([1, -1]).astype(self.dt) + a = np.array([0.5, -0.5]).astype(self.dt) + + # Test simple IIR + y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt) + assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) + + # Test simple FIR + b = np.array([1, 1]).astype(self.dt) + # NOTE: This was changed (rel. to TestLinear...) 
to add a pole @zero: + a = np.array([1, 0]).astype(self.dt) + y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt) + assert_array_almost_equal(sosfilt(tf2sos(b, a), x), y_r) + + b = [1, 1, 0] + a = [1, 0, 0] + x = np.ones(8) + sos = np.concatenate((b, a)) + sos.shape = (1, 6) + y = sosfilt(sos, x) + assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) + + def test_rank2(self): + shape = (4, 3) + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + x = x.astype(self.dt) + + b = np.array([1, -1]).astype(self.dt) + a = np.array([0.5, 0.5]).astype(self.dt) + + y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], + dtype=self.dt) + + y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], + [18, -16, 18]], dtype=self.dt) + + y = sosfilt(tf2sos(b, a), x, axis=0) + assert_array_almost_equal(y_r2_a0, y) + + y = sosfilt(tf2sos(b, a), x, axis=1) + assert_array_almost_equal(y_r2_a1, y) + + def test_rank3(self): + shape = (4, 3, 2) + x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) + + b = np.array([1, -1]).astype(self.dt) + a = np.array([0.5, 0.5]).astype(self.dt) + + # Test last axis + y = sosfilt(tf2sos(b, a), x) + for i in range(x.shape[0]): + for j in range(x.shape[1]): + assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) + + def test_initial_conditions(self): + b1, a1 = signal.butter(2, 0.25, 'low') + b2, a2 = signal.butter(2, 0.75, 'low') + b3, a3 = signal.butter(2, 0.75, 'low') + b = np.convolve(np.convolve(b1, b2), b3) + a = np.convolve(np.convolve(a1, a2), a3) + sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) + + x = np.random.rand(50) + + # Stopping filtering and continuing + y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) + y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] + assert_allclose(y_true, lfilter(b, a, x)) + + y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2))) + y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]] + assert_allclose(y_true, y_sos) + + # Use a step function + zi = sosfilt_zi(sos) + x = np.ones(8) + y, zf = sosfilt(sos, x, zi=zi) + + assert_allclose(y, np.ones(8)) + assert_allclose(zf, zi) + + # Initial condition shape matching + x.shape = (1, 1) + x.shape # 3D + assert_raises(ValueError, sosfilt, sos, x, zi=zi) + zi_nd = zi.copy() + zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1]) + assert_raises(ValueError, sosfilt, sos, x, + zi=zi_nd[:, :, :, [0, 1, 1]]) + y, zf = sosfilt(sos, x, zi=zi_nd) + assert_allclose(y[0, 0], np.ones(8)) + assert_allclose(zf[:, 0, 0, :], zi) + + def test_initial_conditions_3d_axis1(self): + # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input. + + # Input array is x. + x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3)) + + # Design a filter in ZPK format and convert to SOS + zpk = signal.butter(6, 0.35, output='zpk') + sos = zpk2sos(*zpk) + nsections = sos.shape[0] + + # Filter along this axis. + axis = 1 + + # Initial conditions, all zeros. + shp = list(x.shape) + shp[axis] = 2 + shp = [nsections] + shp + z0 = np.zeros(shp) + + # Apply the filter to x. + yf, zf = sosfilt(sos, x, axis=axis, zi=z0) + + # Apply the filter to x in two stages. + y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0) + y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1) + + # y should equal yf, and z2 should equal zf. 
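+        # (Filtering in two stages while carrying the returned state zi
+        # forward should reproduce the single-pass result.)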
+ y = np.concatenate((y1, y2), axis=axis) + assert_allclose(y, yf, rtol=1e-10, atol=1e-13) + assert_allclose(z2, zf, rtol=1e-10, atol=1e-13) + + # let's try the "step" initial condition + zi = sosfilt_zi(sos) + zi.shape = [nsections, 1, 2, 1] + zi = zi * x[:, 0:1, :] + y = sosfilt(sos, x, axis=axis, zi=zi)[0] + # check it against the TF form + b, a = zpk2tf(*zpk) + zi = lfilter_zi(b, a) + zi.shape = [1, zi.size, 1] + zi = zi * x[:, 0:1, :] + y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0] + assert_allclose(y, y_tf, rtol=1e-10, atol=1e-13) + + def test_bad_zi_shape(self): + # The shape of zi is checked before using any values in the + # arguments, so np.empty is fine for creating the arguments. + x = np.empty((3, 15, 3)) + sos = np.empty((4, 6)) + zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3) + assert_raises(ValueError, sosfilt, sos, x, zi=zi, axis=1) + + def test_sosfilt_zi(self): + sos = signal.butter(6, 0.2, output='sos') + zi = sosfilt_zi(sos) + + y, zf = sosfilt(sos, np.ones(40), zi=zi) + assert_allclose(zf, zi, rtol=1e-13) + + # Expected steady state value of the step response of this filter: + ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1)) + assert_allclose(y, ss, rtol=1e-13) + + +class TestDeconvolve(object): + + def test_basic(self): + # From docstring example + original = [0, 1, 0, 0, 1, 1, 0, 0] + impulse_response = [2, 1] + recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0] + recovered, remainder = signal.deconvolve(recorded, impulse_response) + assert_allclose(recovered, original) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_signaltools.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_signaltools.pyc new file mode 100644 index 0000000..74db086 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_signaltools.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_spectral.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_spectral.py new file mode 100644 index 0000000..357075f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_spectral.py @@ -0,0 +1,1464 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_, assert_approx_equal, + assert_allclose, assert_array_equal, assert_equal, + assert_array_almost_equal_nulp) +import pytest +from pytest import raises as assert_raises + +from scipy._lib._numpy_compat import suppress_warnings +from scipy import signal, fftpack +from scipy.signal import (periodogram, welch, lombscargle, csd, coherence, + spectrogram, stft, istft, check_COLA, check_NOLA) +from scipy.signal.spectral import _spectral_helper + + +class TestPeriodogram(object): + def test_real_onesided_even(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_real_onesided_odd(self): + x = np.zeros(15) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.arange(8.0)/15.0) + q = np.ones(8) + q[0] = 0 + q *= 2.0/15.0 + assert_allclose(p, q, atol=1e-15) + + def test_real_twosided(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(16, 1.0)) + q = np.ones(16)/16.0 + q[0] = 0 + assert_allclose(p, q) + + def test_real_spectrum(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, scaling='spectrum') + 
g, q = periodogram(x, scaling='density') + assert_allclose(f, np.linspace(0, 0.5, 9)) + assert_allclose(p, q/16.0) + + def test_integer_even(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_integer_odd(self): + x = np.zeros(15, dtype=int) + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.arange(8.0)/15.0) + q = np.ones(8) + q[0] = 0 + q *= 2.0/15.0 + assert_allclose(p, q, atol=1e-15) + + def test_integer_twosided(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(16, 1.0)) + q = np.ones(16)/16.0 + q[0] = 0 + assert_allclose(p, q) + + def test_complex(self): + x = np.zeros(16, np.complex128) + x[0] = 1.0 + 2.0j + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(16, 1.0)) + q = 5.0*np.ones(16)/16.0 + q[0] = 0 + assert_allclose(p, q) + + def test_unk_scaling(self): + assert_raises(ValueError, periodogram, np.zeros(4, np.complex128), + scaling='foo') + + def test_nd_axis_m1(self): + x = np.zeros(20, dtype=np.float64) + x = x.reshape((2,1,10)) + x[:,:,0] = 1.0 + f, p = periodogram(x) + assert_array_equal(p.shape, (2, 1, 6)) + assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60) + f0, p0 = periodogram(x[0,0,:]) + assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60) + + def test_nd_axis_0(self): + x = np.zeros(20, dtype=np.float64) + x = x.reshape((10,2,1)) + x[0,:,:] = 1.0 + f, p = periodogram(x, axis=0) + assert_array_equal(p.shape, (6,2,1)) + assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60) + f0, p0 = periodogram(x[:,0,0]) + assert_array_almost_equal_nulp(p0, p[:,1,0]) + + def test_window_external(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, 10, 'hann') + win = signal.get_window('hann', 16) + fe, pe = periodogram(x, 10, win) + assert_array_almost_equal_nulp(p, pe) + assert_array_almost_equal_nulp(f, fe) + win_err = signal.get_window('hann', 32) + assert_raises(ValueError, periodogram, x, + 10, win_err) # win longer than signal + + def test_padded_fft(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x) + fp, pp = periodogram(x, nfft=32) + assert_allclose(f, fp[::2]) + assert_allclose(p, pp[::2]) + assert_array_equal(pp.shape, (17,)) + + def test_empty_input(self): + f, p = periodogram([]) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + for shape in [(0,), (3,0), (0,5,2)]: + f, p = periodogram(np.empty(shape)) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_empty_input_other_axis(self): + for shape in [(3,0), (0,5,2)]: + f, p = periodogram(np.empty(shape), axis=1) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_short_nfft(self): + x = np.zeros(18) + x[0] = 1 + f, p = periodogram(x, nfft=16) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_nfft_is_xshape(self): + x = np.zeros(16) + x[0] = 1 + f, p = periodogram(x, nfft=16) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9) + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + + def test_real_onesided_even_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.linspace(0, 0.5, 9)) + q = np.ones(9, 'f') + q[0] = 0 + q[-1] /= 2.0 + q /= 8 + assert_allclose(p, q) + assert_(p.dtype == q.dtype) + 
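+    # The *_32 tests exercise single-precision input and additionally
+    # check that the output dtype is preserved.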
+ def test_real_onesided_odd_32(self): + x = np.zeros(15, 'f') + x[0] = 1 + f, p = periodogram(x) + assert_allclose(f, np.arange(8.0)/15.0) + q = np.ones(8, 'f') + q[0] = 0 + q *= 2.0/15.0 + assert_allclose(p, q, atol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_twosided_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(16, 1.0)) + q = np.ones(16, 'f')/16.0 + q[0] = 0 + assert_allclose(p, q) + assert_(p.dtype == q.dtype) + + def test_complex_32(self): + x = np.zeros(16, 'F') + x[0] = 1.0 + 2.0j + f, p = periodogram(x, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(16, 1.0)) + q = 5.0*np.ones(16, 'f')/16.0 + q[0] = 0 + assert_allclose(p, q) + assert_(p.dtype == q.dtype) + + +class TestWelch(object): + def test_real_onesided_even(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_onesided_odd(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_twosided(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_spectrum(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, scaling='spectrum') + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667, + 0.02083333]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_even(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_odd(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_twosided(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_complex(self): + x = np.zeros(16, np.complex128) + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556, + 0.55555556, 0.55555556, 0.55555556, 0.38194444]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_unk_scaling(self): + assert_raises(ValueError, welch, np.zeros(4, np.complex128), + scaling='foo', nperseg=4) + + def test_detrend_linear(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = welch(x, nperseg=10, detrend='linear') + assert_allclose(p, 
np.zeros_like(p), atol=1e-15) + + def test_no_detrending(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f1, p1 = welch(x, nperseg=10, detrend=False) + f2, p2 = welch(x, nperseg=10, detrend=lambda x: x) + assert_allclose(f1, f2, atol=1e-15) + assert_allclose(p1, p2, atol=1e-15) + + def test_detrend_external(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = welch(x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_m1(self): + x = np.arange(40, dtype=np.float64) + 0.04 + x = x.reshape((2,2,10)) + f, p = welch(x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + x = np.rollaxis(x, 2, 0) + f, p = welch(x, nperseg=10, axis=0, + detrend=lambda seg: signal.detrend(seg, axis=0, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_nd_axis_m1(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + f, p = welch(x, nperseg=10) + assert_array_equal(p.shape, (2, 1, 6)) + assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13) + f0, p0 = welch(x[0,0,:], nperseg=10) + assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13) + + def test_nd_axis_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((10,2,1)) + f, p = welch(x, nperseg=10, axis=0) + assert_array_equal(p.shape, (6,2,1)) + assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13) + f0, p0 = welch(x[:,0,0], nperseg=10) + assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13) + + def test_window_external(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = welch(x, 10, 'hann', nperseg=8) + win = signal.get_window('hann', 8) + fe, pe = welch(x, 10, win, nperseg=None) + assert_array_almost_equal_nulp(p, pe) + assert_array_almost_equal_nulp(f, fe) + assert_array_equal(fe.shape, (5,)) # because win length used as nperseg + assert_array_equal(pe.shape, (5,)) + assert_raises(ValueError, welch, x, + 10, win, nperseg=4) # because nperseg != win.shape[-1] + win_err = signal.get_window('hann', 32) + assert_raises(ValueError, welch, x, + 10, win_err, nperseg=None) # win longer than signal + + def test_empty_input(self): + f, p = welch([]) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + for shape in [(0,), (3,0), (0,5,2)]: + f, p = welch(np.empty(shape)) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_empty_input_other_axis(self): + for shape in [(3,0), (0,5,2)]: + f, p = welch(np.empty(shape), axis=1) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + def test_short_data(self): + x = np.zeros(8) + x[0] = 1 + #for string-like window, input signal length < nperseg value gives + #UserWarning, sets nperseg to x.shape[-1] + with suppress_warnings() as sup: + sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8") + f, p = welch(x,window='hann') # default nperseg + f1, p1 = welch(x,window='hann', nperseg=256) # user-specified nperseg + f2, p2 = welch(x, nperseg=8) # valid nperseg, doesn't give warning + assert_allclose(f, f2) + assert_allclose(p, p2) + assert_allclose(f1, f2) + assert_allclose(p1, p2) + + def test_window_long_or_nd(self): + assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1])) + assert_raises(ValueError, welch, np.zeros(4), 1, + 
np.arange(6).reshape((2,3))) + + def test_nondefault_noverlap(self): + x = np.zeros(64) + x[::8] = 1 + f, p = welch(x, nperseg=16, noverlap=4) + q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5., + 1./6.]) + assert_allclose(p, q, atol=1e-12) + + def test_bad_noverlap(self): + assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7) + + def test_nfft_too_short(self): + assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4) + + def test_real_onesided_even_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_onesided_odd_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116, + 0.17072113], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_twosided_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.11111111, + 0.07638889], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_complex_32(self): + x = np.zeros(16, 'F') + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = welch(x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552, + 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype, + 'dtype mismatch, %s, %s' % (p.dtype, q.dtype)) + + def test_padded_freqs(self): + x = np.zeros(12) + + nfft = 24 + f = fftpack.fftfreq(nfft, 1.0)[:nfft//2+1] + f[-1] *= -1 + fodd, _ = welch(x, nperseg=5, nfft=nfft) + feven, _ = welch(x, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + nfft = 25 + f = fftpack.fftfreq(nfft, 1.0)[:(nfft + 1)//2] + fodd, _ = welch(x, nperseg=5, nfft=nfft) + feven, _ = welch(x, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + def test_window_correction(self): + A = 20 + fs = 1e4 + nperseg = int(fs//10) + fsig = 300 + ii = int(fsig*nperseg//fs) # Freq index of fsig + + tt = np.arange(fs)/fs + x = A*np.sin(2*np.pi*fsig*tt) + + for window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']: + _, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window, + scaling='spectrum') + freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window, + scaling='density') + + # Check peak height at signal frequency for 'spectrum' + assert_allclose(p_spec[ii], A**2/2.0) + # Check integrated spectrum RMS for 'density' + assert_allclose(np.sqrt(np.trapz(p_dens, freq)), A*np.sqrt(2)/2, + rtol=1e-3) + + def test_axis_rolling(self): + np.random.seed(1234) + + x_flat = np.random.randn(1024) + _, p_flat = welch(x_flat) + + for a in range(3): + newshape = [1,]*3 + newshape[a] = -1 + x = x_flat.reshape(newshape) + + _, p_plus = welch(x, axis=a) # Positive axis index + _, p_minus = welch(x, axis=a-x.ndim) # Negative axis index + + assert_equal(p_flat, p_plus.squeeze(), err_msg=a) + assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim) + + def test_average(self): + x = np.zeros(16) + x[0] = 1 + 
x[8] = 1 + f, p = welch(x, nperseg=8, average='median') + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([.1, .05, 0., 1.54074396e-33, 0.]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + assert_raises(ValueError, welch, x, nperseg=8, + average='unrecognised-average') + + +class TestCSD: + def test_pad_shorter_x(self): + x = np.zeros(8) + y = np.zeros(12) + + f = np.linspace(0, 0.5, 7) + c = np.zeros(7,dtype=np.complex128) + f1, c1 = csd(x, y, nperseg=12) + + assert_allclose(f, f1) + assert_allclose(c, c1) + + def test_pad_shorter_y(self): + x = np.zeros(12) + y = np.zeros(8) + + f = np.linspace(0, 0.5, 7) + c = np.zeros(7,dtype=np.complex128) + f1, c1 = csd(x, y, nperseg=12) + + assert_allclose(f, f1) + assert_allclose(c, c1) + + def test_real_onesided_even(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_onesided_odd(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_twosided(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_real_spectrum(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, scaling='spectrum') + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667, + 0.02083333]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_even(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_onesided_odd(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, + 0.17072113]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_integer_twosided(self): + x = np.zeros(16, dtype=int) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.07638889]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_complex(self): + x = np.zeros(16, np.complex128) + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556, + 0.55555556, 0.55555556, 0.55555556, 0.38194444]) + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + + def test_unk_scaling(self): + assert_raises(ValueError, csd, np.zeros(4, np.complex128), + np.ones(4, np.complex128), scaling='foo', nperseg=4) + + def test_detrend_linear(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = csd(x, x, nperseg=10, detrend='linear') + assert_allclose(p, np.zeros_like(p), atol=1e-15) 
+ + def test_no_detrending(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f1, p1 = csd(x, x, nperseg=10, detrend=False) + f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x) + assert_allclose(f1, f2, atol=1e-15) + assert_allclose(p1, p2, atol=1e-15) + + def test_detrend_external(self): + x = np.arange(10, dtype=np.float64) + 0.04 + f, p = csd(x, x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_m1(self): + x = np.arange(40, dtype=np.float64) + 0.04 + x = x.reshape((2,2,10)) + f, p = csd(x, x, nperseg=10, + detrend=lambda seg: signal.detrend(seg, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_detrend_external_nd_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + x = np.rollaxis(x, 2, 0) + f, p = csd(x, x, nperseg=10, axis=0, + detrend=lambda seg: signal.detrend(seg, axis=0, type='l')) + assert_allclose(p, np.zeros_like(p), atol=1e-15) + + def test_nd_axis_m1(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((2,1,10)) + f, p = csd(x, x, nperseg=10) + assert_array_equal(p.shape, (2, 1, 6)) + assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13) + f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10) + assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13) + + def test_nd_axis_0(self): + x = np.arange(20, dtype=np.float64) + 0.04 + x = x.reshape((10,2,1)) + f, p = csd(x, x, nperseg=10, axis=0) + assert_array_equal(p.shape, (6,2,1)) + assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13) + f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10) + assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13) + + def test_window_external(self): + x = np.zeros(16) + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, 10, 'hann', 8) + win = signal.get_window('hann', 8) + fe, pe = csd(x, x, 10, win, nperseg=None) + assert_array_almost_equal_nulp(p, pe) + assert_array_almost_equal_nulp(f, fe) + assert_array_equal(fe.shape, (5,)) # because win length used as nperseg + assert_array_equal(pe.shape, (5,)) + assert_raises(ValueError, csd, x, x, + 10, win, nperseg=256) # because nperseg != win.shape[-1] + win_err = signal.get_window('hann', 32) + assert_raises(ValueError, csd, x, x, + 10, win_err, nperseg=None) # because win longer than signal + + def test_empty_input(self): + f, p = csd([],np.zeros(10)) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + + f, p = csd(np.zeros(10),[]) + assert_array_equal(f.shape, (0,)) + assert_array_equal(p.shape, (0,)) + + for shape in [(0,), (3,0), (0,5,2)]: + f, p = csd(np.empty(shape), np.empty(shape)) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + f, p = csd(np.ones(10), np.empty((5,0))) + assert_array_equal(f.shape, (5,0)) + assert_array_equal(p.shape, (5,0)) + + f, p = csd(np.empty((5,0)), np.ones(10)) + assert_array_equal(f.shape, (5,0)) + assert_array_equal(p.shape, (5,0)) + + def test_empty_input_other_axis(self): + for shape in [(3,0), (0,5,2)]: + f, p = csd(np.empty(shape), np.empty(shape), axis=1) + assert_array_equal(f.shape, shape) + assert_array_equal(p.shape, shape) + + f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1) + assert_array_equal(f.shape, (10,0,3)) + assert_array_equal(p.shape, (10,0,3)) + + f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1) + assert_array_equal(f.shape, (10,0,3)) + assert_array_equal(p.shape, (10,0,3)) + + def test_short_data(self): + x = np.zeros(8) + x[0] = 1 + + #for string-like window, 
input signal length < nperseg value gives + #UserWarning, sets nperseg to x.shape[-1] + with suppress_warnings() as sup: + sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8") + f, p = csd(x, x, window='hann') # default nperseg + f1, p1 = csd(x, x, window='hann', nperseg=256) # user-specified nperseg + f2, p2 = csd(x, x, nperseg=8) # valid nperseg, doesn't give warning + assert_allclose(f, f2) + assert_allclose(p, p2) + assert_allclose(f1, f2) + assert_allclose(p1, p2) + + def test_window_long_or_nd(self): + assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, + np.array([1,1,1,1,1])) + assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, + np.arange(6).reshape((2,3))) + + def test_nondefault_noverlap(self): + x = np.zeros(64) + x[::8] = 1 + f, p = csd(x, x, nperseg=16, noverlap=4) + q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5., + 1./6.]) + assert_allclose(p, q, atol=1e-12) + + def test_bad_noverlap(self): + assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hann', + 2, 7) + + def test_nfft_too_short(self): + assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3, + nperseg=4) + + def test_real_onesided_even_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8) + assert_allclose(f, np.linspace(0, 0.5, 5)) + q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, + 0.11111111], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_onesided_odd_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=9) + assert_allclose(f, np.arange(5.0)/9.0) + q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116, + 0.17072113], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_real_twosided_32(self): + x = np.zeros(16, 'f') + x[0] = 1 + x[8] = 1 + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.08333333, 0.07638889, 0.11111111, + 0.11111111, 0.11111111, 0.11111111, 0.11111111, + 0.07638889], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype) + + def test_complex_32(self): + x = np.zeros(16, 'F') + x[0] = 1.0 + 2.0j + x[8] = 1.0 + 2.0j + f, p = csd(x, x, nperseg=8, return_onesided=False) + assert_allclose(f, fftpack.fftfreq(8, 1.0)) + q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552, + 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f') + assert_allclose(p, q, atol=1e-7, rtol=1e-7) + assert_(p.dtype == q.dtype, + 'dtype mismatch, %s, %s' % (p.dtype, q.dtype)) + + def test_padded_freqs(self): + x = np.zeros(12) + y = np.ones(12) + + nfft = 24 + f = fftpack.fftfreq(nfft, 1.0)[:nfft//2+1] + f[-1] *= -1 + fodd, _ = csd(x, y, nperseg=5, nfft=nfft) + feven, _ = csd(x, y, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + + nfft = 25 + f = fftpack.fftfreq(nfft, 1.0)[:(nfft + 1)//2] + fodd, _ = csd(x, y, nperseg=5, nfft=nfft) + feven, _ = csd(x, y, nperseg=6, nfft=nfft) + assert_allclose(f, fodd) + assert_allclose(f, feven) + +class TestCoherence(object): + def test_identical_input(self): + x = np.random.randn(20) + y = np.copy(x) # So `y is x` -> False + + f = np.linspace(0, 0.5, 6) + C = np.ones(6) + f1, C1 = coherence(x, y, nperseg=10) + + assert_allclose(f, f1) + assert_allclose(C, C1) + + def test_phase_shifted_input(self): + x = np.random.randn(20) + y = -x + + f = np.linspace(0, 0.5, 6) + C = np.ones(6) + f1, C1 = 
coherence(x, y, nperseg=10) + + assert_allclose(f, f1) + assert_allclose(C, C1) + + +class TestSpectrogram(object): + def test_average_all_segments(self): + x = np.random.randn(1024) + + fs = 1.0 + window = ('tukey', 0.25) + nperseg = 16 + noverlap = 2 + + f, _, P = spectrogram(x, fs, window, nperseg, noverlap) + fw, Pw = welch(x, fs, window, nperseg, noverlap) + assert_allclose(f, fw) + assert_allclose(np.mean(P, axis=-1), Pw) + + def test_window_external(self): + x = np.random.randn(1024) + + fs = 1.0 + window = ('tukey', 0.25) + nperseg = 16 + noverlap = 2 + f, _, P = spectrogram(x, fs, window, nperseg, noverlap) + + win = signal.get_window(('tukey', 0.25), 16) + fe, _, Pe = spectrogram(x, fs, win, nperseg=None, noverlap=2) + assert_array_equal(fe.shape, (9,)) # because win length used as nperseg + assert_array_equal(Pe.shape, (9,73)) + assert_raises(ValueError, spectrogram, x, + fs, win, nperseg=8) # because nperseg != win.shape[-1] + win_err = signal.get_window(('tukey', 0.25), 2048) + assert_raises(ValueError, spectrogram, x, + fs, win_err, nperseg=None) # win longer than signal + + def test_short_data(self): + x = np.random.randn(1024) + fs = 1.0 + + #for string-like window, input signal length < nperseg value gives + #UserWarning, sets nperseg to x.shape[-1] + f, _, p = spectrogram(x, fs, window=('tukey',0.25)) # default nperseg + with suppress_warnings() as sup: + sup.filter(UserWarning, + "nperseg = 1025 is greater than input length = 1024, using nperseg = 1024") + f1, _, p1 = spectrogram(x, fs, window=('tukey',0.25), + nperseg=1025) # user-specified nperseg + f2, _, p2 = spectrogram(x, fs, nperseg=256) # to compare w/default + f3, _, p3 = spectrogram(x, fs, nperseg=1024) # compare w/user-spec'd + assert_allclose(f, f2) + assert_allclose(p, p2) + assert_allclose(f1, f3) + assert_allclose(p1, p3) + +class TestLombscargle(object): + def test_frequency(self): + """Test if frequency location of peak corresponds to frequency of + generated input signal. + """ + + # Input parameters + ampl = 2. + w = 1. + phi = 0.5 * np.pi + nin = 100 + nout = 1000 + p = 0.7 # Fraction of points to select + + # Randomly select a fraction of an array with timesteps + np.random.seed(2353425) + r = np.random.rand(nin) + t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] + + # Plot a sine wave for the selected times + x = ampl * np.sin(w*t + phi) + + # Define the array of frequencies for which to compute the periodogram + f = np.linspace(0.01, 10., nout) + + # Calculate Lomb-Scargle periodogram + P = lombscargle(t, x, f) + + # Check if difference between found frequency maximum and input + # frequency is less than accuracy + delta = f[1] - f[0] + assert_(w - f[np.argmax(P)] < (delta/2.)) + + def test_amplitude(self): + # Test if height of peak in normalized Lomb-Scargle periodogram + # corresponds to amplitude of the generated input signal. + + # Input parameters + ampl = 2. + w = 1. 
+        phi = 0.5 * np.pi
+        nin = 100
+        nout = 1000
+        p = 0.7  # Fraction of points to select
+
+        # Randomly select a fraction of an array with timesteps
+        np.random.seed(2353425)
+        r = np.random.rand(nin)
+        t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
+
+        # Plot a sine wave for the selected times
+        x = ampl * np.sin(w*t + phi)
+
+        # Define the array of frequencies for which to compute the periodogram
+        f = np.linspace(0.01, 10., nout)
+
+        # Calculate Lomb-Scargle periodogram
+        pgram = lombscargle(t, x, f)
+
+        # Normalize
+        pgram = np.sqrt(4 * pgram / t.shape[0])
+
+        # Check that the peak of the normalized periodogram matches the
+        # amplitude of the input signal
+        assert_approx_equal(np.max(pgram), ampl, significant=2)
+
+    def test_precenter(self):
+        # Test if precenter gives the same result as manually precentering.
+
+        # Input parameters
+        ampl = 2.
+        w = 1.
+        phi = 0.5 * np.pi
+        nin = 100
+        nout = 1000
+        p = 0.7  # Fraction of points to select
+        offset = 0.15  # Offset to be subtracted in pre-centering
+
+        # Randomly select a fraction of an array with timesteps
+        np.random.seed(2353425)
+        r = np.random.rand(nin)
+        t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
+
+        # Plot a sine wave for the selected times
+        x = ampl * np.sin(w*t + phi) + offset
+
+        # Define the array of frequencies for which to compute the periodogram
+        f = np.linspace(0.01, 10., nout)
+
+        # Calculate Lomb-Scargle periodogram
+        pgram = lombscargle(t, x, f, precenter=True)
+        pgram2 = lombscargle(t, x - x.mean(), f, precenter=False)
+
+        # check if centering worked
+        assert_allclose(pgram, pgram2)
+
+    def test_normalize(self):
+        # Test normalize option of Lomb-Scargle.
+
+        # Input parameters
+        ampl = 2.
+        w = 1.
+        phi = 0.5 * np.pi
+        nin = 100
+        nout = 1000
+        p = 0.7  # Fraction of points to select
+
+        # Randomly select a fraction of an array with timesteps
+        np.random.seed(2353425)
+        r = np.random.rand(nin)
+        t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
+
+        # Plot a sine wave for the selected times
+        x = ampl * np.sin(w*t + phi)
+
+        # Define the array of frequencies for which to compute the periodogram
+        f = np.linspace(0.01, 10., nout)
+
+        # Calculate Lomb-Scargle periodogram
+        pgram = lombscargle(t, x, f)
+        pgram2 = lombscargle(t, x, f, normalize=True)
+
+        # check if normalization works as expected
+        assert_allclose(pgram * 2 / np.dot(x, x), pgram2)
+        assert_approx_equal(np.max(pgram2), 1.0, significant=2)
+
+    def test_wrong_shape(self):
+        t = np.linspace(0, 1, 1)
+        x = np.linspace(0, 1, 2)
+        f = np.linspace(0, 1, 3)
+        assert_raises(ValueError, lombscargle, t, x, f)
+
+    def test_zero_division(self):
+        t = np.zeros(1)
+        x = np.zeros(1)
+        f = np.zeros(1)
+        assert_raises(ZeroDivisionError, lombscargle, t, x, f)
+
+    def test_lombscargle_atan_vs_atan2(self):
+        # https://github.com/scipy/scipy/issues/3787
+        # This raised a ZeroDivisionError.
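+        # Completing without raising is the regression check here; no
+        # assertion on the result is needed.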
+ t = np.linspace(0, 10, 1000, endpoint=False) + x = np.sin(4*t) + f = np.linspace(0, 50, 500, endpoint=False) + 0.1 + q = lombscargle(t, x, f*2*np.pi) + + +class TestSTFT(object): + def test_input_validation(self): + assert_raises(ValueError, check_COLA, 'hann', -10, 0) + assert_raises(ValueError, check_COLA, 'hann', 10, 20) + assert_raises(ValueError, check_COLA, np.ones((2,2)), 10, 0) + assert_raises(ValueError, check_COLA, np.ones(20), 10, 0) + + assert_raises(ValueError, check_NOLA, 'hann', -10, 0) + assert_raises(ValueError, check_NOLA, 'hann', 10, 20) + assert_raises(ValueError, check_NOLA, np.ones((2,2)), 10, 0) + assert_raises(ValueError, check_NOLA, np.ones(20), 10, 0) + assert_raises(ValueError, check_NOLA, 'hann', 64, -32) + + x = np.empty(1024) + z = stft(x) + + assert_raises(ValueError, stft, x, window=np.ones((2,2))) + assert_raises(ValueError, stft, x, window=np.ones(10), nperseg=256) + assert_raises(ValueError, stft, x, nperseg=-256) + assert_raises(ValueError, stft, x, nperseg=256, noverlap=1024) + assert_raises(ValueError, stft, x, nperseg=256, nfft=8) + + assert_raises(ValueError, istft, x) # Not 2d + assert_raises(ValueError, istft, z, window=np.ones((2,2))) + assert_raises(ValueError, istft, z, window=np.ones(10), nperseg=256) + assert_raises(ValueError, istft, z, nperseg=-256) + assert_raises(ValueError, istft, z, nperseg=256, noverlap=1024) + assert_raises(ValueError, istft, z, nperseg=256, nfft=8) + assert_raises(ValueError, istft, z, nperseg=256, noverlap=0, + window='hann') # Doesn't meet COLA + assert_raises(ValueError, istft, z, time_axis=0, freq_axis=0) + + assert_raises(ValueError, _spectral_helper, x, x, mode='foo') + assert_raises(ValueError, _spectral_helper, x[:512], x[512:], + mode='stft') + assert_raises(ValueError, _spectral_helper, x, x, boundary='foo') + + def test_check_COLA(self): + settings = [ + ('boxcar', 10, 0), + ('boxcar', 10, 9), + ('bartlett', 51, 26), + ('hann', 256, 128), + ('hann', 256, 192), + ('blackman', 300, 200), + (('tukey', 0.5), 256, 64), + ('hann', 256, 255), + ] + + for setting in settings: + msg = '{0}, {1}, {2}'.format(*setting) + assert_equal(True, check_COLA(*setting), err_msg=msg) + + def test_check_NOLA(self): + settings_pass = [ + ('boxcar', 10, 0), + ('boxcar', 10, 9), + ('boxcar', 10, 7), + ('bartlett', 51, 26), + ('bartlett', 51, 10), + ('hann', 256, 128), + ('hann', 256, 192), + ('hann', 256, 37), + ('blackman', 300, 200), + ('blackman', 300, 123), + (('tukey', 0.5), 256, 64), + (('tukey', 0.5), 256, 38), + ('hann', 256, 255), + ('hann', 256, 39), + ] + for setting in settings_pass: + msg = '{0}, {1}, {2}'.format(*setting) + assert_equal(True, check_NOLA(*setting), err_msg=msg) + + w_fail = np.ones(16) + w_fail[::2] = 0 + settings_fail = [ + (w_fail, len(w_fail), len(w_fail) // 2), + ('hann', 64, 0), + ] + for setting in settings_fail: + msg = '{0}, {1}, {2}'.format(*setting) + assert_equal(False, check_NOLA(*setting), err_msg=msg) + + def test_average_all_segments(self): + np.random.seed(1234) + x = np.random.randn(1024) + + fs = 1.0 + window = 'hann' + nperseg = 16 + noverlap = 8 + + # Compare twosided, because onesided welch doubles non-DC terms to + # account for power at negative frequencies. stft doesn't do this, + # because it breaks invertibility. 
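+        # Averaging |Z|**2 over the time segments should then match the
+        # Welch estimate computed with the same settings.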
+ f, _, Z = stft(x, fs, window, nperseg, noverlap, padded=False, + return_onesided=False, boundary=None) + fw, Pw = welch(x, fs, window, nperseg, noverlap, return_onesided=False, + scaling='spectrum', detrend=False) + + assert_allclose(f, fw) + assert_allclose(np.mean(np.abs(Z)**2, axis=-1), Pw) + + def test_permute_axes(self): + np.random.seed(1234) + x = np.random.randn(1024) + + fs = 1.0 + window = 'hann' + nperseg = 16 + noverlap = 8 + + f1, t1, Z1 = stft(x, fs, window, nperseg, noverlap) + f2, t2, Z2 = stft(x.reshape((-1, 1, 1)), fs, window, nperseg, noverlap, + axis=0) + + t3, x1 = istft(Z1, fs, window, nperseg, noverlap) + t4, x2 = istft(Z2.T, fs, window, nperseg, noverlap, time_axis=0, + freq_axis=-1) + + assert_allclose(f1, f2) + assert_allclose(t1, t2) + assert_allclose(t3, t4) + assert_allclose(Z1, Z2[:, 0, 0, :]) + assert_allclose(x1, x2[:, 0, 0]) + + def test_roundtrip_real(self): + np.random.seed(1234) + + settings = [ + ('boxcar', 100, 10, 0), # Test no overlap + ('boxcar', 100, 10, 9), # Test high overlap + ('bartlett', 101, 51, 26), # Test odd nperseg + ('hann', 1024, 256, 128), # Test defaults + (('tukey', 0.5), 1152, 256, 64), # Test Tukey + ('hann', 1024, 256, 255), # Test overlapped hann + ] + + for window, N, nperseg, noverlap in settings: + t = np.arange(N) + x = 10*np.random.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=False) + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window) + + msg = '{0}, {1}'.format(window, noverlap) + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, xr, err_msg=msg) + + def test_roundtrip_not_nola(self): + np.random.seed(1234) + + w_fail = np.ones(16) + w_fail[::2] = 0 + settings = [ + (w_fail, 256, len(w_fail), len(w_fail) // 2), + ('hann', 256, 64, 0), + ] + + for window, N, nperseg, noverlap in settings: + msg = '{0}, {1}, {2}, {3}'.format(window, N, nperseg, noverlap) + assert not check_NOLA(window, nperseg, noverlap), msg + + t = np.arange(N) + x = 10 * np.random.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary='zeros') + with pytest.warns(UserWarning, match='NOLA'): + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, boundary=True) + + assert np.allclose(t, tr[:len(t)]), msg + assert not np.allclose(x, xr[:len(x)]), msg + + def test_roundtrip_nola_not_cola(self): + np.random.seed(1234) + + settings = [ + ('boxcar', 100, 10, 3), # NOLA True, COLA False + ('bartlett', 101, 51, 37), # NOLA True, COLA False + ('hann', 1024, 256, 127), # NOLA True, COLA False + (('tukey', 0.5), 1152, 256, 14), # NOLA True, COLA False + ('hann', 1024, 256, 5), # NOLA True, COLA False + ] + + for window, N, nperseg, noverlap in settings: + msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap) + assert check_NOLA(window, nperseg, noverlap), msg + assert not check_COLA(window, nperseg, noverlap), msg + + t = np.arange(N) + x = 10 * np.random.randn(t.size) + + _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, + window=window, detrend=None, padded=True, + boundary='zeros') + + tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, + window=window, boundary=True) + + msg = '{0}, {1}'.format(window, noverlap) + assert_allclose(t, tr[:len(t)], err_msg=msg) + assert_allclose(x, xr[:len(x)], err_msg=msg) + + @pytest.mark.xfail(reason="Needs complex rfft from fftpack, see gh-2487 + gh-6058") + def test_roundtrip_float32(self): + np.random.seed(1234) + + settings = 
[('hann', 1024, 256, 128)]
+
+        for window, N, nperseg, noverlap in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size)
+            x = x.astype(np.float32)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=False)
+
+            tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
+                           window=window)
+
+            msg = '{0}, {1}'.format(window, noverlap)
+            assert_allclose(t, tr, err_msg=msg)
+            assert_allclose(x, xr, err_msg=msg, rtol=1e-4)
+            assert_(x.dtype == xr.dtype)
+
+    def test_roundtrip_complex(self):
+        np.random.seed(1234)
+
+        settings = [
+                    ('boxcar', 100, 10, 0),           # Test no overlap
+                    ('boxcar', 100, 10, 9),           # Test high overlap
+                    ('bartlett', 101, 51, 26),        # Test odd nperseg
+                    ('hann', 1024, 256, 128),         # Test defaults
+                    (('tukey', 0.5), 1152, 256, 64),  # Test Tukey
+                    ('hann', 1024, 256, 255),         # Test overlapped hann
+                    ]
+
+        for window, N, nperseg, noverlap in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size) + 10j*np.random.randn(t.size)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=False,
+                            return_onesided=False)
+
+            tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
+                           window=window, input_onesided=False)
+
+            msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
+            assert_allclose(t, tr, err_msg=msg)
+            assert_allclose(x, xr, err_msg=msg)
+
+        # Check that asking for onesided switches to twosided
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "Input data is complex, switching to return_onesided=False")
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=False,
+                            return_onesided=True)
+
+        tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
+                       window=window, input_onesided=False)
+
+        msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
+        assert_allclose(t, tr, err_msg=msg)
+        assert_allclose(x, xr, err_msg=msg)
+
+    def test_roundtrip_boundary_extension(self):
+        np.random.seed(1234)
+
+        # Test against boxcar, since window is all ones, and thus can be fully
+        # recovered with no boundary extension
+
+        settings = [
+                    ('boxcar', 100, 10, 0),           # Test no overlap
+                    ('boxcar', 100, 10, 9),           # Test high overlap
+                    ]
+
+        for window, N, nperseg, noverlap in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=True,
+                            boundary=None)
+
+            _, xr = istft(zz, noverlap=noverlap, window=window, boundary=False)
+
+            for boundary in ['even', 'odd', 'constant', 'zeros']:
+                _, _, zz_ext = stft(x, nperseg=nperseg, noverlap=noverlap,
+                                    window=window, detrend=None, padded=True,
+                                    boundary=boundary)
+
+                _, xr_ext = istft(zz_ext, noverlap=noverlap, window=window,
+                                  boundary=True)
+
+                msg = '{0}, {1}, {2}'.format(window, noverlap, boundary)
+                assert_allclose(x, xr, err_msg=msg)
+                assert_allclose(x, xr_ext, err_msg=msg)
+
+    def test_roundtrip_padded_signal(self):
+        np.random.seed(1234)
+
+        settings = [
+                    ('boxcar', 101, 10, 0),
+                    ('hann', 1000, 256, 128),
+                    ]
+
+        for window, N, nperseg, noverlap in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=True)
+
+            tr, xr = istft(zz, noverlap=noverlap, window=window)
+
+            msg = '{0}, {1}'.format(window, noverlap)
+            # Account for possible zero-padding at the end
+            assert_allclose(t, tr[:t.size], err_msg=msg)
+            assert_allclose(x, xr[:x.size], err_msg=msg)
+
+    def test_roundtrip_padded_FFT(self):
+        np.random.seed(1234)
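+        # Each setting below is (window, N, nperseg, noverlap, nfft) with
+        # nfft >= nperseg, so segments are zero-padded before the FFT;
+        # istft must be given the same nfft to discard that padding.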
+ + settings = [ + ('hann', 1024, 256, 128, 512), + ('hann', 1024, 256, 128, 501), + ('boxcar', 100, 10, 0, 33), + (('tukey', 0.5), 1152, 256, 64, 1024), + ] + + for window, N, nperseg, noverlap, nfft in settings: + t = np.arange(N) + x = 10*np.random.randn(t.size) + xc = x*np.exp(1j*np.pi/4) + + # real signal + _, _, z = stft(x, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window, detrend=None, padded=True) + + # complex signal + _, _, zc = stft(xc, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window, detrend=None, padded=True, + return_onesided=False) + + tr, xr = istft(z, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window) + + tr, xcr = istft(zc, nperseg=nperseg, noverlap=noverlap, nfft=nfft, + window=window, input_onesided=False) + + msg = '{0}, {1}'.format(window, noverlap) + assert_allclose(t, tr, err_msg=msg) + assert_allclose(x, xr, err_msg=msg) + assert_allclose(xc, xcr, err_msg=msg) + + def test_axis_rolling(self): + np.random.seed(1234) + + x_flat = np.random.randn(1024) + _, _, z_flat = stft(x_flat) + + for a in range(3): + newshape = [1,]*3 + newshape[a] = -1 + x = x_flat.reshape(newshape) + + _, _, z_plus = stft(x, axis=a) # Positive axis index + _, _, z_minus = stft(x, axis=a-x.ndim) # Negative axis index + + assert_equal(z_flat, z_plus.squeeze(), err_msg=a) + assert_equal(z_flat, z_minus.squeeze(), err_msg=a-x.ndim) + + # z_flat has shape [n_freq, n_time] + + # Test vs. transpose + _, x_transpose_m = istft(z_flat.T, time_axis=-2, freq_axis=-1) + _, x_transpose_p = istft(z_flat.T, time_axis=0, freq_axis=1) + + assert_allclose(x_flat, x_transpose_m, err_msg='istft transpose minus') + assert_allclose(x_flat, x_transpose_p, err_msg='istft transpose plus') diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_spectral.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_spectral.pyc new file mode 100644 index 0000000..f9bf64d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_spectral.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_upfirdn.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_upfirdn.py new file mode 100644 index 0000000..e9549ee --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_upfirdn.py @@ -0,0 +1,175 @@ +# Code adapted from "upfirdn" python library with permission: +# +# Copyright (c) 2009, Motorola, Inc +# +# All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of Motorola nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +import numpy as np +from itertools import product + +from numpy.testing import assert_equal, assert_allclose +from pytest import raises as assert_raises + +from scipy.signal import upfirdn, firwin, lfilter +from scipy.signal._upfirdn import _output_len + + +def upfirdn_naive(x, h, up=1, down=1): + """Naive upfirdn processing in Python + + Note: arg order (x, h) differs to facilitate apply_along_axis use. + """ + h = np.asarray(h) + out = np.zeros(len(x) * up, x.dtype) + out[::up] = x + out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)] + return out + + +class UpFIRDnCase(object): + """Test _UpFIRDn object""" + def __init__(self, up, down, h, x_dtype): + self.up = up + self.down = down + self.h = np.atleast_1d(h) + self.x_dtype = x_dtype + self.rng = np.random.RandomState(17) + + def __call__(self): + # tiny signal + self.scrub(np.ones(1, self.x_dtype)) + # ones + self.scrub(np.ones(10, self.x_dtype)) # ones + # randn + x = self.rng.randn(10).astype(self.x_dtype) + if self.x_dtype in (np.complex64, np.complex128): + x += 1j * self.rng.randn(10) + self.scrub(x) + # ramp + self.scrub(np.arange(10).astype(self.x_dtype)) + # 3D, random + size = (2, 3, 5) + x = self.rng.randn(*size).astype(self.x_dtype) + if self.x_dtype in (np.complex64, np.complex128): + x += 1j * self.rng.randn(*size) + for axis in range(len(size)): + self.scrub(x, axis=axis) + x = x[:, ::2, 1::3].T + for axis in range(len(size)): + self.scrub(x, axis=axis) + + def scrub(self, x, axis=-1): + yr = np.apply_along_axis(upfirdn_naive, axis, x, + self.h, self.up, self.down) + y = upfirdn(self.h, x, self.up, self.down, axis=axis) + dtypes = (self.h.dtype, x.dtype) + if all(d == np.complex64 for d in dtypes): + assert_equal(y.dtype, np.complex64) + elif np.complex64 in dtypes and np.float32 in dtypes: + assert_equal(y.dtype, np.complex64) + elif all(d == np.float32 for d in dtypes): + assert_equal(y.dtype, np.float32) + elif np.complex128 in dtypes or np.complex64 in dtypes: + assert_equal(y.dtype, np.complex128) + else: + assert_equal(y.dtype, np.float64) + assert_allclose(yr, y) + + +class TestUpfirdn(object): + + def test_valid_input(self): + assert_raises(ValueError, upfirdn, [1], [1], 1, 0) # up or down < 1 + assert_raises(ValueError, upfirdn, [], [1], 1, 1) # h.ndim != 1 + assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1) + + def test_vs_lfilter(self): + # Check that up=1.0 gives same answer as lfilter + slicing + random_state = np.random.RandomState(17) + try_types = (int, np.float32, np.complex64, float, complex) + size = 10000 + down_factors = [2, 11, 79] + + for dtype in try_types: + x = random_state.randn(size).astype(dtype) + if dtype in (np.complex64, np.complex128): + x += 1j * random_state.randn(size) + + for down in down_factors: + h = firwin(31, 1. 
/ down, window='hamming') + yl = lfilter(h, 1.0, x)[::down] + y = upfirdn(h, x, up=1, down=down) + assert_allclose(yl, y[:yl.size], atol=1e-7, rtol=1e-7) + + def test_vs_naive(self): + tests = [] + try_types = (int, np.float32, np.complex64, float, complex) + + # Simple combinations of factors + for x_dtype, h in product(try_types, (1., 1j)): + tests.append(UpFIRDnCase(1, 1, h, x_dtype)) + tests.append(UpFIRDnCase(2, 2, h, x_dtype)) + tests.append(UpFIRDnCase(3, 2, h, x_dtype)) + tests.append(UpFIRDnCase(2, 3, h, x_dtype)) + + # mixture of big, small, and both directions (net up and net down) + # use all combinations of data and filter dtypes + factors = (100, 10) # up/down factors + cases = product(factors, factors, try_types, try_types) + for case in cases: + tests += self._random_factors(*case) + + for test in tests: + test() + + def _random_factors(self, p_max, q_max, h_dtype, x_dtype): + n_rep = 3 + longest_h = 25 + random_state = np.random.RandomState(17) + tests = [] + + for _ in range(n_rep): + # Randomize the up/down factors somewhat + p_add = q_max if p_max > q_max else 1 + q_add = p_max if q_max > p_max else 1 + p = random_state.randint(p_max) + p_add + q = random_state.randint(q_max) + q_add + + # Generate random FIR coefficients + len_h = random_state.randint(longest_h) + 1 + h = np.atleast_1d(random_state.randint(len_h)) + h = h.astype(h_dtype) + if h_dtype == complex: + h += 1j * random_state.randint(len_h) + + tests.append(UpFIRDnCase(p, q, h, x_dtype)) + + return tests diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_upfirdn.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_upfirdn.pyc new file mode 100644 index 0000000..f829397 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_upfirdn.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_waveforms.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_waveforms.py new file mode 100644 index 0000000..674cbf1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_waveforms.py @@ -0,0 +1,353 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_almost_equal, assert_equal, + assert_, assert_allclose, assert_array_equal) +from pytest import raises as assert_raises + +import scipy.signal.waveforms as waveforms + + +# These chirp_* functions are the instantaneous frequencies of the signals +# returned by chirp(). + +def chirp_linear(t, f0, f1, t1): + f = f0 + (f1 - f0) * t / t1 + return f + + +def chirp_quadratic(t, f0, f1, t1, vertex_zero=True): + if vertex_zero: + f = f0 + (f1 - f0) * t**2 / t1**2 + else: + f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2 + return f + + +def chirp_geometric(t, f0, f1, t1): + f = f0 * (f1/f0)**(t/t1) + return f + + +def chirp_hyperbolic(t, f0, f1, t1): + f = f0*f1*t1 / ((f0 - f1)*t + f1*t1) + return f + + +def compute_frequency(t, theta): + """ + Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t). + """ + # Assume theta and t are 1D numpy arrays. + # Assume that t is uniformly spaced. 
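+    # Finite-difference estimate: np.diff(theta)/dt approximates theta'(t)
+    # between consecutive samples, so the frequencies are reported at the
+    # midpoints tf = (t[1:] + t[:-1]) / 2.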
+ dt = t[1] - t[0] + f = np.diff(theta)/(2*np.pi) / dt + tf = 0.5*(t[1:] + t[:-1]) + return tf, f + + +class TestChirp(object): + + def test_linear_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear') + assert_almost_equal(w, 1.0) + + def test_linear_freq_01(self): + method = 'linear' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 100) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_linear_freq_02(self): + method = 'linear' + f0 = 200.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 100) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_quadratic_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic') + assert_almost_equal(w, 1.0) + + def test_quadratic_at_zero2(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic', + vertex_zero=False) + assert_almost_equal(w, 1.0) + + def test_quadratic_freq_01(self): + method = 'quadratic' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 2000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_quadratic_freq_02(self): + method = 'quadratic' + f0 = 20.0 + f1 = 10.0 + t1 = 10.0 + t = np.linspace(0, t1, 2000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_logarithmic_at_zero(self): + w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic') + assert_almost_equal(w, 1.0) + + def test_logarithmic_freq_01(self): + method = 'logarithmic' + f0 = 1.0 + f1 = 2.0 + t1 = 1.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_logarithmic_freq_02(self): + method = 'logarithmic' + f0 = 200.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_logarithmic_freq_03(self): + method = 'logarithmic' + f0 = 100.0 + f1 = 100.0 + t1 = 10.0 + t = np.linspace(0, t1, 10000) + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) + assert_(abserr < 1e-6) + + def test_hyperbolic_at_zero(self): + w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic') + assert_almost_equal(w, 1.0) + + def test_hyperbolic_freq_01(self): + method = 'hyperbolic' + t1 = 1.0 + t = np.linspace(0, t1, 10000) + # f0 f1 + cases = [[10.0, 1.0], + [1.0, 10.0], + [-10.0, -1.0], + [-1.0, -10.0]] + for f0, f1 in cases: + phase = waveforms._chirp_phase(t, f0, t1, f1, method) + tf, f = compute_frequency(t, phase) + expected = chirp_hyperbolic(tf, f0, f1, t1) + assert_allclose(f, expected) + + def test_hyperbolic_zero_freq(self): + # f0=0 or f1=0 must raise a ValueError. 
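+        # chirp_hyperbolic above gives f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1),
+        # which is identically zero whenever either endpoint frequency is
+        # zero, so chirp() rejects f0 == 0 and f1 == 0 for this method.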
+ method = 'hyperbolic' + t1 = 1.0 + t = np.linspace(0, t1, 5) + assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method) + assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method) + + def test_unknown_method(self): + method = "foo" + f0 = 10.0 + f1 = 20.0 + t1 = 1.0 + t = np.linspace(0, t1, 10) + assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method) + + def test_integer_t1(self): + f0 = 10.0 + f1 = 20.0 + t = np.linspace(-1, 1, 11) + t1 = 3.0 + float_result = waveforms.chirp(t, f0, t1, f1) + t1 = 3 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 't1=3' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_f0(self): + f1 = 20.0 + t1 = 3.0 + t = np.linspace(-1, 1, 11) + f0 = 10.0 + float_result = waveforms.chirp(t, f0, t1, f1) + f0 = 10 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f0=10' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_f1(self): + f0 = 10.0 + t1 = 3.0 + t = np.linspace(-1, 1, 11) + f1 = 20.0 + float_result = waveforms.chirp(t, f0, t1, f1) + f1 = 20 + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f1=20' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_all(self): + f0 = 10 + t1 = 3 + f1 = 20 + t = np.linspace(-1, 1, 11) + float_result = waveforms.chirp(t, float(f0), float(t1), float(f1)) + int_result = waveforms.chirp(t, f0, t1, f1) + err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + +class TestSweepPoly(object): + + def test_sweep_poly_quad1(self): + p = np.poly1d([1.0, 0.0, 1.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_const(self): + p = np.poly1d(2.0) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_linear(self): + p = np.poly1d([-1.0, 10.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_quad2(self): + p = np.poly1d([1.0, 0.0, -2.0]) + t = np.linspace(0, 3.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_cubic(self): + p = np.poly1d([2.0, 1.0, 0.0, -2.0]) + t = np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = p(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_cubic2(self): + """Use an array of coefficients instead of a poly1d.""" + p = np.array([2.0, 1.0, 0.0, -2.0]) + t = np.linspace(0, 2.0, 10000) + phase = waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = np.poly1d(p)(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + def test_sweep_poly_cubic3(self): + """Use a list of coefficients instead of a poly1d.""" + p = [2.0, 1.0, 0.0, -2.0] + t = np.linspace(0, 2.0, 10000) + phase = 
waveforms._sweep_poly_phase(t, p) + tf, f = compute_frequency(t, phase) + expected = np.poly1d(p)(tf) + abserr = np.max(np.abs(f - expected)) + assert_(abserr < 1e-6) + + +class TestGaussPulse(object): + + def test_integer_fc(self): + float_result = waveforms.gausspulse('cutoff', fc=1000.0) + int_result = waveforms.gausspulse('cutoff', fc=1000) + err_msg = "Integer input 'fc=1000' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_bw(self): + float_result = waveforms.gausspulse('cutoff', bw=1.0) + int_result = waveforms.gausspulse('cutoff', bw=1) + err_msg = "Integer input 'bw=1' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_bwr(self): + float_result = waveforms.gausspulse('cutoff', bwr=-6.0) + int_result = waveforms.gausspulse('cutoff', bwr=-6) + err_msg = "Integer input 'bwr=-6' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + def test_integer_tpr(self): + float_result = waveforms.gausspulse('cutoff', tpr=-60.0) + int_result = waveforms.gausspulse('cutoff', tpr=-60) + err_msg = "Integer input 'tpr=-60' gives wrong result" + assert_equal(int_result, float_result, err_msg=err_msg) + + +class TestUnitImpulse(object): + + def test_no_index(self): + assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0]) + assert_array_equal(waveforms.unit_impulse((3, 3)), + [[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + def test_index(self): + assert_array_equal(waveforms.unit_impulse(10, 3), + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]) + assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)), + [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + + # Broadcasting + imp = waveforms.unit_impulse((4, 4), 2) + assert_array_equal(imp, np.array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 0]])) + + def test_mid(self): + assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'), + [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_array_equal(waveforms.unit_impulse(9, 'mid'), + [0, 0, 0, 0, 1, 0, 0, 0, 0]) + + def test_dtype(self): + imp = waveforms.unit_impulse(7) + assert_(np.issubdtype(imp.dtype, np.floating)) + + imp = waveforms.unit_impulse(5, 3, dtype=int) + assert_(np.issubdtype(imp.dtype, np.integer)) + + imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex) + assert_(np.issubdtype(imp.dtype, np.complexfloating)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_waveforms.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_waveforms.pyc new file mode 100644 index 0000000..6992e96 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_waveforms.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_wavelets.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_wavelets.py new file mode 100644 index 0000000..4f5dd6c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_wavelets.py @@ -0,0 +1,132 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_equal, \ + assert_array_equal, assert_array_almost_equal, assert_array_less, assert_ +from scipy._lib.six import xrange + +from scipy.signal import wavelets + + +class TestWavelets(object): + def test_qmf(self): + assert_array_equal(wavelets.qmf([1, 1]), [1, -1]) + + def test_daub(self): + for i in xrange(1, 15): + assert_equal(len(wavelets.daub(i)), i * 2) + + def test_cascade(self): + for J in xrange(1, 
7): + for i in xrange(1, 5): + lpcoef = wavelets.daub(i) + k = len(lpcoef) + x, phi, psi = wavelets.cascade(lpcoef, J) + assert_(len(x) == len(phi) == len(psi)) + assert_equal(len(x), (k - 1) * 2 ** J) + + def test_morlet(self): + x = wavelets.morlet(50, 4.1, complete=True) + y = wavelets.morlet(50, 4.1, complete=False) + # Test if complete and incomplete wavelet have same lengths: + assert_equal(len(x), len(y)) + # Test if complete wavelet is less than incomplete wavelet: + assert_array_less(x, y) + + x = wavelets.morlet(10, 50, complete=False) + y = wavelets.morlet(10, 50, complete=True) + # For large widths complete and incomplete wavelets should be + # identical within numerical precision: + assert_equal(x, y) + + # miscellaneous tests: + x = np.array([1.73752399e-09 + 9.84327394e-25j, + 6.49471756e-01 + 0.00000000e+00j, + 1.73752399e-09 - 9.84327394e-25j]) + y = wavelets.morlet(3, w=2, complete=True) + assert_array_almost_equal(x, y) + + x = np.array([2.00947715e-09 + 9.84327394e-25j, + 7.51125544e-01 + 0.00000000e+00j, + 2.00947715e-09 - 9.84327394e-25j]) + y = wavelets.morlet(3, w=2, complete=False) + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, s=4, complete=True) + y = wavelets.morlet(20000, s=8, complete=True)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, s=4, complete=False) + assert_array_almost_equal(y, x, decimal=2) + y = wavelets.morlet(20000, s=8, complete=False)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, w=3, s=5, complete=True) + y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, w=3, s=5, complete=False) + assert_array_almost_equal(y, x, decimal=2) + y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, w=7, s=10, complete=True) + y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + x = wavelets.morlet(10000, w=7, s=10, complete=False) + assert_array_almost_equal(x, y, decimal=2) + y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000] + assert_array_almost_equal(x, y, decimal=2) + + def test_ricker(self): + w = wavelets.ricker(1.0, 1) + expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25)) + assert_array_equal(w, expected) + + lengths = [5, 11, 15, 51, 101] + for length in lengths: + w = wavelets.ricker(length, 1.0) + assert_(len(w) == length) + max_loc = np.argmax(w) + assert_(max_loc == (length // 2)) + + points = 100 + w = wavelets.ricker(points, 2.0) + half_vec = np.arange(0, points // 2) + #Wavelet should be symmetric + assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)]) + + #Check zeros + aas = [5, 10, 15, 20, 30] + points = 99 + for a in aas: + w = wavelets.ricker(points, a) + vec = np.arange(0, points) - (points - 1.0) / 2 + exp_zero1 = np.argmin(np.abs(vec - a)) + exp_zero2 = np.argmin(np.abs(vec + a)) + assert_array_almost_equal(w[exp_zero1], 0) + assert_array_almost_equal(w[exp_zero2], 0) + + def test_cwt(self): + widths = [1.0] + delta_wavelet = lambda s, t: np.array([1]) + len_data = 100 + test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0) + + #Test delta function input gives same data as output + cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths) + assert_(cwt_dat.shape == (len(widths), len_data)) + assert_array_almost_equal(test_data, cwt_dat.flatten()) + + #Check proper shape 
on output + widths = [1, 3, 4, 5, 10] + cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths) + assert_(cwt_dat.shape == (len(widths), len_data)) + + widths = [len_data * 10] + #Note: this wavelet isn't defined quite right, but is fine for this test + flat_wavelet = lambda l, w: np.ones(w) / w + cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths) + assert_array_almost_equal(cwt_dat, np.mean(test_data)) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_wavelets.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_wavelets.pyc new file mode 100644 index 0000000..8d4f95c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_wavelets.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_windows.py b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_windows.py new file mode 100644 index 0000000..5427eef --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_windows.py @@ -0,0 +1,640 @@ +from __future__ import division, print_function, absolute_import + +import pickle + +import numpy as np +from numpy import array +from numpy.testing import (assert_array_almost_equal, assert_array_equal, + assert_allclose, + assert_equal, assert_, assert_array_less) +from pytest import raises as assert_raises + +from scipy._lib._numpy_compat import suppress_warnings +from scipy import fftpack +from scipy.signal import windows, get_window, resample, hann as dep_hann + + +window_funcs = [ + ('boxcar', ()), + ('triang', ()), + ('parzen', ()), + ('bohman', ()), + ('blackman', ()), + ('nuttall', ()), + ('blackmanharris', ()), + ('flattop', ()), + ('bartlett', ()), + ('hanning', ()), + ('barthann', ()), + ('hamming', ()), + ('kaiser', (1,)), + ('dpss', (2,)), + ('gaussian', (0.5,)), + ('general_gaussian', (1.5, 2)), + ('chebwin', (1,)), + ('slepian', (2,)), + ('cosine', ()), + ('hann', ()), + ('exponential', ()), + ('tukey', (0.5,)), + ] + + +class TestBartHann(object): + + def test_basic(self): + assert_allclose(windows.barthann(6, sym=True), + [0, 0.35857354213752, 0.8794264578624801, + 0.8794264578624801, 0.3585735421375199, 0]) + assert_allclose(windows.barthann(7), + [0, 0.27, 0.73, 1.0, 0.73, 0.27, 0]) + assert_allclose(windows.barthann(6, False), + [0, 0.27, 0.73, 1.0, 0.73, 0.27]) + + +class TestBartlett(object): + + def test_basic(self): + assert_allclose(windows.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0]) + assert_allclose(windows.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0]) + assert_allclose(windows.bartlett(6, False), + [0, 1/3, 2/3, 1.0, 2/3, 1/3]) + + +class TestBlackman(object): + + def test_basic(self): + assert_allclose(windows.blackman(6, sym=False), + [0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14) + assert_allclose(windows.blackman(7, sym=False), + [0, 0.09045342435412804, 0.4591829575459636, + 0.9203636180999081, 0.9203636180999081, + 0.4591829575459636, 0.09045342435412804], atol=1e-8) + assert_allclose(windows.blackman(6), + [0, 0.2007701432625305, 0.8492298567374694, + 0.8492298567374694, 0.2007701432625305, 0], + atol=1e-14) + assert_allclose(windows.blackman(7, True), + [0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14) + + +class TestBlackmanHarris(object): + + def test_basic(self): + assert_allclose(windows.blackmanharris(6, False), + [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645]) + assert_allclose(windows.blackmanharris(7, sym=False), + [6.0e-05, 0.03339172347815117, 0.332833504298565, + 0.8893697722232837, 
0.8893697722232838, + 0.3328335042985652, 0.03339172347815122]) + assert_allclose(windows.blackmanharris(6), + [6.0e-05, 0.1030114893456638, 0.7938335106543362, + 0.7938335106543364, 0.1030114893456638, 6.0e-05]) + assert_allclose(windows.blackmanharris(7, sym=True), + [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645, + 6.0e-05]) + + +class TestBohman(object): + + def test_basic(self): + assert_allclose(windows.bohman(6), + [0, 0.1791238937062839, 0.8343114522576858, + 0.8343114522576858, 0.1791238937062838, 0]) + assert_allclose(windows.bohman(7, sym=True), + [0, 0.1089977810442293, 0.6089977810442293, 1.0, + 0.6089977810442295, 0.1089977810442293, 0]) + assert_allclose(windows.bohman(6, False), + [0, 0.1089977810442293, 0.6089977810442293, 1.0, + 0.6089977810442295, 0.1089977810442293]) + + +class TestBoxcar(object): + + def test_basic(self): + assert_allclose(windows.boxcar(6), [1, 1, 1, 1, 1, 1]) + assert_allclose(windows.boxcar(7), [1, 1, 1, 1, 1, 1, 1]) + assert_allclose(windows.boxcar(6, False), [1, 1, 1, 1, 1, 1]) + + +cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348, + 0.198891, 0.235450, 0.274846, 0.316836, + 0.361119, 0.407338, 0.455079, 0.503883, + 0.553248, 0.602637, 0.651489, 0.699227, + 0.745266, 0.789028, 0.829947, 0.867485, + 0.901138, 0.930448, 0.955010, 0.974482, + 0.988591, 0.997138, 1.000000, 0.997138, + 0.988591, 0.974482, 0.955010, 0.930448, + 0.901138, 0.867485, 0.829947, 0.789028, + 0.745266, 0.699227, 0.651489, 0.602637, + 0.553248, 0.503883, 0.455079, 0.407338, + 0.361119, 0.316836, 0.274846, 0.235450, + 0.198891, 0.165348, 0.134941, 0.107729, + 0.200938]) + +cheb_even_true = array([0.203894, 0.107279, 0.133904, + 0.163608, 0.196338, 0.231986, + 0.270385, 0.311313, 0.354493, + 0.399594, 0.446233, 0.493983, + 0.542378, 0.590916, 0.639071, + 0.686302, 0.732055, 0.775783, + 0.816944, 0.855021, 0.889525, + 0.920006, 0.946060, 0.967339, + 0.983557, 0.994494, 1.000000, + 1.000000, 0.994494, 0.983557, + 0.967339, 0.946060, 0.920006, + 0.889525, 0.855021, 0.816944, + 0.775783, 0.732055, 0.686302, + 0.639071, 0.590916, 0.542378, + 0.493983, 0.446233, 0.399594, + 0.354493, 0.311313, 0.270385, + 0.231986, 0.196338, 0.163608, + 0.133904, 0.107279, 0.203894]) + + +class TestChebWin(object): + + def test_basic(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + assert_allclose(windows.chebwin(6, 100), + [0.1046401879356917, 0.5075781475823447, 1.0, 1.0, + 0.5075781475823447, 0.1046401879356917]) + assert_allclose(windows.chebwin(7, 100), + [0.05650405062850233, 0.316608530648474, + 0.7601208123539079, 1.0, 0.7601208123539079, + 0.316608530648474, 0.05650405062850233]) + assert_allclose(windows.chebwin(6, 10), + [1.0, 0.6071201674458373, 0.6808391469897297, + 0.6808391469897297, 0.6071201674458373, 1.0]) + assert_allclose(windows.chebwin(7, 10), + [1.0, 0.5190521247588651, 0.5864059018130382, + 0.6101519801307441, 0.5864059018130382, + 0.5190521247588651, 1.0]) + assert_allclose(windows.chebwin(6, 10, False), + [1.0, 0.5190521247588651, 0.5864059018130382, + 0.6101519801307441, 0.5864059018130382, + 0.5190521247588651]) + + def test_cheb_odd_high_attenuation(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_odd = windows.chebwin(53, at=-40) + assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4) + + def test_cheb_even_high_attenuation(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_even = 
windows.chebwin(54, at=40) + assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4) + + def test_cheb_odd_low_attenuation(self): + cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405, + 0.610151, 0.586405, 0.519052, + 1.000000]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_odd = windows.chebwin(7, at=10) + assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4) + + def test_cheb_even_low_attenuation(self): + cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027, + 0.541338, 0.541338, 0.51027, + 0.451924, 1.000000]) + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + cheb_even = windows.chebwin(8, at=-10) + assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4) + + +exponential_data = { + (4, None, 0.2, False): + array([4.53999297624848542e-05, + 6.73794699908546700e-03, 1.00000000000000000e+00, + 6.73794699908546700e-03]), + (4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988, + 0.0820849986238988, 0.00055308437014783]), + (4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., + 0.36787944117144233]), + (4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342, + 0.60653065971263342, 0.22313016014842982]), + (4, 2, 0.2, False): + array([4.53999297624848542e-05, 6.73794699908546700e-03, + 1.00000000000000000e+00, 6.73794699908546700e-03]), + (4, 2, 0.2, True): None, + (4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., + 0.36787944117144233]), + (4, 2, 1.0, True): None, + (5, None, 0.2, True): + array([4.53999297624848542e-05, + 6.73794699908546700e-03, 1.00000000000000000e+00, + 6.73794699908546700e-03, 4.53999297624848542e-05]), + (5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1., + 0.36787944117144233, 0.1353352832366127]), + (5, 2, 0.2, True): None, + (5, 2, 1.0, True): None +} + + +def test_exponential(): + for k, v in exponential_data.items(): + if v is None: + assert_raises(ValueError, windows.exponential, *k) + else: + win = windows.exponential(*k) + assert_allclose(win, v, rtol=1e-14) + + +class TestFlatTop(object): + + def test_basic(self): + assert_allclose(windows.flattop(6, sym=False), + [-0.000421051, -0.051263156, 0.19821053, 1.0, + 0.19821053, -0.051263156]) + assert_allclose(windows.flattop(7, sym=False), + [-0.000421051, -0.03684078115492348, + 0.01070371671615342, 0.7808739149387698, + 0.7808739149387698, 0.01070371671615342, + -0.03684078115492348]) + assert_allclose(windows.flattop(6), + [-0.000421051, -0.0677142520762119, 0.6068721525762117, + 0.6068721525762117, -0.0677142520762119, + -0.000421051]) + assert_allclose(windows.flattop(7, True), + [-0.000421051, -0.051263156, 0.19821053, 1.0, + 0.19821053, -0.051263156, -0.000421051]) + + +class TestGaussian(object): + + def test_basic(self): + assert_allclose(windows.gaussian(6, 1.0), + [0.04393693362340742, 0.3246524673583497, + 0.8824969025845955, 0.8824969025845955, + 0.3246524673583497, 0.04393693362340742]) + assert_allclose(windows.gaussian(7, 1.2), + [0.04393693362340742, 0.2493522087772962, + 0.7066482778577162, 1.0, 0.7066482778577162, + 0.2493522087772962, 0.04393693362340742]) + assert_allclose(windows.gaussian(7, 3), + [0.6065306597126334, 0.8007374029168081, + 0.9459594689067654, 1.0, 0.9459594689067654, + 0.8007374029168081, 0.6065306597126334]) + assert_allclose(windows.gaussian(6, 3, False), + [0.6065306597126334, 0.8007374029168081, + 0.9459594689067654, 1.0, 
0.9459594689067654, + 0.8007374029168081]) + + +class TestGeneralCosine(object): + + def test_basic(self): + assert_allclose(windows.general_cosine(5, [0.5, 0.3, 0.2]), + [0.4, 0.3, 1, 0.3, 0.4]) + assert_allclose(windows.general_cosine(4, [0.5, 0.3, 0.2], sym=False), + [0.4, 0.3, 1, 0.3]) + +class TestGeneralHamming(object): + + def test_basic(self): + assert_allclose(windows.general_hamming(5, 0.7), + [0.4, 0.7, 1.0, 0.7, 0.4]) + assert_allclose(windows.general_hamming(5, 0.75, sym=False), + [0.5, 0.6727457514, 0.9522542486, + 0.9522542486, 0.6727457514]) + assert_allclose(windows.general_hamming(6, 0.75, sym=True), + [0.5, 0.6727457514, 0.9522542486, + 0.9522542486, 0.6727457514, 0.5]) + + +class TestHamming(object): + + def test_basic(self): + assert_allclose(windows.hamming(6, False), + [0.08, 0.31, 0.77, 1.0, 0.77, 0.31]) + assert_allclose(windows.hamming(7, sym=False), + [0.08, 0.2531946911449826, 0.6423596296199047, + 0.9544456792351128, 0.9544456792351128, + 0.6423596296199047, 0.2531946911449826]) + assert_allclose(windows.hamming(6), + [0.08, 0.3978521825875242, 0.9121478174124757, + 0.9121478174124757, 0.3978521825875242, 0.08]) + assert_allclose(windows.hamming(7, sym=True), + [0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08]) + + +class TestHann(object): + + def test_basic(self): + assert_allclose(windows.hann(6, sym=False), + [0, 0.25, 0.75, 1.0, 0.75, 0.25]) + assert_allclose(windows.hann(7, sym=False), + [0, 0.1882550990706332, 0.6112604669781572, + 0.9504844339512095, 0.9504844339512095, + 0.6112604669781572, 0.1882550990706332]) + assert_allclose(windows.hann(6, True), + [0, 0.3454915028125263, 0.9045084971874737, + 0.9045084971874737, 0.3454915028125263, 0]) + assert_allclose(windows.hann(7), + [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0]) + + +class TestKaiser(object): + + def test_basic(self): + assert_allclose(windows.kaiser(6, 0.5), + [0.9403061933191572, 0.9782962393705389, + 0.9975765035372042, 0.9975765035372042, + 0.9782962393705389, 0.9403061933191572]) + assert_allclose(windows.kaiser(7, 0.5), + [0.9403061933191572, 0.9732402256999829, + 0.9932754654413773, 1.0, 0.9932754654413773, + 0.9732402256999829, 0.9403061933191572]) + assert_allclose(windows.kaiser(6, 2.7), + [0.2603047507678832, 0.6648106293528054, + 0.9582099802511439, 0.9582099802511439, + 0.6648106293528054, 0.2603047507678832]) + assert_allclose(windows.kaiser(7, 2.7), + [0.2603047507678832, 0.5985765418119844, + 0.8868495172060835, 1.0, 0.8868495172060835, + 0.5985765418119844, 0.2603047507678832]) + assert_allclose(windows.kaiser(6, 2.7, False), + [0.2603047507678832, 0.5985765418119844, + 0.8868495172060835, 1.0, 0.8868495172060835, + 0.5985765418119844]) + + +class TestNuttall(object): + + def test_basic(self): + assert_allclose(windows.nuttall(6, sym=False), + [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298, + 0.0613345]) + assert_allclose(windows.nuttall(7, sym=False), + [0.0003628, 0.03777576895352025, 0.3427276199688195, + 0.8918518610776603, 0.8918518610776603, + 0.3427276199688196, 0.0377757689535203]) + assert_allclose(windows.nuttall(6), + [0.0003628, 0.1105152530498718, 0.7982580969501282, + 0.7982580969501283, 0.1105152530498719, 0.0003628]) + assert_allclose(windows.nuttall(7, True), + [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298, + 0.0613345, 0.0003628]) + + +class TestParzen(object): + + def test_basic(self): + assert_allclose(windows.parzen(6), + [0.009259259259259254, 0.25, 0.8611111111111112, + 0.8611111111111112, 0.25, 0.009259259259259254]) + assert_allclose(windows.parzen(7, sym=True), 
+ [0.00583090379008747, 0.1574344023323616, + 0.6501457725947521, 1.0, 0.6501457725947521, + 0.1574344023323616, 0.00583090379008747]) + assert_allclose(windows.parzen(6, False), + [0.00583090379008747, 0.1574344023323616, + 0.6501457725947521, 1.0, 0.6501457725947521, + 0.1574344023323616]) + + +class TestTriang(object): + + def test_basic(self): + + assert_allclose(windows.triang(6, True), + [1/6, 1/2, 5/6, 5/6, 1/2, 1/6]) + assert_allclose(windows.triang(7), + [1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4]) + assert_allclose(windows.triang(6, sym=False), + [1/4, 1/2, 3/4, 1, 3/4, 1/2]) + + +tukey_data = { + (4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]), + (4, 0.9, True): array([0.0, 0.84312081893436686, + 0.84312081893436686, 0.0]), + (4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]), + (4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]), + (4, 0.9, False): array([0.0, 0.58682408883346526, + 1.0, 0.58682408883346526]), + (4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]), + (5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]), + (5, 0.8, True): array([0.0, 0.69134171618254492, + 1.0, 0.69134171618254492, 0.0]), + (5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]), + + (6, 0): [1, 1, 1, 1, 1, 1], + (7, 0): [1, 1, 1, 1, 1, 1, 1], + (6, .25): [0, 1, 1, 1, 1, 0], + (7, .25): [0, 1, 1, 1, 1, 1, 0], + (6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0], + (7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0], + (6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0], + (7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0, + 0.9698463103929542, 0.4131759111665347, 0], + (6, 1): [0, 0.3454915028125263, 0.9045084971874737, 0.9045084971874737, + 0.3454915028125263, 0], + (7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0], +} + + +class TestTukey(object): + + def test_basic(self): + # Test against hardcoded data + for k, v in tukey_data.items(): + if v is None: + assert_raises(ValueError, windows.tukey, *k) + else: + win = windows.tukey(*k) + assert_allclose(win, v, rtol=1e-14) + + def test_extremes(self): + # Test extremes of alpha correspond to boxcar and hann + tuk0 = windows.tukey(100, 0) + box0 = windows.boxcar(100) + assert_array_almost_equal(tuk0, box0) + + tuk1 = windows.tukey(100, 1) + han1 = windows.hann(100) + assert_array_almost_equal(tuk1, han1) + + +dpss_data = { + # All values from MATLAB: + # * taper[1] of (3, 1.4, 3) sign-flipped + # * taper[3] of (5, 1.5, 5) sign-flipped + (4, 0.1, 2): ([[0.497943898, 0.502047681, 0.502047681, 0.497943898], [0.670487993, 0.224601537, -0.224601537, -0.670487993]], [0.197961815, 0.002035474]), # noqa + (3, 1.4, 3): ([[0.410233151, 0.814504464, 0.410233151], [0.707106781, 0.0, -0.707106781], [0.575941629, -0.580157287, 0.575941629]], [0.999998093, 0.998067480, 0.801934426]), # noqa + (5, 1.5, 5): ([[0.1745071052, 0.4956749177, 0.669109327, 0.495674917, 0.174507105], [0.4399493348, 0.553574369, 0.0, -0.553574369, -0.439949334], [0.631452756, 0.073280238, -0.437943884, 0.073280238, 0.631452756], [0.553574369, -0.439949334, 0.0, 0.439949334, -0.553574369], [0.266110290, -0.498935248, 0.600414741, -0.498935248, 0.266110290147157]], [0.999728571, 0.983706916, 0.768457889, 0.234159338, 0.013947282907567]), # noqa: E501 + (100, 2, 4): ([[0.0030914414, 0.0041266922, 0.005315076, 0.006665149, 0.008184854, 0.0098814158, 0.011761239, 0.013829809, 0.016091597, 0.018549973, 0.02120712, 0.02406396, 0.027120092, 0.030373728, 0.033821651, 0.037459181, 0.041280145, 0.045276872, 0.049440192, 0.053759447, 0.058222524, 0.062815894, 0.067524661, 0.072332638, 0.077222418, 0.082175473, 0.087172252, 
0.092192299, 0.097214376, 0.1022166, 0.10717657, 0.11207154, 0.11687856, 0.12157463, 0.12613686, 0.13054266, 0.13476986, 0.13879691, 0.14260302, 0.14616832, 0.14947401, 0.1525025, 0.15523755, 0.15766438, 0.15976981, 0.16154233, 0.16297223, 0.16405162, 0.16477455, 0.16513702, 0.16513702, 0.16477455, 0.16405162, 0.16297223, 0.16154233, 0.15976981, 0.15766438, 0.15523755, 0.1525025, 0.14947401, 0.14616832, 0.14260302, 0.13879691, 0.13476986, 0.13054266, 0.12613686, 0.12157463, 0.11687856, 0.11207154, 0.10717657, 0.1022166, 0.097214376, 0.092192299, 0.087172252, 0.082175473, 0.077222418, 0.072332638, 0.067524661, 0.062815894, 0.058222524, 0.053759447, 0.049440192, 0.045276872, 0.041280145, 0.037459181, 0.033821651, 0.030373728, 0.027120092, 0.02406396, 0.02120712, 0.018549973, 0.016091597, 0.013829809, 0.011761239, 0.0098814158, 0.008184854, 0.006665149, 0.005315076, 0.0041266922, 0.0030914414], [0.018064449, 0.022040342, 0.026325013, 0.030905288, 0.035764398, 0.040881982, 0.046234148, 0.051793558, 0.057529559, 0.063408356, 0.069393216, 0.075444716, 0.081521022, 0.087578202, 0.093570567, 0.099451049, 0.10517159, 0.11068356, 0.11593818, 0.12088699, 0.12548227, 0.12967752, 0.1334279, 0.13669069, 0.13942569, 0.1415957, 0.14316686, 0.14410905, 0.14439626, 0.14400686, 0.14292389, 0.1411353, 0.13863416, 0.13541876, 0.13149274, 0.12686516, 0.12155045, 0.1155684, 0.10894403, 0.10170748, 0.093893752, 0.08554251, 0.076697768, 0.067407559, 0.057723559, 0.04770068, 0.037396627, 0.026871428, 0.016186944, 0.0054063557, -0.0054063557, -0.016186944, -0.026871428, -0.037396627, -0.04770068, -0.057723559, -0.067407559, -0.076697768, -0.08554251, -0.093893752, -0.10170748, -0.10894403, -0.1155684, -0.12155045, -0.12686516, -0.13149274, -0.13541876, -0.13863416, -0.1411353, -0.14292389, -0.14400686, -0.14439626, -0.14410905, -0.14316686, -0.1415957, -0.13942569, -0.13669069, -0.1334279, -0.12967752, -0.12548227, -0.12088699, -0.11593818, -0.11068356, -0.10517159, -0.099451049, -0.093570567, -0.087578202, -0.081521022, -0.075444716, -0.069393216, -0.063408356, -0.057529559, -0.051793558, -0.046234148, -0.040881982, -0.035764398, -0.030905288, -0.026325013, -0.022040342, -0.018064449], [0.064817553, 0.072567801, 0.080292992, 0.087918235, 0.095367076, 0.10256232, 0.10942687, 0.1158846, 0.12186124, 0.12728523, 0.13208858, 0.13620771, 0.13958427, 0.14216587, 0.14390678, 0.14476863, 0.1447209, 0.14374148, 0.14181704, 0.13894336, 0.13512554, 0.13037812, 0.1247251, 0.11819984, 0.11084487, 0.10271159, 0.093859853, 0.084357497, 0.074279719, 0.063708406, 0.052731374, 0.041441525, 0.029935953, 0.018314987, 0.0066811877, -0.0048616765, -0.016209689, -0.027259848, -0.037911124, -0.048065512, -0.05762905, -0.066512804, -0.0746338, -0.081915903, -0.088290621, -0.09369783, -0.098086416, -0.10141482, -0.10365146, -0.10477512, -0.10477512, -0.10365146, -0.10141482, -0.098086416, -0.09369783, -0.088290621, -0.081915903, -0.0746338, -0.066512804, -0.05762905, -0.048065512, -0.037911124, -0.027259848, -0.016209689, -0.0048616765, 0.0066811877, 0.018314987, 0.029935953, 0.041441525, 0.052731374, 0.063708406, 0.074279719, 0.084357497, 0.093859853, 0.10271159, 0.11084487, 0.11819984, 0.1247251, 0.13037812, 0.13512554, 0.13894336, 0.14181704, 0.14374148, 0.1447209, 0.14476863, 0.14390678, 0.14216587, 0.13958427, 0.13620771, 0.13208858, 0.12728523, 0.12186124, 0.1158846, 0.10942687, 0.10256232, 0.095367076, 0.087918235, 0.080292992, 0.072567801, 0.064817553], [0.14985551, 0.15512305, 0.15931467, 0.16236806, 0.16423291, 0.16487165, 
0.16426009, 0.1623879, 0.1592589, 0.15489114, 0.14931693, 0.14258255, 0.13474785, 0.1258857, 0.11608124, 0.10543095, 0.094041635, 0.082029213, 0.069517411, 0.056636348, 0.043521028, 0.030309756, 0.017142511, 0.0041592774, -0.0085016282, -0.020705223, -0.032321494, -0.043226982, -0.053306291, -0.062453515, -0.070573544, -0.077583253, -0.083412547, -0.088005244, -0.091319802, -0.093329861, -0.094024602, -0.093408915, -0.091503383, -0.08834406, -0.08398207, -0.078483012, -0.071926192, -0.064403681, -0.056019215, -0.046886954, -0.037130106, -0.026879442, -0.016271713, -0.005448, 0.005448, 0.016271713, 0.026879442, 0.037130106, 0.046886954, 0.056019215, 0.064403681, 0.071926192, 0.078483012, 0.08398207, 0.08834406, 0.091503383, 0.093408915, 0.094024602, 0.093329861, 0.091319802, 0.088005244, 0.083412547, 0.077583253, 0.070573544, 0.062453515, 0.053306291, 0.043226982, 0.032321494, 0.020705223, 0.0085016282, -0.0041592774, -0.017142511, -0.030309756, -0.043521028, -0.056636348, -0.069517411, -0.082029213, -0.094041635, -0.10543095, -0.11608124, -0.1258857, -0.13474785, -0.14258255, -0.14931693, -0.15489114, -0.1592589, -0.1623879, -0.16426009, -0.16487165, -0.16423291, -0.16236806, -0.15931467, -0.15512305, -0.14985551]], [0.999943140, 0.997571533, 0.959465463, 0.721862496]), # noqa: E501 +} + + +class TestDPSS(object): + + def test_basic(self): + # Test against hardcoded data + for k, v in dpss_data.items(): + win, ratios = windows.dpss(*k, return_ratios=True) + assert_allclose(win, v[0], atol=1e-7, err_msg=k) + assert_allclose(ratios, v[1], rtol=1e-5, atol=1e-7, err_msg=k) + + def test_unity(self): + # Test unity value handling (gh-2221) + for M in range(1, 21): + # corrected w/approximation (default) + win = windows.dpss(M, M / 2.1) + expected = M % 2 # one for odd, none for even + assert_equal(np.isclose(win, 1.).sum(), expected, + err_msg='%s' % (win,)) + # corrected w/subsample delay (slower) + win_sub = windows.dpss(M, M / 2.1, norm='subsample') + if M > 2: + # @M=2 the subsample doesn't do anything + assert_equal(np.isclose(win_sub, 1.).sum(), expected, + err_msg='%s' % (win_sub,)) + assert_allclose(win, win_sub, rtol=0.03) # within 3% + # not the same, l2-norm + win_2 = windows.dpss(M, M / 2.1, norm=2) + expected = 1 if M == 1 else 0 + assert_equal(np.isclose(win_2, 1.).sum(), expected, + err_msg='%s' % (win_2,)) + + def test_extremes(self): + # Test extremes of alpha + lam = windows.dpss(31, 6, 4, return_ratios=True)[1] + assert_array_almost_equal(lam, 1.) + lam = windows.dpss(31, 7, 4, return_ratios=True)[1] + assert_array_almost_equal(lam, 1.) + lam = windows.dpss(31, 8, 4, return_ratios=True)[1] + assert_array_almost_equal(lam, 1.) + + def test_degenerate(self): + # Test failures + assert_raises(ValueError, windows.dpss, 4, 1.5, -1) # Bad Kmax + assert_raises(ValueError, windows.dpss, 4, 1.5, -5) + assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1) + assert_raises(ValueError, windows.dpss, 3, 1.5, 3) # NW must be < N/2. 
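+        # In dpss(M, NW, Kmax), M is the window length, NW the standardized
+        # half-bandwidth and Kmax the number of tapers, so each argument has
+        # its own validity constraint exercised here.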
+ assert_raises(ValueError, windows.dpss, 3, -1, 3) # NW must be pos + assert_raises(ValueError, windows.dpss, 3, 0, 3) + assert_raises(ValueError, windows.dpss, -1, 1, 3) # negative M + + +class TestGetWindow(object): + + def test_boxcar(self): + w = windows.get_window('boxcar', 12) + assert_array_equal(w, np.ones_like(w)) + + # window is a tuple of len 1 + w = windows.get_window(('boxcar',), 16) + assert_array_equal(w, np.ones_like(w)) + + def test_cheb_odd(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + w = windows.get_window(('chebwin', -40), 53, fftbins=False) + assert_array_almost_equal(w, cheb_odd_true, decimal=4) + + def test_cheb_even(self): + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + w = windows.get_window(('chebwin', 40), 54, fftbins=False) + assert_array_almost_equal(w, cheb_even_true, decimal=4) + + def test_kaiser_float(self): + win1 = windows.get_window(7.2, 64) + win2 = windows.kaiser(64, 7.2, False) + assert_allclose(win1, win2) + + def test_invalid_inputs(self): + # Window is not a float, tuple, or string + assert_raises(ValueError, windows.get_window, set('hann'), 8) + + # Unknown window type error + assert_raises(ValueError, windows.get_window, 'broken', 4) + + def test_array_as_window(self): + # github issue 3603 + osfactor = 128 + sig = np.arange(128) + + win = windows.get_window(('kaiser', 8.0), osfactor // 2) + assert_raises(ValueError, resample, + (sig, len(sig) * osfactor), {'window': win}) + + +def test_windowfunc_basics(): + for window_name, params in window_funcs: + window = getattr(windows, window_name) + with suppress_warnings() as sup: + sup.filter(UserWarning, "This window is not suitable") + if window_name in ('slepian', 'hanning'): + sup.filter(DeprecationWarning) + # Check symmetry for odd and even lengths + w1 = window(8, *params, sym=True) + w2 = window(7, *params, sym=False) + assert_array_almost_equal(w1[:-1], w2) + + w1 = window(9, *params, sym=True) + w2 = window(8, *params, sym=False) + assert_array_almost_equal(w1[:-1], w2) + + # Check that functions run and output lengths are correct + assert_equal(len(window(6, *params, sym=True)), 6) + assert_equal(len(window(6, *params, sym=False)), 6) + assert_equal(len(window(7, *params, sym=True)), 7) + assert_equal(len(window(7, *params, sym=False)), 7) + + # Check invalid lengths + assert_raises(ValueError, window, 5.5, *params) + assert_raises(ValueError, window, -7, *params) + + # Check degenerate cases + assert_array_equal(window(0, *params, sym=True), []) + assert_array_equal(window(0, *params, sym=False), []) + assert_array_equal(window(1, *params, sym=True), [1]) + assert_array_equal(window(1, *params, sym=False), [1]) + + # Check dtype + assert_(window(0, *params, sym=True).dtype == 'float') + assert_(window(0, *params, sym=False).dtype == 'float') + assert_(window(1, *params, sym=True).dtype == 'float') + assert_(window(1, *params, sym=False).dtype == 'float') + assert_(window(6, *params, sym=True).dtype == 'float') + assert_(window(6, *params, sym=False).dtype == 'float') + + # Check normalization + assert_array_less(window(10, *params, sym=True), 1.01) + assert_array_less(window(10, *params, sym=False), 1.01) + assert_array_less(window(9, *params, sym=True), 1.01) + assert_array_less(window(9, *params, sym=False), 1.01) + + # Check that DFT-even spectrum is purely real for odd and even + assert_allclose(fftpack.fft(window(10, *params, sym=False)).imag, + 0, atol=1e-14) + 
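+        # A DFT-even window (sym=False) satisfies w[n] == w[N - n], an even
+        # sequence under the DFT, so its spectrum is real up to rounding
+        # error for both even and odd lengths.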
assert_allclose(fftpack.fft(window(11, *params, sym=False)).imag, + 0, atol=1e-14) + + +def test_needs_params(): + for winstr in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss', + 'general gaussian', 'general_gaussian', + 'general gauss', 'general_gauss', 'ggs', + 'slepian', 'optimal', 'slep', 'dss', 'dpss', + 'chebwin', 'cheb', 'exponential', 'poisson', 'tukey', + 'tuk', 'dpss']: + assert_raises(ValueError, get_window, winstr, 7) + + +def test_deprecation(): + if dep_hann.__doc__ is not None: # can be None with `-OO` mode + assert_('signal.hann is deprecated' in dep_hann.__doc__) + assert_('deprecated' not in windows.hann.__doc__) + + +def test_deprecated_pickleable(): + dep_hann2 = pickle.loads(pickle.dumps(dep_hann)) + assert_(dep_hann2 is dep_hann) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_windows.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_windows.pyc new file mode 100644 index 0000000..69ad85d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/tests/test_windows.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/waveforms.py b/project/venv/lib/python2.7/site-packages/scipy/signal/waveforms.py new file mode 100644 index 0000000..b6b5a12 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/waveforms.py @@ -0,0 +1,681 @@ +# Author: Travis Oliphant +# 2003 +# +# Feb. 2010: Updated by Warren Weckesser: +# Rewrote much of chirp() +# Added sweep_poly() +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \ + exp, cos, sin, polyval, polyint + +from scipy._lib.six import string_types + + +__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', + 'unit_impulse'] + + +def sawtooth(t, width=1): + """ + Return a periodic sawtooth or triangle waveform. + + The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the + interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval + ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1]. + + Note that this is not band-limited. It produces an infinite number + of harmonics, which are aliased back and forth across the frequency + spectrum. + + Parameters + ---------- + t : array_like + Time. + width : array_like, optional + Width of the rising ramp as a proportion of the total cycle. + Default is 1, producing a rising ramp, while 0 produces a falling + ramp. `width` = 0.5 produces a triangle wave. + If an array, causes wave shape to change over time, and must be the + same length as t. + + Returns + ------- + y : ndarray + Output array containing the sawtooth waveform. 
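+
+    A few spot values follow directly from the ramp definition above (a
+    minimal doctest-style sketch; assumes only NumPy and this module):
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> signal.sawtooth(np.array([0.0, np.pi, 1.5 * np.pi]))
+    array([-1. ,  0. ,  0.5])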
+ + Examples + -------- + A 5 Hz waveform sampled at 500 Hz for 1 second: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(0, 1, 500) + >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t)) + + """ + t, w = asarray(t), asarray(width) + w = asarray(w + (t - t)) + t = asarray(t + (w - w)) + if t.dtype.char in ['fFdD']: + ytype = t.dtype.char + else: + ytype = 'd' + y = zeros(t.shape, ytype) + + # width must be between 0 and 1 inclusive + mask1 = (w > 1) | (w < 0) + place(y, mask1, nan) + + # take t modulo 2*pi + tmod = mod(t, 2 * pi) + + # on the interval 0 to width*2*pi function is + # tmod / (pi*w) - 1 + mask2 = (1 - mask1) & (tmod < w * 2 * pi) + tsub = extract(mask2, tmod) + wsub = extract(mask2, w) + place(y, mask2, tsub / (pi * wsub) - 1) + + # on the interval width*2*pi to 2*pi function is + # (pi*(w+1)-tmod) / (pi*(1-w)) + + mask3 = (1 - mask1) & (1 - mask2) + tsub = extract(mask3, tmod) + wsub = extract(mask3, w) + place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub))) + return y + + +def square(t, duty=0.5): + """ + Return a periodic square-wave waveform. + + The square wave has a period ``2*pi``, has value +1 from 0 to + ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in + the interval [0,1]. + + Note that this is not band-limited. It produces an infinite number + of harmonics, which are aliased back and forth across the frequency + spectrum. + + Parameters + ---------- + t : array_like + The input time array. + duty : array_like, optional + Duty cycle. Default is 0.5 (50% duty cycle). + If an array, causes wave shape to change over time, and must be the + same length as t. + + Returns + ------- + y : ndarray + Output array containing the square waveform. + + Examples + -------- + A 5 Hz waveform sampled at 500 Hz for 1 second: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(0, 1, 500, endpoint=False) + >>> plt.plot(t, signal.square(2 * np.pi * 5 * t)) + >>> plt.ylim(-2, 2) + + A pulse-width modulated sine wave: + + >>> plt.figure() + >>> sig = np.sin(2 * np.pi * t) + >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2) + >>> plt.subplot(2, 1, 1) + >>> plt.plot(t, sig) + >>> plt.subplot(2, 1, 2) + >>> plt.plot(t, pwm) + >>> plt.ylim(-1.5, 1.5) + + """ + t, w = asarray(t), asarray(duty) + w = asarray(w + (t - t)) + t = asarray(t + (w - w)) + if t.dtype.char in ['fFdD']: + ytype = t.dtype.char + else: + ytype = 'd' + + y = zeros(t.shape, ytype) + + # width must be between 0 and 1 inclusive + mask1 = (w > 1) | (w < 0) + place(y, mask1, nan) + + # on the interval 0 to duty*2*pi function is 1 + tmod = mod(t, 2 * pi) + mask2 = (1 - mask1) & (tmod < w * 2 * pi) + place(y, mask2, 1) + + # on the interval duty*2*pi to 2*pi function is + # (pi*(w+1)-tmod) / (pi*(1-w)) + mask3 = (1 - mask1) & (1 - mask2) + place(y, mask3, -1) + return y + + +def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, + retenv=False): + """ + Return a Gaussian modulated sinusoid: + + ``exp(-a t^2) exp(1j*2*pi*fc*t).`` + + If `retquad` is True, then return the real and imaginary parts + (in-phase and quadrature). + If `retenv` is True, then return the envelope (unmodulated signal). + Otherwise, return the real part of the modulated sinusoid. + + Parameters + ---------- + t : ndarray or the string 'cutoff' + Input array. + fc : int, optional + Center frequency (e.g. Hz). Default is 1000. + bw : float, optional + Fractional bandwidth in frequency domain of pulse (e.g. Hz). 
+ Default is 0.5. + bwr : float, optional + Reference level at which fractional bandwidth is calculated (dB). + Default is -6. + tpr : float, optional + If `t` is 'cutoff', then the function returns the cutoff + time for when the pulse amplitude falls below `tpr` (in dB). + Default is -60. + retquad : bool, optional + If True, return the quadrature (imaginary) as well as the real part + of the signal. Default is False. + retenv : bool, optional + If True, return the envelope of the signal. Default is False. + + Returns + ------- + yI : ndarray + Real part of signal. Always returned. + yQ : ndarray + Imaginary part of signal. Only returned if `retquad` is True. + yenv : ndarray + Envelope of signal. Only returned if `retenv` is True. + + See Also + -------- + scipy.signal.morlet + + Examples + -------- + Plot real component, imaginary component, and envelope for a 5 Hz pulse, + sampled at 100 Hz for 2 seconds: + + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False) + >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True) + >>> plt.plot(t, i, t, q, t, e, '--') + + """ + if fc < 0: + raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc) + if bw <= 0: + raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw) + if bwr >= 0: + raise ValueError("Reference level for bandwidth (bwr=%.2f) must " + "be < 0 dB" % bwr) + + # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f) + + ref = pow(10.0, bwr / 20.0) + # fdel = fc*bw/2: g(fdel) = ref --- solve this for a + # + # pi^2/a * fc^2 * bw^2 /4=-log(ref) + a = -(pi * fc * bw) ** 2 / (4.0 * log(ref)) + + if isinstance(t, string_types): + if t == 'cutoff': # compute cut_off point + # Solve exp(-a tc**2) = tref for tc + # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20) + if tpr >= 0: + raise ValueError("Reference level for time cutoff must " + "be < 0 dB") + tref = pow(10.0, tpr / 20.0) + return sqrt(-log(tref) / a) + else: + raise ValueError("If `t` is a string, it must be 'cutoff'") + + yenv = exp(-a * t * t) + yI = yenv * cos(2 * pi * fc * t) + yQ = yenv * sin(2 * pi * fc * t) + if not retquad and not retenv: + return yI + if not retquad and retenv: + return yI, yenv + if retquad and not retenv: + return yI, yQ + if retquad and retenv: + return yI, yQ, yenv + + +def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True): + """Frequency-swept cosine generator. + + In the following, 'Hz' should be interpreted as 'cycles per unit'; + there is no requirement here that the unit is one second. The + important distinction is that the units of rotation are cycles, not + radians. Likewise, `t` could be a measurement of space instead of time. + + Parameters + ---------- + t : array_like + Times at which to evaluate the waveform. + f0 : float + Frequency (e.g. Hz) at time t=0. + t1 : float + Time at which `f1` is specified. + f1 : float + Frequency (e.g. Hz) of the waveform at time `t1`. + method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional + Kind of frequency sweep. If not given, `linear` is assumed. See + Notes below for more details. + phi : float, optional + Phase offset, in degrees. Default is 0. + vertex_zero : bool, optional + This parameter is only used when `method` is 'quadratic'. + It determines whether the vertex of the parabola that is the graph + of the frequency is at t=0 or t=t1. + + Returns + ------- + y : ndarray + A numpy array containing the signal evaluated at `t` with the + requested time-varying frequency. 
More precisely, the function + returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral + (from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below. + + See Also + -------- + sweep_poly + + Notes + ----- + There are four options for the `method`. The following formulas give + the instantaneous frequency (in Hz) of the signal generated by + `chirp()`. For convenience, the shorter names shown below may also be + used. + + linear, lin, li: + + ``f(t) = f0 + (f1 - f0) * t / t1`` + + quadratic, quad, q: + + The graph of the frequency f(t) is a parabola through (0, f0) and + (t1, f1). By default, the vertex of the parabola is at (0, f0). + If `vertex_zero` is False, then the vertex is at (t1, f1). The + formula is: + + if vertex_zero is True: + + ``f(t) = f0 + (f1 - f0) * t**2 / t1**2`` + + else: + + ``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2`` + + To use a more general quadratic function, or an arbitrary + polynomial, use the function `scipy.signal.waveforms.sweep_poly`. + + logarithmic, log, lo: + + ``f(t) = f0 * (f1/f0)**(t/t1)`` + + f0 and f1 must be nonzero and have the same sign. + + This signal is also known as a geometric or exponential chirp. + + hyperbolic, hyp: + + ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)`` + + f0 and f1 must be nonzero. + + Examples + -------- + The following will be used in the examples: + + >>> from scipy.signal import chirp, spectrogram + >>> import matplotlib.pyplot as plt + + For the first example, we'll plot the waveform for a linear chirp + from 6 Hz to 1 Hz over 10 seconds: + + >>> t = np.linspace(0, 10, 5001) + >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear') + >>> plt.plot(t, w) + >>> plt.title("Linear Chirp, f(0)=6, f(10)=1") + >>> plt.xlabel('t (sec)') + >>> plt.show() + + For the remaining examples, we'll use higher frequency ranges, + and demonstrate the result using `scipy.signal.spectrogram`. + We'll use a 10 second interval sampled at 8000 Hz. + + >>> fs = 8000 + >>> T = 10 + >>> t = np.linspace(0, T, T*fs, endpoint=False) + + Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds + (vertex of the parabolic curve of the frequency is at t=0): + + >>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic') + >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, + ... nfft=2048) + >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') + >>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250') + >>> plt.xlabel('t (sec)') + >>> plt.ylabel('Frequency (Hz)') + >>> plt.grid() + >>> plt.show() + + Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds + (vertex of the parabolic curve of the frequency is at t=10): + + >>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic', + ... vertex_zero=False) + >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, + ... nfft=2048) + >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') + >>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250\\n' + + ... '(vertex_zero=False)') + >>> plt.xlabel('t (sec)') + >>> plt.ylabel('Frequency (Hz)') + >>> plt.grid() + >>> plt.show() + + Logarithmic chirp from 1500 Hz to 250 Hz over 10 seconds: + + >>> w = chirp(t, f0=1500, f1=250, t1=10, method='logarithmic') + >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, + ... 
nfft=2048) + >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') + >>> plt.title('Logarithmic Chirp, f(0)=1500, f(10)=250') + >>> plt.xlabel('t (sec)') + >>> plt.ylabel('Frequency (Hz)') + >>> plt.grid() + >>> plt.show() + + Hyperbolic chirp from 1500 Hz to 250 Hz over 10 seconds: + + >>> w = chirp(t, f0=1500, f1=250, t1=10, method='hyperbolic') + >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, + ... nfft=2048) + >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') + >>> plt.title('Hyperbolic Chirp, f(0)=1500, f(10)=250') + >>> plt.xlabel('t (sec)') + >>> plt.ylabel('Frequency (Hz)') + >>> plt.grid() + >>> plt.show() + + """ + # 'phase' is computed in _chirp_phase, to make testing easier. + phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero) + # Convert phi to radians. + phi *= pi / 180 + return cos(phase + phi) + + +def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True): + """ + Calculate the phase used by `chirp` to generate its output. + + See `chirp` for a description of the arguments. + + """ + t = asarray(t) + f0 = float(f0) + t1 = float(t1) + f1 = float(f1) + if method in ['linear', 'lin', 'li']: + beta = (f1 - f0) / t1 + phase = 2 * pi * (f0 * t + 0.5 * beta * t * t) + + elif method in ['quadratic', 'quad', 'q']: + beta = (f1 - f0) / (t1 ** 2) + if vertex_zero: + phase = 2 * pi * (f0 * t + beta * t ** 3 / 3) + else: + phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3) + + elif method in ['logarithmic', 'log', 'lo']: + if f0 * f1 <= 0.0: + raise ValueError("For a logarithmic chirp, f0 and f1 must be " + "nonzero and have the same sign.") + if f0 == f1: + phase = 2 * pi * f0 * t + else: + beta = t1 / log(f1 / f0) + phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0) + + elif method in ['hyperbolic', 'hyp']: + if f0 == 0 or f1 == 0: + raise ValueError("For a hyperbolic chirp, f0 and f1 must be " + "nonzero.") + if f0 == f1: + # Degenerate case: constant frequency. + phase = 2 * pi * f0 * t + else: + # Singular point: the instantaneous frequency blows up + # when t == sing. + sing = -f1 * t1 / (f0 - f1) + phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing)) + + else: + raise ValueError("method must be 'linear', 'quadratic', 'logarithmic'," + " or 'hyperbolic', but a value of %r was given." + % method) + + return phase + + +def sweep_poly(t, poly, phi=0): + """ + Frequency-swept cosine generator, with a time-dependent frequency. + + This function generates a sinusoidal function whose instantaneous + frequency varies with time. The frequency at time `t` is given by + the polynomial `poly`. + + Parameters + ---------- + t : ndarray + Times at which to evaluate the waveform. + poly : 1-D array_like or instance of numpy.poly1d + The desired frequency expressed as a polynomial. If `poly` is + a list or ndarray of length n, then the elements of `poly` are + the coefficients of the polynomial, and the instantaneous + frequency is + + ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` + + If `poly` is an instance of numpy.poly1d, then the + instantaneous frequency is + + ``f(t) = poly(t)`` + + phi : float, optional + Phase offset, in degrees, Default: 0. + + Returns + ------- + sweep_poly : ndarray + A numpy array containing the signal evaluated at `t` with the + requested time-varying frequency. More precisely, the function + returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral + (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above. 
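+
+    As a quick sanity check (assuming NumPy), a constant polynomial
+    reduces to a plain cosine at that fixed frequency:
+
+    >>> import numpy as np
+    >>> from scipy.signal import sweep_poly
+    >>> t = np.linspace(0, 1, 5)
+    >>> np.allclose(sweep_poly(t, [2.0]), np.cos(2 * np.pi * 2.0 * t))
+    True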
+ + See Also + -------- + chirp + + Notes + ----- + .. versionadded:: 0.8.0 + + If `poly` is a list or ndarray of length `n`, then the elements of + `poly` are the coefficients of the polynomial, and the instantaneous + frequency is: + + ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` + + If `poly` is an instance of `numpy.poly1d`, then the instantaneous + frequency is: + + ``f(t) = poly(t)`` + + Finally, the output `s` is: + + ``cos(phase + (pi/180)*phi)`` + + where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``, + ``f(t)`` as defined above. + + Examples + -------- + Compute the waveform with instantaneous frequency:: + + f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2 + + over the interval 0 <= t <= 10. + + >>> from scipy.signal import sweep_poly + >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0]) + >>> t = np.linspace(0, 10, 5001) + >>> w = sweep_poly(t, p) + + Plot it: + + >>> import matplotlib.pyplot as plt + >>> plt.subplot(2, 1, 1) + >>> plt.plot(t, w) + >>> plt.title("Sweep Poly\\nwith frequency " + + ... "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$") + >>> plt.subplot(2, 1, 2) + >>> plt.plot(t, p(t), 'r', label='f(t)') + >>> plt.legend() + >>> plt.xlabel('t') + >>> plt.tight_layout() + >>> plt.show() + + """ + # 'phase' is computed in _sweep_poly_phase, to make testing easier. + phase = _sweep_poly_phase(t, poly) + # Convert to radians. + phi *= pi / 180 + return cos(phase + phi) + + +def _sweep_poly_phase(t, poly): + """ + Calculate the phase used by sweep_poly to generate its output. + + See `sweep_poly` for a description of the arguments. + + """ + # polyint handles lists, ndarrays and instances of poly1d automatically. + intpoly = polyint(poly) + phase = 2 * pi * polyval(intpoly, t) + return phase + + +def unit_impulse(shape, idx=None, dtype=float): + """ + Unit impulse signal (discrete delta function) or unit basis vector. + + Parameters + ---------- + shape : int or tuple of int + Number of samples in the output (1-D), or a tuple that represents the + shape of the output (N-D). + idx : None or int or tuple of int or 'mid', optional + Index at which the value is 1. If None, defaults to the 0th element. + If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in + all dimensions. If an int, the impulse will be at `idx` in all + dimensions. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + + Returns + ------- + y : ndarray + Output array containing an impulse signal. + + Notes + ----- + The 1D case is also known as the Kronecker delta. + + .. 
versionadded:: 0.19.0 + + Examples + -------- + An impulse at the 0th element (:math:`\\delta[n]`): + + >>> from scipy import signal + >>> signal.unit_impulse(8) + array([ 1., 0., 0., 0., 0., 0., 0., 0.]) + + Impulse offset by 2 samples (:math:`\\delta[n-2]`): + + >>> signal.unit_impulse(7, 2) + array([ 0., 0., 1., 0., 0., 0., 0.]) + + 2-dimensional impulse, centered: + + >>> signal.unit_impulse((3, 3), 'mid') + array([[ 0., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 0.]]) + + Impulse at (2, 2), using broadcasting: + + >>> signal.unit_impulse((4, 4), 2) + array([[ 0., 0., 0., 0.], + [ 0., 0., 0., 0.], + [ 0., 0., 1., 0.], + [ 0., 0., 0., 0.]]) + + Plot the impulse response of a 4th-order Butterworth lowpass filter: + + >>> imp = signal.unit_impulse(100, 'mid') + >>> b, a = signal.butter(4, 0.2) + >>> response = signal.lfilter(b, a, imp) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(np.arange(-50, 50), imp) + >>> plt.plot(np.arange(-50, 50), response) + >>> plt.margins(0.1, 0.1) + >>> plt.xlabel('Time [samples]') + >>> plt.ylabel('Amplitude') + >>> plt.grid(True) + >>> plt.show() + + """ + out = zeros(shape, dtype) + + shape = np.atleast_1d(shape) + + if idx is None: + idx = (0,) * len(shape) + elif idx == 'mid': + idx = tuple(shape // 2) + elif not hasattr(idx, "__iter__"): + idx = (idx,) * len(shape) + + out[idx] = 1 + return out diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/waveforms.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/waveforms.pyc new file mode 100644 index 0000000..24dcd71 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/waveforms.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/wavelets.py b/project/venv/lib/python2.7/site-packages/scipy/signal/wavelets.py new file mode 100644 index 0000000..6043ee2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/wavelets.py @@ -0,0 +1,365 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.dual import eig +from scipy.special import comb +from scipy import linspace, pi, exp +from scipy.signal import convolve + +__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt'] + + +def daub(p): + """ + The coefficients for the FIR low-pass filter producing Daubechies wavelets. + + p>=1 gives the order of the zero at f=1/2. + There are 2p filter coefficients. + + Parameters + ---------- + p : int + Order of the zero at f=1/2, can have values from 1 to 34. 
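+
+    Whatever the order, the coefficients are normalized so that they sum
+    to ``sqrt(2)`` (a small sanity check, assuming NumPy):
+
+    >>> import numpy as np
+    >>> from scipy.signal import daub
+    >>> np.allclose(np.sum(daub(4)), np.sqrt(2))
+    True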
+ + Returns + ------- + daub : ndarray + Return + + """ + sqrt = np.sqrt + if p < 1: + raise ValueError("p must be at least 1.") + if p == 1: + c = 1 / sqrt(2) + return np.array([c, c]) + elif p == 2: + f = sqrt(2) / 8 + c = sqrt(3) + return f * np.array([1 + c, 3 + c, 3 - c, 1 - c]) + elif p == 3: + tmp = 12 * sqrt(10) + z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6 + z1c = np.conj(z1) + f = sqrt(2) / 8 + d0 = np.real((1 - z1) * (1 - z1c)) + a0 = np.real(z1 * z1c) + a1 = 2 * np.real(z1) + return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1, + a0 - 3 * a1 + 3, 3 - a1, 1]) + elif p < 35: + # construct polynomial and factor it + if p < 35: + P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1] + yj = np.roots(P) + else: # try different polynomial --- needs work + P = [comb(p - 1 + k, k, exact=1) / 4.0**k + for k in range(p)][::-1] + yj = np.roots(P) / 4 + # for each root, compute two z roots, select the one with |z|>1 + # Build up final polynomial + c = np.poly1d([1, 1])**p + q = np.poly1d([1]) + for k in range(p - 1): + yval = yj[k] + part = 2 * sqrt(yval * (yval - 1)) + const = 1 - 2 * yval + z1 = const + part + if (abs(z1)) < 1: + z1 = const - part + q = q * [1, -z1] + + q = c * np.real(q) + # Normalize result + q = q / np.sum(q) * sqrt(2) + return q.c[::-1] + else: + raise ValueError("Polynomial factorization does not work " + "well for p too large.") + + +def qmf(hk): + """ + Return high-pass qmf filter from low-pass + + Parameters + ---------- + hk : array_like + Coefficients of high-pass filter. + + """ + N = len(hk) - 1 + asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)] + return hk[::-1] * np.array(asgn) + + +def cascade(hk, J=7): + """ + Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients. + + Parameters + ---------- + hk : array_like + Coefficients of low-pass filter. + J : int, optional + Values will be computed at grid points ``K/2**J``. Default is 7. + + Returns + ------- + x : ndarray + The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where + ``len(hk) = len(gk) = N+1``. + phi : ndarray + The scaling function ``phi(x)`` at `x`: + ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N. + psi : ndarray, optional + The wavelet function ``psi(x)`` at `x`: + ``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N. + `psi` is only returned if `gk` is not None. + + Notes + ----- + The algorithm uses the vector cascade algorithm described by Strang and + Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values + and slices for quick reuse. Then inserts vectors into final vector at the + end. + + """ + N = len(hk) - 1 + + if (J > 30 - np.log2(N + 1)): + raise ValueError("Too many levels.") + if (J < 1): + raise ValueError("Too few levels.") + + # construct matrices needed + nn, kk = np.ogrid[:N, :N] + s2 = np.sqrt(2) + # append a zero so that take works + thk = np.r_[hk, 0] + gk = qmf(hk) + tgk = np.r_[gk, 0] + + indx1 = np.clip(2 * nn - kk, -1, N + 1) + indx2 = np.clip(2 * nn - kk + 1, -1, N + 1) + m = np.zeros((2, 2, N, N), 'd') + m[0, 0] = np.take(thk, indx1, 0) + m[0, 1] = np.take(thk, indx2, 0) + m[1, 0] = np.take(tgk, indx1, 0) + m[1, 1] = np.take(tgk, indx2, 0) + m *= s2 + + # construct the grid of points + x = np.arange(0, N * (1 << J), dtype=float) / (1 << J) + phi = 0 * x + + psi = 0 * x + + # find phi0, and phi1 + lam, v = eig(m[0, 0]) + ind = np.argmin(np.absolute(lam - 1)) + # a dictionary with a binary representation of the + # evaluation points x < 1 -- i.e. 
position is 0.xxxx + v = np.real(v[:, ind]) + # need scaling function to integrate to 1 so find + # eigenvector normalized to sum(v,axis=0)=1 + sm = np.sum(v) + if sm < 0: # need scaling function to integrate to 1 + v = -v + sm = -sm + bitdic = {'0': v / sm} + bitdic['1'] = np.dot(m[0, 1], bitdic['0']) + step = 1 << J + phi[::step] = bitdic['0'] + phi[(1 << (J - 1))::step] = bitdic['1'] + psi[::step] = np.dot(m[1, 0], bitdic['0']) + psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0']) + # descend down the levels inserting more and more values + # into bitdic -- store the values in the correct location once we + # have computed them -- stored in the dictionary + # for quicker use later. + prevkeys = ['1'] + for level in range(2, J + 1): + newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys] + fac = 1 << (J - level) + for key in newkeys: + # convert key to number + num = 0 + for pos in range(level): + if key[pos] == '1': + num += (1 << (level - 1 - pos)) + pastphi = bitdic[key[1:]] + ii = int(key[0]) + temp = np.dot(m[0, ii], pastphi) + bitdic[key] = temp + phi[num * fac::step] = temp + psi[num * fac::step] = np.dot(m[1, ii], pastphi) + prevkeys = newkeys + + return x, phi, psi + + +def morlet(M, w=5.0, s=1.0, complete=True): + """ + Complex Morlet wavelet. + + Parameters + ---------- + M : int + Length of the wavelet. + w : float, optional + Omega0. Default is 5 + s : float, optional + Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1. + complete : bool, optional + Whether to use the complete or the standard version. + + Returns + ------- + morlet : (M,) ndarray + + See Also + -------- + scipy.signal.gausspulse + + Notes + ----- + The standard version:: + + pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2)) + + This commonly used wavelet is often referred to simply as the + Morlet wavelet. Note that this simplified version can cause + admissibility problems at low values of `w`. + + The complete version:: + + pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2)) + + This version has a correction + term to improve admissibility. For `w` greater than 5, the + correction term is negligible. + + Note that the energy of the return wavelet is not normalised + according to `s`. + + The fundamental frequency of this wavelet in Hz is given + by ``f = 2*s*w*r / M`` where `r` is the sampling rate. + + Note: This function was created before `cwt` and is not compatible + with it. + + """ + x = linspace(-s * 2 * pi, s * 2 * pi, M) + output = exp(1j * w * x) + + if complete: + output -= exp(-0.5 * (w**2)) + + output *= exp(-0.5 * (x**2)) * pi**(-0.25) + + return output + + +def ricker(points, a): + """ + Return a Ricker wavelet, also known as the "Mexican hat wavelet". + + It models the function: + + ``A (1 - x^2/a^2) exp(-x^2/2 a^2)``, + + where ``A = 2/sqrt(3a)pi^1/4``. + + Parameters + ---------- + points : int + Number of points in `vector`. + Will be centered around 0. + a : scalar + Width parameter of the wavelet. + + Returns + ------- + vector : (N,) ndarray + Array of length `points` in shape of ricker curve. 
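+
+    At the center sample both the polynomial and Gaussian factors equal
+    1, so the peak is the amplitude ``A`` defined above (a minimal
+    check, assuming NumPy):
+
+    >>> import numpy as np
+    >>> from scipy.signal import ricker
+    >>> w = ricker(101, 4.0)
+    >>> np.isclose(w[50], 2 / (np.sqrt(3 * 4.0) * np.pi**0.25))
+    True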
+ + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + + >>> points = 100 + >>> a = 4.0 + >>> vec2 = signal.ricker(points, a) + >>> print(len(vec2)) + 100 + >>> plt.plot(vec2) + >>> plt.show() + + """ + A = 2 / (np.sqrt(3 * a) * (np.pi**0.25)) + wsq = a**2 + vec = np.arange(0, points) - (points - 1.0) / 2 + xsq = vec**2 + mod = (1 - xsq / wsq) + gauss = np.exp(-xsq / (2 * wsq)) + total = A * mod * gauss + return total + + +def cwt(data, wavelet, widths): + """ + Continuous wavelet transform. + + Performs a continuous wavelet transform on `data`, + using the `wavelet` function. A CWT performs a convolution + with `data` using the `wavelet` function, which is characterized + by a width parameter and length parameter. + + Parameters + ---------- + data : (N,) ndarray + data on which to perform the transform. + wavelet : function + Wavelet function, which should take 2 arguments. + The first argument is the number of points that the returned vector + will have (len(wavelet(length,width)) == length). + The second is a width parameter, defining the size of the wavelet + (e.g. standard deviation of a gaussian). See `ricker`, which + satisfies these requirements. + widths : (M,) sequence + Widths to use for transform. + + Returns + ------- + cwt: (M, N) ndarray + Will have shape of (len(widths), len(data)). + + Notes + ----- + :: + + length = min(10 * width[ii], len(data)) + cwt[ii,:] = signal.convolve(data, wavelet(length, + width[ii]), mode='same') + + Examples + -------- + >>> from scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(-1, 1, 200, endpoint=False) + >>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2) + >>> widths = np.arange(1, 31) + >>> cwtmatr = signal.cwt(sig, signal.ricker, widths) + >>> plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto', + ... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max()) + >>> plt.show() + + """ + output = np.zeros([len(widths), len(data)]) + for ind, width in enumerate(widths): + wavelet_data = wavelet(min(10 * width, len(data)), width) + output[ind, :] = convolve(data, wavelet_data, + mode='same') + return output diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/wavelets.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/wavelets.pyc new file mode 100644 index 0000000..c38c64b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/wavelets.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/windows/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/__init__.py new file mode 100644 index 0000000..4f31100 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/__init__.py @@ -0,0 +1,46 @@ +""" +============================================== +Window functions (:mod:`scipy.signal.windows`) +============================================== + +The suite of window functions for filtering and spectral estimation. + +.. autosummary:: + :toctree: generated/ + + get_window -- Return a window of a given length and type. 
+ + barthann -- Bartlett-Hann window + bartlett -- Bartlett window + blackman -- Blackman window + blackmanharris -- Minimum 4-term Blackman-Harris window + bohman -- Bohman window + boxcar -- Boxcar window + chebwin -- Dolph-Chebyshev window + cosine -- Cosine window + dpss -- Discrete prolate spheroidal sequences + exponential -- Exponential window + flattop -- Flat top window + gaussian -- Gaussian window + general_cosine -- Generalized Cosine window + general_gaussian -- Generalized Gaussian window + general_hamming -- Generalized Hamming window + hamming -- Hamming window + hann -- Hann window + hanning -- Hann window + kaiser -- Kaiser window + nuttall -- Nuttall's minimum 4-term Blackman-Harris window + parzen -- Parzen window + slepian -- Slepian window + triang -- Triangular window + tukey -- Tukey window + +""" + +from .windows import * + +__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall', + 'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann', + 'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'general_cosine', + 'general_hamming', 'chebwin', 'slepian', 'cosine', 'hann', + 'exponential', 'tukey', 'get_window', 'dpss'] diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/windows/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/__init__.pyc new file mode 100644 index 0000000..d8e474f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/windows/setup.py b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/setup.py new file mode 100644 index 0000000..c69a1f2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/setup.py @@ -0,0 +1,11 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('windows', parent_package, top_path) + + config.add_data_dir('tests') + + return config diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/windows/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/setup.pyc new file mode 100644 index 0000000..423210d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/windows/windows.py b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/windows.py new file mode 100644 index 0000000..f877bfe --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/windows.py @@ -0,0 +1,2107 @@ +"""The suite of window functions.""" + +from __future__ import division, print_function, absolute_import + +import operator +import warnings + +import numpy as np +from scipy import fftpack, linalg, special +from scipy._lib.six import string_types + +__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall', + 'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann', + 'hamming', 'kaiser', 'gaussian', 'general_cosine','general_gaussian', + 'general_hamming', 'chebwin', 'slepian', 'cosine', 'hann', + 'exponential', 'tukey', 'dpss', 'get_window'] + + +def _len_guards(M): + """Handle small or incorrect window lengths""" + if int(M) != M or M < 0: + raise ValueError('Window length M must be a non-negative integer') + return M <= 1 + + +def _extend(M, sym): + """Extend window by 1 sample if 
needed for DFT-even symmetry""" + if not sym: + return M + 1, True + else: + return M, False + + +def _truncate(w, needed): + """Truncate window by 1 sample if needed for DFT-even symmetry""" + if needed: + return w[:-1] + else: + return w + + +def general_cosine(M, a, sym=True): + r""" + Generic weighted sum of cosine terms window + + Parameters + ---------- + M : int + Number of points in the output window + a : array_like + Sequence of weighting coefficients. This uses the convention of being + centered on the origin, so these will typically all be positive + numbers, not alternating sign. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + References + ---------- + .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE + Transactions on Acoustics, Speech, and Signal Processing, vol. 29, + no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`. + .. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the + Discrete Fourier transform (DFT), including a comprehensive list of + window functions and some new flat-top windows", February 15, 2002 + https://holometer.fnal.gov/GH_FFT.pdf + + Examples + -------- + Heinzel describes a flat-top window named "HFT90D" with formula: [2]_ + + .. math:: w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z) + - 0.440811 \cos(3z) + 0.043097 \cos(4z) + + where + + .. math:: z = \frac{2 \pi j}{N}, j = 0...N - 1 + + Since this uses the convention of starting at the origin, to reproduce the + window, we need to convert every other coefficient to a positive number: + + >>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097] + + The paper states that the highest sidelobe is at -90.2 dB. Reproduce + Figure 42 by plotting the window and its frequency response, and confirm + the sidelobe level in red: + + >>> from scipy.signal.windows import general_cosine + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = general_cosine(1000, HFT90D, sym=False) + >>> plt.plot(window) + >>> plt.title("HFT90D window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 10000) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-50/1000, 50/1000, -140, 0]) + >>> plt.title("Frequency response of the HFT90D window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.axhline(-90.2, color='red') + >>> plt.show() + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + fac = np.linspace(-np.pi, np.pi, M) + w = np.zeros(M) + for k in range(len(a)): + w += a[k] * np.cos(k * fac) + + return _truncate(w, needs_trunc) + + +def boxcar(M, sym=True): + """Return a boxcar or rectangular window. + + Also known as a rectangular window or Dirichlet window, this is equivalent + to no window at all. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + Whether the window is symmetric. (Has no effect for boxcar.) + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1. 
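+
+    Every sample is 1, so applying it leaves a signal segment unchanged
+    (a trivial check):
+
+    >>> from scipy.signal.windows import boxcar
+    >>> boxcar(4)
+    array([ 1.,  1.,  1.,  1.])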
+ + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.boxcar(51) + >>> plt.plot(window) + >>> plt.title("Boxcar window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the boxcar window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + w = np.ones(M, float) + + return _truncate(w, needs_trunc) + + +def triang(M, sym=True): + """Return a triangular window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + See Also + -------- + bartlett : A triangular window that touches zero + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.triang(51) + >>> plt.plot(window) + >>> plt.title("Triangular window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the triangular window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(1, (M + 1) // 2 + 1) + if M % 2 == 0: + w = (2 * n - 1.0) / M + w = np.r_[w, w[::-1]] + else: + w = 2 * n / (M + 1.0) + w = np.r_[w, w[-2::-1]] + + return _truncate(w, needs_trunc) + + +def parzen(M, sym=True): + """Return a Parzen window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + References + ---------- + .. [1] E. Parzen, "Mathematical Considerations in the Estimation of + Spectra", Technometrics, Vol. 3, No. 2 (May, 1961), pp. 
167-190 + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.parzen(51) + >>> plt.plot(window) + >>> plt.title("Parzen window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Parzen window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0) + na = np.extract(n < -(M - 1) / 4.0, n) + nb = np.extract(abs(n) <= (M - 1) / 4.0, n) + wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0 + wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 + + 6 * (np.abs(nb) / (M / 2.0)) ** 3.0) + w = np.r_[wa, wb, wa[::-1]] + + return _truncate(w, needs_trunc) + + +def bohman(M, sym=True): + """Return a Bohman window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.bohman(51) + >>> plt.plot(window) + >>> plt.title("Bohman window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Bohman window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + fac = np.abs(np.linspace(-1, 1, M)[1:-1]) + w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac) + w = np.r_[0, w, 0] + + return _truncate(w, needs_trunc) + + +def blackman(M, sym=True): + r""" + Return a Blackman window. + + The Blackman window is a taper formed by using the first three terms of + a summation of cosines. It was designed to have close to the minimal + leakage possible. It is close to optimal, only slightly worse than a + Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Blackman window is defined as + + .. 
math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M) + + The "exact Blackman" window was designed to null out the third and fourth + sidelobes, but has discontinuities at the boundaries, resulting in a + 6 dB/oct fall-off. This window is an approximation of the "exact" window, + which does not null the sidelobes as well, but is smooth at the edges, + improving the fall-off rate to 18 dB/oct. [3]_ + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the Kaiser window. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + .. [3] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic + Analysis with the Discrete Fourier Transform". Proceedings of the + IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`. + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.blackman(51) + >>> plt.plot(window) + >>> plt.title("Blackman window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Blackman window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's blackman function + return general_cosine(M, [0.42, 0.50, 0.08], sym) + + +def nuttall(M, sym=True): + """Return a minimum 4-term Blackman-Harris window according to Nuttall. + + This variation is called "Nuttall4c" by Heinzel. [2]_ + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + References + ---------- + .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE + Transactions on Acoustics, Speech, and Signal Processing, vol. 29, + no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`. + .. [2] Heinzel G. 
et al., "Spectrum and spectral density estimation by the + Discrete Fourier transform (DFT), including a comprehensive list of + window functions and some new flat-top windows", February 15, 2002 + https://holometer.fnal.gov/GH_FFT.pdf + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.nuttall(51) + >>> plt.plot(window) + >>> plt.title("Nuttall window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Nuttall window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + return general_cosine(M, [0.3635819, 0.4891775, 0.1365995, 0.0106411], sym) + + +def blackmanharris(M, sym=True): + """Return a minimum 4-term Blackman-Harris window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.blackmanharris(51) + >>> plt.plot(window) + >>> plt.title("Blackman-Harris window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Blackman-Harris window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym) + + +def flattop(M, sym=True): + """Return a flat top window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + Flat top windows are used for taking accurate measurements of signal + amplitude in the frequency domain, with minimal scalloping error from the + center of a frequency bin to its edges, compared to others. This is a + 5th-order cosine window, with the 5 terms optimized to make the main lobe + maximally flat. [1]_ + + References + ---------- + .. [1] D'Antona, Gabriele, and A. Ferrero, "Digital Signal Processing for + Measurement Systems", Springer Media, 2006, p. 70 + :doi:`10.1007/0-387-28666-7`. 
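+
+    The five cosine terms are scaled so that the window peaks at 1 at
+    its midpoint (a quick check, assuming NumPy):
+
+    >>> import numpy as np
+    >>> from scipy.signal.windows import flattop
+    >>> np.isclose(flattop(51)[25], 1.0)
+    True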
+ + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.flattop(51) + >>> plt.plot(window) + >>> plt.title("Flat top window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the flat top window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + a = [0.21557895, 0.41663158, 0.277263158, 0.083578947, 0.006947368] + return general_cosine(M, a, sym) + + +def bartlett(M, sym=True): + r""" + Return a Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The triangular window, with the first and last samples equal to zero + and the maximum value normalized to 1 (though the value 1 does not + appear if `M` is even and `sym` is True). + + See Also + -------- + triang : A triangular window that does not touch zero at the ends + + Notes + ----- + The Bartlett window is defined as + + .. math:: w(n) = \frac{2}{M-1} \left( + \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right| + \right) + + Most references to the Bartlett window come from the signal + processing literature, where it is used as one of many windowing + functions for smoothing values. Note that convolution with this + window produces linear interpolation. It is also known as an + apodization (which means"removing the foot", i.e. smoothing + discontinuities at the beginning and end of the sampled signal) or + tapering function. The Fourier transform of the Bartlett is the product + of two sinc functions. + Note the excellent discussion in Kanasewich. [2]_ + + References + ---------- + .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika 37, 1-16, 1950. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 109-110. + .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + Processing", Prentice-Hall, 1999, pp. 468-471. + .. [4] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 429. 
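+
+    Unlike `triang`, the end samples are exactly zero (a minimal check):
+
+    >>> from scipy.signal.windows import bartlett
+    >>> bartlett(5)
+    array([ 0. ,  0.5,  1. ,  0.5,  0. ])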
+ + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.bartlett(51) + >>> plt.plot(window) + >>> plt.title("Bartlett window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Bartlett window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's bartlett function + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(0, M) + w = np.where(np.less_equal(n, (M - 1) / 2.0), + 2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1)) + + return _truncate(w, needs_trunc) + + +def hann(M, sym=True): + r""" + Return a Hann window. + + The Hann window is a taper formed by using a raised cosine or sine-squared + with ends that touch zero. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Hann window is defined as + + .. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right) + \qquad 0 \leq n \leq M-1 + + The window was named for Julius von Hann, an Austrian meteorologist. It is + also known as the Cosine Bell. It is sometimes erroneously referred to as + the "Hanning" window, from the use of "hann" as a verb in the original + paper and confusion with the very similar Hamming window. + + Most references to the Hann window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. 
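+
+    With ``sym=False`` the window is periodic: it equals the length-5
+    symmetric window with its final zero dropped (a minimal check):
+
+    >>> from scipy.signal.windows import hann
+    >>> hann(4, sym=False)
+    array([ 0. ,  0.5,  1. ,  0.5])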
+ + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.hann(51) + >>> plt.plot(window) + >>> plt.title("Hann window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Hann window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's hanning function + return general_hamming(M, 0.5, sym) + + +@np.deprecate(new_name='scipy.signal.windows.hann') +def hanning(*args, **kwargs): + return hann(*args, **kwargs) + + +def tukey(M, alpha=0.5, sym=True): + r"""Return a Tukey window, also known as a tapered cosine window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + alpha : float, optional + Shape parameter of the Tukey window, representing the fraction of the + window inside the cosine tapered region. + If zero, the Tukey window is equivalent to a rectangular window. + If one, the Tukey window is equivalent to a Hann window. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + References + ---------- + .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic + Analysis with the Discrete Fourier Transform". Proceedings of the + IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837` + .. [2] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function#Tukey_window + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.tukey(51) + >>> plt.plot(window) + >>> plt.title("Tukey window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + >>> plt.ylim([0, 1.1]) + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Tukey window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + + if alpha <= 0: + return np.ones(M, 'd') + elif alpha >= 1.0: + return hann(M, sym=sym) + + M, needs_trunc = _extend(M, sym) + + n = np.arange(0, M) + width = int(np.floor(alpha*(M-1)/2.0)) + n1 = n[0:width+1] + n2 = n[width+1:M-width-1] + n3 = n[M-width-1:] + + w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1)))) + w2 = np.ones(n2.shape) + w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1)))) + + w = np.concatenate((w1, w2, w3)) + + return _truncate(w, needs_trunc) + + +def barthann(M, sym=True): + """Return a modified Bartlett-Hann window. 
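+
+    Written out from the implementation below, with
+    :math:`f = \left|\frac{n}{M-1} - \frac{1}{2}\right|`, the window is
+
+    .. math:: w(n) = 0.62 - 0.48 f + 0.38 \cos(2 \pi f)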
+ + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.barthann(51) + >>> plt.plot(window) + >>> plt.title("Bartlett-Hann window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Bartlett-Hann window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(0, M) + fac = np.abs(n / (M - 1.0) - 0.5) + w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac) + + return _truncate(w, needs_trunc) + + +def general_hamming(M, alpha, sym=True): + r"""Return a generalized Hamming window. + + The generalized Hamming window is constructed by multiplying a rectangular + window by one period of a cosine function [1]_. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + alpha : float + The window coefficient, :math:`\alpha` + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The generalized Hamming window is defined as + + .. math:: w(n) = \alpha - \left(1 - \alpha\right) \cos\left(\frac{2\pi{n}}{M-1}\right) + \qquad 0 \leq n \leq M-1 + + Both the common Hamming window and Hann window are special cases of the + generalized Hamming window with :math:`\alpha` = 0.54 and :math:`\alpha` = + 0.5, respectively [2]_. + + See Also + -------- + hamming, hann + + Examples + -------- + The Sentinel-1A/B Instrument Processing Facility uses generalized Hamming + windows in the processing of spaceborne Synthetic Aperture Radar (SAR) + data [3]_. The facility uses various values for the :math:`\alpha` + parameter based on operating mode of the SAR instrument. Some common + :math:`\alpha` values include 0.75, 0.7 and 0.52 [4]_. As an example, we + plot these different windows. 
+ + >>> from scipy.signal.windows import general_hamming + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> fig1, spatial_plot = plt.subplots() + >>> spatial_plot.set_title("Generalized Hamming Windows") + >>> spatial_plot.set_ylabel("Amplitude") + >>> spatial_plot.set_xlabel("Sample") + + >>> fig2, freq_plot = plt.subplots() + >>> freq_plot.set_title("Frequency Responses") + >>> freq_plot.set_ylabel("Normalized magnitude [dB]") + >>> freq_plot.set_xlabel("Normalized frequency [cycles per sample]") + + >>> for alpha in [0.75, 0.7, 0.52]: + ... window = general_hamming(41, alpha) + ... spatial_plot.plot(window, label="{:.2f}".format(alpha)) + ... A = fft(window, 2048) / (len(window)/2.0) + ... freq = np.linspace(-0.5, 0.5, len(A)) + ... response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + ... freq_plot.plot(freq, response, label="{:.2f}".format(alpha)) + >>> freq_plot.legend(loc="upper right") + >>> spatial_plot.legend(loc="upper right") + + References + ---------- + .. [1] DSPRelated, "Generalized Hamming Window Family", + https://www.dsprelated.com/freebooks/sasp/Generalized_Hamming_Window_Family.html + .. [2] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [3] Riccardo Piantanida ESA, "Sentinel-1 Level 1 Detailed Algorithm + Definition", + https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Level-1-Detailed-Algorithm-Definition + .. [4] Matthieu Bourbigot ESA, "Sentinel-1 Product Definition", + https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Definition + """ + return general_cosine(M, [alpha, 1. - alpha], sym) + + +def hamming(M, sym=True): + r"""Return a Hamming window. + + The Hamming window is a taper formed by using a raised cosine with + non-zero endpoints, optimized to minimize the nearest side lobe. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right) + \qquad 0 \leq n \leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and + is described in Blackman and Tukey. It was recommended for smoothing the + truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. 
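+
+    A quick numeric check of the non-zero endpoints mentioned above,
+    0.54 - 0.46 = 0.08 (repr spacing may vary across NumPy versions):
+
+    >>> from scipy import signal
+    >>> signal.hamming(5)
+    array([ 0.08,  0.54,  1.  ,  0.54,  0.08])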
+ + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.hamming(51) + >>> plt.plot(window) + >>> plt.title("Hamming window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Hamming window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's hamming function + return general_hamming(M, 0.54, sym) + + +def kaiser(M, beta, sym=True): + r"""Return a Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + beta : float + Shape parameter, determines trade-off between main-lobe width and + side lobe level. As beta gets large, the window narrows. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}} + \right)/I_0(\beta) + + with + + .. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple approximation + to the DPSS window based on Bessel functions. + The Kaiser window is a very good approximation to the Digital Prolate + Spheroidal Sequence, or Slepian window, which is the transform which + maximizes the energy in the main lobe of the window relative to total + energy. + + The Kaiser can approximate other windows by varying the beta parameter. + (Some literature uses alpha = beta/pi.) [4]_ + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hann + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + be returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + John Wiley and Sons, New York, (1966). + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 177-178. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. 
[4] F. J. Harris, "On the use of windows for harmonic analysis with the + discrete Fourier transform," Proceedings of the IEEE, vol. 66, + no. 1, pp. 51-83, Jan. 1978. :doi:`10.1109/PROC.1978.10837`. + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.kaiser(51, beta=14) + >>> plt.plot(window) + >>> plt.title(r"Kaiser window ($\beta$=14)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's kaiser function + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(0, M) + alpha = (M - 1) / 2.0 + w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) / + special.i0(beta)) + + return _truncate(w, needs_trunc) + + +def gaussian(M, std, sym=True): + r"""Return a Gaussian window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + std : float + The standard deviation, sigma. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Gaussian window is defined as + + .. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 } + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.gaussian(51, std=7) + >>> plt.plot(window) + >>> plt.title(r"Gaussian window ($\sigma$=7)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(0, M) - (M - 1.0) / 2.0 + sig2 = 2 * std * std + w = np.exp(-n ** 2 / sig2) + + return _truncate(w, needs_trunc) + + +def general_gaussian(M, p, sig, sym=True): + r"""Return a window with a generalized Gaussian shape. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + p : float + Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is + the same shape as the Laplace distribution. + sig : float + The standard deviation, sigma. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. 
+ When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The generalized Gaussian window is defined as + + .. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} } + + the half-power point is at + + .. math:: (2 \log(2))^{1/(2 p)} \sigma + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.general_gaussian(51, p=1.5, sig=7) + >>> plt.plot(window) + >>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title(r"Freq. resp. of the gen. Gaussian " + ... "window (p=1.5, $\sigma$=7)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(0, M) - (M - 1.0) / 2.0 + w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p)) + + return _truncate(w, needs_trunc) + + +# `chebwin` contributed by Kumar Appaiah. +def chebwin(M, at, sym=True): + r"""Return a Dolph-Chebyshev window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + at : float + Attenuation (in dB). + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value always normalized to 1 + + Notes + ----- + This window optimizes for the narrowest main lobe width for a given order + `M` and sidelobe equiripple attenuation `at`, using Chebyshev + polynomials. It was originally developed by Dolph to optimize the + directionality of radio antenna arrays. + + Unlike most windows, the Dolph-Chebyshev is defined in terms of its + frequency response: + + .. math:: W(k) = \frac + {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}} + {\cosh[M \cosh^{-1}(\beta)]} + + where + + .. math:: \beta = \cosh \left [\frac{1}{M} + \cosh^{-1}(10^\frac{A}{20}) \right ] + + and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`). + + The time domain window is then generated using the IFFT, so + power-of-two `M` are the fastest to generate, and prime number `M` are + the slowest. + + The equiripple condition in the frequency domain creates impulses in the + time domain, which appear at the ends of the window. + + References + ---------- + .. [1] C. Dolph, "A current distribution for broadside arrays which + optimizes the relationship between beam width and side-lobe level", + Proceedings of the IEEE, Vol. 34, Issue 6 + .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter", + American Meteorological Society (April 1997) + http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf + .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the + discrete Fourier transforms", Proceedings of the IEEE, Vol. 66, + No. 
1, January 1978 + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.chebwin(51, at=100) + >>> plt.plot(window) + >>> plt.title("Dolph-Chebyshev window (100 dB)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if np.abs(at) < 45: + warnings.warn("This window is not suitable for spectral analysis " + "for attenuation values lower than about 45dB because " + "the equivalent noise bandwidth of a Chebyshev window " + "does not grow monotonically with increasing sidelobe " + "attenuation when the attenuation is smaller than " + "about 45 dB.") + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + # compute the parameter beta + order = M - 1.0 + beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.))) + k = np.r_[0:M] * 1.0 + x = beta * np.cos(np.pi * k / M) + # Find the window's DFT coefficients + # Use analytic definition of Chebyshev polynomial instead of expansion + # from scipy.special. Using the expansion in scipy.special leads to errors. + p = np.zeros(x.shape) + p[x > 1] = np.cosh(order * np.arccosh(x[x > 1])) + p[x < -1] = (2 * (M % 2) - 1) * np.cosh(order * np.arccosh(-x[x < -1])) + p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1])) + + # Appropriate IDFT and filling up + # depending on even/odd M + if M % 2: + w = np.real(fftpack.fft(p)) + n = (M + 1) // 2 + w = w[:n] + w = np.concatenate((w[n - 1:0:-1], w)) + else: + p = p * np.exp(1.j * np.pi / M * np.r_[0:M]) + w = np.real(fftpack.fft(p)) + n = M // 2 + 1 + w = np.concatenate((w[n - 1:0:-1], w[1:n])) + w = w / max(w) + + return _truncate(w, needs_trunc) + + +def slepian(M, width, sym=True): + """Return a digital Slepian (DPSS) window. + + Used to maximize the energy concentration in the main lobe. Also called + the digital prolate spheroidal sequence (DPSS). + + .. note:: Deprecated in SciPy 1.1. + `slepian` will be removed in a future version of SciPy, it is + replaced by `dpss`, which uses the standard definition of a + digital Slepian window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + width : float + Bandwidth + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value always normalized to 1 + + See Also + -------- + dpss + + References + ---------- + .. [1] D. Slepian & H. O. Pollak: "Prolate spheroidal wave functions, + Fourier analysis and uncertainty-I," Bell Syst. Tech. J., vol.40, + pp.43-63, 1961. https://archive.org/details/bstj40-1-43 + .. [2] H. J. Landau & H. O. Pollak: "Prolate spheroidal wave functions, + Fourier analysis and uncertainty-II," Bell Syst. Tech. J. , vol.40, + pp.65-83, 1961. 
https://archive.org/details/bstj40-1-65 + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.slepian(51, width=0.3) + >>> plt.plot(window) + >>> plt.title("Slepian (DPSS) window (BW=0.3)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Slepian window (BW=0.3)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + warnings.warn('slepian is deprecated and will be removed in a future ' + 'version, use dpss instead', DeprecationWarning) + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + # our width is the full bandwidth + width = width / 2 + # to match the old version + width = width / 2 + m = np.arange(M, dtype='d') + H = np.zeros((2, M)) + H[0, 1:] = m[1:] * (M - m[1:]) / 2 + H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width) + + _, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1)) + win = win.ravel() / win.max() + + return _truncate(win, needs_trunc) + + +def cosine(M, sym=True): + """Return a window with a simple cosine shape. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + + .. versionadded:: 0.13.0 + + Examples + -------- + Plot the window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.cosine(51) + >>> plt.plot(window) + >>> plt.title("Cosine window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the cosine window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.show() + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + w = np.sin(np.pi / M * (np.arange(0, M) + .5)) + + return _truncate(w, needs_trunc) + + +def exponential(M, center=None, tau=1., sym=True): + r"""Return an exponential (or Poisson) window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + center : float, optional + Parameter defining the center location of the window function. + The default value if not given is ``center = (M-1) / 2``. This + parameter must take its default value for symmetric windows. + tau : float, optional + Parameter defining the decay. 
For ``center = 0`` use + ``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window + remaining at the end. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Exponential window is defined as + + .. math:: w(n) = e^{-|n-center| / \tau} + + References + ---------- + S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)", + Technical Review 3, Bruel & Kjaer, 1987. + + Examples + -------- + Plot the symmetric window and its frequency response: + + >>> from scipy import signal + >>> from scipy.fftpack import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> M = 51 + >>> tau = 3.0 + >>> window = signal.exponential(M, tau=tau) + >>> plt.plot(window) + >>> plt.title("Exponential Window (tau=3.0)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -35, 0]) + >>> plt.title("Frequency response of the Exponential window (tau=3.0)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + This function can also generate non-symmetric windows: + + >>> tau2 = -(M-1) / np.log(0.01) + >>> window2 = signal.exponential(M, 0, tau2, False) + >>> plt.figure() + >>> plt.plot(window2) + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + """ + if sym and center is not None: + raise ValueError("If sym==True, center must be None.") + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + if center is None: + center = (M-1) / 2 + + n = np.arange(0, M) + w = np.exp(-np.abs(n-center) / tau) + + return _truncate(w, needs_trunc) + + +def dpss(M, NW, Kmax=None, sym=True, norm=None, return_ratios=False): + """ + Compute the Discrete Prolate Spheroidal Sequences (DPSS). + + DPSS (or Slepian sequences) are often used in multitaper power spectral + density estimation (see [1]_). The first window in the sequence can be + used to maximize the energy concentration in the main lobe, and is also + called the Slepian window. + + Parameters + ---------- + M : int + Window length. + NW : float + Standardized half bandwidth corresponding to ``2*NW = BW/f0 = BW*N*dt`` + where ``dt`` is taken as 1. + Kmax : int | None, optional + Number of DPSS windows to return (orders ``0`` through ``Kmax-1``). + If None (default), return only a single window of shape ``(M,)`` + instead of an array of windows of shape ``(Kmax, M)``. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + norm : {2, 'approximate', 'subsample'} | None, optional + If 'approximate' or 'subsample', then the windows are normalized by the + maximum, and a correction scale-factor for even-length windows + is applied either using ``M**2/(M**2+NW)`` ("approximate") or + a FFT-based subsample shift ("subsample"), see Notes for details. + If None, then "approximate" is used when ``Kmax=None`` and 2 otherwise + (which uses the l2 norm). 
+
+    return_ratios : bool, optional
+        If True, also return the concentration ratios in addition to the
+        windows.
+
+    Returns
+    -------
+    v : ndarray, shape (Kmax, N) or (N,)
+        The DPSS windows. Will be 1D if `Kmax` is None.
+    r : ndarray, shape (Kmax,) or float, optional
+        The concentration ratios for the windows. Only returned if
+        `return_ratios` evaluates to True. Will be 0D if `Kmax` is None.
+
+    Notes
+    -----
+    This computation uses the tridiagonal eigenvector formulation given
+    in [2]_.
+
+    For the default normalization with ``Kmax=None`` (window-generation
+    mode), simply using the l-infinity norm would create a window with two
+    unity values, which creates slight normalization differences between
+    even and odd orders. The approximate correction of
+    ``M**2/float(M**2+NW)`` for even sample numbers is used to counteract
+    this effect (see Examples below).
+
+    For very long signals (e.g., 1e6 elements), it can be useful to compute
+    windows orders of magnitude shorter and use interpolation (e.g.,
+    `scipy.interpolate.interp1d`) to obtain tapers of length `M`,
+    but this in general will not preserve orthogonality between the tapers.
+
+    .. versionadded:: 1.1
+
+    References
+    ----------
+    .. [1] Percival DB, Walden AT. Spectral Analysis for Physical
+           Applications: Multitaper and Conventional Univariate Techniques.
+           Cambridge University Press; 1993.
+    .. [2] Slepian, D. Prolate spheroidal wave functions, Fourier analysis,
+           and uncertainty V: The discrete case. Bell System Technical
+           Journal, Volume 57 (1978), 1371-1430.
+    .. [3] Kaiser, JF, Schafer RW. On the Use of the I0-Sinh Window for
+           Spectrum Analysis. IEEE Transactions on Acoustics, Speech and
+           Signal Processing. ASSP-28 (1): 105-107; 1980.
+
+    Examples
+    --------
+    We can compare the window to `kaiser`, which was invented as an
+    alternative that was easier to calculate [3]_ (example adapted from
+    `here <https://ccrma.stanford.edu/~jos/sasp/Kaiser_DPSS_Windows_Compared.html>`_):
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.signal import windows, freqz
+    >>> N = 51
+    >>> fig, axes = plt.subplots(3, 2, figsize=(5, 7))
+    >>> for ai, alpha in enumerate((1, 3, 5)):
+    ...     win_dpss = windows.dpss(N, alpha)
+    ...     beta = alpha*np.pi
+    ...     win_kaiser = windows.kaiser(N, beta)
+    ...     for win, c in ((win_dpss, 'k'), (win_kaiser, 'r')):
+    ...         win /= win.sum()
+    ...         axes[ai, 0].plot(win, color=c, lw=1.)
+    ...         axes[ai, 0].set(xlim=[0, N-1], title=r'$\\alpha$ = %s' % alpha,
+    ...                         ylabel='Amplitude')
+    ...         w, h = freqz(win)
+    ...         axes[ai, 1].plot(w, 20 * np.log10(np.abs(h)), color=c, lw=1.)
+    ...         axes[ai, 1].set(xlim=[0, np.pi],
+    ...                         title=r'$\\beta$ = %0.2f' % beta,
+    ...                         ylabel='Magnitude (dB)')
+    >>> for ax in axes.ravel():
+    ...     ax.grid(True)
+    >>> axes[2, 1].legend(['DPSS', 'Kaiser'])
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    And here are examples of the first four windows, along with their
+    concentration ratios:
+
+    >>> M = 512
+    >>> NW = 2.5
+    >>> win, eigvals = windows.dpss(M, NW, 4, return_ratios=True)
+    >>> fig, ax = plt.subplots(1)
+    >>> ax.plot(win.T, linewidth=1.)
+    >>> ax.set(xlim=[0, M-1], ylim=[-0.1, 0.1], xlabel='Samples',
+    ...        title='DPSS, M=%d, NW=%0.1f' % (M, NW))
+    >>> ax.legend(['win[%d] (%0.4f)' % (ii, ratio)
+    ...            for ii, ratio in enumerate(eigvals)])
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    Using a standard :math:`l_{\\infty}` norm would produce two unity values
+    for even `M`, but only one unity value for odd `M`. This produces uneven
+    window power that can be counteracted by the approximate correction
+    ``M**2/float(M**2+NW)``, which can be selected by using
+    ``norm='approximate'`` (which is the same as ``norm=None`` when
+    ``Kmax=None``, as is the case here). Alternatively, the slower
+    ``norm='subsample'`` can be used, which uses subsample shifting in the
+    frequency domain (FFT) to compute the correction:
+
+    >>> Ms = np.arange(1, 41)
+    >>> factors = (50, 20, 10, 5, 2.0001)
+    >>> energy = np.empty((3, len(Ms), len(factors)))
+    >>> for mi, M in enumerate(Ms):
+    ...     for fi, factor in enumerate(factors):
+    ...         NW = M / float(factor)
+    ...         # Corrected using empirical approximation (default)
+    ...         win = windows.dpss(M, NW)
+    ...         energy[0, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
+    ...         # Corrected using subsample shifting
+    ...         win = windows.dpss(M, NW, norm='subsample')
+    ...         energy[1, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
+    ...         # Uncorrected (using l-infinity norm)
+    ...         win /= win.max()
+    ...         energy[2, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
+    >>> fig, ax = plt.subplots(1)
+    >>> hs = ax.plot(Ms, energy[2], '-o', markersize=4,
+    ...              markeredgecolor='none')
+    >>> leg = [hs[-1]]
+    >>> for hi, hh in enumerate(hs):
+    ...     h1 = ax.plot(Ms, energy[0, :, hi], '-o', markersize=4,
+    ...                  color=hh.get_color(), markeredgecolor='none',
+    ...                  alpha=0.66)
+    ...     h2 = ax.plot(Ms, energy[1, :, hi], '-o', markersize=4,
+    ...                  color=hh.get_color(), markeredgecolor='none',
+    ...                  alpha=0.33)
+    ...     if hi == len(hs) - 1:
+    ...         leg.insert(0, h1[0])
+    ...         leg.insert(0, h2[0])
+    >>> ax.set(xlabel='M (samples)', ylabel=r'Power / $\\sqrt{M}$')
+    >>> ax.legend(leg, ['Uncorrected', r'Corrected: $\\frac{M^2}{M^2+NW}$',
+    ...                 'Corrected (subsample)'])
+    >>> fig.tight_layout()
+
+    """  # noqa: E501
+    if _len_guards(M):
+        return np.ones(M)
+    if norm is None:
+        norm = 'approximate' if Kmax is None else 2
+    known_norms = (2, 'approximate', 'subsample')
+    if norm not in known_norms:
+        raise ValueError('norm must be one of %s, got %s'
+                         % (known_norms, norm))
+    if Kmax is None:
+        singleton = True
+        Kmax = 1
+    else:
+        singleton = False
+        Kmax = operator.index(Kmax)
+    if not 0 < Kmax <= M:
+        raise ValueError('Kmax must be greater than 0 and less than or '
+                         'equal to M')
+    if NW >= M/2.:
+        raise ValueError('NW must be less than M/2.')
+    if NW <= 0:
+        raise ValueError('NW must be positive')
+    M, needs_trunc = _extend(M, sym)
+    W = float(NW) / M
+    nidx = np.arange(M)
+
+    # Here we want to set up an optimization problem to find a sequence
+    # whose energy is maximally concentrated within band [-W,W].
+    # Thus, the measure lambda(T,W) is the ratio between the energy within
+    # that band, and the total energy. This leads to the eigen-system
+    # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
+    # eigenvalue is the sequence with maximally concentrated energy. The
+    # collection of eigenvectors of this system are called Slepian
+    # sequences, or discrete prolate spheroidal sequences (DPSS). Only the
+    # first K, K = 2NW/dt orders of DPSS will exhibit good spectral
+    # concentration
+    # [see https://en.wikipedia.org/wiki/Spectral_concentration_problem]
+
+    # Here we set up an alternative symmetric tri-diagonal eigenvalue
+    # problem such that
+    # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
+    # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
+    # and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
+    # [see Percival and Walden, 1993]
+    d = ((M - 1 - 2 * nidx) / 2.)
** 2 * np.cos(2 * np.pi * W) + e = nidx[1:] * (M - nidx[1:]) / 2. + + # only calculate the highest Kmax eigenvalues + w, windows = linalg.eigh_tridiagonal( + d, e, select='i', select_range=(M - Kmax, M - 1)) + w = w[::-1] + windows = windows[:, ::-1].T + + # By convention (Percival and Walden, 1993 pg 379) + # * symmetric tapers (k=0,2,4,...) should have a positive average. + fix_even = (windows[::2].sum(axis=1) < 0) + for i, f in enumerate(fix_even): + if f: + windows[2 * i] *= -1 + # * antisymmetric tapers should begin with a positive lobe + # (this depends on the definition of "lobe", here we'll take the first + # point above the numerical noise, which should be good enough for + # sufficiently smooth functions, and more robust than relying on an + # algorithm that uses max(abs(w)), which is susceptible to numerical + # noise problems) + thresh = max(1e-7, 1. / M) + for i, w in enumerate(windows[1::2]): + if w[w * w > thresh][0] < 0: + windows[2 * i + 1] *= -1 + + # Now find the eigenvalues of the original spectral concentration problem + # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390 + if return_ratios: + dpss_rxx = _fftautocorr(windows) + r = 4 * W * np.sinc(2 * W * nidx) + r[0] = 2 * W + ratios = np.dot(dpss_rxx, r) + if singleton: + ratios = ratios[0] + # Deal with sym and Kmax=None + if norm != 2: + windows /= windows.max() + if M % 2 == 0: + if norm == 'approximate': + correction = M**2 / float(M**2 + NW) + else: + s = np.fft.rfft(windows[0]) + shift = -(1 - 1./M) * np.arange(1, M//2 + 1) + s[1:] *= 2 * np.exp(-1j * np.pi * shift) + correction = M / s.real.sum() + windows *= correction + # else we're already l2 normed, so do nothing + if needs_trunc: + windows = windows[:, :-1] + if singleton: + windows = windows[0] + return (windows, ratios) if return_ratios else windows + + +def _fftautocorr(x): + """Compute the autocorrelation of a real array and crop the result.""" + N = x.shape[-1] + use_N = fftpack.next_fast_len(2*N-1) + x_fft = np.fft.rfft(x, use_N, axis=-1) + cxy = np.fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N] + # Or equivalently (but in most cases slower): + # cxy = np.array([np.convolve(xx, yy[::-1], mode='full') + # for xx, yy in zip(x, x)])[:, N-1:2*N-1] + return cxy + + +_win_equiv_raw = { + ('barthann', 'brthan', 'bth'): (barthann, False), + ('bartlett', 'bart', 'brt'): (bartlett, False), + ('blackman', 'black', 'blk'): (blackman, False), + ('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False), + ('bohman', 'bman', 'bmn'): (bohman, False), + ('boxcar', 'box', 'ones', + 'rect', 'rectangular'): (boxcar, False), + ('chebwin', 'cheb'): (chebwin, True), + ('cosine', 'halfcosine'): (cosine, False), + ('exponential', 'poisson'): (exponential, True), + ('flattop', 'flat', 'flt'): (flattop, False), + ('gaussian', 'gauss', 'gss'): (gaussian, True), + ('general gaussian', 'general_gaussian', + 'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True), + ('hamming', 'hamm', 'ham'): (hamming, False), + ('hanning', 'hann', 'han'): (hann, False), + ('kaiser', 'ksr'): (kaiser, True), + ('nuttall', 'nutl', 'nut'): (nuttall, False), + ('parzen', 'parz', 'par'): (parzen, False), + ('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True), + ('triangle', 'triang', 'tri'): (triang, False), + ('tukey', 'tuk'): (tukey, True), +} + +# Fill dict with all valid window name strings +_win_equiv = {} +for k, v in _win_equiv_raw.items(): + for key in k: + _win_equiv[key] = v[0] + +# Keep track of which windows need additional parameters 
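+# (e.g. 'kaiser' requires beta, so get_window below must receive it as
+# ('kaiser', beta); parameter-free names like 'hann' may be a bare string)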
+_needs_param = set() +for k, v in _win_equiv_raw.items(): + if v[1]: + _needs_param.update(k) + + +def get_window(window, Nx, fftbins=True): + """ + Return a window. + + Parameters + ---------- + window : string, float, or tuple + The type of window to create. See below for more details. + Nx : int + The number of samples in the window. + fftbins : bool, optional + If True (default), create a "periodic" window, ready to use with + `ifftshift` and be multiplied by the result of an FFT (see also + `fftpack.fftfreq`). + If False, create a "symmetric" window, for use in filter design. + + Returns + ------- + get_window : ndarray + Returns a window of length `Nx` and type `window` + + Notes + ----- + Window types: + + `boxcar`, `triang`, `blackman`, `hamming`, `hann`, `bartlett`, + `flattop`, `parzen`, `bohman`, `blackmanharris`, `nuttall`, + `barthann`, `kaiser` (needs beta), `gaussian` (needs standard + deviation), `general_gaussian` (needs power, width), `slepian` + (needs width), `dpss` (needs normalized half-bandwidth), + `chebwin` (needs attenuation), `exponential` (needs decay scale), + `tukey` (needs taper fraction) + + If the window requires no parameters, then `window` can be a string. + + If the window requires parameters, then `window` must be a tuple + with the first argument the string name of the window, and the next + arguments the needed parameters. + + If `window` is a floating point number, it is interpreted as the beta + parameter of the `kaiser` window. + + Each of the window types listed above is also the name of + a function that can be called directly to create a window of + that type. + + Examples + -------- + >>> from scipy import signal + >>> signal.get_window('triang', 7) + array([ 0.125, 0.375, 0.625, 0.875, 0.875, 0.625, 0.375]) + >>> signal.get_window(('kaiser', 4.0), 9) + array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093, + 0.97885093, 0.82160913, 0.56437221, 0.29425961]) + >>> signal.get_window(4.0, 9) + array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093, + 0.97885093, 0.82160913, 0.56437221, 0.29425961]) + + """ + sym = not fftbins + try: + beta = float(window) + except (TypeError, ValueError): + args = () + if isinstance(window, tuple): + winstr = window[0] + if len(window) > 1: + args = window[1:] + elif isinstance(window, string_types): + if window in _needs_param: + raise ValueError("The '" + window + "' window needs one or " + "more parameters -- pass a tuple.") + else: + winstr = window + else: + raise ValueError("%s as window type is not supported." % + str(type(window))) + + try: + winfunc = _win_equiv[winstr] + except KeyError: + raise ValueError("Unknown window type.") + + params = (Nx,) + args + (sym,) + else: + winfunc = kaiser + params = (Nx, beta, sym) + + return winfunc(*params) diff --git a/project/venv/lib/python2.7/site-packages/scipy/signal/windows/windows.pyc b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/windows.pyc new file mode 100644 index 0000000..5b0e77b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/signal/windows/windows.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/__init__.py new file mode 100644 index 0000000..2da8937 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/__init__.py @@ -0,0 +1,252 @@ +""" +===================================== +Sparse matrices (:mod:`scipy.sparse`) +===================================== + +.. 
currentmodule:: scipy.sparse + +SciPy 2-D sparse matrix package for numeric data. + +Contents +======== + +Sparse matrix classes +--------------------- + +.. autosummary:: + :toctree: generated/ + + bsr_matrix - Block Sparse Row matrix + coo_matrix - A sparse matrix in COOrdinate format + csc_matrix - Compressed Sparse Column matrix + csr_matrix - Compressed Sparse Row matrix + dia_matrix - Sparse matrix with DIAgonal storage + dok_matrix - Dictionary Of Keys based sparse matrix + lil_matrix - Row-based linked list sparse matrix + spmatrix - Sparse matrix base class + +Functions +--------- + +Building sparse matrices: + +.. autosummary:: + :toctree: generated/ + + eye - Sparse MxN matrix whose k-th diagonal is all ones + identity - Identity matrix in sparse format + kron - kronecker product of two sparse matrices + kronsum - kronecker sum of sparse matrices + diags - Return a sparse matrix from diagonals + spdiags - Return a sparse matrix from diagonals + block_diag - Build a block diagonal sparse matrix + tril - Lower triangular portion of a matrix in sparse format + triu - Upper triangular portion of a matrix in sparse format + bmat - Build a sparse matrix from sparse sub-blocks + hstack - Stack sparse matrices horizontally (column wise) + vstack - Stack sparse matrices vertically (row wise) + rand - Random values in a given shape + random - Random values in a given shape + +Save and load sparse matrices: + +.. autosummary:: + :toctree: generated/ + + save_npz - Save a sparse matrix to a file using ``.npz`` format. + load_npz - Load a sparse matrix from a file using ``.npz`` format. + +Sparse matrix tools: + +.. autosummary:: + :toctree: generated/ + + find + +Identifying sparse matrices: + +.. autosummary:: + :toctree: generated/ + + issparse + isspmatrix + isspmatrix_csc + isspmatrix_csr + isspmatrix_bsr + isspmatrix_lil + isspmatrix_dok + isspmatrix_coo + isspmatrix_dia + +Submodules +---------- + +.. autosummary:: + :toctree: generated/ + + csgraph - Compressed sparse graph routines + linalg - sparse linear algebra routines + +Exceptions +---------- + +.. autosummary:: + :toctree: generated/ + + SparseEfficiencyWarning + SparseWarning + + +Usage information +================= + +There are seven available sparse matrix types: + + 1. csc_matrix: Compressed Sparse Column format + 2. csr_matrix: Compressed Sparse Row format + 3. bsr_matrix: Block Sparse Row format + 4. lil_matrix: List of Lists format + 5. dok_matrix: Dictionary of Keys format + 6. coo_matrix: COOrdinate format (aka IJV, triplet format) + 7. dia_matrix: DIAgonal format + +To construct a matrix efficiently, use either dok_matrix or lil_matrix. +The lil_matrix class supports basic slicing and fancy indexing with a +similar syntax to NumPy arrays. As illustrated below, the COO format +may also be used to efficiently construct matrices. Despite their +similarity to NumPy arrays, it is **strongly discouraged** to use NumPy +functions directly on these matrices because NumPy may not properly convert +them for computations, leading to unexpected (and incorrect) results. If you +do want to apply a NumPy function to these matrices, first check if SciPy has +its own implementation for the given sparse matrix class, or **convert the +sparse matrix to a NumPy array** (e.g. using the `toarray()` method of the +class) first before applying the method. + +To perform manipulations such as multiplication or inversion, first +convert the matrix to either CSC or CSR format. 
The lil_matrix format is +row-based, so conversion to CSR is efficient, whereas conversion to CSC +is less so. + +All conversions among the CSR, CSC, and COO formats are efficient, +linear-time operations. + +Matrix vector product +--------------------- +To do a vector product between a sparse matrix and a vector simply use +the matrix `dot` method, as described in its docstring: + +>>> import numpy as np +>>> from scipy.sparse import csr_matrix +>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) +>>> v = np.array([1, 0, -1]) +>>> A.dot(v) +array([ 1, -3, -1], dtype=int64) + +.. warning:: As of NumPy 1.7, `np.dot` is not aware of sparse matrices, + therefore using it will result on unexpected results or errors. + The corresponding dense array should be obtained first instead: + + >>> np.dot(A.toarray(), v) + array([ 1, -3, -1], dtype=int64) + + but then all the performance advantages would be lost. + +The CSR format is specially suitable for fast matrix vector products. + +Example 1 +--------- +Construct a 1000x1000 lil_matrix and add some values to it: + +>>> from scipy.sparse import lil_matrix +>>> from scipy.sparse.linalg import spsolve +>>> from numpy.linalg import solve, norm +>>> from numpy.random import rand + +>>> A = lil_matrix((1000, 1000)) +>>> A[0, :100] = rand(100) +>>> A[1, 100:200] = A[0, :100] +>>> A.setdiag(rand(1000)) + +Now convert it to CSR format and solve A x = b for x: + +>>> A = A.tocsr() +>>> b = rand(1000) +>>> x = spsolve(A, b) + +Convert it to a dense matrix and solve, and check that the result +is the same: + +>>> x_ = solve(A.toarray(), b) + +Now we can compute norm of the error with: + +>>> err = norm(x-x_) +>>> err < 1e-10 +True + +It should be small :) + + +Example 2 +--------- + +Construct a matrix in COO format: + +>>> from scipy import sparse +>>> from numpy import array +>>> I = array([0,3,1,0]) +>>> J = array([0,3,1,2]) +>>> V = array([4,5,7,9]) +>>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4)) + +Notice that the indices do not need to be sorted. + +Duplicate (i,j) entries are summed when converting to CSR or CSC. + +>>> I = array([0,0,1,3,1,0,0]) +>>> J = array([0,2,1,3,1,0,0]) +>>> V = array([1,1,1,1,1,1,1]) +>>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr() + +This is useful for constructing finite-element stiffness and mass matrices. + +Further Details +--------------- + +CSR column indices are not necessarily sorted. Likewise for CSC row +indices. Use the .sorted_indices() and .sort_indices() methods when +sorted indices are required (e.g. when passing data to other libraries). + +""" + +from __future__ import division, print_function, absolute_import + +# Original code by Travis Oliphant. +# Modified and extended by Ed Schofield, Robert Cimrman, +# Nathan Bell, and Jake Vanderplas. + +import warnings as _warnings + +from .base import * +from .csr import * +from .csc import * +from .lil import * +from .dok import * +from .coo import * +from .dia import * +from .bsr import * +from .construct import * +from .extract import * +from ._matrix_io import * + +# For backward compatibility with v0.19. +from . 
import csgraph + +__all__ = [s for s in dir() if not s.startswith('_')] + +# Filter PendingDeprecationWarning for np.matrix introduced with numpy 1.15 +_warnings.filterwarnings('ignore', message='the matrix subclass is not the recommended way') + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/__init__.pyc new file mode 100644 index 0000000..b8d204e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/_csparsetools.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/_csparsetools.so new file mode 100755 index 0000000..fd8c223 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/_csparsetools.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/_matrix_io.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/_matrix_io.py new file mode 100644 index 0000000..5f31005 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/_matrix_io.py @@ -0,0 +1,157 @@ +from __future__ import division, print_function, absolute_import + +import sys +import numpy as np +import scipy.sparse + +from scipy._lib._version import NumpyVersion + +__all__ = ['save_npz', 'load_npz'] + + +if NumpyVersion(np.__version__) >= '1.10.0': + # Make loading safe vs. malicious input + PICKLE_KWARGS = dict(allow_pickle=False) +else: + PICKLE_KWARGS = dict() + + +def save_npz(file, matrix, compressed=True): + """ Save a sparse matrix to a file using ``.npz`` format. + + Parameters + ---------- + file : str or file-like object + Either the file name (string) or an open file (file-like object) + where the data will be saved. If file is a string, the ``.npz`` + extension will be appended to the file name if it is not already + there. + matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or coo``) + The sparse matrix to save. + compressed : bool, optional + Allow compressing the file. Default: True + + See Also + -------- + scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format. + numpy.savez: Save several arrays into a ``.npz`` archive. + numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive. 
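+
+    Notes
+    -----
+    Only matrices in ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo`` format
+    can be saved directly; other formats raise ``NotImplementedError`` and
+    should be converted first. A minimal sketch (file path chosen
+    arbitrarily):
+
+    >>> import scipy.sparse
+    >>> m = scipy.sparse.lil_matrix((3, 3))
+    >>> scipy.sparse.save_npz('/tmp/converted.npz', m.tocsr())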
+ + Examples + -------- + Store sparse matrix to disk, and load it again: + + >>> import scipy.sparse + >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]])) + >>> sparse_matrix + <2x3 sparse matrix of type '<class 'numpy.int64'>' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_matrix.todense() + matrix([[0, 0, 3], + [4, 0, 0]], dtype=int64) + + >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) + >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') + + >>> sparse_matrix + <2x3 sparse matrix of type '<class 'numpy.int64'>' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_matrix.todense() + matrix([[0, 0, 3], + [4, 0, 0]], dtype=int64) + """ + arrays_dict = {} + if matrix.format in ('csc', 'csr', 'bsr'): + arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) + elif matrix.format == 'dia': + arrays_dict.update(offsets=matrix.offsets) + elif matrix.format == 'coo': + arrays_dict.update(row=matrix.row, col=matrix.col) + else: + raise NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(matrix.format)) + arrays_dict.update( + format=matrix.format.encode('ascii'), + shape=matrix.shape, + data=matrix.data + ) + if compressed: + np.savez_compressed(file, **arrays_dict) + else: + np.savez(file, **arrays_dict) + + +def load_npz(file): + """ Load a sparse matrix from a file using ``.npz`` format. + + Parameters + ---------- + file : str or file-like object + Either the file name (string) or an open file (file-like object) + where the data will be loaded. + + Returns + ------- + result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix + A sparse matrix containing the loaded data. + + Raises + ------ + IOError + If the input file does not exist or cannot be read. + + See Also + -------- + scipy.sparse.save_npz: Save a sparse matrix to a file using ``.npz`` format. + numpy.load: Load several arrays from a ``.npz`` archive. + + Examples + -------- + Store sparse matrix to disk, and load it again: + + >>> import scipy.sparse + >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]])) + >>> sparse_matrix + <2x3 sparse matrix of type '<class 'numpy.int64'>' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_matrix.todense() + matrix([[0, 0, 3], + [4, 0, 0]], dtype=int64) + + >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) + >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') + + >>> sparse_matrix + <2x3 sparse matrix of type '<class 'numpy.int64'>' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_matrix.todense() + matrix([[0, 0, 3], + [4, 0, 0]], dtype=int64) + """ + + with np.load(file, **PICKLE_KWARGS) as loaded: + try: + matrix_format = loaded['format'] + except KeyError: + raise ValueError('The file {} does not contain a sparse matrix.'.format(file)) + + matrix_format = matrix_format.item() + + if sys.version_info[0] >= 3 and not isinstance(matrix_format, str): + # Play safe with Python 2 vs 3 backward compatibility; + # files saved with Scipy < 1.0.0 may contain unicode or bytes. 
+ matrix_format = matrix_format.decode('ascii') + + try: + cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format)) + except AttributeError: + raise ValueError('Unknown matrix format "{}"'.format(matrix_format)) + + if matrix_format in ('csc', 'csr', 'bsr'): + return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape']) + elif matrix_format == 'dia': + return cls((loaded['data'], loaded['offsets']), shape=loaded['shape']) + elif matrix_format == 'coo': + return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape']) + else: + raise NotImplementedError('Load is not implemented for ' + 'sparse matrix of format {}.'.format(matrix_format)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/_matrix_io.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/_matrix_io.pyc new file mode 100644 index 0000000..ba14801 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/_matrix_io.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/_sparsetools.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/_sparsetools.so new file mode 100755 index 0000000..5a20e0f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/_sparsetools.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/base.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/base.py new file mode 100644 index 0000000..86a496d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/base.py @@ -0,0 +1,1220 @@ +"""Base class for sparse matrices""" +from __future__ import division, print_function, absolute_import + +import sys + +import numpy as np + +from scipy._lib.six import xrange +from scipy._lib._numpy_compat import broadcast_to +from .sputils import (isdense, isscalarlike, isintlike, + get_sum_dtype, validateaxis, check_reshape_kwargs, + check_shape) + +__all__ = ['spmatrix', 'isspmatrix', 'issparse', + 'SparseWarning', 'SparseEfficiencyWarning'] + + +class SparseWarning(Warning): + pass + + +class SparseFormatWarning(SparseWarning): + pass + + +class SparseEfficiencyWarning(SparseWarning): + pass + + +# The formats that we might potentially understand. +_formats = {'csc': [0, "Compressed Sparse Column"], + 'csr': [1, "Compressed Sparse Row"], + 'dok': [2, "Dictionary Of Keys"], + 'lil': [3, "LInked List"], + 'dod': [4, "Dictionary of Dictionaries"], + 'sss': [5, "Symmetric Sparse Skyline"], + 'coo': [6, "COOrdinate"], + 'lba': [7, "Linpack BAnded"], + 'egd': [8, "Ellpack-itpack Generalized Diagonal"], + 'dia': [9, "DIAgonal"], + 'bsr': [10, "Block Sparse Row"], + 'msr': [11, "Modified compressed Sparse Row"], + 'bsc': [12, "Block Sparse Column"], + 'msc': [13, "Modified compressed Sparse Column"], + 'ssk': [14, "Symmetric SKyline"], + 'nsk': [15, "Nonsymmetric SKyline"], + 'jad': [16, "JAgged Diagonal"], + 'uss': [17, "Unsymmetric Sparse Skyline"], + 'vbr': [18, "Variable Block Row"], + 'und': [19, "Undefined"] + } + + +# These univariate ufuncs preserve zeros. +_ufuncs_with_fixed_point_at_zero = frozenset([ + np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh, + np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad, + np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt]) + + +MAXPRINT = 50 + + +class spmatrix(object): + """ This class provides a base class for all sparse matrices. It + cannot be instantiated. Most of the work is provided by subclasses. 
+ """ + + __array_priority__ = 10.1 + ndim = 2 + + def __init__(self, maxprint=MAXPRINT): + self._shape = None + if self.__class__.__name__ == 'spmatrix': + raise ValueError("This class is not intended" + " to be instantiated directly.") + self.maxprint = maxprint + + def set_shape(self, shape): + """See `reshape`.""" + # Make sure copy is False since this is in place + # Make sure format is unchanged because we are doing a __dict__ swap + new_matrix = self.reshape(shape, copy=False).asformat(self.format) + self.__dict__ = new_matrix.__dict__ + + def get_shape(self): + """Get shape of a matrix.""" + return self._shape + + shape = property(fget=get_shape, fset=set_shape) + + def reshape(self, *args, **kwargs): + """reshape(self, shape, order='C', copy=False) + + Gives a new shape to a sparse matrix without changing its data. + + Parameters + ---------- + shape : length-2 tuple of ints + The new shape should be compatible with the original shape. + order : {'C', 'F'}, optional + Read the elements using this index order. 'C' means to read and + write the elements using C-like index order; e.g. read entire first + row, then second row, etc. 'F' means to read and write the elements + using Fortran-like index order; e.g. read entire first column, then + second column, etc. + copy : bool, optional + Indicates whether or not attributes of self should be copied + whenever possible. The degree to which attributes are copied varies + depending on the type of sparse matrix being used. + + Returns + ------- + reshaped_matrix : sparse matrix + A sparse matrix with the given `shape`, not necessarily of the same + format as the current object. + + See Also + -------- + np.matrix.reshape : NumPy's implementation of 'reshape' for matrices + """ + # If the shape already matches, don't bother doing an actual reshape + # Otherwise, the default is to convert to COO and use its reshape + shape = check_shape(args, self.shape) + order, copy = check_reshape_kwargs(kwargs) + if shape == self.shape: + if copy: + return self.copy() + else: + return self + + return self.tocoo(copy=copy).reshape(shape, order=order, copy=False) + + def resize(self, shape): + """Resize the matrix in-place to dimensions given by ``shape`` + + Any elements that lie within the new shape will remain at the same + indices, while non-zero elements lying outside the new shape are + removed. + + Parameters + ---------- + shape : (int, int) + number of rows and columns in the new matrix + + Notes + ----- + The semantics are not identical to `numpy.ndarray.resize` or + `numpy.resize`. Here, the same data will be maintained at each index + before and after reshape, if that index is within the new bounds. In + numpy, resizing maintains contiguity of the array, moving elements + around in the logical matrix but not within a flattened representation. + + We give no guarantees about whether the underlying data attributes + (arrays, etc.) will be modified in place or replaced with new objects. + """ + # As an inplace operation, this requires implementation in each format. + raise NotImplementedError( + '{}.resize is not implemented'.format(type(self).__name__)) + + def astype(self, dtype, casting='unsafe', copy=True): + """Cast the matrix elements to a specified type. + + Parameters + ---------- + dtype : string or numpy dtype + Typecode or data-type to which to cast the data. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. + Defaults to 'unsafe' for backwards compatibility. 
+ 'no' means the data types should not be cast at all. + 'equiv' means only byte-order changes are allowed. + 'safe' means only casts which can preserve values are allowed. + 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + 'unsafe' means any data conversions may be done. + copy : bool, optional + If `copy` is `False`, the result might share some memory with this + matrix. If `copy` is `True`, it is guaranteed that the result and + this matrix do not share any memory. + """ + + dtype = np.dtype(dtype) + if self.dtype != dtype: + return self.tocsr().astype( + dtype, casting=casting, copy=copy).asformat(self.format) + elif copy: + return self.copy() + else: + return self + + def asfptype(self): + """Upcast matrix to a floating point format (if necessary)""" + + fp_types = ['f', 'd', 'F', 'D'] + + if self.dtype.char in fp_types: + return self + else: + for fp_type in fp_types: + if self.dtype <= np.dtype(fp_type): + return self.astype(fp_type) + + raise TypeError('cannot upcast [%s] to a floating ' + 'point format' % self.dtype.name) + + def __iter__(self): + for r in xrange(self.shape[0]): + yield self[r, :] + + def getmaxprint(self): + """Maximum number of elements to display when printed.""" + return self.maxprint + + def count_nonzero(self): + """Number of non-zero entries, equivalent to + + np.count_nonzero(a.toarray()) + + Unlike getnnz() and the nnz property, which return the number of stored + entries (the length of the data attribute), this method counts the + actual number of non-zero entries in data. + """ + raise NotImplementedError("count_nonzero not implemented for %s." % + self.__class__.__name__) + + def getnnz(self, axis=None): + """Number of stored values, including explicit zeros. + + Parameters + ---------- + axis : None, 0, or 1 + Select between the number of values across the whole matrix, in + each column, or in each row. + + See also + -------- + count_nonzero : Number of non-zero entries + """ + raise NotImplementedError("getnnz not implemented for %s." % + self.__class__.__name__) + + @property + def nnz(self): + """Number of stored values, including explicit zeros. + + See also + -------- + count_nonzero : Number of non-zero entries + """ + return self.getnnz() + + def getformat(self): + """Format of a matrix representation as a string.""" + return getattr(self, 'format', 'und') + + def __repr__(self): + _, format_name = _formats[self.getformat()] + return "<%dx%d sparse matrix of type '%s'\n" \ + "\twith %d stored elements in %s format>" % \ + (self.shape + (self.dtype.type, self.nnz, format_name)) + + def __str__(self): + maxprint = self.getmaxprint() + + A = self.tocoo() + + # helper function, outputs "(i,j) v" + def tostr(row, col, data): + triples = zip(list(zip(row, col)), data) + return '\n'.join([(' %s\t%s' % t) for t in triples]) + + if self.nnz > maxprint: + half = maxprint // 2 + out = tostr(A.row[:half], A.col[:half], A.data[:half]) + out += "\n :\t:\n" + half = maxprint - maxprint//2 + out += tostr(A.row[-half:], A.col[-half:], A.data[-half:]) + else: + out = tostr(A.row, A.col, A.data) + + return out + + def __bool__(self): # Simple -- other ideas? + if self.shape == (1, 1): + return self.nnz != 0 + else: + raise ValueError("The truth value of an array with more than one " + "element is ambiguous. Use a.any() or a.all().") + __nonzero__ = __bool__ + + # What should len(sparse) return? For consistency with dense matrices, + # perhaps it should be the number of rows? 
But for some uses the number of + # non-zeros is more important. For now, raise an exception! + def __len__(self): + raise TypeError("sparse matrix length is ambiguous; use getnnz()" + " or shape[0]") + + def asformat(self, format, copy=False): + """Return this matrix in the passed format. + + Parameters + ---------- + format : {str, None} + The desired matrix format ("csr", "csc", "lil", "dok", "array", ...) + or None for no conversion. + copy : bool, optional + If True, the result is guaranteed to not share data with self. + + Returns + ------- + A : This matrix in the passed format. + """ + if format is None or format == self.format: + if copy: + return self.copy() + else: + return self + else: + try: + convert_method = getattr(self, 'to' + format) + except AttributeError: + raise ValueError('Format {} is unknown.'.format(format)) + + # Forward the copy kwarg, if it's accepted. + try: + return convert_method(copy=copy) + except TypeError: + return convert_method() + + ################################################################### + # NOTE: All arithmetic operations use csr_matrix by default. + # Therefore a new sparse matrix format just needs to define a + # .tocsr() method to provide arithmetic support. Any of these + # methods can be overridden for efficiency. + #################################################################### + + def multiply(self, other): + """Point-wise multiplication by another matrix + """ + return self.tocsr().multiply(other) + + def maximum(self, other): + """Element-wise maximum between this and another matrix.""" + return self.tocsr().maximum(other) + + def minimum(self, other): + """Element-wise minimum between this and another matrix.""" + return self.tocsr().minimum(other) + + def dot(self, other): + """Ordinary dot product + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csr_matrix + >>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) + >>> v = np.array([1, 0, -1]) + >>> A.dot(v) + array([ 1, -3, -1], dtype=int64) + + """ + return self * other + + def power(self, n, dtype=None): + """Element-wise power.""" + return self.tocsr().power(n, dtype=dtype) + + def __eq__(self, other): + return self.tocsr().__eq__(other) + + def __ne__(self, other): + return self.tocsr().__ne__(other) + + def __lt__(self, other): + return self.tocsr().__lt__(other) + + def __gt__(self, other): + return self.tocsr().__gt__(other) + + def __le__(self, other): + return self.tocsr().__le__(other) + + def __ge__(self, other): + return self.tocsr().__ge__(other) + + def __abs__(self): + return abs(self.tocsr()) + + def _add_sparse(self, other): + return self.tocsr()._add_sparse(other) + + def _add_dense(self, other): + return self.tocoo()._add_dense(other) + + def _sub_sparse(self, other): + return self.tocsr()._sub_sparse(other) + + def _sub_dense(self, other): + return self.todense() - other + + def _rsub_dense(self, other): + # note: this can't be replaced by other + (-self) for unsigned types + return other - self.todense() + + def __add__(self, other): # self + other + if isscalarlike(other): + if other == 0: + return self.copy() + # Now we would add this scalar to every element. 
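+            # That would turn every implicit zero into a stored nonzero and
+            # densify the result, hence the error below.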
+ raise NotImplementedError('adding a nonzero scalar to a ' + 'sparse matrix is not supported') + elif isspmatrix(other): + if other.shape != self.shape: + raise ValueError("inconsistent shapes") + return self._add_sparse(other) + elif isdense(other): + other = broadcast_to(other, self.shape) + return self._add_dense(other) + else: + return NotImplemented + + def __radd__(self,other): # other + self + return self.__add__(other) + + def __sub__(self, other): # self - other + if isscalarlike(other): + if other == 0: + return self.copy() + raise NotImplementedError('subtracting a nonzero scalar from a ' + 'sparse matrix is not supported') + elif isspmatrix(other): + if other.shape != self.shape: + raise ValueError("inconsistent shapes") + return self._sub_sparse(other) + elif isdense(other): + other = broadcast_to(other, self.shape) + return self._sub_dense(other) + else: + return NotImplemented + + def __rsub__(self,other): # other - self + if isscalarlike(other): + if other == 0: + return -self.copy() + raise NotImplementedError('subtracting a sparse matrix from a ' + 'nonzero scalar is not supported') + elif isdense(other): + other = broadcast_to(other, self.shape) + return self._rsub_dense(other) + else: + return NotImplemented + + def __mul__(self, other): + """interpret other and call one of the following + + self._mul_scalar() + self._mul_vector() + self._mul_multivector() + self._mul_sparse_matrix() + """ + + M, N = self.shape + + if other.__class__ is np.ndarray: + # Fast path for the most common case + if other.shape == (N,): + return self._mul_vector(other) + elif other.shape == (N, 1): + return self._mul_vector(other.ravel()).reshape(M, 1) + elif other.ndim == 2 and other.shape[0] == N: + return self._mul_multivector(other) + + if isscalarlike(other): + # scalar value + return self._mul_scalar(other) + + if issparse(other): + if self.shape[1] != other.shape[0]: + raise ValueError('dimension mismatch') + return self._mul_sparse_matrix(other) + + # If it's a list or whatever, treat it like a matrix + other_a = np.asanyarray(other) + + if other_a.ndim == 0 and other_a.dtype == np.object_: + # Not interpretable as an array; return NotImplemented so that + # other's __rmul__ can kick in if that's implemented. 
+ return NotImplemented + + try: + other.shape + except AttributeError: + other = other_a + + if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1: + # dense row or column vector + if other.shape != (N,) and other.shape != (N, 1): + raise ValueError('dimension mismatch') + + result = self._mul_vector(np.ravel(other)) + + if isinstance(other, np.matrix): + result = np.asmatrix(result) + + if other.ndim == 2 and other.shape[1] == 1: + # If 'other' was an (nx1) column vector, reshape the result + result = result.reshape(-1, 1) + + return result + + elif other.ndim == 2: + ## + # dense 2D array or matrix ("multivector") + + if other.shape[0] != self.shape[1]: + raise ValueError('dimension mismatch') + + result = self._mul_multivector(np.asarray(other)) + + if isinstance(other, np.matrix): + result = np.asmatrix(result) + + return result + + else: + raise ValueError('could not interpret dimensions') + + # by default, use CSR for __mul__ handlers + def _mul_scalar(self, other): + return self.tocsr()._mul_scalar(other) + + def _mul_vector(self, other): + return self.tocsr()._mul_vector(other) + + def _mul_multivector(self, other): + return self.tocsr()._mul_multivector(other) + + def _mul_sparse_matrix(self, other): + return self.tocsr()._mul_sparse_matrix(other) + + def __rmul__(self, other): # other * self + if isscalarlike(other): + return self.__mul__(other) + else: + # Don't use asarray unless we have to + try: + tr = other.transpose() + except AttributeError: + tr = np.asarray(other).transpose() + return (self.transpose() * tr).transpose() + + ##################################### + # matmul (@) operator (Python 3.5+) # + ##################################### + + def __matmul__(self, other): + if isscalarlike(other): + raise ValueError("Scalar operands are not allowed, " + "use '*' instead") + return self.__mul__(other) + + def __rmatmul__(self, other): + if isscalarlike(other): + raise ValueError("Scalar operands are not allowed, " + "use '*' instead") + return self.__rmul__(other) + + #################### + # Other Arithmetic # + #################### + + def _divide(self, other, true_divide=False, rdivide=False): + if isscalarlike(other): + if rdivide: + if true_divide: + return np.true_divide(other, self.todense()) + else: + return np.divide(other, self.todense()) + + if true_divide and np.can_cast(self.dtype, np.float_): + return self.astype(np.float_)._mul_scalar(1./other) + else: + r = self._mul_scalar(1./other) + + scalar_dtype = np.asarray(other).dtype + if (np.issubdtype(self.dtype, np.integer) and + np.issubdtype(scalar_dtype, np.integer)): + return r.astype(self.dtype) + else: + return r + + elif isdense(other): + if not rdivide: + if true_divide: + return np.true_divide(self.todense(), other) + else: + return np.divide(self.todense(), other) + else: + if true_divide: + return np.true_divide(other, self.todense()) + else: + return np.divide(other, self.todense()) + elif isspmatrix(other): + if rdivide: + return other._divide(self, true_divide, rdivide=False) + + self_csr = self.tocsr() + if true_divide and np.can_cast(self.dtype, np.float_): + return self_csr.astype(np.float_)._divide_sparse(other) + else: + return self_csr._divide_sparse(other) + else: + return NotImplemented + + def __truediv__(self, other): + return self._divide(other, true_divide=True) + + def __div__(self, other): + # Always do true division + return self._divide(other, true_divide=True) + + def __rtruediv__(self, other): + # Implementing this as the inverse would be too magical -- bail out + return 
NotImplemented + + def __rdiv__(self, other): + # Implementing this as the inverse would be too magical -- bail out + return NotImplemented + + def __neg__(self): + return -self.tocsr() + + def __iadd__(self, other): + return NotImplemented + + def __isub__(self, other): + return NotImplemented + + def __imul__(self, other): + return NotImplemented + + def __idiv__(self, other): + return self.__itruediv__(other) + + def __itruediv__(self, other): + return NotImplemented + + def __pow__(self, other): + if self.shape[0] != self.shape[1]: + raise TypeError('matrix is not square') + + if isintlike(other): + other = int(other) + if other < 0: + raise ValueError('exponent must be >= 0') + + if other == 0: + from .construct import eye + return eye(self.shape[0], dtype=self.dtype) + elif other == 1: + return self.copy() + else: + tmp = self.__pow__(other//2) + if (other % 2): + return self * tmp * tmp + else: + return tmp * tmp + elif isscalarlike(other): + raise ValueError('exponent must be an integer') + else: + return NotImplemented + + def __getattr__(self, attr): + if attr == 'A': + return self.toarray() + elif attr == 'T': + return self.transpose() + elif attr == 'H': + return self.getH() + elif attr == 'real': + return self._real() + elif attr == 'imag': + return self._imag() + elif attr == 'size': + return self.getnnz() + else: + raise AttributeError(attr + " not found") + + def transpose(self, axes=None, copy=False): + """ + Reverses the dimensions of the sparse matrix. + + Parameters + ---------- + axes : None, optional + This argument is in the signature *solely* for NumPy + compatibility reasons. Do not pass in anything except + for the default value. + copy : bool, optional + Indicates whether or not attributes of `self` should be + copied whenever possible. The degree to which attributes + are copied varies depending on the type of sparse matrix + being used. + + Returns + ------- + p : `self` with the dimensions reversed. + + See Also + -------- + np.matrix.transpose : NumPy's implementation of 'transpose' + for matrices + """ + return self.tocsr(copy=copy).transpose(axes=axes, copy=False) + + def conj(self, copy=True): + """Element-wise complex conjugation. + + If the matrix is of non-complex data type and `copy` is False, + this method does nothing and the data is not copied. + + Parameters + ---------- + copy : bool, optional + If True, the result is guaranteed to not share data with self. + + Returns + ------- + A : The element-wise complex conjugate. + + """ + if np.issubdtype(self.dtype, np.complexfloating): + return self.tocsr(copy=copy).conj(copy=False) + elif copy: + return self.copy() + else: + return self + + def conjugate(self, copy=True): + return self.conj(copy=copy) + + conjugate.__doc__ = conj.__doc__ + + # Renamed conjtranspose() -> getH() for compatibility with dense matrices + def getH(self): + """Return the Hermitian transpose of this matrix. + + See Also + -------- + np.matrix.getH : NumPy's implementation of `getH` for matrices + """ + return self.transpose().conj() + + def _real(self): + return self.tocsr()._real() + + def _imag(self): + return self.tocsr()._imag() + + def nonzero(self): + """nonzero indices + + Returns a tuple of arrays (row,col) containing the indices + of the non-zero elements of the matrix. 
+ + Examples + -------- + >>> from scipy.sparse import csr_matrix + >>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]]) + >>> A.nonzero() + (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2])) + + """ + + # convert to COOrdinate format + A = self.tocoo() + nz_mask = A.data != 0 + return (A.row[nz_mask], A.col[nz_mask]) + + def getcol(self, j): + """Returns a copy of column j of the matrix, as an (m x 1) sparse + matrix (column vector). + """ + # Spmatrix subclasses should override this method for efficiency. + # Post-multiply by a (n x 1) column vector 'a' containing all zeros + # except for a_j = 1 + from .csc import csc_matrix + n = self.shape[1] + if j < 0: + j += n + if j < 0 or j >= n: + raise IndexError("index out of bounds") + col_selector = csc_matrix(([1], [[j], [0]]), + shape=(n, 1), dtype=self.dtype) + return self * col_selector + + def getrow(self, i): + """Returns a copy of row i of the matrix, as a (1 x n) sparse + matrix (row vector). + """ + # Spmatrix subclasses should override this method for efficiency. + # Pre-multiply by a (1 x m) row vector 'a' containing all zeros + # except for a_i = 1 + from .csr import csr_matrix + m = self.shape[0] + if i < 0: + i += m + if i < 0 or i >= m: + raise IndexError("index out of bounds") + row_selector = csr_matrix(([1], [[0], [i]]), + shape=(1, m), dtype=self.dtype) + return row_selector * self + + # def __array__(self): + # return self.toarray() + + def todense(self, order=None, out=None): + """ + Return a dense matrix representation of this matrix. + + Parameters + ---------- + order : {'C', 'F'}, optional + Whether to store multi-dimensional data in C (row-major) + or Fortran (column-major) order in memory. The default + is 'None', indicating the NumPy default of C-ordered. + Cannot be specified in conjunction with the `out` + argument. + + out : ndarray, 2-dimensional, optional + If specified, uses this array (or `numpy.matrix`) as the + output buffer instead of allocating a new array to + return. The provided array must have the same shape and + dtype as the sparse matrix on which you are calling the + method. + + Returns + ------- + arr : numpy.matrix, 2-dimensional + A NumPy matrix object with the same shape and containing + the same data represented by the sparse matrix, with the + requested memory order. If `out` was passed and was an + array (rather than a `numpy.matrix`), it will be filled + with the appropriate values and returned wrapped in a + `numpy.matrix` object that shares the same memory. + """ + return np.asmatrix(self.toarray(order=order, out=out)) + + def toarray(self, order=None, out=None): + """ + Return a dense ndarray representation of this matrix. + + Parameters + ---------- + order : {'C', 'F'}, optional + Whether to store multi-dimensional data in C (row-major) + or Fortran (column-major) order in memory. The default + is 'None', indicating the NumPy default of C-ordered. + Cannot be specified in conjunction with the `out` + argument. + + out : ndarray, 2-dimensional, optional + If specified, uses this array as the output buffer + instead of allocating a new array to return. The provided + array must have the same shape and dtype as the sparse + matrix on which you are calling the method. For most + sparse types, `out` is required to be memory contiguous + (either C or Fortran ordered). + + Returns + ------- + arr : ndarray, 2-dimensional + An array with the same shape and containing the same + data represented by the sparse matrix, with the requested + memory order. 
If `out` was passed, the same object is + returned after being modified in-place to contain the + appropriate values. + """ + return self.tocoo(copy=False).toarray(order=order, out=out) + + # Any sparse matrix format deriving from spmatrix must define one of + # tocsr or tocoo. The other conversion methods may be implemented for + # efficiency, but are not required. + def tocsr(self, copy=False): + """Convert this matrix to Compressed Sparse Row format. + + With copy=False, the data/indices may be shared between this matrix and + the resultant csr_matrix. + """ + return self.tocoo(copy=copy).tocsr(copy=False) + + def todok(self, copy=False): + """Convert this matrix to Dictionary Of Keys format. + + With copy=False, the data/indices may be shared between this matrix and + the resultant dok_matrix. + """ + return self.tocoo(copy=copy).todok(copy=False) + + def tocoo(self, copy=False): + """Convert this matrix to COOrdinate format. + + With copy=False, the data/indices may be shared between this matrix and + the resultant coo_matrix. + """ + return self.tocsr(copy=False).tocoo(copy=copy) + + def tolil(self, copy=False): + """Convert this matrix to LInked List format. + + With copy=False, the data/indices may be shared between this matrix and + the resultant lil_matrix. + """ + return self.tocsr(copy=False).tolil(copy=copy) + + def todia(self, copy=False): + """Convert this matrix to sparse DIAgonal format. + + With copy=False, the data/indices may be shared between this matrix and + the resultant dia_matrix. + """ + return self.tocoo(copy=copy).todia(copy=False) + + def tobsr(self, blocksize=None, copy=False): + """Convert this matrix to Block Sparse Row format. + + With copy=False, the data/indices may be shared between this matrix and + the resultant bsr_matrix. + + When blocksize=(R, C) is provided, it will be used for construction of + the bsr_matrix. + """ + return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy) + + def tocsc(self, copy=False): + """Convert this matrix to Compressed Sparse Column format. + + With copy=False, the data/indices may be shared between this matrix and + the resultant csc_matrix. + """ + return self.tocsr(copy=copy).tocsc(copy=False) + + def copy(self): + """Returns a copy of this matrix. + + No data/indices will be shared between the returned value and current + matrix. + """ + return self.__class__(self, copy=True) + + def sum(self, axis=None, dtype=None, out=None): + """ + Sum the matrix elements over a given axis. + + Parameters + ---------- + axis : {-2, -1, 0, 1, None} optional + Axis along which the sum is computed. The default is to + compute the sum of all the matrix elements, returning a scalar + (i.e. `axis` = `None`). + dtype : dtype, optional + The type of the returned matrix and of the accumulator in which + the elements are summed. The dtype of `a` is used by default + unless `a` has an integer dtype of less precision than the default + platform integer. In that case, if `a` is signed then the platform + integer is used while if `a` is unsigned then an unsigned integer + of the same precision as the platform integer is used. + + .. versionadded:: 0.18.0 + + out : np.matrix, optional + Alternative output matrix in which to place the result. It must + have the same shape as the expected output, but the type of the + output values will be cast if necessary. + + .. versionadded:: 0.18.0 + + Returns + ------- + sum_along_axis : np.matrix + A matrix with the same shape as `self`, with the specified + axis removed. 
+
+        See Also
+        --------
+        np.matrix.sum : NumPy's implementation of 'sum' for matrices
+
+        """
+        validateaxis(axis)
+
+        # We use multiplication by a matrix of ones to achieve this.
+        # For some sparse matrix formats more efficient methods are
+        # possible -- these should override this function.
+        m, n = self.shape
+
+        # Mimic numpy's casting.
+        res_dtype = get_sum_dtype(self.dtype)
+
+        if axis is None:
+            # sum over rows and columns
+            return (self * np.asmatrix(np.ones(
+                (n, 1), dtype=res_dtype))).sum(
+                dtype=dtype, out=out)
+
+        if axis < 0:
+            axis += 2
+
+        # axis = 0 or 1 now
+        if axis == 0:
+            # sum over columns
+            ret = np.asmatrix(np.ones(
+                (1, m), dtype=res_dtype)) * self
+        else:
+            # sum over rows
+            ret = self * np.asmatrix(
+                np.ones((n, 1), dtype=res_dtype))
+
+        if out is not None and out.shape != ret.shape:
+            raise ValueError("dimensions do not match")
+
+        return ret.sum(axis=(), dtype=dtype, out=out)
+
+    def mean(self, axis=None, dtype=None, out=None):
+        """
+        Compute the arithmetic mean along the specified axis.
+
+        Returns the average of the matrix elements. The average is taken
+        over all elements in the matrix by default, otherwise over the
+        specified axis. `float64` intermediate and return values are used
+        for integer inputs.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None} optional
+            Axis along which the mean is computed. The default is to compute
+            the mean of all elements in the matrix (i.e. `axis` = `None`).
+        dtype : data-type, optional
+            Type to use in computing the mean. For integer inputs, the default
+            is `float64`; for floating point inputs, it is the same as the
+            input dtype.
+
+            .. versionadded:: 0.18.0
+
+        out : np.matrix, optional
+            Alternative output matrix in which to place the result. It must
+            have the same shape as the expected output, but the type of the
+            output values will be cast if necessary.
+
+            .. versionadded:: 0.18.0
+
+        Returns
+        -------
+        m : np.matrix
+
+        See Also
+        --------
+        np.matrix.mean : NumPy's implementation of 'mean' for matrices
+
+        """
+        def _is_integral(dtype):
+            return (np.issubdtype(dtype, np.integer) or
+                    np.issubdtype(dtype, np.bool_))
+
+        validateaxis(axis)
+
+        res_dtype = self.dtype.type
+        integral = _is_integral(self.dtype)
+
+        # output dtype
+        if dtype is None:
+            if integral:
+                res_dtype = np.float64
+        else:
+            res_dtype = np.dtype(dtype).type
+
+        # intermediate dtype for summation
+        inter_dtype = np.float64 if integral else res_dtype
+        inter_self = self.astype(inter_dtype)
+
+        if axis is None:
+            return (inter_self / np.array(
+                self.shape[0] * self.shape[1]))\
+                .sum(dtype=res_dtype, out=out)
+
+        if axis < 0:
+            axis += 2
+
+        # axis = 0 or 1 now
+        if axis == 0:
+            return (inter_self * (1.0 / self.shape[0])).sum(
+                axis=0, dtype=res_dtype, out=out)
+        else:
+            return (inter_self * (1.0 / self.shape[1])).sum(
+                axis=1, dtype=res_dtype, out=out)
+
+    def diagonal(self, k=0):
+        """Returns the k-th diagonal of the matrix.
+
+        Parameters
+        ----------
+        k : int, optional
+            Which diagonal to get, corresponding to elements a[i, i+k].
+            Default: 0 (the main diagonal).
+
+            .. versionadded:: 1.0
+
+        See also
+        --------
+        numpy.diagonal : Equivalent numpy function.
+
+        Examples
+        --------
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
+        >>> A.diagonal()
+        array([1, 0, 5])
+        >>> A.diagonal(k=1)
+        array([2, 3])
+        """
+        return self.tocsr().diagonal(k=k)
+
+    def setdiag(self, values, k=0):
+        """
+        Set diagonal or off-diagonal elements of the array.
+
+        Parameters
+        ----------
+        values : array_like
+            New values of the diagonal elements.
+
+            Values may have any length. If the diagonal is longer than values,
+            then the remaining diagonal entries will not be set. If values are
+            longer than the diagonal, then the remaining values are ignored.
+
+            If a scalar value is given, all of the diagonal is set to it.
+
+        k : int, optional
+            Which off-diagonal to set, corresponding to elements a[i,i+k].
+            Default: 0 (the main diagonal).
+
+        """
+        M, N = self.shape
+        if (k > 0 and k >= N) or (k < 0 and -k >= M):
+            raise ValueError("k exceeds matrix dimensions")
+        self._setdiag(np.asarray(values), k)
+
+    def _setdiag(self, values, k):
+        M, N = self.shape
+        if k < 0:
+            if values.ndim == 0:
+                # broadcast
+                max_index = min(M+k, N)
+                for i in xrange(max_index):
+                    self[i - k, i] = values
+            else:
+                max_index = min(M+k, N, len(values))
+                if max_index <= 0:
+                    return
+                for i, v in enumerate(values[:max_index]):
+                    self[i - k, i] = v
+        else:
+            if values.ndim == 0:
+                # broadcast
+                max_index = min(M, N-k)
+                for i in xrange(max_index):
+                    self[i, i + k] = values
+            else:
+                max_index = min(M, N-k, len(values))
+                if max_index <= 0:
+                    return
+                for i, v in enumerate(values[:max_index]):
+                    self[i, i + k] = v
+
+    def _process_toarray_args(self, order, out):
+        if out is not None:
+            if order is not None:
+                raise ValueError('order cannot be specified if out '
+                                 'is not None')
+            if out.shape != self.shape or out.dtype != self.dtype:
+                raise ValueError('out array must be same dtype and shape as '
+                                 'sparse matrix')
+            out[...] = 0.
+            return out
+        else:
+            return np.zeros(self.shape, dtype=self.dtype, order=order)
+
+
+def isspmatrix(x):
+    """Is x of a sparse matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a sparse matrix
+
+    Returns
+    -------
+    bool
+        True if x is a sparse matrix, False otherwise
+
+    Notes
+    -----
+    issparse and isspmatrix are aliases for the same function.
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, isspmatrix
+    >>> isspmatrix(csr_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import isspmatrix
+    >>> isspmatrix(5)
+    False
+    """
+    return isinstance(x, spmatrix)
+
+
+issparse = isspmatrix
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/base.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/base.pyc
new file mode 100644
index 0000000..f67ba9e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/base.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/bsr.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/bsr.py
new file mode 100644
index 0000000..8e4a851
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/bsr.py
@@ -0,0 +1,721 @@
+"""Compressed Block Sparse Row matrix format"""
+from __future__ import division, print_function, absolute_import
+
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['bsr_matrix', 'isspmatrix_bsr']
+
+from warnings import warn
+
+import numpy as np
+
+from .data import _data_matrix, _minmax_mixin
+from .compressed import _cs_matrix
+from .base import isspmatrix, _formats, spmatrix
+from .sputils import (isshape, getdtype, to_native, upcast, get_index_dtype,
+                      check_shape)
+from . import _sparsetools
+from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_pass1,
+                           bsr_matmat_pass2, bsr_transpose, bsr_sort_indices,
+                           bsr_tocsr)
+
+
+class bsr_matrix(_cs_matrix, _minmax_mixin):
+    """Block Sparse Row matrix
+
+    This can be instantiated in several ways:
+        bsr_matrix(D, [blocksize=(R,C)])
+            where D is a dense matrix or 2-D ndarray.
+
+        bsr_matrix(S, [blocksize=(R,C)])
+            with another sparse matrix S (equivalent to S.tobsr())
+
+        bsr_matrix((M, N), [blocksize=(R,C), dtype])
+            to construct an empty matrix with shape (M, N)
+            dtype is optional, defaulting to dtype='d'.
+
+        bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
+            where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
+
+        bsr_matrix((data, indices, indptr), [shape=(M, N)])
+            is the standard BSR representation where the block column
+            indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
+            and their corresponding block values are stored in
+            ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
+            supplied, the matrix dimensions are inferred from the index arrays.
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+        Number of nonzero elements
+    data
+        Data array of the matrix
+    indices
+        BSR format index array
+    indptr
+        BSR format index pointer array
+    blocksize
+        Block size of the matrix
+    has_sorted_indices
+        Whether indices are sorted
+
+    Notes
+    -----
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    **Summary of BSR format**
+
+    The Block Compressed Row (BSR) format is very similar to the Compressed
+    Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
+    submatrices like the last example below. Block matrices often arise in
+    vector-valued finite element discretizations. In such cases, BSR is
+    considerably more efficient than CSR and CSC for many sparse arithmetic
+    operations.
+
+    **Blocksize**
+
+    The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
+    That is, R and C must satisfy the relationship ``M % R = 0`` and
+    ``N % C = 0``.
+
+    If no blocksize is specified, a simple heuristic is applied to determine
+    an appropriate blocksize.
+ + Examples + -------- + >>> from scipy.sparse import bsr_matrix + >>> bsr_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 0, 1, 2, 2, 2]) + >>> col = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3 ,4, 5, 6]) + >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2) + >>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray() + array([[1, 1, 0, 0, 2, 2], + [1, 1, 0, 0, 2, 2], + [0, 0, 0, 0, 3, 3], + [0, 0, 0, 0, 3, 3], + [4, 4, 5, 5, 6, 6], + [4, 4, 5, 5, 6, 6]]) + + """ + format = 'bsr' + + def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None): + _data_matrix.__init__(self) + + if isspmatrix(arg1): + if isspmatrix_bsr(arg1) and copy: + arg1 = arg1.copy() + else: + arg1 = arg1.tobsr(blocksize=blocksize) + self._set_self(arg1) + + elif isinstance(arg1,tuple): + if isshape(arg1): + # it's a tuple of matrix dimensions (M,N) + self._shape = check_shape(arg1) + M,N = self.shape + # process blocksize + if blocksize is None: + blocksize = (1,1) + else: + if not isshape(blocksize): + raise ValueError('invalid blocksize=%s' % blocksize) + blocksize = tuple(blocksize) + self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float)) + + R,C = blocksize + if (M % R) != 0 or (N % C) != 0: + raise ValueError('shape must be multiple of blocksize') + + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C)) + self.indices = np.zeros(0, dtype=idx_dtype) + self.indptr = np.zeros(M//R + 1, dtype=idx_dtype) + + elif len(arg1) == 2: + # (data,(row,col)) format + from .coo import coo_matrix + self._set_self(coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)) + + elif len(arg1) == 3: + # (data,indices,indptr) format + (data, indices, indptr) = arg1 + + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + maxval = 1 + if shape is not None: + maxval = max(shape) + if blocksize is not None: + maxval = max(maxval, max(blocksize)) + idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True) + + self.indices = np.array(indices, copy=copy, dtype=idx_dtype) + self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype) + self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data)) + else: + raise ValueError('unrecognized bsr_matrix constructor usage') + else: + # must be dense + try: + arg1 = np.asarray(arg1) + except Exception: + raise ValueError("unrecognized form for" + " %s_matrix constructor" % self.format) + from .coo import coo_matrix + arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize) + self._set_self(arg1) + + if shape is not None: + self._shape = check_shape(shape) + else: + if self.shape is None: + # shape not already set, try to infer dimensions + try: + M = len(self.indptr) - 1 + N = self.indices.max() + 1 + except Exception: + raise ValueError('unable to infer matrix dimensions') + else: + R,C = self.blocksize + self._shape = check_shape((M*R,N*C)) + + if self.shape is None: + if shape is None: + # TODO infer shape here + raise ValueError('need to infer shape') + else: + self._shape = check_shape(shape) + + if dtype is not None: + self.data = self.data.astype(dtype) + + 
self.check_format(full_check=False) + + def check_format(self, full_check=True): + """check whether the matrix format is valid + + *Parameters*: + full_check: + True - rigorous check, O(N) operations : default + False - basic check, O(1) operations + + """ + M,N = self.shape + R,C = self.blocksize + + # index arrays should have integer data types + if self.indptr.dtype.kind != 'i': + warn("indptr array has non-integer dtype (%s)" + % self.indptr.dtype.name) + if self.indices.dtype.kind != 'i': + warn("indices array has non-integer dtype (%s)" + % self.indices.dtype.name) + + idx_dtype = get_index_dtype((self.indices, self.indptr)) + self.indptr = np.asarray(self.indptr, dtype=idx_dtype) + self.indices = np.asarray(self.indices, dtype=idx_dtype) + self.data = to_native(self.data) + + # check array shapes + if self.indices.ndim != 1 or self.indptr.ndim != 1: + raise ValueError("indices, and indptr should be 1-D") + if self.data.ndim != 3: + raise ValueError("data should be 3-D") + + # check index pointer + if (len(self.indptr) != M//R + 1): + raise ValueError("index pointer size (%d) should be (%d)" % + (len(self.indptr), M//R + 1)) + if (self.indptr[0] != 0): + raise ValueError("index pointer should start with 0") + + # check index and data arrays + if (len(self.indices) != len(self.data)): + raise ValueError("indices and data should have the same size") + if (self.indptr[-1] > len(self.indices)): + raise ValueError("Last value of index pointer should be less than " + "the size of index and data arrays") + + self.prune() + + if full_check: + # check format validity (more expensive) + if self.nnz > 0: + if self.indices.max() >= N//C: + raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max())) + if self.indices.min() < 0: + raise ValueError("column index values must be >= 0") + if np.diff(self.indptr).min() < 0: + raise ValueError("index pointer values must form a " + "non-decreasing sequence") + + # if not self.has_sorted_indices(): + # warn('Indices were not in sorted order. Sorting indices.') + # self.sort_indices(check_first=False) + + def _get_blocksize(self): + return self.data.shape[1:] + blocksize = property(fget=_get_blocksize) + + def getnnz(self, axis=None): + if axis is not None: + raise NotImplementedError("getnnz over an axis is not implemented " + "for BSR format") + R,C = self.blocksize + return int(self.indptr[-1] * R * C) + + getnnz.__doc__ = spmatrix.getnnz.__doc__ + + def __repr__(self): + format = _formats[self.getformat()][1] + return ("<%dx%d sparse matrix of type '%s'\n" + "\twith %d stored elements (blocksize = %dx%d) in %s format>" % + (self.shape + (self.dtype.type, self.nnz) + self.blocksize + + (format,))) + + def diagonal(self, k=0): + rows, cols = self.shape + if k <= -rows or k >= cols: + raise ValueError("k exceeds matrix dimensions") + R, C = self.blocksize + y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)), + dtype=upcast(self.dtype)) + _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C, + self.indptr, self.indices, + np.ravel(self.data), y) + return y + + diagonal.__doc__ = spmatrix.diagonal.__doc__ + + ########################## + # NotImplemented methods # + ########################## + + def __getitem__(self,key): + raise NotImplementedError + + def __setitem__(self,key,val): + raise NotImplementedError + + ###################### + # Arithmetic methods # + ###################### + + @np.deprecate(message="BSR matvec is deprecated in scipy 0.19.0. 
" + "Use * operator instead.") + def matvec(self, other): + """Multiply matrix by vector.""" + return self * other + + @np.deprecate(message="BSR matmat is deprecated in scipy 0.19.0. " + "Use * operator instead.") + def matmat(self, other): + """Multiply this sparse matrix by other matrix.""" + return self * other + + def _add_dense(self, other): + return self.tocoo(copy=False)._add_dense(other) + + def _mul_vector(self, other): + M,N = self.shape + R,C = self.blocksize + + result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype)) + + bsr_matvec(M//R, N//C, R, C, + self.indptr, self.indices, self.data.ravel(), + other, result) + + return result + + def _mul_multivector(self,other): + R,C = self.blocksize + M,N = self.shape + n_vecs = other.shape[1] # number of column vectors + + result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype)) + + bsr_matvecs(M//R, N//C, n_vecs, R, C, + self.indptr, self.indices, self.data.ravel(), + other.ravel(), result.ravel()) + + return result + + def _mul_sparse_matrix(self, other): + M, K1 = self.shape + K2, N = other.shape + + R,n = self.blocksize + + # convert to this format + if isspmatrix_bsr(other): + C = other.blocksize[1] + else: + C = 1 + + from .csr import isspmatrix_csr + + if isspmatrix_csr(other) and n == 1: + other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion + else: + other = other.tobsr(blocksize=(n,C)) + + idx_dtype = get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=(M//R)*(N//C)) + indptr = np.empty(self.indptr.shape, dtype=idx_dtype) + + csr_matmat_pass1(M//R, N//C, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + other.indptr.astype(idx_dtype), + other.indices.astype(idx_dtype), + indptr) + + bnnz = indptr[-1] + + idx_dtype = get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=bnnz) + indptr = indptr.astype(idx_dtype) + indices = np.empty(bnnz, dtype=idx_dtype) + data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype)) + + bsr_matmat_pass2(M//R, N//C, R, C, n, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + np.ravel(self.data), + other.indptr.astype(idx_dtype), + other.indices.astype(idx_dtype), + np.ravel(other.data), + indptr, + indices, + data) + + data = data.reshape(-1,R,C) + + # TODO eliminate zeros + + return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C)) + + ###################### + # Conversion methods # + ###################### + + def tobsr(self, blocksize=None, copy=False): + """Convert this matrix into Block Sparse Row Format. + + With copy=False, the data/indices may be shared between this + matrix and the resultant bsr_matrix. + + If blocksize=(R, C) is provided, it will be used for determining + block size of the bsr_matrix. 
+ """ + if blocksize not in [None, self.blocksize]: + return self.tocsr().tobsr(blocksize=blocksize) + if copy: + return self.copy() + else: + return self + + def tocsr(self, copy=False): + M, N = self.shape + R, C = self.blocksize + nnz = self.nnz + idx_dtype = get_index_dtype((self.indptr, self.indices), + maxval=max(nnz, N)) + indptr = np.empty(M + 1, dtype=idx_dtype) + indices = np.empty(nnz, dtype=idx_dtype) + data = np.empty(nnz, dtype=upcast(self.dtype)) + + bsr_tocsr(M // R, # n_brow + N // C, # n_bcol + R, C, + self.indptr.astype(idx_dtype, copy=False), + self.indices.astype(idx_dtype, copy=False), + self.data, + indptr, + indices, + data) + from .csr import csr_matrix + return csr_matrix((data, indices, indptr), shape=self.shape) + + tocsr.__doc__ = spmatrix.tocsr.__doc__ + + def tocsc(self, copy=False): + return self.tocsr(copy=False).tocsc(copy=copy) + + tocsc.__doc__ = spmatrix.tocsc.__doc__ + + def tocoo(self, copy=True): + """Convert this matrix to COOrdinate format. + + When copy=False the data array will be shared between + this matrix and the resultant coo_matrix. + """ + + M,N = self.shape + R,C = self.blocksize + + indptr_diff = np.diff(self.indptr) + if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize: + # Check for potential overflow + indptr_diff_limited = indptr_diff.astype(np.intp) + if np.any(indptr_diff_limited != indptr_diff): + raise ValueError("Matrix too big to convert") + indptr_diff = indptr_diff_limited + + row = (R * np.arange(M//R)).repeat(indptr_diff) + row = row.repeat(R*C).reshape(-1,R,C) + row += np.tile(np.arange(R).reshape(-1,1), (1,C)) + row = row.reshape(-1) + + col = (C * self.indices).repeat(R*C).reshape(-1,R,C) + col += np.tile(np.arange(C), (R,1)) + col = col.reshape(-1) + + data = self.data.reshape(-1) + + if copy: + data = data.copy() + + from .coo import coo_matrix + return coo_matrix((data,(row,col)), shape=self.shape) + + def toarray(self, order=None, out=None): + return self.tocoo(copy=False).toarray(order=order, out=out) + + toarray.__doc__ = spmatrix.toarray.__doc__ + + def transpose(self, axes=None, copy=False): + if axes is not None: + raise ValueError(("Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.")) + + R, C = self.blocksize + M, N = self.shape + NBLK = self.nnz//(R*C) + + if self.nnz == 0: + return bsr_matrix((N, M), blocksize=(C, R), + dtype=self.dtype, copy=copy) + + indptr = np.empty(N//C + 1, dtype=self.indptr.dtype) + indices = np.empty(NBLK, dtype=self.indices.dtype) + data = np.empty((NBLK, C, R), dtype=self.data.dtype) + + bsr_transpose(M//R, N//C, R, C, + self.indptr, self.indices, self.data.ravel(), + indptr, indices, data.ravel()) + + return bsr_matrix((data, indices, indptr), + shape=(N, M), copy=copy) + + transpose.__doc__ = spmatrix.transpose.__doc__ + + ############################################################## + # methods that examine or modify the internal data structure # + ############################################################## + + def eliminate_zeros(self): + """Remove zero elements in-place.""" + R,C = self.blocksize + M,N = self.shape + + mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks + + nonzero_blocks = mask.nonzero()[0] + + if len(nonzero_blocks) == 0: + return # nothing to do + + self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks] + + # modifies self.indptr and self.indices *in place* + _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr, + self.indices, mask) + self.prune() + 
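+    # Usage sketch for the in-place cleanup above (a hypothetical doctest,
+    # added for illustration only; values assume a 4x4 identity matrix
+    # stored as two 2x2 blocks):
+    #
+    #   >>> import numpy as np
+    #   >>> from scipy.sparse import bsr_matrix
+    #   >>> A = bsr_matrix(np.eye(4), blocksize=(2, 2))
+    #   >>> A.data[0] = 0            # zero out the first stored block
+    #   >>> A.nnz                    # the explicit zeros are still stored
+    #   8
+    #   >>> A.eliminate_zeros()      # drop the all-zero block
+    #   >>> A.nnz
+    #   4
+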
+    def sum_duplicates(self):
+        """Eliminate duplicate matrix entries by adding them together
+
+        This is an *in place* operation
+        """
+        if self.has_canonical_format:
+            return
+        self.sort_indices()
+        R, C = self.blocksize
+        M, N = self.shape
+
+        # port of _sparsetools.csr_sum_duplicates
+        n_row = M // R
+        nnz = 0
+        row_end = 0
+        for i in range(n_row):
+            jj = row_end
+            row_end = self.indptr[i+1]
+            while jj < row_end:
+                j = self.indices[jj]
+                x = self.data[jj]
+                jj += 1
+                while jj < row_end and self.indices[jj] == j:
+                    x += self.data[jj]
+                    jj += 1
+                self.indices[nnz] = j
+                self.data[nnz] = x
+                nnz += 1
+            self.indptr[i+1] = nnz
+
+        self.prune()  # nnz may have changed
+        self.has_canonical_format = True
+
+    def sort_indices(self):
+        """Sort the indices of this matrix *in place*
+        """
+        if self.has_sorted_indices:
+            return
+
+        R,C = self.blocksize
+        M,N = self.shape
+
+        bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())
+
+        self.has_sorted_indices = True
+
+    def prune(self):
+        """ Remove empty space after all non-zero elements.
+        """
+
+        R,C = self.blocksize
+        M,N = self.shape
+
+        if len(self.indptr) != M//R + 1:
+            raise ValueError("index pointer has invalid length")
+
+        bnnz = self.indptr[-1]
+
+        if len(self.indices) < bnnz:
+            raise ValueError("indices array has too few elements")
+        if len(self.data) < bnnz:
+            raise ValueError("data array has too few elements")
+
+        self.data = self.data[:bnnz]
+        self.indices = self.indices[:bnnz]
+
+    # utility functions
+    def _binopt(self, other, op, in_shape=None, out_shape=None):
+        """Apply the binary operation fn to two sparse matrices."""
+
+        # Ideally we'd take the GCDs of the blocksize dimensions
+        # and explode self and other to match.
+        other = self.__class__(other, blocksize=self.blocksize)
+
+        # e.g. bsr_plus_bsr, etc.
+        fn = getattr(_sparsetools, self.format + op + self.format)
+
+        R,C = self.blocksize
+
+        max_bnnz = len(self.data) + len(other.data)
+        idx_dtype = get_index_dtype((self.indptr, self.indices,
+                                     other.indptr, other.indices),
+                                    maxval=max_bnnz)
+        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
+        indices = np.empty(max_bnnz, dtype=idx_dtype)
+
+        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
+        if op in bool_ops:
+            data = np.empty(R*C*max_bnnz, dtype=np.bool_)
+        else:
+            data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))
+
+        fn(self.shape[0]//R, self.shape[1]//C, R, C,
+           self.indptr.astype(idx_dtype),
+           self.indices.astype(idx_dtype),
+           self.data,
+           other.indptr.astype(idx_dtype),
+           other.indices.astype(idx_dtype),
+           np.ravel(other.data),
+           indptr,
+           indices,
+           data)
+
+        actual_bnnz = indptr[-1]
+        indices = indices[:actual_bnnz]
+        data = data[:R*C*actual_bnnz]
+
+        if actual_bnnz < max_bnnz/2:
+            indices = indices.copy()
+            data = data.copy()
+
+        data = data.reshape(-1,R,C)
+
+        return self.__class__((data, indices, indptr), shape=self.shape)
+
+    # needed by _data_matrix
+    def _with_data(self,data,copy=True):
+        """Returns a matrix with the same sparsity structure as self,
+        but with different data. By default the structure arrays
+        (i.e. .indptr and .indices) are copied.
+ """ + if copy: + return self.__class__((data,self.indices.copy(),self.indptr.copy()), + shape=self.shape,dtype=data.dtype) + else: + return self.__class__((data,self.indices,self.indptr), + shape=self.shape,dtype=data.dtype) + +# # these functions are used by the parent class +# # to remove redudancy between bsc_matrix and bsr_matrix +# def _swap(self,x): +# """swap the members of x if this is a column-oriented matrix +# """ +# return (x[0],x[1]) + + +def isspmatrix_bsr(x): + """Is x of a bsr_matrix type? + + Parameters + ---------- + x + object to check for being a bsr matrix + + Returns + ------- + bool + True if x is a bsr matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import bsr_matrix, isspmatrix_bsr + >>> isspmatrix_bsr(bsr_matrix([[5]])) + True + + >>> from scipy.sparse import bsr_matrix, csr_matrix, isspmatrix_bsr + >>> isspmatrix_bsr(csr_matrix([[5]])) + False + """ + return isinstance(x, bsr_matrix) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/bsr.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/bsr.pyc new file mode 100644 index 0000000..238817d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/bsr.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/compressed.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/compressed.py new file mode 100644 index 0000000..4eb327b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/compressed.py @@ -0,0 +1,1206 @@ +"""Base class for sparse matrix formats using compressed storage.""" +from __future__ import division, print_function, absolute_import + +__all__ = [] + +from warnings import warn +import operator + +import numpy as np +from scipy._lib.six import zip as izip +from scipy._lib._util import _prune_array + +from .base import spmatrix, isspmatrix, SparseEfficiencyWarning +from .data import _data_matrix, _minmax_mixin +from .dia import dia_matrix +from . 
import _sparsetools +from .sputils import (upcast, upcast_char, to_native, isdense, isshape, + getdtype, isscalarlike, IndexMixin, get_index_dtype, + downcast_intp_index, get_sum_dtype, check_shape) + + +class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): + """base matrix class for compressed row and column oriented matrices""" + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _data_matrix.__init__(self) + + if isspmatrix(arg1): + if arg1.format == self.format and copy: + arg1 = arg1.copy() + else: + arg1 = arg1.asformat(self.format) + self._set_self(arg1) + + elif isinstance(arg1, tuple): + if isshape(arg1): + # It's a tuple of matrix dimensions (M, N) + # create empty matrix + self._shape = check_shape(arg1) + M, N = self.shape + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + idx_dtype = get_index_dtype(maxval=max(M, N)) + self.data = np.zeros(0, getdtype(dtype, default=float)) + self.indices = np.zeros(0, idx_dtype) + self.indptr = np.zeros(self._swap((M, N))[0] + 1, + dtype=idx_dtype) + else: + if len(arg1) == 2: + # (data, ij) format + from .coo import coo_matrix + other = self.__class__(coo_matrix(arg1, shape=shape)) + self._set_self(other) + elif len(arg1) == 3: + # (data, indices, indptr) format + (data, indices, indptr) = arg1 + + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + maxval = None + if shape is not None: + maxval = max(shape) + idx_dtype = get_index_dtype((indices, indptr), + maxval=maxval, + check_contents=True) + + self.indices = np.array(indices, copy=copy, + dtype=idx_dtype) + self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype) + self.data = np.array(data, copy=copy, dtype=dtype) + else: + raise ValueError("unrecognized {}_matrix " + "constructor usage".format(self.format)) + + else: + # must be dense + try: + arg1 = np.asarray(arg1) + except Exception: + raise ValueError("unrecognized {}_matrix constructor usage" + "".format(self.format)) + from .coo import coo_matrix + self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype))) + + # Read matrix dimensions given, if any + if shape is not None: + self._shape = check_shape(shape) + else: + if self.shape is None: + # shape not already set, try to infer dimensions + try: + major_dim = len(self.indptr) - 1 + minor_dim = self.indices.max() + 1 + except Exception: + raise ValueError('unable to infer matrix dimensions') + else: + self._shape = check_shape(self._swap((major_dim, + minor_dim))) + + if dtype is not None: + self.data = np.asarray(self.data, dtype=dtype) + + self.check_format(full_check=False) + + def getnnz(self, axis=None): + if axis is None: + return int(self.indptr[-1]) + else: + if axis < 0: + axis += 2 + axis, _ = self._swap((axis, 1 - axis)) + _, N = self._swap(self.shape) + if axis == 0: + return np.bincount(downcast_intp_index(self.indices), + minlength=N) + elif axis == 1: + return np.diff(self.indptr) + raise ValueError('axis out of bounds') + + getnnz.__doc__ = spmatrix.getnnz.__doc__ + + def _set_self(self, other, copy=False): + """take the member variables of other and assign them to self""" + + if copy: + other = other.copy() + + self.data = other.data + self.indices = other.indices + self.indptr = other.indptr + self._shape = check_shape(other.shape) + + def check_format(self, full_check=True): + """check whether the matrix format is valid + + Parameters + ---------- + full_check : bool, optional + If `True`, rigorous check, O(N) operations. 
Otherwise + basic check, O(1) operations (default True). + """ + # use _swap to determine proper bounds + major_name, minor_name = self._swap(('row', 'column')) + major_dim, minor_dim = self._swap(self.shape) + + # index arrays should have integer data types + if self.indptr.dtype.kind != 'i': + warn("indptr array has non-integer dtype ({})" + "".format(self.indptr.dtype.name), stacklevel=3) + if self.indices.dtype.kind != 'i': + warn("indices array has non-integer dtype ({})" + "".format(self.indices.dtype.name), stacklevel=3) + + idx_dtype = get_index_dtype((self.indptr, self.indices)) + self.indptr = np.asarray(self.indptr, dtype=idx_dtype) + self.indices = np.asarray(self.indices, dtype=idx_dtype) + self.data = to_native(self.data) + + # check array shapes + for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]: + if x != 1: + raise ValueError('data, indices, and indptr should be 1-D') + + # check index pointer + if (len(self.indptr) != major_dim + 1): + raise ValueError("index pointer size ({}) should be ({})" + "".format(len(self.indptr), major_dim + 1)) + if (self.indptr[0] != 0): + raise ValueError("index pointer should start with 0") + + # check index and data arrays + if (len(self.indices) != len(self.data)): + raise ValueError("indices and data should have the same size") + if (self.indptr[-1] > len(self.indices)): + raise ValueError("Last value of index pointer should be less than " + "the size of index and data arrays") + + self.prune() + + if full_check: + # check format validity (more expensive) + if self.nnz > 0: + if self.indices.max() >= minor_dim: + raise ValueError("{} index values must be < {}" + "".format(minor_name, minor_dim)) + if self.indices.min() < 0: + raise ValueError("{} index values must be >= 0" + "".format(minor_name)) + if np.diff(self.indptr).min() < 0: + raise ValueError("index pointer values must form a " + "non-decreasing sequence") + + # if not self.has_sorted_indices(): + # warn('Indices were not in sorted order. Sorting indices.') + # self.sort_indices() + # assert(self.has_sorted_indices()) + # TODO check for duplicates? + + ####################### + # Boolean comparisons # + ####################### + + def _scalar_binopt(self, other, op): + """Scalar version of self._binopt, for cases in which no new nonzeros + are added. Produces a new spmatrix in canonical form. + """ + self.sum_duplicates() + res = self._with_data(op(self.data, other), copy=True) + res.eliminate_zeros() + return res + + def __eq__(self, other): + # Scalar other. + if isscalarlike(other): + if np.isnan(other): + return self.__class__(self.shape, dtype=np.bool_) + + if other == 0: + warn("Comparing a sparse matrix with 0 using == is inefficient" + ", try using != instead.", SparseEfficiencyWarning, + stacklevel=3) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + inv = self._scalar_binopt(other, operator.ne) + return all_true - inv + else: + return self._scalar_binopt(other, operator.eq) + # Dense other. + elif isdense(other): + return self.todense() == other + # Sparse other. 
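+        # For example, with A = csr_matrix([[1, 0], [0, 2]]) and
+        # B = csr_matrix([[1, 0], [0, 3]]), (A == B).toarray() gives
+        # [[True, True], [True, False]]: '_ne_' marks the one differing
+        # entry, and subtracting it from an all-true matrix inverts it.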
+ elif isspmatrix(other): + warn("Comparing sparse matrices using == is inefficient, try using" + " != instead.", SparseEfficiencyWarning, stacklevel=3) + # TODO sparse broadcasting + if self.shape != other.shape: + return False + elif self.format != other.format: + other = other.asformat(self.format) + res = self._binopt(other, '_ne_') + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + return all_true - res + else: + return False + + def __ne__(self, other): + # Scalar other. + if isscalarlike(other): + if np.isnan(other): + warn("Comparing a sparse matrix with nan using != is" + " inefficient", SparseEfficiencyWarning, stacklevel=3) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + return all_true + elif other != 0: + warn("Comparing a sparse matrix with a nonzero scalar using !=" + " is inefficient, try using == instead.", + SparseEfficiencyWarning, stacklevel=3) + all_true = self.__class__(np.ones(self.shape), dtype=np.bool_) + inv = self._scalar_binopt(other, operator.eq) + return all_true - inv + else: + return self._scalar_binopt(other, operator.ne) + # Dense other. + elif isdense(other): + return self.todense() != other + # Sparse other. + elif isspmatrix(other): + # TODO sparse broadcasting + if self.shape != other.shape: + return True + elif self.format != other.format: + other = other.asformat(self.format) + return self._binopt(other, '_ne_') + else: + return True + + def _inequality(self, other, op, op_name, bad_scalar_msg): + # Scalar other. + if isscalarlike(other): + if 0 == other and op_name in ('_le_', '_ge_'): + raise NotImplementedError(" >= and <= don't work with 0.") + elif op(0, other): + warn(bad_scalar_msg, SparseEfficiencyWarning) + other_arr = np.empty(self.shape, dtype=np.result_type(other)) + other_arr.fill(other) + other_arr = self.__class__(other_arr) + return self._binopt(other_arr, op_name) + else: + return self._scalar_binopt(other, op) + # Dense other. + elif isdense(other): + return op(self.todense(), other) + # Sparse other. 
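+        # For sparse operands the strict comparisons go straight through
+        # _binopt below; A <= B is instead computed as all_true - (A > B),
+        # since evaluating '_le_' directly would be dense over the shared
+        # zero pattern (0 <= 0 is True almost everywhere).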
+ elif isspmatrix(other): + # TODO sparse broadcasting + if self.shape != other.shape: + raise ValueError("inconsistent shapes") + elif self.format != other.format: + other = other.asformat(self.format) + if op_name not in ('_ge_', '_le_'): + return self._binopt(other, op_name) + + warn("Comparing sparse matrices using >= and <= is inefficient, " + "using <, >, or !=, instead.", SparseEfficiencyWarning) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_') + return all_true - res + else: + raise ValueError("Operands could not be compared.") + + def __lt__(self, other): + return self._inequality(other, operator.lt, '_lt_', + "Comparing a sparse matrix with a scalar " + "greater than zero using < is inefficient, " + "try using >= instead.") + + def __gt__(self, other): + return self._inequality(other, operator.gt, '_gt_', + "Comparing a sparse matrix with a scalar " + "less than zero using > is inefficient, " + "try using <= instead.") + + def __le__(self, other): + return self._inequality(other, operator.le, '_le_', + "Comparing a sparse matrix with a scalar " + "greater than zero using <= is inefficient, " + "try using > instead.") + + def __ge__(self, other): + return self._inequality(other, operator.ge, '_ge_', + "Comparing a sparse matrix with a scalar " + "less than zero using >= is inefficient, " + "try using < instead.") + + ################################# + # Arithmetic operator overrides # + ################################# + + def _add_dense(self, other): + if other.shape != self.shape: + raise ValueError('Incompatible shapes.') + dtype = upcast_char(self.dtype.char, other.dtype.char) + order = self._swap('CF')[0] + result = np.array(other, dtype=dtype, order=order, copy=True) + M, N = self._swap(self.shape) + y = result if result.flags.c_contiguous else result.T + _sparsetools.csr_todense(M, N, self.indptr, self.indices, self.data, y) + return np.matrix(result, copy=False) + + def _add_sparse(self, other): + return self._binopt(other, '_plus_') + + def _sub_sparse(self, other): + return self._binopt(other, '_minus_') + + def multiply(self, other): + """Point-wise multiplication by another matrix, vector, or + scalar. + """ + # Scalar multiplication. + if isscalarlike(other): + return self._mul_scalar(other) + # Sparse matrix or vector. + if isspmatrix(other): + if self.shape == other.shape: + other = self.__class__(other) + return self._binopt(other, '_elmul_') + # Single element. + elif other.shape == (1, 1): + return self._mul_scalar(other.toarray()[0, 0]) + elif self.shape == (1, 1): + return other._mul_scalar(self.toarray()[0, 0]) + # A row times a column. + elif self.shape[1] == 1 and other.shape[0] == 1: + return self._mul_sparse_matrix(other.tocsc()) + elif self.shape[0] == 1 and other.shape[1] == 1: + return other._mul_sparse_matrix(self.tocsc()) + # Row vector times matrix. other is a row. + elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: + other = dia_matrix((other.toarray().ravel(), [0]), + shape=(other.shape[1], other.shape[1])) + return self._mul_sparse_matrix(other) + # self is a row. + elif self.shape[0] == 1 and self.shape[1] == other.shape[1]: + copy = dia_matrix((self.toarray().ravel(), [0]), + shape=(self.shape[1], self.shape[1])) + return other._mul_sparse_matrix(copy) + # Column vector times matrix. other is a column. 
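+            # For example, multiplying an (M, N) sparse matrix elementwise
+            # by an (M, 1) sparse column scales row i by column[i]: the
+            # column is expanded into an M x M diagonal matrix so the
+            # product reduces to an ordinary sparse matrix multiply.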
+ elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: + other = dia_matrix((other.toarray().ravel(), [0]), + shape=(other.shape[0], other.shape[0])) + return other._mul_sparse_matrix(self) + # self is a column. + elif self.shape[1] == 1 and self.shape[0] == other.shape[0]: + copy = dia_matrix((self.toarray().ravel(), [0]), + shape=(self.shape[0], self.shape[0])) + return copy._mul_sparse_matrix(other) + else: + raise ValueError("inconsistent shapes") + + # Assume other is a dense matrix/array, which produces a single-item + # object array if other isn't convertible to ndarray. + other = np.atleast_2d(other) + + if other.ndim != 2: + return np.multiply(self.toarray(), other) + # Single element / wrapped object. + if other.size == 1: + return self._mul_scalar(other.flat[0]) + # Fast case for trivial sparse matrix. + elif self.shape == (1, 1): + return np.multiply(self.toarray()[0, 0], other) + + from .coo import coo_matrix + ret = self.tocoo() + # Matching shapes. + if self.shape == other.shape: + data = np.multiply(ret.data, other[ret.row, ret.col]) + # Sparse row vector times... + elif self.shape[0] == 1: + if other.shape[1] == 1: # Dense column vector. + data = np.multiply(ret.data, other) + elif other.shape[1] == self.shape[1]: # Dense matrix. + data = np.multiply(ret.data, other[:, ret.col]) + else: + raise ValueError("inconsistent shapes") + row = np.repeat(np.arange(other.shape[0]), len(ret.row)) + col = np.tile(ret.col, other.shape[0]) + return coo_matrix((data.view(np.ndarray).ravel(), (row, col)), + shape=(other.shape[0], self.shape[1]), + copy=False) + # Sparse column vector times... + elif self.shape[1] == 1: + if other.shape[0] == 1: # Dense row vector. + data = np.multiply(ret.data[:, None], other) + elif other.shape[0] == self.shape[0]: # Dense matrix. + data = np.multiply(ret.data[:, None], other[ret.row]) + else: + raise ValueError("inconsistent shapes") + row = np.repeat(ret.row, other.shape[1]) + col = np.tile(np.arange(other.shape[1]), len(ret.col)) + return coo_matrix((data.view(np.ndarray).ravel(), (row, col)), + shape=(self.shape[0], other.shape[1]), + copy=False) + # Sparse matrix times dense row vector. + elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: + data = np.multiply(ret.data, other[:, ret.col].ravel()) + # Sparse matrix times dense column vector. 
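+        # For example, for a dense (M, 1) column the branch below scales each
+        # stored COO entry (i, j, v) by other[i] directly, so the result
+        # keeps self's sparsity pattern instead of densifying.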
+        elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
+            data = np.multiply(ret.data, other[ret.row].ravel())
+        else:
+            raise ValueError("inconsistent shapes")
+        ret.data = data.view(np.ndarray).ravel()
+        return ret
+
+    ###########################
+    # Multiplication handlers #
+    ###########################
+
+    def _mul_vector(self, other):
+        M, N = self.shape
+
+        # output array
+        result = np.zeros(M, dtype=upcast_char(self.dtype.char,
+                                               other.dtype.char))
+
+        # csr_matvec or csc_matvec
+        fn = getattr(_sparsetools, self.format + '_matvec')
+        fn(M, N, self.indptr, self.indices, self.data, other, result)
+
+        return result
+
+    def _mul_multivector(self, other):
+        M, N = self.shape
+        n_vecs = other.shape[1]  # number of column vectors
+
+        result = np.zeros((M, n_vecs),
+                          dtype=upcast_char(self.dtype.char, other.dtype.char))
+
+        # csr_matvecs or csc_matvecs
+        fn = getattr(_sparsetools, self.format + '_matvecs')
+        fn(M, N, n_vecs, self.indptr, self.indices, self.data,
+           other.ravel(), result.ravel())
+
+        return result
+
+    def _mul_sparse_matrix(self, other):
+        M, K1 = self.shape
+        K2, N = other.shape
+
+        major_axis = self._swap((M, N))[0]
+        other = self.__class__(other)  # convert to this format
+
+        idx_dtype = get_index_dtype((self.indptr, self.indices,
+                                     other.indptr, other.indices),
+                                    maxval=M*N)
+        indptr = np.empty(major_axis + 1, dtype=idx_dtype)
+
+        fn = getattr(_sparsetools, self.format + '_matmat_pass1')
+        fn(M, N,
+           np.asarray(self.indptr, dtype=idx_dtype),
+           np.asarray(self.indices, dtype=idx_dtype),
+           np.asarray(other.indptr, dtype=idx_dtype),
+           np.asarray(other.indices, dtype=idx_dtype),
+           indptr)
+
+        nnz = indptr[-1]
+        idx_dtype = get_index_dtype((self.indptr, self.indices,
+                                     other.indptr, other.indices),
+                                    maxval=nnz)
+        indptr = np.asarray(indptr, dtype=idx_dtype)
+        indices = np.empty(nnz, dtype=idx_dtype)
+        data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
+
+        fn = getattr(_sparsetools, self.format + '_matmat_pass2')
+        fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
+           np.asarray(self.indices, dtype=idx_dtype),
+           self.data,
+           np.asarray(other.indptr, dtype=idx_dtype),
+           np.asarray(other.indices, dtype=idx_dtype),
+           other.data,
+           indptr, indices, data)
+
+        return self.__class__((data, indices, indptr), shape=(M, N))
+
+    def diagonal(self, k=0):
+        rows, cols = self.shape
+        if k <= -rows or k >= cols:
+            raise ValueError("k exceeds matrix dimensions")
+        fn = getattr(_sparsetools, self.format + "_diagonal")
+        y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
+                     dtype=upcast(self.dtype))
+        fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
+           self.data, y)
+        return y
+
+    diagonal.__doc__ = spmatrix.diagonal.__doc__
+
+    #####################
+    # Other binary ops #
+    #####################
+
+    def _maximum_minimum(self, other, npop, op_name, dense_check):
+        if isscalarlike(other):
+            if dense_check(other):
+                warn("Taking maximum (minimum) with > 0 (< 0) number results"
+                     " in a dense matrix.", SparseEfficiencyWarning,
+                     stacklevel=3)
+                other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
+                other_arr.fill(other)
+                other_arr = self.__class__(other_arr)
+                return self._binopt(other_arr, op_name)
+            else:
+                self.sum_duplicates()
+                new_data = npop(self.data, np.asarray(other))
+                mat = self.__class__((new_data, self.indices, self.indptr),
+                                     dtype=new_data.dtype, shape=self.shape)
+                return mat
+        elif isdense(other):
+            return npop(self.todense(), other)
+        elif isspmatrix(other):
+            return self._binopt(other, op_name)
+        else:
+            raise ValueError("Operands not
compatible.") + + def maximum(self, other): + return self._maximum_minimum(other, np.maximum, + '_maximum_', lambda x: np.asarray(x) > 0) + + maximum.__doc__ = spmatrix.maximum.__doc__ + + def minimum(self, other): + return self._maximum_minimum(other, np.minimum, + '_minimum_', lambda x: np.asarray(x) < 0) + + minimum.__doc__ = spmatrix.minimum.__doc__ + + ##################### + # Reduce operations # + ##################### + + def sum(self, axis=None, dtype=None, out=None): + """Sum the matrix over the given axis. If the axis is None, sum + over both rows and columns, returning a scalar. + """ + # The spmatrix base class already does axis=0 and axis=1 efficiently + # so we only do the case axis=None here + if (not hasattr(self, 'blocksize') and + axis in self._swap(((1, -1), (0, 2)))[0]): + # faster than multiplication for large minor axis in CSC/CSR + res_dtype = get_sum_dtype(self.dtype) + ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype) + + major_index, value = self._minor_reduce(np.add) + ret[major_index] = value + ret = np.asmatrix(ret) + if axis % 2 == 1: + ret = ret.T + + if out is not None and out.shape != ret.shape: + raise ValueError('dimensions do not match') + + return ret.sum(axis=(), dtype=dtype, out=out) + # spmatrix will handle the remaining situations when axis + # is in {None, -1, 0, 1} + else: + return spmatrix.sum(self, axis=axis, dtype=dtype, out=out) + + sum.__doc__ = spmatrix.sum.__doc__ + + def _minor_reduce(self, ufunc, data=None): + """Reduce nonzeros with a ufunc over the minor axis when non-empty + + Can be applied to a function of self.data by supplying data parameter. + + Warning: this does not call sum_duplicates() + + Returns + ------- + major_index : array of ints + Major indices where nonzero + + value : array of self.dtype + Reduce result for nonzeros in each major_index + """ + if data is None: + data = self.data + major_index = np.flatnonzero(np.diff(self.indptr)) + value = ufunc.reduceat(data, + downcast_intp_index(self.indptr[major_index])) + return major_index, value + + ####################### + # Getting and Setting # + ####################### + + def __setitem__(self, index, x): + # Process arrays from IndexMixin + i, j = self._unpack_index(index) + i, j = self._index_to_arrays(i, j) + + if isspmatrix(x): + broadcast_row = x.shape[0] == 1 and i.shape[0] != 1 + broadcast_col = x.shape[1] == 1 and i.shape[1] != 1 + if not ((broadcast_row or x.shape[0] == i.shape[0]) and + (broadcast_col or x.shape[1] == i.shape[1])): + raise ValueError("shape mismatch in assignment") + + # clear entries that will be overwritten + ci, cj = self._swap((i.ravel(), j.ravel())) + self._zero_many(ci, cj) + + x = x.tocoo(copy=True) + x.sum_duplicates() + r, c = x.row, x.col + x = np.asarray(x.data, dtype=self.dtype) + if broadcast_row: + r = np.repeat(np.arange(i.shape[0]), len(r)) + c = np.tile(c, i.shape[0]) + x = np.tile(x, i.shape[0]) + if broadcast_col: + r = np.repeat(r, i.shape[1]) + c = np.tile(np.arange(i.shape[1]), len(c)) + x = np.repeat(x, i.shape[1]) + # only assign entries in the new sparsity structure + i = i[r, c] + j = j[r, c] + else: + # Make x and i into the same shape + x = np.asarray(x, dtype=self.dtype) + x, _ = np.broadcast_arrays(x, i) + + if x.shape != i.shape: + raise ValueError("shape mismatch in assignment") + + if np.size(x) == 0: + return + i, j = self._swap((i.ravel(), j.ravel())) + self._set_many(i, j, x.ravel()) + + def _setdiag(self, values, k): + if 0 in self.shape: + return + + M, N = self.shape + broadcast = (values.ndim == 0) + 
+ if k < 0: + if broadcast: + max_index = min(M + k, N) + else: + max_index = min(M + k, N, len(values)) + i = np.arange(max_index, dtype=self.indices.dtype) + j = np.arange(max_index, dtype=self.indices.dtype) + i -= k + + else: + if broadcast: + max_index = min(M, N - k) + else: + max_index = min(M, N - k, len(values)) + i = np.arange(max_index, dtype=self.indices.dtype) + j = np.arange(max_index, dtype=self.indices.dtype) + j += k + + if not broadcast: + values = values[:len(i)] + + self[i, j] = values + + def _prepare_indices(self, i, j): + M, N = self._swap(self.shape) + + def check_bounds(indices, bound): + idx = indices.max() + if idx >= bound: + raise IndexError('index (%d) out of range (>= %d)' % + (idx, bound)) + idx = indices.min() + if idx < -bound: + raise IndexError('index (%d) out of range (< -%d)' % + (idx, bound)) + + check_bounds(i, M) + check_bounds(j, N) + + i = np.asarray(i, dtype=self.indices.dtype) + j = np.asarray(j, dtype=self.indices.dtype) + return i, j, M, N + + def _set_many(self, i, j, x): + """Sets value at each (i, j) to x + + Here (i,j) index major and minor respectively, and must not contain + duplicate entries. + """ + i, j, M, N = self._prepare_indices(i, j) + + n_samples = len(x) + offsets = np.empty(n_samples, dtype=self.indices.dtype) + ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices, + n_samples, i, j, offsets) + if ret == 1: + # rinse and repeat + self.sum_duplicates() + _sparsetools.csr_sample_offsets(M, N, self.indptr, + self.indices, n_samples, i, j, + offsets) + + if -1 not in offsets: + # only affects existing non-zero cells + self.data[offsets] = x + return + + else: + warn("Changing the sparsity structure of a {}_matrix is expensive." + " lil_matrix is more efficient.".format(self.format), + SparseEfficiencyWarning, stacklevel=3) + # replace where possible + mask = offsets > -1 + self.data[offsets[mask]] = x[mask] + # only insertions remain + mask = ~mask + i = i[mask] + i[i < 0] += M + j = j[mask] + j[j < 0] += N + self._insert_many(i, j, x[mask]) + + def _zero_many(self, i, j): + """Sets value at each (i, j) to zero, preserving sparsity structure. + + Here (i,j) index major and minor respectively. + """ + i, j, M, N = self._prepare_indices(i, j) + + n_samples = len(i) + offsets = np.empty(n_samples, dtype=self.indices.dtype) + ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices, + n_samples, i, j, offsets) + if ret == 1: + # rinse and repeat + self.sum_duplicates() + _sparsetools.csr_sample_offsets(M, N, self.indptr, + self.indices, n_samples, i, j, + offsets) + + # only assign zeros to the existing sparsity structure + self.data[offsets[offsets > -1]] = 0 + + def _insert_many(self, i, j, x): + """Inserts new nonzero at each (i, j) with value x + + Here (i,j) index major and minor respectively. + i, j and x must be non-empty, 1d arrays. + Inserts each major group (e.g. all entries per row) at a time. + Maintains has_sorted_indices property. + Modifies i, j, x in place. 
+ """ + order = np.argsort(i, kind='mergesort') # stable for duplicates + i = i.take(order, mode='clip') + j = j.take(order, mode='clip') + x = x.take(order, mode='clip') + + do_sort = self.has_sorted_indices + + # Update index data type + idx_dtype = get_index_dtype((self.indices, self.indptr), + maxval=(self.indptr[-1] + x.size)) + self.indptr = np.asarray(self.indptr, dtype=idx_dtype) + self.indices = np.asarray(self.indices, dtype=idx_dtype) + i = np.asarray(i, dtype=idx_dtype) + j = np.asarray(j, dtype=idx_dtype) + + # Collate old and new in chunks by major index + indices_parts = [] + data_parts = [] + ui, ui_indptr = np.unique(i, return_index=True) + ui_indptr = np.append(ui_indptr, len(j)) + new_nnzs = np.diff(ui_indptr) + prev = 0 + for c, (ii, js, je) in enumerate(izip(ui, ui_indptr, ui_indptr[1:])): + # old entries + start = self.indptr[prev] + stop = self.indptr[ii] + indices_parts.append(self.indices[start:stop]) + data_parts.append(self.data[start:stop]) + + # handle duplicate j: keep last setting + uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True) + if len(uj) == je - js: + indices_parts.append(j[js:je]) + data_parts.append(x[js:je]) + else: + indices_parts.append(j[js:je][::-1][uj_indptr]) + data_parts.append(x[js:je][::-1][uj_indptr]) + new_nnzs[c] = len(uj) + + prev = ii + + # remaining old entries + start = self.indptr[ii] + indices_parts.append(self.indices[start:]) + data_parts.append(self.data[start:]) + + # update attributes + self.indices = np.concatenate(indices_parts) + self.data = np.concatenate(data_parts) + nnzs = np.empty(self.indptr.shape, dtype=idx_dtype) + nnzs[0] = idx_dtype(0) + indptr_diff = np.diff(self.indptr) + indptr_diff[ui] += new_nnzs + nnzs[1:] = indptr_diff + self.indptr = np.cumsum(nnzs, out=nnzs) + + if do_sort: + # TODO: only sort where necessary + self.has_sorted_indices = False + self.sort_indices() + + self.check_format(full_check=False) + + def _get_single_element(self, row, col): + M, N = self.shape + if (row < 0): + row += M + if (col < 0): + col += N + if not (0 <= row < M) or not (0 <= col < N): + raise IndexError("index out of bounds: 0<=%d<%d, 0<=%d<%d" % + (row, M, col, N)) + + major_index, minor_index = self._swap((row, col)) + + start = self.indptr[major_index] + end = self.indptr[major_index + 1] + + if self.has_sorted_indices: + # Copies may be made, if dtypes of indices are not identical + minor_index = self.indices.dtype.type(minor_index) + minor_indices = self.indices[start:end] + insert_pos_left = np.searchsorted( + minor_indices, minor_index, side='left') + insert_pos_right = insert_pos_left + np.searchsorted( + minor_indices[insert_pos_left:], minor_index, side='right') + return self.data[start + insert_pos_left: + start + insert_pos_right].sum(dtype=self.dtype) + else: + return np.compress(minor_index == self.indices[start:end], + self.data[start:end]).sum(dtype=self.dtype) + + def _get_submatrix(self, slice0, slice1): + """Return a submatrix of this matrix (new matrix is created).""" + + slice0, slice1 = self._swap((slice0, slice1)) + shape0, shape1 = self._swap(self.shape) + + def _process_slice(sl, num): + if isinstance(sl, slice): + i0, i1 = sl.start, sl.stop + if i0 is None: + i0 = 0 + elif i0 < 0: + i0 = num + i0 + + if i1 is None: + i1 = num + elif i1 < 0: + i1 = num + i1 + + return i0, i1 + + elif np.isscalar(sl): + if sl < 0: + sl += num + + return sl, sl + 1 + + else: + return sl[0], sl[1] + + def _in_bounds(i0, i1, num): + if not (0 <= i0 < num) or not (0 < i1 <= num) or not (i0 < i1): + raise 
IndexError("index out of bounds:" + " 0<={i0}<{num}, 0<={i1}<{num}, {i0}<{i1}" + "".format(i0=i0, num=num, i1=i1)) + + i0, i1 = _process_slice(slice0, shape0) + j0, j1 = _process_slice(slice1, shape1) + _in_bounds(i0, i1, shape0) + _in_bounds(j0, j1, shape1) + + aux = _sparsetools.get_csr_submatrix(shape0, shape1, + self.indptr, self.indices, + self.data, + i0, i1, j0, j1) + + data, indices, indptr = aux[2], aux[1], aux[0] + shape = self._swap((i1 - i0, j1 - j0)) + + return self.__class__((data, indices, indptr), shape=shape) + + ###################### + # Conversion methods # + ###################### + + def tocoo(self, copy=True): + major_dim, minor_dim = self._swap(self.shape) + minor_indices = self.indices + major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype) + _sparsetools.expandptr(major_dim, self.indptr, major_indices) + row, col = self._swap((major_indices, minor_indices)) + + from .coo import coo_matrix + return coo_matrix((self.data, (row, col)), self.shape, copy=copy, + dtype=self.dtype) + + tocoo.__doc__ = spmatrix.tocoo.__doc__ + + def toarray(self, order=None, out=None): + if out is None and order is None: + order = self._swap('cf')[0] + out = self._process_toarray_args(order, out) + if not (out.flags.c_contiguous or out.flags.f_contiguous): + raise ValueError('Output array must be C or F contiguous') + # align ideal order with output array order + if out.flags.c_contiguous: + x = self.tocsr() + y = out + else: + x = self.tocsc() + y = out.T + M, N = x._swap(x.shape) + _sparsetools.csr_todense(M, N, x.indptr, x.indices, x.data, y) + return out + + toarray.__doc__ = spmatrix.toarray.__doc__ + + ############################################################## + # methods that examine or modify the internal data structure # + ############################################################## + + def eliminate_zeros(self): + """Remove zero entries from the matrix + + This is an *in place* operation + """ + M, N = self._swap(self.shape) + _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices, + self.data) + self.prune() # nnz may have changed + + def __get_has_canonical_format(self): + """Determine whether the matrix has sorted indices and no duplicates + + Returns + - True: if the above applies + - False: otherwise + + has_canonical_format implies has_sorted_indices, so if the latter flag + is False, so will the former be; if the former is found True, the + latter flag is also set. 
+ """ + + # first check to see if result was cached + if not getattr(self, '_has_sorted_indices', True): + # not sorted => not canonical + self._has_canonical_format = False + elif not hasattr(self, '_has_canonical_format'): + self.has_canonical_format = _sparsetools.csr_has_canonical_format( + len(self.indptr) - 1, self.indptr, self.indices) + return self._has_canonical_format + + def __set_has_canonical_format(self, val): + self._has_canonical_format = bool(val) + if val: + self.has_sorted_indices = True + + has_canonical_format = property(fget=__get_has_canonical_format, + fset=__set_has_canonical_format) + + def sum_duplicates(self): + """Eliminate duplicate matrix entries by adding them together + + The is an *in place* operation + """ + if self.has_canonical_format: + return + self.sort_indices() + + M, N = self._swap(self.shape) + _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices, + self.data) + + self.prune() # nnz may have changed + self.has_canonical_format = True + + def __get_sorted(self): + """Determine whether the matrix has sorted indices + + Returns + - True: if the indices of the matrix are in sorted order + - False: otherwise + + """ + + # first check to see if result was cached + if not hasattr(self, '_has_sorted_indices'): + self._has_sorted_indices = _sparsetools.csr_has_sorted_indices( + len(self.indptr) - 1, self.indptr, self.indices) + return self._has_sorted_indices + + def __set_sorted(self, val): + self._has_sorted_indices = bool(val) + + has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted) + + def sorted_indices(self): + """Return a copy of this matrix with sorted indices + """ + A = self.copy() + A.sort_indices() + return A + + # an alternative that has linear complexity is the following + # although the previous option is typically faster + # return self.toother().toother() + + def sort_indices(self): + """Sort the indices of this matrix *in place* + """ + + if not self.has_sorted_indices: + _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr, + self.indices, self.data) + self.has_sorted_indices = True + + def prune(self): + """Remove empty space after all non-zero elements. + """ + major_dim = self._swap(self.shape)[0] + + if len(self.indptr) != major_dim + 1: + raise ValueError('index pointer has invalid length') + if len(self.indices) < self.nnz: + raise ValueError('indices array has fewer than nnz elements') + if len(self.data) < self.nnz: + raise ValueError('data array has fewer than nnz elements') + + self.indices = _prune_array(self.indices[:self.nnz]) + self.data = _prune_array(self.data[:self.nnz]) + + def resize(self, *shape): + shape = check_shape(shape) + if hasattr(self, 'blocksize'): + bm, bn = self.blocksize + new_M, rm = divmod(shape[0], bm) + new_N, rn = divmod(shape[1], bn) + if rm or rn: + raise ValueError("shape must be divisible into %s blocks. 
" + "Got %s" % (self.blocksize, shape)) + M, N = self.shape[0] // bm, self.shape[1] // bn + else: + new_M, new_N = self._swap(shape) + M, N = self._swap(self.shape) + + if new_M < M: + self.indices = self.indices[:self.indptr[new_M]] + self.data = self.data[:self.indptr[new_M]] + self.indptr = self.indptr[:new_M + 1] + elif new_M > M: + self.indptr = np.resize(self.indptr, new_M + 1) + self.indptr[M + 1:].fill(self.indptr[M]) + + if new_N < N: + mask = self.indices < new_N + if not np.all(mask): + self.indices = self.indices[mask] + self.data = self.data[mask] + major_index, val = self._minor_reduce(np.add, mask) + self.indptr.fill(0) + self.indptr[1:][major_index] = val + np.cumsum(self.indptr, out=self.indptr) + + self._shape = shape + + resize.__doc__ = spmatrix.resize.__doc__ + + ################### + # utility methods # + ################### + + # needed by _data_matrix + def _with_data(self, data, copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the structure arrays + (i.e. .indptr and .indices) are copied. + """ + if copy: + return self.__class__((data, self.indices.copy(), + self.indptr.copy()), + shape=self.shape, + dtype=data.dtype) + else: + return self.__class__((data, self.indices, self.indptr), + shape=self.shape, dtype=data.dtype) + + def _binopt(self, other, op): + """apply the binary operation fn to two sparse matrices.""" + other = self.__class__(other) + + # e.g. csr_plus_csr, csr_minus_csr, etc. + fn = getattr(_sparsetools, self.format + op + self.format) + + maxnnz = self.nnz + other.nnz + idx_dtype = get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=maxnnz) + indptr = np.empty(self.indptr.shape, dtype=idx_dtype) + indices = np.empty(maxnnz, dtype=idx_dtype) + + bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] + if op in bool_ops: + data = np.empty(maxnnz, dtype=np.bool_) + else: + data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype)) + + fn(self.shape[0], self.shape[1], + np.asarray(self.indptr, dtype=idx_dtype), + np.asarray(self.indices, dtype=idx_dtype), + self.data, + np.asarray(other.indptr, dtype=idx_dtype), + np.asarray(other.indices, dtype=idx_dtype), + other.data, + indptr, indices, data) + + A = self.__class__((data, indices, indptr), shape=self.shape) + A.prune() + + return A + + def _divide_sparse(self, other): + """ + Divide this matrix by a second sparse matrix. + """ + if other.shape != self.shape: + raise ValueError('inconsistent shapes') + + r = self._binopt(other, '_eldiv_') + + if np.issubdtype(r.dtype, np.inexact): + # Eldiv leaves entries outside the combined sparsity + # pattern empty, so they must be filled manually. + # Everything outside of other's sparsity is NaN, and everything + # inside it is either zero or defined by eldiv. 
+ out = np.empty(self.shape, dtype=self.dtype) + out.fill(np.nan) + row, col = other.nonzero() + out[row, col] = 0 + r = r.tocoo() + out[r.row, r.col] = r.data + out = np.matrix(out) + else: + # integers types go with nan <-> 0 + out = r + + return out diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/compressed.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/compressed.pyc new file mode 100644 index 0000000..bcae939 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/compressed.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/construct.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/construct.py new file mode 100644 index 0000000..3ba424f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/construct.py @@ -0,0 +1,842 @@ +"""Functions to construct sparse matrices +""" +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum', + 'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag'] + + +import numpy as np +from scipy._lib._numpy_compat import get_randint + +from scipy._lib.six import xrange + +from .sputils import upcast, get_index_dtype, isscalarlike + +from .csr import csr_matrix +from .csc import csc_matrix +from .bsr import bsr_matrix +from .coo import coo_matrix +from .dia import dia_matrix + +from .base import issparse + + +def spdiags(data, diags, m, n, format=None): + """ + Return a sparse matrix from diagonals. + + Parameters + ---------- + data : array_like + matrix diagonals stored row-wise + diags : diagonals to set + - k = 0 the main diagonal + - k > 0 the k-th upper diagonal + - k < 0 the k-th lower diagonal + m, n : int + shape of the result + format : str, optional + Format of the result. By default (format=None) an appropriate sparse + matrix format is returned. This choice is subject to change. + + See Also + -------- + diags : more convenient form of this function + dia_matrix : the sparse DIAgonal format. + + Examples + -------- + >>> from scipy.sparse import spdiags + >>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + >>> diags = np.array([0, -1, 2]) + >>> spdiags(data, diags, 4, 4).toarray() + array([[1, 0, 3, 0], + [1, 2, 0, 4], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + """ + return dia_matrix((data, diags), shape=(m,n)).asformat(format) + + +def diags(diagonals, offsets=0, shape=None, format=None, dtype=None): + """ + Construct a sparse matrix from diagonals. + + Parameters + ---------- + diagonals : sequence of array_like + Sequence of arrays containing the matrix diagonals, + corresponding to `offsets`. + offsets : sequence of int or an int, optional + Diagonals to set: + - k = 0 the main diagonal (default) + - k > 0 the k-th upper diagonal + - k < 0 the k-th lower diagonal + shape : tuple of int, optional + Shape of the result. If omitted, a square matrix large enough + to contain the diagonals is returned. + format : {"dia", "csr", "csc", "lil", ...}, optional + Matrix format of the result. By default (format=None) an + appropriate sparse matrix format is returned. This choice is + subject to change. + dtype : dtype, optional + Data type of the matrix. + + See Also + -------- + spdiags : construct matrix from diagonals + + Notes + ----- + This function differs from `spdiags` in the way it handles + off-diagonals. 
+ + The result from `diags` is the sparse equivalent of:: + + np.diag(diagonals[0], offsets[0]) + + ... + + np.diag(diagonals[k], offsets[k]) + + Repeated diagonal offsets are disallowed. + + .. versionadded:: 0.11 + + Examples + -------- + >>> from scipy.sparse import diags + >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]] + >>> diags(diagonals, [0, -1, 2]).toarray() + array([[1, 0, 1, 0], + [1, 2, 0, 2], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + Broadcasting of scalars is supported (but shape needs to be + specified): + + >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray() + array([[-2., 1., 0., 0.], + [ 1., -2., 1., 0.], + [ 0., 1., -2., 1.], + [ 0., 0., 1., -2.]]) + + + If only one diagonal is wanted (as in `numpy.diag`), the following + works as well: + + >>> diags([1, 2, 3], 1).toarray() + array([[ 0., 1., 0., 0.], + [ 0., 0., 2., 0.], + [ 0., 0., 0., 3.], + [ 0., 0., 0., 0.]]) + """ + # if offsets is not a sequence, assume that there's only one diagonal + if isscalarlike(offsets): + # now check that there's actually only one diagonal + if len(diagonals) == 0 or isscalarlike(diagonals[0]): + diagonals = [np.atleast_1d(diagonals)] + else: + raise ValueError("Different number of diagonals and offsets.") + else: + diagonals = list(map(np.atleast_1d, diagonals)) + + offsets = np.atleast_1d(offsets) + + # Basic check + if len(diagonals) != len(offsets): + raise ValueError("Different number of diagonals and offsets.") + + # Determine shape, if omitted + if shape is None: + m = len(diagonals[0]) + abs(int(offsets[0])) + shape = (m, m) + + # Determine data type, if omitted + if dtype is None: + dtype = np.common_type(*diagonals) + + # Construct data array + m, n = shape + + M = max([min(m + offset, n - offset) + max(0, offset) + for offset in offsets]) + M = max(0, M) + data_arr = np.zeros((len(offsets), M), dtype=dtype) + + K = min(m, n) + + for j, diagonal in enumerate(diagonals): + offset = offsets[j] + k = max(0, offset) + length = min(m + offset, n - offset, K) + if length < 0: + raise ValueError("Offset %d (index %d) out of bounds" % (offset, j)) + try: + data_arr[j, k:k+length] = diagonal[...,:length] + except ValueError: + if len(diagonal) != length and len(diagonal) != 1: + raise ValueError( + "Diagonal length (index %d: %d at offset %d) does not " + "agree with matrix size (%d, %d)." % ( + j, len(diagonal), offset, m, n)) + raise + + return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format) + + +def identity(n, dtype='d', format=None): + """Identity matrix in sparse format + + Returns an identity matrix with shape (n,n) using a given + sparse format and dtype. + + Parameters + ---------- + n : int + Shape of the identity matrix. + dtype : dtype, optional + Data type of the matrix + format : str, optional + Sparse format of the result, e.g. format="csr", etc. + + Examples + -------- + >>> from scipy.sparse import identity + >>> identity(3).toarray() + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> identity(3, dtype='int8', format='dia') + <3x3 sparse matrix of type '<class 'numpy.int8'>' + with 3 stored elements (1 diagonals) in DIAgonal format> + + """ + return eye(n, n, dtype=dtype, format=format) + + +def eye(m, n=None, k=0, dtype=float, format=None): + """Sparse matrix with ones on diagonal + + Returns a sparse (m x n) matrix where the k-th diagonal + is all ones and everything else is zeros. + + Parameters + ---------- + m : int + Number of rows in the matrix. + n : int, optional + Number of columns. Default: `m`. 
+ k : int, optional + Diagonal to place ones on. Default: 0 (main diagonal). + dtype : dtype, optional + Data type of the matrix. + format : str, optional + Sparse format of the result, e.g. format="csr", etc. + + Examples + -------- + >>> from scipy import sparse + >>> sparse.eye(3).toarray() + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> sparse.eye(3, dtype=np.int8) + <3x3 sparse matrix of type '<class 'numpy.int8'>' + with 3 stored elements (1 diagonals) in DIAgonal format> + + """ + if n is None: + n = m + m,n = int(m),int(n) + + if m == n and k == 0: + # fast branch for special formats + if format in ['csr', 'csc']: + idx_dtype = get_index_dtype(maxval=n) + indptr = np.arange(n+1, dtype=idx_dtype) + indices = np.arange(n, dtype=idx_dtype) + data = np.ones(n, dtype=dtype) + cls = {'csr': csr_matrix, 'csc': csc_matrix}[format] + return cls((data,indices,indptr),(n,n)) + elif format == 'coo': + idx_dtype = get_index_dtype(maxval=n) + row = np.arange(n, dtype=idx_dtype) + col = np.arange(n, dtype=idx_dtype) + data = np.ones(n, dtype=dtype) + return coo_matrix((data,(row,col)),(n,n)) + + diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype) + return spdiags(diags, k, m, n).asformat(format) + + +def kron(A, B, format=None): + """kronecker product of sparse matrices A and B + + Parameters + ---------- + A : sparse or dense matrix + first matrix of the product + B : sparse or dense matrix + second matrix of the product + format : str, optional + format of the result (e.g. "csr") + + Returns + ------- + kronecker product in a sparse matrix format + + + Examples + -------- + >>> from scipy import sparse + >>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]])) + >>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]])) + >>> sparse.kron(A, B).toarray() + array([[ 0, 0, 2, 4], + [ 0, 0, 6, 8], + [ 5, 10, 0, 0], + [15, 20, 0, 0]]) + + >>> sparse.kron(A, [[1, 2], [3, 4]]).toarray() + array([[ 0, 0, 2, 4], + [ 0, 0, 6, 8], + [ 5, 10, 0, 0], + [15, 20, 0, 0]]) + + """ + B = coo_matrix(B) + + if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]: + # B is fairly dense, use BSR + A = csr_matrix(A,copy=True) + + output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) + + if A.nnz == 0 or B.nnz == 0: + # kronecker product is the zero matrix + return coo_matrix(output_shape) + + B = B.toarray() + data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1]) + data = data * B + + return bsr_matrix((data,A.indices,A.indptr), shape=output_shape) + else: + # use COO + A = coo_matrix(A) + output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) + + if A.nnz == 0 or B.nnz == 0: + # kronecker product is the zero matrix + return coo_matrix(output_shape) + + # expand entries of a into blocks + row = A.row.repeat(B.nnz) + col = A.col.repeat(B.nnz) + data = A.data.repeat(B.nnz) + + row *= B.shape[0] + col *= B.shape[1] + + # increment block indices + row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz) + row += B.row + col += B.col + row,col = row.reshape(-1),col.reshape(-1) + + # compute block entries + data = data.reshape(-1,B.nnz) * B.data + data = data.reshape(-1) + + return coo_matrix((data,(row,col)), shape=output_shape).asformat(format) + + +def kronsum(A, B, format=None): + """kronecker sum of sparse matrices A and B + + Kronecker sum of two sparse matrices is a sum of two Kronecker + products kron(I_n,A) + kron(B,I_m) where A has shape (m,m) + and B has shape (n,n) and I_m and I_n are identity matrices + of shape (m,m) and (n,n) respectively. 
+ + Parameters + ---------- + A + square matrix + B + square matrix + format : str + format of the result (e.g. "csr") + + Returns + ------- + kronecker sum in a sparse matrix format + + Examples + -------- + + + """ + A = coo_matrix(A) + B = coo_matrix(B) + + if A.shape[0] != A.shape[1]: + raise ValueError('A is not square') + + if B.shape[0] != B.shape[1]: + raise ValueError('B is not square') + + dtype = upcast(A.dtype, B.dtype) + + L = kron(eye(B.shape[0],dtype=dtype), A, format=format) + R = kron(B, eye(A.shape[0],dtype=dtype), format=format) + + return (L+R).asformat(format) # since L + R is not always same format + + +def _compressed_sparse_stack(blocks, axis): + """ + Stacking fast path for CSR/CSC matrices + (i) vstack for CSR, (ii) hstack for CSC. + """ + other_axis = 1 if axis == 0 else 0 + data = np.concatenate([b.data for b in blocks]) + constant_dim = blocks[0].shape[other_axis] + idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks], + maxval=max(data.size, constant_dim)) + indices = np.empty(data.size, dtype=idx_dtype) + indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype) + last_indptr = idx_dtype(0) + sum_dim = 0 + sum_indices = 0 + for b in blocks: + if b.shape[other_axis] != constant_dim: + raise ValueError('incompatible dimensions for axis %d' % other_axis) + indices[sum_indices:sum_indices+b.indices.size] = b.indices + sum_indices += b.indices.size + idxs = slice(sum_dim, sum_dim + b.shape[axis]) + indptr[idxs] = b.indptr[:-1] + indptr[idxs] += last_indptr + sum_dim += b.shape[axis] + last_indptr += b.indptr[-1] + indptr[-1] = last_indptr + if axis == 0: + return csr_matrix((data, indices, indptr), + shape=(sum_dim, constant_dim)) + else: + return csc_matrix((data, indices, indptr), + shape=(constant_dim, sum_dim)) + + +def hstack(blocks, format=None, dtype=None): + """ + Stack sparse matrices horizontally (column wise) + + Parameters + ---------- + blocks + sequence of sparse matrices with compatible shapes + format : str + sparse format of the result (e.g. "csr") + by default an appropriate sparse matrix format is returned. + This choice is subject to change. + dtype : dtype, optional + The data-type of the output matrix. If not given, the dtype is + determined from that of `blocks`. + + See Also + -------- + vstack : stack sparse matrices vertically (row wise) + + Examples + -------- + >>> from scipy.sparse import coo_matrix, hstack + >>> A = coo_matrix([[1, 2], [3, 4]]) + >>> B = coo_matrix([[5], [6]]) + >>> hstack([A,B]).toarray() + array([[1, 2, 5], + [3, 4, 6]]) + + """ + return bmat([blocks], format=format, dtype=dtype) + + +def vstack(blocks, format=None, dtype=None): + """ + Stack sparse matrices vertically (row wise) + + Parameters + ---------- + blocks + sequence of sparse matrices with compatible shapes + format : str, optional + sparse format of the result (e.g. "csr") + by default an appropriate sparse matrix format is returned. + This choice is subject to change. + dtype : dtype, optional + The data-type of the output matrix. If not given, the dtype is + determined from that of `blocks`. 
+
+    See Also
+    --------
+    hstack : stack sparse matrices horizontally (column wise)
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_matrix, vstack
+    >>> A = coo_matrix([[1, 2], [3, 4]])
+    >>> B = coo_matrix([[5, 6]])
+    >>> vstack([A, B]).toarray()
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+
+    """
+    return bmat([[b] for b in blocks], format=format, dtype=dtype)
+
+
+def bmat(blocks, format=None, dtype=None):
+    """
+    Build a sparse matrix from sparse sub-blocks
+
+    Parameters
+    ----------
+    blocks : array_like
+        Grid of sparse matrices with compatible shapes.
+        An entry of None implies an all-zero matrix.
+    format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
+        The sparse format of the result (e.g. "csr").  By default an
+        appropriate sparse matrix format is returned.
+        This choice is subject to change.
+    dtype : dtype, optional
+        The data-type of the output matrix.  If not given, the dtype is
+        determined from that of `blocks`.
+
+    Returns
+    -------
+    bmat : sparse matrix
+
+    See Also
+    --------
+    block_diag, diags
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_matrix, bmat
+    >>> A = coo_matrix([[1, 2], [3, 4]])
+    >>> B = coo_matrix([[5], [6]])
+    >>> C = coo_matrix([[7]])
+    >>> bmat([[A, B], [None, C]]).toarray()
+    array([[1, 2, 5],
+           [3, 4, 6],
+           [0, 0, 7]])
+
+    >>> bmat([[A, None], [None, C]]).toarray()
+    array([[1, 2, 0],
+           [3, 4, 0],
+           [0, 0, 7]])
+
+    """
+
+    blocks = np.asarray(blocks, dtype='object')
+
+    if blocks.ndim != 2:
+        raise ValueError('blocks must be 2-D')
+
+    M,N = blocks.shape
+
+    # check for fast path cases
+    if (N == 1 and format in (None, 'csr') and all(isinstance(b, csr_matrix)
+                                                   for b in blocks.flat)):
+        A = _compressed_sparse_stack(blocks[:,0], 0)
+        if dtype is not None:
+            A = A.astype(dtype)
+        return A
+    elif (M == 1 and format in (None, 'csc')
+          and all(isinstance(b, csc_matrix) for b in blocks.flat)):
+        A = _compressed_sparse_stack(blocks[0,:], 1)
+        if dtype is not None:
+            A = A.astype(dtype)
+        return A
+
+    block_mask = np.zeros(blocks.shape, dtype=bool)
+    brow_lengths = np.zeros(M, dtype=np.int64)
+    bcol_lengths = np.zeros(N, dtype=np.int64)
+
+    # convert everything to COO format
+    for i in range(M):
+        for j in range(N):
+            if blocks[i,j] is not None:
+                A = coo_matrix(blocks[i,j])
+                blocks[i,j] = A
+                block_mask[i,j] = True
+
+                if brow_lengths[i] == 0:
+                    brow_lengths[i] = A.shape[0]
+                elif brow_lengths[i] != A.shape[0]:
+                    msg = ('blocks[{i},:] has incompatible row dimensions. '
+                           'Got blocks[{i},{j}].shape[0] == {got}, '
+                           'expected {exp}.'.format(i=i, j=j,
+                                                    exp=brow_lengths[i],
+                                                    got=A.shape[0]))
+                    raise ValueError(msg)
+
+                if bcol_lengths[j] == 0:
+                    bcol_lengths[j] = A.shape[1]
+                elif bcol_lengths[j] != A.shape[1]:
+                    msg = ('blocks[:,{j}] has incompatible column dimensions.
' + 'Got blocks[{i},{j}].shape[1] == {got}, ' + 'expected {exp}.'.format(i=i, j=j, + exp=bcol_lengths[j], + got=A.shape[1])) + raise ValueError(msg) + + nnz = sum(block.nnz for block in blocks[block_mask]) + if dtype is None: + all_dtypes = [blk.dtype for blk in blocks[block_mask]] + dtype = upcast(*all_dtypes) if all_dtypes else None + + row_offsets = np.append(0, np.cumsum(brow_lengths)) + col_offsets = np.append(0, np.cumsum(bcol_lengths)) + + shape = (row_offsets[-1], col_offsets[-1]) + + data = np.empty(nnz, dtype=dtype) + idx_dtype = get_index_dtype(maxval=max(shape)) + row = np.empty(nnz, dtype=idx_dtype) + col = np.empty(nnz, dtype=idx_dtype) + + nnz = 0 + ii, jj = np.nonzero(block_mask) + for i, j in zip(ii, jj): + B = blocks[i, j] + idx = slice(nnz, nnz + B.nnz) + data[idx] = B.data + row[idx] = B.row + row_offsets[i] + col[idx] = B.col + col_offsets[j] + nnz += B.nnz + + return coo_matrix((data, (row, col)), shape=shape).asformat(format) + + +def block_diag(mats, format=None, dtype=None): + """ + Build a block diagonal sparse matrix from provided matrices. + + Parameters + ---------- + mats : sequence of matrices + Input matrices. + format : str, optional + The sparse format of the result (e.g. "csr"). If not given, the matrix + is returned in "coo" format. + dtype : dtype specifier, optional + The data-type of the output matrix. If not given, the dtype is + determined from that of `blocks`. + + Returns + ------- + res : sparse matrix + + Notes + ----- + + .. versionadded:: 0.11.0 + + See Also + -------- + bmat, diags + + Examples + -------- + >>> from scipy.sparse import coo_matrix, block_diag + >>> A = coo_matrix([[1, 2], [3, 4]]) + >>> B = coo_matrix([[5], [6]]) + >>> C = coo_matrix([[7]]) + >>> block_diag((A, B, C)).toarray() + array([[1, 2, 0, 0], + [3, 4, 0, 0], + [0, 0, 5, 0], + [0, 0, 6, 0], + [0, 0, 0, 7]]) + + """ + nmat = len(mats) + rows = [] + for ia, a in enumerate(mats): + row = [None]*nmat + if issparse(a): + row[ia] = a + else: + row[ia] = coo_matrix(a) + rows.append(row) + return bmat(rows, format=format, dtype=dtype) + + +def random(m, n, density=0.01, format='coo', dtype=None, + random_state=None, data_rvs=None): + """Generate a sparse matrix of the given shape and density with randomly + distributed values. + + Parameters + ---------- + m, n : int + shape of the matrix + density : real, optional + density of the generated matrix: density equal to one means a full + matrix, density of 0 means a matrix with no non-zero items. + format : str, optional + sparse matrix format. + dtype : dtype, optional + type of the returned matrix values. + random_state : {numpy.random.RandomState, int}, optional + Random number generator or random seed. If not given, the singleton + numpy.random will be used. This random state will be used + for sampling the sparsity structure, but not necessarily for sampling + the values of the structurally nonzero entries of the matrix. + data_rvs : callable, optional + Samples a requested number of random values. + This function should take a single argument specifying the length + of the ndarray that it will return. The structurally nonzero entries + of the sparse random matrix will be taken from the array sampled + by this function. By default, uniform [0, 1) random values will be + sampled using the same random state as is used for sampling + the sparsity structure. + + Returns + ------- + res : sparse matrix + + Notes + ----- + Only float types are supported for now. 
+ + Examples + -------- + >>> from scipy.sparse import random + >>> from scipy import stats + + >>> class CustomRandomState(np.random.RandomState): + ... def randint(self, k): + ... i = np.random.randint(k) + ... return i - i % 2 + >>> np.random.seed(12345) + >>> rs = CustomRandomState() + >>> rvs = stats.poisson(25, loc=10).rvs + >>> S = random(3, 4, density=0.25, random_state=rs, data_rvs=rvs) + >>> S.A + array([[ 36., 0., 33., 0.], # random + [ 0., 0., 0., 0.], + [ 0., 0., 36., 0.]]) + + >>> from scipy.sparse import random + >>> from scipy.stats import rv_continuous + >>> class CustomDistribution(rv_continuous): + ... def _rvs(self, *args, **kwargs): + ... return self._random_state.randn(*self._size) + >>> X = CustomDistribution(seed=2906) + >>> Y = X() # get a frozen version of the distribution + >>> S = random(3, 4, density=0.25, random_state=2906, data_rvs=Y.rvs) + >>> S.A + array([[ 0. , 0. , 0. , 0. ], + [ 0.13569738, 1.9467163 , -0.81205367, 0. ], + [ 0. , 0. , 0. , 0. ]]) + + """ + if density < 0 or density > 1: + raise ValueError("density expected to be 0 <= density <= 1") + dtype = np.dtype(dtype) + + mn = m * n + + tp = np.intc + if mn > np.iinfo(tp).max: + tp = np.int64 + + if mn > np.iinfo(tp).max: + msg = """\ +Trying to generate a random sparse matrix such as the product of dimensions is +greater than %d - this is not supported on this machine +""" + raise ValueError(msg % np.iinfo(tp).max) + + # Number of non zero values + k = int(density * m * n) + + if random_state is None: + random_state = np.random + elif isinstance(random_state, (int, np.integer)): + random_state = np.random.RandomState(random_state) + + if data_rvs is None: + if np.issubdtype(dtype, np.integer): + randint = get_randint(random_state) + + def data_rvs(n): + return randint(np.iinfo(dtype).min, np.iinfo(dtype).max, + n, dtype=dtype) + elif np.issubdtype(dtype, np.complexfloating): + def data_rvs(n): + return random_state.rand(n) + random_state.rand(n) * 1j + else: + data_rvs = random_state.rand + + ind = random_state.choice(mn, size=k, replace=False) + + j = np.floor(ind * 1. / m).astype(tp, copy=False) + i = (ind - j * m).astype(tp, copy=False) + vals = data_rvs(k).astype(dtype, copy=False) + return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format, + copy=False) + + +def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None): + """Generate a sparse matrix of the given shape and density with uniformly + distributed values. + + Parameters + ---------- + m, n : int + shape of the matrix + density : real, optional + density of the generated matrix: density equal to one means a full + matrix, density of 0 means a matrix with no non-zero items. + format : str, optional + sparse matrix format. + dtype : dtype, optional + type of the returned matrix values. + random_state : {numpy.random.RandomState, int}, optional + Random number generator or random seed. If not given, the singleton + numpy.random will be used. + + Returns + ------- + res : sparse matrix + + Notes + ----- + Only float types are supported for now. + + See Also + -------- + scipy.sparse.random : Similar function that allows a user-specified random + data source. + + Examples + -------- + >>> from scipy.sparse import rand + >>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42) + >>> matrix + <3x4 sparse matrix of type '<class 'numpy.float64'>' + with 3 stored elements in Compressed Sparse Row format> + >>> matrix.todense() + matrix([[0.05641158, 0. , 0. , 0.65088847], + [0. , 0. , 0. , 0.14286682], + [0. 
, 0. , 0. , 0. ]]) + + """ + return random(m, n, density, format, dtype, random_state) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/construct.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/construct.pyc new file mode 100644 index 0000000..55fb1ab Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/construct.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/coo.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/coo.py new file mode 100644 index 0000000..809ca38 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/coo.py @@ -0,0 +1,613 @@ +""" A sparse matrix in COOrdinate or 'triplet' format""" +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = ['coo_matrix', 'isspmatrix_coo'] + +from warnings import warn + +import numpy as np + +from scipy._lib.six import zip as izip + +from ._sparsetools import coo_tocsr, coo_todense, coo_matvec +from .base import isspmatrix, SparseEfficiencyWarning, spmatrix +from .data import _data_matrix, _minmax_mixin +from .sputils import (upcast, upcast_char, to_native, isshape, getdtype, + get_index_dtype, downcast_intp_index, check_shape, + check_reshape_kwargs) + + +class coo_matrix(_data_matrix, _minmax_mixin): + """ + A sparse matrix in COOrdinate format. + + Also known as the 'ijv' or 'triplet' format. + + This can be instantiated in several ways: + coo_matrix(D) + with a dense matrix D + + coo_matrix(S) + with another sparse matrix S (equivalent to S.tocoo()) + + coo_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + coo_matrix((data, (i, j)), [shape=(M, N)]) + to construct from three arrays: + 1. data[:] the entries of the matrix, in any order + 2. i[:] the row indices of the matrix entries + 3. j[:] the column indices of the matrix entries + + Where ``A[i[k], j[k]] = data[k]``. When shape is not + specified, it is inferred from the index arrays + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + Number of nonzero elements + data + COO format data array of the matrix + row + COO format row index array of the matrix + col + COO format column index array of the matrix + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + Advantages of the COO format + - facilitates fast conversion among sparse formats + - permits duplicate entries (see example) + - very fast conversion to and from CSR/CSC formats + + Disadvantages of the COO format + - does not directly support: + + arithmetic operations + + slicing + + Intended Usage + - COO is a fast format for constructing sparse matrices + - Once a matrix has been constructed, convert to CSR or + CSC format for fast arithmetic and matrix vector operations + - By default when converting to CSR or CSC format, duplicate (i,j) + entries will be summed together. This facilitates efficient + construction of finite element matrices and the like. 
(see example) + + Examples + -------- + + >>> # Constructing an empty matrix + >>> from scipy.sparse import coo_matrix + >>> coo_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> # Constructing a matrix using ijv format + >>> row = np.array([0, 3, 1, 0]) + >>> col = np.array([0, 3, 1, 2]) + >>> data = np.array([4, 5, 7, 9]) + >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray() + array([[4, 0, 9, 0], + [0, 7, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 5]]) + + >>> # Constructing a matrix with duplicate indices + >>> row = np.array([0, 0, 1, 3, 1, 0, 0]) + >>> col = np.array([0, 2, 1, 3, 1, 0, 0]) + >>> data = np.array([1, 1, 1, 1, 1, 1, 1]) + >>> coo = coo_matrix((data, (row, col)), shape=(4, 4)) + >>> # Duplicate indices are maintained until implicitly or explicitly summed + >>> np.max(coo.data) + 1 + >>> coo.toarray() + array([[3, 0, 1, 0], + [0, 2, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + """ + format = 'coo' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _data_matrix.__init__(self) + + if isinstance(arg1, tuple): + if isshape(arg1): + M, N = arg1 + self._shape = check_shape((M, N)) + idx_dtype = get_index_dtype(maxval=max(M, N)) + self.row = np.array([], dtype=idx_dtype) + self.col = np.array([], dtype=idx_dtype) + self.data = np.array([], getdtype(dtype, default=float)) + self.has_canonical_format = True + else: + try: + obj, (row, col) = arg1 + except (TypeError, ValueError): + raise TypeError('invalid input format') + + if shape is None: + if len(row) == 0 or len(col) == 0: + raise ValueError('cannot infer dimensions from zero ' + 'sized index arrays') + M = np.max(row) + 1 + N = np.max(col) + 1 + self._shape = check_shape((M, N)) + else: + # Use 2 steps to ensure shape has length 2. + M, N = shape + self._shape = check_shape((M, N)) + + idx_dtype = get_index_dtype(maxval=max(self.shape)) + self.row = np.array(row, copy=copy, dtype=idx_dtype) + self.col = np.array(col, copy=copy, dtype=idx_dtype) + self.data = np.array(obj, copy=copy) + self.has_canonical_format = False + + else: + if isspmatrix(arg1): + if isspmatrix_coo(arg1) and copy: + self.row = arg1.row.copy() + self.col = arg1.col.copy() + self.data = arg1.data.copy() + self._shape = check_shape(arg1.shape) + else: + coo = arg1.tocoo() + self.row = coo.row + self.col = coo.col + self.data = coo.data + self._shape = check_shape(coo.shape) + self.has_canonical_format = False + else: + #dense argument + M = np.atleast_2d(np.asarray(arg1)) + + if M.ndim != 2: + raise TypeError('expected dimension <= 2 array or matrix') + else: + self._shape = check_shape(M.shape) + + self.row, self.col = M.nonzero() + self.data = M[self.row, self.col] + self.has_canonical_format = True + + if dtype is not None: + self.data = self.data.astype(dtype, copy=False) + + self._check() + + def reshape(self, *args, **kwargs): + shape = check_shape(args, self.shape) + order, copy = check_reshape_kwargs(kwargs) + + # Return early if reshape is not required + if shape == self.shape: + if copy: + return self.copy() + else: + return self + + nrows, ncols = self.shape + + if order == 'C': + # Upcast to avoid overflows: the coo_matrix constructor + # below will downcast the results to a smaller dtype, if + # possible. 
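+            # maxval below is the largest possible C-order flat index,
+            # i.e. ncols * (nrows - 1) + (ncols - 1) for the original shape.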
+ dtype = get_index_dtype(maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1))) + + flat_indices = np.multiply(ncols, self.row, dtype=dtype) + self.col + new_row, new_col = divmod(flat_indices, shape[1]) + elif order == 'F': + dtype = get_index_dtype(maxval=(nrows * max(0, ncols - 1) + max(0, nrows - 1))) + + flat_indices = np.multiply(nrows, self.col, dtype=dtype) + self.row + new_col, new_row = divmod(flat_indices, shape[0]) + else: + raise ValueError("'order' must be 'C' or 'F'") + + # Handle copy here rather than passing on to the constructor so that no + # copy will be made of new_row and new_col regardless + if copy: + new_data = self.data.copy() + else: + new_data = self.data + + return coo_matrix((new_data, (new_row, new_col)), + shape=shape, copy=False) + + reshape.__doc__ = spmatrix.reshape.__doc__ + + def getnnz(self, axis=None): + if axis is None: + nnz = len(self.data) + if nnz != len(self.row) or nnz != len(self.col): + raise ValueError('row, column, and data array must all be the ' + 'same length') + + if self.data.ndim != 1 or self.row.ndim != 1 or \ + self.col.ndim != 1: + raise ValueError('row, column, and data arrays must be 1-D') + + return int(nnz) + + if axis < 0: + axis += 2 + if axis == 0: + return np.bincount(downcast_intp_index(self.col), + minlength=self.shape[1]) + elif axis == 1: + return np.bincount(downcast_intp_index(self.row), + minlength=self.shape[0]) + else: + raise ValueError('axis out of bounds') + + getnnz.__doc__ = spmatrix.getnnz.__doc__ + + def _check(self): + """ Checks data structure for consistency """ + + # index arrays should have integer data types + if self.row.dtype.kind != 'i': + warn("row index array has non-integer dtype (%s) " + % self.row.dtype.name) + if self.col.dtype.kind != 'i': + warn("col index array has non-integer dtype (%s) " + % self.col.dtype.name) + + idx_dtype = get_index_dtype(maxval=max(self.shape)) + self.row = np.asarray(self.row, dtype=idx_dtype) + self.col = np.asarray(self.col, dtype=idx_dtype) + self.data = to_native(self.data) + + if self.nnz > 0: + if self.row.max() >= self.shape[0]: + raise ValueError('row index exceeds matrix dimensions') + if self.col.max() >= self.shape[1]: + raise ValueError('column index exceeds matrix dimensions') + if self.row.min() < 0: + raise ValueError('negative row index found') + if self.col.min() < 0: + raise ValueError('negative column index found') + + def transpose(self, axes=None, copy=False): + if axes is not None: + raise ValueError(("Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.")) + + M, N = self.shape + return coo_matrix((self.data, (self.col, self.row)), + shape=(N, M), copy=copy) + + transpose.__doc__ = spmatrix.transpose.__doc__ + + def resize(self, *shape): + shape = check_shape(shape) + new_M, new_N = shape + M, N = self.shape + + if new_M < M or new_N < N: + mask = np.logical_and(self.row < new_M, self.col < new_N) + if not mask.all(): + self.row = self.row[mask] + self.col = self.col[mask] + self.data = self.data[mask] + + self._shape = shape + + resize.__doc__ = spmatrix.resize.__doc__ + + def toarray(self, order=None, out=None): + """See the docstring for `spmatrix.toarray`.""" + B = self._process_toarray_args(order, out) + fortran = int(B.flags.f_contiguous) + if not fortran and not B.flags.c_contiguous: + raise ValueError("Output array must be C or F contiguous") + M,N = self.shape + coo_todense(M, N, self.nnz, self.row, self.col, self.data, + B.ravel('A'), fortran) + return B + + def 
tocsc(self, copy=False): + """Convert this matrix to Compressed Sparse Column format + + Duplicate entries will be summed together. + + Examples + -------- + >>> from numpy import array + >>> from scipy.sparse import coo_matrix + >>> row = array([0, 0, 1, 3, 1, 0, 0]) + >>> col = array([0, 2, 1, 3, 1, 0, 0]) + >>> data = array([1, 1, 1, 1, 1, 1, 1]) + >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc() + >>> A.toarray() + array([[3, 0, 1, 0], + [0, 2, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + """ + from .csc import csc_matrix + if self.nnz == 0: + return csc_matrix(self.shape, dtype=self.dtype) + else: + M,N = self.shape + idx_dtype = get_index_dtype((self.col, self.row), + maxval=max(self.nnz, M)) + row = self.row.astype(idx_dtype, copy=False) + col = self.col.astype(idx_dtype, copy=False) + + indptr = np.empty(N + 1, dtype=idx_dtype) + indices = np.empty_like(row, dtype=idx_dtype) + data = np.empty_like(self.data, dtype=upcast(self.dtype)) + + coo_tocsr(N, M, self.nnz, col, row, self.data, + indptr, indices, data) + + x = csc_matrix((data, indices, indptr), shape=self.shape) + if not self.has_canonical_format: + x.sum_duplicates() + return x + + def tocsr(self, copy=False): + """Convert this matrix to Compressed Sparse Row format + + Duplicate entries will be summed together. + + Examples + -------- + >>> from numpy import array + >>> from scipy.sparse import coo_matrix + >>> row = array([0, 0, 1, 3, 1, 0, 0]) + >>> col = array([0, 2, 1, 3, 1, 0, 0]) + >>> data = array([1, 1, 1, 1, 1, 1, 1]) + >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr() + >>> A.toarray() + array([[3, 0, 1, 0], + [0, 2, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + """ + from .csr import csr_matrix + if self.nnz == 0: + return csr_matrix(self.shape, dtype=self.dtype) + else: + M,N = self.shape + idx_dtype = get_index_dtype((self.row, self.col), + maxval=max(self.nnz, N)) + row = self.row.astype(idx_dtype, copy=False) + col = self.col.astype(idx_dtype, copy=False) + + indptr = np.empty(M + 1, dtype=idx_dtype) + indices = np.empty_like(col, dtype=idx_dtype) + data = np.empty_like(self.data, dtype=upcast(self.dtype)) + + coo_tocsr(M, N, self.nnz, row, col, self.data, + indptr, indices, data) + + x = csr_matrix((data, indices, indptr), shape=self.shape) + if not self.has_canonical_format: + x.sum_duplicates() + return x + + def tocoo(self, copy=False): + if copy: + return self.copy() + else: + return self + + tocoo.__doc__ = spmatrix.tocoo.__doc__ + + def todia(self, copy=False): + from .dia import dia_matrix + + self.sum_duplicates() + ks = self.col - self.row # the diagonal for each nonzero + diags, diag_idx = np.unique(ks, return_inverse=True) + + if len(diags) > 100: + # probably undesired, should todia() have a maxdiags parameter? 
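+            # Each stored diagonal costs a dense row of length ~N in DIA
+            # format, which is why many diagonals trigger this warning.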
+ warn("Constructing a DIA matrix with %d diagonals " + "is inefficient" % len(diags), SparseEfficiencyWarning) + + #initialize and fill in data array + if self.data.size == 0: + data = np.zeros((0, 0), dtype=self.dtype) + else: + data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype) + data[diag_idx, self.col] = self.data + + return dia_matrix((data,diags), shape=self.shape) + + todia.__doc__ = spmatrix.todia.__doc__ + + def todok(self, copy=False): + from .dok import dok_matrix + + self.sum_duplicates() + dok = dok_matrix((self.shape), dtype=self.dtype) + dok._update(izip(izip(self.row,self.col),self.data)) + + return dok + + todok.__doc__ = spmatrix.todok.__doc__ + + def diagonal(self, k=0): + rows, cols = self.shape + if k <= -rows or k >= cols: + raise ValueError("k exceeds matrix dimensions") + diag = np.zeros(min(rows + min(k, 0), cols - max(k, 0)), + dtype=self.dtype) + diag_mask = (self.row + k) == self.col + + if self.has_canonical_format: + row = self.row[diag_mask] + data = self.data[diag_mask] + else: + row, _, data = self._sum_duplicates(self.row[diag_mask], + self.col[diag_mask], + self.data[diag_mask]) + diag[row + min(k, 0)] = data + + return diag + + diagonal.__doc__ = _data_matrix.diagonal.__doc__ + + def _setdiag(self, values, k): + M, N = self.shape + if values.ndim and not len(values): + return + idx_dtype = self.row.dtype + + # Determine which triples to keep and where to put the new ones. + full_keep = self.col - self.row != k + if k < 0: + max_index = min(M+k, N) + if values.ndim: + max_index = min(max_index, len(values)) + keep = np.logical_or(full_keep, self.col >= max_index) + new_row = np.arange(-k, -k + max_index, dtype=idx_dtype) + new_col = np.arange(max_index, dtype=idx_dtype) + else: + max_index = min(M, N-k) + if values.ndim: + max_index = min(max_index, len(values)) + keep = np.logical_or(full_keep, self.row >= max_index) + new_row = np.arange(max_index, dtype=idx_dtype) + new_col = np.arange(k, k + max_index, dtype=idx_dtype) + + # Define the array of data consisting of the entries to be added. + if values.ndim: + new_data = values[:max_index] + else: + new_data = np.empty(max_index, dtype=self.dtype) + new_data[:] = values + + # Update the internal structure. + self.row = np.concatenate((self.row[keep], new_row)) + self.col = np.concatenate((self.col[keep], new_col)) + self.data = np.concatenate((self.data[keep], new_data)) + self.has_canonical_format = False + + # needed by _data_matrix + def _with_data(self,data,copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the index arrays + (i.e. .row and .col) are copied. + """ + if copy: + return coo_matrix((data, (self.row.copy(), self.col.copy())), + shape=self.shape, dtype=data.dtype) + else: + return coo_matrix((data, (self.row, self.col)), + shape=self.shape, dtype=data.dtype) + + def sum_duplicates(self): + """Eliminate duplicate matrix entries by adding them together + + This is an *in place* operation + """ + if self.has_canonical_format: + return + summed = self._sum_duplicates(self.row, self.col, self.data) + self.row, self.col, self.data = summed + self.has_canonical_format = True + + def _sum_duplicates(self, row, col, data): + # Assumes (data, row, col) not in canonical format. 
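+        # Strategy: lexsort so duplicate (row, col) pairs become adjacent,
+        # keep the first entry of each run, and let np.add.reduceat sum
+        # the data over each run of duplicates.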
+ if len(data) == 0: + return row, col, data + order = np.lexsort((row, col)) + row = row[order] + col = col[order] + data = data[order] + unique_mask = ((row[1:] != row[:-1]) | + (col[1:] != col[:-1])) + unique_mask = np.append(True, unique_mask) + row = row[unique_mask] + col = col[unique_mask] + unique_inds, = np.nonzero(unique_mask) + data = np.add.reduceat(data, unique_inds, dtype=self.dtype) + return row, col, data + + def eliminate_zeros(self): + """Remove zero entries from the matrix + + This is an *in place* operation + """ + mask = self.data != 0 + self.data = self.data[mask] + self.row = self.row[mask] + self.col = self.col[mask] + + ####################### + # Arithmetic handlers # + ####################### + + def _add_dense(self, other): + if other.shape != self.shape: + raise ValueError('Incompatible shapes.') + dtype = upcast_char(self.dtype.char, other.dtype.char) + result = np.array(other, dtype=dtype, copy=True) + fortran = int(result.flags.f_contiguous) + M, N = self.shape + coo_todense(M, N, self.nnz, self.row, self.col, self.data, + result.ravel('A'), fortran) + return np.matrix(result, copy=False) + + def _mul_vector(self, other): + #output array + result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char, + other.dtype.char)) + coo_matvec(self.nnz, self.row, self.col, self.data, other, result) + return result + + def _mul_multivector(self, other): + result = np.zeros((other.shape[1], self.shape[0]), + dtype=upcast_char(self.dtype.char, other.dtype.char)) + for i, col in enumerate(other.T): + coo_matvec(self.nnz, self.row, self.col, self.data, col, result[i]) + return result.T.view(type=type(other)) + + +def isspmatrix_coo(x): + """Is x of coo_matrix type? + + Parameters + ---------- + x + object to check for being a coo matrix + + Returns + ------- + bool + True if x is a coo matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import coo_matrix, isspmatrix_coo + >>> isspmatrix_coo(coo_matrix([[5]])) + True + + >>> from scipy.sparse import coo_matrix, csr_matrix, isspmatrix_coo + >>> isspmatrix_coo(csr_matrix([[5]])) + False + """ + return isinstance(x, coo_matrix) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/coo.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/coo.pyc new file mode 100644 index 0000000..44515eb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/coo.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csc.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csc.py new file mode 100644 index 0000000..ed83ce0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csc.py @@ -0,0 +1,253 @@ +"""Compressed Sparse Column matrix format""" +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = ['csc_matrix', 'isspmatrix_csc'] + + +import numpy as np + +from .base import spmatrix +from ._sparsetools import csc_tocsr +from . import _sparsetools +from .sputils import upcast, isintlike, IndexMixin, get_index_dtype + +from .compressed import _cs_matrix + + +class csc_matrix(_cs_matrix, IndexMixin): + """ + Compressed Sparse Column matrix + + This can be instantiated in several ways: + + csc_matrix(D) + with a dense matrix or rank-2 ndarray D + + csc_matrix(S) + with another sparse matrix S (equivalent to S.tocsc()) + + csc_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. 
+ + csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)]) + where ``data``, ``row_ind`` and ``col_ind`` satisfy the + relationship ``a[row_ind[k], col_ind[k]] = data[k]``. + + csc_matrix((data, indices, indptr), [shape=(M, N)]) + is the standard CSC representation where the row indices for + column i are stored in ``indices[indptr[i]:indptr[i+1]]`` + and their corresponding values are stored in + ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is + not supplied, the matrix dimensions are inferred from + the index arrays. + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + Number of nonzero elements + data + Data array of the matrix + indices + CSC format index array + indptr + CSC format index pointer array + has_sorted_indices + Whether indices are sorted + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + Advantages of the CSC format + - efficient arithmetic operations CSC + CSC, CSC * CSC, etc. + - efficient column slicing + - fast matrix vector products (CSR, BSR may be faster) + + Disadvantages of the CSC format + - slow row slicing operations (consider CSR) + - changes to the sparsity structure are expensive (consider LIL or DOK) + + + Examples + -------- + + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> csc_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 2, 2, 0, 1, 2]) + >>> col = np.array([0, 0, 1, 2, 2, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 4], + [0, 0, 5], + [2, 3, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray() + array([[1, 0, 4], + [0, 0, 5], + [2, 3, 6]]) + + """ + format = 'csc' + + def transpose(self, axes=None, copy=False): + if axes is not None: + raise ValueError(("Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.")) + + M, N = self.shape + + from .csr import csr_matrix + return csr_matrix((self.data, self.indices, + self.indptr), (N, M), copy=copy) + + transpose.__doc__ = spmatrix.transpose.__doc__ + + def __iter__(self): + for r in self.tocsr(): + yield r + + def tocsc(self, copy=False): + if copy: + return self.copy() + else: + return self + + tocsc.__doc__ = spmatrix.tocsc.__doc__ + + def tocsr(self, copy=False): + M,N = self.shape + idx_dtype = get_index_dtype((self.indptr, self.indices), + maxval=max(self.nnz, N)) + indptr = np.empty(M + 1, dtype=idx_dtype) + indices = np.empty(self.nnz, dtype=idx_dtype) + data = np.empty(self.nnz, dtype=upcast(self.dtype)) + + csc_tocsr(M, N, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + self.data, + indptr, + indices, + data) + + from .csr import csr_matrix + A = csr_matrix((data, indices, indptr), shape=self.shape, copy=False) + A.has_sorted_indices = True + return A + + tocsr.__doc__ = spmatrix.tocsr.__doc__ + + def __getitem__(self, key): + # Use CSR to implement fancy indexing. + + row, col = self._unpack_index(key) + # Things that return submatrices. row or col is a int or slice. 
+ if (isinstance(row, slice) or isinstance(col, slice) or + isintlike(row) or isintlike(col)): + return self.T[col, row].T + # Things that return a sequence of values. + else: + return self.T[col, row] + + def nonzero(self): + # CSC can't use _cs_matrix's .nonzero method because it + # returns the indices sorted for self transposed. + + # Get row and col indices, from _cs_matrix.tocoo + major_dim, minor_dim = self._swap(self.shape) + minor_indices = self.indices + major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype) + _sparsetools.expandptr(major_dim, self.indptr, major_indices) + row, col = self._swap((major_indices, minor_indices)) + + # Remove explicit zeros + nz_mask = self.data != 0 + row = row[nz_mask] + col = col[nz_mask] + + # Sort them to be in C-style order + ind = np.argsort(row, kind='mergesort') + row = row[ind] + col = col[ind] + + return row, col + + nonzero.__doc__ = _cs_matrix.nonzero.__doc__ + + def getrow(self, i): + """Returns a copy of row i of the matrix, as a (1 x n) + CSR matrix (row vector). + """ + # we convert to CSR to maintain compatibility with old impl. + # in spmatrix.getrow() + return self._get_submatrix(i, slice(None)).tocsr() + + def getcol(self, i): + """Returns a copy of column i of the matrix, as a (m x 1) + CSC matrix (column vector). + """ + M, N = self.shape + i = int(i) + if i < 0: + i += N + if i < 0 or i >= N: + raise IndexError('index (%d) out of range' % i) + idx = slice(*self.indptr[i:i+2]) + data = self.data[idx].copy() + indices = self.indices[idx].copy() + indptr = np.array([0, len(indices)], dtype=self.indptr.dtype) + return csc_matrix((data, indices, indptr), shape=(M, 1), + dtype=self.dtype, copy=False) + + # these functions are used by the parent class (_cs_matrix) + # to remove redudancy between csc_matrix and csr_matrix + def _swap(self, x): + """swap the members of x if this is a column-oriented matrix + """ + return x[1], x[0] + + +def isspmatrix_csc(x): + """Is x of csc_matrix type? + + Parameters + ---------- + x + object to check for being a csc matrix + + Returns + ------- + bool + True if x is a csc matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import csc_matrix, isspmatrix_csc + >>> isspmatrix_csc(csc_matrix([[5]])) + True + + >>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csc + >>> isspmatrix_csc(csr_matrix([[5]])) + False + """ + return isinstance(x, csc_matrix) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csc.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csc.pyc new file mode 100644 index 0000000..6e0571c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.py new file mode 100644 index 0000000..bceb2c3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.py @@ -0,0 +1,195 @@ +r""" +============================================================== +Compressed Sparse Graph Routines (:mod:`scipy.sparse.csgraph`) +============================================================== + +.. currentmodule:: scipy.sparse.csgraph + +Fast graph algorithms based on sparse matrix representations. + +Contents +======== + +.. 
autosummary:: + :toctree: generated/ + + connected_components -- determine connected components of a graph + laplacian -- compute the laplacian of a graph + shortest_path -- compute the shortest path between points on a positive graph + dijkstra -- use Dijkstra's algorithm for shortest path + floyd_warshall -- use the Floyd-Warshall algorithm for shortest path + bellman_ford -- use the Bellman-Ford algorithm for shortest path + johnson -- use Johnson's algorithm for shortest path + breadth_first_order -- compute a breadth-first order of nodes + depth_first_order -- compute a depth-first order of nodes + breadth_first_tree -- construct the breadth-first tree from a given node + depth_first_tree -- construct a depth-first tree from a given node + minimum_spanning_tree -- construct the minimum spanning tree of a graph + reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering + maximum_bipartite_matching -- compute permutation to make diagonal zero free + structural_rank -- compute the structural rank of a graph + NegativeCycleError + +.. autosummary:: + :toctree: generated/ + + construct_dist_matrix + csgraph_from_dense + csgraph_from_masked + csgraph_masked_from_dense + csgraph_to_dense + csgraph_to_masked + reconstruct_path + +Graph Representations +===================== +This module uses graphs which are stored in a matrix format. A +graph with N nodes can be represented by an (N x N) adjacency matrix G. +If there is a connection from node i to node j, then G[i, j] = w, where +w is the weight of the connection. For nodes i and j which are +not connected, the value depends on the representation: + +- for dense array representations, non-edges are represented by + G[i, j] = 0, infinity, or NaN. + +- for dense masked representations (of type np.ma.MaskedArray), non-edges + are represented by masked values. This can be useful when graphs with + zero-weight edges are desired. + +- for sparse array representations, non-edges are represented by + non-entries in the matrix. This sort of sparse representation also + allows for edges with zero weights. + +As a concrete example, imagine that you would like to represent the following +undirected graph:: + + G + + (0) + / \ + 1 2 + / \ + (2) (1) + +This graph has three nodes, where node 0 and 1 are connected by an edge of +weight 2, and nodes 0 and 2 are connected by an edge of weight 1. +We can construct the dense, masked, and sparse representations as follows, +keeping in mind that an undirected graph is represented by a symmetric matrix:: + + >>> G_dense = np.array([[0, 2, 1], + ... [2, 0, 0], + ... [1, 0, 0]]) + >>> G_masked = np.ma.masked_values(G_dense, 0) + >>> from scipy.sparse import csr_matrix + >>> G_sparse = csr_matrix(G_dense) + +This becomes more difficult when zero edges are significant. For example, +consider the situation when we slightly modify the above graph:: + + G2 + + (0) + / \ + 0 2 + / \ + (2) (1) + +This is identical to the previous graph, except nodes 0 and 2 are connected +by an edge of zero weight. In this case, the dense representation above +leads to ambiguities: how can non-edges be represented if zero is a meaningful +value? In this case, either a masked or sparse representation must be used +to eliminate the ambiguity:: + + >>> G2_data = np.array([[np.inf, 2, 0 ], + ... [2, np.inf, np.inf], + ... 
[0, np.inf, np.inf]])
+    >>> G2_masked = np.ma.masked_invalid(G2_data)
+    >>> from scipy.sparse.csgraph import csgraph_from_dense
+    >>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
+    >>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
+    >>> G2_sparse.data
+    array([ 2.,  0.,  2.,  0.])
+
+Here we have used a utility routine from the csgraph submodule in order to
+convert the dense representation to a sparse representation which can be
+understood by the algorithms in this submodule. By viewing the data array, we
+can see that the zero values are explicitly encoded in the graph.
+
+Directed vs. Undirected
+-----------------------
+Matrices may represent either directed or undirected graphs. This is
+specified throughout the csgraph module by a boolean keyword. Graphs are
+assumed to be directed by default. In a directed graph, traversal from node
+i to node j can be accomplished over the edge G[i, j], but not the edge
+G[j, i]. Consider the following dense graph::
+
+    >>> G_dense = np.array([[0, 1, 0],
+    ...                     [2, 0, 3],
+    ...                     [0, 4, 0]])
+
+When ``directed=True`` we get the graph::
+
+      ---1--> ---3-->
+    (0)     (1)     (2)
+      <--2--- <--4---
+
+In an undirected graph, traversal from node i to node j can be
+accomplished over either G[i, j] or G[j, i]. If both edges are not null,
+and the two have unequal weights, then the smaller of the two is used.
+
+So for the same graph, when ``directed=False`` we get the graph::
+
+    (0)--1--(1)--3--(2)
+
+Note that a symmetric matrix will represent an undirected graph, regardless
+of whether the 'directed' keyword is set to True or False. In this case,
+using ``directed=True`` generally leads to more efficient computation.
+
+The routines in this module accept as input either scipy.sparse representations
+(csr, csc, or lil format), masked representations, or dense representations
+with non-edges indicated by zeros, infinities, and NaN entries.
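
The "smaller of the two edges" rule described above is easy to check against the public API; a short illustrative sketch (not part of the patched file):

import numpy as np
from scipy.sparse.csgraph import shortest_path

G = np.array([[0, 1, 0],
              [2, 0, 3],
              [0, 4, 0]], dtype=float)
# directed: only G[i, j] is traversable, so dist(0, 1) follows the weight-1 edge
print(shortest_path(G, directed=True)[0, 1])   # 1.0
# undirected: each pair uses min(G[i, j], G[j, i]), so dist(0, 2) = 1 + 3
print(shortest_path(G, directed=False)[0, 2])  # 4.0
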
+""" + +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = ['connected_components', + 'laplacian', + 'shortest_path', + 'floyd_warshall', + 'dijkstra', + 'bellman_ford', + 'johnson', + 'breadth_first_order', + 'depth_first_order', + 'breadth_first_tree', + 'depth_first_tree', + 'minimum_spanning_tree', + 'reverse_cuthill_mckee', + 'maximum_bipartite_matching', + 'structural_rank', + 'construct_dist_matrix', + 'reconstruct_path', + 'csgraph_masked_from_dense', + 'csgraph_from_dense', + 'csgraph_from_masked', + 'csgraph_to_dense', + 'csgraph_to_masked', + 'NegativeCycleError'] + +from ._laplacian import laplacian +from ._shortest_path import shortest_path, floyd_warshall, dijkstra,\ + bellman_ford, johnson, NegativeCycleError +from ._traversal import breadth_first_order, depth_first_order, \ + breadth_first_tree, depth_first_tree, connected_components +from ._min_spanning_tree import minimum_spanning_tree +from ._reordering import reverse_cuthill_mckee, maximum_bipartite_matching, \ + structural_rank +from ._tools import construct_dist_matrix, reconstruct_path,\ + csgraph_from_dense, csgraph_to_dense, csgraph_masked_from_dense,\ + csgraph_from_masked, csgraph_to_masked + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.pyc new file mode 100644 index 0000000..20d84ad Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_laplacian.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_laplacian.py new file mode 100644 index 0000000..ef1afd1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_laplacian.py @@ -0,0 +1,128 @@ +""" +Laplacian of a compressed-sparse graph +""" + +# Authors: Aric Hagberg <hagberg@lanl.gov> +# Gael Varoquaux <gael.varoquaux@normalesup.org> +# Jake Vanderplas <vanderplas@astro.washington.edu> +# License: BSD + +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.sparse import isspmatrix + + +############################################################################### +# Graph laplacian +def laplacian(csgraph, normed=False, return_diag=False, use_out_degree=False): + """ + Return the Laplacian matrix of a directed graph. + + Parameters + ---------- + csgraph : array_like or sparse matrix, 2 dimensions + compressed-sparse graph, with shape (N, N). + normed : bool, optional + If True, then compute normalized Laplacian. + return_diag : bool, optional + If True, then also return an array related to vertex degrees. + use_out_degree : bool, optional + If True, then use out-degree instead of in-degree. + This distinction matters only if the graph is asymmetric. + Default: False. + + Returns + ------- + lap : ndarray or sparse matrix + The N x N laplacian matrix of csgraph. It will be a numpy array (dense) + if the input was dense, or a sparse matrix otherwise. + diag : ndarray, optional + The length-N diagonal of the Laplacian matrix. + For the normalized Laplacian, this is the array of square roots + of vertex degrees or 1 if the degree is zero. 
+
+    Notes
+    -----
+    The Laplacian matrix of a graph is sometimes referred to as the
+    "Kirchhoff matrix" or the "admittance matrix", and is useful in many
+    parts of spectral graph theory. In particular, the eigen-decomposition
+    of the laplacian matrix can give insight into many properties of the graph.
+
+    Examples
+    --------
+    >>> from scipy.sparse import csgraph
+    >>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
+    >>> G
+    array([[ 0,  0,  0,  0,  0],
+           [ 0,  1,  2,  3,  4],
+           [ 0,  2,  4,  6,  8],
+           [ 0,  3,  6,  9, 12],
+           [ 0,  4,  8, 12, 16]])
+    >>> csgraph.laplacian(G, normed=False)
+    array([[  0,   0,   0,   0,   0],
+           [  0,   9,  -2,  -3,  -4],
+           [  0,  -2,  16,  -6,  -8],
+           [  0,  -3,  -6,  21, -12],
+           [  0,  -4,  -8, -12,  24]])
+    """
+    if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
+        raise ValueError('csgraph must be a square matrix or array')
+
+    if normed and (np.issubdtype(csgraph.dtype, np.signedinteger)
+                   or np.issubdtype(csgraph.dtype, np.uint)):
+        csgraph = csgraph.astype(float)
+
+    create_lap = _laplacian_sparse if isspmatrix(csgraph) else _laplacian_dense
+    degree_axis = 1 if use_out_degree else 0
+    lap, d = create_lap(csgraph, normed=normed, axis=degree_axis)
+    if return_diag:
+        return lap, d
+    return lap
+
+
+def _setdiag_dense(A, d):
+    A.flat[::len(d)+1] = d
+
+
+def _laplacian_sparse(graph, normed=False, axis=0):
+    if graph.format in ('lil', 'dok'):
+        m = graph.tocoo()
+        needs_copy = False
+    else:
+        m = graph
+        needs_copy = True
+    w = m.sum(axis=axis).getA1() - m.diagonal()
+    if normed:
+        m = m.tocoo(copy=needs_copy)
+        isolated_node_mask = (w == 0)
+        w = np.where(isolated_node_mask, 1, np.sqrt(w))
+        m.data /= w[m.row]
+        m.data /= w[m.col]
+        m.data *= -1
+        m.setdiag(1 - isolated_node_mask)
+    else:
+        if m.format == 'dia':
+            m = m.copy()
+        else:
+            m = m.tocoo(copy=needs_copy)
+        m.data *= -1
+        m.setdiag(w)
+    return m, w
+
+
+def _laplacian_dense(graph, normed=False, axis=0):
+    m = np.array(graph)
+    np.fill_diagonal(m, 0)
+    w = m.sum(axis=axis)
+    if normed:
+        isolated_node_mask = (w == 0)
+        w = np.where(isolated_node_mask, 1, np.sqrt(w))
+        m /= w
+        m /= w[:, np.newaxis]
+        m *= -1
+        _setdiag_dense(m, 1 - isolated_node_mask)
+    else:
+        m *= -1
+        _setdiag_dense(m, w)
+    return m, w
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_laplacian.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_laplacian.pyc
new file mode 100644
index 0000000..23e46c6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_laplacian.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_min_spanning_tree.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_min_spanning_tree.so
new file mode 100755
index 0000000..0d1eb44
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_min_spanning_tree.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_reordering.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_reordering.so
new file mode 100755
index 0000000..9199928
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_reordering.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_shortest_path.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_shortest_path.so
new file mode 100755
index 0000000..9ad85d6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_shortest_path.so differ
diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_tools.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_tools.so new file mode 100755 index 0000000..21d6de5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_tools.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_traversal.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_traversal.so new file mode 100755 index 0000000..3cb3d01 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_traversal.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_validation.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_validation.py new file mode 100644 index 0000000..31e35f6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_validation.py @@ -0,0 +1,58 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc +from ._tools import csgraph_to_dense, csgraph_from_dense,\ + csgraph_masked_from_dense, csgraph_from_masked + +DTYPE = np.float64 + + +def validate_graph(csgraph, directed, dtype=DTYPE, + csr_output=True, dense_output=True, + copy_if_dense=False, copy_if_sparse=False, + null_value_in=0, null_value_out=np.inf, + infinity_null=True, nan_null=True): + """Routine for validation and conversion of csgraph inputs""" + if not (csr_output or dense_output): + raise ValueError("Internal: dense or csr output must be true") + + # if undirected and csc storage, then transposing in-place + # is quicker than later converting to csr. + if (not directed) and isspmatrix_csc(csgraph): + csgraph = csgraph.T + + if isspmatrix(csgraph): + if csr_output: + csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse) + else: + csgraph = csgraph_to_dense(csgraph, null_value=null_value_out) + elif np.ma.isMaskedArray(csgraph): + if dense_output: + mask = csgraph.mask + csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense) + csgraph[mask] = null_value_out + else: + csgraph = csgraph_from_masked(csgraph) + else: + if dense_output: + csgraph = csgraph_masked_from_dense(csgraph, + copy=copy_if_dense, + null_value=null_value_in, + nan_null=nan_null, + infinity_null=infinity_null) + mask = csgraph.mask + csgraph = np.asarray(csgraph.data, dtype=DTYPE) + csgraph[mask] = null_value_out + else: + csgraph = csgraph_from_dense(csgraph, null_value=null_value_in, + infinity_null=infinity_null, + nan_null=nan_null) + + if csgraph.ndim != 2: + raise ValueError("compressed-sparse graph must be two dimensional") + + if csgraph.shape[0] != csgraph.shape[1]: + raise ValueError("compressed-sparse graph must be shape (N, N)") + + return csgraph diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_validation.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_validation.pyc new file mode 100644 index 0000000..f3de367 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/_validation.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/setup.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/setup.py new file mode 100644 index 0000000..0b8f760 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/setup.py @@ -0,0 +1,32 @@ +from __future__ import division, print_function, absolute_import + + 
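+# Each extension below is built from a pre-generated C source, with the
+# NumPy headers on the include path via numpy.get_include().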
+def configuration(parent_package='', top_path=None): + import numpy + from numpy.distutils.misc_util import Configuration + + config = Configuration('csgraph', parent_package, top_path) + + config.add_data_dir('tests') + + config.add_extension('_shortest_path', + sources=['_shortest_path.c'], + include_dirs=[numpy.get_include()]) + + config.add_extension('_traversal', + sources=['_traversal.c'], + include_dirs=[numpy.get_include()]) + + config.add_extension('_min_spanning_tree', + sources=['_min_spanning_tree.c'], + include_dirs=[numpy.get_include()]) + + config.add_extension('_reordering', + sources=['_reordering.c'], + include_dirs=[numpy.get_include()]) + + config.add_extension('_tools', + sources=['_tools.c'], + include_dirs=[numpy.get_include()]) + + return config diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/setup.pyc new file mode 100644 index 0000000..6bcb630 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/__init__.pyc new file mode 100644 index 0000000..7344702 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py new file mode 100644 index 0000000..54f10e5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py @@ -0,0 +1,101 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_equal, assert_array_almost_equal +from scipy.sparse import csgraph + + +def test_weak_connections(): + Xde = np.array([[0, 1, 0], + [0, 0, 0], + [0, 0, 0]]) + + Xsp = csgraph.csgraph_from_dense(Xde, null_value=0) + + for X in Xsp, Xde: + n_components, labels =\ + csgraph.connected_components(X, directed=True, + connection='weak') + + assert_equal(n_components, 2) + assert_array_almost_equal(labels, [0, 0, 1]) + + +def test_strong_connections(): + X1de = np.array([[0, 1, 0], + [0, 0, 0], + [0, 0, 0]]) + X2de = X1de + X1de.T + + X1sp = csgraph.csgraph_from_dense(X1de, null_value=0) + X2sp = csgraph.csgraph_from_dense(X2de, null_value=0) + + for X in X1sp, X1de: + n_components, labels =\ + csgraph.connected_components(X, directed=True, + connection='strong') + + assert_equal(n_components, 3) + labels.sort() + assert_array_almost_equal(labels, [0, 1, 2]) + + for X in X2sp, X2de: + n_components, labels =\ + csgraph.connected_components(X, directed=True, + connection='strong') + + assert_equal(n_components, 2) + labels.sort() + assert_array_almost_equal(labels, [0, 0, 1]) + + +def test_strong_connections2(): + X = np.array([[0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0]]) + n_components, labels =\ + csgraph.connected_components(X, directed=True, + connection='strong') + assert_equal(n_components, 
5)
+    labels.sort()
+    assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4])
+
+
+def test_weak_connections2():
+    X = np.array([[0, 0, 0, 0, 0, 0],
+                  [1, 0, 0, 0, 0, 0],
+                  [0, 0, 0, 1, 0, 0],
+                  [0, 0, 1, 0, 1, 0],
+                  [0, 0, 0, 0, 0, 0],
+                  [0, 0, 0, 0, 1, 0]])
+    n_components, labels =\
+        csgraph.connected_components(X, directed=True,
+                                     connection='weak')
+    assert_equal(n_components, 2)
+    labels.sort()
+    assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1])
+
+
+def test_ticket1876():
+    # Regression test: this failed in the original implementation
+    # There should be two strongly-connected components; previously gave one
+    g = np.array([[0, 1, 1, 0],
+                  [1, 0, 0, 1],
+                  [0, 0, 0, 1],
+                  [0, 0, 1, 0]])
+    n_components, labels = csgraph.connected_components(g, connection='strong')
+
+    assert_equal(n_components, 2)
+    assert_equal(labels[0], labels[1])
+    assert_equal(labels[2], labels[3])
+
+
+def test_fully_connected_graph():
+    # Fully connected dense matrices raised an exception.
+    # https://github.com/scipy/scipy/issues/3818
+    g = np.ones((4, 4))
+    n_components, labels = csgraph.connected_components(g)
+    assert_equal(n_components, 1)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_connected_components.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_connected_components.pyc
new file mode 100644
index 0000000..405f94d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_connected_components.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_conversions.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_conversions.py
new file mode 100644
index 0000000..5201464
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_conversions.py
@@ -0,0 +1,69 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from scipy.sparse import csr_matrix
+from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense
+
+
+def test_csgraph_from_dense():
+    np.random.seed(1234)
+    G = np.random.random((10, 10))
+    some_nulls = (G < 0.4)
+    all_nulls = (G < 0.8)
+
+    for null_value in [0, np.nan, np.inf]:
+        G[all_nulls] = null_value
+        olderr = np.seterr(invalid="ignore")
+        try:
+            G_csr = csgraph_from_dense(G, null_value=0)
+        finally:
+            np.seterr(**olderr)
+
+        G[all_nulls] = 0
+        assert_array_almost_equal(G, G_csr.toarray())
+
+    for null_value in [np.nan, np.inf]:
+        G[all_nulls] = 0
+        G[some_nulls] = null_value
+        olderr = np.seterr(invalid="ignore")
+        try:
+            G_csr = csgraph_from_dense(G, null_value=0)
+        finally:
+            np.seterr(**olderr)
+
+        G[all_nulls] = 0
+        assert_array_almost_equal(G, G_csr.toarray())
+
+
+def test_csgraph_to_dense():
+    np.random.seed(1234)
+    G = np.random.random((10, 10))
+    nulls = (G < 0.8)
+    G[nulls] = np.inf
+
+    G_csr = csgraph_from_dense(G)
+
+    for null_value in [0, 10, -np.inf, np.inf]:
+        G[nulls] = null_value
+        assert_array_almost_equal(G, csgraph_to_dense(G_csr, null_value))
+
+
+def test_multiple_edges():
+    # create a random square matrix with an even number of elements
+    np.random.seed(1234)
+    X = np.random.random((10, 10))
+    Xcsr = csr_matrix(X)
+
+    # now double-up every other column
+    Xcsr.indices[::2] = Xcsr.indices[1::2]
+
+    # normal sparse toarray() will sum the duplicated edges
+    Xdense = Xcsr.toarray()
+    assert_array_almost_equal(Xdense[:, 1::2],
+                              X[:, ::2] + X[:, 1::2])
+
+    # csgraph_to_dense chooses the minimum of each duplicated edge
+    Xdense = csgraph_to_dense(Xcsr)
+    assert_array_almost_equal(Xdense[:, 1::2],
+                              np.minimum(X[:, ::2], X[:, 1::2]))
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_conversions.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_conversions.pyc
new file mode 100644
index 0000000..a41318b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_conversions.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py
new file mode 100644
index 0000000..8c6b3a2
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py
@@ -0,0 +1,136 @@
+# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
+#         Jake Vanderplas <vanderplas@astro.washington.edu>
+# License: BSD
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_almost_equal
+from pytest import raises as assert_raises
+from scipy import sparse
+
+from scipy.sparse import csgraph
+
+
+def _explicit_laplacian(x, normed=False):
+    if sparse.issparse(x):
+        x = x.todense()
+    x = np.asarray(x)
+    y = -1.0 * x
+    for j in range(y.shape[0]):
+        y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
+    if normed:
+        d = np.diag(y).copy()
+        d[d == 0] = 1.0
+        y /= d[:,None]**.5
+        y /= d[None,:]**.5
+    return y
+
+
+def _check_symmetric_graph_laplacian(mat, normed):
+    if not hasattr(mat, 'shape'):
+        mat = eval(mat, dict(np=np, sparse=sparse))
+
+    if sparse.issparse(mat):
+        sp_mat = mat
+        mat = sp_mat.todense()
+    else:
+        sp_mat = sparse.csr_matrix(mat)
+
+    laplacian = csgraph.laplacian(mat, normed=normed)
+    n_nodes = mat.shape[0]
+    if not normed:
+        assert_array_almost_equal(laplacian.sum(axis=0), np.zeros(n_nodes))
+    assert_array_almost_equal(laplacian.T, laplacian)
+    assert_array_almost_equal(laplacian,
+                              csgraph.laplacian(sp_mat, normed=normed).todense())
+
+    assert_array_almost_equal(laplacian,
+                              _explicit_laplacian(mat, normed=normed))
+
+
+def test_laplacian_value_error():
+    for t in int, float, complex:
+        for m in ([1, 1],
+                  [[[1]]],
+                  [[1, 2, 3], [4, 5, 6]],
+                  [[1, 2], [3, 4], [5, 5]]):
+            A = np.array(m, dtype=t)
+            assert_raises(ValueError, csgraph.laplacian, A)
+
+
+def test_symmetric_graph_laplacian():
+    symmetric_mats = ('np.arange(10) * np.arange(10)[:, np.newaxis]',
+                      'np.ones((7, 7))',
+                      'np.eye(19)',
+                      'sparse.diags([1, 1], [-1, 1], shape=(4,4))',
+                      'sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense()',
+                      'np.asarray(sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense())',
+                      'np.vander(np.arange(4)) + np.vander(np.arange(4)).T')
+    for mat_str in symmetric_mats:
+        for normed in True, False:
+            _check_symmetric_graph_laplacian(mat_str, normed)
+
+
+def _assert_allclose_sparse(a, b, **kwargs):
+    # helper function that can deal with sparse matrices
+    if sparse.issparse(a):
+        a = a.toarray()
+    if sparse.issparse(b):
+        b = b.toarray()
+    assert_allclose(a, b, **kwargs)
+
+
+def _check_laplacian(A, desired_L, desired_d, normed, use_out_degree):
+    for arr_type in np.array, sparse.csr_matrix, sparse.coo_matrix:
+        for t in int, float, complex:
+            adj = arr_type(A, dtype=t)
+            L = csgraph.laplacian(adj, normed=normed, return_diag=False,
+                                  use_out_degree=use_out_degree)
+            _assert_allclose_sparse(L, desired_L, atol=1e-12)
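+            # Repeat with return_diag=True and validate the degree vector too.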
+ L, d = csgraph.laplacian(adj, normed=normed, return_diag=True, + use_out_degree=use_out_degree) + _assert_allclose_sparse(L, desired_L, atol=1e-12) + _assert_allclose_sparse(d, desired_d, atol=1e-12) + + +def test_asymmetric_laplacian(): + # adjacency matrix + A = [[0, 1, 0], + [4, 2, 0], + [0, 0, 0]] + + # Laplacian matrix using out-degree + L = [[1, -1, 0], + [-4, 4, 0], + [0, 0, 0]] + d = [1, 4, 0] + _check_laplacian(A, L, d, normed=False, use_out_degree=True) + + # normalized Laplacian matrix using out-degree + L = [[1, -0.5, 0], + [-2, 1, 0], + [0, 0, 0]] + d = [1, 2, 1] + _check_laplacian(A, L, d, normed=True, use_out_degree=True) + + # Laplacian matrix using in-degree + L = [[4, -1, 0], + [-4, 1, 0], + [0, 0, 0]] + d = [4, 1, 0] + _check_laplacian(A, L, d, normed=False, use_out_degree=False) + + # normalized Laplacian matrix using in-degree + L = [[1, -0.5, 0], + [-2, 1, 0], + [0, 0, 0]] + d = [2, 1, 1] + _check_laplacian(A, L, d, normed=True, use_out_degree=False) + + +def test_sparse_formats(): + for fmt in ('csr', 'csc', 'coo', 'lil', 'dok', 'dia', 'bsr'): + mat = sparse.diags([1, 1], [-1, 1], shape=(4,4), format=fmt) + for normed in True, False: + _check_symmetric_graph_laplacian(mat, normed) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.pyc new file mode 100644 index 0000000..2434383 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_reordering.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_reordering.py new file mode 100644 index 0000000..8552426 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_reordering.py @@ -0,0 +1,121 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_equal +from scipy.sparse.csgraph import (reverse_cuthill_mckee, + maximum_bipartite_matching, structural_rank) +from scipy.sparse import diags, csc_matrix, csr_matrix, coo_matrix + +def test_graph_reverse_cuthill_mckee(): + A = np.array([[1, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 0, 0, 1, 0, 1], + [0, 1, 1, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 1, 0], + [1, 0, 1, 0, 1, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 1], + [0, 0, 0, 1, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int) + + graph = csr_matrix(A) + perm = reverse_cuthill_mckee(graph) + correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0]) + assert_equal(perm, correct_perm) + + # Test int64 indices input + graph.indices = graph.indices.astype('int64') + graph.indptr = graph.indptr.astype('int64') + perm = reverse_cuthill_mckee(graph, True) + assert_equal(perm, correct_perm) + + +def test_graph_reverse_cuthill_mckee_ordering(): + data = np.ones(63,dtype=int) + rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, + 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, + 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, + 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, + 14, 15, 15, 15, 15, 15]) + cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2, + 7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13, + 15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13, + 1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11, + 4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14, + 5, 7, 10, 13, 15]) + graph = coo_matrix((data, (rows,cols))).tocsr() + perm = 
reverse_cuthill_mckee(graph) + correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15, + 0, 13, 7, 5, 9, 11, 1, 3]) + assert_equal(perm, correct_perm) + + +def test_graph_maximum_bipartite_matching(): + A = diags(np.ones(25), offsets=0, format='csc') + rand_perm = np.random.permutation(25) + rand_perm2 = np.random.permutation(25) + + Rrow = np.arange(25) + Rcol = rand_perm + Rdata = np.ones(25,dtype=int) + Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc() + + Crow = rand_perm2 + Ccol = np.arange(25) + Cdata = np.ones(25,dtype=int) + Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc() + # Randomly permute identity matrix + B = Rmat*A*Cmat + + # Row permute + perm = maximum_bipartite_matching(B,perm_type='row') + Rrow = np.arange(25) + Rcol = perm + Rdata = np.ones(25,dtype=int) + Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc() + C1 = Rmat*B + + # Column permute + perm2 = maximum_bipartite_matching(B,perm_type='column') + Crow = perm2 + Ccol = np.arange(25) + Cdata = np.ones(25,dtype=int) + Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc() + C2 = B*Cmat + + # Should get identity matrix back + assert_equal(any(C1.diagonal() == 0), False) + assert_equal(any(C2.diagonal() == 0), False) + + # Test int64 indices input + B.indices = B.indices.astype('int64') + B.indptr = B.indptr.astype('int64') + perm = maximum_bipartite_matching(B,perm_type='row') + Rrow = np.arange(25) + Rcol = perm + Rdata = np.ones(25,dtype=int) + Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc() + C3 = Rmat*B + assert_equal(any(C3.diagonal() == 0), False) + + +def test_graph_structural_rank(): + # Test square matrix #1 + A = csc_matrix([[1, 1, 0], + [1, 0, 1], + [0, 1, 0]]) + assert_equal(structural_rank(A), 3) + + # Test square matrix #2 + rows = np.array([0,0,0,0,0,1,1,2,2,3,3,3,3,3,3,4,4,5,5,6,6,7,7]) + cols = np.array([0,1,2,3,4,2,5,2,6,0,1,3,5,6,7,4,5,5,6,2,6,2,4]) + data = np.ones_like(rows) + B = coo_matrix((data,(rows,cols)), shape=(8,8)) + assert_equal(structural_rank(B), 6) + + #Test non-square matrix + C = csc_matrix([[1, 0, 2, 0], + [2, 0, 4, 0]]) + assert_equal(structural_rank(C), 2) + + #Test tall matrix + assert_equal(structural_rank(C.T), 2) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_reordering.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_reordering.pyc new file mode 100644 index 0000000..29da6df Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_reordering.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py new file mode 100644 index 0000000..f1b3a5e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py @@ -0,0 +1,202 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_array_equal +from pytest import raises as assert_raises +from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson, + bellman_ford, construct_dist_matrix, NegativeCycleError) + + +directed_G = np.array([[0, 3, 3, 0, 0], + [0, 0, 0, 2, 4], + [0, 0, 0, 0, 0], + [1, 0, 0, 0, 0], + [2, 0, 0, 2, 0]], dtype=float) + +undirected_G = np.array([[0, 3, 3, 1, 2], + [3, 0, 0, 2, 4], + [3, 0, 0, 0, 0], + [1, 2, 0, 0, 2], + [2, 4, 0, 2, 0]], dtype=float) + +unweighted_G = (directed_G > 0).astype(float) + +directed_SP = [[0, 3, 3, 5, 7], 
+ [3, 0, 6, 2, 4], + [np.inf, np.inf, 0, np.inf, np.inf], + [1, 4, 4, 0, 8], + [2, 5, 5, 2, 0]] + +directed_pred = np.array([[-9999, 0, 0, 1, 1], + [3, -9999, 0, 1, 1], + [-9999, -9999, -9999, -9999, -9999], + [3, 0, 0, -9999, 1], + [4, 0, 0, 4, -9999]], dtype=float) + +undirected_SP = np.array([[0, 3, 3, 1, 2], + [3, 0, 6, 2, 4], + [3, 6, 0, 4, 5], + [1, 2, 4, 0, 2], + [2, 4, 5, 2, 0]], dtype=float) + +undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2], + [np.inf, 0, np.inf, 2, np.inf], + [np.inf, np.inf, 0, np.inf, np.inf], + [1, 2, np.inf, 0, 2], + [2, np.inf, np.inf, 2, 0]], dtype=float) + +undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5) +undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf + +undirected_pred = np.array([[-9999, 0, 0, 0, 0], + [1, -9999, 0, 1, 1], + [2, 0, -9999, 0, 0], + [3, 3, 0, -9999, 3], + [4, 4, 0, 4, -9999]], dtype=float) + +methods = ['auto', 'FW', 'D', 'BF', 'J'] + + +def test_dijkstra_limit(): + limits = [0, 2, np.inf] + results = [undirected_SP_limit_0, + undirected_SP_limit_2, + undirected_SP] + + def check(limit, result): + SP = dijkstra(undirected_G, directed=False, limit=limit) + assert_array_almost_equal(SP, result) + + for limit, result in zip(limits, results): + check(limit, result) + + +def test_directed(): + def check(method): + SP = shortest_path(directed_G, method=method, directed=True, + overwrite=False) + assert_array_almost_equal(SP, directed_SP) + + for method in methods: + check(method) + + +def test_undirected(): + def check(method, directed_in): + if directed_in: + SP1 = shortest_path(directed_G, method=method, directed=False, + overwrite=False) + assert_array_almost_equal(SP1, undirected_SP) + else: + SP2 = shortest_path(undirected_G, method=method, directed=True, + overwrite=False) + assert_array_almost_equal(SP2, undirected_SP) + + for method in methods: + for directed_in in (True, False): + check(method, directed_in) + + +def test_shortest_path_indices(): + indices = np.arange(4) + + def check(func, indshape): + outshape = indshape + (5,) + SP = func(directed_G, directed=False, + indices=indices.reshape(indshape)) + assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape)) + + for indshape in [(4,), (4, 1), (2, 2)]: + for func in (dijkstra, bellman_ford, johnson, shortest_path): + check(func, indshape) + + assert_raises(ValueError, shortest_path, directed_G, method='FW', + indices=indices) + + +def test_predecessors(): + SP_res = {True: directed_SP, + False: undirected_SP} + pred_res = {True: directed_pred, + False: undirected_pred} + + def check(method, directed): + SP, pred = shortest_path(directed_G, method, directed=directed, + overwrite=False, + return_predecessors=True) + assert_array_almost_equal(SP, SP_res[directed]) + assert_array_almost_equal(pred, pred_res[directed]) + + for method in methods: + for directed in (True, False): + check(method, directed) + + +def test_construct_shortest_path(): + def check(method, directed): + SP1, pred = shortest_path(directed_G, + directed=directed, + overwrite=False, + return_predecessors=True) + SP2 = construct_dist_matrix(directed_G, pred, directed=directed) + assert_array_almost_equal(SP1, SP2) + + for method in methods: + for directed in (True, False): + check(method, directed) + + +def test_unweighted_path(): + def check(method, directed): + SP1 = shortest_path(directed_G, + directed=directed, + overwrite=False, + unweighted=True) + SP2 = shortest_path(unweighted_G, + directed=directed, + overwrite=False, + unweighted=False) + 
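+        # (Editor's note: unweighted_G is (directed_G > 0).astype(float),
+        # so hop-count distances on the weighted graph must equal the
+        # weighted distances of the 0/1 graph.)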
assert_array_almost_equal(SP1, SP2) + + for method in methods: + for directed in (True, False): + check(method, directed) + + +def test_negative_cycles(): + # create a small graph with a negative cycle + graph = np.ones([5, 5]) + graph.flat[::6] = 0 + graph[1, 2] = -2 + + def check(method, directed): + assert_raises(NegativeCycleError, shortest_path, graph, method, + directed) + + for method in ['FW', 'J', 'BF']: + for directed in (True, False): + check(method, directed) + + +def test_masked_input(): + G = np.ma.masked_equal(directed_G, 0) + + def check(method): + SP = shortest_path(directed_G, method=method, directed=True, + overwrite=False) + assert_array_almost_equal(SP, directed_SP) + + for method in methods: + check(method) + + +def test_overwrite(): + G = np.array([[0, 3, 3, 1, 2], + [3, 0, 0, 2, 4], + [3, 0, 0, 0, 0], + [1, 2, 0, 0, 2], + [2, 4, 0, 2, 0]], dtype=float) + foo = G.copy() + shortest_path(foo, overwrite=False) + assert_array_equal(foo, G) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.pyc new file mode 100644 index 0000000..7ca23f6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py new file mode 100644 index 0000000..0a23878 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py @@ -0,0 +1,67 @@ +"""Test the minimum spanning tree function""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_ +import numpy.testing as npt +from scipy.sparse import csr_matrix +from scipy.sparse.csgraph import minimum_spanning_tree + + +def test_minimum_spanning_tree(): + + # Create a graph with two connected components. + graph = [[0,1,0,0,0], + [1,0,0,0,0], + [0,0,0,8,5], + [0,0,8,0,1], + [0,0,5,1,0]] + graph = np.asarray(graph) + + # Create the expected spanning tree. + expected = [[0,1,0,0,0], + [0,0,0,0,0], + [0,0,0,0,5], + [0,0,0,0,1], + [0,0,0,0,0]] + expected = np.asarray(expected) + + # Ensure minimum spanning tree code gives this expected output. + csgraph = csr_matrix(graph) + mintree = minimum_spanning_tree(csgraph) + npt.assert_array_equal(mintree.todense(), expected, + 'Incorrect spanning tree found.') + + # Ensure that the original graph was not modified. + npt.assert_array_equal(csgraph.todense(), graph, + 'Original graph was modified.') + + # Now let the algorithm modify the csgraph in place. + mintree = minimum_spanning_tree(csgraph, overwrite=True) + npt.assert_array_equal(mintree.todense(), expected, + 'Graph was not properly modified to contain MST.') + + np.random.seed(1234) + for N in (5, 10, 15, 20): + + # Create a random graph. + graph = 3 + np.random.random((N, N)) + csgraph = csr_matrix(graph) + + # The spanning tree has at most N - 1 edges. + mintree = minimum_spanning_tree(csgraph) + assert_(mintree.nnz < N) + + # Set the sub diagonal to 1 to create a known spanning tree. + idx = np.arange(N-1) + graph[idx,idx+1] = 1 + csgraph = csr_matrix(graph) + mintree = minimum_spanning_tree(csgraph) + + # We expect to see this pattern in the spanning tree and otherwise + # have this zero. 
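+        # (Editor's note: the random weights above lie in [3, 4), so the
+        # weight-1 edges just inserted form a spanning path that is
+        # strictly cheaper than any alternative edge; the MST must
+        # therefore consist of exactly those N-1 edges.)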
+ expected = np.zeros((N, N)) + expected[idx, idx+1] = 1 + + npt.assert_array_equal(mintree.todense(), expected, + 'Incorrect spanning tree found.') diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.pyc new file mode 100644 index 0000000..ee116e3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_traversal.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_traversal.py new file mode 100644 index 0000000..6192537 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_traversal.py @@ -0,0 +1,70 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_array_almost_equal +from scipy.sparse.csgraph import (breadth_first_tree, depth_first_tree, + csgraph_to_dense, csgraph_from_dense) + + +def test_graph_breadth_first(): + csgraph = np.array([[0, 1, 2, 0, 0], + [1, 0, 0, 0, 3], + [2, 0, 0, 7, 0], + [0, 0, 7, 0, 1], + [0, 3, 0, 1, 0]]) + csgraph = csgraph_from_dense(csgraph, null_value=0) + + bfirst = np.array([[0, 1, 2, 0, 0], + [0, 0, 0, 0, 3], + [0, 0, 0, 7, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]) + + for directed in [True, False]: + bfirst_test = breadth_first_tree(csgraph, 0, directed) + assert_array_almost_equal(csgraph_to_dense(bfirst_test), + bfirst) + + +def test_graph_depth_first(): + csgraph = np.array([[0, 1, 2, 0, 0], + [1, 0, 0, 0, 3], + [2, 0, 0, 7, 0], + [0, 0, 7, 0, 1], + [0, 3, 0, 1, 0]]) + csgraph = csgraph_from_dense(csgraph, null_value=0) + + dfirst = np.array([[0, 1, 0, 0, 0], + [0, 0, 0, 0, 3], + [0, 0, 0, 0, 0], + [0, 0, 7, 0, 0], + [0, 0, 0, 1, 0]]) + + for directed in [True, False]: + dfirst_test = depth_first_tree(csgraph, 0, directed) + assert_array_almost_equal(csgraph_to_dense(dfirst_test), + dfirst) + + +def test_graph_breadth_first_trivial_graph(): + csgraph = np.array([[0]]) + csgraph = csgraph_from_dense(csgraph, null_value=0) + + bfirst = np.array([[0]]) + + for directed in [True, False]: + bfirst_test = breadth_first_tree(csgraph, 0, directed) + assert_array_almost_equal(csgraph_to_dense(bfirst_test), + bfirst) + + +def test_graph_depth_first_trivial_graph(): + csgraph = np.array([[0]]) + csgraph = csgraph_from_dense(csgraph, null_value=0) + + bfirst = np.array([[0]]) + + for directed in [True, False]: + bfirst_test = depth_first_tree(csgraph, 0, directed) + assert_array_almost_equal(csgraph_to_dense(bfirst_test), + bfirst) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_traversal.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_traversal.pyc new file mode 100644 index 0000000..e543ff0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_traversal.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csr.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/csr.py new file mode 100644 index 0000000..1332402 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/csr.py @@ -0,0 +1,489 @@ +"""Compressed Sparse Row matrix format""" + +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = ['csr_matrix', 
'isspmatrix_csr'] + + +import numpy as np +from scipy._lib.six import xrange + +from .base import spmatrix + +from ._sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \ + get_csr_submatrix, csr_sample_values +from .sputils import (upcast, isintlike, IndexMixin, issequence, + get_index_dtype, ismatrix) + +from .compressed import _cs_matrix + + +class csr_matrix(_cs_matrix, IndexMixin): + """ + Compressed Sparse Row matrix + + This can be instantiated in several ways: + csr_matrix(D) + with a dense matrix or rank-2 ndarray D + + csr_matrix(S) + with another sparse matrix S (equivalent to S.tocsr()) + + csr_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)]) + where ``data``, ``row_ind`` and ``col_ind`` satisfy the + relationship ``a[row_ind[k], col_ind[k]] = data[k]``. + + csr_matrix((data, indices, indptr), [shape=(M, N)]) + is the standard CSR representation where the column indices for + row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their + corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. + If the shape parameter is not supplied, the matrix dimensions + are inferred from the index arrays. + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + Number of nonzero elements + data + CSR format data array of the matrix + indices + CSR format index array of the matrix + indptr + CSR format index pointer array of the matrix + has_sorted_indices + Whether indices are sorted + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + Advantages of the CSR format + - efficient arithmetic operations CSR + CSR, CSR * CSR, etc. + - efficient row slicing + - fast matrix vector products + + Disadvantages of the CSR format + - slow column slicing operations (consider CSC) + - changes to the sparsity structure are expensive (consider LIL or DOK) + + Examples + -------- + + >>> import numpy as np + >>> from scipy.sparse import csr_matrix + >>> csr_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 0, 1, 2, 2, 2]) + >>> col = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + As an example of how to construct a CSR matrix incrementally, + the following snippet builds a term-document matrix from texts: + + >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]] + >>> indptr = [0] + >>> indices = [] + >>> data = [] + >>> vocabulary = {} + >>> for d in docs: + ... for term in d: + ... index = vocabulary.setdefault(term, len(vocabulary)) + ... indices.append(index) + ... data.append(1) + ... indptr.append(len(indices)) + ... 
+ >>> csr_matrix((data, indices, indptr), dtype=int).toarray() + array([[2, 1, 0, 0], + [0, 1, 1, 1]]) + + """ + format = 'csr' + + def transpose(self, axes=None, copy=False): + if axes is not None: + raise ValueError(("Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.")) + + M, N = self.shape + + from .csc import csc_matrix + return csc_matrix((self.data, self.indices, + self.indptr), shape=(N, M), copy=copy) + + transpose.__doc__ = spmatrix.transpose.__doc__ + + def tolil(self, copy=False): + from .lil import lil_matrix + lil = lil_matrix(self.shape,dtype=self.dtype) + + self.sum_duplicates() + ptr,ind,dat = self.indptr,self.indices,self.data + rows, data = lil.rows, lil.data + + for n in xrange(self.shape[0]): + start = ptr[n] + end = ptr[n+1] + rows[n] = ind[start:end].tolist() + data[n] = dat[start:end].tolist() + + return lil + + tolil.__doc__ = spmatrix.tolil.__doc__ + + def tocsr(self, copy=False): + if copy: + return self.copy() + else: + return self + + tocsr.__doc__ = spmatrix.tocsr.__doc__ + + def tocsc(self, copy=False): + idx_dtype = get_index_dtype((self.indptr, self.indices), + maxval=max(self.nnz, self.shape[0])) + indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype) + indices = np.empty(self.nnz, dtype=idx_dtype) + data = np.empty(self.nnz, dtype=upcast(self.dtype)) + + csr_tocsc(self.shape[0], self.shape[1], + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + self.data, + indptr, + indices, + data) + + from .csc import csc_matrix + A = csc_matrix((data, indices, indptr), shape=self.shape) + A.has_sorted_indices = True + return A + + tocsc.__doc__ = spmatrix.tocsc.__doc__ + + def tobsr(self, blocksize=None, copy=True): + from .bsr import bsr_matrix + + if blocksize is None: + from .spfuncs import estimate_blocksize + return self.tobsr(blocksize=estimate_blocksize(self)) + + elif blocksize == (1,1): + arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr) + return bsr_matrix(arg1, shape=self.shape, copy=copy) + + else: + R,C = blocksize + M,N = self.shape + + if R < 1 or C < 1 or M % R != 0 or N % C != 0: + raise ValueError('invalid blocksize %s' % blocksize) + + blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices) + + idx_dtype = get_index_dtype((self.indptr, self.indices), + maxval=max(N//C, blks)) + indptr = np.empty(M//R+1, dtype=idx_dtype) + indices = np.empty(blks, dtype=idx_dtype) + data = np.zeros((blks,R,C), dtype=self.dtype) + + csr_tobsr(M, N, R, C, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + self.data, + indptr, indices, data.ravel()) + + return bsr_matrix((data,indices,indptr), shape=self.shape) + + tobsr.__doc__ = spmatrix.tobsr.__doc__ + + # these functions are used by the parent class (_cs_matrix) + # to remove redudancy between csc_matrix and csr_matrix + def _swap(self, x): + """swap the members of x if this is a column-oriented matrix + """ + return x + + def __getitem__(self, key): + def asindices(x): + try: + x = np.asarray(x) + + # Check index contents to avoid creating 64bit arrays needlessly + idx_dtype = get_index_dtype((x,), check_contents=True) + if idx_dtype != x.dtype: + x = x.astype(idx_dtype) + except Exception: + raise IndexError('invalid index') + else: + return x + + def check_bounds(indices, N): + if indices.size == 0: + return (0, 0) + + max_indx = indices.max() + if max_indx >= N: + raise IndexError('index (%d) out of range' % max_indx) + + min_indx = indices.min() + if min_indx < -N: + raise IndexError('index 
(%d) out of range' % (N + min_indx)) + + return min_indx, max_indx + + def extractor(indices,N): + """Return a sparse matrix P so that P*self implements + slicing of the form self[[1,2,3],:] + """ + indices = asindices(indices).copy() + + min_indx, max_indx = check_bounds(indices, N) + + if min_indx < 0: + indices[indices < 0] += N + + indptr = np.arange(len(indices)+1, dtype=indices.dtype) + data = np.ones(len(indices), dtype=self.dtype) + shape = (len(indices),N) + + return csr_matrix((data,indices,indptr), shape=shape, + dtype=self.dtype, copy=False) + + row, col = self._unpack_index(key) + + # First attempt to use original row optimized methods + # [1, ?] + if isintlike(row): + # [i, j] + if isintlike(col): + return self._get_single_element(row, col) + # [i, 1:2] + elif isinstance(col, slice): + return self._get_row_slice(row, col) + # [i, [1, 2]] + elif issequence(col): + P = extractor(col,self.shape[1]).T + return self[row, :] * P + elif isinstance(row, slice): + # [1:2,??] + if ((isintlike(col) and row.step in (1, None)) or + (isinstance(col, slice) and + col.step in (1, None) and + row.step in (1, None))): + # col is int or slice with step 1, row is slice with step 1. + return self._get_submatrix(row, col) + elif issequence(col): + # row is slice, col is sequence. + P = extractor(col,self.shape[1]).T # [1:2,[1,2]] + sliced = self + if row != slice(None, None, None): + sliced = sliced[row,:] + return sliced * P + + elif issequence(row): + # [[1,2],??] + if isintlike(col) or isinstance(col,slice): + P = extractor(row, self.shape[0]) # [[1,2],j] or [[1,2],1:2] + extracted = P * self + if col == slice(None, None, None): + return extracted + else: + return extracted[:,col] + + elif ismatrix(row) and issequence(col): + if len(row[0]) == 1 and isintlike(row[0][0]): + # [[[1],[2]], [1,2]], outer indexing + row = asindices(row) + P_row = extractor(row[:,0], self.shape[0]) + P_col = extractor(col, self.shape[1]).T + return P_row * self * P_col + + if not (issequence(col) and issequence(row)): + # Sample elementwise + row, col = self._index_to_arrays(row, col) + + row = asindices(row) + col = asindices(col) + if row.shape != col.shape: + raise IndexError('number of row and column indices differ') + assert row.ndim <= 2 + + num_samples = np.size(row) + if num_samples == 0: + return csr_matrix(np.atleast_2d(row).shape, dtype=self.dtype) + check_bounds(row, self.shape[0]) + check_bounds(col, self.shape[1]) + + val = np.empty(num_samples, dtype=self.dtype) + csr_sample_values(self.shape[0], self.shape[1], + self.indptr, self.indices, self.data, + num_samples, row.ravel(), col.ravel(), val) + if row.ndim == 1: + # row and col are 1d + return np.asmatrix(val) + return self.__class__(val.reshape(row.shape)) + + def __iter__(self): + indptr = np.zeros(2, dtype=self.indptr.dtype) + shape = (1, self.shape[1]) + i0 = 0 + for i1 in self.indptr[1:]: + indptr[1] = i1 - i0 + indices = self.indices[i0:i1] + data = self.data[i0:i1] + yield csr_matrix((data, indices, indptr), shape=shape, copy=True) + i0 = i1 + + def getrow(self, i): + """Returns a copy of row i of the matrix, as a (1 x n) + CSR matrix (row vector). 
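+
+        Example (editor's addition, not part of the upstream docstring):
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 0, 2], [0, 3, 0]])
+        >>> A.getrow(1).toarray()
+        array([[0, 3, 0]])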
+ """ + M, N = self.shape + i = int(i) + if i < 0: + i += M + if i < 0 or i >= M: + raise IndexError('index (%d) out of range' % i) + idx = slice(*self.indptr[i:i+2]) + data = self.data[idx].copy() + indices = self.indices[idx].copy() + indptr = np.array([0, len(indices)], dtype=self.indptr.dtype) + return csr_matrix((data, indices, indptr), shape=(1, N), + dtype=self.dtype, copy=False) + + def getcol(self, i): + """Returns a copy of column i of the matrix, as a (m x 1) + CSR matrix (column vector). + """ + return self._get_submatrix(slice(None), i) + + def _get_row_slice(self, i, cslice): + """Returns a copy of row self[i, cslice] + """ + M, N = self.shape + + if i < 0: + i += M + + if i < 0 or i >= M: + raise IndexError('index (%d) out of range' % i) + + start, stop, stride = cslice.indices(N) + + if stride == 1: + # for stride == 1, get_csr_submatrix is faster + row_indptr, row_indices, row_data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, i, i + 1, + start, stop) + else: + # other strides need new code + row_indices = self.indices[self.indptr[i]:self.indptr[i + 1]] + row_data = self.data[self.indptr[i]:self.indptr[i + 1]] + + if stride > 0: + ind = (row_indices >= start) & (row_indices < stop) + else: + ind = (row_indices <= start) & (row_indices > stop) + + if abs(stride) > 1: + ind &= (row_indices - start) % stride == 0 + + row_indices = (row_indices[ind] - start) // stride + row_data = row_data[ind] + row_indptr = np.array([0, len(row_indices)]) + + if stride < 0: + row_data = row_data[::-1] + row_indices = abs(row_indices[::-1]) + + shape = (1, int(np.ceil(float(stop - start) / stride))) + return csr_matrix((row_data, row_indices, row_indptr), shape=shape, + dtype=self.dtype, copy=False) + + def _get_submatrix(self, row_slice, col_slice): + """Return a submatrix of this matrix (new matrix is created).""" + + def process_slice(sl, num): + if isinstance(sl, slice): + i0, i1, stride = sl.indices(num) + if stride != 1: + raise ValueError('slicing with step != 1 not supported') + elif isintlike(sl): + if sl < 0: + sl += num + i0, i1 = sl, sl + 1 + else: + raise TypeError('expected slice or scalar') + + if not (0 <= i0 <= num) or not (0 <= i1 <= num) or not (i0 <= i1): + raise IndexError( + "index out of bounds: 0 <= %d <= %d, 0 <= %d <= %d," + " %d <= %d" % (i0, num, i1, num, i0, i1)) + return i0, i1 + + M,N = self.shape + i0, i1 = process_slice(row_slice, M) + j0, j1 = process_slice(col_slice, N) + + indptr, indices, data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1) + + shape = (i1 - i0, j1 - j0) + return self.__class__((data, indices, indptr), shape=shape, + dtype=self.dtype, copy=False) + + +def isspmatrix_csr(x): + """Is x of csr_matrix type? 
+
+    Parameters
+    ----------
+    x
+        object to check for being a csr matrix
+
+    Returns
+    -------
+    bool
+        True if x is a csr matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, isspmatrix_csr
+    >>> isspmatrix_csr(csr_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csr
+    >>> isspmatrix_csr(csc_matrix([[5]]))
+    False
+    """
+    return isinstance(x, csr_matrix)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/csr.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/csr.pyc
new file mode 100644
index 0000000..593d076
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/csr.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/data.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/data.py
new file mode 100644
index 0000000..f9df142
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/data.py
@@ -0,0 +1,396 @@
+"""Base class for sparse matrices with a .data attribute
+
+    subclasses must provide a _with_data() method that
+    creates a new matrix with the same sparsity pattern
+    as self but with a different data array
+
+"""
+
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+
+from .base import spmatrix, _ufuncs_with_fixed_point_at_zero
+from .sputils import isscalarlike, validateaxis
+
+__all__ = []
+
+
+# TODO implement all relevant operations
+# use .data.__methods__() instead of /=, *=, etc.
+class _data_matrix(spmatrix):
+    def __init__(self):
+        spmatrix.__init__(self)
+
+    def _get_dtype(self):
+        return self.data.dtype
+
+    def _set_dtype(self, newtype):
+        self.data.dtype = newtype
+    dtype = property(fget=_get_dtype, fset=_set_dtype)
+
+    def _deduped_data(self):
+        if hasattr(self, 'sum_duplicates'):
+            self.sum_duplicates()
+        return self.data
+
+    def __abs__(self):
+        return self._with_data(abs(self._deduped_data()))
+
+    def _real(self):
+        return self._with_data(self.data.real)
+
+    def _imag(self):
+        return self._with_data(self.data.imag)
+
+    def __neg__(self):
+        if self.dtype.kind == 'b':
+            raise NotImplementedError('negating a sparse boolean '
+                                      'matrix is not supported')
+        return self._with_data(-self.data)
+
+    def __imul__(self, other):  # self *= other
+        if isscalarlike(other):
+            self.data *= other
+            return self
+        else:
+            return NotImplemented
+
+    def __itruediv__(self, other):  # self /= other
+        if isscalarlike(other):
+            recip = 1.0 / other
+            self.data *= recip
+            return self
+        else:
+            return NotImplemented
+
+    def astype(self, dtype, casting='unsafe', copy=True):
+        dtype = np.dtype(dtype)
+        if self.dtype != dtype:
+            return self._with_data(
+                self._deduped_data().astype(dtype, casting=casting, copy=copy),
+                copy=copy)
+        elif copy:
+            return self.copy()
+        else:
+            return self
+
+    astype.__doc__ = spmatrix.astype.__doc__
+
+    def conj(self, copy=True):
+        if np.issubdtype(self.dtype, np.complexfloating):
+            return self._with_data(self.data.conj(), copy=copy)
+        elif copy:
+            return self.copy()
+        else:
+            return self
+
+    conj.__doc__ = spmatrix.conj.__doc__
+
+    def copy(self):
+        return self._with_data(self.data.copy(), copy=True)
+
+    copy.__doc__ = spmatrix.copy.__doc__
+
+    def count_nonzero(self):
+        return np.count_nonzero(self._deduped_data())
+
+    count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
+
+    def power(self, n, dtype=None):
+        """
+        This function performs element-wise power.
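+
+        Example (editor's addition, illustrating the element-wise
+        behaviour described above; not part of the upstream docstring):
+
+        >>> from scipy.sparse import csr_matrix
+        >>> csr_matrix([[1, 2], [0, 3]]).power(2).toarray()
+        array([[1, 4],
+               [0, 9]])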
+ + Parameters + ---------- + n : n is a scalar + + dtype : If dtype is not specified, the current dtype will be preserved. + """ + if not isscalarlike(n): + raise NotImplementedError("input is not scalar") + + data = self._deduped_data() + if dtype is not None: + data = data.astype(dtype) + return self._with_data(data ** n) + + ########################### + # Multiplication handlers # + ########################### + + def _mul_scalar(self, other): + return self._with_data(self.data * other) + + +# Add the numpy unary ufuncs for which func(0) = 0 to _data_matrix. +for npfunc in _ufuncs_with_fixed_point_at_zero: + name = npfunc.__name__ + + def _create_method(op): + def method(self): + result = op(self._deduped_data()) + return self._with_data(result, copy=True) + + method.__doc__ = ("Element-wise %s.\n\n" + "See numpy.%s for more information." % (name, name)) + method.__name__ = name + + return method + + setattr(_data_matrix, name, _create_method(npfunc)) + + +def _find_missing_index(ind, n): + for k, a in enumerate(ind): + if k != a: + return k + + k += 1 + if k < n: + return k + else: + return -1 + + +class _minmax_mixin(object): + """Mixin for min and max methods. + + These are not implemented for dia_matrix, hence the separate class. + """ + + def _min_or_max_axis(self, axis, min_or_max): + N = self.shape[axis] + if N == 0: + raise ValueError("zero-size array to reduction operation") + M = self.shape[1 - axis] + + mat = self.tocsc() if axis == 0 else self.tocsr() + mat.sum_duplicates() + + major_index, value = mat._minor_reduce(min_or_max) + not_full = np.diff(mat.indptr)[major_index] < N + value[not_full] = min_or_max(value[not_full], 0) + + mask = value != 0 + major_index = np.compress(mask, major_index) + value = np.compress(mask, value) + + from . 
import coo_matrix + if axis == 0: + return coo_matrix((value, (np.zeros(len(value)), major_index)), + dtype=self.dtype, shape=(1, M)) + else: + return coo_matrix((value, (major_index, np.zeros(len(value)))), + dtype=self.dtype, shape=(M, 1)) + + def _min_or_max(self, axis, out, min_or_max): + if out is not None: + raise ValueError(("Sparse matrices do not support " + "an 'out' parameter.")) + + validateaxis(axis) + + if axis is None: + if 0 in self.shape: + raise ValueError("zero-size array to reduction operation") + + zero = self.dtype.type(0) + if self.nnz == 0: + return zero + m = min_or_max.reduce(self._deduped_data().ravel()) + if self.nnz != np.product(self.shape): + m = min_or_max(zero, m) + return m + + if axis < 0: + axis += 2 + + if (axis == 0) or (axis == 1): + return self._min_or_max_axis(axis, min_or_max) + else: + raise ValueError("axis out of range") + + def _arg_min_or_max_axis(self, axis, op, compare): + if self.shape[axis] == 0: + raise ValueError("Can't apply the operation along a zero-sized " + "dimension.") + + if axis < 0: + axis += 2 + + zero = self.dtype.type(0) + + mat = self.tocsc() if axis == 0 else self.tocsr() + mat.sum_duplicates() + + ret_size, line_size = mat._swap(mat.shape) + ret = np.zeros(ret_size, dtype=int) + + nz_lines, = np.nonzero(np.diff(mat.indptr)) + for i in nz_lines: + p, q = mat.indptr[i:i + 2] + data = mat.data[p:q] + indices = mat.indices[p:q] + am = op(data) + m = data[am] + if compare(m, zero) or q - p == line_size: + ret[i] = indices[am] + else: + zero_ind = _find_missing_index(indices, line_size) + if m == zero: + ret[i] = min(am, zero_ind) + else: + ret[i] = zero_ind + + if axis == 1: + ret = ret.reshape(-1, 1) + + return np.asmatrix(ret) + + def _arg_min_or_max(self, axis, out, op, compare): + if out is not None: + raise ValueError("Sparse matrices do not support " + "an 'out' parameter.") + + validateaxis(axis) + + if axis is None: + if 0 in self.shape: + raise ValueError("Can't apply the operation to " + "an empty matrix.") + + if self.nnz == 0: + return 0 + else: + zero = self.dtype.type(0) + mat = self.tocoo() + mat.sum_duplicates() + am = op(mat.data) + m = mat.data[am] + + if compare(m, zero): + return mat.row[am] * mat.shape[1] + mat.col[am] + else: + size = np.product(mat.shape) + if size == mat.nnz: + return am + else: + ind = mat.row * mat.shape[1] + mat.col + zero_ind = _find_missing_index(ind, size) + if m == zero: + return min(zero_ind, am) + else: + return zero_ind + + return self._arg_min_or_max_axis(axis, op, compare) + + def max(self, axis=None, out=None): + """ + Return the maximum of the matrix or maximum along an axis. + This takes all elements into account, not just the non-zero ones. + + Parameters + ---------- + axis : {-2, -1, 0, 1, None} optional + Axis along which the sum is computed. The default is to + compute the maximum over all the matrix elements, returning + a scalar (i.e. `axis` = `None`). + + out : None, optional + This argument is in the signature *solely* for NumPy + compatibility reasons. Do not pass in anything except + for the default value, as this argument is not used. + + Returns + ------- + amax : coo_matrix or scalar + Maximum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is given, the result is a sparse.coo_matrix of dimension + ``a.ndim - 1``. + + See Also + -------- + min : The minimum value of a sparse matrix along a given axis. 
+        np.matrix.max : NumPy's implementation of 'max' for matrices
+
+        """
+        return self._min_or_max(axis, out, np.maximum)
+
+    def min(self, axis=None, out=None):
+        """
+        Return the minimum of the matrix or minimum along an axis.
+        This takes all elements into account, not just the non-zero ones.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None} optional
+            Axis along which the minimum is computed. The default is to
+            compute the minimum over all the matrix elements, returning
+            a scalar (i.e. `axis` = `None`).
+
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        amin : coo_matrix or scalar
+            Minimum of `a`. If `axis` is None, the result is a scalar value.
+            If `axis` is given, the result is a sparse.coo_matrix of dimension
+            ``a.ndim - 1``.
+
+        See Also
+        --------
+        max : The maximum value of a sparse matrix along a given axis.
+        np.matrix.min : NumPy's implementation of 'min' for matrices
+
+        """
+        return self._min_or_max(axis, out, np.minimum)
+
+    def argmax(self, axis=None, out=None):
+        """Return indices of maximum elements along an axis.
+
+        Implicit zero elements are also taken into account. If there are
+        several maximum values, the index of the first occurrence is returned.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the argmax is computed. If None (default), index
+            of the maximum element in the flattened data is returned.
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        ind : np.matrix or int
+            Indices of maximum elements. If matrix, its size along `axis` is 1.
+        """
+        return self._arg_min_or_max(axis, out, np.argmax, np.greater)
+
+    def argmin(self, axis=None, out=None):
+        """Return indices of minimum elements along an axis.
+
+        Implicit zero elements are also taken into account. If there are
+        several minimum values, the index of the first occurrence is returned.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the argmin is computed. If None (default), index
+            of the minimum element in the flattened data is returned.
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        ind : np.matrix or int
+            Indices of minimum elements. If matrix, its size along `axis` is 1.
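+
+        Example (editor's addition, not part of the upstream docstring;
+        implicit zeros participate, so the flattened index of an implicit
+        zero can be returned):
+
+        >>> from scipy.sparse import csr_matrix
+        >>> csr_matrix([[1, 2], [0, 3]]).argmin()
+        2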
+ """ + return self._arg_min_or_max(axis, out, np.argmin, np.less) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/data.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/data.pyc new file mode 100644 index 0000000..52c684e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/data.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/dia.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/dia.py new file mode 100644 index 0000000..4d76109 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/dia.py @@ -0,0 +1,420 @@ +"""Sparse DIAgonal format""" + +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = ['dia_matrix', 'isspmatrix_dia'] + +import numpy as np + +from .base import isspmatrix, _formats, spmatrix +from .data import _data_matrix +from .sputils import (isshape, upcast_char, getdtype, get_index_dtype, + get_sum_dtype, validateaxis, check_shape) +from ._sparsetools import dia_matvec + + +class dia_matrix(_data_matrix): + """Sparse matrix with DIAgonal storage + + This can be instantiated in several ways: + dia_matrix(D) + with a dense matrix + + dia_matrix(S) + with another sparse matrix S (equivalent to S.todia()) + + dia_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N), + dtype is optional, defaulting to dtype='d'. + + dia_matrix((data, offsets), shape=(M, N)) + where the ``data[k,:]`` stores the diagonal entries for + diagonal ``offsets[k]`` (See example below) + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + Number of nonzero elements + data + DIA format data array of the matrix + offsets + DIA format offset array of the matrix + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. 
+ + Examples + -------- + + >>> import numpy as np + >>> from scipy.sparse import dia_matrix + >>> dia_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0) + >>> offsets = np.array([0, -1, 2]) + >>> dia_matrix((data, offsets), shape=(4, 4)).toarray() + array([[1, 0, 3, 0], + [1, 2, 0, 4], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + """ + format = 'dia' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _data_matrix.__init__(self) + + if isspmatrix_dia(arg1): + if copy: + arg1 = arg1.copy() + self.data = arg1.data + self.offsets = arg1.offsets + self._shape = check_shape(arg1.shape) + elif isspmatrix(arg1): + if isspmatrix_dia(arg1) and copy: + A = arg1.copy() + else: + A = arg1.todia() + self.data = A.data + self.offsets = A.offsets + self._shape = check_shape(A.shape) + elif isinstance(arg1, tuple): + if isshape(arg1): + # It's a tuple of matrix dimensions (M, N) + # create empty matrix + self._shape = check_shape(arg1) + self.data = np.zeros((0,0), getdtype(dtype, default=float)) + idx_dtype = get_index_dtype(maxval=max(self.shape)) + self.offsets = np.zeros((0), dtype=idx_dtype) + else: + try: + # Try interpreting it as (data, offsets) + data, offsets = arg1 + except Exception: + raise ValueError('unrecognized form for dia_matrix constructor') + else: + if shape is None: + raise ValueError('expected a shape argument') + self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy)) + self.offsets = np.atleast_1d(np.array(arg1[1], + dtype=get_index_dtype(maxval=max(shape)), + copy=copy)) + self._shape = check_shape(shape) + else: + #must be dense, convert to COO first, then to DIA + try: + arg1 = np.asarray(arg1) + except Exception: + raise ValueError("unrecognized form for" + " %s_matrix constructor" % self.format) + from .coo import coo_matrix + A = coo_matrix(arg1, dtype=dtype, shape=shape).todia() + self.data = A.data + self.offsets = A.offsets + self._shape = check_shape(A.shape) + + if dtype is not None: + self.data = self.data.astype(dtype) + + #check format + if self.offsets.ndim != 1: + raise ValueError('offsets array must have rank 1') + + if self.data.ndim != 2: + raise ValueError('data array must have rank 2') + + if self.data.shape[0] != len(self.offsets): + raise ValueError('number of diagonals (%d) ' + 'does not match the number of offsets (%d)' + % (self.data.shape[0], len(self.offsets))) + + if len(np.unique(self.offsets)) != len(self.offsets): + raise ValueError('offset array contains duplicate values') + + def __repr__(self): + format = _formats[self.getformat()][1] + return "<%dx%d sparse matrix of type '%s'\n" \ + "\twith %d stored elements (%d diagonals) in %s format>" % \ + (self.shape + (self.dtype.type, self.nnz, self.data.shape[0], + format)) + + def _data_mask(self): + """Returns a mask of the same shape as self.data, where + mask[i,j] is True when data[i,j] corresponds to a stored element.""" + num_rows, num_cols = self.shape + offset_inds = np.arange(self.data.shape[1]) + row = offset_inds - self.offsets[:,None] + mask = (row >= 0) + mask &= (row < num_rows) + mask &= (offset_inds < num_cols) + return mask + + def count_nonzero(self): + mask = self._data_mask() + return np.count_nonzero(self.data[mask]) + + def getnnz(self, axis=None): + if axis is not None: + raise NotImplementedError("getnnz over an axis is not implemented " + "for DIA format") + M,N = self.shape + nnz = 0 + for k in self.offsets: + if k > 0: + nnz += min(M,N-k) + 
else: + nnz += min(M+k,N) + return int(nnz) + + getnnz.__doc__ = spmatrix.getnnz.__doc__ + count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__ + + def sum(self, axis=None, dtype=None, out=None): + validateaxis(axis) + + if axis is not None and axis < 0: + axis += 2 + + res_dtype = get_sum_dtype(self.dtype) + num_rows, num_cols = self.shape + ret = None + + if axis == 0: + mask = self._data_mask() + x = (self.data * mask).sum(axis=0) + if x.shape[0] == num_cols: + res = x + else: + res = np.zeros(num_cols, dtype=x.dtype) + res[:x.shape[0]] = x + ret = np.matrix(res, dtype=res_dtype) + + else: + row_sums = np.zeros(num_rows, dtype=res_dtype) + one = np.ones(num_cols, dtype=res_dtype) + dia_matvec(num_rows, num_cols, len(self.offsets), + self.data.shape[1], self.offsets, self.data, one, row_sums) + + row_sums = np.matrix(row_sums) + + if axis is None: + return row_sums.sum(dtype=dtype, out=out) + + if axis is not None: + row_sums = row_sums.T + + ret = np.matrix(row_sums.sum(axis=axis)) + + if out is not None and out.shape != ret.shape: + raise ValueError("dimensions do not match") + + return ret.sum(axis=(), dtype=dtype, out=out) + + sum.__doc__ = spmatrix.sum.__doc__ + + def _mul_vector(self, other): + x = other + + y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char, + x.dtype.char)) + + L = self.data.shape[1] + + M,N = self.shape + + dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel()) + + return y + + def _mul_multimatrix(self, other): + return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T]) + + def _setdiag(self, values, k=0): + M, N = self.shape + + if values.ndim == 0: + # broadcast + values_n = np.inf + else: + values_n = len(values) + + if k < 0: + n = min(M + k, N, values_n) + min_index = 0 + max_index = n + else: + n = min(M, N - k, values_n) + min_index = k + max_index = k + n + + if values.ndim != 0: + # allow also longer sequences + values = values[:n] + + if k in self.offsets: + self.data[self.offsets == k, min_index:max_index] = values + else: + self.offsets = np.append(self.offsets, self.offsets.dtype.type(k)) + m = max(max_index, self.data.shape[1]) + data = np.zeros((self.data.shape[0]+1, m), dtype=self.data.dtype) + data[:-1,:self.data.shape[1]] = self.data + data[-1, min_index:max_index] = values + self.data = data + + def todia(self, copy=False): + if copy: + return self.copy() + else: + return self + + todia.__doc__ = spmatrix.todia.__doc__ + + def transpose(self, axes=None, copy=False): + if axes is not None: + raise ValueError(("Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.")) + + num_rows, num_cols = self.shape + max_dim = max(self.shape) + + # flip diagonal offsets + offsets = -self.offsets + + # re-align the data matrix + r = np.arange(len(offsets), dtype=np.intc)[:, None] + c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None] + pad_amount = max(0, max_dim-self.data.shape[1]) + data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount), + dtype=self.data.dtype))) + data = data[r, c] + return dia_matrix((data, offsets), shape=( + num_cols, num_rows), copy=copy) + + transpose.__doc__ = spmatrix.transpose.__doc__ + + def diagonal(self, k=0): + rows, cols = self.shape + if k <= -rows or k >= cols: + raise ValueError("k exceeds matrix dimensions") + idx, = np.nonzero(self.offsets == k) + first_col, last_col = max(0, k), min(rows + k, cols) + if idx.size == 0: + return np.zeros(last_col - first_col, 
dtype=self.data.dtype) + return self.data[idx[0], first_col:last_col] + + diagonal.__doc__ = spmatrix.diagonal.__doc__ + + def tocsc(self, copy=False): + from .csc import csc_matrix + if self.nnz == 0: + return csc_matrix(self.shape, dtype=self.dtype) + + num_rows, num_cols = self.shape + num_offsets, offset_len = self.data.shape + offset_inds = np.arange(offset_len) + + row = offset_inds - self.offsets[:,None] + mask = (row >= 0) + mask &= (row < num_rows) + mask &= (offset_inds < num_cols) + mask &= (self.data != 0) + + idx_dtype = get_index_dtype(maxval=max(self.shape)) + indptr = np.zeros(num_cols + 1, dtype=idx_dtype) + indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0)) + indptr[offset_len+1:] = indptr[offset_len] + indices = row.T[mask.T].astype(idx_dtype, copy=False) + data = self.data.T[mask.T] + return csc_matrix((data, indices, indptr), shape=self.shape, + dtype=self.dtype) + + tocsc.__doc__ = spmatrix.tocsc.__doc__ + + def tocoo(self, copy=False): + num_rows, num_cols = self.shape + num_offsets, offset_len = self.data.shape + offset_inds = np.arange(offset_len) + + row = offset_inds - self.offsets[:,None] + mask = (row >= 0) + mask &= (row < num_rows) + mask &= (offset_inds < num_cols) + mask &= (self.data != 0) + row = row[mask] + col = np.tile(offset_inds, num_offsets)[mask.ravel()] + data = self.data[mask] + + from .coo import coo_matrix + A = coo_matrix((data,(row,col)), shape=self.shape, dtype=self.dtype) + A.has_canonical_format = True + return A + + tocoo.__doc__ = spmatrix.tocoo.__doc__ + + # needed by _data_matrix + def _with_data(self, data, copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the structure arrays are copied. + """ + if copy: + return dia_matrix((data, self.offsets.copy()), shape=self.shape) + else: + return dia_matrix((data,self.offsets), shape=self.shape) + + def resize(self, *shape): + shape = check_shape(shape) + M, N = shape + # we do not need to handle the case of expanding N + self.data = self.data[:, :N] + + if (M > self.shape[0] and + np.any(self.offsets + self.shape[0] < self.data.shape[1])): + # explicitly clear values that were previously hidden + mask = (self.offsets[:, None] + self.shape[0] <= + np.arange(self.data.shape[1])) + self.data[mask] = 0 + + self._shape = shape + + resize.__doc__ = spmatrix.resize.__doc__ + + +def isspmatrix_dia(x): + """Is x of dia_matrix type? 
+ + Parameters + ---------- + x + object to check for being a dia matrix + + Returns + ------- + bool + True if x is a dia matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import dia_matrix, isspmatrix_dia + >>> isspmatrix_dia(dia_matrix([[5]])) + True + + >>> from scipy.sparse import dia_matrix, csr_matrix, isspmatrix_dia + >>> isspmatrix_dia(csr_matrix([[5]])) + False + """ + return isinstance(x, dia_matrix) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/dia.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/dia.pyc new file mode 100644 index 0000000..8cfb917 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/dia.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/dok.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/dok.py new file mode 100644 index 0000000..9dfcd29 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/dok.py @@ -0,0 +1,538 @@ +"""Dictionary Of Keys based matrix""" + +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = ['dok_matrix', 'isspmatrix_dok'] + +import functools +import operator +import itertools + +import numpy as np + +from scipy._lib.six import zip as izip, xrange, iteritems, iterkeys, itervalues + +from .base import spmatrix, isspmatrix +from .sputils import (isdense, getdtype, isshape, isintlike, isscalarlike, + upcast, upcast_scalar, IndexMixin, get_index_dtype, + check_shape) + +try: + from operator import isSequenceType as _is_sequence +except ImportError: + def _is_sequence(x): + return (hasattr(x, '__len__') or hasattr(x, '__next__') + or hasattr(x, 'next')) + + +class dok_matrix(spmatrix, IndexMixin, dict): + """ + Dictionary Of Keys based sparse matrix. + + This is an efficient structure for constructing sparse + matrices incrementally. + + This can be instantiated in several ways: + dok_matrix(D) + with a dense matrix, D + + dok_matrix(S) + with a sparse matrix, S + + dok_matrix((M,N), [dtype]) + create the matrix with initial shape (M,N) + dtype is optional, defaulting to dtype='d' + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + Number of nonzero elements + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + Allows for efficient O(1) access of individual elements. + Duplicates are not allowed. + Can be efficiently converted to a coo_matrix once constructed. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import dok_matrix + >>> S = dok_matrix((5, 5), dtype=np.float32) + >>> for i in range(5): + ... for j in range(5): + ... 
S[i, j] = i + j # Update element + + """ + format = 'dok' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + dict.__init__(self) + spmatrix.__init__(self) + + self.dtype = getdtype(dtype, default=float) + if isinstance(arg1, tuple) and isshape(arg1): # (M,N) + M, N = arg1 + self._shape = check_shape((M, N)) + elif isspmatrix(arg1): # Sparse ctor + if isspmatrix_dok(arg1) and copy: + arg1 = arg1.copy() + else: + arg1 = arg1.todok() + + if dtype is not None: + arg1 = arg1.astype(dtype) + + dict.update(self, arg1) + self._shape = check_shape(arg1.shape) + self.dtype = arg1.dtype + else: # Dense ctor + try: + arg1 = np.asarray(arg1) + except Exception: + raise TypeError('Invalid input format.') + + if len(arg1.shape) != 2: + raise TypeError('Expected rank <=2 dense array or matrix.') + + from .coo import coo_matrix + d = coo_matrix(arg1, dtype=dtype).todok() + dict.update(self, d) + self._shape = check_shape(arg1.shape) + self.dtype = d.dtype + + def update(self, val): + # Prevent direct usage of update + raise NotImplementedError("Direct modification to dok_matrix element " + "is not allowed.") + + def _update(self, data): + """An update method for dict data defined for direct access to + `dok_matrix` data. Main purpose is to be used for effcient conversion + from other spmatrix classes. Has no checking if `data` is valid.""" + return dict.update(self, data) + + def set_shape(self, shape): + new_matrix = self.reshape(shape, copy=False).asformat(self.format) + self.__dict__ = new_matrix.__dict__ + dict.clear(self) + dict.update(self, new_matrix) + + shape = property(fget=spmatrix.get_shape, fset=set_shape) + + def getnnz(self, axis=None): + if axis is not None: + raise NotImplementedError("getnnz over an axis is not implemented " + "for DOK format.") + return dict.__len__(self) + + def count_nonzero(self): + return sum(x != 0 for x in itervalues(self)) + + getnnz.__doc__ = spmatrix.getnnz.__doc__ + count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__ + + def __len__(self): + return dict.__len__(self) + + def get(self, key, default=0.): + """This overrides the dict.get method, providing type checking + but otherwise equivalent functionality. + """ + try: + i, j = key + assert isintlike(i) and isintlike(j) + except (AssertionError, TypeError, ValueError): + raise IndexError('Index must be a pair of integers.') + if (i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]): + raise IndexError('Index out of bounds.') + return dict.get(self, key, default) + + def __getitem__(self, index): + """If key=(i, j) is a pair of integers, return the corresponding + element. If either i or j is a slice or sequence, return a new sparse + matrix with just these elements. 
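+
+        Example (editor's addition, not part of the upstream docstring):
+
+        >>> from scipy.sparse import dok_matrix
+        >>> S = dok_matrix((2, 3))
+        >>> S[0, 1] = 7.0
+        >>> S[0, 1]
+        7.0
+        >>> S[1, 2]  # unset entries read as zero
+        0.0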
+ """ + zero = self.dtype.type(0) + i, j = self._unpack_index(index) + + i_intlike = isintlike(i) + j_intlike = isintlike(j) + + if i_intlike and j_intlike: + i = int(i) + j = int(j) + if i < 0: + i += self.shape[0] + if i < 0 or i >= self.shape[0]: + raise IndexError('Index out of bounds.') + if j < 0: + j += self.shape[1] + if j < 0 or j >= self.shape[1]: + raise IndexError('Index out of bounds.') + return dict.get(self, (i,j), zero) + elif ((i_intlike or isinstance(i, slice)) and + (j_intlike or isinstance(j, slice))): + # Fast path for slicing very sparse matrices + i_slice = slice(i, i+1) if i_intlike else i + j_slice = slice(j, j+1) if j_intlike else j + i_indices = i_slice.indices(self.shape[0]) + j_indices = j_slice.indices(self.shape[1]) + i_seq = xrange(*i_indices) + j_seq = xrange(*j_indices) + newshape = (len(i_seq), len(j_seq)) + newsize = _prod(newshape) + + if len(self) < 2*newsize and newsize != 0: + # Switch to the fast path only when advantageous + # (count the iterations in the loops, adjust for complexity) + # + # We also don't handle newsize == 0 here (if + # i/j_intlike, it can mean index i or j was out of + # bounds) + return self._getitem_ranges(i_indices, j_indices, newshape) + + i, j = self._index_to_arrays(i, j) + + if i.size == 0: + return dok_matrix(i.shape, dtype=self.dtype) + + min_i = i.min() + if min_i < -self.shape[0] or i.max() >= self.shape[0]: + raise IndexError('Index (%d) out of range -%d to %d.' % + (i.min(), self.shape[0], self.shape[0]-1)) + if min_i < 0: + i = i.copy() + i[i < 0] += self.shape[0] + + min_j = j.min() + if min_j < -self.shape[1] or j.max() >= self.shape[1]: + raise IndexError('Index (%d) out of range -%d to %d.' % + (j.min(), self.shape[1], self.shape[1]-1)) + if min_j < 0: + j = j.copy() + j[j < 0] += self.shape[1] + + newdok = dok_matrix(i.shape, dtype=self.dtype) + + for key in itertools.product(xrange(i.shape[0]), xrange(i.shape[1])): + v = dict.get(self, (i[key], j[key]), zero) + if v: + dict.__setitem__(newdok, key, v) + + return newdok + + def _getitem_ranges(self, i_indices, j_indices, shape): + # performance golf: we don't want Numpy scalars here, they are slow + i_start, i_stop, i_stride = map(int, i_indices) + j_start, j_stop, j_stride = map(int, j_indices) + + newdok = dok_matrix(shape, dtype=self.dtype) + + for (ii, jj) in iterkeys(self): + # ditto for numpy scalars + ii = int(ii) + jj = int(jj) + a, ra = divmod(ii - i_start, i_stride) + if a < 0 or a >= shape[0] or ra != 0: + continue + b, rb = divmod(jj - j_start, j_stride) + if b < 0 or b >= shape[1] or rb != 0: + continue + dict.__setitem__(newdok, (a, b), + dict.__getitem__(self, (ii, jj))) + return newdok + + def __setitem__(self, index, x): + if isinstance(index, tuple) and len(index) == 2: + # Integer index fast path + i, j = index + if (isintlike(i) and isintlike(j) and 0 <= i < self.shape[0] + and 0 <= j < self.shape[1]): + v = np.asarray(x, dtype=self.dtype) + if v.ndim == 0 and v != 0: + dict.__setitem__(self, (int(i), int(j)), v[()]) + return + + i, j = self._unpack_index(index) + i, j = self._index_to_arrays(i, j) + + if isspmatrix(x): + x = x.toarray() + + # Make x and i into the same shape + x = np.asarray(x, dtype=self.dtype) + x, _ = np.broadcast_arrays(x, i) + + if x.shape != i.shape: + raise ValueError("Shape mismatch in assignment.") + + if np.size(x) == 0: + return + + min_i = i.min() + if min_i < -self.shape[0] or i.max() >= self.shape[0]: + raise IndexError('Index (%d) out of range -%d to %d.' 
% + (i.min(), self.shape[0], self.shape[0]-1)) + if min_i < 0: + i = i.copy() + i[i < 0] += self.shape[0] + + min_j = j.min() + if min_j < -self.shape[1] or j.max() >= self.shape[1]: + raise IndexError('Index (%d) out of range -%d to %d.' % + (j.min(), self.shape[1], self.shape[1]-1)) + if min_j < 0: + j = j.copy() + j[j < 0] += self.shape[1] + + dict.update(self, izip(izip(i.flat, j.flat), x.flat)) + + if 0 in x: + zeroes = x == 0 + for key in izip(i[zeroes].flat, j[zeroes].flat): + if dict.__getitem__(self, key) == 0: + # may have been superseded by later update + del self[key] + + def __add__(self, other): + if isscalarlike(other): + res_dtype = upcast_scalar(self.dtype, other) + new = dok_matrix(self.shape, dtype=res_dtype) + # Add this scalar to every element. + M, N = self.shape + for key in itertools.product(xrange(M), xrange(N)): + aij = dict.get(self, (key), 0) + other + if aij: + new[key] = aij + # new.dtype.char = self.dtype.char + elif isspmatrix_dok(other): + if other.shape != self.shape: + raise ValueError("Matrix dimensions are not equal.") + # We could alternatively set the dimensions to the largest of + # the two matrices to be summed. Would this be a good idea? + res_dtype = upcast(self.dtype, other.dtype) + new = dok_matrix(self.shape, dtype=res_dtype) + dict.update(new, self) + with np.errstate(over='ignore'): + dict.update(new, + ((k, new[k] + other[k]) for k in iterkeys(other))) + elif isspmatrix(other): + csc = self.tocsc() + new = csc + other + elif isdense(other): + new = self.todense() + other + else: + return NotImplemented + return new + + def __radd__(self, other): + if isscalarlike(other): + new = dok_matrix(self.shape, dtype=self.dtype) + M, N = self.shape + for key in itertools.product(xrange(M), xrange(N)): + aij = dict.get(self, (key), 0) + other + if aij: + new[key] = aij + elif isspmatrix_dok(other): + if other.shape != self.shape: + raise ValueError("Matrix dimensions are not equal.") + new = dok_matrix(self.shape, dtype=self.dtype) + dict.update(new, self) + dict.update(new, + ((k, self[k] + other[k]) for k in iterkeys(other))) + elif isspmatrix(other): + csc = self.tocsc() + new = csc + other + elif isdense(other): + new = other + self.todense() + else: + return NotImplemented + return new + + def __neg__(self): + if self.dtype.kind == 'b': + raise NotImplementedError('Negating a sparse boolean matrix is not' + ' supported.') + new = dok_matrix(self.shape, dtype=self.dtype) + dict.update(new, ((k, -self[k]) for k in iterkeys(self))) + return new + + def _mul_scalar(self, other): + res_dtype = upcast_scalar(self.dtype, other) + # Multiply this scalar by every element. 
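+        # (Editor's note: only stored entries are updated; implicit zeros
+        # stay zero under scalar multiplication, so iterating over the
+        # dict items is sufficient.)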
+ new = dok_matrix(self.shape, dtype=res_dtype) + dict.update(new, ((k, v * other) for k, v in iteritems(self))) + return new + + def _mul_vector(self, other): + # matrix * vector + result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype)) + for (i, j), v in iteritems(self): + result[i] += v * other[j] + return result + + def _mul_multivector(self, other): + # matrix * multivector + result_shape = (self.shape[0], other.shape[1]) + result_dtype = upcast(self.dtype, other.dtype) + result = np.zeros(result_shape, dtype=result_dtype) + for (i, j), v in iteritems(self): + result[i,:] += v * other[j,:] + return result + + def __imul__(self, other): + if isscalarlike(other): + dict.update(self, ((k, v * other) for k, v in iteritems(self))) + return self + return NotImplemented + + def __truediv__(self, other): + if isscalarlike(other): + res_dtype = upcast_scalar(self.dtype, other) + new = dok_matrix(self.shape, dtype=res_dtype) + dict.update(new, ((k, v / other) for k, v in iteritems(self))) + return new + return self.tocsr() / other + + def __itruediv__(self, other): + if isscalarlike(other): + dict.update(self, ((k, v / other) for k, v in iteritems(self))) + return self + return NotImplemented + + def __reduce__(self): + # this approach is necessary because __setstate__ is called after + # __setitem__ upon unpickling and since __init__ is not called there + # is no shape attribute hence it is not possible to unpickle it. + return dict.__reduce__(self) + + # What should len(sparse) return? For consistency with dense matrices, + # perhaps it should be the number of rows? For now it returns the number + # of non-zeros. + + def transpose(self, axes=None, copy=False): + if axes is not None: + raise ValueError("Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.") + + M, N = self.shape + new = dok_matrix((N, M), dtype=self.dtype, copy=copy) + dict.update(new, (((right, left), val) + for (left, right), val in iteritems(self))) + return new + + transpose.__doc__ = spmatrix.transpose.__doc__ + + def conjtransp(self): + """Return the conjugate transpose.""" + M, N = self.shape + new = dok_matrix((N, M), dtype=self.dtype) + dict.update(new, (((right, left), np.conj(val)) + for (left, right), val in iteritems(self))) + return new + + def copy(self): + new = dok_matrix(self.shape, dtype=self.dtype) + dict.update(new, self) + return new + + copy.__doc__ = spmatrix.copy.__doc__ + + def getrow(self, i): + """Returns the i-th row as a (1 x n) DOK matrix.""" + new = dok_matrix((1, self.shape[1]), dtype=self.dtype) + dict.update(new, (((0, j), self[i, j]) for j in xrange(self.shape[1]))) + return new + + def getcol(self, j): + """Returns the j-th column as a (m x 1) DOK matrix.""" + new = dok_matrix((self.shape[0], 1), dtype=self.dtype) + dict.update(new, (((i, 0), self[i, j]) for i in xrange(self.shape[0]))) + return new + + def tocoo(self, copy=False): + from .coo import coo_matrix + if self.nnz == 0: + return coo_matrix(self.shape, dtype=self.dtype) + + idx_dtype = get_index_dtype(maxval=max(self.shape)) + data = np.fromiter(itervalues(self), dtype=self.dtype, count=self.nnz) + row = np.fromiter((i for i, _ in iterkeys(self)), dtype=idx_dtype, count=self.nnz) + col = np.fromiter((j for _, j in iterkeys(self)), dtype=idx_dtype, count=self.nnz) + A = coo_matrix((data, (row, col)), shape=self.shape, dtype=self.dtype) + A.has_canonical_format = True + return A + + tocoo.__doc__ = spmatrix.tocoo.__doc__ + + def todok(self, 
copy=False):
+        if copy:
+            return self.copy()
+        return self
+
+    todok.__doc__ = spmatrix.todok.__doc__
+
+    def tocsc(self, copy=False):
+        return self.tocoo(copy=False).tocsc(copy=copy)
+
+    tocsc.__doc__ = spmatrix.tocsc.__doc__
+
+    def resize(self, *shape):
+        shape = check_shape(shape)
+        newM, newN = shape
+        M, N = self.shape
+        if newM < M or newN < N:
+            # Remove all elements outside new dimensions
+            for (i, j) in list(iterkeys(self)):
+                if i >= newM or j >= newN:
+                    del self[i, j]
+        self._shape = shape
+
+    resize.__doc__ = spmatrix.resize.__doc__
+
+
+def isspmatrix_dok(x):
+    """Is x of dok_matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a dok matrix
+
+    Returns
+    -------
+    bool
+        True if x is a dok matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import dok_matrix, isspmatrix_dok
+    >>> isspmatrix_dok(dok_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import dok_matrix, csr_matrix, isspmatrix_dok
+    >>> isspmatrix_dok(csr_matrix([[5]]))
+    False
+    """
+    return isinstance(x, dok_matrix)
+
+
+def _prod(x):
+    """Product of a list of numbers; ~40x faster than np.prod for Python tuples"""
+    if len(x) == 0:
+        return 1
+    return functools.reduce(operator.mul, x)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/dok.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/dok.pyc
new file mode 100644
index 0000000..6a76ac3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/dok.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/extract.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/extract.py
new file mode 100644
index 0000000..9d4ccff
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/extract.py
@@ -0,0 +1,171 @@
+"""Functions to extract parts of sparse matrices
+"""
+
+from __future__ import division, print_function, absolute_import
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['find', 'tril', 'triu']
+
+
+from .coo import coo_matrix
+
+
+def find(A):
+    """Return the indices and values of the nonzero elements of a matrix
+
+    Parameters
+    ----------
+    A : dense or sparse matrix
+        Matrix whose nonzero elements are desired.
+
+    Returns
+    -------
+    (I,J,V) : tuple of arrays
+        I, J, and V contain the row indices, column indices, and values
+        of the nonzero matrix entries.
+
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, find
+    >>> A = csr_matrix([[7.0, 8.0, 0],[0, 0, 9.0]])
+    >>> find(A)
+    (array([0, 0, 1], dtype=int32), array([0, 1, 2], dtype=int32), array([ 7.,  8.,  9.]))
+
+    """
+
+    A = coo_matrix(A, copy=True)
+    A.sum_duplicates()
+    # remove explicit zeros
+    nz_mask = A.data != 0
+    return A.row[nz_mask], A.col[nz_mask], A.data[nz_mask]
+
+
+def tril(A, k=0, format=None):
+    """Return the lower triangular portion of a matrix in sparse format
+
+    Returns the elements on or below the k-th diagonal of the matrix A.
+        - k = 0 corresponds to the main diagonal
+        - k > 0 is above the main diagonal
+        - k < 0 is below the main diagonal
+
+    Parameters
+    ----------
+    A : dense or sparse matrix
+        Matrix whose lower triangular portion is desired.
+    k : integer, optional
+        The top-most diagonal of the lower triangle.
+    format : string
+        Sparse format of the result, e.g. format="csr", etc.
+
+    Returns
+    -------
+    L : sparse matrix
+        Lower triangular portion of A in sparse format.
+
+    See Also
+    --------
+    triu : upper triangle in sparse format
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, tril
+    >>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
+    ...                dtype='int32')
+    >>> A.toarray()
+    array([[1, 2, 0, 0, 3],
+           [4, 5, 0, 6, 7],
+           [0, 0, 8, 9, 0]])
+    >>> tril(A).toarray()
+    array([[1, 0, 0, 0, 0],
+           [4, 5, 0, 0, 0],
+           [0, 0, 8, 0, 0]])
+    >>> tril(A).nnz
+    4
+    >>> tril(A, k=1).toarray()
+    array([[1, 2, 0, 0, 0],
+           [4, 5, 0, 0, 0],
+           [0, 0, 8, 9, 0]])
+    >>> tril(A, k=-1).toarray()
+    array([[0, 0, 0, 0, 0],
+           [4, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0]])
+    >>> tril(A, format='csc')
+    <3x5 sparse matrix of type '<class 'numpy.int32'>'
+            with 4 stored elements in Compressed Sparse Column format>
+
+    """
+
+    # convert to COOrdinate format where things are easy
+    A = coo_matrix(A, copy=False)
+    mask = A.row + k >= A.col
+    return _masked_coo(A, mask).asformat(format)
+
+
+def triu(A, k=0, format=None):
+    """Return the upper triangular portion of a matrix in sparse format
+
+    Returns the elements on or above the k-th diagonal of the matrix A.
+        - k = 0 corresponds to the main diagonal
+        - k > 0 is above the main diagonal
+        - k < 0 is below the main diagonal
+
+    Parameters
+    ----------
+    A : dense or sparse matrix
+        Matrix whose upper triangular portion is desired.
+    k : integer, optional
+        The bottom-most diagonal of the upper triangle.
+    format : string
+        Sparse format of the result, e.g. format="csr", etc.
+
+    Returns
+    -------
+    U : sparse matrix
+        Upper triangular portion of A in sparse format.
+
+    See Also
+    --------
+    tril : lower triangle in sparse format
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, triu
+    >>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
+    ...                dtype='int32')
+    >>> A.toarray()
+    array([[1, 2, 0, 0, 3],
+           [4, 5, 0, 6, 7],
+           [0, 0, 8, 9, 0]])
+    >>> triu(A).toarray()
+    array([[1, 2, 0, 0, 3],
+           [0, 5, 0, 6, 7],
+           [0, 0, 8, 9, 0]])
+    >>> triu(A).nnz
+    8
+    >>> triu(A, k=1).toarray()
+    array([[0, 2, 0, 0, 3],
+           [0, 0, 0, 6, 7],
+           [0, 0, 0, 9, 0]])
+    >>> triu(A, k=-1).toarray()
+    array([[1, 2, 0, 0, 3],
+           [4, 5, 0, 6, 7],
+           [0, 0, 8, 9, 0]])
+    >>> triu(A, format='csc')
+    <3x5 sparse matrix of type '<class 'numpy.int32'>'
+            with 8 stored elements in Compressed Sparse Column format>
+
+    """
+
+    # convert to COOrdinate format where things are easy
+    A = coo_matrix(A, copy=False)
+    mask = A.row + k <= A.col
+    return _masked_coo(A, mask).asformat(format)
+
+
+def _masked_coo(A, mask):
+    row = A.row[mask]
+    col = A.col[mask]
+    data = A.data[mask]
+    return coo_matrix((data, (row, col)), shape=A.shape, dtype=A.dtype)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/extract.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/extract.pyc
new file mode 100644
index 0000000..6fd6ba9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/extract.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/generate_sparsetools.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/generate_sparsetools.py
new file mode 100644
index 0000000..73f9eba
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/generate_sparsetools.py
@@ -0,0 +1,429 @@
+"""
+python generate_sparsetools.py
+
+Generate manual wrappers for C++ sparsetools code.
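+
+Running the script rewrites the ``*_impl.h`` headers under ``sparsetools/``
+next to this file (see ``main()`` below); the generated headers carry a
+blurb warning that they are autogenerated and must not be edited by hand.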
+ +Type codes used: + + 'i': integer scalar + 'I': integer array + 'T': data array + 'B': boolean array + 'V': std::vector<integer>* + 'W': std::vector<data>* + '*': indicates that the next argument is an output argument + 'v': void + 'l': 64-bit integer scalar + +See sparsetools.cxx for more details. + +""" +import optparse +import os +from distutils.dep_util import newer + +# +# List of all routines and their argument types. +# +# The first code indicates the return value, the rest the arguments. +# + +# bsr.h +BSR_ROUTINES = """ +bsr_diagonal v iiiiiIIT*T +bsr_tocsr v iiiiIIT*I*I*T +bsr_scale_rows v iiiiII*TT +bsr_scale_columns v iiiiII*TT +bsr_sort_indices v iiii*I*I*T +bsr_transpose v iiiiIIT*I*I*T +bsr_matmat_pass2 v iiiiiIITIIT*I*I*T +bsr_matvec v iiiiIITT*T +bsr_matvecs v iiiiiIITT*T +bsr_elmul_bsr v iiiiIITIIT*I*I*T +bsr_eldiv_bsr v iiiiIITIIT*I*I*T +bsr_plus_bsr v iiiiIITIIT*I*I*T +bsr_minus_bsr v iiiiIITIIT*I*I*T +bsr_maximum_bsr v iiiiIITIIT*I*I*T +bsr_minimum_bsr v iiiiIITIIT*I*I*T +bsr_ne_bsr v iiiiIITIIT*I*I*B +bsr_lt_bsr v iiiiIITIIT*I*I*B +bsr_gt_bsr v iiiiIITIIT*I*I*B +bsr_le_bsr v iiiiIITIIT*I*I*B +bsr_ge_bsr v iiiiIITIIT*I*I*B +""" + +# csc.h +CSC_ROUTINES = """ +csc_diagonal v iiiIIT*T +csc_tocsr v iiIIT*I*I*T +csc_matmat_pass1 v iiIIII*I +csc_matmat_pass2 v iiIITIIT*I*I*T +csc_matvec v iiIITT*T +csc_matvecs v iiiIITT*T +csc_elmul_csc v iiIITIIT*I*I*T +csc_eldiv_csc v iiIITIIT*I*I*T +csc_plus_csc v iiIITIIT*I*I*T +csc_minus_csc v iiIITIIT*I*I*T +csc_maximum_csc v iiIITIIT*I*I*T +csc_minimum_csc v iiIITIIT*I*I*T +csc_ne_csc v iiIITIIT*I*I*B +csc_lt_csc v iiIITIIT*I*I*B +csc_gt_csc v iiIITIIT*I*I*B +csc_le_csc v iiIITIIT*I*I*B +csc_ge_csc v iiIITIIT*I*I*B +""" + +# csr.h +CSR_ROUTINES = """ +csr_matmat_pass1 v iiIIII*I +csr_matmat_pass2 v iiIITIIT*I*I*T +csr_diagonal v iiiIIT*T +csr_tocsc v iiIIT*I*I*T +csr_tobsr v iiiiIIT*I*I*T +csr_todense v iiIIT*T +csr_matvec v iiIITT*T +csr_matvecs v iiiIITT*T +csr_elmul_csr v iiIITIIT*I*I*T +csr_eldiv_csr v iiIITIIT*I*I*T +csr_plus_csr v iiIITIIT*I*I*T +csr_minus_csr v iiIITIIT*I*I*T +csr_maximum_csr v iiIITIIT*I*I*T +csr_minimum_csr v iiIITIIT*I*I*T +csr_ne_csr v iiIITIIT*I*I*B +csr_lt_csr v iiIITIIT*I*I*B +csr_gt_csr v iiIITIIT*I*I*B +csr_le_csr v iiIITIIT*I*I*B +csr_ge_csr v iiIITIIT*I*I*B +csr_scale_rows v iiII*TT +csr_scale_columns v iiII*TT +csr_sort_indices v iI*I*T +csr_eliminate_zeros v ii*I*I*T +csr_sum_duplicates v ii*I*I*T +get_csr_submatrix v iiIITiiii*V*V*W +csr_sample_values v iiIITiII*T +csr_count_blocks i iiiiII +csr_sample_offsets i iiIIiII*I +expandptr v iI*I +test_throw_error i +csr_has_sorted_indices i iII +csr_has_canonical_format i iII +""" + +# coo.h, dia.h, csgraph.h +OTHER_ROUTINES = """ +coo_tocsr v iiiIIT*I*I*T +coo_todense v iilIIT*Ti +coo_matvec v lIITT*T +dia_matvec v iiiiITT*T +cs_graph_components i iII*I +""" + +# List of compilation units +COMPILATION_UNITS = [ + ('bsr', BSR_ROUTINES), + ('csr', CSR_ROUTINES), + ('csc', CSC_ROUTINES), + ('other', OTHER_ROUTINES), +] + +# +# List of the supported index typenums and the corresponding C++ types +# +I_TYPES = [ + ('NPY_INT32', 'npy_int32'), + ('NPY_INT64', 'npy_int64'), +] + +# +# List of the supported data typenums and the corresponding C++ types +# +T_TYPES = [ + ('NPY_BOOL', 'npy_bool_wrapper'), + ('NPY_BYTE', 'npy_byte'), + ('NPY_UBYTE', 'npy_ubyte'), + ('NPY_SHORT', 'npy_short'), + ('NPY_USHORT', 'npy_ushort'), + ('NPY_INT', 'npy_int'), + ('NPY_UINT', 'npy_uint'), + ('NPY_LONG', 'npy_long'), + ('NPY_ULONG', 'npy_ulong'), + ('NPY_LONGLONG', 
'npy_longlong'),
+    ('NPY_ULONGLONG', 'npy_ulonglong'),
+    ('NPY_FLOAT', 'npy_float'),
+    ('NPY_DOUBLE', 'npy_double'),
+    ('NPY_LONGDOUBLE', 'npy_longdouble'),
+    ('NPY_CFLOAT', 'npy_cfloat_wrapper'),
+    ('NPY_CDOUBLE', 'npy_cdouble_wrapper'),
+    ('NPY_CLONGDOUBLE', 'npy_clongdouble_wrapper'),
+]
+
+#
+# Code templates
+#
+
+THUNK_TEMPLATE = """
+static PY_LONG_LONG %(name)s_thunk(int I_typenum, int T_typenum, void **a)
+{
+    %(thunk_content)s
+}
+"""
+
+METHOD_TEMPLATE = """
+NPY_VISIBILITY_HIDDEN PyObject *
+%(name)s_method(PyObject *self, PyObject *args)
+{
+    return call_thunk('%(ret_spec)s', "%(arg_spec)s", %(name)s_thunk, args);
+}
+"""
+
+GET_THUNK_CASE_TEMPLATE = """
+static int get_thunk_case(int I_typenum, int T_typenum)
+{
+    %(content)s;
+    return -1;
+}
+"""
+
+
+#
+# Code generation
+#
+
+def get_thunk_type_set():
+    """
+    Get lists containing the Cartesian product of data types, plus a getter routine.
+
+    Returns
+    -------
+    i_types : list [(j, I_typenum, None, I_type, None), ...]
+        Pairing of index type numbers and the corresponding C++ types,
+        and a unique index `j`. This is for routines that are parameterized
+        only by I but not by T.
+    it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...]
+        Same as `i_types`, but for routines parameterized both by T and by I.
+    getter_code : str
+        C++ code for a function that takes I_typenum, T_typenum and returns
+        the unique index corresponding to the lists, or -1 if no match was
+        found.
+
+    """
+    it_types = []
+    i_types = []
+
+    j = 0
+
+    getter_code = "    if (0) {}"
+
+    for I_typenum, I_type in I_TYPES:
+        piece = """
+        else if (I_typenum == %(I_typenum)s) {
+            if (T_typenum == -1) { return %(j)s; }"""
+        getter_code += piece % dict(I_typenum=I_typenum, j=j)
+
+        i_types.append((j, I_typenum, None, I_type, None))
+        j += 1
+
+        for T_typenum, T_type in T_TYPES:
+            piece = """
+            else if (T_typenum == %(T_typenum)s) { return %(j)s; }"""
+            getter_code += piece % dict(T_typenum=T_typenum, j=j)
+
+            it_types.append((j, I_typenum, T_typenum, I_type, T_type))
+            j += 1
+
+        getter_code += """
+        }"""
+
+    return i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content=getter_code)
+
+
+def parse_routine(name, args, types):
+    """
+    Generate thunk and method code for a given routine.
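+
+    For example, the spec ``csr_matvec v iiIITT*T`` from CSR_ROUTINES above
+    reads as: a ``void`` routine taking two integer scalars, two integer
+    arrays, a data array, and (after the ``*`` output marker) one output
+    data array, i.e. a C++ call shaped like
+    ``csr_matvec(n_row, n_col, Ap, Aj, Ax, Xx, Yx)`` (argument names
+    illustrative).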
+ + Parameters + ---------- + name : str + Name of the C++ routine + args : str + Argument list specification (in format explained above) + types : list + List of types to instantiate, as returned `get_thunk_type_set` + + """ + + ret_spec = args[0] + arg_spec = args[1:] + + def get_arglist(I_type, T_type): + """ + Generate argument list for calling the C++ function + """ + args = [] + next_is_writeable = False + j = 0 + for t in arg_spec: + const = '' if next_is_writeable else 'const ' + next_is_writeable = False + if t == '*': + next_is_writeable = True + continue + elif t == 'i': + args.append("*(%s*)a[%d]" % (const + I_type, j)) + elif t == 'I': + args.append("(%s*)a[%d]" % (const + I_type, j)) + elif t == 'T': + args.append("(%s*)a[%d]" % (const + T_type, j)) + elif t == 'B': + args.append("(npy_bool_wrapper*)a[%d]" % (j,)) + elif t == 'V': + if const: + raise ValueError("'V' argument must be an output arg") + args.append("(std::vector<%s>*)a[%d]" % (I_type, j,)) + elif t == 'W': + if const: + raise ValueError("'W' argument must be an output arg") + args.append("(std::vector<%s>*)a[%d]" % (T_type, j,)) + elif t == 'l': + args.append("*(%snpy_int64*)a[%d]" % (const, j)) + else: + raise ValueError("Invalid spec character %r" % (t,)) + j += 1 + return ", ".join(args) + + # Generate thunk code: a giant switch statement with different + # type combinations inside. + thunk_content = """int j = get_thunk_case(I_typenum, T_typenum); + switch (j) {""" + for j, I_typenum, T_typenum, I_type, T_type in types: + arglist = get_arglist(I_type, T_type) + if T_type is None: + dispatch = "%s" % (I_type,) + else: + dispatch = "%s,%s" % (I_type, T_type) + if 'B' in arg_spec: + dispatch += ",npy_bool_wrapper" + + piece = """ + case %(j)s:""" + if ret_spec == 'v': + piece += """ + (void)%(name)s<%(dispatch)s>(%(arglist)s); + return 0;""" + else: + piece += """ + return %(name)s<%(dispatch)s>(%(arglist)s);""" + thunk_content += piece % dict(j=j, I_type=I_type, T_type=T_type, + I_typenum=I_typenum, T_typenum=T_typenum, + arglist=arglist, name=name, + dispatch=dispatch) + + thunk_content += """ + default: + throw std::runtime_error("internal error: invalid argument typenums"); + }""" + + thunk_code = THUNK_TEMPLATE % dict(name=name, + thunk_content=thunk_content) + + # Generate method code + method_code = METHOD_TEMPLATE % dict(name=name, + ret_spec=ret_spec, + arg_spec=arg_spec) + + return thunk_code, method_code + + +def main(): + p = optparse.OptionParser(usage=(__doc__ or '').strip()) + p.add_option("--no-force", action="store_false", + dest="force", default=True) + options, args = p.parse_args() + + names = [] + + i_types, it_types, getter_code = get_thunk_type_set() + + # Generate *_impl.h for each compilation unit + for unit_name, routines in COMPILATION_UNITS: + thunks = [] + methods = [] + + # Generate thunks and methods for all routines + for line in routines.splitlines(): + line = line.strip() + if not line or line.startswith('#'): + continue + + try: + name, args = line.split(None, 1) + except ValueError: + raise ValueError("Malformed line: %r" % (line,)) + + args = "".join(args.split()) + if 't' in args or 'T' in args: + thunk, method = parse_routine(name, args, it_types) + else: + thunk, method = parse_routine(name, args, i_types) + + if name in names: + raise ValueError("Duplicate routine %r" % (name,)) + + names.append(name) + thunks.append(thunk) + methods.append(method) + + # Produce output + dst = os.path.join(os.path.dirname(__file__), + 'sparsetools', + unit_name + '_impl.h') + if 
newer(__file__, dst) or options.force: + print("[generate_sparsetools] generating %r" % (dst,)) + with open(dst, 'w') as f: + write_autogen_blurb(f) + f.write(getter_code) + for thunk in thunks: + f.write(thunk) + for method in methods: + f.write(method) + else: + print("[generate_sparsetools] %r already up-to-date" % (dst,)) + + # Generate code for method struct + method_defs = "" + for name in names: + method_defs += "NPY_VISIBILITY_HIDDEN PyObject *%s_method(PyObject *, PyObject *);\n" % (name,) + + method_struct = """\nstatic struct PyMethodDef sparsetools_methods[] = {""" + for name in names: + method_struct += """ + {"%(name)s", (PyCFunction)%(name)s_method, METH_VARARGS, NULL},""" % dict(name=name) + method_struct += """ + {NULL, NULL, 0, NULL} + };""" + + # Produce sparsetools_impl.h + dst = os.path.join(os.path.dirname(__file__), + 'sparsetools', + 'sparsetools_impl.h') + + if newer(__file__, dst) or options.force: + print("[generate_sparsetools] generating %r" % (dst,)) + with open(dst, 'w') as f: + write_autogen_blurb(f) + f.write(method_defs) + f.write(method_struct) + else: + print("[generate_sparsetools] %r already up-to-date" % (dst,)) + + +def write_autogen_blurb(stream): + stream.write("""\ +/* This file is autogenerated by generate_sparsetools.py + * Do not edit manually or check into VCS. + */ +""") + + +if __name__ == "__main__": + main() diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/generate_sparsetools.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/generate_sparsetools.pyc new file mode 100644 index 0000000..3050688 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/generate_sparsetools.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/lil.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/lil.py new file mode 100644 index 0000000..c70f816 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/lil.py @@ -0,0 +1,548 @@ +"""LInked List sparse matrix class +""" + +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = ['lil_matrix','isspmatrix_lil'] + +from bisect import bisect_left + +import numpy as np + +from scipy._lib.six import xrange, zip +from .base import spmatrix, isspmatrix +from .sputils import (getdtype, isshape, isscalarlike, IndexMixin, + upcast_scalar, get_index_dtype, isintlike, check_shape, + check_reshape_kwargs) +from . import _csparsetools + + +class lil_matrix(spmatrix, IndexMixin): + """Row-based linked list sparse matrix + + This is a structure for constructing sparse matrices incrementally. + Note that inserting a single item can take linear time in the worst case; + to construct a matrix efficiently, make sure the items are pre-sorted by + index, per row. + + This can be instantiated in several ways: + lil_matrix(D) + with a dense matrix or rank-2 ndarray D + + lil_matrix(S) + with another sparse matrix S (equivalent to S.tolil()) + + lil_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. 
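+
+    A minimal construction sketch (illustrative only; array repr follows
+    recent NumPy conventions):
+
+    >>> from scipy.sparse import lil_matrix
+    >>> m = lil_matrix((3, 3))
+    >>> m[0, 1] = 2
+    >>> m[2, 2] = 5
+    >>> m.toarray()
+    array([[0., 2., 0.],
+           [0., 0., 0.],
+           [0., 0., 5.]])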
+ + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + Number of nonzero elements + data + LIL format data array of the matrix + rows + LIL format row index array of the matrix + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + Advantages of the LIL format + - supports flexible slicing + - changes to the matrix sparsity structure are efficient + + Disadvantages of the LIL format + - arithmetic operations LIL + LIL are slow (consider CSR or CSC) + - slow column slicing (consider CSC) + - slow matrix vector products (consider CSR or CSC) + + Intended Usage + - LIL is a convenient format for constructing sparse matrices + - once a matrix has been constructed, convert to CSR or + CSC format for fast arithmetic and matrix vector operations + - consider using the COO format when constructing large matrices + + Data Structure + - An array (``self.rows``) of rows, each of which is a sorted + list of column indices of non-zero elements. + - The corresponding nonzero values are stored in similar + fashion in ``self.data``. + + + """ + format = 'lil' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + spmatrix.__init__(self) + self.dtype = getdtype(dtype, arg1, default=float) + + # First get the shape + if isspmatrix(arg1): + if isspmatrix_lil(arg1) and copy: + A = arg1.copy() + else: + A = arg1.tolil() + + if dtype is not None: + A = A.astype(dtype) + + self._shape = check_shape(A.shape) + self.dtype = A.dtype + self.rows = A.rows + self.data = A.data + elif isinstance(arg1,tuple): + if isshape(arg1): + if shape is not None: + raise ValueError('invalid use of shape parameter') + M, N = arg1 + self._shape = check_shape((M, N)) + self.rows = np.empty((M,), dtype=object) + self.data = np.empty((M,), dtype=object) + for i in range(M): + self.rows[i] = [] + self.data[i] = [] + else: + raise TypeError('unrecognized lil_matrix constructor usage') + else: + # assume A is dense + try: + A = np.asmatrix(arg1) + except TypeError: + raise TypeError('unsupported matrix type') + else: + from .csr import csr_matrix + A = csr_matrix(A, dtype=dtype).tolil() + + self._shape = check_shape(A.shape) + self.dtype = A.dtype + self.rows = A.rows + self.data = A.data + + def __iadd__(self,other): + self[:,:] = self + other + return self + + def __isub__(self,other): + self[:,:] = self - other + return self + + def __imul__(self,other): + if isscalarlike(other): + self[:,:] = self * other + return self + else: + return NotImplemented + + def __itruediv__(self,other): + if isscalarlike(other): + self[:,:] = self / other + return self + else: + return NotImplemented + + # Whenever the dimensions change, empty lists should be created for each + # row + + def getnnz(self, axis=None): + if axis is None: + return sum([len(rowvals) for rowvals in self.data]) + if axis < 0: + axis += 2 + if axis == 0: + out = np.zeros(self.shape[1], dtype=np.intp) + for row in self.rows: + out[row] += 1 + return out + elif axis == 1: + return np.array([len(rowvals) for rowvals in self.data], dtype=np.intp) + else: + raise ValueError('axis out of bounds') + + def count_nonzero(self): + return sum(np.count_nonzero(rowvals) for rowvals in self.data) + + getnnz.__doc__ = spmatrix.getnnz.__doc__ + count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__ + + def __str__(self): + val = '' + for i, row in 
enumerate(self.rows): + for pos, j in enumerate(row): + val += " %s\t%s\n" % (str((i, j)), str(self.data[i][pos])) + return val[:-1] + + def getrowview(self, i): + """Returns a view of the 'i'th row (without copying). + """ + new = lil_matrix((1, self.shape[1]), dtype=self.dtype) + new.rows[0] = self.rows[i] + new.data[0] = self.data[i] + return new + + def getrow(self, i): + """Returns a copy of the 'i'th row. + """ + i = self._check_row_bounds(i) + new = lil_matrix((1, self.shape[1]), dtype=self.dtype) + new.rows[0] = self.rows[i][:] + new.data[0] = self.data[i][:] + return new + + def _check_row_bounds(self, i): + if i < 0: + i += self.shape[0] + if i < 0 or i >= self.shape[0]: + raise IndexError('row index out of bounds') + return i + + def _check_col_bounds(self, j): + if j < 0: + j += self.shape[1] + if j < 0 or j >= self.shape[1]: + raise IndexError('column index out of bounds') + return j + + def __getitem__(self, index): + """Return the element(s) index=(i, j), where j may be a slice. + This always returns a copy for consistency, since slices into + Python lists return copies. + """ + + # Scalar fast path first + if isinstance(index, tuple) and len(index) == 2: + i, j = index + # Use isinstance checks for common index types; this is + # ~25-50% faster than isscalarlike. Other types are + # handled below. + if ((isinstance(i, int) or isinstance(i, np.integer)) and + (isinstance(j, int) or isinstance(j, np.integer))): + v = _csparsetools.lil_get1(self.shape[0], self.shape[1], + self.rows, self.data, + i, j) + return self.dtype.type(v) + + # Utilities found in IndexMixin + i, j = self._unpack_index(index) + + # Proper check for other scalar index types + i_intlike = isintlike(i) + j_intlike = isintlike(j) + + if i_intlike and j_intlike: + v = _csparsetools.lil_get1(self.shape[0], self.shape[1], + self.rows, self.data, + i, j) + return self.dtype.type(v) + elif j_intlike or isinstance(j, slice): + # column slicing fast path + if j_intlike: + j = self._check_col_bounds(j) + j = slice(j, j+1) + + if i_intlike: + i = self._check_row_bounds(i) + i = xrange(i, i+1) + i_shape = None + elif isinstance(i, slice): + i = xrange(*i.indices(self.shape[0])) + i_shape = None + else: + i = np.atleast_1d(i) + i_shape = i.shape + + if i_shape is None or len(i_shape) == 1: + return self._get_row_ranges(i, j) + + i, j = self._index_to_arrays(i, j) + if i.size == 0: + return lil_matrix(i.shape, dtype=self.dtype) + + new = lil_matrix(i.shape, dtype=self.dtype) + + i, j = _prepare_index_for_memoryview(i, j) + _csparsetools.lil_fancy_get(self.shape[0], self.shape[1], + self.rows, self.data, + new.rows, new.data, + i, j) + return new + + def _get_row_ranges(self, rows, col_slice): + """ + Fast path for indexing in the case where column index is slice. + + This gains performance improvement over brute force by more + efficient skipping of zeros, by accessing the elements + column-wise in order. + + Parameters + ---------- + rows : sequence or xrange + Rows indexed. If xrange, must be within valid bounds. 
+ col_slice : slice + Columns indexed + + """ + j_start, j_stop, j_stride = col_slice.indices(self.shape[1]) + col_range = xrange(j_start, j_stop, j_stride) + nj = len(col_range) + new = lil_matrix((len(rows), nj), dtype=self.dtype) + + _csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1], + self.rows, self.data, + new.rows, new.data, + rows, + j_start, j_stop, j_stride, nj) + + return new + + def __setitem__(self, index, x): + # Scalar fast path first + if isinstance(index, tuple) and len(index) == 2: + i, j = index + # Use isinstance checks for common index types; this is + # ~25-50% faster than isscalarlike. Scalar index + # assignment for other types is handled below together + # with fancy indexing. + if ((isinstance(i, int) or isinstance(i, np.integer)) and + (isinstance(j, int) or isinstance(j, np.integer))): + x = self.dtype.type(x) + if x.size > 1: + # Triggered if input was an ndarray + raise ValueError("Trying to assign a sequence to an item") + _csparsetools.lil_insert(self.shape[0], self.shape[1], + self.rows, self.data, i, j, x) + return + + # General indexing + i, j = self._unpack_index(index) + + # shortcut for common case of full matrix assign: + if (isspmatrix(x) and isinstance(i, slice) and i == slice(None) and + isinstance(j, slice) and j == slice(None) + and x.shape == self.shape): + x = lil_matrix(x, dtype=self.dtype) + self.rows = x.rows + self.data = x.data + return + + i, j = self._index_to_arrays(i, j) + + if isspmatrix(x): + x = x.toarray() + + # Make x and i into the same shape + x = np.asarray(x, dtype=self.dtype) + x, _ = np.broadcast_arrays(x, i) + + if x.shape != i.shape: + raise ValueError("shape mismatch in assignment") + + # Set values + i, j, x = _prepare_index_for_memoryview(i, j, x) + _csparsetools.lil_fancy_set(self.shape[0], self.shape[1], + self.rows, self.data, + i, j, x) + + def _mul_scalar(self, other): + if other == 0: + # Multiply by zero: return the zero matrix + new = lil_matrix(self.shape, dtype=self.dtype) + else: + res_dtype = upcast_scalar(self.dtype, other) + + new = self.copy() + new = new.astype(res_dtype) + # Multiply this scalar by every element. 
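+            # e.g. with other == 3, per-row data lists such as
+            # [[1.0, 2.0], [], [5.0]] become [[3.0, 6.0], [], [15.0]]
+            # (an illustrative trace of the loop below, not extra logic)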
+            for j, rowvals in enumerate(new.data):
+                new.data[j] = [val*other for val in rowvals]
+        return new
+
+    def __truediv__(self, other):           # self / other
+        if isscalarlike(other):
+            new = self.copy()
+            # Divide every element by this scalar
+            for j, rowvals in enumerate(new.data):
+                new.data[j] = [val/other for val in rowvals]
+            return new
+        else:
+            return self.tocsr() / other
+
+    def copy(self):
+        from copy import deepcopy
+        new = lil_matrix(self.shape, dtype=self.dtype)
+        new.data = deepcopy(self.data)
+        new.rows = deepcopy(self.rows)
+        return new
+
+    copy.__doc__ = spmatrix.copy.__doc__
+
+    def reshape(self, *args, **kwargs):
+        shape = check_shape(args, self.shape)
+        order, copy = check_reshape_kwargs(kwargs)
+
+        # Return early if reshape is not required
+        if shape == self.shape:
+            if copy:
+                return self.copy()
+            else:
+                return self
+
+        new = lil_matrix(shape, dtype=self.dtype)
+
+        if order == 'C':
+            ncols = self.shape[1]
+            for i, row in enumerate(self.rows):
+                for col, j in enumerate(row):
+                    new_r, new_c = np.unravel_index(i * ncols + j, shape)
+                    new[new_r, new_c] = self[i, j]
+        elif order == 'F':
+            nrows = self.shape[0]
+            for i, row in enumerate(self.rows):
+                for col, j in enumerate(row):
+                    new_r, new_c = np.unravel_index(i + j * nrows, shape, order)
+                    new[new_r, new_c] = self[i, j]
+        else:
+            raise ValueError("'order' must be 'C' or 'F'")
+
+        return new
+
+    reshape.__doc__ = spmatrix.reshape.__doc__
+
+    def resize(self, *shape):
+        shape = check_shape(shape)
+        new_M, new_N = shape
+        M, N = self.shape
+
+        if new_M < M:
+            self.rows = self.rows[:new_M]
+            self.data = self.data[:new_M]
+        elif new_M > M:
+            self.rows = np.resize(self.rows, new_M)
+            self.data = np.resize(self.data, new_M)
+            for i in range(M, new_M):
+                self.rows[i] = []
+                self.data[i] = []
+
+        if new_N < N:
+            for row, data in zip(self.rows, self.data):
+                trunc = bisect_left(row, new_N)
+                del row[trunc:]
+                del data[trunc:]
+
+        self._shape = shape
+
+    resize.__doc__ = spmatrix.resize.__doc__
+
+    def toarray(self, order=None, out=None):
+        d = self._process_toarray_args(order, out)
+        for i, row in enumerate(self.rows):
+            for pos, j in enumerate(row):
+                d[i, j] = self.data[i][pos]
+        return d
+
+    toarray.__doc__ = spmatrix.toarray.__doc__
+
+    def transpose(self, axes=None, copy=False):
+        return self.tocsr(copy=copy).transpose(axes=axes, copy=False).tolil(copy=False)
+
+    transpose.__doc__ = spmatrix.transpose.__doc__
+
+    def tolil(self, copy=False):
+        if copy:
+            return self.copy()
+        else:
+            return self
+
+    tolil.__doc__ = spmatrix.tolil.__doc__
+
+    def tocsr(self, copy=False):
+        lst = [len(x) for x in self.rows]
+        idx_dtype = get_index_dtype(maxval=max(self.shape[1], sum(lst)))
+
+        indptr = np.cumsum([0] + lst, dtype=idx_dtype)
+        indices = np.array([x for y in self.rows for x in y], dtype=idx_dtype)
+        data = np.array([x for y in self.data for x in y], dtype=self.dtype)
+
+        from .csr import csr_matrix
+        return csr_matrix((data, indices, indptr), shape=self.shape)
+
+    tocsr.__doc__ = spmatrix.tocsr.__doc__
+
+
+def _prepare_index_for_memoryview(i, j, x=None):
+    """
+    Convert index and data arrays to a form suitable for passing to the
+    Cython fancy getset routines.
+
+    The conversions are necessary to (i) ensure the integer index arrays
+    are of one of the accepted types, and (ii) ensure the arrays are
+    writable, so that Cython memoryview support doesn't choke on them.
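+
+    For instance, mixing an int32 ``i`` with an int64 ``j`` upcasts ``i``
+    to int64, so that both memoryviews share a single index dtype (this is
+    the dtype comparison at the top of the function body).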
+ + Parameters + ---------- + i, j + Index arrays + x : optional + Data arrays + + Returns + ------- + i, j, x + Re-formatted arrays (x is omitted, if input was None) + + """ + if i.dtype > j.dtype: + j = j.astype(i.dtype) + elif i.dtype < j.dtype: + i = i.astype(j.dtype) + + if not i.flags.writeable or i.dtype not in (np.int32, np.int64): + i = i.astype(np.intp) + if not j.flags.writeable or j.dtype not in (np.int32, np.int64): + j = j.astype(np.intp) + + if x is not None: + if not x.flags.writeable: + x = x.copy() + return i, j, x + else: + return i, j + + +def isspmatrix_lil(x): + """Is x of lil_matrix type? + + Parameters + ---------- + x + object to check for being a lil matrix + + Returns + ------- + bool + True if x is a lil matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import lil_matrix, isspmatrix_lil + >>> isspmatrix_lil(lil_matrix([[5]])) + True + + >>> from scipy.sparse import lil_matrix, csr_matrix, isspmatrix_lil + >>> isspmatrix_lil(csr_matrix([[5]])) + False + """ + return isinstance(x, lil_matrix) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/lil.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/lil.pyc new file mode 100644 index 0000000..e4f356f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/lil.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/__init__.py new file mode 100644 index 0000000..6a73294 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/__init__.py @@ -0,0 +1,127 @@ +""" +================================================== +Sparse linear algebra (:mod:`scipy.sparse.linalg`) +================================================== + +.. currentmodule:: scipy.sparse.linalg + +Abstract linear operators +------------------------- + +.. autosummary:: + :toctree: generated/ + + LinearOperator -- abstract representation of a linear operator + aslinearoperator -- convert an object to an abstract linear operator + +Matrix Operations +----------------- + +.. autosummary:: + :toctree: generated/ + + inv -- compute the sparse matrix inverse + expm -- compute the sparse matrix exponential + expm_multiply -- compute the product of a matrix exponential and a matrix + +Matrix norms +------------ + +.. autosummary:: + :toctree: generated/ + + norm -- Norm of a sparse matrix + onenormest -- Estimate the 1-norm of a sparse matrix + +Solving linear problems +----------------------- + +Direct methods for linear equation systems: + +.. autosummary:: + :toctree: generated/ + + spsolve -- Solve the sparse linear system Ax=b + spsolve_triangular -- Solve the sparse linear system Ax=b for a triangular matrix + factorized -- Pre-factorize matrix to a function solving a linear system + MatrixRankWarning -- Warning on exactly singular matrices + use_solver -- Select direct solver to use + +Iterative methods for linear equation systems: + +.. 
autosummary:: + :toctree: generated/ + + bicg -- Use BIConjugate Gradient iteration to solve A x = b + bicgstab -- Use BIConjugate Gradient STABilized iteration to solve A x = b + cg -- Use Conjugate Gradient iteration to solve A x = b + cgs -- Use Conjugate Gradient Squared iteration to solve A x = b + gmres -- Use Generalized Minimal RESidual iteration to solve A x = b + lgmres -- Solve a matrix equation using the LGMRES algorithm + minres -- Use MINimum RESidual iteration to solve Ax = b + qmr -- Use Quasi-Minimal Residual iteration to solve A x = b + gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm + +Iterative methods for least-squares problems: + +.. autosummary:: + :toctree: generated/ + + lsqr -- Find the least-squares solution to a sparse linear equation system + lsmr -- Find the least-squares solution to a sparse linear equation system + +Matrix factorizations +--------------------- + +Eigenvalue problems: + +.. autosummary:: + :toctree: generated/ + + eigs -- Find k eigenvalues and eigenvectors of the square matrix A + eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix + lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning + +Singular values problems: + +.. autosummary:: + :toctree: generated/ + + svds -- Compute k singular values/vectors for a sparse matrix + +Complete or incomplete LU factorizations + +.. autosummary:: + :toctree: generated/ + + splu -- Compute a LU decomposition for a sparse matrix + spilu -- Compute an incomplete LU decomposition for a sparse matrix + SuperLU -- Object representing an LU factorization + +Exceptions +---------- + +.. autosummary:: + :toctree: generated/ + + ArpackNoConvergence + ArpackError + +""" + +from __future__ import division, print_function, absolute_import + +from .isolve import * +from .dsolve import * +from .interface import * +from .eigen import * +from .matfuncs import * +from ._onenormest import * +from ._norm import * +from ._expm_multiply import * + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/__init__.pyc new file mode 100644 index 0000000..2a87cd6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_expm_multiply.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_expm_multiply.py new file mode 100644 index 0000000..c2eb890 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_expm_multiply.py @@ -0,0 +1,703 @@ +"""Compute the action of the matrix exponential. +""" + +from __future__ import division, print_function, absolute_import + +import numpy as np + +import scipy.linalg +import scipy.sparse.linalg +from scipy.sparse.linalg import aslinearoperator + +__all__ = ['expm_multiply'] + + +def _exact_inf_norm(A): + # A compatibility function which should eventually disappear. + if scipy.sparse.isspmatrix(A): + return max(abs(A).sum(axis=1).flat) + else: + return np.linalg.norm(A, np.inf) + + +def _exact_1_norm(A): + # A compatibility function which should eventually disappear. 
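+    # Worked check: the 1-norm is the maximum absolute column sum, so for
+    # [[1, -2], [3, 4]] the column sums are 4 and 6 and the norm is 6.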
+ if scipy.sparse.isspmatrix(A): + return max(abs(A).sum(axis=0).flat) + else: + return np.linalg.norm(A, 1) + + +def _trace(A): + # A compatibility function which should eventually disappear. + if scipy.sparse.isspmatrix(A): + return A.diagonal().sum() + else: + return np.trace(A) + + +def _ident_like(A): + # A compatibility function which should eventually disappear. + if scipy.sparse.isspmatrix(A): + return scipy.sparse.construct.eye(A.shape[0], A.shape[1], + dtype=A.dtype, format=A.format) + else: + return np.eye(A.shape[0], A.shape[1], dtype=A.dtype) + + +def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None): + """ + Compute the action of the matrix exponential of A on B. + + Parameters + ---------- + A : transposable linear operator + The operator whose exponential is of interest. + B : ndarray + The matrix or vector to be multiplied by the matrix exponential of A. + start : scalar, optional + The starting time point of the sequence. + stop : scalar, optional + The end time point of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced time points, so that `stop` is excluded. + Note that the step size changes when `endpoint` is False. + num : int, optional + Number of time points to use. + endpoint : bool, optional + If True, `stop` is the last time point. Otherwise, it is not included. + + Returns + ------- + expm_A_B : ndarray + The result of the action :math:`e^{t_k A} B`. + + Notes + ----- + The optional arguments defining the sequence of evenly spaced time points + are compatible with the arguments of `numpy.linspace`. + + The output ndarray shape is somewhat complicated so I explain it here. + The ndim of the output could be either 1, 2, or 3. + It would be 1 if you are computing the expm action on a single vector + at a single time point. + It would be 2 if you are computing the expm action on a vector + at multiple time points, or if you are computing the expm action + on a matrix at a single time point. + It would be 3 if you want the action on a matrix with multiple + columns at multiple time points. + If multiple time points are requested, expm_A_B[0] will always + be the action of the expm at the first time point, + regardless of whether the action is on a vector or a matrix. + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011) + "Computing the Action of the Matrix Exponential, + with an Application to Exponential Integrators." + SIAM Journal on Scientific Computing, + 33 (2). pp. 488-511. ISSN 1064-8275 + http://eprints.ma.man.ac.uk/1591/ + + .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010) + "Computing Matrix Functions." + Acta Numerica, + 19. 159-208. ISSN 0962-4929 + http://eprints.ma.man.ac.uk/1451/ + + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import expm, expm_multiply + >>> A = csc_matrix([[1, 0], [0, 1]]) + >>> A.todense() + matrix([[1, 0], + [0, 1]], dtype=int64) + >>> B = np.array([np.exp(-1.), np.exp(-2.)]) + >>> B + array([ 0.36787944, 0.13533528]) + >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True) + array([[ 1. , 0.36787944], + [ 1.64872127, 0.60653066], + [ 2.71828183, 1. ]]) + >>> expm(A).dot(B) # Verify 1st timestep + array([ 1. , 0.36787944]) + >>> expm(1.5*A).dot(B) # Verify 2nd timestep + array([ 1.64872127, 0.60653066]) + >>> expm(2*A).dot(B) # Verify 3rd timestep + array([ 2.71828183, 1. 
]) + """ + if all(arg is None for arg in (start, stop, num, endpoint)): + X = _expm_multiply_simple(A, B) + else: + X, status = _expm_multiply_interval(A, B, start, stop, num, endpoint) + return X + + +def _expm_multiply_simple(A, B, t=1.0, balance=False): + """ + Compute the action of the matrix exponential at a single time point. + + Parameters + ---------- + A : transposable linear operator + The operator whose exponential is of interest. + B : ndarray + The matrix to be multiplied by the matrix exponential of A. + t : float + A time point. + balance : bool + Indicates whether or not to apply balancing. + + Returns + ------- + F : ndarray + :math:`e^{t A} B` + + Notes + ----- + This is algorithm (3.2) in Al-Mohy and Higham (2011). + + """ + if balance: + raise NotImplementedError + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be like a square matrix') + if A.shape[1] != B.shape[0]: + raise ValueError('the matrices A and B have incompatible shapes') + ident = _ident_like(A) + n = A.shape[0] + if len(B.shape) == 1: + n0 = 1 + elif len(B.shape) == 2: + n0 = B.shape[1] + else: + raise ValueError('expected B to be like a matrix or a vector') + u_d = 2**-53 + tol = u_d + mu = _trace(A) / float(n) + A = A - mu * ident + A_1_norm = _exact_1_norm(A) + if t*A_1_norm == 0: + m_star, s = 0, 1 + else: + ell = 2 + norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell) + m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell) + return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance) + + +def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False): + """ + A helper function. + """ + if balance: + raise NotImplementedError + if tol is None: + u_d = 2 ** -53 + tol = u_d + F = B + eta = np.exp(t*mu / float(s)) + for i in range(s): + c1 = _exact_inf_norm(B) + for j in range(m_star): + coeff = t / float(s*(j+1)) + B = coeff * A.dot(B) + c2 = _exact_inf_norm(B) + F = F + B + if c1 + c2 <= tol * _exact_inf_norm(F): + break + c1 = c2 + F = eta * F + B = F + return F + + +# This table helps to compute bounds. +# They seem to have been difficult to calculate, involving symbolic +# manipulation of equations, followed by numerical root finding. +_theta = { + # The first 30 values are from table A.3 of Computing Matrix Functions. + 1: 2.29e-16, + 2: 2.58e-8, + 3: 1.39e-5, + 4: 3.40e-4, + 5: 2.40e-3, + 6: 9.07e-3, + 7: 2.38e-2, + 8: 5.00e-2, + 9: 8.96e-2, + 10: 1.44e-1, + # 11 + 11: 2.14e-1, + 12: 3.00e-1, + 13: 4.00e-1, + 14: 5.14e-1, + 15: 6.41e-1, + 16: 7.81e-1, + 17: 9.31e-1, + 18: 1.09, + 19: 1.26, + 20: 1.44, + # 21 + 21: 1.62, + 22: 1.82, + 23: 2.01, + 24: 2.22, + 25: 2.43, + 26: 2.64, + 27: 2.86, + 28: 3.08, + 29: 3.31, + 30: 3.54, + # The rest are from table 3.1 of + # Computing the Action of the Matrix Exponential. + 35: 4.7, + 40: 6.0, + 45: 7.2, + 50: 8.5, + 55: 9.9, + } + + +def _onenormest_matrix_power(A, p, + t=2, itmax=5, compute_v=False, compute_w=False): + """ + Efficiently estimate the 1-norm of A^p. + + Parameters + ---------- + A : ndarray + Matrix whose 1-norm of a power is to be computed. + p : int + Non-negative integer power. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + Larger values take longer and use more memory + but give more accurate output. + itmax : int, optional + Use at most this many iterations. + compute_v : bool, optional + Request a norm-maximizing linear operator input vector if True. 
+ compute_w : bool, optional + Request a norm-maximizing linear operator output vector if True. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. + It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. + + """ + #XXX Eventually turn this into an API function in the _onenormest module, + #XXX and remove its underscore, + #XXX but wait until expm_multiply goes into scipy. + return scipy.sparse.linalg.onenormest(aslinearoperator(A) ** p) + +class LazyOperatorNormInfo: + """ + Information about an operator is lazily computed. + + The information includes the exact 1-norm of the operator, + in addition to estimates of 1-norms of powers of the operator. + This uses the notation of Computing the Action (2011). + This class is specialized enough to probably not be of general interest + outside of this module. + + """ + def __init__(self, A, A_1_norm=None, ell=2, scale=1): + """ + Provide the operator and some norm-related information. + + Parameters + ---------- + A : linear operator + The operator of interest. + A_1_norm : float, optional + The exact 1-norm of A. + ell : int, optional + A technical parameter controlling norm estimation quality. + scale : int, optional + If specified, return the norms of scale*A instead of A. + + """ + self._A = A + self._A_1_norm = A_1_norm + self._ell = ell + self._d = {} + self._scale = scale + + def set_scale(self,scale): + """ + Set the scale parameter. + """ + self._scale = scale + + def onenorm(self): + """ + Compute the exact 1-norm. + """ + if self._A_1_norm is None: + self._A_1_norm = _exact_1_norm(self._A) + return self._scale*self._A_1_norm + + def d(self, p): + """ + Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm. + """ + if p not in self._d: + est = _onenormest_matrix_power(self._A, p, self._ell) + self._d[p] = est ** (1.0 / p) + return self._scale*self._d[p] + + def alpha(self, p): + """ + Lazily compute max(d(p), d(p+1)). + """ + return max(self.d(p), self.d(p+1)) + +def _compute_cost_div_m(m, p, norm_info): + """ + A helper function for computing bounds. + + This is equation (3.10). + It measures cost in terms of the number of required matrix products. + + Parameters + ---------- + m : int + A valid key of _theta. + p : int + A matrix power. + norm_info : LazyOperatorNormInfo + Information about 1-norms of related operators. + + Returns + ------- + cost_div_m : int + Required number of matrix products divided by m. + + """ + return int(np.ceil(norm_info.alpha(p) / _theta[m])) + + +def _compute_p_max(m_max): + """ + Compute the largest positive integer p such that p*(p-1) <= m_max + 1. + + Do this in a slightly dumb way, but safe and not too slow. + + Parameters + ---------- + m_max : int + A count related to bounds. + + """ + sqrt_m_max = np.sqrt(m_max) + p_low = int(np.floor(sqrt_m_max)) + p_high = int(np.ceil(sqrt_m_max + 1)) + return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1) + + +def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2): + """ + A helper function for the _expm_multiply_* functions. + + Parameters + ---------- + norm_info : LazyOperatorNormInfo + Information about norms of certain linear operators of interest. 
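+        (In practice this wraps ``t*A``, as constructed in
+        ``_expm_multiply_simple`` above and ``_expm_multiply_interval``
+        below.)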
+ n0 : int + Number of columns in the _expm_multiply_* B matrix. + tol : float + Expected to be + :math:`2^{-24}` for single precision or + :math:`2^{-53}` for double precision. + m_max : int + A value related to a bound. + ell : int + The number of columns used in the 1-norm approximation. + This is usually taken to be small, maybe between 1 and 5. + + Returns + ------- + best_m : int + Related to bounds for error control. + best_s : int + Amount of scaling. + + Notes + ----- + This is code fragment (3.1) in Al-Mohy and Higham (2011). + The discussion of default values for m_max and ell + is given between the definitions of equation (3.11) + and the definition of equation (3.12). + + """ + if ell < 1: + raise ValueError('expected ell to be a positive integer') + best_m = None + best_s = None + if _condition_3_13(norm_info.onenorm(), n0, m_max, ell): + for m, theta in _theta.items(): + s = int(np.ceil(norm_info.onenorm() / theta)) + if best_m is None or m * s < best_m * best_s: + best_m = m + best_s = s + else: + # Equation (3.11). + for p in range(2, _compute_p_max(m_max) + 1): + for m in range(p*(p-1)-1, m_max+1): + if m in _theta: + s = _compute_cost_div_m(m, p, norm_info) + if best_m is None or m * s < best_m * best_s: + best_m = m + best_s = s + best_s = max(best_s, 1) + return best_m, best_s + + +def _condition_3_13(A_1_norm, n0, m_max, ell): + """ + A helper function for the _expm_multiply_* functions. + + Parameters + ---------- + A_1_norm : float + The precomputed 1-norm of A. + n0 : int + Number of columns in the _expm_multiply_* B matrix. + m_max : int + A value related to a bound. + ell : int + The number of columns used in the 1-norm approximation. + This is usually taken to be small, maybe between 1 and 5. + + Returns + ------- + value : bool + Indicates whether or not the condition has been met. + + Notes + ----- + This is condition (3.13) in Al-Mohy and Higham (2011). + + """ + + # This is the rhs of equation (3.12). + p_max = _compute_p_max(m_max) + a = 2 * ell * p_max * (p_max + 3) + + # Evaluate the condition (3.13). + b = _theta[m_max] / float(n0 * m_max) + return A_1_norm <= a * b + + +def _expm_multiply_interval(A, B, start=None, stop=None, + num=None, endpoint=None, balance=False, status_only=False): + """ + Compute the action of the matrix exponential at multiple time points. + + Parameters + ---------- + A : transposable linear operator + The operator whose exponential is of interest. + B : ndarray + The matrix to be multiplied by the matrix exponential of A. + start : scalar, optional + The starting time point of the sequence. + stop : scalar, optional + The end time point of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced time points, so that `stop` is excluded. + Note that the step size changes when `endpoint` is False. + num : int, optional + Number of time points to use. + endpoint : bool, optional + If True, `stop` is the last time point. Otherwise, it is not included. + balance : bool + Indicates whether or not to apply balancing. + status_only : bool + A flag that is set to True for some debugging and testing operations. + + Returns + ------- + F : ndarray + :math:`e^{t_k A} B` + status : int + An integer status for testing and debugging. + + Notes + ----- + This is algorithm (5.2) in Al-Mohy and Higham (2011). + + There seems to be a typo, where line 15 of the algorithm should be + moved to line 6.5 (between lines 6 and 7). 
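+
+    The dispatch at the end of this function distinguishes how the q time
+    steps divide into the s scaling stages (q <= s, q % s == 0, or
+    q % s != 0) and hands off to the corresponding
+    ``_expm_multiply_interval_core_*`` helper, with status codes 0, 1 and 2.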
+ + """ + if balance: + raise NotImplementedError + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be like a square matrix') + if A.shape[1] != B.shape[0]: + raise ValueError('the matrices A and B have incompatible shapes') + ident = _ident_like(A) + n = A.shape[0] + if len(B.shape) == 1: + n0 = 1 + elif len(B.shape) == 2: + n0 = B.shape[1] + else: + raise ValueError('expected B to be like a matrix or a vector') + u_d = 2**-53 + tol = u_d + mu = _trace(A) / float(n) + + # Get the linspace samples, attempting to preserve the linspace defaults. + linspace_kwargs = {'retstep': True} + if num is not None: + linspace_kwargs['num'] = num + if endpoint is not None: + linspace_kwargs['endpoint'] = endpoint + samples, step = np.linspace(start, stop, **linspace_kwargs) + + # Convert the linspace output to the notation used by the publication. + nsamples = len(samples) + if nsamples < 2: + raise ValueError('at least two time points are required') + q = nsamples - 1 + h = step + t_0 = samples[0] + t_q = samples[q] + + # Define the output ndarray. + # Use an ndim=3 shape, such that the last two indices + # are the ones that may be involved in level 3 BLAS operations. + X_shape = (nsamples,) + B.shape + X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float)) + t = t_q - t_0 + A = A - mu * ident + A_1_norm = _exact_1_norm(A) + ell = 2 + norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell) + if t*A_1_norm == 0: + m_star, s = 0, 1 + else: + m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell) + + # Compute the expm action up to the initial time point. + X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s) + + # Compute the expm action at the rest of the time points. + if q <= s: + if status_only: + return 0 + else: + return _expm_multiply_interval_core_0(A, X, + h, mu, q, norm_info, tol, ell,n0) + elif not (q % s): + if status_only: + return 1 + else: + return _expm_multiply_interval_core_1(A, X, + h, mu, m_star, s, q, tol) + elif (q % s): + if status_only: + return 2 + else: + return _expm_multiply_interval_core_2(A, X, + h, mu, m_star, s, q, tol) + else: + raise Exception('internal error') + + +def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0): + """ + A helper function, for the case q <= s. + """ + + # Compute the new values of m_star and s which should be applied + # over intervals of size t/q + if norm_info.onenorm() == 0: + m_star, s = 0, 1 + else: + norm_info.set_scale(1./q) + m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell) + norm_info.set_scale(1) + + for k in range(q): + X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s) + return X, 0 + + +def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol): + """ + A helper function, for the case q > s and q % s == 0. + """ + d = q // s + input_shape = X.shape[1:] + K_shape = (m_star + 1, ) + input_shape + K = np.empty(K_shape, dtype=X.dtype) + for i in range(s): + Z = X[i*d] + K[0] = Z + high_p = 0 + for k in range(1, d+1): + F = K[0] + c1 = _exact_inf_norm(F) + for p in range(1, m_star+1): + if p > high_p: + K[p] = h * A.dot(K[p-1]) / float(p) + coeff = float(pow(k, p)) + F = F + coeff * K[p] + inf_norm_K_p_1 = _exact_inf_norm(K[p]) + c2 = coeff * inf_norm_K_p_1 + if c1 + c2 <= tol * _exact_inf_norm(F): + break + c1 = c2 + X[k + i*d] = np.exp(k*h*mu) * F + return X, 1 + + +def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol): + """ + A helper function, for the case q > s and q % s > 0. 
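+
+    Time points are processed in j + 1 blocks: j full blocks of d = q // s
+    steps each, then a final block with the r = q - d*j remaining steps
+    (the ``effective_d`` switch in the loop below).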
+ """ + d = q // s + j = q // d + r = q - d * j + input_shape = X.shape[1:] + K_shape = (m_star + 1, ) + input_shape + K = np.empty(K_shape, dtype=X.dtype) + for i in range(j + 1): + Z = X[i*d] + K[0] = Z + high_p = 0 + if i < j: + effective_d = d + else: + effective_d = r + for k in range(1, effective_d+1): + F = K[0] + c1 = _exact_inf_norm(F) + for p in range(1, m_star+1): + if p == high_p + 1: + K[p] = h * A.dot(K[p-1]) / float(p) + high_p = p + coeff = float(pow(k, p)) + F = F + coeff * K[p] + inf_norm_K_p_1 = _exact_inf_norm(K[p]) + c2 = coeff * inf_norm_K_p_1 + if c1 + c2 <= tol * _exact_inf_norm(F): + break + c1 = c2 + X[k + i*d] = np.exp(k*h*mu) * F + return X, 2 diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_expm_multiply.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_expm_multiply.pyc new file mode 100644 index 0000000..2c796ec Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_expm_multiply.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_norm.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_norm.py new file mode 100644 index 0000000..02bc66f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_norm.py @@ -0,0 +1,184 @@ +"""Sparse matrix norms. + +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.sparse import issparse + +from numpy.core import Inf, sqrt, abs + +__all__ = ['norm'] + + +def _sparse_frobenius_norm(x): + if np.issubdtype(x.dtype, np.complexfloating): + sqnorm = abs(x).power(2).sum() + else: + sqnorm = x.power(2).sum() + return sqrt(sqnorm) + + +def norm(x, ord=None, axis=None): + """ + Norm of a sparse matrix + + This function is able to return one of seven different matrix norms, + depending on the value of the ``ord`` parameter. + + Parameters + ---------- + x : a sparse matrix + Input sparse matrix. + ord : {non-zero int, inf, -inf, 'fro'}, optional + Order of the norm (see table under ``Notes``). inf means numpy's + `inf` object. + axis : {int, 2-tuple of ints, None}, optional + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. + + Returns + ------- + n : float or ndarray + + Notes + ----- + Some of the ord are not implemented because some associated functions like, + _multi_svd_norm, are not yet available for sparse matrix. + + This docstring is modified based on numpy.linalg.norm. + https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py + + The following norms can be calculated: + + ===== ============================ + ord norm for sparse matrices + ===== ============================ + None Frobenius norm + 'fro' Frobenius norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 0 abs(x).sum(axis=axis) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + 2 Not implemented + -2 Not implemented + other Not implemented + ===== ============================ + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 
15 + + Examples + -------- + >>> from scipy.sparse import * + >>> import numpy as np + >>> from scipy.sparse.linalg import norm + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> b = csr_matrix(b) + >>> norm(b) + 7.745966692414834 + >>> norm(b, 'fro') + 7.745966692414834 + >>> norm(b, np.inf) + 9 + >>> norm(b, -np.inf) + 2 + >>> norm(b, 1) + 7 + >>> norm(b, -1) + 6 + + """ + if not issparse(x): + raise TypeError("input is not sparse. use numpy.linalg.norm") + + # Check the default case first and handle it immediately. + if axis is None and ord in (None, 'fro', 'f'): + return _sparse_frobenius_norm(x) + + # Some norms require functions that are not implemented for all types. + x = x.tocsr() + + if axis is None: + axis = (0, 1) + elif not isinstance(axis, tuple): + msg = "'axis' must be None, an integer or a tuple of integers" + try: + int_axis = int(axis) + except TypeError: + raise TypeError(msg) + if axis != int_axis: + raise TypeError(msg) + axis = (int_axis,) + + nd = 2 + if len(axis) == 2: + row_axis, col_axis = axis + if not (-nd <= row_axis < nd and -nd <= col_axis < nd): + raise ValueError('Invalid axis %r for an array with shape %r' % + (axis, x.shape)) + if row_axis % nd == col_axis % nd: + raise ValueError('Duplicate axes given.') + if ord == 2: + raise NotImplementedError + #return _multi_svd_norm(x, row_axis, col_axis, amax) + elif ord == -2: + raise NotImplementedError + #return _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0] + elif ord == Inf: + return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0] + elif ord == -1: + return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0] + elif ord == -Inf: + return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0] + elif ord in (None, 'f', 'fro'): + # The axis order does not matter for this norm. + return _sparse_frobenius_norm(x) + else: + raise ValueError("Invalid norm order for matrices.") + elif len(axis) == 1: + a, = axis + if not (-nd <= a < nd): + raise ValueError('Invalid axis %r for an array with shape %r' % + (axis, x.shape)) + if ord == Inf: + M = abs(x).max(axis=a) + elif ord == -Inf: + M = abs(x).min(axis=a) + elif ord == 0: + # Zero norm + M = (x != 0).sum(axis=a) + elif ord == 1: + # special case for speedup + M = abs(x).sum(axis=a) + elif ord in (2, None): + M = sqrt(abs(x).power(2).sum(axis=a)) + else: + try: + ord + 1 + except TypeError: + raise ValueError('Invalid norm order for vectors.') + M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord) + return M.A.ravel() + else: + raise ValueError("Improper number of dimensions to norm.") diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_norm.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_norm.pyc new file mode 100644 index 0000000..5bb9f63 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_norm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_onenormest.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_onenormest.py new file mode 100644 index 0000000..95da6cb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_onenormest.py @@ -0,0 +1,468 @@ +"""Sparse block 1-norm estimator. 
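+
+An implementation of the block algorithm of Higham and Tisseur (2000),
+which estimates the 1-norm of a linear operator from a small number of
+matrix-vector products with the operator and its transpose.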
+""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.sparse.linalg import aslinearoperator + + +__all__ = ['onenormest'] + + +def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False): + """ + Compute a lower bound of the 1-norm of a sparse matrix. + + Parameters + ---------- + A : ndarray or other linear operator + A linear operator that can be transposed and that can + produce matrix products. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + Larger values take longer and use more memory + but give more accurate output. + itmax : int, optional + Use at most this many iterations. + compute_v : bool, optional + Request a norm-maximizing linear operator input vector if True. + compute_w : bool, optional + Request a norm-maximizing linear operator output vector if True. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. + It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. + + Notes + ----- + This is algorithm 2.4 of [1]. + + In [2] it is described as follows. + "This algorithm typically requires the evaluation of + about 4t matrix-vector products and almost invariably + produces a norm estimate (which is, in fact, a lower + bound on the norm) correct to within a factor 3." + + .. versionadded:: 0.13.0 + + References + ---------- + .. [1] Nicholas J. Higham and Francoise Tisseur (2000), + "A Block Algorithm for Matrix 1-Norm Estimation, + with an Application to 1-Norm Pseudospectra." + SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201. + + .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009), + "A new scaling and squaring algorithm for the matrix exponential." + SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989. + + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import onenormest + >>> A = csc_matrix([[1., 0., 0.], [5., 8., 2.], [0., -1., 0.]], dtype=float) + >>> A.todense() + matrix([[ 1., 0., 0.], + [ 5., 8., 2.], + [ 0., -1., 0.]]) + >>> onenormest(A) + 9.0 + >>> np.linalg.norm(A.todense(), ord=1) + 9.0 + """ + + # Check the input. + A = aslinearoperator(A) + if A.shape[0] != A.shape[1]: + raise ValueError('expected the operator to act like a square matrix') + + # If the operator size is small compared to t, + # then it is easier to compute the exact norm. + # Otherwise estimate the norm. + n = A.shape[1] + if t >= n: + A_explicit = np.asarray(aslinearoperator(A).matmat(np.identity(n))) + if A_explicit.shape != (n, n): + raise Exception('internal error: ', + 'unexpected shape ' + str(A_explicit.shape)) + col_abs_sums = abs(A_explicit).sum(axis=0) + if col_abs_sums.shape != (n, ): + raise Exception('internal error: ', + 'unexpected shape ' + str(col_abs_sums.shape)) + argmax_j = np.argmax(col_abs_sums) + v = elementary_vector(n, argmax_j) + w = A_explicit[:, argmax_j] + est = col_abs_sums[argmax_j] + else: + est, v, w, nmults, nresamples = _onenormest_core(A, A.H, t, itmax) + + # Report the norm estimate along with some certificates of the estimate. 
+ if compute_v or compute_w: + result = (est,) + if compute_v: + result += (v,) + if compute_w: + result += (w,) + return result + else: + return est + + +def _blocked_elementwise(func): + """ + Decorator for an elementwise function, to apply it blockwise along + first dimension, to avoid excessive memory usage in temporaries. + """ + block_size = 2**20 + + def wrapper(x): + if x.shape[0] < block_size: + return func(x) + else: + y0 = func(x[:block_size]) + y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype) + y[:block_size] = y0 + del y0 + for j in range(block_size, x.shape[0], block_size): + y[j:j+block_size] = func(x[j:j+block_size]) + return y + return wrapper + + +@_blocked_elementwise +def sign_round_up(X): + """ + This should do the right thing for both real and complex matrices. + + From Higham and Tisseur: + "Everything in this section remains valid for complex matrices + provided that sign(A) is redefined as the matrix (aij / |aij|) + (and sign(0) = 1) transposes are replaced by conjugate transposes." + + """ + Y = X.copy() + Y[Y == 0] = 1 + Y /= np.abs(Y) + return Y + + +@_blocked_elementwise +def _max_abs_axis1(X): + return np.max(np.abs(X), axis=1) + + +def _sum_abs_axis0(X): + block_size = 2**20 + r = None + for j in range(0, X.shape[0], block_size): + y = np.sum(np.abs(X[j:j+block_size]), axis=0) + if r is None: + r = y + else: + r += y + return r + + +def elementary_vector(n, i): + v = np.zeros(n, dtype=float) + v[i] = 1 + return v + + +def vectors_are_parallel(v, w): + # Columns are considered parallel when they are equal or negative. + # Entries are required to be in {-1, 1}, + # which guarantees that the magnitudes of the vectors are identical. + if v.ndim != 1 or v.shape != w.shape: + raise ValueError('expected conformant vectors with entries in {-1,1}') + n = v.shape[0] + return np.dot(v, w) == n + + +def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y): + for v in X.T: + if not any(vectors_are_parallel(v, w) for w in Y.T): + return False + return True + + +def column_needs_resampling(i, X, Y=None): + # column i of X needs resampling if either + # it is parallel to a previous column of X or + # it is parallel to a column of Y + n, t = X.shape + v = X[:, i] + if any(vectors_are_parallel(v, X[:, j]) for j in range(i)): + return True + if Y is not None: + if any(vectors_are_parallel(v, w) for w in Y.T): + return True + return False + + +def resample_column(i, X): + X[:, i] = np.random.randint(0, 2, size=X.shape[0])*2 - 1 + + +def less_than_or_close(a, b): + return np.allclose(a, b) or (a < b) + + +def _algorithm_2_2(A, AT, t): + """ + This is Algorithm 2.2. + + Parameters + ---------- + A : ndarray or other linear operator + A linear operator that can produce matrix products. + AT : ndarray or other linear operator + The transpose of A. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + + Returns + ------- + g : sequence + A non-negative decreasing vector + such that g[j] is a lower bound for the 1-norm + of the column of A of jth largest 1-norm. + The first entry of this vector is therefore a lower bound + on the 1-norm of the linear operator A. + This sequence has length t. + ind : sequence + The ith entry of ind is the index of the column A whose 1-norm + is given by g[i]. + This sequence of indices has length t, and its entries are + chosen from range(n), possibly with repetition, + where n is the order of the operator A. + + Notes + ----- + This algorithm is mainly for testing. 
+ It uses the 'ind' array in a way that is similar to + its usage in algorithm 2.4. This algorithm 2.2 may be easier to test, + so it gives a chance of uncovering bugs related to indexing + which could have propagated less noticeably to algorithm 2.4. + + """ + A_linear_operator = aslinearoperator(A) + AT_linear_operator = aslinearoperator(AT) + n = A_linear_operator.shape[0] + + # Initialize the X block with columns of unit 1-norm. + X = np.ones((n, t)) + if t > 1: + X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1 + X /= float(n) + + # Iteratively improve the lower bounds. + # Track extra things, to assert invariants for debugging. + g_prev = None + h_prev = None + k = 1 + ind = range(t) + while True: + Y = np.asarray(A_linear_operator.matmat(X)) + g = _sum_abs_axis0(Y) + best_j = np.argmax(g) + g.sort() + g = g[::-1] + S = sign_round_up(Y) + Z = np.asarray(AT_linear_operator.matmat(S)) + h = _max_abs_axis1(Z) + + # If this algorithm runs for fewer than two iterations, + # then its return values do not have the properties indicated + # in the description of the algorithm. + # In particular, the entries of g are not 1-norms of any + # column of A until the second iteration. + # Therefore we will require the algorithm to run for at least + # two iterations, even though this requirement is not stated + # in the description of the algorithm. + if k >= 2: + if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])): + break + ind = np.argsort(h)[::-1][:t] + h = h[ind] + for j in range(t): + X[:, j] = elementary_vector(n, ind[j]) + + # Check invariant (2.2). + if k >= 2: + if not less_than_or_close(g_prev[0], h_prev[0]): + raise Exception('invariant (2.2) is violated') + if not less_than_or_close(h_prev[0], g[0]): + raise Exception('invariant (2.2) is violated') + + # Check invariant (2.3). + if k >= 3: + for j in range(t): + if not less_than_or_close(g[j], g_prev[j]): + raise Exception('invariant (2.3) is violated') + + # Update for the next iteration. + g_prev = g + h_prev = h + k += 1 + + # Return the lower bounds and the corresponding column indices. + return g, ind + + +def _onenormest_core(A, AT, t, itmax): + """ + Compute a lower bound of the 1-norm of a sparse matrix. + + Parameters + ---------- + A : ndarray or other linear operator + A linear operator that can produce matrix products. + AT : ndarray or other linear operator + The transpose of A. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + itmax : int, optional + Use at most this many iterations. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. + It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. + nmults : int, optional + The number of matrix products that were computed. + nresamples : int, optional + The number of times a parallel column was observed, + necessitating a re-randomization of the column. + + Notes + ----- + This is algorithm 2.4. + + """ + # This function is a more or less direct translation + # of Algorithm 2.4 from the Higham and Tisseur (2000) paper. 
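+    # Outline: repeatedly multiply a block of t candidate vectors by A,
+    # take the column of largest 1-norm as the current estimate, then
+    # multiply sign information by A^T to select more promising unit
+    # vectors for the next iteration.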
+ A_linear_operator = aslinearoperator(A) + AT_linear_operator = aslinearoperator(AT) + if itmax < 2: + raise ValueError('at least two iterations are required') + if t < 1: + raise ValueError('at least one column is required') + n = A.shape[0] + if t >= n: + raise ValueError('t should be smaller than the order of A') + # Track the number of big*small matrix multiplications + # and the number of resamplings. + nmults = 0 + nresamples = 0 + # "We now explain our choice of starting matrix. We take the first + # column of X to be the vector of 1s [...] This has the advantage that + # for a matrix with nonnegative elements the algorithm converges + # with an exact estimate on the second iteration, and such matrices + # arise in applications [...]" + X = np.ones((n, t), dtype=float) + # "The remaining columns are chosen as rand{-1,1}, + # with a check for and correction of parallel columns, + # exactly as for S in the body of the algorithm." + if t > 1: + for i in range(1, t): + # These are technically initial samples, not resamples, + # so the resampling count is not incremented. + resample_column(i, X) + for i in range(t): + while column_needs_resampling(i, X): + resample_column(i, X) + nresamples += 1 + # "Choose starting matrix X with columns of unit 1-norm." + X /= float(n) + # "indices of used unit vectors e_j" + ind_hist = np.zeros(0, dtype=np.intp) + est_old = 0 + S = np.zeros((n, t), dtype=float) + k = 1 + ind = None + while True: + Y = np.asarray(A_linear_operator.matmat(X)) + nmults += 1 + mags = _sum_abs_axis0(Y) + est = np.max(mags) + best_j = np.argmax(mags) + if est > est_old or k == 2: + if k >= 2: + ind_best = ind[best_j] + w = Y[:, best_j] + # (1) + if k >= 2 and est <= est_old: + est = est_old + break + est_old = est + S_old = S + if k > itmax: + break + S = sign_round_up(Y) + del Y + # (2) + if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old): + break + if t > 1: + # "Ensure that no column of S is parallel to another column of S + # or to a column of S_old by replacing columns of S by rand{-1,1}." + for i in range(t): + while column_needs_resampling(i, S, S_old): + resample_column(i, S) + nresamples += 1 + del S_old + # (3) + Z = np.asarray(AT_linear_operator.matmat(S)) + nmults += 1 + h = _max_abs_axis1(Z) + del Z + # (4) + if k >= 2 and max(h) == h[ind_best]: + break + # "Sort h so that h_first >= ... >= h_last + # and re-order ind correspondingly." + # + # Later on, we will need at most t+len(ind_hist) largest + # entries, so drop the rest + ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy() + del h + if t > 1: + # (5) + # Break if the most promising t vectors have been visited already. + if np.in1d(ind[:t], ind_hist).all(): + break + # Put the most promising unvisited vectors at the front of the list + # and put the visited vectors at the end of the list. + # Preserve the order of the indices induced by the ordering of h. 
+ seen = np.in1d(ind, ind_hist) + ind = np.concatenate((ind[~seen], ind[seen])) + for j in range(t): + X[:, j] = elementary_vector(n, ind[j]) + + new_ind = ind[:t][~np.in1d(ind[:t], ind_hist)] + ind_hist = np.concatenate((ind_hist, new_ind)) + k += 1 + v = elementary_vector(n, ind_best) + return est, v, w, nmults, nresamples diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_onenormest.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_onenormest.pyc new file mode 100644 index 0000000..d8a353f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/_onenormest.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/SuperLU/License.txt b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/SuperLU/License.txt new file mode 100644 index 0000000..e003503 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/SuperLU/License.txt @@ -0,0 +1,29 @@ +Copyright (c) 2003, The Regents of the University of California, through +Lawrence Berkeley National Laboratory (subject to receipt of any required +approvals from U.S. Dept. of Energy) + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +(1) Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +(2) Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. +(3) Neither the name of Lawrence Berkeley National Laboratory, U.S. Dept. of +Energy nor the names of its contributors may be used to endorse or promote +products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/__init__.py new file mode 100644 index 0000000..6cefa0e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/__init__.py @@ -0,0 +1,68 @@ +""" +Linear Solvers +============== + +The default solver is SuperLU (included in the scipy distribution), +which can solve real or complex linear systems in both single and +double precisions. It is automatically replaced by UMFPACK, if +available. Note that UMFPACK works in double precision only, so +switch it off by:: + + >>> use_solver(useUmfpack=False) + +to solve in the single precision. See also use_solver documentation. 
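+
+The UMFPACK interface is provided by the optional ``scikits.umfpack``
+package; when it is not installed, the SuperLU solver is always used.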
+ +Example session:: + + >>> from scipy.sparse import csc_matrix, spdiags + >>> from numpy import array + >>> from scipy.sparse.linalg import spsolve, use_solver + >>> + >>> print("Inverting a sparse linear system:") + >>> print("The sparse matrix (constructed from diagonals):") + >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) + >>> b = array([1, 2, 3, 4, 5]) + >>> print("Solve: single precision complex:") + >>> use_solver( useUmfpack = False ) + >>> a = a.astype('F') + >>> x = spsolve(a, b) + >>> print(x) + >>> print("Error: ", a*x-b) + >>> + >>> print("Solve: double precision complex:") + >>> use_solver( useUmfpack = True ) + >>> a = a.astype('D') + >>> x = spsolve(a, b) + >>> print(x) + >>> print("Error: ", a*x-b) + >>> + >>> print("Solve: double precision:") + >>> a = a.astype('d') + >>> x = spsolve(a, b) + >>> print(x) + >>> print("Error: ", a*x-b) + >>> + >>> print("Solve: single precision:") + >>> use_solver( useUmfpack = False ) + >>> a = a.astype('f') + >>> x = spsolve(a, b.astype('f')) + >>> print(x) + >>> print("Error: ", a*x-b) + +""" + +from __future__ import division, print_function, absolute_import + +#import umfpack +#__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) ) +#del umfpack + +from .linsolve import * +from ._superlu import SuperLU +from . import _add_newdocs + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/__init__.pyc new file mode 100644 index 0000000..a242c09 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/_add_newdocs.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/_add_newdocs.py new file mode 100644 index 0000000..c973325 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/_add_newdocs.py @@ -0,0 +1,154 @@ +from numpy.lib import add_newdoc + +add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', + """ + LU factorization of a sparse matrix. + + Factorization is represented as:: + + Pr * A * Pc = L * U + + To construct these `SuperLU` objects, call the `splu` and `spilu` + functions. + + Attributes + ---------- + shape + nnz + perm_c + perm_r + L + U + + Methods + ------- + solve + + Notes + ----- + + .. versionadded:: 0.14.0 + + Examples + -------- + The LU decomposition can be used to solve matrix equations. Consider: + + >>> import numpy as np + >>> from scipy.sparse import csc_matrix, linalg as sla + >>> A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]]) + + This can be solved for a given right-hand side: + + >>> lu = sla.splu(A) + >>> b = np.array([1, 2, 3, 4]) + >>> x = lu.solve(b) + >>> A.dot(x) + array([ 1., 2., 3., 4.]) + + The ``lu`` object also contains an explicit representation of the + decomposition. The permutations are represented as mappings of + indices: + + >>> lu.perm_r + array([0, 2, 1, 3], dtype=int32) + >>> lu.perm_c + array([2, 0, 1, 3], dtype=int32) + + The L and U factors are sparse matrices in CSC format: + + >>> lu.L.A + array([[ 1. , 0. , 0. , 0. ], + [ 0. , 1. , 0. , 0. ], + [ 0. , 0. , 1. , 0. ], + [ 1. , 0.5, 0.5, 1. 
]]) + >>> lu.U.A + array([[ 2., 0., 1., 4.], + [ 0., 2., 1., 1.], + [ 0., 0., 1., 1.], + [ 0., 0., 0., -5.]]) + + The permutation matrices can be constructed: + + >>> Pr = csc_matrix((4, 4)) + >>> Pr[lu.perm_r, np.arange(4)] = 1 + >>> Pc = csc_matrix((4, 4)) + >>> Pc[np.arange(4), lu.perm_c] = 1 + + We can reassemble the original matrix: + + >>> (Pr.T * (lu.L * lu.U) * Pc.T).A + array([[ 1., 2., 0., 4.], + [ 1., 0., 0., 1.], + [ 1., 0., 2., 1.], + [ 2., 2., 1., 0.]]) + """) + +add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('solve', + """ + solve(rhs[, trans]) + + Solves linear system of equations with one or several right-hand sides. + + Parameters + ---------- + rhs : ndarray, shape (n,) or (n, k) + Right hand side(s) of equation + trans : {'N', 'T', 'H'}, optional + Type of system to solve:: + + 'N': A * x == rhs (default) + 'T': A^T * x == rhs + 'H': A^H * x == rhs + + i.e., normal, transposed, and hermitian conjugate. + + Returns + ------- + x : ndarray, shape ``rhs.shape`` + Solution vector(s) + """)) + +add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('L', + """ + Lower triangular factor with unit diagonal as a + `scipy.sparse.csc_matrix`. + + .. versionadded:: 0.14.0 + """)) + +add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('U', + """ + Upper triangular factor as a `scipy.sparse.csc_matrix`. + + .. versionadded:: 0.14.0 + """)) + +add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('shape', + """ + Shape of the original matrix as a tuple of ints. + """)) + +add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('nnz', + """ + Number of nonzero elements in the matrix. + """)) + +add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_c', + """ + Permutation Pc represented as an array of indices. + + The column permutation matrix can be reconstructed via: + + >>> Pc = np.zeros((n, n)) + >>> Pc[np.arange(n), perm_c] = 1 + """)) + +add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_r', + """ + Permutation Pr represented as an array of indices. + + The row permutation matrix can be reconstructed via: + + >>> Pr = np.zeros((n, n)) + >>> Pr[perm_r, np.arange(n)] = 1 + """)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/_add_newdocs.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/_add_newdocs.pyc new file mode 100644 index 0000000..0483457 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/_add_newdocs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/_superlu.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/_superlu.so new file mode 100755 index 0000000..e2cf03c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/_superlu.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/linsolve.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/linsolve.py new file mode 100644 index 0000000..e067938 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/linsolve.py @@ -0,0 +1,570 @@ +from __future__ import division, print_function, absolute_import + +from warnings import warn + +import numpy as np +from numpy import asarray +from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix, + SparseEfficiencyWarning, csc_matrix, csr_matrix) +from scipy.linalg import LinAlgError + +from . 
import _superlu + +noScikit = False +try: + import scikits.umfpack as umfpack +except ImportError: + noScikit = True + +useUmfpack = not noScikit + +__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized', + 'MatrixRankWarning', 'spsolve_triangular'] + + +class MatrixRankWarning(UserWarning): + pass + + +def use_solver(**kwargs): + """ + Select default sparse direct solver to be used. + + Parameters + ---------- + useUmfpack : bool, optional + Use UMFPACK over SuperLU. Has effect only if scikits.umfpack is + installed. Default: True + assumeSortedIndices : bool, optional + Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix. + Has effect only if useUmfpack is True and scikits.umfpack is installed. + Default: False + + Notes + ----- + The default sparse solver is umfpack when available + (scikits.umfpack is installed). This can be changed by passing + useUmfpack = False, which then causes the always present SuperLU + based solver to be used. + + Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If + sure that the matrix fulfills this, pass ``assumeSortedIndices=True`` + to gain some speed. + + """ + if 'useUmfpack' in kwargs: + globals()['useUmfpack'] = kwargs['useUmfpack'] + if useUmfpack and 'assumeSortedIndices' in kwargs: + umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices']) + +def _get_umf_family(A): + """Get umfpack family string given the sparse matrix dtype.""" + _families = { + (np.float64, np.int32): 'di', + (np.complex128, np.int32): 'zi', + (np.float64, np.int64): 'dl', + (np.complex128, np.int64): 'zl' + } + + f_type = np.sctypeDict[A.dtype.name] + i_type = np.sctypeDict[A.indices.dtype.name] + + try: + family = _families[(f_type, i_type)] + + except KeyError: + msg = 'only float64 or complex128 matrices with int32 or int64' \ + ' indices are supported! (got: matrix: %s, indices: %s)' \ + % (f_type, i_type) + raise ValueError(msg) + + return family + +def spsolve(A, b, permc_spec=None, use_umfpack=True): + """Solve the sparse linear system Ax=b, where b may be a vector or a matrix. + + Parameters + ---------- + A : ndarray or sparse matrix + The square matrix A will be converted into CSC or CSR form + b : ndarray or sparse matrix + The matrix or vector representing the right hand side of the equation. + If a vector, b.shape must be (n,) or (n, 1). + permc_spec : str, optional + How to permute the columns of the matrix for sparsity preservation. + (default: 'COLAMD') + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering + use_umfpack : bool, optional + if True (default) then use umfpack for the solution. This is + only referenced if b is a vector and ``scikit-umfpack`` is installed. + + Returns + ------- + x : ndarray or sparse matrix + the solution of the sparse linear equation. + If b is a vector, then x is a vector of size A.shape[1] + If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1]) + + Notes + ----- + For solving the matrix expression AX = B, this solver assumes the resulting + matrix X is sparse, as is often the case for very sparse inputs. If the + resulting X is dense, the construction of this sparse result will be + relatively expensive. In that case, consider converting A to a dense + matrix and using scipy.linalg.solve or its variants. 
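+
+    The operands are promoted to a common floating-point dtype before
+    solving, so mixing real and complex inputs yields a complex solution.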
+ + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import spsolve + >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float) + >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float) + >>> x = spsolve(A, B) + >>> np.allclose(A.dot(x).todense(), B.todense()) + True + """ + if not (isspmatrix_csc(A) or isspmatrix_csr(A)): + A = csc_matrix(A) + warn('spsolve requires A be CSC or CSR matrix format', + SparseEfficiencyWarning) + + # b is a vector only if b have shape (n,) or (n, 1) + b_is_sparse = isspmatrix(b) + if not b_is_sparse: + b = asarray(b) + b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1)) + + # sum duplicates for non-canonical format + A.sum_duplicates() + A = A.asfptype() # upcast to a floating point format + result_dtype = np.promote_types(A.dtype, b.dtype) + if A.dtype != result_dtype: + A = A.astype(result_dtype) + if b.dtype != result_dtype: + b = b.astype(result_dtype) + + # validate input shapes + M, N = A.shape + if (M != N): + raise ValueError("matrix must be square (has shape %s)" % ((M, N),)) + + if M != b.shape[0]: + raise ValueError("matrix - rhs dimension mismatch (%s - %s)" + % (A.shape, b.shape[0])) + + use_umfpack = use_umfpack and useUmfpack + + if b_is_vector and use_umfpack: + if b_is_sparse: + b_vec = b.toarray() + else: + b_vec = b + b_vec = asarray(b_vec, dtype=A.dtype).ravel() + + if noScikit: + raise RuntimeError('Scikits.umfpack not installed.') + + if A.dtype.char not in 'dD': + raise ValueError("convert matrix data to double, please, using" + " .astype(), or set linsolve.useUmfpack = False") + + umf = umfpack.UmfpackContext(_get_umf_family(A)) + x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec, + autoTranspose=True) + else: + if b_is_vector and b_is_sparse: + b = b.toarray() + b_is_sparse = False + + if not b_is_sparse: + if isspmatrix_csc(A): + flag = 1 # CSC format + else: + flag = 0 # CSR format + + options = dict(ColPerm=permc_spec) + x, info = _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr, + b, flag, options=options) + if info != 0: + warn("Matrix is exactly singular", MatrixRankWarning) + x.fill(np.nan) + if b_is_vector: + x = x.ravel() + else: + # b is sparse + Afactsolve = factorized(A) + + if not isspmatrix_csc(b): + warn('spsolve is more efficient when sparse b ' + 'is in the CSC matrix format', SparseEfficiencyWarning) + b = csc_matrix(b) + + # Create a sparse output matrix by repeatedly applying + # the sparse factorization to solve columns of b. + data_segs = [] + row_segs = [] + col_segs = [] + for j in range(b.shape[1]): + bj = b[:, j].A.ravel() + xj = Afactsolve(bj) + w = np.flatnonzero(xj) + segment_length = w.shape[0] + row_segs.append(w) + col_segs.append(np.full(segment_length, j, dtype=int)) + data_segs.append(np.asarray(xj[w], dtype=A.dtype)) + sparse_data = np.concatenate(data_segs) + sparse_row = np.concatenate(row_segs) + sparse_col = np.concatenate(col_segs) + x = A.__class__((sparse_data, (sparse_row, sparse_col)), + shape=b.shape, dtype=A.dtype) + + return x + + +def splu(A, permc_spec=None, diag_pivot_thresh=None, + relax=None, panel_size=None, options=dict()): + """ + Compute the LU decomposition of a sparse, square matrix. + + Parameters + ---------- + A : sparse matrix + Sparse matrix to factorize. Should be in CSR or CSC format. + permc_spec : str, optional + How to permute the columns of the matrix for sparsity preservation. + (default: 'COLAMD') + + - ``NATURAL``: natural ordering. 
+    - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+    - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+    - ``COLAMD``: approximate minimum degree column ordering
+
+    diag_pivot_thresh : float, optional
+        Threshold used for a diagonal entry to be an acceptable pivot.
+        See SuperLU user's guide for details [1]_
+    relax : int, optional
+        Expert option for customizing the degree of relaxing supernodes.
+        See SuperLU user's guide for details [1]_
+    panel_size : int, optional
+        Expert option for customizing the panel size.
+        See SuperLU user's guide for details [1]_
+    options : dict, optional
+        Dictionary containing additional expert options to SuperLU.
+        See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
+        for more details. For example, you can specify
+        ``options=dict(Equil=False, IterRefine='SINGLE')``
+        to turn equilibration off and perform a single iterative refinement.
+
+    Returns
+    -------
+    invA : scipy.sparse.linalg.SuperLU
+        Object, which has a ``solve`` method.
+
+    See also
+    --------
+    spilu : incomplete LU decomposition
+
+    Notes
+    -----
+    This function uses the SuperLU library.
+
+    References
+    ----------
+    .. [1] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
+
+    Examples
+    --------
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import splu
+    >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
+    >>> B = splu(A)
+    >>> x = np.array([1., 2., 3.], dtype=float)
+    >>> B.solve(x)
+    array([ 1. , -3. , -1.5])
+    >>> A.dot(B.solve(x))
+    array([ 1.,  2.,  3.])
+    >>> B.solve(A.dot(x))
+    array([ 1.,  2.,  3.])
+    """
+
+    if not isspmatrix_csc(A):
+        A = csc_matrix(A)
+        warn('splu requires CSC matrix format', SparseEfficiencyWarning)
+
+    # sum duplicates for non-canonical format
+    A.sum_duplicates()
+    A = A.asfptype()  # upcast to a floating point format
+
+    M, N = A.shape
+    if (M != N):
+        raise ValueError("can only factor square matrices")  # is this true?
+
+    _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
+                    PanelSize=panel_size, Relax=relax)
+    if options is not None:
+        _options.update(options)
+    return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
+                          ilu=False, options=_options)
+
+
+def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
+          diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
+    """
+    Compute an incomplete LU decomposition for a sparse, square matrix.
+
+    The resulting object is an approximation to the inverse of `A`.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Sparse matrix to factorize
+    drop_tol : float, optional
+        Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
+        (default: 1e-4)
+    fill_factor : float, optional
+        Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
+    drop_rule : str, optional
+        Comma-separated string of drop rules to use.
+        Available rules: ``basic``, ``prows``, ``column``, ``area``,
+        ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
+
+        See SuperLU documentation for details.
+
+    Remaining other options
+        Same as for `splu`
+
+    Returns
+    -------
+    invA_approx : scipy.sparse.linalg.SuperLU
+        Object, which has a ``solve`` method.
+
+    See also
+    --------
+    splu : complete LU decomposition
+
+    Notes
+    -----
+    To improve the approximation to the inverse, you may need to
+    increase `fill_factor` AND decrease `drop_tol`.
+
+    This function uses the SuperLU library.
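+
+    A common use of the incomplete factorization is as a preconditioner
+    for an iterative solver. A minimal sketch (the names ``A`` and ``b``
+    stand for a square sparse matrix and a right-hand side)::
+
+        from scipy.sparse.linalg import LinearOperator, gmres
+        ilu = spilu(A)
+        M = LinearOperator(A.shape, matvec=ilu.solve)
+        x, info = gmres(A, b, M=M)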
+ + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import spilu + >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float) + >>> B = spilu(A) + >>> x = np.array([1., 2., 3.], dtype=float) + >>> B.solve(x) + array([ 1. , -3. , -1.5]) + >>> A.dot(B.solve(x)) + array([ 1., 2., 3.]) + >>> B.solve(A.dot(x)) + array([ 1., 2., 3.]) + """ + if not isspmatrix_csc(A): + A = csc_matrix(A) + warn('splu requires CSC matrix format', SparseEfficiencyWarning) + + # sum duplicates for non-canonical format + A.sum_duplicates() + A = A.asfptype() # upcast to a floating point format + + M, N = A.shape + if (M != N): + raise ValueError("can only factor square matrices") # is this true? + + _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol, + ILU_FillFactor=fill_factor, + DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, + PanelSize=panel_size, Relax=relax) + if options is not None: + _options.update(options) + return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr, + ilu=True, options=_options) + + +def factorized(A): + """ + Return a function for solving a sparse linear system, with A pre-factorized. + + Parameters + ---------- + A : (N, N) array_like + Input. + + Returns + ------- + solve : callable + To solve the linear system of equations given in `A`, the `solve` + callable should be passed an ndarray of shape (N,). + + Examples + -------- + >>> from scipy.sparse.linalg import factorized + >>> A = np.array([[ 3. , 2. , -1. ], + ... [ 2. , -2. , 4. ], + ... [-1. , 0.5, -1. ]]) + >>> solve = factorized(A) # Makes LU decomposition. + >>> rhs1 = np.array([1, -2, 0]) + >>> solve(rhs1) # Uses the LU factors. + array([ 1., -2., -2.]) + + """ + if useUmfpack: + if noScikit: + raise RuntimeError('Scikits.umfpack not installed.') + + if not isspmatrix_csc(A): + A = csc_matrix(A) + warn('splu requires CSC matrix format', SparseEfficiencyWarning) + + A = A.asfptype() # upcast to a floating point format + + if A.dtype.char not in 'dD': + raise ValueError("convert matrix data to double, please, using" + " .astype(), or set linsolve.useUmfpack = False") + + umf = umfpack.UmfpackContext(_get_umf_family(A)) + + # Make LU decomposition. + umf.numeric(A) + + def solve(b): + return umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True) + + return solve + else: + return splu(A).solve + + +def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False): + """ + Solve the equation `A x = b` for `x`, assuming A is a triangular matrix. + + Parameters + ---------- + A : (M, M) sparse matrix + A sparse square triangular matrix. Should be in CSR format. + b : (M,) or (M, N) array_like + Right-hand side matrix in `A x = b` + lower : bool, optional + Whether `A` is a lower or upper triangular matrix. + Default is lower triangular matrix. + overwrite_A : bool, optional + Allow changing `A`. The indices of `A` are going to be sorted and zero + entries are going to be removed. + Enabling gives a performance gain. Default is False. + overwrite_b : bool, optional + Allow overwriting data in `b`. + Enabling gives a performance gain. Default is False. + If `overwrite_b` is True, it should be ensured that + `b` has an appropriate dtype to be able to store the result. + + Returns + ------- + x : (M,) or (M, N) ndarray + Solution to the system `A x = b`. Shape of return matches shape of `b`. + + Raises + ------ + LinAlgError + If `A` is singular or not triangular. + ValueError + If shape of `A` or shape of `b` do not match the requirements. 
+ + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + >>> from scipy.sparse import csr_matrix + >>> from scipy.sparse.linalg import spsolve_triangular + >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float) + >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float) + >>> x = spsolve_triangular(A, B) + >>> np.allclose(A.dot(x), B) + True + """ + + # Check the input for correct type and format. + if not isspmatrix_csr(A): + warn('CSR matrix format is required. Converting to CSR matrix.', + SparseEfficiencyWarning) + A = csr_matrix(A) + elif not overwrite_A: + A = A.copy() + + if A.shape[0] != A.shape[1]: + raise ValueError( + 'A must be a square matrix but its shape is {}.'.format(A.shape)) + + # sum duplicates for non-canonical format + A.sum_duplicates() + + b = np.asanyarray(b) + + if b.ndim not in [1, 2]: + raise ValueError( + 'b must have 1 or 2 dims but its shape is {}.'.format(b.shape)) + if A.shape[0] != b.shape[0]: + raise ValueError( + 'The size of the dimensions of A must be equal to ' + 'the size of the first dimension of b but the shape of A is ' + '{} and the shape of b is {}.'.format(A.shape, b.shape)) + + # Init x as (a copy of) b. + x_dtype = np.result_type(A.data, b, np.float) + if overwrite_b: + if np.can_cast(b.dtype, x_dtype, casting='same_kind'): + x = b + else: + raise ValueError( + 'Cannot overwrite b (dtype {}) with result ' + 'of type {}.'.format(b.dtype, x_dtype)) + else: + x = b.astype(x_dtype, copy=True) + + # Choose forward or backward order. + if lower: + row_indices = range(len(b)) + else: + row_indices = range(len(b) - 1, -1, -1) + + # Fill x iteratively. + for i in row_indices: + + # Get indices for i-th row. + indptr_start = A.indptr[i] + indptr_stop = A.indptr[i + 1] + if lower: + A_diagonal_index_row_i = indptr_stop - 1 + A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1) + else: + A_diagonal_index_row_i = indptr_start + A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop) + + # Check regularity and triangularity of A. + if indptr_stop <= indptr_start or A.indices[A_diagonal_index_row_i] < i: + raise LinAlgError( + 'A is singular: diagonal {} is zero.'.format(i)) + if A.indices[A_diagonal_index_row_i] > i: + raise LinAlgError( + 'A is not triangular: A[{}, {}] is nonzero.' + ''.format(i, A.indices[A_diagonal_index_row_i])) + + # Incorporate off-diagonal entries. + A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i] + A_values_in_row_i = A.data[A_off_diagonal_indices_row_i] + x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i) + + # Compute i-th entry of x. 
+ x[i] /= A.data[A_diagonal_index_row_i] + + return x diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/linsolve.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/linsolve.pyc new file mode 100644 index 0000000..6414c10 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/linsolve.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/setup.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/setup.py new file mode 100644 index 0000000..a2618e6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/setup.py @@ -0,0 +1,56 @@ +from __future__ import division, print_function, absolute_import + +from os.path import join, dirname +import sys +import os +import glob + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + from scipy._build_utils.system_info import get_info + from scipy._build_utils import numpy_nodepr_api + + config = Configuration('dsolve',parent_package,top_path) + config.add_data_dir('tests') + + lapack_opt = get_info('lapack_opt',notfound_action=2) + if sys.platform == 'win32': + superlu_defs = [('NO_TIMER',1)] + else: + superlu_defs = [] + superlu_defs.append(('USE_VENDOR_BLAS',1)) + + superlu_src = join(dirname(__file__), 'SuperLU', 'SRC') + + sources = sorted(glob.glob(join(superlu_src, '*.c'))) + headers = list(glob.glob(join(superlu_src, '*.h'))) + + config.add_library('superlu_src', + sources=sources, + macros=superlu_defs, + include_dirs=[superlu_src], + ) + + # Extension + ext_sources = ['_superlumodule.c', + '_superlu_utils.c', + '_superluobject.c'] + + config.add_extension('_superlu', + sources=ext_sources, + libraries=['superlu_src'], + depends=(sources + headers), + extra_info=lapack_opt, + **numpy_nodepr_api + ) + + # Add license files + config.add_data_files('SuperLU/License.txt') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/setup.pyc new file mode 100644 index 0000000..468e882 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/__init__.pyc new file mode 100644 index 0000000..4c48669 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/test_linsolve.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/test_linsolve.py new file mode 100644 index 0000000..ee7dd12 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/test_linsolve.py @@ -0,0 +1,718 @@ +from __future__ import division, print_function, absolute_import + +import sys +import threading + +import numpy as np +from numpy import array, 
finfo, arange, eye, all, unique, ones, dot, matrix +import numpy.random as random +from numpy.testing import ( + assert_array_almost_equal, assert_almost_equal, + assert_equal, assert_array_equal, assert_, assert_allclose, + assert_warns) +import pytest +from pytest import raises as assert_raises + +import scipy.linalg +from scipy.linalg import norm, inv +from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix, + csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix) +from scipy.sparse.linalg import SuperLU +from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu, + MatrixRankWarning, _superlu, spsolve_triangular, factorized) + +from scipy._lib._numpy_compat import suppress_warnings + + +sup_sparse_efficiency = suppress_warnings() +sup_sparse_efficiency.filter(SparseEfficiencyWarning) + +# scikits.umfpack is not a SciPy dependency but it is optionally used in +# dsolve, so check whether it's available +try: + import scikits.umfpack as umfpack + has_umfpack = True +except ImportError: + has_umfpack = False + +def toarray(a): + if isspmatrix(a): + return a.toarray() + else: + return a + + +class TestFactorized(object): + def setup_method(self): + n = 5 + d = arange(n) + 1 + self.n = n + self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc() + random.seed(1234) + + def _check_singular(self): + A = csc_matrix((5,5), dtype='d') + b = ones(5) + assert_array_almost_equal(0. * b, factorized(A)(b)) + + def _check_non_singular(self): + # Make a diagonal dominant, to make sure it is not singular + n = 5 + a = csc_matrix(random.rand(n, n)) + b = ones(n) + + expected = splu(a).solve(b) + assert_array_almost_equal(factorized(a)(b), expected) + + def test_singular_without_umfpack(self): + use_solver(useUmfpack=False) + with assert_raises(RuntimeError, match="Factor is exactly singular"): + self._check_singular() + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_singular_with_umfpack(self): + use_solver(useUmfpack=True) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars") + assert_warns(umfpack.UmfpackWarning, self._check_singular) + + def test_non_singular_without_umfpack(self): + use_solver(useUmfpack=False) + self._check_non_singular() + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_non_singular_with_umfpack(self): + use_solver(useUmfpack=True) + self._check_non_singular() + + def test_cannot_factorize_nonsquare_matrix_without_umfpack(self): + use_solver(useUmfpack=False) + msg = "can only factor square matrices" + with assert_raises(ValueError, match=msg): + factorized(self.A[:, :4]) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_factorizes_nonsquare_matrix_with_umfpack(self): + use_solver(useUmfpack=True) + # does not raise + factorized(self.A[:,:4]) + + def test_call_with_incorrectly_sized_matrix_without_umfpack(self): + use_solver(useUmfpack=False) + solve = factorized(self.A) + b = random.rand(4) + B = random.rand(4, 3) + BB = random.rand(self.n, 3, 9) + + with assert_raises(ValueError, match="is of incompatible size"): + solve(b) + with assert_raises(ValueError, match="is of incompatible size"): + solve(B) + with assert_raises(ValueError, + match="object too deep for desired array"): + solve(BB) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_call_with_incorrectly_sized_matrix_with_umfpack(self): + use_solver(useUmfpack=True) + solve = 
factorized(self.A) + b = random.rand(4) + B = random.rand(4, 3) + BB = random.rand(self.n, 3, 9) + + # does not raise + solve(b) + msg = "object too deep for desired array" + with assert_raises(ValueError, match=msg): + solve(B) + with assert_raises(ValueError, match=msg): + solve(BB) + + def test_call_with_cast_to_complex_without_umfpack(self): + use_solver(useUmfpack=False) + solve = factorized(self.A) + b = random.rand(4) + for t in [np.complex64, np.complex128]: + with assert_raises(TypeError, match="Cannot cast array data"): + solve(b.astype(t)) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_call_with_cast_to_complex_with_umfpack(self): + use_solver(useUmfpack=True) + solve = factorized(self.A) + b = random.rand(4) + for t in [np.complex64, np.complex128]: + assert_warns(np.ComplexWarning, solve, b.astype(t)) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_assume_sorted_indices_flag(self): + # a sparse matrix with unsorted indices + unsorted_inds = np.array([2, 0, 1, 0]) + data = np.array([10, 16, 5, 0.4]) + indptr = np.array([0, 1, 2, 4]) + A = csc_matrix((data, unsorted_inds, indptr), (3, 3)) + b = ones(3) + + # should raise when incorrectly assuming indices are sorted + use_solver(useUmfpack=True, assumeSortedIndices=True) + with assert_raises(RuntimeError, + match="UMFPACK_ERROR_invalid_matrix"): + factorized(A) + + # should sort indices and succeed when not assuming indices are sorted + use_solver(useUmfpack=True, assumeSortedIndices=False) + expected = splu(A.copy()).solve(b) + + assert_equal(A.has_sorted_indices, 0) + assert_array_almost_equal(factorized(A)(b), expected) + assert_equal(A.has_sorted_indices, 1) + + +class TestLinsolve(object): + def setup_method(self): + use_solver(useUmfpack=False) + + def test_singular(self): + A = csc_matrix((5,5), dtype='d') + b = array([1, 2, 3, 4, 5],dtype='d') + with suppress_warnings() as sup: + sup.filter(MatrixRankWarning, "Matrix is exactly singular") + x = spsolve(A, b) + assert_(not np.isfinite(x).any()) + + def test_singular_gh_3312(self): + # "Bad" test case that leads SuperLU to call LAPACK with invalid + # arguments. Check that it fails moderately gracefully. 
+ ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32) + v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296]) + A = csc_matrix((v, ij.T), shape=(20, 20)) + b = np.arange(20) + + try: + # should either raise a runtimeerror or return value + # appropriate for singular input + x = spsolve(A, b) + assert_(not np.isfinite(x).any()) + except RuntimeError: + pass + + def test_twodiags(self): + A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) + b = array([1, 2, 3, 4, 5]) + + # condition number of A + cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2) + + for t in ['f','d','F','D']: + eps = finfo(t).eps # floating point epsilon + b = b.astype(t) + + for format in ['csc','csr']: + Asp = A.astype(t).asformat(format) + + x = spsolve(Asp,b) + + assert_(norm(b - Asp*x) < 10 * cond_A * eps) + + def test_bvector_smoketest(self): + Adense = matrix([[0., 1., 1.], + [1., 0., 1.], + [0., 0., 1.]]) + As = csc_matrix(Adense) + random.seed(1234) + x = random.randn(3) + b = As*x + x2 = spsolve(As, b) + + assert_array_almost_equal(x, x2) + + def test_bmatrix_smoketest(self): + Adense = matrix([[0., 1., 1.], + [1., 0., 1.], + [0., 0., 1.]]) + As = csc_matrix(Adense) + random.seed(1234) + x = random.randn(3, 4) + Bdense = As.dot(x) + Bs = csc_matrix(Bdense) + x2 = spsolve(As, Bs) + assert_array_almost_equal(x, x2.todense()) + + @sup_sparse_efficiency + def test_non_square(self): + # A is not square. + A = ones((3, 4)) + b = ones((4, 1)) + assert_raises(ValueError, spsolve, A, b) + # A2 and b2 have incompatible shapes. + A2 = csc_matrix(eye(3)) + b2 = array([1.0, 2.0]) + assert_raises(ValueError, spsolve, A2, b2) + + @sup_sparse_efficiency + def test_example_comparison(self): + row = array([0,0,1,2,2,2]) + col = array([0,2,2,0,1,2]) + data = array([1,2,3,-4,5,6]) + sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float) + M = sM.todense() + + row = array([0,0,1,1,0,0]) + col = array([0,2,1,1,0,0]) + data = array([1,1,1,1,1,1]) + sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float) + N = sN.todense() + + sX = spsolve(sM, sN) + X = scipy.linalg.solve(M, N) + + assert_array_almost_equal(X, sX.todense()) + + @sup_sparse_efficiency + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_shape_compatibility(self): + use_solver(useUmfpack=True) + A = csc_matrix([[1., 0], [0, 2]]) + bs = [ + [1, 6], + array([1, 6]), + [[1], [6]], + array([[1], [6]]), + csc_matrix([[1], [6]]), + csr_matrix([[1], [6]]), + dok_matrix([[1], [6]]), + bsr_matrix([[1], [6]]), + array([[1., 2., 3.], [6., 8., 10.]]), + csc_matrix([[1., 2., 3.], [6., 8., 10.]]), + csr_matrix([[1., 2., 3.], [6., 8., 10.]]), + dok_matrix([[1., 2., 3.], [6., 8., 10.]]), + bsr_matrix([[1., 2., 3.], [6., 8., 10.]]), + ] + + for b in bs: + x = np.linalg.solve(A.toarray(), toarray(b)) + for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]: + x1 = spsolve(spmattype(A), b, use_umfpack=True) + x2 = spsolve(spmattype(A), b, use_umfpack=False) + + # check solution + if x.ndim == 2 and x.shape[1] == 1: + # interprets also these as "vectors" + x = x.ravel() + + assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1))) + assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2))) + + # dense vs. 
sparse output ("vectors" are always dense) + if isspmatrix(b) and x.ndim > 1: + assert_(isspmatrix(x1), repr((b, spmattype, 1))) + assert_(isspmatrix(x2), repr((b, spmattype, 2))) + else: + assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1))) + assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2))) + + # check output shape + if x.ndim == 1: + # "vector" + assert_equal(x1.shape, (A.shape[1],)) + assert_equal(x2.shape, (A.shape[1],)) + else: + # "matrix" + assert_equal(x1.shape, x.shape) + assert_equal(x2.shape, x.shape) + + A = csc_matrix((3, 3)) + b = csc_matrix((1, 3)) + assert_raises(ValueError, spsolve, A, b) + + @sup_sparse_efficiency + def test_ndarray_support(self): + A = array([[1., 2.], [2., 0.]]) + x = array([[1., 1.], [0.5, -0.5]]) + b = array([[2., 0.], [2., 2.]]) + + assert_array_almost_equal(x, spsolve(A, b)) + + def test_gssv_badinput(self): + N = 10 + d = arange(N) + 1.0 + A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N) + + for spmatrix in (csc_matrix, csr_matrix): + A = spmatrix(A) + b = np.arange(N) + + def not_c_contig(x): + return x.repeat(2)[::2] + + def not_1dim(x): + return x[:,None] + + def bad_type(x): + return x.astype(bool) + + def too_short(x): + return x[:-1] + + badops = [not_c_contig, not_1dim, bad_type, too_short] + + for badop in badops: + msg = "%r %r" % (spmatrix, badop) + # Not C-contiguous + assert_raises((ValueError, TypeError), _superlu.gssv, + N, A.nnz, badop(A.data), A.indices, A.indptr, + b, int(spmatrix == csc_matrix), err_msg=msg) + assert_raises((ValueError, TypeError), _superlu.gssv, + N, A.nnz, A.data, badop(A.indices), A.indptr, + b, int(spmatrix == csc_matrix), err_msg=msg) + assert_raises((ValueError, TypeError), _superlu.gssv, + N, A.nnz, A.data, A.indices, badop(A.indptr), + b, int(spmatrix == csc_matrix), err_msg=msg) + + def test_sparsity_preservation(self): + ident = csc_matrix([ + [1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + b = csc_matrix([ + [0, 1], + [1, 0], + [0, 0]]) + x = spsolve(ident, b) + assert_equal(ident.nnz, 3) + assert_equal(b.nnz, 2) + assert_equal(x.nnz, 2) + assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12) + + def test_dtype_cast(self): + A_real = scipy.sparse.csr_matrix([[1, 2, 0], + [0, 0, 3], + [4, 0, 5]]) + A_complex = scipy.sparse.csr_matrix([[1, 2, 0], + [0, 0, 3], + [4, 0, 5 + 1j]]) + b_real = np.array([1,1,1]) + b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1]) + x = spsolve(A_real, b_real) + assert_(np.issubdtype(x.dtype, np.floating)) + x = spsolve(A_real, b_complex) + assert_(np.issubdtype(x.dtype, np.complexfloating)) + x = spsolve(A_complex, b_real) + assert_(np.issubdtype(x.dtype, np.complexfloating)) + x = spsolve(A_complex, b_complex) + assert_(np.issubdtype(x.dtype, np.complexfloating)) + + +class TestSplu(object): + def setup_method(self): + use_solver(useUmfpack=False) + n = 40 + d = arange(n) + 1 + self.n = n + self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) + random.seed(1234) + + def _smoketest(self, spxlu, check, dtype): + if np.issubdtype(dtype, np.complexfloating): + A = self.A + 1j*self.A.T + else: + A = self.A + + A = A.astype(dtype) + lu = spxlu(A) + + rng = random.RandomState(1234) + + # Input shapes + for k in [None, 1, 2, self.n, self.n+2]: + msg = "k=%r" % (k,) + + if k is None: + b = rng.rand(self.n) + else: + b = rng.rand(self.n, k) + + if np.issubdtype(dtype, np.complexfloating): + b = b + 1j*rng.rand(*b.shape) + b = b.astype(dtype) + + x = lu.solve(b) + check(A, b, x, msg) + + x = lu.solve(b, 'T') + check(A.T, b, x, msg) + + x = lu.solve(b, 'H') + 
check(A.T.conj(), b, x, msg) + + @sup_sparse_efficiency + def test_splu_smoketest(self): + self._internal_test_splu_smoketest() + + def _internal_test_splu_smoketest(self): + # Check that splu works at all + def check(A, b, x, msg=""): + eps = np.finfo(A.dtype).eps + r = A * x + assert_(abs(r - b).max() < 1e3*eps, msg) + + self._smoketest(splu, check, np.float32) + self._smoketest(splu, check, np.float64) + self._smoketest(splu, check, np.complex64) + self._smoketest(splu, check, np.complex128) + + @sup_sparse_efficiency + def test_spilu_smoketest(self): + self._internal_test_spilu_smoketest() + + def _internal_test_spilu_smoketest(self): + errors = [] + + def check(A, b, x, msg=""): + r = A * x + err = abs(r - b).max() + assert_(err < 1e-2, msg) + if b.dtype in (np.float64, np.complex128): + errors.append(err) + + self._smoketest(spilu, check, np.float32) + self._smoketest(spilu, check, np.float64) + self._smoketest(spilu, check, np.complex64) + self._smoketest(spilu, check, np.complex128) + + assert_(max(errors) > 1e-5) + + @sup_sparse_efficiency + def test_spilu_drop_rule(self): + # Test passing in the drop_rule argument to spilu. + A = identity(2) + + rules = [ + b'basic,area'.decode('ascii'), # unicode + b'basic,area', # ascii + [b'basic', b'area'.decode('ascii')] + ] + for rule in rules: + # Argument should be accepted + assert_(isinstance(spilu(A, drop_rule=rule), SuperLU)) + + def test_splu_nnz0(self): + A = csc_matrix((5,5), dtype='d') + assert_raises(RuntimeError, splu, A) + + def test_spilu_nnz0(self): + A = csc_matrix((5,5), dtype='d') + assert_raises(RuntimeError, spilu, A) + + def test_splu_basic(self): + # Test basic splu functionality. + n = 30 + rng = random.RandomState(12) + a = rng.rand(n, n) + a[a < 0.95] = 0 + # First test with a singular matrix + a[:, 0] = 0 + a_ = csc_matrix(a) + # Matrix is exactly singular + assert_raises(RuntimeError, splu, a_) + + # Make a diagonal dominant, to make sure it is not singular + a += 4*eye(n) + a_ = csc_matrix(a) + lu = splu(a_) + b = ones(n) + x = lu.solve(b) + assert_almost_equal(dot(a, x), b) + + def test_splu_perm(self): + # Test the permutation vectors exposed by splu. + n = 30 + a = random.random((n, n)) + a[a < 0.95] = 0 + # Make a diagonal dominant, to make sure it is not singular + a += 4*eye(n) + a_ = csc_matrix(a) + lu = splu(a_) + # Check that the permutation indices do belong to [0, n-1]. + for perm in (lu.perm_r, lu.perm_c): + assert_(all(perm > -1)) + assert_(all(perm < n)) + assert_equal(len(unique(perm)), len(perm)) + + # Now make a symmetric, and test that the two permutation vectors are + # the same + # Note: a += a.T relies on undefined behavior. + a = a + a.T + a_ = csc_matrix(a) + lu = splu(a_) + assert_array_equal(lu.perm_r, lu.perm_c) + + @pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount") + def test_lu_refcount(self): + # Test that we are keeping track of the reference count with splu. 
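+ # (sys.getrefcount counts the temporary reference created by its own
+ # argument; rc below is that baseline, and each permutation array
+ # returned by splu should hold exactly one extra reference to lu.)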
+ n = 30 + a = random.random((n, n)) + a[a < 0.95] = 0 + # Make a diagonal dominant, to make sure it is not singular + a += 4*eye(n) + a_ = csc_matrix(a) + lu = splu(a_) + + # And now test that we don't have a refcount bug + rc = sys.getrefcount(lu) + for attr in ('perm_r', 'perm_c'): + perm = getattr(lu, attr) + assert_equal(sys.getrefcount(lu), rc + 1) + del perm + assert_equal(sys.getrefcount(lu), rc) + + def test_bad_inputs(self): + A = self.A.tocsc() + + assert_raises(ValueError, splu, A[:,:4]) + assert_raises(ValueError, spilu, A[:,:4]) + + for lu in [splu(A), spilu(A)]: + b = random.rand(42) + B = random.rand(42, 3) + BB = random.rand(self.n, 3, 9) + assert_raises(ValueError, lu.solve, b) + assert_raises(ValueError, lu.solve, B) + assert_raises(ValueError, lu.solve, BB) + assert_raises(TypeError, lu.solve, + b.astype(np.complex64)) + assert_raises(TypeError, lu.solve, + b.astype(np.complex128)) + + @sup_sparse_efficiency + def test_superlu_dlamch_i386_nan(self): + # SuperLU 4.3 calls some functions returning floats without + # declaring them. On i386@linux call convention, this fails to + # clear floating point registers after call. As a result, NaN + # can appear in the next floating point operation made. + # + # Here's a test case that triggered the issue. + n = 8 + d = np.arange(n) + 1 + A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) + A = A.astype(np.float32) + spilu(A) + A = A + 1j*A + B = A.A + assert_(not np.isnan(B).any()) + + @sup_sparse_efficiency + def test_lu_attr(self): + + def check(dtype, complex_2=False): + A = self.A.astype(dtype) + + if complex_2: + A = A + 1j*A.T + + n = A.shape[0] + lu = splu(A) + + # Check that the decomposition is as advertised + + Pc = np.zeros((n, n)) + Pc[np.arange(n), lu.perm_c] = 1 + + Pr = np.zeros((n, n)) + Pr[lu.perm_r, np.arange(n)] = 1 + + Ad = A.toarray() + lhs = Pr.dot(Ad).dot(Pc) + rhs = (lu.L * lu.U).toarray() + + eps = np.finfo(dtype).eps + + assert_allclose(lhs, rhs, atol=100*eps) + + check(np.float32) + check(np.float64) + check(np.complex64) + check(np.complex128) + check(np.complex64, True) + check(np.complex128, True) + + @pytest.mark.slow + @sup_sparse_efficiency + def test_threads_parallel(self): + oks = [] + + def worker(): + try: + self.test_splu_basic() + self._internal_test_splu_smoketest() + self._internal_test_spilu_smoketest() + oks.append(True) + except Exception: + pass + + threads = [threading.Thread(target=worker) + for k in range(20)] + for t in threads: + t.start() + for t in threads: + t.join() + + assert_equal(len(oks), 20) + + +class TestSpsolveTriangular(object): + def setup_method(self): + use_solver(useUmfpack=False) + + def test_singular(self): + n = 5 + A = csr_matrix((n, n)) + b = np.arange(n) + for lower in (True, False): + assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower) + + @sup_sparse_efficiency + def test_bad_shape(self): + # A is not square. + A = np.zeros((3, 4)) + b = ones((4, 1)) + assert_raises(ValueError, spsolve_triangular, A, b) + # A2 and b2 have incompatible shapes. 
+ A2 = csr_matrix(eye(3)) + b2 = array([1.0, 2.0]) + assert_raises(ValueError, spsolve_triangular, A2, b2) + + @sup_sparse_efficiency + def test_input_types(self): + A = array([[1., 0.], [1., 2.]]) + b = array([[2., 0.], [2., 2.]]) + for matrix_type in (array, csc_matrix, csr_matrix): + x = spsolve_triangular(matrix_type(A), b, lower=True) + assert_array_almost_equal(A.dot(x), b) + + @pytest.mark.slow + @sup_sparse_efficiency + def test_random(self): + def random_triangle_matrix(n, lower=True): + A = scipy.sparse.random(n, n, density=0.1, format='coo') + if lower: + A = scipy.sparse.tril(A) + else: + A = scipy.sparse.triu(A) + A = A.tocsr(copy=False) + for i in range(n): + A[i, i] = np.random.rand() + 1 + return A + + np.random.seed(1234) + for lower in (True, False): + for n in (10, 10**2, 10**3): + A = random_triangle_matrix(n, lower=lower) + for m in (1, 10): + for b in (np.random.rand(n, m), + np.random.randint(-9, 9, (n, m)), + np.random.randint(-9, 9, (n, m)) + + np.random.randint(-9, 9, (n, m)) * 1j): + x = spsolve_triangular(A, b, lower=lower) + assert_array_almost_equal(A.dot(x), b) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/test_linsolve.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/test_linsolve.pyc new file mode 100644 index 0000000..d840b82 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/dsolve/tests/test_linsolve.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/__init__.py new file mode 100644 index 0000000..7d9668a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/__init__.py @@ -0,0 +1,18 @@ +""" +Sparse Eigenvalue Solvers +------------------------- + +The submodules of sparse.linalg.eigen: + 1. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method + +""" +from __future__ import division, print_function, absolute_import + +from .arpack import * +from .lobpcg import * + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/__init__.pyc new file mode 100644 index 0000000..eaa68e3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING new file mode 100644 index 0000000..e87667e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING @@ -0,0 +1,45 @@ + +BSD Software License + +Pertains to ARPACK and P_ARPACK + +Copyright (c) 1996-2008 Rice University. +Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff. +All rights reserved. + +Arpack has been renamed to arpack-ng. + +Copyright (c) 2001-2011 - Scilab Enterprises +Updated by Allan Cornet, Sylvestre Ledru. 
+ +Copyright (c) 2010 - Jordi Gutiérrez Hermoso (Octave patch) + +Copyright (c) 2007 - Sébastien Fabbro (gentoo patch) + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + +- Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/__init__.py new file mode 100644 index 0000000..420bdc4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/__init__.py @@ -0,0 +1,22 @@ +""" +Eigenvalue solver using iterative methods. + +Find k eigenvectors and eigenvalues of a matrix A using the +Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_. + +These methods are most useful for large sparse matrices. + + - eigs(A,k) + - eigsh(A,k) + +References +---------- +.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ +.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: + Solution of Large Scale Eigenvalue Problems by Implicitly Restarted + Arnoldi Methods. SIAM, Philadelphia, PA, 1998. 
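+
+A minimal usage sketch (the diagonal operator below is a stand-in chosen
+only for illustration):
+
+>>> import numpy as np
+>>> from scipy.sparse import diags
+>>> from scipy.sparse.linalg import eigsh
+>>> A = diags(np.arange(1., 14.))   # symmetric operator with known spectrum
+>>> w, v = eigsh(A, k=6)            # six largest-magnitude eigenvalues
+>>> v.shape
+(13, 6)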
+ +""" +from __future__ import division, print_function, absolute_import + +from .arpack import * diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/__init__.pyc new file mode 100644 index 0000000..6141373 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/_arpack.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/_arpack.so new file mode 100755 index 0000000..cc0c0d9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/_arpack.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/arpack.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/arpack.py new file mode 100644 index 0000000..75e6557 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/arpack.py @@ -0,0 +1,1881 @@ +""" +Find a few eigenvectors and eigenvalues of a matrix. + + +Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/ + +""" +# Wrapper implementation notes +# +# ARPACK Entry Points +# ------------------- +# The entry points to ARPACK are +# - (s,d)seupd : single and double precision symmetric matrix +# - (s,d,c,z)neupd: single,double,complex,double complex general matrix +# This wrapper puts the *neupd (general matrix) interfaces in eigs() +# and the *seupd (symmetric matrix) in eigsh(). +# There is no Hermetian complex/double complex interface. +# To find eigenvalues of a Hermetian matrix you +# must use eigs() and not eigsh() +# It might be desirable to handle the Hermetian case differently +# and, for example, return real eigenvalues. + +# Number of eigenvalues returned and complex eigenvalues +# ------------------------------------------------------ +# The ARPACK nonsymmetric real and double interface (s,d)naupd return +# eigenvalues and eigenvectors in real (float,double) arrays. +# Since the eigenvalues and eigenvectors are, in general, complex +# ARPACK puts the real and imaginary parts in consecutive entries +# in real-valued arrays. This wrapper puts the real entries +# into complex data types and attempts to return the requested eigenvalues +# and eigenvectors. + + +# Solver modes +# ------------ +# ARPACK and handle shifted and shift-inverse computations +# for eigenvalues by providing a shift (sigma) and a solver. + +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence'] + +from . import _arpack +import numpy as np +import warnings +from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator +from scipy.sparse import eye, issparse, isspmatrix, isspmatrix_csr +from scipy.linalg import eig, eigh, lu_factor, lu_solve +from scipy.sparse.sputils import isdense +from scipy.sparse.linalg import gmres, splu +from scipy._lib._util import _aligned_zeros +from scipy._lib._threadsafety import ReentrancyLock + + +_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'} +_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12} + +DNAUPD_ERRORS = { + 0: "Normal exit.", + 1: "Maximum number of iterations taken. " + "All possible eigenvalues of OP has been found. 
IPARAM(5) " + "returns the number of wanted converged Ritz values.", + 2: "No longer an informational error. Deprecated starting " + "with release 2 of ARPACK.", + 3: "No shifts could be applied during a cycle of the " + "Implicitly restarted Arnoldi iteration. One possibility " + "is to increase the size of NCV relative to NEV. ", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV-NEV >= 2 and less than or equal to N.", + -4: "The maximum number of Arnoldi update iterations allowed " + "must be greater than zero.", + -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work array WORKL is not sufficient.", + -8: "Error return from LAPACK eigenvalue calculation;", + -9: "Starting vector is zero.", + -10: "IPARAM(7) must be 1,2,3,4.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "IPARAM(1) must be equal to 0 or 1.", + -13: "NEV and WHICH = 'BE' are incompatible.", + -9999: "Could not build an Arnoldi factorization. " + "IPARAM(5) returns the size of the current Arnoldi " + "factorization. The user is advised to check that " + "enough workspace and array storage has been allocated." +} + +SNAUPD_ERRORS = DNAUPD_ERRORS + +ZNAUPD_ERRORS = DNAUPD_ERRORS.copy() +ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3." + +CNAUPD_ERRORS = ZNAUPD_ERRORS + +DSAUPD_ERRORS = { + 0: "Normal exit.", + 1: "Maximum number of iterations taken. " + "All possible eigenvalues of OP have been found.", + 2: "No longer an informational error. Deprecated starting with " + "release 2 of ARPACK.", + 3: "No shifts could be applied during a cycle of the Implicitly " + "restarted Arnoldi iteration. One possibility is to increase " + "the size of NCV relative to NEV. ", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV must be greater than NEV and less than or equal to N.", + -4: "The maximum number of Arnoldi update iterations allowed " + "must be greater than zero.", + -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work array WORKL is not sufficient.", + -8: "Error return from trid. eigenvalue calculation; " + "Informational error from LAPACK routine dsteqr .", + -9: "Starting vector is zero.", + -10: "IPARAM(7) must be 1,2,3,4,5.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "IPARAM(1) must be equal to 0 or 1.", + -13: "NEV and WHICH = 'BE' are incompatible. ", + -9999: "Could not build an Arnoldi factorization. " + "IPARAM(5) returns the size of the current Arnoldi " + "factorization. The user is advised to check that " + "enough workspace and array storage has been allocated.", +} + +SSAUPD_ERRORS = DSAUPD_ERRORS + +DNEUPD_ERRORS = { + 0: "Normal exit.", + 1: "The Schur form computed by LAPACK routine dlahqr " + "could not be reordered by LAPACK routine dtrsen. " + "Re-enter subroutine dneupd with IPARAM(5)=NCV and " + "increase the size of the arrays DR and DI to have " + "dimension at least dimension NCV and allocate at least NCV " + "columns for Z. NOTE: Not necessary if Z and V share " + "the same space. Please notify the authors if this error " + "occurs.", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV-NEV >= 2 and less than or equal to N.", + -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work WORKL array is not sufficient.", + -8: "Error return from calculation of a real Schur form. 
" + "Informational error from LAPACK routine dlahqr .", + -9: "Error return from calculation of eigenvectors. " + "Informational error from LAPACK routine dtrevc.", + -10: "IPARAM(7) must be 1,2,3,4.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "HOWMNY = 'S' not yet implemented", + -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", + -14: "DNAUPD did not find any eigenvalues to sufficient " + "accuracy.", + -15: "DNEUPD got a different count of the number of converged " + "Ritz values than DNAUPD got. This indicates the user " + "probably made an error in passing data from DNAUPD to " + "DNEUPD or that the data was modified before entering " + "DNEUPD", +} + +SNEUPD_ERRORS = DNEUPD_ERRORS.copy() +SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr " + "could not be reordered by LAPACK routine strsen . " + "Re-enter subroutine dneupd with IPARAM(5)=NCV and " + "increase the size of the arrays DR and DI to have " + "dimension at least dimension NCV and allocate at least " + "NCV columns for Z. NOTE: Not necessary if Z and V share " + "the same space. Please notify the authors if this error " + "occurs.") +SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient " + "accuracy.") +SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of " + "converged Ritz values than SNAUPD got. This indicates " + "the user probably made an error in passing data from " + "SNAUPD to SNEUPD or that the data was modified before " + "entering SNEUPD") + +ZNEUPD_ERRORS = {0: "Normal exit.", + 1: "The Schur form computed by LAPACK routine csheqr " + "could not be reordered by LAPACK routine ztrsen. " + "Re-enter subroutine zneupd with IPARAM(5)=NCV and " + "increase the size of the array D to have " + "dimension at least dimension NCV and allocate at least " + "NCV columns for Z. NOTE: Not necessary if Z and V share " + "the same space. Please notify the authors if this error " + "occurs.", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV-NEV >= 1 and less than or equal to N.", + -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work WORKL array is not sufficient.", + -8: "Error return from LAPACK eigenvalue calculation. " + "This should never happened.", + -9: "Error return from calculation of eigenvectors. " + "Informational error from LAPACK routine ztrevc.", + -10: "IPARAM(7) must be 1,2,3", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "HOWMNY = 'S' not yet implemented", + -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", + -14: "ZNAUPD did not find any eigenvalues to sufficient " + "accuracy.", + -15: "ZNEUPD got a different count of the number of " + "converged Ritz values than ZNAUPD got. This " + "indicates the user probably made an error in passing " + "data from ZNAUPD to ZNEUPD or that the data was " + "modified before entering ZNEUPD" + } + +CNEUPD_ERRORS = ZNEUPD_ERRORS.copy() +CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient " + "accuracy.") +CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of " + "converged Ritz values than CNAUPD got. 
This indicates " + "the user probably made an error in passing data from " + "CNAUPD to CNEUPD or that the data was modified before " + "entering CNEUPD") + +DSEUPD_ERRORS = { + 0: "Normal exit.", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV must be greater than NEV and less than or equal to N.", + -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work WORKL array is not sufficient.", + -8: ("Error return from trid. eigenvalue calculation; " + "Information error from LAPACK routine dsteqr."), + -9: "Starting vector is zero.", + -10: "IPARAM(7) must be 1,2,3,4,5.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "NEV and WHICH = 'BE' are incompatible.", + -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.", + -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.", + -16: "HOWMNY = 'S' not yet implemented", + -17: ("DSEUPD got a different count of the number of converged " + "Ritz values than DSAUPD got. This indicates the user " + "probably made an error in passing data from DSAUPD to " + "DSEUPD or that the data was modified before entering " + "DSEUPD.") +} + +SSEUPD_ERRORS = DSEUPD_ERRORS.copy() +SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues " + "to sufficient accuracy.") +SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of " + "converged " + "Ritz values than SSAUPD got. This indicates the user " + "probably made an error in passing data from SSAUPD to " + "SSEUPD or that the data was modified before entering " + "SSEUPD.") + +_SAUPD_ERRORS = {'d': DSAUPD_ERRORS, + 's': SSAUPD_ERRORS} +_NAUPD_ERRORS = {'d': DNAUPD_ERRORS, + 's': SNAUPD_ERRORS, + 'z': ZNAUPD_ERRORS, + 'c': CNAUPD_ERRORS} +_SEUPD_ERRORS = {'d': DSEUPD_ERRORS, + 's': SSEUPD_ERRORS} +_NEUPD_ERRORS = {'d': DNEUPD_ERRORS, + 's': SNEUPD_ERRORS, + 'z': ZNEUPD_ERRORS, + 'c': CNEUPD_ERRORS} + +# accepted values of parameter WHICH in _SEUPD +_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE'] + +# accepted values of parameter WHICH in _NAUPD +_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI'] + + +class ArpackError(RuntimeError): + """ + ARPACK error + """ + def __init__(self, info, infodict=_NAUPD_ERRORS): + msg = infodict.get(info, "Unknown error") + RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg)) + + +class ArpackNoConvergence(ArpackError): + """ + ARPACK iteration did not converge + + Attributes + ---------- + eigenvalues : ndarray + Partial result. Converged eigenvalues. + eigenvectors : ndarray + Partial result. Converged eigenvectors. + + """ + def __init__(self, msg, eigenvalues, eigenvectors): + ArpackError.__init__(self, -1, {-1: msg}) + self.eigenvalues = eigenvalues + self.eigenvectors = eigenvectors + + +def choose_ncv(k): + """ + Choose number of lanczos vectors based on target number + of singular/eigen values and vectors to compute, k. 
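+ The value returned below is a heuristic: at least 20 Lanczos vectors,
+ and otherwise slightly more than 2*k, in line with the ``ncv > 2*k``
+ recommendation in the eigs/eigsh docstrings.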
+ """ + return max(2 * k + 1, 20) + + +class _ArpackParams(object): + def __init__(self, n, k, tp, mode=1, sigma=None, + ncv=None, v0=None, maxiter=None, which="LM", tol=0): + if k <= 0: + raise ValueError("k must be positive, k=%d" % k) + + if maxiter is None: + maxiter = n * 10 + if maxiter <= 0: + raise ValueError("maxiter must be positive, maxiter=%d" % maxiter) + + if tp not in 'fdFD': + raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") + + if v0 is not None: + # ARPACK overwrites its initial resid, make a copy + self.resid = np.array(v0, copy=True) + info = 1 + else: + # ARPACK will use a random initial vector. + self.resid = np.zeros(n, tp) + info = 0 + + if sigma is None: + #sigma not used + self.sigma = 0 + else: + self.sigma = sigma + + if ncv is None: + ncv = choose_ncv(k) + ncv = min(ncv, n) + + self.v = np.zeros((n, ncv), tp) # holds Ritz vectors + self.iparam = np.zeros(11, "int") + + # set solver mode and parameters + ishfts = 1 + self.mode = mode + self.iparam[0] = ishfts + self.iparam[2] = maxiter + self.iparam[3] = 1 + self.iparam[6] = mode + + self.n = n + self.tol = tol + self.k = k + self.maxiter = maxiter + self.ncv = ncv + self.which = which + self.tp = tp + self.info = info + + self.converged = False + self.ido = 0 + + def _raise_no_convergence(self): + msg = "No convergence (%d iterations, %d/%d eigenvectors converged)" + k_ok = self.iparam[4] + num_iter = self.iparam[2] + try: + ev, vec = self.extract(True) + except ArpackError as err: + msg = "%s [%s]" % (msg, err) + ev = np.zeros((0,)) + vec = np.zeros((self.n, 0)) + k_ok = 0 + raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec) + + +class _SymmetricArpackParams(_ArpackParams): + def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, + Minv_matvec=None, sigma=None, + ncv=None, v0=None, maxiter=None, which="LM", tol=0): + # The following modes are supported: + # mode = 1: + # Solve the standard eigenvalue problem: + # A*x = lambda*x : + # A - symmetric + # Arguments should be + # matvec = left multiplication by A + # M_matvec = None [not used] + # Minv_matvec = None [not used] + # + # mode = 2: + # Solve the general eigenvalue problem: + # A*x = lambda*M*x + # A - symmetric + # M - symmetric positive definite + # Arguments should be + # matvec = left multiplication by A + # M_matvec = left multiplication by M + # Minv_matvec = left multiplication by M^-1 + # + # mode = 3: + # Solve the general eigenvalue problem in shift-invert mode: + # A*x = lambda*M*x + # A - symmetric + # M - symmetric positive semi-definite + # Arguments should be + # matvec = None [not used] + # M_matvec = left multiplication by M + # or None, if M is the identity + # Minv_matvec = left multiplication by [A-sigma*M]^-1 + # + # mode = 4: + # Solve the general eigenvalue problem in Buckling mode: + # A*x = lambda*AG*x + # A - symmetric positive semi-definite + # AG - symmetric indefinite + # Arguments should be + # matvec = left multiplication by A + # M_matvec = None [not used] + # Minv_matvec = left multiplication by [A-sigma*AG]^-1 + # + # mode = 5: + # Solve the general eigenvalue problem in Cayley-transformed mode: + # A*x = lambda*M*x + # A - symmetric + # M - symmetric positive semi-definite + # Arguments should be + # matvec = left multiplication by A + # M_matvec = left multiplication by M + # or None, if M is the identity + # Minv_matvec = left multiplication by [A-sigma*M]^-1 + if mode == 1: + if matvec is None: + raise ValueError("matvec must be specified for mode=1") + if M_matvec is not None: + 
raise ValueError("M_matvec cannot be specified for mode=1") + if Minv_matvec is not None: + raise ValueError("Minv_matvec cannot be specified for mode=1") + + self.OP = matvec + self.B = lambda x: x + self.bmat = 'I' + elif mode == 2: + if matvec is None: + raise ValueError("matvec must be specified for mode=2") + if M_matvec is None: + raise ValueError("M_matvec must be specified for mode=2") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified for mode=2") + + self.OP = lambda x: Minv_matvec(matvec(x)) + self.OPa = Minv_matvec + self.OPb = matvec + self.B = M_matvec + self.bmat = 'G' + elif mode == 3: + if matvec is not None: + raise ValueError("matvec must not be specified for mode=3") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified for mode=3") + + if M_matvec is None: + self.OP = Minv_matvec + self.OPa = Minv_matvec + self.B = lambda x: x + self.bmat = 'I' + else: + self.OP = lambda x: Minv_matvec(M_matvec(x)) + self.OPa = Minv_matvec + self.B = M_matvec + self.bmat = 'G' + elif mode == 4: + if matvec is None: + raise ValueError("matvec must be specified for mode=4") + if M_matvec is not None: + raise ValueError("M_matvec must not be specified for mode=4") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified for mode=4") + self.OPa = Minv_matvec + self.OP = lambda x: self.OPa(matvec(x)) + self.B = matvec + self.bmat = 'G' + elif mode == 5: + if matvec is None: + raise ValueError("matvec must be specified for mode=5") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified for mode=5") + + self.OPa = Minv_matvec + self.A_matvec = matvec + + if M_matvec is None: + self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x) + self.B = lambda x: x + self.bmat = 'I' + else: + self.OP = lambda x: Minv_matvec(matvec(x) + + sigma * M_matvec(x)) + self.B = M_matvec + self.bmat = 'G' + else: + raise ValueError("mode=%i not implemented" % mode) + + if which not in _SEUPD_WHICH: + raise ValueError("which must be one of %s" + % ' '.join(_SEUPD_WHICH)) + if k >= n: + raise ValueError("k must be less than ndim(A), k=%d" % k) + + _ArpackParams.__init__(self, n, k, tp, mode, sigma, + ncv, v0, maxiter, which, tol) + + if self.ncv > n or self.ncv <= k: + raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv) + + # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1 + self.workd = _aligned_zeros(3 * n, self.tp) + self.workl = _aligned_zeros(self.ncv * (self.ncv + 8), self.tp) + + ltr = _type_conv[self.tp] + if ltr not in ["s", "d"]: + raise ValueError("Input matrix is not real-valued.") + + self._arpack_solver = _arpack.__dict__[ltr + 'saupd'] + self._arpack_extract = _arpack.__dict__[ltr + 'seupd'] + + self.iterate_infodict = _SAUPD_ERRORS[ltr] + self.extract_infodict = _SEUPD_ERRORS[ltr] + + self.ipntr = np.zeros(11, "int") + + def iterate(self): + self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info = \ + self._arpack_solver(self.ido, self.bmat, self.which, self.k, + self.tol, self.resid, self.v, self.iparam, + self.ipntr, self.workd, self.workl, self.info) + + xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) + yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) + if self.ido == -1: + # initialization + self.workd[yslice] = self.OP(self.workd[xslice]) + elif self.ido == 1: + # compute y = Op*x + if self.mode == 1: + self.workd[yslice] = self.OP(self.workd[xslice]) + elif self.mode == 2: + self.workd[xslice] = self.OPb(self.workd[xslice]) + 
self.workd[yslice] = self.OPa(self.workd[xslice]) + elif self.mode == 5: + Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) + Ax = self.A_matvec(self.workd[xslice]) + self.workd[yslice] = self.OPa(Ax + (self.sigma * + self.workd[Bxslice])) + else: + Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) + self.workd[yslice] = self.OPa(self.workd[Bxslice]) + elif self.ido == 2: + self.workd[yslice] = self.B(self.workd[xslice]) + elif self.ido == 3: + raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0") + else: + self.converged = True + + if self.info == 0: + pass + elif self.info == 1: + self._raise_no_convergence() + else: + raise ArpackError(self.info, infodict=self.iterate_infodict) + + def extract(self, return_eigenvectors): + rvec = return_eigenvectors + ierr = 0 + howmny = 'A' # return all eigenvectors + sselect = np.zeros(self.ncv, 'int') # unused + d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma, + self.bmat, self.which, self.k, + self.tol, self.resid, self.v, + self.iparam[0:7], self.ipntr, + self.workd[0:2 * self.n], + self.workl, ierr) + if ierr != 0: + raise ArpackError(ierr, infodict=self.extract_infodict) + k_ok = self.iparam[4] + d = d[:k_ok] + z = z[:, :k_ok] + + if return_eigenvectors: + return d, z + else: + return d + + +class _UnsymmetricArpackParams(_ArpackParams): + def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, + Minv_matvec=None, sigma=None, + ncv=None, v0=None, maxiter=None, which="LM", tol=0): + # The following modes are supported: + # mode = 1: + # Solve the standard eigenvalue problem: + # A*x = lambda*x + # A - square matrix + # Arguments should be + # matvec = left multiplication by A + # M_matvec = None [not used] + # Minv_matvec = None [not used] + # + # mode = 2: + # Solve the generalized eigenvalue problem: + # A*x = lambda*M*x + # A - square matrix + # M - symmetric, positive semi-definite + # Arguments should be + # matvec = left multiplication by A + # M_matvec = left multiplication by M + # Minv_matvec = left multiplication by M^-1 + # + # mode = 3,4: + # Solve the general eigenvalue problem in shift-invert mode: + # A*x = lambda*M*x + # A - square matrix + # M - symmetric, positive semi-definite + # Arguments should be + # matvec = None [not used] + # M_matvec = left multiplication by M + # or None, if M is the identity + # Minv_matvec = left multiplication by [A-sigma*M]^-1 + # if A is real and mode==3, use the real part of Minv_matvec + # if A is real and mode==4, use the imag part of Minv_matvec + # if A is complex and mode==3, + # use real and imag parts of Minv_matvec + if mode == 1: + if matvec is None: + raise ValueError("matvec must be specified for mode=1") + if M_matvec is not None: + raise ValueError("M_matvec cannot be specified for mode=1") + if Minv_matvec is not None: + raise ValueError("Minv_matvec cannot be specified for mode=1") + + self.OP = matvec + self.B = lambda x: x + self.bmat = 'I' + elif mode == 2: + if matvec is None: + raise ValueError("matvec must be specified for mode=2") + if M_matvec is None: + raise ValueError("M_matvec must be specified for mode=2") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified for mode=2") + + self.OP = lambda x: Minv_matvec(matvec(x)) + self.OPa = Minv_matvec + self.OPb = matvec + self.B = M_matvec + self.bmat = 'G' + elif mode in (3, 4): + if matvec is None: + raise ValueError("matvec must be specified " + "for mode in (3,4)") + if Minv_matvec is None: + raise ValueError("Minv_matvec must be specified 
" + "for mode in (3,4)") + + self.matvec = matvec + if tp in 'DF': # complex type + if mode == 3: + self.OPa = Minv_matvec + else: + raise ValueError("mode=4 invalid for complex A") + else: # real type + if mode == 3: + self.OPa = lambda x: np.real(Minv_matvec(x)) + else: + self.OPa = lambda x: np.imag(Minv_matvec(x)) + if M_matvec is None: + self.B = lambda x: x + self.bmat = 'I' + self.OP = self.OPa + else: + self.B = M_matvec + self.bmat = 'G' + self.OP = lambda x: self.OPa(M_matvec(x)) + else: + raise ValueError("mode=%i not implemented" % mode) + + if which not in _NEUPD_WHICH: + raise ValueError("Parameter which must be one of %s" + % ' '.join(_NEUPD_WHICH)) + if k >= n - 1: + raise ValueError("k must be less than ndim(A)-1, k=%d" % k) + + _ArpackParams.__init__(self, n, k, tp, mode, sigma, + ncv, v0, maxiter, which, tol) + + if self.ncv > n or self.ncv <= k + 1: + raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv) + + # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1 + self.workd = _aligned_zeros(3 * n, self.tp) + self.workl = _aligned_zeros(3 * self.ncv * (self.ncv + 2), self.tp) + + ltr = _type_conv[self.tp] + self._arpack_solver = _arpack.__dict__[ltr + 'naupd'] + self._arpack_extract = _arpack.__dict__[ltr + 'neupd'] + + self.iterate_infodict = _NAUPD_ERRORS[ltr] + self.extract_infodict = _NEUPD_ERRORS[ltr] + + self.ipntr = np.zeros(14, "int") + + if self.tp in 'FD': + # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1 + self.rwork = _aligned_zeros(self.ncv, self.tp.lower()) + else: + self.rwork = None + + def iterate(self): + if self.tp in 'fd': + self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info =\ + self._arpack_solver(self.ido, self.bmat, self.which, self.k, + self.tol, self.resid, self.v, self.iparam, + self.ipntr, self.workd, self.workl, + self.info) + else: + self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info =\ + self._arpack_solver(self.ido, self.bmat, self.which, self.k, + self.tol, self.resid, self.v, self.iparam, + self.ipntr, self.workd, self.workl, + self.rwork, self.info) + + xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) + yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) + if self.ido == -1: + # initialization + self.workd[yslice] = self.OP(self.workd[xslice]) + elif self.ido == 1: + # compute y = Op*x + if self.mode in (1, 2): + self.workd[yslice] = self.OP(self.workd[xslice]) + else: + Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) + self.workd[yslice] = self.OPa(self.workd[Bxslice]) + elif self.ido == 2: + self.workd[yslice] = self.B(self.workd[xslice]) + elif self.ido == 3: + raise ValueError("ARPACK requested user shifts. 
Assure ISHIFT==0") + else: + self.converged = True + + if self.info == 0: + pass + elif self.info == 1: + self._raise_no_convergence() + else: + raise ArpackError(self.info, infodict=self.iterate_infodict) + + def extract(self, return_eigenvectors): + k, n = self.k, self.n + + ierr = 0 + howmny = 'A' # return all eigenvectors + sselect = np.zeros(self.ncv, 'int') # unused + sigmar = np.real(self.sigma) + sigmai = np.imag(self.sigma) + workev = np.zeros(3 * self.ncv, self.tp) + + if self.tp in 'fd': + dr = np.zeros(k + 1, self.tp) + di = np.zeros(k + 1, self.tp) + zr = np.zeros((n, k + 1), self.tp) + dr, di, zr, ierr = \ + self._arpack_extract(return_eigenvectors, + howmny, sselect, sigmar, sigmai, workev, + self.bmat, self.which, k, self.tol, self.resid, + self.v, self.iparam, self.ipntr, + self.workd, self.workl, self.info) + if ierr != 0: + raise ArpackError(ierr, infodict=self.extract_infodict) + nreturned = self.iparam[4] # number of good eigenvalues returned + + # Build complex eigenvalues from real and imaginary parts + d = dr + 1.0j * di + + # Arrange the eigenvectors: complex eigenvectors are stored as + # real,imaginary in consecutive columns + z = zr.astype(self.tp.upper()) + + # The ARPACK nonsymmetric real and double interface (s,d)naupd + # return eigenvalues and eigenvectors in real (float,double) + # arrays. + + # Efficiency: this should check that return_eigenvectors == True + # before going through this construction. + if sigmai == 0: + i = 0 + while i <= k: + # check if complex + if abs(d[i].imag) != 0: + # this is a complex conjugate pair with eigenvalues + # in consecutive columns + if i < k: + z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] + z[:, i + 1] = z[:, i].conjugate() + i += 1 + else: + #last eigenvalue is complex: the imaginary part of + # the eigenvector has not been returned + #this can only happen if nreturned > k, so we'll + # throw out this case. + nreturned -= 1 + i += 1 + + else: + # real matrix, mode 3 or 4, imag(sigma) is nonzero: + # see remark 3 in <s,d>neupd.f + # Build complex eigenvalues from real and imaginary parts + i = 0 + while i <= k: + if abs(d[i].imag) == 0: + d[i] = np.dot(zr[:, i], self.matvec(zr[:, i])) + else: + if i < k: + z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] + z[:, i + 1] = z[:, i].conjugate() + d[i] = ((np.dot(zr[:, i], + self.matvec(zr[:, i])) + + np.dot(zr[:, i + 1], + self.matvec(zr[:, i + 1]))) + + 1j * (np.dot(zr[:, i], + self.matvec(zr[:, i + 1])) + - np.dot(zr[:, i + 1], + self.matvec(zr[:, i])))) + d[i + 1] = d[i].conj() + i += 1 + else: + #last eigenvalue is complex: the imaginary part of + # the eigenvector has not been returned + #this can only happen if nreturned > k, so we'll + # throw out this case. + nreturned -= 1 + i += 1 + + # Now we have k+1 possible eigenvalues and eigenvectors + # Return the ones specified by the keyword "which" + + if nreturned <= k: + # we got less or equal as many eigenvalues we wanted + d = d[:nreturned] + z = z[:, :nreturned] + else: + # we got one extra eigenvalue (likely a cc pair, but which?) + # cut at approx precision for sorting + rd = np.round(d, decimals=_ndigits[self.tp]) + if self.which in ['LR', 'SR']: + ind = np.argsort(rd.real) + elif self.which in ['LI', 'SI']: + # for LI,SI ARPACK returns largest,smallest + # abs(imaginary) why? 
+ ind = np.argsort(abs(rd.imag)) + else: + ind = np.argsort(abs(rd)) + if self.which in ['LR', 'LM', 'LI']: + d = d[ind[-k:]] + z = z[:, ind[-k:]] + if self.which in ['SR', 'SM', 'SI']: + d = d[ind[:k]] + z = z[:, ind[:k]] + else: + # complex is so much simpler... + d, z, ierr =\ + self._arpack_extract(return_eigenvectors, + howmny, sselect, self.sigma, workev, + self.bmat, self.which, k, self.tol, self.resid, + self.v, self.iparam, self.ipntr, + self.workd, self.workl, self.rwork, ierr) + + if ierr != 0: + raise ArpackError(ierr, infodict=self.extract_infodict) + + k_ok = self.iparam[4] + d = d[:k_ok] + z = z[:, :k_ok] + + if return_eigenvectors: + return d, z + else: + return d + + +def _aslinearoperator_with_dtype(m): + m = aslinearoperator(m) + if not hasattr(m, 'dtype'): + x = np.zeros(m.shape[1]) + m.dtype = (m * x).dtype + return m + + +class SpLuInv(LinearOperator): + """ + SpLuInv: + helper class to repeatedly solve M*x=b + using a sparse LU-decomposition of M + """ + def __init__(self, M): + self.M_lu = splu(M) + self.shape = M.shape + self.dtype = M.dtype + self.isreal = not np.issubdtype(self.dtype, np.complexfloating) + + def _matvec(self, x): + # careful here: splu.solve will throw away imaginary + # part of x if M is real + x = np.asarray(x) + if self.isreal and np.issubdtype(x.dtype, np.complexfloating): + return (self.M_lu.solve(np.real(x).astype(self.dtype)) + + 1j * self.M_lu.solve(np.imag(x).astype(self.dtype))) + else: + return self.M_lu.solve(x.astype(self.dtype)) + + +class LuInv(LinearOperator): + """ + LuInv: + helper class to repeatedly solve M*x=b + using an LU-decomposition of M + """ + def __init__(self, M): + self.M_lu = lu_factor(M) + self.shape = M.shape + self.dtype = M.dtype + + def _matvec(self, x): + return lu_solve(self.M_lu, x) + + +def gmres_loose(A, b, tol): + """ + gmres with looser termination condition. + """ + b = np.asarray(b) + min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps + return gmres(A, b, tol=max(tol, min_tol), atol=0) + + +class IterInv(LinearOperator): + """ + IterInv: + helper class to repeatedly solve M*x=b + using an iterative method. + """ + def __init__(self, M, ifunc=gmres_loose, tol=0): + self.M = M + if hasattr(M, 'dtype'): + self.dtype = M.dtype + else: + x = np.zeros(M.shape[1]) + self.dtype = (M * x).dtype + self.shape = M.shape + + if tol <= 0: + # when tol=0, ARPACK uses machine tolerance as calculated + # by LAPACK's _LAMCH function. We should match this + tol = 2 * np.finfo(self.dtype).eps + self.ifunc = ifunc + self.tol = tol + + def _matvec(self, x): + b, info = self.ifunc(self.M, x, tol=self.tol) + if info != 0: + raise ValueError("Error in inverting M: function " + "%s did not converge (info = %i)." + % (self.ifunc.__name__, info)) + return b + + +class IterOpInv(LinearOperator): + """ + IterOpInv: + helper class to repeatedly solve [A-sigma*M]*x = b + using an iterative method + """ + def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0): + self.A = A + self.M = M + self.sigma = sigma + + def mult_func(x): + return A.matvec(x) - sigma * M.matvec(x) + + def mult_func_M_None(x): + return A.matvec(x) - sigma * x + + x = np.zeros(A.shape[1]) + if M is None: + dtype = mult_func_M_None(x).dtype + self.OP = LinearOperator(self.A.shape, + mult_func_M_None, + dtype=dtype) + else: + dtype = mult_func(x).dtype + self.OP = LinearOperator(self.A.shape, + mult_func, + dtype=dtype) + self.shape = A.shape + + if tol <= 0: + # when tol=0, ARPACK uses machine tolerance as calculated + # by LAPACK's _LAMCH function. 
We should match this + tol = 2 * np.finfo(self.OP.dtype).eps + self.ifunc = ifunc + self.tol = tol + + def _matvec(self, x): + b, info = self.ifunc(self.OP, x, tol=self.tol) + if info != 0: + raise ValueError("Error in inverting [A-sigma*M]: function " + "%s did not converge (info = %i)." + % (self.ifunc.__name__, info)) + return b + + @property + def dtype(self): + return self.OP.dtype + + +def get_inv_matvec(M, symmetric=False, tol=0): + if isdense(M): + return LuInv(M).matvec + elif isspmatrix(M): + if isspmatrix_csr(M) and symmetric: + M = M.T + return SpLuInv(M).matvec + else: + return IterInv(M, tol=tol).matvec + + +def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0): + if sigma == 0: + return get_inv_matvec(A, symmetric=symmetric, tol=tol) + + if M is None: + #M is the identity matrix + if isdense(A): + if (np.issubdtype(A.dtype, np.complexfloating) + or np.imag(sigma) == 0): + A = np.copy(A) + else: + A = A + 0j + A.flat[::A.shape[1] + 1] -= sigma + return LuInv(A).matvec + elif isspmatrix(A): + A = A - sigma * eye(A.shape[0]) + if symmetric and isspmatrix_csr(A): + A = A.T + return SpLuInv(A.tocsc()).matvec + else: + return IterOpInv(_aslinearoperator_with_dtype(A), + M, sigma, tol=tol).matvec + else: + if ((not isdense(A) and not isspmatrix(A)) or + (not isdense(M) and not isspmatrix(M))): + return IterOpInv(_aslinearoperator_with_dtype(A), + _aslinearoperator_with_dtype(M), + sigma, tol=tol).matvec + elif isdense(A) or isdense(M): + return LuInv(A - sigma * M).matvec + else: + OP = A - sigma * M + if symmetric and isspmatrix_csr(OP): + OP = OP.T + return SpLuInv(OP.tocsc()).matvec + + +# ARPACK is not threadsafe or reentrant (SAVE variables), so we need a +# lock and a re-entering check. +_ARPACK_LOCK = ReentrancyLock("Nested calls to eigs/eigsh not allowed: " + "ARPACK is not re-entrant") + + +def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, + ncv=None, maxiter=None, tol=0, return_eigenvectors=True, + Minv=None, OPinv=None, OPpart=None): + """ + Find k eigenvalues and eigenvectors of the square matrix A. + + Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem + for w[i] eigenvalues with corresponding eigenvectors x[i]. + + If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the + generalized eigenvalue problem for w[i] eigenvalues + with corresponding eigenvectors x[i]. + + Parameters + ---------- + A : ndarray, sparse matrix or LinearOperator + An array, sparse matrix, or LinearOperator representing + the operation ``A * x``, where A is a real or complex square matrix. + k : int, optional + The number of eigenvalues and eigenvectors desired. + `k` must be smaller than N-1. It is not possible to compute all + eigenvectors of a matrix. + M : ndarray, sparse matrix or LinearOperator, optional + An array, sparse matrix, or LinearOperator representing + the operation M*x for the generalized eigenvalue problem + + A * x = w * M * x. + + M must represent a real, symmetric matrix if A is real, and must + represent a complex, hermitian matrix if A is complex. For best + results, the data type of M should be the same as that of A. + Additionally: + + If `sigma` is None, M is positive definite + + If sigma is specified, M is positive semi-definite + + If sigma is None, eigs requires an operator to compute the solution + of the linear equation ``M * x = b``. This is done internally via a + (sparse) LU decomposition for an explicit matrix M, or via an + iterative solver for a general linear operator. 
Alternatively, + the user can supply the matrix or operator Minv, which gives + ``x = Minv * b = M^-1 * b``. + sigma : real or complex, optional + Find eigenvalues near sigma using shift-invert mode. This requires + an operator to compute the solution of the linear system + ``[A - sigma * M] * x = b``, where M is the identity matrix if + unspecified. This is computed internally via a (sparse) LU + decomposition for explicit matrices A & M, or via an iterative + solver if either A or M is a general linear operator. + Alternatively, the user can supply the matrix or operator OPinv, + which gives ``x = OPinv * b = [A - sigma * M]^-1 * b``. + For a real matrix A, shift-invert can either be done in imaginary + mode or real mode, specified by the parameter OPpart ('r' or 'i'). + Note that when sigma is specified, the keyword 'which' (below) + refers to the shifted eigenvalues ``w'[i]`` where: + + If A is real and OPpart == 'r' (default), + ``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``. + + If A is real and OPpart == 'i', + ``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``. + + If A is complex, ``w'[i] = 1/(w[i]-sigma)``. + + v0 : ndarray, optional + Starting vector for iteration. + Default: random + ncv : int, optional + The number of Lanczos vectors generated + `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``. + Default: ``min(n, max(2*k + 1, 20))`` + which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional + Which `k` eigenvectors and eigenvalues to find: + + 'LM' : largest magnitude + + 'SM' : smallest magnitude + + 'LR' : largest real part + + 'SR' : smallest real part + + 'LI' : largest imaginary part + + 'SI' : smallest imaginary part + + When sigma != None, 'which' refers to the shifted eigenvalues w'[i] + (see discussion in 'sigma', above). ARPACK is generally better + at finding large values than small values. If small eigenvalues are + desired, consider using shift-invert mode for better performance. + maxiter : int, optional + Maximum number of Arnoldi update iterations allowed + Default: ``n*10`` + tol : float, optional + Relative accuracy for eigenvalues (stopping criterion) + The default value of 0 implies machine precision. + return_eigenvectors : bool, optional + Return eigenvectors (True) in addition to eigenvalues + Minv : ndarray, sparse matrix or LinearOperator, optional + See notes in M, above. + OPinv : ndarray, sparse matrix or LinearOperator, optional + See notes in sigma, above. + OPpart : {'r' or 'i'}, optional + See notes in sigma, above + + Returns + ------- + w : ndarray + Array of k eigenvalues. + v : ndarray + An array of `k` eigenvectors. + ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]. + + Raises + ------ + ArpackNoConvergence + When the requested convergence is not obtained. + The currently converged eigenvalues and eigenvectors can be found + as ``eigenvalues`` and ``eigenvectors`` attributes of the exception + object. + + See Also + -------- + eigsh : eigenvalues and eigenvectors for symmetric matrix A + svds : singular value decomposition for a matrix A + + Notes + ----- + This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD, + ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to + find the eigenvalues and eigenvectors [2]_. + + References + ---------- + .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ + .. [2] R. B. Lehoucq, D. C. Sorensen, and C. 
Yang, ARPACK USERS GUIDE: + Solution of Large Scale Eigenvalue Problems by Implicitly Restarted + Arnoldi Methods. SIAM, Philadelphia, PA, 1998. + + Examples + -------- + Find 6 eigenvectors of the identity matrix: + + >>> from scipy.sparse.linalg import eigs + >>> id = np.eye(13) + >>> vals, vecs = eigs(id, k=6) + >>> vals + array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) + >>> vecs.shape + (13, 6) + + """ + if A.shape[0] != A.shape[1]: + raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) + if M is not None: + if M.shape != A.shape: + raise ValueError('wrong M dimensions %s, should be %s' + % (M.shape, A.shape)) + if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): + warnings.warn('M does not have the same type precision as A. ' + 'This may adversely affect ARPACK convergence') + + n = A.shape[0] + + if k <= 0: + raise ValueError("k=%d must be greater than 0." % k) + + if k >= n - 1: + warnings.warn("k >= N - 1 for N * N square matrix. " + "Attempting to use scipy.linalg.eig instead.", + RuntimeWarning) + + if issparse(A): + raise TypeError("Cannot use scipy.linalg.eig for sparse A with " + "k >= N - 1. Use scipy.linalg.eig(A.toarray()) or" + " reduce k.") + if isinstance(A, LinearOperator): + raise TypeError("Cannot use scipy.linalg.eig for LinearOperator " + "A with k >= N - 1.") + if isinstance(M, LinearOperator): + raise TypeError("Cannot use scipy.linalg.eig for LinearOperator " + "M with k >= N - 1.") + + return eig(A, b=M, right=return_eigenvectors) + + if sigma is None: + matvec = _aslinearoperator_with_dtype(A).matvec + + if OPinv is not None: + raise ValueError("OPinv should not be specified " + "with sigma = None.") + if OPpart is not None: + raise ValueError("OPpart should not be specified with " + "sigma = None or complex A") + + if M is None: + #standard eigenvalue problem + mode = 1 + M_matvec = None + Minv_matvec = None + if Minv is not None: + raise ValueError("Minv should not be " + "specified with M = None.") + else: + #general eigenvalue problem + mode = 2 + if Minv is None: + Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) + else: + Minv = _aslinearoperator_with_dtype(Minv) + Minv_matvec = Minv.matvec + M_matvec = _aslinearoperator_with_dtype(M).matvec + else: + #sigma is not None: shift-invert mode + if np.issubdtype(A.dtype, np.complexfloating): + if OPpart is not None: + raise ValueError("OPpart should not be specified " + "with sigma=None or complex A") + mode = 3 + elif OPpart is None or OPpart.lower() == 'r': + mode = 3 + elif OPpart.lower() == 'i': + if np.imag(sigma) == 0: + raise ValueError("OPpart cannot be 'i' if sigma is real") + mode = 4 + else: + raise ValueError("OPpart must be one of ('r','i')") + + matvec = _aslinearoperator_with_dtype(A).matvec + if Minv is not None: + raise ValueError("Minv should not be specified when sigma is") + if OPinv is None: + Minv_matvec = get_OPinv_matvec(A, M, sigma, + symmetric=False, tol=tol) + else: + OPinv = _aslinearoperator_with_dtype(OPinv) + Minv_matvec = OPinv.matvec + if M is None: + M_matvec = None + else: + M_matvec = _aslinearoperator_with_dtype(M).matvec + + params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode, + M_matvec, Minv_matvec, sigma, + ncv, v0, maxiter, which, tol) + + with _ARPACK_LOCK: + while not params.converged: + params.iterate() + + return params.extract(return_eigenvectors) + + +def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, + ncv=None, maxiter=None, tol=0, return_eigenvectors=True, + Minv=None, OPinv=None, 
mode='normal'): + """ + Find k eigenvalues and eigenvectors of the real symmetric square matrix + or complex hermitian matrix A. + + Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for + w[i] eigenvalues with corresponding eigenvectors x[i]. + + If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the + generalized eigenvalue problem for w[i] eigenvalues + with corresponding eigenvectors x[i]. + + Parameters + ---------- + A : ndarray, sparse matrix or LinearOperator + A square operator representing the operation ``A * x``, where ``A`` is + real symmetric or complex hermitian. For buckling mode (see below) + ``A`` must additionally be positive-definite. + k : int, optional + The number of eigenvalues and eigenvectors desired. + `k` must be smaller than N. It is not possible to compute all + eigenvectors of a matrix. + + Returns + ------- + w : array + Array of k eigenvalues. + v : array + An array representing the `k` eigenvectors. The column ``v[:, i]`` is + the eigenvector corresponding to the eigenvalue ``w[i]``. + + Other Parameters + ---------------- + M : An N x N matrix, array, sparse matrix, or linear operator representing + the operation ``M @ x`` for the generalized eigenvalue problem + + A @ x = w * M @ x. + + M must represent a real, symmetric matrix if A is real, and must + represent a complex, hermitian matrix if A is complex. For best + results, the data type of M should be the same as that of A. + Additionally: + + If sigma is None, M is symmetric positive definite. + + If sigma is specified, M is symmetric positive semi-definite. + + In buckling mode, M is symmetric indefinite. + + If sigma is None, eigsh requires an operator to compute the solution + of the linear equation ``M @ x = b``. This is done internally via a + (sparse) LU decomposition for an explicit matrix M, or via an + iterative solver for a general linear operator. Alternatively, + the user can supply the matrix or operator Minv, which gives + ``x = Minv @ b = M^-1 @ b``. + sigma : real + Find eigenvalues near sigma using shift-invert mode. This requires + an operator to compute the solution of the linear system + ``[A - sigma * M] x = b``, where M is the identity matrix if + unspecified. This is computed internally via a (sparse) LU + decomposition for explicit matrices A & M, or via an iterative + solver if either A or M is a general linear operator. + Alternatively, the user can supply the matrix or operator OPinv, + which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``. + Note that when sigma is specified, the keyword 'which' refers to + the shifted eigenvalues ``w'[i]`` where: + + if mode == 'normal', ``w'[i] = 1 / (w[i] - sigma)``. + + if mode == 'cayley', ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``. + + if mode == 'buckling', ``w'[i] = w[i] / (w[i] - sigma)``. + + (see further discussion in 'mode' below) + v0 : ndarray, optional + Starting vector for iteration. + Default: random + ncv : int, optional + The number of Lanczos vectors generated ncv must be greater than k and + smaller than n; it is recommended that ``ncv > 2*k``. + Default: ``min(n, max(2*k + 1, 20))`` + which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE'] + If A is a complex hermitian matrix, 'BE' is invalid. + Which `k` eigenvectors and eigenvalues to find: + + 'LM' : Largest (in magnitude) eigenvalues. + + 'SM' : Smallest (in magnitude) eigenvalues. + + 'LA' : Largest (algebraic) eigenvalues. + + 'SA' : Smallest (algebraic) eigenvalues. + + 'BE' : Half (k/2) from each end of the spectrum. 
+ + When k is odd, return one more (k/2+1) from the high end. + When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]`` + (see discussion in 'sigma', above). ARPACK is generally better + at finding large values than small values. If small eigenvalues are + desired, consider using shift-invert mode for better performance. + maxiter : int, optional + Maximum number of Arnoldi update iterations allowed. + Default: ``n*10`` + tol : float + Relative accuracy for eigenvalues (stopping criterion). + The default value of 0 implies machine precision. + Minv : N x N matrix, array, sparse matrix, or LinearOperator + See notes in M, above. + OPinv : N x N matrix, array, sparse matrix, or LinearOperator + See notes in sigma, above. + return_eigenvectors : bool + Return eigenvectors (True) in addition to eigenvalues. This value determines + the order in which eigenvalues are sorted. The sort order is also dependent on the `which` variable. + + For which = 'LM' or 'SA': + If `return_eigenvectors` is True, eigenvalues are sorted by algebraic value. + + If `return_eigenvectors` is False, eigenvalues are sorted by absolute value. + + For which = 'BE' or 'LA': + eigenvalues are always sorted by algebraic value. + + For which = 'SM': + If `return_eigenvectors` is True, eigenvalues are sorted by algebraic value. + + If `return_eigenvectors` is False, eigenvalues are sorted by decreasing absolute value. + + mode : string ['normal' | 'buckling' | 'cayley'] + Specify strategy to use for shift-invert mode. This argument applies + only for real-valued A and sigma != None. For shift-invert mode, + ARPACK internally solves the eigenvalue problem + ``OP * x'[i] = w'[i] * B * x'[i]`` + and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i] + into the desired eigenvectors and eigenvalues of the problem + ``A * x[i] = w[i] * M * x[i]``. + The modes are as follows: + + 'normal' : + OP = [A - sigma * M]^-1 @ M, + B = M, + w'[i] = 1 / (w[i] - sigma) + + 'buckling' : + OP = [A - sigma * M]^-1 @ A, + B = A, + w'[i] = w[i] / (w[i] - sigma) + + 'cayley' : + OP = [A - sigma * M]^-1 @ [A + sigma * M], + B = M, + w'[i] = (w[i] + sigma) / (w[i] - sigma) + + The choice of mode will affect which eigenvalues are selected by + the keyword 'which', and can also impact the stability of + convergence (see [2] for a discussion). + + Raises + ------ + ArpackNoConvergence + When the requested convergence is not obtained. + + The currently converged eigenvalues and eigenvectors can be found + as ``eigenvalues`` and ``eigenvectors`` attributes of the exception + object. + + See Also + -------- + eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A + svds : singular value decomposition for a matrix A + + Notes + ----- + This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD + functions which use the Implicitly Restarted Lanczos Method to + find the eigenvalues and eigenvectors [2]_. + + References + ---------- + .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ + .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: + Solution of Large Scale Eigenvalue Problems by Implicitly Restarted + Arnoldi Methods. SIAM, Philadelphia, PA, 1998. 
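+
+    As a minimal shift-invert sketch (illustrative only, not part of the
+    ARPACK interface description above): to find the eigenvalues of a
+    symmetric matrix closest to a target ``sigma``, pass ``sigma`` and keep
+    the default ``which='LM'``. ARPACK then works on the transformed
+    spectrum ``w'[i] = 1 / (w[i] - sigma)``, in which the eigenvalues
+    nearest ``sigma`` are the ones of largest magnitude:
+
+    >>> D = np.diag([1., 2., 3., 4., 5.])  # doctest: +SKIP
+    >>> w, v = eigsh(D, k=2, sigma=2.1)  # doctest: +SKIP
+    >>> np.sort(w)  # doctest: +SKIP
+    array([2., 3.])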
+ + Examples + -------- + >>> from scipy.sparse.linalg import eigsh + >>> identity = np.eye(13) + >>> eigenvalues, eigenvectors = eigsh(identity, k=6) + >>> eigenvalues + array([1., 1., 1., 1., 1., 1.]) + >>> eigenvectors.shape + (13, 6) + + """ + # complex hermitian matrices should be solved with eigs + if np.issubdtype(A.dtype, np.complexfloating): + if mode != 'normal': + raise ValueError("mode=%s cannot be used with " + "complex matrix A" % mode) + if which == 'BE': + raise ValueError("which='BE' cannot be used with complex matrix A") + elif which == 'LA': + which = 'LR' + elif which == 'SA': + which = 'SR' + ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0, + ncv=ncv, maxiter=maxiter, tol=tol, + return_eigenvectors=return_eigenvectors, Minv=Minv, + OPinv=OPinv) + + if return_eigenvectors: + return ret[0].real, ret[1] + else: + return ret.real + + if A.shape[0] != A.shape[1]: + raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) + if M is not None: + if M.shape != A.shape: + raise ValueError('wrong M dimensions %s, should be %s' + % (M.shape, A.shape)) + if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): + warnings.warn('M does not have the same type precision as A. ' + 'This may adversely affect ARPACK convergence') + + n = A.shape[0] + + if k <= 0: + raise ValueError("k must be greater than 0.") + + if k >= n: + warnings.warn("k >= N for N * N square matrix. " + "Attempting to use scipy.linalg.eigh instead.", + RuntimeWarning) + + if issparse(A): + raise TypeError("Cannot use scipy.linalg.eigh for sparse A with " + "k >= N. Use scipy.linalg.eigh(A.toarray()) or" + " reduce k.") + if isinstance(A, LinearOperator): + raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator " + "A with k >= N.") + if isinstance(M, LinearOperator): + raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator " + "M with k >= N.") + + return eigh(A, b=M, eigvals_only=not return_eigenvectors) + + if sigma is None: + A = _aslinearoperator_with_dtype(A) + matvec = A.matvec + + if OPinv is not None: + raise ValueError("OPinv should not be specified " + "with sigma = None.") + if M is None: + #standard eigenvalue problem + mode = 1 + M_matvec = None + Minv_matvec = None + if Minv is not None: + raise ValueError("Minv should not be " + "specified with M = None.") + else: + #general eigenvalue problem + mode = 2 + if Minv is None: + Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) + else: + Minv = _aslinearoperator_with_dtype(Minv) + Minv_matvec = Minv.matvec + M_matvec = _aslinearoperator_with_dtype(M).matvec + else: + # sigma is not None: shift-invert mode + if Minv is not None: + raise ValueError("Minv should not be specified when sigma is") + + # normal mode + if mode == 'normal': + mode = 3 + matvec = None + if OPinv is None: + Minv_matvec = get_OPinv_matvec(A, M, sigma, + symmetric=True, tol=tol) + else: + OPinv = _aslinearoperator_with_dtype(OPinv) + Minv_matvec = OPinv.matvec + if M is None: + M_matvec = None + else: + M = _aslinearoperator_with_dtype(M) + M_matvec = M.matvec + + # buckling mode + elif mode == 'buckling': + mode = 4 + if OPinv is None: + Minv_matvec = get_OPinv_matvec(A, M, sigma, + symmetric=True, tol=tol) + else: + Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec + matvec = _aslinearoperator_with_dtype(A).matvec + M_matvec = None + + # cayley-transform mode + elif mode == 'cayley': + mode = 5 + matvec = _aslinearoperator_with_dtype(A).matvec + if OPinv is None: + Minv_matvec = get_OPinv_matvec(A, M, sigma, + 
symmetric=True, tol=tol) + else: + Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec + if M is None: + M_matvec = None + else: + M_matvec = _aslinearoperator_with_dtype(M).matvec + + # unrecognized mode + else: + raise ValueError("unrecognized mode '%s'" % mode) + + params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode, + M_matvec, Minv_matvec, sigma, + ncv, v0, maxiter, which, tol) + + with _ARPACK_LOCK: + while not params.converged: + params.iterate() + + return params.extract(return_eigenvectors) + + +def _augmented_orthonormal_cols(x, k): + # extract the shape of the x array + n, m = x.shape + # create the expanded array and copy x into it + y = np.empty((n, m+k), dtype=x.dtype) + y[:, :m] = x + # do some modified gram schmidt to add k random orthonormal vectors + for i in range(k): + # sample a random initial vector + v = np.random.randn(n) + if np.iscomplexobj(x): + v = v + 1j*np.random.randn(n) + # subtract projections onto the existing unit length vectors + for j in range(m+i): + u = y[:, j] + v -= (np.dot(v, u.conj()) / np.dot(u, u.conj())) * u + # normalize v + v /= np.sqrt(np.dot(v, v.conj())) + # add v into the output array + y[:, m+i] = v + # return the expanded array + return y + + +def _augmented_orthonormal_rows(x, k): + return _augmented_orthonormal_cols(x.T, k).T + + +def _herm(x): + return x.T.conj() + + +def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None, + maxiter=None, return_singular_vectors=True): + """Compute the largest k singular values/vectors for a sparse matrix. + + Parameters + ---------- + A : {sparse matrix, LinearOperator} + Array to compute the SVD on, of shape (M, N) + k : int, optional + Number of singular values and vectors to compute. + Must be 1 <= k < min(A.shape). + ncv : int, optional + The number of Lanczos vectors generated + ncv must be greater than k+1 and smaller than n; + it is recommended that ncv > 2*k + Default: ``min(n, max(2*k + 1, 20))`` + tol : float, optional + Tolerance for singular values. Zero (default) means machine precision. + which : str, ['LM' | 'SM'], optional + Which `k` singular values to find: + + - 'LM' : largest singular values + - 'SM' : smallest singular values + + .. versionadded:: 0.12.0 + v0 : ndarray, optional + Starting vector for iteration, of length min(A.shape). Should be an + (approximate) left singular vector if N > M and a right singular + vector otherwise. + Default: random + + .. versionadded:: 0.12.0 + maxiter : int, optional + Maximum number of iterations. + + .. versionadded:: 0.12.0 + return_singular_vectors : bool or str, optional + - True: return singular vectors (True) in addition to singular values. + + .. versionadded:: 0.12.0 + + - "u": only return the u matrix, without computing vh (if N > M). + - "vh": only return the vh matrix, without computing u (if N <= M). + + .. versionadded:: 0.16.0 + + Returns + ------- + u : ndarray, shape=(M, k) + Unitary matrix having left singular vectors as columns. + If `return_singular_vectors` is "vh", this variable is not computed, + and None is returned instead. + s : ndarray, shape=(k,) + The singular values. + vt : ndarray, shape=(k, N) + Unitary matrix having right singular vectors as rows. + If `return_singular_vectors` is "u", this variable is not computed, + and None is returned instead. + + + Notes + ----- + This is a naive implementation using ARPACK as an eigensolver + on A.H * A or A * A.H, depending on which one is more efficient. 
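+
+    Concretely (a sketch of the idea): the singular values are recovered as
+    ``s[i] = sqrt(w[i])`` from the eigenvalues ``w[i]`` of the Gram
+    operator, and the singular vectors on the side not produced by the
+    eigensolver are rebuilt by rescaling, ``u = A @ v / s`` for a tall
+    matrix (or ``vh = (A.H @ u / s).H`` for a wide one), which is the role
+    played by ``X_matmat`` in the implementation below.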
+ + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import svds, eigs + >>> A = csc_matrix([[1, 0, 0], [5, 0, 2], [0, -1, 0], [0, 0, 3]], dtype=float) + >>> u, s, vt = svds(A, k=2) + >>> s + array([ 2.75193379, 5.6059665 ]) + >>> np.sqrt(eigs(A.dot(A.T), k=2)[0]).real + array([ 5.6059665 , 2.75193379]) + """ + if not (isinstance(A, LinearOperator) or isspmatrix(A)): + A = np.asarray(A) + + n, m = A.shape + + if k <= 0 or k >= min(n, m): + raise ValueError("k must be between 1 and min(A.shape), k=%d" % k) + + if isinstance(A, LinearOperator): + if n > m: + X_dot = A.matvec + X_matmat = A.matmat + XH_dot = A.rmatvec + else: + X_dot = A.rmatvec + XH_dot = A.matvec + + dtype = getattr(A, 'dtype', None) + if dtype is None: + dtype = A.dot(np.zeros([m,1])).dtype + + # A^H * V; works around lack of LinearOperator.adjoint. + # XXX This can be slow! + def X_matmat(V): + out = np.empty((V.shape[1], m), dtype=dtype) + for i, col in enumerate(V.T): + out[i, :] = A.rmatvec(col.reshape(-1, 1)).T + return out.T + + else: + if n > m: + X_dot = X_matmat = A.dot + XH_dot = _herm(A).dot + else: + XH_dot = A.dot + X_dot = X_matmat = _herm(A).dot + + def matvec_XH_X(x): + return XH_dot(X_dot(x)) + + XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype, + shape=(min(A.shape), min(A.shape))) + + # Get a low rank approximation of the implicitly defined gramian matrix. + # This is not a stable way to approach the problem. + eigvals, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter, + ncv=ncv, which=which, v0=v0) + + # In 'LM' mode try to be clever about small eigenvalues. + # Otherwise in 'SM' mode do not try to be clever. + if which == 'LM': + + # Gramian matrices have real non-negative eigenvalues. + eigvals = np.maximum(eigvals.real, 0) + + # Use the sophisticated detection of small eigenvalues from pinvh. + t = eigvec.dtype.char.lower() + factor = {'f': 1E3, 'd': 1E6} + cond = factor[t] * np.finfo(t).eps + cutoff = cond * np.max(eigvals) + + # Get a mask indicating which eigenpairs are not degenerately tiny, + # and create the re-ordered array of thresholded singular values. 
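+        # (illustration: in double precision, eigvals = [4., 1., 1e-18]
+        # gives cutoff = 1e6 * eps * 4 ~ 8.9e-10, so the third eigenpair
+        # is zeroed out rather than yielding a spurious singular value
+        # sqrt(1e-18) = 1e-9.)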
+ above_cutoff = (eigvals > cutoff) + nlarge = above_cutoff.sum() + nsmall = k - nlarge + slarge = np.sqrt(eigvals[above_cutoff]) + s = np.zeros_like(eigvals) + s[:nlarge] = slarge + if not return_singular_vectors: + return s + + if n > m: + vlarge = eigvec[:, above_cutoff] + ularge = X_matmat(vlarge) / slarge if return_singular_vectors != 'vh' else None + vhlarge = _herm(vlarge) + else: + ularge = eigvec[:, above_cutoff] + vhlarge = _herm(X_matmat(ularge) / slarge) if return_singular_vectors != 'u' else None + + u = _augmented_orthonormal_cols(ularge, nsmall) if ularge is not None else None + vh = _augmented_orthonormal_rows(vhlarge, nsmall) if vhlarge is not None else None + + elif which == 'SM': + + s = np.sqrt(eigvals) + if not return_singular_vectors: + return s + + if n > m: + v = eigvec + u = X_matmat(v) / s if return_singular_vectors != 'vh' else None + vh = _herm(v) + else: + u = eigvec + vh = _herm(X_matmat(u) / s) if return_singular_vectors != 'u' else None + + else: + + raise ValueError("which must be either 'LM' or 'SM'.") + + return u, s, vh diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/arpack.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/arpack.pyc new file mode 100644 index 0000000..4c8d6d2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/arpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/setup.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/setup.py new file mode 100644 index 0000000..dac0991 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/setup.py @@ -0,0 +1,41 @@ +from __future__ import division, print_function, absolute_import + +from os.path import join + + +def configuration(parent_package='',top_path=None): + from scipy._build_utils.system_info import get_info, NotFoundError + from numpy.distutils.misc_util import Configuration + from scipy._build_utils import get_g77_abi_wrappers + + lapack_opt = get_info('lapack_opt') + + config = Configuration('arpack', parent_package, top_path) + + arpack_sources = [join('ARPACK','SRC', '*.f')] + arpack_sources.extend([join('ARPACK','UTIL', '*.f')]) + + arpack_sources += get_g77_abi_wrappers(lapack_opt) + + config.add_library('arpack_scipy', sources=arpack_sources, + include_dirs=[join('ARPACK', 'SRC')]) + + ext_sources = ['arpack.pyf.src'] + config.add_extension('_arpack', + sources=ext_sources, + libraries=['arpack_scipy'], + extra_info=lapack_opt, + depends=arpack_sources, + ) + + config.add_data_dir('tests') + + # Add license files + config.add_data_files('ARPACK/COPYING') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/setup.pyc new file mode 100644 index 0000000..cb1347d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/__init__.pyc new file mode 100644 index 0000000..3e3a207 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py new file mode 100644 index 0000000..4d9396c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py @@ -0,0 +1,965 @@ +from __future__ import division, print_function, absolute_import + +__usage__ = """ +To run tests locally: + python tests/test_arpack.py [-l<int>] [-v<int>] + +""" + +import threading + +import numpy as np + +from numpy.testing import (assert_allclose, assert_array_almost_equal_nulp, + assert_equal, assert_array_equal) +from pytest import raises as assert_raises +import pytest + +from numpy import dot, conj, random +from scipy.linalg import eig, eigh, hilbert, svd +from scipy.sparse import csc_matrix, csr_matrix, isspmatrix, diags +from scipy.sparse.linalg import LinearOperator, aslinearoperator +from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \ + ArpackNoConvergence, arpack + +from scipy._lib._gcutils import assert_deallocated, IS_PYPY +from scipy._lib._numpy_compat import suppress_warnings + + +# precision for tests +_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11} + + +def _get_test_tolerance(type_char, mattype=None): + """ + Return tolerance values suitable for a given test: + + Parameters + ---------- + type_char : {'f', 'd', 'F', 'D'} + Data type in ARPACK eigenvalue problem + mattype : {csr_matrix, aslinearoperator, asarray}, optional + Linear operator type + + Returns + ------- + tol + Tolerance to pass to the ARPACK routine + rtol + Relative tolerance for outputs + atol + Absolute tolerance for outputs + + """ + + rtol = {'f': 3000 * np.finfo(np.float32).eps, + 'F': 3000 * np.finfo(np.float32).eps, + 'd': 2000 * np.finfo(np.float64).eps, + 'D': 2000 * np.finfo(np.float64).eps}[type_char] + atol = rtol + tol = 0 + + if mattype is aslinearoperator and type_char in ('f', 'F'): + # iterative methods in single precision: worse errors + # also: bump ARPACK tolerance so that the iterative method converges + tol = 30 * np.finfo(np.float32).eps + rtol *= 5 + + if mattype is csr_matrix and type_char in ('f', 'F'): + # sparse in single precision: worse errors + rtol *= 5 + + return tol, rtol, atol + + +def generate_matrix(N, complex=False, hermitian=False, + pos_definite=False, sparse=False): + M = np.random.random((N,N)) + if complex: + M = M + 1j * np.random.random((N,N)) + + if hermitian: + if pos_definite: + if sparse: + i = np.arange(N) + j = np.random.randint(N, size=N-2) + i, j = np.meshgrid(i, j) + M[i,j] = 0 + M = np.dot(M.conj(), M.T) + else: + M = np.dot(M.conj(), M.T) + if sparse: + i = np.random.randint(N, size=N * N // 4) + j = np.random.randint(N, size=N * N // 4) + ind = np.nonzero(i == j) + j[ind] = (j[ind] + 1) % N + M[i,j] = 0 + M[j,i] = 0 + else: + if sparse: + i = np.random.randint(N, size=N * N // 2) + j = np.random.randint(N, size=N * N // 2) + M[i,j] = 0 + return M + + +def generate_matrix_symmetric(N, pos_definite=False, sparse=False): + M = np.random.random((N, N)) + + M = 0.5 * (M + M.T) # Make M symmetric + + if 
pos_definite: + Id = N * np.eye(N) + if sparse: + M = csr_matrix(M) + M += Id + else: + if sparse: + M = csr_matrix(M) + + return M + + +def _aslinearoperator_with_dtype(m): + m = aslinearoperator(m) + if not hasattr(m, 'dtype'): + x = np.zeros(m.shape[1]) + m.dtype = (m * x).dtype + return m + + +def assert_allclose_cc(actual, desired, **kw): + """Almost equal or complex conjugates almost equal""" + try: + assert_allclose(actual, desired, **kw) + except AssertionError: + assert_allclose(actual, conj(desired), **kw) + + +def argsort_which(eval, typ, k, which, + sigma=None, OPpart=None, mode=None): + """Return sorted indices of eigenvalues using the "which" keyword + from eigs and eigsh""" + if sigma is None: + reval = np.round(eval, decimals=_ndigits[typ]) + else: + if mode is None or mode == 'normal': + if OPpart is None: + reval = 1. / (eval - sigma) + elif OPpart == 'r': + reval = 0.5 * (1. / (eval - sigma) + + 1. / (eval - np.conj(sigma))) + elif OPpart == 'i': + reval = -0.5j * (1. / (eval - sigma) + - 1. / (eval - np.conj(sigma))) + elif mode == 'cayley': + reval = (eval + sigma) / (eval - sigma) + elif mode == 'buckling': + reval = eval / (eval - sigma) + else: + raise ValueError("mode='%s' not recognized" % mode) + + reval = np.round(reval, decimals=_ndigits[typ]) + + if which in ['LM', 'SM']: + ind = np.argsort(abs(reval)) + elif which in ['LR', 'SR', 'LA', 'SA', 'BE']: + ind = np.argsort(np.real(reval)) + elif which in ['LI', 'SI']: + # for LI,SI ARPACK returns largest,smallest abs(imaginary) why? + if typ.islower(): + ind = np.argsort(abs(np.imag(reval))) + else: + ind = np.argsort(np.imag(reval)) + else: + raise ValueError("which='%s' is unrecognized" % which) + + if which in ['LM', 'LA', 'LR', 'LI']: + return ind[-k:] + elif which in ['SM', 'SA', 'SR', 'SI']: + return ind[:k] + elif which == 'BE': + return np.concatenate((ind[:k//2], ind[k//2-k:])) + + +def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None, + mattype=np.asarray, OPpart=None, mode='normal'): + general = ('bmat' in d) + + if symmetric: + eigs_func = eigsh + else: + eigs_func = eigs + + if general: + err = ("error for %s:general, typ=%s, which=%s, sigma=%s, " + "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__, + typ, which, sigma, + mattype.__name__, + OPpart, mode)) + else: + err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, " + "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__, + typ, which, sigma, + mattype.__name__, + OPpart, mode)) + + a = d['mat'].astype(typ) + ac = mattype(a) + + if general: + b = d['bmat'].astype(typ.lower()) + bc = mattype(b) + + # get exact eigenvalues + exact_eval = d['eval'].astype(typ.upper()) + ind = argsort_which(exact_eval, typ, k, which, + sigma, OPpart, mode) + exact_eval = exact_eval[ind] + + # compute arpack eigenvalues + kwargs = dict(which=which, v0=v0, sigma=sigma) + if eigs_func is eigsh: + kwargs['mode'] = mode + else: + kwargs['OPpart'] = OPpart + + # compute suitable tolerances + kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype) + + # on rare occasions, ARPACK routines return results that are proper + # eigenvalues and -vectors, but not necessarily the ones requested in + # the parameter which. This is inherent to the Krylov methods, and + # should not be treated as a failure. If such a rare situation + # occurs, the calculation is tried again (but at most a few times). 
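+    # (on an outright ArpackNoConvergence the call is retried with maxiter
+    # raised to 20 * n -- see the except branches below.)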
+ ntries = 0 + while ntries < 5: + # solve + if general: + try: + eval, evec = eigs_func(ac, k, bc, **kwargs) + except ArpackNoConvergence: + kwargs['maxiter'] = 20*a.shape[0] + eval, evec = eigs_func(ac, k, bc, **kwargs) + else: + try: + eval, evec = eigs_func(ac, k, **kwargs) + except ArpackNoConvergence: + kwargs['maxiter'] = 20*a.shape[0] + eval, evec = eigs_func(ac, k, **kwargs) + + ind = argsort_which(eval, typ, k, which, + sigma, OPpart, mode) + eval = eval[ind] + evec = evec[:,ind] + + # check eigenvectors + LHS = np.dot(a, evec) + if general: + RHS = eval * np.dot(b, evec) + else: + RHS = eval * evec + + assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err) + + try: + # check eigenvalues + assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol, + err_msg=err) + break + except AssertionError: + ntries += 1 + + # check eigenvalues + assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol, err_msg=err) + + +class DictWithRepr(dict): + def __init__(self, name): + self.name = name + + def __repr__(self): + return "<%s>" % self.name + + +class SymmetricParams: + def __init__(self): + self.eigs = eigsh + self.which = ['LM', 'SM', 'LA', 'SA', 'BE'] + self.mattypes = [csr_matrix, aslinearoperator, np.asarray] + self.sigmas_modes = {None: ['normal'], + 0.5: ['normal', 'buckling', 'cayley']} + + # generate matrices + # these should all be float32 so that the eigenvalues + # are the same in float32 and float64 + N = 6 + np.random.seed(2300) + Ar = generate_matrix(N, hermitian=True, + pos_definite=True).astype('f').astype('d') + M = generate_matrix(N, hermitian=True, + pos_definite=True).astype('f').astype('d') + Ac = generate_matrix(N, hermitian=True, pos_definite=True, + complex=True).astype('F').astype('D') + v0 = np.random.random(N) + + # standard symmetric problem + SS = DictWithRepr("std-symmetric") + SS['mat'] = Ar + SS['v0'] = v0 + SS['eval'] = eigh(SS['mat'], eigvals_only=True) + + # general symmetric problem + GS = DictWithRepr("gen-symmetric") + GS['mat'] = Ar + GS['bmat'] = M + GS['v0'] = v0 + GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True) + + # standard hermitian problem + SH = DictWithRepr("std-hermitian") + SH['mat'] = Ac + SH['v0'] = v0 + SH['eval'] = eigh(SH['mat'], eigvals_only=True) + + # general hermitian problem + GH = DictWithRepr("gen-hermitian") + GH['mat'] = Ac + GH['bmat'] = M + GH['v0'] = v0 + GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True) + + self.real_test_cases = [SS, GS] + self.complex_test_cases = [SH, GH] + + +class NonSymmetricParams: + def __init__(self): + self.eigs = eigs + self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI'] + self.mattypes = [csr_matrix, aslinearoperator, np.asarray] + self.sigmas_OPparts = {None: [None], + 0.1: ['r'], + 0.1 + 0.1j: ['r', 'i']} + + # generate matrices + # these should all be float32 so that the eigenvalues + # are the same in float32 and float64 + N = 6 + np.random.seed(2300) + Ar = generate_matrix(N).astype('f').astype('d') + M = generate_matrix(N, hermitian=True, + pos_definite=True).astype('f').astype('d') + Ac = generate_matrix(N, complex=True).astype('F').astype('D') + v0 = np.random.random(N) + + # standard real nonsymmetric problem + SNR = DictWithRepr("std-real-nonsym") + SNR['mat'] = Ar + SNR['v0'] = v0 + SNR['eval'] = eig(SNR['mat'], left=False, right=False) + + # general real nonsymmetric problem + GNR = DictWithRepr("gen-real-nonsym") + GNR['mat'] = Ar + GNR['bmat'] = M + GNR['v0'] = v0 + GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False) + + 
# standard complex nonsymmetric problem + SNC = DictWithRepr("std-cmplx-nonsym") + SNC['mat'] = Ac + SNC['v0'] = v0 + SNC['eval'] = eig(SNC['mat'], left=False, right=False) + + # general complex nonsymmetric problem + GNC = DictWithRepr("gen-cmplx-nonsym") + GNC['mat'] = Ac + GNC['bmat'] = M + GNC['v0'] = v0 + GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False) + + self.real_test_cases = [SNR, GNR] + self.complex_test_cases = [SNC, GNC] + + +def test_symmetric_modes(): + params = SymmetricParams() + k = 2 + symmetric = True + for D in params.real_test_cases: + for typ in 'fd': + for which in params.which: + for mattype in params.mattypes: + for (sigma, modes) in params.sigmas_modes.items(): + for mode in modes: + eval_evec(symmetric, D, typ, k, which, + None, sigma, mattype, None, mode) + + +def test_hermitian_modes(): + params = SymmetricParams() + k = 2 + symmetric = True + for D in params.complex_test_cases: + for typ in 'FD': + for which in params.which: + if which == 'BE': + continue # BE invalid for complex + for mattype in params.mattypes: + for sigma in params.sigmas_modes: + eval_evec(symmetric, D, typ, k, which, + None, sigma, mattype) + + +def test_symmetric_starting_vector(): + params = SymmetricParams() + symmetric = True + for k in [1, 2, 3, 4, 5]: + for D in params.real_test_cases: + for typ in 'fd': + v0 = random.rand(len(D['v0'])).astype(typ) + eval_evec(symmetric, D, typ, k, 'LM', v0) + + +def test_symmetric_no_convergence(): + np.random.seed(1234) + m = generate_matrix(30, hermitian=True, pos_definite=True) + tol, rtol, atol = _get_test_tolerance('d') + try: + w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9) + raise AssertionError("Spurious no-error exit") + except ArpackNoConvergence as err: + k = len(err.eigenvalues) + if k <= 0: + raise AssertionError("Spurious no-eigenvalues-found case") + w, v = err.eigenvalues, err.eigenvectors + assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol) + + +def test_real_nonsymmetric_modes(): + params = NonSymmetricParams() + k = 2 + symmetric = False + for D in params.real_test_cases: + for typ in 'fd': + for which in params.which: + for mattype in params.mattypes: + for sigma, OPparts in params.sigmas_OPparts.items(): + for OPpart in OPparts: + eval_evec(symmetric, D, typ, k, which, + None, sigma, mattype, OPpart) + + +def test_complex_nonsymmetric_modes(): + params = NonSymmetricParams() + k = 2 + symmetric = False + for D in params.complex_test_cases: + for typ in 'DF': + for which in params.which: + for mattype in params.mattypes: + for sigma in params.sigmas_OPparts: + eval_evec(symmetric, D, typ, k, which, + None, sigma, mattype) + + +def test_standard_nonsymmetric_starting_vector(): + params = NonSymmetricParams() + sigma = None + symmetric = False + for k in [1, 2, 3, 4]: + for d in params.complex_test_cases: + for typ in 'FD': + A = d['mat'] + n = A.shape[0] + v0 = random.rand(n).astype(typ) + eval_evec(symmetric, d, typ, k, "LM", v0, sigma) + + +def test_general_nonsymmetric_starting_vector(): + params = NonSymmetricParams() + sigma = None + symmetric = False + for k in [1, 2, 3, 4]: + for d in params.complex_test_cases: + for typ in 'FD': + A = d['mat'] + n = A.shape[0] + v0 = random.rand(n).astype(typ) + eval_evec(symmetric, d, typ, k, "LM", v0, sigma) + + +def test_standard_nonsymmetric_no_convergence(): + np.random.seed(1234) + m = generate_matrix(30, complex=True) + tol, rtol, atol = _get_test_tolerance('d') + try: + w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol) + 
raise AssertionError("Spurious no-error exit") + except ArpackNoConvergence as err: + k = len(err.eigenvalues) + if k <= 0: + raise AssertionError("Spurious no-eigenvalues-found case") + w, v = err.eigenvalues, err.eigenvectors + for ww, vv in zip(w, v.T): + assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol) + + +def test_eigen_bad_shapes(): + # A is not square. + A = csc_matrix(np.zeros((2, 3))) + assert_raises(ValueError, eigs, A) + + +def test_eigen_bad_kwargs(): + # Test eigen on wrong keyword argument + A = csc_matrix(np.zeros((8, 8))) + assert_raises(ValueError, eigs, A, which='XX') + + +def test_ticket_1459_arpack_crash(): + for dtype in [np.float32, np.float64]: + # XXX: this test does not seem to catch the issue for float32, + # but we made the same fix there, just to be sure + + N = 6 + k = 2 + + np.random.seed(2301) + A = np.random.random((N, N)).astype(dtype) + v0 = np.array([-0.71063568258907849895, -0.83185111795729227424, + -0.34365925382227402451, 0.46122533684552280420, + -0.58001341115969040629, -0.78844877570084292984e-01], + dtype=dtype) + + # Should not crash: + evals, evecs = eigs(A, k, v0=v0) + + +#---------------------------------------------------------------------- +# sparse SVD tests + +def sorted_svd(m, k, which='LM'): + # Compute svd of a dense matrix m, and return singular vectors/values + # sorted. + if isspmatrix(m): + m = m.todense() + u, s, vh = svd(m) + if which == 'LM': + ii = np.argsort(s)[-k:] + elif which == 'SM': + ii = np.argsort(s)[:k] + else: + raise ValueError("unknown which=%r" % (which,)) + + return u[:, ii], s[ii], vh[ii] + + +def svd_estimate(u, s, vh): + return np.dot(u, np.dot(np.diag(s), vh)) + + +def svd_test_input_check(): + x = np.array([[1, 2, 3], + [3, 4, 3], + [1, 0, 2], + [0, 0, 1]], float) + + assert_raises(ValueError, svds, x, k=-1) + assert_raises(ValueError, svds, x, k=0) + assert_raises(ValueError, svds, x, k=10) + assert_raises(ValueError, svds, x, k=x.shape[0]) + assert_raises(ValueError, svds, x, k=x.shape[1]) + assert_raises(ValueError, svds, x.T, k=x.shape[0]) + assert_raises(ValueError, svds, x.T, k=x.shape[1]) + + +def test_svd_simple_real(): + x = np.array([[1, 2, 3], + [3, 4, 3], + [1, 0, 2], + [0, 0, 1]], float) + y = np.array([[1, 2, 3, 8], + [3, 4, 3, 5], + [1, 0, 2, 3], + [0, 0, 1, 0]], float) + z = csc_matrix(x) + + for m in [x.T, x, y, z, z.T]: + for k in range(1, min(m.shape)): + u, s, vh = sorted_svd(m, k) + su, ss, svh = svds(m, k) + + m_hat = svd_estimate(u, s, vh) + sm_hat = svd_estimate(su, ss, svh) + + assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000) + + +def test_svd_simple_complex(): + x = np.array([[1, 2, 3], + [3, 4, 3], + [1 + 1j, 0, 2], + [0, 0, 1]], complex) + y = np.array([[1, 2, 3, 8 + 5j], + [3 - 2j, 4, 3, 5], + [1, 0, 2, 3], + [0, 0, 1, 0]], complex) + z = csc_matrix(x) + + for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]: + for k in range(1, min(m.shape) - 1): + u, s, vh = sorted_svd(m, k) + su, ss, svh = svds(m, k) + + m_hat = svd_estimate(u, s, vh) + sm_hat = svd_estimate(su, ss, svh) + + assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000) + + +def test_svd_maxiter(): + # check that maxiter works as expected + x = hilbert(6) + # ARPACK shouldn't converge on such an ill-conditioned matrix with just + # one iteration + assert_raises(ArpackNoConvergence, svds, x, 1, maxiter=1, ncv=3) + # but 100 iterations should be more than enough + u, s, vt = svds(x, 1, maxiter=100, ncv=3) + assert_allclose(s, [1.7], atol=0.5) + + +def test_svd_return(): + # check that the 
return_singular_vectors parameter works as expected
+    x = hilbert(6)
+    _, s, _ = sorted_svd(x, 2)
+    ss = svds(x, 2, return_singular_vectors=False)
+    assert_allclose(s, ss)
+
+
+def test_svd_which():
+    # check that the which parameter works as expected
+    x = hilbert(6)
+    for which in ['LM', 'SM']:
+        _, s, _ = sorted_svd(x, 2, which=which)
+        ss = svds(x, 2, which=which, return_singular_vectors=False)
+        ss.sort()
+        assert_allclose(s, ss, atol=np.sqrt(1e-15))
+
+
+def test_svd_v0():
+    # check that the v0 parameter works as expected
+    x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], float)
+
+    u, s, vh = svds(x, 1)
+    u2, s2, vh2 = svds(x, 1, v0=u[:,0])
+
+    assert_allclose(s, s2, atol=np.sqrt(1e-15))
+
+
+def _check_svds(A, k, U, s, VH):
+    n, m = A.shape
+
+    # Check shapes.
+    assert_equal(U.shape, (n, k))
+    assert_equal(s.shape, (k,))
+    assert_equal(VH.shape, (k, m))
+
+    # Check that the original matrix can be reconstituted.
+    A_rebuilt = (U*s).dot(VH)
+    assert_equal(A_rebuilt.shape, A.shape)
+    assert_allclose(A_rebuilt, A)
+
+    # Check that U is a semi-orthogonal matrix.
+    UH_U = np.dot(U.T.conj(), U)
+    assert_equal(UH_U.shape, (k, k))
+    assert_allclose(UH_U, np.identity(k), atol=1e-12)
+
+    # Check that V is a semi-orthogonal matrix.
+    VH_V = np.dot(VH, VH.T.conj())
+    assert_equal(VH_V.shape, (k, k))
+    assert_allclose(VH_V, np.identity(k), atol=1e-12)
+
+
+def test_svd_LM_ones_matrix():
+    # Check that svds can deal with matrix_rank less than k in LM mode.
+    k = 3
+    for n, m in (6, 5), (5, 5), (5, 6):
+        for t in float, complex:
+            A = np.ones((n, m), dtype=t)
+            U, s, VH = svds(A, k)
+
+            # Check some generic properties of svd.
+            _check_svds(A, k, U, s, VH)
+
+            # Check that the largest singular value is near sqrt(n*m)
+            # and the other singular values have been forced to zero.
+            assert_allclose(np.max(s), np.sqrt(n*m))
+            assert_array_equal(sorted(s)[:-1], 0)
+
+
+def test_svd_LM_zeros_matrix():
+    # Check that svds can deal with matrices containing only zeros.
+    k = 1
+    for n, m in (3, 4), (4, 4), (4, 3):
+        for t in float, complex:
+            A = np.zeros((n, m), dtype=t)
+            U, s, VH = svds(A, k)
+
+            # Check some generic properties of svd.
+            _check_svds(A, k, U, s, VH)
+
+            # Check that the singular values are zero.
+            assert_array_equal(s, 0)
+
+
+def test_svd_LM_zeros_matrix_gh_3452():
+    # Regression test for a github issue.
+    # https://github.com/scipy/scipy/issues/3452
+    # Note that for complex dtype the size of this matrix is too small for k=1.
+    n, m, k = 4, 2, 1
+    A = np.zeros((n, m))
+    U, s, VH = svds(A, k)
+
+    # Check some generic properties of svd.
+    _check_svds(A, k, U, s, VH)
+
+    # Check that the singular values are zero.
+    assert_array_equal(s, 0)
+
+
+class CheckingLinearOperator(LinearOperator):
+    def __init__(self, A):
+        self.A = A
+        self.dtype = A.dtype
+        self.shape = A.shape
+
+    def _matvec(self, x):
+        assert_equal(max(x.shape), np.size(x))
+        return self.A.dot(x)
+
+    def _rmatvec(self, x):
+        assert_equal(max(x.shape), np.size(x))
+        return self.A.T.conjugate().dot(x)
+
+
+def test_svd_linop():
+    nmks = [(6, 7, 3),
+            (9, 5, 4),
+            (10, 8, 5)]
+
+    def reorder(args):
+        U, s, VH = args
+        j = np.argsort(s)
+        return U[:,j], s[j], VH[j,:]
+
+    for n, m, k in nmks:
+        # Test svds on a LinearOperator.
+ A = np.random.RandomState(52).randn(n, m) + L = CheckingLinearOperator(A) + + v0 = np.ones(min(A.shape)) + + U1, s1, VH1 = reorder(svds(A, k, v0=v0)) + U2, s2, VH2 = reorder(svds(L, k, v0=v0)) + + assert_allclose(np.abs(U1), np.abs(U2)) + assert_allclose(s1, s2) + assert_allclose(np.abs(VH1), np.abs(VH2)) + assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)), + np.dot(U2, np.dot(np.diag(s2), VH2))) + + # Try again with which="SM". + A = np.random.RandomState(1909).randn(n, m) + L = CheckingLinearOperator(A) + + U1, s1, VH1 = reorder(svds(A, k, which="SM")) + U2, s2, VH2 = reorder(svds(L, k, which="SM")) + + assert_allclose(np.abs(U1), np.abs(U2)) + assert_allclose(s1, s2) + assert_allclose(np.abs(VH1), np.abs(VH2)) + assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)), + np.dot(U2, np.dot(np.diag(s2), VH2))) + + if k < min(n, m) - 1: + # Complex input and explicit which="LM". + for (dt, eps) in [(complex, 1e-7), (np.complex64, 1e-3)]: + rng = np.random.RandomState(1648) + A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt) + L = CheckingLinearOperator(A) + + U1, s1, VH1 = reorder(svds(A, k, which="LM")) + U2, s2, VH2 = reorder(svds(L, k, which="LM")) + + assert_allclose(np.abs(U1), np.abs(U2), rtol=eps) + assert_allclose(s1, s2, rtol=eps) + assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps) + assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)), + np.dot(U2, np.dot(np.diag(s2), VH2)), rtol=eps) + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_linearoperator_deallocation(): + # Check that the linear operators used by the Arpack wrappers are + # deallocatable by reference counting -- they are big objects, so + # Python's cyclic GC may not collect them fast enough before + # running out of memory if eigs/eigsh are called in a tight loop. + + M_d = np.eye(10) + M_s = csc_matrix(M_d) + M_o = aslinearoperator(M_d) + + with assert_deallocated(lambda: arpack.SpLuInv(M_s)): + pass + with assert_deallocated(lambda: arpack.LuInv(M_d)): + pass + with assert_deallocated(lambda: arpack.IterInv(M_s)): + pass + with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)): + pass + with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)): + pass + + +def test_svds_partial_return(): + x = np.array([[1, 2, 3], + [3, 4, 3], + [1, 0, 2], + [0, 0, 1]], float) + # test vertical matrix + z = csr_matrix(x) + vh_full = svds(z, 2)[-1] + vh_partial = svds(z, 2, return_singular_vectors='vh')[-1] + dvh = np.linalg.norm(np.abs(vh_full) - np.abs(vh_partial)) + if dvh > 1e-10: + raise AssertionError('right eigenvector matrices differ when using return_singular_vectors parameter') + if svds(z, 2, return_singular_vectors='vh')[0] is not None: + raise AssertionError('left eigenvector matrix was computed when it should not have been') + # test horizontal matrix + z = csr_matrix(x.T) + u_full = svds(z, 2)[0] + u_partial = svds(z, 2, return_singular_vectors='vh')[0] + du = np.linalg.norm(np.abs(u_full) - np.abs(u_partial)) + if du > 1e-10: + raise AssertionError('left eigenvector matrices differ when using return_singular_vectors parameter') + if svds(z, 2, return_singular_vectors='u')[-1] is not None: + raise AssertionError('right eigenvector matrix was computed when it should not have been') + +def test_svds_wrong_eigen_type(): + # Regression test for a github issue. + # https://github.com/scipy/scipy/issues/4590 + # Function was not checking for eigenvalue type and unintended + # values could be returned. 
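+    # (svds accepts only which='LM' or 'SM'; the eigsh-style which='LA'
+    # below must raise ValueError rather than silently returning
+    # eigenvalues of the wrong kind.)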
+ x = np.array([[1, 2, 3], + [3, 4, 3], + [1, 0, 2], + [0, 0, 1]], float) + assert_raises(ValueError, svds, x, 1, which='LA') + + +def test_parallel_threads(): + results = [] + v0 = np.random.rand(50) + + def worker(): + x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50)) + w, v = eigs(x, k=3, v0=v0) + results.append(w) + + w, v = eigsh(x, k=3, v0=v0) + results.append(w) + + threads = [threading.Thread(target=worker) for k in range(10)] + for t in threads: + t.start() + for t in threads: + t.join() + + worker() + + for r in results: + assert_allclose(r, results[-1]) + + +def test_reentering(): + # Just some linear operator that calls eigs recursively + def A_matvec(x): + x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50)) + w, v = eigs(x, k=1) + return v / w[0] + A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50)) + + # The Fortran code is not reentrant, so this fails (gracefully, not crashing) + assert_raises(RuntimeError, eigs, A, k=1) + assert_raises(RuntimeError, eigsh, A, k=1) + + +def test_regression_arpackng_1315(): + # Check that issue arpack-ng/#1315 is not present. + # Adapted from arpack-ng/TESTS/bug_1315_single.c + # If this fails, then the installed ARPACK library is faulty. + + for dtype in [np.float32, np.float64]: + np.random.seed(1234) + + w0 = np.arange(1, 1000+1).astype(dtype) + A = diags([w0], [0], shape=(1000, 1000)) + + v0 = np.random.rand(1000).astype(dtype) + w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0) + + assert_allclose(np.sort(w), np.sort(w0[-9:]), + rtol=1e-4) + + +def test_eigs_for_k_greater(): + # Test eigs() for k beyond limits. + A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse + A = generate_matrix(4, sparse=False) + M_dense = np.random.random((4, 4)) + M_sparse = generate_matrix(4, sparse=True) + M_linop = aslinearoperator(M_dense) + eig_tuple1 = eig(A, b=M_dense) + eig_tuple2 = eig(A, b=M_sparse) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + + assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1) + assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1) + assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1) + assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2) + + # M as LinearOperator + assert_raises(TypeError, eigs, A, M=M_linop, k=3) + + # Test 'A' for different types + assert_raises(TypeError, eigs, aslinearoperator(A), k=3) + assert_raises(TypeError, eigs, A_sparse, k=3) + + +def test_eigsh_for_k_greater(): + # Test eigsh() for k beyond limits. 
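+    # (for k >= N, eigsh warns and falls back to the dense
+    # scipy.linalg.eigh, so its results are compared against eigh
+    # directly.)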
+ A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse + A = generate_matrix(4, sparse=False) + M_dense = generate_matrix_symmetric(4, pos_definite=True) + M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True) + M_linop = aslinearoperator(M_dense) + eig_tuple1 = eigh(A, b=M_dense) + eig_tuple2 = eigh(A, b=M_sparse) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + + assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1) + assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1) + assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2) + + # M as LinearOperator + assert_raises(TypeError, eigsh, A, M=M_linop, k=4) + + # Test 'A' for different types + assert_raises(TypeError, eigsh, aslinearoperator(A), k=4) + assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.pyc new file mode 100644 index 0000000..1627e1a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.py new file mode 100644 index 0000000..a588a9d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.py @@ -0,0 +1,18 @@ +""" +Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG) + +LOBPCG is a preconditioned eigensolver for large symmetric positive definite +(SPD) generalized eigenproblems. + +Call the function lobpcg - see help for lobpcg.lobpcg. + +""" +from __future__ import division, print_function, absolute_import + +from .lobpcg import * + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.pyc new file mode 100644 index 0000000..c9b7e9e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py new file mode 100644 index 0000000..2d21c86 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py @@ -0,0 +1,582 @@ +""" +Pure SciPy implementation of Locally Optimal Block Preconditioned Conjugate +Gradient Method (LOBPCG), see +https://bitbucket.org/joseroman/blopex + +License: BSD + +Authors: Robert Cimrman, Andrew Knyazev + +Examples in tests directory contributed by Nils Wagner. +""" + +from __future__ import division, print_function, absolute_import +import numpy as np +from numpy.testing import assert_allclose +from scipy._lib.six import xrange +from scipy.linalg import inv, eigh, cho_factor, cho_solve, cholesky +from scipy.sparse.linalg import aslinearoperator, LinearOperator + +__all__ = ['lobpcg'] + + +def save(ar, fileName): + # Used only when verbosity level > 10. 
+ from numpy import savetxt + savetxt(fileName, ar) + + +def _report_nonhermitian(M, a, b, name): + """ + Report if `M` is not a hermitian matrix given the tolerances `a`, `b`. + """ + from scipy.linalg import norm + + md = M - M.T.conj() + + nmd = norm(md, 1) + tol = np.spacing(max(10**a, (10**b)*norm(M, 1))) + if nmd > tol: + print('matrix %s is not sufficiently Hermitian for a=%d, b=%d:' + % (name, a, b)) + print('condition: %.e < %e' % (nmd, tol)) + +## +# 21.05.2007, c + + +def as2d(ar): + """ + If the input array is 2D return it, if it is 1D, append a dimension, + making it a column vector. + """ + if ar.ndim == 2: + return ar + else: # Assume 1! + aux = np.array(ar, copy=False) + aux.shape = (ar.shape[0], 1) + return aux + + +def _makeOperator(operatorInput, expectedShape): + """Takes a dense numpy array or a sparse matrix or + a function and makes an operator performing matrix * blockvector + products. + + Examples + -------- + >>> A = _makeOperator( arrayA, (n, n) ) + >>> vectorB = A( vectorX ) + + """ + if operatorInput is None: + def ident(x): + return x + operator = LinearOperator(expectedShape, ident, matmat=ident) + else: + operator = aslinearoperator(operatorInput) + + if operator.shape != expectedShape: + raise ValueError('operator has invalid shape') + + return operator + + +def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY): + """Changes blockVectorV in place.""" + gramYBV = np.dot(blockVectorBY.T.conj(), blockVectorV) + tmp = cho_solve(factYBY, gramYBV) + blockVectorV -= np.dot(blockVectorY, tmp) + + +def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False): + if blockVectorBV is None: + if B is not None: + blockVectorBV = B(blockVectorV) + else: + blockVectorBV = blockVectorV # Shared data!!! + gramVBV = np.dot(blockVectorV.T.conj(), blockVectorBV) + gramVBV = cholesky(gramVBV) + gramVBV = inv(gramVBV, overwrite_a=True) + # gramVBV is now R^{-1}. + blockVectorV = np.dot(blockVectorV, gramVBV) + if B is not None: + blockVectorBV = np.dot(blockVectorBV, gramVBV) + + if retInvR: + return blockVectorV, blockVectorBV, gramVBV + else: + return blockVectorV, blockVectorBV + +def _get_indx(_lambda, num, largest): + """Get `num` indices into `_lambda` depending on `largest` option.""" + ii = np.argsort(_lambda) + if largest: + ii = ii[:-num-1:-1] + else: + ii = ii[:num] + + return ii + +def lobpcg(A, X, + B=None, M=None, Y=None, + tol=None, maxiter=20, + largest=True, verbosityLevel=0, + retLambdaHistory=False, retResidualNormsHistory=False): + """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG) + + LOBPCG is a preconditioned eigensolver for large symmetric positive + definite (SPD) generalized eigenproblems. + + Parameters + ---------- + A : {sparse matrix, dense matrix, LinearOperator} + The symmetric linear operator of the problem, usually a + sparse matrix. Often called the "stiffness matrix". + X : array_like + Initial approximation to the k eigenvectors. If A has + shape=(n,n) then X should have shape shape=(n,k). + B : {dense matrix, sparse matrix, LinearOperator}, optional + the right hand side operator in a generalized eigenproblem. + by default, B = Identity + often called the "mass matrix" + M : {dense matrix, sparse matrix, LinearOperator}, optional + preconditioner to A; by default M = Identity + M should approximate the inverse of A + Y : array_like, optional + n-by-sizeY matrix of constraints, sizeY < n + The iterations will be performed in the B-orthogonal complement + of the column-space of Y. 
Y must be full rank.
+
+    Returns
+    -------
+    w : array
+        Array of k eigenvalues
+    v : array
+        An array of k eigenvectors. V has the same shape as X.
+
+    Other Parameters
+    ----------------
+    tol : scalar, optional
+        Solver tolerance (stopping criterion)
+        by default: tol=n*sqrt(eps)
+    maxiter : integer, optional
+        maximum number of iterations
+        by default: maxiter=min(n,20)
+    largest : bool, optional
+        when True, solve for the largest eigenvalues, otherwise the smallest
+    verbosityLevel : integer, optional
+        controls solver output. default: verbosityLevel = 0.
+    retLambdaHistory : boolean, optional
+        whether to return eigenvalue history
+    retResidualNormsHistory : boolean, optional
+        whether to return history of residual norms
+
+    Examples
+    --------
+
+    Solve A x = lambda B x with constraints and preconditioning.
+
+    >>> from scipy.sparse import spdiags, issparse
+    >>> from scipy.sparse.linalg import lobpcg, LinearOperator
+    >>> n = 100
+    >>> vals = [np.arange(n, dtype=np.float64) + 1]
+    >>> A = spdiags(vals, 0, n, n)
+    >>> A.toarray()
+    array([[  1.,   0.,   0., ...,   0.,   0.,   0.],
+           [  0.,   2.,   0., ...,   0.,   0.,   0.],
+           [  0.,   0.,   3., ...,   0.,   0.,   0.],
+           ...,
+           [  0.,   0.,   0., ...,  98.,   0.,   0.],
+           [  0.,   0.,   0., ...,   0.,  99.,   0.],
+           [  0.,   0.,   0., ...,   0.,   0., 100.]])
+
+    Constraints.
+
+    >>> Y = np.eye(n, 3)
+
+    Initial guess for eigenvectors, should have linearly independent
+    columns. Column dimension = number of requested eigenvalues.
+
+    >>> X = np.random.rand(n, 3)
+
+    Preconditioner -- inverse of A (as an abstract linear operator).
+
+    >>> invA = spdiags([1./vals[0]], 0, n, n)
+    >>> def precond( x ):
+    ...     return invA * x
+    >>> M = LinearOperator(matvec=precond, shape=(n, n), dtype=float)
+
+    Here, ``invA`` could of course have been used directly as a preconditioner.
+    Let us then solve the problem:
+
+    >>> eigs, vecs = lobpcg(A, X, Y=Y, M=M, tol=1e-4, maxiter=40, largest=False)
+    >>> eigs
+    array([ 4.,  5.,  6.])
+
+    Note that the vectors passed in Y are the eigenvectors of the 3 smallest
+    eigenvalues. The results returned are orthogonal to those.
+
+    Notes
+    -----
+    If both retLambdaHistory and retResidualNormsHistory are True,
+    the return tuple has the following format
+    (lambda, V, lambda history, residual norms history).
+
+    In the following ``n`` denotes the matrix size and ``m`` the number
+    of required eigenvalues (smallest or largest).
+
+    The LOBPCG code internally solves eigenproblems of size 3``m`` on every
+    iteration by calling the "standard" dense eigensolver, so if ``m`` is not
+    small enough compared to ``n``, it does not make sense to call the LOBPCG
+    code, but rather one should use the "standard" eigensolver, e.g. a NumPy
+    or SciPy function, in this case.
+    If one calls the LOBPCG algorithm for 5``m`` > ``n``,
+    it will most likely break internally, so the code tries to call the
+    standard function instead.
+
+    It is not that ``n`` should be large for LOBPCG to work, but rather that
+    the ratio ``n``/``m`` should be large. If you call the LOBPCG code with
+    ``m``=1 and ``n``=10, it should work, though ``n`` is small. The method
+    is intended for extremely large ``n``/``m``; see e.g., reference [28] in
+    https://arxiv.org/abs/0705.2626
+
+    The convergence speed depends basically on two factors:
+
+    1. How well the sought eigenvalues are separated from the rest
+       of the eigenvalues. One can try to vary ``m`` to make this better.
+
+    2. How well conditioned the problem is. This can be changed by using proper
+       preconditioning.
For example, a rod vibration test problem (under tests + directory) is ill-conditioned for large ``n``, so convergence will be + slow, unless efficient preconditioning is used. + For this specific problem, a good simple preconditioner function would + be a linear solve for A, which is easy to code since A is tridiagonal. + + *Acknowledgements* + + lobpcg.py code was written by Robert Cimrman. + Many thanks belong to Andrew Knyazev, the author of the algorithm, + for lots of advice and support. + + References + ---------- + .. [1] A. V. Knyazev (2001), + Toward the Optimal Preconditioned Eigensolver: Locally Optimal + Block Preconditioned Conjugate Gradient Method. + SIAM Journal on Scientific Computing 23, no. 2, + pp. 517-541. :doi:`10.1137/S1064827500366124` + + .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007), + Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX) + in hypre and PETSc. https://arxiv.org/abs/0705.2626 + + .. [3] A. V. Knyazev's C and MATLAB implementations: + https://bitbucket.org/joseroman/blopex + + """ + blockVectorX = X + blockVectorY = Y + residualTolerance = tol + maxIterations = maxiter + + if blockVectorY is not None: + sizeY = blockVectorY.shape[1] + else: + sizeY = 0 + + # Block size. + if len(blockVectorX.shape) != 2: + raise ValueError('expected rank-2 array for argument X') + + n, sizeX = blockVectorX.shape + if sizeX > n: + raise ValueError('X column dimension exceeds the row dimension') + + A = _makeOperator(A, (n,n)) + B = _makeOperator(B, (n,n)) + M = _makeOperator(M, (n,n)) + + if (n - sizeY) < (5 * sizeX): + # warn('The problem size is small compared to the block size.' \ + # ' Using dense eigensolver instead of LOBPCG.') + + if blockVectorY is not None: + raise NotImplementedError('The dense eigensolver ' + 'does not support constraints.') + + # Define the closed range of indices of eigenvalues to return. + if largest: + eigvals = (n - sizeX, n-1) + else: + eigvals = (0, sizeX-1) + + A_dense = A(np.eye(n)) + B_dense = None if B is None else B(np.eye(n)) + + vals, vecs = eigh(A_dense, B_dense, eigvals=eigvals, check_finite=False) + if largest: + # Reverse order to be compatible with eigs() in 'LM' mode. + vals = vals[::-1] + vecs = vecs[:, ::-1] + + return vals, vecs + + if residualTolerance is None: + residualTolerance = np.sqrt(1e-15) * n + + maxIterations = min(n, maxIterations) + + if verbosityLevel: + aux = "Solving " + if B is None: + aux += "standard" + else: + aux += "generalized" + aux += " eigenvalue problem with" + if M is None: + aux += "out" + aux += " preconditioning\n\n" + aux += "matrix size %d\n" % n + aux += "block size %d\n\n" % sizeX + if blockVectorY is None: + aux += "No constraints\n\n" + else: + if sizeY > 1: + aux += "%d constraints\n\n" % sizeY + else: + aux += "%d constraint\n\n" % sizeY + print(aux) + + ## + # Apply constraints to X. + if blockVectorY is not None: + + if B is not None: + blockVectorBY = B(blockVectorY) + else: + blockVectorBY = blockVectorY + + # gramYBY is a dense array. + gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY) + try: + # gramYBY is a Cholesky factor from now on... + gramYBY = cho_factor(gramYBY) + except Exception: + raise ValueError('cannot handle linearly dependent constraints') + + _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY) + + ## + # B-orthonormalize X. + blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX) + + ## + # Compute the initial Ritz vectors: solve the eigenproblem. 
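+    # (Rayleigh-Ritz sketch: form the small projected matrix
+    # gramXAX = X^H A X, solve that dense sizeX-by-sizeX eigenproblem,
+    # and rotate the block X by the resulting eigenvectors so that its
+    # columns become the current Ritz vectors.)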
+ blockVectorAX = A(blockVectorX) + gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX) + + _lambda, eigBlockVector = eigh(gramXAX, check_finite=False) + ii = _get_indx(_lambda, sizeX, largest) + _lambda = _lambda[ii] + + eigBlockVector = np.asarray(eigBlockVector[:,ii]) + blockVectorX = np.dot(blockVectorX, eigBlockVector) + blockVectorAX = np.dot(blockVectorAX, eigBlockVector) + if B is not None: + blockVectorBX = np.dot(blockVectorBX, eigBlockVector) + + ## + # Active index set. + activeMask = np.ones((sizeX,), dtype=bool) + + lambdaHistory = [_lambda] + residualNormsHistory = [] + + previousBlockSize = sizeX + ident = np.eye(sizeX, dtype=A.dtype) + ident0 = np.eye(sizeX, dtype=A.dtype) + + ## + # Main iteration loop. + + blockVectorP = None # set during iteration + blockVectorAP = None + blockVectorBP = None + + for iterationNumber in xrange(maxIterations): + if verbosityLevel > 0: + print('iteration %d' % iterationNumber) + + aux = blockVectorBX * _lambda[np.newaxis,:] + blockVectorR = blockVectorAX - aux + + aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0) + residualNorms = np.sqrt(aux) + + residualNormsHistory.append(residualNorms) + + ii = np.where(residualNorms > residualTolerance, True, False) + activeMask = activeMask & ii + if verbosityLevel > 2: + print(activeMask) + + currentBlockSize = activeMask.sum() + if currentBlockSize != previousBlockSize: + previousBlockSize = currentBlockSize + ident = np.eye(currentBlockSize, dtype=A.dtype) + + if currentBlockSize == 0: + break + + if verbosityLevel > 0: + print('current block size:', currentBlockSize) + print('eigenvalue:', _lambda) + print('residual norms:', residualNorms) + if verbosityLevel > 10: + print(eigBlockVector) + + activeBlockVectorR = as2d(blockVectorR[:,activeMask]) + + if iterationNumber > 0: + activeBlockVectorP = as2d(blockVectorP[:,activeMask]) + activeBlockVectorAP = as2d(blockVectorAP[:,activeMask]) + activeBlockVectorBP = as2d(blockVectorBP[:,activeMask]) + + if M is not None: + # Apply preconditioner T to the active residuals. + activeBlockVectorR = M(activeBlockVectorR) + + ## + # Apply constraints to the preconditioned residuals. + if blockVectorY is not None: + _applyConstraints(activeBlockVectorR, + gramYBY, blockVectorBY, blockVectorY) + + ## + # B-orthonormalize the preconditioned residuals. 
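+        # (B-orthonormalizing the preconditioned residuals makes the
+        # corresponding diagonal block of gramB below equal to the identity,
+        # which keeps the small generalized eigenproblem well posed.)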
+ + aux = _b_orthonormalize(B, activeBlockVectorR) + activeBlockVectorR, activeBlockVectorBR = aux + + activeBlockVectorAR = A(activeBlockVectorR) + + if iterationNumber > 0: + aux = _b_orthonormalize(B, activeBlockVectorP, + activeBlockVectorBP, retInvR=True) + activeBlockVectorP, activeBlockVectorBP, invR = aux + activeBlockVectorAP = np.dot(activeBlockVectorAP, invR) + + ## + # Perform the Rayleigh Ritz Procedure: + # Compute symmetric Gram matrices: + + xaw = np.dot(blockVectorX.T.conj(), activeBlockVectorAR) + waw = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR) + xbw = np.dot(blockVectorX.T.conj(), activeBlockVectorBR) + + if iterationNumber > 0: + xap = np.dot(blockVectorX.T.conj(), activeBlockVectorAP) + wap = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP) + pap = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP) + xbp = np.dot(blockVectorX.T.conj(), activeBlockVectorBP) + wbp = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP) + + gramA = np.bmat([[np.diag(_lambda), xaw, xap], + [xaw.T.conj(), waw, wap], + [xap.T.conj(), wap.T.conj(), pap]]) + + gramB = np.bmat([[ident0, xbw, xbp], + [xbw.T.conj(), ident, wbp], + [xbp.T.conj(), wbp.T.conj(), ident]]) + else: + gramA = np.bmat([[np.diag(_lambda), xaw], + [xaw.T.conj(), waw]]) + gramB = np.bmat([[ident0, xbw], + [xbw.T.conj(), ident]]) + + if verbosityLevel > 0: + _report_nonhermitian(gramA, 3, -1, 'gramA') + _report_nonhermitian(gramB, 3, -1, 'gramB') + + if verbosityLevel > 10: + save(gramA, 'gramA') + save(gramB, 'gramB') + + # Solve the generalized eigenvalue problem. + _lambda, eigBlockVector = eigh(gramA, gramB, check_finite=False) + ii = _get_indx(_lambda, sizeX, largest) + if verbosityLevel > 10: + print(ii) + + _lambda = _lambda[ii] + eigBlockVector = eigBlockVector[:,ii] + + lambdaHistory.append(_lambda) + + if verbosityLevel > 10: + print('lambda:', _lambda) +## # Normalize eigenvectors! +## aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 ) +## eigVecNorms = np.sqrt( aux ) +## eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis,:] +# eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector ) + + if verbosityLevel > 10: + print(eigBlockVector) + + ## + # Compute Ritz vectors. 
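+        # (Each new column of X is the Ritz combination of the previous X,
+        # the preconditioned residuals R and the directions P; AX and BX are
+        # updated with the same coefficients so no extra applications of A
+        # or B are needed.)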
+ if iterationNumber > 0: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize] + eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:] + + pp = np.dot(activeBlockVectorR, eigBlockVectorR) + pp += np.dot(activeBlockVectorP, eigBlockVectorP) + + app = np.dot(activeBlockVectorAR, eigBlockVectorR) + app += np.dot(activeBlockVectorAP, eigBlockVectorP) + + bpp = np.dot(activeBlockVectorBR, eigBlockVectorR) + bpp += np.dot(activeBlockVectorBP, eigBlockVectorP) + else: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX:] + + pp = np.dot(activeBlockVectorR, eigBlockVectorR) + app = np.dot(activeBlockVectorAR, eigBlockVectorR) + bpp = np.dot(activeBlockVectorBR, eigBlockVectorR) + + if verbosityLevel > 10: + print(pp) + print(app) + print(bpp) + + blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp + blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app + blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp + + blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp + + aux = blockVectorBX * _lambda[np.newaxis,:] + blockVectorR = blockVectorAX - aux + + aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0) + residualNorms = np.sqrt(aux) + + if verbosityLevel > 0: + print('final eigenvalue:', _lambda) + print('final residual norms:', residualNorms) + + if retLambdaHistory: + if retResidualNormsHistory: + return _lambda, blockVectorX, lambdaHistory, residualNormsHistory + else: + return _lambda, blockVectorX, lambdaHistory + else: + if retResidualNormsHistory: + return _lambda, blockVectorX, residualNormsHistory + else: + return _lambda, blockVectorX diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/lobpcg.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/lobpcg.pyc new file mode 100644 index 0000000..78688c0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/lobpcg.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/setup.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/setup.py new file mode 100644 index 0000000..8418808 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/setup.py @@ -0,0 +1,15 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('lobpcg',parent_package,top_path) + config.add_data_dir('tests') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/setup.pyc new file mode 100644 index 0000000..d3f3103 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/__init__.pyc 
b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/__init__.pyc new file mode 100644 index 0000000..3e6dc39 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py new file mode 100644 index 0000000..9edd38a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py @@ -0,0 +1,261 @@ +""" Test functions for the sparse.linalg.eigen.lobpcg module +""" +from __future__ import division, print_function, absolute_import + +import itertools + +import numpy as np +from numpy.testing import (assert_almost_equal, assert_equal, + assert_allclose, assert_array_less) + +from scipy import ones, rand, r_, diag, linalg, eye +from scipy.linalg import eig, eigh, toeplitz +import scipy.sparse +from scipy.sparse.linalg.eigen.lobpcg import lobpcg +from scipy.sparse.linalg import eigs +from scipy.sparse import spdiags + +import pytest + +def ElasticRod(n): + # Fixed-free elastic rod + L = 1.0 + le = L/n + rho = 7.85e3 + S = 1.e-4 + E = 2.1e11 + mass = rho*S*le/6. + k = E*S/le + A = k*(diag(r_[2.*ones(n-1),1])-diag(ones(n-1),1)-diag(ones(n-1),-1)) + B = mass*(diag(r_[4.*ones(n-1),2])+diag(ones(n-1),1)+diag(ones(n-1),-1)) + return A,B + + +def MikotaPair(n): + # Mikota pair acts as a nice test since the eigenvalues + # are the squares of the integers n, n=1,2,... + x = np.arange(1,n+1) + B = diag(1./x) + y = np.arange(n-1,0,-1) + z = np.arange(2*n-1,0,-2) + A = diag(z)-diag(y,-1)-diag(y,1) + return A,B + + +def compare_solutions(A,B,m): + n = A.shape[0] + + np.random.seed(0) + + V = rand(n,m) + X = linalg.orth(V) + + eigs,vecs = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False) + eigs.sort() + + w,v = eig(A,b=B) + w.sort() + + assert_almost_equal(w[:int(m/2)],eigs[:int(m/2)],decimal=2) + + +def test_Small(): + A,B = ElasticRod(10) + compare_solutions(A,B,10) + A,B = MikotaPair(10) + compare_solutions(A,B,10) + + +def test_ElasticRod(): + A,B = ElasticRod(100) + compare_solutions(A,B,20) + + +def test_MikotaPair(): + A,B = MikotaPair(100) + compare_solutions(A,B,20) + + +def test_trivial(): + n = 5 + X = ones((n, 1)) + A = eye(n) + compare_solutions(A, None, n) + + +def test_regression(): + # https://mail.python.org/pipermail/scipy-user/2010-October/026944.html + n = 10 + X = np.ones((n, 1)) + A = np.identity(n) + w, V = lobpcg(A, X) + assert_allclose(w, [1]) + + +def test_diagonal(): + # This test was moved from '__main__' in lobpcg.py. + # Coincidentally or not, this is the same eigensystem + # required to reproduce arpack bug + # https://forge.scilab.org/p/arpack-ng/issues/1397/ + # even using the same n=100. + + np.random.seed(1234) + + # The system of interest is of size n x n. + n = 100 + + # We care about only m eigenpairs. + m = 4 + + # Define the generalized eigenvalue problem Av = cBv + # where (c, v) is a generalized eigenpair, + # and where we choose A to be the diagonal matrix whose entries are 1..n + # and where B is chosen to be the identity matrix. + vals = np.arange(1, n+1, dtype=float) + A = scipy.sparse.diags([vals], [0], (n, n)) + B = scipy.sparse.eye(n) + + # Let the preconditioner M be the inverse of A. + M = scipy.sparse.diags([np.reciprocal(vals)], [0], (n, n)) + + # Pick random initial vectors. 
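+    # (A random n-by-m block has linearly independent columns with
+    # probability one, which is all that lobpcg requires of the guess.)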
+ X = np.random.rand(n, m) + + # Require that the returned eigenvectors be in the orthogonal complement + # of the first few standard basis vectors. + m_excluded = 3 + Y = np.eye(n, m_excluded) + + eigs, vecs = lobpcg(A, X, B, M=M, Y=Y, tol=1e-4, maxiter=40, largest=False) + + assert_allclose(eigs, np.arange(1+m_excluded, 1+m_excluded+m)) + _check_eigen(A, eigs, vecs, rtol=1e-3, atol=1e-3) + + +def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14): + mult_wV = np.multiply(w, V) + dot_MV = M.dot(V) + assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol) + + +def _check_fiedler(n, p): + # This is not necessarily the recommended way to find the Fiedler vector. + np.random.seed(1234) + col = np.zeros(n) + col[1] = 1 + A = toeplitz(col) + D = np.diag(A.sum(axis=1)) + L = D - A + # Compute the full eigendecomposition using tricks, e.g. + # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf + tmp = np.pi * np.arange(n) / n + analytic_w = 2 * (1 - np.cos(tmp)) + analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp)) + _check_eigen(L, analytic_w, analytic_V) + # Compute the full eigendecomposition using eigh. + eigh_w, eigh_V = eigh(L) + _check_eigen(L, eigh_w, eigh_V) + # Check that the first eigenvalue is near zero and that the rest agree. + assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14) + assert_allclose(eigh_w[1:], analytic_w[1:]) + + # Check small lobpcg eigenvalues. + X = analytic_V[:, :p] + lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False) + assert_equal(lobpcg_w.shape, (p,)) + assert_equal(lobpcg_V.shape, (n, p)) + _check_eigen(L, lobpcg_w, lobpcg_V) + assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14) + assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p]) + + # Check large lobpcg eigenvalues. + X = analytic_V[:, -p:] + lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True) + assert_equal(lobpcg_w.shape, (p,)) + assert_equal(lobpcg_V.shape, (n, p)) + _check_eigen(L, lobpcg_w, lobpcg_V) + assert_allclose(np.sort(lobpcg_w), analytic_w[-p:]) + + # Look for the Fiedler vector using good but not exactly correct guesses. + fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2))) + X = np.vstack((np.ones(n), fiedler_guess)).T + lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False) + # Mathematically, the smaller eigenvalue should be zero + # and the larger should be the algebraic connectivity. + lobpcg_w = np.sort(lobpcg_w) + assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14) + + +def test_fiedler_small_8(): + # This triggers the dense path because 8 < 2*5. + _check_fiedler(8, 2) + + +def test_fiedler_large_12(): + # This does not trigger the dense path, because 2*5 <= 12. 
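+    # (lobpcg switches to a dense eigensolver when n - sizeY < 5 * sizeX;
+    # with n=12 and a 2-column block, 12 >= 10, so the iterative path runs.)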
+ _check_fiedler(12, 2) + + +def test_hermitian(): + np.random.seed(1234) + + sizes = [3, 10, 50] + ks = [1, 3, 10, 50] + gens = [True, False] + + for size, k, gen in itertools.product(sizes, ks, gens): + if k > size: + continue + + H = np.random.rand(size, size) + 1.j * np.random.rand(size, size) + H = 10 * np.eye(size) + H + H.T.conj() + + X = np.random.rand(size, k) + + if not gen: + B = np.eye(size) + w, v = lobpcg(H, X, maxiter=5000) + w0, v0 = eigh(H) + else: + B = np.random.rand(size, size) + 1.j * np.random.rand(size, size) + B = 10 * np.eye(size) + B.dot(B.T.conj()) + w, v = lobpcg(H, X, B, maxiter=5000, largest=False) + w0, v0 = eigh(H, B) + + for wx, vx in zip(w, v.T): + # Check eigenvector + assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx) + / np.linalg.norm(H.dot(vx)), + 0, atol=5e-4, rtol=0) + + # Compare eigenvalues + j = np.argmin(abs(w0 - wx)) + assert_allclose(wx, w0[j], rtol=1e-4) + +# The n=5 case tests the alternative small matrix code path that uses eigh(). +@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)]) +def test_eigs_consistency(n, atol): + vals = np.arange(1, n+1, dtype=np.float64) + A = spdiags(vals, 0, n, n) + np.random.seed(345678) + X = np.random.rand(n, 2) + lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100) + vals, vecs = eigs(A, k=2) + + _check_eigen(A, lvals, lvecs, atol=atol, rtol=0) + assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14) + +def test_verbosity(): + """Check that nonzero verbosity level code runs. + """ + A, B = ElasticRod(100) + + n = A.shape[0] + m = 20 + + np.random.seed(0) + V = rand(n,m) + X = linalg.orth(V) + + eigs,vecs = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False, + verbosityLevel=11) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.pyc new file mode 100644 index 0000000..8c6c4a2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/setup.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/setup.py new file mode 100644 index 0000000..fc7b99f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/setup.py @@ -0,0 +1,17 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('eigen',parent_package,top_path) + + config.add_subpackage(('arpack')) + config.add_subpackage(('lobpcg')) + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/setup.pyc new file mode 100644 index 0000000..f12edc2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/interface.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/interface.py new file mode 100644 index 0000000..7a63ad5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/interface.py @@ -0,0 +1,703 @@ +"""Abstract linear algebra library. 
+
+This module defines a class hierarchy that implements a kind of "lazy"
+matrix representation, called the ``LinearOperator``. It can be used to do
+linear algebra with extremely large sparse or structured matrices, without
+representing those explicitly in memory. Such matrices can be added,
+multiplied, transposed, etc.
+
+As a motivating example, suppose you want a matrix where almost all of
+the elements have the value one. The standard sparse matrix representation
+skips the storage of zeros, but not ones. By contrast, a LinearOperator is
+able to represent such matrices efficiently. First, we need a compact way to
+represent an all-ones matrix::
+
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import LinearOperator, aslinearoperator
+    >>> class Ones(LinearOperator):
+    ...     def __init__(self, shape):
+    ...         super(Ones, self).__init__(dtype=None, shape=shape)
+    ...     def _matvec(self, x):
+    ...         return np.repeat(x.sum(), self.shape[0])
+
+Instances of this class emulate ``np.ones(shape)``, but using a constant
+amount of storage, independent of ``shape``. The ``_matvec`` method specifies
+how this linear operator multiplies with (operates on) a vector. We can now
+add this operator to a sparse matrix that stores only offsets from one::
+
+    >>> from scipy.sparse import csr_matrix
+    >>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
+    >>> A = aslinearoperator(offsets) + Ones(offsets.shape)
+    >>> A.dot([1, 2, 3])
+    array([13,  4, 15])
+
+The result is the same as that given by its dense, explicitly-stored
+counterpart::
+
+    >>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
+    array([13,  4, 15])
+
+Several algorithms in the ``scipy.sparse`` library are able to operate on
+``LinearOperator`` instances.
+"""
+
+from __future__ import division, print_function, absolute_import
+
+import warnings
+
+import numpy as np
+
+from scipy.sparse import isspmatrix
+from scipy.sparse.sputils import isshape, isintlike
+
+__all__ = ['LinearOperator', 'aslinearoperator']
+
+
+class LinearOperator(object):
+    """Common interface for performing matrix vector products
+
+    Many iterative methods (e.g. cg, gmres) do not need to know the
+    individual entries of a matrix to solve a linear system A*x=b.
+    Such solvers only require the computation of matrix vector
+    products, A*v where v is a dense vector.  This class serves as
+    an abstract interface between iterative solvers and matrix-like
+    objects.
+
+    To construct a concrete LinearOperator, either pass appropriate
+    callables to the constructor of this class, or subclass it.
+
+    A subclass must implement either one of the methods ``_matvec``
+    and ``_matmat``, and the attributes/properties ``shape`` (pair of
+    integers) and ``dtype`` (may be None). It may call the ``__init__``
+    on this class to have these attributes validated. Implementing
+    ``_matvec`` automatically implements ``_matmat`` (using a naive
+    algorithm) and vice-versa.
+
+    Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
+    to implement the Hermitian adjoint (conjugate transpose). As with
+    ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
+    ``_adjoint`` implements the other automatically. Implementing
+    ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
+    backwards compatibility.
+
+    Parameters
+    ----------
+    shape : tuple
+        Matrix dimensions (M,N).
+    matvec : callable f(v)
+        Returns A * v.
+    rmatvec : callable f(v)
+        Returns A^H * v, where A^H is the conjugate transpose of A.
+    matmat : callable f(V)
+        Returns A * V, where V is a dense matrix with dimensions (N,K).
+ dtype : dtype + Data type of the matrix. + + Attributes + ---------- + args : tuple + For linear operators describing products etc. of other linear + operators, the operands of the binary operation. + + See Also + -------- + aslinearoperator : Construct LinearOperators + + Notes + ----- + The user-defined matvec() function must properly handle the case + where v has shape (N,) as well as the (N,1) case. The shape of + the return type is handled internally by LinearOperator. + + LinearOperator instances can also be multiplied, added with each + other and exponentiated, all lazily: the result of these operations + is always a new, composite LinearOperator, that defers linear + operations to the original operators and combines the results. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse.linalg import LinearOperator + >>> def mv(v): + ... return np.array([2*v[0], 3*v[1]]) + ... + >>> A = LinearOperator((2,2), matvec=mv) + >>> A + <2x2 _CustomLinearOperator with dtype=float64> + >>> A.matvec(np.ones(2)) + array([ 2., 3.]) + >>> A * np.ones(2) + array([ 2., 3.]) + + """ + def __new__(cls, *args, **kwargs): + if cls is LinearOperator: + # Operate as _CustomLinearOperator factory. + return super(LinearOperator, cls).__new__(_CustomLinearOperator) + else: + obj = super(LinearOperator, cls).__new__(cls) + + if (type(obj)._matvec == LinearOperator._matvec + and type(obj)._matmat == LinearOperator._matmat): + warnings.warn("LinearOperator subclass should implement" + " at least one of _matvec and _matmat.", + category=RuntimeWarning, stacklevel=2) + + return obj + + def __init__(self, dtype, shape): + """Initialize this LinearOperator. + + To be called by subclasses. ``dtype`` may be None; ``shape`` should + be convertible to a length-2 tuple. + """ + if dtype is not None: + dtype = np.dtype(dtype) + + shape = tuple(shape) + if not isshape(shape): + raise ValueError("invalid shape %r (must be 2-d)" % (shape,)) + + self.dtype = dtype + self.shape = shape + + def _init_dtype(self): + """Called from subclasses at the end of the __init__ routine. + """ + if self.dtype is None: + v = np.zeros(self.shape[-1]) + self.dtype = np.asarray(self.matvec(v)).dtype + + def _matmat(self, X): + """Default matrix-matrix multiplication handler. + + Falls back on the user-defined _matvec method, so defining that will + define matrix multiplication (though in a very suboptimal way). + """ + + return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T]) + + def _matvec(self, x): + """Default matrix-vector multiplication handler. + + If self is a linear operator of shape (M, N), then this method will + be called on a shape (N,) or (N, 1) ndarray, and should return a + shape (M,) or (M, 1) ndarray. + + This default implementation falls back on _matmat, so defining that + will define matrix-vector multiplication as well. + """ + return self.matmat(x.reshape(-1, 1)) + + def matvec(self, x): + """Matrix-vector multiplication. + + Performs the operation y=A*x where A is an MxN linear + operator and x is a column vector or 1-d array. + + Parameters + ---------- + x : {matrix, ndarray} + An array with shape (N,) or (N,1). + + Returns + ------- + y : {matrix, ndarray} + A matrix or ndarray with shape (M,) or (M,1) depending + on the type and shape of the x argument. + + Notes + ----- + This matvec wraps the user-specified matvec routine or overridden + _matvec method to ensure that y has the correct shape and type. 
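+
+        Examples
+        --------
+        A quick check on a small dense operator (the exact ``array``
+        formatting of the output may vary between numpy versions):
+
+        >>> import numpy as np
+        >>> from scipy.sparse.linalg import aslinearoperator
+        >>> A = aslinearoperator(np.array([[1., 2.], [3., 4.]]))
+        >>> A.matvec(np.ones(2))
+        array([ 3.,  7.])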
+ + """ + + x = np.asanyarray(x) + + M,N = self.shape + + if x.shape != (N,) and x.shape != (N,1): + raise ValueError('dimension mismatch') + + y = self._matvec(x) + + if isinstance(x, np.matrix): + y = np.asmatrix(y) + else: + y = np.asarray(y) + + if x.ndim == 1: + y = y.reshape(M) + elif x.ndim == 2: + y = y.reshape(M,1) + else: + raise ValueError('invalid shape returned by user-defined matvec()') + + return y + + def rmatvec(self, x): + """Adjoint matrix-vector multiplication. + + Performs the operation y = A^H * x where A is an MxN linear + operator and x is a column vector or 1-d array. + + Parameters + ---------- + x : {matrix, ndarray} + An array with shape (M,) or (M,1). + + Returns + ------- + y : {matrix, ndarray} + A matrix or ndarray with shape (N,) or (N,1) depending + on the type and shape of the x argument. + + Notes + ----- + This rmatvec wraps the user-specified rmatvec routine or overridden + _rmatvec method to ensure that y has the correct shape and type. + + """ + + x = np.asanyarray(x) + + M,N = self.shape + + if x.shape != (M,) and x.shape != (M,1): + raise ValueError('dimension mismatch') + + y = self._rmatvec(x) + + if isinstance(x, np.matrix): + y = np.asmatrix(y) + else: + y = np.asarray(y) + + if x.ndim == 1: + y = y.reshape(N) + elif x.ndim == 2: + y = y.reshape(N,1) + else: + raise ValueError('invalid shape returned by user-defined rmatvec()') + + return y + + def _rmatvec(self, x): + """Default implementation of _rmatvec; defers to adjoint.""" + if type(self)._adjoint == LinearOperator._adjoint: + # _adjoint not overridden, prevent infinite recursion + raise NotImplementedError + else: + return self.H.matvec(x) + + def matmat(self, X): + """Matrix-matrix multiplication. + + Performs the operation y=A*X where A is an MxN linear + operator and X dense N*K matrix or ndarray. + + Parameters + ---------- + X : {matrix, ndarray} + An array with shape (N,K). + + Returns + ------- + Y : {matrix, ndarray} + A matrix or ndarray with shape (M,K) depending on + the type of the X argument. + + Notes + ----- + This matmat wraps any user-specified matmat routine or overridden + _matmat method to ensure that y has the correct type. + + """ + + X = np.asanyarray(X) + + if X.ndim != 2: + raise ValueError('expected 2-d ndarray or matrix, not %d-d' + % X.ndim) + + M,N = self.shape + + if X.shape[0] != N: + raise ValueError('dimension mismatch: %r, %r' + % (self.shape, X.shape)) + + Y = self._matmat(X) + + if isinstance(Y, np.matrix): + Y = np.asmatrix(Y) + + return Y + + def __call__(self, x): + return self*x + + def __mul__(self, x): + return self.dot(x) + + def dot(self, x): + """Matrix-matrix or matrix-vector multiplication. + + Parameters + ---------- + x : array_like + 1-d or 2-d array, representing a vector or matrix. + + Returns + ------- + Ax : array + 1-d or 2-d array (depending on the shape of x) that represents + the result of applying this linear operator on x. 
+ + """ + if isinstance(x, LinearOperator): + return _ProductLinearOperator(self, x) + elif np.isscalar(x): + return _ScaledLinearOperator(self, x) + else: + x = np.asarray(x) + + if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1: + return self.matvec(x) + elif x.ndim == 2: + return self.matmat(x) + else: + raise ValueError('expected 1-d or 2-d array or matrix, got %r' + % x) + + def __matmul__(self, other): + if np.isscalar(other): + raise ValueError("Scalar operands are not allowed, " + "use '*' instead") + return self.__mul__(other) + + def __rmatmul__(self, other): + if np.isscalar(other): + raise ValueError("Scalar operands are not allowed, " + "use '*' instead") + return self.__rmul__(other) + + def __rmul__(self, x): + if np.isscalar(x): + return _ScaledLinearOperator(self, x) + else: + return NotImplemented + + def __pow__(self, p): + if np.isscalar(p): + return _PowerLinearOperator(self, p) + else: + return NotImplemented + + def __add__(self, x): + if isinstance(x, LinearOperator): + return _SumLinearOperator(self, x) + else: + return NotImplemented + + def __neg__(self): + return _ScaledLinearOperator(self, -1) + + def __sub__(self, x): + return self.__add__(-x) + + def __repr__(self): + M,N = self.shape + if self.dtype is None: + dt = 'unspecified dtype' + else: + dt = 'dtype=' + str(self.dtype) + + return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt) + + def adjoint(self): + """Hermitian adjoint. + + Returns the Hermitian adjoint of self, aka the Hermitian + conjugate or Hermitian transpose. For a complex matrix, the + Hermitian adjoint is equal to the conjugate transpose. + + Can be abbreviated self.H instead of self.adjoint(). + + Returns + ------- + A_H : LinearOperator + Hermitian adjoint of self. + """ + return self._adjoint() + + H = property(adjoint) + + def transpose(self): + """Transpose this linear operator. + + Returns a LinearOperator that represents the transpose of this one. + Can be abbreviated self.T instead of self.transpose(). 
+ """ + return self._transpose() + + T = property(transpose) + + def _adjoint(self): + """Default implementation of _adjoint; defers to rmatvec.""" + shape = (self.shape[1], self.shape[0]) + return _CustomLinearOperator(shape, matvec=self.rmatvec, + rmatvec=self.matvec, + dtype=self.dtype) + + +class _CustomLinearOperator(LinearOperator): + """Linear operator defined in terms of user-specified operations.""" + + def __init__(self, shape, matvec, rmatvec=None, matmat=None, dtype=None): + super(_CustomLinearOperator, self).__init__(dtype, shape) + + self.args = () + + self.__matvec_impl = matvec + self.__rmatvec_impl = rmatvec + self.__matmat_impl = matmat + + self._init_dtype() + + def _matmat(self, X): + if self.__matmat_impl is not None: + return self.__matmat_impl(X) + else: + return super(_CustomLinearOperator, self)._matmat(X) + + def _matvec(self, x): + return self.__matvec_impl(x) + + def _rmatvec(self, x): + func = self.__rmatvec_impl + if func is None: + raise NotImplementedError("rmatvec is not defined") + return self.__rmatvec_impl(x) + + def _adjoint(self): + return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]), + matvec=self.__rmatvec_impl, + rmatvec=self.__matvec_impl, + dtype=self.dtype) + + +def _get_dtype(operators, dtypes=None): + if dtypes is None: + dtypes = [] + for obj in operators: + if obj is not None and hasattr(obj, 'dtype'): + dtypes.append(obj.dtype) + return np.find_common_type(dtypes, []) + + +class _SumLinearOperator(LinearOperator): + def __init__(self, A, B): + if not isinstance(A, LinearOperator) or \ + not isinstance(B, LinearOperator): + raise ValueError('both operands have to be a LinearOperator') + if A.shape != B.shape: + raise ValueError('cannot add %r and %r: shape mismatch' + % (A, B)) + self.args = (A, B) + super(_SumLinearOperator, self).__init__(_get_dtype([A, B]), A.shape) + + def _matvec(self, x): + return self.args[0].matvec(x) + self.args[1].matvec(x) + + def _rmatvec(self, x): + return self.args[0].rmatvec(x) + self.args[1].rmatvec(x) + + def _matmat(self, x): + return self.args[0].matmat(x) + self.args[1].matmat(x) + + def _adjoint(self): + A, B = self.args + return A.H + B.H + + +class _ProductLinearOperator(LinearOperator): + def __init__(self, A, B): + if not isinstance(A, LinearOperator) or \ + not isinstance(B, LinearOperator): + raise ValueError('both operands have to be a LinearOperator') + if A.shape[1] != B.shape[0]: + raise ValueError('cannot multiply %r and %r: shape mismatch' + % (A, B)) + super(_ProductLinearOperator, self).__init__(_get_dtype([A, B]), + (A.shape[0], B.shape[1])) + self.args = (A, B) + + def _matvec(self, x): + return self.args[0].matvec(self.args[1].matvec(x)) + + def _rmatvec(self, x): + return self.args[1].rmatvec(self.args[0].rmatvec(x)) + + def _matmat(self, x): + return self.args[0].matmat(self.args[1].matmat(x)) + + def _adjoint(self): + A, B = self.args + return B.H * A.H + + +class _ScaledLinearOperator(LinearOperator): + def __init__(self, A, alpha): + if not isinstance(A, LinearOperator): + raise ValueError('LinearOperator expected as A') + if not np.isscalar(alpha): + raise ValueError('scalar expected as alpha') + dtype = _get_dtype([A], [type(alpha)]) + super(_ScaledLinearOperator, self).__init__(dtype, A.shape) + self.args = (A, alpha) + + def _matvec(self, x): + return self.args[1] * self.args[0].matvec(x) + + def _rmatvec(self, x): + return np.conj(self.args[1]) * self.args[0].rmatvec(x) + + def _matmat(self, x): + return self.args[1] * self.args[0].matmat(x) + + def _adjoint(self): 
+ A, alpha = self.args + return A.H * np.conj(alpha) + + +class _PowerLinearOperator(LinearOperator): + def __init__(self, A, p): + if not isinstance(A, LinearOperator): + raise ValueError('LinearOperator expected as A') + if A.shape[0] != A.shape[1]: + raise ValueError('square LinearOperator expected, got %r' % A) + if not isintlike(p) or p < 0: + raise ValueError('non-negative integer expected as p') + + super(_PowerLinearOperator, self).__init__(_get_dtype([A]), A.shape) + self.args = (A, p) + + def _power(self, fun, x): + res = np.array(x, copy=True) + for i in range(self.args[1]): + res = fun(res) + return res + + def _matvec(self, x): + return self._power(self.args[0].matvec, x) + + def _rmatvec(self, x): + return self._power(self.args[0].rmatvec, x) + + def _matmat(self, x): + return self._power(self.args[0].matmat, x) + + def _adjoint(self): + A, p = self.args + return A.H ** p + + +class MatrixLinearOperator(LinearOperator): + def __init__(self, A): + super(MatrixLinearOperator, self).__init__(A.dtype, A.shape) + self.A = A + self.__adj = None + self.args = (A,) + + def _matmat(self, X): + return self.A.dot(X) + + def _adjoint(self): + if self.__adj is None: + self.__adj = _AdjointMatrixOperator(self) + return self.__adj + + +class _AdjointMatrixOperator(MatrixLinearOperator): + def __init__(self, adjoint): + self.A = adjoint.A.T.conj() + self.__adjoint = adjoint + self.args = (adjoint,) + self.shape = adjoint.shape[1], adjoint.shape[0] + + @property + def dtype(self): + return self.__adjoint.dtype + + def _adjoint(self): + return self.__adjoint + + +class IdentityOperator(LinearOperator): + def __init__(self, shape, dtype=None): + super(IdentityOperator, self).__init__(dtype, shape) + + def _matvec(self, x): + return x + + def _rmatvec(self, x): + return x + + def _matmat(self, x): + return x + + def _adjoint(self): + return self + + +def aslinearoperator(A): + """Return A as a LinearOperator. + + 'A' may be any of the following types: + - ndarray + - matrix + - sparse matrix (e.g. csr_matrix, lil_matrix, etc.) + - LinearOperator + - An object with .shape and .matvec attributes + + See the LinearOperator documentation for additional information. + + Notes + ----- + If 'A' has no .dtype attribute, the data type is determined by calling + :func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this + call upon the linear operator creation. 
+ + Examples + -------- + >>> from scipy.sparse.linalg import aslinearoperator + >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32) + >>> aslinearoperator(M) + <2x3 MatrixLinearOperator with dtype=int32> + """ + if isinstance(A, LinearOperator): + return A + + elif isinstance(A, np.ndarray) or isinstance(A, np.matrix): + if A.ndim > 2: + raise ValueError('array must have ndim <= 2') + A = np.atleast_2d(np.asarray(A)) + return MatrixLinearOperator(A) + + elif isspmatrix(A): + return MatrixLinearOperator(A) + + else: + if hasattr(A, 'shape') and hasattr(A, 'matvec'): + rmatvec = None + dtype = None + + if hasattr(A, 'rmatvec'): + rmatvec = A.rmatvec + if hasattr(A, 'dtype'): + dtype = A.dtype + return LinearOperator(A.shape, A.matvec, + rmatvec=rmatvec, dtype=dtype) + + else: + raise TypeError('type not understood') diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/interface.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/interface.pyc new file mode 100644 index 0000000..8eb45dd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/interface.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/__init__.py new file mode 100644 index 0000000..a0ca1f8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/__init__.py @@ -0,0 +1,17 @@ +"Iterative Solvers for Sparse Linear Systems" + +from __future__ import division, print_function, absolute_import + +#from info import __doc__ +from .iterative import * +from .minres import minres +from .lgmres import lgmres +from .lsqr import lsqr +from .lsmr import lsmr +from ._gcrotmk import gcrotmk + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/__init__.pyc new file mode 100644 index 0000000..8c0047c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/_gcrotmk.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/_gcrotmk.py new file mode 100644 index 0000000..2083855 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/_gcrotmk.py @@ -0,0 +1,487 @@ +# Copyright (C) 2015, Pauli Virtanen <pav@iki.fi> +# Distributed under the same license as Scipy. 
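+#
+# GCROT(m,k) in brief: the solver carries up to k recycled direction pairs
+# (c_i, u_i) with c_i = A u_i, and every outer step runs an m-step flexible
+# GMRES (FGMRES) process kept orthogonal to the recycled C vectors; see the
+# references in the gcrotmk docstring below.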
+
+from __future__ import division, print_function, absolute_import
+
+import warnings
+import numpy as np
+from numpy.linalg import LinAlgError
+from scipy._lib.six import xrange
+from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
+from scipy.sparse.linalg.isolve.utils import make_system
+
+
+__all__ = ['gcrotmk']
+
+
+def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
+            prepend_outer_v=False):
+    """
+    FGMRES Arnoldi process, with optional projection or augmentation
+
+    Parameters
+    ----------
+    matvec : callable
+        Operation A*x
+    v0 : ndarray
+        Initial vector, normalized to nrm2(v0) == 1
+    m : int
+        Number of GMRES rounds
+    atol : float
+        Absolute tolerance for early exit
+    lpsolve : callable
+        Left preconditioner L
+    rpsolve : callable
+        Right preconditioner R
+    cs : list of ndarray
+        Columns of the matrix C in GCROT
+    outer_v : list of ndarrays
+        Augmentation vectors in LGMRES
+    prepend_outer_v : bool, optional
+        Whether augmentation vectors come before or after
+        Krylov iterates
+
+    Raises
+    ------
+    LinAlgError
+        If nans encountered
+
+    Returns
+    -------
+    Q, R : ndarray
+        QR decomposition of the upper Hessenberg H=QR
+    B : ndarray
+        Projections corresponding to matrix C
+    vs : list of ndarray
+        Columns of matrix V
+    zs : list of ndarray
+        Columns of matrix Z
+    y : ndarray
+        Solution to ||H y - e_1||_2 = min!
+    res : float
+        The final (preconditioned) residual norm
+
+    """
+
+    if lpsolve is None:
+        lpsolve = lambda x: x
+    if rpsolve is None:
+        rpsolve = lambda x: x
+
+    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
+
+    vs = [v0]
+    zs = []
+    y = None
+    res = np.nan
+
+    m = m + len(outer_v)
+
+    # Orthogonal projection coefficients
+    B = np.zeros((len(cs), m), dtype=v0.dtype)
+
+    # H is stored in QR factorized form
+    Q = np.ones((1, 1), dtype=v0.dtype)
+    R = np.zeros((1, 0), dtype=v0.dtype)
+
+    eps = np.finfo(v0.dtype).eps
+
+    breakdown = False
+
+    # FGMRES Arnoldi process
+    for j in xrange(m):
+        # L A Z = C B + V H
+
+        if prepend_outer_v and j < len(outer_v):
+            z, w = outer_v[j]
+        elif prepend_outer_v and j == len(outer_v):
+            z = rpsolve(v0)
+            w = None
+        elif not prepend_outer_v and j >= m - len(outer_v):
+            z, w = outer_v[j - (m - len(outer_v))]
+        else:
+            z = rpsolve(vs[-1])
+            w = None
+
+        if w is None:
+            w = lpsolve(matvec(z))
+        else:
+            # w is clobbered below
+            w = w.copy()
+
+        w_norm = nrm2(w)
+
+        # GCROT projection: L A -> (1 - C C^H) L A
+        # i.e. orthogonalize against C
+        for i, c in enumerate(cs):
+            alpha = dot(c, w)
+            B[i,j] = alpha
+            w = axpy(c, w, c.shape[0], -alpha)  # w -= alpha*c
+
+        # Orthogonalize against V
+        hcur = np.zeros(j+2, dtype=Q.dtype)
+        for i, v in enumerate(vs):
+            alpha = dot(v, w)
+            hcur[i] = alpha
+            w = axpy(v, w, v.shape[0], -alpha)  # w -= alpha*v
+        hcur[i+1] = nrm2(w)
+
+        with np.errstate(over='ignore', divide='ignore'):
+            # Careful with denormals
+            alpha = 1/hcur[-1]
+
+        if np.isfinite(alpha):
+            w = scal(alpha, w)
+
+        if not (hcur[-1] > eps * w_norm):
+            # w essentially in the span of previous vectors,
+            # or we have nans.  Bail out after updating the QR
+            # solution.
+ breakdown = True + + vs.append(w) + zs.append(z) + + # Arnoldi LSQ problem + + # Add new column to H=Q*R, padding other columns with zeros + Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F') + Q2[:j+1,:j+1] = Q + Q2[j+1,j+1] = 1 + + R2 = np.zeros((j+2, j), dtype=R.dtype, order='F') + R2[:j+1,:] = R + + Q, R = qr_insert(Q2, R2, hcur, j, which='col', + overwrite_qru=True, check_finite=False) + + # Transformed least squares problem + # || Q R y - inner_res_0 * e_1 ||_2 = min! + # Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0] + + # Residual is immediately known + res = abs(Q[0,-1]) + + # Check for termination + if res < atol or breakdown: + break + + if not np.isfinite(R[j,j]): + # nans encountered, bail out + raise LinAlgError() + + # -- Get the LSQ problem solution + + # The problem is triangular, but the condition number may be + # bad (or in case of breakdown the last diagonal entry may be + # zero), so use lstsq instead of trtrs. + y, _, _, _, = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj()) + + B = B[:,:j+1] + + return Q, R, B, vs, zs, y, res + + +def gcrotmk(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None, + m=20, k=None, CU=None, discard_C=False, truncate='oldest', + atol=None): + """ + Solve a matrix equation using flexible GCROT(m,k) algorithm. + + Parameters + ---------- + A : {sparse matrix, dense matrix, LinearOperator} + The real or complex N-by-N matrix of the linear system. + b : {array, matrix} + Right hand side of the linear system. Has shape (N,) or (N,1). + x0 : {array, matrix} + Starting guess for the solution. + tol, atol : float, optional + Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``. + The default for ``atol`` is `tol`. + + .. warning:: + + The default value for `atol` will be changed in a future release. + For future compatibility, specify `atol` explicitly. + maxiter : int, optional + Maximum number of iterations. Iteration will stop after maxiter + steps even if the specified tolerance has not been achieved. + M : {sparse matrix, dense matrix, LinearOperator}, optional + Preconditioner for A. The preconditioner should approximate the + inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner + can vary from iteration to iteration. Effective preconditioning + dramatically improves the rate of convergence, which implies that + fewer iterations are needed to reach a given error tolerance. + callback : function, optional + User-supplied function to call after each iteration. It is called + as callback(xk), where xk is the current solution vector. + m : int, optional + Number of inner FGMRES iterations per each outer iteration. + Default: 20 + k : int, optional + Number of vectors to carry between inner FGMRES iterations. + According to [2]_, good values are around m. + Default: m + CU : list of tuples, optional + List of tuples ``(c, u)`` which contain the columns of the matrices + C and U in the GCROT(m,k) algorithm. For details, see [2]_. + The list given and vectors contained in it are modified in-place. + If not given, start from empty matrices. The ``c`` elements in the + tuples can be ``None``, in which case the vectors are recomputed + via ``c = A u`` on start and orthogonalized as described in [3]_. + discard_C : bool, optional + Discard the C-vectors at the end. Useful if recycling Krylov subspaces + for different linear systems. + truncate : {'oldest', 'smallest'}, optional + Truncation scheme to use. 
Drop: oldest vectors, or vectors with + smallest singular values using the scheme discussed in [1,2]. + See [2]_ for detailed comparison. + Default: 'oldest' + + Returns + ------- + x : array or matrix + The solution found. + info : int + Provides convergence information: + + * 0 : successful exit + * >0 : convergence to tolerance not achieved, number of iterations + + References + ---------- + .. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace + methods'', SIAM J. Numer. Anal. 36, 864 (1999). + .. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant + of GCROT for solving nonsymmetric linear systems'', + SIAM J. Sci. Comput. 32, 172 (2010). + .. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti, + ''Recycling Krylov subspaces for sequences of linear systems'', + SIAM J. Sci. Comput. 28, 1651 (2006). + + """ + A,M,x,b,postprocess = make_system(A,M,x0,b) + + if not np.isfinite(b).all(): + raise ValueError("RHS must contain only finite numbers") + + if truncate not in ('oldest', 'smallest'): + raise ValueError("Invalid value for 'truncate': %r" % (truncate,)) + + if atol is None: + warnings.warn("scipy.sparse.linalg.gcrotmk called without specifying `atol`. " + "The default value will change in the future. To preserve " + "current behavior, set ``atol=tol``.", + category=DeprecationWarning, stacklevel=2) + atol = tol + + matvec = A.matvec + psolve = M.matvec + + if CU is None: + CU = [] + + if k is None: + k = m + + axpy, dot, scal = None, None, None + + r = b - matvec(x) + + axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r)) + + b_norm = nrm2(b) + + if discard_C: + CU[:] = [(None, u) for c, u in CU] + + # Reorthogonalize old vectors + if CU: + # Sort already existing vectors to the front + CU.sort(key=lambda cu: cu[0] is not None) + + # Fill-in missing ones + C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F') + us = [] + j = 0 + while CU: + # More memory-efficient: throw away old vectors as we go + c, u = CU.pop(0) + if c is None: + c = matvec(u) + C[:,j] = c + j += 1 + us.append(u) + + # Orthogonalize + Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True) + del C + + # C := Q + cs = list(Q.T) + + # U := U P R^-1, back-substitution + new_us = [] + for j in xrange(len(cs)): + u = us[P[j]] + for i in xrange(j): + u = axpy(us[P[i]], u, u.shape[0], -R[i,j]) + if abs(R[j,j]) < 1e-12 * abs(R[0,0]): + # discard rest of the vectors + break + u = scal(1.0/R[j,j], u) + new_us.append(u) + + # Form the new CU lists + CU[:] = list(zip(cs, new_us))[::-1] + + if CU: + axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,)) + + # Solve first the projection operation with respect to the CU + # vectors. 
This corresponds to modifying the initial guess to + # be + # + # x' = x + U y + # y = argmin_y || b - A (x + U y) ||^2 + # + # The solution is y = C^H (b - A x) + for c, u in CU: + yc = dot(c, r) + x = axpy(u, x, x.shape[0], yc) + r = axpy(c, r, r.shape[0], -yc) + + # GCROT main iteration + for j_outer in xrange(maxiter): + # -- callback + if callback is not None: + callback(x) + + beta = nrm2(r) + + # -- check stopping condition + beta_tol = max(atol, tol * b_norm) + + if beta <= beta_tol and (j_outer > 0 or CU): + # recompute residual to avoid rounding error + r = b - matvec(x) + beta = nrm2(r) + + if beta <= beta_tol: + j_outer = -1 + break + + ml = m + max(k - len(CU), 0) + + cs = [c for c, u in CU] + + try: + Q, R, B, vs, zs, y, pres = _fgmres(matvec, + r/beta, + ml, + rpsolve=psolve, + atol=max(atol, tol*b_norm)/beta, + cs=cs) + y *= beta + except LinAlgError: + # Floating point over/underflow, non-finite result from + # matmul etc. -- report failure. + break + + # + # At this point, + # + # [A U, A Z] = [C, V] G; G = [ I B ] + # [ 0 H ] + # + # where [C, V] has orthonormal columns, and r = beta v_0. Moreover, + # + # || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min! + # + # from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y + # + + # + # GCROT(m,k) update + # + + # Define new outer vectors + + # ux := (Z - U B) y + ux = zs[0]*y[0] + for z, yc in zip(zs[1:], y[1:]): + ux = axpy(z, ux, ux.shape[0], yc) # ux += z*yc + by = B.dot(y) + for cu, byc in zip(CU, by): + c, u = cu + ux = axpy(u, ux, ux.shape[0], -byc) # ux -= u*byc + + # cx := V H y + hy = Q.dot(R.dot(y)) + cx = vs[0] * hy[0] + for v, hyc in zip(vs[1:], hy[1:]): + cx = axpy(v, cx, cx.shape[0], hyc) # cx += v*hyc + + # Normalize cx, maintaining cx = A ux + # This new cx is orthogonal to the previous C, by construction + try: + alpha = 1/nrm2(cx) + if not np.isfinite(alpha): + raise FloatingPointError() + except (FloatingPointError, ZeroDivisionError): + # Cannot update, so skip it + continue + + cx = scal(alpha, cx) + ux = scal(alpha, ux) + + # Update residual and solution + gamma = dot(cx, r) + r = axpy(cx, r, r.shape[0], -gamma) # r -= gamma*cx + x = axpy(ux, x, x.shape[0], gamma) # x += gamma*ux + + # Truncate CU + if truncate == 'oldest': + while len(CU) >= k and CU: + del CU[0] + elif truncate == 'smallest': + if len(CU) >= k and CU: + # cf. 
[1,2] + D = solve(R[:-1,:].T, B.T).T + W, sigma, V = svd(D) + + # C := C W[:,:k-1], U := U W[:,:k-1] + new_CU = [] + for j, w in enumerate(W[:,:k-1].T): + c, u = CU[0] + c = c * w[0] + u = u * w[0] + for cup, wp in zip(CU[1:], w[1:]): + cp, up = cup + c = axpy(cp, c, c.shape[0], wp) + u = axpy(up, u, u.shape[0], wp) + + # Reorthogonalize at the same time; not necessary + # in exact arithmetic, but floating point error + # tends to accumulate here + for cp, up in new_CU: + alpha = dot(cp, c) + c = axpy(cp, c, c.shape[0], -alpha) + u = axpy(up, u, u.shape[0], -alpha) + alpha = nrm2(c) + c = scal(1.0/alpha, c) + u = scal(1.0/alpha, u) + + new_CU.append((c, u)) + CU[:] = new_CU + + # Add new vector to CU + CU.append((cx, ux)) + + # Include the solution vector to the span + CU.append((None, x.copy())) + if discard_C: + CU[:] = [(None, uz) for cz, uz in CU] + + return postprocess(x), j_outer + 1 diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/_gcrotmk.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/_gcrotmk.pyc new file mode 100644 index 0000000..e728370 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/_gcrotmk.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/_iterative.so b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/_iterative.so new file mode 100755 index 0000000..6e673cf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/_iterative.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/iterative.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/iterative.py new file mode 100644 index 0000000..3f66a4d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/iterative.py @@ -0,0 +1,753 @@ +"""Iterative methods for solving linear systems""" + +from __future__ import division, print_function, absolute_import + +__all__ = ['bicg','bicgstab','cg','cgs','gmres','qmr'] + +import warnings +import numpy as np + +from . import _iterative + +from scipy.sparse.linalg.interface import LinearOperator +from .utils import make_system +from scipy._lib._util import _aligned_zeros +from scipy._lib._threadsafety import non_reentrant + +_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} + + +# Part of the docstring common to all iterative solvers +common_doc1 = \ +""" +Parameters +---------- +A : {sparse matrix, dense matrix, LinearOperator}""" + +common_doc2 = \ +"""b : {array, matrix} + Right hand side of the linear system. Has shape (N,) or (N,1). + +Returns +------- +x : {array, matrix} + The converged solution. +info : integer + Provides convergence information: + 0 : successful exit + >0 : convergence to tolerance not achieved, number of iterations + <0 : illegal input or breakdown + +Other Parameters +---------------- +x0 : {array, matrix} + Starting guess for the solution. +tol, atol : float, optional + Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``. + The default for ``atol`` is ``'legacy'``, which emulates + a different legacy behavior. + + .. warning:: + + The default value for `atol` will be changed in a future release. + For future compatibility, specify `atol` explicitly. +maxiter : integer + Maximum number of iterations. Iteration will stop after maxiter + steps even if the specified tolerance has not been achieved. 
+M : {sparse matrix, dense matrix, LinearOperator} + Preconditioner for A. The preconditioner should approximate the + inverse of A. Effective preconditioning dramatically improves the + rate of convergence, which implies that fewer iterations are needed + to reach a given error tolerance. +callback : function + User-supplied function to call after each iteration. It is called + as callback(xk), where xk is the current solution vector. + +""" + + +def _stoptest(residual, atol): + """ + Successful termination condition for the solvers. + """ + resid = np.linalg.norm(residual) + if resid <= atol: + return resid, 1 + else: + return resid, 0 + + +def _get_atol(tol, atol, bnrm2, get_residual, routine_name): + """ + Parse arguments for absolute tolerance in termination condition. + + Parameters + ---------- + tol, atol : object + The arguments passed into the solver routine by user. + bnrm2 : float + 2-norm of the rhs vector. + get_residual : callable + Callable ``get_residual()`` that returns the initial value of + the residual. + routine_name : str + Name of the routine. + """ + + if atol is None: + warnings.warn("scipy.sparse.linalg.{name} called without specifying `atol`. " + "The default value will be changed in a future release. " + "For compatibility, specify a value for `atol` explicitly, e.g., " + "``{name}(..., atol=0)``, or to retain the old behavior " + "``{name}(..., atol='legacy')``".format(name=routine_name), + category=DeprecationWarning, stacklevel=4) + atol = 'legacy' + + tol = float(tol) + + if atol == 'legacy': + # emulate old legacy behavior + resid = get_residual() + if resid <= tol: + return 'exit' + if bnrm2 == 0: + return tol + else: + return tol * float(bnrm2) + else: + return max(float(atol), tol * float(bnrm2)) + + +def set_docstring(header, Ainfo, footer='', atol_default='0'): + def combine(fn): + fn.__doc__ = '\n'.join((header, common_doc1, + ' ' + Ainfo.replace('\n', '\n '), + common_doc2, footer)) + return fn + return combine + + +@set_docstring('Use BIConjugate Gradient iteration to solve ``Ax = b``.', + 'The real or complex N-by-N matrix of the linear system.\n' + 'It is required that the linear operator can produce\n' + '``Ax`` and ``A^T x``.') +@non_reentrant() +def bicg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None): + A,M,x,b,postprocess = make_system(A, M, x0, b) + + n = len(b) + if maxiter is None: + maxiter = n*10 + + matvec, rmatvec = A.matvec, A.rmatvec + psolve, rpsolve = M.matvec, M.rmatvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'bicgrevcom') + + get_residual = lambda: np.linalg.norm(matvec(x) - b) + atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicg') + if atol == 'exit': + return postprocess(x), 0 + + resid = atol + ndx1 = 1 + ndx2 = -1 + # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1 + work = _aligned_zeros(6*n,dtype=x.dtype) + ijob = 1 + info = 0 + ftflag = True + iter_ = maxiter + while True: + olditer = iter_ + x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ + revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) + if callback is not None and iter_ > olditer: + callback(x) + slice1 = slice(ndx1-1, ndx1-1+n) + slice2 = slice(ndx2-1, ndx2-1+n) + if (ijob == -1): + if callback is not None: + callback(x) + break + elif (ijob == 1): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(work[slice1]) + elif (ijob == 2): + work[slice2] *= sclr2 + work[slice2] += sclr1*rmatvec(work[slice1]) + elif (ijob == 3): + work[slice1] = psolve(work[slice2]) + 
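+        # (Reverse communication: the compiled core returns an ijob code
+        # asking the caller to apply an operator to a slice of the work
+        # array -- matvec, rmatvec, the preconditioner solves, or the
+        # stopping test -- and is then re-entered with the result.)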
elif (ijob == 4): + work[slice1] = rpsolve(work[slice2]) + elif (ijob == 5): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(x) + elif (ijob == 6): + if ftflag: + info = -1 + ftflag = False + resid, info = _stoptest(work[slice1], atol) + ijob = 2 + + if info > 0 and iter_ == maxiter and not (resid <= atol): + # info isn't set appropriately otherwise + info = iter_ + + return postprocess(x), info + + +@set_docstring('Use BIConjugate Gradient STABilized iteration to solve ' + '``Ax = b``.', + 'The real or complex N-by-N matrix of the linear system.') +@non_reentrant() +def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None): + A, M, x, b, postprocess = make_system(A, M, x0, b) + + n = len(b) + if maxiter is None: + maxiter = n*10 + + matvec = A.matvec + psolve = M.matvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'bicgstabrevcom') + + get_residual = lambda: np.linalg.norm(matvec(x) - b) + atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicgstab') + if atol == 'exit': + return postprocess(x), 0 + + resid = atol + ndx1 = 1 + ndx2 = -1 + # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1 + work = _aligned_zeros(7*n,dtype=x.dtype) + ijob = 1 + info = 0 + ftflag = True + iter_ = maxiter + while True: + olditer = iter_ + x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ + revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) + if callback is not None and iter_ > olditer: + callback(x) + slice1 = slice(ndx1-1, ndx1-1+n) + slice2 = slice(ndx2-1, ndx2-1+n) + if (ijob == -1): + if callback is not None: + callback(x) + break + elif (ijob == 1): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(work[slice1]) + elif (ijob == 2): + work[slice1] = psolve(work[slice2]) + elif (ijob == 3): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(x) + elif (ijob == 4): + if ftflag: + info = -1 + ftflag = False + resid, info = _stoptest(work[slice1], atol) + ijob = 2 + + if info > 0 and iter_ == maxiter and not (resid <= atol): + # info isn't set appropriately otherwise + info = iter_ + + return postprocess(x), info + + +@set_docstring('Use Conjugate Gradient iteration to solve ``Ax = b``.', + 'The real or complex N-by-N matrix of the linear system.\n' + '``A`` must represent a hermitian, positive definite matrix.') +@non_reentrant() +def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None): + A, M, x, b, postprocess = make_system(A, M, x0, b) + + n = len(b) + if maxiter is None: + maxiter = n*10 + + matvec = A.matvec + psolve = M.matvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'cgrevcom') + + get_residual = lambda: np.linalg.norm(matvec(x) - b) + atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cg') + if atol == 'exit': + return postprocess(x), 0 + + resid = atol + ndx1 = 1 + ndx2 = -1 + # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1 + work = _aligned_zeros(4*n,dtype=x.dtype) + ijob = 1 + info = 0 + ftflag = True + iter_ = maxiter + while True: + olditer = iter_ + x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ + revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) + if callback is not None and iter_ > olditer: + callback(x) + slice1 = slice(ndx1-1, ndx1-1+n) + slice2 = slice(ndx2-1, ndx2-1+n) + if (ijob == -1): + if callback is not None: + callback(x) + break + elif (ijob == 1): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(work[slice1]) + elif (ijob == 2): + work[slice1] = psolve(work[slice2]) + elif 
(ijob == 3): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(x) + elif (ijob == 4): + if ftflag: + info = -1 + ftflag = False + resid, info = _stoptest(work[slice1], atol) + if info == 1 and iter_ > 1: + # recompute residual and recheck, to avoid + # accumulating rounding error + work[slice1] = b - matvec(x) + resid, info = _stoptest(work[slice1], atol) + ijob = 2 + + if info > 0 and iter_ == maxiter and not (resid <= atol): + # info isn't set appropriately otherwise + info = iter_ + + return postprocess(x), info + + +@set_docstring('Use Conjugate Gradient Squared iteration to solve ``Ax = b``.', + 'The real-valued N-by-N matrix of the linear system.') +@non_reentrant() +def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None): + A, M, x, b, postprocess = make_system(A, M, x0, b) + + n = len(b) + if maxiter is None: + maxiter = n*10 + + matvec = A.matvec + psolve = M.matvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'cgsrevcom') + + get_residual = lambda: np.linalg.norm(matvec(x) - b) + atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cgs') + if atol == 'exit': + return postprocess(x), 0 + + resid = atol + ndx1 = 1 + ndx2 = -1 + # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1 + work = _aligned_zeros(7*n,dtype=x.dtype) + ijob = 1 + info = 0 + ftflag = True + iter_ = maxiter + while True: + olditer = iter_ + x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ + revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) + if callback is not None and iter_ > olditer: + callback(x) + slice1 = slice(ndx1-1, ndx1-1+n) + slice2 = slice(ndx2-1, ndx2-1+n) + if (ijob == -1): + if callback is not None: + callback(x) + break + elif (ijob == 1): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(work[slice1]) + elif (ijob == 2): + work[slice1] = psolve(work[slice2]) + elif (ijob == 3): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(x) + elif (ijob == 4): + if ftflag: + info = -1 + ftflag = False + resid, info = _stoptest(work[slice1], atol) + if info == 1 and iter_ > 1: + # recompute residual and recheck, to avoid + # accumulating rounding error + work[slice1] = b - matvec(x) + resid, info = _stoptest(work[slice1], atol) + ijob = 2 + + if info == -10: + # termination due to breakdown: check for convergence + resid, ok = _stoptest(b - matvec(x), atol) + if ok: + info = 0 + + if info > 0 and iter_ == maxiter and not (resid <= atol): + # info isn't set appropriately otherwise + info = iter_ + + return postprocess(x), info + + +@non_reentrant() +def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None, callback=None, + restrt=None, atol=None): + """ + Use Generalized Minimal RESidual iteration to solve ``Ax = b``. + + Parameters + ---------- + A : {sparse matrix, dense matrix, LinearOperator} + The real or complex N-by-N matrix of the linear system. + b : {array, matrix} + Right hand side of the linear system. Has shape (N,) or (N,1). + + Returns + ------- + x : {array, matrix} + The converged solution. + info : int + Provides convergence information: + * 0 : successful exit + * >0 : convergence to tolerance not achieved, number of iterations + * <0 : illegal input or breakdown + + Other parameters + ---------------- + x0 : {array, matrix} + Starting guess for the solution (a vector of zeros by default). + tol, atol : float, optional + Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``. 
+ The default for ``atol`` is ``'legacy'``, which emulates + a different legacy behavior. + + .. warning:: + + The default value for `atol` will be changed in a future release. + For future compatibility, specify `atol` explicitly. + restart : int, optional + Number of iterations between restarts. Larger values increase + iteration cost, but may be necessary for convergence. + Default is 20. + maxiter : int, optional + Maximum number of iterations (restart cycles). Iteration will stop + after maxiter steps even if the specified tolerance has not been + achieved. + M : {sparse matrix, dense matrix, LinearOperator} + Inverse of the preconditioner of A. M should approximate the + inverse of A and be easy to solve for (see Notes). Effective + preconditioning dramatically improves the rate of convergence, + which implies that fewer iterations are needed to reach a given + error tolerance. By default, no preconditioner is used. + callback : function + User-supplied function to call after each iteration. It is called + as callback(rk), where rk is the current residual vector. + restrt : int, optional + DEPRECATED - use `restart` instead. + + See Also + -------- + LinearOperator + + Notes + ----- + A preconditioner, P, is chosen such that P is close to A but easy to solve + for. The preconditioner parameter required by this routine is + ``M = P^-1``. The inverse should preferably not be calculated + explicitly. Rather, use the following template to produce M:: + + # Construct a linear operator that computes P^-1 * x. + import scipy.sparse.linalg as spla + M_x = lambda x: spla.spsolve(P, x) + M = spla.LinearOperator((n, n), M_x) + + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import gmres + >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float) + >>> b = np.array([2, 4, -1], dtype=float) + >>> x, exitCode = gmres(A, b) + >>> print(exitCode) # 0 indicates successful convergence + 0 + >>> np.allclose(A.dot(x), b) + True + """ + + # Change 'restrt' keyword to 'restart' + if restrt is None: + restrt = restart + elif restart is not None: + raise ValueError("Cannot specify both restart and restrt keywords. " + "Preferably use 'restart' only.") + + A, M, x, b,postprocess = make_system(A, M, x0, b) + + n = len(b) + if maxiter is None: + maxiter = n*10 + + if restrt is None: + restrt = 20 + restrt = min(restrt, n) + + matvec = A.matvec + psolve = M.matvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'gmresrevcom') + + bnrm2 = np.linalg.norm(b) + Mb_nrm2 = np.linalg.norm(psolve(b)) + get_residual = lambda: np.linalg.norm(matvec(x) - b) + atol = _get_atol(tol, atol, bnrm2, get_residual, 'gmres') + if atol == 'exit': + return postprocess(x), 0 + + if bnrm2 == 0: + return postprocess(b), 0 + + # Tolerance passed to GMRESREVCOM applies to the inner iteration + # and deals with the left-preconditioned residual. 
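+    # Rescaling by the norm of the preconditioned right-hand side keeps the
+    # inner stopping test roughly consistent with the outer test
+    # ``norm(residual) <= max(tol*norm(b), atol)``.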
+ ptol_max_factor = 1.0 + ptol = Mb_nrm2 * min(ptol_max_factor, atol / bnrm2) + resid = np.nan + presid = np.nan + ndx1 = 1 + ndx2 = -1 + # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1 + work = _aligned_zeros((6+restrt)*n,dtype=x.dtype) + work2 = _aligned_zeros((restrt+1)*(2*restrt+2),dtype=x.dtype) + ijob = 1 + info = 0 + ftflag = True + iter_ = maxiter + old_ijob = ijob + first_pass = True + resid_ready = False + iter_num = 1 + while True: + x, iter_, presid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ + revcom(b, x, restrt, work, work2, iter_, presid, info, ndx1, ndx2, ijob, ptol) + slice1 = slice(ndx1-1, ndx1-1+n) + slice2 = slice(ndx2-1, ndx2-1+n) + if (ijob == -1): # gmres success, update last residual + if resid_ready and callback is not None: + callback(presid / bnrm2) + resid_ready = False + break + elif (ijob == 1): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(x) + elif (ijob == 2): + work[slice1] = psolve(work[slice2]) + if not first_pass and old_ijob == 3: + resid_ready = True + + first_pass = False + elif (ijob == 3): + work[slice2] *= sclr2 + work[slice2] += sclr1*matvec(work[slice1]) + if resid_ready and callback is not None: + callback(presid / bnrm2) + resid_ready = False + iter_num = iter_num+1 + + elif (ijob == 4): + if ftflag: + info = -1 + ftflag = False + resid, info = _stoptest(work[slice1], atol) + + # Inner loop tolerance control + if info or presid > ptol: + ptol_max_factor = min(1.0, 1.5 * ptol_max_factor) + else: + # Inner loop tolerance OK, but outer loop not. + ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor) + + if resid != 0: + ptol = presid * min(ptol_max_factor, atol / resid) + else: + ptol = presid * ptol_max_factor + + old_ijob = ijob + ijob = 2 + + if iter_num > maxiter: + info = maxiter + break + + if info >= 0 and not (resid <= atol): + # info isn't set appropriately otherwise + info = maxiter + + return postprocess(x), info + + +@non_reentrant() +def qmr(A, b, x0=None, tol=1e-5, maxiter=None, M1=None, M2=None, callback=None, + atol=None): + """Use Quasi-Minimal Residual iteration to solve ``Ax = b``. + + Parameters + ---------- + A : {sparse matrix, dense matrix, LinearOperator} + The real-valued N-by-N matrix of the linear system. + It is required that the linear operator can produce + ``Ax`` and ``A^T x``. + b : {array, matrix} + Right hand side of the linear system. Has shape (N,) or (N,1). + + Returns + ------- + x : {array, matrix} + The converged solution. + info : integer + Provides convergence information: + 0 : successful exit + >0 : convergence to tolerance not achieved, number of iterations + <0 : illegal input or breakdown + + Other Parameters + ---------------- + x0 : {array, matrix} + Starting guess for the solution. + tol, atol : float, optional + Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``. + The default for ``atol`` is ``'legacy'``, which emulates + a different legacy behavior. + + .. warning:: + + The default value for `atol` will be changed in a future release. + For future compatibility, specify `atol` explicitly. + maxiter : integer + Maximum number of iterations. Iteration will stop after maxiter + steps even if the specified tolerance has not been achieved. + M1 : {sparse matrix, dense matrix, LinearOperator} + Left preconditioner for A. + M2 : {sparse matrix, dense matrix, LinearOperator} + Right preconditioner for A. Used together with the left + preconditioner M1. The matrix M1*A*M2 should have better + conditioned than A alone. 
+ callback : function + User-supplied function to call after each iteration. It is called + as callback(xk), where xk is the current solution vector. + + See Also + -------- + LinearOperator + + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import qmr + >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float) + >>> b = np.array([2, 4, -1], dtype=float) + >>> x, exitCode = qmr(A, b) + >>> print(exitCode) # 0 indicates successful convergence + 0 + >>> np.allclose(A.dot(x), b) + True + """ + A_ = A + A, M, x, b, postprocess = make_system(A, None, x0, b) + + if M1 is None and M2 is None: + if hasattr(A_,'psolve'): + def left_psolve(b): + return A_.psolve(b,'left') + + def right_psolve(b): + return A_.psolve(b,'right') + + def left_rpsolve(b): + return A_.rpsolve(b,'left') + + def right_rpsolve(b): + return A_.rpsolve(b,'right') + M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve) + M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve) + else: + def id(b): + return b + M1 = LinearOperator(A.shape, matvec=id, rmatvec=id) + M2 = LinearOperator(A.shape, matvec=id, rmatvec=id) + + n = len(b) + if maxiter is None: + maxiter = n*10 + + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'qmrrevcom') + + get_residual = lambda: np.linalg.norm(A.matvec(x) - b) + atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr') + if atol == 'exit': + return postprocess(x), 0 + + resid = atol + ndx1 = 1 + ndx2 = -1 + # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1 + work = _aligned_zeros(11*n,x.dtype) + ijob = 1 + info = 0 + ftflag = True + iter_ = maxiter + while True: + olditer = iter_ + x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \ + revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob) + if callback is not None and iter_ > olditer: + callback(x) + slice1 = slice(ndx1-1, ndx1-1+n) + slice2 = slice(ndx2-1, ndx2-1+n) + if (ijob == -1): + if callback is not None: + callback(x) + break + elif (ijob == 1): + work[slice2] *= sclr2 + work[slice2] += sclr1*A.matvec(work[slice1]) + elif (ijob == 2): + work[slice2] *= sclr2 + work[slice2] += sclr1*A.rmatvec(work[slice1]) + elif (ijob == 3): + work[slice1] = M1.matvec(work[slice2]) + elif (ijob == 4): + work[slice1] = M2.matvec(work[slice2]) + elif (ijob == 5): + work[slice1] = M1.rmatvec(work[slice2]) + elif (ijob == 6): + work[slice1] = M2.rmatvec(work[slice2]) + elif (ijob == 7): + work[slice2] *= sclr2 + work[slice2] += sclr1*A.matvec(x) + elif (ijob == 8): + if ftflag: + info = -1 + ftflag = False + resid, info = _stoptest(work[slice1], atol) + ijob = 2 + + if info > 0 and iter_ == maxiter and not (resid <= atol): + # info isn't set appropriately otherwise + info = iter_ + + return postprocess(x), info diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/iterative.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/iterative.pyc new file mode 100644 index 0000000..09e69ec Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/iterative.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lgmres.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lgmres.py new file mode 100644 index 0000000..5afab62 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lgmres.py @@ -0,0 +1,232 @@ +# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi> +# 
Distributed under the same license as Scipy. + +from __future__ import division, print_function, absolute_import + +import warnings +import numpy as np +from numpy.linalg import LinAlgError +from scipy._lib.six import xrange +from scipy.linalg import get_blas_funcs, get_lapack_funcs +from .utils import make_system + +from ._gcrotmk import _fgmres + +__all__ = ['lgmres'] + + +def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None, + inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True, + prepend_outer_v=False, atol=None): + """ + Solve a matrix equation using the LGMRES algorithm. + + The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems + in the convergence in restarted GMRES, and often converges in fewer + iterations. + + Parameters + ---------- + A : {sparse matrix, dense matrix, LinearOperator} + The real or complex N-by-N matrix of the linear system. + b : {array, matrix} + Right hand side of the linear system. Has shape (N,) or (N,1). + x0 : {array, matrix} + Starting guess for the solution. + tol, atol : float, optional + Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``. + The default for ``atol`` is `tol`. + + .. warning:: + + The default value for `atol` will be changed in a future release. + For future compatibility, specify `atol` explicitly. + maxiter : int, optional + Maximum number of iterations. Iteration will stop after maxiter + steps even if the specified tolerance has not been achieved. + M : {sparse matrix, dense matrix, LinearOperator}, optional + Preconditioner for A. The preconditioner should approximate the + inverse of A. Effective preconditioning dramatically improves the + rate of convergence, which implies that fewer iterations are needed + to reach a given error tolerance. + callback : function, optional + User-supplied function to call after each iteration. It is called + as callback(xk), where xk is the current solution vector. + inner_m : int, optional + Number of inner GMRES iterations per each outer iteration. + outer_k : int, optional + Number of vectors to carry between inner GMRES iterations. + According to [1]_, good values are in the range of 1...3. + However, note that if you want to use the additional vectors to + accelerate solving multiple similar problems, larger values may + be beneficial. + outer_v : list of tuples, optional + List containing tuples ``(v, Av)`` of vectors and corresponding + matrix-vector products, used to augment the Krylov subspace, and + carried between inner GMRES iterations. The element ``Av`` can + be `None` if the matrix-vector product should be re-evaluated. + This parameter is modified in-place by `lgmres`, and can be used + to pass "guess" vectors in and out of the algorithm when solving + similar problems. + store_outer_Av : bool, optional + Whether LGMRES should store also A*v in addition to vectors `v` + in the `outer_v` list. Default is True. + prepend_outer_v : bool, optional + Whether to put outer_v augmentation vectors before Krylov iterates. + In standard LGMRES, prepend_outer_v=False. + + Returns + ------- + x : array or matrix + The converged solution. + info : int + Provides convergence information: + + - 0 : successful exit + - >0 : convergence to tolerance not achieved, number of iterations + - <0 : illegal input or breakdown + + Notes + ----- + The LGMRES algorithm [1]_ [2]_ is designed to avoid the + slowing of convergence in restarted GMRES, due to alternating + residual vectors. 
It typically outperforms GMRES(m) with comparable memory
+    requirements, or is at least not much worse.
+
+    Another advantage of this algorithm is that you can supply it with
+    'guess' vectors in the `outer_v` argument that augment the Krylov
+    subspace. If the solution lies close to the span of these vectors,
+    the algorithm converges faster. This can be useful if several very
+    similar matrices need to be inverted one after another, such as in
+    Newton-Krylov iteration where the Jacobian matrix often changes
+    little in the nonlinear steps.
+
+    References
+    ----------
+    .. [1] A.H. Baker and E.R. Jessup and T. Manteuffel, "A Technique for
+           Accelerating the Convergence of Restarted GMRES", SIAM J. Matrix
+           Anal. Appl. 26, 962 (2005).
+    .. [2] A.H. Baker, "On Improving the Performance of the Linear Solver
+           restarted GMRES", PhD thesis, University of Colorado (2003).
+
+    Examples
+    --------
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import lgmres
+    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
+    >>> b = np.array([2, 4, -1], dtype=float)
+    >>> x, exitCode = lgmres(A, b)
+    >>> print(exitCode)  # 0 indicates successful convergence
+    0
+    >>> np.allclose(A.dot(x), b)
+    True
+    """
+    A, M, x, b, postprocess = make_system(A, M, x0, b)
+
+    if not np.isfinite(b).all():
+        raise ValueError("RHS must contain only finite numbers")
+
+    if atol is None:
+        warnings.warn("scipy.sparse.linalg.lgmres called without specifying `atol`. "
+                      "The default value will change in the future. To preserve "
+                      "current behavior, set ``atol=tol``.",
+                      category=DeprecationWarning, stacklevel=2)
+        atol = tol
+
+    matvec = A.matvec
+    psolve = M.matvec
+
+    if outer_v is None:
+        outer_v = []
+
+    axpy, dot, scal = None, None, None
+    nrm2 = get_blas_funcs('nrm2', [b])
+
+    b_norm = nrm2(b)
+    ptol_max_factor = 1.0
+
+    for k_outer in xrange(maxiter):
+        r_outer = matvec(x) - b
+
+        # -- callback
+        if callback is not None:
+            callback(x)
+
+        # -- determine input type routines
+        if axpy is None:
+            if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
+                x = x.astype(r_outer.dtype)
+            axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
+                                                   (x, r_outer))
+
+        # -- check stopping condition
+        r_norm = nrm2(r_outer)
+        if r_norm <= max(atol, tol * b_norm):
+            break
+
+        # -- inner LGMRES iteration
+        v0 = -psolve(r_outer)
+        inner_res_0 = nrm2(v0)
+
+        if inner_res_0 == 0:
+            rnorm = nrm2(r_outer)
+            raise RuntimeError("Preconditioner returned a zero vector; "
+                               "|v| ~ %.1g, |M v| = 0" % rnorm)
+
+        v0 = scal(1.0/inner_res_0, v0)
+
+        ptol = min(ptol_max_factor, max(atol, tol*b_norm)/r_norm)
+
+        try:
+            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
+                                               v0,
+                                               inner_m,
+                                               lpsolve=psolve,
+                                               atol=ptol,
+                                               outer_v=outer_v,
+                                               prepend_outer_v=prepend_outer_v)
+            y *= inner_res_0
+            if not np.isfinite(y).all():
+                # Overflow etc. in computation. There's no way to
+                # recover from this, so we have to bail out.
+                raise LinAlgError()
+        except LinAlgError:
+            # Floating point over/underflow, non-finite result from
+            # matmul etc. -- report failure.
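+            # The second return value follows the convention documented
+            # above: info > 0 reports the number of outer iterations
+            # completed without reaching the requested tolerance.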
+ return postprocess(x), k_outer + 1 + + # Inner loop tolerance control + if pres > ptol: + ptol_max_factor = min(1.0, 1.5 * ptol_max_factor) + else: + ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor) + + # -- GMRES terminated: eval solution + dx = zs[0]*y[0] + for w, yc in zip(zs[1:], y[1:]): + dx = axpy(w, dx, dx.shape[0], yc) # dx += w*yc + + # -- Store LGMRES augmentation vectors + nx = nrm2(dx) + if nx > 0: + if store_outer_Av: + q = Q.dot(R.dot(y)) + ax = vs[0]*q[0] + for v, qc in zip(vs[1:], q[1:]): + ax = axpy(v, ax, ax.shape[0], qc) + outer_v.append((dx/nx, ax/nx)) + else: + outer_v.append((dx/nx, None)) + + # -- Retain only a finite number of augmentation vectors + while len(outer_v) > outer_k: + del outer_v[0] + + # -- Apply step + x += dx + else: + # didn't converge ... + return postprocess(x), maxiter + + return postprocess(x), 0 diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lgmres.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lgmres.pyc new file mode 100644 index 0000000..9bbae22 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lgmres.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.py new file mode 100644 index 0000000..a77fe25 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.py @@ -0,0 +1,470 @@ +""" +Copyright (C) 2010 David Fong and Michael Saunders + +LSMR uses an iterative method. + +07 Jun 2010: Documentation updated +03 Jun 2010: First release version in Python + +David Chin-lung Fong clfong@stanford.edu +Institute for Computational and Mathematical Engineering +Stanford University + +Michael Saunders saunders@stanford.edu +Systems Optimization Laboratory +Dept of MS&E, Stanford University. + +""" + +from __future__ import division, print_function, absolute_import + +__all__ = ['lsmr'] + +from numpy import zeros, infty, atleast_1d +from numpy.linalg import norm +from math import sqrt +from scipy.sparse.linalg.interface import aslinearoperator + +from .lsqr import _sym_ortho + + +def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8, + maxiter=None, show=False, x0=None): + """Iterative solver for least-squares problems. + + lsmr solves the system of linear equations ``Ax = b``. If the system + is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``. + A is a rectangular matrix of dimension m-by-n, where all cases are + allowed: m = n, m > n, or m < n. B is a vector of length m. + The matrix A may be dense or sparse (usually sparse). + + Parameters + ---------- + A : {matrix, sparse matrix, ndarray, LinearOperator} + Matrix A in the linear system. + b : array_like, shape (m,) + Vector b in the linear system. + damp : float + Damping factor for regularized least-squares. `lsmr` solves + the regularized least-squares problem:: + + min ||(b) - ( A )x|| + ||(0) (damp*I) ||_2 + + where damp is a scalar. If damp is None or 0, the system + is solved without regularization. + atol, btol : float, optional + Stopping tolerances. `lsmr` continues iterations until a + certain backward error estimate is smaller than some quantity + depending on atol and btol. Let ``r = b - Ax`` be the + residual vector for the current approximate solution ``x``. + If ``Ax = b`` seems to be consistent, ``lsmr`` terminates + when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``. 
+ Otherwise, lsmr terminates when ``norm(A^{T} r) <= + atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say), + the final ``norm(r)`` should be accurate to about 6 + digits. (The final x will usually have fewer correct digits, + depending on ``cond(A)`` and the size of LAMBDA.) If `atol` + or `btol` is None, a default value of 1.0e-6 will be used. + Ideally, they should be estimates of the relative error in the + entries of A and B respectively. For example, if the entries + of `A` have 7 correct digits, set atol = 1e-7. This prevents + the algorithm from doing unnecessary work beyond the + uncertainty of the input data. + conlim : float, optional + `lsmr` terminates if an estimate of ``cond(A)`` exceeds + `conlim`. For compatible systems ``Ax = b``, conlim could be + as large as 1.0e+12 (say). For least-squares problems, + `conlim` should be less than 1.0e+8. If `conlim` is None, the + default value is 1e+8. Maximum precision can be obtained by + setting ``atol = btol = conlim = 0``, but the number of + iterations may then be excessive. + maxiter : int, optional + `lsmr` terminates if the number of iterations reaches + `maxiter`. The default is ``maxiter = min(m, n)``. For + ill-conditioned systems, a larger value of `maxiter` may be + needed. + show : bool, optional + Print iterations logs if ``show=True``. + x0 : array_like, shape (n,), optional + Initial guess of x, if None zeros are used. + + .. versionadded:: 1.0.0 + Returns + ------- + x : ndarray of float + Least-square solution returned. + istop : int + istop gives the reason for stopping:: + + istop = 0 means x=0 is a solution. If x0 was given, then x=x0 is a + solution. + = 1 means x is an approximate solution to A*x = B, + according to atol and btol. + = 2 means x approximately solves the least-squares problem + according to atol. + = 3 means COND(A) seems to be greater than CONLIM. + = 4 is the same as 1 with atol = btol = eps (machine + precision) + = 5 is the same as 2 with atol = eps. + = 6 is the same as 3 with CONLIM = 1/eps. + = 7 means ITN reached maxiter before the other stopping + conditions were satisfied. + + itn : int + Number of iterations used. + normr : float + ``norm(b-Ax)`` + normar : float + ``norm(A^T (b - Ax))`` + norma : float + ``norm(A)`` + conda : float + Condition number of A. + normx : float + ``norm(x)`` + + Notes + ----- + + .. versionadded:: 0.11.0 + + References + ---------- + .. [1] D. C.-L. Fong and M. A. Saunders, + "LSMR: An iterative algorithm for sparse least-squares problems", + SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011. + https://arxiv.org/abs/1006.0758 + .. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/ + + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import lsmr + >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float) + + The first example has the trivial solution `[0, 0]` + + >>> b = np.array([0., 0., 0.], dtype=float) + >>> x, istop, itn, normr = lsmr(A, b)[:4] + >>> istop + 0 + >>> x + array([ 0., 0.]) + + The stopping code `istop=0` returned indicates that a vector of zeros was + found as a solution. The returned solution `x` indeed contains `[0., 0.]`. + The next example has a non-trivial solution: + + >>> b = np.array([1., 0., -1.], dtype=float) + >>> x, istop, itn, normr = lsmr(A, b)[:4] + >>> istop + 1 + >>> x + array([ 1., -1.]) + >>> itn + 1 + >>> normr + 4.440892098500627e-16 + + As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance + limits. 
The given solution `[1., -1.]` obviously solves the equation. The + remaining return values include information about the number of iterations + (`itn=1`) and the remaining difference of left and right side of the solved + equation. + The final example demonstrates the behavior in the case where there is no + solution for the equation: + + >>> b = np.array([1., 0.01, -1.], dtype=float) + >>> x, istop, itn, normr = lsmr(A, b)[:4] + >>> istop + 2 + >>> x + array([ 1.00333333, -0.99666667]) + >>> A.dot(x)-b + array([ 0.00333333, -0.00333333, 0.00333333]) + >>> normr + 0.005773502691896255 + + `istop` indicates that the system is inconsistent and thus `x` is rather an + approximate solution to the corresponding least-squares problem. `normr` + contains the minimal distance that was found. + """ + + A = aslinearoperator(A) + b = atleast_1d(b) + if b.ndim > 1: + b = b.squeeze() + + msg = ('The exact solution is x = 0, or x = x0, if x0 was given ', + 'Ax - b is small enough, given atol, btol ', + 'The least-squares solution is good enough, given atol ', + 'The estimate of cond(Abar) has exceeded conlim ', + 'Ax - b is small enough for this machine ', + 'The least-squares solution is good enough for this machine', + 'Cond(Abar) seems to be too large for this machine ', + 'The iteration limit has been reached ') + + hdg1 = ' itn x(1) norm r norm A''r' + hdg2 = ' compatible LS norm A cond A' + pfreq = 20 # print frequency (for repeating the heading) + pcount = 0 # print counter + + m, n = A.shape + + # stores the num of singular values + minDim = min([m, n]) + + if maxiter is None: + maxiter = minDim + + if show: + print(' ') + print('LSMR Least-squares solution of Ax = b\n') + print('The matrix A has %8g rows and %8g cols' % (m, n)) + print('damp = %20.14e\n' % (damp)) + print('atol = %8.2e conlim = %8.2e\n' % (atol, conlim)) + print('btol = %8.2e maxiter = %8g\n' % (btol, maxiter)) + + u = b + normb = norm(b) + if x0 is None: + x = zeros(n) + beta = normb.copy() + else: + x = atleast_1d(x0) + u = u - A.matvec(x) + beta = norm(u) + + if beta > 0: + u = (1 / beta) * u + v = A.rmatvec(u) + alpha = norm(v) + else: + v = zeros(n) + alpha = 0 + + if alpha > 0: + v = (1 / alpha) * v + + # Initialize variables for 1st iteration. + + itn = 0 + zetabar = alpha * beta + alphabar = alpha + rho = 1 + rhobar = 1 + cbar = 1 + sbar = 0 + + h = v.copy() + hbar = zeros(n) + + # Initialize variables for estimation of ||r||. + + betadd = beta + betad = 0 + rhodold = 1 + tautildeold = 0 + thetatilde = 0 + zeta = 0 + d = 0 + + # Initialize variables for estimation of ||A|| and cond(A) + + normA2 = alpha * alpha + maxrbar = 0 + minrbar = 1e+100 + normA = sqrt(normA2) + condA = 1 + normx = 0 + + # Items for use in stopping rules, normb set earlier + istop = 0 + ctol = 0 + if conlim > 0: + ctol = 1 / conlim + normr = beta + + # Reverse the order here from the original matlab code because + # there was an error on return when arnorm==0 + normar = alpha * beta + if normar == 0: + if show: + print(msg[0]) + return x, istop, itn, normr, normar, normA, condA, normx + + if show: + print(' ') + print(hdg1, hdg2) + test1 = 1 + test2 = alpha / beta + str1 = '%6g %12.5e' % (itn, x[0]) + str2 = ' %10.3e %10.3e' % (normr, normar) + str3 = ' %8.1e %8.1e' % (test1, test2) + print(''.join([str1, str2, str3])) + + # Main iteration loop. + while itn < maxiter: + itn = itn + 1 + + # Perform the next step of the bidiagonalization to obtain the + # next beta, u, alpha, v. 
These satisfy the relations + # beta*u = a*v - alpha*u, + # alpha*v = A'*u - beta*v. + + u = A.matvec(v) - alpha * u + beta = norm(u) + + if beta > 0: + u = (1 / beta) * u + v = A.rmatvec(u) - beta * v + alpha = norm(v) + if alpha > 0: + v = (1 / alpha) * v + + # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}. + + # Construct rotation Qhat_{k,2k+1}. + + chat, shat, alphahat = _sym_ortho(alphabar, damp) + + # Use a plane rotation (Q_i) to turn B_i to R_i + + rhoold = rho + c, s, rho = _sym_ortho(alphahat, beta) + thetanew = s*alpha + alphabar = c*alpha + + # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar + + rhobarold = rhobar + zetaold = zeta + thetabar = sbar * rho + rhotemp = cbar * rho + cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew) + zeta = cbar * zetabar + zetabar = - sbar * zetabar + + # Update h, h_hat, x. + + hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar + x = x + (zeta / (rho * rhobar)) * hbar + h = v - (thetanew / rho) * h + + # Estimate of ||r||. + + # Apply rotation Qhat_{k,2k+1}. + betaacute = chat * betadd + betacheck = -shat * betadd + + # Apply rotation Q_{k,k+1}. + betahat = c * betaacute + betadd = -s * betaacute + + # Apply rotation Qtilde_{k-1}. + # betad = betad_{k-1} here. + + thetatildeold = thetatilde + ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar) + thetatilde = stildeold * rhobar + rhodold = ctildeold * rhobar + betad = - stildeold * betad + ctildeold * betahat + + # betad = betad_k here. + # rhodold = rhod_k here. + + tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold + taud = (zeta - thetatilde * tautildeold) / rhodold + d = d + betacheck * betacheck + normr = sqrt(d + (betad - taud)**2 + betadd * betadd) + + # Estimate ||A||. + normA2 = normA2 + beta * beta + normA = sqrt(normA2) + normA2 = normA2 + alpha * alpha + + # Estimate cond(A). + maxrbar = max(maxrbar, rhobarold) + if itn > 1: + minrbar = min(minrbar, rhobarold) + condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp) + + # Test for convergence. + + # Compute norms for convergence testing. + normar = abs(zetabar) + normx = norm(x) + + # Now use these norms to estimate certain other quantities, + # some of which will be small near a solution. + + test1 = normr / normb + if (normA * normr) != 0: + test2 = normar / (normA * normr) + else: + test2 = infty + test3 = 1 / condA + t1 = test1 / (1 + normA * normx / normb) + rtol = btol + atol * normA * normx / normb + + # The following tests guard against extremely small values of + # atol, btol or ctol. (The user may have set any or all of + # the parameters atol, btol, conlim to 0.) + # The effect is equivalent to the normAl tests using + # atol = eps, btol = eps, conlim = 1/eps. + + if itn >= maxiter: + istop = 7 + if 1 + test3 <= 1: + istop = 6 + if 1 + test2 <= 1: + istop = 5 + if 1 + t1 <= 1: + istop = 4 + + # Allow for tolerances set by the user. + + if test3 <= ctol: + istop = 3 + if test2 <= atol: + istop = 2 + if test1 <= rtol: + istop = 1 + + # See if it is time to print something. 
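+        # Output is throttled: print for small problems (n <= 40), the
+        # first and last ten iterations, every tenth iteration, iterations
+        # within 10% of a tolerance, and on termination.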
+ + if show: + if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \ + (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \ + (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \ + (istop != 0): + + if pcount >= pfreq: + pcount = 0 + print(' ') + print(hdg1, hdg2) + pcount = pcount + 1 + str1 = '%6g %12.5e' % (itn, x[0]) + str2 = ' %10.3e %10.3e' % (normr, normar) + str3 = ' %8.1e %8.1e' % (test1, test2) + str4 = ' %8.1e %8.1e' % (normA, condA) + print(''.join([str1, str2, str3, str4])) + + if istop > 0: + break + + # Print the stopping condition. + + if show: + print(' ') + print('LSMR finished') + print(msg[istop]) + print('istop =%8g normr =%8.1e' % (istop, normr)) + print(' normA =%8.1e normAr =%8.1e' % (normA, normar)) + print('itn =%8g condA =%8.1e' % (itn, condA)) + print(' normx =%8.1e' % (normx)) + print(str1, str2) + print(str3, str4) + + return x, istop, itn, normr, normar, normA, condA, normx diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.pyc new file mode 100644 index 0000000..1ca69be Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsqr.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsqr.py new file mode 100644 index 0000000..8016345 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsqr.py @@ -0,0 +1,568 @@ +"""Sparse Equations and Least Squares. + +The original Fortran code was written by C. C. Paige and M. A. Saunders as +described in + +C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear +equations and sparse least squares, TOMS 8(1), 43--71 (1982). + +C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear +equations and least-squares problems, TOMS 8(2), 195--209 (1982). + +It is licensed under the following BSD license: + +Copyright (c) 2006, Systems Optimization Laboratory +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of Stanford University nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The Fortran code was translated to Python for use in CVXOPT by Jeffery +Kline with contributions by Mridul Aanjaneya and Bob Myhill. + +Adapted for SciPy by Stefan van der Walt. + +""" + +from __future__ import division, print_function, absolute_import + +__all__ = ['lsqr'] + +import numpy as np +from math import sqrt +from scipy.sparse.linalg.interface import aslinearoperator + +eps = np.finfo(np.float64).eps + + +def _sym_ortho(a, b): + """ + Stable implementation of Givens rotation. + + Notes + ----- + The routine 'SymOrtho' was added for numerical stability. This is + recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of + ``1/eps`` in some important places (see, for example text following + "Compute the next plane rotation Qk" in minres.py). + + References + ---------- + .. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations + and Least-Squares Problems", Dissertation, + http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf + + """ + if b == 0: + return np.sign(a), 0, abs(a) + elif a == 0: + return 0, np.sign(b), abs(b) + elif abs(b) > abs(a): + tau = a / b + s = np.sign(b) / sqrt(1 + tau * tau) + c = s * tau + r = b / s + else: + tau = b / a + c = np.sign(a) / sqrt(1+tau*tau) + s = c * tau + r = a / c + return c, s, r + + +def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8, + iter_lim=None, show=False, calc_var=False, x0=None): + """Find the least-squares solution to a large, sparse, linear system + of equations. + + The function solves ``Ax = b`` or ``min ||b - Ax||^2`` or + ``min ||Ax - b||^2 + d^2 ||x||^2``. + + The matrix A may be square or rectangular (over-determined or + under-determined), and may have any rank. + + :: + + 1. Unsymmetric equations -- solve A*x = b + + 2. Linear least squares -- solve A*x = b + in the least-squares sense + + 3. Damped least squares -- solve ( A )*x = ( b ) + ( damp*I ) ( 0 ) + in the least-squares sense + + Parameters + ---------- + A : {sparse matrix, ndarray, LinearOperator} + Representation of an m-by-n matrix. It is required that + the linear operator can produce ``Ax`` and ``A^T x``. + b : array_like, shape (m,) + Right-hand side vector ``b``. + damp : float + Damping coefficient. + atol, btol : float, optional + Stopping tolerances. If both are 1.0e-9 (say), the final + residual norm should be accurate to about 9 digits. (The + final x will usually have fewer correct digits, depending on + cond(A) and the size of damp.) + conlim : float, optional + Another stopping tolerance. lsqr terminates if an estimate of + ``cond(A)`` exceeds `conlim`. For compatible systems ``Ax = + b``, `conlim` could be as large as 1.0e+12 (say). For + least-squares problems, conlim should be less than 1.0e+8. + Maximum precision can be obtained by setting ``atol = btol = + conlim = zero``, but the number of iterations may then be + excessive. + iter_lim : int, optional + Explicit limitation on number of iterations (for safety). + show : bool, optional + Display an iteration log. 
+ calc_var : bool, optional + Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``. + x0 : array_like, shape (n,), optional + Initial guess of x, if None zeros are used. + + .. versionadded:: 1.0.0 + + Returns + ------- + x : ndarray of float + The final solution. + istop : int + Gives the reason for termination. + 1 means x is an approximate solution to Ax = b. + 2 means x approximately solves the least-squares problem. + itn : int + Iteration number upon termination. + r1norm : float + ``norm(r)``, where ``r = b - Ax``. + r2norm : float + ``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``. Equal to `r1norm` if + ``damp == 0``. + anorm : float + Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``. + acond : float + Estimate of ``cond(Abar)``. + arnorm : float + Estimate of ``norm(A'*r - damp^2*x)``. + xnorm : float + ``norm(x)`` + var : ndarray of float + If ``calc_var`` is True, estimates all diagonals of + ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A + + damp^2*I)^{-1}``. This is well defined if A has full column + rank or ``damp > 0``. (Not sure what var means if ``rank(A) + < n`` and ``damp = 0.``) + + Notes + ----- + LSQR uses an iterative method to approximate the solution. The + number of iterations required to reach a certain accuracy depends + strongly on the scaling of the problem. Poor scaling of the rows + or columns of A should therefore be avoided where possible. + + For example, in problem 1 the solution is unaltered by + row-scaling. If a row of A is very small or large compared to + the other rows of A, the corresponding row of ( A b ) should be + scaled up or down. + + In problems 1 and 2, the solution x is easily recovered + following column-scaling. Unless better information is known, + the nonzero columns of A should be scaled so that they all have + the same Euclidean norm (e.g., 1.0). + + In problem 3, there is no freedom to re-scale if damp is + nonzero. However, the value of damp should be assigned only + after attention has been paid to the scaling of A. + + The parameter damp is intended to help regularize + ill-conditioned systems, by preventing the true solution from + being very large. Another aid to regularization is provided by + the parameter acond, which may be used to terminate iterations + before the computed solution becomes very large. + + If some initial estimate ``x0`` is known and if ``damp == 0``, + one could proceed as follows: + + 1. Compute a residual vector ``r0 = b - A*x0``. + 2. Use LSQR to solve the system ``A*dx = r0``. + 3. Add the correction dx to obtain a final solution ``x = x0 + dx``. + + This requires that ``x0`` be available before and after the call + to LSQR. To judge the benefits, suppose LSQR takes k1 iterations + to solve A*x = b and k2 iterations to solve A*dx = r0. + If x0 is "good", norm(r0) will be smaller than norm(b). + If the same stopping tolerances atol and btol are used for each + system, k1 and k2 will be similar, but the final solution x0 + dx + should be more accurate. The only way to reduce the total work + is to use a larger stopping tolerance for the second system. + If some value btol is suitable for A*x = b, the larger value + btol*norm(b)/norm(r0) should be suitable for A*dx = r0. + + Preconditioning is another way to reduce the number of iterations. + If it is possible to solve a related system ``M*x = b`` + efficiently, where M approximates A in some helpful way (e.g. 
M - + A has low rank or its elements are small relative to those of A), + LSQR may converge more rapidly on the system ``A*M(inverse)*z = + b``, after which x can be recovered by solving M*x = z. + + If A is symmetric, LSQR should not be used! + + Alternatives are the symmetric conjugate-gradient method (cg) + and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that + applies to any symmetric A and will converge more rapidly than + LSQR. If A is positive definite, there are other implementations + of symmetric cg that require slightly less work per iteration than + SYMMLQ (but will take the same number of iterations). + + References + ---------- + .. [1] C. C. Paige and M. A. Saunders (1982a). + "LSQR: An algorithm for sparse linear equations and + sparse least squares", ACM TOMS 8(1), 43-71. + .. [2] C. C. Paige and M. A. Saunders (1982b). + "Algorithm 583. LSQR: Sparse linear equations and least + squares problems", ACM TOMS 8(2), 195-209. + .. [3] M. A. Saunders (1995). "Solution of sparse rectangular + systems using LSQR and CRAIG", BIT 35, 588-604. + + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import lsqr + >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float) + + The first example has the trivial solution `[0, 0]` + + >>> b = np.array([0., 0., 0.], dtype=float) + >>> x, istop, itn, normr = lsqr(A, b)[:4] + The exact solution is x = 0 + >>> istop + 0 + >>> x + array([ 0., 0.]) + + The stopping code `istop=0` returned indicates that a vector of zeros was + found as a solution. The returned solution `x` indeed contains `[0., 0.]`. + The next example has a non-trivial solution: + + >>> b = np.array([1., 0., -1.], dtype=float) + >>> x, istop, itn, r1norm = lsqr(A, b)[:4] + >>> istop + 1 + >>> x + array([ 1., -1.]) + >>> itn + 1 + >>> r1norm + 4.440892098500627e-16 + + As indicated by `istop=1`, `lsqr` found a solution obeying the tolerance + limits. The given solution `[1., -1.]` obviously solves the equation. The + remaining return values include information about the number of iterations + (`itn=1`) and the remaining difference of left and right side of the solved + equation. + The final example demonstrates the behavior in the case where there is no + solution for the equation: + + >>> b = np.array([1., 0.01, -1.], dtype=float) + >>> x, istop, itn, r1norm = lsqr(A, b)[:4] + >>> istop + 2 + >>> x + array([ 1.00333333, -0.99666667]) + >>> A.dot(x)-b + array([ 0.00333333, -0.00333333, 0.00333333]) + >>> r1norm + 0.005773502691896255 + + `istop` indicates that the system is inconsistent and thus `x` is rather an + approximate solution to the corresponding least-squares problem. `r1norm` + contains the norm of the minimal residual that was found. 
+ """ + A = aslinearoperator(A) + b = np.atleast_1d(b) + if b.ndim > 1: + b = b.squeeze() + + m, n = A.shape + if iter_lim is None: + iter_lim = 2 * n + var = np.zeros(n) + + msg = ('The exact solution is x = 0 ', + 'Ax - b is small enough, given atol, btol ', + 'The least-squares solution is good enough, given atol ', + 'The estimate of cond(Abar) has exceeded conlim ', + 'Ax - b is small enough for this machine ', + 'The least-squares solution is good enough for this machine', + 'Cond(Abar) seems to be too large for this machine ', + 'The iteration limit has been reached ') + + if show: + print(' ') + print('LSQR Least-squares solution of Ax = b') + str1 = 'The matrix A has %8g rows and %8g cols' % (m, n) + str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var) + str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim) + str4 = 'btol = %8.2e iter_lim = %8g' % (btol, iter_lim) + print(str1) + print(str2) + print(str3) + print(str4) + + itn = 0 + istop = 0 + ctol = 0 + if conlim > 0: + ctol = 1/conlim + anorm = 0 + acond = 0 + dampsq = damp**2 + ddnorm = 0 + res2 = 0 + xnorm = 0 + xxnorm = 0 + z = 0 + cs2 = -1 + sn2 = 0 + + """ + Set up the first vectors u and v for the bidiagonalization. + These satisfy beta*u = b - A*x, alfa*v = A'*u. + """ + u = b + bnorm = np.linalg.norm(b) + if x0 is None: + x = np.zeros(n) + beta = bnorm.copy() + else: + x = np.asarray(x0) + u = u - A.matvec(x) + beta = np.linalg.norm(u) + + if beta > 0: + u = (1/beta) * u + v = A.rmatvec(u) + alfa = np.linalg.norm(v) + else: + v = x.copy() + alfa = 0 + + if alfa > 0: + v = (1/alfa) * v + w = v.copy() + + rhobar = alfa + phibar = beta + rnorm = beta + r1norm = rnorm + r2norm = rnorm + + # Reverse the order here from the original matlab code because + # there was an error on return when arnorm==0 + arnorm = alfa * beta + if arnorm == 0: + print(msg[0]) + return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var + + head1 = ' Itn x[0] r1norm r2norm ' + head2 = ' Compatible LS Norm A Cond A' + + if show: + print(' ') + print(head1, head2) + test1 = 1 + test2 = alfa / beta + str1 = '%6g %12.5e' % (itn, x[0]) + str2 = ' %10.3e %10.3e' % (r1norm, r2norm) + str3 = ' %8.1e %8.1e' % (test1, test2) + print(str1, str2, str3) + + # Main iteration loop. + while itn < iter_lim: + itn = itn + 1 + """ + % Perform the next step of the bidiagonalization to obtain the + % next beta, u, alfa, v. These satisfy the relations + % beta*u = a*v - alfa*u, + % alfa*v = A'*u - beta*v. + """ + u = A.matvec(v) - alfa * u + beta = np.linalg.norm(u) + + if beta > 0: + u = (1/beta) * u + anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2) + v = A.rmatvec(u) - beta * v + alfa = np.linalg.norm(v) + if alfa > 0: + v = (1 / alfa) * v + + # Use a plane rotation to eliminate the damping parameter. + # This alters the diagonal (rhobar) of the lower-bidiagonal matrix. + rhobar1 = sqrt(rhobar**2 + damp**2) + cs1 = rhobar / rhobar1 + sn1 = damp / rhobar1 + psi = sn1 * phibar + phibar = cs1 * phibar + + # Use a plane rotation to eliminate the subdiagonal element (beta) + # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix. + cs, sn, rho = _sym_ortho(rhobar1, beta) + + theta = sn * alfa + rhobar = -cs * alfa + phi = cs * phibar + phibar = sn * phibar + tau = sn * phi + + # Update x and w. 
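+        # phi/rho is the step length along the current direction w;
+        # -theta/rho folds w into the next direction built from the
+        # new Lanczos vector v.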
+ t1 = phi / rho + t2 = -theta / rho + dk = (1 / rho) * w + + x = x + t1 * w + w = v + t2 * w + ddnorm = ddnorm + np.linalg.norm(dk)**2 + + if calc_var: + var = var + dk**2 + + # Use a plane rotation on the right to eliminate the + # super-diagonal element (theta) of the upper-bidiagonal matrix. + # Then use the result to estimate norm(x). + delta = sn2 * rho + gambar = -cs2 * rho + rhs = phi - delta * z + zbar = rhs / gambar + xnorm = sqrt(xxnorm + zbar**2) + gamma = sqrt(gambar**2 + theta**2) + cs2 = gambar / gamma + sn2 = theta / gamma + z = rhs / gamma + xxnorm = xxnorm + z**2 + + # Test for convergence. + # First, estimate the condition of the matrix Abar, + # and the norms of rbar and Abar'rbar. + acond = anorm * sqrt(ddnorm) + res1 = phibar**2 + res2 = res2 + psi**2 + rnorm = sqrt(res1 + res2) + arnorm = alfa * abs(tau) + + # Distinguish between + # r1norm = ||b - Ax|| and + # r2norm = rnorm in current code + # = sqrt(r1norm^2 + damp^2*||x||^2). + # Estimate r1norm from + # r1norm = sqrt(r2norm^2 - damp^2*||x||^2). + # Although there is cancellation, it might be accurate enough. + r1sq = rnorm**2 - dampsq * xxnorm + r1norm = sqrt(abs(r1sq)) + if r1sq < 0: + r1norm = -r1norm + r2norm = rnorm + + # Now use these norms to estimate certain other quantities, + # some of which will be small near a solution. + test1 = rnorm / bnorm + test2 = arnorm / (anorm * rnorm + eps) + test3 = 1 / (acond + eps) + t1 = test1 / (1 + anorm * xnorm / bnorm) + rtol = btol + atol * anorm * xnorm / bnorm + + # The following tests guard against extremely small values of + # atol, btol or ctol. (The user may have set any or all of + # the parameters atol, btol, conlim to 0.) + # The effect is equivalent to the normal tests using + # atol = eps, btol = eps, conlim = 1/eps. + if itn >= iter_lim: + istop = 7 + if 1 + test3 <= 1: + istop = 6 + if 1 + test2 <= 1: + istop = 5 + if 1 + t1 <= 1: + istop = 4 + + # Allow for tolerances set by the user. + if test3 <= ctol: + istop = 3 + if test2 <= atol: + istop = 2 + if test1 <= rtol: + istop = 1 + + # See if it is time to print something. + prnt = False + if n <= 40: + prnt = True + if itn <= 10: + prnt = True + if itn >= iter_lim-10: + prnt = True + # if itn%10 == 0: prnt = True + if test3 <= 2*ctol: + prnt = True + if test2 <= 10*atol: + prnt = True + if test1 <= 10*rtol: + prnt = True + if istop != 0: + prnt = True + + if prnt: + if show: + str1 = '%6g %12.5e' % (itn, x[0]) + str2 = ' %10.3e %10.3e' % (r1norm, r2norm) + str3 = ' %8.1e %8.1e' % (test1, test2) + str4 = ' %8.1e %8.1e' % (anorm, acond) + print(str1, str2, str3, str4) + + if istop != 0: + break + + # End of iteration loop. + # Print the stopping condition. 
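+    # The summary echoes the stopping message and the final estimates
+    # (istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm).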
+ if show: + print(' ') + print('LSQR finished') + print(msg[istop]) + print(' ') + str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm) + str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm) + str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm) + str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm) + print(str1 + ' ' + str2) + print(str3 + ' ' + str4) + print(' ') + + return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsqr.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsqr.pyc new file mode 100644 index 0000000..106b518 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsqr.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/minres.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/minres.py new file mode 100644 index 0000000..1e064d7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/minres.py @@ -0,0 +1,365 @@ +from __future__ import division, print_function, absolute_import + +from numpy import sqrt, inner, zeros, inf, finfo +from numpy.linalg import norm + +from .utils import make_system + +__all__ = ['minres'] + + +def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, + M=None, callback=None, show=False, check=False): + """ + Use MINimum RESidual iteration to solve Ax=b + + MINRES minimizes norm(A*x - b) for a real symmetric matrix A. Unlike + the Conjugate Gradient method, A can be indefinite or singular. + + If shift != 0 then the method solves (A - shift*I)x = b + + Parameters + ---------- + A : {sparse matrix, dense matrix, LinearOperator} + The real symmetric N-by-N matrix of the linear system + b : {array, matrix} + Right hand side of the linear system. Has shape (N,) or (N,1). + + Returns + ------- + x : {array, matrix} + The converged solution. + info : integer + Provides convergence information: + 0 : successful exit + >0 : convergence to tolerance not achieved, number of iterations + <0 : illegal input or breakdown + + Other Parameters + ---------------- + x0 : {array, matrix} + Starting guess for the solution. + tol : float + Tolerance to achieve. The algorithm terminates when the relative + residual is below `tol`. + maxiter : integer + Maximum number of iterations. Iteration will stop after maxiter + steps even if the specified tolerance has not been achieved. + M : {sparse matrix, dense matrix, LinearOperator} + Preconditioner for A. The preconditioner should approximate the + inverse of A. Effective preconditioning dramatically improves the + rate of convergence, which implies that fewer iterations are needed + to reach a given error tolerance. + callback : function + User-supplied function to call after each iteration. It is called + as callback(xk), where xk is the current solution vector. + + References + ---------- + Solution of sparse indefinite systems of linear equations, + C. C. Paige and M. A. Saunders (1975), + SIAM J. Numer. Anal. 12(4), pp. 617-629. + https://web.stanford.edu/group/SOL/software/minres/ + + This file is a translation of the following MATLAB implementation: + https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip + + """ + A, M, x, b, postprocess = make_system(A, M, x0, b) + + matvec = A.matvec + psolve = M.matvec + + first = 'Enter minres. ' + last = 'Exit minres. ' + + n = A.shape[0] + + if maxiter is None: + maxiter = 5 * n + + msg = [' beta2 = 0. 
If M = I, b and x are eigenvectors ', # -1 + ' beta1 = 0. The exact solution is x = 0 ', # 0 + ' A solution to Ax = b was found, given rtol ', # 1 + ' A least-squares solution was found, given rtol ', # 2 + ' Reasonable accuracy achieved, given eps ', # 3 + ' x has converged to an eigenvector ', # 4 + ' acond has exceeded 0.1/eps ', # 5 + ' The iteration limit was reached ', # 6 + ' A does not define a symmetric matrix ', # 7 + ' M does not define a symmetric matrix ', # 8 + ' M does not define a pos-def preconditioner '] # 9 + + if show: + print(first + 'Solution of symmetric Ax = b') + print(first + 'n = %3g shift = %23.14e' % (n,shift)) + print(first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol)) + print() + + istop = 0 + itn = 0 + Anorm = 0 + Acond = 0 + rnorm = 0 + ynorm = 0 + + xtype = x.dtype + + eps = finfo(xtype).eps + + x = zeros(n, dtype=xtype) + + # Set up y and v for the first Lanczos vector v1. + # y = beta1 P' v1, where P = C**(-1). + # v is really P' v1. + + y = b + r1 = b + + y = psolve(b) + + beta1 = inner(b,y) + + if beta1 < 0: + raise ValueError('indefinite preconditioner') + elif beta1 == 0: + return (postprocess(x), 0) + + beta1 = sqrt(beta1) + + if check: + # are these too strict? + + # see if A is symmetric + w = matvec(y) + r2 = matvec(w) + s = inner(w,w) + t = inner(y,r2) + z = abs(s - t) + epsa = (s + eps) * eps**(1.0/3.0) + if z > epsa: + raise ValueError('non-symmetric matrix') + + # see if M is symmetric + r2 = psolve(y) + s = inner(y,y) + t = inner(r1,r2) + z = abs(s - t) + epsa = (s + eps) * eps**(1.0/3.0) + if z > epsa: + raise ValueError('non-symmetric preconditioner') + + # Initialize other quantities + oldb = 0 + beta = beta1 + dbar = 0 + epsln = 0 + qrnorm = beta1 + phibar = beta1 + rhs1 = beta1 + rhs2 = 0 + tnorm2 = 0 + gmax = 0 + gmin = finfo(xtype).max + cs = -1 + sn = 0 + w = zeros(n, dtype=xtype) + w2 = zeros(n, dtype=xtype) + r2 = r1 + + if show: + print() + print() + print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|') + + while itn < maxiter: + itn += 1 + + s = 1.0/beta + v = s*y + + y = matvec(v) + y = y - shift * v + + if itn >= 2: + y = y - (beta/oldb)*r1 + + alfa = inner(v,y) + y = y - (alfa/beta)*r2 + r1 = r2 + r2 = y + y = psolve(r2) + oldb = beta + beta = inner(r2,y) + if beta < 0: + raise ValueError('non-symmetric matrix') + beta = sqrt(beta) + tnorm2 += alfa**2 + oldb**2 + beta**2 + + if itn == 1: + if beta/beta1 <= 10*eps: + istop = -1 # Terminate later + + # Apply previous rotation Qk-1 to get + # [deltak epslnk+1] = [cs sn][dbark 0 ] + # [gbar k dbar k+1] [sn -cs][alfak betak+1]. + + oldeps = epsln + delta = cs * dbar + sn * alfa # delta1 = 0 deltak + gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k + epsln = sn * beta # epsln2 = 0 epslnk+1 + dbar = - cs * beta # dbar 2 = beta2 dbar k+1 + root = norm([gbar, dbar]) + Arnorm = phibar * root + + # Compute the next plane rotation Qk + + gamma = norm([gbar, beta]) # gammak + gamma = max(gamma, eps) + cs = gbar / gamma # ck + sn = beta / gamma # sk + phi = cs * phibar # phik + phibar = sn * phibar # phibark+1 + + # Update x. + + denom = 1.0/gamma + w1 = w2 + w2 = w + w = (v - oldeps*w1 - delta*w2) * denom + x = x + phi*w + + # Go round again. + + gmax = max(gmax, gamma) + gmin = min(gmin, gamma) + z = rhs1 / gamma + rhs1 = rhs2 - delta*z + rhs2 = - epsln*z + + # Estimate various norms and test for convergence. 
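+            # In outline (matching the code below): test1 estimates
+            # ||r|| / (||A|| ||x||) and test2 estimates ||Ar|| / (||A|| ||r||);
+            # the iteration stops once either drops below `tol`, when the
+            # iteration limit is hit, or when the condition estimate Acond
+            # exceeds 0.1/eps.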
+ + Anorm = sqrt(tnorm2) + ynorm = norm(x) + epsa = Anorm * eps + epsx = Anorm * ynorm * eps + epsr = Anorm * ynorm * tol + diag = gbar + + if diag == 0: + diag = epsa + + qrnorm = phibar + rnorm = qrnorm + if ynorm == 0 or Anorm == 0: + test1 = inf + else: + test1 = rnorm / (Anorm*ynorm) # ||r|| / (||A|| ||x||) + if Anorm == 0: + test2 = inf + else: + test2 = root / Anorm # ||Ar|| / (||A|| ||r||) + + # Estimate cond(A). + # In this version we look at the diagonals of R in the + # factorization of the lower Hessenberg matrix, Q * H = R, + # where H is the tridiagonal matrix from Lanczos with one + # extra row, beta(k+1) e_k^T. + + Acond = gmax/gmin + + # See if any of the stopping criteria are satisfied. + # In rare cases, istop is already -1 from above (Abar = const*I). + + if istop == 0: + t1 = 1 + test1 # These tests work if tol < eps + t2 = 1 + test2 + if t2 <= 1: + istop = 2 + if t1 <= 1: + istop = 1 + + if itn >= maxiter: + istop = 6 + if Acond >= 0.1/eps: + istop = 4 + if epsx >= beta1: + istop = 3 + # if rnorm <= epsx : istop = 2 + # if rnorm <= epsr : istop = 1 + if test2 <= tol: + istop = 2 + if test1 <= tol: + istop = 1 + + # See if it is time to print something. + + prnt = False + if n <= 40: + prnt = True + if itn <= 10: + prnt = True + if itn >= maxiter-10: + prnt = True + if itn % 10 == 0: + prnt = True + if qrnorm <= 10*epsx: + prnt = True + if qrnorm <= 10*epsr: + prnt = True + if Acond <= 1e-2/eps: + prnt = True + if istop != 0: + prnt = True + + if show and prnt: + str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1) + str2 = ' %10.3e' % (test2,) + str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm) + + print(str1 + str2 + str3) + + if itn % 10 == 0: + print() + + if callback is not None: + callback(x) + + if istop != 0: + break # TODO check this + + if show: + print() + print(last + ' istop = %3g itn =%5g' % (istop,itn)) + print(last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond)) + print(last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm)) + print(last + ' Arnorm = %12.4e' % (Arnorm,)) + print(last + msg[istop+1]) + + if istop == 6: + info = maxiter + else: + info = 0 + + return (postprocess(x),info) + + +if __name__ == '__main__': + from scipy import ones, arange + from scipy.linalg import norm + from scipy.sparse import spdiags + + n = 10 + + residuals = [] + + def cb(x): + residuals.append(norm(b - A*x)) + + # A = poisson((10,),format='csr') + A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr') + M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr') + A.psolve = M.matvec + b = 0*ones(A.shape[0]) + x = minres(A,b,tol=1e-12,maxiter=None,callback=cb) + # x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0] diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/minres.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/minres.pyc new file mode 100644 index 0000000..af7d56d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/minres.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/setup.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/setup.py new file mode 100644 index 0000000..7f6815a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/setup.py @@ -0,0 +1,44 @@ +from __future__ import division, print_function, absolute_import + +from os.path import join + + +def configuration(parent_package='',top_path=None): + from 
scipy._build_utils.system_info import get_info, NotFoundError + from numpy.distutils.misc_util import Configuration + from scipy._build_utils import get_g77_abi_wrappers + + config = Configuration('isolve',parent_package,top_path) + + lapack_opt = get_info('lapack_opt') + + # iterative methods + methods = ['BiCGREVCOM.f.src', + 'BiCGSTABREVCOM.f.src', + 'CGREVCOM.f.src', + 'CGSREVCOM.f.src', +# 'ChebyREVCOM.f.src', + 'GMRESREVCOM.f.src', +# 'JacobiREVCOM.f.src', + 'QMRREVCOM.f.src', +# 'SORREVCOM.f.src' + ] + + Util = ['getbreak.f.src'] + sources = Util + methods + ['_iterative.pyf.src'] + sources = [join('iterative', x) for x in sources] + sources += get_g77_abi_wrappers(lapack_opt) + + config.add_extension('_iterative', + sources=sources, + extra_info=lapack_opt) + + config.add_data_dir('tests') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/setup.pyc new file mode 100644 index 0000000..7059b15 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/__init__.pyc new file mode 100644 index 0000000..418282c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/demo_lgmres.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/demo_lgmres.py new file mode 100644 index 0000000..006664f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/demo_lgmres.py @@ -0,0 +1,63 @@ +from __future__ import division, print_function, absolute_import + +import scipy.sparse.linalg as la +import scipy.sparse as sp +import scipy.io as io +import numpy as np +import sys + +#problem = "SPARSKIT/drivcav/e05r0100" +problem = "SPARSKIT/drivcav/e05r0200" +#problem = "Harwell-Boeing/sherman/sherman1" +#problem = "misc/hamm/add32" + +mm = np.lib._datasource.Repository('ftp://math.nist.gov/pub/MatrixMarket2/') +f = mm.open('%s.mtx.gz' % problem) +Am = io.mmread(f).tocsr() +f.close() + +f = mm.open('%s_rhs1.mtx.gz' % problem) +b = np.array(io.mmread(f)).ravel() +f.close() + +count = [0] + + +def matvec(v): + count[0] += 1 + sys.stderr.write('%d\r' % count[0]) + return Am*v + + +A = la.LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype) + +M = 100 + +print("MatrixMarket problem %s" % problem) +print("Invert %d x %d matrix; nnz = %d" % (Am.shape[0], Am.shape[1], Am.nnz)) + +count[0] = 0 +x0, info = la.gmres(A, b, restrt=M, tol=1e-14) +count_0 = count[0] +err0 = np.linalg.norm(Am*x0 - b) / np.linalg.norm(b) +print("GMRES(%d):" % M, count_0, "matvecs, residual", err0) +if info != 0: + print("Didn't converge") + +count[0] = 0 +x1, info = la.lgmres(A, b, inner_m=M-6*2, outer_k=6, tol=1e-14) +count_1 = count[0] +err1 = np.linalg.norm(Am*x1 - b) / np.linalg.norm(b) +print("LGMRES(%d,6) [same 
memory req.]:" % (M-2*6), count_1, + "matvecs, residual:", err1) +if info != 0: + print("Didn't converge") + +count[0] = 0 +x2, info = la.lgmres(A, b, inner_m=M-6, outer_k=6, tol=1e-14) +count_2 = count[0] +err2 = np.linalg.norm(Am*x2 - b) / np.linalg.norm(b) +print("LGMRES(%d,6) [same subspace size]:" % (M-6), count_2, + "matvecs, residual:", err2) +if info != 0: + print("Didn't converge") diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/demo_lgmres.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/demo_lgmres.pyc new file mode 100644 index 0000000..3aa5842 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/demo_lgmres.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py new file mode 100644 index 0000000..e891560 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python +"""Tests for the linalg.isolve.gcrotmk module +""" + +from __future__ import division, print_function, absolute_import + +from numpy.testing import assert_, assert_allclose, assert_equal +from scipy._lib._numpy_compat import suppress_warnings + +import numpy as np +from numpy import zeros, array, allclose +from scipy.linalg import norm +from scipy.sparse import csr_matrix, eye, rand + +from scipy.sparse.linalg.interface import LinearOperator +from scipy.sparse.linalg import splu +from scipy.sparse.linalg.isolve import gcrotmk, gmres + + +Am = csr_matrix(array([[-2,1,0,0,0,9], + [1,-2,1,0,5,0], + [0,1,-2,1,0,0], + [0,0,1,-2,1,0], + [0,3,0,1,-2,1], + [1,0,0,0,1,-2]])) +b = array([1,2,3,4,5,6]) +count = [0] + + +def matvec(v): + count[0] += 1 + return Am*v + + +A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype) + + +def do_solve(**kw): + count[0] = 0 + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw) + count_0 = count[0] + assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b)) + return x0, count_0 + + +class TestGCROTMK(object): + def test_preconditioner(self): + # Check that preconditioning works + pc = splu(Am.tocsc()) + M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype) + + x0, count_0 = do_solve() + x1, count_1 = do_solve(M=M) + + assert_equal(count_1, 3) + assert_(count_1 < count_0/2) + assert_(allclose(x1, x0, rtol=1e-14)) + + def test_arnoldi(self): + np.random.rand(1234) + + A = eye(2000) + rand(2000, 2000, density=5e-4) + b = np.random.rand(2000) + + # The inner arnoldi should be equivalent to gmres + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1) + x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1) + + assert_equal(flag0, 1) + assert_equal(flag1, 1) + assert_(np.linalg.norm(A.dot(x0) - b) > 1e-3) + + assert_allclose(x0, x1) + + def test_cornercase(self): + np.random.seed(1234) + + # Rounding error may prevent convergence with tol=0 --- ensure + # that the return values in this case are correct, and no + # exceptions are raised + + for n in [3, 5, 10, 100]: + A = 2*eye(n) + + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without 
specifying.*") + b = np.ones(n) + x, info = gcrotmk(A, b, maxiter=10) + assert_equal(info, 0) + assert_allclose(A.dot(x) - b, 0, atol=1e-14) + + x, info = gcrotmk(A, b, tol=0, maxiter=10) + if info == 0: + assert_allclose(A.dot(x) - b, 0, atol=1e-14) + + b = np.random.rand(n) + x, info = gcrotmk(A, b, maxiter=10) + assert_equal(info, 0) + assert_allclose(A.dot(x) - b, 0, atol=1e-14) + + x, info = gcrotmk(A, b, tol=0, maxiter=10) + if info == 0: + assert_allclose(A.dot(x) - b, 0, atol=1e-14) + + def test_nans(self): + A = eye(3, format='lil') + A[1,1] = np.nan + b = np.ones(3) + + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x, info = gcrotmk(A, b, tol=0, maxiter=10) + assert_equal(info, 1) + + def test_truncate(self): + np.random.seed(1234) + A = np.random.rand(30, 30) + np.eye(30) + b = np.random.rand(30) + + for truncate in ['oldest', 'smallest']: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4, + maxiter=200) + assert_equal(info, 0) + assert_allclose(A.dot(x) - b, 0, atol=1e-3) + + def test_CU(self): + for discard_C in (True, False): + # Check that C,U behave as expected + CU = [] + x0, count_0 = do_solve(CU=CU, discard_C=discard_C) + assert_(len(CU) > 0) + assert_(len(CU) <= 6) + + if discard_C: + for c, u in CU: + assert_(c is None) + + # should converge immediately + x1, count_1 = do_solve(CU=CU, discard_C=discard_C) + if discard_C: + assert_equal(count_1, 2 + len(CU)) + else: + assert_equal(count_1, 3) + assert_(count_1 <= count_0/2) + assert_allclose(x1, x0, atol=1e-14) + + def test_denormals(self): + # Check that no warnings are emitted if the matrix contains + # numbers for which 1/x has no float representation, and that + # the solver behaves properly. 
+ A = np.array([[1, 2], [3, 4]], dtype=float) + A *= 100 * np.nextafter(0, 1) + + b = np.array([1, 1]) + + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + xp, info = gcrotmk(A, b) + + if info == 0: + assert_allclose(A.dot(xp), b) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.pyc new file mode 100644 index 0000000..ee77929 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_iterative.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_iterative.py new file mode 100644 index 0000000..878101c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_iterative.py @@ -0,0 +1,635 @@ +""" Test functions for the sparse.linalg.isolve module +""" + +from __future__ import division, print_function, absolute_import + +import itertools +import numpy as np + +from numpy.testing import (assert_equal, assert_array_equal, + assert_, assert_allclose) +import pytest +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +from numpy import zeros, arange, array, ones, eye, iscomplexobj +from scipy.linalg import norm +from scipy.sparse import spdiags, csr_matrix, SparseEfficiencyWarning + +from scipy.sparse.linalg import LinearOperator, aslinearoperator +from scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk + +# TODO check that method preserve shape and type +# TODO test both preconditioner methods + + +class Case(object): + def __init__(self, name, A, b=None, skip=None, nonconvergence=None): + self.name = name + self.A = A + if b is None: + self.b = arange(A.shape[0], dtype=float) + else: + self.b = b + if skip is None: + self.skip = [] + else: + self.skip = skip + if nonconvergence is None: + self.nonconvergence = [] + else: + self.nonconvergence = nonconvergence + + def __repr__(self): + return "<%s>" % self.name + + +class IterativeParams(object): + def __init__(self): + # list of tuples (solver, symmetric, positive_definite ) + solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk] + sym_solvers = [minres, cg] + posdef_solvers = [cg] + real_solvers = [minres] + + self.solvers = solvers + + # list of tuples (A, symmetric, positive_definite ) + self.cases = [] + + # Symmetric and Positive Definite + N = 40 + data = ones((3,N)) + data[0,:] = 2 + data[1,:] = -1 + data[2,:] = -1 + Poisson1D = spdiags(data, [0,-1,1], N, N, format='csr') + self.Poisson1D = Case("poisson1d", Poisson1D) + self.cases.append(Case("poisson1d", Poisson1D)) + # note: minres fails for single precision + self.cases.append(Case("poisson1d", Poisson1D.astype('f'), + skip=[minres])) + + # Symmetric and Negative Definite + self.cases.append(Case("neg-poisson1d", -Poisson1D, + skip=posdef_solvers)) + # note: minres fails for single precision + self.cases.append(Case("neg-poisson1d", (-Poisson1D).astype('f'), + skip=posdef_solvers + [minres])) + + # Symmetric and Indefinite + data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]],dtype='d') + RandDiag = spdiags(data, [0], 10, 10, format='csr') + self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers)) + self.cases.append(Case("rand-diag", 
RandDiag.astype('f'), + skip=posdef_solvers)) + + # Random real-valued + np.random.seed(1234) + data = np.random.rand(4, 4) + self.cases.append(Case("rand", data, skip=posdef_solvers+sym_solvers)) + self.cases.append(Case("rand", data.astype('f'), + skip=posdef_solvers+sym_solvers)) + + # Random symmetric real-valued + np.random.seed(1234) + data = np.random.rand(4, 4) + data = data + data.T + self.cases.append(Case("rand-sym", data, skip=posdef_solvers)) + self.cases.append(Case("rand-sym", data.astype('f'), + skip=posdef_solvers)) + + # Random pos-def symmetric real + np.random.seed(1234) + data = np.random.rand(9, 9) + data = np.dot(data.conj(), data.T) + self.cases.append(Case("rand-sym-pd", data)) + # note: minres fails for single precision + self.cases.append(Case("rand-sym-pd", data.astype('f'), + skip=[minres])) + + # Random complex-valued + np.random.seed(1234) + data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4) + self.cases.append(Case("rand-cmplx", data, + skip=posdef_solvers+sym_solvers+real_solvers)) + self.cases.append(Case("rand-cmplx", data.astype('F'), + skip=posdef_solvers+sym_solvers+real_solvers)) + + # Random hermitian complex-valued + np.random.seed(1234) + data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4) + data = data + data.T.conj() + self.cases.append(Case("rand-cmplx-herm", data, + skip=posdef_solvers+real_solvers)) + self.cases.append(Case("rand-cmplx-herm", data.astype('F'), + skip=posdef_solvers+real_solvers)) + + # Random pos-def hermitian complex-valued + np.random.seed(1234) + data = np.random.rand(9, 9) + 1j*np.random.rand(9, 9) + data = np.dot(data.conj(), data.T) + self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers)) + self.cases.append(Case("rand-cmplx-sym-pd", data.astype('F'), + skip=real_solvers)) + + # Non-symmetric and Positive Definite + # + # cgs, qmr, and bicg fail to converge on this one + # -- algorithmic limitation apparently + data = ones((2,10)) + data[0,:] = 2 + data[1,:] = -1 + A = spdiags(data, [0,-1], 10, 10, format='csr') + self.cases.append(Case("nonsymposdef", A, + skip=sym_solvers+[cgs, qmr, bicg])) + self.cases.append(Case("nonsymposdef", A.astype('F'), + skip=sym_solvers+[cgs, qmr, bicg])) + + # Symmetric, non-pd, hitting cgs/bicg/bicgstab/qmr breakdown + A = np.array([[0, 0, 0, 0, 0, 1, -1, -0, -0, -0, -0], + [0, 0, 0, 0, 0, 2, -0, -1, -0, -0, -0], + [0, 0, 0, 0, 0, 2, -0, -0, -1, -0, -0], + [0, 0, 0, 0, 0, 2, -0, -0, -0, -1, -0], + [0, 0, 0, 0, 0, 1, -0, -0, -0, -0, -1], + [1, 2, 2, 2, 1, 0, -0, -0, -0, -0, -0], + [-1, 0, 0, 0, 0, 0, -1, -0, -0, -0, -0], + [0, -1, 0, 0, 0, 0, -0, -1, -0, -0, -0], + [0, 0, -1, 0, 0, 0, -0, -0, -1, -0, -0], + [0, 0, 0, -1, 0, 0, -0, -0, -0, -1, -0], + [0, 0, 0, 0, -1, 0, -0, -0, -0, -0, -1]], dtype=float) + b = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype=float) + assert (A == A.T).all() + self.cases.append(Case("sym-nonpd", A, b, + skip=posdef_solvers, + nonconvergence=[cgs,bicg,bicgstab,qmr])) + + +params = IterativeParams() + + +def check_maxiter(solver, case): + A = case.A + tol = 1e-12 + + b = case.b + x0 = 0*b + + residuals = [] + + def callback(x): + residuals.append(norm(b - case.A*x)) + + x, info = solver(A, b, x0=x0, tol=tol, maxiter=1, callback=callback) + + assert_equal(len(residuals), 1) + assert_equal(info, 1) + + +def test_maxiter(): + case = params.Poisson1D + for solver in params.solvers: + if solver in case.skip: + continue + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + check_maxiter(solver, 
case) + + +def assert_normclose(a, b, tol=1e-8): + residual = norm(a - b) + tolerance = tol*norm(b) + msg = "residual (%g) not smaller than tolerance %g" % (residual, tolerance) + assert_(residual < tolerance, msg=msg) + + +def check_convergence(solver, case): + A = case.A + + if A.dtype.char in "dD": + tol = 1e-8 + else: + tol = 1e-2 + + b = case.b + x0 = 0*b + + x, info = solver(A, b, x0=x0, tol=tol) + + assert_array_equal(x0, 0*b) # ensure that x0 is not overwritten + if solver not in case.nonconvergence: + assert_equal(info,0) + assert_normclose(A.dot(x), b, tol=tol) + else: + assert_(info != 0) + assert_(np.linalg.norm(A.dot(x) - b) <= np.linalg.norm(b)) + + +def test_convergence(): + for solver in params.solvers: + for case in params.cases: + if solver in case.skip: + continue + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + check_convergence(solver, case) + + +def check_precond_dummy(solver, case): + tol = 1e-8 + + def identity(b,which=None): + """trivial preconditioner""" + return b + + A = case.A + + M,N = A.shape + D = spdiags([1.0/A.diagonal()], [0], M, N) + + b = case.b + x0 = 0*b + + precond = LinearOperator(A.shape, identity, rmatvec=identity) + + if solver is qmr: + x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol) + else: + x, info = solver(A, b, M=precond, x0=x0, tol=tol) + assert_equal(info,0) + assert_normclose(A.dot(x), b, tol) + + A = aslinearoperator(A) + A.psolve = identity + A.rpsolve = identity + + x, info = solver(A, b, x0=x0, tol=tol) + assert_equal(info,0) + assert_normclose(A*x, b, tol=tol) + + +def test_precond_dummy(): + case = params.Poisson1D + for solver in params.solvers: + if solver in case.skip: + continue + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + check_precond_dummy(solver, case) + + +def check_precond_inverse(solver, case): + tol = 1e-8 + + def inverse(b,which=None): + """inverse preconditioner""" + A = case.A + if not isinstance(A, np.ndarray): + A = A.todense() + return np.linalg.solve(A, b) + + def rinverse(b,which=None): + """inverse preconditioner""" + A = case.A + if not isinstance(A, np.ndarray): + A = A.todense() + return np.linalg.solve(A.T, b) + + matvec_count = [0] + + def matvec(b): + matvec_count[0] += 1 + return case.A.dot(b) + + def rmatvec(b): + matvec_count[0] += 1 + return case.A.T.dot(b) + + b = case.b + x0 = 0*b + + A = LinearOperator(case.A.shape, matvec, rmatvec=rmatvec) + precond = LinearOperator(case.A.shape, inverse, rmatvec=rinverse) + + # Solve with preconditioner + matvec_count = [0] + x, info = solver(A, b, M=precond, x0=x0, tol=tol) + + assert_equal(info, 0) + assert_normclose(case.A.dot(x), b, tol) + + # Solution should be nearly instant + assert_(matvec_count[0] <= 3, repr(matvec_count)) + + +def test_precond_inverse(): + case = params.Poisson1D + for solver in params.solvers: + if solver in case.skip: + continue + if solver is qmr: + continue + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + check_precond_inverse(solver, case) + + +def test_gmres_basic(): + A = np.vander(np.arange(10) + 1)[:, ::-1] + b = np.zeros(10) + b[0] = 1 + x = np.linalg.solve(A, b) + + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x_gm, err = gmres(A, b, restart=5, maxiter=1) + + assert_allclose(x_gm[0], 0.359, rtol=1e-2) + + +def test_reentrancy(): + non_reentrant = [cg, cgs, bicg, bicgstab, gmres, qmr] + 
reentrant = [lgmres, minres, gcrotmk] + for solver in reentrant + non_reentrant: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + _check_reentrancy(solver, solver in reentrant) + + +def _check_reentrancy(solver, is_reentrant): + def matvec(x): + A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]]) + y, info = solver(A, x) + assert_equal(info, 0) + return y + b = np.array([1, 1./2, 1./3]) + op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec, + dtype=b.dtype) + + if not is_reentrant: + assert_raises(RuntimeError, solver, op, b) + else: + y, info = solver(op, b) + assert_equal(info, 0) + assert_allclose(y, [1, 1, 1]) + + +@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, lgmres, gcrotmk]) +def test_atol(solver): + # TODO: minres. It didn't historically use absolute tolerances, so + # fixing it is less urgent. + + np.random.seed(1234) + A = np.random.rand(10, 10) + A = A.dot(A.T) + 10 * np.eye(10) + b = 1e3 * np.random.rand(10) + b_norm = np.linalg.norm(b) + + tols = np.r_[0, np.logspace(np.log10(1e-10), np.log10(1e2), 7), np.inf] + + # Check effect of badly scaled preconditioners + M0 = np.random.randn(10, 10) + M0 = M0.dot(M0.T) + Ms = [None, 1e-6 * M0, 1e6 * M0] + + for M, tol, atol in itertools.product(Ms, tols, tols): + if tol == 0 and atol == 0: + continue + + if solver is qmr: + if M is not None: + M = aslinearoperator(M) + M2 = aslinearoperator(np.eye(10)) + else: + M2 = None + x, info = solver(A, b, M1=M, M2=M2, tol=tol, atol=atol) + else: + x, info = solver(A, b, M=M, tol=tol, atol=atol) + assert_equal(info, 0) + + residual = A.dot(x) - b + err = np.linalg.norm(residual) + atol2 = tol * b_norm + assert_(err <= max(atol, atol2)) + + +@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk]) +def test_zero_rhs(solver): + np.random.seed(1234) + A = np.random.rand(10, 10) + A = A.dot(A.T) + 10 * np.eye(10) + + b = np.zeros(10) + tols = np.r_[np.logspace(np.log10(1e-10), np.log10(1e2), 7)] + + for tol in tols: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + + x, info = solver(A, b, tol=tol) + assert_equal(info, 0) + assert_allclose(x, 0, atol=1e-15) + + x, info = solver(A, b, tol=tol, x0=ones(10)) + assert_equal(info, 0) + assert_allclose(x, 0, atol=tol) + + if solver is not minres: + x, info = solver(A, b, tol=tol, atol=0, x0=ones(10)) + if info == 0: + assert_allclose(x, 0) + + x, info = solver(A, b, tol=tol, atol=tol) + assert_equal(info, 0) + assert_allclose(x, 0, atol=1e-300) + + x, info = solver(A, b, tol=tol, atol=0) + assert_equal(info, 0) + assert_allclose(x, 0, atol=1e-300) + + +@pytest.mark.parametrize("solver", [ + gmres, qmr, lgmres, + pytest.param(cgs, marks=pytest.mark.xfail), + pytest.param(bicg, marks=pytest.mark.xfail), + pytest.param(bicgstab, marks=pytest.mark.xfail), + pytest.param(gcrotmk, marks=pytest.mark.xfail)]) +def test_maxiter_worsening(solver): + # Check error does not grow (boundlessly) with increasing maxiter. + # This can occur due to the solvers hitting close to breakdown, + # which they should detect and halt as necessary. + # cf. 
gh-9100 + + # Singular matrix, rhs numerically not in range + A = np.array([[-0.1112795288033378, 0, 0, 0.16127952880333685], + [0, -0.13627952880333782+6.283185307179586j, 0, 0], + [0, 0, -0.13627952880333782-6.283185307179586j, 0], + [0.1112795288033368, 0j, 0j, -0.16127952880333785]]) + v = np.ones(4) + best_error = np.inf + + for maxiter in range(1, 20): + x, info = solver(A, v, maxiter=maxiter, tol=1e-8, atol=0) + + if info == 0: + assert_(np.linalg.norm(A.dot(x) - v) <= 1e-8*np.linalg.norm(v)) + + error = np.linalg.norm(A.dot(x) - v) + best_error = min(best_error, error) + + # Check with slack + assert_(error <= 5*best_error) + + +#------------------------------------------------------------------------------ + +class TestQMR(object): + def test_leftright_precond(self): + """Check that QMR works with left and right preconditioners""" + + from scipy.sparse.linalg.dsolve import splu + from scipy.sparse.linalg.interface import LinearOperator + + n = 100 + + dat = ones(n) + A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1],n,n) + b = arange(n,dtype='d') + + L = spdiags([-dat/2, dat], [-1,0], n, n) + U = spdiags([4*dat, -dat], [0,1], n, n) + + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format") + L_solver = splu(L) + U_solver = splu(U) + + def L_solve(b): + return L_solver.solve(b) + + def U_solve(b): + return U_solver.solve(b) + + def LT_solve(b): + return L_solver.solve(b,'T') + + def UT_solve(b): + return U_solver.solve(b,'T') + + M1 = LinearOperator((n,n), matvec=L_solve, rmatvec=LT_solve) + M2 = LinearOperator((n,n), matvec=U_solve, rmatvec=UT_solve) + + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2) + + assert_equal(info,0) + assert_normclose(A*x, b, tol=1e-8) + + +class TestGMRES(object): + def test_callback(self): + + def store_residual(r, rvec): + rvec[rvec.nonzero()[0].max()+1] = r + + # Define, A,b + A = csr_matrix(array([[-2,1,0,0,0,0],[1,-2,1,0,0,0],[0,1,-2,1,0,0],[0,0,1,-2,1,0],[0,0,0,1,-2,1],[0,0,0,0,1,-2]])) + b = ones((A.shape[0],)) + maxiter = 1 + rvec = zeros(maxiter+1) + rvec[0] = 1.0 + callback = lambda r:store_residual(r, rvec) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x,flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16, maxiter=maxiter, callback=callback) + + # Expected output from Scipy 1.0.0 + assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10) + + # Test preconditioned callback + M = 1e-3 * np.eye(A.shape[0]) + rvec = zeros(maxiter+1) + rvec[0] = 1.0 + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x, flag = gmres(A, b, M=M, tol=1e-16, maxiter=maxiter, callback=callback) + + # Expected output from Scipy 1.0.0 (callback has preconditioned residual!) 
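+        # With M = 1e-3 * I the callback receives the norm of the
+        # preconditioned residual, hence the expected second entry is the
+        # unpreconditioned value scaled by 1e-3.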
+ assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]), rtol=1e-10) + + def test_abi(self): + # Check we don't segfault on gmres with complex argument + A = eye(2) + b = ones(2) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + r_x, r_info = gmres(A, b) + r_x = r_x.astype(complex) + + x, info = gmres(A.astype(complex), b.astype(complex)) + + assert_(iscomplexobj(x)) + assert_allclose(r_x, x) + assert_(r_info == info) + + def test_atol_legacy(self): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + + # Check the strange legacy behavior: the tolerance is interpreted + # as atol, but only for the initial residual + A = eye(2) + b = 1e-6 * ones(2) + x, info = gmres(A, b, tol=1e-5) + assert_array_equal(x, np.zeros(2)) + + A = eye(2) + b = ones(2) + x, info = gmres(A, b, tol=1e-5) + assert_(np.linalg.norm(A.dot(x) - b) <= 1e-5*np.linalg.norm(b)) + assert_allclose(x, b, atol=0, rtol=1e-8) + + rndm = np.random.RandomState(12345) + A = rndm.rand(30, 30) + b = 1e-6 * ones(30) + x, info = gmres(A, b, tol=1e-7, restart=20) + assert_(np.linalg.norm(A.dot(x) - b) > 1e-7) + + A = eye(2) + b = 1e-10 * ones(2) + x, info = gmres(A, b, tol=1e-8, atol=0) + assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b)) + + def test_defective_precond_breakdown(self): + # Breakdown due to defective preconditioner + M = np.eye(3) + M[2,2] = 0 + + b = np.array([0, 1, 1]) + x = np.array([1, 0, 0]) + A = np.diag([2, 3, 4]) + + x, info = gmres(A, b, x0=x, M=M, tol=1e-15, atol=0) + + # Should not return nans, nor terminate with false success + assert_(not np.isnan(x).any()) + if info == 0: + assert_(np.linalg.norm(A.dot(x) - b) <= 1e-15*np.linalg.norm(b)) + + # The solution should be OK outside null space of M + assert_allclose(M.dot(A.dot(x)), M.dot(b)) + + def test_defective_matrix_breakdown(self): + # Breakdown due to defective matrix + A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]]) + b = np.array([1, 0, 1]) + x, info = gmres(A, b, tol=1e-8, atol=0) + + # Should not return nans, nor terminate with false success + assert_(not np.isnan(x).any()) + if info == 0: + assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b)) + + # The solution should be OK outside null space of A + assert_allclose(A.dot(A.dot(x)), A.dot(b)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_iterative.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_iterative.pyc new file mode 100644 index 0000000..4f571f7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_iterative.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lgmres.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lgmres.py new file mode 100644 index 0000000..5777c5b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lgmres.py @@ -0,0 +1,214 @@ +"""Tests for the linalg.isolve.lgmres module +""" + +from __future__ import division, print_function, absolute_import + +from numpy.testing import assert_, assert_allclose, assert_equal + +import pytest +from platform import python_implementation + +import numpy as np +from numpy import zeros, array, allclose +from scipy.linalg import norm +from scipy.sparse import csr_matrix, eye, rand + +from scipy.sparse.linalg.interface import LinearOperator +from 
scipy.sparse.linalg import splu +from scipy.sparse.linalg.isolve import lgmres, gmres + +from scipy._lib._numpy_compat import suppress_warnings + +Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9], + [1, -2, 1, 0, 5, 0], + [0, 1, -2, 1, 0, 0], + [0, 0, 1, -2, 1, 0], + [0, 3, 0, 1, -2, 1], + [1, 0, 0, 0, 1, -2]])) +b = array([1, 2, 3, 4, 5, 6]) +count = [0] + + +def matvec(v): + count[0] += 1 + return Am*v + + +A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype) + + +def do_solve(**kw): + count[0] = 0 + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x0, flag = lgmres(A, b, x0=zeros(A.shape[0]), + inner_m=6, tol=1e-14, **kw) + count_0 = count[0] + assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b)) + return x0, count_0 + + +class TestLGMRES(object): + def test_preconditioner(self): + # Check that preconditioning works + pc = splu(Am.tocsc()) + M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype) + + x0, count_0 = do_solve() + x1, count_1 = do_solve(M=M) + + assert_(count_1 == 3) + assert_(count_1 < count_0/2) + assert_(allclose(x1, x0, rtol=1e-14)) + + def test_outer_v(self): + # Check that the augmentation vectors behave as expected + + outer_v = [] + x0, count_0 = do_solve(outer_k=6, outer_v=outer_v) + assert_(len(outer_v) > 0) + assert_(len(outer_v) <= 6) + + x1, count_1 = do_solve(outer_k=6, outer_v=outer_v, + prepend_outer_v=True) + assert_(count_1 == 2, count_1) + assert_(count_1 < count_0/2) + assert_(allclose(x1, x0, rtol=1e-14)) + + # --- + + outer_v = [] + x0, count_0 = do_solve(outer_k=6, outer_v=outer_v, + store_outer_Av=False) + assert_(array([v[1] is None for v in outer_v]).all()) + assert_(len(outer_v) > 0) + assert_(len(outer_v) <= 6) + + x1, count_1 = do_solve(outer_k=6, outer_v=outer_v, + prepend_outer_v=True) + assert_(count_1 == 3, count_1) + assert_(count_1 < count_0/2) + assert_(allclose(x1, x0, rtol=1e-14)) + + @pytest.mark.skipif(python_implementation() == 'PyPy', + reason="Fails on PyPy CI runs. 
See #9507") + def test_arnoldi(self): + np.random.rand(1234) + + A = eye(2000) + rand(2000, 2000, density=5e-4) + b = np.random.rand(2000) + + # The inner arnoldi should be equivalent to gmres + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]), + inner_m=15, maxiter=1) + x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), + restart=15, maxiter=1) + + assert_equal(flag0, 1) + assert_equal(flag1, 1) + assert_(np.linalg.norm(A.dot(x0) - b) > 4e-4) + + assert_allclose(x0, x1) + + def test_cornercase(self): + np.random.seed(1234) + + # Rounding error may prevent convergence with tol=0 --- ensure + # that the return values in this case are correct, and no + # exceptions are raised + + for n in [3, 5, 10, 100]: + A = 2*eye(n) + + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + + b = np.ones(n) + x, info = lgmres(A, b, maxiter=10) + assert_equal(info, 0) + assert_allclose(A.dot(x) - b, 0, atol=1e-14) + + x, info = lgmres(A, b, tol=0, maxiter=10) + if info == 0: + assert_allclose(A.dot(x) - b, 0, atol=1e-14) + + b = np.random.rand(n) + x, info = lgmres(A, b, maxiter=10) + assert_equal(info, 0) + assert_allclose(A.dot(x) - b, 0, atol=1e-14) + + x, info = lgmres(A, b, tol=0, maxiter=10) + if info == 0: + assert_allclose(A.dot(x) - b, 0, atol=1e-14) + + def test_nans(self): + A = eye(3, format='lil') + A[1, 1] = np.nan + b = np.ones(3) + + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + x, info = lgmres(A, b, tol=0, maxiter=10) + assert_equal(info, 1) + + def test_breakdown_with_outer_v(self): + A = np.array([[1, 2], [3, 4]], dtype=float) + b = np.array([1, 2]) + + x = np.linalg.solve(A, b) + v0 = np.array([1, 0]) + + # The inner iteration should converge to the correct solution, + # since it's in the outer vector list + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1) + + assert_allclose(xp, x, atol=1e-12) + + def test_breakdown_underdetermined(self): + # Should find LSQ solution in the Krylov span in one inner + # iteration, despite solver breakdown from nilpotent A. + A = np.array([[0, 1, 1, 1], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0]], dtype=float) + + bs = [ + np.array([1, 1, 1, 1]), + np.array([1, 1, 1, 0]), + np.array([1, 1, 0, 0]), + np.array([1, 0, 0, 0]), + ] + + for b in bs: + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + xp, info = lgmres(A, b, maxiter=1) + resp = np.linalg.norm(A.dot(xp) - b) + + K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))] + y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1) + x = K.dot(y) + res = np.linalg.norm(A.dot(x) - b) + + assert_allclose(resp, res, err_msg=repr(b)) + + def test_denormals(self): + # Check that no warnings are emitted if the matrix contains + # numbers for which 1/x has no float representation, and that + # the solver behaves properly. 
+ A = np.array([[1, 2], [3, 4]], dtype=float) + A *= 100 * np.nextafter(0, 1) + + b = np.array([1, 1]) + + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, ".*called without specifying.*") + xp, info = lgmres(A, b) + + if info == 0: + assert_allclose(A.dot(xp), b) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lgmres.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lgmres.pyc new file mode 100644 index 0000000..26f2be2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lgmres.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsmr.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsmr.py new file mode 100644 index 0000000..33c574e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsmr.py @@ -0,0 +1,180 @@ +""" +Copyright (C) 2010 David Fong and Michael Saunders +Distributed under the same license as Scipy + +Testing Code for LSMR. + +03 Jun 2010: First version release with lsmr.py + +David Chin-lung Fong clfong@stanford.edu +Institute for Computational and Mathematical Engineering +Stanford University + +Michael Saunders saunders@stanford.edu +Systems Optimization Laboratory +Dept of MS&E, Stanford University. + +""" + +from __future__ import division, print_function, absolute_import + +from numpy import array, arange, eye, zeros, ones, sqrt, transpose, hstack +from numpy.linalg import norm +from numpy.testing import (assert_almost_equal, + assert_array_almost_equal) + +from scipy.sparse import coo_matrix +from scipy.sparse.linalg.interface import aslinearoperator +from scipy.sparse.linalg import lsmr +from .test_lsqr import G, b + + +class TestLSMR: + def setup_method(self): + self.n = 10 + self.m = 10 + + def assertCompatibleSystem(self, A, xtrue): + Afun = aslinearoperator(A) + b = Afun.matvec(xtrue) + x = lsmr(A, b)[0] + assert_almost_equal(norm(x - xtrue), 0, decimal=5) + + def testIdentityACase1(self): + A = eye(self.n) + xtrue = zeros((self.n, 1)) + self.assertCompatibleSystem(A, xtrue) + + def testIdentityACase2(self): + A = eye(self.n) + xtrue = ones((self.n,1)) + self.assertCompatibleSystem(A, xtrue) + + def testIdentityACase3(self): + A = eye(self.n) + xtrue = transpose(arange(self.n,0,-1)) + self.assertCompatibleSystem(A, xtrue) + + def testBidiagonalA(self): + A = lowerBidiagonalMatrix(20,self.n) + xtrue = transpose(arange(self.n,0,-1)) + self.assertCompatibleSystem(A,xtrue) + + def testScalarB(self): + A = array([[1.0, 2.0]]) + b = 3.0 + x = lsmr(A, b)[0] + assert_almost_equal(norm(A.dot(x) - b), 0) + + def testColumnB(self): + A = eye(self.n) + b = ones((self.n, 1)) + x = lsmr(A, b)[0] + assert_almost_equal(norm(A.dot(x) - b.ravel()), 0) + + def testInitialization(self): + # Test that the default setting is not modified + x_ref = lsmr(G, b)[0] + x0 = zeros(b.shape) + x = lsmr(G, b, x0=x0)[0] + assert_array_almost_equal(x_ref, x) + + # Test warm-start with single iteration + x0 = lsmr(G, b, maxiter=1)[0] + x = lsmr(G, b, x0=x0)[0] + assert_array_almost_equal(x_ref, x) + +class TestLSMRReturns: + def setup_method(self): + self.n = 10 + self.A = lowerBidiagonalMatrix(20,self.n) + self.xtrue = transpose(arange(self.n,0,-1)) + self.Afun = aslinearoperator(self.A) + self.b = self.Afun.matvec(self.xtrue) + self.returnValues = lsmr(self.A,self.b) + + def testNormr(self): + x, istop, itn, normr, 
normar, normA, condA, normx = self.returnValues + assert_almost_equal(normr, norm(self.b - self.Afun.matvec(x))) + + def testNormar(self): + x, istop, itn, normr, normar, normA, condA, normx = self.returnValues + assert_almost_equal(normar, + norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x)))) + + def testNormx(self): + x, istop, itn, normr, normar, normA, condA, normx = self.returnValues + assert_almost_equal(normx, norm(x)) + + +def lowerBidiagonalMatrix(m, n): + # This is a simple example for testing LSMR. + # It uses the leading m*n submatrix from + # A = [ 1 + # 1 2 + # 2 3 + # 3 4 + # ... + # n ] + # suitably padded by zeros. + # + # 04 Jun 2010: First version for distribution with lsmr.py + if m <= n: + row = hstack((arange(m, dtype=int), + arange(1, m, dtype=int))) + col = hstack((arange(m, dtype=int), + arange(m-1, dtype=int))) + data = hstack((arange(1, m+1, dtype=float), + arange(1,m, dtype=float))) + return coo_matrix((data, (row, col)), shape=(m,n)) + else: + row = hstack((arange(n, dtype=int), + arange(1, n+1, dtype=int))) + col = hstack((arange(n, dtype=int), + arange(n, dtype=int))) + data = hstack((arange(1, n+1, dtype=float), + arange(1,n+1, dtype=float))) + return coo_matrix((data,(row, col)), shape=(m,n)) + + +def lsmrtest(m, n, damp): + """Verbose testing of lsmr""" + + A = lowerBidiagonalMatrix(m,n) + xtrue = arange(n,0,-1, dtype=float) + Afun = aslinearoperator(A) + + b = Afun.matvec(xtrue) + + atol = 1.0e-7 + btol = 1.0e-7 + conlim = 1.0e+10 + itnlim = 10*n + show = 1 + + x, istop, itn, normr, normar, norma, conda, normx \ + = lsmr(A, b, damp, atol, btol, conlim, itnlim, show) + + j1 = min(n,5) + j2 = max(n-4,1) + print(' ') + print('First elements of x:') + str = ['%10.4f' % (xi) for xi in x[0:j1]] + print(''.join(str)) + print(' ') + print('Last elements of x:') + str = ['%10.4f' % (xi) for xi in x[j2-1:]] + print(''.join(str)) + + r = b - Afun.matvec(x) + r2 = sqrt(norm(r)**2 + (damp*norm(x))**2) + print(' ') + str = 'normr (est.) 
%17.10e' % (normr) + str2 = 'normr (true) %17.10e' % (r2) + print(str) + print(str2) + print(' ') + + +if __name__ == "__main__": + lsmrtest(20,10,0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsmr.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsmr.pyc new file mode 100644 index 0000000..c40f1dc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsmr.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsqr.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsqr.py new file mode 100644 index 0000000..61a929f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsqr.py @@ -0,0 +1,139 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_, assert_equal, assert_almost_equal, + assert_array_almost_equal) +from scipy._lib.six import xrange + +import scipy.sparse +import scipy.sparse.linalg +from scipy.sparse.linalg import lsqr +from time import time + +# Set up a test problem +n = 35 +G = np.eye(n) +normal = np.random.normal +norm = np.linalg.norm + +for jj in xrange(5): + gg = normal(size=n) + hh = gg * gg.T + G += (hh + hh.T) * 0.5 + G += normal(size=n) * normal(size=n) + +b = normal(size=n) + +tol = 1e-10 +show = False +maxit = None + + +def test_basic(): + b_copy = b.copy() + X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit) + assert_(np.all(b_copy == b)) + + svx = np.linalg.solve(G, b) + xo = X[0] + assert_(norm(svx - xo) < 1e-5) + +def test_gh_2466(): + row = np.array([0, 0]) + col = np.array([0, 1]) + val = np.array([1, -1]) + A = scipy.sparse.coo_matrix((val, (row, col)), shape=(1, 2)) + b = np.asarray([4]) + lsqr(A, b) + + +def test_well_conditioned_problems(): + # Test that sparse the lsqr solver returns the right solution + # on various problems with different random seeds. + # This is a non-regression test for a potential ZeroDivisionError + # raised when computing the `test2` & `test3` convergence conditions. + n = 10 + A_sparse = scipy.sparse.eye(n, n) + A_dense = A_sparse.toarray() + + with np.errstate(invalid='raise'): + for seed in range(30): + rng = np.random.RandomState(seed + 10) + beta = rng.rand(n) + beta[beta == 0] = 0.00001 # ensure that all the betas are not null + b = A_sparse * beta[:, np.newaxis] + output = lsqr(A_sparse, b, show=show) + + # Check that the termination condition corresponds to an approximate + # solution to Ax = b + assert_equal(output[1], 1) + solution = output[0] + + # Check that we recover the ground truth solution + assert_array_almost_equal(solution, beta) + + # Sanity check: compare to the dense array solver + reference_solution = np.linalg.solve(A_dense, b).ravel() + assert_array_almost_equal(solution, reference_solution) + + +def test_b_shapes(): + # Test b being a scalar. + A = np.array([[1.0, 2.0]]) + b = 3.0 + x = lsqr(A, b)[0] + assert_almost_equal(norm(A.dot(x) - b), 0) + + # Test b being a column vector. 
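+    # lsqr accepts an (N, 1) right-hand side and works with it flattened,
+    # so the residual is checked against b.ravel() below.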
+ A = np.eye(10) + b = np.ones((10, 1)) + x = lsqr(A, b)[0] + assert_almost_equal(norm(A.dot(x) - b.ravel()), 0) + + +def test_initialization(): + # Test the default setting is the same as zeros + b_copy = b.copy() + x_ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit) + x0 = np.zeros(x_ref[0].shape) + x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0) + assert_(np.all(b_copy == b)) + assert_array_almost_equal(x_ref[0], x[0]) + + # Test warm-start with single iteration + x0 = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0] + x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0) + assert_array_almost_equal(x_ref[0], x[0]) + assert_(np.all(b_copy == b)) + + +if __name__ == "__main__": + svx = np.linalg.solve(G, b) + + tic = time() + X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit) + xo = X[0] + phio = X[3] + psio = X[7] + k = X[2] + chio = X[8] + mg = np.amax(G - G.T) + if mg > 1e-14: + sym = 'No' + else: + sym = 'Yes' + + print('LSQR') + print("Is linear operator symmetric? " + sym) + print("n: %3g iterations: %3g" % (n, k)) + print("Norms computed in %.2fs by LSQR" % (time() - tic)) + print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e " % (chio, phio, psio)) + print("Residual norms computed directly:") + print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e" % (norm(xo), + norm(G*xo - b), + norm(G.T*(G*xo-b)))) + print("Direct solution norms:") + print(" ||x|| %9.4e ||r|| %9.4e " % (norm(svx), norm(G*svx - b))) + print("") + print(" || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo)) + print("") diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsqr.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsqr.pyc new file mode 100644 index 0000000..f025444 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_lsqr.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_minres.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_minres.py new file mode 100644 index 0000000..b94304b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_minres.py @@ -0,0 +1,65 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_equal, assert_allclose, assert_ +from scipy.sparse.linalg.isolve import minres +import pytest +from pytest import raises as assert_raises +from .test_iterative import assert_normclose + + +def get_sample_problem(): + # A random 10 x 10 symmetric matrix + np.random.seed(1234) + matrix = np.random.rand(10, 10) + matrix = matrix + matrix.T + # A random vector of length 10 + vector = np.random.rand(10) + return matrix, vector + + +def test_singular(): + A, b = get_sample_problem() + A[0, ] = 0 + b[0] = 0 + xp, info = minres(A, b) + assert_equal(info, 0) + assert_normclose(A.dot(xp), b, tol=1e-5) + + +@pytest.mark.skip(reason="Skip Until gh #6843 is fixed") +def test_gh_6843(): + """check if x0 is being used by tracing iterates""" + A, b = get_sample_problem() + # Random x0 to feed minres + np.random.seed(12345) + x0 = np.random.rand(10) + trace = [] + + def trace_iterates(xk): + trace.append(xk) + minres(A, b, x0=x0, callback=trace_iterates) + trace_with_x0 = trace + + trace = [] + minres(A, b, callback=trace_iterates) + assert_(not np.array_equal(trace_with_x0[0], trace[0])) + + +def test_shift(): + A, b = 
get_sample_problem() + shift = 0.5 + shifted_A = A - shift * np.eye(10) + x1, info1 = minres(A, b, shift=shift) + x2, info2 = minres(shifted_A, b) + assert_equal(info1, 0) + assert_allclose(x1, x2, rtol=1e-5) + + +def test_asymmetric_fail(): + """Asymmetric matrix should raise `ValueError` when check=True""" + A, b = get_sample_problem() + A[1, 2] = 1 + A[2, 1] = 2 + with assert_raises(ValueError): + xp, info = minres(A, b, check=True) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_minres.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_minres.pyc new file mode 100644 index 0000000..cf94837 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_minres.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_utils.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_utils.py new file mode 100644 index 0000000..fcbc7da --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_utils.py @@ -0,0 +1,10 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from pytest import raises as assert_raises + +from scipy.sparse.linalg import utils + + +def test_make_system_bad_shape(): + assert_raises(ValueError, utils.make_system, np.zeros((5,3)), None, np.zeros(4), np.zeros(4)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_utils.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_utils.pyc new file mode 100644 index 0000000..8108269 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/tests/test_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/utils.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/utils.py new file mode 100644 index 0000000..8b29d18 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/utils.py @@ -0,0 +1,122 @@ +from __future__ import division, print_function, absolute_import + +__docformat__ = "restructuredtext en" + +__all__ = [] + + +from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros + +from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \ + IdentityOperator + +_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F', + ('f','D'):'D', ('d','f'):'d', ('d','d'):'d', + ('d','F'):'D', ('d','D'):'D', ('F','f'):'F', + ('F','d'):'D', ('F','F'):'F', ('F','D'):'D', + ('D','f'):'D', ('D','d'):'D', ('D','F'):'D', + ('D','D'):'D'} + + +def coerce(x,y): + if x not in 'fdFD': + x = 'd' + if y not in 'fdFD': + y = 'd' + return _coerce_rules[x,y] + + +def id(x): + return x + + +def make_system(A, M, x0, b): + """Make a linear system Ax=b + + Parameters + ---------- + A : LinearOperator + sparse or dense matrix (or any valid input to aslinearoperator) + M : {LinearOperator, Nones} + preconditioner + sparse or dense matrix (or any valid input to aslinearoperator) + x0 : {array_like, None} + initial guess to iterative method + b : array_like + right hand side + + Returns + ------- + (A, M, x, b, postprocess) + A : LinearOperator + matrix of the linear system + M : LinearOperator + preconditioner + x : rank 1 ndarray + initial guess + b : rank 1 ndarray + right hand side + postprocess : function + converts the solution vector to the appropriate + 
type and dimensions (e.g. (N,1) matrix) + + """ + A_ = A + A = aslinearoperator(A) + + if A.shape[0] != A.shape[1]: + raise ValueError('expected square matrix, but got shape=%s' % (A.shape,)) + + N = A.shape[0] + + b = asanyarray(b) + + if not (b.shape == (N,1) or b.shape == (N,)): + raise ValueError('A and b have incompatible dimensions') + + if b.dtype.char not in 'fdFD': + b = b.astype('d') # upcast non-FP types to double + + def postprocess(x): + if isinstance(b,matrix): + x = asmatrix(x) + return x.reshape(b.shape) + + if hasattr(A,'dtype'): + xtype = A.dtype.char + else: + xtype = A.matvec(b).dtype.char + xtype = coerce(xtype, b.dtype.char) + + b = asarray(b,dtype=xtype) # make b the same type as x + b = b.ravel() + + if x0 is None: + x = zeros(N, dtype=xtype) + else: + x = array(x0, dtype=xtype) + if not (x.shape == (N,1) or x.shape == (N,)): + raise ValueError('A and x have incompatible dimensions') + x = x.ravel() + + # process preconditioner + if M is None: + if hasattr(A_,'psolve'): + psolve = A_.psolve + else: + psolve = id + if hasattr(A_,'rpsolve'): + rpsolve = A_.rpsolve + else: + rpsolve = id + if psolve is id and rpsolve is id: + M = IdentityOperator(shape=A.shape, dtype=A.dtype) + else: + M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve, + dtype=A.dtype) + else: + M = aslinearoperator(M) + if A.shape != M.shape: + raise ValueError('matrix and preconditioner have different shapes') + + return A, M, x, b, postprocess diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/utils.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/utils.pyc new file mode 100644 index 0000000..6f6049d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/matfuncs.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/matfuncs.py new file mode 100644 index 0000000..dfb0d43 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/matfuncs.py @@ -0,0 +1,868 @@ +""" +Sparse matrix functions +""" + +# +# Authors: Travis Oliphant, March 2002 +# Anthony Scopatz, August 2012 (Sparse Updates) +# Jake Vanderplas, August 2012 (Sparse Updates) +# + +from __future__ import division, print_function, absolute_import + +__all__ = ['expm', 'inv'] + +import math + +import numpy as np + +import scipy.special +from scipy.linalg.basic import solve, solve_triangular + +from scipy.sparse.base import isspmatrix +from scipy.sparse.construct import eye as speye +from scipy.sparse.linalg import spsolve + +import scipy.sparse +import scipy.sparse.linalg +from scipy.sparse.linalg.interface import LinearOperator + + +UPPER_TRIANGULAR = 'upper_triangular' + + +def inv(A): + """ + Compute the inverse of a sparse matrix + + Parameters + ---------- + A : (M,M) ndarray or sparse matrix + square matrix to be inverted + + Returns + ------- + Ainv : (M,M) ndarray or sparse matrix + inverse of `A` + + Notes + ----- + This computes the sparse inverse of `A`. If the inverse of `A` is expected + to be non-sparse, it will likely be faster to convert `A` to dense and use + scipy.linalg.inv. 
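# A minimal sketch of the tradeoff described in the Notes above, assuming only
# that numpy and scipy are importable; the 2x2 matrix is an illustrative choice.
# `inv` is spsolve(A, I) under the hood, so it pays off only when the inverse
# keeps some sparsity; the dense route is the fallback the Notes recommend.
import numpy as np
import scipy.linalg
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import inv as sparse_inv

A = csc_matrix([[1., 0.], [1., 2.]])
X_sparse = sparse_inv(A).toarray()       # sparse route: spsolve(A, I)
X_dense = scipy.linalg.inv(A.toarray())  # dense route
assert np.allclose(X_sparse, X_dense)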
+ + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import inv + >>> A = csc_matrix([[1., 0.], [1., 2.]]) + >>> Ainv = inv(A) + >>> Ainv + <2x2 sparse matrix of type '<class 'numpy.float64'>' + with 3 stored elements in Compressed Sparse Column format> + >>> A.dot(Ainv) + <2x2 sparse matrix of type '<class 'numpy.float64'>' + with 2 stored elements in Compressed Sparse Column format> + >>> A.dot(Ainv).todense() + matrix([[ 1., 0.], + [ 0., 1.]]) + + .. versionadded:: 0.12.0 + + """ + #check input + if not scipy.sparse.isspmatrix(A): + raise TypeError('Input must be a sparse matrix') + + I = speye(A.shape[0], A.shape[1], dtype=A.dtype, format=A.format) + Ainv = spsolve(A, I) + return Ainv + + +def _onenorm_matrix_power_nnm(A, p): + """ + Compute the 1-norm of a non-negative integer power of a non-negative matrix. + + Parameters + ---------- + A : a square ndarray or matrix or sparse matrix + Input matrix with non-negative entries. + p : non-negative integer + The power to which the matrix is to be raised. + + Returns + ------- + out : float + The 1-norm of the matrix power p of A. + + """ + # check input + if int(p) != p or p < 0: + raise ValueError('expected non-negative integer p') + p = int(p) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be like a square matrix') + + # Explicitly make a column vector so that this works when A is a + # numpy matrix (in addition to ndarray and sparse matrix). + v = np.ones((A.shape[0], 1), dtype=float) + M = A.T + for i in range(p): + v = M.dot(v) + return np.max(v) + + +def _onenorm(A): + # A compatibility function which should eventually disappear. + # This is copypasted from expm_action. + if scipy.sparse.isspmatrix(A): + return max(abs(A).sum(axis=0).flat) + else: + return np.linalg.norm(A, 1) + + +def _ident_like(A): + # A compatibility function which should eventually disappear. + # This is copypasted from expm_action. + if scipy.sparse.isspmatrix(A): + return scipy.sparse.construct.eye(A.shape[0], A.shape[1], + dtype=A.dtype, format=A.format) + else: + return np.eye(A.shape[0], A.shape[1], dtype=A.dtype) + + +def _is_upper_triangular(A): + # This function could possibly be of wider interest. + if isspmatrix(A): + lower_part = scipy.sparse.tril(A, -1) + # Check structural upper triangularity, + # then coincidental upper triangularity if needed. + return lower_part.nnz == 0 or lower_part.count_nonzero() == 0 + else: + return not np.tril(A, -1).any() + + +def _smart_matrix_product(A, B, alpha=None, structure=None): + """ + A matrix product that knows about sparse and structured matrices. + + Parameters + ---------- + A : 2d ndarray + First matrix. + B : 2d ndarray + Second matrix. + alpha : float + The matrix product will be scaled by this constant. + structure : str, optional + A string describing the structure of both matrices `A` and `B`. + Only `upper_triangular` is currently supported. + + Returns + ------- + M : 2d ndarray + Matrix product of A and B. + + """ + if len(A.shape) != 2: + raise ValueError('expected A to be a rectangular matrix') + if len(B.shape) != 2: + raise ValueError('expected B to be a rectangular matrix') + f = None + if structure == UPPER_TRIANGULAR: + if not isspmatrix(A) and not isspmatrix(B): + f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B)) + if f is not None: + if alpha is None: + alpha = 1. 
+ out = f(alpha, A, B) + else: + if alpha is None: + out = A.dot(B) + else: + out = alpha * A.dot(B) + return out + + +class MatrixPowerOperator(LinearOperator): + + def __init__(self, A, p, structure=None): + if A.ndim != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be like a square matrix') + if p < 0: + raise ValueError('expected p to be a non-negative integer') + self._A = A + self._p = p + self._structure = structure + self.dtype = A.dtype + self.ndim = A.ndim + self.shape = A.shape + + def _matvec(self, x): + for i in range(self._p): + x = self._A.dot(x) + return x + + def _rmatvec(self, x): + A_T = self._A.T + x = x.ravel() + for i in range(self._p): + x = A_T.dot(x) + return x + + def _matmat(self, X): + for i in range(self._p): + X = _smart_matrix_product(self._A, X, structure=self._structure) + return X + + @property + def T(self): + return MatrixPowerOperator(self._A.T, self._p) + + +class ProductOperator(LinearOperator): + """ + For now, this is limited to products of multiple square matrices. + """ + + def __init__(self, *args, **kwargs): + self._structure = kwargs.get('structure', None) + for A in args: + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError( + 'For now, the ProductOperator implementation is ' + 'limited to the product of multiple square matrices.') + if args: + n = args[0].shape[0] + for A in args: + for d in A.shape: + if d != n: + raise ValueError( + 'The square matrices of the ProductOperator ' + 'must all have the same shape.') + self.shape = (n, n) + self.ndim = len(self.shape) + self.dtype = np.find_common_type([x.dtype for x in args], []) + self._operator_sequence = args + + def _matvec(self, x): + for A in reversed(self._operator_sequence): + x = A.dot(x) + return x + + def _rmatvec(self, x): + x = x.ravel() + for A in self._operator_sequence: + x = A.T.dot(x) + return x + + def _matmat(self, X): + for A in reversed(self._operator_sequence): + X = _smart_matrix_product(A, X, structure=self._structure) + return X + + @property + def T(self): + T_args = [A.T for A in reversed(self._operator_sequence)] + return ProductOperator(*T_args) + + +def _onenormest_matrix_power(A, p, + t=2, itmax=5, compute_v=False, compute_w=False, structure=None): + """ + Efficiently estimate the 1-norm of A^p. + + Parameters + ---------- + A : ndarray + Matrix whose 1-norm of a power is to be computed. + p : int + Non-negative integer power. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + Larger values take longer and use more memory + but give more accurate output. + itmax : int, optional + Use at most this many iterations. + compute_v : bool, optional + Request a norm-maximizing linear operator input vector if True. + compute_w : bool, optional + Request a norm-maximizing linear operator output vector if True. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. + It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. 
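# A minimal sketch of what `_onenormest_matrix_power` achieves, assuming only
# the public scipy API; the size, power, and seed are illustrative choices.
# onenormest needs just matvec/rmatvec, so the power A^p is never formed.
import numpy as np
from scipy.sparse.linalg import LinearOperator, onenormest

rng = np.random.RandomState(1234)
A = rng.randn(60, 60)
p = 3

def matvec(x):
    for _ in range(p):   # act as A^p, one product at a time
        x = A.dot(x)
    return x

def rmatvec(x):
    for _ in range(p):   # the adjoint applies A.T p times
        x = A.T.dot(x)
    return x

op = LinearOperator(A.shape, matvec=matvec, rmatvec=rmatvec, dtype=A.dtype)
est = onenormest(op)
exact = np.linalg.norm(np.linalg.matrix_power(A, p), 1)
print("est=%r exact=%r" % (est, exact))  # est underestimates, typically within a small factor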
+ + """ + return scipy.sparse.linalg.onenormest( + MatrixPowerOperator(A, p, structure=structure)) + + +def _onenormest_product(operator_seq, + t=2, itmax=5, compute_v=False, compute_w=False, structure=None): + """ + Efficiently estimate the 1-norm of the matrix product of the args. + + Parameters + ---------- + operator_seq : linear operator sequence + Matrices whose 1-norm of product is to be computed. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + Larger values take longer and use more memory + but give more accurate output. + itmax : int, optional + Use at most this many iterations. + compute_v : bool, optional + Request a norm-maximizing linear operator input vector if True. + compute_w : bool, optional + Request a norm-maximizing linear operator output vector if True. + structure : str, optional + A string describing the structure of all operators. + Only `upper_triangular` is currently supported. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. + It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. + + """ + return scipy.sparse.linalg.onenormest( + ProductOperator(*operator_seq, structure=structure)) + + +class _ExpmPadeHelper(object): + """ + Help lazily evaluate a matrix exponential. + + The idea is to not do more work than we need for high expm precision, + so we lazily compute matrix powers and store or precompute + other properties of the matrix. + + """ + def __init__(self, A, structure=None, use_exact_onenorm=False): + """ + Initialize the object. + + Parameters + ---------- + A : a dense or sparse square numpy matrix or ndarray + The matrix to be exponentiated. + structure : str, optional + A string describing the structure of matrix `A`. + Only `upper_triangular` is currently supported. + use_exact_onenorm : bool, optional + If True then only the exact one-norm of matrix powers and products + will be used. Otherwise, the one-norm of powers and products + may initially be estimated. 
+ """ + self.A = A + self._A2 = None + self._A4 = None + self._A6 = None + self._A8 = None + self._A10 = None + self._d4_exact = None + self._d6_exact = None + self._d8_exact = None + self._d10_exact = None + self._d4_approx = None + self._d6_approx = None + self._d8_approx = None + self._d10_approx = None + self.ident = _ident_like(A) + self.structure = structure + self.use_exact_onenorm = use_exact_onenorm + + @property + def A2(self): + if self._A2 is None: + self._A2 = _smart_matrix_product( + self.A, self.A, structure=self.structure) + return self._A2 + + @property + def A4(self): + if self._A4 is None: + self._A4 = _smart_matrix_product( + self.A2, self.A2, structure=self.structure) + return self._A4 + + @property + def A6(self): + if self._A6 is None: + self._A6 = _smart_matrix_product( + self.A4, self.A2, structure=self.structure) + return self._A6 + + @property + def A8(self): + if self._A8 is None: + self._A8 = _smart_matrix_product( + self.A6, self.A2, structure=self.structure) + return self._A8 + + @property + def A10(self): + if self._A10 is None: + self._A10 = _smart_matrix_product( + self.A4, self.A6, structure=self.structure) + return self._A10 + + @property + def d4_tight(self): + if self._d4_exact is None: + self._d4_exact = _onenorm(self.A4)**(1/4.) + return self._d4_exact + + @property + def d6_tight(self): + if self._d6_exact is None: + self._d6_exact = _onenorm(self.A6)**(1/6.) + return self._d6_exact + + @property + def d8_tight(self): + if self._d8_exact is None: + self._d8_exact = _onenorm(self.A8)**(1/8.) + return self._d8_exact + + @property + def d10_tight(self): + if self._d10_exact is None: + self._d10_exact = _onenorm(self.A10)**(1/10.) + return self._d10_exact + + @property + def d4_loose(self): + if self.use_exact_onenorm: + return self.d4_tight + if self._d4_exact is not None: + return self._d4_exact + else: + if self._d4_approx is None: + self._d4_approx = _onenormest_matrix_power(self.A2, 2, + structure=self.structure)**(1/4.) + return self._d4_approx + + @property + def d6_loose(self): + if self.use_exact_onenorm: + return self.d6_tight + if self._d6_exact is not None: + return self._d6_exact + else: + if self._d6_approx is None: + self._d6_approx = _onenormest_matrix_power(self.A2, 3, + structure=self.structure)**(1/6.) + return self._d6_approx + + @property + def d8_loose(self): + if self.use_exact_onenorm: + return self.d8_tight + if self._d8_exact is not None: + return self._d8_exact + else: + if self._d8_approx is None: + self._d8_approx = _onenormest_matrix_power(self.A4, 2, + structure=self.structure)**(1/8.) + return self._d8_approx + + @property + def d10_loose(self): + if self.use_exact_onenorm: + return self.d10_tight + if self._d10_exact is not None: + return self._d10_exact + else: + if self._d10_approx is None: + self._d10_approx = _onenormest_product((self.A4, self.A6), + structure=self.structure)**(1/10.) + return self._d10_approx + + def pade3(self): + b = (120., 60., 12., 1.) + U = _smart_matrix_product(self.A, + b[3]*self.A2 + b[1]*self.ident, + structure=self.structure) + V = b[2]*self.A2 + b[0]*self.ident + return U, V + + def pade5(self): + b = (30240., 15120., 3360., 420., 30., 1.) + U = _smart_matrix_product(self.A, + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident, + structure=self.structure) + V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident + return U, V + + def pade7(self): + b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.) 
+ U = _smart_matrix_product(self.A, + b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident, + structure=self.structure) + V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident + return U, V + + def pade9(self): + b = (17643225600., 8821612800., 2075673600., 302702400., 30270240., + 2162160., 110880., 3960., 90., 1.) + U = _smart_matrix_product(self.A, + (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 + + b[3]*self.A2 + b[1]*self.ident), + structure=self.structure) + V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 + + b[2]*self.A2 + b[0]*self.ident) + return U, V + + def pade13_scaled(self, s): + b = (64764752532480000., 32382376266240000., 7771770303897600., + 1187353796428800., 129060195264000., 10559470521600., + 670442572800., 33522128640., 1323241920., 40840800., 960960., + 16380., 182., 1.) + B = self.A * 2**-s + B2 = self.A2 * 2**(-2*s) + B4 = self.A4 * 2**(-4*s) + B6 = self.A6 * 2**(-6*s) + U2 = _smart_matrix_product(B6, + b[13]*B6 + b[11]*B4 + b[9]*B2, + structure=self.structure) + U = _smart_matrix_product(B, + (U2 + b[7]*B6 + b[5]*B4 + + b[3]*B2 + b[1]*self.ident), + structure=self.structure) + V2 = _smart_matrix_product(B6, + b[12]*B6 + b[10]*B4 + b[8]*B2, + structure=self.structure) + V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident + return U, V + + +def expm(A): + """ + Compute the matrix exponential using Pade approximation. + + Parameters + ---------- + A : (M,M) array_like or sparse matrix + 2D Array or Matrix (sparse or dense) to be exponentiated + + Returns + ------- + expA : (M,M) ndarray + Matrix exponential of `A` + + Notes + ----- + This is algorithm (6.1) which is a simplification of algorithm (5.1). + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009) + "A New Scaling and Squaring Algorithm for the Matrix Exponential." + SIAM Journal on Matrix Analysis and Applications. + 31 (3). pp. 970-989. ISSN 1095-7162 + + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import expm + >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) + >>> A.todense() + matrix([[1, 0, 0], + [0, 2, 0], + [0, 0, 3]], dtype=int64) + >>> Aexp = expm(A) + >>> Aexp + <3x3 sparse matrix of type '<class 'numpy.float64'>' + with 3 stored elements in Compressed Sparse Column format> + >>> Aexp.todense() + matrix([[ 2.71828183, 0. , 0. ], + [ 0. , 7.3890561 , 0. ], + [ 0. , 0. , 20.08553692]]) + """ + return _expm(A, use_exact_onenorm='auto') + + +def _expm(A, use_exact_onenorm): + # Core of expm, separated to allow testing exact and approximate + # algorithms. + + # Avoid indiscriminate asarray() to allow sparse or other strange arrays. + if isinstance(A, (list, tuple)): + A = np.asarray(A) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected a square matrix') + + # Trivial case + if A.shape == (1, 1): + out = [[np.exp(A[0, 0])]] + + # Avoid indiscriminate casting to ndarray to + # allow for sparse or other strange arrays + if isspmatrix(A): + return A.__class__(out) + + return np.array(out) + + # Ensure input is of float type, to avoid integer overflows etc. + if ((isinstance(A, np.ndarray) or isspmatrix(A)) + and not np.issubdtype(A.dtype, np.inexact)): + A = A.astype(float) + + # Detect upper triangularity. + structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None + + if use_exact_onenorm == "auto": + # Hardcode a matrix order threshold for exact vs. estimated one-norms. 
+ use_exact_onenorm = A.shape[0] < 200 + + # Track functions of A to help compute the matrix exponential. + h = _ExpmPadeHelper( + A, structure=structure, use_exact_onenorm=use_exact_onenorm) + + # Try Pade order 3. + eta_1 = max(h.d4_loose, h.d6_loose) + if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0: + U, V = h.pade3() + return _solve_P_Q(U, V, structure=structure) + + # Try Pade order 5. + eta_2 = max(h.d4_tight, h.d6_loose) + if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0: + U, V = h.pade5() + return _solve_P_Q(U, V, structure=structure) + + # Try Pade orders 7 and 9. + eta_3 = max(h.d6_tight, h.d8_loose) + if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0: + U, V = h.pade7() + return _solve_P_Q(U, V, structure=structure) + if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0: + U, V = h.pade9() + return _solve_P_Q(U, V, structure=structure) + + # Use Pade order 13. + eta_4 = max(h.d8_loose, h.d10_loose) + eta_5 = min(eta_3, eta_4) + theta_13 = 4.25 + + # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13 + if eta_5 == 0: + # Nilpotent special case + s = 0 + else: + s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0) + s = s + _ell(2**-s * h.A, 13) + U, V = h.pade13_scaled(s) + X = _solve_P_Q(U, V, structure=structure) + if structure == UPPER_TRIANGULAR: + # Invoke Code Fragment 2.1. + X = _fragment_2_1(X, h.A, s) + else: + # X = r_13(A)^(2^s) by repeated squaring. + for i in range(s): + X = X.dot(X) + return X + + +def _solve_P_Q(U, V, structure=None): + """ + A helper function for expm_2009. + + Parameters + ---------- + U : ndarray + Pade numerator. + V : ndarray + Pade denominator. + structure : str, optional + A string describing the structure of both matrices `U` and `V`. + Only `upper_triangular` is currently supported. + + Notes + ----- + The `structure` argument is inspired by similar args + for theano and cvxopt functions. + + """ + P = U + V + Q = -U + V + if isspmatrix(U): + return spsolve(Q, P) + elif structure is None: + return solve(Q, P) + elif structure == UPPER_TRIANGULAR: + return solve_triangular(Q, P) + else: + raise ValueError('unsupported matrix structure: ' + str(structure)) + + +def _sinch(x): + """ + Stably evaluate sinch. + + Notes + ----- + The strategy of falling back to a sixth order Taylor expansion + was suggested by the Spallation Neutron Source docs + which was found on the internet by google search. + http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html + The details of the cutoff point and the Horner-like evaluation + was picked without reference to anything in particular. + + Note that sinch is not currently implemented in scipy.special, + whereas the "engineer's" definition of sinc is implemented. + The implementation of sinc involves a scaling factor of pi + that distinguishes it from the "mathematician's" version of sinc. + + """ + + # If x is small then use sixth order Taylor expansion. + # How small is small? I am using the point where the relative error + # of the approximation is less than 1e-14. + # If x is large then directly evaluate sinh(x) / x. + x2 = x*x + if abs(x) < 0.0135: + return 1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))) + else: + return np.sinh(x) / x + + +def _eq_10_42(lam_1, lam_2, t_12): + """ + Equation (10.42) of Functions of Matrices: Theory and Computation. + + Notes + ----- + This is a helper function for _fragment_2_1 of expm_2009. + Equation (10.42) is on page 251 in the section on Schur algorithms. 
+    In particular, section 10.4.3 explains the Schur-Parlett algorithm.
+    expm([[lam_1, t_12], [0, lam_2]])
+    =
+    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
+    [0, exp(lam_2)]]
+    """
+
+    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
+    # apparently suffers from cancellation, according to Higham's textbook.
+    # A nice implementation of sinch, defined as sinh(x)/x,
+    # will apparently work around the cancellation.
+    a = 0.5 * (lam_1 + lam_2)
+    b = 0.5 * (lam_1 - lam_2)
+    return t_12 * np.exp(a) * _sinch(b)
+
+
+def _fragment_2_1(X, T, s):
+    """
+    A helper function for expm_2009.
+
+    Notes
+    -----
+    The argument X is modified in-place, but this modification is not the same
+    as the returned value of the function.
+    This function also takes pains to do things in ways that are compatible
+    with sparse matrices, for example by avoiding fancy indexing
+    and by using methods of the matrices whenever possible instead of
+    using functions of the numpy or scipy libraries themselves.
+
+    """
+    # Form X = r_m(2^-s T)
+    # Replace diag(X) by exp(2^-s diag(T)).
+    n = X.shape[0]
+    diag_T = np.ravel(T.diagonal().copy())
+
+    # Replace diag(X) by exp(2^-s diag(T)).
+    scale = 2 ** -s
+    exp_diag = np.exp(scale * diag_T)
+    for k in range(n):
+        X[k, k] = exp_diag[k]
+
+    for i in range(s-1, -1, -1):
+        X = X.dot(X)
+
+        # Replace diag(X) by exp(2^-i diag(T)).
+        scale = 2 ** -i
+        exp_diag = np.exp(scale * diag_T)
+        for k in range(n):
+            X[k, k] = exp_diag[k]
+
+        # Replace (first) superdiagonal of X by explicit formula
+        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
+        # the author's 2008 textbook
+        # Functions of Matrices: Theory and Computation.
+        for k in range(n-1):
+            lam_1 = scale * diag_T[k]
+            lam_2 = scale * diag_T[k+1]
+            t_12 = scale * T[k, k+1]
+            value = _eq_10_42(lam_1, lam_2, t_12)
+            X[k, k+1] = value
+
+    # Return the updated X matrix.
+    return X
+
+
+def _ell(A, m):
+    """
+    A helper function for expm_2009.
+
+    Parameters
+    ----------
+    A : linear operator
+        A linear operator whose norm of power we care about.
+    m : int
+        The power of the linear operator
+
+    Returns
+    -------
+    value : int
+        A value related to a bound.
+
+    """
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected A to be like a square matrix')
+
+    p = 2*m + 1
+
+    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
+    # They are coefficients of terms of a generating function series expansion.
+    choose_2p_p = scipy.special.comb(2*p, p, exact=True)
+    abs_c_recip = float(choose_2p_p * math.factorial(2*p + 1))
+
+    # This is explained after Eq. (1.2) of the 2009 expm paper.
+    # It is the "unit roundoff" of IEEE double precision arithmetic.
+    u = 2**-53
+
+    # Compute the one-norm of matrix power p of abs(A).
+    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), p)
+
+    # Treat zero norm as a special case.
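# A minimal sketch of the cancellation `_eq_10_42` avoids, assuming only numpy;
# the near-degenerate eigenvalue pair is an illustrative choice. The plain
# divided difference loses most of its digits, while the sinch form stays stable.
import numpy as np

def sinch(x):
    x2 = x * x
    if abs(x) < 0.0135:      # sixth-order Taylor fallback, mirroring _sinch
        return 1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))
    return np.sinh(x) / x

lam_1, lam_2, t_12 = 1.0, 1.0 + 1e-13, 2.0
naive = t_12 * (np.exp(lam_2) - np.exp(lam_1)) / (lam_2 - lam_1)
stable = t_12 * np.exp(0.5 * (lam_1 + lam_2)) * sinch(0.5 * (lam_1 - lam_2))
print("naive=%r stable=%r" % (naive, stable))  # naive keeps only a few digits; stable ~ 2*e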
+    if not A_abs_onenorm:
+        return 0
+
+    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
+    log2_alpha_div_u = np.log2(alpha/u)
+    value = int(np.ceil(log2_alpha_div_u / (2 * m)))
+    return max(value, 0)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/matfuncs.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/matfuncs.pyc
new file mode 100644
index 0000000..08590f7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/matfuncs.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/setup.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/setup.py
new file mode 100644
index 0000000..dad619d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/setup.py
@@ -0,0 +1,20 @@
+from __future__ import division, print_function, absolute_import
+
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration('linalg',parent_package,top_path)
+
+    config.add_subpackage('isolve')
+    config.add_subpackage('dsolve')
+    config.add_subpackage('eigen')
+
+    config.add_data_dir('tests')
+
+    return config
+
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(**configuration(top_path='').todict())
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/setup.pyc
new file mode 100644
index 0000000..70f4341
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/setup.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/__init__.pyc
new file mode 100644
index 0000000..46f182d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_expm_multiply.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_expm_multiply.py
new file mode 100644
index 0000000..1a38258
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_expm_multiply.py
@@ -0,0 +1,254 @@
+"""Test functions for the sparse.linalg._expm_multiply module
+"""
+
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_, assert_equal
+from scipy._lib._numpy_compat import suppress_warnings
+
+from scipy.sparse import SparseEfficiencyWarning
+import scipy.linalg
+from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max,
+        _onenormest_matrix_power, expm_multiply, _expm_multiply_simple,
+        _expm_multiply_interval)
+
+
+def less_than_or_close(a, b):
+    return np.allclose(a, b) or (a < b)
+
+
+class TestExpmActionSimple(object):
+    """
+    These tests do not consider the case of multiple time steps in one call.
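# A minimal sketch of the behavior under test, assuming only the public scipy
# API; the sizes and seed are illustrative. expm_multiply(A, B) evaluates
# expm(A) @ B through the action of A alone, without ever forming expm(A).
import numpy as np
import scipy.linalg
from scipy.sparse.linalg import expm_multiply

rng = np.random.RandomState(1234)
A = 0.1 * rng.randn(40, 40)
B = rng.randn(40, 3)
reference = scipy.linalg.expm(A).dot(B)   # dense reference
action = expm_multiply(A, B)              # matrix-free action
assert np.allclose(reference, action)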
+ """ + + def test_theta_monotonicity(self): + pairs = sorted(_theta.items()) + for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]): + assert_(theta_a < theta_b) + + def test_p_max_default(self): + m_max = 55 + expected_p_max = 8 + observed_p_max = _compute_p_max(m_max) + assert_equal(observed_p_max, expected_p_max) + + def test_p_max_range(self): + for m_max in range(1, 55+1): + p_max = _compute_p_max(m_max) + assert_(p_max*(p_max - 1) <= m_max + 1) + p_too_big = p_max + 1 + assert_(p_too_big*(p_too_big - 1) > m_max + 1) + + def test_onenormest_matrix_power(self): + np.random.seed(1234) + n = 40 + nsamples = 10 + for i in range(nsamples): + A = scipy.linalg.inv(np.random.randn(n, n)) + for p in range(4): + if not p: + M = np.identity(n) + else: + M = np.dot(M, A) + estimated = _onenormest_matrix_power(A, p) + exact = np.linalg.norm(M, 1) + assert_(less_than_or_close(estimated, exact)) + assert_(less_than_or_close(exact, 3*estimated)) + + def test_expm_multiply(self): + np.random.seed(1234) + n = 40 + k = 3 + nsamples = 10 + for i in range(nsamples): + A = scipy.linalg.inv(np.random.randn(n, n)) + B = np.random.randn(n, k) + observed = expm_multiply(A, B) + expected = np.dot(scipy.linalg.expm(A), B) + assert_allclose(observed, expected) + + def test_matrix_vector_multiply(self): + np.random.seed(1234) + n = 40 + nsamples = 10 + for i in range(nsamples): + A = scipy.linalg.inv(np.random.randn(n, n)) + v = np.random.randn(n) + observed = expm_multiply(A, v) + expected = np.dot(scipy.linalg.expm(A), v) + assert_allclose(observed, expected) + + def test_scaled_expm_multiply(self): + np.random.seed(1234) + n = 40 + k = 3 + nsamples = 10 + for i in range(nsamples): + for t in (0.2, 1.0, 1.5): + with np.errstate(invalid='ignore'): + A = scipy.linalg.inv(np.random.randn(n, n)) + B = np.random.randn(n, k) + observed = _expm_multiply_simple(A, B, t=t) + expected = np.dot(scipy.linalg.expm(t*A), B) + assert_allclose(observed, expected) + + def test_scaled_expm_multiply_single_timepoint(self): + np.random.seed(1234) + t = 0.1 + n = 5 + k = 2 + A = np.random.randn(n, n) + B = np.random.randn(n, k) + observed = _expm_multiply_simple(A, B, t=t) + expected = scipy.linalg.expm(t*A).dot(B) + assert_allclose(observed, expected) + + def test_sparse_expm_multiply(self): + np.random.seed(1234) + n = 40 + k = 3 + nsamples = 10 + for i in range(nsamples): + A = scipy.sparse.rand(n, n, density=0.05) + B = np.random.randn(n, k) + observed = expm_multiply(A, B) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "splu requires CSC matrix format") + sup.filter(SparseEfficiencyWarning, + "spsolve is more efficient when sparse b is in the CSC matrix format") + expected = scipy.linalg.expm(A).dot(B) + assert_allclose(observed, expected) + + def test_complex(self): + A = np.array([ + [1j, 1j], + [0, 1j]], dtype=complex) + B = np.array([1j, 1j]) + observed = expm_multiply(A, B) + expected = np.array([ + 1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)), + 1j * np.exp(1j)], dtype=complex) + assert_allclose(observed, expected) + + +class TestExpmActionInterval(object): + + def test_sparse_expm_multiply_interval(self): + np.random.seed(1234) + start = 0.1 + stop = 3.2 + n = 40 + k = 3 + endpoint = True + for num in (14, 13, 2): + A = scipy.sparse.rand(n, n, density=0.05) + B = np.random.randn(n, k) + v = np.random.randn(n) + for target in (B, v): + X = expm_multiply(A, target, + start=start, stop=stop, num=num, endpoint=endpoint) + samples = np.linspace(start=start, stop=stop, + num=num, 
endpoint=endpoint) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "splu requires CSC matrix format") + sup.filter(SparseEfficiencyWarning, + "spsolve is more efficient when sparse b is in the CSC matrix format") + for solution, t in zip(X, samples): + assert_allclose(solution, + scipy.linalg.expm(t*A).dot(target)) + + def test_expm_multiply_interval_vector(self): + np.random.seed(1234) + start = 0.1 + stop = 3.2 + endpoint = True + for num in (14, 13, 2): + for n in (1, 2, 5, 20, 40): + A = scipy.linalg.inv(np.random.randn(n, n)) + v = np.random.randn(n) + X = expm_multiply(A, v, + start=start, stop=stop, num=num, endpoint=endpoint) + samples = np.linspace(start=start, stop=stop, + num=num, endpoint=endpoint) + for solution, t in zip(X, samples): + assert_allclose(solution, scipy.linalg.expm(t*A).dot(v)) + + def test_expm_multiply_interval_matrix(self): + np.random.seed(1234) + start = 0.1 + stop = 3.2 + endpoint = True + for num in (14, 13, 2): + for n in (1, 2, 5, 20, 40): + for k in (1, 2): + A = scipy.linalg.inv(np.random.randn(n, n)) + B = np.random.randn(n, k) + X = expm_multiply(A, B, + start=start, stop=stop, num=num, endpoint=endpoint) + samples = np.linspace(start=start, stop=stop, + num=num, endpoint=endpoint) + for solution, t in zip(X, samples): + assert_allclose(solution, scipy.linalg.expm(t*A).dot(B)) + + def test_sparse_expm_multiply_interval_dtypes(self): + # Test A & B int + A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int) + B = np.ones(5, dtype=int) + Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr') + assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B)) + + # Test A complex, B int + A = scipy.sparse.diags(-1j*np.arange(5),format='csr', dtype=complex) + B = np.ones(5, dtype=int) + Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)),format='csr') + assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B)) + + # Test A int, B complex + A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int) + B = 1j*np.ones(5, dtype=complex) + Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr') + assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B)) + + def test_expm_multiply_interval_status_0(self): + self._help_test_specific_expm_interval_status(0) + + def test_expm_multiply_interval_status_1(self): + self._help_test_specific_expm_interval_status(1) + + def test_expm_multiply_interval_status_2(self): + self._help_test_specific_expm_interval_status(2) + + def _help_test_specific_expm_interval_status(self, target_status): + np.random.seed(1234) + start = 0.1 + stop = 3.2 + num = 13 + endpoint = True + n = 5 + k = 2 + nrepeats = 10 + nsuccesses = 0 + for num in [14, 13, 2] * nrepeats: + A = np.random.randn(n, n) + B = np.random.randn(n, k) + status = _expm_multiply_interval(A, B, + start=start, stop=stop, num=num, endpoint=endpoint, + status_only=True) + if status == target_status: + X, status = _expm_multiply_interval(A, B, + start=start, stop=stop, num=num, endpoint=endpoint, + status_only=False) + assert_equal(X.shape, (num, n, k)) + samples = np.linspace(start=start, stop=stop, + num=num, endpoint=endpoint) + for solution, t in zip(X, samples): + assert_allclose(solution, scipy.linalg.expm(t*A).dot(B)) + nsuccesses += 1 + if not nsuccesses: + msg = 'failed to find a status-' + str(target_status) + ' interval' + raise Exception(msg) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_expm_multiply.pyc 
b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_expm_multiply.pyc new file mode 100644 index 0000000..fec65f3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_expm_multiply.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_interface.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_interface.py new file mode 100644 index 0000000..0844cde --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_interface.py @@ -0,0 +1,367 @@ +"""Test functions for the sparse.linalg.interface module +""" + +from __future__ import division, print_function, absolute_import + +from functools import partial +from itertools import product +import operator +import pytest +from pytest import raises as assert_raises, warns +from numpy.testing import assert_, assert_equal + +import numpy as np +import scipy.sparse as sparse + +from scipy.sparse.linalg import interface + + +# Only test matmul operator (A @ B) when available (Python 3.5+) +TEST_MATMUL = hasattr(operator, 'matmul') + + +class TestLinearOperator(object): + def setup_method(self): + self.A = np.array([[1,2,3], + [4,5,6]]) + self.B = np.array([[1,2], + [3,4], + [5,6]]) + self.C = np.array([[1,2], + [3,4]]) + + def test_matvec(self): + def get_matvecs(A): + return [{ + 'shape': A.shape, + 'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]), + 'rmatvec': lambda x: np.dot(A.T.conj(), + x).reshape(A.shape[1]) + }, + { + 'shape': A.shape, + 'matvec': lambda x: np.dot(A, x), + 'rmatvec': lambda x: np.dot(A.T.conj(), x), + 'matmat': lambda x: np.dot(A, x) + }] + + for matvecs in get_matvecs(self.A): + A = interface.LinearOperator(**matvecs) + + assert_(A.args == ()) + + assert_equal(A.matvec(np.array([1,2,3])), [14,32]) + assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]]) + assert_equal(A * np.array([1,2,3]), [14,32]) + assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]]) + assert_equal(A.dot(np.array([1,2,3])), [14,32]) + assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]]) + + assert_equal(A.matvec(np.matrix([[1],[2],[3]])), [[14],[32]]) + assert_equal(A * np.matrix([[1],[2],[3]]), [[14],[32]]) + assert_equal(A.dot(np.matrix([[1],[2],[3]])), [[14],[32]]) + + assert_equal((2*A)*[1,1,1], [12,30]) + assert_equal((2*A).rmatvec([1,1]), [10, 14, 18]) + assert_equal((2*A).H.matvec([1,1]), [10, 14, 18]) + assert_equal((2*A)*[[1],[1],[1]], [[12],[30]]) + assert_equal((2*A).matmat([[1],[1],[1]]), [[12],[30]]) + assert_equal((A*2)*[1,1,1], [12,30]) + assert_equal((A*2)*[[1],[1],[1]], [[12],[30]]) + assert_equal((2j*A)*[1,1,1], [12j,30j]) + assert_equal((A+A)*[1,1,1], [12, 30]) + assert_equal((A+A).rmatvec([1,1]), [10, 14, 18]) + assert_equal((A+A).H.matvec([1,1]), [10, 14, 18]) + assert_equal((A+A)*[[1],[1],[1]], [[12], [30]]) + assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]]) + assert_equal((-A)*[1,1,1], [-6,-15]) + assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]]) + assert_equal((A-A)*[1,1,1], [0,0]) + assert_equal((A-A)*[[1],[1],[1]], [[0],[0]]) + + z = A+A + assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A) + z = 2*A + assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2) + + assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray)) + assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray)) + assert_(isinstance(A * np.array([1,2,3]), np.ndarray)) + assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray)) + 
assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray)) + assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray)) + + assert_(isinstance(A.matvec(np.matrix([[1],[2],[3]])), np.ndarray)) + assert_(isinstance(A * np.matrix([[1],[2],[3]]), np.ndarray)) + assert_(isinstance(A.dot(np.matrix([[1],[2],[3]])), np.ndarray)) + + assert_(isinstance(2*A, interface._ScaledLinearOperator)) + assert_(isinstance(2j*A, interface._ScaledLinearOperator)) + assert_(isinstance(A+A, interface._SumLinearOperator)) + assert_(isinstance(-A, interface._ScaledLinearOperator)) + assert_(isinstance(A-A, interface._SumLinearOperator)) + + assert_((2j*A).dtype == np.complex_) + + assert_raises(ValueError, A.matvec, np.array([1,2])) + assert_raises(ValueError, A.matvec, np.array([1,2,3,4])) + assert_raises(ValueError, A.matvec, np.array([[1],[2]])) + assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]])) + + assert_raises(ValueError, lambda: A*A) + assert_raises(ValueError, lambda: A**2) + + for matvecsA, matvecsB in product(get_matvecs(self.A), + get_matvecs(self.B)): + A = interface.LinearOperator(**matvecsA) + B = interface.LinearOperator(**matvecsB) + + assert_equal((A*B)*[1,1], [50,113]) + assert_equal((A*B)*[[1],[1]], [[50],[113]]) + assert_equal((A*B).matmat([[1],[1]]), [[50],[113]]) + + assert_equal((A*B).rmatvec([1,1]), [71,92]) + assert_equal((A*B).H.matvec([1,1]), [71,92]) + + assert_(isinstance(A*B, interface._ProductLinearOperator)) + + assert_raises(ValueError, lambda: A+B) + assert_raises(ValueError, lambda: A**2) + + z = A*B + assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B) + + for matvecsC in get_matvecs(self.C): + C = interface.LinearOperator(**matvecsC) + + assert_equal((C**2)*[1,1], [17,37]) + assert_equal((C**2).rmatvec([1,1]), [22,32]) + assert_equal((C**2).H.matvec([1,1]), [22,32]) + assert_equal((C**2).matmat([[1],[1]]), [[17],[37]]) + + assert_(isinstance(C**2, interface._PowerLinearOperator)) + + def test_matmul(self): + if not TEST_MATMUL: + pytest.skip("matmul is only tested in Python 3.5+") + + D = {'shape': self.A.shape, + 'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]), + 'rmatvec': lambda x: np.dot(self.A.T.conj(), + x).reshape(self.A.shape[1]), + 'matmat': lambda x: np.dot(self.A, x)} + A = interface.LinearOperator(**D) + B = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + b = B[0] + + assert_equal(operator.matmul(A, b), A * b) + assert_equal(operator.matmul(A, B), A * B) + assert_raises(ValueError, operator.matmul, A, 2) + assert_raises(ValueError, operator.matmul, 2, A) + + +class TestAsLinearOperator(object): + def setup_method(self): + self.cases = [] + + def make_cases(dtype): + self.cases.append(np.matrix([[1,2,3],[4,5,6]], dtype=dtype)) + self.cases.append(np.array([[1,2,3],[4,5,6]], dtype=dtype)) + self.cases.append(sparse.csr_matrix([[1,2,3],[4,5,6]], dtype=dtype)) + + # Test default implementations of _adjoint and _rmatvec, which + # refer to each other. 
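# A minimal sketch of the subclassing pattern these cases exercise, assuming
# only the public scipy API; the scaling operator is an illustrative stand-in.
# Implementing _matvec gives a forward operator; adding _rmatvec (or _adjoint)
# is what makes .H and rmatvec work.
import numpy as np
from scipy.sparse.linalg import LinearOperator

class Scale(LinearOperator):
    """y = c * x as a LinearOperator."""
    def __init__(self, c, n):
        super(Scale, self).__init__(dtype=np.dtype(float), shape=(n, n))
        self.c = c

    def _matvec(self, x):
        return self.c * x

    def _rmatvec(self, x):
        return self.c * x   # real scalar: the adjoint is the same scaling

S = Scale(3.0, 4)
assert np.allclose(S.matvec(np.ones(4)), 3.0 * np.ones(4))
assert np.allclose(S.H.matvec(np.ones(4)), 3.0 * np.ones(4))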
+ def mv(x, dtype): + y = np.array([1 * x[0] + 2 * x[1] + 3 * x[2], + 4 * x[0] + 5 * x[1] + 6 * x[2]], dtype=dtype) + if len(x.shape) == 2: + y = y.reshape(-1, 1) + return y + + def rmv(x, dtype): + return np.array([1 * x[0] + 4 * x[1], + 2 * x[0] + 5 * x[1], + 3 * x[0] + 6 * x[1]], dtype=dtype) + + class BaseMatlike(interface.LinearOperator): + def __init__(self, dtype): + self.dtype = np.dtype(dtype) + self.shape = (2,3) + + def _matvec(self, x): + return mv(x, self.dtype) + + class HasRmatvec(BaseMatlike): + def _rmatvec(self,x): + return rmv(x, self.dtype) + + class HasAdjoint(BaseMatlike): + def _adjoint(self): + shape = self.shape[1], self.shape[0] + matvec = partial(rmv, dtype=self.dtype) + rmatvec = partial(mv, dtype=self.dtype) + return interface.LinearOperator(matvec=matvec, + rmatvec=rmatvec, + dtype=self.dtype, + shape=shape) + + self.cases.append(HasRmatvec(dtype)) + self.cases.append(HasAdjoint(dtype)) + + make_cases('int32') + make_cases('float32') + make_cases('float64') + + def test_basic(self): + + for M in self.cases: + A = interface.aslinearoperator(M) + M,N = A.shape + + assert_equal(A.matvec(np.array([1,2,3])), [14,32]) + assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]]) + + assert_equal(A * np.array([1,2,3]), [14,32]) + assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]]) + + assert_equal(A.rmatvec(np.array([1,2])), [9,12,15]) + assert_equal(A.rmatvec(np.array([[1],[2]])), [[9],[12],[15]]) + assert_equal(A.H.matvec(np.array([1,2])), [9,12,15]) + assert_equal(A.H.matvec(np.array([[1],[2]])), [[9],[12],[15]]) + + assert_equal( + A.matmat(np.array([[1,4],[2,5],[3,6]])), + [[14,32],[32,77]]) + + assert_equal(A * np.array([[1,4],[2,5],[3,6]]), [[14,32],[32,77]]) + + if hasattr(M,'dtype'): + assert_equal(A.dtype, M.dtype) + + def test_dot(self): + + for M in self.cases: + A = interface.aslinearoperator(M) + M,N = A.shape + + assert_equal(A.dot(np.array([1,2,3])), [14,32]) + assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]]) + + assert_equal( + A.dot(np.array([[1,4],[2,5],[3,6]])), + [[14,32],[32,77]]) + + +def test_repr(): + A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1) + repr_A = repr(A) + assert_('unspecified dtype' not in repr_A, repr_A) + + +def test_identity(): + ident = interface.IdentityOperator((3, 3)) + assert_equal(ident * [1, 2, 3], [1, 2, 3]) + assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9)) + + assert_raises(ValueError, ident.matvec, [1, 2, 3, 4]) + + +def test_attributes(): + A = interface.aslinearoperator(np.arange(16).reshape(4, 4)) + + def always_four_ones(x): + x = np.asarray(x) + assert_(x.shape == (3,) or x.shape == (3, 1)) + return np.ones(4) + + B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones) + + for op in [A, B, A * B, A.H, A + A, B + B, A ** 4]: + assert_(hasattr(op, "dtype")) + assert_(hasattr(op, "shape")) + assert_(hasattr(op, "_matvec")) + +def matvec(x): + """ Needed for test_pickle as local functions are not pickleable """ + return np.zeros(3) + +def test_pickle(): + import pickle + + for protocol in range(pickle.HIGHEST_PROTOCOL + 1): + A = interface.LinearOperator((3, 3), matvec) + s = pickle.dumps(A, protocol=protocol) + B = pickle.loads(s) + + for k in A.__dict__: + assert_equal(getattr(A, k), getattr(B, k)) + +def test_inheritance(): + class Empty(interface.LinearOperator): + pass + + with warns(RuntimeWarning, match="should implement at least"): + assert_raises(TypeError, Empty) + + class Identity(interface.LinearOperator): + def __init__(self, n): + 
super(Identity, self).__init__(dtype=None, shape=(n, n)) + + def _matvec(self, x): + return x + + id3 = Identity(3) + assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3]) + assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6]) + + class MatmatOnly(interface.LinearOperator): + def __init__(self, A): + super(MatmatOnly, self).__init__(A.dtype, A.shape) + self.A = A + + def _matmat(self, x): + return self.A.dot(x) + + mm = MatmatOnly(np.random.randn(5, 3)) + assert_equal(mm.matvec(np.random.randn(3)).shape, (5,)) + +def test_dtypes_of_operator_sum(): + # gh-6078 + + mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2) + mat_real = np.random.rand(2,2) + + complex_operator = interface.aslinearoperator(mat_complex) + real_operator = interface.aslinearoperator(mat_real) + + sum_complex = complex_operator + complex_operator + sum_real = real_operator + real_operator + + assert_equal(sum_real.dtype, np.float64) + assert_equal(sum_complex.dtype, np.complex128) + +def test_no_double_init(): + call_count = [0] + + def matvec(v): + call_count[0] += 1 + return v + + # It should call matvec exactly once (in order to determine the + # operator dtype) + A = interface.LinearOperator((2, 2), matvec=matvec) + assert_equal(call_count[0], 1) + +def test_adjoint_conjugate(): + X = np.array([[1j]]) + A = interface.aslinearoperator(X) + + B = 1j * A + Y = 1j * X + + v = np.array([1]) + + assert_equal(B.dot(v), Y.dot(v)) + assert_equal(B.H.dot(v), Y.T.conj().dot(v)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_interface.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_interface.pyc new file mode 100644 index 0000000..f7961c5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_interface.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_matfuncs.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_matfuncs.py new file mode 100644 index 0000000..c5ba36c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_matfuncs.py @@ -0,0 +1,556 @@ +# +# Created by: Pearu Peterson, March 2002 +# +""" Test functions for scipy.linalg.matfuncs module + +""" +from __future__ import division, print_function, absolute_import + +import math + +import numpy as np +from numpy import array, eye, exp, random +from numpy.linalg import matrix_power +from numpy.testing import ( + assert_allclose, assert_, assert_array_almost_equal, assert_equal, + assert_array_almost_equal_nulp) +from scipy._lib._numpy_compat import suppress_warnings + +from scipy.sparse import csc_matrix, SparseEfficiencyWarning +from scipy.sparse.construct import eye as speye +from scipy.sparse.linalg.matfuncs import (expm, _expm, + ProductOperator, MatrixPowerOperator, + _onenorm_matrix_power_nnm) +from scipy.linalg import logm +from scipy.special import factorial, binom +import scipy.sparse +import scipy.sparse.linalg + + +def _burkardt_13_power(n, p): + """ + A helper function for testing matrix functions. + + Parameters + ---------- + n : integer greater than 1 + Order of the square matrix to be returned. + p : non-negative integer + Power of the matrix. + + Returns + ------- + out : ndarray representing a square matrix + A Forsythe matrix of order n, raised to the power p. + + """ + # Input validation. 
+ if n != int(n) or n < 2: + raise ValueError('n must be an integer greater than 1') + n = int(n) + if p != int(p) or p < 0: + raise ValueError('p must be a non-negative integer') + p = int(p) + + # Construct the matrix explicitly. + a, b = divmod(p, n) + large = np.power(10.0, -n*a) + small = large * np.power(10.0, -n) + return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n) + + +def test_onenorm_matrix_power_nnm(): + np.random.seed(1234) + for n in range(1, 5): + for p in range(5): + M = np.random.random((n, n)) + Mp = np.linalg.matrix_power(M, p) + observed = _onenorm_matrix_power_nnm(M, p) + expected = np.linalg.norm(Mp, 1) + assert_allclose(observed, expected) + + +class TestExpM(object): + def test_zero_ndarray(self): + a = array([[0.,0],[0,0]]) + assert_array_almost_equal(expm(a),[[1,0],[0,1]]) + + def test_zero_sparse(self): + a = csc_matrix([[0.,0],[0,0]]) + assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]]) + + def test_zero_matrix(self): + a = np.matrix([[0.,0],[0,0]]) + assert_array_almost_equal(expm(a),[[1,0],[0,1]]) + + def test_misc_types(self): + A = expm(np.array([[1]])) + assert_allclose(expm(((1,),)), A) + assert_allclose(expm([[1]]), A) + assert_allclose(expm(np.matrix([[1]])), A) + assert_allclose(expm(np.array([[1]])), A) + assert_allclose(expm(csc_matrix([[1]])).A, A) + B = expm(np.array([[1j]])) + assert_allclose(expm(((1j,),)), B) + assert_allclose(expm([[1j]]), B) + assert_allclose(expm(np.matrix([[1j]])), B) + assert_allclose(expm(csc_matrix([[1j]])).A, B) + + def test_bidiagonal_sparse(self): + A = csc_matrix([ + [1, 3, 0], + [0, 1, 5], + [0, 0, 2]], dtype=float) + e1 = math.exp(1) + e2 = math.exp(2) + expected = np.array([ + [e1, 3*e1, 15*(e2 - 2*e1)], + [0, e1, 5*(e2 - e1)], + [0, 0, e2]], dtype=float) + observed = expm(A).toarray() + assert_array_almost_equal(observed, expected) + + def test_padecases_dtype_float(self): + for dtype in [np.float32, np.float64]: + for scale in [1e-2, 1e-1, 5e-1, 1, 10]: + A = scale * eye(3, dtype=dtype) + observed = expm(A) + expected = exp(scale) * eye(3, dtype=dtype) + assert_array_almost_equal_nulp(observed, expected, nulp=100) + + def test_padecases_dtype_complex(self): + for dtype in [np.complex64, np.complex128]: + for scale in [1e-2, 1e-1, 5e-1, 1, 10]: + A = scale * eye(3, dtype=dtype) + observed = expm(A) + expected = exp(scale) * eye(3, dtype=dtype) + assert_array_almost_equal_nulp(observed, expected, nulp=100) + + def test_padecases_dtype_sparse_float(self): + # float32 and complex64 lead to errors in spsolve/UMFpack + dtype = np.float64 + for scale in [1e-2, 1e-1, 5e-1, 1, 10]: + a = scale * speye(3, 3, dtype=dtype, format='csc') + e = exp(scale) * eye(3, dtype=dtype) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a csc_matrix is expensive.") + exact_onenorm = _expm(a, use_exact_onenorm=True).toarray() + inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray() + assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100) + assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100) + + def test_padecases_dtype_sparse_complex(self): + # float32 and complex64 lead to errors in spsolve/UMFpack + dtype = np.complex128 + for scale in [1e-2, 1e-1, 5e-1, 1, 10]: + a = scale * speye(3, 3, dtype=dtype, format='csc') + e = exp(scale) * eye(3, dtype=dtype) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a csc_matrix is expensive.") + assert_array_almost_equal_nulp(expm(a).toarray(), e, 
nulp=100) + + def test_logm_consistency(self): + random.seed(1234) + for dtype in [np.float64, np.complex128]: + for n in range(1, 10): + for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]: + # make logm(A) be of a given scale + A = (eye(n) + random.rand(n, n) * scale).astype(dtype) + if np.iscomplexobj(A): + A = A + 1j * random.rand(n, n) * scale + assert_array_almost_equal(expm(logm(A)), A) + + def test_integer_matrix(self): + Q = np.array([ + [-3, 1, 1, 1], + [1, -3, 1, 1], + [1, 1, -3, 1], + [1, 1, 1, -3]]) + assert_allclose(expm(Q), expm(1.0 * Q)) + + def test_integer_matrix_2(self): + # Check for integer overflows + Q = np.array([[-500, 500, 0, 0], + [0, -550, 360, 190], + [0, 630, -630, 0], + [0, 0, 0, 0]], dtype=np.int16) + assert_allclose(expm(Q), expm(1.0 * Q)) + + Q = csc_matrix(Q) + assert_allclose(expm(Q).A, expm(1.0 * Q).A) + + def test_triangularity_perturbation(self): + # Experiment (1) of + # Awad H. Al-Mohy and Nicholas J. Higham (2012) + # Improved Inverse Scaling and Squaring Algorithms + # for the Matrix Logarithm. + A = np.array([ + [3.2346e-1, 3e4, 3e4, 3e4], + [0, 3.0089e-1, 3e4, 3e4], + [0, 0, 3.221e-1, 3e4], + [0, 0, 0, 3.0744e-1]], + dtype=float) + A_logm = np.array([ + [-1.12867982029050462e+00, 9.61418377142025565e+04, + -4.52485573953179264e+09, 2.92496941103871812e+14], + [0.00000000000000000e+00, -1.20101052953082288e+00, + 9.63469687211303099e+04, -4.68104828911105442e+09], + [0.00000000000000000e+00, 0.00000000000000000e+00, + -1.13289322264498393e+00, 9.53249183094775653e+04], + [0.00000000000000000e+00, 0.00000000000000000e+00, + 0.00000000000000000e+00, -1.17947533272554850e+00]], + dtype=float) + assert_allclose(expm(A_logm), A, rtol=1e-4) + + # Perturb the upper triangular matrix by tiny amounts, + # so that it becomes technically not upper triangular. + random.seed(1234) + tiny = 1e-17 + A_logm_perturbed = A_logm.copy() + A_logm_perturbed[1, 0] = tiny + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Ill-conditioned.*") + A_expm_logm_perturbed = expm(A_logm_perturbed) + rtol = 1e-4 + atol = 100 * tiny + assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol)) + + def test_burkardt_1(self): + # This matrix is diagonal. + # The calculation of the matrix exponential is simple. + # + # This is the first of a series of matrix exponential tests + # collected by John Burkardt from the following sources. + # + # Alan Laub, + # Review of "Linear System Theory" by Joao Hespanha, + # SIAM Review, + # Volume 52, Number 4, December 2010, pages 779--781. + # + # Cleve Moler and Charles Van Loan, + # Nineteen Dubious Ways to Compute the Exponential of a Matrix, + # Twenty-Five Years Later, + # SIAM Review, + # Volume 45, Number 1, March 2003, pages 3--49. + # + # Cleve Moler, + # Cleve's Corner: A Balancing Act for the Matrix Exponential, + # 23 July 2012. + # + # Robert Ward, + # Numerical computation of the matrix exponential + # with accuracy estimate, + # SIAM Journal on Numerical Analysis, + # Volume 14, Number 4, September 1977, pages 600--610. + exp1 = np.exp(1) + exp2 = np.exp(2) + A = np.array([ + [1, 0], + [0, 2], + ], dtype=float) + desired = np.array([ + [exp1, 0], + [0, exp2], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_2(self): + # This matrix is symmetric. + # The calculation of the matrix exponential is straightforward. 
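# A minimal sketch of where reference values like the ones below can come from,
# assuming only numpy: for a symmetric matrix, expm(A) = V diag(exp(w)) V^T
# with (w, V) from eigh. The 2x2 matrix is the one this test uses.
import numpy as np

w, V = np.linalg.eigh(np.array([[1., 3.], [3., 2.]]))
ref = V.dot(np.diag(np.exp(w))).dot(V.T)
print(ref)   # reproduces the hard-coded entries (~39.32, 46.17, 54.71)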
+ A = np.array([ + [1, 3], + [3, 2], + ], dtype=float) + desired = np.array([ + [39.322809708033859, 46.166301438885753], + [46.166301438885768, 54.711576854329110], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_3(self): + # This example is due to Laub. + # This matrix is ill-suited for the Taylor series approach. + # As powers of A are computed, the entries blow up too quickly. + exp1 = np.exp(1) + exp39 = np.exp(39) + A = np.array([ + [0, 1], + [-39, -40], + ], dtype=float) + desired = np.array([ + [ + 39/(38*exp1) - 1/(38*exp39), + -np.expm1(-38) / (38*exp1)], + [ + 39*np.expm1(-38) / (38*exp1), + -1/(38*exp1) + 39/(38*exp39)], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_4(self): + # This example is due to Moler and Van Loan. + # The example will cause problems for the series summation approach, + # as well as for diagonal Pade approximations. + A = np.array([ + [-49, 24], + [-64, 31], + ], dtype=float) + U = np.array([[3, 1], [4, 2]], dtype=float) + V = np.array([[1, -1/2], [-2, 3/2]], dtype=float) + w = np.array([-17, -1], dtype=float) + desired = np.dot(U * np.exp(w), V) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_5(self): + # This example is due to Moler and Van Loan. + # This matrix is strictly upper triangular + # All powers of A are zero beyond some (low) limit. + # This example will cause problems for Pade approximations. + A = np.array([ + [0, 6, 0, 0], + [0, 0, 6, 0], + [0, 0, 0, 6], + [0, 0, 0, 0], + ], dtype=float) + desired = np.array([ + [1, 6, 18, 36], + [0, 1, 6, 18], + [0, 0, 1, 6], + [0, 0, 0, 1], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_6(self): + # This example is due to Moler and Van Loan. + # This matrix does not have a complete set of eigenvectors. + # That means the eigenvector approach will fail. + exp1 = np.exp(1) + A = np.array([ + [1, 1], + [0, 1], + ], dtype=float) + desired = np.array([ + [exp1, exp1], + [0, exp1], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_7(self): + # This example is due to Moler and Van Loan. + # This matrix is very close to example 5. + # Mathematically, it has a complete set of eigenvectors. + # Numerically, however, the calculation will be suspect. + exp1 = np.exp(1) + eps = np.spacing(1) + A = np.array([ + [1 + eps, 1], + [0, 1 - eps], + ], dtype=float) + desired = np.array([ + [exp1, exp1], + [0, exp1], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_8(self): + # This matrix was an example in Wikipedia. + exp4 = np.exp(4) + exp16 = np.exp(16) + A = np.array([ + [21, 17, 6], + [-5, -1, -6], + [4, 4, 16], + ], dtype=float) + desired = np.array([ + [13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4], + [-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4], + [16*exp16, 16*exp16, 4*exp16], + ], dtype=float) * 0.25 + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_9(self): + # This matrix is due to the NAG Library. + # It is an example for function F01ECF. 
+ A = np.array([ + [1, 2, 2, 2], + [3, 1, 1, 2], + [3, 2, 1, 2], + [3, 3, 3, 1], + ], dtype=float) + desired = np.array([ + [740.7038, 610.8500, 542.2743, 549.1753], + [731.2510, 603.5524, 535.0884, 542.2743], + [823.7630, 679.4257, 603.5524, 610.8500], + [998.4355, 823.7630, 731.2510, 740.7038], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_10(self): + # This is Ward's example #1. + # It is defective and nonderogatory. + A = np.array([ + [4, 2, 0], + [1, 4, 1], + [1, 1, 4], + ], dtype=float) + assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6)) + desired = np.array([ + [147.8666224463699, 183.7651386463682, 71.79703239999647], + [127.7810855231823, 183.7651386463682, 91.88256932318415], + [127.7810855231824, 163.6796017231806, 111.9681062463718], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_11(self): + # This is Ward's example #2. + # It is a symmetric matrix. + A = np.array([ + [29.87942128909879, 0.7815750847907159, -2.289519314033932], + [0.7815750847907159, 25.72656945571064, 8.680737820540137], + [-2.289519314033932, 8.680737820540137, 34.39400925519054], + ], dtype=float) + assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40)) + desired = np.array([ + [ + 5.496313853692378E+15, + -1.823188097200898E+16, + -3.047577080858001E+16], + [ + -1.823188097200899E+16, + 6.060522870222108E+16, + 1.012918429302482E+17], + [ + -3.047577080858001E+16, + 1.012918429302482E+17, + 1.692944112408493E+17], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_12(self): + # This is Ward's example #3. + # Ward's algorithm has difficulty estimating the accuracy + # of its results. + A = np.array([ + [-131, 19, 18], + [-390, 56, 54], + [-387, 57, 52], + ], dtype=float) + assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1)) + desired = np.array([ + [-1.509644158793135, 0.3678794391096522, 0.1353352811751005], + [-5.632570799891469, 1.471517758499875, 0.4060058435250609], + [-4.934938326088363, 1.103638317328798, 0.5413411267617766], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_burkardt_13(self): + # This is Ward's example #4. + # This is a version of the Forsythe matrix. + # The eigenvector problem is badly conditioned. + # Ward's algorithm has difficulty esimating the accuracy + # of its results for this problem. + # + # Check the construction of one instance of this family of matrices. + A4_actual = _burkardt_13_power(4, 1) + A4_desired = [[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [1e-4, 0, 0, 0]] + assert_allclose(A4_actual, A4_desired) + # Check the expm for a few instances. + for n in (2, 3, 4, 10): + # Approximate expm using Taylor series. + # This works well for this matrix family + # because each matrix in the summation, + # even before dividing by the factorial, + # is entrywise positive with max entry 10**(-floor(p/n)*n). + k = max(1, int(np.ceil(16/n))) + desired = np.zeros((n, n), dtype=float) + for p in range(n*k): + Ap = _burkardt_13_power(n, p) + assert_equal(np.min(Ap), 0) + assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n)) + desired += Ap / factorial(p) + actual = expm(_burkardt_13_power(n, 1)) + assert_allclose(actual, desired) + + def test_burkardt_14(self): + # This is Moler's example. + # This badly scaled matrix caused problems for MATLAB's expm(). 
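+        # The nonzero entries below span about 18 orders of magnitude
+        # (np.ptp(np.log10(np.abs(A[A != 0]))) comes to roughly 18.3),
+        # which is why balancing-based preprocessing, as discussed in
+        # Moler's "A Balancing Act for the Matrix Exponential" cited in
+        # test_burkardt_1, can be harmful for this matrix.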
+ A = np.array([ + [0, 1e-8, 0], + [-(2e10 + 4e8/6.), -3, 2e10], + [200./3., 0, -200./3.], + ], dtype=float) + desired = np.array([ + [0.446849468283175, 1.54044157383952e-09, 0.462811453558774], + [-5743067.77947947, -0.0152830038686819, -4526542.71278401], + [0.447722977849494, 1.54270484519591e-09, 0.463480648837651], + ], dtype=float) + actual = expm(A) + assert_allclose(actual, desired) + + def test_pascal(self): + # Test pascal triangle. + # Nilpotent exponential, used to trigger a failure (gh-8029) + + for scale in [1.0, 1e-3, 1e-6]: + for n in range(120): + A = np.diag(np.arange(1, n + 1), -1) * scale + B = expm(A) + + sc = scale**np.arange(n, -1, -1) + if np.any(sc < 1e-300): + continue + + got = B + expected = binom(np.arange(n + 1)[:,None], + np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None] + err = abs(expected - got).max() + atol = 1e-13 * abs(expected).max() + assert_allclose(got, expected, atol=atol) + + +class TestOperators(object): + + def test_product_operator(self): + random.seed(1234) + n = 5 + k = 2 + nsamples = 10 + for i in range(nsamples): + A = np.random.randn(n, n) + B = np.random.randn(n, n) + C = np.random.randn(n, n) + D = np.random.randn(n, k) + op = ProductOperator(A, B, C) + assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D)) + assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D)) + + def test_matrix_power_operator(self): + random.seed(1234) + n = 5 + k = 2 + p = 3 + nsamples = 10 + for i in range(nsamples): + A = np.random.randn(n, n) + B = np.random.randn(n, k) + op = MatrixPowerOperator(A, p) + assert_allclose(op.matmat(B), matrix_power(A, p).dot(B)) + assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B)) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_matfuncs.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_matfuncs.pyc new file mode 100644 index 0000000..0d76e80 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_matfuncs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_norm.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_norm.py new file mode 100644 index 0000000..fb4c5bd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_norm.py @@ -0,0 +1,127 @@ +"""Test functions for the sparse.linalg.norm module +""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.linalg import norm as npnorm +from numpy.testing import assert_equal, assert_allclose +from pytest import raises as assert_raises + +from scipy._lib._version import NumpyVersion +import scipy.sparse +from scipy.sparse.linalg import norm as spnorm + + +class TestNorm(object): + def setup_method(self): + a = np.arange(9) - 4 + b = a.reshape((3, 3)) + self.b = scipy.sparse.csr_matrix(b) + + def test_matrix_norm(self): + + # Frobenius norm is the default + assert_allclose(spnorm(self.b), 7.745966692414834) + assert_allclose(spnorm(self.b, 'fro'), 7.745966692414834) + + assert_allclose(spnorm(self.b, np.inf), 9) + assert_allclose(spnorm(self.b, -np.inf), 2) + assert_allclose(spnorm(self.b, 1), 7) + assert_allclose(spnorm(self.b, -1), 6) + + # _multi_svd_norm is not implemented for sparse matrix + assert_raises(NotImplementedError, spnorm, self.b, 2) + assert_raises(NotImplementedError, spnorm, self.b, -2) + + def test_matrix_norm_axis(self): + for m, axis in ((self.b, None), (self.b, (0, 1)), (self.b.T, (1, 
0))): + assert_allclose(spnorm(m, axis=axis), 7.745966692414834) + assert_allclose(spnorm(m, 'fro', axis=axis), 7.745966692414834) + assert_allclose(spnorm(m, np.inf, axis=axis), 9) + assert_allclose(spnorm(m, -np.inf, axis=axis), 2) + assert_allclose(spnorm(m, 1, axis=axis), 7) + assert_allclose(spnorm(m, -1, axis=axis), 6) + + def test_vector_norm(self): + v = [4.5825756949558398, 4.2426406871192848, 4.5825756949558398] + for m, a in (self.b, 0), (self.b.T, 1): + for axis in a, (a, ), a-2, (a-2, ): + assert_allclose(spnorm(m, 1, axis=axis), [7, 6, 7]) + assert_allclose(spnorm(m, np.inf, axis=axis), [4, 3, 4]) + assert_allclose(spnorm(m, axis=axis), v) + assert_allclose(spnorm(m, ord=2, axis=axis), v) + assert_allclose(spnorm(m, ord=None, axis=axis), v) + + def test_norm_exceptions(self): + m = self.b + assert_raises(TypeError, spnorm, m, None, 1.5) + assert_raises(TypeError, spnorm, m, None, [2]) + assert_raises(ValueError, spnorm, m, None, ()) + assert_raises(ValueError, spnorm, m, None, (0, 1, 2)) + assert_raises(ValueError, spnorm, m, None, (0, 0)) + assert_raises(ValueError, spnorm, m, None, (0, 2)) + assert_raises(ValueError, spnorm, m, None, (-3, 0)) + assert_raises(ValueError, spnorm, m, None, 2) + assert_raises(ValueError, spnorm, m, None, -3) + assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', 0) + assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', (0, 1)) + + +class TestVsNumpyNorm(object): + _sparse_types = ( + scipy.sparse.bsr_matrix, + scipy.sparse.coo_matrix, + scipy.sparse.csc_matrix, + scipy.sparse.csr_matrix, + scipy.sparse.dia_matrix, + scipy.sparse.dok_matrix, + scipy.sparse.lil_matrix, + ) + _test_matrices = ( + (np.arange(9) - 4).reshape((3, 3)), + [ + [1, 2, 3], + [-1, 1, 4]], + [ + [1, 0, 3], + [-1, 1, 4j]], + ) + + def test_sparse_matrix_norms(self): + for sparse_type in self._sparse_types: + for M in self._test_matrices: + S = sparse_type(M) + assert_allclose(spnorm(S), npnorm(M)) + assert_allclose(spnorm(S, 'fro'), npnorm(M, 'fro')) + assert_allclose(spnorm(S, np.inf), npnorm(M, np.inf)) + assert_allclose(spnorm(S, -np.inf), npnorm(M, -np.inf)) + assert_allclose(spnorm(S, 1), npnorm(M, 1)) + assert_allclose(spnorm(S, -1), npnorm(M, -1)) + + def test_sparse_matrix_norms_with_axis(self): + for sparse_type in self._sparse_types: + for M in self._test_matrices: + S = sparse_type(M) + for axis in None, (0, 1), (1, 0): + assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis)) + for ord in 'fro', np.inf, -np.inf, 1, -1: + assert_allclose(spnorm(S, ord, axis=axis), + npnorm(M, ord, axis=axis)) + # Some numpy matrix norms are allergic to negative axes. 
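+                # Negative axes count from the end, so for these 2-D inputs
+                # (-2, -1) is equivalent to (0, 1), e.g.:
+                #   npnorm(M, 'fro', axis=(-2, -1)) == npnorm(M, 'fro', axis=(0, 1))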
+ for axis in (-2, -1), (-1, -2), (1, -2): + assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis)) + assert_allclose(spnorm(S, 'f', axis=axis), + npnorm(M, 'f', axis=axis)) + assert_allclose(spnorm(S, 'fro', axis=axis), + npnorm(M, 'fro', axis=axis)) + + def test_sparse_vector_norms(self): + for sparse_type in self._sparse_types: + for M in self._test_matrices: + S = sparse_type(M) + for axis in (0, 1, -1, -2, (0, ), (1, ), (-1, ), (-2, )): + assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis)) + for ord in None, 2, np.inf, -np.inf, 1, 0.5, 0.42: + assert_allclose(spnorm(S, ord, axis=axis), + npnorm(M, ord, axis=axis)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_norm.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_norm.pyc new file mode 100644 index 0000000..d56bf9c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_norm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_onenormest.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_onenormest.py new file mode 100644 index 0000000..48cb4a3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_onenormest.py @@ -0,0 +1,254 @@ +"""Test functions for the sparse.linalg._onenormest module +""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_ +import pytest +import scipy.linalg +import scipy.sparse.linalg +from scipy.sparse.linalg._onenormest import _onenormest_core, _algorithm_2_2 + + +class MatrixProductOperator(scipy.sparse.linalg.LinearOperator): + """ + This is purely for onenormest testing. + """ + + def __init__(self, A, B): + if A.ndim != 2 or B.ndim != 2: + raise ValueError('expected ndarrays representing matrices') + if A.shape[1] != B.shape[0]: + raise ValueError('incompatible shapes') + self.A = A + self.B = B + self.ndim = 2 + self.shape = (A.shape[0], B.shape[1]) + + def _matvec(self, x): + return np.dot(self.A, np.dot(self.B, x)) + + def _rmatvec(self, x): + return np.dot(np.dot(x, self.A), self.B) + + def _matmat(self, X): + return np.dot(self.A, np.dot(self.B, X)) + + @property + def T(self): + return MatrixProductOperator(self.B.T, self.A.T) + + +class TestOnenormest(object): + + @pytest.mark.xslow + def test_onenormest_table_3_t_2(self): + # This will take multiple seconds if your computer is slow like mine. + # It is stochastic, so the tolerance could be too strict. 
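+        # The code under test is the block 1-norm estimator of Higham and
+        # Tisseur (2000), "A Block Algorithm for Matrix 1-Norm Estimation";
+        # t is the number of columns in the iteration block.  The public
+        # wrapper is exercised the same way (sketch):
+        #   est = scipy.sparse.linalg.onenormest(A, t=2, itmax=5)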
+ np.random.seed(1234) + t = 2 + n = 100 + itmax = 5 + nsamples = 5000 + observed = [] + expected = [] + nmult_list = [] + nresample_list = [] + for i in range(nsamples): + A = scipy.linalg.inv(np.random.randn(n, n)) + est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax) + observed.append(est) + expected.append(scipy.linalg.norm(A, 1)) + nmult_list.append(nmults) + nresample_list.append(nresamples) + observed = np.array(observed, dtype=float) + expected = np.array(expected, dtype=float) + relative_errors = np.abs(observed - expected) / expected + + # check the mean underestimation ratio + underestimation_ratio = observed / expected + assert_(0.99 < np.mean(underestimation_ratio) < 1.0) + + # check the max and mean required column resamples + assert_equal(np.max(nresample_list), 2) + assert_(0.05 < np.mean(nresample_list) < 0.2) + + # check the proportion of norms computed exactly correctly + nexact = np.count_nonzero(relative_errors < 1e-14) + proportion_exact = nexact / float(nsamples) + assert_(0.9 < proportion_exact < 0.95) + + # check the average number of matrix*vector multiplications + assert_(3.5 < np.mean(nmult_list) < 4.5) + + @pytest.mark.xslow + def test_onenormest_table_4_t_7(self): + # This will take multiple seconds if your computer is slow like mine. + # It is stochastic, so the tolerance could be too strict. + np.random.seed(1234) + t = 7 + n = 100 + itmax = 5 + nsamples = 5000 + observed = [] + expected = [] + nmult_list = [] + nresample_list = [] + for i in range(nsamples): + A = np.random.randint(-1, 2, size=(n, n)) + est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax) + observed.append(est) + expected.append(scipy.linalg.norm(A, 1)) + nmult_list.append(nmults) + nresample_list.append(nresamples) + observed = np.array(observed, dtype=float) + expected = np.array(expected, dtype=float) + relative_errors = np.abs(observed - expected) / expected + + # check the mean underestimation ratio + underestimation_ratio = observed / expected + assert_(0.90 < np.mean(underestimation_ratio) < 0.99) + + # check the required column resamples + assert_equal(np.max(nresample_list), 0) + + # check the proportion of norms computed exactly correctly + nexact = np.count_nonzero(relative_errors < 1e-14) + proportion_exact = nexact / float(nsamples) + assert_(0.15 < proportion_exact < 0.25) + + # check the average number of matrix*vector multiplications + assert_(3.5 < np.mean(nmult_list) < 4.5) + + def test_onenormest_table_5_t_1(self): + # "note that there is no randomness and hence only one estimate for t=1" + t = 1 + n = 100 + itmax = 5 + alpha = 1 - 1e-6 + A = -scipy.linalg.inv(np.identity(n) + alpha*np.eye(n, k=1)) + first_col = np.array([1] + [0]*(n-1)) + first_row = np.array([(-alpha)**i for i in range(n)]) + B = -scipy.linalg.toeplitz(first_col, first_row) + assert_allclose(A, B) + est, v, w, nmults, nresamples = _onenormest_core(B, B.T, t, itmax) + exact_value = scipy.linalg.norm(B, 1) + underest_ratio = est / exact_value + assert_allclose(underest_ratio, 0.05, rtol=1e-4) + assert_equal(nmults, 11) + assert_equal(nresamples, 0) + # check the non-underscored version of onenormest + est_plain = scipy.sparse.linalg.onenormest(B, t=t, itmax=itmax) + assert_allclose(est, est_plain) + + @pytest.mark.xslow + def test_onenormest_table_6_t_1(self): + #TODO this test seems to give estimates that match the table, + #TODO even though no attempt has been made to deal with + #TODO complex numbers in the one-norm estimation. 
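+        # For reference, the quantity being estimated is the induced 1-norm,
+        # i.e. the maximum absolute column sum, which for complex entries is
+        # taken over moduli:
+        #   scipy.linalg.norm(A, 1) == np.abs(A).sum(axis=0).max()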
+ # This will take multiple seconds if your computer is slow like mine. + # It is stochastic, so the tolerance could be too strict. + np.random.seed(1234) + t = 1 + n = 100 + itmax = 5 + nsamples = 5000 + observed = [] + expected = [] + nmult_list = [] + nresample_list = [] + for i in range(nsamples): + A_inv = np.random.rand(n, n) + 1j * np.random.rand(n, n) + A = scipy.linalg.inv(A_inv) + est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax) + observed.append(est) + expected.append(scipy.linalg.norm(A, 1)) + nmult_list.append(nmults) + nresample_list.append(nresamples) + observed = np.array(observed, dtype=float) + expected = np.array(expected, dtype=float) + relative_errors = np.abs(observed - expected) / expected + + # check the mean underestimation ratio + underestimation_ratio = observed / expected + underestimation_ratio_mean = np.mean(underestimation_ratio) + assert_(0.90 < underestimation_ratio_mean < 0.99) + + # check the required column resamples + max_nresamples = np.max(nresample_list) + assert_equal(max_nresamples, 0) + + # check the proportion of norms computed exactly correctly + nexact = np.count_nonzero(relative_errors < 1e-14) + proportion_exact = nexact / float(nsamples) + assert_(0.7 < proportion_exact < 0.8) + + # check the average number of matrix*vector multiplications + mean_nmult = np.mean(nmult_list) + assert_(4 < mean_nmult < 5) + + def _help_product_norm_slow(self, A, B): + # for profiling + C = np.dot(A, B) + return scipy.linalg.norm(C, 1) + + def _help_product_norm_fast(self, A, B): + # for profiling + t = 2 + itmax = 5 + D = MatrixProductOperator(A, B) + est, v, w, nmults, nresamples = _onenormest_core(D, D.T, t, itmax) + return est + + @pytest.mark.slow + def test_onenormest_linear_operator(self): + # Define a matrix through its product A B. + # Depending on the shapes of A and B, + # it could be easy to multiply this product by a small matrix, + # but it could be annoying to look at all of + # the entries of the product explicitly. + np.random.seed(1234) + n = 6000 + k = 3 + A = np.random.randn(n, k) + B = np.random.randn(k, n) + fast_estimate = self._help_product_norm_fast(A, B) + exact_value = self._help_product_norm_slow(A, B) + assert_(fast_estimate <= exact_value <= 3*fast_estimate, + 'fast: %g\nexact:%g' % (fast_estimate, exact_value)) + + def test_returns(self): + np.random.seed(1234) + A = scipy.sparse.rand(50, 50, 0.1) + + s0 = scipy.linalg.norm(A.todense(), 1) + s1, v = scipy.sparse.linalg.onenormest(A, compute_v=True) + s2, w = scipy.sparse.linalg.onenormest(A, compute_w=True) + s3, v2, w2 = scipy.sparse.linalg.onenormest(A, compute_w=True, compute_v=True) + + assert_allclose(s1, s0, rtol=1e-9) + assert_allclose(np.linalg.norm(A.dot(v), 1), s0*np.linalg.norm(v, 1), rtol=1e-9) + assert_allclose(A.dot(v), w, rtol=1e-9) + + +class TestAlgorithm_2_2(object): + + def test_randn_inv(self): + np.random.seed(1234) + n = 20 + nsamples = 100 + for i in range(nsamples): + + # Choose integer t uniformly between 1 and 3 inclusive. + t = np.random.randint(1, 4) + + # Choose n uniformly between 10 and 40 inclusive. + n = np.random.randint(10, 41) + + # Sample the inverse of a matrix with random normal entries. + A = scipy.linalg.inv(np.random.randn(n, n)) + + # Compute the 1-norm bounds. 
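+            # _algorithm_2_2 is Algorithm 2.2 of Higham & Tisseur (2000).
+            # This loop only smoke-tests the call: the returned estimates
+            # are lower bounds on norm(A, 1), not exact values, so no
+            # equality assertion follows.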
+ g, ind = _algorithm_2_2(A, A.T, t) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_onenormest.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_onenormest.pyc new file mode 100644 index 0000000..637c5b2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/linalg/tests/test_onenormest.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/setup.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/setup.py new file mode 100644 index 0000000..61480e3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/setup.py @@ -0,0 +1,64 @@ +from __future__ import division, print_function, absolute_import + +import os +import sys +import subprocess + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('sparse',parent_package,top_path) + + config.add_data_dir('tests') + + config.add_subpackage('linalg') + config.add_subpackage('csgraph') + + config.add_extension('_csparsetools', + sources=['_csparsetools.c']) + + def get_sparsetools_sources(ext, build_dir): + # Defer generation of source files + subprocess.check_call([sys.executable, + os.path.join(os.path.dirname(__file__), + 'generate_sparsetools.py'), + '--no-force']) + return [] + + depends = ['sparsetools_impl.h', + 'bsr_impl.h', + 'csc_impl.h', + 'csr_impl.h', + 'other_impl.h', + 'bool_ops.h', + 'bsr.h', + 'complex_ops.h', + 'coo.h', + 'csc.h', + 'csgraph.h', + 'csr.h', + 'dense.h', + 'dia.h', + 'py3k.h', + 'sparsetools.h', + 'util.h'] + depends = [os.path.join('sparsetools', hdr) for hdr in depends], + config.add_extension('_sparsetools', + define_macros=[('__STDC_FORMAT_MACROS', 1)], + depends=depends, + include_dirs=['sparsetools'], + sources=[os.path.join('sparsetools', 'sparsetools.cxx'), + os.path.join('sparsetools', 'csr.cxx'), + os.path.join('sparsetools', 'csc.cxx'), + os.path.join('sparsetools', 'bsr.cxx'), + os.path.join('sparsetools', 'other.cxx'), + get_sparsetools_sources] + ) + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/setup.pyc new file mode 100644 index 0000000..3d0fba0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/sparsetools.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/sparsetools.py new file mode 100644 index 0000000..1b8a9af --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/sparsetools.py @@ -0,0 +1,27 @@ +""" +sparsetools is not a public module in scipy.sparse, but this file is +for backward compatibility if someone happens to use it. 
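+
+Importing it emits a DeprecationWarning and then simply re-exports
+scipy.sparse._sparsetools.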
+""" +from numpy import deprecate + +# This file shouldn't be imported by scipy --- Scipy code should use +# internally scipy.sparse._sparsetools + + +@deprecate(old_name="scipy.sparse.sparsetools", + message=("scipy.sparse.sparsetools is a private module for scipy.sparse, " + "and should not be used.")) +def _deprecated(): + pass + + +del deprecate + +try: + _deprecated() +except DeprecationWarning as e: + # don't fail import if DeprecationWarnings raise error -- works around + # the situation with Numpy's test framework + pass + +from ._sparsetools import * diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/sparsetools.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/sparsetools.pyc new file mode 100644 index 0000000..182b01e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/sparsetools.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/spfuncs.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/spfuncs.py new file mode 100644 index 0000000..045afb7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/spfuncs.py @@ -0,0 +1,100 @@ +""" Functions that operate on sparse matrices +""" + +from __future__ import division, print_function, absolute_import + +__all__ = ['count_blocks','estimate_blocksize'] + +from .csr import isspmatrix_csr, csr_matrix +from .csc import isspmatrix_csc +from ._sparsetools import csr_count_blocks + + +def extract_diagonal(A): + raise NotImplementedError('use .diagonal() instead') + +#def extract_diagonal(A): +# """extract_diagonal(A) returns the main diagonal of A.""" +# #TODO extract k-th diagonal +# if isspmatrix_csr(A) or isspmatrix_csc(A): +# fn = getattr(sparsetools, A.format + "_diagonal") +# y = empty( min(A.shape), dtype=upcast(A.dtype) ) +# fn(A.shape[0],A.shape[1],A.indptr,A.indices,A.data,y) +# return y +# elif isspmatrix_bsr(A): +# M,N = A.shape +# R,C = A.blocksize +# y = empty( min(M,N), dtype=upcast(A.dtype) ) +# fn = sparsetools.bsr_diagonal(M//R, N//C, R, C, \ +# A.indptr, A.indices, ravel(A.data), y) +# return y +# else: +# return extract_diagonal(csr_matrix(A)) + + +def estimate_blocksize(A,efficiency=0.7): + """Attempt to determine the blocksize of a sparse matrix + + Returns a blocksize=(r,c) such that + - A.nnz / A.tobsr( (r,c) ).nnz > efficiency + """ + if not (isspmatrix_csr(A) or isspmatrix_csc(A)): + A = csr_matrix(A) + + if A.nnz == 0: + return (1,1) + + if not 0 < efficiency < 1.0: + raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0') + + high_efficiency = (1.0 + efficiency) / 2.0 + nnz = float(A.nnz) + M,N = A.shape + + if M % 2 == 0 and N % 2 == 0: + e22 = nnz / (4 * count_blocks(A,(2,2))) + else: + e22 = 0.0 + + if M % 3 == 0 and N % 3 == 0: + e33 = nnz / (9 * count_blocks(A,(3,3))) + else: + e33 = 0.0 + + if e22 > high_efficiency and e33 > high_efficiency: + e66 = nnz / (36 * count_blocks(A,(6,6))) + if e66 > efficiency: + return (6,6) + else: + return (3,3) + else: + if M % 4 == 0 and N % 4 == 0: + e44 = nnz / (16 * count_blocks(A,(4,4))) + else: + e44 = 0.0 + + if e44 > efficiency: + return (4,4) + elif e33 > efficiency: + return (3,3) + elif e22 > efficiency: + return (2,2) + else: + return (1,1) + + +def count_blocks(A,blocksize): + """For a given blocksize=(r,c) count the number of occupied + blocks in a sparse matrix A + """ + r,c = blocksize + if r < 1 or c < 1: + raise ValueError('r and c must be positive') + + if isspmatrix_csr(A): + M,N = A.shape + return 
csr_count_blocks(M,N,r,c,A.indptr,A.indices) + elif isspmatrix_csc(A): + return count_blocks(A.T,(c,r)) + else: + return count_blocks(csr_matrix(A),blocksize) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/spfuncs.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/spfuncs.pyc new file mode 100644 index 0000000..1a114b1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/spfuncs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/sputils.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/sputils.py new file mode 100644 index 0000000..b528456 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/sputils.py @@ -0,0 +1,476 @@ +""" Utility functions for sparse matrix module +""" + +from __future__ import division, print_function, absolute_import + +import operator +import warnings +import numpy as np + +__all__ = ['upcast', 'getdtype', 'isscalarlike', 'isintlike', + 'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype'] + +supported_dtypes = ['bool', 'int8', 'uint8', 'short', 'ushort', 'intc', + 'uintc', 'longlong', 'ulonglong', 'single', 'double', + 'longdouble', 'csingle', 'cdouble', 'clongdouble'] +supported_dtypes = [np.typeDict[x] for x in supported_dtypes] + +_upcast_memo = {} + + +def upcast(*args): + """Returns the nearest supported sparse dtype for the + combination of one or more types. + + upcast(t0, t1, ..., tn) -> T where T is a supported dtype + + Examples + -------- + + >>> upcast('int32') + <type 'numpy.int32'> + >>> upcast('bool') + <type 'numpy.bool_'> + >>> upcast('int32','float32') + <type 'numpy.float64'> + >>> upcast('bool',complex,float) + <type 'numpy.complex128'> + + """ + + t = _upcast_memo.get(hash(args)) + if t is not None: + return t + + upcast = np.find_common_type(args, []) + + for t in supported_dtypes: + if np.can_cast(upcast, t): + _upcast_memo[hash(args)] = t + return t + + raise TypeError('no supported conversion for types: %r' % (args,)) + + +def upcast_char(*args): + """Same as `upcast` but taking dtype.char as input (faster).""" + t = _upcast_memo.get(args) + if t is not None: + return t + t = upcast(*map(np.dtype, args)) + _upcast_memo[args] = t + return t + + +def upcast_scalar(dtype, scalar): + """Determine data type for binary operation between an array of + type `dtype` and a scalar. + """ + return (np.array([0], dtype=dtype) * scalar).dtype + + +def downcast_intp_index(arr): + """ + Down-cast index array to np.intp dtype if it is of a larger dtype. + + Raise an error if the array contains a value that is too large for + intp. + """ + if arr.dtype.itemsize > np.dtype(np.intp).itemsize: + if arr.size == 0: + return arr.astype(np.intp) + maxval = arr.max() + minval = arr.min() + if maxval > np.iinfo(np.intp).max or minval < np.iinfo(np.intp).min: + raise ValueError("Cannot deal with arrays with indices larger " + "than the machine maximum address size " + "(e.g. 64-bit indices on 32-bit machine).") + return arr.astype(np.intp) + return arr + + +def to_native(A): + return np.asarray(A, dtype=A.dtype.newbyteorder('native')) + + +def getdtype(dtype, a=None, default=None): + """Function used to simplify argument processing. If 'dtype' is not + specified (is None), returns a.dtype; otherwise returns a np.dtype + object created from the specified dtype argument. If 'dtype' and 'a' + are both None, construct a data type out of the 'default' parameter. + Furthermore, 'dtype' must be in 'allowed' set. 
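+
+    Examples
+    --------
+    >>> getdtype(None, default=float)
+    dtype('float64')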
+ """ + # TODO is this really what we want? + if dtype is None: + try: + newdtype = a.dtype + except AttributeError: + if default is not None: + newdtype = np.dtype(default) + else: + raise TypeError("could not interpret data type") + else: + newdtype = np.dtype(dtype) + if newdtype == np.object_: + warnings.warn("object dtype is not supported by sparse matrices") + + return newdtype + + +def get_index_dtype(arrays=(), maxval=None, check_contents=False): + """ + Based on input (integer) arrays `a`, determine a suitable index data + type that can hold the data in the arrays. + + Parameters + ---------- + arrays : tuple of array_like + Input arrays whose types/contents to check + maxval : float, optional + Maximum value needed + check_contents : bool, optional + Whether to check the values in the arrays and not just their types. + Default: False (check only the types) + + Returns + ------- + dtype : dtype + Suitable index data type (int32 or int64) + + """ + + int32min = np.iinfo(np.int32).min + int32max = np.iinfo(np.int32).max + + dtype = np.intc + if maxval is not None: + if maxval > int32max: + dtype = np.int64 + + if isinstance(arrays, np.ndarray): + arrays = (arrays,) + + for arr in arrays: + arr = np.asarray(arr) + if not np.can_cast(arr.dtype, np.int32): + if check_contents: + if arr.size == 0: + # a bigger type not needed + continue + elif np.issubdtype(arr.dtype, np.integer): + maxval = arr.max() + minval = arr.min() + if minval >= int32min and maxval <= int32max: + # a bigger type not needed + continue + + dtype = np.int64 + break + + return dtype + + +def get_sum_dtype(dtype): + """Mimic numpy's casting for np.sum""" + if dtype.kind == 'u' and np.can_cast(dtype, np.uint): + return np.uint + if np.can_cast(dtype, np.int_): + return np.int_ + return dtype + + +def isscalarlike(x): + """Is x either a scalar, an array scalar, or a 0-dim array?""" + return np.isscalar(x) or (isdense(x) and x.ndim == 0) + + +def isintlike(x): + """Is x appropriate as an index into a sparse matrix? Returns True + if it can be cast safely to a machine int. + """ + # Fast-path check to eliminate non-scalar values. operator.index would + # catch this case too, but the exception catching is slow. + if np.ndim(x) != 0: + return False + try: + operator.index(x) + except (TypeError, ValueError): + try: + loose_int = bool(int(x) == x) + except (TypeError, ValueError): + return False + if loose_int: + warnings.warn("Inexact indices into sparse matrices are deprecated", + DeprecationWarning) + return loose_int + return True + + +def isshape(x, nonneg=False): + """Is x a valid 2-tuple of dimensions? + + If nonneg, also checks that the dimensions are non-negative. 
+ """ + try: + # Assume it's a tuple of matrix dimensions (M, N) + (M, N) = x + except Exception: + return False + else: + if isintlike(M) and isintlike(N): + if np.ndim(M) == 0 and np.ndim(N) == 0: + if not nonneg or (M >= 0 and N >= 0): + return True + return False + + +def issequence(t): + return ((isinstance(t, (list, tuple)) and + (len(t) == 0 or np.isscalar(t[0]))) or + (isinstance(t, np.ndarray) and (t.ndim == 1))) + + +def ismatrix(t): + return ((isinstance(t, (list, tuple)) and + len(t) > 0 and issequence(t[0])) or + (isinstance(t, np.ndarray) and t.ndim == 2)) + + +def isdense(x): + return isinstance(x, np.ndarray) + + +def validateaxis(axis): + if axis is not None: + axis_type = type(axis) + + # In NumPy, you can pass in tuples for 'axis', but they are + # not very useful for sparse matrices given their limited + # dimensions, so let's make it explicit that they are not + # allowed to be passed in + if axis_type == tuple: + raise TypeError(("Tuples are not accepted for the 'axis' " + "parameter. Please pass in one of the " + "following: {-2, -1, 0, 1, None}.")) + + # If not a tuple, check that the provided axis is actually + # an integer and raise a TypeError similar to NumPy's + if not np.issubdtype(np.dtype(axis_type), np.integer): + raise TypeError("axis must be an integer, not {name}" + .format(name=axis_type.__name__)) + + if not (-2 <= axis <= 1): + raise ValueError("axis out of range") + + +def check_shape(args, current_shape=None): + """Imitate numpy.matrix handling of shape arguments""" + if len(args) == 0: + raise TypeError("function missing 1 required positional argument: " + "'shape'") + elif len(args) == 1: + try: + shape_iter = iter(args[0]) + except TypeError: + new_shape = (operator.index(args[0]), ) + else: + new_shape = tuple(operator.index(arg) for arg in shape_iter) + else: + new_shape = tuple(operator.index(arg) for arg in args) + + if current_shape is None: + if len(new_shape) != 2: + raise ValueError('shape must be a 2-tuple of positive integers') + elif new_shape[0] < 0 or new_shape[1] < 0: + raise ValueError("'shape' elements cannot be negative") + + else: + # Check the current size only if needed + current_size = np.prod(current_shape, dtype=int) + + # Check for negatives + negative_indexes = [i for i, x in enumerate(new_shape) if x < 0] + if len(negative_indexes) == 0: + new_size = np.prod(new_shape, dtype=int) + if new_size != current_size: + raise ValueError('cannot reshape array of size {} into shape {}' + .format(new_size, new_shape)) + elif len(negative_indexes) == 1: + skip = negative_indexes[0] + specified = np.prod(new_shape[0:skip] + new_shape[skip+1:]) + unspecified, remainder = divmod(current_size, specified) + if remainder != 0: + err_shape = tuple('newshape' if x < 0 else x for x in new_shape) + raise ValueError('cannot reshape array of size {} into shape {}' + ''.format(current_size, err_shape)) + new_shape = new_shape[0:skip] + (unspecified,) + new_shape[skip+1:] + else: + raise ValueError('can only specify one unknown dimension') + + # Add and remove ones like numpy.matrix.reshape + if len(new_shape) != 2: + new_shape = tuple(arg for arg in new_shape if arg != 1) + + if len(new_shape) == 0: + new_shape = (1, 1) + elif len(new_shape) == 1: + new_shape = (1, new_shape[0]) + + if len(new_shape) > 2: + raise ValueError('shape too large to be a matrix') + + return new_shape + + +def check_reshape_kwargs(kwargs): + """Unpack keyword arguments for reshape function. 
+ + This is useful because keyword arguments after star arguments are not + allowed in Python 2, but star keyword arguments are. This function unpacks + 'order' and 'copy' from the star keyword arguments (with defaults) and + throws an error for any remaining. + """ + + order = kwargs.pop('order', 'C') + copy = kwargs.pop('copy', False) + if kwargs: # Some unused kwargs remain + raise TypeError('reshape() got unexpected keywords arguments: {}' + .format(', '.join(kwargs.keys()))) + return order, copy + + +class IndexMixin(object): + """ + This class simply exists to hold the methods necessary for fancy indexing. + """ + def _slicetoarange(self, j, shape): + """ Given a slice object, use numpy arange to change it to a 1D + array. + """ + start, stop, step = j.indices(shape) + return np.arange(start, stop, step) + + def _unpack_index(self, index): + """ Parse index. Always return a tuple of the form (row, col). + Where row/col is a integer, slice, or array of integers. + """ + # First, check if indexing with single boolean matrix. + from .base import spmatrix # This feels dirty but... + if (isinstance(index, (spmatrix, np.ndarray)) and + (index.ndim == 2) and index.dtype.kind == 'b'): + return index.nonzero() + + # Parse any ellipses. + index = self._check_ellipsis(index) + + # Next, parse the tuple or object + if isinstance(index, tuple): + if len(index) == 2: + row, col = index + elif len(index) == 1: + row, col = index[0], slice(None) + else: + raise IndexError('invalid number of indices') + else: + row, col = index, slice(None) + + # Next, check for validity, or transform the index as needed. + row, col = self._check_boolean(row, col) + return row, col + + def _check_ellipsis(self, index): + """Process indices with Ellipsis. Returns modified index.""" + if index is Ellipsis: + return (slice(None), slice(None)) + elif isinstance(index, tuple): + # Find first ellipsis + for j, v in enumerate(index): + if v is Ellipsis: + first_ellipsis = j + break + else: + first_ellipsis = None + + # Expand the first one + if first_ellipsis is not None: + # Shortcuts + if len(index) == 1: + return (slice(None), slice(None)) + elif len(index) == 2: + if first_ellipsis == 0: + if index[1] is Ellipsis: + return (slice(None), slice(None)) + else: + return (slice(None), index[1]) + else: + return (index[0], slice(None)) + + # General case + tail = () + for v in index[first_ellipsis+1:]: + if v is not Ellipsis: + tail = tail + (v,) + nd = first_ellipsis + len(tail) + nslice = max(0, 2 - nd) + return index[:first_ellipsis] + (slice(None),)*nslice + tail + + return index + + def _check_boolean(self, row, col): + from .base import isspmatrix # ew... + # Supporting sparse boolean indexing with both row and col does + # not work because spmatrix.ndim is always 2. 
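+        # (A single 2-D boolean mask never reaches this point: _unpack_index
+        # above converts it straight away via index.nonzero().)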
+ if isspmatrix(row) or isspmatrix(col): + raise IndexError( + "Indexing with sparse matrices is not supported " + "except boolean indexing where matrix and index " + "are equal shapes.") + if isinstance(row, np.ndarray) and row.dtype.kind == 'b': + row = self._boolean_index_to_array(row) + if isinstance(col, np.ndarray) and col.dtype.kind == 'b': + col = self._boolean_index_to_array(col) + return row, col + + def _boolean_index_to_array(self, i): + if i.ndim > 1: + raise IndexError('invalid index shape') + return i.nonzero()[0] + + def _index_to_arrays(self, i, j): + i, j = self._check_boolean(i, j) + + i_slice = isinstance(i, slice) + if i_slice: + i = self._slicetoarange(i, self.shape[0])[:, None] + else: + i = np.atleast_1d(i) + + if isinstance(j, slice): + j = self._slicetoarange(j, self.shape[1])[None, :] + if i.ndim == 1: + i = i[:, None] + elif not i_slice: + raise IndexError('index returns 3-dim structure') + elif isscalarlike(j): + # row vector special case + j = np.atleast_1d(j) + if i.ndim == 1: + i, j = np.broadcast_arrays(i, j) + i = i[:, None] + j = j[:, None] + return i, j + else: + j = np.atleast_1d(j) + if i_slice and j.ndim > 1: + raise IndexError('index returns 3-dim structure') + + i, j = np.broadcast_arrays(i, j) + + if i.ndim == 1: + # return column vectors for 1-D indexing + i = i[None, :] + j = j[None, :] + elif i.ndim > 2: + raise IndexError("Index dimension must be <= 2") + + return i, j diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/sputils.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/sputils.pyc new file mode 100644 index 0000000..930a34a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/sputils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/__init__.pyc new file mode 100644 index 0000000..2202148 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/data/csc_py2.npz b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/data/csc_py2.npz new file mode 100644 index 0000000..83ee257 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/data/csc_py2.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/data/csc_py3.npz b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/data/csc_py3.npz new file mode 100644 index 0000000..73d086f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/data/csc_py3.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_base.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_base.py new file mode 100644 index 0000000..71dd3fd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_base.py @@ -0,0 +1,4683 @@ +# +# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others + +""" Test functions for sparse matrices. Each class in the "Matrix class +based tests" section become subclasses of the classes in the "Generic +tests" section. This is done by the functions in the "Tailored base +class for generic tests" section. 
+ +""" + +from __future__ import division, print_function, absolute_import + +__usage__ = """ +Build sparse: + python setup.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.sparse.test()' +Run tests if sparse is not installed: + python tests/test_base.py +""" + +import operator +import contextlib +import functools +from distutils.version import LooseVersion + +import numpy as np +from scipy._lib.six import xrange, zip as izip +from numpy import (arange, zeros, array, dot, matrix, asmatrix, asarray, + vstack, ndarray, transpose, diag, kron, inf, conjugate, + int8, ComplexWarning) + +import random +from numpy.testing import (assert_equal, assert_array_equal, + assert_array_almost_equal, assert_almost_equal, assert_, + assert_allclose) +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +import scipy.linalg + +import scipy.sparse as sparse +from scipy.sparse import (csc_matrix, csr_matrix, dok_matrix, + coo_matrix, lil_matrix, dia_matrix, bsr_matrix, + eye, isspmatrix, SparseEfficiencyWarning, issparse) +from scipy.sparse.sputils import supported_dtypes, isscalarlike, get_index_dtype +from scipy.sparse.linalg import splu, expm, inv + +from scipy._lib._version import NumpyVersion +from scipy._lib.decorator import decorator + +import pytest + + +def assert_in(member, collection, msg=None): + assert_(member in collection, msg=msg if msg is not None else "%r not found in %r" % (member, collection)) + + +def assert_array_equal_dtype(x, y, **kwargs): + assert_(x.dtype == y.dtype) + assert_array_equal(x, y, **kwargs) + + +# Only test matmul operator (A @ B) when available (Python 3.5+) +TEST_MATMUL = hasattr(operator, 'matmul') + +sup_complex = suppress_warnings() +sup_complex.filter(ComplexWarning) + + +def with_64bit_maxval_limit(maxval_limit=None, random=False, fixed_dtype=None, + downcast_maxval=None, assert_32bit=False): + """ + Monkeypatch the maxval threshold at which scipy.sparse switches to + 64-bit index arrays, or make it (pseudo-)random. 
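+
+    The patched get_index_dtype is installed in each scipy.sparse submodule
+    for the duration of the decorated test and restored afterwards.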
+ + """ + if maxval_limit is None: + maxval_limit = 10 + + if assert_32bit: + def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): + tp = get_index_dtype(arrays, maxval, check_contents) + assert_equal(np.iinfo(tp).max, np.iinfo(np.int32).max) + assert_(tp == np.int32 or tp == np.intc) + return tp + elif fixed_dtype is not None: + def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): + return fixed_dtype + elif random: + counter = np.random.RandomState(seed=1234) + + def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): + return (np.int32, np.int64)[counter.randint(2)] + else: + def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): + dtype = np.int32 + if maxval is not None: + if maxval > maxval_limit: + dtype = np.int64 + for arr in arrays: + arr = np.asarray(arr) + if arr.dtype > np.int32: + if check_contents: + if arr.size == 0: + # a bigger type not needed + continue + elif np.issubdtype(arr.dtype, np.integer): + maxval = arr.max() + minval = arr.min() + if minval >= -maxval_limit and maxval <= maxval_limit: + # a bigger type not needed + continue + dtype = np.int64 + return dtype + + if downcast_maxval is not None: + def new_downcast_intp_index(arr): + if arr.max() > downcast_maxval: + raise AssertionError("downcast limited") + return arr.astype(np.intp) + + @decorator + def deco(func, *a, **kw): + backup = [] + modules = [scipy.sparse.bsr, scipy.sparse.coo, scipy.sparse.csc, + scipy.sparse.csr, scipy.sparse.dia, scipy.sparse.dok, + scipy.sparse.lil, scipy.sparse.sputils, + scipy.sparse.compressed, scipy.sparse.construct] + try: + for mod in modules: + backup.append((mod, 'get_index_dtype', + getattr(mod, 'get_index_dtype', None))) + setattr(mod, 'get_index_dtype', new_get_index_dtype) + if downcast_maxval is not None: + backup.append((mod, 'downcast_intp_index', + getattr(mod, 'downcast_intp_index', None))) + setattr(mod, 'downcast_intp_index', new_downcast_intp_index) + return func(*a, **kw) + finally: + for mod, name, oldfunc in backup: + if oldfunc is not None: + setattr(mod, name, oldfunc) + + return deco + + +def todense(a): + if isinstance(a, np.ndarray) or isscalarlike(a): + return a + return a.todense() + + +class BinopTester(object): + # Custom type to test binary operations on sparse matrices. + + def __add__(self, mat): + return "matrix on the right" + + def __mul__(self, mat): + return "matrix on the right" + + def __sub__(self, mat): + return "matrix on the right" + + def __radd__(self, mat): + return "matrix on the left" + + def __rmul__(self, mat): + return "matrix on the left" + + def __rsub__(self, mat): + return "matrix on the left" + + def __matmul__(self, mat): + return "matrix on the right" + + def __rmatmul__(self, mat): + return "matrix on the left" + +class BinopTester_with_shape(object): + # Custom type to test binary operations on sparse matrices + # with object which has shape attribute. 
+ def __init__(self,shape): + self._shape = shape + + def shape(self): + return self._shape + + def ndim(self): + return len(self._shape) + + def __add__(self, mat): + return "matrix on the right" + + def __mul__(self, mat): + return "matrix on the right" + + def __sub__(self, mat): + return "matrix on the right" + + def __radd__(self, mat): + return "matrix on the left" + + def __rmul__(self, mat): + return "matrix on the left" + + def __rsub__(self, mat): + return "matrix on the left" + + def __matmul__(self, mat): + return "matrix on the right" + + def __rmatmul__(self, mat): + return "matrix on the left" + + +#------------------------------------------------------------------------------ +# Generic tests +#------------------------------------------------------------------------------ + + +# TODO check that spmatrix( ... , copy=X ) is respected +# TODO test prune +# TODO test has_sorted_indices +class _TestCommon(object): + """test common functionality shared by all sparse formats""" + math_dtypes = supported_dtypes + + @classmethod + def init_class(cls): + # Canonical data. + cls.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d') + cls.datsp = cls.spmatrix(cls.dat) + + # Some sparse and dense matrices with data for every supported + # dtype. + # This set union is a workaround for numpy#6295, which means that + # two np.int64 dtypes don't hash to the same value. + cls.checked_dtypes = set(supported_dtypes).union(cls.math_dtypes) + cls.dat_dtypes = {} + cls.datsp_dtypes = {} + for dtype in cls.checked_dtypes: + cls.dat_dtypes[dtype] = cls.dat.astype(dtype) + cls.datsp_dtypes[dtype] = cls.spmatrix(cls.dat.astype(dtype)) + + # Check that the original data is equivalent to the + # corresponding dat_dtypes & datsp_dtypes. + assert_equal(cls.dat, cls.dat_dtypes[np.float64]) + assert_equal(cls.datsp.todense(), + cls.datsp_dtypes[np.float64].todense()) + + def test_bool(self): + def check(dtype): + datsp = self.datsp_dtypes[dtype] + + assert_raises(ValueError, bool, datsp) + assert_(self.spmatrix([1])) + assert_(not self.spmatrix([0])) + + if isinstance(self, TestDOK): + pytest.skip("Cannot create a rank <= 2 DOK matrix.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_bool_rollover(self): + # bool's underlying dtype is 1 byte, check that it does not + # rollover True -> False at 256. 
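+        # Ten doublings sum the single True entry 2**10 == 1024 times, so a
+        # 1-byte accumulator wrapping at 256 would land back on zero here.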
+ dat = np.matrix([[True, False]]) + datsp = self.spmatrix(dat) + + for _ in range(10): + datsp = datsp + datsp + dat = dat + dat + assert_array_equal(dat, datsp.todense()) + + def test_eq(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datbsr = bsr_matrix(dat) + datcsr = csr_matrix(dat) + datcsc = csc_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat == dat2, (datsp == datsp2).todense()) + # mix sparse types + assert_array_equal_dtype(dat == dat2, (datbsr == datsp2).todense()) + assert_array_equal_dtype(dat == dat2, (datcsr == datsp2).todense()) + assert_array_equal_dtype(dat == dat2, (datcsc == datsp2).todense()) + assert_array_equal_dtype(dat == dat2, (datlil == datsp2).todense()) + # sparse/dense + assert_array_equal_dtype(dat == datsp2, datsp2 == dat) + # sparse/scalar + assert_array_equal_dtype(dat == 0, (datsp == 0).todense()) + assert_array_equal_dtype(dat == 1, (datsp == 1).todense()) + assert_array_equal_dtype(dat == np.nan, + (datsp == np.nan).todense()) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_ne(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat != dat2, (datsp != datsp2).todense()) + # mix sparse types + assert_array_equal_dtype(dat != dat2, (datbsr != datsp2).todense()) + assert_array_equal_dtype(dat != dat2, (datcsc != datsp2).todense()) + assert_array_equal_dtype(dat != dat2, (datcsr != datsp2).todense()) + assert_array_equal_dtype(dat != dat2, (datlil != datsp2).todense()) + # sparse/dense + assert_array_equal_dtype(dat != datsp2, datsp2 != dat) + # sparse/scalar + assert_array_equal_dtype(dat != 0, (datsp != 0).todense()) + assert_array_equal_dtype(dat != 1, (datsp != 1).todense()) + assert_array_equal_dtype(0 != dat, (0 != datsp).todense()) + assert_array_equal_dtype(1 != dat, (1 != datsp).todense()) + assert_array_equal_dtype(dat != np.nan, + (datsp != np.nan).todense()) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_lt(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datcomplex = dat.astype(complex) + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spmatrix(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat < dat2, (datsp < datsp2).todense()) + assert_array_equal_dtype(datcomplex < dat2, + (datspcomplex < datsp2).todense()) + # mix sparse types + assert_array_equal_dtype(dat < dat2, (datbsr < datsp2).todense()) + assert_array_equal_dtype(dat < dat2, 
(datcsc < datsp2).todense()) + assert_array_equal_dtype(dat < dat2, (datcsr < datsp2).todense()) + assert_array_equal_dtype(dat < dat2, (datlil < datsp2).todense()) + + assert_array_equal_dtype(dat2 < dat, (datsp2 < datbsr).todense()) + assert_array_equal_dtype(dat2 < dat, (datsp2 < datcsc).todense()) + assert_array_equal_dtype(dat2 < dat, (datsp2 < datcsr).todense()) + assert_array_equal_dtype(dat2 < dat, (datsp2 < datlil).todense()) + # sparse/dense + assert_array_equal_dtype(dat < dat2, datsp < dat2) + assert_array_equal_dtype(datcomplex < dat2, datspcomplex < dat2) + # sparse/scalar + assert_array_equal_dtype((datsp < 2).todense(), dat < 2) + assert_array_equal_dtype((datsp < 1).todense(), dat < 1) + assert_array_equal_dtype((datsp < 0).todense(), dat < 0) + assert_array_equal_dtype((datsp < -1).todense(), dat < -1) + assert_array_equal_dtype((datsp < -2).todense(), dat < -2) + with np.errstate(invalid='ignore'): + assert_array_equal_dtype((datsp < np.nan).todense(), + dat < np.nan) + + assert_array_equal_dtype((2 < datsp).todense(), 2 < dat) + assert_array_equal_dtype((1 < datsp).todense(), 1 < dat) + assert_array_equal_dtype((0 < datsp).todense(), 0 < dat) + assert_array_equal_dtype((-1 < datsp).todense(), -1 < dat) + assert_array_equal_dtype((-2 < datsp).todense(), -2 < dat) + + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + + # dense rhs + assert_array_equal_dtype(dat < datsp2, datsp < dat2) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_gt(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datcomplex = dat.astype(complex) + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spmatrix(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat > dat2, (datsp > datsp2).todense()) + assert_array_equal_dtype(datcomplex > dat2, + (datspcomplex > datsp2).todense()) + # mix sparse types + assert_array_equal_dtype(dat > dat2, (datbsr > datsp2).todense()) + assert_array_equal_dtype(dat > dat2, (datcsc > datsp2).todense()) + assert_array_equal_dtype(dat > dat2, (datcsr > datsp2).todense()) + assert_array_equal_dtype(dat > dat2, (datlil > datsp2).todense()) + + assert_array_equal_dtype(dat2 > dat, (datsp2 > datbsr).todense()) + assert_array_equal_dtype(dat2 > dat, (datsp2 > datcsc).todense()) + assert_array_equal_dtype(dat2 > dat, (datsp2 > datcsr).todense()) + assert_array_equal_dtype(dat2 > dat, (datsp2 > datlil).todense()) + # sparse/dense + assert_array_equal_dtype(dat > dat2, datsp > dat2) + assert_array_equal_dtype(datcomplex > dat2, datspcomplex > dat2) + # sparse/scalar + assert_array_equal_dtype((datsp > 2).todense(), dat > 2) + assert_array_equal_dtype((datsp > 1).todense(), dat > 1) + assert_array_equal_dtype((datsp > 0).todense(), dat > 0) + assert_array_equal_dtype((datsp > -1).todense(), dat > -1) + assert_array_equal_dtype((datsp > -2).todense(), dat > -2) + with np.errstate(invalid='ignore'): + assert_array_equal_dtype((datsp > np.nan).todense(), + dat > np.nan) + + assert_array_equal_dtype((2 > datsp).todense(), 2 > dat) + 
assert_array_equal_dtype((1 > datsp).todense(), 1 > dat) + assert_array_equal_dtype((0 > datsp).todense(), 0 > dat) + assert_array_equal_dtype((-1 > datsp).todense(), -1 > dat) + assert_array_equal_dtype((-2 > datsp).todense(), -2 > dat) + + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + + # dense rhs + assert_array_equal_dtype(dat > datsp2, datsp > dat2) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_le(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datcomplex = dat.astype(complex) + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spmatrix(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat <= dat2, (datsp <= datsp2).todense()) + assert_array_equal_dtype(datcomplex <= dat2, + (datspcomplex <= datsp2).todense()) + # mix sparse types + assert_array_equal_dtype((datbsr <= datsp2).todense(), dat <= dat2) + assert_array_equal_dtype((datcsc <= datsp2).todense(), dat <= dat2) + assert_array_equal_dtype((datcsr <= datsp2).todense(), dat <= dat2) + assert_array_equal_dtype((datlil <= datsp2).todense(), dat <= dat2) + + assert_array_equal_dtype((datsp2 <= datbsr).todense(), dat2 <= dat) + assert_array_equal_dtype((datsp2 <= datcsc).todense(), dat2 <= dat) + assert_array_equal_dtype((datsp2 <= datcsr).todense(), dat2 <= dat) + assert_array_equal_dtype((datsp2 <= datlil).todense(), dat2 <= dat) + # sparse/dense + assert_array_equal_dtype(datsp <= dat2, dat <= dat2) + assert_array_equal_dtype(datspcomplex <= dat2, datcomplex <= dat2) + # sparse/scalar + assert_array_equal_dtype((datsp <= 2).todense(), dat <= 2) + assert_array_equal_dtype((datsp <= 1).todense(), dat <= 1) + assert_array_equal_dtype((datsp <= -1).todense(), dat <= -1) + assert_array_equal_dtype((datsp <= -2).todense(), dat <= -2) + + assert_array_equal_dtype((2 <= datsp).todense(), 2 <= dat) + assert_array_equal_dtype((1 <= datsp).todense(), 1 <= dat) + assert_array_equal_dtype((-1 <= datsp).todense(), -1 <= dat) + assert_array_equal_dtype((-2 <= datsp).todense(), -2 <= dat) + + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + + # dense rhs + assert_array_equal_dtype(dat <= datsp2, datsp <= dat2) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_ge(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datcomplex = dat.astype(complex) + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spmatrix(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat >= dat2, (datsp >= datsp2).todense()) + assert_array_equal_dtype(datcomplex >= dat2, + 
(datspcomplex >= datsp2).todense()) + # mix sparse types + assert_array_equal_dtype((datbsr >= datsp2).todense(), dat >= dat2) + assert_array_equal_dtype((datcsc >= datsp2).todense(), dat >= dat2) + assert_array_equal_dtype((datcsr >= datsp2).todense(), dat >= dat2) + assert_array_equal_dtype((datlil >= datsp2).todense(), dat >= dat2) + + assert_array_equal_dtype((datsp2 >= datbsr).todense(), dat2 >= dat) + assert_array_equal_dtype((datsp2 >= datcsc).todense(), dat2 >= dat) + assert_array_equal_dtype((datsp2 >= datcsr).todense(), dat2 >= dat) + assert_array_equal_dtype((datsp2 >= datlil).todense(), dat2 >= dat) + # sparse/dense + assert_array_equal_dtype(datsp >= dat2, dat >= dat2) + assert_array_equal_dtype(datspcomplex >= dat2, datcomplex >= dat2) + # sparse/scalar + assert_array_equal_dtype((datsp >= 2).todense(), dat >= 2) + assert_array_equal_dtype((datsp >= 1).todense(), dat >= 1) + assert_array_equal_dtype((datsp >= -1).todense(), dat >= -1) + assert_array_equal_dtype((datsp >= -2).todense(), dat >= -2) + + assert_array_equal_dtype((2 >= datsp).todense(), 2 >= dat) + assert_array_equal_dtype((1 >= datsp).todense(), 1 >= dat) + assert_array_equal_dtype((-1 >= datsp).todense(), -1 >= dat) + assert_array_equal_dtype((-2 >= datsp).todense(), -2 >= dat) + + # dense data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + + # dense rhs + assert_array_equal_dtype(dat >= datsp2, datsp >= dat2) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_empty(self): + # create empty matrices + assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3))) + assert_equal(self.spmatrix((3,3)).nnz, 0) + assert_equal(self.spmatrix((3,3)).count_nonzero(), 0) + + def test_count_nonzero(self): + expected = np.count_nonzero(self.datsp.toarray()) + assert_equal(self.datsp.count_nonzero(), expected) + assert_equal(self.datsp.T.count_nonzero(), expected) + + def test_invalid_shapes(self): + assert_raises(ValueError, self.spmatrix, (-1,3)) + assert_raises(ValueError, self.spmatrix, (3,-1)) + assert_raises(ValueError, self.spmatrix, (-1,-1)) + + def test_repr(self): + repr(self.datsp) + + def test_str(self): + str(self.datsp) + + def test_empty_arithmetic(self): + # Test manipulating empty matrices. 
Fails in SciPy SVN <= r1768 + shape = (5, 5) + for mytype in [np.dtype('int32'), np.dtype('float32'), + np.dtype('float64'), np.dtype('complex64'), + np.dtype('complex128')]: + a = self.spmatrix(shape, dtype=mytype) + b = a + a + c = 2 * a + d = a * a.tocsc() + e = a * a.tocsr() + f = a * a.tocoo() + for m in [a,b,c,d,e,f]: + assert_equal(m.A, a.A*a.A) + # These fail in all revisions <= r1768: + assert_equal(m.dtype,mytype) + assert_equal(m.A.dtype,mytype) + + def test_abs(self): + A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d') + assert_equal(abs(A),abs(self.spmatrix(A)).todense()) + + def test_elementwise_power(self): + A = matrix([[-4, -3, -2],[-1, 0, 1],[2, 3, 4]], 'd') + assert_equal(np.power(A, 2), self.spmatrix(A).power(2).todense()) + + #it's element-wise power function, input has to be a scalar + assert_raises(NotImplementedError, self.spmatrix(A).power, A) + + def test_neg(self): + A = matrix([[-1, 0, 17], [0, -5, 0], [1, -4, 0], [0, 0, 0]], 'd') + assert_equal(-A, (-self.spmatrix(A)).todense()) + + # see gh-5843 + A = matrix([[True, False, False], [False, False, True]]) + assert_raises(NotImplementedError, self.spmatrix(A).__neg__) + + def test_real(self): + D = matrix([[1 + 3j, 2 - 4j]]) + A = self.spmatrix(D) + assert_equal(A.real.todense(),D.real) + + def test_imag(self): + D = matrix([[1 + 3j, 2 - 4j]]) + A = self.spmatrix(D) + assert_equal(A.imag.todense(),D.imag) + + def test_diagonal(self): + # Does the matrix's .diagonal() method work? + mats = [] + mats.append([[1,0,2]]) + mats.append([[1],[0],[2]]) + mats.append([[0,1],[0,2],[0,3]]) + mats.append([[0,0,1],[0,0,2],[0,3,0]]) + + mats.append(kron(mats[0],[[1,2]])) + mats.append(kron(mats[0],[[1],[2]])) + mats.append(kron(mats[1],[[1,2],[3,4]])) + mats.append(kron(mats[2],[[1,2],[3,4]])) + mats.append(kron(mats[3],[[1,2],[3,4]])) + mats.append(kron(mats[3],[[1,2,3,4]])) + + for m in mats: + rows, cols = array(m).shape + sparse_mat = self.spmatrix(m) + for k in range(-rows + 1, cols): + assert_equal(sparse_mat.diagonal(k=k), diag(m, k=k)) + assert_raises(ValueError, sparse_mat.diagonal, -rows) + assert_raises(ValueError, sparse_mat.diagonal, cols) + + # Test all-zero matrix. + assert_equal(self.spmatrix((40, 16130)).diagonal(), np.zeros(40)) + + def test_reshape(self): + # This first example is taken from the lil_matrix reshaping test. 
+ x = self.spmatrix([[1, 0, 7], [0, 0, 0], [0, 3, 0], [0, 0, 5]]) + for order in ['C', 'F']: + for s in [(12, 1), (1, 12)]: + assert_array_equal(x.reshape(s, order=order).todense(), + x.todense().reshape(s, order=order)) + + # This example is taken from the stackoverflow answer at + # https://stackoverflow.com/q/16511879 + x = self.spmatrix([[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]]) + y = x.reshape((2, 6)) # Default order is 'C' + desired = [[0, 10, 0, 0, 0, 0], [0, 0, 0, 20, 30, 40]] + assert_array_equal(y.A, desired) + + # Reshape with negative indexes + y = x.reshape((2, -1)) + assert_array_equal(y.A, desired) + y = x.reshape((-1, 6)) + assert_array_equal(y.A, desired) + assert_raises(ValueError, x.reshape, (-1, -1)) + + # Reshape with star args + y = x.reshape(2, 6) + assert_array_equal(y.A, desired) + assert_raises(TypeError, x.reshape, 2, 6, not_an_arg=1) + + # Reshape with same size is noop unless copy=True + y = x.reshape((3, 4)) + assert_(y is x) + y = x.reshape((3, 4), copy=True) + assert_(y is not x) + + # Ensure reshape did not alter original size + assert_array_equal(x.shape, (3, 4)) + + # Reshape in place + x.shape = (2, 6) + assert_array_equal(x.A, desired) + + @pytest.mark.slow + def test_setdiag_comprehensive(self): + def dense_setdiag(a, v, k): + v = np.asarray(v) + if k >= 0: + n = min(a.shape[0], a.shape[1] - k) + if v.ndim != 0: + n = min(n, len(v)) + v = v[:n] + i = np.arange(0, n) + j = np.arange(k, k + n) + a[i,j] = v + elif k < 0: + dense_setdiag(a.T, v, -k) + + def check_setdiag(a, b, k): + # Check setting diagonal using a scalar, a vector of + # correct length, and too short or too long vectors + for r in [-1, len(np.diag(a, k)), 2, 30]: + if r < 0: + v = int(np.random.randint(1, 20, size=1)) + else: + v = np.random.randint(1, 20, size=r) + + dense_setdiag(a, v, k) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive") + b.setdiag(v, k) + + # check that dense_setdiag worked + d = np.diag(a, k) + if np.asarray(v).ndim == 0: + assert_array_equal(d, v, err_msg="%s %d" % (msg, r)) + else: + n = min(len(d), len(v)) + assert_array_equal(d[:n], v[:n], err_msg="%s %d" % (msg, r)) + # check that sparse setdiag worked + assert_array_equal(b.A, a, err_msg="%s %d" % (msg, r)) + + # comprehensive test + np.random.seed(1234) + shapes = [(0,5), (5,0), (1,5), (5,1), (5,5)] + for dtype in [np.int8, np.float64]: + for m,n in shapes: + ks = np.arange(-m+1, n-1) + for k in ks: + msg = repr((dtype, m, n, k)) + a = np.zeros((m, n), dtype=dtype) + b = self.spmatrix((m, n), dtype=dtype) + + check_setdiag(a, b, k) + + # check overwriting etc + for k2 in np.random.choice(ks, size=min(len(ks), 5)): + check_setdiag(a, b, k2) + + def test_setdiag(self): + # simple test cases + m = self.spmatrix(np.eye(3)) + values = [3, 2, 1] + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + assert_raises(ValueError, m.setdiag, values, k=4) + m.setdiag(values) + assert_array_equal(m.diagonal(), values) + m.setdiag(values, k=1) + assert_array_equal(m.A, np.array([[3, 3, 0], + [0, 2, 2], + [0, 0, 1]])) + m.setdiag(values, k=-2) + assert_array_equal(m.A, np.array([[3, 3, 0], + [0, 2, 2], + [3, 0, 1]])) + m.setdiag((9,), k=2) + assert_array_equal(m.A[0,2], 9) + m.setdiag((9,), k=-2) + assert_array_equal(m.A[2,0], 9) + + def test_nonzero(self): + A = array([[1, 0, 1],[0, 1, 1],[0, 0, 1]]) + Asp = self.spmatrix(A) + + A_nz = 
set([tuple(ij) for ij in transpose(A.nonzero())])
+        Asp_nz = set([tuple(ij) for ij in transpose(Asp.nonzero())])
+
+        assert_equal(A_nz, Asp_nz)
+
+    def test_numpy_nonzero(self):
+        # See gh-5987
+        A = array([[1, 0, 1], [0, 1, 1], [0, 0, 1]])
+        Asp = self.spmatrix(A)
+
+        A_nz = set([tuple(ij) for ij in transpose(np.nonzero(A))])
+        Asp_nz = set([tuple(ij) for ij in transpose(np.nonzero(Asp))])
+
+        assert_equal(A_nz, Asp_nz)
+
+    def test_getrow(self):
+        assert_array_equal(self.datsp.getrow(1).todense(), self.dat[1,:])
+        assert_array_equal(self.datsp.getrow(-1).todense(), self.dat[-1,:])
+
+    def test_getcol(self):
+        assert_array_equal(self.datsp.getcol(1).todense(), self.dat[:,1])
+        assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1])
+
+    def test_sum(self):
+        np.random.seed(1234)
+        dat_1 = np.matrix([[0, 1, 2],
+                           [3, -4, 5],
+                           [-6, 7, 9]])
+        dat_2 = np.random.rand(5, 5)
+        dat_3 = np.array([[]])
+        dat_4 = np.zeros((40, 40))
+        dat_5 = sparse.rand(5, 5, density=1e-2).A
+        matrices = [dat_1, dat_2, dat_3, dat_4, dat_5]
+
+        def check(dtype, j):
+            dat = np.matrix(matrices[j], dtype=dtype)
+            datsp = self.spmatrix(dat, dtype=dtype)
+            with np.errstate(over='ignore'):
+                assert_array_almost_equal(dat.sum(), datsp.sum())
+                assert_equal(dat.sum().dtype, datsp.sum().dtype)
+                assert_(np.isscalar(datsp.sum(axis=None)))
+                assert_array_almost_equal(dat.sum(axis=None),
+                                          datsp.sum(axis=None))
+                assert_equal(dat.sum(axis=None).dtype,
+                             datsp.sum(axis=None).dtype)
+                assert_array_almost_equal(dat.sum(axis=0), datsp.sum(axis=0))
+                assert_equal(dat.sum(axis=0).dtype, datsp.sum(axis=0).dtype)
+                assert_array_almost_equal(dat.sum(axis=1), datsp.sum(axis=1))
+                assert_equal(dat.sum(axis=1).dtype, datsp.sum(axis=1).dtype)
+                assert_array_almost_equal(dat.sum(axis=-2), datsp.sum(axis=-2))
+                assert_equal(dat.sum(axis=-2).dtype, datsp.sum(axis=-2).dtype)
+                assert_array_almost_equal(dat.sum(axis=-1), datsp.sum(axis=-1))
+                assert_equal(dat.sum(axis=-1).dtype, datsp.sum(axis=-1).dtype)
+
+        for dtype in self.checked_dtypes:
+            for j in range(len(matrices)):
+                check(dtype, j)
+
+    def test_sum_invalid_params(self):
+        out = np.asmatrix(np.zeros((1, 3)))
+        dat = np.matrix([[0, 1, 2],
+                         [3, -4, 5],
+                         [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        assert_raises(ValueError, datsp.sum, axis=3)
+        assert_raises(TypeError, datsp.sum, axis=(0, 1))
+        assert_raises(TypeError, datsp.sum, axis=1.5)
+        assert_raises(ValueError, datsp.sum, axis=1, out=out)
+
+    def test_sum_dtype(self):
+        dat = np.matrix([[0, 1, 2],
+                         [3, -4, 5],
+                         [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        def check(dtype):
+            dat_sum = dat.sum(dtype=dtype)
+            datsp_sum = datsp.sum(dtype=dtype)
+
+            assert_array_almost_equal(dat_sum, datsp_sum)
+            assert_equal(dat_sum.dtype, datsp_sum.dtype)
+
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_sum_out(self):
+        dat = np.matrix([[0, 1, 2],
+                         [3, -4, 5],
+                         [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        dat_out = np.matrix(0)
+        datsp_out = np.matrix(0)
+
+        dat.sum(out=dat_out)
+        datsp.sum(out=datsp_out)
+        assert_array_almost_equal(dat_out, datsp_out)
+
+        dat_out = np.asmatrix(np.zeros((3, 1)))
+        datsp_out = np.asmatrix(np.zeros((3, 1)))
+
+        dat.sum(axis=1, out=dat_out)
+        datsp.sum(axis=1, out=datsp_out)
+        assert_array_almost_equal(dat_out, datsp_out)
+
+    def test_numpy_sum(self):
+        # See gh-5987
+        dat = np.matrix([[0, 1, 2],
+                         [3, -4, 5],
+                         [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        dat_mean = np.sum(dat)
+        datsp_mean = np.sum(datsp)
+
+        assert_array_almost_equal(dat_mean, datsp_mean)
+
assert_equal(dat_mean.dtype, datsp_mean.dtype) + + def test_mean(self): + def check(dtype): + dat = np.matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]], dtype=dtype) + datsp = self.spmatrix(dat, dtype=dtype) + + assert_array_almost_equal(dat.mean(), datsp.mean()) + assert_equal(dat.mean().dtype, datsp.mean().dtype) + assert_(np.isscalar(datsp.mean(axis=None))) + assert_array_almost_equal(dat.mean(axis=None), datsp.mean(axis=None)) + assert_equal(dat.mean(axis=None).dtype, datsp.mean(axis=None).dtype) + assert_array_almost_equal(dat.mean(axis=0), datsp.mean(axis=0)) + assert_equal(dat.mean(axis=0).dtype, datsp.mean(axis=0).dtype) + assert_array_almost_equal(dat.mean(axis=1), datsp.mean(axis=1)) + assert_equal(dat.mean(axis=1).dtype, datsp.mean(axis=1).dtype) + assert_array_almost_equal(dat.mean(axis=-2), datsp.mean(axis=-2)) + assert_equal(dat.mean(axis=-2).dtype, datsp.mean(axis=-2).dtype) + assert_array_almost_equal(dat.mean(axis=-1), datsp.mean(axis=-1)) + assert_equal(dat.mean(axis=-1).dtype, datsp.mean(axis=-1).dtype) + + for dtype in self.checked_dtypes: + check(dtype) + + def test_mean_invalid_params(self): + out = np.asmatrix(np.zeros((1, 3))) + dat = np.matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spmatrix(dat) + + assert_raises(ValueError, datsp.mean, axis=3) + assert_raises(TypeError, datsp.mean, axis=(0, 1)) + assert_raises(TypeError, datsp.mean, axis=1.5) + assert_raises(ValueError, datsp.mean, axis=1, out=out) + + def test_mean_dtype(self): + dat = np.matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spmatrix(dat) + + def check(dtype): + dat_mean = dat.mean(dtype=dtype) + datsp_mean = datsp.mean(dtype=dtype) + + assert_array_almost_equal(dat_mean, datsp_mean) + assert_equal(dat_mean.dtype, datsp_mean.dtype) + + for dtype in self.checked_dtypes: + check(dtype) + + def test_mean_out(self): + dat = np.matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spmatrix(dat) + + dat_out = np.matrix(0) + datsp_out = np.matrix(0) + + dat.mean(out=dat_out) + datsp.mean(out=datsp_out) + assert_array_almost_equal(dat_out, datsp_out) + + dat_out = np.asmatrix(np.zeros((3, 1))) + datsp_out = np.asmatrix(np.zeros((3, 1))) + + dat.mean(axis=1, out=dat_out) + datsp.mean(axis=1, out=datsp_out) + assert_array_almost_equal(dat_out, datsp_out) + + def test_numpy_mean(self): + # See gh-5987 + dat = np.matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spmatrix(dat) + + dat_mean = np.mean(dat) + datsp_mean = np.mean(datsp) + + assert_array_almost_equal(dat_mean, datsp_mean) + assert_equal(dat_mean.dtype, datsp_mean.dtype) + + def test_expm(self): + M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) + sM = self.spmatrix(M, shape=(3,3), dtype=float) + Mexp = scipy.linalg.expm(M) + + N = array([[3., 0., 1.], [0., 2., 0.], [0., 0., 0.]]) + sN = self.spmatrix(N, shape=(3,3), dtype=float) + Nexp = scipy.linalg.expm(N) + + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format") + sup.filter(SparseEfficiencyWarning, + "spsolve is more efficient when sparse b is in the CSC matrix format") + sup.filter(SparseEfficiencyWarning, + "spsolve requires A be CSC or CSR matrix format") + sMexp = expm(sM).todense() + sNexp = expm(sN).todense() + + assert_array_almost_equal((sMexp - Mexp), zeros((3, 3))) + assert_array_almost_equal((sNexp - Nexp), zeros((3, 3))) + + def test_inv(self): + def check(dtype): + M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], dtype) + with suppress_warnings() as sup: + 
sup.filter(SparseEfficiencyWarning,
+                           "spsolve requires A be CSC or CSR matrix format")
+                sup.filter(SparseEfficiencyWarning,
+                           "spsolve is more efficient when sparse b is in the CSC matrix format")
+                sup.filter(SparseEfficiencyWarning,
+                           "splu requires CSC matrix format")
+                sM = self.spmatrix(M, shape=(3,3), dtype=dtype)
+                sMinv = inv(sM)
+            assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3))
+            assert_raises(TypeError, inv, M)
+        for dtype in [float]:
+            check(dtype)
+
+    @sup_complex
+    def test_from_array(self):
+        A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
+        assert_array_equal(self.spmatrix(A).toarray(), A)
+
+        A = array([[1.0 + 3j, 0, 0],
+                   [0, 2.0 + 5j, 0],
+                   [0, 0, 0]])
+        assert_array_equal(self.spmatrix(A).toarray(), A)
+        assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
+
+    @sup_complex
+    def test_from_matrix(self):
+        A = matrix([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
+        assert_array_equal(self.spmatrix(A).todense(), A)
+
+        A = matrix([[1.0 + 3j, 0, 0],
+                    [0, 2.0 + 5j, 0],
+                    [0, 0, 0]])
+        assert_array_equal(self.spmatrix(A).toarray(), A)
+        assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
+
+    @sup_complex
+    def test_from_list(self):
+        A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]]
+        assert_array_equal(self.spmatrix(A).todense(), A)
+
+        A = [[1.0 + 3j, 0, 0],
+             [0, 2.0 + 5j, 0],
+             [0, 0, 0]]
+        assert_array_equal(self.spmatrix(A).toarray(), array(A))
+        assert_array_equal(self.spmatrix(A, dtype='int16').todense(), array(A).astype('int16'))
+
+    @sup_complex
+    def test_from_sparse(self):
+        D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
+        S = csr_matrix(D)
+        assert_array_equal(self.spmatrix(S).toarray(), D)
+        S = self.spmatrix(D)
+        assert_array_equal(self.spmatrix(S).toarray(), D)
+
+        D = array([[1.0 + 3j, 0, 0],
+                   [0, 2.0 + 5j, 0],
+                   [0, 0, 0]])
+        S = csr_matrix(D)
+        assert_array_equal(self.spmatrix(S).toarray(), D)
+        assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
+        S = self.spmatrix(D)
+        assert_array_equal(self.spmatrix(S).toarray(), D)
+        assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
+
+    # def test_array(self):
+    #    """test array(A) where A is in sparse format"""
+    #    assert_equal( array(self.datsp), self.dat )
+
+    def test_todense(self):
+        # Check C- or F-contiguous (default).
+        chk = self.datsp.todense()
+        assert_array_equal(chk, self.dat)
+        assert_(chk.flags.c_contiguous != chk.flags.f_contiguous)
+        # Check C-contiguous (with arg).
+        chk = self.datsp.todense(order='C')
+        assert_array_equal(chk, self.dat)
+        assert_(chk.flags.c_contiguous)
+        assert_(not chk.flags.f_contiguous)
+        # Check F-contiguous (with arg).
+        chk = self.datsp.todense(order='F')
+        assert_array_equal(chk, self.dat)
+        assert_(not chk.flags.c_contiguous)
+        assert_(chk.flags.f_contiguous)
+        # Check with out argument (array).
+        out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
+        chk = self.datsp.todense(out=out)
+        assert_array_equal(self.dat, out)
+        assert_array_equal(self.dat, chk)
+        assert_(chk.base is out)
+        # Check with out array (matrix).
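+        # Editorial note on the `out` semantics pinned down by the
+        # assertions around here: with an ndarray `out`, the np.matrix
+        # returned by todense() is a view whose `.base` is `out`, while
+        # with a np.matrix `out` (below) the same object is handed back,
+        # i.e. `chk is out`.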
+ out = np.asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype)) + chk = self.datsp.todense(out=out) + assert_array_equal(self.dat, out) + assert_array_equal(self.dat, chk) + assert_(chk is out) + a = matrix([1.,2.,3.]) + dense_dot_dense = a * self.dat + check = a * self.datsp.todense() + assert_array_equal(dense_dot_dense, check) + b = matrix([1.,2.,3.,4.]).T + dense_dot_dense = self.dat * b + check2 = self.datsp.todense() * b + assert_array_equal(dense_dot_dense, check2) + # Check bool data works. + spbool = self.spmatrix(self.dat, dtype=bool) + matbool = self.dat.astype(bool) + assert_array_equal(spbool.todense(), matbool) + + def test_toarray(self): + # Check C- or F-contiguous (default). + dat = asarray(self.dat) + chk = self.datsp.toarray() + assert_array_equal(chk, dat) + assert_(chk.flags.c_contiguous != chk.flags.f_contiguous) + # Check C-contiguous (with arg). + chk = self.datsp.toarray(order='C') + assert_array_equal(chk, dat) + assert_(chk.flags.c_contiguous) + assert_(not chk.flags.f_contiguous) + # Check F-contiguous (with arg). + chk = self.datsp.toarray(order='F') + assert_array_equal(chk, dat) + assert_(not chk.flags.c_contiguous) + assert_(chk.flags.f_contiguous) + # Check with output arg. + out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype) + self.datsp.toarray(out=out) + assert_array_equal(chk, dat) + # Check that things are fine when we don't initialize with zeros. + out[...] = 1. + self.datsp.toarray(out=out) + assert_array_equal(chk, dat) + a = array([1.,2.,3.]) + dense_dot_dense = dot(a, dat) + check = dot(a, self.datsp.toarray()) + assert_array_equal(dense_dot_dense, check) + b = array([1.,2.,3.,4.]) + dense_dot_dense = dot(dat, b) + check2 = dot(self.datsp.toarray(), b) + assert_array_equal(dense_dot_dense, check2) + # Check bool data works. 
+ spbool = self.spmatrix(self.dat, dtype=bool) + arrbool = dat.astype(bool) + assert_array_equal(spbool.toarray(), arrbool) + + @sup_complex + def test_astype(self): + D = array([[2.0 + 3j, 0, 0], + [0, 4.0 + 5j, 0], + [0, 0, 0]]) + S = self.spmatrix(D) + + for x in supported_dtypes: + # Check correctly casted + D_casted = D.astype(x) + for copy in (True, False): + S_casted = S.astype(x, copy=copy) + assert_equal(S_casted.dtype, D_casted.dtype) # correct type + assert_equal(S_casted.toarray(), D_casted) # correct values + assert_equal(S_casted.format, S.format) # format preserved + # Check correctly copied + assert_(S_casted.astype(x, copy=False) is S_casted) + S_copied = S_casted.astype(x, copy=True) + assert_(S_copied is not S_casted) + + def check_equal_but_not_same_array_attribute(attribute): + a = getattr(S_casted, attribute) + b = getattr(S_copied, attribute) + assert_array_equal(a, b) + assert_(a is not b) + i = (0,) * b.ndim + b_i = b[i] + b[i] = not b[i] + assert_(a[i] != b[i]) + b[i] = b_i + + if S_casted.format in ('csr', 'csc', 'bsr'): + for attribute in ('indices', 'indptr', 'data'): + check_equal_but_not_same_array_attribute(attribute) + elif S_casted.format == 'coo': + for attribute in ('row', 'col', 'data'): + check_equal_but_not_same_array_attribute(attribute) + elif S_casted.format == 'dia': + for attribute in ('offsets', 'data'): + check_equal_but_not_same_array_attribute(attribute) + + def test_asfptype(self): + A = self.spmatrix(arange(6,dtype='int32').reshape(2,3)) + + assert_equal(A.dtype, np.dtype('int32')) + assert_equal(A.asfptype().dtype, np.dtype('float64')) + assert_equal(A.asfptype().format, A.format) + assert_equal(A.astype('int16').asfptype().dtype, np.dtype('float32')) + assert_equal(A.astype('complex128').asfptype().dtype, np.dtype('complex128')) + + B = A.asfptype() + C = B.asfptype() + assert_(B is C) + + def test_mul_scalar(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + assert_array_equal(dat*2,(datsp*2).todense()) + assert_array_equal(dat*17.3,(datsp*17.3).todense()) + + for dtype in self.math_dtypes: + check(dtype) + + def test_rmul_scalar(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + assert_array_equal(2*dat,(2*datsp).todense()) + assert_array_equal(17.3*dat,(17.3*datsp).todense()) + + for dtype in self.math_dtypes: + check(dtype) + + def test_add(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + a = dat.copy() + a[0,2] = 2.0 + b = datsp + c = b + a + assert_array_equal(c, b.todense() + a) + + c = b + b.tocsr() + assert_array_equal(c.todense(), + b.todense() + b.todense()) + + # test broadcasting + c = b + a[0] + assert_array_equal(c, b.todense() + a[0]) + + for dtype in self.math_dtypes: + check(dtype) + + def test_radd(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + a = dat.copy() + a[0,2] = 2.0 + b = datsp + c = a + b + assert_array_equal(c, a + b.todense()) + + for dtype in self.math_dtypes: + check(dtype) + + def test_sub(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + assert_array_equal((datsp - datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + assert_array_equal((datsp - 0).todense(), dat) + + A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) + assert_array_equal((datsp - A).todense(),dat - A.todense()) + assert_array_equal((A - datsp).todense(),A.todense() - dat) + + # test 
broadcasting + assert_array_equal(datsp - dat[0], dat - dat[0]) + + for dtype in self.math_dtypes: + if dtype == np.dtype('bool'): + # boolean array subtraction deprecated in 1.9.0 + continue + + check(dtype) + + def test_rsub(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + assert_array_equal((dat - datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + assert_array_equal((datsp - dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + assert_array_equal((0 - datsp).todense(), -dat) + + A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) + assert_array_equal((dat - A),dat - A.todense()) + assert_array_equal((A - dat),A.todense() - dat) + assert_array_equal(A.todense() - datsp,A.todense() - dat) + assert_array_equal(datsp - A.todense(),dat - A.todense()) + + # test broadcasting + assert_array_equal(dat[0] - datsp, dat[0] - dat) + + for dtype in self.math_dtypes: + if dtype == np.dtype('bool'): + # boolean array subtraction deprecated in 1.9.0 + continue + + check(dtype) + + def test_add0(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + # Adding 0 to a sparse matrix + assert_array_equal((datsp + 0).todense(), dat) + # use sum (which takes 0 as a starting value) + sumS = sum([k * datsp for k in range(1, 3)]) + sumD = sum([k * dat for k in range(1, 3)]) + assert_almost_equal(sumS.todense(), sumD) + + for dtype in self.math_dtypes: + check(dtype) + + def test_elementwise_multiply(self): + # real/real + A = array([[4,0,9],[2,-3,5]]) + B = array([[0,7,0],[0,-4,0]]) + Asp = self.spmatrix(A) + Bsp = self.spmatrix(B) + assert_almost_equal(Asp.multiply(Bsp).todense(), A*B) # sparse/sparse + assert_almost_equal(Asp.multiply(B).todense(), A*B) # sparse/dense + + # complex/complex + C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]]) + D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]]) + Csp = self.spmatrix(C) + Dsp = self.spmatrix(D) + assert_almost_equal(Csp.multiply(Dsp).todense(), C*D) # sparse/sparse + assert_almost_equal(Csp.multiply(D).todense(), C*D) # sparse/dense + + # real/complex + assert_almost_equal(Asp.multiply(Dsp).todense(), A*D) # sparse/sparse + assert_almost_equal(Asp.multiply(D).todense(), A*D) # sparse/dense + + def test_elementwise_multiply_broadcast(self): + A = array([4]) + B = array([[-9]]) + C = array([1,-1,0]) + D = array([[7,9,-9]]) + E = array([[3],[2],[1]]) + F = array([[8,6,3],[-4,3,2],[6,6,6]]) + G = [1, 2, 3] + H = np.ones((3, 4)) + J = H.T + K = array([[0]]) + L = array([[[1,2],[0,1]]]) + + # Some arrays can't be cast as spmatrices (A,C,L) so leave + # them out. 
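+        # (Editorial note: L is 3-D, and scipy.sparse matrices are strictly
+        #  two-dimensional, so it cannot be represented sparsely at all;
+        #  A and C are 1-D, and the sparse constructors would reshape them
+        #  into 2-D rows, so they would no longer exercise the 1-D-operand
+        #  code paths this test is interested in.)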
+ Bsp = self.spmatrix(B) + Dsp = self.spmatrix(D) + Esp = self.spmatrix(E) + Fsp = self.spmatrix(F) + Hsp = self.spmatrix(H) + Hspp = self.spmatrix(H[0,None]) + Jsp = self.spmatrix(J) + Jspp = self.spmatrix(J[:,0,None]) + Ksp = self.spmatrix(K) + + matrices = [A, B, C, D, E, F, G, H, J, K, L] + spmatrices = [Bsp, Dsp, Esp, Fsp, Hsp, Hspp, Jsp, Jspp, Ksp] + + # sparse/sparse + for i in spmatrices: + for j in spmatrices: + try: + dense_mult = np.multiply(i.todense(), j.todense()) + except ValueError: + assert_raises(ValueError, i.multiply, j) + continue + sp_mult = i.multiply(j) + assert_almost_equal(sp_mult.todense(), dense_mult) + + # sparse/dense + for i in spmatrices: + for j in matrices: + try: + dense_mult = np.multiply(i.todense(), j) + except TypeError: + continue + except ValueError: + assert_raises(ValueError, i.multiply, j) + continue + sp_mult = i.multiply(j) + if isspmatrix(sp_mult): + assert_almost_equal(sp_mult.todense(), dense_mult) + else: + assert_almost_equal(sp_mult, dense_mult) + + def test_elementwise_divide(self): + expected = [[1,np.nan,np.nan,1], + [1,np.nan,1,np.nan], + [np.nan,1,np.nan,np.nan]] + assert_array_equal(todense(self.datsp / self.datsp),expected) + + denom = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) + expected = [[1,np.nan,np.nan,0.5], + [-3,np.nan,inf,np.nan], + [np.nan,0.25,np.nan,0]] + assert_array_equal(todense(self.datsp / denom), expected) + + # complex + A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]]) + B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]]) + Asp = self.spmatrix(A) + Bsp = self.spmatrix(B) + assert_almost_equal(todense(Asp / Bsp), A/B) + + # integer + A = array([[1,2,3],[-3,2,1]]) + B = array([[0,1,2],[0,-2,3]]) + Asp = self.spmatrix(A) + Bsp = self.spmatrix(B) + with np.errstate(divide='ignore'): + assert_array_equal(todense(Asp / Bsp), A / B) + + # mismatching sparsity patterns + A = array([[0,1],[1,0]]) + B = array([[1,0],[1,0]]) + Asp = self.spmatrix(A) + Bsp = self.spmatrix(B) + with np.errstate(divide='ignore', invalid='ignore'): + assert_array_equal(np.array(todense(Asp / Bsp)), A / B) + + def test_pow(self): + A = matrix([[1,0,2,0],[0,3,4,0],[0,5,0,0],[0,6,7,8]]) + B = self.spmatrix(A) + + for exponent in [0,1,2,3]: + assert_array_equal((B**exponent).todense(),A**exponent) + + # invalid exponents + for exponent in [-1, 2.2, 1 + 3j]: + assert_raises(Exception, B.__pow__, exponent) + + # nonsquare matrix + B = self.spmatrix(A[:3,:]) + assert_raises(Exception, B.__pow__, 1) + + def test_rmatvec(self): + M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) + assert_array_almost_equal([1,2,3,4]*M, dot([1,2,3,4], M.toarray())) + row = matrix([[1,2,3,4]]) + assert_array_almost_equal(row*M, row*M.todense()) + + def test_small_multiplication(self): + # test that A*x works for x with shape () (1,) (1,1) and (1,0) + A = self.spmatrix([[1],[2],[3]]) + + assert_(isspmatrix(A * array(1))) + assert_equal((A * array(1)).todense(), [[1],[2],[3]]) + assert_equal(A * array([1]), array([1,2,3])) + assert_equal(A * array([[1]]), array([[1],[2],[3]])) + assert_equal(A * np.ones((1,0)), np.ones((3,0))) + + def test_binop_custom_type(self): + # Non-regression test: previously, binary operations would raise + # NotImplementedError instead of returning NotImplemented + # (https://docs.python.org/library/constants.html#NotImplemented) + # so overloading Custom + matrix etc. didn't work. 
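+        # (Editorial sketch of the protocol being tested: returning
+        #  NotImplemented makes Python retry with the reflected method on
+        #  the other operand, roughly:
+        #
+        #      result = A.__add__(B)          # sparse matrix gives up politely
+        #      if result is NotImplemented:
+        #          result = B.__radd__(A)     # so BinopTester gets a turn
+        #
+        #  Raising NotImplementedError instead aborts that fallback, which
+        #  is the old bug this test guards against.)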
+ A = self.spmatrix([[1], [2], [3]]) + B = BinopTester() + assert_equal(A + B, "matrix on the left") + assert_equal(A - B, "matrix on the left") + assert_equal(A * B, "matrix on the left") + assert_equal(B + A, "matrix on the right") + assert_equal(B - A, "matrix on the right") + assert_equal(B * A, "matrix on the right") + + if TEST_MATMUL: + assert_equal(eval('A @ B'), "matrix on the left") + assert_equal(eval('B @ A'), "matrix on the right") + + def test_binop_custom_type_with_shape(self): + A = self.spmatrix([[1], [2], [3]]) + B = BinopTester_with_shape((3,1)) + assert_equal(A + B, "matrix on the left") + assert_equal(A - B, "matrix on the left") + assert_equal(A * B, "matrix on the left") + assert_equal(B + A, "matrix on the right") + assert_equal(B - A, "matrix on the right") + assert_equal(B * A, "matrix on the right") + + if TEST_MATMUL: + assert_equal(eval('A @ B'), "matrix on the left") + assert_equal(eval('B @ A'), "matrix on the right") + + def test_matmul(self): + if not TEST_MATMUL: + pytest.skip("matmul is only tested in Python 3.5+") + + M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) + B = self.spmatrix(matrix([[0,1],[1,0],[0,2]],'d')) + col = matrix([1,2,3]).T + + # check matrix-vector + assert_array_almost_equal(operator.matmul(M, col), + M.todense() * col) + + # check matrix-matrix + assert_array_almost_equal(operator.matmul(M, B).todense(), + (M * B).todense()) + assert_array_almost_equal(operator.matmul(M.todense(), B), + (M * B).todense()) + assert_array_almost_equal(operator.matmul(M, B.todense()), + (M * B).todense()) + + # check error on matrix-scalar + assert_raises(ValueError, operator.matmul, M, 1) + assert_raises(ValueError, operator.matmul, 1, M) + + def test_matvec(self): + M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) + col = matrix([1,2,3]).T + assert_array_almost_equal(M * col, M.todense() * col) + + # check result dimensions (ticket #514) + assert_equal((M * array([1,2,3])).shape,(4,)) + assert_equal((M * array([[1],[2],[3]])).shape,(4,1)) + assert_equal((M * matrix([[1],[2],[3]])).shape,(4,1)) + + # check result type + assert_(isinstance(M * array([1,2,3]), ndarray)) + assert_(isinstance(M * matrix([1,2,3]).T, matrix)) + + # ensure exception is raised for improper dimensions + bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]), + matrix([1,2,3]), matrix([[1],[2]])] + for x in bad_vecs: + assert_raises(ValueError, M.__mul__, x) + + # Should this be supported or not?! + # flat = array([1,2,3]) + # assert_array_almost_equal(M*flat, M.todense()*flat) + # Currently numpy dense matrices promote the result to a 1x3 matrix, + # whereas sparse matrices leave the result as a rank-1 array. Which + # is preferable? + + # Note: the following command does not work. Both NumPy matrices + # and spmatrices should raise exceptions! + # assert_array_almost_equal(M*[1,2,3], M.todense()*[1,2,3]) + + # The current relationship between sparse matrix products and array + # products is as follows: + assert_array_almost_equal(M*array([1,2,3]), dot(M.A,[1,2,3])) + assert_array_almost_equal(M*[[1],[2],[3]], asmatrix(dot(M.A,[1,2,3])).T) + # Note that the result of M * x is dense if x has a singleton dimension. + + # Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col) + # is rank-2. Is this desirable? 
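+
+    # Editorial sketch of the shape rules the assertions above encode
+    # (illustrative only; M is the 4x3 matrix from test_matvec):
+    #
+    #     M * array([1,2,3])        -> ndarray,   shape (4,)   (1-D in, 1-D out)
+    #     M * array([[1],[2],[3]])  -> ndarray,   shape (4,1)  (2-D in, 2-D out)
+    #     M * matrix([[1],[2],[3]]) -> np.matrix, shape (4,1)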
+
+    def test_matmat_sparse(self):
+        a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
+        a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
+        b = matrix([[0,1],[1,0],[0,2]],'d')
+        asp = self.spmatrix(a)
+        bsp = self.spmatrix(b)
+        assert_array_almost_equal((asp*bsp).todense(), a*b)
+        assert_array_almost_equal(asp*b, a*b)
+        assert_array_almost_equal(a*bsp, a*b)
+        assert_array_almost_equal(a2*bsp, a*b)
+
+        # Now try performing cross-type multiplication:
+        csp = bsp.tocsc()
+        c = b
+        assert_array_almost_equal((asp*csp).todense(), a*c)
+        assert_array_almost_equal(asp*c, a*c)
+
+        assert_array_almost_equal(a*csp, a*c)
+        assert_array_almost_equal(a2*csp, a*c)
+        csp = bsp.tocsr()
+        assert_array_almost_equal((asp*csp).todense(), a*c)
+        assert_array_almost_equal(asp*c, a*c)
+
+        assert_array_almost_equal(a*csp, a*c)
+        assert_array_almost_equal(a2*csp, a*c)
+        csp = bsp.tocoo()
+        assert_array_almost_equal((asp*csp).todense(), a*c)
+        assert_array_almost_equal(asp*c, a*c)
+
+        assert_array_almost_equal(a*csp, a*c)
+        assert_array_almost_equal(a2*csp, a*c)
+
+        # Test provided by Andy Fraser, 2006-03-26
+        L = 30
+        frac = .3
+        random.seed(0)  # make runs repeatable
+        A = zeros((L,2))
+        for i in xrange(L):
+            for j in xrange(2):
+                r = random.random()
+                if r < frac:
+                    A[i,j] = r/frac
+
+        A = self.spmatrix(A)
+        B = A*A.T
+        assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
+        assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
+
+        # check dimension mismatch 2x2 times 3x2
+        A = self.spmatrix([[1,2],[3,4]])
+        B = self.spmatrix([[1,2],[3,4],[5,6]])
+        assert_raises(ValueError, A.__mul__, B)
+
+    def test_matmat_dense(self):
+        a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
+        asp = self.spmatrix(a)
+
+        # check both array and matrix types
+        bs = [array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]])]
+
+        for b in bs:
+            result = asp*b
+            assert_(isinstance(result, type(b)))
+            assert_equal(result.shape, (4,2))
+            assert_equal(result, dot(a,b))
+
+    def test_sparse_format_conversions(self):
+        A = sparse.kron([[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]])
+        D = A.todense()
+        A = self.spmatrix(A)
+
+        for format in ['bsr','coo','csc','csr','dia','dok','lil']:
+            a = A.asformat(format)
+            assert_equal(a.format,format)
+            assert_array_equal(a.todense(), D)
+
+            b = self.spmatrix(D+3j).asformat(format)
+            assert_equal(b.format,format)
+            assert_array_equal(b.todense(), D+3j)
+
+            c = eval(format + '_matrix')(A)
+            assert_equal(c.format,format)
+            assert_array_equal(c.todense(), D)
+
+        for format in ['array', 'dense']:
+            a = A.asformat(format)
+            assert_array_equal(a, D)
+
+            b = self.spmatrix(D+3j).asformat(format)
+            assert_array_equal(b, D+3j)
+
+    def test_tobsr(self):
+        x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]])
+        y = array([[0,1,2],[3,0,5]])
+        A = kron(x,y)
+        Asp = self.spmatrix(A)
+        for format in ['bsr']:
+            fn = getattr(Asp, 'to' + format)
+
+            for X in [1, 2, 3, 6]:
+                for Y in [1, 2, 3, 4, 6, 12]:
+                    assert_equal(fn(blocksize=(X,Y)).todense(), A)
+
+    def test_transpose(self):
+        dat_1 = self.dat
+        dat_2 = np.array([[]])
+        matrices = [dat_1, dat_2]
+
+        def check(dtype, j):
+            dat = np.matrix(matrices[j], dtype=dtype)
+            datsp = self.spmatrix(dat)
+
+            a = datsp.transpose()
+            b = dat.transpose()
+
+            assert_array_equal(a.todense(), b)
+            assert_array_equal(a.transpose().todense(), dat)
+            assert_equal(a.dtype, b.dtype)
+
+        # See gh-5987
+        empty = self.spmatrix((3, 4))
+        assert_array_equal(np.transpose(empty).todense(),
+                           np.transpose(zeros((3, 4))))
+        assert_array_equal(empty.T.todense(), zeros((4, 3)))
+
assert_raises(ValueError, empty.transpose, axes=0) + + for dtype in self.checked_dtypes: + for j in range(len(matrices)): + check(dtype, j) + + def test_add_dense(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + # adding a dense matrix to a sparse matrix + sum1 = dat + datsp + assert_array_equal(sum1, dat + dat) + sum2 = datsp + dat + assert_array_equal(sum2, dat + dat) + + for dtype in self.math_dtypes: + check(dtype) + + def test_sub_dense(self): + # subtracting a dense matrix to/from a sparse matrix + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + # Behavior is different for bool. + if dat.dtype == bool: + sum1 = dat - datsp + assert_array_equal(sum1, dat - dat) + sum2 = datsp - dat + assert_array_equal(sum2, dat - dat) + else: + # Manually add to avoid upcasting from scalar + # multiplication. + sum1 = (dat + dat + dat) - datsp + assert_array_equal(sum1, dat + dat) + sum2 = (datsp + datsp + datsp) - dat + assert_array_equal(sum2, dat + dat) + + for dtype in self.math_dtypes: + if (dtype == np.dtype('bool')) and ( + NumpyVersion(np.__version__) >= '1.9.0.dev'): + # boolean array subtraction deprecated in 1.9.0 + continue + + check(dtype) + + def test_maximum_minimum(self): + A_dense = np.array([[1, 0, 3], [0, 4, 5], [0, 0, 0]]) + B_dense = np.array([[1, 1, 2], [0, 3, 6], [1, -1, 0]]) + + A_dense_cpx = np.array([[1, 0, 3], [0, 4+2j, 5], [0, 1j, -1j]]) + + def check(dtype, dtype2, btype): + if np.issubdtype(dtype, np.complexfloating): + A = self.spmatrix(A_dense_cpx.astype(dtype)) + else: + A = self.spmatrix(A_dense.astype(dtype)) + if btype == 'scalar': + B = dtype2.type(1) + elif btype == 'scalar2': + B = dtype2.type(-1) + elif btype == 'dense': + B = B_dense.astype(dtype2) + elif btype == 'sparse': + B = self.spmatrix(B_dense.astype(dtype2)) + else: + raise ValueError() + + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Taking maximum .minimum. with > 0 .< 0. number results to a dense matrix") + + max_s = A.maximum(B) + min_s = A.minimum(B) + + max_d = np.maximum(todense(A), todense(B)) + assert_array_equal(todense(max_s), max_d) + assert_equal(max_s.dtype, max_d.dtype) + + min_d = np.minimum(todense(A), todense(B)) + assert_array_equal(todense(min_s), min_d) + assert_equal(min_s.dtype, min_d.dtype) + + for dtype in self.math_dtypes: + for dtype2 in [np.int8, np.float_, np.complex_]: + for btype in ['scalar', 'scalar2', 'dense', 'sparse']: + check(np.dtype(dtype), np.dtype(dtype2), btype) + + def test_copy(self): + # Check whether the copy=True and copy=False keywords work + A = self.datsp + + # check that copy preserves format + assert_equal(A.copy().format, A.format) + assert_equal(A.__class__(A,copy=True).format, A.format) + assert_equal(A.__class__(A,copy=False).format, A.format) + + assert_equal(A.copy().todense(), A.todense()) + assert_equal(A.__class__(A,copy=True).todense(), A.todense()) + assert_equal(A.__class__(A,copy=False).todense(), A.todense()) + + # check that XXX_matrix.toXXX() works + toself = getattr(A,'to' + A.format) + assert_equal(toself().format, A.format) + assert_equal(toself(copy=True).format, A.format) + assert_equal(toself(copy=False).format, A.format) + + assert_equal(toself().todense(), A.todense()) + assert_equal(toself(copy=True).todense(), A.todense()) + assert_equal(toself(copy=False).todense(), A.todense()) + + # check whether the data is copied? 
+        # TODO: deal with non-indexable types somehow
+        B = A.copy()
+        try:
+            B[0,0] += 1
+            assert_(B[0,0] != A[0,0])
+        except NotImplementedError:
+            # not all sparse matrices can be indexed
+            pass
+        except TypeError:
+            # not all sparse matrices can be indexed
+            pass
+
+    # test that __iter__ is compatible with NumPy matrix
+    def test_iterator(self):
+        B = np.matrix(np.arange(50).reshape(5, 10))
+        A = self.spmatrix(B)
+
+        for x, y in zip(A, B):
+            assert_equal(x.todense(), y)
+
+    def test_size_zero_matrix_arithmetic(self):
+        # Test basic matrix arithmetic with shapes like (0,0), (10,0),
+        # (0, 3), etc.
+        mat = np.matrix([])
+        a = mat.reshape((0, 0))
+        b = mat.reshape((0, 1))
+        c = mat.reshape((0, 5))
+        d = mat.reshape((1, 0))
+        e = mat.reshape((5, 0))
+        f = np.matrix(np.ones([5, 5]))
+
+        asp = self.spmatrix(a)
+        bsp = self.spmatrix(b)
+        csp = self.spmatrix(c)
+        dsp = self.spmatrix(d)
+        esp = self.spmatrix(e)
+        fsp = self.spmatrix(f)
+
+        # matrix product.
+        assert_array_equal(asp.dot(asp).A, np.dot(a, a).A)
+        assert_array_equal(bsp.dot(dsp).A, np.dot(b, d).A)
+        assert_array_equal(dsp.dot(bsp).A, np.dot(d, b).A)
+        assert_array_equal(csp.dot(esp).A, np.dot(c, e).A)
+        assert_array_equal(csp.dot(fsp).A, np.dot(c, f).A)
+        assert_array_equal(esp.dot(csp).A, np.dot(e, c).A)
+        assert_array_equal(dsp.dot(csp).A, np.dot(d, c).A)
+        assert_array_equal(fsp.dot(esp).A, np.dot(f, e).A)
+
+        # bad matrix products
+        assert_raises(ValueError, dsp.dot, e)
+        assert_raises(ValueError, asp.dot, d)
+
+        # element-wise multiplication
+        assert_array_equal(asp.multiply(asp).A, np.multiply(a, a).A)
+        assert_array_equal(bsp.multiply(bsp).A, np.multiply(b, b).A)
+        assert_array_equal(dsp.multiply(dsp).A, np.multiply(d, d).A)
+
+        assert_array_equal(asp.multiply(a).A, np.multiply(a, a).A)
+        assert_array_equal(bsp.multiply(b).A, np.multiply(b, b).A)
+        assert_array_equal(dsp.multiply(d).A, np.multiply(d, d).A)
+
+        assert_array_equal(asp.multiply(6).A, np.multiply(a, 6).A)
+        assert_array_equal(bsp.multiply(6).A, np.multiply(b, 6).A)
+        assert_array_equal(dsp.multiply(6).A, np.multiply(d, 6).A)
+
+        # bad element-wise multiplication
+        assert_raises(ValueError, asp.multiply, c)
+        assert_raises(ValueError, esp.multiply, c)
+
+        # Addition
+        assert_array_equal(asp.__add__(asp).A, a.__add__(a).A)
+        assert_array_equal(bsp.__add__(bsp).A, b.__add__(b).A)
+        assert_array_equal(dsp.__add__(dsp).A, d.__add__(d).A)
+
+        # bad addition
+        assert_raises(ValueError, asp.__add__, dsp)
+        assert_raises(ValueError, bsp.__add__, asp)
+
+    def test_size_zero_conversions(self):
+        mat = np.matrix([])
+        a = mat.reshape((0, 0))
+        b = mat.reshape((0, 5))
+        c = mat.reshape((5, 0))
+
+        for m in [a, b, c]:
+            spm = self.spmatrix(m)
+            assert_array_equal(spm.tocoo().A, m)
+            assert_array_equal(spm.tocsr().A, m)
+            assert_array_equal(spm.tocsc().A, m)
+            assert_array_equal(spm.tolil().A, m)
+            assert_array_equal(spm.todok().A, m)
+            assert_array_equal(spm.tobsr().A, m)
+
+    def test_pickle(self):
+        import pickle
+        sup = suppress_warnings()
+        sup.filter(SparseEfficiencyWarning)
+
+        @sup
+        def check():
+            datsp = self.datsp.copy()
+            for protocol in range(pickle.HIGHEST_PROTOCOL):
+                sploaded = pickle.loads(pickle.dumps(datsp, protocol=protocol))
+                assert_equal(datsp.shape, sploaded.shape)
+                assert_array_equal(datsp.toarray(), sploaded.toarray())
+                assert_equal(datsp.format, sploaded.format)
+                for key, val in datsp.__dict__.items():
+                    if isinstance(val, np.ndarray):
+                        assert_array_equal(val, sploaded.__dict__[key])
+                    else:
+                        assert_(val == sploaded.__dict__[key])
+        check()
+
+    def
test_unary_ufunc_overrides(self): + def check(name): + if name == "sign": + pytest.skip("sign conflicts with comparison op " + "support on Numpy") + if self.spmatrix in (dok_matrix, lil_matrix): + pytest.skip("Unary ops not implemented for dok/lil") + ufunc = getattr(np, name) + + X = self.spmatrix(np.arange(20).reshape(4, 5) / 20.) + X0 = ufunc(X.toarray()) + + X2 = ufunc(X) + assert_array_equal(X2.toarray(), X0) + + for name in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh", + "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p", + "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt", + "abs"]: + check(name) + + def test_resize(self): + # resize(shape) resizes the matrix in-place + D = np.array([[1, 0, 3, 4], + [2, 0, 0, 0], + [3, 0, 0, 0]]) + S = self.spmatrix(D) + assert_(S.resize((3, 2)) is None) + assert_array_equal(S.A, [[1, 0], + [2, 0], + [3, 0]]) + S.resize((2, 2)) + assert_array_equal(S.A, [[1, 0], + [2, 0]]) + S.resize((3, 2)) + assert_array_equal(S.A, [[1, 0], + [2, 0], + [0, 0]]) + S.resize((3, 3)) + assert_array_equal(S.A, [[1, 0, 0], + [2, 0, 0], + [0, 0, 0]]) + # test no-op + S.resize((3, 3)) + assert_array_equal(S.A, [[1, 0, 0], + [2, 0, 0], + [0, 0, 0]]) + + # test *args + S.resize(3, 2) + assert_array_equal(S.A, [[1, 0], + [2, 0], + [0, 0]]) + + for bad_shape in [1, (-1, 2), (2, -1), (1, 2, 3)]: + assert_raises(ValueError, S.resize, bad_shape) + + +class _TestInplaceArithmetic(object): + @pytest.mark.skipif(NumpyVersion(np.__version__) < "1.13.0", + reason="numpy version doesn't respect array priority") + def test_inplace_dense(self): + a = np.ones((3, 4)) + b = self.spmatrix(a) + + x = a.copy() + y = a.copy() + x += a + y += b + assert_array_equal(x, y) + + x = a.copy() + y = a.copy() + x -= a + y -= b + assert_array_equal(x, y) + + # This is matrix product, from __rmul__ + assert_raises(ValueError, operator.imul, x, b) + x = a.copy() + y = a.copy() + x = x.dot(a.T) + y *= b.T + assert_array_equal(x, y) + + # Matrix (non-elementwise) floor division is not defined + assert_raises(TypeError, operator.ifloordiv, x, b) + + def test_imul_scalar(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + # Avoid implicit casting. + if np.can_cast(type(2), dtype, casting='same_kind'): + a = datsp.copy() + a *= 2 + b = dat.copy() + b *= 2 + assert_array_equal(b, a.todense()) + + if np.can_cast(type(17.3), dtype, casting='same_kind'): + a = datsp.copy() + a *= 17.3 + b = dat.copy() + b *= 17.3 + assert_array_equal(b, a.todense()) + + for dtype in self.math_dtypes: + check(dtype) + + def test_idiv_scalar(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + if np.can_cast(type(2), dtype, casting='same_kind'): + a = datsp.copy() + a /= 2 + b = dat.copy() + b /= 2 + assert_array_equal(b, a.todense()) + + if np.can_cast(type(17.3), dtype, casting='same_kind'): + a = datsp.copy() + a /= 17.3 + b = dat.copy() + b /= 17.3 + assert_array_equal(b, a.todense()) + + for dtype in self.math_dtypes: + # /= should only be used with float dtypes to avoid implicit + # casting. 
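+            # (np.can_cast(dtype, np.int_) is True for the bool and integer
+            #  dtypes, so the in-place division below is exercised only for
+            #  the float and complex dtypes.)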
+ if not np.can_cast(dtype, np.int_): + check(dtype) + + def test_inplace_success(self): + # Inplace ops should work even if a specialized version is not + # implemented, falling back to x = x <op> y + a = self.spmatrix(np.eye(5)) + b = self.spmatrix(np.eye(5)) + bp = self.spmatrix(np.eye(5)) + + b += a + bp = bp + a + assert_allclose(b.A, bp.A) + + b *= a + bp = bp * a + assert_allclose(b.A, bp.A) + + b -= a + bp = bp - a + assert_allclose(b.A, bp.A) + + assert_raises(TypeError, operator.ifloordiv, a, b) + + +class _TestGetSet(object): + def test_getelement(self): + def check(dtype): + D = array([[1,0,0], + [4,3,0], + [0,2,0], + [0,0,0]], dtype=dtype) + A = self.spmatrix(D) + + M,N = D.shape + + for i in range(-M, M): + for j in range(-N, N): + assert_equal(A[i,j], D[i,j]) + + for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]: + assert_raises((IndexError, TypeError), A.__getitem__, ij) + + for dtype in supported_dtypes: + check(np.dtype(dtype)) + + def test_setelement(self): + def check(dtype): + A = self.spmatrix((3,4), dtype=dtype) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + A[0, 0] = dtype.type(0) # bug 870 + A[1, 2] = dtype.type(4.0) + A[0, 1] = dtype.type(3) + A[2, 0] = dtype.type(2.0) + A[0,-1] = dtype.type(8) + A[-1,-2] = dtype.type(7) + A[0, 1] = dtype.type(5) + + if dtype != np.bool_: + assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]]) + + for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]: + assert_raises(IndexError, A.__setitem__, ij, 123.0) + + for v in [[1,2,3], array([1,2,3])]: + assert_raises(ValueError, A.__setitem__, (0,0), v) + + if (not np.issubdtype(dtype, np.complexfloating) and + dtype != np.bool_): + for v in [3j]: + assert_raises(TypeError, A.__setitem__, (0,0), v) + + for dtype in supported_dtypes: + check(np.dtype(dtype)) + + def test_negative_index_assignment(self): + # Regression test for github issue 4428. 
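+        # (The check below exercises exactly the failing case from that
+        #  issue: writing through a negative column index.)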
+ + def check(dtype): + A = self.spmatrix((3, 10), dtype=dtype) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + A[0, -4] = 1 + assert_equal(A[0, -4], 1) + + for dtype in self.math_dtypes: + check(np.dtype(dtype)) + + def test_scalar_assign_2(self): + n, m = (5, 10) + + def _test_set(i, j, nitems): + msg = "%r ; %r ; %r" % (i, j, nitems) + A = self.spmatrix((n, m)) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + A[i, j] = 1 + assert_almost_equal(A.sum(), nitems, err_msg=msg) + assert_almost_equal(A[i, j], 1, err_msg=msg) + + # [i,j] + for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)), + (array(-1), array(-2))]: + _test_set(i, j, 1) + + def test_index_scalar_assign(self): + A = self.spmatrix((5, 5)) + B = np.zeros((5, 5)) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + for C in [A, B]: + C[0,1] = 1 + C[3,0] = 4 + C[3,0] = 9 + assert_array_equal(A.toarray(), B) + + +class _TestSolve(object): + def test_solve(self): + # Test whether the lu_solve command segfaults, as reported by Nils + # Wagner for a 64-bit machine, 02 March 2005 (EJS) + n = 20 + np.random.seed(0) # make tests repeatable + A = zeros((n,n), dtype=complex) + x = np.random.rand(n) + y = np.random.rand(n-1)+1j*np.random.rand(n-1) + r = np.random.rand(n) + for i in range(len(x)): + A[i,i] = x[i] + for i in range(len(y)): + A[i,i+1] = y[i] + A[i+1,i] = conjugate(y[i]) + A = self.spmatrix(A) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format") + x = splu(A).solve(r) + assert_almost_equal(A*x,r) + + +class _TestSlicing(object): + def test_dtype_preservation(self): + assert_equal(self.spmatrix((1,10), dtype=np.int16)[0,1:5].dtype, np.int16) + assert_equal(self.spmatrix((1,10), dtype=np.int32)[0,1:5].dtype, np.int32) + assert_equal(self.spmatrix((1,10), dtype=np.float32)[0,1:5].dtype, np.float32) + assert_equal(self.spmatrix((1,10), dtype=np.float64)[0,1:5].dtype, np.float64) + + def test_get_horiz_slice(self): + B = asmatrix(arange(50.).reshape(5,10)) + A = self.spmatrix(B) + assert_array_equal(B[1,:], A[1,:].todense()) + assert_array_equal(B[1,2:5], A[1,2:5].todense()) + + C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]]) + D = self.spmatrix(C) + assert_array_equal(C[1, 1:3], D[1, 1:3].todense()) + + # Now test slicing when a row contains only zeros + E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) + F = self.spmatrix(E) + assert_array_equal(E[1, 1:3], F[1, 1:3].todense()) + assert_array_equal(E[2, -2:], F[2, -2:].A) + + # The following should raise exceptions: + assert_raises(IndexError, A.__getitem__, (slice(None), 11)) + assert_raises(IndexError, A.__getitem__, (6, slice(3, 7))) + + def test_get_vert_slice(self): + B = asmatrix(arange(50.).reshape(5,10)) + A = self.spmatrix(B) + assert_array_equal(B[2:5,0], A[2:5,0].todense()) + assert_array_equal(B[:,1], A[:,1].todense()) + + C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]]) + D = self.spmatrix(C) + assert_array_equal(C[1:3, 1], D[1:3, 1].todense()) + assert_array_equal(C[:, 2], D[:, 2].todense()) + + # Now test slicing when a column contains only zeros + E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) + F = self.spmatrix(E) + assert_array_equal(E[:, 1], F[:, 1].todense()) + 
assert_array_equal(E[-2:, 2], F[-2:, 2].todense()) + + # The following should raise exceptions: + assert_raises(IndexError, A.__getitem__, (slice(None), 11)) + assert_raises(IndexError, A.__getitem__, (6, slice(3, 7))) + + def test_get_slices(self): + B = asmatrix(arange(50.).reshape(5,10)) + A = self.spmatrix(B) + assert_array_equal(A[2:5,0:3].todense(), B[2:5,0:3]) + assert_array_equal(A[1:,:-1].todense(), B[1:,:-1]) + assert_array_equal(A[:-1,1:].todense(), B[:-1,1:]) + + # Now test slicing when a column contains only zeros + E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) + F = self.spmatrix(E) + assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].todense()) + assert_array_equal(E[:, 1:], F[:, 1:].todense()) + + def test_non_unit_stride_2d_indexing(self): + # Regression test -- used to silently ignore the stride. + v0 = np.random.rand(50, 50) + try: + v = self.spmatrix(v0)[0:25:2, 2:30:3] + except ValueError: + # if unsupported + raise pytest.skip("feature not implemented") + + assert_array_equal(v.todense(), + v0[0:25:2, 2:30:3]) + + def test_slicing_2(self): + B = asmatrix(arange(50).reshape(5,10)) + A = self.spmatrix(B) + + # [i,j] + assert_equal(A[2,3], B[2,3]) + assert_equal(A[-1,8], B[-1,8]) + assert_equal(A[-1,-2],B[-1,-2]) + assert_equal(A[array(-1),-2],B[-1,-2]) + assert_equal(A[-1,array(-2)],B[-1,-2]) + assert_equal(A[array(-1),array(-2)],B[-1,-2]) + + # [i,1:2] + assert_equal(A[2,:].todense(), B[2,:]) + assert_equal(A[2,5:-2].todense(),B[2,5:-2]) + assert_equal(A[array(2),5:-2].todense(),B[2,5:-2]) + + # [1:2,j] + assert_equal(A[:,2].todense(), B[:,2]) + assert_equal(A[3:4,9].todense(), B[3:4,9]) + assert_equal(A[1:4,-5].todense(),B[1:4,-5]) + assert_equal(A[2:-1,3].todense(),B[2:-1,3]) + assert_equal(A[2:-1,array(3)].todense(),B[2:-1,3]) + + # [1:2,1:2] + assert_equal(A[1:2,1:2].todense(),B[1:2,1:2]) + assert_equal(A[4:,3:].todense(), B[4:,3:]) + assert_equal(A[:4,:5].todense(), B[:4,:5]) + assert_equal(A[2:-1,:5].todense(),B[2:-1,:5]) + + # [i] + assert_equal(A[1,:].todense(), B[1,:]) + assert_equal(A[-2,:].todense(),B[-2,:]) + assert_equal(A[array(-2),:].todense(),B[-2,:]) + + # [1:2] + assert_equal(A[1:4].todense(), B[1:4]) + assert_equal(A[1:-2].todense(),B[1:-2]) + + # Check bug reported by Robert Cimrman: + # http://thread.gmane.org/gmane.comp.python.scientific.devel/7986 (dead link) + s = slice(int8(2),int8(4),None) + assert_equal(A[s,:].todense(), B[2:4,:]) + assert_equal(A[:,s].todense(), B[:,2:4]) + + def test_slicing_3(self): + B = asmatrix(arange(50).reshape(5,10)) + A = self.spmatrix(B) + + s_ = np.s_ + slices = [s_[:2], s_[1:2], s_[3:], s_[3::2], + s_[8:3:-1], s_[4::-2], s_[:5:-1], + 0, 1, s_[:], s_[1:5], -1, -2, -5, + array(-1), np.int8(-3)] + + def check_1(a): + x = A[a] + y = B[a] + if y.shape == (): + assert_equal(x, y, repr(a)) + else: + if x.size == 0 and y.size == 0: + pass + else: + assert_array_equal(x.todense(), y, repr(a)) + + for j, a in enumerate(slices): + check_1(a) + + def check_2(a, b): + # Indexing np.matrix with 0-d arrays seems to be broken, + # as they seem not to be treated as scalars. 
+ # https://github.com/numpy/numpy/issues/3110 + if isinstance(a, np.ndarray): + ai = int(a) + else: + ai = a + if isinstance(b, np.ndarray): + bi = int(b) + else: + bi = b + + x = A[a, b] + y = B[ai, bi] + + if y.shape == (): + assert_equal(x, y, repr((a, b))) + else: + if x.size == 0 and y.size == 0: + pass + else: + assert_array_equal(x.todense(), y, repr((a, b))) + + for i, a in enumerate(slices): + for j, b in enumerate(slices): + check_2(a, b) + + def test_ellipsis_slicing(self): + b = asmatrix(arange(50).reshape(5,10)) + a = self.spmatrix(b) + + assert_array_equal(a[...].A, b[...].A) + assert_array_equal(a[...,].A, b[...,].A) + + assert_array_equal(a[1, ...].A, b[1, ...].A) + assert_array_equal(a[..., 1].A, b[..., 1].A) + assert_array_equal(a[1:, ...].A, b[1:, ...].A) + assert_array_equal(a[..., 1:].A, b[..., 1:].A) + + assert_array_equal(a[1:, 1, ...].A, b[1:, 1, ...].A) + assert_array_equal(a[1, ..., 1:].A, b[1, ..., 1:].A) + # These return ints + assert_equal(a[1, 1, ...], b[1, 1, ...]) + assert_equal(a[1, ..., 1], b[1, ..., 1]) + + @pytest.mark.skipif(NumpyVersion(np.__version__) >= '1.9.0.dev', reason="") + def test_multiple_ellipsis_slicing(self): + b = asmatrix(arange(50).reshape(5,10)) + a = self.spmatrix(b) + + assert_array_equal(a[..., ...].A, b[..., ...].A) + assert_array_equal(a[..., ..., ...].A, b[..., ..., ...].A) + assert_array_equal(a[1, ..., ...].A, b[1, ..., ...].A) + assert_array_equal(a[1:, ..., ...].A, b[1:, ..., ...].A) + assert_array_equal(a[..., ..., 1:].A, b[..., ..., 1:].A) + + # Bug in NumPy's slicing + assert_array_equal(a[..., ..., 1].A, b[..., ..., 1].A.reshape((5,1))) + + +class _TestSlicingAssign(object): + def test_slice_scalar_assign(self): + A = self.spmatrix((5, 5)) + B = np.zeros((5, 5)) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + for C in [A, B]: + C[0:1,1] = 1 + C[3:0,0] = 4 + C[3:4,0] = 9 + C[0,4:] = 1 + C[3::-1,4:] = 9 + assert_array_equal(A.toarray(), B) + + def test_slice_assign_2(self): + n, m = (5, 10) + + def _test_set(i, j): + msg = "i=%r; j=%r" % (i, j) + A = self.spmatrix((n, m)) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + A[i, j] = 1 + B = np.zeros((n, m)) + B[i, j] = 1 + assert_array_almost_equal(A.todense(), B, err_msg=msg) + # [i,1:2] + for i, j in [(2, slice(3)), (2, slice(None, 10, 4)), (2, slice(5, -2)), + (array(2), slice(5, -2))]: + _test_set(i, j) + + def test_self_self_assignment(self): + # Tests whether a row of one lil_matrix can be assigned to + # another. 
+ B = self.spmatrix((4,3)) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + B[0,0] = 2 + B[1,2] = 7 + B[2,1] = 3 + B[3,0] = 10 + + A = B / 10 + B[0,:] = A[0,:] + assert_array_equal(A[0,:].A, B[0,:].A) + + A = B / 10 + B[:,:] = A[:1,:1] + assert_array_equal(np.zeros((4,3)) + A[0,0], B.A) + + A = B / 10 + B[:-1,0] = A[0,:].T + assert_array_equal(A[0,:].A.T, B[:-1,0].A) + + def test_slice_assignment(self): + B = self.spmatrix((4,3)) + expected = array([[10,0,0], + [0,0,6], + [0,14,0], + [0,0,0]]) + block = [[1,0],[0,4]] + + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + B[0,0] = 5 + B[1,2] = 3 + B[2,1] = 7 + B[:,:] = B+B + assert_array_equal(B.todense(),expected) + + B[:2,:2] = csc_matrix(array(block)) + assert_array_equal(B.todense()[:2,:2],block) + + def test_sparsity_modifying_assignment(self): + B = self.spmatrix((4,3)) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + B[0,0] = 5 + B[1,2] = 3 + B[2,1] = 7 + B[3,0] = 10 + B[:3] = csr_matrix(np.eye(3)) + + expected = array([[1,0,0],[0,1,0],[0,0,1],[10,0,0]]) + assert_array_equal(B.toarray(), expected) + + def test_set_slice(self): + A = self.spmatrix((5,10)) + B = matrix(zeros((5,10), float)) + s_ = np.s_ + slices = [s_[:2], s_[1:2], s_[3:], s_[3::2], + s_[8:3:-1], s_[4::-2], s_[:5:-1], + 0, 1, s_[:], s_[1:5], -1, -2, -5, + array(-1), np.int8(-3)] + + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + for j, a in enumerate(slices): + A[a] = j + B[a] = j + assert_array_equal(A.todense(), B, repr(a)) + + for i, a in enumerate(slices): + for j, b in enumerate(slices): + A[a,b] = 10*i + 1000*(j+1) + B[a,b] = 10*i + 1000*(j+1) + assert_array_equal(A.todense(), B, repr((a, b))) + + A[0, 1:10:2] = xrange(1,10,2) + B[0, 1:10:2] = xrange(1,10,2) + assert_array_equal(A.todense(), B) + A[1:5:2,0] = np.array(range(1,5,2))[:,None] + B[1:5:2,0] = np.array(range(1,5,2))[:,None] + assert_array_equal(A.todense(), B) + + # The next commands should raise exceptions + assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100))) + assert_raises(ValueError, A.__setitem__, (0, 0), arange(100)) + assert_raises(ValueError, A.__setitem__, (0, slice(None)), + list(range(100))) + assert_raises(ValueError, A.__setitem__, (slice(None), 1), + list(range(100))) + assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy()) + assert_raises(ValueError, A.__setitem__, + ([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4]) + assert_raises(ValueError, A.__setitem__, + ([[1, 2, 3], [0, 3, 4], [4, 1, 3]], + [[1, 2, 4], [0, 1, 3]]), [2, 3, 4]) + + +class _TestFancyIndexing(object): + """Tests fancy indexing features. The tests for any matrix formats + that implement these features should derive from this class. 
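+
+    A rough sketch of the semantics exercised below (mirroring NumPy's
+    fancy-indexing rules):
+
+        A[[1, 3]]          rows 1 and 3
+        A[:, [2, 4]]       columns 2 and 4
+        A[[1, 3], [2, 4]]  the entries (1, 2) and (3, 4)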
+ """ + + def test_bad_index(self): + A = self.spmatrix(np.zeros([5, 5])) + assert_raises((IndexError, ValueError, TypeError), A.__getitem__, "foo") + assert_raises((IndexError, ValueError, TypeError), A.__getitem__, (2, "foo")) + assert_raises((IndexError, ValueError), A.__getitem__, + ([1, 2, 3], [1, 2, 3, 4])) + + def test_fancy_indexing(self): + B = asmatrix(arange(50).reshape(5,10)) + A = self.spmatrix(B) + + # [i] + assert_equal(A[[1,3]].todense(), B[[1,3]]) + + # [i,[1,2]] + assert_equal(A[3,[1,3]].todense(), B[3,[1,3]]) + assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]]) + assert_equal(A[array(-1),[2,-5]].todense(),B[-1,[2,-5]]) + assert_equal(A[-1,array([2,-5])].todense(),B[-1,[2,-5]]) + assert_equal(A[array(-1),array([2,-5])].todense(),B[-1,[2,-5]]) + + # [1:2,[1,2]] + assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]]) + assert_equal(A[3:4,[9]].todense(), B[3:4,[9]]) + assert_equal(A[1:4,[-1,-5]].todense(), B[1:4,[-1,-5]]) + assert_equal(A[1:4,array([-1,-5])].todense(), B[1:4,[-1,-5]]) + + # [[1,2],j] + assert_equal(A[[1,3],3].todense(), B[[1,3],3]) + assert_equal(A[[2,-5],-4].todense(), B[[2,-5],-4]) + assert_equal(A[array([2,-5]),-4].todense(), B[[2,-5],-4]) + assert_equal(A[[2,-5],array(-4)].todense(), B[[2,-5],-4]) + assert_equal(A[array([2,-5]),array(-4)].todense(), B[[2,-5],-4]) + + # [[1,2],1:2] + assert_equal(A[[1,3],:].todense(), B[[1,3],:]) + assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1]) + assert_equal(A[array([2,-5]),8:-1].todense(),B[[2,-5],8:-1]) + + # [[1,2],[1,2]] + assert_equal(todense(A[[1,3],[2,4]]), B[[1,3],[2,4]]) + assert_equal(todense(A[[-1,-3],[2,-4]]), B[[-1,-3],[2,-4]]) + assert_equal(todense(A[array([-1,-3]),[2,-4]]), B[[-1,-3],[2,-4]]) + assert_equal(todense(A[[-1,-3],array([2,-4])]), B[[-1,-3],[2,-4]]) + assert_equal(todense(A[array([-1,-3]),array([2,-4])]), B[[-1,-3],[2,-4]]) + + # [[[1],[2]],[1,2]] + assert_equal(A[[[1],[3]],[2,4]].todense(), B[[[1],[3]],[2,4]]) + assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]]) + assert_equal(A[array([[-1],[-3],[-2]]),[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]]) + assert_equal(A[[[-1],[-3],[-2]],array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]]) + assert_equal(A[array([[-1],[-3],[-2]]),array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]]) + + # [[1,2]] + assert_equal(A[[1,3]].todense(), B[[1,3]]) + assert_equal(A[[-1,-3]].todense(),B[[-1,-3]]) + assert_equal(A[array([-1,-3])].todense(),B[[-1,-3]]) + + # [[1,2],:][:,[1,2]] + assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]]) + assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]]) + assert_equal(A[array([-1,-3]),:][:,array([2,-4])].todense(), B[[-1,-3],:][:,[2,-4]]) + + # [:,[1,2]][[1,2],:] + assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:]) + assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:]) + assert_equal(A[:,array([-1,-3])][array([2,-4]),:].todense(), B[:,[-1,-3]][[2,-4],:]) + + # Check bug reported by Robert Cimrman: + # http://thread.gmane.org/gmane.comp.python.scientific.devel/7986 (dead link) + s = slice(int8(2),int8(4),None) + assert_equal(A[s,:].todense(), B[2:4,:]) + assert_equal(A[:,s].todense(), B[:,2:4]) + + # Regression for gh-4917: index with tuple of 2D arrays + i = np.array([[1]], dtype=int) + assert_equal(A[i,i].todense(), B[i,i]) + + # Regression for gh-4917: index with tuple of empty nested lists + assert_equal(A[[[]], [[]]].todense(), B[[[]], [[]]]) + + def test_fancy_indexing_randomized(self): + np.random.seed(1234) # make runs 
repeatable + + NUM_SAMPLES = 50 + M = 6 + N = 4 + + D = np.asmatrix(np.random.rand(M,N)) + D = np.multiply(D, D > 0.5) + + I = np.random.randint(-M + 1, M, size=NUM_SAMPLES) + J = np.random.randint(-N + 1, N, size=NUM_SAMPLES) + + S = self.spmatrix(D) + + SIJ = S[I,J] + if isspmatrix(SIJ): + SIJ = SIJ.todense() + assert_equal(SIJ, D[I,J]) + + I_bad = I + M + J_bad = J - N + + assert_raises(IndexError, S.__getitem__, (I_bad,J)) + assert_raises(IndexError, S.__getitem__, (I,J_bad)) + + def test_fancy_indexing_boolean(self): + np.random.seed(1234) # make runs repeatable + + B = asmatrix(arange(50).reshape(5,10)) + A = self.spmatrix(B) + + I = np.array(np.random.randint(0, 2, size=5), dtype=bool) + J = np.array(np.random.randint(0, 2, size=10), dtype=bool) + X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool) + + assert_equal(todense(A[I]), B[I]) + assert_equal(todense(A[:,J]), B[:, J]) + assert_equal(todense(A[X]), B[X]) + assert_equal(todense(A[B > 9]), B[B > 9]) + + I = np.array([True, False, True, True, False]) + J = np.array([False, True, True, False, True, + False, False, False, False, False]) + + assert_equal(todense(A[I, J]), B[I, J]) + + Z1 = np.zeros((6, 11), dtype=bool) + Z2 = np.zeros((6, 11), dtype=bool) + Z2[0,-1] = True + Z3 = np.zeros((6, 11), dtype=bool) + Z3[-1,0] = True + + assert_equal(A[Z1], np.array([])) + assert_raises(IndexError, A.__getitem__, Z2) + assert_raises(IndexError, A.__getitem__, Z3) + assert_raises((IndexError, ValueError), A.__getitem__, (X, 1)) + + def test_fancy_indexing_sparse_boolean(self): + np.random.seed(1234) # make runs repeatable + + B = asmatrix(arange(50).reshape(5,10)) + A = self.spmatrix(B) + + X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool) + + Xsp = csr_matrix(X) + + assert_equal(todense(A[Xsp]), B[X]) + assert_equal(todense(A[A > 9]), B[B > 9]) + + Z = np.array(np.random.randint(0, 2, size=(5, 11)), dtype=bool) + Y = np.array(np.random.randint(0, 2, size=(6, 10)), dtype=bool) + + Zsp = csr_matrix(Z) + Ysp = csr_matrix(Y) + + assert_raises(IndexError, A.__getitem__, Zsp) + assert_raises(IndexError, A.__getitem__, Ysp) + assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1)) + + def test_fancy_indexing_regression_3087(self): + mat = self.spmatrix(array([[1, 0, 0], [0,1,0], [1,0,0]])) + desired_cols = np.ravel(mat.sum(0)) > 0 + assert_equal(mat[:, desired_cols].A, [[1, 0], [0, 1], [1, 0]]) + + def test_fancy_indexing_seq_assign(self): + mat = self.spmatrix(array([[1, 0], [0, 1]])) + assert_raises(ValueError, mat.__setitem__, (0, 0), np.array([1,2])) + + def test_fancy_indexing_empty(self): + B = asmatrix(arange(50).reshape(5,10)) + B[1,:] = 0 + B[:,2] = 0 + B[3,6] = 0 + A = self.spmatrix(B) + + K = np.array([False, False, False, False, False]) + assert_equal(todense(A[K]), B[K]) + K = np.array([], dtype=int) + assert_equal(todense(A[K]), B[K]) + assert_equal(todense(A[K,K]), B[K,K]) + J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None] + assert_equal(todense(A[K,J]), B[K,J]) + assert_equal(todense(A[J,K]), B[J,K]) + + +@contextlib.contextmanager +def check_remains_sorted(X): + """Checks that sorted indices property is retained through an operation + """ + if not hasattr(X, 'has_sorted_indices') or not X.has_sorted_indices: + yield + return + yield + indices = X.indices.copy() + X.has_sorted_indices = False + X.sort_indices() + assert_array_equal(indices, X.indices, + 'Expected sorted indices, found unsorted') + + +class _TestFancyIndexingAssign(object): + def test_bad_index_assign(self): + A = 
self.spmatrix(np.zeros([5, 5])) + assert_raises((IndexError, ValueError, TypeError), A.__setitem__, "foo", 2) + assert_raises((IndexError, ValueError, TypeError), A.__setitem__, (2, "foo"), 5) + + def test_fancy_indexing_set(self): + n, m = (5, 10) + + def _test_set_slice(i, j): + A = self.spmatrix((n, m)) + B = asmatrix(np.zeros((n, m))) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + B[i, j] = 1 + with check_remains_sorted(A): + A[i, j] = 1 + assert_array_almost_equal(A.todense(), B) + # [1:2,1:2] + for i, j in [((2, 3, 4), slice(None, 10, 4)), + (np.arange(3), slice(5, -2)), + (slice(2, 5), slice(5, -2))]: + _test_set_slice(i, j) + for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]: + _test_set_slice(i, j) + + def test_fancy_assignment_dtypes(self): + def check(dtype): + A = self.spmatrix((5, 5), dtype=dtype) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + A[[0,1],[0,1]] = dtype.type(1) + assert_equal(A.sum(), dtype.type(1)*2) + A[0:2,0:2] = dtype.type(1.0) + assert_equal(A.sum(), dtype.type(1)*4) + A[2,2] = dtype.type(1.0) + assert_equal(A.sum(), dtype.type(1)*4 + dtype.type(1)) + + for dtype in supported_dtypes: + check(np.dtype(dtype)) + + def test_sequence_assignment(self): + A = self.spmatrix((4,3)) + B = self.spmatrix(eye(3,4)) + + i0 = [0,1,2] + i1 = (0,1,2) + i2 = array(i0) + + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + with check_remains_sorted(A): + A[0,i0] = B[i0,0].T + A[1,i1] = B[i1,1].T + A[2,i2] = B[i2,2].T + assert_array_equal(A.todense(),B.T.todense()) + + # column slice + A = self.spmatrix((2,3)) + with check_remains_sorted(A): + A[1,1:3] = [10,20] + assert_array_equal(A.todense(), [[0,0,0],[0,10,20]]) + + # row slice + A = self.spmatrix((3,2)) + with check_remains_sorted(A): + A[1:3,1] = [[10],[20]] + assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]]) + + # both slices + A = self.spmatrix((3,3)) + B = asmatrix(np.zeros((3,3))) + with check_remains_sorted(A): + for C in [A, B]: + C[[0,1,2], [0,1,2]] = [4,5,6] + assert_array_equal(A.toarray(), B) + + # both slices (2) + A = self.spmatrix((4, 3)) + with check_remains_sorted(A): + A[(1, 2, 3), (0, 1, 2)] = [1, 2, 3] + assert_almost_equal(A.sum(), 6) + B = asmatrix(np.zeros((4, 3))) + B[(1, 2, 3), (0, 1, 2)] = [1, 2, 3] + assert_array_equal(A.todense(), B) + + def test_fancy_assign_empty(self): + B = asmatrix(arange(50).reshape(5,10)) + B[1,:] = 0 + B[:,2] = 0 + B[3,6] = 0 + A = self.spmatrix(B) + + K = np.array([False, False, False, False, False]) + A[K] = 42 + assert_equal(todense(A), B) + + K = np.array([], dtype=int) + A[K] = 42 + assert_equal(todense(A), B) + A[K,K] = 42 + assert_equal(todense(A), B) + + J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None] + A[K,J] = 42 + assert_equal(todense(A), B) + A[J,K] = 42 + assert_equal(todense(A), B) + + +class _TestFancyMultidim(object): + def test_fancy_indexing_ndarray(self): + sets = [ + (np.array([[1], [2], [3]]), np.array([3, 4, 2])), + (np.array([[1], [2], [3]]), np.array([[3, 4, 2]])), + (np.array([[1, 2, 3]]), np.array([[3], [4], [2]])), + (np.array([1, 2, 3]), np.array([[3], [4], [2]])), + (np.array([[1, 2, 3], [3, 4, 2]]), + np.array([[5, 6, 3], [2, 3, 1]])) + ] + # These inputs generate 3-D outputs + # (np.array([[[1], [2], [3]], [[3], [4], [2]]]), + # 
np.array([[[5], [6], [3]], [[2], [3], [1]]])), + + for I, J in sets: + np.random.seed(1234) + D = np.asmatrix(np.random.rand(5, 7)) + S = self.spmatrix(D) + + SIJ = S[I,J] + if isspmatrix(SIJ): + SIJ = SIJ.todense() + assert_equal(SIJ, D[I,J]) + + I_bad = I + 5 + J_bad = J + 7 + + assert_raises(IndexError, S.__getitem__, (I_bad,J)) + assert_raises(IndexError, S.__getitem__, (I,J_bad)) + + # This would generate 3-D arrays -- not supported + assert_raises(IndexError, S.__getitem__, ([I, I], slice(None))) + assert_raises(IndexError, S.__getitem__, (slice(None), [J, J])) + + +class _TestFancyMultidimAssign(object): + def test_fancy_assign_ndarray(self): + np.random.seed(1234) + + D = np.asmatrix(np.random.rand(5, 7)) + S = self.spmatrix(D) + X = np.random.rand(2, 3) + + I = np.array([[1, 2, 3], [3, 4, 2]]) + J = np.array([[5, 6, 3], [2, 3, 1]]) + + with check_remains_sorted(S): + S[I,J] = X + D[I,J] = X + assert_equal(S.todense(), D) + + I_bad = I + 5 + J_bad = J + 7 + + C = [1, 2, 3] + + with check_remains_sorted(S): + S[I,J] = C + D[I,J] = C + assert_equal(S.todense(), D) + + with check_remains_sorted(S): + S[I,J] = 3 + D[I,J] = 3 + assert_equal(S.todense(), D) + + assert_raises(IndexError, S.__setitem__, (I_bad,J), C) + assert_raises(IndexError, S.__setitem__, (I,J_bad), C) + + def test_fancy_indexing_multidim_set(self): + n, m = (5, 10) + + def _test_set_slice(i, j): + A = self.spmatrix((n, m)) + with check_remains_sorted(A), suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + A[i, j] = 1 + B = asmatrix(np.zeros((n, m))) + B[i, j] = 1 + assert_array_almost_equal(A.todense(), B) + # [[[1, 2], [1, 2]], [1, 2]] + for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]), + (np.array([0, 4]), [[0, 3], [1, 2]]), + ([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]: + _test_set_slice(i, j) + + def test_fancy_assign_list(self): + np.random.seed(1234) + + D = np.asmatrix(np.random.rand(5, 7)) + S = self.spmatrix(D) + X = np.random.rand(2, 3) + + I = [[1, 2, 3], [3, 4, 2]] + J = [[5, 6, 3], [2, 3, 1]] + + S[I,J] = X + D[I,J] = X + assert_equal(S.todense(), D) + + I_bad = [[ii + 5 for ii in i] for i in I] + J_bad = [[jj + 7 for jj in j] for j in J] + C = [1, 2, 3] + + S[I,J] = C + D[I,J] = C + assert_equal(S.todense(), D) + + S[I,J] = 3 + D[I,J] = 3 + assert_equal(S.todense(), D) + + assert_raises(IndexError, S.__setitem__, (I_bad,J), C) + assert_raises(IndexError, S.__setitem__, (I,J_bad), C) + + def test_fancy_assign_slice(self): + np.random.seed(1234) + + D = np.asmatrix(np.random.rand(5, 7)) + S = self.spmatrix(D) + + I = [[1, 2, 3], [3, 4, 2]] + J = [[5, 6, 3], [2, 3, 1]] + + I_bad = [[ii + 5 for ii in i] for i in I] + J_bad = [[jj + 7 for jj in j] for j in J] + + C = [1, 2, 3, 4, 5, 6, 7] + assert_raises(IndexError, S.__setitem__, (I_bad, slice(None)), C) + assert_raises(IndexError, S.__setitem__, (slice(None), J_bad), C) + + +class _TestArithmetic(object): + """ + Test real/complex arithmetic + """ + def __arith_init(self): + # these can be represented exactly in FP (so arithmetic should be exact) + self.__A = matrix([[-1.5, 6.5, 0, 2.25, 0, 0], + [3.125, -7.875, 0.625, 0, 0, 0], + [0, 0, -0.125, 1.0, 0, 0], + [0, 0, 8.375, 0, 0, 0]],'float64') + self.__B = matrix([[0.375, 0, 0, 0, -5, 2.5], + [14.25, -3.75, 0, 0, -0.125, 0], + [0, 7.25, 0, 0, 0, 0], + [18.5, -0.0625, 0, 0, 0, 0]],'complex128') + self.__B.imag = matrix([[1.25, 0, 0, 0, 6, -3.875], + [2.25, 4.125, 0, 0, 0, 2.75], + [0, 4.125, 0, 0, 0, 0], + [-0.0625, 
0, 0, 0, 0, 0]],'float64') + + # fractions are all x/16ths + assert_array_equal((self.__A*16).astype('int32'),16*self.__A) + assert_array_equal((self.__B.real*16).astype('int32'),16*self.__B.real) + assert_array_equal((self.__B.imag*16).astype('int32'),16*self.__B.imag) + + self.__Asp = self.spmatrix(self.__A) + self.__Bsp = self.spmatrix(self.__B) + + def test_add_sub(self): + self.__arith_init() + + # basic tests + assert_array_equal((self.__Asp+self.__Bsp).todense(),self.__A+self.__B) + + # check conversions + for x in supported_dtypes: + A = self.__A.astype(x) + Asp = self.spmatrix(A) + for y in supported_dtypes: + if not np.issubdtype(y, np.complexfloating): + B = self.__B.real.astype(y) + else: + B = self.__B.astype(y) + Bsp = self.spmatrix(B) + + # addition + D1 = A + B + S1 = Asp + Bsp + + assert_equal(S1.dtype,D1.dtype) + assert_array_equal(S1.todense(),D1) + assert_array_equal(Asp + B,D1) # check sparse + dense + assert_array_equal(A + Bsp,D1) # check dense + sparse + + # subtraction + if (np.dtype('bool') in [x, y]) and ( + NumpyVersion(np.__version__) >= '1.9.0.dev'): + # boolean array subtraction deprecated in 1.9.0 + continue + + D1 = A - B + S1 = Asp - Bsp + + assert_equal(S1.dtype,D1.dtype) + assert_array_equal(S1.todense(),D1) + assert_array_equal(Asp - B,D1) # check sparse - dense + assert_array_equal(A - Bsp,D1) # check dense - sparse + + def test_mu(self): + self.__arith_init() + + # basic tests + assert_array_equal((self.__Asp*self.__Bsp.T).todense(),self.__A*self.__B.T) + + for x in supported_dtypes: + A = self.__A.astype(x) + Asp = self.spmatrix(A) + for y in supported_dtypes: + if np.issubdtype(y, np.complexfloating): + B = self.__B.astype(y) + else: + B = self.__B.real.astype(y) + Bsp = self.spmatrix(B) + + D1 = A * B.T + S1 = Asp * Bsp.T + + assert_allclose(S1.todense(), D1, + atol=1e-14*abs(D1).max()) + assert_equal(S1.dtype,D1.dtype) + + +class _TestMinMax(object): + def test_minmax(self): + for dtype in [np.float32, np.float64, np.int32, np.int64, np.complex128]: + D = np.arange(20, dtype=dtype).reshape(5,4) + + X = self.spmatrix(D) + assert_equal(X.min(), 0) + assert_equal(X.max(), 19) + assert_equal(X.min().dtype, dtype) + assert_equal(X.max().dtype, dtype) + + D *= -1 + X = self.spmatrix(D) + assert_equal(X.min(), -19) + assert_equal(X.max(), 0) + + D += 5 + X = self.spmatrix(D) + assert_equal(X.min(), -14) + assert_equal(X.max(), 5) + + # try a fully dense matrix + X = self.spmatrix(np.arange(1, 10).reshape(3, 3)) + assert_equal(X.min(), 1) + assert_equal(X.min().dtype, X.dtype) + + X = -X + assert_equal(X.max(), -1) + + # and a fully sparse matrix + Z = self.spmatrix(np.zeros(1)) + assert_equal(Z.min(), 0) + assert_equal(Z.max(), 0) + assert_equal(Z.max().dtype, Z.dtype) + + # another test + D = np.arange(20, dtype=float).reshape(5,4) + D[0:2, :] = 0 + X = self.spmatrix(D) + assert_equal(X.min(), 0) + assert_equal(X.max(), 19) + + # zero-size matrices + for D in [np.zeros((0, 0)), np.zeros((0, 10)), np.zeros((10, 0))]: + X = self.spmatrix(D) + assert_raises(ValueError, X.min) + assert_raises(ValueError, X.max) + + def test_minmax_axis(self): + D = np.matrix(np.arange(50).reshape(5,10)) + # completely empty rows, leaving some completely full: + D[1, :] = 0 + # empty at end for reduceat: + D[:, 9] = 0 + # partial rows/cols: + D[3, 3] = 0 + # entries on either side of 0: + D[2, 2] = -1 + X = self.spmatrix(D) + + axes = [-2, -1, 0, 1] + for axis in axes: + assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A) + assert_array_equal(X.min(axis=axis).A, 
D.min(axis=axis).A) + + # full matrix + D = np.matrix(np.arange(1, 51).reshape(10, 5)) + X = self.spmatrix(D) + for axis in axes: + assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A) + assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A) + + # empty matrix + D = np.matrix(np.zeros((10, 5))) + X = self.spmatrix(D) + for axis in axes: + assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A) + assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A) + + axes_even = [0, -2] + axes_odd = [1, -1] + + # zero-size matrices + D = np.zeros((0, 10)) + X = self.spmatrix(D) + for axis in axes_even: + assert_raises(ValueError, X.min, axis=axis) + assert_raises(ValueError, X.max, axis=axis) + for axis in axes_odd: + assert_array_equal(np.zeros((0, 1)), X.min(axis=axis).A) + assert_array_equal(np.zeros((0, 1)), X.max(axis=axis).A) + + D = np.zeros((10, 0)) + X = self.spmatrix(D) + for axis in axes_odd: + assert_raises(ValueError, X.min, axis=axis) + assert_raises(ValueError, X.max, axis=axis) + for axis in axes_even: + assert_array_equal(np.zeros((1, 0)), X.min(axis=axis).A) + assert_array_equal(np.zeros((1, 0)), X.max(axis=axis).A) + + def test_minmax_invalid_params(self): + dat = np.matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spmatrix(dat) + + for fname in ('min', 'max'): + func = getattr(datsp, fname) + assert_raises(ValueError, func, axis=3) + assert_raises(TypeError, func, axis=(0, 1)) + assert_raises(TypeError, func, axis=1.5) + assert_raises(ValueError, func, axis=1, out=1) + + def test_numpy_minmax(self): + # See gh-5987 + # xref gh-7460 in 'numpy' + from scipy.sparse import data + + dat = np.matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spmatrix(dat) + + # We are only testing sparse matrices who have + # implemented 'min' and 'max' because they are + # the ones with the compatibility issues with + # the 'numpy' implementation. 
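+        # np.min(datsp) is expected to defer to datsp.min(), since
+        # numpy's reductions fall back to the argument's own method
+        # when it is not an ndarray.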
+ if isinstance(datsp, data._minmax_mixin): + assert_array_equal(np.min(datsp), np.min(dat)) + assert_array_equal(np.max(datsp), np.max(dat)) + + def test_argmax(self): + D1 = np.array([ + [-1, 5, 2, 3], + [0, 0, -1, -2], + [-1, -2, -3, -4], + [1, 2, 3, 4], + [1, 2, 0, 0], + ]) + D2 = D1.transpose() + + for D in [D1, D2]: + mat = csr_matrix(D) + + assert_equal(mat.argmax(), np.argmax(D)) + assert_equal(mat.argmin(), np.argmin(D)) + + assert_equal(mat.argmax(axis=0), + np.asmatrix(np.argmax(D, axis=0))) + assert_equal(mat.argmin(axis=0), + np.asmatrix(np.argmin(D, axis=0))) + + assert_equal(mat.argmax(axis=1), + np.asmatrix(np.argmax(D, axis=1).reshape(-1, 1))) + assert_equal(mat.argmin(axis=1), + np.asmatrix(np.argmin(D, axis=1).reshape(-1, 1))) + + D1 = np.empty((0, 5)) + D2 = np.empty((5, 0)) + + for axis in [None, 0]: + mat = self.spmatrix(D1) + assert_raises(ValueError, mat.argmax, axis=axis) + assert_raises(ValueError, mat.argmin, axis=axis) + + for axis in [None, 1]: + mat = self.spmatrix(D2) + assert_raises(ValueError, mat.argmax, axis=axis) + assert_raises(ValueError, mat.argmin, axis=axis) + + +class _TestGetNnzAxis(object): + def test_getnnz_axis(self): + dat = np.matrix([[0, 2], + [3, 5], + [-6, 9]]) + bool_dat = dat.astype(bool).A + datsp = self.spmatrix(dat) + + accepted_return_dtypes = (np.int32, np.int64) + + assert_array_equal(bool_dat.sum(axis=None), datsp.getnnz(axis=None)) + assert_array_equal(bool_dat.sum(), datsp.getnnz()) + assert_array_equal(bool_dat.sum(axis=0), datsp.getnnz(axis=0)) + assert_in(datsp.getnnz(axis=0).dtype, accepted_return_dtypes) + assert_array_equal(bool_dat.sum(axis=1), datsp.getnnz(axis=1)) + assert_in(datsp.getnnz(axis=1).dtype, accepted_return_dtypes) + assert_array_equal(bool_dat.sum(axis=-2), datsp.getnnz(axis=-2)) + assert_in(datsp.getnnz(axis=-2).dtype, accepted_return_dtypes) + assert_array_equal(bool_dat.sum(axis=-1), datsp.getnnz(axis=-1)) + assert_in(datsp.getnnz(axis=-1).dtype, accepted_return_dtypes) + + assert_raises(ValueError, datsp.getnnz, axis=2) + + +#------------------------------------------------------------------------------ +# Tailored base class for generic tests +#------------------------------------------------------------------------------ + +def _possibly_unimplemented(cls, require=True): + """ + Construct a class that either runs tests as usual (require=True), + or each method skips if it encounters a common error. + """ + if require: + return cls + else: + def wrap(fc): + @functools.wraps(fc) + def wrapper(*a, **kw): + try: + return fc(*a, **kw) + except (NotImplementedError, TypeError, ValueError, + IndexError, AttributeError): + raise pytest.skip("feature not implemented") + + return wrapper + + new_dict = dict(cls.__dict__) + for name, func in cls.__dict__.items(): + if name.startswith('test_'): + new_dict[name] = wrap(func) + return type(cls.__name__ + "NotImplemented", + cls.__bases__, + new_dict) + + +def sparse_test_class(getset=True, slicing=True, slicing_assign=True, + fancy_indexing=True, fancy_assign=True, + fancy_multidim_indexing=True, fancy_multidim_assign=True, + minmax=True, nnz_axis=True): + """ + Construct a base class, optionally converting some of the tests in + the suite to check that the feature is not implemented. 
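+
+    Typical use, as in the concrete classes further down:
+
+        class TestDOK(sparse_test_class(minmax=False, nnz_axis=False)):
+            spmatrix = dok_matrix
+
+    A flag set to False routes the corresponding test class through
+    _possibly_unimplemented above, so its tests skip rather than fail
+    when the format raises NotImplementedError and similar errors.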
+ """ + bases = (_TestCommon, + _possibly_unimplemented(_TestGetSet, getset), + _TestSolve, + _TestInplaceArithmetic, + _TestArithmetic, + _possibly_unimplemented(_TestSlicing, slicing), + _possibly_unimplemented(_TestSlicingAssign, slicing_assign), + _possibly_unimplemented(_TestFancyIndexing, fancy_indexing), + _possibly_unimplemented(_TestFancyIndexingAssign, + fancy_assign), + _possibly_unimplemented(_TestFancyMultidim, + fancy_indexing and fancy_multidim_indexing), + _possibly_unimplemented(_TestFancyMultidimAssign, + fancy_multidim_assign and fancy_assign), + _possibly_unimplemented(_TestMinMax, minmax), + _possibly_unimplemented(_TestGetNnzAxis, nnz_axis)) + + # check that test names do not clash + names = {} + for cls in bases: + for name in cls.__dict__: + if not name.startswith('test_'): + continue + old_cls = names.get(name) + if old_cls is not None: + raise ValueError("Test class %s overloads test %s defined in %s" % ( + cls.__name__, name, old_cls.__name__)) + names[name] = cls + + return type("TestBase", bases, {}) + + +#------------------------------------------------------------------------------ +# Matrix class based tests +#------------------------------------------------------------------------------ + +class TestCSR(sparse_test_class()): + @classmethod + def spmatrix(cls, *args, **kwargs): + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a csr_matrix is expensive") + return csr_matrix(*args, **kwargs) + math_dtypes = [np.bool_, np.int_, np.float_, np.complex_] + + def test_constructor1(self): + b = matrix([[0,4,0], + [3,0,0], + [0,2,0]],'d') + bsp = csr_matrix(b) + assert_array_almost_equal(bsp.data,[4,3,2]) + assert_array_equal(bsp.indices,[1,0,1]) + assert_array_equal(bsp.indptr,[0,1,2,3]) + assert_equal(bsp.getnnz(),3) + assert_equal(bsp.getformat(),'csr') + assert_array_equal(bsp.todense(),b) + + def test_constructor2(self): + b = zeros((6,6),'d') + b[3,4] = 5 + bsp = csr_matrix(b) + assert_array_almost_equal(bsp.data,[5]) + assert_array_equal(bsp.indices,[4]) + assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1]) + assert_array_almost_equal(bsp.todense(),b) + + def test_constructor3(self): + b = matrix([[1,0], + [0,2], + [3,0]],'d') + bsp = csr_matrix(b) + assert_array_almost_equal(bsp.data,[1,2,3]) + assert_array_equal(bsp.indices,[0,1,0]) + assert_array_equal(bsp.indptr,[0,1,2,3]) + assert_array_almost_equal(bsp.todense(),b) + +### currently disabled +## def test_constructor4(self): +## """try using int64 indices""" +## data = arange( 6 ) + 1 +## col = array( [1, 2, 1, 0, 0, 2], dtype='int64' ) +## ptr = array( [0, 2, 4, 6], dtype='int64' ) +## +## a = csr_matrix( (data, col, ptr), shape = (3,3) ) +## +## b = matrix([[0,1,2], +## [4,3,0], +## [5,0,6]],'d') +## +## assert_equal(a.indptr.dtype,numpy.dtype('int64')) +## assert_equal(a.indices.dtype,numpy.dtype('int64')) +## assert_array_equal(a.todense(),b) + + def test_constructor4(self): + # using (data, ij) format + row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) + col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) + data = array([6., 10., 3., 9., 1., 4., + 11., 2., 8., 5., 7.]) + + ij = vstack((row,col)) + csr = csr_matrix((data,ij),(4,3)) + assert_array_equal(arange(12).reshape(4,3),csr.todense()) + + def test_constructor5(self): + # infer dimensions from arrays + indptr = array([0,1,3,3]) + indices = array([0,5,1,2]) + data = array([1,2,3,4]) + csr = csr_matrix((data, indices, indptr)) + assert_array_equal(csr.shape,(3,6)) + + def test_constructor6(self): + # 
infer dimensions and dtype from lists + indptr = [0, 1, 3, 3] + indices = [0, 5, 1, 2] + data = [1, 2, 3, 4] + csr = csr_matrix((data, indices, indptr)) + assert_array_equal(csr.shape, (3,6)) + assert_(np.issubdtype(csr.dtype, np.signedinteger)) + + def test_sort_indices(self): + data = arange(5) + indices = array([7, 2, 1, 5, 4]) + indptr = array([0, 3, 5]) + asp = csr_matrix((data, indices, indptr), shape=(2,10)) + bsp = asp.copy() + asp.sort_indices() + assert_array_equal(asp.indices,[1, 2, 7, 4, 5]) + assert_array_equal(asp.todense(),bsp.todense()) + + def test_eliminate_zeros(self): + data = array([1, 0, 0, 0, 2, 0, 3, 0]) + indices = array([1, 2, 3, 4, 5, 6, 7, 8]) + indptr = array([0, 3, 8]) + asp = csr_matrix((data, indices, indptr), shape=(2,10)) + bsp = asp.copy() + asp.eliminate_zeros() + assert_array_equal(asp.nnz, 3) + assert_array_equal(asp.data,[1, 2, 3]) + assert_array_equal(asp.todense(),bsp.todense()) + + def test_ufuncs(self): + X = csr_matrix(np.arange(20).reshape(4, 5) / 20.) + for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh", + "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p", + "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]: + assert_equal(hasattr(csr_matrix, f), True) + X2 = getattr(X, f)() + assert_equal(X.shape, X2.shape) + assert_array_equal(X.indices, X2.indices) + assert_array_equal(X.indptr, X2.indptr) + assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray())) + + def test_unsorted_arithmetic(self): + data = arange(5) + indices = array([7, 2, 1, 5, 4]) + indptr = array([0, 3, 5]) + asp = csr_matrix((data, indices, indptr), shape=(2,10)) + data = arange(6) + indices = array([8, 1, 5, 7, 2, 4]) + indptr = array([0, 2, 6]) + bsp = csr_matrix((data, indices, indptr), shape=(2,10)) + assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense()) + + def test_fancy_indexing_broadcast(self): + # broadcasting indexing mode is supported + I = np.array([[1], [2], [3]]) + J = np.array([3, 4, 2]) + + np.random.seed(1234) + D = np.asmatrix(np.random.rand(5, 7)) + S = self.spmatrix(D) + + SIJ = S[I,J] + if isspmatrix(SIJ): + SIJ = SIJ.todense() + assert_equal(SIJ, D[I,J]) + + def test_has_sorted_indices(self): + "Ensure has_sorted_indices memoizes sorted state for sort_indices" + sorted_inds = np.array([0, 1]) + unsorted_inds = np.array([1, 0]) + data = np.array([1, 1]) + indptr = np.array([0, 2]) + M = csr_matrix((data, sorted_inds, indptr)).copy() + assert_equal(True, M.has_sorted_indices) + + M = csr_matrix((data, unsorted_inds, indptr)).copy() + assert_equal(False, M.has_sorted_indices) + + # set by sorting + M.sort_indices() + assert_equal(True, M.has_sorted_indices) + assert_array_equal(M.indices, sorted_inds) + + M = csr_matrix((data, unsorted_inds, indptr)).copy() + # set manually (although underlyingly unsorted) + M.has_sorted_indices = True + assert_equal(True, M.has_sorted_indices) + assert_array_equal(M.indices, unsorted_inds) + + # ensure sort bypassed when has_sorted_indices == True + M.sort_indices() + assert_array_equal(M.indices, unsorted_inds) + + def test_has_canonical_format(self): + "Ensure has_canonical_format memoizes state for sum_duplicates" + + M = csr_matrix((np.array([2]), np.array([0]), np.array([0, 1]))) + assert_equal(True, M.has_canonical_format) + + indices = np.array([0, 0]) # contains duplicate + data = np.array([1, 1]) + indptr = np.array([0, 2]) + + M = csr_matrix((data, indices, indptr)).copy() + assert_equal(False, M.has_canonical_format) + + # set by deduplicating + M.sum_duplicates() + 
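+        # summing merges the duplicate (0, 0) entries into one, so the
+        # flag flips to True and indices shrinks from length 2 to 1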
assert_equal(True, M.has_canonical_format) + assert_equal(1, len(M.indices)) + + M = csr_matrix((data, indices, indptr)).copy() + # set manually (although underlyingly duplicated) + M.has_canonical_format = True + assert_equal(True, M.has_canonical_format) + assert_equal(2, len(M.indices)) # unaffected content + + # ensure deduplication bypassed when has_canonical_format == True + M.sum_duplicates() + assert_equal(2, len(M.indices)) # unaffected content + + def test_scalar_idx_dtype(self): + # Check that index dtype takes into account all parameters + # passed to sparsetools, including the scalar ones + indptr = np.zeros(2, dtype=np.int32) + indices = np.zeros(0, dtype=np.int32) + vals = np.zeros(0) + a = csr_matrix((vals, indices, indptr), shape=(1, 2**31-1)) + b = csr_matrix((vals, indices, indptr), shape=(1, 2**31)) + ij = np.zeros((2, 0), dtype=np.int32) + c = csr_matrix((vals, ij), shape=(1, 2**31-1)) + d = csr_matrix((vals, ij), shape=(1, 2**31)) + e = csr_matrix((1, 2**31-1)) + f = csr_matrix((1, 2**31)) + assert_equal(a.indptr.dtype, np.int32) + assert_equal(b.indptr.dtype, np.int64) + assert_equal(c.indptr.dtype, np.int32) + assert_equal(d.indptr.dtype, np.int64) + assert_equal(e.indptr.dtype, np.int32) + assert_equal(f.indptr.dtype, np.int64) + + # These shouldn't fail + for x in [a, b, c, d, e, f]: + x + x + + +TestCSR.init_class() + + +class TestCSC(sparse_test_class()): + @classmethod + def spmatrix(cls, *args, **kwargs): + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a csc_matrix is expensive") + return csc_matrix(*args, **kwargs) + math_dtypes = [np.bool_, np.int_, np.float_, np.complex_] + + def test_constructor1(self): + b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') + bsp = csc_matrix(b) + assert_array_almost_equal(bsp.data,[1,2,1,3]) + assert_array_equal(bsp.indices,[0,2,1,2]) + assert_array_equal(bsp.indptr,[0,1,2,3,4]) + assert_equal(bsp.getnnz(),4) + assert_equal(bsp.shape,b.shape) + assert_equal(bsp.getformat(),'csc') + + def test_constructor2(self): + b = zeros((6,6),'d') + b[2,4] = 5 + bsp = csc_matrix(b) + assert_array_almost_equal(bsp.data,[5]) + assert_array_equal(bsp.indices,[2]) + assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1]) + + def test_constructor3(self): + b = matrix([[1,0],[0,0],[0,2]],'d') + bsp = csc_matrix(b) + assert_array_almost_equal(bsp.data,[1,2]) + assert_array_equal(bsp.indices,[0,2]) + assert_array_equal(bsp.indptr,[0,1,2]) + + def test_constructor4(self): + # using (data, ij) format + row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) + col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) + data = array([6., 10., 3., 9., 1., 4., + 11., 2., 8., 5., 7.]) + + ij = vstack((row,col)) + csc = csc_matrix((data,ij),(4,3)) + assert_array_equal(arange(12).reshape(4,3),csc.todense()) + + def test_constructor5(self): + # infer dimensions from arrays + indptr = array([0,1,3,3]) + indices = array([0,5,1,2]) + data = array([1,2,3,4]) + csc = csc_matrix((data, indices, indptr)) + assert_array_equal(csc.shape,(6,3)) + + def test_constructor6(self): + # infer dimensions and dtype from lists + indptr = [0, 1, 3, 3] + indices = [0, 5, 1, 2] + data = [1, 2, 3, 4] + csc = csc_matrix((data, indices, indptr)) + assert_array_equal(csc.shape,(6,3)) + assert_(np.issubdtype(csc.dtype, np.signedinteger)) + + def test_eliminate_zeros(self): + data = array([1, 0, 0, 0, 2, 0, 3, 0]) + indices = array([1, 2, 3, 4, 5, 6, 7, 8]) + indptr = array([0, 3, 8]) + asp = csc_matrix((data, indices, indptr), shape=(10,2)) + bsp = 
asp.copy() + asp.eliminate_zeros() + assert_array_equal(asp.nnz, 3) + assert_array_equal(asp.data,[1, 2, 3]) + assert_array_equal(asp.todense(),bsp.todense()) + + def test_sort_indices(self): + data = arange(5) + row = array([7, 2, 1, 5, 4]) + ptr = [0, 3, 5] + asp = csc_matrix((data, row, ptr), shape=(10,2)) + bsp = asp.copy() + asp.sort_indices() + assert_array_equal(asp.indices,[1, 2, 7, 4, 5]) + assert_array_equal(asp.todense(),bsp.todense()) + + def test_ufuncs(self): + X = csc_matrix(np.arange(21).reshape(7, 3) / 21.) + for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh", + "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p", + "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]: + assert_equal(hasattr(csr_matrix, f), True) + X2 = getattr(X, f)() + assert_equal(X.shape, X2.shape) + assert_array_equal(X.indices, X2.indices) + assert_array_equal(X.indptr, X2.indptr) + assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray())) + + def test_unsorted_arithmetic(self): + data = arange(5) + indices = array([7, 2, 1, 5, 4]) + indptr = array([0, 3, 5]) + asp = csc_matrix((data, indices, indptr), shape=(10,2)) + data = arange(6) + indices = array([8, 1, 5, 7, 2, 4]) + indptr = array([0, 2, 6]) + bsp = csc_matrix((data, indices, indptr), shape=(10,2)) + assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense()) + + def test_fancy_indexing_broadcast(self): + # broadcasting indexing mode is supported + I = np.array([[1], [2], [3]]) + J = np.array([3, 4, 2]) + + np.random.seed(1234) + D = np.asmatrix(np.random.rand(5, 7)) + S = self.spmatrix(D) + + SIJ = S[I,J] + if isspmatrix(SIJ): + SIJ = SIJ.todense() + assert_equal(SIJ, D[I,J]) + + def test_scalar_idx_dtype(self): + # Check that index dtype takes into account all parameters + # passed to sparsetools, including the scalar ones + indptr = np.zeros(2, dtype=np.int32) + indices = np.zeros(0, dtype=np.int32) + vals = np.zeros(0) + a = csc_matrix((vals, indices, indptr), shape=(2**31-1, 1)) + b = csc_matrix((vals, indices, indptr), shape=(2**31, 1)) + ij = np.zeros((2, 0), dtype=np.int32) + c = csc_matrix((vals, ij), shape=(2**31-1, 1)) + d = csc_matrix((vals, ij), shape=(2**31, 1)) + e = csr_matrix((1, 2**31-1)) + f = csr_matrix((1, 2**31)) + assert_equal(a.indptr.dtype, np.int32) + assert_equal(b.indptr.dtype, np.int64) + assert_equal(c.indptr.dtype, np.int32) + assert_equal(d.indptr.dtype, np.int64) + assert_equal(e.indptr.dtype, np.int32) + assert_equal(f.indptr.dtype, np.int64) + + # These shouldn't fail + for x in [a, b, c, d, e, f]: + x + x + + +TestCSC.init_class() + + +class TestDOK(sparse_test_class(minmax=False, nnz_axis=False)): + spmatrix = dok_matrix + math_dtypes = [np.int_, np.float_, np.complex_] + + def test_mult(self): + A = dok_matrix((10,10)) + A[0,3] = 10 + A[5,6] = 20 + D = A*A.T + E = A*A.H + assert_array_equal(D.A, E.A) + + def test_add_nonzero(self): + A = self.spmatrix((3,2)) + A[0,1] = -10 + A[2,0] = 20 + A = A + 10 + B = matrix([[10, 0], [10, 10], [30, 10]]) + assert_array_equal(A.todense(), B) + + A = A + 1j + B = B + 1j + assert_array_equal(A.todense(), B) + + def test_dok_divide_scalar(self): + A = self.spmatrix((3,2)) + A[0,1] = -10 + A[2,0] = 20 + + assert_array_equal((A/1j).todense(), A.todense()/1j) + assert_array_equal((A/9).todense(), A.todense()/9) + + def test_convert(self): + # Test provided by Andrew Straw. Fails in SciPy <= r1477. 
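+        # None of the entries set below touch the last column, so the
+        # conversions must preserve the all-zero trailing column (and,
+        # after transposing, the all-zero trailing row) and the shape.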
+ (m, n) = (6, 7) + a = dok_matrix((m, n)) + + # set a few elements, but none in the last column + a[2,1] = 1 + a[0,2] = 2 + a[3,1] = 3 + a[1,5] = 4 + a[4,3] = 5 + a[4,2] = 6 + + # assert that the last column is all zeros + assert_array_equal(a.toarray()[:,n-1], zeros(m,)) + + # make sure it still works for CSC format + csc = a.tocsc() + assert_array_equal(csc.toarray()[:,n-1], zeros(m,)) + + # now test CSR + (m, n) = (n, m) + b = a.transpose() + assert_equal(b.shape, (m, n)) + # assert that the last row is all zeros + assert_array_equal(b.toarray()[m-1,:], zeros(n,)) + + # make sure it still works for CSR format + csr = b.tocsr() + assert_array_equal(csr.toarray()[m-1,:], zeros(n,)) + + def test_ctor(self): + # Empty ctor + assert_raises(TypeError, dok_matrix) + + # Dense ctor + b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') + A = dok_matrix(b) + assert_equal(b.dtype, A.dtype) + assert_equal(A.todense(), b) + + # Sparse ctor + c = csr_matrix(b) + assert_equal(A.todense(), c.todense()) + + data = [[0, 1, 2], [3, 0, 0]] + d = dok_matrix(data, dtype=np.float32) + assert_equal(d.dtype, np.float32) + da = d.toarray() + assert_equal(da.dtype, np.float32) + assert_array_equal(da, data) + + def test_ticket1160(self): + # Regression test for ticket #1160. + a = dok_matrix((3,3)) + a[0,0] = 0 + # This assert would fail, because the above assignment would + # incorrectly call __set_item__ even though the value was 0. + assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys") + + # Slice assignments were also affected. + b = dok_matrix((3,3)) + b[:,0] = 0 + assert_(len(b.keys()) == 0, "Unexpected entries in keys") + + +TestDOK.init_class() + + +class TestLIL(sparse_test_class(minmax=False)): + spmatrix = lil_matrix + math_dtypes = [np.int_, np.float_, np.complex_] + + def test_dot(self): + A = matrix(zeros((10,10))) + A[0,3] = 10 + A[5,6] = 20 + + B = lil_matrix((10,10)) + B[0,3] = 10 + B[5,6] = 20 + assert_array_equal(A * A.T, (B * B.T).todense()) + assert_array_equal(A * A.H, (B * B.H).todense()) + + def test_scalar_mul(self): + x = lil_matrix((3,3)) + x[0,0] = 2 + + x = x*2 + assert_equal(x[0,0],4) + + x = x*0 + assert_equal(x[0,0],0) + + def test_inplace_ops(self): + A = lil_matrix([[0,2,3],[4,0,6]]) + B = lil_matrix([[0,1,0],[0,2,3]]) + + data = {'add': (B,A + B), + 'sub': (B,A - B), + 'mul': (3,A * 3)} + + for op,(other,expected) in data.items(): + result = A.copy() + getattr(result, '__i%s__' % op)(other) + + assert_array_equal(result.todense(), expected.todense()) + + # Ticket 1604. + A = lil_matrix((1,3), dtype=np.dtype('float64')) + B = array([0.1,0.1,0.1]) + A[0,:] += B + assert_array_equal(A[0,:].toarray().squeeze(), B) + + def test_lil_iteration(self): + row_data = [[1,2,3],[4,5,6]] + B = lil_matrix(array(row_data)) + for r,row in enumerate(B): + assert_array_equal(row.todense(),array(row_data[r],ndmin=2)) + + def test_lil_from_csr(self): + # Tests whether a lil_matrix can be constructed from a + # csr_matrix. 
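+        # Round trip: build a LIL matrix, convert it to CSR, feed the
+        # CSR back into the lil_matrix constructor and compare densely.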
+ B = lil_matrix((10,10)) + B[0,3] = 10 + B[5,6] = 20 + B[8,3] = 30 + B[3,8] = 40 + B[8,9] = 50 + C = B.tocsr() + D = lil_matrix(C) + assert_array_equal(C.A, D.A) + + def test_fancy_indexing_lil(self): + M = asmatrix(arange(25).reshape(5,5)) + A = lil_matrix(M) + + assert_equal(A[array([1,2,3]),2:3].todense(), M[array([1,2,3]),2:3]) + + def test_point_wise_multiply(self): + l = lil_matrix((4,3)) + l[0,0] = 1 + l[1,1] = 2 + l[2,2] = 3 + l[3,1] = 4 + + m = lil_matrix((4,3)) + m[0,0] = 1 + m[0,1] = 2 + m[2,2] = 3 + m[3,1] = 4 + m[3,2] = 4 + + assert_array_equal(l.multiply(m).todense(), + m.multiply(l).todense()) + + assert_array_equal(l.multiply(m).todense(), + [[1,0,0], + [0,0,0], + [0,0,9], + [0,16,0]]) + + def test_lil_multiply_removal(self): + # Ticket #1427. + a = lil_matrix(np.ones((3,3))) + a *= 2. + a[0, :] = 0 + + +TestLIL.init_class() + + +class TestCOO(sparse_test_class(getset=False, + slicing=False, slicing_assign=False, + fancy_indexing=False, fancy_assign=False)): + spmatrix = coo_matrix + math_dtypes = [np.int_, np.float_, np.complex_] + + def test_constructor1(self): + # unsorted triplet format + row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) + col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) + data = array([6., 10., 3., 9., 1., 4., + 11., 2., 8., 5., 7.]) + + coo = coo_matrix((data,(row,col)),(4,3)) + + assert_array_equal(arange(12).reshape(4,3),coo.todense()) + + def test_constructor2(self): + # unsorted triplet format with duplicates (which are summed) + row = array([0,1,2,2,2,2,0,0,2,2]) + col = array([0,2,0,2,1,1,1,0,0,2]) + data = array([2,9,-4,5,7,0,-1,2,1,-5]) + coo = coo_matrix((data,(row,col)),(3,3)) + + mat = matrix([[4,-1,0],[0,0,9],[-3,7,0]]) + + assert_array_equal(mat,coo.todense()) + + def test_constructor3(self): + # empty matrix + coo = coo_matrix((4,3)) + + assert_array_equal(coo.shape,(4,3)) + assert_array_equal(coo.row,[]) + assert_array_equal(coo.col,[]) + assert_array_equal(coo.data,[]) + assert_array_equal(coo.todense(),zeros((4,3))) + + def test_constructor4(self): + # from dense matrix + mat = array([[0,1,0,0], + [7,0,3,0], + [0,4,0,0]]) + coo = coo_matrix(mat) + assert_array_equal(coo.todense(),mat) + + # upgrade rank 1 arrays to row matrix + mat = array([0,1,0,0]) + coo = coo_matrix(mat) + assert_array_equal(coo.todense(),mat.reshape(1,-1)) + + @pytest.mark.xfail(run=False, reason='COO does not have a __getitem__') + def test_iterator(self): + pass + + def test_todia_all_zeros(self): + zeros = [[0, 0]] + dia = coo_matrix(zeros).todia() + assert_array_equal(dia.A, zeros) + + def test_sum_duplicates(self): + coo = coo_matrix((4,3)) + coo.sum_duplicates() + coo = coo_matrix(([1,2], ([1,0], [1,0]))) + coo.sum_duplicates() + assert_array_equal(coo.A, [[2,0],[0,1]]) + coo = coo_matrix(([1,2], ([1,1], [1,1]))) + coo.sum_duplicates() + assert_array_equal(coo.A, [[0,0],[0,3]]) + assert_array_equal(coo.row, [1]) + assert_array_equal(coo.col, [1]) + assert_array_equal(coo.data, [3]) + + def test_todok_duplicates(self): + coo = coo_matrix(([1,1,1,1], ([0,2,2,0], [0,1,1,0]))) + dok = coo.todok() + assert_array_equal(dok.A, coo.A) + + def test_eliminate_zeros(self): + data = array([1, 0, 0, 0, 2, 0, 3, 0]) + row = array([0, 0, 0, 1, 1, 1, 1, 1]) + col = array([1, 2, 3, 4, 5, 6, 7, 8]) + asp = coo_matrix((data, (row, col)), shape=(2,10)) + bsp = asp.copy() + asp.eliminate_zeros() + assert_((asp.data != 0).all()) + assert_array_equal(asp.A, bsp.A) + + def test_reshape_copy(self): + arr = [[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]] + new_shape = (2, 6) + x = 
coo_matrix(arr) + + y = x.reshape(new_shape) + assert_(y.data is x.data) + + y = x.reshape(new_shape, copy=False) + assert_(y.data is x.data) + + y = x.reshape(new_shape, copy=True) + assert_(not np.may_share_memory(y.data, x.data)) + + def test_large_dimensions_reshape(self): + # Test that reshape is immune to integer overflow when number of elements + # exceeds 2^31-1 + mat1 = coo_matrix(([1], ([3000000], [1000])), (3000001, 1001)) + mat2 = coo_matrix(([1], ([1000], [3000000])), (1001, 3000001)) + + # assert_array_equal is slow for big matrices because it expects dense + # Using __ne__ and nnz instead + assert_((mat1.reshape((1001, 3000001), order='C') != mat2).nnz == 0) + assert_((mat2.reshape((3000001, 1001), order='F') != mat1).nnz == 0) + + +TestCOO.init_class() + + +class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=False, + fancy_indexing=False, fancy_assign=False, + minmax=False, nnz_axis=False)): + spmatrix = dia_matrix + math_dtypes = [np.int_, np.float_, np.complex_] + + def test_constructor1(self): + D = matrix([[1, 0, 3, 0], + [1, 2, 0, 4], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + data = np.array([[1,2,3,4]]).repeat(3,axis=0) + offsets = np.array([0,-1,2]) + assert_equal(dia_matrix((data,offsets), shape=(4,4)).todense(), D) + + @pytest.mark.xfail(run=False, reason='DIA does not have a __getitem__') + def test_iterator(self): + pass + + @with_64bit_maxval_limit(3) + def test_setdiag_dtype(self): + m = dia_matrix(np.eye(3)) + assert_equal(m.offsets.dtype, np.int32) + m.setdiag((3,), k=2) + assert_equal(m.offsets.dtype, np.int32) + + m = dia_matrix(np.eye(4)) + assert_equal(m.offsets.dtype, np.int64) + m.setdiag((3,), k=3) + assert_equal(m.offsets.dtype, np.int64) + + @pytest.mark.skip(reason='DIA stores extra zeros') + def test_getnnz_axis(self): + pass + + +TestDIA.init_class() + + +class TestBSR(sparse_test_class(getset=False, + slicing=False, slicing_assign=False, + fancy_indexing=False, fancy_assign=False, + nnz_axis=False)): + spmatrix = bsr_matrix + math_dtypes = [np.int_, np.float_, np.complex_] + + def test_constructor1(self): + # check native BSR format constructor + indptr = array([0,2,2,4]) + indices = array([0,2,2,3]) + data = zeros((4,2,3)) + + data[0] = array([[0, 1, 2], + [3, 0, 5]]) + data[1] = array([[0, 2, 4], + [6, 0, 10]]) + data[2] = array([[0, 4, 8], + [12, 0, 20]]) + data[3] = array([[0, 5, 10], + [15, 0, 25]]) + + A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]]) + Asp = bsr_matrix((data,indices,indptr),shape=(6,12)) + assert_equal(Asp.todense(),A) + + # infer shape from arrays + Asp = bsr_matrix((data,indices,indptr)) + assert_equal(Asp.todense(),A) + + def test_constructor2(self): + # construct from dense + + # test zero mats + for shape in [(1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]: + A = zeros(shape) + assert_equal(bsr_matrix(A).todense(),A) + A = zeros((4,6)) + assert_equal(bsr_matrix(A,blocksize=(2,2)).todense(),A) + assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A) + + A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]]) + assert_equal(bsr_matrix(A).todense(),A) + assert_equal(bsr_matrix(A,shape=(6,12)).todense(),A) + assert_equal(bsr_matrix(A,blocksize=(1,1)).todense(),A) + assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A) + assert_equal(bsr_matrix(A,blocksize=(2,6)).todense(),A) + assert_equal(bsr_matrix(A,blocksize=(2,12)).todense(),A) + assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A) + assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A) + + A = 
kron([[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]]) + assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A) + + def test_constructor3(self): + # construct from coo-like (data,(row,col)) format + arg = ([1,2,3], ([0,1,1], [0,0,1])) + A = array([[1,0],[2,3]]) + assert_equal(bsr_matrix(arg, blocksize=(2,2)).todense(), A) + + def test_constructor4(self): + # regression test for gh-6292: bsr_matrix((data, indices, indptr)) was + # trying to compare an int to a None + n = 8 + data = np.ones((n, n, 1), dtype=np.int8) + indptr = np.array([0, n], dtype=np.int32) + indices = np.arange(n, dtype=np.int32) + bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False) + + def test_bsr_tocsr(self): + # check native conversion from BSR to CSR + indptr = array([0, 2, 2, 4]) + indices = array([0, 2, 2, 3]) + data = zeros((4, 2, 3)) + + data[0] = array([[0, 1, 2], + [3, 0, 5]]) + data[1] = array([[0, 2, 4], + [6, 0, 10]]) + data[2] = array([[0, 4, 8], + [12, 0, 20]]) + data[3] = array([[0, 5, 10], + [15, 0, 25]]) + + A = kron([[1, 0, 2, 0], [0, 0, 0, 0], [0, 0, 4, 5]], + [[0, 1, 2], [3, 0, 5]]) + Absr = bsr_matrix((data, indices, indptr), shape=(6, 12)) + Acsr = Absr.tocsr() + Acsr_via_coo = Absr.tocoo().tocsr() + assert_equal(Acsr.todense(), A) + assert_equal(Acsr.todense(), Acsr_via_coo.todense()) + + def test_eliminate_zeros(self): + data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T + data = data.reshape(-1,2,2) + indices = array([1, 2, 3, 4, 5, 6, 7, 8]) + indptr = array([0, 3, 8]) + asp = bsr_matrix((data, indices, indptr), shape=(4,20)) + bsp = asp.copy() + asp.eliminate_zeros() + assert_array_equal(asp.nnz, 3*4) + assert_array_equal(asp.todense(),bsp.todense()) + + def test_bsr_matvec(self): + A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5)) + x = arange(A.shape[1]).reshape(-1,1) + assert_equal(A*x, A.todense()*x) + + def test_bsr_matvecs(self): + A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5)) + x = arange(A.shape[1]*6).reshape(-1,6) + assert_equal(A*x, A.todense()*x) + + @pytest.mark.xfail(run=False, reason='BSR does not have a __getitem__') + def test_iterator(self): + pass + + @pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__') + def test_setdiag(self): + pass + + def test_resize_blocked(self): + # test resize() with non-(1,1) blocksize + D = np.array([[1, 0, 3, 4], + [2, 0, 0, 0], + [3, 0, 0, 0]]) + S = self.spmatrix(D, blocksize=(1, 2)) + assert_(S.resize((3, 2)) is None) + assert_array_equal(S.A, [[1, 0], + [2, 0], + [3, 0]]) + S.resize((2, 2)) + assert_array_equal(S.A, [[1, 0], + [2, 0]]) + S.resize((3, 2)) + assert_array_equal(S.A, [[1, 0], + [2, 0], + [0, 0]]) + S.resize((3, 4)) + assert_array_equal(S.A, [[1, 0, 0, 0], + [2, 0, 0, 0], + [0, 0, 0, 0]]) + assert_raises(ValueError, S.resize, (2, 3)) + + @pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__') + def test_setdiag_comprehensive(self): + pass + + def test_scalar_idx_dtype(self): + # Check that index dtype takes into account all parameters + # passed to sparsetools, including the scalar ones + indptr = np.zeros(2, dtype=np.int32) + indices = np.zeros(0, dtype=np.int32) + vals = np.zeros((0, 1, 1)) + a = bsr_matrix((vals, indices, indptr), shape=(1, 2**31-1)) + b = bsr_matrix((vals, indices, indptr), shape=(1, 2**31)) + c = bsr_matrix((1, 2**31-1)) + d = bsr_matrix((1, 2**31)) + assert_equal(a.indptr.dtype, np.int32) + assert_equal(b.indptr.dtype, np.int64) + assert_equal(c.indptr.dtype, np.int32) + assert_equal(d.indptr.dtype, np.int64) + + try: 
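+            # The blocks below are empty (leading dimension 0) but
+            # declare a roughly 2**31-wide block dimension, which may
+            # overflow 32-bit index arithmetic; hence the except clause.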
+ vals2 = np.zeros((0, 1, 2**31-1)) + vals3 = np.zeros((0, 1, 2**31)) + e = bsr_matrix((vals2, indices, indptr), shape=(1, 2**31-1)) + f = bsr_matrix((vals3, indices, indptr), shape=(1, 2**31)) + assert_equal(e.indptr.dtype, np.int32) + assert_equal(f.indptr.dtype, np.int64) + except (MemoryError, ValueError): + # May fail on 32-bit Python + e = 0 + f = 0 + + # These shouldn't fail + for x in [a, b, c, d, e, f]: + x + x + + +TestBSR.init_class() + + +#------------------------------------------------------------------------------ +# Tests for non-canonical representations (with duplicates, unsorted indices) +#------------------------------------------------------------------------------ + +def _same_sum_duplicate(data, *inds, **kwargs): + """Duplicates entries to produce the same matrix""" + indptr = kwargs.pop('indptr', None) + if np.issubdtype(data.dtype, np.bool_) or \ + np.issubdtype(data.dtype, np.unsignedinteger): + if indptr is None: + return (data,) + inds + else: + return (data,) + inds + (indptr,) + + zeros_pos = (data == 0).nonzero() + + # duplicate data + data = data.repeat(2, axis=0) + data[::2] -= 1 + data[1::2] = 1 + + # don't spoil all explicit zeros + if zeros_pos[0].size > 0: + pos = tuple(p[0] for p in zeros_pos) + pos1 = (2*pos[0],) + pos[1:] + pos2 = (2*pos[0]+1,) + pos[1:] + data[pos1] = 0 + data[pos2] = 0 + + inds = tuple(indices.repeat(2) for indices in inds) + + if indptr is None: + return (data,) + inds + else: + return (data,) + inds + (indptr * 2,) + + +class _NonCanonicalMixin(object): + def spmatrix(self, D, sorted_indices=False, **kwargs): + """Replace D with a non-canonical equivalent: containing + duplicate elements and explicit zeros""" + construct = super(_NonCanonicalMixin, self).spmatrix + M = construct(D, **kwargs) + + zero_pos = (M.A == 0).nonzero() + has_zeros = (zero_pos[0].size > 0) + if has_zeros: + k = zero_pos[0].size//2 + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + M = self._insert_explicit_zero(M, zero_pos[0][k], zero_pos[1][k]) + + arg1 = self._arg1_for_noncanonical(M, sorted_indices) + if 'shape' not in kwargs: + kwargs['shape'] = M.shape + NC = construct(arg1, **kwargs) + + # check that result is valid + if NC.dtype in [np.float32, np.complex64]: + # For single-precision floats, the differences between M and NC + # that are introduced by the extra operations involved in the + # construction of NC necessitate a more lenient tolerance level + # than the default. 
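+            # (float32 carries roughly 7 significant decimal digits,
+            # so 1e-5 leaves headroom for the round-off introduced by
+            # the duplicate-splitting above)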
+ rtol = 1e-05 + else: + rtol = 1e-07 + assert_allclose(NC.A, M.A, rtol=rtol) + + # check that at least one explicit zero + if has_zeros: + assert_((NC.data == 0).any()) + # TODO check that NC has duplicates (which are not explicit zeros) + + return NC + + @pytest.mark.skip(reason='bool(matrix) counts explicit zeros') + def test_bool(self): + pass + + @pytest.mark.skip(reason='getnnz-axis counts explicit zeros') + def test_getnnz_axis(self): + pass + + @pytest.mark.skip(reason='nnz counts explicit zeros') + def test_empty(self): + pass + + +class _NonCanonicalCompressedMixin(_NonCanonicalMixin): + def _arg1_for_noncanonical(self, M, sorted_indices=False): + """Return non-canonical constructor arg1 equivalent to M""" + data, indices, indptr = _same_sum_duplicate(M.data, M.indices, + indptr=M.indptr) + if not sorted_indices: + for start, stop in izip(indptr, indptr[1:]): + indices[start:stop] = indices[start:stop][::-1].copy() + data[start:stop] = data[start:stop][::-1].copy() + return data, indices, indptr + + def _insert_explicit_zero(self, M, i, j): + M[i,j] = 0 + return M + + +class _NonCanonicalCSMixin(_NonCanonicalCompressedMixin): + def test_getelement(self): + def check(dtype, sorted_indices): + D = array([[1,0,0], + [4,3,0], + [0,2,0], + [0,0,0]], dtype=dtype) + A = self.spmatrix(D, sorted_indices=sorted_indices) + + M,N = D.shape + + for i in range(-M, M): + for j in range(-N, N): + assert_equal(A[i,j], D[i,j]) + + for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]: + assert_raises((IndexError, TypeError), A.__getitem__, ij) + + for dtype in supported_dtypes: + for sorted_indices in [False, True]: + check(np.dtype(dtype), sorted_indices) + + def test_setitem_sparse(self): + D = np.eye(3) + A = self.spmatrix(D) + B = self.spmatrix([[1,2,3]]) + + D[1,:] = B.toarray() + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + A[1,:] = B + assert_array_equal(A.toarray(), D) + + D[:,2] = B.toarray().ravel() + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive") + A[:,2] = B.T + assert_array_equal(A.toarray(), D) + + @pytest.mark.xfail(run=False, reason='inverse broken with non-canonical matrix') + def test_inv(self): + pass + + @pytest.mark.xfail(run=False, reason='solve broken with non-canonical matrix') + def test_solve(self): + pass + + +class TestCSRNonCanonical(_NonCanonicalCSMixin, TestCSR): + pass + + +class TestCSCNonCanonical(_NonCanonicalCSMixin, TestCSC): + pass + + +class TestBSRNonCanonical(_NonCanonicalCompressedMixin, TestBSR): + def _insert_explicit_zero(self, M, i, j): + x = M.tocsr() + x[i,j] = 0 + return x.tobsr(blocksize=M.blocksize) + + @pytest.mark.xfail(run=False, reason='diagonal broken with non-canonical BSR') + def test_diagonal(self): + pass + + @pytest.mark.xfail(run=False, reason='expm broken with non-canonical BSR') + def test_expm(self): + pass + + +class TestCOONonCanonical(_NonCanonicalMixin, TestCOO): + def _arg1_for_noncanonical(self, M, sorted_indices=None): + """Return non-canonical constructor arg1 equivalent to M""" + data, row, col = _same_sum_duplicate(M.data, M.row, M.col) + return data, (row, col) + + def _insert_explicit_zero(self, M, i, j): + M.data = np.r_[M.data.dtype.type(0), M.data] + M.row = np.r_[M.row.dtype.type(i), M.row] + M.col = np.r_[M.col.dtype.type(j), M.col] + return M + + def test_setdiag_noncanonical(self): + m = self.spmatrix(np.eye(3)) + 
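+        # self.spmatrix here is the non-canonical variant, which seeds
+        # duplicates and explicit zeros (see _NonCanonicalMixin), so
+        # canonicalise first to give setdiag a clean starting point.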
m.sum_duplicates() + m.setdiag([3, 2], k=1) + m.sum_duplicates() + assert_(np.all(np.diff(m.col) >= 0)) + + +def cases_64bit(): + TEST_CLASSES = [TestBSR, TestCOO, TestCSC, TestCSR, TestDIA, + # lil/dok->other conversion operations have get_index_dtype + TestDOK, TestLIL + ] + + # The following features are missing, so skip the tests: + SKIP_TESTS = { + 'test_expm': 'expm for 64-bit indices not available', + 'test_inv': 'linsolve for 64-bit indices not available', + 'test_solve': 'linsolve for 64-bit indices not available', + 'test_scalar_idx_dtype': 'test implemented in base class', + 'test_large_dimensions_reshape': 'test actually requires 64-bit to work', + } + + for cls in TEST_CLASSES: + for method_name in sorted(dir(cls)): + method = getattr(cls, method_name) + if (method_name.startswith('test_') and + not getattr(method, 'slow', False)): + marks = [] + + msg = SKIP_TESTS.get(method_name) + if bool(msg): + marks += [pytest.mark.skip(reason=msg)] + + if LooseVersion(pytest.__version__) >= LooseVersion("3.6.0"): + markers = getattr(method, 'pytestmark', []) + for mark in markers: + if mark.name in ('skipif', 'skip', 'xfail', 'xslow'): + marks.append(mark) + else: + for mname in ['skipif', 'skip', 'xfail', 'xslow']: + if hasattr(method, mname): + marks += [getattr(method, mname)] + + yield pytest.param(cls, method_name, marks=marks) + + +class Test64Bit(object): + MAT_CLASSES = [bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix] + + def _create_some_matrix(self, mat_cls, m, n): + return mat_cls(np.random.rand(m, n)) + + def _compare_index_dtype(self, m, dtype): + dtype = np.dtype(dtype) + if isinstance(m, csc_matrix) or isinstance(m, csr_matrix) \ + or isinstance(m, bsr_matrix): + return (m.indices.dtype == dtype) and (m.indptr.dtype == dtype) + elif isinstance(m, coo_matrix): + return (m.row.dtype == dtype) and (m.col.dtype == dtype) + elif isinstance(m, dia_matrix): + return (m.offsets.dtype == dtype) + else: + raise ValueError("matrix %r has no integer indices" % (m,)) + + def test_decorator_maxval_limit(self): + # Test that the with_64bit_maxval_limit decorator works + + @with_64bit_maxval_limit(maxval_limit=10) + def check(mat_cls): + m = mat_cls(np.random.rand(10, 1)) + assert_(self._compare_index_dtype(m, np.int32)) + m = mat_cls(np.random.rand(11, 1)) + assert_(self._compare_index_dtype(m, np.int64)) + + for mat_cls in self.MAT_CLASSES: + check(mat_cls) + + def test_decorator_maxval_random(self): + # Test that the with_64bit_maxval_limit decorator works (2) + + @with_64bit_maxval_limit(random=True) + def check(mat_cls): + seen_32 = False + seen_64 = False + for k in range(100): + m = self._create_some_matrix(mat_cls, 9, 9) + seen_32 = seen_32 or self._compare_index_dtype(m, np.int32) + seen_64 = seen_64 or self._compare_index_dtype(m, np.int64) + if seen_32 and seen_64: + break + else: + raise AssertionError("both 32 and 64 bit indices not seen") + + for mat_cls in self.MAT_CLASSES: + check(mat_cls) + + def _check_resiliency(self, cls, method_name, **kw): + # Resiliency test, to check that sparse matrices deal reasonably + # with varying index data types. 
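+        # with_64bit_maxval_limit (defined earlier in this file) is
+        # understood to patch get_index_dtype according to **kw, so the
+        # same test body can be replayed with forced int32, forced int64,
+        # or randomly chosen index dtypes.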
+ + @with_64bit_maxval_limit(**kw) + def check(cls, method_name): + instance = cls() + if hasattr(instance, 'setup_method'): + instance.setup_method() + try: + getattr(instance, method_name)() + finally: + if hasattr(instance, 'teardown_method'): + instance.teardown_method() + + check(cls, method_name) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_resiliency_limit_10(self, cls, method_name): + self._check_resiliency(cls, method_name, maxval_limit=10) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_resiliency_random(self, cls, method_name): + # bsr_matrix.eliminate_zeros relies on csr_matrix constructor + # not making copies of index arrays --- this is not + # necessarily true when we pick the index data type randomly + self._check_resiliency(cls, method_name, random=True) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_resiliency_all_32(self, cls, method_name): + self._check_resiliency(cls, method_name, fixed_dtype=np.int32) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_resiliency_all_64(self, cls, method_name): + self._check_resiliency(cls, method_name, fixed_dtype=np.int64) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_no_64(self, cls, method_name): + self._check_resiliency(cls, method_name, assert_32bit=True) + + def test_downcast_intp(self): + # Check that bincount and ufunc.reduceat intp downcasts are + # dealt with. The point here is to trigger points in the code + # that can fail on 32-bit systems when using 64-bit indices, + # due to use of functions that only work with intp-size + # indices. + + @with_64bit_maxval_limit(fixed_dtype=np.int64, + downcast_maxval=1) + def check_limited(): + # These involve indices larger than `downcast_maxval` + a = csc_matrix([[1, 2], [3, 4], [5, 6]]) + assert_raises(AssertionError, a.getnnz, axis=1) + assert_raises(AssertionError, a.sum, axis=0) + + a = csr_matrix([[1, 2, 3], [3, 4, 6]]) + assert_raises(AssertionError, a.getnnz, axis=0) + + a = coo_matrix([[1, 2, 3], [3, 4, 5]]) + assert_raises(AssertionError, a.getnnz, axis=0) + + @with_64bit_maxval_limit(fixed_dtype=np.int64) + def check_unlimited(): + # These involve indices larger than `downcast_maxval` + a = csc_matrix([[1, 2], [3, 4], [5, 6]]) + a.getnnz(axis=1) + a.sum(axis=0) + + a = csr_matrix([[1, 2, 3], [3, 4, 6]]) + a.getnnz(axis=0) + + a = coo_matrix([[1, 2, 3], [3, 4, 5]]) + a.getnnz(axis=0) + + check_limited() + check_unlimited() diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_base.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_base.pyc new file mode 100644 index 0000000..83f57b6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_base.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_construct.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_construct.py new file mode 100644 index 0000000..84c80ae --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_construct.py @@ -0,0 +1,476 @@ +"""test sparse matrix construction functions""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import array, matrix +from numpy.testing import (assert_equal, assert_, + assert_array_equal, assert_array_almost_equal_nulp) +import pytest +from pytest import raises as assert_raises +from scipy._lib._testutils import check_free_memory + 
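+# check_free_memory(n) is understood to skip the calling test when fewer
+# than n megabytes of RAM are free; test_concatenate_int32_overflow below
+# relies on it before allocating on the order of 30 GB.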
+from scipy.sparse import csr_matrix, coo_matrix + +from scipy.sparse import construct +from scipy.sparse.construct import rand as sprand + +sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok'] + +#TODO check whether format=XXX is respected + + +def _sprandn(m, n, density=0.01, format="coo", dtype=None, random_state=None): + # Helper function for testing. + if random_state is None: + random_state = np.random + elif isinstance(random_state, (int, np.integer)): + random_state = np.random.RandomState(random_state) + data_rvs = random_state.randn + return construct.random(m, n, density, format, dtype, + random_state, data_rvs) + + +class TestConstructUtils(object): + def test_spdiags(self): + diags1 = array([[1, 2, 3, 4, 5]]) + diags2 = array([[1, 2, 3, 4, 5], + [6, 7, 8, 9,10]]) + diags3 = array([[1, 2, 3, 4, 5], + [6, 7, 8, 9,10], + [11,12,13,14,15]]) + + cases = [] + cases.append((diags1, 0, 1, 1, [[1]])) + cases.append((diags1, [0], 1, 1, [[1]])) + cases.append((diags1, [0], 2, 1, [[1],[0]])) + cases.append((diags1, [0], 1, 2, [[1,0]])) + cases.append((diags1, [1], 1, 2, [[0,2]])) + cases.append((diags1,[-1], 1, 2, [[0,0]])) + cases.append((diags1, [0], 2, 2, [[1,0],[0,2]])) + cases.append((diags1,[-1], 2, 2, [[0,0],[1,0]])) + cases.append((diags1, [3], 2, 2, [[0,0],[0,0]])) + cases.append((diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]])) + cases.append((diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]])) + cases.append((diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]])) + + cases.append((diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]])) + cases.append((diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]])) + cases.append((diags2, [2,-3], 6, 6, [[0,0,3,0,0,0], + [0,0,0,4,0,0], + [0,0,0,0,5,0], + [6,0,0,0,0,0], + [0,7,0,0,0,0], + [0,0,8,0,0,0]])) + + cases.append((diags3, [-1,0,1], 6, 6, [[6,12, 0, 0, 0, 0], + [1, 7,13, 0, 0, 0], + [0, 2, 8,14, 0, 0], + [0, 0, 3, 9,15, 0], + [0, 0, 0, 4,10, 0], + [0, 0, 0, 0, 5, 0]])) + cases.append((diags3, [-4,2,-1], 6, 5, [[0, 0, 8, 0, 0], + [11, 0, 0, 9, 0], + [0,12, 0, 0,10], + [0, 0,13, 0, 0], + [1, 0, 0,14, 0], + [0, 2, 0, 0,15]])) + + for d,o,m,n,result in cases: + assert_equal(construct.spdiags(d,o,m,n).todense(), result) + + def test_diags(self): + a = array([1, 2, 3, 4, 5]) + b = array([6, 7, 8, 9, 10]) + c = array([11, 12, 13, 14, 15]) + + cases = [] + cases.append((a[:1], 0, (1, 1), [[1]])) + cases.append(([a[:1]], [0], (1, 1), [[1]])) + cases.append(([a[:1]], [0], (2, 1), [[1],[0]])) + cases.append(([a[:1]], [0], (1, 2), [[1,0]])) + cases.append(([a[:1]], [1], (1, 2), [[0,1]])) + cases.append(([a[:2]], [0], (2, 2), [[1,0],[0,2]])) + cases.append(([a[:1]],[-1], (2, 2), [[0,0],[1,0]])) + cases.append(([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]])) + cases.append(([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]])) + cases.append(([a[:1]], [-2], (3, 5), [[0,0,0,0,0],[0,0,0,0,0],[1,0,0,0,0]])) + cases.append(([a[:2]], [-1], (3, 5), [[0,0,0,0,0],[1,0,0,0,0],[0,2,0,0,0]])) + cases.append(([a[:3]], [0], (3, 5), [[1,0,0,0,0],[0,2,0,0,0],[0,0,3,0,0]])) + cases.append(([a[:3]], [1], (3, 5), [[0,1,0,0,0],[0,0,2,0,0],[0,0,0,3,0]])) + cases.append(([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]])) + cases.append(([a[:2]], [3], (3, 5), [[0,0,0,1,0],[0,0,0,0,2],[0,0,0,0,0]])) + cases.append(([a[:1]], [4], (3, 5), [[0,0,0,0,1],[0,0,0,0,0],[0,0,0,0,0]])) + cases.append(([a[:1]], [-4], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[1,0,0]])) + cases.append(([a[:2]], [-3], (5, 3), 
[[0,0,0],[0,0,0],[0,0,0],[1,0,0],[0,2,0]])) + cases.append(([a[:3]], [-2], (5, 3), [[0,0,0],[0,0,0],[1,0,0],[0,2,0],[0,0,3]])) + cases.append(([a[:3]], [-1], (5, 3), [[0,0,0],[1,0,0],[0,2,0],[0,0,3],[0,0,0]])) + cases.append(([a[:3]], [0], (5, 3), [[1,0,0],[0,2,0],[0,0,3],[0,0,0],[0,0,0]])) + cases.append(([a[:2]], [1], (5, 3), [[0,1,0],[0,0,2],[0,0,0],[0,0,0],[0,0,0]])) + cases.append(([a[:1]], [2], (5, 3), [[0,0,1],[0,0,0],[0,0,0],[0,0,0],[0,0,0]])) + + cases.append(([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]])) + cases.append(([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]])) + cases.append(([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0], + [0,0,0,2,0,0], + [0,0,0,0,3,0], + [6,0,0,0,0,4], + [0,7,0,0,0,0], + [0,0,8,0,0,0]])) + + cases.append(([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[6,11, 0, 0, 0], + [1, 7,12, 0, 0], + [0, 2, 8,13, 0], + [0, 0, 3, 9,14], + [0, 0, 0, 4,10]])) + cases.append(([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[0, 0, 6, 0, 0], + [11, 0, 0, 7, 0], + [0,12, 0, 0, 8], + [0, 0,13, 0, 0], + [1, 0, 0,14, 0], + [0, 2, 0, 0,15]])) + + # too long arrays are OK + cases.append(([a], [0], (1, 1), [[1]])) + cases.append(([a[:3],b], [0,2], (3, 3), [[1, 0, 6], [0, 2, 0], [0, 0, 3]])) + cases.append((np.array([[1, 2, 3], [4, 5, 6]]), [0,-1], (3, 3), [[1, 0, 0], [4, 2, 0], [0, 5, 3]])) + + # scalar case: broadcasting + cases.append(([1,-2,1], [1,0,-1], (3, 3), [[-2, 1, 0], + [1, -2, 1], + [0, 1, -2]])) + + for d, o, shape, result in cases: + err_msg = "%r %r %r %r" % (d, o, shape, result) + assert_equal(construct.diags(d, o, shape=shape).todense(), + result, err_msg=err_msg) + + if shape[0] == shape[1] and hasattr(d[0], '__len__') and len(d[0]) <= max(shape): + # should be able to find the shape automatically + assert_equal(construct.diags(d, o).todense(), result, + err_msg=err_msg) + + def test_diags_default(self): + a = array([1, 2, 3, 4, 5]) + assert_equal(construct.diags(a).todense(), np.diag(a)) + + def test_diags_default_bad(self): + a = array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]]) + assert_raises(ValueError, construct.diags, a) + + def test_diags_bad(self): + a = array([1, 2, 3, 4, 5]) + b = array([6, 7, 8, 9, 10]) + c = array([11, 12, 13, 14, 15]) + + cases = [] + cases.append(([a[:0]], 0, (1, 1))) + cases.append(([a[:4],b,c[:3]], [-1,0,1], (5, 5))) + cases.append(([a[:2],c,b[:3]], [-4,2,-1], (6, 5))) + cases.append(([a[:2],c,b[:3]], [-4,2,-1], None)) + cases.append(([], [-4,2,-1], None)) + cases.append(([1], [-5], (4, 4))) + cases.append(([a], 0, None)) + + for d, o, shape in cases: + assert_raises(ValueError, construct.diags, d, o, shape) + + assert_raises(TypeError, construct.diags, [[None]], [0]) + + def test_diags_vs_diag(self): + # Check that + # + # diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ... 
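+        #
+        # For example, diags([[1, 1], [2, 2, 2]], [-1, 0]).toarray()
+        # equals np.diag([1, 1], -1) + np.diag([2, 2, 2]):
+        #
+        #     [[2, 0, 0],
+        #      [1, 2, 0],
+        #      [0, 1, 2]]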
+ # + + np.random.seed(1234) + + for n_diags in [1, 2, 3, 4, 5, 10]: + n = 1 + n_diags//2 + np.random.randint(0, 10) + + offsets = np.arange(-n+1, n-1) + np.random.shuffle(offsets) + offsets = offsets[:n_diags] + + diagonals = [np.random.rand(n - abs(q)) for q in offsets] + + mat = construct.diags(diagonals, offsets) + dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)]) + + assert_array_almost_equal_nulp(mat.todense(), dense_mat) + + if len(offsets) == 1: + mat = construct.diags(diagonals[0], offsets[0]) + dense_mat = np.diag(diagonals[0], offsets[0]) + assert_array_almost_equal_nulp(mat.todense(), dense_mat) + + def test_diags_dtype(self): + x = construct.diags([2.2], [0], shape=(2, 2), dtype=int) + assert_equal(x.dtype, int) + assert_equal(x.todense(), [[2, 0], [0, 2]]) + + def test_diags_one_diagonal(self): + d = list(range(5)) + for k in range(-5, 6): + assert_equal(construct.diags(d, k).toarray(), + construct.diags([d], [k]).toarray()) + + def test_diags_empty(self): + x = construct.diags([]) + assert_equal(x.shape, (0, 0)) + + def test_identity(self): + assert_equal(construct.identity(1).toarray(), [[1]]) + assert_equal(construct.identity(2).toarray(), [[1,0],[0,1]]) + + I = construct.identity(3, dtype='int8', format='dia') + assert_equal(I.dtype, np.dtype('int8')) + assert_equal(I.format, 'dia') + + for fmt in sparse_formats: + I = construct.identity(3, format=fmt) + assert_equal(I.format, fmt) + assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]]) + + def test_eye(self): + assert_equal(construct.eye(1,1).toarray(), [[1]]) + assert_equal(construct.eye(2,3).toarray(), [[1,0,0],[0,1,0]]) + assert_equal(construct.eye(3,2).toarray(), [[1,0],[0,1],[0,0]]) + assert_equal(construct.eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]]) + + assert_equal(construct.eye(3,3,dtype='int16').dtype, np.dtype('int16')) + + for m in [3, 5]: + for n in [3, 5]: + for k in range(-5,6): + assert_equal(construct.eye(m, n, k=k).toarray(), np.eye(m, n, k=k)) + if m == n: + assert_equal(construct.eye(m, k=k).toarray(), np.eye(m, n, k=k)) + + def test_eye_one(self): + assert_equal(construct.eye(1).toarray(), [[1]]) + assert_equal(construct.eye(2).toarray(), [[1,0],[0,1]]) + + I = construct.eye(3, dtype='int8', format='dia') + assert_equal(I.dtype, np.dtype('int8')) + assert_equal(I.format, 'dia') + + for fmt in sparse_formats: + I = construct.eye(3, format=fmt) + assert_equal(I.format, fmt) + assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]]) + + def test_kron(self): + cases = [] + + cases.append(array([[0]])) + cases.append(array([[-1]])) + cases.append(array([[4]])) + cases.append(array([[10]])) + cases.append(array([[0],[0]])) + cases.append(array([[0,0]])) + cases.append(array([[1,2],[3,4]])) + cases.append(array([[0,2],[5,0]])) + cases.append(array([[0,2,-6],[8,0,14]])) + cases.append(array([[5,4],[0,0],[6,0]])) + cases.append(array([[5,4,4],[1,0,0],[6,0,8]])) + cases.append(array([[0,1,0,2,0,5,8]])) + cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]])) + + for a in cases: + for b in cases: + result = construct.kron(csr_matrix(a),csr_matrix(b)).todense() + expected = np.kron(a,b) + assert_array_equal(result,expected) + + def test_kronsum(self): + cases = [] + + cases.append(array([[0]])) + cases.append(array([[-1]])) + cases.append(array([[4]])) + cases.append(array([[10]])) + cases.append(array([[1,2],[3,4]])) + cases.append(array([[0,2],[5,0]])) + cases.append(array([[0,2,-6],[8,0,14],[0,3,0]])) + cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]])) + + for a in cases: + for b in cases: + 
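+                # kronsum(A, B) is the Kronecker sum: with I_k the k x k
+                # identity, kronsum(a, b) == kron(I_len(b), a) + kron(b, I_len(a)),
+                # which is exactly how `expected` is assembled below.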
result = construct.kronsum(csr_matrix(a),csr_matrix(b)).todense() + expected = np.kron(np.eye(len(b)), a) + \ + np.kron(b, np.eye(len(a))) + assert_array_equal(result,expected) + + def test_vstack(self): + + A = coo_matrix([[1,2],[3,4]]) + B = coo_matrix([[5,6]]) + + expected = matrix([[1, 2], + [3, 4], + [5, 6]]) + assert_equal(construct.vstack([A,B]).todense(), expected) + assert_equal(construct.vstack([A,B], dtype=np.float32).dtype, np.float32) + assert_equal(construct.vstack([A.tocsr(),B.tocsr()]).todense(), + expected) + assert_equal(construct.vstack([A.tocsr(),B.tocsr()], dtype=np.float32).dtype, + np.float32) + assert_equal(construct.vstack([A.tocsr(),B.tocsr()], + dtype=np.float32).indices.dtype, np.int32) + assert_equal(construct.vstack([A.tocsr(),B.tocsr()], + dtype=np.float32).indptr.dtype, np.int32) + + def test_hstack(self): + + A = coo_matrix([[1,2],[3,4]]) + B = coo_matrix([[5],[6]]) + + expected = matrix([[1, 2, 5], + [3, 4, 6]]) + assert_equal(construct.hstack([A,B]).todense(), expected) + assert_equal(construct.hstack([A,B], dtype=np.float32).dtype, np.float32) + assert_equal(construct.hstack([A.tocsc(),B.tocsc()]).todense(), + expected) + assert_equal(construct.hstack([A.tocsc(),B.tocsc()], dtype=np.float32).dtype, + np.float32) + + def test_bmat(self): + + A = coo_matrix([[1,2],[3,4]]) + B = coo_matrix([[5],[6]]) + C = coo_matrix([[7]]) + D = coo_matrix((0,0)) + + expected = matrix([[1, 2, 5], + [3, 4, 6], + [0, 0, 7]]) + assert_equal(construct.bmat([[A,B],[None,C]]).todense(), expected) + + expected = matrix([[1, 2, 0], + [3, 4, 0], + [0, 0, 7]]) + assert_equal(construct.bmat([[A,None],[None,C]]).todense(), expected) + + expected = matrix([[0, 5], + [0, 6], + [7, 0]]) + assert_equal(construct.bmat([[None,B],[C,None]]).todense(), expected) + + expected = matrix(np.empty((0,0))) + assert_equal(construct.bmat([[None,None]]).todense(), expected) + assert_equal(construct.bmat([[None,D],[D,None]]).todense(), expected) + + # test bug reported in gh-5976 + expected = matrix([[7]]) + assert_equal(construct.bmat([[None,D],[C,None]]).todense(), expected) + + # test failure cases + with assert_raises(ValueError) as excinfo: + construct.bmat([[A], [B]]) + excinfo.match(r'Got blocks\[1,0\]\.shape\[1\] == 1, expected 2') + + with assert_raises(ValueError) as excinfo: + construct.bmat([[A, C]]) + excinfo.match(r'Got blocks\[0,1\]\.shape\[0\] == 1, expected 2') + + @pytest.mark.slow + def test_concatenate_int32_overflow(self): + """ test for indptr overflow when concatenating matrices """ + check_free_memory(30000) + + n = 33000 + A = csr_matrix(np.ones((n, n), dtype=bool)) + B = A.copy() + C = construct._compressed_sparse_stack((A,B), 0) + + assert_(np.all(np.equal(np.diff(C.indptr), n))) + assert_equal(C.indices.dtype, np.int64) + assert_equal(C.indptr.dtype, np.int64) + + def test_block_diag_basic(self): + """ basic test for block_diag """ + A = coo_matrix([[1,2],[3,4]]) + B = coo_matrix([[5],[6]]) + C = coo_matrix([[7]]) + + expected = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [0, 0, 5, 0], + [0, 0, 6, 0], + [0, 0, 0, 7]]) + + assert_equal(construct.block_diag((A, B, C)).todense(), expected) + + def test_block_diag_scalar_1d_args(self): + """ block_diag with scalar and 1d arguments """ + # one 1d matrix and a scalar + assert_array_equal(construct.block_diag([[2,3], 4]).toarray(), + [[2, 3, 0], [0, 0, 4]]) + + def test_block_diag_1(self): + """ block_diag with one matrix """ + assert_equal(construct.block_diag([[1, 0]]).todense(), + matrix([[1, 0]])) + 
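+        # [[1, 0]] is a single 1x2 matrix, while [[[1, 0]]] below is a
+        # one-element list containing that matrix; block_diag should treat
+        # both as one block and return the same 1x2 result.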
assert_equal(construct.block_diag([[[1, 0]]]).todense(), + matrix([[1, 0]])) + assert_equal(construct.block_diag([[[1], [0]]]).todense(), + matrix([[1], [0]])) + # just on scalar + assert_equal(construct.block_diag([1]).todense(), + matrix([[1]])) + + def test_random_sampling(self): + # Simple sanity checks for sparse random sampling. + for f in sprand, _sprandn: + for t in [np.float32, np.float64, np.longdouble, + np.int32, np.int64, np.complex64, np.complex128]: + x = f(5, 10, density=0.1, dtype=t) + assert_equal(x.dtype, t) + assert_equal(x.shape, (5, 10)) + assert_equal(x.nnz, 5) + + x1 = f(5, 10, density=0.1, random_state=4321) + assert_equal(x1.dtype, np.double) + + x2 = f(5, 10, density=0.1, + random_state=np.random.RandomState(4321)) + + assert_array_equal(x1.data, x2.data) + assert_array_equal(x1.row, x2.row) + assert_array_equal(x1.col, x2.col) + + for density in [0.0, 0.1, 0.5, 1.0]: + x = f(5, 10, density=density) + assert_equal(x.nnz, int(density * np.prod(x.shape))) + + for fmt in ['coo', 'csc', 'csr', 'lil']: + x = f(5, 10, format=fmt) + assert_equal(x.format, fmt) + + assert_raises(ValueError, lambda: f(5, 10, 1.1)) + assert_raises(ValueError, lambda: f(5, 10, -0.1)) + + def test_rand(self): + # Simple distributional checks for sparse.rand. + for random_state in None, 4321, np.random.RandomState(): + x = sprand(10, 20, density=0.5, dtype=np.float64, + random_state=random_state) + assert_(np.all(np.less_equal(0, x.data))) + assert_(np.all(np.less_equal(x.data, 1))) + + def test_randn(self): + # Simple distributional checks for sparse.randn. + # Statistically, some of these should be negative + # and some should be greater than 1. + for random_state in None, 4321, np.random.RandomState(): + x = _sprandn(10, 20, density=0.5, dtype=np.float64, + random_state=random_state) + assert_(np.any(np.less(x.data, 0))) + assert_(np.any(np.less(1, x.data))) + + def test_random_accept_str_dtype(self): + # anything that np.dtype can convert to a dtype should be accepted + # for the dtype + a = construct.random(10, 10, dtype='d') + diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_construct.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_construct.pyc new file mode 100644 index 0000000..f258087 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_construct.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csc.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csc.py new file mode 100644 index 0000000..a8b8a3b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csc.py @@ -0,0 +1,36 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_ +from scipy.sparse import csr_matrix, csc_matrix + + +def test_csc_getrow(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsc = csc_matrix(X) + + for i in range(N): + arr_row = X[i:i + 1, :] + csc_row = Xcsc.getrow(i) + + assert_array_almost_equal(arr_row, csc_row.toarray()) + assert_(type(csc_row) is csr_matrix) + + +def test_csc_getcol(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsc = csc_matrix(X) + + for i in range(N): + arr_col = X[:, i:i + 1] + csc_col = Xcsc.getcol(i) + + assert_array_almost_equal(arr_col, csc_col.toarray()) + assert_(type(csc_col) is csc_matrix) + diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csc.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csc.pyc new file mode 100644 index 0000000..7744bac Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csr.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csr.py new file mode 100644 index 0000000..09c0f59 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csr.py @@ -0,0 +1,60 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_ +from scipy.sparse import csr_matrix + + +def _check_csr_rowslice(i, sl, X, Xcsr): + np_slice = X[i, sl] + csr_slice = Xcsr[i, sl] + assert_array_almost_equal(np_slice, csr_slice.toarray()[0]) + assert_(type(csr_slice) is csr_matrix) + + +def test_csr_rowslice(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsr = csr_matrix(X) + + slices = [slice(None, None, None), + slice(None, None, -1), + slice(1, -2, 2), + slice(-2, 1, -2)] + + for i in range(N): + for sl in slices: + _check_csr_rowslice(i, sl, X, Xcsr) + + +def test_csr_getrow(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsr = csr_matrix(X) + + for i in range(N): + arr_row = X[i:i + 1, :] + csr_row = Xcsr.getrow(i) + + assert_array_almost_equal(arr_row, csr_row.toarray()) + assert_(type(csr_row) is csr_matrix) + + +def test_csr_getcol(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsr = csr_matrix(X) + + for i in range(N): + arr_col = X[:, i:i + 1] + csr_col = Xcsr.getcol(i) + + assert_array_almost_equal(arr_col, csr_col.toarray()) + assert_(type(csr_col) is csr_matrix) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csr.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csr.pyc new file mode 100644 index 0000000..9d94ade Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_csr.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_extract.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_extract.py new file mode 100644 index 0000000..e78765a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_extract.py @@ -0,0 +1,44 @@ +"""test sparse matrix construction functions""" + +from __future__ import division, print_function, absolute_import + +from numpy.testing import assert_equal +from scipy.sparse import csr_matrix + +import numpy as np +from scipy.sparse import extract + + +class TestExtract(object): + def setup_method(self): + self.cases = [ + csr_matrix([[1,2]]), + csr_matrix([[1,0]]), + csr_matrix([[0,0]]), + csr_matrix([[1],[2]]), + csr_matrix([[1],[0]]), + csr_matrix([[0],[0]]), + csr_matrix([[1,2],[3,4]]), + csr_matrix([[0,1],[0,0]]), + csr_matrix([[0,0],[1,0]]), + csr_matrix([[0,0],[0,0]]), + csr_matrix([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]), + csr_matrix([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]).T, + ] + + def find(self): + for A in self.cases: + I,J,V = extract.find(A) + assert_equal(A.toarray(), csr_matrix(((I,J),V), shape=A.shape)) + + def test_tril(self): + for A in self.cases: + B = A.toarray() + for k in [-3,-2,-1,0,1,2,3]: + assert_equal(extract.tril(A,k=k).toarray(), np.tril(B,k=k)) + + 
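+    # NOTE: `find` above is missing the test_ prefix, so pytest never
+    # collects it; if enabled, the reconstruction would also appear to
+    # need the (data, (row, col)) argument order, i.e.
+    # csr_matrix((V, (I, J)), shape=A.shape).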
def test_triu(self): + for A in self.cases: + B = A.toarray() + for k in [-3,-2,-1,0,1,2,3]: + assert_equal(extract.triu(A,k=k).toarray(), np.triu(B,k=k)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_extract.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_extract.pyc new file mode 100644 index 0000000..93ba84b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_extract.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_matrix_io.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_matrix_io.py new file mode 100644 index 0000000..304a157 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_matrix_io.py @@ -0,0 +1,93 @@ +from __future__ import division, print_function, absolute_import + +import sys +import os +import numpy as np +import tempfile + +import pytest +from pytest import raises as assert_raises +from numpy.testing import assert_equal, assert_ +from scipy._lib._version import NumpyVersion + +from scipy.sparse import (csc_matrix, csr_matrix, bsr_matrix, dia_matrix, + coo_matrix, save_npz, load_npz, dok_matrix) + + +DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') + + +def _save_and_load(matrix): + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + save_npz(tmpfile, matrix) + loaded_matrix = load_npz(tmpfile) + finally: + os.remove(tmpfile) + return loaded_matrix + +def _check_save_and_load(dense_matrix): + for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]: + matrix = matrix_class(dense_matrix) + loaded_matrix = _save_and_load(matrix) + assert_(type(loaded_matrix) is matrix_class) + assert_(loaded_matrix.shape == dense_matrix.shape) + assert_(loaded_matrix.dtype == dense_matrix.dtype) + assert_equal(loaded_matrix.toarray(), dense_matrix) + +def test_save_and_load_random(): + N = 10 + np.random.seed(0) + dense_matrix = np.random.random((N, N)) + dense_matrix[dense_matrix > 0.7] = 0 + _check_save_and_load(dense_matrix) + +def test_save_and_load_empty(): + dense_matrix = np.zeros((4,6)) + _check_save_and_load(dense_matrix) + +def test_save_and_load_one_entry(): + dense_matrix = np.zeros((4,6)) + dense_matrix[1,2] = 1 + _check_save_and_load(dense_matrix) + + +@pytest.mark.skipif(NumpyVersion(np.__version__) < '1.10.0', + reason='disabling unpickling requires numpy >= 1.10.0') +def test_malicious_load(): + class Executor(object): + def __reduce__(self): + return (assert_, (False, 'unexpected code execution')) + + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + np.savez(tmpfile, format=Executor()) + + # Should raise a ValueError, not execute code + assert_raises(ValueError, load_npz, tmpfile) + finally: + os.remove(tmpfile) + + +def test_py23_compatibility(): + # Try loading files saved on Python 2 and Python 3. They are not + # the same, since files saved with Scipy versions < 1.0.0 may + # contain unicode. + + a = load_npz(os.path.join(DATA_DIR, 'csc_py2.npz')) + b = load_npz(os.path.join(DATA_DIR, 'csc_py3.npz')) + c = csc_matrix([[0]]) + + assert_equal(a.toarray(), c.toarray()) + assert_equal(b.toarray(), c.toarray()) + +def test_implemented_error(): + # Attempts to save an unsupported type and checks that an + # NotImplementedError is raised. 
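+    # (save_npz only covers the csc, csr, bsr, dia and coo formats; dok
+    # has no .npz representation, hence the expected NotImplementedError.)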
+ + x = dok_matrix((2,3)) + x[0,1] = 1 + + assert_raises(NotImplementedError, save_npz, 'x.npz', x) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_matrix_io.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_matrix_io.pyc new file mode 100644 index 0000000..a4f023c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_matrix_io.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sparsetools.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sparsetools.py new file mode 100644 index 0000000..d6c600e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sparsetools.py @@ -0,0 +1,328 @@ +from __future__ import division, print_function, absolute_import + +import sys +import os +import gc +import re +import threading + +import numpy as np +from numpy.testing import assert_equal, assert_, assert_allclose +from scipy.sparse import (_sparsetools, coo_matrix, csr_matrix, csc_matrix, + bsr_matrix, dia_matrix) +from scipy.sparse.sputils import supported_dtypes +from scipy._lib._testutils import check_free_memory + +import pytest +from pytest import raises as assert_raises + +def test_exception(): + assert_raises(MemoryError, _sparsetools.test_throw_error) + + +def test_threads(): + # Smoke test for parallel threaded execution; doesn't actually + # check that code runs in parallel, but just that it produces + # expected results. + nthreads = 10 + niter = 100 + + n = 20 + a = csr_matrix(np.ones([n, n])) + bres = [] + + class Worker(threading.Thread): + def run(self): + b = a.copy() + for j in range(niter): + _sparsetools.csr_plus_csr(n, n, + a.indptr, a.indices, a.data, + a.indptr, a.indices, a.data, + b.indptr, b.indices, b.data) + bres.append(b) + + threads = [Worker() for _ in range(nthreads)] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + + for b in bres: + assert_(np.all(b.toarray() == 2)) + + +def test_regression_std_vector_dtypes(): + # Regression test for gh-3780, checking the std::vector typemaps + # in sparsetools.cxx are complete. + for dtype in supported_dtypes: + ad = np.matrix([[1, 2], [3, 4]]).astype(dtype) + a = csr_matrix(ad, dtype=dtype) + + # getcol is one function using std::vector typemaps, and should not fail + assert_equal(a.getcol(0).todense(), ad[:,0]) + + +@pytest.mark.slow +def test_nnz_overflow(): + # Regression test for gh-7230 / gh-7871, checking that coo_todense + # with nnz > int32max doesn't overflow. + nnz = np.iinfo(np.int32).max + 1 + # Ensure ~20 GB of RAM is free to run this test. + check_free_memory((4 + 4 + 1) * nnz / 1e6 + 0.5) + + # Use nnz duplicate entries to keep the dense version small. + row = np.zeros(nnz, dtype=np.int32) + col = np.zeros(nnz, dtype=np.int32) + data = np.zeros(nnz, dtype=np.int8) + data[-1] = 4 + s = coo_matrix((data, (row, col)), shape=(1, 1), copy=False) + # Sums nnz duplicates to produce a 1x1 array containing 4. + d = s.toarray() + + assert_allclose(d, [[4]]) + + +@pytest.mark.skipif(not (sys.platform.startswith('linux') and np.dtype(np.intp).itemsize >= 8), + reason="test requires 64-bit Linux") +class TestInt32Overflow(object): + """ + Some of the sparsetools routines use dense 2D matrices whose + total size is not bounded by the nnz of the sparse matrix. These + routines used to suffer from int32 wraparounds; here, we try to + check that the wraparounds don't occur any more. 
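+
+    With n = 50000 below, the dense work area holds n**2 = 2.5e9 entries,
+    just past the int32 limit of 2**31 - 1 (roughly 2.1e9).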
+ """ + # choose n large enough + n = 50000 + + def setup_method(self): + assert self.n**2 > np.iinfo(np.int32).max + + # check there's enough memory even if everything is run at the + # same time + try: + parallel_count = int(os.environ.get('PYTEST_XDIST_WORKER_COUNT', '1')) + except ValueError: + parallel_count = np.inf + + check_free_memory(3000 * parallel_count) + + def teardown_method(self): + gc.collect() + + def test_coo_todense(self): + # Check *_todense routines (cf. gh-2179) + # + # All of them in the end call coo_matrix.todense + + n = self.n + + i = np.array([0, n-1]) + j = np.array([0, n-1]) + data = np.array([1, 2], dtype=np.int8) + m = coo_matrix((data, (i, j))) + + r = m.todense() + assert_equal(r[0,0], 1) + assert_equal(r[-1,-1], 2) + del r + gc.collect() + + @pytest.mark.slow + def test_matvecs(self): + # Check *_matvecs routines + n = self.n + + i = np.array([0, n-1]) + j = np.array([0, n-1]) + data = np.array([1, 2], dtype=np.int8) + m = coo_matrix((data, (i, j))) + + b = np.ones((n, n), dtype=np.int8) + for sptype in (csr_matrix, csc_matrix, bsr_matrix): + m2 = sptype(m) + r = m2.dot(b) + assert_equal(r[0,0], 1) + assert_equal(r[-1,-1], 2) + del r + gc.collect() + + del b + gc.collect() + + @pytest.mark.slow + def test_dia_matvec(self): + # Check: huge dia_matrix _matvec + n = self.n + data = np.ones((n, n), dtype=np.int8) + offsets = np.arange(n) + m = dia_matrix((data, offsets), shape=(n, n)) + v = np.ones(m.shape[1], dtype=np.int8) + r = m.dot(v) + assert_equal(r[0], np.int8(n)) + del data, offsets, m, v, r + gc.collect() + + _bsr_ops = [pytest.param("matmat", marks=pytest.mark.xslow), + pytest.param("matvecs", marks=pytest.mark.xslow), + "matvec", + "diagonal", + "sort_indices", + pytest.param("transpose", marks=pytest.mark.xslow)] + + @pytest.mark.slow + @pytest.mark.parametrize("op", _bsr_ops) + def test_bsr_1_block(self, op): + # Check: huge bsr_matrix (1-block) + # + # The point here is that indices inside a block may overflow. + + def get_matrix(): + n = self.n + data = np.ones((1, n, n), dtype=np.int8) + indptr = np.array([0, 1], dtype=np.int32) + indices = np.array([0], dtype=np.int32) + m = bsr_matrix((data, indices, indptr), blocksize=(n, n), copy=False) + del data, indptr, indices + return m + + gc.collect() + try: + getattr(self, "_check_bsr_" + op)(get_matrix) + finally: + gc.collect() + + @pytest.mark.slow + @pytest.mark.parametrize("op", _bsr_ops) + def test_bsr_n_block(self, op): + # Check: huge bsr_matrix (n-block) + # + # The point here is that while indices within a block don't + # overflow, accumulators across many block may. 
+ + def get_matrix(): + n = self.n + data = np.ones((n, n, 1), dtype=np.int8) + indptr = np.array([0, n], dtype=np.int32) + indices = np.arange(n, dtype=np.int32) + m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False) + del data, indptr, indices + return m + + gc.collect() + try: + getattr(self, "_check_bsr_" + op)(get_matrix) + finally: + gc.collect() + + def _check_bsr_matvecs(self, m): + m = m() + n = self.n + + # _matvecs + r = m.dot(np.ones((n, 2), dtype=np.int8)) + assert_equal(r[0,0], np.int8(n)) + + def _check_bsr_matvec(self, m): + m = m() + n = self.n + + # _matvec + r = m.dot(np.ones((n,), dtype=np.int8)) + assert_equal(r[0], np.int8(n)) + + def _check_bsr_diagonal(self, m): + m = m() + n = self.n + + # _diagonal + r = m.diagonal() + assert_equal(r, np.ones(n)) + + def _check_bsr_sort_indices(self, m): + # _sort_indices + m = m() + m.sort_indices() + + def _check_bsr_transpose(self, m): + # _transpose + m = m() + m.transpose() + + def _check_bsr_matmat(self, m): + m = m() + n = self.n + + # _bsr_matmat + m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8), blocksize=(m.blocksize[1], 2)) + m.dot(m2) # shouldn't SIGSEGV + del m2 + + # _bsr_matmat + m2 = bsr_matrix(np.ones((2, n), dtype=np.int8), blocksize=(2, m.blocksize[0])) + m2.dot(m) # shouldn't SIGSEGV + + +@pytest.mark.skip(reason="64-bit indices in sparse matrices not available") +def test_csr_matmat_int64_overflow(): + n = 3037000500 + assert n**2 > np.iinfo(np.int64).max + + # the test would take crazy amounts of memory + check_free_memory(n * (8*2 + 1) * 3 / 1e6) + + # int64 overflow + data = np.ones((n,), dtype=np.int8) + indptr = np.arange(n+1, dtype=np.int64) + indices = np.zeros(n, dtype=np.int64) + a = csr_matrix((data, indices, indptr)) + b = a.T + + assert_raises(RuntimeError, a.dot, b) + + +def test_upcast(): + a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], dtype=complex) + b0 = np.array([256+1j, 2**32], dtype=complex) + + for a_dtype in supported_dtypes: + for b_dtype in supported_dtypes: + msg = "(%r, %r)" % (a_dtype, b_dtype) + + if np.issubdtype(a_dtype, np.complexfloating): + a = a0.copy().astype(a_dtype) + else: + a = a0.real.copy().astype(a_dtype) + + if np.issubdtype(b_dtype, np.complexfloating): + b = b0.copy().astype(b_dtype) + else: + b = b0.real.copy().astype(b_dtype) + + if not (a_dtype == np.bool_ and b_dtype == np.bool_): + c = np.zeros((2,), dtype=np.bool_) + assert_raises(ValueError, _sparsetools.csr_matvec, + 2, 2, a.indptr, a.indices, a.data, b, c) + + if ((np.issubdtype(a_dtype, np.complexfloating) and + not np.issubdtype(b_dtype, np.complexfloating)) or + (not np.issubdtype(a_dtype, np.complexfloating) and + np.issubdtype(b_dtype, np.complexfloating))): + c = np.zeros((2,), dtype=np.float64) + assert_raises(ValueError, _sparsetools.csr_matvec, + 2, 2, a.indptr, a.indices, a.data, b, c) + + c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype)) + _sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c) + assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg) + + +def test_endianness(): + d = np.ones((3,4)) + offsets = [-1,0,1] + + a = dia_matrix((d.astype('<f8'), offsets), (4, 4)) + b = dia_matrix((d.astype('>f8'), offsets), (4, 4)) + v = np.arange(4) + + assert_allclose(a.dot(v), [1, 3, 6, 5]) + assert_allclose(b.dot(v), [1, 3, 6, 5]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sparsetools.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sparsetools.pyc new file mode 100644 index 0000000..1c33934 Binary files 
/dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sparsetools.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_spfuncs.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_spfuncs.py new file mode 100644 index 0000000..4614b5d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_spfuncs.py @@ -0,0 +1,99 @@ +from __future__ import division, print_function, absolute_import + +from numpy import array, kron, matrix, diag +from numpy.testing import assert_, assert_equal + +from scipy.sparse import spfuncs +from scipy.sparse import csr_matrix, csc_matrix, bsr_matrix +from scipy.sparse._sparsetools import (csr_scale_rows, csr_scale_columns, + bsr_scale_rows, bsr_scale_columns) + + +class TestSparseFunctions(object): + def test_scale_rows_and_cols(self): + D = matrix([[1,0,0,2,3], + [0,4,0,5,0], + [0,0,6,7,0]]) + + #TODO expose through function + S = csr_matrix(D) + v = array([1,2,3]) + csr_scale_rows(3,5,S.indptr,S.indices,S.data,v) + assert_equal(S.todense(), diag(v)*D) + + S = csr_matrix(D) + v = array([1,2,3,4,5]) + csr_scale_columns(3,5,S.indptr,S.indices,S.data,v) + assert_equal(S.todense(), D*diag(v)) + + # blocks + E = kron(D,[[1,2],[3,4]]) + S = bsr_matrix(E,blocksize=(2,2)) + v = array([1,2,3,4,5,6]) + bsr_scale_rows(3,5,2,2,S.indptr,S.indices,S.data,v) + assert_equal(S.todense(), diag(v)*E) + + S = bsr_matrix(E,blocksize=(2,2)) + v = array([1,2,3,4,5,6,7,8,9,10]) + bsr_scale_columns(3,5,2,2,S.indptr,S.indices,S.data,v) + assert_equal(S.todense(), E*diag(v)) + + E = kron(D,[[1,2,3],[4,5,6]]) + S = bsr_matrix(E,blocksize=(2,3)) + v = array([1,2,3,4,5,6]) + bsr_scale_rows(3,5,2,3,S.indptr,S.indices,S.data,v) + assert_equal(S.todense(), diag(v)*E) + + S = bsr_matrix(E,blocksize=(2,3)) + v = array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]) + bsr_scale_columns(3,5,2,3,S.indptr,S.indices,S.data,v) + assert_equal(S.todense(), E*diag(v)) + + def test_estimate_blocksize(self): + mats = [] + mats.append([[0,1],[1,0]]) + mats.append([[1,1,0],[0,0,1],[1,0,1]]) + mats.append([[0],[0],[1]]) + mats = [array(x) for x in mats] + + blks = [] + blks.append([[1]]) + blks.append([[1,1],[1,1]]) + blks.append([[1,1],[0,1]]) + blks.append([[1,1,0],[1,0,1],[1,1,1]]) + blks = [array(x) for x in blks] + + for A in mats: + for B in blks: + X = kron(A,B) + r,c = spfuncs.estimate_blocksize(X) + assert_(r >= B.shape[0]) + assert_(c >= B.shape[1]) + + def test_count_blocks(self): + def gold(A,bs): + R,C = bs + I,J = A.nonzero() + return len(set(zip(I//R,J//C))) + + mats = [] + mats.append([[0]]) + mats.append([[1]]) + mats.append([[1,0]]) + mats.append([[1,1]]) + mats.append([[0,1],[1,0]]) + mats.append([[1,1,0],[0,0,1],[1,0,1]]) + mats.append([[0],[0],[1]]) + + for A in mats: + for B in mats: + X = kron(A,B) + Y = csr_matrix(X) + for R in range(1,6): + for C in range(1,6): + assert_equal(spfuncs.count_blocks(Y, (R, C)), gold(X, (R, C))) + + X = kron([[1,1,0],[0,0,1],[1,0,1]],[[1,1]]) + Y = csc_matrix(X) + assert_equal(spfuncs.count_blocks(X, (1, 2)), gold(X, (1, 2))) + assert_equal(spfuncs.count_blocks(Y, (1, 2)), gold(X, (1, 2))) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_spfuncs.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_spfuncs.pyc new file mode 100644 index 0000000..3004125 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_spfuncs.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sputils.py b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sputils.py new file mode 100644 index 0000000..f14cdb5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sputils.py @@ -0,0 +1,153 @@ +"""unit tests for sparse utility functions""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_equal, assert_raises +from pytest import raises as assert_raises +from scipy.sparse import sputils +from scipy._lib._numpy_compat import suppress_warnings + + +class TestSparseUtils(object): + + def test_upcast(self): + assert_equal(sputils.upcast('intc'), np.intc) + assert_equal(sputils.upcast('int32', 'float32'), np.float64) + assert_equal(sputils.upcast('bool', complex, float), np.complex128) + assert_equal(sputils.upcast('i', 'd'), np.float64) + + def test_getdtype(self): + A = np.array([1], dtype='int8') + + assert_equal(sputils.getdtype(None, default=float), float) + assert_equal(sputils.getdtype(None, a=A), np.int8) + + def test_isscalarlike(self): + assert_equal(sputils.isscalarlike(3.0), True) + assert_equal(sputils.isscalarlike(-4), True) + assert_equal(sputils.isscalarlike(2.5), True) + assert_equal(sputils.isscalarlike(1 + 3j), True) + assert_equal(sputils.isscalarlike(np.array(3)), True) + assert_equal(sputils.isscalarlike("16"), True) + + assert_equal(sputils.isscalarlike(np.array([3])), False) + assert_equal(sputils.isscalarlike([[3]]), False) + assert_equal(sputils.isscalarlike((1,)), False) + assert_equal(sputils.isscalarlike((1, 2)), False) + + def test_isintlike(self): + assert_equal(sputils.isintlike(-4), True) + assert_equal(sputils.isintlike(np.array(3)), True) + assert_equal(sputils.isintlike(np.array([3])), False) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, + "Inexact indices into sparse matrices are deprecated") + assert_equal(sputils.isintlike(3.0), True) + + assert_equal(sputils.isintlike(2.5), False) + assert_equal(sputils.isintlike(1 + 3j), False) + assert_equal(sputils.isintlike((1,)), False) + assert_equal(sputils.isintlike((1, 2)), False) + + def test_isshape(self): + assert_equal(sputils.isshape((1, 2)), True) + assert_equal(sputils.isshape((5, 2)), True) + + assert_equal(sputils.isshape((1.5, 2)), False) + assert_equal(sputils.isshape((2, 2, 2)), False) + assert_equal(sputils.isshape(([2], 2)), False) + assert_equal(sputils.isshape((-1, 2), nonneg=False),True) + assert_equal(sputils.isshape((2, -1), nonneg=False),True) + assert_equal(sputils.isshape((-1, 2), nonneg=True),False) + assert_equal(sputils.isshape((2, -1), nonneg=True),False) + + def test_issequence(self): + assert_equal(sputils.issequence((1,)), True) + assert_equal(sputils.issequence((1, 2, 3)), True) + assert_equal(sputils.issequence([1]), True) + assert_equal(sputils.issequence([1, 2, 3]), True) + assert_equal(sputils.issequence(np.array([1, 2, 3])), True) + + assert_equal(sputils.issequence(np.array([[1], [2], [3]])), False) + assert_equal(sputils.issequence(3), False) + + def test_ismatrix(self): + assert_equal(sputils.ismatrix(((),)), True) + assert_equal(sputils.ismatrix([[1], [2]]), True) + assert_equal(sputils.ismatrix(np.arange(3)[None]), True) + + assert_equal(sputils.ismatrix([1, 2]), False) + assert_equal(sputils.ismatrix(np.arange(3)), False) + assert_equal(sputils.ismatrix([[[1]]]), False) + assert_equal(sputils.ismatrix(3), False) + + def test_isdense(self): + 
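+        # both plain ndarrays and (now-deprecated) np.matrix instances
+        # should count as dense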
assert_equal(sputils.isdense(np.array([1])), True) + assert_equal(sputils.isdense(np.matrix([1])), True) + + def test_validateaxis(self): + assert_raises(TypeError, sputils.validateaxis, (0, 1)) + assert_raises(TypeError, sputils.validateaxis, 1.5) + assert_raises(ValueError, sputils.validateaxis, 3) + + # These function calls should not raise errors + for axis in (-2, -1, 0, 1, None): + sputils.validateaxis(axis) + + def test_get_index_dtype(self): + imax = np.iinfo(np.int32).max + too_big = imax + 1 + + # Check that uint32's with no values too large doesn't return + # int64 + a1 = np.ones(90, dtype='uint32') + a2 = np.ones(90, dtype='uint32') + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)), + np.dtype('int32') + ) + + # Check that if we can not convert but all values are less than or + # equal to max that we can just convert to int32 + a1[-1] = imax + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)), + np.dtype('int32') + ) + + # Check that if it can not convert directly and the contents are + # too large that we return int64 + a1[-1] = too_big + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)), + np.dtype('int64') + ) + + # test that if can not convert and didn't specify to check_contents + # we return int64 + a1 = np.ones(89, dtype='uint32') + a2 = np.ones(89, dtype='uint32') + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2))), + np.dtype('int64') + ) + + # Check that even if we have arrays that can be converted directly + # that if we specify a maxval directly it takes precedence + a1 = np.ones(12, dtype='uint32') + a2 = np.ones(12, dtype='uint32') + assert_equal( + np.dtype(sputils.get_index_dtype( + (a1, a2), maxval=too_big, check_contents=True + )), + np.dtype('int64') + ) + + # Check that an array with a too max size and maxval set + # still returns int64 + a1[-1] = too_big + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2), maxval=too_big)), + np.dtype('int64') + ) diff --git a/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sputils.pyc b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sputils.pyc new file mode 100644 index 0000000..3dab59f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/sparse/tests/test_sputils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/__init__.py new file mode 100644 index 0000000..91d4853 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/__init__.py @@ -0,0 +1,113 @@ +""" +============================================================= +Spatial algorithms and data structures (:mod:`scipy.spatial`) +============================================================= + +.. currentmodule:: scipy.spatial + +Spatial Transformations +======================= +Contained in the `scipy.spatial.transform` submodule. + +Nearest-neighbor Queries +======================== +.. autosummary:: + :toctree: generated/ + + KDTree -- class for efficient nearest-neighbor queries + cKDTree -- class for efficient nearest-neighbor queries (faster impl.) + Rectangle + +Distance metrics are contained in the :mod:`scipy.spatial.distance` submodule. + +Delaunay Triangulation, Convex Hulls and Voronoi Diagrams +========================================================= + +.. 
autosummary:: + :toctree: generated/ + + Delaunay -- compute Delaunay triangulation of input points + ConvexHull -- compute a convex hull for input points + Voronoi -- compute a Voronoi diagram hull from input points + SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere + HalfspaceIntersection -- compute the intersection points of input halfspaces + +Plotting Helpers +================ + +.. autosummary:: + :toctree: generated/ + + delaunay_plot_2d -- plot 2-D triangulation + convex_hull_plot_2d -- plot 2-D convex hull + voronoi_plot_2d -- plot 2-D voronoi diagram + +.. seealso:: :ref:`Tutorial <qhulltutorial>` + + +Simplex representation +====================== +The simplices (triangles, tetrahedra, ...) appearing in the Delaunay +tessellation (N-dim simplices), convex hull facets, and Voronoi ridges +(N-1 dim simplices) are represented in the following scheme:: + + tess = Delaunay(points) + hull = ConvexHull(points) + voro = Voronoi(points) + + # coordinates of the j-th vertex of the i-th simplex + tess.points[tess.simplices[i, j], :] # tessellation element + hull.points[hull.simplices[i, j], :] # convex hull facet + voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells + +For Delaunay triangulations and convex hulls, the neighborhood +structure of the simplices satisfies the condition: + + ``tess.neighbors[i,j]`` is the neighboring simplex of the i-th + simplex, opposite to the j-vertex. It is -1 in case of no + neighbor. + +Convex hull facets also define a hyperplane equation:: + + (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0 + +Similar hyperplane equations for the Delaunay triangulation correspond +to the convex hull facets on the corresponding N+1 dimensional +paraboloid. + +The Delaunay triangulation objects offer a method for locating the +simplex containing a given point, and barycentric coordinate +computations. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + tsearch + distance_matrix + minkowski_distance + minkowski_distance_p + procrustes + +""" + +from __future__ import division, print_function, absolute_import + +from .kdtree import * +from .ckdtree import * +from .qhull import * +from ._spherical_voronoi import SphericalVoronoi +from ._plotutils import * +from ._procrustes import procrustes +from . import transform + +__all__ = [s for s in dir() if not s.startswith('_')] +__all__ += ['distance'] + +from . 
import distance + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/__init__.pyc new file mode 100644 index 0000000..270d0c3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/_distance_wrap.so b/project/venv/lib/python2.7/site-packages/scipy/spatial/_distance_wrap.so new file mode 100755 index 0000000..040a4d4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/_distance_wrap.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/_hausdorff.so b/project/venv/lib/python2.7/site-packages/scipy/spatial/_hausdorff.so new file mode 100755 index 0000000..2e07d93 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/_hausdorff.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/_plotutils.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/_plotutils.py new file mode 100644 index 0000000..24013d8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/_plotutils.py @@ -0,0 +1,262 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy._lib.decorator import decorator as _decorator + +__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d'] + + +@_decorator +def _held_figure(func, obj, ax=None, **kw): + import matplotlib.pyplot as plt + + if ax is None: + fig = plt.figure() + ax = fig.gca() + return func(obj, ax=ax, **kw) + + # As of matplotlib 2.0, the "hold" mechanism is deprecated. + # When matplotlib 1.x is no longer supported, this check can be removed. + was_held = getattr(ax, 'ishold', lambda: True)() + if was_held: + return func(obj, ax=ax, **kw) + try: + ax.hold(True) + return func(obj, ax=ax, **kw) + finally: + ax.hold(was_held) + + +def _adjust_bounds(ax, points): + margin = 0.1 * points.ptp(axis=0) + xy_min = points.min(axis=0) - margin + xy_max = points.max(axis=0) + margin + ax.set_xlim(xy_min[0], xy_max[0]) + ax.set_ylim(xy_min[1], xy_max[1]) + + +@_held_figure +def delaunay_plot_2d(tri, ax=None): + """ + Plot the given Delaunay triangulation in 2-D + + Parameters + ---------- + tri : scipy.spatial.Delaunay instance + Triangulation to plot + ax : matplotlib.axes.Axes instance, optional + Axes to plot on + + Returns + ------- + fig : matplotlib.figure.Figure instance + Figure for the plot + + See Also + -------- + Delaunay + matplotlib.pyplot.triplot + + Notes + ----- + Requires Matplotlib. 
+ + Examples + -------- + + >>> import matplotlib.pyplot as plt + >>> from scipy.spatial import Delaunay, delaunay_plot_2d + + The Delaunay triangulation of a set of random points: + + >>> points = np.random.rand(30, 2) + >>> tri = Delaunay(points) + + Plot it: + + >>> _ = delaunay_plot_2d(tri) + >>> plt.show() + + """ + if tri.points.shape[1] != 2: + raise ValueError("Delaunay triangulation is not 2-D") + + x, y = tri.points.T + ax.plot(x, y, 'o') + ax.triplot(x, y, tri.simplices.copy()) + + _adjust_bounds(ax, tri.points) + + return ax.figure + + +@_held_figure +def convex_hull_plot_2d(hull, ax=None): + """ + Plot the given convex hull diagram in 2-D + + Parameters + ---------- + hull : scipy.spatial.ConvexHull instance + Convex hull to plot + ax : matplotlib.axes.Axes instance, optional + Axes to plot on + + Returns + ------- + fig : matplotlib.figure.Figure instance + Figure for the plot + + See Also + -------- + ConvexHull + + Notes + ----- + Requires Matplotlib. + + + Examples + -------- + + >>> import matplotlib.pyplot as plt + >>> from scipy.spatial import ConvexHull, convex_hull_plot_2d + + The convex hull of a random set of points: + + >>> points = np.random.rand(30, 2) + >>> hull = ConvexHull(points) + + Plot it: + + >>> _ = convex_hull_plot_2d(hull) + >>> plt.show() + + """ + from matplotlib.collections import LineCollection + + if hull.points.shape[1] != 2: + raise ValueError("Convex hull is not 2-D") + + ax.plot(hull.points[:,0], hull.points[:,1], 'o') + line_segments = [hull.points[simplex] for simplex in hull.simplices] + ax.add_collection(LineCollection(line_segments, + colors='k', + linestyle='solid')) + _adjust_bounds(ax, hull.points) + + return ax.figure + + +@_held_figure +def voronoi_plot_2d(vor, ax=None, **kw): + """ + Plot the given Voronoi diagram in 2-D + + Parameters + ---------- + vor : scipy.spatial.Voronoi instance + Diagram to plot + ax : matplotlib.axes.Axes instance, optional + Axes to plot on + show_points: bool, optional + Add the Voronoi points to the plot. + show_vertices : bool, optional + Add the Voronoi vertices to the plot. + line_colors : string, optional + Specifies the line color for polygon boundaries + line_width : float, optional + Specifies the line width for polygon boundaries + line_alpha: float, optional + Specifies the line alpha for polygon boundaries + point_size: float, optional + Specifies the size of points + + + Returns + ------- + fig : matplotlib.figure.Figure instance + Figure for the plot + + See Also + -------- + Voronoi + + Notes + ----- + Requires Matplotlib. + + Examples + -------- + Set of point: + + >>> import matplotlib.pyplot as plt + >>> points = np.random.rand(10,2) #random + + Voronoi diagram of the points: + + >>> from scipy.spatial import Voronoi, voronoi_plot_2d + >>> vor = Voronoi(points) + + using `voronoi_plot_2d` for visualisation: + + >>> fig = voronoi_plot_2d(vor) + + using `voronoi_plot_2d` for visualisation with enhancements: + + >>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange', + ... 
line_width=2, line_alpha=0.6, point_size=2) + >>> plt.show() + + """ + from matplotlib.collections import LineCollection + + if vor.points.shape[1] != 2: + raise ValueError("Voronoi diagram is not 2-D") + + if kw.get('show_points', True): + point_size = kw.get('point_size', None) + ax.plot(vor.points[:,0], vor.points[:,1], '.', markersize=point_size) + if kw.get('show_vertices', True): + ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o') + + line_colors = kw.get('line_colors', 'k') + line_width = kw.get('line_width', 1.0) + line_alpha = kw.get('line_alpha', 1.0) + + center = vor.points.mean(axis=0) + ptp_bound = vor.points.ptp(axis=0) + + finite_segments = [] + infinite_segments = [] + for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices): + simplex = np.asarray(simplex) + if np.all(simplex >= 0): + finite_segments.append(vor.vertices[simplex]) + else: + i = simplex[simplex >= 0][0] # finite end Voronoi vertex + + t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent + t /= np.linalg.norm(t) + n = np.array([-t[1], t[0]]) # normal + + midpoint = vor.points[pointidx].mean(axis=0) + direction = np.sign(np.dot(midpoint - center, n)) * n + far_point = vor.vertices[i] + direction * ptp_bound.max() + + infinite_segments.append([vor.vertices[i], far_point]) + + ax.add_collection(LineCollection(finite_segments, + colors=line_colors, + lw=line_width, + alpha=line_alpha, + linestyle='solid')) + ax.add_collection(LineCollection(infinite_segments, + colors=line_colors, + lw=line_width, + alpha=line_alpha, + linestyle='dashed')) + + _adjust_bounds(ax, vor.points) + + return ax.figure diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/_plotutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/_plotutils.pyc new file mode 100644 index 0000000..97343fd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/_plotutils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/_procrustes.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/_procrustes.py new file mode 100644 index 0000000..e87ee11 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/_procrustes.py @@ -0,0 +1,133 @@ +""" +This module provides functions to perform full Procrustes analysis. + +This code was originally written by Justin Kucynski and ported over from +scikit-bio by Yoshiki Vazquez-Baeza. +""" + +from __future__ import absolute_import, division, print_function + +import numpy as np +from scipy.linalg import orthogonal_procrustes + + +__all__ = ['procrustes'] + + +def procrustes(data1, data2): + r"""Procrustes analysis, a similarity test for two data sets. + + Each input matrix is a set of points or vectors (the rows of the matrix). + The dimension of the space is the number of columns of each matrix. Given + two identically sized matrices, procrustes standardizes both such that: + + - :math:`tr(AA^{T}) = 1`. + + - Both sets of points are centered around the origin. + + Procrustes ([1]_, [2]_) then applies the optimal transform to the second + matrix (including scaling/dilation, rotations, and reflections) to minimize + :math:`M^{2}=\sum(data1-data2)^{2}`, or the sum of the squares of the + pointwise differences between the two input datasets. + + This function was not designed to handle datasets with different numbers of + datapoints (rows). If two data sets have different dimensionality + (different number of columns), simply add columns of zeros to the smaller + of the two. 
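# [Editor's sketch, illustrative; the array values are invented] The
# zero-padding advice above in practice: matching a 2-column data set
# against a 3-column one before calling procrustes.
import numpy as np
from scipy.spatial import procrustes

a = np.array([[1., 3., 0.5], [1., 2., 0.0], [2., 1., 1.0]])   # 3 points in 3-D
b = np.array([[4., -2.], [4., -4.], [2., -6.]])               # 3 points in 2-D
b_padded = np.hstack([b, np.zeros((b.shape[0], 1))])          # pad to 3 columns
mtx1, mtx2, disparity = procrustes(a, b_padded)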
+ + Parameters + ---------- + data1 : array_like + Matrix, n rows represent points in k (columns) space `data1` is the + reference data, after it is standardised, the data from `data2` will be + transformed to fit the pattern in `data1` (must have >1 unique points). + data2 : array_like + n rows of data in k space to be fit to `data1`. Must be the same + shape ``(numrows, numcols)`` as data1 (must have >1 unique points). + + Returns + ------- + mtx1 : array_like + A standardized version of `data1`. + mtx2 : array_like + The orientation of `data2` that best fits `data1`. Centered, but not + necessarily :math:`tr(AA^{T}) = 1`. + disparity : float + :math:`M^{2}` as defined above. + + Raises + ------ + ValueError + If the input arrays are not two-dimensional. + If the shape of the input arrays is different. + If the input arrays have zero columns or zero rows. + + See Also + -------- + scipy.linalg.orthogonal_procrustes + scipy.spatial.distance.directed_hausdorff : Another similarity test + for two data sets + + Notes + ----- + - The disparity should not depend on the order of the input matrices, but + the output matrices will, as only the first output matrix is guaranteed + to be scaled such that :math:`tr(AA^{T}) = 1`. + + - Duplicate data points are generally ok, duplicating a data point will + increase its effect on the procrustes fit. + + - The disparity scales as the number of points per input matrix. + + References + ---------- + .. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis". + .. [2] Gower, J. C. (1975). "Generalized procrustes analysis". + + Examples + -------- + >>> from scipy.spatial import procrustes + + The matrix ``b`` is a rotated, shifted, scaled and mirrored version of + ``a`` here: + + >>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd') + >>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd') + >>> mtx1, mtx2, disparity = procrustes(a, b) + >>> round(disparity) + 0.0 + + """ + mtx1 = np.array(data1, dtype=np.double, copy=True) + mtx2 = np.array(data2, dtype=np.double, copy=True) + + if mtx1.ndim != 2 or mtx2.ndim != 2: + raise ValueError("Input matrices must be two-dimensional") + if mtx1.shape != mtx2.shape: + raise ValueError("Input matrices must be of same shape") + if mtx1.size == 0: + raise ValueError("Input matrices must be >0 rows and >0 cols") + + # translate all the data to the origin + mtx1 -= np.mean(mtx1, 0) + mtx2 -= np.mean(mtx2, 0) + + norm1 = np.linalg.norm(mtx1) + norm2 = np.linalg.norm(mtx2) + + if norm1 == 0 or norm2 == 0: + raise ValueError("Input matrices must contain >1 unique points") + + # change scaling of data (in rows) such that trace(mtx*mtx') = 1 + mtx1 /= norm1 + mtx2 /= norm2 + + # transform mtx2 to minimize disparity + R, s = orthogonal_procrustes(mtx1, mtx2) + mtx2 = np.dot(mtx2, R.T) * s + + # measure the dissimilarity between the two datasets + disparity = np.sum(np.square(mtx1 - mtx2)) + + return mtx1, mtx2, disparity + diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/_procrustes.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/_procrustes.pyc new file mode 100644 index 0000000..e603034 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/_procrustes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/_spherical_voronoi.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/_spherical_voronoi.py new file mode 100644 index 0000000..661796c --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/scipy/spatial/_spherical_voronoi.py @@ -0,0 +1,337 @@ +""" +Spherical Voronoi Code + +.. versionadded:: 0.18.0 + +""" +# +# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson, +# Nikolai Nowaczyk, Joe Pitt-Francis, 2015. +# +# Distributed under the same BSD license as Scipy. +# + +import numpy as np +import scipy +import itertools +from . import _voronoi +from scipy.spatial.distance import pdist + +__all__ = ['SphericalVoronoi'] + +def sphere_check(points, radius, center): + """ Determines distance of generators from theoretical sphere + surface. + + """ + actual_squared_radii = (((points[...,0] - center[0]) ** 2) + + ((points[...,1] - center[1]) ** 2) + + ((points[...,2] - center[2]) ** 2)) + max_discrepancy = (np.sqrt(actual_squared_radii) - radius).max() + return abs(max_discrepancy) + +def calc_circumcenters(tetrahedrons): + """ Calculates the cirumcenters of the circumspheres of tetrahedrons. + + An implementation based on + http://mathworld.wolfram.com/Circumsphere.html + + Parameters + ---------- + tetrahedrons : an array of shape (N, 4, 3) + consisting of N tetrahedrons defined by 4 points in 3D + + Returns + ---------- + circumcenters : an array of shape (N, 3) + consisting of the N circumcenters of the tetrahedrons in 3D + + """ + + num = tetrahedrons.shape[0] + a = np.concatenate((tetrahedrons, np.ones((num, 4, 1))), axis=2) + + sums = np.sum(tetrahedrons ** 2, axis=2) + d = np.concatenate((sums[:, :, np.newaxis], a), axis=2) + + dx = np.delete(d, 1, axis=2) + dy = np.delete(d, 2, axis=2) + dz = np.delete(d, 3, axis=2) + + dx = np.linalg.det(dx) + dy = -np.linalg.det(dy) + dz = np.linalg.det(dz) + a = np.linalg.det(a) + + nominator = np.vstack((dx, dy, dz)) + denominator = 2*a + return (nominator / denominator).T + + +def project_to_sphere(points, center, radius): + """ + Projects the elements of points onto the sphere defined + by center and radius. + + Parameters + ---------- + points : array of floats of shape (npoints, ndim) + consisting of the points in a space of dimension ndim + center : array of floats of shape (ndim,) + the center of the sphere to project on + radius : float + the radius of the sphere to project on + + returns: array of floats of shape (npoints, ndim) + the points projected onto the sphere + """ + + lengths = scipy.spatial.distance.cdist(points, np.array([center])) + return (points - center) / lengths * radius + center + + +class SphericalVoronoi: + """ Voronoi diagrams on the surface of a sphere. + + .. versionadded:: 0.18.0 + + Parameters + ---------- + points : ndarray of floats, shape (npoints, 3) + Coordinates of points to construct a spherical + Voronoi diagram from + radius : float, optional + Radius of the sphere (Default: 1) + center : ndarray of floats, shape (3,) + Center of sphere (Default: origin) + threshold : float + Threshold for detecting duplicate points and + mismatches between points and sphere parameters. 
+ (Default: 1e-06) + + Attributes + ---------- + points : double array of shape (npoints, 3) + the points in 3D to generate the Voronoi diagram from + radius : double + radius of the sphere + Default: None (forces estimation, which is less precise) + center : double array of shape (3,) + center of the sphere + Default: None (assumes sphere is centered at origin) + vertices : double array of shape (nvertices, 3) + Voronoi vertices corresponding to points + regions : list of list of integers of shape (npoints, _ ) + the n-th entry is a list consisting of the indices + of the vertices belonging to the n-th point in points + + Raises + ------ + ValueError + If there are duplicates in `points`. + If the provided `radius` is not consistent with `points`. + + Notes + ---------- + The spherical Voronoi diagram algorithm proceeds as follows. The Convex + Hull of the input points (generators) is calculated, and is equivalent to + their Delaunay triangulation on the surface of the sphere [Caroli]_. + A 3D Delaunay tetrahedralization is obtained by including the origin of + the coordinate system as the fourth vertex of each simplex of the Convex + Hull. The circumcenters of all tetrahedra in the system are calculated and + projected to the surface of the sphere, producing the Voronoi vertices. + The Delaunay tetrahedralization neighbour information is then used to + order the Voronoi region vertices around each generator. The latter + approach is substantially less sensitive to floating point issues than + angle-based methods of Voronoi region vertex sorting. + + The surface area of spherical polygons is calculated by decomposing them + into triangles and using L'Huilier's Theorem to calculate the spherical + excess of each triangle [Weisstein]_. The sum of the spherical excesses is + multiplied by the square of the sphere radius to obtain the surface area + of the spherical polygon. For nearly-degenerate spherical polygons an area + of approximately 0 is returned by default, rather than attempting the + unstable calculation. + + Empirical assessment of spherical Voronoi algorithm performance suggests + quadratic time complexity (loglinear is optimal, but algorithms are more + challenging to implement). The reconstitution of the surface area of the + sphere, measured as the sum of the surface areas of all Voronoi regions, + is closest to 100 % for larger (>> 10) numbers of generators. + + References + ---------- + + .. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of + points on or close to a sphere. Research Report RR-7004, 2009. + .. [Weisstein] "L'Huilier's Theorem." From MathWorld -- A Wolfram Web + Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html + + See Also + -------- + Voronoi : Conventional Voronoi diagrams in N dimensions. + + Examples + -------- + + >>> from matplotlib import colors + >>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection + >>> import matplotlib.pyplot as plt + >>> from scipy.spatial import SphericalVoronoi + >>> from mpl_toolkits.mplot3d import proj3d + >>> # set input data + >>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0], + ... 
[0, 1, 0], [0, -1, 0], [-1, 0, 0], ]) + >>> center = np.array([0, 0, 0]) + >>> radius = 1 + >>> # calculate spherical Voronoi diagram + >>> sv = SphericalVoronoi(points, radius, center) + >>> # sort vertices (optional, helpful for plotting) + >>> sv.sort_vertices_of_regions() + >>> # generate plot + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111, projection='3d') + >>> # plot the unit sphere for reference (optional) + >>> u = np.linspace(0, 2 * np.pi, 100) + >>> v = np.linspace(0, np.pi, 100) + >>> x = np.outer(np.cos(u), np.sin(v)) + >>> y = np.outer(np.sin(u), np.sin(v)) + >>> z = np.outer(np.ones(np.size(u)), np.cos(v)) + >>> ax.plot_surface(x, y, z, color='y', alpha=0.1) + >>> # plot generator points + >>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b') + >>> # plot Voronoi vertices + >>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2], + ... c='g') + >>> # indicate Voronoi regions (as Euclidean polygons) + >>> for region in sv.regions: + ... random_color = colors.rgb2hex(np.random.rand(3)) + ... polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0) + ... polygon.set_color(random_color) + ... ax.add_collection3d(polygon) + >>> plt.show() + + """ + + def __init__(self, points, radius=None, center=None, threshold=1e-06): + """ + Initializes the object and starts the computation of the Voronoi + diagram. + + points : The generator points of the Voronoi diagram assumed to be + all on the sphere with radius supplied by the radius parameter and + center supplied by the center parameter. + radius : The radius of the sphere. Will default to 1 if not supplied. + center : The center of the sphere. Will default to the origin if not + supplied. + """ + + self.points = points + if np.any(center): + self.center = center + else: + self.center = np.zeros(3) + if radius: + self.radius = radius + else: + self.radius = 1 + + if pdist(self.points).min() <= threshold * self.radius: + raise ValueError("Duplicate generators present.") + + max_discrepancy = sphere_check(self.points, + self.radius, + self.center) + if max_discrepancy >= threshold * self.radius: + raise ValueError("Radius inconsistent with generators.") + self.vertices = None + self.regions = None + self._tri = None + self._calc_vertices_regions() + + def _calc_vertices_regions(self): + """ + Calculates the Voronoi vertices and regions of the generators stored + in self.points. The vertices will be stored in self.vertices and the + regions in self.regions. 
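# [Editor's sketch, shape-only and illustrative] The tetrahedron
# construction performed below: inserting the sphere center as a fourth
# vertex turns each convex-hull triangle into a tetrahedron.
import numpy as np

triangles = np.zeros((8, 3, 3))          # 8 hull triangles, 3 vertices, 3-D
center = np.zeros(3)
tetrahedrons = np.insert(triangles, 3, np.array([center]), axis=1)
assert tetrahedrons.shape == (8, 4, 3)   # one extra vertex per simplex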
+ + This algorithm was discussed at PyData London 2015 by + Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk + """ + + # perform 3D Delaunay triangulation on data set + # (here ConvexHull can also be used, and is faster) + self._tri = scipy.spatial.ConvexHull(self.points) + + # add the center to each of the simplices in tri to get the same + # tetrahedrons we'd have gotten from Delaunay tetrahedralization + # tetrahedrons will have shape: (2N-4, 4, 3) + tetrahedrons = self._tri.points[self._tri.simplices] + tetrahedrons = np.insert( + tetrahedrons, + 3, + np.array([self.center]), + axis=1 + ) + + # produce circumcenters of tetrahedrons from 3D Delaunay + # circumcenters will have shape: (2N-4, 3) + circumcenters = calc_circumcenters(tetrahedrons) + + # project tetrahedron circumcenters to the surface of the sphere + # self.vertices will have shape: (2N-4, 3) + self.vertices = project_to_sphere( + circumcenters, + self.center, + self.radius + ) + + # calculate regions from triangulation + # simplex_indices will have shape: (2N-4,) + simplex_indices = np.arange(self._tri.simplices.shape[0]) + # tri_indices will have shape: (6N-12,) + tri_indices = np.column_stack([simplex_indices, simplex_indices, + simplex_indices]).ravel() + # point_indices will have shape: (6N-12,) + point_indices = self._tri.simplices.ravel() + + # array_associations will have shape: (6N-12, 2) + array_associations = np.dstack((point_indices, tri_indices))[0] + array_associations = array_associations[np.lexsort(( + array_associations[...,1], + array_associations[...,0]))] + array_associations = array_associations.astype(np.intp) + + # group by generator indices to produce + # unsorted regions in nested list + groups = [] + for k, g in itertools.groupby(array_associations, + lambda t: t[0]): + groups.append(list(list(zip(*list(g)))[1])) + + self.regions = groups + + def sort_vertices_of_regions(self): + """ + For each region in regions, it sorts the indices of the Voronoi + vertices such that the resulting points are in a clockwise or + counterclockwise order around the generator point. + + This is done as follows: Recall that the n-th region in regions + surrounds the n-th generator in points and that the k-th + Voronoi vertex in vertices is the projected circumcenter of the + tetrahedron obtained by the k-th triangle in _tri.simplices (and the + origin). For each region n, we choose the first triangle (=Voronoi + vertex) in _tri.simplices and a vertex of that triangle not equal to + the center n. These determine a unique neighbor of that triangle, + which is then chosen as the second triangle. The second triangle + will have a unique vertex not equal to the current vertex or the + center. This determines a unique neighbor of the second triangle, + which is then chosen as the third triangle and so forth. We proceed + through all the triangles (=Voronoi vertices) belonging to the + generator in points and obtain a sorted version of the vertices + of its surrounding region. 
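# [Editor's sketch, illustrative; assumes a scipy version that ships
# SphericalVoronoi] Sort the regions before building polygons, since the
# raw vertex order around each generator is arbitrary.
import numpy as np
from scipy.spatial import SphericalVoronoi

points = np.array([[0., 0., 1.], [0., 0., -1.], [1., 0., 0.],
                   [0., 1., 0.], [0., -1., 0.], [-1., 0., 0.]])
sv = SphericalVoronoi(points, radius=1, center=np.zeros(3))
sv.sort_vertices_of_regions()                    # orders each region in place
rings = [sv.vertices[region] for region in sv.regions]  # one ring per generator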
+ """ + + _voronoi.sort_vertices_of_regions(self._tri.simplices, + self.regions) diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/_spherical_voronoi.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/_spherical_voronoi.pyc new file mode 100644 index 0000000..9fa952b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/_spherical_voronoi.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/_voronoi.so b/project/venv/lib/python2.7/site-packages/scipy/spatial/_voronoi.so new file mode 100755 index 0000000..ed84968 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/_voronoi.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/ckdtree.so b/project/venv/lib/python2.7/site-packages/scipy/spatial/ckdtree.so new file mode 100755 index 0000000..cbca4a3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/ckdtree.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/distance.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/distance.py new file mode 100644 index 0000000..ab188f6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/distance.py @@ -0,0 +1,2774 @@ +""" +===================================================== +Distance computations (:mod:`scipy.spatial.distance`) +===================================================== + +.. sectionauthor:: Damian Eads + +Function Reference +------------------ + +Distance matrix computation from a collection of raw observation vectors +stored in a rectangular array. + +.. autosummary:: + :toctree: generated/ + + pdist -- pairwise distances between observation vectors. + cdist -- distances between two collections of observation vectors + squareform -- convert distance matrix to a condensed one and vice versa + directed_hausdorff -- directed Hausdorff distance between arrays + +Predicates for checking the validity of distance matrices, both +condensed and redundant. Also contained in this module are functions +for computing the number of observations in a distance matrix. + +.. autosummary:: + :toctree: generated/ + + is_valid_dm -- checks for a valid distance matrix + is_valid_y -- checks for a valid condensed distance matrix + num_obs_dm -- # of observations in a distance matrix + num_obs_y -- # of observations in a condensed distance matrix + +Distance functions between two numeric vectors ``u`` and ``v``. Computing +distances over a large collection of vectors is inefficient for these +functions. Use ``pdist`` for this purpose. + +.. autosummary:: + :toctree: generated/ + + braycurtis -- the Bray-Curtis distance. + canberra -- the Canberra distance. + chebyshev -- the Chebyshev distance. + cityblock -- the Manhattan distance. + correlation -- the Correlation distance. + cosine -- the Cosine distance. + euclidean -- the Euclidean distance. + jensenshannon -- the Jensen-Shannon distance. + mahalanobis -- the Mahalanobis distance. + minkowski -- the Minkowski distance. + seuclidean -- the normalized Euclidean distance. + sqeuclidean -- the squared Euclidean distance. + wminkowski -- (deprecated) alias of `minkowski`. + +Distance functions between two boolean vectors (representing sets) ``u`` and +``v``. As in the case of numerical vectors, ``pdist`` is more efficient for +computing the distances between all pairs. + +.. autosummary:: + :toctree: generated/ + + dice -- the Dice dissimilarity. + hamming -- the Hamming distance. 
+ jaccard -- the Jaccard distance. + kulsinski -- the Kulsinski distance. + rogerstanimoto -- the Rogers-Tanimoto dissimilarity. + russellrao -- the Russell-Rao dissimilarity. + sokalmichener -- the Sokal-Michener dissimilarity. + sokalsneath -- the Sokal-Sneath dissimilarity. + yule -- the Yule dissimilarity. + +:func:`hamming` also operates over discrete numerical vectors. +""" + +# Copyright (C) Damian Eads, 2007-2008. New BSD License. + +from __future__ import division, print_function, absolute_import + +__all__ = [ + 'braycurtis', + 'canberra', + 'cdist', + 'chebyshev', + 'cityblock', + 'correlation', + 'cosine', + 'dice', + 'directed_hausdorff', + 'euclidean', + 'hamming', + 'is_valid_dm', + 'is_valid_y', + 'jaccard', + 'jensenshannon', + 'kulsinski', + 'mahalanobis', + 'matching', + 'minkowski', + 'num_obs_dm', + 'num_obs_y', + 'pdist', + 'rogerstanimoto', + 'russellrao', + 'seuclidean', + 'sokalmichener', + 'sokalsneath', + 'sqeuclidean', + 'squareform', + 'wminkowski', + 'yule' +] + + +import warnings +import numpy as np + +from functools import partial +from collections import namedtuple +from scipy._lib.six import callable, string_types +from scipy._lib.six import xrange +from scipy._lib._util import _asarray_validated + +from . import _distance_wrap +from . import _hausdorff +from ..linalg import norm +from ..special import rel_entr + + +def _args_to_kwargs_xdist(args, kwargs, metric, func_name): + """ + Convert legacy positional arguments to keyword arguments for pdist/cdist. + """ + if not args: + return kwargs + + if (callable(metric) and metric not in [ + braycurtis, canberra, chebyshev, cityblock, correlation, cosine, + dice, euclidean, hamming, jaccard, jensenshannon, kulsinski, + mahalanobis, matching, minkowski, rogerstanimoto, russellrao, + seuclidean, sokalmichener, sokalsneath, sqeuclidean, yule, + wminkowski]): + raise TypeError('When using a custom metric arguments must be passed' + 'as keyword (i.e., ARGNAME=ARGVALUE)') + + if func_name == 'pdist': + old_arg_names = ['p', 'w', 'V', 'VI'] + else: + old_arg_names = ['p', 'V', 'VI', 'w'] + + num_args = len(args) + warnings.warn('%d metric parameters have been passed as positional.' + 'This will raise an error in a future version.' + 'Please pass arguments as keywords(i.e., ARGNAME=ARGVALUE)' + % num_args, DeprecationWarning) + + if num_args > 4: + raise ValueError('Deprecated %s signature accepts only 4' + 'positional arguments (%s), %d given.' + % (func_name, ', '.join(old_arg_names), num_args)) + + for old_arg, arg in zip(old_arg_names, args): + if old_arg in kwargs: + raise TypeError('%s() got multiple values for argument %s' + % (func_name, old_arg)) + kwargs[old_arg] = arg + return kwargs + + +def _copy_array_if_base_present(a): + """Copy the array if its base points to a parent array.""" + if a.base is not None: + return a.copy() + return a + + +def _correlation_cdist_wrap(XA, XB, dm, **kwargs): + XA = XA - XA.mean(axis=1, keepdims=True) + XB = XB - XB.mean(axis=1, keepdims=True) + _distance_wrap.cdist_cosine_double_wrap(XA, XB, dm, **kwargs) + + +def _correlation_pdist_wrap(X, dm, **kwargs): + X2 = X - X.mean(axis=1, keepdims=True) + _distance_wrap.pdist_cosine_double_wrap(X2, dm, **kwargs) + + +def _convert_to_type(X, out_type): + return np.ascontiguousarray(X, dtype=out_type) + + +def _filter_deprecated_kwargs(kwargs, args_blacklist): + # Filtering out old default keywords + for k in args_blacklist: + if k in kwargs: + del kwargs[k] + warnings.warn('Got unexpected kwarg %s. 
This will raise an error' + ' in a future version.' % k, DeprecationWarning) + + +def _nbool_correspond_all(u, v, w=None): + if u.dtype == v.dtype == bool and w is None: + not_u = ~u + not_v = ~v + nff = (not_u & not_v).sum() + nft = (not_u & v).sum() + ntf = (u & not_v).sum() + ntt = (u & v).sum() + else: + dtype = np.find_common_type([int], [u.dtype, v.dtype]) + u = u.astype(dtype) + v = v.astype(dtype) + not_u = 1.0 - u + not_v = 1.0 - v + if w is not None: + not_u = w * not_u + u = w * u + nff = (not_u * not_v).sum() + nft = (not_u * v).sum() + ntf = (u * not_v).sum() + ntt = (u * v).sum() + return (nff, nft, ntf, ntt) + + +def _nbool_correspond_ft_tf(u, v, w=None): + if u.dtype == v.dtype == bool and w is None: + not_u = ~u + not_v = ~v + nft = (not_u & v).sum() + ntf = (u & not_v).sum() + else: + dtype = np.find_common_type([int], [u.dtype, v.dtype]) + u = u.astype(dtype) + v = v.astype(dtype) + not_u = 1.0 - u + not_v = 1.0 - v + if w is not None: + not_u = w * not_u + u = w * u + nft = (not_u * v).sum() + ntf = (u * not_v).sum() + return (nft, ntf) + + +def _validate_cdist_input(XA, XB, mA, mB, n, metric_name, **kwargs): + if metric_name is not None: + # get supported types + types = _METRICS[metric_name].types + # choose best type + typ = types[types.index(XA.dtype)] if XA.dtype in types else types[0] + # validate data + XA = _convert_to_type(XA, out_type=typ) + XB = _convert_to_type(XB, out_type=typ) + + # validate kwargs + _validate_kwargs = _METRICS[metric_name].validator + if _validate_kwargs: + kwargs = _validate_kwargs(np.vstack([XA, XB]), mA + mB, n, **kwargs) + else: + typ = None + return XA, XB, typ, kwargs + + +def _validate_mahalanobis_kwargs(X, m, n, **kwargs): + VI = kwargs.pop('VI', None) + if VI is None: + if m <= n: + # There are fewer observations than the dimension of + # the observations. + raise ValueError("The number of observations (%d) is too " + "small; the covariance matrix is " + "singular. For observations with %d " + "dimensions, at least %d observations " + "are required." % (m, n, n + 1)) + CV = np.atleast_2d(np.cov(X.astype(np.double).T)) + VI = np.linalg.inv(CV).T.copy() + kwargs["VI"] = _convert_to_double(VI) + return kwargs + + +def _validate_minkowski_kwargs(X, m, n, **kwargs): + if 'p' not in kwargs: + kwargs['p'] = 2. + return kwargs + + +def _validate_pdist_input(X, m, n, metric_name, **kwargs): + if metric_name is not None: + # get supported types + types = _METRICS[metric_name].types + # choose best type + typ = types[types.index(X.dtype)] if X.dtype in types else types[0] + # validate data + X = _convert_to_type(X, out_type=typ) + + # validate kwargs + _validate_kwargs = _METRICS[metric_name].validator + if _validate_kwargs: + kwargs = _validate_kwargs(X, m, n, **kwargs) + else: + typ = None + return X, typ, kwargs + + +def _validate_seuclidean_kwargs(X, m, n, **kwargs): + V = kwargs.pop('V', None) + if V is None: + V = np.var(X.astype(np.double), axis=0, ddof=1) + else: + V = np.asarray(V, order='c') + if V.dtype != np.double: + raise TypeError('Variance vector V must contain doubles.') + if len(V.shape) != 1: + raise ValueError('Variance vector V must ' + 'be one-dimensional.') + if V.shape[0] != n: + raise ValueError('Variance vector V must be of the same ' + 'dimension as the vectors on which the distances ' + 'are computed.') + kwargs['V'] = _convert_to_double(V) + return kwargs + + +def _validate_vector(u, dtype=None): + # XXX Is order='c' really necessary? 
+ u = np.asarray(u, dtype=dtype, order='c').squeeze() + # Ensure values such as u=1 and u=[1] still return 1-D arrays. + u = np.atleast_1d(u) + if u.ndim > 1: + raise ValueError("Input vector should be 1-D.") + return u + + +def _validate_weights(w, dtype=np.double): + w = _validate_vector(w, dtype=dtype) + if np.any(w < 0): + raise ValueError("Input weights should be all non-negative") + return w + + +def _validate_wminkowski_kwargs(X, m, n, **kwargs): + w = kwargs.pop('w', None) + if w is None: + raise ValueError('weighted minkowski requires a weight ' + 'vector `w` to be given.') + kwargs['w'] = _validate_weights(w) + if 'p' not in kwargs: + kwargs['p'] = 2. + return kwargs + + +def directed_hausdorff(u, v, seed=0): + """ + Compute the directed Hausdorff distance between two N-D arrays. + + Distances between pairs are calculated using a Euclidean metric. + + Parameters + ---------- + u : (M,N) ndarray + Input array. + v : (O,N) ndarray + Input array. + seed : int or None + Local `np.random.RandomState` seed. Default is 0, a random shuffling of + u and v that guarantees reproducibility. + + Returns + ------- + d : double + The directed Hausdorff distance between arrays `u` and `v`, + + index_1 : int + index of point contributing to Hausdorff pair in `u` + + index_2 : int + index of point contributing to Hausdorff pair in `v` + + Notes + ----- + Uses the early break technique and the random sampling approach + described by [1]_. Although worst-case performance is ``O(m * o)`` + (as with the brute force algorithm), this is unlikely in practice + as the input data would have to require the algorithm to explore + every single point interaction, and after the algorithm shuffles + the input points at that. The best case performance is O(m), which + is satisfied by selecting an inner loop distance that is less than + cmax and leads to an early break as often as possible. The authors + have formally shown that the average runtime is closer to O(m). + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for + calculating the exact Hausdorff distance." IEEE Transactions On + Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63, + 2015. + + See Also + -------- + scipy.spatial.procrustes : Another similarity test for two data sets + + Examples + -------- + Find the directed Hausdorff distance between two 2-D arrays of + coordinates: + + >>> from scipy.spatial.distance import directed_hausdorff + >>> u = np.array([(1.0, 0.0), + ... (0.0, 1.0), + ... (-1.0, 0.0), + ... (0.0, -1.0)]) + >>> v = np.array([(2.0, 0.0), + ... (0.0, 2.0), + ... (-2.0, 0.0), + ... (0.0, -4.0)]) + + >>> directed_hausdorff(u, v)[0] + 2.23606797749979 + >>> directed_hausdorff(v, u)[0] + 3.0 + + Find the general (symmetric) Hausdorff distance between two 2-D + arrays of coordinates: + + >>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0]) + 3.0 + + Find the indices of the points that generate the Hausdorff distance + (the Hausdorff pair): + + >>> directed_hausdorff(v, u)[1:] + (3, 3) + + """ + u = np.asarray(u, dtype=np.float64, order='c') + v = np.asarray(v, dtype=np.float64, order='c') + result = _hausdorff.directed_hausdorff(u, v, seed) + return result + + +def minkowski(u, v, p=2, w=None): + """ + Compute the Minkowski distance between two 1-D arrays. + + The Minkowski distance between 1-D arrays `u` and `v`, + is defined as + + .. math:: + + {||u-v||}_p = (\\sum{|u_i - v_i|^p})^{1/p}. + + + \\left(\\sum{w_i(|(u_i - v_i)|^p)}\\right)^{1/p}. 
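# [Editor's sketch, illustrative] The weighting used in the
# implementation below: applying w**(1/p) to the difference means the
# weighted distance equals the unweighted distance of rescaled vectors.
import numpy as np
from scipy.spatial import distance

u, v = np.array([1., 0., 0.]), np.array([0., 1., 0.])
w, p = np.array([1., 2., 4.]), 3
root_w = w ** (1.0 / p)
assert np.isclose(distance.minkowski(u, v, p, w=w),
                  distance.minkowski(root_w * u, root_w * v, p))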
+ + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + p : int + The order of the norm of the difference :math:`{||u-v||}_p`. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + minkowski : double + The Minkowski distance between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1) + 2.0 + >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2) + 1.4142135623730951 + >>> distance.minkowski([1, 0, 0], [0, 1, 0], 3) + 1.2599210498948732 + >>> distance.minkowski([1, 1, 0], [0, 1, 0], 1) + 1.0 + >>> distance.minkowski([1, 1, 0], [0, 1, 0], 2) + 1.0 + >>> distance.minkowski([1, 1, 0], [0, 1, 0], 3) + 1.0 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if p < 1: + raise ValueError("p must be at least 1") + u_v = u - v + if w is not None: + w = _validate_weights(w) + if p == 1: + root_w = w + if p == 2: + # better precision and speed + root_w = np.sqrt(w) + else: + root_w = np.power(w, 1/p) + u_v = root_w * u_v + dist = norm(u_v, ord=p) + return dist + + +# `minkowski` gained weights in scipy 1.0. Once we're at say version 1.3, +# deprecated `wminkowski`. Not done at once because it would be annoying for +# downstream libraries that used `wminkowski` and support multiple scipy +# versions. +def wminkowski(u, v, p, w): + """ + Compute the weighted Minkowski distance between two 1-D arrays. + + The weighted Minkowski distance between `u` and `v`, defined as + + .. math:: + + \\left(\\sum{(|w_i (u_i - v_i)|^p)}\\right)^{1/p}. + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + p : int + The order of the norm of the difference :math:`{||u-v||}_p`. + w : (N,) array_like + The weight vector. + + Returns + ------- + wminkowski : double + The weighted Minkowski distance between vectors `u` and `v`. + + Notes + ----- + `wminkowski` is DEPRECATED. It implements a definition where weights + are powered. It is recommended to use the weighted version of `minkowski` + instead. This function will be removed in a future version of scipy. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 1, np.ones(3)) + 2.0 + >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 2, np.ones(3)) + 1.4142135623730951 + >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 3, np.ones(3)) + 1.2599210498948732 + >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 1, np.ones(3)) + 1.0 + >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 2, np.ones(3)) + 1.0 + >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 3, np.ones(3)) + 1.0 + + """ + w = _validate_weights(w) + return minkowski(u, v, p=p, w=w**p) + + +def euclidean(u, v, w=None): + """ + Computes the Euclidean distance between two 1-D arrays. + + The Euclidean distance between 1-D arrays `u` and `v`, is defined as + + .. math:: + + {||u-v||}_2 + + \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2} + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + euclidean : double + The Euclidean distance between vectors `u` and `v`. 
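# [Editor's sketch, illustrative] As the implementation below shows,
# euclidean is simply minkowski with p=2.
import numpy as np
from scipy.spatial import distance

u, v = [1, 0, 0], [0, 1, 0]
assert np.isclose(distance.euclidean(u, v), distance.minkowski(u, v, p=2))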
+ + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.euclidean([1, 0, 0], [0, 1, 0]) + 1.4142135623730951 + >>> distance.euclidean([1, 1, 0], [0, 1, 0]) + 1.0 + + """ + return minkowski(u, v, p=2, w=w) + + +def sqeuclidean(u, v, w=None): + """ + Compute the squared Euclidean distance between two 1-D arrays. + + The squared Euclidean distance between `u` and `v` is defined as + + .. math:: + + {||u-v||}_2^2 + + \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right) + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + sqeuclidean : double + The squared Euclidean distance between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0]) + 2.0 + >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0]) + 1.0 + + """ + # Preserve float dtypes, but convert everything else to np.float64 + # for stability. + utype, vtype = None, None + if not (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)): + utype = np.float64 + if not (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)): + vtype = np.float64 + + u = _validate_vector(u, dtype=utype) + v = _validate_vector(v, dtype=vtype) + u_v = u - v + u_v_w = u_v # only want weights applied once + if w is not None: + w = _validate_weights(w) + u_v_w = w * u_v + return np.dot(u_v, u_v_w) + + +def correlation(u, v, w=None, centered=True): + """ + Compute the correlation distance between two 1-D arrays. + + The correlation distance between `u` and `v`, is + defined as + + .. math:: + + 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})} + {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2} + + where :math:`\\bar{u}` is the mean of the elements of `u` + and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`. + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + correlation : double + The correlation distance between 1-D array `u` and `v`. + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if w is not None: + w = _validate_weights(w) + if centered: + umu = np.average(u, weights=w) + vmu = np.average(v, weights=w) + u = u - umu + v = v - vmu + uv = np.average(u * v, weights=w) + uu = np.average(np.square(u), weights=w) + vv = np.average(np.square(v), weights=w) + dist = 1.0 - uv / np.sqrt(uu * vv) + return dist + + +def cosine(u, v, w=None): + """ + Compute the Cosine distance between 1-D arrays. + + The Cosine distance between `u` and `v`, is defined as + + .. math:: + + 1 - \\frac{u \\cdot v} + {||u||_2 ||v||_2}. + + where :math:`u \\cdot v` is the dot product of :math:`u` and + :math:`v`. + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + cosine : double + The Cosine distance between vectors `u` and `v`. 
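# [Editor's sketch, illustrative] The cosine distance equals
# 1 - u.v / (||u|| ||v||), i.e. the uncentered correlation computed below.
import numpy as np
from scipy.spatial import distance

u, v = np.array([1., 1., 0.]), np.array([0., 1., 0.])
expected = 1.0 - np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
assert np.isclose(distance.cosine(u, v), expected)   # 0.2928...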
+ + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.cosine([1, 0, 0], [0, 1, 0]) + 1.0 + >>> distance.cosine([100, 0, 0], [0, 1, 0]) + 1.0 + >>> distance.cosine([1, 1, 0], [0, 1, 0]) + 0.29289321881345254 + + """ + # cosine distance is also referred to as 'uncentered correlation', + # or 'reflective correlation' + return correlation(u, v, w=w, centered=False) + + +def hamming(u, v, w=None): + """ + Compute the Hamming distance between two 1-D arrays. + + The Hamming distance between 1-D arrays `u` and `v`, is simply the + proportion of disagreeing components in `u` and `v`. If `u` and `v` are + boolean vectors, the Hamming distance is + + .. math:: + + \\frac{c_{01} + c_{10}}{n} + + where :math:`c_{ij}` is the number of occurrences of + :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for + :math:`k < n`. + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + hamming : double + The Hamming distance between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.hamming([1, 0, 0], [0, 1, 0]) + 0.66666666666666663 + >>> distance.hamming([1, 0, 0], [1, 1, 0]) + 0.33333333333333331 + >>> distance.hamming([1, 0, 0], [2, 0, 0]) + 0.33333333333333331 + >>> distance.hamming([1, 0, 0], [3, 0, 0]) + 0.33333333333333331 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if u.shape != v.shape: + raise ValueError('The 1d arrays must have equal lengths.') + u_ne_v = u != v + if w is not None: + w = _validate_weights(w) + return np.average(u_ne_v, weights=w) + + +def jaccard(u, v, w=None): + """ + Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays. + + The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`, + is defined as + + .. math:: + + \\frac{c_{TF} + c_{FT}} + {c_{TT} + c_{FT} + c_{TF}} + + where :math:`c_{ij}` is the number of occurrences of + :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for + :math:`k < n`. + + Parameters + ---------- + u : (N,) array_like, bool + Input array. + v : (N,) array_like, bool + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + jaccard : double + The Jaccard distance between vectors `u` and `v`. + + Notes + ----- + When both `u` and `v` lead to a `0/0` division i.e. there is no overlap + between the items in the vectors the returned distance is 0. See the + Wikipedia page on the Jaccard index [1]_, and this paper [2]_. + + .. versionchanged:: 1.2.0 + Previously, when `u` and `v` lead to a `0/0` division, the function + would return NaN. This was changed to return 0 instead. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Jaccard_index + .. [2] S. 
Kosub, "A note on the triangle inequality for the Jaccard + distance", 2016, Available online: https://arxiv.org/pdf/1612.02696.pdf + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.jaccard([1, 0, 0], [0, 1, 0]) + 1.0 + >>> distance.jaccard([1, 0, 0], [1, 1, 0]) + 0.5 + >>> distance.jaccard([1, 0, 0], [1, 2, 0]) + 0.5 + >>> distance.jaccard([1, 0, 0], [1, 1, 1]) + 0.66666666666666663 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + + nonzero = np.bitwise_or(u != 0, v != 0) + unequal_nonzero = np.bitwise_and((u != v), nonzero) + if w is not None: + w = _validate_weights(w) + nonzero = w * nonzero + unequal_nonzero = w * unequal_nonzero + a = np.double(unequal_nonzero.sum()) + b = np.double(nonzero.sum()) + return (a / b) if b != 0 else 0 + + +def kulsinski(u, v, w=None): + """ + Compute the Kulsinski dissimilarity between two boolean 1-D arrays. + + The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`, + is defined as + + .. math:: + + \\frac{c_{TF} + c_{FT} - c_{TT} + n} + {c_{FT} + c_{TF} + n} + + where :math:`c_{ij}` is the number of occurrences of + :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for + :math:`k < n`. + + Parameters + ---------- + u : (N,) array_like, bool + Input array. + v : (N,) array_like, bool + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + kulsinski : double + The Kulsinski distance between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.kulsinski([1, 0, 0], [0, 1, 0]) + 1.0 + >>> distance.kulsinski([1, 0, 0], [1, 1, 0]) + 0.75 + >>> distance.kulsinski([1, 0, 0], [2, 1, 0]) + 0.33333333333333331 + >>> distance.kulsinski([1, 0, 0], [3, 1, 0]) + -0.5 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if w is None: + n = float(len(u)) + else: + w = _validate_weights(w) + n = w.sum() + (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w) + + return (ntf + nft - ntt + n) / (ntf + nft + n) + + +def seuclidean(u, v, V): + """ + Return the standardized Euclidean distance between two 1-D arrays. + + The standardized Euclidean distance between `u` and `v`. + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + V : (N,) array_like + `V` is an 1-D array of component variances. It is usually computed + among a larger collection vectors. + + Returns + ------- + seuclidean : double + The standardized Euclidean distance between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1]) + 4.4721359549995796 + >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1]) + 3.3166247903553998 + >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [10, 0.1, 0.1]) + 3.1780497164141406 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + V = _validate_vector(V, dtype=np.float64) + if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]: + raise TypeError('V must be a 1-D array of the same dimension ' + 'as u and v.') + return euclidean(u, v, w=1/V) + + +def cityblock(u, v, w=None): + """ + Compute the City Block (Manhattan) distance. + + Computes the Manhattan distance between two 1-D arrays `u` and `v`, + which is defined as + + .. math:: + + \\sum_i {\\left| u_i - v_i \\right|}. + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. 
+ w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + cityblock : double + The City Block (Manhattan) distance between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.cityblock([1, 0, 0], [0, 1, 0]) + 2 + >>> distance.cityblock([1, 0, 0], [0, 2, 0]) + 3 + >>> distance.cityblock([1, 0, 0], [1, 1, 0]) + 1 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + l1_diff = abs(u - v) + if w is not None: + w = _validate_weights(w) + l1_diff = w * l1_diff + return l1_diff.sum() + + +def mahalanobis(u, v, VI): + """ + Compute the Mahalanobis distance between two 1-D arrays. + + The Mahalanobis distance between 1-D arrays `u` and `v`, is defined as + + .. math:: + + \\sqrt{ (u-v) V^{-1} (u-v)^T } + + where ``V`` is the covariance matrix. Note that the argument `VI` + is the inverse of ``V``. + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + VI : ndarray + The inverse of the covariance matrix. + + Returns + ------- + mahalanobis : double + The Mahalanobis distance between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]] + >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv) + 1.0 + >>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv) + 1.0 + >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv) + 1.7320508075688772 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + VI = np.atleast_2d(VI) + delta = u - v + m = np.dot(np.dot(delta, VI), delta) + return np.sqrt(m) + + +def chebyshev(u, v, w=None): + """ + Compute the Chebyshev distance. + + Computes the Chebyshev distance between two 1-D arrays `u` and `v`, + which is defined as + + .. math:: + + \\max_i {|u_i-v_i|}. + + Parameters + ---------- + u : (N,) array_like + Input vector. + v : (N,) array_like + Input vector. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + chebyshev : double + The Chebyshev distance between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.chebyshev([1, 0, 0], [0, 1, 0]) + 1 + >>> distance.chebyshev([1, 1, 0], [0, 1, 0]) + 1 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if w is not None: + w = _validate_weights(w) + has_weight = w > 0 + if has_weight.sum() < w.size: + u = u[has_weight] + v = v[has_weight] + return max(abs(u - v)) + + +def braycurtis(u, v, w=None): + """ + Compute the Bray-Curtis distance between two 1-D arrays. + + Bray-Curtis distance is defined as + + .. math:: + + \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|} + + The Bray-Curtis distance is in the range [0, 1] if all coordinates are + positive, and is undefined if the inputs are of length zero. + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + braycurtis : double + The Bray-Curtis distance between 1-D arrays `u` and `v`. 
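# [Editor's sketch, illustrative] Checking the Bray-Curtis definition
# above by hand.
import numpy as np
from scipy.spatial import distance

u, v = np.array([1., 1., 0.]), np.array([0., 1., 0.])
assert np.isclose(distance.braycurtis(u, v),
                  np.abs(u - v).sum() / np.abs(u + v).sum())   # 1/3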
+ + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.braycurtis([1, 0, 0], [0, 1, 0]) + 1.0 + >>> distance.braycurtis([1, 1, 0], [0, 1, 0]) + 0.33333333333333331 + + """ + u = _validate_vector(u) + v = _validate_vector(v, dtype=np.float64) + l1_diff = abs(u - v) + l1_sum = abs(u + v) + if w is not None: + w = _validate_weights(w) + l1_diff = w * l1_diff + l1_sum = w * l1_sum + return l1_diff.sum() / l1_sum.sum() + + +def canberra(u, v, w=None): + """ + Compute the Canberra distance between two 1-D arrays. + + The Canberra distance is defined as + + .. math:: + + d(u,v) = \\sum_i \\frac{|u_i-v_i|} + {|u_i|+|v_i|}. + + Parameters + ---------- + u : (N,) array_like + Input array. + v : (N,) array_like + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + canberra : double + The Canberra distance between vectors `u` and `v`. + + Notes + ----- + When `u[i]` and `v[i]` are 0 for given i, then the fraction 0/0 = 0 is + used in the calculation. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.canberra([1, 0, 0], [0, 1, 0]) + 2.0 + >>> distance.canberra([1, 1, 0], [0, 1, 0]) + 1.0 + + """ + u = _validate_vector(u) + v = _validate_vector(v, dtype=np.float64) + if w is not None: + w = _validate_weights(w) + olderr = np.seterr(invalid='ignore') + try: + abs_uv = abs(u - v) + abs_u = abs(u) + abs_v = abs(v) + d = abs_uv / (abs_u + abs_v) + if w is not None: + d = w * d + d = np.nansum(d) + finally: + np.seterr(**olderr) + return d + + +def jensenshannon(p, q, base=None): + """ + Compute the Jensen-Shannon distance (metric) between + two 1-D probability arrays. This is the square root + of the Jensen-Shannon divergence. + + The Jensen-Shannon distance between two probability + vectors `p` and `q` is defined as, + + .. math:: + + \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}} + + where :math:`m` is the pointwise mean of :math:`p` and :math:`q` + and :math:`D` is the Kullback-Leibler divergence. + + This routine will normalize `p` and `q` if they don't sum to 1.0. + + Parameters + ---------- + p : (N,) array_like + left probability vector + q : (N,) array_like + right probability vector + base : double, optional + the base of the logarithm used to compute the output + if not given, then the routine uses the default base of + scipy.stats.entropy. + + Returns + ------- + js : double + The Jensen-Shannon distance between `p` and `q` + + .. versionadded:: 1.2.0 + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0) + 1.0 + >>> distance.jensenshannon([1.0, 0.0], [0.5, 0.5]) + 0.46450140402245893 + >>> distance.jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0]) + 0.0 + + """ + p = np.asarray(p) + q = np.asarray(q) + p = p / np.sum(p, axis=0) + q = q / np.sum(q, axis=0) + m = (p + q) / 2.0 + left = rel_entr(p, m) + right = rel_entr(q, m) + js = np.sum(left, axis=0) + np.sum(right, axis=0) + if base is not None: + js /= np.log(base) + return np.sqrt(js / 2.0) + + +def yule(u, v, w=None): + """ + Compute the Yule dissimilarity between two boolean 1-D arrays. + + The Yule dissimilarity is defined as + + .. math:: + + \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}} + + where :math:`c_{ij}` is the number of occurrences of + :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for + :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`. 
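# [Editor's sketch, illustrative] Computing the Yule dissimilarity
# directly from the 2x2 agreement counts defined above.
import numpy as np
from scipy.spatial import distance

u = np.array([True, False, False, True])
v = np.array([False, True, False, True])
ntt, nff = (u & v).sum(), (~u & ~v).sum()
ntf, nft = (u & ~v).sum(), (~u & v).sum()
R = 2.0 * ntf * nft
assert np.isclose(distance.yule(u, v), R / (ntt * nff + R / 2.0))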
+ + Parameters + ---------- + u : (N,) array_like, bool + Input array. + v : (N,) array_like, bool + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + yule : double + The Yule dissimilarity between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.yule([1, 0, 0], [0, 1, 0]) + 2.0 + >>> distance.yule([1, 1, 0], [0, 1, 0]) + 0.0 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if w is not None: + w = _validate_weights(w) + (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w) + return float(2.0 * ntf * nft / np.array(ntt * nff + ntf * nft)) + + +@np.deprecate(message="spatial.distance.matching is deprecated in scipy 1.0.0; " + "use spatial.distance.hamming instead.") +def matching(u, v, w=None): + """ + Compute the Hamming distance between two boolean 1-D arrays. + + This is a deprecated synonym for :func:`hamming`. + """ + return hamming(u, v, w=w) + + +def dice(u, v, w=None): + """ + Compute the Dice dissimilarity between two boolean 1-D arrays. + + The Dice dissimilarity between `u` and `v`, is + + .. math:: + + \\frac{c_{TF} + c_{FT}} + {2c_{TT} + c_{FT} + c_{TF}} + + where :math:`c_{ij}` is the number of occurrences of + :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for + :math:`k < n`. + + Parameters + ---------- + u : (N,) ndarray, bool + Input 1-D array. + v : (N,) ndarray, bool + Input 1-D array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + dice : double + The Dice dissimilarity between 1-D arrays `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.dice([1, 0, 0], [0, 1, 0]) + 1.0 + >>> distance.dice([1, 0, 0], [1, 1, 0]) + 0.3333333333333333 + >>> distance.dice([1, 0, 0], [2, 0, 0]) + -0.3333333333333333 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if w is not None: + w = _validate_weights(w) + if u.dtype == v.dtype == bool and w is None: + ntt = (u & v).sum() + else: + dtype = np.find_common_type([int], [u.dtype, v.dtype]) + u = u.astype(dtype) + v = v.astype(dtype) + if w is None: + ntt = (u * v).sum() + else: + ntt = (u * v * w).sum() + (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w) + return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft)) + + +def rogerstanimoto(u, v, w=None): + """ + Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays. + + The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays + `u` and `v`, is defined as + + .. math:: + \\frac{R} + {c_{TT} + c_{FF} + R} + + where :math:`c_{ij}` is the number of occurrences of + :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for + :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`. + + Parameters + ---------- + u : (N,) array_like, bool + Input array. + v : (N,) array_like, bool + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + rogerstanimoto : double + The Rogers-Tanimoto dissimilarity between vectors + `u` and `v`. 
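# [Editor's sketch, illustrative] The same kind of hand check for
# Rogers-Tanimoto, with R = 2(c_TF + c_FT) as defined above.
import numpy as np
from scipy.spatial import distance

u = np.array([True, False, False])
v = np.array([False, True, False])
ntt, nff = (u & v).sum(), (~u & ~v).sum()
ntf, nft = (u & ~v).sum(), (~u & v).sum()
R = 2.0 * (ntf + nft)
assert np.isclose(distance.rogerstanimoto(u, v), R / (ntt + nff + R))   # 0.8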
+ + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0]) + 0.8 + >>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0]) + 0.5 + >>> distance.rogerstanimoto([1, 0, 0], [2, 0, 0]) + -1.0 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if w is not None: + w = _validate_weights(w) + (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w) + return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft))) + + +def russellrao(u, v, w=None): + """ + Compute the Russell-Rao dissimilarity between two boolean 1-D arrays. + + The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and + `v`, is defined as + + .. math:: + + \\frac{n - c_{TT}} + {n} + + where :math:`c_{ij}` is the number of occurrences of + :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for + :math:`k < n`. + + Parameters + ---------- + u : (N,) array_like, bool + Input array. + v : (N,) array_like, bool + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + russellrao : double + The Russell-Rao dissimilarity between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.russellrao([1, 0, 0], [0, 1, 0]) + 1.0 + >>> distance.russellrao([1, 0, 0], [1, 1, 0]) + 0.6666666666666666 + >>> distance.russellrao([1, 0, 0], [2, 0, 0]) + 0.3333333333333333 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if u.dtype == v.dtype == bool and w is None: + ntt = (u & v).sum() + n = float(len(u)) + elif w is None: + ntt = (u * v).sum() + n = float(len(u)) + else: + w = _validate_weights(w) + ntt = (u * v * w).sum() + n = w.sum() + return float(n - ntt) / n + + +def sokalmichener(u, v, w=None): + """ + Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays. + + The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`, + is defined as + + .. math:: + + \\frac{R} + {S + R} + + where :math:`c_{ij}` is the number of occurrences of + :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for + :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and + :math:`S = c_{FF} + c_{TT}`. + + Parameters + ---------- + u : (N,) array_like, bool + Input array. + v : (N,) array_like, bool + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + sokalmichener : double + The Sokal-Michener dissimilarity between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.sokalmichener([1, 0, 0], [0, 1, 0]) + 0.8 + >>> distance.sokalmichener([1, 0, 0], [1, 1, 0]) + 0.5 + >>> distance.sokalmichener([1, 0, 0], [2, 0, 0]) + -1.0 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if u.dtype == v.dtype == bool and w is None: + ntt = (u & v).sum() + nff = (~u & ~v).sum() + elif w is None: + ntt = (u * v).sum() + nff = ((1.0 - u) * (1.0 - v)).sum() + else: + w = _validate_weights(w) + ntt = (u * v * w).sum() + nff = ((1.0 - u) * (1.0 - v) * w).sum() + (nft, ntf) = _nbool_correspond_ft_tf(u, v) + return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft)) + + +def sokalsneath(u, v, w=None): + """ + Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays. + + The Sokal-Sneath dissimilarity between `u` and `v`, + + .. 
math:: + + \\frac{R} + {c_{TT} + R} + + where :math:`c_{ij}` is the number of occurrences of + :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for + :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`. + + Parameters + ---------- + u : (N,) array_like, bool + Input array. + v : (N,) array_like, bool + Input array. + w : (N,) array_like, optional + The weights for each value in `u` and `v`. Default is None, + which gives each value a weight of 1.0 + + Returns + ------- + sokalsneath : double + The Sokal-Sneath dissimilarity between vectors `u` and `v`. + + Examples + -------- + >>> from scipy.spatial import distance + >>> distance.sokalsneath([1, 0, 0], [0, 1, 0]) + 1.0 + >>> distance.sokalsneath([1, 0, 0], [1, 1, 0]) + 0.66666666666666663 + >>> distance.sokalsneath([1, 0, 0], [2, 1, 0]) + 0.0 + >>> distance.sokalsneath([1, 0, 0], [3, 1, 0]) + -2.0 + + """ + u = _validate_vector(u) + v = _validate_vector(v) + if u.dtype == v.dtype == bool and w is None: + ntt = (u & v).sum() + elif w is None: + ntt = (u * v).sum() + else: + w = _validate_weights(w) + ntt = (u * v * w).sum() + (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w) + denom = np.array(ntt + 2.0 * (ntf + nft)) + if not denom.any(): + raise ValueError('Sokal-Sneath dissimilarity is not defined for ' + 'vectors that are entirely false.') + return float(2.0 * (ntf + nft)) / denom + + +_convert_to_double = partial(_convert_to_type, out_type=np.double) +_convert_to_bool = partial(_convert_to_type, out_type=bool) + +# adding python-only wrappers to _distance_wrap module +_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap +_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap + +# Registry of implemented metrics: +# Dictionary with the following structure: +# { +# metric_name : MetricInfo(aka, types=[double], validator=None) +# } +# +# Where: +# `metric_name` must be equal to python metric name +# +# MetricInfo is a named tuple with fields: +# 'aka' : [list of aliases], +# +# 'validator': f(X, m, n, **kwargs) # function that check kwargs and +# # computes default values. +# +# 'types': [list of supported types], # X (pdist) and XA (cdist) are used to +# # choose the type. if there is no match +# # the first type is used. 
Default double +# } +MetricInfo = namedtuple("MetricInfo", 'aka types validator ') +MetricInfo.__new__.__defaults__ = (['double'], None) + +_METRICS = { + 'braycurtis': MetricInfo(aka=['braycurtis']), + 'canberra': MetricInfo(aka=['canberra']), + 'chebyshev': MetricInfo(aka=['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']), + 'cityblock': MetricInfo(aka=['cityblock', 'cblock', 'cb', 'c']), + 'correlation': MetricInfo(aka=['correlation', 'co']), + 'cosine': MetricInfo(aka=['cosine', 'cos']), + 'dice': MetricInfo(aka=['dice'], types=['bool']), + 'euclidean': MetricInfo(aka=['euclidean', 'euclid', 'eu', 'e']), + 'hamming': MetricInfo(aka=['matching', 'hamming', 'hamm', 'ha', 'h'], + types=['double', 'bool']), + 'jaccard': MetricInfo(aka=['jaccard', 'jacc', 'ja', 'j'], + types=['double', 'bool']), + 'jensenshannon': MetricInfo(aka=['jensenshannon', 'js'], + types=['double']), + 'kulsinski': MetricInfo(aka=['kulsinski'], types=['bool']), + 'mahalanobis': MetricInfo(aka=['mahalanobis', 'mahal', 'mah'], + validator=_validate_mahalanobis_kwargs), + 'minkowski': MetricInfo(aka=['minkowski', 'mi', 'm', 'pnorm'], + validator=_validate_minkowski_kwargs), + 'rogerstanimoto': MetricInfo(aka=['rogerstanimoto'], types=['bool']), + 'russellrao': MetricInfo(aka=['russellrao'], types=['bool']), + 'seuclidean': MetricInfo(aka=['seuclidean', 'se', 's'], + validator=_validate_seuclidean_kwargs), + 'sokalmichener': MetricInfo(aka=['sokalmichener'], types=['bool']), + 'sokalsneath': MetricInfo(aka=['sokalsneath'], types=['bool']), + 'sqeuclidean': MetricInfo(aka=['sqeuclidean', 'sqe', 'sqeuclid']), + 'wminkowski': MetricInfo(aka=['wminkowski', 'wmi', 'wm', 'wpnorm'], + validator=_validate_wminkowski_kwargs), + 'yule': MetricInfo(aka=['yule'], types=['bool']), +} + + +_METRIC_ALIAS = dict((alias, name) + for name, info in _METRICS.items() + for alias in info.aka) + +_METRICS_NAMES = list(_METRICS.keys()) + +_TEST_METRICS = {'test_' + name: globals()[name] for name in _METRICS.keys()} + + +def _select_weighted_metric(mstr, kwargs, out): + kwargs = dict(kwargs) + + if "w" in kwargs and kwargs["w"] is None: + # w=None is the same as omitting it + kwargs.pop("w") + + if mstr.startswith("test_") or mstr in _METRICS['wminkowski'].aka: + # These support weights + pass + elif "w" in kwargs: + if (mstr in _METRICS['seuclidean'].aka or + mstr in _METRICS['mahalanobis'].aka): + raise ValueError("metric %s incompatible with weights" % mstr) + + # XXX: C-versions do not support weights + # need to use python version for weighting + kwargs['out'] = out + mstr = "test_%s" % mstr + + return mstr, kwargs + + +def pdist(X, metric='euclidean', *args, **kwargs): + """ + Pairwise distances between observations in n-dimensional space. + + See Notes for common calling conventions. + + Parameters + ---------- + X : ndarray + An m by n array of m original observations in an + n-dimensional space. + metric : str or function, optional + The distance metric to use. The distance function can + be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', + 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', + 'jaccard', 'jensenshannon', 'kulsinski', 'mahalanobis', 'matching', + 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', + 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'. + *args : tuple. Deprecated. + Additional arguments should be passed as keyword arguments + **kwargs : dict, optional + Extra arguments to `metric`: refer to each metric documentation for a + list of all possible arguments. 
+ + Some possible arguments: + + p : scalar + The p-norm to apply for Minkowski, weighted and unweighted. + Default: 2. + + w : ndarray + The weight vector for metrics that support weights (e.g., Minkowski). + + V : ndarray + The variance vector for standardized Euclidean. + Default: var(X, axis=0, ddof=1) + + VI : ndarray + The inverse of the covariance matrix for Mahalanobis. + Default: inv(cov(X.T)).T + + out : ndarray. + The output array + If not None, condensed distance matrix Y is stored in this array. + Note: metric independent, it will become a regular keyword arg in a + future scipy version + + Returns + ------- + Y : ndarray + Returns a condensed distance matrix Y. For + each :math:`i` and :math:`j` (where :math:`i<j<m`),where m is the number + of original observations. The metric ``dist(u=X[i], v=X[j])`` + is computed and stored in entry ``ij``. + + See Also + -------- + squareform : converts between condensed distance matrices and + square distance matrices. + + Notes + ----- + See ``squareform`` for information on how to calculate the index of + this entry or to convert the condensed distance matrix to a + redundant square matrix. + + The following are common calling conventions. + + 1. ``Y = pdist(X, 'euclidean')`` + + Computes the distance between m points using Euclidean distance + (2-norm) as the distance metric between the points. The points + are arranged as m n-dimensional row vectors in the matrix X. + + 2. ``Y = pdist(X, 'minkowski', p=2.)`` + + Computes the distances using the Minkowski distance + :math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`. + + 3. ``Y = pdist(X, 'cityblock')`` + + Computes the city block or Manhattan distance between the + points. + + 4. ``Y = pdist(X, 'seuclidean', V=None)`` + + Computes the standardized Euclidean distance. The standardized + Euclidean distance between two n-vectors ``u`` and ``v`` is + + .. math:: + + \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}} + + + V is the variance vector; V[i] is the variance computed over all + the i'th components of the points. If not passed, it is + automatically computed. + + 5. ``Y = pdist(X, 'sqeuclidean')`` + + Computes the squared Euclidean distance :math:`||u-v||_2^2` between + the vectors. + + 6. ``Y = pdist(X, 'cosine')`` + + Computes the cosine distance between vectors u and v, + + .. math:: + + 1 - \\frac{u \\cdot v} + {{||u||}_2 {||v||}_2} + + where :math:`||*||_2` is the 2-norm of its argument ``*``, and + :math:`u \\cdot v` is the dot product of ``u`` and ``v``. + + 7. ``Y = pdist(X, 'correlation')`` + + Computes the correlation distance between vectors u and v. This is + + .. math:: + + 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})} + {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2} + + where :math:`\\bar{v}` is the mean of the elements of vector v, + and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`. + + 8. ``Y = pdist(X, 'hamming')`` + + Computes the normalized Hamming distance, or the proportion of + those vector elements between two n-vectors ``u`` and ``v`` + which disagree. To save memory, the matrix ``X`` can be of type + boolean. + + 9. ``Y = pdist(X, 'jaccard')`` + + Computes the Jaccard distance between the points. Given two + vectors, ``u`` and ``v``, the Jaccard distance is the + proportion of those elements ``u[i]`` and ``v[i]`` that + disagree. + + 10. ``Y = pdist(X, 'chebyshev')`` + + Computes the Chebyshev distance between the points. 
The + Chebyshev distance between two n-vectors ``u`` and ``v`` is the + maximum norm-1 distance between their respective elements. More + precisely, the distance is given by + + .. math:: + + d(u,v) = \\max_i {|u_i-v_i|} + + 11. ``Y = pdist(X, 'canberra')`` + + Computes the Canberra distance between the points. The + Canberra distance between two points ``u`` and ``v`` is + + .. math:: + + d(u,v) = \\sum_i \\frac{|u_i-v_i|} + {|u_i|+|v_i|} + + + 12. ``Y = pdist(X, 'braycurtis')`` + + Computes the Bray-Curtis distance between the points. The + Bray-Curtis distance between two points ``u`` and ``v`` is + + + .. math:: + + d(u,v) = \\frac{\\sum_i {|u_i-v_i|}} + {\\sum_i {|u_i+v_i|}} + + 13. ``Y = pdist(X, 'mahalanobis', VI=None)`` + + Computes the Mahalanobis distance between the points. The + Mahalanobis distance between two points ``u`` and ``v`` is + :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI`` + variable) is the inverse covariance. If ``VI`` is not None, + ``VI`` will be used as the inverse covariance matrix. + + 14. ``Y = pdist(X, 'yule')`` + + Computes the Yule distance between each pair of boolean + vectors. (see yule function documentation) + + 15. ``Y = pdist(X, 'matching')`` + + Synonym for 'hamming'. + + 16. ``Y = pdist(X, 'dice')`` + + Computes the Dice distance between each pair of boolean + vectors. (see dice function documentation) + + 17. ``Y = pdist(X, 'kulsinski')`` + + Computes the Kulsinski distance between each pair of + boolean vectors. (see kulsinski function documentation) + + 18. ``Y = pdist(X, 'rogerstanimoto')`` + + Computes the Rogers-Tanimoto distance between each pair of + boolean vectors. (see rogerstanimoto function documentation) + + 19. ``Y = pdist(X, 'russellrao')`` + + Computes the Russell-Rao distance between each pair of + boolean vectors. (see russellrao function documentation) + + 20. ``Y = pdist(X, 'sokalmichener')`` + + Computes the Sokal-Michener distance between each pair of + boolean vectors. (see sokalmichener function documentation) + + 21. ``Y = pdist(X, 'sokalsneath')`` + + Computes the Sokal-Sneath distance between each pair of + boolean vectors. (see sokalsneath function documentation) + + 22. ``Y = pdist(X, 'wminkowski', p=2, w=w)`` + + Computes the weighted Minkowski distance between each pair of + vectors. (see wminkowski function documentation) + + 23. ``Y = pdist(X, f)`` + + Computes the distance between all pairs of vectors in X + using the user supplied 2-arity function f. For example, + Euclidean distance between the vectors could be computed + as follows:: + + dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum())) + + Note that you should avoid passing a reference to one of + the distance functions defined in this library. For example,:: + + dm = pdist(X, sokalsneath) + + would calculate the pair-wise distances between the vectors in + X using the Python function sokalsneath. This would result in + sokalsneath being called :math:`{n \\choose 2}` times, which + is inefficient. Instead, the optimized C version is more + efficient, and we call it using the following syntax.:: + + dm = pdist(X, 'sokalsneath') + + """ + # You can also call this as: + # Y = pdist(X, 'test_abc') + # where 'abc' is the metric being tested. This computes the distance + # between all pairs of vectors in X using the distance metric 'abc' but + # with a more succinct, verifiable, but less efficient implementation. 
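+    # Outline of the steps below: validate and convert the input array,
+    # allocate (or sanity-check) the condensed output vector ``dm``, filter
+    # out kwargs that are deprecated for the chosen metric, and then
+    # dispatch: string metrics go to the optimized C wrappers, while
+    # callable metrics fall back to a Python double loop over all pairs.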
+ + X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True, + check_finite=False) + kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "pdist") + + X = np.asarray(X, order='c') + + s = X.shape + if len(s) != 2: + raise ValueError('A 2-dimensional array must be passed.') + + m, n = s + out = kwargs.pop("out", None) + if out is None: + dm = np.empty((m * (m - 1)) // 2, dtype=np.double) + else: + if out.shape != (m * (m - 1) // 2,): + raise ValueError("output array has incorrect shape.") + if not out.flags.c_contiguous: + raise ValueError("Output array must be C-contiguous.") + if out.dtype != np.double: + raise ValueError("Output array must be double type.") + dm = out + + # compute blacklist for deprecated kwargs + if(metric in _METRICS['jensenshannon'].aka + or metric == 'test_jensenshannon' or metric == jensenshannon): + kwargs_blacklist = ["p", "w", "V", "VI"] + + elif(metric in _METRICS['minkowski'].aka + or metric in _METRICS['wminkowski'].aka + or metric in ['test_minkowski', 'test_wminkowski'] + or metric in [minkowski, wminkowski]): + kwargs_blacklist = ["V", "VI"] + + elif(metric in _METRICS['seuclidean'].aka or + metric == 'test_seuclidean' or metric == seuclidean): + kwargs_blacklist = ["p", "w", "VI"] + + elif(metric in _METRICS['mahalanobis'].aka + or metric == 'test_mahalanobis' or metric == mahalanobis): + kwargs_blacklist = ["p", "w", "V"] + + else: + kwargs_blacklist = ["p", "V", "VI"] + + _filter_deprecated_kwargs(kwargs, kwargs_blacklist) + + if callable(metric): + mstr = getattr(metric, '__name__', 'UnknownCustomMetric') + metric_name = _METRIC_ALIAS.get(mstr, None) + + if metric_name is not None: + X, typ, kwargs = _validate_pdist_input(X, m, n, + metric_name, **kwargs) + + k = 0 + for i in xrange(0, m - 1): + for j in xrange(i + 1, m): + dm[k] = metric(X[i], X[j], **kwargs) + k = k + 1 + + elif isinstance(metric, string_types): + mstr = metric.lower() + + mstr, kwargs = _select_weighted_metric(mstr, kwargs, out) + + metric_name = _METRIC_ALIAS.get(mstr, None) + + if metric_name is not None: + X, typ, kwargs = _validate_pdist_input(X, m, n, + metric_name, **kwargs) + + # get pdist wrapper + pdist_fn = getattr(_distance_wrap, + "pdist_%s_%s_wrap" % (metric_name, typ)) + pdist_fn(X, dm, **kwargs) + return dm + + elif mstr in ['old_cosine', 'old_cos']: + warnings.warn('"old_cosine" is deprecated and will be removed in ' + 'a future version. Use "cosine" instead.', + DeprecationWarning) + X = _convert_to_double(X) + norms = np.einsum('ij,ij->i', X, X, dtype=np.double) + np.sqrt(norms, out=norms) + nV = norms.reshape(m, 1) + # The numerator u * v + nm = np.dot(X, X.T) + # The denom. ||u||*||v|| + de = np.dot(nV, nV.T) + dm = 1.0 - (nm / de) + dm[xrange(0, m), xrange(0, m)] = 0.0 + dm = squareform(dm) + elif mstr.startswith("test_"): + if mstr in _TEST_METRICS: + dm = pdist(X, _TEST_METRICS[mstr], **kwargs) + else: + raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:]) + else: + raise ValueError('Unknown Distance Metric: %s' % mstr) + else: + raise TypeError('2nd argument metric must be a string identifier ' + 'or a function.') + return dm + + +def squareform(X, force="no", checks=True): + """ + Convert a vector-form distance vector to a square-form distance + matrix, and vice-versa. + + Parameters + ---------- + X : ndarray + Either a condensed or redundant distance matrix. 
+
+    force : str, optional
+        As with MATLAB(TM), if force is equal to ``'tovector'`` or
+        ``'tomatrix'``, the input will be treated as a distance matrix or
+        distance vector respectively.
+    checks : bool, optional
+        If set to False, no checks will be made for matrix
+        symmetry or zero diagonals. This is useful if it is known that
+        ``X - X.T`` is small and ``diag(X)`` is close to zero.
+        These values are ignored anyway, so they do not disrupt the
+        squareform transformation.
+
+    Returns
+    -------
+    Y : ndarray
+        If a condensed distance matrix is passed, a redundant one is
+        returned, or if a redundant one is passed, a condensed distance
+        matrix is returned.
+
+    Notes
+    -----
+    1. ``v = squareform(X)``
+
+       Given a square d-by-d symmetric distance matrix X,
+       ``v = squareform(X)`` returns a ``d * (d-1) / 2`` (or
+       :math:`{n \\choose 2}`) sized vector v.
+
+       :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` is the distance
+       between points i and j. If X is non-square or asymmetric, an error
+       is raised.
+
+    2. ``X = squareform(v)``
+
+       Given a ``d*(d-1)/2`` sized v for some integer ``d >= 2`` encoding
+       distances as described, ``X = squareform(v)`` returns a d by d distance
+       matrix X. The ``X[i, j]`` and ``X[j, i]`` values are set to
+       :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` and all
+       diagonal elements are zero.
+
+    In SciPy 0.19.0, ``squareform`` stopped casting all input types to
+    float64, and started returning arrays of the same dtype as the input.
+
+    """
+
+    X = np.ascontiguousarray(X)
+
+    s = X.shape
+
+    if force.lower() == 'tomatrix':
+        if len(s) != 1:
+            raise ValueError("Forcing 'tomatrix' but input X is not a "
+                             "distance vector.")
+    elif force.lower() == 'tovector':
+        if len(s) != 2:
+            raise ValueError("Forcing 'tovector' but input X is not a "
+                             "distance matrix.")
+
+    # X = squareform(v)
+    if len(s) == 1:
+        if s[0] == 0:
+            return np.zeros((1, 1), dtype=X.dtype)
+
+        # Grab the closest value to the square root of the number
+        # of elements times 2 to see if the number of elements
+        # is indeed a binomial coefficient.
+        d = int(np.ceil(np.sqrt(s[0] * 2)))
+
+        # Check that v is of valid dimensions.
+        if d * (d - 1) != s[0] * 2:
+            raise ValueError('Incompatible vector size. It must be a binomial '
+                             'coefficient n choose 2 for some integer n >= 2.')
+
+        # Allocate memory for the distance matrix.
+        M = np.zeros((d, d), dtype=X.dtype)
+
+        # The C code does not support strided arrays, so copy X if it is a
+        # view on another array; the dimensions are used instead of strides.
+        X = _copy_array_if_base_present(X)
+
+        # Fill in the values of the distance matrix.
+        _distance_wrap.to_squareform_from_vector_wrap(M, X)
+
+        # Return the distance matrix.
+        return M
+    elif len(s) == 2:
+        if s[0] != s[1]:
+            raise ValueError('The matrix argument must be square.')
+        if checks:
+            is_valid_dm(X, throw=True, name='X')
+
+        # One side of the dimensions is set here.
+        d = s[0]
+
+        if d <= 1:
+            return np.array([], dtype=X.dtype)
+
+        # Create a vector.
+        v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)
+
+        # The C code does not support strided arrays, so copy X if it is a
+        # view on another array; the dimensions are used instead of strides.
+        X = _copy_array_if_base_present(X)
+
+        # Convert the square matrix to a condensed vector.
+        _distance_wrap.to_vector_from_squareform_wrap(X, v)
+        return v
+    else:
+        raise ValueError(('The first argument must be a one- or '
+                          'two-dimensional array. A %d-dimensional array is '
+                          'not permitted') % len(s))
+
+
+def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
+    """
+    Return True if input array is a valid distance matrix.
+
+    Distance matrices must be 2-dimensional numpy arrays.
+    They must have a zero diagonal, and they must be symmetric.
+
+    Parameters
+    ----------
+    D : ndarray
+        The candidate object to test for validity.
+    tol : float, optional
+        The distance matrix should be symmetric. `tol` is the maximum
+        difference between entries ``ij`` and ``ji`` for the distance
+        metric to be considered symmetric.
+    throw : bool, optional
+        An exception is thrown if the distance matrix passed is not valid.
+    name : str, optional
+        The name of the variable to be checked. This is useful if
+        throw is set to True so the offending variable can be identified
+        in the exception message when an exception is thrown.
+    warning : bool, optional
+        Instead of throwing an exception, a warning message is
+        issued.
+
+    Returns
+    -------
+    valid : bool
+        True if the variable `D` passed is a valid distance matrix.
+
+    Notes
+    -----
+    Small numerical differences in `D` and `D.T` and non-zeroness of
+    the diagonal are ignored if they are within the tolerance specified
+    by `tol`.
+
+    """
+    D = np.asarray(D, order='c')
+    valid = True
+    try:
+        s = D.shape
+        if len(D.shape) != 2:
+            if name:
+                raise ValueError(('Distance matrix \'%s\' must have shape=2 '
+                                  '(i.e. be two-dimensional).') % name)
+            else:
+                raise ValueError('Distance matrix must have shape=2 (i.e. '
+                                 'be two-dimensional).')
+        if tol == 0.0:
+            if not (D == D.T).all():
+                if name:
+                    raise ValueError(('Distance matrix \'%s\' must be '
+                                      'symmetric.') % name)
+                else:
+                    raise ValueError('Distance matrix must be symmetric.')
+            if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
+                if name:
+                    raise ValueError(('Distance matrix \'%s\' diagonal must '
+                                      'be zero.') % name)
+                else:
+                    raise ValueError('Distance matrix diagonal must be zero.')
+        else:
+            if not (D - D.T <= tol).all():
+                if name:
+                    raise ValueError(('Distance matrix \'%s\' must be '
+                                      'symmetric within tolerance %5.5f.')
+                                     % (name, tol))
+                else:
+                    raise ValueError('Distance matrix must be symmetric within'
+                                     ' tolerance %5.5f.' % tol)
+            if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
+                if name:
+                    raise ValueError(('Distance matrix \'%s\' diagonal must be'
+                                      ' close to zero within tolerance %5.5f.')
+                                     % (name, tol))
+                else:
+                    raise ValueError(('Distance matrix diagonal must be close '
+                                      'to zero within tolerance %5.5f.') % tol)
+    except Exception as e:
+        if throw:
+            raise
+        if warning:
+            warnings.warn(str(e))
+        valid = False
+    return valid
+
+
+def is_valid_y(y, warning=False, throw=False, name=None):
+    """
+    Return True if the input array is a valid condensed distance matrix.
+
+    Condensed distance matrices must be 1-dimensional numpy arrays.
+    Their length must be a binomial coefficient :math:`{n \\choose 2}`
+    for some positive integer n.
+
+    Parameters
+    ----------
+    y : ndarray
+        The condensed distance matrix.
+    warning : bool, optional
+        Invokes a warning if the variable passed is not a valid
+        condensed distance matrix. The warning message explains why
+        the distance matrix is not valid. `name` is used when
+        referencing the offending variable.
+    throw : bool, optional
+        Throws an exception if the variable passed is not a valid
+        condensed distance matrix.
+    name : str, optional
+        Used when referencing the offending variable in the
+        warning or exception message.
+
+    """
+    y = np.asarray(y, order='c')
+    valid = True
+    try:
+        if len(y.shape) != 1:
+            if name:
+                raise ValueError(('Condensed distance matrix \'%s\' must '
+                                  'have shape=1 (i.e.
be one-dimensional).') + % name) + else: + raise ValueError('Condensed distance matrix must have shape=1 ' + '(i.e. be one-dimensional).') + n = y.shape[0] + d = int(np.ceil(np.sqrt(n * 2))) + if (d * (d - 1) / 2) != n: + if name: + raise ValueError(('Length n of condensed distance matrix ' + '\'%s\' must be a binomial coefficient, i.e.' + 'there must be a k such that ' + '(k \\choose 2)=n)!') % name) + else: + raise ValueError('Length n of condensed distance matrix must ' + 'be a binomial coefficient, i.e. there must ' + 'be a k such that (k \\choose 2)=n)!') + except Exception as e: + if throw: + raise + if warning: + warnings.warn(str(e)) + valid = False + return valid + + +def num_obs_dm(d): + """ + Return the number of original observations that correspond to a + square, redundant distance matrix. + + Parameters + ---------- + d : ndarray + The target distance matrix. + + Returns + ------- + num_obs_dm : int + The number of observations in the redundant distance matrix. + + """ + d = np.asarray(d, order='c') + is_valid_dm(d, tol=np.inf, throw=True, name='d') + return d.shape[0] + + +def num_obs_y(Y): + """ + Return the number of original observations that correspond to a + condensed distance matrix. + + Parameters + ---------- + Y : ndarray + Condensed distance matrix. + + Returns + ------- + n : int + The number of observations in the condensed distance matrix `Y`. + + """ + Y = np.asarray(Y, order='c') + is_valid_y(Y, throw=True, name='Y') + k = Y.shape[0] + if k == 0: + raise ValueError("The number of observations cannot be determined on " + "an empty distance matrix.") + d = int(np.ceil(np.sqrt(k * 2))) + if (d * (d - 1) / 2) != k: + raise ValueError("Invalid condensed distance matrix passed. Must be " + "some k where k=(n choose 2) for some n >= 2.") + return d + + +def cdist(XA, XB, metric='euclidean', *args, **kwargs): + """ + Compute distance between each pair of the two collections of inputs. + + See Notes for common calling conventions. + + Parameters + ---------- + XA : ndarray + An :math:`m_A` by :math:`n` array of :math:`m_A` + original observations in an :math:`n`-dimensional space. + Inputs are converted to float type. + XB : ndarray + An :math:`m_B` by :math:`n` array of :math:`m_B` + original observations in an :math:`n`-dimensional space. + Inputs are converted to float type. + metric : str or callable, optional + The distance metric to use. If a string, the distance function can be + 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', + 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon', + 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', + 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', + 'wminkowski', 'yule'. + *args : tuple. Deprecated. + Additional arguments should be passed as keyword arguments + **kwargs : dict, optional + Extra arguments to `metric`: refer to each metric documentation for a + list of all possible arguments. + + Some possible arguments: + + p : scalar + The p-norm to apply for Minkowski, weighted and unweighted. + Default: 2. + + w : ndarray + The weight vector for metrics that support weights (e.g., Minkowski). + + V : ndarray + The variance vector for standardized Euclidean. + Default: var(vstack([XA, XB]), axis=0, ddof=1) + + VI : ndarray + The inverse of the covariance matrix for Mahalanobis. + Default: inv(cov(vstack([XA, XB].T))).T + + out : ndarray + The output array + If not None, the distance matrix Y is stored in this array. 
+ Note: metric independent, it will become a regular keyword arg in a + future scipy version + + Returns + ------- + Y : ndarray + A :math:`m_A` by :math:`m_B` distance matrix is returned. + For each :math:`i` and :math:`j`, the metric + ``dist(u=XA[i], v=XB[j])`` is computed and stored in the + :math:`ij` th entry. + + Raises + ------ + ValueError + An exception is thrown if `XA` and `XB` do not have + the same number of columns. + + Notes + ----- + The following are common calling conventions: + + 1. ``Y = cdist(XA, XB, 'euclidean')`` + + Computes the distance between :math:`m` points using + Euclidean distance (2-norm) as the distance metric between the + points. The points are arranged as :math:`m` + :math:`n`-dimensional row vectors in the matrix X. + + 2. ``Y = cdist(XA, XB, 'minkowski', p=2.)`` + + Computes the distances using the Minkowski distance + :math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`. + + 3. ``Y = cdist(XA, XB, 'cityblock')`` + + Computes the city block or Manhattan distance between the + points. + + 4. ``Y = cdist(XA, XB, 'seuclidean', V=None)`` + + Computes the standardized Euclidean distance. The standardized + Euclidean distance between two n-vectors ``u`` and ``v`` is + + .. math:: + + \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}. + + V is the variance vector; V[i] is the variance computed over all + the i'th components of the points. If not passed, it is + automatically computed. + + 5. ``Y = cdist(XA, XB, 'sqeuclidean')`` + + Computes the squared Euclidean distance :math:`||u-v||_2^2` between + the vectors. + + 6. ``Y = cdist(XA, XB, 'cosine')`` + + Computes the cosine distance between vectors u and v, + + .. math:: + + 1 - \\frac{u \\cdot v} + {{||u||}_2 {||v||}_2} + + where :math:`||*||_2` is the 2-norm of its argument ``*``, and + :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`. + + 7. ``Y = cdist(XA, XB, 'correlation')`` + + Computes the correlation distance between vectors u and v. This is + + .. math:: + + 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})} + {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2} + + where :math:`\\bar{v}` is the mean of the elements of vector v, + and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`. + + + 8. ``Y = cdist(XA, XB, 'hamming')`` + + Computes the normalized Hamming distance, or the proportion of + those vector elements between two n-vectors ``u`` and ``v`` + which disagree. To save memory, the matrix ``X`` can be of type + boolean. + + 9. ``Y = cdist(XA, XB, 'jaccard')`` + + Computes the Jaccard distance between the points. Given two + vectors, ``u`` and ``v``, the Jaccard distance is the + proportion of those elements ``u[i]`` and ``v[i]`` that + disagree where at least one of them is non-zero. + + 10. ``Y = cdist(XA, XB, 'chebyshev')`` + + Computes the Chebyshev distance between the points. The + Chebyshev distance between two n-vectors ``u`` and ``v`` is the + maximum norm-1 distance between their respective elements. More + precisely, the distance is given by + + .. math:: + + d(u,v) = \\max_i {|u_i-v_i|}. + + 11. ``Y = cdist(XA, XB, 'canberra')`` + + Computes the Canberra distance between the points. The + Canberra distance between two points ``u`` and ``v`` is + + .. math:: + + d(u,v) = \\sum_i \\frac{|u_i-v_i|} + {|u_i|+|v_i|}. + + 12. ``Y = cdist(XA, XB, 'braycurtis')`` + + Computes the Bray-Curtis distance between the points. The + Bray-Curtis distance between two points ``u`` and ``v`` is + + + .. 
math:: + + d(u,v) = \\frac{\\sum_i (|u_i-v_i|)} + {\\sum_i (|u_i+v_i|)} + + 13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)`` + + Computes the Mahalanobis distance between the points. The + Mahalanobis distance between two points ``u`` and ``v`` is + :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI`` + variable) is the inverse covariance. If ``VI`` is not None, + ``VI`` will be used as the inverse covariance matrix. + + 14. ``Y = cdist(XA, XB, 'yule')`` + + Computes the Yule distance between the boolean + vectors. (see `yule` function documentation) + + 15. ``Y = cdist(XA, XB, 'matching')`` + + Synonym for 'hamming'. + + 16. ``Y = cdist(XA, XB, 'dice')`` + + Computes the Dice distance between the boolean vectors. (see + `dice` function documentation) + + 17. ``Y = cdist(XA, XB, 'kulsinski')`` + + Computes the Kulsinski distance between the boolean + vectors. (see `kulsinski` function documentation) + + 18. ``Y = cdist(XA, XB, 'rogerstanimoto')`` + + Computes the Rogers-Tanimoto distance between the boolean + vectors. (see `rogerstanimoto` function documentation) + + 19. ``Y = cdist(XA, XB, 'russellrao')`` + + Computes the Russell-Rao distance between the boolean + vectors. (see `russellrao` function documentation) + + 20. ``Y = cdist(XA, XB, 'sokalmichener')`` + + Computes the Sokal-Michener distance between the boolean + vectors. (see `sokalmichener` function documentation) + + 21. ``Y = cdist(XA, XB, 'sokalsneath')`` + + Computes the Sokal-Sneath distance between the vectors. (see + `sokalsneath` function documentation) + + + 22. ``Y = cdist(XA, XB, 'wminkowski', p=2., w=w)`` + + Computes the weighted Minkowski distance between the + vectors. (see `wminkowski` function documentation) + + 23. ``Y = cdist(XA, XB, f)`` + + Computes the distance between all pairs of vectors in X + using the user supplied 2-arity function f. For example, + Euclidean distance between the vectors could be computed + as follows:: + + dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum())) + + Note that you should avoid passing a reference to one of + the distance functions defined in this library. For example,:: + + dm = cdist(XA, XB, sokalsneath) + + would calculate the pair-wise distances between the vectors in + X using the Python function `sokalsneath`. This would result in + sokalsneath being called :math:`{n \\choose 2}` times, which + is inefficient. Instead, the optimized C version is more + efficient, and we call it using the following syntax:: + + dm = cdist(XA, XB, 'sokalsneath') + + Examples + -------- + Find the Euclidean distances between four 2-D coordinates: + + >>> from scipy.spatial import distance + >>> coords = [(35.0456, -85.2672), + ... (35.1174, -89.9711), + ... (35.9728, -83.9422), + ... (36.1667, -86.7833)] + >>> distance.cdist(coords, coords, 'euclidean') + array([[ 0. , 4.7044, 1.6172, 1.8856], + [ 4.7044, 0. , 6.0893, 3.3561], + [ 1.6172, 6.0893, 0. , 2.8477], + [ 1.8856, 3.3561, 2.8477, 0. ]]) + + + Find the Manhattan distance from a 3-D point to the corners of the unit + cube: + + >>> a = np.array([[0, 0, 0], + ... [0, 0, 1], + ... [0, 1, 0], + ... [0, 1, 1], + ... [1, 0, 0], + ... [1, 0, 1], + ... [1, 1, 0], + ... [1, 1, 1]]) + >>> b = np.array([[ 0.1, 0.2, 0.4]]) + >>> distance.cdist(a, b, 'cityblock') + array([[ 0.7], + [ 0.9], + [ 1.3], + [ 1.5], + [ 1.5], + [ 1.7], + [ 2.1], + [ 2.3]]) + + """ + # You can also call this as: + # Y = cdist(XA, XB, 'test_abc') + # where 'abc' is the metric being tested. 
This computes the distance + # between all pairs of vectors in XA and XB using the distance metric 'abc' + # but with a more succinct, verifiable, but less efficient implementation. + + kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "cdist") + + XA = np.asarray(XA, order='c') + XB = np.asarray(XB, order='c') + + s = XA.shape + sB = XB.shape + + if len(s) != 2: + raise ValueError('XA must be a 2-dimensional array.') + if len(sB) != 2: + raise ValueError('XB must be a 2-dimensional array.') + if s[1] != sB[1]: + raise ValueError('XA and XB must have the same number of columns ' + '(i.e. feature dimension.)') + + mA = s[0] + mB = sB[0] + n = s[1] + out = kwargs.pop("out", None) + if out is None: + dm = np.empty((mA, mB), dtype=np.double) + else: + if out.shape != (mA, mB): + raise ValueError("Output array has incorrect shape.") + if not out.flags.c_contiguous: + raise ValueError("Output array must be C-contiguous.") + if out.dtype != np.double: + raise ValueError("Output array must be double type.") + dm = out + + # compute blacklist for deprecated kwargs + if(metric in _METRICS['minkowski'].aka or + metric in _METRICS['wminkowski'].aka or + metric in ['test_minkowski', 'test_wminkowski'] or + metric in [minkowski, wminkowski]): + kwargs_blacklist = ["V", "VI"] + elif(metric in _METRICS['seuclidean'].aka or + metric == 'test_seuclidean' or metric == seuclidean): + kwargs_blacklist = ["p", "w", "VI"] + elif(metric in _METRICS['mahalanobis'].aka or + metric == 'test_mahalanobis' or metric == mahalanobis): + kwargs_blacklist = ["p", "w", "V"] + else: + kwargs_blacklist = ["p", "V", "VI"] + + _filter_deprecated_kwargs(kwargs, kwargs_blacklist) + + if callable(metric): + + mstr = getattr(metric, '__name__', 'Unknown') + metric_name = _METRIC_ALIAS.get(mstr, None) + + XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n, + metric_name, **kwargs) + + for i in xrange(0, mA): + for j in xrange(0, mB): + dm[i, j] = metric(XA[i], XB[j], **kwargs) + + elif isinstance(metric, string_types): + mstr = metric.lower() + + mstr, kwargs = _select_weighted_metric(mstr, kwargs, out) + + metric_name = _METRIC_ALIAS.get(mstr, None) + if metric_name is not None: + XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n, + metric_name, **kwargs) + # get cdist wrapper + cdist_fn = getattr(_distance_wrap, + "cdist_%s_%s_wrap" % (metric_name, typ)) + cdist_fn(XA, XB, dm, **kwargs) + return dm + + elif mstr.startswith("test_"): + if mstr in _TEST_METRICS: + dm = cdist(XA, XB, _TEST_METRICS[mstr], **kwargs) + else: + raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:]) + else: + raise ValueError('Unknown Distance Metric: %s' % mstr) + else: + raise TypeError('2nd argument metric must be a string identifier ' + 'or a function.') + return dm diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/distance.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/distance.pyc new file mode 100644 index 0000000..8dec911 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/distance.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/kdtree.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/kdtree.py new file mode 100644 index 0000000..b2694de --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/kdtree.py @@ -0,0 +1,987 @@ +# Copyright Anne M. 
Archibald 2008 +# Released under the scipy license +from __future__ import division, print_function, absolute_import + +import sys +import numpy as np +from heapq import heappush, heappop +import scipy.sparse + +__all__ = ['minkowski_distance_p', 'minkowski_distance', + 'distance_matrix', + 'Rectangle', 'KDTree'] + + +def minkowski_distance_p(x, y, p=2): + """ + Compute the p-th power of the L**p distance between two arrays. + + For efficiency, this function computes the L**p distance but does + not extract the pth root. If `p` is 1 or infinity, this is equal to + the actual L**p distance. + + Parameters + ---------- + x : (M, K) array_like + Input array. + y : (N, K) array_like + Input array. + p : float, 1 <= p <= infinity + Which Minkowski p-norm to use. + + Examples + -------- + >>> from scipy.spatial import minkowski_distance_p + >>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]]) + array([2, 1]) + + """ + x = np.asarray(x) + y = np.asarray(y) + if p == np.inf: + return np.amax(np.abs(y-x), axis=-1) + elif p == 1: + return np.sum(np.abs(y-x), axis=-1) + else: + return np.sum(np.abs(y-x)**p, axis=-1) + + +def minkowski_distance(x, y, p=2): + """ + Compute the L**p distance between two arrays. + + Parameters + ---------- + x : (M, K) array_like + Input array. + y : (N, K) array_like + Input array. + p : float, 1 <= p <= infinity + Which Minkowski p-norm to use. + + Examples + -------- + >>> from scipy.spatial import minkowski_distance + >>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]]) + array([ 1.41421356, 1. ]) + + """ + x = np.asarray(x) + y = np.asarray(y) + if p == np.inf or p == 1: + return minkowski_distance_p(x, y, p) + else: + return minkowski_distance_p(x, y, p)**(1./p) + + +class Rectangle(object): + """Hyperrectangle class. + + Represents a Cartesian product of intervals. + """ + def __init__(self, maxes, mins): + """Construct a hyperrectangle.""" + self.maxes = np.maximum(maxes,mins).astype(float) + self.mins = np.minimum(maxes,mins).astype(float) + self.m, = self.maxes.shape + + def __repr__(self): + return "<Rectangle %s>" % list(zip(self.mins, self.maxes)) + + def volume(self): + """Total volume.""" + return np.prod(self.maxes-self.mins) + + def split(self, d, split): + """ + Produce two hyperrectangles by splitting. + + In general, if you need to compute maximum and minimum + distances to the children, it can be done more efficiently + by updating the maximum and minimum distances to the parent. + + Parameters + ---------- + d : int + Axis to split hyperrectangle along. + split : float + Position along axis `d` to split at. + + """ + mid = np.copy(self.maxes) + mid[d] = split + less = Rectangle(self.mins, mid) + mid = np.copy(self.mins) + mid[d] = split + greater = Rectangle(mid, self.maxes) + return less, greater + + def min_distance_point(self, x, p=2.): + """ + Return the minimum distance between input and points in the hyperrectangle. + + Parameters + ---------- + x : array_like + Input. + p : float, optional + Input. + + """ + return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p) + + def max_distance_point(self, x, p=2.): + """ + Return the maximum distance between input and points in the hyperrectangle. + + Parameters + ---------- + x : array_like + Input array. + p : float, optional + Input. + + """ + return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p) + + def min_distance_rectangle(self, other, p=2.): + """ + Compute the minimum distance between points in the two hyperrectangles. 
+ + Parameters + ---------- + other : hyperrectangle + Input. + p : float + Input. + + """ + return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p) + + def max_distance_rectangle(self, other, p=2.): + """ + Compute the maximum distance between points in the two hyperrectangles. + + Parameters + ---------- + other : hyperrectangle + Input. + p : float, optional + Input. + + """ + return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p) + + +class KDTree(object): + """ + kd-tree for quick nearest-neighbor lookup + + This class provides an index into a set of k-dimensional points which + can be used to rapidly look up the nearest neighbors of any point. + + Parameters + ---------- + data : (N,K) array_like + The data points to be indexed. This array is not copied, and + so modifying this data will result in bogus results. + leafsize : int, optional + The number of points at which the algorithm switches over to + brute-force. Has to be positive. + + Raises + ------ + RuntimeError + The maximum recursion limit can be exceeded for large data + sets. If this happens, either increase the value for the `leafsize` + parameter or increase the recursion limit by:: + + >>> import sys + >>> sys.setrecursionlimit(10000) + + See Also + -------- + cKDTree : Implementation of `KDTree` in Cython + + Notes + ----- + The algorithm used is described in Maneewongvatana and Mount 1999. + The general idea is that the kd-tree is a binary tree, each of whose + nodes represents an axis-aligned hyperrectangle. Each node specifies + an axis and splits the set of points based on whether their coordinate + along that axis is greater than or less than a particular value. + + During construction, the axis and splitting point are chosen by the + "sliding midpoint" rule, which ensures that the cells do not all + become long and thin. + + The tree can be queried for the r closest neighbors of any given point + (optionally returning only those within some maximum distance of the + point). It can also be queried, with a substantial gain in efficiency, + for the r approximate closest neighbors. + + For large dimensions (20 is already large) do not expect this to run + significantly faster than brute force. High-dimensional nearest-neighbor + queries are a substantial open problem in computer science. + + The tree also supports all-neighbors queries, both with arrays of points + and with other kd-trees. These do use a reasonably efficient algorithm, + but the kd-tree is not necessarily the best data structure for this + sort of calculation. 
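+
+    Examples
+    --------
+    A minimal usage sketch: build a tree over random points and query the
+    three nearest neighbors of one of them.
+
+    >>> import numpy as np
+    >>> from scipy.spatial import KDTree
+    >>> rng = np.random.RandomState(0)
+    >>> points = rng.rand(10, 2)
+    >>> tree = KDTree(points, leafsize=5)
+    >>> dist, idx = tree.query(points[0], k=3)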
+ + """ + def __init__(self, data, leafsize=10): + self.data = np.asarray(data) + self.n, self.m = np.shape(self.data) + self.leafsize = int(leafsize) + if self.leafsize < 1: + raise ValueError("leafsize must be at least 1") + self.maxes = np.amax(self.data,axis=0) + self.mins = np.amin(self.data,axis=0) + + self.tree = self.__build(np.arange(self.n), self.maxes, self.mins) + + class node(object): + if sys.version_info[0] >= 3: + def __lt__(self, other): + return id(self) < id(other) + + def __gt__(self, other): + return id(self) > id(other) + + def __le__(self, other): + return id(self) <= id(other) + + def __ge__(self, other): + return id(self) >= id(other) + + def __eq__(self, other): + return id(self) == id(other) + + class leafnode(node): + def __init__(self, idx): + self.idx = idx + self.children = len(idx) + + class innernode(node): + def __init__(self, split_dim, split, less, greater): + self.split_dim = split_dim + self.split = split + self.less = less + self.greater = greater + self.children = less.children+greater.children + + def __build(self, idx, maxes, mins): + if len(idx) <= self.leafsize: + return KDTree.leafnode(idx) + else: + data = self.data[idx] + # maxes = np.amax(data,axis=0) + # mins = np.amin(data,axis=0) + d = np.argmax(maxes-mins) + maxval = maxes[d] + minval = mins[d] + if maxval == minval: + # all points are identical; warn user? + return KDTree.leafnode(idx) + data = data[:,d] + + # sliding midpoint rule; see Maneewongvatana and Mount 1999 + # for arguments that this is a good idea. + split = (maxval+minval)/2 + less_idx = np.nonzero(data <= split)[0] + greater_idx = np.nonzero(data > split)[0] + if len(less_idx) == 0: + split = np.amin(data) + less_idx = np.nonzero(data <= split)[0] + greater_idx = np.nonzero(data > split)[0] + if len(greater_idx) == 0: + split = np.amax(data) + less_idx = np.nonzero(data < split)[0] + greater_idx = np.nonzero(data >= split)[0] + if len(less_idx) == 0: + # _still_ zero? 
all must have the same value + if not np.all(data == data[0]): + raise ValueError("Troublesome data array: %s" % data) + split = data[0] + less_idx = np.arange(len(data)-1) + greater_idx = np.array([len(data)-1]) + + lessmaxes = np.copy(maxes) + lessmaxes[d] = split + greatermins = np.copy(mins) + greatermins[d] = split + return KDTree.innernode(d, split, + self.__build(idx[less_idx],lessmaxes,mins), + self.__build(idx[greater_idx],maxes,greatermins)) + + def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf): + + side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x)) + if p != np.inf: + side_distances **= p + min_distance = np.sum(side_distances) + else: + min_distance = np.amax(side_distances) + + # priority queue for chasing nodes + # entries are: + # minimum distance between the cell and the target + # distances between the nearest side of the cell and the target + # the head node of the cell + q = [(min_distance, + tuple(side_distances), + self.tree)] + # priority queue for the nearest neighbors + # furthest known neighbor first + # entries are (-distance**p, i) + neighbors = [] + + if eps == 0: + epsfac = 1 + elif p == np.inf: + epsfac = 1/(1+eps) + else: + epsfac = 1/(1+eps)**p + + if p != np.inf and distance_upper_bound != np.inf: + distance_upper_bound = distance_upper_bound**p + + while q: + min_distance, side_distances, node = heappop(q) + if isinstance(node, KDTree.leafnode): + # brute-force + data = self.data[node.idx] + ds = minkowski_distance_p(data,x[np.newaxis,:],p) + for i in range(len(ds)): + if ds[i] < distance_upper_bound: + if len(neighbors) == k: + heappop(neighbors) + heappush(neighbors, (-ds[i], node.idx[i])) + if len(neighbors) == k: + distance_upper_bound = -neighbors[0][0] + else: + # we don't push cells that are too far onto the queue at all, + # but since the distance_upper_bound decreases, we might get + # here even if the cell's too far + if min_distance > distance_upper_bound*epsfac: + # since this is the nearest cell, we're done, bail out + break + # compute minimum distances to the children and push them on + if x[node.split_dim] < node.split: + near, far = node.less, node.greater + else: + near, far = node.greater, node.less + + # near child is at the same distance as the current node + heappush(q,(min_distance, side_distances, near)) + + # far child is further by an amount depending only + # on the split value + sd = list(side_distances) + if p == np.inf: + min_distance = max(min_distance, abs(node.split-x[node.split_dim])) + elif p == 1: + sd[node.split_dim] = np.abs(node.split-x[node.split_dim]) + min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim] + else: + sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p + min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim] + + # far child might be too far, if so, don't bother pushing it + if min_distance <= distance_upper_bound*epsfac: + heappush(q,(min_distance, tuple(sd), far)) + + if p == np.inf: + return sorted([(-d,i) for (d,i) in neighbors]) + else: + return sorted([((-d)**(1./p),i) for (d,i) in neighbors]) + + def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf): + """ + Query the kd-tree for nearest neighbors + + Parameters + ---------- + x : array_like, last dimension self.m + An array of points to query. + k : int, optional + The number of nearest neighbors to return. 
+ eps : nonnegative float, optional + Return approximate nearest neighbors; the kth returned value + is guaranteed to be no further than (1+eps) times the + distance to the real kth nearest neighbor. + p : float, 1<=p<=infinity, optional + Which Minkowski p-norm to use. + 1 is the sum-of-absolute-values "Manhattan" distance + 2 is the usual Euclidean distance + infinity is the maximum-coordinate-difference distance + distance_upper_bound : nonnegative float, optional + Return only neighbors within this distance. This is used to prune + tree searches, so if you are doing a series of nearest-neighbor + queries, it may help to supply the distance to the nearest neighbor + of the most recent point. + + Returns + ------- + d : float or array of floats + The distances to the nearest neighbors. + If x has shape tuple+(self.m,), then d has shape tuple if + k is one, or tuple+(k,) if k is larger than one. Missing + neighbors (e.g. when k > n or distance_upper_bound is + given) are indicated with infinite distances. If k is None, + then d is an object array of shape tuple, containing lists + of distances. In either case the hits are sorted by distance + (nearest first). + i : integer or array of integers + The locations of the neighbors in self.data. i is the same + shape as d. + + Examples + -------- + >>> from scipy import spatial + >>> x, y = np.mgrid[0:5, 2:8] + >>> tree = spatial.KDTree(list(zip(x.ravel(), y.ravel()))) + >>> tree.data + array([[0, 2], + [0, 3], + [0, 4], + [0, 5], + [0, 6], + [0, 7], + [1, 2], + [1, 3], + [1, 4], + [1, 5], + [1, 6], + [1, 7], + [2, 2], + [2, 3], + [2, 4], + [2, 5], + [2, 6], + [2, 7], + [3, 2], + [3, 3], + [3, 4], + [3, 5], + [3, 6], + [3, 7], + [4, 2], + [4, 3], + [4, 4], + [4, 5], + [4, 6], + [4, 7]]) + >>> pts = np.array([[0, 0], [2.1, 2.9]]) + >>> tree.query(pts) + (array([ 2. 
, 0.14142136]), array([ 0, 13]))
+        >>> tree.query(pts[0])
+        (2.0, 0)
+
+        """
+        x = np.asarray(x)
+        if np.shape(x)[-1] != self.m:
+            raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
+        if p < 1:
+            raise ValueError("Only p-norms with 1<=p<=infinity permitted")
+        retshape = np.shape(x)[:-1]
+        if retshape != ():
+            if k is None:
+                dd = np.empty(retshape,dtype=object)
+                ii = np.empty(retshape,dtype=object)
+            elif k > 1:
+                dd = np.empty(retshape+(k,),dtype=float)
+                dd.fill(np.inf)
+                ii = np.empty(retshape+(k,),dtype=int)
+                ii.fill(self.n)
+            elif k == 1:
+                dd = np.empty(retshape,dtype=float)
+                dd.fill(np.inf)
+                ii = np.empty(retshape,dtype=int)
+                ii.fill(self.n)
+            else:
+                raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None" % k)
+            for c in np.ndindex(retshape):
+                hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
+                if k is None:
+                    dd[c] = [d for (d,i) in hits]
+                    ii[c] = [i for (d,i) in hits]
+                elif k > 1:
+                    for j in range(len(hits)):
+                        dd[c+(j,)], ii[c+(j,)] = hits[j]
+                elif k == 1:
+                    if len(hits) > 0:
+                        dd[c], ii[c] = hits[0]
+                    else:
+                        dd[c] = np.inf
+                        ii[c] = self.n
+            return dd, ii
+        else:
+            hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
+            if k is None:
+                return [d for (d,i) in hits], [i for (d,i) in hits]
+            elif k == 1:
+                if len(hits) > 0:
+                    return hits[0]
+                else:
+                    return np.inf, self.n
+            elif k > 1:
+                dd = np.empty(k,dtype=float)
+                dd.fill(np.inf)
+                ii = np.empty(k,dtype=int)
+                ii.fill(self.n)
+                for j in range(len(hits)):
+                    dd[j], ii[j] = hits[j]
+                return dd, ii
+            else:
+                raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None" % k)
+
+    def __query_ball_point(self, x, r, p=2., eps=0):
+        R = Rectangle(self.maxes, self.mins)
+
+        def traverse_checking(node, rect):
+            if rect.min_distance_point(x, p) > r / (1. + eps):
+                return []
+            elif rect.max_distance_point(x, p) < r * (1. + eps):
+                return traverse_no_checking(node)
+            elif isinstance(node, KDTree.leafnode):
+                d = self.data[node.idx]
+                return node.idx[minkowski_distance(d, x, p) <= r].tolist()
+            else:
+                less, greater = rect.split(node.split_dim, node.split)
+                return traverse_checking(node.less, less) + \
+                       traverse_checking(node.greater, greater)
+
+        def traverse_no_checking(node):
+            if isinstance(node, KDTree.leafnode):
+                return node.idx.tolist()
+            else:
+                return traverse_no_checking(node.less) + \
+                       traverse_no_checking(node.greater)
+
+        return traverse_checking(self.tree, R)
+
+    def query_ball_point(self, x, r, p=2., eps=0):
+        """Find all points within distance r of point(s) x.
+
+        Parameters
+        ----------
+        x : array_like, shape tuple + (self.m,)
+            The point or points to search for neighbors of.
+        r : positive float
+            The radius of points to return.
+        p : float, optional
+            Which Minkowski p-norm to use. Should be in the range [1, inf].
+        eps : nonnegative float, optional
+            Approximate search. Branches of the tree are not explored if their
+            nearest points are further than ``r / (1 + eps)``, and branches are
+            added in bulk if their furthest points are nearer than
+            ``r * (1 + eps)``.
+
+        Returns
+        -------
+        results : list or array of lists
+            If `x` is a single point, returns a list of the indices of the
+            neighbors of `x`. If `x` is an array of points, returns an object
+            array of shape tuple containing lists of neighbors.
+ + Notes + ----- + If you have many points whose neighbors you want to find, you may save + substantial amounts of time by putting them in a KDTree and using + query_ball_tree. + + Examples + -------- + >>> from scipy import spatial + >>> x, y = np.mgrid[0:5, 0:5] + >>> points = np.c_[x.ravel(), y.ravel()] + >>> tree = spatial.KDTree(points) + >>> tree.query_ball_point([2, 0], 1) + [5, 10, 11, 15] + + Query multiple points and plot the results: + + >>> import matplotlib.pyplot as plt + >>> points = np.asarray(points) + >>> plt.plot(points[:,0], points[:,1], '.') + >>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1): + ... nearby_points = points[results] + ... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o') + >>> plt.margins(0.1, 0.1) + >>> plt.show() + + """ + x = np.asarray(x) + if x.shape[-1] != self.m: + raise ValueError("Searching for a %d-dimensional point in a " + "%d-dimensional KDTree" % (x.shape[-1], self.m)) + if len(x.shape) == 1: + return self.__query_ball_point(x, r, p, eps) + else: + retshape = x.shape[:-1] + result = np.empty(retshape, dtype=object) + for c in np.ndindex(retshape): + result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps) + return result + + def query_ball_tree(self, other, r, p=2., eps=0): + """Find all pairs of points whose distance is at most r + + Parameters + ---------- + other : KDTree instance + The tree containing points to search against. + r : float + The maximum distance, has to be positive. + p : float, optional + Which Minkowski norm to use. `p` has to meet the condition + ``1 <= p <= infinity``. + eps : float, optional + Approximate search. Branches of the tree are not explored + if their nearest points are further than ``r/(1+eps)``, and + branches are added in bulk if their furthest points are nearer + than ``r * (1+eps)``. `eps` has to be non-negative. + + Returns + ------- + results : list of lists + For each element ``self.data[i]`` of this tree, ``results[i]`` is a + list of the indices of its neighbors in ``other.data``. 
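+
+        Examples
+        --------
+        A small illustrative sketch with two two-point trees:
+
+        >>> import numpy as np
+        >>> from scipy import spatial
+        >>> tree1 = spatial.KDTree(np.array([[0, 0], [1, 0]]))
+        >>> tree2 = spatial.KDTree(np.array([[0, 1], [1, 1]]))
+        >>> tree1.query_ball_tree(tree2, r=1.0)
+        [[0], [1]]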
+ + """ + results = [[] for i in range(self.n)] + + def traverse_checking(node1, rect1, node2, rect2): + if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps): + return + elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps): + traverse_no_checking(node1, node2) + elif isinstance(node1, KDTree.leafnode): + if isinstance(node2, KDTree.leafnode): + d = other.data[node2.idx] + for i in node1.idx: + results[i] += node2.idx[minkowski_distance(d,self.data[i],p) <= r].tolist() + else: + less, greater = rect2.split(node2.split_dim, node2.split) + traverse_checking(node1,rect1,node2.less,less) + traverse_checking(node1,rect1,node2.greater,greater) + elif isinstance(node2, KDTree.leafnode): + less, greater = rect1.split(node1.split_dim, node1.split) + traverse_checking(node1.less,less,node2,rect2) + traverse_checking(node1.greater,greater,node2,rect2) + else: + less1, greater1 = rect1.split(node1.split_dim, node1.split) + less2, greater2 = rect2.split(node2.split_dim, node2.split) + traverse_checking(node1.less,less1,node2.less,less2) + traverse_checking(node1.less,less1,node2.greater,greater2) + traverse_checking(node1.greater,greater1,node2.less,less2) + traverse_checking(node1.greater,greater1,node2.greater,greater2) + + def traverse_no_checking(node1, node2): + if isinstance(node1, KDTree.leafnode): + if isinstance(node2, KDTree.leafnode): + for i in node1.idx: + results[i] += node2.idx.tolist() + else: + traverse_no_checking(node1, node2.less) + traverse_no_checking(node1, node2.greater) + else: + traverse_no_checking(node1.less, node2) + traverse_no_checking(node1.greater, node2) + + traverse_checking(self.tree, Rectangle(self.maxes, self.mins), + other.tree, Rectangle(other.maxes, other.mins)) + return results + + def query_pairs(self, r, p=2., eps=0): + """ + Find all pairs of points within a distance. + + Parameters + ---------- + r : positive float + The maximum distance. + p : float, optional + Which Minkowski norm to use. `p` has to meet the condition + ``1 <= p <= infinity``. + eps : float, optional + Approximate search. Branches of the tree are not explored + if their nearest points are further than ``r/(1+eps)``, and + branches are added in bulk if their furthest points are nearer + than ``r * (1+eps)``. `eps` has to be non-negative. + + Returns + ------- + results : set + Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding + positions are close. 
+ + """ + results = set() + + def traverse_checking(node1, rect1, node2, rect2): + if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps): + return + elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps): + traverse_no_checking(node1, node2) + elif isinstance(node1, KDTree.leafnode): + if isinstance(node2, KDTree.leafnode): + # Special care to avoid duplicate pairs + if id(node1) == id(node2): + d = self.data[node2.idx] + for i in node1.idx: + for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]: + if i < j: + results.add((i,j)) + else: + d = self.data[node2.idx] + for i in node1.idx: + for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]: + if i < j: + results.add((i,j)) + elif j < i: + results.add((j,i)) + else: + less, greater = rect2.split(node2.split_dim, node2.split) + traverse_checking(node1,rect1,node2.less,less) + traverse_checking(node1,rect1,node2.greater,greater) + elif isinstance(node2, KDTree.leafnode): + less, greater = rect1.split(node1.split_dim, node1.split) + traverse_checking(node1.less,less,node2,rect2) + traverse_checking(node1.greater,greater,node2,rect2) + else: + less1, greater1 = rect1.split(node1.split_dim, node1.split) + less2, greater2 = rect2.split(node2.split_dim, node2.split) + traverse_checking(node1.less,less1,node2.less,less2) + traverse_checking(node1.less,less1,node2.greater,greater2) + + # Avoid traversing (node1.less, node2.greater) and + # (node1.greater, node2.less) (it's the same node pair twice + # over, which is the source of the complication in the + # original KDTree.query_pairs) + if id(node1) != id(node2): + traverse_checking(node1.greater,greater1,node2.less,less2) + + traverse_checking(node1.greater,greater1,node2.greater,greater2) + + def traverse_no_checking(node1, node2): + if isinstance(node1, KDTree.leafnode): + if isinstance(node2, KDTree.leafnode): + # Special care to avoid duplicate pairs + if id(node1) == id(node2): + for i in node1.idx: + for j in node2.idx: + if i < j: + results.add((i,j)) + else: + for i in node1.idx: + for j in node2.idx: + if i < j: + results.add((i,j)) + elif j < i: + results.add((j,i)) + else: + traverse_no_checking(node1, node2.less) + traverse_no_checking(node1, node2.greater) + else: + # Avoid traversing (node1.less, node2.greater) and + # (node1.greater, node2.less) (it's the same node pair twice + # over, which is the source of the complication in the + # original KDTree.query_pairs) + if id(node1) == id(node2): + traverse_no_checking(node1.less, node2.less) + traverse_no_checking(node1.less, node2.greater) + traverse_no_checking(node1.greater, node2.greater) + else: + traverse_no_checking(node1.less, node2) + traverse_no_checking(node1.greater, node2) + + traverse_checking(self.tree, Rectangle(self.maxes, self.mins), + self.tree, Rectangle(self.maxes, self.mins)) + return results + + def count_neighbors(self, other, r, p=2.): + """ + Count how many nearby pairs can be formed. + + Count the number of pairs (x1,x2) can be formed, with x1 drawn + from self and x2 drawn from `other`, and where + ``distance(x1, x2, p) <= r``. + This is the "two-point correlation" described in Gray and Moore 2000, + "N-body problems in statistical learning", and the code here is based + on their algorithm. + + Parameters + ---------- + other : KDTree instance + The other tree to draw points from. + r : float or one-dimensional array of floats + The radius to produce a count for. Multiple radii are searched with + a single tree traversal. 
+
+        """
+        def traverse(node1, rect1, node2, rect2, idx):
+            min_r = rect1.min_distance_rectangle(rect2,p)
+            max_r = rect1.max_distance_rectangle(rect2,p)
+            c_greater = r[idx] > max_r
+            result[idx[c_greater]] += node1.children*node2.children
+            idx = idx[(min_r <= r[idx]) & (r[idx] <= max_r)]
+            if len(idx) == 0:
+                return
+
+            if isinstance(node1,KDTree.leafnode):
+                if isinstance(node2,KDTree.leafnode):
+                    ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],
+                                            other.data[node2.idx][np.newaxis,:,:],
+                                            p).ravel()
+                    ds.sort()
+                    result[idx] += np.searchsorted(ds,r[idx],side='right')
+                else:
+                    less, greater = rect2.split(node2.split_dim, node2.split)
+                    traverse(node1, rect1, node2.less, less, idx)
+                    traverse(node1, rect1, node2.greater, greater, idx)
+            else:
+                if isinstance(node2,KDTree.leafnode):
+                    less, greater = rect1.split(node1.split_dim, node1.split)
+                    traverse(node1.less, less, node2, rect2, idx)
+                    traverse(node1.greater, greater, node2, rect2, idx)
+                else:
+                    less1, greater1 = rect1.split(node1.split_dim, node1.split)
+                    less2, greater2 = rect2.split(node2.split_dim, node2.split)
+                    traverse(node1.less,less1,node2.less,less2,idx)
+                    traverse(node1.less,less1,node2.greater,greater2,idx)
+                    traverse(node1.greater,greater1,node2.less,less2,idx)
+                    traverse(node1.greater,greater1,node2.greater,greater2,idx)
+
+        R1 = Rectangle(self.maxes, self.mins)
+        R2 = Rectangle(other.maxes, other.mins)
+        if np.shape(r) == ():
+            r = np.array([r])
+            result = np.zeros(1,dtype=int)
+            traverse(self.tree, R1, other.tree, R2, np.arange(1))
+            return result[0]
+        elif len(np.shape(r)) == 1:
+            r = np.asarray(r)
+            n, = r.shape
+            result = np.zeros(n,dtype=int)
+            traverse(self.tree, R1, other.tree, R2, np.arange(n))
+            return result
+        else:
+            raise ValueError("r must be either a single value or a "
+                             "one-dimensional array of values")
+
+    def sparse_distance_matrix(self, other, max_distance, p=2.):
+        """
+        Compute a sparse distance matrix.
+
+        Computes a distance matrix between two KDTrees, leaving as zero
+        any distance greater than max_distance.
+
+        Parameters
+        ----------
+        other : KDTree
+            The tree containing the points to compute distances to.
+        max_distance : positive float
+            Pairs separated by more than this distance are omitted from the
+            result (left as zero).
+        p : float, optional
+            Which Minkowski p-norm to use.
+
+        Returns
+        -------
+        result : dok_matrix
+            Sparse matrix representing the results in "dictionary of keys"
+            format.
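+
+        Examples
+        --------
+        A minimal sketch; the trees below hold arbitrary illustration values,
+        and a stored entry is read back by indexing the "dictionary of keys"
+        matrix:
+
+        >>> import numpy as np
+        >>> from scipy import spatial
+        >>> tree1 = spatial.KDTree(np.array([[0, 0], [1, 0]]))
+        >>> tree2 = spatial.KDTree(np.array([[0, 0], [3, 3]]))
+        >>> sdm = tree1.sparse_distance_matrix(tree2, max_distance=1.5)
+        >>> sdm[1, 0]
+        1.0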
+ + """ + result = scipy.sparse.dok_matrix((self.n,other.n)) + + def traverse(node1, rect1, node2, rect2): + if rect1.min_distance_rectangle(rect2, p) > max_distance: + return + elif isinstance(node1, KDTree.leafnode): + if isinstance(node2, KDTree.leafnode): + for i in node1.idx: + for j in node2.idx: + d = minkowski_distance(self.data[i],other.data[j],p) + if d <= max_distance: + result[i,j] = d + else: + less, greater = rect2.split(node2.split_dim, node2.split) + traverse(node1,rect1,node2.less,less) + traverse(node1,rect1,node2.greater,greater) + elif isinstance(node2, KDTree.leafnode): + less, greater = rect1.split(node1.split_dim, node1.split) + traverse(node1.less,less,node2,rect2) + traverse(node1.greater,greater,node2,rect2) + else: + less1, greater1 = rect1.split(node1.split_dim, node1.split) + less2, greater2 = rect2.split(node2.split_dim, node2.split) + traverse(node1.less,less1,node2.less,less2) + traverse(node1.less,less1,node2.greater,greater2) + traverse(node1.greater,greater1,node2.less,less2) + traverse(node1.greater,greater1,node2.greater,greater2) + traverse(self.tree, Rectangle(self.maxes, self.mins), + other.tree, Rectangle(other.maxes, other.mins)) + + return result + + +def distance_matrix(x, y, p=2, threshold=1000000): + """ + Compute the distance matrix. + + Returns the matrix of all pair-wise distances. + + Parameters + ---------- + x : (M, K) array_like + Matrix of M vectors in K dimensions. + y : (N, K) array_like + Matrix of N vectors in K dimensions. + p : float, 1 <= p <= infinity + Which Minkowski p-norm to use. + threshold : positive int + If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead + of large temporary arrays. + + Returns + ------- + result : (M, N) ndarray + Matrix containing the distance from every vector in `x` to every vector + in `y`. + + Examples + -------- + >>> from scipy.spatial import distance_matrix + >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]]) + array([[ 1. , 1.41421356], + [ 1.41421356, 1. ]]) + + """ + + x = np.asarray(x) + m, k = x.shape + y = np.asarray(y) + n, kk = y.shape + + if k != kk: + raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk)) + + if m*n*k <= threshold: + return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p) + else: + result = np.empty((m,n),dtype=float) # FIXME: figure out the best dtype + if m < n: + for i in range(m): + result[i,:] = minkowski_distance(x[i],y,p) + else: + for j in range(n): + result[:,j] = minkowski_distance(x,y[j],p) + return result diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/kdtree.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/kdtree.pyc new file mode 100644 index 0000000..b4d7324 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/kdtree.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/qhull.so b/project/venv/lib/python2.7/site-packages/scipy/spatial/qhull.so new file mode 100755 index 0000000..a6cf0a3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/qhull.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/qhull_src/COPYING.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/qhull_src/COPYING.txt new file mode 100644 index 0000000..2895ec6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/qhull_src/COPYING.txt @@ -0,0 +1,38 @@ + Qhull, Copyright (c) 1993-2015 + + C.B. 
Barber + Arlington, MA + + and + + The National Science and Technology Research Center for + Computation and Visualization of Geometric Structures + (The Geometry Center) + University of Minnesota + + email: qhull@qhull.org + +This software includes Qhull from C.B. Barber and The Geometry Center. +Qhull is copyrighted as noted above. Qhull is free software and may +be obtained via http from www.qhull.org. It may be freely copied, modified, +and redistributed under the following conditions: + +1. All copyright notices must remain intact in all files. + +2. A copy of this text file must be distributed along with any copies + of Qhull that you redistribute; this includes copies that you have + modified, or copies of programs or other software products that + include Qhull. + +3. If you modify Qhull, you must include a notice giving the + name of the person performing the modification, the date of + modification, and the reason for such modification. + +4. When distributing modified versions of Qhull, or other software + products that include Qhull, you must provide notice that the original + source code may be obtained as noted above. + +5. There is no warranty or other guarantee of fitness for Qhull, it is + provided solely "as is". Bug reports or fixes may be sent to + qhull_bug@qhull.org; the authors may or may not act on them as + they desire. diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/setup.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/setup.py new file mode 100644 index 0000000..9736a54 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/setup.py @@ -0,0 +1,88 @@ +from __future__ import division, print_function, absolute_import + +from os.path import join, dirname +import glob + + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs + from numpy.distutils.misc_util import get_info as get_misc_info + from scipy._build_utils.system_info import get_info as get_sys_info + from distutils.sysconfig import get_python_inc + + config = Configuration('spatial', parent_package, top_path) + + config.add_data_dir('tests') + + # spatial.transform + config.add_subpackage('transform') + + # qhull + qhull_src = sorted(glob.glob(join(dirname(__file__), 'qhull_src', + 'src', '*.c'))) + + inc_dirs = [get_python_inc()] + if inc_dirs[0] != get_python_inc(plat_specific=1): + inc_dirs.append(get_python_inc(plat_specific=1)) + inc_dirs.append(get_numpy_include_dirs()) + inc_dirs.append(join(dirname(dirname(__file__)), '_lib')) + + cfg = dict(get_sys_info('lapack_opt')) + cfg.setdefault('include_dirs', []).extend(inc_dirs) + config.add_extension('qhull', + sources=['qhull.c'] + qhull_src, + **cfg) + + # cKDTree + ckdtree_src = ['query.cxx', + 'build.cxx', + 'globals.cxx', + 'cpp_exc.cxx', + 'query_pairs.cxx', + 'count_neighbors.cxx', + 'query_ball_point.cxx', + 'query_ball_tree.cxx', + 'sparse_distances.cxx'] + + ckdtree_src = [join('ckdtree', 'src', x) for x in ckdtree_src] + + ckdtree_headers = ['ckdtree_decl.h', + 'ckdtree_methods.h', + 'coo_entries.h', + 'cpp_exc.h', + 'cpp_utils.h', + 'distance_base.h', + 'distance.h', + 'ordered_pair.h', + 'partial_sort.h', + 'rectangle.h'] + + ckdtree_headers = [join('ckdtree', 'src', x) for x in ckdtree_headers] + + ckdtree_dep = ['ckdtree.cxx'] + ckdtree_headers + ckdtree_src + config.add_extension('ckdtree', + sources=['ckdtree.cxx'] + ckdtree_src, + depends=ckdtree_dep, + include_dirs=inc_dirs + [join('ckdtree', 'src')]) + # 
_distance_wrap + config.add_extension('_distance_wrap', + sources=[join('src', 'distance_wrap.c')], + depends=[join('src', 'distance_impl.h')], + include_dirs=[get_numpy_include_dirs()], + extra_info=get_misc_info("npymath")) + + config.add_extension('_voronoi', + sources=['_voronoi.c']) + + config.add_extension('_hausdorff', + sources=['_hausdorff.c']) + + # Add license files + config.add_data_files('qhull_src/COPYING.txt') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/setup.pyc new file mode 100644 index 0000000..4d3ee36 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/__init__.pyc new file mode 100644 index 0000000..319dc0f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/cdist-X1.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/cdist-X1.txt new file mode 100644 index 0000000..833d5bd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/cdist-X1.txt @@ -0,0 +1,10 @@ +1.147593763490969421e-01 8.926156143344999849e-01 1.437758624645746330e-02 1.803435962879929022e-02 5.533046214065578949e-01 5.554315640747428118e-01 4.497546637814608950e-02 4.438089247948049376e-01 7.984582810220538507e-01 2.752880789161644692e-01 1.344667112315823809e-01 9.230479561452992199e-01 6.040471462941819913e-01 3.797251652770228247e-01 4.316042735592399149e-01 5.312356915348823705e-01 4.348143005129563310e-01 3.111531488508799681e-01 9.531194313908697424e-04 8.212995023500069269e-02 6.689953269869852726e-01 9.914864535288493430e-01 8.037556036341153565e-01 +9.608925123801395074e-01 2.974451233678974127e-01 9.001110330654185088e-01 5.824163330415995654e-01 7.308574928293812834e-01 2.276154562412870952e-01 7.306791076039623745e-01 8.677244866905511333e-01 9.160806456176984192e-01 6.157216959991280714e-01 5.149053524695440531e-01 3.056427344890983999e-01 9.790557366933895223e-01 4.484995861076724877e-01 4.776550391081165747e-01 7.210436977670631187e-01 9.136399501661039979e-01 4.260275733550000776e-02 5.943900041968954717e-01 3.864571606342745991e-01 9.442027665110838131e-01 4.779949058608601309e-02 6.107551944250865228e-01 +3.297286578103622023e-01 5.980207401936733502e-01 3.673301293561567205e-01 2.585830520887681949e-01 4.660558746104259686e-01 6.083795956610364986e-01 4.535206368070313632e-01 6.873989778785424276e-01 5.130152688495458468e-01 7.665877846542720198e-01 3.444402973525138023e-01 3.583658123644906102e-02 7.924818220986856732e-01 8.746685720522412444e-01 3.010105569182431884e-01 6.012239357385538163e-01 6.233737362204671006e-01 4.830438698668915176e-01 2.317286885842551047e-02 7.585989958123050547e-01 7.108257632278830451e-01 1.551024884178199281e-01 2.665485998155288083e-01 +2.456278068903017253e-02 4.148739837711815648e-01 1.986372227934196655e-01 6.920408530298168825e-01 
1.003067576685774398e-01 7.421560456480125190e-01 1.808453980608998313e-01 4.251297882537475870e-01 6.773002683522370004e-01 4.084108792570182445e-01 7.462888013191590897e-01 8.069930220529277776e-01 9.211110587681808903e-01 4.141491046181076108e-01 7.486318689260342829e-01 9.515405507589296263e-01 4.634288892577109742e-03 8.027593488166355762e-01 3.010346805217798405e-01 8.663248877242523127e-01 2.479968181181605447e-01 5.619851096054278017e-01 3.903886764590250857e-01 +7.122019976035700584e-01 6.188878051047785878e-01 7.290897087051201320e-01 6.334802157757637442e-01 5.523084734954342156e-01 5.614937129563645213e-01 2.496741051791574462e-01 5.972227939599233926e-01 1.786590597761109622e-01 2.609525984850900038e-01 7.210438943286010538e-01 2.211429064605652250e-01 9.140497572472672250e-02 1.430242193668443962e-01 7.856446942916397447e-01 4.635256358156553125e-01 5.278744289813760426e-01 3.702808015407184072e-01 5.527073830480792038e-01 6.370732917599846168e-01 9.953487928925482953e-01 3.021789770611936765e-01 3.354901923998221402e-02 +6.509638560895427695e-01 8.387598220902757751e-01 7.761375971745763103e-01 1.481627639227802717e-01 3.529474982902305324e-01 4.883093646287851586e-01 9.652923033658690199e-01 9.500680513565308294e-01 3.061885005078281985e-01 7.271902818906019750e-01 2.358962978196710303e-03 7.359889703223099211e-01 8.988893768074724955e-01 4.135279653937307121e-02 8.516441856688283796e-01 4.889597623270667270e-01 5.575909822114655245e-01 9.010853652261575641e-01 2.912844516556202246e-01 9.088759383368658629e-01 8.104351227460024898e-01 8.080695436776826890e-01 1.430530913253185155e-01 +8.048001196608134400e-01 3.066089444418462762e-02 9.021887554292090661e-01 6.154331491807940591e-02 1.378912575206647784e-02 5.775720193142440673e-01 1.219298963069791464e-01 1.883270243412101808e-01 5.569262398688379356e-02 8.964817777510125651e-02 7.977092785346929782e-01 4.878149375226197293e-01 4.511973131518809410e-02 1.858690046801604323e-01 6.947686471083162063e-01 5.884058794291086025e-01 8.638884676612634816e-01 3.855470871341656336e-01 3.495049047300468059e-01 2.767740932353948136e-01 4.731087031714035218e-01 6.679001673437914288e-01 7.502944200696660682e-01 +6.527328264244687261e-01 8.289483383553154505e-01 9.179741348282299818e-01 1.065639864466713105e-01 6.253616929058514184e-01 5.927750325266062381e-01 3.039157425463192563e-01 2.452766763359194302e-01 6.514027700704632107e-01 5.529218485487964463e-01 4.941158239308394151e-01 6.605306467722642516e-01 2.273688037050677346e-01 4.282616592244774534e-01 2.956128257930247250e-01 1.154803628237965896e-01 9.228220410235263849e-01 6.663525307676617659e-01 1.908852615936970087e-01 9.921383408926374159e-01 4.988716450388516188e-01 1.014900352736023414e-01 3.363930180244284474e-01 +2.914369076275757919e-01 5.196673601143533272e-01 7.420144907858341465e-01 1.768984185504740569e-01 5.296766993228564369e-01 5.922023566159900776e-01 5.965161262020234334e-01 3.810272333046110793e-01 8.368797246118340194e-01 7.896422363801189892e-01 9.655797561098209414e-01 4.430034032346981121e-01 2.780869795706976122e-01 3.047310845416009162e-01 8.051138863500326703e-01 6.731468634690835895e-01 4.743383036815584930e-01 9.530709614322225853e-01 7.753587619850917934e-01 2.801137109357491051e-01 6.182543660889736614e-01 5.005218857766725593e-01 9.071447804755052857e-01 +2.075071644012620453e-01 4.834950086973934802e-01 3.037011473860764532e-01 6.476084284887700937e-01 8.107195771564194020e-01 7.869075869075803364e-01 6.851234019375299633e-01 
3.544187468104398331e-02 4.847673235908021017e-01 5.690262846164507726e-01 1.663354142616256803e-01 9.692796809752548537e-01 4.133441725866372485e-01 6.729167604487583665e-01 3.998813427407297283e-01 8.272617414104491695e-01 2.129248316324727774e-01 6.517004761357130249e-01 7.363013506605019520e-01 4.072375306356985636e-01 4.463336683526665238e-01 5.485059309728204102e-01 1.981745754527846071e-01 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/cdist-X2.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/cdist-X2.txt new file mode 100644 index 0000000..fc3ea19 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/cdist-X2.txt @@ -0,0 +1,20 @@ +7.680465556300619667e-02 4.675022344069014180e-01 8.955498989131543963e-01 3.816236071436276411e-01 1.109030077070989329e-01 2.318928815459808668e-02 7.477394240984251983e-01 1.202289789304434864e-01 8.007290497575981769e-01 6.795195698871731027e-01 6.568225762396605605e-01 2.231475263228478445e-01 7.064624077661341151e-02 1.081656666815267176e-02 1.592069359090128033e-01 1.363392203645097389e-01 9.277020735447568667e-01 8.103136564528209407e-01 5.229467676276455812e-02 7.708020259874025504e-01 6.527954747473352359e-02 5.516397414886525796e-01 3.653371861367954443e-01 +8.144399106025798085e-01 7.731852525462976633e-01 6.909477620673205589e-01 9.696063817000286633e-01 4.297887511677249694e-01 6.989600553425188156e-01 7.310201335033380543e-01 3.135256147868910048e-01 5.715578037275241829e-01 3.935000744675094531e-01 2.057715781268398825e-01 5.892508589665171881e-01 8.512951599236765476e-01 9.569808799061578775e-01 6.164885878024699561e-01 4.714185430004367294e-01 6.128831737628155363e-01 6.641799309623502845e-01 6.001985185338730711e-01 4.231922889723856995e-01 7.605249308075449077e-01 1.064530958018087281e-01 6.306470691957204444e-01 +4.265470127256254518e-01 5.933766716280767239e-01 3.698589270536845053e-02 2.173799740537294412e-01 3.032679325475639009e-01 4.271831790058847611e-01 1.828944535901013690e-01 4.772333422710156592e-01 2.564773455194128138e-01 7.120329875362141347e-01 8.952243430110462530e-01 1.808777012183288013e-01 3.612151871458374464e-01 3.960999167923041631e-01 1.821669970670747318e-02 8.835474857189200559e-01 1.353104648821573663e-01 3.457291739160937016e-01 1.126467375304566199e-01 4.107293162402323450e-01 4.051719311053743056e-01 4.007382985250427243e-01 1.286905671428811848e-01 +2.910657003883979632e-01 9.616259180685315933e-03 2.033032441536681834e-01 1.096599110293863255e-01 4.191101704605176836e-01 5.462131536027151624e-01 8.393047907010142694e-01 9.046805198676335369e-01 7.009863472176891541e-01 2.508215985039629059e-01 6.754410796667598138e-01 6.740895474032024826e-01 1.358993708621679675e-01 8.219861775211464439e-01 6.322220445623235596e-01 2.766813559002430090e-01 6.575983861590951607e-01 9.515869708336625044e-01 8.654526462353933081e-01 3.450245117834797037e-01 5.649032890631299209e-01 4.717687914789682191e-01 3.296483580510030098e-01 +9.172477457635394016e-01 3.057396583041891436e-01 7.335332344225760082e-01 8.370236206345178509e-01 3.765464253115927695e-01 5.089680319287778199e-01 1.202325719268168003e-01 9.717771065272349240e-01 5.907820104019682050e-01 9.809211614977710880e-01 9.064285003671219698e-01 8.848841466121748489e-01 2.043407730734815297e-01 9.157600394927275511e-01 4.532260315147775831e-01 4.241077335005828397e-01 1.751730149568804240e-01 4.090412146081819911e-01 3.632197861847064058e-02 5.832539334970230360e-01 
4.041848151536805434e-01 3.603643989086504629e-01 1.838411383882069261e-01 +2.508806403290032572e-01 4.381403985282813496e-01 4.694787405018008286e-02 6.353900562024634713e-01 1.200813444244532846e-01 6.072397042913001419e-01 9.937255904754030977e-01 4.916670237677555066e-01 3.473845913923001572e-01 3.526875922864345370e-01 5.448595548197197047e-01 2.245096010156972799e-01 9.003258279804994269e-01 3.534560469735994470e-01 2.989266066346342177e-01 4.621024982808636938e-01 9.626538866576676012e-01 9.791401720716153001e-01 7.138514287330390840e-01 9.832862333928654719e-01 3.233999591031431198e-01 5.406467224926423398e-01 9.581890295057201579e-01 +5.210583601680578436e-01 4.598159993059653949e-01 2.111497132057748027e-01 5.949977700916546652e-01 6.342618461422359077e-01 9.888228769705599275e-01 6.096770711536318998e-01 7.548431368960863974e-01 7.490858664860100546e-01 3.186213496546415058e-01 7.895687083231245351e-01 4.178326793268141159e-01 8.095818334534051752e-01 7.886271673523481684e-01 4.038905626506847923e-01 3.652649247094948981e-01 8.267205959224892542e-01 6.433617243328785262e-01 3.117681563249452559e-01 9.675995575054980868e-01 3.675673836358472890e-01 5.863757289184046151e-01 9.099029857959717305e-02 +4.024573981231733821e-01 3.578997554002771864e-01 3.519299868071553705e-01 7.417747693762357653e-01 2.963713903285800644e-01 9.602967989298948348e-01 3.811392331739601458e-01 5.493237898295448840e-01 6.835113342793640578e-01 2.304506220807415184e-01 3.727299857731285471e-01 5.450263991912108752e-01 6.951521210987908761e-01 6.474582745861203747e-01 6.316089475403589004e-01 5.672043967425510758e-02 9.034937506977609445e-01 2.332567550780038079e-01 1.096955741449157085e-02 8.870663813493575578e-01 4.384385452180562526e-01 7.100898998169548060e-01 3.245358176196319056e-01 +9.162009194452818139e-01 5.572224742426723498e-02 3.445910686865658601e-01 9.683564008127462097e-01 9.375063149031520604e-01 9.128188852869822956e-02 9.613605414326487075e-01 5.298598697556915482e-01 6.724799695520149445e-01 1.269103938571825019e-02 1.008406153387807480e-01 8.951105272379104028e-01 1.585460318853607609e-01 6.739986455059543413e-01 5.345419321702655768e-01 6.248843899572337213e-01 3.050288488994817859e-01 1.423645553465189284e-01 1.802121190541096096e-01 9.474646822694763326e-01 2.345716438587298613e-01 9.688281784764296578e-01 1.845165243240991515e-01 +2.548297646910531178e-01 2.580877375379494465e-01 1.355482532666937301e-01 6.478812986505504412e-01 9.971695982152032345e-01 2.606721082477282403e-01 5.483439686378906996e-01 4.409612606704470528e-01 4.396442074915688503e-01 7.414262832597111608e-01 7.308840725375539416e-01 8.072095530497225280e-02 6.829509968656330976e-01 5.700030854230387911e-01 3.801845336730320657e-01 2.481059916867158766e-01 3.977295094395927322e-03 5.749480512407895150e-01 4.112033136603401307e-01 8.676159710377848722e-01 9.062646588480167686e-01 3.326691167317923359e-01 8.498307982774666591e-01 +4.464338109330643345e-01 8.546516760817471914e-01 7.384800352329814466e-01 3.692485164984804502e-02 2.915662689505471583e-02 9.010049994217171898e-01 8.622900253010918892e-01 9.786230638032608065e-01 6.546824077297251909e-01 6.342297560006789903e-01 2.230339826582647955e-01 7.658846744185553446e-01 4.603043831539479491e-01 2.017100469861691225e-01 4.891590639893540482e-01 1.937140918314912419e-01 8.161582138652878626e-01 5.597293607114051106e-02 8.423261093326828153e-02 5.105392204475533990e-02 8.234193902673621057e-01 1.784268309975372002e-01 9.118997881986501408e-02 
+8.588746913421980711e-01 1.479641118621310980e-02 1.375875301146138874e-01 7.533888774725254756e-01 5.782592791549248101e-01 9.128573037619659436e-01 1.831275762880391067e-01 3.471382864827737835e-01 4.859524740929310749e-02 8.955146541561730400e-01 4.787220791101074457e-01 4.222803577759057791e-01 8.469923964908064873e-01 6.300290047587608910e-02 1.020873237837905956e-01 3.585612487182909813e-02 6.320107119904569970e-01 5.891245970008752719e-01 1.104698053665007507e-01 4.233226558073774903e-01 4.432217054386708988e-01 2.864765416628194394e-01 2.489777211814803159e-02 +5.343810659756068615e-01 4.829076396403546578e-01 8.364480888953172988e-01 8.931374995414760321e-01 6.034161442354715188e-01 3.578336000768178593e-03 4.100579775972763574e-01 3.968667908067096128e-01 5.897163653686778861e-01 3.003241263928478899e-01 2.520935203143799264e-01 3.112129371563532310e-02 9.052865295974613646e-01 1.172285124002711010e-01 4.840001666149388315e-01 3.424620676348436588e-01 5.526057133826853818e-01 6.346139530261846184e-01 5.747945930485597321e-01 1.389915612177697879e-01 2.413801217666421417e-01 7.829900796662081497e-01 7.213528084845653998e-01 +9.384509283406079483e-01 6.303019601671526750e-01 1.787921522728125323e-01 1.556003868047917127e-02 5.662397078816850948e-01 3.437473614806091371e-01 8.615844972800188462e-01 7.624380237306396246e-01 1.096468347898514883e-01 1.276566836610887323e-01 8.479188493443535757e-01 3.634713454428405432e-01 7.478112314318967613e-01 9.856395696968375253e-01 6.250293654177319080e-02 1.919327272501809567e-01 1.415594476031050153e-01 7.224057351041784925e-01 8.452145259310355208e-01 5.434318833772002755e-01 5.177620959731277228e-02 3.358977598185840518e-01 2.542654881527960375e-01 +4.800909104006243489e-01 3.651345393613150137e-01 3.657093052788148446e-01 8.579662326651369408e-01 5.787694361240260932e-01 6.491966196891312268e-01 3.252508517294879775e-01 8.639694334693422961e-01 3.028097078756678551e-01 6.295814666338699350e-01 7.305627351548695803e-01 6.975931849120264872e-03 8.321205159004851915e-01 2.681809305821257761e-01 3.628869474597150591e-01 9.598981434716586936e-01 5.947913523332928332e-01 7.794864238003402779e-01 2.819511239444029149e-01 5.134200958476284882e-01 7.284684743064278045e-01 3.099571109539331903e-01 1.502222882866774967e-01 +2.463382654375219083e-01 4.465700737264240994e-01 7.180855317941433613e-01 5.056099420785193921e-01 6.182117344332578313e-01 2.370453793561340117e-01 9.831748018047525850e-01 6.397098184531551102e-01 8.260469782208745837e-02 7.474671691560941245e-01 9.963429983418570224e-02 5.450078811081275898e-01 5.370188678062637333e-02 2.774024442708808991e-01 2.082643088545442778e-01 2.704155352788065736e-01 7.225035580445194894e-01 4.866791976239246420e-01 1.357043111201584606e-01 7.911335827987711067e-01 7.278977102006007893e-01 6.880892094410231419e-01 1.029231496520791600e-01 +6.901796117735281566e-01 1.558248977395644275e-01 4.241818789360329855e-01 5.055658246392458199e-01 1.756288758075611467e-01 4.215083703818177652e-01 7.809231602323289945e-01 1.170053878686481141e-01 6.497026323614403243e-01 5.733120641440232479e-01 4.407703406152092551e-01 5.608677124532297498e-01 7.471045703286000039e-01 3.334604336022076732e-01 8.927208811415126011e-01 9.794565286182396191e-01 9.621542824973521313e-01 3.945825239405253981e-01 8.338963875792834157e-01 9.310552325082104286e-01 7.688283033784242271e-01 3.798823731047119567e-01 1.459993613028365278e-02 +7.848623555505630511e-01 2.681039365355797344e-03 7.833208051794043891e-01 
8.184381915171493604e-01 4.682581645582317709e-01 2.391069309436419932e-01 1.765377537168698607e-01 9.863494676539893424e-01 4.378412300863872009e-01 7.494505491149090481e-01 1.942180356195394308e-01 9.981402467222395547e-01 7.992190944052800505e-01 1.350875702852057936e-01 4.950149186748543650e-01 7.243422481248201761e-01 3.544596746353472216e-01 8.320192561472177228e-01 9.776840296475269865e-01 7.733852731914863110e-01 2.305732998099923048e-01 9.746878189802981041e-01 7.747723331200035979e-01 +6.521099013127149568e-01 5.452399443648201505e-01 8.146707517183656710e-01 3.827256063695345656e-01 7.954832091744263867e-01 7.834427643148527132e-01 9.661317930643520402e-02 9.215673965718058636e-01 4.914305728788055383e-01 4.105628408027649501e-01 9.844647830893304974e-02 3.974831165301851987e-01 3.857608898053827007e-01 5.520210781401946321e-01 3.445787541654143915e-03 4.552922057017416702e-01 7.456544561760444223e-01 4.753985092154335845e-01 2.821385239833401615e-01 7.560136035104459973e-01 8.453142510471420845e-01 6.679627143276523071e-01 6.910882868284401459e-01 +8.526493480446283302e-01 1.183917973068240315e-01 6.163988861865119517e-01 5.751899460059114455e-01 1.638797964925038375e-01 8.214597298784013235e-01 5.424670654187370156e-01 1.806631819658732763e-01 9.268107278221827672e-01 4.127397378597359445e-01 7.529877485901653733e-01 1.714251090083847018e-01 2.601487784245806179e-01 2.028326156742237263e-01 5.299879450122358948e-01 7.587877062981395193e-01 4.070738595375062996e-01 3.546903049793261875e-01 8.695365138547607176e-01 1.447085661525142619e-01 3.193366245820845606e-01 8.797841086211429795e-01 2.666562188639977071e-01 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/degenerate_pointset.npz b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/degenerate_pointset.npz new file mode 100644 index 0000000..1d39302 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/degenerate_pointset.npz differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/iris.txt new file mode 100644 index 0000000..4d78390 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/iris.txt @@ -0,0 +1,150 @@ +5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.900000000000000355e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.700000000000000178e+00 3.200000000000000178e+00 1.300000000000000044e+00 2.000000000000000111e-01 +4.599999999999999645e+00 3.100000000000000089e+00 1.500000000000000000e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.600000000000000089e+00 1.399999999999999911e+00 2.000000000000000111e-01 +5.400000000000000355e+00 3.899999999999999911e+00 1.699999999999999956e+00 4.000000000000000222e-01 +4.599999999999999645e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.999999999999999889e-01 +5.000000000000000000e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01 +4.400000000000000355e+00 2.899999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01 +5.400000000000000355e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01 +4.799999999999999822e+00 3.399999999999999911e+00 1.600000000000000089e+00 2.000000000000000111e-01 
+4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 1.000000000000000056e-01 +4.299999999999999822e+00 3.000000000000000000e+00 1.100000000000000089e+00 1.000000000000000056e-01 +5.799999999999999822e+00 4.000000000000000000e+00 1.199999999999999956e+00 2.000000000000000111e-01 +5.700000000000000178e+00 4.400000000000000355e+00 1.500000000000000000e+00 4.000000000000000222e-01 +5.400000000000000355e+00 3.899999999999999911e+00 1.300000000000000044e+00 4.000000000000000222e-01 +5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01 +5.700000000000000178e+00 3.799999999999999822e+00 1.699999999999999956e+00 2.999999999999999889e-01 +5.099999999999999645e+00 3.799999999999999822e+00 1.500000000000000000e+00 2.999999999999999889e-01 +5.400000000000000355e+00 3.399999999999999911e+00 1.699999999999999956e+00 2.000000000000000111e-01 +5.099999999999999645e+00 3.700000000000000178e+00 1.500000000000000000e+00 4.000000000000000222e-01 +4.599999999999999645e+00 3.600000000000000089e+00 1.000000000000000000e+00 2.000000000000000111e-01 +5.099999999999999645e+00 3.299999999999999822e+00 1.699999999999999956e+00 5.000000000000000000e-01 +4.799999999999999822e+00 3.399999999999999911e+00 1.899999999999999911e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.000000000000000000e+00 1.600000000000000089e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.399999999999999911e+00 1.600000000000000089e+00 4.000000000000000222e-01 +5.200000000000000178e+00 3.500000000000000000e+00 1.500000000000000000e+00 2.000000000000000111e-01 +5.200000000000000178e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.700000000000000178e+00 3.200000000000000178e+00 1.600000000000000089e+00 2.000000000000000111e-01 +4.799999999999999822e+00 3.100000000000000089e+00 1.600000000000000089e+00 2.000000000000000111e-01 +5.400000000000000355e+00 3.399999999999999911e+00 1.500000000000000000e+00 4.000000000000000222e-01 +5.200000000000000178e+00 4.099999999999999645e+00 1.500000000000000000e+00 1.000000000000000056e-01 +5.500000000000000000e+00 4.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01 +5.000000000000000000e+00 3.200000000000000178e+00 1.199999999999999956e+00 2.000000000000000111e-01 +5.500000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01 +4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01 +4.400000000000000355e+00 3.000000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01 +5.099999999999999645e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.999999999999999889e-01 +4.500000000000000000e+00 2.299999999999999822e+00 1.300000000000000044e+00 2.999999999999999889e-01 +4.400000000000000355e+00 3.200000000000000178e+00 1.300000000000000044e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.500000000000000000e+00 1.600000000000000089e+00 5.999999999999999778e-01 +5.099999999999999645e+00 3.799999999999999822e+00 1.899999999999999911e+00 4.000000000000000222e-01 +4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01 +5.099999999999999645e+00 3.799999999999999822e+00 1.600000000000000089e+00 2.000000000000000111e-01 
+4.599999999999999645e+00 3.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01 +5.299999999999999822e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.299999999999999822e+00 1.399999999999999911e+00 2.000000000000000111e-01 +7.000000000000000000e+00 3.200000000000000178e+00 4.700000000000000178e+00 1.399999999999999911e+00 +6.400000000000000355e+00 3.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00 +6.900000000000000355e+00 3.100000000000000089e+00 4.900000000000000355e+00 1.500000000000000000e+00 +5.500000000000000000e+00 2.299999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00 +6.500000000000000000e+00 2.799999999999999822e+00 4.599999999999999645e+00 1.500000000000000000e+00 +5.700000000000000178e+00 2.799999999999999822e+00 4.500000000000000000e+00 1.300000000000000044e+00 +6.299999999999999822e+00 3.299999999999999822e+00 4.700000000000000178e+00 1.600000000000000089e+00 +4.900000000000000355e+00 2.399999999999999911e+00 3.299999999999999822e+00 1.000000000000000000e+00 +6.599999999999999645e+00 2.899999999999999911e+00 4.599999999999999645e+00 1.300000000000000044e+00 +5.200000000000000178e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.399999999999999911e+00 +5.000000000000000000e+00 2.000000000000000000e+00 3.500000000000000000e+00 1.000000000000000000e+00 +5.900000000000000355e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.500000000000000000e+00 +6.000000000000000000e+00 2.200000000000000178e+00 4.000000000000000000e+00 1.000000000000000000e+00 +6.099999999999999645e+00 2.899999999999999911e+00 4.700000000000000178e+00 1.399999999999999911e+00 +5.599999999999999645e+00 2.899999999999999911e+00 3.600000000000000089e+00 1.300000000000000044e+00 +6.700000000000000178e+00 3.100000000000000089e+00 4.400000000000000355e+00 1.399999999999999911e+00 +5.599999999999999645e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00 +5.799999999999999822e+00 2.700000000000000178e+00 4.099999999999999645e+00 1.000000000000000000e+00 +6.200000000000000178e+00 2.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00 +5.599999999999999645e+00 2.500000000000000000e+00 3.899999999999999911e+00 1.100000000000000089e+00 +5.900000000000000355e+00 3.200000000000000178e+00 4.799999999999999822e+00 1.800000000000000044e+00 +6.099999999999999645e+00 2.799999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00 +6.299999999999999822e+00 2.500000000000000000e+00 4.900000000000000355e+00 1.500000000000000000e+00 +6.099999999999999645e+00 2.799999999999999822e+00 4.700000000000000178e+00 1.199999999999999956e+00 +6.400000000000000355e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00 +6.599999999999999645e+00 3.000000000000000000e+00 4.400000000000000355e+00 1.399999999999999911e+00 +6.799999999999999822e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.399999999999999911e+00 +6.700000000000000178e+00 3.000000000000000000e+00 5.000000000000000000e+00 1.699999999999999956e+00 +6.000000000000000000e+00 2.899999999999999911e+00 4.500000000000000000e+00 1.500000000000000000e+00 +5.700000000000000178e+00 2.600000000000000089e+00 3.500000000000000000e+00 1.000000000000000000e+00 +5.500000000000000000e+00 2.399999999999999911e+00 3.799999999999999822e+00 1.100000000000000089e+00 +5.500000000000000000e+00 2.399999999999999911e+00 3.700000000000000178e+00 1.000000000000000000e+00 
+5.799999999999999822e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.199999999999999956e+00 +6.000000000000000000e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.600000000000000089e+00 +5.400000000000000355e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00 +6.000000000000000000e+00 3.399999999999999911e+00 4.500000000000000000e+00 1.600000000000000089e+00 +6.700000000000000178e+00 3.100000000000000089e+00 4.700000000000000178e+00 1.500000000000000000e+00 +6.299999999999999822e+00 2.299999999999999822e+00 4.400000000000000355e+00 1.300000000000000044e+00 +5.599999999999999645e+00 3.000000000000000000e+00 4.099999999999999645e+00 1.300000000000000044e+00 +5.500000000000000000e+00 2.500000000000000000e+00 4.000000000000000000e+00 1.300000000000000044e+00 +5.500000000000000000e+00 2.600000000000000089e+00 4.400000000000000355e+00 1.199999999999999956e+00 +6.099999999999999645e+00 3.000000000000000000e+00 4.599999999999999645e+00 1.399999999999999911e+00 +5.799999999999999822e+00 2.600000000000000089e+00 4.000000000000000000e+00 1.199999999999999956e+00 +5.000000000000000000e+00 2.299999999999999822e+00 3.299999999999999822e+00 1.000000000000000000e+00 +5.599999999999999645e+00 2.700000000000000178e+00 4.200000000000000178e+00 1.300000000000000044e+00 +5.700000000000000178e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.199999999999999956e+00 +5.700000000000000178e+00 2.899999999999999911e+00 4.200000000000000178e+00 1.300000000000000044e+00 +6.200000000000000178e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00 +5.099999999999999645e+00 2.500000000000000000e+00 3.000000000000000000e+00 1.100000000000000089e+00 +5.700000000000000178e+00 2.799999999999999822e+00 4.099999999999999645e+00 1.300000000000000044e+00 +6.299999999999999822e+00 3.299999999999999822e+00 6.000000000000000000e+00 2.500000000000000000e+00 +5.799999999999999822e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.899999999999999911e+00 +7.099999999999999645e+00 3.000000000000000000e+00 5.900000000000000355e+00 2.100000000000000089e+00 +6.299999999999999822e+00 2.899999999999999911e+00 5.599999999999999645e+00 1.800000000000000044e+00 +6.500000000000000000e+00 3.000000000000000000e+00 5.799999999999999822e+00 2.200000000000000178e+00 +7.599999999999999645e+00 3.000000000000000000e+00 6.599999999999999645e+00 2.100000000000000089e+00 +4.900000000000000355e+00 2.500000000000000000e+00 4.500000000000000000e+00 1.699999999999999956e+00 +7.299999999999999822e+00 2.899999999999999911e+00 6.299999999999999822e+00 1.800000000000000044e+00 +6.700000000000000178e+00 2.500000000000000000e+00 5.799999999999999822e+00 1.800000000000000044e+00 +7.200000000000000178e+00 3.600000000000000089e+00 6.099999999999999645e+00 2.500000000000000000e+00 +6.500000000000000000e+00 3.200000000000000178e+00 5.099999999999999645e+00 2.000000000000000000e+00 +6.400000000000000355e+00 2.700000000000000178e+00 5.299999999999999822e+00 1.899999999999999911e+00 +6.799999999999999822e+00 3.000000000000000000e+00 5.500000000000000000e+00 2.100000000000000089e+00 +5.700000000000000178e+00 2.500000000000000000e+00 5.000000000000000000e+00 2.000000000000000000e+00 +5.799999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 2.399999999999999911e+00 +6.400000000000000355e+00 3.200000000000000178e+00 5.299999999999999822e+00 2.299999999999999822e+00 +6.500000000000000000e+00 3.000000000000000000e+00 5.500000000000000000e+00 1.800000000000000044e+00 
+7.700000000000000178e+00 3.799999999999999822e+00 6.700000000000000178e+00 2.200000000000000178e+00 +7.700000000000000178e+00 2.600000000000000089e+00 6.900000000000000355e+00 2.299999999999999822e+00 +6.000000000000000000e+00 2.200000000000000178e+00 5.000000000000000000e+00 1.500000000000000000e+00 +6.900000000000000355e+00 3.200000000000000178e+00 5.700000000000000178e+00 2.299999999999999822e+00 +5.599999999999999645e+00 2.799999999999999822e+00 4.900000000000000355e+00 2.000000000000000000e+00 +7.700000000000000178e+00 2.799999999999999822e+00 6.700000000000000178e+00 2.000000000000000000e+00 +6.299999999999999822e+00 2.700000000000000178e+00 4.900000000000000355e+00 1.800000000000000044e+00 +6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.100000000000000089e+00 +7.200000000000000178e+00 3.200000000000000178e+00 6.000000000000000000e+00 1.800000000000000044e+00 +6.200000000000000178e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.800000000000000044e+00 +6.099999999999999645e+00 3.000000000000000000e+00 4.900000000000000355e+00 1.800000000000000044e+00 +6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.100000000000000089e+00 +7.200000000000000178e+00 3.000000000000000000e+00 5.799999999999999822e+00 1.600000000000000089e+00 +7.400000000000000355e+00 2.799999999999999822e+00 6.099999999999999645e+00 1.899999999999999911e+00 +7.900000000000000355e+00 3.799999999999999822e+00 6.400000000000000355e+00 2.000000000000000000e+00 +6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.200000000000000178e+00 +6.299999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 1.500000000000000000e+00 +6.099999999999999645e+00 2.600000000000000089e+00 5.599999999999999645e+00 1.399999999999999911e+00 +7.700000000000000178e+00 3.000000000000000000e+00 6.099999999999999645e+00 2.299999999999999822e+00 +6.299999999999999822e+00 3.399999999999999911e+00 5.599999999999999645e+00 2.399999999999999911e+00 +6.400000000000000355e+00 3.100000000000000089e+00 5.500000000000000000e+00 1.800000000000000044e+00 +6.000000000000000000e+00 3.000000000000000000e+00 4.799999999999999822e+00 1.800000000000000044e+00 +6.900000000000000355e+00 3.100000000000000089e+00 5.400000000000000355e+00 2.100000000000000089e+00 +6.700000000000000178e+00 3.100000000000000089e+00 5.599999999999999645e+00 2.399999999999999911e+00 +6.900000000000000355e+00 3.100000000000000089e+00 5.099999999999999645e+00 2.299999999999999822e+00 +5.799999999999999822e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.899999999999999911e+00 +6.799999999999999822e+00 3.200000000000000178e+00 5.900000000000000355e+00 2.299999999999999822e+00 +6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.500000000000000000e+00 +6.700000000000000178e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.299999999999999822e+00 +6.299999999999999822e+00 2.500000000000000000e+00 5.000000000000000000e+00 1.899999999999999911e+00 +6.500000000000000000e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.000000000000000000e+00 +6.200000000000000178e+00 3.399999999999999911e+00 5.400000000000000355e+00 2.299999999999999822e+00 +5.900000000000000355e+00 3.000000000000000000e+00 5.099999999999999645e+00 1.800000000000000044e+00 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-boolean-inp.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-boolean-inp.txt new file mode 100644 index 
0000000..0636cc9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-boolean-inp.txt @@ -0,0 +1,20 @@ +1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 +1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 
[… remaining rows of auto-generated 0/1 test data from the preceding scipy.spatial test-data file elided …]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt
new file mode 100644
index 0000000..0aff126
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt
@@ -0,0 +1 @@
+[… one long line of expected pairwise Chebyshev distances for the iris data set elided …]
4.6000000e+00 3.8000000e+00 5.6000000e+00 3.8000000e+00 4.6000000e+00 4.9000000e+00 3.7000000e+00 3.8000000e+00 4.5000000e+00 4.7000000e+00 5.0000000e+00 5.3000000e+00 4.5000000e+00 4.0000000e+00 4.5000000e+00 5.0000000e+00 4.5000000e+00 4.4000000e+00 3.7000000e+00 4.3000000e+00 4.5000000e+00 4.0000000e+00 4.0000000e+00 4.8000000e+00 4.6000000e+00 4.1000000e+00 3.9000000e+00 4.1000000e+00 4.3000000e+00 4.0000000e+00 4.0000000e-01 4.0000000e-01 7.0000000e-01 5.0000000e-01 7.0000000e-01 6.0000000e-01 7.0000000e-01 1.2000000e+00 7.0000000e-01 1.0000000e+00 1.0000000e+00 8.0000000e-01 6.0000000e-01 6.0000000e-01 1.1000000e+00 1.0000000e+00 6.0000000e-01 6.0000000e-01 3.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 9.0000000e-01 1.4000000e+00 7.0000000e-01 8.0000000e-01 1.7000000e+00 1.4000000e+00 8.0000000e-01 7.0000000e-01 1.0000000e+00 7.0000000e-01 1.2000000e+00 5.0000000e-01 8.0000000e-01 3.5000000e+00 3.3000000e+00 3.7000000e+00 2.8000000e+00 3.4000000e+00 3.3000000e+00 3.5000000e+00 2.1000000e+00 3.4000000e+00 2.7000000e+00 2.3000000e+00 3.0000000e+00 2.8000000e+00 3.5000000e+00 2.4000000e+00 3.2000000e+00 3.3000000e+00 2.9000000e+00 3.3000000e+00 2.7000000e+00 3.6000000e+00 2.8000000e+00 3.7000000e+00 3.5000000e+00 3.1000000e+00 3.2000000e+00 3.6000000e+00 3.8000000e+00 3.3000000e+00 2.3000000e+00 2.6000000e+00 2.5000000e+00 2.7000000e+00 3.9000000e+00 3.3000000e+00 3.3000000e+00 3.5000000e+00 3.2000000e+00 2.9000000e+00 2.8000000e+00 3.2000000e+00 3.4000000e+00 2.8000000e+00 2.1000000e+00 3.0000000e+00 3.0000000e+00 3.0000000e+00 3.1000000e+00 1.8000000e+00 2.9000000e+00 4.8000000e+00 3.9000000e+00 4.7000000e+00 4.4000000e+00 4.6000000e+00 5.4000000e+00 3.3000000e+00 5.1000000e+00 4.6000000e+00 4.9000000e+00 3.9000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.9000000e+00 4.1000000e+00 4.3000000e+00 5.5000000e+00 5.7000000e+00 3.8000000e+00 4.5000000e+00 3.7000000e+00 5.5000000e+00 3.7000000e+00 4.5000000e+00 4.8000000e+00 3.6000000e+00 3.7000000e+00 4.4000000e+00 4.6000000e+00 4.9000000e+00 5.2000000e+00 4.4000000e+00 3.9000000e+00 4.4000000e+00 4.9000000e+00 4.4000000e+00 4.3000000e+00 3.6000000e+00 4.2000000e+00 4.4000000e+00 3.9000000e+00 3.9000000e+00 4.7000000e+00 4.5000000e+00 4.0000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.9000000e+00 5.0000000e-01 9.0000000e-01 6.0000000e-01 6.0000000e-01 1.0000000e+00 7.0000000e-01 1.1000000e+00 1.1000000e+00 1.0000000e+00 1.4000000e+00 1.0000000e+00 9.0000000e-01 1.0000000e+00 1.2000000e+00 1.3000000e+00 1.0000000e+00 5.0000000e-01 2.0000000e-01 1.3000000e+00 1.2000000e+00 9.0000000e-01 1.3000000e+00 1.4000000e+00 1.0000000e+00 9.0000000e-01 2.1000000e+00 1.3000000e+00 9.0000000e-01 6.0000000e-01 1.4000000e+00 6.0000000e-01 1.2000000e+00 7.0000000e-01 1.1000000e+00 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 2.0000000e+00 3.1000000e+00 2.4000000e+00 2.4000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 2.1000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.9000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 
4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 4.0000000e-01 4.0000000e-01 3.0000000e-01 5.0000000e-01 3.0000000e-01 8.0000000e-01 6.0000000e-01 6.0000000e-01 9.0000000e-01 5.0000000e-01 4.0000000e-01 5.0000000e-01 7.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 3.0000000e-01 8.0000000e-01 7.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e+00 5.0000000e-01 4.0000000e-01 1.6000000e+00 1.0000000e+00 4.0000000e-01 6.0000000e-01 9.0000000e-01 3.0000000e-01 8.0000000e-01 2.0000000e-01 6.0000000e-01 3.4000000e+00 3.2000000e+00 3.6000000e+00 2.7000000e+00 3.3000000e+00 3.2000000e+00 3.4000000e+00 2.0000000e+00 3.3000000e+00 2.6000000e+00 2.2000000e+00 2.9000000e+00 2.7000000e+00 3.4000000e+00 2.3000000e+00 3.1000000e+00 3.2000000e+00 2.8000000e+00 3.2000000e+00 2.6000000e+00 3.5000000e+00 2.7000000e+00 3.6000000e+00 3.4000000e+00 3.0000000e+00 3.1000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 2.2000000e+00 2.5000000e+00 2.4000000e+00 2.6000000e+00 3.8000000e+00 3.2000000e+00 3.2000000e+00 3.4000000e+00 3.1000000e+00 2.8000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.7000000e+00 2.0000000e+00 2.9000000e+00 2.9000000e+00 2.9000000e+00 3.0000000e+00 1.7000000e+00 2.8000000e+00 4.7000000e+00 3.8000000e+00 4.6000000e+00 4.3000000e+00 4.5000000e+00 5.3000000e+00 3.2000000e+00 5.0000000e+00 4.5000000e+00 4.8000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 5.4000000e+00 5.6000000e+00 3.7000000e+00 4.4000000e+00 3.6000000e+00 5.4000000e+00 3.6000000e+00 4.4000000e+00 4.7000000e+00 3.5000000e+00 3.6000000e+00 4.3000000e+00 4.5000000e+00 4.8000000e+00 5.1000000e+00 4.3000000e+00 3.8000000e+00 4.3000000e+00 4.8000000e+00 4.3000000e+00 4.2000000e+00 3.5000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.8000000e+00 4.6000000e+00 4.4000000e+00 3.9000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.8000000e+00 6.0000000e-01 3.0000000e-01 3.0000000e-01 2.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 5.0000000e-01 2.0000000e-01 1.0000000e-01 1.0000000e-01 4.0000000e-01 4.0000000e-01 3.0000000e-01 6.0000000e-01 7.0000000e-01 4.0000000e-01 3.0000000e-01 4.0000000e-01 4.0000000e-01 7.0000000e-01 1.0000000e-01 1.0000000e-01 1.2000000e+00 7.0000000e-01 3.0000000e-01 5.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 2.0000000e-01 2.0000000e-01 3.3000000e+00 3.1000000e+00 3.5000000e+00 2.6000000e+00 3.2000000e+00 3.1000000e+00 3.3000000e+00 1.9000000e+00 3.2000000e+00 2.5000000e+00 2.1000000e+00 2.8000000e+00 2.6000000e+00 3.3000000e+00 2.2000000e+00 3.0000000e+00 3.1000000e+00 2.7000000e+00 3.1000000e+00 2.5000000e+00 3.4000000e+00 2.6000000e+00 3.5000000e+00 3.3000000e+00 2.9000000e+00 3.0000000e+00 3.4000000e+00 3.6000000e+00 3.1000000e+00 2.1000000e+00 2.4000000e+00 2.3000000e+00 2.5000000e+00 3.7000000e+00 3.1000000e+00 3.1000000e+00 3.3000000e+00 3.0000000e+00 2.7000000e+00 2.6000000e+00 3.0000000e+00 3.2000000e+00 
2.6000000e+00 1.9000000e+00 2.8000000e+00 2.8000000e+00 2.8000000e+00 2.9000000e+00 1.6000000e+00 2.7000000e+00 4.6000000e+00 3.7000000e+00 4.5000000e+00 4.2000000e+00 4.4000000e+00 5.2000000e+00 3.1000000e+00 4.9000000e+00 4.4000000e+00 4.7000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 5.3000000e+00 5.5000000e+00 3.6000000e+00 4.3000000e+00 3.5000000e+00 5.3000000e+00 3.5000000e+00 4.3000000e+00 4.6000000e+00 3.4000000e+00 3.5000000e+00 4.2000000e+00 4.4000000e+00 4.7000000e+00 5.0000000e+00 4.2000000e+00 3.7000000e+00 4.2000000e+00 4.7000000e+00 4.2000000e+00 4.1000000e+00 3.4000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.7000000e+00 4.5000000e+00 4.3000000e+00 3.8000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.7000000e+00 6.0000000e-01 4.0000000e-01 6.0000000e-01 1.1000000e+00 6.0000000e-01 9.0000000e-01 8.0000000e-01 7.0000000e-01 5.0000000e-01 5.0000000e-01 1.0000000e+00 9.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 8.0000000e-01 7.0000000e-01 4.0000000e-01 8.0000000e-01 1.3000000e+00 6.0000000e-01 7.0000000e-01 1.5000000e+00 1.3000000e+00 7.0000000e-01 6.0000000e-01 9.0000000e-01 6.0000000e-01 1.1000000e+00 4.0000000e-01 7.0000000e-01 3.0000000e+00 2.8000000e+00 3.2000000e+00 2.3000000e+00 2.9000000e+00 2.8000000e+00 3.0000000e+00 1.6000000e+00 2.9000000e+00 2.2000000e+00 1.8000000e+00 2.5000000e+00 2.3000000e+00 3.0000000e+00 1.9000000e+00 2.7000000e+00 2.8000000e+00 2.4000000e+00 2.8000000e+00 2.2000000e+00 3.1000000e+00 2.3000000e+00 3.2000000e+00 3.0000000e+00 2.6000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.8000000e+00 1.8000000e+00 2.1000000e+00 2.0000000e+00 2.2000000e+00 3.4000000e+00 2.8000000e+00 2.8000000e+00 3.0000000e+00 2.7000000e+00 2.4000000e+00 2.3000000e+00 2.7000000e+00 2.9000000e+00 2.3000000e+00 1.6000000e+00 2.5000000e+00 2.5000000e+00 2.5000000e+00 2.6000000e+00 1.3000000e+00 2.4000000e+00 4.3000000e+00 3.4000000e+00 4.2000000e+00 3.9000000e+00 4.1000000e+00 4.9000000e+00 2.8000000e+00 4.6000000e+00 4.1000000e+00 4.4000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 3.3000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 5.0000000e+00 5.2000000e+00 3.3000000e+00 4.0000000e+00 3.2000000e+00 5.0000000e+00 3.2000000e+00 4.0000000e+00 4.3000000e+00 3.1000000e+00 3.2000000e+00 3.9000000e+00 4.1000000e+00 4.4000000e+00 4.7000000e+00 3.9000000e+00 3.4000000e+00 3.9000000e+00 4.4000000e+00 3.9000000e+00 3.8000000e+00 3.1000000e+00 3.7000000e+00 3.9000000e+00 3.4000000e+00 3.4000000e+00 4.2000000e+00 4.0000000e+00 3.5000000e+00 3.3000000e+00 3.5000000e+00 3.7000000e+00 3.4000000e+00 4.0000000e-01 1.0000000e-01 5.0000000e-01 5.0000000e-01 4.0000000e-01 8.0000000e-01 4.0000000e-01 3.0000000e-01 4.0000000e-01 6.0000000e-01 7.0000000e-01 4.0000000e-01 3.0000000e-01 4.0000000e-01 7.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 8.0000000e-01 4.0000000e-01 3.0000000e-01 1.5000000e+00 7.0000000e-01 3.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e-01 6.0000000e-01 2.0000000e-01 5.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 
3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 3.0000000e-01 8.0000000e-01 3.0000000e-01 6.0000000e-01 4.0000000e-01 4.0000000e-01 2.0000000e-01 3.0000000e-01 7.0000000e-01 6.0000000e-01 2.0000000e-01 7.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 4.0000000e-01 5.0000000e-01 1.0000000e+00 3.0000000e-01 4.0000000e-01 1.1000000e+00 1.0000000e+00 4.0000000e-01 4.0000000e-01 6.0000000e-01 4.0000000e-01 8.0000000e-01 3.0000000e-01 4.0000000e-01 3.0000000e+00 2.8000000e+00 3.2000000e+00 2.3000000e+00 2.9000000e+00 2.8000000e+00 3.0000000e+00 1.6000000e+00 2.9000000e+00 2.2000000e+00 1.8000000e+00 2.5000000e+00 2.3000000e+00 3.0000000e+00 1.9000000e+00 2.7000000e+00 2.8000000e+00 2.4000000e+00 2.8000000e+00 2.2000000e+00 3.1000000e+00 2.3000000e+00 3.2000000e+00 3.0000000e+00 2.6000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.8000000e+00 1.8000000e+00 2.1000000e+00 2.0000000e+00 2.2000000e+00 3.4000000e+00 2.8000000e+00 2.8000000e+00 3.0000000e+00 2.7000000e+00 2.4000000e+00 2.3000000e+00 2.7000000e+00 2.9000000e+00 2.3000000e+00 1.6000000e+00 2.5000000e+00 2.5000000e+00 2.5000000e+00 2.6000000e+00 1.3000000e+00 2.4000000e+00 4.3000000e+00 3.4000000e+00 4.2000000e+00 3.9000000e+00 4.1000000e+00 4.9000000e+00 2.8000000e+00 4.6000000e+00 4.1000000e+00 4.4000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 3.3000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 5.0000000e+00 5.2000000e+00 3.3000000e+00 4.0000000e+00 3.2000000e+00 5.0000000e+00 3.2000000e+00 4.0000000e+00 4.3000000e+00 3.1000000e+00 3.2000000e+00 3.9000000e+00 4.1000000e+00 4.4000000e+00 4.7000000e+00 3.9000000e+00 3.4000000e+00 3.9000000e+00 4.4000000e+00 3.9000000e+00 3.8000000e+00 3.1000000e+00 3.7000000e+00 3.9000000e+00 3.4000000e+00 3.4000000e+00 4.2000000e+00 4.0000000e+00 3.5000000e+00 3.3000000e+00 3.5000000e+00 3.7000000e+00 3.4000000e+00 5.0000000e-01 4.0000000e-01 4.0000000e-01 7.0000000e-01 3.0000000e-01 2.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 3.0000000e-01 4.0000000e-01 5.0000000e-01 6.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 7.0000000e-01 3.0000000e-01 2.0000000e-01 1.4000000e+00 7.0000000e-01 2.0000000e-01 4.0000000e-01 7.0000000e-01 2.0000000e-01 5.0000000e-01 2.0000000e-01 4.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 
2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 7.0000000e-01 9.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 8.0000000e-01 6.0000000e-01 9.0000000e-01 5.0000000e-01 4.0000000e-01 9.0000000e-01 5.0000000e-01 6.0000000e-01 5.0000000e-01 4.0000000e-01 1.3000000e+00 4.0000000e-01 6.0000000e-01 9.0000000e-01 6.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 4.0000000e-01 3.7000000e+00 3.5000000e+00 3.9000000e+00 3.0000000e+00 3.6000000e+00 3.5000000e+00 3.7000000e+00 2.3000000e+00 3.6000000e+00 2.9000000e+00 2.5000000e+00 3.2000000e+00 3.0000000e+00 3.7000000e+00 2.6000000e+00 3.4000000e+00 3.5000000e+00 3.1000000e+00 3.5000000e+00 2.9000000e+00 3.8000000e+00 3.0000000e+00 3.9000000e+00 3.7000000e+00 3.3000000e+00 3.4000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 2.5000000e+00 2.8000000e+00 2.7000000e+00 2.9000000e+00 4.1000000e+00 3.5000000e+00 3.5000000e+00 3.7000000e+00 3.4000000e+00 3.1000000e+00 3.0000000e+00 3.4000000e+00 3.6000000e+00 3.0000000e+00 2.3000000e+00 3.2000000e+00 3.2000000e+00 3.2000000e+00 3.3000000e+00 2.0000000e+00 3.1000000e+00 5.0000000e+00 4.1000000e+00 4.9000000e+00 4.6000000e+00 4.8000000e+00 5.6000000e+00 3.5000000e+00 5.3000000e+00 4.8000000e+00 5.1000000e+00 4.1000000e+00 4.3000000e+00 4.5000000e+00 4.0000000e+00 4.1000000e+00 4.3000000e+00 4.5000000e+00 5.7000000e+00 5.9000000e+00 4.0000000e+00 4.7000000e+00 3.9000000e+00 5.7000000e+00 3.9000000e+00 4.7000000e+00 5.0000000e+00 3.8000000e+00 3.9000000e+00 4.6000000e+00 4.8000000e+00 5.1000000e+00 5.4000000e+00 4.6000000e+00 4.1000000e+00 4.6000000e+00 5.1000000e+00 4.6000000e+00 4.5000000e+00 3.8000000e+00 4.4000000e+00 4.6000000e+00 4.1000000e+00 4.1000000e+00 4.9000000e+00 4.7000000e+00 4.2000000e+00 4.0000000e+00 4.2000000e+00 4.4000000e+00 4.1000000e+00 3.0000000e-01 3.0000000e-01 1.0000000e-01 3.0000000e-01 3.0000000e-01 4.0000000e-01 3.0000000e-01 3.0000000e-01 8.0000000e-01 9.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 4.0000000e-01 7.0000000e-01 3.0000000e-01 4.0000000e-01 1.0000000e+00 7.0000000e-01 2.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 5.0000000e-01 4.0000000e-01 3.0000000e-01 3.0000000e+00 2.8000000e+00 3.2000000e+00 2.3000000e+00 2.9000000e+00 2.8000000e+00 3.0000000e+00 1.6000000e+00 2.9000000e+00 2.2000000e+00 1.8000000e+00 2.5000000e+00 2.3000000e+00 3.0000000e+00 1.9000000e+00 2.7000000e+00 2.8000000e+00 2.4000000e+00 2.8000000e+00 2.2000000e+00 3.1000000e+00 2.3000000e+00 3.2000000e+00 3.0000000e+00 2.6000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.8000000e+00 1.8000000e+00 
2.1000000e+00 2.0000000e+00 2.2000000e+00 3.4000000e+00 2.8000000e+00 2.8000000e+00 3.0000000e+00 2.7000000e+00 2.4000000e+00 2.3000000e+00 2.7000000e+00 2.9000000e+00 2.3000000e+00 1.6000000e+00 2.5000000e+00 2.5000000e+00 2.5000000e+00 2.6000000e+00 1.3000000e+00 2.4000000e+00 4.3000000e+00 3.4000000e+00 4.2000000e+00 3.9000000e+00 4.1000000e+00 4.9000000e+00 2.8000000e+00 4.6000000e+00 4.1000000e+00 4.4000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 3.3000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 5.0000000e+00 5.2000000e+00 3.3000000e+00 4.0000000e+00 3.2000000e+00 5.0000000e+00 3.2000000e+00 4.0000000e+00 4.3000000e+00 3.1000000e+00 3.2000000e+00 3.9000000e+00 4.1000000e+00 4.4000000e+00 4.7000000e+00 3.9000000e+00 3.4000000e+00 3.9000000e+00 4.4000000e+00 3.9000000e+00 3.8000000e+00 3.1000000e+00 3.7000000e+00 3.9000000e+00 3.4000000e+00 3.4000000e+00 4.2000000e+00 4.0000000e+00 3.5000000e+00 3.3000000e+00 3.5000000e+00 3.7000000e+00 3.4000000e+00 4.0000000e-01 3.0000000e-01 4.0000000e-01 5.0000000e-01 3.0000000e-01 3.0000000e-01 6.0000000e-01 7.0000000e-01 8.0000000e-01 4.0000000e-01 7.0000000e-01 7.0000000e-01 4.0000000e-01 6.0000000e-01 4.0000000e-01 6.0000000e-01 1.1000000e+00 6.0000000e-01 4.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 5.0000000e-01 5.0000000e-01 5.0000000e-01 2.8000000e+00 2.6000000e+00 3.0000000e+00 2.1000000e+00 2.7000000e+00 2.6000000e+00 2.8000000e+00 1.4000000e+00 2.7000000e+00 2.0000000e+00 1.6000000e+00 2.3000000e+00 2.1000000e+00 2.8000000e+00 1.7000000e+00 2.5000000e+00 2.6000000e+00 2.2000000e+00 2.6000000e+00 2.0000000e+00 2.9000000e+00 2.1000000e+00 3.0000000e+00 2.8000000e+00 2.4000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.6000000e+00 1.6000000e+00 1.9000000e+00 1.8000000e+00 2.0000000e+00 3.2000000e+00 2.6000000e+00 2.6000000e+00 2.8000000e+00 2.5000000e+00 2.2000000e+00 2.1000000e+00 2.5000000e+00 2.7000000e+00 2.1000000e+00 1.4000000e+00 2.3000000e+00 2.3000000e+00 2.3000000e+00 2.4000000e+00 1.1000000e+00 2.2000000e+00 4.1000000e+00 3.2000000e+00 4.0000000e+00 3.7000000e+00 3.9000000e+00 4.7000000e+00 2.6000000e+00 4.4000000e+00 3.9000000e+00 4.2000000e+00 3.2000000e+00 3.4000000e+00 3.6000000e+00 3.1000000e+00 3.2000000e+00 3.4000000e+00 3.6000000e+00 4.8000000e+00 5.0000000e+00 3.1000000e+00 3.8000000e+00 3.0000000e+00 4.8000000e+00 3.0000000e+00 3.8000000e+00 4.1000000e+00 2.9000000e+00 3.0000000e+00 3.7000000e+00 3.9000000e+00 4.2000000e+00 4.5000000e+00 3.7000000e+00 3.2000000e+00 3.7000000e+00 4.2000000e+00 3.7000000e+00 3.6000000e+00 2.9000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 3.2000000e+00 4.0000000e+00 3.8000000e+00 3.3000000e+00 3.1000000e+00 3.3000000e+00 3.5000000e+00 3.2000000e+00 4.0000000e-01 5.0000000e-01 4.0000000e-01 3.0000000e-01 2.0000000e-01 4.0000000e-01 1.1000000e+00 1.2000000e+00 1.0000000e-01 4.0000000e-01 5.0000000e-01 1.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 7.0000000e-01 6.0000000e-01 5.0000000e-01 8.0000000e-01 2.0000000e-01 8.0000000e-01 4.0000000e-01 7.0000000e-01 3.0000000e-01 3.1000000e+00 2.9000000e+00 3.3000000e+00 2.4000000e+00 3.0000000e+00 2.9000000e+00 3.1000000e+00 1.7000000e+00 3.0000000e+00 2.3000000e+00 1.9000000e+00 2.6000000e+00 2.4000000e+00 3.1000000e+00 2.0000000e+00 2.8000000e+00 2.9000000e+00 2.5000000e+00 2.9000000e+00 2.3000000e+00 3.2000000e+00 2.4000000e+00 3.3000000e+00 3.1000000e+00 2.7000000e+00 2.8000000e+00 3.2000000e+00 3.4000000e+00 2.9000000e+00 1.9000000e+00 2.2000000e+00 2.1000000e+00 2.3000000e+00 3.5000000e+00 
2.9000000e+00 2.9000000e+00 3.1000000e+00 2.8000000e+00 2.5000000e+00 2.4000000e+00 2.8000000e+00 3.0000000e+00 2.4000000e+00 1.7000000e+00 2.6000000e+00 2.6000000e+00 2.6000000e+00 2.7000000e+00 1.4000000e+00 2.5000000e+00 4.4000000e+00 3.5000000e+00 4.3000000e+00 4.0000000e+00 4.2000000e+00 5.0000000e+00 2.9000000e+00 4.7000000e+00 4.2000000e+00 4.5000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.4000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 5.1000000e+00 5.3000000e+00 3.4000000e+00 4.1000000e+00 3.3000000e+00 5.1000000e+00 3.3000000e+00 4.1000000e+00 4.4000000e+00 3.2000000e+00 3.3000000e+00 4.0000000e+00 4.2000000e+00 4.5000000e+00 4.8000000e+00 4.0000000e+00 3.5000000e+00 4.0000000e+00 4.5000000e+00 4.0000000e+00 3.9000000e+00 3.2000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.5000000e+00 4.3000000e+00 4.1000000e+00 3.6000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 3.5000000e+00 2.0000000e-01 2.0000000e-01 3.0000000e-01 3.0000000e-01 4.0000000e-01 7.0000000e-01 8.0000000e-01 3.0000000e-01 4.0000000e-01 5.0000000e-01 3.0000000e-01 6.0000000e-01 2.0000000e-01 3.0000000e-01 1.1000000e+00 6.0000000e-01 2.0000000e-01 4.0000000e-01 4.0000000e-01 4.0000000e-01 4.0000000e-01 3.0000000e-01 2.0000000e-01 3.1000000e+00 2.9000000e+00 3.3000000e+00 2.4000000e+00 3.0000000e+00 2.9000000e+00 3.1000000e+00 1.7000000e+00 3.0000000e+00 2.3000000e+00 1.9000000e+00 2.6000000e+00 2.4000000e+00 3.1000000e+00 2.0000000e+00 2.8000000e+00 2.9000000e+00 2.5000000e+00 2.9000000e+00 2.3000000e+00 3.2000000e+00 2.4000000e+00 3.3000000e+00 3.1000000e+00 2.7000000e+00 2.8000000e+00 3.2000000e+00 3.4000000e+00 2.9000000e+00 1.9000000e+00 2.2000000e+00 2.1000000e+00 2.3000000e+00 3.5000000e+00 2.9000000e+00 2.9000000e+00 3.1000000e+00 2.8000000e+00 2.5000000e+00 2.4000000e+00 2.8000000e+00 3.0000000e+00 2.4000000e+00 1.7000000e+00 2.6000000e+00 2.6000000e+00 2.6000000e+00 2.7000000e+00 1.4000000e+00 2.5000000e+00 4.4000000e+00 3.5000000e+00 4.3000000e+00 4.0000000e+00 4.2000000e+00 5.0000000e+00 2.9000000e+00 4.7000000e+00 4.2000000e+00 4.5000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.4000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 5.1000000e+00 5.3000000e+00 3.4000000e+00 4.1000000e+00 3.3000000e+00 5.1000000e+00 3.3000000e+00 4.1000000e+00 4.4000000e+00 3.2000000e+00 3.3000000e+00 4.0000000e+00 4.2000000e+00 4.5000000e+00 4.8000000e+00 4.0000000e+00 3.5000000e+00 4.0000000e+00 4.5000000e+00 4.0000000e+00 3.9000000e+00 3.2000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.5000000e+00 4.3000000e+00 4.1000000e+00 3.6000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 3.5000000e+00 1.0000000e-01 5.0000000e-01 4.0000000e-01 2.0000000e-01 6.0000000e-01 7.0000000e-01 4.0000000e-01 3.0000000e-01 3.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e-01 2.0000000e-01 1.2000000e+00 8.0000000e-01 4.0000000e-01 4.0000000e-01 5.0000000e-01 3.0000000e-01 6.0000000e-01 2.0000000e-01 2.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 
2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 5.0000000e-01 4.0000000e-01 2.0000000e-01 7.0000000e-01 8.0000000e-01 3.0000000e-01 2.0000000e-01 3.0000000e-01 3.0000000e-01 8.0000000e-01 1.0000000e-01 2.0000000e-01 1.1000000e+00 8.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 4.0000000e-01 6.0000000e-01 3.0000000e-01 2.0000000e-01 3.3000000e+00 3.1000000e+00 3.5000000e+00 2.6000000e+00 3.2000000e+00 3.1000000e+00 3.3000000e+00 1.9000000e+00 3.2000000e+00 2.5000000e+00 2.1000000e+00 2.8000000e+00 2.6000000e+00 3.3000000e+00 2.2000000e+00 3.0000000e+00 3.1000000e+00 2.7000000e+00 3.1000000e+00 2.5000000e+00 3.4000000e+00 2.6000000e+00 3.5000000e+00 3.3000000e+00 2.9000000e+00 3.0000000e+00 3.4000000e+00 3.6000000e+00 3.1000000e+00 2.1000000e+00 2.4000000e+00 2.3000000e+00 2.5000000e+00 3.7000000e+00 3.1000000e+00 3.1000000e+00 3.3000000e+00 3.0000000e+00 2.7000000e+00 2.6000000e+00 3.0000000e+00 3.2000000e+00 2.6000000e+00 1.9000000e+00 2.8000000e+00 2.8000000e+00 2.8000000e+00 2.9000000e+00 1.6000000e+00 2.7000000e+00 4.6000000e+00 3.7000000e+00 4.5000000e+00 4.2000000e+00 4.4000000e+00 5.2000000e+00 3.1000000e+00 4.9000000e+00 4.4000000e+00 4.7000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 5.3000000e+00 5.5000000e+00 3.6000000e+00 4.3000000e+00 3.5000000e+00 5.3000000e+00 3.5000000e+00 4.3000000e+00 4.6000000e+00 3.4000000e+00 3.5000000e+00 4.2000000e+00 4.4000000e+00 4.7000000e+00 5.0000000e+00 4.2000000e+00 3.7000000e+00 4.2000000e+00 4.7000000e+00 4.2000000e+00 4.1000000e+00 3.4000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.7000000e+00 4.5000000e+00 4.3000000e+00 3.8000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.7000000e+00 1.0000000e-01 7.0000000e-01 9.0000000e-01 1.0000000e+00 2.0000000e-01 4.0000000e-01 8.0000000e-01 2.0000000e-01 3.0000000e-01 4.0000000e-01 3.0000000e-01 9.0000000e-01 3.0000000e-01 4.0000000e-01 6.0000000e-01 2.0000000e-01 6.0000000e-01 2.0000000e-01 6.0000000e-01 3.0000000e-01 3.1000000e+00 2.9000000e+00 3.3000000e+00 2.4000000e+00 3.0000000e+00 2.9000000e+00 3.1000000e+00 1.7000000e+00 3.0000000e+00 2.3000000e+00 1.9000000e+00 2.6000000e+00 2.4000000e+00 3.1000000e+00 2.0000000e+00 2.8000000e+00 2.9000000e+00 2.5000000e+00 2.9000000e+00 2.3000000e+00 3.2000000e+00 2.4000000e+00 3.3000000e+00 3.1000000e+00 2.7000000e+00 2.8000000e+00 3.2000000e+00 3.4000000e+00 2.9000000e+00 1.9000000e+00 2.2000000e+00 2.1000000e+00 2.3000000e+00 3.5000000e+00 2.9000000e+00 2.9000000e+00 3.1000000e+00 2.8000000e+00 2.5000000e+00 2.4000000e+00 2.8000000e+00 3.0000000e+00 2.4000000e+00 1.7000000e+00 2.6000000e+00 2.6000000e+00 2.6000000e+00 2.7000000e+00 1.4000000e+00 2.5000000e+00 4.4000000e+00 3.5000000e+00 4.3000000e+00 4.0000000e+00 
4.2000000e+00 5.0000000e+00 2.9000000e+00 4.7000000e+00 4.2000000e+00 4.5000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.4000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 5.1000000e+00 5.3000000e+00 3.4000000e+00 4.1000000e+00 3.3000000e+00 5.1000000e+00 3.3000000e+00 4.1000000e+00 4.4000000e+00 3.2000000e+00 3.3000000e+00 4.0000000e+00 4.2000000e+00 4.5000000e+00 4.8000000e+00 4.0000000e+00 3.5000000e+00 4.0000000e+00 4.5000000e+00 4.0000000e+00 3.9000000e+00 3.2000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.5000000e+00 4.3000000e+00 4.1000000e+00 3.6000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 3.5000000e+00 6.0000000e-01 1.0000000e+00 1.1000000e+00 1.0000000e-01 4.0000000e-01 7.0000000e-01 1.0000000e-01 4.0000000e-01 3.0000000e-01 4.0000000e-01 8.0000000e-01 4.0000000e-01 4.0000000e-01 7.0000000e-01 2.0000000e-01 7.0000000e-01 2.0000000e-01 6.0000000e-01 2.0000000e-01 3.1000000e+00 2.9000000e+00 3.3000000e+00 2.4000000e+00 3.0000000e+00 2.9000000e+00 3.1000000e+00 1.7000000e+00 3.0000000e+00 2.3000000e+00 1.9000000e+00 2.6000000e+00 2.4000000e+00 3.1000000e+00 2.0000000e+00 2.8000000e+00 2.9000000e+00 2.5000000e+00 2.9000000e+00 2.3000000e+00 3.2000000e+00 2.4000000e+00 3.3000000e+00 3.1000000e+00 2.7000000e+00 2.8000000e+00 3.2000000e+00 3.4000000e+00 2.9000000e+00 1.9000000e+00 2.2000000e+00 2.1000000e+00 2.3000000e+00 3.5000000e+00 2.9000000e+00 2.9000000e+00 3.1000000e+00 2.8000000e+00 2.5000000e+00 2.4000000e+00 2.8000000e+00 3.0000000e+00 2.4000000e+00 1.7000000e+00 2.6000000e+00 2.6000000e+00 2.6000000e+00 2.7000000e+00 1.4000000e+00 2.5000000e+00 4.4000000e+00 3.5000000e+00 4.3000000e+00 4.0000000e+00 4.2000000e+00 5.0000000e+00 2.9000000e+00 4.7000000e+00 4.2000000e+00 4.5000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.4000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 5.1000000e+00 5.3000000e+00 3.4000000e+00 4.1000000e+00 3.3000000e+00 5.1000000e+00 3.3000000e+00 4.1000000e+00 4.4000000e+00 3.2000000e+00 3.3000000e+00 4.0000000e+00 4.2000000e+00 4.5000000e+00 4.8000000e+00 4.0000000e+00 3.5000000e+00 4.0000000e+00 4.5000000e+00 4.0000000e+00 3.9000000e+00 3.2000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.5000000e+00 4.3000000e+00 4.1000000e+00 3.6000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 3.5000000e+00 7.0000000e-01 8.0000000e-01 5.0000000e-01 4.0000000e-01 2.0000000e-01 5.0000000e-01 1.0000000e+00 3.0000000e-01 4.0000000e-01 1.1000000e+00 1.0000000e+00 4.0000000e-01 4.0000000e-01 6.0000000e-01 4.0000000e-01 8.0000000e-01 3.0000000e-01 4.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 
4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 3.0000000e-01 1.0000000e+00 9.0000000e-01 6.0000000e-01 1.0000000e+00 1.1000000e+00 7.0000000e-01 6.0000000e-01 1.8000000e+00 9.0000000e-01 6.0000000e-01 4.0000000e-01 1.1000000e+00 3.0000000e-01 9.0000000e-01 4.0000000e-01 8.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.1000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.6000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 1.1000000e+00 1.0000000e+00 7.0000000e-01 1.1000000e+00 1.2000000e+00 8.0000000e-01 7.0000000e-01 1.9000000e+00 1.1000000e+00 7.0000000e-01 5.0000000e-01 1.2000000e+00 4.0000000e-01 1.0000000e+00 5.0000000e-01 9.0000000e-01 3.3000000e+00 3.1000000e+00 3.5000000e+00 2.6000000e+00 3.2000000e+00 3.1000000e+00 3.3000000e+00 1.9000000e+00 3.2000000e+00 2.5000000e+00 2.2000000e+00 2.8000000e+00 2.6000000e+00 3.3000000e+00 2.2000000e+00 3.0000000e+00 3.1000000e+00 2.7000000e+00 3.1000000e+00 2.5000000e+00 3.4000000e+00 2.6000000e+00 3.5000000e+00 3.3000000e+00 2.9000000e+00 3.0000000e+00 3.4000000e+00 3.6000000e+00 3.1000000e+00 2.1000000e+00 2.4000000e+00 2.3000000e+00 2.5000000e+00 3.7000000e+00 3.1000000e+00 3.1000000e+00 3.3000000e+00 3.0000000e+00 2.7000000e+00 2.6000000e+00 3.0000000e+00 3.2000000e+00 2.6000000e+00 1.9000000e+00 2.8000000e+00 2.8000000e+00 2.8000000e+00 2.9000000e+00 1.7000000e+00 2.7000000e+00 4.6000000e+00 3.7000000e+00 4.5000000e+00 4.2000000e+00 4.4000000e+00 5.2000000e+00 3.1000000e+00 4.9000000e+00 4.4000000e+00 4.7000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 5.3000000e+00 5.5000000e+00 3.6000000e+00 4.3000000e+00 3.5000000e+00 5.3000000e+00 3.5000000e+00 4.3000000e+00 4.6000000e+00 3.4000000e+00 3.5000000e+00 4.2000000e+00 4.4000000e+00 4.7000000e+00 5.0000000e+00 4.2000000e+00 3.7000000e+00 4.2000000e+00 4.7000000e+00 4.2000000e+00 4.1000000e+00 3.4000000e+00 4.0000000e+00 
4.2000000e+00 3.7000000e+00 3.7000000e+00 4.5000000e+00 4.3000000e+00 3.8000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.7000000e+00 3.0000000e-01 6.0000000e-01 0.0000000e+00 5.0000000e-01 3.0000000e-01 4.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 2.0000000e-01 7.0000000e-01 3.0000000e-01 6.0000000e-01 2.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 5.0000000e-01 3.0000000e-01 6.0000000e-01 3.0000000e-01 3.0000000e-01 9.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 2.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 2.0000000e-01 3.5000000e+00 3.3000000e+00 3.7000000e+00 2.8000000e+00 3.4000000e+00 3.3000000e+00 3.5000000e+00 2.1000000e+00 3.4000000e+00 2.7000000e+00 2.3000000e+00 3.0000000e+00 2.8000000e+00 3.5000000e+00 2.4000000e+00 3.2000000e+00 3.3000000e+00 2.9000000e+00 3.3000000e+00 2.7000000e+00 3.6000000e+00 2.8000000e+00 3.7000000e+00 3.5000000e+00 3.1000000e+00 3.2000000e+00 3.6000000e+00 3.8000000e+00 3.3000000e+00 2.3000000e+00 2.6000000e+00 2.5000000e+00 2.7000000e+00 3.9000000e+00 3.3000000e+00 3.3000000e+00 3.5000000e+00 3.2000000e+00 2.9000000e+00 2.8000000e+00 3.2000000e+00 3.4000000e+00 2.8000000e+00 2.1000000e+00 3.0000000e+00 3.0000000e+00 3.0000000e+00 3.1000000e+00 1.8000000e+00 2.9000000e+00 4.8000000e+00 3.9000000e+00 4.7000000e+00 4.4000000e+00 4.6000000e+00 5.4000000e+00 3.3000000e+00 5.1000000e+00 4.6000000e+00 4.9000000e+00 3.9000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.9000000e+00 4.1000000e+00 4.3000000e+00 5.5000000e+00 5.7000000e+00 3.8000000e+00 4.5000000e+00 3.7000000e+00 5.5000000e+00 3.7000000e+00 4.5000000e+00 4.8000000e+00 3.6000000e+00 3.7000000e+00 4.4000000e+00 4.6000000e+00 4.9000000e+00 5.2000000e+00 4.4000000e+00 3.9000000e+00 4.4000000e+00 4.9000000e+00 4.4000000e+00 4.3000000e+00 3.6000000e+00 4.2000000e+00 4.4000000e+00 3.9000000e+00 3.9000000e+00 4.7000000e+00 4.5000000e+00 4.0000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.9000000e+00 6.0000000e-01 1.1000000e+00 4.0000000e-01 5.0000000e-01 1.2000000e+00 1.1000000e+00 5.0000000e-01 6.0000000e-01 7.0000000e-01 4.0000000e-01 9.0000000e-01 2.0000000e-01 5.0000000e-01 3.4000000e+00 
3.2000000e+00 3.6000000e+00 2.7000000e+00 3.3000000e+00 3.2000000e+00 3.4000000e+00 2.0000000e+00 3.3000000e+00 2.6000000e+00 2.2000000e+00 2.9000000e+00 2.7000000e+00 3.4000000e+00 2.3000000e+00 3.1000000e+00 3.2000000e+00 2.8000000e+00 3.2000000e+00 2.6000000e+00 3.5000000e+00 2.7000000e+00 3.6000000e+00 3.4000000e+00 3.0000000e+00 3.1000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 2.2000000e+00 2.5000000e+00 2.4000000e+00 2.6000000e+00 3.8000000e+00 3.2000000e+00 3.2000000e+00 3.4000000e+00 3.1000000e+00 2.8000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.7000000e+00 2.0000000e+00 2.9000000e+00 2.9000000e+00 2.9000000e+00 3.0000000e+00 1.7000000e+00 2.8000000e+00 4.7000000e+00 3.8000000e+00 4.6000000e+00 4.3000000e+00 4.5000000e+00 5.3000000e+00 3.2000000e+00 5.0000000e+00 4.5000000e+00 4.8000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 5.4000000e+00 5.6000000e+00 3.7000000e+00 4.4000000e+00 3.6000000e+00 5.4000000e+00 3.6000000e+00 4.4000000e+00 4.7000000e+00 3.5000000e+00 3.6000000e+00 4.3000000e+00 4.5000000e+00 4.8000000e+00 5.1000000e+00 4.3000000e+00 3.8000000e+00 4.3000000e+00 4.8000000e+00 4.3000000e+00 4.2000000e+00 3.5000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.8000000e+00 4.6000000e+00 4.4000000e+00 3.9000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.8000000e+00 5.0000000e-01 3.0000000e-01 4.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 2.0000000e-01 7.0000000e-01 3.0000000e-01 6.0000000e-01 2.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 7.0000000e-01 6.0000000e-01 7.0000000e-01 2.0000000e-01 6.0000000e-01 8.0000000e-01 4.0000000e-01 8.0000000e-01 2.0000000e-01 9.0000000e-01 6.0000000e-01 3.4000000e+00 3.2000000e+00 3.6000000e+00 2.7000000e+00 3.3000000e+00 3.2000000e+00 3.4000000e+00 2.0000000e+00 3.3000000e+00 2.6000000e+00 2.2000000e+00 2.9000000e+00 2.7000000e+00 3.4000000e+00 2.3000000e+00 3.1000000e+00 3.2000000e+00 2.8000000e+00 3.2000000e+00 2.6000000e+00 3.5000000e+00 2.7000000e+00 3.6000000e+00 3.4000000e+00 3.0000000e+00 3.1000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 2.2000000e+00 2.5000000e+00 
2.4000000e+00 2.6000000e+00 3.8000000e+00 3.2000000e+00 3.2000000e+00 3.4000000e+00 3.1000000e+00 2.8000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.7000000e+00 2.0000000e+00 2.9000000e+00 2.9000000e+00 2.9000000e+00 3.0000000e+00 1.7000000e+00 2.8000000e+00 4.7000000e+00 3.8000000e+00 4.6000000e+00 4.3000000e+00 4.5000000e+00 5.3000000e+00 3.2000000e+00 5.0000000e+00 4.5000000e+00 4.8000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 5.4000000e+00 5.6000000e+00 3.7000000e+00 4.4000000e+00 3.6000000e+00 5.4000000e+00 3.6000000e+00 4.4000000e+00 4.7000000e+00 3.5000000e+00 3.6000000e+00 4.3000000e+00 4.5000000e+00 4.8000000e+00 5.1000000e+00 4.3000000e+00 3.8000000e+00 4.3000000e+00 4.8000000e+00 4.3000000e+00 4.2000000e+00 3.5000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.8000000e+00 4.6000000e+00 4.4000000e+00 3.9000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.8000000e+00 2.0000000e-01 1.1000000e+00 7.0000000e-01 4.0000000e-01 4.0000000e-01 4.0000000e-01 4.0000000e-01 5.0000000e-01 3.0000000e-01 1.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 1.2000000e+00 6.0000000e-01 3.0000000e-01 6.0000000e-01 5.0000000e-01 3.0000000e-01 4.0000000e-01 3.0000000e-01 2.0000000e-01 3.4000000e+00 3.2000000e+00 3.6000000e+00 2.7000000e+00 3.3000000e+00 3.2000000e+00 3.4000000e+00 2.0000000e+00 3.3000000e+00 2.6000000e+00 2.2000000e+00 2.9000000e+00 2.7000000e+00 3.4000000e+00 2.3000000e+00 3.1000000e+00 3.2000000e+00 2.8000000e+00 3.2000000e+00 2.6000000e+00 3.5000000e+00 2.7000000e+00 3.6000000e+00 3.4000000e+00 3.0000000e+00 3.1000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 2.2000000e+00 2.5000000e+00 2.4000000e+00 2.6000000e+00 3.8000000e+00 3.2000000e+00 3.2000000e+00 3.4000000e+00 3.1000000e+00 2.8000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.7000000e+00 2.0000000e+00 2.9000000e+00 2.9000000e+00 2.9000000e+00 3.0000000e+00 1.7000000e+00 2.8000000e+00 4.7000000e+00 3.8000000e+00 4.6000000e+00 4.3000000e+00 4.5000000e+00 5.3000000e+00 3.2000000e+00 5.0000000e+00 4.5000000e+00 4.8000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.8000000e+00 
[numeric data omitted: thousands of whitespace-separated floating-point values from a data file inside the committed virtualenv (project/venv), most likely bundled library test fixtures with no human-readable content]
6.0000000e-01 6.0000000e-01 1.1000000e+00 9.0000000e-01 1.1000000e+00 5.0000000e-01 1.0000000e+00 9.0000000e-01 9.0000000e-01 5.0000000e-01 7.0000000e-01 6.0000000e-01 6.0000000e-01 5.0000000e-01 1.4000000e+00 1.6000000e+00 1.0000000e+00 5.0000000e-01 8.0000000e-01 1.4000000e+00 5.0000000e-01 4.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e+00 1.5000000e+00 4.0000000e-01 8.0000000e-01 9.0000000e-01 1.3000000e+00 3.0000000e-01 5.0000000e-01 5.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 6.0000000e-01 4.0000000e-01 3.0000000e-01 7.0000000e-01 3.0000000e-01 2.0000000e-01 5.0000000e-01 1.2000000e+00 1.4000000e+00 8.0000000e-01 5.0000000e-01 9.0000000e-01 1.2000000e+00 6.0000000e-01 3.0000000e-01 7.0000000e-01 7.0000000e-01 6.0000000e-01 3.0000000e-01 7.0000000e-01 9.0000000e-01 1.4000000e+00 4.0000000e-01 4.0000000e-01 4.0000000e-01 1.2000000e+00 6.0000000e-01 1.0000000e-01 7.0000000e-01 4.0000000e-01 6.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 1.2000000e+00 1.7000000e+00 1.0000000e+00 2.1000000e+00 1.0000000e+00 1.8000000e+00 1.0000000e+00 7.0000000e-01 1.9000000e+00 1.8000000e+00 1.3000000e+00 9.0000000e-01 1.0000000e+00 3.0000000e-01 1.3000000e+00 1.6000000e+00 1.6000000e+00 8.0000000e-01 1.4000000e+00 1.3000000e+00 1.9000000e+00 1.3000000e+00 1.1000000e+00 1.6000000e+00 1.9000000e+00 9.0000000e-01 1.0000000e+00 1.5000000e+00 1.7000000e+00 1.5000000e+00 1.5000000e+00 1.8000000e+00 1.9000000e+00 1.2000000e+00 2.1000000e+00 3.0000000e-01 2.0000000e+00 1.2000000e+00 9.0000000e-01 2.1000000e+00 2.0000000e+00 1.3000000e+00 1.1000000e+00 8.0000000e-01 1.2000000e+00 1.3000000e+00 1.8000000e+00 1.6000000e+00 8.0000000e-01 1.4000000e+00 1.4000000e+00 2.1000000e+00 1.5000000e+00 1.3000000e+00 1.8000000e+00 1.9000000e+00 1.0000000e+00 1.2000000e+00 1.7000000e+00 1.9000000e+00 1.7000000e+00 1.5000000e+00 1.8000000e+00 1.0000000e+00 6.0000000e-01 1.7000000e+00 5.0000000e-01 1.1000000e+00 1.2000000e+00 6.0000000e-01 8.0000000e-01 6.0000000e-01 1.2000000e+00 1.4000000e+00 1.9000000e+00 7.0000000e-01 6.0000000e-01 6.0000000e-01 1.7000000e+00 1.2000000e+00 9.0000000e-01 8.0000000e-01 9.0000000e-01 9.0000000e-01 9.0000000e-01 5.0000000e-01 1.0000000e+00 1.1000000e+00 8.0000000e-01 4.0000000e-01 8.0000000e-01 1.2000000e+00 8.0000000e-01 1.3000000e+00 1.0000000e+00 8.0000000e-01 2.0000000e-01 5.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 1.0000000e+00 5.0000000e-01 8.0000000e-01 9.0000000e-01 8.0000000e-01 6.0000000e-01 5.0000000e-01 9.0000000e-01 3.0000000e-01 2.0000000e-01 6.0000000e-01 1.1000000e+00 2.0000000e-01 2.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 7.0000000e-01 1.0000000e+00 2.1000000e+00 7.0000000e-01 1.1000000e+00 1.6000000e+00 6.0000000e-01 5.0000000e-01 8.0000000e-01 1.6000000e+00 1.8000000e+00 2.3000000e+00 8.0000000e-01 7.0000000e-01 7.0000000e-01 2.1000000e+00 7.0000000e-01 8.0000000e-01 4.0000000e-01 1.3000000e+00 1.1000000e+00 1.3000000e+00 2.0000000e-01 1.2000000e+00 1.1000000e+00 1.1000000e+00 7.0000000e-01 9.0000000e-01 6.0000000e-01 3.0000000e-01 1.8000000e+00 1.0000000e+00 7.0000000e-01 1.9000000e+00 1.8000000e+00 1.3000000e+00 9.0000000e-01 6.0000000e-01 1.0000000e+00 1.3000000e+00 1.6000000e+00 1.6000000e+00 6.0000000e-01 1.4000000e+00 1.3000000e+00 1.9000000e+00 1.3000000e+00 1.1000000e+00 1.6000000e+00 1.9000000e+00 9.0000000e-01 1.0000000e+00 1.5000000e+00 
1.7000000e+00 1.5000000e+00 1.5000000e+00 1.8000000e+00 8.0000000e-01 1.1000000e+00 1.0000000e-01 3.0000000e-01 7.0000000e-01 9.0000000e-01 1.2000000e+00 1.6000000e+00 7.0000000e-01 3.0000000e-01 7.0000000e-01 1.4000000e+00 7.0000000e-01 6.0000000e-01 3.0000000e-01 6.0000000e-01 7.0000000e-01 6.0000000e-01 5.0000000e-01 1.0000000e+00 8.0000000e-01 5.0000000e-01 2.0000000e-01 3.0000000e-01 7.0000000e-01 4.0000000e-01 5.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 1.2000000e+00 5.0000000e-01 6.0000000e-01 7.0000000e-01 1.0000000e+00 4.0000000e-01 3.0000000e-01 9.0000000e-01 3.0000000e-01 3.0000000e-01 6.0000000e-01 9.0000000e-01 2.0000000e-01 4.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 8.0000000e-01 1.2000000e+00 1.1000000e+00 8.0000000e-01 2.0000000e-01 4.0000000e-01 7.0000000e-01 8.0000000e-01 9.0000000e-01 1.1000000e+00 5.0000000e-01 9.0000000e-01 8.0000000e-01 1.2000000e+00 6.0000000e-01 6.0000000e-01 9.0000000e-01 1.4000000e+00 5.0000000e-01 7.0000000e-01 8.0000000e-01 1.0000000e+00 8.0000000e-01 1.0000000e+00 1.3000000e+00 2.0000000e-01 8.0000000e-01 1.0000000e+00 1.3000000e+00 1.7000000e+00 8.0000000e-01 3.0000000e-01 8.0000000e-01 1.5000000e+00 8.0000000e-01 7.0000000e-01 2.0000000e-01 7.0000000e-01 8.0000000e-01 7.0000000e-01 4.0000000e-01 1.1000000e+00 9.0000000e-01 5.0000000e-01 3.0000000e-01 4.0000000e-01 6.0000000e-01 3.0000000e-01 7.0000000e-01 1.1000000e+00 1.3000000e+00 1.8000000e+00 7.0000000e-01 3.0000000e-01 7.0000000e-01 1.6000000e+00 7.0000000e-01 6.0000000e-01 1.0000000e-01 8.0000000e-01 7.0000000e-01 8.0000000e-01 3.0000000e-01 1.0000000e+00 8.0000000e-01 6.0000000e-01 5.0000000e-01 4.0000000e-01 5.0000000e-01 2.0000000e-01 8.0000000e-01 1.0000000e+00 1.5000000e+00 1.0000000e-01 6.0000000e-01 7.0000000e-01 1.3000000e+00 6.0000000e-01 3.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 4.0000000e-01 6.0000000e-01 5.0000000e-01 3.0000000e-01 8.0000000e-01 8.0000000e-01 9.0000000e-01 1.1000000e+00 7.0000000e-01 9.0000000e-01 8.0000000e-01 1.2000000e+00 5.0000000e-01 8.0000000e-01 7.0000000e-01 1.4000000e+00 7.0000000e-01 9.0000000e-01 7.0000000e-01 9.0000000e-01 7.0000000e-01 1.0000000e+00 1.3000000e+00 1.0000000e+00 1.0000000e+00 1.1000000e+00 1.3000000e+00 4.0000000e-01 1.1000000e+00 1.0000000e+00 1.4000000e+00 7.0000000e-01 7.0000000e-01 1.0000000e+00 1.6000000e+00 6.0000000e-01 7.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 1.2000000e+00 1.5000000e+00 1.5000000e+00 1.6000000e+00 1.8000000e+00 8.0000000e-01 1.6000000e+00 1.5000000e+00 1.9000000e+00 1.0000000e+00 1.2000000e+00 1.3000000e+00 2.1000000e+00 1.1000000e+00 1.2000000e+00 1.2000000e+00 1.6000000e+00 1.4000000e+00 1.7000000e+00 2.0000000e+00 7.0000000e-01 8.0000000e-01 1.3000000e+00 6.0000000e-01 4.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 4.0000000e-01 6.0000000e-01 5.0000000e-01 5.0000000e-01 1.4000000e+00 9.0000000e-01 4.0000000e-01 3.0000000e-01 6.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 8.0000000e-01 1.0000000e+00 8.0000000e-01 4.0000000e-01 5.0000000e-01 8.0000000e-01 4.0000000e-01 1.6000000e+00 1.0000000e+00 5.0000000e-01 8.0000000e-01 8.0000000e-01 1.0000000e+00 9.0000000e-01 5.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 6.0000000e-01 6.0000000e-01 9.0000000e-01 5.0000000e-01 1.4000000e+00 1.3000000e+00 1.7000000e+00 
8.0000000e-01 1.0000000e+00 1.0000000e+00 1.9000000e+00 9.0000000e-01 1.0000000e+00 1.0000000e+00 1.4000000e+00 1.2000000e+00 1.5000000e+00 1.8000000e+00 6.0000000e-01 8.0000000e-01 6.0000000e-01 4.0000000e-01 6.0000000e-01 7.0000000e-01 5.0000000e-01 4.0000000e-01 4.0000000e-01 9.0000000e-01 4.0000000e-01 2.0000000e-01 6.0000000e-01 7.0000000e-01 5.0000000e-01 6.0000000e-01 5.0000000e-01 6.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 6.0000000e-01 3.0000000e-01 5.0000000e-01 5.0000000e-01 9.0000000e-01 8.0000000e-01 9.0000000e-01 3.0000000e-01 1.1000000e+00 9.0000000e-01 7.0000000e-01 5.0000000e-01 5.0000000e-01 6.0000000e-01 3.0000000e-01 3.0000000e-01 3.0000000e-01 1.1000000e+00 5.0000000e-01 4.0000000e-01 2.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 1.0000000e+00 5.0000000e-01 9.0000000e-01 3.0000000e-01 2.0000000e-01 4.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 8.0000000e-01 1.1000000e+00 8.0000000e-01 6.0000000e-01 2.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 1.0000000e+00 1.0000000e+00 9.0000000e-01 9.0000000e-01 5.0000000e-01 7.0000000e-01 7.0000000e-01 3.0000000e-01 2.0000000e-01 7.0000000e-01 9.0000000e-01 7.0000000e-01 6.0000000e-01 9.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 9.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 5.0000000e-01 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-chebyshev-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-chebyshev-ml.txt new file mode 100644 index 0000000..7864862 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-chebyshev-ml.txt @@ -0,0 +1 @@ + 8.9084734e-01 9.3573853e-01 9.3507398e-01 9.6040691e-01 9.2918157e-01 9.6617342e-01 9.0430930e-01 9.5753424e-01 8.7106898e-01 9.2169905e-01 9.7401159e-01 8.9013416e-01 9.3956689e-01 9.0041896e-01 9.2588355e-01 9.3849417e-01 8.9713468e-01 9.1481804e-01 9.7500539e-01 9.0012586e-01 9.0962559e-01 8.5860091e-01 8.6981095e-01 8.9995771e-01 8.8070172e-01 9.1456657e-01 8.6711474e-01 9.2593917e-01 8.7560376e-01 8.5193121e-01 9.0898542e-01 8.7765302e-01 8.6555584e-01 8.6093485e-01 9.0447028e-01 8.7614405e-01 9.4803522e-01 8.4998062e-01 7.8398996e-01 8.9538612e-01 8.3902291e-01 9.9039470e-01 9.5480519e-01 8.9152195e-01 9.1623329e-01 7.9094921e-01 9.1777100e-01 9.8972335e-01 9.0429093e-01 8.7646362e-01 9.2136649e-01 9.7178177e-01 8.9610979e-01 9.4710327e-01 9.3612450e-01 9.0241499e-01 7.7992538e-01 8.7262126e-01 9.3325183e-01 8.5796531e-01 9.4267977e-01 6.7224167e-01 7.9568368e-01 8.6411267e-01 9.3311642e-01 9.0160114e-01 9.0698887e-01 8.5833256e-01 9.6902830e-01 9.5072298e-01 8.6808495e-01 9.7879599e-01 8.8060729e-01 8.2818573e-01 8.4366706e-01 8.4506700e-01 9.4532981e-01 9.1792306e-01 7.8917825e-01 9.8337805e-01 8.1751613e-01 9.3037855e-01 9.1618832e-01 8.6568874e-01 8.9751397e-01 8.7923710e-01 8.6814329e-01 9.0330164e-01 8.2426213e-01 9.4644643e-01 8.8431293e-01 8.8497426e-01 9.0633818e-01 9.5537161e-01 8.2167575e-01 8.7771053e-01 9.0681167e-01 8.7626143e-01 8.7463464e-01 9.8033940e-01 9.2920881e-01 9.5108549e-01 9.1287466e-01 8.0052218e-01 9.2409517e-01 8.8252650e-01 8.7873923e-01 9.2989402e-01 9.1985043e-01 9.6172646e-01 8.8223856e-01 9.4477822e-01 8.8310948e-01 9.4461306e-01 9.1875210e-01 9.1233363e-01 9.2124013e-01 9.5460897e-01 8.4640982e-01 9.0882657e-01 9.8169468e-01 9.7828355e-01 8.4150533e-01 8.6888923e-01 9.7138825e-01 8.7988144e-01 
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt
new file mode 100644
index 0000000..6722928
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt
@@ -0,0 +1 @@
+ 7.0000000e-01 8.0000000e-01 [... one line of machine-generated city-block pdist reference values for the iris data ...]
4.7000000e+00 6.0000000e+00 4.9000000e+00 6.1000000e+00 3.2000000e+00 5.8000000e+00 4.0000000e+00 3.7000000e+00 4.8000000e+00 5.0000000e+00 5.5000000e+00 3.8000000e+00 5.8000000e+00 4.8000000e+00 4.4000000e+00 6.2000000e+00 4.3000000e+00 5.9000000e+00 4.8000000e+00 6.4000000e+00 5.4000000e+00 5.3000000e+00 5.6000000e+00 6.4000000e+00 6.6000000e+00 5.3000000e+00 3.8000000e+00 4.2000000e+00 4.0000000e+00 4.4000000e+00 6.2000000e+00 4.6000000e+00 5.7000000e+00 6.2000000e+00 5.9000000e+00 4.2000000e+00 4.5000000e+00 4.7000000e+00 5.3000000e+00 4.6000000e+00 3.2000000e+00 4.6000000e+00 4.3000000e+00 4.5000000e+00 5.1000000e+00 2.9000000e+00 4.5000000e+00 8.3000000e+00 6.3000000e+00 8.3000000e+00 7.0000000e+00 7.7000000e+00 9.5000000e+00 5.0000000e+00 8.7000000e+00 8.0000000e+00 9.6000000e+00 7.0000000e+00 7.1000000e+00 7.6000000e+00 6.4000000e+00 6.7000000e+00 7.4000000e+00 7.0000000e+00 1.0600000e+01 1.0500000e+01 6.5000000e+00 8.3000000e+00 5.9000000e+00 9.8000000e+00 6.5000000e+00 8.0000000e+00 8.4000000e+00 6.2000000e+00 6.0000000e+00 7.5000000e+00 7.8000000e+00 8.8000000e+00 1.0300000e+01 7.6000000e+00 6.3000000e+00 6.7000000e+00 9.3000000e+00 7.9000000e+00 7.0000000e+00 5.8000000e+00 7.7000000e+00 8.0000000e+00 7.6000000e+00 6.3000000e+00 8.4000000e+00 8.4000000e+00 7.4000000e+00 6.9000000e+00 6.9000000e+00 7.5000000e+00 6.0000000e+00 6.0000000e-01 6.0000000e-01 7.0000000e-01 7.0000000e-01 5.0000000e-01 1.3000000e+00 1.7000000e+00 8.0000000e-01 8.0000000e-01 1.1000000e+00 8.0000000e-01 1.5000000e+00 4.0000000e-01 5.0000000e-01 2.0000000e+00 1.3000000e+00 3.0000000e-01 8.0000000e-01 9.0000000e-01 7.0000000e-01 1.0000000e+00 9.0000000e-01 5.0000000e-01 6.3000000e+00 5.6000000e+00 6.6000000e+00 4.9000000e+00 6.2000000e+00 5.1000000e+00 5.7000000e+00 3.4000000e+00 6.0000000e+00 4.2000000e+00 3.9000000e+00 5.0000000e+00 5.2000000e+00 5.7000000e+00 4.0000000e+00 5.8000000e+00 5.0000000e+00 4.6000000e+00 6.4000000e+00 4.5000000e+00 5.7000000e+00 5.0000000e+00 6.6000000e+00 5.6000000e+00 5.5000000e+00 5.8000000e+00 6.6000000e+00 6.8000000e+00 5.5000000e+00 4.0000000e+00 4.4000000e+00 4.2000000e+00 4.6000000e+00 6.4000000e+00 4.8000000e+00 5.1000000e+00 6.2000000e+00 6.1000000e+00 4.4000000e+00 4.7000000e+00 4.9000000e+00 5.5000000e+00 4.8000000e+00 3.4000000e+00 4.8000000e+00 4.5000000e+00 4.7000000e+00 5.3000000e+00 3.1000000e+00 4.7000000e+00 7.9000000e+00 6.5000000e+00 8.5000000e+00 7.2000000e+00 7.9000000e+00 9.7000000e+00 5.2000000e+00 8.9000000e+00 8.2000000e+00 9.0000000e+00 6.8000000e+00 7.3000000e+00 7.8000000e+00 6.6000000e+00 6.9000000e+00 7.2000000e+00 7.2000000e+00 1.0000000e+01 1.0700000e+01 6.7000000e+00 8.1000000e+00 6.1000000e+00 1.0000000e+01 6.7000000e+00 7.6000000e+00 8.2000000e+00 6.4000000e+00 6.2000000e+00 7.7000000e+00 8.0000000e+00 9.0000000e+00 9.7000000e+00 7.8000000e+00 6.5000000e+00 6.9000000e+00 9.5000000e+00 7.3000000e+00 7.0000000e+00 6.0000000e+00 7.7000000e+00 8.0000000e+00 7.6000000e+00 6.5000000e+00 8.2000000e+00 8.0000000e+00 7.6000000e+00 7.1000000e+00 7.1000000e+00 6.9000000e+00 6.2000000e+00 2.0000000e-01 9.0000000e-01 9.0000000e-01 5.0000000e-01 7.0000000e-01 1.1000000e+00 8.0000000e-01 8.0000000e-01 5.0000000e-01 8.0000000e-01 1.5000000e+00 2.0000000e-01 5.0000000e-01 2.2000000e+00 1.3000000e+00 7.0000000e-01 1.0000000e+00 1.1000000e+00 5.0000000e-01 1.0000000e+00 3.0000000e-01 5.0000000e-01 6.5000000e+00 5.8000000e+00 6.8000000e+00 5.1000000e+00 6.4000000e+00 5.3000000e+00 5.9000000e+00 4.0000000e+00 6.2000000e+00 4.4000000e+00 4.5000000e+00 
5.2000000e+00 5.4000000e+00 5.9000000e+00 4.2000000e+00 6.0000000e+00 5.2000000e+00 4.8000000e+00 6.6000000e+00 4.7000000e+00 5.9000000e+00 5.2000000e+00 6.8000000e+00 5.8000000e+00 5.7000000e+00 6.0000000e+00 6.8000000e+00 7.0000000e+00 5.7000000e+00 4.2000000e+00 4.6000000e+00 4.4000000e+00 4.8000000e+00 6.6000000e+00 5.0000000e+00 5.3000000e+00 6.4000000e+00 6.3000000e+00 4.6000000e+00 4.9000000e+00 5.1000000e+00 5.7000000e+00 5.0000000e+00 4.0000000e+00 5.0000000e+00 4.7000000e+00 4.9000000e+00 5.5000000e+00 3.5000000e+00 4.9000000e+00 8.1000000e+00 6.7000000e+00 8.7000000e+00 7.4000000e+00 8.1000000e+00 9.9000000e+00 5.8000000e+00 9.1000000e+00 8.4000000e+00 9.0000000e+00 7.0000000e+00 7.5000000e+00 8.0000000e+00 6.8000000e+00 7.1000000e+00 7.4000000e+00 7.4000000e+00 1.0000000e+01 1.0900000e+01 6.9000000e+00 8.3000000e+00 6.3000000e+00 1.0200000e+01 6.9000000e+00 7.8000000e+00 8.4000000e+00 6.6000000e+00 6.4000000e+00 7.9000000e+00 8.2000000e+00 9.2000000e+00 9.7000000e+00 8.0000000e+00 6.7000000e+00 7.1000000e+00 9.7000000e+00 7.5000000e+00 7.2000000e+00 6.2000000e+00 7.9000000e+00 8.2000000e+00 7.8000000e+00 6.7000000e+00 8.4000000e+00 8.2000000e+00 7.8000000e+00 7.3000000e+00 7.3000000e+00 7.1000000e+00 6.4000000e+00 9.0000000e-01 9.0000000e-01 5.0000000e-01 9.0000000e-01 1.1000000e+00 8.0000000e-01 6.0000000e-01 5.0000000e-01 8.0000000e-01 1.3000000e+00 2.0000000e-01 5.0000000e-01 2.0000000e+00 1.1000000e+00 9.0000000e-01 1.2000000e+00 9.0000000e-01 7.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 6.5000000e+00 5.8000000e+00 6.8000000e+00 5.1000000e+00 6.4000000e+00 5.3000000e+00 5.9000000e+00 4.0000000e+00 6.2000000e+00 4.4000000e+00 4.5000000e+00 5.2000000e+00 5.4000000e+00 5.9000000e+00 4.2000000e+00 6.0000000e+00 5.2000000e+00 4.8000000e+00 6.6000000e+00 4.7000000e+00 5.9000000e+00 5.2000000e+00 6.8000000e+00 5.8000000e+00 5.7000000e+00 6.0000000e+00 6.8000000e+00 7.0000000e+00 5.7000000e+00 4.2000000e+00 4.6000000e+00 4.4000000e+00 4.8000000e+00 6.6000000e+00 5.0000000e+00 5.3000000e+00 6.4000000e+00 6.3000000e+00 4.6000000e+00 4.9000000e+00 5.1000000e+00 5.7000000e+00 5.0000000e+00 4.0000000e+00 5.0000000e+00 4.7000000e+00 4.9000000e+00 5.5000000e+00 3.5000000e+00 4.9000000e+00 8.1000000e+00 6.7000000e+00 8.7000000e+00 7.4000000e+00 8.1000000e+00 9.9000000e+00 5.8000000e+00 9.1000000e+00 8.4000000e+00 9.2000000e+00 7.0000000e+00 7.5000000e+00 8.0000000e+00 6.8000000e+00 7.1000000e+00 7.4000000e+00 7.4000000e+00 1.0200000e+01 1.0900000e+01 6.9000000e+00 8.3000000e+00 6.3000000e+00 1.0200000e+01 6.9000000e+00 7.8000000e+00 8.4000000e+00 6.6000000e+00 6.4000000e+00 7.9000000e+00 8.2000000e+00 9.2000000e+00 9.9000000e+00 8.0000000e+00 6.7000000e+00 7.1000000e+00 9.7000000e+00 7.5000000e+00 7.2000000e+00 6.2000000e+00 7.9000000e+00 8.2000000e+00 7.8000000e+00 6.7000000e+00 8.4000000e+00 8.2000000e+00 7.8000000e+00 7.3000000e+00 7.3000000e+00 7.1000000e+00 6.4000000e+00 2.0000000e-01 1.2000000e+00 1.6000000e+00 2.0000000e+00 5.0000000e-01 7.0000000e-01 1.4000000e+00 5.0000000e-01 8.0000000e-01 7.0000000e-01 1.0000000e+00 1.5000000e+00 6.0000000e-01 1.0000000e+00 1.5000000e+00 6.0000000e-01 1.0000000e+00 3.0000000e-01 1.2000000e+00 6.0000000e-01 6.6000000e+00 5.9000000e+00 6.9000000e+00 5.2000000e+00 6.5000000e+00 5.4000000e+00 6.2000000e+00 3.5000000e+00 6.3000000e+00 4.5000000e+00 4.2000000e+00 5.3000000e+00 5.5000000e+00 6.0000000e+00 4.3000000e+00 6.1000000e+00 5.3000000e+00 4.9000000e+00 6.7000000e+00 4.8000000e+00 6.0000000e+00 5.3000000e+00 6.9000000e+00 
5.9000000e+00 5.8000000e+00 6.1000000e+00 6.9000000e+00 7.1000000e+00 5.8000000e+00 4.3000000e+00 4.7000000e+00 4.5000000e+00 4.9000000e+00 6.7000000e+00 5.1000000e+00 5.8000000e+00 6.5000000e+00 6.4000000e+00 4.7000000e+00 5.0000000e+00 5.2000000e+00 5.8000000e+00 5.1000000e+00 3.7000000e+00 5.1000000e+00 4.8000000e+00 5.0000000e+00 5.6000000e+00 3.4000000e+00 5.0000000e+00 8.4000000e+00 6.8000000e+00 8.8000000e+00 7.5000000e+00 8.2000000e+00 1.0000000e+01 5.3000000e+00 9.2000000e+00 8.5000000e+00 9.7000000e+00 7.1000000e+00 7.6000000e+00 8.1000000e+00 6.9000000e+00 7.2000000e+00 7.5000000e+00 7.5000000e+00 1.0700000e+01 1.1000000e+01 7.0000000e+00 8.4000000e+00 6.4000000e+00 1.0300000e+01 7.0000000e+00 8.1000000e+00 8.5000000e+00 6.7000000e+00 6.5000000e+00 8.0000000e+00 8.3000000e+00 9.3000000e+00 1.0400000e+01 8.1000000e+00 6.8000000e+00 7.2000000e+00 9.8000000e+00 8.0000000e+00 7.3000000e+00 6.3000000e+00 8.0000000e+00 8.3000000e+00 7.9000000e+00 6.8000000e+00 8.5000000e+00 8.5000000e+00 7.9000000e+00 7.4000000e+00 7.4000000e+00 7.6000000e+00 6.5000000e+00 1.2000000e+00 1.6000000e+00 2.0000000e+00 3.0000000e-01 7.0000000e-01 1.4000000e+00 3.0000000e-01 8.0000000e-01 7.0000000e-01 1.0000000e+00 1.5000000e+00 8.0000000e-01 1.0000000e+00 1.5000000e+00 4.0000000e-01 1.0000000e+00 5.0000000e-01 1.2000000e+00 6.0000000e-01 6.6000000e+00 5.9000000e+00 6.7000000e+00 5.0000000e+00 6.3000000e+00 5.2000000e+00 6.2000000e+00 3.3000000e+00 6.1000000e+00 4.3000000e+00 4.0000000e+00 5.1000000e+00 5.3000000e+00 5.8000000e+00 4.1000000e+00 5.9000000e+00 5.1000000e+00 4.7000000e+00 6.5000000e+00 4.6000000e+00 6.0000000e+00 5.1000000e+00 6.7000000e+00 5.7000000e+00 5.6000000e+00 5.9000000e+00 6.7000000e+00 6.9000000e+00 5.6000000e+00 4.1000000e+00 4.5000000e+00 4.3000000e+00 4.7000000e+00 6.5000000e+00 4.9000000e+00 5.8000000e+00 6.3000000e+00 6.2000000e+00 4.5000000e+00 4.8000000e+00 5.0000000e+00 5.6000000e+00 4.9000000e+00 3.5000000e+00 4.9000000e+00 4.6000000e+00 4.8000000e+00 5.4000000e+00 3.2000000e+00 4.8000000e+00 8.4000000e+00 6.6000000e+00 8.6000000e+00 7.3000000e+00 8.0000000e+00 9.8000000e+00 5.1000000e+00 9.0000000e+00 8.3000000e+00 9.7000000e+00 7.1000000e+00 7.4000000e+00 7.9000000e+00 6.7000000e+00 7.0000000e+00 7.5000000e+00 7.3000000e+00 1.0700000e+01 1.0800000e+01 6.8000000e+00 8.4000000e+00 6.2000000e+00 1.0100000e+01 6.8000000e+00 8.1000000e+00 8.5000000e+00 6.5000000e+00 6.3000000e+00 7.8000000e+00 8.1000000e+00 9.1000000e+00 1.0400000e+01 7.9000000e+00 6.6000000e+00 7.0000000e+00 9.6000000e+00 8.0000000e+00 7.1000000e+00 6.1000000e+00 7.8000000e+00 8.1000000e+00 7.7000000e+00 6.6000000e+00 8.5000000e+00 8.5000000e+00 7.7000000e+00 7.2000000e+00 7.2000000e+00 7.6000000e+00 6.3000000e+00 1.2000000e+00 1.2000000e+00 1.1000000e+00 1.1000000e+00 6.0000000e-01 1.1000000e+00 1.8000000e+00 5.0000000e-01 8.0000000e-01 2.3000000e+00 1.6000000e+00 8.0000000e-01 1.1000000e+00 1.2000000e+00 1.0000000e+00 1.3000000e+00 6.0000000e-01 8.0000000e-01 6.0000000e+00 5.3000000e+00 6.3000000e+00 4.6000000e+00 5.9000000e+00 4.8000000e+00 5.4000000e+00 3.9000000e+00 5.7000000e+00 4.3000000e+00 4.4000000e+00 4.7000000e+00 4.9000000e+00 5.4000000e+00 3.7000000e+00 5.5000000e+00 4.7000000e+00 4.3000000e+00 6.1000000e+00 4.2000000e+00 5.4000000e+00 4.7000000e+00 6.3000000e+00 5.3000000e+00 5.2000000e+00 5.5000000e+00 6.3000000e+00 6.5000000e+00 5.2000000e+00 3.7000000e+00 4.1000000e+00 3.9000000e+00 4.3000000e+00 6.1000000e+00 4.5000000e+00 4.8000000e+00 5.9000000e+00 5.8000000e+00 4.1000000e+00 
4.4000000e+00 4.6000000e+00 5.2000000e+00 4.5000000e+00 3.9000000e+00 4.5000000e+00 4.2000000e+00 4.4000000e+00 5.0000000e+00 3.4000000e+00 4.4000000e+00 7.6000000e+00 6.2000000e+00 8.2000000e+00 6.9000000e+00 7.6000000e+00 9.4000000e+00 5.7000000e+00 8.6000000e+00 7.9000000e+00 8.7000000e+00 6.5000000e+00 7.0000000e+00 7.5000000e+00 6.3000000e+00 6.6000000e+00 6.9000000e+00 6.9000000e+00 9.7000000e+00 1.0400000e+01 6.4000000e+00 7.8000000e+00 5.8000000e+00 9.7000000e+00 6.4000000e+00 7.3000000e+00 7.9000000e+00 6.1000000e+00 5.9000000e+00 7.4000000e+00 7.7000000e+00 8.7000000e+00 9.4000000e+00 7.5000000e+00 6.2000000e+00 6.6000000e+00 9.2000000e+00 7.0000000e+00 6.7000000e+00 5.7000000e+00 7.4000000e+00 7.7000000e+00 7.3000000e+00 6.2000000e+00 7.9000000e+00 7.7000000e+00 7.3000000e+00 6.8000000e+00 6.8000000e+00 6.6000000e+00 5.9000000e+00 6.0000000e-01 1.3000000e+00 1.5000000e+00 1.2000000e+00 1.3000000e+00 2.2000000e+00 9.0000000e-01 1.2000000e+00 2.9000000e+00 2.0000000e+00 1.4000000e+00 1.1000000e+00 1.8000000e+00 6.0000000e-01 1.7000000e+00 6.0000000e-01 1.2000000e+00 7.2000000e+00 6.5000000e+00 7.5000000e+00 5.8000000e+00 7.1000000e+00 6.0000000e+00 6.6000000e+00 4.7000000e+00 6.9000000e+00 5.1000000e+00 5.2000000e+00 5.9000000e+00 6.1000000e+00 6.6000000e+00 4.9000000e+00 6.7000000e+00 5.9000000e+00 5.5000000e+00 7.3000000e+00 5.4000000e+00 6.6000000e+00 5.9000000e+00 7.5000000e+00 6.5000000e+00 6.4000000e+00 6.7000000e+00 7.5000000e+00 7.7000000e+00 6.4000000e+00 4.9000000e+00 5.3000000e+00 5.1000000e+00 5.5000000e+00 7.3000000e+00 5.7000000e+00 6.0000000e+00 7.1000000e+00 7.0000000e+00 5.3000000e+00 5.6000000e+00 5.8000000e+00 6.4000000e+00 5.7000000e+00 4.7000000e+00 5.7000000e+00 5.4000000e+00 5.6000000e+00 6.2000000e+00 4.2000000e+00 5.6000000e+00 8.8000000e+00 7.4000000e+00 9.4000000e+00 8.1000000e+00 8.8000000e+00 1.0600000e+01 6.5000000e+00 9.8000000e+00 9.1000000e+00 9.5000000e+00 7.7000000e+00 8.2000000e+00 8.7000000e+00 7.5000000e+00 7.8000000e+00 8.1000000e+00 8.1000000e+00 1.0100000e+01 1.1600000e+01 7.6000000e+00 9.0000000e+00 7.0000000e+00 1.0900000e+01 7.6000000e+00 8.5000000e+00 9.1000000e+00 7.3000000e+00 7.1000000e+00 8.6000000e+00 8.9000000e+00 9.9000000e+00 9.8000000e+00 8.7000000e+00 7.4000000e+00 7.8000000e+00 1.0400000e+01 8.2000000e+00 7.9000000e+00 6.9000000e+00 8.6000000e+00 8.9000000e+00 8.5000000e+00 7.4000000e+00 9.1000000e+00 8.9000000e+00 8.5000000e+00 8.0000000e+00 8.0000000e+00 7.8000000e+00 7.1000000e+00 1.9000000e+00 1.7000000e+00 8.0000000e-01 1.9000000e+00 2.4000000e+00 1.3000000e+00 1.4000000e+00 3.1000000e+00 2.2000000e+00 1.8000000e+00 1.5000000e+00 2.0000000e+00 1.0000000e+00 1.9000000e+00 8.0000000e-01 1.4000000e+00 7.0000000e+00 6.3000000e+00 7.3000000e+00 5.6000000e+00 6.9000000e+00 5.8000000e+00 6.4000000e+00 5.1000000e+00 6.7000000e+00 5.5000000e+00 5.6000000e+00 5.7000000e+00 5.9000000e+00 6.4000000e+00 4.7000000e+00 6.5000000e+00 5.7000000e+00 5.3000000e+00 7.1000000e+00 5.2000000e+00 6.4000000e+00 5.7000000e+00 7.3000000e+00 6.3000000e+00 6.2000000e+00 6.5000000e+00 7.3000000e+00 7.5000000e+00 6.2000000e+00 4.7000000e+00 5.1000000e+00 4.9000000e+00 5.3000000e+00 7.1000000e+00 5.7000000e+00 5.8000000e+00 6.9000000e+00 6.8000000e+00 5.1000000e+00 5.4000000e+00 5.6000000e+00 6.2000000e+00 5.5000000e+00 5.1000000e+00 5.5000000e+00 5.2000000e+00 5.4000000e+00 6.0000000e+00 4.6000000e+00 5.4000000e+00 8.6000000e+00 7.2000000e+00 9.2000000e+00 7.9000000e+00 8.6000000e+00 1.0400000e+01 6.9000000e+00 9.6000000e+00 8.9000000e+00 
9.3000000e+00 7.5000000e+00 8.0000000e+00 8.5000000e+00 7.3000000e+00 7.6000000e+00 7.9000000e+00 7.9000000e+00 9.9000000e+00 1.1400000e+01 7.4000000e+00 8.8000000e+00 6.8000000e+00 1.0700000e+01 7.4000000e+00 8.3000000e+00 8.9000000e+00 7.1000000e+00 6.9000000e+00 8.4000000e+00 8.7000000e+00 9.7000000e+00 9.6000000e+00 8.5000000e+00 7.2000000e+00 7.6000000e+00 1.0200000e+01 8.0000000e+00 7.7000000e+00 6.7000000e+00 8.4000000e+00 8.7000000e+00 8.3000000e+00 7.2000000e+00 8.9000000e+00 8.7000000e+00 8.3000000e+00 7.8000000e+00 7.8000000e+00 7.6000000e+00 6.9000000e+00 6.0000000e-01 1.3000000e+00 0.0000000e+00 9.0000000e-01 6.0000000e-01 9.0000000e-01 1.6000000e+00 9.0000000e-01 1.1000000e+00 1.6000000e+00 5.0000000e-01 1.1000000e+00 6.0000000e-01 1.1000000e+00 5.0000000e-01 6.7000000e+00 6.0000000e+00 6.8000000e+00 5.1000000e+00 6.4000000e+00 5.3000000e+00 6.3000000e+00 3.4000000e+00 6.2000000e+00 4.4000000e+00 4.1000000e+00 5.2000000e+00 5.4000000e+00 5.9000000e+00 4.2000000e+00 6.0000000e+00 5.2000000e+00 4.8000000e+00 6.6000000e+00 4.7000000e+00 6.1000000e+00 5.2000000e+00 6.8000000e+00 5.8000000e+00 5.7000000e+00 6.0000000e+00 6.8000000e+00 7.0000000e+00 5.7000000e+00 4.2000000e+00 4.6000000e+00 4.4000000e+00 4.8000000e+00 6.6000000e+00 5.0000000e+00 5.9000000e+00 6.4000000e+00 6.3000000e+00 4.6000000e+00 4.9000000e+00 5.1000000e+00 5.7000000e+00 5.0000000e+00 3.6000000e+00 5.0000000e+00 4.7000000e+00 4.9000000e+00 5.5000000e+00 3.3000000e+00 4.9000000e+00 8.5000000e+00 6.7000000e+00 8.7000000e+00 7.4000000e+00 8.1000000e+00 9.9000000e+00 5.2000000e+00 9.1000000e+00 8.4000000e+00 9.8000000e+00 7.2000000e+00 7.5000000e+00 8.0000000e+00 6.8000000e+00 7.1000000e+00 7.6000000e+00 7.4000000e+00 1.0800000e+01 1.0900000e+01 6.9000000e+00 8.5000000e+00 6.3000000e+00 1.0200000e+01 6.9000000e+00 8.2000000e+00 8.6000000e+00 6.6000000e+00 6.4000000e+00 7.9000000e+00 8.2000000e+00 9.2000000e+00 1.0500000e+01 8.0000000e+00 6.7000000e+00 7.1000000e+00 9.7000000e+00 8.1000000e+00 7.2000000e+00 6.2000000e+00 7.9000000e+00 8.2000000e+00 7.8000000e+00 6.7000000e+00 8.6000000e+00 8.6000000e+00 7.8000000e+00 7.3000000e+00 7.3000000e+00 7.7000000e+00 6.4000000e+00 9.0000000e-01 6.0000000e-01 9.0000000e-01 6.0000000e-01 5.0000000e-01 1.6000000e+00 7.0000000e-01 1.1000000e+00 1.6000000e+00 7.0000000e-01 1.1000000e+00 6.0000000e-01 1.1000000e+00 3.0000000e-01 6.7000000e+00 6.0000000e+00 7.0000000e+00 5.3000000e+00 6.6000000e+00 5.5000000e+00 6.3000000e+00 3.8000000e+00 6.4000000e+00 4.6000000e+00 4.3000000e+00 5.4000000e+00 5.6000000e+00 6.1000000e+00 4.4000000e+00 6.2000000e+00 5.4000000e+00 5.0000000e+00 6.8000000e+00 4.9000000e+00 6.1000000e+00 5.4000000e+00 7.0000000e+00 6.0000000e+00 5.9000000e+00 6.2000000e+00 7.0000000e+00 7.2000000e+00 5.9000000e+00 4.4000000e+00 4.8000000e+00 4.6000000e+00 5.0000000e+00 6.8000000e+00 5.2000000e+00 5.9000000e+00 6.6000000e+00 6.5000000e+00 4.8000000e+00 5.1000000e+00 5.3000000e+00 5.9000000e+00 5.2000000e+00 3.8000000e+00 5.2000000e+00 4.9000000e+00 5.1000000e+00 5.7000000e+00 3.5000000e+00 5.1000000e+00 8.5000000e+00 6.9000000e+00 8.9000000e+00 7.6000000e+00 8.3000000e+00 1.0100000e+01 5.6000000e+00 9.3000000e+00 8.6000000e+00 9.8000000e+00 7.2000000e+00 7.7000000e+00 8.2000000e+00 7.0000000e+00 7.3000000e+00 7.6000000e+00 7.6000000e+00 1.0800000e+01 1.1100000e+01 7.1000000e+00 8.5000000e+00 6.5000000e+00 1.0400000e+01 7.1000000e+00 8.2000000e+00 8.6000000e+00 6.8000000e+00 6.6000000e+00 8.1000000e+00 8.4000000e+00 9.4000000e+00 1.0500000e+01 8.2000000e+00 
6.9000000e+00 7.3000000e+00 9.9000000e+00 8.1000000e+00 7.4000000e+00 6.4000000e+00 8.1000000e+00 8.4000000e+00 8.0000000e+00 6.9000000e+00 8.6000000e+00 8.6000000e+00 8.0000000e+00 7.5000000e+00 7.5000000e+00 7.7000000e+00 6.6000000e+00 1.3000000e+00 1.6000000e+00 7.0000000e-01 6.0000000e-01 2.3000000e+00 1.4000000e+00 1.2000000e+00 1.5000000e+00 1.4000000e+00 1.0000000e+00 1.3000000e+00 6.0000000e-01 8.0000000e-01 6.4000000e+00 5.7000000e+00 6.7000000e+00 5.0000000e+00 6.3000000e+00 5.2000000e+00 5.8000000e+00 4.5000000e+00 6.1000000e+00 4.9000000e+00 5.0000000e+00 5.1000000e+00 5.3000000e+00 5.8000000e+00 4.1000000e+00 5.9000000e+00 5.1000000e+00 4.7000000e+00 6.5000000e+00 4.6000000e+00 5.8000000e+00 5.1000000e+00 6.7000000e+00 5.7000000e+00 5.6000000e+00 5.9000000e+00 6.7000000e+00 6.9000000e+00 5.6000000e+00 4.1000000e+00 4.5000000e+00 4.3000000e+00 4.7000000e+00 6.5000000e+00 5.1000000e+00 5.2000000e+00 6.3000000e+00 6.2000000e+00 4.5000000e+00 4.8000000e+00 5.0000000e+00 5.6000000e+00 4.9000000e+00 4.5000000e+00 4.9000000e+00 4.6000000e+00 4.8000000e+00 5.4000000e+00 4.0000000e+00 4.8000000e+00 8.0000000e+00 6.6000000e+00 8.6000000e+00 7.3000000e+00 8.0000000e+00 9.8000000e+00 6.3000000e+00 9.0000000e+00 8.3000000e+00 8.9000000e+00 6.9000000e+00 7.4000000e+00 7.9000000e+00 6.7000000e+00 7.0000000e+00 7.3000000e+00 7.3000000e+00 9.9000000e+00 1.0800000e+01 6.8000000e+00 8.2000000e+00 6.2000000e+00 1.0100000e+01 6.8000000e+00 7.7000000e+00 8.3000000e+00 6.5000000e+00 6.3000000e+00 7.8000000e+00 8.1000000e+00 9.1000000e+00 9.6000000e+00 7.9000000e+00 6.6000000e+00 7.0000000e+00 9.6000000e+00 7.4000000e+00 7.1000000e+00 6.1000000e+00 7.8000000e+00 8.1000000e+00 7.7000000e+00 6.6000000e+00 8.3000000e+00 8.1000000e+00 7.7000000e+00 7.2000000e+00 7.2000000e+00 7.0000000e+00 6.3000000e+00 9.0000000e-01 6.0000000e-01 9.0000000e-01 1.6000000e+00 9.0000000e-01 1.1000000e+00 1.6000000e+00 5.0000000e-01 1.1000000e+00 6.0000000e-01 1.1000000e+00 5.0000000e-01 6.7000000e+00 6.0000000e+00 6.8000000e+00 5.1000000e+00 6.4000000e+00 5.3000000e+00 6.3000000e+00 3.4000000e+00 6.2000000e+00 4.4000000e+00 4.1000000e+00 5.2000000e+00 5.4000000e+00 5.9000000e+00 4.2000000e+00 6.0000000e+00 5.2000000e+00 4.8000000e+00 6.6000000e+00 4.7000000e+00 6.1000000e+00 5.2000000e+00 6.8000000e+00 5.8000000e+00 5.7000000e+00 6.0000000e+00 6.8000000e+00 7.0000000e+00 5.7000000e+00 4.2000000e+00 4.6000000e+00 4.4000000e+00 4.8000000e+00 6.6000000e+00 5.0000000e+00 5.9000000e+00 6.4000000e+00 6.3000000e+00 4.6000000e+00 4.9000000e+00 5.1000000e+00 5.7000000e+00 5.0000000e+00 3.6000000e+00 5.0000000e+00 4.7000000e+00 4.9000000e+00 5.5000000e+00 3.3000000e+00 4.9000000e+00 8.5000000e+00 6.7000000e+00 8.7000000e+00 7.4000000e+00 8.1000000e+00 9.9000000e+00 5.2000000e+00 9.1000000e+00 8.4000000e+00 9.8000000e+00 7.2000000e+00 7.5000000e+00 8.0000000e+00 6.8000000e+00 7.1000000e+00 7.6000000e+00 7.4000000e+00 1.0800000e+01 1.0900000e+01 6.9000000e+00 8.5000000e+00 6.3000000e+00 1.0200000e+01 6.9000000e+00 8.2000000e+00 8.6000000e+00 6.6000000e+00 6.4000000e+00 7.9000000e+00 8.2000000e+00 9.2000000e+00 1.0500000e+01 8.0000000e+00 6.7000000e+00 7.1000000e+00 9.7000000e+00 8.1000000e+00 7.2000000e+00 6.2000000e+00 7.9000000e+00 8.2000000e+00 7.8000000e+00 6.7000000e+00 8.6000000e+00 8.6000000e+00 7.8000000e+00 7.3000000e+00 7.3000000e+00 7.7000000e+00 6.4000000e+00 1.3000000e+00 1.2000000e+00 9.0000000e-01 2.0000000e-01 1.8000000e+00 2.3000000e+00 6.0000000e-01 1.8000000e+00 5.0000000e-01 1.8000000e+00 1.0000000e+00 
7.4000000e+00 6.7000000e+00 7.5000000e+00 5.6000000e+00 6.9000000e+00 5.8000000e+00 7.0000000e+00 3.9000000e+00 6.7000000e+00 4.9000000e+00 4.6000000e+00 5.7000000e+00 5.9000000e+00 6.4000000e+00 4.7000000e+00 6.7000000e+00 5.7000000e+00 5.3000000e+00 7.1000000e+00 5.2000000e+00 6.8000000e+00 5.7000000e+00 7.3000000e+00 6.3000000e+00 6.2000000e+00 6.5000000e+00 7.3000000e+00 7.5000000e+00 6.2000000e+00 4.7000000e+00 5.1000000e+00 4.9000000e+00 5.3000000e+00 7.1000000e+00 5.5000000e+00 6.6000000e+00 7.1000000e+00 6.8000000e+00 5.1000000e+00 5.4000000e+00 5.6000000e+00 6.2000000e+00 5.5000000e+00 4.1000000e+00 5.5000000e+00 5.2000000e+00 5.4000000e+00 6.0000000e+00 3.8000000e+00 5.4000000e+00 9.2000000e+00 7.2000000e+00 9.2000000e+00 7.9000000e+00 8.6000000e+00 1.0400000e+01 5.7000000e+00 9.6000000e+00 8.9000000e+00 1.0500000e+01 7.9000000e+00 8.0000000e+00 8.5000000e+00 7.3000000e+00 7.6000000e+00 8.3000000e+00 7.9000000e+00 1.1500000e+01 1.1400000e+01 7.4000000e+00 9.2000000e+00 6.8000000e+00 1.0700000e+01 7.4000000e+00 8.9000000e+00 9.3000000e+00 7.1000000e+00 6.9000000e+00 8.4000000e+00 8.7000000e+00 9.7000000e+00 1.1200000e+01 8.5000000e+00 7.2000000e+00 7.6000000e+00 1.0200000e+01 8.8000000e+00 7.9000000e+00 6.7000000e+00 8.6000000e+00 8.9000000e+00 8.5000000e+00 7.2000000e+00 9.3000000e+00 9.3000000e+00 8.3000000e+00 7.8000000e+00 7.8000000e+00 8.4000000e+00 6.9000000e+00 5.0000000e-01 2.0000000e+00 1.1000000e+00 7.0000000e-01 1.0000000e+00 9.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 6.5000000e+00 5.8000000e+00 6.8000000e+00 5.1000000e+00 6.4000000e+00 5.3000000e+00 5.9000000e+00 3.8000000e+00 6.2000000e+00 4.4000000e+00 4.3000000e+00 5.2000000e+00 5.4000000e+00 5.9000000e+00 4.2000000e+00 6.0000000e+00 5.2000000e+00 4.8000000e+00 6.6000000e+00 4.7000000e+00 5.9000000e+00 5.2000000e+00 6.8000000e+00 5.8000000e+00 5.7000000e+00 6.0000000e+00 6.8000000e+00 7.0000000e+00 5.7000000e+00 4.2000000e+00 4.6000000e+00 4.4000000e+00 4.8000000e+00 6.6000000e+00 5.0000000e+00 5.3000000e+00 6.4000000e+00 6.3000000e+00 4.6000000e+00 4.9000000e+00 5.1000000e+00 5.7000000e+00 5.0000000e+00 3.8000000e+00 5.0000000e+00 4.7000000e+00 4.9000000e+00 5.5000000e+00 3.3000000e+00 4.9000000e+00 8.1000000e+00 6.7000000e+00 8.7000000e+00 7.4000000e+00 8.1000000e+00 9.9000000e+00 5.6000000e+00 9.1000000e+00 8.4000000e+00 9.2000000e+00 7.0000000e+00 7.5000000e+00 8.0000000e+00 6.8000000e+00 7.1000000e+00 7.4000000e+00 7.4000000e+00 1.0200000e+01 1.0900000e+01 6.9000000e+00 8.3000000e+00 6.3000000e+00 1.0200000e+01 6.9000000e+00 7.8000000e+00 8.4000000e+00 6.6000000e+00 6.4000000e+00 7.9000000e+00 8.2000000e+00 9.2000000e+00 9.9000000e+00 8.0000000e+00 6.7000000e+00 7.1000000e+00 9.7000000e+00 7.5000000e+00 7.2000000e+00 6.2000000e+00 7.9000000e+00 8.2000000e+00 7.8000000e+00 6.7000000e+00 8.4000000e+00 8.2000000e+00 7.8000000e+00 7.3000000e+00 7.3000000e+00 7.1000000e+00 6.4000000e+00 1.7000000e+00 1.0000000e+00 6.0000000e-01 1.1000000e+00 8.0000000e-01 8.0000000e-01 9.0000000e-01 8.0000000e-01 4.0000000e-01 6.8000000e+00 6.1000000e+00 7.1000000e+00 5.4000000e+00 6.7000000e+00 5.6000000e+00 6.2000000e+00 3.9000000e+00 6.5000000e+00 4.7000000e+00 4.4000000e+00 5.5000000e+00 5.7000000e+00 6.2000000e+00 4.5000000e+00 6.3000000e+00 5.5000000e+00 5.1000000e+00 6.9000000e+00 5.0000000e+00 6.2000000e+00 5.5000000e+00 7.1000000e+00 6.1000000e+00 6.0000000e+00 6.3000000e+00 7.1000000e+00 7.3000000e+00 6.0000000e+00 4.5000000e+00 4.9000000e+00 4.7000000e+00 5.1000000e+00 6.9000000e+00 
5.3000000e+00 5.6000000e+00 6.7000000e+00 6.6000000e+00 4.9000000e+00 5.2000000e+00 5.4000000e+00 6.0000000e+00 5.3000000e+00 3.9000000e+00 5.3000000e+00 5.0000000e+00 5.2000000e+00 5.8000000e+00 3.6000000e+00 5.2000000e+00 8.4000000e+00 7.0000000e+00 9.0000000e+00 7.7000000e+00 8.4000000e+00 1.0200000e+01 5.7000000e+00 9.4000000e+00 8.7000000e+00 9.3000000e+00 7.3000000e+00 7.8000000e+00 8.3000000e+00 7.1000000e+00 7.4000000e+00 7.7000000e+00 7.7000000e+00 1.0300000e+01 1.1200000e+01 7.2000000e+00 8.6000000e+00 6.6000000e+00 1.0500000e+01 7.2000000e+00 8.1000000e+00 8.7000000e+00 6.9000000e+00 6.7000000e+00 8.2000000e+00 8.5000000e+00 9.5000000e+00 1.0000000e+01 8.3000000e+00 7.0000000e+00 7.4000000e+00 1.0000000e+01 7.8000000e+00 7.5000000e+00 6.5000000e+00 8.2000000e+00 8.5000000e+00 8.1000000e+00 7.0000000e+00 8.7000000e+00 8.5000000e+00 8.1000000e+00 7.6000000e+00 7.6000000e+00 7.4000000e+00 6.7000000e+00 1.1000000e+00 2.3000000e+00 2.8000000e+00 1.1000000e+00 2.5000000e+00 1.2000000e+00 2.5000000e+00 1.7000000e+00 7.9000000e+00 7.2000000e+00 8.0000000e+00 4.7000000e+00 7.0000000e+00 5.9000000e+00 7.5000000e+00 3.2000000e+00 7.0000000e+00 4.8000000e+00 3.7000000e+00 6.2000000e+00 5.0000000e+00 6.7000000e+00 5.0000000e+00 7.2000000e+00 6.2000000e+00 5.2000000e+00 6.2000000e+00 4.7000000e+00 7.3000000e+00 5.8000000e+00 6.8000000e+00 6.4000000e+00 6.5000000e+00 7.0000000e+00 7.4000000e+00 8.0000000e+00 6.5000000e+00 4.4000000e+00 4.4000000e+00 4.2000000e+00 5.2000000e+00 7.0000000e+00 6.0000000e+00 7.1000000e+00 7.6000000e+00 5.9000000e+00 5.6000000e+00 4.9000000e+00 5.3000000e+00 6.7000000e+00 5.2000000e+00 3.2000000e+00 5.4000000e+00 5.7000000e+00 5.7000000e+00 6.3000000e+00 3.3000000e+00 5.5000000e+00 9.7000000e+00 7.1000000e+00 9.7000000e+00 8.2000000e+00 9.1000000e+00 1.0900000e+01 5.2000000e+00 9.9000000e+00 8.4000000e+00 1.1000000e+01 8.4000000e+00 7.9000000e+00 9.0000000e+00 6.8000000e+00 7.7000000e+00 8.8000000e+00 8.4000000e+00 1.2000000e+01 1.1100000e+01 6.5000000e+00 9.7000000e+00 6.9000000e+00 1.0800000e+01 7.3000000e+00 9.4000000e+00 9.8000000e+00 7.2000000e+00 7.4000000e+00 8.5000000e+00 9.2000000e+00 9.8000000e+00 1.1700000e+01 8.6000000e+00 7.3000000e+00 7.3000000e+00 1.0700000e+01 9.3000000e+00 8.4000000e+00 7.2000000e+00 9.1000000e+00 9.4000000e+00 9.0000000e+00 7.1000000e+00 9.8000000e+00 9.8000000e+00 8.8000000e+00 7.3000000e+00 8.3000000e+00 8.9000000e+00 7.4000000e+00 1.6000000e+00 2.1000000e+00 8.0000000e-01 1.6000000e+00 3.0000000e-01 1.6000000e+00 8.0000000e-01 7.2000000e+00 6.5000000e+00 7.5000000e+00 5.8000000e+00 7.1000000e+00 6.0000000e+00 6.8000000e+00 4.1000000e+00 6.9000000e+00 5.1000000e+00 4.8000000e+00 5.9000000e+00 6.1000000e+00 6.6000000e+00 4.9000000e+00 6.7000000e+00 5.9000000e+00 5.5000000e+00 7.3000000e+00 5.4000000e+00 6.6000000e+00 5.9000000e+00 7.5000000e+00 6.5000000e+00 6.4000000e+00 6.7000000e+00 7.5000000e+00 7.7000000e+00 6.4000000e+00 4.9000000e+00 5.3000000e+00 5.1000000e+00 5.5000000e+00 7.3000000e+00 5.7000000e+00 6.4000000e+00 7.1000000e+00 7.0000000e+00 5.3000000e+00 5.6000000e+00 5.8000000e+00 6.4000000e+00 5.7000000e+00 4.3000000e+00 5.7000000e+00 5.4000000e+00 5.6000000e+00 6.2000000e+00 4.0000000e+00 5.6000000e+00 9.0000000e+00 7.4000000e+00 9.4000000e+00 8.1000000e+00 8.8000000e+00 1.0600000e+01 5.9000000e+00 9.8000000e+00 9.1000000e+00 1.0300000e+01 7.7000000e+00 8.2000000e+00 8.7000000e+00 7.5000000e+00 7.8000000e+00 8.1000000e+00 8.1000000e+00 1.1300000e+01 1.1600000e+01 7.6000000e+00 9.0000000e+00 7.0000000e+00 
1.0900000e+01 7.6000000e+00 8.7000000e+00 9.1000000e+00 7.3000000e+00 7.1000000e+00 8.6000000e+00 8.9000000e+00 9.9000000e+00 1.1000000e+01 8.7000000e+00 7.4000000e+00 7.8000000e+00 1.0400000e+01 8.6000000e+00 7.9000000e+00 6.9000000e+00 8.6000000e+00 8.9000000e+00 8.5000000e+00 7.4000000e+00 9.1000000e+00 9.1000000e+00 8.5000000e+00 8.0000000e+00 8.0000000e+00 8.2000000e+00 7.1000000e+00 9.0000000e-01 1.2000000e+00 8.0000000e-01 1.3000000e+00 1.0000000e+00 8.0000000e-01 6.2000000e+00 5.5000000e+00 6.5000000e+00 4.8000000e+00 6.1000000e+00 5.0000000e+00 5.6000000e+00 3.3000000e+00 5.9000000e+00 4.1000000e+00 3.8000000e+00 4.9000000e+00 5.1000000e+00 5.6000000e+00 3.9000000e+00 5.7000000e+00 4.9000000e+00 4.5000000e+00 6.3000000e+00 4.4000000e+00 5.6000000e+00 4.9000000e+00 6.5000000e+00 5.5000000e+00 5.4000000e+00 5.7000000e+00 6.5000000e+00 6.7000000e+00 5.4000000e+00 3.9000000e+00 4.3000000e+00 4.1000000e+00 4.5000000e+00 6.3000000e+00 4.7000000e+00 5.0000000e+00 6.1000000e+00 6.0000000e+00 4.3000000e+00 4.6000000e+00 4.8000000e+00 5.4000000e+00 4.7000000e+00 3.3000000e+00 4.7000000e+00 4.4000000e+00 4.6000000e+00 5.2000000e+00 3.0000000e+00 4.6000000e+00 7.8000000e+00 6.4000000e+00 8.4000000e+00 7.1000000e+00 7.8000000e+00 9.6000000e+00 5.1000000e+00 8.8000000e+00 8.1000000e+00 8.7000000e+00 6.7000000e+00 7.2000000e+00 7.7000000e+00 6.5000000e+00 6.8000000e+00 7.1000000e+00 7.1000000e+00 9.7000000e+00 1.0600000e+01 6.6000000e+00 8.0000000e+00 6.0000000e+00 9.9000000e+00 6.6000000e+00 7.5000000e+00 8.1000000e+00 6.3000000e+00 6.1000000e+00 7.6000000e+00 7.9000000e+00 8.9000000e+00 9.4000000e+00 7.7000000e+00 6.4000000e+00 6.8000000e+00 9.4000000e+00 7.2000000e+00 6.9000000e+00 5.9000000e+00 7.6000000e+00 7.9000000e+00 7.5000000e+00 6.4000000e+00 8.1000000e+00 7.9000000e+00 7.5000000e+00 7.0000000e+00 7.0000000e+00 6.8000000e+00 6.1000000e+00 1.7000000e+00 5.0000000e-01 1.8000000e+00 9.0000000e-01 1.3000000e+00 6.3000000e+00 5.6000000e+00 6.6000000e+00 4.9000000e+00 6.2000000e+00 5.1000000e+00 5.7000000e+00 3.6000000e+00 6.0000000e+00 4.2000000e+00 4.1000000e+00 5.0000000e+00 5.2000000e+00 5.7000000e+00 4.0000000e+00 5.8000000e+00 5.0000000e+00 4.6000000e+00 6.4000000e+00 4.5000000e+00 5.7000000e+00 5.0000000e+00 6.6000000e+00 5.6000000e+00 5.5000000e+00 5.8000000e+00 6.6000000e+00 6.8000000e+00 5.5000000e+00 4.0000000e+00 4.4000000e+00 4.2000000e+00 4.6000000e+00 6.4000000e+00 4.8000000e+00 5.1000000e+00 6.2000000e+00 6.1000000e+00 4.4000000e+00 4.7000000e+00 4.9000000e+00 5.5000000e+00 4.8000000e+00 3.6000000e+00 4.8000000e+00 4.5000000e+00 4.7000000e+00 5.3000000e+00 3.1000000e+00 4.7000000e+00 7.9000000e+00 6.5000000e+00 8.5000000e+00 7.2000000e+00 7.9000000e+00 9.7000000e+00 5.4000000e+00 8.9000000e+00 8.2000000e+00 8.6000000e+00 6.8000000e+00 7.3000000e+00 7.8000000e+00 6.6000000e+00 6.9000000e+00 7.2000000e+00 7.2000000e+00 9.2000000e+00 1.0700000e+01 6.7000000e+00 8.1000000e+00 6.1000000e+00 1.0000000e+01 6.7000000e+00 7.6000000e+00 8.2000000e+00 6.4000000e+00 6.2000000e+00 7.7000000e+00 8.0000000e+00 9.0000000e+00 8.9000000e+00 7.8000000e+00 6.5000000e+00 6.9000000e+00 9.5000000e+00 7.3000000e+00 7.0000000e+00 6.0000000e+00 7.7000000e+00 8.0000000e+00 7.6000000e+00 6.5000000e+00 8.2000000e+00 8.0000000e+00 7.6000000e+00 7.1000000e+00 7.1000000e+00 6.9000000e+00 6.2000000e+00 1.4000000e+00 5.0000000e-01 1.4000000e+00 6.0000000e-01 6.8000000e+00 6.1000000e+00 6.9000000e+00 5.0000000e+00 6.3000000e+00 5.2000000e+00 6.4000000e+00 3.3000000e+00 6.1000000e+00 4.3000000e+00 
4.0000000e+00 5.1000000e+00 5.3000000e+00 5.8000000e+00 4.1000000e+00 6.1000000e+00 5.1000000e+00 4.7000000e+00 6.5000000e+00 4.6000000e+00 6.2000000e+00 5.1000000e+00 6.7000000e+00 5.7000000e+00 5.6000000e+00 5.9000000e+00 6.7000000e+00 6.9000000e+00 5.6000000e+00 4.1000000e+00 4.5000000e+00 4.3000000e+00 4.7000000e+00 6.5000000e+00 4.9000000e+00 6.0000000e+00 6.5000000e+00 6.2000000e+00 4.5000000e+00 4.8000000e+00 5.0000000e+00 5.6000000e+00 4.9000000e+00 3.5000000e+00 4.9000000e+00 4.6000000e+00 4.8000000e+00 5.4000000e+00 3.2000000e+00 4.8000000e+00 8.6000000e+00 6.6000000e+00 8.6000000e+00 7.3000000e+00 8.0000000e+00 9.8000000e+00 5.1000000e+00 9.0000000e+00 8.3000000e+00 9.9000000e+00 7.3000000e+00 7.4000000e+00 7.9000000e+00 6.7000000e+00 7.0000000e+00 7.7000000e+00 7.3000000e+00 1.0900000e+01 1.0800000e+01 6.8000000e+00 8.6000000e+00 6.2000000e+00 1.0100000e+01 6.8000000e+00 8.3000000e+00 8.7000000e+00 6.5000000e+00 6.3000000e+00 7.8000000e+00 8.1000000e+00 9.1000000e+00 1.0600000e+01 7.9000000e+00 6.6000000e+00 7.0000000e+00 9.6000000e+00 8.2000000e+00 7.3000000e+00 6.1000000e+00 8.0000000e+00 8.3000000e+00 7.9000000e+00 6.6000000e+00 8.7000000e+00 8.7000000e+00 7.7000000e+00 7.2000000e+00 7.2000000e+00 7.8000000e+00 6.3000000e+00 1.3000000e+00 4.0000000e-01 8.0000000e-01 6.8000000e+00 6.1000000e+00 7.1000000e+00 5.4000000e+00 6.7000000e+00 5.6000000e+00 6.2000000e+00 4.1000000e+00 6.5000000e+00 4.7000000e+00 4.6000000e+00 5.5000000e+00 5.7000000e+00 6.2000000e+00 4.5000000e+00 6.3000000e+00 5.5000000e+00 5.1000000e+00 6.9000000e+00 5.0000000e+00 6.2000000e+00 5.5000000e+00 7.1000000e+00 6.1000000e+00 6.0000000e+00 6.3000000e+00 7.1000000e+00 7.3000000e+00 6.0000000e+00 4.5000000e+00 4.9000000e+00 4.7000000e+00 5.1000000e+00 6.9000000e+00 5.3000000e+00 5.6000000e+00 6.7000000e+00 6.6000000e+00 4.9000000e+00 5.2000000e+00 5.4000000e+00 6.0000000e+00 5.3000000e+00 4.1000000e+00 5.3000000e+00 5.0000000e+00 5.2000000e+00 5.8000000e+00 3.6000000e+00 5.2000000e+00 8.4000000e+00 7.0000000e+00 9.0000000e+00 7.7000000e+00 8.4000000e+00 1.0200000e+01 5.9000000e+00 9.4000000e+00 8.7000000e+00 9.1000000e+00 7.3000000e+00 7.8000000e+00 8.3000000e+00 7.1000000e+00 7.4000000e+00 7.7000000e+00 7.7000000e+00 9.7000000e+00 1.1200000e+01 7.2000000e+00 8.6000000e+00 6.6000000e+00 1.0500000e+01 7.2000000e+00 8.1000000e+00 8.7000000e+00 6.9000000e+00 6.7000000e+00 8.2000000e+00 8.5000000e+00 9.5000000e+00 9.4000000e+00 8.3000000e+00 7.0000000e+00 7.4000000e+00 1.0000000e+01 7.8000000e+00 7.5000000e+00 6.5000000e+00 8.2000000e+00 8.5000000e+00 8.1000000e+00 7.0000000e+00 8.7000000e+00 8.5000000e+00 8.1000000e+00 7.6000000e+00 7.6000000e+00 7.4000000e+00 6.7000000e+00 1.3000000e+00 5.0000000e-01 6.9000000e+00 6.2000000e+00 7.2000000e+00 5.5000000e+00 6.8000000e+00 5.7000000e+00 6.5000000e+00 3.8000000e+00 6.6000000e+00 4.8000000e+00 4.5000000e+00 5.6000000e+00 5.8000000e+00 6.3000000e+00 4.6000000e+00 6.4000000e+00 5.6000000e+00 5.2000000e+00 7.0000000e+00 5.1000000e+00 6.3000000e+00 5.6000000e+00 7.2000000e+00 6.2000000e+00 6.1000000e+00 6.4000000e+00 7.2000000e+00 7.4000000e+00 6.1000000e+00 4.6000000e+00 5.0000000e+00 4.8000000e+00 5.2000000e+00 7.0000000e+00 5.4000000e+00 6.1000000e+00 6.8000000e+00 6.7000000e+00 5.0000000e+00 5.3000000e+00 5.5000000e+00 6.1000000e+00 5.4000000e+00 4.0000000e+00 5.4000000e+00 5.1000000e+00 5.3000000e+00 5.9000000e+00 3.7000000e+00 5.3000000e+00 8.7000000e+00 7.1000000e+00 9.1000000e+00 7.8000000e+00 8.5000000e+00 1.0300000e+01 5.6000000e+00 9.5000000e+00 
8.8000000e+00 1.0000000e+01 7.4000000e+00 7.9000000e+00 8.4000000e+00 7.2000000e+00 7.5000000e+00 7.8000000e+00 7.8000000e+00 1.1000000e+01 1.1300000e+01 7.3000000e+00 8.7000000e+00 6.7000000e+00 1.0600000e+01 7.3000000e+00 8.4000000e+00 8.8000000e+00 7.0000000e+00 6.8000000e+00 8.3000000e+00 8.6000000e+00 9.6000000e+00 1.0700000e+01 8.4000000e+00 7.1000000e+00 7.5000000e+00 1.0100000e+01 8.3000000e+00 7.6000000e+00 6.6000000e+00 8.3000000e+00 8.6000000e+00 8.2000000e+00 7.1000000e+00 8.8000000e+00 8.8000000e+00 8.2000000e+00 7.7000000e+00 7.7000000e+00 7.9000000e+00 6.8000000e+00 8.0000000e-01 6.6000000e+00 5.9000000e+00 6.9000000e+00 5.2000000e+00 6.5000000e+00 5.4000000e+00 6.0000000e+00 4.3000000e+00 6.3000000e+00 4.7000000e+00 4.8000000e+00 5.3000000e+00 5.5000000e+00 6.0000000e+00 4.3000000e+00 6.1000000e+00 5.3000000e+00 4.9000000e+00 6.7000000e+00 4.8000000e+00 6.0000000e+00 5.3000000e+00 6.9000000e+00 5.9000000e+00 5.8000000e+00 6.1000000e+00 6.9000000e+00 7.1000000e+00 5.8000000e+00 4.3000000e+00 4.7000000e+00 4.5000000e+00 4.9000000e+00 6.7000000e+00 5.1000000e+00 5.4000000e+00 6.5000000e+00 6.4000000e+00 4.7000000e+00 5.0000000e+00 5.2000000e+00 5.8000000e+00 5.1000000e+00 4.3000000e+00 5.1000000e+00 4.8000000e+00 5.0000000e+00 5.6000000e+00 3.8000000e+00 5.0000000e+00 8.2000000e+00 6.8000000e+00 8.8000000e+00 7.5000000e+00 8.2000000e+00 1.0000000e+01 6.1000000e+00 9.2000000e+00 8.5000000e+00 8.9000000e+00 7.1000000e+00 7.6000000e+00 8.1000000e+00 6.9000000e+00 7.2000000e+00 7.5000000e+00 7.5000000e+00 9.7000000e+00 1.1000000e+01 7.0000000e+00 8.4000000e+00 6.4000000e+00 1.0300000e+01 7.0000000e+00 7.9000000e+00 8.5000000e+00 6.7000000e+00 6.5000000e+00 8.0000000e+00 8.3000000e+00 9.3000000e+00 9.4000000e+00 8.1000000e+00 6.8000000e+00 7.2000000e+00 9.8000000e+00 7.6000000e+00 7.3000000e+00 6.3000000e+00 8.0000000e+00 8.3000000e+00 7.9000000e+00 6.8000000e+00 8.5000000e+00 8.3000000e+00 7.9000000e+00 7.4000000e+00 7.4000000e+00 7.2000000e+00 6.5000000e+00 6.6000000e+00 5.9000000e+00 6.9000000e+00 5.2000000e+00 6.5000000e+00 5.4000000e+00 6.0000000e+00 3.7000000e+00 6.3000000e+00 4.5000000e+00 4.2000000e+00 5.3000000e+00 5.5000000e+00 6.0000000e+00 4.3000000e+00 6.1000000e+00 5.3000000e+00 4.9000000e+00 6.7000000e+00 4.8000000e+00 6.0000000e+00 5.3000000e+00 6.9000000e+00 5.9000000e+00 5.8000000e+00 6.1000000e+00 6.9000000e+00 7.1000000e+00 5.8000000e+00 4.3000000e+00 4.7000000e+00 4.5000000e+00 4.9000000e+00 6.7000000e+00 5.1000000e+00 5.6000000e+00 6.5000000e+00 6.4000000e+00 4.7000000e+00 5.0000000e+00 5.2000000e+00 5.8000000e+00 5.1000000e+00 3.7000000e+00 5.1000000e+00 4.8000000e+00 5.0000000e+00 5.6000000e+00 3.4000000e+00 5.0000000e+00 8.2000000e+00 6.8000000e+00 8.8000000e+00 7.5000000e+00 8.2000000e+00 1.0000000e+01 5.5000000e+00 9.2000000e+00 8.5000000e+00 9.5000000e+00 7.1000000e+00 7.6000000e+00 8.1000000e+00 6.9000000e+00 7.2000000e+00 7.5000000e+00 7.5000000e+00 1.0500000e+01 1.1000000e+01 7.0000000e+00 8.4000000e+00 6.4000000e+00 1.0300000e+01 7.0000000e+00 7.9000000e+00 8.5000000e+00 6.7000000e+00 6.5000000e+00 8.0000000e+00 8.3000000e+00 9.3000000e+00 1.0200000e+01 8.1000000e+00 6.8000000e+00 7.2000000e+00 9.8000000e+00 7.8000000e+00 7.3000000e+00 6.3000000e+00 8.0000000e+00 8.3000000e+00 7.9000000e+00 6.8000000e+00 8.5000000e+00 8.3000000e+00 7.9000000e+00 7.4000000e+00 7.4000000e+00 7.4000000e+00 6.5000000e+00 9.0000000e-01 5.0000000e-01 3.2000000e+00 1.1000000e+00 2.0000000e+00 1.0000000e+00 4.7000000e+00 9.0000000e-01 3.1000000e+00 4.8000000e+00 
1.9000000e+00 3.1000000e+00 1.2000000e+00 2.9000000e+00 7.0000000e-01 1.9000000e+00 2.7000000e+00 2.1000000e+00 3.2000000e+00 1.6000000e+00 2.1000000e+00 1.7000000e+00 1.5000000e+00 1.4000000e+00 9.0000000e-01 7.0000000e-01 1.1000000e+00 1.6000000e+00 3.5000000e+00 3.5000000e+00 3.7000000e+00 2.7000000e+00 2.1000000e+00 2.1000000e+00 1.6000000e+00 5.0000000e-01 2.0000000e+00 2.3000000e+00 3.0000000e+00 2.6000000e+00 1.2000000e+00 2.7000000e+00 4.7000000e+00 2.5000000e+00 2.2000000e+00 2.2000000e+00 1.6000000e+00 4.6000000e+00 2.4000000e+00 3.2000000e+00 2.6000000e+00 2.2000000e+00 2.3000000e+00 2.6000000e+00 3.4000000e+00 3.3000000e+00 2.6000000e+00 2.5000000e+00 3.1000000e+00 1.5000000e+00 2.2000000e+00 1.9000000e+00 2.9000000e+00 3.0000000e+00 2.1000000e+00 1.9000000e+00 4.1000000e+00 4.4000000e+00 2.4000000e+00 2.0000000e+00 2.6000000e+00 3.7000000e+00 1.8000000e+00 2.1000000e+00 1.9000000e+00 1.7000000e+00 1.7000000e+00 2.6000000e+00 1.7000000e+00 2.7000000e+00 3.8000000e+00 2.7000000e+00 1.6000000e+00 2.4000000e+00 3.2000000e+00 2.8000000e+00 1.9000000e+00 1.7000000e+00 1.6000000e+00 2.3000000e+00 1.5000000e+00 2.6000000e+00 2.3000000e+00 2.5000000e+00 1.9000000e+00 2.2000000e+00 1.8000000e+00 2.6000000e+00 2.1000000e+00 1.0000000e+00 2.5000000e+00 6.0000000e-01 1.3000000e+00 5.0000000e-01 4.0000000e+00 8.0000000e-01 2.4000000e+00 4.1000000e+00 1.0000000e+00 2.4000000e+00 9.0000000e-01 2.2000000e+00 6.0000000e-01 1.0000000e+00 2.0000000e+00 1.2000000e+00 2.5000000e+00 1.1000000e+00 1.4000000e+00 1.2000000e+00 1.2000000e+00 7.0000000e-01 6.0000000e-01 1.2000000e+00 1.2000000e+00 7.0000000e-01 2.8000000e+00 2.8000000e+00 3.0000000e+00 2.0000000e+00 1.6000000e+00 1.2000000e+00 7.0000000e-01 6.0000000e-01 1.3000000e+00 1.6000000e+00 2.3000000e+00 1.9000000e+00 7.0000000e-01 2.0000000e+00 4.0000000e+00 1.8000000e+00 1.5000000e+00 1.5000000e+00 9.0000000e-01 3.9000000e+00 1.7000000e+00 2.7000000e+00 2.1000000e+00 2.9000000e+00 1.8000000e+00 2.3000000e+00 4.1000000e+00 2.4000000e+00 3.3000000e+00 2.6000000e+00 3.8000000e+00 1.2000000e+00 1.7000000e+00 2.2000000e+00 2.4000000e+00 2.5000000e+00 1.6000000e+00 1.6000000e+00 4.8000000e+00 5.1000000e+00 1.9000000e+00 2.5000000e+00 2.1000000e+00 4.4000000e+00 1.3000000e+00 2.2000000e+00 2.6000000e+00 1.2000000e+00 1.2000000e+00 2.1000000e+00 2.4000000e+00 3.4000000e+00 4.5000000e+00 2.2000000e+00 1.1000000e+00 2.1000000e+00 3.9000000e+00 2.3000000e+00 1.4000000e+00 1.2000000e+00 2.1000000e+00 2.4000000e+00 2.0000000e+00 2.1000000e+00 2.6000000e+00 2.6000000e+00 2.0000000e+00 1.7000000e+00 1.5000000e+00 2.1000000e+00 1.6000000e+00 3.3000000e+00 1.0000000e+00 2.1000000e+00 1.1000000e+00 4.8000000e+00 1.0000000e+00 3.2000000e+00 4.9000000e+00 1.8000000e+00 3.2000000e+00 1.3000000e+00 3.0000000e+00 8.0000000e-01 1.8000000e+00 2.8000000e+00 2.0000000e+00 3.3000000e+00 1.5000000e+00 2.2000000e+00 1.2000000e+00 1.6000000e+00 1.5000000e+00 1.0000000e+00 6.0000000e-01 6.0000000e-01 1.5000000e+00 3.6000000e+00 3.6000000e+00 3.8000000e+00 2.8000000e+00 1.6000000e+00 2.0000000e+00 1.7000000e+00 4.0000000e-01 2.1000000e+00 2.4000000e+00 3.1000000e+00 2.7000000e+00 1.3000000e+00 2.8000000e+00 4.8000000e+00 2.6000000e+00 2.3000000e+00 2.3000000e+00 1.7000000e+00 4.7000000e+00 2.5000000e+00 2.9000000e+00 2.1000000e+00 1.9000000e+00 1.8000000e+00 2.1000000e+00 3.1000000e+00 3.2000000e+00 2.3000000e+00 2.0000000e+00 3.0000000e+00 1.2000000e+00 1.7000000e+00 1.4000000e+00 2.4000000e+00 2.5000000e+00 1.8000000e+00 1.4000000e+00 4.0000000e+00 4.1000000e+00 
1.9000000e+00 1.7000000e+00 2.1000000e+00 3.4000000e+00 1.3000000e+00 1.8000000e+00 1.8000000e+00 1.4000000e+00 1.2000000e+00 2.1000000e+00 1.4000000e+00 2.4000000e+00 3.7000000e+00 2.2000000e+00 1.1000000e+00 2.1000000e+00 2.9000000e+00 2.5000000e+00 1.4000000e+00 1.4000000e+00 1.1000000e+00 1.8000000e+00 1.0000000e+00 2.1000000e+00 2.0000000e+00 2.2000000e+00 1.4000000e+00 1.7000000e+00 1.3000000e+00 2.3000000e+00 1.6000000e+00 2.3000000e+00 1.2000000e+00 2.8000000e+00 1.7000000e+00 2.3000000e+00 9.0000000e-01 1.6000000e+00 1.5000000e+00 9.0000000e-01 2.0000000e+00 1.1000000e+00 2.5000000e+00 1.5000000e+00 1.1000000e+00 1.5000000e+00 6.0000000e-01 2.6000000e+00 1.1000000e+00 2.1000000e+00 1.9000000e+00 1.8000000e+00 2.3000000e+00 2.7000000e+00 3.3000000e+00 1.8000000e+00 1.3000000e+00 5.0000000e-01 7.0000000e-01 9.0000000e-01 2.3000000e+00 1.5000000e+00 2.4000000e+00 2.9000000e+00 1.2000000e+00 9.0000000e-01 2.0000000e-01 8.0000000e-01 2.0000000e+00 7.0000000e-01 1.5000000e+00 7.0000000e-01 1.2000000e+00 1.0000000e+00 1.6000000e+00 1.8000000e+00 8.0000000e-01 5.0000000e+00 2.4000000e+00 5.0000000e+00 3.5000000e+00 4.4000000e+00 6.2000000e+00 1.7000000e+00 5.2000000e+00 3.7000000e+00 6.3000000e+00 3.7000000e+00 3.2000000e+00 4.3000000e+00 2.1000000e+00 3.0000000e+00 4.1000000e+00 3.7000000e+00 7.3000000e+00 6.4000000e+00 1.8000000e+00 5.0000000e+00 2.2000000e+00 6.1000000e+00 2.6000000e+00 4.7000000e+00 5.1000000e+00 2.5000000e+00 2.7000000e+00 3.8000000e+00 4.5000000e+00 5.1000000e+00 7.0000000e+00 3.9000000e+00 2.6000000e+00 2.6000000e+00 6.0000000e+00 4.6000000e+00 3.7000000e+00 2.5000000e+00 4.4000000e+00 4.7000000e+00 4.3000000e+00 2.4000000e+00 5.1000000e+00 5.1000000e+00 4.1000000e+00 2.6000000e+00 3.6000000e+00 4.2000000e+00 2.7000000e+00 1.1000000e+00 9.0000000e-01 3.8000000e+00 4.0000000e-01 2.2000000e+00 3.9000000e+00 1.2000000e+00 2.2000000e+00 7.0000000e-01 2.2000000e+00 8.0000000e-01 1.2000000e+00 1.8000000e+00 1.0000000e+00 2.3000000e+00 1.5000000e+00 1.2000000e+00 8.0000000e-01 8.0000000e-01 7.0000000e-01 6.0000000e-01 6.0000000e-01 1.0000000e+00 7.0000000e-01 2.6000000e+00 2.6000000e+00 2.8000000e+00 1.8000000e+00 1.2000000e+00 1.4000000e+00 1.3000000e+00 6.0000000e-01 1.1000000e+00 1.8000000e+00 2.1000000e+00 1.7000000e+00 7.0000000e-01 1.8000000e+00 3.8000000e+00 1.6000000e+00 1.7000000e+00 1.5000000e+00 9.0000000e-01 3.7000000e+00 1.5000000e+00 3.1000000e+00 1.7000000e+00 2.7000000e+00 1.6000000e+00 2.1000000e+00 3.9000000e+00 2.2000000e+00 2.9000000e+00 2.0000000e+00 4.0000000e+00 1.4000000e+00 1.3000000e+00 2.0000000e+00 2.0000000e+00 2.1000000e+00 2.0000000e+00 1.4000000e+00 5.0000000e+00 4.5000000e+00 1.5000000e+00 2.7000000e+00 1.7000000e+00 3.8000000e+00 9.0000000e-01 2.4000000e+00 2.8000000e+00 8.0000000e-01 1.2000000e+00 1.7000000e+00 2.2000000e+00 2.8000000e+00 4.7000000e+00 1.8000000e+00 7.0000000e-01 1.7000000e+00 3.7000000e+00 2.7000000e+00 1.6000000e+00 1.2000000e+00 2.1000000e+00 2.4000000e+00 2.0000000e+00 1.7000000e+00 2.8000000e+00 2.8000000e+00 1.8000000e+00 1.3000000e+00 1.3000000e+00 2.5000000e+00 1.6000000e+00 1.6000000e+00 2.7000000e+00 1.1000000e+00 1.3000000e+00 2.8000000e+00 9.0000000e-01 1.7000000e+00 8.0000000e-01 1.1000000e+00 1.5000000e+00 5.0000000e-01 9.0000000e-01 1.3000000e+00 1.2000000e+00 1.4000000e+00 9.0000000e-01 1.5000000e+00 7.0000000e-01 1.0000000e+00 1.3000000e+00 1.5000000e+00 2.1000000e+00 6.0000000e-01 1.5000000e+00 1.5000000e+00 1.7000000e+00 9.0000000e-01 1.3000000e+00 7.0000000e-01 1.2000000e+00 1.7000000e+00 
[… remaining rows of the preceding scipy pdist test-data file elided: machine-generated distance values only …]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cityblock-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cityblock-ml.txt new file mode 100644 index 0000000..8fb22e6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cityblock-ml.txt @@ -0,0 +1 @@
+ [single line of machine-generated city-block (Manhattan) pairwise-distance values elided]
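The pdist-*.txt files in this part of the diff are reference fixtures for SciPy's spatial test suite (scipy/spatial/tests/data/), swept into the commit only because the project's entire virtualenv was checked in. As a minimal, hedged sketch of how data in this format can be produced with scipy.spatial.distance.pdist — none of this code appears in the commit, and the observation matrix X is a hypothetical stand-in, not the data behind the real fixtures:

import numpy as np
from scipy.spatial.distance import pdist

# Hypothetical stand-in data (20 observations, 100 features); the real
# fixture values ship with SciPy's tests.
X = np.random.RandomState(0).rand(20, 100)

# Condensed distance vector: n*(n-1)/2 pairwise city-block (Manhattan)
# distances between the rows of X.
d = pdist(X, metric='cityblock')

# Same exponent formatting as the .txt fixtures, e.g. 3.2420590e+01.
print(' '.join('%.7e' % v for v in d))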
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt new file mode 100644 index 0000000..f297500 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt @@ -0,0 +1 @@
+ [single line of machine-generated correlation-distance values for the iris dataset elided; the payload continues]
2.6917639e-01 3.1391346e-01 2.8619117e-01 3.0434544e-01 2.4844431e-01 2.0773635e-01 2.6148944e-01 2.5886513e-01 2.9555853e-01 2.3241008e-01 2.8723433e-01 3.0077518e-01 1.6999944e-01 2.1692490e-01 2.9290302e-01 2.4423212e-01 3.6894940e-01 2.5556289e-01 3.0695160e-01 2.1997775e-01 3.7292526e-01 3.0427758e-01 2.3385552e-01 2.3106102e-01 2.9243468e-01 3.1048744e-01 2.8298832e-01 1.8662712e-01 2.6007204e-01 2.4232174e-01 2.2560003e-01 4.0265980e-01 3.0762748e-01 2.2151447e-01 2.5355085e-01 3.2493368e-01 2.2408199e-01 2.8382455e-01 3.2448802e-01 2.7442187e-01 2.5162228e-01 2.1940897e-01 2.7915380e-01 2.3127910e-01 2.4702051e-01 2.4019206e-01 1.5301315e-01 2.4619533e-01 5.0391216e-01 4.4483392e-01 4.2228707e-01 4.3731273e-01 4.6617394e-01 4.6750253e-01 4.5367757e-01 4.4636521e-01 4.7842690e-01 3.9329580e-01 3.2434154e-01 4.2091234e-01 3.9272980e-01 4.7962551e-01 4.7998875e-01 3.8052757e-01 3.9422077e-01 3.9365636e-01 5.4778535e-01 4.4784024e-01 3.9985415e-01 4.2545240e-01 4.8476479e-01 3.6622351e-01 3.8794414e-01 3.8577592e-01 3.4462011e-01 3.3712507e-01 4.6543611e-01 3.7346604e-01 4.3442200e-01 3.3762412e-01 4.7437523e-01 3.6087712e-01 4.6258851e-01 4.1954525e-01 4.1507031e-01 3.8935328e-01 3.2880662e-01 3.6009673e-01 4.2314218e-01 3.3549029e-01 4.4483392e-01 4.3505302e-01 4.1710447e-01 3.7450914e-01 4.1581746e-01 3.6597096e-01 3.8346411e-01 3.8386195e-01 2.7739415e-04 8.2117467e-04 2.7843462e-03 4.7394226e-03 3.9365385e-04 1.1964598e-03 4.7400628e-03 2.5527396e-04 3.2634446e-03 3.7103657e-03 3.3188195e-03 8.6302611e-04 1.5635411e-03 6.0189508e-04 5.5859876e-03 3.8282951e-04 7.0925635e-03 5.0273924e-03 7.3470160e-03 1.0223636e-02 1.3503463e-03 9.3049535e-04 1.9663208e-03 2.7155903e-03 5.0798223e-03 5.5875952e-03 3.5987384e-03 2.6550151e-03 4.7394226e-03 3.5923878e-03 3.7937786e-03 4.7394226e-03 6.6480476e-04 1.3814035e-03 1.1581699e-03 3.0091048e-02 1.0888067e-04 1.1634967e-03 1.9052023e-03 5.6332034e-03 7.9034466e-04 3.5005887e-04 1.0179107e-04 1.6076683e-03 2.2018795e-01 2.1841167e-01 2.5888963e-01 3.0298911e-01 2.7568827e-01 2.9345793e-01 2.3845526e-01 1.9853879e-01 2.5133209e-01 2.4870042e-01 2.8491736e-01 2.2273608e-01 2.7678086e-01 2.8993699e-01 1.6165333e-01 2.0762176e-01 2.8220337e-01 2.3432093e-01 3.5743296e-01 2.4549712e-01 2.9602684e-01 2.1063470e-01 3.6116406e-01 2.9338620e-01 2.2421094e-01 2.2149308e-01 2.8182182e-01 2.9956540e-01 2.7243830e-01 1.7796817e-01 2.4995634e-01 2.3251210e-01 2.1609445e-01 3.9047864e-01 2.9675065e-01 2.1202908e-01 2.4353030e-01 3.1395037e-01 2.1453916e-01 2.7329907e-01 3.1330772e-01 2.6399518e-01 2.4164659e-01 2.1003834e-01 2.6865531e-01 2.2160867e-01 2.3705638e-01 2.3039039e-01 1.4523591e-01 2.3625874e-01 4.9071957e-01 4.3219633e-01 4.0993016e-01 4.2475440e-01 4.5332327e-01 4.5465382e-01 4.4096188e-01 4.3371359e-01 4.6548638e-01 3.8123224e-01 3.1318896e-01 4.0857587e-01 3.8073064e-01 4.6668253e-01 4.6707002e-01 3.6864305e-01 3.8213773e-01 3.8159377e-01 5.3428461e-01 4.3521869e-01 3.8775353e-01 4.1301893e-01 4.7176152e-01 3.5457739e-01 3.7593537e-01 3.7379344e-01 3.3323181e-01 3.2577190e-01 4.5261005e-01 3.6164142e-01 4.2194514e-01 3.2625778e-01 4.6147754e-01 3.4920516e-01 4.4979521e-01 4.0734174e-01 4.0275102e-01 3.7733353e-01 3.1756791e-01 3.4852048e-01 4.1080565e-01 3.2443308e-01 4.3219633e-01 4.2252463e-01 4.0479526e-01 3.6286403e-01 4.0364435e-01 3.5428112e-01 3.7151258e-01 3.7191203e-01 2.0478784e-03 4.7860280e-03 7.2727783e-03 1.2329906e-03 2.0550268e-03 7.3066158e-03 5.3810576e-04 3.2245377e-03 2.1674888e-03 2.7856851e-03 1.6387773e-03 
3.1323423e-03 6.7307381e-05 8.3332066e-03 3.3954200e-04 4.9119026e-03 7.6276175e-03 8.9240504e-03 1.3851706e-02 2.8347122e-03 2.2069601e-03 3.5278340e-03 4.3892066e-03 7.6179982e-03 7.9398397e-03 2.0003115e-03 1.2885024e-03 7.2727783e-03 5.1809110e-03 5.4327573e-03 7.2727783e-03 1.7938257e-03 2.8924302e-03 1.4132511e-03 3.5877316e-02 5.5425651e-05 2.1070391e-03 2.2246722e-03 8.2675293e-03 4.8309816e-04 1.2152730e-03 6.6498554e-04 3.1323983e-03 2.3387083e-01 2.3171300e-01 2.7340823e-01 3.1873143e-01 2.9087099e-01 3.0765106e-01 2.5178166e-01 2.1138210e-01 2.6568600e-01 2.6244481e-01 3.0032621e-01 2.3618270e-01 2.9221666e-01 3.0443867e-01 1.7368514e-01 2.2112593e-01 2.9589691e-01 2.4771596e-01 3.7467992e-01 2.5965416e-01 3.1023215e-01 2.2429067e-01 3.7775475e-01 3.0780450e-01 2.3805322e-01 2.3537447e-01 2.9708156e-01 3.1499475e-01 2.8689460e-01 1.9071775e-01 2.6437980e-01 2.4650321e-01 2.2965596e-01 4.0666307e-01 3.1024566e-01 2.2426781e-01 2.5770992e-01 3.3024819e-01 2.2703835e-01 2.8812097e-01 3.2789845e-01 2.7792695e-01 2.5584753e-01 2.2352457e-01 2.8285984e-01 2.3411709e-01 2.5033634e-01 2.4414228e-01 1.5718330e-01 2.4987685e-01 5.0772777e-01 4.4916856e-01 4.2715006e-01 4.4114868e-01 4.7061118e-01 4.7223859e-01 4.5731921e-01 4.5076024e-01 4.8335950e-01 3.9759816e-01 3.2864932e-01 4.2581769e-01 3.9765561e-01 4.8465345e-01 4.8525127e-01 3.8513607e-01 3.9820652e-01 3.9712803e-01 5.5326906e-01 4.5284577e-01 4.0466523e-01 4.2968979e-01 4.8967945e-01 3.7122633e-01 3.9189300e-01 3.8976354e-01 3.4937792e-01 3.4115590e-01 4.7020033e-01 3.7767688e-01 4.3942138e-01 3.4125876e-01 4.7934102e-01 3.6486747e-01 4.6609349e-01 4.2514437e-01 4.1889348e-01 3.9297481e-01 3.3279384e-01 3.6502235e-01 4.2824304e-01 3.4111505e-01 4.4916856e-01 4.3953404e-01 4.2185806e-01 3.8004789e-01 4.2135185e-01 3.7064704e-01 3.8713375e-01 3.8737322e-01 5.9378937e-04 1.6263483e-03 3.1194349e-04 8.5089275e-04 1.6365846e-03 1.1579874e-03 4.9430863e-03 7.7957878e-03 5.8209267e-03 9.7423596e-04 2.1559031e-04 2.8280232e-03 2.1261057e-03 1.8496545e-03 1.2342594e-02 1.9347552e-03 5.4995961e-03 5.2624400e-03 1.3773080e-04 7.1496401e-05 7.1145768e-04 9.6706058e-04 1.9028496e-03 3.0842001e-03 7.5087003e-03 6.3709632e-03 1.6263483e-03 2.4219636e-03 2.5416684e-03 1.6263483e-03 4.5881830e-05 1.0508341e-04 2.2101780e-03 2.1711060e-02 1.4779987e-03 1.0004664e-03 2.4029906e-03 2.5527616e-03 2.4859397e-03 1.2918144e-04 4.6388898e-04 3.7292268e-04 1.9674800e-01 1.9551090e-01 2.3384591e-01 2.7581575e-01 2.4956432e-01 2.6852776e-01 2.1529223e-01 1.7652116e-01 2.2659936e-01 2.2483764e-01 2.5838801e-01 1.9958250e-01 2.5031938e-01 2.6459593e-01 1.4127677e-01 1.8459488e-01 2.5809699e-01 2.1110498e-01 3.2773890e-01 2.2109976e-01 2.7105695e-01 1.8736680e-01 3.3227725e-01 2.6813261e-01 2.0050552e-01 1.9777445e-01 2.5552383e-01 2.7284453e-01 2.4733170e-01 1.5638593e-01 2.2514733e-01 2.0849921e-01 1.9287136e-01 3.6191982e-01 2.7281863e-01 1.9071038e-01 2.1912633e-01 2.8593968e-01 1.9281596e-01 2.4768186e-01 2.8763479e-01 2.3971138e-01 2.1723753e-01 1.8699948e-01 2.4393940e-01 1.9979806e-01 2.1397418e-01 2.0672587e-01 1.2529166e-01 2.1270878e-01 4.6033946e-01 4.0222602e-01 3.7977808e-01 3.9565939e-01 4.2276626e-01 4.2367223e-01 4.1181849e-01 4.0362790e-01 4.3403229e-01 3.5248619e-01 2.8628855e-01 3.7840466e-01 3.5121985e-01 4.3508597e-01 4.3518504e-01 3.3982310e-01 3.5380487e-01 3.5403534e-01 5.0086936e-01 4.0431760e-01 3.5820087e-01 3.8360776e-01 4.4020334e-01 3.2567463e-01 3.4780712e-01 3.4566354e-01 3.0520609e-01 2.9886193e-01 4.2163480e-01 
3.3351442e-01 3.9135037e-01 2.9988740e-01 4.3006423e-01 3.2170490e-01 4.2068520e-01 3.7643926e-01 3.7416586e-01 3.4964963e-01 2.9095264e-01 3.1987117e-01 3.8035231e-01 2.9582266e-01 4.0222602e-01 3.9256940e-01 3.7489846e-01 3.3318735e-01 3.7289799e-01 3.2576051e-01 3.4389968e-01 3.4452790e-01 2.6022528e-04 1.6357171e-03 1.5776794e-03 3.8821876e-04 3.3383101e-03 8.1348232e-03 1.2673676e-02 9.6454978e-03 2.5951087e-03 4.7710550e-04 5.9640558e-03 5.0935416e-04 4.5005321e-03 1.8295131e-02 8.0881241e-04 4.5548809e-03 2.4342161e-03 4.9437559e-04 7.1285200e-04 1.1827453e-03 5.3640347e-04 3.9274104e-04 2.7126384e-03 1.1795471e-02 1.0834355e-02 2.6022528e-04 3.1798475e-03 3.2407554e-03 2.6022528e-04 8.6271302e-04 3.7982619e-04 4.6816553e-03 1.6563494e-02 3.8583498e-03 2.4190920e-03 3.6876972e-03 1.5540748e-03 4.9548680e-03 1.1822578e-03 2.1020911e-03 7.8943086e-04 1.7690932e-01 1.7592254e-01 2.1244374e-01 2.5265403e-01 2.2738778e-01 2.4651370e-01 1.9514374e-01 1.5780110e-01 2.0549282e-01 2.0415245e-01 2.3585798e-01 1.7978451e-01 2.2802648e-01 2.4243736e-01 1.2428265e-01 1.6526016e-01 2.3669421e-01 1.9101658e-01 3.0265561e-01 2.0025949e-01 2.4898144e-01 1.6786898e-01 3.0733331e-01 2.4595698e-01 1.8046568e-01 1.7781045e-01 2.3314059e-01 2.4991164e-01 2.2560913e-01 1.3845901e-01 2.0404813e-01 1.8812805e-01 1.7322140e-01 3.3666075e-01 2.5129679e-01 1.7201674e-01 1.9833201e-01 2.6229128e-01 1.7386253e-01 2.2573363e-01 2.6492814e-01 2.1852944e-01 1.9648937e-01 1.6758641e-01 2.2246603e-01 1.8066071e-01 1.9389274e-01 1.8653629e-01 1.0911288e-01 1.9242842e-01 4.3299781e-01 3.7574545e-01 3.5353088e-01 3.6969868e-01 3.9574789e-01 3.9644649e-01 3.8564737e-01 3.7707427e-01 4.0646475e-01 3.2727058e-01 2.6301141e-01 3.5217112e-01 3.2569733e-01 4.0744443e-01 4.0742674e-01 3.1477415e-01 3.2876954e-01 3.2939944e-01 4.7166141e-01 3.7739415e-01 3.3254323e-01 3.5763917e-01 4.1251084e-01 3.0085067e-01 3.2295735e-01 3.2084322e-01 2.8110727e-01 2.7535557e-01 3.9443856e-01 3.0887601e-01 3.6474538e-01 2.7663010e-01 4.0256676e-01 2.9754793e-01 3.9443683e-01 3.4998590e-01 3.4873324e-01 3.2500314e-01 2.6771990e-01 2.9525187e-01 3.5397733e-01 2.7178914e-01 3.7574545e-01 3.6622316e-01 3.4883278e-01 3.0797294e-01 3.4655783e-01 3.0107914e-01 3.1936679e-01 3.2010756e-01 3.0868881e-03 2.8691382e-03 1.3643967e-04 5.3429196e-03 1.0581034e-02 1.6459895e-02 1.2551534e-02 4.1576220e-03 1.2160579e-03 8.7064401e-03 5.4418849e-05 6.8214146e-03 2.2732626e-02 5.6586090e-04 4.9831593e-03 1.1314391e-03 1.3090644e-03 1.7262411e-03 1.9770194e-03 1.0389613e-03 9.3641634e-05 2.8067647e-03 1.5471006e-02 1.4411876e-02 0.0000000e+00 4.0251529e-03 4.0402030e-03 0.0000000e+00 2.0104773e-03 1.1579294e-03 6.7733512e-03 1.3328703e-02 6.1195429e-03 3.8280621e-03 5.4471697e-03 1.3203495e-03 7.3930866e-03 2.5511055e-03 3.8011014e-03 1.5935161e-03 1.6488937e-01 1.6420208e-01 1.9946134e-01 2.3841307e-01 2.1377841e-01 2.3352484e-01 1.8323739e-01 1.4660873e-01 1.9269692e-01 1.9183992e-01 2.2200730e-01 1.6791595e-01 2.1423078e-01 2.2922867e-01 1.1407468e-01 1.5349471e-01 2.2418179e-01 1.7908948e-01 2.8692621e-01 1.8765985e-01 2.3596576e-01 1.5596498e-01 2.9203641e-01 2.3278981e-01 1.6829150e-01 1.6563525e-01 2.1942293e-01 2.3592536e-01 2.1256421e-01 1.2755291e-01 1.9121342e-01 1.7576718e-01 1.6132929e-01 3.2148865e-01 2.3885357e-01 1.6118125e-01 1.8573297e-01 2.4757051e-01 1.6279862e-01 2.1240660e-01 2.5149161e-01 2.0595435e-01 1.8389163e-01 1.5580881e-01 2.0964365e-01 1.6953436e-01 1.8203376e-01 1.7437090e-01 9.9184065e-02 1.8031354e-01 4.1664204e-01 
3.5971944e-01 3.3744612e-01 3.5416868e-01 3.7936058e-01 3.7982315e-01 3.7006184e-01 3.6098196e-01 3.8956200e-01 3.1201251e-01 2.4889912e-01 3.3607844e-01 3.1002022e-01 3.9046127e-01 3.9028467e-01 2.9949936e-01 3.1373736e-01 3.1479561e-01 4.5355800e-01 3.6085089e-01 3.1682959e-01 3.4195608e-01 3.9553949e-01 2.8555816e-01 3.0804941e-01 3.0593834e-01 2.6633772e-01 2.6121338e-01 3.7782244e-01 2.9399549e-01 3.4839508e-01 2.6278429e-01 3.8569374e-01 2.8303600e-01 3.7885421e-01 3.3349568e-01 3.3352424e-01 3.1033775e-01 2.5375578e-01 2.8011018e-01 3.3772574e-01 2.5671985e-01 3.5971944e-01 3.5022311e-01 3.3289784e-01 2.9224130e-01 3.3016010e-01 2.8599652e-01 3.0475125e-01 3.0561759e-01 1.6232219e-03 2.8110674e-03 3.0992704e-04 2.8009412e-03 5.3859157e-03 3.4428307e-03 2.2389965e-04 4.8865483e-04 1.7600776e-03 3.6417064e-03 7.4576509e-04 9.1296419e-03 2.8123816e-03 8.0068765e-03 7.3379880e-03 3.9579356e-04 1.9713653e-04 6.0194434e-04 2.3367622e-03 3.6239908e-03 3.0185118e-03 6.3202257e-03 4.4046298e-03 3.0868881e-03 1.7141363e-03 1.8461990e-03 3.0868881e-03 1.2729358e-04 4.6571753e-04 8.6393185e-04 2.3903803e-02 9.0441478e-04 3.0528309e-04 3.1648190e-03 3.1223857e-03 2.2339929e-03 2.7724170e-04 9.5102356e-05 4.3913729e-04 2.1061066e-01 2.0964712e-01 2.4888926e-01 2.9163776e-01 2.6471732e-01 2.8522189e-01 2.3035873e-01 1.8998369e-01 2.4143327e-01 2.4006731e-01 2.7373417e-01 2.1381600e-01 2.6519773e-01 2.8097864e-01 1.5319882e-01 1.9790190e-01 2.7464921e-01 2.2594124e-01 3.4406181e-01 2.3583526e-01 2.8783351e-01 2.0067586e-01 3.4959337e-01 2.8469567e-01 2.1442337e-01 2.1148410e-01 2.7089383e-01 2.8885413e-01 2.6304757e-01 1.6861411e-01 2.3983808e-01 2.2272157e-01 2.0662897e-01 3.8050402e-01 2.8992390e-01 2.0524163e-01 2.3373909e-01 3.0156183e-01 2.0732304e-01 2.6311208e-01 3.0478863e-01 2.5545595e-01 2.3172924e-01 2.0047959e-01 2.5968741e-01 2.1460680e-01 2.2900957e-01 2.2107714e-01 1.3590375e-01 2.2746759e-01 4.8087096e-01 4.2142796e-01 3.9814594e-01 4.1502871e-01 4.4228995e-01 4.4300705e-01 4.3159396e-01 4.2281768e-01 4.5341178e-01 3.7067256e-01 3.0283474e-01 3.9670954e-01 3.6890449e-01 4.5441105e-01 4.5432159e-01 3.5749751e-01 3.7222314e-01 3.7273757e-01 5.2092654e-01 4.2307513e-01 3.7613897e-01 4.0250144e-01 4.5970761e-01 3.4267709e-01 3.6611458e-01 3.6389952e-01 3.2189682e-01 3.1593975e-01 4.4091119e-01 3.5132753e-01 4.0985044e-01 3.1723537e-01 4.4934566e-01 3.3938050e-01 4.4068685e-01 3.9407776e-01 3.9311145e-01 3.6818097e-01 3.0785214e-01 3.3679573e-01 3.9853670e-01 3.1138703e-01 4.2142796e-01 4.1148317e-01 3.9324794e-01 3.4985868e-01 3.9052142e-01 3.4304315e-01 3.6227810e-01 3.6300235e-01 3.5297445e-03 2.3993465e-03 8.1469449e-03 8.4116551e-03 8.4748907e-03 3.0443320e-03 1.8587915e-03 2.8140158e-03 3.7033592e-03 2.9317197e-03 1.3265454e-02 4.5015236e-03 2.6265400e-03 7.5415562e-03 1.6353241e-03 1.4046892e-03 3.1172532e-03 6.0159720e-04 2.6265400e-03 7.0517547e-03 5.5270478e-03 6.4518235e-03 2.8691382e-03 6.1014438e-03 6.3013427e-03 2.8691382e-03 1.1615614e-03 1.4498289e-03 4.4069463e-03 2.8268404e-02 1.4610903e-03 3.3082950e-03 4.5136771e-04 5.8170191e-03 1.2876475e-03 5.5964987e-04 1.3152056e-03 2.3446691e-03 1.9624436e-01 1.9365486e-01 2.3272539e-01 2.7570910e-01 2.4962218e-01 2.6341222e-01 2.1161968e-01 1.7505868e-01 2.2555325e-01 2.2171510e-01 2.5853430e-01 1.9783603e-01 2.5146360e-01 2.6075203e-01 1.4121466e-01 1.8483453e-01 2.5221719e-01 2.0803875e-01 3.2980663e-01 2.1983733e-01 2.6580206e-01 1.8792174e-01 3.3096405e-01 2.6375229e-01 2.0022607e-01 1.9799659e-01 2.5530781e-01 
2.7170116e-01 2.4472945e-01 1.5721426e-01 2.2453551e-01 2.0791989e-01 1.9232403e-01 3.5721533e-01 2.6541719e-01 1.8583170e-01 2.1815822e-01 2.8740780e-01 1.8853156e-01 2.4642239e-01 2.8243457e-01 2.3594931e-01 2.1655773e-01 1.8685267e-01 2.4074340e-01 1.9492750e-01 2.1026662e-01 2.0538411e-01 1.2769778e-01 2.1026662e-01 4.5350142e-01 3.9795187e-01 3.7769065e-01 3.8983341e-01 4.1851849e-01 4.2042793e-01 4.0510768e-01 3.9953508e-01 4.3130939e-01 3.4895812e-01 2.8413412e-01 3.7648942e-01 3.4987508e-01 4.3267862e-01 4.3359924e-01 3.3758650e-01 3.4918773e-01 3.4772733e-01 4.9915788e-01 4.0231358e-01 3.5632097e-01 3.7931174e-01 4.3732697e-01 3.2511301e-01 3.4317696e-01 3.4120231e-01 3.0420772e-01 2.9548175e-01 4.1851849e-01 3.3003548e-01 3.8954108e-01 2.9516106e-01 4.2751620e-01 3.1771485e-01 4.1340131e-01 3.7704473e-01 3.6865281e-01 3.4390717e-01 2.8759676e-01 3.1915503e-01 3.7909158e-01 2.9821954e-01 3.9795187e-01 3.8894782e-01 3.7251565e-01 3.3442155e-01 3.7333196e-01 3.2403985e-01 3.3841983e-01 3.3852014e-01 4.9713816e-03 9.2903476e-03 1.5944722e-02 1.1386125e-02 3.5402300e-03 9.6201776e-04 8.6911918e-03 9.4953207e-05 6.4447745e-03 2.1940505e-02 1.4660203e-04 6.6774582e-03 1.0681613e-03 1.0987220e-03 1.5397922e-03 1.3925202e-03 1.6744918e-03 4.5493239e-04 1.7274513e-03 1.6063124e-02 1.4167352e-02 1.3643967e-04 2.8974888e-03 2.8893579e-03 1.3643967e-04 1.8844794e-03 1.0195101e-03 5.9835494e-03 1.1907169e-02 6.2144210e-03 3.1461009e-03 6.4603159e-03 6.0804116e-04 7.9398115e-03 2.6608779e-03 3.6611489e-03 1.1878605e-03 1.6827914e-01 1.6808878e-01 2.0330224e-01 2.4208927e-01 2.1725982e-01 2.3901101e-01 1.8791090e-01 1.5022413e-01 1.9646923e-01 1.9636982e-01 2.2550082e-01 1.7178784e-01 2.1730098e-01 2.3423675e-01 1.1690649e-01 1.5652486e-01 2.2988556e-01 1.8351752e-01 2.8999988e-01 1.9148148e-01 2.4151451e-01 1.5889324e-01 2.9641885e-01 2.3800995e-01 1.7162027e-01 1.6875690e-01 2.2303923e-01 2.3997805e-01 2.1702847e-01 1.3016155e-01 1.9481438e-01 1.7925806e-01 1.6471086e-01 3.2723835e-01 2.4516700e-01 1.6613136e-01 1.8943302e-01 2.5069233e-01 1.6755119e-01 2.1637456e-01 2.5710224e-01 2.1080083e-01 1.8747239e-01 1.5900127e-01 2.1430731e-01 1.7454101e-01 1.8671199e-01 1.7813660e-01 1.0093430e-01 1.8452279e-01 4.2348788e-01 3.6545764e-01 3.4229982e-01 3.6044592e-01 3.8515670e-01 3.8525367e-01 3.7671058e-01 3.6665866e-01 3.9483186e-01 3.1729700e-01 2.5339335e-01 3.4086300e-01 3.1448994e-01 3.9561710e-01 3.9513385e-01 3.0425707e-01 3.1942407e-01 3.2109044e-01 4.5863634e-01 3.6575820e-01 3.2152631e-01 3.4763745e-01 4.0088503e-01 2.8963041e-01 3.1371721e-01 3.1153634e-01 2.7048666e-01 2.6621754e-01 3.8319922e-01 2.9918600e-01 3.5318564e-01 2.6828212e-01 3.9088644e-01 2.8836369e-01 3.8573578e-01 3.3732047e-01 3.3961180e-01 3.1641339e-01 2.5871452e-01 2.8421639e-01 3.4227214e-01 2.5952711e-01 3.6545764e-01 3.5568934e-01 3.3784386e-01 2.9565951e-01 3.3403769e-01 2.9050448e-01 3.1070970e-01 3.1176741e-01 1.7225353e-03 3.1252650e-03 1.9141697e-03 3.0572974e-04 1.5621195e-03 7.7657730e-04 6.0730841e-03 9.6969946e-05 6.0804497e-03 4.8728559e-03 1.0019276e-02 1.0630759e-02 1.4028596e-03 1.0008907e-03 1.5106647e-03 3.9425625e-03 5.9922357e-03 4.5089760e-03 4.5296071e-03 2.4544132e-03 5.3429196e-03 2.3927105e-03 2.5658780e-03 5.3429196e-03 8.0759380e-04 1.5345623e-03 3.2653860e-04 2.8784931e-02 4.6959359e-04 5.2961514e-04 3.5529133e-03 5.0787251e-03 1.7528094e-03 7.9750642e-04 1.8469304e-04 1.3949343e-03 2.2601194e-01 2.2489065e-01 2.6538416e-01 3.0933559e-01 2.8173337e-01 3.0217927e-01 2.4601138e-01 
2.0461695e-01 2.5772236e-01 2.5608721e-01 2.9099324e-01 2.2920421e-01 2.8227951e-01 2.9802519e-01 1.6660589e-01 2.1294215e-01 2.9118999e-01 2.4154604e-01 3.6304829e-01 2.5194222e-01 3.0483437e-01 2.1582549e-01 3.6852765e-01 3.0175865e-01 2.2996178e-01 2.2696306e-01 2.8805620e-01 3.0640689e-01 2.7978083e-01 1.8266242e-01 2.5611673e-01 2.3849427e-01 2.2189938e-01 3.9968280e-01 3.0655638e-01 2.1989254e-01 2.4981061e-01 3.1957344e-01 2.2214967e-01 2.7998655e-01 3.2222399e-01 2.7182647e-01 2.4776514e-01 2.1557967e-01 2.7625427e-01 2.2956697e-01 2.4461597e-01 2.3673300e-01 1.4870158e-01 2.4319918e-01 5.0146439e-01 4.4143183e-01 4.1797463e-01 4.3468983e-01 4.6265612e-01 4.6350594e-01 4.5140079e-01 4.4286970e-01 4.7413628e-01 3.8981309e-01 3.2063561e-01 4.1652733e-01 3.8823357e-01 4.7518267e-01 4.7516433e-01 3.7651294e-01 3.9124904e-01 3.9150135e-01 5.4273573e-01 4.4336023e-01 3.9556546e-01 4.2215915e-01 4.8051706e-01 3.6152094e-01 3.8501414e-01 3.8277781e-01 3.4024930e-01 3.3391004e-01 4.6138930e-01 3.7007400e-01 4.2991865e-01 3.3504580e-01 4.7002175e-01 3.5780202e-01 4.6054739e-01 4.1401655e-01 4.1241306e-01 3.8694896e-01 3.2563422e-01 3.5550143e-01 4.1844379e-01 3.2964964e-01 4.4143183e-01 4.3139176e-01 4.1295631e-01 3.6894646e-01 4.1038562e-01 3.6180237e-01 3.8096707e-01 3.8161756e-01 2.9059655e-03 2.3706161e-04 1.5622923e-03 4.6985391e-03 3.0899699e-03 1.1144685e-02 1.5452086e-03 4.3912180e-03 8.2378168e-03 1.9894938e-02 1.6022015e-02 4.6248887e-03 4.1434850e-03 3.4955495e-03 1.0239539e-02 1.2012056e-02 5.3911313e-03 8.1025404e-03 3.3141536e-03 1.0581034e-02 2.5128050e-03 2.6447621e-03 1.0581034e-02 4.0444636e-03 5.0265914e-03 5.7591609e-04 3.1111838e-02 3.5439088e-03 1.6935296e-03 1.0096079e-02 7.4536930e-03 6.1909555e-03 4.6446420e-03 2.9301624e-03 3.9714148e-03 2.5068010e-01 2.5098525e-01 2.9216380e-01 3.3638336e-01 3.0774280e-01 3.3430586e-01 2.7479523e-01 2.2946729e-01 2.8415608e-01 2.8467354e-01 3.1719659e-01 2.5534962e-01 3.0706036e-01 3.2874800e-01 1.8820706e-01 2.3625921e-01 3.2370915e-01 2.6954509e-01 3.8903955e-01 2.7840834e-01 3.3718014e-01 2.3890051e-01 3.9852630e-01 3.3314087e-01 2.5453269e-01 2.5085481e-01 3.1457631e-01 3.3452242e-01 3.0863300e-01 2.0400912e-01 2.8201538e-01 2.6372052e-01 2.4645516e-01 4.3401653e-01 3.4099078e-01 2.4885351e-01 2.7587880e-01 3.4510687e-01 2.5062404e-01 3.0740449e-01 3.5503881e-01 3.0161400e-01 2.7344211e-01 2.3943815e-01 3.0560987e-01 2.5890901e-01 2.7338361e-01 2.6272911e-01 1.6653571e-01 2.7061027e-01 5.3996294e-01 4.7622847e-01 4.4996413e-01 4.7092027e-01 4.9784258e-01 4.9765553e-01 4.8884384e-01 4.7750758e-01 5.0792579e-01 4.2270376e-01 3.5026703e-01 4.4829833e-01 4.1874266e-01 5.0865069e-01 5.0773433e-01 4.0771387e-01 4.2529697e-01 4.2723724e-01 5.7654330e-01 4.7578493e-01 4.2683086e-01 4.5657562e-01 5.1458642e-01 3.9050926e-01 4.1892650e-01 4.1646419e-01 3.6916765e-01 3.6521270e-01 4.9536279e-01 4.0243409e-01 4.6185742e-01 3.6775616e-01 5.0354755e-01 3.9037876e-01 4.9872590e-01 4.4290716e-01 4.4784995e-01 4.2202156e-01 3.5667822e-01 3.8450868e-01 4.4953729e-01 3.5436611e-01 4.7622847e-01 4.6530251e-01 4.4515647e-01 3.9606646e-01 4.3939461e-01 3.9207838e-01 4.1563418e-01 4.1682072e-01 1.5609953e-03 4.8490070e-03 9.0958370e-03 1.4989091e-03 1.7813868e-02 2.1261488e-03 5.5450301e-04 1.5694148e-02 1.9384410e-02 2.5209737e-02 8.6954304e-03 7.6213095e-03 8.7105486e-03 1.2654300e-02 1.7334342e-02 1.3883194e-02 2.2522789e-03 1.7443565e-04 1.6459895e-02 9.1718824e-03 9.4916987e-03 1.6459895e-02 6.9918464e-03 8.9833757e-03 2.9662983e-03 
4.9266036e-02 2.9055422e-03 5.5731976e-03 8.0889759e-03 1.5739049e-02 3.7597322e-03 6.3200695e-03 4.4644236e-03 8.6395939e-03 2.7692079e-01 2.7499135e-01 3.1941212e-01 3.6719225e-01 3.3760508e-01 3.5656667e-01 2.9687968e-01 2.5298236e-01 3.1115442e-01 3.0819394e-01 3.4760947e-01 2.7976620e-01 3.3861131e-01 3.5300749e-01 2.1161836e-01 2.6294922e-01 3.4413759e-01 2.9242527e-01 4.2523864e-01 3.0477646e-01 3.5931443e-01 2.6623770e-01 4.2973462e-01 3.5665121e-01 2.8133678e-01 2.7828246e-01 3.4429599e-01 3.6357656e-01 3.3414792e-01 2.2982660e-01 3.0962503e-01 2.9049749e-01 2.7240490e-01 4.6073277e-01 3.5937207e-01 2.6744708e-01 3.0261187e-01 3.7874332e-01 2.7038484e-01 3.3511133e-01 3.7800754e-01 3.2481692e-01 3.0053112e-01 2.6567213e-01 3.2997477e-01 2.7805229e-01 2.9533686e-01 2.8819780e-01 1.9246360e-01 2.9461561e-01 5.6604002e-01 5.0500114e-01 4.8160322e-01 4.9684759e-01 5.2728223e-01 5.2878056e-01 5.1374073e-01 5.0662675e-01 5.4019822e-01 4.5105935e-01 3.7828674e-01 4.8016813e-01 4.5059090e-01 5.4146236e-01 5.4186046e-01 4.3772456e-01 4.5187440e-01 4.5090747e-01 6.1216296e-01 5.0833718e-01 4.5807252e-01 4.8471571e-01 5.4678369e-01 4.2264992e-01 4.4526564e-01 4.4301133e-01 3.9982165e-01 3.9174986e-01 5.2663733e-01 4.3017652e-01 4.9431552e-01 3.9206379e-01 5.3598953e-01 4.1681568e-01 5.2288475e-01 4.7864082e-01 4.7360696e-01 4.4651927e-01 3.8292357e-01 4.1618542e-01 4.8251093e-01 3.8978460e-01 5.0500114e-01 4.9485605e-01 4.7615846e-01 4.3123359e-01 4.7475023e-01 4.2239197e-01 4.4037545e-01 4.4066833e-01 2.2696323e-03 5.9674345e-03 2.4448133e-03 1.3335241e-02 1.4550127e-03 2.5899751e-03 1.0462849e-02 2.0483763e-02 1.9016265e-02 5.8048658e-03 5.1341121e-03 4.8749246e-03 1.1285550e-02 1.3907377e-02 7.6337585e-03 6.3180508e-03 2.0267734e-03 1.2551534e-02 4.1078635e-03 4.2919330e-03 1.2551534e-02 4.8854397e-03 6.2043549e-03 8.9134425e-04 3.6466805e-02 3.2782414e-03 2.5696799e-03 9.8307089e-03 9.8120264e-03 5.5450409e-03 5.1728098e-03 3.2838118e-03 5.2555481e-03 2.6123905e-01 2.6103645e-01 3.0329747e-01 3.4864780e-01 3.1958423e-01 3.4458162e-01 2.8459400e-01 2.3921225e-01 2.9516933e-01 2.9488851e-01 3.2923215e-01 2.6553322e-01 3.1926739e-01 3.3946038e-01 1.9751618e-01 2.4678743e-01 3.3347141e-01 2.7948950e-01 4.0283567e-01 2.8923185e-01 3.4744486e-01 2.4959732e-01 4.1128993e-01 3.4370645e-01 2.6525258e-01 2.6168140e-01 3.2643822e-01 3.4638099e-01 3.1949478e-01 2.1402441e-01 2.9314941e-01 2.7451414e-01 2.5691069e-01 4.4594923e-01 3.5036767e-01 2.5760360e-01 2.8676453e-01 3.5805235e-01 2.5967094e-01 3.1875975e-01 3.6562012e-01 3.1188296e-01 2.8438818e-01 2.4989387e-01 3.1618152e-01 2.6787548e-01 2.8314006e-01 2.7321717e-01 1.7615044e-01 2.8082637e-01 5.5224352e-01 4.8885819e-01 4.6311762e-01 4.8285549e-01 5.1073035e-01 5.1093091e-01 5.0061869e-01 4.9022404e-01 5.2151073e-01 4.3495694e-01 3.6199911e-01 4.6149506e-01 4.3176986e-01 5.2236122e-01 5.2173621e-01 4.2026138e-01 4.3714978e-01 4.3841019e-01 5.9117531e-01 4.8927834e-01 4.3976762e-01 4.6895974e-01 5.2818477e-01 4.0343191e-01 4.3068816e-01 4.2826096e-01 3.8162190e-01 3.7670056e-01 5.0866155e-01 4.1442940e-01 4.7525840e-01 3.7873655e-01 5.1715063e-01 4.0199874e-01 5.1036877e-01 4.5693051e-01 4.5962906e-01 4.3336423e-01 3.6804311e-01 3.9729031e-01 4.6298867e-01 3.6775880e-01 4.8885819e-01 4.7805886e-01 4.5813974e-01 4.0968699e-01 4.5331588e-01 4.0460089e-01 4.2699969e-01 4.2797954e-01 8.7894099e-04 2.0541035e-03 4.6305984e-03 6.7282118e-04 8.1166178e-03 3.1872717e-03 1.0880321e-02 8.3605717e-03 8.2577841e-04 6.1811314e-04 5.6936180e-04 
3.8806955e-03 4.9821714e-03 2.5043588e-03 7.1844876e-03 4.2468498e-03 4.1576220e-03 9.9353221e-04 1.1055786e-03 4.1576220e-03 5.9733473e-04 9.9247040e-04 3.2081104e-04 2.3598005e-02 1.4307976e-03 3.0934240e-05 4.9322953e-03 3.1299098e-03 3.2877683e-03 9.9769768e-04 4.3968696e-04 6.2770216e-04 2.1786575e-01 2.1746374e-01 2.5686278e-01 2.9961632e-01 2.7235218e-01 2.9521657e-01 2.3917491e-01 1.9737534e-01 2.4929109e-01 2.4877587e-01 2.8142376e-01 2.2163964e-01 2.7235613e-01 2.9042411e-01 1.5945203e-01 2.0466964e-01 2.8483074e-01 2.3445929e-01 3.5153649e-01 2.4372083e-01 2.9790924e-01 2.0734139e-01 3.5860995e-01 2.9439415e-01 2.2162942e-01 2.1843466e-01 2.7871280e-01 2.9725443e-01 2.7179832e-01 1.7471289e-01 2.4749127e-01 2.3015922e-01 2.1385216e-01 3.9117409e-01 3.0083879e-01 2.1421404e-01 2.4147478e-01 3.0893498e-01 2.1609222e-01 2.7129944e-01 3.1500957e-01 2.6459749e-01 2.3931769e-01 2.0744985e-01 2.6864595e-01 2.2369874e-01 2.3782351e-01 2.2882042e-01 1.4076875e-01 2.3574843e-01 4.9305396e-01 4.3221625e-01 4.0786305e-01 4.2639713e-01 4.5320351e-01 4.5351054e-01 4.4342223e-01 4.3354113e-01 4.6376094e-01 3.8078797e-01 3.1179824e-01 4.0634323e-01 3.7808795e-01 4.6463298e-01 4.6419251e-01 3.6697079e-01 3.8279432e-01 3.8398235e-01 5.3121485e-01 4.3292773e-01 3.8560639e-01 4.1316779e-01 4.7015919e-01 3.5131205e-01 3.7664303e-01 3.7434225e-01 3.3054253e-01 3.2553357e-01 4.5134808e-01 3.6126826e-01 4.1953095e-01 3.2738535e-01 4.5959627e-01 3.4943014e-01 4.5279702e-01 4.0259529e-01 4.0419917e-01 3.7916847e-01 3.1736132e-01 3.4544845e-01 4.0790313e-01 3.1842804e-01 4.3221625e-01 4.2193584e-01 4.0305492e-01 3.5775833e-01 3.9908964e-01 3.5217975e-01 3.7311479e-01 3.7405234e-01 4.0128787e-03 1.5082918e-03 2.4327589e-03 1.3749582e-02 9.8644164e-04 7.0275865e-03 4.0509182e-03 9.2137987e-06 8.1954627e-05 2.0221322e-04 1.4919720e-03 1.6918938e-03 1.6810350e-03 9.8366461e-03 7.8161988e-03 1.2160579e-03 1.4072237e-03 1.4774681e-03 1.2160579e-03 1.9402564e-04 2.6249834e-05 2.2527118e-03 1.8043780e-02 2.4914194e-03 7.5228825e-04 4.0311549e-03 1.3209671e-03 4.0483306e-03 6.3837247e-04 9.1866746e-04 4.5668021e-05 1.9279238e-01 1.9221899e-01 2.2979083e-01 2.7088361e-01 2.4479658e-01 2.6610561e-01 2.1267629e-01 1.7326091e-01 2.2258210e-01 2.2184784e-01 2.5349552e-01 1.9619155e-01 2.4503806e-01 2.6157291e-01 1.3780150e-01 1.8042756e-01 2.5616217e-01 2.0823320e-01 3.2136818e-01 2.1724300e-01 2.6868196e-01 1.8301959e-01 3.2745065e-01 2.6534429e-01 1.9640162e-01 1.9346456e-01 2.5082861e-01 2.6843057e-01 2.4386055e-01 1.5229091e-01 2.2093793e-01 2.0444992e-01 1.8898039e-01 3.5852616e-01 2.7153123e-01 1.8897970e-01 2.1514921e-01 2.8018592e-01 1.9075855e-01 2.4355737e-01 2.8508289e-01 2.3688721e-01 2.1314274e-01 1.8298617e-01 2.4079289e-01 1.9795777e-01 2.1138990e-01 2.0306053e-01 1.2090533e-01 2.0951271e-01 4.5737829e-01 3.9834085e-01 3.7497739e-01 3.9259152e-01 4.1873643e-01 4.1914498e-01 4.0909121e-01 3.9964410e-01 4.2918834e-01 3.4857995e-01 2.8224226e-01 3.7353062e-01 3.4626594e-01 4.3008323e-01 4.2978752e-01 3.3538250e-01 3.5042715e-01 3.5150644e-01 4.9516533e-01 3.9931312e-01 3.5345241e-01 3.7985840e-01 4.3539662e-01 3.2054198e-01 3.4448056e-01 3.4226906e-01 3.0044482e-01 2.9530625e-01 4.1705793e-01 3.2972732e-01 3.8633770e-01 2.9699130e-01 4.2515986e-01 3.1826259e-01 4.1819279e-01 3.7038673e-01 3.7108888e-01 3.4686702e-01 2.8745249e-01 3.1485734e-01 3.7515178e-01 2.8956004e-01 3.9834085e-01 3.8842721e-01 3.7027395e-01 3.2715736e-01 3.6694987e-01 3.2117964e-01 3.4102816e-01 3.4191835e-01 9.8460504e-03 
4.2097646e-04 3.8720695e-03 8.9609139e-03 1.0271114e-02 1.5759765e-02 3.6853519e-03 2.9640455e-03 4.3261623e-03 5.5313131e-03 9.1074119e-03 8.9974672e-03 1.5687499e-03 7.6879483e-04 8.7064401e-03 5.8625582e-03 6.1329529e-03 8.7064401e-03 2.4949105e-03 3.7729184e-03 1.5387455e-03 3.8489345e-02 2.4427633e-04 2.5870999e-03 2.8098846e-03 9.5472550e-03 6.5811256e-04 1.8465019e-03 1.0925425e-03 3.9557876e-03 2.4136396e-01 2.3914246e-01 2.8140056e-01 3.2726072e-01 2.9909180e-01 3.1587284e-01 2.5940651e-01 2.1853199e-01 2.7358477e-01 2.7023320e-01 3.0865618e-01 2.4367721e-01 3.0046030e-01 3.1269475e-01 1.8026794e-01 2.2845050e-01 3.0393397e-01 2.5531598e-01 3.8377418e-01 2.6747174e-01 3.1847415e-01 2.3166294e-01 3.8684625e-01 3.1607055e-01 2.4560721e-01 2.4289838e-01 3.0537010e-01 3.2346477e-01 2.9500292e-01 1.9758720e-01 2.7227129e-01 2.5416529e-01 2.3708995e-01 4.1588028e-01 3.1833018e-01 2.3144395e-01 2.6550973e-01 3.3890880e-01 2.3429142e-01 2.9628566e-01 3.3633600e-01 2.8587728e-01 2.6362949e-01 2.3087874e-01 2.9089607e-01 2.4143368e-01 2.5794041e-01 2.5175590e-01 1.6347149e-01 2.5753099e-01 5.1758017e-01 4.5875138e-01 4.3664497e-01 4.5058511e-01 4.8035455e-01 4.8203651e-01 4.6682330e-01 4.6036369e-01 4.9325486e-01 4.0679312e-01 3.3723664e-01 4.3530687e-01 4.0692383e-01 4.9456817e-01 4.9519341e-01 3.9426880e-01 4.0735509e-01 4.0616607e-01 5.6363656e-01 4.6254311e-01 4.1397979e-01 4.3911880e-01 4.9961302e-01 3.8027310e-01 4.0098514e-01 3.9884545e-01 3.5820683e-01 3.4982569e-01 4.7998848e-01 3.8669536e-01 4.4902142e-01 3.4986296e-01 4.8921417e-01 3.7374272e-01 4.7562308e-01 4.3467328e-01 4.2816870e-01 4.0201263e-01 3.4137888e-01 3.7400811e-01 4.3776854e-01 3.4988231e-01 4.5875138e-01 4.4907175e-01 4.3130086e-01 3.8919296e-01 4.3084858e-01 3.7966236e-01 3.9613519e-01 3.9634370e-01 7.6697767e-03 2.4232895e-02 4.2497383e-04 5.8645021e-03 7.1697323e-04 1.6459430e-03 2.1472725e-03 2.1701877e-03 1.5644328e-03 2.1493500e-04 2.5540450e-03 1.7185148e-02 1.5785034e-02 5.4418849e-05 4.0402433e-03 4.0294443e-03 5.4418849e-05 2.5045043e-03 1.5070390e-03 7.3845654e-03 1.1685406e-02 7.1165327e-03 4.2192668e-03 6.5870297e-03 1.0764642e-03 8.6213220e-03 3.2147266e-03 4.4993834e-03 1.8554035e-03 1.6163874e-01 1.6124582e-01 1.9600517e-01 2.3441471e-01 2.0995191e-01 2.3066019e-01 1.8049424e-01 1.4375647e-01 1.8928742e-01 1.8888935e-01 2.1809263e-01 1.6489764e-01 2.1018407e-01 2.2610729e-01 1.1131536e-01 1.5021138e-01 2.2157821e-01 1.7624974e-01 2.8213828e-01 1.8434338e-01 2.3311339e-01 1.5259174e-01 2.8788764e-01 2.2976085e-01 1.6495975e-01 1.6222817e-01 2.1560367e-01 2.3216078e-01 2.0930930e-01 1.2444580e-01 1.8772709e-01 1.7242257e-01 1.5812464e-01 3.1788081e-01 2.3649618e-01 1.5894784e-01 1.8237277e-01 2.4318751e-01 1.6040650e-01 2.0886424e-01 2.4850015e-01 2.0301750e-01 1.8048879e-01 1.5257957e-01 2.0654604e-01 1.6721007e-01 1.7931046e-01 1.7120054e-01 9.6139593e-02 1.7732284e-01 4.1297254e-01 3.5577891e-01 3.3316436e-01 3.5061301e-01 3.7530173e-01 3.7554177e-01 3.6661833e-01 3.5699377e-01 3.8511507e-01 3.0820403e-01 2.4524565e-01 3.3176869e-01 3.0575678e-01 3.8594227e-01 3.8559393e-01 2.9549451e-01 3.1016061e-01 3.1160241e-01 4.4857854e-01 3.5641190e-01 3.1263400e-01 3.3812708e-01 3.9109335e-01 2.8129908e-01 3.0451362e-01 3.0237858e-01 2.6230725e-01 2.5773246e-01 3.7352449e-01 2.9029824e-01 3.4398832e-01 2.5959514e-01 3.8123049e-01 2.7952979e-01 3.7549545e-01 3.2868655e-01 3.3002382e-01 3.0704202e-01 2.5032682e-01 2.7592132e-01 3.3326893e-01 2.5208920e-01 3.5577891e-01 3.4619466e-01 3.2870702e-01 
2.8757710e-01 3.2540565e-01 2.8197541e-01 3.0143248e-01 3.0241595e-01 4.6779054e-03 6.3405358e-03 1.1072067e-02 1.2740784e-02 2.2273681e-03 1.7042107e-03 2.3469652e-03 4.9562680e-03 7.4940361e-03 5.7695726e-03 3.5981535e-03 1.5844867e-03 6.8214146e-03 3.1957366e-03 3.3967393e-03 6.8214146e-03 1.4289215e-03 2.3814252e-03 3.6906895e-04 3.2052685e-02 4.0942979e-04 9.9039775e-04 3.7975123e-03 6.5400285e-03 1.5831617e-03 1.2837357e-03 4.6903663e-04 2.2256509e-03 2.3440889e-01 2.3310632e-01 2.7431935e-01 3.1898347e-01 2.9103053e-01 3.1107217e-01 2.5431417e-01 2.1253549e-01 2.6655333e-01 2.6462531e-01 3.0042601e-01 2.3750489e-01 2.9168949e-01 3.0705757e-01 1.7397041e-01 2.2119992e-01 2.9980584e-01 2.4986709e-01 3.7353610e-01 2.6066067e-01 3.1374027e-01 2.2416418e-01 3.7874518e-01 3.1076095e-01 2.3844790e-01 2.3545346e-01 2.9741154e-01 3.1590317e-01 2.8873621e-01 1.9043311e-01 2.6497335e-01 2.4708373e-01 2.3022342e-01 4.0978464e-01 3.1511039e-01 2.2757419e-01 2.5853132e-01 3.2950090e-01 2.2996799e-01 2.8911377e-01 3.3136481e-01 2.8050509e-01 2.5648758e-01 2.2384164e-01 2.8507826e-01 2.3741411e-01 2.5289158e-01 2.4520543e-01 1.5591712e-01 2.5163277e-01 5.1215829e-01 4.5200156e-01 4.2861009e-01 4.4496538e-01 4.7341919e-01 4.7441196e-01 4.6168878e-01 4.5347805e-01 4.8519916e-01 3.9997999e-01 3.3019092e-01 4.2716946e-01 3.9865809e-01 4.8629326e-01 4.8637186e-01 3.8670691e-01 4.0127624e-01 4.0126667e-01 5.5444310e-01 4.5424309e-01 4.0600299e-01 4.3254422e-01 4.9161364e-01 3.7174460e-01 3.9497344e-01 3.9273809e-01 3.5018083e-01 3.4346685e-01 4.7229362e-01 3.8003558e-01 4.4070091e-01 3.4442025e-01 4.8107176e-01 3.6755422e-01 4.7082182e-01 4.2490414e-01 4.2252116e-01 3.9675769e-01 3.3509282e-01 3.6564070e-01 4.2918040e-01 3.3977682e-01 4.5200156e-01 4.4195847e-01 4.2350677e-01 3.7942836e-01 4.2122143e-01 3.7189771e-01 3.9075300e-01 3.9132598e-01 2.1401129e-02 2.6296569e-02 3.2566498e-02 1.3297832e-02 1.1999623e-02 1.2969856e-02 1.8466773e-02 2.3873417e-02 1.8518842e-02 4.0273028e-03 1.2616800e-03 2.2732626e-02 1.2827409e-02 1.3181090e-02 2.2732626e-02 1.1261737e-02 1.3701796e-02 5.3984869e-03 5.7610599e-02 5.9941455e-03 8.9691119e-03 1.2694193e-02 2.1144882e-02 7.0627421e-03 1.0543898e-02 8.0308076e-03 1.3063205e-02 2.9989796e-01 2.9808728e-01 3.4381381e-01 3.9275825e-01 3.6234621e-01 3.8238034e-01 3.2086021e-01 2.7526551e-01 3.3529881e-01 3.3248525e-01 3.7261196e-01 3.0300865e-01 3.6317903e-01 3.7865155e-01 2.3211430e-01 2.8532877e-01 3.6963026e-01 3.1621648e-01 4.5177033e-01 3.2875625e-01 3.8520789e-01 2.8867443e-01 4.5697798e-01 3.8242988e-01 3.0442487e-01 3.0118555e-01 3.6926913e-01 3.8920975e-01 3.5915749e-01 2.5088735e-01 3.3365900e-01 3.1393007e-01 2.9523721e-01 4.8898239e-01 3.8528778e-01 2.9050533e-01 3.2648373e-01 4.0430761e-01 2.9352285e-01 3.5998625e-01 4.0438969e-01 3.4965804e-01 3.2429593e-01 2.8821126e-01 3.5492121e-01 3.0147553e-01 3.1927061e-01 3.1166517e-01 2.1164881e-01 3.1841810e-01 5.9626248e-01 5.3406968e-01 5.1001241e-01 5.2586196e-01 5.5673824e-01 5.5816878e-01 5.4309284e-01 5.3570978e-01 5.6971774e-01 4.7901704e-01 4.0442977e-01 5.0852772e-01 4.7828326e-01 5.7096471e-01 5.7126109e-01 4.6526268e-01 4.7993533e-01 4.7902644e-01 6.4260357e-01 5.3722757e-01 4.8599135e-01 5.1340958e-01 5.7642669e-01 4.4962350e-01 4.7318809e-01 4.7087433e-01 4.2633712e-01 4.1835467e-01 5.5597597e-01 4.5768215e-01 5.2292685e-01 4.1877489e-01 5.6541976e-01 4.4406435e-01 5.5240885e-01 5.0657081e-01 5.0216108e-01 4.7452659e-01 4.0930806e-01 4.4303784e-01 5.1082488e-01 4.1541892e-01 5.3406968e-01 
5.2368299e-01 5.0449934e-01 4.5807263e-01 5.0263701e-01 4.4952858e-01 4.6823972e-01 4.6855914e-01 8.7198254e-03 1.2902931e-03 1.1684021e-03 1.6341127e-03 1.0742763e-03 2.6208124e-03 1.1173357e-03 8.9620141e-04 1.6960301e-02 1.4197837e-02 5.6586090e-04 2.0157829e-03 1.9836256e-03 5.6586090e-04 2.0413623e-03 1.1637665e-03 5.4511725e-03 1.0718252e-02 6.5990100e-03 2.7263257e-03 7.7966832e-03 1.5754002e-04 8.7922744e-03 3.0619296e-03 3.8030551e-03 1.0550593e-03 1.7203244e-01 1.7235723e-01 2.0751307e-01 2.4611820e-01 2.2109400e-01 2.4491699e-01 1.9298922e-01 1.5421652e-01 2.0061094e-01 2.0129686e-01 2.2934517e-01 1.7603988e-01 2.2070889e-01 2.3964854e-01 1.2009621e-01 1.5990877e-01 2.3601974e-01 1.8834267e-01 2.9339046e-01 1.9567575e-01 2.4748464e-01 1.6217086e-01 3.0116422e-01 2.4364046e-01 1.7530938e-01 1.7223213e-01 2.2701156e-01 2.4439785e-01 2.2188150e-01 1.3311613e-01 1.9877894e-01 1.8311301e-01 1.6845668e-01 3.3339209e-01 2.5192873e-01 1.7150268e-01 1.9350184e-01 2.5414415e-01 1.7271733e-01 2.2071334e-01 2.6313203e-01 2.1605186e-01 1.9141813e-01 1.6255272e-01 2.1936783e-01 1.7996851e-01 1.9179556e-01 1.8227672e-01 1.0300751e-01 1.8912086e-01 4.3075013e-01 3.7158844e-01 3.4752045e-01 3.6713693e-01 3.9134187e-01 3.9105984e-01 3.8378198e-01 3.7272538e-01 4.0046876e-01 3.2297138e-01 2.5826699e-01 3.4601238e-01 3.1932057e-01 4.0113556e-01 4.0033459e-01 3.0938917e-01 3.2551476e-01 3.2781102e-01 4.6405632e-01 3.7102765e-01 3.2659021e-01 3.5371449e-01 4.0659862e-01 2.9405620e-01 3.1978995e-01 3.1753736e-01 2.7499746e-01 2.7161605e-01 3.8895029e-01 3.0476818e-01 3.5833770e-01 2.7419176e-01 3.9644448e-01 2.9409120e-01 3.9304592e-01 3.4147602e-01 3.4611220e-01 3.2290811e-01 2.6406812e-01 2.8867894e-01 3.4717424e-01 2.6265032e-01 3.7158844e-01 3.6154105e-01 3.4316147e-01 2.9940560e-01 3.3824889e-01 2.9538166e-01 3.1708459e-01 3.1834036e-01 8.7316989e-03 6.7615313e-03 6.7580543e-03 9.6081666e-03 2.0657605e-03 3.8371349e-03 1.4271025e-02 1.2184393e-02 1.6084765e-02 4.9831593e-03 1.4699244e-02 1.4912454e-02 4.9831593e-03 6.5028816e-03 6.2489819e-03 1.3726162e-02 3.1411524e-02 7.7445772e-03 1.1141212e-02 2.5000477e-03 1.1143820e-02 6.1604304e-03 5.3711085e-03 7.5849080e-03 8.1835455e-03 1.6688130e-01 1.6291433e-01 2.0003066e-01 2.4149909e-01 2.1722570e-01 2.2445012e-01 1.7758122e-01 1.4623090e-01 1.9343386e-01 1.8753794e-01 2.2568158e-01 1.6690274e-01 2.2030573e-01 2.2323949e-01 1.1699096e-01 1.5725818e-01 2.1323159e-01 1.7489777e-01 2.9532205e-01 1.8786273e-01 2.2655558e-01 1.6049736e-01 2.9226070e-01 2.2550223e-01 1.7083413e-01 1.6938578e-01 2.2221647e-01 2.3649790e-01 2.0956346e-01 1.3266766e-01 1.9299947e-01 1.7760033e-01 1.6320695e-01 3.1350837e-01 2.2421748e-01 1.5253844e-01 1.8664517e-01 2.5447595e-01 1.5560691e-01 2.1266187e-01 2.4223633e-01 2.0012528e-01 1.8549850e-01 1.5864276e-01 2.0519641e-01 1.6093217e-01 1.7628990e-01 1.7435363e-01 1.0801101e-01 1.7763987e-01 4.0351234e-01 3.5280598e-01 3.3582666e-01 3.4348161e-01 3.7251839e-01 3.7541634e-01 3.5722931e-01 3.5450932e-01 3.8642215e-01 3.0688243e-01 2.4703841e-01 3.3487550e-01 3.1018257e-01 3.8808655e-01 3.8990045e-01 2.9749231e-01 3.0596368e-01 3.0291072e-01 4.5287211e-01 3.5944458e-01 3.1570031e-01 3.3498082e-01 3.9202879e-01 2.8759651e-01 3.0023966e-01 2.9853735e-01 2.6729925e-01 2.5639363e-01 3.7372548e-01 2.8896987e-01 3.4745520e-01 2.5469579e-01 3.8297689e-01 2.7675896e-01 3.6463006e-01 3.3839246e-01 3.2358656e-01 2.9982548e-01 2.4898596e-01 2.8176933e-01 3.3810925e-01 2.6587897e-01 3.5280598e-01 3.4488951e-01 3.3055981e-01 
2.9862862e-01 3.3464036e-01 2.8522813e-01 2.9487313e-01 2.9445589e-01 4.3312037e-03 5.1674385e-03 4.6600432e-03 4.1032398e-03 1.3249619e-03 3.6837745e-03 2.4877830e-02 2.2958753e-02 1.1314391e-03 6.3769043e-03 6.2810489e-03 1.1314391e-03 5.7702159e-03 4.1635898e-03 1.1902706e-02 7.2040189e-03 1.2293784e-02 7.6706139e-03 1.1339253e-02 1.8296747e-03 1.4280945e-02 6.9429868e-03 8.6587607e-03 4.4388406e-03 1.4462991e-01 1.4494206e-01 1.7756526e-01 2.1378771e-01 1.9031042e-01 2.1286860e-01 1.6419734e-01 1.2822571e-01 1.7113301e-01 1.7185341e-01 1.9804950e-01 1.4833973e-01 1.9005998e-01 2.0775634e-01 9.7105565e-02 1.3348080e-01 2.0467358e-01 1.5981923e-01 2.5874568e-01 1.6653873e-01 2.1530133e-01 1.3558698e-01 2.6575883e-01 2.1156399e-01 1.4766732e-01 1.4484877e-01 1.9583483e-01 2.1211626e-01 1.9101641e-01 1.0897783e-01 1.6943757e-01 1.5488355e-01 1.4132668e-01 2.9650216e-01 2.1994601e-01 1.4455458e-01 1.6451956e-01 2.2152336e-01 1.4555202e-01 1.8989624e-01 2.3000345e-01 1.8566766e-01 1.6258902e-01 1.3589561e-01 1.8870744e-01 1.5235300e-01 1.6309487e-01 1.5410176e-01 8.1942750e-02 1.6048904e-01 3.8999083e-01 3.3294245e-01 3.0988325e-01 3.2882841e-01 3.5187047e-01 3.5156617e-01 3.4487977e-01 3.3402051e-01 3.6059894e-01 2.8649814e-01 2.2516617e-01 3.0844683e-01 2.8301726e-01 3.6124416e-01 3.6050400e-01 2.7355262e-01 2.8899505e-01 2.9139815e-01 4.2192237e-01 3.3236960e-01 2.8992253e-01 3.1585310e-01 3.6648886e-01 2.5905330e-01 2.8355149e-01 2.8139447e-01 2.4097820e-01 2.3780572e-01 3.4954046e-01 2.6919588e-01 3.2022959e-01 2.4036369e-01 3.5673540e-01 2.5910558e-01 3.5384915e-01 3.0430469e-01 3.0871278e-01 2.8664912e-01 2.3068275e-01 2.5394489e-01 3.0958210e-01 2.2970603e-01 3.3294245e-01 3.2329736e-01 3.0571743e-01 2.6432145e-01 3.0120271e-01 2.6026006e-01 2.8107880e-01 2.8234890e-01 3.9333334e-05 2.5761851e-04 1.3896575e-03 1.7536063e-03 1.9216331e-03 9.2702772e-03 7.4022776e-03 1.3090644e-03 1.5444025e-03 1.6252591e-03 1.3090644e-03 1.2151407e-04 1.0618022e-05 2.1493524e-03 1.8833153e-02 2.2172388e-03 7.3253783e-04 3.6800655e-03 1.5497854e-03 3.6733784e-03 4.9421897e-04 7.6664670e-04 7.0183041e-05 1.9407897e-01 1.9337938e-01 2.3114302e-01 2.7244640e-01 2.4629979e-01 2.6718506e-01 2.1373312e-01 1.7438911e-01 2.2391848e-01 2.2298785e-01 2.5503185e-01 1.9737603e-01 2.4663659e-01 2.6276403e-01 1.3891547e-01 1.8173656e-01 2.5713590e-01 2.0933386e-01 3.2324703e-01 2.1854256e-01 2.6975489e-01 1.8436571e-01 3.2904939e-01 2.6649404e-01 1.9772084e-01 1.9481826e-01 2.5232520e-01 2.6989379e-01 2.4511828e-01 1.5354404e-01 2.2230840e-01 2.0576951e-01 1.9024966e-01 3.5987888e-01 2.7239312e-01 1.8983400e-01 2.1646886e-01 2.8191043e-01 1.9167912e-01 2.4493869e-01 2.8620678e-01 2.3801287e-01 2.1448199e-01 1.8427005e-01 2.4198503e-01 1.9884520e-01 2.1243903e-01 2.0430599e-01 1.2215728e-01 2.1067715e-01 4.5871846e-01 3.9981904e-01 3.7661173e-01 3.9390784e-01 4.2025710e-01 4.2076082e-01 4.1035332e-01 4.0114215e-01 4.3087129e-01 3.5002279e-01 2.8365040e-01 3.7517763e-01 3.4790525e-01 4.3179681e-01 4.3157518e-01 3.3691557e-01 3.5177012e-01 3.5268567e-01 4.9705280e-01 4.0100475e-01 3.5505921e-01 3.8129652e-01 4.3707681e-01 3.2219168e-01 3.4580868e-01 3.4360838e-01 3.0200588e-01 2.9663386e-01 4.1868171e-01 3.3113109e-01 3.8802094e-01 2.9819354e-01 4.2685142e-01 3.1959000e-01 4.1941543e-01 3.7225097e-01 3.7239048e-01 3.4809183e-01 2.8876177e-01 3.1647952e-01 3.7686145e-01 2.9138730e-01 3.9981904e-01 3.8994709e-01 3.7187123e-01 3.2897937e-01 3.6879196e-01 3.2272643e-01 3.4226532e-01 3.4310531e-01 3.4181612e-04 
1.4644608e-03 2.1632573e-03 2.3231555e-03 8.1286280e-03 6.3818616e-03 1.7262411e-03 1.6624580e-03 1.7631926e-03 1.7262411e-03 2.4569053e-05 5.6859581e-05 1.7551896e-03 2.0541034e-02 1.6702311e-03 5.9025530e-04 3.2341087e-03 2.0408948e-03 3.0131078e-03 2.8888789e-04 4.5924146e-04 1.3293684e-04 1.9862431e-01 1.9776069e-01 2.3600084e-01 2.7779005e-01 2.5142895e-01 2.7187023e-01 2.1810101e-01 1.7860088e-01 2.2871390e-01 2.2752286e-01 2.6024955e-01 2.0181434e-01 2.5187795e-01 2.6758248e-01 1.4283878e-01 1.8622396e-01 2.6161990e-01 2.1373552e-01 3.2920505e-01 2.2325952e-01 2.7444329e-01 1.8891553e-01 3.3470963e-01 2.7127428e-01 2.0233024e-01 1.9944956e-01 2.5748056e-01 2.7510316e-01 2.4993365e-01 1.5775267e-01 2.2713624e-01 2.1043184e-01 1.9474801e-01 3.6534538e-01 2.7678503e-01 1.9377126e-01 2.2119834e-01 2.8749014e-01 1.9572304e-01 2.4991164e-01 2.9104517e-01 2.4261230e-01 2.1922346e-01 1.8874518e-01 2.4669621e-01 2.0288553e-01 2.1678869e-01 2.0886650e-01 1.2608625e-01 2.1517216e-01 4.6449792e-01 4.0560127e-01 3.8251194e-01 3.9944814e-01 4.2616553e-01 4.2679596e-01 4.1587264e-01 4.0695550e-01 4.3702628e-01 3.5557170e-01 2.8885981e-01 3.8108822e-01 3.5369655e-01 4.3799369e-01 4.3786355e-01 3.4252750e-01 3.5719184e-01 3.5788232e-01 5.0366583e-01 4.0706759e-01 3.6083966e-01 3.8695887e-01 4.4324858e-01 3.2788328e-01 3.5118655e-01 3.4899273e-01 3.0749578e-01 3.0179987e-01 4.2472010e-01 3.3655063e-01 3.9402558e-01 3.0319627e-01 4.3300399e-01 3.2485825e-01 4.2490754e-01 3.7841034e-01 3.7783349e-01 3.5333537e-01 2.9386653e-01 3.2211174e-01 3.8285611e-01 2.9712888e-01 4.0560127e-01 3.9574961e-01 3.7770634e-01 3.3490218e-01 3.7491174e-01 3.2829405e-01 3.4750328e-01 3.4827580e-01 2.7893549e-03 2.7277911e-03 9.4108270e-04 1.0799149e-02 7.7770747e-03 1.9770194e-03 5.4325768e-04 5.8945201e-04 1.9770194e-03 5.1514113e-04 3.6478892e-04 1.6876295e-03 1.7172006e-02 2.9808015e-03 3.7816236e-04 5.6735840e-03 1.0535544e-03 5.0675384e-03 1.2248969e-03 1.1742680e-03 5.8878380e-05 1.9840502e-01 1.9840646e-01 2.3601178e-01 2.7699198e-01 2.5062921e-01 2.7429513e-01 2.1981939e-01 1.7908511e-01 2.2870797e-01 2.2884763e-01 2.5935890e-01 2.0237179e-01 2.5038976e-01 2.6920634e-01 1.4261267e-01 1.8559031e-01 2.6457877e-01 2.1508394e-01 3.2684946e-01 2.2341437e-01 2.7694800e-01 1.8806980e-01 3.3447269e-01 2.7322826e-01 2.0195195e-01 1.9876628e-01 2.5683031e-01 2.7497319e-01 2.5083242e-01 1.5688509e-01 2.2686098e-01 2.1020792e-01 1.9457412e-01 3.6717222e-01 2.8066992e-01 1.9637709e-01 2.2117475e-01 2.8566080e-01 1.9793610e-01 2.4995417e-01 2.9345115e-01 2.4428403e-01 2.1902512e-01 1.8834472e-01 2.4798811e-01 2.0544380e-01 2.1853532e-01 2.0913443e-01 1.2439434e-01 2.1611468e-01 4.6739623e-01 4.0702486e-01 3.8261075e-01 4.0188705e-01 4.2751037e-01 4.2749738e-01 4.1883120e-01 4.0825884e-01 4.3736523e-01 3.5667688e-01 2.8932174e-01 3.8108192e-01 3.5341594e-01 4.3812879e-01 4.3747647e-01 3.4285168e-01 3.5898948e-01 3.6076578e-01 5.0317788e-01 4.0703744e-01 3.6087589e-01 3.8845434e-01 4.4366743e-01 3.2719263e-01 3.5301257e-01 3.5071770e-01 3.0715395e-01 3.0299726e-01 4.2534595e-01 3.3768843e-01 3.9391146e-01 3.0525217e-01 4.3324318e-01 3.2636388e-01 4.2820876e-01 3.7681443e-01 3.8013938e-01 3.5586945e-01 2.9507753e-01 3.2153910e-01 3.8242816e-01 2.9467977e-01 4.0702486e-01 3.9678588e-01 3.7800850e-01 3.3305200e-01 3.7343525e-01 3.2833826e-01 3.4988792e-01 3.5099795e-01 7.6889988e-04 5.5936308e-03 9.7548717e-03 1.0412603e-02 1.0389613e-03 5.7781385e-03 5.9052415e-03 1.0389613e-03 1.4340775e-03 1.1584691e-03 6.0259991e-03 
[large block of raw floating-point values from a committed data file (unreadable numeric matrix); contents omitted]
2.2010230e-02 3.1108475e-02 2.0217050e-02 2.3141445e-02 1.0944826e-02 3.8599335e-03 1.0791452e-02 6.1542383e-03 2.2743482e-02 2.1333545e-02 6.5515291e-03 1.4388718e-02 2.0852825e-02 3.9383251e-02 1.5461941e-02 7.8878514e-03 1.7372104e-02 2.4911352e-02 2.6880230e-03 1.3798611e-02 1.3025794e-02 2.1193973e-03 6.9838956e-03 2.1270749e-02 8.9787629e-03 1.2743052e-02 1.1335890e-02 2.1877049e-02 9.6533919e-03 3.5351983e-02 7.6384883e-03 2.0107777e-02 1.8067074e-02 6.8413076e-03 2.4741061e-03 1.0039063e-02 3.4670280e-04 2.0489598e-02 1.6923143e-02 1.1154236e-02 2.1521967e-03 7.1850256e-03 4.3898059e-03 1.6491142e-02 1.8665860e-02 7.4245740e-03 1.0715228e-02 3.1685550e-03 5.1120383e-03 5.0451240e-03 4.1323270e-03 1.4489604e-04 8.1318290e-04 3.0839979e-03 1.9714350e-02 1.7947024e-03 6.7440625e-02 4.5128820e-02 3.9916292e-02 4.1909560e-02 5.3040678e-02 5.4550431e-02 4.7872699e-02 4.5818357e-02 5.9509133e-02 2.8778013e-02 1.2927960e-02 3.9749997e-02 3.1835574e-02 6.0466685e-02 6.2068316e-02 2.6673332e-02 2.8274822e-02 2.8015780e-02 9.1028220e-02 4.9020613e-02 3.3018114e-02 3.8378554e-02 6.1817956e-02 2.5959804e-02 2.6422336e-02 2.5876047e-02 1.9520884e-02 1.4184191e-02 5.3918305e-02 2.3137863e-02 4.4568283e-02 1.3507580e-02 5.8174666e-02 1.9385371e-02 5.1416653e-02 4.5132833e-02 3.4491897e-02 2.6637939e-02 1.2404699e-02 2.3988175e-02 4.1744067e-02 2.7329966e-02 4.5128820e-02 4.2235431e-02 3.7715182e-02 3.3171215e-02 4.3405180e-02 2.3512609e-02 2.4946638e-02 2.5117607e-02 5.7451096e-03 2.0181202e-03 1.1916495e-03 5.0056293e-03 1.1565469e-03 7.7131020e-03 3.5313465e-03 2.1072743e-03 2.6393551e-02 2.0670282e-03 4.4935473e-02 2.3639481e-02 1.6646699e-02 2.4503230e-02 2.9137719e-02 2.8488385e-02 3.0530237e-02 2.3788614e-02 3.1241308e-02 1.1731452e-02 1.7654569e-03 1.6307697e-02 1.0602623e-02 3.1457386e-02 3.1449229e-02 8.6094127e-03 1.3341698e-02 1.6740022e-02 5.3656632e-02 2.2705161e-02 1.1958802e-02 1.9130319e-02 3.3171817e-02 6.4720106e-03 1.2268562e-02 1.1633623e-02 3.5425115e-03 3.7925712e-03 2.7828397e-02 8.3873180e-03 1.9384549e-02 5.8220699e-03 3.0002594e-02 7.2263237e-03 3.4339797e-02 1.7195852e-02 1.8977924e-02 1.4659749e-02 3.0403500e-03 5.5430802e-03 1.6804744e-02 6.5028482e-03 2.3639481e-02 2.0526700e-02 1.5612876e-02 9.4514606e-03 1.6202837e-02 6.2323724e-03 1.3120120e-02 1.4374340e-02 2.8180986e-03 1.0509318e-02 1.7897176e-02 2.8798358e-03 9.1928702e-03 6.3970688e-03 1.0746267e-02 5.0496414e-02 7.7981540e-03 2.4751970e-02 1.2458649e-02 1.1686269e-02 1.0356316e-02 1.6774912e-02 1.8282513e-02 1.3498932e-02 1.2919728e-02 2.1579044e-02 5.2962835e-03 2.7725666e-03 1.1818152e-02 9.1091800e-03 2.2436834e-02 2.4265521e-02 5.8512682e-03 4.4398598e-03 4.1822455e-03 4.2435194e-02 1.6425553e-02 8.8184300e-03 9.0473617e-03 2.2804959e-02 8.4055012e-03 3.7245140e-03 3.5955501e-03 5.8501155e-03 1.2261597e-03 1.8027694e-02 3.2889908e-03 1.4332649e-02 2.5252408e-04 2.0964539e-02 1.7865385e-03 1.5552416e-02 1.8549588e-02 6.8429145e-03 3.5948443e-03 1.0694978e-03 7.5092190e-03 1.3665818e-02 1.7252830e-02 1.2458649e-02 1.1370103e-02 1.0296741e-02 1.5007456e-02 1.7383467e-02 5.5463863e-03 2.9922345e-03 3.0734531e-03 3.1960387e-03 6.8662451e-03 1.2966654e-04 2.7649067e-03 8.3427366e-04 2.7984284e-03 2.9822521e-02 1.2731780e-03 4.3416537e-02 2.4875338e-02 2.0714079e-02 2.3200351e-02 3.0852531e-02 3.1753284e-02 2.8200300e-02 2.5332686e-02 3.5512031e-02 1.2989773e-02 3.4785945e-03 2.0603808e-02 1.5135483e-02 3.6226615e-02 3.7456354e-02 1.1531484e-02 1.2959687e-02 1.3737739e-02 6.0763799e-02 2.7435536e-02 
1.5849420e-02 1.9923216e-02 3.7336849e-02 1.1567919e-02 1.1725372e-02 1.1300051e-02 7.4803077e-03 3.9656480e-03 3.1244978e-02 9.2703065e-03 2.4126524e-02 3.9496417e-03 3.4455719e-02 7.0383738e-03 3.1302098e-02 2.5203523e-02 1.7680030e-02 1.2436482e-02 3.0466203e-03 1.0266971e-02 2.2126952e-02 1.5206492e-02 2.4875338e-02 2.2550174e-02 1.9125072e-02 1.7383181e-02 2.3878119e-02 9.5944653e-03 1.1168635e-02 1.1615557e-02 1.3151667e-03 2.5320521e-03 6.0366399e-03 2.7823080e-03 2.9364031e-04 1.6476106e-02 9.7247557e-04 6.0284770e-02 3.5334723e-02 2.6546579e-02 3.6095524e-02 4.1987790e-02 4.1172424e-02 4.3178382e-02 3.5527420e-02 4.4374359e-02 2.0339225e-02 5.8436205e-03 2.6082735e-02 1.8603485e-02 4.4570054e-02 4.4351810e-02 1.6144347e-02 2.2244142e-02 2.5921932e-02 7.0056396e-02 3.3998717e-02 2.0524111e-02 2.9752453e-02 4.6679420e-02 1.2673386e-02 2.0806267e-02 2.0008665e-02 8.5632030e-03 8.9793060e-03 4.0369526e-02 1.5814708e-02 2.9884891e-02 1.1327921e-02 4.2874486e-02 1.4012365e-02 4.7563178e-02 2.6115270e-02 2.9259206e-02 2.3493076e-02 7.6990791e-03 1.1423166e-02 2.6517929e-02 9.7961479e-03 3.5334723e-02 3.1531604e-02 2.5300172e-02 1.5549538e-02 2.4969819e-02 1.2759840e-02 2.1558690e-02 2.2929217e-02 6.4418336e-03 6.6136731e-03 4.4102106e-03 9.1481615e-04 8.6358065e-03 2.3009102e-03 7.8700864e-02 5.0063075e-02 3.9499858e-02 5.0635713e-02 5.7926279e-02 5.6986449e-02 5.8777354e-02 5.0310505e-02 6.0682610e-02 3.1865387e-02 1.2666075e-02 3.8913195e-02 2.9597893e-02 6.0871779e-02 6.0471723e-02 2.6606279e-02 3.4024273e-02 3.7905442e-02 8.9816272e-02 4.8416482e-02 3.2086335e-02 4.3354864e-02 6.3371751e-02 2.1811352e-02 3.2202223e-02 3.1242353e-02 1.6436605e-02 1.6909958e-02 5.6040246e-02 2.6101775e-02 4.3480802e-02 1.9524603e-02 5.8918015e-02 2.3597831e-02 6.3733101e-02 3.8161980e-02 4.2440703e-02 3.5140814e-02 1.5073633e-02 2.0215594e-02 3.9313992e-02 1.6000311e-02 5.0063075e-02 4.5569203e-02 3.8016354e-02 2.4659247e-02 3.6849168e-02 2.2186423e-02 3.2805330e-02 3.4269626e-02 3.8467105e-03 1.2875815e-03 2.5072619e-03 2.9525973e-02 1.2875815e-03 4.2282453e-02 2.3357390e-02 1.8646096e-02 2.2313442e-02 2.9124686e-02 2.9658732e-02 2.7480923e-02 2.3735668e-02 3.3135090e-02 1.1714292e-02 2.4574323e-03 1.8489812e-02 1.3106245e-02 3.3729170e-02 3.4672500e-02 9.9345371e-03 1.2062833e-02 1.3461486e-02 5.7431716e-02 2.5093254e-02 1.3932087e-02 1.8599958e-02 3.4955869e-02 9.5459229e-03 1.0897419e-02 1.0433767e-02 5.8375168e-03 3.2324242e-03 2.9124686e-02 8.1819777e-03 2.1845036e-02 3.6925062e-03 3.2054971e-02 6.2644754e-03 3.0710474e-02 2.2249402e-02 1.6897068e-02 1.1997033e-02 2.3970471e-03 8.3620725e-03 1.9766550e-02 1.2536577e-02 2.3357390e-02 2.0894448e-02 1.7210222e-02 1.4675128e-02 2.1017625e-02 7.9709601e-03 1.0694253e-02 1.1329979e-02 8.0862109e-04 3.9488260e-03 2.3216413e-02 2.2109950e-03 6.3313725e-02 4.2461361e-02 3.8219621e-02 3.8832114e-02 5.0144792e-02 5.1966249e-02 4.4333772e-02 4.3192281e-02 5.6977170e-02 2.6944997e-02 1.2389873e-02 3.8126276e-02 3.0729621e-02 5.8026007e-02 5.9897200e-02 2.5415278e-02 2.6079828e-02 2.5293603e-02 8.8103550e-02 4.7063061e-02 3.1649890e-02 3.5923563e-02 5.9166899e-02 2.5434205e-02 2.4297609e-02 2.3831192e-02 1.9112973e-02 1.3083158e-02 5.1398726e-02 2.1557413e-02 4.2826183e-02 1.1968109e-02 5.5744441e-02 1.7792619e-02 4.7600638e-02 4.4377594e-02 3.1768113e-02 2.4143930e-02 1.1416637e-02 2.3484839e-02 4.0313562e-02 2.8205265e-02 4.2461361e-02 3.9889484e-02 3.5980112e-02 3.3161057e-02 4.2633793e-02 2.2600989e-02 2.2605649e-02 2.2606045e-02 1.6651142e-03 
2.2593118e-02 4.7494657e-04 5.5766903e-02 3.4735344e-02 2.9594132e-02 3.2557240e-02 4.1723024e-02 4.2746173e-02 3.8238608e-02 3.5278801e-02 4.7033204e-02 2.0363061e-02 7.2717611e-03 2.9420951e-02 2.2560431e-02 4.7813714e-02 4.9078515e-02 1.8314288e-02 2.0299319e-02 2.0869007e-02 7.5386883e-02 3.7549246e-02 2.3623644e-02 2.8844458e-02 4.9142563e-02 1.7633849e-02 1.8744071e-02 1.8222205e-02 1.2402012e-02 8.3988430e-03 4.2147698e-02 1.5629140e-02 3.3602383e-02 8.3018288e-03 4.5796447e-02 1.2702505e-02 4.1687026e-02 3.3943329e-02 2.5980350e-02 1.9415550e-02 7.0260502e-03 1.6012157e-02 3.1063717e-02 1.9520820e-02 3.4735344e-02 3.1983672e-02 2.7741891e-02 2.3833980e-02 3.2440091e-02 1.5637054e-02 1.7874227e-02 1.8281297e-02 1.4972117e-02 3.8328039e-04 6.3723448e-02 3.8719440e-02 3.0384566e-02 3.8629777e-02 4.5827446e-02 4.5472032e-02 4.5653244e-02 3.9023449e-02 4.9116846e-02 2.2930601e-02 7.4083466e-03 2.9962579e-02 2.2100137e-02 4.9469036e-02 4.9598054e-02 1.8997460e-02 2.4379661e-02 2.7279745e-02 7.6566902e-02 3.8413283e-02 2.3958684e-02 3.2715136e-02 5.1484761e-02 1.5886249e-02 2.2794916e-02 2.2023153e-02 1.1094162e-02 1.0331639e-02 4.4681292e-02 1.7978253e-02 3.4097516e-02 1.2099271e-02 4.7608429e-02 1.5662138e-02 4.9957613e-02 3.0977854e-02 3.1455087e-02 2.5013254e-02 8.8604244e-03 1.4432007e-02 3.0714741e-02 1.3426163e-02 3.8719440e-02 3.4992868e-02 2.8902863e-02 1.9656006e-02 2.9684992e-02 1.5494562e-02 2.3064694e-02 2.4203450e-02 1.8836956e-02 1.3755831e-01 9.8685385e-02 8.2425470e-02 1.0000651e-01 1.0925759e-01 1.0726669e-01 1.1120208e-01 9.8918033e-02 1.1167083e-01 7.2661493e-02 4.1665349e-02 8.1426911e-02 6.7501542e-02 1.1159988e-01 1.1014878e-01 6.3989575e-02 7.6224987e-02 8.1835804e-02 1.4773346e-01 9.4587815e-02 7.1696153e-02 8.9447560e-02 1.1534202e-01 5.4876111e-02 7.3538690e-02 7.2081812e-02 4.6952332e-02 4.9385723e-02 1.0589901e-01 6.4038977e-02 8.7644093e-02 5.3625558e-02 1.0917520e-01 6.0322002e-02 1.1787190e-01 7.6918821e-02 8.8499122e-02 7.7885622e-02 4.6255911e-02 5.2612368e-02 8.1239445e-02 4.0019052e-02 9.8685385e-02 9.2131722e-02 8.0633770e-02 5.6243805e-02 7.5363077e-02 5.6811044e-02 7.4440381e-02 7.6512320e-02 5.8097767e-02 3.5211195e-02 2.8463781e-02 3.4215950e-02 4.2147698e-02 4.2414656e-02 4.0542228e-02 3.5615822e-02 4.6295613e-02 2.0363061e-02 6.3798220e-03 2.8163995e-02 2.0925233e-02 4.6838114e-02 4.7487902e-02 1.7368541e-02 2.1117782e-02 2.2967145e-02 7.3810390e-02 3.6316652e-02 2.2375879e-02 2.9367287e-02 4.8511603e-02 1.5463833e-02 1.9583828e-02 1.8938570e-02 1.0601468e-02 8.3988430e-03 4.1723024e-02 1.5629140e-02 3.2244606e-02 9.3153170e-03 4.4933305e-02 1.3111918e-02 4.4412271e-02 3.0809608e-02 2.7438827e-02 2.1093197e-02 7.0260502e-03 1.3969417e-02 2.9313944e-02 1.5218585e-02 3.5211195e-02 3.1983672e-02 2.6841235e-02 2.0313092e-02 2.9439905e-02 1.4332974e-02 1.9364178e-02 2.0170126e-02 3.9924060e-03 1.0538198e-02 3.2148883e-03 2.7366019e-03 4.4621109e-03 1.7397347e-03 4.1425525e-03 5.2723146e-03 1.1136640e-02 2.9112354e-02 1.1160954e-02 1.6541883e-02 6.0336652e-03 8.3590571e-03 1.6026960e-02 9.4770245e-03 8.7523451e-03 9.5282841e-03 8.5487892e-03 1.3900105e-02 5.7309995e-03 4.7773544e-03 2.3853571e-02 1.0495078e-02 1.1045936e-02 2.7111420e-02 2.3076623e-02 4.8207470e-03 1.4681091e-02 1.0141190e-02 2.1316125e-02 5.8141126e-03 1.6474359e-02 1.3064397e-03 2.0347529e-02 5.7992324e-03 9.5368199e-03 2.5369333e-02 2.4565418e-02 1.2896533e-02 4.4389699e-02 3.9924060e-03 5.8442541e-03 1.0341765e-02 2.9771308e-02 2.0024906e-02 2.0168043e-02 1.0668382e-02 
1.0461554e-02 1.6977050e-03 8.0038314e-04 3.2736649e-04 7.3247771e-04 1.7861084e-03 1.2199790e-05 1.6168506e-03 2.0903986e-03 1.2529506e-02 1.9650001e-03 4.3150624e-03 2.0139182e-03 3.1759995e-03 4.1258242e-03 2.0323215e-03 3.5878910e-03 9.4977361e-03 1.5488203e-03 3.0112961e-03 2.7909675e-04 1.8152664e-03 8.3701176e-03 2.5714996e-03 2.7148441e-03 1.0520423e-02 9.2964166e-03 7.5284963e-04 3.9888853e-03 1.8331853e-03 9.4586856e-03 1.6039412e-03 5.4889906e-03 2.8052973e-03 7.3341015e-03 1.2316222e-03 3.2601592e-03 1.0868316e-02 8.8162037e-03 2.9210959e-03 2.1963225e-02 0.0000000e+00 1.8230646e-04 1.5254865e-03 1.2273284e-02 6.9788794e-03 6.3577637e-03 3.5631537e-03 4.1101326e-03 4.2047124e-03 2.6033919e-03 1.7916004e-03 6.7524244e-03 1.5279872e-03 2.3303877e-03 1.3468412e-03 7.9573793e-03 9.8329441e-06 8.0754860e-04 2.3639655e-03 2.4986002e-03 1.3799583e-03 2.6729899e-03 6.2996248e-03 1.0853281e-02 4.8574547e-04 3.9201723e-04 1.4418237e-03 2.9002781e-03 3.0725233e-03 3.0407572e-03 2.9093497e-03 5.1175009e-03 6.6599859e-03 1.5926877e-03 2.5889139e-03 1.3713626e-04 8.5117654e-03 1.9829707e-03 4.3442759e-03 8.7298845e-03 2.1325966e-03 3.5994846e-03 5.1472858e-03 7.9423211e-03 3.5289513e-03 2.0294631e-04 1.1925423e-02 1.6977050e-03 7.8274456e-04 4.8500499e-05 4.9289073e-03 1.8879551e-03 2.5218165e-03 4.9495642e-03 6.1589498e-03 1.4775011e-03 2.8508338e-03 3.8437964e-04 1.0082748e-03 4.3870644e-03 2.7766403e-03 1.3317076e-02 4.6107743e-03 7.0365754e-03 5.0970174e-03 7.0294656e-03 5.8145812e-03 1.6991258e-03 1.6190058e-03 1.4192459e-02 4.5311136e-03 5.3518685e-03 7.5607221e-04 4.5347742e-03 1.1372343e-02 2.1219078e-03 2.3903563e-03 1.2745790e-02 9.1602079e-03 2.9606388e-03 4.3905218e-03 4.7897027e-03 8.0493172e-03 4.4779450e-03 5.1908739e-03 9.8295198e-04 1.2281083e-02 3.7969538e-04 1.7665186e-03 1.0605818e-02 1.1571194e-02 6.2101277e-03 2.7119271e-02 8.0038314e-04 1.4492761e-03 3.6743113e-03 1.7056692e-02 1.1710246e-02 8.2821554e-03 2.2240766e-03 2.2684005e-03 3.0380682e-04 2.0343287e-03 2.8037036e-04 8.3123377e-04 3.9939935e-03 1.6647447e-02 2.9061444e-03 6.1278767e-03 1.1769582e-03 2.2926522e-03 6.3717175e-03 3.9793414e-03 5.6921961e-03 6.6386310e-03 1.6577942e-03 4.6178179e-03 1.2108195e-03 8.4773320e-04 1.1017765e-02 4.7223063e-03 4.9236953e-03 1.3900771e-02 1.3046714e-02 3.7972280e-04 6.5335900e-03 2.3480655e-03 1.3286542e-02 9.3142474e-04 8.4820747e-03 2.8343361e-03 8.2403259e-03 2.5815546e-03 5.4537681e-03 1.4900562e-02 1.1670902e-02 3.7659184e-03 2.5593433e-02 3.2736649e-04 7.7091107e-04 2.6079554e-03 1.4611634e-02 8.0055625e-03 9.0564615e-03 5.9260895e-03 6.5055071e-03 3.8779943e-03 5.7015696e-04 1.7303569e-04 4.1690936e-03 1.6322670e-02 1.9974930e-03 5.0009825e-03 3.2570724e-04 9.5057293e-04 5.8304604e-03 4.8059418e-03 7.5457291e-03 5.1241997e-03 6.9111123e-04 3.7782470e-03 1.7092344e-03 2.5698914e-04 9.5453465e-03 5.5846827e-03 5.6938512e-03 1.2788699e-02 1.3393372e-02 7.3809633e-06 6.7682476e-03 1.3200598e-03 1.4442150e-02 1.8441195e-04 9.0822620e-03 4.9911804e-03 5.8437761e-03 3.8589879e-03 6.9829557e-03 1.5273413e-02 1.0318402e-02 2.4584659e-03 2.2525564e-02 7.3247771e-04 8.2385517e-04 1.9942911e-03 1.2041849e-02 5.7258386e-03 8.2738022e-03 7.3225251e-03 8.2204997e-03 2.0515682e-03 5.4510656e-03 5.2217407e-03 1.7964531e-02 7.2742632e-03 1.0588415e-02 6.2949332e-03 8.6345788e-03 9.1795241e-03 3.5876856e-03 2.6991302e-03 1.4376485e-02 6.5862079e-03 8.4804528e-03 2.1372463e-03 5.3927480e-03 1.5896874e-02 4.1394106e-03 4.5375167e-03 1.7523452e-02 1.2909742e-02 4.0941134e-03 
7.2891356e-03 7.2227860e-03 1.1126344e-02 5.7124492e-03 8.1126160e-03 1.3907276e-04 1.6260954e-02 1.3854449e-03 3.2122771e-03 1.4553993e-02 1.6151019e-02 9.1578470e-03 3.3857588e-02 1.7861084e-03 2.9619943e-03 6.1829720e-03 2.2326762e-02 1.5680260e-02 1.2223630e-02 3.9245535e-03 3.7050303e-03 1.3686449e-03 2.1603250e-03 1.2649428e-02 1.7801502e-03 4.1458992e-03 1.7278919e-03 2.7991042e-03 4.0888611e-03 2.2218452e-03 3.9716339e-03 8.9665631e-03 1.3013529e-03 2.8806398e-03 3.4594790e-04 1.5677728e-03 8.1916354e-03 2.7802616e-03 2.9107124e-03 1.0452698e-02 9.5134829e-03 5.8171764e-04 4.1098444e-03 1.6001879e-03 9.8257537e-03 1.3468918e-03 5.7037585e-03 3.1132824e-03 6.8805675e-03 1.4645264e-03 3.5912866e-03 1.1107885e-02 8.6722521e-03 2.6587395e-03 2.1561220e-02 1.2199790e-05 1.4772036e-04 1.4023821e-03 1.1878859e-02 6.5538274e-03 6.3060622e-03 3.8811059e-03 4.4873121e-03 5.6719205e-03 1.8601099e-02 2.5037679e-03 5.8026549e-03 3.0990295e-05 3.8568498e-04 7.1301713e-03 6.6581271e-03 9.9891912e-03 3.5467136e-03 7.7986655e-04 4.6294369e-03 2.9226786e-03 3.3006174e-05 1.0560037e-02 7.5497742e-03 7.6393818e-03 1.4326258e-02 1.5841028e-02 1.7171643e-04 8.5994033e-03 1.5671556e-03 1.7334506e-02 1.9316046e-05 1.1306536e-02 6.6195881e-03 5.5794173e-03 5.6606012e-03 9.3044548e-03 1.7865075e-02 1.1490070e-02 2.7178516e-03 2.3185889e-02 1.6168506e-03 1.6521970e-03 2.7157547e-03 1.2355858e-02 5.5768992e-03 9.6617637e-03 9.6558503e-03 1.0729574e-02 4.4050618e-03 1.4462579e-03 1.4084514e-03 6.0437228e-03 6.9280724e-03 5.9092958e-04 3.5267150e-04 2.3264461e-03 1.8099026e-02 3.0921484e-03 8.6005594e-04 9.1287231e-04 6.3843974e-03 3.0627943e-03 4.0297343e-04 3.2526954e-04 3.6399798e-03 2.6478661e-03 3.9939935e-03 3.1762278e-04 2.2988741e-03 3.2596799e-03 5.3016678e-03 9.7044163e-04 7.0435695e-03 5.7009823e-03 1.4263218e-03 1.5142545e-03 3.5226184e-03 3.0792135e-03 2.3251410e-03 1.2857609e-02 2.0903986e-03 1.3292907e-03 8.8570158e-04 6.8622586e-03 5.1349839e-03 1.4775225e-03 1.2815928e-03 1.9995407e-03 7.7887386e-03 4.2718762e-03 1.8880208e-02 1.9223691e-02 2.7240761e-03 5.5247078e-03 8.3688264e-03 3.7206770e-02 1.2358468e-02 4.8889738e-03 9.3012979e-03 2.0063076e-02 2.3808756e-03 4.8847228e-03 4.4654367e-03 8.0211221e-04 5.5572260e-04 1.5849918e-02 2.4737465e-03 1.0011166e-02 2.0320189e-03 1.7697241e-02 2.0106232e-03 2.1009310e-02 1.0002251e-02 9.3994441e-03 6.7786672e-03 4.3329436e-04 1.8154672e-03 8.4395042e-03 6.2214471e-03 1.2529506e-02 1.0325253e-02 7.1412079e-03 5.6981926e-03 9.1712120e-03 1.6108637e-03 5.7438691e-03 6.7900644e-03 6.8888062e-04 2.5080626e-03 2.5613963e-03 1.3274318e-03 2.8814590e-03 6.6724246e-03 1.1044919e-02 5.3338455e-04 3.3999007e-04 1.6762239e-03 3.1000026e-03 2.8256325e-03 3.2387801e-03 3.0867064e-03 4.8730365e-03 6.6378483e-03 1.7822362e-03 2.6462661e-03 1.4123935e-04 8.6230682e-03 2.1318487e-03 4.4281251e-03 9.3233552e-03 1.8673327e-03 3.9343847e-03 5.4568897e-03 7.9015195e-03 3.2859077e-03 1.3180217e-04 1.1320265e-02 1.9650001e-03 9.6799653e-04 7.8282421e-05 4.5166083e-03 1.6331972e-03 2.3904078e-03 5.2224500e-03 6.4841213e-03 5.7642096e-03 5.5885978e-03 3.7048759e-04 3.1695738e-03 7.3434699e-03 1.6768125e-02 2.3743894e-03 1.1932462e-04 3.2200650e-03 6.6976868e-03 7.3134819e-04 3.2564607e-03 2.9664521e-03 1.9437509e-03 4.0525881e-03 4.6657625e-03 1.7256268e-03 1.3667365e-03 6.4082141e-03 5.2167922e-03 3.0823107e-03 1.3133486e-02 1.7068449e-03 5.3434665e-03 5.8085657e-03 4.9261084e-03 9.6684380e-04 7.1816432e-04 6.8418503e-03 4.3150624e-03 2.7358698e-03 7.4246714e-04 
2.1999970e-03 1.3715192e-03 6.3897855e-04 5.2531215e-03 6.6567087e-03 1.9959041e-04 7.2877827e-03 7.2348769e-03 1.0891220e-02 3.2459725e-03 7.4122702e-04 4.6749634e-03 3.3669230e-03 7.9583955e-05 1.0437521e-02 8.1436642e-03 8.2061472e-03 1.4337740e-02 1.6319841e-02 3.0462366e-04 9.0085125e-03 1.5214225e-03 1.8068443e-02 2.5934558e-05 1.1842923e-02 7.5545176e-03 5.1162105e-03 6.3606028e-03 1.0107865e-02 1.8360102e-02 1.1411116e-02 2.5943653e-03 2.2590562e-02 2.0139182e-03 1.9469619e-03 2.8165944e-03 1.1880076e-02 5.1539049e-03 9.7568917e-03 1.0422756e-02 1.1596876e-02 7.5663186e-03 8.6488322e-03 1.3133604e-02 3.0107568e-03 7.9406152e-04 4.7466095e-03 4.5711602e-03 4.9936224e-04 9.9296431e-03 9.5749791e-03 9.5587917e-03 1.4081306e-02 1.7234890e-02 8.7040364e-04 9.9040316e-03 1.5012448e-03 1.9639569e-02 3.0179394e-04 1.3010570e-02 1.0161281e-02 3.9519870e-03 8.1767554e-03 1.2080688e-02 1.9279439e-02 1.0989202e-02 2.3266303e-03 2.0713423e-02 3.1759995e-03 2.8067172e-03 3.1032715e-03 1.0490119e-02 4.0786420e-03 9.8110939e-03 1.2277454e-02 1.3709414e-02 1.7582839e-03 4.9489607e-03 1.9934064e-02 3.5016865e-03 3.4697267e-04 2.6310541e-03 8.0617166e-03 9.6411709e-04 1.7064375e-03 1.4666650e-03 1.4745772e-03 2.1108592e-03 5.5260764e-03 5.1802495e-04 2.3341578e-03 3.7167048e-03 6.5626516e-03 1.3180061e-03 1.1565092e-02 3.6658759e-03 3.8514458e-03 3.6514318e-03 2.8125792e-03 9.8545506e-04 1.7565517e-03 7.9513446e-03 4.1258242e-03 2.7177731e-03 1.0483123e-03 3.5558284e-03 3.1649242e-03 2.4110016e-04 3.1231514e-03 4.2303212e-03 8.7046320e-04 1.9803680e-02 4.5057787e-03 2.2875453e-03 8.1473256e-04 7.2784764e-03 5.2841523e-03 3.1936914e-05 6.0579771e-05 5.5427829e-03 2.9996038e-03 4.7111715e-03 6.7018226e-04 3.8206460e-03 2.7379425e-03 6.3961850e-03 9.7000606e-04 5.0455840e-03 8.7517746e-03 5.2181192e-04 4.2086901e-04 3.8740476e-03 5.1909001e-03 4.1846614e-03 1.6912712e-02 2.0323215e-03 1.6814934e-03 2.0140995e-03 1.0246238e-02 8.0709165e-03 2.9533168e-03 3.5001524e-04 7.2402168e-04 2.4525121e-02 8.3786268e-03 5.9730003e-03 2.2241688e-03 1.0490949e-02 1.0052150e-02 8.4481649e-04 1.0306755e-03 9.7104895e-03 4.7051149e-03 7.5682486e-03 2.5521754e-03 7.8053009e-03 3.0734717e-03 9.8801938e-03 2.2384251e-03 3.6192976e-03 1.5052125e-02 6.1736712e-04 1.0280118e-04 5.5375454e-03 9.7589612e-03 8.6474768e-03 2.4375316e-02 3.5878910e-03 3.8390770e-03 5.3089764e-03 1.6859170e-02 1.4176552e-02 6.5576789e-03 2.5984028e-04 9.3320862e-05 6.7547274e-03 1.5238284e-02 1.2713534e-02 3.0890016e-03 2.3712664e-02 2.1340343e-02 2.1521507e-02 3.0005294e-02 3.3960619e-02 5.2290080e-03 2.3006673e-02 8.7125215e-03 3.6416136e-02 3.8281789e-03 2.7389250e-02 1.5157530e-02 1.2639929e-02 1.7411617e-02 2.3820503e-02 3.6850697e-02 2.5373970e-02 1.0602298e-02 3.7784525e-02 9.4977361e-03 1.0004799e-02 1.1952848e-02 2.3559319e-02 1.3109247e-02 2.3603108e-02 2.4562463e-02 2.6017612e-02 1.7241949e-03 2.0286689e-03 1.1335916e-03 5.6322483e-03 5.1221157e-03 5.0559971e-03 8.5835151e-03 1.0650923e-02 5.5566853e-04 5.1134826e-03 1.3892193e-04 1.2640034e-02 5.6438567e-04 7.4313522e-03 8.3161300e-03 2.5369950e-03 4.7342848e-03 7.3166190e-03 1.2276381e-02 6.3456614e-03 5.8916640e-04 1.5594819e-02 1.5488203e-03 9.3082136e-04 7.5849611e-04 7.0579696e-03 2.4440449e-03 5.1791902e-03 7.3166180e-03 8.5826500e-03 2.1075364e-03 5.4157920e-03 1.3748154e-03 2.4371345e-03 2.2201300e-03 2.6881175e-03 4.1574302e-03 3.5021271e-03 1.4032793e-03 9.1188034e-04 6.1085103e-03 4.1365224e-03 2.7438304e-03 1.0770631e-02 2.1519140e-03 3.9794011e-03 4.6480198e-03 
5.1312578e-03 1.6288841e-03 5.4679156e-04 8.7532006e-03 3.0112961e-03 1.7262374e-03 2.9055719e-04 3.3158230e-03 1.8013008e-03 9.2887256e-04 4.2320076e-03 5.4776329e-03 3.2889511e-03 6.5019207e-03 1.1654311e-03 1.2550970e-03 7.9744383e-03 6.4095512e-03 1.6783080e-03 2.2182877e-03 1.9380758e-03 6.5001122e-03 2.8053456e-03 3.3035106e-03 3.3582234e-03 7.0744470e-03 5.6581852e-04 1.8141689e-03 7.7202001e-03 6.7565678e-03 2.7207024e-03 1.9179880e-02 2.7909675e-04 2.2008347e-04 1.1064738e-03 1.0688407e-02 6.6082062e-03 4.4435997e-03 1.9606397e-03 2.4773484e-03 1.1763001e-02 8.2259536e-03 8.3480149e-03 1.5692475e-02 1.7083720e-02 2.8355914e-04 9.4960515e-03 2.0536401e-03 1.8469924e-02 1.0092056e-04 1.2279622e-02 6.4441301e-03 6.4046448e-03 6.0274458e-03 9.8857207e-03 1.9188640e-02 1.2736270e-02 3.3492132e-03 2.4941935e-02 1.8152664e-03 1.9865894e-03 3.3071796e-03 1.3641872e-02 6.4197769e-03 1.0769048e-02 1.0309787e-02 1.1341762e-02 5.1119032e-03 4.6662587e-03 4.6505160e-04 3.2269116e-03 9.0774431e-03 2.5215213e-03 4.0057628e-03 6.1199030e-03 9.7447895e-03 3.5360157e-03 1.8995080e-02 2.6805580e-03 8.6582433e-03 8.1342278e-03 3.7198152e-03 3.9443681e-05 2.6670326e-03 3.4016785e-03 8.3701176e-03 6.1426721e-03 2.9189037e-03 1.0083175e-03 2.2713681e-03 3.9595770e-04 7.2341855e-03 8.8447009e-03 1.1230173e-05 5.1389929e-03 2.4707449e-03 5.4731760e-03 5.1980845e-04 4.3112681e-03 2.1787917e-03 7.2534046e-03 6.7672528e-04 5.6623211e-03 9.1277214e-03 7.3619628e-04 3.7016392e-04 3.2564607e-03 4.9531882e-03 4.5742792e-03 1.6438776e-02 2.5714996e-03 2.1421258e-03 2.3256067e-03 1.0175775e-02 8.4088183e-03 2.7613050e-03 2.4755840e-04 6.0873636e-04 4.6703189e-03 2.2089026e-03 5.5642336e-03 3.7824549e-04 4.1878969e-03 2.0462491e-03 7.3148116e-03 5.4583143e-04 6.1390745e-03 8.7247882e-03 9.1073145e-04 4.9034572e-04 2.9664521e-03 4.5041455e-03 4.3650355e-03 1.5605807e-02 2.7148441e-03 2.1951828e-03 2.2077213e-03 9.5777719e-03 8.0120192e-03 2.4278861e-03 3.2552080e-04 7.3734328e-04 1.7636357e-03 1.2290813e-02 2.3919392e-03 6.5534766e-03 4.2355076e-03 1.3428854e-02 2.7760250e-03 2.0719142e-02 5.3736931e-03 9.3604766e-03 7.8350168e-03 1.9437509e-03 2.3451546e-04 4.9805974e-03 3.4615227e-03 1.0520423e-02 8.1642247e-03 4.6947450e-03 2.2490758e-03 4.7893620e-03 5.2359029e-04 6.8110246e-03 8.2524171e-03 1.3046714e-02 1.1318623e-03 8.6855001e-03 5.4130623e-04 1.5131426e-02 5.6179632e-04 1.5407719e-02 1.0669118e-02 5.8683852e-03 3.5797271e-03 6.2414181e-05 2.6714308e-03 7.7088950e-03 9.8081718e-03 9.2964166e-03 7.7221233e-03 5.7330158e-03 7.6711675e-03 9.7842038e-03 1.6419007e-03 2.8364539e-03 3.5173281e-03 6.5335900e-03 1.1340078e-03 1.4192754e-02 1.5964523e-04 8.8481511e-03 5.2711173e-03 5.4383051e-03 3.8856537e-03 6.9538378e-03 1.4900562e-02 9.8443835e-03 2.2013577e-03 2.1746585e-02 7.5284963e-04 7.7091107e-04 1.8026328e-03 1.1463429e-02 5.3229428e-03 7.8904169e-03 7.2583306e-03 8.1944055e-03 3.9135288e-03 1.7196090e-03 8.1113282e-03 2.2942459e-04 9.3463740e-03 6.8261058e-03 2.3665724e-03 1.6309596e-03 1.7256268e-03 2.3397606e-03 3.5922701e-03 1.1208568e-02 3.9888853e-03 2.9455145e-03 1.9660659e-03 6.5467152e-03 6.1492489e-03 9.3538874e-04 1.2212079e-03 1.9267388e-03 1.0793365e-02 1.2592720e-03 6.0147390e-03 9.1609131e-03 1.8537817e-03 4.5335508e-03 6.6172179e-03 1.0131803e-02 4.6089934e-03 1.7595473e-04 1.2987364e-02 1.8331853e-03 9.7087402e-04 3.3609260e-04 5.4018686e-03 1.7068395e-03 3.6646912e-03 6.4787894e-03 7.7901480e-03 1.6754655e-02 6.9641544e-04 1.3182224e-02 1.4651385e-02 4.9466239e-03 2.3931163e-03 
5.9263439e-04 5.4211666e-03 1.0221921e-02 1.4928620e-02 9.4586856e-03 8.3819492e-03 7.3339215e-03 1.2011676e-02 1.3620169e-02 3.6119007e-03 1.8435886e-03 2.0979826e-03 1.0794235e-02 6.9811950e-03 4.9473584e-03 5.5993500e-03 9.1192858e-03 1.7103671e-02 1.0654852e-02 2.2936354e-03 2.1887320e-02 1.6039412e-03 1.5236521e-03 2.3684718e-03 1.1405606e-02 4.9416620e-03 8.9601309e-03 9.4128890e-03 1.0537337e-02 1.0144176e-02 9.3650956e-03 2.8049928e-03 1.4261619e-03 9.6795720e-04 3.1673467e-03 5.6308712e-03 1.2399657e-02 5.4889906e-03 4.4629844e-03 3.5148873e-03 8.2921709e-03 8.5570702e-03 1.5767832e-03 9.7941910e-04 1.5044401e-03 1.9162648e-02 2.3258011e-03 4.3611697e-03 1.7154674e-02 1.9266334e-02 1.1399040e-02 3.8294804e-02 2.8052973e-03 4.2987486e-03 8.1194560e-03 2.5933333e-02 1.8564970e-02 1.4940871e-02 5.2230072e-03 4.8296564e-03 1.1136786e-02 1.2996602e-02 1.1896568e-02 3.3692870e-03 1.0295085e-03 6.7928621e-03 7.3341015e-03 5.3674586e-03 2.6380919e-03 1.7126363e-03 1.9252416e-05 3.9628066e-03 1.2371798e-02 1.4417093e-02 5.5916770e-04 7.0172680e-03 8.6736288e-03 5.5025710e-03 2.2796596e-02 1.2316222e-03 1.4793666e-03 2.9451077e-03 1.4303806e-02 1.0475492e-02 5.7470026e-03 7.8882742e-04 9.0526661e-04 4.3645694e-03 7.8632956e-03 7.2260963e-03 2.1331409e-02 3.2601592e-03 3.2514734e-03 4.2300937e-03 1.4385386e-02 1.2160577e-02 5.0215597e-03 4.3279739e-05 5.1818418e-05 3.0767343e-03 9.0015857e-03 9.8042907e-03 1.0868316e-02 9.1698320e-03 6.9442206e-03 8.2610449e-03 1.0963075e-02 2.1355881e-03 3.5431999e-03 4.2180590e-03 3.2143555e-03 3.3819870e-03 8.8162037e-03 6.5643841e-03 3.2940615e-03 1.2995092e-03 2.9075684e-03 3.1897532e-04 6.9331287e-03 8.4863836e-03 1.0164058e-02 2.9210959e-03 1.7078622e-03 4.0814516e-04 3.6297378e-03 8.7693352e-04 2.6826046e-03 6.9132822e-03 8.3825591e-03 2.1963225e-02 1.8183756e-02 1.2010413e-02 1.7351817e-03 6.4958425e-03 5.7545512e-03 1.9704198e-02 2.2181841e-02 1.8230646e-04 1.5254865e-03 1.2273284e-02 6.9788794e-03 6.3577637e-03 3.5631537e-03 4.1101326e-03 6.5324999e-04 9.4729463e-03 5.0293123e-03 4.5622977e-03 3.3814637e-03 4.1233865e-03 5.1615257e-03 2.3326030e-03 2.1577255e-03 4.0325048e-03 5.1426622e-03 1.5351757e-03 2.6422594e-03 1.3282600e-02 1.5461999e-02 3.4307479e-03 1.1531055e-02 1.3518551e-02 4.2918744e-03 5.5398788e-03 8.4122900e-05 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-correlation-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-correlation-ml.txt new file mode 100644 index 0000000..2a17a2a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-correlation-ml.txt @@ -0,0 +1 @@ + 9.2507465e-01 9.6528566e-01 8.7255441e-01 1.1287379e+00 8.7318727e-01 1.0767102e+00 9.1419676e-01 1.1503304e+00 9.8074509e-01 1.0135025e+00 1.0495025e+00 9.4794536e-01 9.6829273e-01 1.1345767e+00 1.1048008e+00 9.2407796e-01 1.0228634e+00 9.3853195e-01 9.9377619e-01 1.0407662e+00 9.5048989e-01 9.0465688e-01 9.8056930e-01 8.9777156e-01 9.6357127e-01 9.3864452e-01 9.9754613e-01 9.7271356e-01 8.4383151e-01 9.6981983e-01 9.7510267e-01 1.0112663e+00 7.8730400e-01 1.0299498e+00 9.9307979e-01 9.0239520e-01 8.5428231e-01 8.8972742e-01 8.5933162e-01 9.6625934e-01 9.4175449e-01 9.9120729e-01 1.0503963e+00 8.8223053e-01 1.3261434e+00 1.1063209e+00 8.4058398e-01 1.0844267e+00 1.1153093e+00 1.0092643e+00 8.9585237e-01 1.0599818e+00 1.2321707e+00 1.1359624e+00 8.3503556e-01 1.1792243e+00 7.9159781e-01 1.0830419e+00 1.2181870e+00 9.9888500e-01 1.0227144e+00 6.8557277e-01 
9.6836193e-01 1.1061227e+00 1.0883453e+00 9.5681974e-01 9.9436299e-01 1.0304323e+00 1.1273949e+00 1.0735563e+00 1.0582583e+00 9.6040272e-01 1.0032137e+00 8.4900547e-01 1.1035351e+00 8.7867480e-01 9.6433176e-01 9.1850122e-01 8.9337435e-01 1.0449390e+00 8.9639384e-01 9.6704971e-01 1.0084258e+00 1.0528587e+00 1.1764481e+00 1.0913280e+00 1.0136672e+00 1.2737156e+00 9.5130359e-01 1.0367909e+00 1.1983402e+00 1.1319901e+00 1.1117462e+00 1.0343695e+00 1.0838628e+00 7.5266057e-01 1.0763316e+00 8.8067924e-01 9.6734383e-01 9.8800551e-01 1.2265742e+00 7.8833055e-01 1.0338670e+00 8.6666625e-01 9.9039950e-01 9.7142684e-01 9.3138616e-01 8.5849977e-01 8.5486301e-01 1.0516028e+00 1.1105313e+00 9.5943505e-01 9.8845171e-01 1.0566288e+00 9.9712198e-01 9.5545756e-01 1.1817974e+00 9.9128482e-01 1.0117892e+00 1.0979115e+00 1.0493943e+00 9.1318848e-01 9.3157311e-01 8.7073304e-01 1.2459441e+00 9.3412689e-01 1.0482297e+00 9.4224032e-01 9.5134153e-01 9.0857493e-01 9.7264161e-01 8.2900820e-01 9.3140549e-01 1.1330242e+00 1.0333002e+00 1.0117861e+00 1.2053255e+00 8.5291396e-01 1.0148928e+00 8.6641379e-01 9.7080819e-01 9.5457159e-01 9.5207457e-01 9.3539674e-01 9.0769069e-01 9.5322590e-01 1.1181803e+00 9.9765614e-01 7.5370610e-01 1.0807114e+00 1.0804601e+00 9.0214124e-01 8.7101998e-01 1.0167435e+00 1.2045936e+00 8.7300539e-01 1.1054300e+00 7.9145574e-01 1.0279340e+00 8.7623462e-01 1.0034756e+00 1.0386933e+00 9.3910970e-01 1.0028455e+00 9.9868824e-01 9.8752945e-01 9.8319327e-01 1.3110209e+00 8.6180633e-01 1.0993856e+00 8.5912563e-01 1.1303979e+00 9.8690459e-01 9.6910090e-01 9.1456819e-01 1.1525339e+00 1.1064552e+00 1.1062255e+00 9.7226683e-01 1.1091447e+00 1.1072238e+00 9.6544444e-01 9.6681036e-01 9.3247685e-01 9.6854634e-01 1.1035119e+00 1.1317148e+00 9.5557793e-01 9.8908485e-01 7.4873648e-01 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt new file mode 100644 index 0000000..8b705b3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt @@ -0,0 +1 @@ + 1.4208365e-03 1.2652718e-05 8.9939315e-04 2.4232332e-04 9.9747033e-04 9.2045721e-04 2.2040648e-04 8.6480051e-04 1.2354911e-03 5.3650090e-06 1.0275886e-03 1.1695784e-03 2.3556571e-04 1.4590172e-03 1.8981327e-03 1.0939621e-03 1.2392314e-04 3.5850877e-04 8.6078038e-04 1.4490833e-03 8.4059347e-04 3.2873982e-03 2.7359832e-03 4.1316044e-03 2.7719149e-03 1.1814143e-03 1.1431285e-04 2.3850299e-04 1.3446247e-03 1.6406549e-03 1.2070654e-03 2.2241257e-03 1.4969348e-03 1.2354911e-03 7.6154552e-04 9.0853884e-04 1.2354911e-03 1.5825612e-04 2.3716586e-04 2.5806020e-04 8.5870759e-03 4.3447170e-04 2.6103416e-03 3.4026094e-03 1.2625429e-03 1.0000714e-03 2.7088099e-04 4.6161202e-05 1.7993015e-04 7.1619641e-02 7.4013940e-02 8.2336355e-02 9.3599031e-02 8.6542298e-02 9.2667602e-02 8.0934616e-02 6.7002415e-02 7.9695318e-02 8.3991107e-02 8.8330128e-02 7.6449243e-02 8.6123390e-02 9.1414445e-02 5.9767596e-02 6.8589764e-02 9.2363748e-02 7.5261304e-02 1.0768528e-01 7.8250149e-02 9.7383870e-02 6.9410330e-02 1.0895936e-01 9.1644587e-02 7.2677910e-02 7.2208930e-02 8.7635618e-02 9.3586395e-02 8.7700193e-02 5.8825053e-02 7.9271072e-02 7.4136423e-02 7.0977606e-02 1.1670751e-01 9.6691498e-02 7.7157266e-02 7.8793137e-02 9.6187418e-02 7.4355610e-02 8.6677009e-02 9.7286808e-02 8.5214421e-02 7.7419803e-02 6.8888638e-02 8.6192502e-02 7.4757686e-02 7.8851331e-02 7.5042247e-02 5.2484298e-02 7.8023694e-02 
1.3991867e-01 1.2655756e-01 1.2099780e-01 1.2515784e-01 1.3134370e-01 1.3306336e-01 1.2911903e-01 1.2854613e-01 1.3655327e-01 1.1601604e-01 9.9632498e-02 1.2063863e-01 1.1404742e-01 1.3409335e-01 1.3451976e-01 1.1368563e-01 1.1469397e-01 1.1505768e-01 1.5479411e-01 1.2906390e-01 1.1634186e-01 1.2299625e-01 1.3892070e-01 1.0732534e-01 1.1401190e-01 1.1254699e-01 1.0266168e-01 1.0210743e-01 1.3111378e-01 1.0950615e-01 1.2501276e-01 1.0108759e-01 1.3297245e-01 1.0624129e-01 1.3360037e-01 1.2002867e-01 1.2233784e-01 1.1387071e-01 1.0061412e-01 1.0649150e-01 1.2174429e-01 1.0147290e-01 1.2655756e-01 1.2438709e-01 1.2138109e-01 1.1044406e-01 1.1910000e-01 1.0821359e-01 1.1609070e-01 1.1329724e-01 1.2085473e-03 1.2060695e-03 2.7592041e-03 3.0736184e-03 3.7201033e-03 1.0861043e-03 7.3910902e-04 3.4790667e-04 1.3491546e-03 2.4493052e-03 1.8482587e-04 2.3308566e-03 3.8997403e-03 6.3069928e-03 4.1362617e-03 1.5079538e-03 7.4890015e-04 4.0049414e-03 3.0763412e-04 3.2877725e-03 8.6909088e-03 1.8863199e-03 4.7592122e-03 4.5180751e-04 1.7148301e-03 8.8703626e-04 5.7128783e-04 1.7151033e-03 8.4814176e-04 4.7551630e-04 6.9313334e-03 5.8126778e-03 3.4790667e-04 9.7078221e-04 1.0390338e-03 3.4790667e-04 1.1371495e-03 7.0598263e-04 2.3100870e-03 3.1332241e-03 2.9870115e-03 3.7693564e-03 5.5008337e-03 2.0081767e-04 3.9261497e-03 1.6237803e-03 1.7731168e-03 5.9153033e-04 5.9997244e-02 6.3706418e-02 7.0131342e-02 8.0131815e-02 7.3670020e-02 8.1412444e-02 7.1132932e-02 5.6572408e-02 6.7223691e-02 7.3993918e-02 7.4363256e-02 6.6371013e-02 7.1106157e-02 7.9730716e-02 5.0610503e-02 5.7285563e-02 8.2536028e-02 6.3695818e-02 9.1877918e-02 6.6044079e-02 8.7700525e-02 5.7975072e-02 9.4407127e-02 7.9385033e-02 6.0900938e-02 6.0521931e-02 7.4070557e-02 8.1073873e-02 7.6438218e-02 4.7634460e-02 6.6728846e-02 6.1732271e-02 5.9656897e-02 1.0363139e-01 8.7312695e-02 6.8806126e-02 6.7142432e-02 8.0911573e-02 6.5091322e-02 7.4541034e-02 8.5313436e-02 7.4229332e-02 6.5328348e-02 5.7461491e-02 7.4891760e-02 6.5136264e-02 6.8598864e-02 6.3641018e-02 4.2790811e-02 6.7276779e-02 1.2872765e-01 1.1385917e-01 1.0708423e-01 1.1221780e-01 1.1844388e-01 1.1798239e-01 1.1767648e-01 1.1356773e-01 1.2073038e-01 1.0467824e-01 8.8441784e-02 1.0671832e-01 1.0091826e-01 1.2051300e-01 1.2244533e-01 1.0247664e-01 1.0203920e-01 1.0334656e-01 1.3764340e-01 1.1314999e-01 1.0390175e-01 1.1148602e-01 1.2274267e-01 9.3929112e-02 1.0239198e-01 9.9372667e-02 9.0109024e-02 9.0770318e-02 1.1749345e-01 9.5509620e-02 1.0956056e-01 8.9331297e-02 1.1936188e-01 9.3207628e-02 1.1935153e-01 1.0516553e-01 1.1204585e-01 1.0191688e-01 8.9582588e-02 9.3806716e-02 1.0922100e-01 8.9087100e-02 1.1385917e-01 1.1193127e-01 1.0978099e-01 9.7766696e-02 1.0448839e-01 9.5849546e-02 1.0619992e-01 1.0212555e-01 7.8301662e-04 3.3186074e-04 9.6097551e-04 9.6384587e-04 1.7160230e-04 7.1714495e-04 1.0915291e-03 1.4406904e-05 9.9431295e-04 1.0280837e-03 3.4520010e-04 1.6070142e-03 2.0814960e-03 1.1810349e-03 9.3270090e-05 2.4892291e-04 9.5000112e-04 1.2447556e-03 8.3736374e-04 3.6303226e-03 2.4141846e-03 3.9965261e-03 2.4688022e-03 1.0115165e-03 6.9871786e-05 1.7487334e-04 1.2251185e-03 1.4398826e-03 9.8199498e-04 2.5137187e-03 1.7466742e-03 1.0915291e-03 7.0690363e-04 8.5846505e-04 1.0915291e-03 1.0992291e-04 1.6427013e-04 2.8562896e-04 8.0123750e-03 5.0490687e-04 2.4076078e-03 3.3222239e-03 1.0270492e-03 1.0987887e-03 2.4862356e-04 7.8815959e-05 1.1120052e-04 7.0071463e-02 7.2494258e-02 8.0694698e-02 9.1816479e-02 8.4823937e-02 9.1055284e-02 7.9406161e-02 6.5540015e-02 
7.8075821e-02 8.2418924e-02 8.6586217e-02 7.4908999e-02 8.4375857e-02 8.9771433e-02 5.8365951e-02 6.7055640e-02 9.0792516e-02 7.3755504e-02 1.0570869e-01 7.6652799e-02 9.5758989e-02 6.7858347e-02 1.0707149e-01 9.0015148e-02 7.1111432e-02 7.0634591e-02 8.5909852e-02 9.1841705e-02 8.6060650e-02 5.7382885e-02 7.7642663e-02 7.2560884e-02 6.9439824e-02 1.1486601e-01 9.5132094e-02 7.5722276e-02 7.7186494e-02 9.4329550e-02 7.2913445e-02 8.4999890e-02 9.5631654e-02 8.3632299e-02 7.5814411e-02 6.7360493e-02 8.4581854e-02 7.3324210e-02 7.7335911e-02 7.3484711e-02 5.1093482e-02 7.6474851e-02 1.3800148e-01 1.2463801e-01 1.1904450e-01 1.2328593e-01 1.2938789e-01 1.3104169e-01 1.2726294e-01 1.2658511e-01 1.3448678e-01 1.1418055e-01 9.7888383e-02 1.1868360e-01 1.1213978e-01 1.3206545e-01 1.3251384e-01 1.1184454e-01 1.1286955e-01 1.1328841e-01 1.5256500e-01 1.2703121e-01 1.1444439e-01 1.2112577e-01 1.3684054e-01 1.0544428e-01 1.1220824e-01 1.1073079e-01 1.0084086e-01 1.0036834e-01 1.2912019e-01 1.0768201e-01 1.2300696e-01 9.9385216e-02 1.3095409e-01 1.0446385e-01 1.3171213e-01 1.1800444e-01 1.2052688e-01 1.1209190e-01 9.8892088e-02 1.0463359e-01 1.1979721e-01 9.9600101e-02 1.2463801e-01 1.2247195e-01 1.1948197e-01 1.0852184e-01 1.1709036e-01 1.0637133e-01 1.1433097e-01 1.1154058e-01 1.2829581e-03 8.6520525e-04 1.3042912e-03 2.3052671e-04 6.0609671e-05 6.1408538e-04 7.9384016e-04 2.5551469e-04 9.4346154e-04 1.8930050e-03 4.6203036e-03 3.8649853e-03 3.3273220e-03 9.7135787e-04 2.5836286e-04 1.6395377e-03 4.6720392e-04 1.3833444e-03 6.8585778e-03 1.1817616e-03 1.4184724e-03 1.2935682e-03 4.4534899e-04 4.3337262e-04 9.9734142e-04 6.2957380e-05 2.1802414e-04 1.3452346e-03 3.6759458e-03 3.7514511e-03 6.1408538e-04 2.3527566e-03 2.5967147e-03 6.1408538e-04 3.1896708e-04 3.0643540e-04 1.7034162e-03 7.0964884e-03 1.0371098e-03 1.9760564e-03 1.6993217e-03 9.2490489e-04 1.2129757e-03 2.8785057e-04 7.8777499e-04 6.4144968e-04 5.7636535e-02 5.9786679e-02 6.7275391e-02 7.7706661e-02 7.1288776e-02 7.6308806e-02 6.5987844e-02 5.3398709e-02 6.4839697e-02 6.8887148e-02 7.2874646e-02 6.2111692e-02 7.1088473e-02 7.5274214e-02 4.7295630e-02 5.5048251e-02 7.6266639e-02 6.0532100e-02 9.0997542e-02 6.3501941e-02 8.1155480e-02 5.5841790e-02 9.1620605e-02 7.5304976e-02 5.8627379e-02 5.8302297e-02 7.2188128e-02 7.7632065e-02 7.2128571e-02 4.6353347e-02 6.4522763e-02 5.9860052e-02 5.7075256e-02 9.8501473e-02 8.0208982e-02 6.2676929e-02 6.4117314e-02 8.0306154e-02 5.9903400e-02 7.1264506e-02 8.0454669e-02 6.9667510e-02 6.2855874e-02 5.5234852e-02 7.0611788e-02 6.0083969e-02 6.3933681e-02 6.0638614e-02 4.1119113e-02 6.3291748e-02 1.2072945e-01 1.0797760e-01 1.0284307e-01 1.0630032e-01 1.1246316e-01 1.1377579e-01 1.1035397e-01 1.0939330e-01 1.1704519e-01 9.8543065e-02 8.3389076e-02 1.0253622e-01 9.6610654e-02 1.1523295e-01 1.1624035e-01 9.6621030e-02 9.6718555e-02 9.7003685e-02 1.3426257e-01 1.1013293e-01 9.8838972e-02 1.0496266e-01 1.1920082e-01 9.0400878e-02 9.6352086e-02 9.4617133e-02 8.6118226e-02 8.5443225e-02 1.1226469e-01 9.1815383e-02 1.0642172e-01 8.4132371e-02 1.1413570e-01 8.8823115e-02 1.1373227e-01 1.0228600e-01 1.0454965e-01 9.5917796e-02 8.4129252e-02 8.9732713e-02 1.0404039e-01 8.5714179e-02 1.0797760e-01 1.0611357e-01 1.0375975e-01 9.3828435e-02 1.0141953e-01 9.1231247e-02 9.8764813e-02 9.5558448e-02 7.0033377e-04 3.9650610e-04 5.3529876e-04 1.4703029e-03 2.2471049e-03 2.6137215e-04 9.1585095e-04 2.3098853e-03 3.2779352e-04 1.7003275e-03 9.5035099e-04 8.4163249e-04 3.6423601e-04 8.6760304e-04 2.6110376e-04 
2.4965606e-03 5.0990123e-04 2.2208392e-03 3.4995017e-03 3.9813106e-03 4.2652650e-03 1.4776191e-03 5.3856223e-04 9.6152184e-04 1.6178695e-03 2.4296336e-03 2.2824176e-03 1.0483334e-03 6.6735604e-04 2.2471049e-03 1.7166964e-03 1.9224889e-03 2.2471049e-03 4.4953685e-04 7.5090712e-04 3.1050470e-04 1.1530910e-02 8.0837373e-05 2.6173161e-03 2.7612054e-03 2.3974656e-03 3.9140870e-04 3.5730731e-04 1.1232648e-04 8.0278741e-04 7.4728046e-02 7.6441141e-02 8.5477412e-02 9.7141382e-02 8.9947057e-02 9.5081677e-02 8.2962705e-02 6.9633999e-02 8.3013931e-02 8.6069979e-02 9.2215558e-02 7.8736928e-02 9.0603515e-02 9.4074986e-02 6.2034704e-02 7.1640320e-02 9.4150759e-02 7.8195110e-02 1.1214391e-01 8.1468219e-02 9.9059263e-02 7.2514318e-02 1.1269547e-01 9.4545020e-02 7.5842542e-02 7.5358360e-02 9.1332869e-02 9.6662705e-02 9.0277244e-02 6.2066860e-02 8.2644288e-02 7.7554694e-02 7.3959493e-02 1.1955630e-01 9.8181734e-02 7.8602674e-02 8.1755435e-02 1.0058819e-01 7.6248524e-02 8.9701900e-02 9.9938282e-02 8.7676596e-02 8.0619290e-02 7.1976555e-02 8.8793557e-02 7.6779152e-02 8.1107438e-02 7.7952944e-02 5.5245517e-02 8.0550459e-02 1.4162183e-01 1.2912349e-01 1.2423521e-01 1.2779447e-01 1.3393410e-01 1.3660889e-01 1.3105158e-01 1.3208577e-01 1.4040000e-01 1.1817736e-01 1.0200650e-01 1.2388995e-01 1.1706801e-01 1.3699958e-01 1.3682207e-01 1.1586916e-01 1.1739162e-01 1.1729454e-01 1.5902469e-01 1.3308573e-01 1.1901641e-01 1.2511327e-01 1.4289089e-01 1.1059070e-01 1.1627926e-01 1.1550831e-01 1.0561378e-01 1.0446495e-01 1.3405102e-01 1.1291439e-01 1.2888996e-01 1.0359625e-01 1.3590097e-01 1.0925250e-01 1.3665207e-01 1.2379539e-01 1.2392962e-01 1.1624448e-01 1.0286550e-01 1.0945264e-01 1.2440339e-01 1.0449561e-01 1.2912349e-01 1.2690130e-01 1.2362142e-01 1.1341467e-01 1.2276171e-01 1.1097585e-01 1.1759891e-01 1.1534218e-01 1.3143808e-04 7.3710840e-04 1.1313742e-03 2.6277162e-03 9.9332749e-04 4.8298989e-04 2.9659782e-03 1.8303797e-03 3.9657692e-03 1.4753738e-03 1.6266891e-03 7.0233916e-04 8.0313831e-04 3.4526160e-04 2.3291483e-03 1.3867759e-04 4.2228272e-03 1.6991343e-03 2.3223655e-03 3.8453210e-03 4.2904903e-04 9.9302567e-04 1.7706867e-03 9.4981017e-04 1.8259864e-03 2.0820613e-03 2.1473879e-03 2.0420431e-03 2.6277162e-03 3.0779094e-03 3.4332541e-03 2.6277162e-03 6.3280964e-04 1.0576914e-03 9.5198627e-04 1.0925795e-02 3.7286463e-04 7.9546610e-04 9.1841431e-04 2.1468126e-03 4.9129575e-04 4.3562197e-04 7.5083238e-04 1.3686608e-03 6.3901299e-02 6.4740623e-02 7.3708779e-02 8.4613714e-02 7.7866771e-02 8.2261058e-02 7.0449151e-02 5.8874682e-02 7.1767088e-02 7.3210535e-02 8.0660949e-02 6.6601983e-02 8.0033785e-02 8.1391959e-02 5.1369939e-02 6.0897790e-02 8.0716992e-02 6.7403323e-02 9.9203670e-02 7.0276809e-02 8.4922276e-02 6.1688045e-02 9.9339240e-02 8.2362360e-02 6.4928234e-02 6.4360101e-02 7.9641814e-02 8.3721620e-02 7.7549963e-02 5.2617898e-02 7.1414187e-02 6.6946935e-02 6.3031902e-02 1.0509118e-01 8.4332170e-02 6.6064468e-02 7.0064616e-02 8.8758294e-02 6.4379548e-02 7.7371173e-02 8.7052850e-02 7.5305342e-02 6.9340944e-02 6.1339869e-02 7.6377320e-02 6.5179636e-02 6.9093895e-02 6.6669498e-02 4.5609365e-02 6.8684945e-02 1.2445912e-01 1.1341836e-01 1.0935772e-01 1.1262566e-01 1.1789507e-01 1.2147174e-01 1.1488682e-01 1.1752559e-01 1.2531063e-01 1.0271865e-01 8.7888567e-02 1.0902443e-01 1.0234160e-01 1.2080033e-01 1.1990073e-01 1.0043696e-01 1.0286413e-01 1.0252340e-01 1.4292168e-01 1.1866325e-01 1.0381139e-01 1.0919240e-01 1.2785249e-01 9.6570465e-02 1.0127523e-01 1.0149554e-01 9.1688518e-02 9.0323099e-02 1.1822766e-01 
9.9584713e-02 1.1452014e-01 9.0018133e-02 1.1983081e-01 9.5741335e-02 1.2190290e-01 1.0915996e-01 1.0773474e-01 1.0161859e-01 8.8729453e-02 9.5169428e-02 1.0868349e-01 9.0278091e-02 1.1341836e-01 1.1118524e-01 1.0767597e-01 9.8555096e-02 1.0809822e-01 9.6490550e-02 1.0179914e-01 1.0040847e-01 9.0953179e-04 1.6478123e-03 3.1324421e-03 9.3747882e-04 6.8074049e-04 3.4285457e-03 1.4256139e-03 3.3141786e-03 8.1135619e-04 1.2040955e-03 7.3894006e-04 1.1469835e-03 5.4914496e-05 3.0238895e-03 1.1512346e-04 2.9874978e-03 2.7356591e-03 2.9755481e-03 4.8570629e-03 9.8132331e-04 1.1267736e-03 1.9187302e-03 1.4320892e-03 2.5472569e-03 2.7129147e-03 1.2621760e-03 1.1868918e-03 3.1324421e-03 3.1260816e-03 3.4622842e-03 3.1324421e-03 7.8737454e-04 1.2923124e-03 7.7291736e-04 1.2676988e-02 1.5795155e-04 1.4073300e-03 1.3093851e-03 2.8558230e-03 2.3589004e-04 5.3160641e-04 6.3306680e-04 1.5563919e-03 6.9394652e-02 7.0160248e-02 7.9549278e-02 9.0909253e-02 8.3929778e-02 8.8133516e-02 7.5949213e-02 6.4094635e-02 7.7538115e-02 7.8838295e-02 8.6828513e-02 7.2078729e-02 8.6190925e-02 8.7328483e-02 5.6305232e-02 6.6307663e-02 8.6433769e-02 7.2861306e-02 1.0610432e-01 7.5977192e-02 9.0782134e-02 6.7147548e-02 1.0605640e-01 8.8295560e-02 7.0476640e-02 6.9912539e-02 8.5755505e-02 8.9909894e-02 8.3415192e-02 5.7694397e-02 7.7198547e-02 7.2551886e-02 6.8485682e-02 1.1174631e-01 9.0047290e-02 7.1258462e-02 7.5770197e-02 9.5276007e-02 6.9606963e-02 8.3332111e-02 9.3091350e-02 8.1019819e-02 7.5041473e-02 6.6748030e-02 8.2172293e-02 7.0413691e-02 7.4567733e-02 7.2221920e-02 5.0422561e-02 7.4234075e-02 1.3135838e-01 1.2029572e-01 1.1630277e-01 1.1941581e-01 1.2489530e-01 1.2871814e-01 1.2159882e-01 1.2460620e-01 1.3270425e-01 1.0925415e-01 9.4076611e-02 1.1596894e-01 1.0908894e-01 1.2799150e-01 1.2695158e-01 1.0694484e-01 1.0944639e-01 1.0895711e-01 1.5084375e-01 1.2591962e-01 1.1052596e-01 1.1587184e-01 1.3530738e-01 1.0320818e-01 1.0775506e-01 1.0806337e-01 9.8123191e-02 9.6541726e-02 1.2533326e-01 1.0616585e-01 1.2166800e-01 9.6181548e-02 1.2699662e-01 1.0216112e-01 1.2885603e-01 1.1626103e-01 1.1421827e-01 1.0807124e-01 9.4882428e-02 1.0171954e-01 1.1554226e-01 9.6763759e-02 1.2029572e-01 1.1801757e-01 1.1438908e-01 1.0525128e-01 1.1515210e-01 1.0301668e-01 1.0810316e-01 1.0676998e-01 2.4407151e-04 6.8243680e-04 1.6882982e-04 4.2217018e-04 8.1245396e-04 8.1915702e-04 2.7980568e-03 2.6783721e-03 2.0076713e-03 3.3526400e-04 9.3506008e-05 1.0407900e-03 7.3148476e-04 9.1895790e-04 4.8425923e-03 1.7878106e-03 2.5638304e-03 1.8092053e-03 6.2482332e-04 4.5470127e-05 3.8680919e-04 4.8577398e-04 7.0932539e-04 1.0773286e-03 2.7081281e-03 2.3916675e-03 6.8243680e-04 1.3234869e-03 1.5152295e-03 6.8243680e-04 1.7279927e-05 4.4719936e-05 7.6774714e-04 7.6386402e-03 5.1509749e-04 2.1386706e-03 2.3673979e-03 8.8641907e-04 8.8317423e-04 5.7646989e-05 1.8767975e-04 1.8238427e-04 6.4591491e-02 6.6891146e-02 7.4787553e-02 8.5653640e-02 7.8909235e-02 8.4481757e-02 7.3468926e-02 6.0165176e-02 7.2232139e-02 7.6459237e-02 8.0572670e-02 6.9287036e-02 7.8547451e-02 8.3338681e-02 5.3514192e-02 6.1787978e-02 8.4336540e-02 6.7840538e-02 9.9351761e-02 7.0839680e-02 8.9318727e-02 6.2598635e-02 1.0029777e-01 8.3444651e-02 6.5618944e-02 6.5228710e-02 7.9886645e-02 8.5622882e-02 7.9922508e-02 5.2526388e-02 7.1863670e-02 6.6948234e-02 6.3994975e-02 1.0763490e-01 8.8479248e-02 6.9931400e-02 7.1440370e-02 8.8224815e-02 6.7118281e-02 7.8968665e-02 8.8858891e-02 7.7432758e-02 7.0109240e-02 6.2023845e-02 7.8396402e-02 6.7393801e-02 7.1380489e-02 
[large block of raw floating-point values omitted: appears to be the contents of a machine-generated numeric data file (e.g., an audio feature or distance matrix) committed to the repository; not human-readable]
1.2376608e-03 1.4854110e-03 8.9326822e-04 1.4516004e-04 1.6538068e-03 6.5338340e-04 1.1299287e-03 2.4074626e-03 6.9551596e-04 2.6949476e-03 7.2730987e-04 2.7608296e-03 6.5430290e-04 2.7448595e-03 1.4433165e-04 4.0981488e-03 7.0217976e-04 2.0017194e-03 8.4737221e-04 3.5875154e-04 4.2590326e-04 3.7194881e-04 5.5222558e-04 6.3134375e-04 2.4465093e-03 1.5551077e-04 5.7141821e-04 4.4617793e-04 3.7751002e-03 4.2307180e-03 3.3297057e-03 8.4238856e-05 1.8060244e-03 1.6735237e-03 1.3470834e-04 1.4289074e-03 6.1420964e-04 1.0488316e-04 6.6710002e-04 4.5037482e-04 1.3817339e-03 8.2351922e-04 1.8947713e-04 4.2527867e-03 4.2014447e-04 1.3415184e-02 6.6868372e-03 4.2910287e-03 5.8862692e-03 7.8406972e-03 6.4938295e-03 9.0912854e-03 5.6561218e-03 7.2322567e-03 6.1641630e-03 2.7540356e-03 4.2344365e-03 3.5847503e-03 8.2354910e-03 1.1610160e-02 6.2859853e-03 3.6577005e-03 4.7344286e-03 1.2033367e-02 5.6148647e-03 4.9101139e-03 7.6703569e-03 7.8664866e-03 2.1500523e-03 4.7726337e-03 2.9188064e-03 1.9107921e-03 2.6804057e-03 7.0603390e-03 2.2776004e-03 4.6992435e-03 1.8838392e-03 7.7943774e-03 1.9124732e-03 7.9102721e-03 4.1636243e-03 9.7823337e-03 4.1721395e-03 2.8414673e-03 2.6224860e-03 6.5787438e-03 3.5255969e-03 6.6868372e-03 6.5865547e-03 7.7526002e-03 4.4665713e-03 4.0404299e-03 3.0508638e-03 8.7371106e-03 5.0758684e-03 1.4741381e-04 1.6125952e-03 2.5451963e-03 2.5693849e-03 7.6673984e-04 2.4419486e-03 4.9090126e-04 2.2351893e-03 2.2914219e-03 9.4806103e-04 4.8625587e-03 2.0262116e-03 3.3701508e-03 1.9166326e-03 1.0683081e-03 9.0452408e-04 4.3648468e-03 1.8983778e-03 7.0324108e-04 1.1499379e-03 1.4013722e-03 1.4463883e-03 3.0992383e-04 2.7889447e-04 9.9370768e-04 4.4215550e-03 7.4609763e-04 1.5322654e-03 1.6670592e-03 2.5458750e-03 4.8164855e-03 4.9491294e-03 7.9622502e-04 9.3705610e-04 3.2918730e-03 3.3244867e-04 1.4917733e-03 1.2997427e-03 8.4964648e-04 1.9833350e-03 9.7663737e-04 3.0207145e-03 1.9767871e-03 1.2186915e-03 6.6584952e-03 1.4294810e-03 1.1447125e-02 4.9138585e-03 2.4483433e-03 4.3479747e-03 5.8020082e-03 4.0665925e-03 7.5951000e-03 3.6015682e-03 4.5184530e-03 5.0362911e-03 2.3873447e-03 2.3916463e-03 2.1166084e-03 5.8005881e-03 9.3326086e-03 5.1921662e-03 2.5583136e-03 3.9040792e-03 8.1942534e-03 3.2098756e-03 3.4610228e-03 6.2351763e-03 5.0378736e-03 9.3974884e-04 3.8135545e-03 1.8905878e-03 1.0935023e-03 2.2739982e-03 4.8352649e-03 1.2660181e-03 2.5068904e-03 1.6511455e-03 5.4229770e-03 1.1989559e-03 6.1054655e-03 2.0081796e-03 8.6788304e-03 3.3159020e-03 2.5546078e-03 1.5534900e-03 4.7975700e-03 2.5264412e-03 4.9138585e-03 4.8875866e-03 6.2264786e-03 3.0375207e-03 1.9750388e-03 2.0217929e-03 8.0063599e-03 4.3571190e-03 1.6611913e-03 2.0167984e-03 1.5438876e-03 3.6494229e-04 2.0217253e-03 4.6462511e-04 1.5312613e-03 2.1707116e-03 9.6955753e-04 3.3920004e-03 1.0848877e-03 3.2679078e-03 1.3217642e-03 1.7681092e-03 4.3634818e-04 4.3762590e-03 9.8982281e-04 1.4702047e-03 1.2309511e-03 6.6753502e-04 6.7519636e-04 2.4877587e-04 3.8105160e-04 8.2183923e-04 3.0399429e-03 3.2267433e-04 8.9534104e-04 8.4272052e-04 3.5139014e-03 4.8234985e-03 4.1432495e-03 3.0463297e-04 1.2193814e-03 2.4923401e-03 1.7011388e-04 1.7584273e-03 1.0444437e-03 3.3026408e-04 1.0723664e-03 7.7198092e-04 2.2859213e-03 1.4276818e-03 5.7687029e-04 4.8466907e-03 8.6936394e-04 1.2850654e-02 6.1088154e-03 3.5569698e-03 5.5719646e-03 7.1488198e-03 5.6571014e-03 8.7634622e-03 5.0674178e-03 6.2538450e-03 5.7468537e-03 2.5694606e-03 3.4855366e-03 2.9211508e-03 7.2454781e-03 1.0560745e-02 5.7784909e-03 3.4230741e-03 
4.7152707e-03 1.0513823e-02 4.7114505e-03 4.2697686e-03 7.1803873e-03 6.8688743e-03 1.5447042e-03 4.5401495e-03 2.7361382e-03 1.4855517e-03 2.5857007e-03 6.2270622e-03 2.0576267e-03 3.8519560e-03 2.0130817e-03 6.8461641e-03 1.8250891e-03 7.7109246e-03 3.0910476e-03 9.4645289e-03 4.1075164e-03 2.7837137e-03 2.0667492e-03 5.7442304e-03 2.7416306e-03 6.1088154e-03 5.9744542e-03 7.0784056e-03 3.5969994e-03 3.0144395e-03 2.5834027e-03 8.5675229e-03 5.0446428e-03 9.3410747e-04 2.6108336e-03 1.8631164e-03 1.0514258e-03 2.7757095e-03 1.6027319e-03 5.9009151e-03 9.3507643e-05 4.8585250e-03 3.2279691e-03 7.0814372e-04 1.8064210e-03 4.7021523e-03 1.7906187e-03 1.7438865e-03 3.2726931e-03 2.1092470e-03 4.4199215e-04 2.5742583e-03 2.7576288e-03 2.1865740e-03 8.5954487e-04 4.3875130e-04 6.2385517e-03 2.0706463e-03 2.9895922e-03 2.4900150e-03 1.5452115e-03 1.2972442e-03 2.2089282e-03 1.3825902e-03 4.4047937e-03 1.3378707e-03 8.5528525e-04 1.4287812e-04 2.3200710e-04 1.9018574e-03 3.1193134e-03 2.7835858e-04 1.0440284e-03 6.7774868e-04 1.6434489e-03 8.2787646e-03 9.1805293e-04 8.2830461e-03 3.6334874e-03 2.7936555e-03 2.6701226e-03 4.5417785e-03 4.4199379e-03 4.7638022e-03 3.6853341e-03 5.4544340e-03 3.4799734e-03 1.6014887e-03 2.8117239e-03 2.4093914e-03 5.5035080e-03 8.3987820e-03 4.0331795e-03 1.3946498e-03 1.6013776e-03 9.9231424e-03 4.6841468e-03 3.0824955e-03 4.3724199e-03 6.0877536e-03 1.8755546e-03 2.0838499e-03 1.1387331e-03 1.4636530e-03 1.0937142e-03 4.3877001e-03 1.4753015e-03 3.8552640e-03 2.5868499e-04 5.1223304e-03 7.0168484e-04 4.3920187e-03 3.9679568e-03 5.8082286e-03 1.3720301e-03 1.2437314e-03 1.9911684e-03 4.5361159e-03 3.9571028e-03 3.6334874e-03 3.7423473e-03 5.0666856e-03 3.9975266e-03 3.7265040e-03 1.8650355e-03 5.0595778e-03 1.9211780e-03 1.4802848e-03 2.3288314e-03 6.8065434e-05 4.0423471e-03 2.2193959e-04 7.6466779e-03 9.2054349e-04 2.1021154e-03 2.1770952e-03 5.4412625e-04 2.3412508e-03 6.7735470e-03 2.1146424e-03 1.1473823e-03 2.2087962e-03 4.4699141e-03 2.0616206e-03 2.1327263e-03 2.0380999e-03 3.2539726e-03 1.3598045e-03 3.9670027e-04 4.8634117e-03 2.4197901e-03 3.2204932e-03 1.7622064e-03 3.8856461e-03 1.2334789e-03 4.0452577e-04 1.0887743e-03 6.2435560e-03 2.0292084e-04 1.0829901e-03 1.7786841e-03 4.1228310e-04 1.8859956e-03 2.3286589e-03 5.1772577e-04 4.9172870e-04 2.0007065e-04 1.2918676e-03 5.0330599e-03 3.9898095e-04 9.4287570e-03 5.3866372e-03 4.8647393e-03 5.2819054e-03 6.4598280e-03 8.0073865e-03 6.1852254e-03 7.5566712e-03 9.5128884e-03 3.7267827e-03 1.2843714e-03 4.8237356e-03 3.5085400e-03 7.3800011e-03 8.6672418e-03 3.7373665e-03 3.3006522e-03 3.2553459e-03 1.4768796e-02 8.5372346e-03 3.7597131e-03 5.0208833e-03 1.0540608e-02 2.9216135e-03 2.9987594e-03 3.5026889e-03 1.9346800e-03 1.3415748e-03 6.5619943e-03 4.2356898e-03 7.2096387e-03 1.4370251e-03 7.0771420e-03 2.7742326e-03 8.9575949e-03 5.8574207e-03 5.7119619e-03 3.0499081e-03 1.2067939e-03 2.4757916e-03 5.0772246e-03 3.1235792e-03 5.3866372e-03 5.0347068e-03 5.1609892e-03 3.8212989e-03 5.4644467e-03 2.4256658e-03 4.6018293e-03 2.9278326e-03 8.9754714e-04 1.9308173e-03 2.4780225e-03 8.6351250e-04 4.2337045e-03 2.1236680e-03 7.2381764e-04 2.1294516e-04 3.5897400e-03 7.7335429e-04 6.4715989e-03 7.0701488e-04 5.1814848e-03 2.9010443e-04 5.5409554e-03 2.4566735e-03 3.3429771e-04 3.2970103e-04 2.0470962e-03 2.3835444e-03 1.6383998e-03 1.0044594e-03 8.9092263e-04 8.3257442e-04 1.5048347e-04 7.3947599e-03 5.1765389e-03 2.4819362e-03 5.2929628e-04 4.5409213e-03 9.8467896e-04 1.3414871e-03 3.3640577e-03 
1.3544701e-03 5.6680732e-04 2.1931945e-04 1.3672243e-03 8.5440953e-04 8.3314842e-04 2.6161270e-04 1.7366856e-03 5.2799539e-04 1.7649847e-02 1.0764587e-02 8.4383096e-03 9.9924928e-03 1.2273825e-02 1.1788899e-02 1.2850999e-02 1.0630699e-02 1.2964118e-02 9.0438994e-03 4.5288546e-03 8.3580766e-03 6.9988358e-03 1.3044370e-02 1.5771615e-02 8.9276151e-03 7.0025139e-03 7.7681345e-03 1.9248623e-02 1.0890288e-02 8.2148016e-03 1.1050411e-02 1.3845989e-02 5.2067991e-03 7.6819868e-03 6.2976924e-03 4.3733262e-03 4.7066273e-03 1.1733076e-02 5.6953630e-03 9.5825887e-03 4.0184455e-03 1.2555508e-02 4.8201445e-03 1.2938333e-02 8.5012649e-03 1.2613331e-02 7.2187981e-03 4.6491251e-03 5.3739269e-03 1.0213138e-02 5.7106829e-03 1.0764587e-02 1.0424589e-02 1.1013685e-02 7.3861586e-03 8.2413252e-03 5.8213434e-03 1.1031181e-02 7.7860554e-03 2.6482878e-03 4.2794287e-04 1.8319104e-03 1.5871773e-03 1.2340260e-03 2.9761573e-03 6.4181441e-04 3.9337040e-03 3.6570436e-04 2.9155752e-03 1.3382816e-05 5.6692471e-03 6.4631376e-04 2.4262663e-03 1.0063227e-03 2.3414789e-04 3.8352406e-04 2.6670153e-04 1.1841642e-03 1.3362953e-03 1.8214422e-03 1.1939941e-05 1.6841308e-04 4.0205285e-04 4.8423454e-03 5.6256276e-03 4.4224155e-03 3.0861840e-04 1.5326607e-03 2.2893524e-03 5.4845532e-04 1.9363566e-03 1.1483728e-03 6.7060376e-05 4.8890125e-04 9.7136579e-04 1.7676633e-03 1.3375465e-03 2.5372175e-04 4.0846507e-03 8.2917869e-04 1.6077651e-02 8.4460632e-03 5.5177458e-03 7.2638972e-03 9.7215743e-03 7.5597148e-03 1.1218034e-02 6.4234899e-03 8.1582316e-03 8.1119193e-03 4.1520915e-03 5.4624397e-03 4.9276011e-03 1.0142169e-02 1.4194600e-02 8.3023965e-03 4.8434975e-03 6.1466052e-03 1.3239048e-02 6.2447391e-03 6.5902144e-03 9.7724034e-03 8.6919298e-03 3.1454944e-03 6.3952659e-03 3.7622934e-03 3.0039560e-03 4.0045050e-03 8.7466217e-03 2.6847018e-03 5.4026497e-03 2.7900002e-03 9.6269482e-03 2.5687827e-03 8.7444600e-03 5.2241473e-03 1.2215015e-02 5.4919656e-03 4.2251052e-03 3.8716574e-03 8.5147448e-03 4.9424074e-03 8.4460632e-03 8.4294916e-03 9.9304629e-03 6.0757927e-03 5.1533590e-03 4.4115457e-03 1.1061881e-02 6.6918878e-03 4.2077380e-03 2.8084601e-04 8.0255307e-03 1.0009734e-03 2.4531898e-03 2.5259126e-03 4.7784400e-04 2.9272193e-03 6.4204523e-03 2.4613181e-03 8.1446100e-04 2.5131313e-03 4.2288269e-03 2.3479589e-03 2.4787654e-03 2.3219118e-03 3.4016339e-03 1.1923522e-03 3.6609993e-04 5.5309499e-03 2.7130308e-03 3.6876750e-03 2.1157012e-03 3.5197143e-03 1.1211667e-03 5.0040735e-04 1.2581494e-03 6.2453424e-03 4.9316252e-04 1.0956505e-03 1.9034882e-03 5.6262931e-04 2.1623642e-03 2.7381233e-03 6.2275542e-04 9.1647472e-04 4.3469066e-04 1.6016964e-03 5.5073116e-03 6.0170029e-04 8.2861929e-03 4.5965840e-03 4.2124912e-03 4.7715416e-03 5.5699315e-03 7.3813594e-03 5.3659110e-03 7.1660104e-03 8.8786575e-03 2.9267242e-03 8.0445420e-04 4.1635703e-03 2.8364579e-03 6.3397911e-03 7.3211567e-03 2.8639086e-03 2.8974680e-03 2.8626073e-03 1.3776826e-02 8.0525608e-03 2.9632152e-03 4.1293440e-03 9.9396499e-03 2.4124505e-03 2.4179141e-03 3.2613675e-03 1.4673422e-03 9.3714862e-04 5.6839580e-03 4.1506352e-03 6.7187933e-03 1.3083992e-03 6.0861850e-03 2.6486423e-03 8.7285257e-03 5.1099460e-03 4.7657924e-03 2.6722144e-03 7.9417630e-04 1.8862622e-03 4.0883053e-03 2.3510235e-03 4.5965840e-03 4.1923613e-03 4.1490574e-03 2.9383782e-03 4.7171745e-03 1.8344857e-03 3.7646957e-03 2.4446440e-03 3.4307781e-03 6.9180223e-04 1.9272484e-03 5.2229584e-03 1.7410764e-03 5.5262484e-03 1.3512624e-03 1.2930220e-03 5.9265821e-04 7.2336834e-03 1.6622228e-03 1.4643809e-03 1.4579196e-03 
1.0665934e-03 1.2486050e-03 4.3399433e-05 1.3778574e-03 2.2390179e-03 3.1206766e-03 3.9982611e-04 7.0342073e-04 1.4793565e-03 4.5495005e-03 7.4063181e-03 6.9037801e-03 1.1215419e-03 3.4551859e-04 4.3750592e-03 1.0242073e-03 2.4559606e-03 2.2544417e-03 6.9180223e-04 1.5393939e-03 1.8990091e-03 3.7039856e-03 2.8782332e-03 1.2534261e-03 6.1345149e-03 2.1154370e-03 1.6389445e-02 8.1714731e-03 4.6971245e-03 6.9480013e-03 9.2874957e-03 6.0735661e-03 1.1557043e-02 5.0795005e-03 6.2695782e-03 8.5942939e-03 4.8981354e-03 4.6386510e-03 4.5456138e-03 9.3051239e-03 1.4039626e-02 8.8459552e-03 4.7683489e-03 6.5513763e-03 1.0462868e-02 4.3969158e-03 6.5113261e-03 1.0098096e-02 6.6155737e-03 2.7131547e-03 6.8044529e-03 3.4785563e-03 3.0414918e-03 4.6799978e-03 7.9508909e-03 2.0423594e-03 3.8146379e-03 3.3303959e-03 8.7946774e-03 2.4271150e-03 7.7798359e-03 3.9016462e-03 1.3149131e-02 5.7850137e-03 5.0633237e-03 3.7856360e-03 8.3083336e-03 4.9921128e-03 8.1714731e-03 8.2786520e-03 1.0189399e-02 5.8912773e-03 3.9649361e-03 4.4771942e-03 1.2268673e-02 7.3587328e-03 6.5787312e-03 1.3233357e-03 1.1455828e-03 1.2416155e-03 1.4002549e-03 2.1211045e-03 6.2047866e-03 1.6380421e-03 2.0343353e-03 1.2366343e-03 4.6737502e-03 2.4960613e-03 1.3635919e-03 1.1869805e-03 2.7420392e-03 1.3311387e-03 5.4030547e-04 3.4496369e-03 1.8301986e-03 2.4543913e-03 1.0360218e-03 4.9823495e-03 2.4358984e-03 7.7653049e-04 6.6085729e-04 5.4666528e-03 3.6251211e-04 9.0034369e-04 2.5308489e-03 6.8325669e-04 1.3068439e-03 1.4466712e-03 7.0328570e-04 7.2909544e-04 3.3427965e-04 8.2319726e-04 3.3236658e-03 2.7101948e-04 1.1415097e-02 6.6763180e-03 5.5975222e-03 6.7637660e-03 7.8425097e-03 9.1076654e-03 7.9396677e-03 8.6849227e-03 1.0537825e-02 4.7340864e-03 1.6981356e-03 5.5204544e-03 4.0238168e-03 8.5090240e-03 9.7837063e-03 4.5130967e-03 4.3644743e-03 4.6397935e-03 1.5929686e-02 9.2384755e-03 4.4702915e-03 6.2885333e-03 1.1607431e-02 3.1082253e-03 4.1080254e-03 4.4476701e-03 2.1448891e-03 1.9903866e-03 7.7241343e-03 4.8610346e-03 7.8308568e-03 2.2349743e-03 8.1986845e-03 3.4906235e-03 1.0785042e-02 6.0455598e-03 7.1678534e-03 4.3085958e-03 1.8327850e-03 2.7372439e-03 5.8248161e-03 2.8157351e-03 6.6763180e-03 6.1912806e-03 6.1165743e-03 3.9060199e-03 5.6854050e-03 2.9112999e-03 5.9286549e-03 4.2459890e-03 4.7261943e-03 7.5564450e-03 3.0589295e-03 9.9138151e-03 2.5436162e-03 2.3081715e-03 1.8344588e-03 1.2327595e-02 2.9913447e-03 3.4407458e-03 3.5166367e-03 2.2912956e-03 2.5985157e-03 1.0536407e-03 4.0148201e-03 5.3033668e-03 3.3636231e-03 1.5135209e-03 1.3379167e-03 2.9432336e-03 8.2210569e-03 1.2378534e-02 1.1256214e-02 3.0883819e-03 6.3877121e-04 7.6741777e-03 3.3091271e-03 5.2745018e-03 5.1312091e-03 2.0746888e-03 2.6973394e-03 4.6616800e-03 6.5765736e-03 5.8096774e-03 2.9820979e-03 7.3187243e-03 4.6783172e-03 2.3410581e-02 1.3144529e-02 8.3391705e-03 1.1305315e-02 1.4457465e-02 9.3485129e-03 1.7555848e-02 7.8838497e-03 9.1347689e-03 1.4043414e-02 9.2390419e-03 8.2683227e-03 8.4558564e-03 1.4291672e-02 2.0441558e-02 1.4348706e-02 8.7091477e-03 1.1171095e-02 1.3577182e-02 6.6301175e-03 1.1191012e-02 1.5865943e-02 9.2559664e-03 5.8683930e-03 1.1694065e-02 6.7001372e-03 6.5334334e-03 8.9560518e-03 1.2574664e-02 4.2523471e-03 6.2343724e-03 6.8791672e-03 1.3649249e-02 5.2782119e-03 1.1089361e-02 6.8777652e-03 1.9771053e-02 1.0157447e-02 9.4893989e-03 7.5319640e-03 1.3411350e-02 8.8625592e-03 1.3144529e-02 1.3390278e-02 1.5951074e-02 1.0173619e-02 7.0905829e-03 8.5554531e-03 1.8741319e-02 1.2400067e-02 4.2985441e-03 2.4763265e-03 
1.0193789e-03 1.4380091e-03 3.6495663e-03 1.2003820e-03 2.0584975e-03 2.4845352e-03 1.6034070e-03 3.3059261e-04 1.8671770e-03 2.0088877e-03 1.4284810e-03 4.3098388e-04 2.3514979e-04 5.2698749e-03 1.3824059e-03 2.2360501e-03 1.8501267e-03 1.6468815e-03 1.8319470e-03 2.4154150e-03 8.5423988e-04 3.3259702e-03 1.3633177e-03 3.9750527e-04 2.1357310e-04 1.4089375e-04 1.2605369e-03 2.3906980e-03 1.0696291e-04 1.0906412e-03 5.9076498e-04 1.1304618e-03 7.2732511e-03 6.4012947e-04 8.9598694e-03 3.8314732e-03 2.5915433e-03 2.9763427e-03 4.7600376e-03 4.3053173e-03 5.3266981e-03 3.6190516e-03 5.2272723e-03 3.6549051e-03 1.5047575e-03 2.5888356e-03 2.1634980e-03 5.5055672e-03 8.5050147e-03 4.0885074e-03 1.5151975e-03 1.9844585e-03 9.6042351e-03 4.2899092e-03 2.9941597e-03 4.6609860e-03 5.8520656e-03 1.4443089e-03 2.3092039e-03 1.1621771e-03 1.1296414e-03 1.1169479e-03 4.4204603e-03 1.2641450e-03 3.4620250e-03 3.4971048e-04 5.1226200e-03 6.2752047e-04 4.7853928e-03 3.3731886e-03 6.3070664e-03 1.6662334e-03 1.2828407e-03 1.6597311e-03 4.4397739e-03 3.3292201e-03 3.8314732e-03 3.8867782e-03 5.1669823e-03 3.5417199e-03 3.1734550e-03 1.6842820e-03 5.5385508e-03 2.3108610e-03 1.0105990e-03 4.7523926e-03 2.9847642e-03 9.7105522e-03 2.6675945e-03 5.8410645e-03 1.0628436e-03 8.9773521e-03 5.4199214e-03 1.6519449e-03 1.3847497e-03 4.5932482e-03 4.2245140e-03 3.0278103e-03 1.7095869e-03 2.8675709e-03 2.8000554e-03 1.2095222e-03 1.0633409e-02 6.3588443e-03 2.3146826e-03 1.8579783e-03 7.7501487e-03 1.5279559e-03 3.0361588e-03 6.2292475e-03 2.9996444e-03 2.2092276e-03 1.2670329e-03 3.0585508e-03 1.9631739e-03 1.9448597e-03 1.6026176e-03 7.3057463e-04 1.6268750e-03 1.9360609e-02 1.3271536e-02 1.1367917e-02 1.3356276e-02 1.4871782e-02 1.6028245e-02 1.4980963e-02 1.5218503e-02 1.7630002e-02 1.0242398e-02 5.4451367e-03 1.1236806e-02 9.0841292e-03 1.5577818e-02 1.6739427e-02 9.6435556e-03 9.8426714e-03 1.0341095e-02 2.4442578e-02 1.5482723e-02 9.8424496e-03 1.2530745e-02 1.8870958e-02 7.3605563e-03 9.5556413e-03 9.6316193e-03 6.0281749e-03 6.1257342e-03 1.4596779e-02 9.4938425e-03 1.3739836e-02 6.3629397e-03 1.5181016e-02 7.9755348e-03 1.8111593e-02 1.1305790e-02 1.3376686e-02 9.8378125e-03 5.8135644e-03 6.9819428e-03 1.1631750e-02 6.1445613e-03 1.3271536e-02 1.2517306e-02 1.1992997e-02 8.3045136e-03 1.0906252e-02 7.4644491e-03 1.1536762e-02 9.7524154e-03 4.5852486e-03 9.8595493e-04 5.1231602e-03 5.3641159e-04 6.1291344e-03 9.0289414e-06 4.9719488e-03 2.7623691e-03 1.0135570e-04 4.9630072e-05 1.4573369e-03 2.2050307e-03 1.9067713e-03 6.5935996e-04 5.5655703e-04 4.9349156e-04 5.3910141e-05 7.6334144e-03 6.4541501e-03 3.6168243e-03 4.5600228e-04 3.3337204e-03 1.8551457e-03 1.2329546e-03 3.8576017e-03 1.8263928e-03 3.4082293e-04 2.8939589e-05 1.6992773e-03 1.7320592e-03 1.4340517e-03 2.7567571e-04 1.5103910e-03 8.5202938e-04 1.8535044e-02 1.1041503e-02 8.1659733e-03 1.0422568e-02 1.2515324e-02 1.1445689e-02 1.3674148e-02 1.0415137e-02 1.2394915e-02 9.4870366e-03 4.7984401e-03 8.0611256e-03 6.7876377e-03 1.2924053e-02 1.5886474e-02 9.2445598e-03 7.3109344e-03 8.4847047e-03 1.8293716e-02 1.0175519e-02 8.2306422e-03 1.1561315e-02 1.3225081e-02 4.8091709e-03 8.1976852e-03 6.4746796e-03 4.2076714e-03 5.1007444e-03 1.1683626e-02 5.5527590e-03 8.9342726e-03 4.4973686e-03 1.2442734e-02 4.9468838e-03 1.3343592e-02 7.6599210e-03 1.3497447e-02 7.8132331e-03 5.1004820e-03 5.1592495e-03 1.0155636e-02 5.1788357e-03 1.1041503e-02 1.0666367e-02 1.1283660e-02 6.9627068e-03 7.4793969e-03 5.8082013e-03 1.1981463e-02 8.5614846e-03 
3.8688652e-03 7.5179133e-03 3.7407669e-03 3.1676786e-04 4.6173801e-03 4.2799166e-03 2.2501511e-03 4.2413983e-03 4.2204030e-03 4.6109813e-03 1.7937707e-03 8.2328963e-04 8.3021324e-03 4.1326900e-03 5.3767829e-03 3.8579826e-03 2.3734756e-03 1.6523409e-04 8.8835369e-04 2.5288831e-03 7.6911436e-03 1.1035999e-03 1.9572002e-03 1.3626197e-03 8.4501724e-04 3.6117590e-03 4.7013989e-03 1.0056440e-03 1.3674472e-03 9.9055737e-04 2.9505217e-03 8.8795046e-03 1.5067108e-03 6.0099517e-03 3.2148697e-03 3.5475051e-03 3.0620571e-03 4.0315627e-03 6.0415844e-03 3.4044063e-03 5.7826017e-03 7.5784861e-03 2.1065729e-03 8.8233953e-04 3.5528527e-03 2.5792431e-03 5.1245213e-03 6.2842876e-03 2.4095844e-03 1.8446522e-03 1.4167723e-03 1.2151930e-02 7.2262167e-03 2.4839286e-03 2.9019075e-03 8.5383029e-03 2.6181282e-03 1.4064800e-03 2.3288946e-03 1.7187301e-03 6.4203056e-04 4.4078979e-03 3.6116427e-03 6.0606537e-03 7.3312980e-04 4.8772717e-03 2.0701887e-03 6.3495233e-03 5.1782781e-03 3.3738477e-03 1.3922890e-03 5.6378115e-04 2.0528392e-03 3.5526857e-03 3.4142665e-03 3.2148697e-03 3.0384066e-03 3.3899600e-03 3.3852248e-03 4.7610217e-03 1.6939002e-03 2.6027995e-03 1.1563987e-03 4.9937739e-03 2.8043563e-04 6.0192505e-03 1.1047123e-03 3.7340869e-03 9.3891763e-04 5.7693223e-04 8.5742923e-04 1.1063653e-03 2.1518410e-03 1.7873418e-03 1.8443813e-03 4.7874237e-04 4.0562362e-04 6.3806313e-04 5.6896950e-03 5.3990383e-03 4.0838955e-03 7.6270784e-04 3.0255235e-03 1.8702287e-03 1.2256654e-03 1.9416278e-03 1.1863062e-03 4.8488269e-04 7.2384993e-04 1.1782779e-03 1.1397261e-03 1.1824844e-03 4.5012569e-04 4.3416102e-03 9.3532010e-04 1.7514227e-02 9.8600607e-03 7.1630436e-03 8.1480968e-03 1.1292909e-02 9.1710206e-03 1.2255610e-02 7.6051240e-03 9.9403988e-03 9.4620221e-03 5.2227781e-03 7.1398046e-03 6.5861173e-03 1.2201906e-02 1.6541006e-02 9.8696183e-03 5.7022093e-03 6.6806000e-03 1.5779735e-02 7.9447125e-03 8.2548534e-03 1.1179561e-02 1.0455124e-02 4.7301378e-03 7.3678164e-03 4.4876356e-03 4.3705173e-03 4.8557182e-03 1.0520487e-02 3.4331839e-03 7.0769130e-03 3.1675776e-03 1.1611243e-02 3.1862426e-03 9.1961094e-03 7.4495828e-03 1.3429738e-02 6.1002834e-03 5.0525215e-03 5.4421147e-03 1.0502545e-02 7.0546448e-03 9.8600607e-03 9.9581080e-03 1.1708746e-02 8.2306703e-03 7.3218404e-03 5.8241089e-03 1.2089443e-02 7.3253099e-03 3.2913459e-03 8.4499731e-03 4.8569537e-03 8.0216351e-04 3.5049401e-03 4.1209949e-03 4.1951632e-03 1.4430192e-03 2.1132422e-03 3.9581533e-03 7.7606295e-03 2.8139659e-03 3.8070595e-03 4.7702994e-03 3.8227367e-03 9.3108167e-03 1.0381275e-02 3.5346820e-03 5.2926972e-04 8.0977990e-03 2.5864687e-03 3.9952884e-03 4.6152713e-03 3.2808967e-03 5.0209478e-03 3.9992007e-03 7.6245140e-03 5.9395645e-03 4.2477403e-03 1.1072543e-02 4.9514063e-03 1.3733574e-02 6.2131974e-03 2.8230097e-03 5.6455623e-03 6.8638978e-03 3.4892544e-03 9.9148605e-03 3.2690483e-03 3.3086076e-03 7.4423917e-03 5.1197423e-03 2.7541244e-03 3.1219477e-03 6.1841097e-03 1.0726676e-02 7.6421585e-03 4.1594455e-03 6.2994749e-03 5.6115691e-03 2.0176931e-03 4.9542156e-03 8.4395555e-03 3.5502322e-03 1.8230265e-03 6.1478670e-03 3.1652703e-03 2.7049802e-03 4.9489115e-03 5.3042297e-03 1.9463142e-03 1.6986432e-03 4.1722516e-03 5.8173968e-03 2.6602408e-03 6.4952123e-03 1.5042394e-03 1.1842312e-02 5.5188608e-03 5.4735640e-03 2.9735027e-03 6.0829501e-03 4.0306925e-03 6.2131974e-03 6.3472325e-03 8.2596987e-03 4.2451973e-03 1.6828229e-03 3.7151898e-03 1.1574575e-02 7.1331719e-03 5.4811265e-03 5.5565221e-04 2.6852359e-03 1.0154324e-03 1.7597013e-04 3.1956114e-04 3.9523878e-04 
1.2388706e-03 1.2655903e-03 1.6805675e-03 3.2406061e-05 1.6284558e-04 3.0055489e-04 4.9827743e-03 5.3992645e-03 4.0722394e-03 2.5388663e-04 1.8315484e-03 2.0093416e-03 5.5333384e-04 1.9333559e-03 1.0417210e-03 4.5631651e-05 3.9213732e-04 8.9628584e-04 1.5141190e-03 1.1540120e-03 1.6596111e-04 3.8108192e-03 6.9068728e-04 1.6110144e-02 8.5829426e-03 5.7514563e-03 7.4081873e-03 9.8865471e-03 7.9109476e-03 1.1246146e-02 6.7496818e-03 8.5805974e-03 8.1149529e-03 4.1088457e-03 5.6967153e-03 5.0837880e-03 1.0378372e-02 1.4309813e-02 8.2945922e-03 4.9454249e-03 6.1635386e-03 1.3817928e-02 6.6600542e-03 6.6926476e-03 9.8029996e-03 9.1474450e-03 3.3107453e-03 6.4113962e-03 3.9012077e-03 3.0861181e-03 3.9737498e-03 8.9756426e-03 2.8871345e-03 5.7720900e-03 2.7832463e-03 9.8623531e-03 2.6826772e-03 9.0033513e-03 5.5466456e-03 1.2137789e-02 5.5287165e-03 4.1655510e-03 3.9755943e-03 8.6395601e-03 5.0222445e-03 8.5829426e-03 8.5444862e-03 9.9728516e-03 6.1969528e-03 5.4521508e-03 4.4886104e-03 1.0936532e-02 6.6624227e-03 6.0879321e-03 5.2071133e-03 3.8816096e-03 5.8831678e-03 5.7002681e-03 6.1926865e-03 2.4416494e-03 1.5149583e-03 1.0539677e-02 5.8224280e-03 7.4292172e-03 5.4190952e-03 2.7168224e-03 2.8895677e-04 1.2473682e-03 3.7477469e-03 9.3088565e-03 2.0944009e-03 2.9293124e-03 2.5720420e-03 1.8897161e-03 5.1660356e-03 6.4102501e-03 2.0138936e-03 2.7413840e-03 2.0664916e-03 4.4338085e-03 1.0353410e-02 2.6118482e-03 4.2150650e-03 2.5222817e-03 3.4164904e-03 3.0582580e-03 3.1593359e-03 6.2318287e-03 2.3862805e-03 6.4839490e-03 7.9101598e-03 1.0920878e-03 5.4699759e-04 3.4046237e-03 2.2291279e-03 4.0543858e-03 4.2004727e-03 1.1970351e-03 2.0117915e-03 1.4112028e-03 1.1968526e-02 7.9088029e-03 1.7082776e-03 1.7239090e-03 9.0238817e-03 2.6874577e-03 9.4975956e-04 2.9722514e-03 1.7009640e-03 5.1343341e-04 3.7130353e-03 4.7806054e-03 6.6267337e-03 1.3220831e-03 3.9245747e-03 2.9403206e-03 7.2757164e-03 5.0266602e-03 1.8157078e-03 1.4701768e-03 3.5054511e-04 1.7883207e-03 2.3616946e-03 2.6617941e-03 2.5222817e-03 2.1723972e-03 1.9530095e-03 2.4533627e-03 4.5559375e-03 1.3609168e-03 1.2016297e-03 8.2861882e-04 4.8139327e-03 2.8209366e-03 1.1052349e-04 3.7194090e-05 1.3880093e-03 2.0960687e-03 1.8746430e-03 7.3307377e-04 5.4600497e-04 5.2531656e-04 7.7399437e-05 7.5260309e-03 6.5039124e-03 3.6958100e-03 4.3794778e-04 3.1691974e-03 1.9589162e-03 1.1747018e-03 3.8950000e-03 1.8610625e-03 3.3531102e-04 5.6607375e-05 1.7087501e-03 1.8728452e-03 1.4997100e-03 3.0123700e-04 1.5391826e-03 8.8147902e-04 1.8294246e-02 1.0829571e-02 7.9341800e-03 1.0299926e-02 1.2275737e-02 1.1220530e-02 1.3508784e-02 1.0262914e-02 1.2147087e-02 9.2823036e-03 4.6531594e-03 7.8245831e-03 6.5524640e-03 1.2610156e-02 1.5512672e-02 9.0061844e-03 7.1992659e-03 8.4158356e-03 1.7921554e-02 9.9492079e-03 7.9854978e-03 1.1342596e-02 1.2982212e-02 4.5954373e-03 8.0580657e-03 6.3922041e-03 4.0251341e-03 4.9895677e-03 1.1418706e-02 5.4787432e-03 8.7107499e-03 4.4679304e-03 1.2142235e-02 4.8867636e-03 1.3297934e-02 7.3458933e-03 1.3299484e-02 7.7358341e-03 4.9917221e-03 4.9413674e-03 9.8545070e-03 4.8770063e-03 1.0829571e-02 1.0435652e-02 1.1007902e-02 6.6474718e-03 7.1727397e-03 5.6106947e-03 1.1815640e-02 8.4729744e-03 1.5166983e-03 3.8470717e-03 4.0414722e-03 1.3464004e-03 1.0521554e-03 2.2256122e-03 8.2355212e-03 2.4972654e-03 3.6758320e-03 4.2952040e-03 1.2466978e-03 5.4237685e-03 7.4233145e-03 2.7434894e-03 1.3920410e-03 5.6705580e-03 1.6006987e-03 1.4795623e-03 2.5504964e-03 2.8217649e-03 4.8039535e-03 2.1716992e-03 5.1387834e-03 
3.8511647e-03 3.4066344e-03 1.1638730e-02 3.4223746e-03 9.4998939e-03 3.3659956e-03 1.1438717e-03 2.4828929e-03 3.9653407e-03 1.5796312e-03 5.9993003e-03 1.2126234e-03 1.7377370e-03 4.6402796e-03 3.0884931e-03 1.1351744e-03 1.5562166e-03 3.9244608e-03 8.1556610e-03 5.1855221e-03 1.5226783e-03 2.8914073e-03 4.3431210e-03 9.2550137e-04 2.9272754e-03 5.3162687e-03 2.0104865e-03 7.9177243e-04 3.1975522e-03 8.1317449e-04 1.3785091e-03 2.5714969e-03 2.9511423e-03 2.8161529e-04 5.9442405e-04 1.5991249e-03 3.5615321e-03 5.8433150e-04 3.1011353e-03 9.7656296e-04 8.0983113e-03 2.3700003e-03 3.0461348e-03 1.6450583e-03 4.0872867e-03 3.5603497e-03 3.3659956e-03 3.6518311e-03 5.6844228e-03 3.2852236e-03 1.0303385e-03 1.9470044e-03 7.8778855e-03 3.7000111e-03 1.9300708e-03 2.2574717e-03 1.1284358e-03 1.0646979e-03 1.0312076e-03 5.0621817e-03 1.2060164e-03 1.8000639e-03 2.0650564e-03 2.1487796e-03 3.2333878e-03 4.0195479e-03 1.2411974e-03 2.7183002e-03 2.2726383e-03 8.5094120e-04 2.4056963e-04 6.3782113e-04 1.2937611e-03 2.4889893e-03 5.9722053e-04 1.5621785e-03 1.2375553e-03 1.3358412e-03 8.1240216e-03 1.2311022e-03 1.1517636e-02 5.2909227e-03 3.4509195e-03 3.7153220e-03 6.3154320e-03 4.5407961e-03 7.2027738e-03 3.4054800e-03 5.2043334e-03 5.7490488e-03 3.1731637e-03 3.4719309e-03 3.4347027e-03 7.1701181e-03 1.1379612e-02 6.4467389e-03 2.2692649e-03 2.9960956e-03 9.7363741e-03 4.0178908e-03 4.7338729e-03 6.7982603e-03 5.6061060e-03 2.3699712e-03 3.8192855e-03 1.4133848e-03 2.2956056e-03 2.5088406e-03 5.7314562e-03 9.5862581e-04 3.4107092e-03 9.7091810e-04 6.6778472e-03 7.6184177e-04 4.3247639e-03 4.2032521e-03 8.9746527e-03 2.5881501e-03 2.8171688e-03 3.0086239e-03 6.5256052e-03 5.3225794e-03 5.2909227e-03 5.5860698e-03 7.6013097e-03 5.5376614e-03 4.0938335e-03 3.1000848e-03 8.1789436e-03 3.7376243e-03 3.2267663e-05 8.3707549e-04 1.6905622e-03 1.5718936e-03 9.3681209e-04 1.8762095e-04 1.9684682e-04 4.1277465e-05 6.4440482e-03 6.0518000e-03 3.8422414e-03 2.6274935e-04 2.4665214e-03 1.9088192e-03 8.3864343e-04 2.9786928e-03 1.4388393e-03 8.3975474e-05 5.3416679e-05 1.2919166e-03 1.6159093e-03 1.2740163e-03 1.2266042e-04 2.3692761e-03 7.1636545e-04 1.7563748e-02 9.9676420e-03 7.0439691e-03 9.1049407e-03 1.1370346e-02 9.8276989e-03 1.2673450e-02 8.7383729e-03 1.0632094e-02 8.9036592e-03 4.4721835e-03 6.9588127e-03 5.9822651e-03 1.1786793e-02 1.5215597e-02 8.8361414e-03 6.2553172e-03 7.4909328e-03 1.6226352e-02 8.5173085e-03 7.5296182e-03 1.0823312e-02 1.1340207e-02 4.0626163e-03 7.4274805e-03 5.3041231e-03 3.6467217e-03 4.5925766e-03 1.0472609e-02 4.2980846e-03 7.4369871e-03 3.7248522e-03 1.1287932e-02 3.8978058e-03 1.1428778e-02 6.6149115e-03 1.2975959e-02 6.8214468e-03 4.6824073e-03 4.5741646e-03 9.4691004e-03 5.0187206e-03 9.9676420e-03 9.7385546e-03 1.0722589e-02 6.5565379e-03 6.4801586e-03 5.1854796e-03 1.1596572e-02 7.7633670e-03 9.9158601e-04 1.6686143e-03 1.5457871e-03 9.5071878e-04 3.0701864e-04 3.6362811e-04 4.2548991e-05 6.6288818e-03 6.0417331e-03 3.6564704e-03 2.5834714e-04 2.6458983e-03 1.8764428e-03 8.4820574e-04 3.2578450e-03 1.5209238e-03 1.5090465e-04 6.1935165e-05 1.3587649e-03 1.7158299e-03 1.3006818e-03 1.6628415e-04 2.0535582e-03 7.1188329e-04 1.7324639e-02 9.9147481e-03 7.0600417e-03 9.2840701e-03 1.1302120e-02 1.0064078e-02 1.2589616e-02 9.1080261e-03 1.0920912e-02 8.6425556e-03 4.2412792e-03 6.9624194e-03 5.8567924e-03 1.1639501e-02 1.4739829e-02 8.4651679e-03 6.3684651e-03 7.5923014e-03 1.6475338e-02 8.8252851e-03 7.3097093e-03 1.0588424e-02 1.1694482e-02 3.9807447e-03 
7.3452100e-03 5.5353092e-03 3.5104587e-03 4.4753907e-03 1.0430724e-02 4.6273816e-03 7.6755144e-03 3.8450588e-03 1.1170257e-02 4.1281664e-03 1.1986478e-02 6.5423939e-03 1.2634016e-02 6.9257811e-03 4.5247923e-03 4.3922796e-03 9.1553155e-03 4.5676871e-03 9.9147481e-03 9.5995789e-03 1.0353234e-02 6.1627903e-03 6.3902007e-03 5.0235520e-03 1.1247115e-02 7.7428181e-03 9.7824889e-04 1.6624661e-03 3.0059873e-03 2.5249972e-04 6.2343657e-04 1.1719366e-03 4.0275459e-03 6.3575546e-03 5.8712921e-03 7.5872446e-04 5.6741436e-04 3.5908637e-03 6.4996051e-04 1.9776689e-03 1.6840088e-03 4.6219818e-04 1.2917587e-03 1.3743184e-03 3.0187893e-03 2.2373182e-03 9.0828703e-04 5.7426890e-03 1.5775049e-03 1.5168309e-02 7.4080646e-03 4.2538993e-03 6.3033105e-03 8.5100385e-03 5.7899432e-03 1.0520014e-02 4.8624768e-03 6.1169570e-03 7.6384090e-03 4.1091153e-03 4.1981402e-03 3.9923041e-03 8.6172736e-03 1.2997087e-02 7.8717119e-03 4.1667776e-03 5.7584992e-03 1.0387455e-02 4.3546484e-03 5.7746712e-03 9.1054055e-03 6.5301937e-03 2.3054088e-03 5.9614728e-03 3.0379183e-03 2.5067124e-03 3.9126904e-03 7.3239330e-03 1.8370981e-03 3.7021481e-03 2.7152943e-03 8.1304066e-03 2.0347479e-03 7.4172182e-03 3.6502880e-03 1.1915038e-02 5.0530642e-03 4.2458688e-03 3.2213237e-03 7.5116191e-03 4.3860606e-03 7.4080646e-03 7.4726409e-03 9.2071058e-03 5.2421868e-03 3.6675370e-03 3.8308993e-03 1.1024145e-02 6.4520833e-03 3.0133930e-04 5.1037905e-03 1.2017859e-03 2.1951761e-03 1.7686888e-03 1.8224411e-03 2.8788865e-03 3.1740291e-03 7.0420689e-04 2.2350252e-03 2.1605967e-03 1.6074282e-04 9.9312132e-04 6.3910600e-04 1.1068342e-03 2.2522084e-03 4.2563220e-04 2.1383582e-03 1.1781524e-03 1.1965920e-03 6.6826197e-03 8.9701315e-04 8.8374021e-03 3.5249181e-03 1.9006375e-03 3.2674819e-03 4.3522253e-03 3.8602822e-03 5.5032387e-03 3.5903574e-03 4.6280025e-03 3.2207897e-03 1.0860964e-03 1.8528751e-03 1.3287556e-03 4.5637650e-03 7.2040689e-03 3.3452381e-03 1.6438459e-03 2.5042800e-03 8.4078534e-03 3.6412189e-03 2.1968551e-03 4.2884610e-03 5.3072049e-03 5.5962180e-04 2.2924485e-03 1.3972917e-03 4.1775124e-04 1.0120628e-03 3.7672964e-03 1.3901921e-03 2.7802443e-03 7.8074497e-04 4.2513803e-03 8.6439557e-04 5.7017672e-03 1.9558095e-03 6.1305769e-03 2.0764524e-03 1.1750038e-03 7.8121442e-04 3.3536027e-03 1.6840705e-03 3.5249181e-03 3.4031938e-03 4.3169071e-03 2.0323666e-03 1.8104373e-03 1.0313169e-03 5.4663596e-03 2.6797009e-03 4.7017751e-03 1.4050521e-03 2.2967640e-03 1.4490557e-03 2.3196093e-03 1.7015479e-03 1.5767700e-03 5.5713099e-04 3.7392678e-03 8.9324098e-04 2.5002761e-04 8.6482380e-04 1.3585049e-04 1.1239692e-03 1.9761479e-03 8.2927740e-05 9.6014531e-04 3.4965263e-04 8.9552257e-04 5.7645956e-03 3.1975366e-04 8.7743211e-03 4.0156462e-03 2.9085611e-03 3.7265804e-03 4.9611690e-03 5.3200137e-03 5.4291747e-03 4.9090444e-03 6.4181467e-03 3.1699225e-03 9.0457257e-04 2.8707306e-03 2.0343345e-03 5.5511033e-03 7.6886258e-03 3.2962764e-03 1.9800100e-03 2.4033904e-03 1.0905558e-02 5.4434884e-03 2.6411651e-03 4.3219947e-03 7.2333465e-03 1.3483652e-03 2.2580657e-03 1.9023537e-03 8.2889320e-04 8.3511491e-04 4.6996589e-03 2.2192064e-03 4.3941392e-03 6.5409726e-04 5.2313685e-03 1.2879740e-03 6.5768476e-03 3.4674432e-03 5.6567012e-03 2.0849532e-03 8.7522015e-04 1.2925162e-03 3.9107304e-03 2.2116014e-03 4.0156462e-03 3.8265815e-03 4.4724719e-03 2.6832503e-03 3.2108081e-03 1.3825243e-03 4.7909587e-03 2.3944351e-03 1.6956470e-03 1.0108236e-03 9.3414345e-04 1.2241588e-02 1.0680036e-02 6.4457112e-03 2.1113831e-03 4.8851719e-03 3.9417362e-03 3.5304281e-03 6.9169294e-03 
4.3255103e-03 1.5489174e-03 5.9380000e-04 4.2050996e-03 3.5612837e-03 3.5981180e-03 1.5473563e-03 1.0469113e-03 2.7881158e-03 2.6084642e-02 1.6915515e-02 1.3046936e-02 1.5813114e-02 1.8728660e-02 1.6622622e-02 2.0172358e-02 1.5047933e-02 1.7480656e-02 1.5121187e-02 8.9988271e-03 1.2921567e-02 1.1505555e-02 1.9202447e-02 2.2948431e-02 1.4807179e-02 1.1989561e-02 1.3522703e-02 2.4353472e-02 1.4534512e-02 1.3492376e-02 1.7699736e-02 1.8260951e-02 8.7551977e-03 1.3397972e-02 1.0605977e-02 8.1065177e-03 9.3705995e-03 1.7600384e-02 8.9170386e-03 1.3238432e-02 8.2394556e-03 1.8591466e-02 8.5466794e-03 1.8371580e-02 1.2139358e-02 2.0015536e-02 1.2667833e-02 9.3753580e-03 9.4099897e-03 1.5925767e-02 9.2577085e-03 1.6915515e-02 1.6530549e-02 1.7369847e-02 1.1736690e-02 1.2003814e-02 1.0321540e-02 1.8104120e-02 1.3766972e-02 1.5141485e-04 3.6186411e-04 5.0640473e-03 5.8846843e-03 4.5372473e-03 2.9889995e-04 1.4503455e-03 2.4048820e-03 5.6473356e-04 2.1694139e-03 1.2760726e-03 5.0001009e-05 4.2762331e-04 1.0752642e-03 1.9285330e-03 1.4459840e-03 2.6007132e-04 3.8461838e-03 8.8079628e-04 1.6321310e-02 8.6239000e-03 5.6156570e-03 7.5378743e-03 9.9017513e-03 7.7506746e-03 1.1476827e-02 6.6644304e-03 8.3385285e-03 8.2160708e-03 4.1911289e-03 5.5513041e-03 4.9688023e-03 1.0246372e-02 1.4224144e-02 8.3426053e-03 5.0473242e-03 6.4114140e-03 1.3391748e-02 6.3862951e-03 6.6425216e-03 9.9154668e-03 8.8906649e-03 3.1510729e-03 6.5653256e-03 3.9767727e-03 3.0148559e-03 4.1087786e-03 8.8891438e-03 2.8692574e-03 5.5219515e-03 2.9822205e-03 9.7392578e-03 2.7559926e-03 9.1569917e-03 5.1978118e-03 1.2376315e-02 5.7353587e-03 4.3202502e-03 3.8711781e-03 8.5388416e-03 4.7844327e-03 8.6239000e-03 8.5671038e-03 9.9856257e-03 5.9819812e-03 5.1316831e-03 4.4550423e-03 1.1213210e-02 6.9205612e-03 3.8131057e-04 6.7888158e-03 7.3363003e-03 5.3694436e-03 6.8516623e-04 1.8924051e-03 2.8895525e-03 1.2584203e-03 3.1106561e-03 1.9851820e-03 2.2993360e-04 3.0705767e-04 1.8065799e-03 2.2681792e-03 1.9895114e-03 4.4561501e-04 3.2950230e-03 1.3536253e-03 1.9442033e-02 1.0986408e-02 7.5586116e-03 9.6139706e-03 1.2432885e-02 9.8077912e-03 1.4077548e-02 8.4254038e-03 1.0370813e-02 1.0478515e-02 5.7856689e-03 7.4894276e-03 6.8497491e-03 1.2864462e-02 1.7242156e-02 1.0603978e-02 6.8089917e-03 8.2980029e-03 1.5941696e-02 8.0862824e-03 8.7712331e-03 1.2406133e-02 1.0894930e-02 4.6791769e-03 8.5829625e-03 5.4740472e-03 4.5017704e-03 5.7029968e-03 1.1309070e-02 4.0232333e-03 7.1989285e-03 4.2761837e-03 1.2289743e-02 4.0060693e-03 1.1002553e-02 7.0316526e-03 1.5021687e-02 7.5484658e-03 5.9167328e-03 5.5390768e-03 1.0934157e-02 6.4525090e-03 1.0986408e-02 1.0943118e-02 1.2502015e-02 7.9589531e-03 6.9802991e-03 6.2208864e-03 1.3663387e-02 8.8999016e-03 6.6062542e-03 5.5827314e-03 3.2137911e-03 2.4648342e-04 3.0882054e-03 1.4855278e-03 8.8238305e-04 3.0312113e-03 1.3048774e-03 1.7074342e-04 4.2409993e-05 1.2055084e-03 1.2882692e-03 1.0204762e-03 8.9780466e-05 2.0467514e-03 5.4370125e-04 1.7241595e-02 9.9776023e-03 7.2970604e-03 9.2205394e-03 1.1400153e-02 1.0313298e-02 1.2454804e-02 9.2538420e-03 1.1254653e-02 8.6534668e-03 4.2479840e-03 7.2108683e-03 6.0807502e-03 1.1908599e-02 1.4998922e-02 8.5435101e-03 6.3304436e-03 7.3992448e-03 1.7031719e-02 9.1810782e-03 7.4786833e-03 1.0592315e-02 1.2037933e-02 4.2382770e-03 7.2750145e-03 5.5074473e-03 3.6741461e-03 4.4146887e-03 1.0634443e-02 4.6823366e-03 8.0104480e-03 3.6842552e-03 1.1424845e-02 4.0945647e-03 1.1857190e-02 7.0169427e-03 1.2513676e-02 6.7752703e-03 4.4459599e-03 4.6022279e-03 
9.3965771e-03 4.9465269e-03 9.9776023e-03 9.6902181e-03 1.0478640e-02 6.5221232e-03 6.8355801e-03 5.1573625e-03 1.1079009e-02 7.5518079e-03 2.6192775e-03 5.9216859e-03 4.3727502e-03 5.1296754e-03 5.3199310e-03 2.7670517e-03 1.1371035e-03 2.6020928e-03 5.1188394e-03 7.5382072e-03 2.4476107e-03 5.0616317e-03 3.9293810e-03 5.2302586e-03 1.5095801e-02 4.1435464e-03 4.4658108e-03 8.7058429e-04 4.4660793e-04 2.5555941e-04 1.2381894e-03 8.7244097e-04 2.0362511e-03 8.1311609e-04 1.5164656e-03 1.9481959e-03 1.9017569e-03 4.9004150e-04 8.1438359e-04 1.7141435e-03 4.7093504e-03 2.7065864e-03 6.0393489e-05 4.2915211e-04 3.9373776e-03 1.5669301e-03 1.2950596e-03 2.1025536e-03 1.9522193e-03 1.0322026e-03 8.4218839e-04 9.2905490e-05 1.2346326e-03 1.1781032e-03 9.8002670e-04 7.9339756e-04 1.1328834e-03 6.2948320e-04 1.4629322e-03 3.9084846e-04 1.4164544e-03 1.5298834e-03 3.8721999e-03 2.7001180e-04 1.5287601e-03 1.2669024e-03 2.1037534e-03 3.7134859e-03 8.7058429e-04 1.1775044e-03 2.8495857e-03 2.7086756e-03 1.4023687e-03 1.0191451e-03 3.8647977e-03 9.3179883e-04 1.2988786e-03 3.9786961e-03 9.7659791e-03 1.8948352e-03 3.2059058e-03 1.9902291e-03 1.7004118e-03 5.2879179e-03 6.5830280e-03 1.9368261e-03 2.2268965e-03 1.8962421e-03 4.4774419e-03 1.1162650e-02 2.6447647e-03 4.9365528e-03 3.0323131e-03 3.9972109e-03 2.9343480e-03 3.7523678e-03 6.3980393e-03 2.7120874e-03 6.2404178e-03 8.0807935e-03 1.9416596e-03 1.3024398e-03 4.0245181e-03 3.0697725e-03 5.0283287e-03 5.8270256e-03 2.3505477e-03 2.0375391e-03 1.2652208e-03 1.2527532e-02 8.0556250e-03 2.6750700e-03 2.5225228e-03 9.0764566e-03 3.4699713e-03 1.3308823e-03 2.7664696e-03 2.4678015e-03 9.5729618e-04 4.3858320e-03 4.4729680e-03 6.8828714e-03 1.1547480e-03 4.8183810e-03 2.7179325e-03 6.3087897e-03 6.0843336e-03 2.6715726e-03 1.3701564e-03 8.5085504e-04 2.7129190e-03 3.6187454e-03 4.3357364e-03 3.0323131e-03 2.8888833e-03 3.1752051e-03 4.0026050e-03 5.6110347e-03 2.1520192e-03 2.0021167e-03 9.3001678e-04 2.6139835e-03 9.7625461e-03 4.3820793e-04 2.7906691e-03 3.4740135e-03 1.5471352e-03 3.7324822e-03 3.8774805e-03 1.8049144e-03 1.0166909e-03 9.6586952e-04 2.7620874e-03 5.6379477e-03 1.4267399e-03 9.9420936e-03 6.9534902e-03 7.2046628e-03 7.1611325e-03 8.0864542e-03 1.1050679e-02 7.0236664e-03 1.0684883e-02 1.2982713e-02 4.4982901e-03 2.1523458e-03 7.1621069e-03 5.3786825e-03 9.3055499e-03 9.5487861e-03 4.3879387e-03 5.0693092e-03 4.4931267e-03 1.8805226e-02 1.2143973e-02 5.1616364e-03 5.8432704e-03 1.4239876e-02 5.0396297e-03 4.0643450e-03 5.6688069e-03 3.5911389e-03 2.3422208e-03 8.6067938e-03 6.9700590e-03 1.0540875e-02 2.8498735e-03 9.0488233e-03 4.9258801e-03 1.1781603e-02 8.6822745e-03 5.8089260e-03 4.4337196e-03 2.0267056e-03 4.1354243e-03 6.4148025e-03 4.4976834e-03 6.9534902e-03 6.4275292e-03 5.9584428e-03 5.3006657e-03 8.1437729e-03 3.9023880e-03 4.5299634e-03 3.7988456e-03 2.5123066e-03 1.1946309e-03 2.0097566e-04 1.7928786e-03 5.7143349e-04 1.2667176e-04 4.6644872e-04 4.4712161e-04 1.0683378e-03 5.7468992e-04 7.6554900e-05 3.3664265e-03 2.0582490e-04 1.3553574e-02 7.1051776e-03 4.8941009e-03 6.5257957e-03 8.3098959e-03 7.5373275e-03 9.3233389e-03 6.7621473e-03 8.4467156e-03 6.1291129e-03 2.5944515e-03 4.8248569e-03 3.9028342e-03 8.7515124e-03 1.1627531e-02 6.1178515e-03 4.1203288e-03 5.0572182e-03 1.3529172e-02 6.7984240e-03 5.0687125e-03 7.7357162e-03 9.2015610e-03 2.4873418e-03 4.9175701e-03 3.5355888e-03 2.0473249e-03 2.6605314e-03 7.6511613e-03 3.0628424e-03 5.7307078e-03 2.1169019e-03 8.3320524e-03 2.4574218e-03 9.1102030e-03 
4.8098810e-03 9.5736130e-03 4.5225384e-03 2.7314130e-03 2.7690856e-03 6.7080946e-03 3.3506310e-03 7.1051776e-03 6.8798895e-03 7.7001955e-03 4.4569824e-03 4.6305164e-03 3.1848752e-03 8.4170139e-03 5.2045050e-03 6.9296246e-03 2.1360002e-03 3.7784686e-03 3.9888330e-03 1.9332806e-03 3.1165972e-03 3.4622240e-03 6.1843155e-03 4.9689115e-03 2.8276933e-03 8.4143362e-03 3.9488633e-03 1.7390613e-02 8.6823566e-03 4.7125346e-03 7.5297389e-03 9.6551734e-03 5.6073015e-03 1.2632980e-02 4.7967985e-03 5.4595549e-03 9.6779905e-03 6.1919843e-03 4.6437798e-03 4.8811137e-03 9.2634464e-03 1.4470481e-02 9.9239882e-03 5.5116951e-03 7.7393895e-03 8.8498403e-03 3.6104684e-03 7.0913900e-03 1.1068993e-02 5.6668826e-03 3.0002631e-03 7.9032607e-03 4.0760726e-03 3.7279027e-03 5.9729084e-03 7.9919733e-03 2.3383845e-03 3.2272638e-03 4.6096764e-03 8.7690968e-03 3.1364844e-03 7.9578373e-03 3.3962632e-03 1.4631125e-02 6.8663759e-03 6.4834920e-03 4.3461235e-03 8.7377082e-03 5.5255283e-03 8.6823566e-03 8.8561968e-03 1.1024987e-02 6.2645608e-03 3.5745084e-03 5.1960404e-03 1.3983177e-02 8.7265781e-03 1.5360474e-03 2.2738132e-03 6.3957013e-04 1.8470485e-03 1.9436948e-03 8.2753134e-04 1.4870044e-04 1.8575051e-04 1.1511457e-03 4.1570185e-03 4.2481246e-04 1.2022631e-02 7.4558885e-03 6.7319225e-03 7.0097246e-03 8.7236579e-03 1.0059866e-02 8.2686681e-03 9.2543911e-03 1.1644804e-02 5.5866311e-03 2.5062451e-03 6.6946690e-03 5.2622870e-03 9.8836223e-03 1.1452219e-02 5.6355000e-03 4.7339621e-03 4.6436729e-03 1.7620106e-02 1.0378141e-02 5.6675320e-03 7.1003305e-03 1.2674455e-02 4.3836233e-03 4.5891805e-03 4.7259960e-03 3.2437023e-03 2.5249532e-03 8.8366518e-03 5.2434703e-03 8.9867759e-03 2.2946416e-03 9.5089828e-03 3.7537168e-03 1.0503141e-02 7.8427036e-03 7.7804598e-03 4.4127994e-03 2.3620086e-03 3.9941388e-03 7.2903223e-03 4.7424240e-03 7.4558885e-03 7.1215300e-03 7.3539364e-03 5.7328206e-03 7.4284923e-03 3.9643246e-03 6.4306545e-03 4.3729078e-03 1.0852360e-03 3.9664584e-04 4.5123208e-04 1.2414442e-03 2.3150826e-04 1.4183402e-03 7.0426023e-04 4.8163546e-04 5.0741189e-03 3.9595383e-04 1.0937070e-02 5.0339955e-03 3.1168119e-03 4.5426488e-03 6.0432452e-03 5.3182662e-03 7.1300333e-03 4.7641753e-03 6.1358685e-03 4.4885462e-03 1.6916118e-03 3.0636842e-03 2.3987749e-03 6.3937387e-03 9.2584560e-03 4.5899059e-03 2.5786937e-03 3.4750392e-03 1.0541835e-02 4.8350707e-03 3.4370915e-03 5.7977832e-03 6.8277961e-03 1.3144796e-03 3.3638906e-03 2.1338739e-03 1.0504939e-03 1.6413828e-03 5.4173620e-03 1.8780432e-03 3.9014661e-03 1.1794316e-03 6.0207953e-03 1.3473818e-03 6.9135169e-03 3.1440431e-03 7.6724602e-03 2.9974710e-03 1.7751706e-03 1.5977690e-03 4.8531144e-03 2.4494451e-03 5.0339955e-03 4.8947958e-03 5.8464391e-03 3.1299587e-03 2.9881785e-03 1.9161069e-03 6.7804975e-03 3.6799724e-03 6.0827379e-04 2.1616612e-03 3.6536587e-03 6.0346559e-04 1.7560211e-03 1.3150048e-03 2.0808595e-03 9.6848049e-03 1.5050304e-03 8.5509882e-03 3.4934050e-03 2.3873070e-03 2.2266374e-03 4.3437966e-03 3.4432000e-03 4.8822411e-03 2.6118729e-03 4.2515298e-03 3.9280244e-03 2.1763551e-03 2.4215318e-03 2.3726469e-03 5.2469603e-03 8.8156975e-03 4.6543689e-03 1.1653780e-03 1.5634616e-03 8.3770205e-03 3.5034084e-03 3.2683461e-03 4.7014835e-03 4.7252691e-03 1.7941232e-03 2.2843067e-03 6.9328893e-04 1.6436639e-03 1.4866860e-03 4.0234517e-03 7.8970448e-04 2.8726545e-03 3.0564882e-04 4.8340126e-03 3.3681970e-04 3.2031086e-03 3.4962764e-03 6.5051951e-03 1.2935806e-03 1.7549327e-03 2.1673002e-03 4.7736712e-03 4.5514956e-03 3.4934050e-03 3.7661841e-03 5.5497035e-03 4.3927525e-03 
3.3294822e-03 2.0638533e-03 5.8922322e-03 2.1421877e-03 1.0254014e-03 1.7967204e-03 2.4056491e-05 4.9606070e-04 1.5493043e-04 7.3786851e-04 5.7634641e-03 2.2821239e-04 9.8983432e-03 4.8234579e-03 3.6558248e-03 4.1003359e-03 5.8726550e-03 5.9033047e-03 6.1776943e-03 5.1708698e-03 7.0154090e-03 4.1004756e-03 1.5716690e-03 3.6383691e-03 2.8864008e-03 6.7180252e-03 9.3042807e-03 4.3918407e-03 2.3314998e-03 2.6748196e-03 1.1913776e-02 5.9266374e-03 3.6227612e-03 5.2997514e-03 7.7658365e-03 2.0655115e-03 2.8598574e-03 2.0567764e-03 1.5031818e-03 1.3330059e-03 5.6389436e-03 2.2318728e-03 4.9186471e-03 7.3077456e-04 6.3335619e-03 1.3529510e-03 6.4463256e-03 4.4695359e-03 6.6841393e-03 2.3650796e-03 1.3974597e-03 2.1137539e-03 5.1383865e-03 3.4413190e-03 4.8234579e-03 4.7468419e-03 5.6953492e-03 3.9468482e-03 4.2095924e-03 2.1520165e-03 5.7168561e-03 2.8181362e-03 2.6785665e-04 8.6267880e-04 1.5080867e-03 1.0657562e-03 9.1259932e-05 3.2834346e-03 5.6400090e-04 1.5768182e-02 8.4481987e-03 5.6912110e-03 7.5460195e-03 9.7379656e-03 8.1209399e-03 1.1078194e-02 7.1125751e-03 8.8544675e-03 7.7247994e-03 3.7380313e-03 5.6213520e-03 4.8502413e-03 1.0137958e-02 1.3681745e-02 7.7719782e-03 4.9953780e-03 6.2009166e-03 1.4059422e-02 6.9488117e-03 6.3504999e-03 9.4504672e-03 9.5001122e-03 3.1213299e-03 6.2307927e-03 4.0875652e-03 2.8287725e-03 3.7400310e-03 8.8571631e-03 3.1845212e-03 5.9757813e-03 2.8285304e-03 9.6559261e-03 2.8616391e-03 9.5937311e-03 5.3787128e-03 1.1658427e-02 5.5682032e-03 3.8827352e-03 3.6680728e-03 8.1913042e-03 4.3904219e-03 8.4481987e-03 8.3060404e-03 9.4592722e-03 5.6422254e-03 5.2631423e-03 4.2070121e-03 1.0442544e-02 6.5544595e-03 1.6777879e-03 1.6905742e-03 1.4559893e-03 2.3696578e-04 1.7661444e-03 8.8626613e-04 1.8929605e-02 1.1198434e-02 8.2126916e-03 1.0357192e-02 1.2691977e-02 1.1286676e-02 1.3897779e-02 1.0124890e-02 1.2174180e-02 9.8437553e-03 5.0898821e-03 8.1181082e-03 6.9678076e-03 1.3160374e-02 1.6460126e-02 9.6980805e-03 7.2881507e-03 8.4988687e-03 1.8112744e-02 9.9154333e-03 8.5214392e-03 1.1906445e-02 1.2939064e-02 4.9240987e-03 8.3797448e-03 6.3232280e-03 4.3836801e-03 5.2916266e-03 1.1816149e-02 5.2785357e-03 8.7418147e-03 4.4568647e-03 1.2648763e-02 4.7832566e-03 1.2911612e-02 7.7565330e-03 1.3968530e-02 7.8153448e-03 5.3323341e-03 5.3832666e-03 1.0542156e-02 5.6397042e-03 1.1198434e-02 1.0902962e-02 1.1744032e-02 7.3882415e-03 7.5970260e-03 6.0277743e-03 1.2454722e-02 8.6970669e-03 6.8936185e-04 2.4185836e-04 6.5755985e-04 5.6966351e-03 2.3035735e-04 9.8692354e-03 4.6426684e-03 3.3219514e-03 3.9792968e-03 5.6658817e-03 5.5182925e-03 6.1624599e-03 4.8454827e-03 6.5522795e-03 4.0009447e-03 1.4714328e-03 3.2975731e-03 2.5896752e-03 6.3872309e-03 9.0422907e-03 4.2582444e-03 2.2027730e-03 2.6736829e-03 1.1276803e-02 5.4428314e-03 3.3876877e-03 5.2005260e-03 7.2827649e-03 1.7342875e-03 2.7931367e-03 1.8984292e-03 1.2582586e-03 1.2661063e-03 5.3418398e-03 1.9890109e-03 4.4646783e-03 7.0136727e-04 6.0091268e-03 1.2000111e-03 6.3087140e-03 3.9600850e-03 6.7054154e-03 2.3290598e-03 1.3528267e-03 1.8394373e-03 4.8617299e-03 3.0806223e-03 4.6426684e-03 4.5595872e-03 5.5227532e-03 3.5904469e-03 3.7269772e-03 1.9354221e-03 5.7786823e-03 2.8379142e-03 1.6160265e-04 9.1316164e-04 4.5133200e-03 3.9636483e-04 1.2999342e-02 7.7990273e-03 6.7507696e-03 6.8565065e-03 9.1164934e-03 9.6187705e-03 8.8289255e-03 8.5171572e-03 1.1041936e-02 6.3870800e-03 3.1070828e-03 6.7336821e-03 5.5817760e-03 1.0377938e-02 1.2745315e-02 6.6393015e-03 4.6463353e-03 4.6865341e-03 1.7095703e-02 
[… remainder of vendored SciPy pdist reference matrix truncated: raw numeric test data, no prose content …]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cosine-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cosine-ml.txt
new file mode 100644
index 0000000..7c6b67f
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-cosine-ml.txt
@@ -0,0 +1 @@
+[… one line of condensed cosine-distance reference values truncated …]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-double-inp.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-double-inp.txt
new file mode 100644
index 0000000..7a77021
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-double-inp.txt
@@ -0,0 +1,20 @@
+[… 20 rows of double-precision input vectors truncated …]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt
new file mode 100644
index 0000000..86de3c7
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt
@@ -0,0 +1 @@
+[… one line of condensed Euclidean-distance reference values for the iris dataset truncated …]
4.3588989e-01 3.0000000e-01 6.4807407e-01 4.6904158e-01 5.9160798e-01 5.4772256e-01 3.1622777e-01 1.4142136e-01 1.4142136e-01 5.3851648e-01 5.3851648e-01 3.8729833e-01 6.2449980e-01 8.0622577e-01 4.6904158e-01 3.7416574e-01 4.1231056e-01 4.6904158e-01 8.6602540e-01 1.4142136e-01 1.7320508e-01 1.3490738e+00 7.6811457e-01 4.5825757e-01 6.1644140e-01 5.9160798e-01 3.6055513e-01 5.8309519e-01 3.0000000e-01 2.2360680e-01 4.0037482e+00 3.6166283e+00 4.1641326e+00 3.0935417e+00 3.7920970e+00 3.4161382e+00 3.7854986e+00 2.3452079e+00 3.7496667e+00 2.8879058e+00 2.7037012e+00 3.2280025e+00 3.1464265e+00 3.7000000e+00 2.5806976e+00 3.6276714e+00 3.4351128e+00 3.0099834e+00 3.7682887e+00 2.8827071e+00 3.8535698e+00 3.0757113e+00 4.0472213e+00 3.6578682e+00 3.4161382e+00 3.5972211e+00 4.0472213e+00 4.2449971e+00 3.5312887e+00 2.4939928e+00 2.8178006e+00 2.7018512e+00 2.8948230e+00 4.1352146e+00 3.4117444e+00 3.5199432e+00 3.9115214e+00 3.6180105e+00 3.0000000e+00 3.0215890e+00 3.3120990e+00 3.5958309e+00 3.0099834e+00 2.3874673e+00 3.1527766e+00 3.0740852e+00 3.1256999e+00 3.3451457e+00 2.0904545e+00 3.0577770e+00 5.2848841e+00 4.2083251e+00 5.3018865e+00 4.6904158e+00 5.0566788e+00 6.0950800e+00 3.5916570e+00 5.6364883e+00 5.0477718e+00 5.6391489e+00 4.3566042e+00 4.5199558e+00 4.8538644e+00 4.1904654e+00 4.4170126e+00 4.6260134e+00 4.6454279e+00 6.2401923e+00 6.4984614e+00 4.1412558e+00 5.1215232e+00 4.0286474e+00 6.2112801e+00 4.1097445e+00 4.9699095e+00 5.3122500e+00 3.9774364e+00 4.0074930e+00 4.8404545e+00 5.0970580e+00 5.5461698e+00 6.0141500e+00 4.8805737e+00 4.1605288e+00 4.5705580e+00 5.7887823e+00 4.8918299e+00 4.6065171e+00 3.8961519e+00 4.7968740e+00 5.0199602e+00 4.6368092e+00 4.2083251e+00 5.2573758e+00 5.1361464e+00 4.6540305e+00 4.2766810e+00 4.4598206e+00 4.6508064e+00 4.1400483e+00 3.0000000e-01 3.3166248e-01 6.0827625e-01 1.0908712e+00 5.0990195e-01 4.2426407e-01 5.0990195e-01 1.7320508e-01 8.6602540e-01 4.5825757e-01 1.4142136e-01 6.7823300e-01 1.3601471e+00 1.6278821e+00 1.0535654e+00 5.4772256e-01 1.1747340e+00 8.3666003e-01 7.0710678e-01 7.6157731e-01 7.8102497e-01 5.5677644e-01 6.4807407e-01 2.2360680e-01 5.0000000e-01 5.9160798e-01 5.0000000e-01 3.4641016e-01 2.4494897e-01 6.7823300e-01 1.1489125e+00 1.3416408e+00 1.7320508e-01 3.0000000e-01 7.8740079e-01 1.7320508e-01 5.0990195e-01 4.5825757e-01 5.2915026e-01 8.1853528e-01 5.4772256e-01 6.7823300e-01 9.8488578e-01 1.4142136e-01 8.4852814e-01 3.6055513e-01 8.1240384e-01 3.1622777e-01 4.0963398e+00 3.6864617e+00 4.2367440e+00 2.9698485e+00 3.8118237e+00 3.3911650e+00 3.8600518e+00 2.1470911e+00 3.7881394e+00 2.8053520e+00 2.4617067e+00 3.2449961e+00 3.0413813e+00 3.7121422e+00 2.5592968e+00 3.7000000e+00 3.4336569e+00 2.9715316e+00 3.6918830e+00 2.7928480e+00 3.8935845e+00 3.0740852e+00 4.0187063e+00 3.6565011e+00 3.4467376e+00 3.6510273e+00 4.0804412e+00 4.2953463e+00 3.5383612e+00 2.4186773e+00 2.7000000e+00 2.5787594e+00 2.8548205e+00 4.1170378e+00 3.3985291e+00 3.5972211e+00 3.9786933e+00 3.5580894e+00 2.9983329e+00 2.9291637e+00 3.2434549e+00 3.6221541e+00 2.9546573e+00 2.1794495e+00 3.1032241e+00 3.0789609e+00 3.1144823e+00 3.3645208e+00 1.9131126e+00 3.0298515e+00 5.3385391e+00 4.1809090e+00 5.3572381e+00 4.7085029e+00 5.0911688e+00 6.1595454e+00 3.4799425e+00 5.6868269e+00 5.0408333e+00 5.7471732e+00 4.4192760e+00 4.5210618e+00 4.9020404e+00 4.1340053e+00 4.4022721e+00 4.6808119e+00 4.6829478e+00 6.3694584e+00 6.5314623e+00 4.0620192e+00 5.1903757e+00 4.0024992e+00 6.2617889e+00 4.1060930e+00 5.0428167e+00 
5.3898052e+00 3.9812058e+00 4.0311289e+00 4.8518038e+00 5.1584882e+00 5.5919585e+00 6.1546730e+00 4.8918299e+00 4.1689327e+00 4.5475268e+00 5.8600341e+00 4.9598387e+00 4.6508064e+00 3.9153544e+00 4.8600412e+00 5.0724747e+00 4.7021272e+00 4.1809090e+00 5.3207142e+00 5.2067264e+00 4.7000000e+00 4.2497059e+00 4.4988888e+00 4.7180504e+00 4.1533119e+00 2.4494897e-01 5.0990195e-01 1.0862780e+00 2.6457513e-01 4.1231056e-01 4.3588989e-01 3.1622777e-01 8.8317609e-01 3.7416574e-01 2.6457513e-01 5.0000000e-01 1.3638182e+00 1.5874508e+00 1.0099505e+00 5.1961524e-01 1.2369317e+00 7.5498344e-01 8.3066239e-01 7.0000000e-01 5.0990195e-01 6.4807407e-01 6.4031242e-01 4.6904158e-01 5.0990195e-01 6.1644140e-01 5.4772256e-01 3.0000000e-01 3.3166248e-01 7.8102497e-01 1.0535654e+00 1.2845233e+00 3.1622777e-01 3.1622777e-01 8.5440037e-01 3.1622777e-01 3.6055513e-01 4.8989795e-01 4.3588989e-01 9.2736185e-01 3.0000000e-01 6.5574385e-01 9.5916630e-01 2.6457513e-01 7.8102497e-01 1.4142136e-01 8.0622577e-01 3.3166248e-01 4.2766810e+00 3.8496753e+00 4.4158804e+00 3.1543621e+00 3.9974992e+00 3.5510562e+00 4.0112342e+00 2.3065125e+00 3.9749214e+00 2.9495762e+00 2.6476405e+00 3.4029399e+00 3.2588341e+00 3.8794329e+00 2.7202941e+00 3.8807216e+00 3.5749126e+00 3.1527766e+00 3.8961519e+00 2.9782545e+00 4.0311289e+00 3.2588341e+00 4.2071368e+00 3.8314488e+00 3.6318040e+00 3.8340579e+00 4.2731721e+00 4.4698993e+00 3.7027017e+00 2.6153394e+00 2.8879058e+00 2.7712813e+00 3.0364453e+00 4.2825226e+00 3.5298725e+00 3.7322915e+00 4.1545156e+00 3.7669616e+00 3.1464265e+00 3.1032241e+00 3.4073450e+00 3.7854986e+00 3.1400637e+00 2.3537205e+00 3.2680269e+00 3.2326460e+00 3.2726136e+00 3.5425979e+00 2.0856654e+00 3.1953091e+00 5.4726593e+00 4.3347434e+00 5.5290144e+00 4.8682646e+00 5.2469038e+00 6.3364028e+00 3.6083237e+00 5.8660038e+00 5.2249402e+00 5.8940648e+00 4.5738387e+00 4.6936127e+00 5.0695167e+00 4.2918527e+00 4.5442271e+00 4.8270074e+00 4.8456166e+00 6.5207362e+00 6.7178866e+00 4.2508823e+00 5.3488316e+00 4.1436699e+00 6.4467046e+00 4.2813549e+00 5.1942276e+00 5.5587768e+00 4.1496988e+00 4.1856899e+00 5.0149776e+00 5.3385391e+00 5.7775427e+00 6.3126856e+00 5.0537115e+00 4.3416587e+00 4.7169906e+00 6.0406953e+00 5.0921508e+00 4.8062459e+00 4.0669399e+00 5.0269275e+00 5.2287666e+00 4.8682646e+00 4.3347434e+00 5.4753995e+00 5.3535035e+00 4.8641546e+00 4.4305756e+00 4.6615448e+00 4.8487112e+00 4.2988371e+00 6.4807407e-01 1.1661904e+00 3.3166248e-01 5.0000000e-01 3.0000000e-01 3.1622777e-01 1.0000000e+00 3.7416574e-01 2.6457513e-01 5.1961524e-01 1.5297059e+00 1.7146428e+00 1.1661904e+00 6.5574385e-01 1.3228757e+00 8.6602540e-01 8.7749644e-01 8.0622577e-01 7.0710678e-01 6.4807407e-01 5.3851648e-01 4.2426407e-01 5.4772256e-01 7.2111026e-01 6.7823300e-01 1.7320508e-01 2.2360680e-01 8.7749644e-01 1.1704700e+00 1.4247807e+00 3.1622777e-01 5.0990195e-01 1.0049876e+00 3.1622777e-01 3.0000000e-01 5.8309519e-01 6.0827625e-01 8.3666003e-01 3.0000000e-01 7.0000000e-01 9.6953597e-01 2.6457513e-01 8.6602540e-01 1.4142136e-01 9.2195445e-01 4.5825757e-01 4.1773197e+00 3.7336309e+00 4.3058100e+00 2.9849623e+00 3.8729833e+00 3.3926391e+00 3.8897301e+00 2.1118712e+00 3.8548671e+00 2.7784888e+00 2.4515301e+00 3.2680269e+00 3.1080541e+00 3.7376463e+00 2.5806976e+00 3.7762415e+00 3.4205263e+00 3.0000000e+00 3.7496667e+00 2.8160256e+00 3.8923001e+00 3.1304952e+00 4.0620192e+00 3.6851052e+00 3.5114100e+00 3.7229021e+00 4.1545156e+00 4.3497126e+00 3.5623026e+00 2.4698178e+00 2.7202941e+00 2.6038433e+00 2.8913665e+00 4.1279535e+00 3.3674916e+00 
3.6069378e+00 4.0422766e+00 3.6262929e+00 2.9966648e+00 2.9376862e+00 3.2357379e+00 3.6482873e+00 2.9899833e+00 2.1633308e+00 3.1080541e+00 3.0838288e+00 3.1224990e+00 3.4132096e+00 1.9157244e+00 3.0446675e+00 5.3357286e+00 4.1773197e+00 5.4064776e+00 4.7222876e+00 5.1097945e+00 6.2153037e+00 3.4205263e+00 5.7384667e+00 5.0813384e+00 5.7844619e+00 4.4519659e+00 4.5530210e+00 4.9457052e+00 4.1303753e+00 4.3965896e+00 4.7010637e+00 4.7095647e+00 6.4140471e+00 6.5901442e+00 4.0877867e+00 5.2297227e+00 3.9862263e+00 6.3229740e+00 4.1436699e+00 5.0695167e+00 5.4387499e+00 4.0124805e+00 4.0472213e+00 4.8733972e+00 5.2172790e+00 5.6550862e+00 6.2153037e+00 4.9132474e+00 4.1988094e+00 4.5552168e+00 5.9321160e+00 4.9628621e+00 4.6690470e+00 3.9268308e+00 4.9101935e+00 5.1048996e+00 4.7602521e+00 4.1773197e+00 5.3497664e+00 5.2325902e+00 4.7455242e+00 4.2883563e+00 4.5332108e+00 4.7191101e+00 4.1496988e+00 6.1644140e-01 4.5825757e-01 2.2360680e-01 9.2195445e-01 5.2915026e-01 4.2426407e-01 3.4641016e-01 6.4031242e-01 9.7467943e-01 9.1651514e-01 1.0862780e+00 5.4772256e-01 1.7320508e-01 7.9372539e-01 2.6457513e-01 5.3851648e-01 2.6457513e-01 5.6568542e-01 5.2915026e-01 5.7445626e-01 6.3245553e-01 3.4641016e-01 2.4494897e-01 2.8284271e-01 5.3851648e-01 5.7445626e-01 5.0000000e-01 5.5677644e-01 7.8102497e-01 5.2915026e-01 4.4721360e-01 5.1961524e-01 5.2915026e-01 8.5440037e-01 2.4494897e-01 1.7320508e-01 1.4000000e+00 7.2801099e-01 4.5825757e-01 5.8309519e-01 6.4031242e-01 3.0000000e-01 5.6568542e-01 3.3166248e-01 3.0000000e-01 4.0607881e+00 3.6633318e+00 4.2190046e+00 3.1480152e+00 3.8496753e+00 3.4568772e+00 3.8249183e+00 2.3874673e+00 3.8078866e+00 2.9223278e+00 2.7586228e+00 3.2710854e+00 3.2186954e+00 3.7456642e+00 2.6267851e+00 3.6851052e+00 3.4669872e+00 3.0626786e+00 3.8340579e+00 2.9376862e+00 3.8845849e+00 3.1336879e+00 4.1036569e+00 3.7067506e+00 3.4741906e+00 3.6551334e+00 4.1085277e+00 4.2965102e+00 3.5763109e+00 2.5573424e+00 2.8740216e+00 2.7604347e+00 2.9495762e+00 4.1785165e+00 3.4380227e+00 3.5510562e+00 3.9648455e+00 3.6864617e+00 3.0364453e+00 3.0708305e+00 3.3541020e+00 3.6400549e+00 3.0659419e+00 2.4372115e+00 3.1968735e+00 3.1128765e+00 3.1670175e+00 3.3985291e+00 2.1424285e+00 3.1032241e+00 5.3131911e+00 4.2461747e+00 5.3507009e+00 4.7307505e+00 5.0960769e+00 6.1457302e+00 3.6166283e+00 5.6877060e+00 5.1009803e+00 5.6762664e+00 4.3977267e+00 4.5683695e+00 4.9010203e+00 4.2308392e+00 4.4508426e+00 4.6626173e+00 4.6882833e+00 6.2785349e+00 6.5536250e+00 4.1964271e+00 5.1643005e+00 4.0607881e+00 6.2657801e+00 4.1605288e+00 5.0079936e+00 5.3591044e+00 4.0249224e+00 4.0472213e+00 4.8836462e+00 5.1497573e+00 5.6017854e+00 6.0572271e+00 4.9234135e+00 4.2083251e+00 4.6141088e+00 5.8438001e+00 4.9203658e+00 4.6454279e+00 3.9344631e+00 4.8445846e+00 5.0616203e+00 4.6861498e+00 4.2461747e+00 5.2971691e+00 5.1730069e+00 4.7010637e+00 4.3301270e+00 4.5044423e+00 4.6786750e+00 4.1737274e+00 9.9498744e-01 7.0000000e-01 1.4594520e+00 1.0099505e+00 3.4641016e-01 8.1240384e-01 1.1618950e+00 1.5716234e+00 6.7823300e-01 6.1644140e-01 4.0000000e-01 5.9160798e-01 3.3166248e-01 3.8729833e-01 5.3851648e-01 4.1231056e-01 1.1224972e+00 6.7823300e-01 8.3066239e-01 1.0099505e+00 6.4807407e-01 5.2915026e-01 6.4807407e-01 1.0148892e+00 1.0246951e+00 5.3851648e-01 4.5825757e-01 4.7958315e-01 1.0099505e+00 9.6953597e-01 6.0827625e-01 1.0099505e+00 1.4177447e+00 6.4807407e-01 7.0000000e-01 1.8814888e+00 1.3000000e+00 6.0827625e-01 3.7416574e-01 1.1269428e+00 3.8729833e-01 1.1224972e+00 3.6055513e-01 
8.0622577e-01 3.6124784e+00 3.2465366e+00 3.7868192e+00 2.9444864e+00 3.4698703e+00 3.1543621e+00 3.4073450e+00 2.3280893e+00 3.4146742e+00 2.7055499e+00 2.7147744e+00 2.9189039e+00 2.9832868e+00 3.3896903e+00 2.3366643e+00 3.2588341e+00 3.1464265e+00 2.7784888e+00 3.5468296e+00 2.7073973e+00 3.5085610e+00 2.7928480e+00 3.7709415e+00 3.3674916e+00 3.0935417e+00 3.2465366e+00 3.7121422e+00 3.8832976e+00 3.2264532e+00 2.3194827e+00 2.6758176e+00 2.5729361e+00 2.6608269e+00 3.8470768e+00 3.1400637e+00 3.1448370e+00 3.5411862e+00 3.3867388e+00 2.7239677e+00 2.8407745e+00 3.1032241e+00 3.2726136e+00 2.7892651e+00 2.3748684e+00 2.9223278e+00 2.7910571e+00 2.8548205e+00 3.0347982e+00 2.0566964e+00 2.8053520e+00 4.9061186e+00 3.9255573e+00 4.9223978e+00 4.3566042e+00 4.6978719e+00 5.7052607e+00 3.4263683e+00 5.2659282e+00 4.7349762e+00 5.2057660e+00 3.9774364e+00 4.2011903e+00 4.4833024e+00 3.9370039e+00 4.1146081e+00 4.2497059e+00 4.2918527e+00 5.7913729e+00 6.1343296e+00 3.9179076e+00 4.7275787e+00 3.7483330e+00 5.8360946e+00 3.8013156e+00 4.5760245e+00 4.9173163e+00 3.6633318e+00 3.6742346e+00 4.5066617e+00 4.7222876e+00 5.1788030e+00 5.5596762e+00 4.5453273e+00 3.8457769e+00 4.2883563e+00 5.3916602e+00 4.5022217e+00 4.2473521e+00 3.5693137e+00 4.4124823e+00 4.6411206e+00 4.2497059e+00 3.9255573e+00 4.8682646e+00 4.7391982e+00 4.2848571e+00 3.9887341e+00 4.1024383e+00 4.2649736e+00 3.8183766e+00 4.2426407e-01 5.4772256e-01 4.7958315e-01 8.6602540e-01 3.0000000e-01 4.8989795e-01 6.1644140e-01 1.3601471e+00 1.4933185e+00 9.5393920e-01 5.0990195e-01 1.2083046e+00 6.4807407e-01 8.6023253e-01 6.0000000e-01 4.5825757e-01 6.2449980e-01 5.4772256e-01 6.0827625e-01 4.5825757e-01 6.2449980e-01 6.0827625e-01 3.1622777e-01 4.2426407e-01 8.1240384e-01 9.4868330e-01 1.2083046e+00 4.7958315e-01 5.0000000e-01 9.1651514e-01 4.7958315e-01 4.6904158e-01 5.1961524e-01 4.2426407e-01 1.1090537e+00 3.1622777e-01 5.4772256e-01 8.1853528e-01 4.4721360e-01 6.7823300e-01 2.2360680e-01 7.7459667e-01 4.2426407e-01 4.2308392e+00 3.7854986e+00 4.3669211e+00 3.1272992e+00 3.9560081e+00 3.4899857e+00 3.9344631e+00 2.2781571e+00 3.9357337e+00 2.8827071e+00 2.6495283e+00 3.3361655e+00 3.2634338e+00 3.8209946e+00 2.6627054e+00 3.8353618e+00 3.4942810e+00 3.1160873e+00 3.8794329e+00 2.9495762e+00 3.9420807e+00 3.2202484e+00 4.1701319e+00 3.7828561e+00 3.5916570e+00 3.7907783e+00 4.2391037e+00 4.4147480e+00 3.6414283e+00 2.5980762e+00 2.8653098e+00 2.7549955e+00 2.9983329e+00 4.2225585e+00 3.4423829e+00 3.6414283e+00 4.1024383e+00 3.7549967e+00 3.0740852e+00 3.0626786e+00 3.3555923e+00 3.7229021e+00 3.1064449e+00 2.3388031e+00 3.2140317e+00 3.1654384e+00 3.2093613e+00 3.4957117e+00 2.0639767e+00 3.1400637e+00 5.3758720e+00 4.2638011e+00 5.4680892e+00 4.7989582e+00 5.1710734e+00 6.2801274e+00 3.5312887e+00 5.8137767e+00 5.1797683e+00 5.8077534e+00 4.4977772e+00 4.6368092e+00 5.0049975e+00 4.2272923e+00 4.4609416e+00 4.7423623e+00 4.7780749e+00 6.4397205e+00 6.6708320e+00 4.2190046e+00 5.2744668e+00 4.0620192e+00 6.3992187e+00 4.2284749e+00 5.1137071e+00 5.4963624e+00 4.0902323e+00 4.1121770e+00 4.9477268e+00 5.2886671e+00 5.7314920e+00 6.2401923e+00 4.9849774e+00 4.2871902e+00 4.6626173e+00 5.9883220e+00 4.9939964e+00 4.7318073e+00 3.9912404e+00 4.9618545e+00 5.1526692e+00 4.8031240e+00 4.2638011e+00 5.3972215e+00 5.2678269e+00 4.7968740e+00 4.3840620e+00 4.5934736e+00 4.7497368e+00 4.2178193e+00 7.8740079e-01 3.3166248e-01 5.0000000e-01 2.2360680e-01 4.6904158e-01 9.0553851e-01 1.0440307e+00 1.2369317e+00 7.0000000e-01 
2.0000000e-01 8.3666003e-01 4.2426407e-01 4.4721360e-01 3.7416574e-01 6.7082039e-01 3.8729833e-01 4.4721360e-01 4.1231056e-01 2.2360680e-01 2.2360680e-01 2.2360680e-01 3.7416574e-01 3.7416574e-01 4.4721360e-01 7.3484692e-01 9.4868330e-01 3.3166248e-01 3.6055513e-01 5.4772256e-01 3.3166248e-01 7.4833148e-01 1.0000000e-01 2.4494897e-01 1.2288206e+00 6.6332496e-01 4.2426407e-01 6.0827625e-01 4.6904158e-01 4.2426407e-01 4.5825757e-01 4.2426407e-01 1.4142136e-01 3.9648455e+00 3.5623026e+00 4.1170378e+00 2.9866369e+00 3.7296112e+00 3.3256578e+00 3.7282704e+00 2.2113344e+00 3.6918830e+00 2.7802878e+00 2.5690465e+00 3.1543621e+00 3.0545049e+00 3.6249138e+00 2.4959968e+00 3.5818989e+00 3.3481338e+00 2.9206164e+00 3.6837481e+00 2.7820855e+00 3.7815341e+00 3.0049958e+00 3.9686270e+00 3.5791060e+00 3.3555923e+00 3.5454196e+00 3.9912404e+00 4.1892720e+00 3.4554305e+00 2.4020824e+00 2.7110883e+00 2.5942244e+00 2.8089144e+00 4.0509258e+00 3.3181320e+00 3.4583233e+00 3.8613469e+00 3.5383612e+00 2.9137605e+00 2.9189039e+00 3.2093613e+00 3.5242020e+00 2.9206164e+00 2.2561028e+00 3.0577770e+00 2.9899833e+00 3.0397368e+00 3.2771939e+00 1.9697716e+00 2.9698485e+00 5.2191953e+00 4.1206796e+00 5.2478567e+00 4.6162756e+00 4.9899900e+00 6.0448325e+00 3.4741906e+00 5.5803226e+00 4.9749372e+00 5.5973208e+00 4.3000000e+00 4.4474712e+00 4.7968740e+00 4.0975602e+00 4.3358967e+00 4.5661800e+00 4.5793013e+00 6.2040309e+00 6.4420494e+00 4.0472213e+00 5.0695167e+00 3.9395431e+00 6.1587336e+00 4.0373258e+00 4.9142650e+00 5.2621288e+00 3.9051248e+00 3.9357337e+00 4.7686476e+00 5.0447993e+00 5.4927225e+00 5.9849812e+00 4.8093659e+00 4.0865633e+00 4.4833024e+00 5.7463032e+00 4.8311489e+00 4.5398238e+00 3.8223030e+00 4.7455242e+00 4.9628621e+00 4.5902070e+00 4.1206796e+00 5.2009614e+00 5.0823223e+00 4.5989129e+00 4.2000000e+00 4.3977267e+00 4.5891176e+00 4.0607881e+00 5.5677644e-01 1.2845233e+00 6.7082039e-01 4.2426407e-01 3.4641016e-01 1.7916473e+00 1.9974984e+00 1.4317821e+00 9.2736185e-01 1.6124515e+00 1.1489125e+00 1.1575837e+00 1.0862780e+00 8.3066239e-01 9.1104336e-01 8.1240384e-01 6.4031242e-01 8.3066239e-01 1.0049876e+00 9.4339811e-01 4.6904158e-01 4.8989795e-01 1.1401754e+00 1.4491377e+00 1.7029386e+00 5.5677644e-01 7.0000000e-01 1.2569805e+00 5.5677644e-01 1.4142136e-01 8.6602540e-01 8.6023253e-01 6.2449980e-01 3.1622777e-01 9.5916630e-01 1.2609520e+00 4.2426407e-01 1.1575837e+00 3.6055513e-01 1.2083046e+00 7.2111026e-01 4.3794977e+00 3.9230090e+00 4.4977772e+00 3.0886890e+00 4.0435133e+00 3.5383612e+00 4.0767634e+00 2.1794495e+00 4.0360872e+00 2.8930952e+00 2.4939928e+00 3.4336569e+00 3.2326460e+00 3.9012818e+00 2.7367864e+00 3.9711459e+00 3.5707142e+00 3.1511903e+00 3.8768544e+00 2.9427878e+00 4.0570926e+00 3.2969683e+00 4.2083251e+00 3.8457769e+00 3.6905284e+00 3.9102430e+00 4.3324358e+00 4.5287967e+00 3.7229021e+00 2.6134269e+00 2.8337255e+00 2.7184554e+00 3.0413813e+00 4.2720019e+00 3.5085610e+00 3.7920970e+00 4.2320208e+00 3.7656341e+00 3.1543621e+00 3.0561414e+00 3.3615473e+00 3.8183766e+00 3.1320920e+00 2.2293497e+00 3.2449961e+00 3.2465366e+00 3.2771939e+00 3.5860842e+00 2.0049938e+00 3.1937439e+00 5.4972721e+00 4.3104524e+00 5.5821143e+00 4.8795492e+00 5.2706736e+00 6.3953108e+00 3.5028560e+00 5.9143892e+00 5.2316345e+00 5.9757845e+00 4.6292548e+00 4.7053161e+00 5.1176166e+00 4.2485292e+00 4.5276926e+00 4.8692915e+00 4.8774994e+00 6.6174013e+00 6.7557383e+00 4.2071368e+00 5.4074023e+00 4.1158231e+00 6.4984614e+00 4.2965102e+00 5.2488094e+00 5.6258333e+00 4.1677332e+00 4.2083251e+00 5.0259327e+00 
5.4009258e+00 5.8300943e+00 6.4265076e+00 5.0645829e+00 4.3588989e+00 4.6968074e+00 6.1155539e+00 5.1322510e+00 4.8383882e+00 4.0853396e+00 5.0892043e+00 5.2735187e+00 4.9386233e+00 4.3104524e+00 5.5235858e+00 5.4064776e+00 4.9142650e+00 4.4294469e+00 4.7010637e+00 4.8887626e+00 4.3023250e+00 7.8740079e-01 3.4641016e-01 1.7320508e-01 7.2801099e-01 1.3114877e+00 1.5556349e+00 1.0099505e+00 5.0000000e-01 1.1000000e+00 7.5498344e-01 6.2449980e-01 7.0000000e-01 7.7459667e-01 5.2915026e-01 5.1961524e-01 2.0000000e-01 4.4721360e-01 5.0990195e-01 4.4721360e-01 2.6457513e-01 1.7320508e-01 6.5574385e-01 1.0440307e+00 1.2609520e+00 0.0000000e+00 3.4641016e-01 7.5498344e-01 0.0000000e+00 5.5677644e-01 3.7416574e-01 5.0000000e-01 9.3808315e-01 5.5677644e-01 6.5574385e-01 8.8317609e-01 2.6457513e-01 7.4161985e-01 3.4641016e-01 7.2801099e-01 2.6457513e-01 4.0435133e+00 3.6359318e+00 4.1856899e+00 2.9478806e+00 3.7709415e+00 3.3421550e+00 3.8065733e+00 2.1307276e+00 3.7389838e+00 2.7748874e+00 2.4556058e+00 3.2031235e+00 3.0133038e+00 3.6619667e+00 2.5258662e+00 3.6523965e+00 3.3852622e+00 2.9223278e+00 3.6687873e+00 2.7586228e+00 3.8457769e+00 3.0364453e+00 3.9799497e+00 3.6027767e+00 3.4014703e+00 3.6055513e+00 4.0348482e+00 4.2497059e+00 3.4942810e+00 2.3874673e+00 2.6720778e+00 2.5495098e+00 2.8178006e+00 4.0718546e+00 3.3496268e+00 3.5425979e+00 3.9293765e+00 3.5284558e+00 2.9495762e+00 2.9000000e+00 3.1984371e+00 3.5707142e+00 2.9189039e+00 2.1679483e+00 3.0626786e+00 3.0248967e+00 3.0675723e+00 3.3181320e+00 1.9104973e+00 2.9883106e+00 5.2924474e+00 4.1436699e+00 5.3113087e+00 4.6583259e+00 5.0467812e+00 6.1081912e+00 3.4525353e+00 5.6329388e+00 4.9979996e+00 5.6973678e+00 4.3749286e+00 4.4821870e+00 4.8600412e+00 4.1060930e+00 4.3760713e+00 4.6411206e+00 4.6324939e+00 6.3071388e+00 6.4876806e+00 4.0286474e+00 5.1468437e+00 3.9686270e+00 6.2112801e+00 4.0706265e+00 4.9919936e+00 5.3329167e+00 3.9446166e+00 3.9874804e+00 4.8114447e+00 5.1029403e+00 5.5443665e+00 6.0917978e+00 4.8538644e+00 4.1194660e+00 4.4933284e+00 5.8180753e+00 4.9142650e+00 4.5978256e+00 3.8729833e+00 4.8176758e+00 5.0338852e+00 4.6690470e+00 4.1436699e+00 5.2744668e+00 5.1652686e+00 4.6669048e+00 4.2201896e+00 4.4575778e+00 4.6722586e+00 4.1060930e+00 6.7823300e-01 9.3273791e-01 1.3674794e+00 5.8309519e-01 7.8740079e-01 3.4641016e-01 3.8729833e-01 3.8729833e-01 3.3166248e-01 3.6055513e-01 3.6055513e-01 9.4868330e-01 6.1644140e-01 7.8102497e-01 8.1240384e-01 5.4772256e-01 2.8284271e-01 3.7416574e-01 8.6602540e-01 8.5440037e-01 3.6055513e-01 4.5825757e-01 5.1961524e-01 7.8740079e-01 7.0710678e-01 3.0000000e-01 7.8740079e-01 1.2369317e+00 4.2426407e-01 5.0000000e-01 1.6792856e+00 1.1357817e+00 6.0827625e-01 5.4772256e-01 9.3273791e-01 3.3166248e-01 9.4868330e-01 1.0000000e-01 5.7445626e-01 3.8065733e+00 3.4554305e+00 3.9824616e+00 3.0708305e+00 3.6496575e+00 3.3331667e+00 3.6290495e+00 2.4124676e+00 3.5916570e+00 2.8705400e+00 2.7730849e+00 3.1176915e+00 3.0822070e+00 3.5791060e+00 2.5099801e+00 3.4496377e+00 3.3496268e+00 2.9257478e+00 3.6851052e+00 2.8372522e+00 3.7349699e+00 2.9597297e+00 3.9370039e+00 3.5411862e+00 3.2695565e+00 3.4322005e+00 3.8858718e+00 4.0841156e+00 3.4190642e+00 2.4372115e+00 2.7928480e+00 2.6795522e+00 2.8142495e+00 4.0348482e+00 3.3436507e+00 3.3778692e+00 3.7389838e+00 3.5199432e+00 2.9154759e+00 2.9849623e+00 3.2603681e+00 3.4684290e+00 2.9359837e+00 2.4494897e+00 3.0886890e+00 2.9782545e+00 3.0380915e+00 3.2140317e+00 2.1424285e+00 2.9782545e+00 5.1487863e+00 4.1243181e+00 5.1332251e+00 
4.5628938e+00 4.9183331e+00 5.9118525e+00 3.5972211e+00 5.4635154e+00 4.9173163e+00 5.4497706e+00 4.2023803e+00 4.3965896e+00 4.6968074e+00 4.1255303e+00 4.3324358e+00 4.4833024e+00 4.5011110e+00 6.0282667e+00 6.3300869e+00 4.0681691e+00 4.9547957e+00 3.9560081e+00 6.0315835e+00 3.9912404e+00 4.8062459e+00 5.1283526e+00 3.8600518e+00 3.8858718e+00 4.7148701e+00 4.9173163e+00 5.3721504e+00 5.7887823e+00 4.7560488e+00 4.0336088e+00 4.4665423e+00 5.5991071e+00 4.7486840e+00 4.4631827e+00 3.7815341e+00 4.6292548e+00 4.8682646e+00 4.4698993e+00 4.1243181e+00 5.0970580e+00 4.9779514e+00 4.5033321e+00 4.1701319e+00 4.3162484e+00 4.5110974e+00 4.0323690e+00 4.5825757e-01 8.1853528e-01 1.2328828e+00 1.3638182e+00 8.6023253e-01 3.8729833e-01 9.9498744e-01 5.1961524e-01 6.0827625e-01 4.7958315e-01 6.6332496e-01 4.4721360e-01 3.0000000e-01 4.4721360e-01 2.8284271e-01 4.2426407e-01 4.4721360e-01 2.2360680e-01 3.0000000e-01 6.4031242e-01 8.1853528e-01 1.0816654e+00 3.4641016e-01 4.8989795e-01 7.6811457e-01 3.4641016e-01 6.4031242e-01 3.1622777e-01 3.8729833e-01 1.1832160e+00 5.3851648e-01 4.5825757e-01 6.1644140e-01 4.5825757e-01 5.0000000e-01 3.4641016e-01 5.9160798e-01 3.0000000e-01 3.9912404e+00 3.5637059e+00 4.1327957e+00 2.9444864e+00 3.7336309e+00 3.2848135e+00 3.7188708e+00 2.1307276e+00 3.7013511e+00 2.7166155e+00 2.5000000e+00 3.1336879e+00 3.0463092e+00 3.6041643e+00 2.4698178e+00 3.6027767e+00 3.3015148e+00 2.8948230e+00 3.6742346e+00 2.7477263e+00 3.7483330e+00 3.0033315e+00 3.9547440e+00 3.5580894e+00 3.3630343e+00 3.5608988e+00 4.0049969e+00 4.1928511e+00 3.4336569e+00 2.3874673e+00 2.6720778e+00 2.5573424e+00 2.7892651e+00 4.0174619e+00 3.2588341e+00 3.4365681e+00 3.8729833e+00 3.5369478e+00 2.8740216e+00 2.8757608e+00 3.1575307e+00 3.5057096e+00 2.8982753e+00 2.1863211e+00 3.0166206e+00 2.9546573e+00 3.0049958e+00 3.2726136e+00 1.9157244e+00 2.9376862e+00 5.1874849e+00 4.0779897e+00 5.2488094e+00 4.5891176e+00 4.9689033e+00 6.0506198e+00 3.3882149e+00 5.5812185e+00 4.9618545e+00 5.5982140e+00 4.2918527e+00 4.4305756e+00 4.7937459e+00 4.0521599e+00 4.2953463e+00 4.5497253e+00 4.5628938e+00 6.2112801e+00 6.4459289e+00 4.0162171e+00 5.0665570e+00 3.8897301e+00 6.1660360e+00 4.0236799e+00 4.9030603e+00 5.2649786e+00 3.8884444e+00 3.9115214e+00 4.7465777e+00 5.0517324e+00 5.5009090e+00 6.0041652e+00 4.7874837e+00 4.0681691e+00 4.4463468e+00 5.7645468e+00 4.8052055e+00 4.5188494e+00 3.7947332e+00 4.7486840e+00 4.9537864e+00 4.6000000e+00 4.0779897e+00 5.1903757e+00 5.0714889e+00 4.5978256e+00 4.1844952e+00 4.3874822e+00 4.5617979e+00 4.0224371e+00 5.8309519e-01 1.4317821e+00 1.6941074e+00 1.1269428e+00 6.1644140e-01 1.2569805e+00 8.8317609e-01 7.8740079e-01 8.2462113e-01 7.5498344e-01 6.5574385e-01 6.4807407e-01 3.0000000e-01 5.7445626e-01 6.5574385e-01 5.7445626e-01 3.1622777e-01 2.4494897e-01 7.8740079e-01 1.1747340e+00 1.3928388e+00 1.7320508e-01 3.6055513e-01 8.7177979e-01 1.7320508e-01 4.2426407e-01 5.1961524e-01 5.8309519e-01 7.9372539e-01 4.6904158e-01 7.6157731e-01 1.0344080e+00 2.0000000e-01 8.8317609e-01 3.0000000e-01 8.7177979e-01 3.7416574e-01 4.1785165e+00 3.7643060e+00 4.3162484e+00 3.0298515e+00 3.8897301e+00 3.4496377e+00 3.9344631e+00 2.1886069e+00 3.8639358e+00 2.8618176e+00 2.5019992e+00 3.3181320e+00 3.1064449e+00 3.7788887e+00 2.6324893e+00 3.7828561e+00 3.4942810e+00 3.0315013e+00 3.7643060e+00 2.8530685e+00 3.9623226e+00 3.1511903e+00 4.0877867e+00 3.7188708e+00 3.5242020e+00 3.7322915e+00 4.1581246e+00 4.3737855e+00 3.6083237e+00 2.4879711e+00 2.7586228e+00 
2.6362853e+00 2.9240383e+00 4.1797129e+00 3.4539832e+00 3.6687873e+00 4.0583248e+00 3.6304270e+00 3.0610456e+00 2.9899833e+00 3.2954514e+00 3.6905284e+00 3.0215890e+00 2.2248595e+00 3.1638584e+00 3.1400637e+00 3.1780497e+00 3.4380227e+00 1.9748418e+00 3.0951575e+00 5.4092513e+00 4.2449971e+00 5.4350713e+00 4.7738873e+00 5.1633323e+00 6.2353829e+00 3.5256205e+00 5.7584720e+00 5.1097945e+00 5.8283788e+00 4.4977772e+00 4.5934736e+00 4.9809638e+00 4.1988094e+00 4.4743715e+00 4.7592016e+00 4.7528939e+00 6.4459289e+00 6.6075714e+00 4.1231056e+00 5.2706736e+00 4.0669399e+00 6.3364028e+00 4.1809090e+00 5.1176166e+00 5.4635154e+00 4.0558600e+00 4.1024383e+00 4.9234135e+00 5.2316345e+00 5.6683331e+00 6.2337790e+00 4.9648766e+00 4.2355637e+00 4.6021734e+00 5.9447456e+00 5.0338852e+00 4.7191101e+00 3.9862263e+00 4.9416596e+00 5.1526692e+00 4.7906158e+00 4.2449971e+00 5.3972215e+00 5.2867760e+00 4.7843495e+00 4.3243497e+00 4.5760245e+00 4.7916594e+00 4.2178193e+00 1.8083141e+00 2.0420578e+00 1.4662878e+00 1.0099505e+00 1.7320508e+00 1.2165525e+00 1.3190906e+00 1.1747340e+00 6.8556546e-01 1.1180340e+00 1.0295630e+00 8.6602540e-01 9.9498744e-01 1.1090537e+00 1.0344080e+00 6.7823300e-01 7.2111026e-01 1.2727922e+00 1.4764823e+00 1.7262677e+00 7.2801099e-01 7.4161985e-01 1.3190906e+00 7.2801099e-01 2.4494897e-01 9.8488578e-01 9.0553851e-01 7.8102497e-01 3.1622777e-01 1.1135529e+00 1.4177447e+00 6.1644140e-01 1.2409674e+00 4.7958315e-01 1.2884099e+00 8.2462113e-01 4.6882833e+00 4.2391037e+00 4.8135226e+00 3.4322005e+00 4.3692105e+00 3.8729833e+00 4.3931765e+00 2.5238859e+00 4.3577517e+00 3.2295511e+00 2.8390139e+00 3.7589892e+00 3.5707142e+00 4.2308392e+00 3.0643107e+00 4.2836900e+00 3.9000000e+00 3.4856850e+00 4.2154478e+00 3.2832910e+00 4.3794977e+00 3.6235342e+00 4.5442271e+00 4.1773197e+00 4.0124805e+00 4.2272923e+00 4.6551047e+00 4.8507731e+00 4.0521599e+00 2.9478806e+00 3.1764760e+00 3.0610456e+00 3.3749074e+00 4.6076024e+00 3.8379682e+00 4.1060930e+00 4.5486262e+00 4.1012193e+00 3.4828150e+00 3.3970576e+00 3.7013511e+00 4.1448764e+00 3.4684290e+00 2.5748786e+00 3.5818989e+00 3.5749126e+00 3.6083237e+00 3.9115214e+00 2.3452079e+00 3.5270384e+00 5.8189346e+00 4.6454279e+00 5.9059292e+00 5.2105662e+00 5.5982140e+00 6.7186308e+00 3.8379682e+00 6.2401923e+00 5.5668663e+00 6.2872888e+00 4.9487372e+00 5.0378567e+00 5.4415071e+00 4.5858478e+00 4.8559242e+00 5.1894123e+00 5.2048055e+00 6.9260378e+00 7.0851958e+00 4.5497253e+00 5.7271284e+00 4.4474712e+00 6.8242216e+00 4.6281746e+00 5.5686623e+00 5.9455866e+00 4.4977772e+00 4.5354162e+00 5.3572381e+00 5.7227616e+00 6.1554854e+00 6.7305275e+00 5.3953684e+00 4.6904158e+00 5.0338852e+00 6.4342832e+00 5.4497706e+00 5.1643005e+00 4.4124823e+00 5.4092513e+00 5.5955339e+00 5.2545219e+00 4.6454279e+00 5.8455111e+00 5.7245087e+00 5.2354560e+00 4.7644517e+00 5.0259327e+00 5.2057660e+00 4.6314145e+00 5.4772256e-01 4.6904158e-01 8.8881944e-01 5.5677644e-01 7.9372539e-01 8.7749644e-01 8.4261498e-01 1.2806248e+00 1.1489125e+00 1.3601471e+00 1.3416408e+00 1.0954451e+00 8.3666003e-01 8.7177979e-01 1.4177447e+00 1.4035669e+00 8.0622577e-01 6.8556546e-01 4.1231056e-01 1.3114877e+00 1.1313708e+00 5.9160798e-01 1.3114877e+00 1.7233688e+00 9.6953597e-01 9.5393920e-01 2.1447611e+00 1.6155494e+00 1.1000000e+00 1.0295630e+00 1.4317821e+00 8.3066239e-01 1.4560220e+00 6.5574385e-01 1.0816654e+00 3.9711459e+00 3.6851052e+00 4.1713307e+00 3.4684290e+00 3.8961519e+00 3.6810325e+00 3.8665230e+00 2.9017236e+00 3.8236109e+00 3.2832910e+00 3.2511536e+00 3.4205263e+00 3.4292856e+00 
3.8716921e+00 2.8670542e+00 3.6469165e+00 3.6905284e+00 3.2771939e+00 3.9974992e+00 3.2233523e+00 4.0211939e+00 3.2526912e+00 4.2284749e+00 3.8444766e+00 3.5199432e+00 3.6496575e+00 4.1036569e+00 4.3011626e+00 3.7188708e+00 2.8106939e+00 3.1968735e+00 3.0886890e+00 3.1591138e+00 4.3474130e+00 3.7067506e+00 3.6400549e+00 3.9446166e+00 3.8196859e+00 3.2649655e+00 3.3749074e+00 3.6455452e+00 3.7536649e+00 3.2863353e+00 2.9291637e+00 3.4554305e+00 3.3181320e+00 3.3808283e+00 3.4914181e+00 2.6057628e+00 3.3271610e+00 5.3916602e+00 4.4485953e+00 5.3282267e+00 4.8352870e+00 5.1623638e+00 6.0835845e+00 4.0249224e+00 5.6595053e+00 5.1749396e+00 5.6053546e+00 4.4249294e+00 4.6636895e+00 4.9091751e+00 4.4654227e+00 4.6357308e+00 4.7138095e+00 4.7476310e+00 6.1562976e+00 6.5169011e+00 4.4056782e+00 5.1487863e+00 4.2906876e+00 6.2080593e+00 4.2649736e+00 5.0159745e+00 5.3103672e+00 4.1376322e+00 4.1641326e+00 4.9769469e+00 5.1068581e+00 5.5587768e+00 5.8932164e+00 5.0159745e+00 4.3116122e+00 4.7801674e+00 5.7471732e+00 4.9809638e+00 4.7138095e+00 4.0693980e+00 4.8238988e+00 5.0813384e+00 4.6518813e+00 4.4485953e+00 5.3047149e+00 5.1807335e+00 4.7138095e+00 4.4530888e+00 4.5530210e+00 4.7507894e+00 4.3335897e+00 6.1644140e-01 1.0908712e+00 6.4031242e-01 8.5440037e-01 1.0816654e+00 9.2195445e-01 1.4628739e+00 1.2727922e+00 1.4177447e+00 1.5811388e+00 1.2247449e+00 1.0488088e+00 1.1401754e+00 1.5779734e+00 1.5968719e+00 1.0440307e+00 6.5574385e-01 3.6055513e-01 1.5556349e+00 1.4352700e+00 9.6436508e-01 1.5556349e+00 1.9313208e+00 1.1832160e+00 1.1618950e+00 2.4289916e+00 1.7916473e+00 1.1618950e+00 9.3808315e-01 1.6703293e+00 8.7749644e-01 1.6431677e+00 8.3066239e-01 1.3228757e+00 3.7907783e+00 3.4842503e+00 3.9874804e+00 3.3926391e+00 3.7443290e+00 3.5171011e+00 3.6400549e+00 2.8705400e+00 3.6715120e+00 3.1464265e+00 3.2572995e+00 3.2403703e+00 3.3970576e+00 3.6945906e+00 2.7349589e+00 3.4785054e+00 3.4899857e+00 3.1654384e+00 3.9115214e+00 3.1416556e+00 3.7854986e+00 3.1272992e+00 4.0914545e+00 3.6878178e+00 3.3749074e+00 3.4899857e+00 3.9572718e+00 4.1109610e+00 3.5425979e+00 2.7568098e+00 3.1336879e+00 3.0397368e+00 3.0495901e+00 4.1689327e+00 3.5014283e+00 3.3955854e+00 3.7603191e+00 3.7403208e+00 3.0886890e+00 3.2726136e+00 3.5114100e+00 3.5679126e+00 3.1843367e+00 2.9154759e+00 3.3166248e+00 3.1448370e+00 3.2171416e+00 3.3391616e+00 2.5903668e+00 3.1827661e+00 5.1215232e+00 4.2555846e+00 5.1156622e+00 4.6238512e+00 4.9325450e+00 5.8711157e+00 3.8652296e+00 5.4598535e+00 5.0059964e+00 5.3347915e+00 4.1952354e+00 4.4799554e+00 4.6968074e+00 4.2918527e+00 4.4192760e+00 4.4698993e+00 4.5343136e+00 5.8855756e+00 6.3253458e+00 4.2883563e+00 4.9122296e+00 4.0853396e+00 6.0133186e+00 4.0951190e+00 4.7686476e+00 5.0892043e+00 3.9572718e+00 3.9547440e+00 4.7696960e+00 4.9132474e+00 5.3721504e+00 5.6364883e+00 4.8062459e+00 4.1340053e+00 4.6054316e+00 5.5434646e+00 4.7085029e+00 4.4877611e+00 3.8600518e+00 4.6076024e+00 4.8476799e+00 4.4384682e+00 4.2555846e+00 5.0616203e+00 4.9254441e+00 4.5011110e+00 4.2976738e+00 4.3416587e+00 4.4799554e+00 4.1133928e+00 5.1961524e-01 5.1961524e-01 3.8729833e-01 6.7082039e-01 4.1231056e-01 9.2736185e-01 7.8740079e-01 1.0049876e+00 1.0488088e+00 7.0710678e-01 5.2915026e-01 5.8309519e-01 1.0535654e+00 1.0630146e+00 5.3851648e-01 4.5825757e-01 3.8729833e-01 1.0099505e+00 8.3666003e-01 4.5825757e-01 1.0099505e+00 1.3601471e+00 6.4807407e-01 5.7445626e-01 1.8384776e+00 1.2369317e+00 6.7082039e-01 6.7823300e-01 1.0908712e+00 4.7958315e-01 1.0862780e+00 3.6055513e-01 
7.5498344e-01 3.9509493e+00 3.5972211e+00 4.1303753e+00 3.2664966e+00 3.8105118e+00 3.5142567e+00 3.7643060e+00 2.6191602e+00 3.7603191e+00 3.0397368e+00 2.9949958e+00 3.2680269e+00 3.3015148e+00 3.7483330e+00 2.6720778e+00 3.5972211e+00 3.5071356e+00 3.1304952e+00 3.8704005e+00 3.0413813e+00 3.8665230e+00 3.1304952e+00 4.1158231e+00 3.7282704e+00 3.4365681e+00 3.5860842e+00 4.0521599e+00 4.2284749e+00 3.5791060e+00 2.6419690e+00 3.0000000e+00 2.8948230e+00 3.0000000e+00 4.2047592e+00 3.5014283e+00 3.5057096e+00 3.8858718e+00 3.7134889e+00 3.0822070e+00 3.1733263e+00 3.4568772e+00 3.6318040e+00 3.1272992e+00 2.6608269e+00 3.2710854e+00 3.1543621e+00 3.2109189e+00 3.3837849e+00 2.3302360e+00 3.1543621e+00 5.2602281e+00 4.2766810e+00 5.2678269e+00 4.7180504e+00 5.0507425e+00 6.0522723e+00 3.7603191e+00 5.6187187e+00 5.0852729e+00 5.5479726e+00 4.3243497e+00 4.5486262e+00 4.8270074e+00 4.2778499e+00 4.4508426e+00 4.5934736e+00 4.6497312e+00 6.1400326e+00 6.4768820e+00 4.2602817e+00 5.0705029e+00 4.0951190e+00 6.1822326e+00 4.1436699e+00 4.9295030e+00 5.2706736e+00 4.0074930e+00 4.0274061e+00 4.8569538e+00 5.0734604e+00 5.5226805e+00 5.9016947e+00 4.8928519e+00 4.2035699e+00 4.6551047e+00 5.7227616e+00 4.8528342e+00 4.6086874e+00 3.9217343e+00 4.7528939e+00 4.9819675e+00 4.5760245e+00 4.2766810e+00 5.2172790e+00 5.0813384e+00 4.6173586e+00 4.3255058e+00 4.4485953e+00 4.6162756e+00 4.1785165e+00 7.3484692e-01 3.1622777e-01 4.4721360e-01 2.4494897e-01 6.5574385e-01 4.1231056e-01 6.0000000e-01 5.5677644e-01 2.6457513e-01 1.7320508e-01 1.7320508e-01 5.4772256e-01 5.4772256e-01 3.4641016e-01 6.4807407e-01 8.1240384e-01 5.0000000e-01 3.8729833e-01 4.2426407e-01 5.0000000e-01 8.7177979e-01 1.7320508e-01 1.4142136e-01 1.3453624e+00 7.7459667e-01 3.7416574e-01 5.9160798e-01 5.8309519e-01 3.7416574e-01 5.9160798e-01 3.1622777e-01 2.4494897e-01 3.9749214e+00 3.5818989e+00 4.1340053e+00 3.0594117e+00 3.7589892e+00 3.3852622e+00 3.7496667e+00 2.3130067e+00 3.7215588e+00 2.8478062e+00 2.6758176e+00 3.1890437e+00 3.1224990e+00 3.6687873e+00 2.5396850e+00 3.5958309e+00 3.3985291e+00 2.9849623e+00 3.7349699e+00 2.8530685e+00 3.8131352e+00 3.0413813e+00 4.0162171e+00 3.6318040e+00 3.3852622e+00 3.5651087e+00 4.0187063e+00 4.2107007e+00 3.4957117e+00 2.4637370e+00 2.7874720e+00 2.6739484e+00 2.8618176e+00 4.1024383e+00 3.3749074e+00 3.4813790e+00 3.8794329e+00 3.5888717e+00 2.9647934e+00 2.9866369e+00 3.2832910e+00 3.5637059e+00 2.9782545e+00 2.3558438e+00 3.1192948e+00 3.0430248e+00 3.0919250e+00 3.3136083e+00 2.0493902e+00 3.0232433e+00 5.2421370e+00 4.1689327e+00 5.2668776e+00 4.6572524e+00 5.0179677e+00 6.0646517e+00 3.5510562e+00 5.6089215e+00 5.0169712e+00 5.5991071e+00 4.3162484e+00 4.4833024e+00 4.8155997e+00 4.1484937e+00 4.3680659e+00 4.5814845e+00 4.6119410e+00 6.2088646e+00 6.4668385e+00 4.1109610e+00 5.0813384e+00 3.9849718e+00 6.1830413e+00 4.0718546e+00 4.9325450e+00 5.2829916e+00 3.9382737e+00 3.9686270e+00 4.8020829e+00 5.0705029e+00 5.5163394e+00 5.9849812e+00 4.8404545e+00 4.1303753e+00 4.5453273e+00 5.7532599e+00 4.8476799e+00 4.5727453e+00 3.8561639e+00 4.7581509e+00 4.9769469e+00 4.5923850e+00 4.1689327e+00 5.2182373e+00 5.0921508e+00 4.6097722e+00 4.2379240e+00 4.4204072e+00 4.6065171e+00 4.1024383e+00 6.3245553e-01 5.0990195e-01 6.4807407e-01 1.3228757e+00 8.0622577e-01 1.0099505e+00 1.0723805e+00 8.1853528e-01 6.2449980e-01 7.1414284e-01 1.1747340e+00 1.1489125e+00 5.4772256e-01 6.4807407e-01 5.4772256e-01 1.1000000e+00 1.0535654e+00 5.4772256e-01 1.1000000e+00 1.5811388e+00 
7.5498344e-01 8.6023253e-01 1.9621417e+00 1.4899664e+00 8.2462113e-01 6.4031242e-01 1.2409674e+00 6.1644140e-01 1.2922848e+00 4.6904158e-01 9.1651514e-01 3.5014283e+00 3.1827661e+00 3.6891733e+00 2.9291637e+00 3.3896903e+00 3.1368774e+00 3.3615473e+00 2.3769729e+00 3.3211444e+00 2.7404379e+00 2.7313001e+00 2.8930952e+00 2.9034462e+00 3.3436507e+00 2.3302360e+00 3.1606961e+00 3.1511903e+00 2.7331301e+00 3.4770677e+00 2.6795522e+00 3.5014283e+00 2.7294688e+00 3.7054015e+00 3.3120990e+00 3.0099834e+00 3.1543621e+00 3.6097091e+00 3.8065733e+00 3.1906112e+00 2.2737634e+00 2.6551836e+00 2.5475478e+00 2.6210685e+00 3.8144462e+00 3.1638584e+00 3.1272992e+00 3.4539832e+00 3.3015148e+00 2.7221315e+00 2.8319605e+00 3.0951575e+00 3.2280025e+00 2.7477263e+00 2.4062419e+00 2.9103264e+00 2.7748874e+00 2.8390139e+00 2.9698485e+00 2.0928450e+00 2.7856777e+00 4.8928519e+00 3.9166312e+00 4.8456166e+00 4.3162484e+00 4.6583259e+00 5.6124861e+00 3.4828150e+00 5.1749396e+00 4.6636895e+00 5.1468437e+00 3.9306488e+00 4.1496988e+00 4.4192760e+00 3.9331921e+00 4.1206796e+00 4.2201896e+00 4.2391037e+00 5.7105166e+00 6.0398675e+00 3.8704005e+00 4.6690470e+00 3.7603191e+00 5.7349804e+00 3.7496667e+00 4.5265881e+00 4.8321838e+00 3.6207734e+00 3.6455452e+00 4.4654227e+00 4.6249324e+00 5.0803543e+00 5.4607692e+00 4.5066617e+00 3.7894591e+00 4.2449971e+00 5.2915026e+00 4.4877611e+00 4.2035699e+00 3.5482390e+00 4.3428102e+00 4.5945620e+00 4.1821047e+00 3.9166312e+00 4.8176758e+00 4.7000000e+00 4.2296572e+00 3.9370039e+00 4.0521599e+00 4.2544095e+00 3.8065733e+00 5.4772256e-01 1.4142136e-01 7.4161985e-01 5.7445626e-01 6.4807407e-01 8.1853528e-01 4.3588989e-01 3.3166248e-01 4.3588989e-01 7.3484692e-01 7.7459667e-01 5.0990195e-01 3.7416574e-01 5.8309519e-01 7.5498344e-01 6.8556546e-01 5.4772256e-01 7.5498344e-01 1.0862780e+00 4.1231056e-01 3.7416574e-01 1.6278821e+00 9.4868330e-01 4.4721360e-01 4.1231056e-01 8.6023253e-01 1.4142136e-01 7.9372539e-01 2.4494897e-01 5.2915026e-01 3.9268308e+00 3.5341194e+00 4.0902323e+00 3.1080541e+00 3.7429935e+00 3.3704599e+00 3.6905284e+00 2.3937418e+00 3.6972963e+00 2.8618176e+00 2.7820855e+00 3.1638584e+00 3.1796226e+00 3.6414283e+00 2.5436195e+00 3.5594943e+00 3.3660065e+00 2.9916551e+00 3.7696154e+00 2.8879058e+00 3.7603191e+00 3.0413813e+00 4.0162171e+00 3.6124784e+00 3.3674916e+00 3.5369478e+00 3.9987498e+00 4.1725292e+00 3.4727511e+00 2.5079872e+00 2.8372522e+00 2.7294688e+00 2.8757608e+00 4.0828911e+00 3.3421550e+00 3.4146742e+00 3.8379682e+00 3.6193922e+00 2.9410882e+00 3.0166206e+00 3.2893768e+00 3.5298725e+00 2.9983329e+00 2.4474477e+00 3.1224990e+00 3.0166206e+00 3.0757113e+00 3.2954514e+00 2.1400935e+00 3.0199338e+00 5.1749396e+00 4.1496988e+00 5.2191953e+00 4.6162756e+00 4.9699095e+00 6.0116553e+00 3.5623026e+00 5.5623736e+00 4.9989999e+00 5.5181519e+00 4.2626283e+00 4.4609416e+00 4.7717921e+00 4.1460825e+00 4.3428102e+00 4.5265881e+00 4.5661800e+00 6.1163715e+00 6.4311741e+00 4.1303753e+00 5.0239427e+00 3.9623226e+00 6.1392182e+00 4.0570926e+00 4.8672374e+00 5.2220686e+00 3.9179076e+00 3.9306488e+00 4.7686476e+00 5.0229473e+00 5.4781384e+00 5.8940648e+00 4.8072861e+00 4.1036569e+00 4.5232732e+00 5.7061370e+00 4.7770284e+00 4.5199558e+00 3.8196859e+00 4.7095647e+00 4.9264592e+00 4.5486262e+00 4.1496988e+00 5.1584882e+00 5.0289164e+00 4.5705580e+00 4.2355637e+00 4.3794977e+00 4.5365185e+00 4.0607881e+00 5.0990195e-01 1.0816654e+00 4.3588989e-01 6.3245553e-01 5.7445626e-01 4.5825757e-01 3.0000000e-01 3.6055513e-01 7.3484692e-01 6.7823300e-01 2.8284271e-01 7.6157731e-01 
8.6023253e-01 6.2449980e-01 6.7082039e-01 4.2426407e-01 6.2449980e-01 1.1489125e+00 3.6055513e-01 5.8309519e-01 1.4798649e+00 1.0954451e+00 5.8309519e-01 5.7445626e-01 7.8740079e-01 5.0990195e-01 8.7749644e-01 3.7416574e-01 5.0990195e-01 3.6110940e+00 3.2511536e+00 3.7775654e+00 2.7784888e+00 3.4161382e+00 3.0822070e+00 3.4322005e+00 2.1095023e+00 3.3630343e+00 2.6095977e+00 2.4494897e+00 2.8896367e+00 2.7802878e+00 3.3436507e+00 2.2605309e+00 3.2419130e+00 3.1192948e+00 2.6551836e+00 3.4073450e+00 2.5495098e+00 3.5298725e+00 2.7110883e+00 3.6810325e+00 3.2939338e+00 3.0364453e+00 3.2140317e+00 3.6565011e+00 3.8716921e+00 3.1843367e+00 2.1470911e+00 2.4959968e+00 2.3769729e+00 2.5475478e+00 3.7907783e+00 3.1128765e+00 3.1874755e+00 3.5312887e+00 3.2434549e+00 2.6776856e+00 2.7055499e+00 2.9899833e+00 3.2403703e+00 2.6627054e+00 2.1377558e+00 2.8266588e+00 2.7386128e+00 2.7928480e+00 2.9765752e+00 1.8439089e+00 2.7239677e+00 4.9598387e+00 3.8858718e+00 4.9295030e+00 4.3393548e+00 4.7095647e+00 5.7113921e+00 3.3391616e+00 5.2516664e+00 4.6765372e+00 5.2848841e+00 4.0062451e+00 4.1641326e+00 4.4911023e+00 3.8768544e+00 4.1133928e+00 4.2906876e+00 4.2860238e+00 5.8694122e+00 6.1139185e+00 3.7920970e+00 4.7644517e+00 3.7255872e+00 5.8215118e+00 3.7549967e+00 4.6162756e+00 4.9325450e+00 3.6290495e+00 3.6674242e+00 4.4922155e+00 4.7085029e+00 5.1584882e+00 5.6338264e+00 4.5354162e+00 3.7973675e+00 4.2166337e+00 5.4055527e+00 4.5672749e+00 4.2532341e+00 3.5623026e+00 4.4317040e+00 4.6722586e+00 4.2790186e+00 3.8858718e+00 4.9040799e+00 4.7947888e+00 4.3023250e+00 3.9242834e+00 4.1060930e+00 4.3289722e+00 3.8118237e+00 7.4161985e-01 4.5825757e-01 6.1644140e-01 7.4161985e-01 3.3166248e-01 3.0000000e-01 3.8729833e-01 6.7823300e-01 7.0710678e-01 4.2426407e-01 5.0990195e-01 6.7823300e-01 7.0000000e-01 6.2449980e-01 5.2915026e-01 7.0000000e-01 1.0295630e+00 3.6055513e-01 3.1622777e-01 1.5394804e+00 9.0553851e-01 3.1622777e-01 4.1231056e-01 7.7459667e-01 2.4494897e-01 7.4161985e-01 2.8284271e-01 4.6904158e-01 3.8858718e+00 3.4856850e+00 4.0459857e+00 3.0298515e+00 3.6864617e+00 3.3136083e+00 3.6441734e+00 2.3086793e+00 3.6482873e+00 2.7874720e+00 2.6944387e+00 3.1032241e+00 3.1096624e+00 3.5888717e+00 2.4718414e+00 3.5114100e+00 3.3090784e+00 2.9342802e+00 3.6972963e+00 2.8178006e+00 3.7067506e+00 2.9782545e+00 3.9560081e+00 3.5623026e+00 3.3136083e+00 3.4856850e+00 3.9484174e+00 4.1218928e+00 3.4146742e+00 2.4351591e+00 2.7622455e+00 2.6551836e+00 2.8089144e+00 4.0261644e+00 3.2848135e+00 3.3674916e+00 3.7907783e+00 3.5524639e+00 2.8827071e+00 2.9427878e+00 3.2280025e+00 3.4785054e+00 2.9308702e+00 2.3600847e+00 3.0577770e+00 2.9631065e+00 3.0166206e+00 3.2403703e+00 2.0445048e+00 2.9563491e+00 5.1244512e+00 4.0865633e+00 5.1710734e+00 4.5661800e+00 4.9173163e+00 5.9699246e+00 3.4885527e+00 5.5208695e+00 4.9446941e+00 5.4763126e+00 4.2107007e+00 4.4022721e+00 4.7191101e+00 4.0755368e+00 4.2731721e+00 4.4710178e+00 4.5177428e+00 6.0868711e+00 6.3827894e+00 4.0644803e+00 4.9739320e+00 3.8961519e+00 6.0967204e+00 3.9949969e+00 4.8218254e+00 5.1836281e+00 3.8561639e+00 3.8742741e+00 4.7116876e+00 4.9829710e+00 5.4323107e+00 5.8668561e+00 4.7486840e+00 4.0521599e+00 4.4743715e+00 5.6586217e+00 4.7265209e+00 4.4732538e+00 3.7616486e+00 4.6583259e+00 4.8713448e+00 4.4911023e+00 4.0865633e+00 5.1097945e+00 4.9769469e+00 4.5110974e+00 4.1689327e+00 4.3243497e+00 4.4855323e+00 4.0062451e+00 9.5916630e-01 9.4339811e-01 9.3808315e-01 7.7459667e-01 7.8740079e-01 7.4833148e-01 7.2801099e-01 8.0622577e-01 
9.8488578e-01 9.3273791e-01 1.1532563e+00 7.7459667e-01 6.0000000e-01 9.5393920e-01 7.7459667e-01 7.0000000e-01 7.3484692e-01 5.1961524e-01 1.3416408e+00 5.3851648e-01 8.3066239e-01 1.0677078e+00 7.5498344e-01 8.0622577e-01 5.6568542e-01 8.6602540e-01 6.4031242e-01 4.5880279e+00 4.1641326e+00 4.7370877e+00 3.5651087e+00 4.3474130e+00 3.9127995e+00 4.3162484e+00 2.7313001e+00 4.3197222e+00 3.3196385e+00 3.1000000e+00 3.7389838e+00 3.6823905e+00 4.2272923e+00 3.0757113e+00 4.2023803e+00 3.9115214e+00 3.5355339e+00 4.2965102e+00 3.3808283e+00 4.3416587e+00 3.6193922e+00 4.5825757e+00 4.1928511e+00 3.9786933e+00 4.1665333e+00 4.6216880e+00 4.7979162e+00 4.0484565e+00 3.0166206e+00 3.3015148e+00 3.1906112e+00 3.4146742e+00 4.6411206e+00 3.8652296e+00 4.0261644e+00 4.4766059e+00 4.1653331e+00 3.4899857e+00 3.4971417e+00 3.7907783e+00 4.1243181e+00 3.5270384e+00 2.7892651e+00 3.6414283e+00 3.5791060e+00 3.6262929e+00 3.8923001e+00 2.5039968e+00 3.5594943e+00 5.7680153e+00 4.6850827e+00 5.8506410e+00 5.2057660e+00 5.5686623e+00 6.6580778e+00 3.9749214e+00 6.1991935e+00 5.5874860e+00 6.1692787e+00 4.8805737e+00 5.0428167e+00 5.3907328e+00 4.6540305e+00 4.8713448e+00 5.1283526e+00 5.1749396e+00 6.7926431e+00 7.0590368e+00 4.6486557e+00 5.6524331e+00 4.4821870e+00 6.7808554e+00 4.6335731e+00 5.4954527e+00 5.8719673e+00 4.4944410e+00 4.5144213e+00 5.3525695e+00 5.6674509e+00 6.1139185e+00 6.5825527e+00 5.3888774e+00 4.6936127e+00 5.0842895e+00 6.3553127e+00 5.3786615e+00 5.1283526e+00 4.3954522e+00 5.3394756e+00 5.5371473e+00 5.1730069e+00 4.6850827e+00 5.7810034e+00 5.6462377e+00 5.1788030e+00 4.7947888e+00 4.9849774e+00 5.1351728e+00 4.6281746e+00 4.7958315e-01 4.4721360e-01 2.0000000e-01 4.2426407e-01 4.4721360e-01 5.1961524e-01 4.7958315e-01 3.8729833e-01 9.2195445e-01 1.0723805e+00 5.2915026e-01 6.0000000e-01 6.7082039e-01 5.2915026e-01 9.1104336e-01 3.7416574e-01 5.0000000e-01 1.2489996e+00 8.6602540e-01 2.6457513e-01 5.4772256e-01 5.5677644e-01 5.9160798e-01 6.6332496e-01 5.7445626e-01 4.3588989e-01 3.6646964e+00 3.2465366e+00 3.8105118e+00 2.6627054e+00 3.4088121e+00 3.0149627e+00 3.4132096e+00 1.9131126e+00 3.3852622e+00 2.4535688e+00 2.2781571e+00 2.8248894e+00 2.7495454e+00 3.3120990e+00 2.1587033e+00 3.2710854e+00 3.0298515e+00 2.6191602e+00 3.3555923e+00 2.4677925e+00 3.4568772e+00 2.6795522e+00 3.6496575e+00 3.2771939e+00 3.0413813e+00 3.2310989e+00 3.6823905e+00 3.8704005e+00 3.1320920e+00 2.0832667e+00 2.3958297e+00 2.2847319e+00 2.4859606e+00 3.7336309e+00 3.0033315e+00 3.1416556e+00 3.5496479e+00 3.2202484e+00 2.5961510e+00 2.5942244e+00 2.9034462e+00 3.2109189e+00 2.6000000e+00 1.9544820e+00 2.7386128e+00 2.6814175e+00 2.7221315e+00 2.9614186e+00 1.6401219e+00 2.6476405e+00 4.8918299e+00 3.7907783e+00 4.9284886e+00 4.3011626e+00 4.6636895e+00 5.7367238e+00 3.1559468e+00 5.2773099e+00 4.6583259e+00 5.2782573e+00 3.9724048e+00 4.1194660e+00 4.4698993e+00 3.7603191e+00 3.9887341e+00 4.2308392e+00 4.2638011e+00 5.9076222e+00 6.1261734e+00 3.7296112e+00 4.7423623e+00 3.6041643e+00 5.8532043e+00 3.7054015e+00 4.5956501e+00 4.9598387e+00 3.5721142e+00 3.6083237e+00 4.4395946e+00 4.7455242e+00 5.1826634e+00 5.6947344e+00 4.4766059e+00 3.7749172e+00 4.1844952e+00 5.4267854e+00 4.5022217e+00 4.2261093e+00 3.4928498e+00 4.4192760e+00 4.6281746e+00 4.2520583e+00 3.7907783e+00 4.8764741e+00 4.7497368e+00 4.2591079e+00 3.8639358e+00 4.0681691e+00 4.2602817e+00 3.7389838e+00 5.3851648e-01 4.1231056e-01 5.7445626e-01 6.4031242e-01 3.7416574e-01 4.2426407e-01 7.4833148e-01 9.0553851e-01 
1.1789826e+00 7.2801099e-01 6.4031242e-01 6.0827625e-01 5.0990195e-01 7.5498344e-01 7.0710678e-01 6.0827625e-01 8.3666003e-01 6.6332496e-01 2.0000000e-01 6.8556546e-01 1.7464249e+00 5.7445626e-01 5.2915026e-01 4.6904158e-01 3.4641016e-01 1.8384776e+00 5.4772256e-01 1.8708287e+00 7.7459667e-01 1.8814888e+00 1.1789826e+00 1.5620499e+00 2.7092434e+00 1.1874342e+00 2.2405357e+00 1.5588457e+00 2.3430749e+00 9.7467943e-01 1.0000000e+00 1.4177447e+00 8.6602540e-01 1.1045361e+00 1.2369317e+00 1.1618950e+00 3.0049958e+00 3.0626786e+00 8.6023253e-01 1.7262677e+00 7.6157731e-01 2.8266588e+00 6.1644140e-01 1.5652476e+00 1.9672316e+00 4.7958315e-01 5.1961524e-01 1.3190906e+00 1.7748239e+00 2.1656408e+00 2.8774989e+00 1.3674794e+00 6.7823300e-01 1.1489125e+00 2.4698178e+00 1.5362291e+00 1.1357817e+00 4.3588989e-01 1.4212670e+00 1.5968719e+00 1.3601471e+00 7.7459667e-01 1.8248288e+00 1.7578396e+00 1.2767145e+00 8.1240384e-01 1.0000000e+00 1.3190906e+00 6.8556546e-01 4.2426407e-01 3.4641016e-01 4.6904158e-01 1.7378147e+00 1.2247449e+00 1.4456832e+00 1.7146428e+00 1.1618950e+00 7.8740079e-01 6.2449980e-01 9.4339811e-01 1.3000000e+00 5.4772256e-01 7.8740079e-01 7.7459667e-01 8.3066239e-01 8.1853528e-01 1.0344080e+00 7.9372539e-01 7.0000000e-01 3.0577770e+00 1.8411953e+00 3.0149627e+00 2.3452079e+00 2.7440845e+00 3.8196859e+00 1.4628739e+00 3.3361655e+00 2.6343880e+00 3.5014283e+00 2.1354157e+00 2.1330729e+00 2.5651511e+00 1.8055470e+00 2.1377558e+00 2.4041631e+00 2.3323808e+00 4.1376322e+00 4.1533119e+00 1.6583124e+00 2.8861739e+00 1.7349352e+00 3.9089641e+00 1.7233688e+00 2.7459060e+00 3.0822070e+00 1.6186414e+00 1.7088007e+00 2.4799194e+00 2.8390139e+00 3.2403703e+00 3.9610605e+00 2.5258662e+00 1.7916473e+00 2.1748563e+00 3.5510562e+00 2.7147744e+00 2.3194827e+00 1.6062378e+00 2.5514702e+00 2.7604347e+00 2.4372115e+00 1.8411953e+00 3.0033315e+00 2.9291637e+00 2.3958297e+00 1.8520259e+00 2.1656408e+00 2.4879711e+00 1.8439089e+00 1.4142136e-01 4.4721360e-01 1.5099669e+00 1.0099505e+00 1.4106736e+00 1.7029386e+00 1.0246951e+00 7.0710678e-01 3.0000000e-01 6.4031242e-01 1.2041595e+00 4.2426407e-01 7.2111026e-01 5.4772256e-01 7.5498344e-01 7.0000000e-01 1.0148892e+00 9.0000000e-01 5.7445626e-01 2.8722813e+00 1.5842980e+00 2.8861739e+00 2.1494185e+00 2.5632011e+00 3.6891733e+00 1.1045361e+00 3.1984371e+00 2.4372115e+00 3.4029399e+00 2.0346990e+00 1.9467922e+00 2.4372115e+00 1.5165751e+00 1.9052559e+00 2.2671568e+00 2.1771541e+00 4.0521599e+00 3.9912404e+00 1.3747727e+00 2.7658633e+00 1.4798649e+00 3.7709415e+00 1.5588457e+00 2.6191602e+00 2.9765752e+00 1.4628739e+00 1.5556349e+00 2.2825424e+00 2.7386128e+00 3.1144823e+00 3.9102430e+00 2.3280893e+00 1.6278821e+00 1.9313208e+00 3.4539832e+00 2.5632011e+00 2.1633308e+00 1.4491377e+00 2.4515301e+00 2.6191602e+00 2.3622024e+00 1.5842980e+00 2.8600699e+00 2.7964263e+00 2.2803509e+00 1.6522712e+00 2.0322401e+00 2.3430749e+00 1.6431677e+00 5.0990195e-01 1.6309506e+00 1.1224972e+00 1.5000000e+00 1.7832555e+00 1.1090537e+00 7.8740079e-01 4.3588989e-01 7.5498344e-01 1.3000000e+00 5.0990195e-01 6.4807407e-01 6.6332496e-01 8.3066239e-01 7.9372539e-01 1.0908712e+00 8.1853528e-01 6.7082039e-01 2.9983329e+00 1.7175564e+00 2.9949958e+00 2.2671568e+00 2.6851443e+00 3.7934153e+00 1.2247449e+00 3.3000000e+00 2.5495098e+00 3.5128336e+00 2.1447611e+00 2.0663978e+00 2.5495098e+00 1.6552945e+00 2.0420578e+00 2.3874673e+00 2.2891046e+00 4.1521079e+00 4.1000000e+00 1.4933185e+00 2.8792360e+00 1.6155494e+00 3.8729833e+00 1.6763055e+00 2.7313001e+00 3.0757113e+00 1.5811388e+00 
1.6733201e+00 2.4062419e+00 2.8319605e+00 3.2155870e+00 4.0012498e+00 2.4535688e+00 1.7349352e+00 2.0420578e+00 3.5566838e+00 2.6851443e+00 2.2759613e+00 1.5684387e+00 2.5592968e+00 2.7386128e+00 2.4698178e+00 1.7175564e+00 2.9765752e+00 2.9154759e+00 2.3958297e+00 1.7748239e+00 2.1470911e+00 2.4637370e+00 1.7663522e+00 1.2806248e+00 8.3666003e-01 1.0246951e+00 1.3038405e+00 8.1853528e-01 4.2426407e-01 3.8729833e-01 5.9160798e-01 8.4261498e-01 1.4142136e-01 1.0954451e+00 3.7416574e-01 4.3588989e-01 3.8729833e-01 6.0827625e-01 1.1618950e+00 2.6457513e-01 2.5903668e+00 1.3892444e+00 2.5670995e+00 1.8814888e+00 2.2781571e+00 3.3808283e+00 1.2083046e+00 2.9000000e+00 2.1954498e+00 3.0495901e+00 1.6792856e+00 1.6763055e+00 2.1118712e+00 1.3784049e+00 1.7000000e+00 1.9442222e+00 1.8708287e+00 3.6959437e+00 3.7188708e+00 1.2609520e+00 2.4310492e+00 1.3000000e+00 3.4785054e+00 1.2688578e+00 2.2847319e+00 2.6419690e+00 1.1575837e+00 1.2409674e+00 2.0174241e+00 2.4124676e+00 2.8106939e+00 3.5369478e+00 2.0639767e+00 1.3379088e+00 1.7406895e+00 3.1224990e+00 2.2516660e+00 1.8547237e+00 1.1401754e+00 2.1047565e+00 2.3021729e+00 2.0049938e+00 1.3892444e+00 2.5416530e+00 2.4698178e+00 1.9493589e+00 1.4106736e+00 1.7058722e+00 2.0273135e+00 1.3784049e+00 9.0553851e-01 9.2195445e-01 9.0553851e-01 9.1104336e-01 1.1575837e+00 1.2609520e+00 9.5393920e-01 6.2449980e-01 1.1916375e+00 2.1817424e+00 1.0295630e+00 1.0723805e+00 1.0148892e+00 9.0000000e-01 2.3473389e+00 1.0908712e+00 1.4387495e+00 3.6055513e-01 1.4798649e+00 6.4807407e-01 1.0908712e+00 2.2693611e+00 1.2727922e+00 1.7916473e+00 1.0295630e+00 2.0149442e+00 8.1240384e-01 5.3851648e-01 1.0677078e+00 5.4772256e-01 8.3066239e-01 9.6953597e-01 7.3484692e-01 2.6495283e+00 2.5748786e+00 5.1961524e-01 1.3820275e+00 6.0827625e-01 2.3706539e+00 4.1231056e-01 1.2083046e+00 1.5937377e+00 4.2426407e-01 4.2426407e-01 8.1853528e-01 1.4212670e+00 1.7492856e+00 2.5826343e+00 8.8317609e-01 3.3166248e-01 5.5677644e-01 2.1142375e+00 1.2124356e+00 7.2111026e-01 4.6904158e-01 1.1445523e+00 1.2409674e+00 1.2083046e+00 3.6055513e-01 1.4212670e+00 1.4212670e+00 1.0392305e+00 4.7958315e-01 7.1414284e-01 1.0535654e+00 3.7416574e-01 7.2801099e-01 1.3190906e+00 1.1618950e+00 4.8989795e-01 7.4161985e-01 5.1961524e-01 7.1414284e-01 8.1240384e-01 1.5297059e+00 5.0990195e-01 5.1961524e-01 4.7958315e-01 8.5440037e-01 1.6583124e+00 5.7445626e-01 2.0371549e+00 8.7749644e-01 2.2825424e+00 1.4560220e+00 1.8411953e+00 3.1000000e+00 7.3484692e-01 2.6362853e+00 1.9287302e+00 2.6758176e+00 1.3638182e+00 1.3747727e+00 1.8220867e+00 9.1651514e-01 1.1704700e+00 1.5231546e+00 1.5165751e+00 3.3555923e+00 3.4423829e+00 1.1180340e+00 2.0904545e+00 7.0000000e-01 3.2280025e+00 1.0723805e+00 1.8920888e+00 2.3706539e+00 9.2736185e-01 8.6023253e-01 1.6155494e+00 2.2226111e+00 2.6000000e+00 3.2787193e+00 1.6552945e+00 1.1000000e+00 1.3674794e+00 2.9137605e+00 1.7291616e+00 1.4491377e+00 7.3484692e-01 1.8520259e+00 1.9287302e+00 1.8055470e+00 8.7749644e-01 2.1447611e+00 2.0542639e+00 1.6792856e+00 1.2124356e+00 1.3964240e+00 1.5000000e+00 8.3666003e-01 7.9372539e-01 1.1832160e+00 7.5498344e-01 1.1832160e+00 1.0295630e+00 4.6904158e-01 1.0440307e+00 2.0024984e+00 9.1104336e-01 7.0710678e-01 7.2111026e-01 6.4807407e-01 2.0297783e+00 8.3666003e-01 1.7776389e+00 9.8994949e-01 1.8920888e+00 1.2609520e+00 1.5684387e+00 2.7166155e+00 1.4247807e+00 2.2847319e+00 1.7406895e+00 2.2022716e+00 9.0000000e-01 1.1747340e+00 1.4317821e+00 1.1445523e+00 1.1832160e+00 1.1532563e+00 1.2041595e+00 2.8722813e+00 
3.1272992e+00 1.3038405e+00 1.6673332e+00 9.1651514e-01 2.8722813e+00 8.8317609e-01 1.4798649e+00 1.9416488e+00 7.2801099e-01 6.0827625e-01 1.4071247e+00 1.8138357e+00 2.2293497e+00 2.7459060e+00 1.4456832e+00 9.0553851e-01 1.3784049e+00 2.4698178e+00 1.3928388e+00 1.1357817e+00 5.3851648e-01 1.4000000e+00 1.5588457e+00 1.3228757e+00 9.8994949e-01 1.7691806e+00 1.6583124e+00 1.2767145e+00 1.1135529e+00 1.0295630e+00 1.1575837e+00 7.5498344e-01 9.6436508e-01 1.2727922e+00 1.5264338e+00 1.3674794e+00 6.2449980e-01 1.2806248e+00 2.3958297e+00 1.2884099e+00 1.1618950e+00 1.1532563e+00 7.0000000e-01 2.4433583e+00 1.2206556e+00 1.7000000e+00 1.1357817e+00 1.4035669e+00 1.0488088e+00 1.3228757e+00 2.1886069e+00 1.9183326e+00 1.7464249e+00 1.2884099e+00 1.8601075e+00 6.7823300e-01 8.7749644e-01 1.0099505e+00 1.3038405e+00 1.3674794e+00 1.0488088e+00 8.8317609e-01 2.4454039e+00 2.5942244e+00 1.1789826e+00 1.3000000e+00 1.2609520e+00 2.3108440e+00 6.7082039e-01 1.1832160e+00 1.4282857e+00 6.6332496e-01 7.0710678e-01 1.1618950e+00 1.2165525e+00 1.6431677e+00 2.2516660e+00 1.2165525e+00 6.4031242e-01 1.1958261e+00 1.9000000e+00 1.3674794e+00 9.0553851e-01 7.7459667e-01 9.4339811e-01 1.2727922e+00 9.1651514e-01 1.1357817e+00 1.4491377e+00 1.4282857e+00 9.4868330e-01 8.7749644e-01 7.4161985e-01 1.2124356e+00 9.4868330e-01 1.0344080e+00 9.1651514e-01 8.6023253e-01 7.6157731e-01 7.1414284e-01 1.7291616e+00 8.3066239e-01 9.4868330e-01 8.7177979e-01 6.1644140e-01 1.8654758e+00 8.3666003e-01 2.2360680e+00 1.1224972e+00 2.0049938e+00 1.4317821e+00 1.8165902e+00 2.7676705e+00 1.4730920e+00 2.2847319e+00 1.5524175e+00 2.6134269e+00 1.3527749e+00 1.1575837e+00 1.6093477e+00 1.1180340e+00 1.4832397e+00 1.6217275e+00 1.4106736e+00 3.2109189e+00 3.0495901e+00 7.0710678e-01 1.9646883e+00 1.2165525e+00 2.8266588e+00 8.1240384e-01 1.8681542e+00 2.1047565e+00 8.1853528e-01 1.0148892e+00 1.5297059e+00 1.8303005e+00 2.1702534e+00 3.0495901e+00 1.5842980e+00 8.8317609e-01 1.2569805e+00 2.5179357e+00 1.9646883e+00 1.4525839e+00 9.9498744e-01 1.6248077e+00 1.8574176e+00 1.5779734e+00 1.1224972e+00 2.0760539e+00 2.0712315e+00 1.5132746e+00 8.7177979e-01 1.2884099e+00 1.7944358e+00 1.1789826e+00 5.1961524e-01 5.1961524e-01 7.1414284e-01 4.6904158e-01 1.2569805e+00 3.1622777e-01 1.7320508e-01 1.7320508e-01 6.4031242e-01 1.3228757e+00 2.2360680e-01 2.3727621e+00 1.2206556e+00 2.4758837e+00 1.7320508e+00 2.1236761e+00 3.3000000e+00 1.0295630e+00 2.8266588e+00 2.1447611e+00 2.8913665e+00 1.5297059e+00 1.5905974e+00 2.0099751e+00 1.2489996e+00 1.5132746e+00 1.7663522e+00 1.7378147e+00 3.5524639e+00 3.6619667e+00 1.2845233e+00 2.3000000e+00 1.0816654e+00 3.4205263e+00 1.2124356e+00 2.1213203e+00 2.5416530e+00 1.0677078e+00 1.0677078e+00 1.8894444e+00 2.3537205e+00 2.7640550e+00 3.4219877e+00 1.9339080e+00 1.2529964e+00 1.6340135e+00 3.0675723e+00 2.0273135e+00 1.6911535e+00 9.4868330e-01 2.0074860e+00 2.1633308e+00 1.9235384e+00 1.2206556e+00 2.3916521e+00 2.3021729e+00 1.8493242e+00 1.3820275e+00 1.5842980e+00 1.7916473e+00 1.1575837e+00 4.2426407e-01 9.8994949e-01 3.3166248e-01 9.3273791e-01 3.0000000e-01 5.8309519e-01 4.8989795e-01 8.6023253e-01 1.0954451e+00 3.7416574e-01 2.5922963e+00 1.3038405e+00 2.6570661e+00 1.9000000e+00 2.3021729e+00 3.4727511e+00 8.7749644e-01 2.9899833e+00 2.2203603e+00 3.1543621e+00 1.7860571e+00 1.7029386e+00 2.1977261e+00 1.2369317e+00 1.6124515e+00 1.9974984e+00 1.9364917e+00 3.8249183e+00 3.7762415e+00 1.1747340e+00 2.5179357e+00 1.1832160e+00 3.5651087e+00 1.3190906e+00 2.3685439e+00 
2.7622455e+00 1.2124356e+00 1.2922848e+00 2.0248457e+00 2.5436195e+00 2.9103264e+00 3.7013511e+00 2.0663978e+00 1.4071247e+00 1.7146428e+00 3.2403703e+00 2.2847319e+00 1.9157244e+00 1.1789826e+00 2.2181073e+00 2.3600847e+00 2.1283797e+00 1.3038405e+00 2.6057628e+00 2.5317978e+00 2.0322401e+00 1.4142136e+00 1.7832555e+00 2.0639767e+00 1.3674794e+00 7.7459667e-01 5.0000000e-01 1.2609520e+00 2.6457513e-01 4.8989795e-01 4.2426407e-01 7.7459667e-01 1.4628739e+00 4.2426407e-01 2.3194827e+00 1.0392305e+00 2.4041631e+00 1.5905974e+00 2.0297783e+00 3.1968735e+00 7.9372539e-01 2.7018512e+00 1.9416488e+00 2.9103264e+00 1.5779734e+00 1.4560220e+00 1.9672316e+00 1.0246951e+00 1.4352700e+00 1.7860571e+00 1.6522712e+00 3.5454196e+00 3.5071356e+00 9.2736185e-01 2.2847319e+00 9.6953597e-01 3.2878564e+00 1.1224972e+00 2.1047565e+00 2.4839485e+00 1.0246951e+00 1.0630146e+00 1.7606817e+00 2.2737634e+00 2.6514147e+00 3.4409301e+00 1.8138357e+00 1.1224972e+00 1.3564660e+00 3.0166206e+00 2.0396078e+00 1.6217275e+00 9.6436508e-01 2.0049938e+00 2.1377558e+00 1.9773720e+00 1.0392305e+00 2.3473389e+00 2.3043437e+00 1.8574176e+00 1.2247449e+00 1.5620499e+00 1.8275667e+00 1.0816654e+00 8.0622577e-01 1.8841444e+00 7.1414284e-01 6.0000000e-01 5.8309519e-01 3.4641016e-01 1.9748418e+00 6.7823300e-01 1.8165902e+00 8.2462113e-01 1.7832555e+00 1.1000000e+00 1.4966630e+00 2.5961510e+00 1.3379088e+00 2.1213203e+00 1.4866069e+00 2.2427661e+00 9.0000000e-01 9.5916630e-01 1.3379088e+00 9.6436508e-01 1.1747340e+00 1.1958261e+00 1.0630146e+00 2.8722813e+00 2.9698485e+00 9.0553851e-01 1.6431677e+00 8.6023253e-01 2.7147744e+00 6.1644140e-01 1.4662878e+00 1.8357560e+00 5.0000000e-01 5.0000000e-01 1.2727922e+00 1.6401219e+00 2.0566964e+00 2.7349589e+00 1.3304135e+00 5.8309519e-01 1.0770330e+00 2.3706539e+00 1.4832397e+00 1.0344080e+00 4.5825757e-01 1.3341664e+00 1.5394804e+00 1.3076697e+00 8.2462113e-01 1.7406895e+00 1.6941074e+00 1.2369317e+00 8.3666003e-01 9.3808315e-01 1.2727922e+00 6.7082039e-01 1.1224972e+00 3.1622777e-01 4.5825757e-01 3.8729833e-01 5.9160798e-01 1.2288206e+00 2.6457513e-01 2.5357445e+00 1.3076697e+00 2.5039968e+00 1.8055470e+00 2.2113344e+00 3.3120990e+00 1.1489125e+00 2.8266588e+00 2.1023796e+00 3.0099834e+00 1.6431677e+00 1.5968719e+00 2.0542639e+00 1.2884099e+00 1.6401219e+00 1.9026298e+00 1.8055470e+00 3.6523965e+00 3.6373067e+00 1.1357817e+00 2.3811762e+00 1.2369317e+00 3.4029399e+00 1.1958261e+00 2.2360680e+00 2.5845696e+00 1.0954451e+00 1.1916375e+00 1.9416488e+00 2.3494680e+00 2.7386128e+00 3.5000000e+00 1.9899749e+00 1.2609520e+00 1.6401219e+00 3.0643107e+00 2.2113344e+00 1.7944358e+00 1.0954451e+00 2.0566964e+00 2.2494444e+00 1.9697716e+00 1.3076697e+00 2.4859606e+00 2.4248711e+00 1.9026298e+00 1.3228757e+00 1.6522712e+00 1.9924859e+00 1.3190906e+00 1.1916375e+00 1.3527749e+00 1.3228757e+00 1.7000000e+00 3.8729833e-01 1.2124356e+00 3.4971417e+00 2.2022716e+00 3.5874782e+00 2.8248894e+00 3.2295511e+00 4.3988635e+00 1.4071247e+00 3.9102430e+00 3.1336879e+00 4.0767634e+00 2.7018512e+00 2.6324893e+00 3.1272992e+00 2.1023796e+00 2.4677925e+00 2.9086079e+00 2.8670542e+00 4.7476310e+00 4.6936127e+00 2.0371549e+00 3.4452866e+00 2.0420578e+00 4.4833024e+00 2.2472205e+00 3.2954514e+00 3.6851052e+00 2.1400935e+00 2.2135944e+00 2.9512709e+00 3.4554305e+00 3.8288379e+00 4.6119410e+00 2.9899833e+00 2.3302360e+00 2.5980762e+00 4.1605288e+00 3.1859065e+00 2.8425341e+00 2.0928450e+00 3.1416556e+00 3.2832910e+00 3.0298515e+00 2.2022716e+00 3.5355339e+00 3.4496377e+00 2.9461840e+00 2.3302360e+00 2.7110883e+00 
2.9580399e+00 2.2759613e+00 3.3166248e-01 2.2360680e-01 6.4031242e-01 1.3304135e+00 1.7320508e-01 2.3515952e+00 1.1000000e+00 2.4228083e+00 1.6552945e+00 2.0663978e+00 3.2388269e+00 8.8317609e-01 2.7549955e+00 2.0149442e+00 2.9017236e+00 1.5362291e+00 1.4866069e+00 1.9646883e+00 1.0862780e+00 1.4387495e+00 1.7606817e+00 1.6852300e+00 3.5608988e+00 3.5651087e+00 1.0440307e+00 2.2781571e+00 9.9498744e-01 3.3406586e+00 1.1090537e+00 2.1118712e+00 2.5099801e+00 9.8994949e-01 1.0392305e+00 1.8027756e+00 2.3021729e+00 2.6870058e+00 3.4394767e+00 1.8493242e+00 1.1618950e+00 1.4933185e+00 3.0182777e+00 2.0371549e+00 1.6552945e+00 9.2736185e-01 1.9824228e+00 2.1307276e+00 1.9131126e+00 1.1000000e+00 2.3622024e+00 2.2934690e+00 1.8165902e+00 1.2369317e+00 1.5459625e+00 1.8138357e+00 1.1135529e+00 1.4142136e-01 5.2915026e-01 1.4352700e+00 2.4494897e-01 2.3194827e+00 1.1832160e+00 2.3790755e+00 1.6401219e+00 2.0493902e+00 3.1906112e+00 1.1090537e+00 2.7092434e+00 2.0420578e+00 2.8124722e+00 1.4594520e+00 1.5099669e+00 1.9261360e+00 1.2369317e+00 1.5165751e+00 1.7175564e+00 1.6401219e+00 3.4481879e+00 3.5580894e+00 1.2083046e+00 2.2226111e+00 1.0862780e+00 3.3060551e+00 1.1401754e+00 2.0371549e+00 2.4269322e+00 1.0049876e+00 1.0049876e+00 1.8165902e+00 2.2293497e+00 2.6514147e+00 3.3105891e+00 1.8681542e+00 1.1401754e+00 1.5231546e+00 2.9698485e+00 1.9798990e+00 1.5968719e+00 9.0000000e-01 1.9235384e+00 2.1000000e+00 1.8627936e+00 1.1832160e+00 2.3130067e+00 2.2427661e+00 1.7916473e+00 1.3190906e+00 1.5099669e+00 1.7492856e+00 1.1000000e+00 5.0990195e-01 1.4142136e+00 1.4142136e-01 2.2803509e+00 1.1045361e+00 2.3452079e+00 1.6031220e+00 2.0049938e+00 3.1654384e+00 1.0246951e+00 2.6870058e+00 1.9924859e+00 2.7910571e+00 1.4247807e+00 1.4491377e+00 1.8841444e+00 1.1357817e+00 1.4282857e+00 1.6703293e+00 1.6093477e+00 3.4452866e+00 3.5185224e+00 1.1224972e+00 2.1863211e+00 1.0000000e+00 3.2787193e+00 1.0677078e+00 2.0124612e+00 2.4145393e+00 9.3273791e-01 9.5393920e-01 1.7606817e+00 2.2158520e+00 2.6210685e+00 3.3136083e+00 1.8083141e+00 1.1045361e+00 1.4899664e+00 2.9359837e+00 1.9442222e+00 1.5716234e+00 8.4261498e-01 1.8867962e+00 2.0518285e+00 1.8138357e+00 1.1045361e+00 2.2781571e+00 2.2022716e+00 1.7349352e+00 1.2328828e+00 1.4628739e+00 1.7146428e+00 1.0535654e+00 1.7606817e+00 5.4772256e-01 2.1213203e+00 1.0954451e+00 2.0049938e+00 1.3964240e+00 1.7776389e+00 2.8106939e+00 1.4317821e+00 2.3366643e+00 1.7058722e+00 2.4839485e+00 1.1445523e+00 1.2000000e+00 1.5652476e+00 1.1789826e+00 1.4212670e+00 1.4594520e+00 1.3379088e+00 3.1032241e+00 3.1780497e+00 1.0295630e+00 1.8814888e+00 1.1045361e+00 2.9171904e+00 8.1240384e-01 1.7349352e+00 2.0566964e+00 7.1414284e-01 7.9372539e-01 1.5427249e+00 1.8303005e+00 2.2472205e+00 2.9325757e+00 1.5968719e+00 8.3666003e-01 1.3416408e+00 2.5495098e+00 1.7776389e+00 1.3304135e+00 7.4161985e-01 1.5427249e+00 1.7860571e+00 1.4730920e+00 1.0954451e+00 2.0024984e+00 1.9519221e+00 1.4387495e+00 1.0099505e+00 1.1832160e+00 1.5684387e+00 9.9498744e-01 1.3038405e+00 3.6110940e+00 2.3622024e+00 3.6959437e+00 2.9748950e+00 3.3555923e+00 4.5232732e+00 1.6278821e+00 4.0472213e+00 3.3000000e+00 4.1460825e+00 2.7694765e+00 2.7676705e+00 3.2233523e+00 2.2737634e+00 2.5845696e+00 2.9849623e+00 2.9916551e+00 4.8321838e+00 4.8394215e+00 2.2494444e+00 3.5298725e+00 2.1817424e+00 4.6206060e+00 2.3622024e+00 3.3896903e+00 3.7934153e+00 2.2427661e+00 2.3130067e+00 3.0886890e+00 3.5707142e+00 3.9534795e+00 4.6797436e+00 3.1224990e+00 2.4698178e+00 2.8035692e+00 4.2497059e+00 
3.2710854e+00 2.9647934e+00 2.1886069e+00 3.2186954e+00 3.3719431e+00 3.0740852e+00 2.3622024e+00 3.6373067e+00 3.5284558e+00 3.0149627e+00 2.4657656e+00 2.8035692e+00 3.0364453e+00 2.4062419e+00 2.3790755e+00 1.1747340e+00 2.4248711e+00 1.6941074e+00 2.0928450e+00 3.2465366e+00 1.0246951e+00 2.7676705e+00 2.0566964e+00 2.8861739e+00 1.5132746e+00 1.5165751e+00 1.9621417e+00 1.1789826e+00 1.4899664e+00 1.7578396e+00 1.7000000e+00 3.5454196e+00 3.5888717e+00 1.1401754e+00 2.2715633e+00 1.0677078e+00 3.3541020e+00 1.1224972e+00 2.1095023e+00 2.5039968e+00 9.9498744e-01 1.0440307e+00 1.8384776e+00 2.2956481e+00 2.6925824e+00 3.4088121e+00 1.8841444e+00 1.1832160e+00 1.5684387e+00 3.0066593e+00 2.0445048e+00 1.6703293e+00 9.3273791e-01 1.9646883e+00 2.1330729e+00 1.8788294e+00 1.1747340e+00 2.3685439e+00 2.2912878e+00 1.8027756e+00 1.2727922e+00 1.5427249e+00 1.8165902e+00 1.1532563e+00 1.3341664e+00 9.4868330e-01 9.0000000e-01 5.0990195e-01 1.5165751e+00 2.3430749e+00 1.3190906e+00 1.1532563e+00 9.5393920e-01 1.0535654e+00 1.1045361e+00 8.6602540e-01 1.5000000e+00 1.1489125e+00 7.4161985e-01 9.3273791e-01 1.6703293e+00 1.8165902e+00 1.8165902e+00 7.0710678e-01 1.4832397e+00 1.7175564e+00 1.4352700e+00 6.4031242e-01 1.1445523e+00 1.4798649e+00 1.3527749e+00 7.6157731e-01 1.3228757e+00 1.3527749e+00 1.7944358e+00 7.1414284e-01 1.4352700e+00 1.3784049e+00 1.4491377e+00 4.2426407e-01 8.8881944e-01 1.4525839e+00 9.5916630e-01 6.0827625e-01 1.1180340e+00 1.3341664e+00 5.5677644e-01 5.0000000e-01 9.6436508e-01 1.4142136e+00 1.0099505e+00 6.4807407e-01 1.2449900e+00 1.5684387e+00 7.4161985e-01 1.0770330e+00 2.3706539e+00 1.1180340e+00 1.9339080e+00 1.1618950e+00 2.0322401e+00 8.6602540e-01 6.3245553e-01 1.1357817e+00 2.6457513e-01 5.0990195e-01 9.0000000e-01 8.6602540e-01 2.7331301e+00 2.6495283e+00 6.7823300e-01 1.4071247e+00 3.1622777e-01 2.4879711e+00 5.4772256e-01 1.2529964e+00 1.7406895e+00 5.1961524e-01 4.7958315e-01 8.1240384e-01 1.6217275e+00 1.8894444e+00 2.7055499e+00 8.4261498e-01 6.4807407e-01 7.7459667e-01 2.2045408e+00 1.1135529e+00 8.3066239e-01 4.7958315e-01 1.2247449e+00 1.2124356e+00 1.2369317e+00 0.0000000e+00 1.4317821e+00 1.3747727e+00 1.0344080e+00 5.4772256e-01 7.7459667e-01 9.4868330e-01 3.3166248e-01 9.1104336e-01 6.1644140e-01 8.6023253e-01 2.6851443e+00 5.4772256e-01 7.1414284e-01 7.5498344e-01 1.0246951e+00 9.8994949e-01 5.0000000e-01 1.7406895e+00 1.5684387e+00 9.6436508e-01 7.8102497e-01 1.2845233e+00 1.2489996e+00 1.7378147e+00 4.0000000e-01 1.8165902e+00 1.0246951e+00 1.3490738e+00 5.3851648e-01 3.8729833e-01 1.4662878e+00 1.4456832e+00 7.8740079e-01 5.1961524e-01 4.5825757e-01 1.2409674e+00 7.9372539e-01 1.2961481e+00 1.3190906e+00 6.6332496e-01 9.8994949e-01 8.6602540e-01 1.5842980e+00 5.4772256e-01 5.9160798e-01 8.5440037e-01 1.5684387e+00 4.1231056e-01 6.7082039e-01 8.3066239e-01 1.3190906e+00 9.2736185e-01 1.1224972e+00 1.4730920e+00 5.0000000e-01 1.6703293e+00 1.8275667e+00 1.2206556e+00 6.0000000e-01 1.4282857e+00 6.4807407e-01 3.8729833e-01 6.0000000e-01 9.5916630e-01 9.3273791e-01 6.6332496e-01 2.4494897e-01 2.0346990e+00 1.9974984e+00 1.0148892e+00 8.4261498e-01 1.0148892e+00 1.7944358e+00 7.2801099e-01 6.4807407e-01 1.0295630e+00 8.1240384e-01 7.3484692e-01 3.3166248e-01 9.4868330e-01 1.2165525e+00 2.0124612e+00 4.2426407e-01 5.9160798e-01 5.3851648e-01 1.5716234e+00 7.8102497e-01 2.4494897e-01 8.6023253e-01 7.2801099e-01 7.4833148e-01 9.4868330e-01 7.4161985e-01 8.2462113e-01 9.0553851e-01 7.6157731e-01 7.2801099e-01 5.0000000e-01 7.4161985e-01 
6.4807407e-01 1.3638182e+00 2.1794495e+00 1.0295630e+00 6.7082039e-01 1.0148892e+00 7.5498344e-01 6.6332496e-01 4.3588989e-01 1.2529964e+00 1.0295630e+00 5.5677644e-01 5.0000000e-01 1.7000000e+00 1.6792856e+00 1.4212670e+00 4.6904158e-01 1.3038405e+00 1.5264338e+00 1.0488088e+00 3.8729833e-01 8.5440037e-01 1.1357817e+00 1.0630146e+00 3.1622777e-01 9.2195445e-01 1.0148892e+00 1.7320508e+00 3.0000000e-01 1.0295630e+00 1.0000000e+00 1.2409674e+00 5.2915026e-01 5.1961524e-01 1.1874342e+00 5.8309519e-01 3.6055513e-01 8.1853528e-01 1.0770330e+00 3.8729833e-01 4.7958315e-01 6.4031242e-01 1.0099505e+00 6.3245553e-01 6.4807407e-01 1.0049876e+00 3.4799425e+00 5.2915026e-01 1.3379088e+00 9.6436508e-01 1.8734994e+00 1.8055470e+00 1.3601471e+00 2.5357445e+00 2.3706539e+00 1.7916473e+00 1.5842980e+00 8.1853528e-01 5.4772256e-01 2.4738634e+00 1.1747340e+00 2.6343880e+00 2.6457513e-01 2.1817424e+00 1.3076697e+00 8.0622577e-01 2.3086793e+00 2.2869193e+00 1.5748016e+00 1.0246951e+00 6.0827625e-01 8.8317609e-01 1.5779734e+00 2.0832667e+00 1.9748418e+00 5.4772256e-01 1.7146428e+00 1.6583124e+00 2.4269322e+00 1.3928388e+00 1.3820275e+00 1.6703293e+00 2.3706539e+00 1.1000000e+00 1.3674794e+00 1.6763055e+00 2.1307276e+00 1.7832555e+00 1.8973666e+00 2.2869193e+00 3.0282008e+00 2.2226111e+00 3.1144823e+00 1.8708287e+00 1.7233688e+00 2.2405357e+00 9.8994949e-01 1.3228757e+00 1.9339080e+00 1.9544820e+00 3.8236109e+00 3.7376463e+00 1.2609520e+00 2.5079872e+00 9.1104336e-01 3.5860842e+00 1.4730920e+00 2.3409400e+00 2.8354894e+00 1.3711309e+00 1.3638182e+00 1.9261360e+00 2.6907248e+00 2.9899833e+00 3.7934153e+00 1.9493589e+00 1.5652476e+00 1.6583124e+00 3.3181320e+00 2.1142375e+00 1.9026298e+00 1.2489996e+00 2.3086793e+00 2.3021729e+00 2.2538855e+00 1.1180340e+00 2.5337719e+00 2.4413111e+00 2.0832667e+00 1.5000000e+00 1.8411953e+00 1.9157244e+00 1.2727922e+00 8.7749644e-01 1.0148892e+00 1.4866069e+00 1.3638182e+00 9.9498744e-01 2.1095023e+00 2.0149442e+00 1.4662878e+00 1.1357817e+00 1.1357817e+00 9.2736185e-01 1.9899749e+00 9.2736185e-01 2.2135944e+00 6.0827625e-01 1.7320508e+00 9.8488578e-01 4.3588989e-01 1.8627936e+00 1.8466185e+00 1.1832160e+00 5.5677644e-01 2.6457513e-01 1.1045361e+00 1.2124356e+00 1.5937377e+00 1.4764823e+00 6.7823300e-01 1.4491377e+00 1.2206556e+00 1.9874607e+00 1.0488088e+00 1.1180340e+00 1.3747727e+00 1.9339080e+00 8.6602540e-01 1.1704700e+00 1.3527749e+00 1.6911535e+00 1.3784049e+00 1.5874508e+00 1.8466185e+00 1.4282857e+00 1.0295630e+00 6.2449980e-01 6.6332496e-01 1.2961481e+00 1.3228757e+00 1.0392305e+00 6.1644140e-01 1.9131126e+00 1.5716234e+00 1.1445523e+00 8.8881944e-01 1.4662878e+00 1.3928388e+00 1.0049876e+00 8.6023253e-01 8.8317609e-01 1.1575837e+00 1.1916375e+00 5.5677644e-01 7.3484692e-01 8.2462113e-01 1.8788294e+00 6.1644140e-01 9.1104336e-01 7.5498344e-01 1.2609520e+00 1.1704700e+00 7.3484692e-01 1.3190906e+00 8.0622577e-01 8.7177979e-01 1.0677078e+00 1.1618950e+00 8.7177979e-01 1.0677078e+00 9.2736185e-01 9.0000000e-01 8.3066239e-01 1.2124356e+00 1.1747340e+00 1.3784049e+00 1.5652476e+00 1.0198039e+00 2.2181073e+00 1.9000000e+00 1.2165525e+00 1.3038405e+00 8.6023253e-01 1.3892444e+00 2.3685439e+00 6.7082039e-01 2.2113344e+00 1.2247449e+00 1.8841444e+00 8.1240384e-01 8.1240384e-01 1.9544820e+00 1.8708287e+00 1.3000000e+00 1.1224972e+00 1.0198039e+00 9.3273791e-01 1.2727922e+00 1.8574176e+00 1.9157244e+00 8.0622577e-01 1.0535654e+00 1.3190906e+00 1.9949937e+00 9.9498744e-01 8.7177979e-01 1.1747340e+00 2.0322401e+00 6.3245553e-01 7.0710678e-01 1.2083046e+00 1.8947295e+00 
1.3820275e+00 1.2529964e+00 1.8814888e+00 5.5677644e-01 5.4772256e-01 1.0677078e+00 9.0000000e-01 3.7416574e-01 4.8989795e-01 2.0976177e+00 2.2649503e+00 1.2288206e+00 7.8102497e-01 1.0049876e+00 2.0396078e+00 6.0827625e-01 6.4807407e-01 1.1575837e+00 6.1644140e-01 5.2915026e-01 6.5574385e-01 1.0862780e+00 1.4071247e+00 2.0024984e+00 6.7823300e-01 6.7082039e-01 1.0630146e+00 1.6031220e+00 7.0000000e-01 4.6904158e-01 6.4807407e-01 5.1961524e-01 6.7823300e-01 5.0990195e-01 8.6602540e-01 9.0553851e-01 8.1240384e-01 4.2426407e-01 7.4161985e-01 2.2360680e-01 5.5677644e-01 6.6332496e-01 5.7445626e-01 7.9372539e-01 8.1240384e-01 6.4031242e-01 3.8729833e-01 2.2248595e+00 2.1023796e+00 8.1240384e-01 9.0553851e-01 9.0553851e-01 1.9157244e+00 4.2426407e-01 8.0622577e-01 1.1789826e+00 5.5677644e-01 5.9160798e-01 3.7416574e-01 1.0344080e+00 1.2845233e+00 2.1633308e+00 4.3588989e-01 4.6904158e-01 6.6332496e-01 1.6062378e+00 9.1651514e-01 4.5825757e-01 7.1414284e-01 6.7823300e-01 7.6811457e-01 7.8102497e-01 6.3245553e-01 9.6436508e-01 9.8488578e-01 5.9160798e-01 3.7416574e-01 3.4641016e-01 8.3666003e-01 6.2449980e-01 1.3114877e+00 1.1357817e+00 5.2915026e-01 4.2426407e-01 1.7029386e+00 1.7233688e+00 1.3747727e+00 3.6055513e-01 1.3601471e+00 1.5165751e+00 8.8881944e-01 3.7416574e-01 7.3484692e-01 9.8994949e-01 9.6953597e-01 4.5825757e-01 7.0710678e-01 8.9442719e-01 1.6340135e+00 4.6904158e-01 9.0000000e-01 1.0723805e+00 1.1000000e+00 7.1414284e-01 5.0990195e-01 1.1045361e+00 1.7320508e-01 3.4641016e-01 4.6904158e-01 1.1357817e+00 4.8989795e-01 5.4772256e-01 3.7416574e-01 8.8881944e-01 4.3588989e-01 7.5498344e-01 1.0295630e+00 5.1961524e-01 1.0770330e+00 1.0862780e+00 2.9359837e+00 2.7766887e+00 6.5574385e-01 1.5842980e+00 3.3166248e-01 2.6419690e+00 6.7082039e-01 1.4628739e+00 1.9442222e+00 6.4807407e-01 6.7823300e-01 9.7467943e-01 1.8165902e+00 2.0493902e+00 2.9137605e+00 9.8994949e-01 8.4261498e-01 9.4339811e-01 2.3558438e+00 1.3000000e+00 1.0677078e+00 6.4807407e-01 1.4035669e+00 1.3711309e+00 1.3784049e+00 2.6457513e-01 1.6124515e+00 1.5427249e+00 1.1747340e+00 6.0827625e-01 9.6436508e-01 1.1445523e+00 5.8309519e-01 7.5498344e-01 1.0246951e+00 2.6851443e+00 2.6267851e+00 1.1045361e+00 1.3190906e+00 4.8989795e-01 2.5159491e+00 8.1240384e-01 1.2288206e+00 1.8138357e+00 7.8102497e-01 7.2801099e-01 8.3666003e-01 1.7691806e+00 1.9519221e+00 2.6944387e+00 8.0622577e-01 1.0295630e+00 1.1747340e+00 2.1587033e+00 9.2736185e-01 9.8488578e-01 7.2801099e-01 1.2165525e+00 1.0723805e+00 1.1445523e+00 5.0990195e-01 1.3453624e+00 1.1958261e+00 9.3273791e-01 7.7459667e-01 8.3666003e-01 7.8740079e-01 6.4031242e-01 5.8309519e-01 2.0049938e+00 2.1470911e+00 1.3747727e+00 6.4031242e-01 1.0246951e+00 1.9748418e+00 8.1853528e-01 5.4772256e-01 1.1747340e+00 8.3666003e-01 7.3484692e-01 5.3851648e-01 1.1916375e+00 1.4000000e+00 1.9773720e+00 5.0990195e-01 9.2195445e-01 1.1618950e+00 1.5394804e+00 3.8729833e-01 5.4772256e-01 8.3666003e-01 5.5677644e-01 4.4721360e-01 5.4772256e-01 9.0000000e-01 7.2111026e-01 5.4772256e-01 3.7416574e-01 8.6602540e-01 3.8729833e-01 3.0000000e-01 7.6157731e-01 1.9183326e+00 1.9519221e+00 1.1090537e+00 7.0000000e-01 1.1180340e+00 1.7204651e+00 7.0000000e-01 5.0990195e-01 8.8317609e-01 7.8740079e-01 7.2111026e-01 3.8729833e-01 7.8740079e-01 1.1045361e+00 1.8574176e+00 4.6904158e-01 5.7445626e-01 7.0000000e-01 1.4317821e+00 7.5498344e-01 1.4142136e-01 8.6023253e-01 5.1961524e-01 6.4807407e-01 7.6157731e-01 8.6602540e-01 7.3484692e-01 8.1240384e-01 6.1644140e-01 7.4161985e-01 3.6055513e-01 
7.1414284e-01 7.2111026e-01 1.2206556e+00 2.9715316e+00 1.4177447e+00 2.9478806e+00 1.0198039e+00 2.5632011e+00 1.5033296e+00 1.1224972e+00 2.6495283e+00 2.5690465e+00 1.9773720e+00 1.4352700e+00 1.2409674e+00 4.1231056e-01 1.9748418e+00 2.4515301e+00 2.4186773e+00 1.0049876e+00 1.8357560e+00 1.9442222e+00 2.7018512e+00 1.6822604e+00 1.6552945e+00 1.9235384e+00 2.7331301e+00 1.3490738e+00 1.5297059e+00 1.9748418e+00 2.5748786e+00 2.0904545e+00 2.0273135e+00 2.5690465e+00 2.7018512e+00 1.5620499e+00 2.9223278e+00 4.1231056e-01 2.4939928e+00 1.7233688e+00 1.2922848e+00 2.6362853e+00 2.6400758e+00 1.8601075e+00 1.4525839e+00 9.6436508e-01 1.3490738e+00 1.8520259e+00 2.4248711e+00 2.2494444e+00 8.9442719e-01 2.0736441e+00 2.0371549e+00 2.7766887e+00 1.7832555e+00 1.7175564e+00 2.0322401e+00 2.6495283e+00 1.4730920e+00 1.7233688e+00 2.0124612e+00 2.3958297e+00 2.1400935e+00 2.2671568e+00 2.6248809e+00 1.7146428e+00 8.8317609e-01 2.5278449e+00 6.6332496e-01 1.5968719e+00 1.8788294e+00 7.2801099e-01 8.6602540e-01 1.1135529e+00 1.6522712e+00 1.9209373e+00 2.8948230e+00 1.1704700e+00 6.7823300e-01 7.3484692e-01 2.3194827e+00 1.6431677e+00 1.1445523e+00 8.7749644e-01 1.4628739e+00 1.5716234e+00 1.5066519e+00 6.7823300e-01 1.7578396e+00 1.7860571e+00 1.3453624e+00 5.8309519e-01 1.0862780e+00 1.5099669e+00 8.6602540e-01 1.6062378e+00 1.3747727e+00 1.2247449e+00 3.0000000e-01 6.5574385e-01 1.3076697e+00 1.2529964e+00 6.7823300e-01 7.9372539e-01 8.5440037e-01 1.3928388e+00 6.5574385e-01 1.2328828e+00 1.3490738e+00 9.1651514e-01 6.4807407e-01 7.4161985e-01 1.3820275e+00 3.7416574e-01 2.6457513e-01 6.0827625e-01 1.4071247e+00 2.2360680e-01 3.0000000e-01 5.7445626e-01 1.2247449e+00 7.3484692e-01 7.8740079e-01 1.2845233e+00 2.7658633e+00 7.3484692e-01 1.4525839e+00 1.9924859e+00 6.4031242e-01 5.7445626e-01 1.0677078e+00 1.8894444e+00 2.1656408e+00 2.9223278e+00 1.0816654e+00 8.8317609e-01 1.0677078e+00 2.4454039e+00 1.2247449e+00 1.0630146e+00 5.0000000e-01 1.4282857e+00 1.3964240e+00 1.3820275e+00 3.1622777e-01 1.6401219e+00 1.5329710e+00 1.1958261e+00 7.7459667e-01 9.6953597e-01 1.0295630e+00 4.5825757e-01 2.2912878e+00 1.5033296e+00 9.6953597e-01 2.4289916e+00 2.4248711e+00 1.7058722e+00 1.1224972e+00 6.7823300e-01 1.0630146e+00 1.7146428e+00 2.1840330e+00 2.0420578e+00 7.0000000e-01 1.9209373e+00 1.8055470e+00 2.5651511e+00 1.5588457e+00 1.5684387e+00 1.8384776e+00 2.4879711e+00 1.3038405e+00 1.5811388e+00 1.8384776e+00 2.2248595e+00 1.9313208e+00 2.0952327e+00 2.4248711e+00 1.1180340e+00 1.5066519e+00 1.7320508e-01 3.6055513e-01 7.7459667e-01 1.3228757e+00 1.6340135e+00 2.4617067e+00 8.1853528e-01 3.7416574e-01 8.3666003e-01 1.9339080e+00 1.1575837e+00 7.2801099e-01 4.3588989e-01 9.2736185e-01 1.0816654e+00 9.0000000e-01 5.4772256e-01 1.3228757e+00 1.2845233e+00 7.6811457e-01 2.4494897e-01 5.0990195e-01 1.0000000e+00 5.3851648e-01 6.6332496e-01 1.1832160e+00 1.0862780e+00 5.9160798e-01 7.7459667e-01 9.6953597e-01 1.4798649e+00 6.0000000e-01 1.0630146e+00 1.1618950e+00 1.1357817e+00 5.1961524e-01 5.0990195e-01 1.2165525e+00 4.1231056e-01 3.7416574e-01 6.9282032e-01 1.2529964e+00 3.1622777e-01 4.0000000e-01 6.1644140e-01 1.1532563e+00 6.2449980e-01 6.2449980e-01 1.0862780e+00 1.6124515e+00 1.5684387e+00 1.0246951e+00 3.4641016e-01 4.6904158e-01 1.0246951e+00 1.0583005e+00 1.3674794e+00 1.3747727e+00 7.4161985e-01 1.1704700e+00 9.4868330e-01 1.7088007e+00 7.4161985e-01 8.8317609e-01 1.0770330e+00 1.7406895e+00 6.4807407e-01 9.1651514e-01 1.0862780e+00 1.5198684e+00 1.1000000e+00 1.2845233e+00 
1.5937377e+00 2.4494897e-01 8.7749644e-01 1.4422205e+00 1.7720045e+00 2.5475478e+00 9.1651514e-01 4.3588989e-01 9.2195445e-01 2.0566964e+00 1.1704700e+00 7.8740079e-01 2.8284271e-01 1.0148892e+00 1.1575837e+00 9.5916630e-01 5.1961524e-01 1.4071247e+00 1.3416408e+00 8.3666003e-01 3.8729833e-01 5.7445626e-01 9.8488578e-01 4.6904158e-01 8.4261498e-01 1.4352700e+00 1.7832555e+00 2.4839485e+00 8.8317609e-01 4.5825757e-01 9.0000000e-01 2.0615528e+00 1.0246951e+00 6.7823300e-01 1.4142136e-01 9.9498744e-01 1.1045361e+00 9.6953597e-01 4.7958315e-01 1.3341664e+00 1.2569805e+00 8.3666003e-01 5.5677644e-01 5.3851648e-01 8.1853528e-01 2.8284271e-01 9.8488578e-01 1.1357817e+00 1.9748418e+00 1.0000000e-01 7.8740079e-01 7.8740079e-01 1.4212670e+00 6.7823300e-01 4.3588989e-01 9.6436508e-01 6.1644140e-01 5.1961524e-01 7.9372539e-01 8.1240384e-01 6.7082039e-01 7.1414284e-01 5.7445626e-01 7.0710678e-01 4.6904158e-01 6.9282032e-01 7.9372539e-01 5.0990195e-01 1.2845233e+00 1.0392305e+00 1.1618950e+00 1.2041595e+00 9.1104336e-01 1.2845233e+00 8.8317609e-01 1.5748016e+00 7.1414284e-01 9.6953597e-01 1.0392305e+00 1.6217275e+00 8.3666003e-01 1.0770330e+00 1.0488088e+00 1.3379088e+00 1.0049876e+00 1.3453624e+00 1.4899664e+00 1.1618950e+00 1.1575837e+00 1.5394804e+00 1.4933185e+00 5.3851648e-01 1.4387495e+00 1.2083046e+00 1.9235384e+00 9.3273791e-01 1.0392305e+00 1.2247449e+00 1.8894444e+00 8.4852814e-01 1.1224972e+00 1.2247449e+00 1.5842980e+00 1.2922848e+00 1.5652476e+00 1.8165902e+00 1.9824228e+00 2.3452079e+00 2.3832751e+00 9.2736185e-01 1.8761663e+00 1.8947295e+00 2.6172505e+00 1.5811388e+00 1.6522712e+00 1.8083141e+00 2.7055499e+00 1.3820275e+00 1.5588457e+00 1.9000000e+00 2.4939928e+00 2.0099751e+00 2.0346990e+00 2.5238859e+00 8.6602540e-01 8.7749644e-01 1.4106736e+00 6.4031242e-01 5.0990195e-01 1.0000000e+00 6.2449980e-01 4.6904158e-01 7.7459667e-01 8.4261498e-01 6.4807407e-01 6.6332496e-01 5.4772256e-01 7.4161985e-01 5.0000000e-01 6.7082039e-01 8.3666003e-01 5.8309519e-01 1.9078784e+00 1.1916375e+00 5.9160798e-01 5.5677644e-01 9.4868330e-01 1.1445523e+00 1.0440307e+00 6.4807407e-01 1.3000000e+00 1.3304135e+00 9.2195445e-01 5.0990195e-01 5.8309519e-01 1.0488088e+00 5.3851648e-01 1.9442222e+00 1.2961481e+00 7.1414284e-01 9.8488578e-01 1.1916375e+00 1.2688578e+00 1.3964240e+00 7.7459667e-01 1.3228757e+00 1.4387495e+00 1.2206556e+00 8.1240384e-01 9.1651514e-01 1.2247449e+00 7.8102497e-01 1.5427249e+00 1.5198684e+00 2.1977261e+00 1.0862780e+00 1.1269428e+00 1.2845233e+00 2.2045408e+00 9.4339811e-01 1.1357817e+00 1.3453624e+00 1.8920888e+00 1.5297059e+00 1.7029386e+00 2.1189620e+00 6.8556546e-01 1.1180340e+00 7.6157731e-01 5.0000000e-01 8.4261498e-01 1.1135529e+00 6.2449980e-01 4.3588989e-01 7.0000000e-01 1.1916375e+00 7.2111026e-01 2.4494897e-01 9.6436508e-01 8.1240384e-01 5.9160798e-01 6.7823300e-01 8.1240384e-01 8.3066239e-01 7.6157731e-01 8.1240384e-01 6.6332496e-01 7.9372539e-01 3.8729833e-01 6.2449980e-01 6.4807407e-01 1.1269428e+00 1.2247449e+00 1.0770330e+00 4.7958315e-01 1.4628739e+00 1.3711309e+00 9.4868330e-01 6.2449980e-01 6.7082039e-01 9.0000000e-01 3.1622777e-01 4.1231056e-01 3.6055513e-01 1.2247449e+00 5.5677644e-01 5.7445626e-01 3.6055513e-01 9.5916630e-01 4.6904158e-01 7.8740079e-01 1.0908712e+00 5.4772256e-01 1.2124356e+00 3.4641016e-01 2.4494897e-01 4.2426407e-01 1.0630146e+00 6.0827625e-01 6.2449980e-01 1.1224972e+00 1.2369317e+00 8.1240384e-01 6.9282032e-01 2.4494897e-01 9.4339811e-01 5.1961524e-01 8.1853528e-01 1.1224972e+00 1.4317821e+00 1.3747727e+00 1.0344080e+00 5.4772256e-01 
7.7459667e-01 9.4868330e-01 3.3166248e-01 3.1622777e-01 7.3484692e-01 1.3076697e+00 8.4261498e-01 8.0622577e-01 1.3190906e+00 6.1644140e-01 1.2845233e+00 7.9372539e-01 6.2449980e-01 1.2569805e+00 7.8102497e-01 3.6055513e-01 6.7082039e-01 9.4868330e-01 5.8309519e-01 1.0677078e+00 6.5574385e-01 6.1644140e-01 6.4031242e-01 7.6811457e-01 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-euclidean-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-euclidean-ml.txt new file mode 100644 index 0000000..1b75520 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-euclidean-ml.txt @@ -0,0 +1 @@ + 4.0515260e+00 4.2121458e+00 3.7357405e+00 4.2313317e+00 3.9136009e+00 4.3843298e+00 3.9811426e+00 4.3624182e+00 4.0642508e+00 4.2105933e+00 4.0747226e+00 3.9068586e+00 4.1637004e+00 4.4303203e+00 4.1841564e+00 4.1063279e+00 4.1862390e+00 4.0719925e+00 4.2227579e+00 4.3173531e+00 3.8811067e+00 3.7577567e+00 4.0623722e+00 3.9882453e+00 4.0432671e+00 3.9085109e+00 4.0283414e+00 4.0846110e+00 3.6459235e+00 3.9544001e+00 4.1134244e+00 4.1805752e+00 3.5121011e+00 4.2747789e+00 4.1048323e+00 3.9269426e+00 3.8932032e+00 3.8281172e+00 3.7288430e+00 4.0863477e+00 4.1527428e+00 4.1646409e+00 4.2027433e+00 3.8441594e+00 4.8419117e+00 4.2455384e+00 3.7622220e+00 4.3967923e+00 4.4663183e+00 4.0435853e+00 4.0421692e+00 4.3124625e+00 4.6499961e+00 4.5595743e+00 3.4230430e+00 4.2612266e+00 3.5676603e+00 4.0866580e+00 4.2307103e+00 3.8521940e+00 3.9951183e+00 3.1022409e+00 3.7290193e+00 4.1931517e+00 4.1127027e+00 3.6633651e+00 4.0235815e+00 3.9729858e+00 4.1980132e+00 4.1579993e+00 3.9948955e+00 3.9081966e+00 3.9031152e+00 3.5069036e+00 4.0015727e+00 3.6763496e+00 3.6614339e+00 3.6227109e+00 3.7357992e+00 4.0170026e+00 3.5216829e+00 3.9322227e+00 3.9094621e+00 4.0170286e+00 4.3264246e+00 4.3435483e+00 4.0788635e+00 4.4761765e+00 3.8468186e+00 4.1490333e+00 4.2800007e+00 4.2260191e+00 4.3031858e+00 4.1897413e+00 4.0530244e+00 3.5893641e+00 4.2186615e+00 3.7979503e+00 4.0915473e+00 4.1343073e+00 4.5063851e+00 3.6394889e+00 4.2508448e+00 3.7160826e+00 4.0105262e+00 4.1578269e+00 4.0290590e+00 3.6971819e+00 3.9414087e+00 4.2522313e+00 4.4091714e+00 4.1542292e+00 3.9594691e+00 4.0923600e+00 4.0855497e+00 3.8253075e+00 4.3034717e+00 4.0976731e+00 4.1316523e+00 4.0872717e+00 4.2643353e+00 3.8887280e+00 3.9411273e+00 3.8848001e+00 4.3481996e+00 3.8716733e+00 3.9084684e+00 3.7546361e+00 3.9354816e+00 3.8293694e+00 3.7568515e+00 3.7184961e+00 3.8404278e+00 4.2570811e+00 4.1423777e+00 4.0291411e+00 4.2094682e+00 3.6127418e+00 4.0459839e+00 3.7737985e+00 3.7647653e+00 3.9762006e+00 3.8999512e+00 3.8509090e+00 3.8975941e+00 3.8432839e+00 4.2109046e+00 4.1339124e+00 3.5898873e+00 4.0794519e+00 4.3504966e+00 3.8862612e+00 3.8332931e+00 4.2190310e+00 4.1366595e+00 3.7220268e+00 4.1250795e+00 3.3169452e+00 4.0757181e+00 3.6487114e+00 3.9513724e+00 4.0735549e+00 3.9137880e+00 3.9656942e+00 3.7724953e+00 4.0505153e+00 3.9062302e+00 4.5671852e+00 3.7542175e+00 4.3731708e+00 3.6733907e+00 4.4667545e+00 4.1004635e+00 4.0530038e+00 4.0346958e+00 4.2145752e+00 4.4298637e+00 4.2982360e+00 4.0878239e+00 4.4061563e+00 4.2115971e+00 3.8263277e+00 3.8603258e+00 3.8572375e+00 4.1051910e+00 4.3787786e+00 4.5309659e+00 4.0047055e+00 4.1308854e+00 3.6283561e+00 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-hamming-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-hamming-ml.txt new 
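For orientation: the pdist-*.txt files vendored in this part of the diff are test fixtures for scipy.spatial.distance.pdist. Each one stores a condensed distance vector, one value per unordered pair of observations, written as a single whitespace-separated line. Below is a minimal sketch of how such a fixture could be regenerated; the input matrix X, the seed, and the output filenames are illustrative assumptions, not taken from this repository.

# Minimal sketch (not the repo's own tooling): write pdist-style fixtures.
# X is illustrative random data; the observations behind the vendored
# files are not part of this diff. The "jensenshannon" metric needs
# scipy >= 1.2 (the last release line that still supports Python 2.7).
import numpy as np
from scipy.spatial.distance import pdist, squareform

np.random.seed(0)
X = np.random.rand(20, 10)                # 20 observations, 10 features

for metric in ("euclidean", "hamming", "jaccard", "jensenshannon"):
    # hamming/jaccard are defined on boolean vectors, so threshold first
    data = (X > 0.5) if metric in ("hamming", "jaccard") else X
    d = pdist(data, metric=metric)        # condensed vector: 20*19/2 = 190 values
    with open("pdist-%s-example.txt" % metric, "w") as f:
        f.write(" ".join("%.7e" % v for v in d) + "\n")

# squareform() recovers the full symmetric 20x20 matrix when a test
# needs to index a specific pair of observations directly.
D = squareform(pdist(X, metric="euclidean"))
assert D.shape == (20, 20) and D[3, 7] == D[7, 3]

The %.7e formatting matches the notation visible in the fixtures themselves (e.g. 4.0515260e+00).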
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-hamming-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-hamming-ml.txt
new file mode 100644
index 0000000..bc4e1dd
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-hamming-ml.txt
@@ -0,0 +1 @@
+[... one line of normalized Hamming distances, each in [0, 1], omitted ...]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-jaccard-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-jaccard-ml.txt
new file mode 100644
index 0000000..a7570d8
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-jaccard-ml.txt
@@ -0,0 +1 @@
+[... one line of Jaccard distances, each in [0, 1], omitted ...]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt
new file mode 100644
index 0000000..da698cf
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt
@@ -0,0 +1 @@
+[... one line of Jensen-Shannon distances computed on the iris data, omitted; the line continues past the end of this excerpt ...]
0.251732773763 0.228838671706 0.221440978506 0.224753704594 0.235866097652 0.213724598783 0.215380278296 0.210994508636 0.209321687173 0.206162323946 0.231346876087 0.208300913946 0.225928825898 0.201080283138 0.234037288778 0.20558958438 0.222856381147 0.226751911512 0.223989137906 0.212135251626 0.205214537461 0.213595258296 0.2273163389 0.21417146415 0.226280867303 0.225846388312 0.226229092302 0.220705781832 0.225781259333 0.213928177909 0.218913775078 0.212729498479 0.0122225873329 0.0303322491715 0.0105162282326 0.0130161869455 0.0294681125528 0.031169289575 0.0445674903412 0.0424428041907 0.0471592401326 0.0242919299586 0.0151911030837 0.0250458377851 0.0159588466859 0.0393890228389 0.0501712969027 0.0573610210315 0.0347635318983 0.0244162409612 0.040396894165 0.00489128055225 0.0130158047868 0.0162739668734 0.0181749821427 0.0399100872217 0.0416234365466 0.032217222914 0.0303322491715 0.0260818708259 0.0277812891175 0.0303322491715 0.0068887146948 0.00350057866506 0.0298024545939 0.056647752597 0.0131029257559 0.0689399130874 0.0438694576111 0.0295733202179 0.0148293487362 0.005187559828 0.00994170320116 0.00857601759458 0.181761248694 0.187453410696 0.193365623784 0.206702523841 0.20011934013 0.199105672278 0.194191029325 0.176999836949 0.188302627791 0.199111466491 0.198126208998 0.192301121465 0.193121251528 0.199652739338 0.175765384319 0.180985589904 0.202947496477 0.178262931918 0.22241465847 0.18663850283 0.211489015658 0.182706386145 0.218266373994 0.195039865626 0.18346554188 0.184856636434 0.197755074291 0.207324505424 0.200404409787 0.166355990223 0.188830148216 0.181378679541 0.182125858248 0.222660838403 0.205968099039 0.191576935017 0.19121911341 0.207887116649 0.185081160877 0.199380829938 0.201554837448 0.194543615891 0.187991163757 0.179524780118 0.19631300534 0.181726234556 0.188970696545 0.185468734774 0.168191837054 0.18953883133 0.246767069406 0.235545780286 0.231684267956 0.229269594484 0.239724337717 0.237749571543 0.236212316891 0.230682790909 0.240645700931 0.230405224861 0.216420479968 0.231715542723 0.228337424754 0.244594612682 0.249193956399 0.230909382279 0.222229420235 0.221427946934 0.257965809127 0.234573740528 0.231288571496 0.235259005866 0.241607850531 0.22220070955 0.225060949621 0.218149643482 0.218415896436 0.21585883122 0.24017332069 0.214149237729 0.232167950695 0.20925074028 0.243199182069 0.21253980165 0.228281003551 0.23468620257 0.235446691383 0.22074773222 0.215228869367 0.222922571895 0.237553097068 0.224606387466 0.235545780286 0.235534393045 0.23712429287 0.230940484692 0.233917063813 0.223401305554 0.230524046304 0.222353817706 0.0346270873711 0.0222428303765 0.0130402644662 0.0348466257078 0.042829178942 0.0549568638218 0.0464424131108 0.050271964255 0.0260503884653 0.0122224116333 0.0282180922789 0.011864843965 0.0376053109772 0.060340947297 0.0487164281844 0.0285013587126 0.0163929694658 0.0334500762426 0.015996373074 0.0216948948211 0.00981357617646 0.00897169179454 0.0351677992971 0.0524329239391 0.0436506734112 0.0346270873711 0.0336931600411 0.03612022228 0.0346270873711 0.0123155461924 0.0128144174355 0.033561339537 0.0480641708164 0.0212422700005 0.0628734973132 0.0366806543871 0.0231396795589 0.0230741684915 0.0123098386274 0.0219470818919 0.0170687771133 0.170097980345 0.176050748737 0.181762587161 0.195138740624 0.188553715994 0.187634353283 0.182931238354 0.165421712546 0.176600578903 0.187912731345 0.186408453678 0.181027641681 0.181322068987 0.188146844457 0.16448627936 0.169388121003 0.191757508168 0.166524283083 
0.21084441712 0.17494262847 0.200480456222 0.171127562134 0.206682857942 0.183402734812 0.171811556423 0.173255302855 0.186064001811 0.195873759085 0.189008503285 0.154586199139 0.177136590503 0.16961214015 0.170511848112 0.211247624618 0.194873193684 0.18053463387 0.179673493778 0.196193226513 0.173758955601 0.187869379999 0.19001738823 0.183074417621 0.176342969787 0.167870586049 0.184832516957 0.170273119593 0.177542554096 0.173865162824 0.156824879402 0.178069468543 0.235952767235 0.224377142544 0.220334312937 0.217941307072 0.228582134377 0.226295522057 0.225204079848 0.219149120214 0.229153457047 0.219423743811 0.20530168342 0.220370010221 0.217084867796 0.233463162138 0.238411309618 0.220004052761 0.210872881275 0.210171938201 0.24654768975 0.223020715486 0.220176358035 0.224293060856 0.230089603615 0.210845620347 0.213914309618 0.206670271948 0.207126565579 0.204661896322 0.228953899388 0.202543633745 0.220637247329 0.197849243585 0.232039444595 0.201016080923 0.216786754649 0.223327252818 0.224678101179 0.209454984614 0.204079198003 0.211687536154 0.226538202286 0.213579234366 0.224377142544 0.224425108032 0.226231350025 0.219899730556 0.222577135927 0.212192019374 0.21976273048 0.211209202307 0.0316629149872 0.033505533415 0.00599304401229 0.03147088677 0.0569379974042 0.0709823822809 0.0748230813085 0.0531369685873 0.043263968218 0.0546416139704 0.0249055105449 0.0688386171987 0.0687094353901 0.0814189949505 0.0424145094995 0.0312264925083 0.0671690041442 0.0297074253675 0.0326159908833 0.0331671559449 0.0312681130066 0.0653641254742 0.040141041955 0.0490986378456 0.0 0.0426893927085 0.0408370899258 0.0 0.0368318621027 0.029200211213 0.0578784327643 0.069522041979 0.0411420259607 0.0955605905857 0.0684916485028 0.0536985636371 0.0361283833003 0.0349236407736 0.0326544016244 0.0320790517512 0.190734279342 0.198011093099 0.202165335147 0.215193803756 0.208989236133 0.207943677959 0.205067684162 0.186855165408 0.196324830352 0.210203923195 0.205342231011 0.203427359944 0.198526873832 0.208498767129 0.187766259392 0.1906844311 0.213518747065 0.185598548292 0.229746939288 0.194805777978 0.222821572969 0.192481412892 0.225723793976 0.202375384198 0.192457483521 0.194300696695 0.205376522136 0.216660688755 0.21030774241 0.175307222553 0.197000980201 0.189066511629 0.191539232379 0.231058906578 0.21669613367 0.203523564636 0.200666530244 0.214595210264 0.195827171333 0.208641958794 0.209561099462 0.203960485003 0.196717491572 0.188766309975 0.205678014795 0.191492051166 0.198961164508 0.194672996958 0.180152450124 0.19944864987 0.257583228235 0.245178524187 0.240481186058 0.23786630337 0.249341975217 0.245210894146 0.246468005973 0.237456371003 0.247563146561 0.241300303041 0.22729752986 0.240555698528 0.237993038095 0.254193700204 0.26033321069 0.24220764546 0.231128442611 0.230799434891 0.264694157473 0.241194058993 0.241588310222 0.245985985156 0.248083056968 0.231516408669 0.235215479132 0.226198260689 0.228320459149 0.22623565919 0.249391170367 0.221152204365 0.239284874476 0.218351963925 0.252686589685 0.220530747351 0.234477098822 0.243363395844 0.246969168578 0.229990071021 0.225895881072 0.232959826282 0.248113114083 0.23570223574 0.245178524187 0.245562733947 0.248226439579 0.241702239505 0.242775171886 0.233515238499 0.242256602421 0.232453320509 0.0221776604768 0.0292076409984 0.0225953199419 0.0344078822964 0.0402556862186 0.0443490345462 0.0247422055695 0.0212582761966 0.0262237412261 0.0239779350196 0.0420497572884 0.0414593565817 0.0638943455501 0.0444256278823 
0.0324613404347 0.0464717210574 0.0064847144584 0.00810593532016 0.026613709743 0.0281809240896 0.0431707040779 0.0364933096346 0.0236549105265 0.0316629149872 0.0188389154871 0.0197111271958 0.0316629149872 0.0126413659503 0.00989228745945 0.0276229976961 0.0619054176788 0.0136582341261 0.0735855911944 0.0517713161376 0.0346942423053 0.0171636471405 0.0128780143687 0.00330427938484 0.00787705209324 0.191124461218 0.196709270957 0.202716444922 0.215924187936 0.209335641351 0.208652638742 0.203456445375 0.186385899088 0.197740844714 0.208277146555 0.207460837635 0.201437849638 0.202451222155 0.209128071387 0.184787202976 0.190226937612 0.212288039878 0.187958804478 0.231460569504 0.196093364986 0.220613648389 0.191903262961 0.227580912528 0.204703238288 0.192798319471 0.194099784682 0.207098250381 0.21653588119 0.209689452484 0.175692486622 0.198220686642 0.190843207535 0.19145100341 0.232063526554 0.215320322312 0.200757345911 0.200512766736 0.217080327154 0.194471953875 0.2086592247 0.211165769014 0.203992872465 0.197349559187 0.188849659131 0.205722712353 0.191296031353 0.198394567468 0.194851113014 0.177048060287 0.198893335106 0.255751598489 0.244696550693 0.240850735029 0.238657915403 0.248846650839 0.247061872969 0.245367004129 0.240135191988 0.249950401792 0.239377278919 0.225435027987 0.240867925442 0.23738991955 0.253602763959 0.257909304325 0.239755581779 0.231578379023 0.230802663983 0.267123052444 0.243889033036 0.240266927764 0.244253437447 0.250943098487 0.231284290644 0.234232349856 0.227603903464 0.227483301065 0.225010355342 0.249291083828 0.223656027478 0.241455021635 0.21867583866 0.252237135554 0.222012069338 0.237944922778 0.2436864771 0.244348021388 0.230119265967 0.224342678419 0.231928535106 0.246422479994 0.233277995952 0.244696550693 0.244602643666 0.245964063641 0.239692017082 0.242916090829 0.232449793011 0.23942553099 0.231611684555 0.0349328180738 0.0397488289561 0.055917859907 0.0466657680267 0.0537525131124 0.0312322620487 0.0204661899168 0.0273925121636 0.0188335643607 0.0409848131484 0.0582072876054 0.056071962696 0.0229170555223 0.0261517042618 0.0397135231185 0.0177392332242 0.0260266328079 0.00803879586998 0.0153514357558 0.0448019039458 0.0436873202641 0.0393784019825 0.033505533415 0.038918396453 0.040710313775 0.033505533415 0.0158373818638 0.0159973219202 0.0374102115892 0.0608056403948 0.0178404535904 0.0681680501025 0.0368277650919 0.0342153028534 0.0136005609896 0.0114725513671 0.0202654226306 0.0215111320227 0.175502940168 0.180846739943 0.186993890383 0.200622200655 0.194047326964 0.191871659177 0.187230724864 0.170446969951 0.181958118895 0.192296146604 0.192168805538 0.185737701873 0.187567790504 0.1926934047 0.169562979332 0.174903941965 0.195627236518 0.171332536122 0.216941277953 0.180224282629 0.204375817171 0.176708588212 0.212007481629 0.187940100384 0.177271729963 0.178782860675 0.191678059195 0.200974345147 0.193702992688 0.160469688943 0.182595805427 0.175146841545 0.17584337797 0.215706304999 0.198413689274 0.184386485081 0.184856323095 0.20232915317 0.177913816132 0.192982627694 0.194328346433 0.187558388958 0.181714052911 0.173349805963 0.189464804214 0.174374123803 0.181953747446 0.178997842818 0.162709307879 0.18281187754 0.239644460978 0.228724428122 0.225246554022 0.222159074766 0.232927392687 0.231188577573 0.229024014707 0.22394884582 0.234228075319 0.223668927512 0.209861178703 0.225307285274 0.221985913786 0.238104571085 0.242774657148 0.224369518131 0.215280858443 0.214174032708 0.25182022511 0.228290074367 0.224826938646 
0.228427350702 0.235193041024 0.216001866886 0.218122104545 0.211215702011 0.212099059638 0.209083204086 0.233569980649 0.207419375116 0.225880589953 0.202184861075 0.236687676647 0.205660738951 0.22091809464 0.228766193737 0.228457617824 0.2135856342 0.208453819381 0.216659972673 0.231192094033 0.218856194539 0.228724428122 0.228811270004 0.230578167797 0.224983578773 0.22794936407 0.216954366886 0.223518672063 0.215168993038 0.0278789967436 0.0521696459286 0.0689795584164 0.0720246274977 0.0510464689946 0.0421745394626 0.053582971262 0.0252834975098 0.0677158937278 0.0652657188093 0.081318384881 0.0463957610979 0.0317140261778 0.0668416555065 0.0279143378417 0.0293650428541 0.0349789935753 0.032697569141 0.0634994948437 0.0398508689254 0.0464805914619 0.00599304401229 0.0382168939676 0.0359888514026 0.00599304401229 0.0356302780251 0.0278103138868 0.0553135387293 0.0681148770417 0.0401494228711 0.0950529579724 0.0698373790182 0.0522664102967 0.0364245082526 0.034367526255 0.0307005764611 0.0296340954647 0.193320448722 0.200616479264 0.204805927895 0.217738543992 0.211505662739 0.210916031972 0.207785188245 0.189492055249 0.199011305592 0.212849559963 0.207930120156 0.205973985295 0.201080290256 0.211364107387 0.190111543422 0.193149888648 0.216396870129 0.188564248953 0.232124397575 0.197509520178 0.225563508571 0.194908404083 0.228408782968 0.205404773753 0.195016829794 0.196780548006 0.207983584103 0.219251719916 0.21299620972 0.177753842585 0.19963357851 0.19173429638 0.194103048534 0.233951642516 0.219652294406 0.206249008964 0.203257559758 0.217055047586 0.19863330849 0.211258733702 0.212590735452 0.206794766477 0.199327419787 0.191301214309 0.208462930841 0.194441169877 0.201765843334 0.197324504463 0.182225897174 0.202147597456 0.260363424846 0.247928312505 0.243139798114 0.240805098305 0.252082485905 0.248010938223 0.249316339351 0.240374954583 0.250342905364 0.243926059402 0.229857687769 0.243199557315 0.240557269117 0.256813113392 0.262812568913 0.244726906585 0.233983032632 0.233739246967 0.267372287242 0.243942332388 0.244149194945 0.248660562989 0.250891907974 0.234045856002 0.23797517084 0.229108542601 0.230855414047 0.228916694642 0.252083674667 0.224056856634 0.242001030284 0.221239323161 0.255322806495 0.223424493307 0.237685672547 0.245823282675 0.249646734812 0.232903558602 0.228555612396 0.235462898266 0.250614129287 0.237926457957 0.247928312505 0.248245352669 0.250761236826 0.244030479155 0.2452417146 0.236085335476 0.244925556651 0.235313123462 0.0296919493074 0.0549103775111 0.0590022859872 0.0450958795243 0.0436647014416 0.0451669494409 0.0401697758406 0.0620690815544 0.0404063812339 0.0856236153109 0.0591687282212 0.0490473517415 0.0683619493373 0.0275851844543 0.0264002504943 0.0446945676427 0.0461763497794 0.0638640733472 0.0232220049486 0.0233353107646 0.03147088677 0.029201781247 0.02614478673 0.03147088677 0.0350355717384 0.030428654072 0.0452764113597 0.0802967236314 0.0328354116031 0.0941098407344 0.0724718577687 0.0562014612922 0.0313029110017 0.0340889421853 0.0234757814211 0.028925036543 0.210407140423 0.216202187825 0.221880775368 0.234954371989 0.228487320903 0.227668142527 0.22288748901 0.205844703323 0.216768667446 0.227711464843 0.226263348504 0.220970493431 0.220784685064 0.228189601628 0.204608420295 0.209669658318 0.231537789973 0.206793805641 0.250139733355 0.215157813925 0.239903556316 0.211352210266 0.246303099122 0.223431243026 0.212079033799 0.213468334673 0.225996065584 0.235701634712 0.228967589943 0.195079844754 0.217288259849 
0.209835756349 0.210818201018 0.25087781482 0.234509225035 0.220284749566 0.219817558162 0.235704379779 0.213924917847 0.22786813663 0.229992970272 0.223192642095 0.216540668547 0.208207378637 0.224921456698 0.210576921517 0.217728441709 0.214138769918 0.196963968339 0.21825274764 0.274633184392 0.263620373951 0.259724001387 0.257434131046 0.267733388911 0.265627371082 0.264299751314 0.258574475793 0.268388644672 0.258492710734 0.244710620006 0.259750243754 0.256426981572 0.272472244255 0.27686351493 0.258914849135 0.250487159092 0.249742739323 0.285410306304 0.262307563551 0.259345413586 0.26329314732 0.269271128247 0.250334873532 0.253300294048 0.246383898221 0.246645581278 0.244211963707 0.268141898451 0.242252606427 0.260005586654 0.237699783889 0.271104326635 0.240825446457 0.256147548483 0.26253800174 0.263413026859 0.249061635853 0.243583443282 0.251070491042 0.265469741093 0.25252673858 0.263620373951 0.263585528386 0.265041995624 0.258842865142 0.261802853748 0.251583944502 0.25855292944 0.250657339544 0.0450102465608 0.0439198537957 0.0420878714528 0.0489743500788 0.0458798154287 0.0561577170329 0.0578224624858 0.0224176386409 0.0849412968827 0.0784422731853 0.0630254855321 0.0686145922194 0.0402128784166 0.0342259139129 0.0607491308771 0.0616063147121 0.059057582132 0.0469260494382 0.0248556117621 0.0569379974042 0.0246670677105 0.0228986845775 0.0569379974042 0.0437638412118 0.0433305015411 0.0375189879884 0.0808012493057 0.0411311531308 0.089015155435 0.0787045494767 0.0568803303557 0.0468151856286 0.0457099341419 0.0358166687321 0.0382966404493 0.218181156993 0.223322073096 0.229705702275 0.242392790988 0.23577882008 0.236467641295 0.230139466345 0.213525276303 0.225126391191 0.234563959305 0.23447409507 0.227593305984 0.22968375989 0.236629811874 0.210530415308 0.216784944512 0.23930372472 0.216469414915 0.257297585894 0.22354090813 0.246798396633 0.218282233397 0.254457430072 0.233053108231 0.219729861221 0.220661425884 0.234112165808 0.242944509178 0.236406584132 0.202708452811 0.225406734885 0.218378296681 0.218342011887 0.259279663879 0.242430236301 0.227193609953 0.227261836804 0.24357213448 0.22167035443 0.235341335514 0.239265397892 0.231386700827 0.224381906232 0.215751708357 0.232947742221 0.219218926008 0.225693206986 0.221969156729 0.202171293512 0.225894654473 0.281426284779 0.270908280604 0.267090828143 0.26583132699 0.274949583689 0.273930107263 0.271660675229 0.267616788632 0.276815360824 0.264923167376 0.251110814116 0.267050687079 0.263175899022 0.279239724537 0.282486166675 0.264820551157 0.258570659963 0.257930961428 0.293412787858 0.270816835713 0.265787467516 0.269896000193 0.277962792777 0.257183140616 0.260538043514 0.255036127386 0.253324298915 0.251219664681 0.275354649609 0.251349757403 0.268235077166 0.245978035775 0.277988121804 0.249516964722 0.266387651101 0.269264891567 0.269706692396 0.257221982881 0.25041421232 0.257531332229 0.271532129624 0.257608408712 0.270908280604 0.270491687514 0.271003440471 0.264326332823 0.268486578685 0.258229704408 0.264793848318 0.258283260113 0.0140423845301 0.0233496953457 0.0357817640834 0.0194594148752 0.0561842963236 0.0207161808072 0.0344686045315 0.0532665757163 0.0656105748711 0.0611304155975 0.0376479555399 0.0422417749013 0.0418045042693 0.0514369987371 0.0551924216967 0.0370785412495 0.0618826263881 0.03552992519 0.0709823822809 0.0406476055096 0.0445130802003 0.0709823822809 0.0367370595656 0.0436433053629 0.0176515558758 0.0714383400943 0.0313405988749 0.050902359898 0.0456873150128 0.0406736463161 
0.0404939048403 0.0386201712918 0.0390017962403 0.0411563720519 0.190383467974 0.193150151335 0.201647820189 0.214732592431 0.207848794505 0.207203203998 0.199017728171 0.184517441065 0.198117570692 0.203251095462 0.208611510757 0.196706601418 0.206520396425 0.207659337014 0.179547279859 0.188402996857 0.207901541524 0.18984126915 0.231436366396 0.196328732424 0.214604717337 0.189868309751 0.227504737805 0.2057488472 0.191940214233 0.192467873609 0.207646738607 0.21397919575 0.206530920079 0.175883310987 0.198349858418 0.192082967323 0.190004696209 0.230325216181 0.21052421867 0.194679583814 0.198537353627 0.218586319181 0.19070413186 0.206580728089 0.210890308648 0.201869808993 0.196665077614 0.187817228574 0.203604184545 0.189253631177 0.195640755776 0.193545341254 0.172360847921 0.196255157011 0.249220192749 0.240515221189 0.238164043403 0.236348677214 0.244540315717 0.246252369823 0.240132469075 0.240621741867 0.249888930273 0.233326376677 0.219986372028 0.238112203328 0.233455949013 0.249164983161 0.25077275089 0.233019708703 0.229068571188 0.227556818352 0.266813634827 0.244493984417 0.235169888951 0.238293435462 0.251527743788 0.228140449415 0.22959779378 0.226587756924 0.223543468869 0.220396683922 0.245620426642 0.22441324809 0.241383050303 0.216373265669 0.24802824928 0.221314439433 0.239346728325 0.241091845042 0.237050180919 0.227111645095 0.219282181885 0.227591404442 0.240665010845 0.227138628142 0.240515221189 0.239782829874 0.239319392462 0.23385508337 0.24006053231 0.227964392353 0.231962468731 0.227162089828 0.0243523838323 0.0383436294161 0.0281380877138 0.0591143980174 0.023507742642 0.0377968944731 0.0517794728194 0.0731209286425 0.0626942772583 0.0386453724111 0.0461236944312 0.0433886463071 0.057222198299 0.0591551673076 0.0319956615202 0.0701506857494 0.0422353525103 0.0748230813085 0.0387325777977 0.0428806382505 0.0748230813085 0.0409657262208 0.047385502674 0.0175421295668 0.0657295550805 0.0386908764688 0.0489968109439 0.0522686619491 0.0387311691946 0.0491554938498 0.0442532671202 0.0441014308007 0.0436449779054 0.189947462454 0.193087358051 0.201270443197 0.213957860003 0.207091706444 0.207812220084 0.199357780455 0.184445706942 0.197758177741 0.203406879958 0.207785745488 0.196588475912 0.205402521621 0.207935882191 0.179126107639 0.187763747972 0.208638518258 0.190249811145 0.22994679767 0.196055336563 0.215105618909 0.189122223025 0.226906503622 0.206245399814 0.191418910324 0.191795589288 0.206939834769 0.213506326546 0.206505332082 0.175157210084 0.197857598212 0.191658112797 0.189588115864 0.230508642351 0.211553886467 0.195374420383 0.198156887541 0.217288762795 0.191332101287 0.206198782834 0.211486527696 0.202202827305 0.19621322526 0.187298385594 0.203764973183 0.190101114406 0.196046751208 0.193325009164 0.171164264517 0.19631141813 0.249660253112 0.240527632421 0.23771162293 0.236702658605 0.244516745342 0.245937622906 0.240621381598 0.240549902948 0.249413960629 0.233305723303 0.219774348622 0.237625603888 0.232924324305 0.248768260048 0.250380844287 0.232789805762 0.22924781342 0.228117670835 0.26599273559 0.24389404773 0.234785533178 0.238373023947 0.251062908639 0.227440481837 0.229791016779 0.226764157548 0.223002288166 0.220422035353 0.245352612988 0.224391473735 0.240777024278 0.216738025139 0.247654798968 0.221447853711 0.240056634641 0.240035093698 0.237386138185 0.227558813593 0.219321439562 0.226978987712 0.240167124817 0.22597544824 0.240527632421 0.2396924683 0.239078218682 0.232902821621 0.239060496039 0.227569835839 0.232339810664 
0.227659905228 0.0141262101806 0.0139426926457 0.0350538423884 0.0193552608508 0.0431621025158 0.0440916637552 0.050896637678 0.0388189182549 0.0267566278127 0.024110231253 0.0232035130335 0.0336464874245 0.0349533341335 0.0219308383764 0.0572621322316 0.0349099564127 0.0531369685873 0.0252590392656 0.0298245046475 0.0531369685873 0.0175869571119 0.0244711956848 0.00818635303924 0.0514811245358 0.0194723442375 0.0509148981601 0.038941584783 0.0196470806889 0.0301479553419 0.021545096414 0.0245644202822 0.0217515624084 0.179463055971 0.184039565703 0.191058390156 0.204133065196 0.197353411589 0.197439799103 0.1907070948 0.174350095292 0.18671934138 0.195225461177 0.19666314681 0.188285080293 0.192912863971 0.197716084636 0.17106321505 0.177948538218 0.199885832792 0.178001143095 0.220003811923 0.185034059424 0.207478284578 0.179492397587 0.21650741229 0.194560853934 0.181055185718 0.181934852801 0.196069170377 0.204292703623 0.197354097622 0.164139489747 0.187014857055 0.180129327357 0.179477085752 0.220692949894 0.202951258529 0.187497059139 0.188415486818 0.206311756974 0.182082560815 0.196599300923 0.200515380101 0.192263706104 0.185773424375 0.176983033355 0.193914579783 0.179773695055 0.186365398569 0.183085748173 0.163066216827 0.18672092116 0.242702110891 0.232274391832 0.228788679171 0.227187602483 0.236388804345 0.235969651611 0.232778101812 0.229721372697 0.23914299349 0.226115326683 0.212181006801 0.228757804803 0.224715842477 0.240932159296 0.244193462731 0.226111600887 0.219855639469 0.218968698082 0.256162056313 0.233278273993 0.227208648321 0.231128251362 0.240452766308 0.218806638317 0.221623839858 0.216518120679 0.214708469871 0.212229381986 0.23697508889 0.213254322456 0.230501952576 0.207030794754 0.239662752997 0.210997516317 0.22840539478 0.231383198724 0.230834439598 0.218316993527 0.211375460714 0.218990694003 0.233092437868 0.219339375552 0.232274391832 0.231855607362 0.232408829329 0.226038981221 0.230519762371 0.219567778072 0.225824086027 0.219230412501 0.0195919993919 0.0221059172753 0.0271326704392 0.0528905379198 0.0431586822848 0.0385447030867 0.0255010287112 0.0262337606525 0.0170321830128 0.0195302099147 0.0206521036595 0.0209833716129 0.0252645250533 0.0550811186683 0.0394746523074 0.043263968218 0.0280088992059 0.0316978756724 0.043263968218 0.00932281475944 0.0154549635915 0.0220221996956 0.04619442006 0.0176558634743 0.0549187053198 0.0347531723104 0.0154010655624 0.0249961358537 0.0130624709681 0.021086975526 0.0157582275316 0.172494143831 0.17784850126 0.184169405165 0.197413015598 0.190717679771 0.190383314088 0.184688114733 0.167622718241 0.179385018024 0.189454833136 0.189254205537 0.182497405034 0.184783379021 0.190756581821 0.165553322788 0.171383967321 0.193742345805 0.169993474917 0.213189630746 0.17771520654 0.201947261672 0.173029090008 0.209395719039 0.186777092552 0.174149245532 0.175318744446 0.188797436083 0.197902799323 0.191019058185 0.156992121883 0.179797341784 0.172560525004 0.172720884671 0.213854773981 0.196871318612 0.181938976962 0.181811193063 0.198969640765 0.17576606357 0.19002752539 0.193102069722 0.185493626505 0.178790364297 0.170132074883 0.187191891105 0.172843021691 0.179777298661 0.176222723299 0.157641466463 0.180195116525 0.237394679003 0.226280310947 0.222447934209 0.220486140395 0.230450693082 0.229006895923 0.227001818993 0.222289337207 0.232008422548 0.22075701885 0.20666547313 0.222449960987 0.218801277251 0.235156691974 0.239347508119 0.221054539901 0.213261323555 0.212504634094 0.249237472588 0.225979854618 
0.221636903122 0.225713392695 0.233125083698 0.212685206963 0.215719410439 0.209455227207 0.208792490533 0.206361366722 0.230902205737 0.20571314342 0.223403407555 0.200311655198 0.233802587518 0.20384593219 0.22048704521 0.225216247618 0.225812871456 0.211806271505 0.205652934938 0.213231447371 0.227786843309 0.214377387143 0.226280310947 0.226104307704 0.22732908173 0.220904746241 0.224415757753 0.21378854989 0.220845147853 0.213178189936 0.0385029210536 0.0191331286834 0.0412210559221 0.0479132957057 0.0465498029846 0.0439952648454 0.0294155998226 0.0263022819525 0.0288814535105 0.0321813612841 0.0366154046916 0.0324760763417 0.0515944791638 0.0310049526439 0.0546416139704 0.0338597056505 0.0376153728239 0.0546416139704 0.019563854062 0.0269885058553 0.0157505744896 0.0625368339241 0.0143844790412 0.0526157254036 0.0338348827289 0.0297234721938 0.0233631827985 0.0204547357021 0.0243203298193 0.026443786847 0.18161798851 0.18540629351 0.193056292253 0.206460086953 0.199649422865 0.198358533748 0.191516052491 0.176005251549 0.188914967022 0.196119500233 0.199392563722 0.189524653623 0.196344377596 0.19896940691 0.172616127427 0.180167039066 0.200264745011 0.179650753185 0.223145799502 0.187130729579 0.207841346365 0.181781654149 0.218763807382 0.195938920487 0.183272877978 0.184194077304 0.198599402775 0.206127033545 0.198682577368 0.166796826532 0.189310437328 0.182538488189 0.181515358431 0.221921151125 0.202966250725 0.187732725258 0.190303161279 0.209524247949 0.182729260129 0.198439370358 0.201593038612 0.193392410161 0.187922687561 0.179193994752 0.195211914017 0.180441261226 0.187358375156 0.184927566912 0.165517695891 0.188083050798 0.242939707589 0.233299699182 0.23049760933 0.228144979761 0.237419943564 0.237777043946 0.233178399857 0.23147925362 0.241203272786 0.226929567318 0.213318762235 0.230491320835 0.22634238089 0.242302330141 0.245163744774 0.227035678455 0.220985219764 0.21961554026 0.258484704802 0.235572463293 0.228516087928 0.231839786454 0.242586485114 0.220732170317 0.222439032331 0.217865646283 0.216378419732 0.213247698263 0.238336887242 0.215052759841 0.232719204269 0.208046041848 0.241045913549 0.212450914632 0.229495311164 0.23369764439 0.231101958719 0.219113353825 0.212320151032 0.220660185426 0.234399345217 0.221293359798 0.233299699182 0.23289508713 0.233336932661 0.227805970609 0.232744111879 0.220997609023 0.226042688917 0.219773770977 0.0487507947542 0.0650775470184 0.057802762893 0.0304021403296 0.00977564963899 0.0440829229362 0.0178656109143 0.0223657443481 0.0145412613726 0.00880579384939 0.0433599813023 0.0513293545711 0.0472054655688 0.0249055105449 0.0346274013058 0.0356855998153 0.0249055105449 0.0197624673252 0.0147740519949 0.0419438715965 0.0491548979957 0.0286642180871 0.0731736765881 0.0474587573553 0.0307413987988 0.0281173399333 0.0193986394444 0.0247050709585 0.0190246608336 0.173083486457 0.179910481276 0.184747521108 0.19795391577 0.191518465927 0.190900207622 0.18706914222 0.168885152715 0.179192090216 0.192134059909 0.188561922702 0.185169221179 0.182516578227 0.191341288573 0.168884190059 0.172666616329 0.195883736852 0.16891577804 0.213018522015 0.177613800133 0.204946600226 0.174427531668 0.209108112252 0.18591519295 0.174796071764 0.17642769541 0.188436163643 0.199161967465 0.19263437332 0.157387090389 0.179769711315 0.171986018804 0.173714059835 0.214316704261 0.199152104404 0.185242989169 0.182953007753 0.198045578814 0.177854485907 0.191103831493 0.192902410163 0.18652468984 0.179244739552 0.170950373519 0.188227253613 
0.173938849831 0.181251003885 0.177034455953 0.160977434421 0.181661357468 0.240293898195 0.228034626835 0.223480275138 0.221169492968 0.232239647508 0.228872448909 0.229260803782 0.221433214134 0.231450162781 0.223643381382 0.209415344527 0.223528489107 0.220601809879 0.237045012584 0.242726403737 0.224366911478 0.21417342735 0.213777217999 0.24873924373 0.225124576112 0.224039797111 0.228474279829 0.232185938187 0.214138987375 0.217816173262 0.209554716543 0.210717178803 0.208592377005 0.232371287988 0.204873194809 0.222962175481 0.201230387345 0.23556954796 0.203837920113 0.218977104053 0.226289614933 0.229286064361 0.212971252918 0.208146125074 0.215343203422 0.230520772996 0.217614435398 0.228034626835 0.228243837858 0.230522424537 0.223850190554 0.225634530741 0.215935214332 0.224460188266 0.215143400901 0.0529022439667 0.0328752197893 0.0555054115849 0.0509289255649 0.0173017354019 0.040824127775 0.0418711547682 0.0429127577978 0.0455543968368 0.0222834470095 0.0700549868865 0.0480571802481 0.0688386171987 0.0442386440219 0.048830469586 0.0688386171987 0.0328274300443 0.0405886029945 0.0207406620607 0.0561033512724 0.0322786585229 0.0341942563123 0.0291137581017 0.0267606033099 0.0411628160219 0.0351933008888 0.0409598634365 0.0395653722748 0.170381496838 0.173513369262 0.181763628401 0.194913312213 0.188005094963 0.187597093173 0.17963993246 0.164619720579 0.178109956535 0.183964962678 0.188586214824 0.177259498897 0.186421186364 0.18797705999 0.159970598511 0.168451386355 0.188698976448 0.169860506279 0.211615177079 0.176329007245 0.195682012245 0.169937526308 0.20770892717 0.185927394166 0.171947667368 0.172526710735 0.187654893417 0.194331077481 0.186965121806 0.155693053512 0.178340485408 0.171986662107 0.170058435784 0.210831670623 0.191498228781 0.175622787806 0.178712133706 0.198589692402 0.171227708945 0.186830515333 0.191211132234 0.182219297429 0.176703062328 0.167807776518 0.183929379926 0.16962686739 0.17602523241 0.173642929989 0.152568053554 0.176555503125 0.230732284086 0.221405569072 0.21873883425 0.217008773205 0.22548132942 0.226701619289 0.221306518291 0.22096326048 0.23027897199 0.21448417882 0.200865540743 0.218691208961 0.214158679542 0.230111756153 0.232289297369 0.214283536851 0.209653108975 0.208315017711 0.24732302577 0.224778673487 0.216104443077 0.219500448511 0.231884032027 0.208661452368 0.210499772687 0.206990362059 0.20415679031 0.201176497026 0.226427503413 0.204626996142 0.221691815993 0.19686908479 0.228928610762 0.20163871009 0.219806895143 0.22162291443 0.218587303851 0.207804967644 0.200129766869 0.208295752101 0.221764477978 0.208127749312 0.221405569072 0.220756657587 0.220625985458 0.214861357139 0.220620889076 0.208723632287 0.213494112425 0.208081504586 0.0844496882978 0.080315980567 0.0726849367116 0.0675030089859 0.0473153931529 0.0443066374773 0.0646752376508 0.0680246382237 0.0621370759065 0.0478849585017 0.0214355468841 0.0687094353901 0.0384979333887 0.0389190825118 0.0687094353901 0.0480894783324 0.0504544201599 0.0366660205236 0.0908834253131 0.0410486802894 0.0843383154277 0.0744888741312 0.0620377042787 0.0466247584157 0.0490662247741 0.0410809318858 0.0466044522535 0.220241217585 0.22387872958 0.231567441105 0.244549509651 0.237801597966 0.237271702597 0.229957304486 0.214803305719 0.2275731622 0.234289966045 0.237659204867 0.227701989162 0.234344570768 0.237727323575 0.210730852647 0.218580436475 0.238765139748 0.218842181584 0.260585713889 0.225852980679 0.24576980875 0.220083510611 0.25691340974 0.234991633089 0.221816502308 
0.222561867727 0.236939418607 0.244255467135 0.237073335368 0.205416544089 0.227870869354 0.221241762596 0.220094090971 0.260317043933 0.241447551617 0.226019474356 0.228752673734 0.247333244315 0.221529692284 0.236780633698 0.24056209021 0.232170242771 0.226474325634 0.217767662464 0.233885787279 0.219595234935 0.226160651319 0.223600212873 0.203287025744 0.22672247966 0.280139937217 0.271046573965 0.268296536378 0.26646171863 0.275059767728 0.275799812567 0.270911764394 0.269815722172 0.27914751079 0.264301608642 0.250949666543 0.26825814336 0.263926860434 0.279625941989 0.2816892989 0.264092216423 0.259287884013 0.257990452056 0.295945646215 0.273550874886 0.265902495939 0.269211685027 0.28055869892 0.258431707188 0.260338655828 0.256388822593 0.254096852299 0.25120005211 0.275959599762 0.25362897924 0.27069009133 0.246633770083 0.278453866291 0.251048870754 0.268244717954 0.271056483898 0.268212813274 0.257487053623 0.250194192056 0.258191856723 0.27145388403 0.257986657626 0.271046573965 0.2704506544 0.270310564507 0.264635332559 0.270115829289 0.258629632749 0.263204545545 0.257890609764 0.0600044713459 0.054351041098 0.0187381084361 0.0598557442646 0.0614469307754 0.0531453686192 0.0525559397425 0.0279315026787 0.094761292833 0.0773971393828 0.0814189949505 0.0655620569523 0.0700255275487 0.0814189949505 0.0520201647503 0.0579117136152 0.0488956649362 0.0419109171187 0.0567137285928 0.0208614229037 0.0306610868801 0.0327090237565 0.063272034339 0.0543984191577 0.0634480421324 0.0583685669148 0.139330779418 0.142986837848 0.150850053523 0.163810831232 0.156868051014 0.157669598862 0.149679456655 0.133871337528 0.147188409072 0.153968295801 0.157417010427 0.14689076955 0.155379198936 0.157713790383 0.129254210759 0.137253284329 0.159227064857 0.139693977567 0.180206197156 0.145463442392 0.166351059616 0.138682374547 0.176816693259 0.155870230339 0.140840856643 0.141328264666 0.156536274226 0.163539791349 0.156545980147 0.124345584519 0.147307265103 0.141019689746 0.139034333305 0.180682036747 0.1624502749 0.146314826392 0.147811298228 0.167221256708 0.141400345074 0.156004354912 0.161263479124 0.152006654871 0.145684418167 0.136675034398 0.153561404536 0.139874017863 0.145889334783 0.142806737922 0.121109447078 0.146069682444 0.201853472765 0.191504661606 0.188213817168 0.18712354961 0.1956147304 0.196211110416 0.19205227926 0.19059963575 0.199675257584 0.184878502918 0.170804202645 0.188147863911 0.18370042204 0.200019806255 0.20290821197 0.184684396544 0.179556147674 0.178663646288 0.216648538359 0.194032671841 0.185998827234 0.190015746161 0.201299319844 0.177907547001 0.180731098286 0.176760615884 0.17358596254 0.171156072474 0.196260390732 0.174185108777 0.190914661822 0.166819613367 0.198798087344 0.171315193398 0.190200635461 0.190703604042 0.189694426068 0.178004531696 0.170196606131 0.177783458136 0.19179082566 0.177644952 0.191504661606 0.190881968726 0.191068709685 0.184514101166 0.189756786146 0.178407147108 0.184631515769 0.178548671216 0.0324892056253 0.0484887435869 0.0393579503362 0.0472627188893 0.0192847169709 0.0232472318507 0.0583393494412 0.0585791036352 0.0611786752117 0.0424145094995 0.0604358052165 0.0620651155443 0.0424145094995 0.037432002363 0.036885144463 0.057932237922 0.0663347563067 0.0402054510934 0.0745837939532 0.0379481013657 0.0474009418109 0.0343119409392 0.0339409730754 0.0428079642087 0.0427340086613 0.16139435599 0.167055199688 0.172679145019 0.186588672878 0.18018023169 0.176484226078 0.173177363378 0.15635152751 0.167324305619 0.178559723881 
0.177731707828 0.172313382954 0.172981201837 0.177656055027 0.157022451257 0.161367937512 0.181005231952 0.155624650937 0.203264920982 0.165557822324 0.190457367134 0.163325089652 0.197312648791 0.172046353812 0.163261037864 0.165144860989 0.177247951111 0.186978615056 0.179504291425 0.146841839918 0.168174392768 0.160512412099 0.161906209121 0.200546381048 0.18360708151 0.170619997941 0.170849280459 0.188248015891 0.163499893332 0.178863271671 0.178580924982 0.172741578769 0.16748001244 0.159472149167 0.174802823761 0.159227769495 0.167358477359 0.164720514491 0.151099028747 0.168548224145 0.225620984689 0.214383995774 0.210968404368 0.206923044041 0.218629792329 0.216162853062 0.214556733684 0.208383921822 0.219163632298 0.210075568948 0.196412311391 0.211088115641 0.208225582965 0.224211015109 0.229824216763 0.21125046128 0.200361910494 0.199102744226 0.237092122358 0.213257676298 0.21126821314 0.214648561609 0.219950980033 0.202248505777 0.203853239286 0.195906673599 0.198467217831 0.195092454721 0.219319110187 0.191906900165 0.211043854216 0.187166023617 0.222703227226 0.190377176965 0.20432458647 0.215124925944 0.214931156714 0.198560742198 0.194613360236 0.203185148475 0.217932610212 0.206623397839 0.214383995774 0.214797954846 0.217376030313 0.212326968407 0.214327312553 0.203281369316 0.21006441216 0.200554878155 0.0431984510448 0.0262542814372 0.0292765976694 0.0197548242369 0.011556553192 0.0420248516295 0.0609848282508 0.0559281666693 0.0312264925083 0.0398958448421 0.0413044920489 0.0312264925083 0.0264369738429 0.0230439739916 0.0462734733613 0.0414691510708 0.0363662025945 0.071550533242 0.0478849716213 0.0291862789915 0.0369373377392 0.0270714127462 0.0334218260536 0.0262390277171 0.165544038214 0.172891912863 0.17727312869 0.190354892388 0.183958646909 0.183879480011 0.180336742003 0.161635841791 0.171548979047 0.185413091762 0.180663093483 0.178298498301 0.174219977581 0.184181187608 0.161990271375 0.165191063063 0.189309277337 0.161423020955 0.205054842396 0.170020243784 0.198514733988 0.166940816324 0.201451973118 0.178556962169 0.167238778058 0.168913749918 0.180647428918 0.191850429195 0.185573186785 0.149626920851 0.172104358108 0.164216849771 0.166272040954 0.207167511585 0.192763981398 0.178906141081 0.175593499508 0.189955336349 0.171151190119 0.183759195847 0.185737635433 0.179470325207 0.171692649892 0.163416602952 0.18110112813 0.167100843514 0.174311352225 0.169649991905 0.153736698586 0.174561747041 0.233945473753 0.221154951746 0.216216020369 0.214168470951 0.225368727897 0.22143399553 0.222723035108 0.213934293134 0.223870685298 0.217024022208 0.202620827758 0.216260896263 0.213476245827 0.230060267172 0.236168299839 0.217779153489 0.207119858677 0.206977473585 0.241093686538 0.217416199365 0.217140656671 0.221883614074 0.224541883596 0.206836488872 0.211062122527 0.202310625247 0.203579796415 0.201763166599 0.225319290313 0.197343747333 0.215321920645 0.194216980733 0.228558415828 0.196533366942 0.211711953148 0.218804815465 0.22300715182 0.206091661798 0.201387938181 0.208241563837 0.223688965104 0.210605474313 0.221154951746 0.221416959944 0.223922313478 0.216868041009 0.218202757079 0.208934179216 0.218220121204 0.208484033719 0.0428988991781 0.0449811252912 0.038369173012 0.0392012066073 0.0184411809504 0.0770158611592 0.0591685049971 0.0671690041442 0.0499388239897 0.0544215157144 0.0671690041442 0.0346140690794 0.0413101969584 0.0321994698404 0.0440967531043 0.0383047062906 0.0298070621094 0.0211708137004 0.0201129458309 0.0453565917011 
0.0368886973605 0.0457257341 0.0417220412167 0.154888546678 0.158813196403 0.166441835812 0.179672088583 0.172784191158 0.17260621351 0.165322327284 0.149403283235 0.162445964052 0.169819706795 0.172790429714 0.162919256631 0.17010598628 0.17291506116 0.145591206314 0.153172124666 0.174534430185 0.15398983773 0.196191934512 0.160693736573 0.182015889553 0.154710886954 0.192325902157 0.170351903358 0.156482289316 0.157229639215 0.171983255038 0.179460677481 0.172268582678 0.139849276668 0.162711676043 0.156097286205 0.154728919391 0.195984213978 0.177559464338 0.161865850494 0.163584898864 0.182768842876 0.156718657607 0.171791912863 0.175973376944 0.167287390852 0.161244255475 0.152349968293 0.168972557751 0.154707872737 0.161224201041 0.158337859598 0.137882544324 0.161653779265 0.217498297387 0.207268525118 0.204089016051 0.202387117484 0.211405750921 0.211656794766 0.207586435206 0.20562940887 0.215067539532 0.200874708618 0.186922162094 0.204055283767 0.199813802793 0.216054855672 0.219141272905 0.20087129472 0.194993585948 0.193928687775 0.232251037143 0.209370889696 0.202145172225 0.205917531589 0.216546254935 0.194031863501 0.196450064794 0.191939021682 0.189735506736 0.187016932169 0.212133736422 0.189132050882 0.20640648386 0.182100826622 0.214796842045 0.186466235268 0.20445164291 0.206912827034 0.205504201388 0.193322781156 0.186101115818 0.194015369065 0.208047565025 0.194373832921 0.207268525118 0.20679029341 0.207231461284 0.201073842632 0.205977385627 0.194517602543 0.200437951551 0.194023674059 0.00847168263945 0.0208969725179 0.0218078109788 0.0404437599311 0.0404194345362 0.0297098891496 0.0297074253675 0.0216763215811 0.0230614305273 0.0297074253675 0.00860159575866 0.00340785264521 0.0287758992091 0.0571909561512 0.0141236699669 0.0709946391549 0.0479055513747 0.0306293087394 0.0170492128659 0.00888265836541 0.00729150357409 0.00480661141663 0.185289458233 0.191085055337 0.196910034804 0.210145347321 0.203571296435 0.202882056675 0.197912476038 0.180625340213 0.191835734795 0.202787038295 0.201526915714 0.195916323017 0.19637797116 0.203353873438 0.179306027922 0.184472082084 0.206754233077 0.181958697279 0.225649748274 0.19019500565 0.21522963857 0.186167360837 0.221735787446 0.198765098442 0.186973637291 0.18833349586 0.201196357535 0.21085114286 0.204041973491 0.169797982896 0.192333522046 0.184882922721 0.185665053896 0.226331622925 0.209833971543 0.195365654989 0.194769866304 0.211153682947 0.188870228152 0.202929999244 0.20531727443 0.198262875463 0.19151438955 0.183035158868 0.19999216658 0.185562442931 0.19270971507 0.189054601453 0.171539131405 0.193197031986 0.250465285504 0.239173402368 0.23520366736 0.232978158323 0.24334022521 0.241285239552 0.239942028126 0.234265108703 0.244131225098 0.234017030201 0.220001112132 0.235226825533 0.231841004595 0.248118070359 0.252690451382 0.234464488853 0.225903274535 0.225189555796 0.261354129729 0.238022768459 0.234821661628 0.238891339667 0.245085625014 0.225666229038 0.228735290931 0.22181584168 0.221922833386 0.219499858076 0.243732621444 0.217749348385 0.235627115362 0.212972717548 0.246730042299 0.216195919385 0.232029501648 0.238052252824 0.239120375862 0.224484292562 0.218871170879 0.226408868705 0.241049615623 0.227943328268 0.239173402368 0.239135618661 0.240673256099 0.234329166611 0.237299131316 0.226939754937 0.234208016817 0.226098822397 0.028402190164 0.0277439229346 0.038946334352 0.0434213038922 0.0291676419928 0.0326159908833 0.0134911389606 0.0149220924049 0.0326159908833 0.0134816006656 
0.0104081517294 0.0265932070523 0.0554420028459 0.0189257472352 0.0720390188254 0.0531694571518 0.0303305078503 0.0242591305562 0.0161087615284 0.011068875208 0.00482735633945 0.188743108882 0.194722342773 0.200407653053 0.213430694909 0.206860229667 0.206917746824 0.201757002065 0.18426566592 0.195342886433 0.206526515083 0.204777293747 0.199508643938 0.199425600774 0.207211939812 0.182702196026 0.187804249003 0.210815779569 0.185880854144 0.228533589019 0.193748347964 0.21913688541 0.189440254064 0.225129338462 0.202750726696 0.19038113591 0.191653574921 0.204515978518 0.214295374936 0.207718295645 0.173066791358 0.195766348142 0.188344856347 0.189125824359 0.230149564934 0.214039936134 0.199354571186 0.198257058594 0.214135463178 0.192868986061 0.206423136152 0.209356843732 0.20214026995 0.194966833942 0.186438002469 0.20377859508 0.189699928978 0.196612341236 0.192629244536 0.174503529883 0.196914127595 0.254336052592 0.242870282486 0.238667846113 0.23688365824 0.247015930548 0.244839615824 0.243870689439 0.237954961473 0.247598002179 0.237655720218 0.223546452001 0.238671394435 0.23524462783 0.251589019272 0.256100611398 0.237973407633 0.229710932979 0.229189966161 0.264623863258 0.241418386796 0.238285878732 0.24258651618 0.248557401899 0.228985563027 0.232518055884 0.22563381547 0.2253177885 0.223189662674 0.247286475663 0.221462369267 0.239019854286 0.21687609732 0.250217711381 0.219989961903 0.23614243802 0.241179806044 0.242913875985 0.228427928816 0.222559090866 0.229759973658 0.244438457877 0.230953863516 0.242870282486 0.24276526561 0.244179265793 0.237465205634 0.24045622955 0.230407994296 0.238017253402 0.230045288889 0.0082487413397 0.0434871339043 0.050183840328 0.0462946366306 0.0331671559449 0.0413860283481 0.0432754400997 0.0331671559449 0.018728526127 0.0181135727037 0.0407852007406 0.0552412031248 0.0241124765906 0.0672915663182 0.036051090003 0.0318240646756 0.0214815502931 0.0159437959725 0.0254901889174 0.0237539942631 0.169264556488 0.175091115152 0.180818741393 0.194404360875 0.187867354301 0.185963216048 0.181699481724 0.164435072081 0.175589139186 0.186821392192 0.185627969522 0.180155862933 0.18064011609 0.186712516664 0.164002900607 0.168780956251 0.190185205445 0.164923288811 0.210475012613 0.173888729012 0.199146471591 0.170598213889 0.205685349652 0.18167215365 0.171034804287 0.172628140609 0.185232711805 0.19500403819 0.187902476327 0.154050585176 0.176234935944 0.168650273349 0.169706859713 0.209769138775 0.193110247226 0.179199013537 0.178806517751 0.195681872133 0.172338947581 0.186953230165 0.188260629529 0.181677967227 0.175468629894 0.167135725316 0.183547713996 0.168593151982 0.176177479228 0.172880341359 0.156935998004 0.176943539051 0.234533528357 0.223126396017 0.219327759561 0.216348929156 0.227347260416 0.225044163552 0.22369577874 0.21767535265 0.227960059154 0.218355967949 0.204394726122 0.219392188471 0.216234012192 0.232478694207 0.237601395838 0.219130755018 0.209451421522 0.208536786671 0.245551542374 0.221905162641 0.219288471778 0.22312655653 0.228847315313 0.210092965448 0.212618708514 0.205172062073 0.20633435877 0.203530506813 0.227844403478 0.201098042061 0.219576265526 0.196351165226 0.231028528894 0.199557870197 0.214705957114 0.222733230705 0.223437599857 0.207886214359 0.202974139337 0.210949037245 0.225748458168 0.213347187038 0.223126396017 0.223291835541 0.22533034675 0.219465519892 0.221962474599 0.211307137623 0.21852957733 0.209700938041 0.0419192621384 0.0545713872547 0.0501857282151 0.0312681130066 0.0402402021553 
[numeric data omitted: many rows of space-separated floating-point values, apparently a feature or distance matrix from a data file committed with the project]
0.0552526867175 0.0691043038035 0.0592804769565 0.0705635348052 0.0516880380121 0.0615515055365 0.0669939251635 0.0527850317966 0.0564919445965 0.0566781222463 0.0735436571696 0.0873943668972 0.0699246931044 0.0483722118201 0.0515510652048 0.079719756552 0.0550353446586 0.0634817595673 0.0710367571435 0.0623974106591 0.0485852577555 0.0576004808055 0.0413084590066 0.0479219849808 0.0493882028299 0.0672450344951 0.0348287223508 0.0527462211198 0.037276801135 0.071549415301 0.0351894067592 0.0529630080357 0.0594111357934 0.0773657257414 0.0496448829109 0.0507905960575 0.0532464315334 0.0714643731585 0.0625646663061 0.0649098127865 0.0667401518449 0.0748051995393 0.0661923309263 0.0591726016457 0.0543093310288 0.074196762744 0.0559244370052 0.043526425057 0.0386335179446 0.0321768451862 0.00974615024856 0.0352347635745 0.0201959159339 0.0180776754132 0.0269400303578 0.0177526168011 0.0076128290146 0.00378003726334 0.0176983136317 0.0184021944475 0.015957334363 0.00467338620778 0.0252008449064 0.0123712673884 0.075769333126 0.0581585948517 0.051685391535 0.0510927438975 0.0624824329901 0.058237684953 0.0625760535583 0.0529652916573 0.0618856654756 0.0574514448673 0.0423317456956 0.0517508019275 0.0497149058646 0.0670867680773 0.0782168275888 0.0597219475669 0.0433096265158 0.0451925797267 0.0797528207091 0.0566461202307 0.0551309318768 0.0620350591031 0.0638200944876 0.0424059873805 0.0493045876457 0.0391604065434 0.0398454975082 0.039967724192 0.0615633801452 0.0371201263022 0.0531118088353 0.0313373664803 0.0653089870936 0.0335830893361 0.0556009421638 0.055317918307 0.0671296861942 0.0435900410315 0.0407047621897 0.0451219309698 0.062876852421 0.0526327547566 0.0581585948517 0.0591116735406 0.0652217833265 0.0570769673892 0.0545681643866 0.0459758175248 0.0634453480612 0.0477419996054 0.0302678050916 0.043643410877 0.0344015081898 0.0370220423378 0.041870658286 0.0264826423168 0.0239447061259 0.0294219290852 0.0381639993795 0.046622906961 0.0276160450358 0.0433656713185 0.0358877725536 0.0394027280044 0.0640596529703 0.0352660320463 0.0428200645336 0.0215929365207 0.0157178006612 0.00848045847957 0.0252717757761 0.0193974269851 0.0287059984697 0.018482386953 0.0255997786212 0.0310533244304 0.0267766622161 0.0163561213819 0.0202134806234 0.03120585866 0.04895305148 0.036414957876 0.00526673250197 0.012342309222 0.0430330254803 0.0254800237585 0.0266262246906 0.032239986602 0.0289090953163 0.0180202124552 0.0199181330431 0.00657470209208 0.019948180111 0.0209059906389 0.0237059104211 0.0188029876251 0.0211536929916 0.0148162018356 0.0289733910382 0.0123381921604 0.0242362829274 0.0258181102869 0.0413288399769 0.0100525363381 0.0238857865216 0.0223322555234 0.0341921837249 0.0380927560603 0.0215929365207 0.0255329039423 0.0383043472154 0.0356000313746 0.0249953874006 0.0211460552822 0.0407734262534 0.0193178864072 0.0180029141367 0.0318989553395 0.055546862882 0.0233316606029 0.0289827460324 0.0288090712213 0.0227692137274 0.0384790428766 0.0423072029606 0.0235668927239 0.02923399269 0.0234314824709 0.0353361736228 0.0517306479462 0.0262806947106 0.0449202216804 0.0352666691701 0.0392752718621 0.0317034826189 0.0393329164331 0.0490036652351 0.0325915526781 0.0480657848217 0.0556144972514 0.0314856186317 0.0229568301755 0.0394863344353 0.0351981784458 0.0468732286627 0.0534614036052 0.0352961935447 0.0267660200432 0.0206247795688 0.0716485928619 0.0553558677376 0.0350658679333 0.0348297205048 0.0591015265332 0.0349050611526 0.0239450314084 0.0312292238968 0.0291976883203 0.0191245738332 
0.0424715605102 0.0407160306554 0.0507428891398 0.0195019979736 0.0455419926359 0.03094049167 0.0480027022309 0.0486981223658 0.0364405293098 0.0216724201002 0.0185311393183 0.0322264249099 0.0418711782883 0.0415641388928 0.0352666691701 0.0357541599092 0.040359691621 0.0422344237589 0.0468672273606 0.0294846892119 0.0324923805449 0.019122055634 0.028756827183 0.0587035417373 0.0144079071022 0.0301136242831 0.0361947314748 0.0239369814187 0.0355016994114 0.0354103580658 0.0251937444079 0.0240593800606 0.0195703439645 0.0308533101673 0.0370972551577 0.0220259748313 0.0576270076145 0.0487199724465 0.0508215730415 0.0469075647824 0.0527931212734 0.0620087731623 0.046977227863 0.0607033064427 0.0681905343994 0.0417090362137 0.0294084394516 0.0508408090071 0.0450400867986 0.0589677823649 0.0627613332544 0.0433082894199 0.0402642195426 0.0363046359117 0.084449006206 0.0666097386489 0.0449981598795 0.0463289168121 0.0716590868723 0.0429897098207 0.0369198960753 0.0432687121664 0.0364322199569 0.0287560795721 0.0553568599314 0.0500592177291 0.0617693212505 0.0302484927047 0.057817398636 0.0410079662048 0.0620054930234 0.0578523465697 0.0464641684152 0.0365865801942 0.0272374209649 0.0399576916682 0.0513227964118 0.0446516670985 0.0487199724465 0.0480806958126 0.0495453699635 0.0482845333883 0.0560302897599 0.0385013240981 0.0415333749992 0.0339802993211 0.0315416974034 0.0191639032899 0.00845591360235 0.0214341775937 0.0115218935447 0.00798589270321 0.013178045677 0.0100207917484 0.0199483776485 0.012776598265 0.00707708079363 0.0310463483447 0.00779811122226 0.0665120827305 0.0484731432285 0.0421114253952 0.0418216626354 0.0527853739739 0.0493966141428 0.0532439640509 0.0450980317427 0.0534929652418 0.0482563107324 0.0334028573772 0.0421738253769 0.0400650763191 0.0574010770653 0.0690115183659 0.0507807835016 0.033887277172 0.0360916940356 0.0712074753299 0.0488889567075 0.0456112729946 0.052712754383 0.0558191941722 0.0328832629679 0.0398769847778 0.0305626616722 0.03027845702 0.0307126203306 0.051853149134 0.0304315085387 0.0449208583046 0.0225669791691 0.0556104755493 0.0254498384131 0.0488668504899 0.0462046921677 0.0583599392562 0.0343786096194 0.0317076229625 0.0356254179774 0.0534761774586 0.0442401179536 0.0484731432285 0.0494784680372 0.0560638562362 0.0481022882275 0.0453399462887 0.0364100969213 0.0549637988767 0.0386182470618 0.0499647144567 0.0286991839096 0.0337581082348 0.0374427591873 0.0282526339036 0.0355267981937 0.0350362485208 0.0478325170984 0.0425063748067 0.0336958297594 0.0552821351039 0.0385952701325 0.0756759815417 0.0533281139759 0.0401812481707 0.0447968354881 0.0564105044414 0.0391525826608 0.0629279047421 0.0332827070938 0.0388499692511 0.059944716429 0.0494848197832 0.0402828974015 0.0438747097228 0.0580998054312 0.0757410641548 0.0628856838636 0.039846534973 0.047063496141 0.0543336344875 0.0312551117596 0.052807685198 0.0627916823231 0.0390112066642 0.0353058185357 0.0513654740192 0.0323261929775 0.0392211101217 0.0463708307148 0.0519963158873 0.0234224809867 0.0301000678074 0.0372829629985 0.056085164213 0.0288004220361 0.0417416575715 0.0389119081863 0.0720657500645 0.0444323135884 0.0487706488559 0.0429521653792 0.0596874437499 0.052997284605 0.0533281139759 0.055723410021 0.0657462146584 0.0546473494099 0.0397492903292 0.0452841873006 0.0707319601823 0.0522103619845 0.0233361726138 0.027152976334 0.0145101369991 0.0239084572879 0.0231796409398 0.016731423623 0.010101659051 0.00781108168527 0.0186261838058 0.0315054768721 0.011535154356 0.0661583467836 
0.0529737197777 0.0515726376237 0.0472342284059 0.0573866872941 0.0602508051558 0.0535888133028 0.0563499220883 0.0655740867856 0.0494619373879 0.0355566546043 0.0516986354459 0.0478914383755 0.0636133913461 0.0714227137745 0.0519652065027 0.040040966538 0.0379888138015 0.0833863881089 0.0624232672218 0.0504889561622 0.0539483262888 0.0682863371673 0.0434492287718 0.0419221148541 0.0393420375143 0.0383919560321 0.0330409365388 0.0586345177546 0.0426935993753 0.0582011555148 0.0271038932712 0.0620596992058 0.0353264927481 0.0565229896235 0.0582751327845 0.0564224406826 0.0375222198637 0.032781658946 0.0431629716317 0.0579468986629 0.0506284295544 0.0529737197777 0.0534601982596 0.057907553496 0.0543043024926 0.0568716124285 0.0423649825393 0.0520169525452 0.0387128090446 0.0188041113809 0.0118952838684 0.0140246174547 0.0212962400935 0.00885470160798 0.025142494638 0.0164130694781 0.0148836892075 0.0382106616301 0.012548123562 0.0595024177002 0.0405144987261 0.0336829979036 0.0339109092342 0.0447786612887 0.0412528753098 0.0460796810618 0.0378073897699 0.0456289595962 0.0415318567716 0.0274434917532 0.0337586538145 0.0320559171837 0.0493065770936 0.0620181140557 0.044443934423 0.0259210454652 0.0290199212383 0.0631931226882 0.0415500215307 0.0381432011516 0.0457001329437 0.0482419653282 0.0247624956339 0.0326912275918 0.0231122641396 0.0226732232029 0.0241582823353 0.0435758556102 0.024717419625 0.037259551901 0.0161507334564 0.0474557435456 0.0186437176733 0.0428749718429 0.0381563349544 0.0522572394535 0.0270598018866 0.0256825354187 0.028101885414 0.0461630275758 0.0383904744963 0.0405144987261 0.041758354552 0.0492909187408 0.0413606774947 0.0372601462253 0.0289047461458 0.049343864353 0.0319066136889 0.0140884736377 0.0220949123029 0.0294588701998 0.0141915109934 0.0236402055251 0.0202701530019 0.0222961106051 0.0508328337578 0.0209048958304 0.0623063919994 0.0435137888223 0.0379179155361 0.0310595169631 0.0477275300573 0.0401401417297 0.0476510837343 0.0326069267264 0.0442002914704 0.048031576146 0.0373260310255 0.0383688546713 0.0396828967833 0.0540671732006 0.0691161945626 0.052697233895 0.0256698443155 0.0271866198766 0.063343063446 0.0401717345523 0.0456835007162 0.0508390224232 0.0457136947621 0.0340506342939 0.0366331778515 0.0185292352758 0.0332180639746 0.031402826069 0.0470047522912 0.0178796862446 0.0373117201537 0.0146755143259 0.0519441438124 0.0134308953908 0.0305516636587 0.0455547559496 0.0574314455133 0.025677142017 0.0334820400224 0.038185075996 0.053981408775 0.0519396327628 0.0435137888223 0.0464090712113 0.0566586435278 0.0526391071675 0.0449104072539 0.0375180555166 0.0549667946861 0.0334290223836 0.0161979126548 0.0212360948185 0.00313302296585 0.0148689509772 0.00704616989296 0.0135603719097 0.0384674432624 0.00748847746612 0.0612097252086 0.0442840466006 0.040253243803 0.0360606427738 0.0487530949526 0.0470778914902 0.0473392259982 0.0422963534873 0.051949068182 0.0444929580638 0.0309270188278 0.040473799777 0.0386243677637 0.0547837147046 0.066413370926 0.0479885282668 0.0287291459439 0.0288822795748 0.0702829288423 0.0483322868723 0.0432975336344 0.0484818516534 0.0543652596123 0.0329738645592 0.0348525988976 0.0258401695463 0.029652318833 0.0266204166929 0.0488217477328 0.0282325775461 0.0443695558327 0.0152728890442 0.0529346931915 0.0211960241124 0.0432358961175 0.0470042201915 0.0535566647123 0.027654993588 0.0276975587847 0.0349524016669 0.051450438001 0.0457471816551 0.0442840466006 0.0457673454519 0.0531390087147 0.0482613634351 0.0459084957324 
0.0344895940573 0.0501063288839 0.0320407745768 0.00918896575074 0.0152928703979 0.0213827580606 0.0175694634428 0.00562421953864 0.0321021314886 0.0136468651148 0.0732793088732 0.0542749513511 0.0465599433205 0.0461499050441 0.0585282720374 0.0518386762248 0.0596896497046 0.0460357977086 0.0551020502351 0.055347044484 0.0408456006547 0.0466734650365 0.0457685420886 0.0629790052567 0.0758414718388 0.0580725583953 0.0386295601195 0.0415657641172 0.0731303485703 0.0495368191967 0.0520897347287 0.0595893389246 0.0567976430466 0.0379647331116 0.046441942626 0.0333063038869 0.036500108881 0.0377849182347 0.0570420811998 0.030012797008 0.0462545479693 0.027461418846 0.0610786707463 0.0275181743774 0.0490679602747 0.0500650525841 0.0657243645716 0.0396831631678 0.0390548397455 0.0418579268325 0.0600259300869 0.0509169565607 0.0542749513511 0.0557017357111 0.0631094362549 0.0546387182457 0.049518990394 0.0428637051261 0.0625010516077 0.0450876329039 0.0212339329669 0.0205215987455 0.0193863377449 0.00780222226267 0.0244911121423 0.0160871853235 0.0794295187203 0.0616053956912 0.0547043851777 0.0543405312398 0.065903416382 0.0608093966938 0.066232208671 0.0551996275352 0.0641635601661 0.0610614868167 0.0459223913813 0.0547672174246 0.0529488520021 0.0703578675954 0.081658744309 0.063265127216 0.046621662518 0.0487339053303 0.0819943915105 0.0586031447424 0.058550915637 0.0656481691263 0.065908662167 0.0454276311919 0.0529363521846 0.0420305180364 0.0431442291715 0.0436296366503 0.0648039964872 0.0390809908814 0.0552779379516 0.0347675148936 0.0685677387884 0.0363124697872 0.0577513229251 0.0579440183076 0.0708163109663 0.0470812852612 0.0443909494169 0.0483964045026 0.0662606542355 0.055662814312 0.0616053956912 0.0625961650479 0.0687432253304 0.0602433308395 0.0572909174214 0.0493875071422 0.0671416550914 0.051430941132 0.0176886906331 0.00927892520724 0.0135248076976 0.0386449359276 0.00800171044068 0.0600153322548 0.0424684845259 0.0378068586886 0.0345103555887 0.0469162632965 0.044830307306 0.0461600332306 0.040366688607 0.0496446456688 0.0429723253644 0.0292480429291 0.0380026884561 0.0362133523808 0.0526530252718 0.0646507321938 0.046401756397 0.0269368048123 0.0278152961494 0.0678508831968 0.0459682894465 0.0412431425852 0.0469952418705 0.0521326959742 0.0302237793371 0.0333696851677 0.0240543524356 0.0271406206916 0.0250182744726 0.0466959485275 0.0265020349694 0.0419000008026 0.0139599809083 0.0507909794153 0.0193885955935 0.0423195771966 0.0441873849773 0.0525088605043 0.0263754966172 0.0262639235251 0.0325133748369 0.0494223408625 0.0434143928247 0.0424684845259 0.0439360734198 0.0514706200386 0.0459476778298 0.0431249041549 0.0322850478478 0.0492075262735 0.0310084454102 0.0100495790147 0.0163467110294 0.0344303173981 0.0138069903297 0.0725561849391 0.0577945355153 0.0549392051976 0.0494983401957 0.0622636335656 0.0613281176125 0.0591564733558 0.055493289997 0.0660163516694 0.0564238014063 0.0428263617367 0.0551692935595 0.0528843814502 0.0687090576791 0.0786138592889 0.0595588928727 0.0427108568825 0.041401774003 0.084635719422 0.0620469795992 0.0566375025559 0.0605458826833 0.0680826326606 0.0475957607451 0.0475121922821 0.0395532575369 0.0436869591597 0.0392259763169 0.0629613789201 0.0403138291944 0.0584854937624 0.0290090498587 0.0669476358426 0.0346983343078 0.0538333844175 0.0617823249939 0.0639146571596 0.0406990904981 0.0395983575205 0.0488304003282 0.0645174644364 0.0580105192199 0.0577945355153 0.0590354282227 0.0651460923012 0.0612394393036 0.0606595939265 0.0481508588685 
0.059835663962 0.0439114917806 0.0129734005291 0.03341718436 0.00522839839082 0.0644629130549 0.0490781090479 0.0460393397147 0.042060104717 0.0535473784021 0.0537667869164 0.051142158888 0.0493088817338 0.0587609555936 0.047452874403 0.0333145084294 0.0462018313471 0.0433659094036 0.0595851004773 0.0694769331379 0.0504124171924 0.034671039268 0.0338977868767 0.0768809318577 0.0551957563061 0.0471765984426 0.0517762151648 0.0612752535588 0.0380650031758 0.0388085089346 0.0327052071067 0.0338902731124 0.0299610722521 0.054082097785 0.0351427472718 0.0511360689963 0.0213341577068 0.0578680354373 0.0282202368919 0.0499673113862 0.0525352197686 0.0557073834724 0.0329893658613 0.0303826410135 0.0390336792828 0.055051644837 0.0480885204528 0.0490781090479 0.0500556489468 0.0560168186449 0.0513666437929 0.0513150683738 0.038531174408 0.0517653307967 0.0358776775011 0.0293027668161 0.00949293329263 0.0725916210374 0.0547194436592 0.0483620571735 0.0469816749901 0.0590799074957 0.0545464429945 0.0590974756513 0.0490311503881 0.0583273766433 0.0546626805606 0.0398779297173 0.0484777367952 0.0468025950712 0.0639880374892 0.0757199402524 0.0573199482995 0.0393352512936 0.0411552751652 0.0764775433632 0.0532445829203 0.052349146311 0.0590579753428 0.0602574471507 0.039602885829 0.0459481099263 0.0349175810863 0.037150941259 0.03692168916 0.0582151479375 0.0331340211018 0.0497159727507 0.0271028830882 0.0621502493207 0.0292960379946 0.0512053897158 0.0526539904442 0.0643939446385 0.0395489605444 0.0378591872997 0.0425337328718 0.0602822057384 0.0511975943751 0.0547194436592 0.0559279154126 0.0626694858218 0.0551041885347 0.0518894661445 0.0431638732605 0.0608387750183 0.0441361499894 0.0310183369902 0.0871660444405 0.0733823196322 0.0689340787316 0.0705597518969 0.0774366556509 0.07861295118 0.076322371958 0.075585060054 0.0826717778434 0.0681219966699 0.0528565530205 0.0687776982924 0.0640973745197 0.0810751053908 0.0869333145725 0.0682988428223 0.0623918658396 0.0630905228705 0.0989058396189 0.0780506774957 0.0672923155095 0.0734447484338 0.0852286487915 0.0580045491026 0.0635565107431 0.0611199283903 0.0538632888957 0.0534691027903 0.0772908944799 0.0610905623861 0.073927736475 0.0514266311669 0.0797776498939 0.0563654161709 0.0797411714169 0.0709112220147 0.075991910188 0.061929266059 0.0528794637266 0.0580761513446 0.0735774043698 0.0591743277341 0.0733823196322 0.0727567267189 0.0745131906994 0.065799040936 0.0698581828462 0.0592195179672 0.0715468662712 0.0627789092372 0.0648080233561 0.0484745494015 0.0442686485072 0.0419227062634 0.0529072690322 0.05220865754 0.0515658331236 0.0480206916873 0.056947174509 0.0470499747083 0.0323100366631 0.0443750009709 0.0415450201967 0.0583606796016 0.0686531977281 0.0497286282257 0.0341488382347 0.0345013563823 0.0748626932863 0.0530450676854 0.0459172064129 0.0515384326883 0.0594738121093 0.0355453778886 0.038659716641 0.0319368788596 0.0317165879728 0.0293867131129 0.0529269518286 0.0336846365247 0.0489212862277 0.021422361173 0.0566364033836 0.0272163294484 0.0501093106191 0.0498104699827 0.056059086848 0.0332712049783 0.0299486671345 0.0369730112559 0.0537844555125 0.0455932312627 0.0484745494015 0.0493458255088 0.0553162838774 0.0492693292197 0.0486803630647 0.0369835817896 0.0522579331221 0.0364683447074 0.022730708667 0.036743783492 0.0359076620395 0.0204805867516 0.0463990865864 0.0147686048767 0.0549817963395 0.0525592871928 0.0196223614467 0.0349129145261 0.0366443799136 0.0327544499352 0.0228629827418 0.017026958725 0.0209966358066 0.0388626349727 
0.035298115438 0.0567980979705 0.0582058906835 0.0248891314561 0.0145753449597 0.0570907866116 0.0413833987479 0.0269969183741 0.0492024913366 0.039440986454 0.0359176091053 0.0265520996515 0.0615044403055 0.0540461620767 0.0485288037826 0.0239710979892 0.0540724881457 0.0603640513646 0.0428978851084 0.0134027017241 0.0368991356344 0.0354211161184 0.0355257446359 0.0210753154172 0.0377446845484 0.022730708667 0.0201673641607 0.0164841811953 0.0313109932769 0.041027522952 0.0326196770584 0.0184701671719 0.0295244972329 0.0144112105736 0.0158826147242 0.00448936000965 0.0256749628733 0.0117360219999 0.0336843126425 0.0324372887253 0.0146677582401 0.0224513170291 0.0144322007261 0.0126899376427 0.0122323800999 0.0277700509569 0.0197834641463 0.0180901487399 0.0184584697146 0.0418096588931 0.0367452397055 0.0108947935426 0.0131136689852 0.0371795334759 0.0204476192567 0.0118634455346 0.0280193213775 0.0206906520081 0.0208413846069 0.00805992628737 0.0397245591306 0.032104964073 0.0301985227624 0.0107001908888 0.0330988349255 0.0415195660786 0.0230606485279 0.0236410923325 0.0187109927435 0.0222023475244 0.0175662614613 0.0151089228932 0.0279798538754 0.0 0.00504787707871 0.0188703006517 0.0219245561497 0.0212095981034 0.0149942492526 0.0252687339312 0.016225955903 0.014982874816 0.0165713440517 0.0153992347708 0.0258128425881 0.0233194300829 0.0216404069707 0.0254243329386 0.0254506863873 0.000788850240312 0.00972288497974 0.0186083951584 0.0381473811219 0.0294157845108 0.0147578085456 0.0211609731312 0.0342335739858 0.0241609615185 0.0173732430671 0.0259324451415 0.0264222187829 0.0114741578552 0.0193298399608 0.0206022299244 0.0166568971164 0.0229188388297 0.0118905814083 0.0295550989457 0.0189848877848 0.0268759492012 0.0164070102936 0.0252554335694 0.0350869023834 0.0115237799469 0.0366507700029 0.0195520445986 0.0253794363549 0.0152320827299 0.0231766024476 0.028592244401 0.0144112105736 0.0173074040909 0.0296651493536 0.0243710502336 0.0103105434217 0.0155263675354 0.0374184940592 0.0231136537684 0.0190194510466 0.0188364876977 0.0221985054079 0.0214172478398 0.0257446692843 0.0273354856863 0.0275203255805 0.0156705691302 0.0195445751869 0.0258686776409 0.0435616108671 0.0331590462603 0.00822908256455 0.0111578554098 0.0408350390305 0.0282065307084 0.0237933699021 0.0272090918986 0.029489839001 0.0211107451245 0.0171676820259 0.0146997004559 0.0226821224304 0.022183640748 0.0185118691459 0.0264356522916 0.0241910879615 0.0208215741795 0.0237366538469 0.0205889692169 0.0263075431581 0.0263185412116 0.036342074994 0.0103523538982 0.0247947799759 0.0232770954007 0.0303205821473 0.0382875192266 0.0158826147242 0.0205185838207 0.033936913628 0.0342591359421 0.0252819239657 0.0211775313941 0.0366164619483 0.0168683582115 0.0265527528137 0.0122930639405 0.0356399190513 0.0329283261088 0.0161748547301 0.0260907486468 0.0165355786656 0.0155013603686 0.00871939649167 0.0250375042482 0.0205784254424 0.0221880849182 0.0226228234946 0.0402265547715 0.0380348851944 0.0126404460089 0.0132955588731 0.037673161045 0.023685130291 0.0159536871846 0.0317445670434 0.0244407629294 0.0250650773838 0.00665080674841 0.0431354773139 0.0336884150562 0.0346100496205 0.00755310734613 0.037025240188 0.043753914844 0.0237910793513 0.023601769275 0.0229675330633 0.0262374537149 0.0207694947791 0.0142462256068 0.0294419901123 0.00448936000965 0.00600179744469 0.0181950411243 0.0225874743282 0.0220819669003 0.0185630904749 0.0261141108628 0.0202600233978 0.0359510556228 0.0122181959079 0.00715556619596 0.0393252590156 
0.0398773260728 0.015944130912 0.025117898688 0.0279596047113 0.0495072405762 0.0438343802676 0.0223301740975 0.0291958566192 0.0238057945893 0.0121408308676 0.0320439133203 0.0386704894061 0.0116681269474 0.0246115626131 0.0320654848123 0.02212022544 0.0306066666749 0.0360151785428 0.0212925403107 0.0265520435189 0.0096564754612 0.0336990775918 0.0258669646258 0.0271355620099 0.0255831511409 0.0192288539502 0.0492946436623 0.0275425360484 0.0388140188539 0.0303333899385 0.0366778253264 0.0435686919256 0.0256749628733 0.0298872725089 0.043124071299 0.0391147976424 0.020010154166 0.0305524053833 0.0506217525372 0.0340993001213 0.0427032875962 0.0428000340065 0.0127191401929 0.0244302586973 0.0258873530415 0.0224665589838 0.0196473040025 0.0259734946343 0.0180766430627 0.0245847379502 0.0205530555257 0.0515531804399 0.047284161103 0.0168752313645 0.0093274994833 0.0472940785154 0.0299859804517 0.013568861355 0.0349261393238 0.0278394065303 0.0233159306106 0.018729490125 0.0474594716238 0.0428441003445 0.0340535271365 0.0192623858156 0.0396592952666 0.0474504224755 0.0346317498496 0.0155232846365 0.0221965807346 0.023519540409 0.0251795457747 0.0186807942123 0.0328466600284 0.0117360219999 0.0111390258469 0.0171063233969 0.027248122956 0.0326787410905 0.0216187767654 0.0174724607588 0.0153838329478 0.013043913361 0.0463610530412 0.0437380208469 0.0239671160531 0.0322222363953 0.0387763357271 0.0597848214293 0.0513617728094 0.0233595904168 0.0301944308891 0.0320013483014 0.010613155035 0.0399275999718 0.0464560281456 0.0134988892532 0.0290788786594 0.0367911394695 0.0175534572334 0.0343523037935 0.0386269072909 0.0314326304477 0.0171757591643 0.0105270444576 0.0306360966619 0.0365489530298 0.0211256372062 0.0155275027866 0.0282406353658 0.0565789653078 0.0282583663219 0.0416523037157 0.0359394330813 0.0459783038684 0.0506220203623 0.0336843126425 0.0381881672141 0.0517807652888 0.0474506190651 0.0289707556044 0.0360504295798 0.0570578425896 0.0371145976129 0.0460576692202 0.0464080290911 0.0220712004031 0.0312574889523 0.0332251791601 0.054841891687 0.0503036463483 0.028999517533 0.0360798173432 0.0193249539307 0.00826557354917 0.0383933246971 0.0453200679143 0.00495350657916 0.0300295268949 0.0391229254289 0.0271682846269 0.036533385441 0.0426841669003 0.0272844902637 0.0287077471132 0.00927183865749 0.0394174170837 0.0313084618098 0.0314758507766 0.0267668687054 0.022518491913 0.05600898249 0.0343402195218 0.045509854552 0.0363226467926 0.0425348706975 0.0488168350062 0.0324372887253 0.0364696219213 0.0493441352532 0.0445105561012 0.0238995483109 0.0369592605884 0.0574925847452 0.0411978099793 0.0155890245102 0.0251635522565 0.0177733966699 0.020295344741 0.0224627747939 0.00622985531203 0.0261058920982 0.0236064364637 0.0550881017687 0.0493624804622 0.00949195374034 0.00547982782741 0.0508785947136 0.0251610996387 0.0119430843146 0.0365988456521 0.0213251727582 0.0179868212343 0.0207990673061 0.0485594746656 0.044214664825 0.0334335462828 0.0201922684831 0.0401754502972 0.0535325351547 0.0317074448278 0.0123261383208 0.0244676333996 0.0171629247157 0.0177983318365 0.0115555789303 0.021116940732 0.0146677582401 0.0105358367361 0.00915254671069 0.0167177891777 0.0294167459865 0.0147897849251 0.0123457776793 0.0164773765795 0.0252103273152 0.0171988861049 0.0305685435552 0.0365843208441 0.0175183238357 0.0218496748957 0.0205878945245 0.0593121863314 0.0470793832619 0.0159816216856 0.020744505382 0.0508661739809 0.018760262956 0.0128710885205 0.0300992820345 0.0117566522917 0.00679248320348 
0.0280357325772 0.040552704264 0.0415859654681 0.0232256372519 0.0295874345059 0.0314690999231 0.0507965688951 0.0314591977122 0.0256867489287 0.0204093797109 0.00501434925534 0.0122277574576 0.0226603353781 0.0187158051887 0.0224513170291 0.020695499337 0.0231928239165 0.0198154038419 0.0292814261752 0.0103125125409 0.0227651885673 0.0153489133169 0.00921470910295 0.0183064776985 0.0377227049932 0.0290274791546 0.0152667053733 0.0215933253328 0.0343731361054 0.0245700487863 0.0169471424987 0.0257260078813 0.0268671065769 0.0111334970897 0.0193681705086 0.0211865368155 0.0163734248392 0.0228777878657 0.0117963493804 0.030068869871 0.0193762721713 0.027229886316 0.0161367068103 0.025740037188 0.0358416431275 0.0109673463454 0.0364509080414 0.0199965660095 0.0252914478428 0.014773865842 0.0226846344514 0.0279231861054 0.0144322007261 0.0171240275822 0.0292730788831 0.0236979129564 0.00966001570023 0.0151974508598 0.0372251985508 0.023311727536 0.0176051360114 0.032564167101 0.0208965631668 0.0168197313548 0.0207764664023 0.0423976172234 0.033280884693 0.00922898632458 0.0197200489933 0.0360385863631 0.00893473638299 0.014132357355 0.0250685285695 0.0103719683171 0.0167517993882 0.0135716425012 0.0350532380107 0.027879799306 0.0265658749664 0.015993421345 0.0286771298489 0.0428550440411 0.0151980773887 0.0298292088786 0.0196261415194 0.0184891599574 0.00655621912452 0.016322105066 0.0195353045374 0.0126899376427 0.0127701290018 0.0223217848231 0.0157154460048 0.0129568955631 0.00690277207156 0.0299954575682 0.0193817024498 0.0216771607077 0.0225658582257 0.0286138040807 0.0305011727447 0.0368382394098 0.0388439321615 0.0148355509953 0.0174423915339 0.0380129595202 0.0258082909988 0.0229842208153 0.0373470514825 0.0277476701907 0.0308564780706 0.00775830938446 0.0476084837369 0.0347450599297 0.0411478727988 0.00231111578025 0.0424821924039 0.0488975441679 0.0213563138315 0.0271270574956 0.0304501786922 0.0318897260431 0.0231190177025 0.0126379860304 0.0283589858381 0.0122323800999 0.0114136835682 0.0186791916183 0.0206672138954 0.0199079464104 0.0222007080182 0.0303110417953 0.0280827256688 0.0197052367937 0.0449418080277 0.0438182248518 0.0562345701188 0.0602081091792 0.0243728506009 0.019370550695 0.0596171194363 0.0411146925258 0.0330451422406 0.0550957050319 0.039836592047 0.0399228025142 0.0284759591968 0.0663825437409 0.0558252777705 0.0548034680532 0.0236830779253 0.0595333433641 0.0686676458986 0.0397842689041 0.0197958382609 0.0446061287137 0.0392287195509 0.0347118369491 0.0164026290561 0.0305214779366 0.0277700509569 0.0235155079857 0.0135888018548 0.0240589544851 0.0380160024194 0.0333951182547 0.0242014401746 0.0377621051763 0.0314790157987 0.029423203836 0.0582327389784 0.0535477924002 0.0121411421616 0.00926081168701 0.0551978876015 0.028043323191 0.0176337854836 0.041735927557 0.023998617859 0.0217227463265 0.0246395690903 0.0532994548372 0.0483234850835 0.0380931811444 0.0229416680066 0.0449797181572 0.059280772983 0.0338814552602 0.0120128740103 0.0301325195322 0.0203844916972 0.0200232099081 0.0110576598728 0.0182627991321 0.0197834641463 0.0150522414851 0.006556855609 0.0144048387324 0.0315801243453 0.0179150623078 0.0119668288225 0.022167313587 0.00841936493113 0.0453128226707 0.029523676844 0.0223084484022 0.0276272783534 0.0327625901905 0.0158536182647 0.0147737513682 0.0106741877313 0.0162753926733 0.0159900539559 0.0215767754577 0.023165172617 0.0247814128516 0.0137331692872 0.0264980088885 0.0153005942866 0.0291348842471 0.0254286093168 0.0364522562324 
0.00599431682769 0.0189173783639 0.0184393411187 0.0301213577678 0.0340540018144 0.0180901487399 0.0215314467503 0.0337154513722 0.0315978293722 0.024169684578 0.0166816873996 0.0356875981931 0.0143747226708 0.0514837021891 0.0372994668391 0.0230531280792 0.0250236859971 0.0398047566678 0.021934356719 0.0118568359564 0.016712157857 0.0198275637831 0.0143476538676 0.0244078681037 0.0292462046489 0.0328036706334 0.014376575397 0.0286902071785 0.0204063466033 0.0328369650405 0.0322219054124 0.0320787692664 0.00266433410273 0.0165423262191 0.0215926529236 0.03047036768 0.0358252300263 0.0184584697146 0.0213100670766 0.032046547675 0.0333108429689 0.0306570115485 0.018573514939 0.030845411182 0.00824349209933 0.0254197321026 0.047324533609 0.0530498030585 0.0193482837408 0.0436475526218 0.0516271254066 0.0457976820229 0.0502335440929 0.0569904536613 0.0344485460271 0.0478896186827 0.0272539439051 0.0572698534085 0.0358919394369 0.050424643016 0.0434868500285 0.0309009333379 0.0632686594285 0.0501928388473 0.0594250482186 0.0482632028717 0.0482478127894 0.0570074387196 0.0418096588931 0.0448198518438 0.0553205633247 0.0514202209421 0.0325934353049 0.0491031454545 0.0660730408504 0.0548573842682 0.041501827818 0.0494594306994 0.00785967991178 0.0297803384368 0.0414124759052 0.0250716250932 0.0363814861914 0.0430927179021 0.0324719682551 0.0232534554573 0.00557282966442 0.0375065288364 0.0368003051347 0.0279998159154 0.0252506405295 0.0244125277036 0.0601405054627 0.0351799654171 0.0460355764213 0.0372310571496 0.0466115069454 0.0500875517655 0.0367452397055 0.0406580228721 0.0534802351733 0.0469702184582 0.0259083864948 0.0383091416944 0.0609916135179 0.0430915691708 0.0113258711014 0.0432937210726 0.0175511998728 0.0124157338497 0.03212454916 0.0158561794962 0.0176896387613 0.0143093876737 0.043181186929 0.0362893217044 0.0315177649353 0.0141883840024 0.0359022033885 0.0492746305333 0.0223018781484 0.0211045193185 0.0229404408443 0.0181288833884 0.0110415500412 0.00837364985146 0.0172572597582 0.0108947935426 0.00733209366776 0.0131427681285 0.0115359384743 0.0199994152339 0.0092535804352 0.0217603350968 0.0184515208125 0.0501255669434 0.0279388443296 0.0142651752221 0.0382528821443 0.0251771073359 0.0222405400165 0.018907996804 0.050407441851 0.0445874581383 0.03644061978 0.0176520694999 0.0424414179548 0.05338489352 0.0323923780445 0.0108812710336 0.0260958051981 0.0217902651406 0.0213471493056 0.0109405604951 0.0249968133743 0.0131136689852 0.00893536297262 0.00802513881767 0.0192978256402 0.0302167707341 0.018365059649 0.0130926236805 0.0184689475202 0.0343186729394 0.0436186327673 0.0296745080858 0.0408423177906 0.0469164023153 0.0321355072481 0.0293927572919 0.0115197032526 0.0423686398976 0.036129628995 0.0335484782832 0.0259949361948 0.0269704035297 0.0607621523939 0.0380404513983 0.049798653181 0.0409231346593 0.0474755392541 0.0535032395417 0.0371795334759 0.0413092694389 0.054257484038 0.0493594143559 0.0284802931744 0.0415978746211 0.0622229836323 0.0453773607077 0.0190484002067 0.0207749590973 0.00729318592678 0.0172314224853 0.0208211767145 0.0287420582694 0.0242194382748 0.0225231335589 0.0240241380028 0.0229680498334 0.0402905798782 0.0150429891355 0.0374592753112 0.0199314954492 0.0195088829088 0.00836639606005 0.0247669201274 0.0222507048525 0.0204476192567 0.021382719754 0.0305241597933 0.0212891172726 0.0134776031268 0.0107610654817 0.0367912026131 0.0227467739819 0.0252412358335 0.0153803531506 0.0101093922522 0.0191555088988 0.0375470144869 0.0363071087647 0.0222024666973 
0.021689493319 0.0288400411309 0.0426974513075 0.0286740878663 0.0220177373135 0.0125507954062 0.0111145943691 0.0144555489867 0.0195277194831 0.0258649050915 0.0118634455346 0.0120062047089 0.0205457113931 0.0226933950128 0.0265812358015 0.010518368073 0.0210005232169 0.00605049088221 0.0227443548737 0.0240543202468 0.0298445720969 0.0127076622649 0.0212764877573 0.0130936058244 0.0350960563748 0.00616350137707 0.0220053566886 0.029182207808 0.0470447673611 0.0143549158293 0.0270200109504 0.0261798064669 0.0399469917707 0.0417589617676 0.0280193213775 0.0317524179632 0.0441157592928 0.0402109444472 0.0286518076954 0.0255338040061 0.0460635600013 0.0240522825965 0.0109052701432 0.0235077295524 0.0318665250498 0.0308330797717 0.0201941994246 0.026209699134 0.024272445381 0.0437154276666 0.0215651164399 0.0334110813047 0.0182660118628 0.0127706309369 0.00547043300996 0.0236736081076 0.0193715054877 0.0206906520081 0.0206326054685 0.0278093020752 0.0196854448654 0.0196839348285 0.00687772387853 0.0318861086105 0.0187829482374 0.026787715043 0.0349795665731 0.0377560197701 0.0169430774361 0.0294826475268 0.02554791035 0.044564230482 0.0307872765104 0.0282156909894 0.0140157312571 0.00304825224888 0.013131876158 0.0254598849649 0.0242359050423 0.0208413846069 0.0206178921833 0.0265921223132 0.024263714379 0.0287692622655 0.0105411880456 0.0255889461915 0.0107722057824 0.040112260752 0.0281920691871 0.0346799127455 0.00547299312148 0.0351698801996 0.0413927767406 0.0178561632488 0.0295704831304 0.0240442618733 0.028433519767 0.0198981190422 0.0164655446546 0.0293872770207 0.00805992628737 0.0103936152637 0.0224915115769 0.0226675446032 0.0164308167554 0.0187959028502 0.0318543120786 0.0236941138036 0.0214366809685 0.02158460699 0.0453234846239 0.0100348820296 0.0205571695519 0.0349449705288 0.0594353543617 0.0269232145172 0.0378563722335 0.0357646095878 0.0508614713562 0.0504482642845 0.0397245591306 0.0434432071263 0.055747628056 0.0497669350857 0.035138779746 0.0361053959246 0.0583555321158 0.0365217793435 0.0331608473129 0.0326376645366 0.0244797200401 0.0260351758605 0.0198255233977 0.055257342683 0.030607412773 0.0406724640229 0.0316826493706 0.0417015250124 0.044714144963 0.032104964073 0.0358439539437 0.0485265392209 0.0417397589401 0.0211023329973 0.0328093106913 0.055937896155 0.0382216948189 0.039178382683 0.0116462448464 0.0328449320294 0.0356452166019 0.0431040247851 0.0127555033307 0.0192172916283 0.0248029464024 0.0398126039657 0.0387676066679 0.0301985227624 0.0325088097853 0.0421224110282 0.0389248491362 0.0344395222458 0.0235149407659 0.0407276725105 0.0194230824244 0.0402634745964 0.0467109968717 0.0198309327046 0.0277799641062 0.0285372209353 0.0306922978338 0.0217975015009 0.0133715864954 0.0282760151712 0.0107001908888 0.0106288988311 0.0196086000788 0.0208164998238 0.0183683750797 0.0208677693371 0.0306825719595 0.0267116346537 0.0244223076771 0.0326854873354 0.0507787766717 0.0180800830921 0.0283453513368 0.0285692206738 0.0439964220236 0.0435330479692 0.0330988349255 0.0364104267178 0.047999984958 0.0430530022579 0.0322085934623 0.0282999450485 0.049271623839 0.0273166054796 0.0425206430225 0.0621747559979 0.0316259435967 0.0474306721161 0.0460270405303 0.055722110458 0.0616108763567 0.0415195660786 0.0463779068274 0.0600405303926 0.058487923658 0.0428330727641 0.0451145046508 0.0622811849491 0.040848815271 0.0429266615699 0.0304603009077 0.0328680598837 0.0193092495687 0.0257358921646 0.0276472175985 0.0230606485279 0.0242829569209 0.0335562675322 0.0240112096757 
0.0023427193432 0.0215758461563 0.0440130376785 0.0333268924093 0.0336594835101 0.0267459293135 0.0300685155468 0.0190916149163 0.0302108808069 0.0236410923325 0.0197983357166 0.0116623663768 0.025981149508 0.0407083432987 0.0270506742353 0.00533601555818 0.0243947872794 0.0165388277645 0.0203993556991 0.0306449706863 0.0351570407904 0.0187109927435 0.0216936002715 0.0328125704384 0.0328483395342 0.0289711567156 0.0177290185247 0.0324417191457 0.00990245486371 0.0144435719572 0.0253854499058 0.0235470613048 0.0222023475244 0.0213735089495 0.0257216217897 0.0239916366178 0.0307770932602 0.0118559544137 0.0237036848381 0.0117425310805 0.0184555467145 0.0157796168272 0.0175662614613 0.0166625646078 0.0231975140243 0.0146299258577 0.0171445810537 0.00397360286698 0.029169512627 0.0192796522734 0.0184254094366 0.0151089228932 0.0104342480435 0.00810789324855 0.0108902089943 0.0236280097789 0.017384613998 0.0213096795304 0.0252727380791 0.0279798538754 0.0244605767611 0.021500625639 0.00796997392989 0.0256665045822 0.0176058458155 0.0292950203769 0.0310442784549 0.00504787707871 0.0188703006517 0.0219245561497 0.0212095981034 0.0149942492526 0.0252687339312 0.016225955903 0.0138989004364 0.0180767927652 0.0222193332382 0.0141002986592 0.0215877207325 0.0172352062998 0.0156454524782 0.031383424491 0.0212505739868 0.0142951058056 0.0253431933269 0.0219030212003 0.0156518397703 0.0263438271284 0.0285058962486 0.0193626993311 0.0417261059969 0.0313743437253 0.0260801912789 0.0155074292908 0.0227467684355 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt new file mode 100644 index 0000000..8ed5b96 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt @@ -0,0 +1 @@ +0.320369972991 0.338972466 0.308199372323 0.3452431902 0.310024768313 0.357115225615 0.311131096357 0.357391534414 0.329718053755 0.347365921475 0.335272625287 0.336451560653 0.33015370606 0.369628769749 0.344499490029 0.321622508707 0.345377707016 0.321007207534 0.350728979121 0.32809430086 0.30207071308 0.291663252492 0.30760470102 0.315976639534 0.308132467187 0.313014586878 0.310463895925 0.321091616502 0.290044394125 0.322213459935 0.315509196522 0.3331114403 0.281071919202 0.320854431887 0.332190658438 0.299342730178 0.313528775154 0.310049073937 0.288821516545 0.307662081954 0.328387688508 0.317185603454 0.332046170365 0.291912213887 0.37870970117 0.336080073379 0.304593343921 0.330138983604 0.355071759299 0.311946140607 0.302025400768 0.330940761586 0.351140062502 0.354772884287 0.272605322053 0.327957349848 0.28871110366 0.320821172951 0.340976919806 0.30757488831 0.320975346884 0.252776262329 0.314549731907 0.326876483 0.337684418756 0.296520013735 0.31493077245 0.327721982167 0.325802862624 0.341908184107 0.300481749419 0.312499767894 0.301061762121 0.27665157989 0.3082566692 0.287466396145 0.288313694552 0.296629698731 0.283556095025 0.322489360684 0.280765581604 0.297958166613 0.313189657041 0.303470399659 0.348652898212 0.331594734387 0.299446687464 0.339047458559 0.286979246044 0.316326095312 0.321618884109 0.330065896317 0.324500638067 0.328300795872 0.309002568222 0.262587468469 0.31974123777 0.286316182293 0.321162329165 0.328160620315 0.356618051635 0.289733970648 0.344507756538 0.301485561986 0.335785898715 0.322635066518 0.331480718646 0.297897604494 0.306942928189 0.350843442517 0.342585296966 0.341311053315 
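These pdist-*.txt files are expected-output fixtures for scipy.spatial.distance.pdist: each holds one condensed distance vector (one value per unordered pair of input rows) printed on a single line. A minimal sketch of how such a Jensen-Shannon line is produced, assuming scipy >= 1.2 (where the 'jensenshannon' metric appeared); the random X here is a stand-in for illustration, not scipy's actual test input:

import numpy as np
from scipy.spatial.distance import pdist

rng = np.random.RandomState(0)             # hypothetical inputs; the real test
X = rng.random_sample((20, 10))            # inputs ship with scipy's test suite
X /= X.sum(axis=1, keepdims=True)          # make each row a probability vector
d = pdist(X, metric='jensenshannon')       # condensed vector: 20*19/2 = 190 values
print(' '.join('%.12g' % v for v in d))    # same one-line layout as the fixtures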
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt
new file mode 100644
index 0000000..dc396c8
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt
@@ -0,0 +1 @@
+[one long line of pairwise Minkowski (p = 3.2) distance values over the iris samples omitted]
3.6602286e+00 3.8605980e+00 4.0488387e+00 4.1418643e+00 5.4970002e+00 5.6855224e+00 3.5964347e+00 4.4685630e+00 3.5634461e+00 5.4730406e+00 3.5693950e+00 4.3989089e+00 4.7150659e+00 3.4668130e+00 3.5464993e+00 4.2723380e+00 4.5155386e+00 4.8594290e+00 5.2647079e+00 4.2921213e+00 3.7064459e+00 4.1581964e+00 4.9913682e+00 4.3286007e+00 4.1303097e+00 3.4468286e+00 4.1669742e+00 4.3729308e+00 3.9624170e+00 3.7313856e+00 4.6297577e+00 4.4844827e+00 4.0056359e+00 3.6817961e+00 3.9035218e+00 4.1179678e+00 3.7164366e+00 6.2024833e-01 8.1304731e-01 1.1868139e+00 4.8036801e-01 7.1799256e-01 2.8192292e-01 3.2816937e-01 3.2816937e-01 3.0546431e-01 3.2352160e-01 3.2352160e-01 8.5205778e-01 4.8927739e-01 6.6384020e-01 7.3496673e-01 4.5581864e-01 2.4837156e-01 3.2586371e-01 7.6752131e-01 7.4549115e-01 3.2352160e-01 4.1449626e-01 5.0180477e-01 6.8961791e-01 5.8851328e-01 2.5251796e-01 6.8961791e-01 1.0919712e+00 3.7255734e-01 4.2667565e-01 1.4993782e+00 1.0344911e+00 5.0592043e-01 4.5581864e-01 8.1304731e-01 3.0546431e-01 8.5205778e-01 1.0000000e-01 4.9766035e-01 3.3472053e+00 3.0922811e+00 3.5254266e+00 2.6661987e+00 3.2094276e+00 3.0570957e+00 3.2869053e+00 2.0190980e+00 3.1913594e+00 2.5206151e+00 2.3403819e+00 2.7928582e+00 2.6680945e+00 3.2615924e+00 2.2070201e+00 3.0233425e+00 3.0716969e+00 2.6575076e+00 3.1694367e+00 2.5088543e+00 3.4030318e+00 2.5954147e+00 3.4988409e+00 3.2483608e+00 2.8891737e+00 3.0123702e+00 3.4182420e+00 3.6203759e+00 3.0811775e+00 2.1190324e+00 2.4416796e+00 2.3440712e+00 2.4897570e+00 3.6753309e+00 3.0715435e+00 3.0851463e+00 3.3123070e+00 3.0424689e+00 2.6625505e+00 2.6241824e+00 2.9689697e+00 3.1616811e+00 2.5961850e+00 2.0559262e+00 2.7803619e+00 2.7462372e+00 2.7639489e+00 2.8736288e+00 1.7674365e+00 2.6773131e+00 4.6660957e+00 3.7173526e+00 4.5567672e+00 4.1782968e+00 4.4326194e+00 5.2720689e+00 3.1469325e+00 4.9232255e+00 4.4057732e+00 4.8164157e+00 3.7433882e+00 3.9194796e+00 4.1567419e+00 3.6582432e+00 3.8303544e+00 3.9861488e+00 4.0892044e+00 5.3882212e+00 5.5946413e+00 3.6180819e+00 4.3839191e+00 3.5469476e+00 5.3734444e+00 3.5262672e+00 4.3306501e+00 4.6237863e+00 3.4237160e+00 3.5051302e+00 4.2288456e+00 4.4201622e+00 4.7609637e+00 5.1280035e+00 4.2469785e+00 3.6684143e+00 4.1480002e+00 4.8602572e+00 4.2765700e+00 4.0824098e+00 3.4092877e+00 4.0737132e+00 4.2991233e+00 3.8524190e+00 3.7173526e+00 4.5590471e+00 4.4107160e+00 3.9202843e+00 3.6509512e+00 3.8388884e+00 4.0680120e+00 3.6894983e+00 4.1449626e-01 6.6539428e-01 1.0717668e+00 1.1847335e+00 7.0776547e-01 3.2816937e-01 9.2095040e-01 4.4651726e-01 6.0060595e-01 3.8934542e-01 6.1092863e-01 3.7598397e-01 3.0000000e-01 4.1312257e-01 2.4837156e-01 4.0293660e-01 4.1312257e-01 2.0656129e-01 3.0000000e-01 6.0611244e-01 7.3535471e-01 9.3801395e-01 3.0811765e-01 4.2538717e-01 7.1462831e-01 3.0811765e-01 5.2574978e-01 3.0275928e-01 3.2816937e-01 1.1107977e+00 4.5470518e-01 4.1449626e-01 4.8927739e-01 4.1449626e-01 4.4417983e-01 2.8192292e-01 5.2942799e-01 2.5251796e-01 3.4297053e+00 3.0906838e+00 3.5704156e+00 2.5301680e+00 3.2062204e+00 2.9663489e+00 3.2615889e+00 1.8330979e+00 3.2074600e+00 2.4030878e+00 2.1292724e+00 2.7344480e+00 2.5716369e+00 3.2053511e+00 2.1242643e+00 3.0798277e+00 2.9831836e+00 2.5729378e+00 3.0964590e+00 2.3917863e+00 3.3353616e+00 2.5635110e+00 3.4441347e+00 3.1882407e+00 2.8938821e+00 3.0477086e+00 3.4484194e+00 3.6265826e+00 3.0209783e+00 2.0203134e+00 2.3063579e+00 2.2046610e+00 2.4100833e+00 3.5972040e+00 2.9748436e+00 3.0349291e+00 3.3414931e+00 2.9918962e+00 2.5764694e+00 
2.5038051e+00 2.8573838e+00 3.1113597e+00 2.5079404e+00 1.8623849e+00 2.6799601e+00 2.6656374e+00 2.6804452e+00 2.8458006e+00 1.5870088e+00 2.5906376e+00 4.6056614e+00 3.6293396e+00 4.5625120e+00 4.1184849e+00 4.3862724e+00 5.2957861e+00 3.0253131e+00 4.9300368e+00 4.3656957e+00 4.8256905e+00 3.7228356e+00 3.8717400e+00 4.1487889e+00 3.5605424e+00 3.7509165e+00 3.9489970e+00 4.0502806e+00 5.4193574e+00 5.6096505e+00 3.5201263e+00 4.3797165e+00 3.4555095e+00 5.4004015e+00 3.4805320e+00 4.3069452e+00 4.6373516e+00 3.3738930e+00 3.4478147e+00 4.1762321e+00 4.4428877e+00 4.7870294e+00 5.1982218e+00 4.1948678e+00 3.6180819e+00 4.0668114e+00 4.9227056e+00 4.2245318e+00 4.0358897e+00 3.3459883e+00 4.0835979e+00 4.2783731e+00 3.8797354e+00 3.6293396e+00 4.5370189e+00 4.3879553e+00 3.9155334e+00 3.5955337e+00 3.8113970e+00 4.0131848e+00 3.6132595e+00 5.2862779e-01 1.2431040e+00 1.5013525e+00 9.7779835e-01 5.3588338e-01 1.0669582e+00 8.1385214e-01 6.6432544e-01 7.2823007e-01 6.5223271e-01 5.1138698e-01 5.6700421e-01 2.5251796e-01 4.6472023e-01 5.6769031e-01 4.9766035e-01 2.5651975e-01 2.1269358e-01 6.6432544e-01 1.1134787e+00 1.2632199e+00 1.4096146e-01 2.8507955e-01 7.6787403e-01 1.4096146e-01 4.0293660e-01 4.4651726e-01 5.1691876e-01 7.1840099e-01 4.1586001e-01 6.3108414e-01 8.7021234e-01 2.0000000e-01 8.1385214e-01 2.5251796e-01 7.6787403e-01 3.2586371e-01 3.6025735e+00 3.2810515e+00 3.7511944e+00 2.6894009e+00 3.3904673e+00 3.1636869e+00 3.4574937e+00 1.9666356e+00 3.3893691e+00 2.5954173e+00 2.1997395e+00 2.9322283e+00 2.7092568e+00 3.4012145e+00 2.3186758e+00 3.2568914e+00 3.1861493e+00 2.7595194e+00 3.2561045e+00 2.5646808e+00 3.5381764e+00 2.7476411e+00 3.6278993e+00 3.3809159e+00 3.0768226e+00 3.2277675e+00 3.6265617e+00 3.8150532e+00 3.2176230e+00 2.1864840e+00 2.4668912e+00 2.3596992e+00 2.5949561e+00 3.7935487e+00 3.1789378e+00 3.2360886e+00 3.5252258e+00 3.1522058e+00 2.7777040e+00 2.6819136e+00 3.0473722e+00 3.3079290e+00 2.6886547e+00 1.9757309e+00 2.8726212e+00 2.8654680e+00 2.8788483e+00 3.0349462e+00 1.7160413e+00 2.7852734e+00 4.8087107e+00 3.8282466e+00 4.7531334e+00 4.3176393e+00 4.5857287e+00 5.4831923e+00 3.2147850e+00 5.1185883e+00 4.5544260e+00 5.0194259e+00 3.9185849e+00 4.0655452e+00 4.3416283e+00 3.7535680e+00 3.9509795e+00 4.1478442e+00 4.2472736e+00 5.6096505e+00 5.7957776e+00 3.6945993e+00 4.5734622e+00 3.6568202e+00 5.5854254e+00 3.6720840e+00 4.5038991e+00 4.8262859e+00 3.5684917e+00 3.6474985e+00 4.3740189e+00 4.6282931e+00 4.9713928e+00 5.3806679e+00 4.3928114e+00 3.8121990e+00 4.2612863e+00 5.1032991e+00 4.4267055e+00 4.2347444e+00 3.5464871e+00 4.2738510e+00 4.4745238e+00 4.0663411e+00 3.8282466e+00 4.7338066e+00 4.5852690e+00 4.1075310e+00 3.7823897e+00 4.0070636e+00 4.2156933e+00 3.8157950e+00 1.6177449e+00 1.7454671e+00 1.2604558e+00 8.6361309e-01 1.4955532e+00 1.0118409e+00 1.1594648e+00 9.6204649e-01 6.2081167e-01 9.1750357e-01 8.7504951e-01 7.6752131e-01 8.0660588e-01 9.5965467e-01 9.2859317e-01 5.7324170e-01 6.2205176e-01 1.1313840e+00 1.2653669e+00 1.4930627e+00 6.4755655e-01 7.0479928e-01 1.2236003e+00 6.4755655e-01 2.1269358e-01 8.5105559e-01 7.7360126e-01 7.1169738e-01 2.5651975e-01 8.7229670e-01 1.1327578e+00 5.3588338e-01 1.0269295e+00 3.8934542e-01 1.1042097e+00 7.2823007e-01 4.0317004e+00 3.6659830e+00 4.1618561e+00 3.0123702e+00 3.7804276e+00 3.4970843e+00 3.8244351e+00 2.2591077e+00 3.7930789e+00 2.8953397e+00 2.4889124e+00 3.2809188e+00 3.0866488e+00 3.7578933e+00 2.6595288e+00 3.6754272e+00 3.5073435e+00 3.1173742e+00 3.6212723e+00 
2.9065572e+00 3.8667462e+00 3.1302383e+00 3.9918403e+00 3.7416229e+00 3.4760444e+00 3.6375992e+00 4.0358101e+00 4.2016915e+00 3.5683934e+00 2.5569968e+00 2.8007817e+00 2.6989368e+00 2.9539253e+00 4.1306024e+00 3.4882801e+00 3.5831257e+00 3.9280671e+00 3.5362697e+00 3.1099883e+00 3.0065416e+00 3.3706887e+00 3.6672620e+00 3.0442126e+00 2.2719663e+00 3.2032390e+00 3.2071637e+00 3.2176230e+00 3.4155491e+00 2.0139971e+00 3.1260028e+00 5.1310217e+00 4.1456639e+00 5.1323742e+00 4.6609614e+00 4.9284761e+00 5.8739676e+00 3.4984873e+00 5.5048216e+00 4.9175276e+00 5.3898806e+00 4.2781762e+00 4.4176533e+00 4.7107211e+00 4.0621414e+00 4.2494597e+00 4.4865569e+00 4.6045396e+00 6.0012333e+00 6.1816086e+00 4.0339598e+00 4.9390284e+00 3.9601358e+00 5.9803696e+00 4.0277694e+00 4.8626276e+00 5.2147981e+00 3.9185849e+00 3.9887029e+00 4.7161140e+00 5.0257341e+00 5.3673499e+00 5.7939320e+00 4.7320534e+00 4.1713411e+00 4.5995433e+00 5.5085511e+00 4.7536729e+00 4.5857356e+00 3.8819510e+00 4.6519178e+00 4.8256399e+00 4.4430890e+00 4.1456639e+00 5.0898961e+00 4.9316646e+00 4.4680158e+00 4.1325542e+00 4.3648035e+00 4.5412859e+00 4.1418557e+00 4.5581864e-01 4.1586001e-01 7.7074935e-01 5.0991930e-01 7.1840099e-01 7.2486328e-01 7.3145860e-01 1.2122249e+00 9.2112464e-01 1.1384810e+00 1.1451403e+00 9.1163729e-01 7.0386584e-01 7.4855857e-01 1.2220203e+00 1.1947245e+00 6.6827038e-01 6.2081167e-01 3.4378533e-01 1.1229906e+00 9.9348625e-01 5.2942799e-01 1.1229906e+00 1.5344133e+00 8.2275389e-01 8.5233811e-01 1.8985661e+00 1.4692412e+00 8.9653332e-01 8.7420176e-01 1.2431040e+00 7.3813096e-01 1.2951131e+00 5.5419992e-01 9.3801395e-01 3.5789198e+00 3.3663244e+00 3.7753619e+00 3.0049442e+00 3.4909841e+00 3.3695525e+00 3.5654259e+00 2.3989172e+00 3.4663502e+00 2.8427326e+00 2.7185849e+00 3.0894572e+00 3.0108764e+00 3.5617386e+00 2.5173832e+00 3.2758681e+00 3.3732554e+00 2.9816791e+00 3.4895316e+00 2.8451507e+00 3.6905956e+00 2.8989400e+00 3.8036776e+00 3.5549103e+00 3.1734856e+00 3.2772927e+00 3.6834126e+00 3.8868430e+00 3.3809159e+00 2.4588872e+00 2.7850016e+00 2.6918796e+00 2.8113773e+00 3.9804187e+00 3.3742776e+00 3.3692592e+00 3.5726491e+00 3.3587234e+00 2.9691171e+00 2.9539253e+00 3.2926883e+00 3.4584304e+00 2.9217347e+00 2.4321061e+00 3.0988783e+00 3.0546600e+00 3.0736357e+00 3.1699903e+00 2.1306832e+00 2.9913743e+00 4.9420128e+00 4.0177712e+00 4.8123874e+00 4.4703485e+00 4.7113827e+00 5.5147622e+00 3.4715574e+00 5.1811127e+00 4.6944291e+00 5.0587041e+00 4.0117533e+00 4.2085851e+00 4.4199792e+00 3.9616570e+00 4.1103933e+00 4.2537610e+00 4.3721243e+00 5.6218420e+00 5.8419148e+00 3.9412893e+00 4.6390186e+00 3.8408636e+00 5.6159291e+00 3.8175051e+00 4.5984929e+00 4.8771654e+00 3.7143727e+00 3.7943375e+00 4.5141564e+00 4.6734732e+00 5.0086627e+00 5.3396700e+00 4.5298770e+00 3.9647930e+00 4.4561969e+00 5.0778756e+00 4.5477422e+00 4.3671210e+00 3.6996802e+00 4.3269400e+00 4.5602465e+00 4.0901232e+00 4.0177712e+00 4.8233796e+00 4.6689006e+00 4.1757336e+00 3.9466531e+00 4.1130674e+00 4.3408596e+00 3.9840684e+00 5.3588338e-01 9.7098574e-01 6.0611244e-01 7.4549115e-01 1.0101422e+00 8.1242502e-01 1.2342162e+00 1.1486378e+00 1.1959482e+00 1.4468211e+00 1.0906388e+00 9.4287188e-01 1.0346741e+00 1.3793330e+00 1.4148192e+00 1.0065841e+00 5.5419992e-01 2.8507955e-01 1.3835747e+00 1.2681309e+00 9.0679720e-01 1.3835747e+00 1.6801917e+00 1.0588560e+00 1.0122141e+00 2.2040881e+00 1.5564198e+00 1.0122141e+00 7.7553525e-01 1.4987155e+00 7.4893123e-01 1.4320120e+00 7.3813096e-01 1.1765359e+00 3.3186105e+00 3.0934278e+00 3.5115632e+00 
2.9015832e+00 3.2557855e+00 3.1381850e+00 3.2787144e+00 2.3983798e+00 3.2261964e+00 2.6655261e+00 2.7738368e+00 2.8425716e+00 2.9377092e+00 3.3097860e+00 2.3365894e+00 3.0236933e+00 3.1147370e+00 2.7988444e+00 3.3431646e+00 2.7201960e+00 3.4033622e+00 2.7009102e+00 3.5863979e+00 3.3171611e+00 2.9439491e+00 3.0327979e+00 3.4475678e+00 3.6195561e+00 3.1337459e+00 2.3758157e+00 2.6957302e+00 2.6219409e+00 2.6429556e+00 3.7305206e+00 3.1152653e+00 3.0762634e+00 3.3088561e+00 3.2115055e+00 2.7318540e+00 2.8101506e+00 3.0967820e+00 3.1998457e+00 2.7619926e+00 2.4596921e+00 2.9002737e+00 2.8148869e+00 2.8449691e+00 2.9378173e+00 2.1723936e+00 2.7843048e+00 4.6358686e+00 3.7621042e+00 4.5316360e+00 4.1928583e+00 4.4221843e+00 5.2364242e+00 3.2682245e+00 4.9072991e+00 4.4428425e+00 4.7561724e+00 3.7219581e+00 3.9498605e+00 4.1390009e+00 3.7275066e+00 3.8377652e+00 3.9569614e+00 4.0917968e+00 5.3289557e+00 5.5738695e+00 3.7540308e+00 4.3457024e+00 3.5797958e+00 5.3465693e+00 3.5720882e+00 4.3012547e+00 4.5936468e+00 3.4616111e+00 3.5205889e+00 4.2389017e+00 4.4031390e+00 4.7432976e+00 5.0569442e+00 4.2531342e+00 3.7092459e+00 4.2038056e+00 4.8064634e+00 4.2402361e+00 4.0806404e+00 3.4276314e+00 4.0438546e+00 4.2676463e+00 3.8085992e+00 3.7621042e+00 4.5272919e+00 4.3667579e+00 3.8946701e+00 3.7163265e+00 3.8338395e+00 4.0342445e+00 3.7061759e+00 4.4651726e-01 4.4651726e-01 3.2816937e-01 5.7257017e-01 3.4378533e-01 8.2384013e-01 6.6432544e-01 8.0758367e-01 9.3048953e-01 5.8851328e-01 4.3691963e-01 5.1691876e-01 8.8062848e-01 8.9917007e-01 5.0817745e-01 3.6171588e-01 3.2816937e-01 8.6361309e-01 7.3851529e-01 4.1449626e-01 8.6361309e-01 1.1845977e+00 5.4292906e-01 4.9766035e-01 1.6754036e+00 1.0919712e+00 5.3309112e-01 6.2024833e-01 9.7098574e-01 3.8934542e-01 9.3824087e-01 2.8507955e-01 6.5223271e-01 3.5185448e+00 3.2633258e+00 3.6996953e+00 2.8710255e+00 3.3892942e+00 3.2497279e+00 3.4561374e+00 2.2371784e+00 3.3772302e+00 2.7023432e+00 2.5704711e+00 2.9638836e+00 2.8904978e+00 3.4483274e+00 2.3826791e+00 3.1954061e+00 3.2493673e+00 2.8645447e+00 3.3669805e+00 2.7184506e+00 3.5654259e+00 2.7812639e+00 3.6908684e+00 3.4451701e+00 3.0736340e+00 3.1881331e+00 3.6017790e+00 3.7914024e+00 3.2604423e+00 2.3308454e+00 2.6548674e+00 2.5630676e+00 2.6859989e+00 3.8615219e+00 3.2492317e+00 3.2498302e+00 3.4856775e+00 3.2460061e+00 2.8456767e+00 2.8220742e+00 3.1709561e+00 3.3452644e+00 2.7965957e+00 2.2780262e+00 2.9733693e+00 2.9362769e+00 2.9511072e+00 3.0600825e+00 1.9688013e+00 2.8661222e+00 4.8176767e+00 3.8889176e+00 4.7230291e+00 4.3578295e+00 4.5962942e+00 5.4442203e+00 3.3242177e+00 5.1039076e+00 4.5914416e+00 4.9653393e+00 3.8994399e+00 4.0931001e+00 4.3174903e+00 3.8262108e+00 3.9673816e+00 4.1302370e+00 4.2654212e+00 5.5551457e+00 5.7673205e+00 3.8189943e+00 4.5366342e+00 3.7059360e+00 5.5500825e+00 3.6985459e+00 4.4934936e+00 4.7995387e+00 3.5922369e+00 3.6724425e+00 4.3963259e+00 4.6010378e+00 4.9364818e+00 5.2936070e+00 4.4094664e+00 3.8558772e+00 4.3453589e+00 5.0159331e+00 4.4225169e+00 4.2579435e+00 3.5745624e+00 4.2301614e+00 4.4459076e+00 3.9875954e+00 3.8889176e+00 4.7169919e+00 4.5531173e+00 4.0619315e+00 3.8238093e+00 3.9999729e+00 4.2141826e+00 3.8611742e+00 6.3808075e-01 3.0275928e-01 3.7598397e-01 2.1269358e-01 5.6769031e-01 3.4378533e-01 5.3022554e-01 5.0991930e-01 2.1845981e-01 1.4096146e-01 1.4096146e-01 4.5581864e-01 4.5581864e-01 3.0811765e-01 6.0670504e-01 7.3496673e-01 4.2667565e-01 3.2816937e-01 4.0293660e-01 4.2667565e-01 7.6787403e-01 1.4096146e-01 1.2418578e-01 
1.2394907e+00 7.1504098e-01 3.2586371e-01 5.2942799e-01 5.2862779e-01 3.2586371e-01 5.2942799e-01 2.5651975e-01 2.1269358e-01 3.4944845e+00 3.2032390e+00 3.6588207e+00 2.7040077e+00 3.3172489e+00 3.1387381e+00 3.3902207e+00 2.0195610e+00 3.3129652e+00 2.5744164e+00 2.3173648e+00 2.8753045e+00 2.7215057e+00 3.3565935e+00 2.2694598e+00 3.1556501e+00 3.1511848e+00 2.7390328e+00 3.2351115e+00 2.5646808e+00 3.4858646e+00 2.6854398e+00 3.5885398e+00 3.3452644e+00 3.0014619e+00 3.1359624e+00 3.5442447e+00 3.7351231e+00 3.1683717e+00 2.1722580e+00 2.4832809e+00 2.3831271e+00 2.5617005e+00 3.7607269e+00 3.1489919e+00 3.1764760e+00 3.4370400e+00 3.1222134e+00 2.7420671e+00 2.6759392e+00 3.0406669e+00 3.2584404e+00 2.6649106e+00 2.0477765e+00 2.8508344e+00 2.8325946e+00 2.8443188e+00 2.9745020e+00 1.7495699e+00 2.7521074e+00 4.7498176e+00 3.7908064e+00 4.6736601e+00 4.2736523e+00 4.5261084e+00 5.4025762e+00 3.1986968e+00 5.0495127e+00 4.5070435e+00 4.9284820e+00 3.8418637e+00 4.0108142e+00 4.2628455e+00 3.7198158e+00 3.8891307e+00 4.0719001e+00 4.1917075e+00 5.5215376e+00 5.7192826e+00 3.6876129e+00 4.4897240e+00 3.6129201e+00 5.5066558e+00 3.6138577e+00 4.4349731e+00 4.7514299e+00 3.5090368e+00 3.5920050e+00 4.3185209e+00 4.5521574e+00 4.8905855e+00 5.2768100e+00 4.3339547e+00 3.7672401e+00 4.2403934e+00 4.9963306e+00 4.3598652e+00 4.1826701e+00 3.4920978e+00 4.1853524e+00 4.3933832e+00 3.9574195e+00 3.7908064e+00 4.6611791e+00 4.5034762e+00 4.0149574e+00 3.7307866e+00 3.9355645e+00 4.1498459e+00 3.7732223e+00 6.0551856e-01 4.4535192e-01 6.0670504e-01 1.1765359e+00 6.9325418e-01 9.2288144e-01 9.3637892e-01 7.3535471e-01 5.3665999e-01 5.8914551e-01 1.0576043e+00 1.0106392e+00 4.5581864e-01 5.4292906e-01 4.5581864e-01 9.4009473e-01 8.6290690e-01 4.5581864e-01 9.4009473e-01 1.3885563e+00 6.5223271e-01 7.4740267e-01 1.7041201e+00 1.3421549e+00 7.2823007e-01 6.0611244e-01 1.0653845e+00 6.0121055e-01 1.1521791e+00 4.1586001e-01 7.7919451e-01 3.1037808e+00 2.8727295e+00 3.2914954e+00 2.5112138e+00 2.9950832e+00 2.8632951e+00 3.0711321e+00 1.9332545e+00 2.9709745e+00 2.3448578e+00 2.2688022e+00 2.5914913e+00 2.5183808e+00 3.0579528e+00 2.0217308e+00 2.7906520e+00 2.8719896e+00 2.4738237e+00 2.9926636e+00 2.3440712e+00 3.1967616e+00 2.3980102e+00 3.3005331e+00 3.0483897e+00 2.6752185e+00 2.7868575e+00 3.1931777e+00 3.3968373e+00 2.8794765e+00 1.9640287e+00 2.2899742e+00 2.1990648e+00 2.3082381e+00 3.4762640e+00 2.8726212e+00 2.8752807e+00 3.0841055e+00 2.8599559e+00 2.4658566e+00 2.4543822e+00 2.7852734e+00 2.9558536e+00 2.4184985e+00 1.9730073e+00 2.5941661e+00 2.5490308e+00 2.5692104e+00 2.6676432e+00 1.6855044e+00 2.4875166e+00 4.4549901e+00 3.5192877e+00 4.3283747e+00 3.9701062e+00 4.2192020e+00 5.0364062e+00 2.9740218e+00 4.6944291e+00 4.1953299e+00 4.5849301e+00 3.5249823e+00 3.7114178e+00 3.9340128e+00 3.4663826e+00 3.6308220e+00 3.7719045e+00 3.8752244e+00 5.1489675e+00 5.3620178e+00 3.4363773e+00 4.1584261e+00 3.3485848e+00 5.1375979e+00 3.3209346e+00 4.1101337e+00 4.3926615e+00 3.2186045e+00 3.2982828e+00 4.0192434e+00 4.1885158e+00 4.5274663e+00 4.8785522e+00 4.0373008e+00 3.4619693e+00 3.9494193e+00 4.6147130e+00 4.0642632e+00 3.8698115e+00 3.2042572e+00 3.8459316e+00 4.0795080e+00 3.6227082e+00 3.5192877e+00 4.3377723e+00 4.1907472e+00 3.6992844e+00 3.4507210e+00 3.6230931e+00 3.8568809e+00 3.4855556e+00 4.5581864e-01 1.2418578e-01 6.2660376e-01 5.1607523e-01 5.2655962e-01 8.0096515e-01 4.0438741e-01 3.0546431e-01 4.0438741e-01 6.4806901e-01 7.1504098e-01 4.4535192e-01 3.2586371e-01 
4.9857388e-01 7.0784540e-01 6.2081167e-01 4.5581864e-01 7.0784540e-01 9.3824087e-01 4.0147421e-01 3.2586371e-01 1.5252485e+00 8.1558458e-01 3.7598397e-01 4.0147421e-01 8.1099042e-01 1.2418578e-01 6.9006418e-01 2.1269358e-01 5.0270183e-01 3.4104878e+00 3.1150013e+00 3.5735680e+00 2.6813198e+00 3.2413086e+00 3.0598576e+00 3.2985843e+00 2.0418831e+00 3.2329790e+00 2.5171713e+00 2.3816204e+00 2.7937685e+00 2.7102198e+00 3.2724336e+00 2.2054062e+00 3.0737397e+00 3.0650685e+00 2.6738621e+00 3.1983741e+00 2.5253420e+00 3.3951420e+00 2.6185785e+00 3.5201263e+00 3.2642011e+00 2.9245132e+00 3.0559405e+00 3.4672191e+00 3.6501426e+00 3.0869409e+00 2.1449779e+00 2.4611582e+00 2.3678836e+00 2.5038051e+00 3.6798850e+00 3.0627375e+00 3.0833417e+00 3.3518108e+00 3.0810282e+00 2.6595270e+00 2.6323570e+00 2.9747184e+00 3.1719581e+00 2.6119950e+00 2.0875308e+00 2.7837517e+00 2.7481947e+00 2.7653278e+00 2.8959432e+00 1.7918633e+00 2.6810089e+00 4.6579399e+00 3.7114178e+00 4.5860934e+00 4.1845117e+00 4.4368376e+00 5.3138890e+00 3.1392617e+00 4.9608959e+00 4.4280037e+00 4.8387532e+00 3.7530908e+00 3.9304148e+00 4.1764880e+00 3.6510310e+00 3.8111653e+00 3.9838913e+00 4.1019789e+00 5.4302306e+00 5.6352651e+00 3.6334268e+00 4.4011799e+00 3.5332521e+00 5.4201202e+00 3.5378545e+00 4.3430510e+00 4.6603717e+00 3.4302376e+00 3.5053533e+00 4.2335883e+00 4.4640423e+00 4.8059592e+00 5.1881280e+00 4.2497073e+00 3.6834126e+00 4.1572779e+00 4.9128775e+00 4.2687104e+00 4.0908836e+00 3.4061169e+00 4.0989195e+00 4.3064904e+00 3.8759115e+00 3.7114178e+00 4.5707958e+00 4.4147019e+00 3.9326468e+00 3.6624594e+00 3.8494244e+00 4.0586633e+00 3.6843892e+00 4.0176783e-01 9.3801395e-01 3.7427929e-01 6.0551856e-01 4.9766035e-01 4.1449626e-01 2.5251796e-01 3.2352160e-01 7.0437330e-01 6.2024833e-01 2.4837156e-01 7.0826681e-01 8.1099042e-01 5.3665999e-01 5.7257017e-01 4.0293660e-01 5.3665999e-01 1.0321505e+00 3.2352160e-01 4.9857388e-01 1.2654843e+00 1.0181000e+00 4.9857388e-01 4.6472023e-01 6.6432544e-01 4.4535192e-01 8.1354181e-01 3.2586371e-01 4.4535192e-01 3.1652953e+00 2.9034751e+00 3.3383293e+00 2.4277398e+00 3.0113552e+00 2.8500355e+00 3.0981427e+00 1.7594421e+00 2.9944056e+00 2.3105335e+00 2.0559262e+00 2.5987706e+00 2.4171653e+00 3.0605340e+00 2.0054351e+00 2.8372675e+00 2.8748086e+00 2.4385827e+00 2.9411544e+00 2.2761106e+00 3.2149087e+00 2.3896630e+00 3.2878368e+00 3.0415738e+00 2.6906230e+00 2.8216976e+00 3.2222602e+00 3.4304873e+00 2.8822802e+00 1.8816502e+00 2.1994544e+00 2.0961718e+00 2.2729984e+00 3.4713427e+00 2.8746311e+00 2.8977380e+00 3.1239380e+00 2.8149627e+00 2.4626518e+00 2.3988202e+00 2.7512943e+00 2.9634715e+00 2.3744738e+00 1.7861398e+00 2.5679553e+00 2.5438761e+00 2.5602722e+00 2.6724144e+00 1.5132025e+00 2.4693940e+00 4.4818617e+00 3.5188715e+00 4.3693901e+00 3.9814082e+00 4.2430316e+00 5.0846750e+00 2.9378173e+00 4.7308926e+00 4.2026915e+00 4.6369546e+00 3.5592955e+00 3.7219741e+00 3.9702025e+00 3.4565374e+00 3.6485303e+00 3.8059948e+00 3.8952692e+00 5.2045888e+00 5.4038637e+00 3.3942456e+00 4.2018404e+00 3.3550631e+00 5.1838582e+00 3.3280757e+00 4.1439346e+00 4.4349731e+00 3.2284912e+00 3.3133518e+00 4.0354963e+00 4.2293106e+00 4.5707958e+00 4.9486891e+00 4.0555804e+00 3.4667821e+00 3.9402495e+00 4.6817169e+00 4.0952984e+00 3.8891597e+00 3.2181054e+00 3.8906833e+00 4.1179678e+00 3.6792093e+00 3.5188715e+00 4.3738746e+00 4.2318888e+00 3.7415007e+00 3.4482727e+00 3.6509580e+00 3.8867812e+00 3.4956677e+00 6.2660376e-01 4.1449626e-01 4.8927739e-01 7.0479928e-01 3.0546431e-01 2.5251796e-01 3.2816937e-01 
5.7324170e-01 6.2538346e-01 3.7255734e-01 4.4535192e-01 5.7324170e-01 6.2482915e-01 5.3665999e-01 4.3691963e-01 6.2482915e-01 8.7420176e-01 3.2352160e-01 2.5651975e-01 1.4293465e+00 7.7360126e-01 2.5651975e-01 4.0147421e-01 7.1504098e-01 2.1269358e-01 6.2660376e-01 2.4837156e-01 4.1586001e-01 3.4011512e+00 3.1015495e+00 3.5629124e+00 2.6446848e+00 3.2242409e+00 3.0444970e+00 3.2854349e+00 1.9922212e+00 3.2208624e+00 2.4875432e+00 2.3235341e+00 2.7738624e+00 2.6761538e+00 3.2590031e+00 2.1770137e+00 3.0609726e+00 3.0488669e+00 2.6564689e+00 3.1671679e+00 2.4967542e+00 3.3778209e+00 2.5968629e+00 3.5012162e+00 3.2523394e+00 2.9093820e+00 3.0417674e+00 3.4541339e+00 3.6357801e+00 3.0695657e+00 2.1117686e+00 2.4265922e+00 2.3324013e+00 2.4794505e+00 3.6641654e+00 3.0465086e+00 3.0686943e+00 3.3395333e+00 3.0541897e+00 2.6428278e+00 2.6018930e+00 2.9558536e+00 3.1589081e+00 2.5867906e+00 2.0334278e+00 2.7624503e+00 2.7347909e+00 2.7481947e+00 2.8804789e+00 1.7294430e+00 2.6604040e+00 4.6390099e+00 3.6904099e+00 4.5721680e+00 4.1717456e+00 4.4201622e+00 5.3038306e+00 3.1101376e+00 4.9521292e+00 4.4131751e+00 4.8218478e+00 3.7351231e+00 3.9119157e+00 4.1593600e+00 3.6239123e+00 3.7806200e+00 3.9616940e+00 4.0893976e+00 5.4208566e+00 5.6225133e+00 3.6099406e+00 4.3833809e+00 3.5087649e+00 5.4106224e+00 3.5167400e+00 4.3287713e+00 4.6517741e+00 3.4091049e+00 3.4875352e+00 4.2154392e+00 4.4559638e+00 4.7948032e+00 5.1800729e+00 4.2298456e+00 3.6705550e+00 4.1464752e+00 4.8981257e+00 4.2482702e+00 4.0788778e+00 3.3871264e+00 4.0817153e+00 4.2852718e+00 3.8517038e+00 3.6904099e+00 4.5544260e+00 4.3933832e+00 3.9084933e+00 3.6377874e+00 3.8310704e+00 4.0381483e+00 3.6684338e+00 7.9016429e-01 9.0454394e-01 7.7553525e-01 6.5633874e-01 6.8961791e-01 6.5172743e-01 6.4755655e-01 6.9325418e-01 8.5690100e-01 7.5871717e-01 9.8800009e-01 6.3977563e-01 5.0503591e-01 9.0852141e-01 6.3977563e-01 6.2482915e-01 6.2605182e-01 4.4651726e-01 1.3039319e+00 4.5470518e-01 6.8801986e-01 9.4492923e-01 6.5223271e-01 6.9325418e-01 4.9674312e-01 7.6752131e-01 5.2574978e-01 3.9950977e+00 3.6682165e+00 4.1454421e+00 3.1171350e+00 3.7869887e+00 3.5623665e+00 3.8418637e+00 2.4093459e+00 3.7913585e+00 2.9787373e+00 2.6930133e+00 3.3123070e+00 3.1668302e+00 3.7985007e+00 2.6980573e+00 3.6472316e+00 3.5682291e+00 3.1756360e+00 3.6828708e+00 2.9891564e+00 3.9102500e+00 3.1455588e+00 4.0375495e+00 3.7881557e+00 3.4756264e+00 3.6204175e+00 4.0288578e+00 4.2043522e+00 3.6069560e+00 2.6127852e+00 2.9004348e+00 2.8010550e+00 3.0013391e+00 4.1901031e+00 3.5584876e+00 3.6126340e+00 3.9170234e+00 3.5823448e+00 3.1646752e+00 3.0923554e+00 3.4563987e+00 3.7021702e+00 3.1018442e+00 2.4341346e+00 3.2724336e+00 3.2604423e+00 3.2715632e+00 3.4335342e+00 2.1375243e+00 3.1807243e+00 5.1732049e+00 4.2085267e+00 5.1402548e+00 4.7090394e+00 4.9640507e+00 5.8783901e+00 3.5970425e+00 5.5195747e+00 4.9586651e+00 5.3905797e+00 4.2919639e+00 4.4545908e+00 4.7214902e+00 4.1323720e+00 4.2962344e+00 4.5078390e+00 4.6379987e+00 5.9985593e+00 6.1927610e+00 4.1173292e+00 4.9467209e+00 4.0217355e+00 5.9855018e+00 4.0597680e+00 4.8845913e+00 5.2228901e+00 3.9505682e+00 4.0262006e+00 4.7557226e+00 5.0295898e+00 5.3695643e+00 5.7704875e+00 4.7697480e+00 4.2124808e+00 4.6690776e+00 5.4857949e+00 4.7867344e+00 4.6237863e+00 3.9220577e+00 4.6512574e+00 4.8397561e+00 4.4244727e+00 4.2085267e+00 5.1102370e+00 4.9464234e+00 4.4688286e+00 4.1733964e+00 4.3844239e+00 4.5752087e+00 4.1957914e+00 3.8934542e-01 3.7598397e-01 1.5422108e-01 3.4583729e-01 3.7598397e-01 
4.4651726e-01 3.8934542e-01 3.2816937e-01 8.2929029e-01 9.3610001e-01 4.3691963e-01 5.3022554e-01 5.3309112e-01 4.3691963e-01 7.5976039e-01 3.2586371e-01 4.2667565e-01 1.0733200e+00 7.4777660e-01 2.1845981e-01 5.0905001e-01 4.3456114e-01 5.2942799e-01 5.5492130e-01 4.6472023e-01 3.7427929e-01 3.2191540e+00 2.9033203e+00 3.3725057e+00 2.3744738e+00 3.0162346e+00 2.8254861e+00 3.0850817e+00 1.6867642e+00 3.0206125e+00 2.2489449e+00 1.9859316e+00 2.5612285e+00 2.4037412e+00 3.0483897e+00 1.9482601e+00 2.8711841e+00 2.8361445e+00 2.4280197e+00 2.9151666e+00 2.2428341e+00 3.1709561e+00 2.3770599e+00 3.2772927e+00 3.0392407e+00 2.7044574e+00 2.8456337e+00 3.2543813e+00 3.4363773e+00 2.8560815e+00 1.8517858e+00 2.1570512e+00 2.0577182e+00 2.2449618e+00 3.4472201e+00 2.8333788e+00 2.8654874e+00 3.1455372e+00 2.8102985e+00 2.4278629e+00 2.3504126e+00 2.7240842e+00 2.9511072e+00 2.3468577e+00 1.7140774e+00 2.5325623e+00 2.5220817e+00 2.5303132e+00 2.6704164e+00 1.4096199e+00 2.4355523e+00 4.4339908e+00 3.4713427e+00 4.3742064e+00 3.9639546e+00 4.2144772e+00 5.1104621e+00 2.8721481e+00 4.7555981e+00 4.1997133e+00 4.6264002e+00 3.5337158e+00 3.6986235e+00 3.9580816e+00 3.3951420e+00 3.5651172e+00 3.7578933e+00 3.8852294e+00 5.2312618e+00 5.4232729e+00 3.3678461e+00 4.1846468e+00 3.2909043e+00 5.2164277e+00 3.3005331e+00 4.1286019e+00 4.4583488e+00 3.1948184e+00 3.2785487e+00 4.0052019e+00 4.2627019e+00 4.5989546e+00 4.9978258e+00 4.0193656e+00 3.4602150e+00 3.9315374e+00 4.7096962e+00 4.0441539e+00 3.8751788e+00 3.1769821e+00 3.8841455e+00 4.0829381e+00 3.6559398e+00 3.4713427e+00 4.3535851e+00 4.1922661e+00 3.7063225e+00 3.4135466e+00 3.6262912e+00 3.8337160e+00 3.4586921e+00 4.5470518e-01 3.4378533e-01 4.9766035e-01 5.6631629e-01 3.2586371e-01 3.7255734e-01 6.5172743e-01 7.6625946e-01 9.7356960e-01 4.4651726e-01 7.0784540e-01 8.1273630e-01 4.4651726e-01 6.8757066e-01 4.4417983e-01 6.0670504e-01 1.1521791e+00 6.5172743e-01 4.5581864e-01 4.5470518e-01 5.6700421e-01 4.8036801e-01 5.1607523e-01 5.8851328e-01 5.0905001e-01 3.1972361e+00 2.8360340e+00 3.3243092e+00 2.2696891e+00 2.9531300e+00 2.6835197e+00 2.9981183e+00 1.5916843e+00 2.9546153e+00 2.1366783e+00 1.9099663e+00 2.4717637e+00 2.3219731e+00 2.9300203e+00 1.8705419e+00 2.8448793e+00 2.7044574e+00 2.2951567e+00 2.8430073e+00 2.1220080e+00 3.0654291e+00 2.3117864e+00 3.1749624e+00 2.9091307e+00 2.6433897e+00 2.8065044e+00 3.2000771e+00 3.3714688e+00 2.7511201e+00 1.7679293e+00 2.0426611e+00 1.9423536e+00 2.1457242e+00 3.3172489e+00 2.6940968e+00 2.7682296e+00 3.0935247e+00 2.7391698e+00 2.2996943e+00 2.2360451e+00 2.5729378e+00 2.8382774e+00 2.2413445e+00 1.6312555e+00 2.4031247e+00 2.3848740e+00 2.4037412e+00 2.5843380e+00 1.3803845e+00 2.3178393e+00 4.3373464e+00 3.3555449e+00 4.3029534e+00 3.8394246e+00 4.1166152e+00 5.0345534e+00 2.7564428e+00 4.6628709e+00 4.0929284e+00 4.5724955e+00 3.4657942e+00 3.6038441e+00 3.8912788e+00 3.2935105e+00 3.4985926e+00 3.6938082e+00 3.7771525e+00 5.1600819e+00 5.3477989e+00 3.2453592e+00 4.1245629e+00 3.1885527e+00 5.1389533e+00 3.2183480e+00 4.0411872e+00 4.3734530e+00 3.1116430e+00 3.1793624e+00 3.9065553e+00 4.1815080e+00 4.5288943e+00 4.9506208e+00 3.9281139e+00 3.3421539e+00 3.7789119e+00 4.6812838e+00 3.9623295e+00 3.7603559e+00 3.0782101e+00 3.8325036e+00 4.0241284e+00 3.6474081e+00 3.3555449e+00 4.2739171e+00 4.1339606e+00 3.6720159e+00 3.3349237e+00 3.5512382e+00 3.7511837e+00 3.3364095e+00 4.1312257e-01 5.0905001e-01 4.2538717e-01 3.2352160e-01 2.0656129e-01 5.0592043e-01 1.1017858e+00 
1.2234738e+00 1.5422108e-01 4.1312257e-01 6.3924842e-01 1.5422108e-01 6.1968386e-01 4.0293660e-01 5.2942799e-01 7.7919451e-01 6.2482915e-01 5.6631629e-01 8.1385214e-01 2.5251796e-01 8.0032200e-01 4.2538717e-01 7.1462831e-01 3.2352160e-01 3.3601225e+00 3.0492285e+00 3.5130686e+00 2.4784234e+00 3.1574358e+00 2.9495683e+00 3.2305582e+00 1.7639552e+00 3.1543365e+00 2.3872777e+00 2.0066796e+00 2.7104259e+00 2.4866453e+00 3.1794134e+00 2.0999661e+00 3.0164205e+00 2.9733298e+00 2.5409084e+00 3.0313923e+00 2.3496997e+00 3.3211033e+00 2.5173832e+00 3.4035007e+00 3.1599783e+00 2.8424094e+00 2.9896107e+00 3.3894792e+00 3.5821206e+00 2.9960387e+00 1.9633030e+00 2.2546134e+00 2.1472921e+00 2.3729779e+00 3.5766918e+00 2.9692947e+00 3.0147040e+00 3.2887803e+00 2.9233984e+00 2.5628362e+00 2.4694536e+00 2.8371552e+00 3.0850817e+00 2.4681135e+00 1.7749726e+00 2.6585961e+00 2.6493446e+00 2.6623632e+00 2.8060305e+00 1.5124582e+00 2.5679553e+00 4.5912390e+00 3.6144502e+00 4.5212874e+00 4.0982070e+00 4.3637125e+00 5.2494547e+00 3.0086587e+00 4.8875515e+00 4.3294620e+00 4.7880147e+00 3.6912876e+00 3.8418637e+00 4.1117747e+00 3.5413188e+00 3.7389462e+00 3.9250135e+00 4.0233529e+00 5.3754694e+00 5.5627643e+00 3.4785161e+00 4.3436980e+00 3.4455964e+00 5.3512930e+00 3.4471566e+00 4.2776053e+00 4.5941143e+00 3.3449470e+00 3.4269051e+00 4.1524717e+00 4.3946634e+00 4.7366258e+00 5.1414650e+00 4.1713411e+00 3.5895201e+00 4.0467919e+00 4.8635964e+00 4.2075047e+00 4.0127353e+00 3.3273395e+00 4.0411872e+00 4.2480617e+00 3.8321687e+00 3.6144502e+00 4.5072985e+00 4.3598062e+00 3.8780834e+00 3.5586316e+00 3.7805671e+00 3.9971028e+00 3.6003219e+00 2.5651975e-01 2.8192292e-01 3.4378533e-01 3.4378533e-01 4.0147421e-01 7.1840099e-01 8.5690100e-01 3.7598397e-01 4.2538717e-01 5.3665999e-01 3.7598397e-01 6.6827038e-01 2.1269358e-01 3.0546431e-01 1.1320702e+00 6.2988288e-01 2.0656129e-01 4.4535192e-01 4.2667565e-01 4.1449626e-01 4.3691963e-01 3.8934542e-01 2.5251796e-01 3.3428183e+00 3.0232018e+00 3.4944845e+00 2.4950353e+00 3.1381402e+00 2.9363801e+00 3.2027420e+00 1.8084630e+00 3.1409294e+00 2.3642733e+00 2.1113036e+00 2.6784604e+00 2.5283251e+00 3.1625374e+00 2.0667297e+00 2.9950041e+00 2.9473422e+00 2.5410503e+00 3.0407257e+00 2.3596992e+00 3.2858223e+00 2.4986337e+00 3.3960124e+00 3.1519721e+00 2.8255193e+00 2.9686973e+00 3.3765772e+00 3.5575681e+00 2.9720515e+00 1.9732878e+00 2.2758449e+00 2.1765379e+00 2.3630256e+00 3.5606213e+00 2.9432282e+00 2.9813163e+00 3.2672636e+00 2.9349850e+00 2.5393641e+00 2.4678343e+00 2.8348218e+00 3.0655803e+00 2.4649412e+00 1.8381372e+00 2.6459992e+00 2.6324803e+00 2.6428278e+00 2.7886501e+00 1.5384446e+00 2.5500177e+00 4.5509522e+00 3.5863729e+00 4.4953238e+00 4.0776551e+00 4.3319640e+00 5.2308062e+00 2.9880146e+00 4.8736353e+00 4.3174903e+00 4.7496159e+00 3.6545040e+00 3.8173510e+00 4.0797075e+00 3.5129177e+00 3.6850739e+00 3.8789949e+00 4.0011311e+00 5.3516635e+00 5.5447338e+00 3.4854203e+00 4.3070102e+00 3.4066692e+00 5.3366535e+00 3.4209331e+00 4.2472818e+00 4.5769661e+00 3.3144678e+00 3.3951450e+00 4.1229838e+00 4.3815083e+00 4.7200939e+00 5.1200990e+00 4.1381062e+00 3.5750015e+00 4.0416512e+00 4.8355880e+00 4.1628805e+00 3.9898962e+00 3.2934163e+00 4.0073765e+00 4.2053647e+00 3.7839552e+00 3.5863729e+00 4.4735121e+00 4.3145846e+00 3.8316735e+00 3.5355330e+00 3.7466071e+00 3.9521135e+00 3.5718733e+00 1.2418578e-01 5.2942799e-01 4.9766035e-01 2.5251796e-01 6.0060595e-01 7.1462831e-01 4.4535192e-01 3.8776762e-01 3.2352160e-01 4.4535192e-01 8.5434758e-01 1.2418578e-01 2.5251796e-01 
1.2643026e+00 8.1354181e-01 4.1449626e-01 4.5581864e-01 5.6769031e-01 3.0546431e-01 6.2024833e-01 2.0656129e-01 2.5251796e-01 3.3898078e+00 3.1105347e+00 3.5575681e+00 2.6249526e+00 3.2229246e+00 3.0488669e+00 3.3004172e+00 1.9465831e+00 3.2118493e+00 2.4993166e+00 2.2473120e+00 2.7928582e+00 2.6296047e+00 3.2639046e+00 2.1933937e+00 3.0559188e+00 3.0673749e+00 2.6440180e+00 3.1486786e+00 2.4776707e+00 3.4056271e+00 2.5954147e+00 3.4958702e+00 3.2483608e+00 2.9041534e+00 3.0378828e+00 3.4425295e+00 3.6411503e+00 3.0811775e+00 2.0851901e+00 2.3997585e+00 2.2980893e+00 2.4741664e+00 3.6714798e+00 3.0661139e+00 3.0923102e+00 3.3390355e+00 3.0288896e+00 2.6566259e+00 2.5949561e+00 2.9511072e+00 3.1662394e+00 2.5768305e+00 1.9764051e+00 2.7650187e+00 2.7420671e+00 2.7570191e+00 2.8802219e+00 1.6913411e+00 2.6662783e+00 4.6723657e+00 3.7109297e+00 4.5802461e+00 4.1829734e+00 4.4414351e+00 5.3024507e+00 3.1246867e+00 4.9479218e+00 4.4123750e+00 4.8421359e+00 3.7582429e+00 3.9238367e+00 4.1750713e+00 3.6455011e+00 3.8262353e+00 3.9966275e+00 4.0997246e+00 5.4219259e+00 5.6210533e+00 3.5985833e+00 4.4045665e+00 3.5402439e+00 5.4041845e+00 3.5288526e+00 4.3467079e+00 4.6509698e+00 3.4261305e+00 3.5087649e+00 4.2340479e+00 4.4487072e+00 4.7898591e+00 5.1726632e+00 4.2521321e+00 3.6728562e+00 4.1446028e+00 4.9002452e+00 4.2845042e+00 4.0915924e+00 3.4110551e+00 4.0971854e+00 4.3142588e+00 3.8789274e+00 3.7109297e+00 4.5753785e+00 4.4261431e+00 3.9377466e+00 3.6482464e+00 3.8509694e+00 4.0749843e+00 3.6894983e+00 5.1607523e-01 4.5470518e-01 2.5251796e-01 7.0086313e-01 8.1067767e-01 3.7598397e-01 2.8192292e-01 3.0546431e-01 3.7598397e-01 8.2654509e-01 1.2418578e-01 2.1845981e-01 1.1754055e+00 8.0326782e-01 4.2667565e-01 5.7324170e-01 4.9766035e-01 4.1449626e-01 6.0551856e-01 3.0546431e-01 2.0656129e-01 3.4780839e+00 3.2028668e+00 3.6478832e+00 2.7001131e+00 3.3123070e+00 3.1424188e+00 3.3940268e+00 2.0081113e+00 3.3027430e+00 2.5846998e+00 2.2899742e+00 2.8843735e+00 2.7014135e+00 3.3579678e+00 2.2804674e+00 3.1447330e+00 3.1614572e+00 2.7347909e+00 3.2266676e+00 2.5600441e+00 3.4989115e+00 2.6835197e+00 3.5848035e+00 3.3425343e+00 2.9944056e+00 3.1272113e+00 3.5315985e+00 3.7321938e+00 3.1736230e+00 2.1642821e+00 2.4763086e+00 2.3729779e+00 2.5613679e+00 3.7645411e+00 3.1602773e+00 3.1861493e+00 3.4298218e+00 3.1090174e+00 2.7503801e+00 2.6773131e+00 3.0414782e+00 3.2606191e+00 2.6626548e+00 2.0308266e+00 2.8549070e+00 2.8371552e+00 2.8500790e+00 2.9720515e+00 1.7439430e+00 2.7570191e+00 4.7646254e+00 3.8019108e+00 4.6714768e+00 4.2777154e+00 4.5341669e+00 5.3940437e+00 3.2096520e+00 5.0408889e+00 4.5037829e+00 4.9318274e+00 3.8493108e+00 4.0147814e+00 4.2656755e+00 3.7323432e+00 3.9122146e+00 4.0862794e+00 4.1939019e+00 5.5136120e+00 5.7113100e+00 3.6836404e+00 4.4947801e+00 3.6298047e+00 5.4953655e+00 3.6181784e+00 4.4396174e+00 4.7440323e+00 3.5161021e+00 3.6013159e+00 4.3259034e+00 4.5411167e+00 4.8804165e+00 5.2620015e+00 4.3431589e+00 3.7666196e+00 4.2394195e+00 4.9871255e+00 4.3755778e+00 4.1864815e+00 3.5032323e+00 4.1868319e+00 4.4036244e+00 3.9638383e+00 3.8019108e+00 4.6672392e+00 4.5155386e+00 4.0245873e+00 3.7349501e+00 3.9420153e+00 4.1660972e+00 3.7835217e+00 1.2418578e-01 7.0826681e-01 9.4125538e-01 1.1340084e+00 2.1845981e-01 4.4417983e-01 8.2105460e-01 2.1845981e-01 3.8776762e-01 4.1449626e-01 4.2418962e-01 9.1075311e-01 3.7255734e-01 4.8036801e-01 6.6827038e-01 2.5651975e-01 6.4704320e-01 2.0656129e-01 6.8961791e-01 3.2586371e-01 3.4686627e+00 3.1154621e+00 3.6024772e+00 
2.5108475e+00 3.2290313e+00 2.9704704e+00 3.2809332e+00 1.7903952e+00 3.2349033e+00 2.3960250e+00 2.0600195e+00 2.7476411e+00 2.5610667e+00 3.2181149e+00 2.1323638e+00 3.1154281e+00 2.9881598e+00 2.5786309e+00 3.0947136e+00 2.3839455e+00 3.3448197e+00 2.5821869e+00 3.4532666e+00 3.1998457e+00 2.9200439e+00 3.0794219e+00 3.4771054e+00 3.6510733e+00 3.0328831e+00 2.0197525e+00 2.2893157e+00 2.1858172e+00 2.4166535e+00 3.6030381e+00 2.9770247e+00 3.0492285e+00 3.3713329e+00 2.9974031e+00 2.5833316e+00 2.4944673e+00 2.8535197e+00 3.1260028e+00 2.5104998e+00 1.8109223e+00 2.6804452e+00 2.6742360e+00 2.6875169e+00 2.8656044e+00 1.5447938e+00 2.5961850e+00 4.6147552e+00 3.6320149e+00 4.5850999e+00 4.1288948e+00 4.3989089e+00 5.3208661e+00 3.0146208e+00 4.9525575e+00 4.3778283e+00 4.8485483e+00 3.7416375e+00 3.8836706e+00 4.1692394e+00 3.5584719e+00 3.7545764e+00 3.9635178e+00 4.0653249e+00 5.4465238e+00 5.6318041e+00 3.5148434e+00 4.4004449e+00 3.4571917e+00 5.4256316e+00 3.4931266e+00 4.3243671e+00 4.6617210e+00 3.3863989e+00 3.4595139e+00 4.1872611e+00 4.4691596e+00 4.8126955e+00 5.2323771e+00 4.2057897e+00 3.6309135e+00 4.0714444e+00 4.9544547e+00 4.2355968e+00 4.0495222e+00 3.3563815e+00 4.1075398e+00 4.2957899e+00 3.9065020e+00 3.6320149e+00 4.5543025e+00 4.4046818e+00 3.9362563e+00 3.6038441e+00 3.8285790e+00 4.0238937e+00 3.6204282e+00 6.2538346e-01 1.0167353e+00 1.1763980e+00 1.4096146e-01 4.1449626e-01 7.4740267e-01 1.4096146e-01 4.4535192e-01 3.7427929e-01 4.5581864e-01 8.2135873e-01 4.4535192e-01 5.0503591e-01 7.3145860e-01 2.1269358e-01 7.1421512e-01 2.5251796e-01 6.8961791e-01 2.8192292e-01 3.4295980e+00 3.0905489e+00 3.5700123e+00 2.4944673e+00 3.2020292e+00 2.9613737e+00 3.2617087e+00 1.7750284e+00 3.2049793e+00 2.3909366e+00 2.0308266e+00 2.7326472e+00 2.5286673e+00 3.2028668e+00 2.1181082e+00 3.0792693e+00 2.9816970e+00 2.5624932e+00 3.0681391e+00 2.3677174e+00 3.3352475e+00 2.5566444e+00 3.4334259e+00 3.1839972e+00 2.8907702e+00 3.0462904e+00 3.4448498e+00 3.6256155e+00 3.0181476e+00 1.9946242e+00 2.2719663e+00 2.1665831e+00 2.3980102e+00 3.5922217e+00 2.9733478e+00 3.0355056e+00 3.3410264e+00 2.9673667e+00 2.5744164e+00 2.4820750e+00 2.8455141e+00 3.1100045e+00 2.4920874e+00 1.7903952e+00 2.6704164e+00 2.6637326e+00 2.6767607e+00 2.8425716e+00 1.5257596e+00 2.5839288e+00 4.6057175e+00 3.6244539e+00 4.5619285e+00 4.1170543e+00 4.3856360e+00 5.2953657e+00 3.0110441e+00 4.9290739e+00 4.3593510e+00 4.8266994e+00 3.7227460e+00 3.8675033e+00 4.1480696e+00 3.5505922e+00 3.7479505e+00 3.9489183e+00 4.0495222e+00 5.4213759e+00 5.6069704e+00 3.4988409e+00 4.3796539e+00 3.4519560e+00 5.3990720e+00 3.4751739e+00 4.3070102e+00 4.6372963e+00 3.3701472e+00 3.4467337e+00 4.1738909e+00 4.4422690e+00 4.7852959e+00 5.2004339e+00 4.1925494e+00 3.6148707e+00 4.0613681e+00 4.9222119e+00 4.2248103e+00 4.0355816e+00 3.3448336e+00 4.0832977e+00 4.2781022e+00 3.8793995e+00 3.6244539e+00 4.5369609e+00 4.3880177e+00 3.9147164e+00 3.5857963e+00 3.8105301e+00 4.0134966e+00 3.6122845e+00 7.1799256e-01 8.0358695e-01 5.5419992e-01 4.6472023e-01 2.5651975e-01 5.5419992e-01 1.0198386e+00 3.2352160e-01 4.1586001e-01 1.2565757e+00 1.0054037e+00 4.1586001e-01 5.2574978e-01 6.4806901e-01 4.5581864e-01 8.0619006e-01 3.2586371e-01 4.1586001e-01 3.3274681e+00 3.0643176e+00 3.5031390e+00 2.5831315e+00 3.1734856e+00 3.0256804e+00 3.2593968e+00 1.9049236e+00 3.1662394e+00 2.4587503e+00 2.1948123e+00 2.7522526e+00 2.5874301e+00 3.2341367e+00 2.1493214e+00 2.9966141e+00 3.0389019e+00 2.6214821e+00 3.0978571e+00 
2.4463233e+00 3.3673968e+00 2.5500177e+00 3.4578354e+00 3.2240818e+00 2.8578051e+00 2.9828212e+00 3.3905763e+00 3.5904118e+00 3.0455175e+00 2.0467937e+00 2.3640301e+00 2.2638728e+00 2.4385827e+00 3.6424937e+00 3.0387448e+00 3.0543001e+00 3.2867025e+00 2.9810914e+00 2.6291524e+00 2.5579619e+00 2.9291061e+00 3.1351569e+00 2.5421955e+00 1.9274228e+00 2.7359835e+00 2.7196686e+00 2.7293044e+00 2.8418790e+00 1.6234861e+00 2.6349941e+00 4.6268918e+00 3.6735954e+00 4.5276539e+00 4.1519514e+00 4.3981255e+00 5.2510005e+00 3.0847983e+00 4.9049143e+00 4.3738893e+00 4.7804645e+00 3.7059360e+00 3.8806135e+00 4.1210200e+00 3.6007179e+00 3.7674167e+00 3.9401211e+00 4.0632074e+00 5.3683319e+00 5.5675337e+00 3.5650560e+00 4.3467079e+00 3.4964385e+00 5.3534247e+00 3.4817971e+00 4.3007421e+00 4.6057628e+00 3.3796707e+00 3.4684743e+00 4.1910954e+00 4.4033511e+00 4.7374076e+00 5.1107389e+00 4.2056854e+00 3.6417452e+00 4.1251907e+00 4.8290847e+00 4.2339606e+00 4.0576409e+00 3.3702841e+00 4.0376849e+00 4.2550416e+00 3.8015574e+00 3.6735954e+00 4.5249462e+00 4.3662620e+00 3.8699102e+00 3.5979726e+00 3.8007870e+00 4.0252278e+00 3.6568095e+00 3.0811765e-01 1.0065841e+00 9.1075311e-01 6.2538346e-01 1.0065841e+00 1.2125198e+00 7.0086313e-01 6.1623531e-01 1.8279039e+00 1.0613462e+00 6.9369532e-01 4.8135521e-01 1.1149070e+00 3.0811765e-01 9.7098574e-01 4.0293660e-01 8.0358695e-01 3.4154940e+00 3.1439160e+00 3.5872997e+00 2.8054691e+00 3.2839149e+00 3.1127876e+00 3.3274872e+00 2.2159139e+00 3.2598167e+00 2.6167778e+00 2.5758644e+00 2.8523754e+00 2.8256291e+00 3.3122081e+00 2.3003824e+00 3.0947136e+00 3.1160876e+00 2.7384939e+00 3.2940867e+00 2.6281170e+00 3.4401593e+00 2.6851662e+00 3.5758474e+00 3.3024121e+00 2.9636544e+00 3.0850893e+00 3.4935692e+00 3.6787177e+00 3.1382691e+00 2.2643860e+00 2.5838312e+00 2.4957147e+00 2.5876691e+00 3.7272092e+00 3.1148696e+00 3.1192689e+00 3.3731473e+00 3.1641238e+00 2.7164367e+00 2.7371177e+00 3.0433470e+00 3.2094276e+00 2.6989752e+00 2.2729593e+00 2.8576425e+00 2.7954161e+00 2.8234677e+00 2.9407805e+00 1.9980146e+00 2.7511201e+00 4.6993349e+00 3.7716613e+00 4.6090409e+00 4.2170163e+00 4.4740971e+00 5.3233695e+00 3.2312218e+00 4.9715358e+00 4.4649192e+00 4.8634712e+00 3.7907269e+00 3.9777091e+00 4.2102908e+00 3.7293867e+00 3.8887827e+00 4.0321207e+00 4.1304736e+00 5.4336694e+00 5.6535520e+00 3.7088191e+00 4.4333012e+00 3.6017014e+00 5.4287623e+00 3.5939935e+00 4.3696884e+00 4.6684884e+00 3.4865359e+00 3.5518450e+00 4.2777882e+00 4.4716883e+00 4.8204359e+00 5.1830573e+00 4.2975164e+00 3.7189281e+00 4.1915116e+00 4.9282164e+00 4.3126073e+00 4.1182993e+00 3.4568393e+00 4.1297239e+00 4.3496867e+00 3.9207320e+00 3.7716613e+00 4.6018276e+00 4.4564872e+00 3.9827168e+00 3.7295388e+00 3.8905364e+00 4.1037903e+00 3.7281480e+00 1.1474460e+00 1.0344911e+00 7.0043186e-01 1.1474460e+00 1.4311891e+00 8.2654509e-01 7.6787403e-01 1.9730918e+00 1.3073038e+00 7.9878917e-01 6.2407309e-01 1.2632199e+00 5.0503591e-01 1.1833480e+00 5.0905001e-01 9.4080461e-01 3.4392518e+00 3.2008338e+00 3.6261197e+00 2.9076510e+00 3.3439089e+00 3.2073032e+00 3.3907558e+00 2.3329978e+00 3.3168472e+00 2.7081478e+00 2.6929215e+00 2.9283888e+00 2.9264154e+00 3.3942456e+00 2.3848677e+00 3.1312883e+00 3.2027420e+00 2.8385969e+00 3.3781905e+00 2.7324845e+00 3.5133135e+00 2.7602023e+00 3.6563535e+00 3.3905763e+00 3.0256209e+00 3.1312883e+00 3.5423793e+00 3.7299679e+00 3.2179031e+00 2.3661539e+00 2.6906231e+00 2.6054995e+00 2.6801752e+00 3.8138933e+00 3.2027420e+00 3.1888037e+00 3.4187265e+00 3.2459231e+00 2.8060305e+00 
2.8359967e+00 3.1453916e+00 3.2886661e+00 2.7943622e+00 2.3874574e+00 2.9533314e+00 2.8877105e+00 2.9141136e+00 3.0147886e+00 2.0992326e+00 2.8425716e+00 4.7630756e+00 3.8535531e+00 4.6550014e+00 4.2947468e+00 4.5389196e+00 5.3632740e+00 3.3234239e+00 5.0232338e+00 4.5363757e+00 4.8985515e+00 3.8441304e+00 4.0476997e+00 4.2599433e+00 3.8106152e+00 3.9516603e+00 4.0850903e+00 4.1995425e+00 5.4671477e+00 5.6957748e+00 3.8037734e+00 4.4771379e+00 3.6783395e+00 5.4690202e+00 3.6632177e+00 4.4259826e+00 4.7160634e+00 3.5560806e+00 3.6239123e+00 4.3464973e+00 4.5187406e+00 4.8619146e+00 5.2001625e+00 4.3635746e+00 3.7979761e+00 4.2843668e+00 4.9451734e+00 4.3709882e+00 4.1899199e+00 3.5299194e+00 4.1707976e+00 4.3972344e+00 3.9457376e+00 3.8535531e+00 4.6546556e+00 4.5024055e+00 4.0227020e+00 3.8004969e+00 3.9484773e+00 4.1635108e+00 3.8079860e+00 3.0811765e-01 6.5223271e-01 0.0000000e+00 5.0991930e-01 3.2586371e-01 4.2667565e-01 8.3172002e-01 5.0991930e-01 5.6769031e-01 7.5082357e-01 2.1845981e-01 7.0479928e-01 3.0811765e-01 6.4755655e-01 2.1845981e-01 3.4865562e+00 3.1726595e+00 3.6377960e+00 2.5987470e+00 3.2814045e+00 3.0627375e+00 3.3515846e+00 1.8841865e+00 3.2769379e+00 2.5038079e+00 2.1311468e+00 2.8311678e+00 2.6104387e+00 3.2962520e+00 2.2214438e+00 3.1433122e+00 3.0878634e+00 2.6552472e+00 3.1570103e+00 2.4668912e+00 3.4394878e+00 2.6411293e+00 3.5233648e+00 3.2747247e+00 2.9659871e+00 3.1154783e+00 3.5134741e+00 3.7059620e+00 3.1148696e+00 2.0851901e+00 2.3731428e+00 2.2655571e+00 2.4927109e+00 3.6920087e+00 3.0823446e+00 3.1337459e+00 3.4135200e+00 3.0481703e+00 2.6780487e+00 2.5874301e+00 2.9489507e+00 3.2027420e+00 2.5873149e+00 1.8973383e+00 2.7738355e+00 2.7632614e+00 2.7778954e+00 2.9269923e+00 1.6390769e+00 2.6848587e+00 4.7106706e+00 3.7313856e+00 4.6446321e+00 4.2142736e+00 4.4836580e+00 5.3716885e+00 3.1250284e+00 5.0074019e+00 4.4485220e+00 4.9128219e+00 3.8150636e+00 3.9624529e+00 4.2358121e+00 3.6602286e+00 3.8605980e+00 4.0488387e+00 4.1418643e+00 5.4970002e+00 5.6855224e+00 3.5964347e+00 4.4685630e+00 3.5634461e+00 5.4730406e+00 3.5693950e+00 4.3989089e+00 4.7150659e+00 3.4668130e+00 3.5464993e+00 4.2723380e+00 4.5155386e+00 4.8594290e+00 5.2647079e+00 4.2921213e+00 3.7064459e+00 4.1581964e+00 4.9913682e+00 4.3286007e+00 4.1303097e+00 3.4468286e+00 4.1669742e+00 4.3729308e+00 3.9624170e+00 3.7313856e+00 4.6297577e+00 4.4844827e+00 4.0056359e+00 3.6817961e+00 3.9035218e+00 4.1179678e+00 3.7164366e+00 5.2942799e-01 3.0811765e-01 6.0611244e-01 3.2586371e-01 3.0546431e-01 9.4125538e-01 6.0060595e-01 5.2574978e-01 8.1558458e-01 2.8507955e-01 6.4755655e-01 4.1312257e-01 5.5419992e-01 2.0656129e-01 3.7045940e+00 3.4142500e+00 3.8690719e+00 2.8688189e+00 3.5226542e+00 3.3385842e+00 3.6010215e+00 2.1580776e+00 3.5196916e+00 2.7652601e+00 2.4083873e+00 3.0820950e+00 2.8783309e+00 3.5617386e+00 2.4693940e+00 3.3658240e+00 3.3558214e+00 2.9322808e+00 3.4112518e+00 2.7424260e+00 3.6945405e+00 2.8867565e+00 3.7848217e+00 3.5471494e+00 3.2076743e+00 3.3449470e+00 3.7498842e+00 3.9450818e+00 3.3735875e+00 2.3490515e+00 2.6490839e+00 2.5444216e+00 2.7549369e+00 3.9621873e+00 3.3527310e+00 3.3865418e+00 3.6475099e+00 3.3024121e+00 2.9459653e+00 2.8566322e+00 3.2311957e+00 3.4653703e+00 2.8535197e+00 2.1708533e+00 3.0455175e+00 3.0364473e+00 3.0465086e+00 3.1799184e+00 1.8842354e+00 2.9509414e+00 4.9594922e+00 3.9924292e+00 4.8844442e+00 4.4804205e+00 4.7354764e+00 5.6130421e+00 3.3873806e+00 5.2583931e+00 4.7090394e+00 5.1413739e+00 4.0532097e+00 4.2156327e+00 4.4735121e+00 
2.1845981e-01 5.0090417e-01 7.2852070e-01 7.4777660e-01 2.2645802e+00 2.3019759e+00 5.7324170e-01 1.1833351e+00 2.5651975e-01 2.1907335e+00 5.0905001e-01 1.0330459e+00 1.5124582e+00 4.4651726e-01 3.8934542e-01 6.9369532e-01 1.4517959e+00 1.7036156e+00 2.3021295e+00 7.0429250e-01 5.6700421e-01 6.3977563e-01 1.9782093e+00 8.7229670e-01 6.8801986e-01 3.8934542e-01 1.1199472e+00 9.9519977e-01 1.1263042e+00 0.0000000e+00 1.1697902e+00 1.0851476e+00 9.2859317e-01 5.0905001e-01 7.1504098e-01 7.7763126e-01 3.0546431e-01 8.2135873e-01 6.0121055e-01 7.6716823e-01 2.3579605e+00 4.5581864e-01 5.8914551e-01 6.5223271e-01 8.9095811e-01 8.2552685e-01 4.4417983e-01 1.5124582e+00 1.3844611e+00 8.1810461e-01 6.6384020e-01 1.0516761e+00 1.0733200e+00 1.3725949e+00 3.0844217e-01 1.6183051e+00 8.9095811e-01 1.1426203e+00 4.5470518e-01 3.2816937e-01 1.2604558e+00 1.2459608e+00 7.1799256e-01 5.0180477e-01 3.6171588e-01 1.0269295e+00 7.1840099e-01 1.0531192e+00 1.1093572e+00 6.1092863e-01 8.4591037e-01 7.4777660e-01 1.3693737e+00 5.0905001e-01 4.8135521e-01 8.0619006e-01 1.3844611e+00 3.4378533e-01 5.3309112e-01 7.3813096e-01 1.0901359e+00 8.1273630e-01 9.6141901e-01 1.2978356e+00 4.2667565e-01 1.4573287e+00 1.5826638e+00 1.0904758e+00 5.0503591e-01 1.1258723e+00 5.4292906e-01 3.2816937e-01 5.3022554e-01 7.7869083e-01 7.5871717e-01 5.5492130e-01 2.1269358e-01 1.6596342e+00 1.6919202e+00 8.3280511e-01 7.0429250e-01 8.7202528e-01 1.5772389e+00 7.0394675e-01 5.2655962e-01 9.2836103e-01 8.0064372e-01 7.0437330e-01 3.0546431e-01 9.0478973e-01 1.1271488e+00 1.7235501e+00 4.0293660e-01 5.2942799e-01 4.5470518e-01 1.4317371e+00 6.8917100e-01 2.1269358e-01 8.1099042e-01 6.2988288e-01 6.5172743e-01 7.6166891e-01 6.2660376e-01 6.5648056e-01 7.6625946e-01 6.1947990e-01 6.4755655e-01 4.2667565e-01 6.2660376e-01 5.6700421e-01 1.2113327e+00 1.8396098e+00 8.7504951e-01 5.7257017e-01 8.3280511e-01 7.0784540e-01 5.5492130e-01 3.7427929e-01 1.0284501e+00 8.7420176e-01 5.0991930e-01 4.4417983e-01 1.4089719e+00 1.4387122e+00 1.1127329e+00 4.1586001e-01 1.1205013e+00 1.3344634e+00 9.3048953e-01 3.2816937e-01 7.4164639e-01 1.0244319e+00 9.3999899e-01 2.5651975e-01 8.1242502e-01 9.1858284e-01 1.4955532e+00 2.5251796e-01 8.7420176e-01 8.5335130e-01 1.2045536e+00 4.3691963e-01 4.4651726e-01 1.0480665e+00 4.9857388e-01 2.8507955e-01 7.3535471e-01 8.8695363e-01 3.2816937e-01 3.8934542e-01 6.0611244e-01 8.6361309e-01 6.0551856e-01 5.2655962e-01 8.3783744e-01 3.0351721e+00 4.2418962e-01 1.0941064e+00 7.5705927e-01 1.6559784e+00 1.5582387e+00 1.2112034e+00 2.1967372e+00 2.0692197e+00 1.5564198e+00 1.3693737e+00 8.0096515e-01 4.5581864e-01 2.0330276e+00 1.0137836e+00 2.3142399e+00 2.1845981e-01 1.9017011e+00 1.1228379e+00 6.6827038e-01 2.0223026e+00 1.9969203e+00 1.3792358e+00 8.7478495e-01 5.2371571e-01 8.1385214e-01 1.3793330e+00 1.7664528e+00 1.6569692e+00 5.0905001e-01 1.4644753e+00 1.4341959e+00 2.1204309e+00 1.2632199e+00 1.1880428e+00 1.5405106e+00 2.0692197e+00 9.4009473e-01 1.1355826e+00 1.4992973e+00 1.8311457e+00 1.5765737e+00 1.6311692e+00 1.9969203e+00 2.6670272e+00 1.9783833e+00 2.5784641e+00 1.6572339e+00 1.5613865e+00 1.9842916e+00 8.6110333e-01 1.0720678e+00 1.6180482e+00 1.7140774e+00 3.2123303e+00 3.2542669e+00 1.1332978e+00 2.1449779e+00 7.5976039e-01 3.1540626e+00 1.4088394e+00 1.9797139e+00 2.4825886e+00 1.3075101e+00 1.2330392e+00 1.6629594e+00 2.4148300e+00 2.6746409e+00 3.2573703e+00 1.6686069e+00 1.4322723e+00 1.4341959e+00 2.9471490e+00 1.6864366e+00 1.6385322e+00 1.1320702e+00 2.0632091e+00 1.9468380e+00 
2.0389505e+00 9.7441804e-01 2.1303950e+00 2.0095672e+00 1.8517858e+00 1.4168607e+00 1.6483152e+00 1.5346983e+00 1.0866092e+00 7.2486328e-01 8.7202528e-01 1.2988558e+00 1.1847335e+00 8.6137722e-01 1.8265471e+00 1.7177705e+00 1.2107055e+00 9.9368623e-01 9.5866719e-01 7.3805807e-01 1.6506221e+00 7.3805807e-01 1.9449573e+00 5.0592043e-01 1.5350426e+00 7.8695083e-01 3.7427929e-01 1.6553809e+00 1.6249178e+00 1.0168833e+00 5.0991930e-01 2.1845981e-01 9.7270522e-01 1.0264409e+00 1.3817041e+00 1.2768639e+00 5.7324170e-01 1.1634384e+00 1.0611732e+00 1.7483574e+00 9.3048953e-01 9.0056222e-01 1.2340567e+00 1.6995747e+00 6.8076724e-01 9.1883539e-01 1.1718516e+00 1.4616896e+00 1.2125198e+00 1.2951888e+00 1.6249178e+00 1.2028939e+00 8.7420176e-01 5.3665999e-01 5.5492130e-01 1.1340084e+00 1.0720678e+00 8.3345577e-01 5.3588338e-01 1.5520745e+00 1.3258714e+00 9.5099818e-01 7.7074935e-01 1.2604558e+00 1.1891470e+00 9.2264612e-01 8.1099042e-01 7.7039952e-01 1.0389435e+00 1.0054794e+00 4.3456114e-01 6.2605182e-01 7.2823007e-01 1.5784191e+00 4.8927739e-01 7.5976039e-01 6.5223271e-01 1.0692258e+00 9.8985697e-01 6.3808075e-01 1.1178200e+00 6.6827038e-01 7.4855857e-01 8.6513410e-01 1.0122141e+00 7.6787403e-01 9.3615100e-01 7.5835500e-01 8.2654509e-01 6.9728513e-01 9.9519977e-01 9.7356960e-01 1.1306887e+00 1.2197188e+00 8.0353565e-01 1.7933375e+00 1.5916843e+00 1.0118409e+00 1.0089164e+00 7.0776547e-01 1.1591754e+00 1.8437762e+00 5.3309112e-01 1.8278913e+00 9.6838716e-01 1.4843324e+00 6.3735887e-01 7.3496673e-01 1.5570415e+00 1.5003972e+00 1.0421979e+00 9.7759114e-01 8.9070384e-01 7.8197925e-01 1.0329598e+00 1.4388174e+00 1.5204340e+00 6.9325418e-01 9.4309624e-01 1.0339865e+00 1.6134578e+00 8.0660588e-01 7.0523271e-01 1.0406064e+00 1.6372749e+00 5.1303949e-01 5.8851328e-01 1.0072663e+00 1.4951106e+00 1.0950112e+00 1.0934620e+00 1.5213929e+00 5.0991930e-01 4.5581864e-01 9.3615100e-01 7.6590510e-01 3.2586371e-01 4.2538717e-01 1.7942496e+00 1.9566981e+00 1.0636401e+00 6.6384020e-01 9.2264612e-01 1.7814077e+00 5.2371571e-01 6.0670504e-01 1.0120221e+00 4.8927739e-01 4.3691963e-01 5.6769031e-01 8.9366705e-01 1.1948578e+00 1.6982795e+00 5.7324170e-01 5.7257017e-01 8.3060013e-01 1.3824965e+00 5.7867728e-01 4.1586001e-01 5.4292906e-01 4.4651726e-01 5.7324170e-01 4.4535192e-01 7.6752131e-01 8.2105460e-01 6.9369532e-01 3.4583729e-01 7.0479928e-01 2.0656129e-01 4.3456114e-01 6.1092863e-01 4.6472023e-01 7.1840099e-01 6.9369532e-01 5.6631629e-01 3.2816937e-01 1.8058693e+00 1.8261179e+00 6.3735887e-01 7.0328431e-01 8.2684479e-01 1.6791597e+00 4.0293660e-01 6.6827038e-01 9.7377870e-01 5.0991930e-01 4.8135521e-01 3.2586371e-01 8.7021234e-01 1.1327825e+00 1.7839298e+00 3.7427929e-01 4.1586001e-01 5.5492130e-01 1.3916739e+00 7.7919451e-01 4.1449626e-01 5.8914551e-01 5.7324170e-01 6.0900723e-01 6.2407309e-01 6.0551856e-01 7.5705927e-01 7.8695083e-01 4.8135521e-01 3.2586371e-01 3.0811765e-01 7.3851529e-01 5.3665999e-01 1.1524979e+00 1.0244319e+00 4.3691963e-01 3.7255734e-01 1.4090646e+00 1.5060944e+00 1.0810263e+00 2.8507955e-01 1.2406194e+00 1.3336069e+00 7.1791510e-01 3.2586371e-01 5.9426792e-01 8.2552685e-01 8.2275389e-01 4.1449626e-01 5.8851328e-01 7.5196795e-01 1.3415658e+00 4.1586001e-01 7.2852070e-01 8.9159388e-01 9.7249562e-01 5.8914551e-01 4.4535192e-01 9.4352681e-01 1.4096146e-01 3.0811765e-01 4.1586001e-01 1.0244319e+00 4.2538717e-01 4.5581864e-01 3.2586371e-01 7.0869559e-01 3.7427929e-01 6.5223271e-01 9.2836103e-01 4.4651726e-01 8.8695363e-01 8.9971984e-01 2.4227359e+00 2.4243464e+00 5.5419992e-01 1.3235313e+00 
3.0546431e-01 2.3149695e+00 6.1151102e-01 1.2036484e+00 1.6520677e+00 5.4292906e-01 5.7324170e-01 8.2305664e-01 1.5788188e+00 1.8238348e+00 2.4554026e+00 8.2552685e-01 7.0429250e-01 7.7588000e-01 2.0959492e+00 1.0466623e+00 8.6513410e-01 5.4292906e-01 1.2497790e+00 1.1215059e+00 1.2436109e+00 2.1845981e-01 1.3165513e+00 1.2256881e+00 1.0406064e+00 6.0060595e-01 8.5434758e-01 9.6664346e-01 5.1691876e-01 6.5223271e-01 8.4050231e-01 2.2451458e+00 2.2996030e+00 9.7270522e-01 1.1594648e+00 4.2538717e-01 2.1936248e+00 6.9369532e-01 1.0119857e+00 1.5297036e+00 6.6384020e-01 6.2988288e-01 7.0386584e-01 1.5113992e+00 1.7140171e+00 2.2868482e+00 6.9325418e-01 9.4080461e-01 1.0406064e+00 1.9734538e+00 7.5835500e-01 7.8695083e-01 6.2988288e-01 1.1158787e+00 9.4832302e-01 1.1055069e+00 5.0090417e-01 1.1452867e+00 1.0056742e+00 9.0277242e-01 6.3977563e-01 7.3851529e-01 6.6432544e-01 6.0611244e-01 5.1691876e-01 1.6983410e+00 1.8377590e+00 1.1500393e+00 5.6631629e-01 8.6012420e-01 1.6864433e+00 6.6539428e-01 4.5581864e-01 9.7356960e-01 6.6932542e-01 5.9426792e-01 4.5470518e-01 9.7548738e-01 1.1573546e+00 1.6772907e+00 4.4535192e-01 8.2929029e-01 9.8450810e-01 1.3812107e+00 3.2816937e-01 5.0905001e-01 6.6932542e-01 5.0991930e-01 3.7598397e-01 5.0905001e-01 7.2852070e-01 6.4704320e-01 4.5581864e-01 3.2586371e-01 7.4777660e-01 3.2816937e-01 2.5251796e-01 6.3108414e-01 1.5573817e+00 1.6420607e+00 9.0575661e-01 5.7867728e-01 9.7441804e-01 1.4917344e+00 6.2482915e-01 4.0176783e-01 7.7039952e-01 7.1799256e-01 6.4704320e-01 3.2816937e-01 7.1799256e-01 9.7270522e-01 1.5593809e+00 4.1586001e-01 4.6472023e-01 5.6454040e-01 1.2601890e+00 6.5223271e-01 1.2418578e-01 7.6716823e-01 4.4651726e-01 6.0670504e-01 6.1947990e-01 7.4777660e-01 5.9426792e-01 7.2172678e-01 5.3588338e-01 6.2660376e-01 3.2352160e-01 5.8914551e-01 6.4704320e-01 1.2013436e+00 2.3665136e+00 1.1771643e+00 2.4806944e+00 1.0018083e+00 2.1098467e+00 1.2627078e+00 8.8503502e-01 2.2024869e+00 2.1511385e+00 1.6189643e+00 1.1368070e+00 1.0688498e+00 3.4378533e-01 1.6188960e+00 1.9698860e+00 1.9254808e+00 8.8861541e-01 1.5832517e+00 1.5978297e+00 2.2712062e+00 1.4277162e+00 1.3610783e+00 1.6849072e+00 2.2645802e+00 1.1106525e+00 1.2665468e+00 1.6689743e+00 2.0995265e+00 1.7457596e+00 1.7532140e+00 2.1511385e+00 2.2712062e+00 1.3276804e+00 2.5485519e+00 3.4378533e-01 2.1870851e+00 1.4266198e+00 1.0379132e+00 2.3072128e+00 2.2736138e+00 1.6156775e+00 1.2095267e+00 8.3888121e-01 1.2277129e+00 1.6151153e+00 2.0528819e+00 1.8792214e+00 8.2624515e-01 1.7265353e+00 1.7004805e+00 2.3953564e+00 1.5733646e+00 1.4691764e+00 1.8497891e+00 2.3019759e+00 1.2238809e+00 1.4266198e+00 1.7962897e+00 2.1027465e+00 1.8635467e+00 1.9008621e+00 2.2439391e+00 1.3353353e+00 7.2526325e-01 2.1293320e+00 5.5492130e-01 1.2776560e+00 1.5193574e+00 6.2988288e-01 8.1130291e-01 8.6912228e-01 1.3752391e+00 1.6044563e+00 2.3474075e+00 9.1883539e-01 6.2024833e-01 6.4806901e-01 1.9000365e+00 1.3674559e+00 9.6664346e-01 8.1354181e-01 1.1751082e+00 1.2304904e+00 1.2256933e+00 5.7324170e-01 1.3628690e+00 1.4089364e+00 1.0866132e+00 4.8036801e-01 8.9971984e-01 1.3044654e+00 8.1130291e-01 1.3916739e+00 1.1500393e+00 9.6838716e-01 2.5251796e-01 5.5419992e-01 1.0573285e+00 1.0284501e+00 5.7324170e-01 7.1840099e-01 6.6317860e-01 1.1434428e+00 5.6769031e-01 9.7855477e-01 1.1106525e+00 8.2899253e-01 6.0670504e-01 6.2660376e-01 1.1449732e+00 3.2586371e-01 2.1845981e-01 6.0060595e-01 1.1833351e+00 2.0656129e-01 2.5251796e-01 5.1607523e-01 9.6324667e-01 5.9426792e-01 7.1799256e-01 1.0879524e+00 
2.4372751e+00 7.0437330e-01 1.2331989e+00 1.7427900e+00 6.0611244e-01 5.1607523e-01 9.3615100e-01 1.6812503e+00 1.9411754e+00 2.5109747e+00 9.3801395e-01 7.7039952e-01 8.6513410e-01 2.2052183e+00 9.6324667e-01 8.9917007e-01 4.2667565e-01 1.3224963e+00 1.1912106e+00 1.3084046e+00 2.5651975e-01 1.3897316e+00 1.2541242e+00 1.1120775e+00 7.1504098e-01 9.1051084e-01 8.1521713e-01 3.6171588e-01 2.0209349e+00 1.2627078e+00 7.9878917e-01 2.1431239e+00 2.1198551e+00 1.5016009e+00 9.6141901e-01 6.2024833e-01 1.0083666e+00 1.5022608e+00 1.8803649e+00 1.7557336e+00 6.2482915e-01 1.6044563e+00 1.5582387e+00 2.2435182e+00 1.3836712e+00 1.3199714e+00 1.6568705e+00 2.1907335e+00 1.0796583e+00 1.2825987e+00 1.6205332e+00 1.9460721e+00 1.6995133e+00 1.7678302e+00 2.1198551e+00 9.1750357e-01 1.2756158e+00 1.4096146e-01 3.2352160e-01 7.1504098e-01 1.1242402e+00 1.4312787e+00 2.0223464e+00 7.3535471e-01 3.2586371e-01 7.3851529e-01 1.6386882e+00 9.4477932e-01 6.4755655e-01 3.7427929e-01 7.3805807e-01 8.6165877e-01 7.2852070e-01 5.0905001e-01 1.0922991e+00 1.0175773e+00 6.0900723e-01 2.1269358e-01 4.0176783e-01 8.2372435e-01 4.5470518e-01 5.5492130e-01 9.8495853e-01 9.0521488e-01 5.2942799e-01 6.3977563e-01 7.9878917e-01 1.2832075e+00 5.3022554e-01 8.3060013e-01 9.4500268e-01 1.0244319e+00 4.4651726e-01 4.0176783e-01 1.0230441e+00 3.4378533e-01 3.2586371e-01 6.1623531e-01 1.0330459e+00 2.5651975e-01 4.0000000e-01 5.3588338e-01 9.5676647e-01 5.3665999e-01 5.3665999e-01 9.0521488e-01 1.3865084e+00 1.3669552e+00 8.6012420e-01 2.8192292e-01 4.1586001e-01 8.4050231e-01 8.7383925e-01 1.1355826e+00 1.1712156e+00 6.2660376e-01 9.8985697e-01 8.5205778e-01 1.4909823e+00 6.3861009e-01 7.2526325e-01 9.4854455e-01 1.5124582e+00 5.6700421e-01 7.7919451e-01 8.9971984e-01 1.2483814e+00 9.4009473e-01 1.0879524e+00 1.4147273e+00 2.1269358e-01 8.1354181e-01 1.2441035e+00 1.5551238e+00 2.1137172e+00 8.2899253e-01 3.7427929e-01 8.2929029e-01 1.7587110e+00 9.6095130e-01 7.1799256e-01 2.4837156e-01 8.3280511e-01 9.3797093e-01 7.9016429e-01 4.4651726e-01 1.1833351e+00 1.0724413e+00 6.6932542e-01 3.2816937e-01 4.6472023e-01 8.0467258e-01 3.8776762e-01 7.3145860e-01 1.2564564e+00 1.5558094e+00 2.0983278e+00 7.5082357e-01 3.6171588e-01 7.6590510e-01 1.7862655e+00 8.4050231e-01 6.2024833e-01 1.2418578e-01 8.6137722e-01 8.9852394e-01 8.5462626e-01 3.8934542e-01 1.1192362e+00 1.0078327e+00 7.0386584e-01 5.0991930e-01 4.5470518e-01 6.6539428e-01 2.4837156e-01 8.5690100e-01 1.0344911e+00 1.6689743e+00 1.0000000e-01 6.8961791e-01 7.1799256e-01 1.3207609e+00 6.2024833e-01 3.7427929e-01 8.3888121e-01 5.3588338e-01 4.2288438e-01 6.4405773e-01 6.9369532e-01 5.3309112e-01 5.8914551e-01 4.6472023e-01 6.2538346e-01 4.1586001e-01 6.1623531e-01 6.4405773e-01 4.0176783e-01 1.0175773e+00 8.9303452e-01 1.0122141e+00 1.1161766e+00 7.7885297e-01 1.0755693e+00 8.1385214e-01 1.3792358e+00 5.8914551e-01 8.5462626e-01 8.7848692e-01 1.4517959e+00 7.3851529e-01 9.4854455e-01 8.6263408e-01 1.0941064e+00 8.3783744e-01 1.1172689e+00 1.3545005e+00 1.0391247e+00 1.0389435e+00 1.3163598e+00 1.3379696e+00 4.5470518e-01 1.1951875e+00 1.0632598e+00 1.6796759e+00 7.8197925e-01 8.3345577e-01 1.0540105e+00 1.7036156e+00 6.9167458e-01 8.8503502e-01 1.0279631e+00 1.3693737e+00 1.1192426e+00 1.3077572e+00 1.6183051e+00 1.6694974e+00 1.9094934e+00 1.9895190e+00 8.2384013e-01 1.6634400e+00 1.6218244e+00 2.2178691e+00 1.3008161e+00 1.3567326e+00 1.4994715e+00 2.3021295e+00 1.1763719e+00 1.3024224e+00 1.5535909e+00 2.0373882e+00 1.6756749e+00 1.7981158e+00 2.1739455e+00 
7.6752131e-01 8.1354181e-01 1.3198846e+00 6.0611244e-01 4.4535192e-01 8.5335130e-01 5.3665999e-01 3.8776762e-01 6.3977563e-01 7.0429250e-01 5.2655962e-01 5.5492130e-01 4.5581864e-01 6.3861009e-01 4.2667565e-01 6.1151102e-01 6.6932542e-01 5.1691876e-01 1.5922648e+00 1.0054794e+00 4.8135521e-01 4.3456114e-01 7.6955924e-01 9.6664346e-01 8.9687438e-01 5.6700421e-01 1.0421979e+00 1.1001291e+00 8.2929029e-01 4.4535192e-01 5.1691876e-01 8.9712482e-01 4.5470518e-01 1.6914476e+00 1.1340084e+00 5.8914551e-01 8.5105559e-01 9.7548738e-01 1.0864449e+00 1.1160770e+00 6.3977563e-01 1.0720678e+00 1.2163831e+00 1.0047836e+00 6.9369532e-01 7.2343175e-01 1.0613462e+00 6.2407309e-01 1.4238090e+00 1.3511716e+00 1.9067300e+00 9.3824087e-01 1.0331736e+00 1.1327825e+00 1.9782093e+00 9.0454394e-01 1.0244319e+00 1.1833480e+00 1.5948732e+00 1.3360558e+00 1.5461469e+00 1.8900319e+00 6.2081167e-01 9.1750357e-01 6.4290921e-01 4.4417983e-01 7.0429250e-01 8.7229670e-01 5.3665999e-01 4.0438741e-01 5.6454040e-01 1.0054794e+00 5.7015910e-01 2.1269358e-01 7.5705927e-01 7.3496673e-01 5.2942799e-01 6.2024833e-01 6.6491075e-01 6.8801986e-01 6.1947990e-01 7.2172678e-01 5.5492130e-01 6.9006418e-01 3.2816937e-01 5.3665999e-01 5.6700421e-01 9.7779835e-01 1.0014633e+00 9.4854455e-01 3.8934542e-01 1.2342162e+00 1.1043332e+00 7.9580667e-01 5.3665999e-01 5.7257017e-01 7.2852070e-01 3.0275928e-01 3.4378533e-01 3.2352160e-01 1.1199472e+00 5.0991930e-01 4.6472023e-01 2.8507955e-01 7.7869083e-01 4.1586001e-01 7.1799256e-01 1.0132664e+00 5.0905001e-01 9.9519977e-01 3.0811765e-01 2.1269358e-01 4.0293660e-01 8.3060013e-01 5.0592043e-01 5.3665999e-01 9.3049742e-01 1.1263042e+00 8.0064372e-01 6.1623531e-01 2.1269358e-01 7.7588000e-01 4.4651726e-01 7.2783368e-01 1.0329901e+00 1.1697902e+00 1.0851476e+00 9.2859317e-01 5.0905001e-01 7.1504098e-01 7.7763126e-01 3.0546431e-01 2.5651975e-01 7.0437330e-01 1.0573285e+00 7.3145860e-01 6.9325418e-01 1.0901359e+00 5.3588338e-01 1.0175773e+00 6.4405773e-01 5.3665999e-01 1.0078327e+00 6.2407309e-01 3.2352160e-01 5.7257017e-01 8.5205778e-01 5.1691876e-01 9.4022486e-01 5.6769031e-01 4.8927739e-01 6.0611244e-01 6.0900723e-01 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt new file mode 100644 index 0000000..daa8111 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt @@ -0,0 +1 @@ + 2.0215050e+00 2.0988154e+00 1.8614681e+00 2.0510161e+00 1.9210911e+00 2.1323516e+00 1.9565454e+00 2.1029889e+00 1.9617871e+00 2.0544792e+00 2.0357408e+00 1.8811414e+00 2.0694693e+00 2.1245977e+00 2.0632165e+00 2.0452823e+00 2.0249330e+00 1.9635489e+00 2.0508580e+00 2.0838578e+00 1.9324052e+00 1.8224609e+00 1.9795343e+00 1.9536534e+00 1.9694910e+00 1.9075569e+00 1.9590397e+00 2.0022087e+00 1.8814000e+00 1.8884208e+00 1.9961121e+00 2.0215351e+00 1.7515769e+00 2.0756437e+00 2.0109476e+00 1.9234849e+00 1.9160076e+00 1.8550862e+00 1.7733640e+00 2.0071906e+00 2.0209542e+00 2.0616569e+00 2.0565503e+00 1.9083573e+00 2.2732431e+00 1.9975503e+00 1.9080072e+00 2.1437809e+00 2.1296295e+00 1.9739085e+00 1.9834166e+00 2.1078664e+00 2.2016840e+00 2.2080962e+00 1.7340579e+00 2.0549287e+00 1.7331748e+00 1.9559688e+00 2.0343364e+00 1.8736929e+00 1.9730416e+00 1.5308944e+00 1.8421831e+00 2.0174240e+00 2.0137378e+00 1.7956151e+00 1.9606596e+00 1.9074857e+00 2.0413879e+00 2.0070305e+00 1.9584677e+00 1.8977851e+00 1.9176239e+00 1.7067419e+00 
1.9461927e+00 1.8431700e+00 1.8284576e+00 1.7778704e+00 1.8350329e+00 2.0175415e+00 1.7459063e+00 1.9242505e+00 1.8757370e+00 1.9312506e+00 2.0574808e+00 2.0894636e+00 1.9780203e+00 2.1374036e+00 1.8900436e+00 2.0273032e+00 2.0681953e+00 2.0234699e+00 2.0666449e+00 2.0663485e+00 1.9281402e+00 1.7846314e+00 2.0372479e+00 1.8831230e+00 2.0186015e+00 2.0193231e+00 2.2022665e+00 1.8145737e+00 2.0466545e+00 1.8092421e+00 1.9600687e+00 2.0322961e+00 1.9556364e+00 1.8266422e+00 1.9950345e+00 2.1038429e+00 2.1164145e+00 2.0188062e+00 1.8863331e+00 2.0006971e+00 1.9971068e+00 1.8771862e+00 2.1148855e+00 1.9570638e+00 1.9859615e+00 2.0030854e+00 2.0737344e+00 1.9739259e+00 1.9266524e+00 1.9200535e+00 2.1376689e+00 1.8944425e+00 1.9330553e+00 1.8561590e+00 1.9422954e+00 1.8874178e+00 1.8624808e+00 1.8265563e+00 1.8840519e+00 2.0515092e+00 2.0174226e+00 1.9771196e+00 2.0635988e+00 1.7334466e+00 1.9912604e+00 1.8915711e+00 1.8262636e+00 1.9369173e+00 1.9560446e+00 1.9549934e+00 1.9279230e+00 1.9021073e+00 2.0113391e+00 2.0305786e+00 1.8066806e+00 1.9656739e+00 2.1219217e+00 1.8820250e+00 1.8936826e+00 2.0565131e+00 1.9839441e+00 1.8553479e+00 1.9923760e+00 1.6393276e+00 1.9786440e+00 1.8274394e+00 1.9322611e+00 2.0404318e+00 1.9216532e+00 1.9361171e+00 1.8401373e+00 1.9908059e+00 1.9495117e+00 2.1975655e+00 1.8413913e+00 2.1528773e+00 1.8434374e+00 2.1668863e+00 2.0429273e+00 1.9980016e+00 1.9790129e+00 2.0264829e+00 2.1478843e+00 2.0899600e+00 2.0280670e+00 2.1210881e+00 1.9993891e+00 1.8646871e+00 1.9099983e+00 1.9263353e+00 2.0042495e+00 2.1365919e+00 2.1830279e+00 1.9631961e+00 2.0880004e+00 1.8348369e+00 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt new file mode 100644 index 0000000..aa26b04 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt @@ -0,0 +1 @@ + 5.0042326e-01 4.1210927e-01 5.2133179e-01 1.1269424e-01 4.2362917e-01 5.0001522e-01 1.2085435e-01 7.4262850e-01 4.0127250e-01 3.0482299e-01 3.0482299e-01 5.0436965e-01 8.0923926e-01 7.1629168e-01 9.1424701e-01 4.1317535e-01 1.0000000e-01 6.0366256e-01 3.0017653e-01 3.3813251e-01 2.2573593e-01 5.2133179e-01 3.4080442e-01 5.0436965e-01 5.0043084e-01 2.2608083e-01 1.1269424e-01 1.1269424e-01 4.1315633e-01 4.1315633e-01 3.0490481e-01 6.0000952e-01 7.0462550e-01 4.0127250e-01 3.0482299e-01 4.0002221e-01 4.0127250e-01 7.1621748e-01 1.1269424e-01 1.2085435e-01 1.2036864e+00 7.0088477e-01 4.0125062e-01 5.0476836e-01 5.0436965e-01 3.0474106e-01 5.0436235e-01 2.2573593e-01 2.0061436e-01 3.3243227e+00 3.1068812e+00 3.5145413e+00 2.6080595e+00 3.2075731e+00 3.1014454e+00 3.3055260e+00 1.9156198e+00 3.2079238e+00 2.5066441e+00 2.1498493e+00 2.8059664e+00 2.6093989e+00 3.3021953e+00 2.2070266e+00 3.0158454e+00 3.1034764e+00 2.7009878e+00 3.1081779e+00 2.5032992e+00 3.4074959e+00 2.6050088e+00 3.5035589e+00 3.3011884e+00 2.9065890e+00 3.0117336e+00 3.4118782e+00 3.6094426e+00 3.1038958e+00 2.1042326e+00 2.4058620e+00 2.3063407e+00 2.5029614e+00 3.7025335e+00 3.1034636e+00 3.1057006e+00 3.3110189e+00 3.0065909e+00 2.7025941e+00 2.6047974e+00 3.0013665e+00 3.2025221e+00 2.6029242e+00 1.9242109e+00 2.8024935e+00 2.8013151e+00 2.8022622e+00 2.9036582e+00 1.6267693e+00 2.7028014e+00 4.6144526e+00 3.7071079e+00 4.5121787e+00 4.2031939e+00 4.4087839e+00 5.2153194e+00 3.1086291e+00 4.9093646e+00 4.4044245e+00 4.7202040e+00 3.7119486e+00 
3.9066365e+00 4.1123628e+00 3.6114402e+00 3.7307413e+00 3.9194642e+00 4.1043951e+00 5.3177489e+00 5.5157728e+00 3.6035661e+00 4.3162097e+00 3.5127031e+00 5.3163123e+00 3.5077296e+00 4.3088507e+00 4.6100803e+00 3.4082578e+00 3.5068380e+00 4.2080636e+00 4.4113183e+00 4.7149608e+00 5.0316727e+00 4.2105572e+00 3.7024462e+00 4.2007769e+00 4.7331529e+00 4.2173557e+00 4.1039096e+00 3.4076329e+00 4.0157626e+00 4.2194897e+00 3.7329396e+00 3.7071079e+00 4.5119962e+00 4.3218071e+00 3.8249612e+00 3.6093673e+00 3.8105293e+00 4.0166459e+00 3.7050109e+00 2.2573593e-01 3.0017653e-01 6.0000317e-01 9.0534502e-01 4.1210927e-01 4.0004442e-01 5.0000761e-01 1.2085435e-01 7.1621748e-01 4.0125062e-01 1.1269424e-01 6.0184622e-01 1.0776294e+00 1.4092540e+00 9.0508756e-01 5.0043084e-01 9.0181717e-01 8.0004602e-01 5.2491131e-01 7.0017011e-01 6.1119267e-01 3.6452132e-01 5.2133179e-01 2.0061436e-01 4.0246123e-01 5.0436965e-01 4.1209001e-01 2.4170870e-01 2.0121983e-01 5.2167829e-01 1.1001015e+00 1.2036862e+00 1.2085435e-01 2.2573593e-01 6.3164977e-01 1.2085435e-01 5.0000761e-01 4.0125062e-01 5.0002283e-01 7.0462844e-01 5.0043084e-01 5.2167829e-01 8.0888055e-01 1.1269424e-01 8.0008884e-01 3.0474106e-01 7.0462697e-01 3.0008832e-01 3.3416860e+00 3.1112912e+00 3.5249966e+00 2.6033557e+00 3.2127499e+00 3.1015178e+00 3.3078313e+00 1.9025708e+00 3.2150318e+00 2.5060738e+00 2.1061951e+00 2.8068283e+00 2.6040016e+00 3.3032134e+00 2.2072454e+00 3.0286102e+00 3.1035443e+00 2.7011973e+00 3.1070853e+00 2.5014549e+00 3.4078435e+00 2.6080511e+00 3.5048916e+00 3.3021665e+00 2.9125999e+00 3.0213627e+00 3.4211337e+00 3.6148618e+00 3.1047537e+00 2.1027003e+00 2.4016639e+00 2.3011929e+00 2.5032633e+00 3.7028303e+00 3.1034629e+00 3.1065984e+00 3.3192072e+00 3.0078209e+00 2.7027260e+00 2.6031664e+00 3.0009332e+00 3.2037232e+00 2.6027120e+00 1.9031578e+00 2.8022915e+00 2.8015662e+00 2.8024715e+00 2.9065359e+00 1.6099792e+00 2.7029416e+00 4.6149181e+00 3.7071538e+00 4.5172866e+00 4.2039132e+00 4.4099272e+00 5.2224057e+00 3.1078968e+00 4.9146298e+00 4.4063795e+00 4.7253524e+00 3.7145622e+00 3.9080413e+00 4.1161770e+00 3.6111646e+00 3.7308314e+00 3.9209137e+00 4.1060063e+00 5.3254977e+00 5.5222404e+00 3.6024247e+00 4.3201293e+00 3.5126957e+00 5.3240486e+00 3.5093499e+00 4.3111749e+00 4.6158382e+00 3.4095576e+00 3.5076152e+00 4.2090727e+00 4.4184242e+00 4.7227808e+00 5.0458491e+00 4.2115634e+00 3.7037441e+00 4.2010125e+00 4.7466313e+00 4.2180733e+00 4.1050714e+00 3.4081972e+00 4.0212972e+00 4.2220584e+00 3.7407842e+00 3.7071538e+00 4.5144444e+00 4.3240980e+00 3.8290678e+00 3.6105228e+00 3.8128297e+00 4.0172657e+00 3.7052380e+00 2.0121983e-01 4.1210927e-01 7.9153339e-01 2.0181667e-01 3.0915245e-01 3.3813251e-01 2.2608083e-01 7.1629168e-01 3.0482299e-01 2.0181667e-01 4.0246123e-01 1.1281267e+00 1.2633045e+00 7.8890721e-01 4.1212852e-01 1.0095370e+00 6.0964891e-01 7.0470720e-01 5.2201750e-01 4.1210927e-01 4.5784410e-01 6.0017982e-01 3.4080442e-01 3.4342562e-01 5.0476836e-01 5.0043084e-01 3.0000000e-01 3.0017653e-01 7.0025283e-01 9.0508756e-01 1.0426513e+00 2.2608083e-01 3.0008832e-01 8.0046605e-01 2.2608083e-01 3.0474106e-01 4.0243965e-01 3.3813251e-01 9.0002570e-01 3.0000000e-01 4.3213914e-01 6.8170466e-01 2.0181667e-01 6.1119267e-01 1.1269424e-01 6.3178534e-01 3.0017653e-01 3.4595765e+00 3.2168311e+00 3.6364650e+00 2.7037323e+00 3.3192099e+00 3.2017763e+00 3.4107328e+00 2.0033798e+00 3.3237063e+00 2.6050967e+00 2.2121910e+00 2.9077087e+00 2.7085154e+00 3.4047917e+00 2.3071665e+00 3.1428042e+00 3.2033135e+00 2.8024935e+00 3.2103481e+00 
2.6021247e+00 3.5076152e+00 2.7127272e+00 3.6073242e+00 3.4038884e+00 3.0203881e+00 3.1325879e+00 3.5317021e+00 3.7210979e+00 3.2059139e+00 2.2051638e+00 2.5023084e+00 2.4021168e+00 2.6048201e+00 3.8033004e+00 3.2030448e+00 3.2074921e+00 3.4286399e+00 3.1131211e+00 2.8028008e+00 2.7031257e+00 3.1010004e+00 3.3055260e+00 2.7040740e+00 2.0050309e+00 2.9023862e+00 2.9020767e+00 2.9028421e+00 3.0107283e+00 1.7089863e+00 2.8033666e+00 4.7142986e+00 3.8066401e+00 4.6226512e+00 4.3047830e+00 4.5107876e+00 5.3296471e+00 3.2068572e+00 5.0203871e+00 4.5089338e+00 4.8299744e+00 3.8170042e+00 4.0095939e+00 4.2200398e+00 3.7100654e+00 3.8275330e+00 4.0209836e+00 4.2079639e+00 5.4332277e+00 5.6287689e+00 3.7032748e+00 4.4237036e+00 3.6112573e+00 5.4319232e+00 3.6111754e+00 4.4135512e+00 4.7221364e+00 3.5107924e+00 3.6081749e+00 4.3098514e+00 4.5261773e+00 4.8309399e+00 5.1593152e+00 4.3120751e+00 3.8056232e+00 4.3015640e+00 4.8592534e+00 4.3174320e+00 4.2064763e+00 3.5083248e+00 4.1268500e+00 4.3236383e+00 3.8471097e+00 3.8066401e+00 4.6166518e+00 4.4251081e+00 3.9318948e+00 3.7118930e+00 3.9150333e+00 4.1165034e+00 3.8051417e+00 5.2133179e-01 9.0160400e-01 3.0017653e-01 4.1209001e-01 2.2573593e-01 3.0008832e-01 8.2418002e-01 3.0482299e-01 2.0181667e-01 4.1212852e-01 1.2363278e+00 1.3741498e+00 9.0160400e-01 5.2133802e-01 1.1133986e+00 7.1621748e-01 8.0051036e-01 6.3178534e-01 5.6347121e-01 5.0517282e-01 4.1315633e-01 4.0004442e-01 4.1317535e-01 6.0948212e-01 6.0184622e-01 1.2085435e-01 2.0061436e-01 8.0051036e-01 1.0087250e+00 1.1527669e+00 3.0008832e-01 4.1210927e-01 9.0142636e-01 3.0008832e-01 2.2573593e-01 5.0436235e-01 4.5148429e-01 8.0004602e-01 2.2573593e-01 4.8342635e-01 7.2044167e-01 2.0181667e-01 7.1621748e-01 1.1269424e-01 7.4262850e-01 4.0125062e-01 3.2983364e+00 3.0300451e+00 3.4603347e+00 2.5053901e+00 3.1338090e+00 3.0030658e+00 3.2183845e+00 1.8040969e+00 3.1419971e+00 2.4075162e+00 2.0123013e+00 2.7132680e+00 2.5163999e+00 3.2086215e+00 2.1132077e+00 2.9750754e+00 3.0049127e+00 2.6055197e+00 3.0177719e+00 2.4040962e+00 3.3110162e+00 2.5253371e+00 3.4126529e+00 3.2074182e+00 2.8380954e+00 2.9580787e+00 3.3536443e+00 3.5347730e+00 3.0101869e+00 2.0123796e+00 2.3038195e+00 2.2036797e+00 2.4099203e+00 3.6051707e+00 3.0042758e+00 3.0123228e+00 3.2490712e+00 2.9241808e+00 2.6047889e+00 2.5049231e+00 2.9016211e+00 3.1100277e+00 2.5081992e+00 1.8056342e+00 2.7040060e+00 2.7039988e+00 2.7050721e+00 2.8205713e+00 1.5147271e+00 2.6060742e+00 4.5183778e+00 3.6090052e+00 4.4337691e+00 4.1072664e+00 4.3151164e+00 5.1425125e+00 3.0092613e+00 4.8303615e+00 4.3139066e+00 4.6422789e+00 3.6259317e+00 3.8146285e+00 4.0301568e+00 3.5133848e+00 3.6358680e+00 3.8290678e+00 4.0124919e+00 5.2471177e+00 5.4403962e+00 3.5051114e+00 4.2343452e+00 3.4149831e+00 5.2455706e+00 3.4177035e+00 4.2200398e+00 4.5335328e+00 3.3168776e+00 3.4123846e+00 4.1140176e+00 4.3402553e+00 4.6459028e+00 4.9843016e+00 4.1167964e+00 3.6096226e+00 4.1026403e+00 4.6849407e+00 4.1230798e+00 4.0100505e+00 3.3123688e+00 3.9407837e+00 4.1330547e+00 3.6700537e+00 3.6090052e+00 4.4237036e+00 4.2343452e+00 3.7463488e+00 3.5181052e+00 3.7227931e+00 3.9220791e+00 3.6072781e+00 4.2362917e-01 4.0125062e-01 2.0061436e-01 7.4262850e-01 5.0002283e-01 4.0004442e-01 2.4170870e-01 6.0017982e-01 7.4329527e-01 8.0250123e-01 8.5406674e-01 4.1317535e-01 1.2085435e-01 7.0096858e-01 2.0181667e-01 4.1315633e-01 2.0181667e-01 4.5077696e-01 3.6259865e-01 5.0084481e-01 6.0017665e-01 2.4170870e-01 2.0121983e-01 2.2538848e-01 4.1315633e-01 5.0084481e-01 
4.0246123e-01 5.0043842e-01 6.3164729e-01 5.0002283e-01 4.0122873e-01 5.0001522e-01 5.0002283e-01 6.7616723e-01 2.0121983e-01 1.2085435e-01 1.3008771e+00 6.0948506e-01 4.0125062e-01 5.0085236e-01 6.0017982e-01 2.2573593e-01 4.5077696e-01 3.0017653e-01 3.0000000e-01 3.3320240e+00 3.1087192e+00 3.5191371e+00 2.6110181e+00 3.2098845e+00 3.1016129e+00 3.3064697e+00 1.9242109e+00 3.2110200e+00 2.5072065e+00 2.1702438e+00 2.8063347e+00 2.6144115e+00 3.3026483e+00 2.2074446e+00 3.0213781e+00 3.1035271e+00 2.7015967e+00 3.1108570e+00 2.5049231e+00 3.4076266e+00 2.6065485e+00 3.5045818e+00 3.3016829e+00 2.9091905e+00 3.0158857e+00 3.4160038e+00 3.6117923e+00 3.1042949e+00 2.1068047e+00 2.4087956e+00 2.3099309e+00 2.5038387e+00 3.7027671e+00 3.1034919e+00 3.1060428e+00 3.3145595e+00 3.0095593e+00 2.7026925e+00 2.6061038e+00 3.0017811e+00 3.2030205e+00 2.6039803e+00 1.9366876e+00 2.8028640e+00 2.8014482e+00 2.8024453e+00 2.9049136e+00 1.6388635e+00 2.7031257e+00 4.6146430e+00 3.7072412e+00 4.5144508e+00 4.2035048e+00 4.4092709e+00 5.2185448e+00 3.1091788e+00 4.9117351e+00 4.4054277e+00 4.7224997e+00 3.7130507e+00 3.9073151e+00 4.1140274e+00 3.6117351e+00 3.7308330e+00 3.9200674e+00 4.1050815e+00 5.3212796e+00 5.5187578e+00 3.6046347e+00 4.3179262e+00 3.5127783e+00 5.3198559e+00 3.5085510e+00 4.3098508e+00 4.6126513e+00 3.4088749e+00 3.5071604e+00 4.2085176e+00 4.4144980e+00 4.7185095e+00 5.0381903e+00 4.2110099e+00 3.7030413e+00 4.2009868e+00 4.7393218e+00 4.2176488e+00 4.1043951e+00 3.4078683e+00 4.0181902e+00 4.2205976e+00 3.7363838e+00 3.7072412e+00 4.5130595e+00 4.3227928e+00 3.8267408e+00 3.6102542e+00 3.8115096e+00 4.0168944e+00 3.7051079e+00 8.0923926e-01 5.2201750e-01 1.1270411e+00 8.0928056e-01 2.4170870e-01 6.3178782e-01 9.1471442e-01 1.1573074e+00 5.2167829e-01 5.0476836e-01 4.0000000e-01 4.2270142e-01 3.0017653e-01 3.0490481e-01 5.0042326e-01 3.0915245e-01 8.5440680e-01 6.0184622e-01 6.3192325e-01 9.0142681e-01 5.2133179e-01 4.0363334e-01 5.0517282e-01 7.8890806e-01 8.2421923e-01 5.0042326e-01 3.1328089e-01 3.4085233e-01 8.0928056e-01 7.2044167e-01 4.5148429e-01 8.0928056e-01 1.0782211e+00 5.0517282e-01 4.8342635e-01 1.6097492e+00 1.0215068e+00 4.5148429e-01 3.0482299e-01 9.1446938e-01 3.0490481e-01 8.5440680e-01 2.4195741e-01 6.1135434e-01 3.0143288e+00 2.8035152e+00 3.2080663e+00 2.3476141e+00 2.9053991e+00 2.8028019e+00 3.0030626e+00 1.7519158e+00 2.9045816e+00 2.2149484e+00 2.0887699e+00 2.5048522e+00 2.3645147e+00 3.0018766e+00 1.9120303e+00 2.7085154e+00 2.8028008e+00 2.4075162e+00 2.8284908e+00 2.2272457e+00 3.1054022e+00 2.3075573e+00 3.2060163e+00 3.0018874e+00 2.6044486e+00 2.7064438e+00 3.1073418e+00 3.3054063e+00 2.8034238e+00 1.8447840e+00 2.1492024e+00 2.0607272e+00 2.2122063e+00 3.4028104e+00 2.8028007e+00 2.8036182e+00 3.0057998e+00 2.7234787e+00 2.4027927e+00 2.3234132e+00 2.7070699e+00 2.9017335e+00 2.3151346e+00 1.8036834e+00 2.5072065e+00 2.5017313e+00 2.5032633e+00 2.6031823e+00 1.5292174e+00 2.4058519e+00 4.3116266e+00 3.4064593e+00 4.2076930e+00 3.9021503e+00 4.1063936e+00 4.9099401e+00 2.8141516e+00 4.6055969e+00 4.1036742e+00 4.4145324e+00 3.4082578e+00 3.6052799e+00 3.8082804e+00 3.3123693e+00 3.4273179e+00 3.6154977e+00 3.8026444e+00 5.0117750e+00 5.2107474e+00 3.3130198e+00 4.0114753e+00 3.2109395e+00 5.0107787e+00 3.2067490e+00 4.0058313e+00 4.3058539e+00 3.1067996e+00 3.2049797e+00 3.9061098e+00 4.1066170e+00 4.4095056e+00 4.7221364e+00 3.9082316e+00 3.4019453e+00 3.9014304e+00 4.4232188e+00 3.9139973e+00 3.8023591e+00 3.1057392e+00 3.7104219e+00 
3.9150553e+00 3.4248402e+00 3.4064593e+00 4.2084919e+00 4.0172759e+00 3.5193527e+00 3.3100431e+00 3.5073655e+00 3.7133435e+00 3.4036743e+00 4.0004442e-01 5.0043084e-01 3.4085233e-01 8.0046764e-01 2.2573593e-01 4.0243965e-01 4.2362917e-01 1.2036925e+00 1.1896595e+00 8.0879776e-01 5.0000761e-01 1.1006371e+00 5.2133179e-01 8.0046685e-01 5.0437695e-01 4.0125062e-01 5.0477564e-01 5.0043084e-01 4.5148429e-01 4.0125062e-01 6.0000952e-01 6.0000317e-01 2.2608083e-01 3.0922892e-01 8.0000160e-01 7.4269314e-01 9.6572569e-01 3.4085233e-01 4.0246123e-01 9.0000136e-01 3.4085233e-01 4.0127250e-01 5.0001522e-01 4.0004442e-01 1.1000003e+00 2.2608083e-01 4.1317535e-01 5.7609230e-01 4.0122873e-01 5.2167829e-01 2.0061436e-01 7.0088627e-01 4.0004442e-01 3.3852404e+00 3.1245391e+00 3.5521657e+00 2.6057331e+00 3.2281303e+00 3.1021033e+00 3.3145497e+00 1.9088256e+00 3.2358110e+00 2.5040476e+00 2.1337832e+00 2.8091158e+00 2.6173653e+00 3.3068237e+00 2.2078368e+00 3.0635687e+00 3.1029264e+00 2.7045714e+00 3.1156892e+00 2.5038387e+00 3.4072735e+00 2.6199287e+00 3.5105217e+00 3.3061800e+00 2.9316687e+00 3.0488379e+00 3.4462681e+00 3.6292576e+00 3.1074604e+00 2.1103491e+00 2.4046650e+00 2.3052527e+00 2.5074705e+00 3.7037846e+00 3.1023805e+00 3.1087156e+00 3.3416864e+00 3.0212423e+00 2.7029308e+00 2.6036513e+00 3.0012006e+00 3.2078939e+00 2.6064541e+00 1.9145304e+00 2.8026114e+00 2.8028068e+00 2.8033825e+00 2.9167099e+00 1.6147493e+00 2.7040740e+00 4.6133719e+00 3.7058811e+00 4.5290217e+00 4.2056470e+00 4.4115634e+00 5.2381327e+00 3.1057013e+00 4.9271590e+00 4.4118721e+00 4.7354168e+00 3.7201124e+00 3.9113698e+00 4.1247181e+00 3.6087856e+00 3.7244383e+00 3.9212835e+00 4.1101783e+00 5.3422962e+00 5.5362181e+00 3.6046999e+00 4.3279835e+00 3.5095358e+00 5.3412086e+00 3.5135120e+00 4.3162096e+00 4.6297141e+00 3.4124092e+00 3.5088081e+00 4.2105763e+00 4.4358170e+00 4.7408876e+00 5.0762364e+00 4.2125085e+00 3.7079173e+00 4.2021973e+00 4.7752666e+00 4.2166536e+00 4.1080028e+00 3.4084548e+00 4.0338654e+00 4.2256165e+00 3.7563734e+00 3.7058811e+00 4.5190617e+00 4.3264209e+00 3.8360186e+00 3.6136974e+00 3.8177300e+00 4.0156240e+00 3.7048582e+00 6.3164977e-01 3.0017653e-01 4.1209001e-01 2.0061436e-01 4.0127250e-01 7.0911112e-01 8.2458409e-01 1.0207396e+00 5.2201750e-01 1.2699992e-01 7.0470867e-01 4.0004442e-01 4.0122873e-01 3.0482299e-01 5.2167208e-01 3.0490481e-01 4.0122873e-01 4.0002221e-01 2.0061436e-01 2.0061436e-01 2.0061436e-01 3.0482299e-01 3.0482299e-01 4.0122873e-01 7.0008584e-01 8.0879701e-01 3.0017653e-01 3.0474106e-01 5.0043084e-01 3.0017653e-01 6.0964597e-01 1.0000000e-01 2.0121983e-01 1.1019599e+00 6.0035305e-01 4.0004442e-01 4.5148429e-01 4.0127250e-01 4.0004442e-01 4.0125062e-01 3.3808272e-01 1.1269424e-01 3.2369541e+00 3.0101869e+00 3.4219340e+00 2.5073576e+00 3.1113295e+00 3.0016913e+00 3.2074921e+00 1.8128536e+00 3.1127326e+00 2.4076937e+00 2.0429861e+00 2.7074657e+00 2.5087337e+00 3.2029987e+00 2.1087640e+00 2.9250474e+00 3.0040848e+00 2.6011837e+00 3.0090716e+00 2.4029250e+00 3.3087901e+00 2.5074281e+00 3.4046875e+00 3.2018065e+00 2.8107271e+00 2.9185950e+00 3.3183094e+00 3.5134617e+00 3.0049285e+00 2.0041542e+00 2.3049133e+00 2.2050331e+00 2.4035997e+00 3.6030023e+00 3.0040438e+00 3.0070658e+00 3.2168317e+00 2.9083216e+00 2.6031436e+00 2.5048522e+00 2.9013423e+00 3.1034810e+00 2.5032729e+00 1.8201043e+00 2.7028014e+00 2.7016556e+00 2.7027522e+00 2.8056775e+00 1.5256523e+00 2.6033557e+00 4.5162553e+00 3.6081006e+00 4.4160732e+00 4.1039121e+00 4.3103378e+00 5.1203327e+00 3.0096880e+00 4.8129366e+00 
4.3058720e+00 4.6249088e+00 3.6148619e+00 3.8081633e+00 4.0157626e+00 3.5129206e+00 3.6349703e+00 3.8226858e+00 4.0057080e+00 5.2232912e+00 5.4204287e+00 3.5035589e+00 4.2200398e+00 3.4145570e+00 5.2217206e+00 3.4096180e+00 4.2110197e+00 4.5140458e+00 3.3101076e+00 3.4081996e+00 4.1095117e+00 4.3161641e+00 4.6204721e+00 4.9419857e+00 4.1123051e+00 3.6033860e+00 4.1009647e+00 4.6434791e+00 4.1197833e+00 4.0049425e+00 3.3090452e+00 3.9205015e+00 4.1230798e+00 3.6413278e+00 3.6081006e+00 4.4145323e+00 4.2254713e+00 3.7302938e+00 3.5112285e+00 3.7130507e+00 3.9190472e+00 3.6058055e+00 5.0043842e-01 1.0426513e+00 5.2167208e-01 4.0004442e-01 3.0026460e-01 1.4542931e+00 1.5965783e+00 1.1269511e+00 7.4262964e-01 1.3253871e+00 9.3306807e-01 1.0032293e+00 8.5406674e-01 7.0470720e-01 7.0633229e-01 5.7608844e-01 6.0017982e-01 6.3192325e-01 8.2418071e-01 8.0879625e-01 3.4080442e-01 4.0243965e-01 1.0030871e+00 1.2189645e+00 1.3741465e+00 5.0043842e-01 6.0201716e-01 1.1055705e+00 5.0043842e-01 1.1269424e-01 7.1621748e-01 6.7616902e-01 6.0000952e-01 3.0008832e-01 6.8170466e-01 9.3735629e-01 4.0004442e-01 9.3308853e-01 3.0474106e-01 9.6572569e-01 6.0948212e-01 3.4311880e+00 3.1440065e+00 3.5828092e+00 2.6061623e+00 3.2490712e+00 3.1047537e+00 3.3265679e+00 1.9024467e+00 3.2610547e+00 2.5066443e+00 2.1042326e+00 2.8182771e+00 2.6268573e+00 3.3136174e+00 2.2177383e+00 3.1042292e+00 3.1056084e+00 2.7106185e+00 3.1258664e+00 2.5072166e+00 3.4123850e+00 2.6397031e+00 3.5191318e+00 3.3125861e+00 2.9569988e+00 3.0825000e+00 3.4750557e+00 3.6484459e+00 3.1148203e+00 2.1231535e+00 2.4058952e+00 2.3063814e+00 2.5167763e+00 3.7071732e+00 3.1042001e+00 3.1166462e+00 3.3690976e+00 3.0370401e+00 2.7067267e+00 2.6060811e+00 3.0024163e+00 3.2157547e+00 2.6139440e+00 1.9029771e+00 2.8056558e+00 2.8068283e+00 2.8077255e+00 2.9323793e+00 1.6119586e+00 2.7091848e+00 4.6187512e+00 3.7092298e+00 4.5442576e+00 4.2099019e+00 4.4180513e+00 5.2548586e+00 3.1079005e+00 4.9407795e+00 4.4195588e+00 4.7516511e+00 3.7329384e+00 3.9191954e+00 4.1389224e+00 3.6127211e+00 3.7328453e+00 3.9318950e+00 4.1174259e+00 5.3601143e+00 5.5513974e+00 3.6073242e+00 4.3425018e+00 3.5138357e+00 5.3586950e+00 3.5235095e+00 4.3257995e+00 4.6452056e+00 3.4217238e+00 3.5154314e+00 4.2169032e+00 4.4544908e+00 4.7602896e+00 5.1057502e+00 4.2193718e+00 3.7147036e+00 4.2043114e+00 4.8058872e+00 4.2239687e+00 4.1138950e+00 3.4146523e+00 4.0526593e+00 4.2382079e+00 3.7847403e+00 3.7092298e+00 4.5291248e+00 4.3385161e+00 3.8547029e+00 3.6228903e+00 3.8290678e+00 4.0228342e+00 3.7082809e+00 6.3164977e-01 3.0026460e-01 1.2085435e-01 6.0948506e-01 1.0143978e+00 1.3131369e+00 8.0928056e-01 4.0246123e-01 8.5409862e-01 7.0016860e-01 5.0477564e-01 6.0201716e-01 5.6595908e-01 4.0363334e-01 4.1212852e-01 1.2699992e-01 3.3818226e-01 4.1210927e-01 3.3818226e-01 2.0181667e-01 1.2085435e-01 5.0855077e-01 1.0001598e+00 1.1055707e+00 0.0000000e+00 3.0026460e-01 6.0964891e-01 0.0000000e+00 5.0043842e-01 3.0482299e-01 4.0246123e-01 8.0254500e-01 5.0043842e-01 5.2133802e-01 7.0556260e-01 2.0181667e-01 7.0008735e-01 3.0026460e-01 6.0948506e-01 2.0181667e-01 3.2490712e+00 3.0153168e+00 3.4297841e+00 2.5067523e+00 3.1166337e+00 3.0027816e+00 3.2112793e+00 1.8068048e+00 3.1183051e+00 2.4116924e+00 2.0138832e+00 2.7116615e+00 2.5059537e+00 3.2048192e+00 2.1144760e+00 2.9351753e+00 3.0063019e+00 2.6019122e+00 3.0106587e+00 2.4030297e+00 3.3125861e+00 2.5120719e+00 3.4068163e+00 3.2029877e+00 2.8162444e+00 2.9267417e+00 3.3252407e+00 3.5189464e+00 3.0077107e+00 2.0051350e+00 
2.3037132e+00 2.2028146e+00 2.4058620e+00 3.6044981e+00 3.0062070e+00 3.0107283e+00 3.2237456e+00 2.9105093e+00 2.6052541e+00 2.5062865e+00 2.9018772e+00 3.1056084e+00 2.5048522e+00 1.8082911e+00 2.7043948e+00 2.7029415e+00 2.7046027e+00 2.8091099e+00 1.5248852e+00 2.6055127e+00 4.5209020e+00 3.6112573e+00 4.4212031e+00 4.1056541e+00 4.3138986e+00 5.1255338e+00 3.0133997e+00 4.8167235e+00 4.3081273e+00 4.6319211e+00 3.6205854e+00 3.8114965e+00 4.0212972e+00 3.5173798e+00 3.6449970e+00 3.8299342e+00 4.0081754e+00 5.2290121e+00 5.4254411e+00 3.5039202e+00 4.2264145e+00 3.4198378e+00 5.2270034e+00 3.4138008e+00 4.2149806e+00 4.5183778e+00 3.3145502e+00 3.4118179e+00 4.1129687e+00 4.3210760e+00 4.6261633e+00 4.9512603e+00 4.1165035e+00 3.6051692e+00 4.1014742e+00 4.6540056e+00 4.1257291e+00 4.0071257e+00 3.3129914e+00 3.9274863e+00 4.1301604e+00 3.6542046e+00 3.6112573e+00 4.4192311e+00 4.2328883e+00 3.7399948e+00 3.5155767e+00 3.7180846e+00 3.9250546e+00 3.6083191e+00 6.0184622e-01 7.4263078e-01 1.1138955e+00 4.2268438e-01 7.0096708e-01 2.4170870e-01 3.0490481e-01 3.0490481e-01 3.0017653e-01 3.0474106e-01 3.0474106e-01 8.0879701e-01 4.2362917e-01 6.1119267e-01 7.0462697e-01 4.1317535e-01 2.2538848e-01 3.0482299e-01 7.1621748e-01 6.7616723e-01 3.0474106e-01 4.0125062e-01 5.0001522e-01 6.3164977e-01 5.2491131e-01 2.2573593e-01 6.3164977e-01 1.0207396e+00 3.3808272e-01 4.0246123e-01 1.4180463e+00 1.0030868e+00 4.5148429e-01 4.1317535e-01 7.4263078e-01 3.0017653e-01 8.0879701e-01 1.0000000e-01 4.5078948e-01 3.2116783e+00 3.0049285e+00 3.4072983e+00 2.5182898e+00 3.1051604e+00 3.0020136e+00 3.2049016e+00 1.8469618e+00 3.1036832e+00 2.4099081e+00 2.1180493e+00 2.7068820e+00 2.5224740e+00 3.2021231e+00 2.1097449e+00 2.9077617e+00 3.0041462e+00 2.6022422e+00 3.0134290e+00 2.4087504e+00 3.3085101e+00 2.5050799e+00 3.4038679e+00 3.2010814e+00 2.8036959e+00 2.9060895e+00 3.3058271e+00 3.5063866e+00 3.0043212e+00 2.0122773e+00 2.3159426e+00 2.2186306e+00 2.4051454e+00 3.6029749e+00 3.0041461e+00 3.0062373e+00 3.2059465e+00 2.9096170e+00 2.6032656e+00 2.5097004e+00 2.9028411e+00 3.1023606e+00 2.5057847e+00 1.8685354e+00 2.7039990e+00 2.7016498e+00 2.7029428e+00 2.8028074e+00 1.5747520e+00 2.6039937e+00 4.5157550e+00 3.6083209e+00 4.4088451e+00 4.1031691e+00 4.3089952e+00 5.1095334e+00 3.0117336e+00 4.8052574e+00 4.3035619e+00 4.6175091e+00 3.6116958e+00 3.8067089e+00 4.0107159e+00 3.5138361e+00 3.6350483e+00 3.8210210e+00 4.0037985e+00 5.2113565e+00 5.4105254e+00 3.5063553e+00 4.2147222e+00 3.4147657e+00 5.2097995e+00 3.4081036e+00 4.2080425e+00 4.5057296e+00 3.3089414e+00 3.4074852e+00 4.1084282e+00 4.3058539e+00 4.6088153e+00 4.9193995e+00 4.1112251e+00 3.6020843e+00 4.1009356e+00 4.6223848e+00 4.1190046e+00 4.0036188e+00 3.3085886e+00 3.9129256e+00 4.1197933e+00 3.6305006e+00 3.6083209e+00 4.4113183e+00 4.2225427e+00 3.7249938e+00 3.5105217e+00 3.7103007e+00 3.9184088e+00 3.6056580e+00 4.0125062e-01 5.7609230e-01 1.0095367e+00 1.0776296e+00 6.3322667e-01 3.0490481e-01 9.0140221e-01 4.1212852e-01 6.0000317e-01 3.4085233e-01 6.0035305e-01 3.3818226e-01 3.0000000e-01 4.0122873e-01 2.2538848e-01 4.0004442e-01 4.0122873e-01 2.0061436e-01 3.0000000e-01 6.0017982e-01 7.0462844e-01 8.5406616e-01 3.0026460e-01 4.0243965e-01 7.0088477e-01 3.0026460e-01 4.5783248e-01 3.0008832e-01 3.0490481e-01 1.1002025e+00 4.1315633e-01 4.0125062e-01 4.2362917e-01 4.0125062e-01 4.1209001e-01 2.4170870e-01 5.0436965e-01 2.2573593e-01 3.1712557e+00 2.9203034e+00 3.3425817e+00 2.4092081e+00 3.0228582e+00 2.9024211e+00 
3.1131137e+00 1.7168003e+00 3.0276611e+00 2.3094323e+00 1.9540727e+00 2.6109956e+00 2.4153242e+00 3.1056218e+00 2.0123796e+00 2.8520945e+00 2.9050328e+00 2.5029614e+00 2.9148948e+00 2.3042831e+00 3.2109395e+00 2.4161682e+00 3.3086859e+00 3.1042389e+00 2.7244207e+00 2.8394157e+00 3.2369857e+00 3.4247142e+00 2.9077271e+00 1.9085444e+00 2.2064916e+00 2.1068047e+00 2.3066817e+00 3.5042241e+00 2.9048033e+00 2.9102290e+00 3.1338090e+00 2.8169587e+00 2.5042601e+00 2.4061715e+00 2.8017212e+00 3.0065627e+00 2.4058322e+00 1.7261843e+00 2.6037439e+00 2.6027120e+00 2.6040234e+00 2.7127458e+00 1.4350761e+00 2.5049231e+00 4.4189015e+00 3.5095669e+00 4.3257995e+00 4.0057109e+00 4.2135057e+00 5.0324952e+00 2.9113810e+00 4.7221382e+00 4.2099962e+00 4.5353918e+00 3.5215862e+00 3.7118930e+00 3.9240025e+00 3.4150232e+00 3.5401623e+00 3.7282910e+00 3.9092259e+00 5.1365012e+00 5.3314853e+00 3.4049933e+00 4.1286955e+00 3.3168890e+00 5.1347989e+00 3.3143385e+00 4.1161770e+00 4.4243750e+00 3.2143454e+00 3.3110189e+00 4.0125032e+00 4.2289520e+00 4.5343227e+00 4.8661173e+00 4.0156353e+00 3.5063553e+00 4.0017163e+00 4.5675364e+00 4.0235140e+00 3.9076272e+00 3.2116700e+00 3.8321139e+00 4.0301570e+00 3.5598557e+00 3.5095669e+00 4.3201293e+00 4.1322798e+00 3.6413292e+00 3.4157005e+00 3.6188994e+00 3.8226858e+00 3.5071409e+00 5.0436235e-01 1.1269511e+00 1.4180734e+00 9.1446938e-01 5.0476836e-01 9.6593231e-01 8.0051115e-01 6.1119558e-01 7.0176271e-01 6.0964891e-01 4.3213914e-01 5.2133179e-01 2.2573593e-01 4.1420960e-01 5.2133802e-01 4.5078948e-01 2.2608083e-01 2.0121983e-01 6.1119558e-01 1.1005364e+00 1.2089192e+00 1.2085435e-01 2.4195741e-01 7.1621884e-01 1.2085435e-01 4.0004442e-01 4.1212852e-01 5.0085236e-01 7.0096858e-01 4.0127250e-01 5.6394820e-01 8.0967961e-01 2.0000000e-01 8.0051115e-01 2.2573593e-01 7.1621884e-01 3.0482299e-01 3.3545239e+00 3.1166331e+00 3.5333785e+00 2.6054739e+00 3.2183845e+00 3.1025789e+00 3.3116521e+00 1.9046783e+00 3.2211369e+00 2.5096353e+00 2.1074907e+00 2.8107054e+00 2.6064541e+00 3.3051050e+00 2.2121875e+00 3.0393610e+00 3.1054994e+00 2.7022579e+00 3.1107490e+00 2.5027328e+00 3.4112739e+00 2.6129479e+00 3.5073688e+00 3.3035252e+00 2.9185900e+00 3.0300451e+00 3.4286400e+00 3.6205854e+00 3.1074470e+00 2.1053074e+00 2.4030297e+00 2.3022754e+00 2.5057763e+00 3.7043108e+00 3.1053329e+00 3.1100313e+00 3.3265652e+00 3.0118276e+00 2.7046025e+00 2.6052853e+00 3.0016501e+00 3.2059133e+00 2.6047974e+00 1.9052628e+00 2.8038694e+00 2.8028007e+00 2.8041967e+00 2.9102290e+00 1.6179159e+00 2.7049931e+00 4.6192199e+00 3.7100254e+00 4.5225779e+00 4.2056438e+00 4.4133506e+00 5.2278849e+00 3.1114444e+00 4.9186970e+00 4.4088300e+00 4.7323336e+00 3.7201124e+00 3.9113387e+00 4.1217116e+00 3.6152935e+00 3.7397620e+00 3.9276515e+00 4.1085246e+00 5.3314853e+00 5.5274937e+00 3.6037456e+00 4.3264210e+00 3.5173586e+00 5.3296471e+00 3.5134601e+00 4.3151165e+00 4.6204664e+00 3.4137985e+00 3.5110031e+00 4.2123903e+00 4.4237218e+00 4.7288133e+00 5.0555470e+00 4.2155430e+00 3.7056457e+00 4.2016096e+00 4.7574592e+00 4.2235569e+00 4.1072664e+00 3.4118179e+00 4.0283196e+00 4.2288238e+00 3.7532858e+00 3.7100254e+00 4.5190617e+00 4.3311362e+00 3.8383398e+00 3.6148683e+00 3.8177286e+00 4.0227665e+00 3.7075359e+00 1.5237054e+00 1.5778323e+00 1.1528553e+00 8.0928056e-01 1.4109657e+00 9.0296858e-01 1.1060939e+00 8.5617086e-01 6.0184934e-01 8.2671175e-01 8.1112984e-01 7.1621748e-01 7.2113820e-01 9.0642722e-01 9.0166476e-01 5.2167829e-01 5.6347978e-01 1.1011719e+00 1.1531951e+00 1.3523685e+00 6.0948506e-01 7.0008735e-01 
1.4631182e+00 1.7576822e+00 1.3254284e+00 1.0172489e+00 6.0605366e-01 9.1894698e-01 1.2951152e+00 8.3187290e-01 3.0474106e-01 8.1500329e-01 1.0396215e+00 9.6150595e-01 1.2528590e+00 5.6347978e-01 8.7240114e-01 2.5401214e+00 1.6166178e+00 2.5672376e+00 2.1256636e+00 2.3434890e+00 3.2706021e+00 1.0235120e+00 2.9379053e+00 2.3650373e+00 2.7819820e+00 1.7939428e+00 1.8718670e+00 2.1669217e+00 1.5269837e+00 1.7152309e+00 1.9257519e+00 2.0672316e+00 3.3962101e+00 3.5413320e+00 1.5241169e+00 2.3604685e+00 1.4422764e+00 3.3805191e+00 1.5352907e+00 2.2985560e+00 2.6785349e+00 1.4314424e+00 1.4886759e+00 2.1422340e+00 2.5406863e+00 2.8290490e+00 3.2861997e+00 2.1472832e+00 1.6782365e+00 2.1087015e+00 2.9939447e+00 2.1830772e+00 2.0525906e+00 1.3935744e+00 2.1564350e+00 2.2287876e+00 2.0426476e+00 1.6166178e+00 2.4889554e+00 2.3256006e+00 1.9561612e+00 1.6066555e+00 1.8386981e+00 2.0009986e+00 1.6313110e+00 8.0883916e-01 5.0043842e-01 6.0202028e-01 8.0004602e-01 3.3808272e-01 5.0437695e-01 8.0093081e-01 5.2838320e-01 6.0201716e-01 2.5399984e-01 7.2036819e-01 5.0517282e-01 5.0043842e-01 7.0008584e-01 9.1424701e-01 9.0157896e-01 3.0017653e-01 7.2044167e-01 6.2656178e-01 6.6334810e-01 3.6259865e-01 9.0026588e-01 5.0436235e-01 4.1212852e-01 8.0879701e-01 7.0478886e-01 3.0482299e-01 5.2201750e-01 4.5847767e-01 4.0125062e-01 4.1317535e-01 1.0363096e+00 3.4080442e-01 3.0474106e-01 2.2573593e-01 3.0490481e-01 1.2204839e+00 2.4195741e-01 1.8101835e+00 9.0166476e-01 1.7375279e+00 1.4002005e+00 1.6032003e+00 2.4532171e+00 1.0032443e+00 2.1331916e+00 1.6052507e+00 1.9422649e+00 9.1894698e-01 1.1025819e+00 1.3276411e+00 8.1719606e-01 1.0142626e+00 1.1298636e+00 1.3025621e+00 2.5612597e+00 2.7430487e+00 9.0155438e-01 1.5299064e+00 7.1708289e-01 2.5605373e+00 7.0633229e-01 1.5079428e+00 1.8443132e+00 6.0383105e-01 7.0096708e-01 1.4023806e+00 1.6740160e+00 1.9756002e+00 2.3801033e+00 1.4049093e+00 9.0142636e-01 1.4001717e+00 2.0898615e+00 1.4183606e+00 1.3009222e+00 6.0184622e-01 1.2661803e+00 1.4267527e+00 1.1084368e+00 9.0166476e-01 1.7108631e+00 1.5299252e+00 1.0782751e+00 8.1343016e-01 1.0116721e+00 1.2193537e+00 9.0026497e-01 7.9148746e-01 7.0993998e-01 9.3541878e-01 8.1937731e-01 5.0043084e-01 5.6370994e-01 4.1212852e-01 1.0782753e+00 6.0184622e-01 9.0557807e-01 7.4269314e-01 7.0633229e-01 8.2841920e-01 9.1695534e-01 1.0756891e+00 7.3084048e-01 5.2491131e-01 5.0085236e-01 5.0476836e-01 5.0085236e-01 1.1074740e+00 8.3916809e-01 1.2049539e+00 9.6501813e-01 4.2270142e-01 8.0291749e-01 5.0855077e-01 5.3943256e-01 8.2631334e-01 4.0243965e-01 1.0207260e+00 5.2524663e-01 8.0055465e-01 7.0184453e-01 7.0184453e-01 1.0777307e+00 6.0366256e-01 2.0696799e+00 1.1543334e+00 1.9286352e+00 1.6071727e+00 1.8312163e+00 2.6295459e+00 1.1153247e+00 2.3155068e+00 1.8040969e+00 2.1901871e+00 1.2562955e+00 1.3263639e+00 1.5518083e+00 1.1271226e+00 1.4557657e+00 1.4915559e+00 1.5136393e+00 2.7555974e+00 2.9267466e+00 1.0030718e+00 1.7744103e+00 1.0843333e+00 2.7324083e+00 9.6953662e-01 1.7456060e+00 2.0249458e+00 9.1568820e-01 1.0151258e+00 1.6309461e+00 1.8315348e+00 2.1358791e+00 2.5304748e+00 1.6492450e+00 1.1075720e+00 1.6001777e+00 2.2141198e+00 1.7441970e+00 1.5196090e+00 9.6683480e-01 1.4838225e+00 1.7167914e+00 1.4122282e+00 1.1543334e+00 1.9438463e+00 1.8374400e+00 1.4268530e+00 1.0778421e+00 1.2792049e+00 1.5857302e+00 1.1532421e+00 1.1019503e+00 6.0201716e-01 5.0043842e-01 6.1135434e-01 7.0008735e-01 8.1156529e-01 4.1317535e-01 7.0000303e-01 4.0246123e-01 2.0061436e-01 4.1210927e-01 5.0436965e-01 7.0000303e-01 
6.0366256e-01 2.0121983e-01 1.2007726e+00 9.1916394e-01 1.0124729e+00 8.0055465e-01 4.0246123e-01 7.0008735e-01 5.0085236e-01 6.0017982e-01 6.0202028e-01 6.3165225e-01 7.4612830e-01 6.0383105e-01 1.1269424e-01 7.0184453e-01 1.4559030e+00 5.6371422e-01 5.2167829e-01 5.2133179e-01 4.0004442e-01 1.7133283e+00 6.0948800e-01 1.3743342e+00 5.2524663e-01 1.2702954e+00 9.0142636e-01 1.1286018e+00 1.9763960e+00 1.2004262e+00 1.6484371e+00 1.1066159e+00 1.5033966e+00 6.1990228e-01 6.3322667e-01 8.9538275e-01 6.1990228e-01 1.0010060e+00 9.1471442e-01 8.0488008e-01 2.0894199e+00 2.2581358e+00 7.0088627e-01 1.1085342e+00 6.3178782e-01 2.0855639e+00 4.0363334e-01 1.0293900e+00 1.3743657e+00 4.0006662e-01 4.0125062e-01 9.3329055e-01 1.2396422e+00 1.5267540e+00 1.9798779e+00 9.6591465e-01 4.0127250e-01 9.0026497e-01 1.7151603e+00 1.0797805e+00 8.0296037e-01 4.0006662e-01 8.9540816e-01 1.0837679e+00 9.6674360e-01 5.2524663e-01 1.2440789e+00 1.1938630e+00 9.1892454e-01 5.2524663e-01 6.3912943e-01 9.3733589e-01 4.5148429e-01 1.1281352e+00 9.0002570e-01 5.0517282e-01 9.4513210e-01 4.1315633e-01 1.2014191e+00 5.2133179e-01 1.3063533e+00 1.1019505e+00 8.5403370e-01 1.0426516e+00 1.3523310e+00 1.4544312e+00 9.0142636e-01 3.3818226e-01 5.0085236e-01 5.0437695e-01 3.0922892e-01 1.5001461e+00 9.0005094e-01 9.0668287e-01 1.2396475e+00 8.7209296e-01 5.0000761e-01 4.5078948e-01 8.0046764e-01 1.0030724e+00 4.1317535e-01 6.7824250e-01 6.0017665e-01 6.0000952e-01 6.0000317e-01 7.4262850e-01 6.3925756e-01 5.0001522e-01 2.4077059e+00 1.5012741e+00 2.3329496e+00 2.0008921e+00 2.2042323e+00 3.0476353e+00 9.3541878e-01 2.7309758e+00 2.2068463e+00 2.5373913e+00 1.5160787e+00 1.7043730e+00 1.9242109e+00 1.4044668e+00 1.5401330e+00 1.7168122e+00 1.9044144e+00 3.1543192e+00 3.3406961e+00 1.4044697e+00 2.1265131e+00 1.3061138e+00 3.1536525e+00 1.3069754e+00 2.1097680e+00 2.4379736e+00 1.2049541e+00 1.3017511e+00 2.0033792e+00 2.2562563e+00 2.5606009e+00 2.9377608e+00 2.0050253e+00 1.5030978e+00 2.0001168e+00 2.6390071e+00 2.0114908e+00 1.9023062e+00 1.2016381e+00 1.8467998e+00 2.0209800e+00 1.6143467e+00 1.5012741e+00 2.3121231e+00 2.1220697e+00 1.6461460e+00 1.4062065e+00 1.6118728e+00 1.8108200e+00 1.5004645e+00 1.1000005e+00 9.0305330e-01 9.0506343e-01 1.1075720e+00 8.0488008e-01 6.1119558e-01 6.3912943e-01 6.0383105e-01 3.0490481e-01 1.1269424e-01 4.1210927e-01 6.0184622e-01 7.0008735e-01 1.0803561e+00 1.2125410e+00 1.2178626e+00 9.0645118e-01 7.9153339e-01 1.3000002e+00 7.0096858e-01 3.0008832e-01 8.0245824e-01 1.1001015e+00 1.2040344e+00 1.2012928e+00 6.0017982e-01 9.0645118e-01 1.7262450e+00 1.1005460e+00 1.0000307e+00 1.0000307e+00 5.0043842e-01 1.7087610e+00 1.0003198e+00 1.6300950e+00 9.3848935e-01 1.5032156e+00 1.2007124e+00 1.4092540e+00 2.2026134e+00 1.8005395e+00 1.9004485e+00 1.4019342e+00 1.7231818e+00 7.4269314e-01 9.0668287e-01 1.1133897e+00 1.0251597e+00 1.0924484e+00 1.0143978e+00 1.1005460e+00 2.3044111e+00 2.5032992e+00 9.4511250e-01 1.3253497e+00 1.1075720e+00 2.3033192e+00 5.5450500e-01 1.3061180e+00 1.6004128e+00 5.4219811e-01 6.3912943e-01 1.2090477e+00 1.4006179e+00 1.7019555e+00 2.0185049e+00 1.2190878e+00 7.0548283e-01 1.2049539e+00 1.7202439e+00 1.2636227e+00 1.1006371e+00 7.0911112e-01 1.0207396e+00 1.2632946e+00 9.3308853e-01 9.3848935e-01 1.5130871e+00 1.3741498e+00 9.6572569e-01 6.9987517e-01 8.2421923e-01 1.0798806e+00 8.5583415e-01 5.2524663e-01 8.2418002e-01 6.3912709e-01 3.6452132e-01 5.6394820e-01 7.2036819e-01 5.0517282e-01 8.0008964e-01 1.0000005e+00 1.2000731e+00 1.1019597e+00 
4.0002221e-01 1.0039063e+00 7.4612830e-01 8.3183672e-01 6.0383105e-01 6.1119558e-01 2.0000000e-01 4.5078948e-01 1.1000098e+00 7.8890806e-01 4.0122873e-01 5.6371422e-01 4.1212852e-01 5.0001522e-01 5.2524663e-01 1.2137020e+00 3.4080442e-01 3.3813251e-01 3.0490481e-01 6.0035621e-01 1.5010034e+00 4.0246123e-01 1.5265987e+00 6.1135434e-01 1.6395342e+00 1.1134850e+00 1.3309249e+00 2.3136797e+00 7.1629168e-01 1.9760038e+00 1.3748534e+00 1.8136626e+00 9.1894698e-01 9.0320459e-01 1.2661802e+00 6.0427481e-01 9.1427000e-01 9.6685270e-01 1.0777305e+00 2.4270428e+00 2.5626268e+00 8.1112909e-01 1.4228744e+00 5.2167208e-01 2.4261071e+00 7.0633229e-01 1.3043552e+00 1.7511131e+00 6.0383105e-01 5.2491131e-01 1.1330776e+00 1.6740160e+00 1.9314874e+00 2.4166999e+00 1.1400420e+00 7.4269200e-01 1.1024820e+00 2.1702438e+00 1.1639421e+00 1.0427822e+00 4.2268438e-01 1.3276412e+00 1.2710363e+00 1.3154933e+00 6.1135434e-01 1.4922566e+00 1.3466030e+00 1.1400339e+00 7.3461436e-01 9.3733552e-01 9.7694377e-01 6.0365948e-01 5.8750389e-01 2.4195741e-01 8.6051471e-01 3.3818226e-01 8.1719606e-01 6.0202028e-01 6.0219099e-01 8.0337471e-01 1.0214933e+00 1.0338224e+00 5.2201750e-01 6.0000635e-01 3.6259865e-01 4.2268438e-01 2.2538848e-01 1.0087393e+00 5.4219811e-01 7.4618926e-01 9.2019277e-01 5.2838320e-01 3.4080442e-01 3.4085233e-01 3.4085233e-01 5.2838320e-01 2.0121983e-01 9.0294373e-01 3.0482299e-01 3.0490481e-01 3.0490481e-01 4.1420960e-01 1.1133986e+00 3.0017653e-01 1.9760242e+00 1.0776188e+00 1.8598666e+00 1.5071120e+00 1.7384459e+00 2.5637810e+00 9.3426769e-01 2.2403929e+00 1.7108631e+00 2.0993246e+00 1.1405598e+00 1.2394690e+00 1.4816205e+00 1.0776296e+00 1.4324323e+00 1.4163126e+00 1.4134492e+00 2.6753615e+00 2.8540068e+00 9.1001664e-01 1.6986597e+00 1.0426638e+00 2.6697938e+00 9.0657539e-01 1.6396943e+00 1.9541963e+00 8.5583415e-01 9.0207914e-01 1.5412500e+00 1.7849153e+00 2.0880425e+00 2.4973149e+00 1.5650163e+00 1.0060994e+00 1.5001440e+00 2.2185588e+00 1.6410190e+00 1.4111252e+00 8.5440680e-01 1.4330979e+00 1.6474117e+00 1.4095656e+00 1.0776188e+00 1.8534896e+00 1.7579984e+00 1.3938438e+00 1.0171340e+00 1.1990152e+00 1.4686236e+00 1.0427822e+00 6.8261201e-01 1.0004792e+00 6.3178782e-01 4.1210927e-01 6.0202028e-01 7.0025283e-01 8.0245903e-01 6.7720957e-01 8.1719606e-01 7.0008432e-01 1.0069214e+00 7.9153339e-01 8.6054545e-01 6.4049114e-01 6.3178782e-01 9.0155393e-01 1.2000065e+00 9.0508712e-01 2.0181667e-01 8.2635069e-01 7.1708289e-01 7.0548283e-01 8.0000239e-01 5.4219811e-01 1.3530568e+00 6.3322667e-01 8.0967961e-01 7.1708289e-01 7.0016860e-01 1.5402579e+00 6.3925756e-01 1.5611241e+00 6.4620889e-01 1.4283663e+00 1.1134850e+00 1.3189663e+00 2.1346646e+00 1.3000497e+00 1.8186729e+00 1.3009674e+00 1.7335369e+00 1.0118233e+00 8.1117067e-01 1.0567664e+00 6.0605366e-01 9.2867113e-01 1.0782857e+00 1.0429127e+00 2.2917679e+00 2.4270713e+00 5.0042326e-01 1.2848760e+00 6.9987517e-01 2.2396581e+00 5.2491734e-01 1.3051737e+00 1.5457820e+00 6.0365948e-01 8.0291749e-01 1.1110184e+00 1.3561934e+00 1.6492450e+00 2.1212048e+00 1.1186586e+00 6.7616723e-01 1.1005365e+00 1.7574668e+00 1.3269962e+00 1.0777411e+00 8.0097499e-01 1.0411548e+00 1.1972915e+00 9.9911696e-01 6.4620889e-01 1.4422764e+00 1.3473056e+00 9.3861512e-01 5.2491734e-01 8.6084272e-01 1.2528048e+00 8.2498722e-01 9.6150595e-01 5.0477564e-01 1.0214931e+00 8.0923926e-01 8.0492246e-01 1.0062544e+00 1.2363856e+00 1.2438823e+00 6.2656178e-01 4.0006662e-01 1.2085435e-01 2.0181667e-01 2.2573593e-01 1.2016443e+00 6.3925756e-01 9.2019277e-01 1.1335345e+00 7.1636719e-01 
5.0084481e-01 2.0121983e-01 5.0002283e-01 7.3155911e-01 2.0181667e-01 6.7626681e-01 3.0915245e-01 5.0437695e-01 4.1317535e-01 6.1845783e-01 9.0506254e-01 3.0922892e-01 2.1350025e+00 1.2189760e+00 2.0658767e+00 1.7034615e+00 1.9177947e+00 2.7775988e+00 7.7598704e-01 2.4534018e+00 1.9144928e+00 2.2857680e+00 1.2749306e+00 1.4182222e+00 1.6639408e+00 1.1527669e+00 1.4140789e+00 1.4954274e+00 1.6121856e+00 2.8913337e+00 3.0644792e+00 1.1011719e+00 1.8708183e+00 1.0777305e+00 2.8852099e+00 1.0396215e+00 1.8297117e+00 2.1701542e+00 9.4532171e-01 1.0262619e+00 1.7168122e+00 2.0059655e+00 2.3063931e+00 2.7230908e+00 1.7261949e+00 1.2093243e+00 1.7002548e+00 2.4331092e+00 1.7646791e+00 1.6080687e+00 9.3848935e-01 1.6152383e+00 1.7771159e+00 1.4971922e+00 1.2189760e+00 2.0349233e+00 1.8831259e+00 1.4665397e+00 1.1400339e+00 1.3492939e+00 1.5757399e+00 1.2102248e+00 8.1117067e-01 7.0548283e-01 6.0964891e-01 6.0605366e-01 7.0918894e-01 9.0279223e-01 8.0008964e-01 3.6259865e-01 1.3154973e+00 1.0604287e+00 1.1536694e+00 9.1892454e-01 5.0477564e-01 5.0894102e-01 3.0922892e-01 8.0046764e-01 9.0778124e-01 7.1708289e-01 8.6225026e-01 6.8685125e-01 4.0363334e-01 8.4536936e-01 1.5318139e+00 6.5832080e-01 6.7636452e-01 6.3322667e-01 5.6838732e-01 1.8053679e+00 7.2044167e-01 1.2092602e+00 5.0437695e-01 1.3018595e+00 8.0291671e-01 1.0095513e+00 1.9760044e+00 1.0208709e+00 1.6387179e+00 1.0597877e+00 1.4686236e+00 6.0201716e-01 6.0427481e-01 9.3331138e-01 7.0025283e-01 6.1119558e-01 6.0427175e-01 7.4269200e-01 2.0887699e+00 2.2281421e+00 1.0001753e+00 1.0797700e+00 4.1317535e-01 2.0885107e+00 5.2133179e-01 9.6591465e-01 1.4140457e+00 4.1209001e-01 2.2573593e-01 8.1156529e-01 1.3450340e+00 1.5966664e+00 2.0855643e+00 8.1343016e-01 4.6440171e-01 8.2635069e-01 1.8444686e+00 8.2635069e-01 7.1621748e-01 2.0061436e-01 1.0088783e+00 9.1566538e-01 1.0032296e+00 5.0437695e-01 1.1543257e+00 9.8997136e-01 8.1117067e-01 7.0470867e-01 6.0980961e-01 6.3322667e-01 3.0474106e-01 9.0031539e-01 7.0000151e-01 3.3813251e-01 5.2167829e-01 8.5403428e-01 1.0095513e+00 5.0043842e-01 5.2524663e-01 6.0980961e-01 6.1288055e-01 3.0026460e-01 1.1001015e+00 7.1636719e-01 6.3309258e-01 7.4335736e-01 5.2167208e-01 5.0043084e-01 6.0184309e-01 6.0964891e-01 6.0017982e-01 3.0482299e-01 1.1153247e+00 5.0043084e-01 4.0246123e-01 4.0125062e-01 3.0017653e-01 1.1270411e+00 4.0002221e-01 2.0175565e+00 1.1056693e+00 1.9099615e+00 1.6003257e+00 1.8055799e+00 2.6186105e+00 1.2017042e+00 2.3090806e+00 1.8007233e+00 2.1233216e+00 1.1144002e+00 1.3025622e+00 1.5097103e+00 1.0216374e+00 1.2396937e+00 1.3452695e+00 1.5005647e+00 2.7241212e+00 2.9166918e+00 1.0087396e+00 1.7168636e+00 9.3733552e-01 2.7221286e+00 9.0508756e-01 1.7046111e+00 2.0107590e+00 8.0879701e-01 9.0508712e-01 1.6049314e+00 1.8174378e+00 2.1221146e+00 2.4750525e+00 1.6096791e+00 1.1000193e+00 1.6000016e+00 2.1732693e+00 1.6308665e+00 1.5004872e+00 8.0883916e-01 1.4182493e+00 1.6308803e+00 1.2094550e+00 1.1056693e+00 1.9088565e+00 1.7377460e+00 1.2661852e+00 1.0088926e+00 1.2092662e+00 1.4340155e+00 1.1019692e+00 3.4342562e-01 6.0964891e-01 5.6595908e-01 5.0437695e-01 5.2167829e-01 4.5783248e-01 1.4023777e+00 1.1286018e+00 1.2201578e+00 1.0032443e+00 3.0922892e-01 9.0642679e-01 9.0166476e-01 6.0964597e-01 5.0084481e-01 8.6054545e-01 9.6574336e-01 8.0923926e-01 5.0477564e-01 9.0532093e-01 1.6742781e+00 7.8895472e-01 7.5564478e-01 7.4618926e-01 6.0964891e-01 1.9222003e+00 8.2462252e-01 1.2093908e+00 5.2201750e-01 1.0522594e+00 7.0548138e-01 9.3735629e-01 1.7578497e+00 1.4001717e+00 
1.4326118e+00 9.0166431e-01 1.3681502e+00 7.1636719e-01 4.5148429e-01 7.1183012e-01 6.3164977e-01 9.0534502e-01 8.5583415e-01 6.3322667e-01 1.9047821e+00 2.0429861e+00 3.3813251e-01 9.4634218e-01 7.1700774e-01 1.8662975e+00 3.0474106e-01 9.1695534e-01 1.1636098e+00 3.3818226e-01 5.0476836e-01 7.4329527e-01 1.0171202e+00 1.3020942e+00 1.8013674e+00 7.8935898e-01 3.0474106e-01 7.0008735e-01 1.4927071e+00 1.0336860e+00 6.7720957e-01 5.0855778e-01 7.3895268e-01 9.4622126e-01 8.4540285e-01 5.2201750e-01 1.0621172e+00 1.0788651e+00 8.1156529e-01 4.0002221e-01 5.6618864e-01 9.6935134e-01 5.2524663e-01 4.1212852e-01 5.0517282e-01 7.0008584e-01 6.3322667e-01 3.0490481e-01 1.2003660e+00 9.1552373e-01 1.0095513e+00 8.0046685e-01 4.5080200e-01 7.0105084e-01 6.0964891e-01 6.0365948e-01 5.0477564e-01 6.3178782e-01 7.4329527e-01 6.0201716e-01 2.2573593e-01 7.0096708e-01 1.4548054e+00 5.6347978e-01 5.2167208e-01 5.2133802e-01 4.0006662e-01 1.7132643e+00 6.0948506e-01 1.4655221e+00 7.0548283e-01 1.2921474e+00 9.1424701e-01 1.1900342e+00 1.9791159e+00 1.2013591e+00 1.6491682e+00 1.1111057e+00 1.5689626e+00 8.0726668e-01 7.4329527e-01 9.8998705e-01 8.0337471e-01 1.2004198e+00 1.1061923e+00 8.2635069e-01 2.0953157e+00 2.2622460e+00 6.0366256e-01 1.2097311e+00 8.0883841e-01 2.0866883e+00 6.0035621e-01 1.0858512e+00 1.3762609e+00 6.0000635e-01 6.0035305e-01 1.0143975e+00 1.2399444e+00 1.5291965e+00 1.9842703e+00 1.0777305e+00 4.1315633e-01 9.0005048e-01 1.7303039e+00 1.2394744e+00 8.2498722e-01 6.0018299e-01 9.9013884e-01 1.2395260e+00 1.1286911e+00 7.0548283e-01 1.3081175e+00 1.3479052e+00 1.1074834e+00 7.0184453e-01 8.1117067e-01 1.1186499e+00 6.0980961e-01 2.0181667e-01 5.2133802e-01 7.0548283e-01 4.0243965e-01 8.5471446e-01 9.1001664e-01 9.1916394e-01 6.0964891e-01 8.0296037e-01 1.0000307e+00 5.2524663e-01 4.1420960e-01 6.0000635e-01 8.0004523e-01 9.0166431e-01 9.0026588e-01 3.3818226e-01 6.0366256e-01 1.4340438e+00 8.0004523e-01 7.0000454e-01 7.0000151e-01 2.0000000e-01 1.4651632e+00 7.0008584e-01 1.7369589e+00 8.4540285e-01 1.6071563e+00 1.3008770e+00 1.5130871e+00 2.3098753e+00 1.5002444e+00 2.0034559e+00 1.5005854e+00 1.8322392e+00 8.5437498e-01 1.0087393e+00 1.2192920e+00 8.4786353e-01 1.1330694e+00 1.1270325e+00 1.2012867e+00 2.4144060e+00 2.6097166e+00 7.9153339e-01 1.4330116e+00 8.7209348e-01 2.4119960e+00 6.3178782e-01 1.4094452e+00 1.7039341e+00 5.6371422e-01 6.3309258e-01 1.3130937e+00 1.5066999e+00 1.8106410e+00 2.1515742e+00 1.3253458e+00 8.0004602e-01 1.3000908e+00 1.8533295e+00 1.3748188e+00 1.2012928e+00 5.7609230e-01 1.1298636e+00 1.3741846e+00 1.0451812e+00 8.4540285e-01 1.6176927e+00 1.4854079e+00 1.0777307e+00 7.4612830e-01 9.3306807e-01 1.1910068e+00 8.1715665e-01 4.0243965e-01 6.0184622e-01 6.0000952e-01 1.0158274e+00 1.1111057e+00 1.1191444e+00 8.0928056e-01 7.4335736e-01 1.2000002e+00 6.0964891e-01 3.0026460e-01 7.0088477e-01 1.0001601e+00 1.1024820e+00 1.1005458e+00 5.0042326e-01 8.0492246e-01 1.6321742e+00 1.0001753e+00 9.0005048e-01 9.0002615e-01 4.0006662e-01 1.6390068e+00 9.0029064e-01 1.6300430e+00 8.6084272e-01 1.5035329e+00 1.2004200e+00 1.4092511e+00 2.2043907e+00 1.7002548e+00 1.9010379e+00 1.4007831e+00 1.7240342e+00 7.4269314e-01 9.0534502e-01 1.1133984e+00 9.3184922e-01 1.0597992e+00 1.0142766e+00 1.1005364e+00 2.3071806e+00 2.5048249e+00 8.4536936e-01 1.3253910e+00 1.0116865e+00 2.3056305e+00 5.2838320e-01 1.3061582e+00 1.6010223e+00 4.8391482e-01 5.7608844e-01 1.2089313e+00 1.4017695e+00 1.7039229e+00 2.0293124e+00 1.2189760e+00 7.0096858e-01 1.2016380e+00 
1.7295384e+00 1.2636227e+00 1.1005460e+00 6.1830489e-01 1.0208709e+00 1.2632948e+00 9.3329055e-01 8.6084272e-01 1.5130912e+00 1.3741813e+00 9.6572569e-01 6.5832080e-01 8.2418071e-01 1.0788007e+00 7.9148662e-01 3.0922892e-01 8.0046764e-01 1.3743342e+00 1.3452695e+00 1.3745152e+00 1.0776296e+00 8.0051115e-01 1.4000349e+00 8.2462252e-01 3.0026460e-01 5.7609230e-01 1.2089253e+00 1.3131370e+00 1.3002493e+00 7.0016860e-01 1.0426760e+00 1.8951252e+00 1.2036864e+00 1.1055892e+00 1.1055707e+00 6.3165225e-01 1.9760099e+00 1.1133895e+00 1.3035495e+00 1.0032296e+00 1.1134939e+00 8.1112984e-01 1.0427944e+00 1.8040883e+00 1.9000220e+00 1.5005626e+00 1.0010060e+00 1.3844234e+00 6.1288055e-01 5.7609230e-01 7.8890721e-01 1.1056785e+00 1.1270325e+00 9.0778124e-01 7.0556260e-01 1.9141294e+00 2.1052841e+00 8.2421923e-01 1.0150395e+00 1.2036863e+00 1.9046783e+00 5.2133802e-01 9.3733589e-01 1.2010584e+00 6.0948212e-01 7.0470867e-01 8.5583357e-01 1.0008768e+00 1.3033860e+00 1.6469593e+00 9.0294373e-01 5.0436965e-01 8.5406616e-01 1.3485619e+00 1.0522594e+00 7.0993998e-01 8.0250123e-01 7.4329527e-01 1.0427822e+00 9.0053003e-01 1.0032296e+00 1.1531951e+00 1.1543259e+00 9.0142681e-01 5.6618864e-01 6.1135434e-01 9.3984267e-01 9.0168933e-01 7.1629303e-01 1.5266891e+00 1.3564850e+00 1.4198077e+00 1.1544060e+00 7.0088627e-01 1.3008812e+00 7.2036951e-01 3.0482299e-01 7.4954884e-01 1.1531951e+00 1.2645755e+00 1.2053003e+00 6.1119267e-01 1.0803561e+00 1.9177201e+00 1.1286911e+00 1.0451689e+00 1.0433444e+00 7.2036951e-01 2.0856547e+00 1.0782211e+00 1.0434746e+00 9.0029064e-01 9.0279223e-01 6.0948800e-01 8.0883841e-01 1.6097492e+00 1.8003682e+00 1.3025173e+00 8.0879701e-01 1.1347620e+00 3.0922892e-01 3.6452132e-01 5.2133179e-01 1.0032293e+00 9.3308891e-01 6.0383105e-01 5.0043084e-01 1.7170314e+00 1.9082779e+00 8.5406616e-01 7.4275547e-01 1.1001110e+00 1.7132654e+00 4.1212852e-01 7.0548138e-01 1.0030871e+00 5.0085236e-01 6.0000635e-01 6.1135434e-01 8.0879701e-01 1.1134075e+00 1.4922778e+00 6.3322667e-01 4.0246123e-01 6.8261201e-01 1.1935004e+00 7.4954884e-01 5.0437695e-01 7.0008584e-01 4.5148429e-01 7.4262964e-01 6.0018299e-01 9.0029064e-01 9.1424701e-01 8.5437440e-01 6.0017665e-01 5.2167208e-01 3.0915245e-01 6.4620889e-01 8.0000160e-01 1.0033867e+00 7.3461436e-01 8.2512420e-01 6.0219099e-01 6.0017982e-01 6.0000317e-01 5.0000761e-01 7.0016860e-01 6.0202028e-01 4.5148429e-01 5.7630313e-01 5.0855778e-01 1.2699992e-01 5.0894102e-01 1.2671752e+00 4.1420960e-01 3.6259865e-01 3.4080442e-01 2.4170870e-01 1.5133193e+00 4.1317535e-01 1.5238388e+00 6.0980961e-01 1.4557632e+00 1.1002023e+00 1.3069713e+00 2.1693127e+00 1.1005458e+00 1.8443124e+00 1.3063934e+00 1.6655594e+00 6.5832080e-01 8.0492246e-01 1.0498228e+00 5.7832449e-01 9.1424701e-01 9.0320459e-01 1.0032296e+00 2.2802814e+00 2.4537354e+00 7.1621613e-01 1.2528590e+00 5.3914287e-01 2.2781292e+00 4.2362917e-01 1.2128138e+00 1.5640141e+00 3.4085233e-01 4.1212852e-01 1.1060939e+00 1.4140458e+00 1.7081323e+00 2.1436849e+00 1.1138955e+00 6.0184622e-01 1.1001015e+00 1.8659091e+00 1.1544060e+00 1.0010209e+00 3.3813251e-01 1.0224270e+00 1.1635398e+00 9.7600992e-01 6.0980961e-01 1.4182493e+00 1.2705641e+00 8.9538275e-01 5.4219811e-01 7.3084171e-01 9.6936870e-01 6.0184934e-01 3.0922892e-01 2.4170870e-01 4.0127250e-01 1.6009488e+00 1.0040629e+00 1.0499492e+00 1.2653025e+00 9.1471442e-01 6.1119558e-01 5.0477564e-01 9.0005048e-01 1.1016049e+00 5.0043084e-01 7.0096708e-01 7.0088627e-01 7.0470720e-01 7.0176121e-01 8.0967961e-01 6.3165225e-01 6.0201716e-01 2.5221737e+00 1.6096629e+00 
2.4221589e+00 2.1015969e+00 2.3098905e+00 3.1317714e+00 1.0597879e+00 2.8188299e+00 2.3040148e+00 2.6373482e+00 1.6231306e+00 1.8068049e+00 2.0210084e+00 1.5237053e+00 1.7080686e+00 1.8459262e+00 2.0034094e+00 3.2387184e+00 3.4286399e+00 1.5005854e+00 2.2285172e+00 1.4324350e+00 3.2358000e+00 1.4109628e+00 2.2110849e+00 2.5224740e+00 1.3139336e+00 1.4095777e+00 2.1090366e+00 2.3323064e+00 2.6377472e+00 2.9966835e+00 2.1144760e+00 1.6012568e+00 2.1000482e+00 2.6968519e+00 2.1346646e+00 2.0025815e+00 1.3133662e+00 1.9351024e+00 2.1377869e+00 1.7137965e+00 1.6096629e+00 2.4161682e+00 2.2434416e+00 1.7684500e+00 1.5143051e+00 1.7168636e+00 1.9368172e+00 1.6050040e+00 1.1269424e-01 3.3818226e-01 1.3017961e+00 7.4612830e-01 1.0262619e+00 1.2443200e+00 8.2421923e-01 6.0202028e-01 2.2573593e-01 6.0017982e-01 8.4572653e-01 3.0922892e-01 5.6347978e-01 4.1317535e-01 6.0964891e-01 5.2201750e-01 7.3090905e-01 8.0245824e-01 4.1420960e-01 2.2297880e+00 1.3131802e+00 2.1734835e+00 1.8042696e+00 2.0169191e+00 2.8857511e+00 7.7598796e-01 2.5607759e+00 2.0181988e+00 2.3909895e+00 1.3770846e+00 1.5195166e+00 1.7689720e+00 1.2362756e+00 1.4651863e+00 1.5800353e+00 1.7155605e+00 3.0010211e+00 3.1712557e+00 1.2016443e+00 1.9735224e+00 1.1531953e+00 2.9937162e+00 1.1401191e+00 1.9335528e+00 2.2793947e+00 1.0403116e+00 1.1237940e+00 1.8155572e+00 2.1170640e+00 2.4166673e+00 2.8369211e+00 1.8227568e+00 1.3135522e+00 1.8005404e+00 2.5443612e+00 1.8557670e+00 1.7105814e+00 1.0313359e+00 1.7226330e+00 1.8708183e+00 1.5881025e+00 1.3131802e+00 2.1363271e+00 1.9752912e+00 1.5530527e+00 1.2366099e+00 1.4501583e+00 1.6655594e+00 1.3088083e+00 3.4342562e-01 1.4024091e+00 8.3183672e-01 1.0522594e+00 1.2712749e+00 8.5437498e-01 6.1119558e-01 3.3813251e-01 7.0016860e-01 9.2867113e-01 3.4342562e-01 5.2133179e-01 5.0855778e-01 6.3192325e-01 5.6618864e-01 7.5564478e-01 7.0462844e-01 4.5847767e-01 2.3345511e+00 1.4181033e+00 2.2624227e+00 1.9044571e+00 2.1188381e+00 2.9740725e+00 8.7209348e-01 2.6511588e+00 2.1151751e+00 2.4832720e+00 1.4692287e+00 1.6190709e+00 1.8603115e+00 1.3450304e+00 1.5778323e+00 1.6856949e+00 1.8133657e+00 3.0879393e+00 3.2627356e+00 1.3017553e+00 2.0678448e+00 1.2635708e+00 3.0810441e+00 1.2366675e+00 2.0307764e+00 2.3657459e+00 1.1404856e+00 1.2257611e+00 1.9176961e+00 2.1957105e+00 2.4980314e+00 2.9055191e+00 1.9262438e+00 1.4100098e+00 1.9004485e+00 2.6116431e+00 1.9609333e+00 1.8095574e+00 1.1347620e+00 1.8037909e+00 1.9725464e+00 1.6581521e+00 1.4181033e+00 2.2355461e+00 2.0784269e+00 1.6462102e+00 1.3373104e+00 1.5468618e+00 1.7698041e+00 1.4111252e+00 1.2003596e+00 6.1288055e-01 7.4618926e-01 9.6691372e-01 5.7609230e-01 3.0922892e-01 3.0490481e-01 5.0436965e-01 7.0184453e-01 1.1269424e-01 8.2635069e-01 3.0482299e-01 3.3813251e-01 3.0490481e-01 4.5148429e-01 9.3308891e-01 2.0181667e-01 2.1221982e+00 1.2089191e+00 2.0305682e+00 1.7009400e+00 1.9088256e+00 2.7434081e+00 9.1894698e-01 2.4265154e+00 1.9046790e+00 2.2453370e+00 1.2284047e+00 1.4060413e+00 1.6267848e+00 1.1281352e+00 1.3523310e+00 1.4562730e+00 1.6032169e+00 2.8519346e+00 3.0369970e+00 1.1020600e+00 1.8342569e+00 1.0426638e+00 2.8491513e+00 1.0116721e+00 1.8114932e+00 2.1335035e+00 9.1552373e-01 1.0090312e+00 1.7079369e+00 1.9522117e+00 2.2566913e+00 2.6406601e+00 1.7139238e+00 1.2013529e+00 1.7000137e+00 2.3442222e+00 1.7386523e+00 1.6019500e+00 9.1449234e-01 1.5517970e+00 1.7435092e+00 1.3757183e+00 1.2089191e+00 2.0167186e+00 1.8496945e+00 1.3938438e+00 1.1152390e+00 1.3189663e+00 1.5429659e+00 1.2037520e+00 6.7720957e-01 
7.4262850e-01 7.0911112e-01 7.0633229e-01 1.0011648e+00 1.1020600e+00 7.2036951e-01 5.0477564e-01 1.1005460e+00 1.8106900e+00 9.0166431e-01 9.0192695e-01 9.0055475e-01 8.0055465e-01 2.1027376e+00 1.0003198e+00 1.0225570e+00 3.0474106e-01 1.1299441e+00 5.0517282e-01 7.5564478e-01 1.7513222e+00 1.1055799e+00 1.4140515e+00 7.8895472e-01 1.3181953e+00 5.7608844e-01 4.1315633e-01 8.1156529e-01 4.1317535e-01 8.0004523e-01 7.2044167e-01 5.2524663e-01 1.8787830e+00 1.9768256e+00 5.0001522e-01 9.4912864e-01 4.5148429e-01 1.8635775e+00 3.0915245e-01 7.8611860e-01 1.2373911e+00 3.0922892e-01 3.0922892e-01 5.7609230e-01 1.2089834e+00 1.4324608e+00 1.9471600e+00 6.3912943e-01 3.0017653e-01 5.0043842e-01 1.7149040e+00 8.6084272e-01 4.8391482e-01 3.4080442e-01 9.0668287e-01 8.6225026e-01 9.3424659e-01 3.0474106e-01 9.3861512e-01 9.5646231e-01 7.8935898e-01 3.4085233e-01 5.2491734e-01 7.8940551e-01 3.0482299e-01 6.0948506e-01 1.3000044e+00 9.3308891e-01 4.0243965e-01 5.6371422e-01 4.1212852e-01 7.0000303e-01 5.4219811e-01 1.2105001e+00 3.4342562e-01 3.6256305e-01 3.4085233e-01 8.0008964e-01 1.5005854e+00 4.1420960e-01 1.5358856e+00 6.1990228e-01 1.7849054e+00 1.1528477e+00 1.3788456e+00 2.4261894e+00 5.6370994e-01 2.0884901e+00 1.4655452e+00 1.9390732e+00 1.1074834e+00 1.0434746e+00 1.4340155e+00 6.0605366e-01 9.1554656e-01 1.0782857e+00 1.1897288e+00 2.5394068e+00 2.6516318e+00 8.3183606e-01 1.5694554e+00 5.2201750e-01 2.5386539e+00 9.0192695e-01 1.4157600e+00 1.8949500e+00 8.0097499e-01 7.0548138e-01 1.1935069e+00 1.8443040e+00 2.0853276e+00 2.5816887e+00 1.1989547e+00 9.1424659e-01 1.1138955e+00 2.3468430e+00 1.1963432e+00 1.1270327e+00 6.0365948e-01 1.5143051e+00 1.3938114e+00 1.5079206e+00 6.1990228e-01 1.5829749e+00 1.4450801e+00 1.3189240e+00 9.1132198e-01 1.1152300e+00 1.0159134e+00 6.3309012e-01 7.0096858e-01 1.1002025e+00 4.8852375e-01 9.1024401e-01 8.1112984e-01 4.0127250e-01 8.1117067e-01 1.3486924e+00 7.0633229e-01 4.6440171e-01 5.1257987e-01 5.0517282e-01 1.5260594e+00 6.1288055e-01 1.5131090e+00 7.4335736e-01 1.4549432e+00 1.1020600e+00 1.3036236e+00 2.1691920e+00 1.1527669e+00 1.8444686e+00 1.3309288e+00 1.6567564e+00 6.3925756e-01 8.5617086e-01 1.0458540e+00 9.0668287e-01 8.4540285e-01 8.5586571e-01 1.0039209e+00 2.2782572e+00 2.4540263e+00 1.2012866e+00 1.2440282e+00 6.2656178e-01 2.2782572e+00 7.0556260e-01 1.2101609e+00 1.5639802e+00 6.0219099e-01 4.5148429e-01 1.1079931e+00 1.4142064e+00 1.7087610e+00 2.1412345e+00 1.1115204e+00 6.7720957e-01 1.1281352e+00 1.8646736e+00 1.1282162e+00 1.0010209e+00 4.1315633e-01 1.0172673e+00 1.1401191e+00 9.4532171e-01 7.4335736e-01 1.4134218e+00 1.2440229e+00 8.4786353e-01 9.0557807e-01 7.2440846e-01 9.3308853e-01 6.0964891e-01 8.0296037e-01 1.1055799e+00 1.2124837e+00 1.2014191e+00 6.0000952e-01 9.3755356e-01 1.7874653e+00 1.1024913e+00 1.0032296e+00 1.0031018e+00 5.2201750e-01 1.8640262e+00 1.0088926e+00 1.3452347e+00 9.0417295e-01 1.2040344e+00 9.0168933e-01 1.1133986e+00 1.9046783e+00 1.8005318e+00 1.6009504e+00 1.1056691e+00 1.4335330e+00 5.2167829e-01 6.1990228e-01 8.2418141e-01 1.0118233e+00 1.0151880e+00 8.2458478e-01 8.0051115e-01 2.0076819e+00 2.2050331e+00 9.3329017e-01 1.0426638e+00 1.1020600e+00 2.0062587e+00 4.5847767e-01 1.0087393e+00 1.3009222e+00 5.0855778e-01 6.0202028e-01 9.1471442e-01 1.1019505e+00 1.4044980e+00 1.7386523e+00 9.3351278e-01 4.5783248e-01 9.1892454e-01 1.4407364e+00 1.0151880e+00 8.0093081e-01 7.0088627e-01 7.4269200e-01 1.0142482e+00 8.0250123e-01 9.0417295e-01 1.2189645e+00 1.1269510e+00 8.0879701e-01 
6.1990228e-01 5.6371422e-01 8.6084272e-01 8.0291749e-01 7.8935813e-01 8.0250123e-01 8.0046685e-01 7.0017011e-01 5.2491734e-01 1.3741813e+00 7.0470720e-01 7.4269314e-01 6.7626502e-01 6.0000635e-01 1.4852616e+00 6.3309012e-01 1.6636721e+00 7.5826453e-01 1.5161847e+00 1.2049539e+00 1.4220925e+00 2.2191056e+00 1.4001717e+00 1.9083789e+00 1.4007861e+00 1.7945122e+00 9.6133119e-01 9.1552373e-01 1.1416778e+00 7.7603846e-01 1.1170561e+00 1.1351073e+00 1.1152390e+00 2.3540839e+00 2.5167781e+00 6.0202028e-01 1.3687074e+00 8.0713433e-01 2.3222107e+00 5.7608844e-01 1.3563898e+00 1.6193612e+00 5.7609230e-01 7.3090905e-01 1.2201578e+00 1.4221192e+00 1.7236083e+00 2.1360791e+00 1.2373857e+00 7.1629168e-01 1.2000731e+00 1.7962160e+00 1.3755348e+00 1.1298552e+00 7.2113820e-01 1.0843962e+00 1.3150470e+00 1.0664292e+00 7.5826453e-01 1.5362595e+00 1.4451976e+00 1.0604287e+00 6.7626502e-01 8.9540816e-01 1.2552585e+00 8.0073117e-01 5.0001522e-01 4.1212852e-01 5.6347549e-01 4.0127250e-01 8.7240114e-01 3.0008832e-01 1.2085435e-01 1.2085435e-01 6.0017982e-01 1.1038933e+00 2.0061436e-01 1.9231154e+00 1.0088926e+00 1.8971338e+00 1.5035330e+00 1.7143629e+00 2.6071033e+00 7.2440846e-01 2.2781292e+00 1.7231818e+00 2.0998984e+00 1.0923537e+00 1.2224463e+00 1.4922544e+00 9.3733589e-01 1.1896725e+00 1.2782589e+00 1.4186216e+00 2.7177641e+00 2.8857013e+00 9.6674360e-01 1.6882618e+00 8.5406616e-01 2.7167827e+00 8.6084272e-01 1.6345263e+00 2.0058565e+00 7.5508853e-01 8.1715593e-01 1.5132180e+00 1.8635428e+00 2.1554613e+00 2.5926811e+00 1.5194972e+00 1.0207533e+00 1.5005626e+00 2.3165875e+00 1.5429659e+00 1.4098467e+00 7.2036819e-01 1.4724918e+00 1.5757929e+00 1.3838027e+00 1.0088926e+00 1.8378491e+00 1.6745686e+00 1.2948699e+00 9.4912864e-01 1.1635325e+00 1.3473688e+00 1.0032293e+00 4.0004442e-01 6.9509552e-01 3.0017653e-01 7.1708289e-01 2.2573593e-01 5.0085236e-01 4.0243965e-01 7.0548138e-01 1.0008617e+00 3.0482299e-01 2.0206913e+00 1.1056785e+00 2.0075255e+00 1.6053217e+00 1.8156855e+00 2.7170183e+00 6.3912709e-01 2.3873965e+00 1.8286172e+00 2.2132187e+00 1.2079042e+00 1.3276450e+00 1.6018644e+00 1.0207396e+00 1.2397507e+00 1.3715471e+00 1.5245240e+00 2.8327619e+00 2.9941199e+00 1.0032443e+00 1.7962160e+00 9.3329055e-01 2.8269181e+00 9.6936870e-01 1.7435156e+00 2.1174156e+00 8.6084272e-01 9.2351241e-01 1.6144550e+00 1.9761215e+00 2.2674248e+00 2.7114616e+00 1.6190709e+00 1.1282247e+00 1.6009322e+00 2.4285500e+00 1.6432478e+00 1.5147271e+00 8.2512420e-01 1.5839544e+00 1.6753044e+00 1.4829434e+00 1.1056785e+00 1.9427965e+00 1.7734131e+00 1.3908238e+00 1.0498226e+00 1.2712749e+00 1.4523130e+00 1.1044111e+00 6.0980961e-01 4.1209001e-01 1.1020600e+00 2.0181667e-01 4.0243965e-01 3.0922892e-01 7.0088627e-01 1.4001688e+00 3.0922892e-01 1.6797901e+00 7.8935898e-01 1.7574606e+00 1.2224463e+00 1.4618181e+00 2.4274025e+00 6.3165225e-01 2.0887499e+00 1.4865883e+00 1.9561612e+00 1.0664292e+00 1.0336863e+00 1.3939836e+00 8.2421923e-01 1.2089895e+00 1.1997296e+00 1.1938630e+00 2.5462062e+00 2.6763758e+00 6.4049114e-01 1.5645185e+00 8.0883916e-01 2.5391583e+00 8.3183672e-01 1.4351453e+00 1.8644317e+00 7.4618926e-01 6.9987517e-01 1.2680580e+00 1.7844580e+00 2.0440071e+00 2.5329067e+00 1.2921474e+00 8.5440680e-01 1.2036924e+00 2.2838099e+00 1.3737025e+00 1.1587585e+00 6.4620889e-01 1.4491800e+00 1.4507713e+00 1.4583848e+00 7.8935898e-01 1.6277433e+00 1.5384791e+00 1.3150470e+00 8.7209348e-01 1.0788651e+00 1.2179890e+00 7.4954884e-01 6.1135434e-01 1.3790270e+00 5.2491734e-01 4.5147187e-01 4.5080200e-01 3.0026460e-01 1.6179159e+00 
5.2167829e-01 1.4543196e+00 5.6838732e-01 1.3502290e+00 1.0008620e+00 1.2192919e+00 2.0611274e+00 1.2013529e+00 1.7369589e+00 1.2053003e+00 1.5767956e+00 6.3925756e-01 7.1779518e-01 9.6131279e-01 6.4620889e-01 1.0032443e+00 9.3331138e-01 9.0279223e-01 2.1713885e+00 2.3476282e+00 8.0245903e-01 1.1755517e+00 6.3322667e-01 2.1693131e+00 4.2362917e-01 1.1187430e+00 1.4544336e+00 4.0246123e-01 4.1209001e-01 1.0208844e+00 1.3018144e+00 1.5969056e+00 2.0303761e+00 1.0427944e+00 5.0085236e-01 1.0008465e+00 1.7574039e+00 1.1274284e+00 9.0166476e-01 4.0125062e-01 9.3437551e-01 1.1319099e+00 9.6935134e-01 5.6838732e-01 1.3309288e+00 1.2428507e+00 9.2745734e-01 5.7630313e-01 6.8160885e-01 9.6672602e-01 5.2167208e-01 8.5440680e-01 2.2608083e-01 4.0125062e-01 3.0490481e-01 4.2270142e-01 1.0207262e+00 2.0181667e-01 2.0282636e+00 1.1133895e+00 1.9386586e+00 1.6012719e+00 1.8114342e+00 2.6515677e+00 9.0999313e-01 2.3322945e+00 1.8060515e+00 2.1578375e+00 1.1447365e+00 1.3085752e+00 1.5359734e+00 1.0426516e+00 1.3018144e+00 1.3779960e+00 1.5044724e+00 2.7627283e+00 2.9432651e+00 1.0010209e+00 1.7447170e+00 9.6576136e-01 2.7579768e+00 9.1892454e-01 1.7159977e+00 2.0420308e+00 8.2635069e-01 9.1576742e-01 1.6105700e+00 1.8662195e+00 2.1696258e+00 2.5680120e+00 1.6184785e+00 1.1020600e+00 1.6000183e+00 2.2731185e+00 1.6529028e+00 1.5029725e+00 8.2635069e-01 1.4699000e+00 1.6570292e+00 1.3301857e+00 1.1133895e+00 1.9214944e+00 1.7646791e+00 1.3272142e+00 1.0235120e+00 1.2275665e+00 1.4621584e+00 1.1060939e+00 9.1576742e-01 9.6133119e-01 9.4532171e-01 1.2662318e+00 3.0490481e-01 8.6084272e-01 2.7230933e+00 1.8083405e+00 2.7192500e+00 2.3152780e+00 2.5278895e+00 3.4307167e+00 1.2089253e+00 3.1023107e+00 2.5446557e+00 2.9238544e+00 1.9071235e+00 2.0445123e+00 2.3113306e+00 1.7148932e+00 1.8686471e+00 2.0692591e+00 2.2408459e+00 3.5448121e+00 3.7102716e+00 1.7134856e+00 2.5076804e+00 1.6187837e+00 3.5398901e+00 1.6780493e+00 2.4594169e+00 2.8277283e+00 1.5698091e+00 1.6365650e+00 2.3270565e+00 2.6739856e+00 2.9710335e+00 3.3954286e+00 2.3304578e+00 1.8446316e+00 2.3054869e+00 3.1050823e+00 2.3405162e+00 2.2288004e+00 1.5327217e+00 2.2740189e+00 2.3840998e+00 2.1122339e+00 1.8083405e+00 2.6588338e+00 2.4791402e+00 2.0688078e+00 1.7632513e+00 1.9828965e+00 2.1428811e+00 1.8095574e+00 3.0017653e-01 2.0061436e-01 6.0017982e-01 1.2012991e+00 1.2085435e-01 1.8301371e+00 9.1424659e-01 1.8223693e+00 1.4049093e+00 1.6190709e+00 2.5271432e+00 7.0556260e-01 2.1953731e+00 1.6303102e+00 2.0261311e+00 1.0363096e+00 1.1330692e+00 1.4229011e+00 8.5406674e-01 1.1527746e+00 1.2106302e+00 1.3261864e+00 2.6410980e+00 2.8004332e+00 8.1117067e-01 1.6146557e+00 7.8886054e-01 2.6375763e+00 7.9824795e-01 1.5471213e+00 1.9317133e+00 6.9509552e-01 7.3155911e-01 1.4182194e+00 1.8031267e+00 2.0887452e+00 2.5422790e+00 1.4267527e+00 9.3308891e-01 1.4006149e+00 2.2706274e+00 1.4614246e+00 1.3141581e+00 6.4049114e-01 1.4230277e+00 1.5004249e+00 1.3669148e+00 9.1424659e-01 1.7490906e+00 1.5981930e+00 1.2553121e+00 8.7212232e-01 1.0924484e+00 1.2731059e+00 9.0557807e-01 1.1269424e-01 5.0002283e-01 1.2049541e+00 2.0121983e-01 1.8447840e+00 9.3329055e-01 1.7901165e+00 1.4035225e+00 1.6222582e+00 2.4980210e+00 8.1757693e-01 2.1693127e+00 1.6187837e+00 2.0049100e+00 1.0151397e+00 1.1261381e+00 1.3938113e+00 9.0657539e-01 1.2362756e+00 1.2472959e+00 1.3154932e+00 2.6088344e+00 2.7785257e+00 9.0207914e-01 1.5972548e+00 8.5406674e-01 2.6071034e+00 7.7652636e-01 1.5358856e+00 1.8953567e+00 6.9518117e-01 7.4612718e-01 1.4220925e+00 1.7511588e+00 
2.0440071e+00 2.4804818e+00 1.4362913e+00 9.1449234e-01 1.4003402e+00 2.2077377e+00 1.4867147e+00 1.3085752e+00 6.7720780e-01 1.3734975e+00 1.5100598e+00 1.3269962e+00 9.3329055e-01 1.7441015e+00 1.6143613e+00 1.2552584e+00 8.7796615e-01 1.0782751e+00 1.3029195e+00 9.1424659e-01 5.0000761e-01 1.2040406e+00 1.1269424e-01 1.8289847e+00 9.1424701e-01 1.7872748e+00 1.4023776e+00 1.6144390e+00 2.4974488e+00 8.0533198e-01 2.1691714e+00 1.6179842e+00 1.9948417e+00 9.9013884e-01 1.1186586e+00 1.3842454e+00 8.5583357e-01 1.1527671e+00 1.1990152e+00 1.3139296e+00 2.6085083e+00 2.7775771e+00 8.5440680e-01 1.5835478e+00 7.8886139e-01 2.6068470e+00 7.5508853e-01 1.5300146e+00 1.8950932e+00 6.5712813e-01 7.2036951e-01 1.4134189e+00 1.7511120e+00 2.0435901e+00 2.4807480e+00 1.4220898e+00 9.1424701e-01 1.4002005e+00 2.2048860e+00 1.4562730e+00 1.3069754e+00 6.3309258e-01 1.3632211e+00 1.4815986e+00 1.2921474e+00 9.1424701e-01 1.7351894e+00 1.5836236e+00 1.2085436e+00 8.4725834e-01 1.0597879e+00 1.2653025e+00 9.0508756e-01 1.3743342e+00 5.0043084e-01 1.7369589e+00 8.2635069e-01 1.6144390e+00 1.3008770e+00 1.5131090e+00 2.3226028e+00 1.3004854e+00 2.0107294e+00 1.5010034e+00 1.8390199e+00 8.5471446e-01 1.0087539e+00 1.2223855e+00 8.0073117e-01 1.1286018e+00 1.1270411e+00 1.2013529e+00 2.4290388e+00 2.6198428e+00 7.8895472e-01 1.4363167e+00 7.7598796e-01 2.4266979e+00 6.3178782e-01 1.4100098e+00 1.7134977e+00 5.6347549e-01 6.3165225e-01 1.3130978e+00 1.5237265e+00 1.8289379e+00 2.1979419e+00 1.3253497e+00 8.0004602e-01 1.3000455e+00 1.9028805e+00 1.3748188e+00 1.2012991e+00 5.6371422e-01 1.1400420e+00 1.3748220e+00 1.0597992e+00 8.2635069e-01 1.6184929e+00 1.4858469e+00 1.0797702e+00 7.4612830e-01 9.3329055e-01 1.1910002e+00 8.0923926e-01 1.1056785e+00 3.0089448e+00 2.1019570e+00 2.9563162e+00 2.6052626e+00 2.8107271e+00 3.6717374e+00 1.5012719e+00 3.3522198e+00 2.8186527e+00 3.1596417e+00 2.1362161e+00 2.3151198e+00 2.5461024e+00 2.0036629e+00 2.1224665e+00 2.3234228e+00 2.5150159e+00 3.7801779e+00 3.9623034e+00 2.0033816e+00 2.7467405e+00 1.9044216e+00 3.7784933e+00 1.9231092e+00 2.7237438e+00 3.0623796e+00 1.8186729e+00 1.9089573e+00 2.6097166e+00 2.8846684e+00 3.1885494e+00 3.5706709e+00 2.6109888e+00 2.1139043e+00 2.6017555e+00 3.2706949e+00 2.6138780e+00 2.5099937e+00 1.8069857e+00 2.4748863e+00 2.6338874e+00 2.2385678e+00 2.1019570e+00 2.9251726e+00 2.7321685e+00 2.2661994e+00 2.0190735e+00 2.2288464e+00 2.4131370e+00 2.1020441e+00 1.9226845e+00 1.0087252e+00 1.8684939e+00 1.5017097e+00 1.7108631e+00 2.5816562e+00 8.0533198e-01 2.2563154e+00 1.7134977e+00 2.0770423e+00 1.0604287e+00 1.2124777e+00 1.4620239e+00 9.3329017e-01 1.1896594e+00 1.2705641e+00 1.4098496e+00 2.6923501e+00 2.8659663e+00 9.1449234e-01 1.6637458e+00 8.5403428e-01 2.6902417e+00 8.3183672e-01 1.6225614e+00 1.9757175e+00 7.3084048e-01 8.1117067e-01 1.5097082e+00 1.8197097e+00 2.1169795e+00 2.5408329e+00 1.5160570e+00 1.0087393e+00 1.5001233e+00 2.2573596e+00 1.5423651e+00 1.4049376e+00 7.1708289e-01 1.4229011e+00 1.5611429e+00 1.3142952e+00 1.0087252e+00 1.8271493e+00 1.6639408e+00 1.2552635e+00 9.2768675e-01 1.1400420e+00 1.3479052e+00 1.0031018e+00 9.3184922e-01 8.0291749e-01 7.0910969e-01 3.4342562e-01 1.3028005e+00 1.6474201e+00 1.0216374e+00 8.5586571e-01 9.0026543e-01 9.0508756e-01 7.7598796e-01 5.7832449e-01 1.0522594e+00 9.0999313e-01 7.0008735e-01 7.1708289e-01 1.4049376e+00 1.4220925e+00 1.2553121e+00 6.0202028e-01 1.1170561e+00 1.4055109e+00 1.1186497e+00 4.5783248e-01 9.3306769e-01 1.2101609e+00 1.1134939e+00 
5.3914287e-01 1.0144117e+00 1.1074742e+00 1.6007365e+00 5.2491734e-01 1.0797700e+00 1.1139044e+00 1.4000349e+00 4.0004442e-01 7.1629303e-01 1.2090477e+00 6.8170466e-01 4.5148429e-01 9.1427000e-01 9.3184922e-01 5.0043842e-01 4.1209001e-01 8.0296037e-01 1.0498226e+00 8.0928056e-01 6.0018299e-01 9.3446811e-01 1.3131410e+00 5.6371422e-01 7.8985507e-01 1.8949500e+00 9.1427000e-01 1.5639785e+00 9.3308891e-01 1.4501583e+00 7.1621748e-01 6.0017665e-01 1.0010209e+00 2.0181667e-01 5.0000761e-01 6.3925756e-01 7.0548283e-01 2.0162299e+00 2.0885102e+00 5.2167829e-01 1.1079931e+00 2.2608083e-01 2.0057464e+00 5.0043084e-01 9.2747919e-01 1.4186217e+00 4.1212852e-01 3.4085233e-01 6.3178782e-01 1.4043632e+00 1.6175925e+00 2.1298991e+00 6.3309258e-01 5.2133179e-01 5.6595908e-01 1.9078843e+00 7.4418186e-01 6.1830764e-01 3.4085233e-01 1.1006468e+00 9.1132198e-01 1.1010711e+00 0.0000000e+00 1.0458540e+00 9.3984267e-01 9.0166476e-01 5.0043084e-01 7.0088627e-01 7.0993998e-01 3.0017653e-01 8.0093160e-01 6.0000635e-01 7.1621613e-01 2.2268632e+00 4.1317535e-01 5.2491734e-01 6.0964891e-01 8.2421923e-01 7.4335736e-01 4.1209001e-01 1.4186217e+00 1.3131410e+00 7.4275547e-01 6.1119267e-01 9.1566538e-01 1.0095513e+00 1.1796101e+00 2.5399984e-01 1.5237074e+00 8.2421923e-01 1.0429127e+00 4.1315633e-01 3.0490481e-01 1.1528553e+00 1.1270325e+00 7.0096708e-01 5.0001522e-01 3.1328089e-01 9.0657583e-01 7.0096858e-01 9.1568820e-01 1.0216374e+00 6.0035305e-01 8.0337471e-01 7.0548283e-01 1.2396937e+00 5.0043084e-01 4.2270142e-01 8.0008964e-01 1.3131410e+00 3.0915245e-01 4.5847767e-01 7.0470720e-01 9.6936870e-01 7.4262964e-01 9.0645118e-01 1.2190260e+00 4.0246123e-01 1.3450652e+00 1.4544312e+00 1.0207258e+00 4.5147187e-01 9.6501813e-01 5.0517282e-01 3.0490481e-01 5.0437695e-01 6.8170466e-01 6.5712813e-01 5.0855778e-01 2.0121983e-01 1.4695463e+00 1.5267750e+00 7.4395693e-01 6.3309258e-01 7.8890806e-01 1.4542932e+00 7.0008432e-01 4.5784410e-01 9.0166431e-01 8.0000160e-01 7.0008584e-01 3.0017653e-01 9.0005094e-01 1.1019505e+00 1.6144405e+00 4.0004442e-01 5.0436965e-01 4.1315633e-01 1.4012283e+00 6.3164729e-01 2.0121983e-01 8.0046685e-01 6.0219099e-01 6.0964597e-01 6.5724028e-01 5.6371422e-01 5.6838732e-01 7.0911112e-01 5.3914287e-01 6.0948506e-01 4.0246123e-01 5.6371422e-01 5.2133179e-01 1.1281267e+00 1.6745375e+00 8.1112984e-01 5.2167208e-01 7.4395693e-01 7.0016860e-01 5.0855778e-01 3.3813251e-01 9.0659977e-01 7.8895472e-01 5.0043842e-01 4.1209001e-01 1.2528048e+00 1.3020492e+00 9.3861512e-01 4.0127250e-01 1.0142766e+00 1.2362810e+00 9.0168933e-01 3.0490481e-01 7.0478886e-01 1.0010209e+00 9.0279223e-01 2.2608083e-01 7.4262850e-01 9.0055475e-01 1.4109657e+00 2.2573593e-01 7.8895472e-01 8.0492246e-01 1.2000668e+00 4.0363334e-01 4.1212852e-01 1.0039060e+00 4.5080200e-01 2.4195741e-01 7.0462844e-01 7.8985507e-01 3.0490481e-01 3.4085233e-01 6.0017982e-01 8.0928056e-01 6.0017665e-01 4.5784410e-01 7.4612718e-01 2.7992301e+00 3.6259865e-01 9.6953662e-01 6.4620889e-01 1.5401330e+00 1.4140789e+00 1.1281265e+00 2.0058560e+00 1.8949500e+00 1.4140515e+00 1.2396937e+00 8.0000239e-01 4.1317535e-01 1.8064092e+00 9.3310976e-01 2.1167364e+00 2.0181667e-01 1.7570696e+00 1.0143975e+00 6.1135434e-01 1.8661434e+00 1.8197089e+00 1.2632995e+00 8.1112909e-01 5.0126466e-01 8.0051115e-01 1.2632996e+00 1.5975200e+00 1.5266891e+00 5.0043084e-01 1.3452695e+00 1.3018553e+00 1.9314575e+00 1.2089192e+00 1.0777307e+00 1.5030978e+00 1.8949500e+00 8.5409862e-01 1.0151880e+00 1.4180463e+00 1.6742781e+00 1.4542907e+00 1.4853863e+00 1.8197089e+00 2.4725511e+00 
1.8443040e+00 2.3518103e+00 1.6032169e+00 1.5066818e+00 1.9080163e+00 8.0923851e-01 9.4532171e-01 1.5109394e+00 1.6179000e+00 2.9132779e+00 2.9705619e+00 1.1020600e+00 2.0185049e+00 7.0633229e-01 2.9085831e+00 1.4001717e+00 1.8310931e+00 2.3325127e+00 1.3000908e+00 1.2016381e+00 1.5402579e+00 2.3143334e+00 2.5314248e+00 3.0393618e+00 1.5405402e+00 1.4018011e+00 1.3018553e+00 2.8185850e+00 1.4728279e+00 1.5248833e+00 1.1020506e+00 2.0036931e+00 1.8191683e+00 2.0009584e+00 9.1427000e-01 1.9534067e+00 1.8336293e+00 1.8020058e+00 1.4006178e+00 1.6026130e+00 1.3506710e+00 1.0116724e+00 6.3912709e-01 7.8890806e-01 1.2190319e+00 1.0776296e+00 8.0923926e-01 1.6740888e+00 1.5650163e+00 1.0798806e+00 9.0155438e-01 9.0417295e-01 6.4049114e-01 1.4685147e+00 6.4049114e-01 1.7843537e+00 4.5148429e-01 1.4324350e+00 6.8261201e-01 3.3813251e-01 1.5401311e+00 1.4852570e+00 9.3329055e-01 5.0043842e-01 2.0181667e-01 9.1424701e-01 9.3424697e-01 1.2633467e+00 1.2093243e+00 5.2167829e-01 1.0313359e+00 9.6574336e-01 1.5965767e+00 9.0168933e-01 7.7603846e-01 1.2016443e+00 1.5639785e+00 5.7832449e-01 7.7882758e-01 1.1074742e+00 1.3452311e+00 1.1281352e+00 1.1558746e+00 1.4852570e+00 1.1153247e+00 7.8895472e-01 5.0477564e-01 5.0855778e-01 1.0426636e+00 9.4532171e-01 7.3155911e-01 5.0476836e-01 1.3669148e+00 1.1910003e+00 8.5471446e-01 7.1629303e-01 1.1528553e+00 1.0777411e+00 9.0142636e-01 8.0046685e-01 7.1629168e-01 1.0032293e+00 9.1892413e-01 3.6452132e-01 5.6370994e-01 7.0176271e-01 1.4157327e+00 4.2362917e-01 7.0633229e-01 6.0964891e-01 1.0062544e+00 9.1554656e-01 6.0365948e-01 1.0235118e+00 6.1135434e-01 6.7626502e-01 7.5508853e-01 9.3308891e-01 7.1621884e-01 8.5403428e-01 6.5712608e-01 8.0245824e-01 6.3192325e-01 9.1132198e-01 8.6051414e-01 1.0242692e+00 1.0232576e+00 6.8685125e-01 1.5761415e+00 1.4407364e+00 9.0296858e-01 8.3689956e-01 6.3322667e-01 1.0451812e+00 1.5490134e+00 4.5847767e-01 1.6529028e+00 8.3916809e-01 1.2749306e+00 5.4219811e-01 7.0462697e-01 1.3611955e+00 1.3103292e+00 9.0792879e-01 9.1446896e-01 8.2421853e-01 7.1708289e-01 9.0683128e-01 1.1954899e+00 1.2957636e+00 6.3178534e-01 9.0508756e-01 8.7796615e-01 1.4198077e+00 7.2113820e-01 6.0427481e-01 1.0032443e+00 1.4501583e+00 4.5216167e-01 5.2491131e-01 9.1894698e-01 1.2738390e+00 9.4912864e-01 1.0207533e+00 1.3523292e+00 5.0043842e-01 4.1317535e-01 8.5403428e-01 7.0910969e-01 3.0482299e-01 4.0243965e-01 1.6491696e+00 1.8289467e+00 1.0060994e+00 6.1119267e-01 9.0142636e-01 1.6484371e+00 5.0126466e-01 6.0018299e-01 9.3308853e-01 4.2362917e-01 4.0363334e-01 5.2133802e-01 7.9153339e-01 1.0782107e+00 1.5275160e+00 5.2167829e-01 5.2167208e-01 6.9987517e-01 1.2633516e+00 5.2201750e-01 4.0127250e-01 5.0517282e-01 4.1212852e-01 5.2167829e-01 4.1210927e-01 7.1621748e-01 8.0093081e-01 6.3178782e-01 3.0922892e-01 7.0008735e-01 2.0061436e-01 3.6452132e-01 6.0035305e-01 4.1420960e-01 7.0096858e-01 6.3178782e-01 5.2132556e-01 3.0490481e-01 1.5634961e+00 1.6740875e+00 5.4219811e-01 5.8750389e-01 8.0245903e-01 1.5263478e+00 4.0004442e-01 6.1135434e-01 8.6051471e-01 5.0043842e-01 4.2270142e-01 3.0482299e-01 8.0967961e-01 1.0426513e+00 1.5757929e+00 3.3813251e-01 4.0127250e-01 5.0855778e-01 1.3133662e+00 7.1700909e-01 4.0125062e-01 5.2491734e-01 5.2167829e-01 5.2838320e-01 5.3943256e-01 6.0017665e-01 6.4620889e-01 6.8261201e-01 4.2270142e-01 3.0482299e-01 3.0026460e-01 7.0470867e-01 5.0477564e-01 1.1038840e+00 1.0010209e+00 4.0363334e-01 3.3808272e-01 1.2528049e+00 1.4182049e+00 9.2033101e-01 2.4195741e-01 1.2036925e+00 1.2362756e+00 6.3451734e-01 
[... elided: auto-generated SciPy pdist reference values, vendored into the diff by committing project/venv ...]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt
new file mode 100644
index 0000000..3e2759d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt
@@ -0,0 +1 @@
+[... elided: one long line of standardized-Euclidean pairwise-distance test values for the iris dataset ...]
4.1799642e+00 4.3857400e+00 3.3769078e+00 3.2906843e+00 4.0034548e+00 4.1917183e+00 4.6854597e+00 5.4444866e+00 4.0904298e+00 3.2962678e+00 3.4250783e+00 5.1569386e+00 4.2213314e+00 3.6582637e+00 3.2059261e+00 4.1937040e+00 4.3826532e+00 4.2786320e+00 3.4290419e+00 4.4549850e+00 4.5270304e+00 4.1816268e+00 3.7777251e+00 3.7924144e+00 4.0173731e+00 3.2613828e+00 9.2006504e-01 3.3742167e-01 8.6080744e-01 5.0621589e-01 7.0646671e-01 2.1664244e+00 7.2679299e-01 8.9712099e-01 1.4681660e+00 5.4873947e-01 1.4074199e+00 4.9617437e-01 1.2206236e+00 2.5698045e-01 3.4986942e+00 3.0427234e+00 3.5520531e+00 3.0444864e+00 3.2783159e+00 2.6723101e+00 3.1333743e+00 2.4360210e+00 3.1627463e+00 2.4904253e+00 3.2338077e+00 2.6808015e+00 3.2240677e+00 2.9412073e+00 2.2206950e+00 3.1669559e+00 2.6716144e+00 2.4623998e+00 3.7173707e+00 2.6198784e+00 3.1208539e+00 2.6854361e+00 3.5171200e+00 2.8753360e+00 2.9157449e+00 3.1157529e+00 3.4945103e+00 3.5956983e+00 2.8873581e+00 2.3297122e+00 2.7075732e+00 2.6220688e+00 2.5143128e+00 3.3225169e+00 2.6164571e+00 2.9213819e+00 3.3323415e+00 3.4842326e+00 2.3487772e+00 2.7507828e+00 2.6991998e+00 2.8571158e+00 2.6614903e+00 2.6122501e+00 2.6121387e+00 2.3527202e+00 2.4822998e+00 2.7826627e+00 2.2477567e+00 2.5188545e+00 4.3590737e+00 3.4800724e+00 4.4652192e+00 3.6820633e+00 4.1423404e+00 5.0632361e+00 3.1594575e+00 4.5764429e+00 4.2442248e+00 4.9703970e+00 3.7054123e+00 3.8144340e+00 4.1322520e+00 3.6772717e+00 3.8704422e+00 3.9786899e+00 3.7187193e+00 5.3973273e+00 5.5276243e+00 3.7838435e+00 4.3978718e+00 3.3669786e+00 5.1732410e+00 3.5478651e+00 4.1195682e+00 4.3422158e+00 3.3925731e+00 3.2818178e+00 4.0157845e+00 4.1753467e+00 4.6824968e+00 5.3318394e+00 4.0983108e+00 3.3321312e+00 3.5171976e+00 5.1116177e+00 4.1480571e+00 3.6395566e+00 3.1983718e+00 4.1451783e+00 4.3355345e+00 4.2161052e+00 3.4800724e+00 4.4037157e+00 4.4559384e+00 4.1399085e+00 3.8303307e+00 3.7678377e+00 3.9434740e+00 3.2672961e+00 1.1857824e+00 1.7590894e+00 5.4715569e-01 6.1787077e-01 3.0224093e+00 1.4977817e+00 8.1744862e-01 9.4676850e-01 1.4369223e+00 8.6079202e-01 1.2896554e+00 5.3286499e-01 7.6195008e-01 3.1536923e+00 2.8019556e+00 3.2823965e+00 3.4754319e+00 3.2348803e+00 2.8339836e+00 2.8678686e+00 3.0569237e+00 3.0422163e+00 2.8599505e+00 3.8711727e+00 2.6769819e+00 3.5769114e+00 2.9369339e+00 2.3887701e+00 2.9172679e+00 2.7450499e+00 2.6744413e+00 3.9868196e+00 2.9825819e+00 3.0070640e+00 2.7478281e+00 3.6492544e+00 2.9260177e+00 2.8398296e+00 2.9417237e+00 3.3879693e+00 3.4191352e+00 2.9103957e+00 2.6495864e+00 3.1359829e+00 3.0635119e+00 2.7246726e+00 3.4310966e+00 2.7450499e+00 2.6593850e+00 3.0929063e+00 3.7090709e+00 2.4372581e+00 3.1206171e+00 3.0186562e+00 2.7973689e+00 2.9151879e+00 3.2261028e+00 2.8631702e+00 2.4096686e+00 2.5984928e+00 2.7564382e+00 2.8056103e+00 2.6945402e+00 4.1622883e+00 3.6243461e+00 4.2495237e+00 3.6308369e+00 4.0200378e+00 4.7940036e+00 3.6050689e+00 4.3664479e+00 4.2800934e+00 4.5553898e+00 3.4840330e+00 3.8323637e+00 3.9571437e+00 3.9163572e+00 3.9605759e+00 3.7909588e+00 3.5846709e+00 4.8756388e+00 5.3862972e+00 4.0807979e+00 4.1385728e+00 3.5138167e+00 4.9592897e+00 3.5910983e+00 3.8379532e+00 4.0230039e+00 3.4134019e+00 3.2269518e+00 3.9906410e+00 3.9261146e+00 4.4982182e+00 4.7746016e+00 4.0736767e+00 3.3286256e+00 3.6393910e+00 4.8333249e+00 3.9033387e+00 3.4776516e+00 3.1661860e+00 3.9124707e+00 4.1473618e+00 3.9899548e+00 3.6243461e+00 4.1607944e+00 4.1969547e+00 3.9859042e+00 3.9511939e+00 3.6382505e+00 3.7066628e+00 3.2552942e+00 
6.6918102e-01 7.4445830e-01 9.7322023e-01 1.9284841e+00 6.6918102e-01 1.1393372e+00 1.6942803e+00 3.7371902e-01 1.6386105e+00 4.5257749e-01 1.4715172e+00 4.9772204e-01 3.5602797e+00 3.0968979e+00 3.5933352e+00 2.8998722e+00 3.2656298e+00 2.6029746e+00 3.1974447e+00 2.2445103e+00 3.1601923e+00 2.3946216e+00 3.0209665e+00 2.6867317e+00 3.0775658e+00 2.9161244e+00 2.1946278e+00 3.2137635e+00 2.6502891e+00 2.3652711e+00 3.6096140e+00 2.4893065e+00 3.1578003e+00 2.6568473e+00 3.4426472e+00 2.8187901e+00 2.9128857e+00 3.1418202e+00 3.4847097e+00 3.6205958e+00 2.8694312e+00 2.2223283e+00 2.5588203e+00 2.4651138e+00 2.4413293e+00 3.2621861e+00 2.5834128e+00 2.9995857e+00 3.3733791e+00 3.3817876e+00 2.3263008e+00 2.6305758e+00 2.5756071e+00 2.8533918e+00 2.5683063e+00 2.4187322e+00 2.5258216e+00 2.3250308e+00 2.4413618e+00 2.7691536e+00 2.1006933e+00 2.4608850e+00 4.4119896e+00 3.4290419e+00 4.4942656e+00 3.6650933e+00 4.1590662e+00 5.0899438e+00 3.0333619e+00 4.5799469e+00 4.1882413e+00 5.0726081e+00 3.7613721e+00 3.7859992e+00 4.1623715e+00 3.6029758e+00 3.8608065e+00 4.0352348e+00 3.7266849e+00 5.5043247e+00 5.5172732e+00 3.6569442e+00 4.4568115e+00 3.3324016e+00 5.1765224e+00 3.5192065e+00 4.1799642e+00 4.3857400e+00 3.3769078e+00 3.2906843e+00 4.0034548e+00 4.1917183e+00 4.6854597e+00 5.4444866e+00 4.0904298e+00 3.2962678e+00 3.4250783e+00 5.1569386e+00 4.2213314e+00 3.6582637e+00 3.2059261e+00 4.1937040e+00 4.3826532e+00 4.2786320e+00 3.4290419e+00 4.4549850e+00 4.5270304e+00 4.1816268e+00 3.7777251e+00 3.7924144e+00 4.0173731e+00 3.2613828e+00 1.2563834e+00 1.3681903e+00 1.6242170e+00 4.6126066e-01 1.4691503e+00 2.0743925e+00 5.0370871e-01 2.0365895e+00 5.2374483e-01 1.9494772e+00 1.0034646e+00 4.0320101e+00 3.4981749e+00 4.0289839e+00 2.9648238e+00 3.6116412e+00 2.8362334e+00 3.5807825e+00 2.1594400e+00 3.5619276e+00 2.4608850e+00 2.9150653e+00 2.9806848e+00 3.2524084e+00 3.2332049e+00 2.4351674e+00 3.6506644e+00 2.8794131e+00 2.6371069e+00 3.7842148e+00 2.6442387e+00 3.4386758e+00 2.9743384e+00 3.6958304e+00 3.1396988e+00 3.2947222e+00 3.5521670e+00 3.8756118e+00 3.9969338e+00 3.1587328e+00 2.4432066e+00 2.6604221e+00 2.5745994e+00 2.6880360e+00 3.4951118e+00 2.7657429e+00 3.3524671e+00 3.7924883e+00 3.6104716e+00 2.5876530e+00 2.7410968e+00 2.7238850e+00 3.1914275e+00 2.7871336e+00 2.3484202e+00 2.7125180e+00 2.6235600e+00 2.7012637e+00 3.1219910e+00 2.0888843e+00 2.6969064e+00 4.6820911e+00 3.5968849e+00 4.8607426e+00 3.9563487e+00 4.4501699e+00 5.4913616e+00 2.9743611e+00 4.9743359e+00 4.4659463e+00 5.4618868e+00 4.1043393e+00 4.0513907e+00 4.5016468e+00 3.7087600e+00 4.0024694e+00 4.3310092e+00 4.0611789e+00 5.9599083e+00 5.8632763e+00 3.7995759e+00 4.8081465e+00 3.4697005e+00 5.5699347e+00 3.7817852e+00 4.5398887e+00 4.8101536e+00 3.6425657e+00 3.5739550e+00 4.2642554e+00 4.6155808e+00 5.0696209e+00 5.9318766e+00 4.3420618e+00 3.6079861e+00 3.6711700e+00 5.5546786e+00 4.5127918e+00 3.9935485e+00 3.4733020e+00 4.5569739e+00 4.6922818e+00 4.6236700e+00 3.5968849e+00 4.7939394e+00 4.8471780e+00 4.4913725e+00 3.9942507e+00 4.1085491e+00 4.3067090e+00 3.5093006e+00 3.1271814e-01 2.6440626e+00 9.6964683e-01 5.8796666e-01 9.8545402e-01 1.0013399e+00 9.2426065e-01 7.6195008e-01 7.3283576e-01 2.6643250e-01 3.3524935e+00 2.9103383e+00 3.4378505e+00 3.2794093e+00 3.2805279e+00 2.7218307e+00 2.9677934e+00 2.7417113e+00 3.1265865e+00 2.6350666e+00 3.5810092e+00 2.6509959e+00 3.4564670e+00 2.9240118e+00 2.2778214e+00 3.0636652e+00 2.6473174e+00 2.5673371e+00 3.9008585e+00 2.8131786e+00 
3.0066012e+00 2.7310040e+00 3.6088006e+00 2.8947302e+00 2.8966017e+00 3.0506196e+00 3.4785143e+00 3.5188926e+00 2.8816131e+00 2.5125550e+00 2.9397900e+00 2.8645995e+00 2.6245231e+00 3.3639057e+00 2.6028733e+00 2.7271822e+00 3.2253861e+00 3.6489825e+00 2.3376510e+00 2.9371604e+00 2.8382974e+00 2.8051321e+00 2.8006021e+00 2.9309089e+00 2.7184808e+00 2.3312464e+00 2.5047936e+00 2.7731354e+00 2.5341497e+00 2.5862794e+00 4.2119757e+00 3.5278864e+00 4.3705395e+00 3.6366115e+00 4.0640736e+00 4.9516709e+00 3.3348040e+00 4.4927271e+00 4.2867968e+00 4.7459453e+00 3.5773144e+00 3.8303307e+00 4.0501276e+00 3.7856794e+00 3.8862115e+00 3.8584574e+00 3.6392860e+00 5.1247727e+00 5.4955777e+00 3.9594564e+00 4.2633399e+00 3.3993739e+00 5.1011948e+00 3.5798230e+00 3.9561606e+00 4.1885925e+00 3.4019138e+00 3.2277183e+00 3.9971830e+00 4.0727212e+00 4.6247900e+00 5.0557045e+00 4.0800856e+00 3.3285999e+00 3.5685643e+00 5.0078455e+00 3.9761694e+00 3.5324648e+00 3.1505332e+00 4.0358238e+00 4.2334406e+00 4.1156691e+00 3.5278864e+00 4.2682695e+00 4.3053166e+00 4.0686429e+00 3.9122205e+00 3.6972894e+00 3.7712394e+00 3.2160302e+00 2.8326674e+00 1.0103954e+00 4.2829723e-01 7.9126749e-01 1.1795364e+00 7.3442235e-01 8.5582452e-01 6.1158310e-01 4.8284931e-01 3.4789406e+00 3.0164286e+00 3.5708825e+00 3.4760111e+00 3.4435701e+00 2.8856827e+00 3.0483404e+00 2.9286177e+00 3.2959553e+00 2.7769569e+00 3.7899650e+00 2.7721706e+00 3.6919547e+00 3.0773843e+00 2.4199353e+00 3.1984671e+00 2.7597977e+00 2.7743820e+00 4.1049898e+00 3.0189963e+00 3.0754044e+00 2.9033794e+00 3.7972498e+00 3.0781443e+00 3.0628742e+00 3.1980682e+00 3.6529321e+00 3.6479042e+00 3.0224061e+00 2.7237896e+00 3.1475538e+00 3.0809334e+00 2.8106441e+00 3.5217353e+00 2.7064382e+00 2.7753422e+00 3.3543209e+00 3.8636687e+00 2.4678110e+00 3.1212622e+00 3.0250044e+00 2.9444840e+00 2.9956969e+00 3.1281937e+00 2.8892226e+00 2.4772050e+00 2.6547819e+00 2.9364676e+00 2.7130802e+00 2.7488632e+00 4.2524457e+00 3.6566905e+00 4.4856620e+00 3.7659004e+00 4.1610154e+00 5.0768465e+00 3.4623926e+00 4.6393183e+00 4.4611186e+00 4.7773180e+00 3.6552032e+00 3.9746118e+00 4.1574253e+00 3.9234139e+00 3.9686223e+00 3.9172103e+00 3.7603950e+00 5.1648090e+00 5.6463490e+00 4.1614237e+00 4.3393711e+00 3.5009132e+00 5.2503936e+00 3.7276020e+00 4.0260707e+00 4.3007127e+00 3.5361708e+00 3.3347521e+00 4.1191095e+00 4.2183675e+00 4.7752353e+00 5.1049558e+00 4.1955154e+00 3.4902431e+00 3.7536489e+00 5.1215318e+00 4.0036288e+00 3.6385337e+00 3.2536517e+00 4.1326097e+00 4.3100988e+00 4.1978681e+00 3.6566905e+00 4.3438151e+00 4.3538983e+00 4.1591001e+00 4.0714399e+00 3.8024850e+00 3.7974783e+00 3.3185266e+00 2.0833080e+00 2.8648636e+00 3.5532593e+00 1.6555341e+00 3.5410343e+00 2.0840787e+00 3.3747131e+00 2.3883072e+00 4.3833871e+00 3.9159762e+00 4.2941647e+00 2.3488350e+00 3.6240540e+00 2.9044888e+00 4.0815608e+00 1.5532921e+00 3.6825697e+00 2.4113529e+00 1.7998094e+00 3.2616916e+00 2.5529439e+00 3.3821744e+00 2.6637769e+00 3.9531209e+00 3.1831859e+00 2.5836708e+00 3.1669559e+00 2.2907828e+00 3.8684560e+00 3.0202406e+00 3.4019580e+00 3.1886068e+00 3.4332960e+00 3.7685816e+00 3.8803374e+00 4.1746389e+00 3.3102735e+00 2.2304221e+00 2.1489616e+00 2.0501444e+00 2.6225712e+00 3.4164974e+00 3.0901976e+00 3.9885258e+00 4.0802502e+00 3.0869095e+00 2.9336463e+00 2.3936974e+00 2.5327316e+00 3.4518638e+00 2.5837552e+00 1.5782205e+00 2.6521862e+00 2.9662386e+00 2.9040188e+00 3.2768109e+00 1.6628177e+00 2.7685987e+00 5.0448046e+00 3.5141919e+00 4.9824611e+00 4.0549341e+00 4.5981277e+00 5.5863508e+00 
2.6647036e+00 5.0241554e+00 4.1998979e+00 5.9440534e+00 4.4432394e+00 3.9560997e+00 4.6422438e+00 3.4164839e+00 4.0005863e+00 4.6454886e+00 4.2390210e+00 6.5166388e+00 5.6880370e+00 3.1944389e+00 5.0789131e+00 3.4956324e+00 5.5310286e+00 3.6881384e+00 4.9152167e+00 5.0890922e+00 3.6527501e+00 3.7902440e+00 4.2540354e+00 4.7585941e+00 5.0389487e+00 6.4918086e+00 4.3280601e+00 3.6284588e+00 3.4969965e+00 5.6399353e+00 4.9671290e+00 4.2659568e+00 3.6994310e+00 4.7714894e+00 4.8963175e+00 4.8281202e+00 3.5141919e+00 5.0683437e+00 5.1871515e+00 4.6280145e+00 3.7055141e+00 4.2764028e+00 4.7873094e+00 3.7371542e+00 1.1433971e+00 1.6774310e+00 6.8299624e-01 1.6304499e+00 2.4808718e-01 1.5886765e+00 7.6250797e-01 4.0055392e+00 3.4676312e+00 4.0289839e+00 3.2391776e+00 3.6989507e+00 2.9466089e+00 3.5208636e+00 2.4804256e+00 3.6211670e+00 2.6281173e+00 3.2921089e+00 3.0161637e+00 3.5345457e+00 3.2983536e+00 2.5210242e+00 3.6506644e+00 2.9161244e+00 2.7938108e+00 4.0292846e+00 2.8755116e+00 3.4075988e+00 3.0797683e+00 3.8646774e+00 3.2397520e+00 3.3586779e+00 3.5819899e+00 3.9571013e+00 4.0234614e+00 3.2253861e+00 2.6519927e+00 2.9269738e+00 2.8491914e+00 2.8419330e+00 3.6148101e+00 2.8039428e+00 3.2558795e+00 3.7924883e+00 3.8389577e+00 2.6284424e+00 2.9648238e+00 2.9126202e+00 3.2245885e+00 2.9718548e+00 2.6864788e+00 2.8651003e+00 2.6637996e+00 2.7789114e+00 3.1894122e+00 2.3748697e+00 2.8127546e+00 4.6364269e+00 3.7133040e+00 4.8825792e+00 4.0097653e+00 4.4740109e+00 5.5106999e+00 3.1817279e+00 5.0169254e+00 4.6066522e+00 5.3636182e+00 4.0783379e+00 4.1550948e+00 4.5252166e+00 3.8770438e+00 4.0814269e+00 4.3063766e+00 4.0872895e+00 5.8336247e+00 5.9533030e+00 4.0437148e+00 4.7859703e+00 3.5604924e+00 5.6269403e+00 3.8926783e+00 4.4927794e+00 4.7879866e+00 3.7291513e+00 3.6035976e+00 4.3384511e+00 4.6385717e+00 5.1321867e+00 5.8049832e+00 4.4149501e+00 3.6953819e+00 3.8133051e+00 5.5737973e+00 4.4415093e+00 3.9935485e+00 3.5037963e+00 4.5569739e+00 4.6922818e+00 4.6236700e+00 3.7133040e+00 4.7716971e+00 4.8030835e+00 4.5149959e+00 4.1509766e+00 4.1343605e+00 4.2319568e+00 3.5394847e+00 7.6869104e-01 1.2471855e+00 8.7636491e-01 9.9981032e-01 7.8863556e-01 7.0733904e-01 3.2400577e+00 2.7256768e+00 3.3173156e+00 3.2734582e+00 3.1889456e+00 2.6198618e+00 2.7351941e+00 2.7665224e+00 3.0627699e+00 2.5021229e+00 3.6608924e+00 2.4643905e+00 3.5457670e+00 2.8045035e+00 2.1368327e+00 2.9466857e+00 2.4386380e+00 2.5729082e+00 3.8963334e+00 2.8235662e+00 2.7242811e+00 2.6575342e+00 3.5598437e+00 2.8418228e+00 2.8206835e+00 2.9462527e+00 3.4233881e+00 3.3667899e+00 2.7322904e+00 2.5411273e+00 2.9638750e+00 2.9140871e+00 2.5797070e+00 3.2425969e+00 2.3780833e+00 2.4351544e+00 3.0892387e+00 3.6720185e+00 2.1688001e+00 2.8939857e+00 2.7945402e+00 2.6616170e+00 2.7767058e+00 2.9769851e+00 2.6347557e+00 2.1986118e+00 2.3753308e+00 2.6828901e+00 2.5283291e+00 2.4839186e+00 3.8851614e+00 3.3427747e+00 4.1909067e+00 3.4628626e+00 3.8305140e+00 4.8043725e+00 3.1800308e+00 4.3815267e+00 4.2038608e+00 4.4513681e+00 3.3256952e+00 3.6826282e+00 3.8475722e+00 3.6210755e+00 3.6107649e+00 3.5632387e+00 3.4596612e+00 4.8847297e+00 5.3781989e+00 3.9435460e+00 4.0131264e+00 3.1614358e+00 4.9957986e+00 3.4408340e+00 3.7000441e+00 4.0284550e+00 3.2354442e+00 3.0107962e+00 3.8036057e+00 3.9713385e+00 4.5180638e+00 4.8486868e+00 3.8729425e+00 3.2243808e+00 3.5087561e+00 4.8402519e+00 3.6359784e+00 3.3268022e+00 2.9240118e+00 3.8232660e+00 3.9709252e+00 3.8746323e+00 3.3427747e+00 4.0131316e+00 4.0031779e+00 3.8300809e+00 
3.7945557e+00 3.4841580e+00 3.4283673e+00 2.9863682e+00 1.9060194e+00 3.1239235e-01 1.5583422e+00 4.8124784e-01 1.2220171e+00 3.3785962e+00 2.9374280e+00 3.5071305e+00 3.8740792e+00 3.5491790e+00 3.0669573e+00 2.9018296e+00 3.4251046e+00 3.3648459e+00 3.0744865e+00 4.3230411e+00 2.8485664e+00 4.1027662e+00 3.1626116e+00 2.6442554e+00 3.1724373e+00 2.8315630e+00 3.0534300e+00 4.4306138e+00 3.3882074e+00 2.9857887e+00 3.0959220e+00 4.0072094e+00 3.2240677e+00 3.1644964e+00 3.2264712e+00 3.7352584e+00 3.6230125e+00 3.1206853e+00 3.1023948e+00 3.5580276e+00 3.5096299e+00 3.0877778e+00 3.6577352e+00 2.7900553e+00 2.5838366e+00 3.3069107e+00 4.1792641e+00 2.5911812e+00 3.4684046e+00 3.3165070e+00 2.9868392e+00 3.2999163e+00 3.6373219e+00 3.1449351e+00 2.5937039e+00 2.8148578e+00 3.0518873e+00 3.1967428e+00 2.9647081e+00 4.0498613e+00 3.7819451e+00 4.3976398e+00 3.7644678e+00 4.0879497e+00 4.9574979e+00 3.7577430e+00 4.5772252e+00 4.5796941e+00 4.4589650e+00 3.5295907e+00 4.0592075e+00 4.0919364e+00 4.1226882e+00 4.0237849e+00 3.7803562e+00 3.7136035e+00 4.7772875e+00 5.6344258e+00 4.4679358e+00 4.1805116e+00 3.6013971e+00 5.1936459e+00 3.8460802e+00 3.8293150e+00 4.1365715e+00 3.6263469e+00 3.3344860e+00 4.1404384e+00 4.1465376e+00 4.7500857e+00 4.7258629e+00 4.2123837e+00 3.5757376e+00 3.9028467e+00 5.0127222e+00 3.7704782e+00 3.5495399e+00 3.2637691e+00 4.0284560e+00 4.1958516e+00 4.1011034e+00 3.7819451e+00 4.1793948e+00 4.1561376e+00 4.1029254e+00 4.2472742e+00 3.7624633e+00 3.5705606e+00 3.3154318e+00 1.8882412e+00 5.3690447e-01 1.7295385e+00 7.4445830e-01 3.5842571e+00 3.0831073e+00 3.5905412e+00 2.6850208e+00 3.1920496e+00 2.4895611e+00 3.1874455e+00 1.9825106e+00 3.1280291e+00 2.1902526e+00 2.7631963e+00 2.5991209e+00 2.9183874e+00 2.8448970e+00 2.0635463e+00 3.2072460e+00 2.5480786e+00 2.2627580e+00 3.4383056e+00 2.3172372e+00 3.0909340e+00 2.5623863e+00 3.3194064e+00 2.7506751e+00 2.8644451e+00 3.1134606e+00 3.4405052e+00 3.5767292e+00 2.7771563e+00 2.0712834e+00 2.3618912e+00 2.2737657e+00 2.3098587e+00 3.1429166e+00 2.4666493e+00 2.9899331e+00 3.3598261e+00 3.2396917e+00 2.2342806e+00 2.4357274e+00 2.4181291e+00 2.7984743e+00 2.4231383e+00 2.1599940e+00 2.3764241e+00 2.2561857e+00 2.3387588e+00 2.7074009e+00 1.8390751e+00 2.3351007e+00 4.3436399e+00 3.2756710e+00 4.4477490e+00 3.5866422e+00 4.0782068e+00 5.0677790e+00 2.7922251e+00 4.5545199e+00 4.0836814e+00 5.0715858e+00 3.7130862e+00 3.6733277e+00 4.0983149e+00 3.4111720e+00 3.6933050e+00 3.9623038e+00 3.6711803e+00 5.5579136e+00 5.4498365e+00 3.4842014e+00 4.4103781e+00 3.1690867e+00 5.1441957e+00 3.3997317e+00 4.1528029e+00 4.3901202e+00 3.2630747e+00 3.2035560e+00 3.8955732e+00 4.1857725e+00 4.6435471e+00 5.5146776e+00 3.9762768e+00 3.2193184e+00 3.3255820e+00 5.1213824e+00 4.1678001e+00 3.6124079e+00 3.1107135e+00 4.1457358e+00 4.3076787e+00 4.2130787e+00 3.2756710e+00 4.4066811e+00 4.4713485e+00 4.0952473e+00 3.6289876e+00 3.7168749e+00 3.9644506e+00 3.1662754e+00 1.5140329e+00 3.3872939e-01 1.1649855e+00 3.5691650e+00 3.1595321e+00 3.7055656e+00 4.0160835e+00 3.7376603e+00 3.2592987e+00 3.1435650e+00 3.5370651e+00 3.5437638e+00 3.2591886e+00 4.4166410e+00 3.0676819e+00 4.2127293e+00 3.3654329e+00 2.8346837e+00 3.3660906e+00 3.0613575e+00 3.2026716e+00 4.5808841e+00 3.5275705e+00 3.2454510e+00 3.2718756e+00 4.1819826e+00 3.4031279e+00 3.3454884e+00 3.4170637e+00 3.9109404e+00 3.8358967e+00 3.3305911e+00 3.2315455e+00 3.6883726e+00 3.6296117e+00 3.2506700e+00 3.8623185e+00 3.0230066e+00 2.8458833e+00 3.5111771e+00 
4.3201594e+00 2.8024863e+00 3.6263297e+00 3.4825375e+00 3.1978058e+00 3.4556048e+00 3.7429398e+00 3.3240937e+00 2.7959976e+00 3.0137031e+00 3.2391776e+00 3.3180586e+00 3.1510639e+00 4.3279818e+00 4.0059487e+00 4.6233424e+00 3.9929211e+00 4.3355275e+00 5.1718231e+00 3.9512216e+00 4.7810147e+00 4.7732950e+00 4.7150495e+00 3.7777251e+00 4.2731987e+00 4.3246862e+00 4.3347988e+00 4.2753666e+00 4.0433740e+00 3.9425600e+00 5.0081332e+00 5.8406251e+00 4.6274156e+00 4.4284929e+00 3.8398842e+00 5.3940263e+00 4.0533472e+00 4.0818092e+00 4.3543675e+00 3.8429689e+00 3.5715666e+00 4.3728103e+00 4.3436347e+00 4.9498040e+00 4.9393850e+00 4.4487185e+00 3.7756717e+00 4.0901950e+00 5.2287042e+00 4.0497882e+00 3.7884247e+00 3.5028854e+00 4.2624115e+00 4.4485334e+00 4.3403092e+00 4.0059487e+00 4.4317896e+00 4.4210531e+00 4.3442496e+00 4.4457375e+00 3.9985746e+00 3.8504489e+00 3.5592028e+00 1.4309353e+00 5.3528567e-01 3.7908776e+00 3.2731841e+00 3.8215973e+00 3.1206853e+00 3.5080970e+00 2.7892862e+00 3.3363505e+00 2.4070515e+00 3.4174587e+00 2.5169099e+00 3.2261716e+00 2.8456035e+00 3.3834513e+00 3.1193847e+00 2.3599428e+00 3.4420978e+00 2.7676217e+00 2.6211360e+00 3.8782821e+00 2.7318604e+00 3.2516765e+00 2.8950591e+00 3.6956240e+00 3.0573546e+00 3.1595622e+00 3.3778208e+00 3.7543715e+00 3.8301935e+00 3.0538048e+00 2.4889600e+00 2.7975756e+00 2.7172725e+00 2.6747723e+00 3.4570094e+00 2.6710885e+00 3.0859941e+00 3.5894820e+00 3.6730945e+00 2.4678645e+00 2.8348872e+00 2.7756196e+00 3.0423378e+00 2.8112845e+00 2.6077230e+00 2.7173555e+00 2.4925318e+00 2.6151930e+00 2.9985224e+00 2.2768387e+00 2.6523384e+00 4.4886181e+00 3.5762214e+00 4.6936725e+00 3.8412437e+00 4.3086181e+00 5.3124523e+00 3.1125049e+00 4.8185220e+00 4.4330566e+00 5.1853832e+00 3.9019516e+00 3.9878172e+00 4.3438773e+00 3.7545919e+00 3.9571174e+00 4.1452084e+00 3.9080206e+00 5.6409887e+00 5.7635531e+00 3.9041153e+00 4.6071696e+00 3.4361835e+00 5.4269728e+00 3.7248960e+00 4.3153491e+00 4.5881410e+00 3.5627565e+00 3.4386758e+00 4.1762134e+00 4.4334431e+00 4.9338087e+00 5.6026788e+00 4.2556298e+00 3.5163766e+00 3.6516985e+00 5.3754384e+00 4.2899814e+00 3.8175194e+00 3.3436392e+00 4.3710164e+00 4.5233951e+00 4.4426760e+00 3.5762214e+00 4.5972906e+00 4.6375405e+00 4.3421746e+00 3.9932553e+00 3.9596590e+00 4.0813696e+00 3.3867904e+00 9.9272943e-01 3.3624661e+00 2.9811147e+00 3.5018936e+00 3.8169093e+00 3.5209477e+00 3.0838698e+00 2.9939885e+00 3.3707744e+00 3.3216375e+00 3.1074562e+00 4.2293025e+00 2.8939100e+00 3.9735251e+00 3.1779321e+00 2.6513305e+00 3.1539115e+00 2.9206559e+00 2.9923096e+00 4.3522137e+00 3.3213891e+00 3.1222272e+00 3.0540027e+00 3.9664528e+00 3.2012517e+00 3.1248526e+00 3.2007609e+00 3.6824267e+00 3.6418210e+00 3.1482435e+00 3.0088381e+00 3.4838907e+00 3.4206811e+00 3.0415159e+00 3.6826470e+00 2.9006138e+00 2.7293873e+00 3.3112277e+00 4.0819926e+00 2.6432089e+00 3.4355346e+00 3.3034492e+00 3.0164601e+00 3.2442582e+00 3.5631690e+00 3.1415769e+00 2.6264645e+00 2.8384396e+00 3.0300747e+00 3.1354811e+00 2.9649167e+00 4.2304738e+00 3.8482047e+00 4.4439318e+00 3.8285351e+00 4.1849277e+00 4.9874125e+00 3.8271288e+00 4.5862820e+00 4.5664639e+00 4.6040986e+00 3.6270951e+00 4.0846199e+00 4.1503558e+00 4.1702140e+00 4.1407964e+00 3.9121183e+00 3.7737683e+00 4.8997002e+00 5.6369181e+00 4.4180349e+00 4.2780098e+00 3.7035075e+00 5.1920494e+00 3.8582580e+00 3.9463952e+00 4.1826549e+00 3.6583100e+00 3.4129797e+00 4.2036908e+00 4.1443501e+00 4.7432980e+00 4.8153136e+00 4.2825977e+00 3.5821023e+00 3.9040345e+00 5.0374022e+00 3.9556226e+00 
3.6351652e+00 3.3487998e+00 4.0905154e+00 4.2992014e+00 4.1693135e+00 3.8482047e+00 4.2897273e+00 4.2963449e+00 4.1754173e+00 4.2443816e+00 3.8297356e+00 3.7573406e+00 3.4190329e+00 3.4434283e+00 2.9833205e+00 3.5091456e+00 3.1516030e+00 3.2866494e+00 2.6849207e+00 3.0541761e+00 2.5654361e+00 3.1545670e+00 2.5403244e+00 3.3918434e+00 2.6608343e+00 3.3413616e+00 2.9302185e+00 2.2379234e+00 3.1290373e+00 2.6442995e+00 2.5077372e+00 3.8111267e+00 2.7069456e+00 3.0566678e+00 2.7098649e+00 3.5644689e+00 2.8826061e+00 2.9134932e+00 3.0944404e+00 3.4986589e+00 3.5664548e+00 2.8806273e+00 2.4158569e+00 2.8131786e+00 2.7333339e+00 2.5637470e+00 3.3370594e+00 2.5885603e+00 2.8220110e+00 3.2904739e+00 3.5710204e+00 2.3287218e+00 2.8315630e+00 2.7529707e+00 2.8293212e+00 2.7254528e+00 2.7527251e+00 2.6524994e+00 2.3299431e+00 2.4822440e+00 2.7803033e+00 2.3731496e+00 2.5423572e+00 4.2830420e+00 3.4939592e+00 4.4286678e+00 3.6575173e+00 4.1044791e+00 5.0220848e+00 3.2200733e+00 4.5468375e+00 4.2700249e+00 4.8698851e+00 3.6462342e+00 3.8237490e+00 4.0990020e+00 3.7208580e+00 3.8692104e+00 3.9203597e+00 3.6817365e+00 5.2775206e+00 5.5250867e+00 3.8676958e+00 4.3392543e+00 3.3693781e+00 5.1524083e+00 3.5650934e+00 4.0437994e+00 4.2783342e+00 3.3968462e+00 3.2517273e+00 4.0065881e+00 4.1377875e+00 4.6677357e+00 5.2142247e+00 4.0893000e+00 3.3307003e+00 3.5369003e+00 5.0771896e+00 4.0613196e+00 3.5869628e+00 3.1695162e+00 4.1006440e+00 4.2899814e+00 4.1769447e+00 3.4939592e+00 4.3422191e+00 4.3859843e+00 4.1114107e+00 3.8721945e+00 3.7365034e+00 3.8554667e+00 3.2330990e+00 7.4500632e-01 3.1271814e-01 2.7864553e+00 1.1117653e+00 1.8291315e+00 9.1459005e-01 3.2771828e+00 8.5582452e-01 2.5020950e+00 3.7722922e+00 1.4404415e+00 2.6850561e+00 1.2884095e+00 1.9346765e+00 4.6190224e-01 1.7610224e+00 1.9545276e+00 2.5064746e+00 2.4134734e+00 1.4291842e+00 1.4855627e+00 1.8305602e+00 1.4494865e+00 1.0355160e+00 6.8921053e-01 9.5529726e-01 7.2626021e-01 1.4025367e+00 2.2620298e+00 2.6646285e+00 2.6984190e+00 1.9245986e+00 1.7053476e+00 1.9940477e+00 1.3238834e+00 4.4901474e-01 2.2514668e+00 1.7899689e+00 2.4621620e+00 2.3008240e+00 1.1820572e+00 2.0593667e+00 3.3235867e+00 2.0701817e+00 1.6811909e+00 1.7438018e+00 1.2168151e+00 2.9923086e+00 1.8570167e+00 1.8407084e+00 1.9774894e+00 1.2374249e+00 1.3146181e+00 1.4369760e+00 1.6548985e+00 3.0339990e+00 1.3065206e+00 1.8441719e+00 1.9017153e+00 1.0169098e+00 1.5490835e+00 1.1480416e+00 2.3912363e+00 2.1724397e+00 1.4252775e+00 1.0284221e+00 2.2390158e+00 2.3611228e+00 2.6121814e+00 1.3139868e+00 2.0833701e+00 1.8624251e+00 1.5270661e+00 1.1605968e+00 9.3589904e-01 1.4360842e+00 1.2967707e+00 1.5740302e+00 8.5349066e-01 1.4639724e+00 2.1546616e+00 1.6538197e+00 1.2783641e+00 1.8320269e+00 1.7168897e+00 1.7042715e+00 1.0288355e+00 1.3960908e+00 1.0327124e+00 1.4702446e+00 1.2287925e+00 1.9774894e+00 1.3826233e+00 1.6072393e+00 1.3472497e+00 1.9439880e+00 1.1295026e+00 1.6414265e+00 1.5177322e+00 6.8496652e-01 2.3745921e+00 9.3211669e-01 1.2784093e+00 3.1271814e-01 2.7526949e+00 7.8034610e-01 1.8874930e+00 3.3568278e+00 7.7863029e-01 2.4620981e+00 7.9999102e-01 1.3194463e+00 4.5257749e-01 1.0705713e+00 1.5282070e+00 2.3189157e+00 1.9824340e+00 7.4029244e-01 1.0636179e+00 1.6347187e+00 1.0722301e+00 7.4849274e-01 5.3988754e-01 1.0632334e+00 7.0213871e-01 8.4383266e-01 1.8384560e+00 2.2399960e+00 2.2847962e+00 1.4577178e+00 1.3022697e+00 1.2927254e+00 6.8064066e-01 4.4417668e-01 2.0964002e+00 1.1252542e+00 1.9840858e+00 1.8038514e+00 6.0365341e-01 1.6354514e+00 
2.8387736e+00 1.5364600e+00 1.0539473e+00 1.1361809e+00 7.8649633e-01 2.4634199e+00 1.2983546e+00 1.5835083e+00 1.4983760e+00 1.4748100e+00 1.0180846e+00 1.2694582e+00 2.0850659e+00 2.4405647e+00 1.6897529e+00 1.8533655e+00 2.0793529e+00 7.4797652e-01 1.3453828e+00 1.1770444e+00 1.9571621e+00 1.6977813e+00 1.1421260e+00 8.3850424e-01 2.6029823e+00 2.7071356e+00 2.3733266e+00 1.3878105e+00 1.5050081e+00 2.3020930e+00 1.2450968e+00 1.1247714e+00 1.3455945e+00 1.0453799e+00 7.4157869e-01 1.3630233e+00 1.3061954e+00 1.8456576e+00 2.6048102e+00 1.4425815e+00 9.9058911e-01 1.5658693e+00 2.1444356e+00 1.4166079e+00 7.2727886e-01 7.9343577e-01 1.1384575e+00 1.4013840e+00 1.2776135e+00 1.4983760e+00 1.4006413e+00 1.5375255e+00 1.2650236e+00 1.7250893e+00 9.0221296e-01 1.2767751e+00 9.2060977e-01 2.5673851e+00 8.6079202e-01 1.6428179e+00 8.7623959e-01 3.1131006e+00 6.6453319e-01 2.3246810e+00 3.5720588e+00 1.2918836e+00 2.4857868e+00 1.0845006e+00 1.8135469e+00 3.9472619e-01 1.6028858e+00 1.8029164e+00 2.2526468e+00 2.2305704e+00 1.2920175e+00 1.3194463e+00 1.5620078e+00 1.2567627e+00 8.7273869e-01 5.3095950e-01 7.1671402e-01 4.2827238e-01 1.2022652e+00 2.1186438e+00 2.4755072e+00 2.5212188e+00 1.7582453e+00 1.4360884e+00 1.8400908e+00 1.3147484e+00 2.6680274e-01 2.0194508e+00 1.6709595e+00 2.2587909e+00 2.1030957e+00 1.0161846e+00 1.8732616e+00 3.1496799e+00 1.8819614e+00 1.5700887e+00 1.5933926e+00 1.0543640e+00 2.8415314e+00 1.6890927e+00 1.6862498e+00 1.7038925e+00 1.0251132e+00 1.0245496e+00 1.1781513e+00 1.5212572e+00 2.8050734e+00 1.1091494e+00 1.5452835e+00 1.9080234e+00 8.5359653e-01 1.2416734e+00 8.9528108e-01 2.1088803e+00 1.9097018e+00 1.2522193e+00 7.4612152e-01 2.3284654e+00 2.1556564e+00 2.3436971e+00 1.1651790e+00 1.8364691e+00 1.6976628e+00 1.2371704e+00 1.0463225e+00 8.5302032e-01 1.1623508e+00 1.0682140e+00 1.2723284e+00 6.7955751e-01 1.2572095e+00 2.2840066e+00 1.3572135e+00 1.0082548e+00 1.5613089e+00 1.5962380e+00 1.5974627e+00 7.9671887e-01 1.1799226e+00 8.3571552e-01 1.2674750e+00 1.0543826e+00 1.7038925e+00 1.2197800e+00 1.4811026e+00 1.1132425e+00 1.6485749e+00 8.6295295e-01 1.5402909e+00 1.2957413e+00 1.7240804e+00 1.2117746e+00 2.5620931e+00 9.4346743e-01 1.9481085e+00 1.0013399e+00 1.0383354e+00 1.7091506e+00 7.5651431e-01 1.6169211e+00 1.4074199e+00 2.3606801e+00 1.6642999e+00 1.0677270e+00 9.5748562e-01 5.4702555e-01 2.2752108e+00 1.3619011e+00 1.2144903e+00 1.4245490e+00 1.7677805e+00 2.1070188e+00 2.0042865e+00 2.3026776e+00 1.5583422e+00 8.7856768e-01 3.6704030e-01 4.8644514e-01 1.0013399e+00 1.3262124e+00 1.6642999e+00 2.6524441e+00 2.3938089e+00 9.9234874e-01 1.6199145e+00 4.6126066e-01 7.3978204e-01 1.8066960e+00 7.9191984e-01 8.2250769e-01 9.3727156e-01 1.6415483e+00 1.4092680e+00 1.6304499e+00 9.1432842e-01 1.1795364e+00 3.1638147e+00 1.4103498e+00 2.9322745e+00 2.0247895e+00 2.5487652e+00 3.5082844e+00 1.0453705e+00 2.9611604e+00 1.9449446e+00 4.1343567e+00 2.6451449e+00 1.7869811e+00 2.6253748e+00 1.1973458e+00 1.9817270e+00 2.7838011e+00 2.2840066e+00 4.7706180e+00 3.4576971e+00 8.9870984e-01 3.1324336e+00 1.5639220e+00 3.4016597e+00 1.5728443e+00 3.0734808e+00 3.1995683e+00 1.6368228e+00 1.9546803e+00 2.1052860e+00 2.8313078e+00 2.9375460e+00 4.8020420e+00 2.1735035e+00 1.6493848e+00 1.3576485e+00 3.5774884e+00 3.2045691e+00 2.3952974e+00 1.8988804e+00 2.8268459e+00 2.8989852e+00 2.8927951e+00 1.4103498e+00 3.1063891e+00 3.2893581e+00 2.6241058e+00 1.4441104e+00 2.3170196e+00 3.0817543e+00 1.9124815e+00 1.0026233e+00 1.1867923e+00 2.3572427e+00 
3.6939647e-01 1.6408576e+00 2.7392425e+00 8.8835337e-01 1.6805749e+00 5.5399712e-01 1.2745081e+00 7.5303835e-01 1.1820572e+00 1.1301977e+00 1.4315442e+00 1.4464138e+00 1.2423523e+00 6.4626422e-01 7.5230154e-01 6.2536527e-01 4.0664863e-01 5.0731024e-01 4.0158746e-01 6.2543628e-01 6.4884272e-01 1.4014424e+00 1.6702453e+00 1.7317202e+00 1.0390957e+00 7.1781501e-01 1.4073416e+00 1.5165187e+00 7.3502408e-01 1.2122797e+00 1.2421878e+00 1.4565053e+00 1.3559191e+00 6.8064066e-01 1.0943185e+00 2.3628816e+00 1.1638514e+00 1.1627754e+00 1.0519629e+00 5.3106808e-01 2.1057450e+00 1.0403581e+00 1.9325284e+00 1.0596309e+00 1.3779504e+00 7.6633520e-01 1.2315180e+00 1.9698667e+00 2.0697950e+00 1.4385383e+00 1.0743031e+00 2.5609592e+00 1.1664463e+00 7.0702759e-01 1.1055841e+00 1.3757605e+00 1.4784016e+00 1.4566739e+00 7.9213303e-01 3.1107848e+00 2.2607358e+00 1.5267093e+00 1.6037239e+00 1.2804073e+00 1.9864213e+00 5.4310586e-01 1.5475402e+00 1.5328931e+00 5.4647209e-01 7.9343577e-01 9.7668596e-01 1.1862065e+00 1.4760549e+00 3.1060327e+00 1.0849536e+00 3.7234239e-01 8.8571256e-01 2.0333305e+00 1.9196784e+00 9.5289573e-01 8.6297946e-01 1.2392529e+00 1.4996752e+00 1.3752205e+00 1.0596309e+00 1.6198849e+00 1.8691588e+00 1.2188552e+00 9.2906468e-01 8.7042892e-01 1.8304530e+00 9.8621003e-01 1.4220241e+00 1.5496729e+00 1.1125144e+00 7.4201890e-01 2.1434858e+00 6.0719477e-01 1.5102780e+00 5.6262711e-01 5.7267643e-01 1.3990971e+00 5.4408162e-01 5.2316125e-01 1.5323598e+00 8.2317311e-01 1.1694177e+00 5.6003943e-01 1.0600958e+00 5.1318506e-01 8.8354057e-01 1.1892978e+00 1.3456285e+00 1.4234329e+00 5.0311426e-01 8.2976237e-01 1.0655776e+00 1.1267154e+00 4.4786319e-01 6.7424840e-01 6.4241342e-01 1.4834540e+00 1.4207811e+00 1.3630799e+00 5.2795793e-01 7.8571751e-01 5.3988754e-01 6.8299624e-01 5.6992880e-01 1.6313928e+00 3.1093967e-01 5.0876385e-01 2.8653046e-01 6.5622658e-01 1.3398293e+00 2.2670334e-01 2.2472150e+00 8.9528108e-01 2.1908074e+00 1.1815770e+00 1.7549185e+00 2.8271782e+00 1.2987661e+00 2.2927322e+00 1.7056353e+00 3.1591627e+00 1.6557083e+00 1.2615426e+00 1.8432274e+00 1.1833606e+00 1.4858600e+00 1.8676774e+00 1.3771658e+00 3.7547288e+00 3.1005581e+00 1.4815836e+00 2.2650937e+00 9.5252488e-01 2.8687133e+00 1.0290036e+00 2.0855599e+00 2.2987772e+00 9.0705646e-01 9.6267538e-01 1.4839640e+00 2.0473137e+00 2.3780534e+00 3.7918985e+00 1.5792523e+00 8.4221906e-01 9.2300698e-01 2.9301148e+00 2.2149712e+00 1.3941954e+00 8.9564079e-01 1.9843979e+00 2.0984088e+00 2.1003345e+00 8.9528108e-01 2.2276119e+00 2.3923111e+00 1.8829565e+00 1.3046645e+00 1.4645285e+00 2.0631582e+00 9.0331700e-01 2.9007820e+00 1.0677270e+00 1.9884030e+00 3.5404087e+00 8.9973730e-01 2.7097598e+00 9.8896933e-01 1.4521880e+00 7.3735391e-01 1.1060455e+00 1.7358574e+00 2.5457091e+00 2.1802781e+00 5.9868400e-01 1.3038475e+00 1.8531597e+00 1.2895008e+00 1.0351584e+00 8.4116354e-01 1.3290015e+00 8.7070822e-01 1.0061402e+00 2.0523180e+00 2.4354079e+00 2.4861842e+00 1.6612475e+00 1.4482752e+00 1.3000067e+00 4.4417668e-01 6.8064066e-01 2.3457352e+00 1.2097457e+00 2.1562626e+00 1.9604379e+00 7.8034610e-01 1.8447318e+00 3.0052273e+00 1.6924215e+00 1.1656549e+00 1.2692102e+00 1.0351584e+00 2.6195047e+00 1.4577178e+00 1.3905452e+00 1.5765058e+00 1.5178511e+00 1.0862363e+00 1.2425116e+00 2.1288976e+00 2.5085097e+00 1.7889699e+00 2.0235792e+00 1.9184220e+00 6.6154242e-01 1.4831058e+00 1.2157849e+00 2.0573833e+00 1.6866006e+00 1.0122929e+00 9.0072498e-01 2.4680266e+00 2.8037035e+00 2.5716465e+00 1.3193736e+00 1.5270661e+00 2.3974481e+00 1.4129334e+00 
9.9186850e-01 1.3586792e+00 1.1900564e+00 7.8649633e-01 1.4261046e+00 1.4313173e+00 1.9693923e+00 2.5032449e+00 1.4908532e+00 1.1825071e+00 1.7301809e+00 2.1927243e+00 1.1883807e+00 7.0823896e-01 8.2574748e-01 1.1508346e+00 1.3435624e+00 1.2769092e+00 1.5765058e+00 1.3121204e+00 1.3947465e+00 1.2781560e+00 1.8941016e+00 9.4449485e-01 1.0327124e+00 9.1221028e-01 2.4983699e+00 1.0001615e+00 9.3727156e-01 2.0156046e+00 1.4610933e+00 2.0818555e+00 1.4925824e+00 2.8275182e+00 1.8765007e+00 1.3658611e+00 1.8892371e+00 9.4900087e-01 2.5853758e+00 1.8063869e+00 2.0403844e+00 1.9103325e+00 2.2554051e+00 2.6063293e+00 2.6670659e+00 2.8999367e+00 1.9965452e+00 1.0765554e+00 7.8898008e-01 7.5921691e-01 1.3580560e+00 1.9753995e+00 1.7807988e+00 2.8573306e+00 2.8966014e+00 1.8587118e+00 1.7290357e+00 9.4346743e-01 1.0932187e+00 2.1982921e+00 1.2728402e+00 2.6033464e-01 1.2680818e+00 1.7824360e+00 1.6364088e+00 2.0664368e+00 3.9699460e-01 1.4644159e+00 3.6567369e+00 2.0227452e+00 3.6362580e+00 2.6431572e+00 3.1825084e+00 4.2569960e+00 1.1649315e+00 3.7040279e+00 2.8079889e+00 4.6643094e+00 3.1456885e+00 2.5368652e+00 3.2881355e+00 1.9057424e+00 2.5373943e+00 3.2972877e+00 2.8812938e+00 5.2957248e+00 4.3256332e+00 1.8261865e+00 3.7402681e+00 2.0260681e+00 4.2089146e+00 2.2931022e+00 3.6001832e+00 3.8156954e+00 2.2665638e+00 2.4364115e+00 2.8123267e+00 3.5007685e+00 3.7249152e+00 5.3249013e+00 2.8816817e+00 2.2758405e+00 2.0704519e+00 4.3322711e+00 3.6389537e+00 2.9225385e+00 2.3454418e+00 3.4545556e+00 3.5207953e+00 3.5188476e+00 2.0227452e+00 3.7070275e+00 3.8401809e+00 3.2712836e+00 2.2870689e+00 2.9197390e+00 3.4787880e+00 2.3479440e+00 1.8015956e+00 2.9300280e+00 9.4226820e-01 1.8443182e+00 6.2046469e-01 1.3340137e+00 5.0731024e-01 1.2583559e+00 1.1751408e+00 1.7063292e+00 1.5923247e+00 1.2788332e+00 7.3035754e-01 1.0391769e+00 6.6194168e-01 2.9537172e-01 2.8845946e-01 3.7622328e-01 6.2760421e-01 7.7259801e-01 1.4843174e+00 1.8353890e+00 1.8732616e+00 1.1492120e+00 9.8621003e-01 1.4916922e+00 1.4186317e+00 5.4702555e-01 1.4349060e+00 1.2616939e+00 1.6526705e+00 1.5077694e+00 6.5951091e-01 1.2429329e+00 2.5190636e+00 1.3124532e+00 1.1415080e+00 1.1102613e+00 5.1210327e-01 2.2412909e+00 1.1466385e+00 2.0209769e+00 1.3581397e+00 1.4351001e+00 9.3899770e-01 1.3860326e+00 1.9736520e+00 2.3116417e+00 1.4395013e+00 1.3256797e+00 2.5152621e+00 1.1895067e+00 1.0230389e+00 1.2126763e+00 1.7102781e+00 1.7732497e+00 1.5528794e+00 8.7017583e-01 2.9799960e+00 2.3789847e+00 1.8031686e+00 1.6479163e+00 1.5433090e+00 2.0188390e+00 8.9564079e-01 1.5340057e+00 1.4361609e+00 8.5359653e-01 9.3591761e-01 1.2375842e+00 1.0932909e+00 1.5255827e+00 2.9419617e+00 1.3503714e+00 5.7743200e-01 1.0870568e+00 2.0633836e+00 1.9646340e+00 9.8006549e-01 1.0101003e+00 1.2839264e+00 1.6205305e+00 1.4633215e+00 1.3581397e+00 1.6723912e+00 1.9304834e+00 1.3785508e+00 1.2852279e+00 1.0122929e+00 1.8669942e+00 1.1301977e+00 1.7293858e+00 1.1132823e+00 1.5940674e+00 1.2647627e+00 7.0155617e-01 2.0524860e+00 9.1916314e-01 9.0143387e-01 1.7090768e+00 7.7500385e-01 1.6060095e+00 1.1202045e+00 1.5217697e+00 1.2283051e+00 1.5431751e+00 1.8486311e+00 2.0116712e+00 2.0744305e+00 1.1308980e+00 8.6249502e-01 8.7618973e-01 9.4738284e-01 7.7051641e-01 1.2102028e+00 8.1844705e-01 1.9297683e+00 2.0868978e+00 1.6471661e+00 8.6143605e-01 6.0365341e-01 5.7743200e-01 1.3481077e+00 8.0628657e-01 1.1400599e+00 5.2860161e-01 9.6999820e-01 7.8957903e-01 1.3189781e+00 8.0128554e-01 6.6918102e-01 2.6783589e+00 1.1902996e+00 2.8052881e+00 1.7833755e+00 
2.2807524e+00 3.4730319e+00 7.8369762e-01 2.9612706e+00 2.2200035e+00 3.7113566e+00 2.2079590e+00 1.7773274e+00 2.4240040e+00 1.2586273e+00 1.6606465e+00 2.3345591e+00 2.0100747e+00 4.3781379e+00 3.6673897e+00 1.6336953e+00 2.8241758e+00 1.1071867e+00 3.5077760e+00 1.5364148e+00 2.6605007e+00 2.9756588e+00 1.4305490e+00 1.5019762e+00 1.9806289e+00 2.7459951e+00 3.0159022e+00 4.4377151e+00 2.0446123e+00 1.5157660e+00 1.4706419e+00 3.5410473e+00 2.6488235e+00 2.0119986e+00 1.3953413e+00 2.5748429e+00 2.6034011e+00 2.6304123e+00 1.1902996e+00 2.7818749e+00 2.8834870e+00 2.3861430e+00 1.6719199e+00 2.0259174e+00 2.4855987e+00 1.3894554e+00 2.6621352e+00 1.3234208e+00 2.6096596e+00 2.2340939e+00 3.3444949e+00 2.5679785e+00 1.9118907e+00 1.7502251e+00 1.3868450e+00 3.2376571e+00 2.3245757e+00 2.2030084e+00 2.3874774e+00 2.7435279e+00 3.0963501e+00 2.9911365e+00 3.3313369e+00 2.5528922e+00 1.6215602e+00 1.1232628e+00 1.1083720e+00 1.9130507e+00 2.3463017e+00 2.5105454e+00 3.5809242e+00 3.3974315e+00 1.8325077e+00 2.4726944e+00 1.3889514e+00 1.6150266e+00 2.7833542e+00 1.7312416e+00 7.0111465e-01 1.8556044e+00 2.5019422e+00 2.3097506e+00 2.6016512e+00 1.2007565e+00 2.0949830e+00 4.1622891e+00 2.3984916e+00 3.9595754e+00 3.0477055e+00 3.5738045e+00 4.5102220e+00 1.5833139e+00 3.9547990e+00 2.8883510e+00 5.1681640e+00 3.6715203e+00 2.8100260e+00 3.6615020e+00 2.1175666e+00 2.9197870e+00 3.8026677e+00 3.3142297e+00 5.7988752e+00 4.3773719e+00 1.6802144e+00 4.1689985e+00 2.5051488e+00 4.3637125e+00 2.6075737e+00 4.1031841e+00 4.2218983e+00 2.6731954e+00 2.9685228e+00 3.1235748e+00 3.8333962e+00 3.9200272e+00 5.8238336e+00 3.1861618e+00 2.6684075e+00 2.3174914e+00 4.5851647e+00 4.2037872e+00 3.4173362e+00 2.9015754e+00 3.8649606e+00 3.9284352e+00 3.9274419e+00 2.3984916e+00 4.1396215e+00 4.3152972e+00 3.6556493e+00 2.4306199e+00 3.3534589e+00 4.0726739e+00 2.9019830e+00 1.9649078e+00 4.5716421e-01 6.0725725e-01 1.0082512e+00 4.0020411e-01 9.6216255e-01 1.8879475e+00 1.3283978e+00 6.9493020e-01 5.9382214e-01 1.3116762e+00 7.1128716e-01 6.9976890e-01 8.6291569e-01 1.2356595e+00 1.0989171e+00 3.1093967e-01 1.2231205e+00 1.5729927e+00 1.6302590e+00 8.2263932e-01 8.7786730e-01 6.2729876e-01 9.5483435e-01 1.0328871e+00 1.7091506e+00 4.5071694e-01 1.2824303e+00 1.1188225e+00 3.5622944e-01 1.0163696e+00 2.1159028e+00 8.2380019e-01 4.6137216e-01 4.2450569e-01 5.0629646e-01 1.7321630e+00 5.8565201e-01 1.8627348e+00 1.0140018e+00 1.9095789e+00 1.0347180e+00 1.4794093e+00 2.5851550e+00 1.6987423e+00 2.1172382e+00 1.7999889e+00 2.6937126e+00 1.1946586e+00 1.2274756e+00 1.5304430e+00 1.4222936e+00 1.3705079e+00 1.4369760e+00 1.1056213e+00 3.3133435e+00 3.0027854e+00 1.9037709e+00 1.8688892e+00 9.6470639e-01 2.7156484e+00 1.0119180e+00 1.6591942e+00 1.9679139e+00 7.8369762e-01 6.0848963e-01 1.3509456e+00 1.8177289e+00 2.2200035e+00 3.3498688e+00 1.4311753e+00 8.4040822e-01 1.2474502e+00 2.6426508e+00 1.7620244e+00 1.0560148e+00 5.3362004e-01 1.6100417e+00 1.7340403e+00 1.6942922e+00 1.0140018e+00 1.8496575e+00 1.9626001e+00 1.5340961e+00 1.4294738e+00 1.1293709e+00 1.5949054e+00 6.4398240e-01 1.7472907e+00 1.7451622e+00 2.3128200e+00 2.0364367e+00 1.1795364e+00 7.5358247e-01 8.5582452e-01 2.5764453e+00 1.4435947e+00 1.1399118e+00 1.4681660e+00 1.7387082e+00 2.0628406e+00 1.8244206e+00 2.2981140e+00 1.7651851e+00 1.0308265e+00 7.7934221e-01 7.7863029e-01 1.2082987e+00 1.5285763e+00 2.1068341e+00 2.8909913e+00 2.3684734e+00 6.2479428e-01 1.9481438e+00 9.9891776e-01 1.1557309e+00 1.9516972e+00 9.8896933e-01 
1.2918836e+00 1.3154759e+00 1.9018319e+00 1.7043940e+00 1.6876317e+00 1.4136421e+00 1.4845363e+00 3.4227731e+00 1.7797546e+00 2.8993041e+00 2.1584174e+00 2.6985144e+00 3.3744038e+00 1.7790388e+00 2.8051892e+00 1.8256311e+00 4.2196161e+00 2.7909300e+00 1.8699505e+00 2.6716730e+00 1.6273208e+00 2.3931485e+00 2.9994905e+00 2.3643994e+00 4.7587356e+00 3.2663266e+00 8.6629251e-01 3.2140857e+00 2.0311002e+00 3.1918979e+00 1.6793067e+00 3.1869276e+00 3.1309476e+00 1.8104252e+00 2.1858235e+00 2.2467893e+00 2.6763967e+00 2.7532877e+00 4.7380019e+00 2.3330174e+00 1.6923429e+00 1.4009491e+00 3.4550204e+00 3.4609647e+00 2.5225714e+00 2.1699387e+00 2.8630132e+00 3.0349029e+00 2.9631214e+00 1.7797546e+00 3.2114945e+00 3.4557456e+00 2.7355167e+00 1.5237929e+00 2.4389172e+00 3.3539591e+00 2.2150193e+00 8.7774396e-01 8.7560645e-01 6.6918102e-01 8.5695467e-01 1.6281675e+00 1.2552875e+00 9.0276183e-01 4.7723749e-01 9.6922609e-01 3.4909881e-01 4.4701039e-01 6.6835176e-01 8.7807032e-01 8.7272262e-01 2.1119253e-01 1.2038778e+00 1.5064820e+00 1.5654738e+00 7.8630314e-01 5.8942278e-01 8.9320425e-01 1.1940983e+00 8.6887698e-01 1.4210091e+00 7.4201890e-01 1.2452417e+00 1.0494370e+00 2.3749211e-01 9.1435339e-01 2.1409786e+00 8.2148003e-01 6.5993495e-01 5.7516438e-01 2.8835410e-01 1.8418099e+00 6.4756318e-01 1.8787743e+00 9.0810653e-01 1.6779282e+00 7.7021931e-01 1.3319441e+00 2.3098596e+00 1.7659238e+00 1.7880416e+00 1.4280932e+00 2.6604707e+00 1.1753998e+00 9.4281519e-01 1.3471074e+00 1.3158313e+00 1.3974368e+00 1.4547739e+00 8.7568652e-01 3.2288696e+00 2.6753697e+00 1.6330922e+00 1.7674989e+00 1.0240850e+00 2.3852911e+00 7.4743804e-01 1.5932991e+00 1.7495490e+00 5.8796666e-01 5.8374436e-01 1.1339991e+00 1.5083690e+00 1.8912106e+00 3.2526896e+00 1.2423778e+00 4.2436984e-01 8.5959137e-01 2.4097678e+00 1.8344669e+00 9.0791603e-01 5.8796666e-01 1.4645285e+00 1.6477112e+00 1.6088132e+00 9.0810653e-01 1.7454599e+00 1.9428935e+00 1.4315280e+00 1.1694177e+00 9.9244707e-01 1.7007353e+00 6.6154242e-01 1.4832888e+00 6.1810529e-01 7.1128716e-01 1.8601631e+00 9.7397874e-01 1.2254650e+00 6.8496652e-01 1.4755282e+00 9.0753778e-01 1.0443931e+00 1.3169341e+00 1.6226440e+00 1.6498870e+00 7.4980278e-01 8.0686941e-01 1.1940983e+00 1.2255953e+00 5.6318359e-01 1.1503759e+00 6.6361830e-01 1.4063472e+00 1.5603679e+00 1.6837563e+00 3.6536845e-01 9.5761359e-01 8.4619410e-01 8.6958016e-01 7.7821113e-01 1.6196626e+00 5.7306091e-01 4.4786319e-01 3.6086172e-01 8.2608188e-01 1.1831978e+00 3.8480889e-01 2.4265852e+00 1.2696247e+00 2.4764170e+00 1.5584328e+00 2.0444851e+00 3.1426915e+00 1.4493286e+00 2.6430316e+00 2.1446703e+00 3.2893516e+00 1.7955663e+00 1.6408995e+00 2.1004081e+00 1.5285733e+00 1.7064050e+00 2.0142932e+00 1.6802708e+00 3.9009617e+00 3.4821230e+00 1.8809382e+00 2.4651410e+00 1.1989033e+00 3.2268928e+00 1.3782117e+00 2.2651964e+00 2.5478630e+00 1.2124370e+00 1.1789342e+00 1.8358339e+00 2.3443222e+00 2.7210373e+00 3.9221023e+00 1.9136809e+00 1.2486828e+00 1.4646971e+00 3.1951870e+00 2.3252489e+00 1.6537705e+00 1.0855082e+00 2.1947734e+00 2.3108044e+00 2.2621105e+00 1.2696247e+00 2.4484679e+00 2.5504328e+00 2.0873736e+00 1.6773040e+00 1.7023842e+00 2.1476736e+00 1.1560388e+00 1.3558057e+00 1.5283845e+00 2.1664244e+00 1.9784646e+00 1.1457159e+00 1.0355160e+00 1.4985549e+00 1.0494370e+00 6.0365341e-01 2.6033464e-01 7.3803207e-01 5.6864482e-01 9.7352372e-01 1.8229204e+00 2.2308199e+00 2.2668270e+00 1.4769275e+00 1.3385535e+00 1.5931825e+00 1.1248155e+00 2.1466080e-01 1.9117251e+00 1.3652496e+00 2.0207624e+00 1.8704283e+00 
7.6880092e-01 1.6220723e+00 2.8778954e+00 1.6265611e+00 1.2621791e+00 1.3042843e+00 7.7313507e-01 2.5362196e+00 1.4082507e+00 1.8291996e+00 1.6183246e+00 1.3603639e+00 1.0878281e+00 1.3564590e+00 1.9053825e+00 2.6072470e+00 1.4737985e+00 1.6790332e+00 2.1679999e+00 9.4182667e-01 1.2929545e+00 1.1391970e+00 2.0265696e+00 1.8799960e+00 1.3547660e+00 8.8029208e-01 2.6196958e+00 2.4872661e+00 2.2706454e+00 1.4300844e+00 1.7151590e+00 2.0626281e+00 1.1997534e+00 1.2637010e+00 1.2307777e+00 1.0813975e+00 9.6603754e-01 1.3834169e+00 1.0564307e+00 1.5971466e+00 2.5708690e+00 1.4735640e+00 9.4160439e-01 1.5222760e+00 1.9572025e+00 1.7004687e+00 8.9142733e-01 1.0459007e+00 1.1049324e+00 1.4763267e+00 1.2674750e+00 1.6183246e+00 1.4769125e+00 1.6832034e+00 1.2843405e+00 1.6410601e+00 9.6706760e-01 1.5985259e+00 1.1910776e+00 1.0088064e+00 1.9822205e+00 1.3115314e+00 7.2626021e-01 8.5225534e-01 1.4476734e+00 8.6297946e-01 1.0334797e+00 1.2160426e+00 1.5358725e+00 1.3833366e+00 5.3528567e-01 1.2712561e+00 1.5367336e+00 1.6013312e+00 8.9845135e-01 9.1916314e-01 2.4152660e-01 1.0495503e+00 1.3530247e+00 1.8419620e+00 3.4651700e-01 1.2220171e+00 1.0116179e+00 6.2046469e-01 1.0696792e+00 2.0057768e+00 7.5914566e-01 4.4499696e-01 4.0664863e-01 8.1224041e-01 1.6406723e+00 5.8942278e-01 1.9060542e+00 9.6301827e-01 2.1281559e+00 1.1449868e+00 1.6017068e+00 2.8050285e+00 1.4536311e+00 2.3373419e+00 1.9472489e+00 2.8613983e+00 1.3924555e+00 1.3756347e+00 1.7433862e+00 1.3615778e+00 1.3332278e+00 1.5654312e+00 1.2872568e+00 3.4973752e+00 3.1986815e+00 1.9281666e+00 2.0588451e+00 8.3270853e-01 2.9373687e+00 1.1828955e+00 1.8231885e+00 2.1962402e+00 9.5979989e-01 7.5532639e-01 1.4672797e+00 2.0720690e+00 2.4566102e+00 3.5648048e+00 1.5414664e+00 1.0212756e+00 1.2733735e+00 2.8900916e+00 1.8289569e+00 1.2092544e+00 6.4558417e-01 1.8428644e+00 1.8966444e+00 1.9319316e+00 9.6301827e-01 2.0102936e+00 2.1030669e+00 1.7380754e+00 1.5489952e+00 1.3296349e+00 1.6538197e+00 6.3357758e-01 1.4295948e+00 5.4873947e-01 1.6126413e+00 5.8496636e-01 1.1009910e+00 6.0725725e-01 9.5139638e-01 1.3098483e+00 1.3941599e+00 1.6617787e+00 8.6702860e-01 4.2826573e-01 8.0996690e-01 8.1324137e-01 2.8553149e-01 9.9883272e-01 1.0921061e+00 1.8259719e+00 1.6053711e+00 1.1828265e+00 8.3161134e-01 7.0834786e-01 5.3106808e-01 9.8233874e-01 3.5366952e-01 1.4106682e+00 4.6484021e-01 7.5179033e-01 6.2055338e-01 7.8324937e-01 1.1546456e+00 4.7149050e-01 2.7022699e+00 1.3084256e+00 2.4620452e+00 1.5488588e+00 2.1433843e+00 3.0477876e+00 1.5122060e+00 2.4794486e+00 1.8496575e+00 3.5092631e+00 2.0205369e+00 1.5421829e+00 2.1550478e+00 1.4847626e+00 1.9338323e+00 2.2845215e+00 1.7093196e+00 4.0428524e+00 3.2768847e+00 1.4413624e+00 2.6112105e+00 1.4262166e+00 3.0341947e+00 1.2919157e+00 2.4486747e+00 2.5390232e+00 1.2420951e+00 1.3836252e+00 1.8380693e+00 2.2098781e+00 2.5420974e+00 4.0353061e+00 1.9425259e+00 1.0808550e+00 1.0871507e+00 3.1511951e+00 2.6568698e+00 1.7619640e+00 1.3391481e+00 2.2882514e+00 2.4739376e+00 2.4163220e+00 1.3084256e+00 2.5943375e+00 2.7895659e+00 2.2249457e+00 1.4927500e+00 1.8163092e+00 2.5068377e+00 1.3832520e+00 1.1807138e+00 2.3735475e+00 1.4416726e+00 7.3803207e-01 1.4480380e+00 1.6571634e+00 1.9125650e+00 1.5766889e+00 1.9793333e+00 1.6323793e+00 1.4021778e+00 1.1659675e+00 1.2498767e+00 1.3539814e+00 1.2332482e+00 2.0826771e+00 2.7811716e+00 2.1646850e+00 3.7371902e-01 2.0122804e+00 1.1585774e+00 1.3127802e+00 1.8544941e+00 1.1485726e+00 1.7450075e+00 1.3972701e+00 1.9880179e+00 1.7517164e+00 1.6394680e+00 
[... remaining rows of the preceding distance fixture omitted ...]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-seuclidean-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-seuclidean-ml.txt
new file mode 100644
index 0000000..ce80cb1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-seuclidean-ml.txt
@@ -0,0 +1 @@
+ 1.4330520e+01 1.4635426e+01 1.3450855e+01 [... one line of standardized-Euclidean reference distances ...] 1.4181208e+01 1.2551765e+01
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-spearman-ml.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-spearman-ml.txt
new file mode 100644
index 0000000..b50fe3a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/pdist-spearman-ml.txt
@@ -0,0 +1 @@
+ 9.3540954e-01 9.7904590e-01 8.6703870e-01 [... one line of Spearman reference distances ...] 9.8843084e-01 7.4563456e-01
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-bool-data.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-bool-data.txt
new file mode 100644
index 0000000..df0d838
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-bool-data.txt
@@ -0,0 +1,100 @@
+0 1 1 0 1 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 1 0 0 1 1
+[... 99 more rows of 30 random booleans ...]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-double-data.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-double-data.txt
new file mode 100644
index 0000000..039ac50
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-double-data.txt
@@ -0,0 +1,100 @@
+1.172993630434470589e+02 1.905532343119886605e+02 [... 28 more doubles ...] 2.534167825404447001e+02
+[... remaining rows of 30 random doubles each ...]
1.733528322945315949e+02 1.292815908595541146e+02 +1.453195062153936874e+02 1.443849872704900008e+02 2.393030362110915519e+02 2.203850914291498668e+02 2.628192548589183275e+02 1.142161203389242132e+02 2.954875947743198594e+02 1.914138981839176950e+02 1.956478457154231023e+02 1.282875398486639824e+02 2.801001077571227142e+02 2.478095646281364566e+02 2.467477848581343949e+02 2.819656424464902784e+02 2.951823714077539194e+02 1.777239847229775478e+02 1.197979896746704185e+02 1.481181033052623661e+02 1.906710229153984528e+02 2.142395628283543658e+02 2.300980272040501973e+02 2.228884003748859186e+02 2.473330601440014220e+02 1.391193242835927322e+02 2.836257563055140736e+02 1.510096324299383923e+02 2.202302141125946946e+02 1.931468179284185851e+02 1.332427495686727639e+02 2.591048546650930575e+02 +1.878681542531208208e+02 1.576240359584147654e+02 2.653849736815447500e+02 2.963544993865212973e+02 2.044592436730770828e+02 2.022626486161902903e+02 2.692262675681025144e+02 2.660999355751699227e+02 2.275843495473382347e+02 1.090849337992742818e+02 2.095602584555617227e+02 1.896271059113536808e+02 1.103822849104477513e+02 2.916911739044173260e+02 1.131212278363718582e+02 2.998892666268029643e+02 2.476782245756396605e+02 2.259689579913920738e+02 1.853942231198421950e+02 1.358270117521841200e+02 1.538630682720535674e+02 1.002148317174243601e+02 2.538393939061405433e+02 1.631649956267838206e+02 2.086654853664906000e+02 2.065167771482954322e+02 2.184161808630845485e+02 2.204789814939956045e+02 2.876785893506615821e+02 2.415299687386639675e+02 +2.578989465605797591e+02 2.309888943086805853e+02 2.139372792253111584e+02 1.438019921733897775e+02 2.686852572045135616e+02 1.347038004304963579e+02 2.662658866335509060e+02 2.378358170108797367e+02 2.901455078003721155e+02 2.653867524737770509e+02 1.011162296015096302e+02 1.236447329941733528e+02 2.440241295351771669e+02 1.285889645706482725e+02 1.234088480316093808e+02 2.765916670935633874e+02 1.132915304101479421e+02 2.967043774237617413e+02 2.960414394814537786e+02 1.923965028192617410e+02 2.177448618307050765e+02 2.328047369831131732e+02 1.702256773965170282e+02 2.320080409490440729e+02 2.962065584958517093e+02 1.421971909775941185e+02 1.416181340866144183e+02 2.318260414882616374e+02 1.990521696869427046e+02 1.291045564046920333e+02 +1.562042774178686386e+02 1.004265446278790392e+02 2.987714610921041185e+02 1.843637355858842284e+02 1.975513718825063165e+02 2.869996482942455032e+02 1.598134132589713943e+02 1.814921031876193638e+02 2.433389905907341983e+02 2.220363745053336970e+02 1.548306942100590504e+02 2.274512269554506361e+02 2.173006200058655963e+02 2.139515436667214772e+02 1.820439741095771353e+02 2.954110718222074183e+02 2.706126458816278273e+02 2.546812106115172583e+02 1.499899738326257363e+02 1.498010641912065921e+02 1.897725780579399668e+02 2.531561160917130167e+02 2.568891780637028432e+02 2.223136077092870551e+02 1.518604819103856585e+02 1.610422120589223027e+02 1.090455809489133259e+02 1.950503873748027388e+02 1.235704160644129388e+02 2.711492093024702967e+02 +2.039597038432034424e+02 2.026680584622021684e+02 1.365818873512059213e+02 2.909476552420245525e+02 1.721994194158640425e+02 1.854386667051114443e+02 2.287109571295530372e+02 1.912591665763447963e+02 1.607322994166321450e+02 2.949516230628389053e+02 2.522065912002103403e+02 1.869433122585654701e+02 1.235797649248940644e+02 1.522422059501078024e+02 2.738245135411146975e+02 1.059681837441489307e+02 1.013027238331489173e+02 1.660100598156148237e+02 2.454471731623151243e+02 2.467503196183328100e+02 
2.584564749953993896e+02 2.079587352810677316e+02 1.650926041957846451e+02 2.269719270682073784e+02 2.376254891983122093e+02 1.510146656008620596e+02 2.672848371954185041e+02 2.692845974117340688e+02 2.180714754246087921e+02 2.186797802447831884e+02 +1.704231257711912519e+02 1.993416036368699906e+02 2.293703655438095268e+02 1.494582642918422266e+02 1.988970317734676030e+02 2.329763291241497711e+02 2.594871448385057420e+02 2.168089936885102134e+02 1.825320854593447280e+02 1.816754553181755796e+02 2.164740515812325725e+02 2.676208645391697019e+02 1.298365075936954725e+02 1.802664596093496243e+02 1.015344620621038132e+02 1.955048336384612639e+02 1.938953913674110083e+02 2.716932071347151805e+02 2.391085978949223829e+02 1.852300387899809380e+02 2.933293185307651356e+02 2.502753353909542966e+02 1.326128348575908262e+02 1.132638325194699433e+02 1.382024010322260494e+02 1.899310337488860796e+02 2.577639546186944699e+02 2.130234590296898887e+02 2.056292296528304746e+02 2.070746044453983927e+02 +2.712524956603344890e+02 1.103212761114690750e+02 1.501201791543782917e+02 1.588084859702673555e+02 1.780379814134324192e+02 1.938691258391782810e+02 1.322057441019641146e+02 1.105823874551086590e+02 2.879365916037821194e+02 2.457617763012990224e+02 1.036189749330240488e+02 1.682919366264929124e+02 2.271749409116763161e+02 2.468308259697249127e+02 2.530034131464132088e+02 2.481420904342841709e+02 1.546080547019561209e+02 1.278414739842506265e+02 2.234886960240669111e+02 2.535365186455997843e+02 1.599130733896959669e+02 1.151371295028686035e+02 2.378656188176093451e+02 2.901072209563180877e+02 2.524076257924749882e+02 2.849501171254129304e+02 1.802791659856764568e+02 1.527418387706650833e+02 2.578820596338672431e+02 1.208856989199291263e+02 +1.884906470590645711e+02 2.304295185581007672e+02 1.035923344330140736e+02 1.647061655195892627e+02 1.910201770870304472e+02 1.752788518438422614e+02 2.763014227316762117e+02 2.545709641405486252e+02 1.642694881393259152e+02 1.850698110761380804e+02 2.423689469305483328e+02 2.821007056776016384e+02 1.440765548977453250e+02 1.082195827231368952e+02 1.292487205530619008e+02 2.136496853657876613e+02 2.268509220579896635e+02 2.999629735037570981e+02 2.135306905316524535e+02 2.807718279523737692e+02 1.079256111018183759e+02 2.233050677333321801e+02 1.960571416898615951e+02 2.930642308139058514e+02 1.350490077967585307e+02 2.626074042719769750e+02 2.812196827814445328e+02 2.812753678081913336e+02 1.893738913514469004e+02 1.237248675858835725e+02 +2.024005284879252144e+02 2.663611407988397559e+02 2.687079844301063076e+02 1.583164038086077312e+02 1.451019436850150441e+02 1.100558451420041450e+02 2.083655450975085159e+02 2.034012033819327598e+02 2.745375932717230398e+02 1.454718097055225599e+02 1.519068131933423729e+02 2.522666952972969625e+02 2.409340029943109300e+02 1.697386944425205115e+02 1.092659514648129289e+02 2.785598218078254149e+02 1.404092026094307357e+02 2.152301424167146990e+02 1.170396027347833723e+02 2.495323893679063474e+02 2.070836095469416591e+02 2.187978925167305135e+02 1.478606128149070855e+02 1.189323178954538207e+02 2.012925160284665651e+02 2.080878545398990127e+02 1.510128433840351647e+02 1.657302151838663065e+02 2.177026636795220043e+02 1.221198981216710422e+02 +1.411258561955272341e+02 1.419717097672817374e+02 2.247481951315160984e+02 2.805973971111802712e+02 2.755562061324142178e+02 2.039769327420251557e+02 2.994080883760036045e+02 2.417843309736466040e+02 1.023751441731232319e+02 1.491356884971497152e+02 2.542464200475323821e+02 1.496044144381669128e+02 
2.829129207809560285e+02 2.479316882407134699e+02 2.441205876677642550e+02 2.045492313770996020e+02 2.855582203360229414e+02 2.884005586284110336e+02 2.039668453101600676e+02 1.690279206477617890e+02 2.136822090795746760e+02 1.254275901194574772e+02 1.084851042192170922e+02 1.656011685190305229e+02 1.415195951026897774e+02 1.578115814760412263e+02 2.619737257057257693e+02 1.492347147839753347e+02 1.627213988646173561e+02 1.343297485726322691e+02 +2.544675070683062756e+02 1.367461330002975899e+02 2.928364121110963652e+02 2.024865028281971036e+02 2.758937379397792142e+02 1.293527538914390220e+02 1.003170531204512059e+02 1.514803620238746760e+02 2.603616046431354789e+02 1.790387290949859960e+02 1.954717187769221027e+02 1.325226280128280223e+02 1.522166198122710625e+02 1.162911821325583048e+02 2.798489406348742250e+02 2.521718932296424498e+02 2.622327475379161115e+02 1.027798265388270949e+02 2.437256510683693023e+02 1.911771820917219884e+02 2.722604457055863350e+02 2.850557929858495640e+02 1.953760157441756746e+02 2.473572905253965644e+02 1.891404804097296051e+02 1.514672503279451803e+02 2.213565012031598940e+02 2.253356064978207769e+02 2.044629345029305227e+02 2.805872739342098612e+02 +2.859142434488251183e+02 1.016009480575973356e+02 1.779351649172412522e+02 2.205171340775500539e+02 2.104472905774927369e+02 1.755755724600441567e+02 2.751836189782782185e+02 2.820692049982218350e+02 1.337557428916256015e+02 1.569761138230965969e+02 1.991757527032745543e+02 2.615974376894962461e+02 1.944849272958306017e+02 1.868411694165790777e+02 2.994394032068257729e+02 2.802783326794233290e+02 2.693871918204162625e+02 1.750293298802730249e+02 1.468161278725061720e+02 1.272003326865558108e+02 2.233103517167062932e+02 2.103066399402185027e+02 2.720825853079193735e+02 2.728915492341989193e+02 2.160004538807991992e+02 1.325145501710478015e+02 2.549827549782140466e+02 2.921469675413995901e+02 1.846231529604695822e+02 1.391152989663993651e+02 +2.538717579982014456e+02 1.450483481068324352e+02 2.720200816305956550e+02 1.120834821105324011e+02 1.703801876168104741e+02 1.091293661435919233e+02 1.410263490040598526e+02 1.910022197757120352e+02 2.505223413771657022e+02 2.069613533172621374e+02 1.367200764291426935e+02 1.269156762039037574e+02 1.459486945063737267e+02 1.585863332989725905e+02 1.433846106215619329e+02 2.893202513225785424e+02 1.754070497414596730e+02 1.678900237854272746e+02 2.363821059303507752e+02 1.088858921730617908e+02 1.962435837543239927e+02 2.151311182954276831e+02 1.943029551670006754e+02 1.670799798236046172e+02 1.348235227224938910e+02 2.005836112104490212e+02 2.601588534628079969e+02 1.194827586439497935e+02 2.131891535893303740e+02 1.835674362703964277e+02 +2.872207377280434457e+02 1.680389491751975299e+02 2.268072198735419533e+02 1.324343035526375729e+02 2.746241572770433095e+02 2.142161570690199710e+02 1.852290440736100550e+02 1.772431485621305285e+02 1.144750125154023266e+02 2.162070901557998468e+02 1.490690769171257557e+02 2.904041493178549445e+02 2.673617561413327621e+02 2.904362235840736730e+02 1.438791831406123833e+02 2.596893065528289526e+02 2.617155941751458386e+02 2.388486986717779246e+02 2.718819501315180105e+02 1.265484539827731680e+02 2.508989305854047700e+02 1.677208481362706323e+02 1.527665277518251230e+02 2.069026506407369084e+02 2.223100964495413336e+02 2.859845330217733022e+02 1.430291068893224349e+02 1.186508486537613436e+02 2.043257492072551713e+02 2.909823892985461953e+02 +2.385945641230763670e+02 2.011887933217761031e+02 1.622448188725907983e+02 1.738874847453056987e+02 
1.669498482708885376e+02 1.853462372214463016e+02 1.514500885098960907e+02 1.569159134451362547e+02 2.521399095730983504e+02 1.246878140446721659e+02 1.758330561641313352e+02 2.722601647479554003e+02 1.679012078705679869e+02 1.710944469563905272e+02 2.012619557548435978e+02 2.130692925302264200e+02 2.489118511754019778e+02 1.553758318484749452e+02 2.531318516516165857e+02 1.895498740333992487e+02 2.010265603399928409e+02 1.805605111948569856e+02 2.471772127430102159e+02 2.822665908577009759e+02 1.256656757093761314e+02 1.218957078832023626e+02 2.851942693987446660e+02 2.434079459678487751e+02 2.183256665756584312e+02 1.881473862468819220e+02 +2.878274557836845133e+02 1.654481949983921254e+02 1.215681808546938214e+02 2.567820905945674781e+02 2.104106688330284101e+02 2.960796083414018085e+02 2.020680111052573693e+02 2.328934707961639106e+02 1.081575190462602336e+02 1.003340046261853189e+02 2.009697278729638299e+02 2.231963192062537757e+02 1.203849639323555323e+02 1.187994179134823156e+02 2.211937485225296030e+02 1.667300587261732119e+02 1.727379541915926211e+02 2.085029285798690353e+02 2.440827389167183981e+02 2.864522928573259151e+02 2.974890568790378893e+02 2.102945085846974393e+02 1.972598274048171447e+02 1.762889209976547136e+02 1.346946323322499666e+02 1.554434255958064170e+02 2.915634104756007901e+02 1.434053307556222876e+02 1.055800565037633163e+02 2.043924431141962259e+02 +1.494596010135965116e+02 1.369114048625681335e+02 1.414146701131132886e+02 1.383970135097982848e+02 1.734304788623498155e+02 1.594301265610334610e+02 1.040146208229407137e+02 2.208381597698417806e+02 2.904998286250861383e+02 1.300157615397056929e+02 2.667076669416877621e+02 1.062418844419948556e+02 2.717657999079561364e+02 1.054097765488278640e+02 2.401074677516734823e+02 1.045408432466875297e+02 1.330046749931937882e+02 2.297648034226271534e+02 1.488059718063634307e+02 1.725671935994615183e+02 1.330818497812682608e+02 2.341687919103425770e+02 2.983144736799429211e+02 2.798846823197050071e+02 2.218705077010061473e+02 2.681931695329894865e+02 2.339384973461015420e+02 2.893058480095726281e+02 1.539801301873031321e+02 2.746688360458649640e+02 +1.330701439354522222e+02 1.727884450558678395e+02 2.309082669627648272e+02 2.027633892073664299e+02 2.725503026364725656e+02 1.999882667367585896e+02 1.904108867169430255e+02 2.952458047945178805e+02 2.903769421220866320e+02 1.593020200554085477e+02 1.236139458806368623e+02 2.670862420061573062e+02 2.910830183895285472e+02 1.860711175093342149e+02 2.161724988935532963e+02 2.564488756979296795e+02 1.231566645138573648e+02 1.554206254375235403e+02 1.148558104746345521e+02 1.512714227454516163e+02 1.953024826710307025e+02 1.296022137194406127e+02 1.500450396815122076e+02 2.611742573447975246e+02 1.601671705158374550e+02 2.391666762859087214e+02 2.566415095930981352e+02 1.923304801412870404e+02 1.194174883996373353e+02 1.970722090829630986e+02 +1.912113734453868688e+02 1.498407015577022605e+02 2.038188614169363007e+02 1.315017316695561647e+02 2.564290419741012101e+02 1.890015309531812022e+02 2.451565642315005960e+02 2.794356592632736920e+02 2.286941218755985972e+02 1.959549984609147941e+02 1.183834182035568716e+02 2.102820643179567242e+02 1.748108698585573393e+02 1.534379248653211221e+02 1.919662859034699522e+02 1.273611408042816464e+02 1.848163823983119585e+02 1.719445827292381637e+02 1.098466009889928898e+02 2.781108902268393877e+02 2.089286134506138524e+02 2.324518337977864348e+02 1.983840049195213169e+02 1.897881971862217370e+02 1.057077761008814605e+02 2.693629461665184408e+02 
1.359710117509105487e+02 2.191184409971657487e+02 1.295811391257115304e+02 1.272165218667991553e+02 +1.987244486959793903e+02 1.516360617950651317e+02 2.198509518241761498e+02 2.494181713303175911e+02 2.903223989223247372e+02 2.847249789220907132e+02 1.747037051964282171e+02 1.610307305098726829e+02 1.866621867053561061e+02 1.016530888490581503e+02 2.606194448419089440e+02 1.820037020201941402e+02 2.650669443765450524e+02 1.137210849453726098e+02 1.329244106101075715e+02 1.741312140090854257e+02 2.301425980066611885e+02 1.051708772384664030e+02 1.994040172335078864e+02 1.874773290907829733e+02 2.745616984783777070e+02 2.354781865911449756e+02 1.598287033335407159e+02 2.650689470710170212e+02 1.643692352330562017e+02 2.991199217036622713e+02 2.713535332162406348e+02 2.516280148665988463e+02 1.124367393830256532e+02 1.725070309959049837e+02 +1.637875882282461077e+02 1.407642428016634426e+02 2.759741260511348173e+02 1.982469453863400304e+02 2.966736241669494802e+02 2.756530253528777052e+02 1.426661371226006167e+02 1.585144634205103102e+02 2.836415355000413001e+02 2.468213340046699784e+02 2.898204535963063790e+02 1.711408259966125343e+02 1.900542569026269177e+02 1.112151031999617032e+02 2.679918109779015936e+02 2.737346364036235400e+02 2.597479311885246602e+02 1.719445390286030886e+02 2.361360157374418236e+02 1.123330408578339785e+02 1.214203690485689719e+02 2.552722899309185891e+02 2.436705678248840456e+02 1.596697357728296254e+02 2.533254006866929444e+02 2.066863222258713790e+02 1.194370826184286329e+02 2.943584774485435673e+02 1.636272134478143130e+02 1.191267138602315185e+02 +2.350924626651462006e+02 2.263138093076711357e+02 2.206572605284771385e+02 1.704171521239532296e+02 2.000250897638135257e+02 2.966317084215347109e+02 1.350543763227695138e+02 1.248113195978286285e+02 1.480602782771696297e+02 2.391913401309390679e+02 1.908758915801345779e+02 2.476074601271855045e+02 2.408834383325319095e+02 1.009169451940341560e+02 2.567526834523320645e+02 1.791854948779896688e+02 1.412277552146151152e+02 2.660711025781407670e+02 2.073940326990519054e+02 2.509760072499196610e+02 1.358593750308925223e+02 2.127422683140523532e+02 1.874643773621423293e+02 2.844455725631112273e+02 2.197223292953194118e+02 2.049519862750077266e+02 1.674367936692717365e+02 2.806316262053937294e+02 2.040091003350897836e+02 2.675290975004411962e+02 +1.483513543637005796e+02 2.384008274111940011e+02 2.834409911154408519e+02 1.344593118283445392e+02 2.346883831968173979e+02 1.381882879805813218e+02 1.241165074750676638e+02 2.186327911062819567e+02 2.466602279029802673e+02 1.573094529523951906e+02 1.568918412618390903e+02 2.289205163045023710e+02 1.170165333644822283e+02 1.742406104080407658e+02 2.082974381484526702e+02 1.600869123712819260e+02 2.399160913983472199e+02 2.877189278027444743e+02 2.845252294036096146e+02 2.342337907657317544e+02 1.496264758341107779e+02 2.905797831387872066e+02 2.824703799011629144e+02 1.047015685176013307e+02 1.056531628249932169e+02 2.778559625738202499e+02 1.693549799118289343e+02 1.654193764711911570e+02 1.062077606699500762e+02 1.159643419206647792e+02 +2.694780377267857716e+02 2.229138360502907403e+02 2.407432883969363218e+02 1.240072643521201741e+02 2.128611568148922970e+02 2.114050669978733481e+02 1.042337934877265297e+02 1.044783539591350490e+02 2.706611056394938259e+02 1.972285130309975898e+02 1.959046941044780681e+02 2.915493579522836853e+02 1.131994346897827342e+02 1.197362406389762839e+02 2.877593780027675621e+02 1.089470964294721824e+02 1.996015695685267417e+02 2.185569019121031999e+02 
2.102686704320404374e+02 2.955299037924150980e+02 2.987478446256551479e+02 2.517129931888254646e+02 1.552463625479420557e+02 2.295020326441428153e+02 2.886454895961533111e+02 1.869792800456660871e+02 2.703426621835664037e+02 1.873514421416134326e+02 2.714620374401066556e+02 1.623625260081516331e+02 +1.457420078291350194e+02 1.926195242081234369e+02 1.841639049563959247e+02 1.397830290030836125e+02 1.287503203163068406e+02 1.684614546803193775e+02 2.820658047345126533e+02 2.986548244924653090e+02 2.631399932039782925e+02 2.870930868530864473e+02 1.141938207690214426e+02 2.868552010662050407e+02 2.019110175402121286e+02 2.840219745246005232e+02 2.848478851173646262e+02 1.902287203163165259e+02 2.696968940302964484e+02 1.690355482825476656e+02 2.171695948786692725e+02 1.960363641465239652e+02 2.930566891688549731e+02 1.380341365242818483e+02 1.769912313914243214e+02 1.164985277343077996e+02 2.079184380436491324e+02 2.871364788135472850e+02 1.796231479741346391e+02 1.115892945700443875e+02 1.922852518794877028e+02 1.851500906627327083e+02 +2.894943401361737187e+02 1.972990286414578804e+02 2.801948561309920933e+02 1.993490085147259947e+02 2.539099743775018112e+02 2.972486389690005240e+02 1.162404922698449354e+02 1.801898545246462504e+02 1.283416456049016858e+02 2.289248555429664407e+02 2.419505668531598985e+02 2.755101537543703216e+02 2.786083442131507013e+02 2.461931811431258552e+02 2.699066237266536064e+02 1.088542193903703179e+02 2.302113104476973149e+02 2.158136503417114227e+02 2.797451432348925096e+02 2.832754349673875822e+02 2.207567008139471909e+02 2.920947868166995249e+02 1.300092217647513735e+02 2.953259288980694350e+02 2.539624465668687492e+02 1.304833679125420645e+02 1.051395153781939484e+02 1.855592224876973830e+02 2.160289702497469477e+02 1.227895712666205981e+02 +1.029685235386965587e+02 1.410297052380113882e+02 1.832105986621241982e+02 1.016727951098498579e+02 2.130361696974732126e+02 1.817578553203918830e+02 2.644724203174304193e+02 1.713346250427240420e+02 1.297164370175517547e+02 1.072810924841072193e+02 1.083932811014470161e+02 2.860684171745337494e+02 2.893854146138399983e+02 1.677808320623732925e+02 2.343535290724524600e+02 1.209564642240636090e+02 1.329537830609780542e+02 2.924542956964438645e+02 2.733376468658280487e+02 1.397146179999238598e+02 1.103570089598620285e+02 2.231457082965310690e+02 1.056672424832338635e+02 2.887779644840117612e+02 1.127167878193751704e+02 1.387640376146708263e+02 1.791595456124304633e+02 2.709107895779202408e+02 2.238624693992912569e+02 1.773395240564728397e+02 +2.317578772498348769e+02 1.294950944138938667e+02 1.126253428029936572e+02 1.371351849575549693e+02 1.785990678455200964e+02 1.021081186758702444e+02 1.471984209931611360e+02 2.907355141803875540e+02 1.881128962816476644e+02 2.776434621780599628e+02 2.231668573818950279e+02 1.905362514139340817e+02 1.921875823712000226e+02 1.027725913116546792e+02 2.939602582690168902e+02 1.776540079128602656e+02 2.761214484196684111e+02 1.042033722248946646e+02 1.812858538041361385e+02 1.739774673118114663e+02 2.626640185867897799e+02 1.702975408841979288e+02 2.558138050153142729e+02 1.733257751657050392e+02 2.918973111180089859e+02 2.499103812623473857e+02 1.210050998380505973e+02 2.819910650801346605e+02 1.887952629909842699e+02 1.910084514453274380e+02 +2.212539479167726029e+02 2.774434360961662378e+02 2.337566454731646104e+02 2.345785537275947661e+02 2.365459264006348405e+02 1.983982238092833086e+02 2.030822332599765332e+02 1.995891111618029186e+02 2.834365683300363798e+02 1.036872616932399609e+02 
2.192093181482490252e+02 2.601252995545215754e+02 2.498786393235831724e+02 2.102914196276636858e+02 1.344974807588668000e+02 2.319076536245909210e+02 2.769341510052834110e+02 2.705990780330756138e+02 1.679097240924248240e+02 2.394521666103182724e+02 2.042111123157340842e+02 1.679545908808316028e+02 1.638112120198904051e+02 2.498667640522866407e+02 1.298749690282424183e+02 2.953546510122243944e+02 2.420377599473625025e+02 1.972281420856064642e+02 1.511153679243939223e+02 1.785899871179086063e+02 +2.568297621323404201e+02 2.469847896802298237e+02 2.766623631158322496e+02 2.476135901735717937e+02 1.788596740963971570e+02 1.849716544556056874e+02 2.568516536462929594e+02 1.692762419184084877e+02 1.468834240718183537e+02 2.716053370235183593e+02 1.674083895790932957e+02 2.340636951853666687e+02 1.637725360284847227e+02 1.316562872243186177e+02 2.850086566701365882e+02 2.066513343106022944e+02 2.990778363456342390e+02 1.780020440519503495e+02 2.906711993591478631e+02 2.149926413975278479e+02 2.151504627144789765e+02 1.458362697904619836e+02 2.339644011324822657e+02 1.740513991402896181e+02 1.804876886135730842e+02 1.706585538790989176e+02 1.113370339871644603e+02 2.032819788543359039e+02 1.225434838619497526e+02 1.558188197132453183e+02 +2.752385657001058803e+02 1.704994416021052643e+02 1.607090409105587696e+02 2.031247490318933444e+02 1.333383797740430339e+02 1.922643047184382112e+02 2.665685682619526915e+02 2.611043497447243453e+02 2.444450591022788615e+02 1.012899678037660181e+02 2.236752860048796947e+02 1.164606756896235993e+02 1.768812782093617955e+02 2.532808672341815850e+02 1.308823477633827395e+02 1.683394957344131626e+02 1.787390150786144716e+02 1.962681762314343530e+02 1.178176219749694980e+02 2.151624908275416885e+02 2.951256579216935734e+02 2.058583926262361388e+02 2.348769662163374790e+02 2.500118096543036472e+02 2.065978549387351109e+02 1.732426267043477139e+02 2.575950640438621804e+02 1.826939497339359946e+02 1.586062531006688801e+02 1.141086110094916819e+02 +2.107478059550890066e+02 1.212326460542207940e+02 2.154852140069355073e+02 2.624147598788578648e+02 1.169795422214265699e+02 1.682202484364929660e+02 2.987700686247625299e+02 2.259973608163532504e+02 1.912690930240648015e+02 1.896338093439390775e+02 2.747727757049322008e+02 2.388804299971102978e+02 2.538821160842531128e+02 1.839990833334872491e+02 2.839611350159472067e+02 2.953225980324958755e+02 1.674336071760058076e+02 1.609172697163818953e+02 2.902596210806400450e+02 1.513824951234124114e+02 1.873458283487339600e+02 1.695960935104061491e+02 2.116215526550050470e+02 1.849422962892989233e+02 1.434256749723924713e+02 1.304784783123307079e+02 2.632948417544853328e+02 1.656472047377057777e+02 2.303125851744007377e+02 1.681993961373014486e+02 +1.104191565760665128e+02 1.750924257030650040e+02 1.242494131306669090e+02 1.541741282893887899e+02 2.585460716706878657e+02 2.286423505464783261e+02 1.890990979891397501e+02 2.707781238779197679e+02 2.619171833457787670e+02 2.695823002806438353e+02 1.941989480397771786e+02 1.389058748786196134e+02 1.283479072532797431e+02 2.347481590897206729e+02 1.518985431591505630e+02 1.757095590143896402e+02 2.225334593093496096e+02 2.231309387578290568e+02 1.039310896134069395e+02 2.614149485334186238e+02 2.212890027388380076e+02 1.425609106790709859e+02 1.376620423520403733e+02 2.403640719649376933e+02 1.152284694789922526e+02 2.108068210397188409e+02 2.526640691383259991e+02 2.323633859683563969e+02 2.720522122905912283e+02 2.498034621012949685e+02 +2.223449436042899947e+02 2.823923482876032267e+02 
1.728419664392092727e+02 1.542710015610415724e+02 2.699062389875002737e+02 1.776741825057288793e+02 1.800001384193664080e+02 1.819433000632012636e+02 1.436484983468620840e+02 2.344086094824976954e+02 2.824459866922626361e+02 1.860318500101035681e+02 1.749968777772715498e+02 2.792448396035428004e+02 2.134719239619671498e+02 2.649346822194891047e+02 2.535109715864082602e+02 1.651109960016319178e+02 2.407385671793928736e+02 2.276937454871455770e+02 2.965404491761371446e+02 1.771850291606413634e+02 2.317902380753697855e+02 2.233400563607936817e+02 2.471010629200553694e+02 2.999085009765063319e+02 1.263611681933084725e+02 2.954593528043474180e+02 2.279026703099021915e+02 2.630592311905735414e+02 +1.662671322607742752e+02 1.600442354914371208e+02 2.476541290397616137e+02 1.471310870365195740e+02 2.302232198157895198e+02 2.833854716762933776e+02 1.464787719165046553e+02 1.913553080525503560e+02 1.014594285276723156e+02 2.182963956218923158e+02 1.629807715448000636e+02 2.692152036144454428e+02 2.287521686048013976e+02 2.982465613581407524e+02 1.646080094271899839e+02 1.685350412843276899e+02 2.638506951547767585e+02 2.931520510309920837e+02 1.395453733045734168e+02 2.192750645467382355e+02 1.118562057344099543e+02 2.210439168983162972e+02 1.977199388190010438e+02 2.248771354041466566e+02 2.967583759675493411e+02 1.144799677712354793e+02 2.877369511761256149e+02 2.831237961244747225e+02 2.909105411130262269e+02 2.550977837950437390e+02 +1.519738194711488006e+02 1.042788193386050608e+02 1.298121344332743377e+02 1.827398187867084971e+02 2.371985543371917800e+02 1.647119082252074236e+02 2.792046599520904238e+02 1.737333830141970452e+02 2.019611337599129968e+02 2.402390448779260623e+02 2.107045415433176174e+02 2.447101973248666411e+02 1.584507446746840174e+02 2.877533155913679366e+02 1.209142860803932251e+02 1.903846717728129931e+02 1.485923447895592631e+02 1.040627746119376695e+02 2.329784390325348795e+02 1.136264746597146882e+02 1.019818146651219024e+02 2.395077159260278847e+02 2.571474008697522322e+02 2.507839876514990465e+02 2.649762964978717719e+02 1.398370322453145889e+02 1.116668292809188614e+02 1.262068209877756289e+02 2.561228606182183967e+02 1.019925993853918413e+02 +2.525550526067758881e+02 2.649927164229666232e+02 1.457764901336312846e+02 1.519121804298574148e+02 1.112983565335166247e+02 2.979018464293943680e+02 2.517559946611144142e+02 1.257251989750113239e+02 2.377842966816966737e+02 2.692916709774201536e+02 1.558791612193160745e+02 2.988101508442036334e+02 1.264682305510686575e+02 2.586186621657187743e+02 2.397705732393993969e+02 1.799773948514575750e+02 2.289212202830902072e+02 2.551439950194432242e+02 2.270410183155361210e+02 2.624250216967006395e+02 2.894508375480465361e+02 1.106681053253299183e+02 1.696755343387707171e+02 2.302155275158106917e+02 1.445113211107399138e+02 1.886794441144848236e+02 2.129906512422033131e+02 2.340704769023953986e+02 1.082933010325512981e+02 1.977265970892881626e+02 +2.874406426475449052e+02 1.913451373833616742e+02 2.647704607931181044e+02 1.881279366057496532e+02 2.840067538093052804e+02 2.179159896935567247e+02 1.839859875309309132e+02 1.189702187115672132e+02 2.794517441847542614e+02 2.815599370853284427e+02 1.258259904677427699e+02 1.428483537633051412e+02 2.541426109645265967e+02 1.338781623221585164e+02 2.877181693280556374e+02 2.041742222547631513e+02 2.429167887622087392e+02 1.861891141000048435e+02 2.815058357304060337e+02 2.932279451804108703e+02 1.428092602118218792e+02 1.129541128601477595e+02 1.104970415865426503e+02 1.361068733124779726e+02 
1.702082770497633533e+02 1.583852379729134157e+02 1.614070717213254511e+02 1.054529192214523476e+02 1.116913943762218366e+02 1.806474879921846366e+02 +1.904583320230821926e+02 1.477903225290235980e+02 2.926623631581093150e+02 2.267002240281469199e+02 1.643763662729302268e+02 2.199235242233247902e+02 1.853923849032223359e+02 2.941726936508506469e+02 2.665966841434134835e+02 1.199566433868006357e+02 2.951991052054676175e+02 1.594510101065885124e+02 1.458298791153635534e+02 1.532145001211049475e+02 1.411023254500616133e+02 2.140513226665028128e+02 1.678784758049908419e+02 1.708308530430679184e+02 2.099440033407245778e+02 2.664570659333852518e+02 2.959905162222905801e+02 2.829445582187913715e+02 2.588706049990775000e+02 1.722199615074994483e+02 2.869184560072056343e+02 1.681559218785307053e+02 1.503240659973911306e+02 2.588597461006905291e+02 2.678295026364270939e+02 2.154561503934444886e+02 +2.071927904539387839e+02 2.171736003654224305e+02 1.593735315924418785e+02 2.947356579175152547e+02 1.742775794491871011e+02 2.184611101357660914e+02 2.225198306238390842e+02 2.168369296352294668e+02 1.755672175076374231e+02 2.252214925755263835e+02 1.563369877784152209e+02 2.085332604119019209e+02 2.572482649031854862e+02 2.951800051631508950e+02 1.079183556031880329e+02 1.218838648771928774e+02 2.685371616407055626e+02 2.419162624723466877e+02 1.022244855205179022e+02 1.101224552326326602e+02 2.597819405832950679e+02 1.134555412120959517e+02 2.870491931154815575e+02 1.374365654160442318e+02 2.645641258978021142e+02 2.531141673781916666e+02 2.361747183362105886e+02 1.893108861581111171e+02 1.539026912190118139e+02 2.501170032332128415e+02 +2.547888423116186232e+02 1.853670755857669974e+02 1.389074705955763420e+02 2.709929622842061008e+02 1.228800068832790515e+02 2.778321736112652616e+02 1.309641642706778555e+02 1.156980811627219055e+02 1.431313378740429982e+02 1.646591400066212714e+02 1.920182917083556049e+02 2.178001706163468043e+02 2.235489712948179886e+02 1.079088316874027242e+02 2.447091545393394370e+02 2.320303973549428065e+02 2.359105911115680101e+02 2.382951907588607128e+02 1.062067779247245483e+02 2.905379355334102911e+02 2.023335418134440715e+02 2.128348219019524095e+02 2.865957710750057004e+02 1.782427960783044796e+02 2.856139874187100531e+02 1.139905905655008098e+02 2.264676166669663360e+02 2.479179013019825675e+02 1.746165350218777803e+02 2.255842464851874070e+02 +1.883869033800616819e+02 1.965817072065136699e+02 1.890868666652849015e+02 1.898737766004000491e+02 2.779218373710688184e+02 2.134628932560298722e+02 1.100835458783813436e+02 2.768750976313177148e+02 2.547073561014202880e+02 2.728160162818061281e+02 1.733645011505617504e+02 1.625036971255624394e+02 2.977754324167240156e+02 1.632372616873928450e+02 2.174045665187836107e+02 2.606964806055048030e+02 1.625508452643421720e+02 1.715067940576683441e+02 1.218481476549646629e+02 2.842560845538128547e+02 1.928678337146606623e+02 2.708765321293922739e+02 2.077020047066411621e+02 2.923591890868326004e+02 2.230876482822842206e+02 2.689925468225608256e+02 1.036588336737814586e+02 2.052618530546818363e+02 2.648220111560104897e+02 1.868396012623422280e+02 +1.785937212608853315e+02 2.973454718025594161e+02 2.368986004504845084e+02 1.146953890760472348e+02 1.265905165006724644e+02 2.255973396401841455e+02 2.163675674740596264e+02 1.527913853500098185e+02 2.283358642424602465e+02 2.759303134283557597e+02 2.876072117803540777e+02 2.029362495845153944e+02 1.212425121544320490e+02 1.100001317370093830e+02 2.335268996183764330e+02 2.375268130741384027e+02 
2.336339660612213436e+02 2.462747325703657282e+02 2.841981652294566061e+02 1.081959034831858446e+02 1.291296469376330833e+02 2.602425849072438950e+02 2.575669438145553727e+02 2.135342654708205714e+02 2.294373105308322067e+02 2.706502840281193016e+02 2.928412927772634475e+02 1.330151104176747765e+02 1.533759962548247131e+02 2.744006234275867655e+02 +2.257735103076358882e+02 2.728385269717355186e+02 2.290872800510813363e+02 2.330934692803050154e+02 1.037274604992595215e+02 2.674079561164307961e+02 1.195755645916240866e+02 1.402804464035359047e+02 2.170516922702277611e+02 2.744725918691634661e+02 2.930458735600458908e+02 1.496408395971007224e+02 1.595562419103408729e+02 2.835538666488008630e+02 1.780163567793609332e+02 2.906408145890961237e+02 1.133853019218590248e+02 1.494630592331960770e+02 1.214592101712915451e+02 2.263015460193574881e+02 2.598100406717117608e+02 1.963383361449393192e+02 2.235083985338561376e+02 2.946475410923074492e+02 1.758055989844200724e+02 2.637780439251395137e+02 2.875400021086666698e+02 1.577781508415756662e+02 2.146553072676672684e+02 1.798181279868336446e+02 +2.620574340171276617e+02 2.153711882285265915e+02 2.245961661539886904e+02 2.054509343172356921e+02 2.926008719008261210e+02 2.432564531143420652e+02 2.303655720936658611e+02 1.615953803481287991e+02 2.918921003884012748e+02 2.760746977013722017e+02 1.909442200188182710e+02 1.596536528765051060e+02 2.491411570718119037e+02 2.924629085319008936e+02 2.587604848561293807e+02 1.524605619386706792e+02 2.737599884275671798e+02 2.090365453766356723e+02 1.610548024559351461e+02 1.018774121963877803e+02 2.410901898572944049e+02 1.875862586601133444e+02 2.588626077539996686e+02 2.579873618626863845e+02 2.838744453525392828e+02 2.580071516854936817e+02 2.114887112935771256e+02 2.675506009048368696e+02 1.260391751775616029e+02 1.858866479221875920e+02 +1.963224789638335892e+02 2.444908535968891954e+02 1.962779352478895589e+02 1.553096436749702889e+02 2.483662294276224429e+02 1.067992874414757978e+02 2.633849667942634483e+02 2.454321751613854588e+02 1.854433418739394028e+02 2.562889653665436072e+02 2.506342746416453622e+02 1.900819942764665598e+02 1.704565979131312474e+02 2.916979173024495822e+02 1.898592592817412310e+02 2.687872145548625440e+02 1.525347862509104004e+02 2.786582104923993484e+02 2.310813531087783872e+02 1.166208530157265386e+02 2.602471623613457723e+02 2.102772607982462034e+02 2.183751071150112466e+02 1.065011561509572999e+02 2.813176394708128782e+02 1.792292558016025623e+02 2.804083600455996361e+02 1.557890480883644102e+02 2.439522159916458861e+02 2.652201783594097719e+02 +1.425266334964659904e+02 2.075049705342416928e+02 1.704914602333145126e+02 1.886474594627911756e+02 1.252313163849750595e+02 2.836097447326676502e+02 1.406399617929505439e+02 2.414245225193989768e+02 2.576349788827002385e+02 1.486724691707949262e+02 1.092388214497626961e+02 1.685935770192617724e+02 2.033388664740227227e+02 1.390809359458484948e+02 1.056188661648174758e+02 2.350581131530574055e+02 1.964295662906907012e+02 2.578831766420791496e+02 1.109952979966328144e+02 2.027546721440710940e+02 2.501377690830167637e+02 2.111868593440530617e+02 2.324728205186171692e+02 2.453971856382445935e+02 1.723822394524685819e+02 2.872924628066301693e+02 1.140766727214026446e+02 2.221345013854892159e+02 1.728173248741775296e+02 2.676400838220500873e+02 +1.711571121866394947e+02 1.085759247733173396e+02 2.001753766691515750e+02 2.760446855018309407e+02 2.056587091496190567e+02 1.121827347031253197e+02 2.274644480946081444e+02 2.571858980756533128e+02 
2.945439217283808375e+02 1.913312305877045674e+02 1.500446430731354894e+02 1.650397772114545489e+02 2.581660073502400792e+02 2.094009769144933273e+02 1.731816092302842094e+02 2.727903589313663133e+02 2.606648610353666982e+02 1.460656197586831695e+02 2.016951883706858268e+02 1.247477859691891240e+02 1.732157361502286221e+02 1.195560196858487245e+02 1.253893910664414904e+02 2.455457677441618216e+02 1.778732818035962850e+02 2.490436815297808266e+02 1.487573988963908960e+02 1.937302250034929898e+02 1.502426775501600389e+02 1.110841009912817583e+02 +2.382535443835092508e+02 1.972031918916456732e+02 2.576267295349729807e+02 1.730194312205534288e+02 1.301593684828995094e+02 1.624008376323430127e+02 2.060036399923972681e+02 1.233366573394677630e+02 2.194763391620297739e+02 1.701495187616251314e+02 1.223397596968992218e+02 1.987622577877627350e+02 2.511738650001373117e+02 2.130204435763062634e+02 1.993899817227978133e+02 1.597764561560970265e+02 1.205224890815559604e+02 2.184250491898233690e+02 1.755709834516516139e+02 2.741081010321077542e+02 2.104755291992826187e+02 2.698148014221883386e+02 1.299106544858947814e+02 2.008369880697999292e+02 2.938716155581552130e+02 2.671516623028076083e+02 1.332347035771324215e+02 1.291435420390463378e+02 1.835021202063177554e+02 2.002866194329941720e+02 +2.554906544300547182e+02 2.365682876454178540e+02 2.924004211094360244e+02 1.662852505275750730e+02 1.123350814405425808e+02 1.910015128879867632e+02 1.341551373493250594e+02 1.313122940860927770e+02 2.397311819484906152e+02 1.559268654058377024e+02 1.407120959783594003e+02 2.371419051640040152e+02 2.217591327496910480e+02 1.881187811266301537e+02 1.632462641154496907e+02 2.970940639140721373e+02 2.422917505999918433e+02 1.356966040631749593e+02 1.702398486895437486e+02 2.608644720933497183e+02 2.783751927848827563e+02 2.951746624002826138e+02 1.720706565846523688e+02 1.275268866601749096e+02 1.880990845238362681e+02 1.129502795714700625e+02 2.919985401845127626e+02 2.747497807112307555e+02 2.667734033775608395e+02 1.373740617490475699e+02 +2.115416415080857746e+02 1.431719947715498336e+02 1.718744824503889674e+02 1.075365968452523902e+02 2.220100335193473029e+02 1.965127222891928795e+02 1.062726056237197838e+02 2.631794488147562561e+02 1.658640190278337627e+02 1.169182569761068464e+02 1.645780782039788619e+02 2.940728738870184316e+02 2.979920277570993790e+02 2.125849825405138631e+02 1.533327700316632161e+02 2.655551337415409421e+02 1.329075684859120088e+02 2.686536376777100941e+02 2.299223677315555676e+02 2.123135030200585334e+02 1.474417961566917654e+02 2.899688778344954017e+02 1.439992490259426461e+02 1.606165457016644780e+02 2.854253601360321682e+02 2.837928223954166924e+02 1.868865943198568402e+02 1.809928275876523571e+02 1.583918020284682484e+02 2.384217495701244331e+02 +1.181670050605631417e+02 1.525653020190297582e+02 2.615084872177121724e+02 1.755024420886775829e+02 2.989795566898581001e+02 1.573585789513378188e+02 1.903575226478752711e+02 1.641861715477102166e+02 2.943146494922903003e+02 2.038802368327418719e+02 2.581560000437879694e+02 1.504995935930718076e+02 1.095655891680627008e+02 2.628623226127134558e+02 1.069018430130149255e+02 2.750818506761686422e+02 1.121786007219489818e+02 1.106710601660877415e+02 1.217291564359016149e+02 2.915199334459504144e+02 1.325859381653097557e+02 1.737237090326784141e+02 1.036075961875061751e+02 2.392327113385031510e+02 2.486092083099548233e+02 1.259492139939950306e+02 2.665249241620523435e+02 2.103119814995928039e+02 2.718465347096271216e+02 2.018653364759854298e+02 
+2.085808638159350608e+02 2.977621083099649582e+02 1.394173606621695285e+02 2.232898484647512873e+02 1.347812725162832521e+02 1.574683348766579627e+02 1.827258429860655724e+02 2.827887224427595356e+02 2.608349632236463549e+02 2.370910079389979046e+02 2.033290260845359398e+02 1.566531500677691042e+02 2.982287288081304837e+02 2.998057140577807900e+02 1.906108269451214596e+02 2.023344526730545851e+02 1.717672594576409040e+02 2.093320563180507747e+02 2.649028095061802333e+02 2.840422446800275793e+02 2.111868958418739908e+02 1.803076798272542760e+02 2.311954915496957312e+02 1.563425451766251513e+02 2.610066662710300989e+02 1.855286443040786537e+02 1.478912573842241045e+02 2.544380211258828410e+02 2.799416317427427430e+02 2.238937193404353252e+02 +1.269470316997365131e+02 1.895539822645488357e+02 2.443421824114378467e+02 2.632321641240823737e+02 2.164919638664115951e+02 1.042697198382110884e+02 2.896061632271033659e+02 2.068164163046922681e+02 2.059671371408958294e+02 2.352532326493898722e+02 1.046233655847859296e+02 2.755187319279126541e+02 2.344641322699609987e+02 1.434858288567621969e+02 1.255438908126368176e+02 2.548141480364848803e+02 1.466719626681152704e+02 2.020892715394597872e+02 1.195107046056347713e+02 2.012968701954913797e+02 1.996902768982717191e+02 1.560547951636197013e+02 2.162555170020900164e+02 1.483278604161245084e+02 2.615607136845001151e+02 2.424344777210258997e+02 2.524090919470299070e+02 1.726167614603126026e+02 2.199373130240069258e+02 2.318614758097714912e+02 +1.590143031424979370e+02 1.933970326403360502e+02 1.227042846200323112e+02 2.107086401017011781e+02 2.844049872407889552e+02 1.420899421875644464e+02 1.736571760246831673e+02 1.130876049831349661e+02 1.470306210908964317e+02 2.959723384067232246e+02 1.438030965279091049e+02 1.685928342779160403e+02 1.351720793691902713e+02 1.909711091249450590e+02 1.477005416416634205e+02 1.010528808923594681e+02 2.205493627613245167e+02 2.367352422049318079e+02 1.224997665062844305e+02 1.620949451166091251e+02 1.270634404764108467e+02 2.673321646154778932e+02 1.618882934467209225e+02 1.208967331765591524e+02 2.073956586593529607e+02 1.223277950209799059e+02 2.625820210851194361e+02 2.262632377752408672e+02 2.222881433937307349e+02 1.716205611551696961e+02 +2.376094214038359667e+02 2.287867757784330820e+02 2.035778067022395703e+02 2.546588007138803391e+02 1.514832565507949198e+02 1.736683542684334327e+02 1.991020520349750598e+02 1.873563480883249213e+02 1.589186331386689801e+02 1.042563150975229149e+02 2.019924784676414902e+02 1.136537158101241971e+02 1.091264020137841158e+02 1.352770409719844054e+02 2.178414513482917414e+02 1.831380105899948489e+02 1.114225947990316570e+02 1.736029819106907439e+02 1.354612112967272424e+02 1.996055424300992627e+02 2.905125217944571432e+02 2.980326934372309893e+02 1.560898949881966473e+02 1.943286005606112212e+02 2.429797193518882636e+02 2.652714760000731076e+02 2.863852813340179182e+02 1.838252831614893239e+02 1.814799327205894315e+02 2.338290144642930954e+02 +2.526381992552952340e+02 2.089745531365245483e+02 1.869938021147821701e+02 2.864405091884094645e+02 1.736924996547539877e+02 1.479914815134324613e+02 2.132537252074255321e+02 1.830098172980584934e+02 2.476607236946428827e+02 1.066503395377639265e+02 1.405219898965278276e+02 2.743866427972425299e+02 2.269305408710248173e+02 2.791638036143738191e+02 1.824422387811073634e+02 1.852994662516045423e+02 2.777032940597408128e+02 2.109153407914434126e+02 2.214759900082639490e+02 1.857033490029854761e+02 1.302118293337227328e+02 1.889562709124264188e+02 
1.844813915245081546e+02 2.875482403705134402e+02 2.022892465111445404e+02 2.230217175841083872e+02 2.843056043891419904e+02 2.350834055358549222e+02 2.080929758762673032e+02 2.770814576487081240e+02 +2.389430507965955428e+02 2.463651891862864147e+02 2.369578462650186452e+02 1.902366989508459199e+02 2.003468797600664004e+02 2.681735461841141728e+02 2.362787745532336601e+02 2.323782975776413480e+02 2.525302892415198812e+02 2.828059530799229151e+02 2.840327053185673662e+02 1.223941816187275435e+02 1.056255174412387134e+02 1.386503050117574105e+02 1.384325506562210535e+02 1.176641636239777426e+02 1.670688688422628161e+02 2.506322552784647826e+02 1.181229702988334083e+02 2.607048520072489737e+02 1.667476448166365515e+02 1.310085831735554223e+02 1.553111545647699927e+02 2.907454039462255651e+02 2.844644695877585718e+02 1.989933906493695019e+02 2.662036190025202131e+02 1.792754658114438371e+02 1.073664330563030944e+02 2.793141822468826376e+02 +2.640306978448612654e+02 2.458161373226257069e+02 1.015510894380497575e+02 1.527048938693112916e+02 2.893334394723561900e+02 2.994916089563248534e+02 1.054055716033572452e+02 2.278819528330843127e+02 1.890909183007994443e+02 2.134436011261824433e+02 2.654189934957544210e+02 1.780852604264427725e+02 2.222277079756825628e+02 2.689688042831336361e+02 2.232046857529678050e+02 1.778434593737022169e+02 1.336418515516146783e+02 2.739064893378349552e+02 2.065065746675076355e+02 1.329712924393647313e+02 2.176938186185978736e+02 1.918043587714230114e+02 2.280421349429639122e+02 1.182282112372680842e+02 1.370131137248831692e+02 1.716251366233928195e+02 2.412427837766657888e+02 2.738208811966829899e+02 1.471415247536169488e+02 1.638288393831292353e+02 +2.669085627842696908e+02 2.477147782526785136e+02 1.718200513884793565e+02 2.299346472745743597e+02 2.016242169414389309e+02 1.631378839470685307e+02 1.859938403107781255e+02 1.609729169019194330e+02 1.536303039404505171e+02 2.234728543554556950e+02 1.953401084257108096e+02 2.920381588589057174e+02 2.034966688752892310e+02 1.019427894404581139e+02 2.980736970140829953e+02 1.738263823108001418e+02 1.531314323312329293e+02 1.400030133312995702e+02 1.802287961283190043e+02 1.719909696301723443e+02 1.974918793689569725e+02 1.666882741246514001e+02 2.879569025675030502e+02 1.334044307903087088e+02 1.016937569869423896e+02 1.660343944328368764e+02 2.214967229035601974e+02 2.539424882366704992e+02 1.211914878013190133e+02 2.835892388637473687e+02 +1.704109091340931741e+02 1.337843054639438378e+02 1.570106251098002588e+02 2.123587857442842335e+02 2.788290802167920219e+02 2.795601449888932848e+02 1.220747715539721696e+02 1.179984498565524405e+02 1.552783750686872963e+02 1.257256444039083192e+02 2.312614004137946893e+02 1.971625968209403084e+02 1.208837070227885135e+02 2.231693789143681386e+02 2.332576722664892941e+02 1.659208209363902711e+02 1.979623049620595907e+02 2.497459328714609512e+02 2.540243570817084446e+02 1.309045902221261599e+02 2.376613837929333499e+02 2.140333351750954023e+02 2.231625169053620539e+02 2.869160136215916737e+02 1.282002159167354023e+02 1.029173927424986488e+02 2.432034421383394545e+02 1.495648010251883306e+02 1.971910657968611247e+02 1.358409247687675361e+02 +1.833826243837603442e+02 2.960483510370855811e+02 2.343723986770386318e+02 1.560358896543934293e+02 2.499669478251469172e+02 1.762005778153444169e+02 1.918050503412152921e+02 2.089352602085182866e+02 2.770127170480132008e+02 1.268157216157417224e+02 2.670673189640755822e+02 1.547628252866769287e+02 2.602514896343354849e+02 1.557532905756793866e+02 
2.574076233589491949e+02 2.646855654359934533e+02 1.749681240869035719e+02 2.465698370051858035e+02 1.076897610845538082e+02 2.337637497458482301e+02 1.791847918196868932e+02 1.967068388721293104e+02 2.340964493346380095e+02 2.762770912600988140e+02 1.174465260954359564e+02 2.950490567997024982e+02 1.354710376622284116e+02 2.342233227246520642e+02 1.617966271393036379e+02 2.107879984327653915e+02 +2.493754578342164336e+02 2.275093847135933061e+02 1.466148442335522191e+02 2.261697123059220189e+02 1.213252451599347950e+02 1.628949300801819504e+02 2.100466501082228206e+02 1.508908296808102989e+02 1.488199564735201079e+02 1.727131563468088302e+02 2.306747713688439205e+02 2.570279850661015644e+02 2.309125192178541113e+02 2.422081718543400370e+02 1.769407234272878782e+02 2.688532243604371956e+02 2.276780878660686085e+02 1.065345319601523641e+02 1.535069430280279050e+02 1.717902253122074967e+02 2.876755354986605084e+02 1.683056100689713332e+02 1.120105413679224569e+02 1.755508096146901664e+02 2.095863991316655870e+02 1.523590730880595174e+02 2.944635547123552897e+02 1.444697311944634066e+02 2.165062978405008494e+02 1.410128743297030098e+02 +1.434402193906418006e+02 2.368914090178307106e+02 1.963465933374949941e+02 1.914557752364961516e+02 2.870767419320768568e+02 2.044699144835463187e+02 1.223520556576680036e+02 2.352284247043744472e+02 2.917945011866975165e+02 2.225925999946875322e+02 2.240309397680480288e+02 2.048455962243571093e+02 1.188048963943729035e+02 2.200553599997707579e+02 1.885605934416515765e+02 2.863412817843446874e+02 2.913876692311304737e+02 2.446563674684449552e+02 2.981153955140326843e+02 1.111775924383378253e+02 2.239868361016714857e+02 2.540473271011064469e+02 1.343930974769885438e+02 2.368686732696482409e+02 1.175691554116390591e+02 1.014879352562223715e+02 1.330784448687188046e+02 2.045426156006566885e+02 1.168174380391246245e+02 1.704438548713551995e+02 +2.696784010384477597e+02 2.991318545155386346e+02 2.120364825583467336e+02 1.950895785161033018e+02 1.216112431291165592e+02 2.438998438799096391e+02 1.588292735755803733e+02 2.347670069791354024e+02 1.862846309471772770e+02 2.258642611266068343e+02 1.423367506635381119e+02 2.692888471853933083e+02 2.950212092401994255e+02 2.331327670110776467e+02 1.542291422318579635e+02 2.809064569107727038e+02 2.358857646534314654e+02 2.378124255062788563e+02 2.664164586086786812e+02 1.387157904298663880e+02 2.297158046581682243e+02 2.386372312695162634e+02 1.246509391338716171e+02 2.338956320284196408e+02 1.820257170558419944e+02 1.957425768708682767e+02 1.680974560138464540e+02 1.288235048549348676e+02 1.483029350020115089e+02 1.744880718659300669e+02 +2.512494238114035738e+02 1.112846425403449615e+02 2.472643304237797395e+02 1.241745840646870818e+02 1.808849124644312099e+02 2.524760780760417731e+02 1.836118621524309447e+02 1.408362492891266982e+02 1.099623406752946693e+02 2.383967522197594064e+02 2.436606913384966049e+02 2.770699525768120566e+02 2.597573569531676867e+02 2.935649366424795517e+02 2.702790297508025219e+02 2.563597369995835606e+02 2.279477293752616447e+02 2.477470305460766440e+02 1.962131167814513333e+02 2.859744526791636190e+02 2.703401534622389590e+02 2.763052603711840902e+02 2.934416645125817809e+02 2.193475948646207030e+02 2.822891098008749395e+02 1.085391177109117820e+02 1.782208012387337703e+02 2.335496863699061976e+02 1.715066387390946829e+02 1.948062204233656303e+02 +2.879262290016004613e+02 1.676743911135137068e+02 1.403503828589753937e+02 2.744454339345198832e+02 2.935124358491533485e+02 2.920282649929100671e+02 
1.390240222956847447e+02 2.426642861805074745e+02 1.217336684570653489e+02 1.311823750440439085e+02 1.647679902066092836e+02 2.962811279981685288e+02 2.945746172932865647e+02 2.005257587949587332e+02 2.072045953580022228e+02 2.893049469033056766e+02 1.913962360581630833e+02 1.823675529874825543e+02 1.830342103129283373e+02 1.222396004373517400e+02 2.248239872372262482e+02 1.170253438297526429e+02 2.853825568202013301e+02 2.214973458763422514e+02 2.563932510909227176e+02 2.144837192650675206e+02 1.793062298958048473e+02 2.920176466690815005e+02 1.515607839109829627e+02 1.981203765908239802e+02 +1.733053660232129403e+02 1.312183264386245583e+02 1.276233157677672807e+02 2.020942572504836789e+02 2.314817368496994732e+02 2.242589617101967008e+02 2.160504620978007893e+02 2.360595788588375399e+02 2.952977074031120992e+02 2.334652590044975682e+02 1.243453875174208747e+02 1.916144242306085630e+02 1.092365115042800596e+02 1.478765005471206280e+02 2.191946613400726278e+02 2.879274886834762697e+02 2.733443652356662597e+02 1.858481832262083344e+02 2.193747651131673706e+02 2.695165737089945424e+02 2.960753121523491700e+02 1.890691006834304631e+02 2.638343907584013550e+02 1.510492177865631334e+02 1.878288206285384661e+02 2.726561149875388992e+02 1.704246795027074199e+02 1.006381753343381718e+02 2.153734239260733148e+02 2.551451126036402854e+02 +1.591849792872858984e+02 1.304671215023752779e+02 1.427456440770346831e+02 2.882324895344759170e+02 1.680635293254793510e+02 1.205800311663507642e+02 2.861305963205076637e+02 1.219224106654408928e+02 2.467003871618023538e+02 2.830287806498602095e+02 1.445950870572595193e+02 2.496562286252286640e+02 1.464987579205844099e+02 2.848280464142704318e+02 2.785616857190397013e+02 1.837468579783306950e+02 1.246964377230690673e+02 1.251791080124520050e+02 1.496399061799681363e+02 1.375936265087168522e+02 2.547928467777094852e+02 2.554856419260690927e+02 1.285559318166884850e+02 2.092144446410586909e+02 2.868951534942014518e+02 1.178319347908447270e+02 1.347784205269015274e+02 2.851299399919766984e+02 1.754694686670390809e+02 1.016886128619324694e+02 +2.606618423405234353e+02 2.125366732076933545e+02 2.822772640751277322e+02 1.096405633955119185e+02 2.437561663288932721e+02 2.129146561548243994e+02 1.148823764090175530e+02 1.516868774610028368e+02 2.090025176018670265e+02 1.817684320186263562e+02 1.584667226055155709e+02 1.501973711988126468e+02 2.530199923706828713e+02 1.847948752811591930e+02 1.778871618489498303e+02 1.664551902511519188e+02 1.100020157933824265e+02 1.352000835393275509e+02 1.710981737682794801e+02 1.530513645967782566e+02 2.588476693974693035e+02 1.775587245068043956e+02 2.006331886716666588e+02 1.389709403689849694e+02 2.489553638298030194e+02 1.673604491791948021e+02 1.991154502489720812e+02 2.423848982654565418e+02 2.882603768001737308e+02 1.620650086718309240e+02 +2.723642490909132903e+02 1.680927290528325670e+02 1.005734627393615455e+02 1.598916606218045047e+02 1.672547346703738071e+02 2.361420151042074451e+02 2.741857058408131707e+02 2.533004150866734392e+02 2.036092771261417340e+02 1.091915011443997230e+02 1.145604210422382323e+02 1.209982156413156247e+02 2.749595368914399387e+02 2.177794513808643160e+02 2.054163746311436967e+02 2.185860861470465579e+02 1.504022045473846845e+02 1.713704456854883347e+02 2.175221629008602804e+02 1.230663148243889253e+02 2.419648244223723168e+02 1.383010418990747326e+02 2.040260833828849059e+02 2.966316994044250919e+02 1.630596872908637351e+02 2.562534082821714492e+02 2.549425872735235998e+02 1.983522705781282127e+02 
1.524860865223137694e+02 2.736848821358530586e+02 +1.277021385004174192e+02 2.448445434866889343e+02 1.296687360965440803e+02 1.874271582575348702e+02 1.145742775945452792e+02 1.884744688522491742e+02 1.336298647132909423e+02 1.523816963142488419e+02 2.658270705367647224e+02 1.781637174983711134e+02 1.154610011723892171e+02 2.005342781476718415e+02 1.303166615041172918e+02 2.397284110571510496e+02 1.612912854182502542e+02 2.821645080329541315e+02 2.544831471501324813e+02 2.622237400581972224e+02 1.417212269902922230e+02 2.054005404298748658e+02 1.092142219674599062e+02 1.652051184306486107e+02 2.825679563619778492e+02 2.056286073102957630e+02 1.772062144904277545e+02 1.163520479257007310e+02 1.006186351926139366e+02 1.734025793931427586e+02 1.446958902579306709e+02 2.025820689614877779e+02 +1.798382687901162740e+02 1.604629760861514001e+02 2.668981169240885265e+02 2.763242846779806996e+02 1.318105471716862098e+02 2.191362245125996537e+02 2.770758446308884686e+02 2.308910816293108326e+02 2.956895796828827656e+02 1.566426856848869988e+02 2.326210561246332418e+02 1.206555816723871715e+02 2.603144096756907970e+02 1.172571782204154829e+02 2.219493974369055991e+02 2.385109304229506790e+02 2.599678734377965839e+02 2.850516346518521686e+02 1.472948582444382168e+02 2.234296740595885922e+02 1.427895312415343199e+02 2.848238578369252423e+02 2.260232767550441508e+02 1.544648385858973541e+02 1.163971462755376791e+02 1.762731012775239492e+02 1.089523563056807660e+02 1.663966154222005116e+02 1.342495772836978745e+02 2.922401077696804350e+02 +2.806557294060240224e+02 1.077657131130299604e+02 1.622983596366119059e+02 1.723469481204717795e+02 2.678046848873893850e+02 1.442059922525422451e+02 2.629931208031973711e+02 2.741083495447689415e+02 1.194142462414748707e+02 1.688961325073638022e+02 2.967954354880449728e+02 1.822107331135221671e+02 1.292333403080546645e+02 1.856814508383810391e+02 2.103923137448445573e+02 2.517859299913771451e+02 2.551152596962431574e+02 2.077883190793959898e+02 2.986930461834413677e+02 1.196764061335889551e+02 2.378823960447958257e+02 1.692017967083341432e+02 1.471250494556689432e+02 2.608355254883699672e+02 1.757172426071724942e+02 2.629426236813185369e+02 1.040244734248400533e+02 1.533558690719498827e+02 2.011860465194789072e+02 1.720545334339216765e+02 +2.966488050331527688e+02 1.809989340563203086e+02 1.871527370563514978e+02 2.315558973515319394e+02 2.657682292004950000e+02 2.237816732699509998e+02 2.282045922056215090e+02 1.846236325909775928e+02 1.644827554373339353e+02 2.760250360653360531e+02 2.492622345937652995e+02 1.483432536002697191e+02 1.527550390024584601e+02 1.573429964258168070e+02 2.090721206423400247e+02 2.535819867756219708e+02 2.420536340362719159e+02 1.691914404667937788e+02 2.388696721384086459e+02 2.593840245957078423e+02 1.331872961625781500e+02 1.116342264469163581e+02 1.680964276125217793e+02 1.555020753508222526e+02 2.422052215908822177e+02 2.626184375196450560e+02 2.674230788003709449e+02 1.948146659156083729e+02 2.663681889818526543e+02 2.795342087705012659e+02 +1.674728956867265310e+02 2.635505920196726493e+02 1.395353777027027604e+02 1.883233466008314565e+02 1.249441512057495913e+02 2.512189370435067417e+02 2.719913755602378842e+02 1.237326636617429614e+02 2.939951219495833357e+02 1.686366002602222807e+02 1.800181056076297068e+02 2.288525977776352818e+02 2.717306800175948638e+02 1.565292507387619594e+02 1.445460932655216766e+02 2.092313282690445249e+02 2.370375511382032698e+02 2.880525812713749474e+02 1.172567175017127141e+02 1.112412797274302250e+02 
2.246954385922853135e+02 2.812359340959551446e+02 1.004168603505609241e+02 1.005387863078678805e+02 1.815971195408835683e+02 2.811251817522295937e+02 2.605765849402707772e+02 2.298114360271968621e+02 2.557293814584297706e+02 2.542416589790913122e+02 +2.943583269632734414e+02 1.442274778682184717e+02 2.700917391987959491e+02 2.527420049761408904e+02 1.527279900348522688e+02 1.841979337126335281e+02 2.902442440856567600e+02 2.889101481258517765e+02 1.828125218264408716e+02 1.133179379993730862e+02 1.484787634874768116e+02 2.676352293304336740e+02 1.452118425579454311e+02 2.636966617786087568e+02 1.313546620759107100e+02 1.834019443937838787e+02 2.892465421328221282e+02 2.575015388377624959e+02 1.970702343003932242e+02 2.507528167727347181e+02 1.724897096143170074e+02 2.664268628760375464e+02 1.365257050051324370e+02 1.198011035974838308e+02 1.176831988053894520e+02 1.070946883963453899e+02 1.964638491125322446e+02 2.570844982939356100e+02 1.593905150913052466e+02 1.202569936867807598e+02 +2.734271498156417692e+02 2.352133531486530842e+02 2.590835237087205769e+02 2.260994493040042528e+02 1.805421354394846105e+02 2.728408805160995598e+02 2.367263522625478913e+02 2.580210451062748689e+02 1.204524877415260562e+02 2.946465680607327613e+02 1.547220269335912803e+02 1.186203172746691337e+02 1.923878728892914864e+02 1.094127410697402354e+02 2.222837240826847278e+02 1.529333599077602628e+02 1.861450256630199647e+02 2.125583079944122176e+02 1.527591657960447264e+02 2.694001797345342766e+02 1.986063989766776388e+02 2.192493126389772442e+02 2.986827335637019587e+02 2.790660387254000625e+02 2.781487003899754313e+02 2.564198676846006606e+02 2.597551240338123648e+02 2.358970425952163907e+02 1.951628676328612357e+02 1.078208269500064347e+02 +1.190762776130697205e+02 2.951075493308472346e+02 1.091043363430719069e+02 2.824365312299846664e+02 2.445811468414383398e+02 2.538090805786315514e+02 1.230092364266577363e+02 2.633887649939744051e+02 1.865216093980499181e+02 1.540388898662323243e+02 2.047343894245035756e+02 1.431412534309083640e+02 2.857794001060171922e+02 1.492366175285521592e+02 1.380934567887849198e+02 1.331831467466375898e+02 1.149412013934811796e+02 2.205070844660474734e+02 2.939252657951740844e+02 2.049464694042562769e+02 2.047902832862141054e+02 1.810793422252176015e+02 2.005356992447976836e+02 1.381400138775680375e+02 2.582445444487385657e+02 1.698212931623984616e+02 2.252085951830697468e+02 1.808378144669676999e+02 1.307311344108444473e+02 1.050024101356033697e+02 +1.722314120162143354e+02 2.530014253763471856e+02 1.298340795948372772e+02 2.948664870226410812e+02 2.383106068289312702e+02 1.822969205106659558e+02 2.285226769051377005e+02 2.759417691711663565e+02 2.120970517474504220e+02 2.831046044310812704e+02 2.320579821788242612e+02 1.286125039667014960e+02 1.609837368065715282e+02 2.931112965353385107e+02 1.441758663366052531e+02 2.810263276191118962e+02 1.239857273771131077e+02 2.399447548605567988e+02 1.460208836055017514e+02 1.205325462037979491e+02 2.112513935912650993e+02 1.036793750016967692e+02 1.113202625217208777e+02 1.646612561683649574e+02 1.018350908838390581e+02 1.263835026124204859e+02 2.766683711501553944e+02 1.682407929561517506e+02 2.677103056024840271e+02 2.147294480454548307e+02 +2.763536852866382105e+02 1.511976958084401872e+02 1.026794659371155944e+02 1.805990415690671398e+02 2.442493962549426385e+02 1.881796213041043018e+02 1.028768312506858535e+02 2.787706953534510603e+02 2.589640601731795755e+02 1.730107396932538677e+02 2.218419822849910190e+02 2.651646152747807719e+02 
1.476149140151474342e+02 1.986450675254654072e+02 1.050693447352362853e+02 1.819666738706916931e+02 2.873544952103893593e+02 1.472060704631180954e+02 1.297023844405691761e+02 2.824778443572924971e+02 2.918073394139615289e+02 2.128134400148996974e+02 2.223096450508596149e+02 2.761940547406351811e+02 1.348708672340777639e+02 1.857009592938832441e+02 1.062906640064134649e+02 2.104442283262811202e+02 2.812954268214299418e+02 2.739038950945439979e+02 +1.837264129055918147e+02 2.399207190527903322e+02 2.843910623120511900e+02 1.773207161532972975e+02 2.056581469496123873e+02 1.558029517788254168e+02 1.458438122541016924e+02 1.893030782939712253e+02 1.139027557376393673e+02 2.228775749423569437e+02 1.367670384452707140e+02 2.854480456674787092e+02 2.424985140340279202e+02 2.940521113211518696e+02 1.330693282221190259e+02 1.212599008475133076e+02 2.754747741586869552e+02 1.062856492128348549e+02 1.212724485003486166e+02 2.100514698158626743e+02 2.547262582240854272e+02 1.999488755181088777e+02 2.578561029518564283e+02 2.784200494851090752e+02 2.728829168298310606e+02 2.071711407548560544e+02 1.708729380756020362e+02 2.726254883308487251e+02 1.104364015278258364e+02 1.175773277008901090e+02 +2.554381337818412305e+02 1.634513906120204183e+02 2.309962436793083214e+02 2.460443770945291249e+02 1.618890365991254896e+02 1.046310291743186980e+02 2.772116654811295575e+02 2.098555252827713957e+02 2.309383801112169863e+02 2.845300950466865402e+02 1.268119123926061320e+02 1.697885006171669602e+02 1.901887742560337529e+02 2.605757830463372215e+02 2.755463791239279772e+02 1.771647294768940810e+02 2.403902735905423356e+02 1.774352552408031443e+02 1.796883744424403631e+02 2.736192366006921475e+02 2.118505050785533967e+02 1.873353967662169453e+02 1.802980863638028950e+02 1.869858546159753132e+02 1.200946851663063342e+02 2.350811068219035178e+02 2.018941614745772313e+02 1.010158706413519525e+02 1.661546933057649937e+02 2.570882207683835077e+02 +2.856134023048114159e+02 1.356279054667102741e+02 1.225310201562991494e+02 1.529777144242077327e+02 2.936506440162480658e+02 2.589580133771784176e+02 1.864782805190425279e+02 1.931182124516369640e+02 2.913608028278327993e+02 1.555662042949096531e+02 1.173676742008071301e+02 2.242990267171766732e+02 2.651338851871976203e+02 1.128980005738893482e+02 1.283582653966309408e+02 2.071495534530326097e+02 1.241509031508031740e+02 2.393403040292282640e+02 2.829812266966206380e+02 2.294799861563923287e+02 2.129576840814710295e+02 2.165539860914115877e+02 1.357366103660294243e+02 2.396252028023287153e+02 1.395106368224716107e+02 1.700689743264745744e+02 1.253435651632085950e+02 1.508112259783626428e+02 2.310267786371028933e+02 2.311667616985857876e+02 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-int-data.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-int-data.txt new file mode 100644 index 0000000..4fd11b7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-int-data.txt @@ -0,0 +1,100 @@ +-67 65 82 64 51 1 -12 2 -84 -52 12 82 -45 -84 -41 31 -49 36 -70 40 -74 -99 32 64 -6 43 -53 -43 43 96 +-58 20 25 99 -25 78 -6 59 -23 30 36 25 -8 83 -43 -7 -8 42 -90 96 46 88 31 12 68 -21 -6 7 78 -19 +-66 -51 0 13 42 -43 -30 -29 20 10 -24 -5 -42 38 -56 6 1 -80 -65 -91 89 64 -21 49 -84 41 6 -78 71 -2 +-50 -84 -50 -66 46 -88 -10 -28 -25 6 -7 10 -35 86 41 -17 72 -67 13 -67 -76 -84 -15 35 67 40 90 38 -1 -47 +-51 27 -48 26 -73 -46 -68 -56 -38 -4 49 -64 57 -86 -80 70 50 34 84 97 -76 3 -54 -89 -7 
-53 15 36 -28 85 +2 -59 4 30 70 -42 -26 -1 27 -90 -18 95 -10 -36 43 24 86 -8 -100 92 80 -40 17 -93 -81 54 -8 84 -53 38 +-80 0 -71 -41 -33 9 -61 0 26 80 64 67 74 68 -72 78 -72 -52 -19 37 -33 -24 -11 -71 -53 -16 25 56 -74 0 +71 -23 49 -36 -43 -70 82 69 -100 -27 50 20 30 84 -33 90 49 39 -52 -51 -86 -76 -72 -88 12 91 -96 -61 -87 -47 +21 39 1 78 68 -80 -54 71 17 -94 34 -20 14 -5 -24 55 -84 -50 -90 -24 -79 -81 53 -50 22 -13 -92 78 -22 -50 +-47 -73 77 -93 -20 51 -37 -14 -37 -18 -8 -14 -71 29 -27 -5 54 77 -7 -2 15 -23 98 -34 -65 -78 -77 -90 -5 -35 +92 -33 71 24 43 -19 50 -40 -48 -33 -51 -14 23 40 -78 -14 -76 1 52 69 93 5 -13 30 -60 -20 -54 49 -52 93 +32 -86 21 -41 -86 -38 97 -35 -37 -89 -15 -18 -46 -37 8 63 -63 -61 57 50 43 -27 -45 98 -56 -81 16 -38 -25 -28 +-18 19 -52 -86 92 -72 23 35 20 57 69 -22 52 -66 -74 -29 -1 -10 -97 22 -97 -93 -70 87 85 -31 42 -29 -10 -36 +78 80 -93 68 41 84 -37 -62 38 -9 99 -60 90 47 -33 -40 -59 97 -28 9 35 -6 -60 -83 -39 -97 -25 -78 95 40 +79 -35 -45 -46 69 10 29 -88 98 -44 66 11 45 -58 -11 -25 51 -44 54 30 59 98 35 -28 93 86 99 19 -27 -83 +80 77 -72 57 -35 -27 86 -67 11 77 -28 -89 -30 -31 -72 64 -95 -75 92 -32 -96 -14 6 -83 -66 -58 71 -17 58 -53 +-1 17 -72 82 -57 -48 -7 -44 -80 85 -99 -9 27 -11 24 13 86 18 67 -9 12 77 98 49 49 12 -82 45 31 -68 +-13 -75 -26 17 91 12 -95 -62 -54 -60 22 50 86 58 -11 -11 -21 31 16 -15 67 90 1 80 -57 -98 35 -54 51 91 +28 -75 -31 49 0 73 75 -66 50 -77 -20 82 -40 -90 -28 32 -44 89 -75 -33 -11 -19 -55 79 18 2 -39 -49 78 -72 +14 56 78 69 -40 -20 -39 71 99 -89 60 -82 -1 -77 -42 94 -41 35 72 11 -13 89 -52 -41 -93 43 -39 -61 68 -4 +88 18 -90 -75 -49 46 -28 -48 -69 -64 77 -8 91 -65 62 -27 -19 34 10 78 82 49 -34 63 78 -88 -17 -37 -85 91 +4 36 -77 -75 -12 70 42 8 7 -31 -69 -74 -65 18 85 -92 91 16 -15 24 -74 -56 71 -70 -90 20 13 73 -68 -65 +92 22 -31 -73 -59 -78 -20 -11 -61 36 -40 34 -96 -12 51 -45 -12 12 -3 -42 -71 68 -8 -91 50 -73 -96 -46 -38 -4 +-87 44 -58 -83 70 -81 32 29 -79 45 -64 -52 57 73 -80 69 7 -22 31 -71 -34 -33 47 79 -17 6 -77 -89 3 50 +85 2 73 -88 -99 -13 -76 1 -90 51 30 -52 75 -2 -8 10 -83 -40 -5 -79 82 19 79 94 49 4 66 -76 6 -48 +29 -34 66 -93 45 -1 -98 92 -92 29 -10 64 -23 -81 -73 -62 -18 37 -29 -50 -52 90 -28 24 -4 -67 -33 25 -78 93 +57 -46 36 -16 34 -59 -96 -86 64 2 28 42 -32 6 -17 37 38 -40 -92 55 -22 -42 11 -77 12 81 -89 -39 -30 -39 +-72 -68 -41 -5 93 55 24 -6 84 77 30 33 -51 -62 6 -5 -83 60 -1 -64 7 -7 -92 31 5 -21 -34 -14 21 -33 +26 -75 -36 -54 -21 -38 -49 -20 82 73 -84 -5 -69 84 -87 12 7 -67 -40 -50 -35 -65 80 -83 -2 1 34 -16 91 82 +61 -21 1 -64 -56 -61 74 16 0 38 51 34 -35 37 -28 -52 -14 61 14 58 50 27 -43 -27 14 56 -16 -78 50 -89 +45 -47 -61 68 -41 -70 14 -51 49 -84 64 -65 88 -39 -88 28 -55 -18 81 -2 -1 -45 65 -6 62 16 71 71 -1 47 +47 60 22 -42 -5 -74 12 66 89 -82 -85 65 74 0 -18 56 -39 84 -65 -42 -33 -60 23 33 -8 -72 3 -64 -3 -25 +-70 11 -19 -12 -1 -50 -89 -61 78 28 55 92 -17 86 -17 -45 -31 68 -24 -99 -59 27 79 -2 21 -80 54 9 14 -70 +-38 52 -99 50 -46 -63 -74 -41 -43 -62 -81 38 -99 17 -94 -6 44 -20 -13 -30 71 -43 43 -28 -8 57 -93 98 4 42 +-17 -27 -60 -22 86 -49 39 -83 72 -16 82 74 73 -29 16 -59 81 -60 -96 51 -62 -55 -79 -31 -15 -67 -18 -83 -61 -86 +28 37 -44 7 -17 -10 -65 8 -78 -17 -46 -5 -35 -86 13 -16 27 24 60 -12 -48 -45 16 -33 70 -45 -63 -60 21 70 +-75 -89 -93 -93 62 -44 -39 46 31 57 72 30 -65 29 66 -53 2 -2 71 -90 -73 -40 -63 32 68 30 25 98 38 92 +88 3 5 73 -2 -61 -94 79 99 94 71 -83 -40 80 -79 -14 -34 -99 -52 27 23 13 13 -35 -74 13 43 -19 2 -62 +92 -47 -27 9 -68 -86 -57 43 9 -81 -9 69 52 -28 80 -13 -6 -44 -81 -89 -10 30 
-64 86 -76 -11 -100 15 12 -62 +76 -42 39 70 74 79 84 -52 18 -58 78 53 89 58 -32 20 -51 35 12 37 -70 -21 5 97 67 -25 -25 -10 2 30 +-84 26 -60 -34 11 -27 47 85 -89 29 54 -53 66 -9 12 4 92 70 2 -12 -55 72 -62 -79 -8 68 -19 12 -8 -100 +78 -97 -76 86 -47 42 99 -3 9 49 -84 86 26 43 -26 90 23 -66 -37 -35 25 -12 -42 -12 96 -15 48 87 -95 -12 +-60 57 -30 -4 -84 24 -82 -5 34 56 76 81 -64 23 32 34 -41 -48 -6 77 -42 64 87 92 82 59 9 -71 -56 -45 +-74 -90 -27 93 33 15 -35 -73 78 23 17 -28 9 63 9 35 15 32 0 -4 -32 54 -76 14 -14 -8 16 -43 -81 57 +-2 22 85 -33 -48 74 64 -59 -27 17 -65 27 -50 -81 41 -69 -26 -29 -83 48 -81 51 58 -62 -63 -55 -63 39 32 -34 +98 -99 13 25 -10 43 -62 50 82 -90 -51 40 -71 82 27 -73 19 -62 37 10 -21 45 -94 -45 -41 -3 44 86 -2 27 +-80 -89 -57 87 -42 19 32 -49 37 -4 -30 54 46 -3 -92 89 60 37 -86 38 61 93 45 -45 -86 54 21 45 50 -53 +7 -68 71 -6 41 -72 67 45 15 46 85 59 82 19 65 75 -62 -35 47 -51 23 41 -54 27 -99 14 9 69 60 62 +99 -51 83 -47 -19 -57 -22 51 -52 52 92 80 69 1 -31 0 -19 -54 73 -5 3 82 -86 -84 -95 -83 -92 -52 -90 -79 +43 -75 62 99 66 -43 -38 -21 23 35 -63 -61 -46 5 3 -90 -28 55 87 89 -29 -46 23 -61 -5 10 -70 -63 50 -14 +39 38 10 66 -24 -45 55 -33 31 29 44 31 73 44 6 69 -21 -58 -3 93 -51 86 -16 -88 88 -30 75 78 -20 -12 +-11 11 -19 40 82 6 10 22 90 -78 -88 -49 72 69 -62 42 -23 22 -38 -98 0 -3 -43 20 9 18 -67 -7 22 21 +99 80 -55 74 43 -31 60 -26 -29 -6 75 60 92 -42 85 18 1 1 -74 -44 -12 72 -57 -98 99 62 45 -40 -39 -75 +50 30 -18 -29 -80 -59 -96 46 -99 -76 -13 -75 -93 -95 -45 62 -37 53 -96 57 -40 3 14 -45 -84 58 75 16 37 -6 +1 -47 87 -99 -22 -22 -20 71 -91 13 35 -80 75 65 -87 16 -37 99 -60 49 52 18 55 -11 18 24 -65 -80 8 -79 +-8 -87 86 -9 -64 -76 59 -52 -89 18 13 70 44 93 99 62 39 49 83 28 72 -71 -13 -71 -22 44 -87 73 -68 80 +41 -26 44 -63 -26 -83 -44 63 -51 -48 52 -8 55 73 -45 84 40 45 32 -34 -78 -46 -79 57 -40 11 34 -75 -20 91 +94 9 -35 -5 3 59 -63 2 -7 -72 -34 -70 78 99 -29 37 11 91 61 29 85 -15 59 79 47 41 19 -18 -92 47 +-59 -89 57 -72 -79 88 -85 18 -35 -96 -57 33 83 70 -55 -16 -21 72 -53 89 -44 -86 9 -44 -26 78 2 -93 -75 6 +55 73 89 80 -69 -93 -39 -88 62 49 91 -68 87 -26 40 16 -49 -53 -57 23 -97 39 -78 44 -15 1 60 -87 43 -42 +-2 -23 -74 -80 -59 52 -58 68 64 97 -86 -41 -88 35 49 3 -40 90 34 -2 3 13 -95 8 -1 6 75 92 19 -31 +57 76 65 3 37 -72 -43 57 64 -23 41 87 26 76 -18 -32 28 47 11 47 -33 -12 4 81 -92 -47 -81 43 -2 5 +68 74 66 -89 -95 -40 -78 -58 -54 -20 2 20 94 -35 58 -20 41 77 0 95 39 14 36 -40 -85 -60 -63 82 0 58 +-61 -99 61 10 -2 -31 -70 37 -77 -10 85 95 -28 70 -81 -78 -68 -33 -77 77 -6 42 -100 -68 -59 -86 -42 -74 35 -32 +64 -1 -1 -64 51 11 -65 47 -87 -8 5 58 22 -80 68 -25 24 59 -25 -75 95 -22 -73 27 86 -39 -98 -1 -17 -32 +94 -50 -53 -62 -53 46 50 38 -95 -77 40 -38 -23 -14 -68 -20 -47 23 -8 -12 -92 -69 -97 30 94 -45 47 -81 82 -60 +28 67 -48 4 74 27 -30 12 -32 35 91 -83 30 -55 -7 79 97 11 93 -45 -79 31 78 65 84 -23 -26 17 -61 43 +44 60 -88 72 31 98 55 -4 66 -14 10 -81 -40 66 -15 21 69 -98 34 3 75 18 98 -6 47 -39 31 -19 30 -51 +-6 18 -93 31 51 -20 -16 -33 -38 -19 71 4 -53 23 97 1 -28 -72 -44 -48 45 33 -76 86 64 49 -45 -34 -9 -76 +-19 8 28 -27 -51 -58 -36 63 -92 -95 70 41 -38 -49 -95 -100 43 97 -60 -5 -56 45 -13 -3 20 -10 -21 -85 -5 63 +-74 -74 -74 -39 -57 -12 51 11 -11 -22 -26 -54 71 24 -37 77 -90 77 75 86 -53 3 69 -99 -82 -59 30 81 -21 -86 +67 63 87 -15 60 -82 87 51 -39 -49 -16 74 51 17 6 47 98 89 -20 -98 97 -61 18 34 37 -36 37 -96 90 44 +53 -8 37 -76 -61 70 -77 -11 98 -80 12 -80 6 -89 8 -59 -69 -100 -52 -30 95 -58 61 29 52 -64 -51 10 16 -58 +54 -10 49 62 76 -25 80 36 
13 5 59 -65 14 41 26 -78 23 -45 -51 -85 91 -43 -61 -37 94 27 -11 49 98 48 +53 -51 27 34 28 -53 18 17 31 -31 59 71 -34 25 54 -84 -34 -24 76 38 -36 15 -1 56 2 -12 0 26 -38 -62 +4 -94 -63 -21 -95 -42 -12 86 14 -86 -1 80 -48 62 -47 -52 3 91 -86 11 79 32 -24 -33 -54 19 -17 28 -33 -97 +-18 41 84 1 -83 48 -99 -64 26 -52 3 -64 68 -98 93 -79 -97 11 88 74 41 -31 -42 -35 -66 18 97 -30 19 -93 +-19 42 61 -91 -20 59 -11 -64 -60 85 -6 -71 33 -52 46 51 -86 -77 74 -4 74 -81 1 -39 -30 12 -12 20 66 60 +86 1 -67 -91 -92 -22 91 -90 -45 26 53 -6 99 46 -29 -40 -99 57 -45 -47 -3 -86 90 -78 -33 73 90 -51 -75 2 +88 -34 -2 30 -18 35 -23 90 99 -49 90 -79 94 -38 48 67 -35 -58 81 -24 18 -54 83 65 -58 -12 13 89 -59 57 +92 -99 94 -73 97 -78 -93 98 -78 95 -21 -17 -11 -92 69 -60 86 9 -36 -18 -33 -39 -65 74 -65 37 -49 87 -28 -81 +-95 2 -18 20 93 54 86 -63 -5 -89 17 -9 75 -66 -64 -82 -46 -48 82 5 -89 19 -32 -45 53 -47 21 -9 40 34 +86 87 55 -41 49 -10 -6 -7 -99 23 90 -50 -9 -81 77 65 29 -21 22 -82 19 48 -24 -72 75 -66 -69 -17 72 6 +13 37 96 31 -65 -54 -91 -27 84 52 -9 -28 85 96 14 63 -34 -29 -85 78 -75 -44 -30 -5 4 72 -45 6 13 71 +96 -69 67 59 69 46 80 42 81 30 89 -45 -10 -44 25 31 89 16 -36 86 31 92 1 5 -2 92 -11 77 20 40 +-48 98 -100 30 54 9 84 -88 5 48 93 56 -94 -89 81 33 44 -30 -95 -98 29 -33 13 -26 -59 -80 -68 -40 12 11 +82 -63 -30 -67 54 -68 50 -63 -91 -68 -45 -66 -58 16 -25 9 -50 -59 -55 4 -2 0 -63 67 30 -21 -8 55 21 -68 +9 -8 56 -6 84 81 -63 -35 81 56 -50 -54 96 -51 86 0 66 -4 -18 65 -26 -57 8 78 -54 17 18 86 21 68 +9 38 33 16 3 86 -57 28 -6 -44 -42 -2 3 -71 -86 23 34 -29 33 -30 67 63 -11 76 -65 92 30 -66 61 1 +-72 -85 -1 64 -79 -78 -1 15 -35 -32 80 33 -36 -82 24 -65 -23 29 38 -31 87 55 -18 -52 -77 -22 -11 54 62 -48 +65 -77 50 16 41 -94 -21 16 85 24 60 86 -78 -13 69 46 55 5 -27 -18 -6 -1 59 -62 -58 -99 -49 -84 89 18 +-21 -15 -55 60 78 98 67 94 58 -5 -36 42 36 73 13 72 -78 -68 41 -37 -33 -46 -80 40 13 -44 -71 -8 15 -77 +16 -93 -42 -10 14 57 -54 -3 -44 -21 30 -93 71 25 -60 -94 93 5 -94 -84 -72 1 -50 -34 23 -15 15 18 72 -29 +-22 -82 -30 -87 -88 -25 46 32 -30 -55 -79 -85 71 -89 -57 -88 21 53 -100 -64 -92 -97 56 -51 -17 -34 -31 6 -68 84 +-53 -51 90 -38 -61 57 -63 67 22 22 70 44 43 97 20 -62 -74 72 83 -32 35 -66 -29 5 -88 55 -94 94 -19 55 +57 51 29 -42 -21 63 -57 7 -48 -87 -60 -55 -77 -53 -1 -85 64 60 53 71 41 59 -61 -73 -12 86 90 10 -60 -38 +2 -9 14 67 -2 70 11 -78 26 -55 -86 -25 99 66 63 64 46 59 66 -37 -78 -70 63 1 -20 2 46 50 34 19 +-87 -40 75 -11 -88 -80 -95 -20 -92 -28 83 24 88 -39 83 -36 -61 56 99 -73 -59 -85 -49 -10 91 12 -79 -18 -15 6 +35 -74 -4 -15 40 -87 81 -22 -12 -46 14 9 98 -35 -2 -12 57 -74 -52 71 70 -70 -61 -47 89 44 33 -100 54 42 +-4 -34 80 -12 -15 -9 -8 -29 89 -55 -33 89 16 -33 -73 -82 98 27 88 59 48 20 -67 -21 -86 11 -50 46 64 -8 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-uint-data.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-uint-data.txt new file mode 100644 index 0000000..c1ec7a5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/random-uint-data.txt @@ -0,0 +1,100 @@ +52 34 59 34 64 20 89 69 26 93 95 32 17 93 77 49 51 60 51 27 60 10 61 2 16 30 41 68 65 0 +43 74 11 37 32 61 72 29 47 21 7 47 68 58 22 33 29 37 14 45 71 1 67 79 69 9 6 6 95 78 +86 20 68 67 43 5 77 70 96 37 79 71 35 30 22 4 56 28 33 50 97 17 85 52 21 5 57 19 35 97 +15 21 99 4 54 39 15 29 68 21 50 76 64 51 79 0 24 5 65 95 90 51 99 82 9 80 61 32 2 38 +46 97 53 96 51 84 18 42 30 52 82 77 72 59 1 67 72 16 14 63 70 94 20 27 38 70 86 95 
41 75 +2 35 45 63 92 76 81 60 62 72 90 46 47 33 1 30 54 22 50 85 63 61 22 79 45 53 45 33 8 28 +43 41 14 79 2 77 95 16 74 19 17 78 47 12 68 55 3 2 77 10 35 86 52 33 47 26 98 42 48 86 +18 32 85 4 91 10 69 68 15 42 58 77 88 64 91 43 56 30 92 11 52 23 43 92 65 50 68 8 80 81 +20 57 38 44 62 10 80 25 32 11 70 32 13 50 41 55 44 0 28 83 5 1 34 94 55 52 56 24 76 21 +36 43 59 28 10 59 4 41 64 98 54 66 44 3 37 41 67 10 85 23 58 35 58 34 35 79 46 18 1 51 +72 63 85 51 23 91 3 56 35 72 38 26 91 0 68 98 27 10 12 71 30 1 14 47 47 88 17 68 78 46 +53 47 1 89 95 53 11 45 46 6 91 20 57 35 58 79 60 3 21 45 4 18 59 96 36 12 13 83 52 46 +33 91 82 24 97 28 50 43 65 22 14 44 32 57 33 10 34 77 58 6 27 90 26 77 62 81 87 96 0 32 +96 44 59 3 47 18 0 91 83 68 48 26 67 82 39 18 88 47 80 0 57 40 30 7 57 74 49 37 57 65 +18 44 0 46 47 30 65 79 53 8 26 42 80 76 30 61 82 93 78 25 89 49 55 15 86 63 35 74 41 11 +18 14 40 90 91 79 80 36 33 72 25 56 73 28 65 27 62 17 60 84 23 70 32 26 77 97 47 94 72 1 +82 36 68 10 83 83 40 42 51 55 82 6 37 69 93 82 64 13 54 30 45 36 87 59 1 80 39 93 11 61 +78 34 53 39 64 52 52 22 33 69 71 82 57 37 78 52 62 31 87 68 70 5 85 94 41 75 38 45 84 22 +36 23 51 15 61 76 88 85 36 96 21 60 34 61 72 60 69 81 5 17 16 82 30 61 39 96 40 70 42 71 +45 30 60 50 78 90 36 40 11 85 42 14 61 3 66 53 68 14 41 30 97 74 79 91 64 8 1 53 52 33 +55 24 35 4 49 51 44 70 93 78 25 65 1 29 96 12 93 94 13 65 4 47 84 10 90 12 36 48 21 36 +17 74 61 54 21 83 35 97 47 90 57 11 16 39 95 78 23 40 23 55 17 51 20 73 98 93 50 32 58 4 +84 76 78 33 50 29 11 20 5 93 63 22 91 92 44 85 62 25 63 92 36 26 57 33 8 74 69 64 78 91 +58 34 91 71 37 84 28 90 28 37 97 7 26 44 59 18 58 64 31 83 16 17 50 36 65 81 19 63 66 64 +20 71 1 35 87 5 47 27 6 95 86 75 74 9 94 93 26 5 61 3 97 88 0 57 21 64 46 24 86 12 +23 53 31 39 37 77 29 51 85 10 41 91 67 82 50 91 53 72 75 81 50 63 52 92 83 49 92 50 26 9 +38 43 13 87 11 45 28 16 27 61 70 52 77 9 57 42 73 22 32 95 23 91 93 63 16 44 26 9 93 83 +77 68 21 96 44 45 9 2 14 2 67 90 55 82 67 21 18 64 31 16 2 27 86 42 34 72 22 98 91 33 +89 66 87 76 0 32 81 39 55 76 23 56 51 53 75 79 30 86 1 66 64 14 46 84 92 19 95 47 77 97 +88 79 61 26 66 92 54 22 15 25 26 0 76 27 17 59 48 4 42 61 65 91 0 62 55 79 29 88 10 11 +24 89 91 39 56 36 16 86 41 31 14 35 7 71 77 74 33 11 49 7 96 83 31 63 90 49 96 22 58 86 +45 7 93 44 50 54 83 80 3 36 11 38 14 17 10 84 96 94 26 34 26 75 72 0 41 89 96 47 39 88 +0 95 2 22 68 38 0 3 51 6 13 10 14 49 75 69 25 39 63 67 12 80 37 77 10 90 60 35 84 37 +98 56 99 75 49 66 3 33 65 86 1 79 91 23 69 98 91 73 95 45 64 26 99 75 49 77 71 55 42 18 +80 39 26 94 85 42 91 27 14 57 36 34 10 44 38 77 23 39 54 25 32 5 17 9 66 3 67 94 20 11 +88 80 30 77 72 67 16 75 84 87 60 89 21 94 24 11 63 8 79 89 37 18 6 82 76 70 81 95 67 95 +92 36 55 55 43 18 76 94 30 74 95 38 45 95 54 87 22 57 4 65 15 90 90 38 73 24 67 24 36 25 +98 30 34 68 11 48 42 38 80 23 12 91 77 22 65 2 88 31 70 12 46 63 17 63 27 76 21 71 70 7 +76 29 56 12 41 66 22 96 8 6 7 13 27 10 77 90 2 76 30 24 81 88 19 16 93 13 30 24 98 96 +45 94 89 41 52 14 71 88 80 74 7 85 44 69 65 88 4 15 84 97 86 5 53 15 39 34 9 10 45 20 +95 47 45 96 71 10 36 10 90 49 7 68 14 46 97 89 82 58 69 34 93 77 90 9 27 91 29 27 22 17 +80 6 29 26 34 59 10 55 32 53 18 72 39 40 29 35 52 64 2 64 38 83 16 46 53 20 19 8 10 67 +47 44 79 32 58 82 26 69 0 26 4 73 95 98 61 96 20 38 3 92 6 5 25 24 42 49 15 92 80 16 +74 37 86 84 47 15 56 36 43 59 72 72 74 73 49 54 26 5 40 80 78 48 4 65 31 70 14 91 88 72 +91 45 73 62 83 40 49 3 27 79 80 90 3 3 58 44 7 66 77 42 37 25 20 91 47 63 71 7 72 22 +51 3 36 90 45 
84 18 55 75 78 42 62 86 63 65 67 46 75 1 79 2 85 85 60 36 92 34 89 66 99 +36 99 0 63 89 65 54 58 52 28 98 27 67 1 45 71 35 52 55 55 44 23 46 89 83 37 8 2 92 75 +51 13 71 2 9 95 23 60 24 98 86 43 32 16 75 70 92 78 26 84 29 14 35 55 61 89 73 59 76 44 +59 57 28 92 33 50 70 94 89 67 70 38 53 16 35 70 35 92 39 78 88 80 71 1 93 21 87 64 49 84 +29 6 17 45 38 65 41 48 81 69 34 12 2 14 41 71 16 92 69 27 61 74 58 20 75 19 39 66 57 82 +12 8 14 85 97 31 58 31 20 76 6 42 29 95 60 94 15 84 86 69 73 52 73 57 12 66 89 65 60 84 +20 74 96 34 83 41 8 37 22 36 30 25 20 8 58 73 9 75 76 73 84 38 16 24 95 95 68 66 43 19 +33 15 25 80 48 69 63 39 16 45 6 77 14 46 38 15 64 85 49 5 59 28 9 4 23 68 59 26 1 75 +35 45 3 6 34 59 55 51 81 59 59 93 18 41 8 44 88 7 86 4 88 90 24 54 73 62 89 13 44 92 +72 60 68 83 39 32 30 15 98 92 69 94 51 48 9 0 4 1 30 92 40 1 61 82 66 4 39 10 93 87 +12 20 34 72 33 31 67 71 67 47 98 76 53 29 17 17 13 31 43 76 25 37 8 39 9 5 96 41 87 66 +96 30 2 57 57 10 14 17 86 76 35 94 42 54 18 24 19 34 12 42 18 11 83 65 86 38 45 17 60 70 +19 62 71 99 35 60 96 30 44 80 78 15 14 5 32 43 10 26 81 72 41 98 30 87 75 8 53 33 25 95 +22 0 38 57 88 7 47 83 49 41 52 1 14 93 41 3 18 42 15 57 28 74 97 2 18 48 64 25 77 69 +36 95 65 81 44 41 6 74 62 16 72 81 15 72 31 5 22 17 19 6 7 15 82 10 31 93 11 45 41 11 +22 76 14 62 34 65 82 5 57 51 51 5 1 6 17 43 28 31 90 99 48 14 96 49 95 40 87 85 40 51 +95 13 99 46 52 80 4 18 95 94 0 46 10 80 3 34 60 15 86 10 28 59 6 35 14 93 18 8 3 65 +57 37 6 31 45 85 42 34 47 92 48 40 7 17 5 74 67 62 0 74 58 21 23 3 5 24 50 54 99 19 +24 14 10 4 36 33 88 51 40 66 40 56 65 23 43 13 82 62 27 88 89 91 36 37 19 11 50 39 96 68 +82 7 39 80 52 90 57 17 61 15 51 71 82 15 21 44 4 46 75 50 78 18 63 75 98 45 6 16 57 25 +0 26 56 74 62 84 71 42 25 86 68 10 73 0 71 6 15 99 1 51 45 42 5 49 3 35 84 29 15 36 +60 78 76 3 95 73 36 57 35 44 50 42 85 57 18 69 37 42 75 79 15 12 74 72 51 36 79 3 58 71 +69 24 16 96 17 25 21 94 71 78 74 39 7 96 3 12 13 16 7 99 65 72 12 28 75 44 55 8 75 67 +3 13 92 9 92 83 69 91 65 92 29 63 46 1 4 62 29 85 47 93 81 3 15 23 63 50 17 9 13 13 +9 18 46 53 0 86 10 41 87 89 24 25 70 73 8 23 27 76 66 46 58 39 28 1 99 64 59 13 7 68 +72 57 90 50 47 57 34 27 94 39 23 31 74 77 45 74 18 49 96 8 95 50 20 81 73 55 72 2 32 15 +87 77 74 5 99 86 5 65 97 39 17 74 48 87 20 66 28 2 18 58 49 22 79 23 36 30 64 20 71 32 +35 43 66 96 63 77 18 90 47 86 94 19 88 79 23 12 38 4 56 42 36 2 77 1 3 17 64 52 31 24 +80 2 4 39 61 60 74 83 28 28 61 10 71 82 44 29 55 30 1 58 81 79 34 41 85 82 84 55 22 12 +76 77 58 92 90 0 54 28 77 68 58 12 1 81 37 28 19 60 71 59 25 83 8 49 52 11 28 65 59 70 +14 1 92 90 5 48 28 78 1 42 54 43 60 83 72 19 28 33 12 52 18 15 56 95 39 33 37 70 53 23 +53 76 26 31 18 81 83 79 25 1 82 43 50 24 63 49 5 23 66 37 80 41 63 77 2 28 15 21 32 93 +80 41 81 7 37 95 19 42 57 30 12 25 29 34 41 45 87 8 20 95 63 16 99 55 16 61 16 36 81 25 +32 30 2 81 23 25 88 30 37 76 52 77 79 58 21 58 10 0 13 32 72 80 3 75 75 25 21 9 79 18 +26 13 36 63 43 2 50 41 65 18 88 44 82 75 73 24 1 30 54 68 15 18 22 50 41 99 27 96 51 53 +22 4 76 11 85 88 28 75 1 2 92 66 63 3 58 43 53 5 1 24 99 90 87 87 41 1 85 37 98 92 +16 39 13 88 60 55 35 11 34 23 23 85 79 41 79 87 65 78 47 83 88 78 35 84 30 61 37 58 25 55 +27 33 15 76 82 79 73 92 93 78 18 38 22 96 63 92 41 9 50 96 14 55 8 60 15 61 97 56 43 22 +42 34 94 11 35 70 50 49 36 34 59 14 87 84 88 83 4 69 29 99 35 24 2 18 97 97 74 88 91 49 +33 25 71 12 60 2 48 22 81 33 27 95 54 25 53 14 20 43 26 96 98 37 64 27 72 33 78 45 22 61 +61 21 91 38 92 47 26 90 78 96 58 41 21 72 81 61 55 9 55 60 
28 25 25 74 73 81 64 16 49 39 +90 89 12 93 91 23 82 36 63 58 73 81 49 32 60 39 4 84 73 16 18 26 58 85 46 28 82 91 72 7 +79 41 28 76 33 70 47 6 18 64 40 54 45 61 28 63 87 83 38 9 65 68 62 45 80 63 89 29 20 40 +20 59 58 23 61 79 35 19 78 2 26 48 90 34 69 31 31 42 92 33 18 74 28 47 45 52 36 89 19 40 +58 13 72 24 31 26 73 72 84 29 85 99 20 32 54 92 8 80 86 58 23 80 59 21 76 75 90 76 92 57 +74 53 80 51 8 88 84 63 82 99 97 77 38 9 51 61 37 20 68 47 65 21 53 82 85 96 62 65 35 4 +71 82 14 18 88 79 38 76 66 27 10 10 62 54 80 21 6 57 83 33 52 10 97 37 6 38 12 51 0 84 +95 30 75 92 84 30 55 57 32 44 53 24 77 81 34 84 69 85 91 33 50 72 62 79 62 12 59 75 99 81 +38 42 47 1 11 34 27 77 70 85 89 84 79 15 14 54 78 93 72 68 63 39 98 72 55 32 93 0 13 21 +3 15 10 15 3 31 84 89 53 5 60 41 66 77 45 12 68 68 50 68 99 64 46 54 30 56 2 90 99 78 +66 10 27 89 42 16 9 98 16 2 68 51 0 22 73 60 69 96 37 69 30 36 20 21 51 26 65 13 74 86 +94 58 34 97 77 88 90 75 47 30 6 36 89 66 48 9 20 6 52 45 0 37 99 46 11 53 53 72 94 40 +5 71 50 96 89 71 80 43 27 95 49 9 74 28 62 65 64 97 2 55 58 11 69 0 31 22 73 20 66 11 +63 39 84 62 64 5 56 92 26 86 19 20 56 85 42 48 56 51 54 29 26 95 72 38 70 61 16 54 57 19 +76 97 40 99 73 68 98 92 97 62 73 1 29 72 18 70 90 4 98 95 70 36 65 45 86 36 88 38 64 54 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/selfdual-4d-polytope.txt b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/selfdual-4d-polytope.txt new file mode 100644 index 0000000..47ce4a7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/data/selfdual-4d-polytope.txt @@ -0,0 +1,27 @@ +# The facets of a self-dual 4-dim regular polytope +# with 24 octahedron facets. Taken from cddlib. +# Format b + Ax >= 0 + 1 1 1 1 1 + 1 1 1 1 -1 + 1 1 1 -1 1 + 1 1 1 -1 -1 + 1 1 -1 1 1 + 1 1 -1 1 -1 + 1 1 -1 -1 1 + 1 1 -1 -1 -1 + 1 -1 1 1 1 + 1 -1 1 1 -1 + 1 -1 1 -1 1 + 1 -1 1 -1 -1 + 1 -1 -1 1 1 + 1 -1 -1 1 -1 + 1 -1 -1 -1 1 + 1 -1 -1 -1 -1 + 1 2 0 0 0 + 1 0 2 0 0 + 1 0 0 2 0 + 1 0 0 0 2 + 1 -2 0 0 0 + 1 0 -2 0 0 + 1 0 0 -2 0 + 1 0 0 0 -2 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.py new file mode 100644 index 0000000..b2486f7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.py @@ -0,0 +1,60 @@ +from __future__ import division, print_function, absolute_import + +import pytest +from numpy.testing import assert_, assert_array_equal +from scipy._lib._numpy_compat import suppress_warnings + +try: + import matplotlib + matplotlib.rcParams['backend'] = 'Agg' + import matplotlib.pyplot as plt + from matplotlib.collections import LineCollection + from matplotlib import MatplotlibDeprecationWarning + has_matplotlib = True +except Exception: + has_matplotlib = False + +from scipy.spatial import \ + delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \ + Delaunay, Voronoi, ConvexHull + + +@pytest.mark.skipif(not has_matplotlib, reason="Matplotlib not available") +class TestPlotting: + points = [(0,0), (0,1), (1,0), (1,1)] + + def test_delaunay(self): + # Smoke test + fig = plt.figure() + obj = Delaunay(self.points) + s_before = obj.simplices.copy() + with suppress_warnings() as sup: + # filter can be removed when matplotlib 1.x is dropped + sup.filter(message="The ishold function was deprecated in version") + r = delaunay_plot_2d(obj, ax=fig.gca()) + assert_array_equal(obj.simplices, s_before) # shouldn't 
modify + assert_(r is fig) + delaunay_plot_2d(obj, ax=fig.gca()) + + def test_voronoi(self): + # Smoke test + fig = plt.figure() + obj = Voronoi(self.points) + with suppress_warnings() as sup: + # filter can be removed when matplotlib 1.x is dropped + sup.filter(message="The ishold function was deprecated in version") + r = voronoi_plot_2d(obj, ax=fig.gca()) + assert_(r is fig) + voronoi_plot_2d(obj) + voronoi_plot_2d(obj, show_vertices=False) + + def test_convex_hull(self): + # Smoke test + fig = plt.figure() + tri = ConvexHull(self.points) + with suppress_warnings() as sup: + # filter can be removed when matplotlib 1.x is dropped + sup.filter(message="The ishold function was deprecated in version") + r = convex_hull_plot_2d(tri, ax=fig.gca()) + assert_(r is fig) + convex_hull_plot_2d(tri) diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.pyc new file mode 100644 index 0000000..777112b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__procrustes.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__procrustes.py new file mode 100644 index 0000000..2d67546 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__procrustes.py @@ -0,0 +1,118 @@ +from __future__ import absolute_import, division, print_function + +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_almost_equal +from pytest import raises as assert_raises + +from scipy.spatial import procrustes + + +class TestProcrustes(object): + def setup_method(self): + """creates inputs""" + # an L + self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd') + + # a larger, shifted, mirrored L + self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd') + + # an L shifted up 1, right 1, and with point 4 shifted an extra .5 + # to the right + # pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2) + self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd') + + # data4, data5 are standardized (trace(A*A') = 1). + # procrustes should return an identical copy if they are used + # as the first matrix argument. + shiftangle = np.pi / 8 + self.data4 = np.array([[1, 0], [0, 1], [-1, 0], + [0, -1]], 'd') / np.sqrt(4) + self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)], + [np.cos(np.pi / 2 - shiftangle), + np.sin(np.pi / 2 - shiftangle)], + [-np.cos(shiftangle), + -np.sin(shiftangle)], + [-np.cos(np.pi / 2 - shiftangle), + -np.sin(np.pi / 2 - shiftangle)]], + 'd') / np.sqrt(4) + + def test_procrustes(self): + # tests procrustes' ability to match two matrices. + # + # the second matrix is a rotated, shifted, scaled, and mirrored version + # of the first, in two dimensions only + # + # can shift, mirror, and scale an 'L'? + a, b, disparity = procrustes(self.data1, self.data2) + assert_allclose(b, a) + assert_almost_equal(disparity, 0.) + + # if first mtx is standardized, leaves first mtx unchanged? 
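Aside on the standardization convention checked just below, shown on the data4 values from setup_method (a sketch; numpy as np assumed):

    A = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]], 'd') / np.sqrt(4)
    assert np.allclose(A.mean(axis=0), 0)             # already centered
    assert np.isclose(np.trace(np.dot(A, A.T)), 1.0)  # trace(A*A') == 1

procrustes standardizes its first argument the same way (center, then divide by the Frobenius norm), so a pre-standardized matrix should come back unchanged, which the next assertion verifies.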
+ m4, m5, disp45 = procrustes(self.data4, self.data5) + assert_equal(m4, self.data4) + + # at worst, data3 is an 'L' with one point off by .5 + m1, m3, disp13 = procrustes(self.data1, self.data3) + #assert_(disp13 < 0.5 ** 2) + + def test_procrustes2(self): + # procrustes disparity should not depend on order of matrices + m1, m3, disp13 = procrustes(self.data1, self.data3) + m3_2, m1_2, disp31 = procrustes(self.data3, self.data1) + assert_almost_equal(disp13, disp31) + + # try with 3d, 8 pts per + rand1 = np.array([[2.61955202, 0.30522265, 0.55515826], + [0.41124708, -0.03966978, -0.31854548], + [0.91910318, 1.39451809, -0.15295084], + [2.00452023, 0.50150048, 0.29485268], + [0.09453595, 0.67528885, 0.03283872], + [0.07015232, 2.18892599, -1.67266852], + [0.65029688, 1.60551637, 0.80013549], + [-0.6607528, 0.53644208, 0.17033891]]) + + rand3 = np.array([[0.0809969, 0.09731461, -0.173442], + [-1.84888465, -0.92589646, -1.29335743], + [0.67031855, -1.35957463, 0.41938621], + [0.73967209, -0.20230757, 0.52418027], + [0.17752796, 0.09065607, 0.29827466], + [0.47999368, -0.88455717, -0.57547934], + [-0.11486344, -0.12608506, -0.3395779], + [-0.86106154, -0.28687488, 0.9644429]]) + res1, res3, disp13 = procrustes(rand1, rand3) + res3_2, res1_2, disp31 = procrustes(rand3, rand1) + assert_almost_equal(disp13, disp31) + + def test_procrustes_shape_mismatch(self): + assert_raises(ValueError, procrustes, + np.array([[1, 2], [3, 4]]), + np.array([[5, 6, 7], [8, 9, 10]])) + + def test_procrustes_empty_rows_or_cols(self): + empty = np.array([[]]) + assert_raises(ValueError, procrustes, empty, empty) + + def test_procrustes_no_variation(self): + assert_raises(ValueError, procrustes, + np.array([[42, 42], [42, 42]]), + np.array([[45, 45], [45, 45]])) + + def test_procrustes_bad_number_of_dimensions(self): + # fewer dimensions in one dataset + assert_raises(ValueError, procrustes, + np.array([1, 1, 2, 3, 5, 8]), + np.array([[1, 2], [3, 4]])) + + # fewer dimensions in both datasets + assert_raises(ValueError, procrustes, + np.array([1, 1, 2, 3, 5, 8]), + np.array([1, 1, 2, 3, 5, 8])) + + # zero dimensions + assert_raises(ValueError, procrustes, np.array(7), np.array(11)) + + # extra dimensions + assert_raises(ValueError, procrustes, + np.array([[[11], [7]]]), + np.array([[[5, 13]]])) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__procrustes.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__procrustes.pyc new file mode 100644 index 0000000..4eb68ef Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test__procrustes.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_distance.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_distance.py new file mode 100644 index 0000000..56911d0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_distance.py @@ -0,0 +1,2092 @@ +# +# Author: Damian Eads +# Date: April 17, 2008 +# +# Copyright (C) 2008 Damian Eads +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import division, print_function, absolute_import + +import os.path + +from functools import wraps, partial +from scipy._lib.six import xrange, u + +import numpy as np +import warnings +from numpy.linalg import norm +from numpy.testing import (verbose, assert_, + assert_array_equal, assert_equal, + assert_almost_equal, assert_allclose) +import pytest +from pytest import raises as assert_raises + +from scipy._lib._numpy_compat import suppress_warnings +from scipy.spatial.distance import (squareform, pdist, cdist, num_obs_y, + num_obs_dm, is_valid_dm, is_valid_y, + _validate_vector, _METRICS_NAMES) + +# these were missing: chebyshev cityblock kulsinski +from scipy.spatial.distance import (braycurtis, canberra, chebyshev, cityblock, + correlation, cosine, dice, euclidean, + hamming, jaccard, jensenshannon, + kulsinski, mahalanobis, matching, + minkowski, rogerstanimoto, russellrao, + seuclidean, sokalmichener, sokalsneath, + sqeuclidean, yule) +from scipy.spatial.distance import wminkowski as old_wminkowski + +_filenames = [ + "cdist-X1.txt", + "cdist-X2.txt", + "iris.txt", + "pdist-boolean-inp.txt", + "pdist-chebyshev-ml-iris.txt", + "pdist-chebyshev-ml.txt", + "pdist-cityblock-ml-iris.txt", + "pdist-cityblock-ml.txt", + "pdist-correlation-ml-iris.txt", + "pdist-correlation-ml.txt", + "pdist-cosine-ml-iris.txt", + "pdist-cosine-ml.txt", + "pdist-double-inp.txt", + "pdist-euclidean-ml-iris.txt", + "pdist-euclidean-ml.txt", + "pdist-hamming-ml.txt", + "pdist-jaccard-ml.txt", + "pdist-jensenshannon-ml-iris.txt", + "pdist-jensenshannon-ml.txt", + "pdist-minkowski-3.2-ml-iris.txt", + "pdist-minkowski-3.2-ml.txt", + "pdist-minkowski-5.8-ml-iris.txt", + "pdist-seuclidean-ml-iris.txt", + "pdist-seuclidean-ml.txt", + "pdist-spearman-ml.txt", + "random-bool-data.txt", + "random-double-data.txt", + "random-int-data.txt", + "random-uint-data.txt", + ] + +_tdist = np.array([[0, 662, 877, 255, 412, 996], + [662, 0, 295, 468, 268, 400], + [877, 295, 0, 754, 564, 138], + [255, 468, 754, 0, 219, 869], + [412, 268, 564, 219, 0, 669], + [996, 400, 138, 869, 669, 0]], dtype='double') + +_ytdist = squareform(_tdist) + +# A hashmap of expected output arrays for the tests. These arrays +# come from a list of text files, which are read prior to testing. +# Each test loads inputs and outputs from this dictionary. 
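A minimal sketch of that convention, assuming the data files listed in _filenames sit in the data/ directory next to this module (the same input/output pairing that test_pdist_euclidean_random below relies on):

    import os.path
    import numpy as np
    from scipy.spatial.distance import pdist

    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    X = np.loadtxt(os.path.join(data_dir, 'pdist-double-inp.txt'))           # input observations
    expected = np.loadtxt(os.path.join(data_dir, 'pdist-euclidean-ml.txt'))  # precomputed distances
    np.testing.assert_allclose(pdist(X, 'euclidean'), expected, atol=1e-07)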
+eo = {} + + +def load_testing_files(): + for fn in _filenames: + name = fn.replace(".txt", "").replace("-ml", "") + fqfn = os.path.join(os.path.dirname(__file__), 'data', fn) + fp = open(fqfn) + eo[name] = np.loadtxt(fp) + fp.close() + eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp']) + eo['random-bool-data'] = np.bool_(eo['random-bool-data']) + eo['random-float32-data'] = np.float32(eo['random-double-data']) + eo['random-int-data'] = np.int_(eo['random-int-data']) + eo['random-uint-data'] = np.uint(eo['random-uint-data']) + + +load_testing_files() + + +def _chk_asarrays(arrays, axis=None): + arrays = [np.asanyarray(a) for a in arrays] + if axis is None: + # np < 1.10 ravel removes subclass from arrays + arrays = [np.ravel(a) if a.ndim != 1 else a + for a in arrays] + axis = 0 + arrays = tuple(np.atleast_1d(a) for a in arrays) + if axis < 0: + if not all(a.ndim == arrays[0].ndim for a in arrays): + raise ValueError("array ndim must be the same for neg axis") + axis = range(arrays[0].ndim)[axis] + return arrays + (axis,) + + +def _chk_weights(arrays, weights=None, axis=None, + force_weights=False, simplify_weights=True, + pos_only=False, neg_check=False, + nan_screen=False, mask_screen=False, + ddof=None): + chked = _chk_asarrays(arrays, axis=axis) + arrays, axis = chked[:-1], chked[-1] + + simplify_weights = simplify_weights and not force_weights + if not force_weights and mask_screen: + force_weights = any(np.ma.getmask(a) is not np.ma.nomask for a in arrays) + + if nan_screen: + has_nans = [np.isnan(np.sum(a)) for a in arrays] + if any(has_nans): + mask_screen = True + force_weights = True + arrays = tuple(np.ma.masked_invalid(a) if has_nan else a + for a, has_nan in zip(arrays, has_nans)) + + if weights is not None: + weights = np.asanyarray(weights) + elif force_weights: + weights = np.ones(arrays[0].shape[axis]) + else: + return arrays + (weights, axis) + + if ddof: + weights = _freq_weights(weights) + + if mask_screen: + weights = _weight_masked(arrays, weights, axis) + + if not all(weights.shape == (a.shape[axis],) for a in arrays): + raise ValueError("weights shape must match arrays along axis") + if neg_check and (weights < 0).any(): + raise ValueError("weights cannot be negative") + + if pos_only: + pos_weights = np.nonzero(weights > 0)[0] + if pos_weights.size < weights.size: + arrays = tuple(np.take(a, pos_weights, axis=axis) for a in arrays) + weights = weights[pos_weights] + if simplify_weights and (weights == 1).all(): + weights = None + return arrays + (weights, axis) + + +def _freq_weights(weights): + if weights is None: + return weights + int_weights = weights.astype(int) + if (weights != int_weights).any(): + raise ValueError("frequency (integer count-type) weights required %s" % weights) + return int_weights + + +def _weight_masked(arrays, weights, axis): + if axis is None: + axis = 0 + weights = np.asanyarray(weights) + for a in arrays: + axis_mask = np.ma.getmask(a) + if axis_mask is np.ma.nomask: + continue + if a.ndim > 1: + not_axes = tuple(i for i in range(a.ndim) if i != axis) + axis_mask = axis_mask.any(axis=not_axes) + weights *= 1 - axis_mask.astype(int) + return weights + + +def within_tol(a, b, tol): + return np.abs(a - b).max() < tol + + +def _assert_within_tol(a, b, atol=0, rtol=0, verbose_=False): + if verbose_: + print(np.abs(a - b).max()) + assert_allclose(a, b, rtol=rtol, atol=atol) + + +def _rand_split(arrays, weights, axis, split_per, seed=None): + # inverse operation for stats.collapse_weights + weights = np.array(weights, 
dtype=np.float64)  # modified inplace; need a copy
+    seeded_rand = np.random.RandomState(seed)
+
+    def mytake(a, ix, axis):
+        record = np.asanyarray(np.take(a, ix, axis=axis))
+        return record.reshape([a.shape[i] if i != axis else 1
+                               for i in range(a.ndim)])
+
+    n_obs = arrays[0].shape[axis]
+    assert all(a.shape[axis] == n_obs for a in arrays), "data must be aligned on sample axis"
+    for i in range(int(split_per) * n_obs):
+        split_ix = seeded_rand.randint(n_obs + i)
+        prev_w = weights[split_ix]
+        q = seeded_rand.rand()
+        weights[split_ix] = q * prev_w
+        weights = np.append(weights, (1. - q) * prev_w)
+        arrays = [np.append(a, mytake(a, split_ix, axis=axis),
+                            axis=axis) for a in arrays]
+    return arrays, weights
+
+
+def _rough_check(a, b, compare_assert=partial(assert_allclose, atol=1e-5),
+                 key=lambda x: x, w=None):
+    check_a = key(a)
+    check_b = key(b)
+    try:
+        if np.array(check_a != check_b).any():  # try strict equality for string types
+            compare_assert(check_a, check_b)
+    except AttributeError:  # masked array
+        compare_assert(check_a, check_b)
+    except (TypeError, ValueError):  # nested data structure
+        for a_i, b_i in zip(check_a, check_b):
+            _rough_check(a_i, b_i, compare_assert=compare_assert)
+
+# diff from test_stats:
+# n_args=2, weight_arg='w', default_axis=None
+# ma_safe = False, nan_safe = False
+def _weight_checked(fn, n_args=2, default_axis=None, key=lambda x: x, weight_arg='w',
+                    squeeze=True, silent=False,
+                    ones_test=True, const_test=True, dup_test=True,
+                    split_test=True, dud_test=True, ma_safe=False, ma_very_safe=False, nan_safe=False,
+                    split_per=1.0, seed=0, compare_assert=partial(assert_allclose, atol=1e-5)):
+    """runs fn on its arguments 2 or 3 ways, checks that the results are the same,
+       then returns the same thing it would have returned before"""
+    @wraps(fn)
+    def wrapped(*args, **kwargs):
+        result = fn(*args, **kwargs)
+
+        arrays = args[:n_args]
+        rest = args[n_args:]
+        weights = kwargs.get(weight_arg, None)
+        axis = kwargs.get('axis', default_axis)
+
+        chked = _chk_weights(arrays, weights=weights, axis=axis, force_weights=True, mask_screen=True)
+        arrays, weights, axis = chked[:-2], chked[-2], chked[-1]
+        if squeeze:
+            arrays = [np.atleast_1d(a.squeeze()) for a in arrays]
+
+        try:
+            # WEIGHTS CHECK 1: EQUAL WEIGHTED OBSERVATIONS
+            args = tuple(arrays) + rest
+            if ones_test:
+                kwargs[weight_arg] = weights
+                _rough_check(result, fn(*args, **kwargs), key=key)
+            if const_test:
+                kwargs[weight_arg] = weights * 101.0
+                _rough_check(result, fn(*args, **kwargs), key=key)
+                kwargs[weight_arg] = weights * 0.101
+                try:
+                    _rough_check(result, fn(*args, **kwargs), key=key)
+                except Exception as e:
+                    raise type(e)((e, arrays, weights))
+
+            # WEIGHTS CHECK 2: ADDL 0-WEIGHTED OBS
+            if dud_test:
+                # add randomly resampled rows, weighted at 0
+                dud_arrays, dud_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)
+                dud_weights[:weights.size] = weights  # not exactly 1 because of masked arrays
+                dud_weights[weights.size:] = 0
+                dud_args = tuple(dud_arrays) + rest
+                kwargs[weight_arg] = dud_weights
+                _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                # increase the value of those 0-weighted rows
+                for a in dud_arrays:
+                    indexer = [slice(None)] * a.ndim
+                    indexer[axis] = slice(weights.size, None)
+                    indexer = tuple(indexer)
+                    a[indexer] = a[indexer] * 101
+                dud_args = tuple(dud_arrays) + rest
+                _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                # set those 0-weighted rows to NaNs
+                for a in dud_arrays:
+                    indexer = [slice(None)] * a.ndim
+                    indexer[axis] = slice(weights.size, None)
+                    indexer = tuple(indexer)
+                    a[indexer] = a[indexer] * np.nan
+                if kwargs.get("nan_policy", None) == "omit" and nan_safe:
+                    dud_args = tuple(dud_arrays) + rest
+                    _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                # mask out those nan values
+                if ma_safe:
+                    dud_arrays = [np.ma.masked_invalid(a) for a in dud_arrays]
+                    dud_args = tuple(dud_arrays) + rest
+                    _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                    if ma_very_safe:
+                        kwargs[weight_arg] = None
+                        _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                del dud_arrays, dud_args, dud_weights
+
+            # WEIGHTS CHECK 3: DUPLICATE DATA (DUMB SPLITTING)
+            if dup_test:
+                dup_arrays = [np.append(a, a, axis=axis) for a in arrays]
+                dup_weights = np.append(weights, weights) / 2.0
+                dup_args = tuple(dup_arrays) + rest
+                kwargs[weight_arg] = dup_weights
+                _rough_check(result, fn(*dup_args, **kwargs), key=key)
+                del dup_args, dup_arrays, dup_weights
+
+            # WEIGHTS CHECK 4: RANDOM SPLITTING
+            if split_test and split_per > 0:
+                split_arrays, split_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)
+                split_args = tuple(split_arrays) + rest
+                kwargs[weight_arg] = split_weights
+                _rough_check(result, fn(*split_args, **kwargs), key=key)
+        except NotImplementedError as e:
+            # when some combination of arguments makes weighting impossible,
+            # this is the desired response
+            if not silent:
+                warnings.warn("%s NotImplemented weights: %s" % (fn.__name__, e))
+        return result
+    return wrapped
+
+
+wcdist = _weight_checked(cdist, default_axis=1, squeeze=False)
+wcdist_no_const = _weight_checked(cdist, default_axis=1, squeeze=False, const_test=False)
+wpdist = _weight_checked(pdist, default_axis=1, squeeze=False, n_args=1)
+wpdist_no_const = _weight_checked(pdist, default_axis=1, squeeze=False, const_test=False, n_args=1)
+wrogerstanimoto = _weight_checked(rogerstanimoto)
+wmatching = whamming = _weight_checked(hamming, dud_test=False)
+wyule = _weight_checked(yule)
+wdice = _weight_checked(dice)
+wcityblock = _weight_checked(cityblock)
+wchebyshev = _weight_checked(chebyshev)
+wcosine = _weight_checked(cosine)
+wcorrelation = _weight_checked(correlation)
+wkulsinski = _weight_checked(kulsinski)
+wminkowski = _weight_checked(minkowski, const_test=False)
+wjaccard = _weight_checked(jaccard)
+weuclidean = _weight_checked(euclidean, const_test=False)
+wsqeuclidean = _weight_checked(sqeuclidean, const_test=False)
+wbraycurtis = _weight_checked(braycurtis)
+wcanberra = _weight_checked(canberra, const_test=False)
+wsokalsneath = _weight_checked(sokalsneath)
+wsokalmichener = _weight_checked(sokalmichener)
+wrussellrao = _weight_checked(russellrao)
+
+
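Read together, the wrappers above enforce a simple contract: a properly weighted statistic must be unchanged when every weight is scaled by a constant, and when observations are duplicated with their weight split between the copies. A minimal standalone sketch of that contract on one metric (hamming is chosen because scipy normalizes its weighted form by sum(w); the input values are hypothetical):

    import numpy as np
    from scipy.spatial.distance import hamming

    u = np.array([1., 0., 1., 1.])
    v = np.array([1., 1., 0., 1.])
    w = np.array([1., 2., 3., 4.])
    base = hamming(u, v, w=w)  # (2 + 3) / (1 + 2 + 3 + 4) = 0.5
    # scaling all weights by a constant changes nothing
    assert np.isclose(hamming(u, v, w=w * 101.0), base)
    # duplicating every coordinate while halving its weight changes nothing
    assert np.isclose(hamming(np.tile(u, 2), np.tile(v, 2), w=np.tile(w, 2) / 2.0), base)

Metrics for which one of these identities genuinely does not hold are registered above with the corresponding check disabled (e.g. const_test=False for minkowski and euclidean).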
+class TestCdist(object):

+    def setup_method(self):
+        self.rnd_eo_names = ['random-float32-data', 'random-int-data',
+                             'random-uint-data', 'random-double-data',
+                             'random-bool-data']
+        self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double],
+                              'uint': [np.int_, np.float32, np.double],
+                              'int': [np.float32, np.double],
+                              'float32': [np.double]}
+
+    def test_cdist_extra_args(self):
+        # Tests that args and kwargs are correctly handled
+        def _my_metric(x, y, arg, kwarg=1, kwarg2=2):
+            return arg + kwarg + kwarg2
+
+        X1 = [[1., 2., 3.], [1.2, 2.3, 3.4], [2.2, 2.3, 4.4]]
+        X2 = [[7., 5., 8.], [7.5, 5.8, 8.4], [5.5, 5.8, 4.4]]
+        kwargs = {'N0tV4l1D_p4raM': 3.14, "w":np.arange(3)}
+        args = [3.14] * 200
+        with suppress_warnings() as w:
+            w.filter(DeprecationWarning)
+            for metric in _METRICS_NAMES:
+                assert_raises(TypeError, cdist, X1, X2,
+                              
metric=metric, **kwargs) + assert_raises(TypeError, cdist, X1, X2, + metric=eval(metric), **kwargs) + assert_raises(TypeError, cdist, X1, X2, + metric="test_" + metric, **kwargs) + assert_raises(TypeError, cdist, X1, X2, + metric=metric, *args) + assert_raises(TypeError, cdist, X1, X2, + metric=eval(metric), *args) + assert_raises(TypeError, cdist, X1, X2, + metric="test_" + metric, *args) + + assert_raises(TypeError, cdist, X1, X2, _my_metric) + assert_raises(TypeError, cdist, X1, X2, _my_metric, *args) + assert_raises(TypeError, cdist, X1, X2, _my_metric, **kwargs) + assert_raises(TypeError, cdist, X1, X2, _my_metric, + kwarg=2.2, kwarg2=3.3) + assert_raises(TypeError, cdist, X1, X2, _my_metric, 1, 2, kwarg=2.2) + + assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2, 3.3) + assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2) + assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1) + assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, + kwarg=2.2, kwarg2=3.3) + + # this should work + assert_allclose(cdist(X1, X2, metric=_my_metric, + arg=1.1, kwarg2=3.3), 5.4) + + def test_cdist_euclidean_random_unicode(self): + eps = 1e-07 + X1 = eo['cdist-X1'] + X2 = eo['cdist-X2'] + Y1 = wcdist_no_const(X1, X2, u('euclidean')) + Y2 = wcdist_no_const(X1, X2, u('test_euclidean')) + _assert_within_tol(Y1, Y2, eps, verbose > 2) + + def test_cdist_minkowski_random_p3d8(self): + eps = 1e-07 + X1 = eo['cdist-X1'] + X2 = eo['cdist-X2'] + Y1 = wcdist_no_const(X1, X2, 'minkowski', p=3.8) + Y2 = wcdist_no_const(X1, X2, 'test_minkowski', p=3.8) + _assert_within_tol(Y1, Y2, eps, verbose > 2) + + def test_cdist_minkowski_random_p4d6(self): + eps = 1e-07 + X1 = eo['cdist-X1'] + X2 = eo['cdist-X2'] + Y1 = wcdist_no_const(X1, X2, 'minkowski', p=4.6) + Y2 = wcdist_no_const(X1, X2, 'test_minkowski', p=4.6) + _assert_within_tol(Y1, Y2, eps, verbose > 2) + + def test_cdist_minkowski_random_p1d23(self): + eps = 1e-07 + X1 = eo['cdist-X1'] + X2 = eo['cdist-X2'] + Y1 = wcdist_no_const(X1, X2, 'minkowski', p=1.23) + Y2 = wcdist_no_const(X1, X2, 'test_minkowski', p=1.23) + _assert_within_tol(Y1, Y2, eps, verbose > 2) + + def test_cdist_cosine_random(self): + eps = 1e-07 + X1 = eo['cdist-X1'] + X2 = eo['cdist-X2'] + Y1 = wcdist(X1, X2, 'cosine') + + # Naive implementation + def norms(X): + return np.linalg.norm(X, axis=1).reshape(-1, 1) + + Y2 = 1 - np.dot((X1 / norms(X1)), (X2 / norms(X2)).T) + + _assert_within_tol(Y1, Y2, eps, verbose > 2) + + def test_cdist_mahalanobis(self): + # 1-dimensional observations + x1 = np.array([[2], [3]]) + x2 = np.array([[2], [5]]) + dist = cdist(x1, x2, metric='mahalanobis') + assert_allclose(dist, [[0.0, np.sqrt(4.5)], [np.sqrt(0.5), np.sqrt(2)]]) + + # 2-dimensional observations + x1 = np.array([[0, 0], [-1, 0]]) + x2 = np.array([[0, 2], [1, 0], [0, -2]]) + dist = cdist(x1, x2, metric='mahalanobis') + rt2 = np.sqrt(2) + assert_allclose(dist, [[rt2, rt2, rt2], [2, 2 * rt2, 2]]) + + # Too few observations + assert_raises(ValueError, + cdist, [[0, 1]], [[2, 3]], metric='mahalanobis') + + def test_cdist_custom_notdouble(self): + class myclass(object): + pass + + def _my_metric(x, y): + if not isinstance(x[0], myclass) or not isinstance(y[0], myclass): + raise ValueError("Type has been changed") + return 1.123 + data = np.array([[myclass()]], dtype=object) + cdist_y = cdist(data, data, metric=_my_metric) + right_y = 1.123 + assert_equal(cdist_y, right_y, verbose=verbose > 2) + + def _check_calling_conventions(self, X1, X2, metric, eps=1e-07, **kwargs): + # helper function 
for test_cdist_calling_conventions
+        try:
+            y1 = cdist(X1, X2, metric=metric, **kwargs)
+            y2 = cdist(X1, X2, metric=eval(metric), **kwargs)
+            y3 = cdist(X1, X2, metric="test_" + metric, **kwargs)
+        except Exception as e:
+            e_cls = e.__class__
+            if verbose > 2:
+                print(e_cls.__name__)
+                print(e)
+            assert_raises(e_cls, cdist, X1, X2, metric=metric, **kwargs)
+            assert_raises(e_cls, cdist, X1, X2, metric=eval(metric), **kwargs)
+            assert_raises(e_cls, cdist, X1, X2, metric="test_" + metric, **kwargs)
+        else:
+            _assert_within_tol(y1, y2, rtol=eps, verbose_=verbose > 2)
+            _assert_within_tol(y1, y3, rtol=eps, verbose_=verbose > 2)
+
+    def test_cdist_calling_conventions(self):
+        # Ensures that specifying the metric with a str or scipy function
+        # gives the same behaviour (i.e. same result or same exception).
+        # NOTE: The correctness should be checked within each metric's tests.
+        for eo_name in self.rnd_eo_names:
+            # subsampling input data to speed up tests
+            # NOTE: num samples needs to be greater than dimensions for mahalanobis
+            X1 = eo[eo_name][::5, ::-2]
+            X2 = eo[eo_name][1::5, ::2]
+            for metric in _METRICS_NAMES:
+                if verbose > 2:
+                    print("testing: ", metric, " with: ", eo_name)
+                if metric == 'wminkowski':
+                    continue
+                if metric in {'dice', 'yule', 'kulsinski', 'matching',
+                              'rogerstanimoto', 'russellrao', 'sokalmichener',
+                              'sokalsneath'} and 'bool' not in eo_name:
+                    # python version permits non-bools e.g. for fuzzy logic
+                    continue
+                self._check_calling_conventions(X1, X2, metric)
+
+                # Testing built-in metrics with extra args
+                if metric == "seuclidean":
+                    X12 = np.vstack([X1, X2]).astype(np.double)
+                    V = np.var(X12, axis=0, ddof=1)
+                    self._check_calling_conventions(X1, X2, metric, V=V)
+                elif metric == "mahalanobis":
+                    X12 = np.vstack([X1, X2]).astype(np.double)
+                    V = np.atleast_2d(np.cov(X12.T))
+                    VI = np.array(np.linalg.inv(V).T)
+                    self._check_calling_conventions(X1, X2, metric, VI=VI)
+
+    def test_cdist_dtype_equivalence(self):
+        # Tests that the result is not affected by type up-casting
+        eps = 1e-07
+        tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),
+                 (eo['random-uint-data'], self.valid_upcasts['uint']),
+                 (eo['random-int-data'], self.valid_upcasts['int']),
+                 (eo['random-float32-data'], self.valid_upcasts['float32'])]
+        for metric in _METRICS_NAMES:
+            for test in tests:
+                X1 = test[0][::5, ::-2]
+                X2 = test[0][1::5, ::2]
+                try:
+                    y1 = cdist(X1, X2, metric=metric)
+                except Exception as e:
+                    e_cls = e.__class__
+                    if verbose > 2:
+                        print(e_cls.__name__)
+                        print(e)
+                    for new_type in test[1]:
+                        X1new = new_type(X1)
+                        X2new = new_type(X2)
+                        assert_raises(e_cls, cdist, X1new, X2new, metric=metric)
+                else:
+                    for new_type in test[1]:
+                        y2 = cdist(new_type(X1), new_type(X2), metric=metric)
+                        _assert_within_tol(y1, y2, eps, verbose > 2)
+
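The calling-convention and dtype tests above both reduce to one property: a metric may be named by string (dispatching to the compiled implementation) or passed as a callable (the pure-Python path), and both must agree. A minimal standalone sketch with hypothetical points:

    import numpy as np
    from scipy.spatial.distance import cdist, euclidean

    X1 = np.array([[0., 0.], [1., 1.]])
    X2 = np.array([[3., 4.]])
    assert np.allclose(cdist(X1, X2, metric='euclidean'),
                       cdist(X1, X2, metric=euclidean))  # [[5.], [sqrt(13)]]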
assert_raises(ValueError, cdist, X1, X2, metric, out=out2, **kwargs) + # test for C-contiguous order + out3 = np.empty((2 * out_r, 2 * out_c), dtype=np.double)[::2, ::2] + out4 = np.empty((out_r, out_c), dtype=np.double, order='F') + assert_raises(ValueError, cdist, X1, X2, metric, out=out3, **kwargs) + assert_raises(ValueError, cdist, X1, X2, metric, out=out4, **kwargs) + # test for incorrect dtype + out5 = np.empty((out_r, out_c), dtype=np.int64) + assert_raises(ValueError, cdist, X1, X2, metric, out=out5, **kwargs) + + def test_striding(self): + # test that striding is handled correct with calls to + # _copy_array_if_base_present + eps = 1e-07 + X1 = eo['cdist-X1'][::2, ::2] + X2 = eo['cdist-X2'][::2, ::2] + X1_copy = X1.copy() + X2_copy = X2.copy() + + # confirm equivalence + assert_equal(X1, X1_copy) + assert_equal(X2, X2_copy) + # confirm contiguity + assert_(not X1.flags.c_contiguous) + assert_(not X2.flags.c_contiguous) + assert_(X1_copy.flags.c_contiguous) + assert_(X2_copy.flags.c_contiguous) + + for metric in _METRICS_NAMES: + kwargs = dict() + if metric in ['minkowski', 'wminkowski']: + kwargs['p'] = 1.23 + if metric == 'wminkowski': + kwargs['w'] = 1.0 / X1.std(axis=0) + Y1 = cdist(X1, X2, metric, **kwargs) + Y2 = cdist(X1_copy, X2_copy, metric, **kwargs) + # test that output is numerically equivalent + _assert_within_tol(Y1, Y2, eps, verbose > 2) + +class TestPdist(object): + + def setup_method(self): + self.rnd_eo_names = ['random-float32-data', 'random-int-data', + 'random-uint-data', 'random-double-data', + 'random-bool-data'] + self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double], + 'uint': [np.int_, np.float32, np.double], + 'int': [np.float32, np.double], + 'float32': [np.double]} + + def test_pdist_extra_args(self): + # Tests that args and kwargs are correctly handled + def _my_metric(x, y, arg, kwarg=1, kwarg2=2): + return arg + kwarg + kwarg2 + + X1 = [[1., 2.], [1.2, 2.3], [2.2, 2.3]] + kwargs = {'N0tV4l1D_p4raM': 3.14, "w":np.arange(2)} + args = [3.14] * 200 + with suppress_warnings() as w: + w.filter(DeprecationWarning) + for metric in _METRICS_NAMES: + assert_raises(TypeError, pdist, X1, metric=metric, **kwargs) + assert_raises(TypeError, pdist, X1, + metric=eval(metric), **kwargs) + assert_raises(TypeError, pdist, X1, + metric="test_" + metric, **kwargs) + assert_raises(TypeError, pdist, X1, metric=metric, *args) + assert_raises(TypeError, pdist, X1, metric=eval(metric), *args) + assert_raises(TypeError, pdist, X1, + metric="test_" + metric, *args) + + assert_raises(TypeError, pdist, X1, _my_metric) + assert_raises(TypeError, pdist, X1, _my_metric, *args) + assert_raises(TypeError, pdist, X1, _my_metric, **kwargs) + assert_raises(TypeError, pdist, X1, _my_metric, + kwarg=2.2, kwarg2=3.3) + assert_raises(TypeError, pdist, X1, _my_metric, 1, 2, kwarg=2.2) + + assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2, 3.3) + assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2) + assert_raises(TypeError, pdist, X1, _my_metric, 1.1) + assert_raises(TypeError, pdist, X1, _my_metric, 1.1, + kwarg=2.2, kwarg2=3.3) + + # these should work + assert_allclose(pdist(X1, metric=_my_metric, + arg=1.1, kwarg2=3.3), 5.4) + + def test_pdist_euclidean_random(self): + eps = 1e-07 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-euclidean'] + Y_test1 = wpdist_no_const(X, 'euclidean') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_euclidean_random_u(self): + eps = 1e-07 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-euclidean'] + Y_test1 = 
wpdist_no_const(X, u('euclidean')) + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_euclidean_random_float32(self): + eps = 1e-07 + X = np.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-euclidean'] + Y_test1 = wpdist_no_const(X, 'euclidean') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_euclidean_random_nonC(self): + eps = 1e-07 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-euclidean'] + Y_test2 = wpdist_no_const(X, 'test_euclidean') + _assert_within_tol(Y_test2, Y_right, eps) + + @pytest.mark.slow + def test_pdist_euclidean_iris_double(self): + eps = 1e-07 + X = eo['iris'] + Y_right = eo['pdist-euclidean-iris'] + Y_test1 = wpdist_no_const(X, 'euclidean') + _assert_within_tol(Y_test1, Y_right, eps) + + @pytest.mark.slow + def test_pdist_euclidean_iris_float32(self): + eps = 1e-06 + X = np.float32(eo['iris']) + Y_right = eo['pdist-euclidean-iris'] + Y_test1 = wpdist_no_const(X, 'euclidean') + _assert_within_tol(Y_test1, Y_right, eps, verbose > 2) + + @pytest.mark.slow + def test_pdist_euclidean_iris_nonC(self): + # Test pdist(X, 'test_euclidean') [the non-C implementation] on the + # Iris data set. + eps = 1e-07 + X = eo['iris'] + Y_right = eo['pdist-euclidean-iris'] + Y_test2 = wpdist_no_const(X, 'test_euclidean') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_seuclidean_random(self): + eps = 1e-05 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-seuclidean'] + Y_test1 = pdist(X, 'seuclidean') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_seuclidean_random_float32(self): + eps = 1e-05 + X = np.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-seuclidean'] + Y_test1 = pdist(X, 'seuclidean') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_seuclidean_random_nonC(self): + # Test pdist(X, 'test_sqeuclidean') [the non-C implementation] + eps = 1e-05 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-seuclidean'] + Y_test2 = pdist(X, 'test_seuclidean') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_seuclidean_iris(self): + eps = 1e-05 + X = eo['iris'] + Y_right = eo['pdist-seuclidean-iris'] + Y_test1 = pdist(X, 'seuclidean') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_seuclidean_iris_float32(self): + # Tests pdist(X, 'seuclidean') on the Iris data set (float32). + eps = 1e-05 + X = np.float32(eo['iris']) + Y_right = eo['pdist-seuclidean-iris'] + Y_test1 = pdist(X, 'seuclidean') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_seuclidean_iris_nonC(self): + # Test pdist(X, 'test_seuclidean') [the non-C implementation] on the + # Iris data set. 
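+        # 'seuclidean' is the standardised Euclidean distance,
+        # sqrt(sum((u_i - v_i)**2 / V_i)), where V is the component-wise
+        # variance of the observations (see the V kwarg tests elsewhere).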
+ eps = 1e-05 + X = eo['iris'] + Y_right = eo['pdist-seuclidean-iris'] + Y_test2 = pdist(X, 'test_seuclidean') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_cosine_random(self): + eps = 1e-08 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-cosine'] + Y_test1 = wpdist(X, 'cosine') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_cosine_random_float32(self): + eps = 1e-08 + X = np.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-cosine'] + Y_test1 = wpdist(X, 'cosine') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_cosine_random_nonC(self): + # Test pdist(X, 'test_cosine') [the non-C implementation] + eps = 1e-08 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-cosine'] + Y_test2 = wpdist(X, 'test_cosine') + _assert_within_tol(Y_test2, Y_right, eps) + + @pytest.mark.slow + def test_pdist_cosine_iris(self): + eps = 1e-08 + X = eo['iris'] + Y_right = eo['pdist-cosine-iris'] + Y_test1 = wpdist(X, 'cosine') + _assert_within_tol(Y_test1, Y_right, eps) + + @pytest.mark.slow + def test_pdist_cosine_iris_float32(self): + eps = 1e-07 + X = np.float32(eo['iris']) + Y_right = eo['pdist-cosine-iris'] + Y_test1 = wpdist(X, 'cosine') + _assert_within_tol(Y_test1, Y_right, eps, verbose > 2) + + @pytest.mark.slow + def test_pdist_cosine_iris_nonC(self): + eps = 1e-08 + X = eo['iris'] + Y_right = eo['pdist-cosine-iris'] + Y_test2 = wpdist(X, 'test_cosine') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_cosine_bounds(self): + # Test adapted from @joernhees's example at gh-5208: case where + # cosine distance used to be negative. XXX: very sensitive to the + # specific norm computation. + x = np.abs(np.random.RandomState(1337).rand(91)) + X = np.vstack([x, x]) + assert_(wpdist(X, 'cosine')[0] >= 0, + msg='cosine distance should be non-negative') + + def test_pdist_cityblock_random(self): + eps = 1e-06 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-cityblock'] + Y_test1 = wpdist_no_const(X, 'cityblock') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_cityblock_random_float32(self): + eps = 1e-06 + X = np.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-cityblock'] + Y_test1 = wpdist_no_const(X, 'cityblock') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_cityblock_random_nonC(self): + eps = 1e-06 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-cityblock'] + Y_test2 = wpdist_no_const(X, 'test_cityblock') + _assert_within_tol(Y_test2, Y_right, eps) + + @pytest.mark.slow + def test_pdist_cityblock_iris(self): + eps = 1e-14 + X = eo['iris'] + Y_right = eo['pdist-cityblock-iris'] + Y_test1 = wpdist_no_const(X, 'cityblock') + _assert_within_tol(Y_test1, Y_right, eps) + + @pytest.mark.slow + def test_pdist_cityblock_iris_float32(self): + eps = 1e-06 + X = np.float32(eo['iris']) + Y_right = eo['pdist-cityblock-iris'] + Y_test1 = wpdist_no_const(X, 'cityblock') + _assert_within_tol(Y_test1, Y_right, eps, verbose > 2) + + @pytest.mark.slow + def test_pdist_cityblock_iris_nonC(self): + # Test pdist(X, 'test_cityblock') [the non-C implementation] on the + # Iris data set. 
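+        # 'cityblock' (Manhattan) distance is sum(|u_i - v_i|); e.g. for
+        # u = [1, 2] and v = [4, 6] it is |1-4| + |2-6| = 7.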
+ eps = 1e-14 + X = eo['iris'] + Y_right = eo['pdist-cityblock-iris'] + Y_test2 = wpdist_no_const(X, 'test_cityblock') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_correlation_random(self): + eps = 1e-07 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-correlation'] + Y_test1 = wpdist(X, 'correlation') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_correlation_random_float32(self): + eps = 1e-07 + X = np.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-correlation'] + Y_test1 = wpdist(X, 'correlation') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_correlation_random_nonC(self): + eps = 1e-07 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-correlation'] + Y_test2 = wpdist(X, 'test_correlation') + _assert_within_tol(Y_test2, Y_right, eps) + + @pytest.mark.slow + def test_pdist_correlation_iris(self): + eps = 1e-08 + X = eo['iris'] + Y_right = eo['pdist-correlation-iris'] + Y_test1 = wpdist(X, 'correlation') + _assert_within_tol(Y_test1, Y_right, eps) + + @pytest.mark.slow + def test_pdist_correlation_iris_float32(self): + eps = 1e-07 + X = eo['iris'] + Y_right = np.float32(eo['pdist-correlation-iris']) + Y_test1 = wpdist(X, 'correlation') + _assert_within_tol(Y_test1, Y_right, eps, verbose > 2) + + @pytest.mark.slow + def test_pdist_correlation_iris_nonC(self): + eps = 1e-08 + X = eo['iris'] + Y_right = eo['pdist-correlation-iris'] + Y_test2 = wpdist(X, 'test_correlation') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_minkowski_random(self): + eps = 1e-05 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-minkowski-3.2'] + Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2) + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_minkowski_random_float32(self): + eps = 1e-05 + X = np.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-minkowski-3.2'] + Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2) + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_minkowski_random_nonC(self): + eps = 1e-05 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-minkowski-3.2'] + Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2) + _assert_within_tol(Y_test2, Y_right, eps) + + @pytest.mark.slow + def test_pdist_minkowski_3_2_iris(self): + eps = 1e-07 + X = eo['iris'] + Y_right = eo['pdist-minkowski-3.2-iris'] + Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2) + _assert_within_tol(Y_test1, Y_right, eps) + + @pytest.mark.slow + def test_pdist_minkowski_3_2_iris_float32(self): + eps = 1e-06 + X = np.float32(eo['iris']) + Y_right = eo['pdist-minkowski-3.2-iris'] + Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2) + _assert_within_tol(Y_test1, Y_right, eps) + + @pytest.mark.slow + def test_pdist_minkowski_3_2_iris_nonC(self): + eps = 1e-07 + X = eo['iris'] + Y_right = eo['pdist-minkowski-3.2-iris'] + Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2) + _assert_within_tol(Y_test2, Y_right, eps) + + @pytest.mark.slow + def test_pdist_minkowski_5_8_iris(self): + eps = 1e-07 + X = eo['iris'] + Y_right = eo['pdist-minkowski-5.8-iris'] + Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8) + _assert_within_tol(Y_test1, Y_right, eps) + + @pytest.mark.slow + def test_pdist_minkowski_5_8_iris_float32(self): + eps = 1e-06 + X = np.float32(eo['iris']) + Y_right = eo['pdist-minkowski-5.8-iris'] + Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8) + _assert_within_tol(Y_test1, Y_right, eps, verbose > 2) + + @pytest.mark.slow + def test_pdist_minkowski_5_8_iris_nonC(self): + eps = 1e-07 + X = eo['iris'] + Y_right = 
eo['pdist-minkowski-5.8-iris'] + Y_test2 = wpdist_no_const(X, 'test_minkowski', p=5.8) + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_mahalanobis(self): + # 1-dimensional observations + x = np.array([2.0, 2.0, 3.0, 5.0]).reshape(-1, 1) + dist = pdist(x, metric='mahalanobis') + assert_allclose(dist, [0.0, np.sqrt(0.5), np.sqrt(4.5), + np.sqrt(0.5), np.sqrt(4.5), np.sqrt(2.0)]) + + # 2-dimensional observations + x = np.array([[0, 0], [-1, 0], [0, 2], [1, 0], [0, -2]]) + dist = pdist(x, metric='mahalanobis') + rt2 = np.sqrt(2) + assert_allclose(dist, [rt2, rt2, rt2, rt2, 2, 2 * rt2, 2, 2, 2 * rt2, 2]) + + # Too few observations + assert_raises(ValueError, + wpdist, [[0, 1], [2, 3]], metric='mahalanobis') + + def test_pdist_hamming_random(self): + eps = 1e-07 + X = eo['pdist-boolean-inp'] + Y_right = eo['pdist-hamming'] + Y_test1 = wpdist(X, 'hamming') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_hamming_random_float32(self): + eps = 1e-07 + X = np.float32(eo['pdist-boolean-inp']) + Y_right = eo['pdist-hamming'] + Y_test1 = wpdist(X, 'hamming') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_hamming_random_nonC(self): + eps = 1e-07 + X = eo['pdist-boolean-inp'] + Y_right = eo['pdist-hamming'] + Y_test2 = wpdist(X, 'test_hamming') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_dhamming_random(self): + eps = 1e-07 + X = np.float64(eo['pdist-boolean-inp']) + Y_right = eo['pdist-hamming'] + Y_test1 = wpdist(X, 'hamming') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_dhamming_random_float32(self): + eps = 1e-07 + X = np.float32(eo['pdist-boolean-inp']) + Y_right = eo['pdist-hamming'] + Y_test1 = wpdist(X, 'hamming') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_dhamming_random_nonC(self): + eps = 1e-07 + X = np.float64(eo['pdist-boolean-inp']) + Y_right = eo['pdist-hamming'] + Y_test2 = wpdist(X, 'test_hamming') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_jaccard_random(self): + eps = 1e-08 + X = eo['pdist-boolean-inp'] + Y_right = eo['pdist-jaccard'] + Y_test1 = wpdist(X, 'jaccard') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_jaccard_random_float32(self): + eps = 1e-08 + X = np.float32(eo['pdist-boolean-inp']) + Y_right = eo['pdist-jaccard'] + Y_test1 = wpdist(X, 'jaccard') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_jaccard_random_nonC(self): + eps = 1e-08 + X = eo['pdist-boolean-inp'] + Y_right = eo['pdist-jaccard'] + Y_test2 = wpdist(X, 'test_jaccard') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_djaccard_random(self): + eps = 1e-08 + X = np.float64(eo['pdist-boolean-inp']) + Y_right = eo['pdist-jaccard'] + Y_test1 = wpdist(X, 'jaccard') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_djaccard_random_float32(self): + eps = 1e-08 + X = np.float32(eo['pdist-boolean-inp']) + Y_right = eo['pdist-jaccard'] + Y_test1 = wpdist(X, 'jaccard') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_djaccard_allzeros(self): + eps = 1e-08 + Y = pdist(np.zeros((5, 3)), 'jaccard') + _assert_within_tol(np.zeros(10), Y, eps) + + def test_pdist_djaccard_random_nonC(self): + eps = 1e-08 + X = np.float64(eo['pdist-boolean-inp']) + Y_right = eo['pdist-jaccard'] + Y_test2 = wpdist(X, 'test_jaccard') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_jensenshannon_random(self): + eps = 1e-08 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-jensenshannon'] + Y_test1 = pdist(X, 'jensenshannon') + 
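+        # 'jensenshannon' treats each row as a probability vector and
+        # returns the square root of the Jensen-Shannon divergence
+        # JS(P, Q) = (KL(P||M) + KL(Q||M)) / 2 with M = (P + Q) / 2,
+        # which is symmetric and bounded.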
_assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_jensenshannon_random_float32(self): + eps = 1e-07 + X = np.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-jensenshannon'] + Y_test1 = pdist(X, 'jensenshannon') + _assert_within_tol(Y_test1, Y_right, eps, verbose > 2) + + def test_pdist_jensenshannon_random_nonC(self): + eps = 1e-08 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-jensenshannon'] + Y_test2 = pdist(X, 'test_jensenshannon') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_jensenshannon_iris(self): + eps = 1e-12 + X = eo['iris'] + Y_right = eo['pdist-jensenshannon-iris'] + Y_test1 = pdist(X, 'jensenshannon') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_jensenshannon_iris_float32(self): + eps = 1e-06 + X = np.float32(eo['iris']) + Y_right = eo['pdist-jensenshannon-iris'] + Y_test1 = pdist(X, 'jensenshannon') + _assert_within_tol(Y_test1, Y_right, eps, verbose > 2) + + def test_pdist_jensenshannon_iris_nonC(self): + eps = 5e-12 + X = eo['iris'] + Y_right = eo['pdist-jensenshannon-iris'] + Y_test2 = pdist(X, 'test_jensenshannon') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_djaccard_allzeros_nonC(self): + eps = 1e-08 + Y = pdist(np.zeros((5, 3)), 'test_jaccard') + _assert_within_tol(np.zeros(10), Y, eps) + + def test_pdist_chebyshev_random(self): + eps = 1e-08 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-chebyshev'] + Y_test1 = pdist(X, 'chebyshev') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_chebyshev_random_float32(self): + eps = 1e-07 + X = np.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-chebyshev'] + Y_test1 = pdist(X, 'chebyshev') + _assert_within_tol(Y_test1, Y_right, eps, verbose > 2) + + def test_pdist_chebyshev_random_nonC(self): + eps = 1e-08 + X = eo['pdist-double-inp'] + Y_right = eo['pdist-chebyshev'] + Y_test2 = pdist(X, 'test_chebyshev') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_chebyshev_iris(self): + eps = 1e-15 + X = eo['iris'] + Y_right = eo['pdist-chebyshev-iris'] + Y_test1 = pdist(X, 'chebyshev') + _assert_within_tol(Y_test1, Y_right, eps) + + def test_pdist_chebyshev_iris_float32(self): + eps = 1e-06 + X = np.float32(eo['iris']) + Y_right = eo['pdist-chebyshev-iris'] + Y_test1 = pdist(X, 'chebyshev') + _assert_within_tol(Y_test1, Y_right, eps, verbose > 2) + + def test_pdist_chebyshev_iris_nonC(self): + eps = 1e-15 + X = eo['iris'] + Y_right = eo['pdist-chebyshev-iris'] + Y_test2 = pdist(X, 'test_chebyshev') + _assert_within_tol(Y_test2, Y_right, eps) + + def test_pdist_matching_mtica1(self): + # Test matching(*,*) with mtica example #1 (nums). + m = wmatching(np.array([1, 0, 1, 1, 0]), + np.array([1, 1, 0, 1, 1])) + m2 = wmatching(np.array([1, 0, 1, 1, 0], dtype=bool), + np.array([1, 1, 0, 1, 1], dtype=bool)) + assert_allclose(m, 0.6, rtol=0, atol=1e-10) + assert_allclose(m2, 0.6, rtol=0, atol=1e-10) + + def test_pdist_matching_mtica2(self): + # Test matching(*,*) with mtica example #2. 
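+        # matching counts the proportion of disagreeing positions:
+        # [1, 0, 1] vs [1, 1, 0] disagree in 2 of 3 slots, hence the
+        # expected 2/3 below.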
+ m = wmatching(np.array([1, 0, 1]), + np.array([1, 1, 0])) + m2 = wmatching(np.array([1, 0, 1], dtype=bool), + np.array([1, 1, 0], dtype=bool)) + assert_allclose(m, 2 / 3, rtol=0, atol=1e-10) + assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10) + + def test_pdist_jaccard_mtica1(self): + m = wjaccard(np.array([1, 0, 1, 1, 0]), + np.array([1, 1, 0, 1, 1])) + m2 = wjaccard(np.array([1, 0, 1, 1, 0], dtype=bool), + np.array([1, 1, 0, 1, 1], dtype=bool)) + assert_allclose(m, 0.6, rtol=0, atol=1e-10) + assert_allclose(m2, 0.6, rtol=0, atol=1e-10) + + def test_pdist_jaccard_mtica2(self): + m = wjaccard(np.array([1, 0, 1]), + np.array([1, 1, 0])) + m2 = wjaccard(np.array([1, 0, 1], dtype=bool), + np.array([1, 1, 0], dtype=bool)) + assert_allclose(m, 2 / 3, rtol=0, atol=1e-10) + assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10) + + def test_pdist_yule_mtica1(self): + m = wyule(np.array([1, 0, 1, 1, 0]), + np.array([1, 1, 0, 1, 1])) + m2 = wyule(np.array([1, 0, 1, 1, 0], dtype=bool), + np.array([1, 1, 0, 1, 1], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 2, rtol=0, atol=1e-10) + assert_allclose(m2, 2, rtol=0, atol=1e-10) + + def test_pdist_yule_mtica2(self): + m = wyule(np.array([1, 0, 1]), + np.array([1, 1, 0])) + m2 = wyule(np.array([1, 0, 1], dtype=bool), + np.array([1, 1, 0], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 2, rtol=0, atol=1e-10) + assert_allclose(m2, 2, rtol=0, atol=1e-10) + + def test_pdist_dice_mtica1(self): + m = wdice(np.array([1, 0, 1, 1, 0]), + np.array([1, 1, 0, 1, 1])) + m2 = wdice(np.array([1, 0, 1, 1, 0], dtype=bool), + np.array([1, 1, 0, 1, 1], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 3 / 7, rtol=0, atol=1e-10) + assert_allclose(m2, 3 / 7, rtol=0, atol=1e-10) + + def test_pdist_dice_mtica2(self): + m = wdice(np.array([1, 0, 1]), + np.array([1, 1, 0])) + m2 = wdice(np.array([1, 0, 1], dtype=bool), + np.array([1, 1, 0], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 0.5, rtol=0, atol=1e-10) + assert_allclose(m2, 0.5, rtol=0, atol=1e-10) + + def test_pdist_sokalsneath_mtica1(self): + m = sokalsneath(np.array([1, 0, 1, 1, 0]), + np.array([1, 1, 0, 1, 1])) + m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=bool), + np.array([1, 1, 0, 1, 1], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 3 / 4, rtol=0, atol=1e-10) + assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10) + + def test_pdist_sokalsneath_mtica2(self): + m = wsokalsneath(np.array([1, 0, 1]), + np.array([1, 1, 0])) + m2 = wsokalsneath(np.array([1, 0, 1], dtype=bool), + np.array([1, 1, 0], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 4 / 5, rtol=0, atol=1e-10) + assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10) + + def test_pdist_rogerstanimoto_mtica1(self): + m = wrogerstanimoto(np.array([1, 0, 1, 1, 0]), + np.array([1, 1, 0, 1, 1])) + m2 = wrogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=bool), + np.array([1, 1, 0, 1, 1], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 3 / 4, rtol=0, atol=1e-10) + assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10) + + def test_pdist_rogerstanimoto_mtica2(self): + m = wrogerstanimoto(np.array([1, 0, 1]), + np.array([1, 1, 0])) + m2 = wrogerstanimoto(np.array([1, 0, 1], dtype=bool), + np.array([1, 1, 0], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 4 / 5, rtol=0, atol=1e-10) + assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10) + + def test_pdist_russellrao_mtica1(self): + m = wrussellrao(np.array([1, 0, 1, 1, 0]), + np.array([1, 1, 0, 1, 1])) + m2 = 
wrussellrao(np.array([1, 0, 1, 1, 0], dtype=bool), + np.array([1, 1, 0, 1, 1], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 3 / 5, rtol=0, atol=1e-10) + assert_allclose(m2, 3 / 5, rtol=0, atol=1e-10) + + def test_pdist_russellrao_mtica2(self): + m = wrussellrao(np.array([1, 0, 1]), + np.array([1, 1, 0])) + m2 = wrussellrao(np.array([1, 0, 1], dtype=bool), + np.array([1, 1, 0], dtype=bool)) + if verbose > 2: + print(m) + assert_allclose(m, 2 / 3, rtol=0, atol=1e-10) + assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10) + + @pytest.mark.slow + def test_pdist_canberra_match(self): + D = eo['iris'] + if verbose > 2: + print(D.shape, D.dtype) + eps = 1e-10 + y1 = wpdist_no_const(D, "canberra") + y2 = wpdist_no_const(D, "test_canberra") + _assert_within_tol(y1, y2, eps, verbose > 2) + + def test_pdist_canberra_ticket_711(self): + # Test pdist(X, 'canberra') to see if Canberra gives the right result + # as reported on gh-1238. + eps = 1e-8 + pdist_y = wpdist_no_const(([3.3], [3.4]), "canberra") + right_y = 0.01492537 + _assert_within_tol(pdist_y, right_y, eps, verbose > 2) + + def test_pdist_custom_notdouble(self): + # tests that when using a custom metric the data type is not altered + class myclass(object): + pass + + def _my_metric(x, y): + if not isinstance(x[0], myclass) or not isinstance(y[0], myclass): + raise ValueError("Type has been changed") + return 1.123 + data = np.array([[myclass()], [myclass()]], dtype=object) + pdist_y = pdist(data, metric=_my_metric) + right_y = 1.123 + assert_equal(pdist_y, right_y, verbose=verbose > 2) + + def _check_calling_conventions(self, X, metric, eps=1e-07, **kwargs): + # helper function for test_pdist_calling_conventions + try: + y1 = pdist(X, metric=metric, **kwargs) + y2 = pdist(X, metric=eval(metric), **kwargs) + y3 = pdist(X, metric="test_" + metric, **kwargs) + except Exception as e: + e_cls = e.__class__ + if verbose > 2: + print(e_cls.__name__) + print(e) + assert_raises(e_cls, pdist, X, metric=metric, **kwargs) + assert_raises(e_cls, pdist, X, metric=eval(metric), **kwargs) + assert_raises(e_cls, pdist, X, metric="test_" + metric, **kwargs) + else: + _assert_within_tol(y1, y2, rtol=eps, verbose_=verbose > 2) + _assert_within_tol(y1, y3, rtol=eps, verbose_=verbose > 2) + + def test_pdist_calling_conventions(self): + # Ensures that specifying the metric with a str or scipy function + # gives the same behaviour (i.e. same result or same exception). + # NOTE: The correctness should be checked within each metric tests. + # NOTE: Extra args should be checked with a dedicated test + eps = 1e-07 + for eo_name in self.rnd_eo_names: + # subsampling input data to speed-up tests + # NOTE: num samples needs to be > than dimensions for mahalanobis + X = eo[eo_name][::5, ::2] + for metric in _METRICS_NAMES: + if metric == 'wminkowski': + continue + if verbose > 2: + print("testing: ", metric, " with: ", eo_name) + if metric in {'dice', 'yule', 'kulsinski', 'matching', + 'rogerstanimoto', 'russellrao', 'sokalmichener', + 'sokalsneath'} and 'bool' not in eo_name: + # python version permits non-bools e.g. 
for fuzzy logic + continue + self._check_calling_conventions(X, metric) + + # Testing built-in metrics with extra args + if metric == "seuclidean": + V = np.var(X.astype(np.double), axis=0, ddof=1) + self._check_calling_conventions(X, metric, V=V) + elif metric == "mahalanobis": + V = np.atleast_2d(np.cov(X.astype(np.double).T)) + VI = np.array(np.linalg.inv(V).T) + self._check_calling_conventions(X, metric, VI=VI) + + def test_pdist_dtype_equivalence(self): + # Tests that the result is not affected by type up-casting + eps = 1e-07 + tests = [(eo['random-bool-data'], self.valid_upcasts['bool']), + (eo['random-uint-data'], self.valid_upcasts['uint']), + (eo['random-int-data'], self.valid_upcasts['int']), + (eo['random-float32-data'], self.valid_upcasts['float32'])] + for metric in _METRICS_NAMES: + for test in tests: + X1 = test[0][::5, ::2] + try: + y1 = pdist(X1, metric=metric) + except Exception as e: + e_cls = e.__class__ + if verbose > 2: + print(e_cls.__name__) + print(e) + for new_type in test[1]: + X2 = new_type(X1) + assert_raises(e_cls, pdist, X2, metric=metric) + else: + for new_type in test[1]: + y2 = pdist(new_type(X1), metric=metric) + _assert_within_tol(y1, y2, eps, verbose > 2) + + def test_pdist_out(self): + # Test that out parameter works properly + eps = 1e-07 + X = eo['random-float32-data'][::5, ::2] + out_size = int((X.shape[0] * (X.shape[0] - 1)) / 2) + for metric in _METRICS_NAMES: + kwargs = dict() + if metric in ['minkowski', 'wminkowski']: + kwargs['p'] = 1.23 + if metric == 'wminkowski': + kwargs['w'] = 1.0 / X.std(axis=0) + out1 = np.empty(out_size, dtype=np.double) + Y_right = pdist(X, metric, **kwargs) + Y_test1 = pdist(X, metric, out=out1, **kwargs) + # test that output is numerically equivalent + _assert_within_tol(Y_test1, Y_right, eps) + # test that Y_test1 and out1 are the same object + assert_(Y_test1 is out1) + # test for incorrect shape + out2 = np.empty(out_size + 3, dtype=np.double) + assert_raises(ValueError, pdist, X, metric, out=out2, **kwargs) + # test for (C-)contiguous output + out3 = np.empty(2 * out_size, dtype=np.double)[::2] + assert_raises(ValueError, pdist, X, metric, out=out3, **kwargs) + # test for incorrect dtype + out5 = np.empty(out_size, dtype=np.int64) + assert_raises(ValueError, pdist, X, metric, out=out5, **kwargs) + + def test_striding(self): + # test that striding is handled correct with calls to + # _copy_array_if_base_present + eps = 1e-07 + X = eo['random-float32-data'][::5, ::2] + X_copy = X.copy() + + # confirm contiguity + assert_(not X.flags.c_contiguous) + assert_(X_copy.flags.c_contiguous) + + for metric in _METRICS_NAMES: + kwargs = dict() + if metric in ['minkowski', 'wminkowski']: + kwargs['p'] = 1.23 + if metric == 'wminkowski': + kwargs['w'] = 1.0 / X.std(axis=0) + Y1 = pdist(X, metric, **kwargs) + Y2 = pdist(X_copy, metric, **kwargs) + # test that output is numerically equivalent + _assert_within_tol(Y1, Y2, eps, verbose > 2) + +class TestSomeDistanceFunctions(object): + + def setup_method(self): + # 1D arrays + x = np.array([1.0, 2.0, 3.0]) + y = np.array([1.0, 1.0, 5.0]) + # 3x1 arrays + x31 = x[:, np.newaxis] + y31 = y[:, np.newaxis] + # 1x3 arrays + x13 = x31.T + y13 = y31.T + + self.cases = [(x, y), (x31, y31), (x13, y13)] + + def test_minkowski(self): + with suppress_warnings() as w: + w.filter(message="`wminkowski` is deprecated") + for x, y in self.cases: + dist1 = wminkowski(x, y, p=1) + assert_almost_equal(dist1, 3.0) + dist1p5 = wminkowski(x, y, p=1.5) + assert_almost_equal(dist1p5, (1.0 + 
2.0**1.5)**(2. / 3))
+                dist2 = wminkowski(x, y, p=2)
+                assert_almost_equal(dist2, np.sqrt(5))
+
+    def test_old_wminkowski(self):
+        with suppress_warnings() as wrn:
+            wrn.filter(message="`wminkowski` is deprecated")
+            w = np.array([1.0, 2.0, 0.5])
+            for x, y in self.cases:
+                dist1 = old_wminkowski(x, y, p=1, w=w)
+                assert_almost_equal(dist1, 3.0)
+                dist1p5 = old_wminkowski(x, y, p=1.5, w=w)
+                assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))
+                dist2 = old_wminkowski(x, y, p=2, w=w)
+                assert_almost_equal(dist2, np.sqrt(5))
+
+            # test weights Issue #7893
+            arr = np.arange(4)
+            w = np.full_like(arr, 4)
+            assert_almost_equal(old_wminkowski(arr, arr + 1, p=2, w=w), 8.0)
+            assert_almost_equal(wminkowski(arr, arr + 1, p=2, w=w), 4.0)
+
+    def test_euclidean(self):
+        for x, y in self.cases:
+            dist = weuclidean(x, y)
+            assert_almost_equal(dist, np.sqrt(5))
+
+    def test_sqeuclidean(self):
+        for x, y in self.cases:
+            dist = wsqeuclidean(x, y)
+            assert_almost_equal(dist, 5.0)
+
+    def test_cosine(self):
+        for x, y in self.cases:
+            dist = wcosine(x, y)
+            assert_almost_equal(dist, 1.0 - 18.0 / (np.sqrt(14) * np.sqrt(27)))
+
+    def test_correlation(self):
+        xm = np.array([-1.0, 0, 1.0])
+        ym = np.array([-4.0 / 3, -4.0 / 3, 5.0 - 7.0 / 3])
+        for x, y in self.cases:
+            dist = wcorrelation(x, y)
+            assert_almost_equal(dist, 1.0 - np.dot(xm, ym) / (norm(xm) * norm(ym)))
+
+    def test_mahalanobis(self):
+        x = np.array([1.0, 2.0, 3.0])
+        y = np.array([1.0, 1.0, 5.0])
+        vi = np.array([[2.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
+        for x, y in self.cases:
+            dist = mahalanobis(x, y, vi)
+            assert_almost_equal(dist, np.sqrt(6.0))
+
+
+class TestSquareForm(object):
+    checked_dtypes = [np.float64, np.float32, np.int32, np.int8, bool]
+
+    def test_squareform_matrix(self):
+        for dtype in self.checked_dtypes:
+            self.check_squareform_matrix(dtype)
+
+    def test_squareform_vector(self):
+        for dtype in self.checked_dtypes:
+            self.check_squareform_vector(dtype)
+
+    def check_squareform_matrix(self, dtype):
+        A = np.zeros((0, 0), dtype=dtype)
+        rA = squareform(A)
+        assert_equal(rA.shape, (0,))
+        assert_equal(rA.dtype, dtype)
+
+        A = np.zeros((1, 1), dtype=dtype)
+        rA = squareform(A)
+        assert_equal(rA.shape, (0,))
+        assert_equal(rA.dtype, dtype)
+
+        A = np.array([[0, 4.2], [4.2, 0]], dtype=dtype)
+        rA = squareform(A)
+        assert_equal(rA.shape, (1,))
+        assert_equal(rA.dtype, dtype)
+        assert_array_equal(rA, np.array([4.2], dtype=dtype))
+
+    def check_squareform_vector(self, dtype):
+        v = np.zeros((0,), dtype=dtype)
+        rv = squareform(v)
+        assert_equal(rv.shape, (1, 1))
+        assert_equal(rv.dtype, dtype)
+        assert_array_equal(rv, [[0]])
+
+        v = np.array([8.3], dtype=dtype)
+        rv = squareform(v)
+        assert_equal(rv.shape, (2, 2))
+        assert_equal(rv.dtype, dtype)
+        assert_array_equal(rv, np.array([[0, 8.3], [8.3, 0]], dtype=dtype))
+
+    def test_squareform_multi_matrix(self):
+        for n in xrange(2, 5):
+            self.check_squareform_multi_matrix(n)
+
+    def check_squareform_multi_matrix(self, n):
+        X = np.random.rand(n, 4)
+        Y = wpdist_no_const(X)
+        assert_equal(len(Y.shape), 1)
+        A = squareform(Y)
+        Yr = squareform(A)
+        s = A.shape
+        k = 0
+        if verbose >= 3:
+            print(A.shape, Y.shape, Yr.shape)
+        assert_equal(len(s), 2)
+        assert_equal(len(Yr.shape), 1)
+        assert_equal(s[0], s[1])
+        for i in xrange(0, s[0]):
+            for j in xrange(i + 1, s[1]):
+                if i != j:
+                    assert_equal(A[i, j], Y[k])
+                    k += 1
+                else:
+                    assert_equal(A[i, j], 0)
+
+
+class TestNumObsY(object):
+
+    def test_num_obs_y_multi_matrix(self):
+        for n in xrange(2, 10):
+            X = np.random.rand(n, 4)
+            Y = wpdist_no_const(X)
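+            # num_obs_y inverts L = n*(n-1)/2: the only n consistent with
+            # a condensed vector of length L is (1 + sqrt(1 + 8*L)) / 2.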
+            assert_equal(num_obs_y(Y), n)
+
+    def test_num_obs_y_1(self):
+        # Tests num_obs_y(y) on a condensed distance matrix over 1
+        # observation. Expecting exception.
+        assert_raises(ValueError, self.check_y, 1)
+
+    def test_num_obs_y_2(self):
+        # Tests num_obs_y(y) on a condensed distance matrix over 2
+        # observations.
+        assert_(self.check_y(2))
+
+    def test_num_obs_y_3(self):
+        assert_(self.check_y(3))
+
+    def test_num_obs_y_4(self):
+        assert_(self.check_y(4))
+
+    def test_num_obs_y_5_10(self):
+        for i in xrange(5, 16):
+            self.minit(i)
+
+    def test_num_obs_y_2_100(self):
+        # Tests num_obs_y(y) on 100 improper condensed distance matrices.
+        # Expecting exception.
+        a = set([])
+        for n in xrange(2, 16):
+            a.add(n * (n - 1) / 2)
+        for i in xrange(5, 105):
+            if i not in a:
+                assert_raises(ValueError, self.bad_y, i)
+
+    def minit(self, n):
+        assert_(self.check_y(n))
+
+    def bad_y(self, n):
+        y = np.random.rand(n)
+        return num_obs_y(y)
+
+    def check_y(self, n):
+        return num_obs_y(self.make_y(n)) == n
+
+    def make_y(self, n):
+        return np.random.rand((n * (n - 1)) // 2)
+
+
+class TestNumObsDM(object):
+
+    def test_num_obs_dm_multi_matrix(self):
+        for n in xrange(1, 10):
+            X = np.random.rand(n, 4)
+            Y = wpdist_no_const(X)
+            A = squareform(Y)
+            if verbose >= 3:
+                print(A.shape, Y.shape)
+            assert_equal(num_obs_dm(A), n)
+
+    def test_num_obs_dm_0(self):
+        # Tests num_obs_dm(D) on a 0x0 distance matrix. Expecting exception.
+        assert_(self.check_D(0))
+
+    def test_num_obs_dm_1(self):
+        # Tests num_obs_dm(D) on a 1x1 distance matrix.
+        assert_(self.check_D(1))
+
+    def test_num_obs_dm_2(self):
+        assert_(self.check_D(2))
+
+    def test_num_obs_dm_3(self):
+        assert_(self.check_D(3))
+
+    def test_num_obs_dm_4(self):
+        assert_(self.check_D(4))
+
+    def check_D(self, n):
+        return num_obs_dm(self.make_D(n)) == n
+
+    def make_D(self, n):
+        return np.random.rand(n, n)
+
+
+def is_valid_dm_throw(D):
+    return is_valid_dm(D, throw=True)
+
+
+class TestIsValidDM(object):
+
+    def test_is_valid_dm_improper_shape_1D_E(self):
+        D = np.zeros((5,), dtype=np.double)
+        assert_raises(ValueError, is_valid_dm_throw, (D))
+
+    def test_is_valid_dm_improper_shape_1D_F(self):
+        D = np.zeros((5,), dtype=np.double)
+        assert_equal(is_valid_dm(D), False)
+
+    def test_is_valid_dm_improper_shape_3D_E(self):
+        D = np.zeros((3, 3, 3), dtype=np.double)
+        assert_raises(ValueError, is_valid_dm_throw, (D))
+
+    def test_is_valid_dm_improper_shape_3D_F(self):
+        D = np.zeros((3, 3, 3), dtype=np.double)
+        assert_equal(is_valid_dm(D), False)
+
+    def test_is_valid_dm_nonzero_diagonal_E(self):
+        y = np.random.rand(10)
+        D = squareform(y)
+        for i in xrange(0, 5):
+            D[i, i] = 2.0
+        assert_raises(ValueError, is_valid_dm_throw, (D))
+
+    def test_is_valid_dm_nonzero_diagonal_F(self):
+        y = np.random.rand(10)
+        D = squareform(y)
+        for i in xrange(0, 5):
+            D[i, i] = 2.0
+        assert_equal(is_valid_dm(D), False)
+
+    def test_is_valid_dm_asymmetric_E(self):
+        y = np.random.rand(10)
+        D = squareform(y)
+        D[1, 3] = D[3, 1] + 1
+        assert_raises(ValueError, is_valid_dm_throw, (D))
+
+    def test_is_valid_dm_asymmetric_F(self):
+        y = np.random.rand(10)
+        D = squareform(y)
+        D[1, 3] = D[3, 1] + 1
+        assert_equal(is_valid_dm(D), False)
+
+    def test_is_valid_dm_correct_1_by_1(self):
+        D = np.zeros((1, 1), dtype=np.double)
+        assert_equal(is_valid_dm(D), True)
+
+    def test_is_valid_dm_correct_2_by_2(self):
+        y = np.random.rand(1)
+        D = squareform(y)
+        assert_equal(is_valid_dm(D), True)
+
+    def test_is_valid_dm_correct_3_by_3(self):
+        y = np.random.rand(3)
+        D = squareform(y)
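+        # squareform of a length-3 condensed vector yields a symmetric
+        # 3x3 matrix with a zero diagonal -- exactly the properties that
+        # is_valid_dm checks for.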
assert_equal(is_valid_dm(D), True) + + def test_is_valid_dm_correct_4_by_4(self): + y = np.random.rand(6) + D = squareform(y) + assert_equal(is_valid_dm(D), True) + + def test_is_valid_dm_correct_5_by_5(self): + y = np.random.rand(10) + D = squareform(y) + assert_equal(is_valid_dm(D), True) + + +def is_valid_y_throw(y): + return is_valid_y(y, throw=True) + + +class TestIsValidY(object): + # If test case name ends on "_E" then an exception is expected for the + # given input, if it ends in "_F" then False is expected for the is_valid_y + # check. Otherwise the input is expected to be valid. + + def test_is_valid_y_improper_shape_2D_E(self): + y = np.zeros((3, 3,), dtype=np.double) + assert_raises(ValueError, is_valid_y_throw, (y)) + + def test_is_valid_y_improper_shape_2D_F(self): + y = np.zeros((3, 3,), dtype=np.double) + assert_equal(is_valid_y(y), False) + + def test_is_valid_y_improper_shape_3D_E(self): + y = np.zeros((3, 3, 3), dtype=np.double) + assert_raises(ValueError, is_valid_y_throw, (y)) + + def test_is_valid_y_improper_shape_3D_F(self): + y = np.zeros((3, 3, 3), dtype=np.double) + assert_equal(is_valid_y(y), False) + + def test_is_valid_y_correct_2_by_2(self): + y = self.correct_n_by_n(2) + assert_equal(is_valid_y(y), True) + + def test_is_valid_y_correct_3_by_3(self): + y = self.correct_n_by_n(3) + assert_equal(is_valid_y(y), True) + + def test_is_valid_y_correct_4_by_4(self): + y = self.correct_n_by_n(4) + assert_equal(is_valid_y(y), True) + + def test_is_valid_y_correct_5_by_5(self): + y = self.correct_n_by_n(5) + assert_equal(is_valid_y(y), True) + + def test_is_valid_y_2_100(self): + a = set([]) + for n in xrange(2, 16): + a.add(n * (n - 1) / 2) + for i in xrange(5, 105): + if i not in a: + assert_raises(ValueError, self.bad_y, i) + + def bad_y(self, n): + y = np.random.rand(n) + return is_valid_y(y, throw=True) + + def correct_n_by_n(self, n): + y = np.random.rand((n * (n - 1)) // 2) + return y + + +def test_bad_p(): + # Raise ValueError if p < 1. + p = 0.5 + with suppress_warnings() as w: + w.filter(message="`wminkowski` is deprecated") + assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p) + assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p, [1, 1]) + + +def test_sokalsneath_all_false(): + # Regression test for ticket #876 + assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False]) + + +def test_canberra(): + # Regression test for ticket #1430. + assert_equal(wcanberra([1, 2, 3], [2, 4, 6]), 1) + assert_equal(wcanberra([1, 1, 0, 0], [1, 0, 1, 0]), 2) + + +def test_braycurtis(): + # Regression test for ticket #1430. + assert_almost_equal(wbraycurtis([1, 2, 3], [2, 4, 6]), 1. / 3, decimal=15) + assert_almost_equal(wbraycurtis([1, 1, 0, 0], [1, 0, 1, 0]), 0.5, decimal=15) + + +def test_euclideans(): + # Regression test for ticket #1328. + x1 = np.array([1, 1, 1]) + x2 = np.array([0, 0, 0]) + + # Basic test of the calculation. 
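+    # With x1 = [1, 1, 1] and x2 = [0, 0, 0] the squared distance is
+    # 1 + 1 + 1 = 3 and the Euclidean distance is sqrt(3).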
+ assert_almost_equal(wsqeuclidean(x1, x2), 3.0, decimal=14) + assert_almost_equal(weuclidean(x1, x2), np.sqrt(3), decimal=14) + + # Check flattening for (1, N) or (N, 1) inputs + assert_almost_equal(weuclidean(x1[np.newaxis, :], x2[np.newaxis, :]), + np.sqrt(3), decimal=14) + assert_almost_equal(wsqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]), + 3.0, decimal=14) + assert_almost_equal(wsqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]), + 3.0, decimal=14) + + # Distance metrics only defined for vectors (= 1-D) + x = np.arange(4).reshape(2, 2) + assert_raises(ValueError, weuclidean, x, x) + assert_raises(ValueError, wsqeuclidean, x, x) + + # Another check, with random data. + rs = np.random.RandomState(1234567890) + x = rs.rand(10) + y = rs.rand(10) + d1 = weuclidean(x, y) + d2 = wsqeuclidean(x, y) + assert_almost_equal(d1**2, d2, decimal=14) + + +def test_hamming_unequal_length(): + # Regression test for gh-4290. + x = [0, 0, 1] + y = [1, 0, 1, 0] + # Used to give an AttributeError from ndarray.mean called on bool + assert_raises(ValueError, whamming, x, y) + + +def test_hamming_string_array(): + # https://github.com/scikit-learn/scikit-learn/issues/4014 + a = np.array(['eggs', 'spam', 'spam', 'eggs', 'spam', 'spam', 'spam', + 'spam', 'spam', 'spam', 'spam', 'eggs', 'eggs', 'spam', + 'eggs', 'eggs', 'eggs', 'eggs', 'eggs', 'spam'], + dtype='|S4') + b = np.array(['eggs', 'spam', 'spam', 'eggs', 'eggs', 'spam', 'spam', + 'spam', 'spam', 'eggs', 'spam', 'eggs', 'spam', 'eggs', + 'spam', 'spam', 'eggs', 'spam', 'spam', 'eggs'], + dtype='|S4') + desired = 0.45 + assert_allclose(whamming(a, b), desired) + + +def test_minkowski_w(): + # Regression test for gh-8142. + arr_in = np.array([[83.33333333, 100., 83.33333333, 100., 36., + 60., 90., 150., 24., 48.], + [83.33333333, 100., 83.33333333, 100., 36., + 60., 90., 150., 24., 48.]]) + p0 = pdist(arr_in, metric='minkowski', p=1, w=None) + c0 = cdist(arr_in, arr_in, metric='minkowski', p=1, w=None) + p1 = pdist(arr_in, metric='minkowski', p=1) + c1 = cdist(arr_in, arr_in, metric='minkowski', p=1) + + assert_allclose(p0, p1, rtol=1e-15) + assert_allclose(c0, c1, rtol=1e-15) + + +def test_sqeuclidean_dtypes(): + # Assert that sqeuclidean returns the right types of values. + # Integer types should be converted to floating for stability. + # Floating point types should be the same as the input. + x = [1, 2, 3] + y = [4, 5, 6] + + for dtype in [np.int8, np.int16, np.int32, np.int64]: + d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype)) + assert_(np.issubdtype(d.dtype, np.floating)) + + for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]: + d1 = wsqeuclidean([0], np.asarray([-1], dtype=dtype)) + d2 = wsqeuclidean(np.asarray([-1], dtype=dtype), [0]) + + assert_equal(d1, d2) + assert_equal(d1, np.float64(np.iinfo(dtype).max)**2) + + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + for dtype in ['float16', 'float128']: + # These aren't present in older numpy versions; float128 may also not + # be present on all platforms. + if hasattr(np, dtype): + dtypes.append(getattr(np, dtype)) + + for dtype in dtypes: + d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype)) + assert_equal(d.dtype, dtype) + + +def test_sokalmichener(): + # Test that sokalmichener has the same result for bool and int inputs. + p = [True, True, False] + q = [True, False, True] + x = [int(b) for b in p] + y = [int(b) for b in q] + dist1 = sokalmichener(p, q) + dist2 = sokalmichener(x, y) + # These should be exactly the same. 
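+    # For p = [T, T, F] and q = [T, F, T]: 2 mismatches (R = 2), 1 T/T and
+    # 0 F/F matches (S = 1), so 2R / (S + 2R) = 4 / 5 = 0.8 for both dtypes.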
+ assert_equal(dist1, dist2) + + +def test_modifies_input(): + # test whether cdist or pdist modifies input arrays + X1 = np.asarray([[1., 2., 3.], + [1.2, 2.3, 3.4], + [2.2, 2.3, 4.4], + [22.2, 23.3, 44.4]]) + X1_copy = X1.copy() + with suppress_warnings() as w: + w.filter(message="`wminkowski` is deprecated") + for metric in _METRICS_NAMES: + kwargs = {"w": 1.0 / X1.std(axis=0)} if metric == "wminkowski" else {} + cdist(X1, X1, metric, **kwargs) + pdist(X1, metric, **kwargs) + assert_array_equal(X1, X1_copy) + + +def test_Xdist_deprecated_args(): + # testing both cdist and pdist deprecated warnings + X1 = np.asarray([[1., 2., 3.], + [1.2, 2.3, 3.4], + [2.2, 2.3, 4.4], + [22.2, 23.3, 44.4]]) + weights = np.arange(3) + warn_msg_kwargs = "Got unexpected kwarg" + warn_msg_args = "[0-9]* metric parameters have been passed as positional" + for metric in _METRICS_NAMES: + kwargs = {"w": weights} if metric == "wminkowski" else dict() + with suppress_warnings() as w: + log = w.record(message=warn_msg_args) + w.filter(message=warn_msg_kwargs) + w.filter(message="`wminkowski` is deprecated") + cdist(X1, X1, metric, 2., **kwargs) + pdist(X1, metric, 2., **kwargs) + assert_(len(log) == 2) + + for arg in ["p", "V", "VI"]: + kwargs = {arg:"foo"} + + if metric == "wminkowski": + if "p" in kwargs or "w" in kwargs: + continue + kwargs["w"] = weights + + if((arg == "V" and metric == "seuclidean") or + (arg == "VI" and metric == "mahalanobis") or + (arg == "p" and metric == "minkowski")): + continue + + with suppress_warnings() as w: + log = w.record(message=warn_msg_kwargs) + w.filter(message="`wminkowski` is deprecated") + cdist(X1, X1, metric, **kwargs) + pdist(X1, metric, **kwargs) + assert_(len(log) == 2) + + +def test_Xdist_non_negative_weights(): + X = eo['random-float32-data'][::5, ::2] + w = np.ones(X.shape[1]) + w[::5] = -w[::5] + for metric in _METRICS_NAMES: + if metric in ['seuclidean', 'mahalanobis', 'jensenshannon']: + continue + + for m in [metric, eval(metric), "test_" + metric]: + assert_raises(ValueError, pdist, X, m, w=w) + assert_raises(ValueError, cdist, X, X, m, w=w) + + +def test__validate_vector(): + x = [1, 2, 3] + y = _validate_vector(x) + assert_array_equal(y, x) + + y = _validate_vector(x, dtype=np.float64) + assert_array_equal(y, x) + assert_equal(y.dtype, np.float64) + + x = [1] + y = _validate_vector(x) + assert_equal(y.ndim, 1) + assert_equal(y, x) + + x = 1 + y = _validate_vector(x) + assert_equal(y.ndim, 1) + assert_equal(y, [x]) + + x = np.arange(5).reshape(1, -1, 1) + y = _validate_vector(x) + assert_equal(y.ndim, 1) + assert_array_equal(y, x[0, :, 0]) + + x = [[1, 2], [3, 4]] + assert_raises(ValueError, _validate_vector, x) diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_distance.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_distance.pyc new file mode 100644 index 0000000..46ef57a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_distance.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_hausdorff.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_hausdorff.py new file mode 100644 index 0000000..b40051d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_hausdorff.py @@ -0,0 +1,115 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import (assert_almost_equal, + assert_array_equal, + assert_equal, + assert_) +from 
scipy.spatial.distance import directed_hausdorff +from scipy.spatial import distance +from scipy._lib._util import check_random_state + +class TestHausdorff(object): + # Test various properties of the directed Hausdorff code. + + def setup_method(self): + np.random.seed(1234) + random_angles = np.random.random(100) * np.pi * 2 + random_columns = np.column_stack( + (random_angles, random_angles, np.zeros(100))) + random_columns[..., 0] = np.cos(random_columns[..., 0]) + random_columns[..., 1] = np.sin(random_columns[..., 1]) + random_columns_2 = np.column_stack( + (random_angles, random_angles, np.zeros(100))) + random_columns_2[1:, 0] = np.cos(random_columns_2[1:, 0]) * 2.0 + random_columns_2[1:, 1] = np.sin(random_columns_2[1:, 1]) * 2.0 + # move one point farther out so we don't have two perfect circles + random_columns_2[0, 0] = np.cos(random_columns_2[0, 0]) * 3.3 + random_columns_2[0, 1] = np.sin(random_columns_2[0, 1]) * 3.3 + self.path_1 = random_columns + self.path_2 = random_columns_2 + self.path_1_4d = np.insert(self.path_1, 3, 5, axis=1) + self.path_2_4d = np.insert(self.path_2, 3, 27, axis=1) + + def test_symmetry(self): + # Ensure that the directed (asymmetric) Hausdorff distance is + # actually asymmetric + + forward = directed_hausdorff(self.path_1, self.path_2)[0] + reverse = directed_hausdorff(self.path_2, self.path_1)[0] + assert_(forward != reverse) + + def test_brute_force_comparison_forward(self): + # Ensure that the algorithm for directed_hausdorff gives the + # same result as the simple / brute force approach in the + # forward direction. + actual = directed_hausdorff(self.path_1, self.path_2)[0] + # brute force over rows: + expected = max(np.amin(distance.cdist(self.path_1, self.path_2), + axis=1)) + assert_almost_equal(actual, expected, decimal=9) + + def test_brute_force_comparison_reverse(self): + # Ensure that the algorithm for directed_hausdorff gives the + # same result as the simple / brute force approach in the + # reverse direction. + actual = directed_hausdorff(self.path_2, self.path_1)[0] + # brute force over columns: + expected = max(np.amin(distance.cdist(self.path_1, self.path_2), + axis=0)) + assert_almost_equal(actual, expected, decimal=9) + + def test_degenerate_case(self): + # The directed Hausdorff distance must be zero if both input + # data arrays match. + actual = directed_hausdorff(self.path_1, self.path_1)[0] + assert_almost_equal(actual, 0.0, decimal=9) + + def test_2d_data_forward(self): + # Ensure that 2D data is handled properly for a simple case + # relative to brute force approach. + actual = directed_hausdorff(self.path_1[..., :2], + self.path_2[..., :2])[0] + expected = max(np.amin(distance.cdist(self.path_1[..., :2], + self.path_2[..., :2]), + axis=1)) + assert_almost_equal(actual, expected, decimal=9) + + def test_4d_data_reverse(self): + # Ensure that 4D data is handled properly for a simple case + # relative to brute force approach. 
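+        # directed_hausdorff(A, B) = max over a in A of (min over b in B
+        # of ||a - b||); taking the min of cdist along axis=0 below scans
+        # the same columns, i.e. the reverse direction.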
+ actual = directed_hausdorff(self.path_2_4d, self.path_1_4d)[0] + # brute force over columns: + expected = max(np.amin(distance.cdist(self.path_1_4d, self.path_2_4d), + axis=0)) + assert_almost_equal(actual, expected, decimal=9) + + def test_indices(self): + # Ensure that correct point indices are returned -- they should + # correspond to the Hausdorff pair + path_simple_1 = np.array([[-1,-12],[0,0], [1,1], [3,7], [1,2]]) + path_simple_2 = np.array([[0,0], [1,1], [4,100], [10,9]]) + actual = directed_hausdorff(path_simple_2, path_simple_1)[1:] + expected = (2, 3) + assert_array_equal(actual, expected) + + def test_random_state(self): + # ensure that the global random state is not modified because + # the directed Hausdorff algorithm uses randomization + rs = check_random_state(None) + old_global_state = rs.get_state() + directed_hausdorff(self.path_1, self.path_2) + rs2 = check_random_state(None) + new_global_state = rs2.get_state() + assert_equal(new_global_state, old_global_state) + + def test_random_state_None_int(self): + # check that seed values of None or int do not alter global + # random state + for seed in [None, 27870671]: + rs = check_random_state(None) + old_global_state = rs.get_state() + directed_hausdorff(self.path_1, self.path_2, seed) + rs2 = check_random_state(None) + new_global_state = rs2.get_state() + assert_equal(new_global_state, old_global_state) diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_hausdorff.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_hausdorff.pyc new file mode 100644 index 0000000..b334c9a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_hausdorff.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_kdtree.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_kdtree.py new file mode 100644 index 0000000..a27cbb8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_kdtree.py @@ -0,0 +1,1361 @@ +# Copyright Anne M. 
Archibald 2008 +# Released under the scipy license + +from __future__ import division, print_function, absolute_import + +from numpy.testing import (assert_equal, assert_array_equal, assert_, + assert_almost_equal, assert_array_almost_equal) +from pytest import raises as assert_raises +import pytest +from platform import python_implementation +import numpy as np +from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree +from scipy.spatial.ckdtree import cKDTreeNode +from scipy.spatial import minkowski_distance + +import itertools + +def distance_box(a, b, p, boxsize): + diff = a - b + diff[diff > 0.5 * boxsize] -= boxsize + diff[diff < -0.5 * boxsize] += boxsize + d = minkowski_distance(diff, 0, p) + return d + +class ConsistencyTests: + def distance(self, a, b, p): + return minkowski_distance(a, b, p) + + def test_nearest(self): + x = self.x + d, i = self.kdtree.query(x, 1) + assert_almost_equal(d**2,np.sum((x-self.data[i])**2)) + eps = 1e-8 + assert_(np.all(np.sum((self.data-x[np.newaxis,:])**2,axis=1) > d**2-eps)) + + def test_m_nearest(self): + x = self.x + m = self.m + dd, ii = self.kdtree.query(x, m) + d = np.amax(dd) + i = ii[np.argmax(dd)] + assert_almost_equal(d**2,np.sum((x-self.data[i])**2)) + eps = 1e-8 + assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),m) + + def test_points_near(self): + x = self.x + d = self.d + dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d) + eps = 1e-8 + hits = 0 + for near_d, near_i in zip(dd,ii): + if near_d == np.inf: + continue + hits += 1 + assert_almost_equal(near_d**2,np.sum((x-self.data[near_i])**2)) + assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d)) + assert_equal(np.sum(self.distance(self.data,x,2) < d**2+eps),hits) + + def test_points_near_l1(self): + x = self.x + d = self.d + dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d) + eps = 1e-8 + hits = 0 + for near_d, near_i in zip(dd,ii): + if near_d == np.inf: + continue + hits += 1 + assert_almost_equal(near_d,self.distance(x,self.data[near_i],1)) + assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d)) + assert_equal(np.sum(self.distance(self.data,x,1) < d+eps),hits) + + def test_points_near_linf(self): + x = self.x + d = self.d + dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d) + eps = 1e-8 + hits = 0 + for near_d, near_i in zip(dd,ii): + if near_d == np.inf: + continue + hits += 1 + assert_almost_equal(near_d,self.distance(x,self.data[near_i],np.inf)) + assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d)) + assert_equal(np.sum(self.distance(self.data,x,np.inf) < d+eps),hits) + + def test_approx(self): + x = self.x + k = self.k + eps = 0.1 + d_real, i_real = self.kdtree.query(x, k) + d, i = self.kdtree.query(x, k, eps=eps) + assert_(np.all(d <= d_real*(1+eps))) + + +class Test_random(ConsistencyTests): + def setup_method(self): + self.n = 100 + self.m = 4 + np.random.seed(1234) + self.data = np.random.randn(self.n, self.m) + self.kdtree = KDTree(self.data,leafsize=2) + self.x = np.random.randn(self.m) + self.d = 0.2 + self.k = 10 + +class Test_random_far(Test_random): + def setup_method(self): + Test_random.setup_method(self) + self.x = np.random.randn(self.m)+10 + + +class Test_small(ConsistencyTests): + def setup_method(self): + self.data = np.array([[0,0,0], + [0,0,1], + [0,1,0], + [0,1,1], + [1,0,0], + [1,0,1], + [1,1,0], + [1,1,1]]) + self.kdtree = KDTree(self.data) + self.n = self.kdtree.n + self.m = 
self.kdtree.m + np.random.seed(1234) + self.x = np.random.randn(3) + self.d = 0.5 + self.k = 4 + + def test_nearest(self): + assert_array_equal( + self.kdtree.query((0,0,0.1), 1), + (0.1,0)) + + def test_nearest_two(self): + assert_array_equal( + self.kdtree.query((0,0,0.1), 2), + ([0.1,0.9],[0,1])) + + +class Test_small_nonleaf(Test_small): + def setup_method(self): + Test_small.setup_method(self) + self.kdtree = KDTree(self.data,leafsize=1) + + +class Test_small_compiled(Test_small): + def setup_method(self): + Test_small.setup_method(self) + self.kdtree = cKDTree(self.data) + + +class Test_small_nonleaf_compiled(Test_small): + def setup_method(self): + Test_small.setup_method(self) + self.kdtree = cKDTree(self.data,leafsize=1) + + +class Test_random_compiled(Test_random): + def setup_method(self): + Test_random.setup_method(self) + self.kdtree = cKDTree(self.data) + + +class Test_random_far_compiled(Test_random_far): + def setup_method(self): + Test_random_far.setup_method(self) + self.kdtree = cKDTree(self.data) + + +class Test_vectorization: + def setup_method(self): + self.data = np.array([[0,0,0], + [0,0,1], + [0,1,0], + [0,1,1], + [1,0,0], + [1,0,1], + [1,1,0], + [1,1,1]]) + self.kdtree = KDTree(self.data) + + def test_single_query(self): + d, i = self.kdtree.query(np.array([0,0,0])) + assert_(isinstance(d,float)) + assert_(np.issubdtype(i, np.signedinteger)) + + def test_vectorized_query(self): + d, i = self.kdtree.query(np.zeros((2,4,3))) + assert_equal(np.shape(d),(2,4)) + assert_equal(np.shape(i),(2,4)) + + def test_single_query_multiple_neighbors(self): + s = 23 + kk = self.kdtree.n+s + d, i = self.kdtree.query(np.array([0,0,0]),k=kk) + assert_equal(np.shape(d),(kk,)) + assert_equal(np.shape(i),(kk,)) + assert_(np.all(~np.isfinite(d[-s:]))) + assert_(np.all(i[-s:] == self.kdtree.n)) + + def test_vectorized_query_multiple_neighbors(self): + s = 23 + kk = self.kdtree.n+s + d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk) + assert_equal(np.shape(d),(2,4,kk)) + assert_equal(np.shape(i),(2,4,kk)) + assert_(np.all(~np.isfinite(d[:,:,-s:]))) + assert_(np.all(i[:,:,-s:] == self.kdtree.n)) + + def test_single_query_all_neighbors(self): + d, i = self.kdtree.query([0,0,0],k=None,distance_upper_bound=1.1) + assert_(isinstance(d,list)) + assert_(isinstance(i,list)) + + def test_vectorized_query_all_neighbors(self): + d, i = self.kdtree.query(np.zeros((2,4,3)),k=None,distance_upper_bound=1.1) + assert_equal(np.shape(d),(2,4)) + assert_equal(np.shape(i),(2,4)) + + assert_(isinstance(d[0,0],list)) + assert_(isinstance(i[0,0],list)) + + +class Test_vectorization_compiled: + def setup_method(self): + self.data = np.array([[0,0,0], + [0,0,1], + [0,1,0], + [0,1,1], + [1,0,0], + [1,0,1], + [1,1,0], + [1,1,1]]) + self.kdtree = cKDTree(self.data) + + def test_single_query(self): + d, i = self.kdtree.query([0,0,0]) + assert_(isinstance(d,float)) + assert_(isinstance(i,int)) + + def test_vectorized_query(self): + d, i = self.kdtree.query(np.zeros((2,4,3))) + assert_equal(np.shape(d),(2,4)) + assert_equal(np.shape(i),(2,4)) + + def test_vectorized_query_noncontiguous_values(self): + np.random.seed(1234) + qs = np.random.randn(3,1000).T + ds, i_s = self.kdtree.query(qs) + for q, d, i in zip(qs,ds,i_s): + assert_equal(self.kdtree.query(q),(d,i)) + + def test_single_query_multiple_neighbors(self): + s = 23 + kk = self.kdtree.n+s + d, i = self.kdtree.query([0,0,0],k=kk) + assert_equal(np.shape(d),(kk,)) + assert_equal(np.shape(i),(kk,)) + assert_(np.all(~np.isfinite(d[-s:]))) + assert_(np.all(i[-s:] == 
self.kdtree.n)) + + def test_vectorized_query_multiple_neighbors(self): + s = 23 + kk = self.kdtree.n+s + d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk) + assert_equal(np.shape(d),(2,4,kk)) + assert_equal(np.shape(i),(2,4,kk)) + assert_(np.all(~np.isfinite(d[:,:,-s:]))) + assert_(np.all(i[:,:,-s:] == self.kdtree.n)) + + +class ball_consistency: + def distance(self, a, b, p): + return minkowski_distance(a, b, p) + + def test_in_ball(self): + l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps) + for i in l: + assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps)) + + def test_found_all(self): + c = np.ones(self.T.n,dtype=bool) + l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps) + c[l] = False + assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps))) + + +class Test_random_ball(ball_consistency): + + def setup_method(self): + n = 100 + m = 4 + np.random.seed(1234) + self.data = np.random.randn(n,m) + self.T = KDTree(self.data,leafsize=2) + self.x = np.random.randn(m) + self.p = 2. + self.eps = 0 + self.d = 0.2 + + +class Test_random_ball_compiled(ball_consistency): + + def setup_method(self): + n = 100 + m = 4 + np.random.seed(1234) + self.data = np.random.randn(n,m) + self.T = cKDTree(self.data,leafsize=2) + self.x = np.random.randn(m) + self.p = 2. + self.eps = 0 + self.d = 0.2 + +class Test_random_ball_compiled_periodic(ball_consistency): + def distance(self, a, b, p): + return distance_box(a, b, p, 1.0) + + def setup_method(self): + n = 10000 + m = 4 + np.random.seed(1234) + self.data = np.random.uniform(size=(n,m)) + self.T = cKDTree(self.data,leafsize=2, boxsize=1) + self.x = np.ones(m) * 0.1 + self.p = 2. + self.eps = 0 + self.d = 0.2 + + def test_in_ball_outside(self): + l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps) + for i in l: + assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps)) + l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps) + for i in l: + assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps)) + + def test_found_all_outside(self): + c = np.ones(self.T.n,dtype=bool) + l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps) + c[l] = False + assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps))) + + l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps) + c[l] = False + assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps))) + +class Test_random_ball_approx(Test_random_ball): + + def setup_method(self): + Test_random_ball.setup_method(self) + self.eps = 0.1 + + +class Test_random_ball_approx_compiled(Test_random_ball_compiled): + + def setup_method(self): + Test_random_ball_compiled.setup_method(self) + self.eps = 0.1 + +class Test_random_ball_approx_compiled_periodic(Test_random_ball_compiled_periodic): + + def setup_method(self): + Test_random_ball_compiled_periodic.setup_method(self) + self.eps = 0.1 + + +class Test_random_ball_far(Test_random_ball): + + def setup_method(self): + Test_random_ball.setup_method(self) + self.d = 2. + + +class Test_random_ball_far_compiled(Test_random_ball_compiled): + + def setup_method(self): + Test_random_ball_compiled.setup_method(self) + self.d = 2. + +class Test_random_ball_far_compiled_periodic(Test_random_ball_compiled_periodic): + + def setup_method(self): + Test_random_ball_compiled_periodic.setup_method(self) + self.d = 2. 
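Note on the periodic ("boxsize") variants above: they check query_ball_point against distance_box(), defined near the top of this test file, which applies the minimum-image convention, folding each coordinate difference back into [-boxsize/2, boxsize/2] before taking the Minkowski distance. A minimal self-contained sketch of that wrapping; the name wrapped_distance and the sample coordinates are illustrative only, not part of the vendored file:

    import numpy as np
    from scipy.spatial import minkowski_distance

    def wrapped_distance(a, b, p=2.0, boxsize=1.0):
        # fold each coordinate difference back into [-boxsize/2, boxsize/2]
        diff = np.asarray(a, dtype=float) - np.asarray(b, dtype=float)
        diff -= boxsize * np.round(diff / boxsize)
        return minkowski_distance(diff, 0, p)

    # 0.975 and 0.025 are 0.05 apart across the boundary of a unit box,
    # not 0.95 apart:
    print(wrapped_distance([0.975], [0.025]))  # -> 0.05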
+ + +class Test_random_ball_l1(Test_random_ball): + + def setup_method(self): + Test_random_ball.setup_method(self) + self.p = 1 + + +class Test_random_ball_l1_compiled(Test_random_ball_compiled): + + def setup_method(self): + Test_random_ball_compiled.setup_method(self) + self.p = 1 + +class Test_random_ball_l1_compiled_periodic(Test_random_ball_compiled_periodic): + + def setup_method(self): + Test_random_ball_compiled_periodic.setup_method(self) + self.p = 1 + + +class Test_random_ball_linf(Test_random_ball): + + def setup_method(self): + Test_random_ball.setup_method(self) + self.p = np.inf + +class Test_random_ball_linf_compiled_periodic(Test_random_ball_compiled_periodic): + + def setup_method(self): + Test_random_ball_compiled_periodic.setup_method(self) + self.p = np.inf + + +def test_random_ball_vectorized(): + + n = 20 + m = 5 + T = KDTree(np.random.randn(n,m)) + + r = T.query_ball_point(np.random.randn(2,3,m),1) + assert_equal(r.shape,(2,3)) + assert_(isinstance(r[0,0],list)) + + +def test_random_ball_vectorized_compiled(): + + n = 20 + m = 5 + np.random.seed(1234) + T = cKDTree(np.random.randn(n,m)) + + r = T.query_ball_point(np.random.randn(2,3,m),1) + assert_equal(r.shape,(2,3)) + assert_(isinstance(r[0,0],list)) + + +def test_query_ball_point_multithreading(): + np.random.seed(0) + n = 5000 + k = 2 + points = np.random.randn(n,k) + T = cKDTree(points) + l1 = T.query_ball_point(points,0.003,n_jobs=1) + l2 = T.query_ball_point(points,0.003,n_jobs=64) + l3 = T.query_ball_point(points,0.003,n_jobs=-1) + + for i in range(n): + if l1[i] or l2[i]: + assert_array_equal(l1[i],l2[i]) + + for i in range(n): + if l1[i] or l3[i]: + assert_array_equal(l1[i],l3[i]) + + +class two_trees_consistency: + + def distance(self, a, b, p): + return minkowski_distance(a, b, p) + + def test_all_in_ball(self): + r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps) + for i, l in enumerate(r): + for j in l: + assert_(self.distance(self.data1[i],self.data2[j],self.p) <= self.d*(1.+self.eps)) + + def test_found_all(self): + r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps) + for i, l in enumerate(r): + c = np.ones(self.T2.n,dtype=bool) + c[l] = False + assert_(np.all(self.distance(self.data2[c],self.data1[i],self.p) >= self.d/(1.+self.eps))) + + +class Test_two_random_trees(two_trees_consistency): + + def setup_method(self): + n = 50 + m = 4 + np.random.seed(1234) + self.data1 = np.random.randn(n,m) + self.T1 = KDTree(self.data1,leafsize=2) + self.data2 = np.random.randn(n,m) + self.T2 = KDTree(self.data2,leafsize=2) + self.p = 2. + self.eps = 0 + self.d = 0.2 + + +class Test_two_random_trees_compiled(two_trees_consistency): + + def setup_method(self): + n = 50 + m = 4 + np.random.seed(1234) + self.data1 = np.random.randn(n,m) + self.T1 = cKDTree(self.data1,leafsize=2) + self.data2 = np.random.randn(n,m) + self.T2 = cKDTree(self.data2,leafsize=2) + self.p = 2. + self.eps = 0 + self.d = 0.2 + +class Test_two_random_trees_compiled_periodic(two_trees_consistency): + def distance(self, a, b, p): + return distance_box(a, b, p, 1.0) + + def setup_method(self): + n = 50 + m = 4 + np.random.seed(1234) + self.data1 = np.random.uniform(size=(n,m)) + self.T1 = cKDTree(self.data1,leafsize=2, boxsize=1.0) + self.data2 = np.random.uniform(size=(n,m)) + self.T2 = cKDTree(self.data2,leafsize=2, boxsize=1.0) + self.p = 2. 
+ self.eps = 0 + self.d = 0.2 + +class Test_two_random_trees_far(Test_two_random_trees): + + def setup_method(self): + Test_two_random_trees.setup_method(self) + self.d = 2 + + +class Test_two_random_trees_far_compiled(Test_two_random_trees_compiled): + + def setup_method(self): + Test_two_random_trees_compiled.setup_method(self) + self.d = 2 + +class Test_two_random_trees_far_compiled_periodic(Test_two_random_trees_compiled_periodic): + + def setup_method(self): + Test_two_random_trees_compiled_periodic.setup_method(self) + self.d = 2 + + +class Test_two_random_trees_linf(Test_two_random_trees): + + def setup_method(self): + Test_two_random_trees.setup_method(self) + self.p = np.inf + + +class Test_two_random_trees_linf_compiled(Test_two_random_trees_compiled): + + def setup_method(self): + Test_two_random_trees_compiled.setup_method(self) + self.p = np.inf + +class Test_two_random_trees_linf_compiled_periodic(Test_two_random_trees_compiled_periodic): + + def setup_method(self): + Test_two_random_trees_compiled_periodic.setup_method(self) + self.p = np.inf + + +class Test_rectangle: + + def setup_method(self): + self.rect = Rectangle([0,0],[1,1]) + + def test_min_inside(self): + assert_almost_equal(self.rect.min_distance_point([0.5,0.5]),0) + + def test_min_one_side(self): + assert_almost_equal(self.rect.min_distance_point([0.5,1.5]),0.5) + + def test_min_two_sides(self): + assert_almost_equal(self.rect.min_distance_point([2,2]),np.sqrt(2)) + + def test_max_inside(self): + assert_almost_equal(self.rect.max_distance_point([0.5,0.5]),1/np.sqrt(2)) + + def test_max_one_side(self): + assert_almost_equal(self.rect.max_distance_point([0.5,1.5]),np.hypot(0.5,1.5)) + + def test_max_two_sides(self): + assert_almost_equal(self.rect.max_distance_point([2,2]),2*np.sqrt(2)) + + def test_split(self): + less, greater = self.rect.split(0,0.1) + assert_array_equal(less.maxes,[0.1,1]) + assert_array_equal(less.mins,[0,0]) + assert_array_equal(greater.maxes,[1,1]) + assert_array_equal(greater.mins,[0.1,0]) + + +def test_distance_l2(): + assert_almost_equal(minkowski_distance([0,0],[1,1],2),np.sqrt(2)) + + +def test_distance_l1(): + assert_almost_equal(minkowski_distance([0,0],[1,1],1),2) + + +def test_distance_linf(): + assert_almost_equal(minkowski_distance([0,0],[1,1],np.inf),1) + + +def test_distance_vectorization(): + np.random.seed(1234) + x = np.random.randn(10,1,3) + y = np.random.randn(1,7,3) + assert_equal(minkowski_distance(x,y).shape,(10,7)) + + +class count_neighbors_consistency: + def test_one_radius(self): + r = 0.2 + assert_equal(self.T1.count_neighbors(self.T2, r), + np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)])) + + def test_large_radius(self): + r = 1000 + assert_equal(self.T1.count_neighbors(self.T2, r), + np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)])) + + def test_multiple_radius(self): + rs = np.exp(np.linspace(np.log(0.01),np.log(10),3)) + results = self.T1.count_neighbors(self.T2, rs) + assert_(np.all(np.diff(results) >= 0)) + for r,result in zip(rs, results): + assert_equal(self.T1.count_neighbors(self.T2, r), result) + +class Test_count_neighbors(count_neighbors_consistency): + + def setup_method(self): + n = 50 + m = 2 + np.random.seed(1234) + self.T1 = KDTree(np.random.randn(n,m),leafsize=2) + self.T2 = KDTree(np.random.randn(n,m),leafsize=2) + + +class Test_count_neighbors_compiled(count_neighbors_consistency): + + def setup_method(self): + n = 50 + m = 2 + np.random.seed(1234) + self.T1 = cKDTree(np.random.randn(n,m),leafsize=2) + self.T2 = 
cKDTree(np.random.randn(n,m),leafsize=2) + + +class sparse_distance_matrix_consistency: + + def distance(self, a, b, p): + return minkowski_distance(a, b, p) + + def test_consistency_with_neighbors(self): + M = self.T1.sparse_distance_matrix(self.T2, self.r) + r = self.T1.query_ball_tree(self.T2, self.r) + for i,l in enumerate(r): + for j in l: + assert_almost_equal(M[i,j], + self.distance(self.T1.data[i], self.T2.data[j], self.p), + decimal=14) + for ((i,j),d) in M.items(): + assert_(j in r[i]) + + def test_zero_distance(self): + # raises an exception for bug 870 (FIXME: Does it?) + self.T1.sparse_distance_matrix(self.T1, self.r) + +class Test_sparse_distance_matrix(sparse_distance_matrix_consistency): + + def setup_method(self): + n = 50 + m = 4 + np.random.seed(1234) + data1 = np.random.randn(n,m) + data2 = np.random.randn(n,m) + self.T1 = cKDTree(data1,leafsize=2) + self.T2 = cKDTree(data2,leafsize=2) + self.r = 0.5 + self.p = 2 + self.data1 = data1 + self.data2 = data2 + self.n = n + self.m = m + +class Test_sparse_distance_matrix_compiled(sparse_distance_matrix_consistency): + + def setup_method(self): + n = 50 + m = 4 + np.random.seed(0) + data1 = np.random.randn(n,m) + data2 = np.random.randn(n,m) + self.T1 = cKDTree(data1,leafsize=2) + self.T2 = cKDTree(data2,leafsize=2) + self.ref_T1 = KDTree(data1, leafsize=2) + self.ref_T2 = KDTree(data2, leafsize=2) + self.r = 0.5 + self.n = n + self.m = m + self.data1 = data1 + self.data2 = data2 + self.p = 2 + + def test_consistency_with_python(self): + M1 = self.T1.sparse_distance_matrix(self.T2, self.r) + M2 = self.ref_T1.sparse_distance_matrix(self.ref_T2, self.r) + assert_array_almost_equal(M1.todense(), M2.todense(), decimal=14) + + def test_against_logic_error_regression(self): + # regression test for gh-5077 logic error + np.random.seed(0) + too_many = np.array(np.random.randn(18, 2), dtype=int) + tree = cKDTree(too_many, balanced_tree=False, compact_nodes=False) + d = tree.sparse_distance_matrix(tree, 3).todense() + assert_array_almost_equal(d, d.T, decimal=14) + + def test_ckdtree_return_types(self): + # brute-force reference + ref = np.zeros((self.n,self.n)) + for i in range(self.n): + for j in range(self.n): + v = self.data1[i,:] - self.data2[j,:] + ref[i,j] = np.dot(v,v) + ref = np.sqrt(ref) + ref[ref > self.r] = 0. 
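+        # ref is now a dense matrix of Euclidean distances in which every
+        # entry beyond the cutoff self.r is zeroed: the dense counterpart
+        # of what sparse_distance_matrix returns in each output type below.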
+ # test return type 'dict' + dist = np.zeros((self.n,self.n)) + r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='dict') + for i,j in r.keys(): + dist[i,j] = r[(i,j)] + assert_array_almost_equal(ref, dist, decimal=14) + # test return type 'ndarray' + dist = np.zeros((self.n,self.n)) + r = self.T1.sparse_distance_matrix(self.T2, self.r, + output_type='ndarray') + for k in range(r.shape[0]): + i = r['i'][k] + j = r['j'][k] + v = r['v'][k] + dist[i,j] = v + assert_array_almost_equal(ref, dist, decimal=14) + # test return type 'dok_matrix' + r = self.T1.sparse_distance_matrix(self.T2, self.r, + output_type='dok_matrix') + assert_array_almost_equal(ref, r.todense(), decimal=14) + # test return type 'coo_matrix' + r = self.T1.sparse_distance_matrix(self.T2, self.r, + output_type='coo_matrix') + assert_array_almost_equal(ref, r.todense(), decimal=14) + + +def test_distance_matrix(): + m = 10 + n = 11 + k = 4 + np.random.seed(1234) + xs = np.random.randn(m,k) + ys = np.random.randn(n,k) + ds = distance_matrix(xs,ys) + assert_equal(ds.shape, (m,n)) + for i in range(m): + for j in range(n): + assert_almost_equal(minkowski_distance(xs[i],ys[j]),ds[i,j]) + + +def test_distance_matrix_looping(): + m = 10 + n = 11 + k = 4 + np.random.seed(1234) + xs = np.random.randn(m,k) + ys = np.random.randn(n,k) + ds = distance_matrix(xs,ys) + dsl = distance_matrix(xs,ys,threshold=1) + assert_equal(ds,dsl) + + +def check_onetree_query(T,d): + r = T.query_ball_tree(T, d) + s = set() + for i, l in enumerate(r): + for j in l: + if i < j: + s.add((i,j)) + + assert_(s == T.query_pairs(d)) + +def test_onetree_query(): + np.random.seed(0) + n = 50 + k = 4 + points = np.random.randn(n,k) + T = KDTree(points) + check_onetree_query(T, 0.1) + + points = np.random.randn(3*n,k) + points[:n] *= 0.001 + points[n:2*n] += 2 + T = KDTree(points) + check_onetree_query(T, 0.1) + check_onetree_query(T, 0.001) + check_onetree_query(T, 0.00001) + check_onetree_query(T, 1e-6) + + +def test_onetree_query_compiled(): + np.random.seed(0) + n = 100 + k = 4 + points = np.random.randn(n,k) + T = cKDTree(points) + check_onetree_query(T, 0.1) + + points = np.random.randn(3*n,k) + points[:n] *= 0.001 + points[n:2*n] += 2 + T = cKDTree(points) + check_onetree_query(T, 0.1) + check_onetree_query(T, 0.001) + check_onetree_query(T, 0.00001) + check_onetree_query(T, 1e-6) + + +def test_query_pairs_single_node(): + tree = KDTree([[0, 1]]) + assert_equal(tree.query_pairs(0.5), set()) + + +def test_query_pairs_single_node_compiled(): + tree = cKDTree([[0, 1]]) + assert_equal(tree.query_pairs(0.5), set()) + + +def test_ckdtree_query_pairs(): + np.random.seed(0) + n = 50 + k = 2 + r = 0.1 + r2 = r**2 + points = np.random.randn(n,k) + T = cKDTree(points) + # brute force reference + brute = set() + for i in range(n): + for j in range(i+1,n): + v = points[i,:] - points[j,:] + if np.dot(v,v) <= r2: + brute.add((i,j)) + l0 = sorted(brute) + # test default return type + s = T.query_pairs(r) + l1 = sorted(s) + assert_array_equal(l0,l1) + # test return type 'set' + s = T.query_pairs(r, output_type='set') + l1 = sorted(s) + assert_array_equal(l0,l1) + # test return type 'ndarray' + s = set() + arr = T.query_pairs(r, output_type='ndarray') + for i in range(arr.shape[0]): + s.add((int(arr[i,0]),int(arr[i,1]))) + l2 = sorted(s) + assert_array_equal(l0,l2) + + +def test_ball_point_ints(): + # Regression test for #1373. 
+    x, y = np.mgrid[0:4, 0:4]
+    points = list(zip(x.ravel(), y.ravel()))
+    tree = KDTree(points)
+    assert_equal(sorted([4, 8, 9, 12]),
+                 sorted(tree.query_ball_point((2, 0), 1)))
+    points = np.asarray(points, dtype=float)
+    tree = KDTree(points)
+    assert_equal(sorted([4, 8, 9, 12]),
+                 sorted(tree.query_ball_point((2, 0), 1)))
+
+
+def test_kdtree_comparisons():
+    # Regression test: node comparisons were done wrong in 0.12 w/Py3.
+    nodes = [KDTree.node() for _ in range(3)]
+    assert_equal(sorted(nodes), sorted(nodes[::-1]))
+
+
+def test_ckdtree_build_modes():
+    # check that the different build modes for cKDTree give
+    # identical query results
+    np.random.seed(0)
+    n = 5000
+    k = 4
+    points = np.random.randn(n, k)
+    T1 = cKDTree(points).query(points, k=5)[-1]
+    T2 = cKDTree(points, compact_nodes=False).query(points, k=5)[-1]
+    T3 = cKDTree(points, balanced_tree=False).query(points, k=5)[-1]
+    T4 = cKDTree(points, compact_nodes=False, balanced_tree=False).query(points, k=5)[-1]
+    assert_array_equal(T1, T2)
+    assert_array_equal(T1, T3)
+    assert_array_equal(T1, T4)
+
+def test_ckdtree_pickle():
+    # test that a cKDTree can be pickled
+    try:
+        import cPickle as pickle
+    except ImportError:
+        import pickle
+    np.random.seed(0)
+    n = 50
+    k = 4
+    points = np.random.randn(n, k)
+    T1 = cKDTree(points)
+    tmp = pickle.dumps(T1)
+    T2 = pickle.loads(tmp)
+    T1 = T1.query(points, k=5)[-1]
+    T2 = T2.query(points, k=5)[-1]
+    assert_array_equal(T1, T2)
+
+def test_ckdtree_pickle_boxsize():
+    # test that a periodic cKDTree can be pickled
+    try:
+        import cPickle as pickle
+    except ImportError:
+        import pickle
+    np.random.seed(0)
+    n = 50
+    k = 4
+    points = np.random.uniform(size=(n, k))
+    T1 = cKDTree(points, boxsize=1.0)
+    tmp = pickle.dumps(T1)
+    T2 = pickle.loads(tmp)
+    T1 = T1.query(points, k=5)[-1]
+    T2 = T2.query(points, k=5)[-1]
+    assert_array_equal(T1, T2)
+
+def test_ckdtree_copy_data():
+    # check that copy_data=True makes the kd-tree
+    # impervious to data corruption by modification of
+    # the data array
+    np.random.seed(0)
+    n = 5000
+    k = 4
+    points = np.random.randn(n, k)
+    T = cKDTree(points, copy_data=True)
+    q = points.copy()
+    T1 = T.query(q, k=5)[-1]
+    points[...] = np.random.randn(n, k)
+    T2 = T.query(q, k=5)[-1]
+    assert_array_equal(T1, T2)
+
+def test_ckdtree_parallel():
+    # check that parallel queries (n_jobs != 1) also generate
+    # correct query results
+    np.random.seed(0)
+    n = 5000
+    k = 4
+    points = np.random.randn(n, k)
+    T = cKDTree(points)
+    T1 = T.query(points, k=5, n_jobs=64)[-1]
+    T2 = T.query(points, k=5, n_jobs=-1)[-1]
+    T3 = T.query(points, k=5)[-1]
+    assert_array_equal(T1, T2)
+    assert_array_equal(T1, T3)
+
+def test_ckdtree_view():
+    # Check that the nodes can be correctly viewed from Python.
+    # This test also sanity checks each node in the cKDTree, and
+    # thus verifies the internal structure of the kd-tree.
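+    # Invariants checked below: a leaf is marked by split_dim == -1 and
+    # holds at most 'leafsize' indices; for every inner node, the points
+    # on its 'lesser' side lie strictly below those on its 'greater' side
+    # along the splitting dimension.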
+    np.random.seed(0)
+    n = 100
+    k = 4
+    points = np.random.randn(n, k)
+    kdtree = cKDTree(points)
+
+    # walk the whole kd-tree and sanity check each node
+    def recurse_tree(n):
+        assert_(isinstance(n, cKDTreeNode))
+        if n.split_dim == -1:
+            assert_(n.lesser is None)
+            assert_(n.greater is None)
+            assert_(n.indices.shape[0] <= kdtree.leafsize)
+        else:
+            recurse_tree(n.lesser)
+            recurse_tree(n.greater)
+            x = n.lesser.data_points[:, n.split_dim]
+            y = n.greater.data_points[:, n.split_dim]
+            assert_(x.max() < y.min())
+
+    recurse_tree(kdtree.tree)
+    # check that indices are correctly retrieved
+    n = kdtree.tree
+    assert_array_equal(np.sort(n.indices), range(100))
+    # check that data_points are correctly retrieved
+    assert_array_equal(kdtree.data[n.indices, :], n.data_points)
+
+# cKDTree is specialized to type double points, so no need to make
+# a unit test corresponding to test_ball_point_ints()
+
+def test_ckdtree_list_k():
+    # check cKDTree query() with a list of k values
+    n = 200
+    m = 2
+    klist = [1, 2, 3]
+    kint = 3
+
+    np.random.seed(1234)
+    data = np.random.uniform(size=(n, m))
+    kdtree = cKDTree(data, leafsize=1)
+
+    # check agreement between arange(1,k+1) and k
+    dd, ii = kdtree.query(data, klist)
+    dd1, ii1 = kdtree.query(data, kint)
+    assert_equal(dd, dd1)
+    assert_equal(ii, ii1)
+
+    # now check skipping one element
+    klist = np.array([1, 3])
+    kint = 3
+    dd, ii = kdtree.query(data, kint)
+    dd1, ii1 = kdtree.query(data, klist)
+    assert_equal(dd1, dd[..., klist - 1])
+    assert_equal(ii1, ii[..., klist - 1])
+
+    # check k == 1 special case
+    # and k == [1] non-special case
+    dd, ii = kdtree.query(data, 1)
+    dd1, ii1 = kdtree.query(data, [1])
+    assert_equal(len(dd.shape), 1)
+    assert_equal(len(dd1.shape), 2)
+    assert_equal(dd, np.ravel(dd1))
+    assert_equal(ii, np.ravel(ii1))
+
+def test_ckdtree_box():
+    # check ckdtree periodic boundary
+    n = 2000
+    m = 3
+    k = 3
+    np.random.seed(1234)
+    data = np.random.uniform(size=(n, m))
+    kdtree = cKDTree(data, leafsize=1, boxsize=1.0)
+
+    # use a plain (non-periodic) cKDTree to build the reference
+    # answer for the simulated periodic box
+    kdtree2 = cKDTree(data, leafsize=1)
+
+    for p in [1, 2, 3.0, np.inf]:
+        dd, ii = kdtree.query(data, k, p=p)
+
+        dd1, ii1 = kdtree.query(data + 1.0, k, p=p)
+        assert_almost_equal(dd, dd1)
+        assert_equal(ii, ii1)
+
+        dd1, ii1 = kdtree.query(data - 1.0, k, p=p)
+        assert_almost_equal(dd, dd1)
+        assert_equal(ii, ii1)
+
+        dd2, ii2 = simulate_periodic_box(kdtree2, data, k, boxsize=1.0, p=p)
+        assert_almost_equal(dd, dd2)
+        assert_equal(ii, ii2)
+
+def test_ckdtree_box_0boxsize():
+    # check ckdtree periodic boundary that mimics non-periodic
+    n = 2000
+    m = 2
+    k = 3
+    np.random.seed(1234)
+    data = np.random.uniform(size=(n, m))
+    kdtree = cKDTree(data, leafsize=1, boxsize=0.0)
+
+    # a plain (non-periodic) cKDTree should give identical answers
+    kdtree2 = cKDTree(data, leafsize=1)
+
+    for p in [1, 2, np.inf]:
+        dd, ii = kdtree.query(data, k, p=p)
+
+        dd1, ii1 = kdtree2.query(data, k, p=p)
+        assert_almost_equal(dd, dd1)
+        assert_equal(ii, ii1)
+
+def test_ckdtree_box_upper_bounds():
+    data = np.linspace(0, 2, 10).reshape(-1, 2)
+    data[:, 1] += 10
+    assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0)
+    assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=(0.0, 2.0))
+    # skip a dimension.
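+    # (a boxsize entry of 0 switches periodicity off for that dimension,
+    # so its coordinates are not required to lie inside the box)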
+ cKDTree(data, leafsize=1, boxsize=(2.0, 0.0)) + +def test_ckdtree_box_lower_bounds(): + data = np.linspace(-1, 1, 10) + assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0) + +def simulate_periodic_box(kdtree, data, k, boxsize, p): + dd = [] + ii = [] + x = np.arange(3 ** data.shape[1]) + nn = np.array(np.unravel_index(x, [3] * data.shape[1])).T + nn = nn - 1.0 + for n in nn: + image = data + n * 1.0 * boxsize + dd2, ii2 = kdtree.query(image, k, p=p) + dd2 = dd2.reshape(-1, k) + ii2 = ii2.reshape(-1, k) + dd.append(dd2) + ii.append(ii2) + dd = np.concatenate(dd, axis=-1) + ii = np.concatenate(ii, axis=-1) + + result = np.empty([len(data), len(nn) * k], dtype=[ + ('ii', 'i8'), + ('dd', 'f8')]) + result['ii'][:] = ii + result['dd'][:] = dd + result.sort(order='dd') + return result['dd'][:, :k], result['ii'][:,:k] + + +@pytest.mark.skipif(python_implementation() == 'PyPy', + reason="Fails on PyPy CI runs. See #9507") +def test_ckdtree_memuse(): + # unit test adaptation of gh-5630 + + # NOTE: this will fail when run via valgrind, + # because rss is no longer a reliable memory usage indicator. + + try: + import resource + except ImportError: + # resource is not available on Windows with Python 2.6 + return + # Make some data + dx, dy = 0.05, 0.05 + y, x = np.mgrid[slice(1, 5 + dy, dy), + slice(1, 5 + dx, dx)] + z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x) + z_copy = np.empty_like(z) + z_copy[:] = z + # Place FILLVAL in z_copy at random number of random locations + FILLVAL = 99. + mask = np.random.randint(0, z.size, np.random.randint(50) + 5) + z_copy.flat[mask] = FILLVAL + igood = np.vstack(np.nonzero(x != FILLVAL)).T + ibad = np.vstack(np.nonzero(x == FILLVAL)).T + mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + # burn-in + for i in range(10): + tree = cKDTree(igood) + # count memleaks while constructing and querying cKDTree + num_leaks = 0 + for i in range(100): + mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + tree = cKDTree(igood) + dist, iquery = tree.query(ibad, k=4, p=2) + new_mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + if new_mem_use > mem_use: + num_leaks += 1 + # ideally zero leaks, but errors might accidentally happen + # outside cKDTree + assert_(num_leaks < 10) + +def test_ckdtree_weights(): + + data = np.linspace(0, 1, 4).reshape(-1, 1) + tree1 = cKDTree(data, leafsize=1) + weights = np.ones(len(data), dtype='f4') + + nw = tree1._build_weights(weights) + assert_array_equal(nw, [4, 2, 1, 1, 2, 1, 1]) + + assert_raises(ValueError, tree1._build_weights, weights[:-1]) + + for i in range(10): + # since weights are uniform, these shall agree: + c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, i)) + c2 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), + weights=(weights, weights)) + c3 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), + weights=(weights, None)) + c4 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), + weights=(None, weights)) + c5 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), + weights=weights) + + assert_array_equal(c1, c2) + assert_array_equal(c1, c3) + assert_array_equal(c1, c4) + + for i in range(len(data)): + # this tests removal of one data point by setting weight to 0 + w1 = weights.copy() + w1[i] = 0 + data2 = data[w1 != 0] + w2 = weights[w1 != 0] + tree2 = cKDTree(data2) + + c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, 100), + weights=(w1, w1)) + # "c2 is correct" + c2 = tree2.count_neighbors(tree2, np.linspace(0, 10, 100)) + + assert_array_equal(c1, c2) + + #this 
asserts for two different trees, singular weights
+    # crashes
+    assert_raises(ValueError, tree1.count_neighbors,
+        tree2, np.linspace(0, 10, 100), weights=w1)
+
+def test_ckdtree_count_neighbors_multiple_r():
+    n = 2000
+    m = 2
+    np.random.seed(1234)
+    data = np.random.normal(size=(n, m))
+    kdtree = cKDTree(data, leafsize=1)
+    r0 = [0, 0.01, 0.01, 0.02, 0.05]
+    i0 = np.arange(len(r0))
+    n0 = kdtree.count_neighbors(kdtree, r0)
+    nnc = kdtree.count_neighbors(kdtree, r0, cumulative=False)
+    assert_equal(n0, nnc.cumsum())
+
+    for i, r in zip(itertools.permutations(i0),
+                    itertools.permutations(r0)):
+        # permuting r must permute the counts the same way
+        n = kdtree.count_neighbors(kdtree, r)
+        assert_array_equal(n, n0[list(i)])
+
+def test_len0_arrays():
+    # make sure len-0 arrays are handled correctly
+    # in range queries (gh-5639)
+    np.random.seed(1234)
+    X = np.random.rand(10,2)
+    Y = np.random.rand(10,2)
+    tree = cKDTree(X)
+    # query_ball_point (single)
+    d,i = tree.query([.5, .5], k=1)
+    z = tree.query_ball_point([.5, .5], 0.1*d)
+    assert_array_equal(z, [])
+    # query_ball_point (multiple)
+    d,i = tree.query(Y, k=1)
+    mind = d.min()
+    z = tree.query_ball_point(Y, 0.1*mind)
+    y = np.empty(shape=(10,), dtype=object)
+    y.fill([])
+    assert_array_equal(y, z)
+    # query_ball_tree
+    other = cKDTree(Y)
+    y = tree.query_ball_tree(other, 0.1*mind)
+    assert_array_equal(10*[[]], y)
+    # count_neighbors
+    y = tree.count_neighbors(other, 0.1*mind)
+    assert_(y == 0)
+    # sparse_distance_matrix
+    y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dok_matrix')
+    assert_array_equal(y == np.zeros((10,10)), True)
+    y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='coo_matrix')
+    assert_array_equal(y == np.zeros((10,10)), True)
+    y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dict')
+    assert_equal(y, {})
+    y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='ndarray')
+    _dtype = [('i',np.intp), ('j',np.intp), ('v',np.float64)]
+    res_dtype = np.dtype(_dtype, align=True)
+    z = np.empty(shape=(0,), dtype=res_dtype)
+    assert_array_equal(y, z)
+    # query_pairs
+    d,i = tree.query(X, k=2)
+    mind = d[:,-1].min()
+    y = tree.query_pairs(0.1*mind, output_type='set')
+    assert_equal(y, set())
+    y = tree.query_pairs(0.1*mind, output_type='ndarray')
+    z = np.empty(shape=(0,2), dtype=np.intp)
+    assert_array_equal(y, z)
+
+def test_ckdtree_duplicated_inputs():
+    # check ckdtree with duplicated inputs
+    n = 1024
+    for m in range(1, 8):
+        data = np.concatenate([
+            np.ones((n // 2, m)) * 1,
+            np.ones((n // 2, m)) * 2], axis=0)
+
+        # it shall not divide more than 3 nodes.
+        # root left (1), and right (2)
+        kdtree = cKDTree(data, leafsize=1)
+        assert_equal(kdtree.size, 3)
+
+        kdtree = cKDTree(data)
+        assert_equal(kdtree.size, 3)
+
+        # if compact_nodes are disabled, the number
+        # of nodes is n (per leaf) + (m - 1)* 2 (splits per dimension) + 1
+        # and the root
+        kdtree = cKDTree(data, compact_nodes=False, leafsize=1)
+        assert_equal(kdtree.size, n + m * 2 - 1)
+
+def test_ckdtree_noncumulative_nondecreasing():
+    # check ckdtree with duplicated inputs
+
+    # it shall not divide more than 3 nodes.
+ # root left (1), and right (2) + kdtree = cKDTree([[0]], leafsize=1) + + assert_raises(ValueError, kdtree.count_neighbors, + kdtree, [0.1, 0], cumulative=False) + +def test_short_knn(): + + # The test case is based on github: #6425 by @SteveDoyle2 + + xyz = np.array([ + [0., 0., 0.], + [1.01, 0., 0.], + [0., 1., 0.], + [0., 1.01, 0.], + [1., 0., 0.], + [1., 1., 0.],], + dtype='float64') + + ckdt = cKDTree(xyz) + + deq, ieq = ckdt.query(xyz, k=4, distance_upper_bound=0.2) + + assert_array_almost_equal(deq, + [[0., np.inf, np.inf, np.inf], + [0., 0.01, np.inf, np.inf], + [0., 0.01, np.inf, np.inf], + [0., 0.01, np.inf, np.inf], + [0., 0.01, np.inf, np.inf], + [0., np.inf, np.inf, np.inf]]) + +class Test_sorted_query_ball_point(object): + + def setup_method(self): + np.random.seed(1234) + self.x = np.random.randn(100, 1) + self.ckdt = cKDTree(self.x) + + def test_return_sorted_True(self): + idxs_list = self.ckdt.query_ball_point(self.x, 1., return_sorted=True) + for idxs in idxs_list: + assert_array_equal(idxs, sorted(idxs)) + + def test_return_sorted_None(self): + """Previous behavior was to sort the returned indices if there were + multiple points per query but not sort them if there was a single point + per query.""" + idxs_list = self.ckdt.query_ball_point(self.x, 1.) + for idxs in idxs_list: + assert_array_equal(idxs, sorted(idxs)) + + idxs_list_single = [self.ckdt.query_ball_point(xi, 1.) for xi in self.x] + idxs_list_False = self.ckdt.query_ball_point(self.x, 1., return_sorted=False) + for idxs0, idxs1 in zip(idxs_list_False, idxs_list_single): + assert_array_equal(idxs0, idxs1) diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_kdtree.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_kdtree.pyc new file mode 100644 index 0000000..15a104e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_kdtree.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_qhull.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_qhull.py new file mode 100644 index 0000000..3b2b654 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_qhull.py @@ -0,0 +1,1008 @@ +from __future__ import division, print_function, absolute_import + +import os +import copy +import pytest + +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, + assert_, assert_allclose, assert_array_equal) +import pytest +from pytest import raises as assert_raises +from scipy._lib.six import xrange + +import scipy.spatial.qhull as qhull +from scipy.spatial import cKDTree as KDTree +from scipy.spatial import Voronoi + +import itertools + +def sorted_tuple(x): + return tuple(sorted(x)) + + +def sorted_unique_tuple(x): + return tuple(np.unique(x)) + + +def assert_unordered_tuple_list_equal(a, b, tpl=tuple): + if isinstance(a, np.ndarray): + a = a.tolist() + if isinstance(b, np.ndarray): + b = b.tolist() + a = list(map(tpl, a)) + a.sort() + b = list(map(tpl, b)) + b.sort() + assert_equal(a, b) + + +np.random.seed(1234) + +points = [(0,0), (0,1), (1,0), (1,1), (0.5, 0.5), (0.5, 1.5)] + +pathological_data_1 = np.array([ + [-3.14,-3.14], [-3.14,-2.36], [-3.14,-1.57], [-3.14,-0.79], + [-3.14,0.0], [-3.14,0.79], [-3.14,1.57], [-3.14,2.36], + [-3.14,3.14], [-2.36,-3.14], [-2.36,-2.36], [-2.36,-1.57], + [-2.36,-0.79], [-2.36,0.0], [-2.36,0.79], [-2.36,1.57], + [-2.36,2.36], [-2.36,3.14], [-1.57,-0.79], [-1.57,0.79], + [-1.57,-1.57], [-1.57,0.0], 
[-1.57,1.57], [-1.57,-3.14], + [-1.57,-2.36], [-1.57,2.36], [-1.57,3.14], [-0.79,-1.57], + [-0.79,1.57], [-0.79,-3.14], [-0.79,-2.36], [-0.79,-0.79], + [-0.79,0.0], [-0.79,0.79], [-0.79,2.36], [-0.79,3.14], + [0.0,-3.14], [0.0,-2.36], [0.0,-1.57], [0.0,-0.79], [0.0,0.0], + [0.0,0.79], [0.0,1.57], [0.0,2.36], [0.0,3.14], [0.79,-3.14], + [0.79,-2.36], [0.79,-0.79], [0.79,0.0], [0.79,0.79], + [0.79,2.36], [0.79,3.14], [0.79,-1.57], [0.79,1.57], + [1.57,-3.14], [1.57,-2.36], [1.57,2.36], [1.57,3.14], + [1.57,-1.57], [1.57,0.0], [1.57,1.57], [1.57,-0.79], + [1.57,0.79], [2.36,-3.14], [2.36,-2.36], [2.36,-1.57], + [2.36,-0.79], [2.36,0.0], [2.36,0.79], [2.36,1.57], + [2.36,2.36], [2.36,3.14], [3.14,-3.14], [3.14,-2.36], + [3.14,-1.57], [3.14,-0.79], [3.14,0.0], [3.14,0.79], + [3.14,1.57], [3.14,2.36], [3.14,3.14], +]) + +pathological_data_2 = np.array([ + [-1, -1], [-1, 0], [-1, 1], + [0, -1], [0, 0], [0, 1], + [1, -1 - np.finfo(np.float_).eps], [1, 0], [1, 1], +]) + +bug_2850_chunks = [np.random.rand(10, 2), + np.array([[0,0], [0,1], [1,0], [1,1]]) # add corners + ] + +# same with some additional chunks +bug_2850_chunks_2 = (bug_2850_chunks + + [np.random.rand(10, 2), + 0.25 + np.array([[0,0], [0,1], [1,0], [1,1]])]) + +DATASETS = { + 'some-points': np.asarray(points), + 'random-2d': np.random.rand(30, 2), + 'random-3d': np.random.rand(30, 3), + 'random-4d': np.random.rand(30, 4), + 'random-5d': np.random.rand(30, 5), + 'random-6d': np.random.rand(10, 6), + 'random-7d': np.random.rand(10, 7), + 'random-8d': np.random.rand(10, 8), + 'pathological-1': pathological_data_1, + 'pathological-2': pathological_data_2 +} + +INCREMENTAL_DATASETS = { + 'bug-2850': (bug_2850_chunks, None), + 'bug-2850-2': (bug_2850_chunks_2, None), +} + + +def _add_inc_data(name, chunksize): + """ + Generate incremental datasets from basic data sets + """ + points = DATASETS[name] + ndim = points.shape[1] + + opts = None + nmin = ndim + 2 + + if name == 'some-points': + # since Qz is not allowed, use QJ + opts = 'QJ Pp' + elif name == 'pathological-1': + # include enough points so that we get different x-coordinates + nmin = 12 + + chunks = [points[:nmin]] + for j in xrange(nmin, len(points), chunksize): + chunks.append(points[j:j+chunksize]) + + new_name = "%s-chunk-%d" % (name, chunksize) + assert new_name not in INCREMENTAL_DATASETS + INCREMENTAL_DATASETS[new_name] = (chunks, opts) + + +for name in DATASETS: + for chunksize in 1, 4, 16: + _add_inc_data(name, chunksize) + + +class Test_Qhull(object): + def test_swapping(self): + # Check that Qhull state swapping works + + x = qhull._Qhull(b'v', + np.array([[0,0],[0,1],[1,0],[1,1.],[0.5,0.5]]), + b'Qz') + xd = copy.deepcopy(x.get_voronoi_diagram()) + + y = qhull._Qhull(b'v', + np.array([[0,0],[0,1],[1,0],[1,2.]]), + b'Qz') + yd = copy.deepcopy(y.get_voronoi_diagram()) + + xd2 = copy.deepcopy(x.get_voronoi_diagram()) + x.close() + yd2 = copy.deepcopy(y.get_voronoi_diagram()) + y.close() + + assert_raises(RuntimeError, x.get_voronoi_diagram) + assert_raises(RuntimeError, y.get_voronoi_diagram) + + assert_allclose(xd[0], xd2[0]) + assert_unordered_tuple_list_equal(xd[1], xd2[1], tpl=sorted_tuple) + assert_unordered_tuple_list_equal(xd[2], xd2[2], tpl=sorted_tuple) + assert_unordered_tuple_list_equal(xd[3], xd2[3], tpl=sorted_tuple) + assert_array_equal(xd[4], xd2[4]) + + assert_allclose(yd[0], yd2[0]) + assert_unordered_tuple_list_equal(yd[1], yd2[1], tpl=sorted_tuple) + assert_unordered_tuple_list_equal(yd[2], yd2[2], tpl=sorted_tuple) + 
assert_unordered_tuple_list_equal(yd[3], yd2[3], tpl=sorted_tuple) + assert_array_equal(yd[4], yd2[4]) + + x.close() + assert_raises(RuntimeError, x.get_voronoi_diagram) + y.close() + assert_raises(RuntimeError, y.get_voronoi_diagram) + + def test_issue_8051(self): + points = np.array([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2],[2, 0], [2, 1], [2, 2]]) + Voronoi(points) + + +class TestUtilities(object): + """ + Check that utility functions work. + + """ + + def test_find_simplex(self): + # Simple check that simplex finding works + points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double) + tri = qhull.Delaunay(points) + + # +---+ + # |\ 0| + # | \ | + # |1 \| + # +---+ + + assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]]) + + for p in [(0.25, 0.25, 1), + (0.75, 0.75, 0), + (0.3, 0.2, 1)]: + i = tri.find_simplex(p[:2]) + assert_equal(i, p[2], err_msg='%r' % (p,)) + j = qhull.tsearch(tri, p[:2]) + assert_equal(i, j) + + def test_plane_distance(self): + # Compare plane distance from hyperplane equations obtained from Qhull + # to manually computed plane equations + x = np.array([(0,0), (1, 1), (1, 0), (0.99189033, 0.37674127), + (0.99440079, 0.45182168)], dtype=np.double) + p = np.array([0.99966555, 0.15685619], dtype=np.double) + + tri = qhull.Delaunay(x) + + z = tri.lift_points(x) + pz = tri.lift_points(p) + + dist = tri.plane_distance(p) + + for j, v in enumerate(tri.vertices): + x1 = z[v[0]] + x2 = z[v[1]] + x3 = z[v[2]] + + n = np.cross(x1 - x3, x2 - x3) + n /= np.sqrt(np.dot(n, n)) + n *= -np.sign(n[2]) + + d = np.dot(n, pz - x3) + + assert_almost_equal(dist[j], d) + + def test_convex_hull(self): + # Simple check that the convex hull seems to works + points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double) + tri = qhull.Delaunay(points) + + # +---+ + # |\ 0| + # | \ | + # |1 \| + # +---+ + + assert_equal(tri.convex_hull, [[3, 2], [1, 2], [1, 0], [3, 0]]) + + def test_volume_area(self): + #Basic check that we get back the correct volume and area for a cube + points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0), + (0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)]) + hull = qhull.ConvexHull(points) + + assert_allclose(hull.volume, 1., rtol=1e-14, + err_msg="Volume of cube is incorrect") + assert_allclose(hull.area, 6., rtol=1e-14, + err_msg="Area of cube is incorrect") + + def test_random_volume_area(self): + #Test that the results for a random 10-point convex are + #coherent with the output of qconvex Qt s FA + points = np.array([(0.362568364506, 0.472712355305, 0.347003084477), + (0.733731893414, 0.634480295684, 0.950513180209), + (0.511239955611, 0.876839441267, 0.418047827863), + (0.0765906233393, 0.527373281342, 0.6509863541), + (0.146694972056, 0.596725793348, 0.894860986685), + (0.513808585741, 0.069576205858, 0.530890338876), + (0.512343805118, 0.663537132612, 0.037689295973), + (0.47282965018, 0.462176697655, 0.14061843691), + (0.240584597123, 0.778660020591, 0.722913476339), + (0.951271745935, 0.967000673944, 0.890661319684)]) + + hull = qhull.ConvexHull(points) + assert_allclose(hull.volume, 0.14562013, rtol=1e-07, + err_msg="Volume of random polyhedron is incorrect") + assert_allclose(hull.area, 1.6670425, rtol=1e-07, + err_msg="Area of random polyhedron is incorrect") + + def test_incremental_volume_area_random_input(self): + """Test that incremental mode gives the same volume/area as + non-incremental mode and incremental mode with restart""" + nr_points = 20 + dim = 3 + points = np.random.random((nr_points, dim)) + inc_hull = 
qhull.ConvexHull(points[:dim+1, :], incremental=True) + inc_restart_hull = qhull.ConvexHull(points[:dim+1, :], incremental=True) + for i in range(dim+1, nr_points): + hull = qhull.ConvexHull(points[:i+1, :]) + inc_hull.add_points(points[i:i+1, :]) + inc_restart_hull.add_points(points[i:i+1, :], restart=True) + assert_allclose(hull.volume, inc_hull.volume, rtol=1e-7) + assert_allclose(hull.volume, inc_restart_hull.volume, rtol=1e-7) + assert_allclose(hull.area, inc_hull.area, rtol=1e-7) + assert_allclose(hull.area, inc_restart_hull.area, rtol=1e-7) + + def _check_barycentric_transforms(self, tri, err_msg="", + unit_cube=False, + unit_cube_tol=0): + """Check that a triangulation has reasonable barycentric transforms""" + vertices = tri.points[tri.vertices] + sc = 1/(tri.ndim + 1.0) + centroids = vertices.sum(axis=1) * sc + + # Either: (i) the simplex has a `nan` barycentric transform, + # or, (ii) the centroid is in the simplex + + def barycentric_transform(tr, x): + ndim = tr.shape[1] + r = tr[:,-1,:] + Tinv = tr[:,:-1,:] + return np.einsum('ijk,ik->ij', Tinv, x - r) + + eps = np.finfo(float).eps + + c = barycentric_transform(tri.transform, centroids) + olderr = np.seterr(invalid="ignore") + try: + ok = np.isnan(c).all(axis=1) | (abs(c - sc)/sc < 0.1).all(axis=1) + finally: + np.seterr(**olderr) + + assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok))) + + # Invalid simplices must be (nearly) zero volume + q = vertices[:,:-1,:] - vertices[:,-1,None,:] + volume = np.array([np.linalg.det(q[k,:,:]) + for k in range(tri.nsimplex)]) + ok = np.isfinite(tri.transform[:,0,0]) | (volume < np.sqrt(eps)) + assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok))) + + # Also, find_simplex for the centroid should end up in some + # simplex for the non-degenerate cases + j = tri.find_simplex(centroids) + ok = (j != -1) | np.isnan(tri.transform[:,0,0]) + assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok))) + + if unit_cube: + # If in unit cube, no interior point should be marked out of hull + at_boundary = (centroids <= unit_cube_tol).any(axis=1) + at_boundary |= (centroids >= 1 - unit_cube_tol).any(axis=1) + + ok = (j != -1) | at_boundary + assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok))) + + def test_degenerate_barycentric_transforms(self): + # The triangulation should not produce invalid barycentric + # transforms that stump the simplex finding + data = np.load(os.path.join(os.path.dirname(__file__), 'data', + 'degenerate_pointset.npz')) + points = data['c'] + data.close() + + tri = qhull.Delaunay(points) + + # Check that there are not too many invalid simplices + bad_count = np.isnan(tri.transform[:,0,0]).sum() + assert_(bad_count < 21, bad_count) + + # Check the transforms + self._check_barycentric_transforms(tri) + + @pytest.mark.slow + def test_more_barycentric_transforms(self): + # Triangulate some "nasty" grids + + eps = np.finfo(float).eps + + npoints = {2: 70, 3: 11, 4: 5, 5: 3} + + _is_32bit_platform = np.intp(0).itemsize < 8 + for ndim in xrange(2, 6): + # Generate an uniform grid in n-d unit cube + x = np.linspace(0, 1, npoints[ndim]) + grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T + + err_msg = "ndim=%d" % ndim + + # Check using regular grid + tri = qhull.Delaunay(grid) + self._check_barycentric_transforms(tri, err_msg=err_msg, + unit_cube=True) + + # Check with eps-perturbations + np.random.seed(1234) + m = (np.random.rand(grid.shape[0]) < 0.2) + grid[m,:] += 2*eps*(np.random.rand(*grid[m,:].shape) - 0.5) + + tri = qhull.Delaunay(grid) + 
self._check_barycentric_transforms(tri, err_msg=err_msg, + unit_cube=True, + unit_cube_tol=2*eps) + + # Check with duplicated data + tri = qhull.Delaunay(np.r_[grid, grid]) + self._check_barycentric_transforms(tri, err_msg=err_msg, + unit_cube=True, + unit_cube_tol=2*eps) + + if not _is_32bit_platform: + # test numerically unstable, and reported to fail on 32-bit + # installs + + # Check with larger perturbations + np.random.seed(4321) + m = (np.random.rand(grid.shape[0]) < 0.2) + grid[m,:] += 1000*eps*(np.random.rand(*grid[m,:].shape) - 0.5) + + tri = qhull.Delaunay(grid) + self._check_barycentric_transforms(tri, err_msg=err_msg, + unit_cube=True, + unit_cube_tol=1500*eps) + + # Check with yet larger perturbations + np.random.seed(4321) + m = (np.random.rand(grid.shape[0]) < 0.2) + grid[m,:] += 1e6*eps*(np.random.rand(*grid[m,:].shape) - 0.5) + + tri = qhull.Delaunay(grid) + self._check_barycentric_transforms(tri, err_msg=err_msg, + unit_cube=True, + unit_cube_tol=1e7*eps) + + +class TestVertexNeighborVertices(object): + def _check(self, tri): + expected = [set() for j in range(tri.points.shape[0])] + for s in tri.simplices: + for a in s: + for b in s: + if a != b: + expected[a].add(b) + + indptr, indices = tri.vertex_neighbor_vertices + + got = [] + for j in range(tri.points.shape[0]): + got.append(set(map(int, indices[indptr[j]:indptr[j+1]]))) + + assert_equal(got, expected, err_msg="%r != %r" % (got, expected)) + + def test_triangle(self): + points = np.array([(0,0), (0,1), (1,0)], dtype=np.double) + tri = qhull.Delaunay(points) + self._check(tri) + + def test_rectangle(self): + points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double) + tri = qhull.Delaunay(points) + self._check(tri) + + def test_complicated(self): + points = np.array([(0,0), (0,1), (1,1), (1,0), + (0.5, 0.5), (0.9, 0.5)], dtype=np.double) + tri = qhull.Delaunay(points) + self._check(tri) + + +class TestDelaunay(object): + """ + Check that triangulation works. 
+ + """ + def test_masked_array_fails(self): + masked_array = np.ma.masked_all(1) + assert_raises(ValueError, qhull.Delaunay, masked_array) + + def test_array_with_nans_fails(self): + points_with_nan = np.array([(0,0), (0,1), (1,1), (1,np.nan)], dtype=np.double) + assert_raises(ValueError, qhull.Delaunay, points_with_nan) + + def test_nd_simplex(self): + # simple smoke test: triangulate a n-dimensional simplex + for nd in xrange(2, 8): + points = np.zeros((nd+1, nd)) + for j in xrange(nd): + points[j,j] = 1.0 + points[-1,:] = 1.0 + + tri = qhull.Delaunay(points) + + tri.vertices.sort() + + assert_equal(tri.vertices, np.arange(nd+1, dtype=int)[None,:]) + assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=int)[None,:]) + + def test_2d_square(self): + # simple smoke test: 2d square + points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double) + tri = qhull.Delaunay(points) + + assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]]) + assert_equal(tri.neighbors, [[-1, -1, 1], [-1, -1, 0]]) + + def test_duplicate_points(self): + x = np.array([0, 1, 0, 1], dtype=np.float64) + y = np.array([0, 0, 1, 1], dtype=np.float64) + + xp = np.r_[x, x] + yp = np.r_[y, y] + + # shouldn't fail on duplicate points + tri = qhull.Delaunay(np.c_[x, y]) + tri2 = qhull.Delaunay(np.c_[xp, yp]) + + def test_pathological(self): + # both should succeed + points = DATASETS['pathological-1'] + tri = qhull.Delaunay(points) + assert_equal(tri.points[tri.vertices].max(), points.max()) + assert_equal(tri.points[tri.vertices].min(), points.min()) + + points = DATASETS['pathological-2'] + tri = qhull.Delaunay(points) + assert_equal(tri.points[tri.vertices].max(), points.max()) + assert_equal(tri.points[tri.vertices].min(), points.min()) + + def test_joggle(self): + # Check that the option QJ indeed guarantees that all input points + # occur as vertices of the triangulation + + points = np.random.rand(10, 2) + points = np.r_[points, points] # duplicate input data + + tri = qhull.Delaunay(points, qhull_options="QJ Qbb Pp") + assert_array_equal(np.unique(tri.simplices.ravel()), + np.arange(len(points))) + + def test_coplanar(self): + # Check that the coplanar point output option indeed works + points = np.random.rand(10, 2) + points = np.r_[points, points] # duplicate input data + + tri = qhull.Delaunay(points) + + assert_(len(np.unique(tri.simplices.ravel())) == len(points)//2) + assert_(len(tri.coplanar) == len(points)//2) + + assert_(len(np.unique(tri.coplanar[:,2])) == len(points)//2) + + assert_(np.all(tri.vertex_to_simplex >= 0)) + + def test_furthest_site(self): + points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)] + tri = qhull.Delaunay(points, furthest_site=True) + + expected = np.array([(1, 4, 0), (4, 2, 0)]) # from Qhull + assert_array_equal(tri.simplices, expected) + + @pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS)) + def test_incremental(self, name): + # Test incremental construction of the triangulation + + chunks, opts = INCREMENTAL_DATASETS[name] + points = np.concatenate(chunks, axis=0) + + obj = qhull.Delaunay(chunks[0], incremental=True, + qhull_options=opts) + for chunk in chunks[1:]: + obj.add_points(chunk) + + obj2 = qhull.Delaunay(points) + + obj3 = qhull.Delaunay(chunks[0], incremental=True, + qhull_options=opts) + if len(chunks) > 1: + obj3.add_points(np.concatenate(chunks[1:], axis=0), + restart=True) + + # Check that the incremental mode agrees with upfront mode + if name.startswith('pathological'): + # XXX: These produce valid but different triangulations. 
+ # They look OK when plotted, but how to check them? + + assert_array_equal(np.unique(obj.simplices.ravel()), + np.arange(points.shape[0])) + assert_array_equal(np.unique(obj2.simplices.ravel()), + np.arange(points.shape[0])) + else: + assert_unordered_tuple_list_equal(obj.simplices, obj2.simplices, + tpl=sorted_tuple) + + assert_unordered_tuple_list_equal(obj2.simplices, obj3.simplices, + tpl=sorted_tuple) + + +def assert_hulls_equal(points, facets_1, facets_2): + # Check that two convex hulls constructed from the same point set + # are equal + + facets_1 = set(map(sorted_tuple, facets_1)) + facets_2 = set(map(sorted_tuple, facets_2)) + + if facets_1 != facets_2 and points.shape[1] == 2: + # The direct check fails for the pathological cases + # --- then the convex hull from Delaunay differs (due + # to rounding error etc.) from the hull computed + # otherwise, by the question whether (tricoplanar) + # points that lie almost exactly on the hull are + # included as vertices of the hull or not. + # + # So we check the result, and accept it if the Delaunay + # hull line segments are a subset of the usual hull. + + eps = 1000 * np.finfo(float).eps + + for a, b in facets_1: + for ap, bp in facets_2: + t = points[bp] - points[ap] + t /= np.linalg.norm(t) # tangent + n = np.array([-t[1], t[0]]) # normal + + # check that the two line segments are parallel + # to the same line + c1 = np.dot(n, points[b] - points[ap]) + c2 = np.dot(n, points[a] - points[ap]) + if not np.allclose(np.dot(c1, n), 0): + continue + if not np.allclose(np.dot(c2, n), 0): + continue + + # Check that the segment (a, b) is contained in (ap, bp) + c1 = np.dot(t, points[a] - points[ap]) + c2 = np.dot(t, points[b] - points[ap]) + c3 = np.dot(t, points[bp] - points[ap]) + if c1 < -eps or c1 > c3 + eps: + continue + if c2 < -eps or c2 > c3 + eps: + continue + + # OK: + break + else: + raise AssertionError("comparison fails") + + # it was OK + return + + assert_equal(facets_1, facets_2) + + +class TestConvexHull: + def test_masked_array_fails(self): + masked_array = np.ma.masked_all(1) + assert_raises(ValueError, qhull.ConvexHull, masked_array) + + def test_array_with_nans_fails(self): + points_with_nan = np.array([(0,0), (1,1), (2,np.nan)], dtype=np.double) + assert_raises(ValueError, qhull.ConvexHull, points_with_nan) + + @pytest.mark.parametrize("name", sorted(DATASETS)) + def test_hull_consistency_tri(self, name): + # Check that a convex hull returned by qhull in ndim + # and the hull constructed from ndim delaunay agree + points = DATASETS[name] + + tri = qhull.Delaunay(points) + hull = qhull.ConvexHull(points) + + assert_hulls_equal(points, tri.convex_hull, hull.simplices) + + # Check that the hull extremes are as expected + if points.shape[1] == 2: + assert_equal(np.unique(hull.simplices), np.sort(hull.vertices)) + else: + assert_equal(np.unique(hull.simplices), hull.vertices) + + @pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS)) + def test_incremental(self, name): + # Test incremental construction of the convex hull + chunks, _ = INCREMENTAL_DATASETS[name] + points = np.concatenate(chunks, axis=0) + + obj = qhull.ConvexHull(chunks[0], incremental=True) + for chunk in chunks[1:]: + obj.add_points(chunk) + + obj2 = qhull.ConvexHull(points) + + obj3 = qhull.ConvexHull(chunks[0], incremental=True) + if len(chunks) > 1: + obj3.add_points(np.concatenate(chunks[1:], axis=0), + restart=True) + + # Check that the incremental mode agrees with upfront mode + assert_hulls_equal(points, obj.simplices, obj2.simplices) + 
assert_hulls_equal(points, obj.simplices, obj3.simplices) + + def test_vertices_2d(self): + # The vertices should be in counterclockwise order in 2-D + np.random.seed(1234) + points = np.random.rand(30, 2) + + hull = qhull.ConvexHull(points) + assert_equal(np.unique(hull.simplices), np.sort(hull.vertices)) + + # Check counterclockwiseness + x, y = hull.points[hull.vertices].T + angle = np.arctan2(y - y.mean(), x - x.mean()) + assert_(np.all(np.diff(np.unwrap(angle)) > 0)) + + def test_volume_area(self): + # Basic check that we get back the correct volume and area for a cube + points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0), + (0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)]) + tri = qhull.ConvexHull(points) + + assert_allclose(tri.volume, 1., rtol=1e-14) + assert_allclose(tri.area, 6., rtol=1e-14) + + +class TestVoronoi: + def test_masked_array_fails(self): + masked_array = np.ma.masked_all(1) + assert_raises(ValueError, qhull.Voronoi, masked_array) + + def test_simple(self): + # Simple case with known Voronoi diagram + points = [(0, 0), (0, 1), (0, 2), + (1, 0), (1, 1), (1, 2), + (2, 0), (2, 1), (2, 2)] + + # qhull v o Fv Qbb Qc Qz < dat + output = """ + 2 + 5 10 1 + -10.101 -10.101 + 0.5 0.5 + 1.5 0.5 + 0.5 1.5 + 1.5 1.5 + 2 0 1 + 3 3 0 1 + 2 0 3 + 3 2 0 1 + 4 4 3 1 2 + 3 4 0 3 + 2 0 2 + 3 4 0 2 + 2 0 4 + 0 + 12 + 4 0 3 0 1 + 4 0 1 0 1 + 4 1 4 1 3 + 4 1 2 0 3 + 4 2 5 0 3 + 4 3 4 1 2 + 4 3 6 0 2 + 4 4 5 3 4 + 4 4 7 2 4 + 4 5 8 0 4 + 4 6 7 0 2 + 4 7 8 0 4 + """ + self._compare_qvoronoi(points, output) + + def _compare_qvoronoi(self, points, output, **kw): + """Compare to output from 'qvoronoi o Fv < data' to Voronoi()""" + + # Parse output + output = [list(map(float, x.split())) for x in output.strip().splitlines()] + nvertex = int(output[1][0]) + vertices = list(map(tuple, output[3:2+nvertex])) # exclude inf + nregion = int(output[1][1]) + regions = [[int(y)-1 for y in x[1:]] + for x in output[2+nvertex:2+nvertex+nregion]] + nridge = int(output[2+nvertex+nregion][0]) + ridge_points = [[int(y) for y in x[1:3]] + for x in output[3+nvertex+nregion:]] + ridge_vertices = [[int(y)-1 for y in x[3:]] + for x in output[3+nvertex+nregion:]] + + # Compare results + vor = qhull.Voronoi(points, **kw) + + def sorttuple(x): + return tuple(sorted(x)) + + assert_allclose(vor.vertices, vertices) + assert_equal(set(map(tuple, vor.regions)), + set(map(tuple, regions))) + + p1 = list(zip(list(map(sorttuple, ridge_points)), list(map(sorttuple, ridge_vertices)))) + p2 = list(zip(list(map(sorttuple, vor.ridge_points.tolist())), + list(map(sorttuple, vor.ridge_vertices)))) + p1.sort() + p2.sort() + + assert_equal(p1, p2) + + @pytest.mark.parametrize("name", sorted(DATASETS)) + def test_ridges(self, name): + # Check that the ridges computed by Voronoi indeed separate + # the regions of nearest neighborhood, by comparing the result + # to KDTree. 
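+        # Strategy: nudge each finite ridge's midpoint a tiny step towards
+        # one generator; the nearest-neighbour query must then return that
+        # generator, and the opposite nudge must return the other one.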
+ + points = DATASETS[name] + + tree = KDTree(points) + vor = qhull.Voronoi(points) + + for p, v in vor.ridge_dict.items(): + # consider only finite ridges + if not np.all(np.asarray(v) >= 0): + continue + + ridge_midpoint = vor.vertices[v].mean(axis=0) + d = 1e-6 * (points[p[0]] - ridge_midpoint) + + dist, k = tree.query(ridge_midpoint + d, k=1) + assert_equal(k, p[0]) + + dist, k = tree.query(ridge_midpoint - d, k=1) + assert_equal(k, p[1]) + + def test_furthest_site(self): + points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)] + + # qhull v o Fv Qbb Qc Qu < dat + output = """ + 2 + 3 5 1 + -10.101 -10.101 + 0.6000000000000001 0.5 + 0.5 0.6000000000000001 + 3 0 1 2 + 2 0 1 + 2 0 2 + 0 + 3 0 1 2 + 5 + 4 0 2 0 2 + 4 0 1 0 1 + 4 0 4 1 2 + 4 1 4 0 1 + 4 2 4 0 2 + """ + self._compare_qvoronoi(points, output, furthest_site=True) + + @pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS)) + def test_incremental(self, name): + # Test incremental construction of the triangulation + + if INCREMENTAL_DATASETS[name][0][0].shape[1] > 3: + # too slow (testing of the result --- qhull is still fast) + return + + chunks, opts = INCREMENTAL_DATASETS[name] + points = np.concatenate(chunks, axis=0) + + obj = qhull.Voronoi(chunks[0], incremental=True, + qhull_options=opts) + for chunk in chunks[1:]: + obj.add_points(chunk) + + obj2 = qhull.Voronoi(points) + + obj3 = qhull.Voronoi(chunks[0], incremental=True, + qhull_options=opts) + if len(chunks) > 1: + obj3.add_points(np.concatenate(chunks[1:], axis=0), + restart=True) + + # -- Check that the incremental mode agrees with upfront mode + assert_equal(len(obj.point_region), len(obj2.point_region)) + assert_equal(len(obj.point_region), len(obj3.point_region)) + + # The vertices may be in different order or duplicated in + # the incremental map + for objx in obj, obj3: + vertex_map = {-1: -1} + for i, v in enumerate(objx.vertices): + for j, v2 in enumerate(obj2.vertices): + if np.allclose(v, v2): + vertex_map[i] = j + + def remap(x): + if hasattr(x, '__len__'): + return tuple(set([remap(y) for y in x])) + try: + return vertex_map[x] + except KeyError: + raise AssertionError("incremental result has spurious vertex at %r" + % (objx.vertices[x],)) + + def simplified(x): + items = set(map(sorted_tuple, x)) + if () in items: + items.remove(()) + items = [x for x in items if len(x) > 1] + items.sort() + return items + + assert_equal( + simplified(remap(objx.regions)), + simplified(obj2.regions) + ) + assert_equal( + simplified(remap(objx.ridge_vertices)), + simplified(obj2.ridge_vertices) + ) + + # XXX: compare ridge_points --- not clear exactly how to do this + + +class Test_HalfspaceIntersection(object): + def assert_unordered_allclose(self, arr1, arr2, rtol=1e-7): + """Check that every line in arr1 is only once in arr2""" + assert_equal(arr1.shape, arr2.shape) + + truths = np.zeros((arr1.shape[0],), dtype=bool) + for l1 in arr1: + indexes = np.nonzero((abs(arr2 - l1) < rtol).all(axis=1))[0] + assert_equal(indexes.shape, (1,)) + truths[indexes[0]] = True + assert_(truths.all()) + + def test_cube_halfspace_intersection(self): + halfspaces = np.array([[-1.0, 0.0, 0.0], + [0.0, -1.0, 0.0], + [1.0, 0.0, -1.0], + [0.0, 1.0, -1.0]]) + feasible_point = np.array([0.5, 0.5]) + + points = np.array([[0.0, 1.0], [1.0, 1.0], [0.0, 0.0], [1.0, 0.0]]) + + hull = qhull.HalfspaceIntersection(halfspaces, feasible_point) + + assert_allclose(points, hull.intersections) + + def test_self_dual_polytope_intersection(self): + fname = os.path.join(os.path.dirname(__file__), 
'data',
+                             'selfdual-4d-polytope.txt')
+        ineqs = np.genfromtxt(fname)
+        halfspaces = -np.hstack((ineqs[:, 1:], ineqs[:, :1]))
+
+        feas_point = np.array([0., 0., 0., 0.])
+        hs = qhull.HalfspaceIntersection(halfspaces, feas_point)
+
+        assert_equal(hs.intersections.shape, (24, 4))
+
+        assert_almost_equal(hs.dual_volume, 32.0)
+        assert_equal(len(hs.dual_facets), 24)
+        for facet in hs.dual_facets:
+            assert_equal(len(facet), 6)
+
+        dists = halfspaces[:, -1] + halfspaces[:, :-1].dot(feas_point)
+        self.assert_unordered_allclose((halfspaces[:, :-1].T/dists).T, hs.dual_points)
+
+        points = itertools.permutations([0., 0., 0.5, -0.5])
+        for point in points:
+            assert_equal(np.sum((hs.intersections == point).all(axis=1)), 1)
+
+    def test_wrong_feasible_point(self):
+        halfspaces = np.array([[-1.0, 0.0, 0.0],
+                               [0.0, -1.0, 0.0],
+                               [1.0, 0.0, -1.0],
+                               [0.0, 1.0, -1.0]])
+        feasible_point = np.array([0.5, 0.5, 0.5])
+        # Feasible point is (ndim,) instead of (ndim-1,)
+        assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
+        feasible_point = np.array([[0.5], [0.5]])
+        # Feasible point is (ndim-1, 1) instead of (ndim-1,)
+        assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
+        feasible_point = np.array([[0.5, 0.5]])
+        # Feasible point is (1, ndim-1) instead of (ndim-1,)
+        assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
+
+        feasible_point = np.array([-0.5, -0.5])
+        # Feasible point is outside the feasible region
+        assert_raises(qhull.QhullError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
+
+    def test_incremental(self):
+        # Cube
+        halfspaces = np.array([[0., 0., -1., -0.5],
+                               [0., -1., 0., -0.5],
+                               [-1., 0., 0., -0.5],
+                               [1., 0., 0., -0.5],
+                               [0., 1., 0., -0.5],
+                               [0., 0., 1., -0.5]])
+        # Cut off each vertex (corner) of the cube
+        extra_normals = np.array([[1., 1., 1.],
+                                  [1., 1., -1.],
+                                  [1., -1., 1.],
+                                  [1, -1., -1.]])
+        offsets = np.array([[-1.]]*8)
+        extra_halfspaces = np.hstack((np.vstack((extra_normals, -extra_normals)),
+                                      offsets))
+
+        feas_point = np.array([0., 0., 0.])
+
+        inc_hs = qhull.HalfspaceIntersection(halfspaces, feas_point, incremental=True)
+
+        inc_res_hs = qhull.HalfspaceIntersection(halfspaces, feas_point, incremental=True)
+
+        for i, ehs in enumerate(extra_halfspaces):
+            inc_hs.add_halfspaces(ehs[np.newaxis, :])
+
+            inc_res_hs.add_halfspaces(ehs[np.newaxis, :], restart=True)
+
+            total = np.vstack((halfspaces, extra_halfspaces[:i+1, :]))
+
+            hs = qhull.HalfspaceIntersection(total, feas_point)
+
+            assert_allclose(inc_hs.halfspaces, inc_res_hs.halfspaces)
+            assert_allclose(inc_hs.halfspaces, hs.halfspaces)
+
+            # Direct computation and restart should have points in the same order
+            assert_allclose(hs.intersections, inc_res_hs.intersections)
+            # Incremental will have points in a different order than direct computation
+            self.assert_unordered_allclose(inc_hs.intersections, hs.intersections)
+
+        inc_hs.close()
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_qhull.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_qhull.pyc
new file mode 100644
index 0000000..4d4b40d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_qhull.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_spherical_voronoi.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_spherical_voronoi.py
new file mode 100644
index 0000000..3bc6173
--- /dev/null
+++ 
b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_spherical_voronoi.py @@ -0,0 +1,166 @@ +from __future__ import print_function +import numpy as np +import itertools +from numpy.testing import (assert_equal, + assert_almost_equal, + assert_array_equal, + assert_array_almost_equal) +from pytest import raises as assert_raises +from scipy.spatial import SphericalVoronoi, distance +from scipy.spatial import _spherical_voronoi as spherical_voronoi + + +class TestCircumcenters(object): + + def test_circumcenters(self): + tetrahedrons = np.array([ + [[1, 2, 3], + [-1.1, -2.1, -3.1], + [-1.2, 2.2, 3.2], + [-1.3, -2.3, 3.3]], + [[10, 20, 30], + [-10.1, -20.1, -30.1], + [-10.2, 20.2, 30.2], + [-10.3, -20.3, 30.3]] + ]) + + result = spherical_voronoi.calc_circumcenters(tetrahedrons) + + expected = [ + [-0.5680861153262529, -0.133279590288315, 0.1843323216995444], + [-0.5965330784014926, -0.1480377040397778, 0.1981967854886021] + ] + + assert_array_almost_equal(result, expected) + + +class TestProjectToSphere(object): + + def test_unit_sphere(self): + points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + center = np.array([0, 0, 0]) + radius = 1 + projected = spherical_voronoi.project_to_sphere(points, center, radius) + assert_array_almost_equal(points, projected) + + def test_scaled_points(self): + points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + center = np.array([0, 0, 0]) + radius = 1 + scaled = points * 2 + projected = spherical_voronoi.project_to_sphere(scaled, center, radius) + assert_array_almost_equal(points, projected) + + def test_translated_sphere(self): + points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + center = np.array([1, 2, 3]) + translated = points + center + radius = 1 + projected = spherical_voronoi.project_to_sphere(translated, center, + radius) + assert_array_almost_equal(translated, projected) + + +class TestSphericalVoronoi(object): + + def setup_method(self): + self.points = np.array([ + [-0.78928481, -0.16341094, 0.59188373], + [-0.66839141, 0.73309634, 0.12578818], + [0.32535778, -0.92476944, -0.19734181], + [-0.90177102, -0.03785291, -0.43055335], + [0.71781344, 0.68428936, 0.12842096], + [-0.96064876, 0.23492353, -0.14820556], + [0.73181537, -0.22025898, -0.6449281], + [0.79979205, 0.54555747, 0.25039913]] + ) + + def test_constructor(self): + center = np.array([1, 2, 3]) + radius = 2 + s1 = SphericalVoronoi(self.points) + # user input checks in SphericalVoronoi now require + # the radius / center to match the generators so adjust + # accordingly here + s2 = SphericalVoronoi(self.points * radius, radius) + s3 = SphericalVoronoi(self.points + center, None, center) + s4 = SphericalVoronoi(self.points * radius + center, radius, center) + assert_array_equal(s1.center, np.array([0, 0, 0])) + assert_equal(s1.radius, 1) + assert_array_equal(s2.center, np.array([0, 0, 0])) + assert_equal(s2.radius, 2) + assert_array_equal(s3.center, center) + assert_equal(s3.radius, 1) + assert_array_equal(s4.center, center) + assert_equal(s4.radius, radius) + + def test_vertices_regions_translation_invariance(self): + sv_origin = SphericalVoronoi(self.points) + center = np.array([1, 1, 1]) + sv_translated = SphericalVoronoi(self.points + center, None, center) + assert_array_equal(sv_origin.regions, sv_translated.regions) + assert_array_almost_equal(sv_origin.vertices + center, + sv_translated.vertices) + + def test_vertices_regions_scaling_invariance(self): + sv_unit = SphericalVoronoi(self.points) + sv_scaled = SphericalVoronoi(self.points * 2, 2) + 
assert_array_equal(sv_unit.regions, sv_scaled.regions) + assert_array_almost_equal(sv_unit.vertices * 2, + sv_scaled.vertices) + + def test_sort_vertices_of_regions(self): + sv = SphericalVoronoi(self.points) + unsorted_regions = sv.regions + sv.sort_vertices_of_regions() + assert_array_equal(sorted(sv.regions), sorted(unsorted_regions)) + + def test_sort_vertices_of_regions_flattened(self): + expected = sorted([[0, 6, 5, 2, 3], [2, 3, 10, 11, 8, 7], [0, 6, 4, 1], [4, 8, + 7, 5, 6], [9, 11, 10], [2, 7, 5], [1, 4, 8, 11, 9], [0, 3, 10, 9, + 1]]) + expected = list(itertools.chain(*sorted(expected))) + sv = SphericalVoronoi(self.points) + sv.sort_vertices_of_regions() + actual = list(itertools.chain(*sorted(sv.regions))) + assert_array_equal(actual, expected) + + def test_num_vertices(self): + # for any n >= 3, a spherical Voronoi diagram has 2n - 4 + # vertices; this is a direct consequence of Euler's formula + # as explained by Dinis and Mamede (2010) Proceedings of the + # 2010 International Symposium on Voronoi Diagrams in Science + # and Engineering + sv = SphericalVoronoi(self.points) + expected = self.points.shape[0] * 2 - 4 + actual = sv.vertices.shape[0] + assert_equal(actual, expected) + + def test_voronoi_circles(self): + sv = spherical_voronoi.SphericalVoronoi(self.points) + for vertex in sv.vertices: + distances = distance.cdist(sv.points,np.array([vertex])) + closest = np.array(sorted(distances)[0:3]) + assert_almost_equal(closest[0], closest[1], 7, str(vertex)) + assert_almost_equal(closest[0], closest[2], 7, str(vertex)) + + def test_duplicate_point_handling(self): + # an exception should be raised for degenerate generators + # related to Issue# 7046 + self.degenerate = np.concatenate((self.points, self.points)) + with assert_raises(ValueError): + sv = spherical_voronoi.SphericalVoronoi(self.degenerate) + + def test_incorrect_radius_handling(self): + # an exception should be raised if the radius provided + # cannot possibly match the input generators + with assert_raises(ValueError): + sv = spherical_voronoi.SphericalVoronoi(self.points, + radius=0.98) + + def test_incorrect_center_handling(self): + # an exception should be raised if the center provided + # cannot possibly match the input generators + with assert_raises(ValueError): + sv = spherical_voronoi.SphericalVoronoi(self.points, + center=[0.1,0,0]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_spherical_voronoi.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_spherical_voronoi.pyc new file mode 100644 index 0000000..5382601 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/tests/test_spherical_voronoi.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/__init__.py new file mode 100644 index 0000000..5d4e320 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/__init__.py @@ -0,0 +1,28 @@ +""" +======================================================== +Spatial Transformations (:mod:`scipy.spatial.transform`) +======================================================== + +.. currentmodule:: scipy.spatial.transform + +This package implements various spatial transformations. For now, +only rotations are supported. + +Rotations in 3 dimensions +========================= +.. 
autosummary:: + :toctree: generated/ + + Rotation + Slerp + +""" +from __future__ import division, print_function, absolute_import + +from .rotation import Rotation, Slerp + +__all__ = ['Rotation', 'Slerp'] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/__init__.pyc new file mode 100644 index 0000000..32bd63e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/rotation.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/rotation.py new file mode 100644 index 0000000..d3df924 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/rotation.py @@ -0,0 +1,1765 @@ +from __future__ import division, print_function, absolute_import + +import re +import warnings +import numpy as np +import scipy.linalg +from scipy._lib._util import check_random_state + + +_AXIS_TO_IND = {'x': 0, 'y': 1, 'z': 2} + + +def _elementary_basis_vector(axis): + b = np.zeros(3) + b[_AXIS_TO_IND[axis]] = 1 + return b + + +def _compute_euler_from_dcm(dcm, seq, extrinsic=False): + # The algorithm assumes intrinsic frame transformations. For representation + # the paper uses transformation matrices, which are transpose of the + # direction cosine matrices used by our Rotation class. + # Adapt the algorithm for our case by + # 1. Instead of transposing our representation, use the transpose of the + # O matrix as defined in the paper, and be careful to swap indices + # 2. Reversing both axis sequence and angles for extrinsic rotations + + if extrinsic: + seq = seq[::-1] + + if dcm.ndim == 2: + dcm = dcm[None, :, :] + num_rotations = dcm.shape[0] + + # Step 0 + # Algorithm assumes axes as column vectors, here we use 1D vectors + n1 = _elementary_basis_vector(seq[0]) + n2 = _elementary_basis_vector(seq[1]) + n3 = _elementary_basis_vector(seq[2]) + + # Step 2 + sl = np.dot(np.cross(n1, n2), n3) + cl = np.dot(n1, n3) + + # angle offset is lambda from the paper referenced in [2] from docstring of + # `as_euler` function + offset = np.arctan2(sl, cl) + c = np.vstack((n2, np.cross(n1, n2), n1)) + + # Step 3 + rot = np.array([ + [1, 0, 0], + [0, cl, sl], + [0, -sl, cl], + ]) + res = np.einsum('...ij,...jk->...ik', c, dcm) + dcm_transformed = np.einsum('...ij,...jk->...ik', res, c.T.dot(rot)) + + # Step 4 + angles = np.empty((num_rotations, 3)) + # Ensure less than unit norm + positive_unity = dcm_transformed[:, 2, 2] > 1 + negative_unity = dcm_transformed[:, 2, 2] < -1 + dcm_transformed[positive_unity, 2, 2] = 1 + dcm_transformed[negative_unity, 2, 2] = -1 + angles[:, 1] = np.arccos(dcm_transformed[:, 2, 2]) + + # Steps 5, 6 + eps = 1e-7 + safe1 = (np.abs(angles[:, 1]) >= eps) + safe2 = (np.abs(angles[:, 1] - np.pi) >= eps) + + # Step 4 (Completion) + angles[:, 1] += offset + + # 5b + safe_mask = np.logical_and(safe1, safe2) + angles[safe_mask, 0] = np.arctan2(dcm_transformed[safe_mask, 0, 2], + -dcm_transformed[safe_mask, 1, 2]) + angles[safe_mask, 2] = np.arctan2(dcm_transformed[safe_mask, 2, 0], + dcm_transformed[safe_mask, 2, 1]) + + if extrinsic: + # For extrinsic, set first angle to zero so that after reversal we + # ensure that third angle is zero + # 6a + angles[~safe_mask, 0] = 0 + # 6b + angles[~safe1, 2] = np.arctan2( + 
dcm_transformed[~safe1, 1, 0] - dcm_transformed[~safe1, 0, 1], + dcm_transformed[~safe1, 0, 0] + dcm_transformed[~safe1, 1, 1] + ) + # 6c + angles[~safe2, 2] = -np.arctan2( + dcm_transformed[~safe2, 1, 0] + dcm_transformed[~safe2, 0, 1], + dcm_transformed[~safe2, 0, 0] - dcm_transformed[~safe2, 1, 1] + ) + else: + # For instrinsic, set third angle to zero + # 6a + angles[~safe_mask, 2] = 0 + # 6b + angles[~safe1, 0] = np.arctan2( + dcm_transformed[~safe1, 1, 0] - dcm_transformed[~safe1, 0, 1], + dcm_transformed[~safe1, 0, 0] + dcm_transformed[~safe1, 1, 1] + ) + # 6c + angles[~safe2, 0] = np.arctan2( + dcm_transformed[~safe2, 1, 0] + dcm_transformed[~safe2, 0, 1], + dcm_transformed[~safe2, 0, 0] - dcm_transformed[~safe2, 1, 1] + ) + + # Step 7 + if seq[0] == seq[2]: + # lambda = 0, so we can only ensure angle2 -> [0, pi] + adjust_mask = np.logical_or(angles[:, 1] < 0, angles[:, 1] > np.pi) + else: + # lambda = + or - pi/2, so we can ensure angle2 -> [-pi/2, pi/2] + adjust_mask = np.logical_or(angles[:, 1] < -np.pi / 2, + angles[:, 1] > np.pi / 2) + + # Dont adjust gimbal locked angle sequences + adjust_mask = np.logical_and(adjust_mask, safe_mask) + + angles[adjust_mask, 0] += np.pi + angles[adjust_mask, 1] = 2 * offset - angles[adjust_mask, 1] + angles[adjust_mask, 2] -= np.pi + + angles[angles < -np.pi] += 2 * np.pi + angles[angles > np.pi] -= 2 * np.pi + + # Step 8 + if not np.all(safe_mask): + warnings.warn("Gimbal lock detected. Setting third angle to zero since" + " it is not possible to uniquely determine all angles.") + + # Reverse role of extrinsic and intrinsic rotations, but let third angle be + # zero for gimbal locked cases + if extrinsic: + angles = angles[:, ::-1] + return angles + + +def _make_elementary_quat(axis, angles): + quat = np.zeros((angles.shape[0], 4)) + + quat[:, 3] = np.cos(angles / 2) + quat[:, _AXIS_TO_IND[axis]] = np.sin(angles / 2) + return quat + + +def _compose_quat(p, q): + product = np.empty((max(p.shape[0], q.shape[0]), 4)) + product[:, 3] = p[:, 3] * q[:, 3] - np.sum(p[:, :3] * q[:, :3], axis=1) + product[:, :3] = (p[:, None, 3] * q[:, :3] + q[:, None, 3] * p[:, :3] + + np.cross(p[:, :3], q[:, :3])) + return product + + +def _elementary_quat_compose(seq, angles, intrinsic=False): + result = _make_elementary_quat(seq[0], angles[:, 0]) + + for idx, axis in enumerate(seq[1:], start=1): + if intrinsic: + result = _compose_quat( + result, + _make_elementary_quat(axis, angles[:, idx])) + else: + result = _compose_quat( + _make_elementary_quat(axis, angles[:, idx]), + result) + return result + + +class Rotation(object): + """Rotation in 3 dimensions. + + This class provides an interface to initialize from and represent rotations + with: + + - Quaternions + - Direction Cosine Matrices + - Rotation Vectors + - Euler angles + + The following operations on rotations are supported: + + - Application on vectors + - Rotation Composition + - Rotation Inversion + - Rotation Indexing + + Indexing within a rotation is supported since multiple rotation transforms + can be stored within a single `Rotation` instance. + + To create `Rotation` objects use `from_...` classmethods, `__init__` is not + supposed to be used directly. 
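A note on conventions used throughout this class: quaternions are stored in scalar-last (x, y, z, w) order. A minimal sketch (the `q_wxyz` name is illustrative only) of adapting a quaternion that comes from a scalar-first (w, x, y, z) source before handing it to this class:

    >>> import numpy as np
    >>> from scipy.spatial.transform import Rotation as R
    >>> q_wxyz = np.array([np.cos(np.pi/4), 0, 0, np.sin(np.pi/4)])
    >>> R.from_quat(np.roll(q_wxyz, -1)).as_euler('zyx', degrees=True)
    array([90., 0., 0.])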
+ + Methods + ------- + __len__ + from_quat + from_dcm + from_rotvec + from_euler + as_quat + as_dcm + as_rotvec + as_euler + apply + __mul__ + inv + __getitem__ + random + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + A `Rotation` instance can be initialized in any of the above formats and + converted to any of the others. The underlying object is independent of the + representation used for initialization. + + Consider a counter-clockwise rotation of 90 degrees about the z-axis. This + corresponds to the following quaternion (in scalar-last format): + + >>> r = R.from_quat([0, 0, np.sin(np.pi/4), np.cos(np.pi/4)]) + + The rotation can be expressed in any of the other formats: + + >>> r.as_dcm() + array([[ 2.22044605e-16, -1.00000000e+00, 0.00000000e+00], + [ 1.00000000e+00, 2.22044605e-16, 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + >>> r.as_rotvec() + array([0. , 0. , 1.57079633]) + >>> r.as_euler('zyx', degrees=True) + array([90., 0., 0.]) + + The same rotation can be initialized using a direction cosine matrix: + + >>> r = R.from_dcm(np.array([ + ... [0, -1, 0], + ... [1, 0, 0], + ... [0, 0, 1]])) + + Representation in other formats: + + >>> r.as_quat() + array([0. , 0. , 0.70710678, 0.70710678]) + >>> r.as_rotvec() + array([0. , 0. , 1.57079633]) + >>> r.as_euler('zyx', degrees=True) + array([90., 0., 0.]) + + The rotation vector corresponding to this rotation is given by: + + >>> r = R.from_rotvec(np.pi/2 * np.array([0, 0, 1])) + + Representation in other formats: + + >>> r.as_quat() + array([0. , 0. , 0.70710678, 0.70710678]) + >>> r.as_dcm() + array([[ 2.22044605e-16, -1.00000000e+00, 0.00000000e+00], + [ 1.00000000e+00, 2.22044605e-16, 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + >>> r.as_euler('zyx', degrees=True) + array([90., 0., 0.]) + + The `from_euler` function is quite flexible in the range of input formats + it supports. Here we initialize a single rotation about a single axis: + + >>> r = R.from_euler('z', 90, degrees=True) + + Again, the object is representation independent and can be converted to any + other format: + + >>> r.as_quat() + array([0. , 0. , 0.70710678, 0.70710678]) + >>> r.as_dcm() + array([[ 2.22044605e-16, -1.00000000e+00, 0.00000000e+00], + [ 1.00000000e+00, 2.22044605e-16, 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + >>> r.as_rotvec() + array([0. , 0. , 1.57079633]) + + It is also possible to initialize multiple rotations in a single instance + using any of the `from_...` functions. Here we initialize a stack of 3 + rotations using the `from_euler` function: + + >>> r = R.from_euler('zyx', [ + ... [90, 0, 0], + ... [0, 45, 0], + ... [45, 60, 30]], degrees=True) + + The other representations also now return a stack of 3 rotations. For + example: + + >>> r.as_quat() + array([[0. , 0. , 0.70710678, 0.70710678], + [0. , 0.38268343, 0. , 0.92387953], + [0.39190384, 0.36042341, 0.43967974, 0.72331741]]) + + Applying the above rotations onto a vector: + + >>> v = [1, 2, 3] + >>> r.apply(v) + array([[-2. , 1. , 3. ], + [ 2.82842712, 2. , 1.41421356], + [ 2.24452282, 0.78093109, 2.89002836]]) + + A `Rotation` instance can be indexed and sliced as if it were a single + 1D array or list: + + >>> r.as_quat() + array([[0. , 0. , 0.70710678, 0.70710678], + [0. , 0.38268343, 0. 
, 0.92387953], + [0.39190384, 0.36042341, 0.43967974, 0.72331741]]) + >>> p = r[0] + >>> p.as_dcm() + array([[ 2.22044605e-16, -1.00000000e+00, 0.00000000e+00], + [ 1.00000000e+00, 2.22044605e-16, 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + >>> q = r[1:3] + >>> q.as_quat() + array([[0. , 0.38268343, 0. , 0.92387953], + [0.39190384, 0.36042341, 0.43967974, 0.72331741]]) + + Multiple rotations can be composed using the `*` operator: + + >>> r1 = R.from_euler('z', 90, degrees=True) + >>> r2 = R.from_rotvec([np.pi/4, 0, 0]) + >>> v = [1, 2, 3] + >>> r2.apply(r1.apply(v)) + array([-2. , -1.41421356, 2.82842712]) + >>> r3 = r2 * r1 # Note the order + >>> r3.apply(v) + array([-2. , -1.41421356, 2.82842712]) + + Finally, it is also possible to invert rotations: + + >>> r1 = R.from_euler('z', [90, 45], degrees=True) + >>> r2 = r1.inv() + >>> r2.as_euler('zyx', degrees=True) + array([[-90., 0., 0.], + [-45., 0., 0.]]) + + These examples serve as an overview into the `Rotation` class and highlight + major functionalities. For more thorough examples of the range of input and + output formats supported, consult the individual method's examples. + + """ + def __init__(self, quat, normalized=False, copy=True): + self._single = False + quat = np.asarray(quat, dtype=float) + + if quat.ndim not in [1, 2] or quat.shape[-1] != 4: + raise ValueError("Expected `quat` to have shape (4,) or (N x 4), " + "got {}.".format(quat.shape)) + + # If a single quaternion is given, convert it to a 2D 1 x 4 matrix but + # set self._single to True so that we can return appropriate objects + # in the `to_...` methods + if quat.shape == (4,): + quat = quat[None, :] + self._single = True + + if normalized: + self._quat = quat.copy() if copy else quat + else: + self._quat = quat.copy() + norms = scipy.linalg.norm(quat, axis=1) + + zero_norms = norms == 0 + if zero_norms.any(): + raise ValueError("Found zero norm quaternions in `quat`.") + + # Ensure norm is broadcasted along each column. + self._quat[~zero_norms] /= norms[~zero_norms][:, None] + + def __len__(self): + """Number of rotations contained in this object. + + Multiple rotations can be stored in a single instance. + + Returns + ------- + length : int + Number of rotations stored in object. + + """ + return self._quat.shape[0] + + @classmethod + def from_quat(cls, quat, normalized=False): + """Initialize from quaternions. + + 3D rotations can be represented using unit-norm quaternions [1]_. + + Parameters + ---------- + quat : array_like, shape (N, 4) or (4,) + Each row is a (possibly non-unit norm) quaternion in scalar-last + (x, y, z, w) format. + normalized : boolean, optional + If `False`, input quaternions are normalized to unit norm before + being stored. If `True`, quaternions are assumed to already have + unit norm and are stored as given. Default is `False`. + + Returns + ------- + rotation : `Rotation` instance + Object containing the rotations represented by input quaternions. + + References + ---------- + .. [1] `Quaternions and Spatial Rotation + <https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation>`_ + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Initialize a single rotation: + + >>> r = R.from_quat([1, 0, 0, 0]) + >>> r.as_quat() + array([1., 0., 0., 0.]) + >>> r.as_quat().shape + (4,) + + Initialize multiple rotations in a single object: + + >>> r = R.from_quat([ + ... [1, 0, 0, 0], + ... [0, 0, 0, 1] + ... 
]) + >>> r.as_quat() + array([[1., 0., 0., 0.], + [0., 0., 0., 1.]]) + >>> r.as_quat().shape + (2, 4) + + It is also possible to have a stack of a single rotation: + + >>> r = R.from_quat([[0, 0, 0, 1]]) + >>> r.as_quat() + array([[0., 0., 0., 1.]]) + >>> r.as_quat().shape + (1, 4) + + By default, quaternions are normalized before initialization. + + >>> r = R.from_quat([0, 0, 1, 1]) + >>> r.as_quat() + array([0. , 0. , 0.70710678, 0.70710678]) + + If unit norms are ensured, skip the normalization step. + + >>> r = R.from_quat([0, 0, 1, 0], normalized=True) + >>> r.as_quat() + array([0., 0., 1., 0.]) + + """ + return cls(quat, normalized) + + @classmethod + def from_dcm(cls, dcm): + """Initialize from direction cosine matrices. + + Rotations in 3 dimensions can be represented using 3 x 3 proper + orthogonal matrices [1]_. If the input is not proper orthogonal, + an approximation is created using the method described in [2]_. + + Parameters + ---------- + dcm : array_like, shape (N, 3, 3) or (3, 3) + A single matrix or a stack of matrices, where `dcm[i]` is the i-th + matrix. + + Returns + ------- + rotation : `Rotation` instance + Object containing the rotations represented by the input direction + cosine matrices. + + References + ---------- + .. [1] `Direction Cosine Matrix + <https://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions>`_ + .. [2] F. Landis Markley, `Unit Quaternion from Rotation Matrix + <https://arc.aiaa.org/doi/abs/10.2514/1.31730>`_ + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Initialize a single rotation: + + >>> r = R.from_dcm([ + ... [0, -1, 0], + ... [1, 0, 0], + ... [0, 0, 1]]) + >>> r.as_dcm().shape + (3, 3) + + Initialize multiple rotations in a single object: + + >>> r = R.from_dcm([ + ... [ + ... [0, -1, 0], + ... [1, 0, 0], + ... [0, 0, 1], + ... ], + ... [ + ... [1, 0, 0], + ... [0, 0, -1], + ... [0, 1, 0], + ... ]]) + >>> r.as_dcm().shape + (2, 3, 3) + + If input matrices are not special orthogonal (orthogonal with + determinant equal to +1), then a special orthogonal estimate is stored: + + >>> a = np.array([ + ... [0, -0.5, 0], + ... [0.5, 0, 0], + ... [0, 0, 0.5]]) + >>> np.linalg.det(a) + 0.12500000000000003 + >>> r = R.from_dcm(a) + >>> dcm = r.as_dcm() + >>> dcm + array([[-0.38461538, -0.92307692, 0. ], + [ 0.92307692, -0.38461538, 0. ], + [ 0. , 0. , 1. ]]) + >>> np.linalg.det(dcm) + 1.0000000000000002 + + It is also possible to have a stack containing a single rotation: + + >>> r = R.from_dcm([[ + ... [0, -1, 0], + ... [1, 0, 0], + ... 
[0, 0, 1]]]) + >>> r.as_dcm() + array([[[ 0., -1., 0.], + [ 1., 0., 0.], + [ 0., 0., 1.]]]) + >>> r.as_dcm().shape + (1, 3, 3) + + """ + is_single = False + dcm = np.asarray(dcm, dtype=float) + + if dcm.ndim not in [2, 3] or dcm.shape[-2:] != (3, 3): + raise ValueError("Expected `dcm` to have shape (3, 3) or " + "(N, 3, 3), got {}".format(dcm.shape)) + + # If a single dcm is given, convert it to 3D 1 x 3 x 3 matrix but set + # self._single to True so that we can return appropriate objects in + # the `to_...` methods + if dcm.shape == (3, 3): + dcm = dcm.reshape((1, 3, 3)) + is_single = True + + num_rotations = dcm.shape[0] + + decision_matrix = np.empty((num_rotations, 4)) + decision_matrix[:, :3] = dcm.diagonal(axis1=1, axis2=2) + decision_matrix[:, -1] = decision_matrix[:, :3].sum(axis=1) + choices = decision_matrix.argmax(axis=1) + + quat = np.empty((num_rotations, 4)) + + ind = np.nonzero(choices != 3)[0] + i = choices[ind] + j = (i + 1) % 3 + k = (j + 1) % 3 + + quat[ind, i] = 1 - decision_matrix[ind, -1] + 2 * dcm[ind, i, i] + quat[ind, j] = dcm[ind, j, i] + dcm[ind, i, j] + quat[ind, k] = dcm[ind, k, i] + dcm[ind, i, k] + quat[ind, 3] = dcm[ind, k, j] - dcm[ind, j, k] + + ind = np.nonzero(choices == 3)[0] + quat[ind, 0] = dcm[ind, 2, 1] - dcm[ind, 1, 2] + quat[ind, 1] = dcm[ind, 0, 2] - dcm[ind, 2, 0] + quat[ind, 2] = dcm[ind, 1, 0] - dcm[ind, 0, 1] + quat[ind, 3] = 1 + decision_matrix[ind, -1] + + quat /= np.linalg.norm(quat, axis=1)[:, None] + + if is_single: + return cls(quat[0], normalized=True, copy=False) + else: + return cls(quat, normalized=True, copy=False) + + @classmethod + def from_rotvec(cls, rotvec): + """Initialize from rotation vectors. + + A rotation vector is a 3 dimensional vector which is co-directional to + the axis of rotation and whose norm gives the angle of rotation (in + radians) [1]_. + + Parameters + ---------- + rotvec : array_like, shape (N, 3) or (3,) + A single vector or a stack of vectors, where `rot_vec[i]` gives + the ith rotation vector. + + Returns + ------- + rotation : `Rotation` instance + Object containing the rotations represented by input rotation + vectors. + + References + ---------- + .. [1] `Rotation Vectors + <https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation#Rotation_vector>`_ + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Initialize a single rotation: + + >>> r = R.from_rotvec(np.pi/2 * np.array([0, 0, 1])) + >>> r.as_rotvec() + array([0. , 0. , 1.57079633]) + >>> r.as_rotvec().shape + (3,) + + Initialize multiple rotations in one object: + + >>> r = R.from_rotvec([ + ... [0, 0, np.pi/2], + ... [np.pi/2, 0, 0]]) + >>> r.as_rotvec() + array([[0. , 0. , 1.57079633], + [1.57079633, 0. , 0. 
]])
+        >>> r.as_rotvec().shape
+        (2, 3)
+
+        It is also possible to have a stack of a single rotation:
+
+        >>> r = R.from_rotvec([[0, 0, np.pi/2]])
+        >>> r.as_rotvec().shape
+        (1, 3)
+
+        """
+        is_single = False
+        rotvec = np.asarray(rotvec, dtype=float)
+
+        if rotvec.ndim not in [1, 2] or rotvec.shape[-1] != 3:
+            raise ValueError("Expected `rot_vec` to have shape (3,) "
+                             "or (N, 3), got {}".format(rotvec.shape))
+
+        # If a single vector is given, convert it to a 2D 1 x 3 matrix but
+        # set self._single to True so that we can return appropriate objects
+        # in the `as_...` methods
+        if rotvec.shape == (3,):
+            rotvec = rotvec[None, :]
+            is_single = True
+
+        num_rotations = rotvec.shape[0]
+
+        norms = np.linalg.norm(rotvec, axis=1)
+        small_angle = (norms <= 1e-3)
+        large_angle = ~small_angle
+
+        scale = np.empty(num_rotations)
+        scale[small_angle] = (0.5 - norms[small_angle] ** 2 / 48 +
+                              norms[small_angle] ** 4 / 3840)
+        scale[large_angle] = (np.sin(norms[large_angle] / 2) /
+                              norms[large_angle])
+
+        quat = np.empty((num_rotations, 4))
+        quat[:, :3] = scale[:, None] * rotvec
+        quat[:, 3] = np.cos(norms / 2)
+
+        if is_single:
+            return cls(quat[0], normalized=True, copy=False)
+        else:
+            return cls(quat, normalized=True, copy=False)
+
+    @classmethod
+    def from_euler(cls, seq, angles, degrees=False):
+        """Initialize from Euler angles.
+
+        Rotations in 3 dimensions can be represented by a sequence of 3
+        rotations around a sequence of axes. In theory, any three axes spanning
+        the 3D Euclidean space are enough. In practice the axes of rotation are
+        chosen to be the basis vectors.
+
+        The three rotations can either be in a global frame of reference
+        (extrinsic) or in a body centred frame of reference (intrinsic), which
+        is attached to, and moves with, the object under rotation [1]_.
+
+        Parameters
+        ----------
+        seq : string
+            Specifies sequence of axes for rotations. Up to 3 characters
+            belonging to the set {'X', 'Y', 'Z'} for intrinsic rotations, or
+            {'x', 'y', 'z'} for extrinsic rotations. Extrinsic and intrinsic
+            rotations cannot be mixed in one function call.
+        angles : float or array_like, shape (N,) or (N, [1 or 2 or 3])
+            Euler angles specified in radians (`degrees` is False) or degrees
+            (`degrees` is True).
+            For a single character `seq`, `angles` can be:
+
+            - a single value
+            - array_like with shape (N,), where each `angle[i]`
+              corresponds to a single rotation
+            - array_like with shape (N, 1), where each `angle[i, 0]`
+              corresponds to a single rotation
+
+            For 2- and 3-character wide `seq`, `angles` can be:
+
+            - array_like with shape (W,) where `W` is the width of
+              `seq`, which corresponds to a single rotation with `W` axes
+            - array_like with shape (N, W) where each `angle[i]`
+              corresponds to a sequence of Euler angles describing a single
+              rotation
+
+        degrees : boolean, optional
+            If True, then the given angles are assumed to be in degrees.
+            Default is False.
+
+        Returns
+        -------
+        rotation : `Rotation` instance
+            Object containing the rotation represented by the sequence of
+            rotations around given axes with given angles.
+
+        References
+        ----------
+        .. 
[1] `Euler angle definitions
+               <https://en.wikipedia.org/wiki/Euler_angles#Definition_by_intrinsic_rotations>`_
+
+        Examples
+        --------
+        >>> from scipy.spatial.transform import Rotation as R
+
+        Initialize a single rotation along a single axis:
+
+        >>> r = R.from_euler('x', 90, degrees=True)
+        >>> r.as_quat().shape
+        (4,)
+
+        Initialize a single rotation with a given axis sequence:
+
+        >>> r = R.from_euler('zyx', [90, 45, 30], degrees=True)
+        >>> r.as_quat().shape
+        (4,)
+
+        Initialize a stack with a single rotation around a single axis:
+
+        >>> r = R.from_euler('x', [90], degrees=True)
+        >>> r.as_quat().shape
+        (1, 4)
+
+        Initialize a stack with a single rotation with an axis sequence:
+
+        >>> r = R.from_euler('zyx', [[90, 45, 30]], degrees=True)
+        >>> r.as_quat().shape
+        (1, 4)
+
+        Initialize multiple elementary rotations in one object:
+
+        >>> r = R.from_euler('x', [90, 45, 30], degrees=True)
+        >>> r.as_quat().shape
+        (3, 4)
+
+        Initialize multiple rotations in one object:
+
+        >>> r = R.from_euler('zyx', [[90, 45, 30], [35, 45, 90]], degrees=True)
+        >>> r.as_quat().shape
+        (2, 4)
+
+        """
+        num_axes = len(seq)
+        if num_axes < 1 or num_axes > 3:
+            raise ValueError("Expected axis specification to be a non-empty "
+                             "string of up to 3 characters, got {}".format(seq))
+
+        intrinsic = (re.match(r'^[XYZ]{1,3}$', seq) is not None)
+        extrinsic = (re.match(r'^[xyz]{1,3}$', seq) is not None)
+        if not (intrinsic or extrinsic):
+            raise ValueError("Expected axes from `seq` to be from ['x', 'y', "
+                             "'z'] or ['X', 'Y', 'Z'], got {}".format(seq))
+
+        if any(seq[i] == seq[i+1] for i in range(num_axes - 1)):
+            raise ValueError("Expected consecutive axes to be different, "
+                             "got {}".format(seq))
+
+        seq = seq.lower()
+
+        angles = np.asarray(angles, dtype=float)
+        if degrees:
+            angles = np.deg2rad(angles)
+
+        is_single = False
+        # Prepare angles to have shape (num_rot, num_axes)
+        if num_axes == 1:
+            if angles.ndim == 0:
+                # (1, 1)
+                angles = angles.reshape((1, 1))
+                is_single = True
+            elif angles.ndim == 1:
+                # (N, 1)
+                angles = angles[:, None]
+            elif angles.ndim == 2 and angles.shape[-1] != 1:
+                raise ValueError("Expected `angles` parameter to have shape "
+                                 "(N, 1), got {}.".format(angles.shape))
+            elif angles.ndim > 2:
+                raise ValueError("Expected float, 1D array, or 2D array for "
+                                 "parameter `angles` corresponding to `seq`, "
+                                 "got shape {}.".format(angles.shape))
+        else:  # 2 or 3 axes
+            if angles.ndim not in [1, 2] or angles.shape[-1] != num_axes:
+                raise ValueError("Expected `angles` to be at most "
+                                 "2-dimensional with width equal to number "
+                                 "of axes specified, got shape {}.".format(
+                                     angles.shape))
+
+            if angles.ndim == 1:
+                # (1, num_axes)
+                angles = angles[None, :]
+                is_single = True
+
+        # By now angles should have shape (num_rot, num_axes)
+        # sanity check
+        if angles.ndim != 2 or angles.shape[-1] != num_axes:
+            raise ValueError("Expected angles to have shape (num_rotations, "
+                             "num_axes), got {}.".format(angles.shape))
+
+        quat = _elementary_quat_compose(seq, angles, intrinsic)
+        return cls(quat[0] if is_single else quat, normalized=True, copy=False)
+
+    def as_quat(self):
+        """Represent as quaternions.
+
+        Rotations in 3 dimensions can be represented using unit norm
+        quaternions [1]_. The mapping from quaternions to rotations is
+        two-to-one, i.e. quaternions `q` and `-q`, where `-q` simply reverses
+        the sign of each component, represent the same spatial rotation.
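This two-to-one mapping can be checked directly; a small sketch (not part of the upstream docstring) showing that `q` and `-q` produce identical direction cosine matrices:

    >>> import numpy as np
    >>> from scipy.spatial.transform import Rotation as R
    >>> np.allclose(R.from_quat([0, 0, 1, 1]).as_dcm(),
    ...             R.from_quat([0, 0, -1, -1]).as_dcm())
    True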
+ + Returns + ------- + quat : `numpy.ndarray`, shape (4,) or (N, 4) + Shape depends on shape of inputs used for initialization. + + References + ---------- + .. [1] `Quaternions and Spatial Rotation + <https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation>`_ + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Represent a single rotation: + + >>> r = R.from_dcm([ + ... [0, -1, 0], + ... [1, 0, 0], + ... [0, 0, 1]]) + >>> r.as_quat() + array([0. , 0. , 0.70710678, 0.70710678]) + >>> r.as_quat().shape + (4,) + + Represent a stack with a single rotation: + + >>> r = R.from_quat([[0, 0, 0, 1]]) + >>> r.as_quat().shape + (1, 4) + + Represent multiple rotaions in a single object: + + >>> r = R.from_rotvec([[np.pi, 0, 0], [0, 0, np.pi/2]]) + >>> r.as_quat().shape + (2, 4) + + """ + if self._single: + return self._quat[0].copy() + else: + return self._quat.copy() + + def as_dcm(self): + """Represent as direction cosine matrices. + + 3D rotations can be represented using direction cosine matrices, which + are 3 x 3 real orthogonal matrices with determinant equal to +1 [1]_. + + Returns + ------- + dcm : `numpy.ndarray`, shape (3, 3) or (N, 3, 3) + Shape depends on shape of inputs used for initialization. + + References + ---------- + .. [1] `Direction Cosine Matrix + <https://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions>`_ + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Represent a single rotation: + + >>> r = R.from_rotvec([0, 0, np.pi/2]) + >>> r.as_dcm() + array([[ 2.22044605e-16, -1.00000000e+00, 0.00000000e+00], + [ 1.00000000e+00, 2.22044605e-16, 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + >>> r.as_dcm().shape + (3, 3) + + Represent a stack with a single rotation: + + >>> r = R.from_quat([[1, 1, 0, 0]]) + >>> r.as_dcm() + array([[[ 0., 1., 0.], + [ 1., 0., 0.], + [ 0., 0., -1.]]]) + >>> r.as_dcm().shape + (1, 3, 3) + + Represent multiple rotations: + + >>> r = R.from_rotvec([[np.pi/2, 0, 0], [0, 0, np.pi/2]]) + >>> r.as_dcm() + array([[[ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00], + [ 0.00000000e+00, 2.22044605e-16, -1.00000000e+00], + [ 0.00000000e+00, 1.00000000e+00, 2.22044605e-16]], + [[ 2.22044605e-16, -1.00000000e+00, 0.00000000e+00], + [ 1.00000000e+00, 2.22044605e-16, 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]]) + >>> r.as_dcm().shape + (2, 3, 3) + + """ + x = self._quat[:, 0] + y = self._quat[:, 1] + z = self._quat[:, 2] + w = self._quat[:, 3] + + x2 = x * x + y2 = y * y + z2 = z * z + w2 = w * w + + xy = x * y + zw = z * w + xz = x * z + yw = y * w + yz = y * z + xw = x * w + + num_rotations = len(self) + dcm = np.empty((num_rotations, 3, 3)) + + dcm[:, 0, 0] = x2 - y2 - z2 + w2 + dcm[:, 1, 0] = 2 * (xy + zw) + dcm[:, 2, 0] = 2 * (xz - yw) + + dcm[:, 0, 1] = 2 * (xy - zw) + dcm[:, 1, 1] = - x2 + y2 - z2 + w2 + dcm[:, 2, 1] = 2 * (yz + xw) + + dcm[:, 0, 2] = 2 * (xz + yw) + dcm[:, 1, 2] = 2 * (yz - xw) + dcm[:, 2, 2] = - x2 - y2 + z2 + w2 + + if self._single: + return dcm[0] + else: + return dcm + + def as_rotvec(self): + """Represent as rotation vectors. + + A rotation vector is a 3 dimensional vector which is co-directional to + the axis of rotation and whose norm gives the angle of rotation (in + radians) [1]_. + + Returns + ------- + rotvec : `numpy.ndarray`, shape (3,) or (N, 3) + Shape depends on shape of inputs used for initialization. + + References + ---------- + .. 
[1] `Rotation Vectors + <https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation#Rotation_vector>`_ + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Represent a single rotation: + + >>> r = R.from_euler('z', 90, degrees=True) + >>> r.as_rotvec() + array([0. , 0. , 1.57079633]) + >>> r.as_rotvec().shape + (3,) + + Represent a stack with a single rotation: + + >>> r = R.from_quat([[0, 0, 1, 1]]) + >>> r.as_rotvec() + array([[0. , 0. , 1.57079633]]) + >>> r.as_rotvec().shape + (1, 3) + + Represent multiple rotations in a single object: + + >>> r = R.from_quat([[0, 0, 1, 1], [1, 1, 0, 1]]) + >>> r.as_rotvec() + array([[0. , 0. , 1.57079633], + [1.35102172, 1.35102172, 0. ]]) + >>> r.as_rotvec().shape + (2, 3) + + """ + quat = self._quat.copy() + # w > 0 to ensure 0 <= angle <= pi + quat[quat[:, 3] < 0] *= -1 + + angle = 2 * np.arctan2(np.linalg.norm(quat[:, :3], axis=1), quat[:, 3]) + + small_angle = (angle <= 1e-3) + large_angle = ~small_angle + + num_rotations = len(self) + scale = np.empty(num_rotations) + scale[small_angle] = (2 + angle[small_angle] ** 2 / 12 + + 7 * angle[small_angle] ** 4 / 2880) + scale[large_angle] = (angle[large_angle] / + np.sin(angle[large_angle] / 2)) + + rotvec = scale[:, None] * quat[:, :3] + + if self._single: + return rotvec[0] + else: + return rotvec + + def as_euler(self, seq, degrees=False): + """Represent as Euler angles. + + Any orientation can be expressed as a composition of 3 elementary + rotations. Once the axis sequence has been chosen, Euler angles define + the angle of rotation around each respective axis [1]_. + + The algorithm from [2]_ has been used to calculate Euler angles for the + rotation about a given sequence of axes. + + Euler angles suffer from the problem of gimbal lock [3]_, where the + representation loses a degree of freedom and it is not possible to + determine the first and third angles uniquely. In this case, + a warning is raised, and the third angle is set to zero. Note however + that the returned angles still represent the correct rotation. + + Parameters + ---------- + seq : string, length 3 + 3 characters belonging to the set {'X', 'Y', 'Z'} for intrinsic + rotations, or {'x', 'y', 'z'} for extrinsic rotations [1]_. + Adjacent axes cannot be the same. + Extrinsic and intrinsic rotations cannot be mixed in one function + call. + degrees : boolean, optional + Returned angles are in degrees if this flag is True, else they are + in radians. Default is False. + + Returns + ------- + angles : `numpy.ndarray`, shape (3,) or (N, 3) + Shape depends on shape of inputs used to initialize object. + + The returned angles are in the range: + + - First angle belongs to [-180, 180] degrees (both inclusive) + - Third angle belongs to [-180, 180] degrees (both inclusive) + - Second angle belongs to: + + - [-90, 90] degrees if all axes are different (like xyz) + - [0, 180] degrees if first and third axes are the same + (like zxz) + + References + ---------- + .. [1] `Euler angle definitions + <https://en.wikipedia.org/wiki/Euler_angles#Definition_by_intrinsic_rotations>`_ + .. [2] Malcolm D. Shuster, F. Landis Markley + `General Formula for Euler Angles + <https://arc.aiaa.org/doi/abs/10.2514/1.16622>`_ + .. 
[3] `Gimbal lock + <https://en.wikipedia.org/wiki/Gimbal_lock#In_applied_mathematics>`_ + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Represent a single rotation: + + >>> r = R.from_rotvec([0, 0, np.pi/2]) + >>> r.as_euler('zxy', degrees=True) + array([90., 0., 0.]) + >>> r.as_euler('zxy', degrees=True).shape + (3,) + + Represent a stack of single rotation: + + >>> r = R.from_rotvec([[0, 0, np.pi/2]]) + >>> r.as_euler('zxy', degrees=True) + array([[90., 0., 0.]]) + >>> r.as_euler('zxy', degrees=True).shape + (1, 3) + + Represent multiple rotations in a single object: + + >>> r = R.from_rotvec([ + ... [0, 0, np.pi/2], + ... [0, -np.pi/3, 0], + ... [np.pi/4, 0, 0]]) + >>> r.as_euler('zxy', degrees=True) + array([[ 90., 0., 0.], + [ 0., 0., -60.], + [ 0., 45., 0.]]) + >>> r.as_euler('zxy', degrees=True).shape + (3, 3) + + """ + if len(seq) != 3: + raise ValueError("Expected 3 axes, got {}.".format(seq)) + + intrinsic = (re.match(r'^[XYZ]{1,3}$', seq) is not None) + extrinsic = (re.match(r'^[xyz]{1,3}$', seq) is not None) + if not (intrinsic or extrinsic): + raise ValueError("Expected axes from `seq` to be from " + "['x', 'y', 'z'] or ['X', 'Y', 'Z'], " + "got {}".format(seq)) + + if any(seq[i] == seq[i+1] for i in range(2)): + raise ValueError("Expected consecutive axes to be different, " + "got {}".format(seq)) + + seq = seq.lower() + + angles = _compute_euler_from_dcm(self.as_dcm(), seq, extrinsic) + if degrees: + angles = np.rad2deg(angles) + + return angles[0] if self._single else angles + + def apply(self, vectors, inverse=False): + """Apply this rotation to a set of vectors. + + If the original frame rotates to the final frame by this rotation, then + its application to a vector can be seen in two ways: + + - As a projection of vector components expressed in the final frame + to the original frame. + - As the physical rotation of a vector being glued to the original + frame as it rotates. In this case the vector components are + expressed in the original frame before and after the rotation. + + In terms of DCMs, this application is the same as + `self.as_dcm().dot(vectors)`. + + Parameters + ---------- + vectors : array_like, shape (3,) or (N, 3) + Each `vectors[i]` represents a vector in 3D space. A single vector + can either be specified with shape `(3, )` or `(1, 3)`. The number + of rotations and number of vectors given must follow standard numpy + broadcasting rules: either one of them equals unity or they both + equal each other. + inverse : boolean, optional + If `inverse` is `True` then the inverse of the rotation(s) is + applied to the input vectors. Default is `False`. + + Returns + ------- + rotated_vectors : `numpy.ndarray`, shape (3,) or (N, 3) + Result of applying rotation on input vectors. + Shape depends on the following cases: + + - If object contains a single rotation (as opposed to a stack + with a single rotation) and a single vector is specified with + shape `(3,)`, then `output` has shape `(3,)`. + - In all other cases, `output` has shape `(N, 3)`, where `N` is + either the number of rotations or vectors. 
+ + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Single rotation applied on a single vector: + + >>> vector = np.array([1, 0, 0]) + >>> r = R.from_rotvec([0, 0, np.pi/2]) + >>> r.as_dcm() + array([[ 2.22044605e-16, -1.00000000e+00, 0.00000000e+00], + [ 1.00000000e+00, 2.22044605e-16, 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + >>> r.apply(vector) + array([2.22044605e-16, 1.00000000e+00, 0.00000000e+00]) + >>> r.apply(vector).shape + (3,) + + Single rotation applied on multiple vectors: + + >>> vectors = np.array([ + ... [1, 0, 0], + ... [1, 2, 3]]) + >>> r = R.from_rotvec([0, 0, np.pi/4]) + >>> r.as_dcm() + array([[ 0.70710678, -0.70710678, 0. ], + [ 0.70710678, 0.70710678, 0. ], + [ 0. , 0. , 1. ]]) + >>> r.apply(vectors) + array([[ 0.70710678, 0.70710678, 0. ], + [-0.70710678, 2.12132034, 3. ]]) + >>> r.apply(vectors).shape + (2, 3) + + Multiple rotations on a single vector: + + >>> r = R.from_rotvec([[0, 0, np.pi/4], [np.pi/2, 0, 0]]) + >>> vector = np.array([1,2,3]) + >>> r.as_dcm() + array([[[ 7.07106781e-01, -7.07106781e-01, 0.00000000e+00], + [ 7.07106781e-01, 7.07106781e-01, 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]], + [[ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00], + [ 0.00000000e+00, 2.22044605e-16, -1.00000000e+00], + [ 0.00000000e+00, 1.00000000e+00, 2.22044605e-16]]]) + >>> r.apply(vector) + array([[-0.70710678, 2.12132034, 3. ], + [ 1. , -3. , 2. ]]) + >>> r.apply(vector).shape + (2, 3) + + Multiple rotations on multiple vectors. Each rotation is applied on the + corresponding vector: + + >>> r = R.from_euler('zxy', [ + ... [0, 0, 90], + ... [45, 30, 60]], degrees=True) + >>> vectors = [ + ... [1, 2, 3], + ... [1, 0, -1]] + >>> r.apply(vectors) + array([[ 3. , 2. , -1. ], + [-0.09026039, 1.11237244, -0.86860844]]) + >>> r.apply(vectors).shape + (2, 3) + + It is also possible to apply the inverse rotation: + + >>> r = R.from_euler('zxy', [ + ... [0, 0, 90], + ... [45, 30, 60]], degrees=True) + >>> vectors = [ + ... [1, 2, 3], + ... [1, 0, -1]] + >>> r.apply(vectors, inverse=True) + array([[-3. , 2. , 1. ], + [ 1.09533535, -0.8365163 , 0.3169873 ]]) + + """ + vectors = np.asarray(vectors) + if vectors.ndim > 2 or vectors.shape[-1] != 3: + raise ValueError("Expected input of shape (3,) or (P, 3), " + "got {}.".format(vectors.shape)) + + single_vector = False + if vectors.shape == (3,): + single_vector = True + vectors = vectors[None, :] + + dcm = self.as_dcm() + if self._single: + dcm = dcm[None, :, :] + + n_vectors = vectors.shape[0] + n_rotations = len(self) + + if n_vectors != 1 and n_rotations != 1 and n_vectors != n_rotations: + raise ValueError("Expected equal numbers of rotations and vectors " + ", or a single rotation, or a single vector, got " + "{} rotations and {} vectors.".format( + n_rotations, n_vectors)) + + if inverse: + result = np.einsum('ikj,ik->ij', dcm, vectors) + else: + result = np.einsum('ijk,ik->ij', dcm, vectors) + + if self._single and single_vector: + return result[0] + else: + return result + + def __mul__(self, other): + """Compose this rotation with the other. + + If `p` and `q` are two rotations, then the composition of 'q followed + by p' is equivalent to `p * q`. In terms of DCMs, the composition can + be expressed as `p.as_dcm().dot(q.as_dcm())`. + + Parameters + ---------- + other : `Rotation` instance + Object containing the rotaions to be composed with this one. 
Note + that rotation compositions are not commutative, so `p * q` is + different from `q * p`. + + Returns + ------- + composition : `Rotation` instance + This function supports composition of multiple rotations at a time. + The following cases are possible: + + - Either `p` or `q` contains a single rotation. In this case + `output` contains the result of composing each rotation in the + other object with the single rotation. + - Both `p` and `q` contain `N` rotations. In this case each + rotation `p[i]` is composed with the corresponding rotation + `q[i]` and `output` contains `N` rotations. + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Composition of two single rotations: + + >>> p = R.from_quat([0, 0, 1, 1]) + >>> q = R.from_quat([1, 0, 0, 1]) + >>> p.as_dcm() + array([[ 0., -1., 0.], + [ 1., 0., 0.], + [ 0., 0., 1.]]) + >>> q.as_dcm() + array([[ 1., 0., 0.], + [ 0., 0., -1.], + [ 0., 1., 0.]]) + >>> r = p * q + >>> r.as_dcm() + array([[0., 0., 1.], + [1., 0., 0.], + [0., 1., 0.]]) + + Composition of two objects containing equal number of rotations: + + >>> p = R.from_quat([[0, 0, 1, 1], [1, 0, 0, 1]]) + >>> q = R.from_rotvec([[np.pi/4, 0, 0], [-np.pi/4, 0, np.pi/4]]) + >>> p.as_quat() + array([[0. , 0. , 0.70710678, 0.70710678], + [0.70710678, 0. , 0. , 0.70710678]]) + >>> q.as_quat() + array([[ 0.38268343, 0. , 0. , 0.92387953], + [-0.37282173, 0. , 0.37282173, 0.84971049]]) + >>> r = p * q + >>> r.as_quat() + array([[ 0.27059805, 0.27059805, 0.65328148, 0.65328148], + [ 0.33721128, -0.26362477, 0.26362477, 0.86446082]]) + + """ + if not(len(self) == 1 or len(other) == 1 or len(self) == len(other)): + raise ValueError("Expected equal number of rotations in both " + "or a single rotation in either object, " + "got {} rotations in first and {} rotations in " + "second object.".format( + len(self), len(other))) + result = _compose_quat(self._quat, other._quat) + if self._single and other._single: + result = result[0] + return self.__class__(result, normalized=True, copy=False) + + def inv(self): + """Invert this rotation. + + Composition of a rotation with its inverse results in an identity + transformation. + + Returns + ------- + inverse : `Rotation` instance + Object containing inverse of the rotations in the current instance. + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Inverting a single rotation: + + >>> p = R.from_euler('z', 45, degrees=True) + >>> q = p.inv() + >>> q.as_euler('zyx', degrees=True) + array([-45., 0., 0.]) + + Inverting multiple rotations: + + >>> p = R.from_rotvec([[0, 0, np.pi/3], [-np.pi/4, 0, 0]]) + >>> q = p.inv() + >>> q.as_rotvec() + array([[-0. , -0. , -1.04719755], + [ 0.78539816, -0. , -0. ]]) + + """ + quat = self._quat.copy() + quat[:, -1] *= -1 + if self._single: + quat = quat[0] + return self.__class__(quat, normalized=True, copy=False) + + def __getitem__(self, indexer): + """Extract rotation(s) at given index(es) from object. + + Create a new `Rotation` instance containing a subset of rotations + stored in this object. + + Parameters + ---------- + indexer : index, slice, or index array + Specifies which rotation(s) to extract. A single indexer must be + specified, i.e. as if indexing a 1 dimensional array or list. + + Returns + ------- + rotation : `Rotation` instance + Contains + - a single rotation, if `indexer` is a single index + - a stack of rotation(s), if `indexer` is a slice, or and index + array. 
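The examples below demonstrate a single index and a slice; an integer index array follows the same pattern, since indexing delegates to the underlying quaternion array. A brief sketch with illustrative values:

    >>> from scipy.spatial.transform import Rotation as R
    >>> r = R.from_euler('z', [10, 45, 90], degrees=True)
    >>> r[[0, 2]].as_euler('zyx', degrees=True)
    array([[10., 0., 0.],
           [90., 0., 0.]])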
+ + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + >>> r = R.from_quat([ + ... [1, 1, 0, 0], + ... [0, 1, 0, 1], + ... [1, 1, -1, 0]]) + >>> r.as_quat() + array([[ 0.70710678, 0.70710678, 0. , 0. ], + [ 0. , 0.70710678, 0. , 0.70710678], + [ 0.57735027, 0.57735027, -0.57735027, 0. ]]) + + Indexing using a single index: + + >>> p = r[0] + >>> p.as_quat() + array([0.70710678, 0.70710678, 0. , 0. ]) + + Array slicing: + + >>> q = r[1:3] + >>> q.as_quat() + array([[ 0. , 0.70710678, 0. , 0.70710678], + [ 0.57735027, 0.57735027, -0.57735027, 0. ]]) + + """ + return self.__class__(self._quat[indexer], normalized=True) + + @classmethod + def random(cls, num=None, random_state=None): + """Generate uniformly distributed rotations. + + Parameters + ---------- + num : int or None, optional + Number of random rotations to generate. If None (default), then a + single rotation is generated. + random_state : int, RandomState instance or None, optional + Accepts an `int` as a seed for the random generator or a + RandomState object. If None (default), uses global `np.random` + random state. + + Returns + ------- + random_rotation : `Rotation` instance + Contains a single rotation if `num` is None. Otherwise contains a + stack of `num` rotations. + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + + Sample a single rotation: + + >>> R.random(random_state=1234).as_euler('zxy', degrees=True) + array([-110.5976185 , 55.32758512, 76.3289269 ]) + + Sample a stack of rotations: + + >>> R.random(5, random_state=1234).as_euler('zxy', degrees=True) + array([[-110.5976185 , 55.32758512, 76.3289269 ], + [ -91.59132005, -14.3629884 , -93.91933182], + [ 25.23835501, 45.02035145, -121.67867086], + [ -51.51414184, -15.29022692, -172.46870023], + [ -81.63376847, -27.39521579, 2.60408416]]) + + """ + random_state = check_random_state(random_state) + + if num is None: + sample = random_state.normal(size=4) + else: + sample = random_state.normal(size=(num, 4)) + + return Rotation.from_quat(sample) + + @classmethod + def match_vectors(cls, a, b, weights=None, normalized=False): + """Estimate a rotation to match two sets of vectors. + + Find a rotation between frames A and B which best matches a set of unit + vectors `a` and `b` observed in these frames. The following loss + function is minimized to solve for the direction cosine matrix + :math:`C`: + + .. math:: + + L(C) = \\frac{1}{2} \\sum_{i = 1}^{n} w_i \\lVert \\mathbf{a}_i - + C \\mathbf{b}_i \\rVert^2 , + + where :math:`w_i`'s are the `weights` corresponding to each vector. + + The rotation is estimated using Markley's SVD method [1]_. + + Parameters + ---------- + a : array_like, shape (N, 3) + Vector components observed in initial frame A. Each row of `a` + denotes a vector. + b : array_like, shape (N, 3) + Vector components observed in another frame B. Each row of `b` + denotes a vector. + weights : array_like shape (N,), optional + Weights describing the relative importance of the vectors in + `a`. If None (default), then all values in `weights` are assumed to + be equal. + normalized : boolean, optional + If True, assume input vectors `a` and `b` to have unit norm. If + False, normalize `a` and `b` before estimating rotation. Default + is False. + + Returns + ------- + estimated_rotation : `Rotation` instance + Best estimate of the rotation that transforms `b` to `a`. 
+ sensitivity_matrix : `numpy.ndarray`, shape (3, 3) + Scaled covariance of the attitude errors expressed as the small + rotation vector of frame A. Multiply with harmonic mean [3]_ of + variance in each observation to get true covariance matrix. The + error model is detailed in [2]_. + + References + ---------- + .. [1] F. Landis Markley, + "Attitude determination using vector observations: a fast + optimal matrix algorithm", Journal of Astronautical Sciences, + Vol. 41, No.2, 1993, pp. 261-280. + .. [2] F. Landis Markley, + "Attitude determination using vector observations and the + Singular Value Decomposition", Journal of Astronautical + Sciences, Vol. 38, No.3, 1988, pp. 245-258. + .. [3] `Harmonic Mean <https://en.wikipedia.org/wiki/Harmonic_mean>`_ + + """ + a = np.asarray(a) + if a.ndim != 2 or a.shape[-1] != 3: + raise ValueError("Expected input `a` to have shape (N, 3), " + "got {}".format(a.shape)) + b = np.asarray(b) + if b.ndim != 2 or b.shape[-1] != 3: + raise ValueError("Expected input `b` to have shape (N, 3), " + "got {}.".format(b.shape)) + + if a.shape != b.shape: + raise ValueError("Expected inputs `a` and `b` to have same shapes" + ", got {} and {} respectively.".format( + a.shape, b.shape)) + + if b.shape[0] == 1: + raise ValueError("Rotation cannot be estimated using a single " + "vector.") + + if weights is None: + weights = np.ones(b.shape[0]) + else: + weights = np.asarray(weights) + if weights.ndim != 1: + raise ValueError("Expected `weights` to be 1 dimensional, got " + "shape {}.".format(weights.shape)) + if weights.shape[0] != b.shape[0]: + raise ValueError("Expected `weights` to have number of values " + "equal to number of input vectors, got " + "{} values and {} vectors.".format( + weights.shape[0], b.shape[0])) + weights = weights / np.sum(weights) + + if not normalized: + a = a / scipy.linalg.norm(a, axis=1)[:, None] + b = b / scipy.linalg.norm(b, axis=1)[:, None] + + B = np.einsum('ji,jk->ik', weights[:, None] * a, b) + u, s, vh = np.linalg.svd(B) + C = np.dot(u, vh) + + zeta = (s[0]+s[1]) * (s[1]+s[2]) * (s[2]+s[0]) + if np.abs(zeta) <= 1e-16: + raise ValueError("Three component error vector has infinite " + "covariance. It is impossible to determine the " + "rotation uniquely.") + + kappa = s[0]*s[1] + s[1]*s[2] + s[2]*s[0] + sensitivity = ((kappa * np.eye(3) + np.dot(B, B.T)) / + (zeta * a.shape[0])) + return cls.from_dcm(C), sensitivity + + +class Slerp(object): + """Spherical Linear Interpolation of Rotations. + + The interpolation between consecutive rotations is performed as a rotation + around a fixed axis with a constant angular velocity [1]_. This ensures + that the interpolated rotations follow the shortest path between initial + and final orientations. + + Parameters + ---------- + times : array_like, shape (N,) + Times of the known rotations. At least 2 times must be specified. + rotations : `Rotation` instance + Rotations to perform the interpolation between. Must contain N + rotations. + + Methods + ------- + __call__ + + References + ---------- + .. 
[1] `Quaternion Slerp + <https://en.wikipedia.org/wiki/Slerp#Quaternion_Slerp>`_ + + Examples + -------- + >>> from scipy.spatial.transform import Rotation as R + >>> from scipy.spatial.transform import Slerp + + Setup the fixed keyframe rotations and times: + + >>> key_rots = R.random(5, random_state=2342345) + >>> key_times = [0, 1, 2, 3, 4] + + Create the interpolator object: + + >>> slerp = Slerp(key_times, key_rots) + + Interpolate the rotations at the given times: + + >>> times = [0, 0.5, 0.25, 1, 1.5, 2, 2.75, 3, 3.25, 3.60, 4] + >>> interp_rots = slerp(times) + + The keyframe rotations expressed as Euler angles: + + >>> key_rots.as_euler('xyz', degrees=True) + array([[ 14.31443779, -27.50095894, -3.7275787 ], + [ -1.79924227, -24.69421529, 164.57701743], + [146.15020772, 43.22849451, -31.34891088], + [ 46.39959442, 11.62126073, -45.99719267], + [-88.94647804, -49.64400082, -65.80546984]]) + + The interpolated rotations expressed as Euler angles. These agree with the + keyframe rotations at both endpoints of the range of keyframe times. + + >>> interp_rots.as_euler('xyz', degrees=True) + array([[ 14.31443779, -27.50095894, -3.7275787 ], + [ 4.74588574, -32.44683966, 81.25139984], + [ 10.71094749, -31.56690154, 38.06896408], + [ -1.79924227, -24.69421529, 164.57701743], + [ 11.72796022, 51.64207311, -171.7374683 ], + [ 146.15020772, 43.22849451, -31.34891088], + [ 68.10921869, 20.67625074, -48.74886034], + [ 46.39959442, 11.62126073, -45.99719267], + [ 12.35552615, 4.21525086, -64.89288124], + [ -30.08117143, -19.90769513, -78.98121326], + [ -88.94647804, -49.64400082, -65.80546984]]) + + """ + def __init__(self, times, rotations): + if len(rotations) == 1: + raise ValueError("`rotations` must contain at least 2 rotations.") + + times = np.asarray(times) + if times.ndim != 1: + raise ValueError("Expected times to be specified in a 1 " + "dimensional array, got {} " + "dimensions.".format(times.ndim)) + + if times.shape[0] != len(rotations): + raise ValueError("Expected number of rotations to be equal to " + "number of timestamps given, got {} rotations " + "and {} timestamps.".format( + len(rotations), times.shape[0])) + self.times = times + self.timedelta = np.diff(times) + + if np.any(self.timedelta <= 0): + raise ValueError("Times must be in strictly increasing order.") + + self.rotations = rotations[:-1] + self.rotvecs = (self.rotations.inv() * rotations[1:]).as_rotvec() + + def __call__(self, times): + """Interpolate rotations. + + Compute the interpolated rotations at the given `times`. + + Parameters + ---------- + times : array_like, 1D + Times to compute the interpolations at. + + Returns + ------- + interpolated_rotation : `Rotation` instance + Object containing the rotations computed at given `times`. + + """ + # Clearly differentiate from self.times property + compute_times = np.asarray(times) + if compute_times.ndim != 1: + raise ValueError("Expected times to be specified in a 1 " + "dimensional array, got {} " + "dimensions.".format(compute_times.ndim)) + + # side = 'left' (default) excludes t_min. + ind = np.searchsorted(self.times, compute_times) - 1 + # Include t_min. 
Without this step, index for t_min equals -1 + ind[compute_times == self.times[0]] = 0 + if np.any(np.logical_or(ind < 0, ind > len(self.rotations) - 1)): + raise ValueError("Interpolation times must be within the range " + "[{}, {}], both inclusive.".format( + self.times[0], self.times[-1])) + + alpha = (compute_times - self.times[ind]) / self.timedelta[ind] + + return (self.rotations[ind] * + Rotation.from_rotvec(self.rotvecs[ind] * alpha[:, None])) diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/rotation.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/rotation.pyc new file mode 100644 index 0000000..2a75ba1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/rotation.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/setup.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/setup.py new file mode 100644 index 0000000..6f8a131 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/setup.py @@ -0,0 +1,11 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('transform', parent_package, top_path) + + config.add_data_dir('tests') + + return config diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/setup.pyc new file mode 100644 index 0000000..b3390d9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/__init__.pyc new file mode 100644 index 0000000..008565c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/test_rotation.py b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/test_rotation.py new file mode 100644 index 0000000..7753385 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/test_rotation.py @@ -0,0 +1,945 @@ +from __future__ import division, print_function, absolute_import + +import pytest + +import numpy as np +from numpy.testing import assert_equal, assert_array_almost_equal +from numpy.testing import assert_allclose +from scipy.spatial.transform import Rotation, Slerp +from scipy.stats import special_ortho_group +from itertools import permutations + + +def test_generic_quat_matrix(): + x = np.array([[3, 4, 0, 0], [5, 12, 0, 0]]) + r = Rotation.from_quat(x) + expected_quat = x / np.array([[5], [13]]) + assert_array_almost_equal(r.as_quat(), expected_quat) + + +def test_from_single_1d_quaternion(): + x = np.array([3, 4, 0, 0]) + r = Rotation.from_quat(x) + expected_quat = x / 5 + assert_array_almost_equal(r.as_quat(), expected_quat) + + +def test_from_single_2d_quaternion(): + x = np.array([[3, 4, 0, 0]]) + r = Rotation.from_quat(x) + expected_quat = x / 5 + 
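+    # Editor's note: from_quat normalizes its input; [[3, 4, 0, 0]] has
+    # norm 5, so the stored quaternion is the input divided by 5.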
assert_array_almost_equal(r.as_quat(), expected_quat) + + +def test_from_square_quat_matrix(): + # Ensure proper norm array broadcasting + x = np.array([ + [3, 0, 0, 4], + [5, 0, 12, 0], + [0, 0, 0, 1], + [0, 0, 0, -1] + ]) + r = Rotation.from_quat(x) + expected_quat = x / np.array([[5], [13], [1], [1]]) + assert_array_almost_equal(r.as_quat(), expected_quat) + + +def test_malformed_1d_from_quat(): + with pytest.raises(ValueError): + Rotation.from_quat(np.array([1, 2, 3])) + + +def test_malformed_2d_from_quat(): + with pytest.raises(ValueError): + Rotation.from_quat(np.array([ + [1, 2, 3, 4, 5], + [4, 5, 6, 7, 8] + ])) + + +def test_zero_norms_from_quat(): + x = np.array([ + [3, 4, 0, 0], + [0, 0, 0, 0], + [5, 0, 12, 0] + ]) + with pytest.raises(ValueError): + Rotation.from_quat(x) + + +def test_as_dcm_single_1d_quaternion(): + quat = [0, 0, 0, 1] + mat = Rotation.from_quat(quat).as_dcm() + # mat.shape == (3,3) due to 1d input + assert_array_almost_equal(mat, np.eye(3)) + + +def test_as_dcm_single_2d_quaternion(): + quat = [[0, 0, 1, 1]] + mat = Rotation.from_quat(quat).as_dcm() + assert_equal(mat.shape, (1, 3, 3)) + expected_mat = np.array([ + [0, -1, 0], + [1, 0, 0], + [0, 0, 1] + ]) + assert_array_almost_equal(mat[0], expected_mat) + + +def test_as_dcm_from_square_input(): + quats = [ + [0, 0, 1, 1], + [0, 1, 0, 1], + [0, 0, 0, 1], + [0, 0, 0, -1] + ] + mat = Rotation.from_quat(quats).as_dcm() + assert_equal(mat.shape, (4, 3, 3)) + + expected0 = np.array([ + [0, -1, 0], + [1, 0, 0], + [0, 0, 1] + ]) + assert_array_almost_equal(mat[0], expected0) + + expected1 = np.array([ + [0, 0, 1], + [0, 1, 0], + [-1, 0, 0] + ]) + assert_array_almost_equal(mat[1], expected1) + + assert_array_almost_equal(mat[2], np.eye(3)) + assert_array_almost_equal(mat[3], np.eye(3)) + + +def test_as_dcm_from_generic_input(): + quats = [ + [0, 0, 1, 1], + [0, 1, 0, 1], + [1, 2, 3, 4] + ] + mat = Rotation.from_quat(quats).as_dcm() + assert_equal(mat.shape, (3, 3, 3)) + + expected0 = np.array([ + [0, -1, 0], + [1, 0, 0], + [0, 0, 1] + ]) + assert_array_almost_equal(mat[0], expected0) + + expected1 = np.array([ + [0, 0, 1], + [0, 1, 0], + [-1, 0, 0] + ]) + assert_array_almost_equal(mat[1], expected1) + + expected2 = np.array([ + [0.4, -2, 2.2], + [2.8, 1, 0.4], + [-1, 2, 2] + ]) / 3 + assert_array_almost_equal(mat[2], expected2) + + +def test_from_single_2d_dcm(): + dcm = [ + [0, 0, 1], + [1, 0, 0], + [0, 1, 0] + ] + expected_quat = [0.5, 0.5, 0.5, 0.5] + assert_array_almost_equal( + Rotation.from_dcm(dcm).as_quat(), + expected_quat) + + +def test_from_single_3d_dcm(): + dcm = np.array([ + [0, 0, 1], + [1, 0, 0], + [0, 1, 0] + ]).reshape((1, 3, 3)) + expected_quat = np.array([0.5, 0.5, 0.5, 0.5]).reshape((1, 4)) + assert_array_almost_equal( + Rotation.from_dcm(dcm).as_quat(), + expected_quat) + + +def test_from_dcm_calculation(): + expected_quat = np.array([1, 1, 6, 1]) / np.sqrt(39) + dcm = np.array([ + [-0.8974359, -0.2564103, 0.3589744], + [0.3589744, -0.8974359, 0.2564103], + [0.2564103, 0.3589744, 0.8974359] + ]) + assert_array_almost_equal( + Rotation.from_dcm(dcm).as_quat(), + expected_quat) + assert_array_almost_equal( + Rotation.from_dcm(dcm.reshape((1, 3, 3))).as_quat(), + expected_quat.reshape((1, 4))) + + +def test_dcm_calculation_pipeline(): + dcm = special_ortho_group.rvs(3, size=10, random_state=0) + assert_array_almost_equal(Rotation.from_dcm(dcm).as_dcm(), dcm) + + +def test_from_dcm_ortho_output(): + np.random.seed(0) + dcm = np.random.random((100, 3, 3)) + ortho_dcm = Rotation.from_dcm(dcm).as_dcm() 
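+    # from_dcm converts each arbitrary random matrix to a unit quaternion
+    # and back, so every returned matrix should be orthogonal; the einsum
+    # below checks C times C-transpose against the identity.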
+ + mult_result = np.einsum('...ij,...jk->...ik', ortho_dcm, + ortho_dcm.transpose((0, 2, 1))) + + eye3d = np.zeros((100, 3, 3)) + for i in range(3): + eye3d[:, i, i] = 1.0 + + assert_array_almost_equal(mult_result, eye3d) + + +def test_from_1d_single_rotvec(): + rotvec = [1, 0, 0] + expected_quat = np.array([0.4794255, 0, 0, 0.8775826]) + result = Rotation.from_rotvec(rotvec) + assert_array_almost_equal(result.as_quat(), expected_quat) + + +def test_from_2d_single_rotvec(): + rotvec = [[1, 0, 0]] + expected_quat = np.array([[0.4794255, 0, 0, 0.8775826]]) + result = Rotation.from_rotvec(rotvec) + assert_array_almost_equal(result.as_quat(), expected_quat) + + +def test_from_generic_rotvec(): + rotvec = [ + [1, 2, 2], + [1, -1, 0.5], + [0, 0, 0] + ] + expected_quat = np.array([ + [0.3324983, 0.6649967, 0.6649967, 0.0707372], + [0.4544258, -0.4544258, 0.2272129, 0.7316889], + [0, 0, 0, 1] + ]) + assert_array_almost_equal( + Rotation.from_rotvec(rotvec).as_quat(), + expected_quat) + + +def test_from_rotvec_small_angle(): + rotvec = np.array([ + [5e-4 / np.sqrt(3), -5e-4 / np.sqrt(3), 5e-4 / np.sqrt(3)], + [0.2, 0.3, 0.4], + [0, 0, 0] + ]) + + quat = Rotation.from_rotvec(rotvec).as_quat() + # cos(theta/2) ~~ 1 for small theta + assert_allclose(quat[0, 3], 1) + # sin(theta/2) / theta ~~ 0.5 for small theta + assert_allclose(quat[0, :3], rotvec[0] * 0.5) + + assert_allclose(quat[1, 3], 0.9639685) + assert_allclose( + quat[1, :3], + np.array([ + 0.09879603932153465, + 0.14819405898230198, + 0.19759207864306931 + ])) + + assert_equal(quat[2], np.array([0, 0, 0, 1])) + + +def test_malformed_1d_from_rotvec(): + with pytest.raises(ValueError, match='Expected `rot_vec` to have shape'): + Rotation.from_rotvec([1, 2]) + + +def test_malformed_2d_from_rotvec(): + with pytest.raises(ValueError, match='Expected `rot_vec` to have shape'): + Rotation.from_rotvec([ + [1, 2, 3, 4], + [5, 6, 7, 8] + ]) + + +def test_as_generic_rotvec(): + quat = np.array([ + [1, 2, -1, 0.5], + [1, -1, 1, 0.0003], + [0, 0, 0, 1] + ]) + quat /= np.linalg.norm(quat, axis=1)[:, None] + + rotvec = Rotation.from_quat(quat).as_rotvec() + angle = np.linalg.norm(rotvec, axis=1) + + assert_allclose(quat[:, 3], np.cos(angle/2)) + assert_allclose(np.cross(rotvec, quat[:, :3]), np.zeros((3, 3))) + + +def test_as_rotvec_single_1d_input(): + quat = np.array([1, 2, -3, 2]) + expected_rotvec = np.array([0.5772381, 1.1544763, -1.7317144]) + + actual_rotvec = Rotation.from_quat(quat).as_rotvec() + + assert_equal(actual_rotvec.shape, (3,)) + assert_allclose(actual_rotvec, expected_rotvec) + + +def test_as_rotvec_single_2d_input(): + quat = np.array([[1, 2, -3, 2]]) + expected_rotvec = np.array([[0.5772381, 1.1544763, -1.7317144]]) + + actual_rotvec = Rotation.from_quat(quat).as_rotvec() + + assert_equal(actual_rotvec.shape, (1, 3)) + assert_allclose(actual_rotvec, expected_rotvec) + + +def test_rotvec_calc_pipeline(): + # Include small angles + rotvec = np.array([ + [0, 0, 0], + [1, -1, 2], + [-3e-4, 3.5e-4, 7.5e-5] + ]) + assert_allclose(Rotation.from_rotvec(rotvec).as_rotvec(), rotvec) + + +def test_from_euler_single_rotation(): + quat = Rotation.from_euler('z', 90, degrees=True).as_quat() + expected_quat = np.array([0, 0, 1, 1]) / np.sqrt(2) + assert_allclose(quat, expected_quat) + + +def test_single_intrinsic_extrinsic_rotation(): + extrinsic = Rotation.from_euler('z', 90, degrees=True).as_dcm() + intrinsic = Rotation.from_euler('Z', 90, degrees=True).as_dcm() + assert_allclose(extrinsic, intrinsic) + + +def test_from_euler_rotation_order(): + # 
Intrinsic rotation is same as extrinsic with order reversed + np.random.seed(0) + a = np.random.randint(low=0, high=180, size=(6, 3)) + b = a[:, ::-1] + x = Rotation.from_euler('xyz', a, degrees=True).as_quat() + y = Rotation.from_euler('ZYX', b, degrees=True).as_quat() + assert_allclose(x, y) + + +def test_from_euler_elementary_extrinsic_rotation(): + # Simple test to check if extrinsic rotations are implemented correctly + dcm = Rotation.from_euler('zx', [90, 90], degrees=True).as_dcm() + expected_dcm = np.array([ + [0, -1, 0], + [0, 0, -1], + [1, 0, 0] + ]) + assert_array_almost_equal(dcm, expected_dcm) + + +def test_from_euler_intrinsic_rotation_312(): + angles = [ + [30, 60, 45], + [30, 60, 30], + [45, 30, 60] + ] + dcm = Rotation.from_euler('ZXY', angles, degrees=True).as_dcm() + + assert_array_almost_equal(dcm[0], np.array([ + [0.3061862, -0.2500000, 0.9185587], + [0.8838835, 0.4330127, -0.1767767], + [-0.3535534, 0.8660254, 0.3535534] + ])) + + assert_array_almost_equal(dcm[1], np.array([ + [0.5334936, -0.2500000, 0.8080127], + [0.8080127, 0.4330127, -0.3995191], + [-0.2500000, 0.8660254, 0.4330127] + ])) + + assert_array_almost_equal(dcm[2], np.array([ + [0.0473672, -0.6123725, 0.7891491], + [0.6597396, 0.6123725, 0.4355958], + [-0.7500000, 0.5000000, 0.4330127] + ])) + + +def test_from_euler_intrinsic_rotation_313(): + angles = [ + [30, 60, 45], + [30, 60, 30], + [45, 30, 60] + ] + dcm = Rotation.from_euler('ZXZ', angles, degrees=True).as_dcm() + + assert_array_almost_equal(dcm[0], np.array([ + [0.43559574, -0.78914913, 0.4330127], + [0.65973961, -0.04736717, -0.750000], + [0.61237244, 0.61237244, 0.500000] + ])) + + assert_array_almost_equal(dcm[1], np.array([ + [0.6250000, -0.64951905, 0.4330127], + [0.64951905, 0.1250000, -0.750000], + [0.4330127, 0.750000, 0.500000] + ])) + + assert_array_almost_equal(dcm[2], np.array([ + [-0.1767767, -0.91855865, 0.35355339], + [0.88388348, -0.30618622, -0.35355339], + [0.4330127, 0.25000000, 0.8660254] + ])) + + +def test_from_euler_extrinsic_rotation_312(): + angles = [ + [30, 60, 45], + [30, 60, 30], + [45, 30, 60] + ] + dcm = Rotation.from_euler('zxy', angles, degrees=True).as_dcm() + + assert_array_almost_equal(dcm[0], np.array([ + [0.91855865, 0.1767767, 0.35355339], + [0.25000000, 0.4330127, -0.8660254], + [-0.30618622, 0.88388348, 0.35355339] + ])) + + assert_array_almost_equal(dcm[1], np.array([ + [0.96650635, -0.0580127, 0.2500000], + [0.25000000, 0.4330127, -0.8660254], + [-0.0580127, 0.89951905, 0.4330127] + ])) + + assert_array_almost_equal(dcm[2], np.array([ + [0.65973961, -0.04736717, 0.7500000], + [0.61237244, 0.61237244, -0.5000000], + [-0.43559574, 0.78914913, 0.4330127] + ])) + + +def test_from_euler_extrinsic_rotation_313(): + angles = [ + [30, 60, 45], + [30, 60, 30], + [45, 30, 60] + ] + dcm = Rotation.from_euler('zxz', angles, degrees=True).as_dcm() + + assert_array_almost_equal(dcm[0], np.array([ + [0.43559574, -0.65973961, 0.61237244], + [0.78914913, -0.04736717, -0.61237244], + [0.4330127, 0.75000000, 0.500000] + ])) + + assert_array_almost_equal(dcm[1], np.array([ + [0.62500000, -0.64951905, 0.4330127], + [0.64951905, 0.12500000, -0.750000], + [0.4330127, 0.75000000, 0.500000] + ])) + + assert_array_almost_equal(dcm[2], np.array([ + [-0.1767767, -0.88388348, 0.4330127], + [0.91855865, -0.30618622, -0.250000], + [0.35355339, 0.35355339, 0.8660254] + ])) + + +def test_as_euler_asymmetric_axes(): + np.random.seed(0) + n = 10 + angles = np.empty((n, 3)) + angles[:, 0] = np.random.uniform(low=-np.pi, high=np.pi, 
size=(n,)) + angles[:, 1] = np.random.uniform(low=-np.pi / 2, high=np.pi / 2, size=(n,)) + angles[:, 2] = np.random.uniform(low=-np.pi, high=np.pi, size=(n,)) + + for seq_tuple in permutations('xyz'): + # Extrinsic rotations + seq = ''.join(seq_tuple) + assert_allclose(angles, Rotation.from_euler(seq, angles).as_euler(seq)) + # Intrinsic rotations + seq = seq.upper() + assert_allclose(angles, Rotation.from_euler(seq, angles).as_euler(seq)) + + +def test_as_euler_symmetric_axes(): + np.random.seed(0) + n = 10 + angles = np.empty((n, 3)) + angles[:, 0] = np.random.uniform(low=-np.pi, high=np.pi, size=(n,)) + angles[:, 1] = np.random.uniform(low=0, high=np.pi, size=(n,)) + angles[:, 2] = np.random.uniform(low=-np.pi, high=np.pi, size=(n,)) + + for axis1 in ['x', 'y', 'z']: + for axis2 in ['x', 'y', 'z']: + if axis1 == axis2: + continue + # Extrinsic rotations + seq = axis1 + axis2 + axis1 + assert_allclose( + angles, Rotation.from_euler(seq, angles).as_euler(seq)) + # Intrinsic rotations + seq = seq.upper() + assert_allclose( + angles, Rotation.from_euler(seq, angles).as_euler(seq)) + + +def test_as_euler_degenerate_asymmetric_axes(): + # Since we cannot check for angle equality, we check for dcm equality + angles = np.array([ + [45, 90, 35], + [35, -90, 20], + [35, 90, 25], + [25, -90, 15] + ]) + + with pytest.warns(UserWarning, match="Gimbal lock"): + for seq_tuple in permutations('xyz'): + # Extrinsic rotations + seq = ''.join(seq_tuple) + rotation = Rotation.from_euler(seq, angles, degrees=True) + dcm_expected = rotation.as_dcm() + + angle_estimates = rotation.as_euler(seq, degrees=True) + dcm_estimated = Rotation.from_euler( + seq, angle_estimates, degrees=True + ).as_dcm() + + assert_array_almost_equal(dcm_expected, dcm_estimated) + + # Intrinsic rotations + seq = seq.upper() + rotation = Rotation.from_euler(seq, angles, degrees=True) + dcm_expected = rotation.as_dcm() + + angle_estimates = rotation.as_euler(seq, degrees=True) + dcm_estimated = Rotation.from_euler( + seq, angle_estimates, degrees=True + ).as_dcm() + + assert_array_almost_equal(dcm_expected, dcm_estimated) + + +def test_as_euler_degenerate_symmetric_axes(): + # Since we cannot check for angle equality, we check for dcm equality + angles = np.array([ + [15, 0, 60], + [35, 0, 75], + [60, 180, 35], + [15, -180, 25], + ]) + + with pytest.warns(UserWarning, match="Gimbal lock"): + for axis1 in ['x', 'y', 'z']: + for axis2 in ['x', 'y', 'z']: + if axis1 == axis2: + continue + + # Extrinsic rotations + seq = axis1 + axis2 + axis1 + rotation = Rotation.from_euler(seq, angles, degrees=True) + dcm_expected = rotation.as_dcm() + + angle_estimates = rotation.as_euler(seq, degrees=True) + dcm_estimated = Rotation.from_euler( + seq, angle_estimates, degrees=True + ).as_dcm() + + assert_array_almost_equal(dcm_expected, dcm_estimated) + + # Intrinsic rotations + seq = seq.upper() + rotation = Rotation.from_euler(seq, angles, degrees=True) + dcm_expected = rotation.as_dcm() + + angle_estimates = rotation.as_euler(seq, degrees=True) + dcm_estimated = Rotation.from_euler( + seq, angle_estimates, degrees=True + ).as_dcm() + + assert_array_almost_equal(dcm_expected, dcm_estimated) + + +def test_inv(): + np.random.seed(0) + n = 10 + p = Rotation.from_quat(np.random.normal(size=(n, 4))) + q = p.inv() + + p_dcm = p.as_dcm() + q_dcm = q.as_dcm() + result1 = np.einsum('...ij,...jk->...ik', p_dcm, q_dcm) + result2 = np.einsum('...ij,...jk->...ik', q_dcm, p_dcm) + + eye3d = np.empty((n, 3, 3)) + eye3d[:] = np.eye(3) + + 
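+    # Composing a rotation with its inverse, in either order, must yield
+    # the identity rotation.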
assert_array_almost_equal(result1, eye3d) + assert_array_almost_equal(result2, eye3d) + + +def test_inv_single_rotation(): + np.random.seed(0) + p = Rotation.from_quat(np.random.normal(size=(4,))) + q = p.inv() + + p_dcm = p.as_dcm() + q_dcm = q.as_dcm() + res1 = np.dot(p_dcm, q_dcm) + res2 = np.dot(q_dcm, p_dcm) + + eye = np.eye(3) + + assert_array_almost_equal(res1, eye) + assert_array_almost_equal(res2, eye) + + x = Rotation.from_quat(np.random.normal(size=(1, 4))) + y = x.inv() + + x_dcm = x.as_dcm() + y_dcm = y.as_dcm() + result1 = np.einsum('...ij,...jk->...ik', x_dcm, y_dcm) + result2 = np.einsum('...ij,...jk->...ik', y_dcm, x_dcm) + + eye3d = np.empty((1, 3, 3)) + eye3d[:] = np.eye(3) + + assert_array_almost_equal(result1, eye3d) + assert_array_almost_equal(result2, eye3d) + + +def test_apply_single_rotation_single_point(): + dcm = np.array([ + [0, -1, 0], + [1, 0, 0], + [0, 0, 1] + ]) + r_1d = Rotation.from_dcm(dcm) + r_2d = Rotation.from_dcm(np.expand_dims(dcm, axis=0)) + + v_1d = np.array([1, 2, 3]) + v_2d = np.expand_dims(v_1d, axis=0) + v1d_rotated = np.array([-2, 1, 3]) + v2d_rotated = np.expand_dims(v1d_rotated, axis=0) + + assert_allclose(r_1d.apply(v_1d), v1d_rotated) + assert_allclose(r_1d.apply(v_2d), v2d_rotated) + assert_allclose(r_2d.apply(v_1d), v2d_rotated) + assert_allclose(r_2d.apply(v_2d), v2d_rotated) + + v1d_inverse = np.array([2, -1, 3]) + v2d_inverse = np.expand_dims(v1d_inverse, axis=0) + + assert_allclose(r_1d.apply(v_1d, inverse=True), v1d_inverse) + assert_allclose(r_1d.apply(v_2d, inverse=True), v2d_inverse) + assert_allclose(r_2d.apply(v_1d, inverse=True), v2d_inverse) + assert_allclose(r_2d.apply(v_2d, inverse=True), v2d_inverse) + + +def test_apply_single_rotation_multiple_points(): + dcm = np.array([ + [0, -1, 0], + [1, 0, 0], + [0, 0, 1] + ]) + r1 = Rotation.from_dcm(dcm) + r2 = Rotation.from_dcm(np.expand_dims(dcm, axis=0)) + + v = np.array([[1, 2, 3], [4, 5, 6]]) + v_rotated = np.array([[-2, 1, 3], [-5, 4, 6]]) + + assert_allclose(r1.apply(v), v_rotated) + assert_allclose(r2.apply(v), v_rotated) + + v_inverse = np.array([[2, -1, 3], [5, -4, 6]]) + + assert_allclose(r1.apply(v, inverse=True), v_inverse) + assert_allclose(r2.apply(v, inverse=True), v_inverse) + + +def test_apply_multiple_rotations_single_point(): + dcm = np.empty((2, 3, 3)) + dcm[0] = np.array([ + [0, -1, 0], + [1, 0, 0], + [0, 0, 1] + ]) + dcm[1] = np.array([ + [1, 0, 0], + [0, 0, -1], + [0, 1, 0] + ]) + r = Rotation.from_dcm(dcm) + + v1 = np.array([1, 2, 3]) + v2 = np.expand_dims(v1, axis=0) + + v_rotated = np.array([[-2, 1, 3], [1, -3, 2]]) + + assert_allclose(r.apply(v1), v_rotated) + assert_allclose(r.apply(v2), v_rotated) + + v_inverse = np.array([[2, -1, 3], [1, 3, -2]]) + + assert_allclose(r.apply(v1, inverse=True), v_inverse) + assert_allclose(r.apply(v2, inverse=True), v_inverse) + + +def test_apply_multiple_rotations_multiple_points(): + dcm = np.empty((2, 3, 3)) + dcm[0] = np.array([ + [0, -1, 0], + [1, 0, 0], + [0, 0, 1] + ]) + dcm[1] = np.array([ + [1, 0, 0], + [0, 0, -1], + [0, 1, 0] + ]) + r = Rotation.from_dcm(dcm) + + v = np.array([[1, 2, 3], [4, 5, 6]]) + v_rotated = np.array([[-2, 1, 3], [4, -6, 5]]) + assert_allclose(r.apply(v), v_rotated) + + v_inverse = np.array([[2, -1, 3], [4, 6, -5]]) + assert_allclose(r.apply(v, inverse=True), v_inverse) + + +def test_getitem(): + dcm = np.empty((2, 3, 3)) + dcm[0] = np.array([ + [0, -1, 0], + [1, 0, 0], + [0, 0, 1] + ]) + dcm[1] = np.array([ + [1, 0, 0], + [0, 0, -1], + [0, 1, 0] + ]) + r = Rotation.from_dcm(dcm) + + 
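+    # A single index returns one rotation; a slice keeps the leading stack
+    # dimension, hence the expand_dims on the expected matrix below.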
assert_allclose(r[0].as_dcm(), dcm[0])
+    assert_allclose(r[1].as_dcm(), dcm[1])
+    assert_allclose(r[:-1].as_dcm(), np.expand_dims(dcm[0], axis=0))
+
+
+def test_n_rotations():
+    dcm = np.empty((2, 3, 3))
+    dcm[0] = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+    ])
+    dcm[1] = np.array([
+        [1, 0, 0],
+        [0, 0, -1],
+        [0, 1, 0]
+    ])
+    r = Rotation.from_dcm(dcm)
+
+    assert_equal(len(r), 2)
+    assert_equal(len(r[0]), 1)
+    assert_equal(len(r[1]), 1)
+    assert_equal(len(r[:-1]), 1)
+
+
+def test_quat_ownership():
+    # Ensure that users cannot accidentally corrupt the object
+    quat = np.array([
+        [1, 0, 0, 0],
+        [0, 1, 0, 0],
+        [0, 0, 1, 0]
+    ])
+    r = Rotation.from_quat(quat, normalized=True)
+    s = r[0:2]
+
+    r._quat[0] = np.array([0, -1, 0, 0])
+    assert_allclose(s._quat[0], np.array([1, 0, 0, 0]))
+
+
+def test_match_vectors_no_rotation():
+    x = np.array([[1, 2, 3], [4, 5, 6]])
+    y = x.copy()
+
+    r, p = Rotation.match_vectors(x, y)
+    assert_array_almost_equal(r.as_dcm(), np.eye(3))
+
+
+def test_match_vectors_no_noise():
+    np.random.seed(0)
+    c = Rotation.from_quat(np.random.normal(size=4))
+    b = np.random.normal(size=(5, 3))
+    a = c.apply(b)
+
+    est, cov = Rotation.match_vectors(a, b)
+    assert_allclose(c.as_quat(), est.as_quat())
+
+
+def test_match_vectors_noise():
+    np.random.seed(0)
+    n_vectors = 100
+    rot = Rotation.from_euler('xyz', np.random.normal(size=3))
+    vectors = np.random.normal(size=(n_vectors, 3))
+    result = rot.apply(vectors)
+
+    # The paper adds noise as independently distributed angular errors
+    sigma = np.deg2rad(1)
+    tolerance = 1.5 * sigma
+    noise = Rotation.from_rotvec(
+        np.random.normal(
+            size=(n_vectors, 3),
+            scale=sigma
+        )
+    )
+
+    # Attitude errors must preserve norm. Hence apply individual random
+    # rotations to each vector.
+    noisy_result = noise.apply(result)
+
+    est, cov = Rotation.match_vectors(noisy_result, vectors)
+
+    # Use rotation compositions to quantify how close the estimate is
+    error_vector = (rot * est.inv()).as_rotvec()
+    assert_allclose(error_vector[0], 0, atol=tolerance)
+    assert_allclose(error_vector[1], 0, atol=tolerance)
+    assert_allclose(error_vector[2], 0, atol=tolerance)
+
+    # Check error bounds using covariance matrix
+    cov *= sigma
+    assert_allclose(cov[0, 0], 0, atol=tolerance)
+    assert_allclose(cov[1, 1], 0, atol=tolerance)
+    assert_allclose(cov[2, 2], 0, atol=tolerance)
+
+
+def test_random_rotation_shape():
+    assert_equal(Rotation.random().as_quat().shape, (4,))
+    assert_equal(Rotation.random(None).as_quat().shape, (4,))
+
+    assert_equal(Rotation.random(1).as_quat().shape, (1, 4))
+    assert_equal(Rotation.random(5).as_quat().shape, (5, 4))
+
+
+def test_slerp():
+    np.random.seed(0)
+
+    key_rots = Rotation.from_quat(np.random.uniform(size=(5, 4)))
+    key_quats = key_rots.as_quat()
+
+    key_times = [0, 1, 2, 3, 4]
+    interpolator = Slerp(key_times, key_rots)
+
+    times = [0, 0.5, 0.25, 1, 1.5, 2, 2.75, 3, 3.25, 3.60, 4]
+    interp_rots = interpolator(times)
+    interp_quats = interp_rots.as_quat()
+
+    # Dot products are affected by sign of quaternions
+    interp_quats[interp_quats[:, -1] < 0] *= -1
+    # To check for quaternion equality, perform the same operation
+    key_quats[key_quats[:, -1] < 0] *= -1
+
+    # Equality at keyframes, including both endpoints
+    assert_allclose(interp_quats[0], key_quats[0])
+    assert_allclose(interp_quats[3], key_quats[1])
+    assert_allclose(interp_quats[5], key_quats[2])
+    assert_allclose(interp_quats[7], key_quats[3])
+    assert_allclose(interp_quats[10], key_quats[4])
+
+    # Constant angular velocity between keyframes. 
Check by equating + # cos(theta) between quaternion pairs with equal time difference. + cos_theta1 = np.sum(interp_quats[0] * interp_quats[2]) + cos_theta2 = np.sum(interp_quats[2] * interp_quats[1]) + assert_allclose(cos_theta1, cos_theta2) + + cos_theta4 = np.sum(interp_quats[3] * interp_quats[4]) + cos_theta5 = np.sum(interp_quats[4] * interp_quats[5]) + assert_allclose(cos_theta4, cos_theta5) + + # theta1: 0 -> 0.25, theta3 : 0.5 -> 1 + # Use double angle formula for double the time difference + cos_theta3 = np.sum(interp_quats[1] * interp_quats[3]) + assert_allclose(cos_theta3, 2 * (cos_theta1**2) - 1) + + # Miscellaneous checks + assert_equal(len(interp_rots), len(times)) + + +def test_slerp_single_rot(): + with pytest.raises(ValueError, match="at least 2 rotations"): + r = Rotation.from_quat([1, 2, 3, 4]) + Slerp([1], r) + + +def test_slerp_time_dim_mismatch(): + with pytest.raises(ValueError, + match="times to be specified in a 1 dimensional array"): + np.random.seed(0) + r = Rotation.from_quat(np.random.uniform(size=(2, 4))) + t = np.array([[1], + [2]]) + Slerp(t, r) + + +def test_slerp_num_rotations_mismatch(): + with pytest.raises(ValueError, match="number of rotations to be equal to " + "number of timestamps"): + np.random.seed(0) + r = Rotation.from_quat(np.random.uniform(size=(5, 4))) + t = np.arange(7) + Slerp(t, r) + + +def test_slerp_equal_times(): + with pytest.raises(ValueError, match="strictly increasing order"): + np.random.seed(0) + r = Rotation.from_quat(np.random.uniform(size=(5, 4))) + t = [0, 1, 2, 2, 4] + Slerp(t, r) + + +def test_slerp_decreasing_times(): + with pytest.raises(ValueError, match="strictly increasing order"): + np.random.seed(0) + r = Rotation.from_quat(np.random.uniform(size=(5, 4))) + t = [0, 1, 3, 2, 4] + Slerp(t, r) + + +def test_slerp_call_time_dim_mismatch(): + np.random.seed(0) + r = Rotation.from_quat(np.random.uniform(size=(5, 4))) + t = np.arange(5) + s = Slerp(t, r) + + with pytest.raises(ValueError, + match="times to be specified in a 1 dimensional array"): + interp_times = np.array([[3.5], + [4.2]]) + s(interp_times) + + +def test_slerp_call_time_out_of_range(): + np.random.seed(0) + r = Rotation.from_quat(np.random.uniform(size=(5, 4))) + t = np.arange(5) + 1 + s = Slerp(t, r) + + with pytest.raises(ValueError, match="times must be within the range"): + s([0, 1, 2]) + with pytest.raises(ValueError, match="times must be within the range"): + s([1, 2, 6]) diff --git a/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/test_rotation.pyc b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/test_rotation.pyc new file mode 100644 index 0000000..cf6cf06 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/spatial/transform/tests/test_rotation.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special.pxd b/project/venv/lib/python2.7/site-packages/scipy/special.pxd new file mode 100644 index 0000000..62cb828 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special.pxd @@ -0,0 +1 @@ +from .special cimport cython_special diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/special/__init__.py new file mode 100644 index 0000000..0a937f0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/__init__.py @@ -0,0 +1,668 @@ +""" +======================================== +Special functions (:mod:`scipy.special`) +======================================== + 
+
+.. module:: scipy.special
+
+Nearly all of the functions below are universal functions and follow
+broadcasting and automatic array-looping rules. Exceptions are
+noted.
+
+.. seealso::
+
+   `scipy.special.cython_special` -- Typed Cython versions of special functions
+
+
+Error handling
+==============
+
+Errors are handled by returning NaNs or other appropriate values.
+Some of the special function routines can emit warnings or raise
+exceptions when an error occurs. By default this is disabled; to
+query and control the current error handling state the following
+functions are provided.
+
+.. autosummary::
+   :toctree: generated/
+
+   geterr -- Get the current way of handling special-function errors.
+   seterr -- Set how special-function errors are handled.
+   errstate -- Context manager for special-function error handling.
+   SpecialFunctionWarning -- Warning that can be emitted by special functions.
+   SpecialFunctionError -- Exception that can be raised by special functions.
+
+Available functions
+===================
+
+Airy functions
+--------------
+
+.. autosummary::
+   :toctree: generated/
+
+   airy -- Airy functions and their derivatives.
+   airye -- Exponentially scaled Airy functions and their derivatives.
+   ai_zeros -- [+]Compute `nt` zeros and values of the Airy function Ai and its derivative.
+   bi_zeros -- [+]Compute `nt` zeros and values of the Airy function Bi and its derivative.
+   itairy -- Integrals of Airy functions.
+
+
+Elliptic Functions and Integrals
+--------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ellipj -- Jacobian elliptic functions.
+   ellipk -- Complete elliptic integral of the first kind.
+   ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1.
+   ellipkinc -- Incomplete elliptic integral of the first kind.
+   ellipe -- Complete elliptic integral of the second kind.
+   ellipeinc -- Incomplete elliptic integral of the second kind.
+
+Bessel Functions
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   jv -- Bessel function of the first kind of real order and complex argument.
+   jve -- Exponentially scaled Bessel function of order `v`.
+   yn -- Bessel function of the second kind of integer order and real argument.
+   yv -- Bessel function of the second kind of real order and complex argument.
+   yve -- Exponentially scaled Bessel function of the second kind of real order.
+   kn -- Modified Bessel function of the second kind of integer order `n`.
+   kv -- Modified Bessel function of the second kind of real order `v`.
+   kve -- Exponentially scaled modified Bessel function of the second kind.
+   iv -- Modified Bessel function of the first kind of real order.
+   ive -- Exponentially scaled modified Bessel function of the first kind.
+   hankel1 -- Hankel function of the first kind.
+   hankel1e -- Exponentially scaled Hankel function of the first kind.
+   hankel2 -- Hankel function of the second kind.
+   hankel2e -- Exponentially scaled Hankel function of the second kind.
+
+The following is not a universal function:
+
+.. autosummary::
+   :toctree: generated/
+
+   lmbda -- [+]Jahnke-Emden Lambda function, Lambdav(x).
+
+Zeros of Bessel Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These are not universal functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   jnjnp_zeros -- [+]Compute zeros of integer-order Bessel functions Jn and Jn'.
+   jnyn_zeros -- [+]Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
+   jn_zeros -- [+]Compute zeros of integer-order Bessel function Jn(x).
+   jnp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Jn'(x).
+   yn_zeros -- [+]Compute zeros of integer-order Bessel function Yn(x).
+   ynp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Yn'(x).
+   y0_zeros -- [+]Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
+   y1_zeros -- [+]Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
+   y1p_zeros -- [+]Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
+
+Faster versions of common Bessel Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   j0 -- Bessel function of the first kind of order 0.
+   j1 -- Bessel function of the first kind of order 1.
+   y0 -- Bessel function of the second kind of order 0.
+   y1 -- Bessel function of the second kind of order 1.
+   i0 -- Modified Bessel function of order 0.
+   i0e -- Exponentially scaled modified Bessel function of order 0.
+   i1 -- Modified Bessel function of order 1.
+   i1e -- Exponentially scaled modified Bessel function of order 1.
+   k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
+   k0e -- Exponentially scaled modified Bessel function K of order 0.
+   k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
+   k1e -- Exponentially scaled modified Bessel function K of order 1.
+
+Integrals of Bessel Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   itj0y0 -- Integrals of Bessel functions of order 0.
+   it2j0y0 -- Integrals related to Bessel functions of order 0.
+   iti0k0 -- Integrals of modified Bessel functions of order 0.
+   it2i0k0 -- Integrals related to modified Bessel functions of order 0.
+   besselpoly -- [+]Weighted integral of a Bessel function.
+
+Derivatives of Bessel Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
+   yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
+   kvp -- Compute nth derivative of real-order modified Bessel function Kv(z).
+   ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
+   h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
+   h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
+
+Spherical Bessel Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   spherical_jn -- Spherical Bessel function of the first kind or its derivative.
+   spherical_yn -- Spherical Bessel function of the second kind or its derivative.
+   spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
+   spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
+
+Riccati-Bessel Functions
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+These are not universal functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   riccati_jn -- [+]Compute Riccati-Bessel function of the first kind and its derivative.
+   riccati_yn -- [+]Compute Riccati-Bessel function of the second kind and its derivative.
+
+Struve Functions
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   struve -- Struve function.
+   modstruve -- Modified Struve function.
+   itstruve0 -- Integral of the Struve function of order 0.
+   it2struve0 -- Integral related to the Struve function of order 0.
+   itmodstruve0 -- Integral of the modified Struve function of order 0.
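+
+As a brief illustration of the broadcasting rules mentioned above (an
+editor's sketch, not part of the upstream SciPy text), orders and
+arguments of a universal function such as `jv` broadcast against each
+other like ordinary NumPy arrays:
+
+>>> import numpy as np
+>>> from scipy.special import jv
+>>> orders = np.array([[0], [1]])  # shape (2, 1)
+>>> x = np.linspace(0, 10, 5)      # shape (5,)
+>>> jv(orders, x).shape            # broadcasts to (2, 5)
+(2, 5)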
+
+
+Raw Statistical Functions
+-------------------------
+
+.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
+
+.. autosummary::
+   :toctree: generated/
+
+   bdtr -- Binomial distribution cumulative distribution function.
+   bdtrc -- Binomial distribution survival function.
+   bdtri -- Inverse function to `bdtr` with respect to `p`.
+   bdtrik -- Inverse function to `bdtr` with respect to `k`.
+   bdtrin -- Inverse function to `bdtr` with respect to `n`.
+   btdtr -- Cumulative distribution function of the beta distribution.
+   btdtri -- The `p`-th quantile of the beta distribution.
+   btdtria -- Inverse of `btdtr` with respect to `a`.
+   btdtrib -- Inverse of `btdtr` with respect to `b`.
+   fdtr -- F cumulative distribution function.
+   fdtrc -- F survival function.
+   fdtri -- The `p`-th quantile of the F-distribution.
+   fdtridfd -- Inverse to `fdtr` vs dfd.
+   gdtr -- Gamma distribution cumulative distribution function.
+   gdtrc -- Gamma distribution survival function.
+   gdtria -- Inverse of `gdtr` vs a.
+   gdtrib -- Inverse of `gdtr` vs b.
+   gdtrix -- Inverse of `gdtr` vs x.
+   nbdtr -- Negative binomial cumulative distribution function.
+   nbdtrc -- Negative binomial survival function.
+   nbdtri -- Inverse of `nbdtr` vs `p`.
+   nbdtrik -- Inverse of `nbdtr` vs `k`.
+   nbdtrin -- Inverse of `nbdtr` vs `n`.
+   ncfdtr -- Cumulative distribution function of the non-central F distribution.
+   ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
+   ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
+   ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
+   ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
+   nctdtr -- Cumulative distribution function of the non-central `t` distribution.
+   nctdtridf -- Calculate degrees of freedom for non-central t distribution.
+   nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
+   nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
+   nrdtrimn -- Calculate mean of normal distribution given other params.
+   nrdtrisd -- Calculate standard deviation of normal distribution given other params.
+   pdtr -- Poisson cumulative distribution function.
+   pdtrc -- Poisson survival function.
+   pdtri -- Inverse to `pdtr` vs m.
+   pdtrik -- Inverse to `pdtr` vs k.
+   stdtr -- Student t distribution cumulative distribution function.
+   stdtridf -- Inverse of `stdtr` vs df.
+   stdtrit -- Inverse of `stdtr` vs `t`.
+   chdtr -- Chi square cumulative distribution function.
+   chdtrc -- Chi square survival function.
+   chdtri -- Inverse to `chdtrc`.
+   chdtriv -- Inverse to `chdtr` vs `v`.
+   ndtr -- Gaussian cumulative distribution function.
+   log_ndtr -- Logarithm of Gaussian cumulative distribution function.
+   ndtri -- Inverse of `ndtr` vs x.
+   chndtr -- Non-central chi square cumulative distribution function.
+   chndtridf -- Inverse to `chndtr` vs `df`.
+   chndtrinc -- Inverse to `chndtr` vs `nc`.
+   chndtrix -- Inverse to `chndtr` vs `x`.
+   smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function.
+   smirnovi -- Inverse to `smirnov`.
+   kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution.
+   kolmogi -- Inverse function to `kolmogorov`.
+   tklmbda -- Tukey-Lambda cumulative distribution function.
+   logit -- Logit ufunc for ndarrays.
+   expit -- Expit ufunc for ndarrays.
+   boxcox -- Compute the Box-Cox transformation.
+   boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
+   inv_boxcox -- Compute the inverse of the Box-Cox transformation.
+   inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + `x`.
+   owens_t -- Owen's T function.
+
+
+Information Theory Functions
+----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   entr -- Elementwise function for computing entropy.
+   rel_entr -- Elementwise function for computing relative entropy.
+   kl_div -- Elementwise function for computing Kullback-Leibler divergence.
+   huber -- Huber loss function.
+   pseudo_huber -- Pseudo-Huber loss function.
+
+
+Gamma and Related Functions
+---------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   gamma -- Gamma function.
+   gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
+   loggamma -- Principal branch of the logarithm of the Gamma function.
+   gammasgn -- Sign of the gamma function.
+   gammainc -- Regularized lower incomplete gamma function.
+   gammaincinv -- Inverse to `gammainc`.
+   gammaincc -- Regularized upper incomplete gamma function.
+   gammainccinv -- Inverse to `gammaincc`.
+   beta -- Beta function.
+   betaln -- Natural logarithm of absolute value of beta function.
+   betainc -- Incomplete beta integral.
+   betaincinv -- Inverse function to beta integral.
+   psi -- The digamma function.
+   rgamma -- Reciprocal of the gamma function.
+   polygamma -- Polygamma function n.
+   multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
+   digamma -- The digamma function (an alias for `psi`).
+   poch -- Rising factorial (z)_m.
+
+
+Error Function and Fresnel Integrals
+------------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   erf -- Returns the error function of complex argument.
+   erfc -- Complementary error function, ``1 - erf(x)``.
+   erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
+   erfi -- Imaginary error function, ``-i erf(i z)``.
+   erfinv -- Inverse function for erf.
+   erfcinv -- Inverse function for erfc.
+   wofz -- Faddeeva function.
+   dawsn -- Dawson's integral.
+   fresnel -- Fresnel sin and cos integrals.
+   fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
+   modfresnelp -- Modified Fresnel positive integrals.
+   modfresnelm -- Modified Fresnel negative integrals.
+
+These are not universal functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   erf_zeros -- [+]Compute nt complex zeros of error function erf(z).
+   fresnelc_zeros -- [+]Compute nt complex zeros of cosine Fresnel integral C(z).
+   fresnels_zeros -- [+]Compute nt complex zeros of sine Fresnel integral S(z).
+
+Legendre Functions
+------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   lpmv -- Associated Legendre function of integer order and real degree.
+   sph_harm -- Compute spherical harmonics.
+
+These are not universal functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   clpmn -- [+]Associated Legendre function of the first kind for complex arguments.
+   lpn -- [+]Legendre function of the first kind.
+   lqn -- [+]Legendre function of the second kind.
+   lpmn -- [+]Sequence of associated Legendre functions of the first kind.
+   lqmn -- [+]Sequence of associated Legendre functions of the second kind.
+
+Ellipsoidal Harmonics
+---------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ellip_harm -- Ellipsoidal harmonic functions E^p_n(l).
+   ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l).
+   ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n.
+
+Orthogonal polynomials
+----------------------
+
+The following functions evaluate values of orthogonal polynomials:
+
+.. autosummary::
+   :toctree: generated/
+
+   assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
+   eval_legendre -- Evaluate Legendre polynomial at a point.
+   eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
+   eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
+   eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
+   eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
+   eval_jacobi -- Evaluate Jacobi polynomial at a point.
+   eval_laguerre -- Evaluate Laguerre polynomial at a point.
+   eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
+   eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
+   eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
+   eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
+   eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
+   eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
+   eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
+   eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
+
+The following functions compute roots and quadrature weights for
+orthogonal polynomials:
+
+.. autosummary::
+   :toctree: generated/
+
+   roots_legendre -- Gauss-Legendre quadrature.
+   roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
+   roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
+   roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
+   roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
+   roots_jacobi -- Gauss-Jacobi quadrature.
+   roots_laguerre -- Gauss-Laguerre quadrature.
+   roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
+   roots_hermite -- Gauss-Hermite (physicist's) quadrature.
+   roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
+   roots_gegenbauer -- Gauss-Gegenbauer quadrature.
+   roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
+   roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
+   roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
+   roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
+
+The functions below, in turn, return the polynomial coefficients in
+:class:`~.orthopoly1d` objects, which function similarly to `numpy.poly1d`.
+The :class:`~.orthopoly1d` class also has an attribute ``weights``, which returns
+the roots, weights, and total weights for the appropriate form of Gaussian
+quadrature. These are returned in an ``n x 3`` array with roots in the first
+column, weights in the second column, and total weights in the final column.
+Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing
+arithmetic, and lose the information of the original orthogonal polynomial.
+
+.. autosummary::
+   :toctree: generated/
+
+   legendre -- [+]Legendre polynomial.
+   chebyt -- [+]Chebyshev polynomial of the first kind.
+   chebyu -- [+]Chebyshev polynomial of the second kind.
+   chebyc -- [+]Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
+   chebys -- [+]Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
+   jacobi -- [+]Jacobi polynomial.
+   laguerre -- [+]Laguerre polynomial.
+   genlaguerre -- [+]Generalized (associated) Laguerre polynomial.
+   hermite -- [+]Physicist's Hermite polynomial.
+   hermitenorm -- [+]Normalized (probabilist's) Hermite polynomial.
+   gegenbauer -- [+]Gegenbauer (ultraspherical) polynomial.
+   sh_legendre -- [+]Shifted Legendre polynomial.
+   sh_chebyt -- [+]Shifted Chebyshev polynomial of the first kind.
+   sh_chebyu -- [+]Shifted Chebyshev polynomial of the second kind.
+   sh_jacobi -- [+]Shifted Jacobi polynomial.
+
+.. warning::
+
+   Computing values of high-order polynomials (around ``order > 20``) using
+   polynomial coefficients is numerically unstable. To evaluate polynomial
+   values, the ``eval_*`` functions should be used instead.
+
+
+Hypergeometric Functions
+------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
+   hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x).
+   hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind.
+   hyp0f1 -- Confluent hypergeometric limit function 0F1.
+   hyp2f0 -- Hypergeometric function 2F0 in y and an error estimate.
+   hyp1f2 -- Hypergeometric function 1F2 and error estimate.
+   hyp3f0 -- Hypergeometric function 3F0 in y and an error estimate.
+
+
+Parabolic Cylinder Functions
+----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   pbdv -- Parabolic cylinder function D.
+   pbvv -- Parabolic cylinder function V.
+   pbwa -- Parabolic cylinder function W.
+
+These are not universal functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   pbdv_seq -- [+]Parabolic cylinder functions Dv(x) and derivatives.
+   pbvv_seq -- [+]Parabolic cylinder functions Vv(x) and derivatives.
+   pbdn_seq -- [+]Parabolic cylinder functions Dn(z) and derivatives.
+
+Mathieu and Related Functions
+-----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   mathieu_a -- Characteristic value of even Mathieu functions.
+   mathieu_b -- Characteristic value of odd Mathieu functions.
+
+These are not universal functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   mathieu_even_coef -- [+]Fourier coefficients for even Mathieu and modified Mathieu functions.
+   mathieu_odd_coef -- [+]Fourier coefficients for odd Mathieu and modified Mathieu functions.
+
+The following return both function and first derivative:
+
+.. autosummary::
+   :toctree: generated/
+
+   mathieu_cem -- Even Mathieu function and its derivative.
+   mathieu_sem -- Odd Mathieu function and its derivative.
+   mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative.
+   mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative.
+   mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative.
+   mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative.
+
+Spheroidal Wave Functions
+-------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative.
+   pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative.
+   pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative.
+   obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative.
+   obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative.
+   obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
+   pro_cv -- Characteristic value of prolate spheroidal function.
+   obl_cv -- Characteristic value of oblate spheroidal function.
+   pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
+   obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
+
+The following functions require a pre-computed characteristic value:
+
+.. autosummary::
+   :toctree: generated/
+
+   pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value.
+   pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value.
+   pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value.
+   obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value.
+   obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value.
+   obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value.
+
+Kelvin Functions
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   kelvin -- Kelvin functions as complex numbers.
+   kelvin_zeros -- [+]Compute nt zeros of all Kelvin functions.
+   ber -- Kelvin function ber.
+   bei -- Kelvin function bei.
+   berp -- Derivative of the Kelvin function `ber`.
+   beip -- Derivative of the Kelvin function `bei`.
+   ker -- Kelvin function ker.
+   kei -- Kelvin function kei.
+   kerp -- Derivative of the Kelvin function ker.
+   keip -- Derivative of the Kelvin function kei.
+
+These are not universal functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   ber_zeros -- [+]Compute nt zeros of the Kelvin function ber(x).
+   bei_zeros -- [+]Compute nt zeros of the Kelvin function bei(x).
+   berp_zeros -- [+]Compute nt zeros of the Kelvin function ber'(x).
+   beip_zeros -- [+]Compute nt zeros of the Kelvin function bei'(x).
+   ker_zeros -- [+]Compute nt zeros of the Kelvin function ker(x).
+   kei_zeros -- [+]Compute nt zeros of the Kelvin function kei(x).
+   kerp_zeros -- [+]Compute nt zeros of the Kelvin function ker'(x).
+   keip_zeros -- [+]Compute nt zeros of the Kelvin function kei'(x).
+
+Combinatorics
+-------------
+
+.. autosummary::
+   :toctree: generated/
+
+   comb -- [+]The number of combinations of N things taken k at a time.
+   perm -- [+]Permutations of N things taken k at a time, i.e., k-permutations of N.
+
+Lambert W and Related Functions
+-------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   lambertw -- Lambert W function.
+   wrightomega -- Wright Omega function.
+
+Other Special Functions
+-----------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   agm -- Arithmetic-geometric mean.
+   bernoulli -- Bernoulli numbers B0..Bn (inclusive).
+   binom -- Binomial coefficient.
+   diric -- Periodic sinc function, also called the Dirichlet function.
+   euler -- Euler numbers E0..En (inclusive).
+ expn -- Exponential integral E_n + exp1 -- Exponential integral E_1 of complex argument z + expi -- Exponential integral Ei + factorial -- The factorial of a number or array of numbers. + factorial2 -- Double factorial. + factorialk -- [+]Multifactorial of n of order k, n(!!...!). + shichi -- Hyperbolic sine and cosine integrals. + sici -- Sine and cosine integrals. + softmax -- Softmax function. + spence -- Spence's function, also known as the dilogarithm. + zeta -- Riemann zeta function. + zetac -- Riemann zeta function minus 1. + +Convenience Functions +--------------------- + +.. autosummary:: + :toctree: generated/ + + cbrt -- Cube root of `x` + exp10 -- 10**x + exp2 -- 2**x + radian -- Convert from degrees to radians + cosdg -- Cosine of the angle `x` given in degrees. + sindg -- Sine of angle given in degrees + tandg -- Tangent of angle x given in degrees. + cotdg -- Cotangent of the angle `x` given in degrees. + log1p -- Calculates log(1+x) for use when `x` is near zero + expm1 -- exp(x) - 1 for use when `x` is near zero. + cosm1 -- cos(x) - 1 for use when `x` is near zero. + round -- Round to nearest integer + xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``. + xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``. + logsumexp -- Compute the log of the sum of exponentials of input elements. + exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero. + sinc -- Return the sinc function. + +.. [+] in the description indicates a function which is not a universal +.. function and does not follow broadcasting and automatic +.. array-looping rules. + +""" + +from __future__ import division, print_function, absolute_import + +from .sf_error import SpecialFunctionWarning, SpecialFunctionError + +from ._ufuncs import * + +from .basic import * +from ._logsumexp import logsumexp, softmax +from . import specfun +from . 
+
+"""
+
+from __future__ import division, print_function, absolute_import
+
+from .sf_error import SpecialFunctionWarning, SpecialFunctionError
+
+from ._ufuncs import *
+
+from .basic import *
+from ._logsumexp import logsumexp, softmax
+from . import specfun
+from . import orthogonal
+from .orthogonal import *
+from .spfun_stats import multigammaln
+from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
+from .lambertw import lambertw
+from ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in,
+                                spherical_kn)
+
+from numpy import deprecate
+hyp2f0 = deprecate(hyp2f0, message="hyp2f0 is deprecated in SciPy 1.2")
+hyp1f2 = deprecate(hyp1f2, message="hyp1f2 is deprecated in SciPy 1.2")
+hyp3f0 = deprecate(hyp3f0, message="hyp3f0 is deprecated in SciPy 1.2")
+del deprecate
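+# (added note: `numpy.deprecate` returns a wrapper that emits a
+# DeprecationWarning when the wrapped function is called and then
+# delegates to the original kernel, so the three names above keep
+# working until they are removed.)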
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from numpy.dual import register_func
+register_func('i0', i0)
+del register_func
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/__init__.pyc
new file mode 100644
index 0000000..8098e24
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_comb.so b/project/venv/lib/python2.7/site-packages/scipy/special/_comb.so
new file mode 100755
index 0000000..1891443
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_comb.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_ellip_harm.py b/project/venv/lib/python2.7/site-packages/scipy/special/_ellip_harm.py
new file mode 100644
index 0000000..a224a6c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/special/_ellip_harm.py
@@ -0,0 +1,209 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+
+from ._ufuncs import _ellip_harm
+from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm
+
+
+def ellip_harm(h2, k2, n, p, s, signm=1, signn=1):
+    r"""
+    Ellipsoidal harmonic functions E^p_n(l)
+
+    These are also known as Lame functions of the first kind, and are
+    solutions to the Lame equation:
+
+    .. math:: (s^2 - h^2)(s^2 - k^2)E''(s) + s(2s^2 - h^2 - k^2)E'(s) + (a - q s^2)E(s) = 0
+
+    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
+    returned) corresponding to the solutions.
+
+    Parameters
+    ----------
+    h2 : float
+        ``h**2``
+    k2 : float
+        ``k**2``; should be larger than ``h**2``
+    n : int
+        Degree
+    p : int
+        Order, can range between [1,2n+1]
+    s : float
+        Coordinate
+    signm : {1, -1}, optional
+        Sign of prefactor of functions. Can be +/-1. See Notes.
+    signn : {1, -1}, optional
+        Sign of prefactor of functions. Can be +/-1. See Notes.
+
+    Returns
+    -------
+    E : float
+        the harmonic :math:`E^p_n(s)`
+
+    See Also
+    --------
+    ellip_harm_2, ellip_normal
+
+    Notes
+    -----
+    The geometric interpretation of the ellipsoidal functions is
+    explained in [2]_, [3]_, [4]_. The `signm` and `signn` arguments
+    control the sign of prefactors for functions according to their
+    type::
+
+        K : +1
+        L : signm
+        M : signn
+        N : signm*signn
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Digital Library of Mathematical Functions 29.12
+       https://dlmf.nist.gov/29.12
+    .. [2] Bardhan and Knepley, "Computational science and
+       re-discovery: open-source implementations of
+       ellipsoidal harmonics for problems in potential theory",
+       Comput. Sci. Disc. 5, 014006 (2012)
+       :doi:`10.1088/1749-4699/5/1/014006`.
+    .. [3] David J. and Dechambre P, "Computation of Ellipsoidal
+       Gravity Field Harmonics for small solar system bodies"
+       pp. 30-36, 2000
+    .. [4] George Dassios, "Ellipsoidal Harmonics: Theory and Applications"
+       pp. 418, 2012
+
+    Examples
+    --------
+    >>> from scipy.special import ellip_harm
+    >>> w = ellip_harm(5,8,1,1,2.5)
+    >>> w
+    2.5
+
+    Check that the functions indeed are solutions to the Lame equation:
+
+    >>> from scipy.interpolate import UnivariateSpline
+    >>> def eigenvalue(f, df, ddf):
+    ...     r = ((s**2 - h**2)*(s**2 - k**2)*ddf + s*(2*s**2 - h**2 - k**2)*df - n*(n+1)*s**2*f)/f
+    ...     return -r.mean(), r.std()
+    >>> s = np.linspace(0.1, 10, 200)
+    >>> k, h, n, p = 8.0, 2.2, 3, 2
+    >>> E = ellip_harm(h**2, k**2, n, p, s)
+    >>> E_spl = UnivariateSpline(s, E)
+    >>> a, a_err = eigenvalue(E_spl(s), E_spl(s,1), E_spl(s,2))
+    >>> a, a_err
+    (583.44366156701483, 6.4580890640310646e-11)
+
+    """
+    return _ellip_harm(h2, k2, n, p, s, signm, signn)
+
+
+_ellip_harm_2_vec = np.vectorize(_ellipsoid, otypes='d')
+
+
+def ellip_harm_2(h2, k2, n, p, s):
+    r"""
+    Ellipsoidal harmonic functions F^p_n(l)
+
+    These are also known as Lame functions of the second kind, and are
+    solutions to the Lame equation:
+
+    .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0
+
+    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
+    returned) corresponding to the solutions.
+
+    Parameters
+    ----------
+    h2 : float
+        ``h**2``
+    k2 : float
+        ``k**2``; should be larger than ``h**2``
+    n : int
+        Degree.
+    p : int
+        Order, can range between [1,2n+1].
+    s : float
+        Coordinate
+
+    Returns
+    -------
+    F : float
+        The harmonic :math:`F^p_n(s)`
+
+    Notes
+    -----
+    Lame functions of the second kind are related to the functions of the first kind:
+
+    .. math::
+
+       F^p_n(s)=(2n + 1)E^p_n(s)\int_{0}^{1/s}\frac{du}{(E^p_n(1/u))^2\sqrt{(1-u^2k^2)(1-u^2h^2)}}
+
+    .. versionadded:: 0.15.0
+
+    See Also
+    --------
+    ellip_harm, ellip_normal
+
+    Examples
+    --------
+    >>> from scipy.special import ellip_harm_2
+    >>> w = ellip_harm_2(5,8,2,1,10)
+    >>> w
+    0.00108056853382
+
+    """
+    with np.errstate(all='ignore'):
+        return _ellip_harm_2_vec(h2, k2, n, p, s)
+
+
+def _ellip_normal_vec(h2, k2, n, p):
+    return _ellipsoid_norm(h2, k2, n, p)
+
+
+_ellip_normal_vec = np.vectorize(_ellip_normal_vec, otypes='d')
+
+
+def ellip_normal(h2, k2, n, p):
+    r"""
+    Ellipsoidal harmonic normalization constants gamma^p_n
+
+    The normalization constant is defined as
+
+    .. math::
+
+       \gamma^p_n=8\int_{0}^{h}dx\int_{h}^{k}dy\frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}{\sqrt{(k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)}}
+
+    Parameters
+    ----------
+    h2 : float
+        ``h**2``
+    k2 : float
+        ``k**2``; should be larger than ``h**2``
+    n : int
+        Degree.
+    p : int
+        Order, can range between [1,2n+1].
+
+    Returns
+    -------
+    gamma : float
+        The normalization constant :math:`\gamma^p_n`
+
+    See Also
+    --------
+    ellip_harm, ellip_harm_2
+
+    Notes
+    -----
+    .. versionadded:: 0.15.0
+
+    Examples
+    --------
+    >>> from scipy.special import ellip_normal
+    >>> w = ellip_normal(5,8,3,7)
+    >>> w
+    1723.38796997
+
+    """
+    with np.errstate(all='ignore'):
+        return _ellip_normal_vec(h2, k2, n, p)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_ellip_harm.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_ellip_harm.pyc
new file mode 100644
index 0000000..c534739
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_ellip_harm.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_ellip_harm_2.so b/project/venv/lib/python2.7/site-packages/scipy/special/_ellip_harm_2.so
new file mode 100755
index 0000000..1d5c0e1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_ellip_harm_2.so differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_generate_pyx.py b/project/venv/lib/python2.7/site-packages/scipy/special/_generate_pyx.py
new file mode 100644
index 0000000..603739d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/special/_generate_pyx.py
@@ -0,0 +1,1378 @@
+"""
+python _generate_pyx.py
+
+Generate Ufunc definition source files for scipy.special. Produces
+files '_ufuncs.c' and '_ufuncs_cxx.c' by first producing Cython.
+
+This will generate both calls to PyUFunc_FromFuncAndData and the
+required ufunc inner loops.
+
+The function signatures are contained in 'functions.json'; the syntax
+for a function signature is
+
+    <function>: <name> ':' <input> '*' <output>
+                '->' <retval> '*' <ignored_retval>
+    <input>: <typecode>*
+    <output>: <typecode>*
+    <retval>: <typecode>?
+    <ignored_retval>: <typecode>?
+    <headers>: <header_name> [',' <header_name>]*
+
+The input parameter types are denoted by single character type
+codes, according to
+
+    'f': 'float'
+    'd': 'double'
+    'g': 'long double'
+    'F': 'float complex'
+    'D': 'double complex'
+    'G': 'long double complex'
+    'i': 'int'
+    'l': 'long'
+    'v': 'void'
+
+If multiple kernel functions are given for a single ufunc, the one
+which is used is determined by the standard ufunc mechanism. Kernel
+functions that are listed first are also matched first against the
+ufunc input types, so functions listed earlier take precedence.
+
+In addition, versions with cast variables, such as d->f, D->F and
+i->d, are automatically generated.
+
+There should be either a single header that contains all of the kernel
+functions listed, or there should be one header for each kernel
+function. Cython pxd files are allowed in addition to .h files.
+
+Cython functions may use fused types, but the names in the list
+should be the specialized ones, such as 'somefunc[float]'.
+
+Functions coming from C++ should have ``++`` appended to the name of
+the header.
+
+Floating-point exceptions inside these Ufuncs are converted to
+special function errors --- which are separately controlled by the
+user, and off by default, as they are usually not especially useful
+for the user.
+
+
+The C++ module
+--------------
+In addition to the ``_ufuncs`` module, a second module ``_ufuncs_cxx`` is
+generated. This module only exports function pointers that are to be
+used when constructing some of the ufuncs in ``_ufuncs``. The function
+pointers are exported via Cython's standard mechanism.
+
+This mainly avoids build issues --- Python distutils has no way to
+figure out what to do if you want to link both C++ and Fortran code in
+the same shared library.
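+
+As an added illustration (this exact entry is hypothetical, not copied
+from the real 'functions.json'), a minimal entry mapping a ufunc name
+to a header and a kernel signature would read::
+
+    {"somefunc": {"somefunc.h": {"somefunc": "dd->d"}}}
+
+i.e. a C kernel ``double somefunc(double, double)`` declared in
+'somefunc.h', exposed as a ufunc with two double inputs and a double
+return value.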
+ +""" + +from __future__ import division, print_function, absolute_import + +#--------------------------------------------------------------------------------- +# Extra code +#--------------------------------------------------------------------------------- + +UFUNCS_EXTRA_CODE_COMMON = """\ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! +from __future__ import absolute_import + +include "_ufuncs_extra_code_common.pxi" +""" + +UFUNCS_EXTRA_CODE = """\ +include "_ufuncs_extra_code.pxi" +""" + +UFUNCS_EXTRA_CODE_BOTTOM = """\ +# +# Aliases +# +jn = jv +""" + +CYTHON_SPECIAL_PXD = """\ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! +""" + +CYTHON_SPECIAL_PYX = """\ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! +\"\"\" +.. highlight:: cython + +================================ +Cython API for Special Functions +================================ + +Scalar, typed versions of many of the functions in ``scipy.special`` +can be accessed directly from Cython; the complete list is given +below. Functions are overloaded using Cython fused types so their +names match their ufunc counterpart. The module follows the following +conventions: + +- If a function's ufunc counterpart returns multiple values, then the + function returns its outputs via pointers in the final arguments +- If a function's ufunc counterpart returns a single value, then the + function's output is returned directly. + +The module is usable from Cython via:: + + cimport scipy.special.cython_special + +Error Handling +============== + +Functions can indicate an error by returning ``nan``; however they +cannot emit warnings like their counterparts in ``scipy.special``. + +Available Functions +=================== + +FUNCLIST +\"\"\" + +from __future__ import absolute_import +include "_cython_special.pxi" +""" + + +#--------------------------------------------------------------------------------- +# Code generation +#--------------------------------------------------------------------------------- + +import os +import optparse +import re +import textwrap +import itertools +import numpy +import json + + +BASE_DIR = os.path.abspath(os.path.dirname(__file__)) + +add_newdocs = __import__('add_newdocs') + +CY_TYPES = { + 'f': 'float', + 'd': 'double', + 'g': 'long double', + 'F': 'float complex', + 'D': 'double complex', + 'G': 'long double complex', + 'i': 'int', + 'l': 'long', + 'v': 'void', +} + +C_TYPES = { + 'f': 'npy_float', + 'd': 'npy_double', + 'g': 'npy_longdouble', + 'F': 'npy_cfloat', + 'D': 'npy_cdouble', + 'G': 'npy_clongdouble', + 'i': 'npy_int', + 'l': 'npy_long', + 'v': 'void', +} + +TYPE_NAMES = { + 'f': 'NPY_FLOAT', + 'd': 'NPY_DOUBLE', + 'g': 'NPY_LONGDOUBLE', + 'F': 'NPY_CFLOAT', + 'D': 'NPY_CDOUBLE', + 'G': 'NPY_CLONGDOUBLE', + 'i': 'NPY_INT', + 'l': 'NPY_LONG', +} + +CYTHON_SPECIAL_BENCHFUNCS = { + 'airy': ['d*dddd', 'D*DDDD'], + 'beta': ['dd'], + 'erf': ['d', 'D'], + 'exprel': ['d'], + 'gamma': ['d', 'D'], + 'jv': ['dd', 'dD'], + 'loggamma': ['D'], + 'logit': ['d'], + 'psi': ['d', 'D'], +} + + +def underscore(arg): + return arg.replace(" ", "_") + + +def cast_order(c): + return ['ilfdgFDG'.index(x) for x in c] + + +# These downcasts will cause the function to return NaNs, unless the +# values happen to coincide exactly. 
+DANGEROUS_DOWNCAST = set([ + ('F', 'i'), ('F', 'l'), ('F', 'f'), ('F', 'd'), ('F', 'g'), + ('D', 'i'), ('D', 'l'), ('D', 'f'), ('D', 'd'), ('D', 'g'), + ('G', 'i'), ('G', 'l'), ('G', 'f'), ('G', 'd'), ('G', 'g'), + ('f', 'i'), ('f', 'l'), + ('d', 'i'), ('d', 'l'), + ('g', 'i'), ('g', 'l'), + ('l', 'i'), +]) + +NAN_VALUE = { + 'f': 'NPY_NAN', + 'd': 'NPY_NAN', + 'g': 'NPY_NAN', + 'F': 'NPY_NAN', + 'D': 'NPY_NAN', + 'G': 'NPY_NAN', + 'i': '0xbad0bad0', + 'l': '0xbad0bad0', +} + + +def generate_loop(func_inputs, func_outputs, func_retval, + ufunc_inputs, ufunc_outputs): + """ + Generate a UFunc loop function that calls a function given as its + data parameter with the specified input and output arguments and + return value. + + This function can be passed to PyUFunc_FromFuncAndData. + + Parameters + ---------- + func_inputs, func_outputs, func_retval : str + Signature of the function to call, given as type codes of the + input, output and return value arguments. These 1-character + codes are given according to the CY_TYPES and TYPE_NAMES + lists above. + + The corresponding C function signature to be called is: + + retval func(intype1 iv1, intype2 iv2, ..., outtype1 *ov1, ...); + + If len(ufunc_outputs) == len(func_outputs)+1, the return value + is treated as the first output argument. Otherwise, the return + value is ignored. + + ufunc_inputs, ufunc_outputs : str + Ufunc input and output signature. + + This does not have to exactly match the function signature, + as long as the type casts work out on the C level. + + Returns + ------- + loop_name + Name of the generated loop function. + loop_body + Generated C code for the loop. + + """ + if len(func_inputs) != len(ufunc_inputs): + raise ValueError("Function and ufunc have different number of inputs") + + if len(func_outputs) != len(ufunc_outputs) and not ( + func_retval != "v" and len(func_outputs)+1 == len(ufunc_outputs)): + raise ValueError("Function retval and ufunc outputs don't match") + + name = "loop_%s_%s_%s_As_%s_%s" % ( + func_retval, func_inputs, func_outputs, ufunc_inputs, ufunc_outputs + ) + body = "cdef void %s(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:\n" % name + body += " cdef np.npy_intp i, n = dims[0]\n" + body += " cdef void *func = (<void**>data)[0]\n" + body += " cdef char *func_name = <char*>(<void**>data)[1]\n" + + for j in range(len(ufunc_inputs)): + body += " cdef char *ip%d = args[%d]\n" % (j, j) + for j in range(len(ufunc_outputs)): + body += " cdef char *op%d = args[%d]\n" % (j, j + len(ufunc_inputs)) + + ftypes = [] + fvars = [] + outtypecodes = [] + for j in range(len(func_inputs)): + ftypes.append(CY_TYPES[func_inputs[j]]) + fvars.append("<%s>(<%s*>ip%d)[0]" % ( + CY_TYPES[func_inputs[j]], + CY_TYPES[ufunc_inputs[j]], j)) + + if len(func_outputs)+1 == len(ufunc_outputs): + func_joff = 1 + outtypecodes.append(func_retval) + body += " cdef %s ov0\n" % (CY_TYPES[func_retval],) + else: + func_joff = 0 + + for j, outtype in enumerate(func_outputs): + body += " cdef %s ov%d\n" % (CY_TYPES[outtype], j+func_joff) + ftypes.append("%s *" % CY_TYPES[outtype]) + fvars.append("&ov%d" % (j+func_joff)) + outtypecodes.append(outtype) + + body += " for i in range(n):\n" + if len(func_outputs)+1 == len(ufunc_outputs): + rv = "ov0 = " + else: + rv = "" + + funcall = " %s(<%s(*)(%s) nogil>func)(%s)\n" % ( + rv, CY_TYPES[func_retval], ", ".join(ftypes), ", ".join(fvars)) + + # Cast-check inputs and call function + input_checks = [] + for j in range(len(func_inputs)): + if (ufunc_inputs[j], 
func_inputs[j]) in DANGEROUS_DOWNCAST: + chk = "<%s>(<%s*>ip%d)[0] == (<%s*>ip%d)[0]" % ( + CY_TYPES[func_inputs[j]], CY_TYPES[ufunc_inputs[j]], j, + CY_TYPES[ufunc_inputs[j]], j) + input_checks.append(chk) + + if input_checks: + body += " if %s:\n" % (" and ".join(input_checks)) + body += " " + funcall + body += " else:\n" + body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid input argument\")\n" + for j, outtype in enumerate(outtypecodes): + body += " ov%d = <%s>%s\n" % ( + j, CY_TYPES[outtype], NAN_VALUE[outtype]) + else: + body += funcall + + # Assign and cast-check output values + for j, (outtype, fouttype) in enumerate(zip(ufunc_outputs, outtypecodes)): + if (fouttype, outtype) in DANGEROUS_DOWNCAST: + body += " if ov%d == <%s>ov%d:\n" % (j, CY_TYPES[outtype], j) + body += " (<%s *>op%d)[0] = <%s>ov%d\n" % ( + CY_TYPES[outtype], j, CY_TYPES[outtype], j) + body += " else:\n" + body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid output\")\n" + body += " (<%s *>op%d)[0] = <%s>%s\n" % ( + CY_TYPES[outtype], j, CY_TYPES[outtype], NAN_VALUE[outtype]) + else: + body += " (<%s *>op%d)[0] = <%s>ov%d\n" % ( + CY_TYPES[outtype], j, CY_TYPES[outtype], j) + for j in range(len(ufunc_inputs)): + body += " ip%d += steps[%d]\n" % (j, j) + for j in range(len(ufunc_outputs)): + body += " op%d += steps[%d]\n" % (j, j + len(ufunc_inputs)) + + body += " sf_error.check_fpe(func_name)\n" + + return name, body + + +def generate_fused_type(codes): + """ + Generate name of and cython code for a fused type. + + Parameters + ---------- + typecodes : str + Valid inputs to CY_TYPES (i.e. f, d, g, ...). + + """ + cytypes = map(lambda x: CY_TYPES[x], codes) + name = codes + "_number_t" + declaration = ["ctypedef fused " + name + ":"] + for cytype in cytypes: + declaration.append(" " + cytype) + declaration = "\n".join(declaration) + return name, declaration + + +def generate_bench(name, codes): + tab = " "*4 + top, middle, end = [], [], [] + + tmp = codes.split("*") + if len(tmp) > 1: + incodes = tmp[0] + outcodes = tmp[1] + else: + incodes = tmp[0] + outcodes = "" + + inargs, inargs_and_types = [], [] + for n, code in enumerate(incodes): + arg = "x{}".format(n) + inargs.append(arg) + inargs_and_types.append("{} {}".format(CY_TYPES[code], arg)) + line = "def {{}}(int N, {}):".format(", ".join(inargs_and_types)) + top.append(line) + top.append(tab + "cdef int n") + + outargs = [] + for n, code in enumerate(outcodes): + arg = "y{}".format(n) + outargs.append("&{}".format(arg)) + line = "cdef {} {}".format(CY_TYPES[code], arg) + middle.append(tab + line) + + end.append(tab + "for n in range(N):") + end.append(2*tab + "{}({})") + pyfunc = "_bench_{}_{}_{}".format(name, incodes, "py") + cyfunc = "_bench_{}_{}_{}".format(name, incodes, "cy") + pytemplate = "\n".join(top + end) + cytemplate = "\n".join(top + middle + end) + pybench = pytemplate.format(pyfunc, "_ufuncs." 
+ name, ", ".join(inargs)) + cybench = cytemplate.format(cyfunc, name, ", ".join(inargs + outargs)) + return pybench, cybench + + +def generate_doc(name, specs): + tab = " "*4 + doc = ["- :py:func:`~scipy.special.{}`::\n".format(name)] + for spec in specs: + incodes, outcodes = spec.split("->") + incodes = incodes.split("*") + intypes = list(map(lambda x: CY_TYPES[x], incodes[0])) + if len(incodes) > 1: + types = map(lambda x: "{} *".format(CY_TYPES[x]), incodes[1]) + intypes.extend(types) + outtype = CY_TYPES[outcodes] + line = "{} {}({})".format(outtype, name, ", ".join(intypes)) + doc.append(2*tab + line) + doc[-1] = "{}\n".format(doc[-1]) + doc = "\n".join(doc) + return doc + + +def npy_cdouble_from_double_complex(var): + """Cast a cython double complex to a numpy cdouble.""" + res = "_complexstuff.npy_cdouble_from_double_complex({})".format(var) + return res + + +def double_complex_from_npy_cdouble(var): + """Cast a numpy cdouble to a cython double complex.""" + res = "_complexstuff.double_complex_from_npy_cdouble({})".format(var) + return res + + +def iter_variants(inputs, outputs): + """ + Generate variants of UFunc signatures, by changing variable types, + within the limitation that the corresponding C types casts still + work out. + + This does not generate all possibilities, just the ones required + for the ufunc to work properly with the most common data types. + + Parameters + ---------- + inputs, outputs : str + UFunc input and output signature strings + + Yields + ------ + new_input, new_output : str + Modified input and output strings. + Also the original input/output pair is yielded. + + """ + maps = [ + # always use long instead of int (more common type on 64-bit) + ('i', 'l'), + ] + + # float32-preserving signatures + if not ('i' in inputs or 'l' in inputs): + # Don't add float32 versions of ufuncs with integer arguments, as this + # can lead to incorrect dtype selection if the integer arguments are + # arrays, but float arguments are scalars. + # For instance sph_harm(0,[0],0,0).dtype == complex64 + # This may be a Numpy bug, but we need to work around it. + # cf. gh-4895, https://github.com/numpy/numpy/issues/5895 + maps = maps + [(a + 'dD', b + 'fF') for a, b in maps] + + # do the replacements + for src, dst in maps: + new_inputs = inputs + new_outputs = outputs + for a, b in zip(src, dst): + new_inputs = new_inputs.replace(a, b) + new_outputs = new_outputs.replace(a, b) + yield new_inputs, new_outputs + + +class Func(object): + """ + Base class for Ufunc and FusedFunc. 
+ + """ + def __init__(self, name, signatures): + self.name = name + self.signatures = [] + self.function_name_overrides = {} + + for header in signatures.keys(): + for name, sig in signatures[header].items(): + inarg, outarg, ret = self._parse_signature(sig) + self.signatures.append((name, inarg, outarg, ret, header)) + + def _parse_signature(self, sig): + m = re.match(r"\s*([fdgFDGil]*)\s*\*\s*([fdgFDGil]*)\s*->\s*([*fdgFDGil]*)\s*$", sig) + if m: + inarg, outarg, ret = [x.strip() for x in m.groups()] + if ret.count('*') > 1: + raise ValueError("{}: Invalid signature: {}".format(self.name, sig)) + return inarg, outarg, ret + m = re.match(r"\s*([fdgFDGil]*)\s*->\s*([fdgFDGil]?)\s*$", sig) + if m: + inarg, ret = [x.strip() for x in m.groups()] + return inarg, "", ret + raise ValueError("{}: Invalid signature: {}".format(self.name, sig)) + + def get_prototypes(self, nptypes_for_h=False): + prototypes = [] + for func_name, inarg, outarg, ret, header in self.signatures: + ret = ret.replace('*', '') + c_args = ([C_TYPES[x] for x in inarg] + + [C_TYPES[x] + ' *' for x in outarg]) + cy_args = ([CY_TYPES[x] for x in inarg] + + [CY_TYPES[x] + ' *' for x in outarg]) + c_proto = "%s (*)(%s)" % (C_TYPES[ret], ", ".join(c_args)) + if header.endswith("h") and nptypes_for_h: + cy_proto = c_proto + "nogil" + else: + cy_proto = "%s (*)(%s) nogil" % (CY_TYPES[ret], ", ".join(cy_args)) + prototypes.append((func_name, c_proto, cy_proto, header)) + return prototypes + + def cython_func_name(self, c_name, specialized=False, prefix="_func_", + override=True): + # act on function name overrides + if override and c_name in self.function_name_overrides: + c_name = self.function_name_overrides[c_name] + prefix = "" + + # support fused types + m = re.match(r'^(.*?)(\[.*\])$', c_name) + if m: + c_base_name, fused_part = m.groups() + else: + c_base_name, fused_part = c_name, "" + if specialized: + return "%s%s%s" % (prefix, c_base_name, fused_part.replace(' ', '_')) + else: + return "%s%s" % (prefix, c_base_name,) + + +class Ufunc(Func): + """ + Ufunc signature, restricted format suitable for special functions. + + Parameters + ---------- + name + Name of the ufunc to create + signature + String of form 'func: fff*ff->f, func2: ddd->*i' describing + the C-level functions and types of their input arguments + and return values. + + The syntax is 'function_name: inputparams*outputparams->output_retval*ignored_retval' + + Attributes + ---------- + name : str + Python name for the Ufunc + signatures : list of (func_name, inarg_spec, outarg_spec, ret_spec, header_name) + List of parsed signatures + doc : str + Docstring, obtained from add_newdocs + function_name_overrides : dict of str->str + Overrides for the function names in signatures + + """ + def __init__(self, name, signatures): + super(Ufunc, self).__init__(name, signatures) + self.doc = add_newdocs.get("scipy.special." 
+ name) + if self.doc is None: + raise ValueError("No docstring for ufunc %r" % name) + self.doc = textwrap.dedent(self.doc).strip() + + def _get_signatures_and_loops(self, all_loops): + inarg_num = None + outarg_num = None + + seen = set() + variants = [] + + def add_variant(func_name, inarg, outarg, ret, inp, outp): + if inp in seen: + return + seen.add(inp) + + sig = (func_name, inp, outp) + if "v" in outp: + raise ValueError("%s: void signature %r" % (self.name, sig)) + if len(inp) != inarg_num or len(outp) != outarg_num: + raise ValueError("%s: signature %r does not have %d/%d input/output args" % ( + self.name, sig, + inarg_num, outarg_num)) + + loop_name, loop = generate_loop(inarg, outarg, ret, inp, outp) + all_loops[loop_name] = loop + variants.append((func_name, loop_name, inp, outp)) + + # First add base variants + for func_name, inarg, outarg, ret, header in self.signatures: + outp = re.sub(r'\*.*', '', ret) + outarg + ret = ret.replace('*', '') + if inarg_num is None: + inarg_num = len(inarg) + outarg_num = len(outp) + + inp, outp = list(iter_variants(inarg, outp))[0] + add_variant(func_name, inarg, outarg, ret, inp, outp) + + # Then the supplementary ones + for func_name, inarg, outarg, ret, header in self.signatures: + outp = re.sub(r'\*.*', '', ret) + outarg + ret = ret.replace('*', '') + for inp, outp in iter_variants(inarg, outp): + add_variant(func_name, inarg, outarg, ret, inp, outp) + + # Then sort variants to input argument cast order + # -- the sort is stable, so functions earlier in the signature list + # are still preferred + variants.sort(key=lambda v: cast_order(v[2])) + + return variants, inarg_num, outarg_num + + def generate(self, all_loops): + toplevel = "" + + variants, inarg_num, outarg_num = self._get_signatures_and_loops(all_loops) + + loops = [] + funcs = [] + types = [] + + for func_name, loop_name, inputs, outputs in variants: + for x in inputs: + types.append(TYPE_NAMES[x]) + for x in outputs: + types.append(TYPE_NAMES[x]) + loops.append(loop_name) + funcs.append(func_name) + + toplevel += "cdef np.PyUFuncGenericFunction ufunc_%s_loops[%d]\n" % (self.name, len(loops)) + toplevel += "cdef void *ufunc_%s_ptr[%d]\n" % (self.name, 2*len(funcs)) + toplevel += "cdef void *ufunc_%s_data[%d]\n" % (self.name, len(funcs)) + toplevel += "cdef char ufunc_%s_types[%d]\n" % (self.name, len(types)) + toplevel += 'cdef char *ufunc_%s_doc = (\n "%s")\n' % ( + self.name, + self.doc.replace("\\", "\\\\").replace('"', '\\"').replace('\n', '\\n\"\n "') + ) + + for j, function in enumerate(loops): + toplevel += "ufunc_%s_loops[%d] = <np.PyUFuncGenericFunction>%s\n" % (self.name, j, function) + for j, type in enumerate(types): + toplevel += "ufunc_%s_types[%d] = <char>%s\n" % (self.name, j, type) + for j, func in enumerate(funcs): + toplevel += "ufunc_%s_ptr[2*%d] = <void*>%s\n" % (self.name, j, + self.cython_func_name(func, specialized=True)) + toplevel += "ufunc_%s_ptr[2*%d+1] = <void*>(<char*>\"%s\")\n" % (self.name, j, + self.name) + for j, func in enumerate(funcs): + toplevel += "ufunc_%s_data[%d] = &ufunc_%s_ptr[2*%d]\n" % ( + self.name, j, self.name, j) + + toplevel += ('@ = np.PyUFunc_FromFuncAndData(ufunc_@_loops, ' + 'ufunc_@_data, ufunc_@_types, %d, %d, %d, 0, ' + '"@", ufunc_@_doc, 0)\n' % (len(types)/(inarg_num+outarg_num), + inarg_num, outarg_num) + ).replace('@', self.name) + + return toplevel + + +class FusedFunc(Func): + """ + Generate code for a fused-type special function that can be + cimported in cython. 
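+
+    For instance (an illustrative case, not drawn from the real
+    'functions.json'): a kernel offered with both 'd->d' and 'D->D'
+    signatures is emitted as one Cython function over the fused type
+    ``Dd_number_t``, specialized at compile time.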
+ + """ + def __init__(self, name, signatures): + super(FusedFunc, self).__init__(name, signatures) + self.doc = "See the documentation for scipy.special." + self.name + # "codes" are the keys for CY_TYPES + self.incodes, self.outcodes = self._get_codes() + self.fused_types = set() + self.intypes, infused_types = self._get_types(self.incodes) + self.fused_types.update(infused_types) + self.outtypes, outfused_types = self._get_types(self.outcodes) + self.fused_types.update(outfused_types) + self.invars, self.outvars = self._get_vars() + + def _get_codes(self): + inarg_num, outarg_num = None, None + all_inp, all_outp = [], [] + for _, inarg, outarg, ret, _ in self.signatures: + outp = re.sub(r'\*.*', '', ret) + outarg + if inarg_num is None: + inarg_num = len(inarg) + outarg_num = len(outp) + inp, outp = list(iter_variants(inarg, outp))[0] + all_inp.append(inp) + all_outp.append(outp) + + incodes = [] + for n in range(inarg_num): + codes = unique(map(lambda x: x[n], all_inp)) + codes.sort() + incodes.append(''.join(codes)) + outcodes = [] + for n in range(outarg_num): + codes = unique(map(lambda x: x[n], all_outp)) + codes.sort() + outcodes.append(''.join(codes)) + + return tuple(incodes), tuple(outcodes) + + def _get_types(self, codes): + all_types = [] + fused_types = set() + for code in codes: + if len(code) == 1: + # It's not a fused type + all_types.append((CY_TYPES[code], code)) + else: + # It's a fused type + fused_type, dec = generate_fused_type(code) + fused_types.add(dec) + all_types.append((fused_type, code)) + return all_types, fused_types + + def _get_vars(self): + invars = [] + for n in range(len(self.intypes)): + invars.append("x{}".format(n)) + outvars = [] + for n in range(len(self.outtypes)): + outvars.append("y{}".format(n)) + return invars, outvars + + def _get_conditional(self, types, codes, adverb): + """Generate an if/elif/else clause that selects a specialization of + fused types. + + """ + clauses = [] + seen = set() + for (typ, typcode), code in zip(types, codes): + if len(typcode) == 1: + continue + if typ not in seen: + clauses.append("{} is {}".format(typ, underscore(CY_TYPES[code]))) + seen.add(typ) + if clauses and adverb != "else": + line = "{} {}:".format(adverb, " and ".join(clauses)) + elif clauses and adverb == "else": + line = "else:" + else: + line = None + return line + + def _get_incallvars(self, intypes, c): + """Generate pure input variables to a specialization, + i.e. variables that aren't used to return a value. + + """ + incallvars = [] + for n, intype in enumerate(intypes): + var = self.invars[n] + if c and intype == "double complex": + var = npy_cdouble_from_double_complex(var) + incallvars.append(var) + return incallvars + + def _get_outcallvars(self, outtypes, c): + """Generate output variables to a specialization, + i.e. pointers that are used to return values. 
+ + """ + outcallvars, tmpvars, casts = [], [], [] + # If there are more out variables than out types, we want the + # tail of the out variables + start = len(self.outvars) - len(outtypes) + outvars = self.outvars[start:] + for n, (var, outtype) in enumerate(zip(outvars, outtypes)): + if c and outtype == "double complex": + tmp = "tmp{}".format(n) + tmpvars.append(tmp) + outcallvars.append("&{}".format(tmp)) + tmpcast = double_complex_from_npy_cdouble(tmp) + casts.append("{}[0] = {}".format(var, tmpcast)) + else: + outcallvars.append("{}".format(var)) + return outcallvars, tmpvars, casts + + def _get_nan_decs(self): + """Set all variables to nan for specializations of fused types for + which don't have signatures. + + """ + # Set non fused-type variables to nan + tab = " "*4 + fused_types, lines = [], [tab + "else:"] + seen = set() + for outvar, outtype, code in zip(self.outvars, self.outtypes, self.outcodes): + if len(code) == 1: + line = "{}[0] = {}".format(outvar, NAN_VALUE[code]) + lines.append(2*tab + line) + else: + fused_type = outtype + name, _ = fused_type + if name not in seen: + fused_types.append(fused_type) + seen.add(name) + if not fused_types: + return lines + + # Set fused-type variables to nan + all_codes = [] + for fused_type in fused_types: + _, codes = fused_type + all_codes.append(codes) + all_codes = tuple(all_codes) + + codelens = list(map(lambda x: len(x), all_codes)) + last = numpy.product(codelens) - 1 + for m, codes in enumerate(itertools.product(*all_codes)): + fused_codes, decs = [], [] + for n, fused_type in enumerate(fused_types): + code = codes[n] + fused_codes.append(underscore(CY_TYPES[code])) + for nn, outvar in enumerate(self.outvars): + if self.outtypes[nn] == fused_type: + line = "{}[0] = {}".format(outvar, NAN_VALUE[code]) + decs.append(line) + if m == 0: + adverb = "if" + elif m == last: + adverb = "else" + else: + adverb = "elif" + cond = self._get_conditional(fused_types, codes, adverb) + lines.append(2*tab + cond) + lines.extend(map(lambda x: 3*tab + x, decs)) + return lines + + def _get_tmp_decs(self, all_tmpvars): + """Generate the declarations of any necessary temporary + variables. + + """ + tab = " "*4 + tmpvars = list(all_tmpvars) + tmpvars.sort() + tmpdecs = [] + for tmpvar in tmpvars: + line = "cdef npy_cdouble {}".format(tmpvar) + tmpdecs.append(tab + line) + return tmpdecs + + def _get_python_wrap(self): + """Generate a python wrapper for functions which pass their + arguments as pointers. + + """ + tab = " "*4 + body, callvars = [], [] + for (intype, _), invar in zip(self.intypes, self.invars): + callvars.append("{} {}".format(intype, invar)) + line = "def _{}_pywrap({}):".format(self.name, ", ".join(callvars)) + body.append(line) + for (outtype, _), outvar in zip(self.outtypes, self.outvars): + line = "cdef {} {}".format(outtype, outvar) + body.append(tab + line) + addr_outvars = map(lambda x: "&{}".format(x), self.outvars) + line = "{}({}, {})".format(self.name, ", ".join(self.invars), + ", ".join(addr_outvars)) + body.append(tab + line) + line = "return {}".format(", ".join(self.outvars)) + body.append(tab + line) + body = "\n".join(body) + return body + + def _get_common(self, signum, sig): + """Generate code common to all the _generate_* methods.""" + tab = " "*4 + func_name, incodes, outcodes, retcode, header = sig + # Convert ints to longs; cf. 
iter_variants() + incodes = incodes.replace('i', 'l') + outcodes = outcodes.replace('i', 'l') + retcode = retcode.replace('i', 'l') + + if header.endswith("h"): + c = True + else: + c = False + if header.endswith("++"): + cpp = True + else: + cpp = False + + intypes = list(map(lambda x: CY_TYPES[x], incodes)) + outtypes = list(map(lambda x: CY_TYPES[x], outcodes)) + retcode = re.sub(r'\*.*', '', retcode) + if not retcode: + retcode = 'v' + rettype = CY_TYPES[retcode] + + if cpp: + # Functions from _ufuncs_cxx are exported as a void* + # pointers; cast them to the correct types + func_name = "scipy.special._ufuncs_cxx._export_{}".format(func_name) + func_name = "(<{}(*)({}) nogil>{})"\ + .format(rettype, ", ".join(intypes + outtypes), func_name) + else: + func_name = self.cython_func_name(func_name, specialized=True) + + if signum == 0: + adverb = "if" + else: + adverb = "elif" + cond = self._get_conditional(self.intypes, incodes, adverb) + if cond: + lines = [tab + cond] + sp = 2*tab + else: + lines = [] + sp = tab + + return func_name, incodes, outcodes, retcode, \ + intypes, outtypes, rettype, c, lines, sp + + def _generate_from_return_and_no_outargs(self): + tab = " "*4 + specs, body = [], [] + for signum, sig in enumerate(self.signatures): + func_name, incodes, outcodes, retcode, intypes, outtypes, \ + rettype, c, lines, sp = self._get_common(signum, sig) + body.extend(lines) + + # Generate the call to the specialized function + callvars = self._get_incallvars(intypes, c) + call = "{}({})".format(func_name, ", ".join(callvars)) + if c and rettype == "double complex": + call = double_complex_from_npy_cdouble(call) + line = sp + "return {}".format(call) + body.append(line) + sig = "{}->{}".format(incodes, retcode) + specs.append(sig) + + if len(specs) > 1: + # Return nan for signatures without a specialization + body.append(tab + "else:") + outtype, outcodes = self.outtypes[0] + last = len(outcodes) - 1 + if len(outcodes) == 1: + line = "return {}".format(NAN_VALUE[outcodes]) + body.append(2*tab + line) + else: + for n, code in enumerate(outcodes): + if n == 0: + adverb = "if" + elif n == last: + adverb = "else" + else: + adverb = "elif" + cond = self._get_conditional(self.outtypes, code, adverb) + body.append(2*tab + cond) + line = "return {}".format(NAN_VALUE[code]) + body.append(3*tab + line) + + # Generate the head of the function + callvars, head = [], [] + for n, (intype, _) in enumerate(self.intypes): + callvars.append("{} {}".format(intype, self.invars[n])) + (outtype, _) = self.outtypes[0] + dec = "cpdef {} {}({}) nogil".format(outtype, self.name, ", ".join(callvars)) + head.append(dec + ":") + head.append(tab + '"""{}"""'.format(self.doc)) + + src = "\n".join(head + body) + return dec, src, specs + + def _generate_from_outargs_and_no_return(self): + tab = " "*4 + all_tmpvars = set() + specs, body = [], [] + for signum, sig in enumerate(self.signatures): + func_name, incodes, outcodes, retcode, intypes, outtypes, \ + rettype, c, lines, sp = self._get_common(signum, sig) + body.extend(lines) + + # Generate the call to the specialized function + callvars = self._get_incallvars(intypes, c) + outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c) + callvars.extend(outcallvars) + all_tmpvars.update(tmpvars) + + call = "{}({})".format(func_name, ", ".join(callvars)) + body.append(sp + call) + body.extend(map(lambda x: sp + x, casts)) + if len(outcodes) == 1: + sig = "{}->{}".format(incodes, outcodes) + specs.append(sig) + else: + sig = "{}*{}->v".format(incodes, outcodes) + 
specs.append(sig) + + if len(specs) > 1: + lines = self._get_nan_decs() + body.extend(lines) + + if len(self.outvars) == 1: + line = "return {}[0]".format(self.outvars[0]) + body.append(tab + line) + + # Generate the head of the function + callvars, head = [], [] + for invar, (intype, _) in zip(self.invars, self.intypes): + callvars.append("{} {}".format(intype, invar)) + if len(self.outvars) > 1: + for outvar, (outtype, _) in zip(self.outvars, self.outtypes): + callvars.append("{} *{}".format(outtype, outvar)) + if len(self.outvars) == 1: + outtype, _ = self.outtypes[0] + dec = "cpdef {} {}({}) nogil".format(outtype, self.name, ", ".join(callvars)) + else: + dec = "cdef void {}({}) nogil".format(self.name, ", ".join(callvars)) + head.append(dec + ":") + head.append(tab + '"""{}"""'.format(self.doc)) + if len(self.outvars) == 1: + outvar = self.outvars[0] + outtype, _ = self.outtypes[0] + line = "cdef {} {}".format(outtype, outvar) + head.append(tab + line) + head.extend(self._get_tmp_decs(all_tmpvars)) + + src = "\n".join(head + body) + return dec, src, specs + + def _generate_from_outargs_and_return(self): + tab = " "*4 + all_tmpvars = set() + specs, body = [], [] + for signum, sig in enumerate(self.signatures): + func_name, incodes, outcodes, retcode, intypes, outtypes, \ + rettype, c, lines, sp = self._get_common(signum, sig) + body.extend(lines) + + # Generate the call to the specialized function + callvars = self._get_incallvars(intypes, c) + outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c) + callvars.extend(outcallvars) + all_tmpvars.update(tmpvars) + call = "{}({})".format(func_name, ", ".join(callvars)) + if c and rettype == "double complex": + call = double_complex_from_npy_cdouble(call) + call = "{}[0] = {}".format(self.outvars[0], call) + body.append(sp + call) + body.extend(map(lambda x: sp + x, casts)) + sig = "{}*{}->v".format(incodes, outcodes + retcode) + specs.append(sig) + + if len(specs) > 1: + lines = self._get_nan_decs() + body.extend(lines) + + # Generate the head of the function + callvars, head = [], [] + for invar, (intype, _) in zip(self.invars, self.intypes): + callvars.append("{} {}".format(intype, invar)) + for outvar, (outtype, _) in zip(self.outvars, self.outtypes): + callvars.append("{} *{}".format(outtype, outvar)) + dec = "cdef void {}({}) nogil".format(self.name, ", ".join(callvars)) + head.append(dec + ":") + head.append(tab + '"""{}"""'.format(self.doc)) + head.extend(self._get_tmp_decs(all_tmpvars)) + + src = "\n".join(head + body) + return dec, src, specs + + def generate(self): + _, _, outcodes, retcode, _ = self.signatures[0] + retcode = re.sub(r'\*.*', '', retcode) + if not retcode: + retcode = 'v' + + if len(outcodes) == 0 and retcode != 'v': + dec, src, specs = self._generate_from_return_and_no_outargs() + elif len(outcodes) > 0 and retcode == 'v': + dec, src, specs = self._generate_from_outargs_and_no_return() + elif len(outcodes) > 0 and retcode != 'v': + dec, src, specs = self._generate_from_outargs_and_return() + else: + raise ValueError("Invalid signature") + + if len(self.outvars) > 1: + wrap = self._get_python_wrap() + else: + wrap = None + + return dec, src, specs, self.fused_types, wrap + + +def get_declaration(ufunc, c_name, c_proto, cy_proto, header, proto_h_filename): + """ + Construct a Cython declaration of a function coming either from a + pxd or a header file. Do sufficient tricks to enable compile-time + type checking against the signature expected by the ufunc. 
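+
+    For a pxd-declared kernel ``f`` with Cython prototype
+    ``double (*)(double) nogil``, the emitted compile-time check looks
+    like (illustrative)::
+
+        ctypedef double _proto_f_t(double) nogil
+        cdef _proto_f_t *_proto_f_t_var = &_func_f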
+ """ + + defs = [] + defs_h = [] + + var_name = c_name.replace('[', '_').replace(']', '_').replace(' ', '_') + + if header.endswith('.pxd'): + defs.append("from .%s cimport %s as %s" % ( + header[:-4], ufunc.cython_func_name(c_name, prefix=""), + ufunc.cython_func_name(c_name))) + + # check function signature at compile time + proto_name = '_proto_%s_t' % var_name + defs.append("ctypedef %s" % (cy_proto.replace('(*)', proto_name))) + defs.append("cdef %s *%s_var = &%s" % ( + proto_name, proto_name, ufunc.cython_func_name(c_name, specialized=True))) + else: + # redeclare the function, so that the assumed + # signature is checked at compile time + new_name = "%s \"%s\"" % (ufunc.cython_func_name(c_name), c_name) + defs.append("cdef extern from \"%s\":" % proto_h_filename) + defs.append(" cdef %s" % (cy_proto.replace('(*)', new_name))) + defs_h.append("#include \"%s\"" % header) + defs_h.append("%s;" % (c_proto.replace('(*)', c_name))) + + return defs, defs_h, var_name + + +def generate_ufuncs(fn_prefix, cxx_fn_prefix, ufuncs): + filename = fn_prefix + ".pyx" + proto_h_filename = fn_prefix + '_defs.h' + + cxx_proto_h_filename = cxx_fn_prefix + '_defs.h' + cxx_pyx_filename = cxx_fn_prefix + ".pyx" + cxx_pxd_filename = cxx_fn_prefix + ".pxd" + + toplevel = "" + + # for _ufuncs* + defs = [] + defs_h = [] + all_loops = {} + + # for _ufuncs_cxx* + cxx_defs = [] + cxx_pxd_defs = [ + "from . cimport sf_error", + "cdef void _set_action(sf_error.sf_error_t, sf_error.sf_action_t) nogil" + ] + cxx_defs_h = [] + + ufuncs.sort(key=lambda u: u.name) + + for ufunc in ufuncs: + # generate function declaration and type checking snippets + cfuncs = ufunc.get_prototypes() + for c_name, c_proto, cy_proto, header in cfuncs: + if header.endswith('++'): + header = header[:-2] + + # for the CXX module + item_defs, item_defs_h, var_name = get_declaration(ufunc, c_name, c_proto, cy_proto, + header, cxx_proto_h_filename) + cxx_defs.extend(item_defs) + cxx_defs_h.extend(item_defs_h) + + cxx_defs.append("cdef void *_export_%s = <void*>%s" % ( + var_name, ufunc.cython_func_name(c_name, specialized=True, override=False))) + cxx_pxd_defs.append("cdef void *_export_%s" % (var_name,)) + + # let cython grab the function pointer from the c++ shared library + ufunc.function_name_overrides[c_name] = "scipy.special._ufuncs_cxx._export_" + var_name + else: + # usual case + item_defs, item_defs_h, _ = get_declaration(ufunc, c_name, c_proto, cy_proto, header, + proto_h_filename) + defs.extend(item_defs) + defs_h.extend(item_defs_h) + + # ufunc creation code snippet + t = ufunc.generate(all_loops) + toplevel += t + "\n" + + # Produce output + toplevel = "\n".join(sorted(all_loops.values()) + defs + [toplevel]) + + with open(filename, 'w') as f: + f.write(UFUNCS_EXTRA_CODE_COMMON) + f.write(UFUNCS_EXTRA_CODE) + f.write("\n") + f.write(toplevel) + f.write(UFUNCS_EXTRA_CODE_BOTTOM) + + defs_h = unique(defs_h) + with open(proto_h_filename, 'w') as f: + f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n") + f.write("\n".join(defs_h)) + f.write("\n#endif\n") + + cxx_defs_h = unique(cxx_defs_h) + with open(cxx_proto_h_filename, 'w') as f: + f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n") + f.write("\n".join(cxx_defs_h)) + f.write("\n#endif\n") + + with open(cxx_pyx_filename, 'w') as f: + f.write(UFUNCS_EXTRA_CODE_COMMON) + f.write("\n") + f.write("\n".join(cxx_defs)) + f.write("\n# distutils: language = c++\n") + + with open(cxx_pxd_filename, 'w') as f: + f.write("\n".join(cxx_pxd_defs)) + + +def 
generate_fused_funcs(modname, ufunc_fn_prefix, fused_funcs): + pxdfile = modname + ".pxd" + pyxfile = modname + ".pyx" + proto_h_filename = ufunc_fn_prefix + '_defs.h' + + sources = [] + declarations = [] + # Code for benchmarks + bench_aux = [] + fused_types = set() + # Parameters for the tests + doc = [] + defs = [] + + for func in fused_funcs: + if func.name.startswith("_"): + # Don't try to deal with functions that have extra layers + # of wrappers. + continue + + # Get the function declaration for the .pxd and the source + # code for the .pyx + dec, src, specs, func_fused_types, wrap = func.generate() + declarations.append(dec) + sources.append(src) + if wrap: + sources.append(wrap) + fused_types.update(func_fused_types) + + # Declare the specializations + cfuncs = func.get_prototypes(nptypes_for_h=True) + for c_name, c_proto, cy_proto, header in cfuncs: + if header.endswith('++'): + # We grab the c++ functions from the c++ module + continue + item_defs, _, _ = get_declaration(func, c_name, c_proto, + cy_proto, header, + proto_h_filename) + defs.extend(item_defs) + + # Add a line to the documentation + doc.append(generate_doc(func.name, specs)) + + # Generate code for benchmarks + if func.name in CYTHON_SPECIAL_BENCHFUNCS: + for codes in CYTHON_SPECIAL_BENCHFUNCS[func.name]: + pybench, cybench = generate_bench(func.name, codes) + bench_aux.extend([pybench, cybench]) + + fused_types = list(fused_types) + fused_types.sort() + + with open(pxdfile, 'w') as f: + f.write(CYTHON_SPECIAL_PXD) + f.write("\n") + f.write("\n\n".join(fused_types)) + f.write("\n\n") + f.write("\n".join(declarations)) + with open(pyxfile, 'w') as f: + header = CYTHON_SPECIAL_PYX + header = header.replace("FUNCLIST", "\n".join(doc)) + f.write(header) + f.write("\n") + f.write("\n".join(defs)) + f.write("\n\n") + f.write("\n\n".join(sources)) + f.write("\n\n") + f.write("\n\n".join(bench_aux)) + + +def unique(lst): + """ + Return a list without repeated entries (first occurrence is kept), + preserving order. 
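+
+    For example (an illustrative doctest, added here)::
+
+        >>> unique([3, 1, 3, 2, 1])
+        [3, 1, 2]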
+ """ + seen = set() + new_lst = [] + for item in lst: + if item in seen: + continue + seen.add(item) + new_lst.append(item) + return new_lst + + +def all_newer(src_files, dst_files): + from distutils.dep_util import newer + return all(os.path.exists(dst) and newer(dst, src) + for dst in dst_files for src in src_files) + + +def main(): + p = optparse.OptionParser(usage=(__doc__ or '').strip()) + options, args = p.parse_args() + if len(args) != 0: + p.error('invalid number of arguments') + + pwd = os.path.dirname(__file__) + src_files = (os.path.abspath(__file__), + os.path.abspath(os.path.join(pwd, 'functions.json')), + os.path.abspath(os.path.join(pwd, 'add_newdocs.py'))) + dst_files = ('_ufuncs.pyx', + '_ufuncs_defs.h', + '_ufuncs_cxx.pyx', + '_ufuncs_cxx.pxd', + '_ufuncs_cxx_defs.h', + 'cython_special.pyx', + 'cython_special.pxd') + + os.chdir(BASE_DIR) + + if all_newer(src_files, dst_files): + print("scipy/special/_generate_pyx.py: all files up-to-date") + return + + ufuncs, fused_funcs = [], [] + with open('functions.json') as data: + functions = json.load(data) + for f, sig in functions.items(): + ufuncs.append(Ufunc(f, sig)) + fused_funcs.append(FusedFunc(f, sig)) + generate_ufuncs("_ufuncs", "_ufuncs_cxx", ufuncs) + generate_fused_funcs("cython_special", "_ufuncs", fused_funcs) + + +if __name__ == "__main__": + main() diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_generate_pyx.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_generate_pyx.pyc new file mode 100644 index 0000000..5523b67 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_generate_pyx.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_logsumexp.py b/project/venv/lib/python2.7/site-packages/scipy/special/_logsumexp.py new file mode 100644 index 0000000..2e4bbc6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_logsumexp.py @@ -0,0 +1,215 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy._lib._util import _asarray_validated + +__all__ = ["logsumexp", "softmax"] + + +def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False): + """Compute the log of the sum of exponentials of input elements. + + Parameters + ---------- + a : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes over which the sum is taken. By default `axis` is None, + and all elements are summed. + + .. versionadded:: 0.11.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the original array. + + .. versionadded:: 0.15.0 + b : array-like, optional + Scaling factor for exp(`a`) must be of the same shape as `a` or + broadcastable to `a`. These values may be negative in order to + implement subtraction. + + .. versionadded:: 0.12.0 + return_sign : bool, optional + If this is set to True, the result will be a pair containing sign + information; if False, results that are negative will be returned + as NaN. Default is False (no sign information). + + .. versionadded:: 0.16.0 + + Returns + ------- + res : ndarray + The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically + more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))`` + is returned. 
+ sgn : ndarray + If return_sign is True, this will be an array of floating-point + numbers matching res and +1, 0, or -1 depending on the sign + of the result. If False, only one result is returned. + + See Also + -------- + numpy.logaddexp, numpy.logaddexp2 + + Notes + ----- + Numpy has a logaddexp function which is very similar to `logsumexp`, but + only handles two arguments. `logaddexp.reduce` is similar to this + function, but may be less stable. + + Examples + -------- + >>> from scipy.special import logsumexp + >>> a = np.arange(10) + >>> np.log(np.sum(np.exp(a))) + 9.4586297444267107 + >>> logsumexp(a) + 9.4586297444267107 + + With weights + + >>> a = np.arange(10) + >>> b = np.arange(10, 0, -1) + >>> logsumexp(a, b=b) + 9.9170178533034665 + >>> np.log(np.sum(b*np.exp(a))) + 9.9170178533034647 + + Returning a sign flag + + >>> logsumexp([1,2],b=[1,-1],return_sign=True) + (1.5413248546129181, -1.0) + + Notice that `logsumexp` does not directly support masked arrays. To use it + on a masked array, convert the mask into zero weights: + + >>> a = np.ma.array([np.log(2), 2, np.log(3)], + ... mask=[False, True, False]) + >>> b = (~a.mask).astype(int) + >>> logsumexp(a.data, b=b), np.log(5) + 1.6094379124341005, 1.6094379124341005 + + """ + a = _asarray_validated(a, check_finite=False) + if b is not None: + a, b = np.broadcast_arrays(a, b) + if np.any(b == 0): + a = a + 0. # promote to at least float + a[b == 0] = -np.inf + + a_max = np.amax(a, axis=axis, keepdims=True) + + if a_max.ndim > 0: + a_max[~np.isfinite(a_max)] = 0 + elif not np.isfinite(a_max): + a_max = 0 + + if b is not None: + b = np.asarray(b) + tmp = b * np.exp(a - a_max) + else: + tmp = np.exp(a - a_max) + + # suppress warnings about log of zero + with np.errstate(divide='ignore'): + s = np.sum(tmp, axis=axis, keepdims=keepdims) + if return_sign: + sgn = np.sign(s) + s *= sgn # /= makes more sense but we need zero -> zero + out = np.log(s) + + if not keepdims: + a_max = np.squeeze(a_max, axis=axis) + out += a_max + + if return_sign: + return out, sgn + else: + return out + + +def softmax(x, axis=None): + r""" + Softmax function + + The softmax function transforms each element of a collection by + computing the exponential of each element divided by the sum of the + exponentials of all the elements. That is, if `x` is a one-dimensional + numpy array:: + + softmax(x) = np.exp(x)/sum(np.exp(x)) + + Parameters + ---------- + x : array_like + Input array. + axis : int or tuple of ints, optional + Axis to compute values along. Default is None and softmax will be + computed over the entire array `x`. + + Returns + ------- + s : ndarray + An array the same shape as `x`. The result will sum to 1 along the + specified axis. + + Notes + ----- + The formula for the softmax function :math:`\sigma(x)` for a vector + :math:`x = \{x_0, x_1, ..., x_{n-1}\}` is + + .. math:: \sigma(x)_j = \frac{e^{x_j}}{\sum_k e^{x_k}} + + The `softmax` function is the gradient of `logsumexp`. + + .. versionadded:: 1.2.0 + + Examples + -------- + >>> from scipy.special import softmax + >>> np.set_printoptions(precision=5) + + >>> x = np.array([[1, 0.5, 0.2, 3], + ... [1, -1, 7, 3], + ... [2, 12, 13, 3]]) + ... + + Compute the softmax transformation over the entire array. 
+ + >>> m = softmax(x) + >>> m + array([[ 4.48309e-06, 2.71913e-06, 2.01438e-06, 3.31258e-05], + [ 4.48309e-06, 6.06720e-07, 1.80861e-03, 3.31258e-05], + [ 1.21863e-05, 2.68421e-01, 7.29644e-01, 3.31258e-05]]) + + >>> m.sum() + 1.0000000000000002 + + Compute the softmax transformation along the first axis (i.e. the columns). + + >>> m = softmax(x, axis=0) + + >>> m + array([[ 2.11942e-01, 1.01300e-05, 2.75394e-06, 3.33333e-01], + [ 2.11942e-01, 2.26030e-06, 2.47262e-03, 3.33333e-01], + [ 5.76117e-01, 9.99988e-01, 9.97525e-01, 3.33333e-01]]) + + >>> m.sum(axis=0) + array([ 1., 1., 1., 1.]) + + Compute the softmax transformation along the second axis (i.e. the rows). + + >>> m = softmax(x, axis=1) + >>> m + array([[ 1.05877e-01, 6.42177e-02, 4.75736e-02, 7.82332e-01], + [ 2.42746e-03, 3.28521e-04, 9.79307e-01, 1.79366e-02], + [ 1.22094e-05, 2.68929e-01, 7.31025e-01, 3.31885e-05]]) + + >>> m.sum(axis=1) + array([ 1., 1., 1.]) + + """ + + # compute in log space for numerical stability + return np.exp(x - logsumexp(x, axis=axis, keepdims=True)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_logsumexp.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_logsumexp.pyc new file mode 100644 index 0000000..adec3e0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_logsumexp.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_mptestutils.py b/project/venv/lib/python2.7/site-packages/scipy/special/_mptestutils.py new file mode 100644 index 0000000..4656235 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_mptestutils.py @@ -0,0 +1,455 @@ +from __future__ import division, print_function, absolute_import + +import os +import sys +import time + +import numpy as np +from numpy.testing import assert_ +import pytest + +from scipy._lib.six import reraise +from scipy.special._testutils import assert_func_equal + +try: + import mpmath +except ImportError: + pass + + +# ------------------------------------------------------------------------------ +# Machinery for systematic tests with mpmath +# ------------------------------------------------------------------------------ + +class Arg(object): + """Generate a set of numbers on the real axis, concentrating on + 'interesting' regions and covering all orders of magnitude. + + """ + + def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True): + if a > b: + raise ValueError("a should be less than or equal to b") + if a == -np.inf: + a = -0.5*np.finfo(float).max + if b == np.inf: + b = 0.5*np.finfo(float).max + self.a, self.b = a, b + + self.inclusive_a, self.inclusive_b = inclusive_a, inclusive_b + + def _positive_values(self, a, b, n): + if a < 0: + raise ValueError("a should be positive") + + # Try to put half of the points into a linspace between a and + # 10 the other half in a logspace. + if n % 2 == 0: + nlogpts = n//2 + nlinpts = nlogpts + else: + nlogpts = n//2 + nlinpts = nlogpts + 1 + + if a >= 10: + # Outside of linspace range; just return a logspace. + pts = np.logspace(np.log10(a), np.log10(b), n) + elif a > 0 and b < 10: + # Outside of logspace range; just return a linspace + pts = np.linspace(a, b, n) + elif a > 0: + # Linspace between a and 10 and a logspace between 10 and + # b. 
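+            # (added note: e.g. a=2, b=1e6, n=10 gives 5 linear points
+            # in [2, 10) and 5 log-spaced points in [10, 1e6])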
+ linpts = np.linspace(a, 10, nlinpts, endpoint=False) + logpts = np.logspace(1, np.log10(b), nlogpts) + pts = np.hstack((linpts, logpts)) + elif a == 0 and b <= 10: + # Linspace between 0 and b and a logspace between 0 and + # the smallest positive point of the linspace + linpts = np.linspace(0, b, nlinpts) + if linpts.size > 1: + right = np.log10(linpts[1]) + else: + right = -30 + logpts = np.logspace(-30, right, nlogpts, endpoint=False) + pts = np.hstack((logpts, linpts)) + else: + # Linspace between 0 and 10, logspace between 0 and the + # smallest positive point of the linspace, and a logspace + # between 10 and b. + if nlogpts % 2 == 0: + nlogpts1 = nlogpts//2 + nlogpts2 = nlogpts1 + else: + nlogpts1 = nlogpts//2 + nlogpts2 = nlogpts1 + 1 + linpts = np.linspace(0, 10, nlinpts, endpoint=False) + if linpts.size > 1: + right = np.log10(linpts[1]) + else: + right = -30 + logpts1 = np.logspace(-30, right, nlogpts1, endpoint=False) + logpts2 = np.logspace(1, np.log10(b), nlogpts2) + pts = np.hstack((logpts1, linpts, logpts2)) + + return np.sort(pts) + + def values(self, n): + """Return an array containing n numbers.""" + a, b = self.a, self.b + if a == b: + return np.zeros(n) + + if not self.inclusive_a: + n += 1 + if not self.inclusive_b: + n += 1 + + if n % 2 == 0: + n1 = n//2 + n2 = n1 + else: + n1 = n//2 + n2 = n1 + 1 + + if a >= 0: + pospts = self._positive_values(a, b, n) + negpts = [] + elif b <= 0: + pospts = [] + negpts = -self._positive_values(-b, -a, n) + else: + pospts = self._positive_values(0, b, n1) + negpts = -self._positive_values(0, -a, n2 + 1) + # Don't want to get zero twice + negpts = negpts[1:] + pts = np.hstack((negpts[::-1], pospts)) + + if not self.inclusive_a: + pts = pts[1:] + if not self.inclusive_b: + pts = pts[:-1] + return pts + + +class FixedArg(object): + def __init__(self, values): + self._values = np.asarray(values) + + def values(self, n): + return self._values + + +class ComplexArg(object): + def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)): + self.real = Arg(a.real, b.real) + self.imag = Arg(a.imag, b.imag) + + def values(self, n): + m = int(np.floor(np.sqrt(n))) + x = self.real.values(m) + y = self.imag.values(m + 1) + return (x[:,None] + 1j*y[None,:]).ravel() + + +class IntArg(object): + def __init__(self, a=-1000, b=1000): + self.a = a + self.b = b + + def values(self, n): + v1 = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int) + v2 = np.arange(-5, 5) + v = np.unique(np.r_[v1, v2]) + v = v[(v >= self.a) & (v < self.b)] + return v + + +def get_args(argspec, n): + if isinstance(argspec, np.ndarray): + args = argspec.copy() + else: + nargs = len(argspec) + ms = np.asarray([1.5 if isinstance(spec, ComplexArg) else 1.0 for spec in argspec]) + ms = (n**(ms/sum(ms))).astype(int) + 1 + + args = [] + for spec, m in zip(argspec, ms): + args.append(spec.values(m)) + args = np.array(np.broadcast_arrays(*np.ix_(*args))).reshape(nargs, -1).T + + return args + + +class MpmathData(object): + def __init__(self, scipy_func, mpmath_func, arg_spec, name=None, + dps=None, prec=None, n=None, rtol=1e-7, atol=1e-300, + ignore_inf_sign=False, distinguish_nan_and_inf=True, + nan_ok=True, param_filter=None): + + # mpmath tests are really slow (see gh-6989). 
Use a small number of + # points by default, increase back to 5000 (old default) if XSLOW is + # set + if n is None: + try: + is_xslow = int(os.environ.get('SCIPY_XSLOW', '0')) + except ValueError: + is_xslow = False + + n = 5000 if is_xslow else 500 + + self.scipy_func = scipy_func + self.mpmath_func = mpmath_func + self.arg_spec = arg_spec + self.dps = dps + self.prec = prec + self.n = n + self.rtol = rtol + self.atol = atol + self.ignore_inf_sign = ignore_inf_sign + self.nan_ok = nan_ok + if isinstance(self.arg_spec, np.ndarray): + self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating) + else: + self.is_complex = any([isinstance(arg, ComplexArg) for arg in self.arg_spec]) + self.ignore_inf_sign = ignore_inf_sign + self.distinguish_nan_and_inf = distinguish_nan_and_inf + if not name or name == '<lambda>': + name = getattr(scipy_func, '__name__', None) + if not name or name == '<lambda>': + name = getattr(mpmath_func, '__name__', None) + self.name = name + self.param_filter = param_filter + + def check(self): + np.random.seed(1234) + + # Generate values for the arguments + argarr = get_args(self.arg_spec, self.n) + + # Check + old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec + try: + if self.dps is not None: + dps_list = [self.dps] + else: + dps_list = [20] + if self.prec is not None: + mpmath.mp.prec = self.prec + + # Proper casting of mpmath input and output types. Using + # native mpmath types as inputs gives improved precision + # in some cases. + if np.issubdtype(argarr.dtype, np.complexfloating): + pytype = mpc2complex + + def mptype(x): + return mpmath.mpc(complex(x)) + else: + def mptype(x): + return mpmath.mpf(float(x)) + + def pytype(x): + if abs(x.imag) > 1e-16*(1 + abs(x.real)): + return np.nan + else: + return mpf2float(x.real) + + # Try out different dps until one (or none) works + for j, dps in enumerate(dps_list): + mpmath.mp.dps = dps + + try: + assert_func_equal(self.scipy_func, + lambda *a: pytype(self.mpmath_func(*map(mptype, a))), + argarr, + vectorized=False, + rtol=self.rtol, atol=self.atol, + ignore_inf_sign=self.ignore_inf_sign, + distinguish_nan_and_inf=self.distinguish_nan_and_inf, + nan_ok=self.nan_ok, + param_filter=self.param_filter) + break + except AssertionError: + if j >= len(dps_list)-1: + reraise(*sys.exc_info()) + finally: + mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec + + def __repr__(self): + if self.is_complex: + return "<MpmathData: %s (complex)>" % (self.name,) + else: + return "<MpmathData: %s>" % (self.name,) + + +def assert_mpmath_equal(*a, **kw): + d = MpmathData(*a, **kw) + d.check() + + +def nonfunctional_tooslow(func): + return pytest.mark.skip(reason=" Test not yet functional (too slow), needs more work.")(func) + + +# ------------------------------------------------------------------------------ +# Tools for dealing with mpmath quirks +# ------------------------------------------------------------------------------ + +def mpf2float(x): + """ + Convert an mpf to the nearest floating point number. 
Just using + float directly doesn't work because of results like this: + + with mp.workdps(50): + float(mpf("0.99999999999999999")) = 0.9999999999999999 + + """ + return float(mpmath.nstr(x, 17, min_fixed=0, max_fixed=0)) + + +def mpc2complex(x): + return complex(mpf2float(x.real), mpf2float(x.imag)) + + +def trace_args(func): + def tofloat(x): + if isinstance(x, mpmath.mpc): + return complex(x) + else: + return float(x) + + def wrap(*a, **kw): + sys.stderr.write("%r: " % (tuple(map(tofloat, a)),)) + sys.stderr.flush() + try: + r = func(*a, **kw) + sys.stderr.write("-> %r" % r) + finally: + sys.stderr.write("\n") + sys.stderr.flush() + return r + return wrap + + +try: + import posix + import signal + POSIX = ('setitimer' in dir(signal)) +except ImportError: + POSIX = False + + +class TimeoutError(Exception): + pass + + +def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True): + """ + Decorator for setting a timeout for pure-Python functions. + + If the function does not return within `timeout` seconds, the + value `return_val` is returned instead. + + On POSIX this uses SIGALRM by default. On non-POSIX, settrace is + used. Do not use this with threads: the SIGALRM implementation + does probably not work well. The settrace implementation only + traces the current thread. + + The settrace implementation slows down execution speed. Slowdown + by a factor around 10 is probably typical. + """ + if POSIX and use_sigalrm: + def sigalrm_handler(signum, frame): + raise TimeoutError() + + def deco(func): + def wrap(*a, **kw): + old_handler = signal.signal(signal.SIGALRM, sigalrm_handler) + signal.setitimer(signal.ITIMER_REAL, timeout) + try: + return func(*a, **kw) + except TimeoutError: + return return_val + finally: + signal.setitimer(signal.ITIMER_REAL, 0) + signal.signal(signal.SIGALRM, old_handler) + return wrap + else: + def deco(func): + def wrap(*a, **kw): + start_time = time.time() + + def trace(frame, event, arg): + if time.time() - start_time > timeout: + raise TimeoutError() + return trace + sys.settrace(trace) + try: + return func(*a, **kw) + except TimeoutError: + sys.settrace(None) + return return_val + finally: + sys.settrace(None) + return wrap + return deco + + +def exception_to_nan(func): + """Decorate function to return nan if it raises an exception""" + def wrap(*a, **kw): + try: + return func(*a, **kw) + except Exception: + return np.nan + return wrap + + +def inf_to_nan(func): + """Decorate function to return nan if it returns inf""" + def wrap(*a, **kw): + v = func(*a, **kw) + if not np.isfinite(v): + return np.nan + return v + return wrap + + +def mp_assert_allclose(res, std, atol=0, rtol=1e-17): + """ + Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it + can be done to higher precision than double. 
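+
+    A minimal usage sketch (the values are chosen only for illustration;
+    the comparison passes because the two sides agree to ~30 digits)::
+
+        with mpmath.workdps(30):
+            mp_assert_allclose([mpmath.mpf('0.1') + mpmath.mpf('0.2')],
+                               [mpmath.mpf('0.3')])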
+ + """ + try: + len(res) + except TypeError: + res = list(res) + + n = len(std) + if len(res) != n: + raise AssertionError("Lengths of inputs not equal.") + + failures = [] + for k in range(n): + try: + assert_(mpmath.fabs(res[k] - std[k]) <= atol + rtol*mpmath.fabs(std[k])) + except AssertionError: + failures.append(k) + + ndigits = int(abs(np.log10(rtol))) + msg = [""] + msg.append("Bad results ({} out of {}) for the following points:" + .format(len(failures), n)) + for k in failures: + resrep = mpmath.nstr(res[k], ndigits, min_fixed=0, max_fixed=0) + stdrep = mpmath.nstr(std[k], ndigits, min_fixed=0, max_fixed=0) + if std[k] == 0: + rdiff = "inf" + else: + rdiff = mpmath.fabs((res[k] - std[k])/std[k]) + rdiff = mpmath.nstr(rdiff, 3) + msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff)) + if failures: + assert_(False, "\n".join(msg)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_mptestutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_mptestutils.pyc new file mode 100644 index 0000000..cca372c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_mptestutils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/__init__.pyc new file mode 100644 index 0000000..cf6711b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/expn_asy.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/expn_asy.py new file mode 100644 index 0000000..2a33a09 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/expn_asy.py @@ -0,0 +1,61 @@ +"""Precompute the polynomials for the asymptotic expansion of the +generalized exponential integral. + +Sources +------- +[1] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/8.20#ii + +""" +from __future__ import division, print_function, absolute_import + +import os +from scipy._lib._numpy_compat import suppress_warnings + +try: + # Can remove when sympy #11255 is resolved; see + # https://github.com/sympy/sympy/issues/11255 + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "inspect.getargspec.. is deprecated") + import sympy + from sympy import Poly + x = sympy.symbols('x') +except ImportError: + pass + + +def generate_A(K): + A = [Poly(1, x)] + for k in range(K): + A.append(Poly(1 - 2*k*x, x)*A[k] + Poly(x*(x + 1))*A[k].diff()) + return A + + +WARNING = """\ +/* This file was automatically generated by _precompute/expn_asy.py. + * Do not edit it manually! 
+ */ +""" + + +def main(): + print(__doc__) + fn = os.path.join('..', 'cephes', 'expn.h') + + K = 12 + A = generate_A(K) + with open(fn + '.new', 'w') as f: + f.write(WARNING) + f.write("#define nA {}\n".format(len(A))) + for k, Ak in enumerate(A): + tmp = ', '.join([str(x.evalf(18)) for x in Ak.coeffs()]) + f.write("static const double A{}[] = {{{}}};\n".format(k, tmp)) + tmp = ", ".join(["A{}".format(k) for k in range(K + 1)]) + f.write("static const double *A[] = {{{}}};\n".format(tmp)) + tmp = ", ".join([str(Ak.degree()) for Ak in A]) + f.write("static const int Adegs[] = {{{}}};\n".format(tmp)) + os.rename(fn + '.new', fn) + + +if __name__ == "__main__": + main() diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/expn_asy.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/expn_asy.pyc new file mode 100644 index 0000000..87be614 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/expn_asy.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_asy.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_asy.py new file mode 100644 index 0000000..2bbcf3c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_asy.py @@ -0,0 +1,119 @@ +""" +Precompute coefficients of Temme's asymptotic expansion for gammainc. + +This takes about 8 hours to run on a 2.3 GHz Macbook Pro with 4GB ram. + +Sources: +[1] NIST, "Digital Library of Mathematical Functions", + https://dlmf.nist.gov/ + +""" +from __future__ import division, print_function, absolute_import + +import os +from scipy.special._precompute.utils import lagrange_inversion + +try: + import mpmath as mp +except ImportError: + pass + + +def compute_a(n): + """a_k from DLMF 5.11.6""" + a = [mp.sqrt(2)/2] + for k in range(1, n): + ak = a[-1]/k + for j in range(1, len(a)): + ak -= a[j]*a[-j]/(j + 1) + ak /= a[0]*(1 + mp.mpf(1)/(k + 1)) + a.append(ak) + return a + + +def compute_g(n): + """g_k from DLMF 5.11.3/5.11.5""" + a = compute_a(2*n) + g = [] + for k in range(n): + g.append(mp.sqrt(2)*mp.rf(0.5, k)*a[2*k]) + return g + + +def eta(lam): + """Function from DLMF 8.12.1 shifted to be centered at 0.""" + if lam > 0: + return mp.sqrt(2*(lam - mp.log(lam + 1))) + elif lam < 0: + return -mp.sqrt(2*(lam - mp.log(lam + 1))) + else: + return 0 + + +def compute_alpha(n): + """alpha_n from DLMF 8.12.13""" + coeffs = mp.taylor(eta, 0, n - 1) + return lagrange_inversion(coeffs) + + +def compute_d(K, N): + """d_{k, n} from DLMF 8.12.12""" + M = N + 2*K + d0 = [-mp.mpf(1)/3] + alpha = compute_alpha(M + 2) + for n in range(1, M): + d0.append((n + 2)*alpha[n+2]) + d = [d0] + g = compute_g(K) + for k in range(1, K): + dk = [] + for n in range(M - 2*k): + dk.append((-1)**k*g[k]*d[0][n] + (n + 2)*d[k-1][n+2]) + d.append(dk) + for k in range(K): + d[k] = d[k][:N] + return d + + +header = \ +r"""/* This file was automatically generated by _precomp/gammainc.py. + * Do not edit it manually! 
+ */ + +#ifndef IGAM_H +#define IGAM_H + +#define K {} +#define N {} + +static const double d[K][N] = +{{""" + +footer = \ +r""" +#endif +""" + +def main(): + print(__doc__) + K = 25 + N = 25 + with mp.workdps(50): + d = compute_d(K, N) + fn = os.path.join(os.path.dirname(__file__), '..', 'cephes', 'igam.h') + with open(fn + '.new', 'w') as f: + f.write(header.format(K, N)) + for k, row in enumerate(d): + row = map(lambda x: mp.nstr(x, 17, min_fixed=0, max_fixed=0), row) + f.write('{') + f.write(", ".join(row)) + if k < K - 1: + f.write('},\n') + else: + f.write('}};\n') + f.write(footer) + os.rename(fn + '.new', fn) + + +if __name__ == "__main__": + main() diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_asy.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_asy.pyc new file mode 100644 index 0000000..928a322 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_asy.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_data.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_data.py new file mode 100644 index 0000000..0c9a19e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_data.py @@ -0,0 +1,126 @@ +"""Compute gammainc and gammaincc for large arguments and parameters +and save the values to data files for use in tests. We can't just +compare to mpmath's gammainc in test_mpmath.TestSystematic because it +would take too long. + +Note that mpmath's gammainc is computed using hypercomb, but since it +doesn't allow the user to increase the maximum number of terms used in +the series it doesn't converge for many arguments. To get around this +we copy the mpmath implementation but use more terms. + +This takes about 17 minutes to run on a 2.3 GHz Macbook Pro with 4GB +ram. + +Sources: +[1] Fredrik Johansson and others. mpmath: a Python library for + arbitrary-precision floating-point arithmetic (version 0.19), + December 2013. http://mpmath.org/. + +""" +from __future__ import division, print_function, absolute_import + +import os +from time import time +import numpy as np +from numpy import pi + +from scipy.special._mptestutils import mpf2float + +try: + import mpmath as mp +except ImportError: + pass + + +def gammainc(a, x, dps=50, maxterms=10**8): + """Compute gammainc exactly like mpmath does but allow for more + summands in hypercomb. See + + mpmath/functions/expintegrals.py#L134 + + in the mpmath github repository. + + """ + with mp.workdps(dps): + z, a, b = mp.mpf(a), mp.mpf(x), mp.mpf(x) + G = [z] + negb = mp.fneg(b, exact=True) + + def h(z): + T1 = [mp.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b + return (T1,) + + res = mp.hypercomb(h, [z], maxterms=maxterms) + return mpf2float(res) + + +def gammaincc(a, x, dps=50, maxterms=10**8): + """Compute gammaincc exactly like mpmath does but allow for more + terms in hypercomb. See + + mpmath/functions/expintegrals.py#L187 + + in the mpmath github repository. 
+
+    """
+    with mp.workdps(dps):
+        z, a = a, x
+
+        if mp.isint(z):
+            try:
+                # mpmath has a fast integer path
+                return mpf2float(mp.gammainc(z, a=a, regularized=True))
+            except mp.libmp.NoConvergence:
+                pass
+        nega = mp.fneg(a, exact=True)
+        G = [z]
+        # Use 2F0 series when possible; fall back to lower gamma representation
+        try:
+            def h(z):
+                r = z-1
+                return [([mp.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
+            return mpf2float(mp.hypercomb(h, [z], force_series=True))
+        except mp.libmp.NoConvergence:
+            def h(z):
+                T1 = [], [1, z-1], [z], G, [], [], 0
+                T2 = [-mp.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
+                return T1, T2
+            return mpf2float(mp.hypercomb(h, [z], maxterms=maxterms))
+
+
+def main():
+    t0 = time()
+    # It would be nice to have data for larger values, but either this
+    # requires prohibitively large precision (dps > 800) or mpmath has
+    # a bug. For example, gammainc(1e20, 1e20, dps=800) returns a
+    # value around 0.03, while the true value should be close to 0.5
+    # (DLMF 8.12.15).
+    print(__doc__)
+    pwd = os.path.dirname(__file__)
+    r = np.logspace(4, 14, 30)
+    ltheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(0.6)), 30)
+    utheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(1.4)), 30)
+
+    regimes = [(gammainc, ltheta), (gammaincc, utheta)]
+    for func, theta in regimes:
+        rg, thetag = np.meshgrid(r, theta)
+        a, x = rg*np.cos(thetag), rg*np.sin(thetag)
+        a, x = a.flatten(), x.flatten()
+        dataset = []
+        for i, (a0, x0) in enumerate(zip(a, x)):
+            if func == gammaincc:
+                # Exploit the fast integer path in gammaincc whenever
+                # possible so that the computation doesn't take too
+                # long
+                a0, x0 = np.floor(a0), np.floor(x0)
+            dataset.append((a0, x0, func(a0, x0)))
+        dataset = np.array(dataset)
+        filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
+                                '{}.txt'.format(func.__name__))
+        np.savetxt(filename, dataset)
+
+    print("{} minutes elapsed".format((time() - t0)/60))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_data.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_data.pyc
new file mode 100644
index 0000000..fa65592
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/gammainc_data.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/lambertw.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/lambertw.py
new file mode 100644
index 0000000..34ab915
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/lambertw.py
@@ -0,0 +1,72 @@
+"""Compute a Pade approximation for the principal branch of the
+Lambert W function around 0 and compare it to various other
+approximations.
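+
+For reference, around 0 the principal branch expands as
+
+    W(z) = z - z**2 + (3/2)*z**3 - (8/3)*z**4 + (125/24)*z**5 - ...
+
+(the general coefficient is (-n)**(n-1)/n!), which is the local behaviour
+the rational approximation is meant to capture.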
+ +""" +from __future__ import division, print_function, absolute_import + +import numpy as np + +try: + import mpmath + import matplotlib.pyplot as plt +except ImportError: + pass + + +def lambertw_pade(): + derivs = [] + for n in range(6): + derivs.append(mpmath.diff(mpmath.lambertw, 0, n=n)) + p, q = mpmath.pade(derivs, 3, 2) + return p, q + + +def main(): + print(__doc__) + with mpmath.workdps(50): + p, q = lambertw_pade() + p, q = p[::-1], q[::-1] + print("p = {}".format(p)) + print("q = {}".format(q)) + + x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75) + x, y = np.meshgrid(x, y) + z = x + 1j*y + lambertw_std = [] + for z0 in z.flatten(): + lambertw_std.append(complex(mpmath.lambertw(z0))) + lambertw_std = np.array(lambertw_std).reshape(x.shape) + + fig, axes = plt.subplots(nrows=3, ncols=1) + # Compare Pade approximation to true result + p = np.array([float(p0) for p0 in p]) + q = np.array([float(q0) for q0 in q]) + pade_approx = np.polyval(p, z)/np.polyval(q, z) + pade_err = abs(pade_approx - lambertw_std) + axes[0].pcolormesh(x, y, pade_err) + # Compare two terms of asymptotic series to true result + asy_approx = np.log(z) - np.log(np.log(z)) + asy_err = abs(asy_approx - lambertw_std) + axes[1].pcolormesh(x, y, asy_err) + # Compare two terms of the series around the branch point to the + # true result + p = np.sqrt(2*(np.exp(1)*z + 1)) + series_approx = -1 + p - p**2/3 + series_err = abs(series_approx - lambertw_std) + im = axes[2].pcolormesh(x, y, series_err) + + fig.colorbar(im, ax=axes.ravel().tolist()) + plt.show() + + fig, ax = plt.subplots(nrows=1, ncols=1) + pade_better = pade_err < asy_err + im = ax.pcolormesh(x, y, pade_better) + t = np.linspace(-0.3, 0.3) + ax.plot(-2.5*abs(t) - 0.2, t, 'r') + fig.colorbar(im, ax=ax) + plt.show() + + +if __name__ == '__main__': + main() diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/lambertw.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/lambertw.pyc new file mode 100644 index 0000000..bd064d8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/lambertw.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/loggamma.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/loggamma.py new file mode 100644 index 0000000..bbaee61 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/loggamma.py @@ -0,0 +1,46 @@ +"""Precompute series coefficients for log-Gamma.""" +from __future__ import division, print_function, absolute_import + +try: + import mpmath +except ImportError: + pass + + +def stirling_series(N): + coeffs = [] + with mpmath.workdps(100): + for n in range(1, N + 1): + coeffs.append(mpmath.bernoulli(2*n)/(2*n*(2*n - 1))) + return coeffs + + +def taylor_series_at_1(N): + coeffs = [] + with mpmath.workdps(100): + coeffs.append(-mpmath.euler) + for n in range(2, N + 1): + coeffs.append((-1)**n*mpmath.zeta(n)/n) + return coeffs + + +def main(): + print(__doc__) + print() + stirling_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0) + for x in stirling_series(8)[::-1]] + taylor_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0) + for x in taylor_series_at_1(23)[::-1]] + print("Stirling series coefficients") + print("----------------------------") + print("\n".join(stirling_coeffs)) + print() + print("Taylor series coefficients") + print("--------------------------") + print("\n".join(taylor_coeffs)) + print() + + +if __name__ == 
'__main__': + main() + diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/loggamma.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/loggamma.pyc new file mode 100644 index 0000000..88690a7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/loggamma.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/setup.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/setup.py new file mode 100644 index 0000000..7f20329 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/setup.py @@ -0,0 +1,13 @@ +from __future__ import division, print_function, absolute_import + + +def configuration(parent_name='special', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('_precompute', parent_name, top_path) + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration().todict()) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/setup.pyc new file mode 100644 index 0000000..ca647aa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/struve_convergence.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/struve_convergence.py new file mode 100644 index 0000000..d5e8e22 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/struve_convergence.py @@ -0,0 +1,122 @@ +""" +Convergence regions of the expansions used in ``struve.c`` + +Note that for v >> z both functions tend rapidly to 0, +and for v << -z, they tend to infinity. + +The floating-point functions over/underflow in the lower left and right +corners of the figure. + + +Figure legend +============= + +Red region + Power series is close (1e-12) to the mpmath result + +Blue region + Asymptotic series is close to the mpmath result + +Green region + Bessel series is close to the mpmath result + +Dotted colored lines + Boundaries of the regions + +Solid colored lines + Boundaries estimated by the routine itself. These will be used + for determining which of the results to use. 
+ +Black dashed line + The line z = 0.7*|v| + 12 + +""" +from __future__ import absolute_import, division, print_function + +import numpy as np +import matplotlib.pyplot as plt + +import mpmath + + +def err_metric(a, b, atol=1e-290): + m = abs(a - b) / (atol + abs(b)) + m[np.isinf(b) & (a == b)] = 0 + return m + + +def do_plot(is_h=True): + from scipy.special._ufuncs import (_struve_power_series, + _struve_asymp_large_z, + _struve_bessel_series) + + vs = np.linspace(-1000, 1000, 91) + zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]]) + + rp = _struve_power_series(vs[:,None], zs[None,:], is_h) + ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h) + rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h) + + mpmath.mp.dps = 50 + if is_h: + sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z))) + else: + sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z))) + ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:]) + + err_a = err_metric(ra[0], ex) + 1e-300 + err_p = err_metric(rp[0], ex) + 1e-300 + err_b = err_metric(rb[0], ex) + 1e-300 + + err_est_a = abs(ra[1]/ra[0]) + err_est_p = abs(rp[1]/rp[0]) + err_est_b = abs(rb[1]/rb[0]) + + z_cutoff = 0.7*abs(vs) + 12 + + levels = [-1000, -12] + + plt.cla() + + plt.hold(1) + plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1) + plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1) + plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1) + + plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':']) + plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':']) + plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':']) + + lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-']) + la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-']) + lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-']) + + plt.clabel(lp, fmt={-1000: 'P', -12: 'P'}) + plt.clabel(la, fmt={-1000: 'A', -12: 'A'}) + plt.clabel(lb, fmt={-1000: 'B', -12: 'B'}) + + plt.plot(vs, z_cutoff, 'k--') + + plt.xlim(vs.min(), vs.max()) + plt.ylim(zs.min(), zs.max()) + + plt.xlabel('v') + plt.ylabel('z') + + +def main(): + plt.clf() + plt.subplot(121) + do_plot(True) + plt.title('Struve H') + + plt.subplot(122) + do_plot(False) + plt.title('Struve L') + + plt.savefig('struve_convergence.png') + plt.show() + + +if __name__ == "__main__": + main() diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/struve_convergence.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/struve_convergence.pyc new file mode 100644 index 0000000..f116304 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/struve_convergence.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/utils.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/utils.py new file mode 100644 index 0000000..2835543 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/utils.py @@ -0,0 +1,46 @@ +from __future__ import division, print_function, absolute_import + +from scipy._lib._numpy_compat import suppress_warnings + +try: + import mpmath as mp +except ImportError: + pass + 
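+# (These imports are guarded so that this module can be imported, e.g.
+# during test collection, even when the optional developer dependencies
+# mpmath and sympy are not installed.)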
+try: + # Can remove when sympy #11255 is resolved; see + # https://github.com/sympy/sympy/issues/11255 + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "inspect.getargspec.. is deprecated") + from sympy.abc import x +except ImportError: + pass + + +def lagrange_inversion(a): + """Given a series + + f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1), + + use the Lagrange inversion formula to compute a series + + g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1) + + so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so + necessarily b[0] = 0 too. + + The algorithm is naive and could be improved, but speed isn't an + issue here and it's easy to read. + + """ + n = len(a) + f = sum(a[i]*x**i for i in range(len(a))) + h = (x/f).series(x, 0, n).removeO() + hpower = [h**0] + for k in range(n): + hpower.append((hpower[-1]*h).expand()) + b = [mp.mpf(0)] + for k in range(1, n): + b.append(hpower[k].coeff(x, k - 1)/k) + b = map(lambda x: mp.mpf(x), b) + return b diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/utils.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/utils.pyc new file mode 100644 index 0000000..9d71ece Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/zetac.py b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/zetac.py new file mode 100644 index 0000000..ca16ace --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/zetac.py @@ -0,0 +1,29 @@ +"""Compute the Taylor series for zeta(x) - 1 around x = 0.""" +from __future__ import division, print_function, absolute_import + +try: + import mpmath +except ImportError: + pass + + +def zetac_series(N): + coeffs = [] + with mpmath.workdps(100): + coeffs.append(-1.5) + for n in range(1, N): + coeff = mpmath.diff(mpmath.zeta, 0, n)/mpmath.factorial(n) + coeffs.append(coeff) + return coeffs + + +def main(): + print(__doc__) + coeffs = zetac_series(10) + coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0) + for x in coeffs] + print("\n".join(coeffs[::-1])) + + +if __name__ == '__main__': + main() diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/zetac.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/zetac.pyc new file mode 100644 index 0000000..466171e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_precompute/zetac.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_spherical_bessel.py b/project/venv/lib/python2.7/site-packages/scipy/special/_spherical_bessel.py new file mode 100644 index 0000000..1d3f005 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_spherical_bessel.py @@ -0,0 +1,205 @@ +from __future__ import division, print_function, absolute_import + +from ._ufuncs import (_spherical_jn, _spherical_yn, _spherical_in, + _spherical_kn, _spherical_jn_d, _spherical_yn_d, + _spherical_in_d, _spherical_kn_d) + +def spherical_jn(n, z, derivative=False): + r"""Spherical Bessel function of the first kind or its derivative. + + Defined as [1]_, + + .. math:: j_n(z) = \sqrt{\frac{\pi}{2z}} J_{n + 1/2}(z), + + where :math:`J_n` is the Bessel function of the first kind. + + Parameters + ---------- + n : int, array_like + Order of the Bessel function (n >= 0). + z : complex or float, array_like + Argument of the Bessel function. 
+    derivative : bool, optional
+        If True, the value of the derivative (rather than the function
+        itself) is returned.
+
+    Returns
+    -------
+    jn : ndarray
+
+    Notes
+    -----
+    For real arguments greater than the order, the function is computed
+    using the ascending recurrence [2]_. For small real or complex
+    arguments, the definitional relation to the cylindrical Bessel function
+    of the first kind is used.
+
+    The derivative is computed using the relations [3]_,
+
+    .. math::
+        j_n'(z) = j_{n-1}(z) - \frac{n + 1}{z} j_n(z).
+
+        j_0'(z) = -j_1(z)
+
+
+    .. versionadded:: 0.18.0
+
+    References
+    ----------
+    .. [1] https://dlmf.nist.gov/10.47.E3
+    .. [2] https://dlmf.nist.gov/10.51.E1
+    .. [3] https://dlmf.nist.gov/10.51.E2
+    """
+    if derivative:
+        return _spherical_jn_d(n, z)
+    else:
+        return _spherical_jn(n, z)
+
+
+def spherical_yn(n, z, derivative=False):
+    r"""Spherical Bessel function of the second kind or its derivative.
+
+    Defined as [1]_,
+
+    .. math:: y_n(z) = \sqrt{\frac{\pi}{2z}} Y_{n + 1/2}(z),
+
+    where :math:`Y_n` is the Bessel function of the second kind.
+
+    Parameters
+    ----------
+    n : int, array_like
+        Order of the Bessel function (n >= 0).
+    z : complex or float, array_like
+        Argument of the Bessel function.
+    derivative : bool, optional
+        If True, the value of the derivative (rather than the function
+        itself) is returned.
+
+    Returns
+    -------
+    yn : ndarray
+
+    Notes
+    -----
+    For real arguments, the function is computed using the ascending
+    recurrence [2]_. For complex arguments, the definitional relation to
+    the cylindrical Bessel function of the second kind is used.
+
+    The derivative is computed using the relations [3]_,
+
+    .. math::
+        y_n' = y_{n-1} - \frac{n + 1}{z} y_n.
+
+        y_0' = -y_1
+
+
+    .. versionadded:: 0.18.0
+
+    References
+    ----------
+    .. [1] https://dlmf.nist.gov/10.47.E4
+    .. [2] https://dlmf.nist.gov/10.51.E1
+    .. [3] https://dlmf.nist.gov/10.51.E2
+    """
+    if derivative:
+        return _spherical_yn_d(n, z)
+    else:
+        return _spherical_yn(n, z)
+
+
+def spherical_in(n, z, derivative=False):
+    r"""Modified spherical Bessel function of the first kind or its derivative.
+
+    Defined as [1]_,
+
+    .. math:: i_n(z) = \sqrt{\frac{\pi}{2z}} I_{n + 1/2}(z),
+
+    where :math:`I_n` is the modified Bessel function of the first kind.
+
+    Parameters
+    ----------
+    n : int, array_like
+        Order of the Bessel function (n >= 0).
+    z : complex or float, array_like
+        Argument of the Bessel function.
+    derivative : bool, optional
+        If True, the value of the derivative (rather than the function
+        itself) is returned.
+
+    Returns
+    -------
+    in : ndarray
+
+    Notes
+    -----
+    The function is computed using its definitional relation to the
+    modified cylindrical Bessel function of the first kind.
+
+    The derivative is computed using the relations [2]_,
+
+    .. math::
+        i_n' = i_{n-1} - \frac{n + 1}{z} i_n.
+
+        i_0' = i_1
+
+
+    .. versionadded:: 0.18.0
+
+    References
+    ----------
+    .. [1] https://dlmf.nist.gov/10.47.E7
+    .. [2] https://dlmf.nist.gov/10.51.E5
+    """
+    if derivative:
+        return _spherical_in_d(n, z)
+    else:
+        return _spherical_in(n, z)
+
+
+def spherical_kn(n, z, derivative=False):
+    r"""Modified spherical Bessel function of the second kind or its derivative.
+
+    Defined as [1]_,
+
+    .. math:: k_n(z) = \sqrt{\frac{\pi}{2z}} K_{n + 1/2}(z),
+
+    where :math:`K_n` is the modified Bessel function of the second kind.
+
+    Parameters
+    ----------
+    n : int, array_like
+        Order of the Bessel function (n >= 0).
+    z : complex or float, array_like
+        Argument of the Bessel function.
+ derivative : bool, optional + If True, the value of the derivative (rather than the function + itself) is returned. + + Returns + ------- + kn : ndarray + + Notes + ----- + The function is computed using its definitional relation to the + modified cylindrical Bessel function of the second kind. + + The derivative is computed using the relations [2]_, + + .. math:: + k_n' = -k_{n-1} - \frac{n + 1}{z} k_n. + + k_0' = -k_1 + + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] https://dlmf.nist.gov/10.47.E9 + .. [2] https://dlmf.nist.gov/10.51.E5 + """ + if derivative: + return _spherical_kn_d(n, z) + else: + return _spherical_kn(n, z) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_spherical_bessel.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_spherical_bessel.pyc new file mode 100644 index 0000000..4778e10 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_spherical_bessel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_test_round.so b/project/venv/lib/python2.7/site-packages/scipy/special/_test_round.so new file mode 100755 index 0000000..15e6bd4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_test_round.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_testutils.py b/project/venv/lib/python2.7/site-packages/scipy/special/_testutils.py new file mode 100644 index 0000000..fc6c8b4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/_testutils.py @@ -0,0 +1,319 @@ +from __future__ import division, print_function, absolute_import + +import os + +from distutils.version import LooseVersion + +import functools + +import numpy as np +from numpy.testing import assert_ +import pytest + +import scipy.special as sc + +__all__ = ['with_special_errors', 'assert_func_equal', 'FuncData'] + + +#------------------------------------------------------------------------------ +# Check if a module is present to be used in tests +#------------------------------------------------------------------------------ + +class MissingModule(object): + def __init__(self, name): + self.name = name + + +def check_version(module, min_ver): + if type(module) == MissingModule: + return pytest.mark.skip(reason="{} is not installed".format(module.name)) + return pytest.mark.skipif(LooseVersion(module.__version__) < LooseVersion(min_ver), + reason="{} version >= {} required".format(module.__name__, min_ver)) + + +#------------------------------------------------------------------------------ +# Enable convergence and loss of precision warnings -- turn off one by one +#------------------------------------------------------------------------------ + +def with_special_errors(func): + """ + Enable special function errors (such as underflow, overflow, + loss of precision, etc.) 
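+
+    A minimal usage sketch (``test_gamma_overflow`` is a hypothetical test
+    function; with errors enabled, the overflow raises instead of silently
+    returning inf)::
+
+        @with_special_errors
+        def test_gamma_overflow():
+            sc.gamma(1000.)  # overflow raises sc.SpecialFunctionError here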
+    """
+    @functools.wraps(func)
+    def wrapper(*a, **kw):
+        with sc.errstate(all='raise'):
+            res = func(*a, **kw)
+        return res
+    return wrapper
+
+
+#------------------------------------------------------------------------------
+# Comparing function values at many data points at once, with helpful
+# error reports
+#------------------------------------------------------------------------------
+
+def assert_func_equal(func, results, points, rtol=None, atol=None,
+                      param_filter=None, knownfailure=None,
+                      vectorized=True, dtype=None, nan_ok=False,
+                      ignore_inf_sign=False, distinguish_nan_and_inf=True):
+    if hasattr(points, 'next'):
+        # it's a generator
+        points = list(points)
+
+    points = np.asarray(points)
+    if points.ndim == 1:
+        points = points[:,None]
+    nparams = points.shape[1]
+
+    if hasattr(results, '__name__'):
+        # function
+        data = points
+        result_columns = None
+        result_func = results
+    else:
+        # dataset
+        data = np.c_[points, results]
+        result_columns = list(range(nparams, data.shape[1]))
+        result_func = None
+
+    fdata = FuncData(func, data, list(range(nparams)),
+                     result_columns=result_columns, result_func=result_func,
+                     rtol=rtol, atol=atol, param_filter=param_filter,
+                     knownfailure=knownfailure, nan_ok=nan_ok, vectorized=vectorized,
+                     ignore_inf_sign=ignore_inf_sign,
+                     distinguish_nan_and_inf=distinguish_nan_and_inf)
+    fdata.check()
+
+
+class FuncData(object):
+    """
+    Data set for checking a special function.
+
+    Parameters
+    ----------
+    func : function
+        Function to test
+    data : numpy array
+        columnar data to use for testing
+    param_columns : int or tuple of ints
+        Column indices in which the parameters to `func` lie.
+        Can be imaginary integers to indicate that the parameter
+        should be cast to complex.
+    result_columns : int or tuple of ints, optional
+        Column indices for expected results from `func`.
+    result_func : callable, optional
+        Function to call to obtain results.
+    rtol : float, optional
+        Required relative tolerance. Default is 5*eps.
+    atol : float, optional
+        Required absolute tolerance. Default is 5*tiny.
+    param_filter : function, or tuple of functions/Nones, optional
+        Filter functions to exclude some parameter ranges.
+        If omitted, no filtering is done.
+    knownfailure : str, optional
+        Known failure error message to raise when the test is run.
+        If omitted, no exception is raised.
+    nan_ok : bool, optional
+        Whether nan is always an accepted result.
+    vectorized : bool, optional
+        Whether all functions passed in are vectorized.
+    ignore_inf_sign : bool, optional
+        Whether to ignore signs of infinities.
+        (Doesn't matter for complex-valued functions.)
+    distinguish_nan_and_inf : bool, optional
+        If False, treat numbers which contain nans or infs as
+ + """ + + def __init__(self, func, data, param_columns, result_columns=None, + result_func=None, rtol=None, atol=None, param_filter=None, + knownfailure=None, dataname=None, nan_ok=False, vectorized=True, + ignore_inf_sign=False, distinguish_nan_and_inf=True): + self.func = func + self.data = data + self.dataname = dataname + if not hasattr(param_columns, '__len__'): + param_columns = (param_columns,) + self.param_columns = tuple(param_columns) + if result_columns is not None: + if not hasattr(result_columns, '__len__'): + result_columns = (result_columns,) + self.result_columns = tuple(result_columns) + if result_func is not None: + raise ValueError("Only result_func or result_columns should be provided") + elif result_func is not None: + self.result_columns = None + else: + raise ValueError("Either result_func or result_columns should be provided") + self.result_func = result_func + self.rtol = rtol + self.atol = atol + if not hasattr(param_filter, '__len__'): + param_filter = (param_filter,) + self.param_filter = param_filter + self.knownfailure = knownfailure + self.nan_ok = nan_ok + self.vectorized = vectorized + self.ignore_inf_sign = ignore_inf_sign + self.distinguish_nan_and_inf = distinguish_nan_and_inf + if not self.distinguish_nan_and_inf: + self.ignore_inf_sign = True + + def get_tolerances(self, dtype): + if not np.issubdtype(dtype, np.inexact): + dtype = np.dtype(float) + info = np.finfo(dtype) + rtol, atol = self.rtol, self.atol + if rtol is None: + rtol = 5*info.eps + if atol is None: + atol = 5*info.tiny + return rtol, atol + + def check(self, data=None, dtype=None, dtypes=None): + """Check the special function against the data.""" + + if self.knownfailure: + pytest.xfail(reason=self.knownfailure) + + if data is None: + data = self.data + + if dtype is None: + dtype = data.dtype + else: + data = data.astype(dtype) + + rtol, atol = self.get_tolerances(dtype) + + # Apply given filter functions + if self.param_filter: + param_mask = np.ones((data.shape[0],), np.bool_) + for j, filter in zip(self.param_columns, self.param_filter): + if filter: + param_mask &= list(filter(data[:,j])) + data = data[param_mask] + + # Pick parameters from the correct columns + params = [] + for idx, j in enumerate(self.param_columns): + if np.iscomplexobj(j): + j = int(j.imag) + params.append(data[:,j].astype(complex)) + elif dtypes and idx < len(dtypes): + params.append(data[:, j].astype(dtypes[idx])) + else: + params.append(data[:,j]) + + # Helper for evaluating results + def eval_func_at_params(func, skip_mask=None): + if self.vectorized: + got = func(*params) + else: + got = [] + for j in range(len(params[0])): + if skip_mask is not None and skip_mask[j]: + got.append(np.nan) + continue + got.append(func(*tuple([params[i][j] for i in range(len(params))]))) + got = np.asarray(got) + if not isinstance(got, tuple): + got = (got,) + return got + + # Evaluate function to be tested + got = eval_func_at_params(self.func) + + # Grab the correct results + if self.result_columns is not None: + # Correct results passed in with the data + wanted = tuple([data[:,icol] for icol in self.result_columns]) + else: + # Function producing correct results passed in + skip_mask = None + if self.nan_ok and len(got) == 1: + # Don't spend time evaluating what doesn't need to be evaluated + skip_mask = np.isnan(got[0]) + wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask) + + # Check the validity of each output returned + assert_(len(got) == len(wanted)) + + for output_num, (x, y) in 
enumerate(zip(got, wanted)): + if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign: + pinf_x = np.isinf(x) + pinf_y = np.isinf(y) + minf_x = np.isinf(x) + minf_y = np.isinf(y) + else: + pinf_x = np.isposinf(x) + pinf_y = np.isposinf(y) + minf_x = np.isneginf(x) + minf_y = np.isneginf(y) + nan_x = np.isnan(x) + nan_y = np.isnan(y) + + olderr = np.seterr(all='ignore') + try: + abs_y = np.absolute(y) + abs_y[~np.isfinite(abs_y)] = 0 + diff = np.absolute(x - y) + diff[~np.isfinite(diff)] = 0 + + rdiff = diff / np.absolute(y) + rdiff[~np.isfinite(rdiff)] = 0 + finally: + np.seterr(**olderr) + + tol_mask = (diff <= atol + rtol*abs_y) + pinf_mask = (pinf_x == pinf_y) + minf_mask = (minf_x == minf_y) + + nan_mask = (nan_x == nan_y) + + bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask) + + point_count = bad_j.size + if self.nan_ok: + bad_j &= ~nan_x + bad_j &= ~nan_y + point_count -= (nan_x | nan_y).sum() + + if not self.distinguish_nan_and_inf and not self.nan_ok: + # If nan's are okay we've already covered all these cases + inf_x = np.isinf(x) + inf_y = np.isinf(y) + both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y) + bad_j &= ~both_nonfinite + point_count -= both_nonfinite.sum() + + if np.any(bad_j): + # Some bad results: inform what, where, and how bad + msg = [""] + msg.append("Max |adiff|: %g" % diff.max()) + msg.append("Max |rdiff|: %g" % rdiff.max()) + msg.append("Bad results (%d out of %d) for the following points (in output %d):" + % (np.sum(bad_j), point_count, output_num,)) + for j in np.nonzero(bad_j)[0]: + j = int(j) + fmt = lambda x: "%30s" % np.array2string(x[j], precision=18) + a = " ".join(map(fmt, params)) + b = " ".join(map(fmt, got)) + c = " ".join(map(fmt, wanted)) + d = fmt(rdiff) + msg.append("%s => %s != %s (rdiff %s)" % (a, b, c, d)) + assert_(False, "\n".join(msg)) + + def __repr__(self): + """Pretty-printing, esp. 
for Nose output""" + if np.any(list(map(np.iscomplexobj, self.param_columns))): + is_complex = " (complex)" + else: + is_complex = "" + if self.dataname: + return "<Data for %s%s: %s>" % (self.func.__name__, is_complex, + os.path.basename(self.dataname)) + else: + return "<Data for %s%s>" % (self.func.__name__, is_complex) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_testutils.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/_testutils.pyc new file mode 100644 index 0000000..74e50fa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_testutils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_ufuncs.so b/project/venv/lib/python2.7/site-packages/scipy/special/_ufuncs.so new file mode 100755 index 0000000..c35d188 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_ufuncs.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/_ufuncs_cxx.so b/project/venv/lib/python2.7/site-packages/scipy/special/_ufuncs_cxx.so new file mode 100755 index 0000000..bb79916 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/_ufuncs_cxx.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/add_newdocs.py b/project/venv/lib/python2.7/site-packages/scipy/special/add_newdocs.py new file mode 100644 index 0000000..c2b10bb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/add_newdocs.py @@ -0,0 +1,7288 @@ +# Docstrings for generated ufuncs +# +# The syntax is designed to look like the function add_newdoc is being +# called from numpy.lib, but in this file add_newdoc puts the +# docstrings in a dictionary. This dictionary is used in +# _generate_pyx.py to generate the docstrings for the ufuncs in +# scipy.special at the C level when the ufuncs are created at compile +# time. + +from __future__ import division, print_function, absolute_import + +docdict = {} + + +def get(name): + return docdict.get(name) + + +def add_newdoc(place, name, doc): + docdict['.'.join((place, name))] = doc + + +add_newdoc("scipy.special", "_sf_error_test_function", + """ + Private function; do not use. + """) + +add_newdoc("scipy.special", "sph_harm", + r""" + sph_harm(m, n, theta, phi) + + Compute spherical harmonics. + + The spherical harmonics are defined as + + .. math:: + + Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}} + e^{i m \theta} P^m_n(\cos(\phi)) + + where :math:`P_n^m` are the associated Legendre functions; see `lpmv`. + + Parameters + ---------- + m : array_like + Order of the harmonic (int); must have ``|m| <= n``. + n : array_like + Degree of the harmonic (int); must have ``n >= 0``. This is + often denoted by ``l`` (lower case L) in descriptions of + spherical harmonics. + theta : array_like + Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``. + phi : array_like + Polar (colatitudinal) coordinate; must be in ``[0, pi]``. + + Returns + ------- + y_mn : complex float + The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``. + + Notes + ----- + There are different conventions for the meanings of the input + arguments ``theta`` and ``phi``. In SciPy ``theta`` is the + azimuthal angle and ``phi`` is the polar angle. It is common to + see the opposite convention, that is, ``theta`` as the polar angle + and ``phi`` as the azimuthal angle. + + Note that SciPy's spherical harmonics include the Condon-Shortley + phase [2]_ because it is part of `lpmv`. 
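+
+    As a quick numerical sanity check (a minimal sketch; compare the
+    explicit forms below, where
+    :math:`Y_1^0(\theta, \phi) = \frac{1}{2}\sqrt{3/\pi}\cos(\phi)`):
+
+    >>> from scipy.special import sph_harm
+    >>> abs(sph_harm(0, 1, 0.0, 0.0))  # |Y_1^0| at phi=0 is 0.5*sqrt(3/pi)
+    0.4886025119029199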
+ + With SciPy's conventions, the first several spherical harmonics + are + + .. math:: + + Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\ + Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}} + e^{-i\theta} \sin(\phi) \\ + Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}} + \cos(\phi) \\ + Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}} + e^{i\theta} \sin(\phi). + + References + ---------- + .. [1] Digital Library of Mathematical Functions, 14.30. + https://dlmf.nist.gov/14.30 + .. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase + """) + +add_newdoc("scipy.special", "_ellip_harm", + """ + Internal function, use `ellip_harm` instead. + """) + +add_newdoc("scipy.special", "_ellip_norm", + """ + Internal function, use `ellip_norm` instead. + """) + +add_newdoc("scipy.special", "_lambertw", + """ + Internal function, use `lambertw` instead. + """) + +add_newdoc("scipy.special", "wrightomega", + r""" + wrightomega(z, out=None) + + Wright Omega function. + + Defined as the solution to + + .. math:: + + \omega + \log(\omega) = z + + where :math:`\log` is the principal branch of the complex logarithm. + + Parameters + ---------- + z : array_like + Points at which to evaluate the Wright Omega function + + Returns + ------- + omega : ndarray + Values of the Wright Omega function + + Notes + ----- + .. versionadded:: 0.19.0 + + The function can also be defined as + + .. math:: + + \omega(z) = W_{K(z)}(e^z) + + where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the + unwinding number and :math:`W` is the Lambert W function. + + The implementation here is taken from [1]_. + + See Also + -------- + lambertw : The Lambert W function + + References + ---------- + .. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex + Double-Precision Evaluation of the Wright :math:`\omega` + Function." ACM Transactions on Mathematical Software, + 2012. :doi:`10.1145/2168773.2168779`. + + """) + + +add_newdoc("scipy.special", "agm", + """ + agm(a, b) + + Compute the arithmetic-geometric mean of `a` and `b`. + + Start with a_0 = a and b_0 = b and iteratively compute:: + + a_{n+1} = (a_n + b_n)/2 + b_{n+1} = sqrt(a_n*b_n) + + a_n and b_n converge to the same limit as n increases; their common + limit is agm(a, b). + + Parameters + ---------- + a, b : array_like + Real values only. If the values are both negative, the result + is negative. If one value is negative and the other is positive, + `nan` is returned. + + Returns + ------- + float + The arithmetic-geometric mean of `a` and `b`. + + Examples + -------- + >>> from scipy.special import agm + >>> a, b = 24.0, 6.0 + >>> agm(a, b) + 13.458171481725614 + + Compare that result to the iteration: + + >>> while a != b: + ... a, b = (a + b)/2, np.sqrt(a*b) + ... print("a = %19.16f b=%19.16f" % (a, b)) + ... + a = 15.0000000000000000 b=12.0000000000000000 + a = 13.5000000000000000 b=13.4164078649987388 + a = 13.4582039324993694 b=13.4581390309909850 + a = 13.4581714817451772 b=13.4581714817060547 + a = 13.4581714817256159 b=13.4581714817256159 + + When array-like arguments are given, broadcasting applies: + + >>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1). + >>> b = np.array([6, 12, 24, 48]) # b has shape (4,). + >>> agm(a, b) + array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756], + [ 4.37037309, 6.72908574, 10.84726853, 18.11597502], + [ 6. 
, 8.74074619, 13.45817148, 21.69453707]]) + """) + +add_newdoc("scipy.special", "airy", + r""" + airy(z) + + Airy functions and their derivatives. + + Parameters + ---------- + z : array_like + Real or complex argument. + + Returns + ------- + Ai, Aip, Bi, Bip : ndarrays + Airy functions Ai and Bi, and their derivatives Aip and Bip. + + Notes + ----- + The Airy functions Ai and Bi are two independent solutions of + + .. math:: y''(x) = x y(x). + + For real `z` in [-10, 10], the computation is carried out by calling + the Cephes [1]_ `airy` routine, which uses power series summation + for small `z` and rational minimax approximations for large `z`. + + Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are + employed. They are computed using power series for :math:`|z| < 1` and + the following relations to modified Bessel functions for larger `z` + (where :math:`t \equiv 2 z^{3/2}/3`): + + .. math:: + + Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t) + + Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t) + + Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right) + + Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right) + + See also + -------- + airye : exponentially scaled Airy functions. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Compute the Airy functions on the interval [-15, 5]. + + >>> from scipy import special + >>> x = np.linspace(-15, 5, 201) + >>> ai, aip, bi, bip = special.airy(x) + + Plot Ai(x) and Bi(x). + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, ai, 'r', label='Ai(x)') + >>> plt.plot(x, bi, 'b--', label='Bi(x)') + >>> plt.ylim(-0.5, 1.0) + >>> plt.grid() + >>> plt.legend(loc='upper left') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "airye", + """ + airye(z) + + Exponentially scaled Airy functions and their derivatives. + + Scaling:: + + eAi = Ai * exp(2.0/3.0*z*sqrt(z)) + eAip = Aip * exp(2.0/3.0*z*sqrt(z)) + eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real)) + eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real)) + + Parameters + ---------- + z : array_like + Real or complex argument. + + Returns + ------- + eAi, eAip, eBi, eBip : array_like + Airy functions Ai and Bi, and their derivatives Aip and Bip + + Notes + ----- + Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`. + + See also + -------- + airy + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + """) + +add_newdoc("scipy.special", "bdtr", + r""" + bdtr(k, n, p) + + Binomial distribution cumulative distribution function. + + Sum of the terms 0 through `k` of the Binomial probability density. + + .. math:: + \mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j} + + Parameters + ---------- + k : array_like + Number of successes (int). + n : array_like + Number of events (int). + p : array_like + Probability of success in a single event (float). + + Returns + ------- + y : ndarray + Probability of `k` or fewer successes in `n` independent events with + success probabilities of `p`. + + Notes + ----- + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1). 
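+
+    For example, a minimal numerical sketch of this identity (the values
+    are chosen only for illustration):
+
+    >>> from scipy.special import bdtr, betainc
+    >>> np.allclose(bdtr(2, 10, 0.3), betainc(8, 3, 0.7))
+    True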
+ + Wrapper for the Cephes [1]_ routine `bdtr`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "bdtrc", + r""" + bdtrc(k, n, p) + + Binomial distribution survival function. + + Sum of the terms `k + 1` through `n` of the binomial probability density, + + .. math:: + \mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j} + + Parameters + ---------- + k : array_like + Number of successes (int). + n : array_like + Number of events (int) + p : array_like + Probability of success in a single event. + + Returns + ------- + y : ndarray + Probability of `k + 1` or more successes in `n` independent events + with success probabilities of `p`. + + See also + -------- + bdtr + betainc + + Notes + ----- + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k). + + Wrapper for the Cephes [1]_ routine `bdtrc`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "bdtri", + """ + bdtri(k, n, y) + + Inverse function to `bdtr` with respect to `p`. + + Finds the event probability `p` such that the sum of the terms 0 through + `k` of the binomial probability density is equal to the given cumulative + probability `y`. + + Parameters + ---------- + k : array_like + Number of successes (float). + n : array_like + Number of events (float) + y : array_like + Cumulative probability (probability of `k` or fewer successes in `n` + events). + + Returns + ------- + p : ndarray + The event probability such that `bdtr(k, n, p) = y`. + + See also + -------- + bdtr + betaincinv + + Notes + ----- + The computation is carried out using the inverse beta integral function + and the relation,:: + + 1 - p = betaincinv(n - k, k + 1, y). + + Wrapper for the Cephes [1]_ routine `bdtri`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "bdtrik", + """ + bdtrik(y, n, p) + + Inverse function to `bdtr` with respect to `k`. + + Finds the number of successes `k` such that the sum of the terms 0 through + `k` of the Binomial probability density for `n` events with probability + `p` is equal to the given cumulative probability `y`. + + Parameters + ---------- + y : array_like + Cumulative probability (probability of `k` or fewer successes in `n` + events). + n : array_like + Number of events (float). + p : array_like + Success probability (float). + + Returns + ------- + k : ndarray + The number of successes `k` such that `bdtr(k, n, p) = y`. + + See also + -------- + bdtr + + Notes + ----- + Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the + cumulative incomplete beta distribution. + + Computation of `k` involves a search for a value that produces the desired + value of `y`. The search relies on the monotonicity of `y` with `k`. + + Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`. + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [2] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. 
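+
+    Examples
+    --------
+    A round-trip sketch (illustrative values only): `bdtrik` searches for
+    the `k` that reproduces a cumulative probability computed by `bdtr`,
+    so rounding its result recovers the original number of successes.
+
+    >>> import numpy as np
+    >>> from scipy.special import bdtr, bdtrik
+    >>> y = bdtr(3, 10, 0.5)
+    >>> np.round(bdtrik(y, 10, 0.5))
+    3.0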
+
+    """)
+
+add_newdoc("scipy.special", "bdtrin",
+    """
+    bdtrin(k, y, p)
+
+    Inverse function to `bdtr` with respect to `n`.
+
+    Finds the number of events `n` such that the sum of the terms 0 through
+    `k` of the Binomial probability density for events with probability `p`
+    is equal to the given cumulative probability `y`.
+
+    Parameters
+    ----------
+    k : array_like
+        Number of successes (float).
+    y : array_like
+        Cumulative probability (probability of `k` or fewer successes in `n`
+        events).
+    p : array_like
+        Success probability (float).
+
+    Returns
+    -------
+    n : ndarray
+        The number of events `n` such that `bdtr(k, n, p) = y`.
+
+    See also
+    --------
+    bdtr
+
+    Notes
+    -----
+    Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to
+    the cumulative incomplete beta distribution.
+
+    Computation of `n` involves a search for a value that produces the
+    desired value of `y`. The search relies on the monotonicity of `y`
+    with `n`.
+
+    Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
+
+    References
+    ----------
+    .. [1] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+    .. [2] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    """)
+
+add_newdoc("scipy.special", "binom",
+    """
+    binom(n, k)
+
+    Binomial coefficient
+
+    See Also
+    --------
+    comb : The number of combinations of N things taken k at a time.
+
+    """)
+
+add_newdoc("scipy.special", "btdtria",
+    r"""
+    btdtria(p, b, x)
+
+    Inverse of `btdtr` with respect to `a`.
+
+    This is the inverse of the beta cumulative distribution function, `btdtr`,
+    considered as a function of `a`, returning the value of `a` for which
+    `btdtr(a, b, x) = p`, or
+
+    .. math::
+        p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
+
+    Parameters
+    ----------
+    p : array_like
+        Cumulative probability, in [0, 1].
+    b : array_like
+        Shape parameter (`b` > 0).
+    x : array_like
+        The quantile, in [0, 1].
+
+    Returns
+    -------
+    a : ndarray
+        The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
+
+    See Also
+    --------
+    btdtr : Cumulative density function of the beta distribution.
+    btdtri : Inverse with respect to `x`.
+    btdtrib : Inverse with respect to `b`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `a` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `a`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Algorithm 708: Significant Digit Computation of the Incomplete Beta
+           Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
+
+    """)
+
+add_newdoc("scipy.special", "btdtrib",
+    r"""
+    btdtrib(a, p, x)
+
+    Inverse of `btdtr` with respect to `b`.
+
+    This is the inverse of the beta cumulative distribution function, `btdtr`,
+    considered as a function of `b`, returning the value of `b` for which
+    `btdtr(a, b, x) = p`, or
+
+    .. 
math:: + p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt + + Parameters + ---------- + a : array_like + Shape parameter (`a` > 0). + p : array_like + Cumulative probability, in [0, 1]. + x : array_like + The quantile, in [0, 1]. + + Returns + ------- + b : ndarray + The value of the shape parameter `b` such that `btdtr(a, b, x) = p`. + + See Also + -------- + btdtr : Cumulative density function of the beta distribution. + btdtri : Inverse with respect to `x`. + btdtria : Inverse with respect to `a`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`. + + The cumulative distribution function `p` is computed using a routine by + DiDinato and Morris [2]_. Computation of `b` involves a search for a value + that produces the desired value of `p`. The search relies on the + monotonicity of `p` with `b`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] DiDinato, A. R. and Morris, A. H., + Algorithm 708: Significant Digit Computation of the Incomplete Beta + Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373. + + + """) + +add_newdoc("scipy.special", "bei", + """ + bei(x) + + Kelvin function bei + """) + +add_newdoc("scipy.special", "beip", + """ + beip(x) + + Derivative of the Kelvin function `bei` + """) + +add_newdoc("scipy.special", "ber", + """ + ber(x) + + Kelvin function ber. + """) + +add_newdoc("scipy.special", "berp", + """ + berp(x) + + Derivative of the Kelvin function `ber` + """) + +add_newdoc("scipy.special", "besselpoly", + r""" + besselpoly(a, lmb, nu) + + Weighted integral of a Bessel function. + + .. math:: + + \int_0^1 x^\lambda J_\nu(2 a x) \, dx + + where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`, + :math:`\nu=nu`. + + """) + +add_newdoc("scipy.special", "beta", + """ + beta(a, b) + + Beta function. + + :: + + beta(a, b) = gamma(a) * gamma(b) / gamma(a+b) + """) + +add_newdoc("scipy.special", "betainc", + """ + betainc(a, b, x) + + Incomplete beta integral. + + Compute the incomplete beta integral of the arguments, evaluated + from zero to `x`:: + + gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x). + + Notes + ----- + The incomplete beta is also sometimes defined without the terms + in gamma, in which case the above definition is the so-called regularized + incomplete beta. Under this definition, you can get the incomplete beta by + multiplying the result of the scipy function by beta(a, b). + + """) + +add_newdoc("scipy.special", "betaincinv", + """ + betaincinv(a, b, y) + + Inverse function to beta integral. + + Compute `x` such that betainc(a, b, x) = y. + """) + +add_newdoc("scipy.special", "betaln", + """ + betaln(a, b) + + Natural logarithm of absolute value of beta function. + + Computes ``ln(abs(beta(a, b)))``. + """) + +add_newdoc("scipy.special", "boxcox", + """ + boxcox(x, lmbda) + + Compute the Box-Cox transformation. + + The Box-Cox transformation is:: + + y = (x**lmbda - 1) / lmbda if lmbda != 0 + log(x) if lmbda == 0 + + Returns `nan` if ``x < 0``. + Returns `-inf` if ``x == 0`` and ``lmbda < 0``. + + Parameters + ---------- + x : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + + Returns + ------- + y : array + Transformed data. + + Notes + ----- + + .. 
versionadded:: 0.14.0 + + Examples + -------- + >>> from scipy.special import boxcox + >>> boxcox([1, 4, 10], 2.5) + array([ 0. , 12.4 , 126.09110641]) + >>> boxcox(2, [0, 1, 2]) + array([ 0.69314718, 1. , 1.5 ]) + """) + +add_newdoc("scipy.special", "boxcox1p", + """ + boxcox1p(x, lmbda) + + Compute the Box-Cox transformation of 1 + `x`. + + The Box-Cox transformation computed by `boxcox1p` is:: + + y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0 + log(1+x) if lmbda == 0 + + Returns `nan` if ``x < -1``. + Returns `-inf` if ``x == -1`` and ``lmbda < 0``. + + Parameters + ---------- + x : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + + Returns + ------- + y : array + Transformed data. + + Notes + ----- + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> from scipy.special import boxcox1p + >>> boxcox1p(1e-4, [0, 0.5, 1]) + array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04]) + >>> boxcox1p([0.01, 0.1], 0.25) + array([ 0.00996272, 0.09645476]) + """) + +add_newdoc("scipy.special", "inv_boxcox", + """ + inv_boxcox(y, lmbda) + + Compute the inverse of the Box-Cox transformation. + + Find ``x`` such that:: + + y = (x**lmbda - 1) / lmbda if lmbda != 0 + log(x) if lmbda == 0 + + Parameters + ---------- + y : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + + Returns + ------- + x : array + Transformed data. + + Notes + ----- + + .. versionadded:: 0.16.0 + + Examples + -------- + >>> from scipy.special import boxcox, inv_boxcox + >>> y = boxcox([1, 4, 10], 2.5) + >>> inv_boxcox(y, 2.5) + array([1., 4., 10.]) + """) + +add_newdoc("scipy.special", "inv_boxcox1p", + """ + inv_boxcox1p(y, lmbda) + + Compute the inverse of the Box-Cox transformation. + + Find ``x`` such that:: + + y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0 + log(1+x) if lmbda == 0 + + Parameters + ---------- + y : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + + Returns + ------- + x : array + Transformed data. + + Notes + ----- + + .. versionadded:: 0.16.0 + + Examples + -------- + >>> from scipy.special import boxcox1p, inv_boxcox1p + >>> y = boxcox1p([1, 4, 10], 2.5) + >>> inv_boxcox1p(y, 2.5) + array([1., 4., 10.]) + """) + +add_newdoc("scipy.special", "btdtr", + r""" + btdtr(a, b, x) + + Cumulative density function of the beta distribution. + + Returns the integral from zero to `x` of the beta probability density + function, + + .. math:: + I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + a : array_like + Shape parameter (a > 0). + b : array_like + Shape parameter (b > 0). + x : array_like + Upper limit of integration, in [0, 1]. + + Returns + ------- + I : ndarray + Cumulative density function of the beta distribution with parameters + `a` and `b` at `x`. + + See Also + -------- + betainc + + Notes + ----- + This function is identical to the incomplete beta integral function + `betainc`. + + Wrapper for the Cephes [1]_ routine `btdtr`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "btdtri", + r""" + btdtri(a, b, p) + + The `p`-th quantile of the beta distribution. + + This function is the inverse of the beta cumulative distribution function, + `btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or + + .. 
math:: + p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt + + Parameters + ---------- + a : array_like + Shape parameter (`a` > 0). + b : array_like + Shape parameter (`b` > 0). + p : array_like + Cumulative probability, in [0, 1]. + + Returns + ------- + x : ndarray + The quantile corresponding to `p`. + + See Also + -------- + betaincinv + btdtr + + Notes + ----- + The value of `x` is found by interval halving or Newton iterations. + + Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent + problem of finding the inverse of the incomplete beta integral. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "cbrt", + """ + cbrt(x) + + Element-wise cube root of `x`. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + + Returns + ------- + float + The cube root of each value in `x`. + + Examples + -------- + >>> from scipy.special import cbrt + + >>> cbrt(8) + 2.0 + >>> cbrt([-8, -3, 0.125, 1.331]) + array([-2. , -1.44224957, 0.5 , 1.1 ]) + + """) + +add_newdoc("scipy.special", "chdtr", + """ + chdtr(v, x) + + Chi square cumulative distribution function + + Returns the area under the left hand tail (from 0 to `x`) of the Chi + square probability density function with `v` degrees of freedom:: + + 1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x) + """) + +add_newdoc("scipy.special", "chdtrc", + """ + chdtrc(v, x) + + Chi square survival function + + Returns the area under the right hand tail (from `x` to + infinity) of the Chi square probability density function with `v` + degrees of freedom:: + + 1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf) + """) + +add_newdoc("scipy.special", "chdtri", + """ + chdtri(v, p) + + Inverse to `chdtrc` + + Returns the argument x such that ``chdtrc(v, x) == p``. + """) + +add_newdoc("scipy.special", "chdtriv", + """ + chdtriv(p, x) + + Inverse to `chdtr` vs `v` + + Returns the argument v such that ``chdtr(v, x) == p``. + """) + +add_newdoc("scipy.special", "chndtr", + """ + chndtr(x, df, nc) + + Non-central chi square cumulative distribution function + + """) + +add_newdoc("scipy.special", "chndtrix", + """ + chndtrix(p, df, nc) + + Inverse to `chndtr` vs `x` + """) + +add_newdoc("scipy.special", "chndtridf", + """ + chndtridf(x, p, nc) + + Inverse to `chndtr` vs `df` + """) + +add_newdoc("scipy.special", "chndtrinc", + """ + chndtrinc(x, df, p) + + Inverse to `chndtr` vs `nc` + """) + +add_newdoc("scipy.special", "cosdg", + """ + cosdg(x) + + Cosine of the angle `x` given in degrees. + """) + +add_newdoc("scipy.special", "cosm1", + """ + cosm1(x) + + cos(x) - 1 for use when `x` is near zero. + """) + +add_newdoc("scipy.special", "cotdg", + """ + cotdg(x) + + Cotangent of the angle `x` given in degrees. + """) + +add_newdoc("scipy.special", "dawsn", + """ + dawsn(x) + + Dawson's integral. + + Computes:: + + exp(-x**2) * integral(exp(t**2), t=0..x). + + See Also + -------- + wofz, erf, erfc, erfcx, erfi + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. 
+ http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-15, 15, num=1000) + >>> plt.plot(x, special.dawsn(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$dawsn(x)$') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "ellipe", + r""" + ellipe(m) + + Complete elliptic integral of the second kind + + This function is defined as + + .. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt + + Parameters + ---------- + m : array_like + Defines the parameter of the elliptic integral. + + Returns + ------- + E : ndarray + Value of the elliptic integral. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellpe`. + + For `m > 0` the computation uses the approximation, + + .. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m), + + where :math:`P` and :math:`Q` are tenth-order polynomials. For + `m < 0`, the relation + + .. math:: E(m) = E(m/(m - 1)) \sqrt(1-m) + + is used. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [2]_. Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + ellipeinc : Incomplete elliptic integral of the second kind + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + """) + +add_newdoc("scipy.special", "ellipeinc", + r""" + ellipeinc(phi, m) + + Incomplete elliptic integral of the second kind + + This function is defined as + + .. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt + + Parameters + ---------- + phi : array_like + amplitude of the elliptic integral. + + m : array_like + parameter of the elliptic integral. + + Returns + ------- + E : ndarray + Value of the elliptic integral. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellie`. + + Computation uses arithmetic-geometric means algorithm. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [2]_. Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + """) + +add_newdoc("scipy.special", "ellipj", + """ + ellipj(u, m) + + Jacobian elliptic functions + + Calculates the Jacobian elliptic functions of parameter `m` between + 0 and 1, and real argument `u`. + + Parameters + ---------- + m : array_like + Parameter. + u : array_like + Argument. 
+
+    Returns
+    -------
+    sn, cn, dn, ph : ndarrays
+        The returned functions::
+
+            sn(u|m), cn(u|m), dn(u|m)
+
+        The value `ph` is such that if `u = ellipkinc(ph, m)`,
+        then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `ellpj`.
+
+    These functions are periodic, with quarter-period on the real axis
+    equal to the complete elliptic integral `ellipk(m)`.
+
+    Relation to incomplete elliptic integral: If `u = ellipkinc(phi, m)`,
+    then `sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is
+    called the amplitude of `u`.
+
+    Computation is by means of the arithmetic-geometric mean algorithm,
+    except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
+    close to 1, the approximation applies only for `phi < pi/2`.
+
+    See also
+    --------
+    ellipk : Complete elliptic integral of the first kind.
+    ellipkinc : Incomplete elliptic integral of the first kind.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    """)
+
+add_newdoc("scipy.special", "ellipkm1",
+    """
+    ellipkm1(p)
+
+    Complete elliptic integral of the first kind around `m` = 1
+
+    This function is defined as
+
+    .. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
+
+    where `m = 1 - p`.
+
+    Parameters
+    ----------
+    p : array_like
+        Defines the parameter of the elliptic integral as `m = 1 - p`.
+
+    Returns
+    -------
+    K : ndarray
+        Value of the elliptic integral.
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `ellpk`.
+
+    For `p <= 1`, computation uses the approximation,
+
+    .. math:: K(p) \\approx P(p) - \\log(p) Q(p),
+
+    where :math:`P` and :math:`Q` are tenth-order polynomials. The
+    argument `p` is used internally rather than `m` so that the logarithmic
+    singularity at `m = 1` will be shifted to the origin; this preserves
+    maximum accuracy. For `p > 1`, the identity
+
+    .. math:: K(p) = K(1/p)/\\sqrt{p}
+
+    is used.
+
+    See Also
+    --------
+    ellipk : Complete elliptic integral of the first kind
+    ellipkinc : Incomplete elliptic integral of the first kind
+    ellipe : Complete elliptic integral of the second kind
+    ellipeinc : Incomplete elliptic integral of the second kind
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    """)
+
+add_newdoc("scipy.special", "ellipkinc",
+    r"""
+    ellipkinc(phi, m)
+
+    Incomplete elliptic integral of the first kind
+
+    This function is defined as
+
+    .. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt
+
+    This function is also called `F(phi, m)`.
+
+    Parameters
+    ----------
+    phi : array_like
+        amplitude of the elliptic integral
+
+    m : array_like
+        parameter of the elliptic integral
+
+    Returns
+    -------
+    K : ndarray
+        Value of the elliptic integral
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `ellik`.  The computation is
+    carried out using the arithmetic-geometric mean algorithm.
+
+    The parameterization in terms of :math:`m` follows that of section
+    17.2 in [2]_. Other parameterizations in terms of the
+    complementary parameter :math:`1 - m`, modular angle
+    :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
+    used, so be careful that you choose the correct parameter.
+
+    See Also
+    --------
+    ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
+    ellipk : Complete elliptic integral of the first kind
+    ellipe : Complete elliptic integral of the second kind
+    ellipeinc : Incomplete elliptic integral of the second kind
+
+    References
+    ----------
+    .. 
[1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + """) + +add_newdoc("scipy.special", "entr", + r""" + entr(x) + + Elementwise function for computing entropy. + + .. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases} + + Parameters + ---------- + x : ndarray + Input array. + + Returns + ------- + res : ndarray + The value of the elementwise entropy function at the given points `x`. + + See Also + -------- + kl_div, rel_entr + + Notes + ----- + This function is concave. + + .. versionadded:: 0.15.0 + + """) + +add_newdoc("scipy.special", "erf", + """ + erf(z) + + Returns the error function of complex argument. + + It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``. + + Parameters + ---------- + x : ndarray + Input array. + + Returns + ------- + res : ndarray + The values of the error function at the given points `x`. + + See Also + -------- + erfc, erfinv, erfcinv, wofz, erfcx, erfi + + Notes + ----- + The cumulative of the unit normal distribution is given by + ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Error_function + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, + 1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm + .. [3] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erf(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erf(x)$') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "erfc", + """ + erfc(x) + + Complementary error function, ``1 - erf(x)``. + + See Also + -------- + erf, erfi, erfcx, dawsn, wofz + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erfc(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erfc(x)$') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "erfi", + """ + erfi(z) + + Imaginary error function, ``-i erf(i z)``. + + See Also + -------- + erf, erfc, erfcx, dawsn, wofz + + Notes + ----- + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erfi(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erfi(x)$') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "erfcx", + """ + erfcx(x) + + Scaled complementary error function, ``exp(x**2) * erfc(x)``. + + See Also + -------- + erf, erfc, erfi, dawsn, wofz + + Notes + ----- + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. 
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-3, 3)
+    >>> plt.plot(x, special.erfcx(x))
+    >>> plt.xlabel('$x$')
+    >>> plt.ylabel('$erfcx(x)$')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("scipy.special", "eval_jacobi",
+    r"""
+    eval_jacobi(n, alpha, beta, x, out=None)
+
+    Evaluate Jacobi polynomial at a point.
+
+    The Jacobi polynomials can be defined via the Gauss hypergeometric
+    function :math:`{}_2F_1` as
+
+    .. math::
+
+        P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
+          {}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
+
+    where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
+    :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    alpha : array_like
+        Parameter
+    beta : array_like
+        Parameter
+    x : array_like
+        Points at which to evaluate the polynomial
+
+    Returns
+    -------
+    P : ndarray
+        Values of the Jacobi polynomial
+
+    See Also
+    --------
+    roots_jacobi : roots and quadrature weights of Jacobi polynomials
+    jacobi : Jacobi polynomial object
+    hyp2f1 : Gauss hypergeometric function
+    """)
+
+add_newdoc("scipy.special", "eval_sh_jacobi",
+    r"""
+    eval_sh_jacobi(n, p, q, x, out=None)
+
+    Evaluate shifted Jacobi polynomial at a point.
+
+    Defined by
+
+    .. math::
+
+        G_n^{(p, q)}(x)
+          = \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
+
+    where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi polynomial.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `binom` and `eval_jacobi`.
+    p : float
+        Parameter
+    q : float
+        Parameter
+    x : array_like
+        Points at which to evaluate the shifted Jacobi polynomial
+
+    Returns
+    -------
+    G : ndarray
+        Values of the shifted Jacobi polynomial.
+
+    See Also
+    --------
+    roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
+                      polynomials
+    sh_jacobi : shifted Jacobi polynomial object
+    eval_jacobi : evaluate Jacobi polynomials
+    """)
+
+add_newdoc("scipy.special", "eval_gegenbauer",
+    r"""
+    eval_gegenbauer(n, alpha, x, out=None)
+
+    Evaluate Gegenbauer polynomial at a point.
+
+    The Gegenbauer polynomials can be defined via the Gauss
+    hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
+          {}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    alpha : array_like
+        Parameter
+    x : array_like
+        Points at which to evaluate the Gegenbauer polynomial
+
+    Returns
+    -------
+    C : ndarray
+        Values of the Gegenbauer polynomial
+
+    See Also
+    --------
+    roots_gegenbauer : roots and quadrature weights of Gegenbauer
+                       polynomials
+    gegenbauer : Gegenbauer polynomial object
+    hyp2f1 : Gauss hypergeometric function
+    """)
+
+add_newdoc("scipy.special", "eval_chebyt",
+    r"""
+    eval_chebyt(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the first kind at a point.
+
+    The Chebyshev polynomials of the first kind can be defined via the
+    Gauss hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2). 
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+
+    Returns
+    -------
+    T : ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebyt : roots and quadrature weights of Chebyshev
+                   polynomials of the first kind
+    chebyt : Chebyshev polynomial object
+    eval_chebyu : evaluate Chebyshev polynomials of the second kind
+    hyp2f1 : Gauss hypergeometric function
+    numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
+
+    Notes
+    -----
+    This routine is numerically stable for `x` in ``[-1, 1]`` at least
+    up to order ``10000``.
+    """)
+
+add_newdoc("scipy.special", "eval_chebyu",
+    r"""
+    eval_chebyu(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the second kind at a point.
+
+    The Chebyshev polynomials of the second kind can be defined via
+    the Gauss hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+
+    Returns
+    -------
+    U : ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebyu : roots and quadrature weights of Chebyshev
+                   polynomials of the second kind
+    chebyu : Chebyshev polynomial object
+    eval_chebyt : evaluate Chebyshev polynomials of the first kind
+    hyp2f1 : Gauss hypergeometric function
+    """)
+
+add_newdoc("scipy.special", "eval_chebys",
+    r"""
+    eval_chebys(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        S_n(x) = U_n(x/2)
+
+    where :math:`U_n` is a Chebyshev polynomial of the second kind.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyu`.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+
+    Returns
+    -------
+    S : ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebys : roots and quadrature weights of Chebyshev
+                   polynomials of the second kind on [-2, 2]
+    chebys : Chebyshev polynomial object
+    eval_chebyu : evaluate Chebyshev polynomials of the second kind
+    """)
+
+add_newdoc("scipy.special", "eval_chebyc",
+    r"""
+    eval_chebyc(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        C_n(x) = 2 T_n(x/2)
+
+    where :math:`T_n` is a Chebyshev polynomial of the first kind.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyt`. 
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+
+    Returns
+    -------
+    C : ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebyc : roots and quadrature weights of Chebyshev
+                   polynomials of the first kind on [-2, 2]
+    chebyc : Chebyshev polynomial object
+    numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
+    eval_chebyt : evaluate Chebyshev polynomials of the first kind
+    """)
+
+add_newdoc("scipy.special", "eval_sh_chebyt",
+    r"""
+    eval_sh_chebyt(n, x, out=None)
+
+    Evaluate shifted Chebyshev polynomial of the first kind at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        T_n^*(x) = T_n(2x - 1)
+
+    where :math:`T_n` is a Chebyshev polynomial of the first kind.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyt`.
+    x : array_like
+        Points at which to evaluate the shifted Chebyshev polynomial
+
+    Returns
+    -------
+    T : ndarray
+        Values of the shifted Chebyshev polynomial
+
+    See Also
+    --------
+    roots_sh_chebyt : roots and quadrature weights of shifted
+                      Chebyshev polynomials of the first kind
+    sh_chebyt : shifted Chebyshev polynomial object
+    eval_chebyt : evaluate Chebyshev polynomials of the first kind
+    numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
+    """)
+
+add_newdoc("scipy.special", "eval_sh_chebyu",
+    r"""
+    eval_sh_chebyu(n, x, out=None)
+
+    Evaluate shifted Chebyshev polynomial of the second kind at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        U_n^*(x) = U_n(2x - 1)
+
+    where :math:`U_n` is a Chebyshev polynomial of the second kind.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyu`.
+    x : array_like
+        Points at which to evaluate the shifted Chebyshev polynomial
+
+    Returns
+    -------
+    U : ndarray
+        Values of the shifted Chebyshev polynomial
+
+    See Also
+    --------
+    roots_sh_chebyu : roots and quadrature weights of shifted
+                      Chebyshev polynomials of the second kind
+    sh_chebyu : shifted Chebyshev polynomial object
+    eval_chebyu : evaluate Chebyshev polynomials of the second kind
+    """)
+
+add_newdoc("scipy.special", "eval_legendre",
+    r"""
+    eval_legendre(n, x, out=None)
+
+    Evaluate Legendre polynomial at a point.
+
+    The Legendre polynomials can be defined via the Gauss
+    hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Legendre polynomial
+
+    Returns
+    -------
+    P : ndarray
+        Values of the Legendre polynomial
+
+    See Also
+    --------
+    roots_legendre : roots and quadrature weights of Legendre
+                     polynomials
+    legendre : Legendre polynomial object
+    hyp2f1 : Gauss hypergeometric function
+    numpy.polynomial.legendre.Legendre : Legendre series
+    """)
+
+add_newdoc("scipy.special", "eval_sh_legendre",
+    r"""
+    eval_sh_legendre(n, x, out=None)
+
+    Evaluate shifted Legendre polynomial at a point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        P_n^*(x) = P_n(2x - 1)
+
+    where :math:`P_n` is a Legendre polynomial.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. 
If not an integer, the value is + determined via the relation to `eval_legendre`. + x : array_like + Points at which to evaluate the shifted Legendre polynomial + + Returns + ------- + P : ndarray + Values of the shifted Legendre polynomial + + See Also + -------- + roots_sh_legendre : roots and quadrature weights of shifted + Legendre polynomials + sh_legendre : shifted Legendre polynomial object + eval_legendre : evaluate Legendre polynomials + numpy.polynomial.legendre.Legendre : Legendre series + """) + +add_newdoc("scipy.special", "eval_genlaguerre", + r""" + eval_genlaguerre(n, alpha, x, out=None) + + Evaluate generalized Laguerre polynomial at a point. + + The generalized Laguerre polynomials can be defined via the + confluent hypergeometric function :math:`{}_1F_1` as + + .. math:: + + L_n^{(\alpha)}(x) = \binom{n + \alpha}{n} + {}_1F_1(-n, \alpha + 1, x). + + When :math:`n` is an integer the result is a polynomial of degree + :math:`n`. The Laguerre polynomials are the special case where + :math:`\alpha = 0`. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer the result is + determined via the relation to the confluent hypergeometric + function. + alpha : array_like + Parameter; must have ``alpha > -1`` + x : array_like + Points at which to evaluate the generalized Laguerre + polynomial + + Returns + ------- + L : ndarray + Values of the generalized Laguerre polynomial + + See Also + -------- + roots_genlaguerre : roots and quadrature weights of generalized + Laguerre polynomials + genlaguerre : generalized Laguerre polynomial object + hyp1f1 : confluent hypergeometric function + eval_laguerre : evaluate Laguerre polynomials + """) + +add_newdoc("scipy.special", "eval_laguerre", + r""" + eval_laguerre(n, x, out=None) + + Evaluate Laguerre polynomial at a point. + + The Laguerre polynomials can be defined via the confluent + hypergeometric function :math:`{}_1F_1` as + + .. math:: + + L_n(x) = {}_1F_1(-n, 1, x). + + When :math:`n` is an integer the result is a polynomial of degree + :math:`n`. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer the result is + determined via the relation to the confluent hypergeometric + function. + x : array_like + Points at which to evaluate the Laguerre polynomial + + Returns + ------- + L : ndarray + Values of the Laguerre polynomial + + See Also + -------- + roots_laguerre : roots and quadrature weights of Laguerre + polynomials + laguerre : Laguerre polynomial object + numpy.polynomial.laguerre.Laguerre : Laguerre series + eval_genlaguerre : evaluate generalized Laguerre polynomials + """) + +add_newdoc("scipy.special", "eval_hermite", + r""" + eval_hermite(n, x, out=None) + + Evaluate physicist's Hermite polynomial at a point. + + Defined by + + .. math:: + + H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2}; + + :math:`H_n` is a polynomial of degree :math:`n`. 
+ + Parameters + ---------- + n : array_like + Degree of the polynomial + x : array_like + Points at which to evaluate the Hermite polynomial + + Returns + ------- + H : ndarray + Values of the Hermite polynomial + + See Also + -------- + roots_hermite : roots and quadrature weights of physicist's + Hermite polynomials + hermite : physicist's Hermite polynomial object + numpy.polynomial.hermite.Hermite : Physicist's Hermite series + eval_hermitenorm : evaluate Probabilist's Hermite polynomials + """) + +add_newdoc("scipy.special", "eval_hermitenorm", + r""" + eval_hermitenorm(n, x, out=None) + + Evaluate probabilist's (normalized) Hermite polynomial at a + point. + + Defined by + + .. math:: + + He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2}; + + :math:`He_n` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : array_like + Degree of the polynomial + x : array_like + Points at which to evaluate the Hermite polynomial + + Returns + ------- + He : ndarray + Values of the Hermite polynomial + + See Also + -------- + roots_hermitenorm : roots and quadrature weights of probabilist's + Hermite polynomials + hermitenorm : probabilist's Hermite polynomial object + numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series + eval_hermite : evaluate physicist's Hermite polynomials + """) + +add_newdoc("scipy.special", "exp1", + """ + exp1(z) + + Exponential integral E_1 of complex argument z + + :: + + integral(exp(-z*t)/t, t=1..inf). + """) + +add_newdoc("scipy.special", "exp10", + """ + exp10(x) + + Compute ``10**x`` element-wise. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + + Returns + ------- + float + ``10**x``, computed element-wise. + + Examples + -------- + >>> from scipy.special import exp10 + + >>> exp10(3) + 1000.0 + >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]]) + >>> exp10(x) + array([[ 0.1 , 0.31622777, 1. ], + [ 3.16227766, 10. , 31.6227766 ]]) + + """) + +add_newdoc("scipy.special", "exp2", + """ + exp2(x) + + Compute ``2**x`` element-wise. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + + Returns + ------- + float + ``2**x``, computed element-wise. + + Examples + -------- + >>> from scipy.special import exp2 + + >>> exp2(3) + 8.0 + >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]]) + >>> exp2(x) + array([[ 0.5 , 0.70710678, 1. ], + [ 1.41421356, 2. , 2.82842712]]) + """) + +add_newdoc("scipy.special", "expi", + """ + expi(x) + + Exponential integral Ei + + Defined as:: + + integral(exp(t)/t, t=-inf..x) + + See `expn` for a different exponential integral. + """) + +add_newdoc('scipy.special', 'expit', + """ + expit(x) + + Expit (a.k.a. logistic sigmoid) ufunc for ndarrays. + + The expit function, also known as the logistic sigmoid function, is + defined as ``expit(x) = 1/(1+exp(-x))``. It is the inverse of the + logit function. + + Parameters + ---------- + x : ndarray + The ndarray to apply expit to element-wise. + + Returns + ------- + out : ndarray + An ndarray of the same shape as x. Its entries + are `expit` of the corresponding entry of x. + + See Also + -------- + logit + + Notes + ----- + As a ufunc expit takes a number of optional + keyword arguments. For more information + see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_ + + .. versionadded:: 0.10.0 + + Examples + -------- + >>> from scipy.special import expit, logit + + >>> expit([-np.inf, -1.5, 0, 1.5, np.inf]) + array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. 
]) + + `logit` is the inverse of `expit`: + + >>> logit(expit([-2.5, 0, 3.1, 5.0])) + array([-2.5, 0. , 3.1, 5. ]) + + Plot expit(x) for x in [-6, 6]: + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-6, 6, 121) + >>> y = expit(x) + >>> plt.plot(x, y) + >>> plt.grid() + >>> plt.xlim(-6, 6) + >>> plt.xlabel('x') + >>> plt.title('expit(x)') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "expm1", + """ + expm1(x) + + Compute ``exp(x) - 1``. + + When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation + of ``exp(x) - 1`` can suffer from catastrophic loss of precision. + ``expm1(x)`` is implemented to avoid the loss of precision that occurs when + `x` is near zero. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + + Returns + ------- + float + ``exp(x) - 1`` computed element-wise. + + Examples + -------- + >>> from scipy.special import expm1 + + >>> expm1(1.0) + 1.7182818284590451 + >>> expm1([-0.2, -0.1, 0, 0.1, 0.2]) + array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276]) + + The exact value of ``exp(7.5e-13) - 1`` is:: + + 7.5000000000028125000000007031250000001318...*10**-13. + + Here is what ``expm1(7.5e-13)`` gives: + + >>> expm1(7.5e-13) + 7.5000000000028135e-13 + + Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in + a "catastrophic" loss of precision: + + >>> np.exp(7.5e-13) - 1 + 7.5006667543675576e-13 + + """) + +add_newdoc("scipy.special", "expn", + """ + expn(n, x) + + Exponential integral E_n + + Returns the exponential integral for integer `n` and non-negative `x` and + `n`:: + + integral(exp(-x*t) / t**n, t=1..inf). + """) + +add_newdoc("scipy.special", "exprel", + r""" + exprel(x) + + Relative error exponential, ``(exp(x) - 1)/x``. + + When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation + of ``exp(x) - 1`` can suffer from catastrophic loss of precision. + ``exprel(x)`` is implemented to avoid the loss of precision that occurs when + `x` is near zero. + + Parameters + ---------- + x : ndarray + Input array. `x` must contain real numbers. + + Returns + ------- + float + ``(exp(x) - 1)/x``, computed element-wise. + + See Also + -------- + expm1 + + Notes + ----- + .. versionadded:: 0.17.0 + + Examples + -------- + >>> from scipy.special import exprel + + >>> exprel(0.01) + 1.0050167084168056 + >>> exprel([-0.25, -0.1, 0, 0.1, 0.25]) + array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167]) + + Compare ``exprel(5e-9)`` to the naive calculation. The exact value + is ``1.00000000250000000416...``. + + >>> exprel(5e-9) + 1.0000000025 + + >>> (np.exp(5e-9) - 1)/5e-9 + 0.99999999392252903 + """) + +add_newdoc("scipy.special", "fdtr", + r""" + fdtr(dfn, dfd, x) + + F cumulative distribution function. + + Returns the value of the cumulative density function of the + F-distribution, also known as Snedecor's F-distribution or the + Fisher-Snedecor distribution. + + The F-distribution with parameters :math:`d_n` and :math:`d_d` is the + distribution of the random variable, + + .. math:: + X = \frac{U_n/d_n}{U_d/d_d}, + + where :math:`U_n` and :math:`U_d` are random variables distributed + :math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom, + respectively. + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + dfd : array_like + Second parameter (positive float). + x : array_like + Argument (nonnegative float). + + Returns + ------- + y : ndarray + The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`. 
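+
+    As an illustrative cross-check (a sketch added here, not part of the
+    original Cephes documentation), the same CDF is exposed through
+    `scipy.stats.f`:
+
+    >>> import numpy as np
+    >>> from scipy.special import fdtr
+    >>> from scipy.stats import f
+    >>> bool(np.isclose(fdtr(3, 5, 2.0), f.cdf(2.0, 3, 5)))
+    True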
+ + Notes + ----- + The regularized incomplete beta function is used, according to the + formula, + + .. math:: + F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2). + + Wrapper for the Cephes [1]_ routine `fdtr`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "fdtrc", + r""" + fdtrc(dfn, dfd, x) + + F survival function. + + Returns the complemented F-distribution function (the integral of the + density from `x` to infinity). + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + dfd : array_like + Second parameter (positive float). + x : array_like + Argument (nonnegative float). + + Returns + ------- + y : ndarray + The complemented F-distribution function with parameters `dfn` and + `dfd` at `x`. + + See also + -------- + fdtr + + Notes + ----- + The regularized incomplete beta function is used, according to the + formula, + + .. math:: + F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2). + + Wrapper for the Cephes [1]_ routine `fdtrc`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "fdtri", + r""" + fdtri(dfn, dfd, p) + + The `p`-th quantile of the F-distribution. + + This function is the inverse of the F-distribution CDF, `fdtr`, returning + the `x` such that `fdtr(dfn, dfd, x) = p`. + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + dfd : array_like + Second parameter (positive float). + p : array_like + Cumulative probability, in [0, 1]. + + Returns + ------- + x : ndarray + The quantile corresponding to `p`. + + Notes + ----- + The computation is carried out using the relation to the inverse + regularized beta function, :math:`I^{-1}_x(a, b)`. Let + :math:`z = I^{-1}_p(d_d/2, d_n/2).` Then, + + .. math:: + x = \frac{d_d (1 - z)}{d_n z}. + + If `p` is such that :math:`x < 0.5`, the following relation is used + instead for improved stability: let + :math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then, + + .. math:: + x = \frac{d_d z'}{d_n (1 - z')}. + + Wrapper for the Cephes [1]_ routine `fdtri`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "fdtridfd", + """ + fdtridfd(dfn, p, x) + + Inverse to `fdtr` vs dfd + + Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``. + """) + +add_newdoc("scipy.special", "fdtridfn", + """ + fdtridfn(p, dfd, x) + + Inverse to `fdtr` vs dfn + + finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``. + """) + +add_newdoc("scipy.special", "fresnel", + """ + fresnel(z) + + Fresnel sin and cos integrals + + Defined as:: + + ssa = integral(sin(pi/2 * t**2), t=0..z) + csa = integral(cos(pi/2 * t**2), t=0..z) + + Parameters + ---------- + z : float or complex array_like + Argument + + Returns + ------- + ssa, csa + Fresnel sin and cos integral values + + """) + +add_newdoc("scipy.special", "gamma", + r""" + gamma(z) + + Gamma function. + + .. math:: + + \Gamma(z) = \int_0^\infty x^{z-1} e^{-x} dx = (z - 1)! + + The gamma function is often referred to as the generalized + factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) = + n!`` for natural number *n*. 
+ + Parameters + ---------- + z : float or complex array_like + + Returns + ------- + float or complex + The value(s) of gamma(z) + + Examples + -------- + >>> from scipy.special import gamma, factorial + + >>> gamma([0, 0.5, 1, 5]) + array([ inf, 1.77245385, 1. , 24. ]) + + >>> z = 2.5 + 1j + >>> gamma(z) + (0.77476210455108352+0.70763120437959293j) + >>> gamma(z+1), z*gamma(z) # Recurrence property + ((1.2292740569981171+2.5438401155000685j), + (1.2292740569981158+2.5438401155000658j)) + + >>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi) + 3.1415926535897927 + + Plot gamma(x) for real x + + >>> x = np.linspace(-3.5, 5.5, 2251) + >>> y = gamma(x) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)') + >>> k = np.arange(1, 7) + >>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6, + ... label='(x-1)!, x = 1, 2, ...') + >>> plt.xlim(-3.5, 5.5) + >>> plt.ylim(-10, 25) + >>> plt.grid() + >>> plt.xlabel('x') + >>> plt.legend(loc='lower right') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "gammainc", + r""" + gammainc(a, x) + + Regularized lower incomplete gamma function. + + Defined as + + .. math:: + + \frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt + + for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the + relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where + `gammaincc` is the regularized upper incomplete gamma function. + + Notes + ----- + The implementation largely follows that of [1]_. + + See also + -------- + gammaincc : regularized upper incomplete gamma function + gammaincinv : inverse to ``gammainc`` versus ``x`` + gammainccinv : inverse to ``gammaincc`` versus ``x`` + + References + ---------- + .. [1] Maddock et. al., "Incomplete Gamma Functions", + https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html + """) + +add_newdoc("scipy.special", "gammaincc", + r""" + gammaincc(a, x) + + Regularized upper incomplete gamma function. + + Defined as + + .. math:: + + \frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt + + for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the + relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where `gammainc` + is the regularized lower incomplete gamma function. + + Notes + ----- + The implementation largely follows that of [1]_. + + See also + -------- + gammainc : regularized lower incomplete gamma function + gammaincinv : inverse to ``gammainc`` versus ``x`` + gammainccinv : inverse to ``gammaincc`` versus ``x`` + + References + ---------- + .. [1] Maddock et. al., "Incomplete Gamma Functions", + https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html + """) + +add_newdoc("scipy.special", "gammainccinv", + """ + gammainccinv(a, y) + + Inverse to `gammaincc` + + Returns `x` such that ``gammaincc(a, x) == y``. + """) + +add_newdoc("scipy.special", "gammaincinv", + """ + gammaincinv(a, y) + + Inverse to `gammainc` + + Returns `x` such that ``gammainc(a, x) = y``. + """) + +add_newdoc("scipy.special", "gammaln", + """ + Logarithm of the absolute value of the Gamma function. + + Parameters + ---------- + x : array-like + Values on the real line at which to compute ``gammaln`` + + Returns + ------- + gammaln : ndarray + Values of ``gammaln`` at x. 
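+
+    As a quick illustrative check (values chosen only for demonstration),
+    ``gammaln`` agrees with the logarithm of ``gamma`` wherever the gamma
+    function is positive:
+
+    >>> import numpy as np
+    >>> from scipy.special import gammaln, gamma
+    >>> bool(np.isclose(gammaln(6.0), np.log(gamma(6.0))))
+    True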
+ + See Also + -------- + gammasgn : sign of the gamma function + loggamma : principal branch of the logarithm of the gamma function + + Notes + ----- + When used in conjunction with `gammasgn`, this function is useful + for working in logspace on the real axis without having to deal with + complex numbers, via the relation ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``. + + For complex-valued log-gamma, use `loggamma` instead of `gammaln`. + """) + +add_newdoc("scipy.special", "gammasgn", + """ + gammasgn(x) + + Sign of the gamma function. + + See Also + -------- + gammaln + loggamma + """) + +add_newdoc("scipy.special", "gdtr", + r""" + gdtr(a, b, x) + + Gamma distribution cumulative density function. + + Returns the integral from zero to `x` of the gamma probability density + function, + + .. math:: + + F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + a : array_like + The rate parameter of the gamma distribution, sometimes denoted + :math:`\beta` (float). It is also the reciprocal of the scale + parameter :math:`\theta`. + b : array_like + The shape parameter of the gamma distribution, sometimes denoted + :math:`\alpha` (float). + x : array_like + The quantile (upper limit of integration; float). + + See also + -------- + gdtrc : 1 - CDF of the gamma distribution. + + Returns + ------- + F : ndarray + The CDF of the gamma distribution with parameters `a` and `b` + evaluated at `x`. + + Notes + ----- + The evaluation is carried out using the relation to the incomplete gamma + integral (regularized gamma function). + + Wrapper for the Cephes [1]_ routine `gdtr`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "gdtrc", + r""" + gdtrc(a, b, x) + + Gamma distribution survival function. + + Integral from `x` to infinity of the gamma probability density function, + + .. math:: + + F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + a : array_like + The rate parameter of the gamma distribution, sometimes denoted + :math:`\beta` (float). It is also the reciprocal of the scale + parameter :math:`\theta`. + b : array_like + The shape parameter of the gamma distribution, sometimes denoted + :math:`\alpha` (float). + x : array_like + The quantile (lower limit of integration; float). + + Returns + ------- + F : ndarray + The survival function of the gamma distribution with parameters `a` + and `b` evaluated at `x`. + + See Also + -------- + gdtr, gdtrix + + Notes + ----- + The evaluation is carried out using the relation to the incomplete gamma + integral (regularized gamma function). + + Wrapper for the Cephes [1]_ routine `gdtrc`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "gdtria", + """ + gdtria(p, b, x, out=None) + + Inverse of `gdtr` vs a. + + Returns the inverse with respect to the parameter `a` of ``p = + gdtr(a, b, x)``, the cumulative distribution function of the gamma + distribution. + + Parameters + ---------- + p : array_like + Probability values. + b : array_like + `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter + of the gamma distribution. + x : array_like + Nonnegative real values, from the domain of the gamma distribution. 
+    out : ndarray, optional
+        If a fourth argument is given, it must be a numpy.ndarray whose size
+        matches the broadcast result of `p`, `b` and `x`. `out` is then the
+        array returned by the function.
+
+    Returns
+    -------
+    a : ndarray
+        Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
+        is the "scale" parameter of the gamma distribution.
+
+    See Also
+    --------
+    gdtr : CDF of the gamma distribution.
+    gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
+    gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `a` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `a`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Computation of the incomplete gamma function ratios and their
+           inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
+
+    Examples
+    --------
+    First evaluate `gdtr`.
+
+    >>> from scipy.special import gdtr, gdtria
+    >>> p = gdtr(1.2, 3.4, 5.6)
+    >>> print(p)
+    0.94378087442
+
+    Verify the inverse.
+
+    >>> gdtria(p, 3.4, 5.6)
+    1.2
+    """)
+
+add_newdoc("scipy.special", "gdtrib",
+    """
+    gdtrib(a, p, x, out=None)
+
+    Inverse of `gdtr` vs b.
+
+    Returns the inverse with respect to the parameter `b` of ``p =
+    gdtr(a, b, x)``, the cumulative distribution function of the gamma
+    distribution.
+
+    Parameters
+    ----------
+    a : array_like
+        `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
+        parameter of the gamma distribution.
+    p : array_like
+        Probability values.
+    x : array_like
+        Nonnegative real values, from the domain of the gamma distribution.
+    out : ndarray, optional
+        If a fourth argument is given, it must be a numpy.ndarray whose size
+        matches the broadcast result of `a`, `p` and `x`. `out` is then the
+        array returned by the function.
+
+    Returns
+    -------
+    b : ndarray
+        Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
+        the "shape" parameter of the gamma distribution.
+
+    See Also
+    --------
+    gdtr : CDF of the gamma distribution.
+    gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
+    gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `b` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `b`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Computation of the incomplete gamma function ratios and their
+           inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
+
+    Examples
+    --------
+    First evaluate `gdtr`.
+
+    >>> from scipy.special import gdtr, gdtrib
+    >>> p = gdtr(1.2, 3.4, 5.6)
+    >>> print(p)
+    0.94378087442
+
+    Verify the inverse.
+
+    >>> gdtrib(1.2, p, 5.6)
+    3.3999999999723882
+    """)
+
+add_newdoc("scipy.special", "gdtrix",
+    """
+    gdtrix(a, b, p, out=None)
+
+    Inverse of `gdtr` vs x.
+
+    Returns the inverse with respect to the parameter `x` of ``p =
+    gdtr(a, b, x)``, the cumulative distribution function of the gamma
+    distribution. This is also known as the p'th quantile of the
+    distribution.
+
+    Parameters
+    ----------
+    a : array_like
+        `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
+        parameter of the gamma distribution.
+    b : array_like
+        `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
+        of the gamma distribution.
+    p : array_like
+        Probability values.
+    out : ndarray, optional
+        If a fourth argument is given, it must be a numpy.ndarray whose size
+        matches the broadcast result of `a`, `b` and `p`. `out` is then the
+        array returned by the function.
+
+    Returns
+    -------
+    x : ndarray
+        Values of the `x` parameter such that `p = gdtr(a, b, x)`.
+
+    See Also
+    --------
+    gdtr : CDF of the gamma distribution.
+    gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
+    gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `x` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `x`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Computation of the incomplete gamma function ratios and their
+           inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
+
+    Examples
+    --------
+    First evaluate `gdtr`.
+
+    >>> from scipy.special import gdtr, gdtrix
+    >>> p = gdtr(1.2, 3.4, 5.6)
+    >>> print(p)
+    0.94378087442
+
+    Verify the inverse.
+
+    >>> gdtrix(1.2, 3.4, p)
+    5.5999999999999996
+    """)
+
+add_newdoc("scipy.special", "hankel1",
+    r"""
+    hankel1(v, z)
+
+    Hankel function of the first kind
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+
+    Returns
+    -------
+    out : Values of the Hankel function of the first kind.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
+
+    is used.
+
+    See also
+    --------
+    hankel1e : this function with leading exponential behavior stripped off.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+    """)
+
+add_newdoc("scipy.special", "hankel1e",
+    r"""
+    hankel1e(v, z)
+
+    Exponentially scaled Hankel function of the first kind
+
+    Defined as::
+
+        hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+
+    Returns
+    -------
+    out : Values of the exponentially scaled Hankel function.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. 
math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
+
+    is used.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+    """)
+
+add_newdoc("scipy.special", "hankel2",
+    r"""
+    hankel2(v, z)
+
+    Hankel function of the second kind
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+
+    Returns
+    -------
+    out : Values of the Hankel function of the second kind.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
+
+    is used.
+
+    See also
+    --------
+    hankel2e : this function with leading exponential behavior stripped off.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+    """)
+
+add_newdoc("scipy.special", "hankel2e",
+    r"""
+    hankel2e(v, z)
+
+    Exponentially scaled Hankel function of the second kind
+
+    Defined as::
+
+        hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+
+    Returns
+    -------
+    out : Values of the exponentially scaled Hankel function of the second kind.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z \exp(\frac{\imath\pi}{2}))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
+
+    is used.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    """)
+
+add_newdoc("scipy.special", "huber",
+    r"""
+    huber(delta, r)
+
+    Huber loss function.
+
+    .. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
+
+    Parameters
+    ----------
+    delta : ndarray
+        Input array, indicating the quadratic vs. linear loss changepoint.
+    r : ndarray
+        Input array, possibly representing residuals.
+
+    Returns
+    -------
+    res : ndarray
+        The computed Huber loss function values.
+
+    Notes
+    -----
+    This function is convex in r.
+
+    .. versionadded:: 0.15.0
+
+    """)
+
+add_newdoc("scipy.special", "hyp0f1",
+    r"""
+    hyp0f1(v, z)
+
+    Confluent hypergeometric limit function 0F1.
+
+    Parameters
+    ----------
+    v, z : array_like
+        Input values.
+
+    Returns
+    -------
+    hyp0f1 : ndarray
+        The confluent hypergeometric limit function.
+
+    Notes
+    -----
+    This function is defined as:
+
+    .. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
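+
+    As a quick illustrative check (added here; not part of the upstream
+    docstring), only the :math:`k = 0` term of the series survives at
+    :math:`z = 0`, so the value there is 1 for any valid `v`:
+
+    >>> import numpy as np
+    >>> from scipy.special import hyp0f1
+    >>> np.allclose(hyp0f1(2.5, 0.0), 1.0)
+    True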
+
+    It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
+    and satisfies the differential equation :math:`zf''(z) + vf'(z) = f(z)`.
+    """)
+
+add_newdoc("scipy.special", "hyp1f1",
+    """
+    hyp1f1(a, b, x)
+
+    Confluent hypergeometric function 1F1(a, b; x)
+    """)
+
+add_newdoc("scipy.special", "hyp1f2",
+    """
+    hyp1f2(a, b, c, x)
+
+    Hypergeometric function 1F2 and error estimate
+
+    Returns
+    -------
+    y
+        Value of the function
+    err
+        Error estimate
+    """)
+
+add_newdoc("scipy.special", "hyp2f0",
+    """
+    hyp2f0(a, b, x, type)
+
+    Hypergeometric function 2F0 and an error estimate
+
+    The parameter `type` determines a convergence factor and can be
+    either 1 or 2.
+
+    Returns
+    -------
+    y
+        Value of the function
+    err
+        Error estimate
+    """)
+
+add_newdoc("scipy.special", "hyp2f1",
+    r"""
+    hyp2f1(a, b, c, z)
+
+    Gauss hypergeometric function 2F1(a, b; c; z)
+
+    Parameters
+    ----------
+    a, b, c : array_like
+        Arguments, should be real-valued.
+    z : array_like
+        Argument, real or complex.
+
+    Returns
+    -------
+    hyp2f1 : scalar or ndarray
+        The values of the Gauss hypergeometric function.
+
+    See also
+    --------
+    hyp0f1 : confluent hypergeometric limit function.
+    hyp1f1 : Kummer's (confluent hypergeometric) function.
+
+    Notes
+    -----
+    This function is defined for :math:`|z| < 1` as
+
+    .. math::
+
+       \mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty
+       \frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!},
+
+    and defined on the rest of the complex z-plane by analytic continuation.
+    Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
+    :math:`a` or :math:`b` is a nonpositive integer the series terminates
+    and the result is a polynomial in :math:`z`.
+
+    The implementation for complex values of ``z`` is described in [1]_.
+
+    References
+    ----------
+    .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996
+    .. [2] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    .. [3] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/
+
+    """)
+
+add_newdoc("scipy.special", "hyp3f0",
+    """
+    hyp3f0(a, b, c, x)
+
+    Hypergeometric function 3F0 and an error estimate
+
+    Returns
+    -------
+    y
+        Value of the function
+    err
+        Error estimate
+    """)
+
+add_newdoc("scipy.special", "hyperu",
+    """
+    hyperu(a, b, x)
+
+    Confluent hypergeometric function U(a, b, x) of the second kind
+    """)
+
+add_newdoc("scipy.special", "i0",
+    r"""
+    i0(x)
+
+    Modified Bessel function of order 0.
+
+    Defined as,
+
+    .. math::
+        I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
+
+    where :math:`J_0` is the Bessel function of the first kind of order 0.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float)
+
+    Returns
+    -------
+    I : ndarray
+        Value of the modified Bessel function of order 0 at `x`.
+
+    Notes
+    -----
+    The range is partitioned into the two intervals [0, 8] and (8, infinity).
+    Chebyshev polynomial expansions are employed in each interval.
+
+    This function is a wrapper for the Cephes [1]_ routine `i0`.
+
+    See also
+    --------
+    iv
+    i0e
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    """)
+
+add_newdoc("scipy.special", "i0e",
+    """
+    i0e(x)
+
+    Exponentially scaled modified Bessel function of order 0.
+
+    Defined as::
+
+        i0e(x) = exp(-abs(x)) * i0(x).
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float)
+
+    Returns
+    -------
+    I : ndarray
+        Value of the exponentially scaled modified Bessel function of order 0
+        at `x`.
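+
+    Examples
+    --------
+    A small sanity check of the scaling relation quoted above (added for
+    illustration; not in the upstream docstring):
+
+    >>> import numpy as np
+    >>> from scipy.special import i0, i0e
+    >>> x = np.array([-10.0, -1.0, 0.5, 20.0])
+    >>> np.allclose(i0e(x), np.exp(-np.abs(x)) * i0(x))
+    True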
+ + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. The + polynomial expansions used are the same as those in `i0`, but + they are not multiplied by the dominant exponential factor. + + This function is a wrapper for the Cephes [1]_ routine `i0e`. + + See also + -------- + iv + i0 + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "i1", + r""" + i1(x) + + Modified Bessel function of order 1. + + Defined as, + + .. math:: + I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!} + = -\imath J_1(\imath x), + + where :math:`J_1` is the Bessel function of the first kind of order 1. + + Parameters + ---------- + x : array_like + Argument (float) + + Returns + ------- + I : ndarray + Value of the modified Bessel function of order 1 at `x`. + + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `i1`. + + See also + -------- + iv + i1e + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "i1e", + """ + i1e(x) + + Exponentially scaled modified Bessel function of order 1. + + Defined as:: + + i1e(x) = exp(-abs(x)) * i1(x) + + Parameters + ---------- + x : array_like + Argument (float) + + Returns + ------- + I : ndarray + Value of the exponentially scaled modified Bessel function of order 1 + at `x`. + + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. The + polynomial expansions used are the same as those in `i1`, but + they are not multiplied by the dominant exponential factor. + + This function is a wrapper for the Cephes [1]_ routine `i1e`. + + See also + -------- + iv + i1 + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "_igam_fac", + """ + Internal function, do not use. + """) + +add_newdoc("scipy.special", "it2i0k0", + """ + it2i0k0(x) + + Integrals related to modified Bessel functions of order 0 + + Returns + ------- + ii0 + ``integral((i0(t)-1)/t, t=0..x)`` + ik0 + ``integral(k0(t)/t, t=x..inf)`` + """) + +add_newdoc("scipy.special", "it2j0y0", + """ + it2j0y0(x) + + Integrals related to Bessel functions of order 0 + + Returns + ------- + ij0 + ``integral((1-j0(t))/t, t=0..x)`` + iy0 + ``integral(y0(t)/t, t=x..inf)`` + """) + +add_newdoc("scipy.special", "it2struve0", + r""" + it2struve0(x) + + Integral related to the Struve function of order 0. + + Returns the integral, + + .. math:: + \int_x^\infty \frac{H_0(t)}{t}\,dt + + where :math:`H_0` is the Struve function of order 0. + + Parameters + ---------- + x : array_like + Lower limit of integration. + + Returns + ------- + I : ndarray + The value of the integral. + + See also + -------- + struve + + Notes + ----- + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + """) + +add_newdoc("scipy.special", "itairy", + """ + itairy(x) + + Integrals of Airy functions + + Calculates the integrals of Airy functions from 0 to `x`. + + Parameters + ---------- + + x: array_like + Upper limit of integration (float). + + Returns + ------- + Apt + Integral of Ai(t) from 0 to x. + Bpt + Integral of Bi(t) from 0 to x. + Ant + Integral of Ai(-t) from 0 to x. + Bnt + Integral of Bi(-t) from 0 to x. + + Notes + ----- + + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. + + References + ---------- + + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + """) + +add_newdoc("scipy.special", "iti0k0", + """ + iti0k0(x) + + Integrals of modified Bessel functions of order 0 + + Returns simple integrals from 0 to `x` of the zeroth order modified + Bessel functions `i0` and `k0`. + + Returns + ------- + ii0, ik0 + """) + +add_newdoc("scipy.special", "itj0y0", + """ + itj0y0(x) + + Integrals of Bessel functions of order 0 + + Returns simple integrals from 0 to `x` of the zeroth order Bessel + functions `j0` and `y0`. + + Returns + ------- + ij0, iy0 + """) + +add_newdoc("scipy.special", "itmodstruve0", + r""" + itmodstruve0(x) + + Integral of the modified Struve function of order 0. + + .. math:: + I = \int_0^x L_0(t)\,dt + + Parameters + ---------- + x : array_like + Upper limit of integration (float). + + Returns + ------- + I : ndarray + The integral of :math:`L_0` from 0 to `x`. + + Notes + ----- + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """) + +add_newdoc("scipy.special", "itstruve0", + r""" + itstruve0(x) + + Integral of the Struve function of order 0. + + .. math:: + I = \int_0^x H_0(t)\,dt + + Parameters + ---------- + x : array_like + Upper limit of integration (float). + + Returns + ------- + I : ndarray + The integral of :math:`H_0` from 0 to `x`. + + See also + -------- + struve + + Notes + ----- + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """) + +add_newdoc("scipy.special", "iv", + r""" + iv(v, z) + + Modified Bessel function of the first kind of real order. + + Parameters + ---------- + v : array_like + Order. If `z` is of real type and negative, `v` must be integer + valued. + z : array_like of float or complex + Argument. + + Returns + ------- + out : ndarray + Values of the modified Bessel function. + + Notes + ----- + For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out + using Temme's method [1]_. For larger orders, uniform asymptotic + expansions are applied. + + For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is + called. 
It uses a power series for small `z`, the asymptotic expansion + for large `abs(z)`, the Miller algorithm normalized by the Wronskian + and a Neumann series for intermediate magnitudes, and the uniform + asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large + orders. Backward recurrence is used to generate sequences or reduce + orders when necessary. + + The calculations above are done in the right half plane and continued + into the left half plane by the formula, + + .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z) + + (valid when the real part of `z` is positive). For negative `v`, the + formula + + .. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z) + + is used, where :math:`K_v(z)` is the modified Bessel function of the + second kind, evaluated using the AMOS routine `zbesk`. + + See also + -------- + kve : This function with leading exponential behavior stripped off. + + References + ---------- + .. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976) + .. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + """) + +add_newdoc("scipy.special", "ive", + r""" + ive(v, z) + + Exponentially scaled modified Bessel function of the first kind + + Defined as:: + + ive(v, z) = iv(v, z) * exp(-abs(z.real)) + + Parameters + ---------- + v : array_like of float + Order. + z : array_like of float or complex + Argument. + + Returns + ------- + out : ndarray + Values of the exponentially scaled modified Bessel function. + + Notes + ----- + For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a + power series for small `z`, the asymptotic expansion for large + `abs(z)`, the Miller algorithm normalized by the Wronskian and a + Neumann series for intermediate magnitudes, and the uniform asymptotic + expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders. + Backward recurrence is used to generate sequences or reduce orders when + necessary. + + The calculations above are done in the right half plane and continued + into the left half plane by the formula, + + .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z) + + (valid when the real part of `z` is positive). For negative `v`, the + formula + + .. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z) + + is used, where :math:`K_v(z)` is the modified Bessel function of the + second kind, evaluated using the AMOS routine `zbesk`. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + """) + +add_newdoc("scipy.special", "j0", + r""" + j0(x) + + Bessel function of the first kind of order 0. + + Parameters + ---------- + x : array_like + Argument (float). + + Returns + ------- + J : ndarray + Value of the Bessel function of the first kind of order 0 at `x`. + + Notes + ----- + The domain is divided into the intervals [0, 5] and (5, infinity). In the + first interval the following rational approximation is used: + + .. math:: + + J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)}, + + where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of + :math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3 + and 8, respectively. + + In the second interval, the Hankel asymptotic expansion is employed with + two rational functions of degree 6/6 and 7/7. + + This function is a wrapper for the Cephes [1]_ routine `j0`. 
+ It should not be confused with the spherical Bessel functions (see + `spherical_jn`). + + See also + -------- + jv : Bessel function of real order and complex argument. + spherical_jn : spherical Bessel functions. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "j1", + """ + j1(x) + + Bessel function of the first kind of order 1. + + Parameters + ---------- + x : array_like + Argument (float). + + Returns + ------- + J : ndarray + Value of the Bessel function of the first kind of order 1 at `x`. + + Notes + ----- + The domain is divided into the intervals [0, 8] and (8, infinity). In the + first interval a 24 term Chebyshev expansion is used. In the second, the + asymptotic trigonometric representation is employed using two rational + functions of degree 5/5. + + This function is a wrapper for the Cephes [1]_ routine `j1`. + It should not be confused with the spherical Bessel functions (see + `spherical_jn`). + + See also + -------- + jv + spherical_jn : spherical Bessel functions. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "jn", + """ + jn(n, x) + + Bessel function of the first kind of integer order and real argument. + + Notes + ----- + `jn` is an alias of `jv`. + Not to be confused with the spherical Bessel functions (see `spherical_jn`). + + See also + -------- + jv + spherical_jn : spherical Bessel functions. + + """) + +add_newdoc("scipy.special", "jv", + r""" + jv(v, z) + + Bessel function of the first kind of real order and complex argument. + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + + Returns + ------- + J : ndarray + Value of the Bessel function, :math:`J_v(z)`. + + Notes + ----- + For positive `v` values, the computation is carried out using the AMOS + [1]_ `zbesj` routine, which exploits the connection to the modified + Bessel function :math:`I_v`, + + .. math:: + J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0) + + J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0) + + For negative `v` values the formula, + + .. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v) + + is used, where :math:`Y_v(z)` is the Bessel function of the second + kind, computed using the AMOS routine `zbesy`. Note that the second + term is exactly zero for integer `v`; to improve accuracy the second + term is explicitly omitted for `v` values such that `v = floor(v)`. + + Not to be confused with the spherical Bessel functions (see `spherical_jn`). + + See also + -------- + jve : :math:`J_v` with leading exponential behavior stripped off. + spherical_jn : spherical Bessel functions. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + """) + +add_newdoc("scipy.special", "jve", + r""" + jve(v, z) + + Exponentially scaled Bessel function of order `v`. + + Defined as:: + + jve(v, z) = jv(v, z) * exp(-abs(z.imag)) + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + + Returns + ------- + J : ndarray + Value of the exponentially scaled Bessel function. 
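+
+    Examples
+    --------
+    An illustrative check of the definition above (added here; not in the
+    upstream docstring), using a complex argument:
+
+    >>> import numpy as np
+    >>> from scipy.special import jv, jve
+    >>> v, z = 1, 3 + 4j
+    >>> np.allclose(jve(v, z), jv(v, z) * np.exp(-abs(z.imag)))
+    True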
+ + Notes + ----- + For positive `v` values, the computation is carried out using the AMOS + [1]_ `zbesj` routine, which exploits the connection to the modified + Bessel function :math:`I_v`, + + .. math:: + J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0) + + J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0) + + For negative `v` values the formula, + + .. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v) + + is used, where :math:`Y_v(z)` is the Bessel function of the second + kind, computed using the AMOS routine `zbesy`. Note that the second + term is exactly zero for integer `v`; to improve accuracy the second + term is explicitly omitted for `v` values such that `v = floor(v)`. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + """) + +add_newdoc("scipy.special", "k0", + r""" + k0(x) + + Modified Bessel function of the second kind of order 0, :math:`K_0`. + + This function is also sometimes referred to as the modified Bessel + function of the third kind of order 0. + + Parameters + ---------- + x : array_like + Argument (float). + + Returns + ------- + K : ndarray + Value of the modified Bessel function :math:`K_0` at `x`. + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k0`. + + See also + -------- + kv + k0e + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "k0e", + """ + k0e(x) + + Exponentially scaled modified Bessel function K of order 0 + + Defined as:: + + k0e(x) = exp(x) * k0(x). + + Parameters + ---------- + x : array_like + Argument (float) + + Returns + ------- + K : ndarray + Value of the exponentially scaled modified Bessel function K of order + 0 at `x`. + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k0e`. + + See also + -------- + kv + k0 + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "k1", + """ + k1(x) + + Modified Bessel function of the second kind of order 1, :math:`K_1(x)`. + + Parameters + ---------- + x : array_like + Argument (float) + + Returns + ------- + K : ndarray + Value of the modified Bessel function K of order 1 at `x`. + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k1`. + + See also + -------- + kv + k1e + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "k1e", + """ + k1e(x) + + Exponentially scaled modified Bessel function K of order 1 + + Defined as:: + + k1e(x) = exp(x) * k1(x) + + Parameters + ---------- + x : array_like + Argument (float) + + Returns + ------- + K : ndarray + Value of the exponentially scaled modified Bessel function K of order + 1 at `x`. + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). 
+    Chebyshev polynomial expansions are employed in each interval.
+
+    This function is a wrapper for the Cephes [1]_ routine `k1e`.
+
+    See also
+    --------
+    kv
+    k1
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    """)
+
+add_newdoc("scipy.special", "kei",
+    """
+    kei(x)
+
+    Kelvin function kei
+    """)
+
+add_newdoc("scipy.special", "keip",
+    """
+    keip(x)
+
+    Derivative of the Kelvin function kei
+    """)
+
+add_newdoc("scipy.special", "kelvin",
+    """
+    kelvin(x)
+
+    Kelvin functions as complex numbers
+
+    Returns
+    -------
+    Be, Ke, Bep, Kep
+        The tuple (Be, Ke, Bep, Kep) contains complex numbers
+        representing the real and imaginary Kelvin functions and their
+        derivatives evaluated at `x`. For example, kelvin(x)[0].real =
+        ber x and kelvin(x)[0].imag = bei x with similar relationships
+        for ker and kei.
+    """)
+
+add_newdoc("scipy.special", "ker",
+    """
+    ker(x)
+
+    Kelvin function ker
+    """)
+
+add_newdoc("scipy.special", "kerp",
+    """
+    kerp(x)
+
+    Derivative of the Kelvin function ker
+    """)
+
+add_newdoc("scipy.special", "kl_div",
+    r"""
+    kl_div(x, y)
+
+    Elementwise function for computing Kullback-Leibler divergence.
+
+    .. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
+
+    Parameters
+    ----------
+    x : ndarray
+        First input array.
+    y : ndarray
+        Second input array.
+
+    Returns
+    -------
+    res : ndarray
+        Output array.
+
+    See Also
+    --------
+    entr, rel_entr
+
+    Notes
+    -----
+    This function is non-negative and is jointly convex in `x` and `y`.
+
+    .. versionadded:: 0.15.0
+
+    """)
+
+add_newdoc("scipy.special", "kn",
+    r"""
+    kn(n, x)
+
+    Modified Bessel function of the second kind of integer order `n`
+
+    Returns the modified Bessel function of the second kind for integer order
+    `n` at real `x`.
+
+    These are also sometimes called functions of the third kind, Basset
+    functions, or Macdonald functions.
+
+    Parameters
+    ----------
+    n : array_like of int
+        Order of Bessel functions (floats will truncate with a warning)
+    x : array_like of float
+        Argument at which to evaluate the Bessel functions
+
+    Returns
+    -------
+    out : ndarray
+        The results
+
+    Notes
+    -----
+    Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
+    algorithm used, see [2]_ and the references therein.
+
+    See Also
+    --------
+    kv : Same function, but accepts real order and complex argument
+    kvp : Derivative of this function
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+    .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
+           functions of a complex argument and nonnegative order", ACM
+           TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
+
+    Examples
+    --------
+    Plot the function of several orders for real input:
+
+    >>> from scipy.special import kn
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0, 5, 1000)
+    >>> for N in range(6):
+    ... 
plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
+    >>> plt.ylim(0, 10)
+    >>> plt.legend()
+    >>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
+    >>> plt.show()
+
+    Calculate for a single value at multiple orders:
+
+    >>> kn([4, 5, 6], 1)
+    array([   44.23241585,   360.9605896 ,  3653.83831186])
+    """)
+
+add_newdoc("scipy.special", "kolmogi",
+    """
+    kolmogi(p)
+
+    Inverse Survival Function of Kolmogorov distribution
+
+    It is the inverse function to `kolmogorov`.
+    Returns y such that ``kolmogorov(y) == p``.
+
+    Parameters
+    ----------
+    p : float array_like
+        Probability
+
+    Returns
+    -------
+    float
+        The value(s) of kolmogi(p)
+
+    Notes
+    -----
+    `kolmogorov` is used by `stats.kstest` in the application of the
+    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+    function is exposed in `scipy.special`, but the recommended way to achieve
+    the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+    `stats.kstwobign` distribution.
+
+    See Also
+    --------
+    kolmogorov : The Survival Function for the distribution
+    scipy.stats.kstwobign : Provides the functionality as a continuous distribution
+    smirnov, smirnovi : Functions for the one-sided distribution
+
+    Examples
+    --------
+    >>> from scipy.special import kolmogi
+    >>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])
+    array([        inf,  1.22384787,  1.01918472,  0.82757356,  0.67644769,
+            0.57117327,  0.        ])
+
+    """)
+
+add_newdoc("scipy.special", "kolmogorov",
+    r"""
+    kolmogorov(y)
+
+    Complementary cumulative distribution function (survival function) of
+    the Kolmogorov distribution.
+
+    Returns the complementary cumulative distribution function of
+    Kolmogorov's limiting distribution (``D_n*\sqrt(n)`` as n goes to infinity)
+    of a two-sided test for equality between an empirical and a theoretical
+    distribution. It is equal to the (limit as n->infinity of the)
+    probability that ``sqrt(n) * max absolute deviation > y``.
+
+    Parameters
+    ----------
+    y : float array_like
+        Absolute deviation between the Empirical CDF (ECDF) and the target CDF,
+        multiplied by sqrt(n).
+
+    Returns
+    -------
+    float
+        The value(s) of kolmogorov(y)
+
+    Notes
+    -----
+    `kolmogorov` is used by `stats.kstest` in the application of the
+    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+    function is exposed in `scipy.special`, but the recommended way to achieve
+    the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+    `stats.kstwobign` distribution.
+
+    See Also
+    --------
+    kolmogi : The Inverse Survival Function for the distribution
+    scipy.stats.kstwobign : Provides the functionality as a continuous distribution
+    smirnov, smirnovi : Functions for the one-sided distribution
+
+    Examples
+    --------
+    Show the probability of a gap at least as big as 0, 0.5 and 1.0.
+
+    >>> from scipy.special import kolmogorov
+    >>> from scipy.stats import kstwobign
+    >>> kolmogorov([0, 0.5, 1.0])
+    array([ 1.        ,  0.96394524,  0.26999967])
+
+    Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against
+    the target distribution, a Normal(0, 1) distribution.
+
+    >>> from scipy.stats import norm, laplace
+    >>> n = 1000
+    >>> np.random.seed(seed=233423)
+    >>> lap01 = laplace(0, 1)
+    >>> x = np.sort(lap01.rvs(n))
+    >>> np.mean(x), np.std(x)
+    (-0.083073685397609842, 1.3676426568399822)
+
+    Construct the Empirical CDF and the K-S statistic Dn.
+ + >>> target = norm(0,1) # Normal mean 0, stddev 1 + >>> cdfs = target.cdf(x) + >>> ecdfs = np.arange(n+1, dtype=float)/n + >>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs]) + >>> Dn = np.max(gaps) + >>> Kn = np.sqrt(n) * Dn + >>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn)) + Dn=0.058286, sqrt(n)*Dn=1.843153 + >>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:', + ... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' % (Kn, kolmogorov(Kn)), + ... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' % (Kn, kstwobign.cdf(Kn))])) + For a sample of size n drawn from a N(0, 1) distribution: + the approximate Kolmogorov probability that sqrt(n)*Dn>=1.843153 is 0.002240 + the approximate Kolmogorov probability that sqrt(n)*Dn<=1.843153 is 0.997760 + + Plot the Empirical CDF against the target N(0, 1) CDF. + + >>> import matplotlib.pyplot as plt + >>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF') + >>> x3 = np.linspace(-3, 3, 100) + >>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)') + >>> plt.ylim([0, 1]); plt.grid(True); plt.legend(); + >>> # Add vertical lines marking Dn+ and Dn- + >>> iminus, iplus = np.argmax(gaps, axis=0) + >>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4) + >>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='r', linestyle='dashed', lw=4) + >>> plt.show() + """) + +add_newdoc("scipy.special", "_kolmogc", + r""" + Internal function, do not use. + """) + +add_newdoc("scipy.special", "_kolmogci", + r""" + Internal function, do not use. + """) + +add_newdoc("scipy.special", "_kolmogp", + r""" + Internal function, do not use. + """) + +add_newdoc("scipy.special", "kv", + r""" + kv(v, z) + + Modified Bessel function of the second kind of real order `v` + + Returns the modified Bessel function of the second kind for real order + `v` at complex `z`. + + These are also sometimes called functions of the third kind, Basset + functions, or Macdonald functions. They are defined as those solutions + of the modified Bessel equation for which, + + .. math:: + K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x) + + as :math:`x \to \infty` [3]_. + + Parameters + ---------- + v : array_like of float + Order of Bessel functions + z : array_like of complex + Argument at which to evaluate the Bessel functions + + Returns + ------- + out : ndarray + The results. Note that input must be of complex type to get complex + output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``. + + Notes + ----- + Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the + algorithm used, see [2]_ and the references therein. + + See Also + -------- + kve : This function with leading exponential behavior stripped off. + kvp : Derivative of this function + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel + functions of a complex argument and nonnegative order", ACM + TOMS Vol. 12 Issue 3, Sept. 1986, p. 265 + .. [3] NIST Digital Library of Mathematical Functions, + Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3 + + Examples + -------- + Plot the function of several orders for real input: + + >>> from scipy.special import kv + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0, 5, 1000) + >>> for N in np.linspace(0, 6, 5): + ... 
plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N)) + >>> plt.ylim(0, 10) + >>> plt.legend() + >>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$') + >>> plt.show() + + Calculate for a single value at multiple orders: + + >>> kv([4, 4.5, 5], 1+2j) + array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j]) + + """) + +add_newdoc("scipy.special", "kve", + r""" + kve(v, z) + + Exponentially scaled modified Bessel function of the second kind. + + Returns the exponentially scaled, modified Bessel function of the + second kind (sometimes called the third kind) for real order `v` at + complex `z`:: + + kve(v, z) = kv(v, z) * exp(z) + + Parameters + ---------- + v : array_like of float + Order of Bessel functions + z : array_like of complex + Argument at which to evaluate the Bessel functions + + Returns + ------- + out : ndarray + The exponentially scaled modified Bessel function of the second kind. + + Notes + ----- + Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the + algorithm used, see [2]_ and the references therein. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel + functions of a complex argument and nonnegative order", ACM + TOMS Vol. 12 Issue 3, Sept. 1986, p. 265 + """) + +add_newdoc("scipy.special", "_lanczos_sum_expg_scaled", + """ + Internal function, do not use. + """) + +add_newdoc("scipy.special", "_lgam1p", + """ + Internal function, do not use. + """) + +add_newdoc("scipy.special", "log1p", + """ + log1p(x) + + Calculates log(1+x) for use when `x` is near zero + """) + +add_newdoc("scipy.special", "_log1pmx", + """ + Internal function, do not use. + """) + +add_newdoc('scipy.special', 'logit', + """ + logit(x) + + Logit ufunc for ndarrays. + + The logit function is defined as logit(p) = log(p/(1-p)). + Note that logit(0) = -inf, logit(1) = inf, and logit(p) + for p<0 or p>1 yields nan. + + Parameters + ---------- + x : ndarray + The ndarray to apply logit to element-wise. + + Returns + ------- + out : ndarray + An ndarray of the same shape as x. Its entries + are logit of the corresponding entry of x. + + See Also + -------- + expit + + Notes + ----- + As a ufunc logit takes a number of optional + keyword arguments. For more information + see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_ + + .. versionadded:: 0.10.0 + + Examples + -------- + >>> from scipy.special import logit, expit + + >>> logit([0, 0.25, 0.5, 0.75, 1]) + array([ -inf, -1.09861229, 0. , 1.09861229, inf]) + + `expit` is the inverse of `logit`: + + >>> expit(logit([0.1, 0.75, 0.999])) + array([ 0.1 , 0.75 , 0.999]) + + Plot logit(x) for x in [0, 1]: + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0, 1, 501) + >>> y = logit(x) + >>> plt.plot(x, y) + >>> plt.grid() + >>> plt.ylim(-6, 6) + >>> plt.xlabel('x') + >>> plt.title('logit(x)') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "lpmv", + r""" + lpmv(m, v, x) + + Associated Legendre function of integer order and real degree. + + Defined as + + .. math:: + + P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x) + + where + + .. math:: + + P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2} + \left(\frac{1 - x}{2}\right)^k + + is the Legendre function of the first kind. Here :math:`(\cdot)_k` + is the Pochhammer symbol; see `poch`. 
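+
+    For example (an illustrative check added here; not part of the
+    upstream docstring), :math:`P_1^1(x) = -\sqrt{1 - x^2}` with this
+    convention:
+
+    >>> import numpy as np
+    >>> from scipy.special import lpmv
+    >>> np.allclose(lpmv(1, 1, 0.5), -np.sqrt(1 - 0.5**2))
+    True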
+ + Parameters + ---------- + m : array_like + Order (int or float). If passed a float not equal to an + integer the function returns NaN. + v : array_like + Degree (float). + x : array_like + Argument (float). Must have ``|x| <= 1``. + + Returns + ------- + pmv : ndarray + Value of the associated Legendre function. + + See Also + -------- + lpmn : Compute the associated Legendre function for all orders + ``0, ..., m`` and degrees ``0, ..., n``. + clpmn : Compute the associated Legendre function at complex + arguments. + + Notes + ----- + Note that this implementation includes the Condon-Shortley phase. + + References + ---------- + .. [1] Zhang, Jin, "Computation of Special Functions", John Wiley + and Sons, Inc, 1996. + + """) + +add_newdoc("scipy.special", "mathieu_a", + """ + mathieu_a(m, q) + + Characteristic value of even Mathieu functions + + Returns the characteristic value for the even solution, + ``ce_m(z, q)``, of Mathieu's equation. + """) + +add_newdoc("scipy.special", "mathieu_b", + """ + mathieu_b(m, q) + + Characteristic value of odd Mathieu functions + + Returns the characteristic value for the odd solution, + ``se_m(z, q)``, of Mathieu's equation. + """) + +add_newdoc("scipy.special", "mathieu_cem", + """ + mathieu_cem(m, q, x) + + Even Mathieu function and its derivative + + Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and + parameter `q` evaluated at `x` (given in degrees). Also returns the + derivative with respect to `x` of ce_m(x, q) + + Parameters + ---------- + m + Order of the function + q + Parameter of the function + x + Argument of the function, *given in degrees, not radians* + + Returns + ------- + y + Value of the function + yp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "mathieu_modcem1", + """ + mathieu_modcem1(m, q, x) + + Even modified Mathieu function of the first kind and its derivative + + Evaluates the even modified Mathieu function of the first kind, + ``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter + `q`. + + Returns + ------- + y + Value of the function + yp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "mathieu_modcem2", + """ + mathieu_modcem2(m, q, x) + + Even modified Mathieu function of the second kind and its derivative + + Evaluates the even modified Mathieu function of the second kind, + Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m` + and parameter `q`. + + Returns + ------- + y + Value of the function + yp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "mathieu_modsem1", + """ + mathieu_modsem1(m, q, x) + + Odd modified Mathieu function of the first kind and its derivative + + Evaluates the odd modified Mathieu function of the first kind, + Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m` + and parameter `q`. + + Returns + ------- + y + Value of the function + yp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "mathieu_modsem2", + """ + mathieu_modsem2(m, q, x) + + Odd modified Mathieu function of the second kind and its derivative + + Evaluates the odd modified Mathieu function of the second kind, + Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m` + and parameter q. 
+
+    Returns
+    -------
+    y
+        Value of the function
+    yp
+        Value of the derivative vs x
+    """)
+
+add_newdoc("scipy.special", "mathieu_sem",
+    """
+    mathieu_sem(m, q, x)
+
+    Odd Mathieu function and its derivative
+
+    Returns the odd Mathieu function, se_m(x, q), of order `m` and
+    parameter `q` evaluated at `x` (given in degrees). Also returns the
+    derivative with respect to `x` of se_m(x, q).
+
+    Parameters
+    ----------
+    m
+        Order of the function
+    q
+        Parameter of the function
+    x
+        Argument of the function, *given in degrees, not radians*.
+
+    Returns
+    -------
+    y
+        Value of the function
+    yp
+        Value of the derivative vs x
+    """)
+
+add_newdoc("scipy.special", "modfresnelm",
+    """
+    modfresnelm(x)
+
+    Modified Fresnel negative integrals
+
+    Returns
+    -------
+    fm
+        Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
+    km
+        Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
+    """)
+
+add_newdoc("scipy.special", "modfresnelp",
+    """
+    modfresnelp(x)
+
+    Modified Fresnel positive integrals
+
+    Returns
+    -------
+    fp
+        Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
+    kp
+        Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
+    """)
+
+add_newdoc("scipy.special", "modstruve",
+    r"""
+    modstruve(v, x)
+
+    Modified Struve function.
+
+    Return the value of the modified Struve function of order `v` at `x`. The
+    modified Struve function is defined as,
+
+    .. math::
+        L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(\imath x),
+
+    where :math:`H_v` is the Struve function.
+
+    Parameters
+    ----------
+    v : array_like
+        Order of the modified Struve function (float).
+    x : array_like
+        Argument of the Struve function (float; must be positive unless `v` is
+        an integer).
+
+    Returns
+    -------
+    L : ndarray
+        Value of the modified Struve function of order `v` at `x`.
+
+    Notes
+    -----
+    Three methods discussed in [1]_ are used to evaluate the function:
+
+    - power series
+    - expansion in Bessel functions (if :math:`|z| < |v| + 20`)
+    - asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
+
+    Rounding errors are estimated based on the largest terms in the sums, and
+    the result associated with the smallest error is returned.
+
+    See also
+    --------
+    struve
+
+    References
+    ----------
+    .. [1] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/11
+    """)
+
+add_newdoc("scipy.special", "nbdtr",
+    r"""
+    nbdtr(k, n, p)
+
+    Negative binomial cumulative distribution function.
+
+    Returns the sum of the terms 0 through `k` of the negative binomial
+    distribution probability mass function,
+
+    .. math::
+
+        F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
+
+    In a sequence of Bernoulli trials with individual success probabilities
+    `p`, this is the probability that `k` or fewer failures precede the nth
+    success.
+
+    Parameters
+    ----------
+    k : array_like
+        The maximum number of allowed failures (nonnegative int).
+    n : array_like
+        The target number of successes (positive int).
+    p : array_like
+        Probability of success in a single event (float).
+
+    Returns
+    -------
+    F : ndarray
+        The probability of `k` or fewer failures before `n` successes in a
+        sequence of events with individual success probability `p`.
+
+    See also
+    --------
+    nbdtrc
+
+    Notes
+    -----
+    If floating point values are passed for `k` or `n`, they will be truncated
+    to integers.
+
+    The terms are not summed directly; instead the regularized incomplete beta
+    function is employed, according to the formula,
+
+    .. math::
+        \mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
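+
+    As an illustrative check of this identity (added here; not in the
+    upstream docstring), compare against `betainc`, which computes the
+    regularized incomplete beta function :math:`I_x(a, b)`:
+
+    >>> import numpy as np
+    >>> from scipy.special import nbdtr, betainc
+    >>> k, n, p = 5, 4, 0.3
+    >>> np.allclose(nbdtr(k, n, p), betainc(n, k + 1, p))
+    True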
+ + Wrapper for the Cephes [1]_ routine `nbdtr`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "nbdtrc", + r""" + nbdtrc(k, n, p) + + Negative binomial survival function. + + Returns the sum of the terms `k + 1` to infinity of the negative binomial + distribution probability mass function, + + .. math:: + + F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j. + + In a sequence of Bernoulli trials with individual success probabilities + `p`, this is the probability that more than `k` failures precede the nth + success. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + n : array_like + The target number of successes (positive int). + p : array_like + Probability of success in a single event (float). + + Returns + ------- + F : ndarray + The probability of `k + 1` or more failures before `n` successes in a + sequence of events with individual success probability `p`. + + Notes + ----- + If floating point values are passed for `k` or `n`, they will be truncated + to integers. + + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n). + + Wrapper for the Cephes [1]_ routine `nbdtrc`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "nbdtri", + """ + nbdtri(k, n, y) + + Inverse of `nbdtr` vs `p`. + + Returns the inverse with respect to the parameter `p` of + `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution + function. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + n : array_like + The target number of successes (positive int). + y : array_like + The probability of `k` or fewer failures before `n` successes (float). + + Returns + ------- + p : ndarray + Probability of success in a single event (float) such that + `nbdtr(k, n, p) = y`. + + See also + -------- + nbdtr : Cumulative distribution function of the negative binomial. + nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`. + nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `nbdtri`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("scipy.special", "nbdtrik", + r""" + nbdtrik(y, n, p) + + Inverse of `nbdtr` vs `k`. + + Returns the inverse with respect to the parameter `k` of + `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution + function. + + Parameters + ---------- + y : array_like + The probability of `k` or fewer failures before `n` successes (float). + n : array_like + The target number of successes (positive int). + p : array_like + Probability of success in a single event (float). + + Returns + ------- + k : ndarray + The maximum number of allowed failures such that `nbdtr(k, n, p) = y`. + + See also + -------- + nbdtr : Cumulative distribution function of the negative binomial. + nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`. + nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`. + + Formula 26.5.26 of [2]_, + + .. 
math:: + \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n), + + is used to reduce calculation of the cumulative distribution function to + that of a regularized incomplete beta :math:`I`. + + Computation of `k` involves a search for a value that produces the desired + value of `y`. The search relies on the monotonicity of `y` with `k`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("scipy.special", "nbdtrin", + r""" + nbdtrin(k, y, p) + + Inverse of `nbdtr` vs `n`. + + Returns the inverse with respect to the parameter `n` of + `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution + function. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + y : array_like + The probability of `k` or fewer failures before `n` successes (float). + p : array_like + Probability of success in a single event (float). + + Returns + ------- + n : ndarray + The number of successes `n` such that `nbdtr(k, n, p) = y`. + + See also + -------- + nbdtr : Cumulative distribution function of the negative binomial. + nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`. + nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`. + + Formula 26.5.26 of [2]_, + + .. math:: + \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n), + + is used to reduce calculation of the cumulative distribution function to + that of a regularized incomplete beta :math:`I`. + + Computation of `n` involves a search for a value that produces the desired + value of `y`. The search relies on the monotonicity of `y` with `n`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("scipy.special", "ncfdtr", + r""" + ncfdtr(dfn, dfd, nc, f) + + Cumulative distribution function of the non-central F distribution. + + The non-central F describes the distribution of, + + .. math:: + Z = \frac{X/d_n}{Y/d_d} + + where :math:`X` and :math:`Y` are independently distributed, with + :math:`X` distributed non-central :math:`\chi^2` with noncentrality + parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y` + distributed :math:`\chi^2` with :math:`d_d` degrees of freedom. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + f : array_like + Quantiles, i.e. the upper limit of integration. + + Returns + ------- + cdf : float or ndarray + The calculated CDF. If all inputs are scalar, the return will be a + float. Otherwise it will be an array. + + See Also + -------- + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. 
+ ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`. + + The cumulative distribution function is computed using Formula 26.6.20 of + [2]_: + + .. math:: + F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}), + + where :math:`I` is the regularized incomplete beta function, and + :math:`x = f d_n/(f d_n + d_d)`. + + The computation time required for this routine is proportional to the + noncentrality parameter `nc`. Very large values of this parameter can + consume immense computer resources. This is why the search range is + bounded by 10,000. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + >>> from scipy import special + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Plot the CDF of the non-central F distribution, for nc=0. Compare with the + F-distribution from scipy.stats: + + >>> x = np.linspace(-1, 8, num=500) + >>> dfn = 3 + >>> dfd = 2 + >>> ncf_stats = stats.f.cdf(x, dfn, dfd) + >>> ncf_special = special.ncfdtr(dfn, dfd, 0, x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, ncf_stats, 'b-', lw=3) + >>> ax.plot(x, ncf_special, 'r-') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "ncfdtri", + """ + ncfdtri(dfn, dfd, nc, p) + + Inverse with respect to `f` of the CDF of the non-central F distribution. + + See `ncfdtr` for more details. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + + Returns + ------- + f : float + Quantiles, i.e. the upper limit of integration. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtri + + Compute the CDF for several values of `f`: + + >>> f = [0.5, 1, 1.5] + >>> p = ncfdtr(2, 3, 1.5, f) + >>> p + array([ 0.20782291, 0.36107392, 0.47345752]) + + Compute the inverse. We recover the values of `f`, as expected: + + >>> ncfdtri(2, 3, 1.5, p) + array([ 0.5, 1. , 1.5]) + + """) + +add_newdoc("scipy.special", "ncfdtridfd", + """ + ncfdtridfd(dfn, p, nc, f) + + Calculate degrees of freedom (denominator) for the noncentral F-distribution. + + This is the inverse with respect to `dfd` of `ncfdtr`. + See `ncfdtr` for more details. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). 
+ f : array_like + Quantiles, i.e. the upper limit of integration. + + Returns + ------- + dfd : float + Degrees of freedom of the denominator sum of squares. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Notes + ----- + The value of the cumulative noncentral F distribution is not necessarily + monotone in either degrees of freedom. There thus may be two values that + provide a given CDF value. This routine assumes monotonicity and will + find an arbitrary one of the two values. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtridfd + + Compute the CDF for several values of `dfd`: + + >>> dfd = [1, 2, 3] + >>> p = ncfdtr(2, dfd, 0.25, 15) + >>> p + array([ 0.8097138 , 0.93020416, 0.96787852]) + + Compute the inverse. We recover the values of `dfd`, as expected: + + >>> ncfdtridfd(2, p, 0.25, 15) + array([ 1., 2., 3.]) + + """) + +add_newdoc("scipy.special", "ncfdtridfn", + """ + ncfdtridfn(p, dfd, nc, f) + + Calculate degrees of freedom (numerator) for the noncentral F-distribution. + + This is the inverse with respect to `dfn` of `ncfdtr`. + See `ncfdtr` for more details. + + Parameters + ---------- + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + f : float + Quantiles, i.e. the upper limit of integration. + + Returns + ------- + dfn : float + Degrees of freedom of the numerator sum of squares. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. + ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Notes + ----- + The value of the cumulative noncentral F distribution is not necessarily + monotone in either degrees of freedom. There thus may be two values that + provide a given CDF value. This routine assumes monotonicity and will + find an arbitrary one of the two values. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtridfn + + Compute the CDF for several values of `dfn`: + + >>> dfn = [1, 2, 3] + >>> p = ncfdtr(dfn, 2, 0.25, 15) + >>> p + array([ 0.92562363, 0.93020416, 0.93188394]) + + Compute the inverse. We recover the values of `dfn`, as expected: + + >>> ncfdtridfn(p, 2, 0.25, 15) + array([ 1., 2., 3.]) + + """) + +add_newdoc("scipy.special", "ncfdtrinc", + """ + ncfdtrinc(dfn, dfd, p, f) + + Calculate non-centrality parameter for non-central F distribution. + + This is the inverse with respect to `nc` of `ncfdtr`. + See `ncfdtr` for more details. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + f : array_like + Quantiles, i.e. the upper limit of integration. + + Returns + ------- + nc : float + Noncentrality parameter. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. 
+ ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtrinc + + Compute the CDF for several values of `nc`: + + >>> nc = [0.5, 1.5, 2.0] + >>> p = ncfdtr(2, 3, nc, 15) + >>> p + array([ 0.96309246, 0.94327955, 0.93304098]) + + Compute the inverse. We recover the values of `nc`, as expected: + + >>> ncfdtrinc(2, 3, p, 15) + array([ 0.5, 1.5, 2. ]) + + """) + +add_newdoc("scipy.special", "nctdtr", + """ + nctdtr(df, nc, t) + + Cumulative distribution function of the non-central `t` distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution. Should be in range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (-1e6, 1e6). + t : array_like + Quantiles, i.e. the upper limit of integration. + + Returns + ------- + cdf : float or ndarray + The calculated CDF. If all inputs are scalar, the return will be a + float. Otherwise it will be an array. + + See Also + -------- + nctdtrit : Inverse CDF (iCDF) of the non-central t distribution. + nctdtridf : Calculate degrees of freedom, given CDF and iCDF values. + nctdtrinc : Calculate non-centrality parameter, given CDF and iCDF values. + + Examples + -------- + >>> from scipy import special + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Plot the CDF of the non-central t distribution, for nc=0. Compare with the + t-distribution from scipy.stats: + + >>> x = np.linspace(-5, 5, num=500) + >>> df = 3 + >>> nct_stats = stats.t.cdf(x, df) + >>> nct_special = special.nctdtr(df, 0, x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, nct_stats, 'b-', lw=3) + >>> ax.plot(x, nct_special, 'r-') + >>> plt.show() + + """) + +add_newdoc("scipy.special", "nctdtridf", + """ + nctdtridf(p, nc, t) + + Calculate degrees of freedom for non-central t distribution. + + See `nctdtr` for more details. + + Parameters + ---------- + p : array_like + CDF values, in range (0, 1]. + nc : array_like + Noncentrality parameter. Should be in range (-1e6, 1e6). + t : array_like + Quantiles, i.e. the upper limit of integration. + + """) + +add_newdoc("scipy.special", "nctdtrinc", + """ + nctdtrinc(df, p, t) + + Calculate non-centrality parameter for non-central t distribution. + + See `nctdtr` for more details. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution. Should be in range (0, inf). + p : array_like + CDF values, in range (0, 1]. + t : array_like + Quantiles, i.e. the upper limit of integration. + + """) + +add_newdoc("scipy.special", "nctdtrit", + """ + nctdtrit(df, nc, p) + + Inverse cumulative distribution function of the non-central t distribution. + + See `nctdtr` for more details. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution. Should be in range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (-1e6, 1e6). + p : array_like + CDF values, in range (0, 1]. + + """) + +add_newdoc("scipy.special", "ndtr", + r""" + ndtr(x) + + Gaussian cumulative distribution function. + + Returns the area under the standard Gaussian probability + density function, integrated from minus infinity to `x` + + ..
math:: + + \frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt + + Parameters + ---------- + x : array_like, real or complex + Argument + + Returns + ------- + ndarray + The value of the normal CDF evaluated at `x` + + See Also + -------- + erf + erfc + scipy.stats.norm + log_ndtr + + """) + + +add_newdoc("scipy.special", "nrdtrimn", + """ + nrdtrimn(p, x, std) + + Calculate mean of normal distribution given other params. + + Parameters + ---------- + p : array_like + CDF values, in range (0, 1]. + x : array_like + Quantiles, i.e. the upper limit of integration. + std : array_like + Standard deviation. + + Returns + ------- + mn : float or ndarray + The mean of the normal distribution. + + See Also + -------- + nrdtrisd, ndtr + + """) + +add_newdoc("scipy.special", "nrdtrisd", + """ + nrdtrisd(p, x, mn) + + Calculate standard deviation of normal distribution given other params. + + Parameters + ---------- + p : array_like + CDF values, in range (0, 1]. + x : array_like + Quantiles, i.e. the upper limit of integration. + mn : float or ndarray + The mean of the normal distribution. + + Returns + ------- + std : array_like + Standard deviation. + + See Also + -------- + nrdtrimn, ndtr + + """) + +add_newdoc("scipy.special", "log_ndtr", + """ + log_ndtr(x) + + Logarithm of Gaussian cumulative distribution function. + + Returns the log of the area under the standard Gaussian probability + density function, integrated from minus infinity to `x`:: + + log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x)) + + Parameters + ---------- + x : array_like, real or complex + Argument + + Returns + ------- + ndarray + The value of the log of the normal CDF evaluated at `x` + + See Also + -------- + erf + erfc + scipy.stats.norm + ndtr + + """) + +add_newdoc("scipy.special", "ndtri", + """ + ndtri(y) + + Inverse of `ndtr` vs x + + Returns the argument x for which the area under the Gaussian + probability density function (integrated from minus infinity to `x`) + is equal to y. + """) + +add_newdoc("scipy.special", "obl_ang1", + """ + obl_ang1(m, n, c, x) + + Oblate spheroidal angular function of the first kind and its derivative + + Computes the oblate spheroidal angular function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "obl_ang1_cv", + """ + obl_ang1_cv(m, n, c, cv, x) + + Oblate spheroidal angular function obl_ang1 for precomputed characteristic value + + Computes the oblate spheroidal angular function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "obl_cv", + """ + obl_cv(m, n, c) + + Characteristic value of oblate spheroidal function + + Computes the characteristic value of oblate spheroidal wave + functions of order `m`, `n` (n>=m) and spheroidal parameter `c`. + """) + +add_newdoc("scipy.special", "obl_rad1", + """ + obl_rad1(m, n, c, x) + + Oblate spheroidal radial function of the first kind and its derivative + + Computes the oblate spheroidal radial function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
+ + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "obl_rad1_cv", + """ + obl_rad1_cv(m, n, c, cv, x) + + Oblate spheroidal radial function obl_rad1 for precomputed characteristic value + + Computes the oblate spheroidal radial function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "obl_rad2", + """ + obl_rad2(m, n, c, x) + + Oblate spheroidal radial function of the second kind and its derivative. + + Computes the oblate spheroidal radial function of the second kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "obl_rad2_cv", + """ + obl_rad2_cv(m, n, c, cv, x) + + Oblate spheroidal radial function obl_rad2 for precomputed characteristic value + + Computes the oblate spheroidal radial function of the second kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "pbdv", + """ + pbdv(v, x) + + Parabolic cylinder function D + + Returns (d, dp) the parabolic cylinder function Dv(x) in d and the + derivative, Dv'(x) in dp. + + Returns + ------- + d + Value of the function + dp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "pbvv", + """ + pbvv(v, x) + + Parabolic cylinder function V + + Returns the parabolic cylinder function Vv(x) in v and the + derivative, Vv'(x) in vp. + + Returns + ------- + v + Value of the function + vp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "pbwa", + r""" + pbwa(a, x) + + Parabolic cylinder function W. + + The function is a particular solution to the differential equation + + .. math:: + + y'' + \left(\frac{1}{4}x^2 - a\right)y = 0, + + for a full definition see section 12.14 in [1]_. + + Parameters + ---------- + a : array_like + Real parameter + x : array_like + Real argument + + Returns + ------- + w : scalar or ndarray + Value of the function + wp : scalar or ndarray + Value of the derivative in x + + Notes + ----- + The function is a wrapper for a Fortran routine by Zhang and Jin + [2]_. The implementation is accurate only for ``|a|, |x| < 5`` and + returns NaN outside that range. + + References + ---------- + .. [1] Digital Library of Mathematical Functions, 12.14. + https://dlmf.nist.gov/12.14 + .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + """) + +add_newdoc("scipy.special", "pdtr", + """ + pdtr(k, m) + + Poisson cumulative distribution function + + Returns the sum of the first `k + 1` terms (j = 0 through `k`) of the Poisson distribution: + sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments + must both be positive and `k` an integer.
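+ + Examples + -------- + The identity with ``gammaincc`` stated above can be checked numerically; a minimal sketch (the values of ``k`` and ``m`` below are arbitrary): + + >>> from scipy.special import pdtr, gammaincc + >>> import numpy as np + >>> k, m = 10, 12.5 + >>> np.allclose(pdtr(k, m), gammaincc(k + 1, m)) + True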
+ """) + +add_newdoc("scipy.special", "pdtrc", + """ + pdtrc(k, m) + + Poisson survival function + + Returns the sum of the terms from k+1 to infinity of the Poisson + distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc( + k+1, m). Arguments must both be positive and `k` an integer. + """) + +add_newdoc("scipy.special", "pdtri", + """ + pdtri(k, y) + + Inverse to `pdtr` vs m + + Returns the Poisson variable `m` such that the sum from 0 to `k` of + the Poisson density is equal to the given probability `y`: + calculated by gammaincinv(k+1, y). `k` must be a nonnegative + integer and `y` between 0 and 1. + """) + +add_newdoc("scipy.special", "pdtrik", + """ + pdtrik(p, m) + + Inverse to `pdtr` vs k + + Returns the quantile k such that ``pdtr(k, m) = p`` + """) + +add_newdoc("scipy.special", "poch", + r""" + poch(z, m) + + Rising factorial (z)_m + + The Pochhammer symbol (rising factorial), is defined as + + .. math:: + + (z)_m = \frac{\Gamma(z + m)}{\Gamma(z)} + + For positive integer `m` it reads + + .. math:: + + (z)_m = z (z + 1) ... (z + m - 1) + + Parameters + ---------- + z : array_like + (int or float) + m : array_like + (int or float) + + Returns + ------- + poch : ndarray + The value of the function. + """) + +add_newdoc("scipy.special", "pro_ang1", + """ + pro_ang1(m, n, c, x) + + Prolate spheroidal angular function of the first kind and its derivative + + Computes the prolate spheroidal angular function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "pro_ang1_cv", + """ + pro_ang1_cv(m, n, c, cv, x) + + Prolate spheroidal angular function pro_ang1 for precomputed characteristic value + + Computes the prolate spheroidal angular function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "pro_cv", + """ + pro_cv(m, n, c) + + Characteristic value of prolate spheroidal function + + Computes the characteristic value of prolate spheroidal wave + functions of order `m`, `n` (n>=m) and spheroidal parameter `c`. + """) + +add_newdoc("scipy.special", "pro_rad1", + """ + pro_rad1(m, n, c, x) + + Prolate spheroidal radial function of the first kind and its derivative + + Computes the prolate spheroidal radial function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "pro_rad1_cv", + """ + pro_rad1_cv(m, n, c, cv, x) + + Prolate spheroidal radial function pro_rad1 for precomputed characteristic value + + Computes the prolate spheroidal radial function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. 
+ + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "pro_rad2", + """ + pro_rad2(m, n, c, x) + + Prolate spheroidal radial function of the second kind and its derivative + + Computes the prolate spheroidal radial function of the second kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "pro_rad2_cv", + """ + pro_rad2_cv(m, n, c, cv, x) + + Prolate spheroidal radial function pro_rad2 for precomputed characteristic value + + Computes the prolate spheroidal radial function of the second kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. + + Returns + ------- + s + Value of the function + sp + Value of the derivative vs x + """) + +add_newdoc("scipy.special", "pseudo_huber", + r""" + pseudo_huber(delta, r) + + Pseudo-Huber loss function. + + .. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right) + + Parameters + ---------- + delta : ndarray + Input array, indicating the soft quadratic vs. linear loss changepoint. + r : ndarray + Input array, possibly representing residuals. + + Returns + ------- + res : ndarray + The computed Pseudo-Huber loss function values. + + Notes + ----- + This function is convex in :math:`r`. + + .. versionadded:: 0.15.0 + + """) + +add_newdoc("scipy.special", "psi", + """ + psi(z, out=None) + + The digamma function. + + The logarithmic derivative of the gamma function evaluated at ``z``. + + Parameters + ---------- + z : array_like + Real or complex argument. + out : ndarray, optional + Array for the computed values of ``psi``. + + Returns + ------- + digamma : ndarray + Computed values of ``psi``. + + Notes + ----- + For large values not close to the negative real axis ``psi`` is + computed using the asymptotic series (5.11.2) from [1]_. For small + arguments not close to the negative real axis the recurrence + relation (5.5.2) from [1]_ is used until the argument is large + enough to use the asymptotic series. For values close to the + negative real axis the reflection formula (5.5.4) from [1]_ is + used first. Note that ``psi`` has a family of zeros on the + negative real axis which occur between the poles at nonpositive + integers. Around the zeros the reflection formula suffers from + cancellation and the implementation loses precision. The sole + positive zero and the first negative zero, however, are handled + separately by precomputing series expansions using [2]_, so the + function should maintain full accuracy around the origin. + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/5 + .. [2] Fredrik Johansson and others. + "mpmath: a Python library for arbitrary-precision floating-point arithmetic" + (Version 0.19) http://mpmath.org/ + + """) + +add_newdoc("scipy.special", "radian", + """ + radian(d, m, s) + + Convert from degrees to radians + + Returns the angle given in (d)egrees, (m)inutes, and (s)econds in + radians. + """) + +add_newdoc("scipy.special", "rel_entr", + r""" + rel_entr(x, y) + + Elementwise function for computing relative entropy. + + .. 
math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases} + + Parameters + ---------- + x : ndarray + First input array. + y : ndarray + Second input array. + + Returns + ------- + res : ndarray + Output array. + + See Also + -------- + entr, kl_div + + Notes + ----- + This function is jointly convex in x and y. + + .. versionadded:: 0.15.0 + + """) + +add_newdoc("scipy.special", "rgamma", + """ + rgamma(z) + + Gamma function inverted + + Returns ``1/gamma(z)`` + """) + +add_newdoc("scipy.special", "round", + """ + round(x) + + Round to nearest integer + + Returns the nearest integer to `x` as a double precision floating + point result. If `x` ends in 0.5 exactly, the nearest even integer + is chosen. + """) + +add_newdoc("scipy.special", "shichi", + r""" + shichi(x, out=None) + + Hyperbolic sine and cosine integrals. + + The hyperbolic sine integral is + + .. math:: + + \int_0^x \frac{\sinh{t}}{t}dt + + and the hyperbolic cosine integral is + + .. math:: + + \gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt + + where :math:`\gamma` is Euler's constant and :math:`\log` is the + principal branch of the logarithm. + + Parameters + ---------- + x : array_like + Real or complex points at which to compute the hyperbolic sine + and cosine integrals. + + Returns + ------- + si : ndarray + Hyperbolic sine integral at ``x`` + ci : ndarray + Hyperbolic cosine integral at ``x`` + + Notes + ----- + For real arguments with ``x < 0``, ``chi`` is the real part of the + hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x + + 0j)`` differ by a factor of ``1j*pi``. + + For real arguments the function is computed by calling Cephes' + [1]_ *shichi* routine. For complex arguments the algorithm is based + on Mpmath's [2]_ *shi* and *chi* routines. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Fredrik Johansson and others. + "mpmath: a Python library for arbitrary-precision floating-point arithmetic" + (Version 0.19) http://mpmath.org/ + """) + +add_newdoc("scipy.special", "sici", + r""" + sici(x, out=None) + + Sine and cosine integrals. + + The sine integral is + + .. math:: + + \int_0^x \frac{\sin{t}}{t}dt + + and the cosine integral is + + .. math:: + + \gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt + + where :math:`\gamma` is Euler's constant and :math:`\log` is the + principal branch of the logarithm. + + Parameters + ---------- + x : array_like + Real or complex points at which to compute the sine and cosine + integrals. + + Returns + ------- + si : ndarray + Sine integral at ``x`` + ci : ndarray + Cosine integral at ``x`` + + Notes + ----- + For real arguments with ``x < 0``, ``ci`` is the real part of the + cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)`` + differ by a factor of ``1j*pi``. + + For real arguments the function is computed by calling Cephes' + [1]_ *sici* routine. For complex arguments the algorithm is based + on Mpmath's [2]_ *si* and *ci* routines. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Fredrik Johansson and others.
+ "mpmath: a Python library for arbitrary-precision floating-point arithmetic" + (Version 0.19) http://mpmath.org/ + """) + +add_newdoc("scipy.special", "sindg", + """ + sindg(x) + + Sine of angle given in degrees + """) + +add_newdoc("scipy.special", "smirnov", + r""" + smirnov(n, d) + + Kolmogorov-Smirnov complementary cumulative distribution function + + Returns the exact Kolmogorov-Smirnov complementary cumulative + distribution function,(aka the Survival Function) of Dn+ (or Dn-) + for a one-sided test of equality between an empirical and a + theoretical distribution. It is equal to the probability that the + maximum difference between a theoretical distribution and an empirical + one based on `n` samples is greater than d. + + Parameters + ---------- + n : int + Number of samples + d : float array_like + Deviation between the Empirical CDF (ECDF) and the target CDF. + + Returns + ------- + float + The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d)) + + Notes + ----- + `smirnov` is used by `stats.kstest` in the application of the + Kolmogorov-Smirnov Goodness of Fit test. For historial reasons this + function is exposed in `scpy.special`, but the recommended way to achieve + the most accurate CDF/SF/PDF/PPF/ISF computations is to use the + `stats.ksone` distrubution. + + See Also + -------- + smirnovi : The Inverse Survival Function for the distribution + scipy.stats.ksone : Provides the functionality as a continuous distribution + kolmogorov, kolmogi : Functions for the two-sided distribution + + Examples + -------- + >>> from scipy.special import smirnov + + Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a sample of size 5 + + >>> smirnov(5, [0, 0.5, 1.0]) + array([ 1. , 0.056, 0. ]) + + Compare a sample of size 5 drawn from a source N(0.5, 1) distribution against + a target N(0, 1) CDF. + + >>> from scipy.stats import norm + >>> n = 5 + >>> gendist = norm(0.5, 1) # Normal distribution, mean 0.5, stddev 1 + >>> np.random.seed(seed=233423) # Set the seed for reproducibility + >>> x = np.sort(gendist.rvs(size=n)) + >>> x + array([-0.20946287, 0.71688765, 0.95164151, 1.44590852, 3.08880533]) + >>> target = norm(0, 1) + >>> cdfs = target.cdf(x) + >>> cdfs + array([ 0.41704346, 0.76327829, 0.82936059, 0.92589857, 0.99899518]) + # Construct the Empirical CDF and the K-S statistics (Dn+, Dn-, Dn) + >>> ecdfs = np.arange(n+1, dtype=float)/n + >>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n], ecdfs[1:] - cdfs]) + >>> np.set_printoptions(precision=3) + >>> cols + array([[ -2.095e-01, 2.000e-01, 4.170e-01, 4.170e-01, -2.170e-01], + [ 7.169e-01, 4.000e-01, 7.633e-01, 5.633e-01, -3.633e-01], + [ 9.516e-01, 6.000e-01, 8.294e-01, 4.294e-01, -2.294e-01], + [ 1.446e+00, 8.000e-01, 9.259e-01, 3.259e-01, -1.259e-01], + [ 3.089e+00, 1.000e+00, 9.990e-01, 1.990e-01, 1.005e-03]]) + >>> gaps = cols[:, -2:] + >>> Dnpm = np.max(gaps, axis=0) + >>> print('Dn-=%f, Dn+=%f' % (Dnpm[0], Dnpm[1])) + Dn-=0.563278, Dn+=0.001005 + >>> probs = smirnov(n, Dnpm) + >>> print(chr(10).join(['For a sample of size %d drawn from a N(0, 1) distribution:' % n, + ... ' Smirnov n=%d: Prob(Dn- >= %f) = %.4f' % (n, Dnpm[0], probs[0]), + ... 
' Smirnov n=%d: Prob(Dn+ >= %f) = %.4f' % (n, Dnpm[1], probs[1])])) + For a sample of size 5 drawn from a N(0, 1) distribution: + Smirnov n=5: Prob(Dn- >= 0.563278) = 0.0250 + Smirnov n=5: Prob(Dn+ >= 0.001005) = 0.9990 + + Plot the Empirical CDF against the target N(0, 1) CDF + + >>> import matplotlib.pyplot as plt + >>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF') + >>> x3 = np.linspace(-3, 3, 100) + >>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)') + >>> plt.ylim([0, 1]); plt.grid(True); plt.legend(); + # Add vertical lines marking Dn+ and Dn- + >>> iminus, iplus = np.argmax(gaps, axis=0) + >>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4) + >>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m', linestyle='dashed', lw=4) + >>> plt.show() + """) + +add_newdoc("scipy.special", "smirnovi", + """ + smirnovi(n, p) + + Inverse to `smirnov` + + Returns `d` such that ``smirnov(n, d) == p``, the critical value + corresponding to `p`. + + Parameters + ---------- + n : int + Number of samples + p : float array_like + Probability + + Returns + ------- + float + The value(s) of smirnovi(n, p), the critical values. + + Notes + ----- + `smirnov` is used by `stats.kstest` in the application of the + Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this + function is exposed in `scipy.special`, but the recommended way to achieve + the most accurate CDF/SF/PDF/PPF/ISF computations is to use the + `stats.ksone` distribution. + + See Also + -------- + smirnov : The Survival Function (SF) for the distribution + scipy.stats.ksone : Provides the functionality as a continuous distribution + kolmogorov, kolmogi, scipy.stats.kstwobign : Functions for the two-sided distribution + """) + +add_newdoc("scipy.special", "_smirnovc", + """ + _smirnovc(n, d) + Internal function, do not use. + """) + +add_newdoc("scipy.special", "_smirnovci", + """ + Internal function, do not use. + """) + +add_newdoc("scipy.special", "_smirnovp", + """ + _smirnovp(n, p) + Internal function, do not use. + """) + +add_newdoc("scipy.special", "spence", + r""" + spence(z, out=None) + + Spence's function, also known as the dilogarithm. + + It is defined to be + + .. math:: + \int_0^z \frac{\log(t)}{1 - t}dt + + for complex :math:`z`, where the contour of integration is taken + to avoid the branch cut of the logarithm. Spence's function is + analytic everywhere except the negative real axis where it has a + branch cut. + + Parameters + ---------- + z : array_like + Points at which to evaluate Spence's function + + Returns + ------- + s : ndarray + Computed values of Spence's function + + Notes + ----- + There is a different convention which defines Spence's function by + the integral + + .. math:: + -\int_0^z \frac{\log(1 - t)}{t}dt; + + this is our ``spence(1 - z)``. + """) + +add_newdoc("scipy.special", "stdtr", + """ + stdtr(df, t) + + Student t distribution cumulative distribution function + + Returns the integral from minus infinity to t of the Student t + distribution with df > 0 degrees of freedom:: + + gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) * + integral((1+x**2/df)**(-df/2-1/2), x=-inf..t) + + """) + +add_newdoc("scipy.special", "stdtridf", + """ + stdtridf(p, t) + + Inverse of `stdtr` vs df + + Returns the argument df such that stdtr(df, t) is equal to `p`. + """) + +add_newdoc("scipy.special", "stdtrit", + """ + stdtrit(df, p) + + Inverse of `stdtr` vs `t` + + Returns the argument `t` such that stdtr(df, t) is equal to `p`.
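+ + Examples + -------- + A minimal round-trip sketch (the `df` and `t` values below are arbitrary) showing that `stdtrit` inverts `stdtr`: + + >>> from scipy.special import stdtr, stdtrit + >>> import numpy as np + >>> t = np.array([-1.5, 0.0, 2.0]) + >>> p = stdtr(3, t) + >>> np.allclose(stdtrit(3, p), t) + True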
+ """) + +add_newdoc("scipy.special", "struve", + r""" + struve(v, x) + + Struve function. + + Return the value of the Struve function of order `v` at `x`. The Struve + function is defined as, + + .. math:: + H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})}, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + v : array_like + Order of the Struve function (float). + x : array_like + Argument of the Struve function (float; must be positive unless `v` is + an integer). + + Returns + ------- + H : ndarray + Value of the Struve function of order `v` at `x`. + + Notes + ----- + Three methods discussed in [1]_ are used to evaluate the Struve function: + + - power series + - expansion in Bessel functions (if :math:`|z| < |v| + 20`) + - asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`) + + Rounding errors are estimated based on the largest terms in the sums, and + the result associated with the smallest error is returned. + + See also + -------- + modstruve + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/11 + + """) + +add_newdoc("scipy.special", "tandg", + """ + tandg(x) + + Tangent of angle x given in degrees. + """) + +add_newdoc("scipy.special", "tklmbda", + """ + tklmbda(x, lmbda) + + Tukey-Lambda cumulative distribution function + + """) + +add_newdoc("scipy.special", "wofz", + """ + wofz(z) + + Faddeeva function + + Returns the value of the Faddeeva function for complex argument:: + + exp(-z**2) * erfc(-i*z) + + See Also + -------- + dawsn, erf, erfc, erfcx, erfi + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> from scipy import special + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-3, 3) + >>> z = special.wofz(x) + + >>> plt.plot(x, z.real, label='wofz(x).real') + >>> plt.plot(x, z.imag, label='wofz(x).imag') + >>> plt.xlabel('$x$') + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.grid(alpha=0.25) + >>> plt.show() + + """) + +add_newdoc("scipy.special", "xlogy", + """ + xlogy(x, y) + + Compute ``x*log(y)`` so that the result is 0 if ``x = 0``. + + Parameters + ---------- + x : array_like + Multiplier + y : array_like + Argument + + Returns + ------- + z : array_like + Computed x*log(y) + + Notes + ----- + + .. versionadded:: 0.13.0 + + """) + +add_newdoc("scipy.special", "xlog1py", + """ + xlog1py(x, y) + + Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``. + + Parameters + ---------- + x : array_like + Multiplier + y : array_like + Argument + + Returns + ------- + z : array_like + Computed x*log1p(y) + + Notes + ----- + + .. versionadded:: 0.13.0 + + """) + +add_newdoc("scipy.special", "y0", + r""" + y0(x) + + Bessel function of the second kind of order 0. + + Parameters + ---------- + x : array_like + Argument (float). + + Returns + ------- + Y : ndarray + Value of the Bessel function of the second kind of order 0 at `x`. + + Notes + ----- + + The domain is divided into the intervals [0, 5] and (5, infinity). In the + first interval a rational approximation :math:`R(x)` is employed to + compute, + + .. math:: + + Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi}, + + where :math:`J_0` is the Bessel function of the first kind of order 0. + + In the second interval, the Hankel asymptotic expansion is employed with + two rational functions of degree 6/6 and 7/7. 
+ + This function is a wrapper for the Cephes [1]_ routine `y0`. + + See also + -------- + j0 + yv + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "y1", + """ + y1(x) + + Bessel function of the second kind of order 1. + + Parameters + ---------- + x : array_like + Argument (float). + + Returns + ------- + Y : ndarray + Value of the Bessel function of the second kind of order 1 at `x`. + + Notes + ----- + + The domain is divided into the intervals [0, 8] and (8, infinity). In the + first interval a 25 term Chebyshev expansion is used, and computing + :math:`J_1` (the Bessel function of the first kind) is required. In the + second, the asymptotic trigonometric representation is employed using two + rational functions of degree 5/5. + + This function is a wrapper for the Cephes [1]_ routine `y1`. + + See also + -------- + j1 + yn + yv + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "yn", + r""" + yn(n, x) + + Bessel function of the second kind of integer order and real argument. + + Parameters + ---------- + n : array_like + Order (integer). + x : array_like + Argument (float). + + Returns + ------- + Y : ndarray + Value of the Bessel function, :math:`Y_n(x)`. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `yn`. + + The function is evaluated by forward recurrence on `n`, starting with + values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1, + the routine for `y0` or `y1` is called directly. + + See also + -------- + yv : For real order and real or complex argument. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("scipy.special", "yv", + r""" + yv(v, z) + + Bessel function of the second kind of real order and complex argument. + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + + Returns + ------- + Y : ndarray + Value of the Bessel function of the second kind, :math:`Y_v(z)`. + + Notes + ----- + For positive `v` values, the computation is carried out using the + AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel + Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`, + + .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}). + + For negative `v` values the formula, + + .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v) + + is used, where :math:`J_v(z)` is the Bessel function of the first kind, + computed using the AMOS routine `zbesj`. Note that the second term is + exactly zero for integer `v`; to improve accuracy the second term is + explicitly omitted for `v` values such that `v = floor(v)`. + + See also + -------- + yve : :math:`Y_v` with leading exponential behavior stripped off. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + """) + +add_newdoc("scipy.special", "yve", + r""" + yve(v, z) + + Exponentially scaled Bessel function of the second kind of real order. + + Returns the exponentially scaled Bessel function of the second + kind of real order `v` at complex `z`:: + + yve(v, z) = yv(v, z) * exp(-abs(z.imag)) + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex).
+ + Returns + ------- + Y : ndarray + Value of the exponentially scaled Bessel function. + + Notes + ----- + For positive `v` values, the computation is carried out using the + AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel + Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`, + + .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}). + + For negative `v` values the formula, + + .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v) + + is used, where :math:`J_v(z)` is the Bessel function of the first kind, + computed using the AMOS routine `zbesj`. Note that the second term is + exactly zero for integer `v`; to improve accuracy the second term is + explicitly omitted for `v` values such that `v = floor(v)`. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + """) + +add_newdoc("scipy.special", "_zeta", + """ + _zeta(x, q) + + Internal function, Hurwitz zeta. + + """) + +add_newdoc("scipy.special", "zetac", + """ + zetac(x) + + Riemann zeta function minus 1. + + This function is defined as + + .. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x, + + where ``x > 1``. For ``x < 1``, the analytic continuation is computed. + + Because of limitations of the numerical algorithm, ``zetac(x)`` returns + `nan` for `x` less than -30.8148. + + Parameters + ---------- + x : array_like of float + Values at which to compute zeta(x) - 1 (must be real). + + Returns + ------- + out : array_like + Values of zeta(x) - 1. + + See Also + -------- + zeta + + Examples + -------- + >>> from scipy.special import zetac, zeta + + Some special values: + + >>> zetac(2), np.pi**2/6 - 1 + (0.64493406684822641, 0.6449340668482264) + + >>> zetac(-1), -1.0/12 - 1 + (-1.0833333333333333, -1.0833333333333333) + + Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`: + + >>> zetac(60), zeta(60) - 1 + (8.673617380119933e-19, 0.0) + + """) + +add_newdoc("scipy.special", "_struve_asymp_large_z", + """ + _struve_asymp_large_z(v, z, is_h) + + Internal function for testing `struve` & `modstruve` + + Evaluates using asymptotic expansion + + Returns + ------- + v, err + """) + +add_newdoc("scipy.special", "_struve_power_series", + """ + _struve_power_series(v, z, is_h) + + Internal function for testing `struve` & `modstruve` + + Evaluates using power series + + Returns + ------- + v, err + """) + +add_newdoc("scipy.special", "_struve_bessel_series", + """ + _struve_bessel_series(v, z, is_h) + + Internal function for testing `struve` & `modstruve` + + Evaluates using Bessel function series + + Returns + ------- + v, err + """) + +add_newdoc("scipy.special", "_spherical_jn", + """ + Internal function, use `spherical_jn` instead. + """) + +add_newdoc("scipy.special", "_spherical_jn_d", + """ + Internal function, use `spherical_jn` instead. + """) + +add_newdoc("scipy.special", "_spherical_yn", + """ + Internal function, use `spherical_yn` instead. + """) + +add_newdoc("scipy.special", "_spherical_yn_d", + """ + Internal function, use `spherical_yn` instead. + """) + +add_newdoc("scipy.special", "_spherical_in", + """ + Internal function, use `spherical_in` instead. + """) + +add_newdoc("scipy.special", "_spherical_in_d", + """ + Internal function, use `spherical_in` instead. + """) + +add_newdoc("scipy.special", "_spherical_kn", + """ + Internal function, use `spherical_kn` instead. 
+ """) + +add_newdoc("scipy.special", "_spherical_kn_d", + """ + Internal function, use `spherical_kn` instead. + """) + +add_newdoc("scipy.special", "loggamma", + r""" + loggamma(z, out=None) + + Principal branch of the logarithm of the Gamma function. + + Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and + extended to the complex plane by analytic continuation. The + function has a single branch cut on the negative real axis. + + .. versionadded:: 0.18.0 + + Parameters + ---------- + z : array-like + Values in the complex plain at which to compute ``loggamma`` + out : ndarray, optional + Output array for computed values of ``loggamma`` + + Returns + ------- + loggamma : ndarray + Values of ``loggamma`` at z. + + Notes + ----- + It is not generally true that :math:`\log\Gamma(z) = + \log(\Gamma(z))`, though the real parts of the functions do + agree. The benefit of not defining `loggamma` as + :math:`\log(\Gamma(z))` is that the latter function has a + complicated branch cut structure whereas `loggamma` is analytic + except for on the negative real axis. + + The identities + + .. math:: + \exp(\log\Gamma(z)) &= \Gamma(z) \\ + \log\Gamma(z + 1) &= \log(z) + \log\Gamma(z) + + make `loggamma` useful for working in complex logspace. + + On the real line `loggamma` is related to `gammaln` via + ``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to + rounding error. + + The implementation here is based on [hare1997]_. + + See also + -------- + gammaln : logarithm of the absolute value of the Gamma function + gammasgn : sign of the gamma function + + References + ---------- + .. [hare1997] D.E.G. Hare, + *Computing the Principal Branch of log-Gamma*, + Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236. + """) + +add_newdoc("scipy.special", "_sinpi", + """ + Internal function, do not use. + """) + +add_newdoc("scipy.special", "_cospi", + """ + Internal function, do not use. + """) + +add_newdoc("scipy.special", "owens_t", + """ + owens_t(h, a) + + Owen's T Function. + + The function T(h, a) gives the probability of the event + (X > h and 0 < Y < a * X) where X and Y are independent + standard normal random variables. + + Parameters + ---------- + h: array_like + Input value. + a: array_like + Input value. + + Returns + ------- + t: scalar or ndarray + Probability of the event (X > h and 0 < Y < a * X), + where X and Y are independent standard normal random variables. + + Examples + -------- + >>> from scipy import special + >>> a = 3.5 + >>> h = 0.78 + >>> special.owens_t(h, a) + 0.10877216734852274 + + References + ---------- + .. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of + Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000. 
+ """) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/add_newdocs.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/add_newdocs.pyc new file mode 100644 index 0000000..4ea98e6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/add_newdocs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/basic.py b/project/venv/lib/python2.7/site-packages/scipy/special/basic.py new file mode 100644 index 0000000..d1c77f2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/basic.py @@ -0,0 +1,2278 @@ +# +# Author: Travis Oliphant, 2002 +# + +from __future__ import division, print_function, absolute_import + +import operator +import numpy as np +import math +from scipy._lib.six import xrange +from numpy import (pi, asarray, floor, isscalar, iscomplex, real, + imag, sqrt, where, mgrid, sin, place, issubdtype, + extract, inexact, nan, zeros, sinc) +from . import _ufuncs as ufuncs +from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, + psi, _zeta, hankel1, hankel2, yv, kv, ndtri, + poch, binom, hyp0f1) +from . import specfun +from . import orthogonal +from ._comb import _comb_int + + +__all__ = ['ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros', + 'ber_zeros', 'bernoulli', 'berp_zeros', + 'bessel_diff_formula', 'bi_zeros', 'clpmn', 'comb', + 'digamma', 'diric', 'ellipk', 'erf_zeros', 'erfcinv', + 'erfinv', 'euler', 'factorial', 'factorialk', 'factorial2', + 'fresnel_zeros', 'fresnelc_zeros', 'fresnels_zeros', + 'gamma', 'h1vp', 'h2vp', 'hankel1', 'hankel2', 'hyp0f1', + 'iv', 'ivp', 'jn_zeros', 'jnjnp_zeros', 'jnp_zeros', + 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros', 'keip_zeros', + 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv', 'kvp', + 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a', + 'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', + 'ndtri', 'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', + 'perm', 'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', + 'riccati_yn', 'sinc', 'y0_zeros', 'y1_zeros', 'y1p_zeros', + 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta'] + + +def _nonneg_int_or_fail(n, var_name, strict=True): + try: + if strict: + # Raises an exception if float + n = operator.index(n) + elif n == floor(n): + n = int(n) + else: + raise ValueError() + if n < 0: + raise ValueError() + except (ValueError, TypeError) as err: + raise err.__class__("{} must be a non-negative integer".format(var_name)) + return n + + +def diric(x, n): + """Periodic sinc function, also called the Dirichlet function. + + The Dirichlet function is defined as:: + + diric(x, n) = sin(x * n/2) / (n * sin(x / 2)), + + where `n` is a positive integer. + + Parameters + ---------- + x : array_like + Input data + n : int + Integer defining the periodicity. + + Returns + ------- + diric : ndarray + + Examples + -------- + >>> from scipy import special + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-8*np.pi, 8*np.pi, num=201) + >>> plt.figure(figsize=(8, 8)); + >>> for idx, n in enumerate([2, 3, 4, 9]): + ... plt.subplot(2, 2, idx+1) + ... plt.plot(x, special.diric(x, n)) + ... plt.title('diric, n={}'.format(n)) + >>> plt.show() + + The following example demonstrates that `diric` gives the magnitudes + (modulo the sign and scaling) of the Fourier coefficients of a + rectangular pulse. 
+ + Suppress output of values that are effectively 0: + + >>> np.set_printoptions(suppress=True) + + Create a signal `x` of length `m` with `k` ones: + + >>> m = 8 + >>> k = 3 + >>> x = np.zeros(m) + >>> x[:k] = 1 + + Use the FFT to compute the Fourier transform of `x`, and + inspect the magnitudes of the coefficients: + + >>> np.abs(np.fft.fft(x)) + array([ 3. , 2.41421356, 1. , 0.41421356, 1. , + 0.41421356, 1. , 2.41421356]) + + Now find the same values (up to sign) using `diric`. We multiply + by `k` to account for the different scaling conventions of + `numpy.fft.fft` and `diric`: + + >>> theta = np.linspace(0, 2*np.pi, m, endpoint=False) + >>> k * special.diric(theta, k) + array([ 3. , 2.41421356, 1. , -0.41421356, -1. , + -0.41421356, 1. , 2.41421356]) + """ + x, n = asarray(x), asarray(n) + n = asarray(n + (x-x)) + x = asarray(x + (n-n)) + if issubdtype(x.dtype, inexact): + ytype = x.dtype + else: + ytype = float + y = zeros(x.shape, ytype) + + # empirical minval for 32, 64 or 128 bit float computations + # where sin(x/2) < minval, result is fixed at +1 or -1 + if np.finfo(ytype).eps < 1e-18: + minval = 1e-11 + elif np.finfo(ytype).eps < 1e-15: + minval = 1e-7 + else: + minval = 1e-3 + + mask1 = (n <= 0) | (n != floor(n)) + place(y, mask1, nan) + + x = x / 2 + denom = sin(x) + mask2 = (1-mask1) & (abs(denom) < minval) + xsub = extract(mask2, x) + nsub = extract(mask2, n) + zsub = xsub / pi + place(y, mask2, pow(-1, np.round(zsub)*(nsub-1))) + + mask = (1-mask1) & (1-mask2) + xsub = extract(mask, x) + nsub = extract(mask, n) + dsub = extract(mask, denom) + place(y, mask, sin(nsub*xsub)/(nsub*dsub)) + return y + + +def jnjnp_zeros(nt): + """Compute zeros of integer-order Bessel functions Jn and Jn'. + + Results are arranged in order of the magnitudes of the zeros. + + Parameters + ---------- + nt : int + Number (<=1200) of zeros to compute + + Returns + ------- + zo[l-1] : ndarray + Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`. + n[l-1] : ndarray + Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`. + m[l-1] : ndarray + Serial number of the zeros of Jn(x) or Jn'(x) associated + with lth zero. Of length `nt`. + t[l-1] : ndarray + 0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of + length `nt`. + + See Also + -------- + jn_zeros, jnp_zeros : to get separated arrays of zeros. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200): + raise ValueError("Number must be integer <= 1200.") + nt = int(nt) + n, m, t, zo = specfun.jdzo(nt) + return zo[1:nt+1], n[:nt], m[:nt], t[:nt] + + +def jnyn_zeros(n, nt): + """Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x). + + Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of + Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively. + + Parameters + ---------- + n : int + Order of the Bessel functions + nt : int + Number (<=1200) of zeros to compute + + See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. 
+ https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not (isscalar(nt) and isscalar(n)): + raise ValueError("Arguments must be scalars.") + if (floor(n) != n) or (floor(nt) != nt): + raise ValueError("Arguments must be integers.") + if (nt <= 0): + raise ValueError("nt > 0") + return specfun.jyzo(abs(n), nt) + + +def jn_zeros(n, nt): + """Compute zeros of integer-order Bessel function Jn(x). + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + return jnyn_zeros(n, nt)[0] + + +def jnp_zeros(n, nt): + """Compute zeros of integer-order Bessel function derivative Jn'(x). + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + return jnyn_zeros(n, nt)[1] + + +def yn_zeros(n, nt): + """Compute zeros of integer-order Bessel function Yn(x). + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + return jnyn_zeros(n, nt)[2] + + +def ynp_zeros(n, nt): + """Compute zeros of integer-order Bessel function derivative Yn'(x). + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + return jnyn_zeros(n, nt)[3] + + +def y0_zeros(nt, complex=False): + """Compute nt zeros of Bessel function Y0(z), and derivative at each zero. + + The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0. + + Parameters + ---------- + nt : int + Number of zeros to return + complex : bool, default False + Set to False to return only the real zeros; set to True to return only + the complex zeros with negative real part and positive imaginary part. + Note that the complex conjugates of the latter are also zeros of the + function, but are not returned by this routine. + + Returns + ------- + z0n : ndarray + Location of nth zero of Y0(z) + y0pz0n : ndarray + Value of derivative Y0'(z0) for nth zero + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("Arguments must be scalar positive integer.") + kf = 0 + kc = not complex + return specfun.cyzo(nt, kf, kc) + + +def y1_zeros(nt, complex=False): + """Compute nt zeros of Bessel function Y1(z), and derivative at each zero. + + The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1. 
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to return
+    complex : bool, default False
+        Set to False to return only the real zeros; set to True to return only
+        the complex zeros with negative real part and positive imaginary part.
+        Note that the complex conjugates of the latter are also zeros of the
+        function, but are not returned by this routine.
+
+    Returns
+    -------
+    z1n : ndarray
+        Location of nth zero of Y1(z)
+    y1pz1n : ndarray
+        Value of derivative Y1'(z1) for nth zero
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("Arguments must be scalar positive integer.")
+    kf = 1
+    kc = not complex
+    return specfun.cyzo(nt, kf, kc)
+
+
+def y1p_zeros(nt, complex=False):
+    """Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
+
+    The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to return
+    complex : bool, default False
+        Set to False to return only the real zeros; set to True to return only
+        the complex zeros with negative real part and positive imaginary part.
+        Note that the complex conjugates of the latter are also zeros of the
+        function, but are not returned by this routine.
+
+    Returns
+    -------
+    z1pn : ndarray
+        Location of nth zero of Y1'(z)
+    y1z1pn : ndarray
+        Value of Y1(z1pn) at the nth zero of Y1'(z)
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("Arguments must be scalar positive integer.")
+    kf = 2
+    kc = not complex
+    return specfun.cyzo(nt, kf, kc)
+
+
+def _bessel_diff_formula(v, z, n, L, phase):
+    # from AMS55.
+    # L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
+    # L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
+    # For K, you can pull out the exp((v-k)*pi*i) into the caller
+    v = asarray(v)
+    p = 1.0
+    s = L(v-n, z)
+    for i in xrange(1, n+1):
+        p = phase * (p * (n-i+1)) / i   # = choose(k, i)
+        s += p*L(v-n + i*2, z)
+    return s / (2.**n)
+
+
+bessel_diff_formula = np.deprecate(_bessel_diff_formula,
+                                   message="bessel_diff_formula is a private function, do not use it!")
+
+
+def jvp(v, z, n=1):
+    """Compute nth derivative of Bessel function Jv(z) with respect to `z`.
+
+    Parameters
+    ----------
+    v : float
+        Order of Bessel function
+    z : complex
+        Argument at which to evaluate the derivative
+    n : int, default 1
+        Order of derivative
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return jv(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, jv, -1)
+
+
+def yvp(v, z, n=1):
+    """Compute nth derivative of Bessel function Yv(z) with respect to `z`.
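+
+    For example, the first derivative agrees with the two-term recurrence
+    Yv'(z) = (Y_{v-1}(z) - Y_{v+1}(z))/2 of DLMF 10.6.1; a quick check:
+
+    >>> import numpy as np
+    >>> from scipy.special import yv, yvp
+    >>> v, z = 1.5, 3.0
+    >>> np.allclose(yvp(v, z), 0.5*(yv(v - 1, z) - yv(v + 1, z)))
+    True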
+
+    Parameters
+    ----------
+    v : float
+        Order of Bessel function
+    z : complex
+        Argument at which to evaluate the derivative
+    n : int, default 1
+        Order of derivative
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return yv(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, yv, -1)
+
+
+def kvp(v, z, n=1):
+    """Compute nth derivative of real-order modified Bessel function Kv(z).
+
+    Kv(z) is the modified Bessel function of the second kind.
+    Derivative is calculated with respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like of float
+        Order of Bessel function
+    z : array_like of complex
+        Argument at which to evaluate the derivative
+    n : int
+        Order of derivative.  Default is first derivative.
+
+    Returns
+    -------
+    out : ndarray
+        The results
+
+    Examples
+    --------
+    Calculate multiple values at order 5:
+
+    >>> from scipy.special import kvp
+    >>> kvp(5, (1, 2, 3+5j))
+    array([-1.84903536e+03+0.j        , -2.57735387e+01+0.j        ,
+           -3.06627741e-02+0.08750845j])
+
+    Calculate for a single value at multiple orders:
+
+    >>> kvp((4, 4.5, 5), 1)
+    array([ -184.0309,  -568.9585, -1849.0354])
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.29.5 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 6.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.29.E5
+
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return kv(v, z)
+    else:
+        return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
+
+
+def ivp(v, z, n=1):
+    """Compute nth derivative of modified Bessel function Iv(z) with respect
+    to `z`.
+
+    Parameters
+    ----------
+    v : array_like of float
+        Order of Bessel function
+    z : array_like of complex
+        Argument at which to evaluate the derivative
+    n : int, default 1
+        Order of derivative
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.29.5 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 6.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.29.E5
+
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return iv(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, iv, 1)
+
+
+def h1vp(v, z, n=1):
+    """Compute nth derivative of Hankel function H1v(z) with respect to `z`.
+
+    Parameters
+    ----------
+    v : float
+        Order of Hankel function
+    z : complex
+        Argument at which to evaluate the derivative
+    n : int, default 1
+        Order of derivative
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+    ..
[2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return hankel1(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, hankel1, -1)
+
+
+def h2vp(v, z, n=1):
+    """Compute nth derivative of Hankel function H2v(z) with respect to `z`.
+
+    Parameters
+    ----------
+    v : float
+        Order of Hankel function
+    z : complex
+        Argument at which to evaluate the derivative
+    n : int, default 1
+        Order of derivative
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return hankel2(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, hankel2, -1)
+
+
+def riccati_jn(n, x):
+    r"""Compute Riccati-Bessel function of the first kind and its derivative.
+
+    The Riccati-Bessel function of the first kind is defined as :math:`x
+    j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
+    kind of order :math:`n`.
+
+    This function computes the value and first derivative of the
+    Riccati-Bessel function for all orders up to and including `n`.
+
+    Parameters
+    ----------
+    n : int
+        Maximum order of function to compute
+    x : float
+        Argument at which to evaluate
+
+    Returns
+    -------
+    jn : ndarray
+        Value of j0(x), ..., jn(x)
+    jnp : ndarray
+        First derivative j0'(x), ..., jn'(x)
+
+    Notes
+    -----
+    The computation is carried out via backward recurrence, using the
+    relation DLMF 10.51.1 [2]_.
+
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.51.E1
+
+    """
+    if not (isscalar(n) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    n = _nonneg_int_or_fail(n, 'n', strict=False)
+    if (n == 0):
+        n1 = 1
+    else:
+        n1 = n
+    nm, jn, jnp = specfun.rctj(n1, x)
+    return jn[:(n+1)], jnp[:(n+1)]
+
+
+def riccati_yn(n, x):
+    """Compute Riccati-Bessel function of the second kind and its derivative.
+
+    The Riccati-Bessel function of the second kind is defined as :math:`x
+    y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
+    kind of order :math:`n`.
+
+    This function computes the value and first derivative of the function for
+    all orders up to and including `n`.
+
+    Parameters
+    ----------
+    n : int
+        Maximum order of function to compute
+    x : float
+        Argument at which to evaluate
+
+    Returns
+    -------
+    yn : ndarray
+        Value of y0(x), ..., yn(x)
+    ynp : ndarray
+        First derivative y0'(x), ..., yn'(x)
+
+    Notes
+    -----
+    The computation is carried out via ascending recurrence, using the
+    relation DLMF 10.51.1 [2]_.
+
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+    ..
[2] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/10.51.E1
+
+    """
+    if not (isscalar(n) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    n = _nonneg_int_or_fail(n, 'n', strict=False)
+    if (n == 0):
+        n1 = 1
+    else:
+        n1 = n
+    nm, jn, jnp = specfun.rcty(n1, x)
+    return jn[:(n+1)], jnp[:(n+1)]
+
+
+def erfinv(y):
+    """Inverse of the error function erf.
+
+    Computes the inverse of the error function.
+
+    In the complex domain, there is no unique complex number w satisfying
+    erf(w) = z, so a true inverse function would be multivalued. When the
+    domain is restricted to the real line, -1 < x < 1, there is a unique real
+    number satisfying erf(erfinv(x)) = x.
+
+    Parameters
+    ----------
+    y : ndarray
+        Argument at which to evaluate. Domain: [-1, 1]
+
+    Returns
+    -------
+    erfinv : ndarray
+        The inverse of erf of y, element-wise
+
+    Examples
+    --------
+    1) Evaluating a scalar:
+
+    >>> from scipy import special
+    >>> special.erfinv(0.5)
+    0.4769362762044698
+
+    2) Evaluating an ndarray:
+
+    >>> from scipy import special
+    >>> y = np.linspace(-1.0, 1.0, num=10)
+    >>> special.erfinv(y)
+    array([       -inf, -0.86312307, -0.5407314 , -0.30457019, -0.0987901 ,
+            0.0987901 ,  0.30457019,  0.5407314 ,  0.86312307,         inf])
+
+    """
+    return ndtri((y+1)/2.0)/sqrt(2)
+
+
+def erfcinv(y):
+    """Inverse of the complementary error function erfc.
+
+    Computes the inverse of the complementary error function erfc.
+
+    In the complex domain, there is no unique complex number w satisfying
+    erfc(w) = z, so a true inverse function would be multivalued. When the
+    domain is restricted to the real line, 0 < x < 2, there is a unique real
+    number satisfying erfc(erfcinv(x)) = x.
+
+    It is related to the inverse of the error function by
+    erfcinv(1-x) = erfinv(x).
+
+    Parameters
+    ----------
+    y : ndarray
+        Argument at which to evaluate. Domain: [0, 2]
+
+    Returns
+    -------
+    erfcinv : ndarray
+        The inverse of erfc of y, element-wise
+
+    Examples
+    --------
+    1) Evaluating a scalar:
+
+    >>> from scipy import special
+    >>> special.erfcinv(0.5)
+    0.4769362762044698
+
+    2) Evaluating an ndarray:
+
+    >>> from scipy import special
+    >>> y = np.linspace(0.0, 2.0, num=11)
+    >>> special.erfcinv(y)
+    array([        inf,  0.9061938 ,  0.59511608,  0.37080716,  0.17914345,
+           -0.        , -0.17914345, -0.37080716, -0.59511608, -0.9061938 ,
+                  -inf])
+
+    """
+    return -ndtri(0.5*y)/sqrt(2)
+
+
+def erf_zeros(nt):
+    """Compute the first nt zeros of erf(z) in the first quadrant, ordered
+    by absolute value.
+
+    Zeros in the other quadrants can be obtained by using the symmetries
+    erf(-z) = -erf(z) and erf(conj(z)) = conj(erf(z)).
+
+    Parameters
+    ----------
+    nt : int
+        The number of zeros to compute
+
+    Returns
+    -------
+    zeros_of_erf : ndarray (complex)
+        Complex values at which the zeros of erf(z) are located
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> special.erf_zeros(1)
+    array([1.45061616+1.880943j])
+
+    Check that erf is (close to) zero for the value returned by erf_zeros:
+
+    >>> special.erf(special.erf_zeros(1))
+    array([4.95159469e-14-1.16407394e-16j])
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+ https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): + raise ValueError("Argument must be positive scalar integer.") + return specfun.cerzo(nt) + + +def fresnelc_zeros(nt): + """Compute nt complex zeros of cosine Fresnel integral C(z). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): + raise ValueError("Argument must be positive scalar integer.") + return specfun.fcszo(1, nt) + + +def fresnels_zeros(nt): + """Compute nt complex zeros of sine Fresnel integral S(z). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): + raise ValueError("Argument must be positive scalar integer.") + return specfun.fcszo(2, nt) + + +def fresnel_zeros(nt): + """Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): + raise ValueError("Argument must be positive scalar integer.") + return specfun.fcszo(2, nt), specfun.fcszo(1, nt) + + +def assoc_laguerre(x, n, k=0.0): + """Compute the generalized (associated) Laguerre polynomial of degree n and order k. + + The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``, + with weighting function ``exp(-x) * x**k`` with ``k > -1``. + + Notes + ----- + `assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with + reversed argument order ``(x, n, k=0.0) --> (n, k, x)``. + + """ + return orthogonal.eval_genlaguerre(n, k, x) + + +digamma = psi + + +def polygamma(n, x): + """Polygamma function n. + + This is the nth derivative of the digamma (psi) function. + + Parameters + ---------- + n : array_like of int + The order of the derivative of `psi`. + x : array_like + Where to evaluate the polygamma function. + + Returns + ------- + polygamma : ndarray + The result. + + Examples + -------- + >>> from scipy import special + >>> x = [2, 3, 25.5] + >>> special.polygamma(1, x) + array([ 0.64493407, 0.39493407, 0.03999467]) + >>> special.polygamma(0, x) == special.psi(x) + array([ True, True, True], dtype=bool) + + """ + n, x = asarray(n), asarray(x) + fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x) + return where(n == 0, psi(x), fac2) + + +def mathieu_even_coef(m, q): + r"""Fourier coefficients for even Mathieu and modified Mathieu functions. + + The Fourier series of the even solutions of the Mathieu differential + equation are of the form + + .. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz + + .. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z + + This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even + input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input + m=2n+1. + + Parameters + ---------- + m : int + Order of Mathieu functions. Must be non-negative. 
+    q : float (>=0)
+        Parameter of Mathieu functions.  Must be non-negative.
+
+    Returns
+    -------
+    Ak : ndarray
+        Even or odd Fourier coefficients, corresponding to even or odd m.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/28.4#i
+
+    """
+    if not (isscalar(m) and isscalar(q)):
+        raise ValueError("m and q must be scalars.")
+    if (q < 0):
+        raise ValueError("q must be non-negative.")
+    if (m != floor(m)) or (m < 0):
+        raise ValueError("m must be an integer >= 0.")
+
+    if (q <= 1):
+        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
+    else:
+        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
+    km = int(qm + 0.5*m)
+    if km > 251:
+        print("Warning, too many predicted coefficients.")
+    kd = 1
+    m = int(floor(m))
+    if m % 2:
+        kd = 2
+
+    a = mathieu_a(m, q)
+    fc = specfun.fcoef(kd, m, q, a)
+    return fc[:km]
+
+
+def mathieu_odd_coef(m, q):
+    r"""Fourier coefficients for odd Mathieu and modified Mathieu functions.
+
+    The Fourier series of the odd solutions of the Mathieu differential
+    equation are of the form
+
+    .. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
+
+    .. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
+
+    This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
+    input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
+    input m=2n+1.
+
+    Parameters
+    ----------
+    m : int
+        Order of Mathieu functions.  Must be non-negative.
+    q : float (>=0)
+        Parameter of Mathieu functions.  Must be non-negative.
+
+    Returns
+    -------
+    Bk : ndarray
+        Even or odd Fourier coefficients, corresponding to even or odd m.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+
+    """
+    if not (isscalar(m) and isscalar(q)):
+        raise ValueError("m and q must be scalars.")
+    if (q < 0):
+        raise ValueError("q must be non-negative.")
+    if (m != floor(m)) or (m <= 0):
+        raise ValueError("m must be an integer > 0.")
+
+    if (q <= 1):
+        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
+    else:
+        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
+    km = int(qm + 0.5*m)
+    if km > 251:
+        print("Warning, too many predicted coefficients.")
+    kd = 4
+    m = int(floor(m))
+    if m % 2:
+        kd = 3
+
+    b = mathieu_b(m, q)
+    fc = specfun.fcoef(kd, m, q, b)
+    return fc[:km]
+
+
+def lpmn(m, n, z):
+    """Sequence of associated Legendre functions of the first kind.
+
+    Computes the associated Legendre function of the first kind of order m and
+    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
+    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
+    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
+
+    This function takes a real argument ``z``. For complex arguments ``z``
+    use clpmn instead.
+
+    Parameters
+    ----------
+    m : int
+       ``|m| <= n``; the order of the Legendre function.
+    n : int
+       where ``n >= 0``; the degree of the Legendre function.  Often
+       called ``l`` (lower case L) in descriptions of the associated
+       Legendre function
+    z : float
+        Input value.
+ + Returns + ------- + Pmn_z : (m+1, n+1) array + Values for all orders 0..m and degrees 0..n + Pmn_d_z : (m+1, n+1) array + Derivatives for all orders 0..m and degrees 0..n + + See Also + -------- + clpmn: associated Legendre functions of the first kind for complex z + + Notes + ----- + In the interval (-1, 1), Ferrer's function of the first kind is + returned. The phase convention used for the intervals (1, inf) + and (-inf, -1) is such that the result is always real. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + .. [2] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/14.3 + + """ + if not isscalar(m) or (abs(m) > n): + raise ValueError("m must be <= n.") + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + if not isscalar(z): + raise ValueError("z must be scalar.") + if iscomplex(z): + raise ValueError("Argument must be real. Use clpmn instead.") + if (m < 0): + mp = -m + mf, nf = mgrid[0:mp+1, 0:n+1] + with ufuncs.errstate(all='ignore'): + if abs(z) < 1: + # Ferrer function; DLMF 14.9.3 + fixarr = where(mf > nf, 0.0, + (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1)) + else: + # Match to clpmn; DLMF 14.9.13 + fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1)) + else: + mp = m + p, pd = specfun.lpmn(mp, n, z) + if (m < 0): + p = p * fixarr + pd = pd * fixarr + return p, pd + + +def clpmn(m, n, z, type=3): + """Associated Legendre function of the first kind for complex arguments. + + Computes the associated Legendre function of the first kind of order m and + degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``. + Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and + ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. + + Parameters + ---------- + m : int + ``|m| <= n``; the order of the Legendre function. + n : int + where ``n >= 0``; the degree of the Legendre function. Often + called ``l`` (lower case L) in descriptions of the associated + Legendre function + z : float or complex + Input value. + type : int, optional + takes values 2 or 3 + 2: cut on the real axis ``|x| > 1`` + 3: cut on the real axis ``-1 < x < 1`` (default) + + Returns + ------- + Pmn_z : (m+1, n+1) array + Values for all orders ``0..m`` and degrees ``0..n`` + Pmn_d_z : (m+1, n+1) array + Derivatives for all orders ``0..m`` and degrees ``0..n`` + + See Also + -------- + lpmn: associated Legendre functions of the first kind for real z + + Notes + ----- + By default, i.e. for ``type=3``, phase conventions are chosen according + to [1]_ such that the function is analytic. The cut lies on the interval + (-1, 1). Approaching the cut from above or below in general yields a phase + factor with respect to Ferrer's function of the first kind + (cf. `lpmn`). + + For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values + on the interval (-1, 1) in the complex plane yields Ferrer's function + of the first kind. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + .. 
[2] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/14.21 + + """ + if not isscalar(m) or (abs(m) > n): + raise ValueError("m must be <= n.") + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + if not isscalar(z): + raise ValueError("z must be scalar.") + if not(type == 2 or type == 3): + raise ValueError("type must be either 2 or 3.") + if (m < 0): + mp = -m + mf, nf = mgrid[0:mp+1, 0:n+1] + with ufuncs.errstate(all='ignore'): + if type == 2: + fixarr = where(mf > nf, 0.0, + (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1)) + else: + fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1)) + else: + mp = m + p, pd = specfun.clpmn(mp, n, real(z), imag(z), type) + if (m < 0): + p = p * fixarr + pd = pd * fixarr + return p, pd + + +def lqmn(m, n, z): + """Sequence of associated Legendre functions of the second kind. + + Computes the associated Legendre function of the second kind of order m and + degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``. + Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and + ``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. + + Parameters + ---------- + m : int + ``|m| <= n``; the order of the Legendre function. + n : int + where ``n >= 0``; the degree of the Legendre function. Often + called ``l`` (lower case L) in descriptions of the associated + Legendre function + z : complex + Input value. + + Returns + ------- + Qmn_z : (m+1, n+1) array + Values for all orders 0..m and degrees 0..n + Qmn_d_z : (m+1, n+1) array + Derivatives for all orders 0..m and degrees 0..n + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(m) or (m < 0): + raise ValueError("m must be a non-negative integer.") + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + if not isscalar(z): + raise ValueError("z must be scalar.") + m = int(m) + n = int(n) + + # Ensure neither m nor n == 0 + mm = max(1, m) + nn = max(1, n) + + if iscomplex(z): + q, qd = specfun.clqmn(mm, nn, z) + else: + q, qd = specfun.lqmn(mm, nn, z) + return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)] + + +def bernoulli(n): + """Bernoulli numbers B0..Bn (inclusive). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + n = int(n) + if (n < 2): + n1 = 2 + else: + n1 = n + return specfun.bernob(int(n1))[:(n+1)] + + +def euler(n): + """Euler numbers E(0), E(1), ..., E(n). + + The Euler numbers [1]_ are also known as the secant numbers. + + Because ``euler(n)`` returns floating point values, it does not give + exact values for large `n`. The first inexact value is E(22). + + Parameters + ---------- + n : int + The highest index of the Euler number to be returned. + + Returns + ------- + ndarray + The Euler numbers [E(0), E(1), ..., E(n)]. + The odd Euler numbers, which are all zero, are included. + + References + ---------- + .. [1] Sequence A122045, The On-Line Encyclopedia of Integer Sequences, + https://oeis.org/A122045 + .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + Examples + -------- + >>> from scipy.special import euler + >>> euler(6) + array([ 1., 0., -1., 0., 5., 0., -61.]) + + >>> euler(13).astype(np.int64) + array([ 1, 0, -1, 0, 5, 0, -61, + 0, 1385, 0, -50521, 0, 2702765, 0]) + + >>> euler(22)[-1] # Exact value of E(22) is -69348874393137901. + -69348874393137976.0 + + """ + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + n = int(n) + if (n < 2): + n1 = 2 + else: + n1 = n + return specfun.eulerb(n1)[:(n+1)] + + +def lpn(n, z): + """Legendre function of the first kind. + + Compute sequence of Legendre functions of the first kind (polynomials), + Pn(z) and derivatives for all degrees from 0 to n (inclusive). + + See also special.legendre for polynomial class. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not (isscalar(n) and isscalar(z)): + raise ValueError("arguments must be scalars.") + n = _nonneg_int_or_fail(n, 'n', strict=False) + if (n < 1): + n1 = 1 + else: + n1 = n + if iscomplex(z): + pn, pd = specfun.clpn(n1, z) + else: + pn, pd = specfun.lpn(n1, z) + return pn[:(n+1)], pd[:(n+1)] + + +def lqn(n, z): + """Legendre function of the second kind. + + Compute sequence of Legendre functions of the second kind, Qn(z) and + derivatives for all degrees from 0 to n (inclusive). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not (isscalar(n) and isscalar(z)): + raise ValueError("arguments must be scalars.") + n = _nonneg_int_or_fail(n, 'n', strict=False) + if (n < 1): + n1 = 1 + else: + n1 = n + if iscomplex(z): + qn, qd = specfun.clqn(n1, z) + else: + qn, qd = specfun.lqnb(n1, z) + return qn[:(n+1)], qd[:(n+1)] + + +def ai_zeros(nt): + """ + Compute `nt` zeros and values of the Airy function Ai and its derivative. + + Computes the first `nt` zeros, `a`, of the Airy function Ai(x); + first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x); + the corresponding values Ai(a'); + and the corresponding values Ai'(a). + + Parameters + ---------- + nt : int + Number of zeros to compute + + Returns + ------- + a : ndarray + First `nt` zeros of Ai(x) + ap : ndarray + First `nt` zeros of Ai'(x) + ai : ndarray + Values of Ai(x) evaluated at first `nt` zeros of Ai'(x) + aip : ndarray + Values of Ai'(x) evaluated at first `nt` zeros of Ai(x) + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + kf = 1 + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be a positive integer scalar.") + return specfun.airyzo(nt, kf) + + +def bi_zeros(nt): + """ + Compute `nt` zeros and values of the Airy function Bi and its derivative. + + Computes the first `nt` zeros, b, of the Airy function Bi(x); + first `nt` zeros, b', of the derivative of the Airy function Bi'(x); + the corresponding values Bi(b'); + and the corresponding values Bi'(b). 
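+
+    For instance, the returned zeros can be checked by evaluating `airy`
+    at them (a quick sketch; Bi is the third output of `airy`):
+
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> b, bp, bi, bip = special.bi_zeros(3)
+    >>> np.allclose(special.airy(b)[2], 0, atol=1e-10)
+    True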
+ + Parameters + ---------- + nt : int + Number of zeros to compute + + Returns + ------- + b : ndarray + First `nt` zeros of Bi(x) + bp : ndarray + First `nt` zeros of Bi'(x) + bi : ndarray + Values of Bi(x) evaluated at first `nt` zeros of Bi'(x) + bip : ndarray + Values of Bi'(x) evaluated at first `nt` zeros of Bi(x) + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + kf = 2 + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be a positive integer scalar.") + return specfun.airyzo(nt, kf) + + +def lmbda(v, x): + r"""Jahnke-Emden Lambda function, Lambdav(x). + + This function is defined as [2]_, + + .. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v}, + + where :math:`\Gamma` is the gamma function and :math:`J_v` is the + Bessel function of the first kind. + + Parameters + ---------- + v : float + Order of the Lambda function + x : float + Value at which to evaluate the function and derivatives + + Returns + ------- + vl : ndarray + Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + dl : ndarray + Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + .. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and + Curves" (4th ed.), Dover, 1945 + """ + if not (isscalar(v) and isscalar(x)): + raise ValueError("arguments must be scalars.") + if (v < 0): + raise ValueError("argument must be > 0.") + n = int(v) + v0 = v - n + if (n < 1): + n1 = 1 + else: + n1 = n + v1 = n1 + v0 + if (v != floor(v)): + vm, vl, dl = specfun.lamv(v1, x) + else: + vm, vl, dl = specfun.lamn(v1, x) + return vl[:(n+1)], dl[:(n+1)] + + +def pbdv_seq(v, x): + """Parabolic cylinder functions Dv(x) and derivatives. + + Parameters + ---------- + v : float + Order of the parabolic cylinder function + x : float + Value at which to evaluate the function and derivatives + + Returns + ------- + dv : ndarray + Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + dp : ndarray + Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 13. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not (isscalar(v) and isscalar(x)): + raise ValueError("arguments must be scalars.") + n = int(v) + v0 = v-n + if (n < 1): + n1 = 1 + else: + n1 = n + v1 = n1 + v0 + dv, dp, pdf, pdd = specfun.pbdv(v1, x) + return dv[:n1+1], dp[:n1+1] + + +def pbvv_seq(v, x): + """Parabolic cylinder functions Vv(x) and derivatives. + + Parameters + ---------- + v : float + Order of the parabolic cylinder function + x : float + Value at which to evaluate the function and derivatives + + Returns + ------- + dv : ndarray + Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + dp : ndarray + Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 13. 
+ https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not (isscalar(v) and isscalar(x)): + raise ValueError("arguments must be scalars.") + n = int(v) + v0 = v-n + if (n <= 1): + n1 = 1 + else: + n1 = n + v1 = n1 + v0 + dv, dp, pdf, pdd = specfun.pbvv(v1, x) + return dv[:n1+1], dp[:n1+1] + + +def pbdn_seq(n, z): + """Parabolic cylinder functions Dn(z) and derivatives. + + Parameters + ---------- + n : int + Order of the parabolic cylinder function + z : complex + Value at which to evaluate the function and derivatives + + Returns + ------- + dv : ndarray + Values of D_i(z), for i=0, ..., i=n. + dp : ndarray + Derivatives D_i'(z), for i=0, ..., i=n. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 13. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not (isscalar(n) and isscalar(z)): + raise ValueError("arguments must be scalars.") + if (floor(n) != n): + raise ValueError("n must be an integer.") + if (abs(n) <= 1): + n1 = 1 + else: + n1 = n + cpb, cpd = specfun.cpbdn(n1, z) + return cpb[:n1+1], cpd[:n1+1] + + +def ber_zeros(nt): + """Compute nt zeros of the Kelvin function ber(x). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return specfun.klvnzo(nt, 1) + + +def bei_zeros(nt): + """Compute nt zeros of the Kelvin function bei(x). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return specfun.klvnzo(nt, 2) + + +def ker_zeros(nt): + """Compute nt zeros of the Kelvin function ker(x). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return specfun.klvnzo(nt, 3) + + +def kei_zeros(nt): + """Compute nt zeros of the Kelvin function kei(x). + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return specfun.klvnzo(nt, 4) + + +def berp_zeros(nt): + """Compute nt zeros of the Kelvin function ber'(x). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return specfun.klvnzo(nt, 5) + + +def beip_zeros(nt): + """Compute nt zeros of the Kelvin function bei'(x). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return specfun.klvnzo(nt, 6) + + +def kerp_zeros(nt): + """Compute nt zeros of the Kelvin function ker'(x). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return specfun.klvnzo(nt, 7) + + +def keip_zeros(nt): + """Compute nt zeros of the Kelvin function kei'(x). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return specfun.klvnzo(nt, 8) + + +def kelvin_zeros(nt): + """Compute nt zeros of all Kelvin functions. + + Returned in a length-8 tuple of arrays of length nt. The tuple contains + the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei'). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return (specfun.klvnzo(nt, 1), + specfun.klvnzo(nt, 2), + specfun.klvnzo(nt, 3), + specfun.klvnzo(nt, 4), + specfun.klvnzo(nt, 5), + specfun.klvnzo(nt, 6), + specfun.klvnzo(nt, 7), + specfun.klvnzo(nt, 8)) + + +def pro_cv_seq(m, n, c): + """Characteristic values for prolate spheroidal wave functions. + + Compute a sequence of characteristic values for the prolate + spheroidal wave functions for mode m and n'=m..n and spheroidal + parameter c. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not (isscalar(m) and isscalar(n) and isscalar(c)): + raise ValueError("Arguments must be scalars.") + if (n != floor(n)) or (m != floor(m)): + raise ValueError("Modes must be integers.") + if (n-m > 199): + raise ValueError("Difference between n and m is too large.") + maxL = n-m+1 + return specfun.segv(m, n, c, 1)[1][:maxL] + + +def obl_cv_seq(m, n, c): + """Characteristic values for oblate spheroidal wave functions. + + Compute a sequence of characteristic values for the oblate + spheroidal wave functions for mode m and n'=m..n and spheroidal + parameter c. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + """ + if not (isscalar(m) and isscalar(n) and isscalar(c)): + raise ValueError("Arguments must be scalars.") + if (n != floor(n)) or (m != floor(m)): + raise ValueError("Modes must be integers.") + if (n-m > 199): + raise ValueError("Difference between n and m is too large.") + maxL = n-m+1 + return specfun.segv(m, n, c, -1)[1][:maxL] + + +def ellipk(m): + r"""Complete elliptic integral of the first kind. + + This function is defined as + + .. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt + + Parameters + ---------- + m : array_like + The parameter of the elliptic integral. + + Returns + ------- + K : array_like + Value of the elliptic integral. + + Notes + ----- + For more precision around point m = 1, use `ellipkm1`, which this + function calls. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [1]_. Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind around m = 1 + ellipkinc : Incomplete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + ellipeinc : Incomplete elliptic integral of the second kind + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + return ellipkm1(1 - asarray(m)) + + +def comb(N, k, exact=False, repetition=False): + """The number of combinations of N things taken k at a time. + + This is often expressed as "N choose k". + + Parameters + ---------- + N : int, ndarray + Number of things. + k : int, ndarray + Number of elements taken. + exact : bool, optional + If `exact` is False, then floating point precision is used, otherwise + exact long integer is computed. + repetition : bool, optional + If `repetition` is True, then the number of combinations with + repetition is computed. + + Returns + ------- + val : int, float, ndarray + The total number of combinations. + + See Also + -------- + binom : Binomial coefficient ufunc + + Notes + ----- + - Array arguments accepted only for exact=False case. + - If k > N, N < 0, or k < 0, then a 0 is returned. + + Examples + -------- + >>> from scipy.special import comb + >>> k = np.array([3, 4]) + >>> n = np.array([10, 10]) + >>> comb(n, k, exact=False) + array([ 120., 210.]) + >>> comb(10, 3, exact=True) + 120L + >>> comb(10, 3, exact=True, repetition=True) + 220L + + """ + if repetition: + return comb(N + k - 1, k, exact) + if exact: + return _comb_int(N, k) + else: + k, N = asarray(k), asarray(N) + cond = (k <= N) & (N >= 0) & (k >= 0) + vals = binom(N, k) + if isinstance(vals, np.ndarray): + vals[~cond] = 0 + elif not cond: + vals = np.float64(0) + return vals + + +def perm(N, k, exact=False): + """Permutations of N things taken k at a time, i.e., k-permutations of N. + + It's also known as "partial permutations". + + Parameters + ---------- + N : int, ndarray + Number of things. + k : int, ndarray + Number of elements taken. + exact : bool, optional + If `exact` is False, then floating point precision is used, otherwise + exact long integer is computed. + + Returns + ------- + val : int, ndarray + The number of k-permutations of N. 
+
+    Notes
+    -----
+    - Array arguments accepted only for exact=False case.
+    - If k > N, N < 0, or k < 0, then a 0 is returned.
+
+    Examples
+    --------
+    >>> from scipy.special import perm
+    >>> k = np.array([3, 4])
+    >>> n = np.array([10, 10])
+    >>> perm(n, k)
+    array([  720.,  5040.])
+    >>> perm(10, 3, exact=True)
+    720
+
+    """
+    if exact:
+        if (k > N) or (N < 0) or (k < 0):
+            return 0
+        val = 1
+        for i in xrange(N - k + 1, N + 1):
+            val *= i
+        return val
+    else:
+        k, N = asarray(k), asarray(N)
+        cond = (k <= N) & (N >= 0) & (k >= 0)
+        vals = poch(N - k + 1, k)
+        if isinstance(vals, np.ndarray):
+            vals[~cond] = 0
+        elif not cond:
+            vals = np.float64(0)
+        return vals
+
+
+# https://stackoverflow.com/a/16327037
+def _range_prod(lo, hi):
+    """
+    Product of a range of numbers.
+
+    Returns the product of
+    lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
+    = hi! / (lo-1)!
+
+    Breaks into smaller products first for speed:
+    _range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
+    """
+    if lo + 1 < hi:
+        mid = (hi + lo) // 2
+        return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
+    if lo == hi:
+        return lo
+    return lo * hi
+
+
+def factorial(n, exact=False):
+    """
+    The factorial of a number or array of numbers.
+
+    The factorial of non-negative integer `n` is the product of all
+    positive integers less than or equal to `n`::
+
+        n! = n * (n - 1) * (n - 2) * ... * 1
+
+    Parameters
+    ----------
+    n : int or array_like of ints
+        Input values.  If ``n < 0``, the return value is 0.
+    exact : bool, optional
+        If True, calculate the answer exactly using long integer arithmetic.
+        If False, result is approximated in floating point rapidly using the
+        `gamma` function.
+        Default is False.
+
+    Returns
+    -------
+    nf : float or int or ndarray
+        Factorial of `n`, as integer or float depending on `exact`.
+
+    Notes
+    -----
+    For arrays with ``exact=True``, the factorial is computed only once, for
+    the largest input, with each other result computed in the process.
+    The output dtype is increased to ``int64`` or ``object`` if necessary.
+
+    With ``exact=False`` the factorial is approximated using the gamma
+    function:
+
+    .. math:: n! = \\Gamma(n+1)
+
+    Examples
+    --------
+    >>> from scipy.special import factorial
+    >>> arr = np.array([3, 4, 5])
+    >>> factorial(arr, exact=False)
+    array([   6.,   24.,  120.])
+    >>> factorial(arr, exact=True)
+    array([  6,  24, 120])
+    >>> factorial(5, exact=True)
+    120L
+
+    """
+    if exact:
+        if np.ndim(n) == 0:
+            return 0 if n < 0 else math.factorial(n)
+        else:
+            n = asarray(n)
+            un = np.unique(n).astype(object)
+
+            # Convert to object array of long ints if np.int can't handle size
+            if un[-1] > 20:
+                dt = object
+            elif un[-1] > 12:
+                dt = np.int64
+            else:
+                dt = np.int
+
+            out = np.empty_like(n, dtype=dt)
+
+            # Handle invalid/trivial values
+            un = un[un > 1]
+            out[n < 2] = 1
+            out[n < 0] = 0
+
+            # Calculate products of each range of numbers
+            if un.size:
+                val = math.factorial(un[0])
+                out[n == un[0]] = val
+                for i in xrange(len(un) - 1):
+                    prev = un[i] + 1
+                    current = un[i + 1]
+                    val *= _range_prod(prev, current)
+                    out[n == current] = val
+            return out
+    else:
+        n = asarray(n)
+        vals = gamma(n + 1)
+        return where(n >= 0, vals, 0)
+
+
+def factorial2(n, exact=False):
+    """Double factorial.
+
+    This is the factorial with every second value skipped.  E.g., ``7!! = 7 * 5
+    * 3 * 1``.  It can be approximated numerically as::
+
+          n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
+              = 2**(n/2) * (n/2)!                           n even
+
+    Parameters
+    ----------
+    n : int or array_like
+        Calculate ``n!!``.
Arrays are only supported with `exact` set + to False. If ``n < 0``, the return value is 0. + exact : bool, optional + The result can be approximated rapidly using the gamma-formula + above (default). If `exact` is set to True, calculate the + answer exactly using integer arithmetic. + + Returns + ------- + nff : float or int + Double factorial of `n`, as an int or a float depending on + `exact`. + + Examples + -------- + >>> from scipy.special import factorial2 + >>> factorial2(7, exact=False) + array(105.00000000000001) + >>> factorial2(7, exact=True) + 105L + + """ + if exact: + if n < -1: + return 0 + if n <= 0: + return 1 + val = 1 + for k in xrange(n, 0, -2): + val *= k + return val + else: + n = asarray(n) + vals = zeros(n.shape, 'd') + cond1 = (n % 2) & (n >= -1) + cond2 = (1-(n % 2)) & (n >= -1) + oddn = extract(cond1, n) + evenn = extract(cond2, n) + nd2o = oddn / 2.0 + nd2e = evenn / 2.0 + place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5)) + place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e)) + return vals + + +def factorialk(n, k, exact=True): + """Multifactorial of n of order k, n(!!...!). + + This is the multifactorial of n skipping k values. For example, + + factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1 + + In particular, for any integer ``n``, we have + + factorialk(n, 1) = factorial(n) + + factorialk(n, 2) = factorial2(n) + + Parameters + ---------- + n : int + Calculate multifactorial. If `n` < 0, the return value is 0. + k : int + Order of multifactorial. + exact : bool, optional + If exact is set to True, calculate the answer exactly using + integer arithmetic. + + Returns + ------- + val : int + Multifactorial of `n`. + + Raises + ------ + NotImplementedError + Raises when exact is False + + Examples + -------- + >>> from scipy.special import factorialk + >>> factorialk(5, 1, exact=True) + 120L + >>> factorialk(5, 3, exact=True) + 10L + + """ + if exact: + if n < 1-k: + return 0 + if n <= 0: + return 1 + val = 1 + for j in xrange(n, 0, -k): + val = val*j + return val + else: + raise NotImplementedError + + +def zeta(x, q=None, out=None): + r""" + Riemann or Hurwitz zeta function. + + Parameters + ---------- + x : array_like of float + Input data, must be real + q : array_like of float, optional + Input data, must be real. Defaults to Riemann zeta. + out : ndarray, optional + Output array for the computed values. + + Returns + ------- + out : array_like + Values of zeta(x). + + Notes + ----- + The two-argument version is the Hurwitz zeta function: + + .. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}, + + Riemann zeta function corresponds to ``q = 1``. 
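+
+    For example, since shifting `q` by one removes the ``k = 0`` term of the
+    sum, ``zeta(x, 2)`` should equal ``zeta(x) - 1``; a quick numerical check
+    of the relation above:
+
+    >>> import numpy as np
+    >>> from scipy.special import zeta
+    >>> np.allclose(zeta(4.0, 2.0), zeta(4.0) - 1.0)
+    True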
+ + See Also + -------- + zetac + + Examples + -------- + >>> from scipy.special import zeta, polygamma, factorial + + Some specific values: + + >>> zeta(2), np.pi**2/6 + (1.6449340668482266, 1.6449340668482264) + + >>> zeta(4), np.pi**4/90 + (1.0823232337111381, 1.082323233711138) + + Relation to the `polygamma` function: + + >>> m = 3 + >>> x = 1.25 + >>> polygamma(m, x) + array(2.782144009188397) + >>> (-1)**(m+1) * factorial(m) * zeta(m+1, x) + 2.7821440091883969 + + """ + if q is None: + q = 1 + return _zeta(x, q, out) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/basic.pyc new file mode 100644 index 0000000..baa89e7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/basic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/cython_special.pxd b/project/venv/lib/python2.7/site-packages/scipy/special/cython_special.pxd new file mode 100644 index 0000000..e16ab8d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/cython_special.pxd @@ -0,0 +1,236 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! + +ctypedef fused Dd_number_t: + double complex + double + +ctypedef fused dfg_number_t: + double + float + long double + +ctypedef fused dl_number_t: + double + long + +cpdef double nctdtrit(double x0, double x1, double x2) nogil +cdef void shichi(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil +cpdef double ellipe(double x0) nogil +cpdef Dd_number_t spence(Dd_number_t x0) nogil +cpdef Dd_number_t ndtr(Dd_number_t x0) nogil +cpdef Dd_number_t hyp2f1(double x0, double x1, double x2, Dd_number_t x3) nogil +cpdef double complex wrightomega(double complex x0) nogil +cpdef double gammainccinv(double x0, double x1) nogil +cpdef double nbdtri(dl_number_t x0, dl_number_t x1, double x2) nogil +cpdef double complex sph_harm(dl_number_t x0, dl_number_t x1, double x2, double x3) nogil +cpdef double y0(double x0) nogil +cpdef Dd_number_t eval_sh_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil +cdef void obl_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cpdef Dd_number_t xlog1py(Dd_number_t x0, Dd_number_t x1) nogil +cpdef double chdtriv(double x0, double x1) nogil +cpdef Dd_number_t dawsn(Dd_number_t x0) nogil +cpdef Dd_number_t psi(Dd_number_t x0) nogil +cpdef double nctdtridf(double x0, double x1, double x2) nogil +cpdef double complex hankel1e(double x0, double complex x1) nogil +cpdef double ncfdtr(double x0, double x1, double x2, double x3) nogil +cdef void sici(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil +cdef void ellipj(double x0, double x1, double *y0, double *y1, double *y2, double *y3) nogil +cpdef Dd_number_t jve(double x0, Dd_number_t x1) nogil +cpdef double ellipeinc(double x0, double x1) nogil +cpdef double pseudo_huber(double x0, double x1) nogil +cpdef double i0e(double x0) nogil +cpdef double stdtridf(double x0, double x1) nogil +cdef void mathieu_sem(double x0, double x1, double x2, double *y0, double *y1) nogil +cpdef double exprel(double x0) nogil +cpdef double chndtridf(double x0, double x1, double x2) nogil +cpdef double pdtrc(dl_number_t x0, double x1) nogil +cpdef double bei(double x0) nogil +cpdef double beip(double x0) nogil +cpdef double gammaincc(double x0, double x1) nogil +cpdef Dd_number_t yv(double x0, Dd_number_t x1) nogil +cpdef double lpmv(double x0, double x1, double x2) nogil 
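+
+# Usage note: the cpdef functions declared here can be cimported into other
+# Cython modules and called without the GIL. A minimal sketch (hypothetical
+# caller.pyx, assuming it is compiled with SciPy available for cimport):
+#
+#     from scipy.special.cython_special cimport j0
+#
+#     def bessel_j0_at(double x):
+#         cdef double y
+#         with nogil:
+#             y = j0(x)   # direct C-level call, no Python-call overhead
+#         return y
+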
+cpdef double mathieu_b(double x0, double x1) nogil +cpdef double nctdtrinc(double x0, double x1, double x2) nogil +cpdef double betainc(double x0, double x1, double x2) nogil +cdef void mathieu_modcem2(double x0, double x1, double x2, double *y0, double *y1) nogil +cdef void iti0k0(double x0, double *y0, double *y1) nogil +cpdef double boxcox(double x0, double x1) nogil +cpdef double kerp(double x0) nogil +cpdef double tandg(double x0) nogil +cpdef double complex hankel2(double x0, double complex x1) nogil +cpdef double complex hankel1(double x0, double complex x1) nogil +cpdef double chdtr(double x0, double x1) nogil +cpdef double radian(double x0, double x1, double x2) nogil +cpdef double betaincinv(double x0, double x1, double x2) nogil +cpdef double sindg(double x0) nogil +cpdef Dd_number_t eval_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil +cpdef double btdtrib(double x0, double x1, double x2) nogil +cpdef Dd_number_t expi(Dd_number_t x0) nogil +cdef void obl_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cpdef Dd_number_t eval_genlaguerre(dl_number_t x0, double x1, Dd_number_t x2) nogil +cpdef double kolmogorov(double x0) nogil +cpdef double j0(double x0) nogil +cpdef double j1(double x0) nogil +cdef void pro_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cpdef double y1(double x0) nogil +cpdef double entr(double x0) nogil +cdef void airye(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil +cpdef Dd_number_t loggamma(Dd_number_t x0) nogil +cpdef double struve(double x0, double x1) nogil +cpdef Dd_number_t erfcx(Dd_number_t x0) nogil +cpdef double inv_boxcox1p(double x0, double x1) nogil +cpdef double pdtri(dl_number_t x0, double x1) nogil +cpdef Dd_number_t rgamma(Dd_number_t x0) nogil +cpdef double poch(double x0, double x1) nogil +cdef void kelvin(double x0, double complex *y0, double complex *y1, double complex *y2, double complex *y3) nogil +cpdef double mathieu_a(double x0, double x1) nogil +cpdef double ndtri(double x0) nogil +cdef void it2i0k0(double x0, double *y0, double *y1) nogil +cpdef double zetac(double x0) nogil +cpdef double kolmogi(double x0) nogil +cpdef double gammaln(double x0) nogil +cpdef double modstruve(double x0, double x1) nogil +cpdef double kei(double x0) nogil +cpdef double huber(double x0, double x1) nogil +cpdef Dd_number_t jv(double x0, Dd_number_t x1) nogil +cpdef double chndtrix(double x0, double x1, double x2) nogil +cpdef double pdtr(dl_number_t x0, double x1) nogil +cpdef double gdtrc(double x0, double x1, double x2) nogil +cpdef double fdtrc(double x0, double x1, double x2) nogil +cpdef double btdtria(double x0, double x1, double x2) nogil +cpdef double it2struve0(double x0) nogil +cpdef double fdtri(double x0, double x1, double x2) nogil +cpdef double ker(double x0) nogil +cpdef double cotdg(double x0) nogil +cpdef double stdtrit(double x0, double x1) nogil +cpdef double bdtrik(double x0, double x1, double x2) nogil +cpdef Dd_number_t yve(double x0, Dd_number_t x1) nogil +cpdef double bdtrin(double x0, double x1, double x2) nogil +cdef void pro_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cpdef double gdtr(double x0, double x1, double x2) nogil +cpdef double round(double x0) nogil +cdef void obl_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cpdef double btdtri(double x0, double x1, double x2) nogil +cpdef 
Dd_number_t erfc(Dd_number_t x0) nogil +cdef void mathieu_modcem1(double x0, double x1, double x2, double *y0, double *y1) nogil +cpdef Dd_number_t erfi(Dd_number_t x0) nogil +cpdef Dd_number_t eval_chebys(dl_number_t x0, Dd_number_t x1) nogil +cdef void hyp2f0(double x0, double x1, double x2, dl_number_t x3, double *y0, double *y1) nogil +cpdef Dd_number_t eval_chebyu(dl_number_t x0, Dd_number_t x1) nogil +cpdef Dd_number_t eval_chebyt(dl_number_t x0, Dd_number_t x1) nogil +cdef void pbwa(double x0, double x1, double *y0, double *y1) nogil +cpdef double eval_hermitenorm(long x0, double x1) nogil +cpdef double itmodstruve0(double x0) nogil +cpdef double cbrt(double x0) nogil +cpdef Dd_number_t eval_chebyc(dl_number_t x0, Dd_number_t x1) nogil +cpdef double complex wofz(double complex x0) nogil +cpdef double cosm1(double x0) nogil +cpdef double rel_entr(double x0, double x1) nogil +cpdef double ellipkinc(double x0, double x1) nogil +cpdef double ncfdtri(double x0, double x1, double x2, double x3) nogil +cpdef double exp2(double x0) nogil +cpdef Dd_number_t exp1(Dd_number_t x0) nogil +cpdef double tklmbda(double x0, double x1) nogil +cdef void modfresnelm(double x0, double complex *y0, double complex *y1) nogil +cdef void modfresnelp(double x0, double complex *y0, double complex *y1) nogil +cpdef double btdtr(double x0, double x1, double x2) nogil +cpdef Dd_number_t eval_sh_chebyt(dl_number_t x0, Dd_number_t x1) nogil +cpdef Dd_number_t eval_sh_chebyu(dl_number_t x0, Dd_number_t x1) nogil +cpdef double k0e(double x0) nogil +cpdef double cosdg(double x0) nogil +cdef void airy(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil +cdef void pro_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cdef void fresnel(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil +cpdef double exp10(double x0) nogil +cdef void itairy(double x0, double *y0, double *y1, double *y2, double *y3) nogil +cpdef double bdtr(dl_number_t x0, dl_number_t x1, double x2) nogil +cpdef double berp(double x0) nogil +cpdef double complex hankel2e(double x0, double complex x1) nogil +cpdef double kn(dl_number_t x0, double x1) nogil +cpdef Dd_number_t expm1(Dd_number_t x0) nogil +cpdef Dd_number_t eval_sh_legendre(dl_number_t x0, Dd_number_t x1) nogil +cpdef double eval_hermite(long x0, double x1) nogil +cdef void mathieu_cem(double x0, double x1, double x2, double *y0, double *y1) nogil +cpdef double nbdtrc(dl_number_t x0, dl_number_t x1, double x2) nogil +cpdef double ncfdtrinc(double x0, double x1, double x2, double x3) nogil +cpdef double gammainc(double x0, double x1) nogil +cpdef double binom(double x0, double x1) nogil +cpdef Dd_number_t iv(double x0, Dd_number_t x1) nogil +cpdef double stdtr(double x0, double x1) nogil +cpdef Dd_number_t eval_laguerre(dl_number_t x0, Dd_number_t x1) nogil +cpdef double inv_boxcox(double x0, double x1) nogil +cpdef Dd_number_t eval_gegenbauer(dl_number_t x0, double x1, Dd_number_t x2) nogil +cpdef double boxcox1p(double x0, double x1) nogil +cpdef Dd_number_t erf(Dd_number_t x0) nogil +cdef void itj0y0(double x0, double *y0, double *y1) nogil +cpdef double expn(dl_number_t x0, double x1) nogil +cpdef double k1e(double x0) nogil +cpdef double ber(double x0) nogil +cpdef Dd_number_t log1p(Dd_number_t x0) nogil +cpdef double gdtrix(double x0, double x1, double x2) nogil +cpdef double smirnovi(dl_number_t x0, double x1) nogil +cpdef Dd_number_t ive(double x0, Dd_number_t x1) nogil +cpdef double nrdtrimn(double 
x0, double x1, double x2) nogil +cpdef double pdtrik(double x0, double x1) nogil +cpdef double smirnov(dl_number_t x0, double x1) nogil +cpdef Dd_number_t hyp1f1(double x0, double x1, Dd_number_t x2) nogil +cpdef Dd_number_t eval_legendre(dl_number_t x0, Dd_number_t x1) nogil +cpdef dfg_number_t expit(dfg_number_t x0) nogil +cpdef double fdtr(double x0, double x1, double x2) nogil +cpdef double nbdtr(dl_number_t x0, dl_number_t x1, double x2) nogil +cpdef double gammasgn(double x0) nogil +cpdef double betaln(double x0, double x1) nogil +cpdef double chndtr(double x0, double x1, double x2) nogil +cdef void pbdv(double x0, double x1, double *y0, double *y1) nogil +cdef void pro_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cdef void pro_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cpdef double ellipkm1(double x0) nogil +cpdef double itstruve0(double x0) nogil +cpdef double fdtridfd(double x0, double x1, double x2) nogil +cpdef double nbdtrik(double x0, double x1, double x2) nogil +cpdef double k1(double x0) nogil +cpdef double k0(double x0) nogil +cpdef double nbdtrin(double x0, double x1, double x2) nogil +cpdef double obl_cv(double x0, double x1, double x2) nogil +cpdef double kl_div(double x0, double x1) nogil +cpdef double i1e(double x0) nogil +cpdef double owens_t(double x0, double x1) nogil +cpdef Dd_number_t kv(double x0, Dd_number_t x1) nogil +cpdef double bdtri(dl_number_t x0, dl_number_t x1, double x2) nogil +cpdef Dd_number_t log_ndtr(Dd_number_t x0) nogil +cpdef Dd_number_t kve(double x0, Dd_number_t x1) nogil +cdef void obl_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cpdef double i1(double x0) nogil +cpdef double i0(double x0) nogil +cpdef double bdtrc(dl_number_t x0, dl_number_t x1, double x2) nogil +cdef void obl_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cdef void obl_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cpdef double beta(double x0, double x1) nogil +cpdef double nrdtrisd(double x0, double x1, double x2) nogil +cpdef double nctdtr(double x0, double x1, double x2) nogil +cpdef Dd_number_t xlogy(Dd_number_t x0, Dd_number_t x1) nogil +cpdef double gammaincinv(double x0, double x1) nogil +cpdef double chndtrinc(double x0, double x1, double x2) nogil +cpdef double yn(dl_number_t x0, double x1) nogil +cpdef Dd_number_t hyp0f1(double x0, Dd_number_t x1) nogil +cpdef double hyperu(double x0, double x1, double x2) nogil +cpdef double agm(double x0, double x1) nogil +cpdef dfg_number_t logit(dfg_number_t x0) nogil +cdef void pro_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cdef void it2j0y0(double x0, double *y0, double *y1) nogil +cpdef double besselpoly(double x0, double x1, double x2) nogil +cdef void pbvv(double x0, double x1, double *y0, double *y1) nogil +cdef void hyp1f2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cpdef double pro_cv(double x0, double x1, double x2) nogil +cpdef double gdtrib(double x0, double x1, double x2) nogil +cpdef double ncfdtridfd(double x0, double x1, double x2, double x3) nogil +cpdef double gdtria(double x0, double x1, double x2) nogil +cpdef Dd_number_t gamma(Dd_number_t x0) nogil +cpdef double chdtrc(double x0, double x1) nogil +cpdef double ncfdtridfn(double x0, double x1, double x2, double x3) nogil +cdef void mathieu_modsem1(double x0, double x1, double x2, double *y0, double *y1) nogil 
+cdef void mathieu_modsem2(double x0, double x1, double x2, double *y0, double *y1) nogil +cpdef double keip(double x0) nogil +cpdef double chdtri(double x0, double x1) nogil +cdef void hyp3f0(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/cython_special.so b/project/venv/lib/python2.7/site-packages/scipy/special/cython_special.so new file mode 100755 index 0000000..39b5761 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/cython_special.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/lambertw.py b/project/venv/lib/python2.7/site-packages/scipy/special/lambertw.py new file mode 100644 index 0000000..b6ea091 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/lambertw.py @@ -0,0 +1,107 @@ +from __future__ import division, print_function, absolute_import + +from ._ufuncs import _lambertw + + +def lambertw(z, k=0, tol=1e-8): + r""" + lambertw(z, k=0, tol=1e-8) + + Lambert W function. + + The Lambert W function `W(z)` is defined as the inverse function + of ``w * exp(w)``. In other words, the value of ``W(z)`` is + such that ``z = W(z) * exp(W(z))`` for any complex number + ``z``. + + The Lambert W function is a multivalued function with infinitely + many branches. Each branch gives a separate solution of the + equation ``z = w exp(w)``. Here, the branches are indexed by the + integer `k`. + + Parameters + ---------- + z : array_like + Input argument. + k : int, optional + Branch index. + tol : float, optional + Evaluation tolerance. + + Returns + ------- + w : array + `w` will have the same shape as `z`. + + Notes + ----- + All branches are supported by `lambertw`: + + * ``lambertw(z)`` gives the principal solution (branch 0) + * ``lambertw(z, k)`` gives the solution on branch `k` + + The Lambert W function has two partially real branches: the + principal branch (`k = 0`) is real for real ``z > -1/e``, and the + ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except + ``k = 0`` have a logarithmic singularity at ``z = 0``. + + **Possible issues** + + The evaluation can become inaccurate very close to the branch point + at ``-1/e``. In some corner cases, `lambertw` might currently + fail to converge, or can end up on the wrong branch. + + **Algorithm** + + Halley's iteration is used to invert ``w * exp(w)``, using a first-order + asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate. + + The definition, implementation and choice of branches is based on [2]_. + + See Also + -------- + wrightomega : the Wright Omega function + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Lambert_W_function + .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5 + (1996) 329-359. 
+   http://www.apmaths.uwo.ca/~djeffrey/Offprints/W-adv-cm.pdf
+
+    Examples
+    --------
+    The Lambert W function is the inverse of ``w exp(w)``:
+
+    >>> from scipy.special import lambertw
+    >>> w = lambertw(1)
+    >>> w
+    (0.56714329040978384+0j)
+    >>> w * np.exp(w)
+    (1.0+0j)
+
+    Any branch gives a valid inverse:
+
+    >>> w = lambertw(1, k=3)
+    >>> w
+    (-2.8535817554090377+17.113535539412148j)
+    >>> w*np.exp(w)
+    (1.0000000000000002+1.609823385706477e-15j)
+
+    **Applications to equation-solving**
+
+    The Lambert W function may be used to solve various kinds of
+    equations, such as finding the value of the infinite power
+    tower :math:`z^{z^{z^{\ldots}}}`:
+
+    >>> def tower(z, n):
+    ...     if n == 0:
+    ...         return z
+    ...     return z ** tower(z, n-1)
+    ...
+    >>> tower(0.5, 100)
+    0.641185744504986
+    >>> -lambertw(-np.log(0.5)) / np.log(0.5)
+    (0.64118574450498589+0j)
+    """
+    return _lambertw(z, k, tol)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/lambertw.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/lambertw.pyc
new file mode 100644
index 0000000..ab61a60
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/lambertw.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/orthogonal.py b/project/venv/lib/python2.7/site-packages/scipy/special/orthogonal.py
new file mode 100644
index 0000000..06da9cd
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/special/orthogonal.py
@@ -0,0 +1,2081 @@
+"""
+A collection of functions to find the weights and abscissas for
+Gaussian Quadrature.
+
+These calculations are done by finding the eigenvalues of a
+tridiagonal matrix whose entries are dependent on the coefficients
+in the recursion formula for the orthogonal polynomials with the
+corresponding weighting function over the interval.
+
+Many recursion relations for orthogonal polynomials are given:
+
+.. math::
+
+    a_{1n} f_{n+1}(x) = (a_{2n} + a_{3n} x) f_n(x) - a_{4n} f_{n-1}(x)
+
+The recursion relation of interest is
+
+.. math::
+
+    P_{n+1}(x) = (x - A_n) P_n(x) - B_n P_{n-1}(x)
+
+where :math:`P` has a different normalization than :math:`f`.
+
+The coefficients can be found as:
+
+.. math::
+
+    A_n = -a_{2n} / a_{3n}
+    \\qquad
+    B_n = \\left( \\frac{a_{4n}}{a_{3n}} \\sqrt{\\frac{h_{n-1}}{h_n}} \\right)^2
+
+where
+
+.. math::
+
+    h_n = \\int_a^b w(x) f_n(x)^2 \\, dx
+
+assuming:
+
+.. math::
+
+    P_0(x) = 1
+    \\qquad
+    P_{-1}(x) = 0.
+
+For the mathematical background, see [golub.welsch-1969-mathcomp]_ and
+[abramowitz.stegun-1965]_.
+
+References
+----------
+.. [golub.welsch-1969-mathcomp]
+   Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss
+   Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10.
+
+.. [abramowitz.stegun-1965]
+   Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of
+   Mathematical Functions: with Formulas, Graphs, and Mathematical
+   Tables*. Gaithersburg, MD: National Bureau of Standards.
+   http://www.math.sfu.ca/~cbm/aands/
+
+.. [townsend.trogdon.olver-2014]
+   Townsend, A. and Trogdon, T. and Olver, S. (2014)
+   *Fast computation of Gauss quadrature nodes and
+   weights on the whole real line*. :arXiv:`1410.5286`.
+
+.. [townsend.trogdon.olver-2015]
+   Townsend, A. and Trogdon, T. and Olver, S. (2015)
+   *Fast computation of Gauss quadrature nodes and
+   weights on the whole real line*.
+   IMA Journal of Numerical Analysis
+   :doi:`10.1093/imanum/drv002`.
+"""
+#
+# Author:  Travis Oliphant 2000
+# Updated Sep. 2003 (fixed bugs --- tested to be accurate)
+
+from __future__ import division, print_function, absolute_import
+
+# Scipy imports.
+import numpy as np
+from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int,
+                   hstack, arccos, arange)
+from scipy import linalg
+from scipy.special import airy
+
+# Local imports.
+from . import _ufuncs
+from . import _ufuncs as cephes
+_gam = cephes.gamma
+from . import specfun
+
+_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',
+             'jacobi', 'laguerre', 'genlaguerre', 'hermite',
+             'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt',
+             'sh_chebyu', 'sh_jacobi']
+
+# Correspondence between new and old names of root functions
+_rootfuns_map = {'roots_legendre': 'p_roots',
+                 'roots_chebyt': 't_roots',
+                 'roots_chebyu': 'u_roots',
+                 'roots_chebyc': 'c_roots',
+                 'roots_chebys': 's_roots',
+                 'roots_jacobi': 'j_roots',
+                 'roots_laguerre': 'l_roots',
+                 'roots_genlaguerre': 'la_roots',
+                 'roots_hermite': 'h_roots',
+                 'roots_hermitenorm': 'he_roots',
+                 'roots_gegenbauer': 'cg_roots',
+                 'roots_sh_legendre': 'ps_roots',
+                 'roots_sh_chebyt': 'ts_roots',
+                 'roots_sh_chebyu': 'us_roots',
+                 'roots_sh_jacobi': 'js_roots'}
+
+_evalfuns = ['eval_legendre', 'eval_chebyt', 'eval_chebyu',
+             'eval_chebyc', 'eval_chebys', 'eval_jacobi',
+             'eval_laguerre', 'eval_genlaguerre', 'eval_hermite',
+             'eval_hermitenorm', 'eval_gegenbauer',
+             'eval_sh_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu',
+             'eval_sh_jacobi']
+
+__all__ = _polyfuns + list(_rootfuns_map.keys()) + _evalfuns + ['poch', 'binom']
+
+
+class orthopoly1d(np.poly1d):
+
+    def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None,
+                 limits=None, monic=False, eval_func=None):
+        equiv_weights = [weights[k] / wfunc(roots[k]) for
+                         k in range(len(roots))]
+        mu = sqrt(hn)
+        if monic:
+            evf = eval_func
+            if evf:
+                knn = kn
+                eval_func = lambda x: evf(x) / knn
+            mu = mu / abs(kn)
+            kn = 1.0
+
+        # compute coefficients from roots, then scale
+        poly = np.poly1d(roots, r=True)
+        np.poly1d.__init__(self, poly.coeffs * float(kn))
+
+        # TODO: In numpy 1.13, there is no need to use __dict__ to access attributes
+        self.__dict__['weights'] = np.array(list(zip(roots,
+                                                     weights, equiv_weights)))
+        self.__dict__['weight_func'] = wfunc
+        self.__dict__['limits'] = limits
+        self.__dict__['normcoef'] = mu
+
+        # Note: eval_func will be discarded on arithmetic
+        self.__dict__['_eval_func'] = eval_func
+
+    def __call__(self, v):
+        if self._eval_func and not isinstance(v, np.poly1d):
+            return self._eval_func(v)
+        else:
+            return np.poly1d.__call__(self, v)
+
+    def _scale(self, p):
+        if p == 1.0:
+            return
+        try:
+            self._coeffs
+        except AttributeError:
+            self.__dict__['coeffs'] *= p
+        else:
+            # the coeffs attr will be made private in future versions of numpy
+            self._coeffs *= p
+
+        evf = self._eval_func
+        if evf:
+            self.__dict__['_eval_func'] = lambda x: evf(x) * p
+        self.__dict__['normcoef'] *= p
+
+
+def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):
+    """[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)
+
+    Returns the roots (x) of an nth order orthogonal polynomial,
+    and weights (w) to use in appropriate Gaussian quadrature with that
+    orthogonal polynomial.
+ + The polynomials have the recurrence relation + P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x) + + an_func(n) should return A_n + sqrt_bn_func(n) should return sqrt(B_n) + mu ( = h_0 ) is the integral of the weight over the orthogonal + interval + """ + k = np.arange(n, dtype='d') + c = np.zeros((2, n)) + c[0,1:] = bn_func(k[1:]) + c[1,:] = an_func(k) + x = linalg.eigvals_banded(c, overwrite_a_band=True) + + # improve roots by one application of Newton's method + y = f(n, x) + dy = df(n, x) + x -= y/dy + + fm = f(n-1, x) + fm /= np.abs(fm).max() + dy /= np.abs(dy).max() + w = 1.0 / (fm * dy) + + if symmetrize: + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + w *= mu0 / w.sum() + + if mu: + return x, w, mu0 + else: + return x, w + +# Jacobi Polynomials 1 P^(alpha,beta)_n(x) + + +def roots_jacobi(n, alpha, beta, mu=False): + r"""Gauss-Jacobi quadrature. + + Computes the sample points and weights for Gauss-Jacobi quadrature. The + sample points are the roots of the n-th degree Jacobi polynomial, + :math:`P^{\alpha, \beta}_n(x)`. These sample points and weights + correctly integrate polynomials of degree :math:`2n - 1` or less over the + interval :math:`[-1, 1]` with weight function + :math:`f(x) = (1 - x)^{\alpha} (1 + x)^{\beta}`. + + Parameters + ---------- + n : int + quadrature order + alpha : float + alpha must be > -1 + beta : float + beta must be > -1 + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + if alpha <= -1 or beta <= -1: + raise ValueError("alpha and beta must be greater than -1.") + + if alpha == 0.0 and beta == 0.0: + return roots_legendre(m, mu) + if alpha == beta: + return roots_gegenbauer(m, alpha+0.5, mu) + + mu0 = 2.0**(alpha+beta+1)*cephes.beta(alpha+1, beta+1) + a = alpha + b = beta + if a + b == 0.0: + an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), 0.0) + else: + an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), + (b*b - a*a) / ((2.0*k+a+b)*(2.0*k+a+b+2))) + + bn_func = lambda k: 2.0 / (2.0*k+a+b)*np.sqrt((k+a)*(k+b) / (2*k+a+b+1)) \ + * np.where(k == 1, 1.0, np.sqrt(k*(k+a+b) / (2.0*k+a+b-1))) + + f = lambda n, x: cephes.eval_jacobi(n, a, b, x) + df = lambda n, x: 0.5 * (n + a + b + 1) \ + * cephes.eval_jacobi(n-1, a+1, b+1, x) + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu) + + +def jacobi(n, alpha, beta, monic=False): + r"""Jacobi polynomial. + + Defined to be the solution of + + .. math:: + (1 - x^2)\frac{d^2}{dx^2}P_n^{(\alpha, \beta)} + + (\beta - \alpha - (\alpha + \beta + 2)x) + \frac{d}{dx}P_n^{(\alpha, \beta)} + + n(n + \alpha + \beta + 1)P_n^{(\alpha, \beta)} = 0 + + for :math:`\alpha, \beta > -1`; :math:`P_n^{(\alpha, \beta)}` is a + polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + alpha : float + Parameter, must be greater than -1. + beta : float + Parameter, must be greater than -1. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + P : orthopoly1d + Jacobi polynomial. + + Notes + ----- + For fixed :math:`\alpha, \beta`, the polynomials + :math:`P_n^{(\alpha, \beta)}` are orthogonal over :math:`[-1, 1]` + with weight function :math:`(1 - x)^\alpha(1 + x)^\beta`. 
+ + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + wfunc = lambda x: (1 - x)**alpha * (1 + x)**beta + if n == 0: + return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic, + eval_func=np.ones_like) + x, w, mu = roots_jacobi(n, alpha, beta, mu=True) + ab1 = alpha + beta + 1.0 + hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1) + hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1) + kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1) + # here kn = coefficient on x^n term + p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic, + lambda x: eval_jacobi(n, alpha, beta, x)) + return p + +# Jacobi Polynomials shifted G_n(p,q,x) + + +def roots_sh_jacobi(n, p1, q1, mu=False): + """Gauss-Jacobi (shifted) quadrature. + + Computes the sample points and weights for Gauss-Jacobi (shifted) + quadrature. The sample points are the roots of the n-th degree shifted + Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample points and weights + correctly integrate polynomials of degree :math:`2n - 1` or less over the + interval :math:`[0, 1]` with weight function + :math:`f(x) = (1 - x)^{p-q} x^{q-1}` + + Parameters + ---------- + n : int + quadrature order + p1 : float + (p1 - q1) must be > -1 + q1 : float + q1 must be > 0 + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + """ + if (p1-q1) <= -1 or q1 <= 0: + raise ValueError("(p - q) must be greater than -1, and q must be greater than 0.") + x, w, m = roots_jacobi(n, p1-q1, q1-1, True) + x = (x + 1) / 2 + scale = 2.0**p1 + w /= scale + m /= scale + if mu: + return x, w, m + else: + return x, w + +def sh_jacobi(n, p, q, monic=False): + r"""Shifted Jacobi polynomial. + + Defined by + + .. math:: + + G_n^{(p, q)}(x) + = \binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1), + + where :math:`P_n^{(\cdot, \cdot)}` is the nth Jacobi polynomial. + + Parameters + ---------- + n : int + Degree of the polynomial. + p : float + Parameter, must have :math:`p > q - 1`. + q : float + Parameter, must be greater than 0. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + G : orthopoly1d + Shifted Jacobi polynomial. + + Notes + ----- + For fixed :math:`p, q`, the polynomials :math:`G_n^{(p, q)}` are + orthogonal over :math:`[0, 1]` with weight function :math:`(1 - + x)^{p - q}x^{q - 1}`. + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + wfunc = lambda x: (1.0 - x)**(p - q) * (x)**(q - 1.) + if n == 0: + return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic, + eval_func=np.ones_like) + n1 = n + x, w, mu0 = roots_sh_jacobi(n1, p, q, mu=True) + hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1) + hn /= (2 * n + p) * (_gam(2 * n + p)**2) + # kn = 1.0 in standard form so monic is redundant. Kept for compatibility. + kn = 1.0 + pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic, + eval_func=lambda x: eval_sh_jacobi(n, p, q, x)) + return pp + +# Generalized Laguerre L^(alpha)_n(x) + + +def roots_genlaguerre(n, alpha, mu=False): + r"""Gauss-generalized Laguerre quadrature. + + Computes the sample points and weights for Gauss-generalized Laguerre + quadrature. The sample points are the roots of the n-th degree generalized + Laguerre polynomial, :math:`L^{\alpha}_n(x)`. 
These sample points and + weights correctly integrate polynomials of degree :math:`2n - 1` or less + over the interval :math:`[0, \infty]` with weight function + :math:`f(x) = x^{\alpha} e^{-x}`. + + Parameters + ---------- + n : int + quadrature order + alpha : float + alpha must be > -1 + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + if alpha < -1: + raise ValueError("alpha must be greater than -1.") + + mu0 = cephes.gamma(alpha + 1) + + if m == 1: + x = np.array([alpha+1.0], 'd') + w = np.array([mu0], 'd') + if mu: + return x, w, mu0 + else: + return x, w + + an_func = lambda k: 2 * k + alpha + 1 + bn_func = lambda k: -np.sqrt(k * (k + alpha)) + f = lambda n, x: cephes.eval_genlaguerre(n, alpha, x) + df = lambda n, x: (n*cephes.eval_genlaguerre(n, alpha, x) + - (n + alpha)*cephes.eval_genlaguerre(n-1, alpha, x))/x + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu) + + +def genlaguerre(n, alpha, monic=False): + r"""Generalized (associated) Laguerre polynomial. + + Defined to be the solution of + + .. math:: + x\frac{d^2}{dx^2}L_n^{(\alpha)} + + (\alpha + 1 - x)\frac{d}{dx}L_n^{(\alpha)} + + nL_n^{(\alpha)} = 0, + + where :math:`\alpha > -1`; :math:`L_n^{(\alpha)}` is a polynomial + of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + alpha : float + Parameter, must be greater than -1. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + L : orthopoly1d + Generalized Laguerre polynomial. + + Notes + ----- + For fixed :math:`\alpha`, the polynomials :math:`L_n^{(\alpha)}` + are orthogonal over :math:`[0, \infty)` with weight function + :math:`e^{-x}x^\alpha`. + + The Laguerre polynomials are the special case where :math:`\alpha + = 0`. + + See Also + -------- + laguerre : Laguerre polynomial. + + """ + if alpha <= -1: + raise ValueError("alpha must be > -1") + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w, mu0 = roots_genlaguerre(n1, alpha, mu=True) + wfunc = lambda x: exp(-x) * x**alpha + if n == 0: + x, w = [], [] + hn = _gam(n + alpha + 1) / _gam(n + 1) + kn = (-1)**n / _gam(n + 1) + p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic, + lambda x: eval_genlaguerre(n, alpha, x)) + return p + +# Laguerre L_n(x) + + +def roots_laguerre(n, mu=False): + r"""Gauss-Laguerre quadrature. + + Computes the sample points and weights for Gauss-Laguerre quadrature. + The sample points are the roots of the n-th degree Laguerre polynomial, + :math:`L_n(x)`. These sample points and weights correctly integrate + polynomials of degree :math:`2n - 1` or less over the interval + :math:`[0, \infty]` with weight function :math:`f(x) = e^{-x}`. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + numpy.polynomial.laguerre.laggauss + """ + return roots_genlaguerre(n, 0.0, mu=mu) + + +def laguerre(n, monic=False): + r"""Laguerre polynomial. 
+
+    Defined to be the solution of
+
+    .. math::
+        x\frac{d^2}{dx^2}L_n + (1 - x)\frac{d}{dx}L_n + nL_n = 0;
+
+    :math:`L_n` is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    L : orthopoly1d
+        Laguerre Polynomial.
+
+    Notes
+    -----
+    The polynomials :math:`L_n` are orthogonal over :math:`[0,
+    \infty)` with weight function :math:`e^{-x}`.
+
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w, mu0 = roots_laguerre(n1, mu=True)
+    if n == 0:
+        x, w = [], []
+    hn = 1.0
+    kn = (-1)**n / _gam(n + 1)
+    p = orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic,
+                    lambda x: eval_laguerre(n, x))
+    return p
+
+# Hermite  1                         H_n(x)
+
+
+def roots_hermite(n, mu=False):
+    r"""Gauss-Hermite (physicist's) quadrature.
+
+    Computes the sample points and weights for Gauss-Hermite quadrature.
+    The sample points are the roots of the n-th degree Hermite polynomial,
+    :math:`H_n(x)`. These sample points and weights correctly integrate
+    polynomials of degree :math:`2n - 1` or less over the interval
+    :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    Notes
+    -----
+    For small n up to 150 a modified version of the Golub-Welsch
+    algorithm is used. Nodes are computed from the eigenvalue
+    problem and improved by one step of a Newton iteration.
+    The weights are computed from the well-known analytical formula.
+
+    For n larger than 150 an optimal asymptotic algorithm is applied
+    which computes nodes and weights in a numerically stable manner.
+    The algorithm has linear runtime making computation for very
+    large n (several thousand or more) feasible.
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+    numpy.polynomial.hermite.hermgauss
+    roots_hermitenorm
+
+    References
+    ----------
+    .. [townsend.trogdon.olver-2014]
+        Townsend, A. and Trogdon, T. and Olver, S. (2014)
+        *Fast computation of Gauss quadrature nodes and
+        weights on the whole real line*. :arXiv:`1410.5286`.
+
+    .. [townsend.trogdon.olver-2015]
+        Townsend, A. and Trogdon, T. and Olver, S. (2015)
+        *Fast computation of Gauss quadrature nodes and
+        weights on the whole real line*.
+        IMA Journal of Numerical Analysis
+        :doi:`10.1093/imanum/drv002`.
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError("n must be a positive integer.")
+
+    mu0 = np.sqrt(np.pi)
+    if n <= 150:
+        an_func = lambda k: 0.0*k
+        bn_func = lambda k: np.sqrt(k/2.0)
+        f = cephes.eval_hermite
+        df = lambda n, x: 2.0 * n * cephes.eval_hermite(n-1, x)
+        return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
+    else:
+        nodes, weights = _roots_hermite_asy(m)
+        if mu:
+            return nodes, weights, mu0
+        else:
+            return nodes, weights
+
+
+def _compute_tauk(n, k, maxit=5):
+    r"""Helper function for Tricomi initial guesses
+
+    For details, see formula 3.1 in lemma 3.1 in the
+    original paper.
+
+    Parameters
+    ----------
+    n : int
+        Quadrature order
+    k : ndarray of type int
+        Index of roots :math:`\tau_k` to compute
+    maxit : int
+        Maximum number of Newton iterations performed; the default
+        value of 5 is sufficient.
+ + Returns + ------- + tauk : ndarray + Roots of equation 3.1 + + See Also + -------- + initial_nodes_a + roots_hermite_asy + """ + a = n % 2 - 0.5 + c = (4.0*floor(n/2.0) - 4.0*k + 3.0)*pi / (4.0*floor(n/2.0) + 2.0*a + 2.0) + f = lambda x: x - sin(x) - c + df = lambda x: 1.0 - cos(x) + xi = 0.5*pi + for i in range(maxit): + xi = xi - f(xi)/df(xi) + return xi + + +def _initial_nodes_a(n, k): + r"""Tricomi initial guesses + + Computes an initial approximation to the square of the `k`-th + (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n` + of order :math:`n`. The formula is the one from lemma 3.1 in the + original paper. The guesses are accurate except in the region + near :math:`\sqrt{2n + 1}`. + + Parameters + ---------- + n : int + Quadrature order + k : ndarray of type int + Index of roots to compute + + Returns + ------- + xksq : ndarray + Square of the approximate roots + + See Also + -------- + initial_nodes + roots_hermite_asy + """ + tauk = _compute_tauk(n, k) + sigk = cos(0.5*tauk)**2 + a = n % 2 - 0.5 + nu = 4.0*floor(n/2.0) + 2.0*a + 2.0 + # Initial approximation of Hermite roots (square) + xksq = nu*sigk - 1.0/(3.0*nu) * (5.0/(4.0*(1.0-sigk)**2) - 1.0/(1.0-sigk) - 0.25) + return xksq + + +def _initial_nodes_b(n, k): + r"""Gatteschi initial guesses + + Computes an initial approximation to the square of the `k`-th + (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n` + of order :math:`n`. The formula is the one from lemma 3.2 in the + original paper. The guesses are accurate in the region just + below :math:`\sqrt{2n + 1}`. + + Parameters + ---------- + n : int + Quadrature order + k : ndarray of type int + Index of roots to compute + + Returns + ------- + xksq : ndarray + Square of the approximate root + + See Also + -------- + initial_nodes + roots_hermite_asy + """ + a = n % 2 - 0.5 + nu = 4.0*floor(n/2.0) + 2.0*a + 2.0 + # Airy roots by approximation + ak = specfun.airyzo(k.max(), 1)[0][::-1] + # Initial approximation of Hermite roots (square) + xksq = (nu + + 2.0**(2.0/3.0) * ak * nu**(1.0/3.0) + + 1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) + + (9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) + + (16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) - + (15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) * 2.0**(1.0/3.0) * nu**(-7.0/3.0)) + return xksq + + +def _initial_nodes(n): + """Initial guesses for the Hermite roots + + Computes an initial approximation to the non-negative + roots :math:`x_k` of the Hermite polynomial :math:`H_n` + of order :math:`n`. The Tricomi and Gatteschi initial + guesses are used in the region where they are accurate. + + Parameters + ---------- + n : int + Quadrature order + + Returns + ------- + xk : ndarray + Approximate roots + + See Also + -------- + roots_hermite_asy + """ + # Turnover point + # linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules + fit = 0.49082003*n - 4.37859653 + turnover = around(fit).astype(int) + # Compute all approximations + ia = arange(1, int(floor(n*0.5)+1)) + ib = ia[::-1] + xasq = _initial_nodes_a(n, ia[:turnover+1]) + xbsq = _initial_nodes_b(n, ib[turnover+1:]) + # Combine + iv = sqrt(hstack([xasq, xbsq])) + # Central node is always zero + if n % 2 == 1: + iv = hstack([0.0, iv]) + return iv + + +def _pbcf(n, theta): + r"""Asymptotic series expansion of parabolic cylinder function + + The implementation is based on sections 3.2 and 3.3 from the + original paper. 
Compared to the published version this code + adds one more term to the asymptotic series. The detailed + formulas can be found at [parabolic-asymptotics]_. The evaluation + is done in a transformed variable :math:`\theta := \arccos(t)` + where :math:`t := x / \mu` and :math:`\mu := \sqrt{2n + 1}`. + + Parameters + ---------- + n : int + Quadrature order + theta : ndarray + Transformed position variable + + Returns + ------- + U : ndarray + Value of the parabolic cylinder function :math:`U(a, \theta)`. + Ud : ndarray + Value of the derivative :math:`U^{\prime}(a, \theta)` of + the parabolic cylinder function. + + See Also + -------- + roots_hermite_asy + + References + ---------- + .. [parabolic-asymptotics] + https://dlmf.nist.gov/12.10#vii + """ + st = sin(theta) + ct = cos(theta) + # https://dlmf.nist.gov/12.10#vii + mu = 2.0*n + 1.0 + # https://dlmf.nist.gov/12.10#E23 + eta = 0.5*theta - 0.5*st*ct + # https://dlmf.nist.gov/12.10#E39 + zeta = -(3.0*eta/2.0) ** (2.0/3.0) + # https://dlmf.nist.gov/12.10#E40 + phi = (-zeta / st**2) ** (0.25) + # Coefficients + # https://dlmf.nist.gov/12.10#E43 + a0 = 1.0 + a1 = 0.10416666666666666667 + a2 = 0.08355034722222222222 + a3 = 0.12822657455632716049 + a4 = 0.29184902646414046425 + a5 = 0.88162726744375765242 + b0 = 1.0 + b1 = -0.14583333333333333333 + b2 = -0.09874131944444444444 + b3 = -0.14331205391589506173 + b4 = -0.31722720267841354810 + b5 = -0.94242914795712024914 + # Polynomials + # https://dlmf.nist.gov/12.10#E9 + # https://dlmf.nist.gov/12.10#E10 + ctp = ct ** arange(16).reshape((-1,1)) + u0 = 1.0 + u1 = (1.0*ctp[3,:] - 6.0*ct) / 24.0 + u2 = (-9.0*ctp[4,:] + 249.0*ctp[2,:] + 145.0) / 1152.0 + u3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 28287.0*ctp[5,:] - 151995.0*ctp[3,:] - 259290.0*ct) / 414720.0 + u4 = (72756.0*ctp[10,:] - 321339.0*ctp[8,:] - 154982.0*ctp[6,:] + 50938215.0*ctp[4,:] + 122602962.0*ctp[2,:] + 12773113.0) / 39813120.0 + u5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 1994971575.0*ctp[11,:] - 3630137104.0*ctp[9,:] + 4433574213.0*ctp[7,:] + - 37370295816.0*ctp[5,:] - 119582875013.0*ctp[3,:] - 34009066266.0*ct) / 6688604160.0 + v0 = 1.0 + v1 = (1.0*ctp[3,:] + 6.0*ct) / 24.0 + v2 = (15.0*ctp[4,:] - 327.0*ctp[2,:] - 143.0) / 1152.0 + v3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 36387.0*ctp[5,:] + 238425.0*ctp[3,:] + 259290.0*ct) / 414720.0 + v4 = (-121260.0*ctp[10,:] + 551733.0*ctp[8,:] - 151958.0*ctp[6,:] - 57484425.0*ctp[4,:] - 132752238.0*ctp[2,:] - 12118727) / 39813120.0 + v5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 2025529095.0*ctp[11,:] - 3750839308.0*ctp[9,:] + 3832454253.0*ctp[7,:] + + 35213253348.0*ctp[5,:] + 130919230435.0*ctp[3,:] + 34009066266*ct) / 6688604160.0 + # Airy Evaluation (Bi and Bip unused) + Ai, Aip, Bi, Bip = airy(mu**(4.0/6.0) * zeta) + # Prefactor for U + P = 2.0*sqrt(pi) * mu**(1.0/6.0) * phi + # Terms for U + # https://dlmf.nist.gov/12.10#E42 + phip = phi ** arange(6, 31, 6).reshape((-1,1)) + A0 = b0*u0 + A1 = (b2*u0 + phip[0,:]*b1*u1 + phip[1,:]*b0*u2) / zeta**3 + A2 = (b4*u0 + phip[0,:]*b3*u1 + phip[1,:]*b2*u2 + phip[2,:]*b1*u3 + phip[3,:]*b0*u4) / zeta**6 + B0 = -(a1*u0 + phip[0,:]*a0*u1) / zeta**2 + B1 = -(a3*u0 + phip[0,:]*a2*u1 + phip[1,:]*a1*u2 + phip[2,:]*a0*u3) / zeta**5 + B2 = -(a5*u0 + phip[0,:]*a4*u1 + phip[1,:]*a3*u2 + phip[2,:]*a2*u3 + phip[3,:]*a1*u4 + phip[4,:]*a0*u5) / zeta**8 + # U + # https://dlmf.nist.gov/12.10#E35 + U = P * (Ai * (A0 + A1/mu**2.0 + A2/mu**4.0) + + Aip * (B0 + B1/mu**2.0 + B2/mu**4.0) / mu**(8.0/6.0)) + # Prefactor for derivative of U + Pd = 
sqrt(2.0*pi) * mu**(2.0/6.0) / phi
+    # Terms for derivative of U
+    # https://dlmf.nist.gov/12.10#E46
+    C0 = -(b1*v0 + phip[0,:]*b0*v1) / zeta
+    C1 = -(b3*v0 + phip[0,:]*b2*v1 + phip[1,:]*b1*v2 + phip[2,:]*b0*v3) / zeta**4
+    C2 = -(b5*v0 + phip[0,:]*b4*v1 + phip[1,:]*b3*v2 + phip[2,:]*b2*v3 + phip[3,:]*b1*v4 + phip[4,:]*b0*v5) / zeta**7
+    D0 = a0*v0
+    D1 = (a2*v0 + phip[0,:]*a1*v1 + phip[1,:]*a0*v2) / zeta**3
+    D2 = (a4*v0 + phip[0,:]*a3*v1 + phip[1,:]*a2*v2 + phip[2,:]*a1*v3 + phip[3,:]*a0*v4) / zeta**6
+    # Derivative of U
+    # https://dlmf.nist.gov/12.10#E36
+    Ud = Pd * (Ai * (C0 + C1/mu**2.0 + C2/mu**4.0) / mu**(4.0/6.0) +
+               Aip * (D0 + D1/mu**2.0 + D2/mu**4.0))
+    return U, Ud
+
+
+def _newton(n, x_initial, maxit=5):
+    """Newton iteration for polishing the asymptotic approximation
+    to the zeros of the Hermite polynomials.
+
+    Parameters
+    ----------
+    n : int
+        Quadrature order
+    x_initial : ndarray
+        Initial guesses for the roots
+    maxit : int
+        Maximal number of Newton iterations.
+        The default 5 is sufficient, usually
+        only one or two steps are needed.
+
+    Returns
+    -------
+    nodes : ndarray
+        Quadrature nodes
+    weights : ndarray
+        Quadrature weights
+
+    See Also
+    --------
+    roots_hermite_asy
+    """
+    # Variable transformation
+    mu = sqrt(2.0*n + 1.0)
+    t = x_initial / mu
+    theta = arccos(t)
+    # Newton iteration
+    for i in range(maxit):
+        u, ud = _pbcf(n, theta)
+        dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud)
+        theta = theta + dtheta
+        if max(abs(dtheta)) < 1e-14:
+            break
+    # Undo variable transformation
+    x = mu * cos(theta)
+    # Central node is always zero
+    if n % 2 == 1:
+        x[0] = 0.0
+    # Compute weights
+    w = exp(-x**2) / (2.0*ud**2)
+    return x, w
+
+
+def _roots_hermite_asy(n):
+    r"""Gauss-Hermite (physicist's) quadrature for large n.
+
+    Computes the sample points and weights for Gauss-Hermite quadrature.
+    The sample points are the roots of the n-th degree Hermite polynomial,
+    :math:`H_n(x)`. These sample points and weights correctly integrate
+    polynomials of degree :math:`2n - 1` or less over the interval
+    :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`.
+
+    This method relies on asymptotic expansions which work best for n > 150.
+    The algorithm has linear runtime making computation for very large n
+    feasible.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+
+    Returns
+    -------
+    nodes : ndarray
+        Quadrature nodes
+    weights : ndarray
+        Quadrature weights
+
+    See Also
+    --------
+    roots_hermite
+
+    References
+    ----------
+    .. [townsend.trogdon.olver-2014]
+        Townsend, A. and Trogdon, T. and Olver, S. (2014)
+        *Fast computation of Gauss quadrature nodes and
+        weights on the whole real line*. :arXiv:`1410.5286`.
+
+    .. [townsend.trogdon.olver-2015]
+        Townsend, A. and Trogdon, T. and Olver, S. (2015)
+        *Fast computation of Gauss quadrature nodes and
+        weights on the whole real line*.
+        IMA Journal of Numerical Analysis
+        :doi:`10.1093/imanum/drv002`.
+    """
+    iv = _initial_nodes(n)
+    nodes, weights = _newton(n, iv)
+    # Combine with negative parts
+    if n % 2 == 0:
+        nodes = hstack([-nodes[::-1], nodes])
+        weights = hstack([weights[::-1], weights])
+    else:
+        nodes = hstack([-nodes[-1:0:-1], nodes])
+        weights = hstack([weights[-1:0:-1], weights])
+    # Scale weights
+    weights *= sqrt(pi) / sum(weights)
+    return nodes, weights
+
+
+def hermite(n, monic=False):
+    r"""Physicist's Hermite polynomial.
+
+    Defined by
+
+    .. math::
+
+        H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2};
+
+    :math:`H_n` is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    H : orthopoly1d
+        Hermite polynomial.
+
+    Notes
+    -----
+    The polynomials :math:`H_n` are orthogonal over :math:`(-\infty,
+    \infty)` with weight function :math:`e^{-x^2}`.
+
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w, mu0 = roots_hermite(n1, mu=True)
+    wfunc = lambda x: exp(-x * x)
+    if n == 0:
+        x, w = [], []
+    hn = 2**n * _gam(n + 1) * sqrt(pi)
+    kn = 2**n
+    p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic,
+                    lambda x: eval_hermite(n, x))
+    return p
+
+# Hermite  2                         He_n(x)
+
+
+def roots_hermitenorm(n, mu=False):
+    r"""Gauss-Hermite (statistician's) quadrature.
+
+    Computes the sample points and weights for Gauss-Hermite quadrature.
+    The sample points are the roots of the n-th degree Hermite polynomial,
+    :math:`He_n(x)`. These sample points and weights correctly integrate
+    polynomials of degree :math:`2n - 1` or less over the interval
+    :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2/2}`.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    Notes
+    -----
+    For small n up to 150 a modified version of the Golub-Welsch
+    algorithm is used. Nodes are computed from the eigenvalue
+    problem and improved by one step of a Newton iteration.
+    The weights are computed from the well-known analytical formula.
+
+    For n larger than 150 an optimal asymptotic algorithm is used
+    which computes nodes and weights in a numerically stable manner.
+    The algorithm has linear runtime making computation for very
+    large n (several thousand or more) feasible.
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+    numpy.polynomial.hermite_e.hermegauss
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError("n must be a positive integer.")
+
+    mu0 = np.sqrt(2.0*np.pi)
+    if n <= 150:
+        an_func = lambda k: 0.0*k
+        bn_func = lambda k: np.sqrt(k)
+        f = cephes.eval_hermitenorm
+        df = lambda n, x: n * cephes.eval_hermitenorm(n-1, x)
+        return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
+    else:
+        nodes, weights = _roots_hermite_asy(m)
+        # Transform
+        nodes *= sqrt(2)
+        weights *= sqrt(2)
+        if mu:
+            return nodes, weights, mu0
+        else:
+            return nodes, weights
+
+
+def hermitenorm(n, monic=False):
+    r"""Normalized (probabilist's) Hermite polynomial.
+
+    Defined by
+
+    .. math::
+
+        He_n(x) = (-1)^ne^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2};
+
+    :math:`He_n` is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    He : orthopoly1d
+        Hermite polynomial.
+
+    Notes
+    -----
+
+    The polynomials :math:`He_n` are orthogonal over :math:`(-\infty,
+    \infty)` with weight function :math:`e^{-x^2/2}`.
+ + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w, mu0 = roots_hermitenorm(n1, mu=True) + wfunc = lambda x: exp(-x * x / 2.0) + if n == 0: + x, w = [], [] + hn = sqrt(2 * pi) * _gam(n + 1) + kn = 1.0 + p = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(-inf, inf), monic=monic, + eval_func=lambda x: eval_hermitenorm(n, x)) + return p + +# The remainder of the polynomials can be derived from the ones above. + +# Ultraspherical (Gegenbauer) C^(alpha)_n(x) + + +def roots_gegenbauer(n, alpha, mu=False): + r"""Gauss-Gegenbauer quadrature. + + Computes the sample points and weights for Gauss-Gegenbauer quadrature. + The sample points are the roots of the n-th degree Gegenbauer polynomial, + :math:`C^{\alpha}_n(x)`. These sample points and weights correctly + integrate polynomials of degree :math:`2n - 1` or less over the interval + :math:`[-1, 1]` with weight function + :math:`f(x) = (1 - x^2)^{\alpha - 1/2}`. + + Parameters + ---------- + n : int + quadrature order + alpha : float + alpha must be > -0.5 + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + if alpha < -0.5: + raise ValueError("alpha must be greater than -0.5.") + elif alpha == 0.0: + # C(n,0,x) == 0 uniformly, however, as alpha->0, C(n,alpha,x)->T(n,x) + # strictly, we should just error out here, since the roots are not + # really defined, but we used to return something useful, so let's + # keep doing so. + return roots_chebyt(n, mu) + + mu0 = np.sqrt(np.pi) * cephes.gamma(alpha + 0.5) / cephes.gamma(alpha + 1) + an_func = lambda k: 0.0 * k + bn_func = lambda k: np.sqrt(k * (k + 2 * alpha - 1) + / (4 * (k + alpha) * (k + alpha - 1))) + f = lambda n, x: cephes.eval_gegenbauer(n, alpha, x) + df = lambda n, x: (-n*x*cephes.eval_gegenbauer(n, alpha, x) + + (n + 2*alpha - 1)*cephes.eval_gegenbauer(n-1, alpha, x))/(1-x**2) + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) + + +def gegenbauer(n, alpha, monic=False): + r"""Gegenbauer (ultraspherical) polynomial. + + Defined to be the solution of + + .. math:: + (1 - x^2)\frac{d^2}{dx^2}C_n^{(\alpha)} + - (2\alpha + 1)x\frac{d}{dx}C_n^{(\alpha)} + + n(n + 2\alpha)C_n^{(\alpha)} = 0 + + for :math:`\alpha > -1/2`; :math:`C_n^{(\alpha)}` is a polynomial + of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + C : orthopoly1d + Gegenbauer polynomial. + + Notes + ----- + The polynomials :math:`C_n^{(\alpha)}` are orthogonal over + :math:`[-1,1]` with weight function :math:`(1 - x^2)^{(\alpha - + 1/2)}`. + + """ + base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic) + if monic: + return base + # Abrahmowitz and Stegan 22.5.20 + factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) / + _gam(2*alpha) / _gam(alpha + 0.5 + n)) + base._scale(factor) + base.__dict__['_eval_func'] = lambda x: eval_gegenbauer(float(n), alpha, x) + return base + +# Chebyshev of the first kind: T_n(x) = +# n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x) +# Computed anew. + + +def roots_chebyt(n, mu=False): + r"""Gauss-Chebyshev (first kind) quadrature. 
+ + Computes the sample points and weights for Gauss-Chebyshev quadrature. + The sample points are the roots of the n-th degree Chebyshev polynomial of + the first kind, :math:`T_n(x)`. These sample points and weights correctly + integrate polynomials of degree :math:`2n - 1` or less over the interval + :math:`[-1, 1]` with weight function :math:`f(x) = 1/\sqrt{1 - x^2}`. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + numpy.polynomial.chebyshev.chebgauss + """ + m = int(n) + if n < 1 or n != m: + raise ValueError('n must be a positive integer.') + x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2*m)) + w = np.full_like(x, pi/m) + if mu: + return x, w, pi + else: + return x, w + + +def chebyt(n, monic=False): + r"""Chebyshev polynomial of the first kind. + + Defined to be the solution of + + .. math:: + (1 - x^2)\frac{d^2}{dx^2}T_n - x\frac{d}{dx}T_n + n^2T_n = 0; + + :math:`T_n` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + T : orthopoly1d + Chebyshev polynomial of the first kind. + + Notes + ----- + The polynomials :math:`T_n` are orthogonal over :math:`[-1, 1]` + with weight function :math:`(1 - x^2)^{-1/2}`. + + See Also + -------- + chebyu : Chebyshev polynomial of the second kind. + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + wfunc = lambda x: 1.0 / sqrt(1 - x * x) + if n == 0: + return orthopoly1d([], [], pi, 1.0, wfunc, (-1, 1), monic, + lambda x: eval_chebyt(n, x)) + n1 = n + x, w, mu = roots_chebyt(n1, mu=True) + hn = pi / 2 + kn = 2**(n - 1) + p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic, + lambda x: eval_chebyt(n, x)) + return p + +# Chebyshev of the second kind +# U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x) + + +def roots_chebyu(n, mu=False): + r"""Gauss-Chebyshev (second kind) quadrature. + + Computes the sample points and weights for Gauss-Chebyshev quadrature. + The sample points are the roots of the n-th degree Chebyshev polynomial of + the second kind, :math:`U_n(x)`. These sample points and weights correctly + integrate polynomials of degree :math:`2n - 1` or less over the interval + :math:`[-1, 1]` with weight function :math:`f(x) = \sqrt{1 - x^2}`. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + """ + m = int(n) + if n < 1 or n != m: + raise ValueError('n must be a positive integer.') + t = np.arange(m, 0, -1) * pi / (m + 1) + x = np.cos(t) + w = pi * np.sin(t)**2 / (m + 1) + if mu: + return x, w, pi / 2 + else: + return x, w + + +def chebyu(n, monic=False): + r"""Chebyshev polynomial of the second kind. + + Defined to be the solution of + + .. math:: + (1 - x^2)\frac{d^2}{dx^2}U_n - 3x\frac{d}{dx}U_n + + n(n + 2)U_n = 0; + + :math:`U_n` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. 
+ monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + U : orthopoly1d + Chebyshev polynomial of the second kind. + + Notes + ----- + The polynomials :math:`U_n` are orthogonal over :math:`[-1, 1]` + with weight function :math:`(1 - x^2)^{1/2}`. + + See Also + -------- + chebyt : Chebyshev polynomial of the first kind. + + """ + base = jacobi(n, 0.5, 0.5, monic=monic) + if monic: + return base + factor = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5) + base._scale(factor) + return base + +# Chebyshev of the first kind C_n(x) + + +def roots_chebyc(n, mu=False): + r"""Gauss-Chebyshev (first kind) quadrature. + + Computes the sample points and weights for Gauss-Chebyshev quadrature. + The sample points are the roots of the n-th degree Chebyshev polynomial of + the first kind, :math:`C_n(x)`. These sample points and weights correctly + integrate polynomials of degree :math:`2n - 1` or less over the interval + :math:`[-2, 2]` with weight function :math:`f(x) = 1/\sqrt{1 - (x/2)^2}`. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + """ + x, w, m = roots_chebyt(n, True) + x *= 2 + w *= 2 + m *= 2 + if mu: + return x, w, m + else: + return x, w + + +def chebyc(n, monic=False): + r"""Chebyshev polynomial of the first kind on :math:`[-2, 2]`. + + Defined as :math:`C_n(x) = 2T_n(x/2)`, where :math:`T_n` is the + nth Chebychev polynomial of the first kind. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + C : orthopoly1d + Chebyshev polynomial of the first kind on :math:`[-2, 2]`. + + Notes + ----- + The polynomials :math:`C_n(x)` are orthogonal over :math:`[-2, 2]` + with weight function :math:`1/\sqrt{1 - (x/2)^2}`. + + See Also + -------- + chebyt : Chebyshev polynomial of the first kind. + + References + ---------- + .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions" + Section 22. National Bureau of Standards, 1972. + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w, mu0 = roots_chebyc(n1, mu=True) + if n == 0: + x, w = [], [] + hn = 4 * pi * ((n == 0) + 1) + kn = 1.0 + p = orthopoly1d(x, w, hn, kn, + wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0), + limits=(-2, 2), monic=monic) + if not monic: + p._scale(2.0 / p(2)) + p.__dict__['_eval_func'] = lambda x: eval_chebyc(n, x) + return p + +# Chebyshev of the second kind S_n(x) + + +def roots_chebys(n, mu=False): + r"""Gauss-Chebyshev (second kind) quadrature. + + Computes the sample points and weights for Gauss-Chebyshev quadrature. + The sample points are the roots of the n-th degree Chebyshev polynomial of + the second kind, :math:`S_n(x)`. These sample points and weights correctly + integrate polynomials of degree :math:`2n - 1` or less over the interval + :math:`[-2, 2]` with weight function :math:`f(x) = \sqrt{1 - (x/2)^2}`. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. 
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+    """
+    x, w, m = roots_chebyu(n, True)
+    x *= 2
+    w *= 2
+    m *= 2
+    if mu:
+        return x, w, m
+    else:
+        return x, w
+
+
+def chebys(n, monic=False):
+    r"""Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
+
+    Defined as :math:`S_n(x) = U_n(x/2)` where :math:`U_n` is the
+    nth Chebyshev polynomial of the second kind.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    S : orthopoly1d
+        Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
+
+    Notes
+    -----
+    The polynomials :math:`S_n(x)` are orthogonal over :math:`[-2, 2]`
+    with weight function :math:`\sqrt{1 - (x/2)^2}`.
+
+    See Also
+    --------
+    chebyu : Chebyshev polynomial of the second kind
+
+    References
+    ----------
+    .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions"
+           Section 22. National Bureau of Standards, 1972.
+
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w, mu0 = roots_chebys(n1, mu=True)
+    if n == 0:
+        x, w = [], []
+    hn = pi
+    kn = 1.0
+    p = orthopoly1d(x, w, hn, kn,
+                    wfunc=lambda x: sqrt(1 - x * x / 4.0),
+                    limits=(-2, 2), monic=monic)
+    if not monic:
+        factor = (n + 1.0) / p(2)
+        p._scale(factor)
+        p.__dict__['_eval_func'] = lambda x: eval_chebys(n, x)
+    return p
+
+# Shifted Chebyshev of the first kind     T^*_n(x)
+
+
+def roots_sh_chebyt(n, mu=False):
+    r"""Gauss-Chebyshev (first kind, shifted) quadrature.
+
+    Computes the sample points and weights for Gauss-Chebyshev quadrature.
+    The sample points are the roots of the n-th degree shifted Chebyshev
+    polynomial of the first kind, :math:`T_n(x)`. These sample points and
+    weights correctly integrate polynomials of degree :math:`2n - 1` or less
+    over the interval :math:`[0, 1]` with weight function
+    :math:`f(x) = 1/\sqrt{x - x^2}`.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+    """
+    xw = roots_chebyt(n, mu)
+    return ((xw[0] + 1) / 2,) + xw[1:]
+
+
+def sh_chebyt(n, monic=False):
+    r"""Shifted Chebyshev polynomial of the first kind.
+
+    Defined as :math:`T^*_n(x) = T_n(2x - 1)` for :math:`T_n` the nth
+    Chebyshev polynomial of the first kind.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    T : orthopoly1d
+        Shifted Chebyshev polynomial of the first kind.
+
+    Notes
+    -----
+    The polynomials :math:`T^*_n` are orthogonal over :math:`[0, 1]`
+    with weight function :math:`(x - x^2)^{-1/2}`.
+
+    """
+    base = sh_jacobi(n, 0.0, 0.5, monic=monic)
+    if monic:
+        return base
+    if n > 0:
+        factor = 4**n / 2.0
+    else:
+        factor = 1.0
+    base._scale(factor)
+    return base
+
+
+# Shifted Chebyshev of the second kind    U^*_n(x)
+def roots_sh_chebyu(n, mu=False):
+    r"""Gauss-Chebyshev (second kind, shifted) quadrature.
+
+    Computes the sample points and weights for Gauss-Chebyshev quadrature.
+ The sample points are the roots of the n-th degree shifted Chebyshev + polynomial of the second kind, :math:`U_n(x)`. These sample points and + weights correctly integrate polynomials of degree :math:`2n - 1` or less + over the interval :math:`[0, 1]` with weight function + :math:`f(x) = \sqrt{x - x^2}`. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + """ + x, w, m = roots_chebyu(n, True) + x = (x + 1) / 2 + m_us = cephes.beta(1.5, 1.5) + w *= m_us / m + if mu: + return x, w, m_us + else: + return x, w + + +def sh_chebyu(n, monic=False): + r"""Shifted Chebyshev polynomial of the second kind. + + Defined as :math:`U^*_n(x) = U_n(2x - 1)` for :math:`U_n` the nth + Chebyshev polynomial of the second kind. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + U : orthopoly1d + Shifted Chebyshev polynomial of the second kind. + + Notes + ----- + The polynomials :math:`U^*_n` are orthogonal over :math:`[0, 1]` + with weight function :math:`(x - x^2)^{1/2}`. + + """ + base = sh_jacobi(n, 2.0, 1.5, monic=monic) + if monic: + return base + factor = 4**n + base._scale(factor) + return base + +# Legendre + + +def roots_legendre(n, mu=False): + r"""Gauss-Legendre quadrature. + + Computes the sample points and weights for Gauss-Legendre quadrature. + The sample points are the roots of the n-th degree Legendre polynomial + :math:`P_n(x)`. These sample points and weights correctly integrate + polynomials of degree :math:`2n - 1` or less over the interval + :math:`[-1, 1]` with weight function :math:`f(x) = 1.0`. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + numpy.polynomial.legendre.leggauss + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + + mu0 = 2.0 + an_func = lambda k: 0.0 * k + bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1)) + f = cephes.eval_legendre + df = lambda n, x: (-n*x*cephes.eval_legendre(n, x) + + n*cephes.eval_legendre(n-1, x))/(1-x**2) + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) + + +def legendre(n, monic=False): + r"""Legendre polynomial. + + Defined to be the solution of + + .. math:: + \frac{d}{dx}\left[(1 - x^2)\frac{d}{dx}P_n(x)\right] + + n(n + 1)P_n(x) = 0; + + :math:`P_n(x)` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + P : orthopoly1d + Legendre polynomial. + + Notes + ----- + The polynomials :math:`P_n` are orthogonal over :math:`[-1, 1]` + with weight function 1. + + Examples + -------- + Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0): + + >>> from scipy.special import legendre + >>> legendre(3) + poly1d([ 2.5, 0. , -1.5, 0. 
]) + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w, mu0 = roots_legendre(n1, mu=True) + if n == 0: + x, w = [], [] + hn = 2.0 / (2 * n + 1) + kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n + p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1), + monic=monic, eval_func=lambda x: eval_legendre(n, x)) + return p + +# Shifted Legendre P^*_n(x) + + +def roots_sh_legendre(n, mu=False): + r"""Gauss-Legendre (shifted) quadrature. + + Computes the sample points and weights for Gauss-Legendre quadrature. + The sample points are the roots of the n-th degree shifted Legendre + polynomial :math:`P^*_n(x)`. These sample points and weights correctly + integrate polynomials of degree :math:`2n - 1` or less over the interval + :math:`[0, 1]` with weight function :math:`f(x) = 1.0`. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + """ + x, w = roots_legendre(n) + x = (x + 1) / 2 + w /= 2 + if mu: + return x, w, 1.0 + else: + return x, w + +def sh_legendre(n, monic=False): + r"""Shifted Legendre polynomial. + + Defined as :math:`P^*_n(x) = P_n(2x - 1)` for :math:`P_n` the nth + Legendre polynomial. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + P : orthopoly1d + Shifted Legendre polynomial. + + Notes + ----- + The polynomials :math:`P^*_n` are orthogonal over :math:`[0, 1]` + with weight function 1. + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + wfunc = lambda x: 0.0 * x + 1.0 + if n == 0: + return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic, + lambda x: eval_sh_legendre(n, x)) + x, w, mu0 = roots_sh_legendre(n, mu=True) + hn = 1.0 / (2 * n + 1.0) + kn = _gam(2 * n + 1) / _gam(n + 1)**2 + p = orthopoly1d(x, w, hn, kn, wfunc, limits=(0, 1), monic=monic, + eval_func=lambda x: eval_sh_legendre(n, x)) + return p + + +# ----------------------------------------------------------------------------- +# Code for backwards compatibility +# ----------------------------------------------------------------------------- + +# Import functions in case someone is still calling the orthogonal +# module directly. (They shouldn't be; it's not in the public API). 
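+#
+# As a sketch of what the aliasing loop at the end of this file provides
+# (names taken from the `_rootfuns_map` mapping defined earlier in this
+# module), a legacy root-function name resolves to the very same object
+# as its replacement:
+#
+#     >>> from scipy.special import orthogonal
+#     >>> orthogonal.j_roots is orthogonal.roots_jacobi
+#     True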
+poch = cephes.poch + +from ._ufuncs import (binom, eval_jacobi, eval_sh_jacobi, eval_gegenbauer, + eval_chebyt, eval_chebyu, eval_chebys, eval_chebyc, + eval_sh_chebyt, eval_sh_chebyu, eval_legendre, + eval_sh_legendre, eval_genlaguerre, eval_laguerre, + eval_hermite, eval_hermitenorm) + +# Make the old root function names an alias for the new ones +_modattrs = globals() +for newfun, oldfun in _rootfuns_map.items(): + _modattrs[oldfun] = _modattrs[newfun] + __all__.append(oldfun) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/orthogonal.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/orthogonal.pyc new file mode 100644 index 0000000..a2f13c3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/orthogonal.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/setup.py b/project/venv/lib/python2.7/site-packages/scipy/special/setup.py new file mode 100644 index 0000000..b4b9d0c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/setup.py @@ -0,0 +1,160 @@ +from __future__ import division, print_function, absolute_import + +import os +import sys +from os.path import join, dirname +from distutils.sysconfig import get_python_inc +import subprocess +import numpy +from numpy.distutils.misc_util import get_numpy_include_dirs + +try: + from numpy.distutils.misc_util import get_info +except ImportError: + raise ValueError("numpy >= 1.4 is required (detected %s from %s)" % + (numpy.__version__, numpy.__file__)) + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + from scipy._build_utils.system_info import get_info as get_system_info + + config = Configuration('special', parent_package, top_path) + + define_macros = [] + if sys.platform == 'win32': + # define_macros.append(('NOINFINITIES',None)) + # define_macros.append(('NONANS',None)) + define_macros.append(('_USE_MATH_DEFINES',None)) + + curdir = os.path.abspath(os.path.dirname(__file__)) + inc_dirs = [get_python_inc(), os.path.join(curdir, "c_misc")] + if inc_dirs[0] != get_python_inc(plat_specific=1): + inc_dirs.append(get_python_inc(plat_specific=1)) + inc_dirs.insert(0, get_numpy_include_dirs()) + inc_dirs.append(join(dirname(dirname(__file__)), '_lib')) + + # C libraries + c_misc_src = [join('c_misc','*.c')] + c_misc_hdr = [join('c_misc','*.h')] + cephes_src = [join('cephes','*.c')] + cephes_hdr = [join('cephes', '*.h')] + config.add_library('sc_c_misc',sources=c_misc_src, + include_dirs=[curdir] + inc_dirs, + depends=(cephes_hdr + cephes_src + + c_misc_hdr + cephes_hdr + + ['*.h']), + macros=define_macros) + config.add_library('sc_cephes',sources=cephes_src, + include_dirs=[curdir] + inc_dirs, + depends=(cephes_hdr + ['*.h']), + macros=define_macros) + + # Fortran/C++ libraries + mach_src = [join('mach','*.f')] + amos_src = [join('amos','*.f')] + cdf_src = [join('cdflib','*.f')] + specfun_src = [join('specfun','*.f')] + config.add_library('sc_mach',sources=mach_src, + config_fc={'noopt':(__file__,1)}) + config.add_library('sc_amos',sources=amos_src) + config.add_library('sc_cdf',sources=cdf_src) + config.add_library('sc_specfun',sources=specfun_src) + + # Extension specfun + config.add_extension('specfun', + sources=['specfun.pyf'], + f2py_options=['--no-wrap-functions'], + depends=specfun_src, + define_macros=[], + libraries=['sc_specfun']) + + # Extension _ufuncs + headers = ['*.h', join('c_misc', '*.h'), join('cephes', '*.h')] + ufuncs_src = ['_ufuncs.c', 'sf_error.c', 
'_logit.c.src', + "amos_wrappers.c", "cdf_wrappers.c", "specfun_wrappers.c"] + ufuncs_dep = (headers + ufuncs_src + amos_src + c_misc_src + cephes_src + + mach_src + cdf_src + specfun_src) + cfg = dict(get_system_info('lapack_opt')) + cfg.setdefault('include_dirs', []).extend([curdir] + inc_dirs + [numpy.get_include()]) + cfg.setdefault('libraries', []).extend(['sc_amos','sc_c_misc','sc_cephes','sc_mach', + 'sc_cdf', 'sc_specfun']) + cfg.setdefault('define_macros', []).extend(define_macros) + config.add_extension('_ufuncs', + depends=ufuncs_dep, + sources=ufuncs_src, + extra_info=get_info("npymath"), + **cfg) + + # Extension _ufuncs_cxx + ufuncs_cxx_src = ['_ufuncs_cxx.cxx', 'sf_error.c', + '_faddeeva.cxx', 'Faddeeva.cc', + '_wright.cxx', 'wright.cc'] + ufuncs_cxx_dep = (headers + ufuncs_cxx_src + cephes_src + + ['*.hh']) + config.add_extension('_ufuncs_cxx', + sources=ufuncs_cxx_src, + depends=ufuncs_cxx_dep, + include_dirs=[curdir] + inc_dirs, + define_macros=define_macros, + extra_info=get_info("npymath")) + + cfg = dict(get_system_info('lapack_opt')) + config.add_extension('_ellip_harm_2', + sources=['_ellip_harm_2.c', 'sf_error.c',], + **cfg + ) + + # Cython API + config.add_data_files('cython_special.pxd') + + cython_special_src = ['cython_special.c', 'sf_error.c', '_logit.c.src', + "amos_wrappers.c", "cdf_wrappers.c", "specfun_wrappers.c"] + cython_special_dep = (headers + ufuncs_src + ufuncs_cxx_src + amos_src + + c_misc_src + cephes_src + mach_src + cdf_src + + specfun_src) + cfg = dict(get_system_info('lapack_opt')) + cfg.setdefault('include_dirs', []).extend([curdir] + inc_dirs + [numpy.get_include()]) + cfg.setdefault('libraries', []).extend(['sc_amos','sc_c_misc','sc_cephes','sc_mach', + 'sc_cdf', 'sc_specfun']) + cfg.setdefault('define_macros', []).extend(define_macros) + config.add_extension('cython_special', + depends=cython_special_dep, + sources=cython_special_src, + extra_info=get_info("npymath"), + **cfg) + + # combinatorics + config.add_extension('_comb', + sources=['_comb.c']) + + # testing for _round.h + config.add_extension('_test_round', + sources=['_test_round.c'], + depends=['_round.h', 'cephes/dd_idefs.h'], + include_dirs=[numpy.get_include()] + inc_dirs, + extra_info=get_info('npymath')) + + config.add_data_files('tests/*.py') + config.add_data_files('tests/data/README') + + # regenerate npz data files + makenpz = os.path.join(os.path.dirname(__file__), + 'utils', 'makenpz.py') + data_dir = os.path.join(os.path.dirname(__file__), + 'tests', 'data') + for name in ['boost', 'gsl', 'local']: + subprocess.check_call([sys.executable, makenpz, + '--use-timestamp', + os.path.join(data_dir, name)]) + + config.add_data_files('tests/data/*.npz') + + config.add_subpackage('_precompute') + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/setup.pyc new file mode 100644 index 0000000..98ddf2a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/sf_error.py b/project/venv/lib/python2.7/site-packages/scipy/special/sf_error.py new file mode 100644 index 0000000..e1edc98 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/sf_error.py @@ -0,0 +1,15 @@ +"""Warnings and Exceptions that can be raised by special functions.""" 
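+
+# A usage sketch (illustrative only; the classes are defined below): these
+# warnings plug into the standard `warnings` machinery, so callers can
+# escalate special-function warnings to exceptions:
+#
+#     >>> import warnings
+#     >>> from scipy.special import SpecialFunctionWarning
+#     >>> warnings.simplefilter("error", SpecialFunctionWarning)
+#
+# after which any code path that would emit the warning raises instead.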
+import warnings + + +class SpecialFunctionWarning(Warning): + """Warning that can be emitted by special functions.""" + pass + + +warnings.simplefilter("always", category=SpecialFunctionWarning) + + +class SpecialFunctionError(Exception): + """Exception that can be raised by special functions.""" + pass diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/sf_error.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/sf_error.pyc new file mode 100644 index 0000000..7a715da Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/sf_error.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/specfun.so b/project/venv/lib/python2.7/site-packages/scipy/special/specfun.so new file mode 100755 index 0000000..ecbcba8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/specfun.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/spfun_stats.py b/project/venv/lib/python2.7/site-packages/scipy/special/spfun_stats.py new file mode 100644 index 0000000..1fda996 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/spfun_stats.py @@ -0,0 +1,95 @@ +# Last Change: Sat Mar 21 02:00 PM 2009 J + +# Copyright (c) 2001, 2002 Enthought, Inc. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# a. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# b. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# c. Neither the name of the Enthought nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. + +"""Some more special functions which may be useful for multivariate statistical +analysis.""" + +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy.special import gammaln as loggam + + +__all__ = ['multigammaln'] + + +def multigammaln(a, d): + r"""Returns the log of multivariate gamma, also sometimes called the + generalized gamma. + + Parameters + ---------- + a : ndarray + The multivariate gamma is computed for each item of `a`. + d : int + The dimension of the space of integration. + + Returns + ------- + res : ndarray + The values of the log multivariate gamma at the given points `a`. 
+ + Notes + ----- + The formal definition of the multivariate gamma of dimension d for a real + `a` is + + .. math:: + + \Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA + + with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of + all the positive definite matrices of dimension `d`. Note that `a` is a + scalar: the integrand only is multivariate, the argument is not (the + function is defined over a subset of the real set). + + This can be proven to be equal to the much friendlier equation + + .. math:: + + \Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2). + + References + ---------- + R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in + probability and mathematical statistics). + + """ + a = np.asarray(a) + if not np.isscalar(d) or (np.floor(d) != d): + raise ValueError("d should be a positive integer (dimension)") + if np.any(a <= 0.5 * (d - 1)): + raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met" + % (a, 0.5 * (d-1))) + + res = (d * (d-1) * 0.25) * np.log(np.pi) + res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0) + return res diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/spfun_stats.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/spfun_stats.pyc new file mode 100644 index 0000000..9e9b94c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/spfun_stats.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/__init__.pyc new file mode 100644 index 0000000..e987bcf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/README b/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/README new file mode 100644 index 0000000..da0b0fd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/README @@ -0,0 +1,578 @@ +This directory contains numerical data for testing special functions. +The data is in version control as text files. + +The data is automatically packed into npz files by setup.py. +The npz files should not be checked in version control. 
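+
+For example, once packed, an archive can be inspected with numpy (shown
+only as a sketch; the array names inside each archive follow the source
+data files and are not listed here):
+
+    >>> import numpy as np
+    >>> with np.load('boost.npz') as archive:
+    ...     print(sorted(archive.files)[:3])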
+ +The data in gsl is computed using the GNU scientific library, the data +in local is computed using mpmath, and the data in boost is a copy of +data distributed with the boost library and comes with the following +license: + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +========= + +Copyright holders of each file are listed here: + +Jamfile.v2:# Copyright Daryle Walker, Hubert Holin, John Maddock 2006 - 2007 +acosh_data.ipp:// Copyright John Maddock 2008. +acosh_test.hpp:// (C) Copyright Hubert Holin 2003. +almost_equal.ipp:// Copyright (c) 2006 Johan Rade +asinh_data.ipp:// Copyright John Maddock 2008. +asinh_test.hpp:// (C) Copyright Hubert Holin 2003. +assoc_legendre_p.ipp:// (C) Copyright John Maddock 2006-7. +atanh_data.ipp:// Copyright John Maddock 2008. +atanh_test.hpp:// (C) Copyright Hubert Holin 2003. +bessel_i_data.ipp:// Copyright (c) 2007 John Maddock +bessel_i_int_data.ipp:// Copyright (c) 2007 John Maddock +bessel_j_data.ipp:// Copyright (c) 2007 John Maddock +bessel_j_int_data.ipp:// Copyright (c) 2007 John Maddock +bessel_j_large_data.ipp:// Copyright (c) 2007 John Maddock +bessel_k_data.ipp:// Copyright (c) 2007 John Maddock +bessel_k_int_data.ipp:// Copyright (c) 2007 John Maddock +bessel_y01_data.ipp:// Copyright (c) 2007 John Maddock +bessel_yn_data.ipp:// Copyright (c) 2007 John Maddock +bessel_yv_data.ipp:// Copyright (c) 2007 John Maddock +beta_exp_data.ipp:// (C) Copyright John Maddock 2006. +beta_med_data.ipp:// (C) Copyright John Maddock 2006. +beta_small_data.ipp:// (C) Copyright John Maddock 2006. +binomial_data.ipp:// (C) Copyright John Maddock 2006-7. +binomial_large_data.ipp:// (C) Copyright John Maddock 2006-7. +binomial_quantile.ipp:// (C) Copyright John Maddock 2006-7. +cbrt_data.ipp:// (C) Copyright John Maddock 2006-7. +common_factor_test.cpp:// (C) Copyright Daryle Walker 2001, 2006. +compile_test/tools_rational_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_real_cast_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_remez_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_chi_squared_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_complement_incl_test.cpp:// Copyright John Maddock 2006. 
+compile_test/sf_sign_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_digamma_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_trunc_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/constants_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/sf_sinc_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_binomial_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_binomial_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_test_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_normal_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_sinhc_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_rc_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_sin_pi_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_sph_harm_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_poisson_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/test_traits.cpp:// Copyright John Maddock 2007. +compile_test/dist_gamma_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_cos_pi_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_logistic_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_fpclassify_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_atanh_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_precision_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_hankel_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/sf_cbrt_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_nc_beta_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_legendre_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_stats_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_polynomial_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_config_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_exponential_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_students_t_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_inv_gamma_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_acosh_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_beta_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_fisher_f_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_triangular_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/instantiate.hpp:// Copyright John Maddock 2006. +compile_test/instantiate.hpp:// Copyright Paul A. Bristow 2007, 2010. +compile_test/tools_solve_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_next_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/generate.sh:// Copyright John Maddock 2006. +compile_test/generate.sh:// Copyright John Maddock 2006. +compile_test/generate.sh:// Copyright John Maddock 2006. +compile_test/distribution_concept_check.cpp:// Copyright John Maddock 2006. +compile_test/sf_laguerre_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tr1_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_ellint_rj_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_nc_chi_squ_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/dist_skew_norm_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_modf_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_find_location_incl_test.cpp:// Copyright John Maddock 2006. 
+compile_test/compl_acos_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_rd_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_roots_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_test_data_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_abs_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_nc_t_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_factorials_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_gamma_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_atan_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_powm1_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_hypot_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_pareto_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_round_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_weibull_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/std_real_concept_check.cpp:// Copyright John Maddock 2006. +compile_test/dist_hypergeo_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/dist_inv_chi_sq_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_sqrt1pm1_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_log1p_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_jacobi_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/dist_neg_binom_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_nc_f_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/dist_find_scale_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_bessel_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_minima_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_asin_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_extreme_val_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_lanczos_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_uniform_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/test_compile_result.hpp:// Copyright John Maddock 2007. +compile_test/tools_series_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_3_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_rf_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_2_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_hermite_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/poison.hpp:// Copyright John Maddock 2013. +compile_test/sf_zeta_incl_test.cpp:// Copyright John Maddock 2007. +compile_test/dist_laplace_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_expint_incl_test.cpp:// Copyright John Maddock 2007. +compile_test/sf_expm1_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_bernoulli_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_asinh_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_beta_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_fraction_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_owens_t_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/tools_toms748_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_1_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_erf_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/main.cpp:// Copyright John Maddock 2009. +compile_test/sf_math_fwd_incl_test.cpp:// Copyright John Maddock 2006. 
+compile_test/sf_airy_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/dist_lognormal_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_cauchy_incl_test.cpp:// Copyright John Maddock 2006. +complex_test.cpp:// (C) Copyright John Maddock 2005. +digamma_data.ipp:// (C) Copyright John Maddock 2006-7. +digamma_neg_data.ipp:// (C) Copyright John Maddock 2006-7. +digamma_root_data.ipp:// (C) Copyright John Maddock 2006-7. +digamma_small_data.ipp:// (C) Copyright John Maddock 2006-7. +e_float_concept_check.cpp:// Copyright John Maddock 2011. +ellint_e2_data.ipp:// Copyright (c) 2006 John Maddock +ellint_e_data.ipp:// Copyright (c) 2006 John Maddock +ellint_f_data.ipp:// Copyright (c) 2006 John Maddock +ellint_k_data.ipp:// (C) Copyright John Maddock 2006-7. +ellint_pi2_data.ipp:// Copyright (c) 2006 John Maddock +ellint_pi3_data.ipp:// Copyright (c) 2006 John Maddock +ellint_pi3_large_data.ipp:// Copyright (c) 2006 John Maddock +ellint_rc_data.ipp:// Copyright (c) 2006 John Maddock +ellint_rd_data.ipp:// Copyright (c) 2006 John Maddock +ellint_rf_data.ipp:// Copyright (c) 2006 John Maddock +ellint_rj_data.ipp:// Copyright (c) 2006 John Maddock +erf_data.ipp:// (C) Copyright John Maddock 2006-7. +erf_inv_data.ipp:// (C) Copyright John Maddock 2006-7. +erf_large_data.ipp:// (C) Copyright John Maddock 2006-7. +erf_small_data.ipp:// (C) Copyright John Maddock 2006. +erfc_inv_big_data.ipp:// (C) Copyright John Maddock 2006-7. +erfc_inv_data.ipp:// (C) Copyright John Maddock 2006-7. +expint_1_data.ipp:// Copyright John Maddock 2008. +expint_data.ipp:// Copyright John Maddock 2008. +expint_small_data.ipp:// Copyright John Maddock 2008. +expinti_data.ipp:// Copyright John Maddock 2008. +expinti_data_double.ipp:// Copyright John Maddock 2008. +expinti_data_long.ipp:// Copyright John Maddock 2008. +functor.hpp:// (C) Copyright John Maddock 2007. +gamma_inv_big_data.ipp:// (C) Copyright John Maddock 2006-7. +gamma_inv_data.ipp:// (C) Copyright John Maddock 2006-7. +gamma_inv_small_data.ipp:// (C) Copyright John Maddock 2006-7. +handle_test_result.hpp:// (C) Copyright John Maddock 2006-7. +hermite.ipp:// (C) Copyright John Maddock 2006-7. +hypergeometric_dist_data2.ipp:// Copyright John Maddock 2008 +hypergeometric_test_data.ipp:// Copyright Gautam Sewani 2008 +hypot_test.cpp:// (C) Copyright John Maddock 2005. +ibeta_data.ipp:// (C) Copyright John Maddock 2006. +ibeta_int_data.ipp:// (C) Copyright John Maddock 2006-7. +ibeta_inv_data.ipp:// (C) Copyright John Maddock 2006-7. +ibeta_inva_data.ipp:// (C) Copyright John Maddock 2006-7. +ibeta_large_data.ipp:// (C) Copyright John Maddock 2006. +ibeta_small_data.ipp:// (C) Copyright John Maddock 2006. +igamma_big_data.ipp:// (C) Copyright John Maddock 2006. +igamma_int_data.ipp:// (C) Copyright John Maddock 2006-7. +igamma_inva_data.ipp:// (C) Copyright John Maddock 2006-7. +igamma_med_data.ipp:// (C) Copyright John Maddock 2006. +igamma_small_data.ipp:// (C) Copyright John Maddock 2006. +jacobi_elliptic.ipp:// Copyright John Maddock 2012. +jacobi_elliptic_small.ipp:// Copyright John Maddock 2012. +jacobi_large_phi.ipp:// Copyright John Maddock 2012. +jacobi_near_1.ipp:// Copyright John Maddock 2012. +laguerre2.ipp:// (C) Copyright John Maddock 2006-7. +laguerre3.ipp:// (C) Copyright John Maddock 2006-7. +legendre_p.ipp:// (C) Copyright John Maddock 2006-7. +legendre_p_large.ipp:// (C) Copyright John Maddock 2006-7. +log1p_expm1_data.ipp:// (C) Copyright John Maddock 2006-7. +log1p_expm1_test.cpp:// Copyright John Maddock 2005. 
+log1p_expm1_test.cpp:// Copyright Paul A. Bristow 2010 +log1p_expm1_test.hpp:// Copyright John Maddock 2005. +log1p_expm1_test.hpp:// Copyright Paul A. Bristow 2010 +mpfr_concept_check.cpp:// Copyright John Maddock 2007-8. +mpreal_concept_check.cpp:// Copyright John Maddock 2007-8. +multiprc_concept_check_1.cpp:// Copyright John Maddock 2013. +multiprc_concept_check_2.cpp:// Copyright John Maddock 2013. +multiprc_concept_check_3.cpp:// Copyright John Maddock 2013. +multiprc_concept_check_4.cpp:// Copyright John Maddock 2013. +ncbeta.ipp:// Copyright John Maddock 2008. +ncbeta_big.ipp:// Copyright John Maddock 2008. +nccs.ipp:// Copyright John Maddock 2008. +nccs_big.ipp:// Copyright John Maddock 2008. +nct.ipp:// Copyright John Maddock 2008. +nct_asym.ipp:// Copyright John Maddock 2012. +nct_small_delta.ipp:// Copyright John Maddock 2012. +negative_binomial_quantile.ipp:// (C) Copyright John Maddock 2006-7. +ntl_concept_check.cpp:// Copyright John Maddock 2007-8. +ntl_concept_check.cpp:// Copyright Paul A. Bristow 2009, 2011 +owens_t.ipp:// Copyright John Maddock 2012. +owens_t_T7.hpp:// Copyright (C) Benjamin Sobotta 2012 +owens_t_large_data.ipp:// Copyright John Maddock 2012. +pch.hpp:// Copyright John Maddock 2008. +pch_light.hpp:// Copyright John Maddock 2008. +poisson_quantile.ipp:// (C) Copyright John Maddock 2006-7. +pow_test.cpp:// (C) Copyright Bruno Lalande 2008. +powm1_sqrtp1m1_test.cpp:// (C) Copyright John Maddock 2006. +powm1_sqrtp1m1_test.hpp:// Copyright John Maddock 2006. +s_.ipp:// Copyright (c) 2006 Johan Rade +s_.ipp:// Copyright (c) 2012 Paul A. Bristow +sinc_test.hpp:// (C) Copyright Hubert Holin 2003. +sinhc_test.hpp:// (C) Copyright Hubert Holin 2003. +special_functions_test.cpp:// (C) Copyright Hubert Holin 2003. +special_functions_test.cpp: BOOST_TEST_MESSAGE("(C) Copyright Hubert Holin 2003-2005."); +sph_bessel_data.ipp:// Copyright (c) 2007 John Maddock +sph_neumann_data.ipp:// Copyright (c) 2007 John Maddock +spherical_harmonic.ipp:// (C) Copyright John Maddock 2006-7. +std_real_concept_check.cpp:// Copyright John Maddock 2006. +table_type.hpp:// Copyright John Maddock 2012. +test_airy.cpp:// Copyright John Maddock 2012 +test_archive.cpp:// Copyright (c) 2006 Johan Rade +test_archive.cpp:// Copyright (c) 2011 Paul A. Bristow - filename changes for boost-trunk. +test_basic_nonfinite.cpp:// Copyright (c) 2006 Johan Rade +test_basic_nonfinite.cpp:// Copyright (c) 2011 Paul A. Bristow comments +test_basic_nonfinite.cpp:// Copyright (c) 2011 John Maddock +test_bernoulli.cpp:// Copyright John Maddock 2006. +test_bernoulli.cpp:// Copyright Paul A. Bristow 2007, 2012. +test_bessel_airy_zeros.cpp:// Copyright John Maddock 2013 +test_bessel_airy_zeros.cpp:// Copyright Christopher Kormanyos 2013. +test_bessel_airy_zeros.cpp:// Copyright Paul A. Bristow 2013. +test_bessel_hooks.hpp:// (C) Copyright John Maddock 2007. +test_bessel_i.cpp:// (C) Copyright John Maddock 2007. +test_bessel_i.hpp:// (C) Copyright John Maddock 2007. +test_bessel_j.cpp:// (C) Copyright John Maddock 2007. +test_bessel_j.hpp:// (C) Copyright John Maddock 2007. +test_bessel_k.cpp:// Copyright John Maddock 2006, 2007 +test_bessel_k.cpp:// Copyright Paul A. Bristow 2007 +test_bessel_k.hpp:// (C) Copyright John Maddock 2007. +test_bessel_y.cpp:// (C) Copyright John Maddock 2007. +test_bessel_y.hpp:// (C) Copyright John Maddock 2007. +test_beta.cpp:// Copyright John Maddock 2006. +test_beta.cpp:// Copyright Paul A. Bristow 2007, 2009 +test_beta.hpp:// Copyright John Maddock 2006. 
+test_beta.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_beta_dist.cpp:// Copyright John Maddock 2006. +test_beta_dist.cpp:// Copyright Paul A. Bristow 2007, 2009, 2010, 2012. +test_beta_hooks.hpp:// (C) Copyright John Maddock 2006. +test_binomial.cpp:// Copyright John Maddock 2006. +test_binomial.cpp:// Copyright Paul A. Bristow 2007. +test_binomial_coeff.cpp:// (C) Copyright John Maddock 2006. +test_binomial_coeff.hpp:// Copyright John Maddock 2006. +test_binomial_coeff.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_carlson.cpp:// Copyright 2006 John Maddock +test_carlson.cpp:// Copyright Paul A. Bristow 2007. +test_carlson.hpp:// Copyright John Maddock 2006. +test_carlson.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_cauchy.cpp:// Copyright John Maddock 2006, 2007. +test_cauchy.cpp:// Copyright Paul A. Bristow 2007 +test_cbrt.cpp:// Copyright John Maddock 2006. +test_cbrt.cpp:// Copyright Paul A. Bristow 2010 +test_cbrt.hpp:// Copyright John Maddock 2006. +test_cbrt.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_chi_squared.cpp:// Copyright Paul A. Bristow 2006. +test_chi_squared.cpp:// Copyright John Maddock 2007. +test_classify.cpp:// Copyright John Maddock 2006. +test_classify.cpp:// Copyright Paul A. Bristow 2007 +test_common_factor_gmpxx.cpp:// (C) Copyright John Maddock 2010. +test_constant_generate.cpp:// Copyright John Maddock 2010. +test_constants.cpp:// Copyright Paul Bristow 2007, 2011. +test_constants.cpp:// Copyright John Maddock 2006, 2011. +test_digamma.cpp:// (C) Copyright John Maddock 2006. +test_digamma.hpp:// Copyright John Maddock 2006. +test_digamma.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_dist_overloads.cpp:// Copyright John Maddock 2006. +test_dist_overloads.cpp:// Copyright Paul A. Bristow 2007. +test_ellint_1.cpp:// Copyright Xiaogang Zhang 2006 +test_ellint_1.cpp:// Copyright John Maddock 2006, 2007 +test_ellint_1.cpp:// Copyright Paul A. Bristow 2007 +test_ellint_1.hpp:// Copyright John Maddock 2006. +test_ellint_1.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_ellint_2.cpp:// Copyright Xiaogang Zhang 2006 +test_ellint_2.cpp:// Copyright John Maddock 2006, 2007 +test_ellint_2.cpp:// Copyright Paul A. Bristow 2007 +test_ellint_2.hpp:// Copyright John Maddock 2006. +test_ellint_2.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_ellint_3.cpp:// Copyright Xiaogang Zhang 2006 +test_ellint_3.cpp:// Copyright John Maddock 2006, 2007 +test_ellint_3.cpp:// Copyright Paul A. Bristow 2007 +test_ellint_3.hpp:// Copyright John Maddock 2006. +test_ellint_3.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_erf.cpp:// Copyright John Maddock 2006. +test_erf.cpp:// Copyright Paul A. Bristow 2007 +test_erf.hpp:// Copyright John Maddock 2006. +test_erf.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_erf_hooks.hpp:// (C) Copyright John Maddock 2006. +test_error_handling.cpp:// Copyright Paul A. Bristow 2006-7. +test_error_handling.cpp:// Copyright John Maddock 2006-7. +test_expint.cpp:// (C) Copyright John Maddock 2007. +test_expint.hpp:// Copyright John Maddock 2006. +test_expint.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_expint_hooks.hpp:// (C) Copyright John Maddock 2006. +test_exponential_dist.cpp:// Copyright John Maddock 2006. +test_exponential_dist.cpp:// Copyright Paul A. Bristow 2007. +test_extreme_value.cpp:// Copyright John Maddock 2006. +test_factorials.cpp:// Copyright John Maddock 2006. +test_find_location.cpp:// Copyright John Maddock 2007. +test_find_location.cpp:// Copyright Paul A. Bristow 2007. 
+test_find_scale.cpp:// Copyright John Maddock 2007. +test_find_scale.cpp:// Copyright Paul A. Bristow 2007. +test_fisher_f.cpp:// Copyright Paul A. Bristow 2006. +test_fisher_f.cpp:// Copyright John Maddock 2007. +test_fisher_f.cpp: // Distcalc version 1.2 Copyright 2002 H Lohninger, TU Wein +test_gamma.cpp:// (C) Copyright John Maddock 2006. +test_gamma.hpp:// Copyright John Maddock 2006. +test_gamma.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_gamma_data.ipp:// (C) Copyright John Maddock 2006. +test_gamma_dist.cpp:// Copyright John Maddock 2006. +test_gamma_dist.cpp:// Copyright Paul A. Bristow 2007, 2010. +test_gamma_hooks.hpp:// (C) Copyright John Maddock 2006. +test_geometric.cpp:// Copyright Paul A. Bristow 2010. +test_geometric.cpp:// Copyright John Maddock 2010. +test_hankel.cpp:// Copyright John Maddock 2012 +test_hermite.cpp:// Copyright John Maddock 2006, 2007 +test_hermite.cpp:// Copyright Paul A. Bristow 2007 +test_hermite.hpp:// Copyright John Maddock 2006. +test_hermite.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_hypergeometric_dist.cpp:// Copyright John Maddock 2008 +test_hypergeometric_dist.cpp:// Copyright Paul A. Bristow +test_hypergeometric_dist.cpp:// Copyright Gautam Sewani +test_ibeta.cpp:// (C) Copyright John Maddock 2006. +test_ibeta.hpp:// Copyright John Maddock 2006. +test_ibeta.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_ibeta_inv.cpp:// (C) Copyright John Maddock 2006. +test_ibeta_inv.hpp:// Copyright John Maddock 2006. +test_ibeta_inv.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_ibeta_inv_ab.cpp:// (C) Copyright John Maddock 2006. +test_ibeta_inv_ab.hpp:// Copyright John Maddock 2006. +test_ibeta_inv_ab.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_igamma.cpp:// (C) Copyright John Maddock 2006. +test_igamma.hpp:// Copyright John Maddock 2006. +test_igamma.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_igamma_inv.cpp:// (C) Copyright John Maddock 2006. +test_igamma_inv.hpp:// Copyright John Maddock 2006. +test_igamma_inv.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_igamma_inva.cpp:// (C) Copyright John Maddock 2006. +test_igamma_inva.hpp:// Copyright John Maddock 2006. +test_igamma_inva.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_instances/double_test_instances_4.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_4.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_8.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_9.cpp:// Copyright John Maddock 2011. +test_instances/Jamfile.v2:# Copyright ohn Maddock 2012 +test_instances/real_concept_test_instances_5.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_6.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_4.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_7.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_2.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_5.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_9.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_1.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_6.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_6.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_7.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_7.cpp:// Copyright John Maddock 2011. 
+test_instances/float_test_instances_3.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_6.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_9.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_2.cpp:// Copyright John Maddock 2011. +test_instances/pch.hpp:// Copyright John Maddock 2012. +test_instances/ldouble_test_instances_2.cpp:// Copyright John Maddock 2011. +test_instances/long_double_test_instances_1.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_7.cpp:// Copyright John Maddock 2011. +test_instances/test_instances.hpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_10.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_3.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_3.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_10.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_5.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_8.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_8.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_1.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_10.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_10.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_9.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_4.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_3.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_2.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_1.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_8.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_5.cpp:// Copyright John Maddock 2011. +test_instantiate1.cpp:// Copyright John Maddock 2006. +test_instantiate2.cpp:// Copyright John Maddock 2006. +test_inv_hyp.cpp:// (C) Copyright John Maddock 2006. +test_inverse_chi_squared.cpp:// Copyright Paul A. Bristow 2010. +test_inverse_chi_squared.cpp:// Copyright John Maddock 2010. +test_inverse_chi_squared_distribution.cpp:// Copyright Paul A. Bristow 2010. +test_inverse_chi_squared_distribution.cpp:// Copyright John Maddock 2010. +test_inverse_gamma_distribution.cpp:// Copyright Paul A. Bristow 2010. +test_inverse_gamma_distribution.cpp:// Copyright John Maddock 2010. +test_inverse_gaussian.cpp:// Copyright Paul A. Bristow 2010. +test_inverse_gaussian.cpp:// Copyright John Maddock 2010. +test_jacobi.cpp:// Copyright John Maddock 2012 +test_jacobi.hpp:// Copyright John Maddock 2006. +test_jacobi.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_laguerre.cpp:// (C) Copyright John Maddock 2006. +test_laguerre.hpp:// Copyright John Maddock 2006. +test_laguerre.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_laplace.cpp:// Copyright Thijs van den Berg, 2008. +test_laplace.cpp:// Copyright John Maddock 2008. +test_laplace.cpp:// Copyright Paul A. Bristow 2008, 2009. +test_ldouble_simple.cpp:// Copyright John Maddock 2013. +test_legacy_nonfinite.cpp:// Copyright (c) 2006 Johan Rade +test_legacy_nonfinite.cpp:// Copyright (c) 2011 Paul A. Bristow comments +test_legendre.cpp:// (C) Copyright John Maddock 2006. +test_legendre.hpp:// Copyright John Maddock 2006. +test_legendre.hpp:// Copyright Paul A. 
Bristow 2007, 2009 +test_legendre_hooks.hpp:// (C) Copyright John Maddock 2006. +test_lexical_cast.cpp:// Copyright (c) 2006 Johan Rade +test_lexical_cast.cpp:// Copyright (c) 2011 Paul A. Bristow incorporated Boost.Math +test_logistic_dist.cpp:// Copyright 2008 Gautam Sewani +test_lognormal.cpp:// Copyright John Maddock 2006. +test_lognormal.cpp:// Copyright Paul A. Bristow 2007 +test_long_double_support.cpp:// Copyright John Maddock 2009 +test_math_fwd.cpp:// Copyright John Maddock 2010. +test_math_fwd.cpp:// Copyright Paul A. Bristow 2010. +test_minima.cpp:// Copyright John Maddock 2006. +test_minima.cpp:// Copyright Paul A. Bristow 2007. +test_nc_beta.cpp:// Copyright John Maddock 2008. +test_nc_chi_squared.cpp:// Copyright John Maddock 2008. +test_nc_f.cpp:// Copyright John Maddock 2008. +test_nc_t.cpp:// Copyright John Maddock 2008, 2012. +test_nc_t.cpp:// Copyright Paul A. Bristow 2012. +test_ncbeta_hooks.hpp:// (C) Copyright John Maddock 2008. +test_nccs_hooks.hpp:// (C) Copyright John Maddock 2008. +test_negative_binomial.cpp:// Copyright Paul A. Bristow 2007. +test_negative_binomial.cpp:// Copyright John Maddock 2006. +test_next.cpp:// (C) Copyright John Maddock 2008. +test_nonfinite_io.cpp:// Copyright 2011 Paul A. Bristow +test_nonfinite_trap.cpp:// Copyright (c) 2006 Johan Rade +test_nonfinite_trap.cpp:// Copyright (c) 2011 Paul A. Bristow To incorporate into Boost.Math +test_normal.cpp:// Copyright Paul A. Bristow 2010. +test_normal.cpp:// Copyright John Maddock 2007. +test_out_of_range.hpp:// Copyright John Maddock 2012. +test_owens_t.cpp:// Copyright Paul A. Bristow 2012. +test_owens_t.cpp:// Copyright Benjamin Sobotta 2012. +test_pareto.cpp:// Copyright Paul A. Bristow 2007, 2009. +test_pareto.cpp:// Copyright John Maddock 2006. +test_poisson.cpp:// Copyright Paul A. Bristow 2007. +test_poisson.cpp:// Copyright John Maddock 2006. +test_policy.cpp:// Copyright John Maddock 2007. +test_policy_2.cpp:// Copyright John Maddock 2007. +test_policy_3.cpp:// Copyright John Maddock 2007. +test_policy_4.cpp:// Copyright John Maddock 2007. +test_policy_5.cpp:// Copyright John Maddock 2007. +test_policy_6.cpp:// Copyright John Maddock 2007. +test_policy_7.cpp:// Copyright John Maddock 2007. +test_policy_8.cpp:// Copyright John Maddock 2007. +test_policy_sf.cpp:// (C) Copyright John Maddock 2007. +test_print_info_on_type.cpp:// Copyright John Maddock 2010. +test_rational_instances/test_rational_ldouble2.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_float2.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_double2.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_double3.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_ldouble1.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_float4.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_double5.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_double4.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_real_concept1.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_real_concept3.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational.hpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_ldouble3.cpp:// (C) Copyright John Maddock 2006-7. 
+test_rational_instances/test_rational_float3.cpp:// (C) Copyright John Maddock 2006-7.
+test_rational_instances/test_rational_real_concept5.cpp:// (C) Copyright John Maddock 2006-7.
+test_rational_instances/test_rational_ldouble5.cpp:// (C) Copyright John Maddock 2006-7.
+test_rational_instances/test_rational_ldouble4.cpp:// (C) Copyright John Maddock 2006-7.
+test_rational_instances/test_rational_double1.cpp:// (C) Copyright John Maddock 2006-7.
+test_rational_instances/test_rational_real_concept4.cpp:// (C) Copyright John Maddock 2006-7.
+test_rational_instances/test_rational_real_concept2.cpp:// (C) Copyright John Maddock 2006-7.
+test_rational_instances/test_rational_float1.cpp:// (C) Copyright John Maddock 2006-7.
+test_rationals.cpp:// (C) Copyright John Maddock 2006.
+test_rayleigh.cpp:// Copyright John Maddock 2006.
+test_real_concept.cpp:// Copyright John Maddock 2010
+test_real_concept_neg_bin.cpp:// Copyright Paul A. Bristow 2010.
+test_real_concept_neg_bin.cpp:// Copyright John Maddock 2010.
+test_remez.cpp:// Copyright John Maddock 2006
+test_remez.cpp:// Copyright Paul A. Bristow 2007
+test_roots.cpp:// (C) Copyright John Maddock 2006.
+test_round.cpp:// (C) Copyright John Maddock 2007.
+test_sign.cpp:#define BOOST_TEST_MAIN// Copyright John Maddock 2008
+test_sign.cpp:// (C) Copyright Paul A. Bristow 2011 (added tests for changesign)
+test_signed_zero.cpp:// Copyright 2006 Johan Rade
+test_signed_zero.cpp:// Copyright 2011 Paul A. Bristow To incorporate into Boost.Math
+test_signed_zero.cpp:// Copyright 2012 Paul A. Bristow with new tests.
+test_skew_normal.cpp:// Copyright Paul A. Bristow 2012.
+test_skew_normal.cpp:// Copyright John Maddock 2012.
+test_skew_normal.cpp:// Copyright Benjamin Sobotta 2012
+test_spherical_harmonic.cpp:// (C) Copyright John Maddock 2006.
+test_students_t.cpp:// Copyright Paul A. Bristow 2006.
+test_students_t.cpp:// Copyright John Maddock 2006.
+test_tgamma_ratio.cpp:// (C) Copyright John Maddock 2006.
+test_tgamma_ratio.hpp:// Copyright John Maddock 2006.
+test_tgamma_ratio.hpp:// Copyright Paul A. Bristow 2007, 2009
+test_toms748_solve.cpp:// (C) Copyright John Maddock 2006.
+test_tr1.c:/* (C) Copyright John Maddock 2008.
+test_tr1.cpp:// (C) Copyright John Maddock 2008.
+test_triangular.cpp:// Copyright Paul Bristow 2006, 2007.
+test_triangular.cpp:// Copyright John Maddock 2006, 2007.
+test_uniform.cpp:// Copyright Paul Bristow 2007.
+test_uniform.cpp:// Copyright John Maddock 2006.
+test_weibull.cpp:// Copyright John Maddock 2006, 2012.
+test_weibull.cpp:// Copyright Paul A. Bristow 2007, 2012.
+test_zeta.cpp:// (C) Copyright John Maddock 2006.
+test_zeta.hpp:// Copyright John Maddock 2006.
+test_zeta.hpp:// Copyright Paul A. Bristow 2007, 2009
+test_zeta_hooks.hpp:// (C) Copyright John Maddock 2006.
+tgamma_delta_ratio_data.ipp:// (C) Copyright John Maddock 2006-7.
+tgamma_delta_ratio_int.ipp:// (C) Copyright John Maddock 2006-7.
+tgamma_delta_ratio_int2.ipp:// (C) Copyright John Maddock 2006-7.
+tgamma_ratio_data.ipp:// (C) Copyright John Maddock 2006-7.
+zeta_1_below_data.ipp:// Copyright John Maddock 2008.
+zeta_1_up_data.ipp:// Copyright John Maddock 2008.
+zeta_data.ipp:// Copyright John Maddock 2008.
+zeta_neg_data.ipp:// Copyright John Maddock 2008.
+ztest_max_digits10.cpp: // Copyright 2010 Paul A. Bristow
+zztest_max_digits10.cpp:// Copyright 2010 Paul A. Bristow
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/boost.npz b/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/boost.npz
new file mode 100644
index 0000000..693f6a3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/boost.npz differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/gsl.npz b/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/gsl.npz
new file mode 100644
index 0000000..6032193
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/gsl.npz differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/local.npz b/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/local.npz
new file mode 100644
index 0000000..85458b0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/data/local.npz differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_basic.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_basic.py
new file mode 100644
index 0000000..2e47281
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_basic.py
@@ -0,0 +1,3458 @@
+# this program corresponds to special.py
+
+### Means test is not done yet
+# E Means test is giving error (E)
+# F Means test is failing (F)
+# EF Means test is giving error and Failing
+#! Means test is segfaulting
+# 8 Means test runs forever
+
+### test_besselpoly
+### test_mathieu_a
+### test_mathieu_even_coef
+### test_mathieu_odd_coef
+### test_modfresnelp
+### test_modfresnelm
+# test_pbdv_seq
+### test_pbvv_seq
+### test_sph_harm
+
+from __future__ import division, print_function, absolute_import
+
+import itertools
+
+import numpy as np
+from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
+        log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
+
+import pytest
+from pytest import raises as assert_raises
+from numpy.testing import (assert_equal, assert_almost_equal,
+        assert_array_equal, assert_array_almost_equal, assert_approx_equal,
+        assert_, assert_allclose,
+        assert_array_almost_equal_nulp)
+
+from scipy import special
+import scipy.special._ufuncs as cephes
+from scipy.special import ellipk, zeta
+
+from scipy.special._testutils import with_special_errors, \
+     assert_func_equal, FuncData
+
+from scipy._lib._numpy_compat import suppress_warnings
+from scipy._lib._version import NumpyVersion
+
+import math
+
+
+class TestCephes(object):
+    def test_airy(self):
+        cephes.airy(0)
+
+    def test_airye(self):
+        cephes.airye(0)
+
+    def test_binom(self):
+        n = np.array([0.264, 4, 5.2, 17])
+        k = np.array([2, 0.4, 7, 3.3])
+        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
+                      ).reshape(2, -1).T
+        rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
+            -0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
+            [10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
+            [136, 3.5252179590758828, 19448, 1024.5526916174495]])
+        assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
+
+        # Test branches in implementation
+        np.random.seed(1234)
+        n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
+        k = np.arange(0, 102)
+        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
+                      ).reshape(2, -1).T
+
+        assert_func_equal(cephes.binom,
+                          cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
+                          nk,
+                          atol=1e-10, rtol=1e-10)
+
+    def test_binom_2(self):
+        # Test branches in implementation
+        np.random.seed(1234)
+        n = np.r_[np.logspace(1, 300, 20)]
+        k = np.arange(0, 102)
+        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
+                      ).reshape(2, -1).T
+
+        assert_func_equal(cephes.binom,
+                          cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
+                          nk,
+                          atol=1e-10, rtol=1e-10)
+
+    def test_binom_exact(self):
+        @np.vectorize
+        def binom_int(n, k):
+            n = int(n)
+            k = int(k)
+            num = int(1)
+            den = int(1)
+            for i in range(1, k+1):
+                num *= i + n - k
+                den *= i
+            return float(num/den)
+
+        np.random.seed(1234)
+        n = np.arange(1, 15)
+        k = np.arange(0, 15)
+        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
+                      ).reshape(2, -1).T
+        nk = nk[nk[:,0] >= nk[:,1]]
+        assert_func_equal(cephes.binom,
+                          binom_int(nk[:,0], nk[:,1]),
+                          nk,
+                          atol=0, rtol=0)
+
+    def test_binom_nooverflow_8346(self):
+        # Test that binom(n, k) doesn't overflow prematurely
+        dataset = [
+            (1000, 500, 2.70288240945436551e+299),
+            (1002, 501, 1.08007396880791225e+300),
+            (1004, 502, 4.31599279169058121e+300),
+            (1006, 503, 1.72468101616263781e+301),
+            (1008, 504, 6.89188009236419153e+301),
+            (1010, 505, 2.75402257948335448e+302),
+            (1012, 506, 1.10052048531923757e+303),
+            (1014, 507, 4.39774063758732849e+303),
+            (1016, 508, 1.75736486108312519e+304),
+            (1018, 509, 7.02255427788423734e+304),
+            (1020, 510, 2.80626776829962255e+305),
+            (1022, 511, 1.12140876377061240e+306),
+            (1024, 512, 4.48125455209897109e+306),
+            (1026, 513, 1.79075474304149900e+307),
+            (1028, 514, 7.15605105487789676e+307)
+        ]
+        dataset = np.asarray(dataset)
+        FuncData(cephes.binom, dataset, (0, 1), 2, rtol=1e-12).check()
+
+    def test_bdtr(self):
+        assert_equal(cephes.bdtr(1,1,0.5),1.0)
+
+    def test_bdtri(self):
+        assert_equal(cephes.bdtri(1,3,0.5),0.5)
+
+    def test_bdtrc(self):
+        assert_equal(cephes.bdtrc(1,3,0.5),0.5)
+
+    def test_bdtrin(self):
+        assert_equal(cephes.bdtrin(1,0,1),5.0)
+
+    def test_bdtrik(self):
+        cephes.bdtrik(1,3,0.5)
+
+    def test_bei(self):
+        assert_equal(cephes.bei(0),0.0)
+
+    def test_beip(self):
+        assert_equal(cephes.beip(0),0.0)
+
+    def test_ber(self):
+        assert_equal(cephes.ber(0),1.0)
+
+    def test_berp(self):
+        assert_equal(cephes.berp(0),0.0)
+
+    def test_besselpoly(self):
+        assert_equal(cephes.besselpoly(0,0,0),1.0)
+
+    def test_beta(self):
+        assert_equal(cephes.beta(1,1),1.0)
+        assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
+        assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
+                        rtol=1e-13, atol=0)
+
+    def test_betainc(self):
+        assert_equal(cephes.betainc(1,1,1),1.0)
+        assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
+
+    def test_betaln(self):
+        assert_equal(cephes.betaln(1,1),0.0)
+        assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
+        assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
+                        rtol=1e-14, atol=0)
+
+    def test_betaincinv(self):
+        assert_equal(cephes.betaincinv(1,1,1),1.0)
+        assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
+                        8.4231316935498957e-21, rtol=3e-12, atol=0)
+
+    def test_beta_inf(self):
+        assert_(np.isinf(special.beta(-1, 2)))
+
+    def test_btdtr(self):
+        assert_equal(cephes.btdtr(1,1,1),1.0)
+
+    def test_btdtri(self):
+        assert_equal(cephes.btdtri(1,1,1),1.0)
+
+    def test_btdtria(self):
+        assert_equal(cephes.btdtria(1,1,1),5.0)
+
+    def test_btdtrib(self):
+        assert_equal(cephes.btdtrib(1,1,1),5.0)
+
+    def test_cbrt(self):
+        assert_approx_equal(cephes.cbrt(1),1.0)
+
+    def test_chdtr(self):
+        assert_equal(cephes.chdtr(1,0),0.0)
+
+    def test_chdtrc(self):
+        assert_equal(cephes.chdtrc(1,0),1.0)
+
+    def test_chdtri(self):
+        assert_equal(cephes.chdtri(1,1),0.0)
+
+    def test_chdtriv(self):
+        assert_equal(cephes.chdtriv(0,0),5.0)
+
+    def test_chndtr(self):
+        assert_equal(cephes.chndtr(0,1,0),0.0)
+
+        # Each row holds (x, nu, lam, expected_value)
+        # These values were computed using Wolfram Alpha with
+        #     CDF[NoncentralChiSquareDistribution[nu, lam], x]
+        values = np.array([
+            [25.00, 20.0, 400, 4.1210655112396197139e-57],
+            [25.00, 8.00, 250, 2.3988026526832425878e-29],
+            [0.001, 8.00, 40., 5.3761806201366039084e-24],
+            [0.010, 8.00, 40., 5.45396231055999457039e-20],
+            [20.00, 2.00, 107, 1.39390743555819597802e-9],
+            [22.50, 2.00, 107, 7.11803307138105870671e-9],
+            [25.00, 2.00, 107, 3.11041244829864897313e-8],
+            [3.000, 2.00, 1.0, 0.62064365321954362734],
+            [350.0, 300., 10., 0.93880128006276407710],
+            [100.0, 13.5, 10., 0.99999999650104210949],
+            [700.0, 20.0, 400, 0.99999999925680650105],
+            [150.0, 13.5, 10., 0.99999999999999983046],
+            [160.0, 13.5, 10., 0.99999999999999999518],  # 1.0
+        ])
+        cdf = cephes.chndtr(values[:, 0], values[:, 1], values[:, 2])
+        assert_allclose(cdf, values[:, 3], rtol=1e-12)
+
+        assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
+        assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
+        assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
+        assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
+        assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
+
+    def test_chndtridf(self):
+        assert_equal(cephes.chndtridf(0,0,1),5.0)
+
+    def test_chndtrinc(self):
+        assert_equal(cephes.chndtrinc(0,1,0),5.0)
+
+    def test_chndtrix(self):
+        assert_equal(cephes.chndtrix(0,1,0),0.0)
+
+    def test_cosdg(self):
+        assert_equal(cephes.cosdg(0),1.0)
+
+    def test_cosm1(self):
+        assert_equal(cephes.cosm1(0),0.0)
+
+    def test_cotdg(self):
+        assert_almost_equal(cephes.cotdg(45),1.0)
+
+    def test_dawsn(self):
+        assert_equal(cephes.dawsn(0),0.0)
+        assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
+
+    def test_diric(self):
+        # Test behavior near multiples of 2pi. Regression test for issue
+        # described in gh-4001.
+        n_odd = [1, 5, 25]
+        x = np.array(2*np.pi + 5e-5).astype(np.float32)
+        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
+        x = np.array(2*np.pi + 1e-9).astype(np.float64)
+        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
+        x = np.array(2*np.pi + 1e-15).astype(np.float64)
+        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
+        if hasattr(np, 'float128'):
+            # No float128 available in 32-bit numpy
+            x = np.array(2*np.pi + 1e-12).astype(np.float128)
+            assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
+
+        n_even = [2, 4, 24]
+        x = np.array(2*np.pi + 1e-9).astype(np.float64)
+        assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
+
+        # Test at some values not near a multiple of pi
+        x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
+        octave_result = [0.872677996249965, 0.539344662916632,
+                         0.127322003750035, -0.206011329583298]
+        assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
+
+    def test_diric_broadcasting(self):
+        x = np.arange(5)
+        n = np.array([1, 3, 7])
+        assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
+
+    def test_ellipe(self):
+        assert_equal(cephes.ellipe(1),1.0)
+
+    def test_ellipeinc(self):
+        assert_equal(cephes.ellipeinc(0,1),0.0)
+
+    def test_ellipj(self):
+        cephes.ellipj(0,1)
+
+    def test_ellipk(self):
+        assert_allclose(ellipk(0), pi/2)
+
+    def test_ellipkinc(self):
+        assert_equal(cephes.ellipkinc(0,0),0.0)
+
+    def test_erf(self):
+        assert_equal(cephes.erf(0), 0.0)
+
+    def test_erf_symmetry(self):
+        x = 5.905732037710919
+        assert_equal(cephes.erf(x) + cephes.erf(-x), 0.0)
+
+    def test_erfc(self):
+        assert_equal(cephes.erfc(0), 1.0)
+
+    def test_exp1(self):
+        cephes.exp1(1)
+
+    def test_expi(self):
+        cephes.expi(1)
+
+    def test_expn(self):
+        cephes.expn(1,1)
+
+    def test_exp1_reg(self):
+        # Regression for #834
+        a = cephes.exp1(-complex(19.9999990))
+        b = cephes.exp1(-complex(19.9999991))
+        assert_array_almost_equal(a.imag, b.imag)
+
+    def test_exp10(self):
+        assert_approx_equal(cephes.exp10(2),100.0)
+
+    def test_exp2(self):
+        assert_equal(cephes.exp2(2),4.0)
+
+    def test_expm1(self):
+        assert_equal(cephes.expm1(0),0.0)
+        assert_equal(cephes.expm1(np.inf), np.inf)
+        assert_equal(cephes.expm1(-np.inf), -1)
+        assert_equal(cephes.expm1(np.nan), np.nan)
+
+    # Earlier numpy versions don't guarantee that npy_cexp conforms to C99.
+ @pytest.mark.skipif(NumpyVersion(np.__version__) < '1.9.0', reason='') + def test_expm1_complex(self): + expm1 = cephes.expm1 + assert_equal(expm1(0 + 0j), 0 + 0j) + assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0)) + assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf)) + assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf)) + assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf)) + assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf)) + assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan)) + assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0)) + assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0)) + assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan)) + assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan)) + assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan)) + + @pytest.mark.xfail(reason='The real part of expm1(z) bad at these points') + def test_expm1_complex_hard(self): + # The real part of this function is difficult to evaluate when + # z.real = -log(cos(z.imag)). + y = np.array([0.1, 0.2, 0.3, 5, 11, 20]) + x = -np.log(np.cos(y)) + z = x + 1j*y + + # evaluate using mpmath.expm1 with dps=1000 + expected = np.array([-5.5507901846769623e-17+0.10033467208545054j, + 2.4289354732893695e-18+0.20271003550867248j, + 4.5235500262585768e-17+0.30933624960962319j, + 7.8234305217489006e-17-3.3805150062465863j, + -1.3685191953697676e-16-225.95084645419513j, + 8.7175620481291045e-17+2.2371609442247422j]) + found = cephes.expm1(z) + # this passes. + assert_array_almost_equal_nulp(found.imag, expected.imag, 3) + # this fails. + assert_array_almost_equal_nulp(found.real, expected.real, 20) + + def test_fdtr(self): + assert_equal(cephes.fdtr(1, 1, 0), 0.0) + # Computed using Wolfram Alpha: CDF[FRatioDistribution[1e-6, 5], 10] + assert_allclose(cephes.fdtr(1e-6, 5, 10), 0.9999940790193488, + rtol=1e-12) + + def test_fdtrc(self): + assert_equal(cephes.fdtrc(1, 1, 0), 1.0) + # Computed using Wolfram Alpha: + # 1 - CDF[FRatioDistribution[2, 1/10], 1e10] + assert_allclose(cephes.fdtrc(2, 0.1, 1e10), 0.27223784621293512, + rtol=1e-12) + + def test_fdtri(self): + assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]), + array([0.9937365, 1.00630298]), rtol=1e-6) + # From Wolfram Alpha: + # CDF[FRatioDistribution[1/10, 1], 3] = 0.8756751669632105666874... 
+ p = 0.8756751669632105666874 + assert_allclose(cephes.fdtri(0.1, 1, p), 3, rtol=1e-12) + + @pytest.mark.xfail(reason='Returns nan on i686.') + def test_fdtri_mysterious_failure(self): + assert_allclose(cephes.fdtri(1, 1, 0.5), 1) + + def test_fdtridfd(self): + assert_equal(cephes.fdtridfd(1,0,0),5.0) + + def test_fresnel(self): + assert_equal(cephes.fresnel(0),(0.0,0.0)) + + def test_gamma(self): + assert_equal(cephes.gamma(5),24.0) + + def test_gammainc(self): + assert_equal(cephes.gammainc(5,0),0.0) + + def test_gammaincc(self): + assert_equal(cephes.gammaincc(5,0),1.0) + + def test_gammainccinv(self): + assert_equal(cephes.gammainccinv(5,1),0.0) + + def test_gammaln(self): + cephes.gammaln(10) + + def test_gammasgn(self): + vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64) + assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals))) + + def test_gdtr(self): + assert_equal(cephes.gdtr(1,1,0),0.0) + + def test_gdtr_inf(self): + assert_equal(cephes.gdtr(1,1,np.inf),1.0) + + def test_gdtrc(self): + assert_equal(cephes.gdtrc(1,1,0),1.0) + + def test_gdtria(self): + assert_equal(cephes.gdtria(0,1,1),0.0) + + def test_gdtrib(self): + cephes.gdtrib(1,0,1) + # assert_equal(cephes.gdtrib(1,0,1),5.0) + + def test_gdtrix(self): + cephes.gdtrix(1,1,.1) + + def test_hankel1(self): + cephes.hankel1(1,1) + + def test_hankel1e(self): + cephes.hankel1e(1,1) + + def test_hankel2(self): + cephes.hankel2(1,1) + + def test_hankel2e(self): + cephes.hankel2e(1,1) + + def test_hyp1f1(self): + assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0)) + assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095) + cephes.hyp1f1(1,1,1) + + def test_hyp2f1(self): + assert_equal(cephes.hyp2f1(1,1,1,0),1.0) + + def test_hyperu(self): + assert_equal(cephes.hyperu(0,1,1),1.0) + + def test_i0(self): + assert_equal(cephes.i0(0),1.0) + + def test_i0e(self): + assert_equal(cephes.i0e(0),1.0) + + def test_i1(self): + assert_equal(cephes.i1(0),0.0) + + def test_i1e(self): + assert_equal(cephes.i1e(0),0.0) + + def test_it2i0k0(self): + cephes.it2i0k0(1) + + def test_it2j0y0(self): + cephes.it2j0y0(1) + + def test_it2struve0(self): + cephes.it2struve0(1) + + def test_itairy(self): + cephes.itairy(1) + + def test_iti0k0(self): + assert_equal(cephes.iti0k0(0),(0.0,0.0)) + + def test_itj0y0(self): + assert_equal(cephes.itj0y0(0),(0.0,0.0)) + + def test_itmodstruve0(self): + assert_equal(cephes.itmodstruve0(0),0.0) + + def test_itstruve0(self): + assert_equal(cephes.itstruve0(0),0.0) + + def test_iv(self): + assert_equal(cephes.iv(1,0),0.0) + + def _check_ive(self): + assert_equal(cephes.ive(1,0),0.0) + + def test_j0(self): + assert_equal(cephes.j0(0),1.0) + + def test_j1(self): + assert_equal(cephes.j1(0),0.0) + + def test_jn(self): + assert_equal(cephes.jn(0,0),1.0) + + def test_jv(self): + assert_equal(cephes.jv(0,0),1.0) + + def _check_jve(self): + assert_equal(cephes.jve(0,0),1.0) + + def test_k0(self): + cephes.k0(2) + + def test_k0e(self): + cephes.k0e(2) + + def test_k1(self): + cephes.k1(2) + + def test_k1e(self): + cephes.k1e(2) + + def test_kei(self): + cephes.kei(2) + + def test_keip(self): + assert_equal(cephes.keip(0),0.0) + + def test_ker(self): + cephes.ker(2) + + def test_kerp(self): + cephes.kerp(2) + + def _check_kelvin(self): + cephes.kelvin(2) + + def test_kn(self): + cephes.kn(1,1) + + def test_kolmogi(self): + assert_equal(cephes.kolmogi(1),0.0) + assert_(np.isnan(cephes.kolmogi(np.nan))) + + def test_kolmogorov(self): + assert_equal(cephes.kolmogorov(0), 1.0) + + def test_kolmogp(self): 
+ assert_equal(cephes._kolmogp(0), -0.0) + + def test_kolmogc(self): + assert_equal(cephes._kolmogc(0), 0.0) + + def test_kolmogci(self): + assert_equal(cephes._kolmogci(0), 0.0) + assert_(np.isnan(cephes._kolmogci(np.nan))) + + def _check_kv(self): + cephes.kv(1,1) + + def _check_kve(self): + cephes.kve(1,1) + + def test_log1p(self): + log1p = cephes.log1p + assert_equal(log1p(0), 0.0) + assert_equal(log1p(-1), -np.inf) + assert_equal(log1p(-2), np.nan) + assert_equal(log1p(np.inf), np.inf) + + # earlier numpy version don't guarantee that npy_clog conforms to C99 + @pytest.mark.skipif(NumpyVersion(np.__version__) < '1.9.0', reason='') + def test_log1p_complex(self): + log1p = cephes.log1p + c = complex + assert_equal(log1p(0 + 0j), 0 + 0j) + assert_equal(log1p(c(-1, 0)), c(-np.inf, 0)) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2)) + assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan)) + assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi)) + assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0)) + assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4)) + assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4)) + assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan)) + assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan)) + assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan)) + assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan)) + assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan)) + + def test_lpmv(self): + assert_equal(cephes.lpmv(0,0,1),1.0) + + def test_mathieu_a(self): + assert_equal(cephes.mathieu_a(1,0),1.0) + + def test_mathieu_b(self): + assert_equal(cephes.mathieu_b(1,0),1.0) + + def test_mathieu_cem(self): + assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0)) + + # Test AMS 20.2.27 + @np.vectorize + def ce_smallq(m, q, z): + z *= np.pi/180 + if m == 0: + return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2) + elif m == 1: + return cos(z) - q/8 * cos(3*z) # + O(q^2) + elif m == 2: + return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2) + else: + return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2) + m = np.arange(0, 100) + q = np.r_[0, np.logspace(-30, -9, 10)] + assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0], + ce_smallq(m[:,None], q[None,:], 0.123), + rtol=1e-14, atol=0) + + def test_mathieu_sem(self): + assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0)) + + # Test AMS 20.2.27 + @np.vectorize + def se_smallq(m, q, z): + z *= np.pi/180 + if m == 1: + return sin(z) - q/8 * sin(3*z) # + O(q^2) + elif m == 2: + return sin(2*z) - q*sin(4*z)/12 # + O(q^2) + else: + return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2) + m = np.arange(1, 100) + q = np.r_[0, np.logspace(-30, -9, 10)] + assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0], + se_smallq(m[:,None], q[None,:], 0.123), + rtol=1e-14, atol=0) + + def test_mathieu_modcem1(self): + assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0)) + + def test_mathieu_modcem2(self): + cephes.mathieu_modcem2(1,1,1) + + # Test reflection relation AMS 20.6.19 + m = np.arange(0, 4)[:,None,None] + q = np.r_[np.logspace(-2, 2, 10)][None,:,None] + z = np.linspace(0, 1, 7)[None,None,:] + + y1 = cephes.mathieu_modcem2(m, q, -z)[0] + + fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0] + y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0] + + assert_allclose(y1, y2, 
rtol=1e-10) + + def test_mathieu_modsem1(self): + assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0)) + + def test_mathieu_modsem2(self): + cephes.mathieu_modsem2(1,1,1) + + # Test reflection relation AMS 20.6.20 + m = np.arange(1, 4)[:,None,None] + q = np.r_[np.logspace(-2, 2, 10)][None,:,None] + z = np.linspace(0, 1, 7)[None,None,:] + + y1 = cephes.mathieu_modsem2(m, q, -z)[0] + fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1] + y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0] + assert_allclose(y1, y2, rtol=1e-10) + + def test_mathieu_overflow(self): + # Check that these return NaNs instead of causing a SEGV + assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan)) + assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan)) + + def test_mathieu_ticket_1847(self): + # Regression test --- this call had some out-of-bounds access + # and could return nan occasionally + for k in range(60): + v = cephes.mathieu_modsem2(2, 100, -1) + # Values from ACM TOMS 804 (derivate by numerical differentiation) + assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10) + assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4) + + def test_modfresnelm(self): + cephes.modfresnelm(0) + + def test_modfresnelp(self): + cephes.modfresnelp(0) + + def _check_modstruve(self): + assert_equal(cephes.modstruve(1,0),0.0) + + def test_nbdtr(self): + assert_equal(cephes.nbdtr(1,1,1),1.0) + + def test_nbdtrc(self): + assert_equal(cephes.nbdtrc(1,1,1),0.0) + + def test_nbdtri(self): + assert_equal(cephes.nbdtri(1,1,1),1.0) + + def __check_nbdtrik(self): + cephes.nbdtrik(1,.4,.5) + + def test_nbdtrin(self): + assert_equal(cephes.nbdtrin(1,0,0),5.0) + + def test_ncfdtr(self): + assert_equal(cephes.ncfdtr(1,1,1,0),0.0) + + def test_ncfdtri(self): + assert_equal(cephes.ncfdtri(1, 1, 1, 0), 0.0) + f = [0.5, 1, 1.5] + p = cephes.ncfdtr(2, 3, 1.5, f) + assert_allclose(cephes.ncfdtri(2, 3, 1.5, p), f) + + def test_ncfdtridfd(self): + dfd = [1, 2, 3] + p = cephes.ncfdtr(2, dfd, 0.25, 15) + assert_allclose(cephes.ncfdtridfd(2, p, 0.25, 15), dfd) + + def test_ncfdtridfn(self): + dfn = [0.1, 1, 2, 3, 1e4] + p = cephes.ncfdtr(dfn, 2, 0.25, 15) + assert_allclose(cephes.ncfdtridfn(p, 2, 0.25, 15), dfn, rtol=1e-5) + + def test_ncfdtrinc(self): + nc = [0.5, 1.5, 2.0] + p = cephes.ncfdtr(2, 3, nc, 15) + assert_allclose(cephes.ncfdtrinc(2, 3, p, 15), nc) + + def test_nctdtr(self): + assert_equal(cephes.nctdtr(1,0,0),0.5) + assert_equal(cephes.nctdtr(9, 65536, 45), 0.0) + + assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5) + assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.))) + assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.) 
+ + assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.))) + assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.))) + assert_(np.isnan(cephes.nctdtr(2., 1., np.nan))) + + def __check_nctdtridf(self): + cephes.nctdtridf(1,0.5,0) + + def test_nctdtrinc(self): + cephes.nctdtrinc(1,0,0) + + def test_nctdtrit(self): + cephes.nctdtrit(.1,0.2,.5) + + def test_ndtr(self): + assert_equal(cephes.ndtr(0), 0.5) + assert_almost_equal(cephes.ndtr(1), 0.84134474606) + + def test_ndtri(self): + assert_equal(cephes.ndtri(0.5),0.0) + + def test_nrdtrimn(self): + assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0) + + def test_nrdtrisd(self): + assert_allclose(cephes.nrdtrisd(0.5,0.5,0.5), 0.0, + atol=0, rtol=0) + + def test_obl_ang1(self): + cephes.obl_ang1(1,1,1,0) + + def test_obl_ang1_cv(self): + result = cephes.obl_ang1_cv(1,1,1,1,0) + assert_almost_equal(result[0],1.0) + assert_almost_equal(result[1],0.0) + + def _check_obl_cv(self): + assert_equal(cephes.obl_cv(1,1,0),2.0) + + def test_obl_rad1(self): + cephes.obl_rad1(1,1,1,0) + + def test_obl_rad1_cv(self): + cephes.obl_rad1_cv(1,1,1,1,0) + + def test_obl_rad2(self): + cephes.obl_rad2(1,1,1,0) + + def test_obl_rad2_cv(self): + cephes.obl_rad2_cv(1,1,1,1,0) + + def test_pbdv(self): + assert_equal(cephes.pbdv(1,0),(0.0,1.0)) + + def test_pbvv(self): + cephes.pbvv(1,0) + + def test_pbwa(self): + cephes.pbwa(1,0) + + def test_pdtr(self): + val = cephes.pdtr(0, 1) + assert_almost_equal(val, np.exp(-1)) + # Edge case: m = 0. + val = cephes.pdtr([0, 1, 2], 0.0) + assert_array_equal(val, [1, 1, 1]) + + def test_pdtrc(self): + val = cephes.pdtrc(0, 1) + assert_almost_equal(val, 1 - np.exp(-1)) + # Edge case: m = 0. + val = cephes.pdtrc([0, 1, 2], 0.0) + assert_array_equal(val, [0, 0, 0]) + + def test_pdtri(self): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "floating point number truncated to an integer") + cephes.pdtri(0.5,0.5) + + def test_pdtrik(self): + k = cephes.pdtrik(0.5, 1) + assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5) + # Edge case: m = 0 or very small. 
+ k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6]) + assert_array_equal(k, np.zeros((3, 3))) + + def test_pro_ang1(self): + cephes.pro_ang1(1,1,1,0) + + def test_pro_ang1_cv(self): + assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0), + array((1.0,0.0))) + + def _check_pro_cv(self): + assert_equal(cephes.pro_cv(1,1,0),2.0) + + def test_pro_rad1(self): + cephes.pro_rad1(1,1,1,0.1) + + def test_pro_rad1_cv(self): + cephes.pro_rad1_cv(1,1,1,1,0) + + def test_pro_rad2(self): + cephes.pro_rad2(1,1,1,0) + + def test_pro_rad2_cv(self): + cephes.pro_rad2_cv(1,1,1,1,0) + + def test_psi(self): + cephes.psi(1) + + def test_radian(self): + assert_equal(cephes.radian(0,0,0),0) + + def test_rgamma(self): + assert_equal(cephes.rgamma(1),1.0) + + def test_round(self): + assert_equal(cephes.round(3.4),3.0) + assert_equal(cephes.round(-3.4),-3.0) + assert_equal(cephes.round(3.6),4.0) + assert_equal(cephes.round(-3.6),-4.0) + assert_equal(cephes.round(3.5),4.0) + assert_equal(cephes.round(-3.5),-4.0) + + def test_shichi(self): + cephes.shichi(1) + + def test_sici(self): + cephes.sici(1) + + s, c = cephes.sici(np.inf) + assert_almost_equal(s, np.pi * 0.5) + assert_almost_equal(c, 0) + + s, c = cephes.sici(-np.inf) + assert_almost_equal(s, -np.pi * 0.5) + assert_(np.isnan(c), "cosine integral(-inf) is not nan") + + def test_sindg(self): + assert_equal(cephes.sindg(90),1.0) + + def test_smirnov(self): + assert_equal(cephes.smirnov(1,.1),0.9) + assert_(np.isnan(cephes.smirnov(1,np.nan))) + + def test_smirnovp(self): + assert_equal(cephes._smirnovp(1, .1), -1) + assert_equal(cephes._smirnovp(2, 0.75), -2*(0.25)**(2-1)) + assert_equal(cephes._smirnovp(3, 0.75), -3*(0.25)**(3-1)) + assert_(np.isnan(cephes._smirnovp(1, np.nan))) + + def test_smirnovc(self): + assert_equal(cephes._smirnovc(1,.1),0.1) + assert_(np.isnan(cephes._smirnovc(1,np.nan))) + x10 = np.linspace(0, 1, 11, endpoint=True) + assert_almost_equal(cephes._smirnovc(3, x10), 1-cephes.smirnov(3, x10)) + x4 = np.linspace(0, 1, 5, endpoint=True) + assert_almost_equal(cephes._smirnovc(4, x4), 1-cephes.smirnov(4, x4)) + + def test_smirnovi(self): + assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4) + assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6) + assert_(np.isnan(cephes.smirnovi(1,np.nan))) + + def test_smirnovci(self): + assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.4)),0.4) + assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.6)),0.6) + assert_(np.isnan(cephes._smirnovci(1,np.nan))) + + def test_spence(self): + assert_equal(cephes.spence(1),0.0) + + def test_stdtr(self): + assert_equal(cephes.stdtr(1,0),0.5) + assert_almost_equal(cephes.stdtr(1,1), 0.75) + assert_almost_equal(cephes.stdtr(1,2), 0.852416382349) + + def test_stdtridf(self): + cephes.stdtridf(0.7,1) + + def test_stdtrit(self): + cephes.stdtrit(1,0.7) + + def test_struve(self): + assert_equal(cephes.struve(0,0),0.0) + + def test_tandg(self): + assert_equal(cephes.tandg(45),1.0) + + def test_tklmbda(self): + assert_almost_equal(cephes.tklmbda(1,1),1.0) + + def test_y0(self): + cephes.y0(1) + + def test_y1(self): + cephes.y1(1) + + def test_yn(self): + cephes.yn(1,1) + + def test_yv(self): + cephes.yv(1,1) + + def _check_yve(self): + cephes.yve(1,1) + + def test_wofz(self): + z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.), + complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.), + complex(-0.0000000234545,1.1234), complex(-3.,5.1), + complex(-53,30.1), complex(0.0,0.12345), + complex(11,1), complex(-22,-2), 
complex(9,-28), + complex(21,-33), complex(1e5,1e5), complex(1e14,1e14) + ] + w = [ + complex(-3.78270245518980507452677445620103199303131110e-7, + 0.000903861276433172057331093754199933411710053155), + complex(0.1764906227004816847297495349730234591778719532788, + -0.02146550539468457616788719893991501311573031095617), + complex(0.2410250715772692146133539023007113781272362309451, + 0.06087579663428089745895459735240964093522265589350), + complex(0.30474420525691259245713884106959496013413834051768, + -0.20821893820283162728743734725471561394145872072738), + complex(7.317131068972378096865595229600561710140617977e34, + 8.321873499714402777186848353320412813066170427e34), + complex(0.0615698507236323685519612934241429530190806818395, + -0.00676005783716575013073036218018565206070072304635), + complex(0.3960793007699874918961319170187598400134746631, + -5.593152259116644920546186222529802777409274656e-9), + complex(0.08217199226739447943295069917990417630675021771804, + -0.04701291087643609891018366143118110965272615832184), + complex(0.00457246000350281640952328010227885008541748668738, + -0.00804900791411691821818731763401840373998654987934), + complex(0.8746342859608052666092782112565360755791467973338452, + 0.), + complex(0.00468190164965444174367477874864366058339647648741, + 0.0510735563901306197993676329845149741675029197050), + complex(-0.0023193175200187620902125853834909543869428763219, + -0.025460054739731556004902057663500272721780776336), + complex(9.11463368405637174660562096516414499772662584e304, + 3.97101807145263333769664875189354358563218932e305), + complex(-4.4927207857715598976165541011143706155432296e281, + -2.8019591213423077494444700357168707775769028e281), + complex(2.820947917809305132678577516325951485807107151e-6, + 2.820947917668257736791638444590253942253354058e-6), + complex(2.82094791773878143474039725787438662716372268e-15, + 2.82094791773878143474039725773333923127678361e-15) + ] + assert_func_equal(cephes.wofz, w, z, rtol=1e-13) + + +class TestAiry(object): + def test_airy(self): + # This tests the airy function to ensure 8 place accuracy in computation + + x = special.airy(.99) + assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8) + x = special.airy(.41) + assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8) + x = special.airy(-.36) + assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8) + + def test_airye(self): + a = special.airye(0.01) + b = special.airy(0.01) + b1 = [None]*4 + for n in range(2): + b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01)) + for n in range(2,4): + b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01)))) + assert_array_almost_equal(a,b1,6) + + def test_bi_zeros(self): + bi = special.bi_zeros(2) + bia = (array([-1.17371322, -3.2710930]), + array([-2.29443968, -4.07315509]), + array([-0.45494438, 0.39652284]), + array([0.60195789, -0.76031014])) + assert_array_almost_equal(bi,bia,4) + + bi = special.bi_zeros(5) + assert_array_almost_equal(bi[0],array([-1.173713222709127, + -3.271093302836352, + -4.830737841662016, + -6.169852128310251, + -7.376762079367764]),11) + + assert_array_almost_equal(bi[1],array([-2.294439682614122, + -4.073155089071828, + -5.512395729663599, + -6.781294445990305, + -7.940178689168587]),10) + + assert_array_almost_equal(bi[2],array([-0.454944383639657, + 0.396522836094465, + -0.367969161486959, + 0.349499116831805, + -0.336026240133662]),11) + + assert_array_almost_equal(bi[3],array([0.601957887976239, + -0.760310141492801, 
+ 0.836991012619261, + -0.88947990142654, + 0.929983638568022]),10) + + def test_ai_zeros(self): + ai = special.ai_zeros(1) + assert_array_almost_equal(ai,(array([-2.33810741]), + array([-1.01879297]), + array([0.5357]), + array([0.7012])),4) + + def test_ai_zeros_big(self): + z, zp, ai_zpx, aip_zx = special.ai_zeros(50000) + ai_z, aip_z, _, _ = special.airy(z) + ai_zp, aip_zp, _, _ = special.airy(zp) + + ai_envelope = 1/abs(z)**(1./4) + aip_envelope = abs(zp)**(1./4) + + # Check values + assert_allclose(ai_zpx, ai_zp, rtol=1e-10) + assert_allclose(aip_zx, aip_z, rtol=1e-10) + + # Check they are zeros + assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0) + assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0) + + # Check first zeros, DLMF 9.9.1 + assert_allclose(z[:6], + [-2.3381074105, -4.0879494441, -5.5205598281, + -6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10) + assert_allclose(zp[:6], + [-1.0187929716, -3.2481975822, -4.8200992112, + -6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10) + + def test_bi_zeros_big(self): + z, zp, bi_zpx, bip_zx = special.bi_zeros(50000) + _, _, bi_z, bip_z = special.airy(z) + _, _, bi_zp, bip_zp = special.airy(zp) + + bi_envelope = 1/abs(z)**(1./4) + bip_envelope = abs(zp)**(1./4) + + # Check values + assert_allclose(bi_zpx, bi_zp, rtol=1e-10) + assert_allclose(bip_zx, bip_z, rtol=1e-10) + + # Check they are zeros + assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0) + assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0) + + # Check first zeros, DLMF 9.9.2 + assert_allclose(z[:6], + [-1.1737132227, -3.2710933028, -4.8307378417, + -6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10) + assert_allclose(zp[:6], + [-2.2944396826, -4.0731550891, -5.5123957297, + -6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10) + + +class TestAssocLaguerre(object): + def test_assoc_laguerre(self): + a1 = special.genlaguerre(11,1) + a2 = special.assoc_laguerre(.2,11,1) + assert_array_almost_equal(a2,a1(.2),8) + a2 = special.assoc_laguerre(1,11,1) + assert_array_almost_equal(a2,a1(1),8) + + +class TestBesselpoly(object): + def test_besselpoly(self): + pass + + +class TestKelvin(object): + def test_bei(self): + mbei = special.bei(2) + assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact + + def test_beip(self): + mbeip = special.beip(2) + assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact + + def test_ber(self): + mber = special.ber(2) + assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact + + def test_berp(self): + mberp = special.berp(2) + assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact + + def test_bei_zeros(self): + # Abramowitz & Stegun, Table 9.12 + bi = special.bei_zeros(5) + assert_array_almost_equal(bi,array([5.02622, + 9.45541, + 13.89349, + 18.33398, + 22.77544]),4) + + def test_beip_zeros(self): + bip = special.beip_zeros(5) + assert_array_almost_equal(bip,array([3.772673304934953, + 8.280987849760042, + 12.742147523633703, + 17.193431752512542, + 21.641143941167325]),8) + + def test_ber_zeros(self): + ber = special.ber_zeros(5) + assert_array_almost_equal(ber,array([2.84892, + 7.23883, + 11.67396, + 16.11356, + 20.55463]),4) + + def test_berp_zeros(self): + brp = special.berp_zeros(5) + assert_array_almost_equal(brp,array([6.03871, + 10.51364, + 14.96844, + 19.41758, + 23.86430]),4) + + def test_kelvin(self): + mkelv = special.kelvin(2) + assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j, + special.ker(2) + 
special.kei(2)*1j, + special.berp(2) + special.beip(2)*1j, + special.kerp(2) + special.keip(2)*1j),8) + + def test_kei(self): + mkei = special.kei(2) + assert_almost_equal(mkei,-0.20240006776470432,5) + + def test_keip(self): + mkeip = special.keip(2) + assert_almost_equal(mkeip,0.21980790991960536,5) + + def test_ker(self): + mker = special.ker(2) + assert_almost_equal(mker,-0.041664513991509472,5) + + def test_kerp(self): + mkerp = special.kerp(2) + assert_almost_equal(mkerp,-0.10660096588105264,5) + + def test_kei_zeros(self): + kei = special.kei_zeros(5) + assert_array_almost_equal(kei,array([3.91467, + 8.34422, + 12.78256, + 17.22314, + 21.66464]),4) + + def test_keip_zeros(self): + keip = special.keip_zeros(5) + assert_array_almost_equal(keip,array([4.93181, + 9.40405, + 13.85827, + 18.30717, + 22.75379]),4) + + # numbers come from 9.9 of A&S pg. 381 + def test_kelvin_zeros(self): + tmp = special.kelvin_zeros(5) + berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp + assert_array_almost_equal(berz,array([2.84892, + 7.23883, + 11.67396, + 16.11356, + 20.55463]),4) + assert_array_almost_equal(beiz,array([5.02622, + 9.45541, + 13.89349, + 18.33398, + 22.77544]),4) + assert_array_almost_equal(kerz,array([1.71854, + 6.12728, + 10.56294, + 15.00269, + 19.44382]),4) + assert_array_almost_equal(keiz,array([3.91467, + 8.34422, + 12.78256, + 17.22314, + 21.66464]),4) + assert_array_almost_equal(berpz,array([6.03871, + 10.51364, + 14.96844, + 19.41758, + 23.86430]),4) + assert_array_almost_equal(beipz,array([3.77267, + # table from 1927 had 3.77320 + # but this is more accurate + 8.28099, + 12.74215, + 17.19343, + 21.64114]),4) + assert_array_almost_equal(kerpz,array([2.66584, + 7.17212, + 11.63218, + 16.08312, + 20.53068]),4) + assert_array_almost_equal(keipz,array([4.93181, + 9.40405, + 13.85827, + 18.30717, + 22.75379]),4) + + def test_ker_zeros(self): + ker = special.ker_zeros(5) + assert_array_almost_equal(ker,array([1.71854, + 6.12728, + 10.56294, + 15.00269, + 19.44381]),4) + + def test_kerp_zeros(self): + kerp = special.kerp_zeros(5) + assert_array_almost_equal(kerp,array([2.66584, + 7.17212, + 11.63218, + 16.08312, + 20.53068]),4) + + +class TestBernoulli(object): + def test_bernoulli(self): + brn = special.bernoulli(5) + assert_array_almost_equal(brn,array([1.0000, + -0.5000, + 0.1667, + 0.0000, + -0.0333, + 0.0000]),4) + + +class TestBeta(object): + def test_beta(self): + bet = special.beta(2,4) + betg = (special.gamma(2)*special.gamma(4))/special.gamma(6) + assert_almost_equal(bet,betg,8) + + def test_betaln(self): + betln = special.betaln(2,4) + bet = log(abs(special.beta(2,4))) + assert_almost_equal(betln,bet,8) + + def test_betainc(self): + btinc = special.betainc(1,1,.2) + assert_almost_equal(btinc,0.2,8) + + def test_betaincinv(self): + y = special.betaincinv(2,4,.5) + comp = special.betainc(2,4,y) + assert_almost_equal(comp,.5,5) + + +class TestCombinatorics(object): + def test_comb(self): + assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.]) + assert_almost_equal(special.comb(10, 3), 120.) 
+ assert_equal(special.comb(10, 3, exact=True), 120) + assert_equal(special.comb(10, 3, exact=True, repetition=True), 220) + + assert_allclose([special.comb(20, k, exact=True) for k in range(21)], + special.comb(20, list(range(21))), atol=1e-15) + + ii = np.iinfo(int).max + 1 + assert_equal(special.comb(ii, ii-1, exact=True), ii) + + expected = 100891344545564193334812497256 + assert_equal(special.comb(100, 50, exact=True), expected) + + def test_comb_with_np_int64(self): + n = 70 + k = 30 + np_n = np.int64(n) + np_k = np.int64(k) + assert_equal(special.comb(np_n, np_k, exact=True), + special.comb(n, k, exact=True)) + + def test_comb_zeros(self): + assert_equal(special.comb(2, 3, exact=True), 0) + assert_equal(special.comb(-1, 3, exact=True), 0) + assert_equal(special.comb(2, -1, exact=True), 0) + assert_equal(special.comb(2, -1, exact=False), 0) + assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]), + [0., 0., 0., 120.]) + + def test_perm(self): + assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.]) + assert_almost_equal(special.perm(10, 3), 720.) + assert_equal(special.perm(10, 3, exact=True), 720) + + def test_perm_zeros(self): + assert_equal(special.perm(2, 3, exact=True), 0) + assert_equal(special.perm(-1, 3, exact=True), 0) + assert_equal(special.perm(2, -1, exact=True), 0) + assert_equal(special.perm(2, -1, exact=False), 0) + assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]), + [0., 0., 0., 720.]) + + +class TestTrigonometric(object): + def test_cbrt(self): + cb = special.cbrt(27) + cbrl = 27**(1.0/3.0) + assert_approx_equal(cb,cbrl) + + def test_cbrtmore(self): + cb1 = special.cbrt(27.9) + cbrl1 = 27.9**(1.0/3.0) + assert_almost_equal(cb1,cbrl1,8) + + def test_cosdg(self): + cdg = special.cosdg(90) + cdgrl = cos(pi/2.0) + assert_almost_equal(cdg,cdgrl,8) + + def test_cosdgmore(self): + cdgm = special.cosdg(30) + cdgmrl = cos(pi/6.0) + assert_almost_equal(cdgm,cdgmrl,8) + + def test_cosm1(self): + cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10)) + csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1) + assert_array_almost_equal(cs,csrl,8) + + def test_cotdg(self): + ct = special.cotdg(30) + ctrl = tan(pi/6.0)**(-1) + assert_almost_equal(ct,ctrl,8) + + def test_cotdgmore(self): + ct1 = special.cotdg(45) + ctrl1 = tan(pi/4.0)**(-1) + assert_almost_equal(ct1,ctrl1,8) + + def test_specialpoints(self): + assert_almost_equal(special.cotdg(45), 1.0, 14) + assert_almost_equal(special.cotdg(-45), -1.0, 14) + assert_almost_equal(special.cotdg(90), 0.0, 14) + assert_almost_equal(special.cotdg(-90), 0.0, 14) + assert_almost_equal(special.cotdg(135), -1.0, 14) + assert_almost_equal(special.cotdg(-135), 1.0, 14) + assert_almost_equal(special.cotdg(225), 1.0, 14) + assert_almost_equal(special.cotdg(-225), -1.0, 14) + assert_almost_equal(special.cotdg(270), 0.0, 14) + assert_almost_equal(special.cotdg(-270), 0.0, 14) + assert_almost_equal(special.cotdg(315), -1.0, 14) + assert_almost_equal(special.cotdg(-315), 1.0, 14) + assert_almost_equal(special.cotdg(765), 1.0, 14) + + def test_sinc(self): + # the sinc implementation and more extensive sinc tests are in numpy + assert_array_equal(special.sinc([0]), 1) + assert_equal(special.sinc(0.0), 1.0) + + def test_sindg(self): + sn = special.sindg(90) + assert_equal(sn,1.0) + + def test_sindgmore(self): + snm = special.sindg(30) + snmrl = sin(pi/6.0) + assert_almost_equal(snm,snmrl,8) + snm1 = special.sindg(45) + snmrl1 = sin(pi/4.0) + assert_almost_equal(snm1,snmrl1,8) + + +class 
TestTandg(object): + + def test_tandg(self): + tn = special.tandg(30) + tnrl = tan(pi/6.0) + assert_almost_equal(tn,tnrl,8) + + def test_tandgmore(self): + tnm = special.tandg(45) + tnmrl = tan(pi/4.0) + assert_almost_equal(tnm,tnmrl,8) + tnm1 = special.tandg(60) + tnmrl1 = tan(pi/3.0) + assert_almost_equal(tnm1,tnmrl1,8) + + def test_specialpoints(self): + assert_almost_equal(special.tandg(0), 0.0, 14) + assert_almost_equal(special.tandg(45), 1.0, 14) + assert_almost_equal(special.tandg(-45), -1.0, 14) + assert_almost_equal(special.tandg(135), -1.0, 14) + assert_almost_equal(special.tandg(-135), 1.0, 14) + assert_almost_equal(special.tandg(180), 0.0, 14) + assert_almost_equal(special.tandg(-180), 0.0, 14) + assert_almost_equal(special.tandg(225), 1.0, 14) + assert_almost_equal(special.tandg(-225), -1.0, 14) + assert_almost_equal(special.tandg(315), -1.0, 14) + assert_almost_equal(special.tandg(-315), 1.0, 14) + + +class TestEllip(object): + def test_ellipj_nan(self): + """Regression test for #912.""" + special.ellipj(0.5, np.nan) + + def test_ellipj(self): + el = special.ellipj(0.2,0) + rel = [sin(0.2),cos(0.2),1.0,0.20] + assert_array_almost_equal(el,rel,13) + + def test_ellipk(self): + elk = special.ellipk(.2) + assert_almost_equal(elk,1.659623598610528,11) + + assert_equal(special.ellipkm1(0.0), np.inf) + assert_equal(special.ellipkm1(1.0), pi/2) + assert_equal(special.ellipkm1(np.inf), 0.0) + assert_equal(special.ellipkm1(np.nan), np.nan) + assert_equal(special.ellipkm1(-1), np.nan) + assert_allclose(special.ellipk(-10), 0.7908718902387385) + + def test_ellipkinc(self): + elkinc = special.ellipkinc(pi/2,.2) + elk = special.ellipk(0.2) + assert_almost_equal(elkinc,elk,15) + alpha = 20*pi/180 + phi = 45*pi/180 + m = sin(alpha)**2 + elkinc = special.ellipkinc(phi,m) + assert_almost_equal(elkinc,0.79398143,8) + # From pg. 
614 of A & S
+
+        assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
+        assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
+        assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
+        assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
+        assert_equal(special.ellipkinc(pi/2, 2), np.nan)
+        assert_equal(special.ellipkinc(0, 0.5), 0.0)
+        assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
+        assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
+        assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
+        assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
+        assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
+        assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
+        assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
+        assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
+
+        assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
+        assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
+
+    def test_ellipkinc_2(self):
+        # Regression test for gh-3550
+        # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
+        mbad = 0.68359375000000011
+        phi = 0.9272952180016123
+        m = np.nextafter(mbad, 0)
+        mvals = []
+        for j in range(10):
+            mvals.append(m)
+            m = np.nextafter(m, 1)
+        f = special.ellipkinc(phi, mvals)
+        assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
+        # this bug also appears at phi + n * pi for at least small n
+        f1 = special.ellipkinc(phi + pi, mvals)
+        assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)
+
+    def test_ellipkinc_singular(self):
+        # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
+        xlog = np.logspace(-300, -17, 25)
+        xlin = np.linspace(1e-17, 0.1, 25)
+        xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
+
+        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
+        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
+        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
+        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
+        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
+        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
+        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
+        assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
+
+    def test_ellipe(self):
+        ele = special.ellipe(.2)
+        assert_almost_equal(ele,1.4890350580958529,8)
+
+        assert_equal(special.ellipe(0.0), pi/2)
+        assert_equal(special.ellipe(1.0), 1.0)
+        assert_equal(special.ellipe(-np.inf), np.inf)
+        assert_equal(special.ellipe(np.nan), np.nan)
+        assert_equal(special.ellipe(2), np.nan)
+        assert_allclose(special.ellipe(-10), 3.6391380384177689)
+
+    def test_ellipeinc(self):
+        eleinc = special.ellipeinc(pi/2,.2)
+        ele = special.ellipe(0.2)
+        assert_almost_equal(eleinc,ele,14)
+        # pg 617 of A & S
+        alpha, phi = 52*pi/180,35*pi/180
+        m = sin(alpha)**2
+        eleinc = special.ellipeinc(phi,m)
+        assert_almost_equal(eleinc, 0.58823065, 8)
+
+        assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
+        assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
+        assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
+        assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
+        assert_equal(special.ellipeinc(pi/2, 2), np.nan)
+        assert_equal(special.ellipeinc(0, 0.5), 0.0)
+        assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
+        assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
+        assert_equal(special.ellipeinc(np.inf, -np.inf),
np.inf) + assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf) + assert_equal(special.ellipeinc(np.inf, np.inf), np.nan) + assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan) + assert_equal(special.ellipeinc(np.nan, 0.5), np.nan) + assert_equal(special.ellipeinc(np.nan, np.nan), np.nan) + assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876) + + def test_ellipeinc_2(self): + # Regression test for gh-3550 + # ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value + mbad = 0.68359375000000011 + phi = 0.9272952180016123 + m = np.nextafter(mbad, 0) + mvals = [] + for j in range(10): + mvals.append(m) + m = np.nextafter(m, 1) + f = special.ellipeinc(phi, mvals) + assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2) + # this bug also appears at phi + n * pi for at least small n + f1 = special.ellipeinc(phi + pi, mvals) + assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4) + + +class TestErf(object): + + def test_erf(self): + er = special.erf(.25) + assert_almost_equal(er,0.2763263902,8) + + def test_erf_zeros(self): + erz = special.erf_zeros(5) + erzr = array([1.45061616+1.88094300j, + 2.24465928+2.61657514j, + 2.83974105+3.17562810j, + 3.33546074+3.64617438j, + 3.76900557+4.06069723j]) + assert_array_almost_equal(erz,erzr,4) + + def _check_variant_func(self, func, other_func, rtol, atol=0): + np.random.seed(1234) + n = 10000 + x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1) + y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1) + z = x + 1j*y + + old_errors = np.seterr(all='ignore') + try: + w = other_func(z) + w_real = other_func(x).real + + mask = np.isfinite(w) + w = w[mask] + z = z[mask] + + mask = np.isfinite(w_real) + w_real = w_real[mask] + x = x[mask] + + # test both real and complex variants + assert_func_equal(func, w, z, rtol=rtol, atol=atol) + assert_func_equal(func, w_real, x, rtol=rtol, atol=atol) + finally: + np.seterr(**old_errors) + + def test_erfc_consistent(self): + self._check_variant_func( + cephes.erfc, + lambda z: 1 - cephes.erf(z), + rtol=1e-12, + atol=1e-14 # <- the test function loses precision + ) + + def test_erfcx_consistent(self): + self._check_variant_func( + cephes.erfcx, + lambda z: np.exp(z*z) * cephes.erfc(z), + rtol=1e-12 + ) + + def test_erfi_consistent(self): + self._check_variant_func( + cephes.erfi, + lambda z: -1j * cephes.erf(1j*z), + rtol=1e-12 + ) + + def test_dawsn_consistent(self): + self._check_variant_func( + cephes.dawsn, + lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z), + rtol=1e-12 + ) + + def test_erfcinv(self): + i = special.erfcinv(1) + # Use assert_array_equal instead of assert_equal, so the comparison + # of -0.0 and 0.0 doesn't fail. 
+ assert_array_equal(i, 0) + + def test_erfinv(self): + i = special.erfinv(0) + assert_equal(i,0) + + def test_erf_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, -1, 1] + assert_allclose(special.erf(vals), expected, rtol=1e-15) + + def test_erfc_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, 2, 0] + assert_allclose(special.erfc(vals), expected, rtol=1e-15) + + def test_erfcx_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, np.inf, 0] + assert_allclose(special.erfcx(vals), expected, rtol=1e-15) + + def test_erfi_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, -np.inf, np.inf] + assert_allclose(special.erfi(vals), expected, rtol=1e-15) + + def test_dawsn_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan, -0.0, 0.0] + assert_allclose(special.dawsn(vals), expected, rtol=1e-15) + + def test_wofz_nan_inf(self): + vals = [np.nan, -np.inf, np.inf] + expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j] + assert_allclose(special.wofz(vals), expected, rtol=1e-15) + + +class TestEuler(object): + def test_euler(self): + eu0 = special.euler(0) + eu1 = special.euler(1) + eu2 = special.euler(2) # just checking segfaults + assert_allclose(eu0, [1], rtol=1e-15) + assert_allclose(eu1, [1, 0], rtol=1e-15) + assert_allclose(eu2, [1, 0, -1], rtol=1e-15) + eu24 = special.euler(24) + mathworld = [1,1,5,61,1385,50521,2702765,199360981, + 19391512145,2404879675441, + 370371188237525,69348874393137901, + 15514534163557086905] + correct = zeros((25,),'d') + for k in range(0,13): + if (k % 2): + correct[2*k] = -float(mathworld[k]) + else: + correct[2*k] = float(mathworld[k]) + olderr = np.seterr(all='ignore') + try: + err = nan_to_num((eu24-correct)/correct) + errmax = max(err) + finally: + np.seterr(**olderr) + assert_almost_equal(errmax, 0.0, 14) + + +class TestExp(object): + def test_exp2(self): + ex = special.exp2(2) + exrl = 2**2 + assert_equal(ex,exrl) + + def test_exp2more(self): + exm = special.exp2(2.5) + exmrl = 2**(2.5) + assert_almost_equal(exm,exmrl,8) + + def test_exp10(self): + ex = special.exp10(2) + exrl = 10**2 + assert_approx_equal(ex,exrl) + + def test_exp10more(self): + exm = special.exp10(2.5) + exmrl = 10**(2.5) + assert_almost_equal(exm,exmrl,8) + + def test_expm1(self): + ex = (special.expm1(2),special.expm1(3),special.expm1(4)) + exrl = (exp(2)-1,exp(3)-1,exp(4)-1) + assert_array_almost_equal(ex,exrl,8) + + def test_expm1more(self): + ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2)) + exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1) + assert_array_almost_equal(ex1,exrl1,8) + + +class TestFactorialFunctions(object): + def test_factorial(self): + # Some known values, float math + assert_array_almost_equal(special.factorial(0), 1) + assert_array_almost_equal(special.factorial(1), 1) + assert_array_almost_equal(special.factorial(2), 2) + assert_array_almost_equal([6., 24., 120.], + special.factorial([3, 4, 5], exact=False)) + assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]), + [[120, 6], [24, 6]]) + + # Some known values, integer math + assert_equal(special.factorial(0, exact=True), 1) + assert_equal(special.factorial(1, exact=True), 1) + assert_equal(special.factorial(2, exact=True), 2) + assert_equal(special.factorial(5, exact=True), 120) + assert_equal(special.factorial(15, exact=True), 1307674368000) + + # ndarray shape is maintained + assert_equal(special.factorial([7, 4, 15, 10], exact=True), + [5040, 24, 1307674368000, 3628800]) + + 
assert_equal(special.factorial([[5, 3], [4, 3]], True), + [[120, 6], [24, 6]]) + + # object arrays + assert_equal(special.factorial(np.arange(-3, 22), True), + special.factorial(np.arange(-3, 22), False)) + + # int64 array + assert_equal(special.factorial(np.arange(-3, 15), True), + special.factorial(np.arange(-3, 15), False)) + + # int32 array + assert_equal(special.factorial(np.arange(-3, 5), True), + special.factorial(np.arange(-3, 5), False)) + + # Consistent output for n < 0 + for exact in (True, False): + assert_array_equal(0, special.factorial(-3, exact)) + assert_array_equal([1, 2, 0, 0], + special.factorial([1, 2, -5, -4], exact)) + + for n in range(0, 22): + # Compare all with math.factorial + correct = math.factorial(n) + assert_array_equal(correct, special.factorial(n, True)) + assert_array_equal(correct, special.factorial([n], True)[0]) + + assert_allclose(float(correct), special.factorial(n, False)) + assert_allclose(float(correct), special.factorial([n], False)[0]) + + # Compare exact=True vs False, scalar vs array + assert_array_equal(special.factorial(n, True), + special.factorial(n, False)) + + assert_array_equal(special.factorial([n], True), + special.factorial([n], False)) + + def test_factorial2(self): + assert_array_almost_equal([105., 384., 945.], + special.factorial2([7, 8, 9], exact=False)) + assert_equal(special.factorial2(7, exact=True), 105) + + def test_factorialk(self): + assert_equal(special.factorialk(5, 1, exact=True), 120) + assert_equal(special.factorialk(5, 3, exact=True), 10) + + +class TestFresnel(object): + def test_fresnel(self): + frs = array(special.fresnel(.5)) + assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8) + + def test_fresnel_inf1(self): + frs = special.fresnel(np.inf) + assert_equal(frs, (0.5, 0.5)) + + def test_fresnel_inf2(self): + frs = special.fresnel(-np.inf) + assert_equal(frs, (-0.5, -0.5)) + + # values from pg 329 Table 7.11 of A & S + # slightly corrected in 4th decimal place + def test_fresnel_zeros(self): + szo, czo = special.fresnel_zeros(5) + assert_array_almost_equal(szo, + array([2.0093+0.2885j, + 2.8335+0.2443j, + 3.4675+0.2185j, + 4.0026+0.2009j, + 4.4742+0.1877j]),3) + assert_array_almost_equal(czo, + array([1.7437+0.3057j, + 2.6515+0.2529j, + 3.3204+0.2240j, + 3.8757+0.2047j, + 4.3611+0.1907j]),3) + vals1 = special.fresnel(szo)[0] + vals2 = special.fresnel(czo)[1] + assert_array_almost_equal(vals1,0,14) + assert_array_almost_equal(vals2,0,14) + + def test_fresnelc_zeros(self): + szo, czo = special.fresnel_zeros(6) + frc = special.fresnelc_zeros(6) + assert_array_almost_equal(frc,czo,12) + + def test_fresnels_zeros(self): + szo, czo = special.fresnel_zeros(5) + frs = special.fresnels_zeros(5) + assert_array_almost_equal(frs,szo,12) + + +class TestGamma(object): + def test_gamma(self): + gam = special.gamma(5) + assert_equal(gam,24.0) + + def test_gammaln(self): + gamln = special.gammaln(3) + lngam = log(special.gamma(3)) + assert_almost_equal(gamln,lngam,8) + + def test_gammainc(self): + gama = special.gammainc(.5,.5) + assert_almost_equal(gama,.7,1) + + def test_gammaincnan(self): + gama = special.gammainc(-1,1) + assert_(isnan(gama)) + + def test_gammainczero(self): + # bad arg but zero integration limit + gama = special.gammainc(-1,0) + assert_equal(gama,0.0) + + def test_gammaincinf(self): + gama = special.gammainc(0.5, np.inf) + assert_equal(gama,1.0) + + def test_gammaincc(self): + gicc = special.gammaincc(.5,.5) + greal = 1 - special.gammainc(.5,.5) + 
assert_almost_equal(gicc,greal,8) + + def test_gammainccnan(self): + gama = special.gammaincc(-1,1) + assert_(isnan(gama)) + + def test_gammainccinf(self): + gama = special.gammaincc(0.5,np.inf) + assert_equal(gama,0.0) + + def test_gammainccinv(self): + gccinv = special.gammainccinv(.5,.5) + gcinv = special.gammaincinv(.5,.5) + assert_almost_equal(gccinv,gcinv,8) + + @with_special_errors + def test_gammaincinv(self): + y = special.gammaincinv(.4,.4) + x = special.gammainc(.4,y) + assert_almost_equal(x,0.4,1) + y = special.gammainc(10, 0.05) + x = special.gammaincinv(10, 2.5715803516000736e-20) + assert_almost_equal(0.05, x, decimal=10) + assert_almost_equal(y, 2.5715803516000736e-20, decimal=10) + x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18) + assert_almost_equal(11.0, x, decimal=10) + + @with_special_errors + def test_975(self): + # Regression test for ticket #975 -- switch point in algorithm + # check that things work OK at the point, immediately next floats + # around it, and a bit further away + pts = [0.25, + np.nextafter(0.25, 0), 0.25 - 1e-12, + np.nextafter(0.25, 1), 0.25 + 1e-12] + for xp in pts: + y = special.gammaincinv(.4, xp) + x = special.gammainc(0.4, y) + assert_allclose(x, xp, rtol=1e-12) + + def test_rgamma(self): + rgam = special.rgamma(8) + rlgam = 1/special.gamma(8) + assert_almost_equal(rgam,rlgam,8) + + def test_infinity(self): + assert_(np.isinf(special.gamma(-1))) + assert_equal(special.rgamma(-1), 0) + + +class TestHankel(object): + + def test_negv1(self): + assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14) + + def test_hankel1(self): + hank1 = special.hankel1(1,.1) + hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j) + assert_almost_equal(hank1,hankrl,8) + + def test_negv1e(self): + assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14) + + def test_hankel1e(self): + hank1e = special.hankel1e(1,.1) + hankrle = special.hankel1(1,.1)*exp(-.1j) + assert_almost_equal(hank1e,hankrle,8) + + def test_negv2(self): + assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14) + + def test_hankel2(self): + hank2 = special.hankel2(1,.1) + hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j) + assert_almost_equal(hank2,hankrl2,8) + + def test_neg2e(self): + assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14) + + def test_hankl2e(self): + hank2e = special.hankel2e(1,.1) + hankrl2e = special.hankel2e(1,.1) + assert_almost_equal(hank2e,hankrl2e,8) + + +class TestHyper(object): + def test_h1vp(self): + h1 = special.h1vp(1,.1) + h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j) + assert_almost_equal(h1,h1real,8) + + def test_h2vp(self): + h2 = special.h2vp(1,.1) + h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j) + assert_almost_equal(h2,h2real,8) + + def test_hyp0f1(self): + # scalar input + assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12) + assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15) + + # float input, expected values match mpmath + x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5]) + expected = np.array([0.58493659229143, 0.70566805723127, 1.0, + 1.37789689539747, 1.60373685288480]) + assert_allclose(x, expected, rtol=1e-12) + + # complex input + x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j) + assert_allclose(x, expected.astype(complex), rtol=1e-12) + + # test broadcasting + x1 = [0.5, 1.5, 2.5] + x2 = [0, 1, 0.5] + x = special.hyp0f1(x1, x2) + expected = [1.0, 1.8134302039235093, 1.21482702689997] + assert_allclose(x, expected, 
rtol=1e-12) + x = special.hyp0f1(np.row_stack([x1] * 2), x2) + assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12) + assert_raises(ValueError, special.hyp0f1, + np.row_stack([x1] * 3), [0, 1]) + + def test_hyp0f1_gh5764(self): + # Just checks the point that failed; there's a more systematic + # test in test_mpmath + res = special.hyp0f1(0.8, 0.5 + 0.5*1J) + # The expected value was generated using mpmath + assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665) + + def test_hyp1f1(self): + hyp1 = special.hyp1f1(.1,.1,.3) + assert_almost_equal(hyp1, 1.3498588075760032,7) + + # test contributed by Moritz Deger (2008-05-29) + # https://github.com/scipy/scipy/issues/1186 (Trac #659) + + # reference data obtained from mathematica [ a, b, x, m(a,b,x)]: + # produced with test_hyp1f1.nb + ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04], + [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00], + [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05], + [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08], + [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24], + [4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21], + [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13], + [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13], + [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02], + [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10], + [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01], + [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21], + [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20], + [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07], + [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03], + [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02], + [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11], + [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03], + [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17], + [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01], + [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00], + [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00], + [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23], + [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01], + [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04], + [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08], + [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01], + [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07], + [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03], + [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09], + [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06], + [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00], + [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01], + [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02], + [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02], + [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02], + [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00], + [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09], + [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01], + 
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00], + [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02], + [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05], + [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05], + [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02], + [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02], + [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13], + [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05], + [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12], + [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01], + [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16], + [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37], + [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06], + [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02], + [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12], + [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27], + [-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04], + [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06], + [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07], + [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03], + [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07], + [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27], + [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12], + [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32], + [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04], + [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01], + [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02], + [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19], + [1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09], + [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31], + [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01], + [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02], + [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08], + [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09], + [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33], + [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01], + [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29], + [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01], + [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29], + [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02], + [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00], + [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08], + [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01], + [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01], + [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01], + [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13], + [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11], + [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02], + [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02], + [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01], + [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, 
-2.50068329e+31], + [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04], + [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25], + [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01], + [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00], + [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02], + [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05], + [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02], + [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01], + [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01], + [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]]) + + for a,b,c,expected in ref_data: + result = special.hyp1f1(a,b,c) + assert_(abs(expected - result)/expected < 1e-4) + + def test_hyp1f1_gh2957(self): + hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933) + hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934) + assert_almost_equal(hyp1, hyp2, 12) + + def test_hyp1f1_gh2282(self): + hyp = special.hyp1f1(0.5, 1.5, -1000) + assert_almost_equal(hyp, 0.028024956081989643, 12) + + def test_hyp2f1(self): + # a collection of special cases taken from AMS 55 + values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))], + [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)], + [1, 1, 2, 0.2, -1/0.2*log(1-0.2)], + [3, 3.5, 1.5, 0.2**2, + 0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))], + [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)], + [3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)], + [3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) * + special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)], + [5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) * + special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)], + [4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) * + special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)], + # and some others + # ticket #424 + [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484], + # negative integer a or b, with c-a-b integer and x > 0.9 + [-2,3,1,0.95,0.715], + [2,-3,1,0.95,-0.007], + [-6,3,1,0.95,0.0000810625], + [2,-5,1,0.95,-0.000029375], + # huge negative integers + (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24), + (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18), + ] + for i, (a, b, c, x, v) in enumerate(values): + cv = special.hyp2f1(a, b, c, x) + assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) + + def test_hyperu(self): + val1 = special.hyperu(1,0.1,100) + assert_almost_equal(val1,0.0098153,7) + a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2] + a,b = asarray(a), asarray(b) + z = 0.5 + hypu = special.hyperu(a,b,z) + hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) / + (special.gamma(1+a-b)*special.gamma(b)) - + z**(1-b)*special.hyp1f1(1+a-b,2-b,z) + / (special.gamma(a)*special.gamma(2-b))) + assert_array_almost_equal(hypu,hprl,12) + + def test_hyperu_gh2287(self): + assert_almost_equal(special.hyperu(1, 1.5, 20.2), + 0.048360918656699191, 12) + + +class TestBessel(object): + def test_itj0y0(self): + it0 = array(special.itj0y0(.2)) + assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8) + + def test_it2j0y0(self): + it2 = array(special.it2j0y0(.2)) + assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8) + + def test_negv_iv(self): + assert_equal(special.iv(3,2), special.iv(-3,2)) + + def test_j0(self): + oz = special.j0(.1) + ozr = special.jn(0,.1) + assert_almost_equal(oz,ozr,8) + + def 
test_j1(self): + o1 = special.j1(.1) + o1r = special.jn(1,.1) + assert_almost_equal(o1,o1r,8) + + def test_jn(self): + jnnr = special.jn(1,.2) + assert_almost_equal(jnnr,0.099500832639235995,8) + + def test_negv_jv(self): + assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14) + + def test_jv(self): + values = [[0, 0.1, 0.99750156206604002], + [2./3, 1e-8, 0.3239028506761532e-5], + [2./3, 1e-10, 0.1503423854873779e-6], + [3.1, 1e-10, 0.1711956265409013e-32], + [2./3, 4.0, -0.2325440850267039], + ] + for i, (v, x, y) in enumerate(values): + yc = special.jv(v, x) + assert_almost_equal(yc, y, 8, err_msg='test #%d' % i) + + def test_negv_jve(self): + assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14) + + def test_jve(self): + jvexp = special.jve(1,.2) + assert_almost_equal(jvexp,0.099500832639235995,8) + jvexp1 = special.jve(1,.2+1j) + z = .2+1j + jvexpr = special.jv(1,z)*exp(-abs(z.imag)) + assert_almost_equal(jvexp1,jvexpr,8) + + def test_jn_zeros(self): + jn0 = special.jn_zeros(0,5) + jn1 = special.jn_zeros(1,5) + assert_array_almost_equal(jn0,array([2.4048255577, + 5.5200781103, + 8.6537279129, + 11.7915344391, + 14.9309177086]),4) + assert_array_almost_equal(jn1,array([3.83171, + 7.01559, + 10.17347, + 13.32369, + 16.47063]),4) + + jn102 = special.jn_zeros(102,5) + assert_allclose(jn102, array([110.89174935992040343, + 117.83464175788308398, + 123.70194191713507279, + 129.02417238949092824, + 134.00114761868422559]), rtol=1e-13) + + jn301 = special.jn_zeros(301,5) + assert_allclose(jn301, array([313.59097866698830153, + 323.21549776096288280, + 331.22338738656748796, + 338.39676338872084500, + 345.03284233056064157]), rtol=1e-13) + + def test_jn_zeros_slow(self): + jn0 = special.jn_zeros(0, 300) + assert_allclose(jn0[260-1], 816.02884495068867280, rtol=1e-13) + assert_allclose(jn0[280-1], 878.86068707124422606, rtol=1e-13) + assert_allclose(jn0[300-1], 941.69253065317954064, rtol=1e-13) + + jn10 = special.jn_zeros(10, 300) + assert_allclose(jn10[260-1], 831.67668514305631151, rtol=1e-13) + assert_allclose(jn10[280-1], 894.51275095371316931, rtol=1e-13) + assert_allclose(jn10[300-1], 957.34826370866539775, rtol=1e-13) + + jn3010 = special.jn_zeros(3010,5) + assert_allclose(jn3010, array([3036.86590780927, + 3057.06598526482, + 3073.66360690272, + 3088.37736494778, + 3101.86438139042]), rtol=1e-8) + + def test_jnjnp_zeros(self): + jn = special.jn + + def jnp(n, x): + return (jn(n-1,x) - jn(n+1,x))/2 + for nt in range(1, 30): + z, n, m, t = special.jnjnp_zeros(nt) + for zz, nn, tt in zip(z, n, t): + if tt == 0: + assert_allclose(jn(nn, zz), 0, atol=1e-6) + elif tt == 1: + assert_allclose(jnp(nn, zz), 0, atol=1e-6) + else: + raise AssertionError("Invalid t return for nt=%d" % nt) + + def test_jnp_zeros(self): + jnp = special.jnp_zeros(1,5) + assert_array_almost_equal(jnp, array([1.84118, + 5.33144, + 8.53632, + 11.70600, + 14.86359]),4) + jnp = special.jnp_zeros(443,5) + assert_allclose(special.jvp(443, jnp), 0, atol=1e-15) + + def test_jnyn_zeros(self): + jnz = special.jnyn_zeros(1,5) + assert_array_almost_equal(jnz,(array([3.83171, + 7.01559, + 10.17347, + 13.32369, + 16.47063]), + array([1.84118, + 5.33144, + 8.53632, + 11.70600, + 14.86359]), + array([2.19714, + 5.42968, + 8.59601, + 11.74915, + 14.89744]), + array([3.68302, + 6.94150, + 10.12340, + 13.28576, + 16.44006])),5) + + def test_jvp(self): + jvprim = special.jvp(2,2) + jv0 = (special.jv(1,2)-special.jv(3,2))/2 + assert_almost_equal(jvprim,jv0,10) + + def test_k0(self): + ozk = special.k0(.1) + ozkr = 
special.kv(0,.1) + assert_almost_equal(ozk,ozkr,8) + + def test_k0e(self): + ozke = special.k0e(.1) + ozker = special.kve(0,.1) + assert_almost_equal(ozke,ozker,8) + + def test_k1(self): + o1k = special.k1(.1) + o1kr = special.kv(1,.1) + assert_almost_equal(o1k,o1kr,8) + + def test_k1e(self): + o1ke = special.k1e(.1) + o1ker = special.kve(1,.1) + assert_almost_equal(o1ke,o1ker,8) + + def test_jacobi(self): + a = 5*np.random.random() - 1 + b = 5*np.random.random() - 1 + P0 = special.jacobi(0,a,b) + P1 = special.jacobi(1,a,b) + P2 = special.jacobi(2,a,b) + P3 = special.jacobi(3,a,b) + + assert_array_almost_equal(P0.c,[1],13) + assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13) + cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)] + p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]] + assert_array_almost_equal(P2.c,array(p2c)/8.0,13) + cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3), + 12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)] + p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]] + assert_array_almost_equal(P3.c,array(p3c)/48.0,13) + + def test_kn(self): + kn1 = special.kn(0,.2) + assert_almost_equal(kn1,1.7527038555281462,8) + + def test_negv_kv(self): + assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2)) + + def test_kv0(self): + kv0 = special.kv(0,.2) + assert_almost_equal(kv0, 1.7527038555281462, 10) + + def test_kv1(self): + kv1 = special.kv(1,0.2) + assert_almost_equal(kv1, 4.775972543220472, 10) + + def test_kv2(self): + kv2 = special.kv(2,0.2) + assert_almost_equal(kv2, 49.51242928773287, 10) + + def test_kn_largeorder(self): + assert_allclose(special.kn(32, 1), 1.7516596664574289e+43) + + def test_kv_largearg(self): + assert_equal(special.kv(0, 1e19), 0) + + def test_negv_kve(self): + assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2)) + + def test_kve(self): + kve1 = special.kve(0,.2) + kv1 = special.kv(0,.2)*exp(.2) + assert_almost_equal(kve1,kv1,8) + z = .2+1j + kve2 = special.kve(0,z) + kv2 = special.kv(0,z)*exp(z) + assert_almost_equal(kve2,kv2,8) + + def test_kvp_v0n1(self): + z = 2.2 + assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10) + + def test_kvp_n1(self): + v = 3. + z = 2.2 + xc = -special.kv(v+1,z) + v/z*special.kv(v,z) + x = special.kvp(v,z, n=1) + assert_almost_equal(xc, x, 10) # this function (kvp) is broken + + def test_kvp_n2(self): + v = 3. 
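+        # xc below is the closed form for the second derivative: it follows
+        # from differentiating the first-derivative identity used in
+        # test_kvp_n1, K_v'(z) = -K_{v+1}(z) + (v/z)*K_v(z), once more and
+        # applying the three-term recurrence for K_{v+2}.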
+ z = 2.2 + xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z + x = special.kvp(v, z, n=2) + assert_almost_equal(xc, x, 10) + + def test_y0(self): + oz = special.y0(.1) + ozr = special.yn(0,.1) + assert_almost_equal(oz,ozr,8) + + def test_y1(self): + o1 = special.y1(.1) + o1r = special.yn(1,.1) + assert_almost_equal(o1,o1r,8) + + def test_y0_zeros(self): + yo,ypo = special.y0_zeros(2) + zo,zpo = special.y0_zeros(2,complex=1) + all = r_[yo,zo] + allval = r_[ypo,zpo] + assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11) + assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11) + + def test_y1_zeros(self): + y1 = special.y1_zeros(1) + assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5) + + def test_y1p_zeros(self): + y1p = special.y1p_zeros(1,complex=1) + assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3) + + def test_yn_zeros(self): + an = special.yn_zeros(4,2) + assert_array_almost_equal(an,array([5.64515, 9.36162]),5) + an = special.yn_zeros(443,5) + assert_allclose(an, [450.13573091578090314, 463.05692376675001542, + 472.80651546418663566, 481.27353184725625838, + 488.98055964441374646], rtol=1e-15) + + def test_ynp_zeros(self): + ao = special.ynp_zeros(0,2) + assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6) + ao = special.ynp_zeros(43,5) + assert_allclose(special.yvp(43, ao), 0, atol=1e-15) + ao = special.ynp_zeros(443,5) + assert_allclose(special.yvp(443, ao), 0, atol=1e-9) + + def test_ynp_zeros_large_order(self): + ao = special.ynp_zeros(443,5) + assert_allclose(special.yvp(443, ao), 0, atol=1e-14) + + def test_yn(self): + yn2n = special.yn(1,.2) + assert_almost_equal(yn2n,-3.3238249881118471,8) + + def test_negv_yv(self): + assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14) + + def test_yv(self): + yv2 = special.yv(1,.2) + assert_almost_equal(yv2,-3.3238249881118471,8) + + def test_negv_yve(self): + assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14) + + def test_yve(self): + yve2 = special.yve(1,.2) + assert_almost_equal(yve2,-3.3238249881118471,8) + yve2r = special.yv(1,.2+1j)*exp(-1) + yve22 = special.yve(1,.2+1j) + assert_almost_equal(yve22,yve2r,8) + + def test_yvp(self): + yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0 + yvp1 = special.yvp(2,.2) + assert_array_almost_equal(yvp1,yvpr,10) + + def _cephes_vs_amos_points(self): + """Yield points at which to compare Cephes implementation to AMOS""" + # check several points, including large-amplitude ones + for v in [-120, -100.3, -20., -10., -1., -.5, + 0., 1., 12.49, 120., 301]: + for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5, + 700.6, 1300, 10003]: + yield v, z + + # check half-integers; these are problematic points at least + # for cephes/iv + for v in 0.5 + arange(-60, 60): + yield v, 3.5 + + def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None): + for v, z in self._cephes_vs_amos_points(): + if skip is not None and skip(v, z): + continue + c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z) + if np.isinf(c1): + assert_(np.abs(c2) >= 1e300, (v, z)) + elif np.isnan(c1): + assert_(c2.imag != 0, (v, z)) + else: + assert_allclose(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol) + if v == int(v): + assert_allclose(c3, c2, err_msg=(v, z), + rtol=rtol, atol=atol) + + def test_jv_cephes_vs_amos(self): + self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305) + + def test_yv_cephes_vs_amos(self): + self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305) + + def 
test_yv_cephes_vs_amos_only_small_orders(self): + skipper = lambda v, z: (abs(v) > 50) + self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper) + + def test_iv_cephes_vs_amos(self): + olderr = np.seterr(all='ignore') + try: + self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305) + finally: + np.seterr(**olderr) + + @pytest.mark.slow + def test_iv_cephes_vs_amos_mass_test(self): + N = 1000000 + np.random.seed(1) + v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N) + x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N) + + imsk = (np.random.randint(8, size=N) == 0) + v[imsk] = v[imsk].astype(int) + + old_err = np.seterr(all='ignore') + try: + c1 = special.iv(v, x) + c2 = special.iv(v, x+0j) + + # deal with differences in the inf and zero cutoffs + c1[abs(c1) > 1e300] = np.inf + c2[abs(c2) > 1e300] = np.inf + c1[abs(c1) < 1e-300] = 0 + c2[abs(c2) < 1e-300] = 0 + + dc = abs(c1/c2 - 1) + dc[np.isnan(dc)] = 0 + finally: + np.seterr(**old_err) + + k = np.argmax(dc) + + # Most error apparently comes from AMOS and not our implementation; + # there are some problems near integer orders there + assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j))) + + def test_kv_cephes_vs_amos(self): + self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305) + self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305) + + def test_ticket_623(self): + assert_allclose(special.jv(3, 4), 0.43017147387562193) + assert_allclose(special.jv(301, 1300), 0.0183487151115275) + assert_allclose(special.jv(301, 1296.0682), -0.0224174325312048) + + def test_ticket_853(self): + """Negative-order Bessels""" + # cephes + assert_allclose(special.jv(-1, 1), -0.4400505857449335) + assert_allclose(special.jv(-2, 1), 0.1149034849319005) + assert_allclose(special.yv(-1, 1), 0.7812128213002887) + assert_allclose(special.yv(-2, 1), -1.650682606816255) + assert_allclose(special.iv(-1, 1), 0.5651591039924851) + assert_allclose(special.iv(-2, 1), 0.1357476697670383) + assert_allclose(special.kv(-1, 1), 0.6019072301972347) + assert_allclose(special.kv(-2, 1), 1.624838898635178) + assert_allclose(special.jv(-0.5, 1), 0.43109886801837607952) + assert_allclose(special.yv(-0.5, 1), 0.6713967071418031) + assert_allclose(special.iv(-0.5, 1), 1.231200214592967) + assert_allclose(special.kv(-0.5, 1), 0.4610685044478945) + # amos + assert_allclose(special.jv(-1, 1+0j), -0.4400505857449335) + assert_allclose(special.jv(-2, 1+0j), 0.1149034849319005) + assert_allclose(special.yv(-1, 1+0j), 0.7812128213002887) + assert_allclose(special.yv(-2, 1+0j), -1.650682606816255) + + assert_allclose(special.iv(-1, 1+0j), 0.5651591039924851) + assert_allclose(special.iv(-2, 1+0j), 0.1357476697670383) + assert_allclose(special.kv(-1, 1+0j), 0.6019072301972347) + assert_allclose(special.kv(-2, 1+0j), 1.624838898635178) + + assert_allclose(special.jv(-0.5, 1+0j), 0.43109886801837607952) + assert_allclose(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j) + assert_allclose(special.yv(-0.5, 1+0j), 0.6713967071418031) + assert_allclose(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j) + + assert_allclose(special.iv(-0.5, 1+0j), 1.231200214592967) + assert_allclose(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j) + assert_allclose(special.kv(-0.5, 1+0j), 0.4610685044478945) + assert_allclose(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j) + + assert_allclose(special.jve(-0.5,1+0.3j), 
special.jv(-0.5, 1+0.3j)*exp(-0.3)) + assert_allclose(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3)) + assert_allclose(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3)) + assert_allclose(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j)) + + assert_allclose(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j)) + assert_allclose(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j)) + + def test_ticket_854(self): + """Real-valued Bessel domains""" + assert_(isnan(special.jv(0.5, -1))) + assert_(isnan(special.iv(0.5, -1))) + assert_(isnan(special.yv(0.5, -1))) + assert_(isnan(special.yv(1, -1))) + assert_(isnan(special.kv(0.5, -1))) + assert_(isnan(special.kv(1, -1))) + assert_(isnan(special.jve(0.5, -1))) + assert_(isnan(special.ive(0.5, -1))) + assert_(isnan(special.yve(0.5, -1))) + assert_(isnan(special.yve(1, -1))) + assert_(isnan(special.kve(0.5, -1))) + assert_(isnan(special.kve(1, -1))) + assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1)) + assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1)) + + def test_gh_7909(self): + assert_(special.kv(1.5, 0) == np.inf) + assert_(special.kve(1.5, 0) == np.inf) + + def test_ticket_503(self): + """Real-valued Bessel I overflow""" + assert_allclose(special.iv(1, 700), 1.528500390233901e302) + assert_allclose(special.iv(1000, 1120), 1.301564549405821e301) + + def test_iv_hyperg_poles(self): + assert_allclose(special.iv(-0.5, 1), 1.231200214592967) + + def iv_series(self, v, z, n=200): + k = arange(0, n).astype(float_) + r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1) + r[isnan(r)] = inf + r = exp(r) + err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10 + return r.sum(), err + + def test_i0_series(self): + for z in [1., 10., 200.5]: + value, err = self.iv_series(0, z) + assert_allclose(special.i0(z), value, atol=err, err_msg=z) + + def test_i1_series(self): + for z in [1., 10., 200.5]: + value, err = self.iv_series(1, z) + assert_allclose(special.i1(z), value, atol=err, err_msg=z) + + def test_iv_series(self): + for v in [-20., -10., -1., 0., 1., 12.49, 120.]: + for z in [1., 10., 200.5, -1+2j]: + value, err = self.iv_series(v, z) + assert_allclose(special.iv(v, z), value, atol=err, err_msg=(v, z)) + + def test_i0(self): + values = [[0.0, 1.0], + [1e-10, 1.0], + [0.1, 0.9071009258], + [0.5, 0.6450352706], + [1.0, 0.4657596077], + [2.5, 0.2700464416], + [5.0, 0.1835408126], + [20.0, 0.0897803119], + ] + for i, (x, v) in enumerate(values): + cv = special.i0(x) * exp(-x) + assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) + + def test_i0e(self): + oize = special.i0e(.1) + oizer = special.ive(0,.1) + assert_almost_equal(oize,oizer,8) + + def test_i1(self): + values = [[0.0, 0.0], + [1e-10, 0.4999999999500000e-10], + [0.1, 0.0452984468], + [0.5, 0.1564208032], + [1.0, 0.2079104154], + [5.0, 0.1639722669], + [20.0, 0.0875062222], + ] + for i, (x, v) in enumerate(values): + cv = special.i1(x) * exp(-x) + assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) + + def test_i1e(self): + oi1e = special.i1e(.1) + oi1er = special.ive(1,.1) + assert_almost_equal(oi1e,oi1er,8) + + def test_iti0k0(self): + iti0 = array(special.iti0k0(5)) + assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5) + + def test_it2i0k0(self): + it2k = special.it2i0k0(.1) + assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6) + + def test_iv(self): + iv1 = special.iv(0,.1)*exp(-.1) 
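+        # exp(-x)*iv(0, x) at x = 0.1, i.e. the exponentially scaled value
+        # ive(0, 0.1); cf. test_ive and the test_i0 table above.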
+        assert_almost_equal(iv1,0.90710092578230106,10)
+
+    def test_negv_ive(self):
+        assert_equal(special.ive(3,2), special.ive(-3,2))
+
+    def test_ive(self):
+        ive1 = special.ive(0,.1)
+        iv1 = special.iv(0,.1)*exp(-.1)
+        assert_almost_equal(ive1,iv1,10)
+
+    def test_ivp0(self):
+        assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
+
+    def test_ivp(self):
+        y = (special.iv(0,2) + special.iv(2,2))/2
+        x = special.ivp(1,2)
+        assert_almost_equal(x,y,10)
+
+
+class TestLaguerre(object):
+    def test_laguerre(self):
+        lag0 = special.laguerre(0)
+        lag1 = special.laguerre(1)
+        lag2 = special.laguerre(2)
+        lag3 = special.laguerre(3)
+        lag4 = special.laguerre(4)
+        lag5 = special.laguerre(5)
+        assert_array_almost_equal(lag0.c,[1],13)
+        assert_array_almost_equal(lag1.c,[-1,1],13)
+        assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
+        assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
+        assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
+        assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
+
+    def test_genlaguerre(self):
+        k = 5*np.random.random() - 0.9
+        lag0 = special.genlaguerre(0,k)
+        lag1 = special.genlaguerre(1,k)
+        lag2 = special.genlaguerre(2,k)
+        lag3 = special.genlaguerre(3,k)
+        assert_equal(lag0.c,[1])
+        assert_equal(lag1.c,[-1,k+1])
+        assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
+        assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
+
+
+# Base polynomials come from Abramowitz and Stegun
+class TestLegendre(object):
+    def test_legendre(self):
+        leg0 = special.legendre(0)
+        leg1 = special.legendre(1)
+        leg2 = special.legendre(2)
+        leg3 = special.legendre(3)
+        leg4 = special.legendre(4)
+        leg5 = special.legendre(5)
+        assert_equal(leg0.c, [1])
+        assert_equal(leg1.c, [1,0])
+        assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
+        assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
+        assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
+        assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
+
+
+class TestLambda(object):
+    def test_lmbda(self):
+        lam = special.lmbda(1,.1)
+        lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
+                array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
+        assert_array_almost_equal(lam,lamr,8)
+
+
+class TestLog1p(object):
+    def test_log1p(self):
+        l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
+        l1prl = (log(11), log(12), log(13))
+        assert_array_almost_equal(l1p,l1prl,8)
+
+    def test_log1pmore(self):
+        l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
+        l1pmrl = (log(2),log(2.1),log(2.2))
+        assert_array_almost_equal(l1pm,l1pmrl,8)
+
+
+class TestLegendreFunctions(object):
+    def test_clpmn(self):
+        z = 0.5+0.3j
+        clp = special.clpmn(2, 2, z, 3)
+        assert_array_almost_equal(clp,
+                   (array([[1.0000, z, 0.5*(3*z*z-1)],
+                           [0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
+                           [0.0000, 0.0000, 3*(z*z-1)]]),
+                    array([[0.0000, 1.0000, 3*z],
+                           [0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
+                           [0.0000, 0.0000, 6*z]])),
+                   7)
+
+    def test_clpmn_close_to_real_2(self):
+        eps = 1e-10
+        m = 1
+        n = 3
+        x = 0.5
+        clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
+        clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
+        assert_array_almost_equal(array([clp_plus, clp_minus]),
+                                  array([special.lpmv(m, n, x),
+                                         special.lpmv(m, n, x)]),
+                                  7)
+
+    def test_clpmn_close_to_real_3(self):
+        eps = 1e-10
+        m = 1
+        n = 3
+        x = 0.5
+        clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
+        clp_minus = special.clpmn(m, n,
x-1j*eps, 3)[0][m, n] + assert_array_almost_equal(array([clp_plus, clp_minus]), + array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi), + special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]), + 7) + + def test_clpmn_across_unit_circle(self): + eps = 1e-7 + m = 1 + n = 1 + x = 1j + for type in [2, 3]: + assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n], + special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6) + + def test_inf(self): + for z in (1, -1): + for n in range(4): + for m in range(1, n): + lp = special.clpmn(m, n, z) + assert_(np.isinf(lp[1][1,1:]).all()) + lp = special.lpmn(m, n, z) + assert_(np.isinf(lp[1][1,1:]).all()) + + def test_deriv_clpmn(self): + # data inside and outside of the unit circle + zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j, + 1+1j, -1+1j, -1-1j, 1-1j] + m = 2 + n = 3 + for type in [2, 3]: + for z in zvals: + for h in [1e-3, 1e-3j]: + approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0] + - special.clpmn(m, n, z-0.5*h, type)[0])/h + assert_allclose(special.clpmn(m, n, z, type)[1], + approx_derivative, + rtol=1e-4) + + def test_lpmn(self): + lp = special.lpmn(0,2,.5) + assert_array_almost_equal(lp,(array([[1.00000, + 0.50000, + -0.12500]]), + array([[0.00000, + 1.00000, + 1.50000]])),4) + + def test_lpn(self): + lpnf = special.lpn(2,.5) + assert_array_almost_equal(lpnf,(array([1.00000, + 0.50000, + -0.12500]), + array([0.00000, + 1.00000, + 1.50000])),4) + + def test_lpmv(self): + lp = special.lpmv(0,2,.5) + assert_almost_equal(lp,-0.125,7) + lp = special.lpmv(0,40,.001) + assert_almost_equal(lp,0.1252678976534484,7) + + # XXX: this is outside the domain of the current implementation, + # so ensure it returns a NaN rather than a wrong answer. + olderr = np.seterr(all='ignore') + try: + lp = special.lpmv(-1,-1,.001) + finally: + np.seterr(**olderr) + assert_(lp != 0 or np.isnan(lp)) + + def test_lqmn(self): + lqmnf = special.lqmn(0,2,.5) + lqf = special.lqn(2,.5) + assert_array_almost_equal(lqmnf[0][0],lqf[0],4) + assert_array_almost_equal(lqmnf[1][0],lqf[1],4) + + def test_lqmn_gt1(self): + """algorithm for real arguments changes at 1.0001 + test against analytical result for m=2, n=1 + """ + x0 = 1.0001 + delta = 0.00002 + for x in (x0-delta, x0+delta): + lq = special.lqmn(2, 1, x)[0][-1, -1] + expected = 2/(x*x-1) + assert_almost_equal(lq, expected) + + def test_lqmn_shape(self): + a, b = special.lqmn(4, 4, 1.1) + assert_equal(a.shape, (5, 5)) + assert_equal(b.shape, (5, 5)) + + a, b = special.lqmn(4, 0, 1.1) + assert_equal(a.shape, (5, 1)) + assert_equal(b.shape, (5, 1)) + + def test_lqn(self): + lqf = special.lqn(2,.5) + assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]), + array([1.3333, 1.216, -0.8427])),4) + + +class TestMathieu(object): + + def test_mathieu_a(self): + pass + + def test_mathieu_even_coef(self): + mc = special.mathieu_even_coef(2,5) + # Q not defined broken and cannot figure out proper reporting order + + def test_mathieu_odd_coef(self): + # same problem as above + pass + + +class TestFresnelIntegral(object): + + def test_modfresnelp(self): + pass + + def test_modfresnelm(self): + pass + + +class TestOblCvSeq(object): + def test_obl_cv_seq(self): + obl = special.obl_cv_seq(0,3,1) + assert_array_almost_equal(obl,array([-0.348602, + 1.393206, + 5.486800, + 11.492120]),5) + + +class TestParabolicCylinder(object): + def test_pbdn_seq(self): + pb = special.pbdn_seq(1,.1) + assert_array_almost_equal(pb,(array([0.9975, + 0.0998]), + array([-0.0499, + 0.9925])),4) + + def test_pbdv(self): + pbv = special.pbdv(1,.2) + derrl 
= 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0] + + def test_pbdv_seq(self): + pbn = special.pbdn_seq(1,.1) + pbv = special.pbdv_seq(1,.1) + assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4) + + def test_pbdv_points(self): + # simple case + eta = np.linspace(-10, 10, 5) + z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta) + assert_allclose(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14) + + # some points + assert_allclose(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12) + assert_allclose(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12) + + def test_pbdv_gradient(self): + x = np.linspace(-4, 4, 8)[:,None] + eta = np.linspace(-10, 10, 5)[None,:] + + p = special.pbdv(eta, x) + eps = 1e-7 + 1e-7*abs(x) + dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2. + assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6) + + def test_pbvv_gradient(self): + x = np.linspace(-4, 4, 8)[:,None] + eta = np.linspace(-10, 10, 5)[None,:] + + p = special.pbvv(eta, x) + eps = 1e-7 + 1e-7*abs(x) + dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2. + assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6) + + +class TestPolygamma(object): + # from Table 6.2 (pg. 271) of A&S + def test_polygamma(self): + poly2 = special.polygamma(2,1) + poly3 = special.polygamma(3,1) + assert_almost_equal(poly2,-2.4041138063,10) + assert_almost_equal(poly3,6.4939394023,10) + + # Test polygamma(0, x) == psi(x) + x = [2, 3, 1.1e14] + assert_almost_equal(special.polygamma(0, x), special.psi(x)) + + # Test broadcasting + n = [0, 1, 2] + x = [0.5, 1.5, 2.5] + expected = [-1.9635100260214238, 0.93480220054467933, + -0.23620405164172739] + assert_almost_equal(special.polygamma(n, x), expected) + expected = np.row_stack([expected]*2) + assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)), + expected) + assert_almost_equal(special.polygamma(np.row_stack([n]*2), x), + expected) + + +class TestProCvSeq(object): + def test_pro_cv_seq(self): + prol = special.pro_cv_seq(0,3,1) + assert_array_almost_equal(prol,array([0.319000, + 2.593084, + 6.533471, + 12.514462]),5) + + +class TestPsi(object): + def test_psi(self): + ps = special.psi(1) + assert_almost_equal(ps,-0.57721566490153287,8) + + +class TestRadian(object): + def test_radian(self): + rad = special.radian(90,0,0) + assert_almost_equal(rad,pi/2.0,5) + + def test_radianmore(self): + rad1 = special.radian(90,1,60) + assert_almost_equal(rad1,pi/2+0.0005816135199345904,5) + + +class TestRiccati(object): + def test_riccati_jn(self): + N, x = 2, 0.2 + S = np.empty((N, N)) + for n in range(N): + j = special.spherical_jn(n, x) + jp = special.spherical_jn(n, x, derivative=True) + S[0,n] = x*j + S[1,n] = x*jp + j + assert_array_almost_equal(S, special.riccati_jn(n, x), 8) + + def test_riccati_yn(self): + N, x = 2, 0.2 + C = np.empty((N, N)) + for n in range(N): + y = special.spherical_yn(n, x) + yp = special.spherical_yn(n, x, derivative=True) + C[0,n] = x*y + C[1,n] = x*yp + y + assert_array_almost_equal(C, special.riccati_yn(n, x), 8) + + +class TestRound(object): + def test_round(self): + rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6)))) + + # Note: According to the documentation, scipy.special.round is + # supposed to round to the nearest even number if the fractional + # part is exactly 0.5. On some platforms, this does not appear + # to work and thus this test may fail. However, this unit test is + # correctly written. 
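+        # For example, under round-half-to-even special.round(10.5) gives
+        # 10.0 while special.round(11.5) gives 12.0, which is why rndrl
+        # below expects 10 (not 11) for the 10.5 input.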
+ rndrl = (10,10,10,11) + assert_array_equal(rnd,rndrl) + + +def test_sph_harm(): + # Tests derived from tables in + # https://en.wikipedia.org/wiki/Table_of_spherical_harmonics + sh = special.sph_harm + pi = np.pi + exp = np.exp + sqrt = np.sqrt + sin = np.sin + cos = np.cos + assert_array_almost_equal(sh(0,0,0,0), + 0.5/sqrt(pi)) + assert_array_almost_equal(sh(-2,2,0.,pi/4), + 0.25*sqrt(15./(2.*pi)) * + (sin(pi/4))**2.) + assert_array_almost_equal(sh(-2,2,0.,pi/2), + 0.25*sqrt(15./(2.*pi))) + assert_array_almost_equal(sh(2,2,pi,pi/2), + 0.25*sqrt(15/(2.*pi)) * + exp(0+2.*pi*1j)*sin(pi/2.)**2.) + assert_array_almost_equal(sh(2,4,pi/4.,pi/3.), + (3./8.)*sqrt(5./(2.*pi)) * + exp(0+2.*pi/4.*1j) * + sin(pi/3.)**2. * + (7.*cos(pi/3.)**2.-1)) + assert_array_almost_equal(sh(4,4,pi/8.,pi/6.), + (3./16.)*sqrt(35./(2.*pi)) * + exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.) + + +def test_sph_harm_ufunc_loop_selection(): + # see https://github.com/scipy/scipy/issues/4895 + dt = np.dtype(np.complex128) + assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt) + assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt) + assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt) + assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt) + assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt) + assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt) + + +class TestStruve(object): + def _series(self, v, z, n=100): + """Compute Struve function & error estimate from its power series.""" + k = arange(0, n) + r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5) + err = abs(r).max() * finfo(float_).eps * n + return r.sum(), err + + def test_vs_series(self): + """Check Struve function versus its power series""" + for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]: + for z in [1, 10, 19, 21, 30]: + value, err = self._series(v, z) + assert_allclose(special.struve(v, z), value, rtol=0, atol=err), (v, z) + + def test_some_values(self): + assert_allclose(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7) + assert_allclose(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8) + assert_allclose(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12) + assert_allclose(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11) + assert_equal(special.struve(-12, -41), -special.struve(-12, 41)) + assert_equal(special.struve(+12, -41), -special.struve(+12, 41)) + assert_equal(special.struve(-11, -41), +special.struve(-11, 41)) + assert_equal(special.struve(+11, -41), +special.struve(+11, 41)) + + assert_(isnan(special.struve(-7.1, -1))) + assert_(isnan(special.struve(-10.1, -1))) + + def test_regression_679(self): + """Regression test for #679""" + assert_allclose(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8)) + assert_allclose(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8)) + assert_allclose(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8)) + + +def test_chi2_smalldf(): + assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110) + + +def test_ch2_inf(): + assert_equal(special.chdtr(0.7,np.inf), 1.0) + + +def test_chi2c_smalldf(): + assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110) + + +def test_chi2_inv_smalldf(): + assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3) + + +def test_agm_simple(): + rtol = 1e-13 + + # Gauss's constant + assert_allclose(1/special.agm(1, np.sqrt(2)), 0.834626841674073186, + rtol=rtol) + + # These values were computed using Wolfram Alpha, with the + # function ArithmeticGeometricMean[a, b]. 
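+    # (The AGM iterates a, b <- ((a + b)/2, sqrt(a*b)) until the two values
+    # agree; starting from (1, 3) the pairs (2, 1.7320...), (1.8660...,
+    # 1.8612...) converge quadratically to the agm13 value below.)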
+ agm13 = 1.863616783244897 + agm15 = 2.604008190530940 + agm35 = 3.936235503649555 + assert_allclose(special.agm([[1], [3]], [1, 3, 5]), + [[1, agm13, agm15], + [agm13, 3, agm35]], rtol=rtol) + + # Computed by the iteration formula using mpmath, + # with mpmath.mp.prec = 1000: + agm12 = 1.4567910310469068 + assert_allclose(special.agm(1, 2), agm12, rtol=rtol) + assert_allclose(special.agm(2, 1), agm12, rtol=rtol) + assert_allclose(special.agm(-1, -2), -agm12, rtol=rtol) + assert_allclose(special.agm(24, 6), 13.458171481725614, rtol=rtol) + assert_allclose(special.agm(13, 123456789.5), 11111458.498599306, + rtol=rtol) + assert_allclose(special.agm(1e30, 1), 2.229223055945383e+28, rtol=rtol) + assert_allclose(special.agm(1e-22, 1), 0.030182566420169886, rtol=rtol) + assert_allclose(special.agm(1e150, 1e180), 2.229223055945383e+178, + rtol=rtol) + assert_allclose(special.agm(1e180, 1e-150), 2.0634722510162677e+177, + rtol=rtol) + assert_allclose(special.agm(1e-150, 1e-170), 3.3112619670463756e-152, + rtol=rtol) + fi = np.finfo(1.0) + assert_allclose(special.agm(fi.tiny, fi.max), 1.9892072050015473e+305, + rtol=rtol) + assert_allclose(special.agm(0.75*fi.max, fi.max), 1.564904312298045e+308, + rtol=rtol) + assert_allclose(special.agm(fi.tiny, 3*fi.tiny), 4.1466849866735005e-308, + rtol=rtol) + + # zero, nan and inf cases. + assert_equal(special.agm(0, 0), 0) + assert_equal(special.agm(99, 0), 0) + + assert_equal(special.agm(-1, 10), np.nan) + assert_equal(special.agm(0, np.inf), np.nan) + assert_equal(special.agm(np.inf, 0), np.nan) + assert_equal(special.agm(0, -np.inf), np.nan) + assert_equal(special.agm(-np.inf, 0), np.nan) + assert_equal(special.agm(np.inf, -np.inf), np.nan) + assert_equal(special.agm(-np.inf, np.inf), np.nan) + assert_equal(special.agm(1, np.nan), np.nan) + assert_equal(special.agm(np.nan, -1), np.nan) + + assert_equal(special.agm(1, np.inf), np.inf) + assert_equal(special.agm(np.inf, 1), np.inf) + assert_equal(special.agm(-1, -np.inf), -np.inf) + assert_equal(special.agm(-np.inf, -1), -np.inf) + + +def test_legacy(): + # Legacy behavior: truncating arguments to integers + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "floating point number truncated to an integer") + assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3)) + assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3)) + assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3)) + assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3)) + assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3)) + assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3)) + assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3)) + assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3)) + assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3)) + assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3)) + assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3)) + assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3)) + assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3)) + assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3)) + + +@with_special_errors +def test_error_raising(): + assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j) + + +def test_xlogy(): + def xfunc(x, y): + with np.errstate(invalid='ignore'): + if x == 0 and not np.isnan(y): + return x + else: + return x*np.log(y) + + z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float) + z2 = np.r_[z1, [(0, 1j), (1, 
1j)]] + + w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1]) + assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13) + w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1]) + assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13) + + +def test_xlog1py(): + def xfunc(x, y): + with np.errstate(invalid='ignore'): + if x == 0 and not np.isnan(y): + return x + else: + return x * np.log1p(y) + + z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0), + (1, 1e-30)], dtype=float) + w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1]) + assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13) + + +def test_entr(): + def xfunc(x): + if x < 0: + return -np.inf + else: + return -special.xlogy(x, x) + values = (0, 0.5, 1.0, np.inf) + signs = [-1, 1] + arr = [] + for sgn, v in itertools.product(signs, values): + arr.append(sgn * v) + z = np.array(arr, dtype=float) + w = np.vectorize(xfunc, otypes=[np.float64])(z) + assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13) + + +def test_kl_div(): + def xfunc(x, y): + if x < 0 or y < 0 or (y == 0 and x != 0): + # extension of natural domain to preserve convexity + return np.inf + elif np.isposinf(x) or np.isposinf(y): + # limits within the natural domain + return np.inf + elif x == 0: + return y + else: + return special.xlogy(x, x/y) - x + y + values = (0, 0.5, 1.0) + signs = [-1, 1] + arr = [] + for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values): + arr.append((sgna*va, sgnb*vb)) + z = np.array(arr, dtype=float) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13) + + +def test_rel_entr(): + def xfunc(x, y): + if x > 0 and y > 0: + return special.xlogy(x, x/y) + elif x == 0 and y >= 0: + return 0 + else: + return np.inf + values = (0, 0.5, 1.0) + signs = [-1, 1] + arr = [] + for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values): + arr.append((sgna*va, sgnb*vb)) + z = np.array(arr, dtype=float) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13) + + +def test_huber(): + assert_equal(special.huber(-1, 1.5), np.inf) + assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5)) + assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2)) + + def xfunc(delta, r): + if delta < 0: + return np.inf + elif np.abs(r) < delta: + return 0.5 * np.square(r) + else: + return delta * (np.abs(r) - 0.5 * delta) + + z = np.random.randn(10, 2) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13) + + +def test_pseudo_huber(): + def xfunc(delta, r): + if delta < 0: + return np.inf + elif (not delta) or (not r): + return 0 + else: + return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1) + + z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]]) + w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1]) + assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_basic.pyc new file mode 100644 index 0000000..c4c9e1e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_basic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_boxcox.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_boxcox.py new file mode 100644 index 
0000000..992e0af --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_boxcox.py @@ -0,0 +1,108 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_equal, assert_almost_equal, assert_allclose +from scipy.special import boxcox, boxcox1p, inv_boxcox, inv_boxcox1p + + +# There are more tests of boxcox and boxcox1p in test_mpmath.py. + +def test_boxcox_basic(): + x = np.array([0.5, 1, 2, 4]) + + # lambda = 0 => y = log(x) + y = boxcox(x, 0) + assert_almost_equal(y, np.log(x)) + + # lambda = 1 => y = x - 1 + y = boxcox(x, 1) + assert_almost_equal(y, x - 1) + + # lambda = 2 => y = 0.5*(x**2 - 1) + y = boxcox(x, 2) + assert_almost_equal(y, 0.5*(x**2 - 1)) + + # x = 0 and lambda > 0 => y = -1 / lambda + lam = np.array([0.5, 1, 2]) + y = boxcox(0, lam) + assert_almost_equal(y, -1.0 / lam) + +def test_boxcox_underflow(): + x = 1 + 1e-15 + lmbda = 1e-306 + y = boxcox(x, lmbda) + assert_allclose(y, np.log(x), rtol=1e-14) + + +def test_boxcox_nonfinite(): + # x < 0 => y = nan + x = np.array([-1, -1, -0.5]) + y = boxcox(x, [0.5, 2.0, -1.5]) + assert_equal(y, np.array([np.nan, np.nan, np.nan])) + + # x = 0 and lambda <= 0 => y = -inf + x = 0 + y = boxcox(x, [-2.5, 0]) + assert_equal(y, np.array([-np.inf, -np.inf])) + + +def test_boxcox1p_basic(): + x = np.array([-0.25, -1e-20, 0, 1e-20, 0.25, 1, 3]) + + # lambda = 0 => y = log(1+x) + y = boxcox1p(x, 0) + assert_almost_equal(y, np.log1p(x)) + + # lambda = 1 => y = x + y = boxcox1p(x, 1) + assert_almost_equal(y, x) + + # lambda = 2 => y = 0.5*((1+x)**2 - 1) = 0.5*x*(2 + x) + y = boxcox1p(x, 2) + assert_almost_equal(y, 0.5*x*(2 + x)) + + # x = -1 and lambda > 0 => y = -1 / lambda + lam = np.array([0.5, 1, 2]) + y = boxcox1p(-1, lam) + assert_almost_equal(y, -1.0 / lam) + + +def test_boxcox1p_underflow(): + x = np.array([1e-15, 1e-306]) + lmbda = np.array([1e-306, 1e-18]) + y = boxcox1p(x, lmbda) + assert_allclose(y, np.log1p(x), rtol=1e-14) + + +def test_boxcox1p_nonfinite(): + # x < -1 => y = nan + x = np.array([-2, -2, -1.5]) + y = boxcox1p(x, [0.5, 2.0, -1.5]) + assert_equal(y, np.array([np.nan, np.nan, np.nan])) + + # x = -1 and lambda <= 0 => y = -inf + x = -1 + y = boxcox1p(x, [-2.5, 0]) + assert_equal(y, np.array([-np.inf, -np.inf])) + + +def test_inv_boxcox(): + x = np.array([0., 1., 2.]) + lam = np.array([0., 1., 2.]) + y = boxcox(x, lam) + x2 = inv_boxcox(y, lam) + assert_almost_equal(x, x2) + + x = np.array([0., 1., 2.]) + lam = np.array([0., 1., 2.]) + y = boxcox1p(x, lam) + x2 = inv_boxcox1p(y, lam) + assert_almost_equal(x, x2) + + +def test_inv_boxcox1p_underflow(): + x = 1e-15 + lam = 1e-306 + y = inv_boxcox1p(x, lam) + assert_allclose(y, x, rtol=1e-14) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_boxcox.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_boxcox.pyc new file mode 100644 index 0000000..40871cd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_boxcox.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cdflib.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cdflib.py new file mode 100644 index 0000000..b6b67a8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cdflib.py @@ -0,0 +1,409 @@ +""" +Test cdflib functions versus mpmath, if available. 
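+
+(The mpmath-based tests below are skipped when mpmath is unavailable;
+see the MissingModule import fallback and the check_version decorator.)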
+
+The following functions still need tests:
+
+- ncfdtr
+- ncfdtri
+- ncfdtridfn
+- ncfdtridfd
+- ncfdtrinc
+- nbdtrik
+- nbdtrin
+- nrdtrimn
+- nrdtrisd
+- pdtrik
+- nctdtr
+- nctdtrit
+- nctdtridf
+- nctdtrinc
+
+"""
+from __future__ import division, print_function, absolute_import
+
+import itertools
+
+import numpy as np
+from numpy.testing import assert_equal
+import pytest
+
+import scipy.special as sp
+from scipy._lib.six import with_metaclass
+from scipy.special._testutils import (
+    MissingModule, check_version, FuncData)
+from scipy.special._mptestutils import (
+    Arg, IntArg, get_args, mpf2float, assert_mpmath_equal)
+
+try:
+    import mpmath
+except ImportError:
+    mpmath = MissingModule('mpmath')
+
+
+class ProbArg(object):
+    """Generate a set of probabilities on [0, 1]."""
+    def __init__(self):
+        # Include the endpoints for compatibility with Arg et al.
+        self.a = 0
+        self.b = 1
+
+    def values(self, n):
+        """Return an array containing approximately n numbers."""
+        m = max(1, n//3)
+        v1 = np.logspace(-30, np.log10(0.3), m)
+        v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:]
+        v3 = 1 - np.logspace(np.log10(0.3), -15, m)
+        v = np.r_[v1, v2, v3]
+        return np.unique(v)
+
+
+class EndpointFilter(object):
+    def __init__(self, a, b, rtol, atol):
+        self.a = a
+        self.b = b
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self, x):
+        mask1 = np.abs(x - self.a) < self.rtol*np.abs(self.a) + self.atol
+        mask2 = np.abs(x - self.b) < self.rtol*np.abs(self.b) + self.atol
+        return np.where(mask1 | mask2, False, True)
+
+
+class _CDFData(object):
+    def __init__(self, spfunc, mpfunc, index, argspec, spfunc_first=True,
+                 dps=20, n=5000, rtol=None, atol=None,
+                 endpt_rtol=None, endpt_atol=None):
+        self.spfunc = spfunc
+        self.mpfunc = mpfunc
+        self.index = index
+        self.argspec = argspec
+        self.spfunc_first = spfunc_first
+        self.dps = dps
+        self.n = n
+        self.rtol = rtol
+        self.atol = atol
+
+        if not isinstance(argspec, list):
+            self.endpt_rtol = None
+            self.endpt_atol = None
+        elif endpt_rtol is not None or endpt_atol is not None:
+            if isinstance(endpt_rtol, list):
+                self.endpt_rtol = endpt_rtol
+            else:
+                self.endpt_rtol = [endpt_rtol]*len(self.argspec)
+            if isinstance(endpt_atol, list):
+                self.endpt_atol = endpt_atol
+            else:
+                self.endpt_atol = [endpt_atol]*len(self.argspec)
+        else:
+            self.endpt_rtol = None
+            self.endpt_atol = None
+
+    def idmap(self, *args):
+        if self.spfunc_first:
+            res = self.spfunc(*args)
+            if np.isnan(res):
+                return np.nan
+            args = list(args)
+            args[self.index] = res
+            with mpmath.workdps(self.dps):
+                res = self.mpfunc(*tuple(args))
+                # Imaginary parts are spurious
+                res = mpf2float(res.real)
+        else:
+            with mpmath.workdps(self.dps):
+                res = self.mpfunc(*args)
+                res = mpf2float(res.real)
+            args = list(args)
+            args[self.index] = res
+            res = self.spfunc(*tuple(args))
+        return res
+
+    def get_param_filter(self):
+        if self.endpt_rtol is None and self.endpt_atol is None:
+            return None
+
+        filters = []
+        for rtol, atol, spec in zip(self.endpt_rtol, self.endpt_atol, self.argspec):
+            if rtol is None and atol is None:
+                filters.append(None)
+                continue
+            elif rtol is None:
+                rtol = 0.0
+            elif atol is None:
+                atol = 0.0
+
+            filters.append(EndpointFilter(spec.a, spec.b, rtol, atol))
+        return filters
+
+    def check(self):
+        # Generate values for the arguments
+        args = get_args(self.argspec, self.n)
+        param_filter = self.get_param_filter()
+        param_columns = tuple(range(args.shape[1]))
+        result_columns = args.shape[1]
+        args = np.hstack((args,
args[:,self.index].reshape(args.shape[0], 1))) + FuncData(self.idmap, args, + param_columns=param_columns, result_columns=result_columns, + rtol=self.rtol, atol=self.atol, vectorized=False, + param_filter=param_filter).check() + + +def _assert_inverts(*a, **kw): + d = _CDFData(*a, **kw) + d.check() + + +def _binomial_cdf(k, n, p): + k, n, p = mpmath.mpf(k), mpmath.mpf(n), mpmath.mpf(p) + if k <= 0: + return mpmath.mpf(0) + elif k >= n: + return mpmath.mpf(1) + + onemp = mpmath.fsub(1, p, exact=True) + return mpmath.betainc(n - k, k + 1, x2=onemp, regularized=True) + + +def _f_cdf(dfn, dfd, x): + if x < 0: + return mpmath.mpf(0) + dfn, dfd, x = mpmath.mpf(dfn), mpmath.mpf(dfd), mpmath.mpf(x) + ub = dfn*x/(dfn*x + dfd) + res = mpmath.betainc(dfn/2, dfd/2, x2=ub, regularized=True) + return res + + +def _student_t_cdf(df, t, dps=None): + if dps is None: + dps = mpmath.mp.dps + with mpmath.workdps(dps): + df, t = mpmath.mpf(df), mpmath.mpf(t) + fac = mpmath.hyp2f1(0.5, 0.5*(df + 1), 1.5, -t**2/df) + fac *= t*mpmath.gamma(0.5*(df + 1)) + fac /= mpmath.sqrt(mpmath.pi*df)*mpmath.gamma(0.5*df) + return 0.5 + fac + + +def _noncentral_chi_pdf(t, df, nc): + res = mpmath.besseli(df/2 - 1, mpmath.sqrt(nc*t)) + res *= mpmath.exp(-(t + nc)/2)*(t/nc)**(df/4 - 1/2)/2 + return res + + +def _noncentral_chi_cdf(x, df, nc, dps=None): + if dps is None: + dps = mpmath.mp.dps + x, df, nc = mpmath.mpf(x), mpmath.mpf(df), mpmath.mpf(nc) + with mpmath.workdps(dps): + res = mpmath.quad(lambda t: _noncentral_chi_pdf(t, df, nc), [0, x]) + return res + + +def _tukey_lmbda_quantile(p, lmbda): + # For lmbda != 0 + return (p**lmbda - (1 - p)**lmbda)/lmbda + + +@pytest.mark.slow +@check_version(mpmath, '0.19') +class TestCDFlib(object): + + @pytest.mark.xfail(run=False) + def test_bdtrik(self): + _assert_inverts( + sp.bdtrik, + _binomial_cdf, + 0, [ProbArg(), IntArg(1, 1000), ProbArg()], + rtol=1e-4) + + def test_bdtrin(self): + _assert_inverts( + sp.bdtrin, + _binomial_cdf, + 1, [IntArg(1, 1000), ProbArg(), ProbArg()], + rtol=1e-4, endpt_atol=[None, None, 1e-6]) + + def test_btdtria(self): + _assert_inverts( + sp.btdtria, + lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True), + 0, [ProbArg(), Arg(0, 1e2, inclusive_a=False), + Arg(0, 1, inclusive_a=False, inclusive_b=False)], + rtol=1e-6) + + def test_btdtrib(self): + # Use small values of a or mpmath doesn't converge + _assert_inverts( + sp.btdtrib, + lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True), + 1, [Arg(0, 1e2, inclusive_a=False), ProbArg(), + Arg(0, 1, inclusive_a=False, inclusive_b=False)], + rtol=1e-7, endpt_atol=[None, 1e-18, 1e-15]) + + @pytest.mark.xfail(run=False) + def test_fdtridfd(self): + _assert_inverts( + sp.fdtridfd, + _f_cdf, + 1, [IntArg(1, 100), ProbArg(), Arg(0, 100, inclusive_a=False)], + rtol=1e-7) + + def test_gdtria(self): + _assert_inverts( + sp.gdtria, + lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True), + 0, [ProbArg(), Arg(0, 1e3, inclusive_a=False), + Arg(0, 1e4, inclusive_a=False)], rtol=1e-7, + endpt_atol=[None, 1e-7, 1e-10]) + + def test_gdtrib(self): + # Use small values of a and x or mpmath doesn't converge + _assert_inverts( + sp.gdtrib, + lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True), + 1, [Arg(0, 1e2, inclusive_a=False), ProbArg(), + Arg(0, 1e3, inclusive_a=False)], rtol=1e-5) + + def test_gdtrix(self): + _assert_inverts( + sp.gdtrix, + lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True), + 2, [Arg(0, 1e3, inclusive_a=False), Arg(0, 1e3, inclusive_a=False), + ProbArg()], 
rtol=1e-7,
+            endpt_atol=[None, 1e-7, 1e-10])
+
+    def test_stdtr(self):
+        # Ideally the left endpoint for Arg() should be 0.
+        assert_mpmath_equal(
+            sp.stdtr,
+            _student_t_cdf,
+            [IntArg(1, 100), Arg(1e-10, np.inf)], rtol=1e-7)
+
+    @pytest.mark.xfail(run=False)
+    def test_stdtridf(self):
+        _assert_inverts(
+            sp.stdtridf,
+            _student_t_cdf,
+            0, [ProbArg(), Arg()], rtol=1e-7)
+
+    def test_stdtrit(self):
+        _assert_inverts(
+            sp.stdtrit,
+            _student_t_cdf,
+            1, [IntArg(1, 100), ProbArg()], rtol=1e-7,
+            endpt_atol=[None, 1e-10])
+
+    def test_chdtriv(self):
+        _assert_inverts(
+            sp.chdtriv,
+            lambda v, x: mpmath.gammainc(v/2, b=x/2, regularized=True),
+            0, [ProbArg(), IntArg(1, 100)], rtol=1e-4)
+
+    @pytest.mark.xfail(run=False)
+    def test_chndtridf(self):
+        # Use a larger atol since mpmath is doing numerical integration
+        _assert_inverts(
+            sp.chndtridf,
+            _noncentral_chi_cdf,
+            1, [Arg(0, 100, inclusive_a=False), ProbArg(),
+                Arg(0, 100, inclusive_a=False)],
+            n=1000, rtol=1e-4, atol=1e-15)
+
+    @pytest.mark.xfail(run=False)
+    def test_chndtrinc(self):
+        # Use a larger atol since mpmath is doing numerical integration
+        _assert_inverts(
+            sp.chndtrinc,
+            _noncentral_chi_cdf,
+            2, [Arg(0, 100, inclusive_a=False), IntArg(1, 100), ProbArg()],
+            n=1000, rtol=1e-4, atol=1e-15)
+
+    def test_chndtrix(self):
+        # Use a larger atol since mpmath is doing numerical integration
+        _assert_inverts(
+            sp.chndtrix,
+            _noncentral_chi_cdf,
+            0, [ProbArg(), IntArg(1, 100), Arg(0, 100, inclusive_a=False)],
+            n=1000, rtol=1e-4, atol=1e-15,
+            endpt_atol=[1e-6, None, None])
+
+    def test_tklmbda_zero_shape(self):
+        # When lmbda = 0 the CDF has a simple closed form
+        one = mpmath.mpf(1)
+        assert_mpmath_equal(
+            lambda x: sp.tklmbda(x, 0),
+            lambda x: one/(mpmath.exp(-x) + one),
+            [Arg()], rtol=1e-7)
+
+    def test_tklmbda_neg_shape(self):
+        _assert_inverts(
+            sp.tklmbda,
+            _tukey_lmbda_quantile,
+            0, [ProbArg(), Arg(-25, 0, inclusive_b=False)],
+            spfunc_first=False, rtol=1e-5,
+            endpt_atol=[1e-9, 1e-5])
+
+    @pytest.mark.xfail(run=False)
+    def test_tklmbda_pos_shape(self):
+        _assert_inverts(
+            sp.tklmbda,
+            _tukey_lmbda_quantile,
+            0, [ProbArg(), Arg(0, 100, inclusive_a=False)],
+            spfunc_first=False, rtol=1e-5)
+
+
+def test_nonfinite():
+    funcs = [
+        ("btdtria", 3),
+        ("btdtrib", 3),
+        ("bdtrik", 3),
+        ("bdtrin", 3),
+        ("chdtriv", 2),
+        ("chndtr", 3),
+        ("chndtrix", 3),
+        ("chndtridf", 3),
+        ("chndtrinc", 3),
+        ("fdtridfd", 3),
+        ("ncfdtr", 4),
+        ("ncfdtri", 4),
+        ("ncfdtridfn", 4),
+        ("ncfdtridfd", 4),
+        ("ncfdtrinc", 4),
+        ("gdtrix", 3),
+        ("gdtrib", 3),
+        ("gdtria", 3),
+        ("nbdtrik", 3),
+        ("nbdtrin", 3),
+        ("nrdtrimn", 3),
+        ("nrdtrisd", 3),
+        ("pdtrik", 2),
+        ("stdtr", 2),
+        ("stdtrit", 2),
+        ("stdtridf", 2),
+        ("nctdtr", 3),
+        ("nctdtrit", 3),
+        ("nctdtridf", 3),
+        ("nctdtrinc", 3),
+        ("tklmbda", 2),
+    ]
+
+    np.random.seed(1)
+
+    for func, numargs in funcs:
+        func = getattr(sp, func)
+
+        args_choices = [(float(x), np.nan, np.inf, -np.inf) for x in
+                        np.random.rand(numargs)]
+
+        for args in itertools.product(*args_choices):
+            res = func(*args)
+
+            if any(np.isnan(x) for x in args):
+                # NaN inputs should result in NaN output
+                assert_equal(res, np.nan)
+            else:
+                # All other inputs should return something (but not
+                # raise exceptions or cause hangs)
+                pass
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cdflib.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cdflib.pyc
new file mode 100644
index 0000000..9494cae
Binary files /dev/null and
b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cdflib.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cython_special.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cython_special.py new file mode 100644 index 0000000..ea5b494 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cython_special.py @@ -0,0 +1,332 @@ +from __future__ import division, print_function, absolute_import + +from itertools import product + +from numpy.testing import assert_allclose +import pytest + +from scipy import special +from scipy.special import cython_special + + +int_points = [-10, -1, 1, 10] +real_points = [-10.0, -1.0, 1.0, 10.0] +complex_points = [complex(*tup) for tup in product(real_points, repeat=2)] + + +CYTHON_SIGNATURE_MAP = { + 'f': 'float', + 'd': 'double', + 'g': 'long double', + 'F': 'float complex', + 'D': 'double complex', + 'G': 'long double complex', + 'i':'int', + 'l': 'long' +} + + +TEST_POINTS = { + 'f': real_points, + 'd': real_points, + 'g': real_points, + 'F': complex_points, + 'D': complex_points, + 'G': complex_points, + 'i': int_points, + 'l': int_points, +} + + +PARAMS = [ + (special.agm, cython_special.agm, ('dd',), None), + (special.airy, cython_special._airy_pywrap, ('d', 'D'), None), + (special.airye, cython_special._airye_pywrap, ('d', 'D'), None), + (special.bdtr, cython_special.bdtr, ('lld', 'ddd'), None), + (special.bdtrc, cython_special.bdtrc, ('lld', 'ddd'), None), + (special.bdtri, cython_special.bdtri, ('lld', 'ddd'), None), + (special.bdtrik, cython_special.bdtrik, ('ddd',), None), + (special.bdtrin, cython_special.bdtrin, ('ddd',), None), + (special.bei, cython_special.bei, ('d',), None), + (special.beip, cython_special.beip, ('d',), None), + (special.ber, cython_special.ber, ('d',), None), + (special.berp, cython_special.berp, ('d',), None), + (special.besselpoly, cython_special.besselpoly, ('ddd',), None), + (special.beta, cython_special.beta, ('dd',), None), + (special.betainc, cython_special.betainc, ('ddd',), None), + (special.betaincinv, cython_special.betaincinv, ('ddd',), None), + (special.betaln, cython_special.betaln, ('dd',), None), + (special.binom, cython_special.binom, ('dd',), None), + (special.boxcox, cython_special.boxcox, ('dd',), None), + (special.boxcox1p, cython_special.boxcox1p, ('dd',), None), + (special.btdtr, cython_special.btdtr, ('ddd',), None), + (special.btdtri, cython_special.btdtri, ('ddd',), None), + (special.btdtria, cython_special.btdtria, ('ddd',), None), + (special.btdtrib, cython_special.btdtrib, ('ddd',), None), + (special.cbrt, cython_special.cbrt, ('d',), None), + (special.chdtr, cython_special.chdtr, ('dd',), None), + (special.chdtrc, cython_special.chdtrc, ('dd',), None), + (special.chdtri, cython_special.chdtri, ('dd',), None), + (special.chdtriv, cython_special.chdtriv, ('dd',), None), + (special.chndtr, cython_special.chndtr, ('ddd',), None), + (special.chndtridf, cython_special.chndtridf, ('ddd',), None), + (special.chndtrinc, cython_special.chndtrinc, ('ddd',), None), + (special.chndtrix, cython_special.chndtrix, ('ddd',), None), + (special.cosdg, cython_special.cosdg, ('d',), None), + (special.cosm1, cython_special.cosm1, ('d',), None), + (special.cotdg, cython_special.cotdg, ('d',), None), + (special.dawsn, cython_special.dawsn, ('d', 'D'), None), + (special.ellipe, cython_special.ellipe, ('d',), None), + (special.ellipeinc, cython_special.ellipeinc, ('dd',), None), + (special.ellipj, 
cython_special._ellipj_pywrap, ('dd',), None), + (special.ellipkinc, cython_special.ellipkinc, ('dd',), None), + (special.ellipkm1, cython_special.ellipkm1, ('d',), None), + (special.entr, cython_special.entr, ('d',), None), + (special.erf, cython_special.erf, ('d', 'D'), None), + (special.erfc, cython_special.erfc, ('d', 'D'), None), + (special.erfcx, cython_special.erfcx, ('d', 'D'), None), + (special.erfi, cython_special.erfi, ('d', 'D'), None), + (special.eval_chebyc, cython_special.eval_chebyc, ('dd', 'dD', 'ld'), None), + (special.eval_chebys, cython_special.eval_chebys, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + (special.eval_chebyt, cython_special.eval_chebyt, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + (special.eval_chebyu, cython_special.eval_chebyu, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + (special.eval_gegenbauer, cython_special.eval_gegenbauer, ('ddd', 'ddD', 'ldd'), + 'd and l differ for negative int'), + (special.eval_genlaguerre, cython_special.eval_genlaguerre, ('ddd', 'ddD', 'ldd'), + 'd and l differ for negative int'), + (special.eval_hermite, cython_special.eval_hermite, ('ld',), None), + (special.eval_hermitenorm, cython_special.eval_hermitenorm, ('ld',), None), + (special.eval_jacobi, cython_special.eval_jacobi, ('dddd', 'dddD', 'lddd'), + 'd and l differ for negative int'), + (special.eval_laguerre, cython_special.eval_laguerre, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + (special.eval_legendre, cython_special.eval_legendre, ('dd', 'dD', 'ld'), None), + (special.eval_sh_chebyt, cython_special.eval_sh_chebyt, ('dd', 'dD', 'ld'), None), + (special.eval_sh_chebyu, cython_special.eval_sh_chebyu, ('dd', 'dD', 'ld'), + 'd and l differ for negative int'), + (special.eval_sh_jacobi, cython_special.eval_sh_jacobi, ('dddd', 'dddD', 'lddd'), + 'd and l differ for negative int'), + (special.eval_sh_legendre, cython_special.eval_sh_legendre, ('dd', 'dD', 'ld'), None), + (special.exp1, cython_special.exp1, ('d', 'D'), None), + (special.exp10, cython_special.exp10, ('d',), None), + (special.exp2, cython_special.exp2, ('d',), None), + (special.expi, cython_special.expi, ('d', 'D'), None), + (special.expit, cython_special.expit, ('f', 'd', 'g'), None), + (special.expm1, cython_special.expm1, ('d', 'D'), None), + (special.expn, cython_special.expn, ('ld', 'dd'), None), + (special.exprel, cython_special.exprel, ('d',), None), + (special.fdtr, cython_special.fdtr, ('ddd',), None), + (special.fdtrc, cython_special.fdtrc, ('ddd',), None), + (special.fdtri, cython_special.fdtri, ('ddd',), None), + (special.fdtridfd, cython_special.fdtridfd, ('ddd',), None), + (special.fresnel, cython_special._fresnel_pywrap, ('d', 'D'), None), + (special.gamma, cython_special.gamma, ('d', 'D'), None), + (special.gammainc, cython_special.gammainc, ('dd',), None), + (special.gammaincc, cython_special.gammaincc, ('dd',), None), + (special.gammainccinv, cython_special.gammainccinv, ('dd',), None), + (special.gammaincinv, cython_special.gammaincinv, ('dd',), None), + (special.gammaln, cython_special.gammaln, ('d',), None), + (special.gammasgn, cython_special.gammasgn, ('d',), None), + (special.gdtr, cython_special.gdtr, ('ddd',), None), + (special.gdtrc, cython_special.gdtrc, ('ddd',), None), + (special.gdtria, cython_special.gdtria, ('ddd',), None), + (special.gdtrib, cython_special.gdtrib, ('ddd',), None), + (special.gdtrix, cython_special.gdtrix, ('ddd',), None), + (special.hankel1, cython_special.hankel1, ('dD',), None), + 
(special.hankel1e, cython_special.hankel1e, ('dD',), None), + (special.hankel2, cython_special.hankel2, ('dD',), None), + (special.hankel2e, cython_special.hankel2e, ('dD',), None), + (special.huber, cython_special.huber, ('dd',), None), + (special.hyp0f1, cython_special.hyp0f1, ('dd', 'dD'), None), + (special.hyp1f1, cython_special.hyp1f1, ('ddd', 'ddD'), None), + (special.hyp2f1, cython_special.hyp2f1, ('dddd', 'dddD'), None), + (special.hyperu, cython_special.hyperu, ('ddd',), None), + (special.i0, cython_special.i0, ('d',), None), + (special.i0e, cython_special.i0e, ('d',), None), + (special.i1, cython_special.i1, ('d',), None), + (special.i1e, cython_special.i1e, ('d',), None), + (special.inv_boxcox, cython_special.inv_boxcox, ('dd',), None), + (special.inv_boxcox1p, cython_special.inv_boxcox1p, ('dd',), None), + (special.it2i0k0, cython_special._it2i0k0_pywrap, ('d',), None), + (special.it2j0y0, cython_special._it2j0y0_pywrap, ('d',), None), + (special.it2struve0, cython_special.it2struve0, ('d',), None), + (special.itairy, cython_special._itairy_pywrap, ('d',), None), + (special.iti0k0, cython_special._iti0k0_pywrap, ('d',), None), + (special.itj0y0, cython_special._itj0y0_pywrap, ('d',), None), + (special.itmodstruve0, cython_special.itmodstruve0, ('d',), None), + (special.itstruve0, cython_special.itstruve0, ('d',), None), + (special.iv, cython_special.iv, ('dd', 'dD'), None), + (special.ive, cython_special.ive, ('dd', 'dD'), None), + (special.j0, cython_special.j0, ('d',), None), + (special.j1, cython_special.j1, ('d',), None), + (special.jv, cython_special.jv, ('dd', 'dD'), None), + (special.jve, cython_special.jve, ('dd', 'dD'), None), + (special.k0, cython_special.k0, ('d',), None), + (special.k0e, cython_special.k0e, ('d',), None), + (special.k1, cython_special.k1, ('d',), None), + (special.k1e, cython_special.k1e, ('d',), None), + (special.kei, cython_special.kei, ('d',), None), + (special.keip, cython_special.keip, ('d',), None), + (special.kelvin, cython_special._kelvin_pywrap, ('d',), None), + (special.ker, cython_special.ker, ('d',), None), + (special.kerp, cython_special.kerp, ('d',), None), + (special.kl_div, cython_special.kl_div, ('dd',), None), + (special.kn, cython_special.kn, ('ld', 'dd'), None), + (special.kolmogi, cython_special.kolmogi, ('d',), None), + (special.kolmogorov, cython_special.kolmogorov, ('d',), None), + (special.kv, cython_special.kv, ('dd', 'dD'), None), + (special.kve, cython_special.kve, ('dd', 'dD'), None), + (special.log1p, cython_special.log1p, ('d', 'D'), None), + (special.log_ndtr, cython_special.log_ndtr, ('d', 'D'), None), + (special.loggamma, cython_special.loggamma, ('D',), None), + (special.logit, cython_special.logit, ('f', 'd', 'g'), None), + (special.lpmv, cython_special.lpmv, ('ddd',), None), + (special.mathieu_a, cython_special.mathieu_a, ('dd',), None), + (special.mathieu_b, cython_special.mathieu_b, ('dd',), None), + (special.mathieu_cem, cython_special._mathieu_cem_pywrap, ('ddd',), None), + (special.mathieu_modcem1, cython_special._mathieu_modcem1_pywrap, ('ddd',), None), + (special.mathieu_modcem2, cython_special._mathieu_modcem2_pywrap, ('ddd',), None), + (special.mathieu_modsem1, cython_special._mathieu_modsem1_pywrap, ('ddd',), None), + (special.mathieu_modsem2, cython_special._mathieu_modsem2_pywrap, ('ddd',), None), + (special.mathieu_sem, cython_special._mathieu_sem_pywrap, ('ddd',), None), + (special.modfresnelm, cython_special._modfresnelm_pywrap, ('d',), None), + (special.modfresnelp, 
cython_special._modfresnelp_pywrap, ('d',), None), + (special.modstruve, cython_special.modstruve, ('dd',), None), + (special.nbdtr, cython_special.nbdtr, ('lld', 'ddd'), None), + (special.nbdtrc, cython_special.nbdtrc, ('lld', 'ddd'), None), + (special.nbdtri, cython_special.nbdtri, ('lld', 'ddd'), None), + (special.nbdtrik, cython_special.nbdtrik, ('ddd',), None), + (special.nbdtrin, cython_special.nbdtrin, ('ddd',), None), + (special.ncfdtr, cython_special.ncfdtr, ('dddd',), None), + (special.ncfdtri, cython_special.ncfdtri, ('dddd',), None), + (special.ncfdtridfd, cython_special.ncfdtridfd, ('dddd',), None), + (special.ncfdtridfn, cython_special.ncfdtridfn, ('dddd',), None), + (special.ncfdtrinc, cython_special.ncfdtrinc, ('dddd',), None), + (special.nctdtr, cython_special.nctdtr, ('ddd',), None), + (special.nctdtridf, cython_special.nctdtridf, ('ddd',), None), + (special.nctdtrinc, cython_special.nctdtrinc, ('ddd',), None), + (special.nctdtrit, cython_special.nctdtrit, ('ddd',), None), + (special.ndtr, cython_special.ndtr, ('d', 'D'), None), + (special.ndtri, cython_special.ndtri, ('d',), None), + (special.nrdtrimn, cython_special.nrdtrimn, ('ddd',), None), + (special.nrdtrisd, cython_special.nrdtrisd, ('ddd',), None), + (special.obl_ang1, cython_special._obl_ang1_pywrap, ('dddd',), None), + (special.obl_ang1_cv, cython_special._obl_ang1_cv_pywrap, ('ddddd',), None), + (special.obl_cv, cython_special.obl_cv, ('ddd',), None), + (special.obl_rad1, cython_special._obl_rad1_pywrap, ('dddd',), "see gh-6211"), + (special.obl_rad1_cv, cython_special._obl_rad1_cv_pywrap, ('ddddd',), "see gh-6211"), + (special.obl_rad2, cython_special._obl_rad2_pywrap, ('dddd',), "see gh-6211"), + (special.obl_rad2_cv, cython_special._obl_rad2_cv_pywrap, ('ddddd',), "see gh-6211"), + (special.pbdv, cython_special._pbdv_pywrap, ('dd',), None), + (special.pbvv, cython_special._pbvv_pywrap, ('dd',), None), + (special.pbwa, cython_special._pbwa_pywrap, ('dd',), None), + (special.pdtr, cython_special.pdtr, ('ld', 'dd'), None), + (special.pdtrc, cython_special.pdtrc, ('ld', 'dd'), None), + (special.pdtri, cython_special.pdtri, ('ld', 'dd'), None), + (special.pdtrik, cython_special.pdtrik, ('dd',), None), + (special.poch, cython_special.poch, ('dd',), None), + (special.pro_ang1, cython_special._pro_ang1_pywrap, ('dddd',), None), + (special.pro_ang1_cv, cython_special._pro_ang1_cv_pywrap, ('ddddd',), None), + (special.pro_cv, cython_special.pro_cv, ('ddd',), None), + (special.pro_rad1, cython_special._pro_rad1_pywrap, ('dddd',), "see gh-6211"), + (special.pro_rad1_cv, cython_special._pro_rad1_cv_pywrap, ('ddddd',), "see gh-6211"), + (special.pro_rad2, cython_special._pro_rad2_pywrap, ('dddd',), "see gh-6211"), + (special.pro_rad2_cv, cython_special._pro_rad2_cv_pywrap, ('ddddd',), "see gh-6211"), + (special.pseudo_huber, cython_special.pseudo_huber, ('dd',), None), + (special.psi, cython_special.psi, ('d', 'D'), None), + (special.radian, cython_special.radian, ('ddd',), None), + (special.rel_entr, cython_special.rel_entr, ('dd',), None), + (special.rgamma, cython_special.rgamma, ('d', 'D'), None), + (special.round, cython_special.round, ('d',), None), + (special.shichi, cython_special._shichi_pywrap, ('d', 'D'), None), + (special.sici, cython_special._sici_pywrap, ('d', 'D'), None), + (special.sindg, cython_special.sindg, ('d',), None), + (special.smirnov, cython_special.smirnov, ('ld', 'dd'), None), + (special.smirnovi, cython_special.smirnovi, ('ld', 'dd'), None), + (special.spence, cython_special.spence, ('d', 
'D'), None), + (special.sph_harm, cython_special.sph_harm, ('lldd', 'dddd'), None), + (special.stdtr, cython_special.stdtr, ('dd',), None), + (special.stdtridf, cython_special.stdtridf, ('dd',), None), + (special.stdtrit, cython_special.stdtrit, ('dd',), None), + (special.struve, cython_special.struve, ('dd',), None), + (special.tandg, cython_special.tandg, ('d',), None), + (special.tklmbda, cython_special.tklmbda, ('dd',), None), + (special.wofz, cython_special.wofz, ('D',), None), + (special.wrightomega, cython_special.wrightomega, ('D',), None), + (special.xlog1py, cython_special.xlog1py, ('dd', 'DD'), None), + (special.xlogy, cython_special.xlogy, ('dd', 'DD'), None), + (special.y0, cython_special.y0, ('d',), None), + (special.y1, cython_special.y1, ('d',), None), + (special.yn, cython_special.yn, ('ld', 'dd'), None), + (special.yv, cython_special.yv, ('dd', 'dD'), None), + (special.yve, cython_special.yve, ('dd', 'dD'), None), + (special.zetac, cython_special.zetac, ('d',), None), + (special.owens_t, cython_special.owens_t, ('dd',), None) +] + + +IDS = [x[0].__name__ for x in PARAMS] + + +def _generate_test_points(typecodes): + axes = tuple(map(lambda x: TEST_POINTS[x], typecodes)) + pts = list(product(*axes)) + return pts + + +def test_cython_api_completeness(): + # Check that everything is tested + skip = {'hyp2f0', 'hyp1f2', 'hyp3f0'} + for name in dir(cython_special): + func = getattr(cython_special, name) + if callable(func) and not (name.startswith('_') or name in skip): + for _, cyfun, _, _ in PARAMS: + if cyfun is func: + break + else: + raise RuntimeError("{} missing from tests!".format(name)) + + +@pytest.mark.parametrize("param", PARAMS, ids=IDS) +def test_cython_api(param): + pyfunc, cyfunc, specializations, knownfailure = param + if knownfailure: + pytest.xfail(reason=knownfailure) + + # Check which parameters are expected to be fused types + values = [set() for code in specializations[0]] + for typecodes in specializations: + for j, v in enumerate(typecodes): + values[j].add(v) + seen = set() + is_fused_code = [False] * len(values) + for j, v in enumerate(values): + vv = tuple(sorted(v)) + if vv in seen: + continue + is_fused_code[j] = (len(v) > 1) + seen.add(vv) + + # Check results + for typecodes in specializations: + # Pick the correct specialized function + signature = [] + for j, code in enumerate(typecodes): + if is_fused_code[j]: + signature.append(CYTHON_SIGNATURE_MAP[code]) + + if signature: + cy_spec_func = cyfunc[tuple(signature)] + else: + signature = None + cy_spec_func = cyfunc + + # Test it + pts = _generate_test_points(typecodes) + for pt in pts: + pyval = pyfunc(*pt) + cyval = cy_spec_func(*pt) + assert_allclose(cyval, pyval, err_msg="{} {} {}".format(pt, typecodes, signature)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cython_special.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cython_special.pyc new file mode 100644 index 0000000..eaafc2d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_cython_special.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_data.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_data.py new file mode 100644 index 0000000..151b6ab --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_data.py @@ -0,0 +1,500 @@ +from __future__ import division, print_function, absolute_import + +import os + +import numpy as np +from numpy import 
arccosh, arcsinh, arctanh
+from scipy._lib._numpy_compat import suppress_warnings
+import pytest
+
+from scipy.special import (
+    lpn, lpmn, lpmv, lqn, lqmn, sph_harm, eval_legendre, eval_hermite,
+    eval_laguerre, eval_genlaguerre, binom, cbrt, expm1, log1p, zeta,
+    jn, jv, yn, yv, iv, kv, kn,
+    gamma, gammaln, gammainc, gammaincc, gammaincinv, gammainccinv, digamma,
+    beta, betainc, betaincinv, poch,
+    ellipe, ellipeinc, ellipk, ellipkm1, ellipkinc, ellipj,
+    erf, erfc, erfinv, erfcinv, exp1, expi, expn,
+    bdtrik, btdtr, btdtri, btdtria, btdtrib, chndtr, gdtr, gdtrc, gdtrix, gdtrib,
+    nbdtrik, pdtrik, owens_t,
+    mathieu_a, mathieu_b, mathieu_cem, mathieu_sem, mathieu_modcem1,
+    mathieu_modsem1, mathieu_modcem2, mathieu_modsem2,
+    ellip_harm, ellip_harm_2, spherical_jn, spherical_yn,
+)
+from scipy.integrate import IntegrationWarning
+
+from scipy.special._testutils import FuncData
+
+DATASETS_BOOST = np.load(os.path.join(os.path.dirname(__file__),
+                                      "data", "boost.npz"))
+
+DATASETS_GSL = np.load(os.path.join(os.path.dirname(__file__),
+                                    "data", "gsl.npz"))
+
+DATASETS_LOCAL = np.load(os.path.join(os.path.dirname(__file__),
+                                      "data", "local.npz"))
+
+
+def data(func, dataname, *a, **kw):
+    kw.setdefault('dataname', dataname)
+    return FuncData(func, DATASETS_BOOST[dataname], *a, **kw)
+
+
+def data_gsl(func, dataname, *a, **kw):
+    kw.setdefault('dataname', dataname)
+    return FuncData(func, DATASETS_GSL[dataname], *a, **kw)
+
+
+def data_local(func, dataname, *a, **kw):
+    kw.setdefault('dataname', dataname)
+    return FuncData(func, DATASETS_LOCAL[dataname], *a, **kw)
+
+
+def ellipk_(k):
+    return ellipk(k*k)
+
+
+def ellipkinc_(f, k):
+    return ellipkinc(f, k*k)
+
+
+def ellipe_(k):
+    return ellipe(k*k)
+
+
+def ellipeinc_(f, k):
+    return ellipeinc(f, k*k)
+
+
+def ellipj_(k):
+    return ellipj(k*k)
+
+
+def zeta_(x):
+    return zeta(x, 1.)
+
+
+def assoc_legendre_p_boost_(nu, mu, x):
+    # the Boost test data is for integer orders only
+    return lpmv(mu, nu.astype(int), x)
+
+def legendre_p_via_assoc_(nu, x):
+    return lpmv(0, nu, x)
+
+def lpn_(n, x):
+    return lpn(n.astype('l'), x)[0][-1]
+
+def lqn_(n, x):
+    return lqn(n.astype('l'), x)[0][-1]
+
+def legendre_p_via_lpmn(n, x):
+    return lpmn(0, n, x)[0][0,-1]
+
+def legendre_q_via_lqmn(n, x):
+    return lqmn(0, n, x)[0][0,-1]
+
+def mathieu_ce_rad(m, q, x):
+    return mathieu_cem(m, q, x*180/np.pi)[0]
+
+
+def mathieu_se_rad(m, q, x):
+    return mathieu_sem(m, q, x*180/np.pi)[0]
+
+
+def mathieu_mc1_scaled(m, q, x):
+    # GSL follows a different normalization.
+    # We follow Abramowitz & Stegun; GSL apparently follows something else.
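+    # The stored reference data uses GSL's convention, so scale our
+    # A&S-normalized result by sqrt(pi/2) before comparing.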
+ return mathieu_modcem1(m, q, x)[0] * np.sqrt(np.pi/2) + + +def mathieu_ms1_scaled(m, q, x): + return mathieu_modsem1(m, q, x)[0] * np.sqrt(np.pi/2) + + +def mathieu_mc2_scaled(m, q, x): + return mathieu_modcem2(m, q, x)[0] * np.sqrt(np.pi/2) + + +def mathieu_ms2_scaled(m, q, x): + return mathieu_modsem2(m, q, x)[0] * np.sqrt(np.pi/2) + +def eval_legendre_ld(n, x): + return eval_legendre(n.astype('l'), x) + +def eval_legendre_dd(n, x): + return eval_legendre(n.astype('d'), x) + +def eval_hermite_ld(n, x): + return eval_hermite(n.astype('l'), x) + +def eval_laguerre_ld(n, x): + return eval_laguerre(n.astype('l'), x) + +def eval_laguerre_dd(n, x): + return eval_laguerre(n.astype('d'), x) + +def eval_genlaguerre_ldd(n, a, x): + return eval_genlaguerre(n.astype('l'), a, x) + +def eval_genlaguerre_ddd(n, a, x): + return eval_genlaguerre(n.astype('d'), a, x) + +def bdtrik_comp(y, n, p): + return bdtrik(1-y, n, p) + +def btdtri_comp(a, b, p): + return btdtri(a, b, 1-p) + +def btdtria_comp(p, b, x): + return btdtria(1-p, b, x) + +def btdtrib_comp(a, p, x): + return btdtrib(a, 1-p, x) + +def gdtr_(p, x): + return gdtr(1.0, p, x) + +def gdtrc_(p, x): + return gdtrc(1.0, p, x) + +def gdtrix_(b, p): + return gdtrix(1.0, b, p) + +def gdtrix_comp(b, p): + return gdtrix(1.0, b, 1-p) + +def gdtrib_(p, x): + return gdtrib(1.0, p, x) + +def gdtrib_comp(p, x): + return gdtrib(1.0, 1-p, x) + +def nbdtrik_comp(y, n, p): + return nbdtrik(1-y, n, p) + +def pdtrik_comp(p, m): + return pdtrik(1-p, m) + +def poch_(z, m): + return 1.0 / poch(z, m) + +def poch_minus(z, m): + return 1.0 / poch(z, -m) + +def spherical_jn_(n, x): + return spherical_jn(n.astype('l'), x) + +def spherical_yn_(n, x): + return spherical_yn(n.astype('l'), x) + +def sph_harm_(m, n, theta, phi): + y = sph_harm(m, n, theta, phi) + return (y.real, y.imag) + +def cexpm1(x, y): + z = expm1(x + 1j*y) + return z.real, z.imag + +def clog1p(x, y): + z = log1p(x + 1j*y) + return z.real, z.imag + + +BOOST_TESTS = [ + data(arccosh, 'acosh_data_ipp-acosh_data', 0, 1, rtol=5e-13), + data(arccosh, 'acosh_data_ipp-acosh_data', 0j, 1, rtol=5e-13), + + data(arcsinh, 'asinh_data_ipp-asinh_data', 0, 1, rtol=1e-11), + data(arcsinh, 'asinh_data_ipp-asinh_data', 0j, 1, rtol=1e-11), + + data(arctanh, 'atanh_data_ipp-atanh_data', 0, 1, rtol=1e-11), + data(arctanh, 'atanh_data_ipp-atanh_data', 0j, 1, rtol=1e-11), + + data(assoc_legendre_p_boost_, 'assoc_legendre_p_ipp-assoc_legendre_p', (0,1,2), 3, rtol=1e-11), + + data(legendre_p_via_assoc_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=1e-11), + data(legendre_p_via_assoc_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=7e-14), + data(legendre_p_via_lpmn, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False), + data(legendre_p_via_lpmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=7e-14, vectorized=False), + data(lpn_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False), + data(lpn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=3e-13, vectorized=False), + data(eval_legendre_ld, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=6e-14), + data(eval_legendre_ld, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13), + data(eval_legendre_dd, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=2e-14), + data(eval_legendre_dd, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13), + + data(lqn_, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False), + data(lqn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False), + 
data(legendre_q_via_lqmn, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False), + data(legendre_q_via_lqmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False), + + data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13), + data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13), + data(beta, 'beta_small_data_ipp-beta_small_data', (0,1), 2), + data(beta, 'beta_med_data_ipp-beta_med_data', (0,1), 2, rtol=5e-13), + + data(betainc, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15), + data(betainc, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=5e-13), + data(betainc, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14), + data(betainc, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10), + + data(betaincinv, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5), + + data(btdtr, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15), + data(btdtr, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=4e-13), + data(btdtr, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14), + data(btdtr, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10), + + data(btdtri, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5), + data(btdtri_comp, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 4, rtol=8e-7), + + data(btdtria, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 3, rtol=5e-9), + data(btdtria_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 4, rtol=5e-9), + + data(btdtrib, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 5, rtol=5e-9), + data(btdtrib_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 6, rtol=5e-9), + + data(binom, 'binomial_data_ipp-binomial_data', (0,1), 2, rtol=1e-13), + data(binom, 'binomial_large_data_ipp-binomial_large_data', (0,1), 2, rtol=5e-13), + + data(bdtrik, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 3, rtol=5e-9), + data(bdtrik_comp, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 4, rtol=5e-9), + + data(nbdtrik, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 3, rtol=4e-9), + data(nbdtrik_comp, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 4, rtol=4e-9), + + data(pdtrik, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 2, rtol=3e-9), + data(pdtrik_comp, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 3, rtol=4e-9), + + data(cbrt, 'cbrt_data_ipp-cbrt_data', 1, 0), + + data(digamma, 'digamma_data_ipp-digamma_data', 0, 1), + data(digamma, 'digamma_data_ipp-digamma_data', 0j, 1), + data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0, 1, rtol=2e-13), + data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0j, 1, rtol=1e-13), + data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0, 1, rtol=1e-15), + data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0j, 1, rtol=1e-15), + data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0, 1, rtol=1e-15), + data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0j, 1, rtol=1e-14), + + data(ellipk_, 'ellint_k_data_ipp-ellint_k_data', 0, 1), + data(ellipkinc_, 'ellint_f_data_ipp-ellint_f_data', (0,1), 2, rtol=1e-14), + data(ellipe_, 'ellint_e_data_ipp-ellint_e_data', 0, 1), + data(ellipeinc_, 'ellint_e2_data_ipp-ellint_e2_data', (0,1), 2, rtol=1e-14), + + data(erf, 'erf_data_ipp-erf_data', 0, 1), + data(erf, 'erf_data_ipp-erf_data', 0j, 1, rtol=1e-13), + data(erfc, 'erf_data_ipp-erf_data', 0, 2, rtol=6e-15), + data(erf, 'erf_large_data_ipp-erf_large_data', 0, 1), + data(erf, 
'erf_large_data_ipp-erf_large_data', 0j, 1), + data(erfc, 'erf_large_data_ipp-erf_large_data', 0, 2, rtol=4e-14), + data(erf, 'erf_small_data_ipp-erf_small_data', 0, 1), + data(erf, 'erf_small_data_ipp-erf_small_data', 0j, 1, rtol=1e-13), + data(erfc, 'erf_small_data_ipp-erf_small_data', 0, 2), + + data(erfinv, 'erf_inv_data_ipp-erf_inv_data', 0, 1), + data(erfcinv, 'erfc_inv_data_ipp-erfc_inv_data', 0, 1), + data(erfcinv, 'erfc_inv_big_data_ipp-erfc_inv_big_data2', 0, 1), + + data(exp1, 'expint_1_data_ipp-expint_1_data', 1, 2, rtol=1e-13), + data(exp1, 'expint_1_data_ipp-expint_1_data', 1j, 2, rtol=5e-9), + data(expi, 'expinti_data_ipp-expinti_data', 0, 1, rtol=1e-13), + data(expi, 'expinti_data_double_ipp-expinti_data_double', 0, 1, rtol=1e-13), + + data(expn, 'expint_small_data_ipp-expint_small_data', (0,1), 2), + data(expn, 'expint_data_ipp-expint_data', (0,1), 2, rtol=1e-14), + + data(gamma, 'test_gamma_data_ipp-near_0', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_1', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_2', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_m10', 0, 1), + data(gamma, 'test_gamma_data_ipp-near_m55', 0, 1, rtol=7e-12), + data(gamma, 'test_gamma_data_ipp-factorials', 0, 1, rtol=4e-14), + data(gamma, 'test_gamma_data_ipp-near_0', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_1', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_2', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_m10', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-near_m55', 0j, 1, rtol=2e-9), + data(gamma, 'test_gamma_data_ipp-factorials', 0j, 1, rtol=2e-13), + data(gammaln, 'test_gamma_data_ipp-near_0', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-near_1', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-near_2', 0, 2, rtol=2e-10), + data(gammaln, 'test_gamma_data_ipp-near_m10', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-near_m55', 0, 2, rtol=5e-11), + data(gammaln, 'test_gamma_data_ipp-factorials', 0, 2), + + data(gammainc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=5e-15), + data(gammainc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13), + data(gammainc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13), + data(gammainc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=1e-12), + + data(gdtr_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=1e-13), + data(gdtr_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13), + data(gdtr_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13), + data(gdtr_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=2e-9), + + data(gammaincc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13), + data(gammaincc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13), + data(gammaincc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14), + data(gammaincc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11), + + data(gdtrc_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13), + data(gdtrc_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13), + data(gdtrc_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14), + data(gdtrc_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11), + + data(gdtrib_, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 2, rtol=5e-9), + data(gdtrib_comp, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 3, rtol=5e-9), + + data(poch_, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 2, rtol=2e-13), + data(poch_, 
'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 2,), + data(poch_, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 2,), + data(poch_minus, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 3, rtol=2e-13), + data(poch_minus, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 3), + data(poch_minus, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 3), + + + data(eval_hermite_ld, 'hermite_ipp-hermite', (0,1), 2, rtol=2e-14), + data(eval_laguerre_ld, 'laguerre2_ipp-laguerre2', (0,1), 2, rtol=7e-12), + data(eval_laguerre_dd, 'laguerre2_ipp-laguerre2', (0,1), 2, knownfailure='hyp2f1 insufficiently accurate.'), + data(eval_genlaguerre_ldd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, rtol=2e-13), + data(eval_genlaguerre_ddd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, knownfailure='hyp2f1 insufficiently accurate.'), + + data(log1p, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 1), + data(expm1, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 2), + + data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1), 2, rtol=1e-12), + data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1j), 2, rtol=2e-10, atol=1e-306), + data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1), 2, rtol=1e-9), + data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1j), 2, rtol=2e-10), + + data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12), + data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12), + data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1), 2, rtol=6e-11), + data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1j), 2, rtol=6e-11), + + data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12), + data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12), + data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1), 2, rtol=1e-12), + data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1j), 2, rtol=1e-12), + + data(kn, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12), + + data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12), + data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1j), 2, rtol=1e-12), + data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1), 2, rtol=1e-12), + data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1j), 2, rtol=1e-12), + + data(yn, 'bessel_y01_data_ipp-bessel_y01_data', (0,1), 2, rtol=1e-12), + data(yn, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12), + + data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12), + data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1j), 2, rtol=1e-12), + data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1), 2, rtol=1e-10), + data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1j), 2, rtol=1e-10), + + data(zeta_, 'zeta_data_ipp-zeta_data', 0, 1, param_filter=(lambda s: s > 1)), + data(zeta_, 'zeta_neg_data_ipp-zeta_neg_data', 0, 1, param_filter=(lambda s: s > 1)), + data(zeta_, 'zeta_1_up_data_ipp-zeta_1_up_data', 0, 1, param_filter=(lambda s: s > 1)), + data(zeta_, 'zeta_1_below_data_ipp-zeta_1_below_data', 0, 1, param_filter=(lambda s: s > 1)), + + data(gammaincinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=1e-11), + data(gammaincinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=1e-14), + data(gammaincinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2, rtol=1e-11), + + data(gammainccinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 3, rtol=1e-12), + data(gammainccinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=1e-14), + data(gammainccinv, 
'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3, rtol=1e-14),
+
+    data(gdtrix_, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=3e-13, knownfailure='gdtrix underflows at some points'),
+    data(gdtrix_, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=3e-15),
+    data(gdtrix_, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2),
+    data(gdtrix_comp, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, knownfailure='gdtrix inaccurate at some points'),
+    data(gdtrix_comp, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=6e-15),
+    data(gdtrix_comp, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3),
+
+    data(chndtr, 'nccs_ipp-nccs', (2,0,1), 3, rtol=3e-5),
+    data(chndtr, 'nccs_big_ipp-nccs_big', (2,0,1), 3, rtol=5e-4, knownfailure='chndtr inaccurate at some points'),
+
+    data(sph_harm_, 'spherical_harmonic_ipp-spherical_harmonic', (1,0,3,2), (4,5), rtol=5e-11,
+         param_filter=(lambda p: np.ones(p.shape, '?'),
+                       lambda p: np.ones(p.shape, '?'),
+                       lambda p: np.logical_and(p < 2*np.pi, p >= 0),
+                       lambda p: np.logical_and(p < np.pi, p >= 0))),
+
+    data(spherical_jn_, 'sph_bessel_data_ipp-sph_bessel_data', (0,1), 2, rtol=1e-13),
+    data(spherical_yn_, 'sph_neumann_data_ipp-sph_neumann_data', (0,1), 2, rtol=8e-15),
+
+    data(owens_t, 'owenst_data_ipp-owens_t', (0, 1), 2, rtol=5e-14),
+    data(owens_t, 'owenst_data_ipp-owens_t_alarge', (0, 1), 2, rtol=5e-15),
+
+    # -- not used yet (function does not exist in scipy):
+    # 'ellint_pi2_data_ipp-ellint_pi2_data',
+    # 'ellint_pi3_data_ipp-ellint_pi3_data',
+    # 'ellint_pi3_large_data_ipp-ellint_pi3_large_data',
+    # 'ellint_rc_data_ipp-ellint_rc_data',
+    # 'ellint_rd_data_ipp-ellint_rd_data',
+    # 'ellint_rf_data_ipp-ellint_rf_data',
+    # 'ellint_rj_data_ipp-ellint_rj_data',
+    # 'ncbeta_big_ipp-ncbeta_big',
+    # 'ncbeta_ipp-ncbeta',
+    # 'powm1_sqrtp1m1_test_cpp-powm1_data',
+    # 'powm1_sqrtp1m1_test_cpp-sqrtp1m1_data',
+    # 'test_gamma_data_ipp-gammap1m1_data',
+    # 'tgamma_ratio_data_ipp-tgamma_ratio_data',
+]
+
+
+@pytest.mark.parametrize('test', BOOST_TESTS, ids=repr)
+def test_boost(test):
+    _test_factory(test)
+
+
+GSL_TESTS = [
+    data_gsl(mathieu_a, 'mathieu_ab', (0, 1), 2, rtol=1e-13, atol=1e-13),
+    data_gsl(mathieu_b, 'mathieu_ab', (0, 1), 3, rtol=1e-13, atol=1e-13),
+
+    # Also the GSL output has limited accuracy...
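+    # hence the relaxed rtol=1e-7 in the ce/se and mc/ms comparisons below.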
+ data_gsl(mathieu_ce_rad, 'mathieu_ce_se', (0, 1, 2), 3, rtol=1e-7, atol=1e-13), + data_gsl(mathieu_se_rad, 'mathieu_ce_se', (0, 1, 2), 4, rtol=1e-7, atol=1e-13), + + data_gsl(mathieu_mc1_scaled, 'mathieu_mc_ms', (0, 1, 2), 3, rtol=1e-7, atol=1e-13), + data_gsl(mathieu_ms1_scaled, 'mathieu_mc_ms', (0, 1, 2), 4, rtol=1e-7, atol=1e-13), + + data_gsl(mathieu_mc2_scaled, 'mathieu_mc_ms', (0, 1, 2), 5, rtol=1e-7, atol=1e-13), + data_gsl(mathieu_ms2_scaled, 'mathieu_mc_ms', (0, 1, 2), 6, rtol=1e-7, atol=1e-13), +] + + +@pytest.mark.parametrize('test', GSL_TESTS, ids=repr) +def test_gsl(test): + _test_factory(test) + + +LOCAL_TESTS = [ + data_local(ellipkinc, 'ellipkinc_neg_m', (0, 1), 2), + data_local(ellipkm1, 'ellipkm1', 0, 1), + data_local(ellipeinc, 'ellipeinc_neg_m', (0, 1), 2), + data_local(clog1p, 'log1p_expm1_complex', (0,1), (2,3), rtol=1e-14), + data_local(cexpm1, 'log1p_expm1_complex', (0,1), (4,5), rtol=1e-14), + data_local(gammainc, 'gammainc', (0, 1), 2, rtol=1e-12), + data_local(gammaincc, 'gammaincc', (0, 1), 2, rtol=1e-11), + data_local(ellip_harm_2, 'ellip',(0, 1, 2, 3, 4), 6, rtol=1e-10, atol=1e-13), + data_local(ellip_harm, 'ellip',(0, 1, 2, 3, 4), 5, rtol=1e-10, atol=1e-13), +] + + +@pytest.mark.parametrize('test', LOCAL_TESTS, ids=repr) +def test_local(test): + _test_factory(test) + + +def _test_factory(test, dtype=np.double): + """Boost test""" + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error is detected") + olderr = np.seterr(all='ignore') + try: + test.check(dtype=dtype) + finally: + np.seterr(**olderr) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_data.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_data.pyc new file mode 100644 index 0000000..aab0c23 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_data.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_digamma.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_digamma.py new file mode 100644 index 0000000..ea9e30d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_digamma.py @@ -0,0 +1,44 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import pi, log, sqrt +from numpy.testing import assert_, assert_equal + +from scipy.special._testutils import FuncData +import scipy.special as sc + +# Euler-Mascheroni constant +euler = 0.57721566490153286 + + +def test_consistency(): + # Make sure the implementation of digamma for real arguments + # agrees with the implementation of digamma for complex arguments. + + # It's all poles after -1e16 + x = np.r_[-np.logspace(15, -30, 200), np.logspace(-30, 300, 200)] + dataset = np.vstack((x + 0j, sc.digamma(x))).T + FuncData(sc.digamma, dataset, 0, 1, rtol=5e-14, nan_ok=True).check() + + +def test_special_values(): + # Test special values from Gauss's digamma theorem. 
See + # + # https://en.wikipedia.org/wiki/Digamma_function + + dataset = [(1, -euler), + (0.5, -2*log(2) - euler), + (1/3, -pi/(2*sqrt(3)) - 3*log(3)/2 - euler), + (1/4, -pi/2 - 3*log(2) - euler), + (1/6, -pi*sqrt(3)/2 - 2*log(2) - 3*log(3)/2 - euler), + (1/8, -pi/2 - 4*log(2) - (pi + log(2 + sqrt(2)) - log(2 - sqrt(2)))/sqrt(2) - euler)] + + dataset = np.asarray(dataset) + FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check() + + +def test_nonfinite(): + pts = [0.0, -0.0, np.inf] + std = [-np.inf, np.inf, np.inf] + assert_equal(sc.digamma(pts), std) + assert_(all(np.isnan(sc.digamma([-np.inf, -1])))) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_digamma.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_digamma.pyc new file mode 100644 index 0000000..20f017f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_digamma.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_ellip_harm.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_ellip_harm.py new file mode 100644 index 0000000..d635b2f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_ellip_harm.py @@ -0,0 +1,273 @@ +# +# Tests for the Ellipsoidal Harmonic Function, +# Distributed under the same license as SciPy itself. +# + +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose, + assert_) +from scipy._lib._numpy_compat import suppress_warnings +from scipy.special._testutils import assert_func_equal +from scipy.special import ellip_harm, ellip_harm_2, ellip_normal +from scipy.integrate import IntegrationWarning +from numpy import sqrt, pi + + +def test_ellip_potential(): + def change_coefficient(lambda1, mu, nu, h2, k2): + x = sqrt(lambda1**2*mu**2*nu**2/(h2*k2)) + y = sqrt((lambda1**2 - h2)*(mu**2 - h2)*(h2 - nu**2)/(h2*(k2 - h2))) + z = sqrt((lambda1**2 - k2)*(k2 - mu**2)*(k2 - nu**2)/(k2*(k2 - h2))) + return x, y, z + + def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2): + return (ellip_harm(h2, k2, n, p, lambda1)*ellip_harm(h2, k2, n, p, mu) + * ellip_harm(h2, k2, n, p, nu)) + + def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2): + return (ellip_harm_2(h2, k2, n, p, lambda1) + * ellip_harm(h2, k2, n, p, mu)*ellip_harm(h2, k2, n, p, nu)) + + def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2): + tol = 1e-8 + sum1 = 0 + for n in range(20): + xsum = 0 + for p in range(1, 2*n+2): + xsum += (4*pi*(solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2) + * solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2)) / + (ellip_normal(h2, k2, n, p)*(2*n + 1))) + if abs(xsum) < 0.1*tol*abs(sum1): + break + sum1 += xsum + return sum1, xsum + + def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2): + x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2) + x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2) + res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2) + return 1/res + + pts = [ + (120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25), + (120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20), + ] + + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error") + sup.filter(IntegrationWarning, "The maximum number of subdivisions") + + for p in pts: + err_msg = repr(p) + exact = potential(*p) + result, last_term = summation(*p) + assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg) + 
assert_(abs(result - exact) < 10*abs(last_term), err_msg) + + +def test_ellip_norm(): + + def G01(h2, k2): + return 4*pi + + def G11(h2, k2): + return 4*pi*h2*k2/3 + + def G12(h2, k2): + return 4*pi*h2*(k2 - h2)/3 + + def G13(h2, k2): + return 4*pi*k2*(k2 - h2)/3 + + def G22(h2, k2): + res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 + + sqrt(h2**2 + k2**2 - h2*k2)*(-2*(h2**3 + k2**3) + 3*h2*k2*(h2 + k2))) + return 16*pi/405*res + + def G21(h2, k2): + res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 + + sqrt(h2**2 + k2**2 - h2*k2)*(2*(h2**3 + k2**3) - 3*h2*k2*(h2 + k2))) + return 16*pi/405*res + + def G23(h2, k2): + return 4*pi*h2**2*k2*(k2 - h2)/15 + + def G24(h2, k2): + return 4*pi*h2*k2**2*(k2 - h2)/15 + + def G25(h2, k2): + return 4*pi*h2*k2*(k2 - h2)**2/15 + + def G32(h2, k2): + res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2 + + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(-8*(h2**3 + k2**3) + + 11*h2*k2*(h2 + k2))) + return 16*pi/13125*k2*h2*res + + def G31(h2, k2): + res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2 + + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(8*(h2**3 + k2**3) - + 11*h2*k2*(h2 + k2))) + return 16*pi/13125*h2*k2*res + + def G34(h2, k2): + res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2 + + sqrt(h2**2 + 4*k2**2 - h2*k2)*(-6*h2**3 - 8*k2**3 + 9*h2**2*k2 + + 13*h2*k2**2)) + return 16*pi/13125*h2*(k2 - h2)*res + + def G33(h2, k2): + res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2 + + sqrt(h2**2 + 4*k2**2 - h2*k2)*(6*h2**3 + 8*k2**3 - 9*h2**2*k2 - + 13*h2*k2**2)) + return 16*pi/13125*h2*(k2 - h2)*res + + def G36(h2, k2): + res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2 + + sqrt(4*h2**2 + k2**2 - h2*k2)*(-8*h2**3 - 6*k2**3 + 13*h2**2*k2 + + 9*h2*k2**2)) + return 16*pi/13125*k2*(k2 - h2)*res + + def G35(h2, k2): + res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2 + + sqrt(4*h2**2 + k2**2 - h2*k2)*(8*h2**3 + 6*k2**3 - 13*h2**2*k2 - + 9*h2*k2**2)) + return 16*pi/13125*k2*(k2 - h2)*res + + def G37(h2, k2): + return 4*pi*h2**2*k2**2*(k2 - h2)**2/105 + + known_funcs = {(0, 1): G01, (1, 1): G11, (1, 2): G12, (1, 3): G13, + (2, 1): G21, (2, 2): G22, (2, 3): G23, (2, 4): G24, + (2, 5): G25, (3, 1): G31, (3, 2): G32, (3, 3): G33, + (3, 4): G34, (3, 5): G35, (3, 6): G36, (3, 7): G37} + + def _ellip_norm(n, p, h2, k2): + func = known_funcs[n, p] + return func(h2, k2) + _ellip_norm = np.vectorize(_ellip_norm) + + def ellip_normal_known(h2, k2, n, p): + return _ellip_norm(n, p, h2, k2) + + # generate both large and small h2 < k2 pairs + np.random.seed(1234) + h2 = np.random.pareto(0.5, size=1) + k2 = h2 * (1 + np.random.pareto(0.5, size=h2.size)) + + points = [] + for n in range(4): + for p in range(1, 2*n+2): + points.append((h2, k2, n*np.ones(h2.size), p*np.ones(h2.size))) + points = np.array(points) + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error") + assert_func_equal(ellip_normal, ellip_normal_known, points, rtol=1e-12) + + +def test_ellip_harm_2(): + + def I1(h2, k2, s): + res = (ellip_harm_2(h2, k2, 1, 1, s)/(3 * ellip_harm(h2, k2, 1, 1, s)) + + ellip_harm_2(h2, k2, 1, 2, s)/(3 * ellip_harm(h2, k2, 1, 2, s)) + + ellip_harm_2(h2, k2, 1, 3, s)/(3 * ellip_harm(h2, k2, 1, 3, s))) + return res + + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error") + assert_almost_equal(I1(5, 8, 10), 1/(10*sqrt((100-5)*(100-8)))) + + # Values 
produced by code from arXiv:1204.0267 + assert_almost_equal(ellip_harm_2(5, 8, 2, 1, 10), 0.00108056853382) + assert_almost_equal(ellip_harm_2(5, 8, 2, 2, 10), 0.00105820513809) + assert_almost_equal(ellip_harm_2(5, 8, 2, 3, 10), 0.00106058384743) + assert_almost_equal(ellip_harm_2(5, 8, 2, 4, 10), 0.00106774492306) + assert_almost_equal(ellip_harm_2(5, 8, 2, 5, 10), 0.00107976356454) + + +def test_ellip_harm(): + + def E01(h2, k2, s): + return 1 + + def E11(h2, k2, s): + return s + + def E12(h2, k2, s): + return sqrt(abs(s*s - h2)) + + def E13(h2, k2, s): + return sqrt(abs(s*s - k2)) + + def E21(h2, k2, s): + return s*s - 1/3*((h2 + k2) + sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2))) + + def E22(h2, k2, s): + return s*s - 1/3*((h2 + k2) - sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2))) + + def E23(h2, k2, s): + return s * sqrt(abs(s*s - h2)) + + def E24(h2, k2, s): + return s * sqrt(abs(s*s - k2)) + + def E25(h2, k2, s): + return sqrt(abs((s*s - h2)*(s*s - k2))) + + def E31(h2, k2, s): + return s*s*s - (s/5)*(2*(h2 + k2) + sqrt(4*(h2 + k2)*(h2 + k2) - + 15*h2*k2)) + + def E32(h2, k2, s): + return s*s*s - (s/5)*(2*(h2 + k2) - sqrt(4*(h2 + k2)*(h2 + k2) - + 15*h2*k2)) + + def E33(h2, k2, s): + return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) + sqrt(abs((h2 + + 2*k2)*(h2 + 2*k2) - 5*h2*k2)))) + + def E34(h2, k2, s): + return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) - sqrt(abs((h2 + + 2*k2)*(h2 + 2*k2) - 5*h2*k2)))) + + def E35(h2, k2, s): + return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) + sqrt(abs((2*h2 + + k2)*(2*h2 + k2) - 5*h2*k2)))) + + def E36(h2, k2, s): + return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) - sqrt(abs((2*h2 + + k2)*(2*h2 + k2) - 5*h2*k2)))) + + def E37(h2, k2, s): + return s * sqrt(abs((s*s - h2)*(s*s - k2))) + + assert_equal(ellip_harm(5, 8, 1, 2, 2.5, 1, 1), + ellip_harm(5, 8, 1, 2, 2.5)) + + known_funcs = {(0, 1): E01, (1, 1): E11, (1, 2): E12, (1, 3): E13, + (2, 1): E21, (2, 2): E22, (2, 3): E23, (2, 4): E24, + (2, 5): E25, (3, 1): E31, (3, 2): E32, (3, 3): E33, + (3, 4): E34, (3, 5): E35, (3, 6): E36, (3, 7): E37} + + point_ref = [] + + def ellip_harm_known(h2, k2, n, p, s): + for i in range(h2.size): + func = known_funcs[(int(n[i]), int(p[i]))] + point_ref.append(func(h2[i], k2[i], s[i])) + return point_ref + + np.random.seed(1234) + h2 = np.random.pareto(0.5, size=30) + k2 = h2*(1 + np.random.pareto(0.5, size=h2.size)) + s = np.random.pareto(0.5, size=h2.size) + points = [] + for i in range(h2.size): + for n in range(4): + for p in range(1, 2*n+2): + points.append((h2[i], k2[i], n, p, s[i])) + points = np.array(points) + assert_func_equal(ellip_harm, ellip_harm_known, points, rtol=1e-12) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_ellip_harm.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_ellip_harm.pyc new file mode 100644 index 0000000..592473d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_ellip_harm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_gammainc.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_gammainc.py new file mode 100644 index 0000000..db79e41 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_gammainc.py @@ -0,0 +1,46 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_allclose + +import scipy.special as sc +from scipy.special._testutils import FuncData + + +def 
test_line(): + # Test on the line a = x where a simpler asymptotic expansion + # (analog of DLMF 8.12.15) is available. + + def gammainc_line(x): + c = np.array([-1/3, -1/540, 25/6048, 101/155520, + -3184811/3695155200, -2745493/8151736420]) + res = 0 + xfac = 1 + for ck in c: + res -= ck*xfac + xfac /= x + res /= np.sqrt(2*np.pi*x) + res += 0.5 + return res + + x = np.logspace(np.log10(25), 300, 500) + a = x.copy() + dataset = np.vstack((a, x, gammainc_line(x))).T + + FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-11).check() + + +def test_gammainc_roundtrip(): + a = np.logspace(-5, 10, 100) + x = np.logspace(-5, 10, 100) + + y = sc.gammaincinv(a, sc.gammainc(a, x)) + assert_allclose(x, y, rtol=1e-10) + + +def test_gammaincc_roundtrip(): + a = np.logspace(-5, 10, 100) + x = np.logspace(-5, 10, 100) + + y = sc.gammainccinv(a, sc.gammaincc(a, x)) + assert_allclose(x, y, rtol=1e-14) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_gammainc.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_gammainc.pyc new file mode 100644 index 0000000..e34f139 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_gammainc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_kolmogorov.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_kolmogorov.py new file mode 100644 index 0000000..c8b5e89 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_kolmogorov.py @@ -0,0 +1,414 @@ +from __future__ import division, print_function, absolute_import + +import itertools +import sys +import pytest + +import numpy as np +from numpy.testing import assert_ +from scipy.special._testutils import FuncData + +from scipy.special import kolmogorov, kolmogi, smirnov, smirnovi +from scipy.special._ufuncs import (_kolmogc, _kolmogci, _kolmogp, + _smirnovc, _smirnovci, _smirnovp) + +_rtol = 1e-10 + +class TestSmirnov(object): + def test_nan(self): + assert_(np.isnan(smirnov(1, np.nan))) + + def test_basic(self): + dataset = [(1, 0.1, 0.9), + (1, 0.875, 0.125), + (2, 0.875, 0.125 * 0.125), + (3, 0.875, 0.125 * 0.125 * 0.125)] + + dataset = np.asarray(dataset) + FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_x_equals_0(self): + dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_x_equals_1(self): + dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_x_equals_0point5(self): + dataset = [(1, 0.5, 0.5), + (2, 0.5, 0.25), + (3, 0.5, 0.166666666667), + (4, 0.5, 0.09375), + (5, 0.5, 0.056), + (6, 0.5, 0.0327932098765), + (7, 0.5, 0.0191958707681), + (8, 0.5, 0.0112953186035), + (9, 0.5, 0.00661933257355), + (10, 0.5, 0.003888705)] + + dataset = np.asarray(dataset) + FuncData(smirnov, dataset, (0, 1), 2, 
rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_n_equals_1(self): + x = np.linspace(0, 1, 101, endpoint=True) + dataset = np.column_stack([[1]*len(x), x, 1-x]) + FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_n_equals_2(self): + x = np.linspace(0.5, 1, 101, endpoint=True) + p = np.power(1-x, 2) + n = np.array([2] * len(x)) + dataset = np.column_stack([n, x, p]) + FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_n_equals_3(self): + x = np.linspace(0.7, 1, 31, endpoint=True) + p = np.power(1-x, 3) + n = np.array([3] * len(x)) + dataset = np.column_stack([n, x, p]) + FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, -1] = 1 - dataset[:, -1] + FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_n_large(self): + # test for large values of n + # Probabilities should go down as n goes up + x = 0.4 + pvals = np.array([smirnov(n, x) for n in range(400, 1100, 20)]) + dfs = np.diff(pvals) + assert_(np.all(dfs <= 0), msg='Not all diffs negative %s' % dfs) + + +class TestSmirnovi(object): + def test_nan(self): + assert_(np.isnan(smirnovi(1, np.nan))) + + def test_basic(self): + dataset = [(1, 0.4, 0.6), + (1, 0.6, 0.4), + (1, 0.99, 0.01), + (1, 0.01, 0.99), + (2, 0.125 * 0.125, 0.875), + (3, 0.125 * 0.125 * 0.125, 0.875), + (10, 1.0 / 16 ** 10, 1 - 1.0 / 16)] + + dataset = np.asarray(dataset) + FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_x_equals_0(self): + dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_x_equals_1(self): + dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))] + dataset = np.asarray(dataset) + FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_n_equals_1(self): + pp = np.linspace(0, 1, 101, endpoint=True) + # dataset = np.array([(1, p, 1-p) for p in pp]) + dataset = np.column_stack([[1]*len(pp), pp, 1-pp]) + FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_n_equals_2(self): + x = np.linspace(0.5, 1, 101, endpoint=True) + p = np.power(1-x, 2) + n = np.array([2] * len(x)) + dataset = np.column_stack([n, p, x]) + FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData(_smirnovci, dataset, (0, 1), 2, 
rtol=_rtol).check(dtypes=[int, float, float]) + + def test_n_equals_3(self): + x = np.linspace(0.7, 1, 31, endpoint=True) + p = np.power(1-x, 3) + n = np.array([3] * len(x)) + dataset = np.column_stack([n, p, x]) + FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_round_trip(self): + def _sm_smi(n, p): + return smirnov(n, smirnovi(n, p)) + + def _smc_smci(n, p): + return _smirnovc(n, _smirnovci(n, p)) + + dataset = [(1, 0.4, 0.4), + (1, 0.6, 0.6), + (2, 0.875, 0.875), + (3, 0.875, 0.875), + (3, 0.125, 0.125), + (10, 0.999, 0.999), + (10, 0.0001, 0.0001)] + + dataset = np.asarray(dataset) + FuncData(_sm_smi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + FuncData(_smc_smci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_x_equals_0point5(self): + dataset = [(1, 0.5, 0.5), + (2, 0.5, 0.366025403784), + (2, 0.25, 0.5), + (3, 0.5, 0.297156508177), + (4, 0.5, 0.255520481121), + (5, 0.5, 0.234559536069), + (6, 0.5, 0.21715965898), + (7, 0.5, 0.202722580034), + (8, 0.5, 0.190621765256), + (9, 0.5, 0.180363501362), + (10, 0.5, 0.17157867006)] + + dataset = np.asarray(dataset) + FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + dataset[:, 1] = 1 - dataset[:, 1] + FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + +class TestSmirnovp(object): + def test_nan(self): + assert_(np.isnan(_smirnovp(1, np.nan))) + + def test_basic(self): + # Check derivative at endpoints + n1_10 = np.arange(1, 10) + dataset0 = np.column_stack([n1_10, np.full_like(n1_10, 0), np.full_like(n1_10, -1)]) + FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + n2_10 = np.arange(2, 10) + dataset1 = np.column_stack([n2_10, np.full_like(n2_10, 1.0), np.full_like(n2_10, 0)]) + FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_oneminusoneovern(self): + # Check derivative at x=1-1/n + n = np.arange(1, 20) + x = 1.0/n + xm1 = 1-1.0/n + pp1 = -n * x**(n-1) + pp1 -= (1-np.sign(n-2)**2) * 0.5 # n=2, x=0.5, 1-1/n = 0.5, need to adjust + dataset1 = np.column_stack([n, xm1, pp1]) + FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_oneovertwon(self): + # Check derivative at x=1/2n (Discontinuous at x=1/n, so check at x=1/2n) + n = np.arange(1, 20) + x = 1.0/2/n + pp = -(n*x+1) * (1+x)**(n-2) + dataset0 = np.column_stack([n, x, pp]) + FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + def test_oneovern(self): + # Check derivative at x=1/n (Discontinuous at x=1/n, hard to tell if x==1/n, only use n=power of 2) + n = 2**np.arange(1, 10) + x = 1.0/n + pp = -(n*x+1) * (1+x)**(n-2) + 0.5 + dataset0 = np.column_stack([n, x, pp]) + FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + @pytest.mark.xfail(sys.maxsize <= 2**32, + reason="requires 64-bit platform") + def test_oneovernclose(self): + # Check derivative at x=1/n (Discontinuous at x=1/n, test on either side: x=1/n +/- 2epsilon) + n = np.arange(3, 20) + + x = 1.0/n - 2*np.finfo(float).eps + pp = -(n*x+1) * (1+x)**(n-2) + dataset0 = np.column_stack([n, x, pp]) + FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + x = 1.0/n + 2*np.finfo(float).eps + 
pp = -(n*x+1) * (1+x)**(n-2) + 1 + dataset1 = np.column_stack([n, x, pp]) + FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + + +class TestKolmogorov(object): + def test_nan(self): + assert_(np.isnan(kolmogorov(np.nan))) + + def test_basic(self): + dataset = [(0, 1.0), + (0.5, 0.96394524366487511), + (0.8275735551899077, 0.5000000000000000), + (1, 0.26999967167735456), + (2, 0.00067092525577969533)] + + dataset = np.asarray(dataset) + FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check() + + def test_linspace(self): + x = np.linspace(0, 2.0, 21) + dataset = [1.0000000000000000, 1.0000000000000000, 0.9999999999994950, + 0.9999906941986655, 0.9971923267772983, 0.9639452436648751, + 0.8642827790506042, 0.7112351950296890, 0.5441424115741981, + 0.3927307079406543, 0.2699996716773546, 0.1777181926064012, + 0.1122496666707249, 0.0680922218447664, 0.0396818795381144, + 0.0222179626165251, 0.0119520432391966, 0.0061774306344441, + 0.0030676213475797, 0.0014636048371873, 0.0006709252557797] + + dataset_c = [0.0000000000000000, 6.609305242245699e-53, 5.050407338670114e-13, + 9.305801334566668e-06, 0.0028076732227017, 0.0360547563351249, + 0.1357172209493958, 0.2887648049703110, 0.4558575884258019, + 0.6072692920593457, 0.7300003283226455, 0.8222818073935988, + 0.8877503333292751, 0.9319077781552336, 0.9603181204618857, + 0.9777820373834749, 0.9880479567608034, 0.9938225693655559, + 0.9969323786524203, 0.9985363951628127, 0.9993290747442203] + + dataset = np.column_stack([x, dataset]) + FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check() + dataset_c = np.column_stack([x, dataset_c]) + FuncData(_kolmogc, dataset_c, (0,), 1, rtol=_rtol).check() + + def test_linspacei(self): + p = np.linspace(0, 1.0, 21, endpoint=True) + dataset = [np.inf, 1.3580986393225507, 1.2238478702170823, + 1.1379465424937751, 1.0727491749396481, 1.0191847202536859, + 0.9730633753323726, 0.9320695842357622, 0.8947644549851197, + 0.8601710725555463, 0.8275735551899077, 0.7964065373291559, + 0.7661855555617682, 0.7364542888171910, 0.7067326523068980, + 0.6764476915028201, 0.6448126061663567, 0.6105590999244391, + 0.5711732651063401, 0.5196103791686224, 0.0000000000000000] + + dataset_c = [0.0000000000000000, 0.5196103791686225, 0.5711732651063401, + 0.6105590999244391, 0.6448126061663567, 0.6764476915028201, + 0.7067326523068980, 0.7364542888171910, 0.7661855555617682, + 0.7964065373291559, 0.8275735551899077, 0.8601710725555463, + 0.8947644549851196, 0.9320695842357622, 0.9730633753323727, + 1.0191847202536859, 1.0727491749396481, 1.1379465424937754, + 1.2238478702170825, 1.3580986393225509, np.inf] + + dataset = np.column_stack([p[1:], dataset[1:]]) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + dataset_c = np.column_stack([p[:-1], dataset_c[:-1]]) + FuncData(_kolmogci, dataset_c, (0,), 1, rtol=_rtol).check() + + def test_smallx(self): + epsilon = 0.1 ** np.arange(1, 14) + x = np.array([0.571173265106, 0.441027698518, 0.374219690278, 0.331392659217, + 0.300820537459, 0.277539353999, 0.259023494805, 0.243829561254, + 0.231063086389, 0.220135543236, 0.210641372041, 0.202290283658, + 0.19487060742]) + + dataset = np.column_stack([x, 1-epsilon]) + FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check() + + def test_round_trip(self): + def _ki_k(_x): + return kolmogi(kolmogorov(_x)) + + def _kci_kc(_x): + return _kolmogci(_kolmogc(_x)) + + x = np.linspace(0.0, 2.0, 21, endpoint=True) + x02 = x[(x == 0) | (x > 0.21)] # Exclude 0.1, 0.2. 
0.2 almost succeeds, but 0.1 has no chance. + dataset02 = np.column_stack([x02, x02]) + FuncData(_ki_k, dataset02, (0,), 1, rtol=_rtol).check() + + dataset = np.column_stack([x, x]) + FuncData(_kci_kc, dataset, (0,), 1, rtol=_rtol).check() + + +class TestKolmogi(object): + def test_nan(self): + assert_(np.isnan(kolmogi(np.nan))) + + def test_basic(self): + dataset = [(1.0, 0), + (0.96394524366487511, 0.5), + (0.9, 0.571173265106), + (0.5000000000000000, 0.8275735551899077), + (0.26999967167735456, 1), + (0.00067092525577969533, 2)] + + dataset = np.asarray(dataset) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + + def test_smallpcdf(self): + epsilon = 0.5 ** np.arange(1, 55, 3) + # kolmogi(1-p) == _kolmogci(p) if 1-(1-p) == p, but not necessarily otherwise + # Use epsilon s.t. 1-(1-epsilon) == epsilon, so can use same x-array for both results + + x = np.array([0.8275735551899077, 0.5345255069097583, 0.4320114038786941, + 0.3736868442620478, 0.3345161714909591, 0.3057833329315859, + 0.2835052890528936, 0.2655578150208676, 0.2506869966107999, + 0.2380971058736669, 0.2272549289962079, 0.2177876361600040, + 0.2094254686862041, 0.2019676748836232, 0.1952612948137504, + 0.1891874239646641, 0.1836520225050326, 0.1785795904846466]) + + dataset = np.column_stack([1-epsilon, x]) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + + dataset = np.column_stack([epsilon, x]) + FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check() + + def test_smallpsf(self): + epsilon = 0.5 ** np.arange(1, 55, 3) + # kolmogi(p) == _kolmogci(1-p) if 1-(1-p) == p, but not necessarily otherwise + # Use epsilon s.t. 1-(1-epsilon) == epsilon, so can use same x-array for both results + + x = np.array([0.8275735551899077, 1.3163786275161036, 1.6651092133663343, + 1.9525136345289607, 2.2027324540033235, 2.4272929437460848, + 2.6327688477341593, 2.8233300509220260, 3.0018183401530627, + 3.1702735084088891, 3.3302184446307912, 3.4828258153113318, + 3.6290214150152051, 3.7695513262825959, 3.9050272690877326, + 4.0359582187082550, 4.1627730557884890, 4.2858371743264527]) + + dataset = np.column_stack([epsilon, x]) + FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check() + + dataset = np.column_stack([1-epsilon, x]) + FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check() + + def test_round_trip(self): + def _k_ki(_p): + return kolmogorov(kolmogi(_p)) + + p = np.linspace(0.1, 1.0, 10, endpoint=True) + dataset = np.column_stack([p, p]) + FuncData(_k_ki, dataset, (0,), 1, rtol=_rtol).check() + + +class TestKolmogp(object): + def test_nan(self): + assert_(np.isnan(_kolmogp(np.nan))) + + def test_basic(self): + dataset = [(0.000000, -0.0), + (0.200000, -1.532420541338916e-10), + (0.400000, -0.1012254419260496), + (0.600000, -1.324123244249925), + (0.800000, -1.627024345636592), + (1.000000, -1.071948558356941), + (1.200000, -0.538512430720529), + (1.400000, -0.2222133182429472), + (1.600000, -0.07649302775520538), + (1.800000, -0.02208687346347873), + (2.000000, -0.005367402045629683)] + + dataset = np.asarray(dataset) + FuncData(_kolmogp, dataset, (0,), 1, rtol=_rtol).check() diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_kolmogorov.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_kolmogorov.pyc new file mode 100644 index 0000000..41909e6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_kolmogorov.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_lambertw.py
b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_lambertw.py new file mode 100644 index 0000000..b0845a8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_lambertw.py @@ -0,0 +1,103 @@ +# +# Tests for the lambertw function, +# Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il +# Distributed under the same license as SciPy itself. +# +# [1] mpmath source code, Subversion revision 992 +# http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992 + +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_array_almost_equal +from scipy.special import lambertw +from numpy import nan, inf, pi, e, isnan, log, r_, array, complex_ + +from scipy.special._testutils import FuncData + + +def test_values(): + assert_(isnan(lambertw(nan))) + assert_equal(lambertw(inf,1).real, inf) + assert_equal(lambertw(inf,1).imag, 2*pi) + assert_equal(lambertw(-inf,1).real, inf) + assert_equal(lambertw(-inf,1).imag, 3*pi) + + assert_equal(lambertw(1.), lambertw(1., 0)) + + data = [ + (0,0, 0), + (0+0j,0, 0), + (inf,0, inf), + (0,-1, -inf), + (0,1, -inf), + (0,3, -inf), + (e,0, 1), + (1,0, 0.567143290409783873), + (-pi/2,0, 1j*pi/2), + (-log(2)/2,0, -log(2)), + (0.25,0, 0.203888354702240164), + (-0.25,0, -0.357402956181388903), + (-1./10000,0, -0.000100010001500266719), + (-0.25,-1, -2.15329236411034965), + (0.25,-1, -3.00899800997004620-4.07652978899159763j), + (-0.25,-1, -2.15329236411034965), + (0.25,1, -3.00899800997004620+4.07652978899159763j), + (-0.25,1, -3.48973228422959210+7.41405453009603664j), + (-4,0, 0.67881197132094523+1.91195078174339937j), + (-4,1, -0.66743107129800988+7.76827456802783084j), + (-4,-1, 0.67881197132094523-1.91195078174339937j), + (1000,0, 5.24960285240159623), + (1000,1, 4.91492239981054535+5.44652615979447070j), + (1000,-1, 4.91492239981054535-5.44652615979447070j), + (1000,5, 3.5010625305312892+29.9614548941181328j), + (3+4j,0, 1.281561806123775878+0.533095222020971071j), + (-0.4+0.4j,0, -0.10396515323290657+0.61899273315171632j), + (3+4j,1, -0.11691092896595324+5.61888039871282334j), + (3+4j,-1, 0.25856740686699742-3.85211668616143559j), + (-0.5,-1, -0.794023632344689368-0.770111750510379110j), + (-1./10000,1, -11.82350837248724344+6.80546081842002101j), + (-1./10000,-1, -11.6671145325663544), + (-1./10000,-2, -11.82350837248724344-6.80546081842002101j), + (-1./100000,4, -14.9186890769540539+26.1856750178782046j), + (-1./100000,5, -15.0931437726379218666+32.5525721210262290086j), + ((2+1j)/10,0, 0.173704503762911669+0.071781336752835511j), + ((2+1j)/10,1, -3.21746028349820063+4.56175438896292539j), + ((2+1j)/10,-1, -3.03781405002993088-3.53946629633505737j), + ((2+1j)/10,4, -4.6878509692773249+23.8313630697683291j), + (-(2+1j)/10,0, -0.226933772515757933-0.164986470020154580j), + (-(2+1j)/10,1, -2.43569517046110001+0.76974067544756289j), + (-(2+1j)/10,-1, -3.54858738151989450-6.91627921869943589j), + (-(2+1j)/10,4, -4.5500846928118151+20.6672982215434637j), + (pi,0, 1.073658194796149172092178407024821347547745350410314531), + + # Former bug in generated branch, + (-0.5+0.002j,0, -0.78917138132659918344 + 0.76743539379990327749j), + (-0.5-0.002j,0, -0.78917138132659918344 - 0.76743539379990327749j), + (-0.448+0.4j,0, -0.11855133765652382241 + 0.66570534313583423116j), + (-0.448-0.4j,0, -0.11855133765652382241 - 0.66570534313583423116j), + ] + data = array(data, dtype=complex_) 
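+    # Editor's sketch (not part of the upstream suite): every finite entry
+    # in the table above must satisfy the defining identity w*exp(w) == z,
+    # whatever the branch index k, so the reference data can be
+    # cross-checked internally before it is compared against lambertw.
+    for z0, _k0, w0 in data:
+        if np.isfinite(z0) and np.isfinite(w0):
+            assert_(np.allclose(w0 * np.exp(w0), z0, rtol=1e-9, atol=1e-13))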
+ + def w(x, y): + return lambertw(x, y.real.astype(int)) + olderr = np.seterr(all='ignore') + try: + FuncData(w, data, (0,1), 2, rtol=1e-10, atol=1e-13).check() + finally: + np.seterr(**olderr) + + +def test_ufunc(): + assert_array_almost_equal( + lambertw(r_[0., e, 1.]), r_[0., 1., 0.567143290409783873]) + + +def test_lambertw_ufunc_loop_selection(): + # see https://github.com/scipy/scipy/issues/4895 + dt = np.dtype(np.complex128) + assert_equal(lambertw(0, 0, 0).dtype, dt) + assert_equal(lambertw([0], 0, 0).dtype, dt) + assert_equal(lambertw(0, [0], 0).dtype, dt) + assert_equal(lambertw(0, 0, [0]).dtype, dt) + assert_equal(lambertw([0], [0], [0]).dtype, dt) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_lambertw.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_lambertw.pyc new file mode 100644 index 0000000..1a00525 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_lambertw.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_loggamma.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_loggamma.py new file mode 100644 index 0000000..c48a987 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_loggamma.py @@ -0,0 +1,72 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_allclose, assert_ + +from scipy.special._testutils import FuncData +from scipy.special import gamma, gammaln, loggamma + + +def test_identities1(): + # test the identity exp(loggamma(z)) = gamma(z) + x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5]) + y = x.copy() + x, y = np.meshgrid(x, y) + z = (x + 1J*y).flatten() + dataset = np.vstack((z, gamma(z))).T + + def f(z): + return np.exp(loggamma(z)) + + FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + + +def test_identities2(): + # test the identity loggamma(z + 1) = log(z) + loggamma(z) + x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5]) + y = x.copy() + x, y = np.meshgrid(x, y) + z = (x + 1J*y).flatten() + dataset = np.vstack((z, np.log(z) + loggamma(z))).T + + def f(z): + return loggamma(z + 1) + + FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + + +def test_complex_dispatch_realpart(): + # Test that the real parts of loggamma and gammaln agree on the + # real axis. 
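+    # Editor's sketch (illustration only, not upstream): for real x > 0 the
+    # gamma function is positive, so gammaln(x) = log|gamma(x)| equals
+    # Re(loggamma(x)) exactly; e.g. gamma(5) = 24, so loggamma(5+0j)
+    # should be purely real and equal to log(24).
+    assert_allclose(loggamma(complex(5, 0)), np.log(24), rtol=1e-14, atol=0)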
+ x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5 + + dataset = np.vstack((x, gammaln(x))).T + + def f(z): + z = np.array(z, dtype='complex128') + return loggamma(z).real + + FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + + +def test_real_dispatch(): + x = np.logspace(-10, 10) + 0.5 + dataset = np.vstack((x, gammaln(x))).T + + FuncData(loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check() + assert_(loggamma(0) == np.inf) + assert_(np.isnan(loggamma(-1))) + + +def test_gh_6536(): + z = loggamma(complex(-3.4, +0.0)) + zbar = loggamma(complex(-3.4, -0.0)) + assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0) + + +def test_branch_cut(): + # Make sure negative zero is treated correctly + x = -np.logspace(300, -30, 100) + z = np.asarray([complex(x0, 0.0) for x0 in x]) + zbar = np.asarray([complex(x0, -0.0) for x0 in x]) + assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_loggamma.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_loggamma.pyc new file mode 100644 index 0000000..fcd2eeb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_loggamma.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logit.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logit.py new file mode 100644 index 0000000..5d6ea00 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logit.py @@ -0,0 +1,81 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, + assert_allclose) +from scipy.special import logit, expit + + +class TestLogit(object): + def check_logit_out(self, dtype, expected): + a = np.linspace(0,1,10) + a = np.array(a, dtype=dtype) + olderr = np.seterr(divide='ignore') + try: + actual = logit(a) + finally: + np.seterr(**olderr) + + assert_almost_equal(actual, expected) + + assert_equal(actual.dtype, np.dtype(dtype)) + + def test_float32(self): + expected = np.array([-np.inf, -2.07944155, + -1.25276291, -0.69314718, + -0.22314353, 0.22314365, + 0.6931473, 1.25276303, + 2.07944155, np.inf], dtype=np.float32) + self.check_logit_out('f4', expected) + + def test_float64(self): + expected = np.array([-np.inf, -2.07944154, + -1.25276297, -0.69314718, + -0.22314355, 0.22314355, + 0.69314718, 1.25276297, + 2.07944154, np.inf]) + self.check_logit_out('f8', expected) + + def test_nan(self): + expected = np.array([np.nan]*4) + olderr = np.seterr(invalid='ignore') + try: + actual = logit(np.array([-3., -2., 2., 3.])) + finally: + np.seterr(**olderr) + + assert_equal(expected, actual) + + +class TestExpit(object): + def check_expit_out(self, dtype, expected): + a = np.linspace(-4,4,10) + a = np.array(a, dtype=dtype) + actual = expit(a) + assert_almost_equal(actual, expected) + assert_equal(actual.dtype, np.dtype(dtype)) + + def test_float32(self): + expected = np.array([0.01798621, 0.04265125, + 0.09777259, 0.20860852, + 0.39068246, 0.60931754, + 0.79139149, 0.9022274, + 0.95734876, 0.98201376], dtype=np.float32) + self.check_expit_out('f4',expected) + + def test_float64(self): + expected = np.array([0.01798621, 0.04265125, + 0.0977726, 0.20860853, + 0.39068246, 0.60931754, + 0.79139147, 0.9022274, + 0.95734875, 0.98201379]) + self.check_expit_out('f8', expected) + + def test_large(self): + for dtype in (np.float32, np.float64, np.longdouble): + for n 
in (88, 89, 709, 710, 11356, 11357): + n = np.array(n, dtype=dtype) + assert_allclose(expit(n), 1.0, atol=1e-20) + assert_allclose(expit(-n), 0.0, atol=1e-20) + assert_equal(expit(n).dtype, dtype) + assert_equal(expit(-n).dtype, dtype) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logit.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logit.pyc new file mode 100644 index 0000000..31ebddd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logit.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logsumexp.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logsumexp.py new file mode 100644 index 0000000..ae7fcc2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logsumexp.py @@ -0,0 +1,196 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose, + assert_array_almost_equal, assert_) + +from scipy.special import logsumexp, softmax + + +def test_logsumexp(): + # Test whether logsumexp() function correctly handles large inputs. + a = np.arange(200) + desired = np.log(np.sum(np.exp(a))) + assert_almost_equal(logsumexp(a), desired) + + # Now test with large numbers + b = [1000, 1000] + desired = 1000.0 + np.log(2.0) + assert_almost_equal(logsumexp(b), desired) + + n = 1000 + b = np.full(n, 10000, dtype='float64') + desired = 10000.0 + np.log(n) + assert_almost_equal(logsumexp(b), desired) + + x = np.array([1e-40] * 1000000) + logx = np.log(x) + + X = np.vstack([x, x]) + logX = np.vstack([logx, logx]) + assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum()) + assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0)) + assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1)) + + # Handling special values properly + assert_equal(logsumexp(np.inf), np.inf) + assert_equal(logsumexp(-np.inf), -np.inf) + assert_equal(logsumexp(np.nan), np.nan) + assert_equal(logsumexp([-np.inf, -np.inf]), -np.inf) + + # Handling an array with different magnitudes on the axes + assert_array_almost_equal(logsumexp([[1e10, 1e-10], + [-1e10, -np.inf]], axis=-1), + [1e10, -1e10]) + + # Test keeping dimensions + assert_array_almost_equal(logsumexp([[1e10, 1e-10], + [-1e10, -np.inf]], + axis=-1, + keepdims=True), + [[1e10], [-1e10]]) + + # Test multiple axes + assert_array_almost_equal(logsumexp([[1e10, 1e-10], + [-1e10, -np.inf]], + axis=(-1,-2)), + 1e10) + + +def test_logsumexp_b(): + a = np.arange(200) + b = np.arange(200, 0, -1) + desired = np.log(np.sum(b*np.exp(a))) + assert_almost_equal(logsumexp(a, b=b), desired) + + a = [1000, 1000] + b = [1.2, 1.2] + desired = 1000 + np.log(2 * 1.2) + assert_almost_equal(logsumexp(a, b=b), desired) + + x = np.array([1e-40] * 100000) + b = np.linspace(1, 1000, 100000) + logx = np.log(x) + + X = np.vstack((x, x)) + logX = np.vstack((logx, logx)) + B = np.vstack((b, b)) + assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum()) + assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)), + (B * X).sum(axis=0)) + assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)), + (B * X).sum(axis=1)) + + +def test_logsumexp_sign(): + a = [1,1,1] + b = [1,-1,-1] + + r, s = logsumexp(a, b=b, return_sign=True) + assert_almost_equal(r,1) + assert_equal(s,-1) + + +def test_logsumexp_sign_zero(): + a = [1,1] + b = [1,-1] + + r, s = 
logsumexp(a, b=b, return_sign=True) + assert_(not np.isfinite(r)) + assert_(not np.isnan(r)) + assert_(r < 0) + assert_equal(s,0) + + +def test_logsumexp_sign_shape(): + a = np.ones((1,2,3,4)) + b = np.ones_like(a) + + r, s = logsumexp(a, axis=2, b=b, return_sign=True) + + assert_equal(r.shape, s.shape) + assert_equal(r.shape, (1,2,4)) + + r, s = logsumexp(a, axis=(1,3), b=b, return_sign=True) + + assert_equal(r.shape, s.shape) + assert_equal(r.shape, (1,3)) + + +def test_logsumexp_shape(): + a = np.ones((1, 2, 3, 4)) + b = np.ones_like(a) + + r = logsumexp(a, axis=2, b=b) + assert_equal(r.shape, (1, 2, 4)) + + r = logsumexp(a, axis=(1, 3), b=b) + assert_equal(r.shape, (1, 3)) + + +def test_logsumexp_b_zero(): + a = [1,10000] + b = [1,0] + + assert_almost_equal(logsumexp(a, b=b), 1) + + +def test_logsumexp_b_shape(): + a = np.zeros((4,1,2,1)) + b = np.ones((3,1,5)) + + logsumexp(a, b=b) + + +def test_softmax_fixtures(): + assert_allclose(softmax([1000, 0, 0, 0]), np.array([1, 0, 0, 0]), + rtol=1e-13) + assert_allclose(softmax([1, 1]), np.array([.5, .5]), rtol=1e-13) + assert_allclose(softmax([0, 1]), np.array([1, np.e])/(1 + np.e), + rtol=1e-13) + + # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then + # converted to float. + x = np.arange(4) + expected = np.array([0.03205860328008499, + 0.08714431874203256, + 0.23688281808991013, + 0.6439142598879722]) + + assert_allclose(softmax(x), expected, rtol=1e-13) + + # Translation property. If all the values are changed by the same amount, + # the softmax result does not change. + assert_allclose(softmax(x + 100), expected, rtol=1e-13) + + # When axis=None, softmax operates on the entire array, and preserves + # the shape. + assert_allclose(softmax(x.reshape(2, 2)), expected.reshape(2, 2), + rtol=1e-13) + + +def test_softmax_multi_axes(): + assert_allclose(softmax([[1000, 0], [1000, 0]], axis=0), + np.array([[.5, .5], [.5, .5]]), rtol=1e-13) + assert_allclose(softmax([[1000, 0], [1000, 0]], axis=1), + np.array([[1, 0], [1, 0]]), rtol=1e-13) + + # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then + # converted to float. + x = np.array([[-25, 0, 25, 50], + [1, 325, 749, 750]]) + expected = np.array([[2.678636961770877e-33, + 1.9287498479371314e-22, + 1.3887943864771144e-11, + 0.999999999986112], + [0.0, + 1.9444526359919372e-185, + 0.2689414213699951, + 0.7310585786300048]]) + assert_allclose(softmax(x, axis=1), expected, rtol=1e-13) + assert_allclose(softmax(x.T, axis=0), expected.T, rtol=1e-13) + + # 3-d input, with a tuple for the axis. + x3d = x.reshape(2, 2, 2) + assert_allclose(softmax(x3d, axis=(1, 2)), expected.reshape(2, 2, 2), + rtol=1e-13) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logsumexp.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logsumexp.pyc new file mode 100644 index 0000000..7aaf156 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_logsumexp.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_mpmath.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_mpmath.py new file mode 100644 index 0000000..ef56c1e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_mpmath.py @@ -0,0 +1,2023 @@ +""" +Test Scipy functions versus mpmath, if available. 
+ +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_, assert_allclose +from numpy import pi +import pytest +import itertools + +from distutils.version import LooseVersion + +import scipy.special as sc +from scipy._lib.six import with_metaclass +from scipy.special._testutils import ( + MissingModule, check_version, FuncData, + assert_func_equal) +from scipy.special._mptestutils import ( + Arg, FixedArg, ComplexArg, IntArg, assert_mpmath_equal, + nonfunctional_tooslow, trace_args, time_limited, exception_to_nan, + inf_to_nan) +from scipy.special._ufuncs import ( + _sinpi, _cospi, _lgam1p, _lanczos_sum_expg_scaled, _log1pmx, + _igam_fac) + +try: + import mpmath +except ImportError: + mpmath = MissingModule('mpmath') + + +_is_32bit_platform = np.intp(0).itemsize < 8 + + +# ------------------------------------------------------------------------------ +# expi +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.10') +def test_expi_complex(): + dataset = [] + for r in np.logspace(-99, 2, 10): + for p in np.linspace(0, 2*np.pi, 30): + z = r*np.exp(1j*p) + dataset.append((z, complex(mpmath.ei(z)))) + dataset = np.array(dataset, dtype=np.complex_) + + FuncData(sc.expi, dataset, 0, 1).check() + + +# ------------------------------------------------------------------------------ +# expn +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +def test_expn_large_n(): + # Test the transition to the asymptotic regime of n. + dataset = [] + for n in [50, 51]: + for x in np.logspace(0, 4, 200): + with mpmath.workdps(100): + dataset.append((n, x, float(mpmath.expint(n, x)))) + dataset = np.asarray(dataset) + + FuncData(sc.expn, dataset, (0, 1), 2, rtol=1e-13).check() + +# ------------------------------------------------------------------------------ +# hyp0f1 +# ------------------------------------------------------------------------------ + + +@check_version(mpmath, '0.19') +def test_hyp0f1_gh5764(): + # Do a small and somewhat systematic test that runs quickly + dataset = [] + axis = [-99.5, -9.5, -0.5, 0.5, 9.5, 99.5] + for v in axis: + for x in axis: + for y in axis: + z = x + 1j*y + # mpmath computes the answer correctly at dps ~ 17 but + # fails for 20 < dps < 120 (uses a different method); + # set the dps high enough that this isn't an issue + with mpmath.workdps(120): + res = complex(mpmath.hyp0f1(v, z)) + dataset.append((v, z, res)) + dataset = np.array(dataset) + + FuncData(lambda v, z: sc.hyp0f1(v.real, z), dataset, (0, 1), 2, + rtol=1e-13).check() + + +@check_version(mpmath, '0.19') +def test_hyp0f1_gh_1609(): + # this is a regression test for gh-1609 + vv = np.linspace(150, 180, 21) + af = sc.hyp0f1(vv, 0.5) + mf = np.array([mpmath.hyp0f1(v, 0.5) for v in vv]) + assert_allclose(af, mf.astype(float), rtol=1e-12) + + +# ------------------------------------------------------------------------------ +# hyp2f1 +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '1.0.0') +def test_hyp2f1_strange_points(): + pts = [ + (2, -1, -1, 0.7), # expected: 2.4 + (2, -2, -2, 0.7), # expected: 3.87 + ] + pts += list(itertools.product([2, 1, -0.7, -1000], repeat=4)) + pts = [ + (a, b, c, x) for a, b, c, x in pts + if b == c and round(b) == b and b < 0 and b != -1000 + ] + kw = dict(eliminate=True) + dataset = [p + (float(mpmath.hyp2f1(*p, **kw)),) for p in pts] + 
dataset = np.array(dataset, dtype=np.float_) + + FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() + + +@check_version(mpmath, '0.13') +def test_hyp2f1_real_some_points(): + pts = [ + (1, 2, 3, 0), + (1./3, 2./3, 5./6, 27./32), + (1./4, 1./2, 3./4, 80./81), + (2,-2, -3, 3), + (2, -3, -2, 3), + (2, -1.5, -1.5, 3), + (1, 2, 3, 0), + (0.7235, -1, -5, 0.3), + (0.25, 1./3, 2, 0.999), + (0.25, 1./3, 2, -1), + (2, 3, 5, 0.99), + (3./2, -0.5, 3, 0.99), + (2, 2.5, -3.25, 0.999), + (-8, 18.016500331508873, 10.805295997850628, 0.90875647507000001), + (-10, 900, -10.5, 0.99), + (-10, 900, 10.5, 0.99), + (-1, 2, 1, 1.0), + (-1, 2, 1, -1.0), + (-3, 13, 5, 1.0), + (-3, 13, 5, -1.0), + (0.5, 1 - 270.5, 1.5, 0.999**2), # from issue 1561 + ] + dataset = [p + (float(mpmath.hyp2f1(*p)),) for p in pts] + dataset = np.array(dataset, dtype=np.float_) + + olderr = np.seterr(invalid='ignore') + try: + FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() + finally: + np.seterr(**olderr) + + +@check_version(mpmath, '0.14') +def test_hyp2f1_some_points_2(): + # Taken from mpmath unit tests -- this point failed for mpmath 0.13 but + # was fixed in their SVN since then + pts = [ + (112, (51,10), (-9,10), -0.99999), + (10,-900,10.5,0.99), + (10,-900,-10.5,0.99), + ] + + def fev(x): + if isinstance(x, tuple): + return float(x[0]) / x[1] + else: + return x + + dataset = [tuple(map(fev, p)) + (float(mpmath.hyp2f1(*p)),) for p in pts] + dataset = np.array(dataset, dtype=np.float_) + + FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() + + +@check_version(mpmath, '0.13') +def test_hyp2f1_real_some(): + dataset = [] + for a in [-10, -5, -1.8, 1.8, 5, 10]: + for b in [-2.5, -1, 1, 7.4]: + for c in [-9, -1.8, 5, 20.4]: + for z in [-10, -1.01, -0.99, 0, 0.6, 0.95, 1.5, 10]: + try: + v = float(mpmath.hyp2f1(a, b, c, z)) + except Exception: + continue + dataset.append((a, b, c, z, v)) + dataset = np.array(dataset, dtype=np.float_) + + olderr = np.seterr(invalid='ignore') + try: + FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9, + ignore_inf_sign=True).check() + finally: + np.seterr(**olderr) + + +@check_version(mpmath, '0.12') +@pytest.mark.slow +def test_hyp2f1_real_random(): + npoints = 500 + dataset = np.zeros((npoints, 5), np.float_) + + np.random.seed(1234) + dataset[:, 0] = np.random.pareto(1.5, npoints) + dataset[:, 1] = np.random.pareto(1.5, npoints) + dataset[:, 2] = np.random.pareto(1.5, npoints) + dataset[:, 3] = 2*np.random.rand(npoints) - 1 + + dataset[:, 0] *= (-1)**np.random.randint(2, npoints) + dataset[:, 1] *= (-1)**np.random.randint(2, npoints) + dataset[:, 2] *= (-1)**np.random.randint(2, npoints) + + for ds in dataset: + if mpmath.__version__ < '0.14': + # mpmath < 0.14 fails for c too much smaller than a, b + if abs(ds[:2]).max() > abs(ds[2]): + ds[2] = abs(ds[:2]).max() + ds[4] = float(mpmath.hyp2f1(*tuple(ds[:4]))) + + FuncData(sc.hyp2f1, dataset, (0, 1, 2, 3), 4, rtol=1e-9).check() + +# ------------------------------------------------------------------------------ +# erf (complex) +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.14') +def test_erf_complex(): + # need to increase mpmath precision for this test + old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec + try: + mpmath.mp.dps = 70 + x1, y1 = np.meshgrid(np.linspace(-10, 1, 31), np.linspace(-10, 1, 11)) + x2, y2 = np.meshgrid(np.logspace(-80, .8, 31), np.logspace(-80, .8, 11)) + points = np.r_[x1.ravel(),x2.ravel()] + 1j*np.r_[y1.ravel(), y2.ravel()] 
+ + assert_func_equal(sc.erf, lambda x: complex(mpmath.erf(x)), points, + vectorized=False, rtol=1e-13) + assert_func_equal(sc.erfc, lambda x: complex(mpmath.erfc(x)), points, + vectorized=False, rtol=1e-13) + finally: + mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec + + +# ------------------------------------------------------------------------------ +# lpmv +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.15') +def test_lpmv(): + pts = [] + for x in [-0.99, -0.557, 1e-6, 0.132, 1]: + pts.extend([ + (1, 1, x), + (1, -1, x), + (-1, 1, x), + (-1, -2, x), + (1, 1.7, x), + (1, -1.7, x), + (-1, 1.7, x), + (-1, -2.7, x), + (1, 10, x), + (1, 11, x), + (3, 8, x), + (5, 11, x), + (-3, 8, x), + (-5, 11, x), + (3, -8, x), + (5, -11, x), + (-3, -8, x), + (-5, -11, x), + (3, 8.3, x), + (5, 11.3, x), + (-3, 8.3, x), + (-5, 11.3, x), + (3, -8.3, x), + (5, -11.3, x), + (-3, -8.3, x), + (-5, -11.3, x), + ]) + + def mplegenp(nu, mu, x): + if mu == int(mu) and x == 1: + # mpmath 0.17 gets this wrong + if mu == 0: + return 1 + else: + return 0 + return mpmath.legenp(nu, mu, x) + + dataset = [p + (mplegenp(p[1], p[0], p[2]),) for p in pts] + dataset = np.array(dataset, dtype=np.float_) + + def evf(mu, nu, x): + return sc.lpmv(mu.astype(int), nu, x) + + olderr = np.seterr(invalid='ignore') + try: + FuncData(evf, dataset, (0,1,2), 3, rtol=1e-10, atol=1e-14).check() + finally: + np.seterr(**olderr) + + +# ------------------------------------------------------------------------------ +# beta +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.15') +def test_beta(): + np.random.seed(1234) + + b = np.r_[np.logspace(-200, 200, 4), + np.logspace(-10, 10, 4), + np.logspace(-1, 1, 4), + np.arange(-10, 11, 1), + np.arange(-10, 11, 1) + 0.5, + -1, -2.3, -3, -100.3, -10003.4] + a = b + + ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T + + old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec + try: + mpmath.mp.dps = 400 + + assert_func_equal(sc.beta, + lambda a, b: float(mpmath.beta(a, b)), + ab, + vectorized=False, + rtol=1e-10, + ignore_inf_sign=True) + + assert_func_equal( + sc.betaln, + lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))), + ab, + vectorized=False, + rtol=1e-10) + finally: + mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec + + +# ------------------------------------------------------------------------------ +# loggamma +# ------------------------------------------------------------------------------ + +LOGGAMMA_TAYLOR_RADIUS = 0.2 + + +@check_version(mpmath, '0.19') +def test_loggamma_taylor_transition(): + # Make sure there isn't a big jump in accuracy when we move from + # using the Taylor series to using the recurrence relation. + + r = LOGGAMMA_TAYLOR_RADIUS + np.array([-0.1, -0.01, 0, 0.01, 0.1]) + theta = np.linspace(0, 2*np.pi, 20) + r, theta = np.meshgrid(r, theta) + dz = r*np.exp(1j*theta) + z = np.r_[1 + dz, 2 + dz].flatten() + + dataset = [] + for z0 in z: + dataset.append((z0, complex(mpmath.loggamma(z0)))) + dataset = np.array(dataset) + + FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check() + + +@check_version(mpmath, '0.19') +def test_loggamma_taylor(): + # Test around the zeros at z = 1, 2. 
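+    # Editor's note: gamma(1) = gamma(2) = 1, so loggamma has zeros at
+    # z = 1 and z = 2; near a zero only an expansion about the zero keeps
+    # the *relative* error small, which is what this neighbourhood of
+    # radius LOGGAMMA_TAYLOR_RADIUS is probing.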
+ + r = np.logspace(-16, np.log10(LOGGAMMA_TAYLOR_RADIUS), 10) + theta = np.linspace(0, 2*np.pi, 20) + r, theta = np.meshgrid(r, theta) + dz = r*np.exp(1j*theta) + z = np.r_[1 + dz, 2 + dz].flatten() + + dataset = [] + for z0 in z: + dataset.append((z0, complex(mpmath.loggamma(z0)))) + dataset = np.array(dataset) + + FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check() + + +# ------------------------------------------------------------------------------ +# rgamma +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +@pytest.mark.slow +def test_rgamma_zeros(): + # Test around the zeros at z = 0, -1, -2, ..., -169. (After -169 we + # get values that are out of floating point range even when we're + # within 0.1 of the zero.) + + # Can't use too many points here or the test takes forever. + dx = np.r_[-np.logspace(-1, -13, 3), 0, np.logspace(-13, -1, 3)] + dy = dx.copy() + dx, dy = np.meshgrid(dx, dy) + dz = dx + 1j*dy + zeros = np.arange(0, -170, -1).reshape(1, 1, -1) + z = (zeros + np.dstack((dz,)*zeros.size)).flatten() + dataset = [] + with mpmath.workdps(100): + for z0 in z: + dataset.append((z0, complex(mpmath.rgamma(z0)))) + + dataset = np.array(dataset) + FuncData(sc.rgamma, dataset, 0, 1, rtol=1e-12).check() + + +# ------------------------------------------------------------------------------ +# digamma +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +@pytest.mark.slow +def test_digamma_roots(): + # Test the special-cased roots for digamma. + root = mpmath.findroot(mpmath.digamma, 1.5) + roots = [float(root)] + root = mpmath.findroot(mpmath.digamma, -0.5) + roots.append(float(root)) + roots = np.array(roots) + + # If we test beyond a radius of 0.24 mpmath will take forever. + dx = np.r_[-0.24, -np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10), 0.24] + dy = dx.copy() + dx, dy = np.meshgrid(dx, dy) + dz = dx + 1j*dy + z = (roots + np.dstack((dz,)*roots.size)).flatten() + dataset = [] + with mpmath.workdps(30): + for z0 in z: + dataset.append((z0, complex(mpmath.digamma(z0)))) + + dataset = np.array(dataset) + FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check() + + +@check_version(mpmath, '0.19') +def test_digamma_negreal(): + # Test digamma around the negative real axis. Don't do this in + # TestSystematic because the points need some jiggering so that + # mpmath doesn't take forever. + + digamma = exception_to_nan(mpmath.digamma) + + x = -np.logspace(300, -30, 100) + y = np.r_[-np.logspace(0, -3, 5), 0, np.logspace(-3, 0, 5)] + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = [] + with mpmath.workdps(40): + for z0 in z: + res = digamma(z0) + dataset.append((z0, complex(res))) + dataset = np.asarray(dataset) + + FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check() + + +@check_version(mpmath, '0.19') +def test_digamma_boundary(): + # Check that there isn't a jump in accuracy when we switch from + # using the asymptotic series to the reflection formula. 
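+    # Editor's sketch (not upstream): the reflection formula in question is
+    # digamma(1 - z) = digamma(z) + pi/tan(pi*z); spot-check it once near
+    # the |Im(z)| ~ 6 switchover exercised by the grid below.
+    z_spot = 0.25 + 6.0j
+    assert_allclose(sc.digamma(1 - z_spot),
+                    sc.digamma(z_spot) + np.pi/np.tan(np.pi*z_spot),
+                    rtol=1e-12)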
+ + x = -np.logspace(300, -30, 100) + y = np.array([-6.1, -5.9, 5.9, 6.1]) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = [] + with mpmath.workdps(30): + for z0 in z: + res = mpmath.digamma(z0) + dataset.append((z0, complex(res))) + dataset = np.asarray(dataset) + + FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check() + + +# ------------------------------------------------------------------------------ +# gammainc +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +@pytest.mark.slow +def test_gammainc_boundary(): + # Test the transition to the asymptotic series. + small = 20 + a = np.linspace(0.5*small, 2*small, 50) + x = a.copy() + a, x = np.meshgrid(a, x) + a, x = a.flatten(), x.flatten() + dataset = [] + with mpmath.workdps(100): + for a0, x0 in zip(a, x): + dataset.append((a0, x0, float(mpmath.gammainc(a0, b=x0, regularized=True)))) + dataset = np.array(dataset) + + FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-12).check() + + +# ------------------------------------------------------------------------------ +# spence +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +@pytest.mark.slow +def test_spence_circle(): + # The trickiest region for spence is around the circle |z - 1| = 1, + # so test that region carefully. + + def spence(z): + return complex(mpmath.polylog(2, 1 - z)) + + r = np.linspace(0.5, 1.5) + theta = np.linspace(0, 2*pi) + z = (1 + np.outer(r, np.exp(1j*theta))).flatten() + dataset = [] + for z0 in z: + dataset.append((z0, spence(z0))) + + dataset = np.array(dataset) + FuncData(sc.spence, dataset, 0, 1, rtol=1e-14).check() + + +# ------------------------------------------------------------------------------ +# sinpi and cospi +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +def test_sinpi_zeros(): + eps = np.finfo(float).eps + dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)] + dy = dx.copy() + dx, dy = np.meshgrid(dx, dy) + dz = dx + 1j*dy + zeros = np.arange(-100, 100, 1).reshape(1, 1, -1) + z = (zeros + np.dstack((dz,)*zeros.size)).flatten() + dataset = [] + for z0 in z: + dataset.append((z0, complex(mpmath.sinpi(z0)))) + + dataset = np.array(dataset) + FuncData(_sinpi, dataset, 0, 1, rtol=2*eps).check() + + +@check_version(mpmath, '0.19') +def test_cospi_zeros(): + eps = np.finfo(float).eps + dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)] + dy = dx.copy() + dx, dy = np.meshgrid(dx, dy) + dz = dx + 1j*dy + zeros = (np.arange(-100, 100, 1) + 0.5).reshape(1, 1, -1) + z = (zeros + np.dstack((dz,)*zeros.size)).flatten() + dataset = [] + for z0 in z: + dataset.append((z0, complex(mpmath.cospi(z0)))) + + dataset = np.array(dataset) + FuncData(_cospi, dataset, 0, 1, rtol=2*eps).check() + + +# ------------------------------------------------------------------------------ +# ellipj +# ------------------------------------------------------------------------------ + +@check_version(mpmath, '0.19') +def test_dn_quarter_period(): + def dn(u, m): + return sc.ellipj(u, m)[2] + + def mpmath_dn(u, m): + return float(mpmath.ellipfun("dn", u=u, m=m)) + + m = np.linspace(0, 1, 20) + du = np.r_[-np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10)] + dataset = [] + for m0 in m: + u0 = float(mpmath.ellipk(m0)) + for du0 in du: + p = u0 + du0 + dataset.append((p, m0, mpmath_dn(p, m0))) + dataset = np.asarray(dataset) + + FuncData(dn, 
dataset, (0, 1), 2, rtol=1e-10).check() + + +# ------------------------------------------------------------------------------ +# Wright Omega +# ------------------------------------------------------------------------------ + +def _mpmath_wrightomega(z, dps): + with mpmath.workdps(dps): + z = mpmath.mpc(z) + unwind = mpmath.ceil((z.imag - mpmath.pi)/(2*mpmath.pi)) + res = mpmath.lambertw(mpmath.exp(z), unwind) + return res + + +@pytest.mark.slow +@check_version(mpmath, '0.19') +def test_wrightomega_branch(): + x = -np.logspace(10, 0, 25) + picut_above = [np.nextafter(np.pi, np.inf)] + picut_below = [np.nextafter(np.pi, -np.inf)] + npicut_above = [np.nextafter(-np.pi, np.inf)] + npicut_below = [np.nextafter(-np.pi, -np.inf)] + for i in range(50): + picut_above.append(np.nextafter(picut_above[-1], np.inf)) + picut_below.append(np.nextafter(picut_below[-1], -np.inf)) + npicut_above.append(np.nextafter(npicut_above[-1], np.inf)) + npicut_below.append(np.nextafter(npicut_below[-1], -np.inf)) + y = np.hstack((picut_above, picut_below, npicut_above, npicut_below)) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = [] + for z0 in z: + dataset.append((z0, complex(_mpmath_wrightomega(z0, 25)))) + dataset = np.asarray(dataset) + + FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-8).check() + + +@pytest.mark.slow +@check_version(mpmath, '0.19') +def test_wrightomega_region1(): + # This region gets less coverage in the TestSystematic test + x = np.linspace(-2, 1) + y = np.linspace(1, 2*np.pi) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = [] + for z0 in z: + dataset.append((z0, complex(_mpmath_wrightomega(z0, 25)))) + dataset = np.asarray(dataset) + + FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check() + + +@pytest.mark.slow +@check_version(mpmath, '0.19') +def test_wrightomega_region2(): + # This region gets less coverage in the TestSystematic test + x = np.linspace(-2, 1) + y = np.linspace(-2*np.pi, -1) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = [] + for z0 in z: + dataset.append((z0, complex(_mpmath_wrightomega(z0, 25)))) + dataset = np.asarray(dataset) + + FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check() + + +# ------------------------------------------------------------------------------ +# lambertw +# ------------------------------------------------------------------------------ + +@pytest.mark.slow +@check_version(mpmath, '0.19') +def test_lambertw_smallz(): + x, y = np.linspace(-1, 1, 25), np.linspace(-1, 1, 25) + x, y = np.meshgrid(x, y) + z = (x + 1j*y).flatten() + + dataset = [] + for z0 in z: + dataset.append((z0, complex(mpmath.lambertw(z0)))) + dataset = np.asarray(dataset) + + FuncData(sc.lambertw, dataset, 0, 1, rtol=1e-13).check() + + +# ------------------------------------------------------------------------------ +# Systematic tests +# ------------------------------------------------------------------------------ + +HYPERKW = dict(maxprec=200, maxterms=200) + + +@pytest.mark.slow +@check_version(mpmath, '0.17') +class TestSystematic(object): + + def test_airyai(self): + # oscillating function, limit range + assert_mpmath_equal(lambda z: sc.airy(z)[0], + mpmath.airyai, + [Arg(-1e8, 1e8)], + rtol=1e-5) + assert_mpmath_equal(lambda z: sc.airy(z)[0], + mpmath.airyai, + [Arg(-1e3, 1e3)]) + + def test_airyai_complex(self): + assert_mpmath_equal(lambda z: sc.airy(z)[0], + mpmath.airyai, + [ComplexArg()]) + + def test_airyai_prime(self): + # oscillating function, limit range + assert_mpmath_equal(lambda z: 
sc.airy(z)[1], lambda z: + mpmath.airyai(z, derivative=1), + [Arg(-1e8, 1e8)], + rtol=1e-5) + assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: + mpmath.airyai(z, derivative=1), + [Arg(-1e3, 1e3)]) + + def test_airyai_prime_complex(self): + assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: + mpmath.airyai(z, derivative=1), + [ComplexArg()]) + + def test_airybi(self): + # oscillating function, limit range + assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: + mpmath.airybi(z), + [Arg(-1e8, 1e8)], + rtol=1e-5) + assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: + mpmath.airybi(z), + [Arg(-1e3, 1e3)]) + + def test_airybi_complex(self): + assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: + mpmath.airybi(z), + [ComplexArg()]) + + def test_airybi_prime(self): + # oscillating function, limit range + assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: + mpmath.airybi(z, derivative=1), + [Arg(-1e8, 1e8)], + rtol=1e-5) + assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: + mpmath.airybi(z, derivative=1), + [Arg(-1e3, 1e3)]) + + def test_airybi_prime_complex(self): + assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: + mpmath.airybi(z, derivative=1), + [ComplexArg()]) + + def test_bei(self): + assert_mpmath_equal(sc.bei, + exception_to_nan(lambda z: mpmath.bei(0, z, **HYPERKW)), + [Arg(-1e3, 1e3)]) + + def test_ber(self): + assert_mpmath_equal(sc.ber, + exception_to_nan(lambda z: mpmath.ber(0, z, **HYPERKW)), + [Arg(-1e3, 1e3)]) + + def test_bernoulli(self): + assert_mpmath_equal(lambda n: sc.bernoulli(int(n))[int(n)], + lambda n: float(mpmath.bernoulli(int(n))), + [IntArg(0, 13000)], + rtol=1e-9, n=13000) + + def test_besseli(self): + assert_mpmath_equal(sc.iv, + exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), Arg()], + atol=1e-270) + + def test_besseli_complex(self): + assert_mpmath_equal(lambda v, z: sc.iv(v.real, z), + exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), ComplexArg()]) + + def test_besselj(self): + assert_mpmath_equal(sc.jv, + exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), Arg(-1e3, 1e3)], + ignore_inf_sign=True) + + # loss of precision at large arguments due to oscillation + assert_mpmath_equal(sc.jv, + exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), Arg(-1e8, 1e8)], + ignore_inf_sign=True, + rtol=1e-5) + + def test_besselj_complex(self): + assert_mpmath_equal(lambda v, z: sc.jv(v.real, z), + exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), + [Arg(), ComplexArg()]) + + def test_besselk(self): + assert_mpmath_equal(sc.kv, + mpmath.besselk, + [Arg(-200, 200), Arg(0, np.inf)], + nan_ok=False, rtol=1e-12) + + def test_besselk_int(self): + assert_mpmath_equal(sc.kn, + mpmath.besselk, + [IntArg(-200, 200), Arg(0, np.inf)], + nan_ok=False, rtol=1e-12) + + def test_besselk_complex(self): + assert_mpmath_equal(lambda v, z: sc.kv(v.real, z), + exception_to_nan(lambda v, z: mpmath.besselk(v, z, **HYPERKW)), + [Arg(-1e100, 1e100), ComplexArg()]) + + def test_bessely(self): + def mpbessely(v, x): + r = float(mpmath.bessely(v, x, **HYPERKW)) + if abs(r) > 1e305: + # overflowing to inf a bit earlier is OK + r = np.inf * np.sign(r) + if abs(r) == 0 and x == 0: + # invalid result from mpmath, point x=0 is a divergence + return np.nan + return r + assert_mpmath_equal(sc.yv, + exception_to_nan(mpbessely), + [Arg(-1e100, 1e100), Arg(-1e8, 1e8)], + n=5000) + + def test_bessely_complex(self): + def 
mpbessely(v, x): + r = complex(mpmath.bessely(v, x, **HYPERKW)) + if abs(r) > 1e305: + # overflowing to inf a bit earlier is OK + olderr = np.seterr(invalid='ignore') + try: + r = np.inf * np.sign(r) + finally: + np.seterr(**olderr) + return r + assert_mpmath_equal(lambda v, z: sc.yv(v.real, z), + exception_to_nan(mpbessely), + [Arg(), ComplexArg()], + n=15000) + + def test_bessely_int(self): + def mpbessely(v, x): + r = float(mpmath.bessely(v, x)) + if abs(r) == 0 and x == 0: + # invalid result from mpmath, point x=0 is a divergence + return np.nan + return r + assert_mpmath_equal(lambda v, z: sc.yn(int(v), z), + exception_to_nan(mpbessely), + [IntArg(-1000, 1000), Arg(-1e8, 1e8)]) + + def test_beta(self): + bad_points = [] + + def beta(a, b, nonzero=False): + if a < -1e12 or b < -1e12: + # Function is defined here only at integers, but due + # to loss of precision this is numerically + # ill-defined. Don't compare values here. + return np.nan + if (a < 0 or b < 0) and (abs(float(a + b)) % 1) == 0: + # close to a zero of the function: mpmath and scipy + # will not round here the same, so the test needs to be + # run with an absolute tolerance + if nonzero: + bad_points.append((float(a), float(b))) + return np.nan + return mpmath.beta(a, b) + + assert_mpmath_equal(sc.beta, + lambda a, b: beta(a, b, nonzero=True), + [Arg(), Arg()], + dps=400, + ignore_inf_sign=True) + + assert_mpmath_equal(sc.beta, + beta, + np.array(bad_points), + dps=400, + ignore_inf_sign=True, + atol=1e-11) + + def test_betainc(self): + assert_mpmath_equal(sc.betainc, + time_limited()(exception_to_nan(lambda a, b, x: mpmath.betainc(a, b, 0, x, regularized=True))), + [Arg(), Arg(), Arg()]) + + def test_binom(self): + bad_points = [] + + def binomial(n, k, nonzero=False): + if abs(k) > 1e8*(abs(n) + 1): + # The binomial is rapidly oscillating in this region, + # and the function is numerically ill-defined. Don't + # compare values here. 
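+                # Editor's note: for real arguments the binomial
+                # generalizes to gamma(n+1)/(gamma(k+1)*gamma(n-k+1)),
+                # and the reflection of gamma through its poles makes the
+                # sign flip with every unit step of k, hence the rapid
+                # oscillation noted above.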
+ return np.nan + if n < k and abs(float(n-k) - np.round(float(n-k))) < 1e-15: + # close to a zero of the function: mpmath and scipy + # will not round here the same, so the test needs to be + # run with an absolute tolerance + if nonzero: + bad_points.append((float(n), float(k))) + return np.nan + return mpmath.binomial(n, k) + + assert_mpmath_equal(sc.binom, + lambda n, k: binomial(n, k, nonzero=True), + [Arg(), Arg()], + dps=400) + + assert_mpmath_equal(sc.binom, + binomial, + np.array(bad_points), + dps=400, + atol=1e-14) + + def test_chebyt_int(self): + assert_mpmath_equal(lambda n, x: sc.eval_chebyt(int(n), x), + exception_to_nan(lambda n, x: mpmath.chebyt(n, x, **HYPERKW)), + [IntArg(), Arg()], dps=50) + + @pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate") + def test_chebyt(self): + assert_mpmath_equal(sc.eval_chebyt, + lambda n, x: time_limited()(exception_to_nan(mpmath.chebyt))(n, x, **HYPERKW), + [Arg(-101, 101), Arg()], n=10000) + + def test_chebyu_int(self): + assert_mpmath_equal(lambda n, x: sc.eval_chebyu(int(n), x), + exception_to_nan(lambda n, x: mpmath.chebyu(n, x, **HYPERKW)), + [IntArg(), Arg()], dps=50) + + @pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate") + def test_chebyu(self): + assert_mpmath_equal(sc.eval_chebyu, + lambda n, x: time_limited()(exception_to_nan(mpmath.chebyu))(n, x, **HYPERKW), + [Arg(-101, 101), Arg()]) + + def test_chi(self): + def chi(x): + return sc.shichi(x)[1] + assert_mpmath_equal(chi, mpmath.chi, [Arg()]) + # check asymptotic series cross-over + assert_mpmath_equal(chi, mpmath.chi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])]) + + def test_chi_complex(self): + def chi(z): + return sc.shichi(z)[1] + # chi oscillates as Im[z] -> +- inf, so limit range + assert_mpmath_equal(chi, + mpmath.chi, + [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], + rtol=1e-12) + + def test_ci(self): + def ci(x): + return sc.sici(x)[1] + # oscillating function: limit range + assert_mpmath_equal(ci, + mpmath.ci, + [Arg(-1e8, 1e8)]) + + def test_ci_complex(self): + def ci(z): + return sc.sici(z)[1] + # ci oscillates as Re[z] -> +- inf, so limit range + assert_mpmath_equal(ci, + mpmath.ci, + [ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))], + rtol=1e-8) + + def test_cospi(self): + eps = np.finfo(float).eps + assert_mpmath_equal(_cospi, + mpmath.cospi, + [Arg()], nan_ok=False, rtol=eps) + + def test_cospi_complex(self): + assert_mpmath_equal(_cospi, + mpmath.cospi, + [ComplexArg()], nan_ok=False, rtol=1e-13) + + def test_digamma(self): + assert_mpmath_equal(sc.digamma, + exception_to_nan(mpmath.digamma), + [Arg()], rtol=1e-12, dps=50) + + def test_digamma_complex(self): + # Test on a cut plane because mpmath will hang. See + # test_digamma_negreal for tests on the negative real axis. 
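+        # Editor's note: the filter below drops points with Re(z) < 0 and
+        # |Im(z)| < 1.12, i.e. the strip around the poles at
+        # z = 0, -1, -2, ... where the mpmath reference becomes
+        # impractically slow.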
+ def param_filter(z): + return np.where((z.real < 0) & (np.abs(z.imag) < 1.12), False, True) + + assert_mpmath_equal(sc.digamma, + exception_to_nan(mpmath.digamma), + [ComplexArg()], rtol=1e-13, dps=40, + param_filter=param_filter) + + def test_e1(self): + assert_mpmath_equal(sc.exp1, + mpmath.e1, + [Arg()], rtol=1e-14) + + def test_e1_complex(self): + # E_1 oscillates as Im[z] -> +- inf, so limit range + assert_mpmath_equal(sc.exp1, + mpmath.e1, + [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], + rtol=1e-11) + + # Check cross-over region + assert_mpmath_equal(sc.exp1, + mpmath.e1, + (np.linspace(-50, 50, 171)[:, None] + + np.r_[0, np.logspace(-3, 2, 61), + -np.logspace(-3, 2, 11)]*1j).ravel(), + rtol=1e-11) + assert_mpmath_equal(sc.exp1, + mpmath.e1, + (np.linspace(-50, -35, 10000) + 0j), + rtol=1e-11) + + def test_exprel(self): + assert_mpmath_equal(sc.exprel, + lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'), + [Arg(a=-np.log(np.finfo(np.double).max), b=np.log(np.finfo(np.double).max))]) + assert_mpmath_equal(sc.exprel, + lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'), + np.array([1e-12, 1e-24, 0, 1e12, 1e24, np.inf]), rtol=1e-11) + assert_(np.isinf(sc.exprel(np.inf))) + assert_(sc.exprel(-np.inf) == 0) + + def test_expm1_complex(self): + # Oscillates as a function of Im[z], so limit range to avoid loss of precision + assert_mpmath_equal(sc.expm1, + mpmath.expm1, + [ComplexArg(complex(-np.inf, -1e7), complex(np.inf, 1e7))]) + + def test_log1p_complex(self): + assert_mpmath_equal(sc.log1p, + lambda x: mpmath.log(x+1), + [ComplexArg()], dps=60) + + def test_log1pmx(self): + assert_mpmath_equal(_log1pmx, + lambda x: mpmath.log(x + 1) - x, + [Arg()], dps=60, rtol=1e-14) + + def test_ei(self): + assert_mpmath_equal(sc.expi, + mpmath.ei, + [Arg()], + rtol=1e-11) + + def test_ei_complex(self): + # Ei oscillates as Im[z] -> +- inf, so limit range + assert_mpmath_equal(sc.expi, + mpmath.ei, + [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], + rtol=1e-9) + + def test_ellipe(self): + assert_mpmath_equal(sc.ellipe, + mpmath.ellipe, + [Arg(b=1.0)]) + + def test_ellipeinc(self): + assert_mpmath_equal(sc.ellipeinc, + mpmath.ellipe, + [Arg(-1e3, 1e3), Arg(b=1.0)]) + + def test_ellipeinc_largephi(self): + assert_mpmath_equal(sc.ellipeinc, + mpmath.ellipe, + [Arg(), Arg()]) + + def test_ellipf(self): + assert_mpmath_equal(sc.ellipkinc, + mpmath.ellipf, + [Arg(-1e3, 1e3), Arg()]) + + def test_ellipf_largephi(self): + assert_mpmath_equal(sc.ellipkinc, + mpmath.ellipf, + [Arg(), Arg()]) + + def test_ellipk(self): + assert_mpmath_equal(sc.ellipk, + mpmath.ellipk, + [Arg(b=1.0)]) + assert_mpmath_equal(sc.ellipkm1, + lambda m: mpmath.ellipk(1 - m), + [Arg(a=0.0)], + dps=400) + + def test_ellipkinc(self): + def ellipkinc(phi, m): + return mpmath.ellippi(0, phi, m) + assert_mpmath_equal(sc.ellipkinc, + ellipkinc, + [Arg(-1e3, 1e3), Arg(b=1.0)], + ignore_inf_sign=True) + + def test_ellipkinc_largephi(self): + def ellipkinc(phi, m): + return mpmath.ellippi(0, phi, m) + assert_mpmath_equal(sc.ellipkinc, + ellipkinc, + [Arg(), Arg(b=1.0)], + ignore_inf_sign=True) + + def test_ellipfun_sn(self): + def sn(u, m): + # mpmath doesn't get the zero at u = 0--fix that + if u == 0: + return 0 + else: + return mpmath.ellipfun("sn", u=u, m=m) + + # Oscillating function --- limit range of first argument; the + # loss of precision there is an expected numerical feature + # rather than an actual bug + assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[0], + sn, + [Arg(-1e6, 1e6), 
Arg(a=0, b=1)], + rtol=1e-8) + + def test_ellipfun_cn(self): + # see comment in ellipfun_sn + assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[1], + lambda u, m: mpmath.ellipfun("cn", u=u, m=m), + [Arg(-1e6, 1e6), Arg(a=0, b=1)], + rtol=1e-8) + + def test_ellipfun_dn(self): + # see comment in ellipfun_sn + assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[2], + lambda u, m: mpmath.ellipfun("dn", u=u, m=m), + [Arg(-1e6, 1e6), Arg(a=0, b=1)], + rtol=1e-8) + + def test_erf(self): + assert_mpmath_equal(sc.erf, + lambda z: mpmath.erf(z), + [Arg()]) + + def test_erf_complex(self): + assert_mpmath_equal(sc.erf, + lambda z: mpmath.erf(z), + [ComplexArg()], n=200) + + def test_erfc(self): + assert_mpmath_equal(sc.erfc, + exception_to_nan(lambda z: mpmath.erfc(z)), + [Arg()], rtol=1e-13) + + def test_erfc_complex(self): + assert_mpmath_equal(sc.erfc, + exception_to_nan(lambda z: mpmath.erfc(z)), + [ComplexArg()], n=200) + + def test_erfi(self): + assert_mpmath_equal(sc.erfi, + mpmath.erfi, + [Arg()], n=200) + + def test_erfi_complex(self): + assert_mpmath_equal(sc.erfi, + mpmath.erfi, + [ComplexArg()], n=200) + + def test_ndtr(self): + assert_mpmath_equal(sc.ndtr, + exception_to_nan(lambda z: mpmath.ncdf(z)), + [Arg()], n=200) + + def test_ndtr_complex(self): + assert_mpmath_equal(sc.ndtr, + lambda z: mpmath.erfc(-z/np.sqrt(2.))/2., + [ComplexArg(a=complex(-10000, -10000), b=complex(10000, 10000))], n=400) + + def test_log_ndtr(self): + assert_mpmath_equal(sc.log_ndtr, + exception_to_nan(lambda z: mpmath.log(mpmath.ncdf(z))), + [Arg()], n=600, dps=300) + + def test_log_ndtr_complex(self): + assert_mpmath_equal(sc.log_ndtr, + exception_to_nan(lambda z: mpmath.log(mpmath.erfc(-z/np.sqrt(2.))/2.)), + [ComplexArg(a=complex(-10000, -100), + b=complex(10000, 100))], n=200, dps=300) + + def test_eulernum(self): + assert_mpmath_equal(lambda n: sc.euler(n)[-1], + mpmath.eulernum, + [IntArg(1, 10000)], n=10000) + + def test_expint(self): + assert_mpmath_equal(sc.expn, + mpmath.expint, + [IntArg(0, 200), Arg(0, np.inf)], + rtol=1e-13, dps=160) + + def test_fresnels(self): + def fresnels(x): + return sc.fresnel(x)[0] + assert_mpmath_equal(fresnels, + mpmath.fresnels, + [Arg()]) + + def test_fresnelc(self): + def fresnelc(x): + return sc.fresnel(x)[1] + assert_mpmath_equal(fresnelc, + mpmath.fresnelc, + [Arg()]) + + def test_gamma(self): + assert_mpmath_equal(sc.gamma, + exception_to_nan(mpmath.gamma), + [Arg()]) + + def test_gamma_complex(self): + assert_mpmath_equal(sc.gamma, + exception_to_nan(mpmath.gamma), + [ComplexArg()], rtol=5e-13) + + def test_gammainc(self): + # Larger arguments are tested in test_data.py:test_local + assert_mpmath_equal(sc.gammainc, + lambda z, b: mpmath.gammainc(z, b=b, regularized=True), + [Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)], + nan_ok=False, rtol=1e-11) + + def test_gammaincc(self): + # Larger arguments are tested in test_data.py:test_local + assert_mpmath_equal(sc.gammaincc, + lambda z, a: mpmath.gammainc(z, a=a, regularized=True), + [Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)], + nan_ok=False, rtol=1e-11) + + def test_gammaln(self): + # The real part of loggamma is log(|gamma(z)|). 
+        def f(z):
+            return mpmath.loggamma(z).real
+
+        assert_mpmath_equal(sc.gammaln, exception_to_nan(f), [Arg()])
+
+    @pytest.mark.xfail(run=False)
+    def test_gegenbauer(self):
+        assert_mpmath_equal(sc.eval_gegenbauer,
+                            exception_to_nan(mpmath.gegenbauer),
+                            [Arg(-1e3, 1e3), Arg(), Arg()])
+
+    def test_gegenbauer_int(self):
+        # Redefine functions to deal with numerical + mpmath issues
+        def gegenbauer(n, a, x):
+            # Avoid overflow at large `a` (mpmath would need an even larger
+            # dps to handle this correctly, so just skip this region)
+            if abs(a) > 1e100:
+                return np.nan
+
+            # Deal with n=0, n=1 correctly; mpmath 0.17 doesn't always
+            # handle these correctly
+            if n == 0:
+                r = 1.0
+            elif n == 1:
+                r = 2*a*x
+            else:
+                r = mpmath.gegenbauer(n, a, x)
+
+            # Mpmath 0.17 gives wrong results (spurious zero) in some cases,
+            # so compute the value by perturbing the result
+            if float(r) == 0 and a < -1 and float(a) == int(float(a)):
+                r = mpmath.gegenbauer(n, a + mpmath.mpf('1e-50'), x)
+                if abs(r) < mpmath.mpf('1e-50'):
+                    r = mpmath.mpf('0.0')
+
+            # Differing overflow thresholds in scipy vs. mpmath
+            if abs(r) > 1e270:
+                return np.inf
+            return r
+
+        def sc_gegenbauer(n, a, x):
+            r = sc.eval_gegenbauer(int(n), a, x)
+            # Differing overflow thresholds in scipy vs. mpmath
+            if abs(r) > 1e270:
+                return np.inf
+            return r
+        assert_mpmath_equal(sc_gegenbauer,
+                            exception_to_nan(gegenbauer),
+                            [IntArg(0, 100), Arg(-1e9, 1e9), Arg()],
+                            n=40000, dps=100,
+                            ignore_inf_sign=True, rtol=1e-6)
+
+        # Check the small-x expansion
+        assert_mpmath_equal(sc_gegenbauer,
+                            exception_to_nan(gegenbauer),
+                            [IntArg(0, 100), Arg(), FixedArg(np.logspace(-30, -4, 30))],
+                            dps=100,
+                            ignore_inf_sign=True)
+
+    @pytest.mark.xfail(run=False)
+    def test_gegenbauer_complex(self):
+        assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(int(n), a.real, x),
+                            exception_to_nan(mpmath.gegenbauer),
+                            [IntArg(0, 100), Arg(), ComplexArg()])
+
+    @nonfunctional_tooslow
+    def test_gegenbauer_complex_general(self):
+        assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(n.real, a.real, x),
+                            exception_to_nan(mpmath.gegenbauer),
+                            [Arg(-1e3, 1e3), Arg(), ComplexArg()])
+
+    def test_hankel1(self):
+        assert_mpmath_equal(sc.hankel1,
+                            exception_to_nan(lambda v, x: mpmath.hankel1(v, x,
+                                                                         **HYPERKW)),
+                            [Arg(-1e20, 1e20), Arg()])
+
+    def test_hankel2(self):
+        assert_mpmath_equal(sc.hankel2,
+                            exception_to_nan(lambda v, x: mpmath.hankel2(v, x, **HYPERKW)),
+                            [Arg(-1e20, 1e20), Arg()])
+
+    @pytest.mark.xfail(run=False, reason="issues at intermediately large orders")
+    def test_hermite(self):
+        assert_mpmath_equal(lambda n, x: sc.eval_hermite(int(n), x),
+                            exception_to_nan(mpmath.hermite),
+                            [IntArg(0, 10000), Arg()])
+
+    # hurwitz: same as zeta
+
+    def test_hyp0f1(self):
+        # mpmath reports no convergence unless maxterms is large enough
+        KW = dict(maxprec=400, maxterms=1500)
+        # n=500 (non-xslow default) fails for one bad point
+        assert_mpmath_equal(sc.hyp0f1,
+                            lambda a, x: mpmath.hyp0f1(a, x, **KW),
+                            [Arg(-1e7, 1e7), Arg(0, 1e5)],
+                            n=5000)
+        # NB: The range of the second parameter ("z") is limited from below
+        # because of an overflow in the intermediate calculations. The way
+        # to fix it is to implement an asymptotic expansion for Bessel J
+        # (similar to what is implemented for Bessel I here).
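+
+    # A minimal illustrative spot-check (an addition, not part of the
+    # upstream scipy suite): the overflow note above comes from the fact
+    # that hyp0f1 is tied to Bessel J by the standard identity
+    #     J_v(z) = (z/2)**v / Gamma(v+1) * 0F1(; v+1; -z**2/4),
+    # which can be verified pointwise with the imports already used in
+    # this file.
+    def test_hyp0f1_bessel_identity_note(self):
+        v, z = 2.5, 3.0
+        lhs = sc.jv(v, z)
+        rhs = (z/2)**v / sc.gamma(v + 1) * sc.hyp0f1(v + 1, -z**2/4)
+        assert_(np.allclose(lhs, rhs))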
+
+    def test_hyp0f1_complex(self):
+        assert_mpmath_equal(lambda a, z: sc.hyp0f1(a.real, z),
+                            exception_to_nan(lambda a, x: mpmath.hyp0f1(a, x, **HYPERKW)),
+                            [Arg(-10, 10), ComplexArg(complex(-120, -120), complex(120, 120))])
+        # NB: The range of the first parameter ("v") is limited by an overflow
+        # in the intermediate calculations. This can be fixed by implementing
+        # an asymptotic expansion for Bessel functions of large order.
+
+    @pytest.mark.xfail(run=False)
+    def test_hyp1f1(self):
+        assert_mpmath_equal(inf_to_nan(sc.hyp1f1),
+                            exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)),
+                            [Arg(-1e5, 1e5), Arg(-1e5, 1e5), Arg()],
+                            n=2000)
+
+    @pytest.mark.xfail(run=False)
+    def test_hyp1f1_complex(self):
+        assert_mpmath_equal(inf_to_nan(lambda a, b, x: sc.hyp1f1(a.real, b.real, x)),
+                            exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)),
+                            [Arg(-1e3, 1e3), Arg(-1e3, 1e3), ComplexArg()],
+                            n=2000)
+
+    @nonfunctional_tooslow
+    def test_hyp2f1_complex(self):
+        # Scipy's hyp2f1 seems to have performance and accuracy problems
+        assert_mpmath_equal(lambda a, b, c, x: sc.hyp2f1(a.real, b.real, c.real, x),
+                            exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)),
+                            [Arg(-1e2, 1e2), Arg(-1e2, 1e2), Arg(-1e2, 1e2), ComplexArg()],
+                            n=10)
+
+    @pytest.mark.xfail(run=False)
+    def test_hyperu(self):
+        assert_mpmath_equal(sc.hyperu,
+                            exception_to_nan(lambda a, b, x: mpmath.hyperu(a, b, x, **HYPERKW)),
+                            [Arg(), Arg(), Arg()])
+
+    @pytest.mark.xfail(condition=_is_32bit_platform,
+                       reason="mpmath issue gh-342: unsupported operand mpz, long for pow")
+    def test_igam_fac(self):
+        def mp_igam_fac(a, x):
+            return mpmath.power(x, a)*mpmath.exp(-x)/mpmath.gamma(a)
+
+        assert_mpmath_equal(_igam_fac,
+                            mp_igam_fac,
+                            [Arg(0, 1e14, inclusive_a=False), Arg(0, 1e14)],
+                            rtol=1e-10)
+
+    def test_j0(self):
+        # At large arguments the Bessel function behaves as
+        # j0(x) ~ cos(x + phi)/sqrt(x), so the phase of the cosine loses
+        # precision there.
+ # + # This is numerically expected behavior, so we compare only up to + # 1e8 = 1e15 * 1e-7 + assert_mpmath_equal(sc.j0, + mpmath.j0, + [Arg(-1e3, 1e3)]) + assert_mpmath_equal(sc.j0, + mpmath.j0, + [Arg(-1e8, 1e8)], + rtol=1e-5) + + def test_j1(self): + # See comment in test_j0 + assert_mpmath_equal(sc.j1, + mpmath.j1, + [Arg(-1e3, 1e3)]) + assert_mpmath_equal(sc.j1, + mpmath.j1, + [Arg(-1e8, 1e8)], + rtol=1e-5) + + @pytest.mark.xfail(run=False) + def test_jacobi(self): + assert_mpmath_equal(sc.eval_jacobi, + exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)), + [Arg(), Arg(), Arg(), Arg()]) + assert_mpmath_equal(lambda n, b, c, x: sc.eval_jacobi(int(n), b, c, x), + exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)), + [IntArg(), Arg(), Arg(), Arg()]) + + def test_jacobi_int(self): + # Redefine functions to deal with numerical + mpmath issues + def jacobi(n, a, b, x): + # Mpmath does not handle n=0 case always correctly + if n == 0: + return 1.0 + return mpmath.jacobi(n, a, b, x) + assert_mpmath_equal(lambda n, a, b, x: sc.eval_jacobi(int(n), a, b, x), + lambda n, a, b, x: exception_to_nan(jacobi)(n, a, b, x, **HYPERKW), + [IntArg(), Arg(), Arg(), Arg()], + n=20000, dps=50) + + def test_kei(self): + def kei(x): + if x == 0: + # work around mpmath issue at x=0 + return -pi/4 + return exception_to_nan(mpmath.kei)(0, x, **HYPERKW) + assert_mpmath_equal(sc.kei, + kei, + [Arg(-1e30, 1e30)], n=1000) + + def test_ker(self): + assert_mpmath_equal(sc.ker, + exception_to_nan(lambda x: mpmath.ker(0, x, **HYPERKW)), + [Arg(-1e30, 1e30)], n=1000) + + @nonfunctional_tooslow + def test_laguerre(self): + assert_mpmath_equal(trace_args(sc.eval_laguerre), + lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW), + [Arg(), Arg()]) + + def test_laguerre_int(self): + assert_mpmath_equal(lambda n, x: sc.eval_laguerre(int(n), x), + lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW), + [IntArg(), Arg()], n=20000) + + @pytest.mark.xfail(condition=_is_32bit_platform, reason="see gh-3551 for bad points") + def test_lambertw_real(self): + assert_mpmath_equal(lambda x, k: sc.lambertw(x, int(k.real)), + lambda x, k: mpmath.lambertw(x, int(k.real)), + [ComplexArg(-np.inf, np.inf), IntArg(0, 10)], + rtol=1e-13, nan_ok=False) + + def test_lanczos_sum_expg_scaled(self): + maxgamma = 171.624376956302725 + e = np.exp(1) + g = 6.024680040776729583740234375 + + def gamma(x): + with np.errstate(over='ignore'): + fac = ((x + g - 0.5)/e)**(x - 0.5) + if fac != np.inf: + res = fac*_lanczos_sum_expg_scaled(x) + else: + fac = ((x + g - 0.5)/e)**(0.5*(x - 0.5)) + res = fac*_lanczos_sum_expg_scaled(x) + res *= fac + return res + + assert_mpmath_equal(gamma, + mpmath.gamma, + [Arg(0, maxgamma, inclusive_a=False)], + rtol=1e-13) + + @nonfunctional_tooslow + def test_legendre(self): + assert_mpmath_equal(sc.eval_legendre, + mpmath.legendre, + [Arg(), Arg()]) + + def test_legendre_int(self): + assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x), + lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW), + [IntArg(), Arg()], + n=20000) + + # Check the small-x expansion + assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x), + lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW), + [IntArg(), FixedArg(np.logspace(-30, -4, 20))]) + + def test_legenp(self): + def lpnm(n, m, z): + try: + v = sc.lpmn(m, n, z)[0][-1,-1] + except ValueError: + return np.nan + if abs(v) > 1e306: + # harmonize overflow to inf + v = np.inf * np.sign(v.real) + 
return v + + def lpnm_2(n, m, z): + v = sc.lpmv(m, n, z) + if abs(v) > 1e306: + # harmonize overflow to inf + v = np.inf * np.sign(v.real) + return v + + def legenp(n, m, z): + if (z == 1 or z == -1) and int(n) == n: + # Special case (mpmath may give inf, we take the limit by + # continuity) + if m == 0: + if n < 0: + n = -n - 1 + return mpmath.power(mpmath.sign(z), n) + else: + return 0 + + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + + typ = 2 if abs(z) < 1 else 3 + v = exception_to_nan(mpmath.legenp)(n, m, z, type=typ) + + if abs(v) > 1e306: + # harmonize overflow to inf + v = mpmath.inf * mpmath.sign(v.real) + + return v + + assert_mpmath_equal(lpnm, + legenp, + [IntArg(-100, 100), IntArg(-100, 100), Arg()]) + + assert_mpmath_equal(lpnm_2, + legenp, + [IntArg(-100, 100), Arg(-100, 100), Arg(-1, 1)], + atol=1e-10) + + def test_legenp_complex_2(self): + def clpnm(n, m, z): + try: + return sc.clpmn(m.real, n.real, z, type=2)[0][-1,-1] + except ValueError: + return np.nan + + def legenp(n, m, z): + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=2) + + # mpmath is quite slow here + x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3]) + y = np.array([-1e3, -0.5, 0.5, 1.3]) + z = (x[:,None] + 1j*y[None,:]).ravel() + + assert_mpmath_equal(clpnm, + legenp, + [FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)], + rtol=1e-6, + n=500) + + def test_legenp_complex_3(self): + def clpnm(n, m, z): + try: + return sc.clpmn(m.real, n.real, z, type=3)[0][-1,-1] + except ValueError: + return np.nan + + def legenp(n, m, z): + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=3) + + # mpmath is quite slow here + x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3]) + y = np.array([-1e3, -0.5, 0.5, 1.3]) + z = (x[:,None] + 1j*y[None,:]).ravel() + + assert_mpmath_equal(clpnm, + legenp, + [FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)], + rtol=1e-6, + n=500) + + @pytest.mark.xfail(run=False, reason="apparently picks wrong function at |z| > 1") + def test_legenq(self): + def lqnm(n, m, z): + return sc.lqmn(m, n, z)[0][-1,-1] + + def legenq(n, m, z): + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + return exception_to_nan(mpmath.legenq)(n, m, z, type=2) + + assert_mpmath_equal(lqnm, + legenq, + [IntArg(0, 100), IntArg(0, 100), Arg()]) + + @nonfunctional_tooslow + def test_legenq_complex(self): + def lqnm(n, m, z): + return sc.lqmn(int(m.real), int(n.real), z)[0][-1,-1] + + def legenq(n, m, z): + if abs(z) < 1e-15: + # mpmath has bad performance here + return np.nan + return exception_to_nan(mpmath.legenq)(int(n.real), int(m.real), z, type=2) + + assert_mpmath_equal(lqnm, + legenq, + [IntArg(0, 100), IntArg(0, 100), ComplexArg()], + n=100) + + def test_lgam1p(self): + def param_filter(x): + # Filter the poles + return np.where((np.floor(x) == x) & (x <= 0), False, True) + + def mp_lgam1p(z): + # The real part of loggamma is log(|gamma(z)|) + return mpmath.loggamma(1 + z).real + + assert_mpmath_equal(_lgam1p, + mp_lgam1p, + [Arg()], rtol=1e-13, dps=100, + param_filter=param_filter) + + def test_loggamma(self): + def mpmath_loggamma(z): + try: + res = mpmath.loggamma(z) + except ValueError: + res = complex(np.nan, np.nan) + return res + + assert_mpmath_equal(sc.loggamma, + mpmath_loggamma, + 
[ComplexArg()], nan_ok=False, + distinguish_nan_and_inf=False, rtol=5e-14) + + @pytest.mark.xfail(run=False) + def test_pcfd(self): + def pcfd(v, x): + return sc.pbdv(v, x)[0] + assert_mpmath_equal(pcfd, + exception_to_nan(lambda v, x: mpmath.pcfd(v, x, **HYPERKW)), + [Arg(), Arg()]) + + @pytest.mark.xfail(run=False, reason="it's not the same as the mpmath function --- maybe different definition?") + def test_pcfv(self): + def pcfv(v, x): + return sc.pbvv(v, x)[0] + assert_mpmath_equal(pcfv, + lambda v, x: time_limited()(exception_to_nan(mpmath.pcfv))(v, x, **HYPERKW), + [Arg(), Arg()], n=1000) + + def test_pcfw(self): + def pcfw(a, x): + return sc.pbwa(a, x)[0] + + def dpcfw(a, x): + return sc.pbwa(a, x)[1] + + def mpmath_dpcfw(a, x): + return mpmath.diff(mpmath.pcfw, (a, x), (0, 1)) + + # The Zhang and Jin implementation only uses Taylor series and + # is thus accurate in only a very small range. + assert_mpmath_equal(pcfw, + mpmath.pcfw, + [Arg(-5, 5), Arg(-5, 5)], rtol=2e-8, n=100) + + assert_mpmath_equal(dpcfw, + mpmath_dpcfw, + [Arg(-5, 5), Arg(-5, 5)], rtol=2e-9, n=100) + + @pytest.mark.xfail(run=False, reason="issues at large arguments (atol OK, rtol not) and <eps-close to z=0") + def test_polygamma(self): + assert_mpmath_equal(sc.polygamma, + time_limited()(exception_to_nan(mpmath.polygamma)), + [IntArg(0, 1000), Arg()]) + + def test_rgamma(self): + def rgamma(x): + if x < -8000: + return np.inf + else: + v = mpmath.rgamma(x) + return v + # n=500 (non-xslow default) fails for one bad point + assert_mpmath_equal(sc.rgamma, + rgamma, + [Arg()], + n=5000, + ignore_inf_sign=True) + + def test_rgamma_complex(self): + assert_mpmath_equal(sc.rgamma, + exception_to_nan(mpmath.rgamma), + [ComplexArg()], rtol=5e-13) + + @pytest.mark.xfail(reason=("see gh-3551 for bad points on 32 bit " + "systems and gh-8095 for another bad " + "point")) + def test_rf(self): + if LooseVersion(mpmath.__version__) >= LooseVersion("1.0.0"): + # no workarounds needed + mppoch = mpmath.rf + else: + def mppoch(a, m): + # deal with cases where the result in double precision + # hits exactly a non-positive integer, but the + # corresponding extended-precision mpf floats don't + if float(a + m) == int(a + m) and float(a + m) <= 0: + a = mpmath.mpf(a) + m = int(a + m) - a + return mpmath.rf(a, m) + + assert_mpmath_equal(sc.poch, + mppoch, + [Arg(), Arg()], + dps=400) + + def test_sinpi(self): + eps = np.finfo(float).eps + assert_mpmath_equal(_sinpi, mpmath.sinpi, + [Arg()], nan_ok=False, rtol=eps) + + def test_sinpi_complex(self): + assert_mpmath_equal(_sinpi, mpmath.sinpi, + [ComplexArg()], nan_ok=False, rtol=2e-14) + + def test_shi(self): + def shi(x): + return sc.shichi(x)[0] + assert_mpmath_equal(shi, mpmath.shi, [Arg()]) + # check asymptotic series cross-over + assert_mpmath_equal(shi, mpmath.shi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])]) + + def test_shi_complex(self): + def shi(z): + return sc.shichi(z)[0] + # shi oscillates as Im[z] -> +- inf, so limit range + assert_mpmath_equal(shi, + mpmath.shi, + [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], + rtol=1e-12) + + def test_si(self): + def si(x): + return sc.sici(x)[0] + assert_mpmath_equal(si, mpmath.si, [Arg()]) + + def test_si_complex(self): + def si(z): + return sc.sici(z)[0] + # si oscillates as Re[z] -> +- inf, so limit range + assert_mpmath_equal(si, + mpmath.si, + [ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))], + rtol=1e-12) + + def test_spence(self): + # mpmath uses a different convention for the dilogarithm + def dilog(x): + 
return mpmath.polylog(2, 1 - x) + # Spence has a branch cut on the negative real axis + assert_mpmath_equal(sc.spence, + exception_to_nan(dilog), + [Arg(0, np.inf)], rtol=1e-14) + + def test_spence_complex(self): + def dilog(z): + return mpmath.polylog(2, 1 - z) + assert_mpmath_equal(sc.spence, + exception_to_nan(dilog), + [ComplexArg()], rtol=1e-14) + + def test_spherharm(self): + def spherharm(l, m, theta, phi): + if m > l: + return np.nan + return sc.sph_harm(m, l, phi, theta) + assert_mpmath_equal(spherharm, + mpmath.spherharm, + [IntArg(0, 100), IntArg(0, 100), + Arg(a=0, b=pi), Arg(a=0, b=2*pi)], + atol=1e-8, n=6000, + dps=150) + + def test_struveh(self): + assert_mpmath_equal(sc.struve, + exception_to_nan(mpmath.struveh), + [Arg(-1e4, 1e4), Arg(0, 1e4)], + rtol=5e-10) + + def test_struvel(self): + def mp_struvel(v, z): + if v < 0 and z < -v and abs(v) > 1000: + # larger DPS needed for correct results + old_dps = mpmath.mp.dps + try: + mpmath.mp.dps = 300 + return mpmath.struvel(v, z) + finally: + mpmath.mp.dps = old_dps + return mpmath.struvel(v, z) + + assert_mpmath_equal(sc.modstruve, + exception_to_nan(mp_struvel), + [Arg(-1e4, 1e4), Arg(0, 1e4)], + rtol=5e-10, + ignore_inf_sign=True) + + def test_wrightomega(self): + assert_mpmath_equal(sc.wrightomega, + lambda z: _mpmath_wrightomega(z, 25), + [ComplexArg()], rtol=1e-14, nan_ok=False) + + def test_zeta(self): + assert_mpmath_equal(sc.zeta, + exception_to_nan(mpmath.zeta), + [Arg(a=1, b=1e10, inclusive_a=False), + Arg(a=0, inclusive_a=False)]) + + def test_zetac(self): + assert_mpmath_equal(sc.zetac, + lambda x: mpmath.zeta(x) - 1, + [Arg(-100, 100)], + nan_ok=False, dps=45, rtol=1e-13) + + def test_boxcox(self): + + def mp_boxcox(x, lmbda): + x = mpmath.mp.mpf(x) + lmbda = mpmath.mp.mpf(lmbda) + if lmbda == 0: + return mpmath.mp.log(x) + else: + return mpmath.mp.powm1(x, lmbda) / lmbda + + assert_mpmath_equal(sc.boxcox, + exception_to_nan(mp_boxcox), + [Arg(a=0, inclusive_a=False), Arg()], + n=200, + dps=60, + rtol=1e-13) + + def test_boxcox1p(self): + + def mp_boxcox1p(x, lmbda): + x = mpmath.mp.mpf(x) + lmbda = mpmath.mp.mpf(lmbda) + one = mpmath.mp.mpf(1) + if lmbda == 0: + return mpmath.mp.log(one + x) + else: + return mpmath.mp.powm1(one + x, lmbda) / lmbda + + assert_mpmath_equal(sc.boxcox1p, + exception_to_nan(mp_boxcox1p), + [Arg(a=-1, inclusive_a=False), Arg()], + n=200, + dps=60, + rtol=1e-13) + + def test_spherical_jn(self): + def mp_spherical_jn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n), z), + exception_to_nan(mp_spherical_jn), + [IntArg(0, 200), Arg(-1e8, 1e8)], + dps=300) + + def test_spherical_jn_complex(self): + def mp_spherical_jn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n.real), z), + exception_to_nan(mp_spherical_jn), + [IntArg(0, 200), ComplexArg()]) + + def test_spherical_yn(self): + def mp_spherical_yn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n), z), + exception_to_nan(mp_spherical_yn), + [IntArg(0, 200), Arg(-1e10, 1e10)], + dps=100) 
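+
+    # A minimal illustrative spot-check (an addition, not part of the
+    # upstream scipy suite): the mp_spherical_* reference implementations
+    # in this block all use the half-integer-order relation between
+    # spherical and cylindrical Bessel functions, e.g.
+    #     y_n(z) = sqrt(pi / (2*z)) * Y_{n+1/2}(z),
+    # which can be checked directly against scipy itself.
+    def test_spherical_yn_half_order_note(self):
+        n, z = 2, 3.7
+        lhs = sc.spherical_yn(n, z)
+        rhs = np.sqrt(np.pi / (2*z)) * sc.yv(n + 0.5, z)
+        assert_(np.allclose(lhs, rhs))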
+ + def test_spherical_yn_complex(self): + def mp_spherical_yn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n.real), z), + exception_to_nan(mp_spherical_yn), + [IntArg(0, 200), ComplexArg()]) + + def test_spherical_in(self): + def mp_spherical_in(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n), z), + exception_to_nan(mp_spherical_in), + [IntArg(0, 200), Arg()], + dps=200, atol=10**(-278)) + + def test_spherical_in_complex(self): + def mp_spherical_in(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n.real), z), + exception_to_nan(mp_spherical_in), + [IntArg(0, 200), ComplexArg()]) + + def test_spherical_kn(self): + def mp_spherical_kn(n, z): + out = (mpmath.besselk(n + mpmath.mpf(1)/2, z) * + mpmath.sqrt(mpmath.pi/(2*mpmath.mpmathify(z)))) + if mpmath.mpmathify(z).imag == 0: + return out.real + else: + return out + + assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n), z), + exception_to_nan(mp_spherical_kn), + [IntArg(0, 150), Arg()], + dps=100) + + @pytest.mark.xfail(run=False, reason="Accuracy issues near z = -1 inherited from kv.") + def test_spherical_kn_complex(self): + def mp_spherical_kn(n, z): + arg = mpmath.mpmathify(z) + out = (mpmath.besselk(n + mpmath.mpf(1)/2, arg) / + mpmath.sqrt(2*arg/mpmath.pi)) + if arg.imag == 0: + return out.real + else: + return out + + assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n.real), z), + exception_to_nan(mp_spherical_kn), + [IntArg(0, 200), ComplexArg()], + dps=200) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_mpmath.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_mpmath.pyc new file mode 100644 index 0000000..ca5dd1d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_mpmath.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_nan_inputs.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_nan_inputs.py new file mode 100644 index 0000000..9a0f7a0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_nan_inputs.py @@ -0,0 +1,64 @@ +"""Test how the ufuncs in special handle nan inputs. 
+ +""" +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_array_equal, assert_ +import pytest + +import scipy.special as sc +from scipy._lib._numpy_compat import suppress_warnings + + +KNOWNFAILURES = {} + +POSTPROCESSING = {} + + +def _get_ufuncs(): + ufuncs = [] + ufunc_names = [] + for name in sorted(sc.__dict__): + obj = sc.__dict__[name] + if not isinstance(obj, np.ufunc): + continue + msg = KNOWNFAILURES.get(obj) + if msg is None: + ufuncs.append(obj) + ufunc_names.append(name) + else: + fail = pytest.mark.xfail(run=False, reason=msg) + ufuncs.append(pytest.param(obj, marks=fail)) + ufunc_names.append(name) + return ufuncs, ufunc_names + + +UFUNCS, UFUNC_NAMES = _get_ufuncs() + + +@pytest.mark.parametrize("func", UFUNCS, ids=UFUNC_NAMES) +def test_nan_inputs(func): + args = (np.nan,)*func.nin + with suppress_warnings() as sup: + # Ignore warnings about unsafe casts from legacy wrappers + sup.filter(RuntimeWarning, + "floating point number truncated to an integer") + try: + res = func(*args) + except TypeError: + # One of the arguments doesn't take real inputs + return + if func in POSTPROCESSING: + res = POSTPROCESSING[func](*res) + + msg = "got {} instead of nan".format(res) + assert_array_equal(np.isnan(res), True, err_msg=msg) + + +def test_legacy_cast(): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "floating point number truncated to an integer") + res = sc.bdtrc(np.nan, 1, 0.5) + assert_(np.isnan(res)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_nan_inputs.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_nan_inputs.pyc new file mode 100644 index 0000000..e5bbd4e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_nan_inputs.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal.py new file mode 100644 index 0000000..7725a7a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal.py @@ -0,0 +1,756 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import array, sqrt +from numpy.testing import (assert_array_almost_equal, assert_equal, + assert_almost_equal, assert_allclose) +from pytest import raises as assert_raises + +from scipy._lib.six import xrange +from scipy import integrate +import scipy.special as sc +from scipy.special import gamma +import scipy.special.orthogonal as orth + + +class TestCheby(object): + def test_chebyc(self): + C0 = orth.chebyc(0) + C1 = orth.chebyc(1) + olderr = np.seterr(all='ignore') + try: + C2 = orth.chebyc(2) + C3 = orth.chebyc(3) + C4 = orth.chebyc(4) + C5 = orth.chebyc(5) + finally: + np.seterr(**olderr) + + assert_array_almost_equal(C0.c,[2],13) + assert_array_almost_equal(C1.c,[1,0],13) + assert_array_almost_equal(C2.c,[1,0,-2],13) + assert_array_almost_equal(C3.c,[1,0,-3,0],13) + assert_array_almost_equal(C4.c,[1,0,-4,0,2],13) + assert_array_almost_equal(C5.c,[1,0,-5,0,5,0],13) + + def test_chebys(self): + S0 = orth.chebys(0) + S1 = orth.chebys(1) + S2 = orth.chebys(2) + S3 = orth.chebys(3) + S4 = orth.chebys(4) + S5 = orth.chebys(5) + assert_array_almost_equal(S0.c,[1],13) + assert_array_almost_equal(S1.c,[1,0],13) + assert_array_almost_equal(S2.c,[1,0,-1],13) + assert_array_almost_equal(S3.c,[1,0,-2,0],13) + 
assert_array_almost_equal(S4.c,[1,0,-3,0,1],13) + assert_array_almost_equal(S5.c,[1,0,-4,0,3,0],13) + + def test_chebyt(self): + T0 = orth.chebyt(0) + T1 = orth.chebyt(1) + T2 = orth.chebyt(2) + T3 = orth.chebyt(3) + T4 = orth.chebyt(4) + T5 = orth.chebyt(5) + assert_array_almost_equal(T0.c,[1],13) + assert_array_almost_equal(T1.c,[1,0],13) + assert_array_almost_equal(T2.c,[2,0,-1],13) + assert_array_almost_equal(T3.c,[4,0,-3,0],13) + assert_array_almost_equal(T4.c,[8,0,-8,0,1],13) + assert_array_almost_equal(T5.c,[16,0,-20,0,5,0],13) + + def test_chebyu(self): + U0 = orth.chebyu(0) + U1 = orth.chebyu(1) + U2 = orth.chebyu(2) + U3 = orth.chebyu(3) + U4 = orth.chebyu(4) + U5 = orth.chebyu(5) + assert_array_almost_equal(U0.c,[1],13) + assert_array_almost_equal(U1.c,[2,0],13) + assert_array_almost_equal(U2.c,[4,0,-1],13) + assert_array_almost_equal(U3.c,[8,0,-4,0],13) + assert_array_almost_equal(U4.c,[16,0,-12,0,1],13) + assert_array_almost_equal(U5.c,[32,0,-32,0,6,0],13) + + +class TestGegenbauer(object): + + def test_gegenbauer(self): + a = 5*np.random.random() - 0.5 + if np.any(a == 0): + a = -0.2 + Ca0 = orth.gegenbauer(0,a) + Ca1 = orth.gegenbauer(1,a) + Ca2 = orth.gegenbauer(2,a) + Ca3 = orth.gegenbauer(3,a) + Ca4 = orth.gegenbauer(4,a) + Ca5 = orth.gegenbauer(5,a) + + assert_array_almost_equal(Ca0.c,array([1]),13) + assert_array_almost_equal(Ca1.c,array([2*a,0]),13) + assert_array_almost_equal(Ca2.c,array([2*a*(a+1),0,-a]),13) + assert_array_almost_equal(Ca3.c,array([4*orth.poch(a,3),0,-6*a*(a+1), + 0])/3.0,11) + assert_array_almost_equal(Ca4.c,array([4*orth.poch(a,4),0,-12*orth.poch(a,3), + 0,3*a*(a+1)])/6.0,11) + assert_array_almost_equal(Ca5.c,array([4*orth.poch(a,5),0,-20*orth.poch(a,4), + 0,15*orth.poch(a,3),0])/15.0,11) + + +class TestHermite(object): + def test_hermite(self): + H0 = orth.hermite(0) + H1 = orth.hermite(1) + H2 = orth.hermite(2) + H3 = orth.hermite(3) + H4 = orth.hermite(4) + H5 = orth.hermite(5) + assert_array_almost_equal(H0.c,[1],13) + assert_array_almost_equal(H1.c,[2,0],13) + assert_array_almost_equal(H2.c,[4,0,-2],13) + assert_array_almost_equal(H3.c,[8,0,-12,0],13) + assert_array_almost_equal(H4.c,[16,0,-48,0,12],12) + assert_array_almost_equal(H5.c,[32,0,-160,0,120,0],12) + + def test_hermitenorm(self): + # He_n(x) = 2**(-n/2) H_n(x/sqrt(2)) + psub = np.poly1d([1.0/sqrt(2),0]) + H0 = orth.hermitenorm(0) + H1 = orth.hermitenorm(1) + H2 = orth.hermitenorm(2) + H3 = orth.hermitenorm(3) + H4 = orth.hermitenorm(4) + H5 = orth.hermitenorm(5) + he0 = orth.hermite(0)(psub) + he1 = orth.hermite(1)(psub) / sqrt(2) + he2 = orth.hermite(2)(psub) / 2.0 + he3 = orth.hermite(3)(psub) / (2*sqrt(2)) + he4 = orth.hermite(4)(psub) / 4.0 + he5 = orth.hermite(5)(psub) / (4.0*sqrt(2)) + + assert_array_almost_equal(H0.c,he0.c,13) + assert_array_almost_equal(H1.c,he1.c,13) + assert_array_almost_equal(H2.c,he2.c,13) + assert_array_almost_equal(H3.c,he3.c,13) + assert_array_almost_equal(H4.c,he4.c,13) + assert_array_almost_equal(H5.c,he5.c,13) + + +class _test_sh_legendre(object): + + def test_sh_legendre(self): + # P*_n(x) = P_n(2x-1) + psub = np.poly1d([2,-1]) + Ps0 = orth.sh_legendre(0) + Ps1 = orth.sh_legendre(1) + Ps2 = orth.sh_legendre(2) + Ps3 = orth.sh_legendre(3) + Ps4 = orth.sh_legendre(4) + Ps5 = orth.sh_legendre(5) + pse0 = orth.legendre(0)(psub) + pse1 = orth.legendre(1)(psub) + pse2 = orth.legendre(2)(psub) + pse3 = orth.legendre(3)(psub) + pse4 = orth.legendre(4)(psub) + pse5 = orth.legendre(5)(psub) + assert_array_almost_equal(Ps0.c,pse0.c,13) + 
assert_array_almost_equal(Ps1.c,pse1.c,13) + assert_array_almost_equal(Ps2.c,pse2.c,13) + assert_array_almost_equal(Ps3.c,pse3.c,13) + assert_array_almost_equal(Ps4.c,pse4.c,12) + assert_array_almost_equal(Ps5.c,pse5.c,12) + + +class _test_sh_chebyt(object): + + def test_sh_chebyt(self): + # T*_n(x) = T_n(2x-1) + psub = np.poly1d([2,-1]) + Ts0 = orth.sh_chebyt(0) + Ts1 = orth.sh_chebyt(1) + Ts2 = orth.sh_chebyt(2) + Ts3 = orth.sh_chebyt(3) + Ts4 = orth.sh_chebyt(4) + Ts5 = orth.sh_chebyt(5) + tse0 = orth.chebyt(0)(psub) + tse1 = orth.chebyt(1)(psub) + tse2 = orth.chebyt(2)(psub) + tse3 = orth.chebyt(3)(psub) + tse4 = orth.chebyt(4)(psub) + tse5 = orth.chebyt(5)(psub) + assert_array_almost_equal(Ts0.c,tse0.c,13) + assert_array_almost_equal(Ts1.c,tse1.c,13) + assert_array_almost_equal(Ts2.c,tse2.c,13) + assert_array_almost_equal(Ts3.c,tse3.c,13) + assert_array_almost_equal(Ts4.c,tse4.c,12) + assert_array_almost_equal(Ts5.c,tse5.c,12) + + +class _test_sh_chebyu(object): + + def test_sh_chebyu(self): + # U*_n(x) = U_n(2x-1) + psub = np.poly1d([2,-1]) + Us0 = orth.sh_chebyu(0) + Us1 = orth.sh_chebyu(1) + Us2 = orth.sh_chebyu(2) + Us3 = orth.sh_chebyu(3) + Us4 = orth.sh_chebyu(4) + Us5 = orth.sh_chebyu(5) + use0 = orth.chebyu(0)(psub) + use1 = orth.chebyu(1)(psub) + use2 = orth.chebyu(2)(psub) + use3 = orth.chebyu(3)(psub) + use4 = orth.chebyu(4)(psub) + use5 = orth.chebyu(5)(psub) + assert_array_almost_equal(Us0.c,use0.c,13) + assert_array_almost_equal(Us1.c,use1.c,13) + assert_array_almost_equal(Us2.c,use2.c,13) + assert_array_almost_equal(Us3.c,use3.c,13) + assert_array_almost_equal(Us4.c,use4.c,12) + assert_array_almost_equal(Us5.c,use5.c,11) + + +class _test_sh_jacobi(object): + def test_sh_jacobi(self): + # G^(p,q)_n(x) = n! gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1) + conv = lambda n,p: gamma(n+1)*gamma(n+p)/gamma(2*n+p) + psub = np.poly1d([2,-1]) + q = 4 * np.random.random() + p = q-1 + 2*np.random.random() + # print("shifted jacobi p,q = ", p, q) + G0 = orth.sh_jacobi(0,p,q) + G1 = orth.sh_jacobi(1,p,q) + G2 = orth.sh_jacobi(2,p,q) + G3 = orth.sh_jacobi(3,p,q) + G4 = orth.sh_jacobi(4,p,q) + G5 = orth.sh_jacobi(5,p,q) + ge0 = orth.jacobi(0,p-q,q-1)(psub) * conv(0,p) + ge1 = orth.jacobi(1,p-q,q-1)(psub) * conv(1,p) + ge2 = orth.jacobi(2,p-q,q-1)(psub) * conv(2,p) + ge3 = orth.jacobi(3,p-q,q-1)(psub) * conv(3,p) + ge4 = orth.jacobi(4,p-q,q-1)(psub) * conv(4,p) + ge5 = orth.jacobi(5,p-q,q-1)(psub) * conv(5,p) + + assert_array_almost_equal(G0.c,ge0.c,13) + assert_array_almost_equal(G1.c,ge1.c,13) + assert_array_almost_equal(G2.c,ge2.c,13) + assert_array_almost_equal(G3.c,ge3.c,13) + assert_array_almost_equal(G4.c,ge4.c,13) + assert_array_almost_equal(G5.c,ge5.c,13) + + +class TestCall(object): + def test_call(self): + poly = [] + for n in xrange(5): + poly.extend([x.strip() for x in + (""" + orth.jacobi(%(n)d,0.3,0.9) + orth.sh_jacobi(%(n)d,0.3,0.9) + orth.genlaguerre(%(n)d,0.3) + orth.laguerre(%(n)d) + orth.hermite(%(n)d) + orth.hermitenorm(%(n)d) + orth.gegenbauer(%(n)d,0.3) + orth.chebyt(%(n)d) + orth.chebyu(%(n)d) + orth.chebyc(%(n)d) + orth.chebys(%(n)d) + orth.sh_chebyt(%(n)d) + orth.sh_chebyu(%(n)d) + orth.legendre(%(n)d) + orth.sh_legendre(%(n)d) + """ % dict(n=n)).split() + ]) + olderr = np.seterr(all='ignore') + try: + for pstr in poly: + p = eval(pstr) + assert_almost_equal(p(0.315), np.poly1d(p.coef)(0.315), + err_msg=pstr) + finally: + np.seterr(**olderr) + + +class TestGenlaguerre(object): + def test_regression(self): + assert_equal(orth.genlaguerre(1, 1, monic=False)(0), 
2.) + assert_equal(orth.genlaguerre(1, 1, monic=True)(0), -2.) + assert_equal(orth.genlaguerre(1, 1, monic=False), np.poly1d([-1, 2])) + assert_equal(orth.genlaguerre(1, 1, monic=True), np.poly1d([1, -2])) + + +def verify_gauss_quad(root_func, eval_func, weight_func, a, b, N, + rtol=1e-15, atol=1e-14): + # this test is copied from numpy's TestGauss in test_hermite.py + x, w, mu = root_func(N, True) + + n = np.arange(N) + v = eval_func(n[:,np.newaxis], x) + vv = np.dot(v*w, v.T) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, np.newaxis] * vv * vd + assert_allclose(vv, np.eye(N), rtol, atol) + + # check that the integral of 1 is correct + assert_allclose(w.sum(), mu, rtol, atol) + + # compare the results of integrating a function with quad. + f = lambda x: x**3 - 3*x**2 + x - 2 + resI = integrate.quad(lambda x: f(x)*weight_func(x), a, b) + resG = np.vdot(f(x), w) + rtol = 1e-6 if 1e-6 < resI[1] else resI[1] * 10 + assert_allclose(resI[0], resG, rtol=rtol) + +def test_roots_jacobi(): + rf = lambda a, b: lambda n, mu: sc.roots_jacobi(n, a, b, mu) + ef = lambda a, b: lambda n, x: orth.eval_jacobi(n, a, b, x) + wf = lambda a, b: lambda x: (1 - x)**a * (1 + x)**b + + vgq = verify_gauss_quad + vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., 5) + vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., + 25, atol=1e-12) + vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., + 100, atol=1e-11) + + vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 5) + vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 25, atol=1.5e-13) + vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 100, atol=1e-12) + + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 5, atol=2e-13) + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 25, atol=2e-13) + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 100, atol=1e-12) + + vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 5) + vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 25, atol=1e-13) + vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 100, atol=3e-13) + + vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 5) + vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 25) + vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., + 100, atol=1e-13) + + vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 5, atol=1e-13) + vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 25, atol=2e-13) + vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., + 100, atol=1e-11) + + vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 5) + vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 25, atol=1e-13) + vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., + 100, atol=1e-13) + + # when alpha == beta == 0, P_n^{a,b}(x) == P_n(x) + xj, wj = sc.roots_jacobi(6, 0.0, 0.0) + xl, wl = sc.roots_legendre(6) + assert_allclose(xj, xl, 1e-14, 1e-14) + assert_allclose(wj, wl, 1e-14, 1e-14) + + # when alpha == beta != 0, P_n^{a,b}(x) == C_n^{alpha+0.5}(x) + xj, wj = sc.roots_jacobi(6, 4.0, 4.0) + xc, wc = sc.roots_gegenbauer(6, 4.5) + assert_allclose(xj, xc, 1e-14, 1e-14) + assert_allclose(wj, wc, 1e-14, 1e-14) + + x, w = sc.roots_jacobi(5, 2, 3, False) + y, v, m = sc.roots_jacobi(5, 2, 3, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(wf(2,3), -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_jacobi, 0, 1, 1) + assert_raises(ValueError, sc.roots_jacobi, 3.3, 1, 1) + 
assert_raises(ValueError, sc.roots_jacobi, 3, -2, 1) + assert_raises(ValueError, sc.roots_jacobi, 3, 1, -2) + assert_raises(ValueError, sc.roots_jacobi, 3, -2, -2) + +def test_roots_sh_jacobi(): + rf = lambda a, b: lambda n, mu: sc.roots_sh_jacobi(n, a, b, mu) + ef = lambda a, b: lambda n, x: orth.eval_sh_jacobi(n, a, b, x) + wf = lambda a, b: lambda x: (1. - x)**(a - b) * (x)**(b - 1.) + + vgq = verify_gauss_quad + vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., 5) + vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., + 25, atol=1e-12) + vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., + 100, atol=1e-11) + + vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 5) + vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 25, atol=1e-13) + vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 100, atol=1e-12) + + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 5) + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 25, atol=1.5e-13) + vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 100, atol=1e-12) + + vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 5) + vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 25, atol=1e-13) + vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 100, atol=1e-12) + + vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 5) + vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 25) + vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., + 100, atol=1e-13) + + vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 5, atol=1e-12) + vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 25, atol=1e-11) + vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 100, atol=1e-10) + + vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 5, atol=3.5e-14) + vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 25, atol=2e-13) + vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., + 100, atol=1e-12) + + x, w = sc.roots_sh_jacobi(5, 3, 2, False) + y, v, m = sc.roots_sh_jacobi(5, 3, 2, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(wf(3,2), 0, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_sh_jacobi, 0, 1, 1) + assert_raises(ValueError, sc.roots_sh_jacobi, 3.3, 1, 1) + assert_raises(ValueError, sc.roots_sh_jacobi, 3, 1, 2) # p - q <= -1 + assert_raises(ValueError, sc.roots_sh_jacobi, 3, 2, -1) # q <= 0 + assert_raises(ValueError, sc.roots_sh_jacobi, 3, -2, -1) # both + +def test_roots_hermite(): + rootf = sc.roots_hermite + evalf = orth.eval_hermite + weightf = orth.hermite(5).weight_func + + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5) + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13) + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12) + + # Golub-Welsch branch + x, w = sc.roots_hermite(5, False) + y, v, m = sc.roots_hermite(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -np.inf, np.inf) + assert_allclose(m, muI, rtol=muI_err) + + # Asymptotic branch (switch over at n >= 150) + x, w = sc.roots_hermite(200, False) + y, v, m = sc.roots_hermite(200, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + assert_allclose(sum(v), m, 1e-14, 1e-14) + + assert_raises(ValueError, sc.roots_hermite, 0) + assert_raises(ValueError, sc.roots_hermite, 3.3) + +def test_roots_hermite_asy(): + # Recursion for Hermite functions + def 
hermite_recursion(n, nodes): + H = np.zeros((n, nodes.size)) + H[0,:] = np.pi**(-0.25) * np.exp(-0.5*nodes**2) + if n > 1: + H[1,:] = sqrt(2.0) * nodes * H[0,:] + for k in xrange(2, n): + H[k,:] = sqrt(2.0/k) * nodes * H[k-1,:] - sqrt((k-1.0)/k) * H[k-2,:] + return H + + # This tests only the nodes + def test(N, rtol=1e-15, atol=1e-14): + x, w = orth._roots_hermite_asy(N) + H = hermite_recursion(N+1, x) + assert_allclose(H[-1,:], np.zeros(N), rtol, atol) + assert_allclose(sum(w), sqrt(np.pi), rtol, atol) + + test(150, atol=1e-12) + test(151, atol=1e-12) + test(300, atol=1e-12) + test(301, atol=1e-12) + test(500, atol=1e-12) + test(501, atol=1e-12) + test(999, atol=1e-12) + test(1000, atol=1e-12) + test(2000, atol=1e-12) + test(5000, atol=1e-12) + +def test_roots_hermitenorm(): + rootf = sc.roots_hermitenorm + evalf = orth.eval_hermitenorm + weightf = orth.hermitenorm(5).weight_func + + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5) + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13) + verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12) + + x, w = sc.roots_hermitenorm(5, False) + y, v, m = sc.roots_hermitenorm(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -np.inf, np.inf) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_hermitenorm, 0) + assert_raises(ValueError, sc.roots_hermitenorm, 3.3) + +def test_roots_gegenbauer(): + rootf = lambda a: lambda n, mu: sc.roots_gegenbauer(n, a, mu) + evalf = lambda a: lambda n, x: orth.eval_gegenbauer(n, a, x) + weightf = lambda a: lambda x: (1 - x**2)**(a - 0.5) + + vgq = verify_gauss_quad + vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 5) + vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 25, atol=1e-12) + vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 100, atol=1e-11) + + vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 5) + vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 25, atol=1e-13) + vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 100, atol=1e-12) + + vgq(rootf(1), evalf(1), weightf(1), -1., 1., 5) + vgq(rootf(1), evalf(1), weightf(1), -1., 1., 25, atol=1e-13) + vgq(rootf(1), evalf(1), weightf(1), -1., 1., 100, atol=1e-12) + + vgq(rootf(10), evalf(10), weightf(10), -1., 1., 5) + vgq(rootf(10), evalf(10), weightf(10), -1., 1., 25, atol=1e-13) + vgq(rootf(10), evalf(10), weightf(10), -1., 1., 100, atol=1e-12) + + vgq(rootf(50), evalf(50), weightf(50), -1., 1., 5, atol=1e-13) + vgq(rootf(50), evalf(50), weightf(50), -1., 1., 25, atol=1e-12) + vgq(rootf(50), evalf(50), weightf(50), -1., 1., 100, atol=1e-11) + + # this is a special case that the old code supported. + # when alpha = 0, the gegenbauer polynomial is uniformly 0. but it goes + # to a scaled down copy of T_n(x) there. 
+ vgq(rootf(0), orth.eval_chebyt, weightf(0), -1., 1., 5) + vgq(rootf(0), orth.eval_chebyt, weightf(0), -1., 1., 25) + vgq(rootf(0), orth.eval_chebyt, weightf(0), -1., 1., 100, atol=1e-12) + + x, w = sc.roots_gegenbauer(5, 2, False) + y, v, m = sc.roots_gegenbauer(5, 2, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf(2), -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_gegenbauer, 0, 2) + assert_raises(ValueError, sc.roots_gegenbauer, 3.3, 2) + assert_raises(ValueError, sc.roots_gegenbauer, 3, -.75) + +def test_roots_chebyt(): + weightf = orth.chebyt(5).weight_func + verify_gauss_quad(sc.roots_chebyt, orth.eval_chebyt, weightf, -1., 1., 5) + verify_gauss_quad(sc.roots_chebyt, orth.eval_chebyt, weightf, -1., 1., 25) + verify_gauss_quad(sc.roots_chebyt, orth.eval_chebyt, weightf, -1., 1., 100, atol=1e-12) + + x, w = sc.roots_chebyt(5, False) + y, v, m = sc.roots_chebyt(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_chebyt, 0) + assert_raises(ValueError, sc.roots_chebyt, 3.3) + +def test_chebyt_symmetry(): + x, w = sc.roots_chebyt(21) + pos, neg = x[:10], x[11:] + assert_equal(neg, -pos[::-1]) + assert_equal(x[10], 0) + +def test_roots_chebyu(): + weightf = orth.chebyu(5).weight_func + verify_gauss_quad(sc.roots_chebyu, orth.eval_chebyu, weightf, -1., 1., 5) + verify_gauss_quad(sc.roots_chebyu, orth.eval_chebyu, weightf, -1., 1., 25) + verify_gauss_quad(sc.roots_chebyu, orth.eval_chebyu, weightf, -1., 1., 100) + + x, w = sc.roots_chebyu(5, False) + y, v, m = sc.roots_chebyu(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_chebyu, 0) + assert_raises(ValueError, sc.roots_chebyu, 3.3) + +def test_roots_chebyc(): + weightf = orth.chebyc(5).weight_func + verify_gauss_quad(sc.roots_chebyc, orth.eval_chebyc, weightf, -2., 2., 5) + verify_gauss_quad(sc.roots_chebyc, orth.eval_chebyc, weightf, -2., 2., 25) + verify_gauss_quad(sc.roots_chebyc, orth.eval_chebyc, weightf, -2., 2., 100, atol=1e-12) + + x, w = sc.roots_chebyc(5, False) + y, v, m = sc.roots_chebyc(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -2, 2) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_chebyc, 0) + assert_raises(ValueError, sc.roots_chebyc, 3.3) + +def test_roots_chebys(): + weightf = orth.chebys(5).weight_func + verify_gauss_quad(sc.roots_chebys, orth.eval_chebys, weightf, -2., 2., 5) + verify_gauss_quad(sc.roots_chebys, orth.eval_chebys, weightf, -2., 2., 25) + verify_gauss_quad(sc.roots_chebys, orth.eval_chebys, weightf, -2., 2., 100) + + x, w = sc.roots_chebys(5, False) + y, v, m = sc.roots_chebys(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -2, 2) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_chebys, 0) + assert_raises(ValueError, sc.roots_chebys, 3.3) + +def test_roots_sh_chebyt(): + weightf = orth.sh_chebyt(5).weight_func + verify_gauss_quad(sc.roots_sh_chebyt, orth.eval_sh_chebyt, weightf, 0., 1., 5) + verify_gauss_quad(sc.roots_sh_chebyt, orth.eval_sh_chebyt, 
weightf, 0., 1., 25) + verify_gauss_quad(sc.roots_sh_chebyt, orth.eval_sh_chebyt, weightf, 0., 1., + 100, atol=1e-13) + + x, w = sc.roots_sh_chebyt(5, False) + y, v, m = sc.roots_sh_chebyt(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, 0, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_sh_chebyt, 0) + assert_raises(ValueError, sc.roots_sh_chebyt, 3.3) + +def test_roots_sh_chebyu(): + weightf = orth.sh_chebyu(5).weight_func + verify_gauss_quad(sc.roots_sh_chebyu, orth.eval_sh_chebyu, weightf, 0., 1., 5) + verify_gauss_quad(sc.roots_sh_chebyu, orth.eval_sh_chebyu, weightf, 0., 1., 25) + verify_gauss_quad(sc.roots_sh_chebyu, orth.eval_sh_chebyu, weightf, 0., 1., + 100, atol=1e-13) + + x, w = sc.roots_sh_chebyu(5, False) + y, v, m = sc.roots_sh_chebyu(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, 0, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_sh_chebyu, 0) + assert_raises(ValueError, sc.roots_sh_chebyu, 3.3) + +def test_roots_legendre(): + weightf = orth.legendre(5).weight_func + verify_gauss_quad(sc.roots_legendre, orth.eval_legendre, weightf, -1., 1., 5) + verify_gauss_quad(sc.roots_legendre, orth.eval_legendre, weightf, -1., 1., + 25, atol=1e-13) + verify_gauss_quad(sc.roots_legendre, orth.eval_legendre, weightf, -1., 1., + 100, atol=1e-12) + + x, w = sc.roots_legendre(5, False) + y, v, m = sc.roots_legendre(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, -1, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_legendre, 0) + assert_raises(ValueError, sc.roots_legendre, 3.3) + +def test_roots_sh_legendre(): + weightf = orth.sh_legendre(5).weight_func + verify_gauss_quad(sc.roots_sh_legendre, orth.eval_sh_legendre, weightf, 0., 1., 5) + verify_gauss_quad(sc.roots_sh_legendre, orth.eval_sh_legendre, weightf, 0., 1., + 25, atol=1e-13) + verify_gauss_quad(sc.roots_sh_legendre, orth.eval_sh_legendre, weightf, 0., 1., + 100, atol=1e-12) + + x, w = sc.roots_sh_legendre(5, False) + y, v, m = sc.roots_sh_legendre(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, 0, 1) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_sh_legendre, 0) + assert_raises(ValueError, sc.roots_sh_legendre, 3.3) + +def test_roots_laguerre(): + weightf = orth.laguerre(5).weight_func + verify_gauss_quad(sc.roots_laguerre, orth.eval_laguerre, weightf, 0., np.inf, 5) + verify_gauss_quad(sc.roots_laguerre, orth.eval_laguerre, weightf, 0., np.inf, + 25, atol=1e-13) + verify_gauss_quad(sc.roots_laguerre, orth.eval_laguerre, weightf, 0., np.inf, + 100, atol=1e-12) + + x, w = sc.roots_laguerre(5, False) + y, v, m = sc.roots_laguerre(5, True) + assert_allclose(x, y, 1e-14, 1e-14) + assert_allclose(w, v, 1e-14, 1e-14) + + muI, muI_err = integrate.quad(weightf, 0, np.inf) + assert_allclose(m, muI, rtol=muI_err) + + assert_raises(ValueError, sc.roots_laguerre, 0) + assert_raises(ValueError, sc.roots_laguerre, 3.3) + +def test_roots_genlaguerre(): + rootf = lambda a: lambda n, mu: sc.roots_genlaguerre(n, a, mu) + evalf = lambda a: lambda n, x: orth.eval_genlaguerre(n, a, x) + weightf = lambda a: lambda x: x**a * np.exp(-x) + + vgq = verify_gauss_quad + vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 
0., np.inf, 5)
+    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 100, atol=1e-12)
+
+    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 5)
+    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 100, atol=1e-13)
+
+    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 5)
+    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 100, atol=1e-13)
+
+    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 5)
+    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 100, atol=1e-12)
+
+    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 5)
+    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 100, rtol=1e-14, atol=2e-13)
+
+    x, w = sc.roots_genlaguerre(5, 2, False)
+    y, v, m = sc.roots_genlaguerre(5, 2, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf(2.), 0., np.inf)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_genlaguerre, 0, 2)
+    assert_raises(ValueError, sc.roots_genlaguerre, 3.3, 2)
+    assert_raises(ValueError, sc.roots_genlaguerre, 3, -1.1)
+
+
+def test_gh_6721():
+    # Regression test for gh_6721. This should not raise.
+    sc.chebyt(65)(0.2)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal.pyc
new file mode 100644
index 0000000..cf98fbe
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal_eval.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal_eval.py
new file mode 100644
index 0000000..219ea26
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal_eval.py
@@ -0,0 +1,248 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.testing import assert_, assert_allclose
+import scipy.special.orthogonal as orth
+
+from scipy.special._testutils import FuncData
+
+
+def test_eval_chebyt():
+    n = np.arange(0, 10000, 7)
+    x = 2*np.random.rand() - 1
+    v1 = np.cos(n*np.arccos(x))
+    v2 = orth.eval_chebyt(n, x)
+    assert_(np.allclose(v1, v2, rtol=1e-15))
+
+
+def test_eval_genlaguerre_restriction():
+    # check it returns nan for alpha <= -1
+    assert_(np.isnan(orth.eval_genlaguerre(0, -1, 0)))
+    assert_(np.isnan(orth.eval_genlaguerre(0.1, -1, 0)))
+
+
+def test_warnings():
+    # ticket 1334
+    olderr = np.seterr(all='raise')
+    try:
+        # these should raise no fp warnings
+        orth.eval_legendre(1, 0)
+        orth.eval_laguerre(1, 1)
+        orth.eval_gegenbauer(1, 1, 0)
+    finally:
+        np.seterr(**olderr)
+
+
+class TestPolys(object):
+    """
+    Check that the eval_* functions agree with the constructed polynomials
+
+    """
+
+    def check_poly(self, func, cls, param_ranges=[], x_range=[], nn=10,
+                   nparam=10, nx=10, rtol=1e-8):
+        np.random.seed(1234)
+
+        dataset = []
+        for n in np.arange(nn):
+            params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
+            params = np.asarray(params).T
+            if not param_ranges:
+                params = [0]
+            for p in params:
+                if param_ranges:
+                    p = (n,) + tuple(p)
+                else:
+                    p = 
(n,) + x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx) + x[0] = x_range[0] # always include domain start point + x[1] = x_range[1] # always include domain end point + poly = np.poly1d(cls(*p).coef) + z = np.c_[np.tile(p, (nx,1)), x, poly(x)] + dataset.append(z) + + dataset = np.concatenate(dataset, axis=0) + + def polyfunc(*p): + p = (p[0].astype(int),) + p[1:] + return func(*p) + + olderr = np.seterr(all='raise') + try: + ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1, + rtol=rtol) + ds.check() + finally: + np.seterr(**olderr) + + def test_jacobi(self): + self.check_poly(orth.eval_jacobi, orth.jacobi, + param_ranges=[(-0.99, 10), (-0.99, 10)], x_range=[-1, 1], + rtol=1e-5) + + def test_sh_jacobi(self): + self.check_poly(orth.eval_sh_jacobi, orth.sh_jacobi, + param_ranges=[(1, 10), (0, 1)], x_range=[0, 1], + rtol=1e-5) + + def test_gegenbauer(self): + self.check_poly(orth.eval_gegenbauer, orth.gegenbauer, + param_ranges=[(-0.499, 10)], x_range=[-1, 1], + rtol=1e-7) + + def test_chebyt(self): + self.check_poly(orth.eval_chebyt, orth.chebyt, + param_ranges=[], x_range=[-1, 1]) + + def test_chebyu(self): + self.check_poly(orth.eval_chebyu, orth.chebyu, + param_ranges=[], x_range=[-1, 1]) + + def test_chebys(self): + self.check_poly(orth.eval_chebys, orth.chebys, + param_ranges=[], x_range=[-2, 2]) + + def test_chebyc(self): + self.check_poly(orth.eval_chebyc, orth.chebyc, + param_ranges=[], x_range=[-2, 2]) + + def test_sh_chebyt(self): + olderr = np.seterr(all='ignore') + try: + self.check_poly(orth.eval_sh_chebyt, orth.sh_chebyt, + param_ranges=[], x_range=[0, 1]) + finally: + np.seterr(**olderr) + + def test_sh_chebyu(self): + self.check_poly(orth.eval_sh_chebyu, orth.sh_chebyu, + param_ranges=[], x_range=[0, 1]) + + def test_legendre(self): + self.check_poly(orth.eval_legendre, orth.legendre, + param_ranges=[], x_range=[-1, 1]) + + def test_sh_legendre(self): + olderr = np.seterr(all='ignore') + try: + self.check_poly(orth.eval_sh_legendre, orth.sh_legendre, + param_ranges=[], x_range=[0, 1]) + finally: + np.seterr(**olderr) + + def test_genlaguerre(self): + self.check_poly(orth.eval_genlaguerre, orth.genlaguerre, + param_ranges=[(-0.99, 10)], x_range=[0, 100]) + + def test_laguerre(self): + self.check_poly(orth.eval_laguerre, orth.laguerre, + param_ranges=[], x_range=[0, 100]) + + def test_hermite(self): + self.check_poly(orth.eval_hermite, orth.hermite, + param_ranges=[], x_range=[-100, 100]) + + def test_hermitenorm(self): + self.check_poly(orth.eval_hermitenorm, orth.hermitenorm, + param_ranges=[], x_range=[-100, 100]) + + +class TestRecurrence(object): + """ + Check that the eval_* functions sig='ld->d' and 'dd->d' agree. 
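+ The integer-order 'ld->d' kernels evaluate via the recurrences, while the 'dd->d' kernels route through the general non-integer formulas, so agreement here presumably exercises both code paths.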
+ + """ + + def check_poly(self, func, param_ranges=[], x_range=[], nn=10, + nparam=10, nx=10, rtol=1e-8): + np.random.seed(1234) + + dataset = [] + for n in np.arange(nn): + params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges] + params = np.asarray(params).T + if not param_ranges: + params = [0] + for p in params: + if param_ranges: + p = (n,) + tuple(p) + else: + p = (n,) + x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx) + x[0] = x_range[0] # always include domain start point + x[1] = x_range[1] # always include domain end point + kw = dict(sig=(len(p)+1)*'d'+'->d') + z = np.c_[np.tile(p, (nx,1)), x, func(*(p + (x,)), **kw)] + dataset.append(z) + + dataset = np.concatenate(dataset, axis=0) + + def polyfunc(*p): + p = (p[0].astype(int),) + p[1:] + kw = dict(sig='l'+(len(p)-1)*'d'+'->d') + return func(*p, **kw) + + olderr = np.seterr(all='raise') + try: + ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1, + rtol=rtol) + ds.check() + finally: + np.seterr(**olderr) + + def test_jacobi(self): + self.check_poly(orth.eval_jacobi, + param_ranges=[(-0.99, 10), (-0.99, 10)], x_range=[-1, 1]) + + def test_sh_jacobi(self): + self.check_poly(orth.eval_sh_jacobi, + param_ranges=[(1, 10), (0, 1)], x_range=[0, 1]) + + def test_gegenbauer(self): + self.check_poly(orth.eval_gegenbauer, + param_ranges=[(-0.499, 10)], x_range=[-1, 1]) + + def test_chebyt(self): + self.check_poly(orth.eval_chebyt, + param_ranges=[], x_range=[-1, 1]) + + def test_chebyu(self): + self.check_poly(orth.eval_chebyu, + param_ranges=[], x_range=[-1, 1]) + + def test_chebys(self): + self.check_poly(orth.eval_chebys, + param_ranges=[], x_range=[-2, 2]) + + def test_chebyc(self): + self.check_poly(orth.eval_chebyc, + param_ranges=[], x_range=[-2, 2]) + + def test_sh_chebyt(self): + self.check_poly(orth.eval_sh_chebyt, + param_ranges=[], x_range=[0, 1]) + + def test_sh_chebyu(self): + self.check_poly(orth.eval_sh_chebyu, + param_ranges=[], x_range=[0, 1]) + + def test_legendre(self): + self.check_poly(orth.eval_legendre, + param_ranges=[], x_range=[-1, 1]) + + def test_sh_legendre(self): + self.check_poly(orth.eval_sh_legendre, + param_ranges=[], x_range=[0, 1]) + + def test_genlaguerre(self): + self.check_poly(orth.eval_genlaguerre, + param_ranges=[(-0.99, 10)], x_range=[0, 100]) + + def test_laguerre(self): + self.check_poly(orth.eval_laguerre, + param_ranges=[], x_range=[0, 100]) + + def test_hermite(self): + v = orth.eval_hermite(70, 1.0) + a = -1.457076485701412e60 + assert_allclose(v,a) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal_eval.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal_eval.pyc new file mode 100644 index 0000000..c4368a1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_orthogonal_eval.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_owens_t.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_owens_t.py new file mode 100644 index 0000000..6106c44 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_owens_t.py @@ -0,0 +1,44 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_equal, assert_allclose + +import scipy.special as sc + + +def test_symmetries(): + np.random.seed(1234) + a, h = np.random.rand(100), np.random.rand(100) + assert_equal(sc.owens_t(h, a), sc.owens_t(-h, a)) + 
assert_equal(sc.owens_t(h, a), -sc.owens_t(h, -a)) + + +def test_special_cases(): + assert_equal(sc.owens_t(5, 0), 0) + assert_allclose(sc.owens_t(0, 5), 0.5*np.arctan(5)/np.pi, + rtol=5e-14) + # Target value is 0.5*Phi(5)*(1 - Phi(5)) for Phi the CDF of the + # standard normal distribution + assert_allclose(sc.owens_t(5, 1), 1.4332574485503512543e-07, + rtol=5e-14) + + +def test_nans(): + assert_equal(sc.owens_t(20, np.nan), np.nan) + assert_equal(sc.owens_t(np.nan, 20), np.nan) + assert_equal(sc.owens_t(np.nan, np.nan), np.nan) + + +def test_infs(): + h = 1 + res = 0.5*sc.erfc(h/np.sqrt(2)) + assert_allclose(sc.owens_t(h, np.inf), res, rtol=5e-14) + assert_allclose(sc.owens_t(h, -np.inf), -res, rtol=5e-14) + + assert_equal(sc.owens_t(np.inf, 1), 0) + assert_equal(sc.owens_t(-np.inf, 1), 0) + + assert_equal(sc.owens_t(np.inf, np.inf), 0) + assert_equal(sc.owens_t(-np.inf, np.inf), 0) + assert_equal(sc.owens_t(np.inf, -np.inf), -0.0) + assert_equal(sc.owens_t(-np.inf, -np.inf), -0.0) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_owens_t.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_owens_t.pyc new file mode 100644 index 0000000..8fa9a28 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_owens_t.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_pcf.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_pcf.py new file mode 100644 index 0000000..a8c42aa --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_pcf.py @@ -0,0 +1,24 @@ +"""Tests for parabolic cylinder functions. + +""" +import numpy as np +from numpy.testing import assert_allclose, assert_equal +import scipy.special as sc + + +def test_pbwa_segfault(): + # Regression test for https://github.com/scipy/scipy/issues/6208. + # + # Data generated by mpmath. + # + w = 1.02276567211316867161 + wp = -0.48887053372346189882 + assert_allclose(sc.pbwa(0, 0), (w, wp), rtol=1e-13, atol=0) + + +def test_pbwa_nan(): + # Check that NaN's are returned outside of the range in which the + # implementation is accurate. 
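+ # The implementation is documented as accurate only for roughly + # |a|, |x| <= 5, so the probe points below sit just outside, at +/-6.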
+ pts = [(-6, -6), (-6, 6), (6, -6), (6, 6)] + for p in pts: + assert_equal(sc.pbwa(*p), (np.nan, np.nan)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_pcf.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_pcf.pyc new file mode 100644 index 0000000..e298054 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_pcf.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_expn_asy.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_expn_asy.py new file mode 100644 index 0000000..86aac8e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_expn_asy.py @@ -0,0 +1,26 @@ +from __future__ import division, print_function, absolute_import + +from numpy.testing import assert_equal + +from scipy.special._testutils import check_version, MissingModule +from scipy.special._precompute.expn_asy import generate_A + +try: + import sympy + from sympy import Poly +except ImportError: + sympy = MissingModule("sympy") + + +@check_version(sympy, "1.0") +def test_generate_A(): + # Data from DLMF 8.20.5 + x = sympy.symbols('x') + Astd = [Poly(1, x), + Poly(1, x), + Poly(1 - 2*x), + Poly(1 - 8*x + 6*x**2)] + Ares = generate_A(len(Astd)) + + for p, q in zip(Astd, Ares): + assert_equal(p, q) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_expn_asy.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_expn_asy.pyc new file mode 100644 index 0000000..ce197c3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_expn_asy.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_gammainc.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_gammainc.py new file mode 100644 index 0000000..03a594b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_gammainc.py @@ -0,0 +1,116 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +import pytest + +from scipy.special._testutils import MissingModule, check_version +from scipy.special._mptestutils import ( + Arg, IntArg, mp_assert_allclose, assert_mpmath_equal) +from scipy.special._precompute.gammainc_asy import ( + compute_g, compute_alpha, compute_d) +from scipy.special._precompute.gammainc_data import gammainc, gammaincc + +try: + import sympy +except ImportError: + sympy = MissingModule('sympy') + +try: + import mpmath as mp +except ImportError: + mp = MissingModule('mpmath') + + +_is_32bit_platform = np.intp(0).itemsize < 8 + + +@check_version(mp, '0.19') +def test_g(): + # Test data for the g_k. See DLMF 5.11.4. + with mp.workdps(30): + g = [mp.mpf(1), mp.mpf(1)/12, mp.mpf(1)/288, + -mp.mpf(139)/51840, -mp.mpf(571)/2488320, + mp.mpf(163879)/209018880, mp.mpf(5246819)/75246796800] + mp_assert_allclose(compute_g(7), g) + + +@pytest.mark.slow +@check_version(mp, '0.19') +@check_version(sympy, '0.7') +@pytest.mark.xfail(condition=_is_32bit_platform, reason="rtol only 2e-11, see gh-6938") +def test_alpha(): + # Test data for the alpha_k. See DLMF 8.12.14. 
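+ # These alpha_k invert eta**2/2 = lambda - 1 - log(lambda): + # lambda = 1 + eta + eta**2/3 + eta**3/36 - eta**4/270 + ..., the + # substitution behind the uniform asymptotic expansion of gammainc.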
+ with mp.workdps(30): + alpha = [mp.mpf(0), mp.mpf(1), mp.mpf(1)/3, mp.mpf(1)/36, + -mp.mpf(1)/270, mp.mpf(1)/4320, mp.mpf(1)/17010, + -mp.mpf(139)/5443200, mp.mpf(1)/204120] + mp_assert_allclose(compute_alpha(9), alpha) + + +@pytest.mark.xslow +@check_version(mp, '0.19') +@check_version(sympy, '0.7') +def test_d(): + # Compare the d_{k, n} to the results in appendix F of [1]. + # + # Sources + # ------- + # [1] DiDonato and Morris, Computation of the Incomplete Gamma + # Function Ratios and their Inverse, ACM Transactions on + # Mathematical Software, 1986. + + with mp.workdps(50): + dataset = [(0, 0, -mp.mpf('0.333333333333333333333333333333')), + (0, 12, mp.mpf('0.102618097842403080425739573227e-7')), + (1, 0, -mp.mpf('0.185185185185185185185185185185e-2')), + (1, 12, mp.mpf('0.119516285997781473243076536700e-7')), + (2, 0, mp.mpf('0.413359788359788359788359788360e-2')), + (2, 12, -mp.mpf('0.140925299108675210532930244154e-7')), + (3, 0, mp.mpf('0.649434156378600823045267489712e-3')), + (3, 12, -mp.mpf('0.191111684859736540606728140873e-7')), + (4, 0, -mp.mpf('0.861888290916711698604702719929e-3')), + (4, 12, mp.mpf('0.288658297427087836297341274604e-7')), + (5, 0, -mp.mpf('0.336798553366358150308767592718e-3')), + (5, 12, mp.mpf('0.482409670378941807563762631739e-7')), + (6, 0, mp.mpf('0.531307936463992223165748542978e-3')), + (6, 12, -mp.mpf('0.882860074633048352505085243179e-7')), + (7, 0, mp.mpf('0.344367606892377671254279625109e-3')), + (7, 12, -mp.mpf('0.175629733590604619378669693914e-6')), + (8, 0, -mp.mpf('0.652623918595309418922034919727e-3')), + (8, 12, mp.mpf('0.377358774161109793380344937299e-6')), + (9, 0, -mp.mpf('0.596761290192746250124390067179e-3')), + (9, 12, mp.mpf('0.870823417786464116761231237189e-6'))] + d = compute_d(10, 13) + res = [] + for k, n, std in dataset: + res.append(d[k][n]) + std = map(lambda x: x[2], dataset) + mp_assert_allclose(res, std) + + +@check_version(mp, '0.19') +def test_gammainc(): + # Quick check that the gammainc in + # special._precompute.gammainc_data agrees with mpmath's + # gammainc. + assert_mpmath_equal(gammainc, + lambda a, x: mp.gammainc(a, b=x, regularized=True), + [Arg(0, 100, inclusive_a=False), Arg(0, 100)], + nan_ok=False, rtol=1e-17, n=50, dps=50) + + +@pytest.mark.xslow +@check_version(mp, '0.19') +def test_gammaincc(): + # Check that the gammaincc in special._precompute.gammainc_data + # agrees with mpmath's gammainc. 
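+ # Note mpmath's calling convention: mp.gammainc(a, a=x) integrates from + # x to infinity (the upper tail Q(a, x)), while mp.gammainc(a, b=x) + # integrates from 0 to x (the lower tail P(a, x)).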
+ assert_mpmath_equal(lambda a, x: gammaincc(a, x, dps=1000), + lambda a, x: mp.gammainc(a, a=x, regularized=True), + [Arg(20, 100), Arg(20, 100)], + nan_ok=False, rtol=1e-17, n=50, dps=1000) + + # Test the fast integer path + assert_mpmath_equal(gammaincc, + lambda a, x: mp.gammainc(a, a=x, regularized=True), + [IntArg(1, 100), Arg(0, 100)], + nan_ok=False, rtol=1e-17, n=50, dps=50) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_gammainc.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_gammainc.pyc new file mode 100644 index 0000000..e0f6806 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_gammainc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_utils.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_utils.py new file mode 100644 index 0000000..8c64c18 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_utils.py @@ -0,0 +1,42 @@ +from __future__ import division, print_function, absolute_import +import numpy as np +import pytest + +from scipy.special._testutils import MissingModule, check_version +from scipy.special._mptestutils import mp_assert_allclose +from scipy.special._precompute.utils import lagrange_inversion + +try: + import sympy +except ImportError: + sympy = MissingModule('sympy') + +try: + import mpmath as mp +except ImportError: + mp = MissingModule('mpmath') + + +_is_32bit_platform = np.intp(0).itemsize < 8 + + +@pytest.mark.slow +@check_version(sympy, '0.7') +@check_version(mp, '0.19') +class TestInversion(object): + @pytest.mark.xfail(condition=_is_32bit_platform, reason="rtol only 2e-9, see gh-6938") + def test_log(self): + with mp.workdps(30): + logcoeffs = mp.taylor(lambda x: mp.log(1 + x), 0, 10) + expcoeffs = mp.taylor(lambda x: mp.exp(x) - 1, 0, 10) + invlogcoeffs = lagrange_inversion(logcoeffs) + mp_assert_allclose(invlogcoeffs, expcoeffs) + + @pytest.mark.xfail(condition=_is_32bit_platform, reason="rtol only 1e-15, see gh-6938") + def test_sin(self): + with mp.workdps(30): + sincoeffs = mp.taylor(mp.sin, 0, 10) + asincoeffs = mp.taylor(mp.asin, 0, 10) + invsincoeffs = lagrange_inversion(sincoeffs) + mp_assert_allclose(invsincoeffs, asincoeffs, atol=1e-30) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_utils.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_utils.pyc new file mode 100644 index 0000000..48c2ae7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_precompute_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_round.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_round.py new file mode 100644 index 0000000..b5dc2e5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_round.py @@ -0,0 +1,18 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +import pytest + +from scipy.special import _test_round + + +@pytest.mark.skipif(not _test_round.have_fenv(), reason="no fenv()") +def test_add_round_up(): + np.random.seed(1234) + _test_round.test_add_round(10**5, 'up') + + +@pytest.mark.skipif(not _test_round.have_fenv(), reason="no fenv()") +def test_add_round_down(): + np.random.seed(1234) + _test_round.test_add_round(10**5, 'down') diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_round.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_round.pyc new file mode 100644 index 0000000..19bca67 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_round.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sf_error.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sf_error.py new file mode 100644 index 0000000..c5f88d8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sf_error.py @@ -0,0 +1,115 @@ +from __future__ import division, print_function, absolute_import + +import warnings + +from numpy.testing import assert_, assert_equal +from scipy._lib._numpy_compat import suppress_warnings +import pytest +from pytest import raises as assert_raises + +import scipy.special as sc +from scipy.special._ufuncs import _sf_error_test_function + +_sf_error_code_map = { + # skip 'ok' + 'singular': 1, + 'underflow': 2, + 'overflow': 3, + 'slow': 4, + 'loss': 5, + 'no_result': 6, + 'domain': 7, + 'arg': 8, + 'other': 9 +} + +_sf_error_actions = [ + 'ignore', + 'warn', + 'raise' +] + + +def _check_action(fun, args, action): + if action == 'warn': + with pytest.warns(sc.SpecialFunctionWarning): + fun(*args) + elif action == 'raise': + with assert_raises(sc.SpecialFunctionError): + fun(*args) + else: + # action == 'ignore', make sure there are no warnings/exceptions + with warnings.catch_warnings(): + warnings.simplefilter("error") + fun(*args) + + +def test_geterr(): + err = sc.geterr() + for key, value in err.items(): + assert_(key in _sf_error_code_map.keys()) + assert_(value in _sf_error_actions) + + +def test_seterr(): + entry_err = sc.geterr() + try: + for category in _sf_error_code_map.keys(): + for action in _sf_error_actions: + geterr_olderr = sc.geterr() + seterr_olderr = sc.seterr(**{category: action}) + assert_(geterr_olderr == seterr_olderr) + newerr = sc.geterr() + assert_(newerr[category] == action) + geterr_olderr.pop(category) + newerr.pop(category) + assert_(geterr_olderr == newerr) + _check_action(_sf_error_test_function, + (_sf_error_code_map[category],), + action) + finally: + sc.seterr(**entry_err) + + +def test_errstate_pyx_basic(): + olderr = sc.geterr() + with sc.errstate(singular='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.loggamma(0) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_c_basic(): + olderr = sc.geterr() + with sc.errstate(domain='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.spence(-1) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_cpp_basic(): + olderr = sc.geterr() + with sc.errstate(underflow='raise'): + with assert_raises(sc.SpecialFunctionError): + sc.wrightomega(-1000) + assert_equal(olderr, sc.geterr()) + + +def test_errstate(): + for category in _sf_error_code_map.keys(): + for action in _sf_error_actions: + olderr = sc.geterr() + with sc.errstate(**{category: action}): + _check_action(_sf_error_test_function, + (_sf_error_code_map[category],), + action) + assert_equal(olderr, sc.geterr()) + + +def test_errstate_all_but_one(): + olderr = sc.geterr() + with sc.errstate(all='raise', singular='ignore'): + sc.gammaln(0) + with assert_raises(sc.SpecialFunctionError): + sc.spence(-1.0) + assert_equal(olderr, sc.geterr()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sf_error.pyc 
b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sf_error.pyc new file mode 100644 index 0000000..a2cc582 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sf_error.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sici.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sici.py new file mode 100644 index 0000000..513b4e3 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sici.py @@ -0,0 +1,38 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np + +import scipy.special as sc +from scipy.special._testutils import FuncData + + +def test_sici_consistency(): + # Make sure the implementation of sici for real arguments agrees + # with the implementation of sici for complex arguments. + + # On the negative real axis Cephes drops the imaginary part in ci + def sici(x): + si, ci = sc.sici(x + 0j) + return si.real, ci.real + + x = np.r_[-np.logspace(8, -30, 200), 0, np.logspace(-30, 8, 200)] + si, ci = sc.sici(x) + dataset = np.column_stack((x, si, ci)) + FuncData(sici, dataset, 0, (1, 2), rtol=1e-12).check() + + +def test_shichi_consistency(): + # Make sure the implementation of shichi for real arguments agrees + # with the implementation of shichi for complex arguments. + + # On the negative real axis Cephes drops the imaginary part in chi + def shichi(x): + shi, chi = sc.shichi(x + 0j) + return shi.real, chi.real + + # Overflow happens quickly, so limit range + x = np.r_[-np.logspace(np.log10(700), -30, 200), 0, + np.logspace(-30, np.log10(700), 200)] + shi, chi = sc.shichi(x) + dataset = np.column_stack((x, shi, chi)) + FuncData(shichi, dataset, 0, (1, 2), rtol=1e-14).check() diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sici.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sici.pyc new file mode 100644 index 0000000..cfb5978 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sici.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spence.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spence.py new file mode 100644 index 0000000..0bc88fe --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spence.py @@ -0,0 +1,34 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy import sqrt, log, pi +from scipy.special._testutils import FuncData +from scipy.special import spence + + +def test_consistency(): + # Make sure the implementation of spence for real arguments + # agrees with the implementation of spence for imaginary arguments. + + x = np.logspace(-30, 300, 200) + dataset = np.vstack((x + 0j, spence(x))).T + FuncData(spence, dataset, 0, 1, rtol=1e-14).check() + + +def test_special_points(): + # Check against known values of Spence's function. 
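+ # scipy uses the convention spence(z) = int_1^z log(t)/(1 - t) dt = Li2(1 - z), + # hence spence(0) = pi**2/6, spence(1) = 0 and spence(2) = -pi**2/12 below.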
+ + phi = (1 + sqrt(5))/2 + dataset = [(1, 0), + (2, -pi**2/12), + (0.5, pi**2/12 - log(2)**2/2), + (0, pi**2/6), + (-1, pi**2/4 - 1j*pi*log(2)), + ((-1 + sqrt(5))/2, pi**2/15 - log(phi)**2), + ((3 - sqrt(5))/2, pi**2/10 - log(phi)**2), + (phi, -pi**2/15 + log(phi)**2/2), + # Corrected from Zagier, "The Dilogarithm Function" + ((3 + sqrt(5))/2, -pi**2/10 - log(phi)**2)] + + dataset = np.asarray(dataset) + FuncData(spence, dataset, 0, 1, rtol=1e-14).check() diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spence.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spence.pyc new file mode 100644 index 0000000..ed74b4a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spence.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spfun_stats.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spfun_stats.py new file mode 100644 index 0000000..8a3027f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spfun_stats.py @@ -0,0 +1,63 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_array_equal, + assert_array_almost_equal_nulp, assert_almost_equal) +from pytest import raises as assert_raises + +from scipy.special import gammaln, multigammaln + + +class TestMultiGammaLn(object): + + def test1(self): + # A test of the identity + # Gamma_1(a) = Gamma(a) + np.random.seed(1234) + a = np.abs(np.random.randn()) + assert_array_equal(multigammaln(a, 1), gammaln(a)) + + def test2(self): + # A test of the identity + # Gamma_2(a) = sqrt(pi) * Gamma(a) * Gamma(a - 0.5) + a = np.array([2.5, 10.0]) + result = multigammaln(a, 2) + expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5) + assert_almost_equal(result, expected) + + def test_bararg(self): + assert_raises(ValueError, multigammaln, 0.5, 1.2) + + +def _check_multigammaln_array_result(a, d): + # Test that the shape of the array returned by multigammaln + # matches the input shape, and that all the values match + # the value computed when multigammaln is called with a scalar. + result = multigammaln(a, d) + assert_array_equal(a.shape, result.shape) + a1 = a.ravel() + result1 = result.ravel() + for i in range(a.size): + assert_array_almost_equal_nulp(result1[i], multigammaln(a1[i], d)) + + +def test_multigammaln_array_arg(): + # Check that the array returned by multigammaln has the correct + # shape and contains the correct values. The cases have arrays + # with several different shapes. + # The cases include a regression test for ticket #1849 + # (a = np.array([2.0]), an array with a single element). 
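+ # Recall multigammaln(a, d) = log(Gamma_d(a)) = d*(d - 1)/4*log(pi) + # + sum(gammaln(a + (1 - j)/2) for j in range(1, d + 1)), applied + # elementwise over `a`.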
+ np.random.seed(1234) + + cases = [ + # a, d + (np.abs(np.random.randn(3, 2)) + 5, 5), + (np.abs(np.random.randn(1, 2)) + 5, 5), + (np.arange(10.0, 18.0).reshape(2, 2, 2), 3), + (np.array([2.0]), 3), + (np.float64(2.0), 3), + ] + + for a, d in cases: + _check_multigammaln_array_result(a, d) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spfun_stats.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spfun_stats.pyc new file mode 100644 index 0000000..79deff6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spfun_stats.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sph_harm.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sph_harm.py new file mode 100644 index 0000000..f1fc2af --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sph_harm.py @@ -0,0 +1,39 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_allclose +import scipy.special as sc + + +def test_first_harmonics(): + # Test against explicit representations of the first four + # spherical harmonics which use `theta` as the azimuthal angle, + # `phi` as the polar angle, and include the Condon-Shortley + # phase. + + # Notation is Ymn + def Y00(theta, phi): + return 0.5*np.sqrt(1/np.pi) + + def Yn11(theta, phi): + return 0.5*np.sqrt(3/(2*np.pi))*np.exp(-1j*theta)*np.sin(phi) + + def Y01(theta, phi): + return 0.5*np.sqrt(3/np.pi)*np.cos(phi) + + def Y11(theta, phi): + return -0.5*np.sqrt(3/(2*np.pi))*np.exp(1j*theta)*np.sin(phi) + + harms = [Y00, Yn11, Y01, Y11] + m = [0, -1, 0, 1] + n = [0, 1, 1, 1] + + theta = np.linspace(0, 2*np.pi) + phi = np.linspace(0, np.pi) + theta, phi = np.meshgrid(theta, phi) + + for harm, m, n in zip(harms, m, n): + assert_allclose(sc.sph_harm(m, n, theta, phi), + harm(theta, phi), + rtol=1e-15, atol=1e-15, + err_msg="Y^{}_{} incorrect".format(m, n)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sph_harm.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sph_harm.pyc new file mode 100644 index 0000000..c7d94d0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_sph_harm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spherical_bessel.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spherical_bessel.py new file mode 100644 index 0000000..af8d775 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spherical_bessel.py @@ -0,0 +1,383 @@ +# +# Tests of spherical Bessel functions. +# +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import (assert_almost_equal, assert_allclose, + assert_array_almost_equal) +import pytest +from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi + +from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn +from scipy.integrate import quad + +from scipy._lib._numpy_compat import suppress_warnings + + +class TestSphericalJn: + def test_spherical_jn_exact(self): + # https://dlmf.nist.gov/10.49.E3 + # Note: exact expression is numerically stable only for small + # n or z >> n. 
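+ # For instance at n = 2, x = 0.12 the two terms below are each ~2.1e2 + # while their difference is ~9.6e-4, so about five significant digits + # cancel; for z >> n the terms are O(1/z) and no cancellation occurs.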
+ x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5]) + assert_allclose(spherical_jn(2, x), + (-1/x + 3/x**3)*sin(x) - 3/x**2*cos(x)) + + def test_spherical_jn_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x), + (2*n + 1)/x*spherical_jn(n, x)) + + def test_spherical_jn_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1,x), + (2*n + 1)/x*spherical_jn(n, x)) + + def test_spherical_jn_inf_real(self): + # https://dlmf.nist.gov/10.52.E3 + n = 6 + x = np.array([-inf, inf]) + assert_allclose(spherical_jn(n, x), np.array([0, 0])) + + def test_spherical_jn_inf_complex(self): + # https://dlmf.nist.gov/10.52.E3 + n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)])) + + def test_spherical_jn_large_arg_1(self): + # https://github.com/scipy/scipy/issues/2165 + # Reference value computed using mpmath, via + # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z)) + assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747) + + def test_spherical_jn_large_arg_2(self): + # https://github.com/scipy/scipy/issues/1641 + # Reference value computed using mpmath, via + # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z)) + assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05) + + def test_spherical_jn_at_zero(self): + # https://dlmf.nist.gov/10.52.E1 + # But note that n = 0 is a special case: j0 = sin(x)/x -> 1 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_jn(n, x), np.array([1, 0, 0, 0, 0, 0])) + + +class TestSphericalYn: + def test_spherical_yn_exact(self): + # https://dlmf.nist.gov/10.49.E5 + # Note: exact expression is numerically stable only for small + # n or z >> n. 
+ x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5]) + assert_allclose(spherical_yn(2, x), + (1/x - 3/x**3)*cos(x) - 3/x**2*sin(x)) + + def test_spherical_yn_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1,x), + (2*n + 1)/x*spherical_yn(n, x)) + + def test_spherical_yn_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x), + (2*n + 1)/x*spherical_yn(n, x)) + + def test_spherical_yn_inf_real(self): + # https://dlmf.nist.gov/10.52.E3 + n = 6 + x = np.array([-inf, inf]) + assert_allclose(spherical_yn(n, x), np.array([0, 0])) + + def test_spherical_yn_inf_complex(self): + # https://dlmf.nist.gov/10.52.E3 + n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)])) + + def test_spherical_yn_at_zero(self): + # https://dlmf.nist.gov/10.52.E2 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_yn(n, x), -inf*np.ones(shape=n.shape)) + + def test_spherical_yn_at_zero_complex(self): + # Consistently with numpy: + # >>> -np.cos(0)/0 + # -inf + # >>> -np.cos(0+0j)/(0+0j) + # (-inf + nan*j) + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + 0j + assert_allclose(spherical_yn(n, x), nan*np.ones(shape=n.shape)) + + +class TestSphericalJnYnCrossProduct: + def test_spherical_jn_yn_cross_product_1(self): + # https://dlmf.nist.gov/10.50.E3 + n = np.array([1, 5, 8]) + x = np.array([0.1, 1, 10]) + left = (spherical_jn(n + 1, x) * spherical_yn(n, x) - + spherical_jn(n, x) * spherical_yn(n + 1, x)) + right = 1/x**2 + assert_allclose(left, right) + + def test_spherical_jn_yn_cross_product_2(self): + # https://dlmf.nist.gov/10.50.E3 + n = np.array([1, 5, 8]) + x = np.array([0.1, 1, 10]) + left = (spherical_jn(n + 2, x) * spherical_yn(n, x) - + spherical_jn(n, x) * spherical_yn(n + 2, x)) + right = (2*n + 3)/x**3 + assert_allclose(left, right) + + +class TestSphericalIn: + def test_spherical_in_exact(self): + # https://dlmf.nist.gov/10.49.E9 + x = np.array([0.12, 1.23, 12.34, 123.45]) + assert_allclose(spherical_in(2, x), + (1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x)) + + def test_spherical_in_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E4 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x), + (2*n + 1)/x*spherical_in(n, x)) + + def test_spherical_in_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E1 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x), + (2*n + 1)/x*spherical_in(n, x)) + + def test_spherical_in_inf_real(self): + # https://dlmf.nist.gov/10.52.E3 + n = 5 + x = np.array([-inf, inf]) + assert_allclose(spherical_in(n, x), np.array([-inf, inf])) + + def test_spherical_in_inf_complex(self): + # https://dlmf.nist.gov/10.52.E5 + # Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but + # this appears impossible to achieve because C99 regards any complex + # value with at least one infinite part as a complex infinity, so + # 1j*inf cannot be distinguished from (1+1j)*inf. Therefore, nan is + # the correct return value. 
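+ # On the real axis i_n(z) grows like exp(z)/(2*z) as z -> +inf, and the + # parity i_n(-z) = (-1)**n * i_n(z) with n = 7 odd gives -inf at z = -inf.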
+ n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan])) + + def test_spherical_in_at_zero(self): + # https://dlmf.nist.gov/10.52.E1 + # But note that n = 0 is a special case: i0 = sinh(x)/x -> 1 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0])) + + +class TestSphericalKn: + def test_spherical_kn_exact(self): + # https://dlmf.nist.gov/10.49.E13 + x = np.array([0.12, 1.23, 12.34, 123.45]) + assert_allclose(spherical_kn(2, x), + pi/2*exp(-x)*(1/x + 3/x**2 + 3/x**3)) + + def test_spherical_kn_recurrence_real(self): + # https://dlmf.nist.gov/10.51.E4 + n = np.array([1, 2, 3, 7, 12]) + x = 0.12 + assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x), + (-1)**n*(2*n + 1)/x*spherical_kn(n, x)) + + def test_spherical_kn_recurrence_complex(self): + # https://dlmf.nist.gov/10.51.E4 + n = np.array([1, 2, 3, 7, 12]) + x = 1.1 + 1.5j + assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x), + (-1)**n*(2*n + 1)/x*spherical_kn(n, x)) + + def test_spherical_kn_inf_real(self): + # https://dlmf.nist.gov/10.52.E6 + n = 5 + x = np.array([-inf, inf]) + assert_allclose(spherical_kn(n, x), np.array([-inf, 0])) + + def test_spherical_kn_inf_complex(self): + # https://dlmf.nist.gov/10.52.E6 + # The behavior at complex infinity depends on the sign of the real + # part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's + # z*inf. This distinction cannot be captured, so we return nan. + n = 7 + x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) + assert_allclose(spherical_kn(n, x), np.array([-inf, 0, nan])) + + def test_spherical_kn_at_zero(self): + # https://dlmf.nist.gov/10.52.E2 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + assert_allclose(spherical_kn(n, x), inf*np.ones(shape=n.shape)) + + def test_spherical_kn_at_zero_complex(self): + # https://dlmf.nist.gov/10.52.E2 + n = np.array([0, 1, 2, 5, 10, 100]) + x = 0 + 0j + assert_allclose(spherical_kn(n, x), nan*np.ones(shape=n.shape)) + + +class SphericalDerivativesTestCase: + def fundamental_theorem(self, n, a, b): + integral, tolerance = quad(lambda z: self.df(n, z), a, b) + assert_allclose(integral, + self.f(n, b) - self.f(n, a), + atol=tolerance) + + @pytest.mark.slow + def test_fundamental_theorem_0(self): + self.fundamental_theorem(0, 3.0, 15.0) + + @pytest.mark.slow + def test_fundamental_theorem_7(self): + self.fundamental_theorem(7, 0.5, 1.2) + + +class TestSphericalJnDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_jn(n, z) + + def df(self, n, z): + return spherical_jn(n, z, derivative=True) + + def test_spherical_jn_d_zero(self): + n = np.array([0, 1, 2, 3, 7, 15]) + assert_allclose(spherical_jn(n, 0, derivative=True), + np.array([0, 1/3, 0, 0, 0, 0])) + + +class TestSphericalYnDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_yn(n, z) + + def df(self, n, z): + return spherical_yn(n, z, derivative=True) + + +class TestSphericalInDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_in(n, z) + + def df(self, n, z): + return spherical_in(n, z, derivative=True) + + def test_spherical_in_d_zero(self): + n = np.array([1, 2, 3, 7, 15]) + assert_allclose(spherical_in(n, 0, derivative=True), + np.zeros(5)) + + +class TestSphericalKnDerivatives(SphericalDerivativesTestCase): + def f(self, n, z): + return spherical_kn(n, z) + + def df(self, n, z): + return 
spherical_kn(n, z, derivative=True) + + +class TestSphericalOld: + # These are tests from the TestSpherical class of test_basic.py, + # rewritten to use spherical_* instead of sph_* but otherwise unchanged. + + def test_sph_in(self): + # This test reproduces test_basic.TestSpherical.test_sph_in. + i1n = np.empty((2,2)) + x = 0.2 + + i1n[0][0] = spherical_in(0, x) + i1n[0][1] = spherical_in(1, x) + i1n[1][0] = spherical_in(0, x, derivative=True) + i1n[1][1] = spherical_in(1, x, derivative=True) + + inp0 = (i1n[0][1]) + inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1]) + assert_array_almost_equal(i1n[0],np.array([1.0066800127054699381, + 0.066933714568029540839]),12) + assert_array_almost_equal(i1n[1],[inp0,inp1],12) + + def test_sph_in_kn_order0(self): + x = 1. + sph_i0 = np.empty((2,)) + sph_i0[0] = spherical_in(0, x) + sph_i0[1] = spherical_in(0, x, derivative=True) + sph_i0_expected = np.array([np.sinh(x)/x, + np.cosh(x)/x-np.sinh(x)/x**2]) + assert_array_almost_equal(r_[sph_i0], sph_i0_expected) + + sph_k0 = np.empty((2,)) + sph_k0[0] = spherical_kn(0, x) + sph_k0[1] = spherical_kn(0, x, derivative=True) + sph_k0_expected = np.array([0.5*pi*exp(-x)/x, + -0.5*pi*exp(-x)*(1/x+1/x**2)]) + assert_array_almost_equal(r_[sph_k0], sph_k0_expected) + + def test_sph_jn(self): + s1 = np.empty((2,3)) + x = 0.2 + + s1[0][0] = spherical_jn(0, x) + s1[0][1] = spherical_jn(1, x) + s1[0][2] = spherical_jn(2, x) + s1[1][0] = spherical_jn(0, x, derivative=True) + s1[1][1] = spherical_jn(1, x, derivative=True) + s1[1][2] = spherical_jn(2, x, derivative=True) + + s10 = -s1[0][1] + s11 = s1[0][0]-2.0/0.2*s1[0][1] + s12 = s1[0][1]-3.0/0.2*s1[0][2] + assert_array_almost_equal(s1[0],[0.99334665397530607731, + 0.066400380670322230863, + 0.0026590560795273856680],12) + assert_array_almost_equal(s1[1],[s10,s11,s12],12) + + def test_sph_kn(self): + kn = np.empty((2,3)) + x = 0.2 + + kn[0][0] = spherical_kn(0, x) + kn[0][1] = spherical_kn(1, x) + kn[0][2] = spherical_kn(2, x) + kn[1][0] = spherical_kn(0, x, derivative=True) + kn[1][1] = spherical_kn(1, x, derivative=True) + kn[1][2] = spherical_kn(2, x, derivative=True) + + kn0 = -kn[0][1] + kn1 = -kn[0][0]-2.0/0.2*kn[0][1] + kn2 = -kn[0][1]-3.0/0.2*kn[0][2] + assert_array_almost_equal(kn[0],[6.4302962978445670140, + 38.581777787067402086, + 585.15696310385559829],12) + assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9) + + def test_sph_yn(self): + sy1 = spherical_yn(2, 0.2) + sy2 = spherical_yn(0, 0.2) + assert_almost_equal(sy1,-377.52483,5) # previous values in the system + assert_almost_equal(sy2,-4.9003329,5) + sphpy = (spherical_yn(0, 0.2) - 2*spherical_yn(2, 0.2))/3 + sy3 = spherical_yn(1, 0.2, derivative=True) + assert_almost_equal(sy3,sphpy,4) # compare correct derivative val. (correct =-system val). 
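+ # The reference value `sphpy` above uses the derivative identity + # (2n + 1)*f_n'(z) = n*f_{n-1}(z) - (n + 1)*f_{n+1}(z) (a consequence of + # DLMF 10.51.1-10.51.2), which for n = 1 gives y1'(z) = (y0(z) - 2*y2(z))/3.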
diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spherical_bessel.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spherical_bessel.pyc new file mode 100644 index 0000000..d2b1610 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_spherical_bessel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_trig.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_trig.py new file mode 100644 index 0000000..441529f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_trig.py @@ -0,0 +1,77 @@ +from __future__ import division, print_function, absolute_import + +import sys + +import numpy as np +from numpy.testing import assert_equal, assert_allclose +import pytest + +from scipy.special._ufuncs import _sinpi as sinpi +from scipy.special._ufuncs import _cospi as cospi + +from scipy._lib._numpy_compat import suppress_warnings + + +def test_integer_real_part(): + x = np.arange(-100, 101) + y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10))) + x, y = np.meshgrid(x, y) + z = x + 1j*y + # In the following we should be *exactly* right + res = sinpi(z) + assert_equal(res.real, 0.0) + res = cospi(z) + assert_equal(res.imag, 0.0) + + +def test_half_integer_real_part(): + x = np.arange(-100, 101) + 0.5 + y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10))) + x, y = np.meshgrid(x, y) + z = x + 1j*y + # In the following we should be *exactly* right + res = sinpi(z) + assert_equal(res.imag, 0.0) + res = cospi(z) + assert_equal(res.real, 0.0) + + +def test_intermediate_overflow(): + # Make sure we avoid overflow in situations where cosh/sinh would + # overflow but the product with sin/cos would not + sinpi_pts = [complex(1 + 1e-14, 227), + complex(1e-35, 250), + complex(1e-301, 445)] + # Data generated with mpmath + sinpi_std = [complex(-8.113438309924894e+295, -np.inf), + complex(1.9507801934611995e+306, np.inf), + complex(2.205958493464539e+306, np.inf)] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + for p, std in zip(sinpi_pts, sinpi_std): + assert_allclose(sinpi(p), std) + + # Test for cosine, less interesting because cos(0) = 1. 
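+ # Same mechanism: cos(pi*(x + 1j*y)) = cos(pi*x)*cosh(pi*y) - + # 1j*sin(pi*x)*sinh(pi*y); with x = 0.5 + 1e-14, cos(pi*x) ~ -3.1e-14 while + # cosh(pi*227) alone overflows a double, yet the product is ~ -8.1e295.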
+ p = complex(0.5 + 1e-14, 227) + std = complex(-8.113438309924894e+295, -np.inf) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert_allclose(cospi(p), std) + + +@pytest.mark.xfail('win32' in sys.platform + and np.intp(0).itemsize < 8 + and sys.version_info < (3, 5), + reason="fails on 32-bit Windows with old MSVC") +def test_zero_sign(): + y = sinpi(-0.0) + assert y == 0.0 + assert np.signbit(y) + + y = sinpi(0.0) + assert y == 0.0 + assert not np.signbit(y) + + y = cospi(0.5) + assert y == 0.0 + assert not np.signbit(y) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_trig.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_trig.pyc new file mode 100644 index 0000000..cf5fe65 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_trig.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_wrightomega.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_wrightomega.py new file mode 100644 index 0000000..436f281 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_wrightomega.py @@ -0,0 +1,55 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_, assert_equal + +import scipy.special as sc + + +def test_wrightomega_nan(): + pts = [complex(np.nan, 0), + complex(0, np.nan), + complex(np.nan, np.nan), + complex(np.nan, 1), + complex(1, np.nan)] + for p in pts: + res = sc.wrightomega(p) + assert_(np.isnan(res.real)) + assert_(np.isnan(res.imag)) + + +def test_wrightomega_inf_branch(): + pts = [complex(-np.inf, np.pi/4), + complex(-np.inf, -np.pi/4), + complex(-np.inf, 3*np.pi/4), + complex(-np.inf, -3*np.pi/4)] + expected_results = [complex(0.0, 0.0), + complex(0.0, -0.0), + complex(-0.0, 0.0), + complex(-0.0, -0.0)] + for p, expected in zip(pts, expected_results): + res = sc.wrightomega(p) + # We can't use assert_equal(res, expected) because in older versions of + # numpy, assert_equal doesn't check the sign of the real and imaginary + # parts when comparing complex zeros. It does check the sign when the + # arguments are *real* scalars. 
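+ # The expected signed zeros follow from omega(z) ~ exp(z) as Re(z) -> -inf: + # the signs of cos(Im(z)) and sin(Im(z)) fix the signs of the vanishing + # real and imaginary parts. +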
+ assert_equal(res.real, expected.real) + assert_equal(res.imag, expected.imag) + + +def test_wrightomega_inf(): + pts = [complex(np.inf, 10), + complex(-np.inf, 10), + complex(10, np.inf), + complex(10, -np.inf)] + for p in pts: + assert_equal(sc.wrightomega(p), p) + + +def test_wrightomega_singular(): + pts = [complex(-1.0, np.pi), + complex(-1.0, -np.pi)] + for p in pts: + res = sc.wrightomega(p) + assert_equal(res, -1.0) + assert_(np.signbit(res.imag) == False) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_wrightomega.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_wrightomega.pyc new file mode 100644 index 0000000..6a50041 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_wrightomega.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_zeta.py b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_zeta.py new file mode 100644 index 0000000..269be01 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_zeta.py @@ -0,0 +1,39 @@ +from __future__ import division, print_function, absolute_import + +import scipy.special as sc +import numpy as np +from numpy.testing import assert_, assert_equal, assert_allclose + + +def test_zeta(): + assert_allclose(sc.zeta(2,2), np.pi**2/6 - 1, rtol=1e-12) + + +def test_zeta_1arg(): + assert_allclose(sc.zeta(2), np.pi**2/6, rtol=1e-12) + assert_allclose(sc.zeta(4), np.pi**4/90, rtol=1e-12) + + +def test_zetac(): + assert_equal(sc.zetac(0), -1.5) + assert_equal(sc.zetac(1.0), np.inf) + # Expected values in the following were computed using + # Wolfram Alpha `Zeta[x] - 1`: + rtol = 1e-12 + assert_allclose(sc.zetac(-2.1), -0.9972705002153750, rtol=rtol) + assert_allclose(sc.zetac(0.8), -5.437538415895550, rtol=rtol) + assert_allclose(sc.zetac(0.9999), -10000.42279161673, rtol=rtol) + assert_allclose(sc.zetac(9), 0.002008392826082214, rtol=rtol) + assert_allclose(sc.zetac(50), 8.881784210930816e-16, rtol=rtol) + assert_allclose(sc.zetac(75), 2.646977960169853e-23, rtol=rtol) + + +def test_zetac_negative_even(): + pts = [-2, -50, -100] + for p in pts: + assert_equal(sc.zetac(p), -1) + + +def test_zetac_inf(): + assert_equal(sc.zetac(np.inf), 0.0) + assert_(np.isnan(sc.zetac(-np.inf))) diff --git a/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_zeta.pyc b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_zeta.pyc new file mode 100644 index 0000000..2d62501 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/special/tests/test_zeta.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/stats/__init__.py new file mode 100644 index 0000000..5a4a1bd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/__init__.py @@ -0,0 +1,380 @@ +""" +========================================== +Statistical functions (:mod:`scipy.stats`) +========================================== + +.. module:: scipy.stats + +This module contains a large number of probability distributions as +well as a growing library of statistical functions. + +Each univariate distribution is an instance of a subclass of `rv_continuous` +(`rv_discrete` for discrete distributions): + +.. autosummary:: + :toctree: generated/ + + rv_continuous + rv_discrete + rv_histogram + +Continuous distributions +======================== + +.. 
autosummary:: + :toctree: generated/ + + alpha -- Alpha + anglit -- Anglit + arcsine -- Arcsine + argus -- Argus + beta -- Beta + betaprime -- Beta Prime + bradford -- Bradford + burr -- Burr (Type III) + burr12 -- Burr (Type XII) + cauchy -- Cauchy + chi -- Chi + chi2 -- Chi-squared + cosine -- Cosine + crystalball -- Crystalball + dgamma -- Double Gamma + dweibull -- Double Weibull + erlang -- Erlang + expon -- Exponential + exponnorm -- Exponentially Modified Normal + exponweib -- Exponentiated Weibull + exponpow -- Exponential Power + f -- F (Snedecor F) + fatiguelife -- Fatigue Life (Birnbaum-Saunders) + fisk -- Fisk + foldcauchy -- Folded Cauchy + foldnorm -- Folded Normal + frechet_r -- Deprecated. Alias for weibull_min + frechet_l -- Deprecated. Alias for weibull_max + genlogistic -- Generalized Logistic + gennorm -- Generalized normal + genpareto -- Generalized Pareto + genexpon -- Generalized Exponential + genextreme -- Generalized Extreme Value + gausshyper -- Gauss Hypergeometric + gamma -- Gamma + gengamma -- Generalized gamma + genhalflogistic -- Generalized Half Logistic + gilbrat -- Gilbrat + gompertz -- Gompertz (Truncated Gumbel) + gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I + gumbel_l -- Left Sided Gumbel, etc. + halfcauchy -- Half Cauchy + halflogistic -- Half Logistic + halfnorm -- Half Normal + halfgennorm -- Generalized Half Normal + hypsecant -- Hyperbolic Secant + invgamma -- Inverse Gamma + invgauss -- Inverse Gaussian + invweibull -- Inverse Weibull + johnsonsb -- Johnson SB + johnsonsu -- Johnson SU + kappa4 -- Kappa 4 parameter + kappa3 -- Kappa 3 parameter + ksone -- Kolmogorov-Smirnov one-sided (no stats) + kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats) + laplace -- Laplace + levy -- Levy + levy_l -- Left-skewed Levy + levy_stable -- Levy stable + logistic -- Logistic + loggamma -- Log-Gamma + loglaplace -- Log-Laplace (Log Double Exponential) + lognorm -- Log-Normal + lomax -- Lomax (Pareto of the second kind) + maxwell -- Maxwell + mielke -- Mielke's Beta-Kappa + moyal -- Moyal + nakagami -- Nakagami + ncx2 -- Non-central chi-squared + ncf -- Non-central F + nct -- Non-central Student's T + norm -- Normal (Gaussian) + norminvgauss -- Normal Inverse Gaussian + pareto -- Pareto + pearson3 -- Pearson type III + powerlaw -- Power-function + powerlognorm -- Power log normal + powernorm -- Power normal + rdist -- R-distribution + reciprocal -- Reciprocal + rayleigh -- Rayleigh + rice -- Rice + recipinvgauss -- Reciprocal Inverse Gaussian + semicircular -- Semicircular + skewnorm -- Skew normal + t -- Student's T + trapz -- Trapezoidal + triang -- Triangular + truncexpon -- Truncated Exponential + truncnorm -- Truncated Normal + tukeylambda -- Tukey-Lambda + uniform -- Uniform + vonmises -- Von-Mises (Circular) + vonmises_line -- Von-Mises (Line) + wald -- Wald + weibull_min -- Minimum Weibull (see Frechet) + weibull_max -- Maximum Weibull (see Frechet) + wrapcauchy -- Wrapped Cauchy + +Multivariate distributions +========================== + +.. autosummary:: + :toctree: generated/ + + multivariate_normal -- Multivariate normal distribution + matrix_normal -- Matrix normal distribution + dirichlet -- Dirichlet + wishart -- Wishart + invwishart -- Inverse Wishart + multinomial -- Multinomial distribution + special_ortho_group -- SO(N) group + ortho_group -- O(N) group + unitary_group -- U(N) group + random_correlation -- random correlation matrices + +Discrete distributions +====================== + +.. 
autosummary:: + :toctree: generated/ + + bernoulli -- Bernoulli + binom -- Binomial + boltzmann -- Boltzmann (Truncated Discrete Exponential) + dlaplace -- Discrete Laplacian + geom -- Geometric + hypergeom -- Hypergeometric + logser -- Logarithmic (Log-Series, Series) + nbinom -- Negative Binomial + planck -- Planck (Discrete Exponential) + poisson -- Poisson + randint -- Discrete Uniform + skellam -- Skellam + zipf -- Zipf + yulesimon -- Yule-Simon + +An overview of statistical functions is given below. +Several of these functions have similar versions in +`scipy.stats.mstats` which work for masked arrays. + +Summary statistics +================== + +.. autosummary:: + :toctree: generated/ + + describe -- Descriptive statistics + gmean -- Geometric mean + hmean -- Harmonic mean + kurtosis -- Fisher or Pearson kurtosis + mode -- Modal value + moment -- Central moment + skew -- Skewness + kstat -- n-th k-statistic + kstatvar -- Variance of the k-statistic + tmean -- Truncated arithmetic mean + tvar -- Truncated variance + tmin -- Truncated minimum + tmax -- Truncated maximum + tstd -- Truncated standard deviation + tsem -- Truncated standard error of the mean + variation -- Coefficient of variation + find_repeats + trim_mean + iqr + sem + bayes_mvs + mvsdist + entropy + +Frequency statistics +==================== + +.. autosummary:: + :toctree: generated/ + + cumfreq + itemfreq + percentileofscore + scoreatpercentile + relfreq + +.. autosummary:: + :toctree: generated/ + + binned_statistic -- Compute a binned statistic for a set of data. + binned_statistic_2d -- Compute a 2-D binned statistic for a set of data. + binned_statistic_dd -- Compute a d-D binned statistic for a set of data. + +Correlation functions +===================== + +.. autosummary:: + :toctree: generated/ + + f_oneway + pearsonr + spearmanr + pointbiserialr + kendalltau + weightedtau + linregress + siegelslopes + theilslopes + +Statistical tests +================= + +.. autosummary:: + :toctree: generated/ + + ttest_1samp + ttest_ind + ttest_ind_from_stats + ttest_rel + kstest + chisquare + power_divergence + ks_2samp + mannwhitneyu + tiecorrect + rankdata + ranksums + wilcoxon + kruskal + friedmanchisquare + brunnermunzel + combine_pvalues + jarque_bera + +.. autosummary:: + :toctree: generated/ + + ansari + bartlett + levene + shapiro + anderson + anderson_ksamp + binom_test + fligner + median_test + mood + skewtest + kurtosistest + normaltest + +Transformations +=============== + +.. autosummary:: + :toctree: generated/ + + boxcox + boxcox_normmax + boxcox_llf + yeojohnson + yeojohnson_normmax + yeojohnson_llf + obrientransform + sigmaclip + trimboth + trim1 + zmap + zscore + +Statistical distances +===================== + +.. autosummary:: + :toctree: generated/ + + wasserstein_distance + energy_distance + +Random variate generation +========================= + +.. autosummary:: + :toctree: generated/ + + rvs_ratio_uniforms + +Circular statistical functions +============================== + +.. autosummary:: + :toctree: generated/ + + circmean + circvar + circstd + +Contingency table functions +=========================== + +.. autosummary:: + :toctree: generated/ + + chi2_contingency + contingency.expected_freq + contingency.margins + fisher_exact + +Plot-tests +========== + +.. autosummary:: + :toctree: generated/ + + ppcc_max + ppcc_plot + probplot + boxcox_normplot + yeojohnson_normplot + + +Masked statistics functions +=========================== + +.. toctree:: + + stats.mstats + + +Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`) +============================================================================== + +.. 
autosummary:: + :toctree: generated/ + + gaussian_kde + +For many more stat related functions install the software R and the +interface package rpy. + +""" +from __future__ import division, print_function, absolute_import + +from .stats import * +from .distributions import * +from .morestats import * +from ._binned_statistic import * +from .kde import gaussian_kde +from . import mstats +from .contingency import chi2_contingency +from ._multivariate import * + +__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders. + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/__init__.pyc new file mode 100644 index 0000000..4dcd920 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_binned_statistic.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_binned_statistic.py new file mode 100644 index 0000000..2e63dbf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_binned_statistic.py @@ -0,0 +1,619 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from scipy._lib.six import callable, xrange +from scipy._lib._numpy_compat import suppress_warnings +from collections import namedtuple + +__all__ = ['binned_statistic', + 'binned_statistic_2d', + 'binned_statistic_dd'] + + +BinnedStatisticResult = namedtuple('BinnedStatisticResult', + ('statistic', 'bin_edges', 'binnumber')) + + +def binned_statistic(x, values, statistic='mean', + bins=10, range=None): + """ + Compute a binned statistic for one or more sets of data. + + This is a generalization of a histogram function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values (or set of values) within each bin. + + Parameters + ---------- + x : (N,) array_like + A sequence of values to be binned. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `x`, or a set of sequences - each the same shape as + `x`. If `values` is a set of sequences, the statistic will be computed + on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'median' : compute the median of values for points within each + bin. Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. 
Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : int or sequence of scalars, optional + If `bins` is an int, it defines the number of equal-width bins in the + given range (10 by default). If `bins` is a sequence, it defines the + bin edges, including the rightmost edge, allowing for non-uniform bin + widths. Values in `x` that are smaller than lowest bin edge are + assigned to bin number 0, values beyond the highest bin are assigned to + ``bins[-1]``. If the bin edges are specified, the number of bins will + be, (nx = len(bins)-1). + range : (float, float) or [(float, float)], optional + The lower and upper range of the bins. If not provided, range + is simply ``(x.min(), x.max())``. Values outside the range are + ignored. + + Returns + ------- + statistic : array + The values of the selected statistic in each bin. + bin_edges : array of dtype float + Return the bin edges ``(length(statistic)+1)``. + binnumber: 1-D ndarray of ints + Indices of the bins (corresponding to `bin_edges`) in which each value + of `x` belongs. Same length as `values`. A binnumber of `i` means the + corresponding value is between (bin_edges[i-1], bin_edges[i]). + + See Also + -------- + numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, + but excluding 2) and the second ``[2, 3)``. The last bin, however, is + ``[3, 4]``, which *includes* 4. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + First some basic examples: + + Create two evenly spaced bins in the range of the given sample, and sum the + corresponding values in each of those bins: + + >>> values = [1.0, 1.0, 2.0, 1.5, 3.0] + >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) + (array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2])) + + Multiple arrays of values can also be passed. The statistic is calculated + on each set independently: + + >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]] + >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) + (array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]), + array([1, 1, 1, 2, 2])) + + >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean', + ... bins=3) + (array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), + array([1, 2, 1, 2, 3])) + + As a second example, we now generate some random data of sailing boat speed + as a function of wind speed, and then determine how fast our boat is for + certain wind speeds: + + >>> windspeed = 8 * np.random.rand(500) + >>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500) + >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed, + ... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7]) + >>> plt.figure() + >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data') + >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5, + ... 
label='binned statistic of data') + >>> plt.legend() + + Now we can use ``binnumber`` to select all datapoints with a windspeed + below 1: + + >>> low_boatspeed = boatspeed[binnumber == 0] + + As a final example, we will use ``bin_edges`` and ``binnumber`` to make a + plot of a distribution that shows the mean and distribution around that + mean per bin, on top of a regular histogram and the probability + distribution function: + + >>> x = np.linspace(0, 5, num=500) + >>> x_pdf = stats.maxwell.pdf(x) + >>> samples = stats.maxwell.rvs(size=10000) + + >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf, + ... statistic='mean', bins=25) + >>> bin_width = (bin_edges[1] - bin_edges[0]) + >>> bin_centers = bin_edges[1:] - bin_width/2 + + >>> plt.figure() + >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled', + ... alpha=0.2, label='histogram of data') + >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf') + >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2, + ... label='binned statistic of data') + >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5) + >>> plt.legend(fontsize=10) + >>> plt.show() + + """ + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1: + bins = [np.asarray(bins, float)] + + if range is not None: + if len(range) == 2: + range = [range] + + medians, edges, binnumbers = binned_statistic_dd( + [x], values, statistic, bins, range) + + return BinnedStatisticResult(medians, edges[0], binnumbers) + + +BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult', + ('statistic', 'x_edge', 'y_edge', + 'binnumber')) + + +def binned_statistic_2d(x, y, values, statistic='mean', + bins=10, range=None, expand_binnumbers=False): + """ + Compute a bidimensional binned statistic for one or more sets of data. + + This is a generalization of a histogram2d function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values (or set of values) within each bin. + + Parameters + ---------- + x : (N,) array_like + A sequence of values to be binned along the first dimension. + y : (N,) array_like + A sequence of values to be binned along the second dimension. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `x`, or a list of sequences - each with the same + shape as `x`. If `values` is such a list, the statistic will be + computed on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'median' : compute the median of values for points within each + bin. Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. 
This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : int or [int, int] or array_like or [array, array], optional + The bin specification: + + * the number of bins for the two dimensions (nx = ny = bins), + * the number of bins in each dimension (nx, ny = bins), + * the bin edges for the two dimensions (x_edge = y_edge = bins), + * the bin edges in each dimension (x_edge, y_edge = bins). + + If the bin edges are specified, the number of bins will be, + (nx = len(x_edge)-1, ny = len(y_edge)-1). + + range : (2,2) array_like, optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be + considered outliers and not tallied in the histogram. + expand_binnumbers : bool, optional + 'False' (default): the returned `binnumber` is a shape (N,) array of + linearized bin indices. + 'True': the returned `binnumber` is 'unraveled' into a shape (2,N) + ndarray, where each row gives the bin numbers in the corresponding + dimension. + See the `binnumber` returned value, and the `Examples` section. + + .. versionadded:: 0.17.0 + + Returns + ------- + statistic : (nx, ny) ndarray + The values of the selected statistic in each two-dimensional bin. + x_edge : (nx + 1) ndarray + The bin edges along the first dimension. + y_edge : (ny + 1) ndarray + The bin edges along the second dimension. + binnumber : (N,) array of ints or (2,N) ndarray of ints + This assigns to each element of `sample` an integer that represents the + bin in which this observation falls. The representation depends on the + `expand_binnumbers` argument. See `Notes` for details. + + + See Also + -------- + numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd + + Notes + ----- + Binedges: + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, + but excluding 2) and the second ``[2, 3)``. The last bin, however, is + ``[3, 4]``, which *includes* 4. + + `binnumber`: + This returned argument assigns to each element of `sample` an integer that + represents the bin in which it belongs. The representation depends on the + `expand_binnumbers` argument. If 'False' (default): The returned + `binnumber` is a shape (N,) array of linearized indices mapping each + element of `sample` to its corresponding bin (using row-major ordering). + If 'True': The returned `binnumber` is a shape (2,N) ndarray where + each row indicates bin placements for each dimension respectively. In each + dimension, a binnumber of `i` means the corresponding value is between + (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy import stats + + Calculate the counts with explicit bin-edges: + + >>> x = [0.1, 0.1, 0.1, 0.6] + >>> y = [2.1, 2.6, 2.1, 2.1] + >>> binx = [0.0, 0.5, 1.0] + >>> biny = [2.0, 2.5, 3.0] + >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny]) + >>> ret.statistic + array([[ 2., 1.], + [ 1., 0.]]) + + The bin in which each sample is placed is given by the `binnumber` + returned parameter. 
By default, these are the linearized bin indices: + + >>> ret.binnumber + array([5, 6, 5, 9]) + + The bin indices can also be expanded into separate entries for each + dimension using the `expand_binnumbers` parameter: + + >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny], + ... expand_binnumbers=True) + >>> ret.binnumber + array([[1, 1, 1, 2], + [1, 2, 1, 1]]) + + Which shows that the first three elements belong in the xbin 1, and the + fourth into xbin 2; and so on for y. + + """ + + # This code is based on np.histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + xedges = yedges = np.asarray(bins, float) + bins = [xedges, yedges] + + medians, edges, binnumbers = binned_statistic_dd( + [x, y], values, statistic, bins, range, + expand_binnumbers=expand_binnumbers) + + return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers) + + +BinnedStatisticddResult = namedtuple('BinnedStatisticddResult', + ('statistic', 'bin_edges', + 'binnumber')) + + +def binned_statistic_dd(sample, values, statistic='mean', + bins=10, range=None, expand_binnumbers=False): + """ + Compute a multidimensional binned statistic for a set of data. + + This is a generalization of a histogramdd function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values within each bin. + + Parameters + ---------- + sample : array_like + Data to histogram passed as a sequence of D arrays of length N, or + as an (N,D) array. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `sample`, or a list of sequences - each with the + same shape as `sample`. If `values` is such a list, the statistic + will be computed on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'median' : compute the median of values for points within each + bin. Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : sequence or int, optional + The bin specification must be in one of the following forms: + + * A sequence of arrays describing the bin edges along each dimension. + * The number of bins for each dimension (nx, ny, ... = bins). + * The number of bins for all dimensions (nx = ny = ... = bins). + + range : sequence, optional + A sequence of lower and upper bin edges to be used if the edges are + not given explicitly in `bins`. Defaults to the minimum and maximum + values along each dimension. 
+ expand_binnumbers : bool, optional + 'False' (default): the returned `binnumber` is a shape (N,) array of + linearized bin indices. + 'True': the returned `binnumber` is 'unraveled' into a shape (D,N) + ndarray, where each row gives the bin numbers in the corresponding + dimension. + See the `binnumber` returned value, and the `Examples` section of + `binned_statistic_2d`. + + .. versionadded:: 0.17.0 + + Returns + ------- + statistic : ndarray, shape(nx1, nx2, nx3,...) + The values of the selected statistic in each bin. + bin_edges : list of ndarrays + A list of D arrays describing the (nxi + 1) bin edges for each + dimension. + binnumber : (N,) array of ints or (D,N) ndarray of ints + This assigns to each element of `sample` an integer that represents the + bin in which this observation falls. The representation depends on the + `expand_binnumbers` argument. See `Notes` for details. + + + See Also + -------- + numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d + + Notes + ----- + Binedges: + All but the last (righthand-most) bin is half-open in each dimension. In + other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is + ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The + last bin, however, is ``[3, 4]``, which *includes* 4. + + `binnumber`: + This returned argument assigns to each element of `sample` an integer that + represents the bin in which it belongs. The representation depends on the + `expand_binnumbers` argument. If 'False' (default): The returned + `binnumber` is a shape (N,) array of linearized indices mapping each + element of `sample` to its corresponding bin (using row-major ordering). + If 'True': The returned `binnumber` is a shape (D,N) ndarray where + each row indicates bin placements for each dimension respectively. In each + dimension, a binnumber of `i` means the corresponding value is between + (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'. + + .. versionadded:: 0.11.0 + + """ + known_stats = ['mean', 'median', 'count', 'sum', 'std','min','max'] + if not callable(statistic) and statistic not in known_stats: + raise ValueError('invalid statistic %r' % (statistic,)) + + # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`) + # `Dlen` is the length of elements along each dimension. + # This code is based on np.histogramdd + try: + # `sample` is an ND-array. + Dlen, Ndim = sample.shape + except (AttributeError, ValueError): + # `sample` is a sequence of 1D arrays. + sample = np.atleast_2d(sample).T + Dlen, Ndim = sample.shape + + # Store initial shape of `values` to preserve it in the output + values = np.asarray(values) + input_shape = list(values.shape) + # Make sure that `values` is 2D to iterate over rows + values = np.atleast_2d(values) + Vdim, Vlen = values.shape + + # Make sure `values` match `sample` + if(statistic != 'count' and Vlen != Dlen): + raise AttributeError('The number of `values` elements must match the ' + 'length of each `sample` dimension.') + + nbin = np.empty(Ndim, int) # Number of bins in each dimension + edges = Ndim * [None] # Bin edges for each dim (will be 2D array) + dedges = Ndim * [None] # Spacing between edges (will be 2D array) + + try: + M = len(bins) + if M != Ndim: + raise AttributeError('The dimension of bins must be equal ' + 'to the dimension of the sample x.') + except TypeError: + bins = Ndim * [bins] + + # Select range for each dimension + # Used only if number of bins is given.
+ if range is None: + smin = np.atleast_1d(np.array(sample.min(axis=0), float)) + smax = np.atleast_1d(np.array(sample.max(axis=0), float)) + else: + smin = np.zeros(Ndim) + smax = np.zeros(Ndim) + for i in xrange(Ndim): + smin[i], smax[i] = range[i] + + # Make sure the bins have a finite width. + for i in xrange(len(smin)): + if smin[i] == smax[i]: + smin[i] = smin[i] - .5 + smax[i] = smax[i] + .5 + + # Create edge arrays + for i in xrange(Ndim): + if np.isscalar(bins[i]): + nbin[i] = bins[i] + 2 # +2 for outlier bins + edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1) + else: + edges[i] = np.asarray(bins[i], float) + nbin[i] = len(edges[i]) + 1 # +1 for outlier bins + dedges[i] = np.diff(edges[i]) + + nbin = np.asarray(nbin) + + # Compute the bin number each sample falls into, in each dimension + sampBin = [ + np.digitize(sample[:, i], edges[i]) + for i in xrange(Ndim) + ] + + # Using `digitize`, values that fall on an edge are put in the right bin. + # For the rightmost bin, we want values equal to the right + # edge to be counted in the last bin, and not as an outlier. + for i in xrange(Ndim): + # Find the rounding precision + decimal = int(-np.log10(dedges[i].min())) + 6 + # Find which points are on the rightmost edge. + on_edge = np.where(np.around(sample[:, i], decimal) == + np.around(edges[i][-1], decimal))[0] + # Shift these points one bin to the left. + sampBin[i][on_edge] -= 1 + + # Compute the sample indices in the flattened statistic matrix. + binnumbers = np.ravel_multi_index(sampBin, nbin) + + result = np.empty([Vdim, nbin.prod()], float) + + if statistic == 'mean': + result.fill(np.nan) + flatcount = np.bincount(binnumbers, None) + a = flatcount.nonzero() + for vv in xrange(Vdim): + flatsum = np.bincount(binnumbers, values[vv]) + result[vv, a] = flatsum[a] / flatcount[a] + elif statistic == 'std': + result.fill(0) + flatcount = np.bincount(binnumbers, None) + a = flatcount.nonzero() + for vv in xrange(Vdim): + flatsum = np.bincount(binnumbers, values[vv]) + flatsum2 = np.bincount(binnumbers, values[vv] ** 2) + result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] - + (flatsum[a] / flatcount[a]) ** 2) + elif statistic == 'count': + result.fill(0) + flatcount = np.bincount(binnumbers, None) + a = np.arange(len(flatcount)) + result[:, a] = flatcount[np.newaxis, :] + elif statistic == 'sum': + result.fill(0) + for vv in xrange(Vdim): + flatsum = np.bincount(binnumbers, values[vv]) + a = np.arange(len(flatsum)) + result[vv, a] = flatsum + elif statistic == 'median': + result.fill(np.nan) + for i in np.unique(binnumbers): + for vv in xrange(Vdim): + result[vv, i] = np.median(values[vv, binnumbers == i]) + elif statistic == 'min': + result.fill(np.nan) + for i in np.unique(binnumbers): + for vv in xrange(Vdim): + result[vv, i] = np.min(values[vv, binnumbers == i]) + elif statistic == 'max': + result.fill(np.nan) + for i in np.unique(binnumbers): + for vv in xrange(Vdim): + result[vv, i] = np.max(values[vv, binnumbers == i]) + elif callable(statistic): + with np.errstate(invalid='ignore'), suppress_warnings() as sup: + sup.filter(RuntimeWarning) + try: + null = statistic([]) + except Exception: + null = np.nan + result.fill(null) + for i in np.unique(binnumbers): + for vv in xrange(Vdim): + result[vv, i] = statistic(values[vv, binnumbers == i]) + + # Shape into a proper matrix + result = result.reshape(np.append(Vdim, nbin)) + + # Remove outliers (indices 0 and -1 for each bin-dimension). 
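+    # Each dimension was allotted two extra outlier bins (one below and one
+    # above the requested range) when the edge arrays were built above; the
+    # 1:-1 slices below drop them so only in-range bins remain in the output.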
+ core = tuple([slice(None)] + Ndim * [slice(1, -1)]) + result = result[core] + + # Unravel binnumbers into an ndarray, each row the bins for each dimension + if(expand_binnumbers and Ndim > 1): + binnumbers = np.asarray(np.unravel_index(binnumbers, nbin)) + + if np.any(result.shape[1:] != nbin - 2): + raise RuntimeError('Internal Shape Error') + + # Reshape to have output (`result`) match input (`values`) shape + result = result.reshape(input_shape[:-1] + list(nbin-2)) + + return BinnedStatisticddResult(result, edges, binnumbers) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_binned_statistic.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_binned_statistic.pyc new file mode 100644 index 0000000..81fc361 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_binned_statistic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_constants.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_constants.py new file mode 100644 index 0000000..f59851e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_constants.py @@ -0,0 +1,27 @@ +""" +Statistics-related constants. + +""" +from __future__ import division, print_function, absolute_import + +import numpy as np + + +# The smallest representable positive number such that 1.0 + _EPS != 1.0. +_EPS = np.finfo(float).eps + +# The largest [in magnitude] usable floating value. +_XMAX = np.finfo(float).max + +# The log of the largest usable floating value; useful for knowing +# when exp(something) will overflow +_LOGXMAX = np.log(_XMAX) + +# The smallest [in magnitude] usable floating value. +_XMIN = np.finfo(float).tiny + +# -special.psi(1) +_EULER = 0.577215664901532860606512090082402431042 + +# special.zeta(3, 1) Apery's constant +_ZETA3 = 1.202056903159594285399738161511449990765 diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_constants.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_constants.pyc new file mode 100644 index 0000000..90b886c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_constants.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_continuous_distns.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_continuous_distns.py new file mode 100644 index 0000000..5744b22 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_continuous_distns.py @@ -0,0 +1,7278 @@ +# -*- coding: utf-8 -*- +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +from __future__ import division, print_function, absolute_import + +import warnings + +import numpy as np + +from scipy.misc.doccer import (extend_notes_in_docstring, + replace_notes_in_docstring) +from scipy import optimize +from scipy import integrate +from scipy import interpolate +import scipy.special as sc +import scipy.special._ufuncs as scu +from scipy._lib._numpy_compat import broadcast_to +from scipy._lib._util import _lazyselect, _lazywhere + +from . import _stats +from ._tukeylambda_stats import (tukeylambda_variance as _tlvar, + tukeylambda_kurtosis as _tlkurt) +from ._distn_infrastructure import (get_distribution_names, _kurtosis, + _ncx2_cdf, _ncx2_log_pdf, _ncx2_pdf, + rv_continuous, _skew, valarray) +from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX + + +# In numpy 1.12 and above, np.power refuses to raise integers to negative +# powers, and `np.float_power` is a new replacement.
+try: + float_power = np.float_power +except AttributeError: + float_power = np.power + + +## Kolmogorov-Smirnov one-sided and two-sided test statistics +class ksone_gen(rv_continuous): + r"""General Kolmogorov-Smirnov one-sided test. + + This is the distribution of the one-sided Kolmogorov-Smirnov (KS) + statistics :math:`\sqrt{n} D_n^+` and :math:`\sqrt{n} D_n^-` + for a finite sample size ``n`` (the shape parameter). + + %(before_notes)s + + Notes + ----- + :math:`\sqrt{n} D_n^+` and :math:`\sqrt{n} D_n^-` are given by + + .. math:: + + D_n^+ &= \text{sup}_x (F_n(x) - F(x)),\\ + D_n^- &= \text{sup}_x (F(x) - F_n(x)),\\ + + where :math:`F` is a CDF and :math:`F_n` is an empirical CDF. `ksone` + describes the distribution under the null hypothesis of the KS test + that the empirical CDF corresponds to :math:`n` i.i.d. random variates + with CDF :math:`F`. + + %(after_notes)s + + See Also + -------- + kstwobign, kstest + + References + ---------- + .. [1] Birnbaum, Z. W. and Tingey, F.H. "One-sided confidence contours + for probability distribution functions", The Annals of Mathematical + Statistics, 22(4), pp 592-596 (1951). + + %(example)s + + """ + def _pdf(self, x, n): + return -scu._smirnovp(n, x) + + def _cdf(self, x, n): + return scu._smirnovc(n, x) + + def _sf(self, x, n): + return sc.smirnov(n, x) + + def _ppf(self, q, n): + return scu._smirnovci(n, q) + + def _isf(self, q, n): + return sc.smirnovi(n, q) + + +ksone = ksone_gen(a=0.0, name='ksone') + + +class kstwobign_gen(rv_continuous): + r"""Kolmogorov-Smirnov two-sided test for large N. + + This is the asymptotic distribution of the two-sided Kolmogorov-Smirnov + statistic :math:`\sqrt{n} D_n` that measures the maximum absolute + distance of the theoretical CDF from the empirical CDF (see `kstest`). + + %(before_notes)s + + Notes + ----- + :math:`\sqrt{n} D_n` is given by + + .. math:: + + D_n = \text{sup}_x |F_n(x) - F(x)| + + where :math:`F` is a CDF and :math:`F_n` is an empirical CDF. `kstwobign` + describes the asymptotic distribution (i.e. the limit of + :math:`\sqrt{n} D_n`) under the null hypothesis of the KS test that the + empirical CDF corresponds to i.i.d. random variates with CDF :math:`F`. + + %(after_notes)s + + See Also + -------- + ksone, kstest + + References + ---------- + .. [1] Marsaglia, G. et al. "Evaluating Kolmogorov's distribution", + Journal of Statistical Software, 8(18), 2003. + + %(example)s + + """ + def _pdf(self, x): + return -scu._kolmogp(x) + + def _cdf(self, x): + return scu._kolmogc(x) + + def _sf(self, x): + return sc.kolmogorov(x) + + def _ppf(self, q): + return scu._kolmogci(q) + + def _isf(self, q): + return sc.kolmogi(q) + + +kstwobign = kstwobign_gen(a=0.0, name='kstwobign') + + +## Normal distribution + +# loc = mu, scale = std +# Keep these implementations out of the class definition so they can be reused +# by other distributions. +_norm_pdf_C = np.sqrt(2*np.pi) +_norm_pdf_logC = np.log(_norm_pdf_C) + + +def _norm_pdf(x): + return np.exp(-x**2/2.0) / _norm_pdf_C + + +def _norm_logpdf(x): + return -x**2 / 2.0 - _norm_pdf_logC + + +def _norm_cdf(x): + return sc.ndtr(x) + + +def _norm_logcdf(x): + return sc.log_ndtr(x) + + +def _norm_ppf(q): + return sc.ndtri(q) + + +def _norm_sf(x): + return _norm_cdf(-x) + + +def _norm_logsf(x): + return _norm_logcdf(-x) + + +def _norm_isf(q): + return -_norm_ppf(q) + + +class norm_gen(rv_continuous): + r"""A normal continuous random variable. + + The location (``loc``) keyword specifies the mean. 
+ The scale (``scale``) keyword specifies the standard deviation. + + %(before_notes)s + + Notes + ----- + The probability density function for `norm` is: + + .. math:: + + f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}} + + for a real number :math:`x`. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self): + return self._random_state.standard_normal(self._size) + + def _pdf(self, x): + # norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi) + return _norm_pdf(x) + + def _logpdf(self, x): + return _norm_logpdf(x) + + def _cdf(self, x): + return _norm_cdf(x) + + def _logcdf(self, x): + return _norm_logcdf(x) + + def _sf(self, x): + return _norm_sf(x) + + def _logsf(self, x): + return _norm_logsf(x) + + def _ppf(self, q): + return _norm_ppf(q) + + def _isf(self, q): + return _norm_isf(q) + + def _stats(self): + return 0.0, 1.0, 0.0, 0.0 + + def _entropy(self): + return 0.5*(np.log(2*np.pi)+1) + + @replace_notes_in_docstring(rv_continuous, notes="""\ + This function uses explicit formulas for the maximum likelihood + estimation of the normal distribution parameters, so the + `optimizer` argument is ignored.\n\n""") + def fit(self, data, **kwds): + floc = kwds.get('floc', None) + fscale = kwds.get('fscale', None) + + if floc is not None and fscale is not None: + # This check is for consistency with `rv_continuous.fit`. + # Without this check, this function would just return the + # parameters that were given. + raise ValueError("All parameters fixed. There is nothing to " + "optimize.") + + data = np.asarray(data) + + if floc is None: + loc = data.mean() + else: + loc = floc + + if fscale is None: + scale = np.sqrt(((data - loc)**2).mean()) + else: + scale = fscale + + return loc, scale + + +norm = norm_gen(name='norm') + + +class alpha_gen(rv_continuous): + r"""An alpha continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `alpha` is: + + .. math:: + + f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} * + \exp(-\frac{1}{2} (a-1/x)^2) + + where :math:`\Phi` is the normal CDF, :math:`x > 0`, and :math:`a > 0`. + + `alpha` takes ``a`` as a shape parameter. + + %(after_notes)s + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _pdf(self, x, a): + # alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2) + return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x) + + def _logpdf(self, x, a): + return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a)) + + def _cdf(self, x, a): + return _norm_cdf(a-1.0/x) / _norm_cdf(a) + + def _ppf(self, q, a): + return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a))) + + def _stats(self, a): + return [np.inf]*2 + [np.nan]*2 + + +alpha = alpha_gen(a=0.0, name='alpha') + + +class anglit_gen(rv_continuous): + r"""An anglit continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `anglit` is: + + .. math:: + + f(x) = \sin(2x + \pi/2) = \cos(2x) + + for :math:`-\pi/4 \le x \le \pi/4`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x) + return np.cos(2*x) + + def _cdf(self, x): + return np.sin(x+np.pi/4)**2.0 + + def _ppf(self, q): + return np.arcsin(np.sqrt(q))-np.pi/4 + + def _stats(self): + return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2 + + def _entropy(self): + return 1-np.log(2) + + +anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit') + + +class arcsine_gen(rv_continuous): + r"""An arcsine continuous random variable. 
+ + %(before_notes)s + + Notes + ----- + The probability density function for `arcsine` is: + + .. math:: + + f(x) = \frac{1}{\pi \sqrt{x (1-x)}} + + for :math:`0 < x < 1`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x))) + return 1.0/np.pi/np.sqrt(x*(1-x)) + + def _cdf(self, x): + return 2.0/np.pi*np.arcsin(np.sqrt(x)) + + def _ppf(self, q): + return np.sin(np.pi/2.0*q)**2.0 + + def _stats(self): + mu = 0.5 + mu2 = 1.0/8 + g1 = 0 + g2 = -3.0/2.0 + return mu, mu2, g1, g2 + + def _entropy(self): + return -0.24156447527049044468 + + +arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine') + + +class FitDataError(ValueError): + # This exception is raised by, for example, beta_gen.fit when both floc + # and fscale are fixed and there are values in the data not in the open + # interval (floc, floc+fscale). + def __init__(self, distr, lower, upper): + self.args = ( + "Invalid values in `data`. Maximum likelihood " + "estimation with {distr!r} requires that {lower!r} < x " + "< {upper!r} for each x in `data`.".format( + distr=distr, lower=lower, upper=upper), + ) + + +class FitSolverError(RuntimeError): + # This exception is raised by, for example, beta_gen.fit when + # optimize.fsolve returns with ier != 1. + def __init__(self, mesg): + emsg = "Solver for the MLE equations failed to converge: " + emsg += mesg.replace('\n', '') + self.args = (emsg,) + + +def _beta_mle_a(a, b, n, s1): + # The zeros of this function give the MLE for `a`, with + # `b`, `n` and `s1` given. `s1` is the sum of the logs of + # the data. `n` is the number of data points. + psiab = sc.psi(a + b) + func = s1 - n * (-psiab + sc.psi(a)) + return func + + +def _beta_mle_ab(theta, n, s1, s2): + # Zeros of this function are critical points of + # the maximum likelihood function. Solving this system + # for theta (which contains a and b) gives the MLE for a and b + # given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data, + # and `s2` is the sum of the logs of 1 - data. `n` is the number + # of data points. + a, b = theta + psiab = sc.psi(a + b) + func = [s1 - n * (-psiab + sc.psi(a)), + s2 - n * (-psiab + sc.psi(b))] + return func + + +class beta_gen(rv_continuous): + r"""A beta continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `beta` is: + + .. math:: + + f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}} + {\Gamma(a) \Gamma(b)} + + for :math:`0 < x < 1`, :math:`a > 0`, :math:`b > 0`, where + :math:`\Gamma` is the gamma function (`scipy.special.gamma`). + + `beta` takes :math:`a` and :math:`b` as shape parameters. 
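+    For example, the mean formula :math:`a/(a+b)` can be checked directly
+    (an illustrative doctest; the shape values are arbitrary):
+
+    >>> from scipy.stats import beta
+    >>> beta.mean(2, 3)     # a/(a+b) = 2/5
+    0.4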
+ + %(after_notes)s + + %(example)s + + """ + def _rvs(self, a, b): + return self._random_state.beta(a, b, self._size) + + def _pdf(self, x, a, b): + # gamma(a+b) * x**(a-1) * (1-x)**(b-1) + # beta.pdf(x, a, b) = ------------------------------------ + # gamma(a)*gamma(b) + return np.exp(self._logpdf(x, a, b)) + + def _logpdf(self, x, a, b): + lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x) + lPx -= sc.betaln(a, b) + return lPx + + def _cdf(self, x, a, b): + return sc.btdtr(a, b, x) + + def _ppf(self, q, a, b): + return sc.btdtri(a, b, q) + + def _stats(self, a, b): + mn = a*1.0 / (a + b) + var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0 + g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b) + g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b)) + g2 /= a*b*(a+b+2)*(a+b+3) + return mn, var, g1, g2 + + def _fitstart(self, data): + g1 = _skew(data) + g2 = _kurtosis(data) + + def func(x): + a, b = x + sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b) + ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2) + ku /= a*b*(a+b+2)*(a+b+3) + ku *= 6 + return [sk-g1, ku-g2] + a, b = optimize.fsolve(func, (1.0, 1.0)) + return super(beta_gen, self)._fitstart(data, args=(a, b)) + + @extend_notes_in_docstring(rv_continuous, notes="""\ + In the special case where both `floc` and `fscale` are given, a + `ValueError` is raised if any value `x` in `data` does not satisfy + `floc < x < floc + fscale`.\n\n""") + def fit(self, data, *args, **kwds): + # Override rv_continuous.fit, so we can more efficiently handle the + # case where floc and fscale are given. + + f0 = (kwds.get('f0', None) or kwds.get('fa', None) or + kwds.get('fix_a', None)) + f1 = (kwds.get('f1', None) or kwds.get('fb', None) or + kwds.get('fix_b', None)) + floc = kwds.get('floc', None) + fscale = kwds.get('fscale', None) + + if floc is None or fscale is None: + # do general fit + return super(beta_gen, self).fit(data, *args, **kwds) + + if f0 is not None and f1 is not None: + # This check is for consistency with `rv_continuous.fit`. + raise ValueError("All parameters fixed. There is nothing to " + "optimize.") + + # Special case: loc and scale are constrained, so we are fitting + # just the shape parameters. This can be done much more efficiently + # than the method used in `rv_continuous.fit`. (See the subsection + # "Two unknown parameters" in the section "Maximum likelihood" of + # the Wikipedia article on the Beta distribution for the formulas.) + + # Normalize the data to the interval [0, 1]. + data = (np.ravel(data) - floc) / fscale + if np.any(data <= 0) or np.any(data >= 1): + raise FitDataError("beta", lower=floc, upper=floc + fscale) + xbar = data.mean() + + if f0 is not None or f1 is not None: + # One of the shape parameters is fixed. + + if f0 is not None: + # The shape parameter a is fixed, so swap the parameters + # and flip the data. We always solve for `a`. The result + # will be swapped back before returning. + b = f0 + data = 1 - data + xbar = 1 - xbar + else: + b = f1 + + # Initial guess for a. Use the formula for the mean of the beta + # distribution, E[x] = a / (a + b), to generate a reasonable + # starting point based on the mean of the data and the given + # value of b. + a = b * xbar / (1 - xbar) + + # Compute the MLE for `a` by solving _beta_mle_a. 
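+            # (fsolve is run with full_output=True so the convergence flag
+            # `ier` is available; a failed solve is reported through
+            # FitSolverError below rather than silently returning `theta`.)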
+ theta, info, ier, mesg = optimize.fsolve( + _beta_mle_a, a, + args=(b, len(data), np.log(data).sum()), + full_output=True + ) + if ier != 1: + raise FitSolverError(mesg=mesg) + a = theta[0] + + if f0 is not None: + # The shape parameter a was fixed, so swap back the + # parameters. + a, b = b, a + + else: + # Neither of the shape parameters is fixed. + + # s1 and s2 are used in the extra arguments passed to _beta_mle_ab + # by optimize.fsolve. + s1 = np.log(data).sum() + s2 = sc.log1p(-data).sum() + + # Use the "method of moments" to estimate the initial + # guess for a and b. + fac = xbar * (1 - xbar) / data.var(ddof=0) - 1 + a = xbar * fac + b = (1 - xbar) * fac + + # Compute the MLE for a and b by solving _beta_mle_ab. + theta, info, ier, mesg = optimize.fsolve( + _beta_mle_ab, [a, b], + args=(len(data), s1, s2), + full_output=True + ) + if ier != 1: + raise FitSolverError(mesg=mesg) + a, b = theta + + return a, b, floc, fscale + + +beta = beta_gen(a=0.0, b=1.0, name='beta') + + +class betaprime_gen(rv_continuous): + r"""A beta prime continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `betaprime` is: + + .. math:: + + f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)} + + for :math:`x > 0`, :math:`a > 0`, :math:`b > 0`, where + :math:`\beta(a, b)` is the beta function (see `scipy.special.beta`). + + `betaprime` takes ``a`` and ``b`` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _rvs(self, a, b): + sz, rndm = self._size, self._random_state + u1 = gamma.rvs(a, size=sz, random_state=rndm) + u2 = gamma.rvs(b, size=sz, random_state=rndm) + return u1 / u2 + + def _pdf(self, x, a, b): + # betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b) + return np.exp(self._logpdf(x, a, b)) + + def _logpdf(self, x, a, b): + return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b) + + def _cdf(self, x, a, b): + return sc.betainc(a, b, x/(1.+x)) + + def _munp(self, n, a, b): + if n == 1.0: + return np.where(b > 1, + a/(b-1.0), + np.inf) + elif n == 2.0: + return np.where(b > 2, + a*(a+1.0)/((b-2.0)*(b-1.0)), + np.inf) + elif n == 3.0: + return np.where(b > 3, + a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)), + np.inf) + elif n == 4.0: + return np.where(b > 4, + (a*(a + 1.0)*(a + 2.0)*(a + 3.0) / + ((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))), + np.inf) + else: + raise NotImplementedError + + +betaprime = betaprime_gen(a=0.0, name='betaprime') + + +class bradford_gen(rv_continuous): + r"""A Bradford continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `bradford` is: + + .. math:: + + f(x, c) = \frac{c}{\log(1+c) (1+cx)} + + for :math:`0 < x < 1` and :math:`c > 0`. + + `bradford` takes ``c`` as a shape parameter for :math:`c`. 
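+    Since the support is ``0 <= x <= 1``, the CDF
+    :math:`\log(1+cx) / \log(1+c)` reaches 1 at the right endpoint
+    (an illustrative doctest; the shape value is arbitrary):
+
+    >>> from scipy.stats import bradford
+    >>> bradford.cdf(1.0, 3.0)
+    1.0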
+ + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x, c): + # bradford.pdf(x, c) = c / (k * (1+c*x)) + return c / (c*x + 1.0) / sc.log1p(c) + + def _cdf(self, x, c): + return sc.log1p(c*x) / sc.log1p(c) + + def _ppf(self, q, c): + return sc.expm1(q * sc.log1p(c)) / c + + def _stats(self, c, moments='mv'): + k = np.log(1.0+c) + mu = (c-k)/(c*k) + mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k) + g1 = None + g2 = None + if 's' in moments: + g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3)) + g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k) + if 'k' in moments: + g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) + + 6*c*k*k*(3*k-14) + 12*k**3) + g2 /= 3*c*(c*(k-2)+2*k)**2 + return mu, mu2, g1, g2 + + def _entropy(self, c): + k = np.log(1+c) + return k/2.0 - np.log(c/k) + + +bradford = bradford_gen(a=0.0, b=1.0, name='bradford') + + +class burr_gen(rv_continuous): + r"""A Burr (Type III) continuous random variable. + + %(before_notes)s + + See Also + -------- + fisk : a special case of either `burr` or `burr12` with ``d=1`` + burr12 : Burr Type XII distribution + + Notes + ----- + The probability density function for `burr` is: + + .. math:: + + f(x, c, d) = c d x^{-c-1} (1+x^{-c})^{-d-1} + + for :math:`x > 0` and :math:`c, d > 0`. + + `burr` takes :math:`c` and :math:`d` as shape parameters. + + This is the PDF corresponding to the third CDF given in Burr's list; + specifically, it is equation (11) in Burr's paper [1]_. + + %(after_notes)s + + References + ---------- + .. [1] Burr, I. W. "Cumulative frequency functions", Annals of + Mathematical Statistics, 13(2), pp 215-232 (1942). + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _pdf(self, x, c, d): + # burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1) + return c * d * (x**(-c - 1.0)) * ((1 + x**(-c))**(-d - 1.0)) + + def _cdf(self, x, c, d): + return (1 + x**(-c))**(-d) + + def _ppf(self, q, c, d): + return (q**(-1.0/d) - 1)**(-1.0/c) + + def _munp(self, n, c, d): + nc = 1. * n / c + return d * sc.beta(1.0 - nc, d + nc) + + +burr = burr_gen(a=0.0, name='burr') + + +class burr12_gen(rv_continuous): + r"""A Burr (Type XII) continuous random variable. + + %(before_notes)s + + See Also + -------- + fisk : a special case of either `burr` or `burr12` with ``d=1`` + burr : Burr Type III distribution + + Notes + ----- + The probability density function for `burr12` is: + + .. math:: + + f(x, c, d) = c d x^{c-1} (1+x^c)^{-d-1} + + for :math:`x > 0` and :math:`c, d > 0`. + + `burr12` takes ``c`` and ``d`` as shape parameters for :math:`c` + and :math:`d`. + + This is the PDF corresponding to the twelfth CDF given in Burr's list; + specifically, it is equation (20) in Burr's paper [1]_. + + %(after_notes)s + + The Burr type 12 distribution is also sometimes referred to as + the Singh-Maddala distribution from NIST [2]_. + + References + ---------- + .. [1] Burr, I. W. "Cumulative frequency functions", Annals of + Mathematical Statistics, 13(2), pp 215-232 (1942). + + ..
[2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _pdf(self, x, c, d): + # burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1) + return np.exp(self._logpdf(x, c, d)) + + def _logpdf(self, x, c, d): + return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c) + + def _cdf(self, x, c, d): + return -sc.expm1(self._logsf(x, c, d)) + + def _logcdf(self, x, c, d): + return sc.log1p(-(1 + x**c)**(-d)) + + def _sf(self, x, c, d): + return np.exp(self._logsf(x, c, d)) + + def _logsf(self, x, c, d): + return sc.xlog1py(-d, x**c) + + def _ppf(self, q, c, d): + # The following is an implementation of + # ((1 - q)**(-1.0/d) - 1)**(1.0/c) + # that does a better job handling small values of q. + return sc.expm1(-1/d * sc.log1p(-q))**(1/c) + + def _munp(self, n, c, d): + nc = 1. * n / c + return d * sc.beta(1.0 + nc, d - nc) + + +burr12 = burr12_gen(a=0.0, name='burr12') + + +class fisk_gen(burr_gen): + r"""A Fisk continuous random variable. + + The Fisk distribution is also known as the log-logistic distribution. + + %(before_notes)s + + Notes + ----- + The probability density function for `fisk` is: + + .. math:: + + f(x, c) = c x^{-c-1} (1 + x^{-c})^{-2} + + for :math:`x > 0` and :math:`c > 0`. + + `fisk` takes ``c`` as a shape parameter for :math:`c`. + + `fisk` is a special case of `burr` or `burr12` with ``d=1``. + + %(after_notes)s + + See Also + -------- + burr + + %(example)s + + """ + def _pdf(self, x, c): + # fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2) + return burr_gen._pdf(self, x, c, 1.0) + + def _cdf(self, x, c): + return burr_gen._cdf(self, x, c, 1.0) + + def _ppf(self, x, c): + return burr_gen._ppf(self, x, c, 1.0) + + def _munp(self, n, c): + return burr_gen._munp(self, n, c, 1.0) + + def _entropy(self, c): + return 2 - np.log(c) + + +fisk = fisk_gen(a=0.0, name='fisk') + + +# median = loc +class cauchy_gen(rv_continuous): + r"""A Cauchy continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `cauchy` is + + .. math:: + + f(x) = \frac{1}{\pi (1 + x^2)} + + for a real number :math:`x`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # cauchy.pdf(x) = 1 / (pi * (1 + x**2)) + return 1.0/np.pi/(1.0+x*x) + + def _cdf(self, x): + return 0.5 + 1.0/np.pi*np.arctan(x) + + def _ppf(self, q): + return np.tan(np.pi*q-np.pi/2.0) + + def _sf(self, x): + return 0.5 - 1.0/np.pi*np.arctan(x) + + def _isf(self, q): + return np.tan(np.pi/2.0-np.pi*q) + + def _stats(self): + return np.nan, np.nan, np.nan, np.nan + + def _entropy(self): + return np.log(4*np.pi) + + def _fitstart(self, data, args=None): + # Initialize ML guesses using quartiles instead of moments. + p25, p50, p75 = np.percentile(data, [25, 50, 75]) + return p50, (p75 - p25)/2 + + +cauchy = cauchy_gen(name='cauchy') + + +class chi_gen(rv_continuous): + r"""A chi continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `chi` is: + + .. math:: + + f(x, k) = \frac{1}{2^{k/2-1} \Gamma \left( k/2 \right)} + x^{k-1} \exp \left( -x^2/2 \right) + + for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df`` + in the implementation). :math:`\Gamma` is the gamma function + (`scipy.special.gamma`). 
+ + Special cases of `chi` are: + + - ``chi(1, loc, scale)`` is equivalent to `halfnorm` + - ``chi(2, 0, scale)`` is equivalent to `rayleigh` + - ``chi(3, 0, scale)`` is equivalent to `maxwell` + + `chi` takes ``df`` as a shape parameter. + + %(after_notes)s + + %(example)s + + """ + + def _rvs(self, df): + sz, rndm = self._size, self._random_state + return np.sqrt(chi2.rvs(df, size=sz, random_state=rndm)) + + def _pdf(self, x, df): + # x**(df-1) * exp(-x**2/2) + # chi.pdf(x, df) = ------------------------- + # 2**(df/2-1) * gamma(df/2) + return np.exp(self._logpdf(x, df)) + + def _logpdf(self, x, df): + l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df) + return l + sc.xlogy(df - 1., x) - .5*x**2 + + def _cdf(self, x, df): + return sc.gammainc(.5*df, .5*x**2) + + def _ppf(self, q, df): + return np.sqrt(2*sc.gammaincinv(.5*df, q)) + + def _stats(self, df): + mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0) + mu2 = df - mu*mu + g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5)) + g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1) + g2 /= np.asarray(mu2**2.0) + return mu, mu2, g1, g2 + + +chi = chi_gen(a=0.0, name='chi') + + +## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2) +class chi2_gen(rv_continuous): + r"""A chi-squared continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `chi2` is: + + .. math:: + + f(x, k) = \frac{1}{2^{k/2} \Gamma \left( k/2 \right)} + x^{k/2-1} \exp \left( -x/2 \right) + + for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df`` + in the implementation). + + `chi2` takes ``df`` as a shape parameter. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, df): + return self._random_state.chisquare(df, self._size) + + def _pdf(self, x, df): + # chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2) + return np.exp(self._logpdf(x, df)) + + def _logpdf(self, x, df): + return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2. + + def _cdf(self, x, df): + return sc.chdtr(df, x) + + def _sf(self, x, df): + return sc.chdtrc(df, x) + + def _isf(self, p, df): + return sc.chdtri(df, p) + + def _ppf(self, p, df): + return self._isf(1.0-p, df) + + def _stats(self, df): + mu = df + mu2 = 2*df + g1 = 2*np.sqrt(2.0/df) + g2 = 12.0/df + return mu, mu2, g1, g2 + + +chi2 = chi2_gen(a=0.0, name='chi2') + + +class cosine_gen(rv_continuous): + r"""A cosine continuous random variable. + + %(before_notes)s + + Notes + ----- + The cosine distribution is an approximation to the normal distribution. + The probability density function for `cosine` is: + + .. math:: + + f(x) = \frac{1}{2\pi} (1+\cos(x)) + + for :math:`-\pi \le x \le \pi`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # cosine.pdf(x) = 1/(2*pi) * (1+cos(x)) + return 1.0/2/np.pi*(1+np.cos(x)) + + def _cdf(self, x): + return 1.0/2/np.pi*(np.pi + x + np.sin(x)) + + def _stats(self): + return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2) + + def _entropy(self): + return np.log(4*np.pi)-1.0 + + +cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine') + + +class dgamma_gen(rv_continuous): + r"""A double gamma continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `dgamma` is: + + .. math:: + + f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|) + + for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the + gamma function (`scipy.special.gamma`). 
+ + `dgamma` takes ``a`` as a shape parameter for :math:`a`. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, a): + sz, rndm = self._size, self._random_state + u = rndm.random_sample(size=sz) + gm = gamma.rvs(a, size=sz, random_state=rndm) + return gm * np.where(u >= 0.5, 1, -1) + + def _pdf(self, x, a): + # dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x)) + ax = abs(x) + return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax) + + def _logpdf(self, x, a): + ax = abs(x) + return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a) + + def _cdf(self, x, a): + fac = 0.5*sc.gammainc(a, abs(x)) + return np.where(x > 0, 0.5 + fac, 0.5 - fac) + + def _sf(self, x, a): + fac = 0.5*sc.gammainc(a, abs(x)) + return np.where(x > 0, 0.5-fac, 0.5+fac) + + def _ppf(self, q, a): + fac = sc.gammainccinv(a, 1-abs(2*q-1)) + return np.where(q > 0.5, fac, -fac) + + def _stats(self, a): + mu2 = a*(a+1.0) + return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0 + + +dgamma = dgamma_gen(name='dgamma') + + +class dweibull_gen(rv_continuous): + r"""A double Weibull continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `dweibull` is given by + + .. math:: + + f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c) + + for a real number :math:`x` and :math:`c > 0`. + + `dweibull` takes ``c`` as a shape parameter for :math:`c`. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, c): + sz, rndm = self._size, self._random_state + u = rndm.random_sample(size=sz) + w = weibull_min.rvs(c, size=sz, random_state=rndm) + return w * (np.where(u >= 0.5, 1, -1)) + + def _pdf(self, x, c): + # dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c) + ax = abs(x) + Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c) + return Px + + def _logpdf(self, x, c): + ax = abs(x) + return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c + + def _cdf(self, x, c): + Cx1 = 0.5 * np.exp(-abs(x)**c) + return np.where(x > 0, 1 - Cx1, Cx1) + + def _ppf(self, q, c): + fac = 2. * np.where(q <= 0.5, q, 1. - q) + fac = np.power(-np.log(fac), 1.0 / c) + return np.where(q > 0.5, fac, -fac) + + def _munp(self, n, c): + return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c) + + # since we know that all odd moments are zeros, return them at once. + # returning Nones from _stats makes the public stats call _munp + # so overall we're saving one or two gamma function evaluations here. + def _stats(self, c): + return 0, None, 0, None + + +dweibull = dweibull_gen(name='dweibull') + + +## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale) +class expon_gen(rv_continuous): + r"""An exponential continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `expon` is: + + .. math:: + + f(x) = \exp(-x) + + for :math:`x \ge 0`. + + %(after_notes)s + + A common parameterization for `expon` is in terms of the rate parameter + ``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This + parameterization corresponds to using ``scale = 1 / lambda``. 
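+    For instance, a rate of :math:`\lambda = 2` corresponds to
+    ``scale = 0.5`` (an illustrative doctest; the rate value is arbitrary):
+
+    >>> from scipy.stats import expon
+    >>> expon.pdf(1.0, scale=0.5)   # 2 * exp(-2 * 1.0)
+    0.2706705664732254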
+ + %(example)s + + """ + def _rvs(self): + return self._random_state.standard_exponential(self._size) + + def _pdf(self, x): + # expon.pdf(x) = exp(-x) + return np.exp(-x) + + def _logpdf(self, x): + return -x + + def _cdf(self, x): + return -sc.expm1(-x) + + def _ppf(self, q): + return -sc.log1p(-q) + + def _sf(self, x): + return np.exp(-x) + + def _logsf(self, x): + return -x + + def _isf(self, q): + return -np.log(q) + + def _stats(self): + return 1.0, 1.0, 2.0, 6.0 + + def _entropy(self): + return 1.0 + + @replace_notes_in_docstring(rv_continuous, notes="""\ + This function uses explicit formulas for the maximum likelihood + estimation of the exponential distribution parameters, so the + `optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""") + def fit(self, data, *args, **kwds): + if len(args) > 0: + raise TypeError("Too many arguments.") + + floc = kwds.pop('floc', None) + fscale = kwds.pop('fscale', None) + + # Ignore the optimizer-related keyword arguments, if given. + kwds.pop('loc', None) + kwds.pop('scale', None) + kwds.pop('optimizer', None) + if kwds: + raise TypeError("Unknown arguments: %s." % kwds) + + if floc is not None and fscale is not None: + # This check is for consistency with `rv_continuous.fit`. + raise ValueError("All parameters fixed. There is nothing to " + "optimize.") + + data = np.asarray(data) + data_min = data.min() + if floc is None: + # ML estimate of the location is the minimum of the data. + loc = data_min + else: + loc = floc + if data_min < loc: + # There are values that are less than the specified loc. + raise FitDataError("expon", lower=floc, upper=np.inf) + + if fscale is None: + # ML estimate of the scale is the shifted mean. + scale = data.mean() - loc + else: + scale = fscale + + # We expect the return values to be floating point, so ensure it + # by explicitly converting to float. + return float(loc), float(scale) + + +expon = expon_gen(a=0.0, name='expon') + + +## Exponentially Modified Normal (exponential distribution +## convolved with a Normal). +## This is called an exponentially modified gaussian on wikipedia +class exponnorm_gen(rv_continuous): + r"""An exponentially modified Normal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `exponnorm` is: + + .. math:: + + f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2} - x / K \right) + \text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right) + + where :math:`x` is a real number and :math:`K > 0`. + + It can be thought of as the sum of a standard normal random variable + and an independent exponentially distributed random variable with rate + ``1/K``. + + %(after_notes)s + + An alternative parameterization of this distribution (for example, in + `Wikipedia <https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_) + involves three parameters, :math:`\mu`, :math:`\lambda` and + :math:`\sigma`. + In the present parameterization this corresponds to having ``loc`` and + ``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and + shape parameter :math:`K = 1/(\sigma\lambda)`. + + .. 
versionadded:: 0.16.0 + + %(example)s + + """ + def _rvs(self, K): + expval = self._random_state.standard_exponential(self._size) * K + gval = self._random_state.standard_normal(self._size) + return expval + gval + + def _pdf(self, x, K): + # exponnorm.pdf(x, K) = + # 1/(2*K) exp(1/(2 * K**2)) exp(-x / K) * erfc-(x - 1/K) / sqrt(2)) + invK = 1.0 / K + exparg = 0.5 * invK**2 - invK * x + # Avoid overflows; setting np.exp(exparg) to the max float works + # all right here + expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX) + return 0.5 * invK * expval * sc.erfc(-(x - invK) / np.sqrt(2)) + + def _logpdf(self, x, K): + invK = 1.0 / K + exparg = 0.5 * invK**2 - invK * x + return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2))) + + def _cdf(self, x, K): + invK = 1.0 / K + expval = invK * (0.5 * invK - x) + return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK) + + def _sf(self, x, K): + invK = 1.0 / K + expval = invK * (0.5 * invK - x) + return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK) + + def _stats(self, K): + K2 = K * K + opK2 = 1.0 + K2 + skw = 2 * K**3 * opK2**(-1.5) + krt = 6.0 * K2 * K2 * opK2**(-2) + return K, opK2, skw, krt + + +exponnorm = exponnorm_gen(name='exponnorm') + + +class exponweib_gen(rv_continuous): + r"""An exponentiated Weibull continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `exponweib` is: + + .. math:: + + f(x, a, c) = a c (1-\exp(-x^c))^{a-1} \exp(-x^c) x^{c-1} + + for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`. + + `exponweib` takes :math:`a` and :math:`c` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x, a, c): + # exponweib.pdf(x, a, c) = + # a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1) + return np.exp(self._logpdf(x, a, c)) + + def _logpdf(self, x, a, c): + negxc = -x**c + exm1c = -sc.expm1(negxc) + logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) + + negxc + sc.xlogy(c - 1.0, x)) + return logp + + def _cdf(self, x, a, c): + exm1c = -sc.expm1(-x**c) + return exm1c**a + + def _ppf(self, q, a, c): + return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c) + + +exponweib = exponweib_gen(a=0.0, name='exponweib') + + +class exponpow_gen(rv_continuous): + r"""An exponential power continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `exponpow` is: + + .. math:: + + f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b)) + + for :math:`x \ge 0`, :math:`b > 0`. Note that this is a different + distribution from the exponential power distribution that is also known + under the names "generalized normal" or "generalized Gaussian". + + `exponpow` takes ``b`` as a shape parameter for :math:`b`. + + %(after_notes)s + + References + ---------- + http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf + + %(example)s + + """ + def _pdf(self, x, b): + # exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b)) + return np.exp(self._logpdf(x, b)) + + def _logpdf(self, x, b): + xb = x**b + f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb) + return f + + def _cdf(self, x, b): + return -sc.expm1(-sc.expm1(x**b)) + + def _sf(self, x, b): + return np.exp(-sc.expm1(x**b)) + + def _isf(self, x, b): + return (sc.log1p(-np.log(x)))**(1./b) + + def _ppf(self, q, b): + return pow(sc.log1p(-sc.log1p(-q)), 1.0/b) + + +exponpow = exponpow_gen(a=0.0, name='exponpow') + + +class fatiguelife_gen(rv_continuous): + r"""A fatigue-life (Birnbaum-Saunders) continuous random variable. 
+ + %(before_notes)s + + Notes + ----- + The probability density function for `fatiguelife` is: + + .. math:: + + f(x, c) = \frac{x+1}{2c\sqrt{2\pi x^3}} \exp(-\frac{(x-1)^2}{2x c^2}) + + for :math:`x > 0` and :math:`c > 0`. + + `fatiguelife` takes ``c`` as a shape parameter for :math:`c`. + + %(after_notes)s + + References + ---------- + .. [1] "Birnbaum-Saunders distribution", + https://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _rvs(self, c): + z = self._random_state.standard_normal(self._size) + x = 0.5*c*z + x2 = x*x + t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2) + return t + + def _pdf(self, x, c): + # fatiguelife.pdf(x, c) = + # (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2)) + return np.exp(self._logpdf(x, c)) + + def _logpdf(self, x, c): + return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) - + 0.5*(np.log(2*np.pi) + 3*np.log(x))) + + def _cdf(self, x, c): + return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x))) + + def _ppf(self, q, c): + tmp = c*sc.ndtri(q) + return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2 + + def _stats(self, c): + # NB: the formula for kurtosis in wikipedia seems to have an error: + # it's 40, not 41. At least it disagrees with the one from Wolfram + # Alpha. And the latter one, below, passes the tests, while the wiki + # one doesn't So far I didn't have the guts to actually check the + # coefficients from the expressions for the raw moments. + c2 = c*c + mu = c2 / 2.0 + 1.0 + den = 5.0 * c2 + 4.0 + mu2 = c2*den / 4.0 + g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5) + g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0 + return mu, mu2, g1, g2 + + +fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife') + + +class foldcauchy_gen(rv_continuous): + r"""A folded Cauchy continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `foldcauchy` is: + + .. math:: + + f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)} + + for :math:`x \ge 0`. + + `foldcauchy` takes ``c`` as a shape parameter for :math:`c`. + + %(example)s + + """ + def _rvs(self, c): + return abs(cauchy.rvs(loc=c, size=self._size, + random_state=self._random_state)) + + def _pdf(self, x, c): + # foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2)) + return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2)) + + def _cdf(self, x, c): + return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c)) + + def _stats(self, c): + return np.inf, np.inf, np.nan, np.nan + + +foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy') + + +class f_gen(rv_continuous): + r"""An F continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `f` is: + + .. math:: + + f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}} + {(df_2+df_1 x)^{(df_1+df_2)/2} + B(df_1/2, df_2/2)} + + for :math:`x > 0`. + + `f` takes ``dfn`` and ``dfd`` as shape parameters. 
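+
+    As an informal cross-check, the cdf agrees with `scipy.special.fdtr`,
+    which this implementation calls internally (a minimal sketch with
+    arbitrarily chosen arguments):
+
+    >>> import numpy as np
+    >>> from scipy import stats, special
+    >>> np.allclose(stats.f.cdf(2.0, 5, 10), special.fdtr(5, 10, 2.0))
+    True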
+ + %(after_notes)s + + %(example)s + + """ + def _rvs(self, dfn, dfd): + return self._random_state.f(dfn, dfd, self._size) + + def _pdf(self, x, dfn, dfd): + # df2**(df2/2) * df1**(df1/2) * x**(df1/2-1) + # F.pdf(x, df1, df2) = -------------------------------------------- + # (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2) + return np.exp(self._logpdf(x, dfn, dfd)) + + def _logpdf(self, x, dfn, dfd): + n = 1.0 * dfn + m = 1.0 * dfd + lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x) + lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2) + return lPx + + def _cdf(self, x, dfn, dfd): + return sc.fdtr(dfn, dfd, x) + + def _sf(self, x, dfn, dfd): + return sc.fdtrc(dfn, dfd, x) + + def _ppf(self, q, dfn, dfd): + return sc.fdtri(dfn, dfd, q) + + def _stats(self, dfn, dfd): + v1, v2 = 1. * dfn, 1. * dfd + v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8. + + mu = _lazywhere( + v2 > 2, (v2, v2_2), + lambda v2, v2_2: v2 / v2_2, + np.inf) + + mu2 = _lazywhere( + v2 > 4, (v1, v2, v2_2, v2_4), + lambda v1, v2, v2_2, v2_4: + 2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4), + np.inf) + + g1 = _lazywhere( + v2 > 6, (v1, v2_2, v2_4, v2_6), + lambda v1, v2_2, v2_4, v2_6: + (2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))), + np.nan) + g1 *= np.sqrt(8.) + + g2 = _lazywhere( + v2 > 8, (g1, v2_6, v2_8), + lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8, + np.nan) + g2 *= 3. / 2. + + return mu, mu2, g1, g2 + + +f = f_gen(a=0.0, name='f') + + +## Folded Normal +## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S) +## +## note: regress docs have scale parameter correct, but first parameter +## he gives is a shape parameter A = c * scale + +## Half-normal is folded normal with shape-parameter c=0. + +class foldnorm_gen(rv_continuous): + r"""A folded normal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `foldnorm` is: + + .. math:: + + f(x, c) = \sqrt{2/\pi} cosh(c x) \exp(-\frac{x^2+c^2}{2}) + + for :math:`c \ge 0`. + + `foldnorm` takes ``c`` as a shape parameter for :math:`c`. + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, c): + return c >= 0 + + def _rvs(self, c): + return abs(self._random_state.standard_normal(self._size) + c) + + def _pdf(self, x, c): + # foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2) + return _norm_pdf(x + c) + _norm_pdf(x-c) + + def _cdf(self, x, c): + return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0 + + def _stats(self, c): + # Regina C. Elandt, Technometrics 3, 551 (1961) + # https://www.jstor.org/stable/1266561 + # + c2 = c*c + expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi) + + mu = 2.*expfac + c * sc.erf(c/np.sqrt(2)) + mu2 = c2 + 1 - mu*mu + + g1 = 2. * (mu*mu*mu - c2*mu - expfac) + g1 /= np.power(mu2, 1.5) + + g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu + g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2 + g2 = g2 / mu2**2.0 - 3. + + return mu, mu2, g1, g2 + + +foldnorm = foldnorm_gen(a=0.0, name='foldnorm') + + +class weibull_min_gen(rv_continuous): + r"""Weibull minimum continuous random variable. + + %(before_notes)s + + See Also + -------- + weibull_max + + Notes + ----- + The probability density function for `weibull_min` is: + + .. math:: + + f(x, c) = c x^{c-1} \exp(-x^c) + + for :math:`x > 0`, :math:`c > 0`. + + `weibull_min` takes ``c`` as a shape parameter for :math:`c`. 
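+
+    For ``c=1`` the density reduces to the standard exponential, which
+    makes a quick sanity check (a minimal sketch):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = np.array([0.5, 1.0, 2.0])
+    >>> np.allclose(stats.weibull_min.pdf(x, 1), stats.expon.pdf(x))
+    True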
+ + %(after_notes)s + + %(example)s + + """ + + def _pdf(self, x, c): + # frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c) + return c*pow(x, c-1)*np.exp(-pow(x, c)) + + def _logpdf(self, x, c): + return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c) + + def _cdf(self, x, c): + return -sc.expm1(-pow(x, c)) + + def _sf(self, x, c): + return np.exp(-pow(x, c)) + + def _logsf(self, x, c): + return -pow(x, c) + + def _ppf(self, q, c): + return pow(-sc.log1p(-q), 1.0/c) + + def _munp(self, n, c): + return sc.gamma(1.0+n*1.0/c) + + def _entropy(self, c): + return -_EULER / c - np.log(c) + _EULER + 1 + + +weibull_min = weibull_min_gen(a=0.0, name='weibull_min') + + +class weibull_max_gen(rv_continuous): + r"""Weibull maximum continuous random variable. + + %(before_notes)s + + See Also + -------- + weibull_min + + Notes + ----- + The probability density function for `weibull_max` is: + + .. math:: + + f(x, c) = c (-x)^{c-1} \exp(-(-x)^c) + + for :math:`x < 0`, :math:`c > 0`. + + `weibull_max` takes ``c`` as a shape parameter for :math:`c`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x, c): + # frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c) + return c*pow(-x, c-1)*np.exp(-pow(-x, c)) + + def _logpdf(self, x, c): + return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c) + + def _cdf(self, x, c): + return np.exp(-pow(-x, c)) + + def _logcdf(self, x, c): + return -pow(-x, c) + + def _sf(self, x, c): + return -sc.expm1(-pow(-x, c)) + + def _ppf(self, q, c): + return -pow(-np.log(q), 1.0/c) + + def _munp(self, n, c): + val = sc.gamma(1.0+n*1.0/c) + if int(n) % 2: + sgn = -1 + else: + sgn = 1 + return sgn * val + + def _entropy(self, c): + return -_EULER / c - np.log(c) + _EULER + 1 + + +weibull_max = weibull_max_gen(b=0.0, name='weibull_max') + +# Public methods to be deprecated in frechet_r and frechet_l: +# ['__call__', 'cdf', 'entropy', 'expect', 'fit', 'fit_loc_scale', 'freeze', +# 'interval', 'isf', 'logcdf', 'logpdf', 'logsf', 'mean', 'median', 'moment', +# 'nnlf', 'pdf', 'ppf', 'rvs', 'sf', 'stats', 'std', 'var'] + +_frechet_r_deprec_msg = """\ +The distribution `frechet_r` is a synonym for `weibull_min`; this historical +usage is deprecated because of possible confusion with the (quite different) +Frechet distribution. To preserve the existing behavior of the program, use +`scipy.stats.weibull_min`. For the Frechet distribution (i.e. 
the Type II +extreme value distribution), use `scipy.stats.invweibull`.""" + +class frechet_r_gen(weibull_min_gen): + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def __call__(self, *args, **kwargs): + return weibull_min_gen.__call__(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def cdf(self, *args, **kwargs): + return weibull_min_gen.cdf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def entropy(self, *args, **kwargs): + return weibull_min_gen.entropy(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def expect(self, *args, **kwargs): + return weibull_min_gen.expect(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def fit(self, *args, **kwargs): + return weibull_min_gen.fit(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def fit_loc_scale(self, *args, **kwargs): + return weibull_min_gen.fit_loc_scale(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def freeze(self, *args, **kwargs): + return weibull_min_gen.freeze(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def interval(self, *args, **kwargs): + return weibull_min_gen.interval(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def isf(self, *args, **kwargs): + return weibull_min_gen.isf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def logcdf(self, *args, **kwargs): + return weibull_min_gen.logcdf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def logpdf(self, *args, **kwargs): + return weibull_min_gen.logpdf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def logsf(self, *args, **kwargs): + return weibull_min_gen.logsf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def mean(self, *args, **kwargs): + return weibull_min_gen.mean(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def median(self, *args, **kwargs): + return weibull_min_gen.median(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def moment(self, *args, **kwargs): + return weibull_min_gen.moment(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def nnlf(self, *args, **kwargs): + return weibull_min_gen.nnlf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def pdf(self, *args, **kwargs): + return weibull_min_gen.pdf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def ppf(self, *args, **kwargs): + return weibull_min_gen.ppf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def rvs(self, *args, **kwargs): + return weibull_min_gen.rvs(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def sf(self, *args, **kwargs): + return weibull_min_gen.sf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def stats(self, *args, **kwargs): + return weibull_min_gen.stats(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', 
message=_frechet_r_deprec_msg) + def std(self, *args, **kwargs): + return weibull_min_gen.std(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) + def var(self, *args, **kwargs): + return weibull_min_gen.var(self, *args, **kwargs) + + +frechet_r = frechet_r_gen(a=0.0, name='frechet_r') + + +_frechet_l_deprec_msg = """\ +The distribution `frechet_l` is a synonym for `weibull_max`; this historical +usage is deprecated because of possible confusion with the (quite different) +Frechet distribution. To preserve the existing behavior of the program, use +`scipy.stats.weibull_max`. For the Frechet distribution (i.e. the Type II +extreme value distribution), use `scipy.stats.invweibull`.""" + +class frechet_l_gen(weibull_max_gen): + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def __call__(self, *args, **kwargs): + return weibull_max_gen.__call__(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def cdf(self, *args, **kwargs): + return weibull_max_gen.cdf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def entropy(self, *args, **kwargs): + return weibull_max_gen.entropy(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def expect(self, *args, **kwargs): + return weibull_max_gen.expect(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def fit(self, *args, **kwargs): + return weibull_max_gen.fit(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def fit_loc_scale(self, *args, **kwargs): + return weibull_max_gen.fit_loc_scale(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def freeze(self, *args, **kwargs): + return weibull_max_gen.freeze(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def interval(self, *args, **kwargs): + return weibull_max_gen.interval(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def isf(self, *args, **kwargs): + return weibull_max_gen.isf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def logcdf(self, *args, **kwargs): + return weibull_max_gen.logcdf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def logpdf(self, *args, **kwargs): + return weibull_max_gen.logpdf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def logsf(self, *args, **kwargs): + return weibull_max_gen.logsf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def mean(self, *args, **kwargs): + return weibull_max_gen.mean(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def median(self, *args, **kwargs): + return weibull_max_gen.median(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def moment(self, *args, **kwargs): + return weibull_max_gen.moment(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def nnlf(self, *args, **kwargs): + return weibull_max_gen.nnlf(self, *args, **kwargs) + + @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) + def pdf(self, *args, **kwargs): + return weibull_max_gen.pdf(self, *args, **kwargs) + + 
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
+    def ppf(self, *args, **kwargs):
+        return weibull_max_gen.ppf(self, *args, **kwargs)
+
+    @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
+    def rvs(self, *args, **kwargs):
+        return weibull_max_gen.rvs(self, *args, **kwargs)
+
+    @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
+    def sf(self, *args, **kwargs):
+        return weibull_max_gen.sf(self, *args, **kwargs)
+
+    @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
+    def stats(self, *args, **kwargs):
+        return weibull_max_gen.stats(self, *args, **kwargs)
+
+    @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
+    def std(self, *args, **kwargs):
+        return weibull_max_gen.std(self, *args, **kwargs)
+
+    @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
+    def var(self, *args, **kwargs):
+        return weibull_max_gen.var(self, *args, **kwargs)
+
+
+frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
+
+
+class genlogistic_gen(rv_continuous):
+    r"""A generalized logistic continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `genlogistic` is:
+
+    .. math::
+
+        f(x, c) = c \frac{\exp(-x)}
+                         {(1 + \exp(-x))^{c+1}}
+
+    for real :math:`x` and :math:`c > 0`.
+
+    `genlogistic` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _pdf(self, x, c):
+        # genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
+        return np.exp(self._logpdf(x, c))
+
+    def _logpdf(self, x, c):
+        return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x))
+
+    def _cdf(self, x, c):
+        Cx = (1+np.exp(-x))**(-c)
+        return Cx
+
+    def _ppf(self, q, c):
+        vals = -np.log(pow(q, -1.0/c)-1)
+        return vals
+
+    def _stats(self, c):
+        mu = _EULER + sc.psi(c)
+        mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
+        g1 = -2*sc.zeta(3, c) + 2*_ZETA3
+        g1 /= np.power(mu2, 1.5)
+        g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
+        g2 /= mu2**2.0
+        return mu, mu2, g1, g2
+
+
+genlogistic = genlogistic_gen(name='genlogistic')
+
+
+class genpareto_gen(rv_continuous):
+    r"""A generalized Pareto continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `genpareto` is:
+
+    .. math::
+
+        f(x, c) = (1 + c x)^{-1 - 1/c}
+
+    defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
+    :math:`0 \le x \le -1/c` if :math:`c < 0`.
+
+    `genpareto` takes ``c`` as a shape parameter for :math:`c`.
+
+    For :math:`c=0`, `genpareto` reduces to the exponential
+    distribution, `expon`:
+
+    .. math::
+
+        f(x, 0) = \exp(-x)
+
+    For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
+
+    .. math::
+
+        f(x, -1) = 1
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, c):
+        c = np.asarray(c)
+        self.b = _lazywhere(c < 0, (c,),
+                            lambda c: -1.
/ c, + np.inf) + return True + + def _pdf(self, x, c): + # genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c) + return np.exp(self._logpdf(x, c)) + + def _logpdf(self, x, c): + return _lazywhere((x == x) & (c != 0), (x, c), + lambda x, c: -sc.xlog1py(c + 1., c*x) / c, + -x) + + def _cdf(self, x, c): + return -sc.inv_boxcox1p(-x, -c) + + def _sf(self, x, c): + return sc.inv_boxcox(-x, -c) + + def _logsf(self, x, c): + return _lazywhere((x == x) & (c != 0), (x, c), + lambda x, c: -sc.log1p(c*x) / c, + -x) + + def _ppf(self, q, c): + return -sc.boxcox1p(-q, -c) + + def _isf(self, q, c): + return -sc.boxcox(q, -c) + + def _munp(self, n, c): + def __munp(n, c): + val = 0.0 + k = np.arange(0, n + 1) + for ki, cnk in zip(k, sc.comb(n, k)): + val = val + cnk * (-1) ** ki / (1.0 - c * ki) + return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf) + return _lazywhere(c != 0, (c,), + lambda c: __munp(n, c), + sc.gamma(n + 1)) + + def _entropy(self, c): + return 1. + c + + +genpareto = genpareto_gen(a=0.0, name='genpareto') + + +class genexpon_gen(rv_continuous): + r"""A generalized exponential continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `genexpon` is: + + .. math:: + + f(x, a, b, c) = (a + b (1 - \exp(-c x))) + \exp(-a x - b x + \frac{b}{c} (1-\exp(-c x))) + + for :math:`x \ge 0`, :math:`a, b, c > 0`. + + `genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters. + + %(after_notes)s + + References + ---------- + H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential + Distribution", Journal of the American Statistical Association, 1993. + + N. Balakrishnan, "The Exponential Distribution: Theory, Methods and + Applications", Asit P. Basu. + + %(example)s + + """ + def _pdf(self, x, a, b, c): + # genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \ + # exp(-a*x - b*x + b/c * (1-exp(-c*x))) + return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x + + b*(-sc.expm1(-c*x))/c) + + def _cdf(self, x, a, b, c): + return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c) + + def _logpdf(self, x, a, b, c): + return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c + + +genexpon = genexpon_gen(a=0.0, name='genexpon') + + +class genextreme_gen(rv_continuous): + r"""A generalized extreme value continuous random variable. + + %(before_notes)s + + See Also + -------- + gumbel_r + + Notes + ----- + For :math:`c=0`, `genextreme` is equal to `gumbel_r`. + The probability density function for `genextreme` is: + + .. math:: + + f(x, c) = \begin{cases} + \exp(-\exp(-x)) \exp(-x) &\text{for } c = 0\\ + \exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1} &\text{for } + x \le 1/c, c > 0 + \end{cases} + + + Note that several sources and software packages use the opposite + convention for the sign of the shape parameter :math:`c`. + + `genextreme` takes ``c`` as a shape parameter for :math:`c`. 
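+
+    The :math:`c=0` special case can be checked against `gumbel_r`
+    directly (a minimal sketch):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = np.linspace(-2, 2, 5)
+    >>> np.allclose(stats.genextreme.pdf(x, 0), stats.gumbel_r.pdf(x))
+    True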
+ + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, c): + self.b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf) + self.a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf) + return np.where(abs(c) == np.inf, 0, 1) + + def _loglogcdf(self, x, c): + return _lazywhere((x == x) & (c != 0), (x, c), + lambda x, c: sc.log1p(-c*x)/c, -x) + + def _pdf(self, x, c): + # genextreme.pdf(x, c) = + # exp(-exp(-x))*exp(-x), for c==0 + # exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x \le 1/c, c > 0 + return np.exp(self._logpdf(x, c)) + + def _logpdf(self, x, c): + cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0) + logex2 = sc.log1p(-cx) + logpex2 = self._loglogcdf(x, c) + pex2 = np.exp(logpex2) + # Handle special cases + np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0) + logpdf = np.where((cx == 1) | (cx == -np.inf), + -np.inf, + -pex2+logpex2-logex2) + np.putmask(logpdf, (c == 1) & (x == 1), 0.0) + return logpdf + + def _logcdf(self, x, c): + return -np.exp(self._loglogcdf(x, c)) + + def _cdf(self, x, c): + return np.exp(self._logcdf(x, c)) + + def _sf(self, x, c): + return -sc.expm1(self._logcdf(x, c)) + + def _ppf(self, q, c): + x = -np.log(-np.log(q)) + return _lazywhere((x == x) & (c != 0), (x, c), + lambda x, c: -sc.expm1(-c * x) / c, x) + + def _isf(self, q, c): + x = -np.log(-sc.log1p(-q)) + return _lazywhere((x == x) & (c != 0), (x, c), + lambda x, c: -sc.expm1(-c * x) / c, x) + + def _stats(self, c): + g = lambda n: sc.gamma(n*c + 1) + g1 = g(1) + g2 = g(2) + g3 = g(3) + g4 = g(4) + g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0) + gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0, + sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0) + eps = 1e-14 + gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c) + + m = np.where(c < -1.0, np.nan, -gamk) + v = np.where(c < -0.5, np.nan, g1**2.0*gam2k) + + # skewness + sk1 = _lazywhere(c >= -1./3, + (c, g1, g2, g3, g2mg12), + lambda c, g1, g2, g3, g2gm12: + np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5, + fillvalue=np.nan) + sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1) + + # kurtosis + ku1 = _lazywhere(c >= -1./4, + (g1, g2, g3, g4, g2mg12), + lambda g1, g2, g3, g4, g2mg12: + (g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2, + fillvalue=np.nan) + ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0) + return m, v, sk, ku + + def _fitstart(self, data): + # This is better than the default shape of (1,). + g = _skew(data) + if g < 0: + a = 0.5 + else: + a = -0.5 + return super(genextreme_gen, self)._fitstart(data, args=(a,)) + + def _munp(self, n, c): + k = np.arange(0, n+1) + vals = 1.0/c**n * np.sum( + sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1), + axis=0) + return np.where(c*n > -1, vals, np.inf) + + def _entropy(self, c): + return _EULER*(1 - c) + 1 + + +genextreme = genextreme_gen(name='genextreme') + + +def _digammainv(y): + # Inverse of the digamma function (real positive arguments only). + # This function is used in the `fit` method of `gamma_gen`. + # The function uses either optimize.fsolve or optimize.newton + # to solve `sc.digamma(x) - y = 0`. 
There is probably room for + # improvement, but currently it works over a wide range of y: + # >>> y = 64*np.random.randn(1000000) + # >>> y.min(), y.max() + # (-311.43592651416662, 351.77388222276869) + # x = [_digammainv(t) for t in y] + # np.abs(sc.digamma(x) - y).max() + # 1.1368683772161603e-13 + # + _em = 0.5772156649015328606065120 + func = lambda x: sc.digamma(x) - y + if y > -0.125: + x0 = np.exp(y) + 0.5 + if y < 10: + # Some experimentation shows that newton reliably converges + # must faster than fsolve in this y range. For larger y, + # newton sometimes fails to converge. + value = optimize.newton(func, x0, tol=1e-10) + return value + elif y > -3: + x0 = np.exp(y/2.332) + 0.08661 + else: + x0 = 1.0 / (-y - _em) + + value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11, + full_output=True) + if ier != 1: + raise RuntimeError("_digammainv: fsolve failed, y = %r" % y) + + return value[0] + + +## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition) + +## gamma(a, loc, scale) with a an integer is the Erlang distribution +## gamma(1, loc, scale) is the Exponential distribution +## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom. + +class gamma_gen(rv_continuous): + r"""A gamma continuous random variable. + + %(before_notes)s + + See Also + -------- + erlang, expon + + Notes + ----- + The probability density function for `gamma` is: + + .. math:: + + f(x, a) = \frac{x^{a-1} \exp(-x)}{\Gamma(a)} + + for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the + gamma function. + + `gamma` takes ``a`` as a shape parameter for :math:`a`. + + When :math:`a` is an integer, `gamma` reduces to the Erlang + distribution, and when :math:`a=1` to the exponential distribution. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, a): + return self._random_state.standard_gamma(a, self._size) + + def _pdf(self, x, a): + # gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a) + return np.exp(self._logpdf(x, a)) + + def _logpdf(self, x, a): + return sc.xlogy(a-1.0, x) - x - sc.gammaln(a) + + def _cdf(self, x, a): + return sc.gammainc(a, x) + + def _sf(self, x, a): + return sc.gammaincc(a, x) + + def _ppf(self, q, a): + return sc.gammaincinv(a, q) + + def _stats(self, a): + return a, a, 2.0/np.sqrt(a), 6.0/a + + def _entropy(self, a): + return sc.psi(a)*(1-a) + a + sc.gammaln(a) + + def _fitstart(self, data): + # The skewness of the gamma distribution is `4 / np.sqrt(a)`. + # We invert that to estimate the shape `a` using the skewness + # of the data. The formula is regularized with 1e-8 in the + # denominator to allow for degenerate data where the skewness + # is close to 0. + a = 4 / (1e-8 + _skew(data)**2) + return super(gamma_gen, self)._fitstart(data, args=(a,)) + + @extend_notes_in_docstring(rv_continuous, notes="""\ + When the location is fixed by using the argument `floc`, this + function uses explicit formulas or solves a simpler numerical + problem than the full ML optimization problem. So in that case, + the `optimizer`, `loc` and `scale` arguments are ignored.\n\n""") + def fit(self, data, *args, **kwds): + f0 = (kwds.get('f0', None) or kwds.get('fa', None) or + kwds.get('fix_a', None)) + floc = kwds.get('floc', None) + fscale = kwds.get('fscale', None) + + if floc is None: + # loc is not fixed. Use the default fit method. + return super(gamma_gen, self).fit(data, *args, **kwds) + + # Special case: loc is fixed. + + if f0 is not None and fscale is not None: + # This check is for consistency with `rv_continuous.fit`. 
+ # Without this check, this function would just return the + # parameters that were given. + raise ValueError("All parameters fixed. There is nothing to " + "optimize.") + + # Fixed location is handled by shifting the data. + data = np.asarray(data) + if np.any(data <= floc): + raise FitDataError("gamma", lower=floc, upper=np.inf) + if floc != 0: + # Don't do the subtraction in-place, because `data` might be a + # view of the input array. + data = data - floc + xbar = data.mean() + + # Three cases to handle: + # * shape and scale both free + # * shape fixed, scale free + # * shape free, scale fixed + + if fscale is None: + # scale is free + if f0 is not None: + # shape is fixed + a = f0 + else: + # shape and scale are both free. + # The MLE for the shape parameter `a` is the solution to: + # np.log(a) - sc.digamma(a) - np.log(xbar) + + # np.log(data.mean) = 0 + s = np.log(xbar) - np.log(data).mean() + func = lambda a: np.log(a) - sc.digamma(a) - s + aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s) + xa = aest*(1-0.4) + xb = aest*(1+0.4) + a = optimize.brentq(func, xa, xb, disp=0) + + # The MLE for the scale parameter is just the data mean + # divided by the shape parameter. + scale = xbar / a + else: + # scale is fixed, shape is free + # The MLE for the shape parameter `a` is the solution to: + # sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0 + c = np.log(data).mean() - np.log(fscale) + a = _digammainv(c) + scale = fscale + + return a, floc, scale + + +gamma = gamma_gen(a=0.0, name='gamma') + + +class erlang_gen(gamma_gen): + """An Erlang continuous random variable. + + %(before_notes)s + + See Also + -------- + gamma + + Notes + ----- + The Erlang distribution is a special case of the Gamma distribution, with + the shape parameter `a` an integer. Note that this restriction is not + enforced by `erlang`. It will, however, generate a warning the first time + a non-integer value is used for the shape parameter. + + Refer to `gamma` for examples. + + """ + + def _argcheck(self, a): + allint = np.all(np.floor(a) == a) + allpos = np.all(a > 0) + if not allint: + # An Erlang distribution shouldn't really have a non-integer + # shape parameter, so warn the user. + warnings.warn( + 'The shape parameter of the erlang distribution ' + 'has been given a non-integer value %r.' % (a,), + RuntimeWarning) + return allpos + + def _fitstart(self, data): + # Override gamma_gen_fitstart so that an integer initial value is + # used. (Also regularize the division, to avoid issues when + # _skew(data) is 0 or close to 0.) + a = int(4.0 / (1e-8 + _skew(data)**2)) + return super(gamma_gen, self)._fitstart(data, args=(a,)) + + # Trivial override of the fit method, so we can monkey-patch its + # docstring. + def fit(self, data, *args, **kwds): + return super(erlang_gen, self).fit(data, *args, **kwds) + + if fit.__doc__ is not None: + fit.__doc__ = (rv_continuous.fit.__doc__ + + """ + Notes + ----- + The Erlang distribution is generally defined to have integer values + for the shape parameter. This is not enforced by the `erlang` class. + When fitting the distribution, it will generally return a non-integer + value for the shape parameter. By using the keyword argument + `f0=<integer>`, the fit method can be constrained to fit the data to + a specific integer shape parameter. + """) + + +erlang = erlang_gen(a=0.0, name='erlang') + + +class gengamma_gen(rv_continuous): + r"""A generalized gamma continuous random variable. 
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `gengamma` is:
+
+    .. math::
+
+        f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}
+
+    for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
+    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
+
+    `gengamma` takes :math:`a` and :math:`c` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, a, c):
+        return (a > 0) & (c != 0)
+
+    def _pdf(self, x, a, c):
+        # gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
+        return np.exp(self._logpdf(x, a, c))
+
+    def _logpdf(self, x, a, c):
+        return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
+
+    def _cdf(self, x, a, c):
+        xc = x**c
+        val1 = sc.gammainc(a, xc)
+        val2 = sc.gammaincc(a, xc)
+        return np.where(c > 0, val1, val2)
+
+    def _sf(self, x, a, c):
+        xc = x**c
+        val1 = sc.gammainc(a, xc)
+        val2 = sc.gammaincc(a, xc)
+        return np.where(c > 0, val2, val1)
+
+    def _ppf(self, q, a, c):
+        val1 = sc.gammaincinv(a, q)
+        val2 = sc.gammainccinv(a, q)
+        return np.where(c > 0, val1, val2)**(1.0/c)
+
+    def _isf(self, q, a, c):
+        val1 = sc.gammaincinv(a, q)
+        val2 = sc.gammainccinv(a, q)
+        return np.where(c > 0, val2, val1)**(1.0/c)
+
+    def _munp(self, n, a, c):
+        # Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
+        return sc.poch(a, n*1.0/c)
+
+    def _entropy(self, a, c):
+        val = sc.psi(a)
+        return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
+
+
+gengamma = gengamma_gen(a=0.0, name='gengamma')
+
+
+class genhalflogistic_gen(rv_continuous):
+    r"""A generalized half-logistic continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `genhalflogistic` is:
+
+    .. math::
+
+        f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2}
+
+    for :math:`0 \le x \le 1/c`, and :math:`c > 0`.
+
+    `genhalflogistic` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, c):
+        self.b = 1.0 / c
+        return c > 0
+
+    def _pdf(self, x, c):
+        # genhalflogistic.pdf(x, c) =
+        #    2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
+        limit = 1.0/c
+        tmp = np.asarray(1-c*x)
+        tmp0 = tmp**(limit-1)
+        tmp2 = tmp0*tmp
+        return 2*tmp0 / (1+tmp2)**2
+
+    def _cdf(self, x, c):
+        limit = 1.0/c
+        tmp = np.asarray(1-c*x)
+        tmp2 = tmp**(limit)
+        return (1.0-tmp2) / (1+tmp2)
+
+    def _ppf(self, q, c):
+        return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
+
+    def _entropy(self, c):
+        return 2 - (2*c+1)*np.log(2)
+
+
+genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
+
+
+class gompertz_gen(rv_continuous):
+    r"""A Gompertz (or truncated Gumbel) continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `gompertz` is:
+
+    .. math::
+
+        f(x, c) = c \exp(x) \exp(-c (e^x-1))
+
+    for :math:`x \ge 0`, :math:`c > 0`.
+
+    `gompertz` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _pdf(self, x, c):
+        # gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
+        return np.exp(self._logpdf(x, c))
+
+    def _logpdf(self, x, c):
+        return np.log(c) + x - c * sc.expm1(x)
+
+    def _cdf(self, x, c):
+        return -sc.expm1(-c * sc.expm1(x))
+
+    def _ppf(self, q, c):
+        return sc.log1p(-1.0 / c * sc.log1p(-q))
+
+    def _entropy(self, c):
+        return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
+
+
+gompertz = gompertz_gen(a=0.0, name='gompertz')
+
+
+class gumbel_r_gen(rv_continuous):
+    r"""A right-skewed Gumbel continuous random variable.
+ + %(before_notes)s + + See Also + -------- + gumbel_l, gompertz, genextreme + + Notes + ----- + The probability density function for `gumbel_r` is: + + .. math:: + + f(x) = \exp(-(x + e^{-x})) + + The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett + distribution. It is also related to the extreme value distribution, + log-Weibull and Gompertz distributions. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # gumbel_r.pdf(x) = exp(-(x + exp(-x))) + return np.exp(self._logpdf(x)) + + def _logpdf(self, x): + return -x - np.exp(-x) + + def _cdf(self, x): + return np.exp(-np.exp(-x)) + + def _logcdf(self, x): + return -np.exp(-x) + + def _ppf(self, q): + return -np.log(-np.log(q)) + + def _stats(self): + return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5 + + def _entropy(self): + # https://en.wikipedia.org/wiki/Gumbel_distribution + return _EULER + 1. + + +gumbel_r = gumbel_r_gen(name='gumbel_r') + + +class gumbel_l_gen(rv_continuous): + r"""A left-skewed Gumbel continuous random variable. + + %(before_notes)s + + See Also + -------- + gumbel_r, gompertz, genextreme + + Notes + ----- + The probability density function for `gumbel_l` is: + + .. math:: + + f(x) = \exp(x - e^x) + + The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett + distribution. It is also related to the extreme value distribution, + log-Weibull and Gompertz distributions. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # gumbel_l.pdf(x) = exp(x - exp(x)) + return np.exp(self._logpdf(x)) + + def _logpdf(self, x): + return x - np.exp(x) + + def _cdf(self, x): + return -sc.expm1(-np.exp(x)) + + def _ppf(self, q): + return np.log(-sc.log1p(-q)) + + def _logsf(self, x): + return -np.exp(x) + + def _sf(self, x): + return np.exp(-np.exp(x)) + + def _isf(self, x): + return np.log(-np.log(x)) + + def _stats(self): + return -_EULER, np.pi*np.pi/6.0, \ + -12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5 + + def _entropy(self): + return _EULER + 1. + + +gumbel_l = gumbel_l_gen(name='gumbel_l') + + +class halfcauchy_gen(rv_continuous): + r"""A Half-Cauchy continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `halfcauchy` is: + + .. math:: + + f(x) = \frac{2}{\pi (1 + x^2)} + + for :math:`x \ge 0`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # halfcauchy.pdf(x) = 2 / (pi * (1 + x**2)) + return 2.0/np.pi/(1.0+x*x) + + def _logpdf(self, x): + return np.log(2.0/np.pi) - sc.log1p(x*x) + + def _cdf(self, x): + return 2.0/np.pi*np.arctan(x) + + def _ppf(self, q): + return np.tan(np.pi/2*q) + + def _stats(self): + return np.inf, np.inf, np.nan, np.nan + + def _entropy(self): + return np.log(2*np.pi) + + +halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy') + + +class halflogistic_gen(rv_continuous): + r"""A half-logistic continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `halflogistic` is: + + .. math:: + + f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 } + = \frac{1}{2} \text{sech}(x/2)^2 + + for :math:`x \ge 0`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 + # = 1/2 * sech(x/2)**2 + return np.exp(self._logpdf(x)) + + def _logpdf(self, x): + return np.log(2) - x - 2. 
* sc.log1p(np.exp(-x)) + + def _cdf(self, x): + return np.tanh(x/2.0) + + def _ppf(self, q): + return 2*np.arctanh(q) + + def _munp(self, n): + if n == 1: + return 2*np.log(2) + if n == 2: + return np.pi*np.pi/3.0 + if n == 3: + return 9*_ZETA3 + if n == 4: + return 7*np.pi**4 / 15.0 + return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1) + + def _entropy(self): + return 2-np.log(2) + + +halflogistic = halflogistic_gen(a=0.0, name='halflogistic') + + +class halfnorm_gen(rv_continuous): + r"""A half-normal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `halfnorm` is: + + .. math:: + + f(x) = \sqrt{2/\pi} \exp(-x^2 / 2) + + for :math:`x > 0`. + + `halfnorm` is a special case of `chi` with ``df=1``. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self): + return abs(self._random_state.standard_normal(size=self._size)) + + def _pdf(self, x): + # halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2) + return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0) + + def _logpdf(self, x): + return 0.5 * np.log(2.0/np.pi) - x*x/2.0 + + def _cdf(self, x): + return _norm_cdf(x)*2-1.0 + + def _ppf(self, q): + return sc.ndtri((1+q)/2.0) + + def _stats(self): + return (np.sqrt(2.0/np.pi), + 1-2.0/np.pi, + np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5, + 8*(np.pi-3)/(np.pi-2)**2) + + def _entropy(self): + return 0.5*np.log(np.pi/2.0)+0.5 + + +halfnorm = halfnorm_gen(a=0.0, name='halfnorm') + + +class hypsecant_gen(rv_continuous): + r"""A hyperbolic secant continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `hypsecant` is: + + .. math:: + + f(x) = \frac{1}{\pi} \text{sech}(x) + + for a real number :math:`x`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # hypsecant.pdf(x) = 1/pi * sech(x) + return 1.0/(np.pi*np.cosh(x)) + + def _cdf(self, x): + return 2.0/np.pi*np.arctan(np.exp(x)) + + def _ppf(self, q): + return np.log(np.tan(np.pi*q/2.0)) + + def _stats(self): + return 0, np.pi*np.pi/4, 0, 2 + + def _entropy(self): + return np.log(2*np.pi) + + +hypsecant = hypsecant_gen(name='hypsecant') + + +class gausshyper_gen(rv_continuous): + r"""A Gauss hypergeometric continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `gausshyper` is: + + .. math:: + + f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c} + + for :math:`0 \le x \le 1`, :math:`a > 0`, :math:`b > 0`, and + :math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}`. + :math:`F[2, 1]` is the Gauss hypergeometric function + `scipy.special.hyp2f1`. + + `gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape + parameters. + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, a, b, c, z): + return (a > 0) & (b > 0) & (c == c) & (z == z) + + def _pdf(self, x, a, b, c, z): + # gausshyper.pdf(x, a, b, c, z) = + # C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c) + Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z) + return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c + + def _munp(self, n, a, b, c, z): + fac = sc.beta(n+a, b) / sc.beta(a, b) + num = sc.hyp2f1(c, a+n, a+b+n, -z) + den = sc.hyp2f1(c, a, a+b, -z) + return fac*num / den + + +gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper') + + +class invgamma_gen(rv_continuous): + r"""An inverted gamma continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `invgamma` is: + + .. 
math::
+
+        f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})
+
+    for :math:`x > 0`, :math:`a > 0`. :math:`\Gamma` is the gamma function
+    (`scipy.special.gamma`).
+
+    `invgamma` takes ``a`` as a shape parameter for :math:`a`.
+
+    `invgamma` is a special case of `gengamma` with ``c=-1``.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _pdf(self, x, a):
+        # invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
+        return np.exp(self._logpdf(x, a))
+
+    def _logpdf(self, x, a):
+        return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
+
+    def _cdf(self, x, a):
+        return sc.gammaincc(a, 1.0 / x)
+
+    def _ppf(self, q, a):
+        return 1.0 / sc.gammainccinv(a, q)
+
+    def _sf(self, x, a):
+        return sc.gammainc(a, 1.0 / x)
+
+    def _isf(self, q, a):
+        return 1.0 / sc.gammaincinv(a, q)
+
+    def _stats(self, a, moments='mvsk'):
+        m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
+        m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
+                        np.inf)
+
+        g1, g2 = None, None
+        if 's' in moments:
+            g1 = _lazywhere(
+                a > 3, (a,),
+                lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
+        if 'k' in moments:
+            g2 = _lazywhere(
+                a > 4, (a,),
+                lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
+        return m1, m2, g1, g2
+
+    def _entropy(self, a):
+        return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
+
+
+invgamma = invgamma_gen(a=0.0, name='invgamma')
+
+
+# scale is gamma from DATAPLOT and B from Regress
+class invgauss_gen(rv_continuous):
+    r"""An inverse Gaussian continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `invgauss` is:
+
+    .. math::
+
+        f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
+                    \exp(-\frac{(x-\mu)^2}{2 x \mu^2})
+
+    for :math:`x > 0` and :math:`\mu > 0`.
+
+    `invgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
+
+    %(after_notes)s
+
+    When :math:`\mu` is too small, evaluating the cumulative distribution
+    function will be inaccurate due to ``cdf(mu -> 0) = inf * 0``.
+    NaNs are returned for :math:`\mu \le 0.0028`.
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _rvs(self, mu):
+        return self._random_state.wald(mu, 1.0, size=self._size)
+
+    def _pdf(self, x, mu):
+        # invgauss.pdf(x, mu) =
+        #                  1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
+        return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
+
+    def _logpdf(self, x, mu):
+        return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
+
+    def _cdf(self, x, mu):
+        fac = np.sqrt(1.0/x)
+        # Numerical accuracy for small `mu` is bad. See #869.
+        C1 = _norm_cdf(fac*(x-mu)/mu)
+        C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
+        return C1
+
+    def _stats(self, mu):
+        return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
+
+
+invgauss = invgauss_gen(a=0.0, name='invgauss')
+
+
+class norminvgauss_gen(rv_continuous):
+    r"""A Normal Inverse Gaussian continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `norminvgauss` is:
+
+    .. math::
+
+        f(x, a, b) = \frac{a \, K_1(a \sqrt{1 + x^2})}{\pi \sqrt{1 + x^2}}
+                     \exp(\sqrt{a^2 - b^2} + b x)
+
+    where `x` is a real number, the parameter `a` is the tail heaviness
+    and `b` is the asymmetry parameter satisfying `a > 0` and `abs(b) <= a`.
+    :math:`K_1` is the modified Bessel function of second kind
+    (`scipy.special.k1`).
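+
+    As an informal check of this formula (a minimal sketch; the scalar
+    inputs are arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats, special
+    >>> x, a, b = 0.5, 2.0, 1.0
+    >>> sq = np.hypot(1, x)
+    >>> direct = (a * special.k1(a * sq) *
+    ...           np.exp(np.sqrt(a**2 - b**2) + b * x) / (np.pi * sq))
+    >>> np.allclose(stats.norminvgauss.pdf(x, a, b), direct)
+    True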
+ + %(after_notes)s + + A normal inverse Gaussian random variable `Y` with parameters `a` and `b` + can be expressed as a normal mean-variance mixture: + `Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)` and `V` is + `invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used + to generate random variates. + + References + ---------- + O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on + Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3), + pp. 151-157, 1978. + + O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and Stochastic + Volatility Modelling", Scandinavian Journal of Statistics, Vol. 24, + pp. 1-13, 1997. + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _argcheck(self, a, b): + return (a > 0) & (np.absolute(b) < a) + + def _pdf(self, x, a, b): + gamma = np.sqrt(a**2 - b**2) + fac1 = a / np.pi * np.exp(gamma) + sq = np.hypot(1, x) # reduce overflows + return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq + + def _rvs(self, a, b): + # note: X = b * V + sqrt(V) * X is norminvgaus(a,b) if X is standard + # normal and V is invgauss(mu=1/sqrt(a**2 - b**2)) + gamma = np.sqrt(a**2 - b**2) + sz, rndm = self._size, self._random_state + ig = invgauss.rvs(mu=1/gamma, size=sz, random_state=rndm) + return b * ig + np.sqrt(ig) * norm.rvs(size=sz, random_state=rndm) + + def _stats(self, a, b): + gamma = np.sqrt(a**2 - b**2) + mean = b / gamma + variance = a**2 / gamma**3 + skewness = 3.0 * b / (a * np.sqrt(gamma)) + kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma + return mean, variance, skewness, kurtosis + + +norminvgauss = norminvgauss_gen(name="norminvgauss") + + +class invweibull_gen(rv_continuous): + u"""An inverted Weibull continuous random variable. + + This distribution is also known as the Fréchet distribution or the + type II extreme value distribution. + + %(before_notes)s + + Notes + ----- + The probability density function for `invweibull` is: + + .. math:: + + f(x, c) = c x^{-c-1} \\exp(-x^{-c}) + + for :math:`x > 0`, :math:`c > 0`. + + `invweibull` takes ``c`` as a shape parameter for :math:`c`. + + %(after_notes)s + + References + ---------- + F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse + Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011. + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _pdf(self, x, c): + # invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c)) + xc1 = np.power(x, -c - 1.0) + xc2 = np.power(x, -c) + xc2 = np.exp(-xc2) + return c * xc1 * xc2 + + def _cdf(self, x, c): + xc1 = np.power(x, -c) + return np.exp(-xc1) + + def _ppf(self, q, c): + return np.power(-np.log(q), -1.0/c) + + def _munp(self, n, c): + return sc.gamma(1 - n / c) + + def _entropy(self, c): + return 1+_EULER + _EULER / c - np.log(c) + + +invweibull = invweibull_gen(a=0, name='invweibull') + + +class johnsonsb_gen(rv_continuous): + r"""A Johnson SB continuous random variable. + + %(before_notes)s + + See Also + -------- + johnsonsu + + Notes + ----- + The probability density function for `johnsonsb` is: + + .. math:: + + f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} ) + + for :math:`0 < x < 1` and :math:`a, b > 0`, and :math:`\phi` is the normal + pdf. + + `johnsonsb` takes :math:`a` and :math:`b` as shape parameters. 
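+
+    Since the cdf is just the normal cdf of the logit transform, it admits
+    a simple consistency check (a minimal sketch):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x, a, b = 0.3, 1.0, 2.0
+    >>> np.allclose(stats.johnsonsb.cdf(x, a, b),
+    ...             stats.norm.cdf(a + b * np.log(x / (1 - x))))
+    True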
+ + %(after_notes)s + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _argcheck(self, a, b): + return (b > 0) & (a == a) + + def _pdf(self, x, a, b): + # johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x))) + trm = _norm_pdf(a + b*np.log(x/(1.0-x))) + return b*1.0/(x*(1-x))*trm + + def _cdf(self, x, a, b): + return _norm_cdf(a + b*np.log(x/(1.0-x))) + + def _ppf(self, q, a, b): + return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a))) + + +johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb') + + +class johnsonsu_gen(rv_continuous): + r"""A Johnson SU continuous random variable. + + %(before_notes)s + + See Also + -------- + johnsonsb + + Notes + ----- + The probability density function for `johnsonsu` is: + + .. math:: + + f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}} + \phi(a + b \log(x + \sqrt{x^2 + 1})) + + for all :math:`x, a, b > 0`, and :math:`\phi` is the normal pdf. + + `johnsonsu` takes :math:`a` and :math:`b` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, a, b): + return (b > 0) & (a == a) + + def _pdf(self, x, a, b): + # johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) * + # phi(a + b * log(x + sqrt(x**2 + 1))) + x2 = x*x + trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1))) + return b*1.0/np.sqrt(x2+1.0)*trm + + def _cdf(self, x, a, b): + return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1))) + + def _ppf(self, q, a, b): + return np.sinh((_norm_ppf(q) - a) / b) + + +johnsonsu = johnsonsu_gen(name='johnsonsu') + + +class laplace_gen(rv_continuous): + r"""A Laplace continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `laplace` is + + .. math:: + + f(x) = \frac{1}{2} \exp(-|x|) + + for a real number :math:`x`. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self): + return self._random_state.laplace(0, 1, size=self._size) + + def _pdf(self, x): + # laplace.pdf(x) = 1/2 * exp(-abs(x)) + return 0.5*np.exp(-abs(x)) + + def _cdf(self, x): + return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x)) + + def _ppf(self, q): + return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q)) + + def _stats(self): + return 0, 2, 0, 3 + + def _entropy(self): + return np.log(2)+1 + + +laplace = laplace_gen(name='laplace') + + +class levy_gen(rv_continuous): + r"""A Levy continuous random variable. + + %(before_notes)s + + See Also + -------- + levy_stable, levy_l + + Notes + ----- + The probability density function for `levy` is: + + .. math:: + + f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{1}{2x}\right) + + for :math:`x > 0`. + + This is the same as the Levy-stable distribution with :math:`a=1/2` and + :math:`b=1`. + + %(after_notes)s + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _pdf(self, x): + # levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x)) + return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x)) + + def _cdf(self, x): + # Equivalent to 2*norm.sf(np.sqrt(1/x)) + return sc.erfc(np.sqrt(0.5 / x)) + + def _ppf(self, q): + # Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2) + val = -sc.ndtri(q/2) + return 1.0 / (val * val) + + def _stats(self): + return np.inf, np.inf, np.nan, np.nan + + +levy = levy_gen(a=0.0, name="levy") + + +class levy_l_gen(rv_continuous): + r"""A left-skewed Levy continuous random variable. + + %(before_notes)s + + See Also + -------- + levy, levy_stable + + Notes + ----- + The probability density function for `levy_l` is: + + .. 
math::
+        f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp{ \left(-\frac{1}{2|x|} \right)}
+
+    for :math:`x < 0`.
+
+    This is the same as the Levy-stable distribution with :math:`a=1/2` and
+    :math:`b=-1`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _pdf(self, x):
+        # levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
+        ax = abs(x)
+        return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
+
+    def _cdf(self, x):
+        ax = abs(x)
+        return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
+
+    def _ppf(self, q):
+        val = _norm_ppf((q + 1.0) / 2)
+        return -1.0 / (val * val)
+
+    def _stats(self):
+        return np.inf, np.inf, np.nan, np.nan
+
+
+levy_l = levy_l_gen(b=0.0, name="levy_l")
+
+
+class levy_stable_gen(rv_continuous):
+    r"""A Levy-stable continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    levy, levy_l
+
+    Notes
+    -----
+    The distribution for `levy_stable` has characteristic function:
+
+    .. math::
+
+        \varphi(t, \alpha, \beta, c, \mu) =
+        e^{it\mu -|ct|^{\alpha}(1-i\beta \operatorname{sign}(t)\Phi(\alpha, t))}
+
+    where:
+
+    .. math::
+
+        \Phi = \begin{cases}
+                \tan \left({\frac {\pi \alpha }{2}}\right)&\alpha \neq 1\\
+                -{\frac {2}{\pi }}\log |t|&\alpha =1
+                \end{cases}
+
+    The probability density function for `levy_stable` is:
+
+    .. math::
+
+        f(x) = \frac{1}{2\pi}\int_{-\infty}^\infty \varphi(t)e^{-ixt}\,dt
+
+    where :math:`-\infty < t < \infty`. This integral does not have a known
+    closed form.
+
+    For pdf evaluation we use either Zolotarev's :math:`S_0` parameterization
+    with integration, direct integration of the standard parameterization of
+    the characteristic function, or an FFT of the characteristic function.
+    If ``levy_stable.pdf_fft_min_points_threshold`` is set to something other
+    than None (its default) and the number of input points exceeds that
+    threshold, the FFT is used; otherwise one of the other methods is used.
+
+    The default method is 'best', which uses Zolotarev's method if alpha = 1
+    and integration of the characteristic function otherwise. The default
+    method can be changed by setting ``levy_stable.pdf_default_method`` to
+    either 'zolotarev', 'quadrature' or 'best'.
+
+    To increase the accuracy of the FFT calculation one can specify
+    ``levy_stable.pdf_fft_grid_spacing`` (defaults to 0.001) and
+    ``pdf_fft_n_points_two_power`` (defaults to a value that covers the
+    input range * 4). Setting ``pdf_fft_n_points_two_power`` to 16 should be
+    sufficiently accurate in most cases at the expense of CPU time.
+
+    For cdf evaluation we use Zolotarev's :math:`S_0` parameterization with
+    integration, or the integral of the pdf FFT interpolated spline. The
+    settings affecting the FFT calculation are the same as for the pdf
+    calculation. Setting the threshold to ``None`` (default) will disable
+    the FFT. For cdf calculations Zolotarev's method is superior in
+    accuracy, so the FFT is disabled by default.
+
+    The fit method uses the quantile estimation method in [MC] to obtain an
+    initial estimate, which MLE then refines. Note that MLE doesn't always
+    converge when the FFT is used for pdf calculations, so it's best to
+    leave ``pdf_fft_min_points_threshold`` unset.
+
+    .. warning::
+
+        For pdf calculations the implementation of Zolotarev's method is
+        unstable for values where alpha = 1 and beta != 0. In this case the
+        quadrature method is recommended. FFT calculation is also
+        considered experimental.
+
+        For cdf calculations the FFT calculation is considered
+        experimental. Use Zolotarev's method instead (default).
+
+    %(after_notes)s
+
+    References
+    ----------
+    ..
[MC] McCulloch, J., 1986. Simple consistent estimators of stable distribution parameters.
+        Communications in Statistics - Simulation and Computation 15, 1109-1136.
+    .. [MS] Mittnik, S., Rachev, S.T., Doganoglu, T., Chenyao, D., 1999. Maximum likelihood
+        estimation of stable Paretian models, Mathematical and Computer Modelling,
+        Volume 29, Issue 10, Pages 275-293.
+    .. [BS] Borak, S., Hardle, W., Weron, R., 2005. Stable distributions, Economic Risk.
+
+    %(example)s
+
+    """
+
+    def _rvs(self, alpha, beta):
+
+        def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
+            return (2/np.pi*(np.pi/2 + bTH)*tanTH -
+                    beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
+
+        def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
+            return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
+                    ((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
+
+        def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
+            # alpha is not 1 and beta is not 0
+            val0 = beta*np.tan(np.pi*alpha/2)
+            th0 = np.arctan(val0)/alpha
+            val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
+            res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
+                          val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
+            return res3
+
+        def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
+            res = _lazywhere(beta == 0,
+                             (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
+                             beta0func, f2=otherwise)
+            return res
+
+        sz = self._size
+        alpha = broadcast_to(alpha, sz)
+        beta = broadcast_to(beta, sz)
+        TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=sz,
+                         random_state=self._random_state)
+        W = expon.rvs(size=sz, random_state=self._random_state)
+        aTH = alpha*TH
+        bTH = beta*TH
+        cosTH = np.cos(TH)
+        tanTH = np.tan(TH)
+        res = _lazywhere(alpha == 1,
+                         (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
+                         alpha1func, f2=alphanot1func)
+        return res
+
+    def _argcheck(self, alpha, beta):
+        return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
+
+    @staticmethod
+    def _cf(t, alpha, beta):
+        Phi = lambda alpha, t: np.tan(np.pi*alpha/2) if alpha != 1 else -2.0*np.log(np.abs(t))/np.pi
+        return np.exp(-(np.abs(t)**alpha)*(1-1j*beta*np.sign(t)*Phi(alpha, t)))
+
+    @staticmethod
+    def _pdf_from_cf_with_fft(cf, h=0.01, q=9):
+        """Calculates pdf from cf using fft, on a region around 0 with N=2**q
+        points separated by distance h, as suggested by [MS].
+        """
+        N = 2**q
+        n = np.arange(1,N+1)
+        density = ((-1)**(n-1-N/2))*np.fft.fft(((-1)**(n-1))*cf(2*np.pi*(n-1-N/2)/h/N))/h/N
+        x = (n-1-N/2)*h
+        return (x, density)
+
+    @staticmethod
+    def _pdf_single_value_best(x, alpha, beta):
+        if alpha != 1. or (alpha == 1. and beta == 0.):
+            return levy_stable_gen._pdf_single_value_zolotarev(x, alpha, beta)
+        else:
+            return levy_stable_gen._pdf_single_value_cf_integrate(x, alpha, beta)
+
+    @staticmethod
+    def _pdf_single_value_cf_integrate(x, alpha, beta):
+        cf = lambda t: levy_stable_gen._cf(t, alpha, beta)
+        return integrate.quad(lambda t: np.real(np.exp(-1j*t*x)*cf(t)), -np.inf, np.inf, limit=1000)[0]/np.pi/2
+
+    @staticmethod
+    def _pdf_single_value_zolotarev(x, alpha, beta):
+        """Calculate pdf using Zolotarev's methods as detailed in [BS].
+        """
+        zeta = -beta*np.tan(np.pi*alpha/2.)
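+        # zeta is the shift relating the (alpha, beta) parameterization used
+        # here to Zolotarev's S_0 parameterization, in which the integral
+        # representation below is stated; x0 = x + zeta recentres x
+        # accordingly (cf. [BS]).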
+ if alpha != 1: + x0 = x + zeta # convert to S_0 parameterization + xi = np.arctan(-zeta)/alpha + + def V(theta): + return np.cos(alpha*xi)**(1/(alpha-1)) * \ + (np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \ + (np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta)) + if x0 > zeta: + def g(theta): + return V(theta)*np.real(np.complex(x0-zeta)**(alpha/(alpha-1))) + + def f(theta): + return g(theta) * np.exp(-g(theta)) + + # spare calculating integral on null set + # use isclose as macos has fp differences + if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014): + return 0. + + with np.errstate(all="ignore"): + intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-xi, np.pi/2]) + intg_kwargs = {} + # windows quadpack less forgiving with points out of bounds + if intg_max.success and not np.isnan(intg_max.fun)\ + and intg_max.x > -xi and intg_max.x < np.pi/2: + intg_kwargs["points"] = [intg_max.x] + intg = integrate.quad(f, -xi, np.pi/2, **intg_kwargs)[0] + return alpha * intg / np.pi / np.abs(alpha-1) / (x0-zeta) + elif x0 == zeta: + return sc.gamma(1+1/alpha)*np.cos(xi)/np.pi/((1+zeta**2)**(1/alpha/2)) + else: + return levy_stable_gen._pdf_single_value_zolotarev(-x, alpha, -beta) + else: + # since location zero, no need to reposition x for S_0 parameterization + xi = np.pi/2 + if beta != 0: + warnings.warn('Density calculation unstable for alpha=1 and beta!=0.' + + ' Use quadrature method instead.', RuntimeWarning) + + def V(theta): + expr_1 = np.pi/2+beta*theta + return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi + + def g(theta): + return np.exp(-np.pi * x / 2. / beta) * V(theta) + + def f(theta): + return g(theta) * np.exp(-g(theta)) + + with np.errstate(all="ignore"): + intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-np.pi/2, np.pi/2]) + intg = integrate.fixed_quad(f, -np.pi/2, intg_max.x)[0] + integrate.fixed_quad(f, intg_max.x, np.pi/2)[0] + return intg / np.abs(beta) / 2. + else: + return 1/(1+x**2)/np.pi + + @staticmethod + def _cdf_single_value_zolotarev(x, alpha, beta): + """Calculate cdf using Zolotarev's methods as detailed in [BS]. + """ + zeta = -beta*np.tan(np.pi*alpha/2.) + if alpha != 1: + x0 = x + zeta # convert to S_0 parameterization + xi = np.arctan(-zeta)/alpha + + def V(theta): + return np.cos(alpha*xi)**(1/(alpha-1)) * \ + (np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \ + (np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta)) + if x0 > zeta: + c_1 = 1 if alpha > 1 else .5 - xi/np.pi + + def f(theta): + return np.exp(-V(theta)*np.real(np.complex(x0-zeta)**(alpha/(alpha-1)))) + + with np.errstate(all="ignore"): + # spare calculating integral on null set + # use isclose as macos has fp differences + if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014): + intg = 0 + else: + intg = integrate.quad(f, -xi, np.pi/2)[0] + return c_1 + np.sign(1-alpha) * intg / np.pi + elif x0 == zeta: + return .5 - xi/np.pi + else: + return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, alpha, -beta) + + else: + # since location zero, no need to reposition x for S_0 parameterization + xi = np.pi/2 + if beta > 0: + + def V(theta): + expr_1 = np.pi/2+beta*theta + return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi + + with np.errstate(all="ignore"): + expr_1 = np.exp(-np.pi*x/beta/2.) 
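+                    # expr_1 = exp(-pi*x/(2*beta)) scales V(theta) inside the
+                    # exponent; integrating exp(-expr_1*V(theta)) over theta
+                    # in (-pi/2, pi/2) and dividing by pi gives the
+                    # alpha == 1, beta > 0 cdf (cf. [BS]).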
+ int_1 = integrate.quad(lambda theta: np.exp(-expr_1 * V(theta)), -np.pi/2, np.pi/2)[0] + return int_1 / np.pi + elif beta == 0: + return .5 + np.arctan(x)/np.pi + else: + return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, 1, -beta) + + def _pdf(self, x, alpha, beta): + + x = np.asarray(x).reshape(1, -1)[0,:] + + x, alpha, beta = np.broadcast_arrays(x, alpha, beta) + + data_in = np.dstack((x, alpha, beta))[0] + data_out = np.empty(shape=(len(data_in),1)) + + pdf_default_method_name = getattr(self, 'pdf_default_method', 'best') + if pdf_default_method_name == 'best': + pdf_single_value_method = levy_stable_gen._pdf_single_value_best + elif pdf_default_method_name == 'zolotarev': + pdf_single_value_method = levy_stable_gen._pdf_single_value_zolotarev + else: + pdf_single_value_method = levy_stable_gen._pdf_single_value_cf_integrate + + fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None) + fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001) + fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None) + + # group data in unique arrays of alpha, beta pairs + uniq_param_pairs = np.vstack(list({tuple(row) for row in + data_in[:, 1:]})) + for pair in uniq_param_pairs: + data_mask = np.all(data_in[:,1:] == pair, axis=-1) + data_subset = data_in[data_mask] + if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold: + data_out[data_mask] = np.array([pdf_single_value_method(_x, _alpha, _beta) + for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1) + else: + warnings.warn('Density calculations experimental for FFT method.' + + ' Use combination of zolatarev and quadrature methods instead.', RuntimeWarning) + _alpha, _beta = pair + _x = data_subset[:,(0,)] + + # need enough points to "cover" _x for interpolation + h = fft_grid_spacing + q = np.ceil(np.log(2*np.max(np.abs(_x))/h)/np.log(2)) + 2 if fft_n_points_two_power is None else int(fft_n_points_two_power) + + density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q) + f = interpolate.interp1d(density_x, np.real(density)) + data_out[data_mask] = f(_x) + + return data_out.T[0] + + def _cdf(self, x, alpha, beta): + + x = np.asarray(x).reshape(1, -1)[0,:] + + x, alpha, beta = np.broadcast_arrays(x, alpha, beta) + + data_in = np.dstack((x, alpha, beta))[0] + data_out = np.empty(shape=(len(data_in),1)) + + fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None) + fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001) + fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None) + + # group data in unique arrays of alpha, beta pairs + uniq_param_pairs = np.vstack( + list({tuple(row) for row in data_in[:,1:]})) + for pair in uniq_param_pairs: + data_mask = np.all(data_in[:,1:] == pair, axis=-1) + data_subset = data_in[data_mask] + if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold: + data_out[data_mask] = np.array([levy_stable._cdf_single_value_zolotarev(_x, _alpha, _beta) + for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1) + else: + warnings.warn(u'FFT method is considered experimental for ' + + u'cumulative distribution function ' + + u'evaluations. 
Use Zolotarev’s method instead).', + RuntimeWarning) + _alpha, _beta = pair + _x = data_subset[:,(0,)] + + # need enough points to "cover" _x for interpolation + h = fft_grid_spacing + q = 16 if fft_n_points_two_power is None else int(fft_n_points_two_power) + + density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q) + f = interpolate.InterpolatedUnivariateSpline(density_x, np.real(density)) + data_out[data_mask] = np.array([f.integral(self.a, x_1) for x_1 in _x]).reshape(data_out[data_mask].shape) + + return data_out.T[0] + + def _fitstart(self, data): + # We follow McCullock 1986 method - Simple Consistent Estimators + # of Stable Distribution Parameters + + # Table III and IV + nu_alpha_range = [2.439, 2.5, 2.6, 2.7, 2.8, 3, 3.2, 3.5, 4, 5, 6, 8, 10, 15, 25] + nu_beta_range = [0, 0.1, 0.2, 0.3, 0.5, 0.7, 1] + + # table III - alpha = psi_1(nu_alpha, nu_beta) + alpha_table = [ + [2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000], + [1.916, 1.924, 1.924, 1.924, 1.924, 1.924, 1.924], + [1.808, 1.813, 1.829, 1.829, 1.829, 1.829, 1.829], + [1.729, 1.730, 1.737, 1.745, 1.745, 1.745, 1.745], + [1.664, 1.663, 1.663, 1.668, 1.676, 1.676, 1.676], + [1.563, 1.560, 1.553, 1.548, 1.547, 1.547, 1.547], + [1.484, 1.480, 1.471, 1.460, 1.448, 1.438, 1.438], + [1.391, 1.386, 1.378, 1.364, 1.337, 1.318, 1.318], + [1.279, 1.273, 1.266, 1.250, 1.210, 1.184, 1.150], + [1.128, 1.121, 1.114, 1.101, 1.067, 1.027, 0.973], + [1.029, 1.021, 1.014, 1.004, 0.974, 0.935, 0.874], + [0.896, 0.892, 0.884, 0.883, 0.855, 0.823, 0.769], + [0.818, 0.812, 0.806, 0.801, 0.780, 0.756, 0.691], + [0.698, 0.695, 0.692, 0.689, 0.676, 0.656, 0.597], + [0.593, 0.590, 0.588, 0.586, 0.579, 0.563, 0.513]] + + # table IV - beta = psi_2(nu_alpha, nu_beta) + beta_table = [ + [0, 2.160, 1.000, 1.000, 1.000, 1.000, 1.000], + [0, 1.592, 3.390, 1.000, 1.000, 1.000, 1.000], + [0, 0.759, 1.800, 1.000, 1.000, 1.000, 1.000], + [0, 0.482, 1.048, 1.694, 1.000, 1.000, 1.000], + [0, 0.360, 0.760, 1.232, 2.229, 1.000, 1.000], + [0, 0.253, 0.518, 0.823, 1.575, 1.000, 1.000], + [0, 0.203, 0.410, 0.632, 1.244, 1.906, 1.000], + [0, 0.165, 0.332, 0.499, 0.943, 1.560, 1.000], + [0, 0.136, 0.271, 0.404, 0.689, 1.230, 2.195], + [0, 0.109, 0.216, 0.323, 0.539, 0.827, 1.917], + [0, 0.096, 0.190, 0.284, 0.472, 0.693, 1.759], + [0, 0.082, 0.163, 0.243, 0.412, 0.601, 1.596], + [0, 0.074, 0.147, 0.220, 0.377, 0.546, 1.482], + [0, 0.064, 0.128, 0.191, 0.330, 0.478, 1.362], + [0, 0.056, 0.112, 0.167, 0.285, 0.428, 1.274]] + + # Table V and VII + alpha_range = [2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1, 0.9, 0.8, 0.7, 0.6, 0.5] + beta_range = [0, 0.25, 0.5, 0.75, 1] + + # Table V - nu_c = psi_3(alpha, beta) + nu_c_table = [ + [1.908, 1.908, 1.908, 1.908, 1.908], + [1.914, 1.915, 1.916, 1.918, 1.921], + [1.921, 1.922, 1.927, 1.936, 1.947], + [1.927, 1.930, 1.943, 1.961, 1.987], + [1.933, 1.940, 1.962, 1.997, 2.043], + [1.939, 1.952, 1.988, 2.045, 2.116], + [1.946, 1.967, 2.022, 2.106, 2.211], + [1.955, 1.984, 2.067, 2.188, 2.333], + [1.965, 2.007, 2.125, 2.294, 2.491], + [1.980, 2.040, 2.205, 2.435, 2.696], + [2.000, 2.085, 2.311, 2.624, 2.973], + [2.040, 2.149, 2.461, 2.886, 3.356], + [2.098, 2.244, 2.676, 3.265, 3.912], + [2.189, 2.392, 3.004, 3.844, 4.775], + [2.337, 2.634, 3.542, 4.808, 6.247], + [2.588, 3.073, 4.534, 6.636, 9.144]] + + # Table VII - nu_zeta = psi_5(alpha, beta) + nu_zeta_table = [ + [0, 0.000, 0.000, 0.000, 0.000], + [0, -0.017, -0.032, -0.049, -0.064], + [0, -0.030, -0.061, 
-0.092, -0.123], + [0, -0.043, -0.088, -0.132, -0.179], + [0, -0.056, -0.111, -0.170, -0.232], + [0, -0.066, -0.134, -0.206, -0.283], + [0, -0.075, -0.154, -0.241, -0.335], + [0, -0.084, -0.173, -0.276, -0.390], + [0, -0.090, -0.192, -0.310, -0.447], + [0, -0.095, -0.208, -0.346, -0.508], + [0, -0.098, -0.223, -0.380, -0.576], + [0, -0.099, -0.237, -0.424, -0.652], + [0, -0.096, -0.250, -0.469, -0.742], + [0, -0.089, -0.262, -0.520, -0.853], + [0, -0.078, -0.272, -0.581, -0.997], + [0, -0.061, -0.279, -0.659, -1.198]] + + psi_1 = interpolate.interp2d(nu_beta_range, nu_alpha_range, alpha_table, kind='linear') + psi_2 = interpolate.interp2d(nu_beta_range, nu_alpha_range, beta_table, kind='linear') + psi_2_1 = lambda nu_beta, nu_alpha: psi_2(nu_beta, nu_alpha) if nu_beta > 0 else -psi_2(-nu_beta, nu_alpha) + + phi_3 = interpolate.interp2d(beta_range, alpha_range, nu_c_table, kind='linear') + phi_3_1 = lambda beta, alpha: phi_3(beta, alpha) if beta > 0 else phi_3(-beta, alpha) + phi_5 = interpolate.interp2d(beta_range, alpha_range, nu_zeta_table, kind='linear') + phi_5_1 = lambda beta, alpha: phi_5(beta, alpha) if beta > 0 else -phi_5(-beta, alpha) + + # quantiles + p05 = np.percentile(data, 5) + p50 = np.percentile(data, 50) + p95 = np.percentile(data, 95) + p25 = np.percentile(data, 25) + p75 = np.percentile(data, 75) + + nu_alpha = (p95 - p05)/(p75 - p25) + nu_beta = (p95 + p05 - 2*p50)/(p95 - p05) + + if nu_alpha >= 2.439: + alpha = np.clip(psi_1(nu_beta, nu_alpha)[0], np.finfo(float).eps, 2.) + beta = np.clip(psi_2_1(nu_beta, nu_alpha)[0], -1., 1.) + else: + alpha = 2.0 + beta = np.sign(nu_beta) + c = (p75 - p25) / phi_3_1(beta, alpha)[0] + zeta = p50 + c*phi_5_1(beta, alpha)[0] + delta = np.clip(zeta-beta*c*np.tan(np.pi*alpha/2.) if alpha == 1. else zeta, np.finfo(float).eps, np.inf) + + return (alpha, beta, delta, c) + + def _stats(self, alpha, beta): + mu = 0 if alpha > 1 else np.nan + mu2 = 2 if alpha == 2 else np.inf + g1 = 0. if alpha == 2. else np.NaN + g2 = 0. if alpha == 2. else np.NaN + return mu, mu2, g1, g2 + + +levy_stable = levy_stable_gen(name='levy_stable') + + +class logistic_gen(rv_continuous): + r"""A logistic (or Sech-squared) continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `logistic` is: + + .. math:: + + f(x) = \frac{\exp(-x)} + {(1+\exp(-x))^2} + + `logistic` is a special case of `genlogistic` with ``c=1``. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self): + return self._random_state.logistic(size=self._size) + + def _pdf(self, x): + # logistic.pdf(x) = exp(-x) / (1+exp(-x))**2 + return np.exp(self._logpdf(x)) + + def _logpdf(self, x): + return -x - 2. * sc.log1p(np.exp(-x)) + + def _cdf(self, x): + return sc.expit(x) + + def _ppf(self, q): + return sc.logit(q) + + def _sf(self, x): + return sc.expit(-x) + + def _isf(self, q): + return -sc.logit(q) + + def _stats(self): + return 0, np.pi*np.pi/3.0, 0, 6.0/5.0 + + def _entropy(self): + # https://en.wikipedia.org/wiki/Logistic_distribution + return 2.0 + + +logistic = logistic_gen(name='logistic') + + +class loggamma_gen(rv_continuous): + r"""A log gamma continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `loggamma` is: + + .. math:: + + f(x, c) = \frac{\exp(c x - \exp(x))} + {\Gamma(c)} + + for all :math:`x, c > 0`. Here, :math:`\Gamma` is the + gamma function (`scipy.special.gamma`). + + `loggamma` takes ``c`` as a shape parameter for :math:`c`. 
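+
+    `loggamma` is related to `gamma`: if ``X`` is `gamma` distributed with
+    shape ``c``, then ``log(X)`` is `loggamma` with shape ``c``, which is how
+    ``_rvs`` below draws samples. A sampling sketch (assuming ``numpy`` is
+    available as ``np``)::
+
+        x = np.log(gamma.rvs(2.0, size=1000))  # distributed as loggamma(2.0)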
+ + %(after_notes)s + + %(example)s + + """ + def _rvs(self, c): + return np.log(self._random_state.gamma(c, size=self._size)) + + def _pdf(self, x, c): + # loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c) + return np.exp(c*x-np.exp(x)-sc.gammaln(c)) + + def _cdf(self, x, c): + return sc.gammainc(c, np.exp(x)) + + def _ppf(self, q, c): + return np.log(sc.gammaincinv(c, q)) + + def _stats(self, c): + # See, for example, "A Statistical Study of Log-Gamma Distribution", by + # Ping Shing Chan (thesis, McMaster University, 1993). + mean = sc.digamma(c) + var = sc.polygamma(1, c) + skewness = sc.polygamma(2, c) / np.power(var, 1.5) + excess_kurtosis = sc.polygamma(3, c) / (var*var) + return mean, var, skewness, excess_kurtosis + + +loggamma = loggamma_gen(name='loggamma') + + +class loglaplace_gen(rv_continuous): + r"""A log-Laplace continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `loglaplace` is: + + .. math:: + + f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1} &\text{for } 0 < x < 1\\ + \frac{c}{2} x^{-c-1} &\text{for } x \ge 1 + \end{cases} + + for :math:`c > 0`. + + `loglaplace` takes ``c`` as a shape parameter for :math:`c`. + + %(after_notes)s + + References + ---------- + T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model", + The Mathematical Scientist, vol. 28, pp. 49-60, 2003. + + %(example)s + + """ + def _pdf(self, x, c): + # loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1 + # = c / 2 * x**(-c-1), for x >= 1 + cd2 = c/2.0 + c = np.where(x < 1, c, -c) + return cd2*x**(c-1) + + def _cdf(self, x, c): + return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c)) + + def _ppf(self, q, c): + return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c)) + + def _munp(self, n, c): + return c**2 / (c**2 - n**2) + + def _entropy(self, c): + return np.log(2.0/c) + 1.0 + + +loglaplace = loglaplace_gen(a=0.0, name='loglaplace') + + +def _lognorm_logpdf(x, s): + return _lazywhere(x != 0, (x, s), + lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)), + -np.inf) + + +class lognorm_gen(rv_continuous): + r"""A lognormal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `lognorm` is: + + .. math:: + + f(x, s) = \frac{1}{s x \sqrt{2\pi}} + \exp\left(-\frac{\log^2(x)}{2s^2}\right) + + for :math:`x > 0`, :math:`s > 0`. + + `lognorm` takes ``s`` as a shape parameter for :math:`s`. + + %(after_notes)s + + A common parametrization for a lognormal random variable ``Y`` is in + terms of the mean, ``mu``, and standard deviation, ``sigma``, of the + unique normally distributed random variable ``X`` such that exp(X) = Y. + This parametrization corresponds to setting ``s = sigma`` and ``scale = + exp(mu)``. 
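+
+    For example, a variable whose logarithm is normal with mean ``mu = 3.``
+    and standard deviation ``sigma = 0.5`` could be modelled as (a sketch,
+    assuming ``numpy`` is imported as ``np``)::
+
+        rv = lognorm(s=0.5, scale=np.exp(3.))
+        rv.median()  # exp(mu) = 20.085...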
+ + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _rvs(self, s): + return np.exp(s * self._random_state.standard_normal(self._size)) + + def _pdf(self, x, s): + # lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2) + return np.exp(self._logpdf(x, s)) + + def _logpdf(self, x, s): + return _lognorm_logpdf(x, s) + + def _cdf(self, x, s): + return _norm_cdf(np.log(x) / s) + + def _logcdf(self, x, s): + return _norm_logcdf(np.log(x) / s) + + def _ppf(self, q, s): + return np.exp(s * _norm_ppf(q)) + + def _sf(self, x, s): + return _norm_sf(np.log(x) / s) + + def _logsf(self, x, s): + return _norm_logsf(np.log(x) / s) + + def _stats(self, s): + p = np.exp(s*s) + mu = np.sqrt(p) + mu2 = p*(p-1) + g1 = np.sqrt((p-1))*(2+p) + g2 = np.polyval([1, 2, 3, 0, -6.0], p) + return mu, mu2, g1, g2 + + def _entropy(self, s): + return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s)) + + @extend_notes_in_docstring(rv_continuous, notes="""\ + When the location parameter is fixed by using the `floc` argument, + this function uses explicit formulas for the maximum likelihood + estimation of the log-normal shape and scale parameters, so the + `optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""") + def fit(self, data, *args, **kwds): + floc = kwds.get('floc', None) + if floc is None: + # loc is not fixed. Use the default fit method. + return super(lognorm_gen, self).fit(data, *args, **kwds) + + f0 = (kwds.get('f0', None) or kwds.get('fs', None) or + kwds.get('fix_s', None)) + fscale = kwds.get('fscale', None) + + if len(args) > 1: + raise TypeError("Too many input arguments.") + for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale', + 'optimizer']: + kwds.pop(name, None) + if kwds: + raise TypeError("Unknown arguments: %s." % kwds) + + # Special case: loc is fixed. Use the maximum likelihood formulas + # instead of the numerical solver. + + if f0 is not None and fscale is not None: + # This check is for consistency with `rv_continuous.fit`. + raise ValueError("All parameters fixed. There is nothing to " + "optimize.") + + data = np.asarray(data) + floc = float(floc) + if floc != 0: + # Shifting the data by floc. Don't do the subtraction in-place, + # because `data` might be a view of the input array. + data = data - floc + if np.any(data <= 0): + raise FitDataError("lognorm", lower=floc, upper=np.inf) + lndata = np.log(data) + + # Three cases to handle: + # * shape and scale both free + # * shape fixed, scale free + # * shape free, scale fixed + + if fscale is None: + # scale is free. + scale = np.exp(lndata.mean()) + if f0 is None: + # shape is free. + shape = lndata.std() + else: + # shape is fixed. + shape = float(f0) + else: + # scale is fixed, shape is free + scale = float(fscale) + shape = np.sqrt(((lndata - np.log(scale))**2).mean()) + + return shape, floc, scale + + +lognorm = lognorm_gen(a=0.0, name='lognorm') + + +class gilbrat_gen(rv_continuous): + r"""A Gilbrat continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `gilbrat` is: + + .. math:: + + f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2) + + `gilbrat` is a special case of `lognorm` with ``s=1``. 
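+
+    The identity with `lognorm` can be checked numerically, e.g. (a sketch,
+    for an array ``x`` of positive values)::
+
+        np.allclose(gilbrat.pdf(x), lognorm.pdf(x, 1.0))  # True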
+ + %(after_notes)s + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _rvs(self): + return np.exp(self._random_state.standard_normal(self._size)) + + def _pdf(self, x): + # gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2) + return np.exp(self._logpdf(x)) + + def _logpdf(self, x): + return _lognorm_logpdf(x, 1.0) + + def _cdf(self, x): + return _norm_cdf(np.log(x)) + + def _ppf(self, q): + return np.exp(_norm_ppf(q)) + + def _stats(self): + p = np.e + mu = np.sqrt(p) + mu2 = p * (p - 1) + g1 = np.sqrt((p - 1)) * (2 + p) + g2 = np.polyval([1, 2, 3, 0, -6.0], p) + return mu, mu2, g1, g2 + + def _entropy(self): + return 0.5 * np.log(2 * np.pi) + 0.5 + + +gilbrat = gilbrat_gen(a=0.0, name='gilbrat') + + +class maxwell_gen(rv_continuous): + r"""A Maxwell continuous random variable. + + %(before_notes)s + + Notes + ----- + A special case of a `chi` distribution, with ``df=3``, ``loc=0.0``, + and given ``scale = a``, where ``a`` is the parameter used in the + Mathworld description [1]_. + + The probability density function for `maxwell` is: + + .. math:: + + f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2) + + for :math:`x > 0`. + + %(after_notes)s + + References + ---------- + .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html + + %(example)s + """ + def _rvs(self): + return chi.rvs(3.0, size=self._size, random_state=self._random_state) + + def _pdf(self, x): + # maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2) + return np.sqrt(2.0/np.pi)*x*x*np.exp(-x*x/2.0) + + def _cdf(self, x): + return sc.gammainc(1.5, x*x/2.0) + + def _ppf(self, q): + return np.sqrt(2*sc.gammaincinv(1.5, q)) + + def _stats(self): + val = 3*np.pi-8 + return (2*np.sqrt(2.0/np.pi), + 3-8/np.pi, + np.sqrt(2)*(32-10*np.pi)/val**1.5, + (-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0) + + def _entropy(self): + return _EULER + 0.5*np.log(2*np.pi)-0.5 + + +maxwell = maxwell_gen(a=0.0, name='maxwell') + + +class mielke_gen(rv_continuous): + r"""A Mielke's Beta-Kappa continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `mielke` is: + + .. math:: + + f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}} + + for :math:`x > 0` and :math:`k, s > 0`. + + `mielke` takes ``k`` and ``s`` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x, k, s): + # mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s) + return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s) + + def _cdf(self, x, k, s): + return x**k / (1.0+x**s)**(k*1.0/s) + + def _ppf(self, q, k, s): + qsk = pow(q, s*1.0/k) + return pow(qsk/(1.0-qsk), 1.0/s) + + +mielke = mielke_gen(a=0.0, name='mielke') + + +class kappa4_gen(rv_continuous): + r"""Kappa 4 parameter distribution. + + %(before_notes)s + + Notes + ----- + The probability density function for kappa4 is: + + .. math:: + + f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1} + + if :math:`h` and :math:`k` are not equal to 0. + + If :math:`h` or :math:`k` are zero then the pdf can be simplified: + + h = 0 and k != 0:: + + kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)* + exp(-(1.0 - k*x)**(1.0/k)) + + h != 0 and k = 0:: + + kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0) + + h = 0 and k = 0:: + + kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x)) + + kappa4 takes :math:`h` and :math:`k` as shape parameters. + + The kappa4 distribution returns other distributions when certain + :math:`h` and :math:`k` values are used. 
+ + +------+-------------+----------------+------------------+ + | h | k=0.0 | k=1.0 | -inf<=k<=inf | + +======+=============+================+==================+ + | -1.0 | Logistic | | Generalized | + | | | | Logistic(1) | + | | | | | + | | logistic(x) | | | + +------+-------------+----------------+------------------+ + | 0.0 | Gumbel | Reverse | Generalized | + | | | Exponential(2) | Extreme Value | + | | | | | + | | gumbel_r(x) | | genextreme(x, k) | + +------+-------------+----------------+------------------+ + | 1.0 | Exponential | Uniform | Generalized | + | | | | Pareto | + | | | | | + | | expon(x) | uniform(x) | genpareto(x, -k) | + +------+-------------+----------------+------------------+ + + (1) There are at least five generalized logistic distributions. + Four are described here: + https://en.wikipedia.org/wiki/Generalized_logistic_distribution + The "fifth" one is the one kappa4 should match which currently + isn't implemented in scipy: + https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution + https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html + (2) This distribution is currently not in scipy. + + References + ---------- + J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect + to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate + Faculty of the Louisiana State University and Agricultural and Mechanical + College, (August, 2004), + https://digitalcommons.lsu.edu/gradschool_dissertations/3672 + + J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res. + Develop. 38 (3), 25 1-258 (1994). + + B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao + Site in the Chi River Basin, Thailand", Journal of Water Resource and + Protection, vol. 4, 866-869, (2012). + https://doi.org/10.4236/jwarp.2012.410101 + + C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A + Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March + 2000). + http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, h, k): + condlist = [np.logical_and(h > 0, k > 0), + np.logical_and(h > 0, k == 0), + np.logical_and(h > 0, k < 0), + np.logical_and(h <= 0, k > 0), + np.logical_and(h <= 0, k == 0), + np.logical_and(h <= 0, k < 0)] + + def f0(h, k): + return (1.0 - float_power(h, -k))/k + + def f1(h, k): + return np.log(h) + + def f3(h, k): + a = np.empty(np.shape(h)) + a[:] = -np.inf + return a + + def f5(h, k): + return 1.0/k + + self.a = _lazyselect(condlist, + [f0, f1, f0, f3, f3, f5], + [h, k], + default=np.nan) + + def f0(h, k): + return 1.0/k + + def f1(h, k): + a = np.empty(np.shape(h)) + a[:] = np.inf + return a + + self.b = _lazyselect(condlist, + [f0, f1, f1, f0, f1, f1], + [h, k], + default=np.nan) + return h == h + + def _pdf(self, x, h, k): + # kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)* + # (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1) + return np.exp(self._logpdf(x, h, k)) + + def _logpdf(self, x, h, k): + condlist = [np.logical_and(h != 0, k != 0), + np.logical_and(h == 0, k != 0), + np.logical_and(h != 0, k == 0), + np.logical_and(h == 0, k == 0)] + + def f0(x, h, k): + '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*( + 1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0) + logpdf = ... + ''' + return (sc.xlog1py(1.0/k - 1.0, -k*x) + + sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k))) + + def f1(x, h, k): + '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-( + 1.0 - k*x)**(1.0/k)) + logpdf = ... 
+ ''' + return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k) + + def f2(x, h, k): + '''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0) + logpdf = ... + ''' + return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x)) + + def f3(x, h, k): + '''pdf = np.exp(-x-np.exp(-x)) + logpdf = ... + ''' + return -x - np.exp(-x) + + return _lazyselect(condlist, + [f0, f1, f2, f3], + [x, h, k], + default=np.nan) + + def _cdf(self, x, h, k): + return np.exp(self._logcdf(x, h, k)) + + def _logcdf(self, x, h, k): + condlist = [np.logical_and(h != 0, k != 0), + np.logical_and(h == 0, k != 0), + np.logical_and(h != 0, k == 0), + np.logical_and(h == 0, k == 0)] + + def f0(x, h, k): + '''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h) + logcdf = ... + ''' + return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k)) + + def f1(x, h, k): + '''cdf = np.exp(-(1.0 - k*x)**(1.0/k)) + logcdf = ... + ''' + return -(1.0 - k*x)**(1.0/k) + + def f2(x, h, k): + '''cdf = (1.0 - h*np.exp(-x))**(1.0/h) + logcdf = ... + ''' + return (1.0/h)*sc.log1p(-h*np.exp(-x)) + + def f3(x, h, k): + '''cdf = np.exp(-np.exp(-x)) + logcdf = ... + ''' + return -np.exp(-x) + + return _lazyselect(condlist, + [f0, f1, f2, f3], + [x, h, k], + default=np.nan) + + def _ppf(self, q, h, k): + condlist = [np.logical_and(h != 0, k != 0), + np.logical_and(h == 0, k != 0), + np.logical_and(h != 0, k == 0), + np.logical_and(h == 0, k == 0)] + + def f0(q, h, k): + return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k) + + def f1(q, h, k): + return 1.0/k*(1.0 - (-np.log(q))**k) + + def f2(q, h, k): + '''ppf = -np.log((1.0 - (q**h))/h) + ''' + return -sc.log1p(-(q**h)) + np.log(h) + + def f3(q, h, k): + return -np.log(-np.log(q)) + + return _lazyselect(condlist, + [f0, f1, f2, f3], + [q, h, k], + default=np.nan) + + def _stats(self, h, k): + if h >= 0 and k >= 0: + maxr = 5 + elif h < 0 and k >= 0: + maxr = int(-1.0/h*k) + elif k < 0: + maxr = int(-1.0/k) + else: + maxr = 5 + + outputs = [None if r < maxr else np.nan for r in range(1, 5)] + return outputs[:] + + +kappa4 = kappa4_gen(name='kappa4') + + +class kappa3_gen(rv_continuous): + r"""Kappa 3 parameter distribution. + + %(before_notes)s + + Notes + ----- + The probability density function for `kappa3` is: + + .. math:: + + f(x, a) = a (a + x^a)^{-(a + 1)/a} + + for :math:`x > 0` and :math:`a > 0`. + + `kappa3` takes ``a`` as a shape parameter for :math:`a`. + + References + ---------- + P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum + Likelihood and Likelihood Ratio Tests", Methods in Weather Research, + 701-707, (September, 1973), + https://doi.org/10.1175/1520-0493(1973)101<0701:TKDMLE>2.3.CO;2 + + B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the + Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2, + 415-419 (2012), https://doi.org/10.4236/ojs.2012.24050 + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, a): + return a > 0 + + def _pdf(self, x, a): + # kappa3.pdf(x, a) = a*(a + x**a)**(-(a + 1)/a), for x > 0 + return a*(a + x**a)**(-1.0/a-1) + + def _cdf(self, x, a): + return x*(a + x**a)**(-1.0/a) + + def _ppf(self, q, a): + return (a/(q**-a - 1.0))**(1.0/a) + + def _stats(self, a): + outputs = [None if i < a else np.nan for i in range(1, 5)] + return outputs[:] + + +kappa3 = kappa3_gen(a=0.0, name='kappa3') + +class moyal_gen(rv_continuous): + r"""A Moyal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `moyal` is: + + .. 
math:: + + f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi} + + for a real number :math:`x`. + + %(after_notes)s + + This distribution has utility in high-energy physics and radiation + detection. It describes the energy loss of a charged relativistic + particle due to ionization of the medium [1]_. It also provides an + approximation for the Landau distribution. For an in depth description + see [2]_. For additional description, see [3]_. + + References + ---------- + .. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations", + The London, Edinburgh, and Dublin Philosophical Magazine + and Journal of Science, vol 46, 263-280, (1955). + :doi:`10.1080/14786440308521076` (gated) + .. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution", + International Journal of Research and Reviews in Applied Sciences, + vol 10, 171-192, (2012). + http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf + .. [3] C. Walck, "Handbook on Statistical Distributions for + Experimentalists; International Report SUF-PFY/96-01", Chapter 26, + University of Stockholm: Stockholm, Sweden, (2007). + http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf + + .. versionadded:: 1.1.0 + + %(example)s + + """ + def _rvs(self): + sz, rndm = self._size, self._random_state + u1 = gamma.rvs(a = 0.5, scale = 2, size=sz, random_state=rndm) + return -np.log(u1) + + def _pdf(self, x): + return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi) + + def _cdf(self, x): + return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2)) + + def _sf(self, x): + return sc.erf(np.exp(-0.5 * x) / np.sqrt(2)) + + def _ppf(self, x): + return -np.log(2 * sc.erfcinv(x)**2) + + def _stats(self): + mu = np.log(2) + np.euler_gamma + mu2 = np.pi**2 / 2 + g1 = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3 + g2 = 4. + return mu, mu2, g1, g2 + + def _munp(self, n): + if n == 1.0: + return np.log(2) + np.euler_gamma + elif n == 2.0: + return np.pi**2 / 2 + (np.log(2) + np.euler_gamma)**2 + elif n == 3.0: + tmp1 = 1.5 * np.pi**2 * (np.log(2)+np.euler_gamma) + tmp2 = (np.log(2)+np.euler_gamma)**3 + tmp3 = 14 * sc.zeta(3) + return tmp1 + tmp2 + tmp3 + elif n == 4.0: + tmp1 = 4 * 14 * sc.zeta(3) * (np.log(2) + np.euler_gamma) + tmp2 = 3 * np.pi**2 * (np.log(2) + np.euler_gamma)**2 + tmp3 = (np.log(2) + np.euler_gamma)**4 + tmp4 = 7 * np.pi**4 / 4 + return tmp1 + tmp2 + tmp3 + tmp4 + else: + # return generic for higher moments + # return rv_continuous._mom1_sc(self, n, b) + return self._mom1_sc(n) + + +moyal = moyal_gen(name="moyal") + + +class nakagami_gen(rv_continuous): + r"""A Nakagami continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `nakagami` is: + + .. math:: + + f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2) + + for :math:`x > 0`, :math:`\nu > 0`. + + `nakagami` takes ``nu`` as a shape parameter for :math:`\nu`. 
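+
+    `nakagami` is related to `gamma`: if ``G`` is `gamma` distributed with
+    shape ``nu``, then ``sqrt(G/nu)`` is `nakagami` distributed with the same
+    ``nu`` (compare ``_cdf`` below). A sampling sketch, for a given shape
+    value ``nu``::
+
+        x = np.sqrt(gamma.rvs(nu, size=1000)/nu)  # nakagami(nu) samples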
+ + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x, nu): + # nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) * + # x**(2*nu-1) * exp(-nu*x**2) + return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x) + + def _cdf(self, x, nu): + return sc.gammainc(nu, nu*x*x) + + def _ppf(self, q, nu): + return np.sqrt(1.0/nu*sc.gammaincinv(nu, q)) + + def _stats(self, nu): + mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu) + mu2 = 1.0-mu*mu + g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5) + g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1 + g2 /= nu*mu2**2.0 + return mu, mu2, g1, g2 + + +nakagami = nakagami_gen(a=0.0, name="nakagami") + + +class ncx2_gen(rv_continuous): + r"""A non-central chi-squared continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `ncx2` is: + + .. math:: + + f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2) + (x/\lambda)^{(k-2)/4} I_{(k-2)/2}(\sqrt{\lambda x}) + + for :math:`x > 0` and :math:`k, \lambda > 0`. :math:`k` specifies the + degrees of freedom (denoted ``df`` in the implementation) and + :math:`\lambda` is the non-centrality parameter (denoted ``nc`` in the + implementation). :math:`I_\nu` denotes the modified Bessel function of + first order of degree :math:`\nu` (`scipy.special.iv`). + + `ncx2` takes ``df`` and ``nc`` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, df, nc): + return self._random_state.noncentral_chisquare(df, nc, self._size) + + def _logpdf(self, x, df, nc): + return _ncx2_log_pdf(x, df, nc) + + def _pdf(self, x, df, nc): + # ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4) + # * I[(df-2)/2](sqrt(nc*x)) + return _ncx2_pdf(x, df, nc) + + def _cdf(self, x, df, nc): + return _ncx2_cdf(x, df, nc) + + def _ppf(self, q, df, nc): + return sc.chndtrix(q, df, nc) + + def _stats(self, df, nc): + val = df + 2.0*nc + return (df + nc, + 2*val, + np.sqrt(8)*(val+nc)/val**1.5, + 12.0*(val+2*nc)/val**2.0) + + +ncx2 = ncx2_gen(a=0.0, name='ncx2') + + +class ncf_gen(rv_continuous): + r"""A non-central F distribution continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `ncf` is: + + .. math:: + + f(x, n_1, n_2, \lambda) = + \exp(\frac{\lambda}{2} + \lambda n_1 \frac{x}{2(n_1 x+n_2)}) + n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\ + (n_2+n_1 x)^{-(n_1+n_2)/2} + \gamma(n_1/2) \gamma(1+n_2/2) \\ + \frac{L^{\frac{v_1}{2}-1}_{v_2/2} + (-\lambda v_1 \frac{x}{2(v_1 x+v_2)})} + {B(v_1/2, v_2/2) \gamma(\frac{v_1+v_2}{2})} + + for :math:`n_1 > 1`, :math:`n_2, \lambda > 0`. Here :math:`n_1` is the + degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in + the denominator, :math:`\lambda` the non-centrality parameter, + :math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a + generalized Laguerre polynomial and :math:`B` is the beta function. + + `ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, dfn, dfd, nc): + return self._random_state.noncentral_f(dfn, dfd, nc, self._size) + + def _pdf_skip(self, x, dfn, dfd, nc): + # ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) * + # df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) * + # (df2+df1*x)**(-(df1+df2)/2) * + # gamma(df1/2)*gamma(1+df2/2) * + # L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) / + # (B(v1/2, v2/2) * gamma((v1+v2)/2)) + n1, n2 = dfn, dfd + term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.) 
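+        # term accumulates, in log space, the exponential prefactor and the
+        # gamma-function factors of the pdf; it is exponentiated into Px below.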
+        term -= sc.gammaln((n1+n2)/2.0)
+        Px = np.exp(term)
+        Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
+        Px *= (n2+n1*x)**(-(n1+n2)/2)
+        Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
+        Px /= sc.beta(n1/2, n2/2)
+        # This function does not have a return. Drop it for now, the generic
+        # function seems to work OK.
+
+    def _cdf(self, x, dfn, dfd, nc):
+        return sc.ncfdtr(dfn, dfd, nc, x)
+
+    def _ppf(self, q, dfn, dfd, nc):
+        return sc.ncfdtri(dfn, dfd, nc, q)
+
+    def _munp(self, n, dfn, dfd, nc):
+        val = (dfn * 1.0/dfd)**n
+        term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
+        val *= np.exp(-nc / 2.0+term)
+        val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
+        return val
+
+    def _stats(self, dfn, dfd, nc):
+        mu = np.where(dfd <= 2, np.inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
+        mu2 = np.where(dfd <= 4, np.inf, 2*(dfd*1.0/dfn)**2.0 *
+                       ((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
+                       ((dfd-2.0)**2.0 * (dfd-4.0)))
+        return mu, mu2, None, None
+
+
+ncf = ncf_gen(a=0.0, name='ncf')
+
+
+class t_gen(rv_continuous):
+    r"""A Student's t continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `t` is:
+
+    .. math::
+
+        f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
+                        {\sqrt{\pi \nu} \Gamma(\nu/2)}
+                    (1+x^2/\nu)^{-(\nu+1)/2}
+
+    where :math:`x` is a real number and the degrees of freedom parameter
+    :math:`\nu` (denoted ``df`` in the implementation) satisfies
+    :math:`\nu > 0`. :math:`\Gamma` is the gamma function
+    (`scipy.special.gamma`).
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, df):
+        return df > 0
+
+    def _rvs(self, df):
+        return self._random_state.standard_t(df, size=self._size)
+
+    def _pdf(self, x, df):
+        #                                gamma((df+1)/2)
+        # t.pdf(x, df) = ---------------------------------------------------
+        #                sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
+        r = np.asarray(df*1.0)
+        Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
+        Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
+        return Px
+
+    def _logpdf(self, x, df):
+        r = df*1.0
+        lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
+        lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
+        return lPx
+
+    def _cdf(self, x, df):
+        return sc.stdtr(df, x)
+
+    def _sf(self, x, df):
+        return sc.stdtr(df, -x)
+
+    def _ppf(self, q, df):
+        return sc.stdtrit(df, q)
+
+    def _isf(self, q, df):
+        return -sc.stdtrit(df, q)
+
+    def _stats(self, df):
+        mu = np.where(df > 1, 0.0, np.inf)
+        mu2 = _lazywhere(df > 2, (df,),
+                         lambda df: df / (df-2.0),
+                         np.inf)
+        mu2 = np.where(df <= 1, np.nan, mu2)
+        g1 = np.where(df > 3, 0.0, np.nan)
+        g2 = _lazywhere(df > 4, (df,),
+                        lambda df: 6.0 / (df-4.0),
+                        np.inf)
+        g2 = np.where(df <= 2, np.nan, g2)
+        return mu, mu2, g1, g2
+
+
+t = t_gen(name='t')
+
+
+class nct_gen(rv_continuous):
+    r"""A non-central Student's t continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    If :math:`Y` is a standard normal random variable and :math:`V` is
+    an independent chi-square random variable (`chi2`) with :math:`k` degrees
+    of freedom, then
+
+    .. math::
+
+        X = \frac{Y + c}{\sqrt{V/k}}
+
+    has a non-central Student's t distribution on the real line.
+    The degrees of freedom parameter :math:`k` (denoted ``df`` in the
+    implementation) satisfies :math:`k > 0` and the noncentrality parameter
+    :math:`c` (denoted ``nc`` in the implementation) is a real number.
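+
+    For example, following this construction, variates could be drawn as
+    (a sketch mirroring ``_rvs`` below, for given ``df`` and ``nc``)::
+
+        y = norm.rvs(loc=nc, size=1000)
+        v = chi2.rvs(df, size=1000)
+        x = y * np.sqrt(df) / np.sqrt(v)  # distributed as nct(df, nc)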
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, df, nc):
+        return (df > 0) & (nc == nc)
+
+    def _rvs(self, df, nc):
+        sz, rndm = self._size, self._random_state
+        n = norm.rvs(loc=nc, size=sz, random_state=rndm)
+        c2 = chi2.rvs(df, size=sz, random_state=rndm)
+        return n * np.sqrt(df) / np.sqrt(c2)
+
+    def _pdf(self, x, df, nc):
+        n = df*1.0
+        nc = nc*1.0
+        x2 = x*x
+        ncx2 = nc*nc*x2
+        fac1 = n + x2
+        trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
+        trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
+        Px = np.exp(trm1)
+        valF = ncx2 / (2*fac1)
+        trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
+        trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
+        trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
+        trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
+        Px *= trm1+trm2
+        return Px
+
+    def _cdf(self, x, df, nc):
+        return sc.nctdtr(df, nc, x)
+
+    def _ppf(self, q, df, nc):
+        return sc.nctdtrit(df, nc, q)
+
+    def _stats(self, df, nc, moments='mv'):
+        #
+        # See D. Hogben, R.S. Pinkham, and M.B. Wilk,
+        # 'The moments of the non-central t-distribution'
+        # Biometrika 48, p. 465 (1961).
+        # e.g. https://www.jstor.org/stable/2332772 (gated)
+        #
+        mu, mu2, g1, g2 = None, None, None, None
+
+        gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
+        c11 = np.sqrt(df/2.) * gfac
+        c20 = df / (df-2.)
+        c22 = c20 - c11*c11
+        mu = np.where(df > 1, nc*c11, np.inf)
+        mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
+        if 's' in moments:
+            c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
+            c31t = 3.*df / (df-2.) / (df-3.)
+            mu3 = (c33t*nc*nc + c31t) * c11*nc
+            g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
+        # kurtosis
+        if 'k' in moments:
+            c44 = df*df / (df-2.) / (df-4.)
+            c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
+            c44 -= 3.*c11**4
+            c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
+            c42 *= 6.*df / (df-2.)
+            c40 = 3.*df*df / (df-2.) / (df-4.)
+
+            mu4 = c44 * nc**4 + c42*nc**2 + c40
+            g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
+        return mu, mu2, g1, g2
+
+
+nct = nct_gen(name="nct")
+
+
+class pareto_gen(rv_continuous):
+    r"""A Pareto continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `pareto` is:
+
+    .. math::
+
+        f(x, b) = \frac{b}{x^{b+1}}
+
+    for :math:`x \ge 1`, :math:`b > 0`.
+
+    `pareto` takes ``b`` as a shape parameter for :math:`b`.
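+
+    The survival function has the simple closed form ``pareto.sf(x, b) =
+    x**(-b)``: with ``b = 2``, for instance, the probability of exceeding
+    ``x = 10`` is ``10**(-2) = 0.01``.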
+ + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x, b): + # pareto.pdf(x, b) = b / x**(b+1) + return b * x**(-b-1) + + def _cdf(self, x, b): + return 1 - x**(-b) + + def _ppf(self, q, b): + return pow(1-q, -1.0/b) + + def _sf(self, x, b): + return x**(-b) + + def _stats(self, b, moments='mv'): + mu, mu2, g1, g2 = None, None, None, None + if 'm' in moments: + mask = b > 1 + bt = np.extract(mask, b) + mu = valarray(np.shape(b), value=np.inf) + np.place(mu, mask, bt / (bt-1.0)) + if 'v' in moments: + mask = b > 2 + bt = np.extract(mask, b) + mu2 = valarray(np.shape(b), value=np.inf) + np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2) + if 's' in moments: + mask = b > 3 + bt = np.extract(mask, b) + g1 = valarray(np.shape(b), value=np.nan) + vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt)) + np.place(g1, mask, vals) + if 'k' in moments: + mask = b > 4 + bt = np.extract(mask, b) + g2 = valarray(np.shape(b), value=np.nan) + vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) / + np.polyval([1.0, -7.0, 12.0, 0.0], bt)) + np.place(g2, mask, vals) + return mu, mu2, g1, g2 + + def _entropy(self, c): + return 1 + 1.0/c - np.log(c) + + +pareto = pareto_gen(a=1.0, name="pareto") + + +class lomax_gen(rv_continuous): + r"""A Lomax (Pareto of the second kind) continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `lomax` is: + + .. math:: + + f(x, c) = \frac{c}{(1+x)^{c+1}} + + for :math:`x \ge 0`, :math:`c > 0`. + + `lomax` takes ``c`` as a shape parameter for :math:`c`. + + `lomax` is a special case of `pareto` with ``loc=-1.0``. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x, c): + # lomax.pdf(x, c) = c / (1+x)**(c+1) + return c*1.0/(1.0+x)**(c+1.0) + + def _logpdf(self, x, c): + return np.log(c) - (c+1)*sc.log1p(x) + + def _cdf(self, x, c): + return -sc.expm1(-c*sc.log1p(x)) + + def _sf(self, x, c): + return np.exp(-c*sc.log1p(x)) + + def _logsf(self, x, c): + return -c*sc.log1p(x) + + def _ppf(self, q, c): + return sc.expm1(-sc.log1p(-q)/c) + + def _stats(self, c): + mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk') + return mu, mu2, g1, g2 + + def _entropy(self, c): + return 1+1.0/c-np.log(c) + + +lomax = lomax_gen(a=0.0, name="lomax") + + +class pearson3_gen(rv_continuous): + r"""A pearson type III continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `pearson3` is: + + .. math:: + + f(x, skew) = \frac{|\beta|}{\Gamma(\alpha)} + (\beta (x - \zeta))^{\alpha - 1} + \exp(-\beta (x - \zeta)) + + where: + + .. math:: + + \beta = \frac{2}{skew stddev} + \alpha = (stddev \beta)^2 + \zeta = loc - \frac{\alpha}{\beta} + + :math:`\Gamma` is the gamma function (`scipy.special.gamma`). + `pearson3` takes ``skew`` as a shape parameter for :math:`skew`. + + %(after_notes)s + + %(example)s + + References + ---------- + R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and + Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water + Resources Research, Vol.27, 3149-3158 (1991). + + L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist., + Vol.1, 191-198 (1930). + + "Using Modern Computing Tools to Fit the Pearson Type III Distribution to + Aviation Loads Data", Office of Aviation Research (2003). + + """ + def _preprocess(self, x, skew): + # The real 'loc' and 'scale' are handled in the calling pdf(...). 
The + # local variables 'loc' and 'scale' within pearson3._pdf are set to + # the defaults just to keep them as part of the equations for + # documentation. + loc = 0.0 + scale = 1.0 + + # If skew is small, return _norm_pdf. The divide between pearson3 + # and norm was found by brute force and is approximately a skew of + # 0.000016. No one, I hope, would actually use a skew value even + # close to this small. + norm2pearson_transition = 0.000016 + + ans, x, skew = np.broadcast_arrays([1.0], x, skew) + ans = ans.copy() + + # mask is True where skew is small enough to use the normal approx. + mask = np.absolute(skew) < norm2pearson_transition + invmask = ~mask + + beta = 2.0 / (skew[invmask] * scale) + alpha = (scale * beta)**2 + zeta = loc - alpha / beta + + transx = beta * (x[invmask] - zeta) + return ans, x, transx, mask, invmask, beta, alpha, zeta + + def _argcheck(self, skew): + # The _argcheck function in rv_continuous only allows positive + # arguments. The skew argument for pearson3 can be zero (which I want + # to handle inside pearson3._pdf) or negative. So just return True + # for all skew args. + return np.ones(np.shape(skew), dtype=bool) + + def _stats(self, skew): + _, _, _, _, _, beta, alpha, zeta = ( + self._preprocess([1], skew)) + m = zeta + alpha / beta + v = alpha / (beta**2) + s = 2.0 / (alpha**0.5) * np.sign(beta) + k = 6.0 / alpha + return m, v, s, k + + def _pdf(self, x, skew): + # pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) * + # (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta)) + # Do the calculation in _logpdf since helps to limit + # overflow/underflow problems + ans = np.exp(self._logpdf(x, skew)) + if ans.ndim == 0: + if np.isnan(ans): + return 0.0 + return ans + ans[np.isnan(ans)] = 0.0 + return ans + + def _logpdf(self, x, skew): + # PEARSON3 logpdf GAMMA logpdf + # np.log(abs(beta)) + # + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x) + # - beta*(x - zeta) - x + # - sc.gammalnalpha) - sc.gammalna) + ans, x, transx, mask, invmask, beta, alpha, _ = ( + self._preprocess(x, skew)) + + ans[mask] = np.log(_norm_pdf(x[mask])) + ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha) + return ans + + def _cdf(self, x, skew): + ans, x, transx, mask, invmask, _, alpha, _ = ( + self._preprocess(x, skew)) + + ans[mask] = _norm_cdf(x[mask]) + ans[invmask] = gamma._cdf(transx, alpha) + return ans + + def _rvs(self, skew): + skew = broadcast_to(skew, self._size) + ans, _, _, mask, invmask, beta, alpha, zeta = ( + self._preprocess([0], skew)) + + nsmall = mask.sum() + nbig = mask.size - nsmall + ans[mask] = self._random_state.standard_normal(nsmall) + ans[invmask] = (self._random_state.standard_gamma(alpha, nbig)/beta + + zeta) + + if self._size == (): + ans = ans[0] + return ans + + def _ppf(self, q, skew): + ans, q, _, mask, invmask, beta, alpha, zeta = ( + self._preprocess(q, skew)) + ans[mask] = _norm_ppf(q[mask]) + ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta + return ans + + +pearson3 = pearson3_gen(name="pearson3") + + +class powerlaw_gen(rv_continuous): + r"""A power-function continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `powerlaw` is: + + .. math:: + + f(x, a) = a x^{a-1} + + for :math:`0 \le x \le 1`, :math:`a > 0`. + + `powerlaw` takes ``a`` as a shape parameter for :math:`a`. + + %(after_notes)s + + `powerlaw` is a special case of `beta` with ``b=1``. 
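+
+    Since the cdf is simply ``x**a``, quantiles have the closed form
+    ``q**(1/a)``, and the equivalence with `beta` can be checked numerically
+    (a sketch, for an array ``x`` in [0, 1])::
+
+        np.allclose(powerlaw.pdf(x, a), beta.pdf(x, a, 1.0))  # True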
+ + %(example)s + + """ + def _pdf(self, x, a): + # powerlaw.pdf(x, a) = a * x**(a-1) + return a*x**(a-1.0) + + def _logpdf(self, x, a): + return np.log(a) + sc.xlogy(a - 1, x) + + def _cdf(self, x, a): + return x**(a*1.0) + + def _logcdf(self, x, a): + return a*np.log(x) + + def _ppf(self, q, a): + return pow(q, 1.0/a) + + def _stats(self, a): + return (a / (a + 1.0), + a / (a + 2.0) / (a + 1.0) ** 2, + -2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a), + 6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4))) + + def _entropy(self, a): + return 1 - 1.0/a - np.log(a) + + +powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw") + + +class powerlognorm_gen(rv_continuous): + r"""A power log-normal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `powerlognorm` is: + + .. math:: + + f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s) + (\Phi(-\log(x)/s))^{c-1} + + where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf, + and :math:`x > 0`, :math:`s, c > 0`. + + `powerlognorm` takes :math:`c` and :math:`s` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _pdf(self, x, c, s): + # powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) * + # (Phi(-log(x)/s))**(c-1), + return (c/(x*s) * _norm_pdf(np.log(x)/s) * + pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0)) + + def _cdf(self, x, c, s): + return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0) + + def _ppf(self, q, c, s): + return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c))) + + +powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm") + + +class powernorm_gen(rv_continuous): + r"""A power normal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `powernorm` is: + + .. math:: + + f(x, c) = c \phi(x) (\Phi(-x))^{c-1} + + where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf, + and :math:`x > 0`, :math:`c > 0`. + + `powernorm` takes ``c`` as a shape parameter for :math:`c`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x, c): + # powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1) + return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0)) + + def _logpdf(self, x, c): + return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x) + + def _cdf(self, x, c): + return 1.0-_norm_cdf(-x)**(c*1.0) + + def _ppf(self, q, c): + return -_norm_ppf(pow(1.0 - q, 1.0 / c)) + + +powernorm = powernorm_gen(name='powernorm') + + +class rdist_gen(rv_continuous): + r"""An R-distributed continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `rdist` is: + + .. math:: + + f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)} + + for :math:`-1 \le x \le 1`, :math:`c > 0`. + + `rdist` takes ``c`` as a shape parameter for :math:`c`. + + This distribution includes the following distribution kernels as + special cases:: + + c = 2: uniform + c = 4: Epanechnikov (parabolic) + c = 6: quartic (biweight) + c = 8: triweight + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x, c): + # rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2) + return np.power((1.0 - x**2), c / 2.0 - 1) / sc.beta(0.5, c / 2.0) + + def _cdf(self, x, c): + term1 = x / sc.beta(0.5, c / 2.0) + res = 0.5 + term1 * sc.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2) + # There's an issue with hyp2f1, it returns nans near x = +-1, c > 100. + # Use the generic implementation in that case. See gh-1285 for + # background. 
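+        # The generic rv_continuous._cdf fallback integrates _pdf numerically
+        # for the whole array, trading speed for robustness near x = +-1.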
+ if np.any(np.isnan(res)): + return rv_continuous._cdf(self, x, c) + return res + + def _munp(self, n, c): + numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0) + return numerator / sc.beta(1. / 2, c / 2.) + + +rdist = rdist_gen(a=-1.0, b=1.0, name="rdist") + + +class rayleigh_gen(rv_continuous): + r"""A Rayleigh continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `rayleigh` is: + + .. math:: + + f(x) = x \exp(-x^2/2) + + for :math:`x \ge 0`. + + `rayleigh` is a special case of `chi` with ``df=2``. + + %(after_notes)s + + %(example)s + + """ + _support_mask = rv_continuous._open_support_mask + + def _rvs(self): + return chi.rvs(2, size=self._size, random_state=self._random_state) + + def _pdf(self, r): + # rayleigh.pdf(r) = r * exp(-r**2/2) + return np.exp(self._logpdf(r)) + + def _logpdf(self, r): + return np.log(r) - 0.5 * r * r + + def _cdf(self, r): + return -sc.expm1(-0.5 * r**2) + + def _ppf(self, q): + return np.sqrt(-2 * sc.log1p(-q)) + + def _sf(self, r): + return np.exp(self._logsf(r)) + + def _logsf(self, r): + return -0.5 * r * r + + def _isf(self, q): + return np.sqrt(-2 * np.log(q)) + + def _stats(self): + val = 4 - np.pi + return (np.sqrt(np.pi/2), + val/2, + 2*(np.pi-3)*np.sqrt(np.pi)/val**1.5, + 6*np.pi/val-16/val**2) + + def _entropy(self): + return _EULER/2.0 + 1 - 0.5*np.log(2) + + +rayleigh = rayleigh_gen(a=0.0, name="rayleigh") + + +class reciprocal_gen(rv_continuous): + r"""A reciprocal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `reciprocal` is: + + .. math:: + + f(x, a, b) = \frac{1}{x \log(b/a)} + + for :math:`a \le x \le b`, :math:`b > a > 0`. + + `reciprocal` takes :math:`a` and :math:`b` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, a, b): + self.a = a + self.b = b + self.d = np.log(b*1.0 / a) + return (a > 0) & (b > a) + + def _pdf(self, x, a, b): + # reciprocal.pdf(x, a, b) = 1 / (x*log(b/a)) + return 1.0 / (x * self.d) + + def _logpdf(self, x, a, b): + return -np.log(x) - np.log(self.d) + + def _cdf(self, x, a, b): + return (np.log(x)-np.log(a)) / self.d + + def _ppf(self, q, a, b): + return a*pow(b*1.0/a, q) + + def _munp(self, n, a, b): + return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n)) + + def _entropy(self, a, b): + return 0.5*np.log(a*b)+np.log(np.log(b/a)) + + +reciprocal = reciprocal_gen(name="reciprocal") + + +class rice_gen(rv_continuous): + r"""A Rice continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `rice` is: + + .. math:: + + f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I_0(x b) + + for :math:`x > 0`, :math:`b > 0`. :math:`I_0` is the modified Bessel + function of order zero (`scipy.special.i0`). + + `rice` takes ``b`` as a shape parameter for :math:`b`. + + %(after_notes)s + + The Rice distribution describes the length, :math:`r`, of a 2-D vector with + components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u, + v` are independent Gaussian random variables with standard deviation + :math:`s`. Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is + ``rice.pdf(x, R/s, scale=s)``. 
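+
+    For example, with constants ``U = V = 1`` and noise scale ``s = 0.5``,
+    :math:`R = \sqrt{2}` and the length of the vector is distributed as
+    (a sketch)::
+
+        rv = rice(np.sqrt(2)/0.5, scale=0.5)  # b = R/s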
+ + %(example)s + + """ + def _argcheck(self, b): + return b >= 0 + + def _rvs(self, b): + # https://en.wikipedia.org/wiki/Rice_distribution + t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2,) + + self._size) + return np.sqrt((t*t).sum(axis=0)) + + def _cdf(self, x, b): + return sc.chndtr(np.square(x), 2, np.square(b)) + + def _ppf(self, q, b): + return np.sqrt(sc.chndtrix(q, 2, np.square(b))) + + def _pdf(self, x, b): + # rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b) + # + # We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb. + # The factor of np.exp(-xb) is then included in the i0e function + # in place of the modified Bessel function, i0, improving + # numerical stability for large values of xb. + return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b) + + def _munp(self, n, b): + nd2 = n/2.0 + n1 = 1 + nd2 + b2 = b*b/2.0 + return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) * + sc.hyp1f1(n1, 1, b2)) + + +rice = rice_gen(a=0.0, name="rice") + + +# FIXME: PPF does not work. +class recipinvgauss_gen(rv_continuous): + r"""A reciprocal inverse Gaussian continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `recipinvgauss` is: + + .. math:: + + f(x, \mu) = \frac{1}{\sqrt{2\pi x}} + \exp\left(\frac{-(1-\mu x)^2}{2\mu^2x}\right) + + for :math:`x \ge 0`. + + `recipinvgauss` takes ``mu`` as a shape parameter for :math:`\mu`. + + %(after_notes)s + + %(example)s + + """ + + def _pdf(self, x, mu): + # recipinvgauss.pdf(x, mu) = + # 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2)) + return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0)) + + def _logpdf(self, x, mu): + return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x) + + def _cdf(self, x, mu): + trm1 = 1.0/mu - x + trm2 = 1.0/mu + x + isqx = 1.0/np.sqrt(x) + return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2) + + def _rvs(self, mu): + return 1.0/self._random_state.wald(mu, 1.0, size=self._size) + + +recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss') + + +class semicircular_gen(rv_continuous): + r"""A semicircular continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `semicircular` is: + + .. math:: + + f(x) = \frac{2}{\pi} \sqrt{1-x^2} + + for :math:`-1 \le x \le 1`. + + %(after_notes)s + + %(example)s + + """ + def _pdf(self, x): + # semicircular.pdf(x) = 2/pi * sqrt(1-x**2) + return 2.0/np.pi*np.sqrt(1-x*x) + + def _cdf(self, x): + return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x)) + + def _stats(self): + return 0, 0.25, 0, -1.0 + + def _entropy(self): + return 0.64472988584940017414 + + +semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular") + + +class skew_norm_gen(rv_continuous): + r"""A skew-normal random variable. + + %(before_notes)s + + Notes + ----- + The pdf is:: + + skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x) + + `skewnorm` takes a real number :math:`a` as a skewness parameter + When ``a = 0`` the distribution is identical to a normal distribution + (`norm`). `rvs` implements the method of [1]_. + + %(after_notes)s + + %(example)s + + References + ---------- + .. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the + multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602. 
+ http://azzalini.stat.unipd.it/SN/faq-r.html + + """ + def _argcheck(self, a): + return np.isfinite(a) + + def _pdf(self, x, a): + return 2.*_norm_pdf(x)*_norm_cdf(a*x) + + def _cdf_single(self, x, *args): + if x <= 0: + cdf = integrate.quad(self._pdf, self.a, x, args=args)[0] + else: + t1 = integrate.quad(self._pdf, self.a, 0, args=args)[0] + t2 = integrate.quad(self._pdf, 0, x, args=args)[0] + cdf = t1 + t2 + if cdf > 1: + # Presumably numerical noise, e.g. 1.0000000000000002 + cdf = 1.0 + return cdf + + def _sf(self, x, a): + return self._cdf(-x, -a) + + def _rvs(self, a): + u0 = self._random_state.normal(size=self._size) + v = self._random_state.normal(size=self._size) + d = a/np.sqrt(1 + a**2) + u1 = d*u0 + v*np.sqrt(1 - d**2) + return np.where(u0 >= 0, u1, -u1) + + def _stats(self, a, moments='mvsk'): + output = [None, None, None, None] + const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2) + + if 'm' in moments: + output[0] = const + if 'v' in moments: + output[1] = 1 - const**2 + if 's' in moments: + output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3 + if 'k' in moments: + output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2) + + return output + + +skewnorm = skew_norm_gen(name='skewnorm') + + +class trapz_gen(rv_continuous): + r"""A trapezoidal continuous random variable. + + %(before_notes)s + + Notes + ----- + The trapezoidal distribution can be represented with an up-sloping line + from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)`` + and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``. + + `trapz` takes :math:`c` and :math:`d` as shape parameters. + + %(after_notes)s + + The standard form is in the range [0, 1] with c the mode. + The location parameter shifts the start to `loc`. + The scale parameter changes the width from 1 to `scale`. + + %(example)s + + """ + def _argcheck(self, c, d): + return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c) + + def _pdf(self, x, c, d): + u = 2 / (d-c+1) + + return _lazyselect([x < c, + (c <= x) & (x <= d), + x > d], + [lambda x, c, d, u: u * x / c, + lambda x, c, d, u: u, + lambda x, c, d, u: u * (1-x) / (1-d)], + (x, c, d, u)) + + def _cdf(self, x, c, d): + return _lazyselect([x < c, + (c <= x) & (x <= d), + x > d], + [lambda x, c, d: x**2 / c / (d-c+1), + lambda x, c, d: (c + 2 * (x-c)) / (d-c+1), + lambda x, c, d: 1-((1-x) ** 2 + / (d-c+1) / (1-d))], + (x, c, d)) + + def _ppf(self, q, c, d): + qc, qd = self._cdf(c, c, d), self._cdf(d, c, d) + condlist = [q < qc, q <= qd, q > qd] + choicelist = [np.sqrt(q * c * (1 + d - c)), + 0.5 * q * (1 + d - c) + 0.5 * c, + 1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))] + return np.select(condlist, choicelist) + + +trapz = trapz_gen(a=0.0, b=1.0, name="trapz") + + +class triang_gen(rv_continuous): + r"""A triangular continuous random variable. + + %(before_notes)s + + Notes + ----- + The triangular distribution can be represented with an up-sloping line from + ``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)`` + to ``(loc + scale)``. + + `triang` takes ``c`` as a shape parameter for :math:`c`. + + %(after_notes)s + + The standard form is in the range [0, 1] with c the mode. + The location parameter shifts the start to `loc`. + The scale parameter changes the width from 1 to `scale`. 
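+
+    For example, ``triang(c=0.5, loc=-1, scale=2)`` is the symmetric
+    triangular distribution on ``[-1, 1]`` with its mode at 0.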
+ + %(example)s + + """ + def _rvs(self, c): + return self._random_state.triangular(0, c, 1, self._size) + + def _argcheck(self, c): + return (c >= 0) & (c <= 1) + + def _pdf(self, x, c): + # 0: edge case where c=0 + # 1: generalised case for x < c, don't use x <= c, as it doesn't cope + # with c = 0. + # 2: generalised case for x >= c, but doesn't cope with c = 1 + # 3: edge case where c=1 + r = _lazyselect([c == 0, + x < c, + (x >= c) & (c != 1), + c == 1], + [lambda x, c: 2 - 2 * x, + lambda x, c: 2 * x / c, + lambda x, c: 2 * (1 - x) / (1 - c), + lambda x, c: 2 * x], + (x, c)) + return r + + def _cdf(self, x, c): + r = _lazyselect([c == 0, + x < c, + (x >= c) & (c != 1), + c == 1], + [lambda x, c: 2*x - x*x, + lambda x, c: x * x / c, + lambda x, c: (x*x - 2*x + c) / (c-1), + lambda x, c: x * x], + (x, c)) + return r + + def _ppf(self, q, c): + return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q))) + + def _stats(self, c): + return ((c+1.0)/3.0, + (1.0-c+c*c)/18, + np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)), + -3.0/5.0) + + def _entropy(self, c): + return 0.5-np.log(2) + + +triang = triang_gen(a=0.0, b=1.0, name="triang") + + +class truncexpon_gen(rv_continuous): + r"""A truncated exponential continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `truncexpon` is: + + .. math:: + + f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)} + + for :math:`0 < x < b`. + + `truncexpon` takes ``b`` as a shape parameter for :math:`b`. + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, b): + self.b = b + return b > 0 + + def _pdf(self, x, b): + # truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b)) + return np.exp(-x)/(-sc.expm1(-b)) + + def _logpdf(self, x, b): + return -x - np.log(-sc.expm1(-b)) + + def _cdf(self, x, b): + return sc.expm1(-x)/sc.expm1(-b) + + def _ppf(self, q, b): + return -sc.log1p(q*sc.expm1(-b)) + + def _munp(self, n, b): + # wrong answer with formula, same as in continuous.pdf + # return sc.gamman+1)-sc.gammainc1+n, b) + if n == 1: + return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b)) + elif n == 2: + return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b)) + else: + # return generic for higher moments + # return rv_continuous._mom1_sc(self, n, b) + return self._mom1_sc(n, b) + + def _entropy(self, b): + eB = np.exp(b) + return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB) + + +truncexpon = truncexpon_gen(a=0.0, name='truncexpon') + + +class truncnorm_gen(rv_continuous): + r"""A truncated normal continuous random variable. + + %(before_notes)s + + Notes + ----- + The standard form of this distribution is a standard normal truncated to + the range [a, b] --- notice that a and b are defined over the domain of the + standard normal. To convert clip values for a specific mean and standard + deviation, use:: + + a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std + + `truncnorm` takes :math:`a` and :math:`b` as shape parameters. 
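+
+    For example, a normal with mean 0.5 and standard deviation 2 truncated
+    to ``[0, 1]`` is ``truncnorm(-0.25, 0.25, loc=0.5, scale=2)``, since
+    ``(0 - 0.5) / 2 == -0.25`` and ``(1 - 0.5) / 2 == 0.25``.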
+ + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, a, b): + self.a = a + self.b = b + self._nb = _norm_cdf(b) + self._na = _norm_cdf(a) + self._sb = _norm_sf(b) + self._sa = _norm_sf(a) + self._delta = np.where(self.a > 0, + -(self._sb - self._sa), + self._nb - self._na) + self._logdelta = np.log(self._delta) + return a < b + + def _pdf(self, x, a, b): + return _norm_pdf(x) / self._delta + + def _logpdf(self, x, a, b): + return _norm_logpdf(x) - self._logdelta + + def _cdf(self, x, a, b): + return (_norm_cdf(x) - self._na) / self._delta + + def _ppf(self, q, a, b): + # XXX Use _lazywhere... + ppf = np.where(self.a > 0, + _norm_isf(q*self._sb + self._sa*(1.0-q)), + _norm_ppf(q*self._nb + self._na*(1.0-q))) + return ppf + + def _stats(self, a, b): + nA, nB = self._na, self._nb + d = nB - nA + pA, pB = _norm_pdf(a), _norm_pdf(b) + mu = (pA - pB) / d # correction sign + mu2 = 1 + (a*pA - b*pB) / d - mu*mu + return mu, mu2, None, None + + +truncnorm = truncnorm_gen(name='truncnorm') + + +# FIXME: RVS does not work. +class tukeylambda_gen(rv_continuous): + r"""A Tukey-Lamdba continuous random variable. + + %(before_notes)s + + Notes + ----- + A flexible distribution, able to represent and interpolate between the + following distributions: + + - Cauchy (:math:`lambda = -1`) + - logistic (:math:`lambda = 0`) + - approx Normal (:math:`lambda = 0.14`) + - uniform from -1 to 1 (:math:`lambda = 1`) + + `tukeylambda` takes a real number :math:`lambda` (denoted ``lam`` + in the implementation) as a shape parameter. + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, lam): + return np.ones(np.shape(lam), dtype=bool) + + def _pdf(self, x, lam): + Fx = np.asarray(sc.tklmbda(x, lam)) + Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0) + Px = 1.0/np.asarray(Px) + return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0) + + def _cdf(self, x, lam): + return sc.tklmbda(x, lam) + + def _ppf(self, q, lam): + return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam) + + def _stats(self, lam): + return 0, _tlvar(lam), 0, _tlkurt(lam) + + def _entropy(self, lam): + def integ(p): + return np.log(pow(p, lam-1)+pow(1-p, lam-1)) + return integrate.quad(integ, 0, 1)[0] + + +tukeylambda = tukeylambda_gen(name='tukeylambda') + + +class FitUniformFixedScaleDataError(FitDataError): + def __init__(self, ptp, fscale): + self.args = ( + "Invalid values in `data`. Maximum likelihood estimation with " + "the uniform distribution and fixed scale requires that " + "data.ptp() <= fscale, but data.ptp() = %r and fscale = %r." % + (ptp, fscale), + ) + + +class uniform_gen(rv_continuous): + r"""A uniform continuous random variable. + + In the standard form, the distribution is uniform on ``[0, 1]``. Using + the parameters ``loc`` and ``scale``, one obtains the uniform distribution + on ``[loc, loc + scale]``. + + %(before_notes)s + + %(example)s + + """ + def _rvs(self): + return self._random_state.uniform(0.0, 1.0, self._size) + + def _pdf(self, x): + return 1.0*(x == x) + + def _cdf(self, x): + return x + + def _ppf(self, q): + return q + + def _stats(self): + return 0.5, 1.0/12, 0, -1.2 + + def _entropy(self): + return 0.0 + + def fit(self, data, *args, **kwds): + """ + Maximum likelihood estimate for the location and scale parameters. + + `uniform.fit` uses only the following parameters. Because exact + formulas are used, the parameters related to optimization that are + available in the `fit` method of other distributions are ignored + here. The only positional argument accepted is `data`. 
+ + Parameters + ---------- + data : array_like + Data to use in calculating the maximum likelihood estimate. + floc : float, optional + Hold the location parameter fixed to the specified value. + fscale : float, optional + Hold the scale parameter fixed to the specified value. + + Returns + ------- + loc, scale : float + Maximum likelihood estimates for the location and scale. + + Notes + ----- + An error is raised if `floc` is given and any values in `data` are + less than `floc`, or if `fscale` is given and `fscale` is less + than ``data.max() - data.min()``. An error is also raised if both + `floc` and `fscale` are given. + + Examples + -------- + >>> from scipy.stats import uniform + + We'll fit the uniform distribution to `x`: + + >>> x = np.array([2, 2.5, 3.1, 9.5, 13.0]) + + For a uniform distribution MLE, the location is the minimum of the + data, and the scale is the maximum minus the minimum. + + >>> loc, scale = uniform.fit(x) + >>> loc + 2.0 + >>> scale + 11.0 + + If we know the data comes from a uniform distribution where the support + starts at 0, we can use `floc=0`: + + >>> loc, scale = uniform.fit(x, floc=0) + >>> loc + 0.0 + >>> scale + 13.0 + + Alternatively, if we know the length of the support is 12, we can use + `fscale=12`: + + >>> loc, scale = uniform.fit(x, fscale=12) + >>> loc + 1.5 + >>> scale + 12.0 + + In that last example, the support interval is [1.5, 13.5]. This + solution is not unique. For example, the distribution with ``loc=2`` + and ``scale=12`` has the same likelihood as the one above. When + `fscale` is given and it is larger than ``data.max() - data.min()``, + the parameters returned by the `fit` method center the support over + the interval ``[data.min(), data.max()]``. + + """ + if len(args) > 0: + raise TypeError("Too many arguments.") + + floc = kwds.pop('floc', None) + fscale = kwds.pop('fscale', None) + + # Ignore the optimizer-related keyword arguments, if given. + kwds.pop('loc', None) + kwds.pop('scale', None) + kwds.pop('optimizer', None) + if kwds: + raise TypeError("Unknown arguments: %s." % kwds) + + if floc is not None and fscale is not None: + # This check is for consistency with `rv_continuous.fit`. + raise ValueError("All parameters fixed. There is nothing to " + "optimize.") + + data = np.asarray(data) + + # MLE for the uniform distribution + # -------------------------------- + # The PDF is + # + # f(x, loc, scale) = {1/scale for loc <= x <= loc + scale + # {0 otherwise} + # + # The likelihood function is + # L(x, loc, scale) = (1/scale)**n + # where n is len(x), assuming loc <= x <= loc + scale for all x. + # The log-likelihood is + # l(x, loc, scale) = -n*log(scale) + # The log-likelihood is maximized by making scale as small as possible, + # while keeping loc <= x <= loc + scale. So if neither loc nor scale + # are fixed, the log-likelihood is maximized by choosing + # loc = x.min() + # scale = x.ptp() + # If loc is fixed, it must be less than or equal to x.min(), and then + # the scale is + # scale = x.max() - loc + # If scale is fixed, it must not be less than x.ptp(). If scale is + # greater than x.ptp(), the solution is not unique. Note that the + # likelihood does not depend on loc, except for the requirement that + # loc <= x <= loc + scale. All choices of loc for which + # x.max() - scale <= loc <= x.min() + # have the same log-likelihood. 
In this case, we choose loc such that + # the support is centered over the interval [data.min(), data.max()]: + # loc = x.min() = 0.5*(scale - x.ptp()) + + if fscale is None: + # scale is not fixed. + if floc is None: + # loc is not fixed, scale is not fixed. + loc = data.min() + scale = data.ptp() + else: + # loc is fixed, scale is not fixed. + loc = floc + scale = data.max() - loc + if data.min() < loc: + raise FitDataError("uniform", lower=loc, upper=loc + scale) + else: + # loc is not fixed, scale is fixed. + ptp = data.ptp() + if ptp > fscale: + raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale) + # If ptp < fscale, the ML estimate is not unique; see the comments + # above. We choose the distribution for which the support is + # centered over the interval [data.min(), data.max()]. + loc = data.min() - 0.5*(fscale - ptp) + scale = fscale + + # We expect the return values to be floating point, so ensure it + # by explicitly converting to float. + return float(loc), float(scale) + + +uniform = uniform_gen(a=0.0, b=1.0, name='uniform') + + +class vonmises_gen(rv_continuous): + r"""A Von Mises continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `vonmises` and `vonmises_line` is: + + .. math:: + + f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) } + + for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. :math:`I_0` is the + modified Bessel function of order zero (`scipy.special.i0`). + + `vonmises` is a circular distribution which does not restrict the + distribution to a fixed interval. Currently, there is no circular + distribution framework in scipy. The ``cdf`` is implemented such that + ``cdf(x + 2*np.pi) == cdf(x) + 1``. + + `vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]` + on the real line. This is a regular (i.e. non-circular) distribution. + + `vonmises` and `vonmises_line` take ``kappa`` as a shape parameter. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, kappa): + return self._random_state.vonmises(0.0, kappa, size=self._size) + + def _pdf(self, x, kappa): + # vonmises.pdf(x, \kappa) = exp(\kappa * cos(x)) / (2*pi*I[0](\kappa)) + return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa)) + + def _cdf(self, x, kappa): + return _stats.von_mises_cdf(kappa, x) + + def _stats_skip(self, kappa): + return 0, None, 0, None + + def _entropy(self, kappa): + return (-kappa * sc.i1(kappa) / sc.i0(kappa) + + np.log(2 * np.pi * sc.i0(kappa))) + + +vonmises = vonmises_gen(name='vonmises') +vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line') + + +class wald_gen(invgauss_gen): + r"""A Wald continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `wald` is: + + .. math:: + + f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x }) + + for :math:`x > 0`. + + `wald` is a special case of `invgauss` with ``mu=1``. + + %(after_notes)s + + %(example)s + """ + _support_mask = rv_continuous._open_support_mask + + def _rvs(self): + return self._random_state.wald(1.0, 1.0, size=self._size) + + def _pdf(self, x): + # wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x)) + return invgauss._pdf(x, 1.0) + + def _logpdf(self, x): + return invgauss._logpdf(x, 1.0) + + def _cdf(self, x): + return invgauss._cdf(x, 1.0) + + def _stats(self): + return 1.0, 1.0, 3.0, 15.0 + + +wald = wald_gen(a=0.0, name="wald") + + +class wrapcauchy_gen(rv_continuous): + r"""A wrapped Cauchy continuous random variable. 
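+
+    The distribution obtained by wrapping a Cauchy random variable onto
+    the unit circle.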
+ + %(before_notes)s + + Notes + ----- + The probability density function for `wrapcauchy` is: + + .. math:: + + f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))} + + for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`. + + `wrapcauchy` takes ``c`` as a shape parameter for :math:`c`. + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, c): + return (c > 0) & (c < 1) + + def _pdf(self, x, c): + # wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x))) + return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x))) + + def _cdf(self, x, c): + output = np.zeros(x.shape, dtype=x.dtype) + val = (1.0+c)/(1.0-c) + c1 = x < np.pi + c2 = 1-c1 + xp = np.extract(c1, x) + xn = np.extract(c2, x) + if np.any(xn): + valn = np.extract(c2, np.ones_like(x)*val) + xn = 2*np.pi - xn + yn = np.tan(xn/2.0) + on = 1.0-1.0/np.pi*np.arctan(valn*yn) + np.place(output, c2, on) + if np.any(xp): + valp = np.extract(c1, np.ones_like(x)*val) + yp = np.tan(xp/2.0) + op = 1.0/np.pi*np.arctan(valp*yp) + np.place(output, c1, op) + return output + + def _ppf(self, q, c): + val = (1.0-c)/(1.0+c) + rcq = 2*np.arctan(val*np.tan(np.pi*q)) + rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q))) + return np.where(q < 1.0/2, rcq, rcmq) + + def _entropy(self, c): + return np.log(2*np.pi*(1-c*c)) + + +wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy') + + +class gennorm_gen(rv_continuous): + r"""A generalized normal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `gennorm` is [1]_: + + .. math:: + + f(x, \beta) = \frac{\beta}{2 \Gamma(1/\beta)} \exp(-|x|^\beta) + + :math:`\Gamma` is the gamma function (`scipy.special.gamma`). + + `gennorm` takes ``beta`` as a shape parameter for :math:`\beta`. + For :math:`\beta = 1`, it is identical to a Laplace distribution. + For :math:`\beta = 2`, it is identical to a normal distribution + (with ``scale=1/sqrt(2)``). + + See Also + -------- + laplace : Laplace distribution + norm : normal distribution + + References + ---------- + + .. [1] "Generalized normal distribution, Version 1", + https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1 + + %(example)s + + """ + + def _pdf(self, x, beta): + return np.exp(self._logpdf(x, beta)) + + def _logpdf(self, x, beta): + return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta + + def _cdf(self, x, beta): + c = 0.5 * np.sign(x) + # evaluating (.5 + c) first prevents numerical cancellation + return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta) + + def _ppf(self, x, beta): + c = np.sign(x - 0.5) + # evaluating (1. + c) first prevents numerical cancellation + return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta) + + def _sf(self, x, beta): + return self._cdf(-x, beta) + + def _isf(self, x, beta): + return -self._ppf(x, beta) + + def _stats(self, beta): + c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta]) + return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3. + + def _entropy(self, beta): + return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta) + + +gennorm = gennorm_gen(name='gennorm') + + +class halfgennorm_gen(rv_continuous): + r"""The upper half of a generalized normal continuous random variable. + + %(before_notes)s + + Notes + ----- + The probability density function for `halfgennorm` is: + + .. math:: + + f(x, \beta) = \frac{\beta}{\Gamma(1/\beta)} \exp(-|x|^\beta) + + for :math:`x > 0`. :math:`\Gamma` is the gamma function + (`scipy.special.gamma`). 
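+
+    (The density is twice that of `gennorm` restricted to :math:`x > 0`.)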
+ + `gennorm` takes ``beta`` as a shape parameter for :math:`\beta`. + For :math:`\beta = 1`, it is identical to an exponential distribution. + For :math:`\beta = 2`, it is identical to a half normal distribution + (with ``scale=1/sqrt(2)``). + + See Also + -------- + gennorm : generalized normal distribution + expon : exponential distribution + halfnorm : half normal distribution + + References + ---------- + + .. [1] "Generalized normal distribution, Version 1", + https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1 + + %(example)s + + """ + + def _pdf(self, x, beta): + # beta + # halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta) + # gamma(1/beta) + return np.exp(self._logpdf(x, beta)) + + def _logpdf(self, x, beta): + return np.log(beta) - sc.gammaln(1.0/beta) - x**beta + + def _cdf(self, x, beta): + return sc.gammainc(1.0/beta, x**beta) + + def _ppf(self, x, beta): + return sc.gammaincinv(1.0/beta, x)**(1.0/beta) + + def _sf(self, x, beta): + return sc.gammaincc(1.0/beta, x**beta) + + def _isf(self, x, beta): + return sc.gammainccinv(1.0/beta, x)**(1.0/beta) + + def _entropy(self, beta): + return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta) + + +halfgennorm = halfgennorm_gen(a=0, name='halfgennorm') + + +class crystalball_gen(rv_continuous): + r""" + Crystalball distribution + + %(before_notes)s + + Notes + ----- + The probability density function for `crystalball` is: + + .. math:: + + f(x, \beta, m) = \begin{cases} + N \exp(-x^2 / 2), &\text{for } x > -\beta\\ + N A (B - x)^{-m} &\text{for } x \le -\beta + \end{cases} + + where :math:`A = (m / |\beta|)^n \exp(-\beta^2 / 2)`, + :math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant. + + `crystalball` takes :math:`\beta > 0` and :math:`m > 1` as shape + parameters. :math:`\beta` defines the point where the pdf changes + from a power-law to a Gaussian distribution. :math:`m` is the power + of the power-law tail. + + References + ---------- + .. [1] "Crystal Ball Function", + https://en.wikipedia.org/wiki/Crystal_Ball_function + + %(after_notes)s + + .. versionadded:: 0.19.0 + + %(example)s + """ + + def _pdf(self, x, beta, m): + """ + Return PDF of the crystalball function. + + -- + | exp(-x**2 / 2), for x > -beta + crystalball.pdf(x, beta, m) = N * | + | A * (B - x)**(-m), for x <= -beta + -- + """ + N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + + _norm_pdf_C * _norm_cdf(beta)) + + def rhs(x, beta, m): + return np.exp(-x**2 / 2) + + def lhs(x, beta, m): + return ((m/beta)**m * np.exp(-beta**2 / 2.0) * + (m/beta - beta - x)**(-m)) + + return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs) + + def _logpdf(self, x, beta, m): + """ + Return the log of the PDF of the crystalball function. 
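+
+        (Working in log space avoids underflow of ``(B - x)**(-m)`` far
+        out in the power-law tail.)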
+ """ + N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + + _norm_pdf_C * _norm_cdf(beta)) + + def rhs(x, beta, m): + return -x**2/2 + + def lhs(x, beta, m): + return m*np.log(m/beta) - beta**2/2 - m*np.log(m/beta - beta - x) + + return np.log(N) + _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs) + + def _cdf(self, x, beta, m): + """ + Return CDF of the crystalball function + """ + N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + + _norm_pdf_C * _norm_cdf(beta)) + + def rhs(x, beta, m): + return ((m/beta) * np.exp(-beta**2 / 2.0) / (m-1) + + _norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta))) + + def lhs(x, beta, m): + return ((m/beta)**m * np.exp(-beta**2 / 2.0) * + (m/beta - beta - x)**(-m+1) / (m-1)) + + return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs) + + def _ppf(self, p, beta, m): + N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + + _norm_pdf_C * _norm_cdf(beta)) + pbeta = N * (m/beta) * np.exp(-beta**2/2) / (m - 1) + + def ppf_less(p, beta, m): + eb2 = np.exp(-beta**2/2) + C = (m/beta) * eb2 / (m-1) + N = 1/(C + _norm_pdf_C * _norm_cdf(beta)) + return (m/beta - beta - + ((m - 1)*(m/beta)**(-m)/eb2*p/N)**(1/(1-m))) + + def ppf_greater(p, beta, m): + eb2 = np.exp(-beta**2/2) + C = (m/beta) * eb2 / (m-1) + N = 1/(C + _norm_pdf_C * _norm_cdf(beta)) + return _norm_ppf(_norm_cdf(-beta) + (1/_norm_pdf_C)*(p/N - C)) + + return _lazywhere(p < pbeta, (p, beta, m), f=ppf_less, f2=ppf_greater) + + def _munp(self, n, beta, m): + """ + Returns the n-th non-central moment of the crystalball function. + """ + N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + + _norm_pdf_C * _norm_cdf(beta)) + + def n_th_moment(n, beta, m): + """ + Returns n-th moment. Defined only if n+1 < m + Function cannot broadcast due to the loop over n + """ + A = (m/beta)**m * np.exp(-beta**2 / 2.0) + B = m/beta - beta + rhs = (2**((n-1)/2.0) * sc.gamma((n+1)/2) * + (1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2))) + lhs = np.zeros(rhs.shape) + for k in range(n + 1): + lhs += (sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) * + (m/beta)**(-m + k + 1)) + return A * lhs + rhs + + return N * _lazywhere(n + 1 < m, (n, beta, m), + np.vectorize(n_th_moment, otypes=[np.float]), + np.inf) + + def _argcheck(self, beta, m): + """ + Shape parameter bounds are m > 1 and beta > 0. + """ + return (m > 1) & (beta > 0) + + +crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function") + + +def _argus_phi(chi): + """ + Utility function for the argus distribution + used in the CDF and norm of the Argus Funktion + """ + return _norm_cdf(chi) - chi * _norm_pdf(chi) - 0.5 + + +class argus_gen(rv_continuous): + r""" + Argus distribution + + %(before_notes)s + + Notes + ----- + The probability density function for `argus` is: + + .. math:: + + f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2} + \exp(-\chi^2 (1 - x^2)/2) + + for :math:`0 < x < 1`, where + + .. math:: + + \Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2 + + with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard + normal distribution, respectively. + + `argus` takes :math:`\chi` as shape a parameter. + + References + ---------- + + .. [1] "ARGUS distribution", + https://en.wikipedia.org/wiki/ARGUS_distribution + + %(after_notes)s + + .. 
versionadded:: 0.19.0 + + %(example)s + """ + def _pdf(self, x, chi): + """ + Return PDF of the argus function + + argus.pdf(x, chi) = chi**3 / (sqrt(2*pi) * Psi(chi)) * x * + sqrt(1-x**2) * exp(- 0.5 * chi**2 * (1 - x**2)) + """ + y = 1.0 - x**2 + return chi**3 / (_norm_pdf_C * _argus_phi(chi)) * x * np.sqrt(y) * np.exp(-chi**2 * y / 2) + + def _cdf(self, x, chi): + """ + Return CDF of the argus function + """ + return 1.0 - self._sf(x, chi) + + def _sf(self, x, chi): + """ + Return survival function of the argus function + """ + return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi) + + +argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0) + + +class rv_histogram(rv_continuous): + """ + Generates a distribution given by a histogram. + This is useful to generate a template distribution from a binned + datasample. + + As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it + a collection of generic methods (see `rv_continuous` for the full list), + and implements them based on the properties of the provided binned + datasample. + + Parameters + ---------- + histogram : tuple of array_like + Tuple containing two array_like objects + The first containing the content of n bins + The second containing the (n+1) bin boundaries + In particular the return value np.histogram is accepted + + Notes + ----- + There are no additional shape parameters except for the loc and scale. + The pdf is defined as a stepwise function from the provided histogram + The cdf is a linear interpolation of the pdf. + + .. versionadded:: 0.19.0 + + Examples + -------- + + Create a scipy.stats distribution from a numpy histogram + + >>> import scipy.stats + >>> import numpy as np + >>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123) + >>> hist = np.histogram(data, bins=100) + >>> hist_dist = scipy.stats.rv_histogram(hist) + + Behaves like an ordinary scipy rv_continuous distribution + + >>> hist_dist.pdf(1.0) + 0.20538577847618705 + >>> hist_dist.cdf(2.0) + 0.90818568543056499 + + PDF is zero above (below) the highest (lowest) bin of the histogram, + defined by the max (min) of the original dataset + + >>> hist_dist.pdf(np.max(data)) + 0.0 + >>> hist_dist.cdf(np.max(data)) + 1.0 + >>> hist_dist.pdf(np.min(data)) + 7.7591907244498314e-05 + >>> hist_dist.cdf(np.min(data)) + 0.0 + + PDF and CDF follow the histogram + + >>> import matplotlib.pyplot as plt + >>> X = np.linspace(-5.0, 5.0, 100) + >>> plt.title("PDF from Template") + >>> plt.hist(data, density=True, bins=100) + >>> plt.plot(X, hist_dist.pdf(X), label='PDF') + >>> plt.plot(X, hist_dist.cdf(X), label='CDF') + >>> plt.show() + + """ + _support_mask = rv_continuous._support_mask + + def __init__(self, histogram, *args, **kwargs): + """ + Create a new distribution using the given histogram + + Parameters + ---------- + histogram : tuple of array_like + Tuple containing two array_like objects + The first containing the content of n bins + The second containing the (n+1) bin boundaries + In particular the return value np.histogram is accepted + """ + self._histogram = histogram + if len(histogram) != 2: + raise ValueError("Expected length 2 for parameter histogram") + self._hpdf = np.asarray(histogram[0]) + self._hbins = np.asarray(histogram[1]) + if len(self._hpdf) + 1 != len(self._hbins): + raise ValueError("Number of elements in histogram content " + "and histogram boundaries do not match, " + "expected n and n+1.") + self._hbin_widths = self._hbins[1:] - self._hbins[:-1] + self._hpdf = 
self._hpdf / float(np.sum(self._hpdf * self._hbin_widths)) + self._hcdf = np.cumsum(self._hpdf * self._hbin_widths) + self._hpdf = np.hstack([0.0, self._hpdf, 0.0]) + self._hcdf = np.hstack([0.0, self._hcdf]) + # Set support + kwargs['a'] = self._hbins[0] + kwargs['b'] = self._hbins[-1] + super(rv_histogram, self).__init__(*args, **kwargs) + + def _pdf(self, x): + """ + PDF of the histogram + """ + return self._hpdf[np.searchsorted(self._hbins, x, side='right')] + + def _cdf(self, x): + """ + CDF calculated from the histogram + """ + return np.interp(x, self._hbins, self._hcdf) + + def _ppf(self, x): + """ + Percentile function calculated from the histogram + """ + return np.interp(x, self._hcdf, self._hbins) + + def _munp(self, n): + """Compute the n-th non-central moment.""" + integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1) + return np.sum(self._hpdf[1:-1] * integrals) + + def _entropy(self): + """Compute entropy of distribution""" + res = _lazywhere(self._hpdf[1:-1] > 0.0, + (self._hpdf[1:-1],), + np.log, + 0.0) + return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths) + + def _updated_ctor_param(self): + """ + Set the histogram as additional constructor argument + """ + dct = super(rv_histogram, self)._updated_ctor_param() + dct['histogram'] = self._histogram + return dct + + +# Collect names of classes and objects in this module. +pairs = list(globals().items()) +_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous) + +__all__ = _distn_names + _distn_gen_names + ['rv_histogram'] diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_continuous_distns.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_continuous_distns.pyc new file mode 100644 index 0000000..99fadbd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_continuous_distns.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_discrete_distns.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_discrete_distns.py new file mode 100644 index 0000000..4aa38d7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_discrete_distns.py @@ -0,0 +1,969 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +from __future__ import division, print_function, absolute_import + +from scipy import special +from scipy.special import entr, logsumexp, betaln, gammaln as gamln +from scipy._lib._numpy_compat import broadcast_to +from scipy._lib._util import _lazywhere + +from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh + +import numpy as np + +from ._distn_infrastructure import ( + rv_discrete, _ncx2_pdf, _ncx2_cdf, get_distribution_names) + + +class binom_gen(rv_discrete): + r"""A binomial discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `binom` is: + + .. math:: + + f(k) = \binom{n}{k} p^k (1-p)^{n-k} + + for ``k`` in ``{0, 1,..., n}``. + + `binom` takes ``n`` and ``p`` as shape parameters. 
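+
+    For example, the probability of exactly 3 heads in 10 flips of a fair
+    coin is ``binom.pmf(3, n=10, p=0.5)``.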
+ + %(after_notes)s + + %(example)s + + """ + def _rvs(self, n, p): + return self._random_state.binomial(n, p, self._size) + + def _argcheck(self, n, p): + self.b = n + return (n >= 0) & (p >= 0) & (p <= 1) + + def _logpmf(self, x, n, p): + k = floor(x) + combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1))) + return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p) + + def _pmf(self, x, n, p): + # binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k) + return exp(self._logpmf(x, n, p)) + + def _cdf(self, x, n, p): + k = floor(x) + vals = special.bdtr(k, n, p) + return vals + + def _sf(self, x, n, p): + k = floor(x) + return special.bdtrc(k, n, p) + + def _ppf(self, q, n, p): + vals = ceil(special.bdtrik(q, n, p)) + vals1 = np.maximum(vals - 1, 0) + temp = special.bdtr(vals1, n, p) + return np.where(temp >= q, vals1, vals) + + def _stats(self, n, p, moments='mv'): + q = 1.0 - p + mu = n * p + var = n * p * q + g1, g2 = None, None + if 's' in moments: + g1 = (q - p) / sqrt(var) + if 'k' in moments: + g2 = (1.0 - 6*p*q) / var + return mu, var, g1, g2 + + def _entropy(self, n, p): + k = np.r_[0:n + 1] + vals = self._pmf(k, n, p) + return np.sum(entr(vals), axis=0) + + +binom = binom_gen(name='binom') + + +class bernoulli_gen(binom_gen): + r"""A Bernoulli discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `bernoulli` is: + + .. math:: + + f(k) = \begin{cases}1-p &\text{if } k = 0\\ + p &\text{if } k = 1\end{cases} + + for :math:`k` in :math:`\{0, 1\}`. + + `bernoulli` takes :math:`p` as shape parameter. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, p): + return binom_gen._rvs(self, 1, p) + + def _argcheck(self, p): + return (p >= 0) & (p <= 1) + + def _logpmf(self, x, p): + return binom._logpmf(x, 1, p) + + def _pmf(self, x, p): + # bernoulli.pmf(k) = 1-p if k = 0 + # = p if k = 1 + return binom._pmf(x, 1, p) + + def _cdf(self, x, p): + return binom._cdf(x, 1, p) + + def _sf(self, x, p): + return binom._sf(x, 1, p) + + def _ppf(self, q, p): + return binom._ppf(q, 1, p) + + def _stats(self, p): + return binom._stats(1, p) + + def _entropy(self, p): + return entr(p) + entr(1-p) + + +bernoulli = bernoulli_gen(b=1, name='bernoulli') + + +class nbinom_gen(rv_discrete): + r"""A negative binomial discrete random variable. + + %(before_notes)s + + Notes + ----- + Negative binomial distribution describes a sequence of i.i.d. Bernoulli + trials, repeated until a predefined, non-random number of successes occurs. + + The probability mass function of the number of failures for `nbinom` is: + + .. math:: + + f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k + + for :math:`k \ge 0`. + + `nbinom` takes :math:`n` and :math:`p` as shape parameters where n is the + number of successes, whereas p is the probability of a single success. 
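+
+    The mean number of failures is then :math:`n (1 - p) / p`.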
+ + %(after_notes)s + + %(example)s + + """ + def _rvs(self, n, p): + return self._random_state.negative_binomial(n, p, self._size) + + def _argcheck(self, n, p): + return (n > 0) & (p >= 0) & (p <= 1) + + def _pmf(self, x, n, p): + # nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k + return exp(self._logpmf(x, n, p)) + + def _logpmf(self, x, n, p): + coeff = gamln(n+x) - gamln(x+1) - gamln(n) + return coeff + n*log(p) + special.xlog1py(x, -p) + + def _cdf(self, x, n, p): + k = floor(x) + return special.betainc(n, k+1, p) + + def _sf_skip(self, x, n, p): + # skip because special.nbdtrc doesn't work for 0<n<1 + k = floor(x) + return special.nbdtrc(k, n, p) + + def _ppf(self, q, n, p): + vals = ceil(special.nbdtrik(q, n, p)) + vals1 = (vals-1).clip(0.0, np.inf) + temp = self._cdf(vals1, n, p) + return np.where(temp >= q, vals1, vals) + + def _stats(self, n, p): + Q = 1.0 / p + P = Q - 1.0 + mu = n*P + var = n*P*Q + g1 = (Q+P)/sqrt(n*P*Q) + g2 = (1.0 + 6*P*Q) / (n*P*Q) + return mu, var, g1, g2 + + +nbinom = nbinom_gen(name='nbinom') + + +class geom_gen(rv_discrete): + r"""A geometric discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `geom` is: + + .. math:: + + f(k) = (1-p)^{k-1} p + + for :math:`k \ge 1`. + + `geom` takes :math:`p` as shape parameter. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, p): + return self._random_state.geometric(p, size=self._size) + + def _argcheck(self, p): + return (p <= 1) & (p >= 0) + + def _pmf(self, k, p): + # geom.pmf(k) = (1-p)**(k-1)*p + return np.power(1-p, k-1) * p + + def _logpmf(self, k, p): + return special.xlog1py(k - 1, -p) + log(p) + + def _cdf(self, x, p): + k = floor(x) + return -expm1(log1p(-p)*k) + + def _sf(self, x, p): + return np.exp(self._logsf(x, p)) + + def _logsf(self, x, p): + k = floor(x) + return k*log1p(-p) + + def _ppf(self, q, p): + vals = ceil(log1p(-q) / log1p(-p)) + temp = self._cdf(vals-1, p) + return np.where((temp >= q) & (vals > 0), vals-1, vals) + + def _stats(self, p): + mu = 1.0/p + qr = 1.0-p + var = qr / p / p + g1 = (2.0-p) / sqrt(qr) + g2 = np.polyval([1, -6, 6], p)/(1.0-p) + return mu, var, g1, g2 + + +geom = geom_gen(a=1, name='geom', longname="A geometric") + + +class hypergeom_gen(rv_discrete): + r"""A hypergeometric discrete random variable. + + The hypergeometric distribution models drawing objects from a bin. + `M` is the total number of objects, `n` is total number of Type I objects. + The random variate represents the number of Type I objects in `N` drawn + without replacement from the total population. + + %(before_notes)s + + Notes + ----- + The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not + universally accepted. See the Examples for a clarification of the + definitions used here. + + The probability mass function is defined as, + + .. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}} + {\binom{M}{N}} + + for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial + coefficients are defined as, + + .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. + + %(after_notes)s + + Examples + -------- + >>> from scipy.stats import hypergeom + >>> import matplotlib.pyplot as plt + + Suppose we have a collection of 20 animals, of which 7 are dogs. 
Then if + we want to know the probability of finding a given number of dogs if we + choose at random 12 of the 20 animals, we can initialize a frozen + distribution and plot the probability mass function: + + >>> [M, n, N] = [20, 7, 12] + >>> rv = hypergeom(M, n, N) + >>> x = np.arange(0, n+1) + >>> pmf_dogs = rv.pmf(x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, pmf_dogs, 'bo') + >>> ax.vlines(x, 0, pmf_dogs, lw=2) + >>> ax.set_xlabel('# of dogs in our group of chosen animals') + >>> ax.set_ylabel('hypergeom PMF') + >>> plt.show() + + Instead of using a frozen distribution we can also use `hypergeom` + methods directly. To for example obtain the cumulative distribution + function, use: + + >>> prb = hypergeom.cdf(x, M, n, N) + + And to generate random numbers: + + >>> R = hypergeom.rvs(M, n, N, size=10) + + """ + def _rvs(self, M, n, N): + return self._random_state.hypergeometric(n, M-n, N, size=self._size) + + def _argcheck(self, M, n, N): + cond = (M > 0) & (n >= 0) & (N >= 0) + cond &= (n <= M) & (N <= M) + self.a = np.maximum(N-(M-n), 0) + self.b = np.minimum(n, N) + return cond + + def _logpmf(self, k, M, n, N): + tot, good = M, n + bad = tot - good + result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) - + betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) - + betaln(tot+1, 1)) + return result + + def _pmf(self, k, M, n, N): + # same as the following but numerically more precise + # return comb(good, k) * comb(bad, N-k) / comb(tot, N) + return exp(self._logpmf(k, M, n, N)) + + def _stats(self, M, n, N): + # tot, good, sample_size = M, n, N + # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n') + M, n, N = 1.*M, 1.*n, 1.*N + m = M - n + p = n/M + mu = N*p + + var = m*n*N*(M - N)*1.0/(M*M*(M-1)) + g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N))) + + g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m + g2 *= (M-1)*M*M + g2 += 6.*n*N*(M-N)*m*(5.*M-6) + g2 /= n * N * (M-N) * m * (M-2.) * (M-3.) + return mu, var, g1, g2 + + def _entropy(self, M, n, N): + k = np.r_[N - (M - n):min(n, N) + 1] + vals = self.pmf(k, M, n, N) + return np.sum(entr(vals), axis=0) + + def _sf(self, k, M, n, N): + """More precise calculation, 1 - cdf doesn't cut it.""" + # This for loop is needed because `k` can be an array. If that's the + # case, the sf() method makes M, n and N arrays of the same shape. We + # therefore unpack all inputs args, so we can do the manual + # integration. + res = [] + for quant, tot, good, draw in zip(k, M, n, N): + # Manual integration over probability mass function. More accurate + # than integrate.quad. + k2 = np.arange(quant + 1, draw + 1) + res.append(np.sum(self._pmf(k2, tot, good, draw))) + return np.asarray(res) + + def _logsf(self, k, M, n, N): + """ + More precise calculation than log(sf) + """ + res = [] + for quant, tot, good, draw in zip(k, M, n, N): + # Integration over probability mass function using logsumexp + k2 = np.arange(quant + 1, draw + 1) + res.append(logsumexp(self._logpmf(k2, tot, good, draw))) + return np.asarray(res) + + +hypergeom = hypergeom_gen(name='hypergeom') + + +# FIXME: Fails _cdfvec +class logser_gen(rv_discrete): + r"""A Logarithmic (Log-Series, Series) discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `logser` is: + + .. math:: + + f(k) = - \frac{p^k}{k \log(1-p)} + + for :math:`k \ge 1`. + + `logser` takes :math:`p` as shape parameter. 
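+
+    Its mean is :math:`-p / ((1 - p) \log(1 - p))`.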
+ + %(after_notes)s + + %(example)s + + """ + def _rvs(self, p): + # looks wrong for p>0.5, too few k=1 + # trying to use generic is worse, no k=1 at all + return self._random_state.logseries(p, size=self._size) + + def _argcheck(self, p): + return (p > 0) & (p < 1) + + def _pmf(self, k, p): + # logser.pmf(k) = - p**k / (k*log(1-p)) + return -np.power(p, k) * 1.0 / k / special.log1p(-p) + + def _stats(self, p): + r = special.log1p(-p) + mu = p / (p - 1.0) / r + mu2p = -p / r / (p - 1.0)**2 + var = mu2p - mu*mu + mu3p = -p / r * (1.0+p) / (1.0 - p)**3 + mu3 = mu3p - 3*mu*mu2p + 2*mu**3 + g1 = mu3 / np.power(var, 1.5) + + mu4p = -p / r * ( + 1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4) + mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 + g2 = mu4 / var**2 - 3.0 + return mu, var, g1, g2 + + +logser = logser_gen(a=1, name='logser', longname='A logarithmic') + + +class poisson_gen(rv_discrete): + r"""A Poisson discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `poisson` is: + + .. math:: + + f(k) = \exp(-\mu) \frac{\mu^k}{k!} + + for :math:`k \ge 0`. + + `poisson` takes :math:`\mu` as shape parameter. + + %(after_notes)s + + %(example)s + + """ + + # Override rv_discrete._argcheck to allow mu=0. + def _argcheck(self, mu): + return mu >= 0 + + def _rvs(self, mu): + return self._random_state.poisson(mu, self._size) + + def _logpmf(self, k, mu): + Pk = special.xlogy(k, mu) - gamln(k + 1) - mu + return Pk + + def _pmf(self, k, mu): + # poisson.pmf(k) = exp(-mu) * mu**k / k! + return exp(self._logpmf(k, mu)) + + def _cdf(self, x, mu): + k = floor(x) + return special.pdtr(k, mu) + + def _sf(self, x, mu): + k = floor(x) + return special.pdtrc(k, mu) + + def _ppf(self, q, mu): + vals = ceil(special.pdtrik(q, mu)) + vals1 = np.maximum(vals - 1, 0) + temp = special.pdtr(vals1, mu) + return np.where(temp >= q, vals1, vals) + + def _stats(self, mu): + var = mu + tmp = np.asarray(mu) + mu_nonzero = tmp > 0 + g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf) + g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf) + return mu, var, g1, g2 + + +poisson = poisson_gen(name="poisson", longname='A Poisson') + + +class planck_gen(rv_discrete): + r"""A Planck discrete exponential random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `planck` is: + + .. math:: + + f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) + + for :math:`k \ge 0` and :math:`\lambda > 0`. + + `planck` takes :math:`\lambda` as shape parameter. 
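+
+    (Equivalently, `planck` is a geometric distribution (`geom`) with
+    ``p = 1 - exp(-lambda_)``, shifted by ``loc = -1``.)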
+ + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, lambda_): + return lambda_ > 0 + + def _pmf(self, k, lambda_): + return (1-exp(-lambda_))*exp(-lambda_*k) + + def _cdf(self, x, lambda_): + k = floor(x) + return 1-exp(-lambda_*(k+1)) + + def _sf(self, x, lambda_): + return np.exp(self._logsf(x, lambda_)) + + def _logsf(self, x, lambda_): + k = floor(x) + return -lambda_*(k+1) + + def _ppf(self, q, lambda_): + vals = ceil(-1.0/lambda_ * log1p(-q)-1) + vals1 = (vals-1).clip(self.a, np.inf) + temp = self._cdf(vals1, lambda_) + return np.where(temp >= q, vals1, vals) + + def _stats(self, lambda_): + mu = 1/(exp(lambda_)-1) + var = exp(-lambda_)/(expm1(-lambda_))**2 + g1 = 2*cosh(lambda_/2.0) + g2 = 4+2*cosh(lambda_) + return mu, var, g1, g2 + + def _entropy(self, lambda_): + l = lambda_ + C = (1-exp(-l)) + return l*exp(-l)/C - log(C) + + +planck = planck_gen(a=0, name='planck', longname='A discrete exponential ') + + +class boltzmann_gen(rv_discrete): + r"""A Boltzmann (Truncated Discrete Exponential) random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `boltzmann` is: + + .. math:: + + f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N)) + + for :math:`k = 0,..., N-1`. + + `boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, lambda_, N): + self.a = 0 + self.b = N - 1 + return (lambda_ > 0) & (N > 0) + + def _pmf(self, k, lambda_, N): + # boltzmann.pmf(k) = + # (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N)) + fact = (1-exp(-lambda_))/(1-exp(-lambda_*N)) + return fact*exp(-lambda_*k) + + def _cdf(self, x, lambda_, N): + k = floor(x) + return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N)) + + def _ppf(self, q, lambda_, N): + qnew = q*(1-exp(-lambda_*N)) + vals = ceil(-1.0/lambda_ * log(1-qnew)-1) + vals1 = (vals-1).clip(0.0, np.inf) + temp = self._cdf(vals1, lambda_, N) + return np.where(temp >= q, vals1, vals) + + def _stats(self, lambda_, N): + z = exp(-lambda_) + zN = exp(-lambda_*N) + mu = z/(1.0-z)-N*zN/(1-zN) + var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2 + trm = (1-zN)/(1-z) + trm2 = (z*trm**2 - N*N*zN) + g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN) + g1 = g1 / trm2**(1.5) + g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN) + g2 = g2 / trm2 / trm2 + return mu, var, g1, g2 + + +boltzmann = boltzmann_gen(name='boltzmann', + longname='A truncated discrete exponential ') + + +class randint_gen(rv_discrete): + r"""A uniform discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `randint` is: + + .. math:: + + f(k) = \frac{1}{high - low} + + for ``k = low, ..., high - 1``. + + `randint` takes ``low`` and ``high`` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + def _argcheck(self, low, high): + self.a = low + self.b = high - 1 + return (high > low) + + def _pmf(self, k, low, high): + # randint.pmf(k) = 1./(high - low) + p = np.ones_like(k) / (high - low) + return np.where((k >= low) & (k < high), p, 0.) + + def _cdf(self, x, low, high): + k = floor(x) + return (k - low + 1.) 
/ (high - low) + + def _ppf(self, q, low, high): + vals = ceil(q * (high - low) + low) - 1 + vals1 = (vals - 1).clip(low, high) + temp = self._cdf(vals1, low, high) + return np.where(temp >= q, vals1, vals) + + def _stats(self, low, high): + m2, m1 = np.asarray(high), np.asarray(low) + mu = (m2 + m1 - 1.0) / 2 + d = m2 - m1 + var = (d*d - 1) / 12.0 + g1 = 0.0 + g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0) + return mu, var, g1, g2 + + def _rvs(self, low, high): + """An array of *size* random integers >= ``low`` and < ``high``.""" + if self._size is not None: + # Numpy's RandomState.randint() doesn't broadcast its arguments. + # Use `broadcast_to()` to extend the shapes of low and high + # up to self._size. Then we can use the numpy.vectorize'd + # randint without needing to pass it a `size` argument. + low = broadcast_to(low, self._size) + high = broadcast_to(high, self._size) + randint = np.vectorize(self._random_state.randint, otypes=[np.int_]) + return randint(low, high) + + def _entropy(self, low, high): + return log(high - low) + + +randint = randint_gen(name='randint', longname='A discrete uniform ' + '(random integer)') + + +# FIXME: problems sampling. +class zipf_gen(rv_discrete): + r"""A Zipf discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `zipf` is: + + .. math:: + + f(k, a) = \frac{1}{\zeta(a) k^a} + + for :math:`k \ge 1`. + + `zipf` takes :math:`a` as shape parameter. :math:`\zeta` is the + Riemann zeta function (`scipy.special.zeta`) + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, a): + return self._random_state.zipf(a, size=self._size) + + def _argcheck(self, a): + return a > 1 + + def _pmf(self, k, a): + # zipf.pmf(k, a) = 1/(zeta(a) * k**a) + Pk = 1.0 / special.zeta(a, 1) / k**a + return Pk + + def _munp(self, n, a): + return _lazywhere( + a > n + 1, (a, n), + lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1), + np.inf) + + +zipf = zipf_gen(a=1, name='zipf', longname='A Zipf') + + +class dlaplace_gen(rv_discrete): + r"""A Laplacian discrete random variable. + + %(before_notes)s + + Notes + ----- + The probability mass function for `dlaplace` is: + + .. math:: + + f(k) = \tanh(a/2) \exp(-a |k|) + + for integers :math:`k` and :math:`a > 0`. + + `dlaplace` takes :math:`a` as shape parameter. + + %(after_notes)s + + %(example)s + + """ + def _pmf(self, k, a): + # dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k)) + return tanh(a/2.0) * exp(-a * abs(k)) + + def _cdf(self, x, a): + k = floor(x) + f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1) + f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1) + return _lazywhere(k >= 0, (k, a), f=f, f2=f2) + + def _ppf(self, q, a): + const = 1 + exp(a) + vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), + log(q*const) / a - 1, + -log((1-q) * const) / a)) + vals1 = vals - 1 + return np.where(self._cdf(vals1, a) >= q, vals1, vals) + + def _stats(self, a): + ea = exp(a) + mu2 = 2.*ea/(ea-1.)**2 + mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4 + return 0., mu2, 0., mu4/mu2**2 - 3. + + def _entropy(self, a): + return a / sinh(a) - log(tanh(a/2.0)) + + +dlaplace = dlaplace_gen(a=-np.inf, + name='dlaplace', longname='A discrete Laplacian') + + +class skellam_gen(rv_discrete): + r"""A Skellam discrete random variable. + + %(before_notes)s + + Notes + ----- + Probability distribution of the difference of two correlated or + uncorrelated Poisson random variables. + + Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with + expected values :math:`\lambda_1` and :math:`\lambda_2`. 
Then, + :math:`k_1 - k_2` follows a Skellam distribution with parameters + :math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and + :math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where + :math:`\rho` is the correlation coefficient between :math:`k_1` and + :math:`k_2`. If the two Poisson-distributed r.v. are independent then + :math:`\rho = 0`. + + Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive. + + For details see: https://en.wikipedia.org/wiki/Skellam_distribution + + `skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters. + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, mu1, mu2): + n = self._size + return (self._random_state.poisson(mu1, n) - + self._random_state.poisson(mu2, n)) + + def _pmf(self, x, mu1, mu2): + px = np.where(x < 0, + _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2, + _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2) + # ncx2.pdf() returns nan's for extremely low probabilities + return px + + def _cdf(self, x, mu1, mu2): + x = floor(x) + px = np.where(x < 0, + _ncx2_cdf(2*mu2, -2*x, 2*mu1), + 1 - _ncx2_cdf(2*mu1, 2*(x+1), 2*mu2)) + return px + + def _stats(self, mu1, mu2): + mean = mu1 - mu2 + var = mu1 + mu2 + g1 = mean / sqrt((var)**3) + g2 = 1 / var + return mean, var, g1, g2 + + +skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam') + + +class yulesimon_gen(rv_discrete): + r"""A Yule-Simon discrete random variable. + + %(before_notes)s + + Notes + ----- + + The probability mass function for the `yulesimon` is: + + .. math:: + + f(k) = \alpha B(k, \alpha+1) + + for :math:`k=1,2,3,...`, where :math:`\alpha>0`. + Here :math:`B` refers to the `scipy.special.beta` function. + + The sampling of random variates is based on pg 553, Section 6.3 of [1]_. + Our notation maps to the referenced logic via :math:`\alpha=a-1`. + + For details see the wikipedia entry [2]_. + + References + ---------- + .. [1] Devroye, Luc. "Non-uniform Random Variate Generation", + (1986) Springer, New York. + + .. [2] https://en.wikipedia.org/wiki/Yule-Simon_distribution + + %(after_notes)s + + %(example)s + + """ + def _rvs(self, alpha): + E1 = self._random_state.standard_exponential(self._size) + E2 = self._random_state.standard_exponential(self._size) + ans = ceil(-E1 / log1p(-exp(-E2 / alpha))) + return ans + + def _pmf(self, x, alpha): + return alpha * special.beta(x, alpha + 1) + + def _argcheck(self, alpha): + return (alpha > 0) + + def _logpmf(self, x, alpha): + return log(alpha) + special.betaln(x, alpha + 1) + + def _cdf(self, x, alpha): + return 1 - x * special.beta(x, alpha + 1) + + def _sf(self, x, alpha): + return x * special.beta(x, alpha + 1) + + def _logsf(self, x, alpha): + return log(x) + special.betaln(x, alpha + 1) + + def _stats(self, alpha): + mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1)) + mu2 = np.where(alpha > 2, + alpha**2 / ((alpha - 2.0) * (alpha - 1)**2), + np.inf) + mu2 = np.where(alpha <= 1, np.nan, mu2) + g1 = np.where(alpha > 3, + sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)), + np.inf) + g1 = np.where(alpha <= 2, np.nan, g1) + g2 = np.where(alpha > 4, + (alpha + 3) + (alpha**3 - 49 * alpha - 22) / (alpha * + (alpha - 4) * (alpha - 3)), np.inf) + g2 = np.where(alpha <= 2, np.nan, g2) + return mu, mu2, g1, g2 + + +yulesimon = yulesimon_gen(name='yulesimon', a=1) + + +# Collect names of classes and objects in this module. 
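+# (get_distribution_names separates the rv_discrete instances, e.g.
+# 'poisson', from their generator classes, e.g. 'poisson_gen'.)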
+pairs = list(globals().items()) +_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete) + +__all__ = _distn_names + _distn_gen_names diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_discrete_distns.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_discrete_distns.pyc new file mode 100644 index 0000000..f837eb7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_discrete_distns.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py new file mode 100644 index 0000000..d1b8524 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py @@ -0,0 +1,3446 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +from __future__ import division, print_function, absolute_import + +from scipy._lib.six import string_types, exec_, PY3 +from scipy._lib._util import getargspec_no_self as _getargspec + +import sys +import keyword +import re +import types +import warnings + +from scipy.misc import doccer +from ._distr_params import distcont, distdiscrete +from scipy._lib._util import check_random_state +from scipy._lib._util import _valarray as valarray + +from scipy.special import (comb, chndtr, entr, rel_entr, xlogy, ive) + +# for root finding for discrete distribution ppf, and max likelihood estimation +from scipy import optimize + +# for functions of continuous distributions (e.g. moments, entropy, cdf) +from scipy import integrate + +# to approximate the pdf of a continuous distribution given its cdf +from scipy.misc import derivative + +from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor, + logical_and, log, sqrt, place, argmax, vectorize, asarray, + nan, inf, isinf, NINF, empty) + +import numpy as np + +from ._constants import _XMAX + +if PY3: + def instancemethod(func, obj, cls): + return types.MethodType(func, obj) +else: + instancemethod = types.MethodType + + +# These are the docstring parts used for substitution in specific +# distribution docstrings + +docheaders = {'methods': """\nMethods\n-------\n""", + 'notes': """\nNotes\n-----\n""", + 'examples': """\nExamples\n--------\n"""} + +_doc_rvs = """\ +rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None) + Random variates. +""" +_doc_pdf = """\ +pdf(x, %(shapes)s, loc=0, scale=1) + Probability density function. +""" +_doc_logpdf = """\ +logpdf(x, %(shapes)s, loc=0, scale=1) + Log of the probability density function. +""" +_doc_pmf = """\ +pmf(k, %(shapes)s, loc=0, scale=1) + Probability mass function. +""" +_doc_logpmf = """\ +logpmf(k, %(shapes)s, loc=0, scale=1) + Log of the probability mass function. +""" +_doc_cdf = """\ +cdf(x, %(shapes)s, loc=0, scale=1) + Cumulative distribution function. +""" +_doc_logcdf = """\ +logcdf(x, %(shapes)s, loc=0, scale=1) + Log of the cumulative distribution function. +""" +_doc_sf = """\ +sf(x, %(shapes)s, loc=0, scale=1) + Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate). +""" +_doc_logsf = """\ +logsf(x, %(shapes)s, loc=0, scale=1) + Log of the survival function. +""" +_doc_ppf = """\ +ppf(q, %(shapes)s, loc=0, scale=1) + Percent point function (inverse of ``cdf`` --- percentiles). +""" +_doc_isf = """\ +isf(q, %(shapes)s, loc=0, scale=1) + Inverse survival function (inverse of ``sf``). 
+""" +_doc_moment = """\ +moment(n, %(shapes)s, loc=0, scale=1) + Non-central moment of order n +""" +_doc_stats = """\ +stats(%(shapes)s, loc=0, scale=1, moments='mv') + Mean('m'), variance('v'), skew('s'), and/or kurtosis('k'). +""" +_doc_entropy = """\ +entropy(%(shapes)s, loc=0, scale=1) + (Differential) entropy of the RV. +""" +_doc_fit = """\ +fit(data, %(shapes)s, loc=0, scale=1) + Parameter estimates for generic data. +""" +_doc_expect = """\ +expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds) + Expected value of a function (of one argument) with respect to the distribution. +""" +_doc_expect_discrete = """\ +expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False) + Expected value of a function (of one argument) with respect to the distribution. +""" +_doc_median = """\ +median(%(shapes)s, loc=0, scale=1) + Median of the distribution. +""" +_doc_mean = """\ +mean(%(shapes)s, loc=0, scale=1) + Mean of the distribution. +""" +_doc_var = """\ +var(%(shapes)s, loc=0, scale=1) + Variance of the distribution. +""" +_doc_std = """\ +std(%(shapes)s, loc=0, scale=1) + Standard deviation of the distribution. +""" +_doc_interval = """\ +interval(alpha, %(shapes)s, loc=0, scale=1) + Endpoints of the range that contains alpha percent of the distribution +""" +_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf, + _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf, + _doc_logsf, _doc_ppf, _doc_isf, _doc_moment, + _doc_stats, _doc_entropy, _doc_fit, + _doc_expect, _doc_median, + _doc_mean, _doc_var, _doc_std, _doc_interval]) + +_doc_default_longsummary = """\ +As an instance of the `rv_continuous` class, `%(name)s` object inherits from it +a collection of generic methods (see below for the full list), +and completes them with details specific for this particular distribution. +""" + +_doc_default_frozen_note = """ +Alternatively, the object may be called (as a function) to fix the shape, +location, and scale parameters returning a "frozen" continuous RV object: + +rv = %(name)s(%(shapes)s, loc=0, scale=1) + - Frozen RV object with the same methods but holding the given shape, + location, and scale fixed. +""" +_doc_default_example = """\ +Examples +-------- +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate a few first moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability density function (``pdf``): + +>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s), 100) +>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s), +... 'r-', lw=5, alpha=0.6, label='%(name)s pdf') + +Alternatively, the distribution object can be called (as a function) +to fix the shape, location and scale parameters. This returns a "frozen" +RV object holding the given parameters fixed. 
+ +Freeze the distribution and display the frozen ``pdf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf') + +Check accuracy of ``cdf`` and ``ppf``: + +>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s) +>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) + +And compare the histogram: + +>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2) +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +""" + +_doc_default_locscale = """\ +The probability density above is defined in the "standardized" form. To shift +and/or scale the distribution use the ``loc`` and ``scale`` parameters. +Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically +equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with +``y = (x - loc) / scale``. +""" + +_doc_default = ''.join([_doc_default_longsummary, + _doc_allmethods, + '\n', + _doc_default_example]) + +_doc_default_before_notes = ''.join([_doc_default_longsummary, + _doc_allmethods]) + +docdict = { + 'rvs': _doc_rvs, + 'pdf': _doc_pdf, + 'logpdf': _doc_logpdf, + 'cdf': _doc_cdf, + 'logcdf': _doc_logcdf, + 'sf': _doc_sf, + 'logsf': _doc_logsf, + 'ppf': _doc_ppf, + 'isf': _doc_isf, + 'stats': _doc_stats, + 'entropy': _doc_entropy, + 'fit': _doc_fit, + 'moment': _doc_moment, + 'expect': _doc_expect, + 'interval': _doc_interval, + 'mean': _doc_mean, + 'std': _doc_std, + 'var': _doc_var, + 'median': _doc_median, + 'allmethods': _doc_allmethods, + 'longsummary': _doc_default_longsummary, + 'frozennote': _doc_default_frozen_note, + 'example': _doc_default_example, + 'default': _doc_default, + 'before_notes': _doc_default_before_notes, + 'after_notes': _doc_default_locscale +} + +# Reuse common content between continuous and discrete docs, change some +# minor bits. +docdict_discrete = docdict.copy() + +docdict_discrete['pmf'] = _doc_pmf +docdict_discrete['logpmf'] = _doc_logpmf +docdict_discrete['expect'] = _doc_expect_discrete +_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf', + 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median', + 'mean', 'var', 'std', 'interval'] +for obj in _doc_disc_methods: + docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '') + +_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf'] +for obj in _doc_disc_methods_err_varname: + docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ') + +docdict_discrete.pop('pdf') +docdict_discrete.pop('logpdf') + +_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods]) +docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods + +docdict_discrete['longsummary'] = _doc_default_longsummary.replace( + 'rv_continuous', 'rv_discrete') + +_doc_default_frozen_note = """ +Alternatively, the object may be called (as a function) to fix the shape and +location parameters returning a "frozen" discrete RV object: + +rv = %(name)s(%(shapes)s, loc=0) + - Frozen RV object with the same methods but holding the given shape and + location fixed. 
+""" +docdict_discrete['frozennote'] = _doc_default_frozen_note + +_doc_default_discrete_example = """\ +Examples +-------- +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate a few first moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability mass function (``pmf``): + +>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s)) +>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf') +>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5) + +Alternatively, the distribution object can be called (as a function) +to fix the shape and location. This returns a "frozen" RV object holding +the given parameters fixed. + +Freeze the distribution and display the frozen ``pmf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1, +... label='frozen pmf') +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +Check accuracy of ``cdf`` and ``ppf``: + +>>> prob = %(name)s.cdf(x, %(shapes)s) +>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) +""" + + +_doc_default_discrete_locscale = """\ +The probability mass function above is defined in the "standardized" form. +To shift distribution use the ``loc`` parameter. +Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically +equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``. +""" + +docdict_discrete['example'] = _doc_default_discrete_example +docdict_discrete['after_notes'] = _doc_default_discrete_locscale + +_doc_default_before_notes = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods']]) +docdict_discrete['before_notes'] = _doc_default_before_notes + +_doc_default_disc = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods'], + docdict_discrete['frozennote'], + docdict_discrete['example']]) +docdict_discrete['default'] = _doc_default_disc + +# clean up all the separate docstring elements, we do not need them anymore +for obj in [s for s in dir() if s.startswith('_doc_')]: + exec('del ' + obj) +del obj +try: + del s +except NameError: + # in Python 3, loop variables are not visible after the loop + pass + + +def _moment(data, n, mu=None): + if mu is None: + mu = data.mean() + return ((data - mu)**n).mean() + + +def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args): + if (n == 0): + return 1.0 + elif (n == 1): + if mu is None: + val = moment_func(1, *args) + else: + val = mu + elif (n == 2): + if mu2 is None or mu is None: + val = moment_func(2, *args) + else: + val = mu2 + mu*mu + elif (n == 3): + if g1 is None or mu2 is None or mu is None: + val = moment_func(3, *args) + else: + mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment + val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment + elif (n == 4): + if g1 is None or g2 is None or mu2 is None or mu is None: + val = moment_func(4, *args) + else: + mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment + mu3 = g1*np.power(mu2, 1.5) # 3rd central moment + val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu + else: + val = moment_func(n, *args) + + return val + + +def _skew(data): + """ + skew is third central moment / variance**(1.5) + """ + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m3 = ((data - mu)**3).mean() + return m3 / np.power(m2, 1.5) + + +def _kurtosis(data): + """ + kurtosis is 
fourth central moment / variance**2 - 3 + """ + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m4 = ((data - mu)**4).mean() + return m4 / m2**2 - 3 + + +# Frozen RV class +class rv_frozen(object): + + def __init__(self, dist, *args, **kwds): + self.args = args + self.kwds = kwds + + # create a new instance + self.dist = dist.__class__(**dist._updated_ctor_param()) + + # a, b may be set in _argcheck, depending on *args, **kwds. Ouch. + shapes, _, _ = self.dist._parse_args(*args, **kwds) + self.dist._argcheck(*shapes) + self.a, self.b = self.dist.a, self.dist.b + + @property + def random_state(self): + return self.dist._random_state + + @random_state.setter + def random_state(self, seed): + self.dist._random_state = check_random_state(seed) + + def pdf(self, x): # raises AttributeError in frozen discrete distribution + return self.dist.pdf(x, *self.args, **self.kwds) + + def logpdf(self, x): + return self.dist.logpdf(x, *self.args, **self.kwds) + + def cdf(self, x): + return self.dist.cdf(x, *self.args, **self.kwds) + + def logcdf(self, x): + return self.dist.logcdf(x, *self.args, **self.kwds) + + def ppf(self, q): + return self.dist.ppf(q, *self.args, **self.kwds) + + def isf(self, q): + return self.dist.isf(q, *self.args, **self.kwds) + + def rvs(self, size=None, random_state=None): + kwds = self.kwds.copy() + kwds.update({'size': size, 'random_state': random_state}) + return self.dist.rvs(*self.args, **kwds) + + def sf(self, x): + return self.dist.sf(x, *self.args, **self.kwds) + + def logsf(self, x): + return self.dist.logsf(x, *self.args, **self.kwds) + + def stats(self, moments='mv'): + kwds = self.kwds.copy() + kwds.update({'moments': moments}) + return self.dist.stats(*self.args, **kwds) + + def median(self): + return self.dist.median(*self.args, **self.kwds) + + def mean(self): + return self.dist.mean(*self.args, **self.kwds) + + def var(self): + return self.dist.var(*self.args, **self.kwds) + + def std(self): + return self.dist.std(*self.args, **self.kwds) + + def moment(self, n): + return self.dist.moment(n, *self.args, **self.kwds) + + def entropy(self): + return self.dist.entropy(*self.args, **self.kwds) + + def pmf(self, k): + return self.dist.pmf(k, *self.args, **self.kwds) + + def logpmf(self, k): + return self.dist.logpmf(k, *self.args, **self.kwds) + + def interval(self, alpha): + return self.dist.interval(alpha, *self.args, **self.kwds) + + def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds): + # expect method only accepts shape parameters as positional args + # hence convert self.args, self.kwds, also loc/scale + # See the .expect method docstrings for the meaning of + # other parameters. + a, loc, scale = self.dist._parse_args(*self.args, **self.kwds) + if isinstance(self.dist, rv_discrete): + return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds) + else: + return self.dist.expect(func, a, loc, scale, lb, ub, + conditional, **kwds) + + +# This should be rewritten +def argsreduce(cond, *args): + """Return the sequence of ravel(args[i]) where ravel(condition) is + True in 1D. 
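+    In other words, each argument is broadcast against `cond` and then
+    flattened, keeping only the entries at positions where `cond` is true.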
+ + Examples + -------- + >>> import numpy as np + >>> rand = np.random.random_sample + >>> A = rand((4, 5)) + >>> B = 2 + >>> C = rand((1, 5)) + >>> cond = np.ones(A.shape) + >>> [A1, B1, C1] = argsreduce(cond, A, B, C) + >>> B1.shape + (20,) + >>> cond[2,:] = 0 + >>> [A2, B2, C2] = argsreduce(cond, A, B, C) + >>> B2.shape + (15,) + + """ + newargs = np.atleast_1d(*args) + if not isinstance(newargs, list): + newargs = [newargs, ] + expand_arr = (cond == cond) + return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs] + + +parse_arg_template = """ +def _parse_args(self, %(shape_arg_str)s %(locscale_in)s): + return (%(shape_arg_str)s), %(locscale_out)s + +def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None): + return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size) + +def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'): + return (%(shape_arg_str)s), %(locscale_out)s, moments +""" + + +# Both the continuous and discrete distributions depend on ncx2. +# The function name ncx2 is an abbreviation for noncentral chi squared. + +def _ncx2_log_pdf(x, df, nc): + # We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the + # factor of exp(-xs*ns) into the ive function to improve numerical + # stability at large values of xs. See also `rice.pdf`. + df2 = df/2.0 - 1.0 + xs, ns = np.sqrt(x), np.sqrt(nc) + res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2 + res += np.log(ive(df2, xs*ns) / 2.0) + return res + + +def _ncx2_pdf(x, df, nc): + return np.exp(_ncx2_log_pdf(x, df, nc)) + + +def _ncx2_cdf(x, df, nc): + return chndtr(x, df, nc) + + +class rv_generic(object): + """Class which encapsulates common functionality between rv_discrete + and rv_continuous. + + """ + def __init__(self, seed=None): + super(rv_generic, self).__init__() + + # figure out if _stats signature has 'moments' keyword + sign = _getargspec(self._stats) + self._stats_has_moments = ((sign[2] is not None) or + ('moments' in sign[0])) + self._random_state = check_random_state(seed) + + @property + def random_state(self): + """ Get or set the RandomState object for generating random variates. + + This can be either None or an existing RandomState object. + + If None (or np.random), use the RandomState singleton used by np.random. + If already a RandomState instance, use it. + If an int, use a new RandomState instance seeded with seed. + + """ + return self._random_state + + @random_state.setter + def random_state(self, seed): + self._random_state = check_random_state(seed) + + def __getstate__(self): + return self._updated_ctor_param(), self._random_state + + def __setstate__(self, state): + ctor_param, r = state + self.__init__(**ctor_param) + self._random_state = r + return self + + def _construct_argparser( + self, meths_to_inspect, locscale_in, locscale_out): + """Construct the parser for the shape arguments. + + Generates the argument-parsing functions dynamically and attaches + them to the instance. + Is supposed to be called in __init__ of a class for each distribution. + + If self.shapes is a non-empty string, interprets it as a + comma-separated list of shape parameters. + + Otherwise inspects the call signatures of `meths_to_inspect` + and constructs the argument-parsing functions from these. + In this case also sets `shapes` and `numargs`. 
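+
+        For example, with ``shapes = 'a, b'`` the generated parser has,
+        schematically, the signature ``_parse_args(self, a, b, loc=0,
+        scale=1)`` (cf. `parse_arg_template` above).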
+ """ + + if self.shapes: + # sanitize the user-supplied shapes + if not isinstance(self.shapes, string_types): + raise TypeError('shapes must be a string.') + + shapes = self.shapes.replace(',', ' ').split() + + for field in shapes: + if keyword.iskeyword(field): + raise SyntaxError('keywords cannot be used as shapes.') + if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field): + raise SyntaxError( + 'shapes must be valid python identifiers') + else: + # find out the call signatures (_pdf, _cdf etc), deduce shape + # arguments. Generic methods only have 'self, x', any further args + # are shapes. + shapes_list = [] + for meth in meths_to_inspect: + shapes_args = _getargspec(meth) # NB: does not contain self + args = shapes_args.args[1:] # peel off 'x', too + + if args: + shapes_list.append(args) + + # *args or **kwargs are not allowed w/automatic shapes + if shapes_args.varargs is not None: + raise TypeError( + '*args are not allowed w/out explicit shapes') + if shapes_args.keywords is not None: + raise TypeError( + '**kwds are not allowed w/out explicit shapes') + if shapes_args.defaults is not None: + raise TypeError('defaults are not allowed for shapes') + + if shapes_list: + shapes = shapes_list[0] + + # make sure the signatures are consistent + for item in shapes_list: + if item != shapes: + raise TypeError('Shape arguments are inconsistent.') + else: + shapes = [] + + # have the arguments, construct the method from template + shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None + dct = dict(shape_arg_str=shapes_str, + locscale_in=locscale_in, + locscale_out=locscale_out, + ) + ns = {} + exec_(parse_arg_template % dct, ns) + # NB: attach to the instance, not class + for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']: + setattr(self, name, + instancemethod(ns[name], self, self.__class__) + ) + + self.shapes = ', '.join(shapes) if shapes else None + if not hasattr(self, 'numargs'): + # allows more general subclassing with *args + self.numargs = len(shapes) + + def _construct_doc(self, docdict, shapes_vals=None): + """Construct the instance docstring with string substitutions.""" + tempdict = docdict.copy() + tempdict['name'] = self.name or 'distname' + tempdict['shapes'] = self.shapes or '' + + if shapes_vals is None: + shapes_vals = () + vals = ', '.join('%.3g' % val for val in shapes_vals) + tempdict['vals'] = vals + + tempdict['shapes_'] = self.shapes or '' + if self.shapes and self.numargs == 1: + tempdict['shapes_'] += ',' + + if self.shapes: + tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals) + else: + tempdict['set_vals_stmt'] = '' + + if self.shapes is None: + # remove shapes from call parameters if there are none + for item in ['default', 'before_notes']: + tempdict[item] = tempdict[item].replace( + "\n%(shapes)s : array_like\n shape parameters", "") + for i in range(2): + if self.shapes is None: + # necessary because we use %(shapes)s in two forms (w w/o ", ") + self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") + self.__doc__ = doccer.docformat(self.__doc__, tempdict) + + # correct for empty shapes + self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')') + + def _construct_default_doc(self, longname=None, extradoc=None, + docdict=None, discrete='continuous'): + """Construct instance docstring from the default template.""" + if longname is None: + longname = 'A' + if extradoc is None: + extradoc = '' + if extradoc.startswith('\n\n'): + extradoc = extradoc[2:] + self.__doc__ = ''.join(['%s %s random variable.' 
% (longname, discrete), + '\n\n%(before_notes)s\n', docheaders['notes'], + extradoc, '\n%(example)s']) + self._construct_doc(docdict) + + def freeze(self, *args, **kwds): + """Freeze the distribution for the given arguments. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution. Should include all + the non-optional arguments, may include ``loc`` and ``scale``. + + Returns + ------- + rv_frozen : rv_frozen instance + The frozen distribution. + + """ + return rv_frozen(self, *args, **kwds) + + def __call__(self, *args, **kwds): + return self.freeze(*args, **kwds) + __call__.__doc__ = freeze.__doc__ + + # The actual calculation functions (no basic checking need be done) + # If these are defined, the others won't be looked at. + # Otherwise, the other set can be defined. + def _stats(self, *args, **kwds): + return None, None, None, None + + # Central moments + def _munp(self, n, *args): + # Silence floating point warnings from integration. + olderr = np.seterr(all='ignore') + vals = self.generic_moment(n, *args) + np.seterr(**olderr) + return vals + + def _argcheck_rvs(self, *args, **kwargs): + # Handle broadcasting and size validation of the rvs method. + # Subclasses should not have to override this method. + # The rule is that if `size` is not None, then `size` gives the + # shape of the result (integer values of `size` are treated as + # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.) + # + # `args` is expected to contain the shape parameters (if any), the + # location and the scale in a flat tuple (e.g. if there are two + # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`). + # The only keyword argument expected is 'size'. + size = kwargs.get('size', None) + all_bcast = np.broadcast_arrays(*args) + + def squeeze_left(a): + while a.ndim > 0 and a.shape[0] == 1: + a = a[0] + return a + + # Eliminate trivial leading dimensions. In the convention + # used by numpy's random variate generators, trivial leading + # dimensions are effectively ignored. In other words, when `size` + # is given, trivial leading dimensions of the broadcast parameters + # in excess of the number of dimensions in size are ignored, e.g. + # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3) + # array([ 1.00104267, 3.00422496, 4.99799278]) + # If `size` is not given, the exact broadcast shape is preserved: + # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]]) + # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]]) + # + all_bcast = [squeeze_left(a) for a in all_bcast] + bcast_shape = all_bcast[0].shape + bcast_ndim = all_bcast[0].ndim + + if size is None: + size_ = bcast_shape + else: + size_ = tuple(np.atleast_1d(size)) + + # Check compatibility of size_ with the broadcast shape of all + # the parameters. This check is intended to be consistent with + # how the numpy random variate generators (e.g. np.random.normal, + # np.random.beta) handle their arguments. The rule is that, if size + # is given, it determines the shape of the output. Broadcasting + # can't change the output size. + + # This is the standard broadcasting convention of extending the + # shape with fewer dimensions with enough dimensions of length 1 + # so that the two shapes have the same number of dimensions. + ndiff = bcast_ndim - len(size_) + if ndiff < 0: + bcast_shape = (1,)*(-ndiff) + bcast_shape + elif ndiff > 0: + size_ = (1,)*ndiff + size_ + + # This compatibility test is not standard. 
In "regular" broadcasting, + # two shapes are compatible if for each dimension, the lengths are the + # same or one of the lengths is 1. Here, the length of a dimension in + # size_ must not be less than the corresponding length in bcast_shape. + ok = all([bcdim == 1 or bcdim == szdim + for (bcdim, szdim) in zip(bcast_shape, size_)]) + if not ok: + raise ValueError("size does not match the broadcast shape of " + "the parameters.") + + param_bcast = all_bcast[:-2] + loc_bcast = all_bcast[-2] + scale_bcast = all_bcast[-1] + + return param_bcast, loc_bcast, scale_bcast, size_ + + ## These are the methods you must define (standard form functions) + ## NB: generic _pdf, _logpdf, _cdf are different for + ## rv_continuous and rv_discrete hence are defined in there + def _argcheck(self, *args): + """Default check for correct values on args and keywords. + + Returns condition array of 1's where arguments are correct and + 0's where they are not. + + """ + cond = 1 + for arg in args: + cond = logical_and(cond, (asarray(arg) > 0)) + return cond + + def _support_mask(self, x): + return (self.a <= x) & (x <= self.b) + + def _open_support_mask(self, x): + return (self.a < x) & (x < self.b) + + def _rvs(self, *args): + # This method must handle self._size being a tuple, and it must + # properly broadcast *args and self._size. self._size might be + # an empty tuple, which means a scalar random variate is to be + # generated. + + ## Use basic inverse cdf algorithm for RV generation as default. + U = self._random_state.random_sample(self._size) + Y = self._ppf(U, *args) + return Y + + def _logcdf(self, x, *args): + return log(self._cdf(x, *args)) + + def _sf(self, x, *args): + return 1.0-self._cdf(x, *args) + + def _logsf(self, x, *args): + return log(self._sf(x, *args)) + + def _ppf(self, q, *args): + return self._ppfvec(q, *args) + + def _isf(self, q, *args): + return self._ppf(1.0-q, *args) # use correct _ppf for subclasses + + # These are actually called, and should not be overwritten if you + # want to keep error checking. + def rvs(self, *args, **kwds): + """ + Random variates of given type. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + scale : array_like, optional + Scale parameter (default=1). + size : int or tuple of ints, optional + Defining number of random variates (default is 1). + random_state : None or int or ``np.random.RandomState`` instance, optional + If int or RandomState, use it for drawing the random variates. + If None, rely on ``self.random_state``. + Default is None. + + Returns + ------- + rvs : ndarray or scalar + Random variates of given `size`. + + """ + discrete = kwds.pop('discrete', None) + rndm = kwds.pop('random_state', None) + args, loc, scale, size = self._parse_args_rvs(*args, **kwds) + cond = logical_and(self._argcheck(*args), (scale >= 0)) + if not np.all(cond): + raise ValueError("Domain error in arguments.") + + if np.all(scale == 0): + return loc*ones(size, 'd') + + # extra gymnastics needed for a custom random_state + if rndm is not None: + random_state_saved = self._random_state + self._random_state = check_random_state(rndm) + + # `size` should just be an argument to _rvs(), but for, um, + # historical reasons, it is made an attribute that is read + # by _rvs(). 
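+        # (E.g. skellam_gen._rvs in _discrete_distns.py reads the attribute
+        # back as ``n = self._size`` and hands ``n`` to its two Poisson
+        # samplers.)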
+ self._size = size + vals = self._rvs(*args) + + vals = vals * scale + loc + + # do not forget to restore the _random_state + if rndm is not None: + self._random_state = random_state_saved + + # Cast to int if discrete + if discrete: + if size == (): + vals = int(vals) + else: + vals = vals.astype(int) + + return vals + + def stats(self, *args, **kwds): + """ + Some statistics of the given RV. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional (continuous RVs only) + scale parameter (default=1) + moments : str, optional + composed of letters ['mvsk'] defining which moments to compute: + 'm' = mean, + 'v' = variance, + 's' = (Fisher's) skew, + 'k' = (Fisher's) kurtosis. + (default is 'mv') + + Returns + ------- + stats : sequence + of requested moments. + + """ + args, loc, scale, moments = self._parse_args_stats(*args, **kwds) + # scale = 1 by construction for discrete RVs + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = [] + default = valarray(shape(cond), self.badvalue) + + # Use only entries that are valid in calculation + if np.any(cond): + goodargs = argsreduce(cond, *(args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + + if self._stats_has_moments: + mu, mu2, g1, g2 = self._stats(*goodargs, + **{'moments': moments}) + else: + mu, mu2, g1, g2 = self._stats(*goodargs) + if g1 is None: + mu3 = None + else: + if mu2 is None: + mu2 = self._munp(2, *goodargs) + if g2 is None: + # (mu2**1.5) breaks down for nan and inf + mu3 = g1 * np.power(mu2, 1.5) + + if 'm' in moments: + if mu is None: + mu = self._munp(1, *goodargs) + out0 = default.copy() + place(out0, cond, mu * scale + loc) + output.append(out0) + + if 'v' in moments: + if mu2 is None: + mu2p = self._munp(2, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + mu2 = mu2p - mu * mu + if np.isinf(mu): + # if mean is inf then var is also inf + mu2 = np.inf + out0 = default.copy() + place(out0, cond, mu2 * scale * scale) + output.append(out0) + + if 's' in moments: + if g1 is None: + mu3p = self._munp(3, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + mu2 = mu2p - mu * mu + with np.errstate(invalid='ignore'): + mu3 = mu3p - 3 * mu * mu2 - mu**3 + g1 = mu3 / np.power(mu2, 1.5) + out0 = default.copy() + place(out0, cond, g1) + output.append(out0) + + if 'k' in moments: + if g2 is None: + mu4p = self._munp(4, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + mu2 = mu2p - mu * mu + if mu3 is None: + mu3p = self._munp(3, *goodargs) + with np.errstate(invalid='ignore'): + mu3 = mu3p - 3 * mu * mu2 - mu**3 + with np.errstate(invalid='ignore'): + mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4 + g2 = mu4 / mu2**2.0 - 3.0 + out0 = default.copy() + place(out0, cond, g2) + output.append(out0) + else: # no valid args + output = [] + for _ in moments: + out0 = default.copy() + output.append(out0) + + if len(output) == 1: + return output[0] + else: + return tuple(output) + + def entropy(self, *args, **kwds): + """ + Differential entropy of the RV. + + Parameters + ---------- + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + scale : array_like, optional (continuous distributions only). + Scale parameter (default=1). + + Notes + ----- + Entropy is defined base `e`: + + >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5))) + >>> np.allclose(drv.entropy(), np.log(2.0)) + True + + """ + args, loc, scale = self._parse_args(*args, **kwds) + # NB: for discrete distributions scale=1 by construction in _parse_args + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = zeros(shape(cond0), 'd') + place(output, (1-cond0), self.badvalue) + goodargs = argsreduce(cond0, scale, *args) + goodscale = goodargs[0] + goodargs = goodargs[1:] + place(output, cond0, self.vecentropy(*goodargs) + log(goodscale)) + return output + + def moment(self, n, *args, **kwds): + """ + n-th order non-central moment of distribution. + + Parameters + ---------- + n : int, n >= 1 + Order of moment. + arg1, arg2, arg3,... : float + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + """ + args, loc, scale = self._parse_args(*args, **kwds) + if not (self._argcheck(*args) and (scale > 0)): + return nan + if (floor(n) != n): + raise ValueError("Moment must be an integer.") + if (n < 0): + raise ValueError("Moment must be positive.") + mu, mu2, g1, g2 = None, None, None, None + if (n > 0) and (n < 5): + if self._stats_has_moments: + mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]} + else: + mdict = {} + mu, mu2, g1, g2 = self._stats(*args, **mdict) + val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args) + + # Convert to transformed X = L + S*Y + # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n) + if loc == 0: + return scale**n * val + else: + result = 0 + fac = float(scale) / float(loc) + for k in range(n): + valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args) + result += comb(n, k, exact=True)*(fac**k) * valk + result += fac**n * val + return result * loc**n + + def median(self, *args, **kwds): + """ + Median of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + Location parameter, Default is 0. + scale : array_like, optional + Scale parameter, Default is 1. + + Returns + ------- + median : float + The median of the distribution. + + See Also + -------- + stats.distributions.rv_discrete.ppf + Inverse of the CDF + + """ + return self.ppf(0.5, *args, **kwds) + + def mean(self, *args, **kwds): + """ + Mean of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + mean : float + the mean of the distribution + + """ + kwds['moments'] = 'm' + res = self.stats(*args, **kwds) + if isinstance(res, ndarray) and res.ndim == 0: + return res[()] + return res + + def var(self, *args, **kwds): + """ + Variance of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + var : float + the variance of the distribution + + """ + kwds['moments'] = 'v' + res = self.stats(*args, **kwds) + if isinstance(res, ndarray) and res.ndim == 0: + return res[()] + return res + + def std(self, *args, **kwds): + """ + Standard deviation of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + std : float + standard deviation of the distribution + + """ + kwds['moments'] = 'v' + res = sqrt(self.stats(*args, **kwds)) + return res + + def interval(self, alpha, *args, **kwds): + """ + Confidence interval with equal areas around the median. + + Parameters + ---------- + alpha : array_like of float + Probability that an rv will be drawn from the returned range. + Each value should be in the range [0, 1]. + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter, Default is 0. + scale : array_like, optional + scale parameter, Default is 1. + + Returns + ------- + a, b : ndarray of float + end-points of range that contain ``100 * alpha %`` of the rv's + possible values. + + """ + alpha = asarray(alpha) + if np.any((alpha > 1) | (alpha < 0)): + raise ValueError("alpha must be between 0 and 1 inclusive") + q1 = (1.0-alpha)/2 + q2 = (1.0+alpha)/2 + a = self.ppf(q1, *args, **kwds) + b = self.ppf(q2, *args, **kwds) + return a, b + + +## continuous random variables: implement maybe later +## +## hf --- Hazard Function (PDF / SF) +## chf --- Cumulative hazard function (-log(SF)) +## psf --- Probability sparsity function (reciprocal of the pdf) in +## units of percent-point-function (as a function of q). +## Also, the derivative of the percent-point function. + +class rv_continuous(rv_generic): + """ + A generic continuous random variable class meant for subclassing. + + `rv_continuous` is a base class to construct specific distribution classes + and instances for continuous random variables. It cannot be used + directly as a distribution. + + Parameters + ---------- + momtype : int, optional + The type of generic moment calculation to use: 0 for pdf, 1 (default) + for ppf. + a : float, optional + Lower bound of the support of the distribution, default is minus + infinity. + b : float, optional + Upper bound of the support of the distribution, default is plus + infinity. 
+ xtol : float, optional + The tolerance for fixed point calculation for generic ppf. + badvalue : float, optional + The value in a result arrays that indicates a value that for which + some argument restriction is violated, default is np.nan. + name : str, optional + The name of the instance. This string is used to construct the default + example for distributions. + longname : str, optional + This string is used as part of the first line of the docstring returned + when a subclass has no docstring of its own. Note: `longname` exists + for backwards compatibility, do not use for new subclasses. + shapes : str, optional + The shape of the distribution. For example ``"m, n"`` for a + distribution that takes two integers as the two shape arguments for all + its methods. If not provided, shape parameters will be inferred from + the signature of the private methods, ``_pdf`` and ``_cdf`` of the + instance. + extradoc : str, optional, deprecated + This string is used as the last part of the docstring returned when a + subclass has no docstring of its own. Note: `extradoc` exists for + backwards compatibility, do not use for new subclasses. + seed : None or int or ``numpy.random.RandomState`` instance, optional + This parameter defines the RandomState object to use for drawing + random variates. + If None (or np.random), the global np.random state is used. + If integer, it is used to seed the local RandomState instance. + Default is None. + + Methods + ------- + rvs + pdf + logpdf + cdf + logcdf + sf + logsf + ppf + isf + moment + stats + entropy + expect + median + mean + std + var + interval + __call__ + fit + fit_loc_scale + nnlf + + Notes + ----- + Public methods of an instance of a distribution class (e.g., ``pdf``, + ``cdf``) check their arguments and pass valid arguments to private, + computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid + if it is within the support of a distribution, ``self.a <= x <= self.b``. + Whether a shape parameter is valid is decided by an ``_argcheck`` method + (which defaults to checking that its arguments are strictly positive.) + + **Subclassing** + + New random variables can be defined by subclassing the `rv_continuous` class + and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized + to location 0 and scale 1). + + If positive argument checking is not correct for your RV + then you will also need to re-define the ``_argcheck`` method. + + Correct, but potentially slow defaults exist for the remaining + methods but for speed and/or accuracy you can over-ride:: + + _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf + + The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``, + applied to a uniform random variate. In order to generate random variates + efficiently, either the default ``_ppf`` needs to be overwritten (e.g. + if the inverse cdf can expressed in an explicit form) or a sampling + method needs to be implemented in a custom ``_rvs`` method. + + If possible, you should override ``_isf``, ``_sf`` or ``_logsf``. + The main reason would be to improve numerical accuracy: for example, + the survival function ``_sf`` is computed as ``1 - _cdf`` which can + result in loss of precision if ``_cdf(x)`` is close to one. 
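+
+    For instance (an illustrative sketch, not tied to any particular
+    subclass): a standard-exponential distribution is better served by
+    implementing ``_sf`` directly as ``exp(-x)`` than by the generic
+    ``1 - _cdf(x)``, which rounds to ``0.0`` once ``_cdf(x)`` is within
+    machine epsilon of one (around ``x = 37``), while ``exp(-x)`` stays
+    accurate far beyond that.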
+ + **Methods that can be overwritten by subclasses** + :: + + _rvs + _pdf + _cdf + _sf + _ppf + _isf + _stats + _munp + _entropy + _argcheck + + There are additional (internal and private) generic methods that can + be useful for cross-checking and for debugging, but might work in all + cases when directly called. + + A note on ``shapes``: subclasses need not specify them explicitly. In this + case, `shapes` will be automatically deduced from the signatures of the + overridden methods (`pdf`, `cdf` etc). + If, for some reason, you prefer to avoid relying on introspection, you can + specify ``shapes`` explicitly as an argument to the instance constructor. + + + **Frozen Distributions** + + Normally, you must provide shape parameters (and, optionally, location and + scale parameters to each call of a method of a distribution. + + Alternatively, the object may be called (as a function) to fix the shape, + location, and scale parameters returning a "frozen" continuous RV object: + + rv = generic(<shape(s)>, loc=0, scale=1) + `rv_frozen` object with the same methods but holding the given shape, + location, and scale fixed + + **Statistics** + + Statistics are computed using numerical integration by default. + For speed you can redefine this using ``_stats``: + + - take shape parameters and return mu, mu2, g1, g2 + - If you can't compute one of these, return it as None + - Can also be defined with a keyword argument ``moments``, which is a + string composed of "m", "v", "s", and/or "k". + Only the components appearing in string should be computed and + returned in the order "m", "v", "s", or "k" with missing values + returned as None. + + Alternatively, you can override ``_munp``, which takes ``n`` and shape + parameters and returns the n-th non-central moment of the distribution. + + Examples + -------- + To create a new Gaussian distribution, we would do the following: + + >>> from scipy.stats import rv_continuous + >>> class gaussian_gen(rv_continuous): + ... "Gaussian distribution" + ... def _pdf(self, x): + ... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi) + >>> gaussian = gaussian_gen(name='gaussian') + + ``scipy.stats`` distributions are *instances*, so here we subclass + `rv_continuous` and create an instance. With this, we now have + a fully functional distribution with all relevant methods automagically + generated by the framework. + + Note that above we defined a standard normal distribution, with zero mean + and unit variance. Shifting and scaling of the distribution can be done + by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)`` + essentially computes ``y = (x - loc) / scale`` and + ``gaussian._pdf(y) / scale``. 
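+
+    As a quick numerical check of that relation (with arbitrary values):
+
+    >>> x, loc, scale = 1.0, 2.0, 3.0
+    >>> np.allclose(gaussian.pdf(x, loc, scale),
+    ...             gaussian._pdf((x - loc) / scale) / scale)
+    True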
+ + """ + def __init__(self, momtype=1, a=None, b=None, xtol=1e-14, + badvalue=None, name=None, longname=None, + shapes=None, extradoc=None, seed=None): + + super(rv_continuous, self).__init__(seed) + + # save the ctor parameters, cf generic freeze + self._ctor_param = dict( + momtype=momtype, a=a, b=b, xtol=xtol, + badvalue=badvalue, name=name, longname=longname, + shapes=shapes, extradoc=extradoc, seed=seed) + + if badvalue is None: + badvalue = nan + if name is None: + name = 'Distribution' + self.badvalue = badvalue + self.name = name + self.a = a + self.b = b + if a is None: + self.a = -inf + if b is None: + self.b = inf + self.xtol = xtol + self.moment_type = momtype + self.shapes = shapes + self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf], + locscale_in='loc=0, scale=1', + locscale_out='loc, scale') + + # nin correction + self._ppfvec = vectorize(self._ppf_single, otypes='d') + self._ppfvec.nin = self.numargs + 1 + self.vecentropy = vectorize(self._entropy, otypes='d') + self._cdfvec = vectorize(self._cdf_single, otypes='d') + self._cdfvec.nin = self.numargs + 1 + + self.extradoc = extradoc + if momtype == 0: + self.generic_moment = vectorize(self._mom0_sc, otypes='d') + else: + self.generic_moment = vectorize(self._mom1_sc, otypes='d') + # Because of the *args argument of _mom0_sc, vectorize cannot count the + # number of arguments correctly. + self.generic_moment.nin = self.numargs + 1 + + if longname is None: + if name[0] in ['aeiouAEIOU']: + hstr = "An " + else: + hstr = "A " + longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + extradoc=extradoc, + docdict=docdict, + discrete='continuous') + else: + dct = dict(distcont) + self._construct_doc(docdict, dct.get(self.name)) + + def _updated_ctor_param(self): + """ Return the current version of _ctor_param, possibly updated by user. + + Used by freezing and pickling. + Keep this in sync with the signature of __init__. + """ + dct = self._ctor_param.copy() + dct['a'] = self.a + dct['b'] = self.b + dct['xtol'] = self.xtol + dct['badvalue'] = self.badvalue + dct['name'] = self.name + dct['shapes'] = self.shapes + dct['extradoc'] = self.extradoc + return dct + + def _ppf_to_solve(self, x, q, *args): + return self.cdf(*(x, )+args)-q + + def _ppf_single(self, q, *args): + left = right = None + if self.a > -np.inf: + left = self.a + if self.b < np.inf: + right = self.b + + factor = 10. + if not left: # i.e. self.a = -inf + left = -1.*factor + while self._ppf_to_solve(left, q, *args) > 0.: + right = left + left *= factor + # left is now such that cdf(left) < q + if not right: # i.e. 
self.b = inf + right = factor + while self._ppf_to_solve(right, q, *args) < 0.: + left = right + right *= factor + # right is now such that cdf(right) > q + + return optimize.brentq(self._ppf_to_solve, + left, right, args=(q,)+args, xtol=self.xtol) + + # moment from definition + def _mom_integ0(self, x, m, *args): + return x**m * self.pdf(x, *args) + + def _mom0_sc(self, m, *args): + return integrate.quad(self._mom_integ0, self.a, self.b, + args=(m,)+args)[0] + + # moment calculated using ppf + def _mom_integ1(self, q, m, *args): + return (self.ppf(q, *args))**m + + def _mom1_sc(self, m, *args): + return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0] + + def _pdf(self, x, *args): + return derivative(self._cdf, x, dx=1e-5, args=args, order=5) + + ## Could also define any of these + def _logpdf(self, x, *args): + return log(self._pdf(x, *args)) + + def _cdf_single(self, x, *args): + return integrate.quad(self._pdf, self.a, x, args=args)[0] + + def _cdf(self, x, *args): + return self._cdfvec(x, *args) + + ## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined + ## in rv_generic + + def pdf(self, x, *args, **kwds): + """ + Probability density function at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + pdf : ndarray + Probability density function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.find_common_type([x.dtype, np.float64], []) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._support_mask(x) & (scale > 0) + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + putmask(output, (1-cond0)+np.isnan(x), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args+(scale,))) + scale, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._pdf(*goodargs) / scale) + if output.ndim == 0: + return output[()] + return output + + def logpdf(self, x, *args, **kwds): + """ + Log of the probability density function at x of the given RV. + + This uses a more numerically accurate calculation if available. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logpdf : array_like + Log of the probability density function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.find_common_type([x.dtype, np.float64], []) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._support_mask(x) & (scale > 0) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(NINF) + putmask(output, (1-cond0)+np.isnan(x), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args+(scale,))) + scale, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._logpdf(*goodargs) - log(scale)) + if output.ndim == 0: + return output[()] + return output + + def cdf(self, x, *args, **kwds): + """ + Cumulative distribution function of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + cdf : ndarray + Cumulative distribution function evaluated at `x` + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.find_common_type([x.dtype, np.float64], []) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x) & (scale > 0) + cond2 = (x >= self.b) & cond0 + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._cdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logcdf(self, x, *args, **kwds): + """ + Log of the cumulative distribution function at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logcdf : array_like + Log of the cumulative distribution function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.find_common_type([x.dtype, np.float64], []) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x) & (scale > 0) + cond2 = (x >= self.b) & cond0 + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(NINF) + place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logcdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def sf(self, x, *args, **kwds): + """ + Survival function (1 - `cdf`) at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + sf : array_like + Survival function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.find_common_type([x.dtype, np.float64], []) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x) & (scale > 0) + cond2 = cond0 & (x <= self.a) + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._sf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logsf(self, x, *args, **kwds): + """ + Log of the survival function of the given RV. + + Returns the log of the "survival function," defined as (1 - `cdf`), + evaluated at `x`. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logsf : ndarray + Log of the survival function evaluated at `x`. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.find_common_type([x.dtype, np.float64], []) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x) & (scale > 0) + cond2 = cond0 & (x <= self.a) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(NINF) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logsf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def ppf(self, q, *args, **kwds): + """ + Percent point function (inverse of `cdf`) at q of the given RV. + + Parameters + ---------- + q : array_like + lower tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : array_like + quantile corresponding to the lower tail probability q. + + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 0) + cond3 = cond0 & (q == 1) + cond = cond0 & cond1 + output = valarray(shape(cond), value=self.badvalue) + + lower_bound = self.a * scale + loc + upper_bound = self.b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._ppf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def isf(self, q, *args, **kwds): + """ + Inverse survival function (inverse of `sf`) at q of the given RV. + + Parameters + ---------- + q : array_like + upper tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : ndarray or scalar + Quantile corresponding to the upper tail probability q. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 1) + cond3 = cond0 & (q == 0) + cond = cond0 & cond1 + output = valarray(shape(cond), value=self.badvalue) + + lower_bound = self.a * scale + loc + upper_bound = self.b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._isf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def _nnlf(self, x, *args): + return -np.sum(self._logpdf(x, *args), axis=0) + + def _unpack_loc_scale(self, theta): + try: + loc = theta[-2] + scale = theta[-1] + args = tuple(theta[:-2]) + except IndexError: + raise ValueError("Not enough input arguments.") + return loc, scale, args + + def nnlf(self, theta, x): + '''Return negative loglikelihood function. + + Notes + ----- + This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the + parameters (including loc and scale). + ''' + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = asarray((x-loc) / scale) + n_log_scale = len(x) * log(scale) + if np.any(~self._support_mask(x)): + return inf + return self._nnlf(x, *args) + n_log_scale + + def _nnlf_and_penalty(self, x, args): + cond0 = ~self._support_mask(x) + n_bad = np.count_nonzero(cond0, axis=0) + if n_bad > 0: + x = argsreduce(~cond0, x)[0] + logpdf = self._logpdf(x, *args) + finite_logpdf = np.isfinite(logpdf) + n_bad += np.sum(~finite_logpdf, axis=0) + if n_bad > 0: + penalty = n_bad * log(_XMAX) * 100 + return -np.sum(logpdf[finite_logpdf], axis=0) + penalty + return -np.sum(logpdf, axis=0) + + def _penalized_nnlf(self, theta, x): + ''' Return penalized negative loglikelihood function, + i.e., - sum (log pdf(x, theta), axis=0) + penalty + where theta are the parameters (including loc and scale) + ''' + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = asarray((x-loc) / scale) + n_log_scale = len(x) * log(scale) + return self._nnlf_and_penalty(x, args) + n_log_scale + + # return starting point for fit (shape arguments + loc + scale) + def _fitstart(self, data, args=None): + if args is None: + args = (1.0,)*self.numargs + loc, scale = self._fit_loc_scale_support(data, *args) + return args + (loc, scale) + + # Return the (possibly reduced) function to optimize in order to find MLE + # estimates for the .fit method + def _reduce_func(self, args, kwds): + # First of all, convert fshapes params to fnum: eg for stats.beta, + # shapes='a, b'. To fix `a`, can specify either `f1` or `fa`. + # Convert the latter into the former. + if self.shapes: + shapes = self.shapes.replace(',', ' ').split() + for j, s in enumerate(shapes): + val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None) + if val is not None: + key = 'f%d' % j + if key in kwds: + raise ValueError("Duplicate entry for %s." 
% key)
+                    else:
+                        kwds[key] = val
+
+        args = list(args)
+        Nargs = len(args)
+        fixedn = []
+        names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
+        x0 = []
+        for n, key in enumerate(names):
+            if key in kwds:
+                fixedn.append(n)
+                args[n] = kwds.pop(key)
+            else:
+                x0.append(args[n])
+
+        if len(fixedn) == 0:
+            func = self._penalized_nnlf
+            restore = None
+        else:
+            if len(fixedn) == Nargs:
+                raise ValueError(
+                    "All parameters fixed. There is nothing to optimize.")
+
+            def restore(args, theta):
+                # Replace with theta for all numbers not in fixedn
+                # This allows the non-fixed values to vary, but
+                # we still call self.nnlf with all parameters.
+                i = 0
+                for n in range(Nargs):
+                    if n not in fixedn:
+                        args[n] = theta[i]
+                        i += 1
+                return args
+
+            def func(theta, x):
+                newtheta = restore(args[:], theta)
+                return self._penalized_nnlf(newtheta, x)
+
+        return x0, func, restore, args
+
+    def fit(self, data, *args, **kwds):
+        """
+        Return MLEs for shape (if applicable), location, and scale
+        parameters from data.
+
+        MLE stands for Maximum Likelihood Estimate. Starting estimates for
+        the fit are given by input arguments; for any arguments not provided
+        with starting estimates, ``self._fitstart(data)`` is called to
+        generate them.
+
+        One can hold some parameters fixed to specific values by passing in
+        keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
+        and ``floc`` and ``fscale`` (for location and scale parameters,
+        respectively).
+
+        Parameters
+        ----------
+        data : array_like
+            Data to use in calculating the MLEs.
+        args : floats, optional
+            Starting value(s) for any shape-characterizing arguments (those not
+            provided will be determined by a call to ``_fitstart(data)``).
+            No default value.
+        kwds : floats, optional
+            Starting values for the location and scale parameters; no default.
+            Special keyword arguments are recognized as holding certain
+            parameters fixed:
+
+            - f0...fn : hold respective shape parameters fixed.
+              Alternatively, shape parameters to fix can be specified by name.
+              For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
+              are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
+              equivalent to ``f1``.
+
+            - floc : hold location parameter fixed to specified value.
+
+            - fscale : hold scale parameter fixed to specified value.
+
+            - optimizer : The optimizer to use. The optimizer must take
+              ``func`` and starting position as the first two arguments,
+              plus ``args`` (for extra arguments to pass to the
+              function to be optimized) and ``disp=0`` to suppress
+              output as keyword arguments.
+
+        Returns
+        -------
+        mle_tuple : tuple of floats
+            MLEs for any shape parameters (if applicable), followed by those
+            for location and scale. For most random variables, shape statistics
+            will be returned, but there are exceptions (e.g. ``norm``).
+
+        Notes
+        -----
+        This fit is computed by maximizing a log-likelihood function, with a
+        penalty applied for samples outside the range of the distribution. The
+        returned answer is not guaranteed to be the globally optimal MLE; it
+        may only be locally optimal, or the optimization may fail altogether.
+
+        Examples
+        --------
+
+        Generate some data to fit: draw random variates from the `beta`
+        distribution:
+
+        >>> from scipy.stats import beta
+        >>> a, b = 1., 2.
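+        >>> # ``random_state`` (a keyword of ``rvs``) would pin the draws,
+        >>> # and hence the fitted values below, to reproducible numbers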
+ >>> x = beta.rvs(a, b, size=1000) + + Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``): + + >>> a1, b1, loc1, scale1 = beta.fit(x) + + We can also use some prior knowledge about the dataset: let's keep + ``loc`` and ``scale`` fixed: + + >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1) + >>> loc1, scale1 + (0, 1) + + We can also keep shape parameters fixed by using ``f``-keywords. To + keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or, + equivalently, ``fa=1``: + + >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1) + >>> a1 + 1 + + Not all distributions return estimates for the shape parameters. + ``norm`` for example just returns estimates for location and scale: + + >>> from scipy.stats import norm + >>> x = norm.rvs(a, b, size=1000, random_state=123) + >>> loc1, scale1 = norm.fit(x) + >>> loc1, scale1 + (0.92087172783841631, 2.0015750750324668) + """ + Narg = len(args) + if Narg > self.numargs: + raise TypeError("Too many input arguments.") + + start = [None]*2 + if (Narg < self.numargs) or not ('loc' in kwds and + 'scale' in kwds): + # get distribution specific starting locations + start = self._fitstart(data) + args += start[Narg:-2] + loc = kwds.pop('loc', start[-2]) + scale = kwds.pop('scale', start[-1]) + args += (loc, scale) + x0, func, restore, args = self._reduce_func(args, kwds) + + optimizer = kwds.pop('optimizer', optimize.fmin) + # convert string to function in scipy.optimize + if not callable(optimizer) and isinstance(optimizer, string_types): + if not optimizer.startswith('fmin_'): + optimizer = "fmin_"+optimizer + if optimizer == 'fmin_': + optimizer = 'fmin' + try: + optimizer = getattr(optimize, optimizer) + except AttributeError: + raise ValueError("%s is not a valid optimizer" % optimizer) + + # by now kwds must be empty, since everybody took what they needed + if kwds: + raise TypeError("Unknown arguments: %s." % kwds) + + vals = optimizer(func, x0, args=(ravel(data),), disp=0) + if restore is not None: + vals = restore(args, vals) + vals = tuple(vals) + return vals + + def _fit_loc_scale_support(self, data, *args): + """ + Estimate loc and scale parameters from data accounting for support. + + Parameters + ---------- + data : array_like + Data to fit. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + Lhat : float + Estimated location parameter for the data. + Shat : float + Estimated scale parameter for the data. + + """ + data = np.asarray(data) + + # Estimate location and scale according to the method of moments. + loc_hat, scale_hat = self.fit_loc_scale(data, *args) + + # Compute the support according to the shape parameters. + self._argcheck(*args) + a, b = self.a, self.b + support_width = b - a + + # If the support is empty then return the moment-based estimates. + if support_width <= 0: + return loc_hat, scale_hat + + # Compute the proposed support according to the loc and scale + # estimates. + a_hat = loc_hat + a * scale_hat + b_hat = loc_hat + b * scale_hat + + # Use the moment-based estimates if they are compatible with the data. + data_a = np.min(data) + data_b = np.max(data) + if a_hat < data_a and data_b < b_hat: + return loc_hat, scale_hat + + # Otherwise find other estimates that are compatible with the data. 
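+        # Sketch of the heuristic below: pad the observed data range with a
+        # 10% relative margin on each side, then pick loc (and, when the
+        # support is finite, scale) so the rescaled support covers it.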
+ data_width = data_b - data_a + rel_margin = 0.1 + margin = data_width * rel_margin + + # For a finite interval, both the location and scale + # should have interesting values. + if support_width < np.inf: + loc_hat = (data_a - a) - margin + scale_hat = (data_width + 2 * margin) / support_width + return loc_hat, scale_hat + + # For a one-sided interval, use only an interesting location parameter. + if a > -np.inf: + return (data_a - a) - margin, 1 + elif b < np.inf: + return (data_b - b) + margin, 1 + else: + raise RuntimeError + + def fit_loc_scale(self, data, *args): + """ + Estimate loc and scale parameters from data using 1st and 2nd moments. + + Parameters + ---------- + data : array_like + Data to fit. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + Lhat : float + Estimated location parameter for the data. + Shat : float + Estimated scale parameter for the data. + + """ + mu, mu2 = self.stats(*args, **{'moments': 'mv'}) + tmp = asarray(data) + muhat = tmp.mean() + mu2hat = tmp.var() + Shat = sqrt(mu2hat / mu2) + Lhat = muhat - Shat*mu + if not np.isfinite(Lhat): + Lhat = 0 + if not (np.isfinite(Shat) and (0 < Shat)): + Shat = 1 + return Lhat, Shat + + def _entropy(self, *args): + def integ(x): + val = self._pdf(x, *args) + return entr(val) + + # upper limit is often inf, so suppress warnings when integrating + olderr = np.seterr(over='ignore') + h = integrate.quad(integ, self.a, self.b)[0] + np.seterr(**olderr) + + if not np.isnan(h): + return h + else: + # try with different limits if integration problems + low, upp = self.ppf([1e-10, 1. - 1e-10], *args) + if np.isinf(self.b): + upper = upp + else: + upper = self.b + if np.isinf(self.a): + lower = low + else: + lower = self.a + return integrate.quad(integ, lower, upper)[0] + + def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None, + conditional=False, **kwds): + """Calculate expected value of a function with respect to the + distribution by numerical integration. + + The expected value of a function ``f(x)`` with respect to a + distribution ``dist`` is defined as:: + + ub + E[f(x)] = Integral(f(x) * dist.pdf(x)), + lb + + where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)`` + distribution. If the bounds ``lb`` and ``ub`` correspond to the + support of the distribution, e.g. ``[-inf, inf]`` in the default + case, then the integral is the unrestricted expectation of ``f(x)``. + Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0`` + outside a finite interval in which case the expectation is + calculated within the finite range ``[lb, ub]``. + + Parameters + ---------- + func : callable, optional + Function for which integral is calculated. Takes only one argument. + The default is the identity mapping f(x) = x. + args : tuple, optional + Shape parameters of the distribution. + loc : float, optional + Location parameter (default=0). + scale : float, optional + Scale parameter (default=1). + lb, ub : scalar, optional + Lower and upper bound for integration. Default is set to the + support of the distribution. + conditional : bool, optional + If True, the integral is corrected by the conditional probability + of the integration interval. The return value is the expectation + of the function, conditional on being in the given interval. + Default is False. + + Additional keyword arguments are passed to the integration routine. 
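+        (For example, ``epsabs``, ``epsrel`` and ``limit`` are accepted by
+        `scipy.integrate.quad`.)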
+ + Returns + ------- + expect : float + The calculated expected value. + + Notes + ----- + The integration behavior of this function is inherited from + `scipy.integrate.quad`. Neither this function nor + `scipy.integrate.quad` can verify whether the integral exists or is + finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and + ``cauchy(0).expect()`` returns ``0.0``. + + Examples + -------- + + To understand the effect of the bounds of integration consider + >>> from scipy.stats import expon + >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0) + 0.6321205588285578 + + This is close to + + >>> expon(1).cdf(2.0) - expon(1).cdf(0.0) + 0.6321205588285577 + + If ``conditional=True`` + + >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True) + 1.0000000000000002 + + The slight deviation from 1 is due to numerical integration. + """ + lockwds = {'loc': loc, + 'scale': scale} + self._argcheck(*args) + if func is None: + def fun(x, *args): + return x * self.pdf(x, *args, **lockwds) + else: + def fun(x, *args): + return func(x) * self.pdf(x, *args, **lockwds) + if lb is None: + lb = loc + self.a * scale + if ub is None: + ub = loc + self.b * scale + if conditional: + invfac = (self.sf(lb, *args, **lockwds) + - self.sf(ub, *args, **lockwds)) + else: + invfac = 1.0 + kwds['args'] = args + # Silence floating point warnings from integration. + olderr = np.seterr(all='ignore') + vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac + np.seterr(**olderr) + return vals + + +# Helpers for the discrete distributions +def _drv2_moment(self, n, *args): + """Non-central moment of discrete distribution.""" + def fun(x): + return np.power(x, n) * self._pmf(x, *args) + return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc) + + +def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm + b = self.b + a = self.a + if isinf(b): # Be sure ending point is > q + b = int(max(100*q, 10)) + while 1: + if b >= self.b: + qb = 1.0 + break + qb = self._cdf(b, *args) + if (qb < q): + b += 10 + else: + break + else: + qb = 1.0 + if isinf(a): # be sure starting point < q + a = int(min(-100*q, -10)) + while 1: + if a <= self.a: + qb = 0.0 + break + qa = self._cdf(a, *args) + if (qa > q): + a -= 10 + else: + break + else: + qa = self._cdf(a, *args) + + while 1: + if (qa == q): + return a + if (qb == q): + return b + if b <= a+1: + if qa > q: + return a + else: + return b + c = int((a+b)/2.0) + qc = self._cdf(c, *args) + if (qc < q): + if a != c: + a = c + else: + raise RuntimeError('updating stopped, endless loop') + qa = qc + elif (qc > q): + if b != c: + b = c + else: + raise RuntimeError('updating stopped, endless loop') + qb = qc + else: + return c + + +def entropy(pk, qk=None, base=None): + """Calculate the entropy of a distribution for given probability values. + + If only probabilities `pk` are given, the entropy is calculated as + ``S = -sum(pk * log(pk), axis=0)``. + + If `qk` is not None, then compute the Kullback-Leibler divergence + ``S = sum(pk * log(pk / qk), axis=0)``. + + This routine will normalize `pk` and `qk` if they don't sum to 1. + + Parameters + ---------- + pk : sequence + Defines the (discrete) distribution. ``pk[i]`` is the (possibly + unnormalized) probability of event ``i``. + qk : sequence, optional + Sequence against which the relative entropy is computed. Should be in + the same format as `pk`. + base : float, optional + The logarithmic base to use, defaults to ``e`` (natural logarithm). + + Returns + ------- + S : float + The calculated entropy. 
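+
+    Examples
+    --------
+    A small sketch (natural log by default; with ``base=2`` the result is
+    in bits):
+
+    >>> import numpy as np
+    >>> from scipy.stats import entropy
+    >>> np.allclose(entropy([0.5, 0.5]), np.log(2))
+    True
+    >>> entropy([0.5, 0.5], base=2)
+    1.0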
+
+    """
+    pk = asarray(pk)
+    pk = 1.0*pk / np.sum(pk, axis=0)
+    if qk is None:
+        vec = entr(pk)
+    else:
+        qk = asarray(qk)
+        if len(qk) != len(pk):
+            raise ValueError("qk and pk must have same length.")
+        qk = 1.0*qk / np.sum(qk, axis=0)
+        vec = rel_entr(pk, qk)
+    S = np.sum(vec, axis=0)
+    if base is not None:
+        S /= log(base)
+    return S
+
+
+# Must over-ride one of _pmf or _cdf or pass in
+# x_k, p(x_k) lists in initialization
+
+class rv_discrete(rv_generic):
+    """
+    A generic discrete random variable class meant for subclassing.
+
+    `rv_discrete` is a base class to construct specific distribution classes
+    and instances for discrete random variables. It can also be used
+    to construct an arbitrary distribution defined by a list of support
+    points and corresponding probabilities.
+
+    Parameters
+    ----------
+    a : float, optional
+        Lower bound of the support of the distribution, default: 0
+    b : float, optional
+        Upper bound of the support of the distribution, default: plus infinity
+    moment_tol : float, optional
+        The tolerance for the generic calculation of moments.
+    values : tuple of two array_like, optional
+        ``(xk, pk)`` where ``xk`` are integers with non-zero
+        probabilities ``pk`` with ``sum(pk) = 1``.
+    inc : integer, optional
+        Increment for the support of the distribution.
+        Default is 1. (other values have not been tested)
+    badvalue : float, optional
+        The value in result arrays that indicates a value for which some
+        argument restriction is violated; default is np.nan.
+    name : str, optional
+        The name of the instance. This string is used to construct the default
+        example for distributions.
+    longname : str, optional
+        This string is used as part of the first line of the docstring returned
+        when a subclass has no docstring of its own. Note: `longname` exists
+        for backwards compatibility, do not use for new subclasses.
+    shapes : str, optional
+        The shape of the distribution. For example "m, n" for a distribution
+        that takes two integers as the two shape arguments for all its methods.
+        If not provided, shape parameters will be inferred from
+        the signatures of the private methods, ``_pmf`` and ``_cdf`` of
+        the instance.
+    extradoc : str, optional
+        This string is used as the last part of the docstring returned when a
+        subclass has no docstring of its own. Note: `extradoc` exists for
+        backwards compatibility, do not use for new subclasses.
+    seed : None or int or ``numpy.random.RandomState`` instance, optional
+        This parameter defines the RandomState object to use for drawing
+        random variates.
+        If None, the global np.random state is used.
+        If integer, it is used to seed the local RandomState instance.
+        Default is None.
+
+    Methods
+    -------
+    rvs
+    pmf
+    logpmf
+    cdf
+    logcdf
+    sf
+    logsf
+    ppf
+    isf
+    moment
+    stats
+    entropy
+    expect
+    median
+    mean
+    std
+    var
+    interval
+    __call__
+
+
+    Notes
+    -----
+
+    This class is similar to `rv_continuous`. Whether a shape parameter is
+    valid is decided by an ``_argcheck`` method (which defaults to checking
+    that its arguments are strictly positive.)
+    The main differences are:
+
+    - the support of the distribution is a set of integers
+    - instead of the probability density function, ``pdf`` (and the
+      corresponding private ``_pdf``), this class defines the
+      *probability mass function*, `pmf` (and the corresponding
+      private ``_pmf``.)
+    - scale parameter is not defined.
+
+    To create a new discrete distribution, we would do the following:
+
+    >>> from scipy.stats import rv_discrete
+    >>> from scipy.special import factorial
+    >>> from numpy import exp
+    >>> class poisson_gen(rv_discrete):
+    ...     "Poisson distribution"
+    ...     def _pmf(self, k, mu):
+    ...         return exp(-mu) * mu**k / factorial(k)
+
+    and create an instance::
+
+    >>> poisson = poisson_gen(name="poisson")
+
+    Note that above we defined the Poisson distribution in the standard form.
+    Shifting the distribution can be done by providing the ``loc`` parameter
+    to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
+    delegates the work to ``poisson._pmf(x-loc, mu)``.
+
+    **Discrete distributions from a list of probabilities**
+
+    Alternatively, you can construct an arbitrary discrete rv defined
+    on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
+    ``values`` keyword argument to the `rv_discrete` constructor.
+
+    Examples
+    --------
+
+    Custom made discrete distribution:
+
+    >>> from scipy import stats
+    >>> xk = np.arange(7)
+    >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
+    >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
+    >>>
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots(1, 1)
+    >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
+    >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
+    >>> plt.show()
+
+    Random number generation:
+
+    >>> R = custm.rvs(size=100)
+
+    """
+    def __new__(cls, a=0, b=inf, name=None, badvalue=None,
+                moment_tol=1e-8, values=None, inc=1, longname=None,
+                shapes=None, extradoc=None, seed=None):
+
+        if values is not None:
+            # dispatch to a subclass
+            return super(rv_discrete, cls).__new__(rv_sample)
+        else:
+            # business as usual
+            return super(rv_discrete, cls).__new__(cls)
+
+    def __init__(self, a=0, b=inf, name=None, badvalue=None,
+                 moment_tol=1e-8, values=None, inc=1, longname=None,
+                 shapes=None, extradoc=None, seed=None):
+
+        super(rv_discrete, self).__init__(seed)
+
+        # cf generic freeze
+        self._ctor_param = dict(
+            a=a, b=b, name=name, badvalue=badvalue,
+            moment_tol=moment_tol, values=values, inc=inc,
+            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
+
+        if badvalue is None:
+            badvalue = nan
+        self.badvalue = badvalue
+        self.a = a
+        self.b = b
+        self.moment_tol = moment_tol
+        self.inc = inc
+        self._cdfvec = vectorize(self._cdf_single, otypes='d')
+        self.vecentropy = vectorize(self._entropy)
+        self.shapes = shapes
+
+        if values is not None:
+            raise ValueError("rv_discrete.__init__(..., values != None, ...)")
+
+        self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
+                                  locscale_in='loc=0',
+                                  # scale=1 for discrete RVs
+                                  locscale_out='loc, 1')
+
+        # nin correction needs to be after we know numargs
+        # correct nin for generic moment vectorization
+        _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
+        _vec_generic_moment.nin = self.numargs + 2
+        self.generic_moment = instancemethod(_vec_generic_moment,
+                                             self, rv_discrete)
+
+        # correct nin for ppf vectorization
+        _vppf = vectorize(_drv2_ppfsingle, otypes='d')
+        _vppf.nin = self.numargs + 2
+        self._ppfvec = instancemethod(_vppf,
+                                      self, rv_discrete)
+
+        # now that self.numargs is defined, we can adjust nin
+        self._cdfvec.nin = self.numargs + 1
+
+        self._construct_docstrings(name, longname, extradoc)
+
+    def _construct_docstrings(self, name, longname, extradoc):
+        if name is None:
+            name = 'Distribution'
+        self.name = name
+        self.extradoc = extradoc
+
+        # generate docstring for subclass instances
+        if longname is None:
+            # pick the article ("An"/"A") for the default docstring title
+            if name[0] in 'aeiouAEIOU':
+                hstr = "An "
+            else:
+                hstr = "A "
+ longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + extradoc=extradoc, + docdict=docdict_discrete, + discrete='discrete') + else: + dct = dict(distdiscrete) + self._construct_doc(docdict_discrete, dct.get(self.name)) + + # discrete RV do not have the scale parameter, remove it + self.__doc__ = self.__doc__.replace( + '\n scale : array_like, ' + 'optional\n scale parameter (default=1)', '') + + def _updated_ctor_param(self): + """ Return the current version of _ctor_param, possibly updated by user. + + Used by freezing and pickling. + Keep this in sync with the signature of __init__. + """ + dct = self._ctor_param.copy() + dct['a'] = self.a + dct['b'] = self.b + dct['badvalue'] = self.badvalue + dct['moment_tol'] = self.moment_tol + dct['inc'] = self.inc + dct['name'] = self.name + dct['shapes'] = self.shapes + dct['extradoc'] = self.extradoc + return dct + + def _nonzero(self, k, *args): + return floor(k) == k + + def _pmf(self, k, *args): + return self._cdf(k, *args) - self._cdf(k-1, *args) + + def _logpmf(self, k, *args): + return log(self._pmf(k, *args)) + + def _cdf_single(self, k, *args): + m = arange(int(self.a), k+1) + return np.sum(self._pmf(m, *args), axis=0) + + def _cdf(self, x, *args): + k = floor(x) + return self._cdfvec(k, *args) + + # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic + + def rvs(self, *args, **kwargs): + """ + Random variates of given type. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + size : int or tuple of ints, optional + Defining number of random variates (Default is 1). Note that `size` + has to be given as keyword, not as positional argument. + random_state : None or int or ``np.random.RandomState`` instance, optional + If int or RandomState, use it for drawing the random variates. + If None, rely on ``self.random_state``. + Default is None. + + Returns + ------- + rvs : ndarray or scalar + Random variates of given `size`. + + """ + kwargs['discrete'] = True + return super(rv_discrete, self).rvs(*args, **kwargs) + + def pmf(self, k, *args, **kwds): + """ + Probability mass function at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + pmf : array_like + Probability mass function evaluated at k + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + k = asarray((k-loc)) + cond0 = self._argcheck(*args) + cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args) + cond = cond0 & cond1 + output = zeros(shape(cond), 'd') + place(output, (1-cond0) + np.isnan(k), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._pmf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logpmf(self, k, *args, **kwds): + """ + Log of the probability mass function at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter. Default is 0. + + Returns + ------- + logpmf : array_like + Log of the probability mass function evaluated at k. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + k = asarray((k-loc)) + cond0 = self._argcheck(*args) + cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args) + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(NINF) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logpmf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def cdf(self, k, *args, **kwds): + """ + Cumulative distribution function of the given RV. + + Parameters + ---------- + k : array_like, int + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + cdf : ndarray + Cumulative distribution function evaluated at `k`. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + k = asarray((k-loc)) + cond0 = self._argcheck(*args) + cond1 = (k >= self.a) & (k < self.b) + cond2 = (k >= self.b) + cond = cond0 & cond1 + output = zeros(shape(cond), 'd') + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2*(cond0 == cond0), 1.0) + + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._cdf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logcdf(self, k, *args, **kwds): + """ + Log of the cumulative distribution function at k of the given RV. + + Parameters + ---------- + k : array_like, int + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + logcdf : array_like + Log of the cumulative distribution function evaluated at k. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + k = asarray((k-loc)) + cond0 = self._argcheck(*args) + cond1 = (k >= self.a) & (k < self.b) + cond2 = (k >= self.b) + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(NINF) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2*(cond0 == cond0), 0.0) + + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logcdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def sf(self, k, *args, **kwds): + """ + Survival function (1 - `cdf`) at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + sf : array_like + Survival function evaluated at k. 
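+
+        Notes
+        -----
+        A quick sketch with an ad-hoc distribution built from ``values``
+        (the fair-die numbers here are illustrative):
+
+        >>> import numpy as np
+        >>> from scipy.stats import rv_discrete
+        >>> die = rv_discrete(values=([1, 2, 3, 4, 5, 6], [1/6.] * 6))
+        >>> np.allclose(die.sf(4), 2/6.)   # P(X > 4) = P(5) + P(6)
+        True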
+ + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= self.a) & (k < self.b) + cond2 = (k < self.a) & cond0 + cond = cond0 & cond1 + output = zeros(shape(cond), 'd') + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._sf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logsf(self, k, *args, **kwds): + """ + Log of the survival function of the given RV. + + Returns the log of the "survival function," defined as 1 - `cdf`, + evaluated at `k`. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + logsf : ndarray + Log of the survival function evaluated at `k`. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= self.a) & (k < self.b) + cond2 = (k < self.a) & cond0 + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(NINF) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logsf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def ppf(self, q, *args, **kwds): + """ + Percent point function (inverse of `cdf`) at q of the given RV. + + Parameters + ---------- + q : array_like + Lower tail probability. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + k : array_like + Quantile corresponding to the lower tail probability, q. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + q, loc = map(asarray, (q, loc)) + args = tuple(map(asarray, args)) + cond0 = self._argcheck(*args) & (loc == loc) + cond1 = (q > 0) & (q < 1) + cond2 = (q == 1) & cond0 + cond = cond0 & cond1 + output = valarray(shape(cond), value=self.badvalue, typecode='d') + # output type 'd' to handle nin and inf + place(output, (q == 0)*(cond == cond), self.a-1) + place(output, cond2, self.b) + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(loc,))) + loc, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._ppf(*goodargs) + loc) + + if output.ndim == 0: + return output[()] + return output + + def isf(self, q, *args, **kwds): + """ + Inverse survival function (inverse of `sf`) at q of the given RV. + + Parameters + ---------- + q : array_like + Upper tail probability. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + k : ndarray or scalar + Quantile corresponding to the upper tail probability, q. 
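+
+        Notes
+        -----
+        With the generic implementation this equals ``ppf(1 - q)``: the
+        smallest integer ``k`` whose survival function ``sf(k)`` does not
+        exceed `q` (subject to the usual floating-point caveats when
+        ``1 - q`` is not exactly representable).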
+
+        """
+        args, loc, _ = self._parse_args(*args, **kwds)
+        q, loc = map(asarray, (q, loc))
+        args = tuple(map(asarray, args))
+        cond0 = self._argcheck(*args) & (loc == loc)
+        cond1 = (q > 0) & (q < 1)
+        cond2 = (q == 1) & cond0
+        cond = cond0 & cond1
+
+        # same problem as with ppf; copied from ppf and changed
+        output = valarray(shape(cond), value=self.badvalue, typecode='d')
+        # output type 'd' to handle nan and inf
+        place(output, (q == 0)*(cond == cond), self.b)
+        place(output, cond2, self.a-1)
+
+        # call place only if at least 1 valid argument
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
+            loc, goodargs = goodargs[-1], goodargs[:-1]
+            # PB same as ticket 766
+            place(output, cond, self._isf(*goodargs) + loc)
+
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def _entropy(self, *args):
+        if hasattr(self, 'pk'):
+            return entropy(self.pk)
+        else:
+            return _expect(lambda x: entr(self.pmf(x, *args)),
+                           self.a, self.b, self.ppf(0.5, *args), self.inc)
+
+    def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
+               conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
+        """
+        Calculate the expected value of a function with respect to the
+        distribution for a discrete distribution by numerical summation.
+
+        Parameters
+        ----------
+        func : callable, optional
+            Function for which the expectation value is calculated.
+            Takes only one argument.
+            The default is the identity mapping f(k) = k.
+        args : tuple, optional
+            Shape parameters of the distribution.
+        loc : float, optional
+            Location parameter.
+            Default is 0.
+        lb, ub : int, optional
+            Lower and upper bound for the summation, default is set to the
+            support of the distribution, inclusive (``lb <= k <= ub``).
+        conditional : bool, optional
+            If true, the expectation is corrected by the conditional
+            probability of the summation interval. The return value is the
+            expectation of the function, `func`, conditional on being in
+            the given interval (k such that ``lb <= k <= ub``).
+            Default is False.
+        maxcount : int, optional
+            Maximal number of terms to evaluate (to avoid an endless loop for
+            an infinite sum). Default is 1000.
+        tolerance : float, optional
+            Absolute tolerance for the summation. Default is 1e-10.
+        chunksize : int, optional
+            Iterate over the support of the distribution in chunks of this
+            size. Default is 32.
+
+        Returns
+        -------
+        expect : float
+            Expected value.
+
+        Notes
+        -----
+        For heavy-tailed distributions, the expected value may or may not exist,
+        depending on the function, `func`. If it does exist, but the sum converges
+        slowly, the accuracy of the result may be rather low. For instance, for
+        ``zipf(4)``, the accuracy of the mean and variance is only about 1e-5;
+        increasing `maxcount` and/or `chunksize` may improve the result, but may
+        also make zipf very slow.
+
+        The function is not vectorized.
+
+        """
+        if func is None:
+            def fun(x):
+                # loc and args from outer scope
+                return (x+loc)*self._pmf(x, *args)
+        else:
+            def fun(x):
+                # loc and args from outer scope
+                return func(x+loc)*self._pmf(x, *args)
+        # used pmf because _pmf does not check support in randint and there
+        # might be problems with correct self.a, self.b at this stage (maybe
+        # not anymore; it seems to work now with _pmf)
+
+        self._argcheck(*args)  # (re)generate scalar self.a and self.b
+        if lb is None:
+            lb = self.a
+        else:
+            lb = lb - loc   # convert bound for standardized distribution
+        if ub is None:
+            ub = self.b
+        else:
+            ub = ub - loc   # convert bound for standardized distribution
+        if conditional:
+            invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
+        else:
+            invfac = 1.0
+
+        # iterate over the support, starting from the median
+        x0 = self.ppf(0.5, *args)
+        res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
+        return res / invfac
+
+
+def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
+            chunksize=32):
+    """Helper for computing the expectation value of `fun`."""
+
+    # short-circuit if the support size is small enough
+    if (ub - lb) <= chunksize:
+        supp = np.arange(lb, ub+1, inc)
+        vals = fun(supp)
+        return np.sum(vals)
+
+    # otherwise, iterate starting from x0
+    if x0 < lb:
+        x0 = lb
+    if x0 > ub:
+        x0 = ub
+
+    count, tot = 0, 0.
+    # iterate over [x0, ub] inclusive
+    for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
+        count += x.size
+        delta = np.sum(fun(x))
+        tot += delta
+        if abs(delta) < tolerance * x.size:
+            break
+        if count > maxcount:
+            warnings.warn('expect(): sum did not converge', RuntimeWarning)
+            return tot
+
+    # iterate over [lb, x0)
+    for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
+        count += x.size
+        delta = np.sum(fun(x))
+        tot += delta
+        if abs(delta) < tolerance * x.size:
+            break
+        if count > maxcount:
+            warnings.warn('expect(): sum did not converge', RuntimeWarning)
+            break
+
+    return tot
+
+
+def _iter_chunked(x0, x1, chunksize=4, inc=1):
+    """Iterate from x0 to x1 in chunks of chunksize and steps inc.
+
+    x0 must be finite, x1 need not be. In the latter case, the iterator is
+    infinite.
+    Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
+    (make sure to set inc < 0.)
+
+    >>> [x for x in _iter_chunked(2, 5, inc=2)]
+    [array([2, 4])]
+    >>> [x for x in _iter_chunked(2, 11, inc=2)]
+    [array([2, 4, 6, 8]), array([10])]
+    >>> [x for x in _iter_chunked(2, -5, inc=-2)]
+    [array([ 2,  0, -2, -4])]
+    >>> [x for x in _iter_chunked(2, -9, inc=-2)]
+    [array([ 2,  0, -2, -4]), array([-6, -8])]
+
+    """
+    if inc == 0:
+        raise ValueError('Cannot increment by zero.')
+    if chunksize <= 0:
+        raise ValueError('Chunk size must be positive; got %s.' % chunksize)
+
+    s = 1 if inc > 0 else -1
+    stepsize = abs(chunksize * inc)
+
+    x = x0
+    while (x - x1) * inc < 0:
+        delta = min(stepsize, abs(x - x1))
+        step = delta * s
+        supp = np.arange(x, x + step, inc)
+        x += step
+        yield supp
+
+
+class rv_sample(rv_discrete):
+    """A 'sample' discrete distribution defined by the support and values.
+
+    The ctor ignores most of the arguments; it only needs the
+    `values` argument.
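+
+    A short sketch (instances are normally obtained via
+    ``rv_discrete(values=...)``, which dispatches here in ``__new__``):
+
+    >>> from scipy.stats import rv_discrete
+    >>> coin = rv_discrete(name='coin', values=([0, 1], [0.5, 0.5]))
+    >>> coin.pmf(1)
+    0.5
+    >>> coin.cdf(0)
+    0.5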
+ """ + def __init__(self, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, extradoc=None, seed=None): + + super(rv_discrete, self).__init__(seed) + + if values is None: + raise ValueError("rv_sample.__init__(..., values=None,...)") + + # cf generic freeze + self._ctor_param = dict( + a=a, b=b, name=name, badvalue=badvalue, + moment_tol=moment_tol, values=values, inc=inc, + longname=longname, shapes=shapes, extradoc=extradoc, seed=seed) + + if badvalue is None: + badvalue = nan + self.badvalue = badvalue + self.moment_tol = moment_tol + self.inc = inc + self.shapes = shapes + self.vecentropy = self._entropy + + xk, pk = values + + if len(xk) != len(pk): + raise ValueError("xk and pk need to have the same length.") + if not np.allclose(np.sum(pk), 1): + raise ValueError("The sum of provided pk is not 1.") + + indx = np.argsort(np.ravel(xk)) + self.xk = np.take(np.ravel(xk), indx, 0) + self.pk = np.take(np.ravel(pk), indx, 0) + self.a = self.xk[0] + self.b = self.xk[-1] + self.qvals = np.cumsum(self.pk, axis=0) + + self.shapes = ' ' # bypass inspection + self._construct_argparser(meths_to_inspect=[self._pmf], + locscale_in='loc=0', + # scale=1 for discrete RVs + locscale_out='loc, 1') + + self._construct_docstrings(name, longname, extradoc) + + def _pmf(self, x): + return np.select([x == k for k in self.xk], + [np.broadcast_arrays(p, x)[0] for p in self.pk], 0) + + def _cdf(self, x): + xx, xxk = np.broadcast_arrays(x[:, None], self.xk) + indx = np.argmax(xxk > xx, axis=-1) - 1 + return self.qvals[indx] + + def _ppf(self, q): + qq, sqq = np.broadcast_arrays(q[..., None], self.qvals) + indx = argmax(sqq >= qq, axis=-1) + return self.xk[indx] + + def _rvs(self): + # Need to define it explicitly, otherwise .rvs() with size=None + # fails due to explicit broadcasting in _ppf + U = self._random_state.random_sample(self._size) + if self._size is None: + U = np.array(U, ndmin=1) + Y = self._ppf(U)[0] + else: + Y = self._ppf(U) + return Y + + def _entropy(self): + return entropy(self.pk) + + def generic_moment(self, n): + n = asarray(n) + return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0) + + +def get_distribution_names(namespace_pairs, rv_base_class): + """ + Collect names of statistical distributions and their generators. + + Parameters + ---------- + namespace_pairs : sequence + A snapshot of (name, value) pairs in the namespace of a module. + rv_base_class : class + The base class of random variable generator classes in a module. + + Returns + ------- + distn_names : list of strings + Names of the statistical distributions. + distn_gen_names : list of strings + Names of the generators of the statistical distributions. + Note that these are not simply the names of the statistical + distributions, with a _gen suffix added. 
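+
+    The distribution modules in ``scipy.stats`` call this on a snapshot of
+    their own namespace, ``list(globals().items())``, to build ``__all__``
+    and the lists of exported distribution instances.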
+ + """ + distn_names = [] + distn_gen_names = [] + for name, value in namespace_pairs: + if name.startswith('_'): + continue + if name.endswith('_gen') and issubclass(value, rv_base_class): + distn_gen_names.append(name) + if isinstance(value, rv_base_class): + distn_names.append(name) + return distn_names, distn_gen_names diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.pyc new file mode 100644 index 0000000..1b6d120 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_distr_params.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_distr_params.py new file mode 100644 index 0000000..2018c9b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_distr_params.py @@ -0,0 +1,130 @@ +""" +Sane parameters for stats.distributions. +""" + +distcont = [ + ['alpha', (3.5704770516650459,)], + ['anglit', ()], + ['arcsine', ()], + ['argus', (1.0,)], + ['beta', (2.3098496451481823, 0.62687954300963677)], + ['betaprime', (5, 6)], + ['bradford', (0.29891359763170633,)], + ['burr', (10.5, 4.3)], + ['burr12', (10, 4)], + ['cauchy', ()], + ['chi', (78,)], + ['chi2', (55,)], + ['cosine', ()], + ['crystalball', (2.0, 3.0)], + ['dgamma', (1.1023326088288166,)], + ['dweibull', (2.0685080649914673,)], + ['erlang', (10,)], + ['expon', ()], + ['exponnorm', (1.5,)], + ['exponpow', (2.697119160358469,)], + ['exponweib', (2.8923945291034436, 1.9505288745913174)], + ['f', (29, 18)], + ['fatiguelife', (29,)], # correction numargs = 1 + ['fisk', (3.0857548622253179,)], + ['foldcauchy', (4.7164673455831894,)], + ['foldnorm', (1.9521253373555869,)], + ['frechet_l', (3.6279911255583239,)], + ['frechet_r', (1.8928171603534227,)], + ['gamma', (1.9932305483800778,)], + ['gausshyper', (13.763771604130699, 3.1189636648681431, + 2.5145980350183019, 5.1811649903971615)], # veryslow + ['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)], + ['genextreme', (-0.1,)], + ['gengamma', (4.4162385429431925, 3.1193091679242761)], + ['gengamma', (4.4162385429431925, -3.1193091679242761)], + ['genhalflogistic', (0.77274727809929322,)], + ['genlogistic', (0.41192440799679475,)], + ['gennorm', (1.2988442399460265,)], + ['halfgennorm', (0.6748054997000371,)], + ['genpareto', (0.1,)], # use case with finite moments + ['gilbrat', ()], + ['gompertz', (0.94743713075105251,)], + ['gumbel_l', ()], + ['gumbel_r', ()], + ['halfcauchy', ()], + ['halflogistic', ()], + ['halfnorm', ()], + ['hypsecant', ()], + ['invgamma', (4.0668996136993067,)], + ['invgauss', (0.14546264555347513,)], + ['invweibull', (10.58,)], + ['johnsonsb', (4.3172675099141058, 3.1837781130785063)], + ['johnsonsu', (2.554395574161155, 2.2482281679651965)], + ['kappa4', (0.0, 0.0)], + ['kappa4', (-0.1, 0.1)], + ['kappa4', (0.0, 0.1)], + ['kappa4', (0.1, 0.0)], + ['kappa3', (1.0,)], + ['ksone', (1000,)], # replace 22 by 100 to avoid failing range, ticket 956 + ['kstwobign', ()], + ['laplace', ()], + ['levy', ()], + ['levy_l', ()], + ['levy_stable', (1.8, -0.5)], + ['loggamma', (0.41411931826052117,)], + ['logistic', ()], + ['loglaplace', (3.2505926592051435,)], + ['lognorm', (0.95368226960575331,)], + ['lomax', (1.8771398388773268,)], + ['maxwell', ()], + ['mielke', (10.4, 3.6)], + ['moyal', ()], + ['nakagami', (4.9673794866666237,)], + ['ncf', (27, 27, 0.41578441799226107)], + ['nct', (14, 
0.24045031331198066)], + ['ncx2', (21, 1.0560465975116415)], + ['norm', ()], + ['norminvgauss', (1., 0.5)], + ['pareto', (2.621716532144454,)], + ['pearson3', (0.1,)], + ['powerlaw', (1.6591133289905851,)], + ['powerlognorm', (2.1413923530064087, 0.44639540782048337)], + ['powernorm', (4.4453652254590779,)], + ['rayleigh', ()], + ['rdist', (0.9,)], # feels also slow + ['recipinvgauss', (0.63004267809369119,)], + ['reciprocal', (0.0062309367010521255, 1.0062309367010522)], + ['rice', (0.7749725210111873,)], + ['semicircular', ()], + ['skewnorm', (4.0,)], + ['t', (2.7433514990818093,)], + ['trapz', (0.2, 0.8)], + ['triang', (0.15785029824528218,)], + ['truncexpon', (4.6907725456810478,)], + ['truncnorm', (-1.0978730080013919, 2.7306754109031979)], + ['truncnorm', (0.1, 2.)], + ['tukeylambda', (3.1321477856738267,)], + ['uniform', ()], + ['vonmises', (3.9939042581071398,)], + ['vonmises_line', (3.9939042581071398,)], + ['wald', ()], + ['weibull_max', (2.8687961709100187,)], + ['weibull_min', (1.7866166930421596,)], + ['wrapcauchy', (0.031071279018614728,)]] + + +distdiscrete = [ + ['bernoulli',(0.3,)], + ['binom', (5, 0.4)], + ['boltzmann',(1.4, 19)], + ['dlaplace', (0.8,)], # 0.5 + ['geom', (0.5,)], + ['hypergeom',(30, 12, 6)], + ['hypergeom',(21,3,12)], # numpy.random (3,18,12) numpy ticket:921 + ['hypergeom',(21,18,11)], # numpy.random (18,3,11) numpy ticket:921 + ['logser', (0.6,)], # re-enabled, numpy ticket:921 + ['nbinom', (5, 0.5)], + ['nbinom', (0.4, 0.4)], # from tickets: 583 + ['planck', (0.51,)], # 4.1 + ['poisson', (0.6,)], + ['randint', (7, 31)], + ['skellam', (15, 8)], + ['zipf', (6.5,)], + ['yulesimon',(11.0,)] +] diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_distr_params.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_distr_params.pyc new file mode 100644 index 0000000..290c694 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_distr_params.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py new file mode 100644 index 0000000..323ec11 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py @@ -0,0 +1,3827 @@ +# +# Author: Joris Vankerschaver 2013 +# +from __future__ import division, print_function, absolute_import + +import math +import numpy as np +from numpy import asarray_chkfinite, asarray +import scipy.linalg +from scipy.misc import doccer +from scipy.special import gammaln, psi, multigammaln, xlogy, entr +from scipy._lib._util import check_random_state +from scipy.linalg.blas import drot +from scipy.linalg.misc import LinAlgError +from scipy.linalg.lapack import get_lapack_funcs + +from ._discrete_distns import binom +from . import mvn + +__all__ = ['multivariate_normal', + 'matrix_normal', + 'dirichlet', + 'wishart', + 'invwishart', + 'multinomial', + 'special_ortho_group', + 'ortho_group', + 'random_correlation', + 'unitary_group'] + +_LOG_2PI = np.log(2 * np.pi) +_LOG_2 = np.log(2) +_LOG_PI = np.log(np.pi) + + +_doc_random_state = """\ +random_state : None or int or np.random.RandomState instance, optional + If int or RandomState, use it for drawing the random variates. + If None (or np.random), the global np.random state is used. + Default is None. +""" + + +def _squeeze_output(out): + """ + Remove single-dimensional entries from array and convert to scalar, + if necessary. 
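+
+    A tiny sketch (module-private helper):
+
+    >>> import numpy as np
+    >>> _squeeze_output(np.array([[1.5]]))
+    1.5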
+ + """ + out = out.squeeze() + if out.ndim == 0: + out = out[()] + return out + + +def _eigvalsh_to_eps(spectrum, cond=None, rcond=None): + """ + Determine which eigenvalues are "small" given the spectrum. + + This is for compatibility across various linear algebra functions + that should agree about whether or not a Hermitian matrix is numerically + singular and what is its numerical matrix rank. + This is designed to be compatible with scipy.linalg.pinvh. + + Parameters + ---------- + spectrum : 1d ndarray + Array of eigenvalues of a Hermitian matrix. + cond, rcond : float, optional + Cutoff for small eigenvalues. + Singular values smaller than rcond * largest_eigenvalue are + considered zero. + If None or -1, suitable machine precision is used. + + Returns + ------- + eps : float + Magnitude cutoff for numerical negligibility. + + """ + if rcond is not None: + cond = rcond + if cond in [None, -1]: + t = spectrum.dtype.char.lower() + factor = {'f': 1E3, 'd': 1E6} + cond = factor[t] * np.finfo(t).eps + eps = cond * np.max(abs(spectrum)) + return eps + + +def _pinv_1d(v, eps=1e-5): + """ + A helper function for computing the pseudoinverse. + + Parameters + ---------- + v : iterable of numbers + This may be thought of as a vector of eigenvalues or singular values. + eps : float + Values with magnitude no greater than eps are considered negligible. + + Returns + ------- + v_pinv : 1d float ndarray + A vector of pseudo-inverted numbers. + + """ + return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float) + + +class _PSD(object): + """ + Compute coordinated functions of a symmetric positive semidefinite matrix. + + This class addresses two issues. Firstly it allows the pseudoinverse, + the logarithm of the pseudo-determinant, and the rank of the matrix + to be computed using one call to eigh instead of three. + Secondly it allows these functions to be computed in a way + that gives mutually compatible results. + All of the functions are computed with a common understanding as to + which of the eigenvalues are to be considered negligibly small. + The functions are designed to coordinate with scipy.linalg.pinvh() + but not necessarily with np.linalg.det() or with np.linalg.matrix_rank(). + + Parameters + ---------- + M : array_like + Symmetric positive semidefinite matrix (2-D). + cond, rcond : float, optional + Cutoff for small eigenvalues. + Singular values smaller than rcond * largest_eigenvalue are + considered zero. + If None or -1, suitable machine precision is used. + lower : bool, optional + Whether the pertinent array data is taken from the lower + or upper triangle of M. (Default: lower) + check_finite : bool, optional + Whether to check that the input matrices contain only finite + numbers. Disabling may give a performance gain, but may result + in problems (crashes, non-termination) if the inputs do contain + infinities or NaNs. + allow_singular : bool, optional + Whether to allow a singular matrix. (Default: True) + + Notes + ----- + The arguments are similar to those of scipy.linalg.pinvh(). + + """ + + def __init__(self, M, cond=None, rcond=None, lower=True, + check_finite=True, allow_singular=True): + # Compute the symmetric eigendecomposition. + # Note that eigh takes care of array conversion, chkfinite, + # and assertion that the matrix is square. 
+ s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite) + + eps = _eigvalsh_to_eps(s, cond, rcond) + if np.min(s) < -eps: + raise ValueError('the input matrix must be positive semidefinite') + d = s[s > eps] + if len(d) < len(s) and not allow_singular: + raise np.linalg.LinAlgError('singular matrix') + s_pinv = _pinv_1d(s, eps) + U = np.multiply(u, np.sqrt(s_pinv)) + + # Initialize the eagerly precomputed attributes. + self.rank = len(d) + self.U = U + self.log_pdet = np.sum(np.log(d)) + + # Initialize an attribute to be lazily computed. + self._pinv = None + + @property + def pinv(self): + if self._pinv is None: + self._pinv = np.dot(self.U, self.U.T) + return self._pinv + + +class multi_rv_generic(object): + """ + Class which encapsulates common functionality between all multivariate + distributions. + + """ + def __init__(self, seed=None): + super(multi_rv_generic, self).__init__() + self._random_state = check_random_state(seed) + + @property + def random_state(self): + """ Get or set the RandomState object for generating random variates. + + This can be either None or an existing RandomState object. + + If None (or np.random), use the RandomState singleton used by np.random. + If already a RandomState instance, use it. + If an int, use a new RandomState instance seeded with seed. + + """ + return self._random_state + + @random_state.setter + def random_state(self, seed): + self._random_state = check_random_state(seed) + + def _get_random_state(self, random_state): + if random_state is not None: + return check_random_state(random_state) + else: + return self._random_state + + +class multi_rv_frozen(object): + """ + Class which encapsulates common functionality between all frozen + multivariate distributions. + """ + @property + def random_state(self): + return self._dist._random_state + + @random_state.setter + def random_state(self, seed): + self._dist._random_state = check_random_state(seed) + + +_mvn_doc_default_callparams = """\ +mean : array_like, optional + Mean of the distribution (default zero) +cov : array_like, optional + Covariance matrix of the distribution (default one) +allow_singular : bool, optional + Whether to allow a singular covariance matrix. (Default: False) +""" + +_mvn_doc_callparams_note = \ + """Setting the parameter `mean` to `None` is equivalent to having `mean` + be the zero-vector. The parameter `cov` can be a scalar, in which case + the covariance matrix is the identity times that value, a vector of + diagonal entries for the covariance matrix, or a two-dimensional + array_like. + """ + +_mvn_doc_frozen_callparams = "" + +_mvn_doc_frozen_callparams_note = \ + """See class definition for a detailed description of parameters.""" + +mvn_docdict_params = { + '_mvn_doc_default_callparams': _mvn_doc_default_callparams, + '_mvn_doc_callparams_note': _mvn_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mvn_docdict_noparams = { + '_mvn_doc_default_callparams': _mvn_doc_frozen_callparams, + '_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_normal_gen(multi_rv_generic): + r""" + A multivariate normal random variable. + + The `mean` keyword specifies the mean. The `cov` keyword specifies the + covariance matrix. + + Methods + ------- + ``pdf(x, mean=None, cov=1, allow_singular=False)`` + Probability density function. + ``logpdf(x, mean=None, cov=1, allow_singular=False)`` + Log of the probability density function. 
+ ``cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)`` + Cumulative distribution function. + ``logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)`` + Log of the cumulative distribution function. + ``rvs(mean=None, cov=1, size=1, random_state=None)`` + Draw random samples from a multivariate normal distribution. + ``entropy()`` + Compute the differential entropy of the multivariate normal. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + %(_doc_random_state)s + + Alternatively, the object may be called (as a function) to fix the mean + and covariance parameters, returning a "frozen" multivariate normal + random variable: + + rv = multivariate_normal(mean=None, cov=1, allow_singular=False) + - Frozen object with the same methods but holding the given + mean and covariance fixed. + + Notes + ----- + %(_mvn_doc_callparams_note)s + + The covariance matrix `cov` must be a (symmetric) positive + semi-definite matrix. The determinant and inverse of `cov` are computed + as the pseudo-determinant and pseudo-inverse, respectively, so + that `cov` does not need to have full rank. + + The probability density function for `multivariate_normal` is + + .. math:: + + f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}} + \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right), + + where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix, + and :math:`k` is the dimension of the space where :math:`x` takes values. + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import multivariate_normal + + >>> x = np.linspace(0, 5, 10, endpoint=False) + >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y + array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129, + 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349]) + >>> fig1 = plt.figure() + >>> ax = fig1.add_subplot(111) + >>> ax.plot(x, y) + + The input quantiles can be any shape of array, as long as the last + axis labels the components. This allows us for instance to + display the frozen pdf for a non-isotropic random variable in 2D as + follows: + + >>> x, y = np.mgrid[-1:1:.01, -1:1:.01] + >>> pos = np.dstack((x, y)) + >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]) + >>> fig2 = plt.figure() + >>> ax2 = fig2.add_subplot(111) + >>> ax2.contourf(x, y, rv.pdf(pos)) + + """ + + def __init__(self, seed=None): + super(multivariate_normal_gen, self).__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params) + + def __call__(self, mean=None, cov=1, allow_singular=False, seed=None): + """ + Create a frozen multivariate normal distribution. + + See `multivariate_normal_frozen` for more information. + + """ + return multivariate_normal_frozen(mean, cov, + allow_singular=allow_singular, + seed=seed) + + def _process_parameters(self, dim, mean, cov): + """ + Infer dimensionality from mean or covariance matrix, ensure that + mean and covariance are full vector resp. matrix. 
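+
+        A scalar `cov` is promoted to `cov` times the identity matrix, and a
+        one-dimensional `cov` is interpreted as the diagonal of the
+        covariance matrix.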
+ + """ + + # Try to infer dimensionality + if dim is None: + if mean is None: + if cov is None: + dim = 1 + else: + cov = np.asarray(cov, dtype=float) + if cov.ndim < 2: + dim = 1 + else: + dim = cov.shape[0] + else: + mean = np.asarray(mean, dtype=float) + dim = mean.size + else: + if not np.isscalar(dim): + raise ValueError("Dimension of random variable must be " + "a scalar.") + + # Check input sizes and return full arrays for mean and cov if + # necessary + if mean is None: + mean = np.zeros(dim) + mean = np.asarray(mean, dtype=float) + + if cov is None: + cov = 1.0 + cov = np.asarray(cov, dtype=float) + + if dim == 1: + mean.shape = (1,) + cov.shape = (1, 1) + + if mean.ndim != 1 or mean.shape[0] != dim: + raise ValueError("Array 'mean' must be a vector of length %d." % + dim) + if cov.ndim == 0: + cov = cov * np.eye(dim) + elif cov.ndim == 1: + cov = np.diag(cov) + elif cov.ndim == 2 and cov.shape != (dim, dim): + rows, cols = cov.shape + if rows != cols: + msg = ("Array 'cov' must be square if it is two dimensional," + " but cov.shape = %s." % str(cov.shape)) + else: + msg = ("Dimension mismatch: array 'cov' is of shape %s," + " but 'mean' is a vector of length %d.") + msg = msg % (str(cov.shape), len(mean)) + raise ValueError(msg) + elif cov.ndim > 2: + raise ValueError("Array 'cov' must be at most two-dimensional," + " but cov.ndim = %d" % cov.ndim) + + return dim, mean, cov + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. + + """ + x = np.asarray(x, dtype=float) + + if x.ndim == 0: + x = x[np.newaxis] + elif x.ndim == 1: + if dim == 1: + x = x[:, np.newaxis] + else: + x = x[np.newaxis, :] + + return x + + def _logpdf(self, x, mean, prec_U, log_det_cov, rank): + """ + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + mean : ndarray + Mean of the distribution + prec_U : ndarray + A decomposition such that np.dot(prec_U, prec_U.T) + is the precision matrix, i.e. inverse of the covariance matrix. + log_det_cov : float + Logarithm of the determinant of the covariance matrix + rank : int + Rank of the covariance matrix. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + dev = x - mean + maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1) + return -0.5 * (rank * _LOG_2PI + log_det_cov + maha) + + def logpdf(self, x, mean=None, cov=1, allow_singular=False): + """ + Log of the multivariate normal probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + dim, mean, cov = self._process_parameters(None, mean, cov) + x = self._process_quantiles(x, dim) + psd = _PSD(cov, allow_singular=allow_singular) + out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank) + return _squeeze_output(out) + + def pdf(self, x, mean=None, cov=1, allow_singular=False): + """ + Multivariate normal probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. 
+ %(_mvn_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + dim, mean, cov = self._process_parameters(None, mean, cov) + x = self._process_quantiles(x, dim) + psd = _PSD(cov, allow_singular=allow_singular) + out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)) + return _squeeze_output(out) + + def _cdf(self, x, mean, cov, maxpts, abseps, releps): + """ + Parameters + ---------- + x : ndarray + Points at which to evaluate the cumulative distribution function. + mean : ndarray + Mean of the distribution + cov : array_like + Covariance matrix of the distribution + maxpts: integer + The maximum number of points to use for integration + abseps: float + Absolute error tolerance + releps: float + Relative error tolerance + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'cdf' instead. + + .. versionadded:: 1.0.0 + + """ + lower = np.full(mean.shape, -np.inf) + # mvnun expects 1-d arguments, so process points sequentially + func1d = lambda x_slice: mvn.mvnun(lower, x_slice, mean, cov, + maxpts, abseps, releps)[0] + out = np.apply_along_axis(func1d, -1, x) + return _squeeze_output(out) + + def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, + abseps=1e-5, releps=1e-5): + """ + Log of the multivariate normal cumulative distribution function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + maxpts: integer, optional + The maximum number of points to use for integration + (default `1000000*dim`) + abseps: float, optional + Absolute error tolerance (default 1e-5) + releps: float, optional + Relative error tolerance (default 1e-5) + + Returns + ------- + cdf : ndarray or scalar + Log of the cumulative distribution function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + .. versionadded:: 1.0.0 + + """ + dim, mean, cov = self._process_parameters(None, mean, cov) + x = self._process_quantiles(x, dim) + # Use _PSD to check covariance matrix + _PSD(cov, allow_singular=allow_singular) + if not maxpts: + maxpts = 1000000 * dim + out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps)) + return out + + def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, + abseps=1e-5, releps=1e-5): + """ + Multivariate normal cumulative distribution function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + maxpts: integer, optional + The maximum number of points to use for integration + (default `1000000*dim`) + abseps: float, optional + Absolute error tolerance (default 1e-5) + releps: float, optional + Relative error tolerance (default 1e-5) + + Returns + ------- + cdf : ndarray or scalar + Cumulative distribution function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + .. versionadded:: 1.0.0 + + """ + dim, mean, cov = self._process_parameters(None, mean, cov) + x = self._process_quantiles(x, dim) + # Use _PSD to check covariance matrix + _PSD(cov, allow_singular=allow_singular) + if not maxpts: + maxpts = 1000000 * dim + out = self._cdf(x, mean, cov, maxpts, abseps, releps) + return out + + def rvs(self, mean=None, cov=1, size=1, random_state=None): + """ + Draw random samples from a multivariate normal distribution. 
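+
+        Samples are drawn via ``RandomState.multivariate_normal`` on the
+        RandomState object selected by `random_state`.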
+
+        Parameters
+        ----------
+        %(_mvn_doc_default_callparams)s
+        size : integer, optional
+            Number of samples to draw (default 1).
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : ndarray or scalar
+            Random variates of size (`size`, `N`), where `N` is the
+            dimension of the random variable.
+
+        Notes
+        -----
+        %(_mvn_doc_callparams_note)s
+
+        """
+        dim, mean, cov = self._process_parameters(None, mean, cov)
+
+        random_state = self._get_random_state(random_state)
+        out = random_state.multivariate_normal(mean, cov, size)
+        return _squeeze_output(out)
+
+    def entropy(self, mean=None, cov=1):
+        """
+        Compute the differential entropy of the multivariate normal.
+
+        Parameters
+        ----------
+        %(_mvn_doc_default_callparams)s
+
+        Returns
+        -------
+        h : scalar
+            Entropy of the multivariate normal distribution
+
+        Notes
+        -----
+        %(_mvn_doc_callparams_note)s
+
+        """
+        dim, mean, cov = self._process_parameters(None, mean, cov)
+        _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
+        return 0.5 * logdet
+
+
+multivariate_normal = multivariate_normal_gen()
+
+
+class multivariate_normal_frozen(multi_rv_frozen):
+    def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
+                 maxpts=None, abseps=1e-5, releps=1e-5):
+        """
+        Create a frozen multivariate normal distribution.
+
+        Parameters
+        ----------
+        mean : array_like, optional
+            Mean of the distribution (default zero)
+        cov : array_like, optional
+            Covariance matrix of the distribution (default one)
+        allow_singular : bool, optional
+            If this flag is True then tolerate a singular
+            covariance matrix (default False).
+        seed : None or int or np.random.RandomState instance, optional
+            This parameter defines the RandomState object to use for drawing
+            random variates.
+            If None (or np.random), the global np.random state is used.
+            If integer, it is used to seed the local RandomState instance.
+            Default is None.
+        maxpts: integer, optional
+            The maximum number of points to use for integration of the
+            cumulative distribution function (default `1000000*dim`)
+        abseps: float, optional
+            Absolute error tolerance for the cumulative distribution function
+            (default 1e-5)
+        releps: float, optional
+            Relative error tolerance for the cumulative distribution function
+            (default 1e-5)
+
+        Examples
+        --------
+        When called with the default parameters, this will create a 1D random
+        variable with mean 0 and covariance 1:
+
+        >>> from scipy.stats import multivariate_normal
+        >>> r = multivariate_normal()
+        >>> r.mean
+        array([ 0.])
+        >>> r.cov
+        array([[1.]])
+
+        """
+        self._dist = multivariate_normal_gen(seed)
+        self.dim, self.mean, self.cov = self._dist._process_parameters(
+            None, mean, cov)
+        self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
+        if not maxpts:
+            maxpts = 1000000 * self.dim
+        self.maxpts = maxpts
+        self.abseps = abseps
+        self.releps = releps
+
+    def logpdf(self, x):
+        x = self._dist._process_quantiles(x, self.dim)
+        out = self._dist._logpdf(x, self.mean, self.cov_info.U,
+                                 self.cov_info.log_pdet, self.cov_info.rank)
+        return _squeeze_output(out)
+
+    def pdf(self, x):
+        return np.exp(self.logpdf(x))
+
+    def logcdf(self, x):
+        return np.log(self.cdf(x))
+
+    def cdf(self, x):
+        x = self._dist._process_quantiles(x, self.dim)
+        out = self._dist._cdf(x, self.mean, self.cov, self.maxpts,
+                              self.abseps, self.releps)
+        return _squeeze_output(out)
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(self.mean, self.cov, size, random_state)
+
+    def entropy(self):
+        """
+        Computes the differential entropy of the multivariate normal.
+
+        Returns
+        -------
+        h : scalar
+            Entropy of the multivariate normal distribution
+
+        """
+        log_pdet = self.cov_info.log_pdet
+        rank = self.cov_info.rank
+        return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# multivariate_normal_gen and fill in default strings in class docstrings
+for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:
+    method = multivariate_normal_gen.__dict__[name]
+    method_frozen = multivariate_normal_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(method.__doc__,
+                                             mvn_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
+
+_matnorm_doc_default_callparams = """\
+mean : array_like, optional
+    Mean of the distribution (default: `None`)
+rowcov : array_like, optional
+    Among-row covariance matrix of the distribution (default: `1`)
+colcov : array_like, optional
+    Among-column covariance matrix of the distribution (default: `1`)
+"""
+
+_matnorm_doc_callparams_note = \
+    """If `mean` is set to `None` then a matrix of zeros is used for the mean.
+    The dimensions of this matrix are inferred from the shape of `rowcov` and
+    `colcov`, if these are provided, or set to `1` if ambiguous.
+
+    `rowcov` and `colcov` can be two-dimensional array_likes specifying the
+    covariance matrices directly. Alternatively, a one-dimensional array will
+    be interpreted as the entries of a diagonal matrix, and a scalar or
+    zero-dimensional array will be interpreted as this value times the
+    identity matrix.
+    """
+
+_matnorm_doc_frozen_callparams = ""
+
+_matnorm_doc_frozen_callparams_note = \
+    """See class definition for a detailed description of parameters."""
+
+matnorm_docdict_params = {
+    '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
+    '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+matnorm_docdict_noparams = {
+    '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
+    '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+
+class matrix_normal_gen(multi_rv_generic):
+    r"""
+    A matrix normal random variable.
+
+    The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
+    among-row covariance matrix. The `colcov` keyword specifies the
+    among-column covariance matrix.
+
+    Methods
+    -------
+    ``pdf(X, mean=None, rowcov=1, colcov=1)``
+        Probability density function.
+    ``logpdf(X, mean=None, rowcov=1, colcov=1)``
+        Log of the probability density function.
+    ``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
+        Draw random samples.
+
+    Parameters
+    ----------
+    X : array_like
+        Quantiles, with the last two axes of `X` denoting the components.
+    %(_matnorm_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Alternatively, the object may be called (as a function) to fix the mean
+    and covariance parameters, returning a "frozen" matrix normal
+    random variable:
+
+    rv = matrix_normal(mean=None, rowcov=1, colcov=1)
+        - Frozen object with the same methods but holding the given
+          mean and covariance fixed.
+
+    Notes
+    -----
+    %(_matnorm_doc_callparams_note)s
+
+    The covariance matrices specified by `rowcov` and `colcov` must be
+    (symmetric) positive definite. If the samples in `X` are
+    :math:`m \times n`, then `rowcov` must be :math:`m \times m` and
+    `colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
+
+    The probability density function for `matrix_normal` is
+
+    .. math::
+
+        f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
+               \exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
+               (X-M)^T \right] \right),
+
+    where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
+    :math:`V` the among-column covariance matrix.
+
+    The `allow_singular` behaviour of the `multivariate_normal`
+    distribution is not currently supported. Covariance matrices must be
+    full rank.
+
+    The `matrix_normal` distribution is closely related to the
+    `multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
+    (the vector formed by concatenating the columns of :math:`X`) has a
+    multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
+    and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
+    product). Sampling and pdf evaluation are
+    :math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
+    :math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
+    making this equivalent form algorithmically inefficient.
+
+    .. versionadded:: 0.17.0
+
+    Examples
+    --------
+
+    >>> from scipy.stats import matrix_normal
+
+    >>> M = np.arange(6).reshape(3,2); M
+    array([[0, 1],
+           [2, 3],
+           [4, 5]])
+    >>> U = np.diag([1,2,3]); U
+    array([[1, 0, 0],
+           [0, 2, 0],
+           [0, 0, 3]])
+    >>> V = 0.3*np.identity(2); V
+    array([[ 0.3,  0. ],
+           [ 0. ,  0.3]])
+    >>> X = M + 0.1; X
+    array([[ 0.1,  1.1],
+           [ 2.1,  3.1],
+           [ 4.1,  5.1]])
+    >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
+    0.023410202050005054
+
+    >>> # Equivalent multivariate normal
+    >>> from scipy.stats import multivariate_normal
+    >>> vectorised_X = X.T.flatten()
+    >>> equiv_mean = M.T.flatten()
+    >>> equiv_cov = np.kron(V,U)
+    >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
+    0.023410202050005054
+    """
+
+    def __init__(self, seed=None):
+        super(matrix_normal_gen, self).__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
+
+    def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
+        """
+        Create a frozen matrix normal distribution.
+
+        See `matrix_normal_frozen` for more information.
+
+        """
+        return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
+
+    def _process_parameters(self, mean, rowcov, colcov):
+        """
+        Infer dimensionality from mean or covariance matrices. Handle
+        defaults. Ensure compatible dimensions.
+
+        """
+
+        # Process mean
+        if mean is not None:
+            mean = np.asarray(mean, dtype=float)
+            meanshape = mean.shape
+            if len(meanshape) != 2:
+                raise ValueError("Array `mean` must be two dimensional.")
+            if np.any(meanshape == 0):
+                raise ValueError("Array `mean` has invalid shape.")
+
+        # Process among-row covariance
+        rowcov = np.asarray(rowcov, dtype=float)
+        if rowcov.ndim == 0:
+            if mean is not None:
+                rowcov = rowcov * np.identity(meanshape[0])
+            else:
+                rowcov = rowcov * np.identity(1)
+        elif rowcov.ndim == 1:
+            rowcov = np.diag(rowcov)
+        rowshape = rowcov.shape
+        if len(rowshape) != 2:
+            raise ValueError("`rowcov` must be a scalar or a 2D array.")
+        if rowshape[0] != rowshape[1]:
+            raise ValueError("Array `rowcov` must be square.")
+        if rowshape[0] == 0:
+            raise ValueError("Array `rowcov` has invalid shape.")
+        numrows = rowshape[0]
+
+        # Process among-column covariance
+        colcov = np.asarray(colcov, dtype=float)
+        if colcov.ndim == 0:
+            if mean is not None:
+                colcov = colcov * np.identity(meanshape[1])
+            else:
+                colcov = colcov * np.identity(1)
+        elif colcov.ndim == 1:
+            colcov = np.diag(colcov)
+        colshape = colcov.shape
+        if len(colshape) != 2:
+            raise ValueError("`colcov` must be a scalar or a 2D array.")
+        if colshape[0] != colshape[1]:
+            raise ValueError("Array `colcov` must be square.")
+        if colshape[0] == 0:
+            raise ValueError("Array `colcov` has invalid shape.")
+        numcols = colshape[0]
+
+        # Ensure mean and covariances compatible
+        if mean is not None:
+            if meanshape[0] != numrows:
+                raise ValueError("Arrays `mean` and `rowcov` must have the "
+                                 "same number of rows.")
+            if meanshape[1] != numcols:
+                raise ValueError("Arrays `mean` and `colcov` must have the "
+                                 "same number of columns.")
+        else:
+            mean = np.zeros((numrows, numcols))
+
+        dims = (numrows, numcols)
+
+        return dims, mean, rowcov, colcov
+
+    def _process_quantiles(self, X, dims):
+        """
+        Adjust quantiles array so that the last two axes label the
+        components of each data point.
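+
+        A single 2-D input is promoted to a stack containing one matrix, so
+        the output always has at least three dimensions.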
+ + """ + X = np.asarray(X, dtype=float) + if X.ndim == 2: + X = X[np.newaxis, :] + if X.shape[-2:] != dims: + raise ValueError("The shape of array `X` is not compatible " + "with the distribution parameters.") + return X + + def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov, + col_prec_rt, log_det_colcov): + """ + Parameters + ---------- + dims : tuple + Dimensions of the matrix variates + X : ndarray + Points at which to evaluate the log of the probability + density function + mean : ndarray + Mean of the distribution + row_prec_rt : ndarray + A decomposition such that np.dot(row_prec_rt, row_prec_rt.T) + is the inverse of the among-row covariance matrix + log_det_rowcov : float + Logarithm of the determinant of the among-row covariance matrix + col_prec_rt : ndarray + A decomposition such that np.dot(col_prec_rt, col_prec_rt.T) + is the inverse of the among-column covariance matrix + log_det_colcov : float + Logarithm of the determinant of the among-column covariance matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + numrows, numcols = dims + roll_dev = np.rollaxis(X-mean, axis=-1, start=0) + scale_dev = np.tensordot(col_prec_rt.T, + np.dot(roll_dev, row_prec_rt), 1) + maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0) + return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov + + numrows*log_det_colcov + maha) + + def logpdf(self, X, mean=None, rowcov=1, colcov=1): + """ + Log of the matrix normal probability density function. + + Parameters + ---------- + X : array_like + Quantiles, with the last two axes of `X` denoting the components. + %(_matnorm_doc_default_callparams)s + + Returns + ------- + logpdf : ndarray + Log of the probability density function evaluated at `X` + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, + colcov) + X = self._process_quantiles(X, dims) + rowpsd = _PSD(rowcov, allow_singular=False) + colpsd = _PSD(colcov, allow_singular=False) + out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U, + colpsd.log_pdet) + return _squeeze_output(out) + + def pdf(self, X, mean=None, rowcov=1, colcov=1): + """ + Matrix normal probability density function. + + Parameters + ---------- + X : array_like + Quantiles, with the last two axes of `X` denoting the components. + %(_matnorm_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `X` + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + return np.exp(self.logpdf(X, mean, rowcov, colcov)) + + def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None): + """ + Draw random samples from a matrix normal distribution. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `dims`), where `dims` is the + dimension of the random matrices. 
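+            If `size` is ``1``, the leading dimension is squeezed so a single
+            ``dims``-shaped matrix is returned.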
+ + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + size = int(size) + dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, + colcov) + rowchol = scipy.linalg.cholesky(rowcov, lower=True) + colchol = scipy.linalg.cholesky(colcov, lower=True) + random_state = self._get_random_state(random_state) + std_norm = random_state.standard_normal(size=(dims[1], size, dims[0])) + roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1) + out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis, :, :] + if size == 1: + out = out.reshape(mean.shape) + return out + + +matrix_normal = matrix_normal_gen() + + +class matrix_normal_frozen(multi_rv_frozen): + def __init__(self, mean=None, rowcov=1, colcov=1, seed=None): + """ + Create a frozen matrix normal distribution. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + seed : None or int or np.random.RandomState instance, optional + If int or RandomState, use it for drawing the random variates. + If None (or np.random), the global np.random state is used. + Default is None. + + Examples + -------- + >>> from scipy.stats import matrix_normal + + >>> distn = matrix_normal(mean=np.zeros((3,3))) + >>> X = distn.rvs(); X + array([[-0.02976962, 0.93339138, -0.09663178], + [ 0.67405524, 0.28250467, -0.93308929], + [-0.31144782, 0.74535536, 1.30412916]]) + >>> distn.pdf(X) + 2.5160642368346784e-05 + >>> distn.logpdf(X) + -10.590229595124615 + """ + self._dist = matrix_normal_gen(seed) + self.dims, self.mean, self.rowcov, self.colcov = \ + self._dist._process_parameters(mean, rowcov, colcov) + self.rowpsd = _PSD(self.rowcov, allow_singular=False) + self.colpsd = _PSD(self.colcov, allow_singular=False) + + def logpdf(self, X): + X = self._dist._process_quantiles(X, self.dims) + out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U, + self.rowpsd.log_pdet, self.colpsd.U, + self.colpsd.log_pdet) + return _squeeze_output(out) + + def pdf(self, X): + return np.exp(self.logpdf(X)) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.mean, self.rowcov, self.colcov, size, + random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# matrix_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'rvs']: + method = matrix_normal_gen.__dict__[name] + method_frozen = matrix_normal_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + matnorm_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params) + +_dirichlet_doc_default_callparams = """\ +alpha : array_like + The concentration parameters. The number of entries determines the + dimensionality of the distribution. +""" +_dirichlet_doc_frozen_callparams = "" + +_dirichlet_doc_frozen_callparams_note = \ + """See class definition for a detailed description of parameters.""" + +dirichlet_docdict_params = { + '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams, + '_doc_random_state': _doc_random_state +} + +dirichlet_docdict_noparams = { + '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams, + '_doc_random_state': _doc_random_state +} + + +def _dirichlet_check_parameters(alpha): + alpha = np.asarray(alpha) + if np.min(alpha) <= 0: + raise ValueError("All parameters must be greater than 0") + elif alpha.ndim != 1: + raise ValueError("Parameter vector 'a' must be one dimensional, " + "but a.shape = %s." 
+                         % (alpha.shape, ))
+    return alpha
+
+
+def _dirichlet_check_input(alpha, x):
+    x = np.asarray(x)
+
+    if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
+        raise ValueError("Vector 'x' must have either the same number "
+                         "of entries as, or one entry fewer than, "
+                         "parameter vector 'a', but alpha.shape = %s "
+                         "and x.shape = %s." % (alpha.shape, x.shape))
+
+    if x.shape[0] != alpha.shape[0]:
+        xk = np.array([1 - np.sum(x, 0)])
+        if xk.ndim == 1:
+            x = np.append(x, xk)
+        elif xk.ndim == 2:
+            x = np.vstack((x, xk))
+        else:
+            raise ValueError("The input must be one dimensional or a two "
+                             "dimensional matrix containing the entries.")
+
+    if np.min(x) < 0:
+        raise ValueError("Each entry in 'x' must be greater than or equal "
+                         "to zero.")
+
+    if np.max(x) > 1:
+        raise ValueError("Each entry in 'x' must be smaller than or equal "
+                         "to one.")
+
+    # Check x_i > 0 or alpha_i > 1
+    xeq0 = (x == 0)
+    alphalt1 = (alpha < 1)
+    if x.shape != alpha.shape:
+        alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape)
+    chk = np.logical_and(xeq0, alphalt1)
+
+    if np.sum(chk):
+        raise ValueError("Each entry in 'x' must be greater than zero if its "
+                         "alpha is less than one.")
+
+    if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
+        raise ValueError("The input vector 'x' must lie within the normal "
+                         "simplex, but np.sum(x, 0) = %s." % np.sum(x, 0))
+
+    return x
+
+
+def _lnB(alpha):
+    r"""
+    Internal helper function to compute the log of the useful quotient
+
+    .. math::
+
+        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}
+                         {\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)}
+
+    Parameters
+    ----------
+    %(_dirichlet_doc_default_callparams)s
+
+    Returns
+    -------
+    B : scalar
+        Helper quotient, internal use only
+
+    """
+    return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
+
+
+class dirichlet_gen(multi_rv_generic):
+    r"""
+    A Dirichlet random variable.
+
+    The `alpha` keyword specifies the concentration parameters of the
+    distribution.
+
+    .. versionadded:: 0.15.0
+
+    Methods
+    -------
+    ``pdf(x, alpha)``
+        Probability density function.
+    ``logpdf(x, alpha)``
+        Log of the probability density function.
+    ``rvs(alpha, size=1, random_state=None)``
+        Draw random samples from a Dirichlet distribution.
+    ``mean(alpha)``
+        The mean of the Dirichlet distribution
+    ``var(alpha)``
+        The variance of the Dirichlet distribution
+    ``entropy(alpha)``
+        Compute the differential entropy of the Dirichlet distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Quantiles, with the last axis of `x` denoting the components.
+    %(_dirichlet_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Alternatively, the object may be called (as a function) to fix
+    concentration parameters, returning a "frozen" Dirichlet
+    random variable:
+
+    rv = dirichlet(alpha)
+        - Frozen object with the same methods but holding the given
+          concentration parameters fixed.
+
+    Notes
+    -----
+    Each :math:`\alpha` entry must be positive. The distribution has
+    support only on the simplex defined by
+
+    .. math::
+        \sum_{i=1}^{K} x_i \le 1
+
+
+    The probability density function for `dirichlet` is
+
+    .. math::
+
+        f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
+
+    where
+
+    .. math::
+
+        \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
+                                             {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
+
+    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)` is the vector of
+    concentration parameters, and :math:`K` is the dimension of the space
+    where :math:`x` takes values.
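+
+    The pdf and logpdf accept `x` with either `K` entries summing to one,
+    or only the first `K - 1` entries, in which case the final component
+    is inferred as ``1 - np.sum(x, 0)``.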
+ + Note that the dirichlet interface is somewhat inconsistent. + The array returned by the rvs function is transposed + with respect to the format expected by the pdf and logpdf. + + Examples + -------- + >>> from scipy.stats import dirichlet + + Generate a dirichlet random variable + + >>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles + >>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters + >>> dirichlet.pdf(quantiles, alpha) + 0.2843831684937255 + + The same PDF but following a log scale + + >>> dirichlet.logpdf(quantiles, alpha) + -1.2574327653159187 + + Once we specify the dirichlet distribution + we can then calculate quantities of interest + + >>> dirichlet.mean(alpha) # get the mean of the distribution + array([0.01960784, 0.24509804, 0.73529412]) + >>> dirichlet.var(alpha) # get variance + array([0.00089829, 0.00864603, 0.00909517]) + >>> dirichlet.entropy(alpha) # calculate the differential entropy + -4.3280162474082715 + + We can also return random samples from the distribution + + >>> dirichlet.rvs(alpha, size=1, random_state=1) + array([[0.00766178, 0.24670518, 0.74563305]]) + >>> dirichlet.rvs(alpha, size=2, random_state=2) + array([[0.01639427, 0.1292273 , 0.85437844], + [0.00156917, 0.19033695, 0.80809388]]) + + """ + + def __init__(self, seed=None): + super(dirichlet_gen, self).__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params) + + def __call__(self, alpha, seed=None): + return dirichlet_frozen(alpha, seed=seed) + + def _logpdf(self, x, alpha): + """ + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + %(_dirichlet_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + lnB = _lnB(alpha) + return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0) + + def logpdf(self, x, alpha): + """ + Log of the Dirichlet probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Log of the probability density function evaluated at `x`. + + """ + alpha = _dirichlet_check_parameters(alpha) + x = _dirichlet_check_input(alpha, x) + + out = self._logpdf(x, alpha) + return _squeeze_output(out) + + def pdf(self, x, alpha): + """ + The Dirichlet probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + The probability density function evaluated at `x`. + + """ + alpha = _dirichlet_check_parameters(alpha) + x = _dirichlet_check_input(alpha, x) + + out = np.exp(self._logpdf(x, alpha)) + return _squeeze_output(out) + + def mean(self, alpha): + """ + Compute the mean of the dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + mu : ndarray or scalar + Mean of the Dirichlet distribution. + + """ + alpha = _dirichlet_check_parameters(alpha) + + out = alpha / (np.sum(alpha)) + return _squeeze_output(out) + + def var(self, alpha): + """ + Compute the variance of the dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + v : ndarray or scalar + Variance of the Dirichlet distribution. 
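+            Each component equals
+            ``alpha * (alpha0 - alpha) / (alpha0**2 * (alpha0 + 1))``
+            with ``alpha0 = np.sum(alpha)``.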
+ + """ + + alpha = _dirichlet_check_parameters(alpha) + + alpha0 = np.sum(alpha) + out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1)) + return _squeeze_output(out) + + def entropy(self, alpha): + """ + Compute the differential entropy of the dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Dirichlet distribution + + """ + + alpha = _dirichlet_check_parameters(alpha) + + alpha0 = np.sum(alpha) + lnB = _lnB(alpha) + K = alpha.shape[0] + + out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum( + (alpha - 1) * scipy.special.psi(alpha)) + return _squeeze_output(out) + + def rvs(self, alpha, size=1, random_state=None): + """ + Draw random samples from a Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + size : int, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the random variable. + + """ + alpha = _dirichlet_check_parameters(alpha) + random_state = self._get_random_state(random_state) + return random_state.dirichlet(alpha, size=size) + + +dirichlet = dirichlet_gen() + + +class dirichlet_frozen(multi_rv_frozen): + def __init__(self, alpha, seed=None): + self.alpha = _dirichlet_check_parameters(alpha) + self._dist = dirichlet_gen(seed) + + def logpdf(self, x): + return self._dist.logpdf(x, self.alpha) + + def pdf(self, x): + return self._dist.pdf(x, self.alpha) + + def mean(self): + return self._dist.mean(self.alpha) + + def var(self): + return self._dist.var(self.alpha) + + def entropy(self): + return self._dist.entropy(self.alpha) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.alpha, size, random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']: + method = dirichlet_gen.__dict__[name] + method_frozen = dirichlet_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, dirichlet_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params) + + +_wishart_doc_default_callparams = """\ +df : int + Degrees of freedom, must be greater than or equal to dimension of the + scale matrix +scale : array_like + Symmetric positive definite scale matrix of the distribution +""" + +_wishart_doc_callparams_note = "" + +_wishart_doc_frozen_callparams = "" + +_wishart_doc_frozen_callparams_note = \ + """See class definition for a detailed description of parameters.""" + +wishart_docdict_params = { + '_doc_default_callparams': _wishart_doc_default_callparams, + '_doc_callparams_note': _wishart_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +wishart_docdict_noparams = { + '_doc_default_callparams': _wishart_doc_frozen_callparams, + '_doc_callparams_note': _wishart_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class wishart_gen(multi_rv_generic): + r""" + A Wishart random variable. + + The `df` keyword specifies the degrees of freedom. The `scale` keyword + specifies the scale matrix, which must be symmetric and positive definite. + In this context, the scale matrix is often interpreted in terms of a + multivariate normal precision matrix (the inverse of the covariance + matrix). 
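+
+    For integer :math:`\nu \ge p`, a draw from :math:`W_p(\nu, \Sigma)` can
+    be viewed as the scatter matrix :math:`X X^T` of :math:`\nu` independent
+    zero-mean multivariate normal vectors with covariance :math:`\Sigma`.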
+
+    Methods
+    -------
+    ``pdf(x, df, scale)``
+        Probability density function.
+    ``logpdf(x, df, scale)``
+        Log of the probability density function.
+    ``rvs(df, scale, size=1, random_state=None)``
+        Draw random samples from a Wishart distribution.
+    ``entropy()``
+        Compute the differential entropy of the Wishart distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Quantiles, with the last axis of `x` denoting the components.
+    %(_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Alternatively, the object may be called (as a function) to fix the degrees
+    of freedom and scale parameters, returning a "frozen" Wishart random
+    variable:
+
+    rv = wishart(df=1, scale=1)
+        - Frozen object with the same methods but holding the given
+          degrees of freedom and scale fixed.
+
+    See Also
+    --------
+    invwishart, chi2
+
+    Notes
+    -----
+    %(_doc_callparams_note)s
+
+    The scale matrix `scale` must be a symmetric positive definite
+    matrix. Singular matrices, including the symmetric positive semi-definite
+    case, are not supported.
+
+    The Wishart distribution is often denoted
+
+    .. math::
+
+        W_p(\nu, \Sigma)
+
+    where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
+    :math:`p \times p` scale matrix.
+
+    The probability density function for `wishart` has support over positive
+    definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
+    its PDF is given by:
+
+    .. math::
+
+        f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
+               |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
+               \exp\left( -tr(\Sigma^{-1} S) / 2 \right)
+
+    If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
+    :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
+
+    If the scale matrix is 1-dimensional and equal to one, then the Wishart
+    distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
+    distribution.
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
+           Wiley, 1983.
+    .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
+           Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
+
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.stats import wishart, chi2
+    >>> x = np.linspace(1e-5, 8, 100)
+    >>> w = wishart.pdf(x, df=3, scale=1); w[:5]
+    array([ 0.00126156,  0.10892176,  0.14793434,  0.17400548,  0.1929669 ])
+    >>> c = chi2.pdf(x, 3); c[:5]
+    array([ 0.00126156,  0.10892176,  0.14793434,  0.17400548,  0.1929669 ])
+    >>> plt.plot(x, w)
+
+    The input quantiles can be any shape of array, as long as the last
+    axis labels the components.
+
+    """
+
+    def __init__(self, seed=None):
+        super(wishart_gen, self).__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
+
+    def __call__(self, df=None, scale=None, seed=None):
+        """
+        Create a frozen Wishart distribution.
+
+        See `wishart_frozen` for more information.
+
+        """
+        return wishart_frozen(df, scale, seed)
+
+    def _process_parameters(self, df, scale):
+        if scale is None:
+            scale = 1.0
+        scale = np.asarray(scale, dtype=float)
+
+        if scale.ndim == 0:
+            scale = scale[np.newaxis, np.newaxis]
+        elif scale.ndim == 1:
+            scale = np.diag(scale)
+        elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
+            raise ValueError("Array 'scale' must be square if it is two"
+                             " dimensional, but scale.shape = %s."
+                             % str(scale.shape))
+        elif scale.ndim > 2:
+            raise ValueError("Array 'scale' must be at most two-dimensional,"
+                             " but scale.ndim = %d" % scale.ndim)
+
+        dim = scale.shape[0]
+
+        if df is None:
+            df = dim
+        elif not np.isscalar(df):
+            raise ValueError("Degrees of freedom must be a scalar.")
+        elif df < dim:
+            raise ValueError("Degrees of freedom cannot be less than dimension"
+                             " of scale matrix, but df = %d" % df)
+
+        return dim, df, scale
+
+    def _process_quantiles(self, x, dim):
+        """
+        Adjust quantiles array so that last axis labels the components of
+        each data point.
+        """
+        x = np.asarray(x, dtype=float)
+
+        if x.ndim == 0:
+            x = x * np.eye(dim)[:, :, np.newaxis]
+        if x.ndim == 1:
+            if dim == 1:
+                x = x[np.newaxis, np.newaxis, :]
+            else:
+                x = np.diag(x)[:, :, np.newaxis]
+        elif x.ndim == 2:
+            if not x.shape[0] == x.shape[1]:
+                raise ValueError("Quantiles must be square if they are two"
+                                 " dimensional, but x.shape = %s."
+                                 % str(x.shape))
+            x = x[:, :, np.newaxis]
+        elif x.ndim == 3:
+            if not x.shape[0] == x.shape[1]:
+                raise ValueError("Quantiles must be square in the first two"
+                                 " dimensions if they are three dimensional"
+                                 ", but x.shape = %s." % str(x.shape))
+        elif x.ndim > 3:
+            raise ValueError("Quantiles must be at most two-dimensional with"
+                             " an additional dimension for multiple"
+                             " components, but x.ndim = %d" % x.ndim)
+
+        # Now we have 3-dim array; should have shape [dim, dim, *]
+        if not x.shape[0:2] == (dim, dim):
+            raise ValueError('Quantiles have incompatible dimensions: should'
+                             ' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
+
+        return x
+
+    def _process_size(self, size):
+        size = np.asarray(size)
+
+        if size.ndim == 0:
+            size = size[np.newaxis]
+        elif size.ndim > 1:
+            raise ValueError('Size must be an integer or tuple of integers;'
+                             ' thus must have dimension <= 1.'
+                             ' Got size = %s' % str(tuple(size)))
+        n = size.prod()
+        shape = tuple(size)
+
+        return n, shape
+
+    def _logpdf(self, x, dim, df, scale, log_det_scale, C):
+        """
+        Parameters
+        ----------
+        x : ndarray
+            Points at which to evaluate the log of the probability
+            density function
+        dim : int
+            Dimension of the scale matrix
+        df : int
+            Degrees of freedom
+        scale : ndarray
+            Scale matrix
+        log_det_scale : float
+            Logarithm of the determinant of the scale matrix
+        C : ndarray
+            Cholesky factorization of the scale matrix, lower triangular.
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'logpdf' instead.
+
+        """
+        # log determinant of x
+        # Note: x has components along the last axis, so that x.T has
+        # components along the 0-th axis. Then since det(A) = det(A'), this
+        # gives us a 1-dim vector of determinants
+
+        # Retrieve tr(scale^{-1} x)
+        log_det_x = np.zeros(x.shape[-1])
+        scale_inv_x = np.zeros(x.shape)
+        tr_scale_inv_x = np.zeros(x.shape[-1])
+        for i in range(x.shape[-1]):
+            _, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
+            scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True),
+                                                          x[:, :, i])
+            tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()
+
+        # Log PDF
+        out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
+               (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
+                multigammaln(0.5*df, dim)))
+
+        return out
+
+    def logpdf(self, x, df, scale):
+        """
+        Log of the Wishart probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+            Each quantile must be a symmetric positive definite matrix.
+ %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + x = self._process_quantiles(x, dim) + + # Cholesky decomposition of scale, get log(det(scale)) + C, log_det_scale = self._cholesky_logdet(scale) + + out = self._logpdf(x, dim, df, scale, log_det_scale, C) + return _squeeze_output(out) + + def pdf(self, x, df, scale): + """ + Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + return np.exp(self.logpdf(x, df, scale)) + + def _mean(self, dim, df, scale): + """ + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mean' instead. + + """ + return df * scale + + def mean(self, df, scale): + """ + Mean of the Wishart distribution + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float + The mean of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mean(dim, df, scale) + return _squeeze_output(out) + + def _mode(self, dim, df, scale): + """ + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mode' instead. + + """ + if df >= dim + 1: + out = (df-dim-1) * scale + else: + out = None + return out + + def mode(self, df, scale): + """ + Mode of the Wishart distribution + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mode : float or None + The Mode of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mode(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _var(self, dim, df, scale): + """ + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'var' instead. + + """ + var = scale**2 + diag = scale.diagonal() # 1 x dim array + var += np.outer(diag, diag) + var *= df + return var + + def var(self, df, scale): + """ + Variance of the Wishart distribution + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + var : float + The variance of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._var(dim, df, scale) + return _squeeze_output(out) + + def _standard_rvs(self, n, shape, dim, df, random_state): + """ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + random_state : np.random.RandomState instance + RandomState used for drawing the random variates. 
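+
+        Returns
+        -------
+        A : ndarray
+            Array of shape ``shape + (dim, dim)`` of random lower-triangular
+            Bartlett factors: square roots of chi-square variates on the
+            diagonal and standard normal variates below it, so that
+            ``A A'`` is a standard Wishart variate.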
+ + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + # Random normal variates for off-diagonal elements + n_tril = dim * (dim-1) // 2 + covariances = random_state.normal( + size=n*n_tril).reshape(shape+(n_tril,)) + + # Random chi-square variates for diagonal elements + variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5 + for i in range(dim)]].reshape((dim,) + + shape[::-1]).T) + + # Create the A matri(ces) - lower triangular + A = np.zeros(shape + (dim, dim)) + + # Input the covariances + size_idx = tuple([slice(None, None, None)]*len(shape)) + tril_idx = np.tril_indices(dim, k=-1) + A[size_idx + tril_idx] = covariances + + # Input the variances + diag_idx = np.diag_indices(dim) + A[size_idx + diag_idx] = variances + + return A + + def _rvs(self, n, shape, dim, df, C, random_state): + """ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + scale : ndarray + Scale matrix + C : ndarray + Cholesky factorization of the scale matrix, lower triangular. + %(_doc_random_state)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + random_state = self._get_random_state(random_state) + # Calculate the matrices A, which are actually lower triangular + # Cholesky factorizations of a matrix B such that B ~ W(df, I) + A = self._standard_rvs(n, shape, dim, df, random_state) + + # Calculate SA = C A A' C', where SA ~ W(df, scale) + # Note: this is the product of a (lower) (lower) (lower)' (lower)' + # or, denoting B = AA', it is C B C' where C is the lower + # triangular Cholesky factorization of the scale matrix. + # this appears to conflict with the instructions in [1]_, which + # suggest that it should be D' B D where D is the lower + # triangular factorization of the scale matrix. However, it is + # meant to refer to the Bartlett (1933) representation of a + # Wishart random variate as L A A' L' where L is lower triangular + # so it appears that understanding D' to be upper triangular + # is either a typo in or misreading of [1]_. + for index in np.ndindex(shape): + CA = np.dot(C, A[index]) + A[index] = np.dot(CA, CA.T) + + return A + + def rvs(self, df, scale, size=1, random_state=None): + """ + Draw random samples from a Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`) + (`dim`, `dim), where `dim` is + the dimension of the scale matrix. + + Notes + ----- + %(_doc_callparams_note)s + + """ + n, shape = self._process_size(size) + dim, df, scale = self._process_parameters(df, scale) + + # Cholesky decomposition of scale + C = scipy.linalg.cholesky(scale, lower=True) + + out = self._rvs(n, shape, dim, df, C, random_state) + + return _squeeze_output(out) + + def _entropy(self, dim, df, log_det_scale): + """ + Parameters + ---------- + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + log_det_scale : float + Logarithm of the determinant of the scale matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'entropy' instead. 
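+
+        The returned value is the standard Wishart differential entropy,
+        ``(p+1)/2 * log|S| + p(p+1)/2 * log(2) + log(Gamma_p(v/2))
+        - (v-p-1)/2 * sum_{i=1..p} psi((v+1-i)/2) + v*p/2``,
+        with ``p = dim``, ``v = df`` and ``log|S| = log_det_scale``.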
+ + """ + return ( + 0.5 * (dim+1) * log_det_scale + + 0.5 * dim * (dim+1) * _LOG_2 + + multigammaln(0.5*df, dim) - + 0.5 * (df - dim - 1) * np.sum( + [psi(0.5*(df + 1 - (i+1))) for i in range(dim)] + ) + + 0.5 * df * dim + ) + + def entropy(self, df, scale): + """ + Compute the differential entropy of the Wishart. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Wishart distribution + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + _, log_det_scale = self._cholesky_logdet(scale) + return self._entropy(dim, df, log_det_scale) + + def _cholesky_logdet(self, scale): + """ + Compute Cholesky decomposition and determine (log(det(scale)). + + Parameters + ---------- + scale : ndarray + Scale matrix. + + Returns + ------- + c_decomp : ndarray + The Cholesky decomposition of `scale`. + logdet : scalar + The log of the determinant of `scale`. + + Notes + ----- + This computation of ``logdet`` is equivalent to + ``np.linalg.slogdet(scale)``. It is ~2x faster though. + + """ + c_decomp = scipy.linalg.cholesky(scale, lower=True) + logdet = 2 * np.sum(np.log(c_decomp.diagonal())) + return c_decomp, logdet + + +wishart = wishart_gen() + + +class wishart_frozen(multi_rv_frozen): + """ + Create a frozen Wishart distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution + scale : array_like + Scale matrix of the distribution + seed : None or int or np.random.RandomState instance, optional + This parameter defines the RandomState object to use for drawing + random variates. + If None (or np.random), the global np.random state is used. + If integer, it is used to seed the local RandomState instance + Default is None. 
+ + """ + def __init__(self, df, scale, seed=None): + self._dist = wishart_gen(seed) + self.dim, self.df, self.scale = self._dist._process_parameters( + df, scale) + self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + + out = self._dist._logpdf(x, self.dim, self.df, self.scale, + self.log_det_scale, self.C) + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def mean(self): + out = self._dist._mean(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def mode(self): + out = self._dist._mode(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def var(self): + out = self._dist._var(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def rvs(self, size=1, random_state=None): + n, shape = self._dist._process_size(size) + out = self._dist._rvs(n, shape, self.dim, self.df, + self.C, random_state) + return _squeeze_output(out) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.log_det_scale) + + +# Set frozen generator docstrings from corresponding docstrings in +# Wishart and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']: + method = wishart_gen.__dict__[name] + method_frozen = wishart_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, wishart_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) + + +def _cho_inv_batch(a, check_finite=True): + """ + Invert the matrices a_i, using a Cholesky factorization of A, where + a_i resides in the last two dimensions of a and the other indices describe + the index i. + + Overwrites the data in a. + + Parameters + ---------- + a : array + Array of matrices to invert, where the matrices themselves are stored + in the last two dimensions. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + x : array + Array of inverses of the matrices ``a_i``. + + See also + -------- + scipy.linalg.cholesky : Cholesky factorization of a matrix + + """ + if check_finite: + a1 = asarray_chkfinite(a) + else: + a1 = asarray(a) + if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]: + raise ValueError('expected square matrix in last two dimensions') + + potrf, potri = get_lapack_funcs(('potrf', 'potri'), (a1,)) + + triu_rows, triu_cols = np.triu_indices(a.shape[-2], k=1) + for index in np.ndindex(a1.shape[:-2]): + + # Cholesky decomposition + a1[index], info = potrf(a1[index], lower=True, overwrite_a=False, + clean=False) + if info > 0: + raise LinAlgError("%d-th leading minor not positive definite" + % info) + if info < 0: + raise ValueError('illegal value in %d-th argument of internal' + ' potrf' % -info) + # Inversion + a1[index], info = potri(a1[index], lower=True, overwrite_c=False) + if info > 0: + raise LinAlgError("the inverse could not be computed") + if info < 0: + raise ValueError('illegal value in %d-th argument of internal' + ' potrf' % -info) + + # Make symmetric (dpotri only fills in the lower triangle) + a1[index][triu_rows, triu_cols] = a1[index][triu_cols, triu_rows] + + return a1 + + +class invwishart_gen(wishart_gen): + r""" + An inverse Wishart random variable. 
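+
+    It arises naturally as a conjugate prior for the covariance matrix of a
+    multivariate normal distribution.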
+ + The `df` keyword specifies the degrees of freedom. The `scale` keyword + specifies the scale matrix, which must be symmetric and positive definite. + In this context, the scale matrix is often interpreted in terms of a + multivariate normal covariance matrix. + + Methods + ------- + ``pdf(x, df, scale)`` + Probability density function. + ``logpdf(x, df, scale)`` + Log of the probability density function. + ``rvs(df, scale, size=1, random_state=None)`` + Draw random samples from an inverse Wishart distribution. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + %(_doc_random_state)s + + Alternatively, the object may be called (as a function) to fix the degrees + of freedom and scale parameters, returning a "frozen" inverse Wishart + random variable: + + rv = invwishart(df=1, scale=1) + - Frozen object with the same methods but holding the given + degrees of freedom and scale fixed. + + See Also + -------- + wishart + + Notes + ----- + %(_doc_callparams_note)s + + The scale matrix `scale` must be a symmetric positive definite + matrix. Singular matrices, including the symmetric positive semi-definite + case, are not supported. + + The inverse Wishart distribution is often denoted + + .. math:: + + W_p^{-1}(\nu, \Psi) + + where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the + :math:`p \times p` scale matrix. + + The probability density function for `invwishart` has support over positive + definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`, + then its PDF is given by: + + .. math:: + + f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} } + |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)} + \exp\left( -tr(\Sigma S^{-1}) / 2 \right) + + If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then + :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart). + + If the scale matrix is 1-dimensional and equal to one, then the inverse + Wishart distribution :math:`W_1(\nu, 1)` collapses to the + inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}` + and scale = :math:`\frac{1}{2}`. + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", + Wiley, 1983. + .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications + in Statistics - Simulation and Computation, vol. 14.2, pp.511-514, + 1985. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import invwishart, invgamma + >>> x = np.linspace(0.01, 1, 100) + >>> iw = invwishart.pdf(x, df=6, scale=1) + >>> iw[:3] + array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) + >>> ig = invgamma.pdf(x, 6/2., scale=1./2) + >>> ig[:3] + array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) + >>> plt.plot(x, iw) + + The input quantiles can be any shape of array, as long as the last + axis labels the components. + + """ + + def __init__(self, seed=None): + super(invwishart_gen, self).__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) + + def __call__(self, df=None, scale=None, seed=None): + """ + Create a frozen inverse Wishart distribution. + + See `invwishart_frozen` for more information. + + """ + return invwishart_frozen(df, scale, seed) + + def _logpdf(self, x, dim, df, scale, log_det_scale): + """ + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function. 
+ dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + scale : ndarray + Scale matrix + log_det_scale : float + Logarithm of the determinant of the scale matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + log_det_x = np.zeros(x.shape[-1]) + x_inv = np.copy(x).T + if dim > 1: + _cho_inv_batch(x_inv) # works in-place + else: + x_inv = 1./x_inv + tr_scale_x_inv = np.zeros(x.shape[-1]) + + for i in range(x.shape[-1]): + C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True) + + log_det_x[i] = 2 * np.sum(np.log(C.diagonal())) + + tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace() + + # Log PDF + out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) - + (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) - + multigammaln(0.5*df, dim)) + + return out + + def logpdf(self, x, df, scale): + """ + Log of the inverse Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + x = self._process_quantiles(x, dim) + _, log_det_scale = self._cholesky_logdet(scale) + out = self._logpdf(x, dim, df, scale, log_det_scale) + return _squeeze_output(out) + + def pdf(self, x, df, scale): + """ + Inverse Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + return np.exp(self.logpdf(x, df, scale)) + + def _mean(self, dim, df, scale): + """ + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mean' instead. + + """ + if df > dim + 1: + out = scale / (df - dim - 1) + else: + out = None + return out + + def mean(self, df, scale): + """ + Mean of the inverse Wishart distribution + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix plus one. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float or None + The mean of the distribution + + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mean(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _mode(self, dim, df, scale): + """ + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mode' instead. 
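+
+        The mode has the closed form ``scale / (df + dim + 1)``, which the
+        return statement below implements directly; unlike the mean, it
+        exists for every valid combination of `df` and `scale`.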
+ + """ + return scale / (df + dim + 1) + + def mode(self, df, scale): + """ + Mode of the inverse Wishart distribution + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mode : float + The Mode of the distribution + + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mode(dim, df, scale) + return _squeeze_output(out) + + def _var(self, dim, df, scale): + """ + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'var' instead. + + """ + if df > dim + 3: + var = (df - dim + 1) * scale**2 + diag = scale.diagonal() # 1 x dim array + var += (df - dim - 1) * np.outer(diag, diag) + var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3) + else: + var = None + return var + + def var(self, df, scale): + """ + Variance of the inverse Wishart distribution + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix plus three. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + var : float + The variance of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._var(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _rvs(self, n, shape, dim, df, C, random_state): + """ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + C : ndarray + Cholesky factorization of the scale matrix, lower triagular. + %(_doc_random_state)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + random_state = self._get_random_state(random_state) + # Get random draws A such that A ~ W(df, I) + A = super(invwishart_gen, self)._standard_rvs(n, shape, dim, + df, random_state) + + # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale) + eye = np.eye(dim) + trtrs = get_lapack_funcs(('trtrs'), (A,)) + + for index in np.ndindex(A.shape[:-2]): + # Calculate CA + CA = np.dot(C, A[index]) + # Get (C A)^{-1} via triangular solver + if dim > 1: + CA, info = trtrs(CA, eye, lower=True) + if info > 0: + raise LinAlgError("Singular matrix.") + if info < 0: + raise ValueError('Illegal value in %d-th argument of' + ' internal trtrs' % -info) + else: + CA = 1. / CA + # Get SA + A[index] = np.dot(CA.T, CA) + + return A + + def rvs(self, df, scale, size=1, random_state=None): + """ + Draw random samples from an inverse Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`) + (`dim`, `dim), where `dim` is + the dimension of the scale matrix. 
+ + Notes + ----- + %(_doc_callparams_note)s + + """ + n, shape = self._process_size(size) + dim, df, scale = self._process_parameters(df, scale) + + # Invert the scale + eye = np.eye(dim) + L, lower = scipy.linalg.cho_factor(scale, lower=True) + inv_scale = scipy.linalg.cho_solve((L, lower), eye) + # Cholesky decomposition of inverted scale + C = scipy.linalg.cholesky(inv_scale, lower=True) + + out = self._rvs(n, shape, dim, df, C, random_state) + + return _squeeze_output(out) + + def entropy(self): + # Need to find reference for inverse Wishart entropy + raise AttributeError + + +invwishart = invwishart_gen() + + +class invwishart_frozen(multi_rv_frozen): + def __init__(self, df, scale, seed=None): + """ + Create a frozen inverse Wishart distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution + scale : array_like + Scale matrix of the distribution + seed : None or int or np.random.RandomState instance, optional + This parameter defines the RandomState object to use for drawing + random variates. + If None (or np.random), the global np.random state is used. + If integer, it is used to seed the local RandomState instance + Default is None. + + """ + self._dist = invwishart_gen(seed) + self.dim, self.df, self.scale = self._dist._process_parameters( + df, scale + ) + + # Get the determinant via Cholesky factorization + C, lower = scipy.linalg.cho_factor(self.scale, lower=True) + self.log_det_scale = 2 * np.sum(np.log(C.diagonal())) + + # Get the inverse using the Cholesky factorization + eye = np.eye(self.dim) + self.inv_scale = scipy.linalg.cho_solve((C, lower), eye) + + # Get the Cholesky factorization of the inverse scale + self.C = scipy.linalg.cholesky(self.inv_scale, lower=True) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._logpdf(x, self.dim, self.df, self.scale, + self.log_det_scale) + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def mean(self): + out = self._dist._mean(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def mode(self): + out = self._dist._mode(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def var(self): + out = self._dist._var(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def rvs(self, size=1, random_state=None): + n, shape = self._dist._process_size(size) + + out = self._dist._rvs(n, shape, self.dim, self.df, + self.C, random_state) + + return _squeeze_output(out) + + def entropy(self): + # Need to find reference for inverse Wishart entropy + raise AttributeError + + +# Set frozen generator docstrings from corresponding docstrings in +# inverse Wishart and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']: + method = invwishart_gen.__dict__[name] + method_frozen = wishart_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, wishart_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) + +_multinomial_doc_default_callparams = """\ +n : int + Number of trials +p : array_like + Probability of a trial falling into each category; should sum to 1 +""" + +_multinomial_doc_callparams_note = \ +"""`n` should be a positive integer. Each element of `p` should be in the +interval :math:`[0,1]` and the elements should sum to 1. 
If they do not sum to +1, the last element of the `p` array is not used and is replaced with the +remaining probability left over from the earlier elements. +""" + +_multinomial_doc_frozen_callparams = "" + +_multinomial_doc_frozen_callparams_note = \ + """See class definition for a detailed description of parameters.""" + +multinomial_docdict_params = { + '_doc_default_callparams': _multinomial_doc_default_callparams, + '_doc_callparams_note': _multinomial_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +multinomial_docdict_noparams = { + '_doc_default_callparams': _multinomial_doc_frozen_callparams, + '_doc_callparams_note': _multinomial_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multinomial_gen(multi_rv_generic): + r""" + A multinomial random variable. + + Methods + ------- + ``pmf(x, n, p)`` + Probability mass function. + ``logpmf(x, n, p)`` + Log of the probability mass function. + ``rvs(n, p, size=1, random_state=None)`` + Draw random samples from a multinomial distribution. + ``entropy(n, p)`` + Compute the entropy of the multinomial distribution. + ``cov(n, p)`` + Compute the covariance matrix of the multinomial distribution. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_callparams_note)s + + Alternatively, the object may be called (as a function) to fix the `n` and + `p` parameters, returning a "frozen" multinomial random variable: + + The probability mass function for `multinomial` is + + .. math:: + + f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k}, + + supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a + nonnegative integer and their sum is :math:`n`. + + .. versionadded:: 0.19.0 + + Examples + -------- + + >>> from scipy.stats import multinomial + >>> rv = multinomial(8, [0.3, 0.2, 0.5]) + >>> rv.pmf([1, 3, 4]) + 0.042000000000000072 + + The multinomial distribution for :math:`k=2` is identical to the + corresponding binomial distribution (tiny numerical differences + notwithstanding): + + >>> from scipy.stats import binom + >>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6]) + 0.29030399999999973 + >>> binom.pmf(3, 7, 0.4) + 0.29030400000000012 + + The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support + broadcasting, under the convention that the vector parameters (``x`` and + ``p``) are interpreted as if each row along the last axis is a single + object. For instance: + + >>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7]) + array([0.2268945, 0.25412184]) + + Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``, + but following the rules mentioned above they behave as if the rows + ``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single + object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and + ``p.shape = ()``. To obtain the individual elements without broadcasting, + we would do this: + + >>> multinomial.pmf([3, 4], n=7, p=[.3, .7]) + 0.2268945 + >>> multinomial.pmf([3, 5], 8, p=[.3, .7]) + 0.25412184 + + This broadcasting also works for ``cov``, where the output objects are + square matrices of size ``p.shape[-1]``. 
For example: + + >>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]]) + array([[[ 0.84, -0.84], + [-0.84, 0.84]], + [[ 1.2 , -1.2 ], + [-1.2 , 1.2 ]]]) + + In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and + following the rules above, these broadcast as if ``p.shape == (2,)``. + Thus the result should also be of shape ``(2,)``, but since each output is + a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``, + where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and + ``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``. + + See also + -------- + scipy.stats.binom : The binomial distribution. + numpy.random.multinomial : Sampling from the multinomial distribution. + """ + + def __init__(self, seed=None): + super(multinomial_gen, self).__init__(seed) + self.__doc__ = \ + doccer.docformat(self.__doc__, multinomial_docdict_params) + + def __call__(self, n, p, seed=None): + """ + Create a frozen multinomial distribution. + + See `multinomial_frozen` for more information. + """ + return multinomial_frozen(n, p, seed) + + def _process_parameters(self, n, p): + """ + Return: n_, p_, npcond. + + n_ and p_ are arrays of the correct shape; npcond is a boolean array + flagging values out of the domain. + """ + p = np.array(p, dtype=np.float64, copy=True) + p[..., -1] = 1. - p[..., :-1].sum(axis=-1) + + # true for bad p + pcond = np.any(p < 0, axis=-1) + pcond |= np.any(p > 1, axis=-1) + + n = np.array(n, dtype=np.int, copy=True) + + # true for bad n + ncond = n <= 0 + + return n, p, ncond | pcond + + def _process_quantiles(self, x, n, p): + """ + Return: x_, xcond. + + x_ is an int array; xcond is a boolean array flagging values out of the + domain. + """ + xx = np.asarray(x, dtype=np.int) + + if xx.ndim == 0: + raise ValueError("x must be an array.") + + if xx.size != 0 and not xx.shape[-1] == p.shape[-1]: + raise ValueError("Size of each quantile should be size of p: " + "received %d, but expected %d." % + (xx.shape[-1], p.shape[-1])) + + # true for x out of the domain + cond = np.any(xx != x, axis=-1) + cond |= np.any(xx < 0, axis=-1) + cond = cond | (np.sum(xx, axis=-1) != n) + + return xx, cond + + def _checkresult(self, result, cond, bad_value): + result = np.asarray(result) + + if cond.ndim != 0: + result[cond] = bad_value + elif cond: + if result.ndim == 0: + return bad_value + result[...] = bad_value + return result + + def _logpmf(self, x, n, p): + return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1) + + def logpmf(self, x, n, p): + """ + Log of the Multinomial probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + x, xcond = self._process_quantiles(x, n, p) + + result = self._logpmf(x, n, p) + + # replace values for which x was out of the domain; broadcast + # xcond to the right shape + xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_) + result = self._checkresult(result, xcond_, np.NINF) + + # replace values bad for n or p; broadcast npcond to the right shape + npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_) + return self._checkresult(result, npcond_, np.NAN) + + def pmf(self, x, n, p): + """ + Multinomial probability mass function. 
+ + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + pmf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + return np.exp(self.logpmf(x, n, p)) + + def mean(self, n, p): + """ + Mean of the Multinomial distribution + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float + The mean of the distribution + """ + n, p, npcond = self._process_parameters(n, p) + result = n[..., np.newaxis]*p + return self._checkresult(result, npcond, np.NAN) + + def cov(self, n, p): + """ + Covariance matrix of the multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + cov : ndarray + The covariance matrix of the distribution + """ + n, p, npcond = self._process_parameters(n, p) + + nn = n[..., np.newaxis, np.newaxis] + result = nn * np.einsum('...j,...k->...jk', -p, p) + + # change the diagonal + for i in range(p.shape[-1]): + result[..., i, i] += n*p[..., i] + + return self._checkresult(result, npcond, np.nan) + + def entropy(self, n, p): + r""" + Compute the entropy of the multinomial distribution. + + The entropy is computed using this expression: + + .. math:: + + f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i + + \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x! + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Multinomial distribution + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + + x = np.r_[1:np.max(n)+1] + + term1 = n*np.sum(entr(p), axis=-1) + term1 -= gammaln(n+1) + + n = n[..., np.newaxis] + new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1 + x.shape += (1,)*new_axes_needed + + term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1), + axis=(-1, -1-new_axes_needed)) + + return self._checkresult(term1 + term2, npcond, np.nan) + + def rvs(self, n, p, size=None, random_state=None): + """ + Draw random samples from a Multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of shape (`size`, `len(p)`) + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + random_state = self._get_random_state(random_state) + return random_state.multinomial(n, p, size) + + +multinomial = multinomial_gen() + + +class multinomial_frozen(multi_rv_frozen): + r""" + Create a frozen Multinomial distribution. + + Parameters + ---------- + n : int + number of trials + p: array_like + probability of a trial falling into each category; should sum to 1 + seed : None or int or np.random.RandomState instance, optional + This parameter defines the RandomState object to use for drawing + random variates. + If None (or np.random), the global np.random state is used. + If integer, it is used to seed the local RandomState instance + Default is None. 
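+
+    Examples
+    --------
+    A small sketch of the frozen form (the trial count and probabilities
+    are illustrative; the pmf value is the one quoted in the `multinomial`
+    docstring above):
+
+    >>> from scipy.stats import multinomial
+    >>> rv = multinomial(8, [0.3, 0.2, 0.5])
+    >>> rv.pmf([1, 3, 4])
+    0.042000000000000072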
+ """ + def __init__(self, n, p, seed=None): + self._dist = multinomial_gen(seed) + self.n, self.p, self.npcond = self._dist._process_parameters(n, p) + + # monkey patch self._dist + def _process_parameters(n, p): + return self.n, self.p, self.npcond + + self._dist._process_parameters = _process_parameters + + def logpmf(self, x): + return self._dist.logpmf(x, self.n, self.p) + + def pmf(self, x): + return self._dist.pmf(x, self.n, self.p) + + def mean(self): + return self._dist.mean(self.n, self.p) + + def cov(self): + return self._dist.cov(self.n, self.p) + + def entropy(self): + return self._dist.entropy(self.n, self.p) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.n, self.p, size, random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multinomial and fill in default strings in class docstrings +for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']: + method = multinomial_gen.__dict__[name] + method_frozen = multinomial_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, multinomial_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + multinomial_docdict_params) + + +class special_ortho_group_gen(multi_rv_generic): + r""" + A matrix-valued SO(N) random variable. + + Return a random rotation matrix, drawn from the Haar distribution + (the only uniform distribution on SO(n)). + + The `dim` keyword specifies the dimension N. + + Methods + ------- + ``rvs(dim=None, size=1, random_state=None)`` + Draw random samples from SO(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices + + Notes + ---------- + This class is wrapping the random_rot code from the MDP Toolkit, + https://github.com/mdp-toolkit/mdp-toolkit + + Return a random rotation matrix, drawn from the Haar distribution + (the only uniform distribution on SO(n)). + The algorithm is described in the paper + Stewart, G.W., "The efficient generation of random orthogonal + matrices with an application to condition estimators", SIAM Journal + on Numerical Analysis, 17(3), pp. 403-409, 1980. + For more information see + https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization + + See also the similar `ortho_group`. + + Examples + -------- + >>> from scipy.stats import special_ortho_group + >>> x = special_ortho_group.rvs(3) + + >>> np.dot(x, x.T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + >>> import scipy.linalg + >>> scipy.linalg.det(x) + 1.0 + + This generates one random matrix from SO(3). It is orthogonal and + has a determinant of 1. + + """ + + def __init__(self, seed=None): + super(special_ortho_group_gen, self).__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """ + Create a frozen SO(N) distribution. + + See `special_ortho_group_frozen` for more information. + + """ + return special_ortho_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """ + Dimension N must be specified; it cannot be inferred. + """ + + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("""Dimension of rotation must be specified, + and must be a scalar greater than 1.""") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """ + Draw random samples from SO(N). + + Parameters + ---------- + dim : integer + Dimension of rotation space (N). 
+ size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + if size > 1: + return np.array([self.rvs(dim, size=1, random_state=random_state) + for i in range(size)]) + + dim = self._process_parameters(dim) + + H = np.eye(dim) + D = np.empty((dim,)) + for n in range(dim-1): + x = random_state.normal(size=(dim-n,)) + D[n] = np.sign(x[0]) if x[0] != 0 else 1 + x[0] += D[n]*np.sqrt((x*x).sum()) + # Householder transformation + Hx = (np.eye(dim-n) - 2.*np.outer(x, x)/(x*x).sum()) + mat = np.eye(dim) + mat[n:, n:] = Hx + H = np.dot(H, mat) + D[-1] = (-1)**(dim-1)*D[:-1].prod() + # Equivalent to np.dot(np.diag(D), H) but faster, apparently + H = (D*H.T).T + return H + + +special_ortho_group = special_ortho_group_gen() + + +class special_ortho_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """ + Create a frozen SO(N) distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : None or int or np.random.RandomState instance, optional + This parameter defines the RandomState object to use for drawing + random variates. + If None (or np.random), the global np.random state is used. + If integer, it is used to seed the local RandomState instance + Default is None. + + Examples + -------- + >>> from scipy.stats import special_ortho_group + >>> g = special_ortho_group(5) + >>> x = g.rvs() + + """ + self._dist = special_ortho_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +class ortho_group_gen(multi_rv_generic): + r""" + A matrix-valued O(N) random variable. + + Return a random orthogonal matrix, drawn from the O(N) Haar + distribution (the only uniform distribution on O(N)). + + The `dim` keyword specifies the dimension N. + + Methods + ------- + ``rvs(dim=None, size=1, random_state=None)`` + Draw random samples from O(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices + + Notes + ---------- + This class is closely related to `special_ortho_group`. + + Some care is taken to avoid numerical error, as per the paper by Mezzadri. + + References + ---------- + .. [1] F. Mezzadri, "How to generate random matrices from the classical + compact groups", :arXiv:`math-ph/0609050v2`. + + Examples + -------- + >>> from scipy.stats import ortho_group + >>> x = ortho_group.rvs(3) + + >>> np.dot(x, x.T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + >>> import scipy.linalg + >>> np.fabs(scipy.linalg.det(x)) + 1.0 + + This generates one random matrix from O(3). It is orthogonal and + has a determinant of +1 or -1. + + """ + + def __init__(self, seed=None): + super(ortho_group_gen, self).__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def _process_parameters(self, dim): + """ + Dimension N must be specified; it cannot be inferred. + """ + + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("Dimension of rotation must be specified," + "and must be a scalar greater than 1.") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """ + Draw random samples from O(N). 
+ + Parameters + ---------- + dim : integer + Dimension of rotation space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + if size > 1: + return np.array([self.rvs(dim, size=1, random_state=random_state) + for i in range(size)]) + + dim = self._process_parameters(dim) + + H = np.eye(dim) + for n in range(dim): + x = random_state.normal(size=(dim-n,)) + # random sign, 50/50, but chosen carefully to avoid roundoff error + D = np.sign(x[0]) if x[0] != 0 else 1 + x[0] += D*np.sqrt((x*x).sum()) + # Householder transformation + Hx = -D*(np.eye(dim-n) - 2.*np.outer(x, x)/(x*x).sum()) + mat = np.eye(dim) + mat[n:, n:] = Hx + H = np.dot(H, mat) + return H + + +ortho_group = ortho_group_gen() + + +class random_correlation_gen(multi_rv_generic): + r""" + A random correlation matrix. + + Return a random correlation matrix, given a vector of eigenvalues. + + The `eigs` keyword specifies the eigenvalues of the correlation matrix, + and implies the dimension. + + Methods + ------- + ``rvs(eigs=None, random_state=None)`` + Draw random correlation matrices, all with eigenvalues eigs. + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix. + + Notes + ---------- + + Generates a random correlation matrix following a numerically stable + algorithm spelled out by Davies & Higham. This algorithm uses a single O(N) + similarity transformation to construct a symmetric positive semi-definite + matrix, and applies a series of Givens rotations to scale it to have ones + on the diagonal. + + References + ---------- + + .. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation + of correlation matrices and their factors", BIT 2000, Vol. 40, + No. 4, pp. 640 651 + + Examples + -------- + >>> from scipy.stats import random_correlation + >>> np.random.seed(514) + >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5)) + >>> x + array([[ 1. , -0.20387311, 0.18366501, -0.04953711], + [-0.20387311, 1. , -0.24351129, 0.06703474], + [ 0.18366501, -0.24351129, 1. , 0.38530195], + [-0.04953711, 0.06703474, 0.38530195, 1. ]]) + >>> import scipy.linalg + >>> e, v = scipy.linalg.eigh(x) + >>> e + array([ 0.5, 0.8, 1.2, 1.5]) + + """ + + def __init__(self, seed=None): + super(random_correlation_gen, self).__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def _process_parameters(self, eigs, tol): + eigs = np.asarray(eigs, dtype=float) + dim = eigs.size + + if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1: + raise ValueError("Array 'eigs' must be a vector of length " + "greater than 1.") + + if np.fabs(np.sum(eigs) - dim) > tol: + raise ValueError("Sum of eigenvalues must equal dimensionality.") + + for x in eigs: + if x < -tol: + raise ValueError("All eigenvalues must be non-negative.") + + return dim, eigs + + def _givens_to_1(self, aii, ajj, aij): + """Computes a 2x2 Givens matrix to put 1's on the diagonal. + + The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ]. + + The output matrix g is a 2x2 anti-symmetric matrix of the form + [ c s ; -s c ]; the elements c and s are returned. + + Applying the output matrix to the input matrix (as b=g.T M g) + results in a matrix with bii=1, provided tr(M) - det(M) >= 1 + and floating point issues do not occur. Otherwise, some other + valid rotation is returned. When tr(M)==2, also bjj=1. 
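+
+        A sketch of the algebra behind the implementation: writing s = c*t
+        with c = 1/sqrt(1 + t*t), the condition bii = 1 reduces to the
+        quadratic (ajj - 1)*t**2 - 2*aij*t + (aii - 1) = 0, whose
+        discriminant equals 4*(tr(M) - det(M) - 1); this is why
+        tr(M) - det(M) >= 1 guarantees a real solution. Taking the root
+        whose sign matches aij avoids cancellation in the numerator.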
+ + """ + aiid = aii - 1. + ajjd = ajj - 1. + + if ajjd == 0: + # ajj==1, so swap aii and ajj to avoid division by zero + return 0., 1. + + dd = math.sqrt(max(aij**2 - aiid*ajjd, 0)) + + # The choice of t should be chosen to avoid cancellation [1] + t = (aij + math.copysign(dd, aij)) / ajjd + c = 1. / math.sqrt(1. + t*t) + if c == 0: + # Underflow + s = 1.0 + else: + s = c*t + return c, s + + def _to_corr(self, m): + """ + Given a psd matrix m, rotate to put one's on the diagonal, turning it + into a correlation matrix. This also requires the trace equal the + dimensionality. Note: modifies input matrix + """ + # Check requirements for in-place Givens + if not (m.flags.c_contiguous and m.dtype == np.float64 and + m.shape[0] == m.shape[1]): + raise ValueError() + + d = m.shape[0] + for i in range(d-1): + if m[i, i] == 1: + continue + elif m[i, i] > 1: + for j in range(i+1, d): + if m[j, j] < 1: + break + else: + for j in range(i+1, d): + if m[j, j] > 1: + break + + c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j]) + + # Use BLAS to apply Givens rotations in-place. Equivalent to: + # g = np.eye(d) + # g[i, i] = g[j,j] = c + # g[j, i] = -s; g[i, j] = s + # m = np.dot(g.T, np.dot(m, g)) + mv = m.ravel() + drot(mv, mv, c, -s, n=d, + offx=i*d, incx=1, offy=j*d, incy=1, + overwrite_x=True, overwrite_y=True) + drot(mv, mv, c, -s, n=d, + offx=i, incx=d, offy=j, incy=d, + overwrite_x=True, overwrite_y=True) + + return m + + def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7): + """ + Draw random correlation matrices + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix + tol : float, optional + Tolerance for input parameter checks + diag_tol : float, optional + Tolerance for deviation of the diagonal of the resulting + matrix. Default: 1e-7 + + Raises + ------ + RuntimeError + Floating point error prevented generating a valid correlation + matrix. + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim), + each having eigenvalues eigs. + + """ + dim, eigs = self._process_parameters(eigs, tol=tol) + + random_state = self._get_random_state(random_state) + + m = ortho_group.rvs(dim, random_state=random_state) + m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m + m = self._to_corr(m) # Carefully rotate to unit diagonal + + # Check diagonal + if abs(m.diagonal() - 1).max() > diag_tol: + raise RuntimeError("Failed to generate a valid correlation matrix") + + return m + + +random_correlation = random_correlation_gen() + + +class unitary_group_gen(multi_rv_generic): + r""" + A matrix-valued U(N) random variable. + + Return a random unitary matrix. + + The `dim` keyword specifies the dimension N. + + Methods + ------- + ``rvs(dim=None, size=1, random_state=None)`` + Draw random samples from U(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices + + Notes + ---------- + This class is similar to `ortho_group`. + + References + ---------- + .. [1] F. Mezzadri, "How to generate random matrices from the classical + compact groups", arXiv:math-ph/0609050v2. + + Examples + -------- + >>> from scipy.stats import unitary_group + >>> x = unitary_group.rvs(3) + + >>> np.dot(x, x.conj().T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + This generates one random matrix from U(3). The dot product confirms that + it is unitary up to machine precision. 
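+
+    The determinant of a unitary matrix has modulus one, which gives a
+    further quick check on the draw from above:
+
+    >>> import scipy.linalg
+    >>> np.isclose(abs(scipy.linalg.det(x)), 1)
+    True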
+ + """ + + def __init__(self, seed=None): + super(unitary_group_gen, self).__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def _process_parameters(self, dim): + """ + Dimension N must be specified; it cannot be inferred. + """ + + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("Dimension of rotation must be specified," + "and must be a scalar greater than 1.") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """ + Draw random samples from U(N). + + Parameters + ---------- + dim : integer + Dimension of space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + if size > 1: + return np.array([self.rvs(dim, size=1, random_state=random_state) + for i in range(size)]) + + dim = self._process_parameters(dim) + + z = 1/math.sqrt(2)*(random_state.normal(size=(dim, dim)) + + 1j*random_state.normal(size=(dim, dim))) + q, r = scipy.linalg.qr(z) + d = r.diagonal() + q *= d/abs(d) + return q + + +unitary_group = unitary_group_gen() diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.pyc new file mode 100644 index 0000000..36ce7cc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_rvs_sampling.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_rvs_sampling.py new file mode 100644 index 0000000..3d4f56c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_rvs_sampling.py @@ -0,0 +1,169 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +import warnings +from scipy._lib._util import check_random_state + + +def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None): + """ + Generate random samples from a probability density function using the + ratio-of-uniforms method. + + Parameters + ---------- + pdf : callable + A function with signature `pdf(x)` that is the probability + density function of the distribution. + umax : float + The upper bound of the bounding rectangle in the u-direction. + vmin : float + The lower bound of the bounding rectangle in the v-direction. + vmax : float + The upper bound of the bounding rectangle in the v-direction. + size : int or tuple of ints, optional + Defining number of random variates (default is 1). + c : float, optional. + Shift parameter of ratio-of-uniforms method, see Notes. Default is 0. + random_state : int or np.random.RandomState instance, optional + If already a RandomState instance, use it. + If seed is an int, return a new RandomState instance seeded with seed. + If None, use np.random.RandomState. Default is None. + + Returns + ------- + rvs : ndarray + The random variates distributed according to the probability + distribution defined by the pdf. + + Notes + ----- + Given a univariate probability density function `pdf` and a constant `c`, + define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``. + If `(U, V)` is a random vector uniformly distributed over `A`, + then `V/U + c` follows a distribution according to `pdf`. + + The above result (see [1]_, [2]_) can be used to sample random variables + using only the pdf, i.e. 
no inversion of the cdf is required. Typical + choices of `c` are zero or the mode of `pdf`. The set `A` is a subset of + the rectangle ``R = [0, umax] x [vmin, vmax]`` where + + - ``umax = sup sqrt(pdf(x))`` + - ``vmin = inf (x - c) sqrt(pdf(x))`` + - ``vmax = sup (x - c) sqrt(pdf(x))`` + + In particular, these values are finite if `pdf` is bounded and + ``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails). + One can generate `(U, V)` uniformly on `R` and return + `V/U + c` if `(U, V)` are also in `A` which can be directly + verified. + + Intuitively, the method works well if `A` fills up most of the + enclosing rectangle such that the probability is high that `(U, V)` + lies in `A` whenever it lies in `R` as the number of required + iterations becomes too large otherwise. To be more precise, note that + the expected number of iterations to draw `(U, V)` uniformly + distributed on `R` such that `(U, V)` is also in `A` is given by + the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin)``, using the fact + that the area of `A` is equal to 1/2 (Theorem 7.1 in [1]_). A warning + is displayed if this ratio is larger than 20. Moreover, if the sampling + fails to generate a single random variate after 50000 iterations (i.e. + not a single draw is in `A`), an exception is raised. + + If the bounding rectangle is not correctly specified (i.e. if it does not + contain `A`), the algorithm samples from a distribution different from + the one given by `pdf`. It is therefore recommended to perform a + test such as `stats.kstest` as a check. + + References + ---------- + .. [1] L. Devroye, "Non-Uniform Random Variate Generation", + Springer-Verlag, 1986. + + .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian + random variates", Statistics and Computing, 24(4), p. 547--557, 2014. + + .. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random + Variables Using the Ratio of Uniform Deviates", + ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977. + + Examples + -------- + >>> from scipy import stats + + Simulate normally distributed random variables. It is easy to compute the + bounding rectangle explicitly in that case. + + >>> f = stats.norm.pdf + >>> v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + >>> umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound + >>> np.random.seed(12345) + >>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500) + + The K-S test confirms that the random variates are indeed normally + distributed (normality is not rejected at 5% significance level): + + >>> stats.kstest(rvs, 'norm')[1] + 0.3420173467307603 + + The exponential distribution provides another example where the bounding + rectangle can be determined explicitly. + + >>> np.random.seed(12345) + >>> rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1, + ... vmin=0, vmax=2*np.exp(-1), size=1000) + >>> stats.kstest(rvs, 'expon')[1] + 0.928454552559516 + + Sometimes it can be helpful to use a non-zero shift parameter `c`, see e.g. + [2]_ above in the case of the generalized inverse Gaussian distribution. 
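+
+    When the bounding rectangle is awkward to derive analytically, it can
+    be estimated numerically from the formulas in the Notes above. This is
+    a rough sketch: the grid and the 0.1% safety margins are heuristic
+    choices, and the grid must cover the support well enough.
+
+    >>> f = stats.gamma(2).pdf
+    >>> c = 1.0  # shift by the mode of gamma(a=2)
+    >>> x = np.linspace(0, 50, 100001)
+    >>> umax = 1.001 * np.sqrt(f(x)).max()
+    >>> v = (x - c) * np.sqrt(f(x))
+    >>> vmin, vmax = 1.001 * v.min(), 1.001 * v.max()
+    >>> np.random.seed(12345)
+    >>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=1000)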
+ + """ + + if vmin >= vmax: + raise ValueError("vmin must be smaller than vmax.") + + if umax <= 0: + raise ValueError("umax must be positive.") + + exp_iter = 2 * (vmax - vmin) * umax # rejection constant (see [1]) + if exp_iter > 20: + msg = ("The expected number of iterations to generate a single random " + "number from the desired distribution is larger than {}, " + "potentially causing bad performance.".format(int(exp_iter))) + warnings.warn(msg, RuntimeWarning) + + size1d = tuple(np.atleast_1d(size)) + N = np.prod(size1d) # number of rvs needed, reshape upon return + + # start sampling using ratio of uniforms method + rng = check_random_state(random_state) + x = np.zeros(N) + simulated, i = 0, 1 + + # loop until N rvs have been generated: expected runtime is finite + # to avoid infinite loop, raise exception if not a single rv has been + # generated after 50000 tries. even if exp_iter = 1000, probability of + # this event is (1-1/1000)**50000 which is of order 10e-22 + while True: + k = N - simulated + # simulate uniform rvs on [0, umax] and [vmin, vmax] + u1 = umax * rng.random_sample(size=k) + v1 = vmin + (vmax - vmin) * rng.random_sample(size=k) + # apply rejection method + rvs = v1 / u1 + c + accept = (u1**2 <= pdf(rvs)) + num_accept = np.sum(accept) + if num_accept > 0: + take = min(num_accept, N - simulated) + x[simulated:(simulated + take)] = rvs[accept][0:take] + simulated += take + if simulated >= N: + return np.reshape(x, size1d) + if (simulated == 0) and (i*N >= 50000): + msg = ("Not a single random variate could be generated in {} " + "attempts. The ratio of uniforms method does not appear " + "to work for the provided parameters. Please check the " + "pdf and the bounds.".format(i*N)) + raise RuntimeError(msg) + i += 1 diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_rvs_sampling.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_rvs_sampling.pyc new file mode 100644 index 0000000..1bb24b6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_rvs_sampling.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_stats.so b/project/venv/lib/python2.7/site-packages/scipy/stats/_stats.so new file mode 100755 index 0000000..34f6392 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_stats.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_stats_mstats_common.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_stats_mstats_common.py new file mode 100644 index 0000000..8c17b70 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_stats_mstats_common.py @@ -0,0 +1,390 @@ +from collections import namedtuple + +import numpy as np + +from . import distributions + + +__all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes'] + +LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept', + 'rvalue', 'pvalue', + 'stderr')) + + +def linregress(x, y=None): + """ + Calculate a linear least-squares regression for two sets of measurements. + + Parameters + ---------- + x, y : array_like + Two sets of measurements. Both arrays should have the same length. + If only x is given (and y=None), then it must be a two-dimensional + array where one dimension has length 2. The two sets of measurements + are then found by splitting the array along the length-2 dimension. 
+ + Returns + ------- + slope : float + slope of the regression line + intercept : float + intercept of the regression line + rvalue : float + correlation coefficient + pvalue : float + two-sided p-value for a hypothesis test whose null hypothesis is + that the slope is zero, using Wald Test with t-distribution of + the test statistic. + stderr : float + Standard error of the estimated gradient. + + See also + -------- + :func:`scipy.optimize.curve_fit` : Use non-linear + least squares to fit a function to data. + :func:`scipy.optimize.leastsq` : Minimize the sum of + squares of a set of equations. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from scipy import stats + + Generate some data: + + >>> np.random.seed(12345678) + >>> x = np.random.random(10) + >>> y = 1.6*x + np.random.random(10) + + Perform the linear regression: + + >>> slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) + >>> print("slope: %f intercept: %f" % (slope, intercept)) + slope: 1.944864 intercept: 0.268578 + + To get coefficient of determination (r_squared): + + >>> print("r-squared: %f" % r_value**2) + r-squared: 0.735498 + + Plot the data along with the fitted line: + + >>> plt.plot(x, y, 'o', label='original data') + >>> plt.plot(x, intercept + slope*x, 'r', label='fitted line') + >>> plt.legend() + >>> plt.show() + + """ + TINY = 1.0e-20 + if y is None: # x is a (2, N) or (N, 2) shaped array_like + x = np.asarray(x) + if x.shape[0] == 2: + x, y = x + elif x.shape[1] == 2: + x, y = x.T + else: + msg = ("If only `x` is given as input, it has to be of shape " + "(2, N) or (N, 2), provided shape was %s" % str(x.shape)) + raise ValueError(msg) + else: + x = np.asarray(x) + y = np.asarray(y) + + if x.size == 0 or y.size == 0: + raise ValueError("Inputs must not be empty.") + + n = len(x) + xmean = np.mean(x, None) + ymean = np.mean(y, None) + + # average sum of squares: + ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat + r_num = ssxym + r_den = np.sqrt(ssxm * ssym) + if r_den == 0.0: + r = 0.0 + else: + r = r_num / r_den + # test for numerical error propagation + if r > 1.0: + r = 1.0 + elif r < -1.0: + r = -1.0 + + df = n - 2 + slope = r_num / ssxm + intercept = ymean - slope*xmean + if n == 2: + # handle case when only two points are passed in + if y[0] == y[1]: + prob = 1.0 + else: + prob = 0.0 + sterrest = 0.0 + else: + t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY))) + prob = 2 * distributions.t.sf(np.abs(t), df) + sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df) + + return LinregressResult(slope, intercept, r, prob, sterrest) + + +def theilslopes(y, x=None, alpha=0.95): + r""" + Computes the Theil-Sen estimator for a set of points (x, y). + + `theilslopes` implements a method for robust linear regression. It + computes the slope as the median of all slopes between paired values. + + Parameters + ---------- + y : array_like + Dependent variable. + x : array_like or None, optional + Independent variable. If None, use ``arange(len(y))`` instead. + alpha : float, optional + Confidence degree between 0 and 1. Default is 95% confidence. + Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are + interpreted as "find the 90% confidence interval". + + Returns + ------- + medslope : float + Theil slope. + medintercept : float + Intercept of the Theil line, as ``median(y) - medslope*median(x)``. + lo_slope : float + Lower bound of the confidence interval on `medslope`. + up_slope : float + Upper bound of the confidence interval on `medslope`. 
+ + See also + -------- + siegelslopes : a similar technique using repeated medians + + Notes + ----- + The implementation of `theilslopes` follows [1]_. The intercept is + not defined in [1]_, and here it is defined as ``median(y) - + medslope*median(x)``, which is given in [3]_. Other definitions of + the intercept exist in the literature. A confidence interval for + the intercept is not given as this question is not addressed in + [1]_. + + References + ---------- + .. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau", + J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968. + .. [2] H. Theil, "A rank-invariant method of linear and polynomial + regression analysis I, II and III", Nederl. Akad. Wetensch., Proc. + 53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950. + .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed., + John Wiley and Sons, New York, pp. 493. + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-5, 5, num=150) + >>> y = x + np.random.normal(size=x.size) + >>> y[11:15] += 10 # add outliers + >>> y[-5:] -= 7 + + Compute the slope, intercept and 90% confidence interval. For comparison, + also compute the least-squares fit with `linregress`: + + >>> res = stats.theilslopes(y, x, 0.90) + >>> lsq_res = stats.linregress(x, y) + + Plot the results. The Theil-Sen regression line is shown in red, with the + dashed red lines illustrating the confidence interval of the slope (note + that the dashed red lines are not the confidence interval of the regression + as the confidence interval of the intercept is not included). The green + line shows the least-squares fit for comparison. + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, y, 'b.') + >>> ax.plot(x, res[1] + res[0] * x, 'r-') + >>> ax.plot(x, res[1] + res[2] * x, 'r--') + >>> ax.plot(x, res[1] + res[3] * x, 'r--') + >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-') + >>> plt.show() + + """ + # We copy both x and y so we can use _find_repeats. + y = np.array(y).flatten() + if x is None: + x = np.arange(len(y), dtype=float) + else: + x = np.array(x, dtype=float).flatten() + if len(x) != len(y): + raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x))) + + # Compute sorted slopes only when deltax > 0 + deltax = x[:, np.newaxis] - x + deltay = y[:, np.newaxis] - y + slopes = deltay[deltax > 0] / deltax[deltax > 0] + slopes.sort() + medslope = np.median(slopes) + medinter = np.median(y) - medslope * np.median(x) + # Now compute confidence intervals + if alpha > 0.5: + alpha = 1. - alpha + + z = distributions.norm.ppf(alpha / 2.) + # This implements (2.6) from Sen (1968) + _, nxreps = _find_repeats(x) + _, nyreps = _find_repeats(y) + nt = len(slopes) # N in Sen (1968) + ny = len(y) # n in Sen (1968) + # Equation 2.6 in Sen (1968): + sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) - + sum(k * (k-1) * (2*k + 5) for k in nxreps) - + sum(k * (k-1) * (2*k + 5) for k in nyreps)) + # Find the confidence interval indices in `slopes` + sigma = np.sqrt(sigsq) + Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1) + Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0) + delta = slopes[[Rl, Ru]] + return medslope, medinter, delta[0], delta[1] + + +def _find_repeats(arr): + # This function assumes it may clobber its input. + if len(arr) == 0: + return np.array(0, np.float64), np.array(0, np.intp) + + # XXX This cast was previously needed for the Fortran implementation, + # should we ditch it? 
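+    # The cast-and-sort below groups equal values together; repeats are
+    # then found by comparing neighbouring elements, as in np.unique.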
+ arr = np.asarray(arr, np.float64).ravel() + arr.sort() + + # Taken from NumPy 1.9's np.unique. + change = np.concatenate(([True], arr[1:] != arr[:-1])) + unique = arr[change] + change_idx = np.concatenate(np.nonzero(change) + ([arr.size],)) + freq = np.diff(change_idx) + atleast2 = freq > 1 + return unique[atleast2], freq[atleast2] + + +def siegelslopes(y, x=None, method="hierarchical"): + r""" + Computes the Siegel estimator for a set of points (x, y). + + `siegelslopes` implements a method for robust linear regression + using repeated medians (see [1]_) to fit a line to the points (x, y). + The method is robust to outliers with an asymptotic breakdown point + of 50%. + + Parameters + ---------- + y : array_like + Dependent variable. + x : array_like or None, optional + Independent variable. If None, use ``arange(len(y))`` instead. + method : {'hierarchical', 'separate'} + If 'hierarchical', estimate the intercept using the estimated + slope ``medslope`` (default option). + If 'separate', estimate the intercept independent of the estimated + slope. See Notes for details. + + Returns + ------- + medslope : float + Estimate of the slope of the regression line. + medintercept : float + Estimate of the intercept of the regression line. + + See also + -------- + theilslopes : a similar technique without repeated medians + + Notes + ----- + With ``n = len(y)``, compute ``m_j`` as the median of + the slopes from the point ``(x[j], y[j])`` to all other `n-1` points. + ``medslope`` is then the median of all slopes ``m_j``. + Two ways are given to estimate the intercept in [1]_ which can be chosen + via the parameter ``method``. + The hierarchical approach uses the estimated slope ``medslope`` + and computes ``medintercept`` as the median of ``y - medslope*x``. + The other approach estimates the intercept separately as follows: for + each point ``(x[j], y[j])``, compute the intercepts of all the `n-1` + lines through the remaining points and take the median ``i_j``. + ``medintercept`` is the median of the ``i_j``. + + The implementation computes `n` times the median of a vector of size `n` + which can be slow for large vectors. There are more efficient algorithms + (see [2]_) which are not implemented here. + + References + ---------- + .. [1] A. Siegel, "Robust Regression Using Repeated Medians", + Biometrika, Vol. 69, pp. 242-244, 1982. + + .. [2] A. Stein and M. Werman, "Finding the repeated median regression + line", Proceedings of the Third Annual ACM-SIAM Symposium on + Discrete Algorithms, pp. 409-413, 1992. + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-5, 5, num=150) + >>> y = x + np.random.normal(size=x.size) + >>> y[11:15] += 10 # add outliers + >>> y[-5:] -= 7 + + Compute the slope and intercept. For comparison, also compute the + least-squares fit with `linregress`: + + >>> res = stats.siegelslopes(y, x) + >>> lsq_res = stats.linregress(x, y) + + Plot the results. The Siegel regression line is shown in red. The green + line shows the least-squares fit for comparison. 
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(x, y, 'b.')
+    >>> ax.plot(x, res[1] + res[0] * x, 'r-')
+    >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
+    >>> plt.show()
+
+    """
+    if method not in ['hierarchical', 'separate']:
+        raise ValueError("method can only be 'hierarchical' or 'separate'")
+    y = np.asarray(y).ravel()
+    if x is None:
+        x = np.arange(len(y), dtype=float)
+    else:
+        x = np.asarray(x, dtype=float).ravel()
+    if len(x) != len(y):
+        raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
+
+    deltax = x[:, np.newaxis] - x
+    deltay = y[:, np.newaxis] - y
+    slopes, intercepts = [], []
+
+    for j in range(len(x)):
+        id_nonzero = deltax[j, :] != 0
+        slopes_j = deltay[j, id_nonzero] / deltax[j, id_nonzero]
+        medslope_j = np.median(slopes_j)
+        slopes.append(medslope_j)
+        if method == 'separate':
+            z = y*x[j] - y[j]*x
+            medintercept_j = np.median(z[id_nonzero] / deltax[j, id_nonzero])
+            intercepts.append(medintercept_j)
+
+    medslope = np.median(np.asarray(slopes))
+    if method == "separate":
+        medinter = np.median(np.asarray(intercepts))
+    else:
+        medinter = np.median(y - medslope*x)
+
+    return medslope, medinter
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_stats_mstats_common.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_stats_mstats_common.pyc
new file mode 100644
index 0000000..19497df
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_stats_mstats_common.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_tukeylambda_stats.py b/project/venv/lib/python2.7/site-packages/scipy/stats/_tukeylambda_stats.py
new file mode 100644
index 0000000..57b5e92
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/_tukeylambda_stats.py
@@ -0,0 +1,201 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy import poly1d
+from scipy.special import beta
+
+
+# The following code was used to generate the Pade coefficients for the
+# Tukey Lambda variance function. Version 0.17 of mpmath was used.
+#---------------------------------------------------------------------------
+# import mpmath as mp
+#
+# mp.mp.dps = 60
+#
+# one = mp.mpf(1)
+# two = mp.mpf(2)
+# three = mp.mpf(3)
+#
+# def mpvar(lam):
+#     if lam == 0:
+#         v = mp.pi**2 / three
+#     else:
+#         v = (two / lam**2) * (one / (one + two*lam) -
+#                               mp.beta(lam + one, lam + one))
+#     return v
+#
+# t = mp.taylor(mpvar, 0, 8)
+# p, q = mp.pade(t, 4, 4)
+# print("p =", [mp.fp.mpf(c) for c in p])
+# print("q =", [mp.fp.mpf(c) for c in q])
+#---------------------------------------------------------------------------
+
+# Pade coefficients for the Tukey Lambda variance function.
+_tukeylambda_var_pc = [3.289868133696453, 0.7306125098871127,
+                       -0.5370742306855439, 0.17292046290190008,
+                       -0.02371146284628187]
+_tukeylambda_var_qc = [1.0, 3.683605511659861, 4.184152498888124,
+                       1.7660926747377275, 0.2643989311168465]
+
+# numpy.poly1d instances for the numerator and denominator of the
+# Pade approximation to the Tukey Lambda variance.
+_tukeylambda_var_p = poly1d(_tukeylambda_var_pc[::-1])
+_tukeylambda_var_q = poly1d(_tukeylambda_var_qc[::-1])
+
+
+def tukeylambda_variance(lam):
+    """Variance of the Tukey Lambda distribution.
+
+    Parameters
+    ----------
+    lam : array_like
+        The lambda values at which to compute the variance.
+
+    Returns
+    -------
+    v : ndarray
+        The variance. For lam < -0.5, the variance is not defined, so
+        np.nan is returned. For lam = -0.5, np.inf is returned.
+
+    Notes
+    -----
+    In an interval around lambda=0, this function uses the [4,4] Pade
+    approximation to compute the variance. Otherwise it uses the standard
+    formula (https://en.wikipedia.org/wiki/Tukey_lambda_distribution). The
+    Pade approximation is used because the standard formula has a removable
+    discontinuity at lambda = 0, and does not produce accurate numerical
+    results near lambda = 0.
+    """
+    lam = np.asarray(lam)
+    shp = lam.shape
+    lam = np.atleast_1d(lam).astype(np.float64)
+
+    # For absolute values of lam less than threshold, use the Pade
+    # approximation.
+    threshold = 0.075
+
+    # Play games with masks to implement the conditional evaluation of
+    # the distribution.
+    # lambda < -0.5:  var = nan
+    low_mask = lam < -0.5
+    # lambda == -0.5: var = inf
+    neghalf_mask = lam == -0.5
+    # abs(lambda) < threshold:  use Pade approximation
+    small_mask = np.abs(lam) < threshold
+    # else the "regular" case:  use the explicit formula.
+    reg_mask = ~(low_mask | neghalf_mask | small_mask)
+
+    # Get the 'lam' values for the cases where they are needed.
+    small = lam[small_mask]
+    reg = lam[reg_mask]
+
+    # Compute the function for each case.
+    v = np.empty_like(lam)
+    v[low_mask] = np.nan
+    v[neghalf_mask] = np.inf
+    if small.size > 0:
+        # Use the Pade approximation near lambda = 0.
+        v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)
+    if reg.size > 0:
+        v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) -
+                                        beta(reg + 1, reg + 1))
+    v.shape = shp
+    return v
+
+
+# The following code was used to generate the Pade coefficients for the
+# Tukey Lambda kurtosis function. Version 0.17 of mpmath was used.
+#---------------------------------------------------------------------------
+# import mpmath as mp
+#
+# mp.mp.dps = 60
+#
+# one = mp.mpf(1)
+# two = mp.mpf(2)
+# three = mp.mpf(3)
+# four = mp.mpf(4)
+#
+# def mpkurt(lam):
+#     if lam == 0:
+#         k = mp.mpf(6)/5
+#     else:
+#         numer = (one/(four*lam+one) - four*mp.beta(three*lam+one, lam+one) +
+#                  three*mp.beta(two*lam+one, two*lam+one))
+#         denom = two*(one/(two*lam+one) - mp.beta(lam+one,lam+one))**2
+#         k = numer / denom - three
+#     return k
+#
+# # There is a bug in mpmath 0.17: when we use the 'method' keyword of the
+# # taylor function and we request a degree 9 Taylor polynomial, we actually
+# # get degree 8.
+# t = mp.taylor(mpkurt, 0, 9, method='quad', radius=0.01)
+# t = [mp.chop(c, tol=1e-15) for c in t]
+# p, q = mp.pade(t, 4, 4)
+# print("p =", [mp.fp.mpf(c) for c in p])
+# print("q =", [mp.fp.mpf(c) for c in q])
+#---------------------------------------------------------------------------
+
+# Pade coefficients for the Tukey Lambda kurtosis function.
+_tukeylambda_kurt_pc = [1.2, -5.853465139719495, -22.653447381131077,
+                        0.20601184383406815, 4.59796302262789]
+_tukeylambda_kurt_qc = [1.0, 7.171149192233599, 12.96663094361842,
+                        0.43075235247853005, -2.789746758009912]
+
+# numpy.poly1d instances for the numerator and denominator of the
+# Pade approximation to the Tukey Lambda kurtosis.
+_tukeylambda_kurt_p = poly1d(_tukeylambda_kurt_pc[::-1])
+_tukeylambda_kurt_q = poly1d(_tukeylambda_kurt_qc[::-1])
+
+
+def tukeylambda_kurtosis(lam):
+    """Kurtosis of the Tukey Lambda distribution.
+
+    Parameters
+    ----------
+    lam : array_like
+        The lambda values at which to compute the kurtosis.
+
+    Returns
+    -------
+    k : ndarray
+        The kurtosis. For lam < -0.25, the kurtosis is not defined, so
+        np.nan is returned. For lam = -0.25, np.inf is returned.
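+
+    Examples
+    --------
+    A sanity check at ``lam = 0`` (the logistic case), where the exact
+    excess kurtosis is ``6/5``; near zero the value comes from the Pade
+    approximation, whose constant term is exactly 1.2:
+
+    >>> from scipy.stats._tukeylambda_stats import tukeylambda_kurtosis
+    >>> float(tukeylambda_kurtosis(0.0))
+    1.2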
+ + """ + lam = np.asarray(lam) + shp = lam.shape + lam = np.atleast_1d(lam).astype(np.float64) + + # For absolute values of lam less than threshold, use the Pade + # approximation. + threshold = 0.055 + + # Use masks to implement the conditional evaluation of the kurtosis. + # lambda < -0.25: kurtosis = nan + low_mask = lam < -0.25 + # lambda == -0.25: kurtosis = inf + negqrtr_mask = lam == -0.25 + # lambda near 0: use Pade approximation + small_mask = np.abs(lam) < threshold + # else the "regular" case: use the explicit formula. + reg_mask = ~(low_mask | negqrtr_mask | small_mask) + + # Get the 'lam' values for the cases where they are needed. + small = lam[small_mask] + reg = lam[reg_mask] + + # Compute the function for each case. + k = np.empty_like(lam) + k[low_mask] = np.nan + k[negqrtr_mask] = np.inf + if small.size > 0: + k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small) + if reg.size > 0: + numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) + + 3 * beta(2 * reg + 1, 2 * reg + 1)) + denom = 2 * (1.0/(2 * reg + 1) - beta(reg + 1, reg + 1))**2 + k[reg_mask] = numer / denom - 3 + + # The return value will be a numpy array; resetting the shape ensures that + # if `lam` was a scalar, the return value is a 0-d array. + k.shape = shp + return k diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/_tukeylambda_stats.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/_tukeylambda_stats.pyc new file mode 100644 index 0000000..eda3868 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/_tukeylambda_stats.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/contingency.py b/project/venv/lib/python2.7/site-packages/scipy/stats/contingency.py new file mode 100644 index 0000000..fde8daa --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/contingency.py @@ -0,0 +1,274 @@ +"""Some functions for working with contingency tables (i.e. cross tabulations). +""" + + +from __future__ import division, print_function, absolute_import + +from functools import reduce +import numpy as np +from .stats import power_divergence + + +__all__ = ['margins', 'expected_freq', 'chi2_contingency'] + + +def margins(a): + """Return a list of the marginal sums of the array `a`. + + Parameters + ---------- + a : ndarray + The array for which to compute the marginal sums. + + Returns + ------- + margsums : list of ndarrays + A list of length `a.ndim`. `margsums[k]` is the result + of summing `a` over all axes except `k`; it has the same + number of dimensions as `a`, but the length of each axis + except axis `k` will be 1. + + Examples + -------- + >>> a = np.arange(12).reshape(2, 6) + >>> a + array([[ 0, 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10, 11]]) + >>> m0, m1 = margins(a) + >>> m0 + array([[15], + [51]]) + >>> m1 + array([[ 6, 8, 10, 12, 14, 16]]) + + >>> b = np.arange(24).reshape(2,3,4) + >>> m0, m1, m2 = margins(b) + >>> m0 + array([[[ 66]], + [[210]]]) + >>> m1 + array([[[ 60], + [ 92], + [124]]]) + >>> m2 + array([[[60, 66, 72, 78]]]) + """ + margsums = [] + ranged = list(range(a.ndim)) + for k in ranged: + marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k]) + margsums.append(marg) + return margsums + + +def expected_freq(observed): + """ + Compute the expected frequencies from a contingency table. 
+ + Given an n-dimensional contingency table of observed frequencies, + compute the expected frequencies for the table based on the marginal + sums under the assumption that the groups associated with each + dimension are independent. + + Parameters + ---------- + observed : array_like + The table of observed frequencies. (While this function can handle + a 1-D array, that case is trivial. Generally `observed` is at + least 2-D.) + + Returns + ------- + expected : ndarray of float64 + The expected frequencies, based on the marginal sums of the table. + Same shape as `observed`. + + Examples + -------- + >>> observed = np.array([[10, 10, 20],[20, 20, 20]]) + >>> from scipy.stats import expected_freq + >>> expected_freq(observed) + array([[ 12., 12., 16.], + [ 18., 18., 24.]]) + + """ + # Typically `observed` is an integer array. If `observed` has a large + # number of dimensions or holds large values, some of the following + # computations may overflow, so we first switch to floating point. + observed = np.asarray(observed, dtype=np.float64) + + # Create a list of the marginal sums. + margsums = margins(observed) + + # Create the array of expected frequencies. The shapes of the + # marginal sums returned by apply_over_axes() are just what we + # need for broadcasting in the following product. + d = observed.ndim + expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1) + return expected + + +def chi2_contingency(observed, correction=True, lambda_=None): + """Chi-square test of independence of variables in a contingency table. + + This function computes the chi-square statistic and p-value for the + hypothesis test of independence of the observed frequencies in the + contingency table [1]_ `observed`. The expected frequencies are computed + based on the marginal sums under the assumption of independence; see + `scipy.stats.contingency.expected_freq`. The number of degrees of + freedom is (expressed using numpy functions and attributes):: + + dof = observed.size - sum(observed.shape) + observed.ndim - 1 + + + Parameters + ---------- + observed : array_like + The contingency table. The table contains the observed frequencies + (i.e. number of occurrences) in each category. In the two-dimensional + case, the table is often described as an "R x C table". + correction : bool, optional + If True, *and* the degrees of freedom is 1, apply Yates' correction + for continuity. The effect of the correction is to adjust each + observed value by 0.5 towards the corresponding expected value. + lambda_ : float or str, optional. + By default, the statistic computed in this test is Pearson's + chi-squared statistic [2]_. `lambda_` allows a statistic from the + Cressie-Read power divergence family [3]_ to be used instead. See + `power_divergence` for details. + + Returns + ------- + chi2 : float + The test statistic. + p : float + The p-value of the test + dof : int + Degrees of freedom + expected : ndarray, same shape as `observed` + The expected frequencies, based on the marginal sums of the table. + + See Also + -------- + contingency.expected_freq + fisher_exact + chisquare + power_divergence + + Notes + ----- + An often quoted guideline for the validity of this calculation is that + the test should be used only if the observed and expected frequencies + in each cell are at least 5. + + This is a test for the independence of different categories of a + population. The test is only meaningful when the dimension of + `observed` is two or more. 
Applying the test to a one-dimensional + table will always result in `expected` equal to `observed` and a + chi-square statistic equal to 0. + + This function does not handle masked arrays, because the calculation + does not make sense with missing values. + + Like stats.chisquare, this function computes a chi-square statistic; + the convenience this function provides is to figure out the expected + frequencies and degrees of freedom from the given contingency table. + If these were already known, and if the Yates' correction was not + required, one could use stats.chisquare. That is, if one calls:: + + chi2, p, dof, ex = chi2_contingency(obs, correction=False) + + then the following is true:: + + (chi2, p) == stats.chisquare(obs.ravel(), f_exp=ex.ravel(), + ddof=obs.size - 1 - dof) + + The `lambda_` argument was added in version 0.13.0 of scipy. + + References + ---------- + .. [1] "Contingency table", + https://en.wikipedia.org/wiki/Contingency_table + .. [2] "Pearson's chi-squared test", + https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test + .. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit + Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984), + pp. 440-464. + + Examples + -------- + A two-way example (2 x 3): + + >>> from scipy.stats import chi2_contingency + >>> obs = np.array([[10, 10, 20], [20, 20, 20]]) + >>> chi2_contingency(obs) + (2.7777777777777777, + 0.24935220877729619, + 2, + array([[ 12., 12., 16.], + [ 18., 18., 24.]])) + + Perform the test using the log-likelihood ratio (i.e. the "G-test") + instead of Pearson's chi-squared statistic. + + >>> g, p, dof, expctd = chi2_contingency(obs, lambda_="log-likelihood") + >>> g, p + (2.7688587616781319, 0.25046668010954165) + + A four-way example (2 x 2 x 2 x 2): + + >>> obs = np.array( + ... [[[[12, 17], + ... [11, 16]], + ... [[11, 12], + ... [15, 16]]], + ... [[[23, 15], + ... [30, 22]], + ... [[14, 17], + ... [15, 16]]]]) + >>> chi2_contingency(obs) + (8.7584514426741897, + 0.64417725029295503, + 11, + array([[[[ 14.15462386, 14.15462386], + [ 16.49423111, 16.49423111]], + [[ 11.2461395 , 11.2461395 ], + [ 13.10500554, 13.10500554]]], + [[[ 19.5591166 , 19.5591166 ], + [ 22.79202844, 22.79202844]], + [[ 15.54012004, 15.54012004], + [ 18.10873492, 18.10873492]]]])) + """ + observed = np.asarray(observed) + if np.any(observed < 0): + raise ValueError("All values in `observed` must be nonnegative.") + if observed.size == 0: + raise ValueError("No data; `observed` has size 0.") + + expected = expected_freq(observed) + if np.any(expected == 0): + # Include one of the positions where expected is zero in + # the exception message. + zeropos = list(zip(*np.nonzero(expected == 0)))[0] + raise ValueError("The internally computed table of expected " + "frequencies has a zero element at %s." % (zeropos,)) + + # The degrees of freedom + dof = expected.size - sum(expected.shape) + expected.ndim - 1 + + if dof == 0: + # Degenerate case; this occurs when `observed` is 1D (or, more + # generally, when it has only one nontrivial dimension). In this + # case, we also have observed == expected, so chi2 is 0. + chi2 = 0.0 + p = 1.0 + else: + if dof == 1 and correction: + # Adjust `observed` according to Yates' correction for continuity. 
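+            # Each observed count moves 0.5 towards its expected value:
+            # np.sign(expected - observed) is +1 where a cell is below its
+            # expected frequency and -1 where it is above, so the
+            # adjustment always shrinks |observed - expected| and with it
+            # the statistic. (Illustrative numbers: a cell with observed
+            # count 10 and expected frequency 12 becomes 10.5.)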
+ observed = observed + 0.5 * np.sign(expected - observed) + + chi2, p = power_divergence(observed, expected, + ddof=observed.size - 1 - dof, axis=None, + lambda_=lambda_) + + return chi2, p, dof, expected diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/contingency.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/contingency.pyc new file mode 100644 index 0000000..99deeb4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/contingency.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/distributions.py b/project/venv/lib/python2.7/site-packages/scipy/stats/distributions.py new file mode 100644 index 0000000..d37e251 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/distributions.py @@ -0,0 +1,24 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +# NOTE: To look at history using `git blame`, use `git blame -M -C -C` +# instead of `git blame -Lxxx,+x`. +# +from __future__ import division, print_function, absolute_import + +from ._distn_infrastructure import (entropy, rv_discrete, rv_continuous, + rv_frozen) + +from . import _continuous_distns +from . import _discrete_distns + +from ._continuous_distns import * +from ._discrete_distns import * + +# For backwards compatibility e.g. pymc expects distributions.__all__. +__all__ = ['entropy', 'rv_discrete', 'rv_continuous', 'rv_histogram'] + +# Add only the distribution names, not the *_gen names. +__all__ += _continuous_distns._distn_names +__all__ += _discrete_distns._distn_names diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/distributions.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/distributions.pyc new file mode 100644 index 0000000..7f32e88 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/distributions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/kde.py b/project/venv/lib/python2.7/site-packages/scipy/stats/kde.py new file mode 100644 index 0000000..b50e2fb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/kde.py @@ -0,0 +1,625 @@ +#------------------------------------------------------------------------------- +# +# Define classes for (uni/multi)-variate kernel density estimation. +# +# Currently, only Gaussian kernels are implemented. +# +# Written by: Robert Kern +# +# Date: 2004-08-09 +# +# Modified: 2005-02-10 by Robert Kern. +# Contributed to Scipy +# 2005-10-07 by Robert Kern. +# Some fixes to match the new scipy_core +# +# Copyright 2004-2005 by Enthought, Inc. +# +#------------------------------------------------------------------------------- + +from __future__ import division, print_function, absolute_import + +# Standard library imports. +import warnings + +# Scipy imports. +from scipy._lib.six import callable, string_types +from scipy import linalg, special +from scipy.special import logsumexp +from scipy._lib._numpy_compat import cov + +from numpy import (atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, + ravel, power, atleast_1d, squeeze, sum, transpose, ones) +import numpy as np +from numpy.random import choice, multivariate_normal + +# Local imports. +from . import mvn + + +__all__ = ['gaussian_kde'] + + +class gaussian_kde(object): + """Representation of a kernel-density estimate using Gaussian kernels. + + Kernel density estimation is a way to estimate the probability density + function (PDF) of a random variable in a non-parametric way. 
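+    Loosely, the estimated density at a point ``x`` is a weighted sum of
+    Gaussian bumps centred on the data points,
+    ``sum_i w_i * N(x; x_i, factor**2 * Sigma)``, where ``Sigma`` is the
+    data covariance and ``factor`` the bandwidth factor (see the Notes
+    section).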
+ `gaussian_kde` works for both uni-variate and multi-variate data. It + includes automatic bandwidth determination. The estimation works best for + a unimodal distribution; bimodal or multi-modal distributions tend to be + oversmoothed. + + Parameters + ---------- + dataset : array_like + Datapoints to estimate from. In case of univariate data this is a 1-D + array, otherwise a 2-D array with shape (# of dims, # of data). + bw_method : str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be + 'scott', 'silverman', a scalar constant or a callable. If a scalar, + this will be used directly as `kde.factor`. If a callable, it should + take a `gaussian_kde` instance as only parameter and return a scalar. + If None (default), 'scott' is used. See Notes for more details. + weights : array_like, optional + weights of datapoints. This must be the same shape as dataset. + If None (default), the samples are assumed to be equally weighted + + Attributes + ---------- + dataset : ndarray + The dataset with which `gaussian_kde` was initialized. + d : int + Number of dimensions. + n : int + Number of datapoints. + neff : int + Effective number of datapoints. + + .. versionadded:: 1.2.0 + factor : float + The bandwidth factor, obtained from `kde.covariance_factor`, with which + the covariance matrix is multiplied. + covariance : ndarray + The covariance matrix of `dataset`, scaled by the calculated bandwidth + (`kde.factor`). + inv_cov : ndarray + The inverse of `covariance`. + + Methods + ------- + evaluate + __call__ + integrate_gaussian + integrate_box_1d + integrate_box + integrate_kde + pdf + logpdf + resample + set_bandwidth + covariance_factor + + Notes + ----- + Bandwidth selection strongly influences the estimate obtained from the KDE + (much more so than the actual shape of the kernel). Bandwidth selection + can be done by a "rule of thumb", by cross-validation, by "plug-in + methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde` + uses a rule of thumb, the default is Scott's Rule. + + Scott's Rule [1]_, implemented as `scotts_factor`, is:: + + n**(-1./(d+4)), + + with ``n`` the number of data points and ``d`` the number of dimensions. + In the case of unequally weighted points, `scotts_factor` becomes:: + + neff**(-1./(d+4)), + + with ``neff`` the effective number of datapoints. + Silverman's Rule [2]_, implemented as `silverman_factor`, is:: + + (n * (d + 2) / 4.)**(-1. / (d + 4)). + + or in the case of unequally weighted points:: + + (neff * (d + 2) / 4.)**(-1. / (d + 4)). + + Good general descriptions of kernel density estimation can be found in [1]_ + and [2]_, the mathematics for this multi-dimensional implementation can be + found in [1]_. + + With a set of weighted samples, the effective number of datapoints ``neff`` + is defined by:: + + neff = sum(weights)^2 / sum(weights^2) + + as detailed in [5]_. + + References + ---------- + .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and + Visualization", John Wiley & Sons, New York, Chicester, 1992. + .. [2] B.W. Silverman, "Density Estimation for Statistics and Data + Analysis", Vol. 26, Monographs on Statistics and Applied Probability, + Chapman and Hall, London, 1986. + .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A + Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993. + .. [4] D.M. Bashtannyk and R.J. 
Hyndman, "Bandwidth selection for kernel + conditional density estimation", Computational Statistics & Data + Analysis, Vol. 36, pp. 279-298, 2001. + .. [5] Gray P. G., 1969, Journal of the Royal Statistical Society. + Series A (General), 132, 272 + + Examples + -------- + Generate some random two-dimensional data: + + >>> from scipy import stats + >>> def measure(n): + ... "Measurement model, return two coupled measurements." + ... m1 = np.random.normal(size=n) + ... m2 = np.random.normal(scale=0.5, size=n) + ... return m1+m2, m1-m2 + + >>> m1, m2 = measure(2000) + >>> xmin = m1.min() + >>> xmax = m1.max() + >>> ymin = m2.min() + >>> ymax = m2.max() + + Perform a kernel density estimate on the data: + + >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j] + >>> positions = np.vstack([X.ravel(), Y.ravel()]) + >>> values = np.vstack([m1, m2]) + >>> kernel = stats.gaussian_kde(values) + >>> Z = np.reshape(kernel(positions).T, X.shape) + + Plot the results: + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, + ... extent=[xmin, xmax, ymin, ymax]) + >>> ax.plot(m1, m2, 'k.', markersize=2) + >>> ax.set_xlim([xmin, xmax]) + >>> ax.set_ylim([ymin, ymax]) + >>> plt.show() + + """ + def __init__(self, dataset, bw_method=None, weights=None): + self.dataset = atleast_2d(dataset) + if not self.dataset.size > 1: + raise ValueError("`dataset` input should have multiple elements.") + + self.d, self.n = self.dataset.shape + + if weights is not None: + self._weights = atleast_1d(weights).astype(float) + self._weights /= sum(self._weights) + if self.weights.ndim != 1: + raise ValueError("`weights` input should be one-dimensional.") + if len(self._weights) != self.n: + raise ValueError("`weights` input should be of length n") + self._neff = 1/sum(self._weights**2) + + self.set_bandwidth(bw_method=bw_method) + + def evaluate(self, points): + """Evaluate the estimated pdf on a set of points. + + Parameters + ---------- + points : (# of dimensions, # of points)-array + Alternatively, a (# of dimensions,) vector can be passed in and + treated as a single point. + + Returns + ------- + values : (# of points,)-array + The values at each point. + + Raises + ------ + ValueError : if the dimensionality of the input points is different than + the dimensionality of the KDE. + + """ + points = atleast_2d(points) + + d, m = points.shape + if d != self.d: + if d == 1 and m == self.d: + # points was passed in as a row vector + points = reshape(points, (self.d, 1)) + m = 1 + else: + msg = "points have dimension %s, dataset has dimension %s" % (d, + self.d) + raise ValueError(msg) + + result = zeros((m,), dtype=float) + + whitening = linalg.cholesky(self.inv_cov) + scaled_dataset = dot(whitening, self.dataset) + scaled_points = dot(whitening, points) + + if m >= self.n: + # there are more points than data, so loop over data + for i in range(self.n): + diff = scaled_dataset[:, i, newaxis] - scaled_points + energy = sum(diff * diff, axis=0) / 2.0 + result += self.weights[i]*exp(-energy) + else: + # loop over points + for i in range(m): + diff = scaled_dataset - scaled_points[:, i, newaxis] + energy = sum(diff * diff, axis=0) / 2.0 + result[i] = sum(exp(-energy)*self.weights, axis=0) + + result = result * self.n / self._norm_factor + + return result + + __call__ = evaluate + + def integrate_gaussian(self, mean, cov): + """ + Multiply estimated density by a multivariate Gaussian and integrate + over the whole space. 
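+
+        Both factors are (mixtures of) Gaussians, so the integral has a
+        closed form: it equals the weighted sum of Gaussian densities with
+        covariance ``self.covariance + cov``, evaluated at the distances
+        between `mean` and the data points, which is what this method
+        computes.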
+ + Parameters + ---------- + mean : aray_like + A 1-D array, specifying the mean of the Gaussian. + cov : array_like + A 2-D array, specifying the covariance matrix of the Gaussian. + + Returns + ------- + result : scalar + The value of the integral. + + Raises + ------ + ValueError + If the mean or covariance of the input Gaussian differs from + the KDE's dimensionality. + + """ + mean = atleast_1d(squeeze(mean)) + cov = atleast_2d(cov) + + if mean.shape != (self.d,): + raise ValueError("mean does not have dimension %s" % self.d) + if cov.shape != (self.d, self.d): + raise ValueError("covariance does not have dimension %s" % self.d) + + # make mean a column vector + mean = mean[:, newaxis] + + sum_cov = self.covariance + cov + + # This will raise LinAlgError if the new cov matrix is not s.p.d + # cho_factor returns (ndarray, bool) where bool is a flag for whether + # or not ndarray is upper or lower triangular + sum_cov_chol = linalg.cho_factor(sum_cov) + + diff = self.dataset - mean + tdiff = linalg.cho_solve(sum_cov_chol, diff) + + sqrt_det = np.prod(np.diagonal(sum_cov_chol[0])) + norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det + + energies = sum(diff * tdiff, axis=0) / 2.0 + result = sum(exp(-energies)*self.weights, axis=0) / norm_const + + return result + + def integrate_box_1d(self, low, high): + """ + Computes the integral of a 1D pdf between two bounds. + + Parameters + ---------- + low : scalar + Lower bound of integration. + high : scalar + Upper bound of integration. + + Returns + ------- + value : scalar + The result of the integral. + + Raises + ------ + ValueError + If the KDE is over more than one dimension. + + """ + if self.d != 1: + raise ValueError("integrate_box_1d() only handles 1D pdfs") + + stdev = ravel(sqrt(self.covariance))[0] + + normalized_low = ravel((low - self.dataset) / stdev) + normalized_high = ravel((high - self.dataset) / stdev) + + value = np.sum(self.weights*( + special.ndtr(normalized_high) - + special.ndtr(normalized_low))) + return value + + def integrate_box(self, low_bounds, high_bounds, maxpts=None): + """Computes the integral of a pdf over a rectangular interval. + + Parameters + ---------- + low_bounds : array_like + A 1-D array containing the lower bounds of integration. + high_bounds : array_like + A 1-D array containing the upper bounds of integration. + maxpts : int, optional + The maximum number of points to use for integration. + + Returns + ------- + value : scalar + The result of the integral. + + """ + if maxpts is not None: + extra_kwds = {'maxpts': maxpts} + else: + extra_kwds = {} + + value, inform = mvn.mvnun_weighted(low_bounds, high_bounds, + self.dataset, self.weights, + self.covariance, **extra_kwds) + if inform: + msg = ('An integral in mvn.mvnun requires more points than %s' % + (self.d * 1000)) + warnings.warn(msg) + + return value + + def integrate_kde(self, other): + """ + Computes the integral of the product of this kernel density estimate + with another. + + Parameters + ---------- + other : gaussian_kde instance + The other kde. + + Returns + ------- + value : scalar + The result of the integral. + + Raises + ------ + ValueError + If the KDEs have different dimensionality. 
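+
+        Notes
+        -----
+        For Gaussian kernels this again reduces to a closed form: each
+        pair of kernels, one centred at a point of this KDE and one at a
+        point of `other`, integrates to a Gaussian density with covariance
+        ``self.covariance + other.covariance`` evaluated at the difference
+        of the two centres; the method sums these pairwise terms with the
+        sample weights.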
+ + """ + if other.d != self.d: + raise ValueError("KDEs are not the same dimensionality") + + # we want to iterate over the smallest number of points + if other.n < self.n: + small = other + large = self + else: + small = self + large = other + + sum_cov = small.covariance + large.covariance + sum_cov_chol = linalg.cho_factor(sum_cov) + result = 0.0 + for i in range(small.n): + mean = small.dataset[:, i, newaxis] + diff = large.dataset - mean + tdiff = linalg.cho_solve(sum_cov_chol, diff) + + energies = sum(diff * tdiff, axis=0) / 2.0 + result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i] + + sqrt_det = np.prod(np.diagonal(sum_cov_chol[0])) + norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det + + result /= norm_const + + return result + + def resample(self, size=None): + """ + Randomly sample a dataset from the estimated pdf. + + Parameters + ---------- + size : int, optional + The number of samples to draw. If not provided, then the size is + the same as the effective number of samples in the underlying + dataset. + + Returns + ------- + resample : (self.d, `size`) ndarray + The sampled dataset. + + """ + if size is None: + size = int(self.neff) + + norm = transpose(multivariate_normal(zeros((self.d,), float), + self.covariance, size=size)) + indices = choice(self.n, size=size, p=self.weights) + means = self.dataset[:, indices] + + return means + norm + + def scotts_factor(self): + return power(self.neff, -1./(self.d+4)) + + def silverman_factor(self): + return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4)) + + # Default method to calculate bandwidth, can be overwritten by subclass + covariance_factor = scotts_factor + covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that + multiplies the data covariance matrix to obtain the kernel covariance + matrix. The default is `scotts_factor`. A subclass can overwrite this + method to provide a different method, or set it through a call to + `kde.set_bandwidth`.""" + + def set_bandwidth(self, bw_method=None): + """Compute the estimator bandwidth with given method. + + The new bandwidth calculated after a call to `set_bandwidth` is used + for subsequent evaluations of the estimated density. + + Parameters + ---------- + bw_method : str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be + 'scott', 'silverman', a scalar constant or a callable. If a + scalar, this will be used directly as `kde.factor`. If a callable, + it should take a `gaussian_kde` instance as only parameter and + return a scalar. If None (default), nothing happens; the current + `kde.covariance_factor` method is kept. + + Notes + ----- + .. versionadded:: 0.11 + + Examples + -------- + >>> import scipy.stats as stats + >>> x1 = np.array([-7, -5, 1, 4, 5.]) + >>> kde = stats.gaussian_kde(x1) + >>> xs = np.linspace(-10, 10, num=50) + >>> y1 = kde(xs) + >>> kde.set_bandwidth(bw_method='silverman') + >>> y2 = kde(xs) + >>> kde.set_bandwidth(bw_method=kde.factor / 3.) + >>> y3 = kde(xs) + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo', + ... 
label='Data points (rescaled)') + >>> ax.plot(xs, y1, label='Scott (default)') + >>> ax.plot(xs, y2, label='Silverman') + >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)') + >>> ax.legend() + >>> plt.show() + + """ + if bw_method is None: + pass + elif bw_method == 'scott': + self.covariance_factor = self.scotts_factor + elif bw_method == 'silverman': + self.covariance_factor = self.silverman_factor + elif np.isscalar(bw_method) and not isinstance(bw_method, string_types): + self._bw_method = 'use constant' + self.covariance_factor = lambda: bw_method + elif callable(bw_method): + self._bw_method = bw_method + self.covariance_factor = lambda: self._bw_method(self) + else: + msg = "`bw_method` should be 'scott', 'silverman', a scalar " \ + "or a callable." + raise ValueError(msg) + + self._compute_covariance() + + def _compute_covariance(self): + """Computes the covariance matrix for each Gaussian kernel using + covariance_factor(). + """ + self.factor = self.covariance_factor() + # Cache covariance and inverse covariance of the data + if not hasattr(self, '_data_inv_cov'): + self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1, + bias=False, + aweights=self.weights)) + self._data_inv_cov = linalg.inv(self._data_covariance) + + self.covariance = self._data_covariance * self.factor**2 + self.inv_cov = self._data_inv_cov / self.factor**2 + self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n + + def pdf(self, x): + """ + Evaluate the estimated pdf on a provided set of points. + + Notes + ----- + This is an alias for `gaussian_kde.evaluate`. See the ``evaluate`` + docstring for more details. + + """ + return self.evaluate(x) + + def logpdf(self, x): + """ + Evaluate the log of the estimated pdf on a provided set of points. + """ + + points = atleast_2d(x) + + d, m = points.shape + if d != self.d: + if d == 1 and m == self.d: + # points was passed in as a row vector + points = reshape(points, (self.d, 1)) + m = 1 + else: + msg = "points have dimension %s, dataset has dimension %s" % (d, + self.d) + raise ValueError(msg) + + result = zeros((m,), dtype=float) + + if m >= self.n: + # there are more points than data, so loop over data + energy = zeros((self.n, m), dtype=float) + for i in range(self.n): + diff = self.dataset[:, i, newaxis] - points + tdiff = dot(self.inv_cov, diff) + energy[i] = sum(diff*tdiff, axis=0) / 2.0 + result = logsumexp(-energy, + b=self.weights[i]*self.n/self._norm_factor, + axis=0) + else: + # loop over points + for i in range(m): + diff = self.dataset - points[:, i, newaxis] + tdiff = dot(self.inv_cov, diff) + energy = sum(diff * tdiff, axis=0) / 2.0 + result[i] = logsumexp(-energy, + b=self.weights*self.n/self._norm_factor) + + return result + + @property + def weights(self): + try: + return self._weights + except AttributeError: + self._weights = ones(self.n)/self.n + return self._weights + + @property + def neff(self): + try: + return self._neff + except AttributeError: + self._neff = 1/sum(self.weights**2) + return self._neff diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/kde.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/kde.pyc new file mode 100644 index 0000000..dd29dbe Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/kde.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/morestats.py b/project/venv/lib/python2.7/site-packages/scipy/stats/morestats.py new file mode 100644 index 0000000..901663d --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/scipy/stats/morestats.py @@ -0,0 +1,3165 @@ +from __future__ import division, print_function, absolute_import + +import math +import warnings +from collections import namedtuple + +import numpy as np +from numpy import (isscalar, r_, log, around, unique, asarray, + zeros, arange, sort, amin, amax, any, atleast_1d, + sqrt, ceil, floor, array, compress, + pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot) + +from scipy._lib.six import string_types +from scipy import optimize +from scipy import special +from . import statlib +from . import stats +from .stats import find_repeats, _contains_nan +from .contingency import chi2_contingency +from . import distributions +from ._distn_infrastructure import rv_generic + + +__all__ = ['mvsdist', + 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', + 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', + 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test', + 'fligner', 'mood', 'wilcoxon', 'median_test', + 'circmean', 'circvar', 'circstd', 'anderson_ksamp', + 'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax', + 'yeojohnson_normplot' + ] + + +Mean = namedtuple('Mean', ('statistic', 'minmax')) +Variance = namedtuple('Variance', ('statistic', 'minmax')) +Std_dev = namedtuple('Std_dev', ('statistic', 'minmax')) + + +def bayes_mvs(data, alpha=0.90): + r""" + Bayesian confidence intervals for the mean, var, and std. + + Parameters + ---------- + data : array_like + Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. + Requires 2 or more data points. + alpha : float, optional + Probability that the returned confidence interval contains + the true parameter. + + Returns + ------- + mean_cntr, var_cntr, std_cntr : tuple + The three results are for the mean, variance and standard deviation, + respectively. Each result is a tuple of the form:: + + (center, (lower, upper)) + + with `center` the mean of the conditional pdf of the value given the + data, and `(lower, upper)` a confidence interval, centered on the + median, containing the estimate to a probability ``alpha``. + + See Also + -------- + mvsdist + + Notes + ----- + Each tuple of mean, variance, and standard deviation estimates represent + the (center, (lower, upper)) with center the mean of the conditional pdf + of the value given the data and (lower, upper) is a confidence interval + centered on the median, containing the estimate to a probability + ``alpha``. + + Converts data to 1-D and assumes all data has the same mean and variance. + Uses Jeffrey's prior for variance and std. + + Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` + + References + ---------- + T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and + standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278, + 2006. 
+ + Examples + -------- + First a basic example to demonstrate the outputs: + + >>> from scipy import stats + >>> data = [6, 9, 12, 7, 8, 8, 13] + >>> mean, var, std = stats.bayes_mvs(data) + >>> mean + Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467)) + >>> var + Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...)) + >>> std + Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631)) + + Now we generate some normally distributed random data, and get estimates of + mean and standard deviation with 95% confidence intervals for those + estimates: + + >>> n_samples = 100000 + >>> data = stats.norm.rvs(size=n_samples) + >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95) + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.hist(data, bins=100, density=True, label='Histogram of data') + >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean') + >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r', + ... alpha=0.2, label=r'Estimated mean (95% limits)') + >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale') + >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2, + ... label=r'Estimated scale (95% limits)') + + >>> ax.legend(fontsize=10) + >>> ax.set_xlim([-4, 4]) + >>> ax.set_ylim([0, 0.5]) + >>> plt.show() + + """ + m, v, s = mvsdist(data) + if alpha >= 1 or alpha <= 0: + raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." + % alpha) + + m_res = Mean(m.mean(), m.interval(alpha)) + v_res = Variance(v.mean(), v.interval(alpha)) + s_res = Std_dev(s.mean(), s.interval(alpha)) + + return m_res, v_res, s_res + + +def mvsdist(data): + """ + 'Frozen' distributions for mean, variance, and standard deviation of data. + + Parameters + ---------- + data : array_like + Input array. Converted to 1-D using ravel. + Requires 2 or more data-points. + + Returns + ------- + mdist : "frozen" distribution object + Distribution object representing the mean of the data + vdist : "frozen" distribution object + Distribution object representing the variance of the data + sdist : "frozen" distribution object + Distribution object representing the standard deviation of the data + + See Also + -------- + bayes_mvs + + Notes + ----- + The return values from ``bayes_mvs(data)`` is equivalent to + ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. + + In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)`` + on the three distribution objects returned from this function will give + the same results that are returned from `bayes_mvs`. + + References + ---------- + T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and + standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278, + 2006. 
+ + Examples + -------- + >>> from scipy import stats + >>> data = [6, 9, 12, 7, 8, 8, 13] + >>> mean, var, std = stats.mvsdist(data) + + We now have frozen distribution objects "mean", "var" and "std" that we can + examine: + + >>> mean.mean() + 9.0 + >>> mean.interval(0.95) + (6.6120585482655692, 11.387941451734431) + >>> mean.std() + 1.1952286093343936 + + """ + x = ravel(data) + n = len(x) + if n < 2: + raise ValueError("Need at least 2 data-points.") + xbar = x.mean() + C = x.var() + if n > 1000: # gaussian approximations for large n + mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n)) + sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n))) + vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C) + else: + nm1 = n - 1 + fac = n * C / 2. + val = nm1 / 2. + mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1)) + sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac)) + vdist = distributions.invgamma(val, scale=fac) + return mdist, vdist, sdist + + +def kstat(data, n=2): + r""" + Return the nth k-statistic (1<=n<=4 so far). + + The nth k-statistic k_n is the unique symmetric unbiased estimator of the + nth cumulant kappa_n. + + Parameters + ---------- + data : array_like + Input array. Note that n-D input gets flattened. + n : int, {1, 2, 3, 4}, optional + Default is equal to 2. + + Returns + ------- + kstat : float + The nth k-statistic. + + See Also + -------- + kstatvar: Returns an unbiased estimator of the variance of the k-statistic. + moment: Returns the n-th central moment about the mean for a sample. + + Notes + ----- + For a sample size n, the first few k-statistics are given by: + + .. math:: + + k_{1} = \mu + k_{2} = \frac{n}{n-1} m_{2} + k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3} + k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)} + + where :math:`\mu` is the sample mean, :math:`m_2` is the sample + variance, and :math:`m_i` is the i-th sample central moment. + + References + ---------- + http://mathworld.wolfram.com/k-Statistic.html + + http://mathworld.wolfram.com/Cumulant.html + + Examples + -------- + >>> from scipy import stats + >>> rndm = np.random.RandomState(1234) + + As sample size increases, n-th moment and n-th k-statistic converge to the + same number (although they aren't identical). In the case of the normal + distribution, they converge to zero. + + >>> for n in [2, 3, 4, 5, 6, 7]: + ... x = rndm.normal(size=10**n) + ... m, k = stats.moment(x, 3), stats.kstat(x, 3) + ... 
print("%.3g %.3g %.3g" % (m, k, m-k)) + -0.631 -0.651 0.0194 + 0.0282 0.0283 -8.49e-05 + -0.0454 -0.0454 1.36e-05 + 7.53e-05 7.53e-05 -2.26e-09 + 0.00166 0.00166 -4.99e-09 + -2.88e-06 -2.88e-06 8.63e-13 + """ + if n > 4 or n < 1: + raise ValueError("k-statistics only supported for 1<=n<=4") + n = int(n) + S = np.zeros(n + 1, np.float64) + data = ravel(data) + N = data.size + + # raise ValueError on empty input + if N == 0: + raise ValueError("Data input must not be empty") + + # on nan input, return nan without warning + if np.isnan(np.sum(data)): + return np.nan + + for k in range(1, n + 1): + S[k] = np.sum(data**k, axis=0) + if n == 1: + return S[1] * 1.0/N + elif n == 2: + return (N*S[2] - S[1]**2.0) / (N*(N - 1.0)) + elif n == 3: + return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0)) + elif n == 4: + return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - + 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / + (N*(N-1.0)*(N-2.0)*(N-3.0))) + else: + raise ValueError("Should not be here.") + + +def kstatvar(data, n=2): + r""" + Returns an unbiased estimator of the variance of the k-statistic. + + See `kstat` for more details of the k-statistic. + + Parameters + ---------- + data : array_like + Input array. Note that n-D input gets flattened. + n : int, {1, 2}, optional + Default is equal to 2. + + Returns + ------- + kstatvar : float + The nth k-statistic variance. + + See Also + -------- + kstat: Returns the n-th k-statistic. + moment: Returns the n-th central moment about the mean for a sample. + + Notes + ----- + The variances of the first few k-statistics are given by: + + .. math:: + + var(k_{1}) = \frac{\kappa^2}{n} + var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1} + var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} + + \frac{9 \kappa^2_{3}}{n - 1} + + \frac{6 n \kappa^3_{2}}{(n-1) (n-2)} + var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} + + \frac{48 \kappa_{3} \kappa_5}{n - 1} + + \frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} + + \frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} + + \frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)} + """ + data = ravel(data) + N = len(data) + if n == 1: + return kstat(data, n=2) * 1.0/N + elif n == 2: + k2 = kstat(data, n=2) + k4 = kstat(data, n=4) + return (2*N*k2**2 + (N-1)*k4) / (N*(N+1)) + else: + raise ValueError("Only n=1 or n=2 supported.") + + +def _calc_uniform_order_statistic_medians(n): + """ + Approximations of uniform order statistic medians. + + Parameters + ---------- + n : int + Sample size. + + Returns + ------- + v : 1d float array + Approximations of the order statistic medians. + + References + ---------- + .. [1] James J. Filliben, "The Probability Plot Correlation Coefficient + Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. + + Examples + -------- + Order statistics of the uniform distribution on the unit interval + are marginally distributed according to beta distributions. 
+ The expectations of these order statistic are evenly spaced across + the interval, but the distributions are skewed in a way that + pushes the medians slightly towards the endpoints of the unit interval: + + >>> n = 4 + >>> k = np.arange(1, n+1) + >>> from scipy.stats import beta + >>> a = k + >>> b = n-k+1 + >>> beta.mean(a, b) + array([ 0.2, 0.4, 0.6, 0.8]) + >>> beta.median(a, b) + array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642]) + + The Filliben approximation uses the exact medians of the smallest + and greatest order statistics, and the remaining medians are approximated + by points spread evenly across a sub-interval of the unit interval: + + >>> from scipy.morestats import _calc_uniform_order_statistic_medians + >>> _calc_uniform_order_statistic_medians(n) + array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642]) + + This plot shows the skewed distributions of the order statistics + of a sample of size four from a uniform distribution on the unit interval: + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0.0, 1.0, num=50, endpoint=True) + >>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)] + >>> plt.figure() + >>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3]) + + """ + v = np.zeros(n, dtype=np.float64) + v[-1] = 0.5**(1.0 / n) + v[0] = 1 - v[-1] + i = np.arange(2, n) + v[1:-1] = (i - 0.3175) / (n + 0.365) + return v + + +def _parse_dist_kw(dist, enforce_subclass=True): + """Parse `dist` keyword. + + Parameters + ---------- + dist : str or stats.distributions instance. + Several functions take `dist` as a keyword, hence this utility + function. + enforce_subclass : bool, optional + If True (default), `dist` needs to be a + `_distn_infrastructure.rv_generic` instance. + It can sometimes be useful to set this keyword to False, if a function + wants to accept objects that just look somewhat like such an instance + (for example, they have a ``ppf`` method). + + """ + if isinstance(dist, rv_generic): + pass + elif isinstance(dist, string_types): + try: + dist = getattr(distributions, dist) + except AttributeError: + raise ValueError("%s is not a valid distribution name" % dist) + elif enforce_subclass: + msg = ("`dist` should be a stats.distributions instance or a string " + "with the name of such a distribution.") + raise ValueError(msg) + + return dist + + +def _add_axis_labels_title(plot, xlabel, ylabel, title): + """Helper function to add axes labels and a title to stats plots""" + try: + if hasattr(plot, 'set_title'): + # Matplotlib Axes instance or something that looks like it + plot.set_title(title) + plot.set_xlabel(xlabel) + plot.set_ylabel(ylabel) + else: + # matplotlib.pyplot module + plot.title(title) + plot.xlabel(xlabel) + plot.ylabel(ylabel) + except Exception: + # Not an MPL object or something that looks (enough) like it. + # Don't crash on adding labels or title + pass + + +def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False): + """ + Calculate quantiles for a probability plot, and optionally show the plot. + + Generates a probability plot of sample data against the quantiles of a + specified theoretical distribution (the normal distribution by default). + `probplot` optionally calculates a best-fit line for the data and plots the + results using Matplotlib or a given plot function. + + Parameters + ---------- + x : array_like + Sample/response data from which `probplot` creates the plot. + sparams : tuple, optional + Distribution-specific shape parameters (shape parameters plus location + and scale). 
+ dist : str or stats.distributions instance, optional + Distribution or distribution function name. The default is 'norm' for a + normal probability plot. Objects that look enough like a + stats.distributions instance (i.e. they have a ``ppf`` method) are also + accepted. + fit : bool, optional + Fit a least-squares regression (best-fit) line to the sample data if + True (default). + plot : object, optional + If given, plots the quantiles and least squares fit. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. + Default is None, which means that no plot is created. + + Returns + ------- + (osm, osr) : tuple of ndarrays + Tuple of theoretical quantiles (osm, or order statistic medians) and + ordered responses (osr). `osr` is simply sorted input `x`. + For details on how `osm` is calculated see the Notes section. + (slope, intercept, r) : tuple of floats, optional + Tuple containing the result of the least-squares fit, if that is + performed by `probplot`. `r` is the square root of the coefficient of + determination. If ``fit=False`` and ``plot=None``, this tuple is not + returned. + + Notes + ----- + Even if `plot` is given, the figure is not shown or saved by `probplot`; + ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after + calling `probplot`. + + `probplot` generates a probability plot, which should not be confused with + a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this + type, see ``statsmodels.api.ProbPlot``. + + The formula used for the theoretical quantiles (horizontal axis of the + probability plot) is Filliben's estimate:: + + quantiles = dist.ppf(val), for + + 0.5**(1/n), for i = n + val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1 + 1 - 0.5**(1/n), for i = 1 + + where ``i`` indicates the i-th ordered value and ``n`` is the total number + of values. + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> nsample = 100 + >>> np.random.seed(7654321) + + A t distribution with small degrees of freedom: + + >>> ax1 = plt.subplot(221) + >>> x = stats.t.rvs(3, size=nsample) + >>> res = stats.probplot(x, plot=plt) + + A t distribution with larger degrees of freedom: + + >>> ax2 = plt.subplot(222) + >>> x = stats.t.rvs(25, size=nsample) + >>> res = stats.probplot(x, plot=plt) + + A mixture of two normal distributions with broadcasting: + + >>> ax3 = plt.subplot(223) + >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], + ... 
size=(nsample//2,2)).ravel() + >>> res = stats.probplot(x, plot=plt) + + A standard normal distribution: + + >>> ax4 = plt.subplot(224) + >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample) + >>> res = stats.probplot(x, plot=plt) + + Produce a new figure with a loggamma distribution, using the ``dist`` and + ``sparams`` keywords: + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> x = stats.loggamma.rvs(c=2.5, size=500) + >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax) + >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5") + + Show the results with Matplotlib: + + >>> plt.show() + + """ + x = np.asarray(x) + _perform_fit = fit or (plot is not None) + if x.size == 0: + if _perform_fit: + return (x, x), (np.nan, np.nan, 0.0) + else: + return x, x + + osm_uniform = _calc_uniform_order_statistic_medians(len(x)) + dist = _parse_dist_kw(dist, enforce_subclass=False) + if sparams is None: + sparams = () + if isscalar(sparams): + sparams = (sparams,) + if not isinstance(sparams, tuple): + sparams = tuple(sparams) + + osm = dist.ppf(osm_uniform, *sparams) + osr = sort(x) + if _perform_fit: + # perform a linear least squares fit. + slope, intercept, r, prob, sterrest = stats.linregress(osm, osr) + + if plot is not None: + plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-') + _add_axis_labels_title(plot, xlabel='Theoretical quantiles', + ylabel='Ordered Values', + title='Probability Plot') + + # Add R^2 value to the plot as text + if rvalue: + xmin = amin(osm) + xmax = amax(osm) + ymin = amin(x) + ymax = amax(x) + posx = xmin + 0.70 * (xmax - xmin) + posy = ymin + 0.01 * (ymax - ymin) + plot.text(posx, posy, "$R^2=%1.4f$" % r**2) + + if fit: + return (osm, osr), (slope, intercept, r) + else: + return osm, osr + + +def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'): + """ + Calculate the shape parameter that maximizes the PPCC + + The probability plot correlation coefficient (PPCC) plot can be used to + determine the optimal shape parameter for a one-parameter family of + distributions. ppcc_max returns the shape parameter that would maximize the + probability plot correlation coefficient for the given data to a + one-parameter family of distributions. + + Parameters + ---------- + x : array_like + Input array. + brack : tuple, optional + Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c) + then they are assumed to be a starting interval for a downhill bracket + search (see `scipy.optimize.brent`). + dist : str or stats.distributions instance, optional + Distribution or distribution function name. Objects that look enough + like a stats.distributions instance (i.e. they have a ``ppf`` method) + are also accepted. The default is ``'tukeylambda'``. + + Returns + ------- + shape_value : float + The shape parameter at which the probability plot correlation + coefficient reaches its max value. + + See also + -------- + ppcc_plot, probplot, boxcox + + Notes + ----- + The brack keyword serves as a starting point which is useful in corner + cases. One can use a plot to obtain a rough visual estimate of the location + for the maximum to start the search near it. + + References + ---------- + .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for + Normality", Technometrics, Vol. 17, pp. 111-117, 1975. + + .. 
[2] https://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm + + Examples + -------- + First we generate some random data from a Tukey-Lambda distribution, + with shape parameter -0.7: + + >>> from scipy import stats + >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000, + ... random_state=1234567) + 1e4 + + Now we explore this data with a PPCC plot as well as the related + probability plot and Box-Cox normplot. A red line is drawn where we + expect the PPCC value to be maximal (at the shape parameter -0.7 used + above): + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure(figsize=(8, 6)) + >>> ax = fig.add_subplot(111) + >>> res = stats.ppcc_plot(x, -5, 5, plot=ax) + + We calculate the value where the shape should reach its maximum and a red + line is drawn there. The line should coincide with the highest point in the + ppcc_plot. + + >>> max = stats.ppcc_max(x) + >>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value') + + >>> plt.show() + + """ + dist = _parse_dist_kw(dist) + osm_uniform = _calc_uniform_order_statistic_medians(len(x)) + osr = sort(x) + + # this function computes the x-axis values of the probability plot + # and computes a linear regression (including the correlation) + # and returns 1-r so that a minimization function maximizes the + # correlation + def tempfunc(shape, mi, yvals, func): + xvals = func(mi, shape) + r, prob = stats.pearsonr(xvals, yvals) + return 1 - r + + return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf)) + + +def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80): + """ + Calculate and optionally plot probability plot correlation coefficient. + + The probability plot correlation coefficient (PPCC) plot can be used to + determine the optimal shape parameter for a one-parameter family of + distributions. It cannot be used for distributions without shape parameters + (like the normal distribution) or with multiple shape parameters. + + By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A + Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed + distributions via an approximately normal one, and is therefore particularly + useful in practice. + + Parameters + ---------- + x : array_like + Input array. + a, b: scalar + Lower and upper bounds of the shape parameter to use. + dist : str or stats.distributions instance, optional + Distribution or distribution function name. Objects that look enough + like a stats.distributions instance (i.e. they have a ``ppf`` method) + are also accepted. The default is ``'tukeylambda'``. + plot : object, optional + If given, plots PPCC against the shape parameter. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. + Default is None, which means that no plot is created. + N : int, optional + Number of points on the horizontal axis (equally distributed from + `a` to `b`). + + Returns + ------- + svals : ndarray + The shape values for which `ppcc` was calculated. + ppcc : ndarray + The calculated probability plot correlation coefficient values. + + See also + -------- + ppcc_max, probplot, boxcox_normplot, tukeylambda + + References + ---------- + J.J. Filliben, "The Probability Plot Correlation Coefficient Test for + Normality", Technometrics, Vol. 17, pp. 111-117, 1975. 
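+
+    Each returned ``ppcc`` value is, in effect, the correlation coefficient
+    that `probplot` reports for the corresponding candidate shape value,
+    roughly::
+
+        for k, sval in enumerate(np.linspace(a, b, N)):
+            _, (_, _, r) = probplot(x, sval, dist=dist, fit=True)
+            ppcc[k] = r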
+ + Examples + -------- + First we generate some random data from a Tukey-Lambda distribution, + with shape parameter -0.7: + + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> np.random.seed(1234567) + >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 + + Now we explore this data with a PPCC plot as well as the related + probability plot and Box-Cox normplot. A red line is drawn where we + expect the PPCC value to be maximal (at the shape parameter -0.7 used + above): + + >>> fig = plt.figure(figsize=(12, 4)) + >>> ax1 = fig.add_subplot(131) + >>> ax2 = fig.add_subplot(132) + >>> ax3 = fig.add_subplot(133) + >>> res = stats.probplot(x, plot=ax1) + >>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2) + >>> res = stats.ppcc_plot(x, -5, 5, plot=ax3) + >>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value') + >>> plt.show() + + """ + if b <= a: + raise ValueError("`b` has to be larger than `a`.") + + svals = np.linspace(a, b, num=N) + ppcc = np.empty_like(svals) + for k, sval in enumerate(svals): + _, r2 = probplot(x, sval, dist=dist, fit=True) + ppcc[k] = r2[-1] + + if plot is not None: + plot.plot(svals, ppcc, 'x') + _add_axis_labels_title(plot, xlabel='Shape Values', + ylabel='Prob Plot Corr. Coef.', + title='(%s) PPCC Plot' % dist) + + return svals, ppcc + + +def boxcox_llf(lmb, data): + r"""The boxcox log-likelihood function. + + Parameters + ---------- + lmb : scalar + Parameter for Box-Cox transformation. See `boxcox` for details. + data : array_like + Data to calculate Box-Cox log-likelihood for. If `data` is + multi-dimensional, the log-likelihood is calculated along the first + axis. + + Returns + ------- + llf : float or ndarray + Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`, + an array otherwise. + + See Also + -------- + boxcox, probplot, boxcox_normplot, boxcox_normmax + + Notes + ----- + The Box-Cox log-likelihood function is defined here as + + .. math:: + + llf = (\lambda - 1) \sum_i(\log(x_i)) - + N/2 \log(\sum_i (y_i - \bar{y})^2 / N), + + where ``y`` is the Box-Cox transformed input data ``x``. + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes + >>> np.random.seed(1245) + + Generate some random variates and calculate Box-Cox log-likelihood values + for them for a range of ``lmbda`` values: + + >>> x = stats.loggamma.rvs(5, loc=10, size=1000) + >>> lmbdas = np.linspace(-2, 10) + >>> llf = np.zeros(lmbdas.shape, dtype=float) + >>> for ii, lmbda in enumerate(lmbdas): + ... llf[ii] = stats.boxcox_llf(lmbda, x) + + Also find the optimal lmbda value with `boxcox`: + + >>> x_most_normal, lmbda_optimal = stats.boxcox(x) + + Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a + horizontal line to check that that's really the optimum: + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(lmbdas, llf, 'b.-') + >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r') + >>> ax.set_xlabel('lmbda parameter') + >>> ax.set_ylabel('Box-Cox log-likelihood') + + Now add some probability plots to show that where the log-likelihood is + maximized the data transformed with `boxcox` looks closest to normal: + + >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' + >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): + ... xt = stats.boxcox(x, lmbda=lmbda) + ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) + ... 
ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) + ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') + ... ax_inset.set_xticklabels([]) + ... ax_inset.set_yticklabels([]) + ... ax_inset.set_title('$\lambda=%1.2f$' % lmbda) + + >>> plt.show() + + """ + data = np.asarray(data) + N = data.shape[0] + if N == 0: + return np.nan + + y = boxcox(data, lmb) + y_mean = np.mean(y, axis=0) + llf = (lmb - 1) * np.sum(np.log(data), axis=0) + llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0)) + return llf + + +def _boxcox_conf_interval(x, lmax, alpha): + # Need to find the lambda for which + # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 + fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1) + target = boxcox_llf(lmax, x) - fac + + def rootfunc(lmbda, data, target): + return boxcox_llf(lmbda, data) - target + + # Find positive endpoint of interval in which answer is to be found + newlm = lmax + 0.5 + N = 0 + while (rootfunc(newlm, x, target) > 0.0) and (N < 500): + newlm += 0.1 + N += 1 + + if N == 500: + raise RuntimeError("Could not find endpoint.") + + lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target)) + + # Now find negative interval in the same way + newlm = lmax - 0.5 + N = 0 + while (rootfunc(newlm, x, target) > 0.0) and (N < 500): + newlm -= 0.1 + N += 1 + + if N == 500: + raise RuntimeError("Could not find endpoint.") + + lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target)) + return lmminus, lmplus + + +def boxcox(x, lmbda=None, alpha=None): + r""" + Return a positive dataset transformed by a Box-Cox power transformation. + + Parameters + ---------- + x : ndarray + Input array. Should be 1-dimensional. + lmbda : {None, scalar}, optional + If `lmbda` is not None, do the transformation for that value. + + If `lmbda` is None, find the lambda that maximizes the log-likelihood + function and return it as the second output argument. + alpha : {None, float}, optional + If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence + interval for `lmbda` as the third output argument. + Must be between 0.0 and 1.0. + + Returns + ------- + boxcox : ndarray + Box-Cox power transformed array. + maxlog : float, optional + If the `lmbda` parameter is None, the second returned argument is + the lambda that maximizes the log-likelihood function. + (min_ci, max_ci) : tuple of float, optional + If `lmbda` parameter is None and ``alpha`` is not None, this returned + tuple of floats represents the minimum and maximum confidence limits + given ``alpha``. + + See Also + -------- + probplot, boxcox_normplot, boxcox_normmax, boxcox_llf + + Notes + ----- + The Box-Cox transform is given by:: + + y = (x**lmbda - 1) / lmbda, for lmbda > 0 + log(x), for lmbda = 0 + + `boxcox` requires the input data to be positive. Sometimes a Box-Cox + transformation provides a shift parameter to achieve this; `boxcox` does + not. Such a shift parameter is equivalent to adding a positive constant to + `x` before calling `boxcox`. + + The confidence limits returned when ``alpha`` is provided give the interval + where: + + .. math:: + + llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1), + + with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared + function. + + References + ---------- + G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the + Royal Statistical Society B, 26, 211-252 (1964). 
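+
+    As a quick numerical check of the transform: with ``lmbda = 2`` it is
+    ``(x**2 - 1) / 2``, so the values 1 and 2 map to 0 and 1.5:
+
+    >>> from scipy import stats
+    >>> y = stats.boxcox([1., 2.], lmbda=2)   # gives array([0. , 1.5])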
+ + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + We generate some random variates from a non-normal distribution and make a + probability plot for it, to show it is non-normal in the tails: + + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(211) + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) + >>> ax1.set_xlabel('') + >>> ax1.set_title('Probplot against normal distribution') + + We now use `boxcox` to transform the data so it's closest to normal: + + >>> ax2 = fig.add_subplot(212) + >>> xt, _ = stats.boxcox(x) + >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) + >>> ax2.set_title('Probplot after Box-Cox transformation') + + >>> plt.show() + + """ + x = np.asarray(x) + if x.size == 0: + return x + + if any(x <= 0): + raise ValueError("Data must be positive.") + + if lmbda is not None: # single transformation + return special.boxcox(x, lmbda) + + # If lmbda=None, find the lmbda that maximizes the log-likelihood function. + lmax = boxcox_normmax(x, method='mle') + y = boxcox(x, lmax) + + if alpha is None: + return y, lmax + else: + # Find confidence interval + interval = _boxcox_conf_interval(x, lmax, alpha) + return y, lmax, interval + + +def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'): + """Compute optimal Box-Cox transform parameter for input data. + + Parameters + ---------- + x : array_like + Input array. + brack : 2-tuple, optional + The starting interval for a downhill bracket search with + `optimize.brent`. Note that this is in most cases not critical; the + final result is allowed to be outside this bracket. + method : str, optional + The method to determine the optimal transform parameter (`boxcox` + ``lmbda`` parameter). Options are: + + 'pearsonr' (default) + Maximizes the Pearson correlation coefficient between + ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be + normally-distributed. + + 'mle' + Minimizes the log-likelihood `boxcox_llf`. This is the method used + in `boxcox`. + + 'all' + Use all optimization methods available, and return all results. + Useful to compare different methods. + + Returns + ------- + maxlog : float or ndarray + The optimal transform parameter found. An array instead of a scalar + for ``method='all'``. + + See Also + -------- + boxcox, boxcox_llf, boxcox_normplot + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> np.random.seed(1234) # make this example reproducible + + Generate some data and determine optimal ``lmbda`` in various ways: + + >>> x = stats.loggamma.rvs(5, size=30) + 5 + >>> y, lmax_mle = stats.boxcox(x) + >>> lmax_pearsonr = stats.boxcox_normmax(x) + + >>> lmax_mle + 7.177... + >>> lmax_pearsonr + 7.916... + >>> stats.boxcox_normmax(x, method='all') + array([ 7.91667384, 7.17718692]) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax) + >>> ax.axvline(lmax_mle, color='r') + >>> ax.axvline(lmax_pearsonr, color='g', ls='--') + + >>> plt.show() + + """ + + def _pearsonr(x, brack): + osm_uniform = _calc_uniform_order_statistic_medians(len(x)) + xvals = distributions.norm.ppf(osm_uniform) + + def _eval_pearsonr(lmbda, xvals, samps): + # This function computes the x-axis values of the probability plot + # and computes a linear regression (including the correlation) and + # returns ``1 - r`` so that a minimization function maximizes the + # correlation. 
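+            # xvals here are the fixed normal-theory quantiles (norm.ppf of
+            # the uniform order statistic medians); samps is the raw data,
+            # Box-Cox transformed with the candidate lmbda and sorted below.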
+            y = boxcox(samps, lmbda)
+            yvals = np.sort(y)
+            r, prob = stats.pearsonr(xvals, yvals)
+            return 1 - r
+
+        return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
+
+    def _mle(x, brack):
+        def _eval_mle(lmb, data):
+            # function to minimize
+            return -boxcox_llf(lmb, data)
+
+        return optimize.brent(_eval_mle, brack=brack, args=(x,))
+
+    def _all(x, brack):
+        maxlog = np.zeros(2, dtype=float)
+        maxlog[0] = _pearsonr(x, brack)
+        maxlog[1] = _mle(x, brack)
+        return maxlog
+
+    methods = {'pearsonr': _pearsonr,
+               'mle': _mle,
+               'all': _all}
+    if method not in methods.keys():
+        raise ValueError("Method %s not recognized." % method)
+
+    optimfunc = methods[method]
+    return optimfunc(x, brack)
+
+
+def _normplot(method, x, la, lb, plot=None, N=80):
+    """Compute parameters for a Box-Cox or Yeo-Johnson normality plot,
+    optionally show it. See `boxcox_normplot` or `yeojohnson_normplot` for
+    details."""
+
+    if method == 'boxcox':
+        title = 'Box-Cox Normality Plot'
+        transform_func = boxcox
+    else:
+        title = 'Yeo-Johnson Normality Plot'
+        transform_func = yeojohnson
+
+    x = np.asarray(x)
+    if x.size == 0:
+        return x
+
+    if lb <= la:
+        raise ValueError("`lb` has to be larger than `la`.")
+
+    lmbdas = np.linspace(la, lb, num=N)
+    ppcc = lmbdas * 0.0
+    for i, val in enumerate(lmbdas):
+        # Determine for each lmbda the correlation coefficient of the
+        # probability plot of the transformed x
+        z = transform_func(x, lmbda=val)
+        _, (_, _, r) = probplot(z, dist='norm', fit=True)
+        ppcc[i] = r
+
+    if plot is not None:
+        plot.plot(lmbdas, ppcc, 'x')
+        _add_axis_labels_title(plot, xlabel='$\\lambda$',
+                               ylabel='Prob Plot Corr. Coef.',
+                               title=title)
+
+    return lmbdas, ppcc
+
+
+def boxcox_normplot(x, la, lb, plot=None, N=80):
+    """Compute parameters for a Box-Cox normality plot, optionally show it.
+
+    A Box-Cox normality plot shows graphically what the best transformation
+    parameter is to use in `boxcox` to obtain a distribution that is close
+    to normal.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    la, lb : scalar
+        The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
+        for Box-Cox transformations. These are also the limits of the
+        horizontal axis of the plot if that is generated.
+    plot : object, optional
+        If given, plots the quantiles and least squares fit.
+        `plot` is an object that has to have methods "plot" and "text".
+        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
+        or a custom object with the same methods.
+        Default is None, which means that no plot is created.
+    N : int, optional
+        Number of points on the horizontal axis (equally distributed from
+        `la` to `lb`).
+
+    Returns
+    -------
+    lmbdas : ndarray
+        The ``lmbda`` values for which a Box-Cox transform was done.
+    ppcc : ndarray
+        Probability Plot Correlation Coefficient, as obtained from `probplot`
+        when fitting the Box-Cox transformed input `x` against a normal
+        distribution.
+
+    See Also
+    --------
+    probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
+
+    Notes
+    -----
+    Even if `plot` is given, the figure is not shown or saved by
+    `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
+    should be used after calling `boxcox_normplot`.
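+
+    Conceptually, each returned ``ppcc`` value is obtained by Box-Cox
+    transforming `x` with one candidate ``lmbda`` and asking `probplot` how
+    normal the result looks, roughly::
+
+        z = boxcox(x, lmbda=val)
+        _, (_, _, r) = probplot(z, dist='norm', fit=True)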
+ + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Generate some non-normally distributed data, and create a Box-Cox plot: + + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax) + + Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in + the same plot: + + >>> _, maxlog = stats.boxcox(x) + >>> ax.axvline(maxlog, color='r') + + >>> plt.show() + + """ + return _normplot('boxcox', x, la, lb, plot, N) + + +def yeojohnson(x, lmbda=None): + r""" + Return a dataset transformed by a Yeo-Johnson power transformation. + + Parameters + ---------- + x : ndarray + Input array. Should be 1-dimensional. + lmbda : float, optional + If ``lmbda`` is ``None``, find the lambda that maximizes the + log-likelihood function and return it as the second output argument. + Otherwise the transformation is done for the given value. + + Returns + ------- + yeojohnson: ndarray + Yeo-Johnson power transformed array. + maxlog : float, optional + If the `lmbda` parameter is None, the second returned argument is + the lambda that maximizes the log-likelihood function. + + See Also + -------- + probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox + + Notes + ----- + The Yeo-Johnson transform is given by:: + + y = ((x + 1)**lmbda - 1) / lmbda, for x >= 0, lmbda != 0 + log(x + 1), for x >= 0, lmbda = 0 + -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda), for x < 0, lmbda != 2 + -log(-x + 1), for x < 0, lmbda = 2 + + Unlike `boxcox`, `yeojohnson` does not require the input data to be + positive. + + .. versionadded:: 1.2.0 + + + References + ---------- + I. Yeo and R.A. Johnson, "A New Family of Power Transformations to + Improve Normality or Symmetry", Biometrika 87.4 (2000): + + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + We generate some random variates from a non-normal distribution and make a + probability plot for it, to show it is non-normal in the tails: + + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(211) + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) + >>> ax1.set_xlabel('') + >>> ax1.set_title('Probplot against normal distribution') + + We now use `yeojohnson` to transform the data so it's closest to normal: + + >>> ax2 = fig.add_subplot(212) + >>> xt, lmbda = stats.yeojohnson(x) + >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) + >>> ax2.set_title('Probplot after Yeo-Johnson transformation') + + >>> plt.show() + + """ + + x = np.asarray(x) + if x.size == 0: + return x + + if lmbda is not None: + return _yeojohnson_transform(x, lmbda) + + # if lmbda=None, find the lmbda that maximizes the log-likelihood function. + lmax = yeojohnson_normmax(x) + y = _yeojohnson_transform(x, lmax) + + return y, lmax + + +def _yeojohnson_transform(x, lmbda): + """Return x transformed by the Yeo-Johnson power transform with given + parameter lmbda.""" + + out = np.zeros_like(x) + pos = x >= 0 # binary mask + + # when x >= 0 + if abs(lmbda) < np.spacing(1.): + out[pos] = np.log1p(x[pos]) + else: # lmbda != 0 + out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda + + # when x < 0 + if abs(lmbda - 2) > np.spacing(1.): + out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda) + else: # lmbda == 2 + out[~pos] = -np.log1p(-x[~pos]) + + return out + + +def yeojohnson_llf(lmb, data): + r"""The yeojohnson log-likelihood function. 
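+
+    This is the quantity that `yeojohnson` and `yeojohnson_normmax`
+    maximize over ``lmb`` when estimating the optimal transform parameter.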
+ + Parameters + ---------- + lmb : scalar + Parameter for Yeo-Johnson transformation. See `yeojohnson` for + details. + data : array_like + Data to calculate Yeo-Johnson log-likelihood for. If `data` is + multi-dimensional, the log-likelihood is calculated along the first + axis. + + Returns + ------- + llf : float + Yeo-Johnson log-likelihood of `data` given `lmb`. + + See Also + -------- + yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax + + Notes + ----- + The Yeo-Johnson log-likelihood function is defined here as + + .. math:: + + llf = N/2 \log(\hat{\sigma}^2) + (\lambda - 1) + \sum_i \text{ sign }(x_i)\log(|x_i| + 1) + + where :math:`\hat{\sigma}^2` is estimated variance of the the Yeo-Johnson + transformed input data ``x``. + + .. versionadded:: 1.2.0 + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes + >>> np.random.seed(1245) + + Generate some random variates and calculate Yeo-Johnson log-likelihood + values for them for a range of ``lmbda`` values: + + >>> x = stats.loggamma.rvs(5, loc=10, size=1000) + >>> lmbdas = np.linspace(-2, 10) + >>> llf = np.zeros(lmbdas.shape, dtype=float) + >>> for ii, lmbda in enumerate(lmbdas): + ... llf[ii] = stats.yeojohnson_llf(lmbda, x) + + Also find the optimal lmbda value with `yeojohnson`: + + >>> x_most_normal, lmbda_optimal = stats.yeojohnson(x) + + Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a + horizontal line to check that that's really the optimum: + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(lmbdas, llf, 'b.-') + >>> ax.axhline(stats.yeojohnson_llf(lmbda_optimal, x), color='r') + >>> ax.set_xlabel('lmbda parameter') + >>> ax.set_ylabel('Yeo-Johnson log-likelihood') + + Now add some probability plots to show that where the log-likelihood is + maximized the data transformed with `yeojohnson` looks closest to normal: + + >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' + >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): + ... xt = stats.yeojohnson(x, lmbda=lmbda) + ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) + ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) + ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') + ... ax_inset.set_xticklabels([]) + ... ax_inset.set_yticklabels([]) + ... ax_inset.set_title('$\lambda=%1.2f$' % lmbda) + + >>> plt.show() + + """ + data = np.asarray(data) + n_samples = data.shape[0] + + if n_samples == 0: + return np.nan + + trans = _yeojohnson_transform(data, lmb) + + loglike = -n_samples / 2 * np.log(trans.var(axis=0)) + loglike += (lmb - 1) * (np.sign(data) * np.log(np.abs(data) + 1)).sum(axis=0) + + return loglike + + +def yeojohnson_normmax(x, brack=(-2, 2)): + """Compute optimal Yeo-Johnson transform parameter for input data, using + maximum likelihood estimation. + + Parameters + ---------- + x : array_like + Input array. + brack : 2-tuple, optional + The starting interval for a downhill bracket search with + `optimize.brent`. Note that this is in most cases not critical; the + final result is allowed to be outside this bracket. + + Returns + ------- + maxlog : float + The optimal transform parameter found. + + Notes + ----- + .. 
versionadded:: 1.2.0
+
+    See Also
+    --------
+    yeojohnson, yeojohnson_llf, yeojohnson_normplot
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+    >>> np.random.seed(1234)  # make this example reproducible
+
+    Generate some data and determine the optimal ``lmbda``:
+
+    >>> x = stats.loggamma.rvs(5, size=30) + 5
+    >>> lmax = stats.yeojohnson_normmax(x)
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax)
+    >>> ax.axvline(lmax, color='r')
+
+    >>> plt.show()
+
+    """
+
+    def _neg_llf(lmbda, data):
+        return -yeojohnson_llf(lmbda, data)
+
+    return optimize.brent(_neg_llf, brack=brack, args=(x,))
+
+
+def yeojohnson_normplot(x, la, lb, plot=None, N=80):
+    """Compute parameters for a Yeo-Johnson normality plot, optionally show it.
+
+    A Yeo-Johnson normality plot shows graphically what the best
+    transformation parameter is to use in `yeojohnson` to obtain a
+    distribution that is close to normal.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    la, lb : scalar
+        The lower and upper bounds for the ``lmbda`` values to pass to
+        `yeojohnson` for Yeo-Johnson transformations. These are also the
+        limits of the horizontal axis of the plot if that is generated.
+    plot : object, optional
+        If given, plots the quantiles and least squares fit.
+        `plot` is an object that has to have methods "plot" and "text".
+        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
+        or a custom object with the same methods.
+        Default is None, which means that no plot is created.
+    N : int, optional
+        Number of points on the horizontal axis (equally distributed from
+        `la` to `lb`).
+
+    Returns
+    -------
+    lmbdas : ndarray
+        The ``lmbda`` values for which a Yeo-Johnson transform was done.
+    ppcc : ndarray
+        Probability Plot Correlation Coefficient, as obtained from `probplot`
+        when fitting the Yeo-Johnson transformed input `x` against a normal
+        distribution.
+
+    See Also
+    --------
+    probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max
+
+    Notes
+    -----
+    Even if `plot` is given, the figure is not shown or saved by
+    `yeojohnson_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
+    should be used after calling `yeojohnson_normplot`.
+
+    .. versionadded:: 1.2.0
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    Generate some non-normally distributed data, and create a Yeo-Johnson plot:
+
+    >>> x = stats.loggamma.rvs(5, size=500) + 5
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax)
+
+    Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
+    the same plot:
+
+    >>> _, maxlog = stats.yeojohnson(x)
+    >>> ax.axvline(maxlog, color='r')
+
+    >>> plt.show()
+
+    """
+    return _normplot('yeojohnson', x, la, lb, plot, N)
+
+
+def shapiro(x):
+    """
+    Perform the Shapiro-Wilk test for normality.
+
+    The Shapiro-Wilk test tests the null hypothesis that the
+    data was drawn from a normal distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of sample data.
+
+    Returns
+    -------
+    W : float
+        The test statistic.
+    p-value : float
+        The p-value for the hypothesis test.
+
+    See Also
+    --------
+    anderson : The Anderson-Darling test for normality
+    kstest : The Kolmogorov-Smirnov test for goodness of fit.
+
+    Notes
+    -----
+    The algorithm used is described in [4]_ but censoring parameters as
+    described are not implemented.
For N > 5000 the W test statistic is accurate
+    but the p-value may not be.
+
+    The chance of rejecting the null hypothesis when it is true is close to 5%
+    regardless of sample size.
+
+    References
+    ----------
+    .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
+    .. [2] Shapiro, S. S. & Wilk, M. B. (1965). An analysis of variance test
+           for normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
+    .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
+           Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal
+           of Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
+    .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> np.random.seed(12345678)
+    >>> x = stats.norm.rvs(loc=5, scale=3, size=100)
+    >>> stats.shapiro(x)
+    (0.9772805571556091, 0.08144091814756393)
+
+    """
+    x = np.ravel(x)
+
+    N = len(x)
+    if N < 3:
+        raise ValueError("Data must be at least length 3.")
+
+    a = zeros(N, 'f')
+    init = 0
+
+    y = sort(x)
+    a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
+    if ifault not in [0, 2]:
+        warnings.warn("Input data for shapiro has range zero. The results "
+                      "may not be accurate.")
+    if N > 5000:
+        warnings.warn("p-value may not be accurate for N > 5000.")
+
+    return w, pw
+
+
+# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
+# Some Comparisons", Journal of the American Statistical
+# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
+_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
+_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
+# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
+# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
+_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
+# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
+# on the Empirical Distribution Function.", Biometrika,
+# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
+_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
+
+
+AndersonResult = namedtuple('AndersonResult', ('statistic',
+                                               'critical_values',
+                                               'significance_level'))
+
+
+def anderson(x, dist='norm'):
+    """
+    Anderson-Darling test for data coming from a particular distribution.
+
+    The Anderson-Darling test tests the null hypothesis that a sample is
+    drawn from a population that follows a particular distribution.
+    For the Anderson-Darling test, the critical values depend on
+    which distribution is being tested against. This function works
+    for normal, exponential, logistic, or Gumbel (Extreme Value
+    Type I) distributions.
+
+    Parameters
+    ----------
+    x : array_like
+        array of sample data
+    dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r',
+            'extreme1'}, optional
+        the type of distribution to test against. The default is 'norm';
+        'extreme1', 'gumbel_l' and 'gumbel' are synonyms.
+
+    Returns
+    -------
+    statistic : float
+        The Anderson-Darling test statistic
+    critical_values : list
+        The critical values for this distribution
+    significance_level : list
+        The significance levels for the corresponding critical values
+        in percents. The function returns critical values for a
+        differing set of significance levels depending on the
+        distribution that is being tested against.
+
+    See Also
+    --------
+    kstest : The Kolmogorov-Smirnov test for goodness-of-fit.
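+    anderson_ksamp : The Anderson-Darling test for k samples.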
+ + Notes + ----- + Critical values provided are for the following significance levels: + + normal/exponenential + 15%, 10%, 5%, 2.5%, 1% + logistic + 25%, 10%, 5%, 2.5%, 1%, 0.5% + Gumbel + 25%, 10%, 5%, 2.5%, 1% + + If the returned statistic is larger than these critical values then + for the corresponding significance level, the null hypothesis that + the data come from the chosen distribution can be rejected. + The returned statistic is referred to as 'A2' in the references. + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm + .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and + Some Comparisons, Journal of the American Statistical Association, + Vol. 69, pp. 730-737. + .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit + Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, + pp. 357-369. + .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value + Distribution, Biometrika, Vol. 64, pp. 583-588. + .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference + to Tests for Exponentiality , Technical Report No. 262, + Department of Statistics, Stanford University, Stanford, CA. + .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution + Based on the Empirical Distribution Function, Biometrika, Vol. 66, + pp. 591-595. + + """ + if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l', + 'gumbel_r', 'extreme1', 'logistic']: + raise ValueError("Invalid distribution; dist must be 'norm', " + "'expon', 'gumbel', 'extreme1' or 'logistic'.") + y = sort(x) + xbar = np.mean(x, axis=0) + N = len(y) + if dist == 'norm': + s = np.std(x, ddof=1, axis=0) + w = (y - xbar) / s + logcdf = distributions.norm.logcdf(w) + logsf = distributions.norm.logsf(w) + sig = array([15, 10, 5, 2.5, 1]) + critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3) + elif dist == 'expon': + w = y / xbar + logcdf = distributions.expon.logcdf(w) + logsf = distributions.expon.logsf(w) + sig = array([15, 10, 5, 2.5, 1]) + critical = around(_Avals_expon / (1.0 + 0.6/N), 3) + elif dist == 'logistic': + def rootfunc(ab, xj, N): + a, b = ab + tmp = (xj - a) / b + tmp2 = exp(tmp) + val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N, + np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N] + return array(val) + + sol0 = array([xbar, np.std(x, ddof=1, axis=0)]) + sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5) + w = (y - sol[0]) / sol[1] + logcdf = distributions.logistic.logcdf(w) + logsf = distributions.logistic.logsf(w) + sig = array([25, 10, 5, 2.5, 1, 0.5]) + critical = around(_Avals_logistic / (1.0 + 0.25/N), 3) + elif dist == 'gumbel_r': + xbar, s = distributions.gumbel_r.fit(x) + w = (y - xbar) / s + logcdf = distributions.gumbel_r.logcdf(w) + logsf = distributions.gumbel_r.logsf(w) + sig = array([25, 10, 5, 2.5, 1]) + critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) + else: # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1') + xbar, s = distributions.gumbel_l.fit(x) + w = (y - xbar) / s + logcdf = distributions.gumbel_l.logcdf(w) + logsf = distributions.gumbel_l.logsf(w) + sig = array([25, 10, 5, 2.5, 1]) + critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) + + i = arange(1, N + 1) + A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0) + + return AndersonResult(A2, critical, sig) + + +def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N): + """ + Compute A2akN equation 7 of Scholz and Stephens. 
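+    This is the midrank variant used when ``midrank=True`` in
+    `anderson_ksamp`; ties are handled by averaging the left- and
+    right-continuous empirical distribution functions.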
+ + Parameters + ---------- + samples : sequence of 1-D array_like + Array of sample arrays. + Z : array_like + Sorted array of all observations. + Zstar : array_like + Sorted array of unique observations. + k : int + Number of samples. + n : array_like + Number of observations in each sample. + N : int + Total number of observations. + + Returns + ------- + A2aKN : float + The A2aKN statistics of Scholz and Stephens 1987. + """ + + A2akN = 0. + Z_ssorted_left = Z.searchsorted(Zstar, 'left') + if N == Zstar.size: + lj = 1. + else: + lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left + Bj = Z_ssorted_left + lj / 2. + for i in arange(0, k): + s = np.sort(samples[i]) + s_ssorted_right = s.searchsorted(Zstar, side='right') + Mij = s_ssorted_right.astype(float) + fij = s_ssorted_right - s.searchsorted(Zstar, 'left') + Mij -= fij / 2. + inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.) + A2akN += inner.sum() / n[i] + A2akN *= (N - 1.) / N + return A2akN + + +def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): + """ + Compute A2akN equation 6 of Scholz & Stephens. + + Parameters + ---------- + samples : sequence of 1-D array_like + Array of sample arrays. + Z : array_like + Sorted array of all observations. + Zstar : array_like + Sorted array of unique observations. + k : int + Number of samples. + n : array_like + Number of observations in each sample. + N : int + Total number of observations. + + Returns + ------- + A2KN : float + The A2KN statistics of Scholz and Stephens 1987. + """ + + A2kN = 0. + lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], + 'left') + Bj = lj.cumsum() + for i in arange(0, k): + s = np.sort(samples[i]) + Mij = s.searchsorted(Zstar[:-1], side='right') + inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj)) + A2kN += inner.sum() / n[i] + return A2kN + + +Anderson_ksampResult = namedtuple('Anderson_ksampResult', + ('statistic', 'critical_values', + 'significance_level')) + + +def anderson_ksamp(samples, midrank=True): + """The Anderson-Darling test for k-samples. + + The k-sample Anderson-Darling test is a modification of the + one-sample Anderson-Darling test. It tests the null hypothesis + that k-samples are drawn from the same population without having + to specify the distribution function of that population. The + critical values depend on the number of samples. + + Parameters + ---------- + samples : sequence of 1-D array_like + Array of sample data in arrays. + midrank : bool, optional + Type of Anderson-Darling test which is computed. Default + (True) is the midrank test applicable to continuous and + discrete populations. If False, the right side empirical + distribution is used. + + Returns + ------- + statistic : float + Normalized k-sample Anderson-Darling test statistic. + critical_values : array + The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%. + significance_level : float + An approximate significance level at which the null hypothesis for the + provided samples can be rejected. The value is floored / capped at + 1% / 25%. + + Raises + ------ + ValueError + If less than 2 samples are provided, a sample is empty, or no + distinct observations are in the samples. + + See Also + -------- + ks_2samp : 2 sample Kolmogorov-Smirnov test + anderson : 1 sample Anderson-Darling test + + Notes + ----- + [1]_ defines three versions of the k-sample Anderson-Darling test: + one for continuous distributions and two for discrete + distributions, in which ties between samples may occur. 
The + default of this routine is to compute the version based on the + midrank empirical distribution function. This test is applicable + to continuous and discrete data. If midrank is set to False, the + right side empirical distribution is used for a test for discrete + data. According to [1]_, the two discrete test statistics differ + only slightly if a few collisions due to round-off errors occur in + the test not adjusted for ties between samples. + + The critical values corresponding to the significance levels from 0.01 + to 0.25 are taken from [1]_. p-values are floored / capped + at 0.1% / 25%. Since the range of critical values might be extended in + future releases, it is recommended not to test ``p == 0.25``, but rather + ``p >= 0.25`` (analogously for the lower bound). + + .. versionadded:: 0.14.0 + + References + ---------- + .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample + Anderson-Darling Tests, Journal of the American Statistical + Association, Vol. 82, pp. 918-924. + + Examples + -------- + >>> from scipy import stats + >>> np.random.seed(314159) + + The null hypothesis that the two random samples come from the same + distribution can be rejected at the 5% level because the returned + test value is greater than the critical value for 5% (1.961) but + not at the 2.5% level. The interpolation gives an approximate + significance level of 3.2%: + + >>> stats.anderson_ksamp([np.random.normal(size=50), + ... np.random.normal(loc=0.5, size=30)]) + (2.4615796189876105, + array([ 0.325, 1.226, 1.961, 2.718, 3.752, 4.592, 6.546]), + 0.03176687568842282) + + + The null hypothesis cannot be rejected for three samples from an + identical distribution. The reported p-value (25%) has been capped and + may not be very accurate (since it corresponds to the value 0.449 + whereas the statistic is -0.731): + + >>> stats.anderson_ksamp([np.random.normal(size=50), + ... np.random.normal(size=30), np.random.normal(size=20)]) + (-0.73091722665244196, + array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856, + 4.07210043, 5.56419101]), + 0.25) + + """ + k = len(samples) + if (k < 2): + raise ValueError("anderson_ksamp needs at least two samples") + + samples = list(map(np.asarray, samples)) + Z = np.sort(np.hstack(samples)) + N = Z.size + Zstar = np.unique(Z) + if Zstar.size < 2: + raise ValueError("anderson_ksamp needs more than one distinct " + "observation") + + n = np.array([sample.size for sample in samples]) + if any(n == 0): + raise ValueError("anderson_ksamp encountered sample without " + "observations") + + if midrank: + A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N) + else: + A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N) + + H = (1. / n).sum() + hs_cs = (1. / arange(N - 1, 1, -1)).cumsum() + h = hs_cs[-1] + 1 + g = (hs_cs / arange(2, N)).sum() + + a = (4*g - 6) * (k - 1) + (10 - 6*g)*H + b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6 + c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h + d = (2*h + 6)*k**2 - 4*h*k + sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) 
* (N - 3.)) + m = k - 1 + A2 = (A2kN - m) / math.sqrt(sigmasq) + + # The b_i values are the interpolation coefficients from Table 2 + # of Scholz and Stephens 1987 + b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085]) + b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615]) + b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154]) + critical = b0 + b1 / math.sqrt(m) + b2 / m + + sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001]) + if A2 < critical.min(): + p = sig.max() + warnings.warn("p-value capped: true value larger than {}".format(p), + stacklevel=2) + elif A2 > critical.max(): + p = sig.min() + warnings.warn("p-value floored: true value smaller than {}".format(p), + stacklevel=2) + else: + # interpolation of probit of significance level + pf = np.polyfit(critical, log(sig), 2) + p = math.exp(np.polyval(pf, A2)) + + return Anderson_ksampResult(A2, critical, p) + + +AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue')) + + +def ansari(x, y): + """ + Perform the Ansari-Bradley test for equal scale parameters + + The Ansari-Bradley test is a non-parametric test for the equality + of the scale parameter of the distributions from which two + samples were drawn. + + Parameters + ---------- + x, y : array_like + arrays of sample data + + Returns + ------- + statistic : float + The Ansari-Bradley test statistic + pvalue : float + The p-value of the hypothesis test + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + mood : A non-parametric test for the equality of two scale parameters + + Notes + ----- + The p-value given is exact when the sample sizes are both less than + 55 and there are no ties, otherwise a normal approximation for the + p-value is used. + + References + ---------- + .. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical + methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2. 
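+
+    Examples
+    --------
+    A minimal usage sketch; the samples here are assumed, illustrative data:
+
+    >>> from scipy import stats
+    >>> np.random.seed(1234)
+    >>> x = stats.norm.rvs(size=40)
+    >>> y = stats.norm.rvs(scale=3., size=40)
+    >>> statistic, pvalue = stats.ansari(x, y)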
+ + """ + x, y = asarray(x), asarray(y) + n = len(x) + m = len(y) + if m < 1: + raise ValueError("Not enough other observations.") + if n < 1: + raise ValueError("Not enough test observations.") + + N = m + n + xy = r_[x, y] # combine + rank = stats.rankdata(xy) + symrank = amin(array((rank, N - rank + 1)), 0) + AB = np.sum(symrank[:n], axis=0) + uxy = unique(xy) + repeats = (len(uxy) != len(xy)) + exact = ((m < 55) and (n < 55) and not repeats) + if repeats and (m < 55 or n < 55): + warnings.warn("Ties preclude use of exact statistic.") + if exact: + astart, a1, ifault = statlib.gscale(n, m) + ind = AB - astart + total = np.sum(a1, axis=0) + if ind < len(a1)/2.0: + cind = int(ceil(ind)) + if ind == cind: + pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total + else: + pval = 2.0 * np.sum(a1[:cind], axis=0) / total + else: + find = int(floor(ind)) + if ind == floor(ind): + pval = 2.0 * np.sum(a1[find:], axis=0) / total + else: + pval = 2.0 * np.sum(a1[find+1:], axis=0) / total + return AnsariResult(AB, min(1.0, pval)) + + # otherwise compute normal approximation + if N % 2: # N odd + mnAB = n * (N+1.0)**2 / 4.0 / N + varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2) + else: + mnAB = n * (N+2.0) / 4.0 + varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0) + if repeats: # adjust variance estimates + # compute np.sum(tj * rj**2,axis=0) + fac = np.sum(symrank**2, axis=0) + if N % 2: # N odd + varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1)) + else: # N even + varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1)) + + z = (AB - mnAB) / sqrt(varAB) + pval = distributions.norm.sf(abs(z)) * 2.0 + return AnsariResult(AB, pval) + + +BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue')) + + +def bartlett(*args): + """ + Perform Bartlett's test for equal variances + + Bartlett's test tests the null hypothesis that all input samples + are from populations with equal variances. For samples + from significantly non-normal populations, Levene's test + `levene` is more robust. + + Parameters + ---------- + sample1, sample2,... : array_like + arrays of sample data. Only 1d arrays are accepted, they may have + different lengths. + + Returns + ------- + statistic : float + The test statistic. + pvalue : float + The p-value of the test. + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + levene : A robust parametric test for equality of k variances + + Notes + ----- + Conover et al. (1981) examine many of the existing parametric and + nonparametric tests by extensive simulations and they conclude that the + tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be + superior in terms of robustness of departures from normality and power + ([3]_). + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm + + .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical + Methods, Eighth Edition, Iowa State University Press. + + .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and + Hypothesis Testing based on Quadratic Inference Function. Technical + Report #99-03, Center for Likelihood Studies, Pennsylvania State + University. + + .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical + Tests. Proceedings of the Royal Society of London. Series A, + Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282. 
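+
+    Examples
+    --------
+    A minimal usage sketch with assumed, illustrative samples:
+
+    >>> from scipy import stats
+    >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08]
+    >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58]
+    >>> T, p = stats.bartlett(a, b)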
+
+    """
+    # Handle empty input and input that is not 1d
+    for a in args:
+        if np.asanyarray(a).size == 0:
+            return BartlettResult(np.nan, np.nan)
+        if np.asanyarray(a).ndim > 1:
+            raise ValueError('Samples must be one-dimensional.')
+
+    k = len(args)
+    if k < 2:
+        raise ValueError("Must enter at least two input sample vectors.")
+    Ni = zeros(k)
+    ssq = zeros(k, 'd')
+    for j in range(k):
+        Ni[j] = len(args[j])
+        ssq[j] = np.var(args[j], ddof=1)
+    Ntot = np.sum(Ni, axis=0)
+    spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
+    numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
+    denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
+                                     1.0/(Ntot - k))
+    T = numer / denom
+    pval = distributions.chi2.sf(T, k - 1)  # 1 - cdf
+
+    return BartlettResult(T, pval)
+
+
+LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
+
+
+def levene(*args, **kwds):
+    """
+    Perform the Levene test for equal variances.
+
+    The Levene test tests the null hypothesis that all input samples
+    are from populations with equal variances. Levene's test is an
+    alternative to Bartlett's test `bartlett` in the case where
+    there are significant deviations from normality.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        The sample data, possibly with different lengths. Only one-dimensional
+        samples are accepted.
+    center : {'mean', 'median', 'trimmed'}, optional
+        Which function of the data to use in the test. The default
+        is 'median'.
+    proportiontocut : float, optional
+        When `center` is 'trimmed', this gives the proportion of data points
+        to cut from each end. (See `scipy.stats.trim_mean`.)
+        Default is 0.05.
+
+    Returns
+    -------
+    statistic : float
+        The test statistic.
+    pvalue : float
+        The p-value for the test.
+
+    Notes
+    -----
+    Three variations of Levene's test are possible. The possibilities
+    and their recommended usages are:
+
+      * 'median' : Recommended for skewed (non-normal) distributions.
+      * 'mean' : Recommended for symmetric, moderate-tailed distributions.
+      * 'trimmed' : Recommended for heavy-tailed distributions.
+
+    The test version using the mean was proposed in the original article
+    of Levene ([2]_) while the median and trimmed mean have been studied by
+    Brown and Forsythe ([3]_), sometimes also referred to as the
+    Brown-Forsythe test.
+
+    References
+    ----------
+    .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
+    .. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
+           Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
+           Stanford University Press, pp. 278-292.
+    .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
+           Statistical Association, 69, 364-367.
+
+    """
+    # Handle keyword arguments.
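+    # Only 'center' and 'proportiontocut' are accepted; any other keyword
+    # raises TypeError below. (**kwds is parsed by hand because
+    # keyword-only arguments after *args are a Python-3-only feature.)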
+ center = 'median' + proportiontocut = 0.05 + for kw, value in kwds.items(): + if kw not in ['center', 'proportiontocut']: + raise TypeError("levene() got an unexpected keyword " + "argument '%s'" % kw) + if kw == 'center': + center = value + else: + proportiontocut = value + + k = len(args) + if k < 2: + raise ValueError("Must enter at least two input sample vectors.") + # check for 1d input + for j in range(k): + if np.asanyarray(args[j]).ndim > 1: + raise ValueError('Samples must be one-dimensional.') + + Ni = zeros(k) + Yci = zeros(k, 'd') + + if center not in ['mean', 'median', 'trimmed']: + raise ValueError("Keyword argument <center> must be 'mean', 'median'" + " or 'trimmed'.") + + if center == 'median': + func = lambda x: np.median(x, axis=0) + elif center == 'mean': + func = lambda x: np.mean(x, axis=0) + else: # center == 'trimmed' + args = tuple(stats.trimboth(np.sort(arg), proportiontocut) + for arg in args) + func = lambda x: np.mean(x, axis=0) + + for j in range(k): + Ni[j] = len(args[j]) + Yci[j] = func(args[j]) + Ntot = np.sum(Ni, axis=0) + + # compute Zij's + Zij = [None] * k + for i in range(k): + Zij[i] = abs(asarray(args[i]) - Yci[i]) + + # compute Zbari + Zbari = zeros(k, 'd') + Zbar = 0.0 + for i in range(k): + Zbari[i] = np.mean(Zij[i], axis=0) + Zbar += Zbari[i] * Ni[i] + + Zbar /= Ntot + numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0) + + # compute denom_variance + dvar = 0.0 + for i in range(k): + dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0) + + denom = (k - 1.0) * dvar + + W = numer / denom + pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf + return LeveneResult(W, pval) + + +def binom_test(x, n=None, p=0.5, alternative='two-sided'): + """ + Perform a test that the probability of success is p. + + This is an exact, two-sided test of the null hypothesis + that the probability of success in a Bernoulli experiment + is `p`. + + Parameters + ---------- + x : integer or array_like + the number of successes, or if x has length 2, it is the + number of successes and the number of failures. + n : integer + the number of trials. This is ignored if x gives both the + number of successes and failures + p : float, optional + The hypothesized probability of success. 0 <= p <= 1. The + default value is p = 0.5 + alternative : {'two-sided', 'greater', 'less'}, optional + Indicates the alternative hypothesis. The default value is + 'two-sided'. + + Returns + ------- + p-value : float + The p-value of the hypothesis test + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Binomial_test + + Examples + -------- + >>> from scipy import stats + + A car manufacturer claims that no more than 10% of their cars are unsafe. + 15 cars are inspected for safety, 3 were found to be unsafe. Test the + manufacturer's claim: + + >>> stats.binom_test(3, n=15, p=0.1, alternative='greater') + 0.18406106910639114 + + The null hypothesis cannot be rejected at the 5% level of significance + because the returned p-value is greater than the critical value of 5%. 
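+
+    Equivalently, `x` can be given as ``[number of successes, number of
+    failures]``, in which case `n` is inferred:
+
+    >>> pval = stats.binom_test([3, 12], p=0.1, alternative='greater')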
+ + """ + x = atleast_1d(x).astype(np.integer) + if len(x) == 2: + n = x[1] + x[0] + x = x[0] + elif len(x) == 1: + x = x[0] + if n is None or n < x: + raise ValueError("n must be >= x") + n = np.int_(n) + else: + raise ValueError("Incorrect length for x.") + + if (p > 1.0) or (p < 0.0): + raise ValueError("p must be in range [0,1]") + + if alternative not in ('two-sided', 'less', 'greater'): + raise ValueError("alternative not recognized\n" + "should be 'two-sided', 'less' or 'greater'") + + if alternative == 'less': + pval = distributions.binom.cdf(x, n, p) + return pval + + if alternative == 'greater': + pval = distributions.binom.sf(x-1, n, p) + return pval + + # if alternative was neither 'less' nor 'greater', then it's 'two-sided' + d = distributions.binom.pmf(x, n, p) + rerr = 1 + 1e-7 + if x == p * n: + # special case as shortcut, would also be handled by `else` below + pval = 1. + elif x < p * n: + i = np.arange(np.ceil(p * n), n+1) + y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) + pval = (distributions.binom.cdf(x, n, p) + + distributions.binom.sf(n - y, n, p)) + else: + i = np.arange(np.floor(p*n) + 1) + y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) + pval = (distributions.binom.cdf(y-1, n, p) + + distributions.binom.sf(x-1, n, p)) + + return min(1.0, pval) + + +def _apply_func(x, g, func): + # g is list of indices into x + # separating x into different groups + # func should be applied over the groups + g = unique(r_[0, g, len(x)]) + output = [] + for k in range(len(g) - 1): + output.append(func(x[g[k]:g[k+1]])) + + return asarray(output) + + +FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue')) + + +def fligner(*args, **kwds): + """ + Perform Fligner-Killeen test for equality of variance. + + Fligner's test tests the null hypothesis that all input samples + are from populations with equal variances. Fligner-Killeen's test is + distribution free when populations are identical [2]_. + + Parameters + ---------- + sample1, sample2, ... : array_like + Arrays of sample data. Need not be the same length. + center : {'mean', 'median', 'trimmed'}, optional + Keyword argument controlling which function of the data is used in + computing the test statistic. The default is 'median'. + proportiontocut : float, optional + When `center` is 'trimmed', this gives the proportion of data points + to cut from each end. (See `scipy.stats.trim_mean`.) + Default is 0.05. + + Returns + ------- + statistic : float + The test statistic. + pvalue : float + The p-value for the hypothesis test. + + See Also + -------- + bartlett : A parametric test for equality of k variances in normal samples + levene : A robust parametric test for equality of k variances + + Notes + ----- + As with Levene's test there are three variants of Fligner's test that + differ by the measure of central tendency used in the test. See `levene` + for more information. + + Conover et al. (1981) examine many of the existing parametric and + nonparametric tests by extensive simulations and they conclude that the + tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be + superior in terms of robustness of departures from normality and power [3]_. + + References + ---------- + .. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and + Hypothesis Testing based on Quadratic Inference Function. Technical + Report #99-03, Center for Likelihood Studies, Pennsylvania State + University. + https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf + + .. 
[2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample + tests for scale. 'Journal of the American Statistical Association.' + 71(353), 210-213. + + .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and + Hypothesis Testing based on Quadratic Inference Function. Technical + Report #99-03, Center for Likelihood Studies, Pennsylvania State + University. + + .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A + comparative study of tests for homogeneity of variances, with + applications to the outer continental shelf biding data. + Technometrics, 23(4), 351-361. + + """ + # Handle empty input + for a in args: + if np.asanyarray(a).size == 0: + return FlignerResult(np.nan, np.nan) + + # Handle keyword arguments. + center = 'median' + proportiontocut = 0.05 + for kw, value in kwds.items(): + if kw not in ['center', 'proportiontocut']: + raise TypeError("fligner() got an unexpected keyword " + "argument '%s'" % kw) + if kw == 'center': + center = value + else: + proportiontocut = value + + k = len(args) + if k < 2: + raise ValueError("Must enter at least two input sample vectors.") + + if center not in ['mean', 'median', 'trimmed']: + raise ValueError("Keyword argument <center> must be 'mean', 'median'" + " or 'trimmed'.") + + if center == 'median': + func = lambda x: np.median(x, axis=0) + elif center == 'mean': + func = lambda x: np.mean(x, axis=0) + else: # center == 'trimmed' + args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) + func = lambda x: np.mean(x, axis=0) + + Ni = asarray([len(args[j]) for j in range(k)]) + Yci = asarray([func(args[j]) for j in range(k)]) + Ntot = np.sum(Ni, axis=0) + # compute Zij's + Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)] + allZij = [] + g = [0] + for i in range(k): + allZij.extend(list(Zij[i])) + g.append(len(allZij)) + + ranks = stats.rankdata(allZij) + a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5) + + # compute Aibar + Aibar = _apply_func(a, g, np.sum) / Ni + anbar = np.mean(a, axis=0) + varsq = np.var(a, axis=0, ddof=1) + Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq + pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf + return FlignerResult(Xsq, pval) + + +def mood(x, y, axis=0): + """ + Perform Mood's test for equal scale parameters. + + Mood's two-sample test for scale parameters is a non-parametric + test for the null hypothesis that two samples are drawn from the + same distribution with the same scale parameter. + + Parameters + ---------- + x, y : array_like + Arrays of sample data. + axis : int, optional + The axis along which the samples are tested. `x` and `y` can be of + different length along `axis`. + If `axis` is None, `x` and `y` are flattened and the test is done on + all values in the flattened arrays. + + Returns + ------- + z : scalar or ndarray + The z-score for the hypothesis test. For 1-D inputs a scalar is + returned. + p-value : scalar ndarray + The p-value for the hypothesis test. + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + ansari : A non-parametric test for the equality of 2 variances + bartlett : A parametric test for equality of k variances in normal samples + levene : A parametric test for equality of k variances + + Notes + ----- + The data are assumed to be drawn from probability distributions ``f(x)`` + and ``f(x/s) / s`` respectively, for some probability density function f. + The null hypothesis is that ``s == 1``. 
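+
+    The test is two-sided: the reported p-value doubles the smaller
+    one-sided normal tail probability, so the null hypothesis is rejected
+    when the scale parameters differ in either direction.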
+ + For multi-dimensional arrays, if the inputs are of shapes + ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the + resulting z and p values will have shape ``(n0, n2, n3)``. Note that + ``n1`` and ``m1`` don't have to be equal, but the other dimensions do. + + Examples + -------- + >>> from scipy import stats + >>> np.random.seed(1234) + >>> x2 = np.random.randn(2, 45, 6, 7) + >>> x1 = np.random.randn(2, 30, 6, 7) + >>> z, p = stats.mood(x1, x2, axis=1) + >>> p.shape + (2, 6, 7) + + Find the number of points where the difference in scale is not significant: + + >>> (p > 0.1).sum() + 74 + + Perform the test with different scales: + + >>> x1 = np.random.randn(2, 30) + >>> x2 = np.random.randn(2, 35) * 10.0 + >>> stats.mood(x1, x2, axis=1) + (array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07])) + + """ + x = np.asarray(x, dtype=float) + y = np.asarray(y, dtype=float) + + if axis is None: + x = x.flatten() + y = y.flatten() + axis = 0 + + # Determine shape of the result arrays + res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis]) + if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if + ax != axis])): + raise ValueError("Dimensions of x and y on all axes except `axis` " + "should match") + + n = x.shape[axis] + m = y.shape[axis] + N = m + n + if N < 3: + raise ValueError("Not enough observations.") + + xy = np.concatenate((x, y), axis=axis) + if axis != 0: + xy = np.rollaxis(xy, axis) + + xy = xy.reshape(xy.shape[0], -1) + + # Generalized to the n-dimensional case by adding the axis argument, and + # using for loops, since rankdata is not vectorized. For improving + # performance consider vectorizing rankdata function. + all_ranks = np.zeros_like(xy) + for j in range(xy.shape[1]): + all_ranks[:, j] = stats.rankdata(xy[:, j]) + + Ri = all_ranks[:n] + M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0) + # Approx stat. + mnM = n * (N * N - 1.0) / 12 + varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180 + z = (M - mnM) / sqrt(varM) + + # sf for right tail, cdf for left tail. Factor 2 for two-sidedness + z_pos = z > 0 + pval = np.zeros_like(z) + pval[z_pos] = 2 * distributions.norm.sf(z[z_pos]) + pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos]) + + if res_shape == (): + # Return scalars, not 0-D arrays + z = z[0] + pval = pval[0] + else: + z.shape = res_shape + pval.shape = res_shape + + return z, pval + + +WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue')) + + +def wilcoxon(x, y=None, zero_method="wilcox", correction=False): + """ + Calculate the Wilcoxon signed-rank test. + + The Wilcoxon signed-rank test tests the null hypothesis that two + related paired samples come from the same distribution. In particular, + it tests whether the distribution of the differences x - y is symmetric + about zero. It is a non-parametric version of the paired T-test. + + Parameters + ---------- + x : array_like + The first set of measurements. + y : array_like, optional + The second set of measurements. If `y` is not given, then the `x` + array is considered to be the differences between the two sets of + measurements. 
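Mood's statistic above is built from the squared deviations of the first sample's ranks from the mid-rank, then normalized to a z-score. A short sketch with 1-D inputs (data invented), where the function unpacks the 0-D result arrays and returns plain scalars:

```python
import numpy as np
from scipy import stats

rng = np.random.RandomState(1234)
x = rng.standard_normal(30)
y = 3.0 * rng.standard_normal(40)   # same location, larger scale

# 1-D inputs: scalars come back, and a small p suggests unequal scales.
z, p = stats.mood(x, y)
print(z, p)
```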
+ zero_method : string, {"pratt", "wilcox", "zsplit"}, optional + "pratt": + Pratt treatment: includes zero-differences in the ranking process + (more conservative) + "wilcox": + Wilcox treatment: discards all zero-differences + "zsplit": + Zero rank split: just like Pratt, but splitting the zero rank + between positive and negative ones + correction : bool, optional + If True, apply continuity correction by adjusting the Wilcoxon rank + statistic by 0.5 towards the mean value when computing the + z-statistic. Default is False. + + Returns + ------- + statistic : float + The sum of the ranks of the differences above or below zero, whichever + is smaller. + pvalue : float + The two-sided p-value for the test. + + Notes + ----- + Because the normal approximation is used for the calculations, the + samples used should be large. A typical rule is to require that + n > 20. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test + + """ + + if zero_method not in ["wilcox", "pratt", "zsplit"]: + raise ValueError("Zero method should be either 'wilcox' " + "or 'pratt' or 'zsplit'") + + if y is None: + d = asarray(x) + else: + x, y = map(asarray, (x, y)) + if len(x) != len(y): + raise ValueError('Unequal N in wilcoxon. Aborting.') + d = x - y + + if zero_method == "wilcox": + # Keep all non-zero differences + d = compress(np.not_equal(d, 0), d, axis=-1) + + count = len(d) + if count < 10: + warnings.warn("Warning: sample size too small for normal approximation.") + + r = stats.rankdata(abs(d)) + r_plus = np.sum((d > 0) * r, axis=0) + r_minus = np.sum((d < 0) * r, axis=0) + + if zero_method == "zsplit": + r_zero = np.sum((d == 0) * r, axis=0) + r_plus += r_zero / 2. + r_minus += r_zero / 2. + + T = min(r_plus, r_minus) + mn = count * (count + 1.) * 0.25 + se = count * (count + 1.) * (2. * count + 1.) + + if zero_method == "pratt": + r = r[d != 0] + + replist, repnum = find_repeats(r) + if repnum.size != 0: + # Correction for repeated elements. + se -= 0.5 * (repnum * (repnum * repnum - 1)).sum() + + se = sqrt(se / 24) + correction = 0.5 * int(bool(correction)) * np.sign(T - mn) + z = (T - mn - correction) / se + prob = 2. * distributions.norm.sf(abs(z)) + + return WilcoxonResult(T, prob) + + +def median_test(*args, **kwds): + """ + Mood's median test. + + Test that two or more samples come from populations with the same median. + + Let ``n = len(args)`` be the number of samples. The "grand median" of + all the data is computed, and a contingency table is formed by + classifying the values in each sample as being above or below the grand + median. The contingency table, along with `correction` and `lambda_`, + are passed to `scipy.stats.chi2_contingency` to compute the test statistic + and p-value. + + Parameters + ---------- + sample1, sample2, ... : array_like + The set of samples. There must be at least two samples. + Each sample must be a one-dimensional sequence containing at least + one value. The samples are not required to have the same length. + ties : str, optional + Determines how values equal to the grand median are classified in + the contingency table. The string must be one of:: + + "below": + Values equal to the grand median are counted as "below". + "above": + Values equal to the grand median are counted as "above". + "ignore": + Values equal to the grand median are not counted. + + The default is "below". 
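Because `wilcoxon` above branches on `zero_method` both before and after ranking, a tied pair is the easiest way to see the three options diverge. A sketch on invented paired data containing one zero difference:

```python
import numpy as np
from scipy.stats import wilcoxon

before = np.array([125, 115, 130, 140, 140, 115, 140, 125, 140, 135, 120, 130])
after  = np.array([110, 122, 125, 120, 140, 124, 123, 137, 135, 145, 118, 128])

# One pair is tied (140, 140): "wilcox" drops the zero difference,
# "pratt" keeps it in the ranking, and "zsplit" splits its rank between
# the positive and negative sums.
for method in ("wilcox", "pratt", "zsplit"):
    print(method, wilcoxon(before, after, zero_method=method))
```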
+ correction : bool, optional + If True, *and* there are just two samples, apply Yates' correction + for continuity when computing the test statistic associated with + the contingency table. Default is True. + lambda_ : float or str, optional. + By default, the statistic computed in this test is Pearson's + chi-squared statistic. `lambda_` allows a statistic from the + Cressie-Read power divergence family to be used instead. See + `power_divergence` for details. + Default is 1 (Pearson's chi-squared statistic). + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + stat : float + The test statistic. The statistic that is returned is determined by + `lambda_`. The default is Pearson's chi-squared statistic. + p : float + The p-value of the test. + m : float + The grand median. + table : ndarray + The contingency table. The shape of the table is (2, n), where + n is the number of samples. The first row holds the counts of the + values above the grand median, and the second row holds the counts + of the values below the grand median. The table allows further + analysis with, for example, `scipy.stats.chi2_contingency`, or with + `scipy.stats.fisher_exact` if there are two samples, without having + to recompute the table. If ``nan_policy`` is "propagate" and there + are nans in the input, the return value for ``table`` is ``None``. + + See Also + -------- + kruskal : Compute the Kruskal-Wallis H-test for independent samples. + mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y. + + Notes + ----- + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill + (1950), pp. 394-399. + .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010). + See Sections 8.12 and 10.15. + + Examples + -------- + A biologist runs an experiment in which there are three groups of plants. + Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants. + Each plant produces a number of seeds. The seed counts for each group + are:: + + Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49 + Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99 + Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84 + + The following code applies Mood's median test to these samples. + + >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49] + >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99] + >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84] + >>> from scipy.stats import median_test + >>> stat, p, med, tbl = median_test(g1, g2, g3) + + The median is + + >>> med + 34.0 + + and the contingency table is + + >>> tbl + array([[ 5, 10, 7], + [11, 5, 10]]) + + `p` is too large to conclude that the medians are not the same: + + >>> p + 0.12609082774093244 + + The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to + `median_test`. 
+ + >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood") + >>> p + 0.12224779737117837 + + The median occurs several times in the data, so we'll get a different + result if, for example, ``ties="above"`` is used: + + >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above") + >>> p + 0.063873276069553273 + + >>> tbl + array([[ 5, 11, 9], + [11, 4, 8]]) + + This example demonstrates that if the data set is not large and there + are values equal to the median, the p-value can be sensitive to the + choice of `ties`. + + """ + ties = kwds.pop('ties', 'below') + correction = kwds.pop('correction', True) + lambda_ = kwds.pop('lambda_', None) + nan_policy = kwds.pop('nan_policy', 'propagate') + + if len(kwds) > 0: + bad_kwd = kwds.keys()[0] + raise TypeError("median_test() got an unexpected keyword " + "argument %r" % bad_kwd) + + if len(args) < 2: + raise ValueError('median_test requires two or more samples.') + + ties_options = ['below', 'above', 'ignore'] + if ties not in ties_options: + raise ValueError("invalid 'ties' option '%s'; 'ties' must be one " + "of: %s" % (ties, str(ties_options)[1:-1])) + + data = [np.asarray(arg) for arg in args] + + # Validate the sizes and shapes of the arguments. + for k, d in enumerate(data): + if d.size == 0: + raise ValueError("Sample %d is empty. All samples must " + "contain at least one value." % (k + 1)) + if d.ndim != 1: + raise ValueError("Sample %d has %d dimensions. All " + "samples must be one-dimensional sequences." % + (k + 1, d.ndim)) + + cdata = np.concatenate(data) + contains_nan, nan_policy = _contains_nan(cdata, nan_policy) + if contains_nan and nan_policy == 'propagate': + return np.nan, np.nan, np.nan, None + + if contains_nan: + grand_median = np.median(cdata[~np.isnan(cdata)]) + else: + grand_median = np.median(cdata) + # When the minimum version of numpy supported by scipy is 1.9.0, + # the above if/else statement can be replaced by the single line: + # grand_median = np.nanmedian(cdata) + + # Create the contingency table. + table = np.zeros((2, len(data)), dtype=np.int64) + for k, sample in enumerate(data): + sample = sample[~np.isnan(sample)] + + nabove = count_nonzero(sample > grand_median) + nbelow = count_nonzero(sample < grand_median) + nequal = sample.size - (nabove + nbelow) + table[0, k] += nabove + table[1, k] += nbelow + if ties == "below": + table[1, k] += nequal + elif ties == "above": + table[0, k] += nequal + + # Check that no row or column of the table is all zero. + # Such a table can not be given to chi2_contingency, because it would have + # a zero in the table of expected frequencies. + rowsums = table.sum(axis=1) + if rowsums[0] == 0: + raise ValueError("All values are below the grand median (%r)." % + grand_median) + if rowsums[1] == 0: + raise ValueError("All values are above the grand median (%r)." % + grand_median) + if ties == "ignore": + # We already checked that each sample has at least one value, but it + # is possible that all those values equal the grand median. If `ties` + # is "ignore", that would result in a column of zeros in `table`. We + # check for that case here. + zero_cols = np.nonzero((table == 0).all(axis=0))[0] + if len(zero_cols) > 0: + msg = ("All values in sample %d are equal to the grand " + "median (%r), so they are ignored, resulting in an " + "empty sample." 
% (zero_cols[0] + 1, grand_median)) + raise ValueError(msg) + + stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_, + correction=correction) + return stat, p, grand_median, table + + +def _circfuncs_common(samples, high, low): + samples = np.asarray(samples) + if samples.size == 0: + return np.nan, np.nan + + ang = (samples - low)*2.*pi / (high - low) + return samples, ang + + +def circmean(samples, high=2*pi, low=0, axis=None): + """ + Compute the circular mean for samples in a range. + + Parameters + ---------- + samples : array_like + Input array. + high : float or int, optional + High boundary for circular mean range. Default is ``2*pi``. + low : float or int, optional + Low boundary for circular mean range. Default is 0. + axis : int, optional + Axis along which means are computed. The default is to compute + the mean of the flattened array. + + Returns + ------- + circmean : float + Circular mean. + + Examples + -------- + >>> from scipy.stats import circmean + >>> circmean([0.1, 2*np.pi+0.2, 6*np.pi+0.3]) + 0.2 + + >>> from scipy.stats import circmean + >>> circmean([0.2, 1.4, 2.6], high = 1, low = 0) + 0.4 + + """ + samples, ang = _circfuncs_common(samples, high, low) + S = sin(ang).sum(axis=axis) + C = cos(ang).sum(axis=axis) + res = arctan2(S, C) + mask = res < 0 + if mask.ndim > 0: + res[mask] += 2*pi + elif mask: + res += 2*pi + return res*(high - low)/2.0/pi + low + + +def circvar(samples, high=2*pi, low=0, axis=None): + """ + Compute the circular variance for samples assumed to be in a range + + Parameters + ---------- + samples : array_like + Input array. + low : float or int, optional + Low boundary for circular variance range. Default is 0. + high : float or int, optional + High boundary for circular variance range. Default is ``2*pi``. + axis : int, optional + Axis along which variances are computed. The default is to compute + the variance of the flattened array. + + Returns + ------- + circvar : float + Circular variance. + + Notes + ----- + This uses a definition of circular variance that in the limit of small + angles returns a number close to the 'linear' variance. + + Examples + -------- + >>> from scipy.stats import circvar + >>> circvar([0, 2*np.pi/3, 5*np.pi/3]) + 2.19722457734 + + """ + samples, ang = _circfuncs_common(samples, high, low) + S = sin(ang).mean(axis=axis) + C = cos(ang).mean(axis=axis) + R = hypot(S, C) + return ((high - low)/2.0/pi)**2 * 2 * log(1/R) + + +def circstd(samples, high=2*pi, low=0, axis=None): + """ + Compute the circular standard deviation for samples assumed to be in the + range [low to high]. + + Parameters + ---------- + samples : array_like + Input array. + low : float or int, optional + Low boundary for circular standard deviation range. Default is 0. + high : float or int, optional + High boundary for circular standard deviation range. + Default is ``2*pi``. + axis : int, optional + Axis along which standard deviations are computed. The default is + to compute the standard deviation of the flattened array. + + Returns + ------- + circstd : float + Circular standard deviation. + + Notes + ----- + This uses a definition of circular standard deviation that in the limit of + small angles returns a number close to the 'linear' standard deviation. 
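Since `circmean` above averages the sine and cosine of the rescaled samples, values just below `high` and just above `low` reinforce rather than cancel. A sketch contrasting it with the arithmetic mean (angles invented, in radians):

```python
import numpy as np
from scipy.stats import circmean, circvar

angles = np.array([0.1, 0.2, 6.2])  # 6.2 rad is just below 2*pi, i.e. near 0

print(np.mean(angles))   # arithmetic mean is pulled toward the middle (~2.17)
print(circmean(angles))  # circular mean stays near 0, wrapping around 2*pi
print(circvar(angles))   # small: the points cluster tightly on the circle
```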
+ + Examples + -------- + >>> from scipy.stats import circstd + >>> circstd([0, 0.1*np.pi/2, 0.001*np.pi, 0.03*np.pi/2]) + 0.063564063306 + + """ + samples, ang = _circfuncs_common(samples, high, low) + S = sin(ang).mean(axis=axis) + C = cos(ang).mean(axis=axis) + R = hypot(S, C) + return ((high - low)/2.0/pi) * sqrt(-2*log(R)) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/morestats.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/morestats.pyc new file mode 100644 index 0000000..858cfd9 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/morestats.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/mstats.py b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats.py new file mode 100644 index 0000000..f850bff --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats.py @@ -0,0 +1,135 @@ +""" +=================================================================== +Statistical functions for masked arrays (:mod:`scipy.stats.mstats`) +=================================================================== + +.. currentmodule:: scipy.stats.mstats + +This module contains a large number of statistical functions that can +be used with masked arrays. + +Most of these functions are similar to those in `scipy.stats` but might +have small differences in the API or in the algorithm used. Since this +is a relatively new package, some API changes are still possible. + +Summary statistics +================== + +.. autosummary:: + :toctree: generated/ + + describe + gmean + hmean + kurtosis + mode + mquantiles + hdmedian + hdquantiles + hdquantiles_sd + idealfourths + plotting_positions + meppf + moment + skew + tmean + tvar + tmin + tmax + tsem + variation + find_repeats + sem + trimmed_mean + trimmed_mean_ci + trimmed_std + trimmed_var + +Frequency statistics +==================== + +.. autosummary:: + :toctree: generated/ + + scoreatpercentile + +Correlation functions +===================== + +.. autosummary:: + :toctree: generated/ + + f_oneway + pearsonr + spearmanr + pointbiserialr + kendalltau + kendalltau_seasonal + linregress + siegelslopes + theilslopes + sen_seasonal_slopes + +Statistical tests +================= + +.. autosummary:: + :toctree: generated/ + + ttest_1samp + ttest_onesamp + ttest_ind + ttest_rel + chisquare + ks_2samp + ks_twosamp + mannwhitneyu + rankdata + kruskal + kruskalwallis + friedmanchisquare + brunnermunzel + skewtest + kurtosistest + normaltest + +Transformations +=============== + +.. autosummary:: + :toctree: generated/ + + obrientransform + trim + trima + trimmed_stde + trimr + trimtail + trimboth + winsorize + zmap + zscore + +Other +===== + +.. 
autosummary:: + :toctree: generated/ + + argstoarray + count_tied_groups + msign + compare_medians_ms + median_cihs + mjci + mquantiles_cimj + rsh + +""" +from __future__ import division, print_function, absolute_import + +from .mstats_basic import * +from .mstats_extras import * +# Functions that support masked array input in stats but need to be kept in the +# mstats namespace for backwards compatibility: +from scipy.stats import gmean, hmean, zmap, zscore, chisquare diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/mstats.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats.pyc new file mode 100644 index 0000000..7a5636b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_basic.py b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_basic.py new file mode 100644 index 0000000..8a43a94 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_basic.py @@ -0,0 +1,2936 @@ +""" +An extension of scipy.stats.stats to support masked arrays + +""" +# Original author (2007): Pierre GF Gerard-Marchant + +# TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ? +# TODO : ttest_rel looks botched: what are x1,x2,v1,v2 for ? +# TODO : reimplement ksonesamp + +from __future__ import division, print_function, absolute_import + + +__all__ = ['argstoarray', + 'count_tied_groups', + 'describe', + 'f_oneway', 'find_repeats','friedmanchisquare', + 'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis', + 'ks_twosamp','ks_2samp','kurtosis','kurtosistest', + 'linregress', + 'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign', + 'normaltest', + 'obrientransform', + 'pearsonr','plotting_positions','pointbiserialr', + 'rankdata', + 'scoreatpercentile','sem', + 'sen_seasonal_slopes','skew','skewtest','spearmanr', + 'siegelslopes', 'theilslopes', + 'tmax','tmean','tmin','trim','trimboth', + 'trimtail','trima','trimr','trimmed_mean','trimmed_std', + 'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp', + 'ttest_ind','ttest_rel','tvar', + 'variation', + 'winsorize', + 'brunnermunzel', + ] + +import numpy as np +from numpy import ndarray +import numpy.ma as ma +from numpy.ma import masked, nomask + +from scipy._lib.six import iteritems + +import itertools +import warnings +from collections import namedtuple + +from . import distributions +import scipy.special as special +from ._stats_mstats_common import ( + _find_repeats, + linregress as stats_linregress, + theilslopes as stats_theilslopes, + siegelslopes as stats_siegelslopes + ) + + +genmissingvaldoc = """ + + Notes + ----- + Missing values are considered pair-wise: if a value is missing in x, + the corresponding value in y is masked. + """ + + +def _chk_asarray(a, axis): + # Always returns a masked array, raveled for axis=None + a = ma.asanyarray(a) + if axis is None: + a = ma.ravel(a) + outaxis = 0 + else: + outaxis = axis + return a, outaxis + + +def _chk2_asarray(a, b, axis): + a = ma.asanyarray(a) + b = ma.asanyarray(b) + if axis is None: + a = ma.ravel(a) + b = ma.ravel(b) + outaxis = 0 + else: + outaxis = axis + return a, b, outaxis + + +def _chk_size(a, b): + a = ma.asanyarray(a) + b = ma.asanyarray(b) + (na, nb) = (a.size, b.size) + if na != nb: + raise ValueError("The size of the input array should match!" 
+ " (%s <> %s)" % (na, nb)) + return (a, b, na) + + +def argstoarray(*args): + """ + Constructs a 2D array from a group of sequences. + + Sequences are filled with missing values to match the length of the longest + sequence. + + Parameters + ---------- + args : sequences + Group of sequences. + + Returns + ------- + argstoarray : MaskedArray + A ( `m` x `n` ) masked array, where `m` is the number of arguments and + `n` the length of the longest argument. + + Notes + ----- + `numpy.ma.row_stack` has identical behavior, but is called with a sequence + of sequences. + + """ + if len(args) == 1 and not isinstance(args[0], ndarray): + output = ma.asarray(args[0]) + if output.ndim != 2: + raise ValueError("The input should be 2D") + else: + n = len(args) + m = max([len(k) for k in args]) + output = ma.array(np.empty((n,m), dtype=float), mask=True) + for (k,v) in enumerate(args): + output[k,:len(v)] = v + + output[np.logical_not(np.isfinite(output._data))] = masked + return output + + +def find_repeats(arr): + """Find repeats in arr and return a tuple (repeats, repeat_count). + + The input is cast to float64. Masked values are discarded. + + Parameters + ---------- + arr : sequence + Input array. The array is flattened if it is not 1D. + + Returns + ------- + repeats : ndarray + Array of repeated values. + counts : ndarray + Array of counts. + + """ + # Make sure we get a copy. ma.compressed promises a "new array", but can + # actually return a reference. + compr = np.asarray(ma.compressed(arr), dtype=np.float64) + try: + need_copy = np.may_share_memory(compr, arr) + except AttributeError: + # numpy < 1.8.2 bug: np.may_share_memory([], []) raises, + # while in numpy 1.8.2 and above it just (correctly) returns False. + need_copy = False + if need_copy: + compr = compr.copy() + return _find_repeats(compr) + + +def count_tied_groups(x, use_missing=False): + """ + Counts the number of tied values. + + Parameters + ---------- + x : sequence + Sequence of data on which to counts the ties + use_missing : bool, optional + Whether to consider missing values as tied. + + Returns + ------- + count_tied_groups : dict + Returns a dictionary (nb of ties: nb of groups). + + Examples + -------- + >>> from scipy.stats import mstats + >>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6] + >>> mstats.count_tied_groups(z) + {2: 1, 3: 2} + + In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x). + + >>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6]) + >>> mstats.count_tied_groups(z) + {2: 2, 3: 1} + >>> z[[1,-1]] = np.ma.masked + >>> mstats.count_tied_groups(z, use_missing=True) + {2: 2, 3: 1} + + """ + nmasked = ma.getmask(x).sum() + # We need the copy as find_repeats will overwrite the initial data + data = ma.compressed(x).copy() + (ties, counts) = find_repeats(data) + nties = {} + if len(ties): + nties = dict(zip(np.unique(counts), itertools.repeat(1))) + nties.update(dict(zip(*find_repeats(counts)))) + + if nmasked and use_missing: + try: + nties[nmasked] += 1 + except KeyError: + nties[nmasked] = 1 + + return nties + + +def rankdata(data, axis=None, use_missing=False): + """Returns the rank (also known as order statistics) of each data point + along the given axis. + + If some values are tied, their rank is averaged. + If some values are masked, their rank is set to 0 if use_missing is False, + or set to the average rank of the unmasked values if use_missing is True. + + Parameters + ---------- + data : sequence + Input data. 
The data is transformed to a masked array + axis : {None,int}, optional + Axis along which to perform the ranking. + If None, the array is first flattened. An exception is raised if + the axis is specified for arrays with a dimension larger than 2 + use_missing : bool, optional + Whether the masked values have a rank of 0 (False) or equal to the + average rank of the unmasked values (True). + + """ + def _rank1d(data, use_missing=False): + n = data.count() + rk = np.empty(data.size, dtype=float) + idx = data.argsort() + rk[idx[:n]] = np.arange(1,n+1) + + if use_missing: + rk[idx[n:]] = (n+1)/2. + else: + rk[idx[n:]] = 0 + + repeats = find_repeats(data.copy()) + for r in repeats[0]: + condition = (data == r).filled(False) + rk[condition] = rk[condition].mean() + return rk + + data = ma.array(data, copy=False) + if axis is None: + if data.ndim > 1: + return _rank1d(data.ravel(), use_missing).reshape(data.shape) + else: + return _rank1d(data, use_missing) + else: + return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray) + + +ModeResult = namedtuple('ModeResult', ('mode', 'count')) + + +def mode(a, axis=0): + """ + Returns an array of the modal (most common) value in the passed array. + + Parameters + ---------- + a : array_like + n-dimensional array of which to find mode(s). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + mode : ndarray + Array of modal values. + count : ndarray + Array of counts for each mode. + + Notes + ----- + For more details, see `stats.mode`. + + Examples + -------- + >>> from scipy import stats + >>> from scipy.stats import mstats + >>> m_arr = np.ma.array([1, 1, 0, 0, 0, 0], mask=[0, 0, 1, 1, 1, 0]) + >>> stats.mode(m_arr) + ModeResult(mode=array([0]), count=array([4])) + >>> mstats.mode(m_arr) + ModeResult(mode=array([1.]), count=array([2.])) + + """ + a, axis = _chk_asarray(a, axis) + + def _mode1D(a): + (rep,cnt) = find_repeats(a) + if not cnt.ndim: + return (0, 0) + elif cnt.size: + return (rep[cnt.argmax()], cnt.max()) + else: + return (a.min(), 1) + + if axis is None: + output = _mode1D(ma.ravel(a)) + output = (ma.array(output[0]), ma.array(output[1])) + else: + output = ma.apply_along_axis(_mode1D, axis, a) + newshape = list(a.shape) + newshape[axis] = 1 + slices = [slice(None)] * output.ndim + slices[axis] = 0 + modes = output[tuple(slices)].reshape(newshape) + slices[axis] = 1 + counts = output[tuple(slices)].reshape(newshape) + output = (modes, counts) + + return ModeResult(*output) + + +def _betai(a, b, x): + x = np.asanyarray(x) + x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0 + return special.betainc(a, b, x) + + +def msign(x): + """Returns the sign of x, or 0 if x is masked.""" + return ma.filled(np.sign(x), 0) + + +def pearsonr(x,y): + """ + Calculates a Pearson correlation coefficient and the p-value for testing + non-correlation. + + The Pearson correlation coefficient measures the linear relationship + between two datasets. Strictly speaking, Pearson's correlation requires + that each dataset be normally distributed. Like other correlation + coefficients, this one varies between -1 and +1 with 0 implying no + correlation. Correlations of -1 or +1 imply an exact linear + relationship. Positive correlations imply that as `x` increases, so does + `y`. Negative correlations imply that as `x` increases, `y` decreases. 
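The masked `rankdata` above assigns masked entries either rank 0 or the average rank of the unmasked values, depending on `use_missing`. A sketch on an invented masked array:

```python
import numpy.ma as ma
from scipy.stats import mstats

x = ma.array([40, 10, 30, 20, 50], mask=[0, 0, 1, 0, 0])

print(mstats.rankdata(x))                    # masked entry gets rank 0
print(mstats.rankdata(x, use_missing=True))  # masked entry gets (n+1)/2 = 2.5
```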
+ + The p-value roughly indicates the probability of an uncorrelated system + producing datasets that have a Pearson correlation at least as extreme + as the one computed from these datasets. The p-values are not entirely + reliable but are probably reasonable for datasets larger than 500 or so. + + Parameters + ---------- + x : 1-D array_like + Input + y : 1-D array_like + Input + + Returns + ------- + pearsonr : float + Pearson's correlation coefficient, 2-tailed p-value. + + References + ---------- + http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation + + """ + (x, y, n) = _chk_size(x, y) + (x, y) = (x.ravel(), y.ravel()) + # Get the common mask and the total nb of unmasked elements + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) + n -= m.sum() + df = n-2 + if df < 0: + return (masked, masked) + + (mx, my) = (x.mean(), y.mean()) + (xm, ym) = (x-mx, y-my) + + r_num = ma.add.reduce(xm*ym) + r_den = ma.sqrt(ma.dot(xm,xm) * ma.dot(ym,ym)) + r = r_num / r_den + # Presumably, if r > 1, then it is only some small artifact of floating + # point arithmetic. + r = min(r, 1.0) + r = max(r, -1.0) + + if r is masked or abs(r) == 1.0: + prob = 0. + else: + t_squared = (df / ((1.0 - r) * (1.0 + r))) * r * r + prob = _betai(0.5*df, 0.5, df/(df + t_squared)) + + return r, prob + + +SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue')) + + +def spearmanr(x, y=None, use_ties=True, axis=None, nan_policy='propagate'): + """ + Calculates a Spearman rank-order correlation coefficient and the p-value + to test for non-correlation. + + The Spearman correlation is a nonparametric measure of the linear + relationship between two datasets. Unlike the Pearson correlation, the + Spearman correlation does not assume that both datasets are normally + distributed. Like other correlation coefficients, this one varies + between -1 and +1 with 0 implying no correlation. Correlations of -1 or + +1 imply a monotonic relationship. Positive correlations imply that + as `x` increases, so does `y`. Negative correlations imply that as `x` + increases, `y` decreases. + + Missing values are discarded pair-wise: if a value is missing in `x`, the + corresponding value in `y` is masked. + + The p-value roughly indicates the probability of an uncorrelated system + producing datasets that have a Spearman correlation at least as extreme + as the one computed from these datasets. The p-values are not entirely + reliable but are probably reasonable for datasets larger than 500 or so. + + Parameters + ---------- + x, y : 1D or 2D array_like, y is optional + One or two 1-D or 2-D arrays containing multiple variables and + observations. When these are 1-D, each represents a vector of + observations of a single variable. For the behavior in the 2-D case, + see under ``axis``, below. + use_ties : bool, optional + DO NOT USE. Does not do anything, keyword is only left in place for + backwards compatibility reasons. + axis : int or None, optional + If axis=0 (default), then each column represents a variable, with + observations in the rows. If axis=1, the relationship is transposed: + each row represents a variable, while the columns contain observations. + If axis=None, then both arrays will be raveled. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. 
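A sketch of the pair-wise treatment of missing data described above, on invented values; the last pair is masked in both arrays, so it is discarded before the correlation is formed:

```python
import numpy.ma as ma
from scipy.stats import mstats

x = ma.array([1., 2., 3., 4., 5., 60.], mask=[0, 0, 0, 0, 0, 1])
y = ma.array([2., 1., 4., 3., 6., 5.],  mask=[0, 0, 0, 0, 0, 1])

# Only the five complete pairs contribute to r and to the degrees of freedom.
r, p = mstats.pearsonr(x, y)
print(r, p)
```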
+ + Returns + ------- + correlation : float + Spearman correlation coefficient + pvalue : float + 2-tailed p-value. + + References + ---------- + [CRCProbStat2000] section 14.7 + + """ + if not use_ties: + raise ValueError("`use_ties=False` is not supported in SciPy >= 1.2.0") + + # Always returns a masked array, raveled if axis=None + x, axisout = _chk_asarray(x, axis) + if y is not None: + # Deal only with 2-D `x` case. + y, _ = _chk_asarray(y, axis) + if axisout == 0: + x = ma.column_stack((x, y)) + else: + x = ma.row_stack((x, y)) + + if axisout == 1: + # To simplify the code that follow (always use `n_obs, n_vars` shape) + x = x.T + + if nan_policy == 'omit': + x = ma.masked_invalid(x) + + def _spearmanr_2cols(x): + # Mask the same observations for all variables, and then drop those + # observations (can't leave them masked, rankdata is weird). + x = ma.mask_rowcols(x, axis=0) + x = x[~x.mask.any(axis=1), :] + + m = ma.getmask(x) + n_obs = x.shape[0] + dof = n_obs - 2 - int(m.sum(axis=0)[0]) + if dof < 0: + raise ValueError("The input must have at least 3 entries!") + + # Gets the ranks and rank differences + x_ranked = rankdata(x, axis=0) + rs = ma.corrcoef(x_ranked, rowvar=False).data + + # rs can have elements equal to 1, so avoid zero division warnings + olderr = np.seterr(divide='ignore') + try: + # clip the small negative values possibly caused by rounding + # errors before taking the square root + t = rs * np.sqrt((dof / ((rs+1.0) * (1.0-rs))).clip(0)) + finally: + np.seterr(**olderr) + + prob = 2 * distributions.t.sf(np.abs(t), dof) + + # For backwards compatibility, return scalars when comparing 2 columns + if rs.shape == (2, 2): + return SpearmanrResult(rs[1, 0], prob[1, 0]) + else: + return SpearmanrResult(rs, prob) + + # Need to do this per pair of variables, otherwise the dropped observations + # in a third column mess up the result for a pair. + n_vars = x.shape[1] + if n_vars == 2: + return _spearmanr_2cols(x) + else: + rs = np.ones((n_vars, n_vars), dtype=float) + prob = np.zeros((n_vars, n_vars), dtype=float) + for var1 in range(n_vars - 1): + for var2 in range(var1+1, n_vars): + result = _spearmanr_2cols(x[:, [var1, var2]]) + rs[var1, var2] = result.correlation + rs[var2, var1] = result.correlation + prob[var1, var2] = result.pvalue + prob[var2, var1] = result.pvalue + + return SpearmanrResult(rs, prob) + + +KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue')) + + +def kendalltau(x, y, use_ties=True, use_missing=False, method='auto'): + """ + Computes Kendall's rank correlation tau on two variables *x* and *y*. + + Parameters + ---------- + x : sequence + First data list (for example, time). + y : sequence + Second data list. + use_ties : {True, False}, optional + Whether ties correction should be performed. + use_missing : {False, True}, optional + Whether missing data should be allocated a rank of 0 (False) or the + average rank (True) + method: {'auto', 'asymptotic', 'exact'}, optional + Defines which method is used to calculate the p-value [1]_. + 'asymptotic' uses a normal approximation valid for large samples. + 'exact' computes the exact p-value, but can only be used if no ties + are present. 'auto' is the default and selects the appropriate + method based on a trade-off between speed and accuracy. + + Returns + ------- + correlation : float + Kendall tau + pvalue : float + Approximate 2-side p-value. + + References + ---------- + .. [1] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), + Charles Griffin & Co., 1970. 
+ + """ + (x, y, n) = _chk_size(x, y) + (x, y) = (x.flatten(), y.flatten()) + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) + if m is not nomask: + x = ma.array(x, mask=m, copy=True) + y = ma.array(y, mask=m, copy=True) + # need int() here, otherwise numpy defaults to 32 bit + # integer on all Windows architectures, causing overflow. + # int() will keep it infinite precision. + n -= int(m.sum()) + + if n < 2: + return KendalltauResult(np.nan, np.nan) + + rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0) + ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0) + idx = rx.argsort() + (rx, ry) = (rx[idx], ry[idx]) + C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum() + for i in range(len(ry)-1)], dtype=float) + D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum() + for i in range(len(ry)-1)], dtype=float) + xties = count_tied_groups(x) + yties = count_tied_groups(y) + if use_ties: + corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float) + corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float) + denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.) + else: + denom = n*(n-1)/2. + tau = (C-D) / denom + + if method == 'exact' and (xties or yties): + raise ValueError("Ties found, exact method cannot be used.") + + if method == 'auto': + if (not xties and not yties) and (n <= 33 or min(C, n*(n-1)/2.0-C) <= 1): + method = 'exact' + else: + method = 'asymptotic' + + if not xties and not yties and method == 'exact': + # Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), Charles Griffin & Co., 1970. + c = int(min(C, (n*(n-1))/2-C)) + if n <= 0: + raise ValueError + elif c < 0 or 2*c > n*(n-1): + raise ValueError + elif n == 1: + prob = 1.0 + elif n == 2: + prob = 1.0 + elif c == 0: + prob = 2.0/np.math.factorial(n) + elif c == 1: + prob = 2.0/np.math.factorial(n-1) + else: + old = [0.0]*(c+1) + new = [0.0]*(c+1) + new[0] = 1.0 + new[1] = 1.0 + for j in range(3,n+1): + old = new[:] + for k in range(1,min(j,c+1)): + new[k] += new[k-1] + for k in range(j,c+1): + new[k] += new[k-1] - old[k-j] + prob = 2.0*sum(new)/np.math.factorial(n) + elif method == 'asymptotic': + var_s = n*(n-1)*(2*n+5) + if use_ties: + var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties)]) + var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties)]) + v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\ + np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float) + v1 /= 2.*n*(n-1) + if n > 2: + v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)], + dtype=float) * \ + np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)], + dtype=float) + v2 /= 9.*n*(n-1)*(n-2) + else: + v2 = 0 + else: + v1 = v2 = 0 + + var_s /= 18. + var_s += (v1 + v2) + z = (C-D)/np.sqrt(var_s) + prob = special.erfc(abs(z)/np.sqrt(2)) + else: + raise ValueError("Unknown method "+str(method)+" specified, please use auto, exact or asymptotic.") + + return KendalltauResult(tau, prob) + + +def kendalltau_seasonal(x): + """ + Computes a multivariate Kendall's rank correlation tau, for seasonal data. + + Parameters + ---------- + x : 2-D ndarray + Array of seasonal data, with seasons in columns. 
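The `method='auto'` rule described above picks the exact p-value when there are no ties and the sample is small. A sketch comparing it against the asymptotic normal approximation (data invented):

```python
import numpy.ma as ma
from scipy.stats import mstats

x = ma.array([1, 2, 3, 4, 5, 6, 7, 8])
y = ma.array([2, 1, 4, 3, 6, 5, 8, 7])

# No ties and n <= 33, so method='auto' takes the exact branch here.
print(mstats.kendalltau(x, y))
print(mstats.kendalltau(x, y, method='asymptotic'))  # normal approximation
```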
+ + """ + x = ma.array(x, subok=True, copy=False, ndmin=2) + (n,m) = x.shape + n_p = x.count(0) + + S_szn = sum(msign(x[i:]-x[i]).sum(0) for i in range(n)) + S_tot = S_szn.sum() + + n_tot = x.count() + ties = count_tied_groups(x.compressed()) + corr_ties = sum(v*k*(k-1) for (k,v) in iteritems(ties)) + denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2. + + R = rankdata(x, axis=0, use_missing=True) + K = ma.empty((m,m), dtype=int) + covmat = ma.empty((m,m), dtype=float) + denom_szn = ma.empty(m, dtype=float) + for j in range(m): + ties_j = count_tied_groups(x[:,j].compressed()) + corr_j = sum(v*k*(k-1) for (k,v) in iteritems(ties_j)) + cmb = n_p[j]*(n_p[j]-1) + for k in range(j,m,1): + K[j,k] = sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum() + for i in range(n)) + covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() - + n*(n_p[j]+1)*(n_p[k]+1))/3. + K[k,j] = K[j,k] + covmat[k,j] = covmat[j,k] + + denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2. + + var_szn = covmat.diagonal() + + z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn) + z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum()) + z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum()) + + prob_szn = special.erfc(abs(z_szn)/np.sqrt(2)) + prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2)) + prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2)) + + chi2_tot = (z_szn*z_szn).sum() + chi2_trd = m * z_szn.mean()**2 + output = {'seasonal tau': S_szn/denom_szn, + 'global tau': S_tot/denom_tot, + 'global tau (alt)': S_tot/denom_szn.sum(), + 'seasonal p-value': prob_szn, + 'global p-value (indep)': prob_tot_ind, + 'global p-value (dep)': prob_tot_dep, + 'chi2 total': chi2_tot, + 'chi2 trend': chi2_trd, + } + return output + + +PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation', + 'pvalue')) + + +def pointbiserialr(x, y): + """Calculates a point biserial correlation coefficient and its p-value. + + Parameters + ---------- + x : array_like of bools + Input array. + y : array_like + Input array. + + Returns + ------- + correlation : float + R value + pvalue : float + 2-tailed p-value + + Notes + ----- + Missing values are considered pair-wise: if a value is missing in x, + the corresponding value in y is masked. + + For more details on `pointbiserialr`, see `stats.pointbiserialr`. + + """ + x = ma.fix_invalid(x, copy=True).astype(bool) + y = ma.fix_invalid(y, copy=True).astype(float) + # Get rid of the missing data + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) + if m is not nomask: + unmask = np.logical_not(m) + x = x[unmask] + y = y[unmask] + + n = len(x) + # phat is the fraction of x values that are True + phat = x.sum() / float(n) + y0 = y[~x] # y-values where x is False + y1 = y[x] # y-values where x is True + y0m = y0.mean() + y1m = y1.mean() + + rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std() + + df = n-2 + t = rpb*ma.sqrt(df/(1.0-rpb**2)) + prob = _betai(0.5*df, 0.5, df/(df+t*t)) + + return PointbiserialrResult(rpb, prob) + + +LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept', + 'rvalue', 'pvalue', + 'stderr')) + + +def linregress(x, y=None): + """ + Linear regression calculation + + Note that the non-masked version is used, and that this docstring is + replaced by the non-masked docstring + some info on missing data. 
+ + """ + if y is None: + x = ma.array(x) + if x.shape[0] == 2: + x, y = x + elif x.shape[1] == 2: + x, y = x.T + else: + msg = ("If only `x` is given as input, it has to be of shape " + "(2, N) or (N, 2), provided shape was %s" % str(x.shape)) + raise ValueError(msg) + else: + x = ma.array(x) + y = ma.array(y) + + x = x.flatten() + y = y.flatten() + + m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False) + if m is not nomask: + x = ma.array(x, mask=m) + y = ma.array(y, mask=m) + if np.any(~m): + slope, intercept, r, prob, sterrest = stats_linregress(x.data[~m], + y.data[~m]) + else: + # All data is masked + return None, None, None, None, None + else: + slope, intercept, r, prob, sterrest = stats_linregress(x.data, y.data) + + return LinregressResult(slope, intercept, r, prob, sterrest) + + +if stats_linregress.__doc__: + linregress.__doc__ = stats_linregress.__doc__ + genmissingvaldoc + + +def theilslopes(y, x=None, alpha=0.95): + r""" + Computes the Theil-Sen estimator for a set of points (x, y). + + `theilslopes` implements a method for robust linear regression. It + computes the slope as the median of all slopes between paired values. + + Parameters + ---------- + y : array_like + Dependent variable. + x : array_like or None, optional + Independent variable. If None, use ``arange(len(y))`` instead. + alpha : float, optional + Confidence degree between 0 and 1. Default is 95% confidence. + Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are + interpreted as "find the 90% confidence interval". + + Returns + ------- + medslope : float + Theil slope. + medintercept : float + Intercept of the Theil line, as ``median(y) - medslope*median(x)``. + lo_slope : float + Lower bound of the confidence interval on `medslope`. + up_slope : float + Upper bound of the confidence interval on `medslope`. + + See also + -------- + siegelslopes : a similar technique with repeated medians + + + Notes + ----- + For more details on `theilslopes`, see `stats.theilslopes`. + + """ + y = ma.asarray(y).flatten() + if x is None: + x = ma.arange(len(y), dtype=float) + else: + x = ma.asarray(x).flatten() + if len(x) != len(y): + raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y),len(x))) + + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) + y._mask = x._mask = m + # Disregard any masked elements of x or y + y = y.compressed() + x = x.compressed().astype(float) + # We now have unmasked arrays so can use `stats.theilslopes` + return stats_theilslopes(y, x, alpha=alpha) + + +def siegelslopes(y, x=None, method="hierarchical"): + r""" + Computes the Siegel estimator for a set of points (x, y). + + `siegelslopes` implements a method for robust linear regression + using repeated medians to fit a line to the points (x, y). + The method is robust to outliers with an asymptotic breakdown point + of 50%. + + Parameters + ---------- + y : array_like + Dependent variable. + x : array_like or None, optional + Independent variable. If None, use ``arange(len(y))`` instead. + method : {'hierarchical', 'separate'} + If 'hierarchical', estimate the intercept using the estimated + slope ``medslope`` (default option). + If 'separate', estimate the intercept independent of the estimated + slope. See Notes for details. + + Returns + ------- + medslope : float + Estimate of the slope of the regression line. + medintercept : float + Estimate of the intercept of the regression line. 
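Since the masked `theilslopes` above compresses away masked elements and then takes the median of all pairwise slopes, a single gross outlier barely moves the estimate. A minimal sketch under that assumption (data invented):

```python
import numpy as np
import numpy.ma as ma
from scipy.stats import mstats

x = np.arange(10, dtype=float)
y = ma.array(2.0 * x + 1.0)
y[3] = 40.0          # gross outlier
y[7] = ma.masked     # missing observation, discarded before fitting

# The estimate stays close to the true slope 2.0 despite the outlier;
# lo and up bound the default 95% confidence interval.
medslope, medintercept, lo, up = mstats.theilslopes(y, x)
print(medslope, medintercept, lo, up)
```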
+ + See also + -------- + theilslopes : a similar technique without repeated medians + + Notes + ----- + For more details on `siegelslopes`, see `scipy.stats.siegelslopes`. + + """ + y = ma.asarray(y).ravel() + if x is None: + x = ma.arange(len(y), dtype=float) + else: + x = ma.asarray(x).ravel() + if len(x) != len(y): + raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x))) + + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) + y._mask = x._mask = m + # Disregard any masked elements of x or y + y = y.compressed() + x = x.compressed().astype(float) + # We now have unmasked arrays so can use `stats.siegelslopes` + return stats_siegelslopes(y, x) + + +def sen_seasonal_slopes(x): + x = ma.array(x, subok=True, copy=False, ndmin=2) + (n,_) = x.shape + # Get list of slopes per season + szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None] + for i in range(n)]) + szn_medslopes = ma.median(szn_slopes, axis=0) + medslope = ma.median(szn_slopes, axis=None) + return szn_medslopes, medslope + + +Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue')) + + +def ttest_1samp(a, popmean, axis=0): + """ + Calculates the T-test for the mean of ONE group of scores. + + Parameters + ---------- + a : array_like + sample observation + popmean : float or array_like + expected value in null hypothesis, if array_like than it must have the + same shape as `a` excluding the axis dimension + axis : int or None, optional + Axis along which to compute test. If None, compute over the whole + array `a`. + + Returns + ------- + statistic : float or array + t-statistic + pvalue : float or array + two-tailed p-value + + Notes + ----- + For more details on `ttest_1samp`, see `stats.ttest_1samp`. + + """ + a, axis = _chk_asarray(a, axis) + if a.size == 0: + return (np.nan, np.nan) + + x = a.mean(axis=axis) + v = a.var(axis=axis, ddof=1) + n = a.count(axis=axis) + # force df to be an array for masked division not to throw a warning + df = ma.asanyarray(n - 1.0) + svar = ((n - 1.0) * v) / df + with np.errstate(divide='ignore', invalid='ignore'): + t = (x - popmean) / ma.sqrt(svar / n) + prob = special.betainc(0.5*df, 0.5, df/(df + t*t)) + + return Ttest_1sampResult(t, prob) + + +ttest_onesamp = ttest_1samp + + +Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue')) + + +def ttest_ind(a, b, axis=0, equal_var=True): + """ + Calculates the T-test for the means of TWO INDEPENDENT samples of scores. + + Parameters + ---------- + a, b : array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int or None, optional + Axis along which to compute test. If None, compute over the whole + arrays, `a`, and `b`. + equal_var : bool, optional + If True, perform a standard independent 2 sample test that assumes equal + population variances. + If False, perform Welch's t-test, which does not assume equal population + variance. + + .. versionadded:: 0.17.0 + + Returns + ------- + statistic : float or array + The calculated t-statistic. + pvalue : float or array + The two-tailed p-value. + + Notes + ----- + For more details on `ttest_ind`, see `stats.ttest_ind`. 
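A sketch of the masked one-sample t-test above: the masked reading drops out of the mean, the variance, and the count `n` (data invented; H0 is a population mean of 5.0):

```python
import numpy.ma as ma
from scipy.stats import mstats

a = ma.array([4.8, 5.1, 5.3, 4.9, 99.0, 5.2, 5.0],
             mask=[0, 0, 0, 0, 1, 0, 0])

# The masked bad reading does not influence the statistic at all.
t, p = mstats.ttest_1samp(a, popmean=5.0)
print(t, p)
```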
+ + """ + a, b, axis = _chk2_asarray(a, b, axis) + + if a.size == 0 or b.size == 0: + return Ttest_indResult(np.nan, np.nan) + + (x1, x2) = (a.mean(axis), b.mean(axis)) + (v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1)) + (n1, n2) = (a.count(axis), b.count(axis)) + + if equal_var: + # force df to be an array for masked division not to throw a warning + df = ma.asanyarray(n1 + n2 - 2.0) + svar = ((n1-1)*v1+(n2-1)*v2) / df + denom = ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here! + else: + vn1 = v1/n1 + vn2 = v2/n2 + with np.errstate(divide='ignore', invalid='ignore'): + df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1)) + + # If df is undefined, variances are zero. + # It doesn't matter what df is as long as it is not NaN. + df = np.where(np.isnan(df), 1, df) + denom = ma.sqrt(vn1 + vn2) + + with np.errstate(divide='ignore', invalid='ignore'): + t = (x1-x2) / denom + probs = special.betainc(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape) + + return Ttest_indResult(t, probs.squeeze()) + + +Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue')) + + +def ttest_rel(a, b, axis=0): + """ + Calculates the T-test on TWO RELATED samples of scores, a and b. + + Parameters + ---------- + a, b : array_like + The arrays must have the same shape. + axis : int or None, optional + Axis along which to compute test. If None, compute over the whole + arrays, `a`, and `b`. + + Returns + ------- + statistic : float or array + t-statistic + pvalue : float or array + two-tailed p-value + + Notes + ----- + For more details on `ttest_rel`, see `stats.ttest_rel`. + + """ + a, b, axis = _chk2_asarray(a, b, axis) + if len(a) != len(b): + raise ValueError('unequal length arrays') + + if a.size == 0 or b.size == 0: + return Ttest_relResult(np.nan, np.nan) + + n = a.count(axis) + df = ma.asanyarray(n-1.0) + d = (a-b).astype('d') + dm = d.mean(axis) + v = d.var(axis=axis, ddof=1) + denom = ma.sqrt(v / n) + with np.errstate(divide='ignore', invalid='ignore'): + t = dm / denom + + probs = special.betainc(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape).squeeze() + + return Ttest_relResult(t, probs) + + +MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', + 'pvalue')) + + +def mannwhitneyu(x,y, use_continuity=True): + """ + Computes the Mann-Whitney statistic + + Missing values in `x` and/or `y` are discarded. + + Parameters + ---------- + x : sequence + Input + y : sequence + Input + use_continuity : {True, False}, optional + Whether a continuity correction (1/2.) should be taken into account. + + Returns + ------- + statistic : float + The Mann-Whitney statistics + pvalue : float + Approximate p-value assuming a normal distribution. + + """ + x = ma.asarray(x).compressed().view(ndarray) + y = ma.asarray(y).compressed().view(ndarray) + ranks = rankdata(np.concatenate([x,y])) + (nx, ny) = (len(x), len(y)) + nt = nx + ny + U = ranks[:nx].sum() - nx*(nx+1)/2. + U = max(U, nx*ny - U) + u = nx*ny - U + + mu = (nx*ny)/2. + sigsq = (nt**3 - nt)/12. + ties = count_tied_groups(ranks) + sigsq -= sum(v*(k**3-k) for (k,v) in iteritems(ties))/12. + sigsq *= nx*ny/float(nt*(nt-1)) + + if use_continuity: + z = (U - 1/2. - mu) / ma.sqrt(sigsq) + else: + z = (U - mu) / ma.sqrt(sigsq) + + prob = special.erfc(abs(z)/np.sqrt(2)) + return MannwhitneyuResult(u, prob) + + +KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue')) + + +def kruskal(*args): + """ + Compute the Kruskal-Wallis H-test for independent samples + + Parameters + ---------- + sample1, sample2, ... 
: array_like
+        Two or more arrays with the sample measurements can be given as
+        arguments.
+
+    Returns
+    -------
+    statistic : float
+        The Kruskal-Wallis H statistic, corrected for ties
+    pvalue : float
+        The p-value for the test using the assumption that H has a
+        chi-square distribution
+
+    Notes
+    -----
+    For more details on `kruskal`, see `stats.kruskal`.
+
+    Examples
+    --------
+    >>> from scipy.stats.mstats import kruskal
+
+    Random samples from three different brands of batteries were tested
+    to see how long the charge lasted. Results were as follows:
+
+    >>> a = [6.3, 5.4, 5.7, 5.2, 5.0]
+    >>> b = [6.9, 7.0, 6.1, 7.9]
+    >>> c = [7.2, 6.9, 6.1, 6.5]
+
+    Test the hypothesis that the distribution functions for all of the
+    brands' durations are identical. Use the 5% level of significance.
+
+    >>> kruskal(a, b, c)
+    KruskalResult(statistic=7.113812154696133, pvalue=0.028526948491942164)
+
+    The null hypothesis is rejected at the 5% level of significance
+    because the returned p-value is less than the critical value of 5%.
+
+    """
+    output = argstoarray(*args)
+    ranks = ma.masked_equal(rankdata(output, use_missing=False), 0)
+    sumrk = ranks.sum(-1)
+    ngrp = ranks.count(-1)
+    ntot = ranks.count()
+    H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1)
+    # Tie correction
+    ties = count_tied_groups(ranks)
+    T = 1. - sum(v*(k**3-k) for (k,v) in iteritems(ties))/float(ntot**3-ntot)
+    if T == 0:
+        raise ValueError('All numbers are identical in kruskal')
+
+    H /= T
+    df = len(output) - 1
+    prob = distributions.chi2.sf(H, df)
+    return KruskalResult(H, prob)
+
+
+kruskalwallis = kruskal
+
+
+def ks_twosamp(data1, data2, alternative="two-sided"):
+    """
+    Computes the Kolmogorov-Smirnov test on two samples.
+
+    Missing values are discarded.
+
+    Parameters
+    ----------
+    data1 : array_like
+        First data set
+    data2 : array_like
+        Second data set
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Indicates the alternative hypothesis. Default is 'two-sided'.
+
+    Returns
+    -------
+    d : float
+        Value of the Kolmogorov-Smirnov test statistic
+    p : float
+        Corresponding p-value.
+
+    """
+    (data1, data2) = (ma.asarray(data1), ma.asarray(data2))
+    (n1, n2) = (data1.count(), data2.count())
+    n = (n1*n2/float(n1+n2))
+    mix = ma.concatenate((data1.compressed(), data2.compressed()))
+    mixsort = mix.argsort(kind='mergesort')
+    csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum()
+    # Check for ties
+    if len(np.unique(mix)) < (n1+n2):
+        csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]]
+
+    alternative = str(alternative).lower()[0]
+    if alternative == 't':
+        d = ma.abs(csum).max()
+        prob = special.kolmogorov(np.sqrt(n)*d)
+    elif alternative == 'l':
+        d = -csum.min()
+        prob = np.exp(-2*n*d**2)
+    elif alternative == 'g':
+        d = csum.max()
+        prob = np.exp(-2*n*d**2)
+    else:
+        raise ValueError("Invalid value for the alternative hypothesis: "
+                         "should be in 'two-sided', 'less' or 'greater'")
+
+    return (d, prob)
+
+
+ks_2samp = ks_twosamp
+
+
+def trima(a, limits=None, inclusive=(True,True)):
+    """
+    Trims an array by masking the data outside some given limits.
+
+    Returns a masked version of the input array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    limits : {None, tuple}, optional
+        Tuple of (lower limit, upper limit) in absolute values.
+        Values of the input array lower (greater) than the lower (upper) limit
+        will be masked. A limit of None indicates an open interval.
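A sketch of `ks_twosamp` above on two invented samples; the first letter of `alternative` selects the two-sided or one-sided branch:

```python
import numpy as np
from scipy.stats import mstats

rng = np.random.RandomState(0)
d1 = rng.normal(0.0, 1.0, size=40)
d2 = rng.normal(0.5, 1.0, size=50)   # shifted distribution

# Two-sided by default; 'less' and 'greater' use the one-sided branches.
d, p = mstats.ks_twosamp(d1, d2)
print(d, p)
print(mstats.ks_twosamp(d1, d2, alternative='greater'))
```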
+ inclusive : (bool, bool) tuple, optional + Tuple of (lower flag, upper flag), indicating whether values exactly + equal to the lower (upper) limit are allowed. + + """ + a = ma.asarray(a) + a.unshare_mask() + if (limits is None) or (limits == (None, None)): + return a + + (lower_lim, upper_lim) = limits + (lower_in, upper_in) = inclusive + condition = False + if lower_lim is not None: + if lower_in: + condition |= (a < lower_lim) + else: + condition |= (a <= lower_lim) + + if upper_lim is not None: + if upper_in: + condition |= (a > upper_lim) + else: + condition |= (a >= upper_lim) + + a[condition.filled(True)] = masked + return a + + +def trimr(a, limits=None, inclusive=(True, True), axis=None): + """ + Trims an array by masking some proportion of the data on each end. + Returns a masked version of the input array. + + Parameters + ---------- + a : sequence + Input array. + limits : {None, tuple}, optional + Tuple of the percentages to cut on each side of the array, with respect + to the number of unmasked data, as floats between 0. and 1. + Noting n the number of unmasked data before trimming, the + (n*limits[0])th smallest data and the (n*limits[1])th largest data are + masked, and the total number of unmasked data after trimming is + n*(1.-sum(limits)). The value of one limit can be set to None to + indicate an open interval. + inclusive : {(True,True) tuple}, optional + Tuple of flags indicating whether the number of data being masked on + the left (right) end should be truncated (True) or rounded (False) to + integers. + axis : {None,int}, optional + Axis along which to trim. If None, the whole array is trimmed, but its + shape is maintained. + + """ + def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive): + n = a.count() + idx = a.argsort() + if low_limit: + if low_inclusive: + lowidx = int(low_limit*n) + else: + lowidx = np.round(low_limit*n) + a[idx[:lowidx]] = masked + if up_limit is not None: + if up_inclusive: + upidx = n - int(n*up_limit) + else: + upidx = n - np.round(n*up_limit) + a[idx[upidx:]] = masked + return a + + a = ma.asarray(a) + a.unshare_mask() + if limits is None: + return a + + # Check the limits + (lolim, uplim) = limits + errmsg = "The proportion to cut from the %s should be between 0. and 1." + if lolim is not None: + if lolim > 1. or lolim < 0: + raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) + if uplim is not None: + if uplim > 1. or uplim < 0: + raise ValueError(errmsg % 'end' + "(got %s)" % uplim) + + (loinc, upinc) = inclusive + + if axis is None: + shp = a.shape + return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp) + else: + return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc) + + +trimdoc = """ + Parameters + ---------- + a : sequence + Input array + limits : {None, tuple}, optional + If `relative` is False, tuple (lower limit, upper limit) in absolute values. + Values of the input array lower (greater) than the lower (upper) limit are + masked. + + If `relative` is True, tuple (lower percentage, upper percentage) to cut + on each side of the array, with respect to the number of unmasked data. + + Noting n the number of unmasked data before trimming, the (n*limits[0])th + smallest data and the (n*limits[1])th largest data are masked, and the + total number of unmasked data after trimming is n*(1.-sum(limits)) + In each case, the value of one limit can be set to None to indicate an + open interval. 
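`trima` and `trimr` above differ in how `limits` is read: absolute cutoff values versus proportions of the unmasked data. A sketch of both on the same array:

```python
import numpy as np
from scipy.stats import mstats

z = np.arange(1, 11)  # 1..10

# Absolute limits: mask everything outside [3, 8].
print(mstats.trima(z, limits=(3, 8)))

# Proportional limits: mask the smallest 10% and the largest 20% of the
# unmasked values (here: 1 value on the left, 2 on the right).
print(mstats.trimr(z, limits=(0.1, 0.2)))
```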
+ + If limits is None, no trimming is performed + inclusive : {(bool, bool) tuple}, optional + If `relative` is False, tuple indicating whether values exactly equal + to the absolute limits are allowed. + If `relative` is True, tuple indicating whether the number of data + being masked on each side should be rounded (True) or truncated + (False). + relative : bool, optional + Whether to consider the limits as absolute values (False) or proportions + to cut (True). + axis : int, optional + Axis along which to trim. +""" + + +def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None): + """ + Trims an array by masking the data outside some given limits. + + Returns a masked version of the input array. + + %s + + Examples + -------- + >>> from scipy.stats.mstats import trim + >>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10] + >>> print(trim(z,(3,8))) + [-- -- 3 4 5 6 7 8 -- --] + >>> print(trim(z,(0.1,0.2),relative=True)) + [-- 2 3 4 5 6 7 8 -- --] + + """ + if relative: + return trimr(a, limits=limits, inclusive=inclusive, axis=axis) + else: + return trima(a, limits=limits, inclusive=inclusive) + + +if trim.__doc__ is not None: + trim.__doc__ = trim.__doc__ % trimdoc + + +def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None): + """ + Trims the smallest and largest data values. + + Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and + ``int(proportiontocut * n)`` largest values of data along the given axis, + where n is the number of unmasked values before trimming. + + Parameters + ---------- + data : ndarray + Data to trim. + proportiontocut : float, optional + Percentage of trimming (as a float between 0 and 1). + If n is the number of unmasked values before trimming, the number of + values after trimming is ``(1 - 2*proportiontocut) * n``. + Default is 0.2. + inclusive : {(bool, bool) tuple}, optional + Tuple indicating whether the number of data being masked on each side + should be rounded (True) or truncated (False). + axis : int, optional + Axis along which to perform the trimming. + If None, the input array is first flattened. + + """ + return trimr(data, limits=(proportiontocut,proportiontocut), + inclusive=inclusive, axis=axis) + + +def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True), + axis=None): + """ + Trims the data by masking values from one tail. + + Parameters + ---------- + data : array_like + Data to trim. + proportiontocut : float, optional + Percentage of trimming. If n is the number of unmasked values + before trimming, the number of values after trimming is + ``(1 - proportiontocut) * n``. Default is 0.2. + tail : {'left','right'}, optional + If 'left' the `proportiontocut` lowest values will be masked. + If 'right' the `proportiontocut` highest values will be masked. + Default is 'left'. + inclusive : {(bool, bool) tuple}, optional + Tuple indicating whether the number of data being masked on each side + should be rounded (True) or truncated (False). Default is + (True, True). + axis : int, optional + Axis along which to perform the trimming. + If None, the input array is first flattened. Default is None. + + Returns + ------- + trimtail : ndarray + Returned array of same shape as `data` with masked tail values. 
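+
+ Examples
+ --------
+ A small illustrative sketch: with the default ``tail='left'`` and
+ ``inclusive=(True, True)``, masking the lowest 20% of ten values
+ masks ``int(0.2 * 10) = 2`` entries.
+
+ >>> from scipy.stats.mstats import trimtail
+ >>> print(trimtail([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.2))
+ [-- -- 3 4 5 6 7 8 9 10]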
+ + """ + tail = str(tail).lower()[0] + if tail == 'l': + limits = (proportiontocut,None) + elif tail == 'r': + limits = (None, proportiontocut) + else: + raise TypeError("The tail argument should be in ('left','right')") + + return trimr(data, limits=limits, axis=axis, inclusive=inclusive) + + +trim1 = trimtail + + +def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, + axis=None): + """Returns the trimmed mean of the data along the given axis. + + %s + + """ % trimdoc + if (not isinstance(limits,tuple)) and isinstance(limits,float): + limits = (limits, limits) + if relative: + return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis) + else: + return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis) + + +def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, + axis=None, ddof=0): + """Returns the trimmed variance of the data along the given axis. + + %s + ddof : {0,integer}, optional + Means Delta Degrees of Freedom. The denominator used during computations + is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- + biased estimate of the variance. + + """ % trimdoc + if (not isinstance(limits,tuple)) and isinstance(limits,float): + limits = (limits, limits) + if relative: + out = trimr(a,limits=limits, inclusive=inclusive,axis=axis) + else: + out = trima(a,limits=limits,inclusive=inclusive) + + return out.var(axis=axis, ddof=ddof) + + +def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, + axis=None, ddof=0): + """Returns the trimmed standard deviation of the data along the given axis. + + %s + ddof : {0,integer}, optional + Means Delta Degrees of Freedom. The denominator used during computations + is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- + biased estimate of the variance. + + """ % trimdoc + if (not isinstance(limits,tuple)) and isinstance(limits,float): + limits = (limits, limits) + if relative: + out = trimr(a,limits=limits,inclusive=inclusive,axis=axis) + else: + out = trima(a,limits=limits,inclusive=inclusive) + return out.std(axis=axis,ddof=ddof) + + +def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None): + """ + Returns the standard error of the trimmed mean along the given axis. + + Parameters + ---------- + a : sequence + Input array + limits : {(0.1,0.1), tuple of float}, optional + tuple (lower percentage, upper percentage) to cut on each side of the + array, with respect to the number of unmasked data. + + If n is the number of unmasked data before trimming, the values + smaller than ``n * limits[0]`` and the values larger than + ``n * `limits[1]`` are masked, and the total number of unmasked + data after trimming is ``n * (1.-sum(limits))``. In each case, + the value of one limit can be set to None to indicate an open interval. + If `limits` is None, no trimming is performed. + inclusive : {(bool, bool) tuple} optional + Tuple indicating whether the number of data being masked on each side + should be rounded (True) or truncated (False). + axis : int, optional + Axis along which to trim. + + Returns + ------- + trimmed_stde : scalar or ndarray + + """ + def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive): + "Returns the standard error of the trimmed mean for a 1D input data." 
+ n = a.count() + idx = a.argsort() + if low_limit: + if low_inclusive: + lowidx = int(low_limit*n) + else: + lowidx = np.round(low_limit*n) + a[idx[:lowidx]] = masked + if up_limit is not None: + if up_inclusive: + upidx = n - int(n*up_limit) + else: + upidx = n - np.round(n*up_limit) + a[idx[upidx:]] = masked + a[idx[:lowidx]] = a[idx[lowidx]] + a[idx[upidx:]] = a[idx[upidx-1]] + winstd = a.std(ddof=1) + return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a))) + + a = ma.array(a, copy=True, subok=True) + a.unshare_mask() + if limits is None: + return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis)) + if (not isinstance(limits,tuple)) and isinstance(limits,float): + limits = (limits, limits) + + # Check the limits + (lolim, uplim) = limits + errmsg = "The proportion to cut from the %s should be between 0. and 1." + if lolim is not None: + if lolim > 1. or lolim < 0: + raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) + if uplim is not None: + if uplim > 1. or uplim < 0: + raise ValueError(errmsg % 'end' + "(got %s)" % uplim) + + (loinc, upinc) = inclusive + if (axis is None): + return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc) + else: + if a.ndim > 2: + raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim) + return ma.apply_along_axis(_trimmed_stde_1D, axis, a, + lolim,uplim,loinc,upinc) + + +def _mask_to_limits(a, limits, inclusive): + """Mask an array for values outside of given limits. + + This is primarily a utility function. + + Parameters + ---------- + a : array + limits : (float or None, float or None) + A tuple consisting of the (lower limit, upper limit). Values in the + input array less than the lower limit or greater than the upper limit + will be masked out. None implies no limit. + inclusive : (bool, bool) + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to lower or upper are allowed. + + Returns + ------- + A MaskedArray. + + Raises + ------ + A ValueError if there are no values within the given limits. + """ + lower_limit, upper_limit = limits + lower_include, upper_include = inclusive + am = ma.MaskedArray(a) + if lower_limit is not None: + if lower_include: + am = ma.masked_less(am, lower_limit) + else: + am = ma.masked_less_equal(am, lower_limit) + + if upper_limit is not None: + if upper_include: + am = ma.masked_greater(am, upper_limit) + else: + am = ma.masked_greater_equal(am, upper_limit) + + if am.count() == 0: + raise ValueError("No array values within given limits") + + return am + + +def tmean(a, limits=None, inclusive=(True, True), axis=None): + """ + Compute the trimmed mean. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None (default), then all + values are used. Either of the limit values in the tuple can also be + None representing a half-open interval. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. If None, compute over the + whole array. Default is None. + + Returns + ------- + tmean : float + + Notes + ----- + For more details on `tmean`, see `stats.tmean`. 
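+
+ In this masked-array implementation, `tmean` is equivalent to masking
+ with `trima` and averaging whatever remains; a quick sketch:
+
+ >>> from scipy.stats.mstats import tmean, trima
+ >>> x = [1., 2., 3., 4., 100.]
+ >>> tmean(x, (None, 10)) == trima(x, (None, 10)).mean()
+ True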
+ + Examples + -------- + >>> from scipy.stats import mstats + >>> a = np.array([[6, 8, 3, 0], + ... [3, 9, 1, 2], + ... [8, 7, 8, 2], + ... [5, 6, 0, 2], + ... [4, 5, 5, 2]]) + ... + ... + >>> mstats.tmean(a, (2,5)) + 3.3 + >>> mstats.tmean(a, (2,5), axis=0) + masked_array(data=[4.0, 5.0, 4.0, 2.0], + mask=[False, False, False, False], + fill_value=1e+20) + + """ + return trima(a, limits=limits, inclusive=inclusive).mean(axis=axis) + + +def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """ + Compute the trimmed variance + + This function computes the sample variance of an array of values, + while ignoring values which are outside of given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. If None, compute over the + whole array. Default is zero. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tvar : float + Trimmed variance. + + Notes + ----- + For more details on `tvar`, see `stats.tvar`. + + """ + a = a.astype(float).ravel() + if limits is None: + n = (~a.mask).sum() # todo: better way to do that? + return np.ma.var(a) * n/(n-1.) + am = _mask_to_limits(a, limits=limits, inclusive=inclusive) + + return np.ma.var(am, axis=axis, ddof=ddof) + + +def tmin(a, lowerlimit=None, axis=0, inclusive=True): + """ + Compute the trimmed minimum + + Parameters + ---------- + a : array_like + array of values + lowerlimit : None or float, optional + Values in the input array less than the given limit will be ignored. + When lowerlimit is None, then all values are used. The default value + is None. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + inclusive : {True, False}, optional + This flag determines whether values exactly equal to the lower limit + are included. The default value is True. + + Returns + ------- + tmin : float, int or ndarray + + Notes + ----- + For more details on `tmin`, see `stats.tmin`. + + Examples + -------- + >>> from scipy.stats import mstats + >>> a = np.array([[6, 8, 3, 0], + ... [3, 2, 1, 2], + ... [8, 1, 8, 2], + ... [5, 3, 0, 2], + ... [4, 7, 5, 2]]) + ... + >>> mstats.tmin(a, 5) + masked_array(data=[5, 7, 5, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + a, axis = _chk_asarray(a, axis) + am = trima(a, (lowerlimit, None), (inclusive, False)) + return ma.minimum.reduce(am, axis) + + +def tmax(a, upperlimit=None, axis=0, inclusive=True): + """ + Compute the trimmed maximum + + This function computes the maximum value of an array along a given axis, + while ignoring values larger than a specified upper limit. + + Parameters + ---------- + a : array_like + array of values + upperlimit : None or float, optional + Values in the input array greater than the given limit will be ignored. + When upperlimit is None, then all values are used. The default value + is None. 
+ axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + inclusive : {True, False}, optional + This flag determines whether values exactly equal to the upper limit + are included. The default value is True. + + Returns + ------- + tmax : float, int or ndarray + + Notes + ----- + For more details on `tmax`, see `stats.tmax`. + + Examples + -------- + >>> from scipy.stats import mstats + >>> a = np.array([[6, 8, 3, 0], + ... [3, 9, 1, 2], + ... [8, 7, 8, 2], + ... [5, 6, 0, 2], + ... [4, 5, 5, 2]]) + ... + ... + >>> mstats.tmax(a, 4) + masked_array(data=[4, --, 3, 2], + mask=[False, True, False, False], + fill_value=999999) + + """ + a, axis = _chk_asarray(a, axis) + am = trima(a, (None, upperlimit), (False, inclusive)) + return ma.maximum.reduce(am, axis) + + +def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """ + Compute the trimmed standard error of the mean. + + This function finds the standard error of the mean for given + values, ignoring values outside the given `limits`. + + Parameters + ---------- + a : array_like + array of values + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. If None, compute over the + whole array. Default is zero. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tsem : float + + Notes + ----- + For more details on `tsem`, see `stats.tsem`. + + """ + a = ma.asarray(a).ravel() + if limits is None: + n = float(a.count()) + return a.std(axis=axis, ddof=ddof)/ma.sqrt(n) + + am = trima(a.ravel(), limits, inclusive) + sd = np.sqrt(am.var(axis=axis, ddof=ddof)) + return sd / np.sqrt(am.count()) + + +def winsorize(a, limits=None, inclusive=(True, True), inplace=False, + axis=None): + """Returns a Winsorized version of the input array. + + The (limits[0])th lowest values are set to the (limits[0])th percentile, + and the (limits[1])th highest values are set to the (1 - limits[1])th + percentile. + Masked values are skipped. + + + Parameters + ---------- + a : sequence + Input array. + limits : {None, tuple of float}, optional + Tuple of the percentages to cut on each side of the array, with respect + to the number of unmasked data, as floats between 0. and 1. + Noting n the number of unmasked data before trimming, the + (n*limits[0])th smallest data and the (n*limits[1])th largest data are + masked, and the total number of unmasked data after trimming + is n*(1.-sum(limits)) The value of one limit can be set to None to + indicate an open interval. + inclusive : {(True, True) tuple}, optional + Tuple indicating whether the number of data being masked on each side + should be truncated (True) or rounded (False). + inplace : {False, True}, optional + Whether to winsorize in place (True) or to use a copy (False) + axis : {None, int}, optional + Axis along which to trim. If None, the whole array is trimmed, but its + shape is maintained. 
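+
+ Returns
+ -------
+ winsorized : MaskedArray
+ A masked array of the same shape as `a`, with the extreme entries
+ replaced rather than masked. As a quick sketch of the intent, ten
+ values ``1..10`` with ``limits=(0.1, 0.1)`` come back as
+ ``[2, 2, 3, 4, 5, 6, 7, 8, 9, 9]``: one value is clipped at each end.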
+ + Notes + ----- + This function is applied to reduce the effect of possibly spurious outliers + by limiting the extreme values. + + """ + def _winsorize1D(a, low_limit, up_limit, low_include, up_include): + n = a.count() + idx = a.argsort() + if low_limit: + if low_include: + lowidx = int(low_limit * n) + else: + lowidx = np.round(low_limit * n).astype(int) + a[idx[:lowidx]] = a[idx[lowidx]] + if up_limit is not None: + if up_include: + upidx = n - int(n * up_limit) + else: + upidx = n - np.round(n * up_limit).astype(int) + a[idx[upidx:]] = a[idx[upidx - 1]] + return a + + # We are going to modify a: better make a copy + a = ma.array(a, copy=np.logical_not(inplace)) + + if limits is None: + return a + if (not isinstance(limits, tuple)) and isinstance(limits, float): + limits = (limits, limits) + + # Check the limits + (lolim, uplim) = limits + errmsg = "The proportion to cut from the %s should be between 0. and 1." + if lolim is not None: + if lolim > 1. or lolim < 0: + raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) + if uplim is not None: + if uplim > 1. or uplim < 0: + raise ValueError(errmsg % 'end' + "(got %s)" % uplim) + + (loinc, upinc) = inclusive + + if axis is None: + shp = a.shape + return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp) + else: + return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc, + upinc) + + +def moment(a, moment=1, axis=0): + """ + Calculates the nth moment about the mean for a sample. + + Parameters + ---------- + a : array_like + data + moment : int, optional + order of central moment that is returned + axis : int or None, optional + Axis along which the central moment is computed. Default is 0. + If None, compute over the whole array `a`. + + Returns + ------- + n-th central moment : ndarray or float + The appropriate moment along the given axis or over all values if axis + is None. The denominator for the moment calculation is the number of + observations, no degrees of freedom correction is done. + + Notes + ----- + For more details about `moment`, see `stats.moment`. + + """ + a, axis = _chk_asarray(a, axis) + if moment == 1: + # By definition the first moment about the mean is 0. + shape = list(a.shape) + del shape[axis] + if shape: + # return an actual array of the appropriate shape + return np.zeros(shape, dtype=float) + else: + # the input was 1D, so return a scalar instead of a rank-0 array + return np.float64(0.0) + else: + # Exponentiation by squares: form exponent sequence + n_list = [moment] + current_n = moment + while current_n > 2: + if current_n % 2: + current_n = (current_n-1)/2 + else: + current_n /= 2 + n_list.append(current_n) + + # Starting point for exponentiation by squares + a_zero_mean = a - ma.expand_dims(a.mean(axis), axis) + if n_list[-1] == 1: + s = a_zero_mean.copy() + else: + s = a_zero_mean**2 + + # Perform multiplications + for n in n_list[-2::-1]: + s = s**2 + if n % 2: + s *= a_zero_mean + return s.mean(axis) + + +def variation(a, axis=0): + """ + Computes the coefficient of variation, the ratio of the biased standard + deviation to the mean. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate the coefficient of variation. Default + is 0. If None, compute over the whole array `a`. + + Returns + ------- + variation : ndarray + The calculated variation along the requested axis. + + Notes + ----- + For more details about `variation`, see `stats.variation`. 
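+
+ Examples
+ --------
+ A minimal sketch (the biased standard deviation over the mean):
+
+ >>> from scipy.stats.mstats import variation
+ >>> variation([2., 4., 6., 8.])
+ 0.4472135954999579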
+ + """ + a, axis = _chk_asarray(a, axis) + return a.std(axis)/a.mean(axis) + + +def skew(a, axis=0, bias=True): + """ + Computes the skewness of a data set. + + Parameters + ---------- + a : ndarray + data + axis : int or None, optional + Axis along which skewness is calculated. Default is 0. + If None, compute over the whole array `a`. + bias : bool, optional + If False, then the calculations are corrected for statistical bias. + + Returns + ------- + skewness : ndarray + The skewness of values along an axis, returning 0 where all values are + equal. + + Notes + ----- + For more details about `skew`, see `stats.skew`. + + """ + a, axis = _chk_asarray(a,axis) + n = a.count(axis) + m2 = moment(a, 2, axis) + m3 = moment(a, 3, axis) + olderr = np.seterr(all='ignore') + try: + vals = ma.where(m2 == 0, 0, m3 / m2**1.5) + finally: + np.seterr(**olderr) + + if not bias: + can_correct = (n > 2) & (m2 > 0) + if can_correct.any(): + m2 = np.extract(can_correct, m2) + m3 = np.extract(can_correct, m3) + nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5 + np.place(vals, can_correct, nval) + return vals + + +def kurtosis(a, axis=0, fisher=True, bias=True): + """ + Computes the kurtosis (Fisher or Pearson) of a dataset. + + Kurtosis is the fourth central moment divided by the square of the + variance. If Fisher's definition is used, then 3.0 is subtracted from + the result to give 0.0 for a normal distribution. + + If bias is False then the kurtosis is calculated using k statistics to + eliminate bias coming from biased moment estimators + + Use `kurtosistest` to see if result is close enough to normal. + + Parameters + ---------- + a : array + data for which the kurtosis is calculated + axis : int or None, optional + Axis along which the kurtosis is calculated. Default is 0. + If None, compute over the whole array `a`. + fisher : bool, optional + If True, Fisher's definition is used (normal ==> 0.0). If False, + Pearson's definition is used (normal ==> 3.0). + bias : bool, optional + If False, then the calculations are corrected for statistical bias. + + Returns + ------- + kurtosis : array + The kurtosis of values along an axis. If all values are equal, + return -3 for Fisher's definition and 0 for Pearson's definition. + + Notes + ----- + For more details about `kurtosis`, see `stats.kurtosis`. + + """ + a, axis = _chk_asarray(a, axis) + m2 = moment(a, 2, axis) + m4 = moment(a, 4, axis) + olderr = np.seterr(all='ignore') + try: + vals = ma.where(m2 == 0, 0, m4 / m2**2.0) + finally: + np.seterr(**olderr) + + if not bias: + n = a.count(axis) + can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0) + if can_correct.any(): + n = np.extract(can_correct, n) + m2 = np.extract(can_correct, m2) + m4 = np.extract(can_correct, m4) + nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0) + np.place(vals, can_correct, nval+3.0) + if fisher: + return vals - 3 + else: + return vals + + +DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean', + 'variance', 'skewness', + 'kurtosis')) + + +def describe(a, axis=0, ddof=0, bias=True): + """ + Computes several descriptive statistics of the passed array. + + Parameters + ---------- + a : array_like + Data array + axis : int or None, optional + Axis along which to calculate statistics. Default 0. If None, + compute over the whole array `a`. 
+ ddof : int, optional
+ degrees of freedom (default 0); note that the default ddof here
+ differs from the same routine in stats.describe
+ bias : bool, optional
+ If False, then the skewness and kurtosis calculations are corrected for
+ statistical bias.
+
+ Returns
+ -------
+ nobs : int
+ Size of the data (discarding missing values).
+
+ minmax : (int, int)
+ min, max
+
+ mean : float
+ arithmetic mean
+
+ variance : float
+ unbiased variance
+
+ skewness : float
+ biased skewness
+
+ kurtosis : float
+ biased kurtosis
+
+ Examples
+ --------
+ >>> from scipy.stats.mstats import describe
+ >>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
+ >>> describe(ma)
+ DescribeResult(nobs=3, minmax=(masked_array(data=0,
+ mask=False,
+ fill_value=999999), masked_array(data=2,
+ mask=False,
+ fill_value=999999)), mean=1.0, variance=0.6666666666666666,
+ skewness=masked_array(data=0., mask=False, fill_value=1e+20),
+ kurtosis=-1.5)
+
+ """
+ a, axis = _chk_asarray(a, axis)
+ n = a.count(axis)
+ mm = (ma.minimum.reduce(a), ma.maximum.reduce(a))
+ m = a.mean(axis)
+ v = a.var(axis, ddof=ddof)
+ sk = skew(a, axis, bias=bias)
+ kurt = kurtosis(a, axis, bias=bias)
+
+ return DescribeResult(n, mm, m, v, sk, kurt)
+
+
+def stde_median(data, axis=None):
+ """Returns the McKean-Schrader estimate of the standard error of the sample
+ median along the given axis. Masked values are discarded.
+
+ Parameters
+ ----------
+ data : ndarray
+ Data to trim.
+ axis : {None,int}, optional
+ Axis along which to perform the trimming.
+ If None, the input array is first flattened.
+
+ """
+ def _stdemed_1D(data):
+ data = np.sort(data.compressed())
+ n = len(data)
+ z = 2.5758293035489004
+ k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0))
+ return ((data[n-k] - data[k-1])/(2.*z))
+
+ data = ma.array(data, copy=False, subok=True)
+ if (axis is None):
+ return _stdemed_1D(data)
+ else:
+ if data.ndim > 2:
+ raise ValueError("Array 'data' must be at most two dimensional, "
+ "but got data.ndim = %d" % data.ndim)
+ return ma.apply_along_axis(_stdemed_1D, axis, data)
+
+
+SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
+
+
+def skewtest(a, axis=0):
+ """
+ Tests whether the skew is different from the normal distribution.
+
+ Parameters
+ ----------
+ a : array
+ The data to be tested
+ axis : int or None, optional
+ Axis along which statistics are calculated. Default is 0.
+ If None, compute over the whole array `a`.
+
+ Returns
+ -------
+ statistic : float
+ The computed z-score for this test.
+ pvalue : float
+ A 2-sided p-value for the hypothesis test
+
+ Notes
+ -----
+ For more details about `skewtest`, see `stats.skewtest`.
+
+ """
+ a, axis = _chk_asarray(a, axis)
+ if axis is None:
+ a = a.ravel()
+ axis = 0
+ b2 = skew(a,axis)
+ n = a.count(axis)
+ if np.min(n) < 8:
+ raise ValueError(
+ "skewtest is not valid with less than 8 samples; %i samples"
+ " were given."
% np.min(n)) + + y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2))) + beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9)) + W2 = -1 + ma.sqrt(2*(beta2-1)) + delta = 1/ma.sqrt(0.5*ma.log(W2)) + alpha = ma.sqrt(2.0/(W2-1)) + y = ma.where(y == 0, 1, y) + Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1)) + + return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) + + +KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue')) + + +def kurtosistest(a, axis=0): + """ + Tests whether a dataset has normal kurtosis + + Parameters + ---------- + a : array + array of the sample data + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + + Returns + ------- + statistic : float + The computed z-score for this test. + pvalue : float + The 2-sided p-value for the hypothesis test + + Notes + ----- + For more details about `kurtosistest`, see `stats.kurtosistest`. + + """ + a, axis = _chk_asarray(a, axis) + n = a.count(axis=axis) + if np.min(n) < 5: + raise ValueError( + "kurtosistest requires at least 5 observations; %i observations" + " were given." % np.min(n)) + if np.min(n) < 20: + warnings.warn( + "kurtosistest only valid for n>=20 ... continuing anyway, n=%i" % + np.min(n)) + + b2 = kurtosis(a, axis, fisher=False) + E = 3.0*(n-1) / (n+1) + varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) + x = (b2-E)/ma.sqrt(varb2) + sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) / + (n*(n-2)*(n-3))) + A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) + term1 = 1 - 2./(9.0*A) + denom = 1 + x*ma.sqrt(2/(A-4.0)) + if np.ma.isMaskedArray(denom): + # For multi-dimensional array input + denom[denom == 0.0] = masked + elif denom == 0.0: + denom = masked + + term2 = np.ma.where(denom > 0, ma.power((1-2.0/A)/denom, 1/3.0), + -ma.power(-(1-2.0/A)/denom, 1/3.0)) + Z = (term1 - term2) / np.sqrt(2/(9.0*A)) + + return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) + + +NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue')) + + +def normaltest(a, axis=0): + """ + Tests whether a sample differs from a normal distribution. + + Parameters + ---------- + a : array_like + The array containing the data to be tested. + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + + Returns + ------- + statistic : float or array + ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and + ``k`` is the z-score returned by `kurtosistest`. + pvalue : float or array + A 2-sided chi squared probability for the hypothesis test. + + Notes + ----- + For more details about `normaltest`, see `stats.normaltest`. + + """ + a, axis = _chk_asarray(a, axis) + s, _ = skewtest(a, axis) + k, _ = kurtosistest(a, axis) + k2 = s*s + k*k + + return NormaltestResult(k2, distributions.chi2.sf(k2, 2)) + + +def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, + limit=()): + """ + Computes empirical quantiles for a data array. + + Samples quantile are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``, + where ``x[j]`` is the j-th order statistic, and gamma is a function of + ``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and + ``g = n*p + m - j``. 
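+
+ As a worked sketch with the defaults ``alphap = betap = 0.4`` (using the
+ example data further below): for ``n = 11`` and ``p = 0.25``, we get
+ ``m = 0.45``, ``j = floor(11*0.25 + 0.45) = 3`` and ``g = 0.2``, so
+ ``Q(0.25) = 0.8*x[3] + 0.2*x[4] = 0.8*15 + 0.2*36 = 19.2``, which matches
+ the first value of ``mquantiles(a)`` in the Examples section.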
+ + Reinterpreting the above equations to compare to **R** lead to the + equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)`` + + Typical values of (alphap,betap) are: + - (0,1) : ``p(k) = k/n`` : linear interpolation of cdf + (**R** type 4) + - (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function + (**R** type 5) + - (0,0) : ``p(k) = k/(n+1)`` : + (**R** type 6) + - (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])]. + (**R** type 7, **R** default) + - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])]. + The resulting quantile estimates are approximately median-unbiased + regardless of the distribution of x. + (**R** type 8) + - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom. + The resulting quantile estimates are approximately unbiased + if x is normally distributed + (**R** type 9) + - (.4,.4) : approximately quantile unbiased (Cunnane) + - (.35,.35): APL, used with PWM + + Parameters + ---------- + a : array_like + Input data, as a sequence or array of dimension at most 2. + prob : array_like, optional + List of quantiles to compute. + alphap : float, optional + Plotting positions parameter, default is 0.4. + betap : float, optional + Plotting positions parameter, default is 0.4. + axis : int, optional + Axis along which to perform the trimming. + If None (default), the input array is first flattened. + limit : tuple, optional + Tuple of (lower, upper) values. + Values of `a` outside this open interval are ignored. + + Returns + ------- + mquantiles : MaskedArray + An array containing the calculated quantiles. + + Notes + ----- + This formulation is very similar to **R** except the calculation of + ``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined + with each type. + + References + ---------- + .. [1] *R* statistical software: http://www.r-project.org/ + .. [2] *R* ``quantile`` function: + http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html + + Examples + -------- + >>> from scipy.stats.mstats import mquantiles + >>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.]) + >>> mquantiles(a) + array([ 19.2, 40. , 42.8]) + + Using a 2D array, specifying axis and limit. + + >>> data = np.array([[ 6., 7., 1.], + ... [ 47., 15., 2.], + ... [ 49., 36., 3.], + ... [ 15., 39., 4.], + ... [ 42., 40., -999.], + ... [ 41., 41., -999.], + ... [ 7., -999., -999.], + ... [ 39., -999., -999.], + ... [ 43., -999., -999.], + ... [ 40., -999., -999.], + ... [ 36., -999., -999.]]) + >>> print(mquantiles(data, axis=0, limit=(0, 50))) + [[19.2 14.6 1.45] + [40. 37.5 2.5 ] + [42.8 40.05 3.55]] + + >>> data[:, 2] = -999. 
+ >>> print(mquantiles(data, axis=0, limit=(0, 50))) + [[19.200000000000003 14.6 --] + [40.0 37.5 --] + [42.800000000000004 40.05 --]] + + """ + def _quantiles1D(data,m,p): + x = np.sort(data.compressed()) + n = len(x) + if n == 0: + return ma.array(np.empty(len(p), dtype=float), mask=True) + elif n == 1: + return ma.array(np.resize(x, p.shape), mask=nomask) + aleph = (n*p + m) + k = np.floor(aleph.clip(1, n-1)).astype(int) + gamma = (aleph-k).clip(0,1) + return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] + + data = ma.array(a, copy=False) + if data.ndim > 2: + raise TypeError("Array should be 2D at most !") + + if limit: + condition = (limit[0] < data) & (data < limit[1]) + data[~condition.filled(True)] = masked + + p = np.array(prob, copy=False, ndmin=1) + m = alphap + p*(1.-alphap-betap) + # Computes quantiles along axis (or globally) + if (axis is None): + return _quantiles1D(data, m, p) + + return ma.apply_along_axis(_quantiles1D, axis, data, m, p) + + +def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4): + """Calculate the score at the given 'per' percentile of the + sequence a. For example, the score at per=50 is the median. + + This function is a shortcut to mquantile + + """ + if (per < 0) or (per > 100.): + raise ValueError("The percentile should be between 0. and 100. !" + " (got %s)" % per) + + return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap, + limit=limit, axis=0).squeeze() + + +def plotting_positions(data, alpha=0.4, beta=0.4): + """ + Returns plotting positions (or empirical percentile points) for the data. + + Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where: + - i is the rank order statistics + - n is the number of unmasked values along the given axis + - `alpha` and `beta` are two parameters. + + Typical values for `alpha` and `beta` are: + - (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4) + - (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function + (R, type 5) + - (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6) + - (1,1) : ``p(k) = (k-1)/(n-1)``, in this case, + ``p(k) = mode[F(x[k])]``. That's R default (R type 7) + - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then + ``p(k) ~ median[F(x[k])]``. + The resulting quantile estimates are approximately median-unbiased + regardless of the distribution of x. (R type 8) + - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom. + The resulting quantile estimates are approximately unbiased + if x is normally distributed (R type 9) + - (.4,.4) : approximately quantile unbiased (Cunnane) + - (.35,.35): APL, used with PWM + - (.3175, .3175): used in scipy.stats.probplot + + Parameters + ---------- + data : array_like + Input data, as a sequence or array of dimension at most 2. + alpha : float, optional + Plotting positions parameter. Default is 0.4. + beta : float, optional + Plotting positions parameter. Default is 0.4. + + Returns + ------- + positions : MaskedArray + The calculated plotting positions. + + """ + data = ma.array(data, copy=False).reshape(1,-1) + n = data.count() + plpos = np.empty(data.size, dtype=float) + plpos[n:] = 0 + plpos[data.argsort(axis=None)[:n]] = ((np.arange(1, n+1) - alpha) / + (n + 1.0 - alpha - beta)) + return ma.array(plpos, mask=data._mask) + + +meppf = plotting_positions + + +def obrientransform(*args): + """ + Computes a transform on input data (any number of columns). Used to + test for homogeneity of variance prior to running one-way stats. Each + array in ``*args`` is one level of a factor. 
If an `f_oneway()` run on
+ the transformed data comes out significant, the variances are unequal. From
+ Maxwell and Delaney, p.112.
+
+ Returns: transformed data for use in an ANOVA
+ """
+ data = argstoarray(*args).T
+ v = data.var(axis=0,ddof=1)
+ m = data.mean(0)
+ n = data.count(0).astype(float)
+ # result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
+ data -= m
+ data **= 2
+ data *= (n-1.5)*n
+ data -= 0.5*v*(n-1)
+ data /= (n-1.)*(n-2.)
+ if not ma.allclose(v,data.mean(0)):
+ raise ValueError("Lack of convergence in obrientransform.")
+
+ return data
+
+
+def sem(a, axis=0, ddof=1):
+ """
+ Calculates the standard error of the mean of the input array.
+
+ Also sometimes called standard error of measurement.
+
+ Parameters
+ ----------
+ a : array_like
+ An array containing the values for which the standard error is
+ returned.
+ axis : int or None, optional
+ If axis is None, ravel `a` first. If axis is an integer, this will be
+ the axis over which to operate. Defaults to 0.
+ ddof : int, optional
+ Delta degrees-of-freedom. How many degrees of freedom to adjust
+ for bias in limited samples relative to the population estimate
+ of variance. Defaults to 1.
+
+ Returns
+ -------
+ s : ndarray or float
+ The standard error of the mean in the sample(s), along the input axis.
+
+ Notes
+ -----
+ The default value for `ddof` changed in scipy 0.15.0 to be consistent with
+ `stats.sem` as well as with the most common definition used (like in the R
+ documentation).
+
+ Examples
+ --------
+ Find standard error along the first axis:
+
+ >>> from scipy import stats
+ >>> a = np.arange(20).reshape(5,4)
+ >>> print(stats.mstats.sem(a))
+ [2.8284271247461903 2.8284271247461903 2.8284271247461903
+ 2.8284271247461903]
+
+ Find standard error across the whole array, using n degrees of freedom:
+
+ >>> print(stats.mstats.sem(a, axis=None, ddof=0))
+ 1.2893796958227628
+
+ """
+ a, axis = _chk_asarray(a, axis)
+ n = a.count(axis=axis)
+ s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n)
+ return s
+
+
+F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
+
+
+def f_oneway(*args):
+ """
+ Performs a 1-way ANOVA, returning an F-value and probability given
+ any number of groups. From Heiman, pp.394-7.
+
+ Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays,
+ one per treatment group.
+
+ Returns
+ -------
+ statistic : float
+ The computed F-value of the test.
+ pvalue : float
+ The associated p-value from the F-distribution.
+
+ """
+ # Construct a single array of arguments: each row is a group
+ data = argstoarray(*args)
+ ngroups = len(data)
+ ntot = data.count()
+ sstot = (data**2).sum() - (data.sum())**2/float(ntot)
+ ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum()
+ sswg = sstot-ssbg
+ dfbg = ngroups-1
+ dfwg = ntot - ngroups
+ msb = ssbg/float(dfbg)
+ msw = sswg/float(dfwg)
+ f = msb/msw
+ prob = special.fdtrc(dfbg, dfwg, f)  # equivalent to stats.f.sf
+
+ return F_onewayResult(f, prob)
+
+
+FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
+ ('statistic', 'pvalue'))
+
+
+def friedmanchisquare(*args):
+ """Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
+ This function calculates the Friedman Chi-square test for repeated measures
+ and returns the result, along with the associated probability value.
+
+ Each input is considered a given group. Ideally, the number of treatments
+ among each group should be equal.
If this is not the case, only the first
+ n treatments are taken into account, where n is the number of treatments
+ of the smallest group.
+ If a group has some missing values, the corresponding treatments are masked
+ in the other groups.
+ The test statistic is corrected for ties.
+
+ Masked values in one group are propagated to the other groups.
+
+ Returns
+ -------
+ statistic : float
+ the test statistic.
+ pvalue : float
+ the associated p-value.
+
+ """
+ data = argstoarray(*args).astype(float)
+ k = len(data)
+ if k < 3:
+ raise ValueError("Less than 3 groups (%i): " % k +
+ "the Friedman test is NOT appropriate.")
+
+ ranked = ma.masked_values(rankdata(data, axis=0), 0)
+ if ranked._mask is not nomask:
+ ranked = ma.mask_cols(ranked)
+ ranked = ranked.compressed().reshape(k,-1).view(ndarray)
+ else:
+ ranked = ranked._data
+ (k,n) = ranked.shape
+ # Ties correction
+ repeats = [find_repeats(row) for row in ranked.T]
+ ties = np.array([y for x, y in repeats if x.size > 0])
+ tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))
+
+ ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
+ chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction
+
+ return FriedmanchisquareResult(chisq,
+ distributions.chi2.sf(chisq, k-1))
+
+
+BrunnerMunzelResult = namedtuple('BrunnerMunzelResult', ('statistic', 'pvalue'))
+
+
+def brunnermunzel(x, y, alternative="two-sided", distribution="t"):
+ """
+ Computes the Brunner-Munzel test on samples x and y
+
+ Missing values in `x` and/or `y` are discarded.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Array of samples, should be one-dimensional.
+ alternative : 'less', 'two-sided', or 'greater', optional
+ Whether to get the p-value for the one-sided hypothesis ('less'
+ or 'greater') or for the two-sided hypothesis ('two-sided').
+ Default value is 'two-sided'.
+ distribution : 't' or 'normal', optional
+ Whether to get the p-value by t-distribution or by standard normal
+ distribution.
+ Default value is 't'.
+
+ Returns
+ -------
+ statistic : float
+ The Brunner-Munzel W statistic.
+ pvalue : float
+ p-value assuming a t distribution. One-sided or
+ two-sided, depending on the choice of `alternative` and `distribution`.
+
+ See Also
+ --------
+ mannwhitneyu : Mann-Whitney rank test on two samples.
+
+ Notes
+ -----
+ For more details on `brunnermunzel`, see `stats.brunnermunzel`.
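+
+ Examples
+ --------
+ A minimal sketch (outputs omitted here; with ``distribution='t'`` the
+ p-value comes from the t approximation described above):
+
+ >>> from scipy.stats.mstats import brunnermunzel
+ >>> x = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
+ >>> y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
+ >>> w, p = brunnermunzel(x, y)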
+
+ """
+ x = ma.asarray(x).compressed().view(ndarray)
+ y = ma.asarray(y).compressed().view(ndarray)
+ nx = len(x)
+ ny = len(y)
+ if nx == 0 or ny == 0:
+ return BrunnerMunzelResult(np.nan, np.nan)
+ nc = nx + ny
+ rankc = rankdata(np.concatenate((x,y)))
+ rankcx = rankc[0:nx]
+ rankcy = rankc[nx:nx+ny]
+ rankcx_mean = np.mean(rankcx)
+ rankcy_mean = np.mean(rankcy)
+ rankx = rankdata(x)
+ ranky = rankdata(y)
+ rankx_mean = np.mean(rankx)
+ ranky_mean = np.mean(ranky)
+
+ Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
+ Sx /= nx - 1
+ Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
+ Sy /= ny - 1
+
+ sigmax = Sx / np.power(nc - nx, 2.0)
+ sigmay = Sy / np.power(nc - ny, 2.0)
+
+ wbfn = nx * ny * (rankcy_mean - rankcx_mean)
+ wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
+
+ if distribution == "t":
+ df_numer = np.power(nx * Sx + ny * Sy, 2.0)
+ df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
+ df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
+ df = df_numer / df_denom
+ p = distributions.t.cdf(wbfn, df)
+ elif distribution == "normal":
+ p = distributions.norm.cdf(wbfn)
+ else:
+ raise ValueError(
+ "distribution should be 't' or 'normal'")
+
+ if alternative == "greater":
+ p = p
+ elif alternative == "less":
+ p = 1 - p
+ elif alternative == "two-sided":
+ p = 2 * np.min([p, 1-p])
+ else:
+ raise ValueError(
+ "alternative should be 'less', 'greater' or 'two-sided'")
+
+ return BrunnerMunzelResult(wbfn, p)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_basic.pyc
new file mode 100644
index 0000000..bdc335c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_basic.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_extras.py b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_extras.py
new file mode 100644
index 0000000..47d3490
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_extras.py
@@ -0,0 +1,477 @@
+"""
+Additional statistics functions with support for masked arrays.
+
+"""
+
+# Original author (2007): Pierre GF Gerard-Marchant
+
+
+from __future__ import division, print_function, absolute_import
+
+
+__all__ = ['compare_medians_ms',
+ 'hdquantiles', 'hdmedian', 'hdquantiles_sd',
+ 'idealfourths',
+ 'median_cihs','mjci','mquantiles_cimj',
+ 'rsh',
+ 'trimmed_mean_ci',]
+
+
+import numpy as np
+from numpy import float_, int_, ndarray
+
+import numpy.ma as ma
+from numpy.ma import MaskedArray
+
+from . import mstats_basic as mstats
+
+from scipy.stats.distributions import norm, beta, t, binom
+
+
+def hdquantiles(data, prob=list([.25,.5,.75]), axis=None, var=False,):
+ """
+ Computes quantile estimates with the Harrell-Davis method.
+
+ The quantile estimates are calculated as a weighted linear combination
+ of order statistics.
+
+ Parameters
+ ----------
+ data : array_like
+ Data array.
+ prob : sequence, optional
+ Sequence of quantiles to compute.
+ axis : int or None, optional
+ Axis along which to compute the quantiles. If None, use a flattened
+ array.
+ var : bool, optional
+ Whether to return the variance of the estimate.
+
+ Returns
+ -------
+ hdquantiles : MaskedArray
+ A (p,) array of quantiles (if `var` is False), or a (2,p) array of
+ quantiles and variances (if `var` is True), where ``p`` is the
+ number of quantiles.
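+
+ As a rough usage sketch (values not asserted):
+
+ >>> import numpy as np
+ >>> from scipy.stats.mstats import hdquantiles
+ >>> data = np.ma.array(np.arange(1., 11.), mask=[0]*9 + [1])
+ >>> hd = hdquantiles(data, prob=[0.25, 0.5, 0.75])  # quartile estimates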
+ + See Also + -------- + hdquantiles_sd + + """ + def _hd_1D(data,prob,var): + "Computes the HD quantiles for a 1D array. Returns nan for invalid data." + xsorted = np.squeeze(np.sort(data.compressed().view(ndarray))) + # Don't use length here, in case we have a numpy scalar + n = xsorted.size + + hd = np.empty((2,len(prob)), float_) + if n < 2: + hd.flat = np.nan + if var: + return hd + return hd[0] + + v = np.arange(n+1) / float(n) + betacdf = beta.cdf + for (i,p) in enumerate(prob): + _w = betacdf(v, (n+1)*p, (n+1)*(1-p)) + w = _w[1:] - _w[:-1] + hd_mean = np.dot(w, xsorted) + hd[0,i] = hd_mean + # + hd[1,i] = np.dot(w, (xsorted-hd_mean)**2) + # + hd[0, prob == 0] = xsorted[0] + hd[0, prob == 1] = xsorted[-1] + if var: + hd[1, prob == 0] = hd[1, prob == 1] = np.nan + return hd + return hd[0] + # Initialization & checks + data = ma.array(data, copy=False, dtype=float_) + p = np.array(prob, copy=False, ndmin=1) + # Computes quantiles along axis (or globally) + if (axis is None) or (data.ndim == 1): + result = _hd_1D(data, p, var) + else: + if data.ndim > 2: + raise ValueError("Array 'data' must be at most two dimensional, " + "but got data.ndim = %d" % data.ndim) + result = ma.apply_along_axis(_hd_1D, axis, data, p, var) + + return ma.fix_invalid(result, copy=False) + + +def hdmedian(data, axis=-1, var=False): + """ + Returns the Harrell-Davis estimate of the median along the given axis. + + Parameters + ---------- + data : ndarray + Data array. + axis : int, optional + Axis along which to compute the quantiles. If None, use a flattened + array. + var : bool, optional + Whether to return the variance of the estimate. + + Returns + ------- + hdmedian : MaskedArray + The median values. If ``var=True``, the variance is returned inside + the masked array. E.g. for a 1-D array the shape change from (1,) to + (2,). + + """ + result = hdquantiles(data,[0.5], axis=axis, var=var) + return result.squeeze() + + +def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None): + """ + The standard error of the Harrell-Davis quantile estimates by jackknife. + + Parameters + ---------- + data : array_like + Data array. + prob : sequence, optional + Sequence of quantiles to compute. + axis : int, optional + Axis along which to compute the quantiles. If None, use a flattened + array. + + Returns + ------- + hdquantiles_sd : MaskedArray + Standard error of the Harrell-Davis quantile estimates. + + See Also + -------- + hdquantiles + + """ + def _hdsd_1D(data, prob): + "Computes the std error for 1D arrays." 
+ xsorted = np.sort(data.compressed()) + n = len(xsorted) + + hdsd = np.empty(len(prob), float_) + if n < 2: + hdsd.flat = np.nan + + vv = np.arange(n) / float(n-1) + betacdf = beta.cdf + + for (i,p) in enumerate(prob): + _w = betacdf(vv, (n+1)*p, (n+1)*(1-p)) + w = _w[1:] - _w[:-1] + mx_ = np.fromiter([np.dot(w,xsorted[np.r_[list(range(0,k)), + list(range(k+1,n))].astype(int_)]) + for k in range(n)], dtype=float_) + mx_var = np.array(mx_.var(), copy=False, ndmin=1) * n / float(n-1) + hdsd[i] = float(n-1) * np.sqrt(np.diag(mx_var).diagonal() / float(n)) + return hdsd + + # Initialization & checks + data = ma.array(data, copy=False, dtype=float_) + p = np.array(prob, copy=False, ndmin=1) + # Computes quantiles along axis (or globally) + if (axis is None): + result = _hdsd_1D(data, p) + else: + if data.ndim > 2: + raise ValueError("Array 'data' must be at most two dimensional, " + "but got data.ndim = %d" % data.ndim) + result = ma.apply_along_axis(_hdsd_1D, axis, data, p) + + return ma.fix_invalid(result, copy=False).ravel() + + +def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True), + alpha=0.05, axis=None): + """ + Selected confidence interval of the trimmed mean along the given axis. + + Parameters + ---------- + data : array_like + Input data. + limits : {None, tuple}, optional + None or a two item tuple. + Tuple of the percentages to cut on each side of the array, with respect + to the number of unmasked data, as floats between 0. and 1. If ``n`` + is the number of unmasked data before trimming, then + (``n * limits[0]``)th smallest data and (``n * limits[1]``)th + largest data are masked. The total number of unmasked data after + trimming is ``n * (1. - sum(limits))``. + The value of one limit can be set to None to indicate an open interval. + + Defaults to (0.2, 0.2). + inclusive : (2,) tuple of boolean, optional + If relative==False, tuple indicating whether values exactly equal to + the absolute limits are allowed. + If relative==True, tuple indicating whether the number of data being + masked on each side should be rounded (True) or truncated (False). + + Defaults to (True, True). + alpha : float, optional + Confidence level of the intervals. + + Defaults to 0.05. + axis : int, optional + Axis along which to cut. If None, uses a flattened version of `data`. + + Defaults to None. + + Returns + ------- + trimmed_mean_ci : (2,) ndarray + The lower and upper confidence intervals of the trimmed data. + + """ + data = ma.array(data, copy=False) + trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis) + tmean = trimmed.mean(axis) + tstde = mstats.trimmed_stde(data,limits=limits,inclusive=inclusive,axis=axis) + df = trimmed.count(axis) - 1 + tppf = t.ppf(1-alpha/2.,df) + return np.array((tmean - tppf*tstde, tmean+tppf*tstde)) + + +def mjci(data, prob=[0.25,0.5,0.75], axis=None): + """ + Returns the Maritz-Jarrett estimators of the standard error of selected + experimental quantiles of the data. + + Parameters + ---------- + data : ndarray + Data array. + prob : sequence, optional + Sequence of quantiles to compute. + axis : int or None, optional + Axis along which to compute the quantiles. If None, use a flattened + array. 
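+
+ As a rough usage sketch (standard errors of the default quantiles,
+ values not asserted), reusing the data from the `mquantiles` example:
+
+ >>> import numpy as np
+ >>> from scipy.stats.mstats import mjci
+ >>> x = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
+ >>> se = mjci(x, prob=[0.25, 0.5, 0.75])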
+
+ """
+ def _mjci_1D(data, p):
+ data = np.sort(data.compressed())
+ n = data.size
+ prob = (np.array(p) * n + 0.5).astype(int_)
+ betacdf = beta.cdf
+
+ mj = np.empty(len(prob), float_)
+ x = np.arange(1,n+1, dtype=float_) / n
+ y = x - 1./n
+ for (i,m) in enumerate(prob):
+ W = betacdf(x,m-1,n-m) - betacdf(y,m-1,n-m)
+ C1 = np.dot(W,data)
+ C2 = np.dot(W,data**2)
+ mj[i] = np.sqrt(C2 - C1**2)
+ return mj
+
+ data = ma.array(data, copy=False)
+ if data.ndim > 2:
+ raise ValueError("Array 'data' must be at most two dimensional, "
+ "but got data.ndim = %d" % data.ndim)
+
+ p = np.array(prob, copy=False, ndmin=1)
+ # Computes quantiles along axis (or globally)
+ if (axis is None):
+ return _mjci_1D(data, p)
+ else:
+ return ma.apply_along_axis(_mjci_1D, axis, data, p)
+
+
+def mquantiles_cimj(data, prob=[0.25,0.50,0.75], alpha=0.05, axis=None):
+ """
+ Computes the alpha confidence interval for the selected quantiles of the
+ data, with Maritz-Jarrett estimators.
+
+ Parameters
+ ----------
+ data : ndarray
+ Data array.
+ prob : sequence, optional
+ Sequence of quantiles to compute.
+ alpha : float, optional
+ Confidence level of the intervals.
+ axis : int or None, optional
+ Axis along which to compute the quantiles.
+ If None, use a flattened array.
+
+ Returns
+ -------
+ ci_lower : ndarray
+ The lower boundaries of the confidence interval. Of the same length as
+ `prob`.
+ ci_upper : ndarray
+ The upper boundaries of the confidence interval. Of the same length as
+ `prob`.
+
+ """
+ alpha = min(alpha, 1 - alpha)
+ z = norm.ppf(1 - alpha/2.)
+ xq = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis)
+ smj = mjci(data, prob, axis=axis)
+ return (xq - z * smj, xq + z * smj)
+
+
+def median_cihs(data, alpha=0.05, axis=None):
+ """
+ Computes the alpha-level confidence interval for the median of the data.
+
+ Uses the Hettmansperger-Sheather method.
+
+ Parameters
+ ----------
+ data : array_like
+ Input data. Masked values are discarded. The input should be 1D only,
+ or `axis` should be set to None.
+ alpha : float, optional
+ Confidence level of the intervals.
+ axis : int or None, optional
+ Axis along which to compute the quantiles. If None, use a flattened
+ array.
+
+ Returns
+ -------
+ median_cihs
+ Alpha level confidence interval.
+
+ """
+ def _cihs_1D(data, alpha):
+ data = np.sort(data.compressed())
+ n = len(data)
+ alpha = min(alpha, 1-alpha)
+ k = int(binom._ppf(alpha/2., n, 0.5))
+ gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
+ if gk < 1-alpha:
+ k -= 1
+ gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
+ gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5)
+ I = (gk - 1 + alpha)/(gk - gkk)
+ lambd = (n-k) * I / float(k + (n-2*k)*I)
+ lims = (lambd*data[k] + (1-lambd)*data[k-1],
+ lambd*data[n-k-1] + (1-lambd)*data[n-k])
+ return lims
+ data = ma.array(data, copy=False)
+ # Computes quantiles along axis (or globally)
+ if (axis is None):
+ result = _cihs_1D(data, alpha)
+ else:
+ if data.ndim > 2:
+ raise ValueError("Array 'data' must be at most two dimensional, "
+ "but got data.ndim = %d" % data.ndim)
+ result = ma.apply_along_axis(_cihs_1D, axis, data, alpha)
+
+ return result
+
+
+def compare_medians_ms(group_1, group_2, axis=None):
+ """
+ Compares the medians from two independent groups along the given axis.
+
+ The comparison is performed using the McKean-Schrader estimate of the
+ standard error of the medians.
+
+ Parameters
+ ----------
+ group_1 : array_like
+ First dataset. Has to be of size >=7.
+ group_2 : array_like
+ Second dataset.
Has to be of size >=7. + axis : int, optional + Axis along which the medians are estimated. If None, the arrays are + flattened. If `axis` is not None, then `group_1` and `group_2` + should have the same shape. + + Returns + ------- + compare_medians_ms : {float, ndarray} + If `axis` is None, then returns a float, otherwise returns a 1-D + ndarray of floats with a length equal to the length of `group_1` + along `axis`. + + """ + (med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis)) + (std_1, std_2) = (mstats.stde_median(group_1, axis=axis), + mstats.stde_median(group_2, axis=axis)) + W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2) + return 1 - norm.cdf(W) + + +def idealfourths(data, axis=None): + """ + Returns an estimate of the lower and upper quartiles. + + Uses the ideal fourths algorithm. + + Parameters + ---------- + data : array_like + Input array. + axis : int, optional + Axis along which the quartiles are estimated. If None, the arrays are + flattened. + + Returns + ------- + idealfourths : {list of floats, masked array} + Returns the two internal values that divide `data` into four parts + using the ideal fourths algorithm either along the flattened array + (if `axis` is None) or along `axis` of `data`. + + """ + def _idf(data): + x = data.compressed() + n = len(x) + if n < 3: + return [np.nan,np.nan] + (j,h) = divmod(n/4. + 5/12.,1) + j = int(j) + qlo = (1-h)*x[j-1] + h*x[j] + k = n - j + qup = (1-h)*x[k] + h*x[k-1] + return [qlo, qup] + data = ma.sort(data, axis=axis).view(MaskedArray) + if (axis is None): + return _idf(data) + else: + return ma.apply_along_axis(_idf, axis, data) + + +def rsh(data, points=None): + """ + Evaluates Rosenblatt's shifted histogram estimators for each data point. + + Rosenblatt's estimator is a centered finite-difference approximation to the + derivative of the empirical cumulative distribution function. + + Parameters + ---------- + data : sequence + Input data, should be 1-D. Masked values are ignored. + points : sequence or None, optional + Sequence of points where to evaluate Rosenblatt shifted histogram. + If None, use the data. 
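+
+ Returns
+ -------
+ rsh : ndarray
+ The shifted-histogram density estimate at each point. A rough usage
+ sketch (values not asserted):
+
+ >>> import numpy as np
+ >>> from scipy.stats.mstats import rsh
+ >>> np.random.seed(0)
+ >>> dens = rsh(np.random.randn(50))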
+ + """ + data = ma.array(data, copy=False) + if points is None: + points = data + else: + points = np.array(points, copy=False, ndmin=1) + + if data.ndim != 1: + raise AttributeError("The input array should be 1D only !") + + n = data.count() + r = idealfourths(data, axis=None) + h = 1.2 * (r[-1]-r[0]) / n**(1./5) + nhi = (data[:,None] <= points[None,:] + h).sum(0) + nlo = (data[:,None] < points[None,:] - h).sum(0) + return (nhi-nlo) / (2.*n*h) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_extras.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_extras.pyc new file mode 100644 index 0000000..722427b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/mstats_extras.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/mvn.so b/project/venv/lib/python2.7/site-packages/scipy/stats/mvn.so new file mode 100755 index 0000000..b921539 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/mvn.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/setup.py b/project/venv/lib/python2.7/site-packages/scipy/stats/setup.py new file mode 100644 index 0000000..799f796 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/setup.py @@ -0,0 +1,38 @@ +from __future__ import division, print_function, absolute_import + +from os.path import join + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('stats', parent_package, top_path) + + config.add_data_dir('tests') + + statlib_src = [join('statlib', '*.f')] + config.add_library('statlib', sources=statlib_src) + + # add statlib module + config.add_extension('statlib', + sources=['statlib.pyf'], + f2py_options=['--no-wrap-functions'], + libraries=['statlib'], + depends=statlib_src + ) + + # add _stats module + config.add_extension('_stats', + sources=['_stats.c'], + ) + + # add mvn module + config.add_extension('mvn', + sources=['mvn.pyf','mvndst.f'], + ) + + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/setup.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/setup.pyc new file mode 100644 index 0000000..0d0200d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/setup.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/statlib.so b/project/venv/lib/python2.7/site-packages/scipy/stats/statlib.so new file mode 100755 index 0000000..2d8c091 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/statlib.so differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/stats.py b/project/venv/lib/python2.7/site-packages/scipy/stats/stats.py new file mode 100644 index 0000000..10007d2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/stats.py @@ -0,0 +1,5953 @@ +# Copyright 2002 Gary Strangman. All rights reserved +# Copyright 2002-2016 The SciPy Developers +# +# The original code from Gary Strangman was heavily adapted for +# use in SciPy by Travis Oliphant. The original code came with the +# following disclaimer: +# +# This software is provided "as-is". There are no expressed or implied +# warranties of any kind, including, but not limited to, the warranties +# of merchantability and fitness for a given application. 
In no event +# shall Gary Strangman be liable for any direct, indirect, incidental, +# special, exemplary or consequential damages (including, but not limited +# to, loss of use, data or profits, or business interruption) however +# caused and on any theory of liability, whether in contract, strict +# liability or tort (including negligence or otherwise) arising in any way +# out of the use of this software, even if advised of the possibility of +# such damage. + +""" +A collection of basic statistical functions for Python. The function +names appear below. + + Some scalar functions defined here are also available in the scipy.special + package where they work on arbitrary sized arrays. + +Disclaimers: The function list is obviously incomplete and, worse, the +functions are not optimized. All functions have been tested (some more +so than others), but they are far from bulletproof. Thus, as with any +free software, no warranty or guarantee is expressed or implied. :-) A +few extra functions that don't appear in the list below can be found by +interested treasure-hunters. These functions don't necessarily have +both list and array versions but were deemed useful. + +Central Tendency +---------------- +.. autosummary:: + :toctree: generated/ + + gmean + hmean + mode + +Moments +------- +.. autosummary:: + :toctree: generated/ + + moment + variation + skew + kurtosis + normaltest + +Altered Versions +---------------- +.. autosummary:: + :toctree: generated/ + + tmean + tvar + tstd + tsem + describe + +Frequency Stats +--------------- +.. autosummary:: + :toctree: generated/ + + itemfreq + scoreatpercentile + percentileofscore + cumfreq + relfreq + +Variability +----------- +.. autosummary:: + :toctree: generated/ + + obrientransform + sem + zmap + zscore + iqr + +Trimming Functions +------------------ +.. autosummary:: + :toctree: generated/ + + trimboth + trim1 + +Correlation Functions +--------------------- +.. autosummary:: + :toctree: generated/ + + pearsonr + fisher_exact + spearmanr + pointbiserialr + kendalltau + weightedtau + linregress + theilslopes + +Inferential Stats +----------------- +.. autosummary:: + :toctree: generated/ + + ttest_1samp + ttest_ind + ttest_ind_from_stats + ttest_rel + chisquare + power_divergence + ks_2samp + mannwhitneyu + ranksums + wilcoxon + kruskal + friedmanchisquare + brunnermunzel + combine_pvalues + +Statistical Distances +--------------------- +.. autosummary:: + :toctree: generated/ + + wasserstein_distance + energy_distance + +ANOVA Functions +--------------- +.. autosummary:: + :toctree: generated/ + + f_oneway + +Support Functions +----------------- +.. autosummary:: + :toctree: generated/ + + rankdata + rvs_ratio_uniforms + +References +---------- +.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + +""" + +from __future__ import division, print_function, absolute_import + +import warnings +import math +from collections import namedtuple + +import numpy as np +from numpy import array, asarray, ma + +from scipy._lib.six import callable, string_types +from scipy._lib._version import NumpyVersion +from scipy._lib._util import _lazywhere +import scipy.special as special +from . import distributions +from . 
import mstats_basic +from ._stats_mstats_common import _find_repeats, linregress, theilslopes, siegelslopes +from ._stats import _kendall_dis, _toint64, _weightedrankedtau +from ._rvs_sampling import rvs_ratio_uniforms + + +__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar', + 'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation', + 'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest', + 'normaltest', 'jarque_bera', 'itemfreq', + 'scoreatpercentile', 'percentileofscore', + 'cumfreq', 'relfreq', 'obrientransform', + 'sem', 'zmap', 'zscore', 'iqr', + 'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway', + 'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr', + 'kendalltau', 'weightedtau', + 'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp', + 'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest', + 'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu', + 'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare', + 'rankdata', 'rvs_ratio_uniforms', + 'combine_pvalues', 'wasserstein_distance', 'energy_distance', + 'brunnermunzel'] + + +def _chk_asarray(a, axis): + if axis is None: + a = np.ravel(a) + outaxis = 0 + else: + a = np.asarray(a) + outaxis = axis + + if a.ndim == 0: + a = np.atleast_1d(a) + + return a, outaxis + + +def _chk2_asarray(a, b, axis): + if axis is None: + a = np.ravel(a) + b = np.ravel(b) + outaxis = 0 + else: + a = np.asarray(a) + b = np.asarray(b) + outaxis = axis + + if a.ndim == 0: + a = np.atleast_1d(a) + if b.ndim == 0: + b = np.atleast_1d(b) + + return a, b, outaxis + + +def _contains_nan(a, nan_policy='propagate'): + policies = ['propagate', 'raise', 'omit'] + if nan_policy not in policies: + raise ValueError("nan_policy must be one of {%s}" % + ', '.join("'%s'" % s for s in policies)) + try: + # Calling np.sum to avoid creating a huge array into memory + # e.g. np.isnan(a).any() + with np.errstate(invalid='ignore'): + contains_nan = np.isnan(np.sum(a)) + except TypeError: + # If the check cannot be properly performed we fallback to omitting + # nan values and raising a warning. This can happen when attempting to + # sum things that are not numbers (e.g. as in the function `mode`). + contains_nan = False + nan_policy = 'omit' + warnings.warn("The input array could not be properly checked for nan " + "values. nan values will be ignored.", RuntimeWarning) + + if contains_nan and nan_policy == 'raise': + raise ValueError("The input contains nan values") + + return (contains_nan, nan_policy) + + +def gmean(a, axis=0, dtype=None): + """ + Compute the geometric mean along the specified axis. + + Return the geometric average of the array elements. + That is: n-th root of (x1 * x2 * ... * xn) + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int or None, optional + Axis along which the geometric mean is computed. Default is 0. + If None, compute over the whole array `a`. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If dtype is not specified, it defaults to the + dtype of a, unless a has an integer dtype with a precision less than + that of the default platform integer. In that case, the default + platform integer is used. 
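+
+    [Editor's illustration, not part of the upstream docstring: the
+    implementation below computes ``exp(mean(log(a)))``.]
+
+    >>> import numpy as np
+    >>> np.exp(np.log(np.array([1., 4.])).mean())  # same value as gmean([1, 4])
+    2.0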
+ + Returns + ------- + gmean : ndarray + see dtype parameter above + + See Also + -------- + numpy.mean : Arithmetic average + numpy.average : Weighted average + hmean : Harmonic mean + + Notes + ----- + The geometric average is computed over a single dimension of the input + array, axis=0 by default, or all values in the array if axis=None. + float64 intermediate and return values are used for integer inputs. + + Use masked arrays to ignore any non-finite values in the input or that + arise in the calculations such as Not a Number and infinity because masked + arrays automatically mask any non-finite values. + + Examples + -------- + >>> from scipy.stats import gmean + >>> gmean([1, 4]) + 2.0 + >>> gmean([1, 2, 3, 4, 5, 6, 7]) + 3.3800151591412964 + """ + if not isinstance(a, np.ndarray): + # if not an ndarray object attempt to convert it + log_a = np.log(np.array(a, dtype=dtype)) + elif dtype: + # Must change the default dtype allowing array type + if isinstance(a, np.ma.MaskedArray): + log_a = np.log(np.ma.asarray(a, dtype=dtype)) + else: + log_a = np.log(np.asarray(a, dtype=dtype)) + else: + log_a = np.log(a) + return np.exp(log_a.mean(axis=axis)) + + +def hmean(a, axis=0, dtype=None): + """ + Calculate the harmonic mean along the specified axis. + + That is: n / (1/x1 + 1/x2 + ... + 1/xn) + + Parameters + ---------- + a : array_like + Input array, masked array or object that can be converted to an array. + axis : int or None, optional + Axis along which the harmonic mean is computed. Default is 0. + If None, compute over the whole array `a`. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults to the + dtype of `a`, unless `a` has an integer `dtype` with a precision less + than that of the default platform integer. In that case, the default + platform integer is used. + + Returns + ------- + hmean : ndarray + see `dtype` parameter above + + See Also + -------- + numpy.mean : Arithmetic average + numpy.average : Weighted average + gmean : Geometric mean + + Notes + ----- + The harmonic mean is computed over a single dimension of the input + array, axis=0 by default, or all values in the array if axis=None. + float64 intermediate and return values are used for integer inputs. + + Use masked arrays to ignore any non-finite values in the input or that + arise in the calculations such as Not a Number and infinity. + + Examples + -------- + >>> from scipy.stats import hmean + >>> hmean([1, 4]) + 1.6000000000000001 + >>> hmean([1, 2, 3, 4, 5, 6, 7]) + 2.6997245179063363 + """ + if not isinstance(a, np.ndarray): + a = np.array(a, dtype=dtype) + if np.all(a > 0): + # Harmonic mean only defined if greater than zero + if isinstance(a, np.ma.MaskedArray): + size = a.count(axis) + else: + if axis is None: + a = a.ravel() + size = a.shape[0] + else: + size = a.shape[axis] + return size / np.sum(1.0 / a, axis=axis, dtype=dtype) + else: + raise ValueError("Harmonic mean only defined if all elements greater " + "than zero") + + +ModeResult = namedtuple('ModeResult', ('mode', 'count')) + + +def mode(a, axis=0, nan_policy='propagate'): + """ + Return an array of the modal (most common) value in the passed array. + + If there is more than one such value, only the smallest is returned. + The bin-count for the modal bins is also returned. + + Parameters + ---------- + a : array_like + n-dimensional array of which to find mode(s). + axis : int or None, optional + Axis along which to operate. Default is 0. 
If None, compute over + the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + mode : ndarray + Array of modal values. + count : ndarray + Array of counts for each mode. + + Examples + -------- + >>> a = np.array([[6, 8, 3, 0], + ... [3, 2, 1, 7], + ... [8, 1, 8, 4], + ... [5, 3, 0, 5], + ... [4, 7, 5, 9]]) + >>> from scipy import stats + >>> stats.mode(a) + (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]])) + + To get mode of whole array, specify ``axis=None``: + + >>> stats.mode(a, axis=None) + (array([3]), array([3])) + + """ + a, axis = _chk_asarray(a, axis) + if a.size == 0: + return ModeResult(np.array([]), np.array([])) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.mode(a, axis) + + if (NumpyVersion(np.__version__) < '1.9.0') or (a.dtype == object and np.nan in set(a)): + # Fall back to a slower method since np.unique does not work with NaN + # or for older numpy which does not support return_counts + scores = set(np.ravel(a)) # get ALL unique values + testshape = list(a.shape) + testshape[axis] = 1 + oldmostfreq = np.zeros(testshape, dtype=a.dtype) + oldcounts = np.zeros(testshape, dtype=int) + + for score in scores: + template = (a == score) + counts = np.expand_dims(np.sum(template, axis), axis) + mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) + oldcounts = np.maximum(counts, oldcounts) + oldmostfreq = mostfrequent + + return ModeResult(mostfrequent, oldcounts) + + def _mode1D(a): + vals, cnts = np.unique(a, return_counts=True) + return vals[cnts.argmax()], cnts.max() + + # np.apply_along_axis will convert the _mode1D tuples to a numpy array, casting types in the process + # This recreates the results without that issue + # View of a, rotated so the requested axis is last + in_dims = list(range(a.ndim)) + a_view = np.transpose(a, in_dims[:axis] + in_dims[axis+1:] + [axis]) + + inds = np.ndindex(a_view.shape[:-1]) + modes = np.empty(a_view.shape[:-1], dtype=a.dtype) + counts = np.zeros(a_view.shape[:-1], dtype=np.int) + for ind in inds: + modes[ind], counts[ind] = _mode1D(a_view[ind]) + newshape = list(a.shape) + newshape[axis] = 1 + return ModeResult(modes.reshape(newshape), counts.reshape(newshape)) + + +def _mask_to_limits(a, limits, inclusive): + """Mask an array for values outside of given limits. + + This is primarily a utility function. + + Parameters + ---------- + a : array + limits : (float or None, float or None) + A tuple consisting of the (lower limit, upper limit). Values in the + input array less than the lower limit or greater than the upper limit + will be masked out. None implies no limit. + inclusive : (bool, bool) + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to lower or upper are allowed. + + Returns + ------- + A MaskedArray. + + Raises + ------ + A ValueError if there are no values within the given limits. 
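+
+    Examples
+    --------
+    Editor's sketch, not upstream text: keep only the values inside the
+    closed interval [1, 3] (this is a module-private helper).
+
+    >>> import numpy as np
+    >>> am = _mask_to_limits(np.arange(5), (1, 3), (True, True))
+    >>> am.compressed()
+    array([1, 2, 3])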
+ """ + lower_limit, upper_limit = limits + lower_include, upper_include = inclusive + am = ma.MaskedArray(a) + if lower_limit is not None: + if lower_include: + am = ma.masked_less(am, lower_limit) + else: + am = ma.masked_less_equal(am, lower_limit) + + if upper_limit is not None: + if upper_include: + am = ma.masked_greater(am, upper_limit) + else: + am = ma.masked_greater_equal(am, upper_limit) + + if am.count() == 0: + raise ValueError("No array values within given limits") + + return am + + +def tmean(a, limits=None, inclusive=(True, True), axis=None): + """ + Compute the trimmed mean. + + This function finds the arithmetic mean of given values, ignoring values + outside the given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None (default), then all + values are used. Either of the limit values in the tuple can also be + None representing a half-open interval. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to compute test. Default is None. + + Returns + ------- + tmean : float + + See also + -------- + trim_mean : returns mean after trimming a proportion from both tails. + + Examples + -------- + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tmean(x) + 9.5 + >>> stats.tmean(x, (3,17)) + 10.0 + + """ + a = asarray(a) + if limits is None: + return np.mean(a, None) + + am = _mask_to_limits(a.ravel(), limits, inclusive) + return am.mean(axis=axis) + + +def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """ + Compute the trimmed variance. + + This function computes the sample variance of an array of values, + while ignoring values which are outside of given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tvar : float + Trimmed variance. + + Notes + ----- + `tvar` computes the unbiased sample variance, i.e. it uses a correction + factor ``n / (n - 1)``. + + Examples + -------- + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tvar(x) + 35.0 + >>> stats.tvar(x, (3,17)) + 20.0 + + """ + a = asarray(a) + a = a.astype(float).ravel() + if limits is None: + n = len(a) + return a.var() * n / (n - 1.) + am = _mask_to_limits(a, limits, inclusive) + return np.ma.var(am, ddof=ddof, axis=axis) + + +def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'): + """ + Compute the trimmed minimum. 
+
+    This function finds the minimum value of an array `a` along the
+    specified axis, but only considering values greater than a specified
+    lower limit.
+
+    Parameters
+    ----------
+    a : array_like
+        array of values
+    lowerlimit : None or float, optional
+        Values in the input array less than the given limit will be ignored.
+        When lowerlimit is None, then all values are used. The default value
+        is None.
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over the
+        whole array `a`.
+    inclusive : {True, False}, optional
+        This flag determines whether values exactly equal to the lower limit
+        are included. The default value is True.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+
+    Returns
+    -------
+    tmin : float, int or ndarray
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> x = np.arange(20)
+    >>> stats.tmin(x)
+    0
+
+    >>> stats.tmin(x, 13)
+    13
+
+    >>> stats.tmin(x, 13, inclusive=False)
+    14
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
+
+    contains_nan, nan_policy = _contains_nan(am, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        am = ma.masked_invalid(am)
+
+    res = ma.minimum.reduce(am, axis).data
+    if res.ndim == 0:
+        return res[()]
+    return res
+
+
+def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
+    """
+    Compute the trimmed maximum.
+
+    This function computes the maximum value of an array along a given axis,
+    while ignoring values larger than a specified upper limit.
+
+    Parameters
+    ----------
+    a : array_like
+        array of values
+    upperlimit : None or float, optional
+        Values in the input array greater than the given limit will be ignored.
+        When upperlimit is None, then all values are used. The default value
+        is None.
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over the
+        whole array `a`.
+    inclusive : {True, False}, optional
+        This flag determines whether values exactly equal to the upper limit
+        are included. The default value is True.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+
+    Returns
+    -------
+    tmax : float, int or ndarray
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> x = np.arange(20)
+    >>> stats.tmax(x)
+    19
+
+    >>> stats.tmax(x, 13)
+    13
+
+    >>> stats.tmax(x, 13, inclusive=False)
+    12
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
+
+    contains_nan, nan_policy = _contains_nan(am, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        am = ma.masked_invalid(am)
+
+    res = ma.maximum.reduce(am, axis).data
+    if res.ndim == 0:
+        return res[()]
+    return res
+
+
+def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
+    """
+    Compute the trimmed sample standard deviation.
+
+    This function finds the sample standard deviation of given values,
+    ignoring values outside the given `limits`.
+
+    Parameters
+    ----------
+    a : array_like
+        array of values
+    limits : None or (lower limit, upper limit), optional
+        Values in the input array less than the lower limit or greater than the
+        upper limit will be ignored.
When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tstd : float + + Notes + ----- + `tstd` computes the unbiased sample standard deviation, i.e. it uses a + correction factor ``n / (n - 1)``. + + Examples + -------- + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tstd(x) + 5.9160797830996161 + >>> stats.tstd(x, (3,17)) + 4.4721359549995796 + + """ + return np.sqrt(tvar(a, limits, inclusive, axis, ddof)) + + +def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """ + Compute the trimmed standard error of the mean. + + This function finds the standard error of the mean for given + values, ignoring values outside the given `limits`. + + Parameters + ---------- + a : array_like + array of values + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tsem : float + + Notes + ----- + `tsem` uses unbiased sample standard deviation, i.e. it uses a + correction factor ``n / (n - 1)``. + + Examples + -------- + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tsem(x) + 1.3228756555322954 + >>> stats.tsem(x, (3,17)) + 1.1547005383792515 + + """ + a = np.asarray(a).ravel() + if limits is None: + return a.std(ddof=ddof) / np.sqrt(a.size) + + am = _mask_to_limits(a, limits, inclusive) + sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis)) + return sd / np.sqrt(am.count()) + + +##################################### +# MOMENTS # +##################################### + +def moment(a, moment=1, axis=0, nan_policy='propagate'): + r""" + Calculate the nth moment about the mean for a sample. + + A moment is a specific quantitative measure of the shape of a set of + points. It is often used to calculate coefficients of skewness and kurtosis + due to its close relationship with them. + + + Parameters + ---------- + a : array_like + data + moment : int or array_like of ints, optional + order of central moment that is returned. Default is 1. + axis : int or None, optional + Axis along which the central moment is computed. Default is 0. + If None, compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. 
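+
+    [Editor's illustration, not part of the upstream docstring: the second
+    central moment is the biased sample variance.]
+
+    >>> import numpy as np
+    >>> a = np.array([1., 2., 3., 4., 5.])
+    >>> ((a - a.mean())**2).mean()  # equals moment(a, moment=2) below
+    2.0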
+ + Returns + ------- + n-th central moment : ndarray or float + The appropriate moment along the given axis or over all values if axis + is None. The denominator for the moment calculation is the number of + observations, no degrees of freedom correction is done. + + See also + -------- + kurtosis, skew, describe + + Notes + ----- + The k-th central moment of a data sample is: + + .. math:: + + m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k + + Where n is the number of samples and x-bar is the mean. This function uses + exponentiation by squares [1]_ for efficiency. + + References + ---------- + .. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms + + Examples + -------- + >>> from scipy.stats import moment + >>> moment([1, 2, 3, 4, 5], moment=1) + 0.0 + >>> moment([1, 2, 3, 4, 5], moment=2) + 2.0 + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.moment(a, moment, axis) + + if a.size == 0: + # empty array, return nan(s) with shape matching `moment` + if np.isscalar(moment): + return np.nan + else: + return np.full(np.asarray(moment).shape, np.nan, dtype=np.float64) + + # for array_like moment input, return a value for each. + if not np.isscalar(moment): + mmnt = [_moment(a, i, axis) for i in moment] + return np.array(mmnt) + else: + return _moment(a, moment, axis) + + +def _moment(a, moment, axis): + if np.abs(moment - np.round(moment)) > 0: + raise ValueError("All moment parameters must be integers") + + if moment == 0: + # When moment equals 0, the result is 1, by definition. + shape = list(a.shape) + del shape[axis] + if shape: + # return an actual array of the appropriate shape + return np.ones(shape, dtype=float) + else: + # the input was 1D, so return a scalar instead of a rank-0 array + return 1.0 + + elif moment == 1: + # By definition the first moment about the mean is 0. + shape = list(a.shape) + del shape[axis] + if shape: + # return an actual array of the appropriate shape + return np.zeros(shape, dtype=float) + else: + # the input was 1D, so return a scalar instead of a rank-0 array + return np.float64(0.0) + else: + # Exponentiation by squares: form exponent sequence + n_list = [moment] + current_n = moment + while current_n > 2: + if current_n % 2: + current_n = (current_n - 1) / 2 + else: + current_n /= 2 + n_list.append(current_n) + + # Starting point for exponentiation by squares + a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis) + if n_list[-1] == 1: + s = a_zero_mean.copy() + else: + s = a_zero_mean**2 + + # Perform multiplications + for n in n_list[-2::-1]: + s = s**2 + if n % 2: + s *= a_zero_mean + return np.mean(s, axis) + + +def variation(a, axis=0, nan_policy='propagate'): + """ + Compute the coefficient of variation, the ratio of the biased standard + deviation to the mean. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate the coefficient of variation. Default + is 0. If None, compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + variation : ndarray + The calculated variation along the requested axis. + + References + ---------- + .. [1] Zwillinger, D. 
and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + + Examples + -------- + >>> from scipy.stats import variation + >>> variation([1, 2, 3, 4, 5]) + 0.47140452079103173 + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.variation(a, axis) + + return a.std(axis) / a.mean(axis) + + +def skew(a, axis=0, bias=True, nan_policy='propagate'): + """ + Compute the skewness of a data set. + + For normally distributed data, the skewness should be about 0. For + unimodal continuous distributions, a skewness value > 0 means that + there is more weight in the right tail of the distribution. The + function `skewtest` can be used to determine if the skewness value + is close enough to 0, statistically speaking. + + Parameters + ---------- + a : ndarray + data + axis : int or None, optional + Axis along which skewness is calculated. Default is 0. + If None, compute over the whole array `a`. + bias : bool, optional + If False, then the calculations are corrected for statistical bias. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + skewness : ndarray + The skewness of values along an axis, returning 0 where all values are + equal. + + References + ---------- + + .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + Section 2.2.24.1 + + Examples + -------- + >>> from scipy.stats import skew + >>> skew([1, 2, 3, 4, 5]) + 0.0 + >>> skew([2, 8, 0, 4, 1, 9, 9, 0]) + 0.2650554122698573 + """ + a, axis = _chk_asarray(a, axis) + n = a.shape[axis] + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.skew(a, axis, bias) + + m2 = moment(a, 2, axis) + m3 = moment(a, 3, axis) + zero = (m2 == 0) + vals = _lazywhere(~zero, (m2, m3), + lambda m2, m3: m3 / m2**1.5, + 0.) + if not bias: + can_correct = (n > 2) & (m2 > 0) + if can_correct.any(): + m2 = np.extract(can_correct, m2) + m3 = np.extract(can_correct, m3) + nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5 + np.place(vals, can_correct, nval) + + if vals.ndim == 0: + return vals.item() + + return vals + + +def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'): + """ + Compute the kurtosis (Fisher or Pearson) of a dataset. + + Kurtosis is the fourth central moment divided by the square of the + variance. If Fisher's definition is used, then 3.0 is subtracted from + the result to give 0.0 for a normal distribution. + + If bias is False then the kurtosis is calculated using k statistics to + eliminate bias coming from biased moment estimators + + Use `kurtosistest` to see if result is close enough to normal. + + Parameters + ---------- + a : array + data for which the kurtosis is calculated + axis : int or None, optional + Axis along which the kurtosis is calculated. Default is 0. + If None, compute over the whole array `a`. + fisher : bool, optional + If True, Fisher's definition is used (normal ==> 0.0). If False, + Pearson's definition is used (normal ==> 3.0). 
+ bias : bool, optional + If False, then the calculations are corrected for statistical bias. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + kurtosis : array + The kurtosis of values along an axis. If all values are equal, + return -3 for Fisher's definition and 0 for Pearson's definition. + + References + ---------- + .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + + Examples + -------- + >>> from scipy.stats import kurtosis + >>> kurtosis([1, 2, 3, 4, 5]) + -1.3 + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.kurtosis(a, axis, fisher, bias) + + n = a.shape[axis] + m2 = moment(a, 2, axis) + m4 = moment(a, 4, axis) + zero = (m2 == 0) + olderr = np.seterr(all='ignore') + try: + vals = np.where(zero, 0, m4 / m2**2.0) + finally: + np.seterr(**olderr) + + if not bias: + can_correct = (n > 3) & (m2 > 0) + if can_correct.any(): + m2 = np.extract(can_correct, m2) + m4 = np.extract(can_correct, m4) + nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0) + np.place(vals, can_correct, nval + 3.0) + + if vals.ndim == 0: + vals = vals.item() # array scalar + + return vals - 3 if fisher else vals + + +DescribeResult = namedtuple('DescribeResult', + ('nobs', 'minmax', 'mean', 'variance', 'skewness', + 'kurtosis')) + + +def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'): + """ + Compute several descriptive statistics of the passed array. + + Parameters + ---------- + a : array_like + Input data. + axis : int or None, optional + Axis along which statistics are calculated. Default is 0. + If None, compute over the whole array `a`. + ddof : int, optional + Delta degrees of freedom (only for variance). Default is 1. + bias : bool, optional + If False, then the skewness and kurtosis calculations are corrected for + statistical bias. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + nobs : int or ndarray of ints + Number of observations (length of data along `axis`). + When 'omit' is chosen as nan_policy, each column is counted separately. + minmax: tuple of ndarrays or floats + Minimum and maximum value of data array. + mean : ndarray or float + Arithmetic mean of data along axis. + variance : ndarray or float + Unbiased variance of the data along axis, denominator is number of + observations minus one. + skewness : ndarray or float + Skewness, based on moment calculations with denominator equal to + the number of observations, i.e. no degrees of freedom correction. + kurtosis : ndarray or float + Kurtosis (Fisher). The kurtosis is normalized so that it is + zero for the normal distribution. No degrees of freedom are used. 
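+
+    [Editor's illustration, not part of the upstream docstring: the
+    `variance` field uses a denominator of ``n - 1``.]
+
+    >>> import numpy as np
+    >>> np.var(np.arange(10), ddof=1)  # matches describe(np.arange(10)).variance
+    9.166666666666666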
+ + See Also + -------- + skew, kurtosis + + Examples + -------- + >>> from scipy import stats + >>> a = np.arange(10) + >>> stats.describe(a) + DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.166666666666666, + skewness=0.0, kurtosis=-1.2242424242424244) + >>> b = [[1, 2], [3, 4]] + >>> stats.describe(b) + DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])), + mean=array([2., 3.]), variance=array([2., 2.]), + skewness=array([0., 0.]), kurtosis=array([-2., -2.])) + + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.describe(a, axis, ddof, bias) + + if a.size == 0: + raise ValueError("The input must not be empty.") + n = a.shape[axis] + mm = (np.min(a, axis=axis), np.max(a, axis=axis)) + m = np.mean(a, axis=axis) + v = np.var(a, axis=axis, ddof=ddof) + sk = skew(a, axis, bias=bias) + kurt = kurtosis(a, axis, bias=bias) + + return DescribeResult(n, mm, m, v, sk, kurt) + +##################################### +# NORMALITY TESTS # +##################################### + + +SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue')) + + +def skewtest(a, axis=0, nan_policy='propagate'): + """ + Test whether the skew is different from the normal distribution. + + This function tests the null hypothesis that the skewness of + the population that the sample was drawn from is the same + as that of a corresponding normal distribution. + + Parameters + ---------- + a : array + The data to be tested + axis : int or None, optional + Axis along which statistics are calculated. Default is 0. + If None, compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + statistic : float + The computed z-score for this test. + pvalue : float + a 2-sided p-value for the hypothesis test + + Notes + ----- + The sample size must be at least 8. + + References + ---------- + .. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr., + "A suggestion for using powerful and informative tests of + normality", American Statistician 44, pp. 316-321, 1990. + + Examples + -------- + >>> from scipy.stats import skewtest + >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8]) + SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897) + >>> skewtest([2, 8, 0, 4, 1, 9, 9, 0]) + SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459) + >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000]) + SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133) + >>> skewtest([100, 100, 100, 100, 100, 100, 100, 101]) + SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634) + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.skewtest(a, axis) + + if axis is None: + a = np.ravel(a) + axis = 0 + b2 = skew(a, axis) + n = a.shape[axis] + if n < 8: + raise ValueError( + "skewtest is not valid with less than 8 samples; %i samples" + " were given." 
% int(n)) + y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2))) + beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) / + ((n-2.0) * (n+5) * (n+7) * (n+9))) + W2 = -1 + math.sqrt(2 * (beta2 - 1)) + delta = 1 / math.sqrt(0.5 * math.log(W2)) + alpha = math.sqrt(2.0 / (W2 - 1)) + y = np.where(y == 0, 1, y) + Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1)) + + return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) + + +KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue')) + + +def kurtosistest(a, axis=0, nan_policy='propagate'): + """ + Test whether a dataset has normal kurtosis. + + This function tests the null hypothesis that the kurtosis + of the population from which the sample was drawn is that + of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``. + + Parameters + ---------- + a : array + array of the sample data + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + statistic : float + The computed z-score for this test. + pvalue : float + The 2-sided p-value for the hypothesis test + + Notes + ----- + Valid only for n>20. This function uses the method described in [1]_. + + References + ---------- + .. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis + statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983. + + Examples + -------- + >>> from scipy.stats import kurtosistest + >>> kurtosistest(list(range(20))) + KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348) + + >>> np.random.seed(28041990) + >>> s = np.random.normal(0, 1, 1000) + >>> kurtosistest(s) + KurtosistestResult(statistic=1.2317590987707365, pvalue=0.21803908613450895) + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.kurtosistest(a, axis) + + n = a.shape[axis] + if n < 5: + raise ValueError( + "kurtosistest requires at least 5 observations; %i observations" + " were given." % int(n)) + if n < 20: + warnings.warn("kurtosistest only valid for n>=20 ... continuing " + "anyway, n=%i" % int(n)) + b2 = kurtosis(a, axis, fisher=False) + + E = 3.0*(n-1) / (n+1) + varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1 + x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4 + # [1]_ Eq. 2: + sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) / + (n*(n-2)*(n-3))) + # [1]_ Eq. 3: + A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) + term1 = 1 - 2/(9.0*A) + denom = 1 + x*np.sqrt(2/(A-4.0)) + term2 = np.sign(denom) * np.where(denom == 0.0, np.nan, + np.power((1-2.0/A)/np.abs(denom), 1/3.0)) + if np.any(denom == 0): + msg = "Test statistic not defined in some cases due to division by " \ + "zero. Return nan in that case..." + warnings.warn(msg, RuntimeWarning) + + Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 
5 + if Z.ndim == 0: + Z = Z[()] + + # zprob uses upper tail, so Z needs to be positive + return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) + + +NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue')) + + +def normaltest(a, axis=0, nan_policy='propagate'): + """ + Test whether a sample differs from a normal distribution. + + This function tests the null hypothesis that a sample comes + from a normal distribution. It is based on D'Agostino and + Pearson's [1]_, [2]_ test that combines skew and kurtosis to + produce an omnibus test of normality. + + + Parameters + ---------- + a : array_like + The array containing the sample to be tested. + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + statistic : float or array + ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and + ``k`` is the z-score returned by `kurtosistest`. + pvalue : float or array + A 2-sided chi squared probability for the hypothesis test. + + References + ---------- + .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for + moderate and large sample size", Biometrika, 58, 341-348 + + .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from + normality", Biometrika, 60, 613-622 + + Examples + -------- + >>> from scipy import stats + >>> pts = 1000 + >>> np.random.seed(28041990) + >>> a = np.random.normal(0, 1, size=pts) + >>> b = np.random.normal(2, 1, size=pts) + >>> x = np.concatenate((a, b)) + >>> k2, p = stats.normaltest(x) + >>> alpha = 1e-3 + >>> print("p = {:g}".format(p)) + p = 3.27207e-11 + >>> if p < alpha: # null hypothesis: x comes from a normal distribution + ... print("The null hypothesis can be rejected") + ... else: + ... print("The null hypothesis cannot be rejected") + The null hypothesis can be rejected + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.normaltest(a, axis) + + s, _ = skewtest(a, axis) + k, _ = kurtosistest(a, axis) + k2 = s*s + k*k + + return NormaltestResult(k2, distributions.chi2.sf(k2, 2)) + + +def jarque_bera(x): + """ + Perform the Jarque-Bera goodness of fit test on sample data. + + The Jarque-Bera test tests whether the sample data has the skewness and + kurtosis matching a normal distribution. + + Note that this test only works for a large enough number of data samples + (>2000) as the test statistic asymptotically has a Chi-squared distribution + with 2 degrees of freedom. + + Parameters + ---------- + x : array_like + Observations of a random variable. + + Returns + ------- + jb_value : float + The test statistic. + p : float + The p-value for the hypothesis test. + + References + ---------- + .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality, + homoscedasticity and serial independence of regression residuals", + 6 Econometric Letters 255-259. 
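+
+    [Editor's illustration, not part of the upstream docstring: the statistic
+    computed below is ``n/6 * (S**2 + (K - 3)**2 / 4)`` for the sample
+    skewness S and kurtosis K.]
+
+    >>> import numpy as np
+    >>> x = np.array([2., 8., 0., 4., 1., 9., 9., 0.])
+    >>> d = x - x.mean()
+    >>> S = (d**3).mean() / (d**2).mean()**1.5
+    >>> K = (d**4).mean() / (d**2).mean()**2
+    >>> jb = x.size / 6. * (S**2 + (K - 3)**2 / 4)  # same as jarque_bera(x)[0]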
+ + Examples + -------- + >>> from scipy import stats + >>> np.random.seed(987654321) + >>> x = np.random.normal(0, 1, 100000) + >>> y = np.random.rayleigh(1, 100000) + >>> stats.jarque_bera(x) + (4.7165707989581342, 0.09458225503041906) + >>> stats.jarque_bera(y) + (6713.7098548143422, 0.0) + + """ + x = np.asarray(x) + n = x.size + if n == 0: + raise ValueError('At least one observation is required.') + + mu = x.mean() + diffx = x - mu + skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.) + kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2 + jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4) + p = 1 - distributions.chi2.cdf(jb_value, 2) + + return jb_value, p + + +##################################### +# FREQUENCY FUNCTIONS # +##################################### + +@np.deprecate(message="`itemfreq` is deprecated and will be removed in a " + "future version. Use instead `np.unique(..., return_counts=True)`") +def itemfreq(a): + """ + Return a 2-D array of item frequencies. + + Parameters + ---------- + a : (N,) array_like + Input array. + + Returns + ------- + itemfreq : (K, 2) ndarray + A 2-D frequency table. Column 1 contains sorted, unique values from + `a`, column 2 contains their respective counts. + + Examples + -------- + >>> from scipy import stats + >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4]) + >>> stats.itemfreq(a) + array([[ 0., 2.], + [ 1., 4.], + [ 2., 2.], + [ 4., 1.], + [ 5., 1.]]) + >>> np.bincount(a) + array([2, 4, 2, 0, 1, 1]) + + >>> stats.itemfreq(a/10.) + array([[ 0. , 2. ], + [ 0.1, 4. ], + [ 0.2, 2. ], + [ 0.4, 1. ], + [ 0.5, 1. ]]) + + """ + items, inv = np.unique(a, return_inverse=True) + freq = np.bincount(inv) + return np.array([items, freq]).T + + +def scoreatpercentile(a, per, limit=(), interpolation_method='fraction', + axis=None): + """ + Calculate the score at a given percentile of the input sequence. + + For example, the score at `per=50` is the median. If the desired quantile + lies between two data points, we interpolate between them, according to + the value of `interpolation`. If the parameter `limit` is provided, it + should be a tuple (lower, upper) of two values. + + Parameters + ---------- + a : array_like + A 1-D array of values from which to extract score. + per : array_like + Percentile(s) at which to extract score. Values should be in range + [0,100]. + limit : tuple, optional + Tuple of two scalars, the lower and upper limits within which to + compute the percentile. Values of `a` outside + this (closed) interval will be ignored. + interpolation_method : {'fraction', 'lower', 'higher'}, optional + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j` + + - fraction: ``i + (j - i) * fraction`` where ``fraction`` is the + fractional part of the index surrounded by ``i`` and ``j``. + - lower: ``i``. + - higher: ``j``. + + axis : int, optional + Axis along which the percentiles are computed. Default is None. If + None, compute over the whole array `a`. + + Returns + ------- + score : float or ndarray + Score at percentile(s). + + See Also + -------- + percentileofscore, numpy.percentile + + Notes + ----- + This function will become obsolete in the future. + For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality + that `scoreatpercentile` provides. And it's significantly faster. + Therefore it's recommended to use `numpy.percentile` for users that have + numpy >= 1.9. 
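+
+    [Editor's illustration, not part of the upstream docstring: the
+    `numpy.percentile` replacement recommended above.]
+
+    >>> import numpy as np
+    >>> np.percentile(np.arange(100), 50)  # same as scoreatpercentile below
+    49.5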
+ + Examples + -------- + >>> from scipy import stats + >>> a = np.arange(100) + >>> stats.scoreatpercentile(a, 50) + 49.5 + + """ + # adapted from NumPy's percentile function. When we require numpy >= 1.8, + # the implementation of this function can be replaced by np.percentile. + a = np.asarray(a) + if a.size == 0: + # empty array, return nan(s) with shape matching `per` + if np.isscalar(per): + return np.nan + else: + return np.full(np.asarray(per).shape, np.nan, dtype=np.float64) + + if limit: + a = a[(limit[0] <= a) & (a <= limit[1])] + + sorted_ = np.sort(a, axis=axis) + if axis is None: + axis = 0 + + return _compute_qth_percentile(sorted_, per, interpolation_method, axis) + + +# handle sequence of per's without calling sort multiple times +def _compute_qth_percentile(sorted_, per, interpolation_method, axis): + if not np.isscalar(per): + score = [_compute_qth_percentile(sorted_, i, + interpolation_method, axis) + for i in per] + return np.array(score) + + if not (0 <= per <= 100): + raise ValueError("percentile must be in the range [0, 100]") + + indexer = [slice(None)] * sorted_.ndim + idx = per / 100. * (sorted_.shape[axis] - 1) + + if int(idx) != idx: + # round fractional indices according to interpolation method + if interpolation_method == 'lower': + idx = int(np.floor(idx)) + elif interpolation_method == 'higher': + idx = int(np.ceil(idx)) + elif interpolation_method == 'fraction': + pass # keep idx as fraction and interpolate + else: + raise ValueError("interpolation_method can only be 'fraction', " + "'lower' or 'higher'") + + i = int(idx) + if i == idx: + indexer[axis] = slice(i, i + 1) + weights = array(1) + sumval = 1.0 + else: + indexer[axis] = slice(i, i + 2) + j = i + 1 + weights = array([(j - idx), (idx - i)], float) + wshape = [1] * sorted_.ndim + wshape[axis] = 2 + weights.shape = wshape + sumval = weights.sum() + + # Use np.add.reduce (== np.sum but a little faster) to coerce data type + return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval + + +def percentileofscore(a, score, kind='rank'): + """ + The percentile rank of a score relative to a list of scores. + + A `percentileofscore` of, for example, 80% means that 80% of the + scores in `a` are below the given score. In the case of gaps or + ties, the exact definition depends on the optional keyword, `kind`. + + Parameters + ---------- + a : array_like + Array of scores to which `score` is compared. + score : int or float + Score that is compared to the elements in `a`. + kind : {'rank', 'weak', 'strict', 'mean'}, optional + This optional parameter specifies the interpretation of the + resulting score: + + - "rank": Average percentage ranking of score. In case of + multiple matches, average the percentage rankings of + all matching scores. + - "weak": This kind corresponds to the definition of a cumulative + distribution function. A percentileofscore of 80% + means that 80% of values are less than or equal + to the provided score. + - "strict": Similar to "weak", except that only values that are + strictly less than the given score are counted. + - "mean": The average of the "weak" and "strict" scores, often used in + testing. See + + https://en.wikipedia.org/wiki/Percentile_rank + + Returns + ------- + pcos : float + Percentile-position of score (0-100) relative to `a`. 
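+
+    [Editor's illustration, not part of the upstream docstring: 'mean' is the
+    average of the 'strict' and 'weak' kinds, consistent with the example
+    values shown below.]
+
+    >>> (40.0 + 80.0) / 2
+    60.0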
+ + See Also + -------- + numpy.percentile + + Examples + -------- + Three-quarters of the given values lie below a given score: + + >>> from scipy import stats + >>> stats.percentileofscore([1, 2, 3, 4], 3) + 75.0 + + With multiple matches, note how the scores of the two matches, 0.6 + and 0.8 respectively, are averaged: + + >>> stats.percentileofscore([1, 2, 3, 3, 4], 3) + 70.0 + + Only 2/5 values are strictly less than 3: + + >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict') + 40.0 + + But 4/5 values are less than or equal to 3: + + >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak') + 80.0 + + The average between the weak and the strict scores is + + >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean') + 60.0 + + """ + if np.isnan(score): + return np.nan + a = np.asarray(a) + n = len(a) + if n == 0: + return 100.0 + + if kind == 'rank': + left = np.count_nonzero(a < score) + right = np.count_nonzero(a <= score) + pct = (right + left + (1 if right > left else 0)) * 50.0/n + return pct + elif kind == 'strict': + return np.count_nonzero(a < score) / n * 100 + elif kind == 'weak': + return np.count_nonzero(a <= score) / n * 100 + elif kind == 'mean': + pct = (np.count_nonzero(a < score) + np.count_nonzero(a <= score)) / n * 50 + return pct + else: + raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'") + + +HistogramResult = namedtuple('HistogramResult', + ('count', 'lowerlimit', 'binsize', 'extrapoints')) + + +def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False): + """ + Separate the range into several bins and return the number of instances + in each bin. + + Parameters + ---------- + a : array_like + Array of scores which will be put into bins. + numbins : int, optional + The number of bins to use for the histogram. Default is 10. + defaultlimits : tuple (lower, upper), optional + The lower and upper values for the range of the histogram. + If no value is given, a range slightly larger than the range of the + values in a is used. Specifically ``(a.min() - s, a.max() + s)``, + where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. + weights : array_like, optional + The weights for each value in `a`. Default is None, which gives each + value a weight of 1.0 + printextras : bool, optional + If True, if there are extra points (i.e. the points that fall outside + the bin limits) a warning is raised saying how many of those points + there are. Default is False. + + Returns + ------- + count : ndarray + Number of points (or sum of weights) in each bin. + lowerlimit : float + Lowest value of histogram, the lower limit of the first bin. + binsize : float + The size of the bins (all bins have the same size). + extrapoints : int + The number of points outside the range of the histogram. + + See Also + -------- + numpy.histogram + + Notes + ----- + This histogram is based on numpy's histogram but has a larger range by + default if default limits is not set. + + """ + a = np.ravel(a) + if defaultlimits is None: + if a.size == 0: + # handle empty arrays. Undetermined range, so use 0-1. + defaultlimits = (0, 1) + else: + # no range given, so use values in `a` + data_min = a.min() + data_max = a.max() + # Have bins extend past min and max values slightly + s = (data_max - data_min) / (2. 
* (numbins - 1.)) + defaultlimits = (data_min - s, data_max + s) + + # use numpy's histogram method to compute bins + hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits, + weights=weights) + # hist are not always floats, convert to keep with old output + hist = np.array(hist, dtype=float) + # fixed width for bins is assumed, as numpy's histogram gives + # fixed width bins for int values for 'bins' + binsize = bin_edges[1] - bin_edges[0] + # calculate number of extra points + extrapoints = len([v for v in a + if defaultlimits[0] > v or v > defaultlimits[1]]) + if extrapoints > 0 and printextras: + warnings.warn("Points outside given histogram range = %s" + % extrapoints) + + return HistogramResult(hist, defaultlimits[0], binsize, extrapoints) + + +CumfreqResult = namedtuple('CumfreqResult', + ('cumcount', 'lowerlimit', 'binsize', + 'extrapoints')) + + +def cumfreq(a, numbins=10, defaultreallimits=None, weights=None): + """ + Return a cumulative frequency histogram, using the histogram function. + + A cumulative histogram is a mapping that counts the cumulative number of + observations in all of the bins up to the specified bin. + + Parameters + ---------- + a : array_like + Input array. + numbins : int, optional + The number of bins to use for the histogram. Default is 10. + defaultreallimits : tuple (lower, upper), optional + The lower and upper values for the range of the histogram. + If no value is given, a range slightly larger than the range of the + values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``, + where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. + weights : array_like, optional + The weights for each value in `a`. Default is None, which gives each + value a weight of 1.0 + + Returns + ------- + cumcount : ndarray + Binned values of cumulative frequency. + lowerlimit : float + Lower real limit + binsize : float + Width of each bin. + extrapoints : int + Extra points. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from scipy import stats + >>> x = [1, 4, 2, 1, 3, 1] + >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) + >>> res.cumcount + array([ 1., 2., 3., 3.]) + >>> res.extrapoints + 3 + + Create a normal distribution with 1000 random values + + >>> rng = np.random.RandomState(seed=12345) + >>> samples = stats.norm.rvs(size=1000, random_state=rng) + + Calculate cumulative frequencies + + >>> res = stats.cumfreq(samples, numbins=25) + + Calculate space of values for x + + >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size, + ... res.cumcount.size) + + Plot histogram and cumulative histogram + + >>> fig = plt.figure(figsize=(10, 4)) + >>> ax1 = fig.add_subplot(1, 2, 1) + >>> ax2 = fig.add_subplot(1, 2, 2) + >>> ax1.hist(samples, bins=25) + >>> ax1.set_title('Histogram') + >>> ax2.bar(x, res.cumcount, width=res.binsize) + >>> ax2.set_title('Cumulative histogram') + >>> ax2.set_xlim([x.min(), x.max()]) + + >>> plt.show() + + """ + h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights) + cumhist = np.cumsum(h * 1, axis=0) + return CumfreqResult(cumhist, l, b, e) + + +RelfreqResult = namedtuple('RelfreqResult', + ('frequency', 'lowerlimit', 'binsize', + 'extrapoints')) + + +def relfreq(a, numbins=10, defaultreallimits=None, weights=None): + """ + Return a relative frequency histogram, using the histogram function. + + A relative frequency histogram is a mapping of the number of + observations in each of the bins relative to the total of observations. 
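+    The relative frequencies sum to 1 when no input values fall outside
+    the bin limits (see the Examples below).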
+ + Parameters + ---------- + a : array_like + Input array. + numbins : int, optional + The number of bins to use for the histogram. Default is 10. + defaultreallimits : tuple (lower, upper), optional + The lower and upper values for the range of the histogram. + If no value is given, a range slightly larger than the range of the + values in a is used. Specifically ``(a.min() - s, a.max() + s)``, + where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. + weights : array_like, optional + The weights for each value in `a`. Default is None, which gives each + value a weight of 1.0 + + Returns + ------- + frequency : ndarray + Binned values of relative frequency. + lowerlimit : float + Lower real limit + binsize : float + Width of each bin. + extrapoints : int + Extra points. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from scipy import stats + >>> a = np.array([2, 4, 1, 2, 3, 2]) + >>> res = stats.relfreq(a, numbins=4) + >>> res.frequency + array([ 0.16666667, 0.5 , 0.16666667, 0.16666667]) + >>> np.sum(res.frequency) # relative frequencies should add up to 1 + 1.0 + + Create a normal distribution with 1000 random values + + >>> rng = np.random.RandomState(seed=12345) + >>> samples = stats.norm.rvs(size=1000, random_state=rng) + + Calculate relative frequencies + + >>> res = stats.relfreq(samples, numbins=25) + + Calculate space of values for x + + >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size, + ... res.frequency.size) + + Plot relative frequency histogram + + >>> fig = plt.figure(figsize=(5, 4)) + >>> ax = fig.add_subplot(1, 1, 1) + >>> ax.bar(x, res.frequency, width=res.binsize) + >>> ax.set_title('Relative frequency histogram') + >>> ax.set_xlim([x.min(), x.max()]) + + >>> plt.show() + + """ + a = np.asanyarray(a) + h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights) + h = h / a.shape[0] + + return RelfreqResult(h, l, b, e) + + +##################################### +# VARIABILITY FUNCTIONS # +##################################### + +def obrientransform(*args): + """ + Compute the O'Brien transform on input data (any number of arrays). + + Used to test for homogeneity of variance prior to running one-way stats. + Each array in ``*args`` is one level of a factor. + If `f_oneway` is run on the transformed data and found significant, + the variances are unequal. From Maxwell and Delaney [1]_, p.112. + + Parameters + ---------- + args : tuple of array_like + Any number of arrays. + + Returns + ------- + obrientransform : ndarray + Transformed data for use in an ANOVA. The first dimension + of the result corresponds to the sequence of transformed + arrays. If the arrays given are all 1-D of the same length, + the return value is a 2-D array; otherwise it is a 1-D array + of type object, with each element being an ndarray. + + References + ---------- + .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and + Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990. + + Examples + -------- + We'll test the following data sets for differences in their variance. + + >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10] + >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15] + + Apply the O'Brien transform to the data. + + >>> from scipy.stats import obrientransform + >>> tx, ty = obrientransform(x, y) + + Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the + transformed data. 
+ + >>> from scipy.stats import f_oneway + >>> F, p = f_oneway(tx, ty) + >>> p + 0.1314139477040335 + + If we require that ``p < 0.05`` for significance, we cannot conclude + that the variances are different. + """ + TINY = np.sqrt(np.finfo(float).eps) + + # `arrays` will hold the transformed arguments. + arrays = [] + + for arg in args: + a = np.asarray(arg) + n = len(a) + mu = np.mean(a) + sq = (a - mu)**2 + sumsq = sq.sum() + + # The O'Brien transform. + t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2)) + + # Check that the mean of the transformed data is equal to the + # original variance. + var = sumsq / (n - 1) + if abs(var - np.mean(t)) > TINY: + raise ValueError('Lack of convergence in obrientransform.') + + arrays.append(t) + + return np.array(arrays) + + +def sem(a, axis=0, ddof=1, nan_policy='propagate'): + """ + Calculate the standard error of the mean (or standard error of + measurement) of the values in the input array. + + Parameters + ---------- + a : array_like + An array containing the values for which the standard error is + returned. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + ddof : int, optional + Delta degrees-of-freedom. How many degrees of freedom to adjust + for bias in limited samples relative to the population estimate + of variance. Defaults to 1. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + s : ndarray or float + The standard error of the mean in the sample(s), along the input axis. + + Notes + ----- + The default value for `ddof` is different to the default (0) used by other + ddof containing routines, such as np.std and np.nanstd. + + Examples + -------- + Find standard error along the first axis: + + >>> from scipy import stats + >>> a = np.arange(20).reshape(5,4) + >>> stats.sem(a) + array([ 2.8284, 2.8284, 2.8284, 2.8284]) + + Find standard error across the whole array, using n degrees of freedom: + + >>> stats.sem(a, axis=None, ddof=0) + 1.2893796958227628 + + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.sem(a, axis, ddof) + + n = a.shape[axis] + s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n) + return s + + +def zscore(a, axis=0, ddof=0): + """ + Calculate the z score of each value in the sample, relative to the + sample mean and standard deviation. + + Parameters + ---------- + a : array_like + An array like object containing the sample data. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + ddof : int, optional + Degrees of freedom correction in the calculation of the + standard deviation. Default is 0. + + Returns + ------- + zscore : array_like + The z-scores, standardized by mean and standard deviation of + input array `a`. + + Notes + ----- + This function preserves ndarray subclasses, and works also with + matrices and masked arrays (it uses `asanyarray` instead of + `asarray` for parameters). + + Examples + -------- + >>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, + ... 
0.1954, 0.6307, 0.6599, 0.1065, 0.0508]) + >>> from scipy import stats + >>> stats.zscore(a) + array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786, + 0.6748, -1.1488, -1.3324]) + + Computing along a specified axis, using n-1 degrees of freedom + (``ddof=1``) to calculate the standard deviation: + + >>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608], + ... [ 0.7149, 0.0775, 0.6072, 0.9656], + ... [ 0.6341, 0.1403, 0.9759, 0.4064], + ... [ 0.5918, 0.6948, 0.904 , 0.3721], + ... [ 0.0921, 0.2481, 0.1188, 0.1366]]) + >>> stats.zscore(b, axis=1, ddof=1) + array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358], + [ 0.33048416, -1.37380874, 0.04251374, 1.00081084], + [ 0.26796377, -1.12598418, 1.23283094, -0.37481053], + [-0.22095197, 0.24468594, 1.19042819, -1.21416216], + [-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]]) + """ + a = np.asanyarray(a) + mns = a.mean(axis=axis) + sstd = a.std(axis=axis, ddof=ddof) + if axis and mns.ndim < a.ndim: + return ((a - np.expand_dims(mns, axis=axis)) / + np.expand_dims(sstd, axis=axis)) + else: + return (a - mns) / sstd + + +def zmap(scores, compare, axis=0, ddof=0): + """ + Calculate the relative z-scores. + + Return an array of z-scores, i.e., scores that are standardized to + zero mean and unit variance, where mean and variance are calculated + from the comparison array. + + Parameters + ---------- + scores : array_like + The input for which z-scores are calculated. + compare : array_like + The input from which the mean and standard deviation of the + normalization are taken; assumed to have the same dimension as + `scores`. + axis : int or None, optional + Axis over which mean and variance of `compare` are calculated. + Default is 0. If None, compute over the whole array `scores`. + ddof : int, optional + Degrees of freedom correction in the calculation of the + standard deviation. Default is 0. + + Returns + ------- + zscore : array_like + Z-scores, in the same shape as `scores`. + + Notes + ----- + This function preserves ndarray subclasses, and works also with + matrices and masked arrays (it uses `asanyarray` instead of + `asarray` for parameters). + + Examples + -------- + >>> from scipy.stats import zmap + >>> a = [0.5, 2.0, 2.5, 3] + >>> b = [0, 1, 2, 3, 4] + >>> zmap(a, b) + array([-1.06066017, 0. , 0.35355339, 0.70710678]) + """ + scores, compare = map(np.asanyarray, [scores, compare]) + mns = compare.mean(axis=axis) + sstd = compare.std(axis=axis, ddof=ddof) + if axis and mns.ndim < compare.ndim: + return ((scores - np.expand_dims(mns, axis=axis)) / + np.expand_dims(sstd, axis=axis)) + else: + return (scores - mns) / sstd + + +# Private dictionary initialized only once at module level +# See https://en.wikipedia.org/wiki/Robust_measures_of_scale +_scale_conversions = {'raw': 1.0, + 'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)} + + +def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate', + interpolation='linear', keepdims=False): + """ + Compute the interquartile range of the data along the specified axis. + + The interquartile range (IQR) is the difference between the 75th and + 25th percentile of the data. It is a measure of the dispersion + similar to standard deviation or variance, but is much more robust + against outliers [2]_. + + The ``rng`` parameter allows this function to compute other + percentile ranges than the actual IQR. For example, setting + ``rng=(0, 100)`` is equivalent to `numpy.ptp`. + + The IQR of an empty array is `np.nan`. + + .. 
versionadded:: 0.18.0 + + Parameters + ---------- + x : array_like + Input array or object that can be converted to an array. + axis : int or sequence of int, optional + Axis along which the range is computed. The default is to + compute the IQR for the entire array. + rng : Two-element sequence containing floats in range of [0,100] optional + Percentiles over which to compute the range. Each must be + between 0 and 100, inclusive. The default is the true IQR: + `(25, 75)`. The order of the elements is not important. + scale : scalar or str, optional + The numerical value of scale will be divided out of the final + result. The following string values are recognized: + + 'raw' : No scaling, just return the raw IQR. + 'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`. + + The default is 'raw'. Array-like scale is also allowed, as long + as it broadcasts correctly to the output such that + ``out / scale`` is a valid operation. The output dimensions + depend on the input array, `x`, the `axis` argument, and the + `keepdims` flag. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' + returns nan, 'raise' throws an error, 'omit' performs the + calculations ignoring nan values. Default is 'propagate'. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional + Specifies the interpolation method to use when the percentile + boundaries lie between two data points `i` and `j`: + + * 'linear' : `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * 'lower' : `i`. + * 'higher' : `j`. + * 'nearest' : `i` or `j` whichever is nearest. + * 'midpoint' : `(i + j) / 2`. + + Default is 'linear'. + keepdims : bool, optional + If this is set to `True`, the reduced axes are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the original array `x`. + + Returns + ------- + iqr : scalar or ndarray + If ``axis=None``, a scalar is returned. If the input contains + integers or floats of smaller precision than ``np.float64``, then the + output data-type is ``np.float64``. Otherwise, the output data-type is + the same as that of the input. + + See Also + -------- + numpy.std, numpy.var + + Examples + -------- + >>> from scipy.stats import iqr + >>> x = np.array([[10, 7, 4], [3, 2, 1]]) + >>> x + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> iqr(x) + 4.0 + >>> iqr(x, axis=0) + array([ 3.5, 2.5, 1.5]) + >>> iqr(x, axis=1) + array([ 3., 1.]) + >>> iqr(x, axis=1, keepdims=True) + array([[ 3.], + [ 1.]]) + + Notes + ----- + This function is heavily dependent on the version of `numpy` that is + installed. Versions greater than 1.11.0b3 are highly recommended, as they + include a number of enhancements and fixes to `numpy.percentile` and + `numpy.nanpercentile` that affect the operation of this function. The + following modifications apply: + + Below 1.10.0 : `nan_policy` is poorly defined. + The default behavior of `numpy.percentile` is used for 'propagate'. This + is a hybrid of 'omit' and 'propagate' that mostly yields a skewed + version of 'omit' since NaNs are sorted to the end of the data. A + warning is raised if there are NaNs in the data. + Below 1.9.0: `numpy.nanpercentile` does not exist. + This means that `numpy.percentile` is used regardless of `nan_policy` + and a warning is issued. See previous item for a description of the + behavior. 
+ Below 1.9.0: `keepdims` and `interpolation` are not supported. + The keywords get ignored with a warning if supplied with non-default + values. However, multiple axes are still supported. + + References + ---------- + .. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range + .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale + .. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile + """ + x = asarray(x) + + # This check prevents percentile from raising an error later. Also, it is + # consistent with `np.var` and `np.std`. + if not x.size: + return np.nan + + # An error may be raised here, so fail-fast, before doing lengthy + # computations, even though `scale` is not used until later + if isinstance(scale, string_types): + scale_key = scale.lower() + if scale_key not in _scale_conversions: + raise ValueError("{0} not a valid scale for `iqr`".format(scale)) + scale = _scale_conversions[scale_key] + + # Select the percentile function to use based on nans and policy + contains_nan, nan_policy = _contains_nan(x, nan_policy) + + if contains_nan and nan_policy == 'omit': + percentile_func = _iqr_nanpercentile + else: + percentile_func = _iqr_percentile + + if len(rng) != 2: + raise TypeError("quantile range must be two element sequence") + + rng = sorted(rng) + pct = percentile_func(x, rng, axis=axis, interpolation=interpolation, + keepdims=keepdims, contains_nan=contains_nan) + out = np.subtract(pct[1], pct[0]) + + if scale != 1.0: + out /= scale + + return out + + +def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False): + """ + Private wrapper that works around older versions of `numpy`. + + While this function is pretty much necessary for the moment, it + should be removed as soon as the minimum supported numpy version + allows. + """ + if contains_nan and NumpyVersion(np.__version__) < '1.10.0a': + # I see no way to avoid the version check to ensure that the corrected + # NaN behavior has been implemented except to call `percentile` on a + # small array. + msg = "Keyword nan_policy='propagate' not correctly supported for " \ + "numpy versions < 1.10.x. The default behavior of " \ + "`numpy.percentile` will be used." + warnings.warn(msg, RuntimeWarning) + + try: + # For older versions of numpy, there are two things that can cause a + # problem here: missing keywords and non-scalar axis. The former can be + # partially handled with a warning, the latter can be handled fully by + # hacking in an implementation similar to numpy's function for + # providing multi-axis functionality + # (`numpy.lib.function_base._ureduce` for the curious). 
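+            # For example (illustration only): if x.ndim == 3 and
+            # axis=(0, 2), the fallback below rolls axes 0 and 2 to the
+            # end, reshapes them into a single trailing axis of length
+            # x.shape[0]*x.shape[2], and then reduces with a single
+            # np.percentile call over axis=-1.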
+        result = np.percentile(x, q, axis=axis, keepdims=keepdims,
+                               interpolation=interpolation)
+    except TypeError:
+        if interpolation != 'linear' or keepdims:
+            # At time of writing, this means np.__version__ < 1.9.0
+            warnings.warn("Keywords interpolation and keepdims not supported "
+                          "for your version of numpy", RuntimeWarning)
+        try:
+            # Special processing if axis is an iterable
+            original_size = len(axis)
+        except TypeError:
+            # Axis is a scalar at this point
+            pass
+        else:
+            axis = np.unique(np.asarray(axis) % x.ndim)
+            if original_size > axis.size:
+                # mimic numpy if axes are duplicated
+                raise ValueError("duplicate value in axis")
+            if axis.size == x.ndim:
+                # axis includes all axes: revert to None
+                axis = None
+            elif axis.size == 1:
+                # no rolling necessary
+                axis = axis[0]
+            else:
+                # roll multiple axes to the end and flatten that part out
+                for ax in axis[::-1]:
+                    x = np.rollaxis(x, ax, x.ndim)
+                x = x.reshape(x.shape[:-axis.size] +
+                              (np.prod(x.shape[-axis.size:]),))
+                axis = -1
+        result = np.percentile(x, q, axis=axis)
+
+    return result
+
+
+def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False,
+                       contains_nan=False):
+    """
+    Private wrapper that works around the following:
+
+    1. A bug in `np.nanpercentile` that was around until numpy version
+       1.11.0.
+    2. A bug in `np.percentile` NaN handling that was fixed in numpy
+       version 1.10.0.
+    3. The non-existence of `np.nanpercentile` before numpy version
+       1.9.0.
+
+    While this function is pretty much necessary for the moment, it
+    should be removed as soon as the minimum supported numpy version
+    allows.
+    """
+    if hasattr(np, 'nanpercentile'):
+        # `np.nanpercentile` is available, i.e. np.__version__ >= 1.9.0
+        result = np.nanpercentile(x, q, axis=axis,
+                                  interpolation=interpolation,
+                                  keepdims=keepdims)
+        # For non-scalar results, older versions of nanpercentile do not
+        # roll the reduced axis properly. There is no way of avoiding the
+        # version test, since dimensions may just happen to match in the data.
+        if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
+            axis = np.asarray(axis)
+            if axis.size == 1:
+                # If only one axis specified, reduction happens along that
+                # dimension
+                if axis.ndim == 0:
+                    axis = axis[None]
+                result = np.rollaxis(result, axis[0])
+            else:
+                # If multiple axes, the reduced dimension is last
+                result = np.rollaxis(result, -1)
+    else:
+        msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
+              "versions < 1.9.x. The default behavior of numpy.percentile " \
+              "will be used."
+        warnings.warn(msg, RuntimeWarning)
+        result = _iqr_percentile(x, q, axis=axis)
+
+    return result
+
+
+#####################################
+#        TRIMMING FUNCTIONS         #
+#####################################
+
+SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
+
+
+def sigmaclip(a, low=4., high=4.):
+    """
+    Iterative sigma-clipping of array elements.
+
+    Starting from the full sample, all elements outside the critical range
+    are removed, i.e. all elements of the input array `c` that satisfy
+    either of the following conditions::
+
+        c < mean(c) - std(c)*low
+        c > mean(c) + std(c)*high
+
+    The iteration continues with the updated sample until no
+    elements are outside the (updated) range.
+
+    Parameters
+    ----------
+    a : array_like
+        Data array, will be raveled if not 1-D.
+    low : float, optional
+        Lower bound factor of sigma clipping. Default is 4.
+    high : float, optional
+        Upper bound factor of sigma clipping. Default is 4.
+
+    Returns
+    -------
+    clipped : ndarray
+        Input array with clipped elements removed.
+    lower : float
+        Lower threshold value used for clipping.
+    upper : float
+        Upper threshold value used for clipping.
+
+    Examples
+    --------
+    >>> from scipy.stats import sigmaclip
+    >>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
+    ...                     np.linspace(0, 20, 5)))
+    >>> fact = 1.5
+    >>> c, low, upp = sigmaclip(a, fact, fact)
+    >>> c
+    array([  9.96666667,  10.        ,  10.03333333,  10.        ])
+    >>> c.var(), c.std()
+    (0.00055555555555555165, 0.023570226039551501)
+    >>> low, c.mean() - fact*c.std(), c.min()
+    (9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
+    >>> upp, c.mean() + fact*c.std(), c.max()
+    (10.035355339059327, 10.035355339059327, 10.033333333333333)
+
+    >>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
+    ...                     np.linspace(-100, -50, 3)))
+    >>> c, low, upp = sigmaclip(a, 1.8, 1.8)
+    >>> (c == np.linspace(9.5, 10.5, 11)).all()
+    True
+
+    """
+    c = np.asarray(a).ravel()
+    delta = 1
+    while delta:
+        c_std = c.std()
+        c_mean = c.mean()
+        size = c.size
+        critlower = c_mean - c_std * low
+        critupper = c_mean + c_std * high
+        c = c[(c >= critlower) & (c <= critupper)]
+        delta = size - c.size
+
+    return SigmaclipResult(c, critlower, critupper)
+
+
+def trimboth(a, proportiontocut, axis=0):
+    """
+    Slices off a proportion of items from both ends of an array.
+
+    Slices off the passed proportion of items from both ends of the passed
+    array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
+    rightmost 10% of scores). The trimmed values are the lowest and
+    highest ones.
+    Slices off less if the proportion results in a non-integer slice index
+    (i.e., conservatively slices off `proportiontocut`).
+
+    Parameters
+    ----------
+    a : array_like
+        Data to trim.
+    proportiontocut : float
+        Proportion (in range 0-1) of total data set to trim off each end.
+    axis : int or None, optional
+        Axis along which to trim data. Default is 0. If None, compute over
+        the whole array `a`.
+
+    Returns
+    -------
+    out : ndarray
+        Trimmed version of array `a`. The order of the trimmed content
+        is undefined.
+
+    See Also
+    --------
+    trim_mean
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> a = np.arange(20)
+    >>> b = stats.trimboth(a, 0.1)
+    >>> b.shape
+    (16,)
+
+    """
+    a = np.asarray(a)
+
+    if a.size == 0:
+        return a
+
+    if axis is None:
+        a = a.ravel()
+        axis = 0
+
+    nobs = a.shape[axis]
+    lowercut = int(proportiontocut * nobs)
+    uppercut = nobs - lowercut
+    if (lowercut >= uppercut):
+        raise ValueError("Proportion too big.")
+
+    atmp = np.partition(a, (lowercut, uppercut - 1), axis)
+
+    sl = [slice(None)] * atmp.ndim
+    sl[axis] = slice(lowercut, uppercut)
+    return atmp[tuple(sl)]
+
+
+def trim1(a, proportiontocut, tail='right', axis=0):
+    """
+    Slices off a proportion from ONE end of the passed array distribution.
+
+    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
+    10% of scores. The lowest or highest values are trimmed (depending on
+    the tail).
+    Slices off less if the proportion results in a non-integer slice index
+    (i.e., conservatively slices off `proportiontocut`).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    proportiontocut : float
+        Fraction to cut off of 'left' or 'right' of distribution.
+    tail : {'left', 'right'}, optional
+        Defaults to 'right'.
+    axis : int or None, optional
+        Axis along which to trim data. Default is 0. If None, compute over
+        the whole array `a`.
+
+    Returns
+    -------
+    trim1 : ndarray
+        Trimmed version of array `a`.
The order of the trimmed content is + undefined. + + """ + a = np.asarray(a) + if axis is None: + a = a.ravel() + axis = 0 + + nobs = a.shape[axis] + + # avoid possible corner case + if proportiontocut >= 1: + return [] + + if tail.lower() == 'right': + lowercut = 0 + uppercut = nobs - int(proportiontocut * nobs) + + elif tail.lower() == 'left': + lowercut = int(proportiontocut * nobs) + uppercut = nobs + + atmp = np.partition(a, (lowercut, uppercut - 1), axis) + + return atmp[lowercut:uppercut] + + +def trim_mean(a, proportiontocut, axis=0): + """ + Return mean of array after trimming distribution from both tails. + + If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of + scores. The input is sorted before slicing. Slices off less if proportion + results in a non-integer slice index (i.e., conservatively slices off + `proportiontocut` ). + + Parameters + ---------- + a : array_like + Input array + proportiontocut : float + Fraction to cut off of both tails of the distribution + axis : int or None, optional + Axis along which the trimmed means are computed. Default is 0. + If None, compute over the whole array `a`. + + Returns + ------- + trim_mean : ndarray + Mean of trimmed array. + + See Also + -------- + trimboth + tmean : compute the trimmed mean ignoring values outside given `limits`. + + Examples + -------- + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.trim_mean(x, 0.1) + 9.5 + >>> x2 = x.reshape(5, 4) + >>> x2 + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15], + [16, 17, 18, 19]]) + >>> stats.trim_mean(x2, 0.25) + array([ 8., 9., 10., 11.]) + >>> stats.trim_mean(x2, 0.25, axis=1) + array([ 1.5, 5.5, 9.5, 13.5, 17.5]) + + """ + a = np.asarray(a) + + if a.size == 0: + return np.nan + + if axis is None: + a = a.ravel() + axis = 0 + + nobs = a.shape[axis] + lowercut = int(proportiontocut * nobs) + uppercut = nobs - lowercut + if (lowercut > uppercut): + raise ValueError("Proportion too big.") + + atmp = np.partition(a, (lowercut, uppercut - 1), axis) + + sl = [slice(None)] * atmp.ndim + sl[axis] = slice(lowercut, uppercut) + return np.mean(atmp[tuple(sl)], axis=axis) + + +F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue')) + + +def f_oneway(*args): + """ + Performs a 1-way ANOVA. + + The one-way ANOVA tests the null hypothesis that two or more groups have + the same population mean. The test is applied to samples from two or + more groups, possibly with differing sizes. + + Parameters + ---------- + sample1, sample2, ... : array_like + The sample measurements for each group. + + Returns + ------- + statistic : float + The computed F-value of the test. + pvalue : float + The associated p-value from the F-distribution. + + Notes + ----- + The ANOVA test has important assumptions that must be satisfied in order + for the associated p-value to be valid. + + 1. The samples are independent. + 2. Each sample is from a normally distributed population. + 3. The population standard deviations of the groups are all equal. This + property is known as homoscedasticity. + + If these assumptions are not true for a given set of data, it may still be + possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although + with some loss of power. + + The algorithm is from Heiman[2], pp.394-7. + + + References + ---------- + .. [1] Lowry, Richard. "Concepts and Applications of Inferential + Statistics". Chapter 14. 
+           https://web.archive.org/web/20171027235250/http://vassarstats.net:80/textbook/ch14pt1.html
+
+    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
+
+    .. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
+           http://www.biostathandbook.com/onewayanova.html
+
+    Examples
+    --------
+    >>> import scipy.stats as stats
+
+    [3]_ Here are some data on a shell measurement (the length of the anterior
+    adductor muscle scar, standardized by dividing by length) in the mussel
+    Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
+    Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
+    much larger data set used in McDonald et al. (1991).
+
+    >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
+    ...              0.0659, 0.0923, 0.0836]
+    >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
+    ...            0.0725]
+    >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
+    >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
+    ...            0.0689]
+    >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
+    >>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
+    (7.1210194716424473, 0.00028122423145345439)
+
+    """
+    args = [np.asarray(arg, dtype=float) for arg in args]
+    # ANOVA on N groups, each in its own array
+    num_groups = len(args)
+    alldata = np.concatenate(args)
+    bign = len(alldata)
+
+    # Determine the mean of the data, and subtract that from all inputs to a
+    # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
+    # to a shift in location, and centering all data around zero vastly
+    # improves numerical stability.
+    offset = alldata.mean()
+    alldata -= offset
+
+    sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / bign)
+    ssbn = 0
+    for a in args:
+        ssbn += _square_of_sums(a - offset) / len(a)
+
+    # Naming: variables ending in bn/b are for "between treatments", wn/w are
+    # for "within treatments"
+    ssbn -= _square_of_sums(alldata) / bign
+    sswn = sstot - ssbn
+    dfbn = num_groups - 1
+    dfwn = bign - num_groups
+    msb = ssbn / dfbn
+    msw = sswn / dfwn
+    f = msb / msw
+
+    prob = special.fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf
+
+    return F_onewayResult(f, prob)
+
+
+def pearsonr(x, y):
+    r"""
+    Calculate a Pearson correlation coefficient and the p-value for testing
+    non-correlation.
+
+    The Pearson correlation coefficient measures the linear relationship
+    between two datasets. Strictly speaking, Pearson's correlation requires
+    that each dataset be normally distributed, and not necessarily zero-mean.
+    Like other correlation coefficients, this one varies between -1 and +1
+    with 0 implying no correlation. Correlations of -1 or +1 imply an exact
+    linear relationship. Positive correlations imply that as x increases, so
+    does y. Negative correlations imply that as x increases, y decreases.
+
+    The p-value roughly indicates the probability of an uncorrelated system
+    producing datasets that have a Pearson correlation at least as extreme
+    as the one computed from these datasets. The p-values are not entirely
+    reliable but are probably reasonable for datasets larger than 500 or so.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        Input array.
+    y : (N,) array_like
+        Input array.
+
+    Returns
+    -------
+    r : float
+        Pearson's correlation coefficient.
+    p-value : float
+        Two-tailed p-value.
+
+    Notes
+    -----
+    The correlation coefficient is calculated as follows:
+
+    ..
math:: + + r_{pb} = \frac{\sum (x - m_x) (y - m_y)} + {\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}} + + where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is + the mean of the vector :math:`y`. + + + References + ---------- + http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation + + Examples + -------- + >>> from scipy import stats + >>> a = np.array([0, 0, 0, 1, 1, 1, 1]) + >>> b = np.arange(7) + >>> stats.pearsonr(a, b) + (0.8660254037844386, 0.011724811003954654) + + >>> stats.pearsonr([1,2,3,4,5], [5,6,7,8,7]) + (0.83205029433784372, 0.080509573298498519) + """ + # x and y should have same length. + x = np.asarray(x) + y = np.asarray(y) + n = len(x) + mx = x.mean() + my = y.mean() + xm, ym = x - mx, y - my + r_num = np.add.reduce(xm * ym) + r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym)) + r = r_num / r_den + + # Presumably, if abs(r) > 1, then it is only some small artifact of + # floating point arithmetic. + r = max(min(r, 1.0), -1.0) + df = n - 2 + if abs(r) == 1.0: + prob = 0.0 + else: + t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r))) + prob = special.betainc( + 0.5*df, 0.5, np.fmin(np.asarray(df / (df + t_squared)), 1.0) + ) + + return r, prob + + +def fisher_exact(table, alternative='two-sided'): + """Performs a Fisher exact test on a 2x2 contingency table. + + Parameters + ---------- + table : array_like of ints + A 2x2 contingency table. Elements should be non-negative integers. + alternative : {'two-sided', 'less', 'greater'}, optional + Which alternative hypothesis to the null hypothesis the test uses. + Default is 'two-sided'. + + Returns + ------- + oddsratio : float + This is prior odds ratio and not a posterior estimate. + p_value : float + P-value, the probability of obtaining a distribution at least as + extreme as the one that was actually observed, assuming that the + null hypothesis is true. + + See Also + -------- + chi2_contingency : Chi-square test of independence of variables in a + contingency table. + + Notes + ----- + The calculated odds ratio is different from the one R uses. This scipy + implementation returns the (more common) "unconditional Maximum + Likelihood Estimate", while R uses the "conditional Maximum Likelihood + Estimate". + + For tables with large numbers, the (inexact) chi-square test implemented + in the function `chi2_contingency` can also be used. + + Examples + -------- + Say we spend a few days counting whales and sharks in the Atlantic and + Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the + Indian ocean 2 whales and 5 sharks. Then our contingency table is:: + + Atlantic Indian + whales 8 2 + sharks 1 5 + + We use this table to find the p-value: + + >>> import scipy.stats as stats + >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]]) + >>> pvalue + 0.0349... + + The probability that we would observe this or an even more imbalanced ratio + by chance is about 3.5%. A commonly used significance level is 5%--if we + adopt that, we can therefore conclude that our observed imbalance is + statistically significant; whales prefer the Atlantic while sharks prefer + the Indian ocean. 
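+
+    A one-sided test can be requested via `alternative`; for example, to
+    test the alternative that whales are over-represented in the Atlantic
+    (the assignment form suppresses doctest output):
+
+    >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]],
+    ...                                        alternative='greater')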
+ + """ + hypergeom = distributions.hypergeom + c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm + if not c.shape == (2, 2): + raise ValueError("The input `table` must be of shape (2, 2).") + + if np.any(c < 0): + raise ValueError("All values in `table` must be nonnegative.") + + if 0 in c.sum(axis=0) or 0 in c.sum(axis=1): + # If both values in a row or column are zero, the p-value is 1 and + # the odds ratio is NaN. + return np.nan, 1.0 + + if c[1, 0] > 0 and c[0, 1] > 0: + oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1]) + else: + oddsratio = np.inf + + n1 = c[0, 0] + c[0, 1] + n2 = c[1, 0] + c[1, 1] + n = c[0, 0] + c[1, 0] + + def binary_search(n, n1, n2, side): + """Binary search for where to begin lower/upper halves in two-sided + test. + """ + if side == "upper": + minval = mode + maxval = n + else: + minval = 0 + maxval = mode + guess = -1 + while maxval - minval > 1: + if maxval == minval + 1 and guess == minval: + guess = maxval + else: + guess = (maxval + minval) // 2 + pguess = hypergeom.pmf(guess, n1 + n2, n1, n) + if side == "upper": + ng = guess - 1 + else: + ng = guess + 1 + if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n): + break + elif pguess < pexact: + maxval = guess + else: + minval = guess + if guess == -1: + guess = minval + if side == "upper": + while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon: + guess -= 1 + while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon: + guess += 1 + else: + while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon: + guess += 1 + while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon: + guess -= 1 + return guess + + if alternative == 'less': + pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) + elif alternative == 'greater': + # Same formula as the 'less' case, but with the second column. + pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1]) + elif alternative == 'two-sided': + mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2)) + pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n) + pmode = hypergeom.pmf(mode, n1 + n2, n1, n) + + epsilon = 1 - 1e-4 + if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon: + return oddsratio, 1. + + elif c[0, 0] < mode: + plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) + if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon: + return oddsratio, plower + + guess = binary_search(n, n1, n2, "upper") + pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n) + else: + pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n) + if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon: + return oddsratio, pupper + + guess = binary_search(n, n1, n2, "lower") + pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n) + else: + msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}" + raise ValueError(msg) + + pvalue = min(pvalue, 1.0) + + return oddsratio, pvalue + + +SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue')) + + +def spearmanr(a, b=None, axis=0, nan_policy='propagate'): + """ + Calculate a Spearman rank-order correlation coefficient and the p-value + to test for non-correlation. + + The Spearman correlation is a nonparametric measure of the monotonicity + of the relationship between two datasets. Unlike the Pearson correlation, + the Spearman correlation does not assume that both datasets are normally + distributed. Like other correlation coefficients, this one varies + between -1 and +1 with 0 implying no correlation. 
Correlations of -1 or + +1 imply an exact monotonic relationship. Positive correlations imply that + as x increases, so does y. Negative correlations imply that as x + increases, y decreases. + + The p-value roughly indicates the probability of an uncorrelated system + producing datasets that have a Spearman correlation at least as extreme + as the one computed from these datasets. The p-values are not entirely + reliable but are probably reasonable for datasets larger than 500 or so. + + Parameters + ---------- + a, b : 1D or 2D array_like, b is optional + One or two 1-D or 2-D arrays containing multiple variables and + observations. When these are 1-D, each represents a vector of + observations of a single variable. For the behavior in the 2-D case, + see under ``axis``, below. + Both arrays need to have the same length in the ``axis`` dimension. + axis : int or None, optional + If axis=0 (default), then each column represents a variable, with + observations in the rows. If axis=1, the relationship is transposed: + each row represents a variable, while the columns contain observations. + If axis=None, then both arrays will be raveled. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + correlation : float or ndarray (2-D square) + Spearman correlation matrix or correlation coefficient (if only 2 + variables are given as parameters. Correlation matrix is square with + length equal to total number of variables (columns or rows) in ``a`` + and ``b`` combined. + pvalue : float + The two-sided p-value for a hypothesis test whose null hypothesis is + that two sets of data are uncorrelated, has same dimension as rho. + + References + ---------- + + .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + Section 14.7 + + Examples + -------- + >>> from scipy import stats + >>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7]) + (0.82078268166812329, 0.088587005313543798) + >>> np.random.seed(1234321) + >>> x2n = np.random.randn(100, 2) + >>> y2n = np.random.randn(100, 2) + >>> stats.spearmanr(x2n) + (0.059969996999699973, 0.55338590803773591) + >>> stats.spearmanr(x2n[:,0], x2n[:,1]) + (0.059969996999699973, 0.55338590803773591) + >>> rho, pval = stats.spearmanr(x2n, y2n) + >>> rho + array([[ 1. , 0.05997 , 0.18569457, 0.06258626], + [ 0.05997 , 1. , 0.110003 , 0.02534653], + [ 0.18569457, 0.110003 , 1. , 0.03488749], + [ 0.06258626, 0.02534653, 0.03488749, 1. ]]) + >>> pval + array([[ 0. , 0.55338591, 0.06435364, 0.53617935], + [ 0.55338591, 0. , 0.27592895, 0.80234077], + [ 0.06435364, 0.27592895, 0. , 0.73039992], + [ 0.53617935, 0.80234077, 0.73039992, 0. ]]) + >>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1) + >>> rho + array([[ 1. , 0.05997 , 0.18569457, 0.06258626], + [ 0.05997 , 1. , 0.110003 , 0.02534653], + [ 0.18569457, 0.110003 , 1. , 0.03488749], + [ 0.06258626, 0.02534653, 0.03488749, 1. 
]]) + >>> stats.spearmanr(x2n, y2n, axis=None) + (0.10816770419260482, 0.1273562188027364) + >>> stats.spearmanr(x2n.ravel(), y2n.ravel()) + (0.10816770419260482, 0.1273562188027364) + + >>> xint = np.random.randint(10, size=(100, 2)) + >>> stats.spearmanr(xint) + (0.052760927029710199, 0.60213045837062351) + + """ + a, axisout = _chk_asarray(a, axis) + if a.ndim > 2: + raise ValueError("spearmanr only handles 1-D or 2-D arrays") + + if b is None: + if a.ndim < 2: + raise ValueError("`spearmanr` needs at least 2 variables to compare") + else: + # Concatenate a and b, so that we now only have to handle the case + # of a 2-D `a`. + b, _ = _chk_asarray(b, axis) + if axisout == 0: + a = np.column_stack((a, b)) + else: + a = np.row_stack((a, b)) + + n_vars = a.shape[1 - axisout] + n_obs = a.shape[axisout] + if n_obs <= 1: + # Handle empty arrays or single observations. + return SpearmanrResult(np.nan, np.nan) + + a_contains_nan, nan_policy = _contains_nan(a, nan_policy) + variable_has_nan = np.zeros(n_vars, dtype=bool) + if a_contains_nan: + if nan_policy == 'omit': + return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy) + elif nan_policy == 'propagate': + if a.ndim == 1 or n_vars <= 2: + return SpearmanrResult(np.nan, np.nan) + else: + # Keep track of variables with NaNs, set the outputs to NaN + # only for those variables + variable_has_nan = np.isnan(a).sum(axis=axisout) + + a_ranked = np.apply_along_axis(rankdata, axisout, a) + rs = np.corrcoef(a_ranked, rowvar=axisout) + dof = n_obs - 2 # degrees of freedom + + # rs can have elements equal to 1, so avoid zero division warnings + olderr = np.seterr(divide='ignore') + try: + # clip the small negative values possibly caused by rounding + # errors before taking the square root + t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0)) + finally: + np.seterr(**olderr) + + prob = 2 * distributions.t.sf(np.abs(t), dof) + + # For backwards compatibility, return scalars when comparing 2 columns + if rs.shape == (2, 2): + return SpearmanrResult(rs[1, 0], prob[1, 0]) + else: + rs[variable_has_nan, :] = np.nan + rs[:, variable_has_nan] = np.nan + return SpearmanrResult(rs, prob) + + +PointbiserialrResult = namedtuple('PointbiserialrResult', + ('correlation', 'pvalue')) + + +def pointbiserialr(x, y): + r""" + Calculate a point biserial correlation coefficient and its p-value. + + The point biserial correlation is used to measure the relationship + between a binary variable, x, and a continuous variable, y. Like other + correlation coefficients, this one varies between -1 and +1 with 0 + implying no correlation. Correlations of -1 or +1 imply a determinative + relationship. + + This function uses a shortcut formula but produces the same result as + `pearsonr`. + + Parameters + ---------- + x : array_like of bools + Input array. + y : array_like + Input array. + + Returns + ------- + correlation : float + R value + pvalue : float + 2-tailed p-value + + Notes + ----- + `pointbiserialr` uses a t-test with ``n-1`` degrees of freedom. + It is equivalent to `pearsonr.` + + The value of the point-biserial correlation can be calculated from: + + .. 
math::

+        r_{pb} = \frac{\overline{Y_{1}} -
+                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
+
+    Where :math:`\overline{Y_{0}}` and :math:`\overline{Y_{1}}` are the
+    means of the metric observations coded 0 and 1 respectively;
+    :math:`N_{0}` and :math:`N_{1}` are the numbers of observations coded 0
+    and 1 respectively; :math:`N` is the total number of observations and
+    :math:`s_{y}` is the standard deviation of all the metric observations.
+
+    A value of :math:`r_{pb}` that is significantly different from zero is
+    completely equivalent to a significant difference in means between the
+    two groups. Thus, an independent groups t-test with :math:`N-2` degrees
+    of freedom may be used to test whether :math:`r_{pb}` is nonzero. The
+    relation between the t-statistic for comparing two independent groups
+    and :math:`r_{pb}` is given by:
+
+    .. math::
+
+        t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
+
+    References
+    ----------
+    .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
+           Statist., Vol. 20, no. 1, pp. 125-126, 1949.
+
+    .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
+           Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
+           no. 3, pp. 603-607, 1954.
+
+    .. [3] D. Kornbrot, "Point Biserial Correlation", In Wiley StatsRef:
+           Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
+           https://doi.org/10.1002/9781118445112.stat06227
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> a = np.array([0, 0, 0, 1, 1, 1, 1])
+    >>> b = np.arange(7)
+    >>> stats.pointbiserialr(a, b)
+    (0.8660254037844386, 0.011724811003954652)
+    >>> stats.pearsonr(a, b)
+    (0.86602540378443871, 0.011724811003954626)
+    >>> np.corrcoef(a, b)
+    array([[ 1.       ,  0.8660254],
+           [ 0.8660254,  1.       ]])
+
+    """
+    rpb, prob = pearsonr(x, y)
+    return PointbiserialrResult(rpb, prob)
+
+
+KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
+
+
+def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate', method='auto'):
+    """
+    Calculate Kendall's tau, a correlation measure for ordinal data.
+
+    Kendall's tau is a measure of the correspondence between two rankings.
+    Values close to 1 indicate strong agreement, values close to -1 indicate
+    strong disagreement. This is the 1945 "tau-b" version of Kendall's
+    tau [2]_, which can account for ties and which reduces to the 1938
+    "tau-a" version [1]_ in the absence of ties.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of rankings, of the same shape. If arrays are not 1-D, they
+        will be flattened to 1-D.
+    initial_lexsort : bool, optional
+        Unused (deprecated).
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns
+        nan, 'raise' throws an error, 'omit' performs the calculations
+        ignoring nan values. Default is 'propagate'. Note that if the input
+        contains nan, 'omit' delegates to mstats_basic.kendalltau(), which
+        has a different implementation.
+    method : {'auto', 'asymptotic', 'exact'}, optional
+        Defines which method is used to calculate the p-value [5]_.
+        'asymptotic' uses a normal approximation valid for large samples.
+        'exact' computes the exact p-value, but can only be used if no ties
+        are present. 'auto' is the default and selects the appropriate
+        method based on a trade-off between speed and accuracy.
+
+    Returns
+    -------
+    correlation : float
+        The tau statistic.
+    pvalue : float
+        The two-sided p-value for a hypothesis test whose null hypothesis is
+        an absence of association, tau = 0.
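+        With ``method='exact'`` (and no ties) this is an exact permutation
+        p-value; with ``method='asymptotic'`` it is based on a normal
+        approximation, as described under `method` above.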
+ + See also + -------- + spearmanr : Calculates a Spearman rank-order correlation coefficient. + theilslopes : Computes the Theil-Sen estimator for a set of points (x, y). + weightedtau : Computes a weighted version of Kendall's tau. + + Notes + ----- + The definition of Kendall's tau that is used is [2]_:: + + tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U)) + + where P is the number of concordant pairs, Q the number of discordant + pairs, T the number of ties only in `x`, and U the number of ties only in + `y`. If a tie occurs for the same pair in both `x` and `y`, it is not + added to either T or U. + + References + ---------- + .. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika + Vol. 30, No. 1/2, pp. 81-93, 1938. + .. [2] Maurice G. Kendall, "The treatment of ties in ranking problems", + Biometrika Vol. 33, No. 3, pp. 239-251. 1945. + .. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John + Wiley & Sons, 1967. + .. [4] Peter M. Fenwick, "A new data structure for cumulative frequency + tables", Software: Practice and Experience, Vol. 24, No. 3, + pp. 327-336, 1994. + .. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), + Charles Griffin & Co., 1970. + + Examples + -------- + >>> from scipy import stats + >>> x1 = [12, 2, 1, 12, 2] + >>> x2 = [1, 4, 7, 1, 0] + >>> tau, p_value = stats.kendalltau(x1, x2) + >>> tau + -0.47140452079103173 + >>> p_value + 0.2827454599327748 + + """ + x = np.asarray(x).ravel() + y = np.asarray(y).ravel() + + if x.size != y.size: + raise ValueError("All inputs to `kendalltau` must be of the same size, " + "found x-size %s and y-size %s" % (x.size, y.size)) + elif not x.size or not y.size: + return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty + + # check both x and y + cnx, npx = _contains_nan(x, nan_policy) + cny, npy = _contains_nan(y, nan_policy) + contains_nan = cnx or cny + if npx == 'omit' or npy == 'omit': + nan_policy = 'omit' + + if contains_nan and nan_policy == 'propagate': + return KendalltauResult(np.nan, np.nan) + + elif contains_nan and nan_policy == 'omit': + x = ma.masked_invalid(x) + y = ma.masked_invalid(y) + return mstats_basic.kendalltau(x, y, method=method) + + if initial_lexsort is not None: # deprecate to drop! + warnings.warn('"initial_lexsort" is gone!') + + def count_rank_tie(ranks): + cnt = np.bincount(ranks).astype('int64', copy=False) + cnt = cnt[cnt > 1] + return ((cnt * (cnt - 1) // 2).sum(), + (cnt * (cnt - 1.) * (cnt - 2)).sum(), + (cnt * (cnt - 1.) 
* (2*cnt + 5)).sum()) + + size = x.size + perm = np.argsort(y) # sort on y and convert y to dense ranks + x, y = x[perm], y[perm] + y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp) + + # stable sort on x and convert x to dense ranks + perm = np.argsort(x, kind='mergesort') + x, y = x[perm], y[perm] + x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp) + + dis = _kendall_dis(x, y) # discordant pairs + + obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True] + cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False) + + ntie = (cnt * (cnt - 1) // 2).sum() # joint ties + xtie, x0, x1 = count_rank_tie(x) # ties in x, stats + ytie, y0, y1 = count_rank_tie(y) # ties in y, stats + + tot = (size * (size - 1)) // 2 + + if xtie == tot or ytie == tot: + return KendalltauResult(np.nan, np.nan) + + # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie + # = con + dis + xtie + ytie - ntie + con_minus_dis = tot - xtie - ytie + ntie - 2 * dis + tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie) + # Limit range to fix computational errors + tau = min(1., max(-1., tau)) + + if method == 'exact' and (xtie != 0 or ytie != 0): + raise ValueError("Ties found, exact method cannot be used.") + + if method == 'auto': + if (xtie == 0 and ytie == 0) and (size <= 33 or min(dis, tot-dis) <= 1): + method = 'exact' + else: + method = 'asymptotic' + + if xtie == 0 and ytie == 0 and method == 'exact': + # Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), Charles Griffin & Co., 1970. + c = min(dis, tot-dis) + if size <= 0: + raise ValueError + elif c < 0 or 2*c > size*(size-1): + raise ValueError + elif size == 1: + pvalue = 1.0 + elif size == 2: + pvalue = 1.0 + elif c == 0: + pvalue = 2.0/np.math.factorial(size) + elif c == 1: + pvalue = 2.0/np.math.factorial(size-1) + else: + old = [0.0]*(c+1) + new = [0.0]*(c+1) + new[0] = 1.0 + new[1] = 1.0 + for j in range(3,size+1): + old = new[:] + for k in range(1,min(j,c+1)): + new[k] += new[k-1] + for k in range(j,c+1): + new[k] += new[k-1] - old[k-j] + pvalue = 2.0*sum(new)/np.math.factorial(size) + + elif method == 'asymptotic': + # con_minus_dis is approx normally distributed with this variance [3]_ + var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + ( + 2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. * + size * (size - 1) * (size - 2)) + pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2)) + else: + raise ValueError("Unknown method "+str(method)+" specified, please use auto, exact or asymptotic.") + + return KendalltauResult(tau, pvalue) + + +WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue')) + + +def weightedtau(x, y, rank=True, weigher=None, additive=True): + r""" + Compute a weighted version of Kendall's :math:`\tau`. + + The weighted :math:`\tau` is a weighted version of Kendall's + :math:`\tau` in which exchanges of high weight are more influential than + exchanges of low weight. The default parameters compute the additive + hyperbolic version of the index, :math:`\tau_\mathrm h`, which has + been shown to provide the best balance between important and + unimportant elements [1]_. + + The weighting is defined by means of a rank array, which assigns a + nonnegative rank to each element, and a weigher function, which + assigns a weight based from the rank to each element. The weight of an + exchange is then the sum or the product of the weights of the ranks of + the exchanged elements. 
The default parameters compute + :math:`\tau_\mathrm h`: an exchange between elements with rank + :math:`r` and :math:`s` (starting from zero) has weight + :math:`1/(r+1) + 1/(s+1)`. + + Specifying a rank array is meaningful only if you have in mind an + external criterion of importance. If, as it usually happens, you do + not have in mind a specific rank, the weighted :math:`\tau` is + defined by averaging the values obtained using the decreasing + lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the + behavior with default parameters. + + Note that if you are computing the weighted :math:`\tau` on arrays of + ranks, rather than of scores (i.e., a larger value implies a lower + rank) you must negate the ranks, so that elements of higher rank are + associated with a larger value. + + Parameters + ---------- + x, y : array_like + Arrays of scores, of the same shape. If arrays are not 1-D, they will + be flattened to 1-D. + rank: array_like of ints or bool, optional + A nonnegative rank assigned to each element. If it is None, the + decreasing lexicographical rank by (`x`, `y`) will be used: elements of + higher rank will be those with larger `x`-values, using `y`-values to + break ties (in particular, swapping `x` and `y` will give a different + result). If it is False, the element indices will be used + directly as ranks. The default is True, in which case this + function returns the average of the values obtained using the + decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`). + weigher : callable, optional + The weigher function. Must map nonnegative integers (zero + representing the most important element) to a nonnegative weight. + The default, None, provides hyperbolic weighing, that is, + rank :math:`r` is mapped to weight :math:`1/(r+1)`. + additive : bool, optional + If True, the weight of an exchange is computed by adding the + weights of the ranks of the exchanged elements; otherwise, the weights + are multiplied. The default is True. + + Returns + ------- + correlation : float + The weighted :math:`\tau` correlation index. + pvalue : float + Presently ``np.nan``, as the null statistics is unknown (even in the + additive hyperbolic case). + + See also + -------- + kendalltau : Calculates Kendall's tau. + spearmanr : Calculates a Spearman rank-order correlation coefficient. + theilslopes : Computes the Theil-Sen estimator for a set of points (x, y). + + Notes + ----- + This function uses an :math:`O(n \log n)`, mergesort-based algorithm + [1]_ that is a weighted extension of Knight's algorithm for Kendall's + :math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_ + between rankings without ties (i.e., permutations) by setting + `additive` and `rank` to False, as the definition given in [1]_ is a + generalization of Shieh's. + + NaNs are considered the smallest possible score. + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Sebastiano Vigna, "A weighted correlation index for rankings with + ties", Proceedings of the 24th international conference on World + Wide Web, pp. 1166-1176, ACM, 2015. + .. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with + Ungrouped Data", Journal of the American Statistical Association, + Vol. 61, No. 314, Part 1, pp. 436-439, 1966. + .. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics & + Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998. 
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> x = [12, 2, 1, 12, 2]
+    >>> y = [1, 4, 7, 1, 0]
+    >>> tau, p_value = stats.weightedtau(x, y)
+    >>> tau
+    -0.56694968153682723
+    >>> p_value
+    nan
+    >>> tau, p_value = stats.weightedtau(x, y, additive=False)
+    >>> tau
+    -0.62205716951801038
+
+    NaNs are considered the smallest possible score:
+
+    >>> x = [12, 2, 1, 12, 2]
+    >>> y = [1, 4, 7, 1, np.nan]
+    >>> tau, _ = stats.weightedtau(x, y)
+    >>> tau
+    -0.56694968153682723
+
+    This is exactly Kendall's tau:
+
+    >>> x = [12, 2, 1, 12, 2]
+    >>> y = [1, 4, 7, 1, 0]
+    >>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
+    >>> tau
+    -0.47140452079103173
+
+    >>> x = [12, 2, 1, 12, 2]
+    >>> y = [1, 4, 7, 1, 0]
+    >>> stats.weightedtau(x, y, rank=None)
+    WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
+    >>> stats.weightedtau(y, x, rank=None)
+    WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)
+
+    """
+    x = np.asarray(x).ravel()
+    y = np.asarray(y).ravel()
+
+    if x.size != y.size:
+        raise ValueError("All inputs to `weightedtau` must be of the same size, "
+                         "found x-size %s and y-size %s" % (x.size, y.size))
+    if not x.size:
+        return WeightedTauResult(np.nan, np.nan)  # Return NaN if arrays are empty
+
+    # If there are NaNs we apply _toint64()
+    if np.isnan(np.sum(x)):
+        x = _toint64(x)
+    if np.isnan(np.sum(y)):
+        y = _toint64(y)
+
+    # Reduce unsupported types to ranks
+    if x.dtype != y.dtype:
+        if x.dtype != np.int64:
+            x = _toint64(x)
+        if y.dtype != np.int64:
+            y = _toint64(y)
+    else:
+        if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
+            x = _toint64(x)
+            y = _toint64(y)
+
+    if rank is True:
+        return WeightedTauResult((
+            _weightedrankedtau(x, y, None, weigher, additive) +
+            _weightedrankedtau(y, x, None, weigher, additive)
+            ) / 2, np.nan)
+
+    if rank is False:
+        rank = np.arange(x.size, dtype=np.intp)
+    elif rank is not None:
+        rank = np.asarray(rank).ravel()
+        if rank.size != x.size:
+            raise ValueError("All inputs to `weightedtau` must be of the same size, "
+                             "found x-size %s and rank-size %s" % (x.size, rank.size))
+
+    return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive),
+                             np.nan)
+
+
+#####################################
+#       INFERENTIAL STATISTICS      #
+#####################################
+
+Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
+
+
+def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
+    """
+    Calculate the T-test for the mean of ONE group of scores.
+
+    This is a two-sided test for the null hypothesis that the expected value
+    (mean) of a sample of independent observations `a` is equal to the given
+    population mean, `popmean`.
+
+    Parameters
+    ----------
+    a : array_like
+        Sample observation.
+    popmean : float or array_like
+        Expected value in null hypothesis. If array_like, then it must have
+        the same shape as `a` excluding the axis dimension.
+    axis : int or None, optional
+        Axis along which to compute test. If None, compute over the whole
+        array `a`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
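+        When 'omit' is chosen, the computation is delegated to the
+        masked-array implementation, ``mstats_basic.ttest_1samp``.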
+ + Returns + ------- + statistic : float or array + t-statistic + pvalue : float or array + two-tailed p-value + + Examples + -------- + >>> from scipy import stats + + >>> np.random.seed(7654567) # fix seed to get the same result + >>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2)) + + Test if mean of random sample is equal to true mean, and different mean. + We reject the null hypothesis in the second case and don't reject it in + the first case. + + >>> stats.ttest_1samp(rvs,5.0) + (array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674])) + >>> stats.ttest_1samp(rvs,0.0) + (array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999])) + + Examples using axis and non-scalar dimension for population mean. + + >>> stats.ttest_1samp(rvs,[5.0,0.0]) + (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04])) + >>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1) + (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04])) + >>> stats.ttest_1samp(rvs,[[5.0],[0.0]]) + (array([[-0.68014479, -0.04323899], + [ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01], + [ 7.89094663e-03, 1.49986458e-04]])) + + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.ttest_1samp(a, popmean, axis) + + n = a.shape[axis] + df = n - 1 + + d = np.mean(a, axis) - popmean + v = np.var(a, axis, ddof=1) + denom = np.sqrt(v / n) + + with np.errstate(divide='ignore', invalid='ignore'): + t = np.divide(d, denom) + t, prob = _ttest_finish(df, t) + + return Ttest_1sampResult(t, prob) + + +def _ttest_finish(df, t): + """Common code between all 3 t-test functions.""" + prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail + if t.ndim == 0: + t = t[()] + + return t, prob + + +def _ttest_ind_from_stats(mean1, mean2, denom, df): + + d = mean1 - mean2 + with np.errstate(divide='ignore', invalid='ignore'): + t = np.divide(d, denom) + t, prob = _ttest_finish(df, t) + + return (t, prob) + + +def _unequal_var_ttest_denom(v1, n1, v2, n2): + vn1 = v1 / n1 + vn2 = v2 / n2 + with np.errstate(divide='ignore', invalid='ignore'): + df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1)) + + # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0). + # Hence it doesn't matter what df is as long as it's not NaN. + df = np.where(np.isnan(df), 1, df) + denom = np.sqrt(vn1 + vn2) + return df, denom + + +def _equal_var_ttest_denom(v1, n1, v2, n2): + df = n1 + n2 - 2.0 + svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df + denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2)) + return df, denom + + +Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue')) + + +def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, + equal_var=True): + """ + T-test for means of two independent samples from descriptive statistics. + + This is a two-sided test for the null hypothesis that two independent + samples have identical average (expected) values. + + Parameters + ---------- + mean1 : array_like + The mean(s) of sample 1. + std1 : array_like + The standard deviation(s) of sample 1. + nobs1 : array_like + The number(s) of observations of sample 1. + mean2 : array_like + The mean(s) of sample 2 + std2 : array_like + The standard deviations(s) of sample 2. + nobs2 : array_like + The number(s) of observations of sample 2. 
+ equal_var : bool, optional + If True (default), perform a standard independent 2 sample test + that assumes equal population variances [1]_. + If False, perform Welch's t-test, which does not assume equal + population variance [2]_. + + Returns + ------- + statistic : float or array + The calculated t-statistics + pvalue : float or array + The two-tailed p-value. + + See Also + -------- + scipy.stats.ttest_ind + + Notes + ----- + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test + + .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test + + Examples + -------- + Suppose we have the summary data for two samples, as follows:: + + Sample Sample + Size Mean Variance + Sample 1 13 15.0 87.5 + Sample 2 11 12.0 39.0 + + Apply the t-test to this data (with the assumption that the population + variances are equal): + + >>> from scipy.stats import ttest_ind_from_stats + >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13, + ... mean2=12.0, std2=np.sqrt(39.0), nobs2=11) + Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487) + + For comparison, here is the data from which those summary statistics + were taken. With this data, we can compute the same result using + `scipy.stats.ttest_ind`: + + >>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26]) + >>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21]) + >>> from scipy.stats import ttest_ind + >>> ttest_ind(a, b) + Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486) + + """ + if equal_var: + df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) + else: + df, denom = _unequal_var_ttest_denom(std1**2, nobs1, + std2**2, nobs2) + + res = _ttest_ind_from_stats(mean1, mean2, denom, df) + return Ttest_indResult(*res) + + +def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'): + """ + Calculate the T-test for the means of *two independent* samples of scores. + + This is a two-sided test for the null hypothesis that 2 independent samples + have identical average (expected) values. This test assumes that the + populations have identical variances by default. + + Parameters + ---------- + a, b : array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int or None, optional + Axis along which to compute test. If None, compute over the whole + arrays, `a`, and `b`. + equal_var : bool, optional + If True (default), perform a standard independent 2 sample test + that assumes equal population variances [1]_. + If False, perform Welch's t-test, which does not assume equal + population variance [2]_. + + .. versionadded:: 0.11.0 + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + + Returns + ------- + statistic : float or array + The calculated t-statistic. + pvalue : float or array + The two-tailed p-value. + + Notes + ----- + We can use this test, if we observe two independent samples from + the same or different population, e.g. exam scores of boys and + girls or of two ethnic groups. The test measures whether the + average (expected) value differs significantly across samples. If + we observe a large p-value, for example larger than 0.05 or 0.1, + then we cannot reject the null hypothesis of identical average scores. 
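+    For example, a p-value of 0.7 gives no grounds for rejection at any
+    conventional significance level.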
+    If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
+    then we reject the null hypothesis of equal averages.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
+
+    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> np.random.seed(12345678)
+
+    Test with samples with identical means:
+
+    >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500)
+    >>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500)
+    >>> stats.ttest_ind(rvs1, rvs2)
+    (0.26833823296239279, 0.78849443369564776)
+    >>> stats.ttest_ind(rvs1, rvs2, equal_var=False)
+    (0.26833823296239279, 0.78849452749500748)
+
+    `ttest_ind` underestimates p for unequal variances:
+
+    >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
+    >>> stats.ttest_ind(rvs1, rvs3)
+    (-0.46580283298287162, 0.64145827413436174)
+    >>> stats.ttest_ind(rvs1, rvs3, equal_var=False)
+    (-0.46580283298287162, 0.64149646246569292)
+
+    When n1 != n2, the equal variance t-statistic is no longer equal to the
+    unequal variance t-statistic:
+
+    >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
+    >>> stats.ttest_ind(rvs1, rvs4)
+    (-0.99882539442782481, 0.3182832709103896)
+    >>> stats.ttest_ind(rvs1, rvs4, equal_var=False)
+    (-0.69712570584654099, 0.48716927725402048)
+
+    T-test with different means, variance, and n:
+
+    >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
+    >>> stats.ttest_ind(rvs1, rvs5)
+    (-1.4679669854490653, 0.14263895620529152)
+    >>> stats.ttest_ind(rvs1, rvs5, equal_var=False)
+    (-0.94365973617132992, 0.34744170334794122)
+
+    """
+    a, b, axis = _chk2_asarray(a, b, axis)
+
+    # check both a and b
+    cna, npa = _contains_nan(a, nan_policy)
+    cnb, npb = _contains_nan(b, nan_policy)
+    contains_nan = cna or cnb
+    if npa == 'omit' or npb == 'omit':
+        nan_policy = 'omit'
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        b = ma.masked_invalid(b)
+        return mstats_basic.ttest_ind(a, b, axis, equal_var)
+
+    if a.size == 0 or b.size == 0:
+        return Ttest_indResult(np.nan, np.nan)
+
+    v1 = np.var(a, axis, ddof=1)
+    v2 = np.var(b, axis, ddof=1)
+    n1 = a.shape[axis]
+    n2 = b.shape[axis]
+
+    if equal_var:
+        df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
+    else:
+        df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
+
+    res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
+
+    return Ttest_indResult(*res)
+
+
+Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
+
+
+def ttest_rel(a, b, axis=0, nan_policy='propagate'):
+    """
+    Calculate the T-test on TWO RELATED samples of scores, a and b.
+
+    This is a two-sided test for the null hypothesis that 2 related or
+    repeated samples have identical average (expected) values.
+
+    Parameters
+    ----------
+    a, b : array_like
+        The arrays must have the same shape.
+    axis : int or None, optional
+        Axis along which to compute test. If None, compute over the whole
+        arrays, `a`, and `b`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+
+    Returns
+    -------
+    statistic : float or array
+        t-statistic.
+    pvalue : float or array
+        Two-tailed p-value.
+
+    Notes
+    -----
+    Examples of use are scores of the same set of students in
+    different exams, or repeated sampling from the same units. The
+    test measures whether the average score differs significantly
+    across samples (e.g. exams). If we observe a large p-value, for
+    example greater than 0.05 or 0.1 then we cannot reject the null
+    hypothesis of identical average scores. If the p-value is smaller
+    than the threshold, e.g. 1%, 5% or 10%, then we reject the null
+    hypothesis of equal averages. Small p-values are associated with
+    large t-statistics.
+
+    References
+    ----------
+    https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> np.random.seed(12345678)  # fix random seed to get same numbers
+
+    >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500)
+    >>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500) +
+    ...         stats.norm.rvs(scale=0.2, size=500))
+    >>> stats.ttest_rel(rvs1, rvs2)
+    (0.24101764965300962, 0.80964043445811562)
+    >>> rvs3 = (stats.norm.rvs(loc=8, scale=10, size=500) +
+    ...         stats.norm.rvs(scale=0.2, size=500))
+    >>> stats.ttest_rel(rvs1, rvs3)
+    (-3.9995108708727933, 7.3082402191726459e-005)
+
+    """
+    a, b, axis = _chk2_asarray(a, b, axis)
+
+    cna, npa = _contains_nan(a, nan_policy)
+    cnb, npb = _contains_nan(b, nan_policy)
+    contains_nan = cna or cnb
+    if npa == 'omit' or npb == 'omit':
+        nan_policy = 'omit'
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        b = ma.masked_invalid(b)
+        m = ma.mask_or(ma.getmask(a), ma.getmask(b))
+        aa = ma.array(a, mask=m, copy=True)
+        bb = ma.array(b, mask=m, copy=True)
+        return mstats_basic.ttest_rel(aa, bb, axis)
+
+    if a.shape[axis] != b.shape[axis]:
+        raise ValueError('unequal length arrays')
+
+    if a.size == 0 or b.size == 0:
+        return Ttest_relResult(np.nan, np.nan)
+
+    n = a.shape[axis]
+    df = n - 1
+
+    d = (a - b).astype(np.float64)
+    v = np.var(d, axis, ddof=1)
+    dm = np.mean(d, axis)
+    denom = np.sqrt(v / n)
+
+    with np.errstate(divide='ignore', invalid='ignore'):
+        t = np.divide(dm, denom)
+    t, prob = _ttest_finish(df, t)
+
+    return Ttest_relResult(t, prob)
+
+
+KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
+
+
+def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
+    """
+    Perform the Kolmogorov-Smirnov test for goodness of fit.
+
+    This performs a test of the distribution G(x) of an observed
+    random variable against a given distribution F(x). Under the null
+    hypothesis the two distributions are identical, G(x)=F(x). The
+    alternative hypothesis can be either 'two-sided' (default), 'less'
+    or 'greater'. The KS test is only valid for continuous distributions.
+
+    Parameters
+    ----------
+    rvs : str, array or callable
+        If a string, it should be the name of a distribution in `scipy.stats`.
+        If an array, it should be a 1-D array of observations of random
+        variables.
+        If a callable, it should be a function to generate random variables;
+        it is required to have a keyword argument `size`.
+    cdf : str or callable
+        If a string, it should be the name of a distribution in `scipy.stats`.
+        If `rvs` is a string then `cdf` can be False or the same as `rvs`.
+        If a callable, that callable is used to calculate the cdf.
+    args : tuple, sequence, optional
+        Distribution parameters, used if `rvs` or `cdf` are strings.
+    N : int, optional
+        Sample size if `rvs` is string or callable. Default is 20.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis (see explanation above).
+        Default is 'two-sided'.
+    mode : 'approx' (default) or 'asymp', optional
+        Defines the distribution used for calculating the p-value.
+ + - 'approx' : use approximation to exact distribution of test statistic + - 'asymp' : use asymptotic distribution of test statistic + + Returns + ------- + statistic : float + KS test statistic, either D, D+ or D-. + pvalue : float + One-tailed or two-tailed p-value. + + Notes + ----- + In the one-sided test, the alternative is that the empirical + cumulative distribution function of the random variable is "less" + or "greater" than the cumulative distribution function F(x) of the + hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``. + + Examples + -------- + >>> from scipy import stats + + >>> x = np.linspace(-15, 15, 9) + >>> stats.kstest(x, 'norm') + (0.44435602715924361, 0.038850142705171065) + + >>> np.random.seed(987654321) # set random seed to get the same result + >>> stats.kstest('norm', False, N=100) + (0.058352892479417884, 0.88531190944151261) + + The above lines are equivalent to: + + >>> np.random.seed(987654321) + >>> stats.kstest(stats.norm.rvs(size=100), 'norm') + (0.058352892479417884, 0.88531190944151261) + + *Test against one-sided alternative hypothesis* + + Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``: + + >>> np.random.seed(987654321) + >>> x = stats.norm.rvs(loc=0.2, size=100) + >>> stats.kstest(x,'norm', alternative = 'less') + (0.12464329735846891, 0.040989164077641749) + + Reject equal distribution against alternative hypothesis: less + + >>> stats.kstest(x,'norm', alternative = 'greater') + (0.0072115233216311081, 0.98531158590396395) + + Don't reject equal distribution against alternative hypothesis: greater + + >>> stats.kstest(x,'norm', mode='asymp') + (0.12464329735846891, 0.08944488871182088) + + *Testing t distributed random variables against normal distribution* + + With 100 degrees of freedom the t distribution looks close to the normal + distribution, and the K-S test does not reject the hypothesis that the + sample came from the normal distribution: + + >>> np.random.seed(987654321) + >>> stats.kstest(stats.t.rvs(100,size=100),'norm') + (0.072018929165471257, 0.67630062862479168) + + With 3 degrees of freedom the t distribution looks sufficiently different + from the normal distribution, that we can reject the hypothesis that the + sample came from the normal distribution at the 10% level: + + >>> np.random.seed(987654321) + >>> stats.kstest(stats.t.rvs(3,size=100),'norm') + (0.131016895759829, 0.058826222555312224) + + """ + if isinstance(rvs, string_types): + if (not cdf) or (cdf == rvs): + cdf = getattr(distributions, rvs).cdf + rvs = getattr(distributions, rvs).rvs + else: + raise AttributeError("if rvs is string, cdf has to be the " + "same distribution") + + if isinstance(cdf, string_types): + cdf = getattr(distributions, cdf).cdf + if callable(rvs): + kwds = {'size': N} + vals = np.sort(rvs(*args, **kwds)) + else: + vals = np.sort(rvs) + N = len(vals) + cdfvals = cdf(vals, *args) + + # to not break compatibility with existing code + if alternative == 'two_sided': + alternative = 'two-sided' + + if alternative in ['two-sided', 'greater']: + Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max() + if alternative == 'greater': + return KstestResult(Dplus, distributions.ksone.sf(Dplus, N)) + + if alternative in ['two-sided', 'less']: + Dmin = (cdfvals - np.arange(0.0, N)/N).max() + if alternative == 'less': + return KstestResult(Dmin, distributions.ksone.sf(Dmin, N)) + + if alternative == 'two-sided': + D = np.max([Dplus, Dmin]) + if mode == 'asymp': + return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N))) + if 
mode == 'approx':
+            pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
+            if N > 2666 or pval_two > 0.80 - N*0.3/1000:
+                return KstestResult(D, pval_two)
+            else:
+                return KstestResult(D, 2 * distributions.ksone.sf(D, N))
+
+
+# Map from names to lambda_ values used in power_divergence().
+_power_div_lambda_names = {
+    "pearson": 1,
+    "log-likelihood": 0,
+    "freeman-tukey": -0.5,
+    "mod-log-likelihood": -1,
+    "neyman": -2,
+    "cressie-read": 2/3,
+}
+
+
+def _count(a, axis=None):
+    """
+    Count the number of non-masked elements of an array.
+
+    This function behaves like np.ma.count(), but is much faster
+    for ndarrays.
+    """
+    if hasattr(a, 'count'):
+        num = a.count(axis=axis)
+        if isinstance(num, np.ndarray) and num.ndim == 0:
+            # In some cases, the `count` method returns a scalar array (e.g.
+            # np.array(3)), but we want a plain integer.
+            num = int(num)
+    else:
+        if axis is None:
+            num = a.size
+        else:
+            num = a.shape[axis]
+    return num
+
+
+Power_divergenceResult = namedtuple('Power_divergenceResult',
+                                    ('statistic', 'pvalue'))
+
+
+def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
+    """
+    Cressie-Read power divergence statistic and goodness of fit test.
+
+    This function tests the null hypothesis that the categorical data
+    has the given frequencies, using the Cressie-Read power divergence
+    statistic.
+
+    Parameters
+    ----------
+    f_obs : array_like
+        Observed frequencies in each category.
+    f_exp : array_like, optional
+        Expected frequencies in each category. By default the categories are
+        assumed to be equally likely.
+    ddof : int, optional
+        "Delta degrees of freedom": adjustment to the degrees of freedom
+        for the p-value. The p-value is computed using a chi-squared
+        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
+        is the number of observed frequencies. The default value of `ddof`
+        is 0.
+    axis : int or None, optional
+        The axis of the broadcast result of `f_obs` and `f_exp` along which to
+        apply the test. If axis is None, all values in `f_obs` are treated
+        as a single data set. Default is 0.
+    lambda_ : float or str, optional
+        `lambda_` gives the power in the Cressie-Read power divergence
+        statistic. The default is 1. For convenience, `lambda_` may be
+        assigned one of the following strings, in which case the
+        corresponding numerical value is used::
+
+            String              Value   Description
+            "pearson"             1     Pearson's chi-squared statistic.
+                                        In this case, the function is
+                                        equivalent to `stats.chisquare`.
+            "log-likelihood"      0     Log-likelihood ratio. Also known as
+                                        the G-test [3]_.
+            "freeman-tukey"      -1/2   Freeman-Tukey statistic.
+            "mod-log-likelihood" -1     Modified log-likelihood ratio.
+            "neyman"             -2     Neyman's statistic.
+            "cressie-read"       2/3    The power recommended in [5]_.
+
+    Returns
+    -------
+    statistic : float or ndarray
+        The Cressie-Read power divergence test statistic. The value is
+        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
+    pvalue : float or ndarray
+        The p-value of the test. The value is a float if `ddof` and the
+        return value `stat` are scalars.
+
+    See Also
+    --------
+    chisquare
+
+    Notes
+    -----
+    This test is invalid when the observed or expected frequencies in each
+    category are too small. A typical rule is that all of the observed
+    and expected frequencies should be at least 5.
+
+    When `lambda_` is less than zero, the formula for the statistic involves
+    dividing by `f_obs`, so a warning or error may be generated if any value
+    in `f_obs` is 0.
+ + Similarly, a warning or error may be generated if any value in `f_exp` is + zero when `lambda_` >= 0. + + The default degrees of freedom, k-1, are for the case when no parameters + of the distribution are estimated. If p parameters are estimated by + efficient maximum likelihood then the correct degrees of freedom are + k-1-p. If the parameters are estimated in a different way, then the + dof can be between k-1-p and k-1. However, it is also possible that + the asymptotic distribution is not a chisquare, in which case this + test is not appropriate. + + This function handles masked arrays. If an element of `f_obs` or `f_exp` + is masked, then data at that position is ignored, and does not count + towards the size of the data set. + + .. versionadded:: 0.13.0 + + References + ---------- + .. [1] Lowry, Richard. "Concepts and Applications of Inferential + Statistics". Chapter 8. + https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html + .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test + .. [3] "G-test", https://en.wikipedia.org/wiki/G-test + .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and + practice of statistics in biological research", New York: Freeman + (1981) + .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit + Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984), + pp. 440-464. + + Examples + -------- + + (See `chisquare` for more examples.) + + When just `f_obs` is given, it is assumed that the expected frequencies + are uniform and given by the mean of the observed frequencies. Here we + perform a G-test (i.e. use the log-likelihood ratio statistic): + + >>> from scipy.stats import power_divergence + >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood') + (2.006573162632538, 0.84823476779463769) + + The expected frequencies can be given with the `f_exp` argument: + + >>> power_divergence([16, 18, 16, 14, 12, 12], + ... f_exp=[16, 16, 16, 16, 16, 8], + ... lambda_='log-likelihood') + (3.3281031458963746, 0.6495419288047497) + + When `f_obs` is 2-D, by default the test is applied to each column. + + >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T + >>> obs.shape + (6, 2) + >>> power_divergence(obs, lambda_="log-likelihood") + (array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225])) + + By setting ``axis=None``, the test is applied to all data in the array, + which is equivalent to applying the test to the flattened array. + + >>> power_divergence(obs, axis=None) + (23.31034482758621, 0.015975692534127565) + >>> power_divergence(obs.ravel()) + (23.31034482758621, 0.015975692534127565) + + `ddof` is the change to make to the default degrees of freedom. + + >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1) + (2.0, 0.73575888234288467) + + The calculation of the p-values is done by broadcasting the + test statistic with `ddof`. + + >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2]) + (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ])) + + `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has + shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting + `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared + statistics, we must use ``axis=1``: + + >>> power_divergence([16, 18, 16, 14, 12, 12], + ... f_exp=[[16, 16, 16, 16, 16, 8], + ... [8, 20, 20, 16, 12, 12]], + ... 
axis=1) + (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846])) + + """ + # Convert the input argument `lambda_` to a numerical value. + if isinstance(lambda_, string_types): + if lambda_ not in _power_div_lambda_names: + names = repr(list(_power_div_lambda_names.keys()))[1:-1] + raise ValueError("invalid string for lambda_: {0!r}. Valid strings " + "are {1}".format(lambda_, names)) + lambda_ = _power_div_lambda_names[lambda_] + elif lambda_ is None: + lambda_ = 1 + + f_obs = np.asanyarray(f_obs) + + if f_exp is not None: + f_exp = np.atleast_1d(np.asanyarray(f_exp)) + else: + # Compute the equivalent of + # f_exp = f_obs.mean(axis=axis, keepdims=True) + # Older versions of numpy do not have the 'keepdims' argument, so + # we have to do a little work to achieve the same result. + # Ignore 'invalid' errors so the edge case of a data set with length 0 + # is handled without spurious warnings. + with np.errstate(invalid='ignore'): + f_exp = np.atleast_1d(f_obs.mean(axis=axis)) + if axis is not None: + reduced_shape = list(f_obs.shape) + reduced_shape[axis] = 1 + f_exp.shape = reduced_shape + + # `terms` is the array of terms that are summed along `axis` to create + # the test statistic. We use some specialized code for a few special + # cases of lambda_. + if lambda_ == 1: + # Pearson's chi-squared statistic + terms = (f_obs - f_exp)**2 / f_exp + elif lambda_ == 0: + # Log-likelihood ratio (i.e. G-test) + terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp) + elif lambda_ == -1: + # Modified log-likelihood ratio + terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs) + else: + # General Cressie-Read power divergence. + terms = f_obs * ((f_obs / f_exp)**lambda_ - 1) + terms /= 0.5 * lambda_ * (lambda_ + 1) + + stat = terms.sum(axis=axis) + + num_obs = _count(terms, axis=axis) + ddof = asarray(ddof) + p = distributions.chi2.sf(stat, num_obs - 1 - ddof) + + return Power_divergenceResult(stat, p) + + +def chisquare(f_obs, f_exp=None, ddof=0, axis=0): + """ + Calculate a one-way chi square test. + + The chi square test tests the null hypothesis that the categorical data + has the given frequencies. + + Parameters + ---------- + f_obs : array_like + Observed frequencies in each category. + f_exp : array_like, optional + Expected frequencies in each category. By default the categories are + assumed to be equally likely. + ddof : int, optional + "Delta degrees of freedom": adjustment to the degrees of freedom + for the p-value. The p-value is computed using a chi-squared + distribution with ``k - 1 - ddof`` degrees of freedom, where `k` + is the number of observed frequencies. The default value of `ddof` + is 0. + axis : int or None, optional + The axis of the broadcast result of `f_obs` and `f_exp` along which to + apply the test. If axis is None, all values in `f_obs` are treated + as a single data set. Default is 0. + + Returns + ------- + chisq : float or ndarray + The chi-squared test statistic. The value is a float if `axis` is + None or `f_obs` and `f_exp` are 1-D. + p : float or ndarray + The p-value of the test. The value is a float if `ddof` and the + return value `chisq` are scalars. + + See Also + -------- + power_divergence + mstats.chisquare + + Notes + ----- + This test is invalid when the observed or expected frequencies in each + category are too small. A typical rule is that all of the observed + and expected frequencies should be at least 5. + + The default degrees of freedom, k-1, are for the case when no parameters + of the distribution are estimated. 
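+    For instance, with six observed frequencies, ``k = 6`` and the test
+    defaults to ``k - 1 = 5`` degrees of freedom.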
If p parameters are estimated by + efficient maximum likelihood then the correct degrees of freedom are + k-1-p. If the parameters are estimated in a different way, then the + dof can be between k-1-p and k-1. However, it is also possible that + the asymptotic distribution is not a chisquare, in which case this + test is not appropriate. + + References + ---------- + .. [1] Lowry, Richard. "Concepts and Applications of Inferential + Statistics". Chapter 8. + https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html + .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test + + Examples + -------- + When just `f_obs` is given, it is assumed that the expected frequencies + are uniform and given by the mean of the observed frequencies. + + >>> from scipy.stats import chisquare + >>> chisquare([16, 18, 16, 14, 12, 12]) + (2.0, 0.84914503608460956) + + With `f_exp` the expected frequencies can be given. + + >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8]) + (3.5, 0.62338762774958223) + + When `f_obs` is 2-D, by default the test is applied to each column. + + >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T + >>> obs.shape + (6, 2) + >>> chisquare(obs) + (array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415])) + + By setting ``axis=None``, the test is applied to all data in the array, + which is equivalent to applying the test to the flattened array. + + >>> chisquare(obs, axis=None) + (23.31034482758621, 0.015975692534127565) + >>> chisquare(obs.ravel()) + (23.31034482758621, 0.015975692534127565) + + `ddof` is the change to make to the default degrees of freedom. + + >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1) + (2.0, 0.73575888234288467) + + The calculation of the p-values is done by broadcasting the + chi-squared statistic with `ddof`. + + >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2]) + (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ])) + + `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has + shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting + `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared + statistics, we use ``axis=1``: + + >>> chisquare([16, 18, 16, 14, 12, 12], + ... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]], + ... axis=1) + (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846])) + + """ + return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, + lambda_="pearson") + + +Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue')) + + +def ks_2samp(data1, data2): + """ + Compute the Kolmogorov-Smirnov statistic on 2 samples. + + This is a two-sided test for the null hypothesis that 2 independent samples + are drawn from the same continuous distribution. + + Parameters + ---------- + data1, data2 : sequence of 1-D ndarrays + two arrays of sample observations assumed to be drawn from a continuous + distribution, sample sizes can be different + + Returns + ------- + statistic : float + KS statistic + pvalue : float + two-tailed p-value + + Notes + ----- + This tests whether 2 samples are drawn from the same distribution. Note + that, like in the case of the one-sample K-S test, the distribution is + assumed to be continuous. + + This is the two-sided test, one-sided tests are not implemented. + The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution. 
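+    The statistic is the largest absolute difference between the two
+    empirical distribution functions, ``D = max |F1(x) - F2(x)|``, evaluated
+    over the pooled sample.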
+ + If the K-S statistic is small or the p-value is high, then we cannot + reject the hypothesis that the distributions of the two samples + are the same. + + Examples + -------- + >>> from scipy import stats + >>> np.random.seed(12345678) #fix random seed to get the same result + >>> n1 = 200 # size of first sample + >>> n2 = 300 # size of second sample + + For a different distribution, we can reject the null hypothesis since the + pvalue is below 1%: + + >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1) + >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5) + >>> stats.ks_2samp(rvs1, rvs2) + (0.20833333333333337, 4.6674975515806989e-005) + + For a slightly different distribution, we cannot reject the null hypothesis + at a 10% or lower alpha since the p-value at 0.144 is higher than 10% + + >>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0) + >>> stats.ks_2samp(rvs1, rvs3) + (0.10333333333333333, 0.14498781825751686) + + For an identical distribution, we cannot reject the null hypothesis since + the p-value is high, 41%: + + >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0) + >>> stats.ks_2samp(rvs1, rvs4) + (0.07999999999999996, 0.41126949729859719) + + """ + data1 = np.sort(data1) + data2 = np.sort(data2) + n1 = data1.shape[0] + n2 = data2.shape[0] + data_all = np.concatenate([data1, data2]) + cdf1 = np.searchsorted(data1, data_all, side='right') / n1 + cdf2 = np.searchsorted(data2, data_all, side='right') / n2 + d = np.max(np.absolute(cdf1 - cdf2)) + # Note: d absolute not signed distance + en = np.sqrt(n1 * n2 / (n1 + n2)) + try: + prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d) + except Exception: + warnings.warn('This should not happen! Please open an issue at ' + 'https://github.com/scipy/scipy/issues and provide the code ' + 'you used to trigger this warning.\n') + prob = 1.0 + + return Ks_2sampResult(d, prob) + + +def tiecorrect(rankvals): + """ + Tie correction factor for ties in the Mann-Whitney U and + Kruskal-Wallis H tests. + + Parameters + ---------- + rankvals : array_like + A 1-D sequence of ranks. Typically this will be the array + returned by `stats.rankdata`. + + Returns + ------- + factor : float + Correction factor for U or H. + + See Also + -------- + rankdata : Assign ranks to the data + mannwhitneyu : Mann-Whitney rank test + kruskal : Kruskal-Wallis H test + + References + ---------- + .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral + Sciences. New York: McGraw-Hill. + + Examples + -------- + >>> from scipy.stats import tiecorrect, rankdata + >>> tiecorrect([1, 2.5, 2.5, 4]) + 0.9 + >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4]) + >>> ranks + array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5]) + >>> tiecorrect(ranks) + 0.9833333333333333 + + """ + arr = np.sort(rankvals) + idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0] + cnt = np.diff(idx).astype(np.float64) + + size = np.float64(arr.size) + return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size) + + +MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue')) + +def mannwhitneyu(x, y, use_continuity=True, alternative=None): + """ + Compute the Mann-Whitney rank test on samples x and y. + + Parameters + ---------- + x, y : array_like + Array of samples, should be one-dimensional. + use_continuity : bool, optional + Whether a continuity correction (1/2.) should be taken into + account. Default is True. 
+ alternative : None (deprecated), 'less', 'two-sided', or 'greater' + Whether to get the p-value for the one-sided hypothesis ('less' + or 'greater') or for the two-sided hypothesis ('two-sided'). + Defaults to None, which results in a p-value half the size of + the 'two-sided' p-value and a different U statistic. The + default behavior is not the same as using 'less' or 'greater': + it only exists for backward compatibility and is deprecated. + + Returns + ------- + statistic : float + The Mann-Whitney U statistic, equal to min(U for x, U for y) if + `alternative` is equal to None (deprecated; exists for backward + compatibility), and U for y otherwise. + pvalue : float + p-value assuming an asymptotic normal distribution. One-sided or + two-sided, depending on the choice of `alternative`. + + Notes + ----- + Use only when the number of observation in each sample is > 20 and + you have 2 independent samples of ranks. Mann-Whitney U is + significant if the u-obtained is LESS THAN or equal to the critical + value of U. + + This test corrects for ties and by default uses a continuity correction. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Mann-Whitney_U_test + + .. [2] H.B. Mann and D.R. Whitney, "On a Test of Whether one of Two Random + Variables is Stochastically Larger than the Other," The Annals of + Mathematical Statistics, vol. 18, no. 1, pp. 50-60, 1947. + + """ + if alternative is None: + warnings.warn("Calling `mannwhitneyu` without specifying " + "`alternative` is deprecated.", DeprecationWarning) + + x = np.asarray(x) + y = np.asarray(y) + n1 = len(x) + n2 = len(y) + ranked = rankdata(np.concatenate((x, y))) + rankx = ranked[0:n1] # get the x-ranks + u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x + u2 = n1*n2 - u1 # remainder is U for y + T = tiecorrect(ranked) + if T == 0: + raise ValueError('All numbers are identical in mannwhitneyu') + sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0) + + meanrank = n1*n2/2.0 + 0.5 * use_continuity + if alternative is None or alternative == 'two-sided': + bigu = max(u1, u2) + elif alternative == 'less': + bigu = u1 + elif alternative == 'greater': + bigu = u2 + else: + raise ValueError("alternative should be None, 'less', 'greater' " + "or 'two-sided'") + + z = (bigu - meanrank) / sd + if alternative is None: + # This behavior, equal to half the size of the two-sided + # p-value, is deprecated. + p = distributions.norm.sf(abs(z)) + elif alternative == 'two-sided': + p = 2 * distributions.norm.sf(abs(z)) + else: + p = distributions.norm.sf(z) + + u = u2 + # This behavior is deprecated. + if alternative is None: + u = min(u1, u2) + return MannwhitneyuResult(u, p) + + +RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue')) + + +def ranksums(x, y): + """ + Compute the Wilcoxon rank-sum statistic for two samples. + + The Wilcoxon rank-sum test tests the null hypothesis that two sets + of measurements are drawn from the same distribution. The alternative + hypothesis is that values in one sample are more likely to be + larger than the values in the other sample. + + This test should be used to compare two samples from continuous + distributions. It does not handle ties between measurements + in x and y. For tie-handling and an optional continuity correction + see `scipy.stats.mannwhitneyu`. 
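+    The returned statistic is the rank sum of the first sample, standardized
+    under the null hypothesis:
+    ``z = (s - n1*(n1+n2+1)/2) / sqrt(n1*n2*(n1+n2+1)/12)``.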
+ + Parameters + ---------- + x,y : array_like + The data from the two samples + + Returns + ------- + statistic : float + The test statistic under the large-sample approximation that the + rank sum statistic is normally distributed + pvalue : float + The two-sided p-value of the test + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test + + """ + x, y = map(np.asarray, (x, y)) + n1 = len(x) + n2 = len(y) + alldata = np.concatenate((x, y)) + ranked = rankdata(alldata) + x = ranked[:n1] + s = np.sum(x, axis=0) + expected = n1 * (n1+n2+1) / 2.0 + z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0) + prob = 2 * distributions.norm.sf(abs(z)) + + return RanksumsResult(z, prob) + + +KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue')) + + +def kruskal(*args, **kwargs): + """ + Compute the Kruskal-Wallis H-test for independent samples + + The Kruskal-Wallis H-test tests the null hypothesis that the population + median of all of the groups are equal. It is a non-parametric version of + ANOVA. The test works on 2 or more independent samples, which may have + different sizes. Note that rejecting the null hypothesis does not + indicate which of the groups differs. Post-hoc comparisons between + groups are required to determine which groups are different. + + Parameters + ---------- + sample1, sample2, ... : array_like + Two or more arrays with the sample measurements can be given as + arguments. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + statistic : float + The Kruskal-Wallis H statistic, corrected for ties + pvalue : float + The p-value for the test using the assumption that H has a chi + square distribution + + See Also + -------- + f_oneway : 1-way ANOVA + mannwhitneyu : Mann-Whitney rank test on two samples. + friedmanchisquare : Friedman test for repeated measurements + + Notes + ----- + Due to the assumption that H has a chi square distribution, the number + of samples in each group must not be too small. A typical rule is + that each sample must have at least 5 measurements. + + References + ---------- + .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in + One-Criterion Variance Analysis", Journal of the American Statistical + Association, Vol. 47, Issue 260, pp. 583-621, 1952. + .. 
[2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> x = [1, 3, 5, 7, 9]
+    >>> y = [2, 4, 6, 8, 10]
+    >>> stats.kruskal(x, y)
+    KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)
+
+    >>> x = [1, 1, 1]
+    >>> y = [2, 2, 2]
+    >>> z = [2, 2]
+    >>> stats.kruskal(x, y, z)
+    KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
+
+    """
+    args = list(map(np.asarray, args))
+    num_groups = len(args)
+    if num_groups < 2:
+        raise ValueError("Need at least two groups in stats.kruskal()")
+
+    for arg in args:
+        if arg.size == 0:
+            return KruskalResult(np.nan, np.nan)
+    n = np.asarray(list(map(len, args)))
+
+    if 'nan_policy' in kwargs.keys():
+        if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
+            raise ValueError("nan_policy must be 'propagate', "
+                             "'raise' or 'omit'")
+        else:
+            nan_policy = kwargs['nan_policy']
+    else:
+        nan_policy = 'propagate'
+
+    contains_nan = False
+    for arg in args:
+        cn = _contains_nan(arg, nan_policy)
+        if cn[0]:
+            contains_nan = True
+            break
+
+    if contains_nan and nan_policy == 'omit':
+        # Mask the NaNs in every group and delegate to the masked-array
+        # implementation.
+        args = [ma.masked_invalid(a) for a in args]
+        return mstats_basic.kruskal(*args)
+
+    if contains_nan and nan_policy == 'propagate':
+        return KruskalResult(np.nan, np.nan)
+
+    alldata = np.concatenate(args)
+    ranked = rankdata(alldata)
+    ties = tiecorrect(ranked)
+    if ties == 0:
+        raise ValueError('All numbers are identical in kruskal')
+
+    # Compute sum^2/n for each group and sum
+    j = np.insert(np.cumsum(n), 0, 0)
+    ssbn = 0
+    for i in range(num_groups):
+        ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]
+
+    totaln = np.sum(n)
+    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
+    df = num_groups - 1
+    h /= ties
+
+    return KruskalResult(h, distributions.chi2.sf(h, df))
+
+
+FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
+                                     ('statistic', 'pvalue'))
+
+
+def friedmanchisquare(*args):
+    """
+    Compute the Friedman test for repeated measurements.
+
+    The Friedman test tests the null hypothesis that repeated measurements of
+    the same individuals have the same distribution. It is often used
+    to test for consistency among measurements obtained in different ways.
+    For example, if two measurement techniques are used on the same set of
+    individuals, the Friedman test can be used to determine if the two
+    measurement techniques are consistent.
+
+    Parameters
+    ----------
+    measurements1, measurements2, measurements3... : array_like
+        Arrays of measurements. All of the arrays must have the same number
+        of elements. At least 3 sets of measurements must be given.
+
+    Returns
+    -------
+    statistic : float
+        The test statistic, correcting for ties.
+    pvalue : float
+        The associated p-value assuming that the test statistic has a chi
+        squared distribution.
+
+    Notes
+    -----
+    Due to the assumption that the test statistic has a chi squared
+    distribution, the p-value is only reliable for n > 10 and more than
+    6 repeated measurements.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Friedman_test
+
+    """
+    k = len(args)
+    if k < 3:
+        raise ValueError('Less than 3 levels. Friedman test not appropriate.')
+
+    n = len(args[0])
+    for i in range(1, k):
+        if len(args[i]) != n:
+            raise ValueError('Unequal N in friedmanchisquare. Aborting.')
+
+    # Rank each row of the (n, k) data table
+    data = np.vstack(args).T
+    data = data.astype(float)
+    for i in range(len(data)):
+        data[i] = rankdata(data[i])
+
+    # Handle ties
+    ties = 0
+    for i in range(len(data)):
+        replist, repnum = find_repeats(array(data[i]))
+        for t in repnum:
+            ties += t * (t*t - 1)
+    c = 1 - ties / (k*(k*k - 1)*n)
+
+    ssbn = np.sum(data.sum(axis=0)**2)
+    chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
+
+    return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
+
+
+BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
+                                 ('statistic', 'pvalue'))
+
+
+def brunnermunzel(x, y, alternative="two-sided", distribution="t",
+                  nan_policy='propagate'):
+    """
+    Compute the Brunner-Munzel test on samples x and y.
+
+    The Brunner-Munzel test is a nonparametric test of the null hypothesis that
+    when values are taken one by one from each group, the probabilities of
+    getting large values in both groups are equal.
+    Unlike the Wilcoxon-Mann-Whitney U test, this does not require the
+    assumption of equivariance of the two groups. Note that this does not
+    assume the distributions are the same. This test works on two independent
+    samples, which may have different sizes.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Array of samples, should be one-dimensional.
+    alternative : 'less', 'two-sided', or 'greater', optional
+        Whether to get the p-value for the one-sided hypothesis ('less'
+        or 'greater') or for the two-sided hypothesis ('two-sided').
+        Default value is 'two-sided'.
+    distribution : 't' or 'normal', optional
+        Whether to get the p-value by t-distribution or by standard normal
+        distribution.
+        Default value is 't'.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+
+    Returns
+    -------
+    statistic : float
+        The Brunner-Munzel W statistic.
+    pvalue : float
+        p-value assuming a t distribution. One-sided or
+        two-sided, depending on the choice of `alternative` and `distribution`.
+
+    See Also
+    --------
+    mannwhitneyu : Mann-Whitney rank test on two samples.
+
+    Notes
+    -----
+    Brunner and Munzel recommended estimating the p-value by the
+    t-distribution when the size of the data is 50 or less. If the size is
+    lower than 10, it would be better to use the permuted Brunner-Munzel test
+    (see [2]_).
+
+    References
+    ----------
+    .. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
+           problem: Asymptotic theory and a small-sample approximation".
+           Biometrical Journal. Vol. 42(2000): 17-25.
+    .. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
+           non-parametric Behrens-Fisher problem". Computational Statistics and
+           Data Analysis. Vol. 51(2007): 5192-5204.
+ + Examples + -------- + >>> from scipy import stats + >>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1] + >>> x2 = [3,3,4,3,1,2,3,1,1,5,4] + >>> w, p_value = stats.brunnermunzel(x1, x2) + >>> w + 3.1374674823029505 + >>> p_value + 0.0057862086661515377 + + """ + x = np.asarray(x) + y = np.asarray(y) + + # check both x and y + cnx, npx = _contains_nan(x, nan_policy) + cny, npy = _contains_nan(y, nan_policy) + contains_nan = cnx or cny + if npx == "omit" or npy == "omit": + nan_policy = "omit" + + if contains_nan and nan_policy == "propagate": + return BrunnerMunzelResult(np.nan, np.nan) + elif contains_nan and nan_policy == "omit": + x = ma.masked_invalid(x) + y = ma.masked_invalid(y) + return mstats_basic.brunnermunzel(x, y, alternative, distribution) + + nx = len(x) + ny = len(y) + if nx == 0 or ny == 0: + return BrunnerMunzelResult(np.nan, np.nan) + rankc = rankdata(np.concatenate((x, y))) + rankcx = rankc[0:nx] + rankcy = rankc[nx:nx+ny] + rankcx_mean = np.mean(rankcx) + rankcy_mean = np.mean(rankcy) + rankx = rankdata(x) + ranky = rankdata(y) + rankx_mean = np.mean(rankx) + ranky_mean = np.mean(ranky) + + Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0)) + Sx /= nx - 1 + Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0)) + Sy /= ny - 1 + + wbfn = nx * ny * (rankcy_mean - rankcx_mean) + wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy) + + if distribution == "t": + df_numer = np.power(nx * Sx + ny * Sy, 2.0) + df_denom = np.power(nx * Sx, 2.0) / (nx - 1) + df_denom += np.power(ny * Sy, 2.0) / (ny - 1) + df = df_numer / df_denom + p = distributions.t.cdf(wbfn, df) + elif distribution == "normal": + p = distributions.norm.cdf(wbfn) + else: + raise ValueError( + "distribution should be 't' or 'normal'") + + if alternative == "greater": + p = p + elif alternative == "less": + p = 1 - p + elif alternative == "two-sided": + p = 2 * np.min([p, 1-p]) + else: + raise ValueError( + "alternative should be 'less', 'greater' or 'two-sided'") + + return BrunnerMunzelResult(wbfn, p) + + +def combine_pvalues(pvalues, method='fisher', weights=None): + """ + Methods for combining the p-values of independent tests bearing upon the + same hypothesis. + + Parameters + ---------- + pvalues : array_like, 1-D + Array of p-values assumed to come from independent tests. + method : {'fisher', 'stouffer'}, optional + Name of method to use to combine p-values. The following methods are + available: + + - "fisher": Fisher's method (Fisher's combined probability test), + the default. + - "stouffer": Stouffer's Z-score method. + weights : array_like, 1-D, optional + Optional array of weights used only for Stouffer's Z-score method. + + Returns + ------- + statistic: float + The statistic calculated by the specified method: + - "fisher": The chi-squared statistic + - "stouffer": The Z-score + pval: float + The combined p-value. + + Notes + ----- + Fisher's method (also known as Fisher's combined probability test) [1]_ uses + a chi-squared statistic to compute a combined p-value. The closely related + Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The + advantage of Stouffer's method is that it is straightforward to introduce + weights, which can make Stouffer's method more powerful than Fisher's + method when the p-values are from studies of different size [3]_ [4]_. + + Fisher's method may be extended to combine p-values from dependent tests + [5]_. Extensions such as Brown's method and Kost's method are not currently + implemented. + + .. 
versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
+    .. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
+    .. [3] Whitlock, M. C. "Combining probability from independent tests: the
+           weighted Z-method is superior to Fisher's approach." Journal of
+           Evolutionary Biology 18, no. 5 (2005): 1368-1373.
+    .. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
+           for combining probabilities in meta-analysis." Journal of
+           Evolutionary Biology 24, no. 8 (2011): 1836-1841.
+    .. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
+
+    """
+    pvalues = np.asarray(pvalues)
+    if pvalues.ndim != 1:
+        raise ValueError("pvalues is not 1-D")
+
+    if method == 'fisher':
+        Xsq = -2 * np.sum(np.log(pvalues))
+        pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
+        return (Xsq, pval)
+    elif method == 'stouffer':
+        if weights is None:
+            weights = np.ones_like(pvalues)
+        elif len(weights) != len(pvalues):
+            raise ValueError("pvalues and weights must be of the same size.")
+
+        weights = np.asarray(weights)
+        if weights.ndim != 1:
+            raise ValueError("weights is not 1-D")
+
+        Zi = distributions.norm.isf(pvalues)
+        Z = np.dot(weights, Zi) / np.linalg.norm(weights)
+        pval = distributions.norm.sf(Z)
+
+        return (Z, pval)
+    else:
+        raise ValueError(
+            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
+
+
+#####################################
+#       STATISTICAL DISTANCES       #
+#####################################
+
+def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
+    r"""
+    Compute the first Wasserstein distance between two 1D distributions.
+
+    This distance is also known as the earth mover's distance, since it can be
+    seen as the minimum amount of "work" required to transform :math:`u` into
+    :math:`v`, where "work" is measured as the amount of distribution weight
+    that must be moved, multiplied by the distance it has to be moved.
+
+    .. versionadded:: 1.0.0
+
+    Parameters
+    ----------
+    u_values, v_values : array_like
+        Values observed in the (empirical) distribution.
+    u_weights, v_weights : array_like, optional
+        Weight for each value. If unspecified, each value is assigned the same
+        weight.
+        `u_weights` (resp. `v_weights`) must have the same length as
+        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
+        must still be positive and finite so that the weights can be normalized
+        to sum to 1.
+
+    Returns
+    -------
+    distance : float
+        The computed distance between the distributions.
+
+    Notes
+    -----
+    The first Wasserstein distance between the distributions :math:`u` and
+    :math:`v` is:
+
+    .. math::
+
+        l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
+        \mathbb{R}} |x-y| \mathrm{d} \pi (x, y)
+
+    where :math:`\Gamma (u, v)` is the set of (probability) distributions on
+    :math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
+    :math:`v` on the first and second factors respectively.
+
+    If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
+    :math:`v`, this distance also equals:
+
+    .. math::
+
+        l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|
+
+    See [2]_ for a proof of the equivalence of both definitions.
+
+    The input distributions can be empirical, therefore coming from samples
+    whose values are effectively inputs of the function, or they can be seen as
+    generalized functions, in which case they are weighted sums of Dirac delta
+    functions located at the specified values.
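+
+    As a small worked case: for the empirical distributions over the samples
+    ``[0, 1]`` and ``[1, 2]``, each half-unit of mass moves one unit to the
+    right, so the distance is :math:`\frac{1}{2} + \frac{1}{2} = 1`.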
+
+    References
+    ----------
+    .. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
+    .. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
+           Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.
+
+    Examples
+    --------
+    >>> from scipy.stats import wasserstein_distance
+    >>> wasserstein_distance([0, 1, 3], [5, 6, 8])
+    5.0
+    >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
+    0.25
+    >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
+    ...                      [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
+    4.0781331438047861
+    """
+    return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
+
+
+def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
+    r"""
+    Compute the energy distance between two 1D distributions.
+
+    .. versionadded:: 1.0.0
+
+    Parameters
+    ----------
+    u_values, v_values : array_like
+        Values observed in the (empirical) distribution.
+    u_weights, v_weights : array_like, optional
+        Weight for each value. If unspecified, each value is assigned the same
+        weight.
+        `u_weights` (resp. `v_weights`) must have the same length as
+        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
+        must still be positive and finite so that the weights can be normalized
+        to sum to 1.
+
+    Returns
+    -------
+    distance : float
+        The computed distance between the distributions.
+
+    Notes
+    -----
+    The energy distance between two distributions :math:`u` and :math:`v`, whose
+    respective CDFs are :math:`U` and :math:`V`, equals:
+
+    .. math::
+
+        D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
+        \mathbb E|Y - Y'| \right)^{1/2}
+
+    where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
+    independent random variables whose probability distribution is :math:`u`
+    (resp. :math:`v`).
+
+    As shown in [2]_, for one-dimensional real-valued variables, the energy
+    distance is linked to the non-distribution-free version of the Cramer-von
+    Mises distance:
+
+    .. math::
+
+        D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
+        \right)^{1/2}
+
+    Note that the common Cramer-von Mises criterion uses the distribution-free
+    version of the distance. See [2]_ (section 2) for more details about both
+    versions of the distance.
+
+    The input distributions can be empirical, therefore coming from samples
+    whose values are effectively inputs of the function, or they can be seen as
+    generalized functions, in which case they are weighted sums of Dirac delta
+    functions located at the specified values.
+
+    References
+    ----------
+    .. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
+    .. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
+           Green State University, Department of Mathematics and Statistics,
+           Technical Report 02-16 (2002).
+    .. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
+           Computational Statistics, 8(1):27-38 (2015).
+    .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
+           Munos "The Cramer Distance as a Solution to Biased Wasserstein
+           Gradients" (2017). :arXiv:`1705.10743`.
+
+    Examples
+    --------
+    >>> from scipy.stats import energy_distance
+    >>> energy_distance([0], [2])
+    2.0000000000000004
+    >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
+    1.0000000000000002
+    >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
+    ...                 [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
+    0.88003340976158217
+    """
+    return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
+                                      u_weights, v_weights)
+
+
+def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
+    r"""
+    Compute, between two one-dimensional distributions :math:`u` and
+    :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
+    statistical distance that is defined as:
+
+    .. math::
+
+        l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
+
+    p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2
+    gives the energy distance.
+
+    Parameters
+    ----------
+    u_values, v_values : array_like
+        Values observed in the (empirical) distribution.
+    u_weights, v_weights : array_like, optional
+        Weight for each value. If unspecified, each value is assigned the same
+        weight.
+        `u_weights` (resp. `v_weights`) must have the same length as
+        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
+        must still be positive and finite so that the weights can be normalized
+        to sum to 1.
+
+    Returns
+    -------
+    distance : float
+        The computed distance between the distributions.
+
+    Notes
+    -----
+    The input distributions can be empirical, therefore coming from samples
+    whose values are effectively inputs of the function, or they can be seen as
+    generalized functions, in which case they are weighted sums of Dirac delta
+    functions located at the specified values.
+
+    References
+    ----------
+    .. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
+           Munos "The Cramer Distance as a Solution to Biased Wasserstein
+           Gradients" (2017). :arXiv:`1705.10743`.
+    """
+    u_values, u_weights = _validate_distribution(u_values, u_weights)
+    v_values, v_weights = _validate_distribution(v_values, v_weights)
+
+    u_sorter = np.argsort(u_values)
+    v_sorter = np.argsort(v_values)
+
+    all_values = np.concatenate((u_values, v_values))
+    all_values.sort(kind='mergesort')
+
+    # Compute the differences between pairs of successive values of u and v.
+    deltas = np.diff(all_values)
+
+    # Get the respective positions of the values of u and v among the values of
+    # both distributions.
+    u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right')
+    v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right')
+
+    # Calculate the CDFs of u and v using their weights, if specified.
+    if u_weights is None:
+        u_cdf = u_cdf_indices / u_values.size
+    else:
+        u_sorted_cumweights = np.concatenate(([0],
+                                              np.cumsum(u_weights[u_sorter])))
+        u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1]
+
+    if v_weights is None:
+        v_cdf = v_cdf_indices / v_values.size
+    else:
+        v_sorted_cumweights = np.concatenate(([0],
+                                              np.cumsum(v_weights[v_sorter])))
+        v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1]
+
+    # Compute the value of the integral based on the CDFs.
+    # If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
+    # of about 15%.
+    if p == 1:
+        return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
+    if p == 2:
+        return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
+    return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
+                                       deltas)), 1/p)
+
+
+def _validate_distribution(values, weights):
+    """
+    Validate the values and weights from a distribution input of `cdf_distance`
+    and return them as ndarray objects.
+
+    Parameters
+    ----------
+    values : array_like
+        Values observed in the (empirical) distribution.
+    weights : array_like
+        Weight for each value.
+ + Returns + ------- + values : ndarray + Values as ndarray. + weights : ndarray + Weights as ndarray. + """ + # Validate the value array. + values = np.asarray(values, dtype=float) + if len(values) == 0: + raise ValueError("Distribution can't be empty.") + + # Validate the weight array, if specified. + if weights is not None: + weights = np.asarray(weights, dtype=float) + if len(weights) != len(values): + raise ValueError('Value and weight array-likes for the same ' + 'empirical distribution must be of the same size.') + if np.any(weights < 0): + raise ValueError('All weights must be non-negative.') + if not 0 < np.sum(weights) < np.inf: + raise ValueError('Weight array-like sum must be positive and ' + 'finite. Set as None for an equal distribution of ' + 'weight.') + + return values, weights + + return values, None + + +##################################### +# SUPPORT FUNCTIONS # +##################################### + +RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts')) + + +def find_repeats(arr): + """ + Find repeats and repeat counts. + + Parameters + ---------- + arr : array_like + Input array. This is cast to float64. + + Returns + ------- + values : ndarray + The unique values from the (flattened) input that are repeated. + + counts : ndarray + Number of times the corresponding 'value' is repeated. + + Notes + ----- + In numpy >= 1.9 `numpy.unique` provides similar functionality. The main + difference is that `find_repeats` only returns repeated values. + + Examples + -------- + >>> from scipy import stats + >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5]) + RepeatedResults(values=array([2.]), counts=array([4])) + + >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]]) + RepeatedResults(values=array([4., 5.]), counts=array([2, 2])) + + """ + # Note: always copies. + return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64))) + + +def _sum_of_squares(a, axis=0): + """ + Square each element of the input array, and return the sum(s) of that. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + sum_of_squares : ndarray + The sum along the given axis for (a**2). + + See also + -------- + _square_of_sums : The square(s) of the sum(s) (the opposite of + `_sum_of_squares`). + """ + a, axis = _chk_asarray(a, axis) + return np.sum(a*a, axis) + + +def _square_of_sums(a, axis=0): + """ + Sum elements of the input array, and return the square(s) of that sum. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + square_of_sums : float or ndarray + The square of the sum over `axis`. + + See also + -------- + _sum_of_squares : The sum of squares (the opposite of `square_of_sums`). + """ + a, axis = _chk_asarray(a, axis) + s = np.sum(a, axis) + if not np.isscalar(s): + return s.astype(float) * s + else: + return float(s) * s + + +def rankdata(a, method='average'): + """ + Assign ranks to data, dealing with ties appropriately. + + Ranks begin at 1. The `method` argument controls how ranks are assigned + to equal values. See [1]_ for further discussion of ranking methods. + + Parameters + ---------- + a : array_like + The array of values to be ranked. The array is first flattened. + method : str, optional + The method used to assign ranks to tied elements. 
+ The options are 'average', 'min', 'max', 'dense' and 'ordinal'. + + 'average': + The average of the ranks that would have been assigned to + all the tied values is assigned to each value. + 'min': + The minimum of the ranks that would have been assigned to all + the tied values is assigned to each value. (This is also + referred to as "competition" ranking.) + 'max': + The maximum of the ranks that would have been assigned to all + the tied values is assigned to each value. + 'dense': + Like 'min', but the rank of the next highest element is assigned + the rank immediately after those assigned to the tied elements. + 'ordinal': + All values are given a distinct rank, corresponding to the order + that the values occur in `a`. + + The default is 'average'. + + Returns + ------- + ranks : ndarray + An array of length equal to the size of `a`, containing rank + scores. + + References + ---------- + .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking + + Examples + -------- + >>> from scipy.stats import rankdata + >>> rankdata([0, 2, 3, 2]) + array([ 1. , 2.5, 4. , 2.5]) + >>> rankdata([0, 2, 3, 2], method='min') + array([ 1, 2, 4, 2]) + >>> rankdata([0, 2, 3, 2], method='max') + array([ 1, 3, 4, 3]) + >>> rankdata([0, 2, 3, 2], method='dense') + array([ 1, 2, 3, 2]) + >>> rankdata([0, 2, 3, 2], method='ordinal') + array([ 1, 2, 4, 3]) + """ + if method not in ('average', 'min', 'max', 'dense', 'ordinal'): + raise ValueError('unknown method "{0}"'.format(method)) + + arr = np.ravel(np.asarray(a)) + algo = 'mergesort' if method == 'ordinal' else 'quicksort' + sorter = np.argsort(arr, kind=algo) + + inv = np.empty(sorter.size, dtype=np.intp) + inv[sorter] = np.arange(sorter.size, dtype=np.intp) + + if method == 'ordinal': + return inv + 1 + + arr = arr[sorter] + obs = np.r_[True, arr[1:] != arr[:-1]] + dense = obs.cumsum()[inv] + + if method == 'dense': + return dense + + # cumulative counts of each unique value + count = np.r_[np.nonzero(obs)[0], len(obs)] + + if method == 'max': + return count[dense] + + if method == 'min': + return count[dense - 1] + 1 + + # average method + return .5 * (count[dense] + count[dense - 1] + 1) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/stats.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/stats.pyc new file mode 100644 index 0000000..bd01727 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/stats.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/__init__.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/__init__.pyc new file mode 100644 index 0000000..5b48fc3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/common_tests.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/common_tests.py new file mode 100644 index 0000000..eb896b4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/common_tests.py @@ -0,0 +1,306 @@ +from __future__ import division, print_function, absolute_import + +import pickle + +import numpy as np +import numpy.testing as npt +from numpy.testing import assert_allclose, assert_equal +from scipy._lib._numpy_compat import suppress_warnings +from pytest import 
raises as assert_raises
+
+import numpy.ma.testutils as ma_npt
+
+from scipy._lib._util import getargspec_no_self as _getargspec
+from scipy import stats
+
+
+def check_named_results(res, attributes, ma=False):
+    for i, attr in enumerate(attributes):
+        if ma:
+            ma_npt.assert_equal(res[i], getattr(res, attr))
+        else:
+            npt.assert_equal(res[i], getattr(res, attr))
+
+
+def check_normalization(distfn, args, distname):
+    norm_moment = distfn.moment(0, *args)
+    npt.assert_allclose(norm_moment, 1.0)
+
+    # this is a temporary plug: either ncf or expect is problematic;
+    # best be marked as a knownfail, but I've no clue how to do it.
+    if distname == "ncf":
+        atol, rtol = 1e-5, 0
+    else:
+        atol, rtol = 1e-7, 1e-7
+
+    normalization_expect = distfn.expect(lambda x: 1, args=args)
+    npt.assert_allclose(normalization_expect, 1.0, atol=atol, rtol=rtol,
+                        err_msg=distname, verbose=True)
+
+    normalization_cdf = distfn.cdf(distfn.b, *args)
+    npt.assert_allclose(normalization_cdf, 1.0)
+
+
+def check_moment(distfn, arg, m, v, msg):
+    m1 = distfn.moment(1, *arg)
+    m2 = distfn.moment(2, *arg)
+    if not np.isinf(m):
+        npt.assert_almost_equal(m1, m, decimal=10, err_msg=msg +
+                                ' - 1st moment')
+    else:  # or np.isnan(m1),
+        npt.assert_(np.isinf(m1),
+                    msg + ' - 1st moment -infinite, m1=%s' % str(m1))
+
+    if not np.isinf(v):
+        npt.assert_almost_equal(m2 - m1 * m1, v, decimal=10, err_msg=msg +
+                                ' - 2nd moment')
+    else:  # or np.isnan(m2),
+        npt.assert_(np.isinf(m2),
+                    msg + ' - 2nd moment -infinite, m2=%s' % str(m2))
+
+
+def check_mean_expect(distfn, arg, m, msg):
+    if np.isfinite(m):
+        m1 = distfn.expect(lambda x: x, arg)
+        npt.assert_almost_equal(m1, m, decimal=5, err_msg=msg +
+                                ' - 1st moment (expect)')
+
+
+def check_var_expect(distfn, arg, m, v, msg):
+    if np.isfinite(v):
+        m2 = distfn.expect(lambda x: x*x, arg)
+        npt.assert_almost_equal(m2, v + m*m, decimal=5, err_msg=msg +
+                                ' - 2nd moment (expect)')
+
+
+def check_skew_expect(distfn, arg, m, v, s, msg):
+    if np.isfinite(s):
+        m3e = distfn.expect(lambda x: np.power(x-m, 3), arg)
+        npt.assert_almost_equal(m3e, s * np.power(v, 1.5),
+                                decimal=5, err_msg=msg + ' - skew')
+    else:
+        npt.assert_(np.isnan(s))
+
+
+def check_kurt_expect(distfn, arg, m, v, k, msg):
+    if np.isfinite(k):
+        m4e = distfn.expect(lambda x: np.power(x-m, 4), arg)
+        npt.assert_allclose(m4e, (k + 3.) * np.power(v, 2), atol=1e-5, rtol=1e-5,
+                            err_msg=msg + ' - kurtosis')
+    elif not np.isposinf(k):
+        npt.assert_(np.isnan(k))
+
+
+def check_entropy(distfn, arg, msg):
+    ent = distfn.entropy(*arg)
+    npt.assert_(not np.isnan(ent), msg + ' - entropy is nan')
+
+
+def check_private_entropy(distfn, args, superclass):
+    # compare a generic _entropy with the distribution-specific implementation
+    npt.assert_allclose(distfn._entropy(*args),
+                        superclass._entropy(distfn, *args))
+
+
+def check_entropy_vect_scale(distfn, arg):
+    # check 2-d
+    sc = np.asarray([[1, 2], [3, 4]])
+    v_ent = distfn.entropy(*arg, scale=sc)
+    s_ent = [distfn.entropy(*arg, scale=s) for s in sc.ravel()]
+    s_ent = np.asarray(s_ent).reshape(v_ent.shape)
+    assert_allclose(v_ent, s_ent, atol=1e-14)
+
+    # check invalid value, check cast
+    sc = [1, 2, -3]
+    v_ent = distfn.entropy(*arg, scale=sc)
+    s_ent = [distfn.entropy(*arg, scale=s) for s in sc]
+    s_ent = np.asarray(s_ent).reshape(v_ent.shape)
+    assert_allclose(v_ent, s_ent, atol=1e-14)
+
+
+def check_edge_support(distfn, args):
+    # Make sure that x=self.a and self.b are handled correctly.
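+    # cdf should hit exactly 0 and 1 at the support endpoints (and sf the
+    # reverse), with ppf/isf inverting them; for discrete distributions the
+    # lower endpoint is shifted to a - 1 below, because cdf(a) already
+    # includes the probability mass at a.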
+ x = [distfn.a, distfn.b] + if isinstance(distfn, stats.rv_discrete): + x = [distfn.a - 1, distfn.b] + + npt.assert_equal(distfn.cdf(x, *args), [0.0, 1.0]) + npt.assert_equal(distfn.sf(x, *args), [1.0, 0.0]) + + if distfn.name not in ('skellam', 'dlaplace'): + # with a = -inf, log(0) generates warnings + npt.assert_equal(distfn.logcdf(x, *args), [-np.inf, 0.0]) + npt.assert_equal(distfn.logsf(x, *args), [0.0, -np.inf]) + + npt.assert_equal(distfn.ppf([0.0, 1.0], *args), x) + npt.assert_equal(distfn.isf([0.0, 1.0], *args), x[::-1]) + + # out-of-bounds for isf & ppf + npt.assert_(np.isnan(distfn.isf([-1, 2], *args)).all()) + npt.assert_(np.isnan(distfn.ppf([-1, 2], *args)).all()) + + +def check_named_args(distfn, x, shape_args, defaults, meths): + ## Check calling w/ named arguments. + + # check consistency of shapes, numargs and _parse signature + signature = _getargspec(distfn._parse_args) + npt.assert_(signature.varargs is None) + npt.assert_(signature.keywords is None) + npt.assert_(list(signature.defaults) == list(defaults)) + + shape_argnames = signature.args[:-len(defaults)] # a, b, loc=0, scale=1 + if distfn.shapes: + shapes_ = distfn.shapes.replace(',', ' ').split() + else: + shapes_ = '' + npt.assert_(len(shapes_) == distfn.numargs) + npt.assert_(len(shapes_) == len(shape_argnames)) + + # check calling w/ named arguments + shape_args = list(shape_args) + + vals = [meth(x, *shape_args) for meth in meths] + npt.assert_(np.all(np.isfinite(vals))) + + names, a, k = shape_argnames[:], shape_args[:], {} + while names: + k.update({names.pop(): a.pop()}) + v = [meth(x, *a, **k) for meth in meths] + npt.assert_array_equal(vals, v) + if 'n' not in k.keys(): + # `n` is first parameter of moment(), so can't be used as named arg + npt.assert_equal(distfn.moment(1, *a, **k), + distfn.moment(1, *shape_args)) + + # unknown arguments should not go through: + k.update({'kaboom': 42}) + assert_raises(TypeError, distfn.cdf, x, **k) + + +def check_random_state_property(distfn, args): + # check the random_state attribute of a distribution *instance* + + # This test fiddles with distfn.random_state. This breaks other tests, + # hence need to save it and then restore. + rndm = distfn.random_state + + # baseline: this relies on the global state + np.random.seed(1234) + distfn.random_state = None + r0 = distfn.rvs(*args, size=8) + + # use an explicit instance-level random_state + distfn.random_state = 1234 + r1 = distfn.rvs(*args, size=8) + npt.assert_equal(r0, r1) + + distfn.random_state = np.random.RandomState(1234) + r2 = distfn.rvs(*args, size=8) + npt.assert_equal(r0, r2) + + # can override the instance-level random_state for an individual .rvs call + distfn.random_state = 2 + orig_state = distfn.random_state.get_state() + + r3 = distfn.rvs(*args, size=8, random_state=np.random.RandomState(1234)) + npt.assert_equal(r0, r3) + + # ... and that does not alter the instance-level random_state! 
+ npt.assert_equal(distfn.random_state.get_state(), orig_state) + + # finally, restore the random_state + distfn.random_state = rndm + + +def check_meth_dtype(distfn, arg, meths): + q0 = [0.25, 0.5, 0.75] + x0 = distfn.ppf(q0, *arg) + x_cast = [x0.astype(tp) for tp in + (np.int_, np.float16, np.float32, np.float64)] + + for x in x_cast: + # casting may have clipped the values, exclude those + distfn._argcheck(*arg) + x = x[(distfn.a < x) & (x < distfn.b)] + for meth in meths: + val = meth(x, *arg) + npt.assert_(val.dtype == np.float_) + + +def check_ppf_dtype(distfn, arg): + q0 = np.asarray([0.25, 0.5, 0.75]) + q_cast = [q0.astype(tp) for tp in (np.float16, np.float32, np.float64)] + for q in q_cast: + for meth in [distfn.ppf, distfn.isf]: + val = meth(q, *arg) + npt.assert_(val.dtype == np.float_) + + +def check_cmplx_deriv(distfn, arg): + # Distributions allow complex arguments. + def deriv(f, x, *arg): + x = np.asarray(x) + h = 1e-10 + return (f(x + h*1j, *arg)/h).imag + + x0 = distfn.ppf([0.25, 0.51, 0.75], *arg) + x_cast = [x0.astype(tp) for tp in + (np.int_, np.float16, np.float32, np.float64)] + + for x in x_cast: + # casting may have clipped the values, exclude those + distfn._argcheck(*arg) + x = x[(distfn.a < x) & (x < distfn.b)] + + pdf, cdf, sf = distfn.pdf(x, *arg), distfn.cdf(x, *arg), distfn.sf(x, *arg) + assert_allclose(deriv(distfn.cdf, x, *arg), pdf, rtol=1e-5) + assert_allclose(deriv(distfn.logcdf, x, *arg), pdf/cdf, rtol=1e-5) + + assert_allclose(deriv(distfn.sf, x, *arg), -pdf, rtol=1e-5) + assert_allclose(deriv(distfn.logsf, x, *arg), -pdf/sf, rtol=1e-5) + + assert_allclose(deriv(distfn.logpdf, x, *arg), + deriv(distfn.pdf, x, *arg) / distfn.pdf(x, *arg), + rtol=1e-5) + + +def check_pickling(distfn, args): + # check that a distribution instance pickles and unpickles + # pay special attention to the random_state property + + # save the random_state (restore later) + rndm = distfn.random_state + + distfn.random_state = 1234 + distfn.rvs(*args, size=8) + s = pickle.dumps(distfn) + r0 = distfn.rvs(*args, size=8) + + unpickled = pickle.loads(s) + r1 = unpickled.rvs(*args, size=8) + npt.assert_equal(r0, r1) + + # also smoke test some methods + medians = [distfn.ppf(0.5, *args), unpickled.ppf(0.5, *args)] + npt.assert_equal(medians[0], medians[1]) + npt.assert_equal(distfn.cdf(medians[0], *args), + unpickled.cdf(medians[1], *args)) + + # restore the random_state + distfn.random_state = rndm + + +def check_rvs_broadcast(distfunc, distname, allargs, shape, shape_only, otype): + np.random.seed(123) + with suppress_warnings() as sup: + # frechet_l and frechet_r are deprecated, so all their + # methods generate DeprecationWarnings. 
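+        # filtering on the message pattern silences only the frechet
+        # deprecation warnings, leaving other DeprecationWarnings visible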
+ sup.filter(category=DeprecationWarning, message=".*frechet_") + sample = distfunc.rvs(*allargs) + assert_equal(sample.shape, shape, "%s: rvs failed to broadcast" % distname) + if not shape_only: + rvs = np.vectorize(lambda *allargs: distfunc.rvs(*allargs), otypes=otype) + np.random.seed(123) + expected = rvs(*allargs) + assert_allclose(sample, expected, rtol=1e-15) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/common_tests.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/common_tests.pyc new file mode 100644 index 0000000..43ea5f0 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/common_tests.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/AtmWtAg.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/AtmWtAg.dat new file mode 100644 index 0000000..3053756 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/AtmWtAg.dat @@ -0,0 +1,108 @@ +NIST/ITL StRD +Dataset Name: AtmWtAg (AtmWtAg.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 108) + + +Procedure: Analysis of Variance + + +Reference: Powell, L.J., Murphy, T.J. and Gramlich, J.W. (1982). + "The Absolute Isotopic Abundance & Atomic Weight + of a Reference Sample of Silver". + NBS Journal of Research, 87, pp. 9-19. + + +Data: 1 Factor + 2 Treatments + 24 Replicates/Cell + 48 Observations + 7 Constant Leading Digits + Average Level of Difficulty + Observed Data + + +Model: 3 Parameters (mu, tau_1, tau_2) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + + +Between Instrument 1 3.63834187500000E-09 3.63834187500000E-09 1.59467335677930E+01 +Within Instrument 46 1.04951729166667E-08 2.28155932971014E-10 + + Certified R-Squared 2.57426544538321E-01 + + Certified Residual + Standard Deviation 1.51048314446410E-05 + + + + + + + + + + + +Data: Instrument AgWt + 1 107.8681568 + 1 107.8681465 + 1 107.8681572 + 1 107.8681785 + 1 107.8681446 + 1 107.8681903 + 1 107.8681526 + 1 107.8681494 + 1 107.8681616 + 1 107.8681587 + 1 107.8681519 + 1 107.8681486 + 1 107.8681419 + 1 107.8681569 + 1 107.8681508 + 1 107.8681672 + 1 107.8681385 + 1 107.8681518 + 1 107.8681662 + 1 107.8681424 + 1 107.8681360 + 1 107.8681333 + 1 107.8681610 + 1 107.8681477 + 2 107.8681079 + 2 107.8681344 + 2 107.8681513 + 2 107.8681197 + 2 107.8681604 + 2 107.8681385 + 2 107.8681642 + 2 107.8681365 + 2 107.8681151 + 2 107.8681082 + 2 107.8681517 + 2 107.8681448 + 2 107.8681198 + 2 107.8681482 + 2 107.8681334 + 2 107.8681609 + 2 107.8681101 + 2 107.8681512 + 2 107.8681469 + 2 107.8681360 + 2 107.8681254 + 2 107.8681261 + 2 107.8681450 + 2 107.8681368 diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SiRstv.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SiRstv.dat new file mode 100644 index 0000000..18ea897 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SiRstv.dat @@ -0,0 +1,85 @@ +NIST/ITL StRD +Dataset Name: SiRstv (SiRstv.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 85) + + +Procedure: Analysis of Variance + + +Reference: Ehrstein, James and Croarkin, M. Carroll. + Unpublished NIST dataset. 
+ + +Data: 1 Factor + 5 Treatments + 5 Replicates/Cell + 25 Observations + 3 Constant Leading Digits + Lower Level of Difficulty + Observed Data + + +Model: 6 Parameters (mu,tau_1, ... , tau_5) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Instrument 4 5.11462616000000E-02 1.27865654000000E-02 1.18046237440255E+00 +Within Instrument 20 2.16636560000000E-01 1.08318280000000E-02 + + Certified R-Squared 1.90999039051129E-01 + + Certified Residual + Standard Deviation 1.04076068334656E-01 + + + + + + + + + + + + +Data: Instrument Resistance + 1 196.3052 + 1 196.1240 + 1 196.1890 + 1 196.2569 + 1 196.3403 + 2 196.3042 + 2 196.3825 + 2 196.1669 + 2 196.3257 + 2 196.0422 + 3 196.1303 + 3 196.2005 + 3 196.2889 + 3 196.0343 + 3 196.1811 + 4 196.2795 + 4 196.1748 + 4 196.1494 + 4 196.1485 + 4 195.9885 + 5 196.2119 + 5 196.1051 + 5 196.1850 + 5 196.0052 + 5 196.2090 diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs01.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs01.dat new file mode 100644 index 0000000..945b24b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs01.dat @@ -0,0 +1,249 @@ +NIST/ITL StRD +Dataset Name: SmLs01 (SmLs01.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 249) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 21 Replicates/Cell + 189 Observations + 1 Constant Leading Digit + Lower Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... 
, tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01 +Within Treatment 180 1.80000000000000E+00 1.00000000000000E-02 + + Certified R-Squared 4.82758620689655E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1.4 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 2 1.3 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 3 1.5 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 4 1.3 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 5 1.5 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 6 1.3 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 7 1.5 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 8 1.3 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 9 1.5 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs02.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs02.dat new file mode 100644 index 0000000..ee76633 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs02.dat @@ -0,0 +1,1869 @@ +NIST/ITL StRD +Dataset Name: SmLs02 (SmLs02.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 1869) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 201 Replicates/Cell + 1809 Observations + 1 Constant Leading Digit + Lower Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... 
, tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.60800000000000E+01 2.01000000000000E+00 2.01000000000000E+02 +Within Treatment 1800 1.80000000000000E+01 1.00000000000000E-02 + + Certified R-Squared 4.71830985915493E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1.4 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 2 1.3 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 
2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 3 1.5 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 4 1.3 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 5 1.5 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 
1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 6 1.3 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 7 1.5 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 
1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 8 1.3 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 9 1.5 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 
1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs03.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs03.dat new file mode 100644 index 0000000..55dfa23 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs03.dat @@ -0,0 +1,18069 @@ +NIST/ITL StRD +Dataset Name: SmLs03 (SmLs03.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 18069) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 2001 Replicates/Cell + 18009 Observations + 1 Constant Leading Digit + Lower Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... , tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.60080000000000E+02 2.00100000000000E+01 2.00100000000000E+03 +Within Treatment 18000 1.80000000000000E+02 1.00000000000000E-02 + + Certified R-Squared 4.70712773465067E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1.4 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 
1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 
[truncated: machine-generated numeric data — thousands of repeated value pairs of the form "1.5 + N 1.3 + N" (alternating values around 1.2–1.6), with the trailing offset N stepping from 1 through 5; apparently generated output committed into the repository, with no human-readable content]
1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 6 1.3 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 
1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 
1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 
1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 
1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 7 1.5 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 
1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 
1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 
1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 
1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 
1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 8 1.3 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 
1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 
1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 
1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 
[... remaining flattened treatment/response data lines of the preceding nist_anova reference file omitted ...]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs04.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs04.dat
new file mode 100644
index 0000000..6a2a9fc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs04.dat
@@ -0,0 +1,249 @@
+NIST/ITL StRD
+Dataset Name:     SmLs04 (SmLs04.dat)
+
+File Format:      ASCII
+                  Certified Values (lines 41 to 47)
+                  Data             (lines 61 to 249)
+
+Procedure:        Analysis of Variance
+
+Reference:        Simon, Stephen D. and Lesage, James P. (1989).
+                  "Assessing the Accuracy of ANOVA Calculations in
+                  Statistical Software".
+                  Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:             1 Factor
+                  9 Treatments
+                  21 Replicates/Cell
+                  189 Observations
+                  7 Constant Leading Digits
+                  Average Level of Difficulty
+                  Generated Data
+
+Model:            10 Parameters (mu, tau_1, ... , tau_9)
+                  y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of                  Sums of               Mean
+Variation          df      Squares               Squares              F Statistic
+
+Between Treatment    8     1.68000000000000E+00  2.10000000000000E-01  2.10000000000000E+01
+Within Treatment   180     1.80000000000000E+00  1.00000000000000E-02
+
+   Certified R-Squared   4.82758620689655E-01
+
+   Certified Residual
+   Standard Deviation    1.00000000000000E-01
+
+Data:   Treatment   Response
+[... 189 data lines omitted: for each treatment 1-9, 21 replicates alternating 0.1 below and 0.1 above the cell mean ...]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs05.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs05.dat
new file mode 100644
index 0000000..fe11c40
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs05.dat
@@ -0,0 +1,1869 @@
+NIST/ITL StRD
+Dataset Name:     SmLs05 (SmLs05.dat)
+
+File Format:      ASCII
+                  Certified Values (lines 41 to 47)
+                  Data             (lines 61 to 1869)
+
+Procedure:        Analysis of Variance
+
+Reference:        Simon, Stephen D. and Lesage, James P. (1989).
+                  "Assessing the Accuracy of ANOVA Calculations in
+                  Statistical Software".
+                  Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:             1 Factor
+                  9 Treatments
+                  201 Replicates/Cell
+                  1809 Observations
+                  7 Constant Leading Digits
+                  Average Level of Difficulty
+                  Generated Data
+
+Model:            10 Parameters (mu, tau_1, ... , tau_9)
+                  y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of                  Sums of               Mean
+Variation          df      Squares               Squares              F Statistic
+
+Between Treatment    8     1.60800000000000E+01  2.01000000000000E+00  2.01000000000000E+02
+Within Treatment  1800     1.80000000000000E+01  1.00000000000000E-02
+
+   Certified R-Squared   4.71830985915493E-01
+
+   Certified Residual
+   Standard Deviation    1.00000000000000E-01
+
+Data:   Treatment   Response
+[... 1809 data lines omitted: same alternating pattern as SmLs04, with 201 replicates per treatment ...]
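(Aside: the SmLs04/SmLs05 files above are NIST StRD reference datasets shipped with scipy's test suite, and their certified ANOVA values can be checked directly. A minimal sketch, assuming only that scipy is installed; it rebuilds the SmLs04 groups from the pattern visible in the listing above rather than parsing the .dat file:

    from scipy.stats import f_oneway

    # Cell means of the nine SmLs04 treatments (reconstructed from the listing).
    means = [1000000.4, 1000000.3, 1000000.5, 1000000.3, 1000000.5,
             1000000.3, 1000000.5, 1000000.3, 1000000.5]

    # Each cell: one observation at the mean, then ten pairs 0.1 below/above it,
    # giving the 21 replicates per treatment stated in the file header.
    groups = [[m] + [m - 0.1, m + 0.1] * 10 for m in means]

    result = f_oneway(*groups)
    print(result.statistic)  # approximately 21.0, the certified F statistic

f_oneway returns the one-way ANOVA F statistic and p-value; up to floating-point rounding the statistic should match the certified 2.10000000000000E+01.)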
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs06.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs06.dat
new file mode 100644
index 0000000..602e4fb
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs06.dat
@@ -0,0 +1,18069 @@
+NIST/ITL StRD
+Dataset Name:     SmLs06 (SmLs06.dat)
+
+File Format:      ASCII
+                  Certified Values (lines 41 to 47)
+                  Data             (lines 61 to 18069)
+
+Procedure:        Analysis of Variance
+
+Reference:        Simon, Stephen D. and Lesage, James P. (1989).
+                  "Assessing the Accuracy of ANOVA Calculations in
+                  Statistical Software".
+                  Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:             1 Factor
+                  9 Treatments
+                  2001 Replicates/Cell
+                  18009 Observations
+                  7 Constant Leading Digits
+                  Average Level of Difficulty
+                  Generated Data
+
+Model:            10 Parameters (mu, tau_1, ... , tau_9)
+                  y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of                  Sums of               Mean
+Variation          df      Squares               Squares              F Statistic
+
+Between Treatment    8     1.60080000000000E+02  2.00100000000000E+01  2.00100000000000E+03
+Within Treatment 18000     1.80000000000000E+02  1.00000000000000E-02
+
+   Certified R-Squared   4.70712773465067E-01
+
+   Certified Residual
+   Standard Deviation    1.00000000000000E-01
+
+Data:   Treatment   Response
+          1      1000000.4
+          1      1000000.3
+          1      1000000.5
+          1      1000000.3
1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 
+ 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 
1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 
1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 2 1000000.3 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 
+ 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 
1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 
2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 
+ 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 
1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 
2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 
+ 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 3 1000000.5 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 
1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 
3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 
+ 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 
1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 
+ [... generated data omitted: thousands of added lines of the form "<n> <value>", with n = 3, 4, 5 and each n alternating between two constants near 1000000 (e.g. "3 1000000.4" / "3 1000000.6") ...]
+ 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 6 1000000.3 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 
1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 
6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 
+ 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 
1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 
6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 
+ 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 
1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 
6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 7 1000000.5 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 
+ 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 
1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 
7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 
+ 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 
1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 
[... continuation of the preceding data file's Treatment/Response rows omitted: several hundred repeated pairs per treatment for treatments 7-9, each response alternating between two values in the 1000000.2-1000000.6 range ...]
9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs07.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs07.dat new file mode 100644 index 0000000..deeac95 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs07.dat @@ -0,0 +1,249 @@ +NIST/ITL StRD +Dataset Name: SmLs07 (SmLs07.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 249) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 21 Replicates/Cell + 189 Observations + 13 Constant Leading Digits + Higher Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... , tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01 +Within Treatment 180 1.80000000000000E+00 1.00000000000000E-02 + + Certified R-Squared 4.82758620689655E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1000000000000.4 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 2 1000000000000.3 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 3 1000000000000.5 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 4 1000000000000.3 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 5 1000000000000.5 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 6 1000000000000.3 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 7 1000000000000.5 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 8 1000000000000.3 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 9 1000000000000.5 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs08.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs08.dat new file mode 100644 index 0000000..c5ee643 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs08.dat @@ -0,0 +1,1869 @@ +NIST/ITL StRD +Dataset Name: SmLs08 (SmLs08.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 1869) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 201 Replicates/Cell + 1809 Observations + 13 Constant Leading Digits + Higher Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... 
+
+Certified Values:
+
+Source of                  Sums of               Mean
+Variation          df      Squares               Squares             F Statistic
+
+Between Treatment    8  1.60800000000000E+01  2.01000000000000E+00  2.01000000000000E+02
+Within Treatment  1800  1.80000000000000E+01  1.00000000000000E-02
+
+                Certified R-Squared  4.71830985915493E-01
+
+                Certified Residual
+                Standard Deviation   1.00000000000000E-01
+
+Data:   Treatment   Response
+            1       1000000000000.4
+            1       1000000000000.3
+            1       1000000000000.5
+ [… 1806 further rows elided: same alternating pattern as SmLs07, with 201 replicates per treatment …]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs09.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs09.dat
new file mode 100644
index 0000000..887905e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_anova/SmLs09.dat
@@ -0,0 +1,18069 @@
+NIST/ITL StRD
+Dataset Name:   SmLs09 (SmLs09.dat)
+
+File Format:    ASCII
+                Certified Values (lines 41 to 47)
+                Data             (lines 61 to 18069)
+
+Procedure:      Analysis of Variance
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:           1 Factor
+                9 Treatments
+                2001 Replicates/Cell
+                18009 Observations
+                13 Constant Leading Digits
+                Higher Level of Difficulty
+                Generated Data
+
+Model:          10 Parameters (mu, tau_1, ..., tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of                  Sums of               Mean
+Variation          df      Squares               Squares             F Statistic
+
+Between Treatment    8  1.60080000000000E+02  2.00100000000000E+01  2.00100000000000E+03
+Within Treatment 18000  1.80000000000000E+02  1.00000000000000E-02
+
+                Certified R-Squared  4.70712773465067E-01
+
+                Certified Residual
+                Standard Deviation   1.00000000000000E-01
+
+Data:   Treatment   Response
+            1       1000000000000.4
+            1       1000000000000.3
+            1       1000000000000.5
+ [… data rows continue: each of treatments 1-9 appears 2001 times, responses alternating ±0.1 around the 13 constant leading digits …]
1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 
1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 
1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 
1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 
1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 2 1000000000000.3 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
[…truncated: machine-generated numeric output spanning many lines, consisting of thousands of verbatim repetitions of the alternating pair "1000000000000.2 + 2" / "1000000000000.4 + 2", with the trailing counter later stepping to "+ 3" (values alternating between 1000000000000.4 and 1000000000000.6) and then to "+ 4" (values alternating between 1000000000000.2 and 1000000000000.4).]
1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 
1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 
1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 
1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 
1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 5 1000000000000.5 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
[generated numeric output, truncated: many thousands of repeated floating-point sums of the form "1000000000000.4 + 5" alternating with "1000000000000.6 + 5", with the added term stepping from 5 to 6 and then to 7 over the course of the dump; no other information is recoverable from this span]
1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 
1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 
1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 
1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 
1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 8 1000000000000.3 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
[... remainder of this machine-generated numeric data file elided: it repeats entries of the form "1000000000000.2 + 8" / "1000000000000.4 + 8", and later "1000000000000.4 + 9" / "1000000000000.6 + 9", for several thousand values ...]
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_linregress/Norris.dat b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_linregress/Norris.dat
new file mode 100644
index 0000000..4bf8ed9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/nist_linregress/Norris.dat
@@ -0,0 +1,97 @@
+NIST/ITL StRD
+Dataset Name:  Norris (Norris.dat)
+
+File Format:   ASCII
+               Certified Values  (lines 31 to 46)
+               Data              (lines 61 to 96)
+
+Procedure:     Linear Least Squares Regression
+
+Reference:     Norris, J., NIST.
+               Calibration of Ozone Monitors.
+
+Data:          1 Response Variable (y)
+               1 Predictor Variable (x)
+               36 Observations
+               Lower Level of Difficulty
+               Observed Data
+
+Model:         Linear Class
+               2 Parameters (B0,B1)
+
+               y = B0 + B1*x + e
+
+
+
+               Certified Regression Statistics
+
+                                           Standard Deviation
+               Parameter     Estimate         of Estimate
+
+               B0       -0.262323073774029   0.232818234301152
+               B1        1.00211681802045    0.429796848199937E-03
+
+               Residual
+               Standard Deviation   0.884796396144373
+
+               R-Squared            0.999993745883712
+
+
+               Certified Analysis of Variance Table
+
+Source of   Degrees of   Sums of            Mean
+Variation   Freedom      Squares            Squares            F Statistic
+
+Regression   1           4255954.13232369   4255954.13232369   5436385.54079785
+Residual    34           26.6173985294224   0.782864662630069
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Data:       y          x
+          0.1        0.2
+        338.8      337.4
+        118.1      118.2
+        888.0      884.6
+          9.2       10.1
+        228.1      226.5
+        668.5      666.3
+        998.5      996.3
+        449.1      448.6
+        778.9      777.0
+        559.2      558.2
+          0.3        0.4
+          0.1        0.6
+        778.1      775.5
+        668.8      666.9
+        339.3      338.0
+        448.9      447.5
+         10.8       11.6
+        557.7      556.0
+        228.3      228.1
+        998.0      995.8
+        888.8      887.6
+        119.6      120.2
+          0.3        0.3
+          0.6        0.3
+        557.6      556.8
+        339.3      339.1
+        888.0      887.2
+        998.5      999.0
+        778.9      779.0
+         10.2       11.1
+        117.6      118.3
+        228.9      229.2
+        668.4      669.1
+        449.2      448.9
+          0.2        0.5
+
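This NIST dataset is what scipy's test-suite uses to validate its linear-regression routine against certified values. A minimal sketch of such a check, assuming norris_x and norris_y hold the 36 x- and y-values copied from the table above (the variable names and tolerance are illustrative, not from the diff):

    # Illustration only: check scipy.stats.linregress against the certified
    # Norris values. norris_x / norris_y are the table columns above.
    from scipy import stats

    norris_x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6,
                777.0, 558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6,
                556.0, 228.1, 995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1,
                887.2, 999.0, 779.0, 11.1, 118.3, 229.2, 669.1, 448.9, 0.5]
    norris_y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1,
                778.9, 559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8,
                557.7, 228.3, 998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3,
                888.0, 998.5, 778.9, 10.2, 117.6, 228.9, 668.4, 449.2, 0.2]

    slope, intercept = stats.linregress(norris_x, norris_y)[:2]
    # These should reproduce the certified values above:
    #   B1 (slope)     =  1.00211681802045
    #   B0 (intercept) = -0.262323073774029
    assert abs(slope - 1.00211681802045) < 1e-7
    assert abs(intercept + 0.262323073774029) < 1e-7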
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/stable-cdf-sample-data.npy b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/stable-cdf-sample-data.npy
new file mode 100644
index 0000000..b464900
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/stable-cdf-sample-data.npy differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/stable-pdf-sample-data.npy b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/stable-pdf-sample-data.npy
new file mode 100644
index 0000000..0cc627c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/data/stable-pdf-sample-data.npy differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_binned_statistic.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_binned_statistic.py
new file mode 100644
index 0000000..2b42a37
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_binned_statistic.py
@@ -0,0 +1,437 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.testing import assert_allclose
+from scipy.stats import (binned_statistic, binned_statistic_2d,
+                         binned_statistic_dd)
+
+from scipy._lib.six import u
+from .common_tests import check_named_results
+
+
+class TestBinnedStatistic(object):
+
+    @classmethod
+    def setup_class(cls):
+        np.random.seed(9865)
+        cls.x = np.random.random(100)
+        cls.y = np.random.random(100)
+        cls.v = np.random.random(100)
+        cls.X = np.random.random((100, 3))
+        cls.w = np.random.random(100)
+
+    def test_1d_count(self):
+        x = self.x
+        v = self.v
+
+        count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
+        count2, edges2 = np.histogram(x, bins=10)
+
+        assert_allclose(count1, count2)
+        assert_allclose(edges1, edges2)
+
+    def test_gh5927(self):
+        # smoke test for gh5927 - binned_statistic was using `is` for string
+        # comparison
+        x = self.x
+        v = self.v
+        statistics = [u'mean', u'median', u'count', u'sum']
+        for statistic in statistics:
+            res = binned_statistic(x, v, statistic, bins=10)
+
+    def test_1d_result_attributes(self):
+        x = self.x
+        v = self.v
+
+        res = binned_statistic(x, v, 'count', bins=10)
+        attributes = ('statistic', 'bin_edges', 'binnumber')
+        check_named_results(res, attributes)
+
+    def test_1d_sum(self):
+        x = self.x
+        v = self.v
+
+        sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
+        sum2, edges2 = np.histogram(x, bins=10, weights=v)
+
+        assert_allclose(sum1, sum2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_mean(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_std(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_min(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_max(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_median(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_bincode(self):
+        x = self.x[:20]
+        v = self.v[:20]
+
+        count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
+        bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
+                        1, 2, 1])
+
+        bcount = [(bc == i).sum() for i in np.unique(bc)]
+
+        assert_allclose(bc, bc2)
+        assert_allclose(bcount, count1)
+
+    def test_1d_range_keyword(self):
+        # Regression test for gh-3063, range can be (min, max) or [(min, max)]
+        np.random.seed(9865)
+        x = np.arange(30)
+        data = np.random.random(30)
+
+        mean, bins, _ = binned_statistic(x[:15], data[:15])
+        mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])
+        mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))
+
+        assert_allclose(mean, mean_range)
+        assert_allclose(bins, bins_range)
+        assert_allclose(mean, mean_range2)
+        assert_allclose(bins, bins_range2)
+
+    def test_1d_multi_values(self):
+        x = self.x
+        v = self.v
+        w = self.w
+
+        stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10)
+        stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', bins=10)
+        stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10)
+
+        assert_allclose(stat2[0], stat1v)
+        assert_allclose(stat2[1], stat1w)
+        assert_allclose(edges1v, edges2)
+        assert_allclose(bc1v, bc2)
+
+    def test_2d_count(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        count1, binx1, biny1, bc = binned_statistic_2d(
+            x, y, v, 'count', bins=5)
+        count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
+
+        assert_allclose(count1, count2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_result_attributes(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        res = binned_statistic_2d(x, y, v, 'count', bins=5)
+        attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')
+        check_named_results(res, attributes)
+
+    def test_2d_sum(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
+        sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
+
+        assert_allclose(sum1, sum2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_mean(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_mean_unicode(self):
+        x = self.x
+        y = self.y
+        v = self.v
+        stat1, binx1, biny1, bc = binned_statistic_2d(
+            x, y, v, u('mean'), bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_std(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_min(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_max(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_median(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(
+            x, y, v, 'median', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(
+            x, y, v, np.median, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_bincode(self):
+        x = self.x[:20]
+        y = self.y[:20]
+        v = self.v[:20]
+
+        count1, binx1, biny1, bc = binned_statistic_2d(
+            x, y, v, 'count', bins=3)
+        bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
+                        6, 11, 16, 6, 6, 11, 8])
+
+        bcount = [(bc == i).sum() for i in np.unique(bc)]
+
+        assert_allclose(bc, bc2)
+        count1adj = count1[count1.nonzero()]
+        assert_allclose(bcount, count1adj)
+
+    def test_2d_multi_values(self):
+        x = self.x
+        y = self.y
+        v = self.v
+        w = self.w
+
+        stat1v, binx1v, biny1v, bc1v = binned_statistic_2d(
+            x, y, v, 'mean', bins=8)
+        stat1w, binx1w, biny1w, bc1w = binned_statistic_2d(
+            x, y, w, 'mean', bins=8)
+        stat2, binx2, biny2, bc2 = binned_statistic_2d(
+            x, y, [v, w], 'mean', bins=8)
+
+        assert_allclose(stat2[0], stat1v)
+        assert_allclose(stat2[1], stat1w)
+        assert_allclose(binx1v, binx2)
+        assert_allclose(biny1w, biny2)
+        assert_allclose(bc1v, bc2)
+
+    def test_2d_binnumbers_unraveled(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20)
+        stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10)
+
+        stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d(
+            x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True)
+
+        bcx3 = np.searchsorted(edgesx, x, side='right')
+        bcy3 = np.searchsorted(edgesy, y, side='right')
+
+        # `numpy.searchsorted` is non-inclusive on right-edge, compensate
+        bcx3[x == x.max()] -= 1
+        bcy3[y == y.max()] -= 1
+
+        assert_allclose(bcx, bc2[0])
+        assert_allclose(bcy, bc2[1])
+        assert_allclose(bcx3, bc2[0])
+        assert_allclose(bcy3, bc2[1])
+
+    def test_dd_count(self):
+        X = self.X
+        v = self.v
+
+        count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
+        count2, edges2 = np.histogramdd(X, bins=3)
+
+        assert_allclose(count1, count2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_result_attributes(self):
+        X = self.X
+        v = self.v
+
+        res = binned_statistic_dd(X, v, 'count', bins=3)
+        attributes = ('statistic', 'bin_edges', 'binnumber')
+        check_named_results(res, attributes)
+
+    def test_dd_sum(self):
+        X = self.X
+        v = self.v
+
+        sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
+        sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
+
+        assert_allclose(sum1, sum2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_mean(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_std(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_min(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_max(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_median(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_bincode(self):
+        X = self.X[:20]
+        v = self.v[:20]
+
+        count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
+        bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
+                        32, 36, 91, 43, 87, 81, 81])
+
+        bcount = [(bc == i).sum() for i in np.unique(bc)]
+
+        assert_allclose(bc, bc2)
+        count1adj = count1[count1.nonzero()]
+        assert_allclose(bcount, count1adj)
+
+    def test_dd_multi_values(self):
+        X = self.X
+        v = self.v
+        w = self.w
+
+        stat1v, edges1v, bc1v = binned_statistic_dd(X, v, np.std, bins=8)
+        stat1w, edges1w, bc1w = binned_statistic_dd(X, w, np.std, bins=8)
+        stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], np.std, bins=8)
+
+        assert_allclose(stat2[0], stat1v)
+        assert_allclose(stat2[1], stat1w)
+        assert_allclose(edges1v, edges2)
+        assert_allclose(edges1w, edges2)
+        assert_allclose(bc1v, bc2)
+
+    def test_dd_binnumbers_unraveled(self):
+        X = self.X
+        v = self.v
+
+        stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15)
+        stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20)
+        stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10)
+
+        stat2, edges2, bc2 = binned_statistic_dd(
+            X, v, 'mean', bins=(15, 20, 10), expand_binnumbers=True)
+
+        assert_allclose(bcx, bc2[0])
+        assert_allclose(bcy, bc2[1])
+        assert_allclose(bcz, bc2[2])
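The identity these tests lean on throughout is that binned_statistic with a 'count' or 'sum' statistic must agree with numpy's histogram functions. A self-contained sketch of that correspondence (the seed and variable names are illustrative, not from the file):

    # Sketch: 'count' matches np.histogram, 'sum' matches a weighted histogram.
    import numpy as np
    from scipy.stats import binned_statistic

    rng = np.random.RandomState(0)    # arbitrary seed, for reproducibility
    x = rng.random_sample(100)
    v = rng.random_sample(100)

    count, edges, _ = binned_statistic(x, v, 'count', bins=10)
    hist, hedges = np.histogram(x, bins=10)
    assert np.allclose(count, hist) and np.allclose(edges, hedges)

    total, _, _ = binned_statistic(x, v, 'sum', bins=10)
    whist, _ = np.histogram(x, bins=10, weights=v)
    assert np.allclose(total, whist)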
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_binned_statistic.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_binned_statistic.pyc
new file mode 100644
index 0000000..3d07b79
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_binned_statistic.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_contingency.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_contingency.py
new file mode 100644
index 0000000..8feba32
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_contingency.py
@@ -0,0 +1,200 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_array_equal,
+                           assert_array_almost_equal, assert_approx_equal,
+                           assert_allclose)
+from pytest import raises as assert_raises
+
+from scipy.special import xlogy
+from scipy.stats.contingency import margins, expected_freq, chi2_contingency
+
+
+def test_margins():
+    a = np.array([1])
+    m = margins(a)
+    assert_equal(len(m), 1)
+    m0 = m[0]
+    assert_array_equal(m0, np.array([1]))
+
+    a = np.array([[1]])
+    m0, m1 = margins(a)
+    expected0 = np.array([[1]])
+    expected1 = np.array([[1]])
+    assert_array_equal(m0, expected0)
+    assert_array_equal(m1, expected1)
+
+    a = np.arange(12).reshape(2, 6)
+    m0, m1 = margins(a)
+    expected0 = np.array([[15], [51]])
+    expected1 = np.array([[6, 8, 10, 12, 14, 16]])
+    assert_array_equal(m0, expected0)
+    assert_array_equal(m1, expected1)
+
+    a = np.arange(24).reshape(2, 3, 4)
+    m0, m1, m2 = margins(a)
+    expected0 = np.array([[[66]], [[210]]])
+    expected1 = np.array([[[60], [92], [124]]])
+    expected2 = np.array([[[60, 66, 72, 78]]])
+    assert_array_equal(m0, expected0)
+    assert_array_equal(m1, expected1)
+    assert_array_equal(m2, expected2)
+
+
+def test_expected_freq():
+    assert_array_equal(expected_freq([1]), np.array([1.0]))
+
+    observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]])
+    e = expected_freq(observed)
+    assert_array_equal(e, np.ones_like(observed))
+
+    observed = np.array([[10, 10, 20], [20, 20, 20]])
+    e = expected_freq(observed)
+    correct = np.array([[12., 12., 16.], [18., 18., 24.]])
+    assert_array_almost_equal(e, correct)
+
+
+def test_chi2_contingency_trivial():
+    # Some very simple tests for chi2_contingency.
+
+    # A trivial case
+    obs = np.array([[1, 2], [1, 2]])
+    chi2, p, dof, expected = chi2_contingency(obs, correction=False)
+    assert_equal(chi2, 0.0)
+    assert_equal(p, 1.0)
+    assert_equal(dof, 1)
+    assert_array_equal(obs, expected)
+
+    # A *really* trivial case: 1-D data.
+    obs = np.array([1, 2, 3])
+    chi2, p, dof, expected = chi2_contingency(obs, correction=False)
+    assert_equal(chi2, 0.0)
+    assert_equal(p, 1.0)
+    assert_equal(dof, 0)
+    assert_array_equal(obs, expected)
+
+
+def test_chi2_contingency_R():
+    # Some test cases that were computed independently, using R.
+
+    Rcode = \
+    """
+    # Data vector.
+ data <- c( + 12, 34, 23, 4, 47, 11, + 35, 31, 11, 34, 10, 18, + 12, 32, 9, 18, 13, 19, + 12, 12, 14, 9, 33, 25 + ) + + # Create factor tags:r=rows, c=columns, t=tiers + r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4"))) + c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3"))) + t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2"))) + + # 3-way Chi squared test of independence + s = summary(xtabs(data~r+c+t)) + print(s) + """ + Routput = \ + """ + Call: xtabs(formula = data ~ r + c + t) + Number of cases in table: 478 + Number of factors: 3 + Test for independence of all factors: + Chisq = 102.17, df = 17, p-value = 3.514e-14 + """ + obs = np.array( + [[[12, 34, 23], + [35, 31, 11], + [12, 32, 9], + [12, 12, 14]], + [[4, 47, 11], + [34, 10, 18], + [18, 13, 19], + [9, 33, 25]]]) + chi2, p, dof, expected = chi2_contingency(obs) + assert_approx_equal(chi2, 102.17, significant=5) + assert_approx_equal(p, 3.514e-14, significant=4) + assert_equal(dof, 17) + + Rcode = \ + """ + # Data vector. + data <- c( + # + 12, 17, + 11, 16, + # + 11, 12, + 15, 16, + # + 23, 15, + 30, 22, + # + 14, 17, + 15, 16 + ) + + # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers + r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2"))) + c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2"))) + d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2"))) + t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2"))) + + # 4-way Chi squared test of independence + s = summary(xtabs(data~r+c+d+t)) + print(s) + """ + Routput = \ + """ + Call: xtabs(formula = data ~ r + c + d + t) + Number of cases in table: 262 + Number of factors: 4 + Test for independence of all factors: + Chisq = 8.758, df = 11, p-value = 0.6442 + """ + obs = np.array( + [[[[12, 17], + [11, 16]], + [[11, 12], + [15, 16]]], + [[[23, 15], + [30, 22]], + [[14, 17], + [15, 16]]]]) + chi2, p, dof, expected = chi2_contingency(obs) + assert_approx_equal(chi2, 8.758, significant=4) + assert_approx_equal(p, 0.6442, significant=4) + assert_equal(dof, 11) + + +def test_chi2_contingency_g(): + c = np.array([[15, 60], [15, 90]]) + g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=False) + assert_allclose(g, 2*xlogy(c, c/e).sum()) + + g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=True) + c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]]) + assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum()) + + c = np.array([[10, 12, 10], [12, 10, 10]]) + g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood') + assert_allclose(g, 2*xlogy(c, c/e).sum()) + + +def test_chi2_contingency_bad_args(): + # Test that "bad" inputs raise a ValueError. + + # Negative value in the array of observed frequencies. + obs = np.array([[-1, 10], [1, 2]]) + assert_raises(ValueError, chi2_contingency, obs) + + # The zeros in this will result in zeros in the array + # of expected frequencies. + obs = np.array([[0, 1], [0, 1]]) + assert_raises(ValueError, chi2_contingency, obs) + + # A degenerate case: `observed` has size 0. 
+    obs = np.empty((0, 8))
+    assert_raises(ValueError, chi2_contingency, obs)
+
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_contingency.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_contingency.pyc
new file mode 100644
index 0000000..4bdfef5
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_contingency.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_continuous_basic.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_continuous_basic.py
new file mode 100644
index 0000000..90caf91
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_continuous_basic.py
@@ -0,0 +1,420 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+import numpy.testing as npt
+import pytest
+from pytest import raises as assert_raises
+from scipy._lib._numpy_compat import suppress_warnings
+from scipy.integrate import IntegrationWarning
+
+from scipy import stats
+from scipy.special import betainc
+from .common_tests import (check_normalization, check_moment, check_mean_expect,
+                           check_var_expect, check_skew_expect,
+                           check_kurt_expect, check_entropy,
+                           check_private_entropy, check_entropy_vect_scale,
+                           check_edge_support, check_named_args,
+                           check_random_state_property,
+                           check_meth_dtype, check_ppf_dtype, check_cmplx_deriv,
+                           check_pickling, check_rvs_broadcast)
+from scipy.stats._distr_params import distcont
+
+"""
+Test all continuous distributions.
+
+Parameters were chosen for those distributions that pass the
+Kolmogorov-Smirnov test.  This provides safe parameters for each
+distribution so that we can perform further testing of class methods.
+
+These tests currently check mostly for serious errors and exceptions,
+not for numerically exact results.
+"""
+
+# Note that you need to add new distributions you want tested
+# to _distr_params.
+
+DECIMAL = 5  # specify the precision of the tests  # increased from 0 to 5
+
+# The last four of these fail all around and need to be checked.
+distcont_extra = [
+    ['betaprime', (100, 86)],
+    ['fatiguelife', (5,)],
+    ['mielke', (4.6420495492121487, 0.59707419545516938)],
+    ['invweibull', (0.58847112119264788,)],
+    # burr: the sample mean test still fails for c < 1
+    ['burr', (0.94839838075366045, 4.3820284068855795)],
+    # genextreme: the sample mean test and the sf-logsf test fail
+    ['genextreme', (3.3184017469423535,)],
+]
+
+
+distslow = ['kappa4', 'rdist', 'gausshyper',
+            'recipinvgauss', 'ksone', 'genexpon',
+            'vonmises', 'vonmises_line', 'mielke', 'semicircular',
+            'cosine', 'invweibull', 'powerlognorm', 'johnsonsu', 'kstwobign']
+# distslow is sorted by speed (very slow to slow)
+
+
+# These distributions fail the complex derivative test below.
+# Here 'fail' means they produce wrong results and/or raise exceptions,
+# depending on the implementation details of the corresponding special
+# functions; cf https://github.com/scipy/scipy/pull/4979 for a discussion.
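+# The complex derivative test referenced above appears to be based on the
+# complex-step approximation f'(x) ~= Im(f(x + i*h)) / h, which is exact
+# to roughly machine precision for analytic f because it involves no
+# subtractive cancellation.  A minimal sketch of the idea (an illustrative
+# helper added here under that assumption; it is not used by the suite,
+# and the real check lives in common_tests.check_cmplx_deriv):
+def _complex_step_derivative(f, x, h=1e-20):
+    # e.g. _complex_step_derivative(np.sin, 0.7) agrees with np.cos(0.7)
+    return np.imag(f(x + 1j*h)) / h
+
+# Distributions in fails_cmplx below rely on special functions that do
+# not accept complex arguments (or lose analyticity), so the check is
+# skipped for them.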
+fails_cmplx = set(['beta', 'betaprime', 'chi', 'chi2', 'dgamma', 'dweibull', + 'erlang', 'f', 'gamma', 'gausshyper', 'gengamma', + 'gennorm', 'genpareto', 'halfgennorm', 'invgamma', + 'ksone', 'kstwobign', 'levy_l', 'loggamma', 'logistic', + 'maxwell', 'nakagami', 'ncf', 'nct', 'ncx2', 'norminvgauss', + 'pearson3', 'rice', 't', 'skewnorm', 'tukeylambda', + 'vonmises', 'vonmises_line', 'rv_histogram_instance']) + +_h = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, + 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8) +histogram_test_instance = stats.rv_histogram(_h) + + +def cases_test_cont_basic(): + for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]: + if distname == 'levy_stable': + continue + elif distname in distslow: + yield pytest.param(distname, arg, marks=pytest.mark.slow) + else: + yield distname, arg + + +@pytest.mark.parametrize('distname,arg', cases_test_cont_basic()) +def test_cont_basic(distname, arg): + # this test skips slow distributions + + if distname == 'truncnorm': + pytest.xfail(reason=distname) + + try: + distfn = getattr(stats, distname) + except TypeError: + distfn = distname + distname = 'rv_histogram_instance' + np.random.seed(765456) + sn = 500 + with suppress_warnings() as sup: + # frechet_l and frechet_r are deprecated, so all their + # methods generate DeprecationWarnings. + sup.filter(category=DeprecationWarning, message=".*frechet_") + rvs = distfn.rvs(size=sn, *arg) + sm = rvs.mean() + sv = rvs.var() + m, v = distfn.stats(*arg) + + check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, distname + 'sample mean test') + check_cdf_ppf(distfn, arg, distname) + check_sf_isf(distfn, arg, distname) + check_pdf(distfn, arg, distname) + check_pdf_logpdf(distfn, arg, distname) + check_cdf_logcdf(distfn, arg, distname) + check_sf_logsf(distfn, arg, distname) + + alpha = 0.01 + if distname == 'rv_histogram_instance': + check_distribution_rvs(distfn.cdf, arg, alpha, rvs) + else: + check_distribution_rvs(distname, arg, alpha, rvs) + + locscale_defaults = (0, 1) + meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf, + distfn.logsf] + # make sure arguments are within support + spec_x = {'frechet_l': -0.5, 'weibull_max': -0.5, 'levy_l': -0.5, + 'pareto': 1.5, 'tukeylambda': 0.3, + 'rv_histogram_instance': 5.0} + x = spec_x.get(distname, 0.5) + if distname == 'invweibull': + arg = (1,) + elif distname == 'ksone': + arg = (3,) + check_named_args(distfn, x, arg, locscale_defaults, meths) + check_random_state_property(distfn, arg) + check_pickling(distfn, arg) + + # Entropy + if distname not in ['ksone', 'kstwobign']: + check_entropy(distfn, arg, distname) + + if distfn.numargs == 0: + check_vecentropy(distfn, arg) + + if (distfn.__class__._entropy != stats.rv_continuous._entropy + and distname != 'vonmises'): + check_private_entropy(distfn, arg, stats.rv_continuous) + + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error") + sup.filter(IntegrationWarning, "Extremely bad integrand") + sup.filter(RuntimeWarning, "invalid value") + check_entropy_vect_scale(distfn, arg) + + check_edge_support(distfn, arg) + + check_meth_dtype(distfn, arg, meths) + check_ppf_dtype(distfn, arg) + + if distname not in fails_cmplx: + check_cmplx_deriv(distfn, arg) + + if distname != 'truncnorm': + check_ppf_private(distfn, arg, distname) + + +def test_levy_stable_random_state_property(): + # levy_stable only implements rvs(), so it is skipped in the + # main loop in test_cont_basic(). 
Here we apply just the test
+    # check_random_state_property to levy_stable.
+    check_random_state_property(stats.levy_stable, (0.5, 0.1))
+
+
+def cases_test_moments():
+    fail_normalization = set(['vonmises', 'ksone'])
+    fail_higher = set(['vonmises', 'ksone', 'ncf'])
+
+    for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]:
+        if distname == 'levy_stable':
+            continue
+
+        cond1 = distname not in fail_normalization
+        cond2 = distname not in fail_higher
+
+        yield distname, arg, cond1, cond2, False
+
+        if not cond1 or not cond2:
+            # Run the distributions that have issues twice: once skipping
+            # the not_ok parts, and once with the not_ok parts included
+            # but marked as a known failure.
+            yield pytest.param(distname, arg, True, True, True,
+                               marks=pytest.mark.xfail)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,is_xfailing',
+                         cases_test_moments())
+def test_moments(distname, arg, normalization_ok, higher_ok, is_xfailing):
+    try:
+        distfn = getattr(stats, distname)
+    except TypeError:
+        distfn = distname
+        distname = 'rv_histogram_instance'
+
+    with suppress_warnings() as sup:
+        sup.filter(IntegrationWarning,
+                   "The integral is probably divergent, or slowly convergent.")
+        sup.filter(category=DeprecationWarning, message=".*frechet_")
+        if is_xfailing:
+            sup.filter(IntegrationWarning)
+
+        m, v, s, k = distfn.stats(*arg, moments='mvsk')
+
+        if normalization_ok:
+            check_normalization(distfn, arg, distname)
+
+        if higher_ok:
+            check_mean_expect(distfn, arg, m, distname)
+            check_skew_expect(distfn, arg, m, v, s, distname)
+            check_var_expect(distfn, arg, m, v, distname)
+            check_kurt_expect(distfn, arg, m, v, k, distname)
+
+        check_loc_scale(distfn, arg, m, v, distname)
+        check_moment(distfn, arg, m, v, distname)
+
+
+@pytest.mark.parametrize('dist,shape_args', distcont)
+def test_rvs_broadcast(dist, shape_args):
+    if dist in ['gausshyper', 'genexpon']:
+        pytest.skip("too slow")
+
+    # If shape_only is True, it means the _rvs method of the
+    # distribution uses more than one random number to generate a random
+    # variate.  That means the result of using rvs with broadcasting or
+    # with a nontrivial size will not necessarily be the same as using the
+    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
+    # of the results, not the values.
+    # Whether or not a distribution is in the following list is an
+    # implementation detail of the distribution, not a requirement.  If
+    # the implementation of the rvs() method of a distribution changes,
+    # this test might also have to be changed.
+    shape_only = dist in ['betaprime', 'dgamma', 'exponnorm', 'norminvgauss',
+                          'nct', 'dweibull', 'rice', 'levy_stable', 'skewnorm']
+
+    distfunc = getattr(stats, dist)
+    loc = np.zeros(2)
+    scale = np.ones((3, 1))
+    nargs = distfunc.numargs
+    allargs = []
+    bshape = [3, 2]
+    # Generate shape parameter arguments...
+    for k in range(nargs):
+        shp = (k + 4,) + (1,)*(k + 2)
+        allargs.append(shape_args[k]*np.ones(shp))
+        bshape.insert(0, k + 4)
+    allargs.extend([loc, scale])
+    # bshape holds the expected shape when loc, scale, and the shape
+    # parameters are all broadcast together.
+
+    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, 'd')
+
+
+def test_rvs_gh2069_regression():
+    # Regression tests for gh-2069.  In scipy 0.17 and earlier,
+    # these tests would fail.
+    #
+    # A typical example of the broken behavior:
+    # >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5))
+    # array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705])
+    np.random.seed(123)
+    vals = stats.norm.rvs(loc=np.zeros(5), scale=1)
+    d = np.diff(vals)
+    npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
+    vals = stats.norm.rvs(loc=0, scale=np.ones(5))
+    d = np.diff(vals)
+    npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
+    vals = stats.norm.rvs(loc=np.zeros(5), scale=np.ones(5))
+    d = np.diff(vals)
+    npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
+    vals = stats.norm.rvs(loc=np.array([[0], [0]]), scale=np.ones(5))
+    d = np.diff(vals.ravel())
+    npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
+
+    assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]],
+                  [[1, 1], [1, 1]], 1)
+    assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2))
+    assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0],
+                  [[1], [2]], (4,))
+
+
+def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
+    # this did not work; it was skipped silently by nose
+    if np.isfinite(m):
+        check_sample_mean(sm, sv, sn, m)
+    if np.isfinite(v):
+        check_sample_var(sv, sn, v)
+
+
+def check_sample_mean(sm, v, n, popmean):
+    # from stats.stats.ttest_1samp(a, popmean):
+    # calculates the t statistic for the one-sample t-test on a group of
+    # scores a, given a population mean.
+    #
+    # Returns: t-value, two-tailed probability
+    df = n - 1
+    svar = ((n-1)*v) / float(df)  # looks redundant
+    t = (sm - popmean) / np.sqrt(svar*(1.0/n))
+    prob = betainc(0.5*df, 0.5, df/(df + t*t))
+
+    # return t, prob
+    npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' %
+                (t, prob, popmean, sm))
+
+
+def check_sample_var(sv, n, popvar):
+    # two-sided chisquare test for the sample variance being equal to the
+    # hypothesized variance
+    df = n - 1
+    chi2 = (n-1)*sv / float(popvar)
+    pval = stats.distributions.chi2.sf(chi2, df) * 2
+    npt.assert_(pval > 0.01, 'var fail, t, pval = %f, %f, v, sv=%f, %f' %
+                (chi2, pval, popvar, sv))
+
+
+def check_cdf_ppf(distfn, arg, msg):
+    values = [0.001, 0.5, 0.999]
+    npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg),
+                            values, decimal=DECIMAL,
+                            err_msg=msg + ' - cdf-ppf roundtrip')
+
+
+def check_sf_isf(distfn, arg, msg):
+    npt.assert_almost_equal(distfn.sf(distfn.isf([0.1, 0.5, 0.9], *arg), *arg),
+                            [0.1, 0.5, 0.9], decimal=DECIMAL,
+                            err_msg=msg + ' - sf-isf roundtrip')
+    npt.assert_almost_equal(distfn.cdf([0.1, 0.9], *arg),
+                            1.0 - distfn.sf([0.1, 0.9], *arg),
+                            decimal=DECIMAL,
+                            err_msg=msg + ' - cdf-sf relationship')
+
+
+def check_pdf(distfn, arg, msg):
+    # compares the pdf at the median with the numerical derivative of the cdf
+    median = distfn.ppf(0.5, *arg)
+    eps = 1e-6
+    pdfv = distfn.pdf(median, *arg)
+    if (pdfv < 1e-4) or (pdfv > 1e4):
+        # avoid checking a case where the pdf is close to zero
+        # or huge (singularity)
+        median = median + 0.1
+        pdfv = distfn.pdf(median, *arg)
+    cdfdiff = (distfn.cdf(median + eps, *arg) -
+               distfn.cdf(median - eps, *arg))/eps/2.0
+    # replace with a better diff and a better test (more points);
+    # actually, this works pretty well
+    msg += ' - cdf-pdf relationship'
+    npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL, err_msg=msg)
+
+
+def check_pdf_logpdf(distfn, args, msg):
+    # compares the pdf at several points with the log of the pdf
+    points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
+    vals = distfn.ppf(points, 
*args) + pdf = distfn.pdf(vals, *args) + logpdf = distfn.logpdf(vals, *args) + pdf = pdf[pdf != 0] + logpdf = logpdf[np.isfinite(logpdf)] + msg += " - logpdf-log(pdf) relationship" + npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg) + + +def check_sf_logsf(distfn, args, msg): + # compares sf at several points with the log of the sf + points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) + vals = distfn.ppf(points, *args) + sf = distfn.sf(vals, *args) + logsf = distfn.logsf(vals, *args) + sf = sf[sf != 0] + logsf = logsf[np.isfinite(logsf)] + msg += " - logsf-log(sf) relationship" + npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg) + + +def check_cdf_logcdf(distfn, args, msg): + # compares cdf at several points with the log of the cdf + points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) + vals = distfn.ppf(points, *args) + cdf = distfn.cdf(vals, *args) + logcdf = distfn.logcdf(vals, *args) + cdf = cdf[cdf != 0] + logcdf = logcdf[np.isfinite(logcdf)] + msg += " - logcdf-log(cdf) relationship" + npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg) + + +def check_distribution_rvs(dist, args, alpha, rvs): + # test from scipy.stats.tests + # this version reuses existing random variables + D, pval = stats.kstest(rvs, dist, args=args, N=1000) + if (pval < alpha): + D, pval = stats.kstest(dist, '', args=args, N=1000) + npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) + + "; alpha = " + str(alpha) + "\nargs = " + str(args)) + + +def check_vecentropy(distfn, args): + npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args)) + + +def check_loc_scale(distfn, arg, m, v, msg): + loc, scale = 10.0, 10.0 + mt, vt = distfn.stats(loc=loc, scale=scale, *arg) + npt.assert_allclose(m*scale + loc, mt) + npt.assert_allclose(v*scale*scale, vt) + + +def check_ppf_private(distfn, arg, msg): + # fails by design for truncnorm self.nb not defined + ppfs = distfn._ppf(np.array([0.1, 0.5, 0.9]), *arg) + npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan') + diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_continuous_basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_continuous_basic.pyc new file mode 100644 index 0000000..fee4f35 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_continuous_basic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_basic.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_basic.py new file mode 100644 index 0000000..f67c334 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_basic.py @@ -0,0 +1,234 @@ +from __future__ import division, print_function, absolute_import + +import numpy.testing as npt +import numpy as np +from scipy._lib.six import xrange +import pytest + +from scipy import stats +from .common_tests import (check_normalization, check_moment, check_mean_expect, + check_var_expect, check_skew_expect, + check_kurt_expect, check_entropy, + check_private_entropy, check_edge_support, + check_named_args, check_random_state_property, + check_pickling, check_rvs_broadcast) +from scipy.stats._distr_params import distdiscrete + +vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4]) +distdiscrete += [[stats.rv_discrete(values=vals), ()]] + + +def cases_test_discrete_basic(): + seen = set() + for distname, arg in distdiscrete: + yield distname, arg, distname not in seen + seen.add(distname) + + 
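+# A minimal usage sketch of the sample distribution appended to
+# distdiscrete above (an illustrative helper with hypothetical names,
+# not part of the original suite): rv_discrete(values=(xk, pk)) yields
+# an object whose pmf/cdf/rvs behave like the built-in distributions.
+def _example_custom_discrete():
+    xk, pk = [1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4]
+    custom = stats.rv_discrete(name='custom', values=(xk, pk))
+    assert abs(custom.pmf(3) - 0.3) < 1e-12
+    assert abs(custom.cdf(2) - 0.3) < 1e-12  # 0.1 + 0.2
+    return custom.rvs(size=5)
+
+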
+@pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic())
+def test_discrete_basic(distname, arg, first_case):
+    try:
+        distfn = getattr(stats, distname)
+    except TypeError:
+        distfn = distname
+        distname = 'sample distribution'
+    np.random.seed(9765456)
+    rvs = distfn.rvs(size=2000, *arg)
+    supp = np.unique(rvs)
+    m, v = distfn.stats(*arg)
+    check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf')
+
+    check_pmf_cdf(distfn, arg, distname)
+    check_oth(distfn, arg, supp, distname + ' oth')
+    check_edge_support(distfn, arg)
+
+    alpha = 0.01
+    check_discrete_chisquare(distfn, arg, rvs, alpha,
+                             distname + ' chisquare')
+
+    if first_case:
+        locscale_defaults = (0,)
+        meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
+                 distfn.logsf]
+        # make sure arguments are within support
+        spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, }
+        k = spec_k.get(distname, 1)
+        check_named_args(distfn, k, arg, locscale_defaults, meths)
+        if distname != 'sample distribution':
+            check_scale_docstring(distfn)
+        check_random_state_property(distfn, arg)
+        check_pickling(distfn, arg)
+
+        # Entropy
+        check_entropy(distfn, arg, distname)
+        if distfn.__class__._entropy != stats.rv_discrete._entropy:
+            check_private_entropy(distfn, arg, stats.rv_discrete)
+
+
+@pytest.mark.parametrize('distname,arg', distdiscrete)
+def test_moments(distname, arg):
+    try:
+        distfn = getattr(stats, distname)
+    except TypeError:
+        distfn = distname
+        distname = 'sample distribution'
+    m, v, s, k = distfn.stats(*arg, moments='mvsk')
+    check_normalization(distfn, arg, distname)
+
+    # compare the `stats` and `moment` methods
+    check_moment(distfn, arg, m, v, distname)
+    check_mean_expect(distfn, arg, m, distname)
+    check_var_expect(distfn, arg, m, v, distname)
+    check_skew_expect(distfn, arg, m, v, s, distname)
+    if distname not in ['zipf', 'yulesimon']:
+        check_kurt_expect(distfn, arg, m, v, k, distname)
+
+    # frozen distribution moments
+    check_moment_frozen(distfn, arg, m, 1)
+    check_moment_frozen(distfn, arg, v + m*m, 2)
+
+
+@pytest.mark.parametrize('dist,shape_args', distdiscrete)
+def test_rvs_broadcast(dist, shape_args):
+    # If shape_only is True, it means the _rvs method of the
+    # distribution uses more than one random number to generate a random
+    # variate.  That means the result of using rvs with broadcasting or
+    # with a nontrivial size will not necessarily be the same as using the
+    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
+    # of the results, not the values.
+    # Whether or not a distribution is in the following list is an
+    # implementation detail of the distribution, not a requirement.  If
+    # the implementation of the rvs() method of a distribution changes,
+    # this test might also have to be changed.
+    shape_only = dist in ['skellam', 'yulesimon']
+
+    try:
+        distfunc = getattr(stats, dist)
+    except TypeError:
+        distfunc = dist
+        dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk)
+    loc = np.zeros(2)
+    nargs = distfunc.numargs
+    allargs = []
+    bshape = []
+    # Generate shape parameter arguments...
+    for k in range(nargs):
+        shp = (k + 3,) + (1,)*(k + 1)
+        param_val = shape_args[k]
+        allargs.append(param_val*np.ones(shp, dtype=np.array(param_val).dtype))
+        bshape.insert(0, shp[0])
+    allargs.append(loc)
+    bshape.append(loc.size)
+    # bshape holds the expected shape when loc and the shape
+    # parameters are all broadcast together.
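+    # Sanity cross-check (an illustrative addition, not in the original
+    # test): numpy computes the same expected shape directly, since
+    # broadcasting aligns the trailing axes of the (k+3, 1, ..., 1)
+    # shape-parameter arrays with the length-2 loc array.
+    assert np.broadcast(*allargs).shape == tuple(bshape)
+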
+ check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, [np.int_]) + + +def check_cdf_ppf(distfn, arg, supp, msg): + # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer} + npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg), *arg), + supp, msg + '-roundtrip') + npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg), + supp, msg + '-roundtrip') + + if not hasattr(distfn, 'xk'): + supp1 = supp[supp < distfn.b] + npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg), + supp1 + distfn.inc, msg + ' ppf-cdf-next') + # -1e-8 could cause an error if pmf < 1e-8 + + +def check_pmf_cdf(distfn, arg, distname): + if hasattr(distfn, 'xk'): + index = distfn.xk + else: + startind = int(distfn.ppf(0.01, *arg) - 1) + index = list(range(startind, startind + 10)) + cdfs = distfn.cdf(index, *arg) + pmfs_cum = distfn.pmf(index, *arg).cumsum() + + atol, rtol = 1e-10, 1e-10 + if distname == 'skellam': # ncx2 accuracy + atol, rtol = 1e-5, 1e-5 + npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0], + atol=atol, rtol=rtol) + + +def check_moment_frozen(distfn, arg, m, k): + npt.assert_allclose(distfn(*arg).moment(k), m, + atol=1e-10, rtol=1e-10) + + +def check_oth(distfn, arg, supp, msg): + # checking other methods of distfn + npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg), + atol=1e-10, rtol=1e-10) + + q = np.linspace(0.01, 0.99, 20) + npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg), + atol=1e-10, rtol=1e-10) + + median_sf = distfn.isf(0.5, *arg) + npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5) + npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5) + + +def check_discrete_chisquare(distfn, arg, rvs, alpha, msg): + """Perform chisquare test for random sample of a discrete distribution + + Parameters + ---------- + distname : string + name of distribution function + arg : sequence + parameters of distribution + alpha : float + significance level, threshold for p-value + + Returns + ------- + result : bool + 0 if test passes, 1 if test fails + + """ + wsupp = 0.05 + + # construct intervals with minimum mass `wsupp`. 
+ # intervals are left-half-open as in a cdf difference + lo = int(max(distfn.a, -1000)) + distsupport = xrange(lo, int(min(distfn.b, 1000)) + 1) + last = 0 + distsupp = [lo] + distmass = [] + for ii in distsupport: + current = distfn.cdf(ii, *arg) + if current - last >= wsupp - 1e-14: + distsupp.append(ii) + distmass.append(current - last) + last = current + if current > (1 - wsupp): + break + if distsupp[-1] < distfn.b: + distsupp.append(distfn.b) + distmass.append(1 - last) + distsupp = np.array(distsupp) + distmass = np.array(distmass) + + # convert intervals to right-half-open as required by histogram + histsupp = distsupp + 1e-8 + histsupp[0] = distfn.a + + # find sample frequencies and perform chisquare test + freq, hsupp = np.histogram(rvs, histsupp) + chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass) + + npt.assert_(pval > alpha, + 'chisquare - test for %s at arg = %s with pval = %s' % + (msg, str(arg), str(pval))) + + +def check_scale_docstring(distfn): + if distfn.__doc__ is not None: + # Docstrings can be stripped if interpreter is run with -OO + npt.assert_('scale' not in distfn.__doc__) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_basic.pyc new file mode 100644 index 0000000..774da4b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_basic.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_distns.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_distns.py new file mode 100644 index 0000000..62313de --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_distns.py @@ -0,0 +1,50 @@ +from __future__ import division, print_function, absolute_import + +from scipy.stats import hypergeom, bernoulli, boltzmann +import numpy as np +from numpy.testing import assert_almost_equal, assert_equal, assert_allclose + + +def test_hypergeom_logpmf(): + # symmetries test + # f(k,N,K,n) = f(n-k,N,N-K,n) = f(K-k,N,K,N-n) = f(k,N,n,K) + k = 5 + N = 50 + K = 10 + n = 5 + logpmf1 = hypergeom.logpmf(k, N, K, n) + logpmf2 = hypergeom.logpmf(n - k, N, N - K, n) + logpmf3 = hypergeom.logpmf(K - k, N, K, N - n) + logpmf4 = hypergeom.logpmf(k, N, n, K) + assert_almost_equal(logpmf1, logpmf2, decimal=12) + assert_almost_equal(logpmf1, logpmf3, decimal=12) + assert_almost_equal(logpmf1, logpmf4, decimal=12) + + # test related distribution + # Bernoulli distribution if n = 1 + k = 1 + N = 10 + K = 7 + n = 1 + hypergeom_logpmf = hypergeom.logpmf(k, N, K, n) + bernoulli_logpmf = bernoulli.logpmf(k, K/N) + assert_almost_equal(hypergeom_logpmf, bernoulli_logpmf, decimal=12) + + +def test_boltzmann_upper_bound(): + k = np.arange(-3, 5) + + N = 1 + p = boltzmann.pmf(k, 0.123, N) + expected = k == 0 + assert_equal(p, expected) + + lam = np.log(2) + N = 3 + p = boltzmann.pmf(k, lam, N) + expected = [0, 0, 0, 4/7, 2/7, 1/7, 0, 0] + assert_allclose(p, expected, rtol=1e-13) + + c = boltzmann.cdf(k, lam, N) + expected = [0, 0, 0, 4/7, 6/7, 1, 1, 1] + assert_allclose(c, expected, rtol=1e-13) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_distns.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_distns.pyc new file mode 100644 index 0000000..1d36da1 Binary files /dev/null and 
b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_discrete_distns.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_distributions.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_distributions.py new file mode 100644 index 0000000..8d9be65 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_distributions.py @@ -0,0 +1,3640 @@ +""" Test functions for stats module + +""" +from __future__ import division, print_function, absolute_import + +import warnings +import re +import sys +import pickle +import os + +from numpy.testing import (assert_equal, assert_array_equal, + assert_almost_equal, assert_array_almost_equal, + assert_allclose, assert_, assert_warns) +import pytest +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +import numpy +import numpy as np +from numpy import typecodes, array +from numpy.lib.recfunctions import rec_append_fields +from scipy import special +from scipy.integrate import IntegrationWarning +import scipy.stats as stats +from scipy.stats._distn_infrastructure import argsreduce +import scipy.stats.distributions + +from scipy.special import xlogy +from .test_continuous_basic import distcont + +# python -OO strips docstrings +DOCSTRINGS_STRIPPED = sys.flags.optimize > 1 + + +# Generate test cases to test cdf and distribution consistency. +# Note that this list does not include all distributions. +dists = ['uniform', 'norm', 'lognorm', 'expon', 'beta', + 'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy', + 'foldcauchy', 'gamma', 'gengamma', 'loggamma', + 'alpha', 'anglit', 'arcsine', 'betaprime', 'dgamma', 'moyal', + 'exponnorm', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r', + 'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto', + 'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm', + 'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct', + 'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh', + 'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz', + 'hypsecant', 'laplace', 'reciprocal', 'trapz', 'triang', + 'tukeylambda', 'vonmises', 'vonmises_line', 'pearson3', 'gennorm', + 'halfgennorm', 'rice', 'kappa4', 'kappa3', 'truncnorm', 'argus', + 'crystalball'] + + +def _assert_hasattr(a, b, msg=None): + if msg is None: + msg = '%s does not have attribute %s' % (a, b) + assert_(hasattr(a, b), msg=msg) + + +def test_api_regression(): + # https://github.com/scipy/scipy/issues/3802 + _assert_hasattr(scipy.stats.distributions, 'f_gen') + + +# check function for test generator +def check_distribution(dist, args, alpha): + with suppress_warnings() as sup: + # frechet_l and frechet_r are deprecated, so all their + # methods generate DeprecationWarnings. 
+ sup.filter(category=DeprecationWarning, message=".*frechet_") + D, pval = stats.kstest(dist, '', args=args, N=1000) + if (pval < alpha): + D, pval = stats.kstest(dist, '', args=args, N=1000) + assert_(pval > alpha, + msg="D = {}; pval = {}; alpha = {}; args = {}".format( + D, pval, alpha, args)) + + +def cases_test_all_distributions(): + np.random.seed(1234) + + for dist in dists: + distfunc = getattr(stats, dist) + nargs = distfunc.numargs + alpha = 0.01 + if dist == 'fatiguelife': + alpha = 0.001 + + if dist == 'trapz': + args = tuple(np.sort(np.random.random(nargs))) + elif dist == 'triang': + args = tuple(np.random.random(nargs)) + elif dist == 'reciprocal' or dist == 'truncnorm': + vals = np.random.random(nargs) + vals[1] = vals[0] + 1.0 + args = tuple(vals) + elif dist == 'vonmises': + yield dist, (10,), alpha + yield dist, (101,), alpha + args = tuple(1.0 + np.random.random(nargs)) + else: + args = tuple(1.0 + np.random.random(nargs)) + + yield dist, args, alpha + + +@pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions()) +def test_all_distributions(dist, args, alpha): + check_distribution(dist, args, alpha) + + +def check_vonmises_pdf_periodic(k, l, s, x): + vm = stats.vonmises(k, loc=l, scale=s) + assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s))) + + +def check_vonmises_cdf_periodic(k, l, s, x): + vm = stats.vonmises(k, loc=l, scale=s) + assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1) + + +def test_vonmises_pdf_periodic(): + for k in [0.1, 1, 101]: + for x in [0, 1, numpy.pi, 10, 100]: + check_vonmises_pdf_periodic(k, 0, 1, x) + check_vonmises_pdf_periodic(k, 1, 1, x) + check_vonmises_pdf_periodic(k, 0, 10, x) + + check_vonmises_cdf_periodic(k, 0, 1, x) + check_vonmises_cdf_periodic(k, 1, 1, x) + check_vonmises_cdf_periodic(k, 0, 10, x) + + +def test_vonmises_line_support(): + assert_equal(stats.vonmises_line.a, -np.pi) + assert_equal(stats.vonmises_line.b, np.pi) + + +def test_vonmises_numerical(): + vm = stats.vonmises(800) + assert_almost_equal(vm.cdf(0), 0.5) + + +@pytest.mark.parametrize('dist', + ['alpha', 'betaprime', 'burr', 'burr12', + 'fatiguelife', 'invgamma', 'invgauss', 'invweibull', + 'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat', + 'powerlognorm', 'rayleigh', 'wald']) +def test_support(dist): + """gh-6235""" + dct = dict(distcont) + args = dct[dist] + + dist = getattr(stats, dist) + + assert_almost_equal(dist.pdf(dist.a, *args), 0) + assert_equal(dist.logpdf(dist.a, *args), -np.inf) + assert_almost_equal(dist.pdf(dist.b, *args), 0) + assert_equal(dist.logpdf(dist.b, *args), -np.inf) + + +class TestRandInt(object): + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.randint.rvs(5, 30, size=100) + assert_(numpy.all(vals < 30) & numpy.all(vals >= 5)) + assert_(len(vals) == 100) + vals = stats.randint.rvs(5, 30, size=(2, 50)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.randint.rvs(15, 46) + assert_((val >= 15) & (val < 46)) + assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val))) + val = stats.randint(15, 46).rvs(3) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_pdf(self): + k = numpy.r_[0:36] + out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0) + vals = stats.randint.pmf(k, 5, 30) + assert_array_almost_equal(vals, out) + + def test_cdf(self): + x = np.linspace(0,36,100) + k = numpy.floor(x) + out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0) + vals = 
stats.randint.cdf(x, 5, 30)
+        assert_array_almost_equal(vals, out, decimal=12)
+
+
+class TestBinom(object):
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.binom.rvs(10, 0.75, size=(2, 50))
+        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.binom.rvs(10, 0.75)
+        assert_(isinstance(val, int))
+        val = stats.binom(10, 0.75).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_pmf(self):
+        # regression test for Ticket #1842
+        vals1 = stats.binom.pmf(100, 100, 1)
+        vals2 = stats.binom.pmf(0, 100, 0)
+        assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
+        assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
+
+    def test_entropy(self):
+        # Basic entropy tests.
+        b = stats.binom(2, 0.5)
+        expected_p = np.array([0.25, 0.5, 0.25])
+        expected_h = -sum(xlogy(expected_p, expected_p))
+        h = b.entropy()
+        assert_allclose(h, expected_h)
+
+        b = stats.binom(2, 0.0)
+        h = b.entropy()
+        assert_equal(h, 0.0)
+
+        b = stats.binom(2, 1.0)
+        h = b.entropy()
+        assert_equal(h, 0.0)
+
+    def test_warns_p0(self):
+        # no spurious warnings are generated for p=0; gh-3817
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", RuntimeWarning)
+            assert_equal(stats.binom(n=2, p=0).mean(), 0)
+            assert_equal(stats.binom(n=2, p=0).std(), 0)
+
+
+class TestBernoulli(object):
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.bernoulli.rvs(0.75, size=(2, 50))
+        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.bernoulli.rvs(0.75)
+        assert_(isinstance(val, int))
+        val = stats.bernoulli(0.75).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_entropy(self):
+        # Simple tests of entropy.
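+        # For Bernoulli(p) the entropy is H(p) = -p*log(p) - (1-p)*log(1-p)
+        # (natural log, as scipy uses), which is what expected_h encodes
+        # below; the degenerate cases p=0 and p=1 carry zero entropy.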
+ b = stats.bernoulli(0.25) + expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75) + h = b.entropy() + assert_allclose(h, expected_h) + + b = stats.bernoulli(0.0) + h = b.entropy() + assert_equal(h, 0.0) + + b = stats.bernoulli(1.0) + h = b.entropy() + assert_equal(h, 0.0) + + +class TestBradford(object): + # gh-6216 + def test_cdf_ppf(self): + c = 0.1 + x = np.logspace(-20, -4) + q = stats.bradford.cdf(x, c) + xx = stats.bradford.ppf(q, c) + assert_allclose(x, xx) + + +class TestNBinom(object): + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.nbinom.rvs(10, 0.75, size=(2, 50)) + assert_(numpy.all(vals >= 0)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.nbinom.rvs(10, 0.75) + assert_(isinstance(val, int)) + val = stats.nbinom(10, 0.75).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_pmf(self): + # regression test for ticket 1779 + assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)), + stats.nbinom.pmf(700, 721, 0.52)) + # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029) + val = scipy.stats.nbinom.logpmf(0, 1, 1) + assert_equal(val, 0) + + +class TestNormInvGauss(object): + def setup_method(self): + np.random.seed(1234) + + def test_cdf_R(self): + # test pdf and cdf vals against R + # require("GeneralizedHyperbolic") + # x_test <- c(-7, -5, 0, 8, 15) + # r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5) + # r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5) + r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01, + 9.988650664e-01, 9.999848769e-01]) + x_test = np.array([-7, -5, 0, 8, 15]) + vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5) + assert_allclose(vals_cdf, r_cdf, atol=1e-9) + + def test_pdf_R(self): + # values from R as defined in test_cdf_R + r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01, + 7.450485342e-04, 8.917889931e-06]) + x_test = np.array([-7, -5, 0, 8, 15]) + vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5) + assert_allclose(vals_pdf, r_pdf, atol=1e-9) + + def test_stats(self): + a, b = 1, 0.5 + gamma = np.sqrt(a**2 - b**2) + v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)), + 3.0 * (1 + 4 * b**2 / a**2) / gamma) + assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk')) + + def test_ppf(self): + a, b = 1, 0.5 + x_test = np.array([0.001, 0.5, 0.999]) + vals = stats.norminvgauss.ppf(x_test, a, b) + assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b)) + + +class TestGeom(object): + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.geom.rvs(0.75, size=(2, 50)) + assert_(numpy.all(vals >= 0)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.geom.rvs(0.75) + assert_(isinstance(val, int)) + val = stats.geom(0.75).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_pmf(self): + vals = stats.geom.pmf([1, 2, 3], 0.5) + assert_array_almost_equal(vals, [0.5, 0.25, 0.125]) + + def test_logpmf(self): + # regression test for ticket 1793 + vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5)) + vals2 = stats.geom.logpmf([1, 2, 3], 0.5) + assert_allclose(vals1, vals2, rtol=1e-15, atol=0) + + # regression test for gh-4028 + val = stats.geom.logpmf(1, 1) + assert_equal(val, 0.0) + + def test_cdf_sf(self): + vals = stats.geom.cdf([1, 
2, 3], 0.5)
+        vals_sf = stats.geom.sf([1, 2, 3], 0.5)
+        expected = array([0.5, 0.75, 0.875])
+        assert_array_almost_equal(vals, expected)
+        assert_array_almost_equal(vals_sf, 1 - expected)
+
+    def test_logcdf_logsf(self):
+        vals = stats.geom.logcdf([1, 2, 3], 0.5)
+        vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
+        expected = array([0.5, 0.75, 0.875])
+        assert_array_almost_equal(vals, np.log(expected))
+        assert_array_almost_equal(vals_sf, np.log1p(-expected))
+
+    def test_ppf(self):
+        vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
+        expected = array([1.0, 2.0, 3.0])
+        assert_array_almost_equal(vals, expected)
+
+    def test_ppf_underflow(self):
+        # this should not underflow
+        assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
+
+
+class TestPlanck(object):
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_sf(self):
+        vals = stats.planck.sf([1, 2, 3], 5.)
+        expected = array([4.5399929762484854e-05,
+                          3.0590232050182579e-07,
+                          2.0611536224385579e-09])
+        assert_array_almost_equal(vals, expected)
+
+    def test_logsf(self):
+        vals = stats.planck.logsf([1000., 2000., 3000.], 1000.)
+        expected = array([-1001000., -2001000., -3001000.])
+        assert_array_almost_equal(vals, expected)
+
+
+class TestGennorm(object):
+    def test_laplace(self):
+        # test against Laplace (special case for beta=1)
+        points = [1, 2, 3]
+        pdf1 = stats.gennorm.pdf(points, 1)
+        pdf2 = stats.laplace.pdf(points)
+        assert_almost_equal(pdf1, pdf2)
+
+    def test_norm(self):
+        # test against the normal (special case for beta=2)
+        points = [1, 2, 3]
+        pdf1 = stats.gennorm.pdf(points, 2)
+        pdf2 = stats.norm.pdf(points, scale=2**-.5)
+        assert_almost_equal(pdf1, pdf2)
+
+
+class TestHalfgennorm(object):
+    def test_expon(self):
+        # test against the exponential (special case for beta=1)
+        points = [1, 2, 3]
+        pdf1 = stats.halfgennorm.pdf(points, 1)
+        pdf2 = stats.expon.pdf(points)
+        assert_almost_equal(pdf1, pdf2)
+
+    def test_halfnorm(self):
+        # test against the half normal (special case for beta=2)
+        points = [1, 2, 3]
+        pdf1 = stats.halfgennorm.pdf(points, 2)
+        pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
+        assert_almost_equal(pdf1, pdf2)
+
+    def test_gennorm(self):
+        # test against the generalized normal
+        points = [1, 2, 3]
+        pdf1 = stats.halfgennorm.pdf(points, .497324)
+        pdf2 = stats.gennorm.pdf(points, .497324)
+        assert_almost_equal(pdf1, 2*pdf2)
+
+
+class TestTruncnorm(object):
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_ppf_ticket1131(self):
+        vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
+                                   loc=[3]*7, scale=2)
+        expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
+        assert_array_almost_equal(vals, expected)
+
+    def test_isf_ticket1131(self):
+        vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
+                                   loc=[3]*7, scale=2)
+        expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
+        assert_array_almost_equal(vals, expected)
+
+    def test_gh_2477_small_values(self):
+        # Check a case that worked in the original issue.
+        low, high = -11, -10
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+        # Check a case that failed in the original issue.
+        low, high = 10, 11
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+
+    @pytest.mark.xfail(reason="truncnorm rvs is known to fail at extreme tails")
+    def test_gh_2477_large_values(self):
+        # Check a case that fails because the interval lies extremely far
+        # out in the tail.
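+        # A plausible cause (stated as an assumption about this scipy
+        # version): rvs draws ppf(u) with u uniform on [cdf(low), cdf(high)],
+        # and for low=100 both endpoints round to 1.0 in double precision,
+        # so the inverse-cdf draw degenerates.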
+        low, high = 100, 101
+        with np.errstate(divide='ignore'):
+            x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+
+    def test_gh_1489_trac_962_rvs(self):
+        # Check the original example.
+        low, high = 10, 15
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+
+
+class TestHypergeom(object):
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
+        assert_(numpy.all(vals >= 0) &
+                numpy.all(vals <= 3))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.hypergeom.rvs(20, 3, 10)
+        assert_(isinstance(val, int))
+        val = stats.hypergeom(20, 3, 10).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_precision(self):
+        # comparison number from mpmath
+        M = 2500
+        n = 50
+        N = 500
+        tot = M
+        good = n
+        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
+        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
+
+    def test_args(self):
+        # test correct output for corner cases of the arguments; see gh-2325
+        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
+        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
+
+        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
+        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
+
+    def test_cdf_above_one(self):
+        # for some values of the parameters, hypergeom cdf was > 1; see gh-2238
+        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
+
+    def test_precision2(self):
+        # Test hypergeom precision for large numbers.  See #1218.
+        # Results compared with those from R.
+        oranges = 9.9e4
+        pears = 1.1e5
+        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
+        quantile = 2e4
+        res = []
+        for eaten in fruits_eaten:
+            res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges,
+                                          eaten))
+        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
+                             8.265601e-11, 0.1237904, 1])
+        assert_allclose(res, expected, atol=0, rtol=5e-7)
+
+        # Test with an array_like first argument
+        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
+        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
+        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
+        assert_allclose(res2, expected2, atol=0, rtol=5e-7)
+
+    def test_entropy(self):
+        # Simple tests of entropy.
+        hg = stats.hypergeom(4, 1, 1)
+        h = hg.entropy()
+        expected_p = np.array([0.75, 0.25])
+        expected_h = -np.sum(xlogy(expected_p, expected_p))
+        assert_allclose(h, expected_h)
+
+        hg = stats.hypergeom(1, 1, 1)
+        h = hg.entropy()
+        assert_equal(h, 0.0)
+
+    def test_logsf(self):
+        # Test logsf for very large numbers.  See issue #4982.
+        # The result compares with that from R (v3.2.0):
+        # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
+        # -2239.771
+
+        k = 1e4
+        M = 1e7
+        n = 1e6
+        N = 5e4
+
+        result = stats.hypergeom.logsf(k, M, n, N)
+        expected = -2239.771  # From R
+        assert_almost_equal(result, expected, decimal=3)
+
+
+class TestLoggamma(object):
+
+    def test_stats(self):
+        # The following precomputed values are from the table in section 2.2
+        # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
+        # Chan (thesis, McMaster University, 1993).
+        table = np.array([
+            # c, mean, var, skew, exc. kurt.
+ 0.5, -1.9635, 4.9348, -1.5351, 4.0000, + 1.0, -0.5772, 1.6449, -1.1395, 2.4000, + 12.0, 2.4427, 0.0869, -0.2946, 0.1735, + ]).reshape(-1, 5) + for c, mean, var, skew, kurt in table: + computed = stats.loggamma.stats(c, moments='msvk') + assert_array_almost_equal(computed, [mean, var, skew, kurt], + decimal=4) + + +class TestLogistic(object): + # gh-6226 + def test_cdf_ppf(self): + x = np.linspace(-20, 20) + y = stats.logistic.cdf(x) + xx = stats.logistic.ppf(y) + assert_allclose(x, xx) + + def test_sf_isf(self): + x = np.linspace(-20, 20) + y = stats.logistic.sf(x) + xx = stats.logistic.isf(y) + assert_allclose(x, xx) + + def test_extreme_values(self): + # p is chosen so that 1 - (1 - p) == p in double precision + p = 9.992007221626409e-16 + desired = 34.53957599234088 + assert_allclose(stats.logistic.ppf(1 - p), desired) + assert_allclose(stats.logistic.isf(p), desired) + + +class TestLogser(object): + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.logser.rvs(0.75, size=(2, 50)) + assert_(numpy.all(vals >= 1)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.logser.rvs(0.75) + assert_(isinstance(val, int)) + val = stats.logser(0.75).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_pmf_small_p(self): + m = stats.logser.pmf(4, 1e-20) + # The expected value was computed using mpmath: + # >>> import mpmath + # >>> mpmath.mp.dps = 64 + # >>> k = 4 + # >>> p = mpmath.mpf('1e-20') + # >>> float(-(p**k)/k/mpmath.log(1-p)) + # 2.5e-61 + # It is also clear from noticing that for very small p, + # log(1-p) is approximately -p, and the formula becomes + # p**(k-1) / k + assert_allclose(m, 2.5e-61) + + def test_mean_small_p(self): + m = stats.logser.mean(1e-8) + # The expected mean was computed using mpmath: + # >>> import mpmath + # >>> mpmath.dps = 60 + # >>> p = mpmath.mpf('1e-8') + # >>> float(-p / ((1 - p)*mpmath.log(1 - p))) + # 1.000000005 + assert_allclose(m, 1.000000005) + + +class TestPareto(object): + def test_stats(self): + # Check the stats() method with some simple values. Also check + # that the calculations do not trigger RuntimeWarnings. 
+ with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + + m, v, s, k = stats.pareto.stats(0.5, moments='mvsk') + assert_equal(m, np.inf) + assert_equal(v, np.inf) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(1.0, moments='mvsk') + assert_equal(m, np.inf) + assert_equal(v, np.inf) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(1.5, moments='mvsk') + assert_equal(m, 3.0) + assert_equal(v, np.inf) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(2.0, moments='mvsk') + assert_equal(m, 2.0) + assert_equal(v, np.inf) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(2.5, moments='mvsk') + assert_allclose(m, 2.5 / 1.5) + assert_allclose(v, 2.5 / (1.5*1.5*0.5)) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(3.0, moments='mvsk') + assert_allclose(m, 1.5) + assert_allclose(v, 0.75) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(3.5, moments='mvsk') + assert_allclose(m, 3.5 / 2.5) + assert_allclose(v, 3.5 / (2.5*2.5*1.5)) + assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5)) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(4.0, moments='mvsk') + assert_allclose(m, 4.0 / 3.0) + assert_allclose(v, 4.0 / 18.0) + assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0)) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(4.5, moments='mvsk') + assert_allclose(m, 4.5 / 3.5) + assert_allclose(v, 4.5 / (3.5*3.5*2.5)) + assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5)) + assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5)) + + def test_sf(self): + x = 1e9 + b = 2 + scale = 1.5 + p = stats.pareto.sf(x, b, loc=0, scale=scale) + expected = (scale/x)**b # 2.25e-18 + assert_allclose(p, expected) + + +class TestGenpareto(object): + def test_ab(self): + # c >= 0: a, b = [0, inf] + for c in [1., 0.]: + c = np.asarray(c) + stats.genpareto._argcheck(c) # ugh + assert_equal(stats.genpareto.a, 0.) + assert_(np.isposinf(stats.genpareto.b)) + + # c < 0: a=0, b=1/|c| + c = np.asarray(-2.) + stats.genpareto._argcheck(c) + assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5]) + + def test_c0(self): + # with c=0, genpareto reduces to the exponential distribution + rv = stats.genpareto(c=0.) + x = np.linspace(0, 10., 30) + assert_allclose(rv.pdf(x), stats.expon.pdf(x)) + assert_allclose(rv.cdf(x), stats.expon.cdf(x)) + assert_allclose(rv.sf(x), stats.expon.sf(x)) + + q = np.linspace(0., 1., 10) + assert_allclose(rv.ppf(q), stats.expon.ppf(q)) + + def test_cm1(self): + # with c=-1, genpareto reduces to the uniform distr on [0, 1] + rv = stats.genpareto(c=-1.) + x = np.linspace(0, 10., 30) + assert_allclose(rv.pdf(x), stats.uniform.pdf(x)) + assert_allclose(rv.cdf(x), stats.uniform.cdf(x)) + assert_allclose(rv.sf(x), stats.uniform.sf(x)) + + q = np.linspace(0., 1., 10) + assert_allclose(rv.ppf(q), stats.uniform.ppf(q)) + + # logpdf(1., c=-1) should be zero + assert_allclose(rv.logpdf(1), 0) + + def test_x_inf(self): + # make sure x=inf is handled gracefully + rv = stats.genpareto(c=0.1) + assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.]) + assert_(np.isneginf(rv.logpdf(np.inf))) + + rv = stats.genpareto(c=0.) + assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.]) + assert_(np.isneginf(rv.logpdf(np.inf))) + + rv = stats.genpareto(c=-1.) 
+        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
+        assert_(np.isneginf(rv.logpdf(np.inf)))
+
+    def test_c_continuity(self):
+        # pdf is continuous at c=0, -1
+        x = np.linspace(0, 10, 30)
+        for c in [0, -1]:
+            pdf0 = stats.genpareto.pdf(x, c)
+            for dc in [1e-14, -1e-14]:
+                pdfc = stats.genpareto.pdf(x, c + dc)
+                assert_allclose(pdf0, pdfc, atol=1e-12)
+
+            cdf0 = stats.genpareto.cdf(x, c)
+            for dc in [1e-14, -1e-14]:
+                cdfc = stats.genpareto.cdf(x, c + dc)
+                assert_allclose(cdf0, cdfc, atol=1e-12)
+
+    def test_c_continuity_ppf(self):
+        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
+                  np.linspace(0.01, 1, 30, endpoint=False),
+                  1. - np.logspace(1e-12, 0.01, base=0.1)]
+        for c in [0., -1.]:
+            ppf0 = stats.genpareto.ppf(q, c)
+            for dc in [1e-14, -1e-14]:
+                ppfc = stats.genpareto.ppf(q, c + dc)
+                assert_allclose(ppf0, ppfc, atol=1e-12)
+
+    def test_c_continuity_isf(self):
+        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
+                  np.linspace(0.01, 1, 30, endpoint=False),
+                  1. - np.logspace(1e-12, 0.01, base=0.1)]
+        for c in [0., -1.]:
+            isf0 = stats.genpareto.isf(q, c)
+            for dc in [1e-14, -1e-14]:
+                isfc = stats.genpareto.isf(q, c + dc)
+                assert_allclose(isf0, isfc, atol=1e-12)
+
+    def test_cdf_ppf_roundtrip(self):
+        # this should pass with machine precision.  hat tip @pbrod
+        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
+                  np.linspace(0.01, 1, 30, endpoint=False),
+                  1. - np.logspace(1e-12, 0.01, base=0.1)]
+        for c in [1e-8, -1e-18, 1e-15, -1e-15]:
+            assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
+                            q, atol=1e-15)
+
+    def test_logsf(self):
+        logp = stats.genpareto.logsf(1e10, .01, 0, 1)
+        assert_allclose(logp, -1842.0680753952365)
+
+
+class TestPearson3(object):
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.pearson3.rvs(0.1, size=(2, 50))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllFloat'])
+        val = stats.pearson3.rvs(0.5)
+        assert_(isinstance(val, float))
+        val = stats.pearson3(0.5).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllFloat'])
+        assert_(len(val) == 3)
+
+    def test_pdf(self):
+        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
+        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
+                        atol=1e-6)
+        vals = stats.pearson3.pdf(-3, 0.1)
+        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
+        vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
+        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
+                                        0.39885918, 0.23413173]), atol=1e-6)
+
+    def test_cdf(self):
+        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
+        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
+                        atol=1e-6)
+        vals = stats.pearson3.cdf(-3, 0.1)
+        assert_allclose(vals, [0.00082256], atol=1e-6)
+        vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
+        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
+                               5.06649130e-01, 8.41442111e-01], atol=1e-6)
+
+
+class TestKappa4(object):
+    def test_cdf_genpareto(self):
+        # h = 1 and k != 0 is generalized Pareto
+        x = [0.0, 0.1, 0.2, 0.5]
+        h = 1.0
+        for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
+                  1.9]:
+            vals = stats.kappa4.cdf(x, h, k)
+            # the shape parameter is the opposite of what is expected
+            vals_comp = stats.genpareto.cdf(x, -k)
+            assert_allclose(vals, vals_comp)
+
+    def test_cdf_genextreme(self):
+        # h = 0 and k != 0 is generalized extreme value
+        x = np.linspace(-5, 5, 10)
+        h = 0.0
+        k = np.linspace(-3, 3, 10)
+        vals = stats.kappa4.cdf(x, h, k)
+        vals_comp = 
stats.genextreme.cdf(x, k) + assert_allclose(vals, vals_comp) + + def test_cdf_expon(self): + # h = 1 and k = 0 is exponential + x = np.linspace(0, 10, 10) + h = 1.0 + k = 0.0 + vals = stats.kappa4.cdf(x, h, k) + vals_comp = stats.expon.cdf(x) + assert_allclose(vals, vals_comp) + + def test_cdf_gumbel_r(self): + # h = 0 and k = 0 is gumbel_r + x = np.linspace(-5, 5, 10) + h = 0.0 + k = 0.0 + vals = stats.kappa4.cdf(x, h, k) + vals_comp = stats.gumbel_r.cdf(x) + assert_allclose(vals, vals_comp) + + def test_cdf_logistic(self): + # h = -1 and k = 0 is logistic + x = np.linspace(-5, 5, 10) + h = -1.0 + k = 0.0 + vals = stats.kappa4.cdf(x, h, k) + vals_comp = stats.logistic.cdf(x) + assert_allclose(vals, vals_comp) + + def test_cdf_uniform(self): + # h = 1 and k = 1 is uniform + x = np.linspace(-5, 5, 10) + h = 1.0 + k = 1.0 + vals = stats.kappa4.cdf(x, h, k) + vals_comp = stats.uniform.cdf(x) + assert_allclose(vals, vals_comp) + + def test_integers_ctor(self): + # regression test for gh-7416: _argcheck fails for integer h and k + # in numpy 1.12 + stats.kappa4(1, 2) + + +class TestPoisson(object): + def setup_method(self): + np.random.seed(1234) + + def test_pmf_basic(self): + # Basic case + ln2 = np.log(2) + vals = stats.poisson.pmf([0, 1, 2], ln2) + expected = [0.5, ln2/2, ln2**2/4] + assert_allclose(vals, expected) + + def test_mu0(self): + # Edge case: mu=0 + vals = stats.poisson.pmf([0, 1, 2], 0) + expected = [1, 0, 0] + assert_array_equal(vals, expected) + + interval = stats.poisson.interval(0.95, 0) + assert_equal(interval, (0, 0)) + + def test_rvs(self): + vals = stats.poisson.rvs(0.5, size=(2, 50)) + assert_(numpy.all(vals >= 0)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.poisson.rvs(0.5) + assert_(isinstance(val, int)) + val = stats.poisson(0.5).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_stats(self): + mu = 16.0 + result = stats.poisson.stats(mu, moments='mvsk') + assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu]) + + mu = np.array([0.0, 1.0, 2.0]) + result = stats.poisson.stats(mu, moments='mvsk') + expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5]) + assert_allclose(result, expected) + + +class TestZipf(object): + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.zipf.rvs(1.5, size=(2, 50)) + assert_(numpy.all(vals >= 1)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.zipf.rvs(1.5) + assert_(isinstance(val, int)) + val = stats.zipf(1.5).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_moments(self): + # n-th moment is finite iff a > n + 1 + m, v = stats.zipf.stats(a=2.8) + assert_(np.isfinite(m)) + assert_equal(v, np.inf) + + s, k = stats.zipf.stats(a=4.8, moments='sk') + assert_(not np.isfinite([s, k]).all()) + + +class TestDLaplace(object): + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.dlaplace.rvs(1.5, size=(2, 50)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.dlaplace.rvs(1.5) + assert_(isinstance(val, int)) + val = stats.dlaplace(1.5).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + assert_(stats.dlaplace.rvs(0.8) is not None) + + def test_stats(self): + # compare the explicit formulas w/ direct 
summation using pmf
+        a = 1.
+        dl = stats.dlaplace(a)
+        m, v, s, k = dl.stats('mvsk')
+
+        N = 37
+        xx = np.arange(-N, N+1)
+        pp = dl.pmf(xx)
+        m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
+        assert_equal((m, s), (0, 0))
+        assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
+
+    def test_stats2(self):
+        a = np.log(2.)
+        dl = stats.dlaplace(a)
+        m, v, s, k = dl.stats('mvsk')
+        assert_equal((m, s), (0., 0.))
+        assert_allclose((v, k), (4., 3.25))
+
+
+class TestInvGamma(object):
+    def test_invgamma_inf_gh_1866(self):
+        # invgamma's moments are only finite for a>n
+        # specific numbers checked w/ boost 1.54
+        with warnings.catch_warnings():
+            warnings.simplefilter('error', RuntimeWarning)
+            mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
+            expected = [0.05461496450, 0.0001723162534, 1.020362676,
+                        2.055616582]
+            assert_allclose(mvsk, expected)
+
+            a = [1.1, 3.1, 5.6]
+            mvsk = stats.invgamma.stats(a=a, moments='mvsk')
+            expected = ([10., 0.476190476, 0.2173913043],       # mmm
+                        [np.inf, 0.2061430632, 0.01312749422],  # vvv
+                        [np.nan, 41.95235392, 2.919025532],     # sss
+                        [np.nan, np.nan, 24.51923076])          # kkk
+            for x, y in zip(mvsk, expected):
+                assert_almost_equal(x, y)
+
+    def test_cdf_ppf(self):
+        # gh-6245
+        x = np.logspace(-2.6, 0)
+        y = stats.invgamma.cdf(x, 1)
+        xx = stats.invgamma.ppf(y, 1)
+        assert_allclose(x, xx)
+
+    def test_sf_isf(self):
+        # gh-6245
+        if sys.maxsize > 2**32:
+            x = np.logspace(2, 100)
+        else:
+            # Invgamma roundtrip on 32-bit systems has relative accuracy
+            # ~1e-15 until x=1e+15, and becomes inf above x=1e+18
+            x = np.logspace(2, 18)
+
+        y = stats.invgamma.sf(x, 1)
+        xx = stats.invgamma.isf(y, 1)
+        assert_allclose(x, xx, rtol=1.0)
+
+
+class TestF(object):
+    def test_f_moments(self):
+        # n-th moment of F distributions is only finite for n < dfd / 2
+        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
+        assert_(np.isfinite(m))
+        assert_(np.isfinite(v))
+        assert_(np.isfinite(s))
+        assert_(not np.isfinite(k))
+
+    def test_moments_warnings(self):
+        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
+        with warnings.catch_warnings():
+            warnings.simplefilter('error', RuntimeWarning)
+            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
+
+    @pytest.mark.xfail(reason='f stats does not properly broadcast')
+    def test_stats_broadcast(self):
+        # stats do not fully broadcast just yet
+        mv = stats.f.stats(dfn=11, dfd=[11, 12])
+
+
+def test_rvgeneric_std():
+    # Regression test for #1191
+    assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])
+
+
+def test_moments_t():
+    # regression test for #8786
+    assert_equal(stats.t.stats(df=1, moments='mvsk'),
+                 (np.inf, np.nan, np.nan, np.nan))
+    assert_equal(stats.t.stats(df=1.01, moments='mvsk'),
+                 (0.0, np.inf, np.nan, np.nan))
+    assert_equal(stats.t.stats(df=2, moments='mvsk'),
+                 (0.0, np.inf, np.nan, np.nan))
+    assert_equal(stats.t.stats(df=2.01, moments='mvsk'),
+                 (0.0, 2.01/(2.01-2.0), np.nan, np.inf))
+    assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))
+    assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))
+    assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))
+    assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))
+
+
+class TestRvDiscrete(object):
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        states = [-1, 0, 1, 2, 3, 4]
+        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
+        samples = 1000
+        r = stats.rv_discrete(name='sample', values=(states, probability))
+        x = r.rvs(size=samples)
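+        # (illustrative extra check, not in the original test: every sample
+        # must lie within the range of the supplied states)
+        assert_(np.all((x >= min(states)) & (x <= max(states))))
+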
assert_(isinstance(x, numpy.ndarray)) + + for s, p in zip(states, probability): + assert_(abs(sum(x == s)/float(samples) - p) < 0.05) + + x = r.rvs() + assert_(isinstance(x, int)) + + def test_entropy(self): + # Basic tests of entropy. + pvals = np.array([0.25, 0.45, 0.3]) + p = stats.rv_discrete(values=([0, 1, 2], pvals)) + expected_h = -sum(xlogy(pvals, pvals)) + h = p.entropy() + assert_allclose(h, expected_h) + + p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0])) + h = p.entropy() + assert_equal(h, 0.0) + + def test_pmf(self): + xk = [1, 2, 4] + pk = [0.5, 0.3, 0.2] + rv = stats.rv_discrete(values=(xk, pk)) + + x = [[1., 4.], + [3., 2]] + assert_allclose(rv.pmf(x), + [[0.5, 0.2], + [0., 0.3]], atol=1e-14) + + def test_cdf(self): + xk = [1, 2, 4] + pk = [0.5, 0.3, 0.2] + rv = stats.rv_discrete(values=(xk, pk)) + + x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5] + expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1] + assert_allclose(rv.cdf(x_values), expected, atol=1e-14) + + # also check scalar arguments + assert_allclose([rv.cdf(xx) for xx in x_values], + expected, atol=1e-14) + + def test_ppf(self): + xk = [1, 2, 4] + pk = [0.5, 0.3, 0.2] + rv = stats.rv_discrete(values=(xk, pk)) + + q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.] + expected = [1, 1, 2, 2, 4, 4] + assert_allclose(rv.ppf(q_values), expected, atol=1e-14) + + # also check scalar arguments + assert_allclose([rv.ppf(q) for q in q_values], + expected, atol=1e-14) + + def test_cdf_ppf_next(self): + # copied and special cased from test_discrete_basic + vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1]) + rv = stats.rv_discrete(values=vals) + + assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8), + rv.xk[1:]) + + def test_expect(self): + xk = [1, 2, 4, 6, 7, 11] + pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1] + rv = stats.rv_discrete(values=(xk, pk)) + + assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14) + + def test_bad_input(self): + xk = [1, 2, 3] + pk = [0.5, 0.5] + assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) + + pk = [1, 2, 3] + assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) + + +class TestSkewNorm(object): + def setup_method(self): + np.random.seed(1234) + + def test_normal(self): + # When the skewness is 0 the distribution is normal + x = np.linspace(-5, 5, 100) + assert_array_almost_equal(stats.skewnorm.pdf(x, a=0), + stats.norm.pdf(x)) + + def test_rvs(self): + shape = (3, 4, 5) + x = stats.skewnorm.rvs(a=0.75, size=shape) + assert_equal(shape, x.shape) + + x = stats.skewnorm.rvs(a=-3, size=shape) + assert_equal(shape, x.shape) + + def test_moments(self): + X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2) + expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)] + computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk') + assert_array_almost_equal(computed, expected, decimal=2) + + X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2) + expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)] + computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk') + assert_array_almost_equal(computed, expected, decimal=2) + + def test_cdf_large_x(self): + # Regression test for gh-7746. + # The x values are large enough that the closest 64 bit floating + # point representation of the exact CDF is 1.0. 
+ p = stats.skewnorm.cdf([10, 20, 30], -1) + assert_allclose(p, np.ones(3), rtol=1e-14) + p = stats.skewnorm.cdf(25, 2.5) + assert_allclose(p, 1.0, rtol=1e-14) + + def test_cdf_sf_small_values(self): + # Triples are [x, a, cdf(x, a)]. These values were computed + # using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha. + cdfvals = [ + [-8, 1, 3.870035046664392611e-31], + [-4, 2, 8.1298399188811398e-21], + [-2, 5, 1.55326826787106273e-26], + [-9, -1, 2.257176811907681295e-19], + [-10, -4, 1.523970604832105213e-23], + ] + for x, a, cdfval in cdfvals: + p = stats.skewnorm.cdf(x, a) + assert_allclose(p, cdfval, rtol=1e-8) + # For the skew normal distribution, sf(-x, -a) = cdf(x, a). + p = stats.skewnorm.sf(-x, -a) + assert_allclose(p, cdfval, rtol=1e-8) + + +class TestExpon(object): + def test_zero(self): + assert_equal(stats.expon.pdf(0), 1) + + def test_tail(self): # Regression test for ticket 807 + assert_equal(stats.expon.cdf(1e-18), 1e-18) + assert_equal(stats.expon.isf(stats.expon.sf(40)), 40) + + +class TestExponNorm(object): + def test_moments(self): + # Some moment test cases based on non-loc/scaled formula + def get_moms(lam, sig, mu): + # See wikipedia for these formulae + # where it is listed as an exponentially modified gaussian + opK2 = 1.0 + 1 / (lam*sig)**2 + exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5) + exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2) + return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt] + + mu, sig, lam = 0, 1, 1 + K = 1.0 / (lam * sig) + sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') + assert_almost_equal(sts, get_moms(lam, sig, mu)) + mu, sig, lam = -3, 2, 0.1 + K = 1.0 / (lam * sig) + sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') + assert_almost_equal(sts, get_moms(lam, sig, mu)) + mu, sig, lam = 0, 3, 1 + K = 1.0 / (lam * sig) + sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') + assert_almost_equal(sts, get_moms(lam, sig, mu)) + mu, sig, lam = -5, 11, 3.5 + K = 1.0 / (lam * sig) + sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') + assert_almost_equal(sts, get_moms(lam, sig, mu)) + + def test_extremes_x(self): + # Test for extreme values against overflows + assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0) + assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0) + + +class TestGenExpon(object): + def test_pdf_unity_area(self): + from scipy.integrate import simps + # PDF should integrate to one + p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0) + assert_almost_equal(simps(p, dx=0.01), 1, 1) + + def test_cdf_bounds(self): + # CDF should always be positive + cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0) + assert_(numpy.all((0 <= cdf) & (cdf <= 1))) + + +class TestExponpow(object): + def test_tail(self): + assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20) + assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), + 5) + + +class TestSkellam(object): + def test_pmf(self): + # comparison to R + k = numpy.arange(-10, 15) + mu1, mu2 = 10, 5 + skpmfR = numpy.array( + [4.2254582961926893e-005, 1.1404838449648488e-004, + 2.8979625801752660e-004, 6.9177078182101231e-004, + 1.5480716105844708e-003, 3.2412274963433889e-003, + 6.3373707175123292e-003, 1.1552351566696643e-002, + 1.9606152375042644e-002, 3.0947164083410337e-002, + 4.5401737566767360e-002, 6.1894328166820688e-002, + 7.8424609500170578e-002, 9.2418812533573133e-002, + 1.0139793148019728e-001, 1.0371927988298846e-001, + 9.9076583077406091e-002, 
8.8546660073089561e-002, + 7.4187842052486810e-002, 5.8392772862200251e-002, + 4.3268692953013159e-002, 3.0248159818374226e-002, + 1.9991434305603021e-002, 1.2516877303301180e-002, + 7.4389876226229707e-003]) + + assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15) + + def test_cdf(self): + # comparison to R, only 5 decimals + k = numpy.arange(-10, 15) + mu1, mu2 = 10, 5 + skcdfR = numpy.array( + [6.4061475386192104e-005, 1.7810985988267694e-004, + 4.6790611790020336e-004, 1.1596768997212152e-003, + 2.7077485103056847e-003, 5.9489760066490718e-003, + 1.2286346724161398e-002, 2.3838698290858034e-002, + 4.3444850665900668e-002, 7.4392014749310995e-002, + 1.1979375231607835e-001, 1.8168808048289900e-001, + 2.6011268998306952e-001, 3.5253150251664261e-001, + 4.5392943399683988e-001, 5.5764871387982828e-001, + 6.5672529695723436e-001, 7.4527195703032389e-001, + 8.1945979908281064e-001, 8.7785257194501087e-001, + 9.2112126489802404e-001, 9.5136942471639818e-001, + 9.7136085902200120e-001, 9.8387773632530240e-001, + 9.9131672394792536e-001]) + + assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5) + + +class TestLognorm(object): + def test_pdf(self): + # Regression test for Ticket #1471: avoid nan with 0/0 situation + # Also make sure there are no warnings at x=0, cf gh-5202 + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) + pdf = stats.lognorm.pdf([0, 0.5, 1], 1) + assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228]) + + def test_logcdf(self): + # Regression test for gh-5940: sf et al would underflow too early + x2, mu, sigma = 201.68, 195, 0.149 + assert_allclose(stats.lognorm.sf(x2-mu, s=sigma), + stats.norm.sf(np.log(x2-mu)/sigma)) + assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma), + stats.norm.logsf(np.log(x2-mu)/sigma)) + + +class TestBeta(object): + def test_logpdf(self): + # Regression test for Ticket #1326: avoid nan with 0*log(0) situation + logpdf = stats.beta.logpdf(0, 1, 0.5) + assert_almost_equal(logpdf, -0.69314718056) + logpdf = stats.beta.logpdf(0, 0.5, 1) + assert_almost_equal(logpdf, np.inf) + + def test_logpdf_ticket_1866(self): + alpha, beta = 267, 1472 + x = np.array([0.2, 0.5, 0.6]) + b = stats.beta(alpha, beta) + assert_allclose(b.logpdf(x).sum(), -1201.699061824062) + assert_allclose(b.pdf(x), np.exp(b.logpdf(x))) + + +class TestBetaPrime(object): + def test_logpdf(self): + alpha, beta = 267, 1472 + x = np.array([0.2, 0.5, 0.6]) + b = stats.betaprime(alpha, beta) + assert_(np.isfinite(b.logpdf(x)).all()) + assert_allclose(b.pdf(x), np.exp(b.logpdf(x))) + + def test_cdf(self): + # regression test for gh-4030: Implementation of + # scipy.stats.betaprime.cdf() + x = stats.betaprime.cdf(0, 0.2, 0.3) + assert_equal(x, 0.0) + + alpha, beta = 267, 1472 + x = np.array([0.2, 0.5, 0.6]) + cdfs = stats.betaprime.cdf(x, alpha, beta) + assert_(np.isfinite(cdfs).all()) + + # check the new cdf implementation vs generic one: + gen_cdf = stats.rv_continuous._cdf_single + cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x] + assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12) + + +class TestGamma(object): + def test_pdf(self): + # a few test cases to compare with R + pdf = stats.gamma.pdf(90, 394, scale=1./5) + assert_almost_equal(pdf, 0.002312341) + + pdf = stats.gamma.pdf(3, 10, scale=1./5) + assert_almost_equal(pdf, 0.1620358) + + def test_logpdf(self): + # Regression test for Ticket #1326: cornercase avoid nan with 0*log(0) + # situation + logpdf = stats.gamma.logpdf(0, 1) + 
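# for shape a=1 the gamma distribution reduces to the standard
+        # exponential, whose pdf at x=0 is exactly 1, hence a logpdf of 0
+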
assert_almost_equal(logpdf, 0) + + +class TestChi2(object): + # regression tests after precision improvements, ticket:1041, not verified + def test_precision(self): + assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003, + decimal=14) + assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778, + decimal=14) + + +class TestGumbelL(object): + # gh-6228 + def test_cdf_ppf(self): + x = np.linspace(-100, -4) + y = stats.gumbel_l.cdf(x) + xx = stats.gumbel_l.ppf(y) + assert_allclose(x, xx) + + def test_logcdf_logsf(self): + x = np.linspace(-100, -4) + y = stats.gumbel_l.logcdf(x) + z = stats.gumbel_l.logsf(x) + u = np.exp(y) + v = -special.expm1(z) + assert_allclose(u, v) + + def test_sf_isf(self): + x = np.linspace(-20, 5) + y = stats.gumbel_l.sf(x) + xx = stats.gumbel_l.isf(y) + assert_allclose(x, xx) + +class TestLevyStable(object): + + def test_fit(self): + # construct data to have percentiles that match + # example in McCulloch 1986. + x = [-.05413,-.05413, + 0.,0.,0.,0., + .00533,.00533,.00533,.00533,.00533, + .03354,.03354,.03354,.03354,.03354, + .05309,.05309,.05309,.05309,.05309] + alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x) + assert_allclose(alpha1, 1.48, rtol=0, atol=0.01) + assert_almost_equal(beta1, -.22, 2) + assert_almost_equal(scale1, 0.01717, 4) + assert_almost_equal(loc1, 0.00233, 2) # to 2 dps due to rounding error in McCulloch86 + + # cover alpha=2 scenario + x2 = x + [.05309,.05309,.05309,.05309,.05309] + alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2) + assert_equal(alpha2, 2) + assert_equal(beta2, -1) + assert_almost_equal(scale2, .02503, 4) + assert_almost_equal(loc2, .03354, 4) + + @pytest.mark.slow + def test_pdf_nolan_samples(self): + """ Test pdf values against Nolan's stablec.exe output + see - http://fs2.american.edu/jpnolan/www/stable/stable.html + + There's a known limitation of Nolan's executable for alpha < 0.2. + + Repeat following with beta = -1, -.5, 0, .5 and 1 + stablec.exe << + 1 # pdf + 1 # Nolan S equivalent to S0 in scipy + .25,2,.25 # alpha + -1,-1,0 # beta + -10,10,1 # x + 1,0 # gamma, delta + 2 # output file + """ + data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__), + 'data/stable-pdf-sample-data.npy'))) + + data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta') + + # support numpy 1.8.2 for travis + npisin = np.isin if hasattr(np, "isin") else np.in1d + + tests = [ + # best selects + ['best', None, 8, None], + + # quadrature is accurate for most alpha except 0.25; perhaps limitation of Nolan stablec? + # we reduce size of x to speed up computation as numerical integration slow. 
+            ['quadrature', None, 8, lambda r: (r['alpha'] > 0.25) & (npisin(r['x'], [-10,-5,0,5,10]))],
+
+            # zolotarev is accurate except at alpha==1, beta != 0
+            ['zolotarev', None, 8, lambda r: r['alpha'] != 1],
+            ['zolotarev', None, 8, lambda r: (r['alpha'] == 1) & (r['beta'] == 0)],
+            ['zolotarev', None, 1, lambda r: (r['alpha'] == 1) & (r['beta'] != 0)],
+
+            # fft accuracy reduces as alpha decreases, fails at low values of alpha and x=0
+            ['fft', 0, 4, lambda r: r['alpha'] > 1],
+            ['fft', 0, 3, lambda r: (r['alpha'] < 1) & (r['alpha'] > 0.25)],
+            ['fft', 0, 1, lambda r: (r['alpha'] == 0.25) & (r['x'] != 0)],  # not useful here
+        ]
+        for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):
+            stats.levy_stable.pdf_default_method = default_method
+            stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
+            subdata = data[filter_func(data)] if filter_func is not None else data
+            with suppress_warnings() as sup:
+                sup.record(RuntimeWarning, "Density calculation unstable for alpha=1 and beta!=0.*")
+                sup.record(RuntimeWarning, "Density calculations experimental for FFT method.*")
+                p = stats.levy_stable.pdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)
+                subdata2 = rec_append_fields(subdata, 'calc', p)
+                failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]
+                assert_almost_equal(p, subdata['p'], decimal_places, "pdf test %s failed with method '%s'\n%s" % (ix, default_method, failures), verbose=False)
+
+    @pytest.mark.slow
+    def test_cdf_nolan_samples(self):
+        """ Test cdf values against Nolan's stablec.exe output
+        see - http://fs2.american.edu/jpnolan/www/stable/stable.html
+
+        There's a known limitation of Nolan's executable for alpha < 0.2.
+
+        Repeat following with beta = -1, -.5, 0, .5 and 1
+            stablec.exe <<
+            2 # cdf
+            1 # Nolan S equivalent to S0 in scipy
+            .25,2,.25 # alpha
+            -1,-1,0 # beta
+            -10,10,1 # x
+            1,0 # gamma, delta
+            2 # output file
+        """
+        data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                    'data/stable-cdf-sample-data.npy')))
+
+        data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
+
+        tests = [
+            # zolotarev is accurate for all values
+            ['zolotarev', None, 8, None],
+
+            # fft accuracy is poor, and very poor for alpha < 1
+            ['fft', 0, 2, lambda r: r['alpha'] > 1],
+        ]
+        for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):
+            stats.levy_stable.pdf_default_method = default_method
+            stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
+            subdata = data[filter_func(data)] if filter_func is not None else data
+            with suppress_warnings() as sup:
+                sup.record(RuntimeWarning, 'FFT method is considered ' +
+                           'experimental for cumulative distribution ' +
+                           'function evaluations.*')
+                p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)
+                subdata2 = rec_append_fields(subdata, 'calc', p)
+                failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]
+                assert_almost_equal(p, subdata['p'], decimal_places, "cdf test %s failed with method '%s'\n%s" % (ix, default_method, failures), verbose=False)
+
+    def test_pdf_alpha_equals_one_beta_non_zero(self):
+        """ sample points extracted from Tables and Graphs of Stable Probability
+        Density Functions - Donald R Holt - 1973 - p 187.
+ """ + xs = np.array([0, 0, 0, 0, + 1, 1, 1, 1, + 2, 2, 2, 2, + 3, 3, 3, 3, + 4, 4, 4, 4]) + density = np.array([.3183, .3096, .2925, .2622, + .1591, .1587, .1599, .1635, + .0637, .0729, .0812, .0955, + .0318, .0390, .0458, .0586, + .0187, .0236, .0285, .0384]) + betas = np.array([0, .25, .5, 1, + 0, .25, .5, 1, + 0, .25, .5, 1, + 0, .25, .5, 1, + 0, .25, .5, 1]) + + tests = [ + ['quadrature', None, 4], + #['fft', 0, 4], + ['zolotarev', None, 1], + ] + + with np.errstate(all='ignore'), suppress_warnings() as sup: + sup.filter(category=RuntimeWarning, message="Density calculation unstable.*") + for default_method, fft_min_points, decimal_places in tests: + stats.levy_stable.pdf_default_method = default_method + stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points + #stats.levy_stable.fft_grid_spacing = 0.0001 + pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0) + assert_almost_equal(pdf, density, decimal_places, default_method) + + def test_stats(self): + param_sets = [ + [(1.48,-.22, 0, 1), (0,np.inf,np.NaN,np.NaN)], + [(2,.9, 10, 1.5), (10,4.5,0,0)] + ] + for args, exp_stats in param_sets: + calc_stats = stats.levy_stable.stats(args[0], args[1], loc=args[2], scale=args[3], moments='mvsk') + assert_almost_equal(calc_stats, exp_stats) + +class TestArrayArgument(object): # test for ticket:992 + def setup_method(self): + np.random.seed(1234) + + def test_noexception(self): + rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5), + size=(10, 5)) + assert_equal(rvs.shape, (10, 5)) + + +class TestDocstring(object): + def test_docstrings(self): + # See ticket #761 + if stats.rayleigh.__doc__ is not None: + assert_("rayleigh" in stats.rayleigh.__doc__.lower()) + if stats.bernoulli.__doc__ is not None: + assert_("bernoulli" in stats.bernoulli.__doc__.lower()) + + def test_no_name_arg(self): + # If name is not given, construction shouldn't fail. See #1508. + stats.rv_continuous() + stats.rv_discrete() + + +class TestEntropy(object): + def test_entropy_positive(self): + # See ticket #497 + pk = [0.5, 0.2, 0.3] + qk = [0.1, 0.25, 0.65] + eself = stats.entropy(pk, pk) + edouble = stats.entropy(pk, qk) + assert_(0.0 == eself) + assert_(edouble >= 0.0) + + def test_entropy_base(self): + pk = np.ones(16, float) + S = stats.entropy(pk, base=2.) + assert_(abs(S - 4.) < 1.e-5) + + qk = np.ones(16, float) + qk[:8] = 2. + S = stats.entropy(pk, qk) + S2 = stats.entropy(pk, qk, base=2.) 
+ assert_(abs(S/S2 - np.log(2.)) < 1.e-5) + + def test_entropy_zero(self): + # Test for PR-479 + assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278, + decimal=12) + + def test_entropy_2d(self): + pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] + qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]] + assert_array_almost_equal(stats.entropy(pk, qk), + [0.1933259, 0.18609809]) + + def test_entropy_2d_zero(self): + pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] + qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]] + assert_array_almost_equal(stats.entropy(pk, qk), + [np.inf, 0.18609809]) + + pk[0][0] = 0.0 + assert_array_almost_equal(stats.entropy(pk, qk), + [0.17403988, 0.18609809]) + + +def TestArgsreduce(): + a = array([1, 3, 2, 1, 2, 3, 3]) + b, c = argsreduce(a > 1, a, 2) + + assert_array_equal(b, [3, 2, 2, 3, 3]) + assert_array_equal(c, [2, 2, 2, 2, 2]) + + b, c = argsreduce(2 > 1, a, 2) + assert_array_equal(b, a[0]) + assert_array_equal(c, [2]) + + b, c = argsreduce(a > 0, a, 2) + assert_array_equal(b, a) + assert_array_equal(c, [2] * numpy.size(a)) + + +class TestFitMethod(object): + skip = ['ncf'] + + def setup_method(self): + np.random.seed(1234) + + @pytest.mark.slow + @pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions()) + def test_fit(self, dist, args, alpha): + if dist in self.skip: + pytest.skip("%s fit known to fail" % dist) + distfunc = getattr(stats, dist) + with np.errstate(all='ignore'), suppress_warnings() as sup: + sup.filter(category=DeprecationWarning, message=".*frechet_") + res = distfunc.rvs(*args, **{'size': 200}) + vals = distfunc.fit(res) + vals2 = distfunc.fit(res, optimizer='powell') + # Only check the length of the return + # FIXME: should check the actual results to see if we are 'close' + # to what was created --- but what is 'close' enough + assert_(len(vals) == 2+len(args)) + assert_(len(vals2) == 2+len(args)) + + @pytest.mark.slow + @pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions()) + def test_fix_fit(self, dist, args, alpha): + # Not sure why 'ncf', and 'beta' are failing + # frechet has different len(args) than distfunc.numargs + if dist in self.skip + ['frechet']: + pytest.skip("%s fit known to fail" % dist) + distfunc = getattr(stats, dist) + with np.errstate(all='ignore'), suppress_warnings() as sup: + sup.filter(category=DeprecationWarning, message=".*frechet_") + res = distfunc.rvs(*args, **{'size': 200}) + vals = distfunc.fit(res, floc=0) + vals2 = distfunc.fit(res, fscale=1) + assert_(len(vals) == 2+len(args)) + assert_(vals[-2] == 0) + assert_(vals2[-1] == 1) + assert_(len(vals2) == 2+len(args)) + if len(args) > 0: + vals3 = distfunc.fit(res, f0=args[0]) + assert_(len(vals3) == 2+len(args)) + assert_(vals3[0] == args[0]) + if len(args) > 1: + vals4 = distfunc.fit(res, f1=args[1]) + assert_(len(vals4) == 2+len(args)) + assert_(vals4[1] == args[1]) + if len(args) > 2: + vals5 = distfunc.fit(res, f2=args[2]) + assert_(len(vals5) == 2+len(args)) + assert_(vals5[2] == args[2]) + + def test_fix_fit_2args_lognorm(self): + # Regression test for #1551. 
+ np.random.seed(12345) + with np.errstate(all='ignore'): + x = stats.lognorm.rvs(0.25, 0., 20.0, size=20) + expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean()) + assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)), + [expected_shape, 0, 20], atol=1e-8) + + def test_fix_fit_norm(self): + x = np.arange(1, 6) + + loc, scale = stats.norm.fit(x) + assert_almost_equal(loc, 3) + assert_almost_equal(scale, np.sqrt(2)) + + loc, scale = stats.norm.fit(x, floc=2) + assert_equal(loc, 2) + assert_equal(scale, np.sqrt(3)) + + loc, scale = stats.norm.fit(x, fscale=2) + assert_almost_equal(loc, 3) + assert_equal(scale, 2) + + def test_fix_fit_gamma(self): + x = np.arange(1, 6) + meanlog = np.log(x).mean() + + # A basic test of gamma.fit with floc=0. + floc = 0 + a, loc, scale = stats.gamma.fit(x, floc=floc) + s = np.log(x.mean()) - meanlog + assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5) + assert_equal(loc, floc) + assert_almost_equal(scale, x.mean()/a, decimal=8) + + # Regression tests for gh-2514. + # The problem was that if `floc=0` was given, any other fixed + # parameters were ignored. + f0 = 1 + floc = 0 + a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc) + assert_equal(a, f0) + assert_equal(loc, floc) + assert_almost_equal(scale, x.mean()/a, decimal=8) + + f0 = 2 + floc = 0 + a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc) + assert_equal(a, f0) + assert_equal(loc, floc) + assert_almost_equal(scale, x.mean()/a, decimal=8) + + # loc and scale fixed. + floc = 0 + fscale = 2 + a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale) + assert_equal(loc, floc) + assert_equal(scale, fscale) + c = meanlog - np.log(fscale) + assert_almost_equal(special.digamma(a), c) + + def test_fix_fit_beta(self): + # Test beta.fit when both floc and fscale are given. + + def mlefunc(a, b, x): + # Zeros of this function are critical points of + # the maximum likelihood function. + n = len(x) + s1 = np.log(x).sum() + s2 = np.log(1-x).sum() + psiab = special.psi(a + b) + func = [s1 - n * (-psiab + special.psi(a)), + s2 - n * (-psiab + special.psi(b))] + return func + + # Basic test with floc and fscale given. + x = np.array([0.125, 0.25, 0.5]) + a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1) + assert_equal(loc, 0) + assert_equal(scale, 1) + assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6) + + # Basic test with f0, floc and fscale given. + # This is also a regression test for gh-2514. + x = np.array([0.125, 0.25, 0.5]) + a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1) + assert_equal(a, 2) + assert_equal(loc, 0) + assert_equal(scale, 1) + da, db = mlefunc(a, b, x) + assert_allclose(db, 0, atol=1e-5) + + # Same floc and fscale values as above, but reverse the data + # and fix b (f1). + x2 = 1 - x + a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1) + assert_equal(b2, 2) + assert_equal(loc2, 0) + assert_equal(scale2, 1) + da, db = mlefunc(a2, b2, x2) + assert_allclose(da, 0, atol=1e-5) + # a2 of this test should equal b from above. + assert_almost_equal(a2, b) + + # Check for detection of data out of bounds when floc and fscale + # are given. + assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1) + y = np.array([0, .5, 1]) + assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1) + assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2) + assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2) + + # Check that attempting to fix all the parameters raises a ValueError. 
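+        # (with f0, f1, floc and fscale all fixed there would be nothing
+        # left to optimize, so fit must refuse the request)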
+ assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1, + floc=2, fscale=3) + + def test_expon_fit(self): + x = np.array([2, 2, 4, 4, 4, 4, 4, 8]) + + loc, scale = stats.expon.fit(x) + assert_equal(loc, 2) # x.min() + assert_equal(scale, 2) # x.mean() - x.min() + + loc, scale = stats.expon.fit(x, fscale=3) + assert_equal(loc, 2) # x.min() + assert_equal(scale, 3) # fscale + + loc, scale = stats.expon.fit(x, floc=0) + assert_equal(loc, 0) # floc + assert_equal(scale, 4) # x.mean() - loc + + def test_lognorm_fit(self): + x = np.array([1.5, 3, 10, 15, 23, 59]) + lnxm1 = np.log(x - 1) + + shape, loc, scale = stats.lognorm.fit(x, floc=1) + assert_allclose(shape, lnxm1.std(), rtol=1e-12) + assert_equal(loc, 1) + assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12) + + shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6) + assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()), + rtol=1e-12) + assert_equal(loc, 1) + assert_equal(scale, 6) + + shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75) + assert_equal(shape, 0.75) + assert_equal(loc, 1) + assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12) + + def test_uniform_fit(self): + x = np.array([1.0, 1.1, 1.2, 9.0]) + + loc, scale = stats.uniform.fit(x) + assert_equal(loc, x.min()) + assert_equal(scale, x.ptp()) + + loc, scale = stats.uniform.fit(x, floc=0) + assert_equal(loc, 0) + assert_equal(scale, x.max()) + + loc, scale = stats.uniform.fit(x, fscale=10) + assert_equal(loc, 0) + assert_equal(scale, 10) + + assert_raises(ValueError, stats.uniform.fit, x, floc=2.0) + assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0) + + def test_fshapes(self): + # take a beta distribution, with shapes='a, b', and make sure that + # fa is equivalent to f0, and fb is equivalent to f1 + a, b = 3., 4. + x = stats.beta.rvs(a, b, size=100, random_state=1234) + res_1 = stats.beta.fit(x, f0=3.) + res_2 = stats.beta.fit(x, fa=3.) + assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12) + + res_2 = stats.beta.fit(x, fix_a=3.) + assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12) + + res_3 = stats.beta.fit(x, f1=4.) + res_4 = stats.beta.fit(x, fb=4.) + assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12) + + res_4 = stats.beta.fit(x, fix_b=4.) + assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12) + + # cannot specify both positional and named args at the same time + assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2) + + # check that attempting to fix all parameters raises a ValueError + assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1, + floc=2, fscale=3) + + # check that specifying floc, fscale and fshapes works for + # beta and gamma which override the generic fit method + res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1) + aa, bb, ll, ss = res_5 + assert_equal([aa, ll, ss], [3., 0, 1]) + + # gamma distribution + a = 3. + data = stats.gamma.rvs(a, size=100) + aa, ll, ss = stats.gamma.fit(data, fa=a) + assert_equal(aa, a) + + def test_extra_params(self): + # unknown parameters should raise rather than be silently ignored + dist = stats.exponnorm + data = dist.rvs(K=2, size=100) + dct = dict(enikibeniki=-101) + assert_raises(TypeError, dist.fit, data, **dct) + + +class TestFrozen(object): + def setup_method(self): + np.random.seed(1234) + + # Test that a frozen distribution gives the same results as the original + # object. + # + # Only tested for the normal distribution (with loc and scale specified) + # and for the gamma distribution (with a shape parameter specified). 
+ def test_norm(self): + dist = stats.norm + frozen = stats.norm(loc=10.0, scale=3.0) + + result_f = frozen.pdf(20.0) + result = dist.pdf(20.0, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.cdf(20.0) + result = dist.cdf(20.0, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.ppf(0.25) + result = dist.ppf(0.25, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.isf(0.25) + result = dist.isf(0.25, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.sf(10.0) + result = dist.sf(10.0, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.median() + result = dist.median(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.mean() + result = dist.mean(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.var() + result = dist.var(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.std() + result = dist.std(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.entropy() + result = dist.entropy(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.moment(2) + result = dist.moment(2, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + assert_equal(frozen.a, dist.a) + assert_equal(frozen.b, dist.b) + + def test_gamma(self): + a = 2.0 + dist = stats.gamma + frozen = stats.gamma(a) + + result_f = frozen.pdf(20.0) + result = dist.pdf(20.0, a) + assert_equal(result_f, result) + + result_f = frozen.cdf(20.0) + result = dist.cdf(20.0, a) + assert_equal(result_f, result) + + result_f = frozen.ppf(0.25) + result = dist.ppf(0.25, a) + assert_equal(result_f, result) + + result_f = frozen.isf(0.25) + result = dist.isf(0.25, a) + assert_equal(result_f, result) + + result_f = frozen.sf(10.0) + result = dist.sf(10.0, a) + assert_equal(result_f, result) + + result_f = frozen.median() + result = dist.median(a) + assert_equal(result_f, result) + + result_f = frozen.mean() + result = dist.mean(a) + assert_equal(result_f, result) + + result_f = frozen.var() + result = dist.var(a) + assert_equal(result_f, result) + + result_f = frozen.std() + result = dist.std(a) + assert_equal(result_f, result) + + result_f = frozen.entropy() + result = dist.entropy(a) + assert_equal(result_f, result) + + result_f = frozen.moment(2) + result = dist.moment(2, a) + assert_equal(result_f, result) + + assert_equal(frozen.a, frozen.dist.a) + assert_equal(frozen.b, frozen.dist.b) + + def test_regression_ticket_1293(self): + # Create a frozen distribution. + frozen = stats.lognorm(1) + # Call one of its methods that does not take any keyword arguments. + m1 = frozen.moment(2) + # Now call a method that takes a keyword argument. + frozen.stats(moments='mvsk') + # Call moment(2) again. + # After calling stats(), the following was raising an exception. + # So this test passes if the following does not raise an exception. + m2 = frozen.moment(2) + # The following should also be true, of course. But it is not + # the focus of this test. 
+ assert_equal(m1, m2) + + def test_ab(self): + # test that the support of a frozen distribution + # (i) remains frozen even if it changes for the original one + # (ii) is actually correct if the shape parameters are such that + # the values of [a, b] are not the default [0, inf] + # take a genpareto as an example where the support + # depends on the value of the shape parameter: + # for c > 0: a, b = 0, inf + # for c < 0: a, b = 0, -1/c + rv = stats.genpareto(c=-0.1) + a, b = rv.dist.a, rv.dist.b + assert_equal([a, b], [0., 10.]) + assert_equal([rv.a, rv.b], [0., 10.]) + + stats.genpareto.pdf(0, c=0.1) # this changes genpareto.b + assert_equal([rv.dist.a, rv.dist.b], [a, b]) + assert_equal([rv.a, rv.b], [a, b]) + + rv1 = stats.genpareto(c=0.1) + assert_(rv1.dist is not rv.dist) + + def test_rv_frozen_in_namespace(self): + # Regression test for gh-3522 + assert_(hasattr(stats.distributions, 'rv_frozen')) + + def test_random_state(self): + # only check that the random_state attribute exists, + frozen = stats.norm() + assert_(hasattr(frozen, 'random_state')) + + # ... that it can be set, + frozen.random_state = 42 + assert_equal(frozen.random_state.get_state(), + np.random.RandomState(42).get_state()) + + # ... and that .rvs method accepts it as an argument + rndm = np.random.RandomState(1234) + frozen.rvs(size=8, random_state=rndm) + + def test_pickling(self): + # test that a frozen instance pickles and unpickles + # (this method is a clone of common_tests.check_pickling) + beta = stats.beta(2.3098496451481823, 0.62687954300963677) + poiss = stats.poisson(3.) + sample = stats.rv_discrete(values=([0, 1, 2, 3], + [0.1, 0.2, 0.3, 0.4])) + + for distfn in [beta, poiss, sample]: + distfn.random_state = 1234 + distfn.rvs(size=8) + s = pickle.dumps(distfn) + r0 = distfn.rvs(size=8) + + unpickled = pickle.loads(s) + r1 = unpickled.rvs(size=8) + assert_equal(r0, r1) + + # also smoke test some methods + medians = [distfn.ppf(0.5), unpickled.ppf(0.5)] + assert_equal(medians[0], medians[1]) + assert_equal(distfn.cdf(medians[0]), + unpickled.cdf(medians[1])) + + def test_expect(self): + # smoke test the expect method of the frozen distribution + # only take a gamma w/loc and scale and poisson with loc specified + def func(x): + return x + + gm = stats.gamma(a=2, loc=3, scale=4) + gm_val = gm.expect(func, lb=1, ub=2, conditional=True) + gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4, + lb=1, ub=2, conditional=True) + assert_allclose(gm_val, gamma_val) + + p = stats.poisson(3, loc=4) + p_val = p.expect(func) + poisson_val = stats.poisson.expect(func, args=(3,), loc=4) + assert_allclose(p_val, poisson_val) + + +class TestExpect(object): + # Test for expect method. 
+ # + # Uses normal distribution and beta distribution for finite bounds, and + # hypergeom for discrete distribution with finite support + def test_norm(self): + v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2) + assert_almost_equal(v, 4, decimal=14) + + m = stats.norm.expect(lambda x: (x), loc=5, scale=2) + assert_almost_equal(m, 5, decimal=14) + + lb = stats.norm.ppf(0.05, loc=5, scale=2) + ub = stats.norm.ppf(0.95, loc=5, scale=2) + prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub) + assert_almost_equal(prob90, 0.9, decimal=14) + + prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub, + conditional=True) + assert_almost_equal(prob90c, 1., decimal=14) + + def test_beta(self): + # case with finite support interval + v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5), + loc=5, scale=2) + assert_almost_equal(v, 1./18., decimal=13) + + m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.) + assert_almost_equal(m, 19/3., decimal=13) + + ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2) + lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2) + prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5., + scale=2., lb=lb, ub=ub, conditional=False) + assert_almost_equal(prob90, 0.9, decimal=13) + + prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5, + scale=2, lb=lb, ub=ub, conditional=True) + assert_almost_equal(prob90c, 1., decimal=13) + + def test_hypergeom(self): + # test case with finite bounds + + # without specifying bounds + m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.) + m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.) + assert_almost_equal(m, m_true, decimal=13) + + v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8), + loc=5.) 
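+        # (the shifted mean is 8*10/20 + 5 = 9, so the integrand above is
+        # the squared deviation from the mean, i.e. this computes the
+        # variance)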
+        assert_almost_equal(v, v_true, decimal=14)
+
+        # with bounds, bounds equal to shifted support
+        v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
+                                          args=(20, 10, 8),
+                                          loc=5., lb=5, ub=13)
+        assert_almost_equal(v_bounds, v_true, decimal=14)
+
+        # drop boundary points
+        prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
+        prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
+                                             loc=5., lb=6, ub=12)
+        assert_almost_equal(prob_bounds, prob_true, decimal=13)
+
+        # conditional
+        prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
+                                         lb=6, ub=12, conditional=True)
+        assert_almost_equal(prob_bc, 1, decimal=14)
+
+        # check simple integral
+        prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
+                                        lb=0, ub=8)
+        assert_almost_equal(prob_b, 1, decimal=13)
+
+    def test_poisson(self):
+        # poisson, use lower bound only
+        prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
+                                           conditional=False)
+        prob_b_true = 1-stats.poisson.cdf(2, 2)
+        assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
+
+        prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
+                                       conditional=True)
+        assert_almost_equal(prob_lb, 1, decimal=14)
+
+    def test_genhalflogistic(self):
+        # genhalflogistic, changes upper bound of support in _argcheck
+        # regression test for gh-2622
+        halflog = stats.genhalflogistic
+        # check consistency when calling expect twice with the same input
+        res1 = halflog.expect(args=(1.5,))
+        halflog.expect(args=(0.5,))
+        res2 = halflog.expect(args=(1.5,))
+        assert_almost_equal(res1, res2, decimal=14)
+
+    def test_rice_overflow(self):
+        # rice.pdf(999, 0.74) was inf since special.i0 silently overflows
+        # check that using i0e fixes it
+        assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
+
+        assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
+        assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
+        assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
+
+    def test_logser(self):
+        # test a discrete distribution with infinite support and loc
+        p, loc = 0.3, 3
+        res_0 = stats.logser.expect(lambda k: k, args=(p,))
+        # check against the correct answer (sum of a geom series)
+        assert_allclose(res_0,
+                        p / (p - 1.) / np.log(1. - p), atol=1e-15)
+
+        # now check it with `loc`
+        res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
+        assert_allclose(res_l, res_0 + loc, atol=1e-15)
+
+    def test_skellam(self):
+        # Use a discrete distribution w/ bi-infinite support.
Compute two first + # moments and compare to known values (cf skellam.stats) + p1, p2 = 18, 22 + m1 = stats.skellam.expect(lambda x: x, args=(p1, p2)) + m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2)) + assert_allclose(m1, p1 - p2, atol=1e-12) + assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12) + + def test_randint(self): + # Use a discrete distribution w/ parameter-dependent support, which + # is larger than the default chunksize + lo, hi = 0, 113 + res = stats.randint.expect(lambda x: x, (lo, hi)) + assert_allclose(res, + sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15) + + def test_zipf(self): + # Test that there is no infinite loop even if the sum diverges + assert_warns(RuntimeWarning, stats.zipf.expect, + lambda x: x**2, (2,)) + + def test_discrete_kwds(self): + # check that discrete expect accepts keywords to control the summation + n0 = stats.poisson.expect(lambda x: 1, args=(2,)) + n1 = stats.poisson.expect(lambda x: 1, args=(2,), + maxcount=1001, chunksize=32, tolerance=1e-8) + assert_almost_equal(n0, n1, decimal=14) + + def test_moment(self): + # test the .moment() method: compute a higher moment and compare to + # a known value + def poiss_moment5(mu): + return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu + + for mu in [5, 7]: + m5 = stats.poisson.moment(5, mu) + assert_allclose(m5, poiss_moment5(mu), rtol=1e-10) + + +class TestNct(object): + def test_nc_parameter(self): + # Parameter values c<=0 were not enabled (gh-2402). + # For negative values c and for c=0 results of rv.cdf(0) below were nan + rv = stats.nct(5, 0) + assert_equal(rv.cdf(0), 0.5) + rv = stats.nct(5, -1) + assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10) + + def test_broadcasting(self): + res = stats.nct.pdf(5, np.arange(4, 7)[:, None], + np.linspace(0.1, 1, 4)) + expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997], + [0.00217142, 0.00395366, 0.00683888, 0.01126276], + [0.00153078, 0.00291093, 0.00525206, 0.00900815]]) + assert_allclose(res, expected, rtol=1e-5) + + def test_variance_gh_issue_2401(self): + # Computation of the variance of a non-central t-distribution resulted + # in a TypeError: ufunc 'isinf' not supported for the input types, + # and the inputs could not be safely coerced to any supported types + # according to the casting rule 'safe' + rv = stats.nct(4, 0) + assert_equal(rv.var(), 2.0) + + def test_nct_inf_moments(self): + # n-th moment of nct only exists for df > n + m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk') + assert_(np.isfinite(m)) + assert_equal([v, s, k], [np.inf, np.nan, np.nan]) + + m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk') + assert_(np.isfinite([m, v, s]).all()) + assert_equal(k, np.nan) + + +class TestRice(object): + def test_rice_zero_b(self): + # rice distribution should work with b=0, cf gh-2164 + x = [0.2, 1., 5.] + assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all()) + assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all()) + assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all()) + assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all()) + + q = [0.1, 0.1, 0.5, 0.9] + assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all()) + + mvsk = stats.rice.stats(0, moments='mvsk') + assert_(np.isfinite(mvsk).all()) + + # furthermore, pdf is continuous as b\to 0 + # rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2) + # see e.g. 
Abramovich & Stegun 9.6.7 & 9.6.10 + b = 1e-8 + assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b), + atol=b, rtol=0) + + def test_rice_rvs(self): + rvs = stats.rice.rvs + assert_equal(rvs(b=3.).size, 1) + assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5)) + + +class TestErlang(object): + def setup_method(self): + np.random.seed(1234) + + def test_erlang_runtimewarning(self): + # erlang should generate a RuntimeWarning if a non-integer + # shape parameter is used. + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + + # The non-integer shape parameter 1.3 should trigger a + # RuntimeWarning + assert_raises(RuntimeWarning, + stats.erlang.rvs, 1.3, loc=0, scale=1, size=4) + + # Calling the fit method with `f0` set to an integer should + # *not* trigger a RuntimeWarning. It should return the same + # values as gamma.fit(...). + data = [0.5, 1.0, 2.0, 4.0] + result_erlang = stats.erlang.fit(data, f0=1) + result_gamma = stats.gamma.fit(data, f0=1) + assert_allclose(result_erlang, result_gamma, rtol=1e-3) + + +class TestRayleigh(object): + # gh-6227 + def test_logpdf(self): + y = stats.rayleigh.logpdf(50) + assert_allclose(y, -1246.0879769945718) + + def test_logsf(self): + y = stats.rayleigh.logsf(50) + assert_allclose(y, -1250) + + +class TestExponWeib(object): + + def test_pdf_logpdf(self): + # Regression test for gh-3508. + x = 0.1 + a = 1.0 + c = 100.0 + p = stats.exponweib.pdf(x, a, c) + logp = stats.exponweib.logpdf(x, a, c) + # Expected values were computed with mpmath. + assert_allclose([p, logp], + [1.0000000000000054e-97, -223.35075402042244]) + + def test_a_is_1(self): + # For issue gh-3508. + # Check that when a=1, the pdf and logpdf methods of exponweib are the + # same as those of weibull_min. + x = np.logspace(-4, -1, 4) + a = 1 + c = 100 + + p = stats.exponweib.pdf(x, a, c) + expected = stats.weibull_min.pdf(x, c) + assert_allclose(p, expected) + + logp = stats.exponweib.logpdf(x, a, c) + expected = stats.weibull_min.logpdf(x, c) + assert_allclose(logp, expected) + + def test_a_is_1_c_is_1(self): + # When a = 1 and c = 1, the distribution is exponential. + x = np.logspace(-8, 1, 10) + a = 1 + c = 1 + + p = stats.exponweib.pdf(x, a, c) + expected = stats.expon.pdf(x) + assert_allclose(p, expected) + + logp = stats.exponweib.logpdf(x, a, c) + expected = stats.expon.logpdf(x) + assert_allclose(logp, expected) + + +class TestWeibull(object): + + def test_logpdf(self): + # gh-6217 + y = stats.weibull_min.logpdf(0, 1) + assert_equal(y, 0) + + def test_with_maxima_distrib(self): + # Tests for weibull_min and weibull_max. + # The expected values were computed using the symbolic algebra + # program 'maxima' with the package 'distrib', which has + # 'pdf_weibull' and 'cdf_weibull'. 
The mapping between the + # scipy and maxima functions is as follows: + # ----------------------------------------------------------------- + # scipy maxima + # --------------------------------- ------------------------------ + # weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b) + # weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b)) + # weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b) + # weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b)) + # weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b) + # weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b)) + # + # weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b) + # weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b)) + # weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b) + # weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b)) + # weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b) + # weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b)) + # ----------------------------------------------------------------- + x = 1.5 + a = 2.0 + b = 3.0 + + # weibull_min + + p = stats.weibull_min.pdf(x, a, scale=b) + assert_allclose(p, np.exp(-0.25)/3) + + lp = stats.weibull_min.logpdf(x, a, scale=b) + assert_allclose(lp, -0.25 - np.log(3)) + + c = stats.weibull_min.cdf(x, a, scale=b) + assert_allclose(c, -special.expm1(-0.25)) + + lc = stats.weibull_min.logcdf(x, a, scale=b) + assert_allclose(lc, np.log(-special.expm1(-0.25))) + + s = stats.weibull_min.sf(x, a, scale=b) + assert_allclose(s, np.exp(-0.25)) + + ls = stats.weibull_min.logsf(x, a, scale=b) + assert_allclose(ls, -0.25) + + # Also test using a large value x, for which computing the survival + # function using the CDF would result in 0. + s = stats.weibull_min.sf(30, 2, scale=3) + assert_allclose(s, np.exp(-100)) + + ls = stats.weibull_min.logsf(30, 2, scale=3) + assert_allclose(ls, -100) + + # weibull_max + x = -1.5 + + p = stats.weibull_max.pdf(x, a, scale=b) + assert_allclose(p, np.exp(-0.25)/3) + + lp = stats.weibull_max.logpdf(x, a, scale=b) + assert_allclose(lp, -0.25 - np.log(3)) + + c = stats.weibull_max.cdf(x, a, scale=b) + assert_allclose(c, np.exp(-0.25)) + + lc = stats.weibull_max.logcdf(x, a, scale=b) + assert_allclose(lc, -0.25) + + s = stats.weibull_max.sf(x, a, scale=b) + assert_allclose(s, -special.expm1(-0.25)) + + ls = stats.weibull_max.logsf(x, a, scale=b) + assert_allclose(ls, np.log(-special.expm1(-0.25))) + + # Also test using a value of x close to 0, for which computing the + # survival function using the CDF would result in 0. + s = stats.weibull_max.sf(-1e-9, 2, scale=3) + assert_allclose(s, -special.expm1(-1/9000000000000000000)) + + ls = stats.weibull_max.logsf(-1e-9, 2, scale=3) + assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000))) + + +class TestRdist(object): + @pytest.mark.slow + def test_rdist_cdf_gh1285(self): + # check workaround in rdist._cdf for issue gh-1285. 
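+        # (round-tripping ppf -> cdf at the large shape parameter 541 should
+        # recover the input probabilities to about 5 decimal places)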
+ distfn = stats.rdist + values = [0.001, 0.5, 0.999] + assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0), + values, decimal=5) + + +class TestTrapz(object): + def test_reduces_to_triang(self): + modes = [0, 0.3, 0.5, 1] + for mode in modes: + x = [0, mode, 1] + assert_almost_equal(stats.trapz.pdf(x, mode, mode), + stats.triang.pdf(x, mode)) + assert_almost_equal(stats.trapz.cdf(x, mode, mode), + stats.triang.cdf(x, mode)) + + def test_reduces_to_uniform(self): + x = np.linspace(0, 1, 10) + assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x)) + assert_almost_equal(stats.trapz.cdf(x, 0, 1), stats.uniform.cdf(x)) + + def test_cases(self): + # edge cases + assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2) + assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2) + assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8), + 1.11111111111111111) + assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0), + 1.11111111111111111) + + # straightforward case + assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625) + assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25) + assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625) + + assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125) + assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125) + assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5) + assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875) + assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0) + + def test_trapz_vect(self): + # test that array-valued shapes and arguments are handled + c = np.array([0.1, 0.2, 0.3]) + d = np.array([0.5, 0.6])[:, None] + x = np.array([0.15, 0.25, 0.9]) + v = stats.trapz.pdf(x, c, d) + + cc, dd, xx = np.broadcast_arrays(c, d, x) + + res = np.empty(xx.size, dtype=xx.dtype) + ind = np.arange(xx.size) + for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()): + res[i] = stats.trapz.pdf(x1, c1, d1) + + assert_allclose(v, res.reshape(v.shape), atol=1e-15) + + +class TestTriang(object): + def test_edge_cases(self): + with np.errstate(all='raise'): + assert_equal(stats.triang.pdf(0, 0), 2.) + assert_equal(stats.triang.pdf(0.5, 0), 1.) + assert_equal(stats.triang.pdf(1, 0), 0.) + + assert_equal(stats.triang.pdf(0, 1), 0) + assert_equal(stats.triang.pdf(0.5, 1), 1.) + assert_equal(stats.triang.pdf(1, 1), 2) + + assert_equal(stats.triang.cdf(0., 0.), 0.) + assert_equal(stats.triang.cdf(0.5, 0.), 0.75) + assert_equal(stats.triang.cdf(1.0, 0.), 1.0) + + assert_equal(stats.triang.cdf(0., 1.), 0.) + assert_equal(stats.triang.cdf(0.5, 1.), 0.25) + assert_equal(stats.triang.cdf(1., 1.), 1) + + +def test_540_567(): + # test for nan returned in tickets 540, 567 + assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126, + decimal=10, err_msg='test_540_567') + assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846, + decimal=10, err_msg='test_540_567') + assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309, + scale=0.204423758009), + 0.98353464004309321, + decimal=10, err_msg='test_540_567') + + +def test_regression_ticket_1316(): + # The following was raising an exception, because _construct_default_doc() + # did not handle the default keyword extradoc=None. See ticket #1316. 
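+    # (constructing the distribution is itself the test: it passes as long
+    # as no exception is raised)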
+ g = stats._continuous_distns.gamma_gen(name='gamma') + + +def test_regression_ticket_1326(): + # adjust to avoid nan with 0*log(0) + assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14) + + +def test_regression_tukey_lambda(): + # Make sure that Tukey-Lambda distribution correctly handles + # non-positive lambdas. + x = np.linspace(-5.0, 5.0, 101) + + olderr = np.seterr(divide='ignore') + try: + for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]: + p = stats.tukeylambda.pdf(x, lam) + assert_((p != 0.0).all()) + assert_(~np.isnan(p).all()) + + lam = np.array([[-1.0], [0.0], [2.0]]) + p = stats.tukeylambda.pdf(x, lam) + finally: + np.seterr(**olderr) + + assert_(~np.isnan(p).all()) + assert_((p[0] != 0.0).all()) + assert_((p[1] != 0.0).all()) + assert_((p[2] != 0.0).any()) + assert_((p[2] == 0.0).any()) + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") +def test_regression_ticket_1421(): + assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__) + assert_('pmf(x,' in stats.poisson.__doc__) + + +def test_nan_arguments_gh_issue_1362(): + with np.errstate(invalid='ignore'): + assert_(np.isnan(stats.t.logcdf(1, np.nan))) + assert_(np.isnan(stats.t.cdf(1, np.nan))) + assert_(np.isnan(stats.t.logsf(1, np.nan))) + assert_(np.isnan(stats.t.sf(1, np.nan))) + assert_(np.isnan(stats.t.pdf(1, np.nan))) + assert_(np.isnan(stats.t.logpdf(1, np.nan))) + assert_(np.isnan(stats.t.ppf(1, np.nan))) + assert_(np.isnan(stats.t.isf(1, np.nan))) + + assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5))) + + +def test_frozen_fit_ticket_1536(): + np.random.seed(5678) + true = np.array([0.25, 0., 0.5]) + x = stats.lognorm.rvs(true[0], true[1], true[2], size=100) + + olderr = np.seterr(divide='ignore') + try: + params = np.array(stats.lognorm.fit(x, floc=0.)) + finally: + np.seterr(**olderr) + + assert_almost_equal(params, true, decimal=2) + + params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0)) + assert_almost_equal(params, true, decimal=2) + + params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0)) + assert_almost_equal(params, true, decimal=2) + + params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0)) + assert_almost_equal(params, true, decimal=2) + + np.random.seed(5678) + loc = 1 + floc = 0.9 + x = stats.norm.rvs(loc, 2., size=100) + params = np.array(stats.norm.fit(x, floc=floc)) + expected = np.array([floc, np.sqrt(((x-floc)**2).mean())]) + assert_almost_equal(params, expected, decimal=4) + + +def test_regression_ticket_1530(): + # Check the starting value works for Cauchy distribution fit. + np.random.seed(654321) + rvs = stats.cauchy.rvs(size=100) + params = stats.cauchy.fit(rvs) + expected = (0.045, 1.142) + assert_almost_equal(params, expected, decimal=1) + + +def test_gh_pr_4806(): + # Check starting values for Cauchy distribution fit. + np.random.seed(1234) + x = np.random.randn(42) + for offset in 10000.0, 1222333444.0: + loc, scale = stats.cauchy.fit(x + offset) + assert_allclose(loc, offset, atol=1.0) + assert_allclose(scale, 0.6, atol=1.0) + + +def test_tukeylambda_stats_ticket_1545(): + # Some test for the variance and kurtosis of the Tukey Lambda distr. 
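+    # At lam = 0 the Tukey lambda distribution reduces to the logistic
+    # distribution, whose variance is pi**2/3 and whose excess kurtosis
+    # is 6/5 = 1.2; these are the exact values asserted first below.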
+    # See test_tukeylambda_stats.py for more tests.

+    mv = stats.tukeylambda.stats(0, moments='mvsk')
+    # Known exact values:
+    expected = [0, np.pi**2/3, 0, 1.2]
+    assert_almost_equal(mv, expected, decimal=10)
+
+    mv = stats.tukeylambda.stats(3.13, moments='mvsk')
+    # 'expected' computed with mpmath.
+    expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
+    assert_almost_equal(mv, expected, decimal=10)
+
+    mv = stats.tukeylambda.stats(0.14, moments='mvsk')
+    # 'expected' computed with mpmath.
+    expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
+    assert_almost_equal(mv, expected, decimal=10)
+
+
+def test_poisson_logpmf_ticket_1436():
+    assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
+
+
+def test_powerlaw_stats():
+    """Test the powerlaw stats function.
+
+    This unit test is also a regression test for ticket 1548.
+
+    The exact values are:
+    mean:
+        mu = a / (a + 1)
+    variance:
+        sigma**2 = a / ((a + 2) * (a + 1) ** 2)
+    skewness:
+        One formula (see https://en.wikipedia.org/wiki/Skewness) is
+            gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
+        A short calculation shows that E[X**k] is a / (a + k), so gamma_1
+        can be implemented as
+            n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
+            d = sqrt(a/((a+2)*(a+1)**2)) ** 3
+            gamma_1 = n/d
+        Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
+        one gets the more concise formula:
+            gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
+    kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
+        The excess kurtosis is
+            gamma_2 = mu_4 / sigma**4 - 3
+        A bit of calculus and algebra (sympy helps) shows that
+            mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
+        so
+            gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
+        which can be rearranged to
+            gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
+    """
+    cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
+             (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
+    for a, exact_mvsk in cases:
+        mvsk = stats.powerlaw.stats(a, moments="mvsk")
+        assert_array_almost_equal(mvsk, exact_mvsk)
+
+
+def test_powerlaw_edge():
+    # Regression test for gh-3986.
+    p = stats.powerlaw.logpdf(0, 1)
+    assert_equal(p, 0.0)
+
+
+def test_exponpow_edge():
+    # Regression test for gh-3982.
+    p = stats.exponpow.logpdf(0, 1)
+    assert_equal(p, 0.0)
+
+    # Check pdf and logpdf at x = 0 for other values of b.
+    p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
+    assert_equal(p, [np.inf, 1.0, 0.0])
+    p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
+    assert_equal(p, [np.inf, 0.0, -np.inf])
+
+
+def test_gengamma_edge():
+    # Regression test for gh-3985.
+    p = stats.gengamma.pdf(0, 1, 1)
+    assert_equal(p, 1.0)
+
+    # Regression tests for gh-4724.
+    p = stats.gengamma._munp(-2, 200, 1.)
+    assert_almost_equal(p, 1./199/198)
+
+    p = stats.gengamma._munp(-2, 10, 1.)
+    assert_almost_equal(p, 1./9/8)
+
+
+def test_ksone_fit_freeze():
+    # Regression test for ticket #1638.
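+    # The fit used to hang on data like the sample below; the test passes
+    # as long as ksone.fit returns at all, so nothing is asserted about
+    # the fitted parameters and the warnings raised along the way are
+    # filtered out.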
+ d = np.array( + [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649, + -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294, + 0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048, + 0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724, + 0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662, + 0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391, + -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725, + -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199, + -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746, + -0.06037974, 0.37670779, -0.21684405]) + + try: + olderr = np.seterr(invalid='ignore') + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, + "The maximum number of subdivisions .50. has been " + "achieved.") + sup.filter(RuntimeWarning, + "floating point number truncated to an integer") + stats.ksone.fit(d) + finally: + np.seterr(**olderr) + + +def test_norm_logcdf(): + # Test precision of the logcdf of the normal distribution. + # This precision was enhanced in ticket 1614. + x = -np.asarray(list(range(0, 120, 4))) + # Values from R + expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300, + -131.69539607, -203.91715537, -292.09872100, -396.25241451, + -516.38564863, -652.50322759, -804.60844201, -972.70364403, + -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068, + -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493, + -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522, + -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548, + -6277.63751711, -6733.67260303] + + assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8) + + # also test the complex-valued code path + assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8) + + # test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf) + deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag + deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x)) + assert_allclose(deriv, deriv_expected, atol=1e-10) + + +def test_levy_cdf_ppf(): + # Test levy.cdf, including small arguments. + x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001]) + + # Expected values were calculated separately with mpmath. + # E.g. + # >>> mpmath.mp.dps = 100 + # >>> x = mpmath.mp.mpf('0.01') + # >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x))) + expected = np.array([0.9747728793699604, + 0.3173105078629141, + 0.1572992070502851, + 0.0015654022580025495, + 1.523970604832105e-23, + 1.795832784800726e-219]) + + y = stats.levy.cdf(x) + assert_allclose(y, expected, rtol=1e-10) + + # ppf(expected) should get us back to x. + xx = stats.levy.ppf(expected) + assert_allclose(xx, x, rtol=1e-13) + + +def test_hypergeom_interval_1802(): + # these two had endless loops + assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757), + (152.0, 197.0)) + assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757), + (152.0, 197.0)) + # this was working also before + assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757), + (153.0, 196.0)) + + # degenerate case .a == .b + assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8) + assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8) + + +def test_distribution_too_many_args(): + np.random.seed(1234) + + # Check that a TypeError is raised when too many args are given to a method + # Regression test for ticket 1815. 
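+    # gamma has a single shape parameter a plus the generic loc and scale,
+    # so pdf accepts at most (x, a, loc, scale).  A sketch of the rule
+    # being tested (argument values illustrative):
+    # >>> stats.gamma.pdf(0.5, 2, 3, 4)     # a=2, loc=3, scale=4: fine
+    # >>> stats.gamma.pdf(0.5, 2, 3, 4, 5)  # fifth argument: TypeError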
+ x = np.linspace(0.1, 0.7, num=5) + assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0) + assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0) + assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5) + assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5) + + # These should not give errors + stats.gamma.pdf(x, 2, 3) # loc=3 + stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4 + stats.gamma.stats(2., 3) + stats.gamma.stats(2., 3, 4) + stats.gamma.stats(2., 3, 4, 'mv') + stats.gamma.rvs(2., 3, 4, 5) + stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.) + + # Also for a discrete distribution + stats.geom.pmf(x, 2, loc=3) # no error, loc=3 + assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4) + assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4) + + # And for distributions with 0, 2 and 3 args respectively + assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0) + assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0) + assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1) + assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0) + assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5) + stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale + + +def test_ncx2_tails_ticket_955(): + # Trac #955 -- check that the cdf computed by special functions + # matches the integrated pdf + a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02) + b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02) + assert_allclose(a, b, rtol=1e-3, atol=0) + + +def test_ncx2_tails_pdf(): + # ncx2.pdf does not return nans in extreme tails(example from gh-1577) + # NB: this is to check that nan_to_num is not needed in ncx2.pdf + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered in log") + assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0) + logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2) + + assert_(np.isneginf(logval).all()) + + +def test_foldnorm_zero(): + # Parameter value c=0 was not enabled, see gh-2399. + rv = stats.foldnorm(0, scale=1) + assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan + + +def test_stats_shapes_argcheck(): + # stats method was failing for vector shapes if some of the values + # were outside of the allowed range, see gh-2678 + mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a` + mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5) + mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2) + assert_equal(mv2_augmented, mv3) + + # -1 is not a legal shape parameter + mv3 = stats.lognorm.stats([2, 2.4, -1]) + mv2 = stats.lognorm.stats([2, 2.4]) + mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2) + assert_equal(mv2_augmented, mv3) + + # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix. + # stats method with multiple shape parameters is not properly vectorized + # anyway, so some distributions may or may not fail. 
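+# For the cases exercised above, shape entries that violate a
+# distribution's constraints yield nan in the corresponding output slots
+# instead of raising.  A minimal sketch of the same idea (values
+# illustrative):
+# >>> import numpy as np
+# >>> from scipy import stats
+# >>> m, v = stats.lognorm.stats([2.0, -1.0])   # -1 is not a valid shape
+# >>> bool(np.isnan(m[1])) and bool(np.isnan(v[1]))
+# True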
+ + +# Test subclassing distributions w/ explicit shapes + +class _distr_gen(stats.rv_continuous): + def _pdf(self, x, a): + return 42 + + +class _distr2_gen(stats.rv_continuous): + def _cdf(self, x, a): + return 42 * a + x + + +class _distr3_gen(stats.rv_continuous): + def _pdf(self, x, a, b): + return a + b + + def _cdf(self, x, a): + # Different # of shape params from _pdf, to be able to check that + # inspection catches the inconsistency.""" + return 42 * a + x + + +class _distr6_gen(stats.rv_continuous): + # Two shape parameters (both _pdf and _cdf defined, consistent shapes.) + def _pdf(self, x, a, b): + return a*x + b + + def _cdf(self, x, a, b): + return 42 * a + x + + +class TestSubclassingExplicitShapes(object): + # Construct a distribution w/ explicit shapes parameter and test it. + + def test_correct_shapes(self): + dummy_distr = _distr_gen(name='dummy', shapes='a') + assert_equal(dummy_distr.pdf(1, a=1), 42) + + def test_wrong_shapes_1(self): + dummy_distr = _distr_gen(name='dummy', shapes='A') + assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1)) + + def test_wrong_shapes_2(self): + dummy_distr = _distr_gen(name='dummy', shapes='a, b, c') + dct = dict(a=1, b=2, c=3) + assert_raises(TypeError, dummy_distr.pdf, 1, **dct) + + def test_shapes_string(self): + # shapes must be a string + dct = dict(name='dummy', shapes=42) + assert_raises(TypeError, _distr_gen, **dct) + + def test_shapes_identifiers_1(self): + # shapes must be a comma-separated list of valid python identifiers + dct = dict(name='dummy', shapes='(!)') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_identifiers_2(self): + dct = dict(name='dummy', shapes='4chan') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_identifiers_3(self): + dct = dict(name='dummy', shapes='m(fti)') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_identifiers_nodefaults(self): + dct = dict(name='dummy', shapes='a=2') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_args(self): + dct = dict(name='dummy', shapes='*args') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_kwargs(self): + dct = dict(name='dummy', shapes='**kwargs') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_keywords(self): + # python keywords cannot be used for shape parameters + dct = dict(name='dummy', shapes='a, b, c, lambda') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_signature(self): + # test explicit shapes which agree w/ the signature of _pdf + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a): + return stats.norm._pdf(x) * a + + dist = _dist_gen(shapes='a') + assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2) + + def test_shapes_signature_inconsistent(self): + # test explicit shapes which do not agree w/ the signature of _pdf + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a): + return stats.norm._pdf(x) * a + + dist = _dist_gen(shapes='a, b') + assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2)) + + def test_star_args(self): + # test _pdf with only starargs + # NB: **kwargs of pdf will never reach _pdf + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, *args): + extra_kwarg = args[0] + return stats.norm._pdf(x) * extra_kwarg + + dist = _dist_gen(shapes='extra_kwarg') + assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33) + assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33) + assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33)) + + def test_star_args_2(self): 
+ # test _pdf with named & starargs + # NB: **kwargs of pdf will never reach _pdf + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, offset, *args): + extra_kwarg = args[0] + return stats.norm._pdf(x) * extra_kwarg + offset + + dist = _dist_gen(shapes='offset, extra_kwarg') + assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33), + stats.norm.pdf(0.5)*33 + 111) + assert_equal(dist.pdf(0.5, 111, 33), + stats.norm.pdf(0.5)*33 + 111) + + def test_extra_kwarg(self): + # **kwargs to _pdf are ignored. + # this is a limitation of the framework (_pdf(x, *goodargs)) + class _distr_gen(stats.rv_continuous): + def _pdf(self, x, *args, **kwargs): + # _pdf should handle *args, **kwargs itself. Here "handling" + # is ignoring *args and looking for ``extra_kwarg`` and using + # that. + extra_kwarg = kwargs.pop('extra_kwarg', 1) + return stats.norm._pdf(x) * extra_kwarg + + dist = _distr_gen(shapes='extra_kwarg') + assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1)) + + def shapes_empty_string(self): + # shapes='' is equivalent to shapes=None + class _dist_gen(stats.rv_continuous): + def _pdf(self, x): + return stats.norm.pdf(x) + + dist = _dist_gen(shapes='') + assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5)) + + +class TestSubclassingNoShapes(object): + # Construct a distribution w/o explicit shapes parameter and test it. + + def test_only__pdf(self): + dummy_distr = _distr_gen(name='dummy') + assert_equal(dummy_distr.pdf(1, a=1), 42) + + def test_only__cdf(self): + # _pdf is determined from _cdf by taking numerical derivative + dummy_distr = _distr2_gen(name='dummy') + assert_almost_equal(dummy_distr.pdf(1, a=1), 1) + + @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped") + def test_signature_inspection(self): + # check that _pdf signature inspection works correctly, and is used in + # the class docstring + dummy_distr = _distr_gen(name='dummy') + assert_equal(dummy_distr.numargs, 1) + assert_equal(dummy_distr.shapes, 'a') + res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)', + dummy_distr.__doc__) + assert_(len(res) == 1) + + @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped") + def test_signature_inspection_2args(self): + # same for 2 shape params and both _pdf and _cdf defined + dummy_distr = _distr6_gen(name='dummy') + assert_equal(dummy_distr.numargs, 2) + assert_equal(dummy_distr.shapes, 'a, b') + res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)', + dummy_distr.__doc__) + assert_(len(res) == 1) + + def test_signature_inspection_2args_incorrect_shapes(self): + # both _pdf and _cdf defined, but shapes are inconsistent: raises + assert_raises(TypeError, _distr3_gen, name='dummy') + + def test_defaults_raise(self): + # default arguments should raise + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a=42): + return 42 + assert_raises(TypeError, _dist_gen, **dict(name='dummy')) + + def test_starargs_raise(self): + # without explicit shapes, *args are not allowed + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a, *args): + return 42 + assert_raises(TypeError, _dist_gen, **dict(name='dummy')) + + def test_kwargs_raise(self): + # without explicit shapes, **kwargs are not allowed + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a, **kwargs): + return 42 + assert_raises(TypeError, _dist_gen, **dict(name='dummy')) + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped") +def test_docstrings(): + badones = [r',\s*,', r'\(\s*,', r'^\s*:'] + for distname in stats.__all__: + dist = getattr(stats, distname) 
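+        # Each regex in badones flags a malformed docstring fragment: a
+        # doubled comma, a comma right after an opening parenthesis, or a
+        # line starting with a bare colon, any of which would suggest a
+        # botched shape-parameter substitution.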
+ if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)): + for regex in badones: + assert_(re.search(regex, dist.__doc__) is None) + + +def test_infinite_input(): + assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0) + assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1) + + +def test_lomax_accuracy(): + # regression test for gh-4033 + p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1) + assert_allclose(p, 1e-100) + + +def test_gompertz_accuracy(): + # Regression test for gh-4031 + p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1) + assert_allclose(p, 1e-100) + + +def test_truncexpon_accuracy(): + # regression test for gh-4035 + p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1) + assert_allclose(p, 1e-100) + + +def test_rayleigh_accuracy(): + # regression test for gh-4034 + p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1) + assert_almost_equal(p, 9.0, decimal=15) + + +def test_genextreme_give_no_warnings(): + """regression test for gh-6219""" + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + p = stats.genextreme.cdf(.5, 0) + p = stats.genextreme.pdf(.5, 0) + p = stats.genextreme.ppf(.5, 0) + p = stats.genextreme.logpdf(-np.inf, 0.0) + number_of_warnings_thrown = len(w) + assert_equal(number_of_warnings_thrown, 0) + + +def test_genextreme_entropy(): + # regression test for gh-5181 + euler_gamma = 0.5772156649015329 + + h = stats.genextreme.entropy(-1.0) + assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14) + + h = stats.genextreme.entropy(0) + assert_allclose(h, euler_gamma + 1, rtol=1e-14) + + h = stats.genextreme.entropy(1.0) + assert_equal(h, 1) + + h = stats.genextreme.entropy(-2.0, scale=10) + assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14) + + h = stats.genextreme.entropy(10) + assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14) + + h = stats.genextreme.entropy(-10) + assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14) + + +def test_genextreme_sf_isf(): + # Expected values were computed using mpmath: + # + # import mpmath + # + # def mp_genextreme_sf(x, xi, mu=0, sigma=1): + # # Formula from wikipedia, which has a sign convention for xi that + # # is the opposite of scipy's shape parameter. 
+ # if xi != 0: + # t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi) + # else: + # t = mpmath.exp(-(x - mu)/sigma) + # return 1 - mpmath.exp(-t) + # + # >>> mpmath.mp.dps = 1000 + # >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125")) + # >>> float(s) + # 1.6777205262585625e-57 + # >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125")) + # >>> float(s) + # 1.52587890625e-21 + # >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0")) + # >>> float(s) + # 0.00034218086528426593 + + x = 1e8 + s = stats.genextreme.sf(x, -0.125) + assert_allclose(s, 1.6777205262585625e-57) + x2 = stats.genextreme.isf(s, -0.125) + assert_allclose(x2, x) + + x = 7.98 + s = stats.genextreme.sf(x, 0.125) + assert_allclose(s, 1.52587890625e-21) + x2 = stats.genextreme.isf(s, 0.125) + assert_allclose(x2, x) + + x = 7.98 + s = stats.genextreme.sf(x, 0) + assert_allclose(s, 0.00034218086528426593) + x2 = stats.genextreme.isf(s, 0) + assert_allclose(x2, x) + + +def test_burr12_ppf_small_arg(): + prob = 1e-16 + quantile = stats.burr12.ppf(prob, 2, 3) + # The expected quantile was computed using mpmath: + # >>> import mpmath + # >>> mpmath.mp.dps = 100 + # >>> prob = mpmath.mpf('1e-16') + # >>> c = mpmath.mpf(2) + # >>> d = mpmath.mpf(3) + # >>> float(((1-prob)**(-1/d) - 1)**(1/c)) + # 5.7735026918962575e-09 + assert_allclose(quantile, 5.7735026918962575e-09) + + +def test_crystalball_function(): + """ + All values are calculated using the independent implementation of the + ROOT framework (see https://root.cern.ch/). + Corresponding ROOT code is given in the comments. + """ + X = np.linspace(-5.0, 5.0, 21)[:-1] + + # for(float x = -5.0; x < 5.0; x+=0.5) + # std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", "; + calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0) + expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645, + 0.059618, 0.0811467, 0.116851, 0.18258, 0.265652, + 0.301023, 0.265652, 0.18258, 0.097728, 0.0407391, + 0.013226, 0.00334407, 0.000658486, 0.000100982, + 1.20606e-05]) + assert_allclose(expected, calculated, rtol=0.001) + + # for(float x = -5.0; x < 5.0; x+=0.5) + # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", "; + calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0) + expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121, + 0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752, + 0.345928, 0.391987, 0.345928, 0.237752, 0.12726, + 0.0530497, 0.0172227, 0.00435458, 0.000857469, + 0.000131497, 1.57051e-05]) + assert_allclose(expected, calculated, rtol=0.001) + + # for(float x = -5.0; x < 5.0; x+=0.5) { + # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5); + # std::cout << ", "; + # } + calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0) + expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249, + 0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944, + 0.172964, 0.189964, 0.195994, 0.189964, 0.172964, + 0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866, + 0.0265249]) + assert_allclose(expected, calculated, rtol=0.001) + + # for(float x = -5.0; x < 5.0; x+=0.5) + # std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", "; + calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0) + expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258, + 0.208663, 0.24344, 0.292128, 0.36516, 0.478254, + 0.622723, 0.767192, 0.880286, 0.94959, 0.982834, + 0.995314, 0.998981, 0.999824, 0.999976, 0.999997]) + assert_allclose(expected, 
calculated, rtol=0.001)
+
+    # for(float x = -5.0; x < 5.0; x+=0.5)
+    #   std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
+    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
+    expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
+                         0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
+                         0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
+                         0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
+                         0.999997])
+    assert_allclose(expected, calculated, rtol=0.001)
+
+    # for(float x = -5.0; x < 5.0; x+=0.5) {
+    #   std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
+    #   std::cout << ", ";
+    # }
+    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
+    expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
+                         0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
+                         0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
+                         0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
+    assert_allclose(expected, calculated, rtol=0.001)
+
+
+def test_crystalball_function_moments():
+    """
+    All values are calculated using the pdf formula and the integrate function
+    of Mathematica.
+    """
+    # The last two (beta, m) pairs test the special case m == beta**2
+    beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
+    m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])
+
+    # The distribution should be correctly normalised
+    expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
+    calculated_0th_moment = stats.crystalball._munp(0, beta, m)
+    assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)
+
+    # calculated using wolframalpha.com
+    # e.g. for beta = 2 and m = 3 we calculate the norm like this:
+    #   integrate exp(-x^2/2) from -2 to infinity +
+    #   integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
+    norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])
+
+    a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])
+    expected_1st_moment = a / norm
+    calculated_1st_moment = stats.crystalball._munp(1, beta, m)
+    assert_allclose(expected_1st_moment, calculated_1st_moment, rtol=0.001)
+
+    a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])
+    expected_2nd_moment = a / norm
+    calculated_2nd_moment = stats.crystalball._munp(2, beta, m)
+    assert_allclose(expected_2nd_moment, calculated_2nd_moment, rtol=0.001)
+
+    a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])
+    expected_3rd_moment = a / norm
+    calculated_3rd_moment = stats.crystalball._munp(3, beta, m)
+    assert_allclose(expected_3rd_moment, calculated_3rd_moment, rtol=0.001)
+
+    a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])
+    expected_4th_moment = a / norm
+    calculated_4th_moment = stats.crystalball._munp(4, beta, m)
+    assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)
+
+    a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])
+    expected_5th_moment = a / norm
+    calculated_5th_moment = stats.crystalball._munp(5, beta, m)
+    assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)
+
+
+def test_argus_function():
+    # There is no usable reference implementation.
+    # (RootFit implementation returns unreasonable results which are not
+    # normalized correctly.)
+    # Instead we do some tests of whether the distribution behaves as
+    # expected for different shapes and scales.
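+    # The support of argus is (0, scale): the loops below require the pdf
+    # to vanish just outside both endpoints and to be positive just inside
+    # them, for every shape parameter chi tried.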
+    for i in range(1, 10):
+        for j in range(1, 10):
+            assert_equal(stats.argus.pdf(i + 0.001, chi=j, scale=i), 0.0)
+            assert_(stats.argus.pdf(i - 0.001, chi=j, scale=i) > 0.0)
+            assert_equal(stats.argus.pdf(-0.001, chi=j, scale=i), 0.0)
+            assert_(stats.argus.pdf(+0.001, chi=j, scale=i) > 0.0)
+
+    for i in range(1, 10):
+        assert_equal(stats.argus.cdf(1.0, chi=i), 1.0)
+        assert_equal(stats.argus.cdf(1.0, chi=i),
+                     1.0 - stats.argus.sf(1.0, chi=i))
+
+
+class TestHistogram(object):
+    def setup_method(self):
+        np.random.seed(1234)
+
+        # We have 8 bins
+        # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
+        # But actually np.histogram will put the last 9 also in the [8,9) bin!
+        # Therefore there is a slight difference below for the last bin, from
+        # what you might have expected.
+        histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+                                  6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
+        self.template = stats.rv_histogram(histogram)
+
+        data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
+        norm_histogram = np.histogram(data, bins=50)
+        self.norm_template = stats.rv_histogram(norm_histogram)
+
+    def test_pdf(self):
+        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
+                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
+        pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
+                                 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
+                                 4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
+                                 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
+                                 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
+        assert_allclose(self.template.pdf(values), pdf_values)
+
+        # Test explicitly the corner cases:
+        # As stated above, the pdf in the bin [8,9) is greater than one would
+        # naively expect because np.histogram put the 9 into the [8,9) bin.
+        assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
+        assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
+        # 9 is outside our defined bins [8,9), hence the pdf is already 0.
+        # For a continuous distribution this is fine, because a single value
+        # does not have a finite probability!
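+        # With 25 samples in unit-width bins, the pdf inside a bin is just
+        # count/25, which is what the fractions in these asserts encode.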
+        assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
+        assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
+
+        x = np.linspace(-2, 2, 10)
+        assert_allclose(self.norm_template.pdf(x),
+                        stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)
+
+    def test_cdf_ppf(self):
+        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
+                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
+        cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
+                                 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
+                                 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
+                                 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
+                                 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
+        assert_allclose(self.template.cdf(values), cdf_values)
+        # The first three and last two values in cdf_values are not unique
+        assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
+
+        # Test that cdf and ppf are inverse functions
+        x = np.linspace(1.0, 9.0, 100)
+        assert_allclose(self.template.ppf(self.template.cdf(x)), x)
+        x = np.linspace(0.0, 1.0, 100)
+        assert_allclose(self.template.cdf(self.template.ppf(x)), x)
+
+        x = np.linspace(-2, 2, 10)
+        assert_allclose(self.norm_template.cdf(x),
+                        stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)
+
+    def test_rvs(self):
+        N = 10000
+        sample = self.template.rvs(size=N, random_state=123)
+        assert_equal(np.sum(sample < 1.0), 0.0)
+        assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
+        assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
+        assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
+        assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
+        assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
+        assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
+        assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
+        assert_equal(np.sum(sample > 9.0), 0.0)
+
+    def test_munp(self):
+        for n in range(4):
+            assert_allclose(self.norm_template._munp(n),
+                            stats.norm._munp(n, 1.0, 2.5), rtol=0.05)
+
+    def test_entropy(self):
+        assert_allclose(self.norm_template.entropy(),
+                        stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_distributions.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_distributions.pyc
new file mode 100644
index 0000000..3edbec6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_distributions.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_fit.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_fit.py
new file mode 100644
index 0000000..c7ea486
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_fit.py
@@ -0,0 +1,122 @@
+from __future__ import division, print_function, absolute_import
+
+import os
+
+import numpy as np
+from numpy.testing import assert_allclose
+from scipy._lib._numpy_compat import suppress_warnings
+import
pytest +from scipy import stats + +from .test_continuous_basic import distcont + +# this is not a proper statistical test for convergence, but only +# verifies that the estimate and true values don't differ by too much + +fit_sizes = [1000, 5000] # sample sizes to try + +thresh_percent = 0.25 # percent of true parameters for fail cut-off +thresh_min = 0.75 # minimum difference estimate - true to fail test + +failing_fits = [ + 'burr', + 'chi2', + 'gausshyper', + 'genexpon', + 'gengamma', + 'kappa4', + 'ksone', + 'mielke', + 'ncf', + 'ncx2', + 'pearson3', + 'powerlognorm', + 'truncexpon', + 'tukeylambda', + 'vonmises', + 'wrapcauchy', + 'levy_stable', + 'trapz' +] + +# Don't run the fit test on these: +skip_fit = [ + 'erlang', # Subclass of gamma, generates a warning. +] + + +def cases_test_cont_fit(): + # this tests the closeness of the estimated parameters to the true + # parameters with fit method of continuous distributions + # Note: is slow, some distributions don't converge with sample size <= 10000 + for distname, arg in distcont: + if distname not in skip_fit: + yield distname, arg + + +@pytest.mark.slow +@pytest.mark.parametrize('distname,arg', cases_test_cont_fit()) +def test_cont_fit(distname, arg): + if distname in failing_fits: + # Skip failing fits unless overridden + try: + xfail = not int(os.environ['SCIPY_XFAIL']) + except Exception: + xfail = True + if xfail: + msg = "Fitting %s doesn't work reliably yet" % distname + msg += " [Set environment variable SCIPY_XFAIL=1 to run this test nevertheless.]" + pytest.xfail(msg) + + distfn = getattr(stats, distname) + + truearg = np.hstack([arg, [0.0, 1.0]]) + diffthreshold = np.max(np.vstack([truearg*thresh_percent, + np.ones(distfn.numargs+2)*thresh_min]), + 0) + + for fit_size in fit_sizes: + # Note that if a fit succeeds, the other fit_sizes are skipped + np.random.seed(1234) + + with np.errstate(all='ignore'), suppress_warnings() as sup: + sup.filter(category=DeprecationWarning, message=".*frechet_") + rvs = distfn.rvs(size=fit_size, *arg) + est = distfn.fit(rvs) # start with default values + + diff = est - truearg + + # threshold for location + diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min]) + + if np.any(np.isnan(est)): + raise AssertionError('nan returned in fit') + else: + if np.all(np.abs(diff) <= diffthreshold): + break + else: + txt = 'parameter: %s\n' % str(truearg) + txt += 'estimated: %s\n' % str(est) + txt += 'diff : %s\n' % str(diff) + raise AssertionError('fit not very good in %s\n' % distfn.name + txt) + + +def _check_loc_scale_mle_fit(name, data, desired, atol=None): + d = getattr(stats, name) + actual = d.fit(data)[-2:] + assert_allclose(actual, desired, atol=atol, + err_msg='poor mle fit of (loc, scale) in %s' % name) + + +def test_non_default_loc_scale_mle_fit(): + data = np.array([1.01, 1.78, 1.78, 1.78, 1.88, 1.88, 1.88, 2.00]) + _check_loc_scale_mle_fit('uniform', data, [1.01, 0.99], 1e-3) + _check_loc_scale_mle_fit('expon', data, [1.01, 0.73875], 1e-3) + + +def test_expon_fit(): + """gh-6167""" + data = [0, 0, 0, 0, 2, 2, 2, 2] + phat = stats.expon.fit(data, floc=0) + assert_allclose(phat, [0, 1.0], atol=1e-3) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_fit.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_fit.pyc new file mode 100644 index 0000000..5079971 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_fit.pyc differ diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_kdeoth.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_kdeoth.py new file mode 100644 index 0000000..e69510a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_kdeoth.py @@ -0,0 +1,393 @@ +from __future__ import division, print_function, absolute_import + +from scipy import stats +import numpy as np +from numpy.testing import (assert_almost_equal, assert_, + assert_array_almost_equal, assert_array_almost_equal_nulp, assert_allclose) +import pytest +from pytest import raises as assert_raises + + +def test_kde_1d(): + #some basic tests comparing to normal distribution + np.random.seed(8765678) + n_basesample = 500 + xn = np.random.randn(n_basesample) + xnmean = xn.mean() + xnstd = xn.std(ddof=1) + + # get kde for original sample + gkde = stats.gaussian_kde(xn) + + # evaluate the density function for the kde for some points + xs = np.linspace(-7,7,501) + kdepdf = gkde.evaluate(xs) + normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd) + intervall = xs[1] - xs[0] + + assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01) + prob1 = gkde.integrate_box_1d(xnmean, np.inf) + prob2 = gkde.integrate_box_1d(-np.inf, xnmean) + assert_almost_equal(prob1, 0.5, decimal=1) + assert_almost_equal(prob2, 0.5, decimal=1) + assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13) + assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13) + + assert_almost_equal(gkde.integrate_kde(gkde), + (kdepdf**2).sum()*intervall, decimal=2) + assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2), + (kdepdf*normpdf).sum()*intervall, decimal=2) + + +def test_kde_1d_weighted(): + #some basic tests comparing to normal distribution + np.random.seed(8765678) + n_basesample = 500 + xn = np.random.randn(n_basesample) + wn = np.random.rand(n_basesample) + xnmean = np.average(xn, weights=wn) + xnstd = np.sqrt(np.average((xn-xnmean)**2, weights=wn)) + + # get kde for original sample + gkde = stats.gaussian_kde(xn, weights=wn) + + # evaluate the density function for the kde for some points + xs = np.linspace(-7,7,501) + kdepdf = gkde.evaluate(xs) + normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd) + intervall = xs[1] - xs[0] + + assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01) + prob1 = gkde.integrate_box_1d(xnmean, np.inf) + prob2 = gkde.integrate_box_1d(-np.inf, xnmean) + assert_almost_equal(prob1, 0.5, decimal=1) + assert_almost_equal(prob2, 0.5, decimal=1) + assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13) + assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13) + + assert_almost_equal(gkde.integrate_kde(gkde), + (kdepdf**2).sum()*intervall, decimal=2) + assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2), + (kdepdf*normpdf).sum()*intervall, decimal=2) + + +@pytest.mark.slow +def test_kde_2d(): + #some basic tests comparing to normal distribution + np.random.seed(8765678) + n_basesample = 500 + + mean = np.array([1.0, 3.0]) + covariance = np.array([[1.0, 2.0], [2.0, 6.0]]) + + # Need transpose (shape (2, 500)) for kde + xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T + + # get kde for original sample + gkde = stats.gaussian_kde(xn) + + # evaluate the density function for the kde for some points + x, y = np.mgrid[-7:7:500j, -7:7:500j] + grid_coords = np.vstack([x.ravel(), y.ravel()]) + kdepdf = gkde.evaluate(grid_coords) + kdepdf = kdepdf.reshape(500, 500) + + normpdf = 
stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance) + intervall = y.ravel()[1] - y.ravel()[0] + + assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01) + + small = -1e100 + large = 1e100 + prob1 = gkde.integrate_box([small, mean[1]], [large, large]) + prob2 = gkde.integrate_box([small, small], [large, mean[1]]) + + assert_almost_equal(prob1, 0.5, decimal=1) + assert_almost_equal(prob2, 0.5, decimal=1) + assert_almost_equal(gkde.integrate_kde(gkde), + (kdepdf**2).sum()*(intervall**2), decimal=2) + assert_almost_equal(gkde.integrate_gaussian(mean, covariance), + (kdepdf*normpdf).sum()*(intervall**2), decimal=2) + + +@pytest.mark.slow +def test_kde_2d_weighted(): + #some basic tests comparing to normal distribution + np.random.seed(8765678) + n_basesample = 500 + + mean = np.array([1.0, 3.0]) + covariance = np.array([[1.0, 2.0], [2.0, 6.0]]) + + # Need transpose (shape (2, 500)) for kde + xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T + wn = np.random.rand(n_basesample) + + # get kde for original sample + gkde = stats.gaussian_kde(xn, weights=wn) + + # evaluate the density function for the kde for some points + x, y = np.mgrid[-7:7:500j, -7:7:500j] + grid_coords = np.vstack([x.ravel(), y.ravel()]) + kdepdf = gkde.evaluate(grid_coords) + kdepdf = kdepdf.reshape(500, 500) + + normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance) + intervall = y.ravel()[1] - y.ravel()[0] + + assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01) + + small = -1e100 + large = 1e100 + prob1 = gkde.integrate_box([small, mean[1]], [large, large]) + prob2 = gkde.integrate_box([small, small], [large, mean[1]]) + + assert_almost_equal(prob1, 0.5, decimal=1) + assert_almost_equal(prob2, 0.5, decimal=1) + assert_almost_equal(gkde.integrate_kde(gkde), + (kdepdf**2).sum()*(intervall**2), decimal=2) + assert_almost_equal(gkde.integrate_gaussian(mean, covariance), + (kdepdf*normpdf).sum()*(intervall**2), decimal=2) + + +def test_kde_bandwidth_method(): + def scotts_factor(kde_obj): + """Same as default, just check that it works.""" + return np.power(kde_obj.n, -1./(kde_obj.d+4)) + + np.random.seed(8765678) + n_basesample = 50 + xn = np.random.randn(n_basesample) + + # Default + gkde = stats.gaussian_kde(xn) + # Supply a callable + gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor) + # Supply a scalar + gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor) + + xs = np.linspace(-7,7,51) + kdepdf = gkde.evaluate(xs) + kdepdf2 = gkde2.evaluate(xs) + assert_almost_equal(kdepdf, kdepdf2) + kdepdf3 = gkde3.evaluate(xs) + assert_almost_equal(kdepdf, kdepdf3) + + assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring') + + +def test_kde_bandwidth_method_weighted(): + def scotts_factor(kde_obj): + """Same as default, just check that it works.""" + return np.power(kde_obj.neff, -1./(kde_obj.d+4)) + + np.random.seed(8765678) + n_basesample = 50 + xn = np.random.randn(n_basesample) + + # Default + gkde = stats.gaussian_kde(xn) + # Supply a callable + gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor) + # Supply a scalar + gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor) + + xs = np.linspace(-7,7,51) + kdepdf = gkde.evaluate(xs) + kdepdf2 = gkde2.evaluate(xs) + assert_almost_equal(kdepdf, kdepdf2) + kdepdf3 = gkde3.evaluate(xs) + assert_almost_equal(kdepdf, kdepdf3) + + assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring') + + +# Subclasses that should stay working (extracted from various 
sources). +# Unfortunately the earlier design of gaussian_kde made it necessary for users +# to create these kinds of subclasses, or call _compute_covariance() directly. + +class _kde_subclass1(stats.gaussian_kde): + def __init__(self, dataset): + self.dataset = np.atleast_2d(dataset) + self.d, self.n = self.dataset.shape + self.covariance_factor = self.scotts_factor + self._compute_covariance() + + +class _kde_subclass2(stats.gaussian_kde): + def __init__(self, dataset): + self.covariance_factor = self.scotts_factor + super(_kde_subclass2, self).__init__(dataset) + + +class _kde_subclass3(stats.gaussian_kde): + def __init__(self, dataset, covariance): + self.covariance = covariance + stats.gaussian_kde.__init__(self, dataset) + + def _compute_covariance(self): + self.inv_cov = np.linalg.inv(self.covariance) + self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \ + * self.n + + +class _kde_subclass4(stats.gaussian_kde): + def covariance_factor(self): + return 0.5 * self.silverman_factor() + + +def test_gaussian_kde_subclassing(): + x1 = np.array([-7, -5, 1, 4, 5], dtype=float) + xs = np.linspace(-10, 10, num=50) + + # gaussian_kde itself + kde = stats.gaussian_kde(x1) + ys = kde(xs) + + # subclass 1 + kde1 = _kde_subclass1(x1) + y1 = kde1(xs) + assert_array_almost_equal_nulp(ys, y1, nulp=10) + + # subclass 2 + kde2 = _kde_subclass2(x1) + y2 = kde2(xs) + assert_array_almost_equal_nulp(ys, y2, nulp=10) + + # subclass 3 + kde3 = _kde_subclass3(x1, kde.covariance) + y3 = kde3(xs) + assert_array_almost_equal_nulp(ys, y3, nulp=10) + + # subclass 4 + kde4 = _kde_subclass4(x1) + y4 = kde4(x1) + y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017] + + assert_array_almost_equal(y_expected, y4, decimal=6) + + # Not a subclass, but check for use of _compute_covariance() + kde5 = kde + kde5.covariance_factor = lambda: kde.factor + kde5._compute_covariance() + y5 = kde5(xs) + assert_array_almost_equal_nulp(ys, y5, nulp=10) + + +def test_gaussian_kde_covariance_caching(): + x1 = np.array([-7, -5, 1, 4, 5], dtype=float) + xs = np.linspace(-10, 10, num=5) + # These expected values are from scipy 0.10, before some changes to + # gaussian_kde. They were not compared with any external reference. + y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475] + + # Set the bandwidth, then reset it to the default. + kde = stats.gaussian_kde(x1) + kde.set_bandwidth(bw_method=0.5) + kde.set_bandwidth(bw_method='scott') + y2 = kde(xs) + + assert_array_almost_equal(y_expected, y2, decimal=7) + + +def test_gaussian_kde_monkeypatch(): + """Ugly, but people may rely on this. See scipy pull request 123, + specifically the linked ML thread "Width of the Gaussian in stats.kde". + If it is necessary to break this later on, that is to be discussed on ML. + """ + x1 = np.array([-7, -5, 1, 4, 5], dtype=float) + xs = np.linspace(-10, 10, num=50) + + # The old monkeypatched version to get at Silverman's Rule. + kde = stats.gaussian_kde(x1) + kde.covariance_factor = kde.silverman_factor + kde._compute_covariance() + y1 = kde(xs) + + # The new saner version. 
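+    # bw_method='silverman' computes the same bandwidth as the monkeypatched
+    # covariance_factor above, so the two density estimates should agree to
+    # within a few ulp.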
+ kde2 = stats.gaussian_kde(x1, bw_method='silverman') + y2 = kde2(xs) + + assert_array_almost_equal_nulp(y1, y2, nulp=10) + + +def test_kde_integer_input(): + """Regression test for #1181.""" + x1 = np.arange(5) + kde = stats.gaussian_kde(x1) + y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721] + assert_array_almost_equal(kde(x1), y_expected, decimal=6) + + +def test_pdf_logpdf(): + np.random.seed(1) + n_basesample = 50 + xn = np.random.randn(n_basesample) + + # Default + gkde = stats.gaussian_kde(xn) + + xs = np.linspace(-15, 12, 25) + pdf = gkde.evaluate(xs) + pdf2 = gkde.pdf(xs) + assert_almost_equal(pdf, pdf2, decimal=12) + + logpdf = np.log(pdf) + logpdf2 = gkde.logpdf(xs) + assert_almost_equal(logpdf, logpdf2, decimal=12) + + # There are more points than data + gkde = stats.gaussian_kde(xs) + pdf = np.log(gkde.evaluate(xn)) + pdf2 = gkde.logpdf(xn) + assert_almost_equal(pdf, pdf2, decimal=12) + + +def test_pdf_logpdf_weighted(): + np.random.seed(1) + n_basesample = 50 + xn = np.random.randn(n_basesample) + wn = np.random.rand(n_basesample) + + # Default + gkde = stats.gaussian_kde(xn, weights=wn) + + xs = np.linspace(-15, 12, 25) + pdf = gkde.evaluate(xs) + pdf2 = gkde.pdf(xs) + assert_almost_equal(pdf, pdf2, decimal=12) + + logpdf = np.log(pdf) + logpdf2 = gkde.logpdf(xs) + assert_almost_equal(logpdf, logpdf2, decimal=12) + + # There are more points than data + gkde = stats.gaussian_kde(xs) + pdf = np.log(gkde.evaluate(xn)) + pdf2 = gkde.logpdf(xn) + assert_almost_equal(pdf, pdf2, decimal=12) + + +def test_weights_intact(): + # regression test for gh-9709: weights are not modified + np.random.seed(12345) + vals = np.random.lognormal(size=100) + weights = np.random.choice([1.0, 10.0, 100], size=vals.size) + orig_weights = weights.copy() + + stats.gaussian_kde(np.log10(vals), weights=weights) + assert_allclose(weights, orig_weights, atol=1e-14, rtol=1e-14) + + +def test_weights_integer(): + # integer weights are OK, cf gh-9709 (comment) + np.random.seed(12345) + values = [0.2, 13.5, 21.0, 75.0, 99.0] + weights = [1, 2, 4, 8, 16] # a list of integers + pdf_i = stats.gaussian_kde(values, weights=weights) + pdf_f = stats.gaussian_kde(values, weights=np.float64(weights)) + + xn = [0.3, 11, 88] + assert_allclose(pdf_i.evaluate(xn), + pdf_f.evaluate(xn), atol=1e-14, rtol=1e-14) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_kdeoth.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_kdeoth.pyc new file mode 100644 index 0000000..89d0181 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_kdeoth.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_morestats.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_morestats.py new file mode 100644 index 0000000..4a1e86d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_morestats.py @@ -0,0 +1,1642 @@ +# Author: Travis Oliphant, 2002 +# +# Further enhancements and tests added by numerous SciPy developers. 
+# +from __future__ import division, print_function, absolute_import + +import warnings + +import numpy as np +from numpy.random import RandomState +from numpy.testing import (assert_array_equal, + assert_almost_equal, assert_array_less, assert_array_almost_equal, + assert_, assert_allclose, assert_equal, assert_warns) +import pytest +from pytest import raises as assert_raises +from scipy._lib._numpy_compat import suppress_warnings + +from scipy import stats +from .common_tests import check_named_results + +# Matplotlib is not a scipy dependency but is optionally used in probplot, so +# check if it's available +try: + import matplotlib.pyplot as plt + have_matplotlib = True +except Exception: + have_matplotlib = False + + +# test data gear.dat from NIST for Levene and Bartlett test +# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm +g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000] +g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988] +g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996] +g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996] +g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996] +g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996] +g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002] +g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006] +g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991] +g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997] + + +class TestBayes_mvs(object): + def test_basic(self): + # Expected values in this test simply taken from the function. For + # some checks regarding correctness of implementation, see review in + # gh-674 + data = [6, 9, 12, 7, 8, 8, 13] + mean, var, std = stats.bayes_mvs(data) + assert_almost_equal(mean.statistic, 9.0) + assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467), + rtol=1e-14) + + assert_almost_equal(var.statistic, 10.0) + assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018), + rtol=1e-09) + + assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14) + assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312), + rtol=1e-14) + + def test_empty_input(self): + assert_raises(ValueError, stats.bayes_mvs, []) + + def test_result_attributes(self): + x = np.arange(15) + attributes = ('statistic', 'minmax') + res = stats.bayes_mvs(x) + + for i in res: + check_named_results(i, attributes) + + +class TestMvsdist(object): + def test_basic(self): + data = [6, 9, 12, 7, 8, 8, 13] + mean, var, std = stats.mvsdist(data) + assert_almost_equal(mean.mean(), 9.0) + assert_allclose(mean.interval(0.9), (7.1036502226125329, + 10.896349777387467), rtol=1e-14) + + assert_almost_equal(var.mean(), 10.0) + assert_allclose(var.interval(0.9), (3.1767242068607087, + 24.45910381334018), rtol=1e-09) + + assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14) + assert_allclose(std.interval(0.9), (1.7823367265645145, + 4.9456146050146312), rtol=1e-14) + + def test_empty_input(self): + assert_raises(ValueError, stats.mvsdist, []) + + def test_bad_arg(self): + # Raise ValueError if fewer than two data points are given. 
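+        # A variance cannot be estimated from a single observation, so one
+        # data point is the canonical bad input here.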
+ data = [1] + assert_raises(ValueError, stats.mvsdist, data) + + def test_warns(self): + # regression test for gh-5270 + # make sure there are no spurious divide-by-zero warnings + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) + [x.mean() for x in stats.mvsdist([1, 2, 3])] + [x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])] + + +class TestShapiro(object): + def test_basic(self): + x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46, + 4.43, 0.21, 4.75, 0.71, 1.52, 3.24, + 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66] + w, pw = stats.shapiro(x1) + assert_almost_equal(w, 0.90047299861907959, 6) + assert_almost_equal(pw, 0.042089745402336121, 6) + x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11, + 3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69, + 0.08, 3.67, 2.81, 3.49] + w, pw = stats.shapiro(x2) + assert_almost_equal(w, 0.9590270, 6) + assert_almost_equal(pw, 0.52460, 3) + + # Verified against R + np.random.seed(12345678) + x3 = stats.norm.rvs(loc=5, scale=3, size=100) + w, pw = stats.shapiro(x3) + assert_almost_equal(w, 0.9772805571556091, decimal=6) + assert_almost_equal(pw, 0.08144091814756393, decimal=3) + + # Extracted from original paper + x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614, + 0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206, + 3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351] + W_expected = 0.83467 + p_expected = 0.000914 + w, pw = stats.shapiro(x4) + assert_almost_equal(w, W_expected, decimal=4) + assert_almost_equal(pw, p_expected, decimal=5) + + def test_2d(self): + x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46, + 4.43, 0.21, 4.75], [0.71, 1.52, 3.24, + 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]] + w, pw = stats.shapiro(x1) + assert_almost_equal(w, 0.90047299861907959, 6) + assert_almost_equal(pw, 0.042089745402336121, 6) + x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11, + 3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69, + 0.08, 3.67, 2.81, 3.49]] + w, pw = stats.shapiro(x2) + assert_almost_equal(w, 0.9590270, 6) + assert_almost_equal(pw, 0.52460, 3) + + def test_empty_input(self): + assert_raises(ValueError, stats.shapiro, []) + assert_raises(ValueError, stats.shapiro, [[], [], []]) + + def test_not_enough_values(self): + assert_raises(ValueError, stats.shapiro, [1, 2]) + assert_raises(ValueError, stats.shapiro, [[], [2]]) + + def test_bad_arg(self): + # Length of x is less than 3. + x = [1] + assert_raises(ValueError, stats.shapiro, x) + + def test_nan_input(self): + x = np.arange(10.) + x[9] = np.nan + + w, pw = stats.shapiro(x) + assert_equal(w, np.nan) + assert_almost_equal(pw, 1.0) + + +class TestAnderson(object): + def test_normal(self): + rs = RandomState(1234567890) + x1 = rs.standard_exponential(size=50) + x2 = rs.standard_normal(size=50) + A, crit, sig = stats.anderson(x1) + assert_array_less(crit[:-1], A) + A, crit, sig = stats.anderson(x2) + assert_array_less(A, crit[-2:]) + + v = np.ones(10) + v[0] = 0 + A, crit, sig = stats.anderson(v) + # The expected statistic 3.208057 was computed independently of scipy. 
+ # For example, in R: + # > library(nortest) + # > v <- rep(1, 10) + # > v[1] <- 0 + # > result <- ad.test(v) + # > result$statistic + # A + # 3.208057 + assert_allclose(A, 3.208057) + + def test_expon(self): + rs = RandomState(1234567890) + x1 = rs.standard_exponential(size=50) + x2 = rs.standard_normal(size=50) + A, crit, sig = stats.anderson(x1, 'expon') + assert_array_less(A, crit[-2:]) + olderr = np.seterr(all='ignore') + try: + A, crit, sig = stats.anderson(x2, 'expon') + finally: + np.seterr(**olderr) + assert_(A > crit[-1]) + + def test_gumbel(self): + # Regression test for gh-6306. Before that issue was fixed, + # this case would return a2=inf. + v = np.ones(100) + v[0] = 0.0 + a2, crit, sig = stats.anderson(v, 'gumbel') + # A brief reimplementation of the calculation of the statistic. + n = len(v) + xbar, s = stats.gumbel_l.fit(v) + logcdf = stats.gumbel_l.logcdf(v, xbar, s) + logsf = stats.gumbel_l.logsf(v, xbar, s) + i = np.arange(1, n+1) + expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1])) + + assert_allclose(a2, expected_a2) + + def test_bad_arg(self): + assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp') + + def test_result_attributes(self): + rs = RandomState(1234567890) + x = rs.standard_exponential(size=50) + res = stats.anderson(x) + attributes = ('statistic', 'critical_values', 'significance_level') + check_named_results(res, attributes) + + def test_gumbel_l(self): + # gh-2592, gh-6337 + # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist. + rs = RandomState(1234567890) + x = rs.gumbel(size=100) + A1, crit1, sig1 = stats.anderson(x, 'gumbel') + A2, crit2, sig2 = stats.anderson(x, 'gumbel_l') + + assert_allclose(A2, A1) + + def test_gumbel_r(self): + # gh-2592, gh-6337 + # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist. + rs = RandomState(1234567890) + x1 = rs.gumbel(size=100) + x2 = np.ones(100) + A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r') + A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r') + + assert_array_less(A1, crit1[-2:]) + assert_(A2 > crit2[-1]) + + +class TestAndersonKSamp(object): + def test_example1a(self): + # Example data from Scholz & Stephens (1987), originally + # published in Lehmann (1995, Nonparametrics, Statistical + # Methods Based on Ranks, p. 309) + # Pass a mixture of lists and arrays + t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0] + t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) + t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) + t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) + + Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False) + + assert_almost_equal(Tk, 4.449, 3) + assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459], + tm[0:5], 4) + assert_allclose(p, 0.0021, atol=0.00025) + + def test_example1b(self): + # Example data from Scholz & Stephens (1987), originally + # published in Lehmann (1995, Nonparametrics, Statistical + # Methods Based on Ranks, p. 
309) + # Pass arrays + t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]) + t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) + t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) + t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) + Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True) + + assert_almost_equal(Tk, 4.480, 3) + assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459], + tm[0:5], 4) + assert_allclose(p, 0.0020, atol=0.00025) + + def test_example2a(self): + # Example data taken from an earlier technical report of + # Scholz and Stephens + # Pass lists instead of arrays + t1 = [194, 15, 41, 29, 33, 181] + t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118] + t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34] + t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29, + 118, 25, 156, 310, 76, 26, 44, 23, 62] + t5 = [130, 208, 70, 101, 208] + t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27] + t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33] + t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5, + 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95] + t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82, + 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24] + t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36, + 22, 139, 210, 97, 30, 23, 13, 14] + t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438] + t12 = [50, 254, 5, 283, 35, 12] + t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130] + t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66, + 61, 34] + + Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8, + t9, t10, t11, t12, t13, t14), + midrank=False) + assert_almost_equal(Tk, 3.288, 3) + assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009], + tm[0:5], 4) + assert_allclose(p, 0.0041, atol=0.00025) + + def test_example2b(self): + # Example data taken from an earlier technical report of + # Scholz and Stephens + t1 = [194, 15, 41, 29, 33, 181] + t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118] + t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34] + t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29, + 118, 25, 156, 310, 76, 26, 44, 23, 62] + t5 = [130, 208, 70, 101, 208] + t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27] + t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33] + t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5, + 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95] + t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82, + 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24] + t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36, + 22, 139, 210, 97, 30, 23, 13, 14] + t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438] + t12 = [50, 254, 5, 283, 35, 12] + t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130] + t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66, + 61, 34] + + Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8, + t9, t10, t11, t12, t13, t14), + midrank=True) + + assert_almost_equal(Tk, 3.294, 3) + assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009], + tm[0:5], 4) + assert_allclose(p, 0.0041, atol=0.00025) + + def test_R_kSamples(self): + # test values generates with R package kSamples + # package version 1.2-6 (2017-06-14) + # r1 = 1:100 + # continuous case (no ties) --> version 1 + # res <- kSamples::ad.test(r1, r1 + 40.5) + # 
res$ad[1, "T.AD"] # 41.105 + # res$ad[1, " asympt. P-value"] # 5.8399e-18 + # + # discrete case (ties allowed) --> version 2 (here: midrank=True) + # res$ad[2, "T.AD"] # 41.235 + # + # res <- kSamples::ad.test(r1, r1 + .5) + # res$ad[1, "T.AD"] # -1.2824 + # res$ad[1, " asympt. P-value"] # 1 + # res$ad[2, "T.AD"] # -1.2944 + # + # res <- kSamples::ad.test(r1, r1 + 7.5) + # res$ad[1, "T.AD"] # 1.4923 + # res$ad[1, " asympt. P-value"] # 0.077501 + # + # res <- kSamples::ad.test(r1, r1 + 6) + # res$ad[2, "T.AD"] # 0.63892 + # res$ad[2, " asympt. P-value"] # 0.17981 + # + # res <- kSamples::ad.test(r1, r1 + 11.5) + # res$ad[1, "T.AD"] # 4.5042 + # res$ad[1, " asympt. P-value"] # 0.00545 + # + # res <- kSamples::ad.test(r1, r1 + 13.5) + # res$ad[1, "T.AD"] # 6.2982 + # res$ad[1, " asympt. P-value"] # 0.00118 + + x1 = np.linspace(1, 100, 100) + # test case: different distributions;p-value floored at 0.001 + # test case for issue #5493 / #8536 + with suppress_warnings() as sup: + sup.filter(UserWarning, message='p-value floored') + s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False) + assert_almost_equal(s, 41.105, 3) + assert_equal(p, 0.001) + + with suppress_warnings() as sup: + sup.filter(UserWarning, message='p-value floored') + s, _, p = stats.anderson_ksamp([x1, x1 + 40.5]) + assert_almost_equal(s, 41.235, 3) + assert_equal(p, 0.001) + + # test case: similar distributions --> p-value capped at 0.25 + with suppress_warnings() as sup: + sup.filter(UserWarning, message='p-value capped') + s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False) + assert_almost_equal(s, -1.2824, 4) + assert_equal(p, 0.25) + + with suppress_warnings() as sup: + sup.filter(UserWarning, message='p-value capped') + s, _, p = stats.anderson_ksamp([x1, x1 + .5]) + assert_almost_equal(s, -1.2944, 4) + assert_equal(p, 0.25) + + # test case: check interpolated p-value in [0.01, 0.25] (no ties) + s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False) + assert_almost_equal(s, 1.4923, 4) + assert_allclose(p, 0.0775, atol=0.005, rtol=0) + + # test case: check interpolated p-value in [0.01, 0.25] (w/ ties) + s, _, p = stats.anderson_ksamp([x1, x1 + 6]) + assert_almost_equal(s, 0.6389, 4) + assert_allclose(p, 0.1798, atol=0.005, rtol=0) + + # test extended critical values for p=0.001 and p=0.005 + s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False) + assert_almost_equal(s, 4.5042, 4) + assert_allclose(p, 0.00545, atol=0.0005, rtol=0) + + s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False) + assert_almost_equal(s, 6.2982, 4) + assert_allclose(p, 0.00118, atol=0.0001, rtol=0) + + def test_not_enough_samples(self): + assert_raises(ValueError, stats.anderson_ksamp, np.ones(5)) + + def test_no_distinct_observations(self): + assert_raises(ValueError, stats.anderson_ksamp, + (np.ones(5), np.ones(5))) + + def test_empty_sample(self): + assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), [])) + + def test_result_attributes(self): + # Pass a mixture of lists and arrays + t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0] + t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) + res = stats.anderson_ksamp((t1, t2), midrank=False) + + attributes = ('statistic', 'critical_values', 'significance_level') + check_named_results(res, attributes) + + +class TestAnsari(object): + + def test_small(self): + x = [1, 2, 3, 3, 4] + y = [3, 2, 6, 1, 6, 1, 4, 1] + with suppress_warnings() as sup: + sup.filter(UserWarning, "Ties preclude use of exact statistic.") + W, pval = 
stats.ansari(x, y) + assert_almost_equal(W, 23.5, 11) + assert_almost_equal(pval, 0.13499256881897437, 11) + + def test_approx(self): + ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99, + 101, 96, 97, 102, 107, 113, 116, 113, 110, 98)) + parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104, + 100, 96, 108, 103, 104, 114, 114, 113, 108, + 106, 99)) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "Ties preclude use of exact statistic.") + W, pval = stats.ansari(ramsay, parekh) + + assert_almost_equal(W, 185.5, 11) + assert_almost_equal(pval, 0.18145819972867083, 11) + + def test_exact(self): + W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12]) + assert_almost_equal(W, 10.0, 11) + assert_almost_equal(pval, 0.533333333333333333, 7) + + def test_bad_arg(self): + assert_raises(ValueError, stats.ansari, [], [1]) + assert_raises(ValueError, stats.ansari, [1], []) + + def test_result_attributes(self): + x = [1, 2, 3, 3, 4] + y = [3, 2, 6, 1, 6, 1, 4, 1] + with suppress_warnings() as sup: + sup.filter(UserWarning, "Ties preclude use of exact statistic.") + res = stats.ansari(x, y) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + +class TestBartlett(object): + + def test_data(self): + # https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm + args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] + T, pval = stats.bartlett(*args) + assert_almost_equal(T, 20.78587342806484, 7) + assert_almost_equal(pval, 0.0136358632781, 7) + + def test_bad_arg(self): + # Too few args raises ValueError. + assert_raises(ValueError, stats.bartlett, [1]) + + def test_result_attributes(self): + args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] + res = stats.bartlett(*args) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + def test_empty_arg(self): + args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, []) + assert_equal((np.nan, np.nan), stats.bartlett(*args)) + + # temporary fix for issue #9252: only accept 1d input + def test_1d_input(self): + x = np.array([[1, 2], [3, 4]]) + assert_raises(ValueError, stats.bartlett, g1, x) + + +class TestLevene(object): + + def test_data(self): + # https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm + args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] + W, pval = stats.levene(*args) + assert_almost_equal(W, 1.7059176930008939, 7) + assert_almost_equal(pval, 0.0990829755522, 7) + + def test_trimmed1(self): + # Test that center='trimmed' gives the same result as center='mean' + # when proportiontocut=0. + W1, pval1 = stats.levene(g1, g2, g3, center='mean') + W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', + proportiontocut=0.0) + assert_almost_equal(W1, W2) + assert_almost_equal(pval1, pval2) + + def test_trimmed2(self): + x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] + y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] + np.random.seed(1234) + x2 = np.random.permutation(x) + + # Use center='trimmed' + W0, pval0 = stats.levene(x, y, center='trimmed', + proportiontocut=0.125) + W1, pval1 = stats.levene(x2, y, center='trimmed', + proportiontocut=0.125) + # Trim the data here, and use center='mean' + W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean') + # Result should be the same. 
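+ # (Sanity check on the setup: each sample has n = 8 values, and
+ # proportiontocut=0.125 trims 0.125 * 8 = 1 value from each tail,
+ # which is exactly what the manual x[1:-1] / y[1:-1] slices remove,
+ # so both routes see the same six central values.)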
+ assert_almost_equal(W0, W2) + assert_almost_equal(W1, W2) + assert_almost_equal(pval1, pval2) + + def test_equal_mean_median(self): + x = np.linspace(-1, 1, 21) + np.random.seed(1234) + x2 = np.random.permutation(x) + y = x**3 + W1, pval1 = stats.levene(x, y, center='mean') + W2, pval2 = stats.levene(x2, y, center='median') + assert_almost_equal(W1, W2) + assert_almost_equal(pval1, pval2) + + def test_bad_keyword(self): + x = np.linspace(-1, 1, 21) + assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1) + + def test_bad_center_value(self): + x = np.linspace(-1, 1, 21) + assert_raises(ValueError, stats.levene, x, x, center='trim') + + def test_too_few_args(self): + assert_raises(ValueError, stats.levene, [1]) + + def test_result_attributes(self): + args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] + res = stats.levene(*args) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + # temporary fix for issue #9252: only accept 1d input + def test_1d_input(self): + x = np.array([[1, 2], [3, 4]]) + assert_raises(ValueError, stats.levene, g1, x) + + +class TestBinomP(object): + + def test_data(self): + pval = stats.binom_test(100, 250) + assert_almost_equal(pval, 0.0018833009350757682, 11) + pval = stats.binom_test(201, 405) + assert_almost_equal(pval, 0.92085205962670713, 11) + pval = stats.binom_test([682, 243], p=3.0/4) + assert_almost_equal(pval, 0.38249155957481695, 11) + + def test_bad_len_x(self): + # Length of x must be 1 or 2. + assert_raises(ValueError, stats.binom_test, [1, 2, 3]) + + def test_bad_n(self): + # len(x) is 1, but n is invalid. + # Missing n + assert_raises(ValueError, stats.binom_test, [100]) + # n less than x[0] + assert_raises(ValueError, stats.binom_test, [100], n=50) + + def test_bad_p(self): + assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0) + + def test_alternatives(self): + res = stats.binom_test(51, 235, p=1./6, alternative='less') + assert_almost_equal(res, 0.982022657605858) + + res = stats.binom_test(51, 235, p=1./6, alternative='greater') + assert_almost_equal(res, 0.02654424571169085) + + res = stats.binom_test(51, 235, p=1./6, alternative='two-sided') + assert_almost_equal(res, 0.0437479701823997) + + +class TestFligner(object): + + def test_data(self): + # numbers from R: fligner.test in package stats + x1 = np.arange(5) + assert_array_almost_equal(stats.fligner(x1, x1**2), + (3.2282229927203536, 0.072379187848207877), + 11) + + def test_trimmed1(self): + # Perturb input to break ties in the transformed data + # See https://github.com/scipy/scipy/pull/8042 for more details + rs = np.random.RandomState(123) + _perturb = lambda g: (np.asarray(g) + 1e-10*rs.randn(len(g))).tolist() + g1_ = _perturb(g1) + g2_ = _perturb(g2) + g3_ = _perturb(g3) + # Test that center='trimmed' gives the same result as center='mean' + # when proportiontocut=0. + Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean') + Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed', + proportiontocut=0.0) + assert_almost_equal(Xsq1, Xsq2) + assert_almost_equal(pval1, pval2) + + def test_trimmed2(self): + x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] + y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] + # Use center='trimmed' + Xsq1, pval1 = stats.fligner(x, y, center='trimmed', + proportiontocut=0.125) + # Trim the data here, and use center='mean' + Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean') + # Result should be the same. 
+ assert_almost_equal(Xsq1, Xsq2) + assert_almost_equal(pval1, pval2) + + # The following test looks reasonable at first, but fligner() uses the + # function stats.rankdata(), and in one of the cases in this test, + # there are ties, while in the other (because of normal rounding + # errors) there are not. This difference leads to differences in the + # third significant digit of W. + # + #def test_equal_mean_median(self): + # x = np.linspace(-1,1,21) + # y = x**3 + # W1, pval1 = stats.fligner(x, y, center='mean') + # W2, pval2 = stats.fligner(x, y, center='median') + # assert_almost_equal(W1, W2) + # assert_almost_equal(pval1, pval2) + + def test_bad_keyword(self): + x = np.linspace(-1, 1, 21) + assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1) + + def test_bad_center_value(self): + x = np.linspace(-1, 1, 21) + assert_raises(ValueError, stats.fligner, x, x, center='trim') + + def test_bad_num_args(self): + # Too few args raises ValueError. + assert_raises(ValueError, stats.fligner, [1]) + + def test_empty_arg(self): + x = np.arange(5) + assert_equal((np.nan, np.nan), stats.fligner(x, x**2, [])) + + +class TestMood(object): + def test_mood(self): + # numbers from R: mood.test in package stats + x1 = np.arange(5) + assert_array_almost_equal(stats.mood(x1, x1**2), + (-1.3830857299399906, 0.16663858066771478), + 11) + + def test_mood_order_of_args(self): + # z should change sign when the order of arguments changes, pvalue + # should not change + np.random.seed(1234) + x1 = np.random.randn(10, 1) + x2 = np.random.randn(15, 1) + z1, p1 = stats.mood(x1, x2) + z2, p2 = stats.mood(x2, x1) + assert_array_almost_equal([z1, p1], [-z2, p2]) + + def test_mood_with_axis_none(self): + # Test with axis = None, compare with results from R + x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047, + 1.59528080213779, 0.329507771815361, -0.820468384118015, + 0.487429052428485, 0.738324705129217, 0.575781351653492, + -0.305388387156356, 1.51178116845085, 0.389843236411431, + -0.621240580541804, -2.2146998871775, 1.12493091814311, + -0.0449336090152309, -0.0161902630989461, 0.943836210685299, + 0.821221195098089, 0.593901321217509] + + x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882, + -1.13037567424629, -0.0802517565509893, 0.132420284381094, + 0.707954729271733, -0.23969802417184, 1.98447393665293, + -0.138787012119665, 0.417650750792556, 0.981752777463662, + -0.392695355503813, -1.03966897694891, 1.78222896030858, + -2.31106908460517, 0.878604580921265, 0.035806718015226, + 1.01282869212708, 0.432265154539617, 2.09081920524915, + -1.19992581964387, 1.58963820029007, 1.95465164222325, + 0.00493777682814261, -2.45170638784613, 0.477237302613617, + -0.596558168631403, 0.792203270299649, 0.289636710177348] + + x1 = np.array(x1) + x2 = np.array(x2) + x1.shape = (10, 2) + x2.shape = (15, 2) + assert_array_almost_equal(stats.mood(x1, x2, axis=None), + [-1.31716607555, 0.18778296257]) + + def test_mood_2d(self): + # Test if the results of mood test in 2-D case are consistent with the + # R result for the same inputs. Numbers from R mood.test(). 
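+ # (More precisely, the loops below are a self-consistency check: the
+ # vectorised 2-D call must agree column by column with independent
+ # 1-D calls, and then again with the axes transposed.)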
+ ny = 5 + np.random.seed(1234) + x1 = np.random.randn(10, ny) + x2 = np.random.randn(15, ny) + z_vectest, pval_vectest = stats.mood(x1, x2) + + for j in range(ny): + assert_array_almost_equal([z_vectest[j], pval_vectest[j]], + stats.mood(x1[:, j], x2[:, j])) + + # inverse order of dimensions + x1 = x1.transpose() + x2 = x2.transpose() + z_vectest, pval_vectest = stats.mood(x1, x2, axis=1) + + for i in range(ny): + # check axis handling is self consistent + assert_array_almost_equal([z_vectest[i], pval_vectest[i]], + stats.mood(x1[i, :], x2[i, :])) + + def test_mood_3d(self): + shape = (10, 5, 6) + np.random.seed(1234) + x1 = np.random.randn(*shape) + x2 = np.random.randn(*shape) + + for axis in range(3): + z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis) + # Tests that result for 3-D arrays is equal to that for the + # same calculation on a set of 1-D arrays taken from the + # 3-D array + axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis + for i in range(shape[axes_idx[axis][0]]): + for j in range(shape[axes_idx[axis][1]]): + if axis == 0: + slice1 = x1[:, i, j] + slice2 = x2[:, i, j] + elif axis == 1: + slice1 = x1[i, :, j] + slice2 = x2[i, :, j] + else: + slice1 = x1[i, j, :] + slice2 = x2[i, j, :] + + assert_array_almost_equal([z_vectest[i, j], + pval_vectest[i, j]], + stats.mood(slice1, slice2)) + + def test_mood_bad_arg(self): + # Raise ValueError when the sum of the lengths of the args is + # less than 3 + assert_raises(ValueError, stats.mood, [1], []) + + +class TestProbplot(object): + + def test_basic(self): + np.random.seed(12345) + x = stats.norm.rvs(size=20) + osm, osr = stats.probplot(x, fit=False) + osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575, + -0.73908135, -0.5857176, -0.44506467, -0.31273668, + -0.18568928, -0.06158146, 0.06158146, 0.18568928, + 0.31273668, 0.44506467, 0.5857176, 0.73908135, + 0.91222575, 1.11829229, 1.38768012, 1.8241636] + assert_allclose(osr, np.sort(x)) + assert_allclose(osm, osm_expected) + + res, res_fit = stats.probplot(x, fit=True) + res_fit_expected = [1.05361841, 0.31297795, 0.98741609] + assert_allclose(res_fit, res_fit_expected) + + def test_sparams_keyword(self): + np.random.seed(123456) + x = stats.norm.rvs(size=100) + # Check that None, () and 0 (loc=0, for normal distribution) all work + # and give the same results + osm1, osr1 = stats.probplot(x, sparams=None, fit=False) + osm2, osr2 = stats.probplot(x, sparams=0, fit=False) + osm3, osr3 = stats.probplot(x, sparams=(), fit=False) + assert_allclose(osm1, osm2) + assert_allclose(osm1, osm3) + assert_allclose(osr1, osr2) + assert_allclose(osr1, osr3) + # Check giving (loc, scale) params for normal distribution + osm, osr = stats.probplot(x, sparams=(), fit=False) + + def test_dist_keyword(self): + np.random.seed(12345) + x = stats.norm.rvs(size=20) + osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,)) + osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,)) + assert_allclose(osm1, osm2) + assert_allclose(osr1, osr2) + + assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name') + assert_raises(AttributeError, stats.probplot, x, dist=[]) + + class custom_dist(object): + """Some class that looks just enough like a distribution.""" + def ppf(self, q): + return stats.norm.ppf(q, loc=2) + + osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False) + osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False) + assert_allclose(osm1, osm2) + assert_allclose(osr1, osr2) + + @pytest.mark.skipif(not have_matplotlib, reason="no 
matplotlib") + def test_plot_kwarg(self): + np.random.seed(7654321) + fig = plt.figure() + fig.add_subplot(111) + x = stats.t.rvs(3, size=100) + res1, fitres1 = stats.probplot(x, plot=plt) + plt.close() + res2, fitres2 = stats.probplot(x, plot=None) + res3 = stats.probplot(x, fit=False, plot=plt) + plt.close() + res4 = stats.probplot(x, fit=False, plot=None) + # Check that results are consistent between combinations of `fit` and + # `plot` keywords. + assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2) + assert_allclose(res1, res2) + assert_allclose(res1, res3) + assert_allclose(res1, res4) + assert_allclose(fitres1, fitres2) + + # Check that a Matplotlib Axes object is accepted + fig = plt.figure() + ax = fig.add_subplot(111) + stats.probplot(x, fit=False, plot=ax) + plt.close() + + def test_probplot_bad_args(self): + # Raise ValueError when given an invalid distribution. + assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp") + + def test_empty(self): + assert_equal(stats.probplot([], fit=False), + (np.array([]), np.array([]))) + assert_equal(stats.probplot([], fit=True), + ((np.array([]), np.array([])), + (np.nan, np.nan, 0.0))) + + def test_array_of_size_one(self): + with np.errstate(invalid='ignore'): + assert_equal(stats.probplot([1], fit=True), + ((np.array([0.]), np.array([1])), + (np.nan, np.nan, 0.0))) + + +def test_wilcoxon_bad_arg(): + # Raise ValueError when two args of different lengths are given or + # zero_method is unknown. + assert_raises(ValueError, stats.wilcoxon, [1], [1, 2]) + assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy") + + +def test_wilcoxon_arg_type(): + # Should be able to accept lists as arguments. + # Address issue 6070. + arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2] + + _ = stats.wilcoxon(arr, zero_method="pratt") + _ = stats.wilcoxon(arr, zero_method="zsplit") + _ = stats.wilcoxon(arr, zero_method="wilcox") + + +class TestKstat(object): + def test_moments_normal_distribution(self): + np.random.seed(32149) + data = np.random.randn(12345) + moments = [] + for n in [1, 2, 3, 4]: + moments.append(stats.kstat(data, n)) + + expected = [0.011315, 1.017931, 0.05811052, 0.0754134] + assert_allclose(moments, expected, rtol=1e-4) + + # test equivalence with `stats.moment` + m1 = stats.moment(data, moment=1) + m2 = stats.moment(data, moment=2) + m3 = stats.moment(data, moment=3) + assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2) + + def test_empty_input(self): + assert_raises(ValueError, stats.kstat, []) + + def test_nan_input(self): + data = np.arange(10.) + data[6] = np.nan + + assert_equal(stats.kstat(data), np.nan) + + def test_kstat_bad_arg(self): + # Raise ValueError if n > 4 or n < 1. + data = np.arange(10) + for n in [0, 4.001]: + assert_raises(ValueError, stats.kstat, data, n=n) + + +class TestKstatVar(object): + def test_empty_input(self): + assert_raises(ValueError, stats.kstatvar, []) + + def test_nan_input(self): + data = np.arange(10.) + data[6] = np.nan + + assert_equal(stats.kstatvar(data), np.nan) + + def test_bad_arg(self): + # Raise ValueError if n is not 1 or 2. 
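+ # (Background, stated as an assumption about this scipy version:
+ # kstatvar only implements variances for the first two k-statistics,
+ # i.e. kstatvar(data, n=1) estimates the variance of the sample mean
+ # and kstatvar(data, n=2) the variance of the variance estimate, so
+ # any other n should be rejected.)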
+ data = [1] + n = 10 + assert_raises(ValueError, stats.kstatvar, data, n=n) + + +class TestPpccPlot(object): + def setup_method(self): + np.random.seed(7654321) + self.x = stats.loggamma.rvs(5, size=500) + 5 + + def test_basic(self): + N = 5 + svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N) + ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182, + 0.93519298] + assert_allclose(svals, np.linspace(-10, 10, num=N)) + assert_allclose(ppcc, ppcc_expected) + + def test_dist(self): + # Test that we can specify distributions both by name and as objects. + svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda') + svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10, + dist=stats.tukeylambda) + assert_allclose(svals1, svals2, rtol=1e-20) + assert_allclose(ppcc1, ppcc2, rtol=1e-20) + # Test that 'tukeylambda' is the default dist + svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10) + assert_allclose(svals1, svals3, rtol=1e-20) + assert_allclose(ppcc1, ppcc3, rtol=1e-20) + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_plot_kwarg(self): + # Check with the matplotlib.pyplot module + fig = plt.figure() + ax = fig.add_subplot(111) + stats.ppcc_plot(self.x, -20, 20, plot=plt) + fig.delaxes(ax) + + # Check that a Matplotlib Axes object is accepted + ax = fig.add_subplot(111) + stats.ppcc_plot(self.x, -20, 20, plot=ax) + plt.close() + + def test_invalid_inputs(self): + # `b` has to be larger than `a` + assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0) + + # Raise ValueError when given an invalid distribution. + assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1, + dist="plate_of_shrimp") + + def test_empty(self): + # For consistency with probplot return for one empty array, + # ppcc contains all zeros and svals is the same as for normal array + # input. + svals, ppcc = stats.ppcc_plot([], 0, 1) + assert_allclose(svals, np.linspace(0, 1, num=80)) + assert_allclose(ppcc, np.zeros(80, dtype=float)) + + +class TestPpccMax(object): + def test_ppcc_max_bad_arg(self): + # Raise ValueError when given an invalid distribution. + data = [1] + assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp") + + def test_ppcc_max_basic(self): + np.random.seed(1234567) + x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 + # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7 + # it is accurate up to 16 decimals + assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5) + + def test_dist(self): + np.random.seed(1234567) + x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 + + # Test that we can specify distributions both by name and as objects. + max1 = stats.ppcc_max(x, dist='tukeylambda') + max2 = stats.ppcc_max(x, dist=stats.tukeylambda) + assert_almost_equal(max1, -0.71215366521264145, decimal=5) + assert_almost_equal(max2, -0.71215366521264145, decimal=5) + + # Test that 'tukeylambda' is the default dist + max3 = stats.ppcc_max(x) + assert_almost_equal(max3, -0.71215366521264145, decimal=5) + + def test_brack(self): + np.random.seed(1234567) + x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 + assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5)) + + # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7 + # it is accurate up to 16 decimals + assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)), + -0.71215366521264145, decimal=5) + + # On Python 2.6 the result is accurate to 5 decimals. 
On Python >= 2.7 + # it is accurate up to 16 decimals + assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)), + -0.71215366521264145, decimal=5) + + +class TestBoxcox_llf(object): + + def test_basic(self): + np.random.seed(54321) + x = stats.norm.rvs(size=10000, loc=10) + lmbda = 1 + llf = stats.boxcox_llf(lmbda, x) + llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2)) + assert_allclose(llf, llf_expected) + + def test_array_like(self): + np.random.seed(54321) + x = stats.norm.rvs(size=100, loc=10) + lmbda = 1 + llf = stats.boxcox_llf(lmbda, x) + llf2 = stats.boxcox_llf(lmbda, list(x)) + assert_allclose(llf, llf2, rtol=1e-12) + + def test_2d_input(self): + # Note: boxcox_llf() was already working with 2-D input (sort of), so + # keep it like that. boxcox() doesn't work with 2-D input though, due + # to brent() returning a scalar. + np.random.seed(54321) + x = stats.norm.rvs(size=100, loc=10) + lmbda = 1 + llf = stats.boxcox_llf(lmbda, x) + llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T) + assert_allclose([llf, llf], llf2, rtol=1e-12) + + def test_empty(self): + assert_(np.isnan(stats.boxcox_llf(1, []))) + + +class TestBoxcox(object): + + def test_fixed_lmbda(self): + np.random.seed(12345) + x = stats.loggamma.rvs(5, size=50) + 5 + xt = stats.boxcox(x, lmbda=1) + assert_allclose(xt, x - 1) + xt = stats.boxcox(x, lmbda=-1) + assert_allclose(xt, 1 - 1/x) + + xt = stats.boxcox(x, lmbda=0) + assert_allclose(xt, np.log(x)) + + # Also test that array_like input works + xt = stats.boxcox(list(x), lmbda=0) + assert_allclose(xt, np.log(x)) + + def test_lmbda_None(self): + np.random.seed(1234567) + # Start from normal rv's, do inverse transform to check that + # optimization function gets close to the right answer. + np.random.seed(1245) + lmbda = 2.5 + x = stats.norm.rvs(loc=10, size=50000) + x_inv = (x * lmbda + 1)**(-lmbda) + xt, maxlog = stats.boxcox(x_inv) + + assert_almost_equal(maxlog, -1 / lmbda, decimal=2) + + def test_alpha(self): + np.random.seed(1234) + x = stats.loggamma.rvs(5, size=50) + 5 + + # Some regular values for alpha, on a small sample size + _, _, interval = stats.boxcox(x, alpha=0.75) + assert_allclose(interval, [4.004485780226041, 5.138756355035744]) + _, _, interval = stats.boxcox(x, alpha=0.05) + assert_allclose(interval, [1.2138178554857557, 8.209033272375663]) + + # Try some extreme values, see we don't hit the N=500 limit + x = stats.loggamma.rvs(7, size=500) + 15 + _, _, interval = stats.boxcox(x, alpha=0.001) + assert_allclose(interval, [0.3988867, 11.40553131]) + _, _, interval = stats.boxcox(x, alpha=0.999) + assert_allclose(interval, [5.83316246, 5.83735292]) + + def test_boxcox_bad_arg(self): + # Raise ValueError if any data value is negative. 
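+ # (The Box-Cox transform is only defined for strictly positive data:
+ # it is log(x) for lmbda = 0 and (x**lmbda - 1) / lmbda otherwise,
+ # so non-positive values make the transform ill-defined.)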
+ x = np.array([-1]) + assert_raises(ValueError, stats.boxcox, x) + + def test_empty(self): + assert_(stats.boxcox([]).shape == (0,)) + + +class TestBoxcoxNormmax(object): + def setup_method(self): + np.random.seed(12345) + self.x = stats.loggamma.rvs(5, size=50) + 5 + + def test_pearsonr(self): + maxlog = stats.boxcox_normmax(self.x) + assert_allclose(maxlog, 1.804465, rtol=1e-6) + + def test_mle(self): + maxlog = stats.boxcox_normmax(self.x, method='mle') + assert_allclose(maxlog, 1.758101, rtol=1e-6) + + # Check that boxcox() uses 'mle' + _, maxlog_boxcox = stats.boxcox(self.x) + assert_allclose(maxlog_boxcox, maxlog) + + def test_all(self): + maxlog_all = stats.boxcox_normmax(self.x, method='all') + assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6) + + +class TestBoxcoxNormplot(object): + def setup_method(self): + np.random.seed(7654321) + self.x = stats.loggamma.rvs(5, size=500) + 5 + + def test_basic(self): + N = 5 + lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N) + ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057, + 0.95843297] + assert_allclose(lmbdas, np.linspace(-10, 10, num=N)) + assert_allclose(ppcc, ppcc_expected) + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_plot_kwarg(self): + # Check with the matplotlib.pyplot module + fig = plt.figure() + ax = fig.add_subplot(111) + stats.boxcox_normplot(self.x, -20, 20, plot=plt) + fig.delaxes(ax) + + # Check that a Matplotlib Axes object is accepted + ax = fig.add_subplot(111) + stats.boxcox_normplot(self.x, -20, 20, plot=ax) + plt.close() + + def test_invalid_inputs(self): + # `lb` has to be larger than `la` + assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0) + # `x` can not contain negative values + assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1) + + def test_empty(self): + assert_(stats.boxcox_normplot([], 0, 1).size == 0) + + +class TestYeojohnson_llf(object): + + def test_array_like(self): + np.random.seed(54321) + x = stats.norm.rvs(size=100, loc=0) + lmbda = 1 + llf = stats.yeojohnson_llf(lmbda, x) + llf2 = stats.yeojohnson_llf(lmbda, list(x)) + assert_allclose(llf, llf2, rtol=1e-12) + + def test_2d_input(self): + np.random.seed(54321) + x = stats.norm.rvs(size=100, loc=10) + lmbda = 1 + llf = stats.yeojohnson_llf(lmbda, x) + llf2 = stats.yeojohnson_llf(lmbda, np.vstack([x, x]).T) + assert_allclose([llf, llf], llf2, rtol=1e-12) + + def test_empty(self): + assert_(np.isnan(stats.yeojohnson_llf(1, []))) + + +class TestYeojohnson(object): + + def test_fixed_lmbda(self): + np.random.seed(12345) + + # Test positive input + x = stats.loggamma.rvs(5, size=50) + 5 + assert np.all(x > 0) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt, x) + xt = stats.yeojohnson(x, lmbda=-1) + assert_allclose(xt, 1 - 1 / (x + 1)) + xt = stats.yeojohnson(x, lmbda=0) + assert_allclose(xt, np.log(x + 1)) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt, x) + + # Test negative input + x = stats.loggamma.rvs(5, size=50) - 5 + assert np.all(x < 0) + xt = stats.yeojohnson(x, lmbda=2) + assert_allclose(xt, -np.log(-x + 1)) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt, x) + xt = stats.yeojohnson(x, lmbda=3) + assert_allclose(xt, 1 / (-x + 1) - 1) + + # test both positive and negative input + x = stats.loggamma.rvs(5, size=50) - 2 + assert not np.all(x < 0) + assert not np.all(x >= 0) + pos = x >= 0 + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt[pos], x[pos]) + xt = stats.yeojohnson(x, lmbda=-1) + assert_allclose(xt[pos], 1 - 1 / 
(x[pos] + 1)) + xt = stats.yeojohnson(x, lmbda=0) + assert_allclose(xt[pos], np.log(x[pos] + 1)) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt[pos], x[pos]) + + neg = ~pos + xt = stats.yeojohnson(x, lmbda=2) + assert_allclose(xt[neg], -np.log(-x[neg] + 1)) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt[neg], x[neg]) + xt = stats.yeojohnson(x, lmbda=3) + assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1) + + @pytest.mark.parametrize('lmbda', [0, .1, .5, 2]) + def test_lmbda_None(self, lmbda): + # Start from normal rv's, do inverse transform to check that + # optimization function gets close to the right answer. + + def _inverse_transform(x, lmbda): + x_inv = np.zeros(x.shape, dtype=x.dtype) + pos = x >= 0 + + # when x >= 0 + if abs(lmbda) < np.spacing(1.): + x_inv[pos] = np.exp(x[pos]) - 1 + else: # lmbda != 0 + x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1 + + # when x < 0 + if abs(lmbda - 2) > np.spacing(1.): + x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, + 1 / (2 - lmbda)) + else: # lmbda == 2 + x_inv[~pos] = 1 - np.exp(-x[~pos]) + + return x_inv + + np.random.seed(1234567) + n_samples = 20000 + x = np.random.normal(loc=0, scale=1, size=(n_samples)) + + x_inv = _inverse_transform(x, lmbda) + xt, maxlog = stats.yeojohnson(x_inv) + + assert_allclose(maxlog, lmbda, atol=1e-2) + + assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2) + assert_almost_equal(0, xt.mean(), decimal=1) + assert_almost_equal(1, xt.std(), decimal=1) + + def test_empty(self): + assert_(stats.yeojohnson([]).shape == (0,)) + + def test_array_like(self): + np.random.seed(54321) + x = stats.norm.rvs(size=100, loc=0) + lmbda = 1.5 + xt1, _ = stats.yeojohnson(x) + xt2, _ = stats.yeojohnson(list(x)) + assert_allclose(xt1, xt2, rtol=1e-12) + + +class TestYeojohnsonNormmax(object): + def setup_method(self): + np.random.seed(12345) + self.x = stats.loggamma.rvs(5, size=50) + 5 + + def test_mle(self): + maxlog = stats.yeojohnson_normmax(self.x) + assert_allclose(maxlog, 1.876393, rtol=1e-6) + + def test_darwin_example(self): + # test from original paper "A new family of power transformations to + # improve normality or symmetry" by Yeo and Johnson. 
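+ # (Presumably Darwin's classic data on height differences between
+ # cross- and self-fertilised plants, which the paper works through;
+ # the maximum-likelihood estimate reported there is about 1.305, the
+ # value asserted below.)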
+ x = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3, + 7.5, -6.0] + lmbda = stats.yeojohnson_normmax(x) + assert np.allclose(lmbda, 1.305, atol=1e-3) + + +class TestCircFuncs(object): + def test_circfuncs(self): + x = np.array([355, 5, 2, 359, 10, 350]) + M = stats.circmean(x, high=360) + Mval = 0.167690146 + assert_allclose(M, Mval, rtol=1e-7) + + V = stats.circvar(x, high=360) + Vval = 42.51955609 + assert_allclose(V, Vval, rtol=1e-7) + + S = stats.circstd(x, high=360) + Sval = 6.520702116 + assert_allclose(S, Sval, rtol=1e-7) + + def test_circfuncs_small(self): + x = np.array([20, 21, 22, 18, 19, 20.5, 19.2]) + M1 = x.mean() + M2 = stats.circmean(x, high=360) + assert_allclose(M2, M1, rtol=1e-5) + + V1 = x.var() + V2 = stats.circvar(x, high=360) + assert_allclose(V2, V1, rtol=1e-4) + + S1 = x.std() + S2 = stats.circstd(x, high=360) + assert_allclose(S2, S1, rtol=1e-4) + + def test_circmean_axis(self): + x = np.array([[355, 5, 2, 359, 10, 350], + [351, 7, 4, 352, 9, 349], + [357, 9, 8, 358, 4, 356]]) + M1 = stats.circmean(x, high=360) + M2 = stats.circmean(x.ravel(), high=360) + assert_allclose(M1, M2, rtol=1e-14) + + M1 = stats.circmean(x, high=360, axis=1) + M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])] + assert_allclose(M1, M2, rtol=1e-14) + + M1 = stats.circmean(x, high=360, axis=0) + M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])] + assert_allclose(M1, M2, rtol=1e-14) + + def test_circvar_axis(self): + x = np.array([[355, 5, 2, 359, 10, 350], + [351, 7, 4, 352, 9, 349], + [357, 9, 8, 358, 4, 356]]) + + V1 = stats.circvar(x, high=360) + V2 = stats.circvar(x.ravel(), high=360) + assert_allclose(V1, V2, rtol=1e-11) + + V1 = stats.circvar(x, high=360, axis=1) + V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])] + assert_allclose(V1, V2, rtol=1e-11) + + V1 = stats.circvar(x, high=360, axis=0) + V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])] + assert_allclose(V1, V2, rtol=1e-11) + + def test_circstd_axis(self): + x = np.array([[355, 5, 2, 359, 10, 350], + [351, 7, 4, 352, 9, 349], + [357, 9, 8, 358, 4, 356]]) + + S1 = stats.circstd(x, high=360) + S2 = stats.circstd(x.ravel(), high=360) + assert_allclose(S1, S2, rtol=1e-11) + + S1 = stats.circstd(x, high=360, axis=1) + S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])] + assert_allclose(S1, S2, rtol=1e-11) + + S1 = stats.circstd(x, high=360, axis=0) + S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])] + assert_allclose(S1, S2, rtol=1e-11) + + def test_circfuncs_array_like(self): + x = [355, 5, 2, 359, 10, 350] + assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7) + assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7) + assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7) + + def test_empty(self): + assert_(np.isnan(stats.circmean([]))) + assert_(np.isnan(stats.circstd([]))) + assert_(np.isnan(stats.circvar([]))) + + def test_circmean_scalar(self): + x = 1. 
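+ # (For a single angle the circular mean is that angle itself, so a
+ # scalar input should round-trip unchanged.)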
+ M1 = x + M2 = stats.circmean(x) + assert_allclose(M2, M1, rtol=1e-5) + + def test_circmean_range(self): + # regression test for gh-6420: circmean(..., high, low) must be + # between `high` and `low` + m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi) + assert_(m < np.pi) + assert_(m > -np.pi) + + def test_circfuncs_unit8(self): + # regression test for gh-7255: overflow when working with + # numpy uint8 data type + x = np.array([150, 10], dtype='uint8') + assert_equal(stats.circmean(x, high=180), 170.0) + assert_allclose(stats.circvar(x, high=180), 437.45871686, rtol=1e-7) + assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7) + +def test_accuracy_wilcoxon(): + freq = [1, 4, 16, 15, 8, 4, 5, 1, 2] + nums = range(-4, 5) + x = np.concatenate([[u] * v for u, v in zip(nums, freq)]) + y = np.zeros(x.size) + + T, p = stats.wilcoxon(x, y, "pratt") + assert_allclose(T, 423) + assert_allclose(p, 0.00197547303533107) + + T, p = stats.wilcoxon(x, y, "zsplit") + assert_allclose(T, 441) + assert_allclose(p, 0.0032145343172473055) + + T, p = stats.wilcoxon(x, y, "wilcox") + assert_allclose(T, 327) + assert_allclose(p, 0.00641346115861) + + # Test the 'correction' option, using values computed in R with: + # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE}) + x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112]) + y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187]) + T, p = stats.wilcoxon(x, y, correction=False) + assert_equal(T, 34) + assert_allclose(p, 0.6948866, rtol=1e-6) + T, p = stats.wilcoxon(x, y, correction=True) + assert_equal(T, 34) + assert_allclose(p, 0.7240817, rtol=1e-6) + + +def test_wilcoxon_result_attributes(): + x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112]) + y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187]) + res = stats.wilcoxon(x, y, correction=False) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + +def test_wilcoxon_tie(): + # Regression test for gh-2391. + # Corresponding R code is: + # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE) + # > result$p.value + # [1] 0.001565402 + # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE) + # > result$p.value + # [1] 0.001904195 + stat, p = stats.wilcoxon([0.1] * 10) + expected_p = 0.001565402 + assert_equal(stat, 0) + assert_allclose(p, expected_p, rtol=1e-6) + + stat, p = stats.wilcoxon([0.1] * 10, correction=True) + expected_p = 0.001904195 + assert_equal(stat, 0) + assert_allclose(p, expected_p, rtol=1e-6) + + +class TestMedianTest(object): + + def test_bad_n_samples(self): + # median_test requires at least two samples. + assert_raises(ValueError, stats.median_test, [1, 2, 3]) + + def test_empty_sample(self): + # Each sample must contain at least one value. + assert_raises(ValueError, stats.median_test, [], [1, 2, 3]) + + def test_empty_when_ties_ignored(self): + # The grand median is 1, and all values in the first argument are + # equal to the grand median. With ties="ignore", those values are + # ignored, which results in the first sample being (in effect) empty. + # This should raise a ValueError. + assert_raises(ValueError, stats.median_test, + [1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore") + + def test_empty_contingency_row(self): + # The grand median is 1, and with the default ties="below", all the + # values in the samples are counted as being below the grand median. 
+ # This would result in a row of zeros in the contingency table, which is + # an error. + assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1]) + + # With ties="above", all the values are counted as above the + # grand median. + assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1], + ties="above") + + def test_bad_ties(self): + assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], + ties="foo") + + def test_bad_nan_policy(self): + assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], + nan_policy='foobar') + + def test_bad_keyword(self): + assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5], + foo="foo") + + def test_simple(self): + x = [1, 2, 3] + y = [1, 2, 3] + stat, p, med, tbl = stats.median_test(x, y) + + # The median is floating point, but this equality test should be safe. + assert_equal(med, 2.0) + + assert_array_equal(tbl, [[1, 1], [2, 2]]) + + # The expected values of the contingency table equal the contingency + # table, so the statistic should be 0 and the p-value should be 1. + assert_equal(stat, 0) + assert_equal(p, 1) + + def test_ties_options(self): + # Test the contingency table calculation. + x = [1, 2, 3, 4] + y = [5, 6] + z = [7, 8, 9] + # grand median is 5. + + # Default 'ties' option is "below". + stat, p, m, tbl = stats.median_test(x, y, z) + assert_equal(m, 5) + assert_equal(tbl, [[0, 1, 3], [4, 1, 0]]) + + stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore") + assert_equal(m, 5) + assert_equal(tbl, [[0, 1, 3], [4, 0, 0]]) + + stat, p, m, tbl = stats.median_test(x, y, z, ties="above") + assert_equal(m, 5) + assert_equal(tbl, [[0, 2, 3], [4, 0, 0]]) + + def test_nan_policy_options(self): + x = [1, 2, np.nan] + y = [4, 5, 6] + mt1 = stats.median_test(x, y, nan_policy='propagate') + s, p, m, t = stats.median_test(x, y, nan_policy='omit') + + assert_equal(mt1, (np.nan, np.nan, np.nan, None)) + assert_allclose(s, 0.31250000000000006) + assert_allclose(p, 0.57615012203057869) + assert_equal(m, 4.0) + assert_equal(t, np.array([[0, 2], [2, 1]])) + assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise') + + def test_basic(self): + # median_test calls chi2_contingency to compute the test statistic + # and p-value. Make sure it hasn't screwed up the call... 
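+ # (chi2_contingency takes the 2 x k table of counts below/above the
+ # grand median and returns (statistic, p-value, dof, expected), so
+ # recomputing it directly on tbl must reproduce stat and p.)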
+ + x = [1, 2, 3, 4, 5] + y = [2, 4, 6, 8] + + stat, p, m, tbl = stats.median_test(x, y) + assert_equal(m, 4) + assert_equal(tbl, [[1, 2], [4, 2]]) + + exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl) + assert_allclose(stat, exp_stat) + assert_allclose(p, exp_p) + + stat, p, m, tbl = stats.median_test(x, y, lambda_=0) + assert_equal(m, 4) + assert_equal(tbl, [[1, 2], [4, 2]]) + + exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0) + assert_allclose(stat, exp_stat) + assert_allclose(p, exp_p) + + stat, p, m, tbl = stats.median_test(x, y, correction=False) + assert_equal(m, 4) + assert_equal(tbl, [[1, 2], [4, 2]]) + + exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False) + assert_allclose(stat, exp_stat) + assert_allclose(p, exp_p) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_morestats.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_morestats.pyc new file mode 100644 index 0000000..bda5b27 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_morestats.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_basic.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_basic.py new file mode 100644 index 0000000..6ce2b19 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_basic.py @@ -0,0 +1,1481 @@ +""" +Tests for the stats.mstats module (support for masked arrays) +""" +from __future__ import division, print_function, absolute_import + +import warnings + +import numpy as np +from numpy import nan +import numpy.ma as ma +from numpy.ma import masked, nomask + +import scipy.stats.mstats as mstats +from scipy import stats +from .common_tests import check_named_results +import pytest +from pytest import raises as assert_raises +from numpy.ma.testutils import (assert_equal, assert_almost_equal, + assert_array_almost_equal, assert_array_almost_equal_nulp, assert_, + assert_allclose, assert_array_equal) +from scipy._lib._numpy_compat import suppress_warnings + + +class TestMquantiles(object): + def test_mquantiles_limit_keyword(self): + # Regression test for Trac ticket #867 + data = np.array([[6., 7., 1.], + [47., 15., 2.], + [49., 36., 3.], + [15., 39., 4.], + [42., 40., -999.], + [41., 41., -999.], + [7., -999., -999.], + [39., -999., -999.], + [43., -999., -999.], + [40., -999., -999.], + [36., -999., -999.]]) + desired = [[19.2, 14.6, 1.45], + [40.0, 37.5, 2.5], + [42.8, 40.05, 3.55]] + quants = mstats.mquantiles(data, axis=0, limit=(0, 50)) + assert_almost_equal(quants, desired) + + +class TestGMean(object): + def test_1D(self): + a = (1, 2, 3, 4) + actual = mstats.gmean(a) + desired = np.power(1*2*3*4, 1./4.) + assert_almost_equal(actual, desired, decimal=14) + + desired1 = mstats.gmean(a, axis=-1) + assert_almost_equal(actual, desired1, decimal=14) + assert_(not isinstance(desired1, ma.MaskedArray)) + + a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1)) + actual = mstats.gmean(a) + desired = np.power(1*2*3, 1./3.) 
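+ # (Masked entries are excluded, so with the last element masked the
+ # geometric mean runs over 1, 2, 3 only, hence the cube root here
+ # rather than the fourth root used above.)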
+ assert_almost_equal(actual, desired, decimal=14) + + desired1 = mstats.gmean(a, axis=-1) + assert_almost_equal(actual, desired1, decimal=14) + + @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping') + def test_1D_float96(self): + a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1)) + actual_dt = mstats.gmean(a, dtype=np.float96) + desired_dt = np.power(1*2*3, 1./3.).astype(np.float96) + assert_almost_equal(actual_dt, desired_dt, decimal=14) + assert_(actual_dt.dtype == desired_dt.dtype) + + def test_2D(self): + a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)), + mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0))) + actual = mstats.gmean(a) + desired = np.array((1, 2, 3, 4)) + assert_array_almost_equal(actual, desired, decimal=14) + + desired1 = mstats.gmean(a, axis=0) + assert_array_almost_equal(actual, desired1, decimal=14) + + actual = mstats.gmean(a, -1) + desired = ma.array((np.power(1*2*3*4, 1./4.), + np.power(2*3, 1./2.), + np.power(1*4, 1./2.))) + assert_array_almost_equal(actual, desired, decimal=14) + + +class TestHMean(object): + def test_1D(self): + a = (1, 2, 3, 4) + actual = mstats.hmean(a) + desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) + assert_almost_equal(actual, desired, decimal=14) + desired1 = mstats.hmean(ma.array(a), axis=-1) + assert_almost_equal(actual, desired1, decimal=14) + + a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1)) + actual = mstats.hmean(a) + desired = 3. / (1./1 + 1./2 + 1./3) + assert_almost_equal(actual, desired, decimal=14) + desired1 = mstats.hmean(a, axis=-1) + assert_almost_equal(actual, desired1, decimal=14) + + @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping') + def test_1D_float96(self): + a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1)) + actual_dt = mstats.hmean(a, dtype=np.float96) + desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3), + dtype=np.float96) + assert_almost_equal(actual_dt, desired_dt, decimal=14) + assert_(actual_dt.dtype == desired_dt.dtype) + + def test_2D(self): + a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)), + mask=((0,0,0,0),(1,0,0,1),(0,1,1,0))) + actual = mstats.hmean(a) + desired = ma.array((1,2,3,4)) + assert_array_almost_equal(actual, desired, decimal=14) + + actual1 = mstats.hmean(a,axis=-1) + desired = (4./(1/1.+1/2.+1/3.+1/4.), + 2./(1/2.+1/3.), + 2./(1/1.+1/4.) + ) + assert_array_almost_equal(actual1, desired, decimal=14) + + +class TestRanking(object): + def test_ranking(self): + x = ma.array([0,1,1,1,2,3,4,5,5,6,]) + assert_almost_equal(mstats.rankdata(x), + [1,3,3,3,5,6,7,8.5,8.5,10]) + x[[3,4]] = masked + assert_almost_equal(mstats.rankdata(x), + [1,2.5,2.5,0,0,4,5,6.5,6.5,8]) + assert_almost_equal(mstats.rankdata(x, use_missing=True), + [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8]) + x = ma.array([0,1,5,1,2,4,3,5,1,6,]) + assert_almost_equal(mstats.rankdata(x), + [1,3,8.5,3,5,7,6,8.5,3,10]) + x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]]) + assert_almost_equal(mstats.rankdata(x), + [[1,3,3,3,5], [6,7,8.5,8.5,10]]) + assert_almost_equal(mstats.rankdata(x, axis=1), + [[1,3,3,3,5], [1,2,3.5,3.5,5]]) + assert_almost_equal(mstats.rankdata(x,axis=0), + [[1,1,1,1,1], [2,2,2,2,2,]]) + + +class TestCorr(object): + def test_pearsonr(self): + # Tests some computations of Pearson's r + x = ma.arange(10) + with warnings.catch_warnings(): + # The tests in this context are edge cases, with perfect + # correlation or anticorrelation, or totally masked data. + # None of these should trigger a RuntimeWarning. 
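+ # (Promoting RuntimeWarning to an error below means any stray
+ # divide-by-zero inside pearsonr fails the test immediately rather
+ # than just printing a warning.)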
+ warnings.simplefilter("error", RuntimeWarning) + + assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0) + assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0) + + x = ma.array(x, mask=True) + pr = mstats.pearsonr(x, x) + assert_(pr[0] is masked) + assert_(pr[1] is masked) + + x1 = ma.array([-1.0, 0.0, 1.0]) + y1 = ma.array([0, 0, 3]) + r, p = mstats.pearsonr(x1, y1) + assert_almost_equal(r, np.sqrt(3)/2) + assert_almost_equal(p, 1.0/3) + + # (x2, y2) have the same unmasked data as (x1, y1). + mask = [False, False, False, True] + x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask) + y2 = ma.array([0, 0, 3, -1], mask=mask) + r, p = mstats.pearsonr(x2, y2) + assert_almost_equal(r, np.sqrt(3)/2) + assert_almost_equal(p, 1.0/3) + + def test_spearmanr(self): + # Tests some computations of Spearman's rho + (x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95]) + assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555) + (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan]) + (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y)) + assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555) + + x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, + 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7] + y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, + 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4] + assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299) + x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, + 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan] + y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, + 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan] + (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y)) + assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299) + # Next test is to make sure calculation uses sufficient precision. + # The denominator's value is ~n^3 and used to be represented as an + # int. 2000**3 > 2**32 so these arrays would cause overflow on + # some machines. 
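+ # (Recall rho = 1 - 6 * sum(d_i**2) / (n * (n**2 - 1)) for two
+ # rankings of length n with rank differences d_i; the n * (n**2 - 1)
+ # denominator is the ~n**3 term referred to above.)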
+ x = list(range(2000)) + y = list(range(2000)) + y[0], y[9] = y[9], y[0] + y[10], y[434] = y[434], y[10] + y[435], y[1509] = y[1509], y[435] + # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1)) + # = 1 - (1 / 500) + # = 0.998 + assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998) + + # test for namedtuple attributes + res = mstats.spearmanr(x, y) + attributes = ('correlation', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_kendalltau(self): + # simple case without ties + x = ma.array(np.arange(10)) + y = ma.array(np.arange(10)) + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [1.0, 5.511463844797e-07] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # check exception in case of invalid method keyword + assert_raises(ValueError, mstats.kendalltau, x, y, method='banana') + + # swap a couple of values + b = y[1] + y[1] = y[2] + y[2] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [0.9555555555555556, 5.511463844797e-06] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # swap a couple more + b = y[5] + y[5] = y[6] + y[6] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [0.9111111111111111, 2.976190476190e-05] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # same in opposite direction + x = ma.array(np.arange(10)) + y = ma.array(np.arange(10)[::-1]) + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [-1.0, 5.511463844797e-07] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # swap a couple of values + b = y[1] + y[1] = y[2] + y[2] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [-0.9555555555555556, 5.511463844797e-06] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # swap a couple more + b = y[5] + y[5] = y[6] + y[6] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [-0.9111111111111111, 2.976190476190e-05] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # Tests some computations of Kendall's tau + x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan]) + y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan]) + z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan]) + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), + [+0.3333333, 0.75]) + assert_almost_equal(np.asarray(mstats.kendalltau(x, y, method='asymptotic')), + [+0.3333333, 0.4969059]) + assert_almost_equal(np.asarray(mstats.kendalltau(x, z)), + [-0.5477226, 0.2785987]) + # + x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20, + 10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan]) + y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27, + 25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0]) + result = mstats.kendalltau(x, y) + assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009]) + # make sure internal variable use correct precision with + # larger arrays + x = np.arange(2000, dtype=float) + x = ma.masked_greater(x, 1995) + y = np.arange(2000, dtype=float) + y = np.concatenate((y[1000:], y[:1000])) + assert_(np.isfinite(mstats.kendalltau(x, y)[1])) + + # test for namedtuple attributes + res = mstats.kendalltau(x, y) + attributes = ('correlation', 'pvalue') + check_named_results(res, attributes, ma=True) + + def 
test_kendalltau_seasonal(self): + # Tests the seasonal Kendall tau. + x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], + [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], + [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan], + [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]] + x = ma.fix_invalid(x).T + output = mstats.kendalltau_seasonal(x) + assert_almost_equal(output['global p-value (indep)'], 0.008, 3) + assert_almost_equal(output['seasonal p-value'].round(2), + [0.18,0.53,0.20,0.04]) + + def test_pointbiserial(self): + x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0, + 0,0,0,0,1,-1] + y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0, + 2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1, + 0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan] + assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5) + + # test for namedtuple attributes + res = mstats.pointbiserialr(x, y) + attributes = ('correlation', 'pvalue') + check_named_results(res, attributes, ma=True) + + +class TestTrimming(object): + + def test_trim(self): + a = ma.arange(10) + assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9]) + a = ma.arange(10) + assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None]) + a = ma.arange(10) + assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)), + [None,None,None,3,4,5,6,7,None,None]) + a = ma.arange(10) + assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True), + [None,1,2,3,4,5,6,7,None,None]) + + a = ma.arange(12) + a[[0,-1]] = a[5] = masked + assert_equal(mstats.trim(a, (2,8)), + [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None]) + + x = ma.arange(100).reshape(10, 10) + expected = [1]*10 + [0]*70 + [1]*20 + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None) + assert_equal(trimx._mask.ravel(), expected) + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0) + assert_equal(trimx._mask.ravel(), expected) + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1) + assert_equal(trimx._mask.T.ravel(), expected) + + # same as above, but with an extra masked row inserted + x = ma.arange(110).reshape(11, 10) + x[1] = masked + expected = [1]*20 + [0]*70 + [1]*20 + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None) + assert_equal(trimx._mask.ravel(), expected) + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0) + assert_equal(trimx._mask.ravel(), expected) + trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1) + assert_equal(trimx.T._mask.ravel(), expected) + + def test_trim_old(self): + x = ma.arange(100) + assert_equal(mstats.trimboth(x).count(), 60) + assert_equal(mstats.trimtail(x,tail='r').count(), 80) + x[50:70] = masked + trimx = mstats.trimboth(x) + assert_equal(trimx.count(), 48) + assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16) + x._mask = nomask + x.shape = (10,10) + assert_equal(mstats.trimboth(x).count(), 60) + assert_equal(mstats.trimtail(x).count(), 80) + + def test_trimmedmean(self): + data = ma.array([77, 87, 88,114,151,210,219,246,253,262, + 296,299,306,376,428,515,666,1310,2611]) + assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0) + assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0) + assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0) + + def test_trimmed_stde(self): + data = ma.array([77, 87, 88,114,151,210,219,246,253,262, + 296,299,306,376,428,515,666,1310,2611]) + assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5) + assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5) + + def test_winsorization(self): + data = 
ma.array([77, 87, 88,114,151,210,219,246,253,262, + 296,299,306,376,428,515,666,1310,2611]) + assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1), + 21551.4, 1) + assert_almost_equal( + mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1), + 11887.3, 1) + data[5] = masked + winsorized = mstats.winsorize(data) + assert_equal(winsorized.mask, data.mask) + + +class TestMoments(object): + # Comparison numbers are found using R v.1.5.1 + # note that length(testcase) = 4 + # testmathworks comes from documentation for the + # Statistics Toolbox for Matlab and can be found at both + # https://www.mathworks.com/help/stats/kurtosis.html + # https://www.mathworks.com/help/stats/skewness.html + # Note that both test cases came from here. + testcase = [1,2,3,4] + testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965, + np.nan]) + testcase_2d = ma.array( + np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149], + [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407], + [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733], + [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998], + [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]), + mask=np.array([[True, False, False, True, False], + [True, True, True, False, True], + [False, False, False, False, False], + [True, True, True, True, True], + [False, False, True, False, False]], dtype=bool)) + + def test_moment(self): + y = mstats.moment(self.testcase,1) + assert_almost_equal(y,0.0,10) + y = mstats.moment(self.testcase,2) + assert_almost_equal(y,1.25) + y = mstats.moment(self.testcase,3) + assert_almost_equal(y,0.0) + y = mstats.moment(self.testcase,4) + assert_almost_equal(y,2.5625) + + def test_variation(self): + y = mstats.variation(self.testcase) + assert_almost_equal(y,0.44721359549996, 10) + + def test_skewness(self): + y = mstats.skew(self.testmathworks) + assert_almost_equal(y,-0.29322304336607,10) + y = mstats.skew(self.testmathworks,bias=0) + assert_almost_equal(y,-0.437111105023940,10) + y = mstats.skew(self.testcase) + assert_almost_equal(y,0.0,10) + + def test_kurtosis(self): + # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis + # for compatibility with Matlab) + y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1) + assert_almost_equal(y, 2.1658856802973, 10) + # Note that MATLAB has confusing docs for the following case + # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness + # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3) + # The MATLAB docs imply that both should give Fisher's + y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0) + assert_almost_equal(y, 3.663542721189047, 10) + y = mstats.kurtosis(self.testcase, 0, 0) + assert_almost_equal(y, 1.64) + + # test that kurtosis works on multidimensional masked arrays + correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0., + -1.26979517952]), + mask=np.array([False, False, False, True, + False], dtype=bool)) + assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1), + correct_2d) + for i, row in enumerate(self.testcase_2d): + assert_almost_equal(mstats.kurtosis(row), correct_2d[i]) + + correct_2d_bias_corrected = ma.array( + np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]), + mask=np.array([False, False, False, True, False], dtype=bool)) + assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1, + bias=False), + correct_2d_bias_corrected) + for i, row in enumerate(self.testcase_2d): + 
assert_almost_equal(mstats.kurtosis(row, bias=False), + correct_2d_bias_corrected[i]) + + # Check consistency between stats and mstats implementations + assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]), + stats.kurtosis(self.testcase_2d[2, :]), + nulp=4) + + def test_mode(self): + a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7] + a2 = np.reshape(a1, (3,5)) + a3 = np.array([1,2,3,4,5,6]) + a4 = np.reshape(a3, (3,2)) + ma1 = ma.masked_where(ma.array(a1) > 2, a1) + ma2 = ma.masked_where(a2 > 2, a2) + ma3 = ma.masked_where(a3 < 2, a3) + ma4 = ma.masked_where(ma.array(a4) < 2, a4) + assert_equal(mstats.mode(a1, axis=None), (3,4)) + assert_equal(mstats.mode(a1, axis=0), (3,4)) + assert_equal(mstats.mode(ma1, axis=None), (0,3)) + assert_equal(mstats.mode(a2, axis=None), (3,4)) + assert_equal(mstats.mode(ma2, axis=None), (0,3)) + assert_equal(mstats.mode(a3, axis=None), (1,1)) + assert_equal(mstats.mode(ma3, axis=None), (2,1)) + assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]])) + assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]])) + assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]])) + assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]])) + assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]])) + assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]])) + + a1_res = mstats.mode(a1, axis=None) + + # test for namedtuple attributes + attributes = ('mode', 'count') + check_named_results(a1_res, attributes, ma=True) + + def test_mode_modifies_input(self): + # regression test for gh-6428: mode(..., axis=None) may not modify + # the input array + im = np.zeros((100, 100)) + im[:50, :] += 1 + im[:, :50] += 1 + cp = im.copy() + a = mstats.mode(im, None) + assert_equal(im, cp) + + +class TestPercentile(object): + def setup_method(self): + self.a1 = [3, 4, 5, 10, -3, -5, 6] + self.a2 = [3, -6, -2, 8, 7, 4, 2, 1] + self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0] + + def test_percentile(self): + x = np.arange(8) * 0.5 + assert_equal(mstats.scoreatpercentile(x, 0), 0.) 
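+        # Cross-check (added for clarity, not part of the upstream test):
+        # x holds 0.0, 0.5, ..., 3.5, so the 50% score asserted below
+        # should equal the plain sample median.
+        assert_equal(np.median(x), 1.75)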
+ assert_equal(mstats.scoreatpercentile(x, 100), 3.5) + assert_equal(mstats.scoreatpercentile(x, 50), 1.75) + + def test_2D(self): + x = ma.array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_equal(mstats.scoreatpercentile(x, 50), [1, 1, 1]) + + +class TestVariability(object): + """ Comparison numbers are found using R v.1.5.1 + note that length(testcase) = 4 + """ + testcase = ma.fix_invalid([1,2,3,4,np.nan]) + + def test_sem(self): + # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3) + y = mstats.sem(self.testcase) + assert_almost_equal(y, 0.6454972244) + n = self.testcase.count() + assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)), + mstats.sem(self.testcase, ddof=2)) + + def test_zmap(self): + # This is not in R, so tested by using: + # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4) + y = mstats.zmap(self.testcase, self.testcase) + desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996, + 0.44721359549996, 1.3416407864999]) + assert_array_almost_equal(desired_unmaskedvals, + y.data[y.mask == False], decimal=12) + + def test_zscore(self): + # This is not in R, so tested by using: + # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4) + y = mstats.zscore(self.testcase) + desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996, + 0.44721359549996, 1.3416407864999, np.nan]) + assert_almost_equal(desired, y, decimal=12) + + +class TestMisc(object): + + def test_obrientransform(self): + args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2, + [6]+[7]*2+[8]*4+[9]*9+[10]*16] + result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538], + [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]] + assert_almost_equal(np.round(mstats.obrientransform(*args).T,4), + result,4) + + def test_kstwosamp(self): + x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], + [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], + [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan], + [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]] + x = ma.fix_invalid(x).T + (winter,spring,summer,fall) = x.T + + assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring),4), + (0.1818,0.9892)) + assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'g'),4), + (0.1469,0.7734)) + assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'l'),4), + (0.1818,0.6744)) + + def test_friedmanchisq(self): + # No missing values + args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0], + [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0], + [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0]) + result = mstats.friedmanchisquare(*args) + assert_almost_equal(result[0], 10.4737, 4) + assert_almost_equal(result[1], 0.005317, 6) + # Missing values + x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], + [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], + [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan], + [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]] + x = ma.fix_invalid(x) + result = mstats.friedmanchisquare(*x) + assert_almost_equal(result[0], 2.0156, 4) + assert_almost_equal(result[1], 0.5692, 4) + + # test for namedtuple attributes + attributes = ('statistic', 'pvalue') + check_named_results(result, attributes, ma=True) + + +def test_regress_simple(): + # Regress a line with sinusoidal noise. Test for #1273. 
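+    # Note (added for clarity): the data are built from the line
+    # y = 0.2*x + 10 plus a bounded sin() term, so the fitted slope and
+    # intercept asserted below land close to 0.2 and 10.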
+    x = np.linspace(0, 100, 100)
+    y = 0.2 * np.linspace(0, 100, 100) + 10
+    y += np.sin(np.linspace(0, 20, 100))
+
+    slope, intercept, r_value, p_value, sterr = mstats.linregress(x, y)
+    assert_almost_equal(slope, 0.19644990055858422)
+    assert_almost_equal(intercept, 10.211269918932341)
+
+    # test for namedtuple attributes
+    res = mstats.linregress(x, y)
+    attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
+    check_named_results(res, attributes, ma=True)
+
+
+def test_theilslopes():
+    # Test for basic slope and intercept.
+    slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1])
+    assert_almost_equal(slope, 0.5)
+    assert_almost_equal(intercept, 0.5)
+
+    # Test for correct masking.
+    y = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False])
+    slope, intercept, lower, upper = mstats.theilslopes(y)
+    assert_almost_equal(slope, 1./3)
+    assert_almost_equal(intercept, 2./3)
+
+    # Test of confidence intervals from example in Sen (1968).
+    x = [1, 2, 3, 4, 10, 12, 18]
+    y = [9, 15, 19, 20, 45, 55, 78]
+    slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)
+    assert_almost_equal(slope, 4)
+    assert_almost_equal(upper, 4.38, decimal=2)
+    assert_almost_equal(lower, 3.71, decimal=2)
+
+
+def test_siegelslopes():
+    # method should be exact for a straight line
+    y = 2 * np.arange(10) + 0.5
+    assert_equal(mstats.siegelslopes(y), (2.0, 0.5))
+    assert_equal(mstats.siegelslopes(y, method='separate'), (2.0, 0.5))
+
+    x = 2 * np.arange(10)
+    y = 5 * x - 3.0
+    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))
+    assert_equal(mstats.siegelslopes(y, x, method='separate'), (5.0, -3.0))
+
+    # method is robust to outliers: breakdown point of 50%
+    y[:4] = 1000
+    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))
+
+    # if there are no outliers, results should be comparable to linregress
+    np.random.seed(231)
+    x = np.arange(10)
+    y = -2.3 + 0.3*x + stats.norm.rvs(size=10)
+    slope_ols, intercept_ols, _, _, _ = stats.linregress(x, y)
+
+    slope, intercept = mstats.siegelslopes(y, x)
+    assert_allclose(slope, slope_ols, rtol=0.1)
+    assert_allclose(intercept, intercept_ols, rtol=0.1)
+
+    slope, intercept = mstats.siegelslopes(y, x, method='separate')
+    assert_allclose(slope, slope_ols, rtol=0.1)
+    assert_allclose(intercept, intercept_ols, rtol=0.1)
+
+
+def test_plotting_positions():
+    # Regression test for #1256
+    pos = mstats.plotting_positions(np.arange(3), 0, 0)
+    assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75]))
+
+
+class TestNormalitytests():
+
+    def test_vs_nonmasked(self):
+        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+        assert_array_almost_equal(mstats.normaltest(x),
+                                  stats.normaltest(x))
+        assert_array_almost_equal(mstats.skewtest(x),
+                                  stats.skewtest(x))
+        assert_array_almost_equal(mstats.kurtosistest(x),
+                                  stats.kurtosistest(x))
+
+        funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
+        mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
+        x = [1, 2, 3, 4]
+        for func, mfunc in zip(funcs, mfuncs):
+            assert_raises(ValueError, func, x)
+            assert_raises(ValueError, mfunc, x)
+
+    def test_axis_None(self):
+        # Test axis=None (equal to axis=0 for 1-D input)
+        x = np.array((-2,-1,0,1,2,3)*4)**2
+        assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
+        assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
+        assert_allclose(mstats.kurtosistest(x, axis=None),
+                        mstats.kurtosistest(x))
+
+    def test_maskedarray_input(self):
+        # Add some masked values, test result doesn't change
+        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
xm = np.ma.array(np.r_[np.inf, x, 10], + mask=np.r_[True, [False] * x.size, True]) + assert_allclose(mstats.normaltest(xm), stats.normaltest(x)) + assert_allclose(mstats.skewtest(xm), stats.skewtest(x)) + assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x)) + + def test_nd_input(self): + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + x_2d = np.vstack([x] * 2).T + for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]: + res_1d = func(x) + res_2d = func(x_2d) + assert_allclose(res_2d[0], [res_1d[0]] * 2) + assert_allclose(res_2d[1], [res_1d[1]] * 2) + + def test_normaltest_result_attributes(self): + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + res = mstats.normaltest(x) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_kurtosistest_result_attributes(self): + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + res = mstats.kurtosistest(x) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def regression_test_9033(self): + # x cleary non-normal but power of negtative denom needs + # to be handled correctly to reject normality + counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167] + x = np.hstack([np.full(c, i) for i, c in enumerate(counts)]) + assert_equal(mstats.kurtosistest(x)[1] < 0.01, True) + + +class TestFOneway(): + def test_result_attributes(self): + a = np.array([655, 788], dtype=np.uint16) + b = np.array([789, 772], dtype=np.uint16) + res = mstats.f_oneway(a, b) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + +class TestMannwhitneyu(): + def test_result_attributes(self): + x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1.]) + + y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1., + 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., + 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., + 2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2., + 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., + 1., 1., 1., 1.]) + + res = mstats.mannwhitneyu(x, y) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + +class TestKruskal(): + def test_result_attributes(self): + x = [1, 3, 5, 7, 9] + y = [2, 
4, 6, 8, 10] + + res = mstats.kruskal(x, y) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + +#TODO: for all ttest functions, add tests with masked array inputs +class TestTtest_rel(): + def test_vs_nonmasked(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + # 1-D inputs + res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1]) + res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1]) + assert_allclose(res1, res2) + + # 2-D inputs + res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None) + res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None) + assert_allclose(res1, res2) + res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0) + res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0) + assert_allclose(res1, res2) + + # Check default is axis=0 + res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:]) + assert_allclose(res2, res3) + + def test_fully_masked(self): + np.random.seed(1234567) + outcome = ma.masked_array(np.random.randn(3, 2), + mask=[[1, 1, 1], [0, 0, 0]]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]: + t, p = mstats.ttest_rel(*pair) + assert_array_equal(t, (np.nan, np.nan)) + assert_array_equal(p, (np.nan, np.nan)) + + def test_result_attributes(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1]) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_invalid_input_size(self): + assert_raises(ValueError, mstats.ttest_rel, + np.arange(10), np.arange(11)) + x = np.arange(24) + assert_raises(ValueError, mstats.ttest_rel, + x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1) + assert_raises(ValueError, mstats.ttest_rel, + x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2) + + def test_empty(self): + res1 = mstats.ttest_rel([], []) + assert_(np.all(np.isnan(res1))) + + def test_zero_division(self): + t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1]) + assert_equal((np.abs(t), p), (np.inf, 0)) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0]) + assert_array_equal(t, np.array([np.nan, np.nan])) + assert_array_equal(p, np.array([np.nan, np.nan])) + + +class TestTtest_ind(): + def test_vs_nonmasked(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + # 1-D inputs + res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1]) + res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1]) + assert_allclose(res1, res2) + + # 2-D inputs + res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None) + res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None) + assert_allclose(res1, res2) + res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0) + res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0) + assert_allclose(res1, res2) + + # Check default is axis=0 + res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:]) + assert_allclose(res2, res3) + + # Check equal_var + res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True) + res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True) + assert_allclose(res4, res5) + res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False) + res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False) + 
assert_allclose(res4, res5) + + def test_fully_masked(self): + np.random.seed(1234567) + outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]: + t, p = mstats.ttest_ind(*pair) + assert_array_equal(t, (np.nan, np.nan)) + assert_array_equal(p, (np.nan, np.nan)) + + def test_result_attributes(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1]) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_empty(self): + res1 = mstats.ttest_ind([], []) + assert_(np.all(np.isnan(res1))) + + def test_zero_division(self): + t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1]) + assert_equal((np.abs(t), p), (np.inf, 0)) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0]) + assert_array_equal(t, (np.nan, np.nan)) + assert_array_equal(p, (np.nan, np.nan)) + + t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False) + assert_equal((np.abs(t), p), (np.inf, 0)) + assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0], + equal_var=False), (np.nan, np.nan)) + + +class TestTtest_1samp(): + def test_vs_nonmasked(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + # 1-D inputs + res1 = stats.ttest_1samp(outcome[:, 0], 1) + res2 = mstats.ttest_1samp(outcome[:, 0], 1) + assert_allclose(res1, res2) + + # 2-D inputs + res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None) + res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None) + assert_allclose(res1, res2) + res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0) + res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0) + assert_allclose(res1, res2) + + # Check default is axis=0 + res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:]) + assert_allclose(res2, res3) + + def test_fully_masked(self): + np.random.seed(1234567) + outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1]) + expected = (np.nan, np.nan) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]: + t, p = mstats.ttest_1samp(*pair) + assert_array_equal(p, expected) + assert_array_equal(t, expected) + + def test_result_attributes(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + res = mstats.ttest_1samp(outcome[:, 0], 1) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_empty(self): + res1 = mstats.ttest_1samp([], 1) + assert_(np.all(np.isnan(res1))) + + def test_zero_division(self): + t, p = mstats.ttest_1samp([0, 0, 0], 1) + assert_equal((np.abs(t), p), (np.inf, 0)) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + t, p = mstats.ttest_1samp([0, 0, 0], 0) + assert_(np.isnan(t)) + assert_array_equal(p, (np.nan, np.nan)) + + +class TestCompareWithStats(object): + """ + Class to compare mstats results with stats results. + + It is in general assumed that scipy.stats is at a more mature stage than + stats.mstats. 
If a routine in mstats produces results similar to those in
+    scipy.stats, that is considered a proper validation of the scipy.mstats
+    routine as well.
+
+    Different sample sizes are used for testing, as some problems between stats
+    and mstats are dependent on sample size.
+
+    Author: Alexander Loew
+
+    NOTE that some tests fail. This might be caused by
+    a) actual differences or bugs between stats and mstats
+    b) numerical inaccuracies
+    c) different definitions of routine interfaces
+
+    These failures need to be checked. The current workaround is to disable
+    these tests and report the issues on scipy-dev.
+
+    """
+    def get_n(self):
+        """ Returns list of sample sizes to be used for comparison. """
+        return [1000, 100, 10, 5]
+
+    def generate_xy_sample(self, n):
+        # This routine generates numpy arrays and corresponding masked arrays
+        # with the same data, but additional masked values
+        np.random.seed(1234567)
+        x = np.random.randn(n)
+        y = x + np.random.randn(n)
+        xm = np.ones(len(x) + 5) * 1e16
+        ym = np.ones(len(y) + 5) * 1e16
+        xm[0:len(x)] = x
+        ym[0:len(y)] = y
+        mask = xm > 9e15
+        xm = np.ma.array(xm, mask=mask)
+        ym = np.ma.array(ym, mask=mask)
+        return x, y, xm, ym
+
+    def generate_xy_sample2D(self, n, nx):
+        x = np.ones((n, nx)) * np.nan
+        y = np.ones((n, nx)) * np.nan
+        xm = np.ones((n+5, nx)) * np.nan
+        ym = np.ones((n+5, nx)) * np.nan
+
+        for i in range(nx):
+            x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)
+
+        xm[0:n, :] = x[0:n]
+        ym[0:n, :] = y[0:n]
+        xm = np.ma.array(xm, mask=np.isnan(xm))
+        ym = np.ma.array(ym, mask=np.isnan(ym))
+        return x, y, xm, ym
+
+    def test_linregress(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            res1 = stats.linregress(x, y)
+            res2 = stats.mstats.linregress(xm, ym)
+            assert_allclose(np.asarray(res1), np.asarray(res2))
+
+    def test_pearsonr(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r, p = stats.pearsonr(x, y)
+            rm, pm = stats.mstats.pearsonr(xm, ym)
+
+            assert_almost_equal(r, rm, decimal=14)
+            assert_almost_equal(p, pm, decimal=14)
+
+    def test_spearmanr(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r, p = stats.spearmanr(x, y)
+            rm, pm = stats.mstats.spearmanr(xm, ym)
+            assert_almost_equal(r, rm, 14)
+            assert_almost_equal(p, pm, 14)
+
+    def test_spearmanr_backcompat_useties(self):
+        # A regression test to ensure we don't break backwards compat
+        # more than we have to (see gh-9204).
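+        # Note (added for clarity, an assumption based on gh-9204): the
+        # third positional argument was historically `use_ties`; passing
+        # False is no longer supported, hence the expected ValueError.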
+ x = np.arange(6) + assert_raises(ValueError, mstats.spearmanr, x, x, False) + + def test_gmean(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.gmean(abs(x)) + rm = stats.mstats.gmean(abs(xm)) + assert_allclose(r, rm, rtol=1e-13) + + r = stats.gmean(abs(y)) + rm = stats.mstats.gmean(abs(ym)) + assert_allclose(r, rm, rtol=1e-13) + + def test_hmean(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + + r = stats.hmean(abs(x)) + rm = stats.mstats.hmean(abs(xm)) + assert_almost_equal(r, rm, 10) + + r = stats.hmean(abs(y)) + rm = stats.mstats.hmean(abs(ym)) + assert_almost_equal(r, rm, 10) + + def test_skew(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + + r = stats.skew(x) + rm = stats.mstats.skew(xm) + assert_almost_equal(r, rm, 10) + + r = stats.skew(y) + rm = stats.mstats.skew(ym) + assert_almost_equal(r, rm, 10) + + def test_moment(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + + r = stats.moment(x) + rm = stats.mstats.moment(xm) + assert_almost_equal(r, rm, 10) + + r = stats.moment(y) + rm = stats.mstats.moment(ym) + assert_almost_equal(r, rm, 10) + + def test_zscore(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + + # reference solution + zx = (x - x.mean()) / x.std() + zy = (y - y.mean()) / y.std() + + # validate stats + assert_allclose(stats.zscore(x), zx, rtol=1e-10) + assert_allclose(stats.zscore(y), zy, rtol=1e-10) + + # compare stats and mstats + assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]), + rtol=1e-10) + assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]), + rtol=1e-10) + + def test_kurtosis(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.kurtosis(x) + rm = stats.mstats.kurtosis(xm) + assert_almost_equal(r, rm, 10) + + r = stats.kurtosis(y) + rm = stats.mstats.kurtosis(ym) + assert_almost_equal(r, rm, 10) + + def test_sem(self): + # example from stats.sem doc + a = np.arange(20).reshape(5, 4) + am = np.ma.array(a) + r = stats.sem(a, ddof=1) + rm = stats.mstats.sem(am, ddof=1) + + assert_allclose(r, 2.82842712, atol=1e-5) + assert_allclose(rm, 2.82842712, atol=1e-5) + + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0), + stats.sem(x, axis=None, ddof=0), decimal=13) + assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0), + stats.sem(y, axis=None, ddof=0), decimal=13) + assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1), + stats.sem(x, axis=None, ddof=1), decimal=13) + assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1), + stats.sem(y, axis=None, ddof=1), decimal=13) + + def test_describe(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.describe(x, ddof=1) + rm = stats.mstats.describe(xm, ddof=1) + for ii in range(6): + assert_almost_equal(np.asarray(r[ii]), + np.asarray(rm[ii]), + decimal=12) + + def test_describe_result_attributes(self): + actual = mstats.describe(np.arange(5)) + attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness', + 'kurtosis') + check_named_results(actual, attributes, ma=True) + + def test_rankdata(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.rankdata(x) + rm = stats.mstats.rankdata(x) + assert_allclose(r, rm) + + def test_tmean(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + 
assert_almost_equal(stats.tmean(x), stats.mstats.tmean(xm), 14)
+            assert_almost_equal(stats.tmean(y), stats.mstats.tmean(ym), 14)
+
+    def test_tmax(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.tmax(x, 2.),
+                                stats.mstats.tmax(xm, 2.), 10)
+            assert_almost_equal(stats.tmax(y, 2.),
+                                stats.mstats.tmax(ym, 2.), 10)
+
+            assert_almost_equal(stats.tmax(x, upperlimit=3.),
+                                stats.mstats.tmax(xm, upperlimit=3.), 10)
+            assert_almost_equal(stats.tmax(y, upperlimit=3.),
+                                stats.mstats.tmax(ym, upperlimit=3.), 10)
+
+    def test_tmin(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_equal(stats.tmin(x), stats.mstats.tmin(xm))
+            assert_equal(stats.tmin(y), stats.mstats.tmin(ym))
+
+            assert_almost_equal(stats.tmin(x, lowerlimit=-1.),
+                                stats.mstats.tmin(xm, lowerlimit=-1.), 10)
+            assert_almost_equal(stats.tmin(y, lowerlimit=-1.),
+                                stats.mstats.tmin(ym, lowerlimit=-1.), 10)
+
+    def test_zmap(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            z = stats.zmap(x, y)
+            zm = stats.mstats.zmap(xm, ym)
+            assert_allclose(z, zm[0:len(z)], atol=1e-10)
+
+    def test_variation(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
+                                decimal=12)
+            assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
+                                decimal=12)
+
+    def test_tvar(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
+                                decimal=12)
+            assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
+                                decimal=12)
+
+    def test_trimboth(self):
+        a = np.arange(20)
+        b = stats.trimboth(a, 0.1)
+        bm = stats.mstats.trimboth(a, 0.1)
+        assert_allclose(np.sort(b), bm.data[~bm.mask])
+
+    def test_tsem(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm),
+                                decimal=14)
+            assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym),
+                                decimal=14)
+            assert_almost_equal(stats.tsem(x, limits=(-2., 2.)),
+                                stats.mstats.tsem(xm, limits=(-2., 2.)),
+                                decimal=14)
+
+    def test_skewtest(self):
+        # this test is for 1D data
+        for n in self.get_n():
+            if n > 8:
+                x, y, xm, ym = self.generate_xy_sample(n)
+                r = stats.skewtest(x)
+                rm = stats.mstats.skewtest(xm)
+                assert_allclose(r[0], rm[0], rtol=1e-15)
+                # TODO: the p-value comparison is disabled; it is a known
+                # issue that mstats returns a slightly different p-value.
+                # Oddly, other tests such as test_maskedarray_input do not
+                # fail.
+                #~ assert_almost_equal(r[1], rm[1])
+
+    def test_skewtest_result_attributes(self):
+        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+        res = mstats.skewtest(x)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+    def test_skewtest_2D_notmasked(self):
+        # a normal ndarray is passed to the masked function
+        x = np.random.random((20, 2)) * 20.
+ r = stats.skewtest(x) + rm = stats.mstats.skewtest(x) + assert_allclose(np.asarray(r), np.asarray(rm)) + + def test_skewtest_2D_WithMask(self): + nx = 2 + for n in self.get_n(): + if n > 8: + x, y, xm, ym = self.generate_xy_sample2D(n, nx) + r = stats.skewtest(x) + rm = stats.mstats.skewtest(xm) + + assert_equal(r[0][0], rm[0][0]) + assert_equal(r[0][1], rm[0][1]) + + def test_normaltest(self): + np.seterr(over='raise') + with suppress_warnings() as sup: + sup.filter(UserWarning, "kurtosistest only valid for n>=20") + for n in self.get_n(): + if n > 8: + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.normaltest(x) + rm = stats.mstats.normaltest(xm) + assert_allclose(np.asarray(r), np.asarray(rm)) + + def test_find_repeats(self): + x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float') + tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]).astype('float') + mask = (tmp == 5.) + xm = np.ma.array(tmp, mask=mask) + x_orig, xm_orig = x.copy(), xm.copy() + + r = stats.find_repeats(x) + rm = stats.mstats.find_repeats(xm) + + assert_equal(r, rm) + assert_equal(x, x_orig) + assert_equal(xm, xm_orig) + + # This crazy behavior is expected by count_tied_groups, but is not + # in the docstring... + _, counts = stats.mstats.find_repeats([]) + assert_equal(counts, np.array(0, dtype=np.intp)) + + def test_kendalltau(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.kendalltau(x, y) + rm = stats.mstats.kendalltau(xm, ym) + assert_almost_equal(r[0], rm[0], decimal=10) + assert_almost_equal(r[1], rm[1], decimal=7) + + def test_obrientransform(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.obrientransform(x) + rm = stats.mstats.obrientransform(xm) + assert_almost_equal(r.T, rm[0:len(x)]) + + +class TestBrunnerMunzel(object): + # Data from (Lumley, 1996) + X = np.ma.masked_invalid([1, 2, 1, 1, 1, np.nan, 1, 1, + 1, 1, 1, 2, 4, 1, 1, np.nan]) + Y = np.ma.masked_invalid([3, 3, 4, 3, np.nan, 1, 2, 3, 1, 1, 5, 4]) + significant = 14 + + def test_brunnermunzel_one_sided(self): + # Results are compared with R's lawstat package. + u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='less') + u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='greater') + u3, p3 = mstats.brunnermunzel(self.X, self.Y, alternative='greater') + u4, p4 = mstats.brunnermunzel(self.Y, self.X, alternative='less') + + assert_almost_equal(p1, p2, decimal=self.significant) + assert_almost_equal(p3, p4, decimal=self.significant) + assert_(p1 != p3) + assert_almost_equal(u1, 3.1374674823029505, + decimal=self.significant) + assert_almost_equal(u2, -3.1374674823029505, + decimal=self.significant) + assert_almost_equal(u3, 3.1374674823029505, + decimal=self.significant) + assert_almost_equal(u4, -3.1374674823029505, + decimal=self.significant) + assert_almost_equal(p1, 0.0028931043330757342, + decimal=self.significant) + assert_almost_equal(p3, 0.99710689566692423, + decimal=self.significant) + + def test_brunnermunzel_two_sided(self): + # Results are compared with R's lawstat package. 
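+        # Note (added for clarity): with the symmetric t reference
+        # distribution the two-sided p-value is twice the smaller one-sided
+        # one; compare 0.0057862086661515377 below with
+        # 0.0028931043330757342 in the one-sided test above.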
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='two-sided')
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='two-sided')
+
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0057862086661515377,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_default(self):
+        # The default value for alternative is two-sided
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y)
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X)
+
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0057862086661515377,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_alternative_error(self):
+        alternative = "error"
+        distribution = "t"
+        assert_(alternative not in ["two-sided", "greater", "less"])
+        assert_raises(ValueError,
+                      mstats.brunnermunzel,
+                      self.X,
+                      self.Y,
+                      alternative,
+                      distribution)
+
+    def test_brunnermunzel_distribution_norm(self):
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y, distribution="normal")
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X, distribution="normal")
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0017041417600383024,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_distribution_error(self):
+        alternative = "two-sided"
+        distribution = "error"
+        assert_(distribution not in ["t", "normal"])
+        assert_raises(ValueError,
+                      mstats.brunnermunzel,
+                      self.X,
+                      self.Y,
+                      alternative,
+                      distribution)
+
+    def test_brunnermunzel_empty_input(self):
+        u1, p1 = mstats.brunnermunzel(self.X, [])
+        u2, p2 = mstats.brunnermunzel([], self.Y)
+        u3, p3 = mstats.brunnermunzel([], [])
+
+        assert_(np.isnan(u1))
+        assert_(np.isnan(p1))
+        assert_(np.isnan(u2))
+        assert_(np.isnan(p2))
+        assert_(np.isnan(u3))
+        assert_(np.isnan(p3))
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_basic.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_basic.pyc
new file mode 100644
index 0000000..e3837fc
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_basic.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_extras.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_extras.py
new file mode 100644
index 0000000..42542d1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_extras.py
@@ -0,0 +1,136 @@
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+import numpy.ma as ma
+import scipy.stats.mstats as ms
+
+from numpy.testing import (assert_equal, assert_almost_equal, assert_,
+                           assert_allclose)
+
+
+def test_compare_medians_ms():
+    x = np.arange(7)
+    y = x + 10
+    assert_almost_equal(ms.compare_medians_ms(x, y), 0)
+
+    y2 = np.linspace(0, 1, num=10)
+    assert_almost_equal(ms.compare_medians_ms(x, y2), 0.017116406778)
+
+
+def test_hdmedian():
+    # 1-D array
+    x = ma.arange(11)
+    assert_allclose(ms.hdmedian(x), 5, rtol=1e-14)
+    x.mask = ma.make_mask(x)
+    x.mask[:7] = False
+    assert_allclose(ms.hdmedian(x), 3, 
rtol=1e-14) + + # Check that `var` keyword returns a value. TODO: check whether returned + # value is actually correct. + assert_(ms.hdmedian(x, var=True).size == 2) + + # 2-D array + x2 = ma.arange(22).reshape((11, 2)) + assert_allclose(ms.hdmedian(x2, axis=0), [10, 11]) + x2.mask = ma.make_mask(x2) + x2.mask[:7, :] = False + assert_allclose(ms.hdmedian(x2, axis=0), [6, 7]) + + +def test_rsh(): + np.random.seed(132345) + x = np.random.randn(100) + res = ms.rsh(x) + # Just a sanity check that the code runs and output shape is correct. + # TODO: check that implementation is correct. + assert_(res.shape == x.shape) + + # Check points keyword + res = ms.rsh(x, points=[0, 1.]) + assert_(res.size == 2) + + +def test_mjci(): + # Tests the Marits-Jarrett estimator + data = ma.array([77, 87, 88,114,151,210,219,246,253,262, + 296,299,306,376,428,515,666,1310,2611]) + assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5) + + +def test_trimmed_mean_ci(): + # Tests the confidence intervals of the trimmed mean. + data = ma.array([545,555,558,572,575,576,578,580, + 594,605,635,651,653,661,666]) + assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1) + assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1), + [561.8, 630.6]) + + +def test_idealfourths(): + # Tests ideal-fourths + test = np.arange(100) + assert_almost_equal(np.asarray(ms.idealfourths(test)), + [24.416667,74.583333],6) + test_2D = test.repeat(3).reshape(-1,3) + assert_almost_equal(ms.idealfourths(test_2D, axis=0), + [[24.416667,24.416667,24.416667], + [74.583333,74.583333,74.583333]],6) + assert_almost_equal(ms.idealfourths(test_2D, axis=1), + test.repeat(2).reshape(-1,2)) + test = [0, 0] + _result = ms.idealfourths(test) + assert_(np.isnan(_result).all()) + + +class TestQuantiles(object): + data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014, + 0.887764025,0.239407086,0.349638551,0.972791145,0.149789972, + 0.936947700,0.132359948,0.046041972,0.641675031,0.945530547, + 0.224218684,0.771450991,0.820257774,0.336458052,0.589113496, + 0.509736129,0.696838829,0.491323573,0.622767425,0.775189248, + 0.641461450,0.118455200,0.773029450,0.319280007,0.752229111, + 0.047841438,0.466295911,0.583850781,0.840581845,0.550086491, + 0.466470062,0.504765074,0.226855960,0.362641207,0.891620942, + 0.127898691,0.490094097,0.044882048,0.041441695,0.317976349, + 0.504135618,0.567353033,0.434617473,0.636243375,0.231803616, + 0.230154113,0.160011327,0.819464108,0.854706985,0.438809221, + 0.487427267,0.786907310,0.408367937,0.405534192,0.250444460, + 0.995309248,0.144389588,0.739947527,0.953543606,0.680051621, + 0.388382017,0.863530727,0.006514031,0.118007779,0.924024803, + 0.384236354,0.893687694,0.626534881,0.473051932,0.750134705, + 0.241843555,0.432947602,0.689538104,0.136934797,0.150206859, + 0.474335206,0.907775349,0.525869295,0.189184225,0.854284286, + 0.831089744,0.251637345,0.587038213,0.254475554,0.237781276, + 0.827928620,0.480283781,0.594514455,0.213641488,0.024194386, + 0.536668589,0.699497811,0.892804071,0.093835427,0.731107772] + + def test_hdquantiles(self): + data = self.data + assert_almost_equal(ms.hdquantiles(data,[0., 1.]), + [0.006514031, 0.995309248]) + hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75]) + assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,]) + hdq = ms.hdquantiles_sd(data,[0.25, 0.5, 0.75]) + assert_almost_equal(hdq, [0.03786954, 0.03805389, 0.03800152,], 4) + + data = np.array(data).reshape(10,10) + hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0) + assert_almost_equal(hdq[:,0], 
ms.hdquantiles(data[:,0],[0.25,0.5,0.75])) + assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75])) + hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True) + assert_almost_equal(hdq[...,0], + ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True)) + assert_almost_equal(hdq[...,-1], + ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True)) + + def test_hdquantiles_sd(self): + # Only test that code runs, implementation not checked for correctness + res = ms.hdquantiles_sd(self.data) + assert_(res.size == 3) + + def test_mquantiles_cimj(self): + # Only test that code runs, implementation not checked for correctness + ci_lower, ci_upper = ms.mquantiles_cimj(self.data) + assert_(ci_lower.size == ci_upper.size == 3) + + diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_extras.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_extras.pyc new file mode 100644 index 0000000..dcc395d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_extras.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_multivariate.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_multivariate.py new file mode 100644 index 0000000..752d12d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_multivariate.py @@ -0,0 +1,1675 @@ +""" +Test functions for multivariate normal distributions. + +""" +from __future__ import division, print_function, absolute_import + +import pickle + +from numpy.testing import (assert_allclose, assert_almost_equal, + assert_array_almost_equal, assert_equal, + assert_array_less, assert_) +import pytest +from pytest import raises as assert_raises + +from .test_continuous_basic import check_distribution_rvs + +import numpy +import numpy as np + +import scipy.linalg +from scipy.stats._multivariate import _PSD, _lnB, _cho_inv_batch +from scipy.stats import multivariate_normal +from scipy.stats import matrix_normal +from scipy.stats import special_ortho_group, ortho_group +from scipy.stats import random_correlation +from scipy.stats import unitary_group +from scipy.stats import dirichlet, beta +from scipy.stats import wishart, multinomial, invwishart, chi2, invgamma +from scipy.stats import norm, uniform +from scipy.stats import ks_2samp, kstest +from scipy.stats import binom + +from scipy.integrate import romb +from scipy.special import multigammaln + +from .common_tests import check_random_state_property + + +class TestMultivariateNormal(object): + def test_input_shape(self): + mu = np.arange(3) + cov = np.identity(2) + assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov) + assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov) + assert_raises(ValueError, multivariate_normal.cdf, (0, 1), mu, cov) + assert_raises(ValueError, multivariate_normal.cdf, (0, 1, 2), mu, cov) + + def test_scalar_values(self): + np.random.seed(1234) + + # When evaluated on scalar data, the pdf should return a scalar + x, mean, cov = 1.5, 1.7, 2.5 + pdf = multivariate_normal.pdf(x, mean, cov) + assert_equal(pdf.ndim, 0) + + # When evaluated on a single vector, the pdf should return a scalar + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) # Diagonal values for cov. 
matrix + pdf = multivariate_normal.pdf(x, mean, cov) + assert_equal(pdf.ndim, 0) + + # When evaluated on scalar data, the cdf should return a scalar + x, mean, cov = 1.5, 1.7, 2.5 + cdf = multivariate_normal.cdf(x, mean, cov) + assert_equal(cdf.ndim, 0) + + # When evaluated on a single vector, the cdf should return a scalar + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) # Diagonal values for cov. matrix + cdf = multivariate_normal.cdf(x, mean, cov) + assert_equal(cdf.ndim, 0) + + def test_logpdf(self): + # Check that the log of the pdf is in fact the logpdf + np.random.seed(1234) + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) + d1 = multivariate_normal.logpdf(x, mean, cov) + d2 = multivariate_normal.pdf(x, mean, cov) + assert_allclose(d1, np.log(d2)) + + def test_logpdf_default_values(self): + # Check that the log of the pdf is in fact the logpdf + # with default parameters Mean=None and cov = 1 + np.random.seed(1234) + x = np.random.randn(5) + d1 = multivariate_normal.logpdf(x) + d2 = multivariate_normal.pdf(x) + # check whether default values are being used + d3 = multivariate_normal.logpdf(x, None, 1) + d4 = multivariate_normal.pdf(x, None, 1) + assert_allclose(d1, np.log(d2)) + assert_allclose(d3, np.log(d4)) + + def test_logcdf(self): + # Check that the log of the cdf is in fact the logcdf + np.random.seed(1234) + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) + d1 = multivariate_normal.logcdf(x, mean, cov) + d2 = multivariate_normal.cdf(x, mean, cov) + assert_allclose(d1, np.log(d2)) + + def test_logcdf_default_values(self): + # Check that the log of the cdf is in fact the logcdf + # with default parameters Mean=None and cov = 1 + np.random.seed(1234) + x = np.random.randn(5) + d1 = multivariate_normal.logcdf(x) + d2 = multivariate_normal.cdf(x) + # check whether default values are being used + d3 = multivariate_normal.logcdf(x, None, 1) + d4 = multivariate_normal.cdf(x, None, 1) + assert_allclose(d1, np.log(d2)) + assert_allclose(d3, np.log(d4)) + + def test_rank(self): + # Check that the rank is detected correctly. + np.random.seed(1234) + n = 4 + mean = np.random.randn(n) + for expected_rank in range(1, n + 1): + s = np.random.randn(n, expected_rank) + cov = np.dot(s, s.T) + distn = multivariate_normal(mean, cov, allow_singular=True) + assert_equal(distn.cov_info.rank, expected_rank) + + def test_degenerate_distributions(self): + + def _sample_orthonormal_matrix(n): + M = np.random.randn(n, n) + u, s, v = scipy.linalg.svd(M) + return u + + for n in range(1, 5): + x = np.random.randn(n) + for k in range(1, n + 1): + # Sample a small covariance matrix. + s = np.random.randn(k, k) + cov_kk = np.dot(s, s.T) + + # Embed the small covariance matrix into a larger low rank matrix. + cov_nn = np.zeros((n, n)) + cov_nn[:k, :k] = cov_kk + + # Define a rotation of the larger low rank matrix. + u = _sample_orthonormal_matrix(n) + cov_rr = np.dot(u, np.dot(cov_nn, u.T)) + y = np.dot(u, x) + + # Check some identities. 
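+                # Note (added for clarity): the rank-k Gaussian lives on a
+                # k-dimensional subspace, so embedding its covariance in n
+                # dimensions, or rotating it by the orthogonal u (and
+                # evaluating at y = u.dot(x)), must not change the density
+                # value.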
+ distn_kk = multivariate_normal(np.zeros(k), cov_kk, + allow_singular=True) + distn_nn = multivariate_normal(np.zeros(n), cov_nn, + allow_singular=True) + distn_rr = multivariate_normal(np.zeros(n), cov_rr, + allow_singular=True) + assert_equal(distn_kk.cov_info.rank, k) + assert_equal(distn_nn.cov_info.rank, k) + assert_equal(distn_rr.cov_info.rank, k) + pdf_kk = distn_kk.pdf(x[:k]) + pdf_nn = distn_nn.pdf(x) + pdf_rr = distn_rr.pdf(y) + assert_allclose(pdf_kk, pdf_nn) + assert_allclose(pdf_kk, pdf_rr) + logpdf_kk = distn_kk.logpdf(x[:k]) + logpdf_nn = distn_nn.logpdf(x) + logpdf_rr = distn_rr.logpdf(y) + assert_allclose(logpdf_kk, logpdf_nn) + assert_allclose(logpdf_kk, logpdf_rr) + + def test_large_pseudo_determinant(self): + # Check that large pseudo-determinants are handled appropriately. + + # Construct a singular diagonal covariance matrix + # whose pseudo determinant overflows double precision. + large_total_log = 1000.0 + npos = 100 + nzero = 2 + large_entry = np.exp(large_total_log / npos) + n = npos + nzero + cov = np.zeros((n, n), dtype=float) + np.fill_diagonal(cov, large_entry) + cov[-nzero:, -nzero:] = 0 + + # Check some determinants. + assert_equal(scipy.linalg.det(cov), 0) + assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf) + assert_allclose(np.linalg.slogdet(cov[:npos, :npos]), + (1, large_total_log)) + + # Check the pseudo-determinant. + psd = _PSD(cov) + assert_allclose(psd.log_pdet, large_total_log) + + def test_broadcasting(self): + np.random.seed(1234) + n = 4 + + # Construct a random covariance matrix. + data = np.random.randn(n, n) + cov = np.dot(data, data.T) + mean = np.random.randn(n) + + # Construct an ndarray which can be interpreted as + # a 2x3 array whose elements are random data vectors. + X = np.random.randn(2, 3, n) + + # Check that multiple data points can be evaluated at once. 
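+        # Note (added for clarity): pdf/cdf broadcast over the leading axes,
+        # so for X of shape (2, 3, n) they return an array of shape (2, 3),
+        # which the loops below compare entry by entry.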
+ desired_pdf = multivariate_normal.pdf(X, mean, cov) + desired_cdf = multivariate_normal.cdf(X, mean, cov) + for i in range(2): + for j in range(3): + actual = multivariate_normal.pdf(X[i, j], mean, cov) + assert_allclose(actual, desired_pdf[i,j]) + # Repeat for cdf + actual = multivariate_normal.cdf(X[i, j], mean, cov) + assert_allclose(actual, desired_cdf[i,j], rtol=1e-3) + + def test_normal_1D(self): + # The probability density function for a 1D normal variable should + # agree with the standard normal distribution in scipy.stats.distributions + x = np.linspace(0, 2, 10) + mean, cov = 1.2, 0.9 + scale = cov**0.5 + d1 = norm.pdf(x, mean, scale) + d2 = multivariate_normal.pdf(x, mean, cov) + assert_allclose(d1, d2) + # The same should hold for the cumulative distribution function + d1 = norm.cdf(x, mean, scale) + d2 = multivariate_normal.cdf(x, mean, cov) + assert_allclose(d1, d2) + + def test_marginalization(self): + # Integrating out one of the variables of a 2D Gaussian should + # yield a 1D Gaussian + mean = np.array([2.5, 3.5]) + cov = np.array([[.5, 0.2], [0.2, .6]]) + n = 2 ** 8 + 1 # Number of samples + delta = 6 / (n - 1) # Grid spacing + + v = np.linspace(0, 6, n) + xv, yv = np.meshgrid(v, v) + pos = np.empty((n, n, 2)) + pos[:, :, 0] = xv + pos[:, :, 1] = yv + pdf = multivariate_normal.pdf(pos, mean, cov) + + # Marginalize over x and y axis + margin_x = romb(pdf, delta, axis=0) + margin_y = romb(pdf, delta, axis=1) + + # Compare with standard normal distribution + gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5) + gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5) + assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2) + assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2) + + def test_frozen(self): + # The frozen distribution should agree with the regular one + np.random.seed(1234) + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) + norm_frozen = multivariate_normal(mean, cov) + assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov)) + assert_allclose(norm_frozen.logpdf(x), + multivariate_normal.logpdf(x, mean, cov)) + assert_allclose(norm_frozen.cdf(x), multivariate_normal.cdf(x, mean, cov)) + assert_allclose(norm_frozen.logcdf(x), + multivariate_normal.logcdf(x, mean, cov)) + + def test_pseudodet_pinv(self): + # Make sure that pseudo-inverse and pseudo-det agree on cutoff + + # Assemble random covariance matrix with large and small eigenvalues + np.random.seed(1234) + n = 7 + x = np.random.randn(n, n) + cov = np.dot(x, x.T) + s, u = scipy.linalg.eigh(cov) + s = 0.5 * np.ones(n) + s[0] = 1.0 + s[-1] = 1e-7 + cov = np.dot(u, np.dot(np.diag(s), u.T)) + + # Set cond so that the lowest eigenvalue is below the cutoff + cond = 1e-5 + psd = _PSD(cov, cond=cond) + psd_pinv = _PSD(psd.pinv, cond=cond) + + # Check that the log pseudo-determinant agrees with the sum + # of the logs of all but the smallest eigenvalue + assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1]))) + # Check that the pseudo-determinant of the pseudo-inverse + # agrees with 1 / pseudo-determinant + assert_allclose(-psd.log_pdet, psd_pinv.log_pdet) + + def test_exception_nonsquare_cov(self): + cov = [[1, 2, 3], [4, 5, 6]] + assert_raises(ValueError, _PSD, cov) + + def test_exception_nonfinite_cov(self): + cov_nan = [[1, 0], [0, np.nan]] + assert_raises(ValueError, _PSD, cov_nan) + cov_inf = [[1, 0], [0, np.inf]] + assert_raises(ValueError, _PSD, cov_inf) + + def test_exception_non_psd_cov(self): + cov = [[1, 0], [0, -1]] + 
assert_raises(ValueError, _PSD, cov) + + def test_exception_singular_cov(self): + np.random.seed(1234) + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.ones((5, 5)) + e = np.linalg.LinAlgError + assert_raises(e, multivariate_normal, mean, cov) + assert_raises(e, multivariate_normal.pdf, x, mean, cov) + assert_raises(e, multivariate_normal.logpdf, x, mean, cov) + assert_raises(e, multivariate_normal.cdf, x, mean, cov) + assert_raises(e, multivariate_normal.logcdf, x, mean, cov) + + def test_R_values(self): + # Compare the multivariate pdf with some values precomputed + # in R version 3.0.1 (2013-05-16) on Mac OS X 10.6. + + # The values below were generated by the following R-script: + # > library(mnormt) + # > x <- seq(0, 2, length=5) + # > y <- 3*x - 2 + # > z <- x + cos(y) + # > mu <- c(1, 3, 2) + # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3) + # > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma) + r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692, + 0.0103803050, 0.0140250800]) + + x = np.linspace(0, 2, 5) + y = 3 * x - 2 + z = x + np.cos(y) + r = np.array([x, y, z]).T + + mean = np.array([1, 3, 2], 'd') + cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd') + + pdf = multivariate_normal.pdf(r, mean, cov) + assert_allclose(pdf, r_pdf, atol=1e-10) + + # Compare the multivariate cdf with some values precomputed + # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux. + + # The values below were generated by the following R-script: + # > library(mnormt) + # > x <- seq(0, 2, length=5) + # > y <- 3*x - 2 + # > z <- x + cos(y) + # > mu <- c(1, 3, 2) + # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3) + # > r_cdf <- pmnorm(cbind(x,y,z), mu, Sigma) + r_cdf = np.array([0.0017866215, 0.0267142892, 0.0857098761, + 0.1063242573, 0.2501068509]) + + cdf = multivariate_normal.cdf(r, mean, cov) + assert_allclose(cdf, r_cdf, atol=1e-5) + + # Also test bivariate cdf with some values precomputed + # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux. + + # The values below were generated by the following R-script: + # > library(mnormt) + # > x <- seq(0, 2, length=5) + # > y <- 3*x - 2 + # > mu <- c(1, 3) + # > Sigma <- matrix(c(1,2,2,5), 2, 2) + # > r_cdf2 <- pmnorm(cbind(x,y), mu, Sigma) + r_cdf2 = np.array([0.01262147, 0.05838989, 0.18389571, + 0.40696599, 0.66470577]) + + r2 = np.array([x, y]).T + + mean2 = np.array([1, 3], 'd') + cov2 = np.array([[1, 2], [2, 5]], 'd') + + cdf2 = multivariate_normal.cdf(r2, mean2, cov2) + assert_allclose(cdf2, r_cdf2, atol=1e-5) + + def test_multivariate_normal_rvs_zero_covariance(self): + mean = np.zeros(2) + covariance = np.zeros((2, 2)) + model = multivariate_normal(mean, covariance, allow_singular=True) + sample = model.rvs() + assert_equal(sample, [0, 0]) + + def test_rvs_shape(self): + # Check that rvs parses the mean and covariance correctly, and returns + # an array of the right shape + N = 300 + d = 4 + sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N) + assert_equal(sample.shape, (N, d)) + + sample = multivariate_normal.rvs(mean=None, + cov=np.array([[2, .1], [.1, 1]]), + size=N) + assert_equal(sample.shape, (N, 2)) + + u = multivariate_normal(mean=0, cov=1) + sample = u.rvs(N) + assert_equal(sample.shape, (N, )) + + def test_large_sample(self): + # Generate large sample and compare sample mean and sample covariance + # with mean and covariance matrix. 
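+        # With `size` draws the Monte Carlo error of the sample moments
+        # shrinks roughly like 1/sqrt(size); at size = 5000 that is typically
+        # at the percent level, so the loose rtol=1e-1 below should leave a
+        # comfortable margin for this fixed seed.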
+ + np.random.seed(2846) + + n = 3 + mean = np.random.randn(n) + M = np.random.randn(n, n) + cov = np.dot(M, M.T) + size = 5000 + + sample = multivariate_normal.rvs(mean, cov, size) + + assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1) + assert_allclose(sample.mean(0), mean, rtol=1e-1) + + def test_entropy(self): + np.random.seed(2846) + + n = 3 + mean = np.random.randn(n) + M = np.random.randn(n, n) + cov = np.dot(M, M.T) + + rv = multivariate_normal(mean, cov) + + # Check that frozen distribution agrees with entropy function + assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov)) + # Compare entropy with manually computed expression involving + # the sum of the logs of the eigenvalues of the covariance matrix + eigs = np.linalg.eig(cov)[0] + desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs))) + assert_almost_equal(desired, rv.entropy()) + + def test_lnB(self): + alpha = np.array([1, 1, 1]) + desired = .5 # e^lnB = 1/2 for [1, 1, 1] + + assert_almost_equal(np.exp(_lnB(alpha)), desired) + +class TestMatrixNormal(object): + + def test_bad_input(self): + # Check that bad inputs raise errors + num_rows = 4 + num_cols = 3 + M = 0.3 * np.ones((num_rows,num_cols)) + U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows)) + V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols)) + + # Incorrect dimensions + assert_raises(ValueError, matrix_normal, np.zeros((5,4,3))) + assert_raises(ValueError, matrix_normal, M, np.zeros(10), V) + assert_raises(ValueError, matrix_normal, M, U, np.zeros(10)) + assert_raises(ValueError, matrix_normal, M, U, U) + assert_raises(ValueError, matrix_normal, M, V, V) + assert_raises(ValueError, matrix_normal, M.T, U, V) + + # Singular covariance + e = np.linalg.LinAlgError + assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols))) + assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V) + + def test_default_inputs(self): + # Check that default argument handling works + num_rows = 4 + num_cols = 3 + M = 0.3 * np.ones((num_rows,num_cols)) + U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows)) + V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols)) + Z = np.zeros((num_rows, num_cols)) + Zr = np.zeros((num_rows, 1)) + Zc = np.zeros((1, num_cols)) + Ir = np.identity(num_rows) + Ic = np.identity(num_cols) + I1 = np.identity(1) + + assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape, + (num_rows, num_cols)) + assert_equal(matrix_normal.rvs(mean=M).shape, + (num_rows, num_cols)) + assert_equal(matrix_normal.rvs(rowcov=U).shape, + (num_rows, 1)) + assert_equal(matrix_normal.rvs(colcov=V).shape, + (1, num_cols)) + assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape, + (num_rows, num_cols)) + assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape, + (num_rows, num_cols)) + assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape, + (num_rows, num_cols)) + + assert_equal(matrix_normal(mean=M).rowcov, Ir) + assert_equal(matrix_normal(mean=M).colcov, Ic) + assert_equal(matrix_normal(rowcov=U).mean, Zr) + assert_equal(matrix_normal(rowcov=U).colcov, I1) + assert_equal(matrix_normal(colcov=V).mean, Zc) + assert_equal(matrix_normal(colcov=V).rowcov, I1) + assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic) + assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir) + assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z) + + def test_covariance_expansion(self): + # Check that covariance can be specified with scalar or vector + num_rows = 4 + 
num_cols = 3 + M = 0.3 * np.ones((num_rows,num_cols)) + Uv = 0.2*np.ones(num_rows) + Us = 0.2 + Vv = 0.1*np.ones(num_cols) + Vs = 0.1 + + Ir = np.identity(num_rows) + Ic = np.identity(num_cols) + + assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov, + 0.2*Ir) + assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov, + 0.1*Ic) + assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov, + 0.2*Ir) + assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov, + 0.1*Ic) + + def test_frozen_matrix_normal(self): + for i in range(1,5): + for j in range(1,5): + M = 0.3 * np.ones((i,j)) + U = 0.5 * np.identity(i) + 0.5 * np.ones((i,i)) + V = 0.7 * np.identity(j) + 0.3 * np.ones((j,j)) + + frozen = matrix_normal(mean=M, rowcov=U, colcov=V) + + rvs1 = frozen.rvs(random_state=1234) + rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V, + random_state=1234) + assert_equal(rvs1, rvs2) + + X = frozen.rvs(random_state=1234) + + pdf1 = frozen.pdf(X) + pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V) + assert_equal(pdf1, pdf2) + + logpdf1 = frozen.logpdf(X) + logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V) + assert_equal(logpdf1, logpdf2) + + def test_matches_multivariate(self): + # Check that the pdfs match those obtained by vectorising and + # treating as a multivariate normal. + for i in range(1,5): + for j in range(1,5): + M = 0.3 * np.ones((i,j)) + U = 0.5 * np.identity(i) + 0.5 * np.ones((i,i)) + V = 0.7 * np.identity(j) + 0.3 * np.ones((j,j)) + + frozen = matrix_normal(mean=M, rowcov=U, colcov=V) + X = frozen.rvs(random_state=1234) + pdf1 = frozen.pdf(X) + logpdf1 = frozen.logpdf(X) + + vecX = X.T.flatten() + vecM = M.T.flatten() + cov = np.kron(V,U) + pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov) + logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov) + + assert_allclose(pdf1, pdf2, rtol=1E-10) + assert_allclose(logpdf1, logpdf2, rtol=1E-10) + + def test_array_input(self): + # Check array of inputs has the same output as the separate entries. 
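+        # matrix_normal treats the trailing two axes as the matrix dimensions,
+        # so a stack of draws with shape (2, N, num_rows, num_cols) should
+        # yield a (2, N) array of log-densities, one entry per matrix.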
+ num_rows = 4 + num_cols = 3 + M = 0.3 * np.ones((num_rows,num_cols)) + U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows)) + V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols)) + N = 10 + + frozen = matrix_normal(mean=M, rowcov=U, colcov=V) + X1 = frozen.rvs(size=N, random_state=1234) + X2 = frozen.rvs(size=N, random_state=4321) + X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0) + assert_equal(X.shape, (2, N, num_rows, num_cols)) + + array_logpdf = frozen.logpdf(X) + assert_equal(array_logpdf.shape, (2, N)) + for i in range(2): + for j in range(N): + separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M, + rowcov=U, colcov=V) + assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10) + + def test_moments(self): + # Check that the sample moments match the parameters + num_rows = 4 + num_cols = 3 + M = 0.3 * np.ones((num_rows,num_cols)) + U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows)) + V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols)) + N = 1000 + + frozen = matrix_normal(mean=M, rowcov=U, colcov=V) + X = frozen.rvs(size=N, random_state=1234) + + sample_mean = np.mean(X,axis=0) + assert_allclose(sample_mean, M, atol=0.1) + + sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T) + assert_allclose(sample_colcov, V, atol=0.1) + + sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape( + N*num_cols,num_rows).T) + assert_allclose(sample_rowcov, U, atol=0.1) + +class TestDirichlet(object): + + def test_frozen_dirichlet(self): + np.random.seed(2846) + + n = np.random.randint(1, 32) + alpha = np.random.uniform(10e-10, 100, n) + + d = dirichlet(alpha) + + assert_equal(d.var(), dirichlet.var(alpha)) + assert_equal(d.mean(), dirichlet.mean(alpha)) + assert_equal(d.entropy(), dirichlet.entropy(alpha)) + num_tests = 10 + for i in range(num_tests): + x = np.random.uniform(10e-10, 100, n) + x /= np.sum(x) + assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha)) + assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha)) + + def test_numpy_rvs_shape_compatibility(self): + np.random.seed(2846) + alpha = np.array([1.0, 2.0, 3.0]) + x = np.random.dirichlet(alpha, size=7) + assert_equal(x.shape, (7, 3)) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + dirichlet.pdf(x.T, alpha) + dirichlet.pdf(x.T[:-1], alpha) + dirichlet.logpdf(x.T, alpha) + dirichlet.logpdf(x.T[:-1], alpha) + + def test_alpha_with_zeros(self): + np.random.seed(2846) + alpha = [1.0, 0.0, 3.0] + # don't pass invalid alpha to np.random.dirichlet + x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_alpha_with_negative_entries(self): + np.random.seed(2846) + alpha = [1.0, -2.0, 3.0] + # don't pass invalid alpha to np.random.dirichlet + x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_with_zeros(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.array([0.1, 0.0, 0.2, 0.7]) + dirichlet.pdf(x, alpha) + dirichlet.logpdf(x, alpha) + alpha = np.array([1.0, 1.0, 1.0, 1.0]) + assert_almost_equal(dirichlet.pdf(x, alpha), 6) + assert_almost_equal(dirichlet.logpdf(x, alpha), np.log(6)) + + def test_data_with_zeros_and_small_alpha(self): + alpha = np.array([1.0, 0.5, 3.0, 4.0]) + x = np.array([0.1, 0.0, 0.2, 0.7]) + 
assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_with_negative_entries(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.array([0.1, -0.1, 0.3, 0.7]) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_with_too_large_entries(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.array([0.1, 1.1, 0.3, 0.7]) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_too_deep_c(self): + alpha = np.array([1.0, 2.0, 3.0]) + x = np.ones((2, 7, 7)) / 14 + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_alpha_too_deep(self): + alpha = np.array([[1.0, 2.0], [3.0, 4.0]]) + x = np.ones((2, 2, 7)) / 4 + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_alpha_correct_depth(self): + alpha = np.array([1.0, 2.0, 3.0]) + x = np.ones((3, 7)) / 3 + dirichlet.pdf(x, alpha) + dirichlet.logpdf(x, alpha) + + def test_non_simplex_data(self): + alpha = np.array([1.0, 2.0, 3.0]) + x = np.ones((3, 7)) / 2 + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_vector_too_short(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.ones((2, 7)) / 2 + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_vector_too_long(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.ones((5, 7)) / 5 + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_mean_and_var(self): + alpha = np.array([1., 0.8, 0.2]) + d = dirichlet(alpha) + + expected_var = [1. / 12., 0.08, 0.03] + expected_mean = [0.5, 0.4, 0.1] + + assert_array_almost_equal(d.var(), expected_var) + assert_array_almost_equal(d.mean(), expected_mean) + + def test_scalar_values(self): + alpha = np.array([0.2]) + d = dirichlet(alpha) + + # For alpha of length 1, mean and var should be scalar instead of array + assert_equal(d.mean().ndim, 0) + assert_equal(d.var().ndim, 0) + + assert_equal(d.pdf([1.]).ndim, 0) + assert_equal(d.logpdf([1.]).ndim, 0) + + def test_K_and_K_minus_1_calls_equal(self): + # Test that calls with K and K-1 entries yield the same results. 
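+        # On the simplex sum(x) == 1 the last coordinate is redundant,
+        # x[-1] = 1 - sum(x[:-1]), so dirichlet.pdf accepts either the full
+        # K-vector or just its first K-1 entries; both calls should agree.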
+ + np.random.seed(2846) + + n = np.random.randint(1, 32) + alpha = np.random.uniform(10e-10, 100, n) + + d = dirichlet(alpha) + num_tests = 10 + for i in range(num_tests): + x = np.random.uniform(10e-10, 100, n) + x /= np.sum(x) + assert_almost_equal(d.pdf(x[:-1]), d.pdf(x)) + + def test_multiple_entry_calls(self): + # Test that calls with multiple x vectors as matrix work + np.random.seed(2846) + + n = np.random.randint(1, 32) + alpha = np.random.uniform(10e-10, 100, n) + d = dirichlet(alpha) + + num_tests = 10 + num_multiple = 5 + xm = None + for i in range(num_tests): + for m in range(num_multiple): + x = np.random.uniform(10e-10, 100, n) + x /= np.sum(x) + if xm is not None: + xm = np.vstack((xm, x)) + else: + xm = x + rm = d.pdf(xm.T) + rs = None + for xs in xm: + r = d.pdf(xs) + if rs is not None: + rs = np.append(rs, r) + else: + rs = r + assert_array_almost_equal(rm, rs) + + def test_2D_dirichlet_is_beta(self): + np.random.seed(2846) + + alpha = np.random.uniform(10e-10, 100, 2) + d = dirichlet(alpha) + b = beta(alpha[0], alpha[1]) + + num_tests = 10 + for i in range(num_tests): + x = np.random.uniform(10e-10, 100, 2) + x /= np.sum(x) + assert_almost_equal(b.pdf(x), d.pdf([x])) + + assert_almost_equal(b.mean(), d.mean()[0]) + assert_almost_equal(b.var(), d.var()[0]) + + +def test_multivariate_normal_dimensions_mismatch(): + # Regression test for GH #3493. Check that setting up a PDF with a mean of + # length M and a covariance matrix of size (N, N), where M != N, raises a + # ValueError with an informative error message. + mu = np.array([0.0, 0.0]) + sigma = np.array([[1.0]]) + + assert_raises(ValueError, multivariate_normal, mu, sigma) + + # A simple check that the right error message was passed along. Checking + # that the entire message is there, word for word, would be somewhat + # fragile, so we just check for the leading part. 
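+    # Matching only this prefix should keep the test robust if the tail of
+    # the message (which spells out the offending shapes) is ever reworded.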
+ try: + multivariate_normal(mu, sigma) + except ValueError as e: + msg = "Dimension mismatch" + assert_equal(str(e)[:len(msg)], msg) + + +class TestWishart(object): + def test_scale_dimensions(self): + # Test that we can call the Wishart with various scale dimensions + + # Test case: dim=1, scale=1 + true_scale = np.array(1, ndmin=2) + scales = [ + 1, # scalar + [1], # iterable + np.array(1), # 0-dim + np.r_[1], # 1-dim + np.array(1, ndmin=2) # 2-dim + ] + for scale in scales: + w = wishart(1, scale) + assert_equal(w.scale, true_scale) + assert_equal(w.scale.shape, true_scale.shape) + + # Test case: dim=2, scale=[[1,0] + # [0,2] + true_scale = np.array([[1,0], + [0,2]]) + scales = [ + [1,2], # iterable + np.r_[1,2], # 1-dim + np.array([[1,0], # 2-dim + [0,2]]) + ] + for scale in scales: + w = wishart(2, scale) + assert_equal(w.scale, true_scale) + assert_equal(w.scale.shape, true_scale.shape) + + # We cannot call with a df < dim + assert_raises(ValueError, wishart, 1, np.eye(2)) + + # We cannot call with a 3-dimension array + scale = np.array(1, ndmin=3) + assert_raises(ValueError, wishart, 1, scale) + + def test_quantile_dimensions(self): + # Test that we can call the Wishart rvs with various quantile dimensions + + # If dim == 1, consider x.shape = [1,1,1] + X = [ + 1, # scalar + [1], # iterable + np.array(1), # 0-dim + np.r_[1], # 1-dim + np.array(1, ndmin=2), # 2-dim + np.array([1], ndmin=3) # 3-dim + ] + + w = wishart(1,1) + density = w.pdf(np.array(1, ndmin=3)) + for x in X: + assert_equal(w.pdf(x), density) + + # If dim == 1, consider x.shape = [1,1,*] + X = [ + [1,2,3], # iterable + np.r_[1,2,3], # 1-dim + np.array([1,2,3], ndmin=3) # 3-dim + ] + + w = wishart(1,1) + density = w.pdf(np.array([1,2,3], ndmin=3)) + for x in X: + assert_equal(w.pdf(x), density) + + # If dim == 2, consider x.shape = [2,2,1] + # where x[:,:,*] = np.eye(1)*2 + X = [ + 2, # scalar + [2,2], # iterable + np.array(2), # 0-dim + np.r_[2,2], # 1-dim + np.array([[2,0], + [0,2]]), # 2-dim + np.array([[2,0], + [0,2]])[:,:,np.newaxis] # 3-dim + ] + + w = wishart(2,np.eye(2)) + density = w.pdf(np.array([[2,0], + [0,2]])[:,:,np.newaxis]) + for x in X: + assert_equal(w.pdf(x), density) + + def test_frozen(self): + # Test that the frozen and non-frozen Wishart gives the same answers + + # Construct an arbitrary positive definite scale matrix + dim = 4 + scale = np.diag(np.arange(dim)+1) + scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2) + scale = np.dot(scale.T, scale) + + # Construct a collection of positive definite matrices to test the PDF + X = [] + for i in range(5): + x = np.diag(np.arange(dim)+(i+1)**2) + x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2) + x = np.dot(x.T, x) + X.append(x) + X = np.array(X).T + + # Construct a 1D and 2D set of parameters + parameters = [ + (10, 1, np.linspace(0.1, 10, 5)), # 1D case + (10, scale, X) + ] + + for (df, scale, x) in parameters: + w = wishart(df, scale) + assert_equal(w.var(), wishart.var(df, scale)) + assert_equal(w.mean(), wishart.mean(df, scale)) + assert_equal(w.mode(), wishart.mode(df, scale)) + assert_equal(w.entropy(), wishart.entropy(df, scale)) + assert_equal(w.pdf(x), wishart.pdf(x, df, scale)) + + def test_1D_is_chisquared(self): + # The 1-dimensional Wishart with an identity scale matrix is just a + # chi-squared distribution. 
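+        # Sketch of why: for z_1, ..., z_df iid N(0, 1), the 1x1 scatter
+        # "matrix" sum(z_i**2) is W_1(df, 1)-distributed, which is by
+        # definition chi2(df), so every statistic compared below should
+        # match exactly (up to floating point).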
+        # Test variance, mean, entropy, pdf
+        # Kolmogorov-Smirnov test for rvs
+        np.random.seed(482974)
+
+        sn = 500
+        dim = 1
+        scale = np.eye(dim)
+
+        df_range = np.arange(1, 10, 2, dtype=float)
+        X = np.linspace(0.1,10,num=10)
+        for df in df_range:
+            w = wishart(df, scale)
+            c = chi2(df)
+
+            # Statistics
+            assert_allclose(w.var(), c.var())
+            assert_allclose(w.mean(), c.mean())
+            assert_allclose(w.entropy(), c.entropy())
+
+            # PDF
+            assert_allclose(w.pdf(X), c.pdf(X))
+
+            # rvs
+            rvs = w.rvs(size=sn)
+            args = (df,)
+            alpha = 0.01
+            check_distribution_rvs('chi2', args, alpha, rvs)
+
+    def test_is_scaled_chisquared(self):
+        # A Wishart with an arbitrary scale matrix can be
+        # transformed to a scaled chi-squared distribution.
+        # For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
+        # :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
+        np.random.seed(482974)
+
+        sn = 500
+        df = 10
+        dim = 4
+        # Construct an arbitrary positive definite matrix
+        scale = np.diag(np.arange(4)+1)
+        scale[np.tril_indices(4, k=-1)] = np.arange(6)
+        scale = np.dot(scale.T, scale)
+        # Use :math:`\lambda = [1, \dots, 1]'`
+        lamda = np.ones((dim,1))
+        sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
+        w = wishart(df, sigma_lamda)
+        c = chi2(df, scale=sigma_lamda)
+
+        # Statistics
+        assert_allclose(w.var(), c.var())
+        assert_allclose(w.mean(), c.mean())
+        assert_allclose(w.entropy(), c.entropy())
+
+        # PDF
+        X = np.linspace(0.1,10,num=10)
+        assert_allclose(w.pdf(X), c.pdf(X))
+
+        # rvs
+        rvs = w.rvs(size=sn)
+        args = (df,0,sigma_lamda)
+        alpha = 0.01
+        check_distribution_rvs('chi2', args, alpha, rvs)
+
+class TestMultinomial(object):
+    def test_logpmf(self):
+        vals1 = multinomial.logpmf((3,4), 7, (0.3, 0.7))
+        assert_allclose(vals1, -1.483270127243324, rtol=1e-8)
+
+        vals2 = multinomial.logpmf([3, 4], 0, [.3, .7])
+        assert_allclose(vals2, np.NAN, rtol=1e-8)
+
+        vals3 = multinomial.logpmf([3, 4], 0, [-2, 3])
+        assert_allclose(vals3, np.NAN, rtol=1e-8)
+
+    def test_reduces_binomial(self):
+        # test that the multinomial pmf reduces to the binomial pmf in the 2d
+        # case
+        val1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7))
+        val2 = binom.logpmf(3, 7, 0.3)
+        assert_allclose(val1, val2, rtol=1e-8)
+
+        val1 = multinomial.pmf((6, 8), 14, (0.1, 0.9))
+        val2 = binom.pmf(6, 14, 0.1)
+        assert_allclose(val1, val2, rtol=1e-8)
+
+    def test_R(self):
+        # test against the values produced by this R code
+        # (https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Multinom.html)
+        # X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
+        # X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
+        # X
+        # apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5)))
+
+        n, p = 3, [1./8, 2./8, 5./8]
+        r_vals = {(0, 0, 3): 0.244140625, (1, 0, 2): 0.146484375,
+                  (2, 0, 1): 0.029296875, (3, 0, 0): 0.001953125,
+                  (0, 1, 2): 0.292968750, (1, 1, 1): 0.117187500,
+                  (2, 1, 0): 0.011718750, (0, 2, 1): 0.117187500,
+                  (1, 2, 0): 0.023437500, (0, 3, 0): 0.015625000}
+        for x in r_vals:
+            assert_allclose(multinomial.pmf(x, n, p), r_vals[x], atol=1e-14)
+
+    def test_rvs_np(self):
+        # test that .rvs agrees w/numpy
+        sc_rvs = multinomial.rvs(3, [1/4.]*3, size=7, random_state=123)
+        rndm = np.random.RandomState(123)
+        np_rvs = rndm.multinomial(3, [1/4.]*3, size=7)
+        assert_equal(sc_rvs, np_rvs)
+
+    def test_pmf(self):
+        vals0 = multinomial.pmf((5,), 5, (1,))
+        assert_allclose(vals0, 1, rtol=1e-8)
+
+        vals1 = multinomial.pmf((3,4), 7, (.3, .7))
+        assert_allclose(vals1,
.22689449999999994, rtol=1e-8) + + vals2 = multinomial.pmf([[[3,5],[0,8]], [[-1, 9], [1, 1]]], 8, + (.1, .9)) + assert_allclose(vals2, [[.03306744, .43046721], [0, 0]], rtol=1e-8) + + x = np.empty((0,2), dtype=np.float64) + vals3 = multinomial.pmf(x, 4, (.3, .7)) + assert_equal(vals3, np.empty([], dtype=np.float64)) + + vals4 = multinomial.pmf([1,2], 4, (.3, .7)) + assert_allclose(vals4, 0, rtol=1e-8) + + vals5 = multinomial.pmf([3, 3, 0], 6, [2/3.0, 1/3.0, 0]) + assert_allclose(vals5, 0.219478737997, rtol=1e-8) + + def test_pmf_broadcasting(self): + vals0 = multinomial.pmf([1, 2], 3, [[.1, .9], [.2, .8]]) + assert_allclose(vals0, [.243, .384], rtol=1e-8) + + vals1 = multinomial.pmf([1, 2], [3, 4], [.1, .9]) + assert_allclose(vals1, [.243, 0], rtol=1e-8) + + vals2 = multinomial.pmf([[[1, 2], [1, 1]]], 3, [.1, .9]) + assert_allclose(vals2, [[.243, 0]], rtol=1e-8) + + vals3 = multinomial.pmf([1, 2], [[[3], [4]]], [.1, .9]) + assert_allclose(vals3, [[[.243], [0]]], rtol=1e-8) + + vals4 = multinomial.pmf([[1, 2], [1,1]], [[[[3]]]], [.1, .9]) + assert_allclose(vals4, [[[[.243, 0]]]], rtol=1e-8) + + def test_cov(self): + cov1 = multinomial.cov(5, (.2, .3, .5)) + cov2 = [[5*.2*.8, -5*.2*.3, -5*.2*.5], + [-5*.3*.2, 5*.3*.7, -5*.3*.5], + [-5*.5*.2, -5*.5*.3, 5*.5*.5]] + assert_allclose(cov1, cov2, rtol=1e-8) + + def test_cov_broadcasting(self): + cov1 = multinomial.cov(5, [[.1, .9], [.2, .8]]) + cov2 = [[[.45, -.45],[-.45, .45]], [[.8, -.8], [-.8, .8]]] + assert_allclose(cov1, cov2, rtol=1e-8) + + cov3 = multinomial.cov([4, 5], [.1, .9]) + cov4 = [[[.36, -.36], [-.36, .36]], [[.45, -.45], [-.45, .45]]] + assert_allclose(cov3, cov4, rtol=1e-8) + + cov5 = multinomial.cov([4, 5], [[.3, .7], [.4, .6]]) + cov6 = [[[4*.3*.7, -4*.3*.7], [-4*.3*.7, 4*.3*.7]], + [[5*.4*.6, -5*.4*.6], [-5*.4*.6, 5*.4*.6]]] + assert_allclose(cov5, cov6, rtol=1e-8) + + def test_entropy(self): + # this is equivalent to a binomial distribution with n=2, so the + # entropy .77899774929 is easily computed "by hand" + ent0 = multinomial.entropy(2, [.2, .8]) + assert_allclose(ent0, binom.entropy(2, .2), rtol=1e-8) + + def test_entropy_broadcasting(self): + ent0 = multinomial.entropy([2, 3], [.2, .3]) + assert_allclose(ent0, [binom.entropy(2, .2), binom.entropy(3, .2)], + rtol=1e-8) + + ent1 = multinomial.entropy([7, 8], [[.3, .7], [.4, .6]]) + assert_allclose(ent1, [binom.entropy(7, .3), binom.entropy(8, .4)], + rtol=1e-8) + + ent2 = multinomial.entropy([[7], [8]], [[.3, .7], [.4, .6]]) + assert_allclose(ent2, + [[binom.entropy(7, .3), binom.entropy(7, .4)], + [binom.entropy(8, .3), binom.entropy(8, .4)]], + rtol=1e-8) + + def test_mean(self): + mean1 = multinomial.mean(5, [.2, .8]) + assert_allclose(mean1, [5*.2, 5*.8], rtol=1e-8) + + def test_mean_broadcasting(self): + mean1 = multinomial.mean([5, 6], [.2, .8]) + assert_allclose(mean1, [[5*.2, 5*.8], [6*.2, 6*.8]], rtol=1e-8) + + def test_frozen(self): + # The frozen distribution should agree with the regular one + np.random.seed(1234) + n = 12 + pvals = (.1, .2, .3, .4) + x = [[0,0,0,12],[0,0,1,11],[0,1,1,10],[1,1,1,9],[1,1,2,8]] + x = np.asarray(x, dtype=np.float64) + mn_frozen = multinomial(n, pvals) + assert_allclose(mn_frozen.pmf(x), multinomial.pmf(x, n, pvals)) + assert_allclose(mn_frozen.logpmf(x), multinomial.logpmf(x, n, pvals)) + assert_allclose(mn_frozen.entropy(), multinomial.entropy(n, pvals)) + +class TestInvwishart(object): + def test_frozen(self): + # Test that the frozen and non-frozen inverse Wishart gives the same + # answers + + # Construct an arbitrary 
positive definite scale matrix
+        dim = 4
+        scale = np.diag(np.arange(dim)+1)
+        scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
+        scale = np.dot(scale.T, scale)
+
+        # Construct a collection of positive definite matrices to test the PDF
+        X = []
+        for i in range(5):
+            x = np.diag(np.arange(dim)+(i+1)**2)
+            x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
+            x = np.dot(x.T, x)
+            X.append(x)
+        X = np.array(X).T
+
+        # Construct a 1D and 2D set of parameters
+        parameters = [
+            (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
+            (10, scale, X)
+        ]
+
+        for (df, scale, x) in parameters:
+            iw = invwishart(df, scale)
+            assert_equal(iw.var(), invwishart.var(df, scale))
+            assert_equal(iw.mean(), invwishart.mean(df, scale))
+            assert_equal(iw.mode(), invwishart.mode(df, scale))
+            assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
+
+    def test_1D_is_invgamma(self):
+        # The 1-dimensional inverse Wishart with an identity scale matrix is
+        # just an inverse gamma distribution.
+        # Test variance, mean, pdf
+        # Kolmogorov-Smirnov test for rvs
+        np.random.seed(482974)
+
+        sn = 500
+        dim = 1
+        scale = np.eye(dim)
+
+        df_range = np.arange(5, 20, 2, dtype=float)
+        X = np.linspace(0.1,10,num=10)
+        for df in df_range:
+            iw = invwishart(df, scale)
+            ig = invgamma(df/2, scale=1./2)
+
+            # Statistics
+            assert_allclose(iw.var(), ig.var())
+            assert_allclose(iw.mean(), ig.mean())
+
+            # PDF
+            assert_allclose(iw.pdf(X), ig.pdf(X))
+
+            # rvs
+            rvs = iw.rvs(size=sn)
+            args = (df/2, 0, 1./2)
+            alpha = 0.01
+            check_distribution_rvs('invgamma', args, alpha, rvs)
+
+    def test_wishart_invwishart_2D_rvs(self):
+        dim = 3
+        df = 10
+
+        # Construct a simple non-diagonal positive definite matrix
+        scale = np.eye(dim)
+        scale[0,1] = 0.5
+        scale[1,0] = 0.5
+
+        # Construct frozen Wishart and inverse Wishart random variables
+        w = wishart(df, scale)
+        iw = invwishart(df, scale)
+
+        # Get the generated random variables from a known seed
+        np.random.seed(248042)
+        w_rvs = wishart.rvs(df, scale)
+        np.random.seed(248042)
+        frozen_w_rvs = w.rvs()
+        np.random.seed(248042)
+        iw_rvs = invwishart.rvs(df, scale)
+        np.random.seed(248042)
+        frozen_iw_rvs = iw.rvs()
+
+        # Manually calculate what it should be, based on the Bartlett (1933)
+        # decomposition of a Wishart into D A A' D', where D is the Cholesky
+        # factorization of the scale matrix and A is the lower triangular matrix
+        # with the square root of chi^2 variates on the diagonal and N(0,1)
+        # variates in the lower triangle.
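+        # In symbols, for dim = 3 and df = 10:
+        #
+        #     S = D A A' D',  with D D' = scale, and
+        #     A = [[sqrt(c_10),         0,         0],
+        #          [       n_1, sqrt(c_9),         0],
+        #          [       n_2,       n_3, sqrt(c_8)]],
+        #
+        # where c_k ~ chi2(k) and n_i ~ N(0, 1); the variates below are drawn
+        # in the order the implementation consumes them from the shared seed.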
+ np.random.seed(248042) + covariances = np.random.normal(size=3) + variances = np.r_[ + np.random.chisquare(df), + np.random.chisquare(df-1), + np.random.chisquare(df-2), + ]**0.5 + + # Construct the lower-triangular A matrix + A = np.diag(variances) + A[np.tril_indices(dim, k=-1)] = covariances + + # Wishart random variate + D = np.linalg.cholesky(scale) + DA = D.dot(A) + manual_w_rvs = np.dot(DA, DA.T) + + # inverse Wishart random variate + # Supposing that the inverse wishart has scale matrix `scale`, then the + # random variate is the inverse of a random variate drawn from a Wishart + # distribution with scale matrix `inv_scale = np.linalg.inv(scale)` + iD = np.linalg.cholesky(np.linalg.inv(scale)) + iDA = iD.dot(A) + manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T)) + + # Test for equality + assert_allclose(w_rvs, manual_w_rvs) + assert_allclose(frozen_w_rvs, manual_w_rvs) + assert_allclose(iw_rvs, manual_iw_rvs) + assert_allclose(frozen_iw_rvs, manual_iw_rvs) + + def test_cho_inv_batch(self): + """Regression test for gh-8844.""" + a0 = np.array([[2, 1, 0, 0.5], + [1, 2, 0.5, 0.5], + [0, 0.5, 3, 1], + [0.5, 0.5, 1, 2]]) + a1 = np.array([[2, -1, 0, 0.5], + [-1, 2, 0.5, 0.5], + [0, 0.5, 3, 1], + [0.5, 0.5, 1, 4]]) + a = np.array([a0, a1]) + ainv = a.copy() + _cho_inv_batch(ainv) + ident = np.eye(4) + assert_allclose(a[0].dot(ainv[0]), ident, atol=1e-15) + assert_allclose(a[1].dot(ainv[1]), ident, atol=1e-15) + + def test_logpdf_4x4(self): + """Regression test for gh-8844.""" + X = np.array([[2, 1, 0, 0.5], + [1, 2, 0.5, 0.5], + [0, 0.5, 3, 1], + [0.5, 0.5, 1, 2]]) + Psi = np.array([[9, 7, 3, 1], + [7, 9, 5, 1], + [3, 5, 8, 2], + [1, 1, 2, 9]]) + nu = 6 + prob = invwishart.logpdf(X, nu, Psi) + # Explicit calculation from the formula on wikipedia. 
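+        # That formula, in the notation of the code below, is
+        #
+        #   logpdf = (nu/2) * log|Psi| - (nu*p/2) * log(2) - log Gamma_p(nu/2)
+        #            - ((nu + p + 1)/2) * log|X| - (1/2) * tr(Psi X^{-1})
+        #
+        # with Gamma_p the multivariate gamma function (multigammaln).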
+
+        p = X.shape[0]
+        sig, logdetX = np.linalg.slogdet(X)
+        sig, logdetPsi = np.linalg.slogdet(Psi)
+        M = np.linalg.solve(X, Psi)
+        expected = ((nu/2)*logdetPsi
+                    - (nu*p/2)*np.log(2)
+                    - multigammaln(nu/2, p)
+                    - (nu + p + 1)/2*logdetX
+                    - 0.5*M.trace())
+        assert_allclose(prob, expected)
+
+
+class TestSpecialOrthoGroup(object):
+    def test_reproducibility(self):
+        np.random.seed(514)
+        x = special_ortho_group.rvs(3)
+        expected = np.array([[-0.99394515, -0.04527879, 0.10011432],
+                             [0.04821555, -0.99846897, 0.02711042],
+                             [0.09873351, 0.03177334, 0.99460653]])
+        assert_array_almost_equal(x, expected)
+
+        random_state = np.random.RandomState(seed=514)
+        x = special_ortho_group.rvs(3, random_state=random_state)
+        assert_array_almost_equal(x, expected)
+
+    def test_invalid_dim(self):
+        assert_raises(ValueError, special_ortho_group.rvs, None)
+        assert_raises(ValueError, special_ortho_group.rvs, (2, 2))
+        assert_raises(ValueError, special_ortho_group.rvs, 1)
+        assert_raises(ValueError, special_ortho_group.rvs, 2.5)
+
+    def test_frozen_matrix(self):
+        dim = 7
+        frozen = special_ortho_group(dim)
+
+        rvs1 = frozen.rvs(random_state=1234)
+        rvs2 = special_ortho_group.rvs(dim, random_state=1234)
+
+        assert_equal(rvs1, rvs2)
+
+    def test_det_and_ortho(self):
+        xs = [special_ortho_group.rvs(dim)
+              for dim in range(2,12)
+              for i in range(3)]
+
+        # Test that determinants are always +1
+        dets = [np.linalg.det(x) for x in xs]
+        assert_allclose(dets, [1.]*30, rtol=1e-13)
+
+        # Test that these are orthogonal matrices
+        for x in xs:
+            assert_array_almost_equal(np.dot(x, x.T),
+                                      np.eye(x.shape[0]))
+
+    def test_haar(self):
+        # Test that the distribution is constant under rotation
+        # Every column should have the same distribution
+        # Additionally, the distribution should be invariant under another rotation
+
+        # Generate samples
+        dim = 5
+        samples = 1000  # Not too many, or the test takes too long
+        ks_prob = .05
+        np.random.seed(514)
+        xs = special_ortho_group.rvs(dim, size=samples)
+
+        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
+        # effectively picking off entries in the matrices of xs.
+        # These projections should all have the same distribution,
+        # establishing rotational invariance. We use the two-sided
+        # KS test to confirm this.
+        # We could instead test that angles between random vectors
+        # are uniformly distributed, but the below is sufficient.
+        # It is not feasible to consider all pairs, so pick a few.
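+        # proj[(r, c)] collects matrix entry (r, c) across the sample; under
+        # Haar measure every entry has the same marginal law, so each pair of
+        # these empirical samples should pass a two-sample KS test.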
+        els = ((0,0), (0,2), (1,4), (2,3))
+        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
+        proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els)
+        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
+        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
+        assert_array_less([ks_prob]*len(pairs), ks_tests)
+
+class TestOrthoGroup(object):
+    def test_reproducibility(self):
+        np.random.seed(515)
+        x = ortho_group.rvs(3)
+        x2 = ortho_group.rvs(3, random_state=515)
+        # Note this matrix has det -1, distinguishing O(N) from SO(N)
+        assert_almost_equal(np.linalg.det(x), -1)
+        expected = np.array([[0.94449759, -0.21678569, -0.24683651],
+                             [-0.13147569, -0.93800245, 0.3207266],
+                             [0.30106219, 0.27047251, 0.9144431]])
+        assert_array_almost_equal(x, expected)
+        assert_array_almost_equal(x2, expected)
+
+    def test_invalid_dim(self):
+        assert_raises(ValueError, ortho_group.rvs, None)
+        assert_raises(ValueError, ortho_group.rvs, (2, 2))
+        assert_raises(ValueError, ortho_group.rvs, 1)
+        assert_raises(ValueError, ortho_group.rvs, 2.5)
+
+    def test_det_and_ortho(self):
+        xs = [[ortho_group.rvs(dim)
+               for i in range(10)]
+              for dim in range(2,12)]
+
+        # Test that abs determinants are always +1
+        dets = np.array([[np.linalg.det(x) for x in xx] for xx in xs])
+        assert_allclose(np.fabs(dets), np.ones(dets.shape), rtol=1e-13)
+
+        # Test that we get both positive and negative determinants
+        # Check that we have at least one and less than 10 negative dets in a
+        # sample of 10. The rest are positive by the previous test.
+        # Test each dimension separately
+        assert_array_less([0]*10, [np.nonzero(d < 0)[0].shape[0] for d in dets])
+        assert_array_less([np.nonzero(d < 0)[0].shape[0] for d in dets], [10]*10)
+
+        # Test that these are orthogonal matrices
+        for xx in xs:
+            for x in xx:
+                assert_array_almost_equal(np.dot(x, x.T),
+                                          np.eye(x.shape[0]))
+
+    def test_haar(self):
+        # Test that the distribution is constant under rotation
+        # Every column should have the same distribution
+        # Additionally, the distribution should be invariant under another rotation
+
+        # Generate samples
+        dim = 5
+        samples = 1000  # Not too many, or the test takes too long
+        ks_prob = .05
+        np.random.seed(518)  # Note that the test is sensitive to seed too
+        xs = ortho_group.rvs(dim, size=samples)
+
+        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
+        # effectively picking off entries in the matrices of xs.
+        # These projections should all have the same distribution,
+        # establishing rotational invariance. We use the two-sided
+        # KS test to confirm this.
+        # We could instead test that angles between random vectors
+        # are uniformly distributed, but the below is sufficient.
+        # It is not feasible to consider all pairs, so pick a few.
+        els = ((0,0), (0,2), (1,4), (2,3))
+        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
+        proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els)
+        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
+        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
+        assert_array_less([ks_prob]*len(pairs), ks_tests)
+
+    @pytest.mark.slow
+    def test_pairwise_distances(self):
+        # Test that the distribution of pairwise distances is close to correct.
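+        # The reference sampler below uses the polar/SVD construction: for a
+        # Gaussian matrix G = U S V', the orthogonal factor U V' is Haar
+        # distributed, so its pairwise-distance statistics should match
+        # those of ortho_group.rvs.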
+ np.random.seed(514) + + def random_ortho(dim): + u, _s, v = np.linalg.svd(np.random.normal(size=(dim, dim))) + return np.dot(u, v) + + for dim in range(2, 6): + def generate_test_statistics(rvs, N=1000, eps=1e-10): + stats = np.array([ + np.sum((rvs(dim=dim) - rvs(dim=dim))**2) + for _ in range(N) + ]) + # Add a bit of noise to account for numeric accuracy. + stats += np.random.uniform(-eps, eps, size=stats.shape) + return stats + + expected = generate_test_statistics(random_ortho) + actual = generate_test_statistics(scipy.stats.ortho_group.rvs) + + _D, p = scipy.stats.ks_2samp(expected, actual) + + assert_array_less(.05, p) + +class TestRandomCorrelation(object): + def test_reproducibility(self): + np.random.seed(514) + eigs = (.5, .8, 1.2, 1.5) + x = random_correlation.rvs((.5, .8, 1.2, 1.5)) + x2 = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=514) + expected = np.array([[1., -0.20387311, 0.18366501, -0.04953711], + [-0.20387311, 1., -0.24351129, 0.06703474], + [0.18366501, -0.24351129, 1., 0.38530195], + [-0.04953711, 0.06703474, 0.38530195, 1.]]) + assert_array_almost_equal(x, expected) + assert_array_almost_equal(x2, expected) + + def test_invalid_eigs(self): + assert_raises(ValueError, random_correlation.rvs, None) + assert_raises(ValueError, random_correlation.rvs, 'test') + assert_raises(ValueError, random_correlation.rvs, 2.5) + assert_raises(ValueError, random_correlation.rvs, [2.5]) + assert_raises(ValueError, random_correlation.rvs, [[1,2],[3,4]]) + assert_raises(ValueError, random_correlation.rvs, [2.5, -.5]) + assert_raises(ValueError, random_correlation.rvs, [1, 2, .1]) + + def test_definition(self): + # Test the definition of a correlation matrix in several dimensions: + # + # 1. Det is product of eigenvalues (and positive by construction + # in examples) + # 2. 1's on diagonal + # 3. Matrix is symmetric + + def norm(i, e): + return i*e/sum(e) + + np.random.seed(123) + + eigs = [norm(i, np.random.uniform(size=i)) for i in range(2, 6)] + eigs.append([4,0,0,0]) + + ones = [[1.]*len(e) for e in eigs] + xs = [random_correlation.rvs(e) for e in eigs] + + # Test that determinants are products of eigenvalues + # These are positive by construction + # Could also test that the eigenvalues themselves are correct, + # but this seems sufficient. 
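+        # (Note the trace of a correlation matrix -- ones on the diagonal --
+        # equals its dimension, so random_correlation requires the requested
+        # eigenvalues to sum to the dimension; norm() above rescales them.)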
+ dets = [np.fabs(np.linalg.det(x)) for x in xs] + dets_known = [np.prod(e) for e in eigs] + assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13) + + # Test for 1's on the diagonal + diags = [np.diag(x) for x in xs] + for a, b in zip(diags, ones): + assert_allclose(a, b, rtol=1e-13) + + # Correlation matrices are symmetric + for x in xs: + assert_allclose(x, x.T, rtol=1e-13) + + def test_to_corr(self): + # Check some corner cases in to_corr + + # ajj == 1 + m = np.array([[0.1, 0], [0, 1]], dtype=float) + m = random_correlation._to_corr(m) + assert_allclose(m, np.array([[1, 0], [0, 0.1]])) + + # Floating point overflow; fails to compute the correct + # rotation, but should still produce some valid rotation + # rather than infs/nans + with np.errstate(over='ignore'): + g = np.array([[0, 1], [-1, 0]]) + + m0 = np.array([[1e300, 0], [0, np.nextafter(1, 0)]], dtype=float) + m = random_correlation._to_corr(m0.copy()) + assert_allclose(m, g.T.dot(m0).dot(g)) + + m0 = np.array([[0.9, 1e300], [1e300, 1.1]], dtype=float) + m = random_correlation._to_corr(m0.copy()) + assert_allclose(m, g.T.dot(m0).dot(g)) + + # Zero discriminant; should set the first diag entry to 1 + m0 = np.array([[2, 1], [1, 2]], dtype=float) + m = random_correlation._to_corr(m0.copy()) + assert_allclose(m[0,0], 1) + + # Slightly negative discriminant; should be approx correct still + m0 = np.array([[2 + 1e-7, 1], [1, 2]], dtype=float) + m = random_correlation._to_corr(m0.copy()) + assert_allclose(m[0,0], 1) + + +class TestUnitaryGroup(object): + def test_reproducibility(self): + np.random.seed(514) + x = unitary_group.rvs(3) + x2 = unitary_group.rvs(3, random_state=514) + + expected = np.array([[0.308771+0.360312j, 0.044021+0.622082j, 0.160327+0.600173j], + [0.732757+0.297107j, 0.076692-0.4614j, -0.394349+0.022613j], + [-0.148844+0.357037j, -0.284602-0.557949j, 0.607051+0.299257j]]) + + assert_array_almost_equal(x, expected) + assert_array_almost_equal(x2, expected) + + def test_invalid_dim(self): + assert_raises(ValueError, unitary_group.rvs, None) + assert_raises(ValueError, unitary_group.rvs, (2, 2)) + assert_raises(ValueError, unitary_group.rvs, 1) + assert_raises(ValueError, unitary_group.rvs, 2.5) + + def test_unitarity(self): + xs = [unitary_group.rvs(dim) + for dim in range(2,12) + for i in range(3)] + + # Test that these are unitary matrices + for x in xs: + assert_allclose(np.dot(x, x.conj().T), np.eye(x.shape[0]), atol=1e-15) + + def test_haar(self): + # Test that the eigenvalues, which lie on the unit circle in + # the complex plane, are uncorrelated. + + # Generate samples + dim = 5 + samples = 1000 # Not too many, or the test takes too long + np.random.seed(514) # Note that the test is sensitive to seed too + xs = unitary_group.rvs(dim, size=samples) + + # The angles "x" of the eigenvalues should be uniformly distributed + # Overall this seems to be a necessary but weak test of the distribution. 
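+        # (The joint eigenangle law of a Haar unitary is rotation invariant,
+        # so each angle should be marginally uniform on [-pi, pi); that
+        # marginal is what the one-sample KS test below checks.)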
+ eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs]) + x = np.arctan2(eigs.imag, eigs.real) + res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf) + assert_(res.pvalue > 0.05) + +def check_pickling(distfn, args): + # check that a distribution instance pickles and unpickles + # pay special attention to the random_state property + + # save the random_state (restore later) + rndm = distfn.random_state + + distfn.random_state = 1234 + distfn.rvs(*args, size=8) + s = pickle.dumps(distfn) + r0 = distfn.rvs(*args, size=8) + + unpickled = pickle.loads(s) + r1 = unpickled.rvs(*args, size=8) + assert_equal(r0, r1) + + # restore the random_state + distfn.random_state = rndm + + +def test_random_state_property(): + scale = np.eye(3) + scale[0, 1] = 0.5 + scale[1, 0] = 0.5 + dists = [ + [multivariate_normal, ()], + [dirichlet, (np.array([1.]), )], + [wishart, (10, scale)], + [invwishart, (10, scale)], + [multinomial, (5, [0.5, 0.4, 0.1])], + [ortho_group, (2,)], + [special_ortho_group, (2,)] + ] + for distfn, args in dists: + check_random_state_property(distfn, args) + check_pickling(distfn, args) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_multivariate.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_multivariate.pyc new file mode 100644 index 0000000..19b9adb Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_multivariate.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_rank.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_rank.py new file mode 100644 index 0000000..e7885f8 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_rank.py @@ -0,0 +1,218 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_equal, assert_array_equal + +from scipy.stats import rankdata, tiecorrect + + +class TestTieCorrect(object): + + def test_empty(self): + """An empty array requires no correction, should return 1.0.""" + ranks = np.array([], dtype=np.float64) + c = tiecorrect(ranks) + assert_equal(c, 1.0) + + def test_one(self): + """A single element requires no correction, should return 1.0.""" + ranks = np.array([1.0], dtype=np.float64) + c = tiecorrect(ranks) + assert_equal(c, 1.0) + + def test_no_correction(self): + """Arrays with no ties require no correction.""" + ranks = np.arange(2.0) + c = tiecorrect(ranks) + assert_equal(c, 1.0) + ranks = np.arange(3.0) + c = tiecorrect(ranks) + assert_equal(c, 1.0) + + def test_basic(self): + """Check a few basic examples of the tie correction factor.""" + # One tie of two elements + ranks = np.array([1.0, 2.5, 2.5]) + c = tiecorrect(ranks) + T = 2.0 + N = ranks.size + expected = 1.0 - (T**3 - T) / (N**3 - N) + assert_equal(c, expected) + + # One tie of two elements (same as above, but tie is not at the end) + ranks = np.array([1.5, 1.5, 3.0]) + c = tiecorrect(ranks) + T = 2.0 + N = ranks.size + expected = 1.0 - (T**3 - T) / (N**3 - N) + assert_equal(c, expected) + + # One tie of three elements + ranks = np.array([1.0, 3.0, 3.0, 3.0]) + c = tiecorrect(ranks) + T = 3.0 + N = ranks.size + expected = 1.0 - (T**3 - T) / (N**3 - N) + assert_equal(c, expected) + + # Two ties, lengths 2 and 3. 
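+        # Worked out: T1 = 2, T2 = 3, N = 5, so the correction factor is
+        # 1 - ((2**3 - 2) + (3**3 - 3)) / (5**3 - 5) = 1 - 30/120 = 0.75.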
+ ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0]) + c = tiecorrect(ranks) + T1 = 2.0 + T2 = 3.0 + N = ranks.size + expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N) + assert_equal(c, expected) + + def test_overflow(self): + ntie, k = 2000, 5 + a = np.repeat(np.arange(k), ntie) + n = a.size # ntie * k + out = tiecorrect(rankdata(a)) + assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n)) + + +class TestRankData(object): + + def test_empty(self): + """stats.rankdata([]) should return an empty array.""" + a = np.array([], dtype=int) + r = rankdata(a) + assert_array_equal(r, np.array([], dtype=np.float64)) + r = rankdata([]) + assert_array_equal(r, np.array([], dtype=np.float64)) + + def test_one(self): + """Check stats.rankdata with an array of length 1.""" + data = [100] + a = np.array(data, dtype=int) + r = rankdata(a) + assert_array_equal(r, np.array([1.0], dtype=np.float64)) + r = rankdata(data) + assert_array_equal(r, np.array([1.0], dtype=np.float64)) + + def test_basic(self): + """Basic tests of stats.rankdata.""" + data = [100, 10, 50] + expected = np.array([3.0, 1.0, 2.0], dtype=np.float64) + a = np.array(data, dtype=int) + r = rankdata(a) + assert_array_equal(r, expected) + r = rankdata(data) + assert_array_equal(r, expected) + + data = [40, 10, 30, 10, 50] + expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64) + a = np.array(data, dtype=int) + r = rankdata(a) + assert_array_equal(r, expected) + r = rankdata(data) + assert_array_equal(r, expected) + + data = [20, 20, 20, 10, 10, 10] + expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64) + a = np.array(data, dtype=int) + r = rankdata(a) + assert_array_equal(r, expected) + r = rankdata(data) + assert_array_equal(r, expected) + # The docstring states explicitly that the argument is flattened. 
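+        # e.g. rankdata([[20, 10], [30, 10]]) should equal
+        # rankdata([20, 10, 30, 10]) -> array([3., 1.5, 4., 1.5]).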
+ a2d = a.reshape(2, 3) + r = rankdata(a2d) + assert_array_equal(r, expected) + + def test_rankdata_object_string(self): + min_rank = lambda a: [1 + sum(i < j for i in a) for j in a] + max_rank = lambda a: [sum(i <= j for i in a) for j in a] + ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)]) + + def average_rank(a): + return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))] + + def dense_rank(a): + b = np.unique(a) + return [1 + sum(i < j for i in b) for j in a] + + rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank, + average=average_rank, dense=dense_rank) + + def check_ranks(a): + for method in 'min', 'max', 'dense', 'ordinal', 'average': + out = rankdata(a, method=method) + assert_array_equal(out, rankf[method](a)) + + val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz'] + check_ranks(np.random.choice(val, 200)) + check_ranks(np.random.choice(val, 200).astype('object')) + + val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object') + check_ranks(np.random.choice(val, 200).astype('object')) + + def test_large_int(self): + data = np.array([2**60, 2**60+1], dtype=np.uint64) + r = rankdata(data) + assert_array_equal(r, [1.0, 2.0]) + + data = np.array([2**60, 2**60+1], dtype=np.int64) + r = rankdata(data) + assert_array_equal(r, [1.0, 2.0]) + + data = np.array([2**60, -2**60+1], dtype=np.int64) + r = rankdata(data) + assert_array_equal(r, [2.0, 1.0]) + + def test_big_tie(self): + for n in [10000, 100000, 1000000]: + data = np.ones(n, dtype=int) + r = rankdata(data) + expected_rank = 0.5 * (n + 1) + assert_array_equal(r, expected_rank * data, + "test failed with n=%d" % n) + + +_cases = ( + # values, method, expected + ([], 'average', []), + ([], 'min', []), + ([], 'max', []), + ([], 'dense', []), + ([], 'ordinal', []), + # + ([100], 'average', [1.0]), + ([100], 'min', [1.0]), + ([100], 'max', [1.0]), + ([100], 'dense', [1.0]), + ([100], 'ordinal', [1.0]), + # + ([100, 100, 100], 'average', [2.0, 2.0, 2.0]), + ([100, 100, 100], 'min', [1.0, 1.0, 1.0]), + ([100, 100, 100], 'max', [3.0, 3.0, 3.0]), + ([100, 100, 100], 'dense', [1.0, 1.0, 1.0]), + ([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]), + # + ([100, 300, 200], 'average', [1.0, 3.0, 2.0]), + ([100, 300, 200], 'min', [1.0, 3.0, 2.0]), + ([100, 300, 200], 'max', [1.0, 3.0, 2.0]), + ([100, 300, 200], 'dense', [1.0, 3.0, 2.0]), + ([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]), + # + ([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]), + ([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]), + ([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]), + ([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]), + ([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]), + # + ([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]), + ([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]), + ([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]), + ([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]), + ([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]), + # + ([10] * 30, 'ordinal', np.arange(1.0, 31.0)), +) + + +def test_cases(): + for values, method, expected in _cases: + r = rankdata(values, method=method) + assert_array_equal(r, expected) diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_rank.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_rank.pyc new file mode 100644 index 0000000..8bd9a02 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_rank.pyc differ 
diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_stats.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_stats.py
new file mode 100644
index 0000000..f383395
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_stats.py
@@ -0,0 +1,4702 @@
+""" Test functions for stats module
+
+    WRITTEN BY LOUIS LUANGKESORN <lluang@yahoo.com> FOR THE STATS MODULE
+    BASED ON WILKINSON'S STATISTICS QUIZ
+    https://www.stanford.edu/~clint/bench/wilk.txt
+
+    Additional tests by a host of SciPy developers.
+"""
+from __future__ import division, print_function, absolute_import
+
+import os
+import sys
+import warnings
+from collections import namedtuple
+
+from numpy.testing import (assert_, assert_equal,
+                           assert_almost_equal, assert_array_almost_equal,
+                           assert_array_equal, assert_approx_equal,
+                           assert_allclose)
+import pytest
+from pytest import raises as assert_raises
+from scipy._lib._numpy_compat import suppress_warnings
+import numpy.ma.testutils as mat
+from numpy import array, arange, float32, float64, power
+import numpy as np
+
+import scipy.stats as stats
+import scipy.stats.mstats as mstats
+import scipy.stats.mstats_basic as mstats_basic
+from scipy._lib._version import NumpyVersion
+from scipy._lib.six import xrange
+from .common_tests import check_named_results
+from scipy.special import kv
+from scipy.integrate import quad
+
+""" Numbers in docstrings beginning with 'W' refer to the section numbers
+    and headings found in the STATISTICS QUIZ of Leland Wilkinson.  These are
+    considered to be essential functionality.  True testing and
+    evaluation of a statistics package requires use of the
+    NIST Statistical test data.  See McCullough(1999) Assessing The Reliability
+    of Statistical Software for a test methodology and its
+    implementation in testing SAS, SPSS, and S-Plus
+"""
+
+# Datasets
+# These data sets are from the nasty.dat sets used by Wilkinson
+# For completeness, I should write the relevant tests and count them as failures
+# Somewhat acceptable, since this is still beta software.  
It would count as a +# good target for 1.0 status +X = array([1,2,3,4,5,6,7,8,9], float) +ZERO = array([0,0,0,0,0,0,0,0,0], float) +BIG = array([99999991,99999992,99999993,99999994,99999995,99999996,99999997, + 99999998,99999999], float) +LITTLE = array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996, + 0.99999997,0.99999998,0.99999999], float) +HUGE = array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12], float) +TINY = array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12], float) +ROUND = array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5], float) + +class TestTrimmedStats(object): + # TODO: write these tests to handle missing values properly + dprec = np.finfo(np.float64).precision + + def test_tmean(self): + y = stats.tmean(X, (2, 8), (True, True)) + assert_approx_equal(y, 5.0, significant=self.dprec) + + y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False)) + y2 = stats.tmean(X, limits=None) + assert_approx_equal(y1, y2, significant=self.dprec) + + def test_tvar(self): + y = stats.tvar(X, limits=(2, 8), inclusive=(True, True)) + assert_approx_equal(y, 4.6666666666666661, significant=self.dprec) + + y = stats.tvar(X, limits=None) + assert_approx_equal(y, X.var(ddof=1), significant=self.dprec) + + def test_tstd(self): + y = stats.tstd(X, (2, 8), (True, True)) + assert_approx_equal(y, 2.1602468994692865, significant=self.dprec) + + y = stats.tstd(X, limits=None) + assert_approx_equal(y, X.std(ddof=1), significant=self.dprec) + + def test_tmin(self): + assert_equal(stats.tmin(4), 4) + + x = np.arange(10) + assert_equal(stats.tmin(x), 0) + assert_equal(stats.tmin(x, lowerlimit=0), 0) + assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1) + + x = x.reshape((5, 2)) + assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1]) + assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8]) + assert_equal(stats.tmin(x, axis=None), 0) + + x = np.arange(10.) + x[9] = np.nan + with suppress_warnings() as sup: + r = sup.record(RuntimeWarning, "invalid value*") + assert_equal(stats.tmin(x), np.nan) + assert_equal(stats.tmin(x, nan_policy='omit'), 0.) + assert_raises(ValueError, stats.tmin, x, nan_policy='raise') + assert_raises(ValueError, stats.tmin, x, nan_policy='foobar') + msg = "'propagate', 'raise', 'omit'" + with assert_raises(ValueError, match=msg): + stats.tmin(x, nan_policy='foo') + + def test_tmax(self): + assert_equal(stats.tmax(4), 4) + + x = np.arange(10) + assert_equal(stats.tmax(x), 9) + assert_equal(stats.tmax(x, upperlimit=9), 9) + assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8) + + x = x.reshape((5, 2)) + assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7]) + assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9]) + assert_equal(stats.tmax(x, axis=None), 9) + + x = np.arange(10.) + x[6] = np.nan + with suppress_warnings() as sup: + r = sup.record(RuntimeWarning, "invalid value*") + assert_equal(stats.tmax(x), np.nan) + assert_equal(stats.tmax(x, nan_policy='omit'), 9.) + assert_raises(ValueError, stats.tmax, x, nan_policy='raise') + assert_raises(ValueError, stats.tmax, x, nan_policy='foobar') + + def test_tsem(self): + y = stats.tsem(X, limits=(3, 8), inclusive=(False, True)) + y_ref = np.array([4, 5, 6, 7, 8]) + assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size), + significant=self.dprec) + + assert_approx_equal(stats.tsem(X, limits=[-1, 10]), + stats.tsem(X, limits=None), + significant=self.dprec) + + +class TestCorrPearsonr(object): + """ W.II.D. Compute a correlation matrix on all the variables. 
+ + All the correlations, except for ZERO and MISS, should be exactly 1. + ZERO and MISS should have undefined or missing correlations with the + other variables. The same should go for SPEARMAN correlations, if + your program has them. + """ + def test_pXX(self): + y = stats.pearsonr(X,X) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXBIG(self): + y = stats.pearsonr(X,BIG) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXLITTLE(self): + y = stats.pearsonr(X,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXHUGE(self): + y = stats.pearsonr(X,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXTINY(self): + y = stats.pearsonr(X,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXROUND(self): + y = stats.pearsonr(X,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGBIG(self): + y = stats.pearsonr(BIG,BIG) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGLITTLE(self): + y = stats.pearsonr(BIG,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGHUGE(self): + y = stats.pearsonr(BIG,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGTINY(self): + y = stats.pearsonr(BIG,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGROUND(self): + y = stats.pearsonr(BIG,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pLITTLELITTLE(self): + y = stats.pearsonr(LITTLE,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pLITTLEHUGE(self): + y = stats.pearsonr(LITTLE,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pLITTLETINY(self): + y = stats.pearsonr(LITTLE,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pLITTLEROUND(self): + y = stats.pearsonr(LITTLE,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pHUGEHUGE(self): + y = stats.pearsonr(HUGE,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pHUGETINY(self): + y = stats.pearsonr(HUGE,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pHUGEROUND(self): + y = stats.pearsonr(HUGE,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pTINYTINY(self): + y = stats.pearsonr(TINY,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pTINYROUND(self): + y = stats.pearsonr(TINY,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pROUNDROUND(self): + y = stats.pearsonr(ROUND,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_r_exactly_pos1(self): + a = arange(3.0) + b = a + r, prob = stats.pearsonr(a,b) + assert_equal(r, 1.0) + assert_equal(prob, 0.0) + + def test_r_exactly_neg1(self): + a = arange(3.0) + b = -a + r, prob = stats.pearsonr(a,b) + assert_equal(r, -1.0) + assert_equal(prob, 0.0) + + def test_basic(self): + # A basic test, with a correlation coefficient + # that is not 1 or -1. + a = array([-1, 0, 1]) + b = array([0, 0, 3]) + r, prob = stats.pearsonr(a, b) + assert_approx_equal(r, np.sqrt(3)/2) + assert_approx_equal(prob, 1.0/3) + + +class TestFisherExact(object): + """Some tests to show that fisher_exact() works correctly. + + Note that in SciPy 0.9.0 this was not working well for large numbers due to + inaccuracy of the hypergeom distribution (see #1218). Fixed now. + + Also note that R and Scipy have different argument formats for their + hypergeometric distribution functions. 
+ + R: + > phyper(18999, 99000, 110000, 39000, lower.tail = FALSE) + [1] 1.701815e-09 + """ + def test_basic(self): + fisher_exact = stats.fisher_exact + + res = fisher_exact([[14500, 20000], [30000, 40000]])[1] + assert_approx_equal(res, 0.01106, significant=4) + res = fisher_exact([[100, 2], [1000, 5]])[1] + assert_approx_equal(res, 0.1301, significant=4) + res = fisher_exact([[2, 7], [8, 2]])[1] + assert_approx_equal(res, 0.0230141, significant=6) + res = fisher_exact([[5, 1], [10, 10]])[1] + assert_approx_equal(res, 0.1973244, significant=6) + res = fisher_exact([[5, 15], [20, 20]])[1] + assert_approx_equal(res, 0.0958044, significant=6) + res = fisher_exact([[5, 16], [20, 25]])[1] + assert_approx_equal(res, 0.1725862, significant=6) + res = fisher_exact([[10, 5], [10, 1]])[1] + assert_approx_equal(res, 0.1973244, significant=6) + res = fisher_exact([[5, 0], [1, 4]])[1] + assert_approx_equal(res, 0.04761904, significant=6) + res = fisher_exact([[0, 1], [3, 2]])[1] + assert_approx_equal(res, 1.0) + res = fisher_exact([[0, 2], [6, 4]])[1] + assert_approx_equal(res, 0.4545454545) + res = fisher_exact([[2, 7], [8, 2]]) + assert_approx_equal(res[1], 0.0230141, significant=6) + assert_approx_equal(res[0], 4.0 / 56) + + def test_precise(self): + # results from R + # + # R defines oddsratio differently (see Notes section of fisher_exact + # docstring), so those will not match. We leave them in anyway, in + # case they will be useful later on. We test only the p-value. + tablist = [ + ([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)), + ([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)), + ([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)), + ([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)), + ([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)), + ([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)), + ([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)), + ([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)), + ([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)), + ([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)), + ([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000)) + ] + for table, res_r in tablist: + res = stats.fisher_exact(np.asarray(table)) + np.testing.assert_almost_equal(res[1], res_r[1], decimal=11, + verbose=True) + + @pytest.mark.slow + def test_large_numbers(self): + # Test with some large numbers. Regression test for #1401 + pvals = [5.56e-11, 2.666e-11, 1.363e-11] # from R + for pval, num in zip(pvals, [75, 76, 77]): + res = stats.fisher_exact([[17704, 496], [1065, num]])[1] + assert_approx_equal(res, pval, significant=4) + + res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1] + assert_approx_equal(res, 0.2751, significant=4) + + def test_raises(self): + # test we raise an error for wrong shape of input. 
+        assert_raises(ValueError, stats.fisher_exact,
+                      np.arange(6).reshape(2, 3))
+
+    def test_row_or_col_zero(self):
+        tables = ([[0, 0], [5, 10]],
+                  [[5, 10], [0, 0]],
+                  [[0, 5], [0, 10]],
+                  [[5, 0], [10, 0]])
+        for table in tables:
+            oddsratio, pval = stats.fisher_exact(table)
+            assert_equal(pval, 1.0)
+            assert_equal(oddsratio, np.nan)
+
+    def test_less_greater(self):
+        tables = (
+            # Some tables to compare with R:
+            [[2, 7], [8, 2]],
+            [[200, 7], [8, 300]],
+            [[28, 21], [6, 1957]],
+            [[190, 800], [200, 900]],
+            # Some tables with simple exact values
+            # (includes regression test for ticket #1568):
+            [[0, 2], [3, 0]],
+            [[1, 1], [2, 1]],
+            [[2, 0], [1, 2]],
+            [[0, 1], [2, 3]],
+            [[1, 0], [1, 4]],
+            )
+        pvals = (
+            # from R:
+            [0.018521725952066501, 0.9990149169715733],
+            [1.0, 2.0056578803889148e-122],
+            [1.0, 5.7284374608319831e-44],
+            [0.7416227, 0.2959826],
+            # Exact:
+            [0.1, 1.0],
+            [0.7, 0.9],
+            [1.0, 0.3],
+            [2./3, 1.0],
+            [1.0, 1./3],
+            )
+        for table, pval in zip(tables, pvals):
+            res = []
+            res.append(stats.fisher_exact(table, alternative="less")[1])
+            res.append(stats.fisher_exact(table, alternative="greater")[1])
+            assert_allclose(res, pval, atol=0, rtol=1e-7)
+
+    def test_gh3014(self):
+        # check if issue #3014 has been fixed.
+        # before, this would have raised a ValueError
+        odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])
+
+
+class TestCorrSpearmanr(object):
+    """ W.II.D. Compute a correlation matrix on all the variables.
+
+        All the correlations, except for ZERO and MISS, should be exactly 1.
+        ZERO and MISS should have undefined or missing correlations with the
+        other variables.  The same should go for SPEARMAN correlations, if
+        your program has them.
+    """
+    def test_scalar(self):
+        y = stats.spearmanr(4., 2.)
+        assert_(np.isnan(y).all())
+
+    def test_uneven_lengths(self):
+        assert_raises(ValueError, stats.spearmanr, [1, 2, 1], [8, 9])
+        assert_raises(ValueError, stats.spearmanr, [1, 2, 1], 8)
+
+    def test_uneven_2d_shapes(self):
+        # Different number of columns should work - those just get concatenated.
+        np.random.seed(232324)
+        x = np.random.randn(4, 3)
+        y = np.random.randn(4, 2)
+        assert stats.spearmanr(x, y).correlation.shape == (5, 5)
+        assert stats.spearmanr(x.T, y.T, axis=1).pvalue.shape == (5, 5)
+
+        assert_raises(ValueError, stats.spearmanr, x, y, axis=1)
+        assert_raises(ValueError, stats.spearmanr, x.T, y.T)
+
+    def test_ndim_too_high(self):
+        np.random.seed(232324)
+        x = np.random.randn(4, 3, 2)
+        assert_raises(ValueError, stats.spearmanr, x)
+        assert_raises(ValueError, stats.spearmanr, x, x)
+        assert_raises(ValueError, stats.spearmanr, x, None, None)
+        # But should work with axis=None (raveling axes) for two input arrays
+        assert_allclose(stats.spearmanr(x, x, axis=None),
+                        stats.spearmanr(x.flatten(), x.flatten(), axis=0))
+
+    def test_nan_policy(self):
+        x = np.arange(10.)
+ x[9] = np.nan + assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan)) + assert_array_equal(stats.spearmanr(x, x, nan_policy='omit'), + (1.0, 0.0)) + assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise') + assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar') + + def test_sXX(self): + y = stats.spearmanr(X,X) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXBIG(self): + y = stats.spearmanr(X,BIG) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXLITTLE(self): + y = stats.spearmanr(X,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXHUGE(self): + y = stats.spearmanr(X,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXTINY(self): + y = stats.spearmanr(X,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXROUND(self): + y = stats.spearmanr(X,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGBIG(self): + y = stats.spearmanr(BIG,BIG) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGLITTLE(self): + y = stats.spearmanr(BIG,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGHUGE(self): + y = stats.spearmanr(BIG,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGTINY(self): + y = stats.spearmanr(BIG,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGROUND(self): + y = stats.spearmanr(BIG,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sLITTLELITTLE(self): + y = stats.spearmanr(LITTLE,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sLITTLEHUGE(self): + y = stats.spearmanr(LITTLE,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sLITTLETINY(self): + y = stats.spearmanr(LITTLE,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sLITTLEROUND(self): + y = stats.spearmanr(LITTLE,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sHUGEHUGE(self): + y = stats.spearmanr(HUGE,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sHUGETINY(self): + y = stats.spearmanr(HUGE,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sHUGEROUND(self): + y = stats.spearmanr(HUGE,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sTINYTINY(self): + y = stats.spearmanr(TINY,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sTINYROUND(self): + y = stats.spearmanr(TINY,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sROUNDROUND(self): + y = stats.spearmanr(ROUND,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_spearmanr_result_attributes(self): + res = stats.spearmanr(X, X) + attributes = ('correlation', 'pvalue') + check_named_results(res, attributes) + + def test_1d_vs_2d(self): + x1 = [1, 2, 3, 4, 5, 6] + x2 = [1, 2, 3, 4, 6, 5] + res1 = stats.spearmanr(x1, x2) + res2 = stats.spearmanr(np.asarray([x1, x2]).T) + assert_allclose(res1, res2) + + def test_1d_vs_2d_nans(self): + # Now the same with NaNs present. Regression test for gh-9103. 
+        for nan_policy in ['propagate', 'omit']:
+            x1 = [1, np.nan, 3, 4, 5, 6]
+            x2 = [1, 2, 3, 4, 6, np.nan]
+            res1 = stats.spearmanr(x1, x2, nan_policy=nan_policy)
+            res2 = stats.spearmanr(np.asarray([x1, x2]).T, nan_policy=nan_policy)
+            assert_allclose(res1, res2)
+
+    def test_3cols(self):
+        x1 = np.arange(6)
+        x2 = -x1
+        x3 = np.array([0, 1, 2, 3, 5, 4])
+        x = np.asarray([x1, x2, x3]).T
+        actual = stats.spearmanr(x)
+        expected_corr = np.array([[1, -1, 0.94285714],
+                                  [-1, 1, -0.94285714],
+                                  [0.94285714, -0.94285714, 1]])
+        expected_pvalue = np.zeros((3, 3), dtype=float)
+        expected_pvalue[2, 0:2] = 0.00480466472
+        expected_pvalue[0:2, 2] = 0.00480466472
+
+        assert_allclose(actual.correlation, expected_corr)
+        assert_allclose(actual.pvalue, expected_pvalue)
+
+    def test_gh_9103(self):
+        # Regression test for gh-9103.
+        x = np.array([[np.nan, 3.0, 4.0, 5.0, 5.1, 6.0, 9.2],
+                      [5.0, np.nan, 4.1, 4.8, 4.9, 5.0, 4.1],
+                      [0.5, 4.0, 7.1, 3.8, 8.0, 5.1, 7.6]]).T
+        corr = np.array([[np.nan, np.nan, np.nan],
+                         [np.nan, np.nan, np.nan],
+                         [np.nan, np.nan, 1.]])
+        assert_allclose(stats.spearmanr(x, nan_policy='propagate').correlation,
+                        corr)
+
+        res = stats.spearmanr(x, nan_policy='omit').correlation
+        assert_allclose((res[0][1], res[0][2], res[1][2]),
+                        (0.2051957, 0.4857143, -0.4707919), rtol=1e-6)
+
+    def test_gh_8111(self):
+        # Regression test for gh-8111 (different result for float/int/bool).
+        n = 100
+        np.random.seed(234568)
+        x = np.random.rand(n)
+        m = np.random.rand(n) > 0.7
+
+        # bool against float, no nans
+        a = (x > .5)
+        b = np.array(x)
+        res1 = stats.spearmanr(a, b, nan_policy='omit').correlation
+
+        # bool against float with NaNs
+        b[m] = np.nan
+        res2 = stats.spearmanr(a, b, nan_policy='omit').correlation
+
+        # int against float with NaNs
+        a = a.astype(np.int32)
+        res3 = stats.spearmanr(a, b, nan_policy='omit').correlation
+
+        expected = [0.865895477, 0.866100381, 0.866100381]
+        assert_allclose([res1, res2, res3], expected)
+
+
+def test_spearmanr():
+    # Cross-check with R:
+    # cor.test(c(1,2,3,4,5),c(5,6,7,8,7),method="spearman")
+    x1 = [1, 2, 3, 4, 5]
+    x2 = [5, 6, 7, 8, 7]
+    expected = (0.82078268166812329, 0.088587005313543798)
+    res = stats.spearmanr(x1, x2)
+    assert_approx_equal(res[0], expected[0])
+    assert_approx_equal(res[1], expected[1])
+
+    attributes = ('correlation', 'pvalue')
+    res = stats.spearmanr(x1, x2)
+    check_named_results(res, attributes)
+
+    # with only ties in one or both inputs
+    with np.errstate(invalid="ignore"):
+        assert_equal(stats.spearmanr([2,2,2], [2,2,2]), (np.nan, np.nan))
+        assert_equal(stats.spearmanr([2,0,2], [2,2,2]), (np.nan, np.nan))
+        assert_equal(stats.spearmanr([2,2,2], [2,0,2]), (np.nan, np.nan))
+
+    # empty arrays provided as input
+    assert_equal(stats.spearmanr([], []), (np.nan, np.nan))
+
+    np.random.seed(7546)
+    x = np.array([np.random.normal(loc=1, scale=1, size=500),
+                  np.random.normal(loc=1, scale=1, size=500)])
+    corr = [[1.0, 0.3],
+            [0.3, 1.0]]
+    x = np.dot(np.linalg.cholesky(corr), x)
+    expected = (0.28659685838743354, 6.579862219051161e-11)
+    res = stats.spearmanr(x[0], x[1])
+    assert_approx_equal(res[0], expected[0])
+    assert_approx_equal(res[1], expected[1])
+
+    assert_approx_equal(stats.spearmanr([1,1,2], [1,1,2])[0], 1.0)
+
+    # test nan_policy
+    x = np.arange(10.)
+    x[9] = np.nan
+    assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
+    assert_allclose(stats.spearmanr(x, x, nan_policy='omit'),
+                    (1.0, 0))
+    assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
+    assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
+
+    # test unequal length inputs
+    x = np.arange(10.)
+    y = np.arange(20.)
+    assert_raises(ValueError, stats.spearmanr, x, y)
+
+    # test paired values
+    x1 = [1, 2, 3, 4]
+    x2 = [8, 7, 6, np.nan]
+    res1 = stats.spearmanr(x1, x2, nan_policy='omit')
+    res2 = stats.spearmanr(x1[:3], x2[:3], nan_policy='omit')
+    assert_equal(res1, res2)
+
+    # Regression test for GitHub issue #6061 - Overflow on Windows
+    x = list(range(2000))
+    y = list(range(2000))
+    y[0], y[9] = y[9], y[0]
+    y[10], y[434] = y[434], y[10]
+    y[435], y[1509] = y[1509], y[435]
+    # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
+    #     = 1 - (1 / 500)
+    #     = 0.998
+    x.append(np.nan)
+    y.append(3.0)
+    assert_almost_equal(stats.spearmanr(x, y, nan_policy='omit')[0], 0.998)
+
+class TestCorrSpearmanrTies(object):
+    """Some tests of tie-handling by the spearmanr function."""
+
+    def test_tie1(self):
+        # Data
+        x = [1.0, 2.0, 3.0, 4.0]
+        y = [1.0, 2.0, 2.0, 3.0]
+        # Ranks of the data, with tie-handling.
+        xr = [1.0, 2.0, 3.0, 4.0]
+        yr = [1.0, 2.5, 2.5, 4.0]
+        # Result of spearmanr should be the same as applying
+        # pearsonr to the ranks.
+        sr = stats.spearmanr(x, y)
+        pr = stats.pearsonr(xr, yr)
+        assert_almost_equal(sr, pr)
+
+    def test_tie2(self):
+        # Test tie-handling if inputs contain nan's
+        # Data without nan's
+        x1 = [1, 2, 2.5, 2]
+        y1 = [1, 3, 2.5, 4]
+        # Same data with nan's
+        x2 = [1, 2, 2.5, 2, np.nan]
+        y2 = [1, 3, 2.5, 4, np.nan]
+
+        # Results for the two data sets should be the same if nan's are ignored
+        sr1 = stats.spearmanr(x1, y1)
+        sr2 = stats.spearmanr(x2, y2, nan_policy='omit')
+        assert_almost_equal(sr1, sr2)
+
+
+# W.II.E. Tabulate X against X, using BIG as a case weight.  The values
+# should appear on the diagonal and the total should be 899999955.
+# If the table cannot hold these values, forget about working with
+# census data.  You can also tabulate HUGE against TINY.  There is no
+# reason a tabulation program should not be able to distinguish
+# different values regardless of their magnitude.
+
+# I need to figure out how to do this one; one hedged possibility is
+# sketched below.
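+
+# A hedged sketch, not from the original suite, of one way W.II.E could be
+# exercised: np.histogram2d accepts case weights, so the weighted
+# cross-tabulation can be checked directly.  The helper name and the bin
+# edges are illustrative assumptions.
+def _sketch_weighted_tabulation():
+    edges = np.arange(0.5, 10.5)  # edges 0.5 .. 9.5, one bin per value 1-9
+    table, _, _ = np.histogram2d(X, X, bins=(edges, edges), weights=BIG)
+    # The weights land on the diagonal and the grand total is sum(BIG).
+    assert_equal(table.trace(), BIG.sum())
+    assert_equal(BIG.sum(), 899999955)
+    # HUGE against TINY tabulates cleanly too: nine distinct diagonal cells,
+    # so magnitude alone does not prevent the values from being separated.
+    table2, _, _ = np.histogram2d(HUGE, TINY, bins=9)
+    assert_equal(np.count_nonzero(table2), 9)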
+ + +def test_kendalltau(): + # simple case without ties + x = np.arange(10) + y = np.arange(10) + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = (1.0, 5.511463844797e-07) + res = stats.kendalltau(x, y) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # swap a couple of values + b = y[1] + y[1] = y[2] + y[2] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = (0.9555555555555556, 5.511463844797e-06) + res = stats.kendalltau(x, y) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # swap a couple more + b = y[5] + y[5] = y[6] + y[6] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = (0.9111111111111111, 2.976190476190e-05) + res = stats.kendalltau(x, y) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # same in opposite direction + x = np.arange(10) + y = np.arange(10)[::-1] + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = (-1.0, 5.511463844797e-07) + res = stats.kendalltau(x, y) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # swap a couple of values + b = y[1] + y[1] = y[2] + y[2] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = (-0.9555555555555556, 5.511463844797e-06) + res = stats.kendalltau(x, y) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # swap a couple more + b = y[5] + y[5] = y[6] + y[6] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = (-0.9111111111111111, 2.976190476190e-05) + res = stats.kendalltau(x, y) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # check exception in case of ties + y[2] = y[1] + assert_raises(ValueError, stats.kendalltau, x, y, method='exact') + + # check exception in case of invalid method keyword + assert_raises(ValueError, stats.kendalltau, x, y, method='banana') + + # with some ties + # Cross-check with R: + # cor.test(c(12,2,1,12,2),c(1,4,7,1,0),method="kendall",exact=FALSE) + x1 = [12, 2, 1, 12, 2] + x2 = [1, 4, 7, 1, 0] + expected = (-0.47140452079103173, 0.28274545993277478) + res = stats.kendalltau(x1, x2) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # test for namedtuple attribute results + attributes = ('correlation', 'pvalue') + res = stats.kendalltau(x1, x2) + check_named_results(res, attributes) + + # with only ties in one or both inputs + assert_equal(stats.kendalltau([2,2,2], [2,2,2]), (np.nan, np.nan)) + assert_equal(stats.kendalltau([2,0,2], [2,2,2]), (np.nan, np.nan)) + assert_equal(stats.kendalltau([2,2,2], [2,0,2]), (np.nan, np.nan)) + + # empty arrays provided as input + assert_equal(stats.kendalltau([], []), (np.nan, np.nan)) + + # check with larger arrays + np.random.seed(7546) + x = np.array([np.random.normal(loc=1, scale=1, size=500), + np.random.normal(loc=1, scale=1, size=500)]) + corr = [[1.0, 0.3], + [0.3, 1.0]] + x = np.dot(np.linalg.cholesky(corr), x) + expected = (0.19291382765531062, 1.1337095377742629e-10) + res = stats.kendalltau(x[0], x[1]) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # and do we get a tau of 1 for identical inputs? 
+ assert_approx_equal(stats.kendalltau([1,1,2], [1,1,2])[0], 1.0) + + # test nan_policy + x = np.arange(10.) + x[9] = np.nan + assert_array_equal(stats.kendalltau(x, x), (np.nan, np.nan)) + assert_allclose(stats.kendalltau(x, x, nan_policy='omit'), + (1.0, 5.5114638e-6), rtol=1e-06) + assert_allclose(stats.kendalltau(x, x, nan_policy='omit', method='asymptotic'), + (1.0, 0.00017455009626808976), rtol=1e-06) + assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='raise') + assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='foobar') + + # test unequal length inputs + x = np.arange(10.) + y = np.arange(20.) + assert_raises(ValueError, stats.kendalltau, x, y) + + # test all ties + tau, p_value = stats.kendalltau([], []) + assert_equal(np.nan, tau) + assert_equal(np.nan, p_value) + tau, p_value = stats.kendalltau([0], [0]) + assert_equal(np.nan, tau) + assert_equal(np.nan, p_value) + + # Regression test for GitHub issue #6061 - Overflow on Windows + x = np.arange(2000, dtype=float) + x = np.ma.masked_greater(x, 1995) + y = np.arange(2000, dtype=float) + y = np.concatenate((y[1000:], y[:1000])) + assert_(np.isfinite(stats.kendalltau(x,y)[1])) + +def test_kendalltau_vs_mstats_basic(): + np.random.seed(42) + for s in range(2,10): + a = [] + # Generate rankings with ties + for i in range(s): + a += [i]*i + b = list(a) + np.random.shuffle(a) + np.random.shuffle(b) + expected = mstats_basic.kendalltau(a, b) + actual = stats.kendalltau(a, b) + assert_approx_equal(actual[0], expected[0]) + assert_approx_equal(actual[1], expected[1]) + + +def test_kendalltau_nan_2nd_arg(): + # regression test for gh-6134: nans in the second arg were not handled + x = [1., 2., 3., 4.] + y = [np.nan, 2.4, 3.4, 3.4] + + r1 = stats.kendalltau(x, y, nan_policy='omit') + r2 = stats.kendalltau(x[1:], y[1:]) + assert_allclose(r1.correlation, r2.correlation, atol=1e-15) + + +def test_weightedtau(): + x = [12, 2, 1, 12, 2] + y = [1, 4, 7, 1, 0] + tau, p_value = stats.weightedtau(x, y) + assert_approx_equal(tau, -0.56694968153682723) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(x, y, additive=False) + assert_approx_equal(tau, -0.62205716951801038) + assert_equal(np.nan, p_value) + # This must be exactly Kendall's tau + tau, p_value = stats.weightedtau(x, y, weigher=lambda x: 1) + assert_approx_equal(tau, -0.47140452079103173) + assert_equal(np.nan, p_value) + + # Asymmetric, ranked version + tau, p_value = stats.weightedtau(x, y, rank=None) + assert_approx_equal(tau, -0.4157652301037516) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(y, x, rank=None) + assert_approx_equal(tau, -0.7181341329699029) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(x, y, rank=None, additive=False) + assert_approx_equal(tau, -0.40644850966246893) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(y, x, rank=None, additive=False) + assert_approx_equal(tau, -0.83766582937355172) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(x, y, rank=False) + assert_approx_equal(tau, -0.51604397940261848) + assert_equal(np.nan, p_value) + # This must be exactly Kendall's tau + tau, p_value = stats.weightedtau(x, y, rank=True, weigher=lambda x: 1) + assert_approx_equal(tau, -0.47140452079103173) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(y, x, rank=True, weigher=lambda x: 1) + assert_approx_equal(tau, -0.47140452079103173) + assert_equal(np.nan, p_value) + # Test argument conversion + tau, p_value = stats.weightedtau(np.asarray(x, 
dtype=np.float64), y) + assert_approx_equal(tau, -0.56694968153682723) + tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.int16), y) + assert_approx_equal(tau, -0.56694968153682723) + tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64)) + assert_approx_equal(tau, -0.56694968153682723) + # All ties + tau, p_value = stats.weightedtau([], []) + assert_equal(np.nan, tau) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau([0], [0]) + assert_equal(np.nan, tau) + assert_equal(np.nan, p_value) + # Size mismatches + assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1, 2]) + assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1], [0]) + # NaNs + x = [12, 2, 1, 12, 2] + y = [1, 4, 7, 1, np.nan] + tau, p_value = stats.weightedtau(x, y) + assert_approx_equal(tau, -0.56694968153682723) + x = [12, 2, np.nan, 12, 2] + tau, p_value = stats.weightedtau(x, y) + assert_approx_equal(tau, -0.56694968153682723) + + +def test_weightedtau_vs_quadratic(): + # Trivial quadratic implementation, all parameters mandatory + def wkq(x, y, rank, weigher, add): + tot = conc = disc = u = v = 0 + for i in range(len(x)): + for j in range(len(x)): + w = weigher(rank[i]) + weigher(rank[j]) if add else weigher(rank[i]) * weigher(rank[j]) + tot += w + if x[i] == x[j]: + u += w + if y[i] == y[j]: + v += w + if x[i] < x[j] and y[i] < y[j] or x[i] > x[j] and y[i] > y[j]: + conc += w + elif x[i] < x[j] and y[i] > y[j] or x[i] > x[j] and y[i] < y[j]: + disc += w + return (conc - disc) / np.sqrt(tot - u) / np.sqrt(tot - v) + + np.random.seed(42) + for s in range(3,10): + a = [] + # Generate rankings with ties + for i in range(s): + a += [i]*i + b = list(a) + np.random.shuffle(a) + np.random.shuffle(b) + # First pass: use element indices as ranks + rank = np.arange(len(a), dtype=np.intp) + for _ in range(2): + for add in [True, False]: + expected = wkq(a, b, rank, lambda x: 1./(x+1), add) + actual = stats.weightedtau(a, b, rank, lambda x: 1./(x+1), add).correlation + assert_approx_equal(expected, actual) + # Second pass: use a random rank + np.random.shuffle(rank) + + +class TestFindRepeats(object): + + def test_basic(self): + a = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5] + res, nums = stats.find_repeats(a) + assert_array_equal(res, [1, 2, 3, 4]) + assert_array_equal(nums, [3, 3, 2, 2]) + + def test_empty_result(self): + # Check that empty arrays are returned when there are no repeats. + for a in [[10, 20, 50, 30, 40], []]: + repeated, counts = stats.find_repeats(a) + assert_array_equal(repeated, []) + assert_array_equal(counts, []) + + +class TestRegression(object): + def test_linregressBIGX(self): + # W.II.F. Regress BIG on X. + # The constant should be 99999990 and the regression coefficient should be 1. + y = stats.linregress(X,BIG) + intercept = y[1] + r = y[2] + assert_almost_equal(intercept,99999990) + assert_almost_equal(r,1.0) + + def test_regressXX(self): + # W.IV.B. Regress X on X. + # The constant should be exactly 0 and the regression coefficient should be 1. + # This is a perfectly valid regression. The program should not complain. + y = stats.linregress(X,X) + intercept = y[1] + r = y[2] + assert_almost_equal(intercept,0.0) + assert_almost_equal(r,1.0) +# W.IV.C. Regress X on BIG and LITTLE (two predictors). The program +# should tell you that this model is "singular" because BIG and +# LITTLE are linear combinations of each other. Cryptic error +# messages are unacceptable here. Singularity is the most +# fundamental regression error. 
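+
+    # A hedged sketch, not part of the original suite: stats.linregress takes
+    # only one predictor, but np.linalg.matrix_rank applied to the design
+    # matrix [1, BIG, LITTLE] exposes the singularity, since BIG and LITTLE
+    # are both affine functions of X.  The method name is an assumption.
+    def _sketch_singular_design(self):
+        A = np.column_stack((np.ones_like(X), BIG, LITTLE))
+        # A well-posed two-predictor model would have rank 3; this one has 2.
+        assert_(np.linalg.matrix_rank(A) < 3)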
+# Need to figure out how to handle multiple linear regression.  Not obvious;
+# one hedged possibility is sketched above.
+
+    def test_regressZEROX(self):
+        # W.IV.D. Regress ZERO on X.
+        # The program should inform you that ZERO has no variance or it should
+        # go ahead and compute the regression and report a correlation and
+        # total sum of squares of exactly 0.
+        y = stats.linregress(X,ZERO)
+        intercept = y[1]
+        r = y[2]
+        assert_almost_equal(intercept,0.0)
+        assert_almost_equal(r,0.0)
+
+    def test_regress_simple(self):
+        # Regress a line with sinusoidal noise.
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+
+        res = stats.linregress(x, y)
+        assert_almost_equal(res[4], 2.3957814497838803e-3)
+
+    def test_regress_simple_onearg_rows(self):
+        # Regress a line with sinusoidal noise, using a single input of
+        # shape (2, N).
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+        rows = np.vstack((x, y))
+
+        res = stats.linregress(rows)
+        assert_almost_equal(res[4], 2.3957814497838803e-3)
+
+    def test_regress_simple_onearg_cols(self):
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+        cols = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))
+
+        res = stats.linregress(cols)
+        assert_almost_equal(res[4], 2.3957814497838803e-3)
+
+    def test_regress_shape_error(self):
+        # Check that a single input argument to linregress with wrong shape
+        # results in a ValueError.
+        assert_raises(ValueError, stats.linregress, np.ones((3, 3)))
+
+    def test_linregress(self):
+        # Compared with multivariate OLS with pinv.
+        x = np.arange(11)
+        y = np.arange(5,16)
+        y[[(1),(-2)]] -= 1
+        y[[(0),(-1)]] += 1
+
+        res = (1.0, 5.0, 0.98229948625750, 7.45259691e-008, 0.063564172616372733)
+        assert_array_almost_equal(stats.linregress(x,y),res,decimal=14)
+
+    def test_regress_simple_negative_cor(self):
+        # If the slope of the regression is negative, the factor R tends
+        # to -1, not 1.  Sometimes rounding errors make it < -1, leading to
+        # stderr being NaN.
+        a, n = 1e-71, 100000
+        x = np.linspace(a, 2 * a, n)
+        y = np.linspace(2 * a, a, n)
+        stats.linregress(x, y)
+        res = stats.linregress(x, y)
+        assert_(res[2] >= -1)  # propagated numerical errors were not corrected
+        assert_almost_equal(res[2], -1)  # perfect negative correlation case
+        assert_(not np.isnan(res[4]))  # stderr should stay finite
+
+    def test_linregress_result_attributes(self):
+        # Regress a line with sinusoidal noise.
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+
+        res = stats.linregress(x, y)
+        attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
+        check_named_results(res, attributes)
+
+    def test_regress_two_inputs(self):
+        # Regress a simple line formed by two points.
+        x = np.arange(2)
+        y = np.arange(3, 5)
+
+        res = stats.linregress(x, y)
+        assert_almost_equal(res[3], 0.0)  # non-horizontal line
+        assert_almost_equal(res[4], 0.0)  # zero stderr
+
+    def test_regress_two_inputs_horizontal_line(self):
+        # Regress a horizontal line formed by two points.
+ x = np.arange(2) + y = np.ones(2) + + res = stats.linregress(x, y) + assert_almost_equal(res[3], 1.0) # horizontal line + assert_almost_equal(res[4], 0.0) # zero stderr + + def test_nist_norris(self): + x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6, 777.0, + 558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6, 556.0, 228.1, + 995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1, 887.2, 999.0, 779.0, + 11.1, 118.3, 229.2, 669.1, 448.9, 0.5] + + y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1, 778.9, + 559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8, 557.7, 228.3, + 998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3, 888.0, 998.5, 778.9, + 10.2, 117.6, 228.9, 668.4, 449.2, 0.2] + + # Expected values + exp_slope = 1.00211681802045 + exp_intercept = -0.262323073774029 + exp_rsquared = 0.999993745883712 + + actual = stats.linregress(x, y) + + assert_almost_equal(actual.slope, exp_slope) + assert_almost_equal(actual.intercept, exp_intercept) + assert_almost_equal(actual.rvalue**2, exp_rsquared) + + def test_empty_input(self): + assert_raises(ValueError, stats.linregress, [], []) + + def test_nan_input(self): + x = np.arange(10.) + x[9] = np.nan + + with np.errstate(invalid="ignore"): + assert_array_equal(stats.linregress(x, x), + (np.nan, np.nan, np.nan, np.nan, np.nan)) + + +def test_theilslopes(): + # Basic slope test. + slope, intercept, lower, upper = stats.theilslopes([0,1,1]) + assert_almost_equal(slope, 0.5) + assert_almost_equal(intercept, 0.5) + + # Test of confidence intervals. + x = [1, 2, 3, 4, 10, 12, 18] + y = [9, 15, 19, 20, 45, 55, 78] + slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07) + assert_almost_equal(slope, 4) + assert_almost_equal(upper, 4.38, decimal=2) + assert_almost_equal(lower, 3.71, decimal=2) + + +def test_cumfreq(): + x = [1, 4, 2, 1, 3, 1] + cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4) + assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.])) + cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4, + defaultreallimits=(1.5, 5)) + assert_(extrapoints == 3) + + # test for namedtuple attribute results + attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints') + res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) + check_named_results(res, attributes) + + +def test_relfreq(): + a = np.array([1, 4, 2, 1, 3, 1]) + relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4) + assert_array_almost_equal(relfreqs, + array([0.5, 0.16666667, 0.16666667, 0.16666667])) + + # test for namedtuple attribute results + attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints') + res = stats.relfreq(a, numbins=4) + check_named_results(res, attributes) + + # check array_like input is accepted + relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1], + numbins=4) + assert_array_almost_equal(relfreqs, relfreqs2) + + +class TestGMean(object): + + def test_1D_list(self): + a = (1,2,3,4) + actual = stats.gmean(a) + desired = power(1*2*3*4,1./4.) + assert_almost_equal(actual, desired,decimal=14) + + desired1 = stats.gmean(a,axis=-1) + assert_almost_equal(actual, desired1, decimal=14) + + def test_1D_array(self): + a = array((1,2,3,4), float32) + actual = stats.gmean(a) + desired = power(1*2*3*4,1./4.) 
+ assert_almost_equal(actual, desired, decimal=7) + + desired1 = stats.gmean(a,axis=-1) + assert_almost_equal(actual, desired1, decimal=7) + + def test_2D_array_default(self): + a = array(((1,2,3,4), + (1,2,3,4), + (1,2,3,4))) + actual = stats.gmean(a) + desired = array((1,2,3,4)) + assert_array_almost_equal(actual, desired, decimal=14) + + desired1 = stats.gmean(a,axis=0) + assert_array_almost_equal(actual, desired1, decimal=14) + + def test_2D_array_dim1(self): + a = array(((1,2,3,4), + (1,2,3,4), + (1,2,3,4))) + actual = stats.gmean(a, axis=1) + v = power(1*2*3*4,1./4.) + desired = array((v,v,v)) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_large_values(self): + a = array([1e100, 1e200, 1e300]) + actual = stats.gmean(a) + assert_approx_equal(actual, 1e200, significant=13) + + +class TestHMean(object): + def test_1D_list(self): + a = (1,2,3,4) + actual = stats.hmean(a) + desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) + assert_almost_equal(actual, desired, decimal=14) + + desired1 = stats.hmean(array(a),axis=-1) + assert_almost_equal(actual, desired1, decimal=14) + + def test_1D_array(self): + a = array((1,2,3,4), float64) + actual = stats.hmean(a) + desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) + assert_almost_equal(actual, desired, decimal=14) + + desired1 = stats.hmean(a,axis=-1) + assert_almost_equal(actual, desired1, decimal=14) + + def test_2D_array_default(self): + a = array(((1,2,3,4), + (1,2,3,4), + (1,2,3,4))) + actual = stats.hmean(a) + desired = array((1.,2.,3.,4.)) + assert_array_almost_equal(actual, desired, decimal=14) + + actual1 = stats.hmean(a,axis=0) + assert_array_almost_equal(actual1, desired, decimal=14) + + def test_2D_array_dim1(self): + a = array(((1,2,3,4), + (1,2,3,4), + (1,2,3,4))) + + v = 4. / (1./1 + 1./2 + 1./3 + 1./4) + desired1 = array((v,v,v)) + actual1 = stats.hmean(a, axis=1) + assert_array_almost_equal(actual1, desired1, decimal=14) + + +class TestScoreatpercentile(object): + def setup_method(self): + self.a1 = [3, 4, 5, 10, -3, -5, 6] + self.a2 = [3, -6, -2, 8, 7, 4, 2, 1] + self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0] + + def test_basic(self): + x = arange(8) * 0.5 + assert_equal(stats.scoreatpercentile(x, 0), 0.) 
+ assert_equal(stats.scoreatpercentile(x, 100), 3.5) + assert_equal(stats.scoreatpercentile(x, 50), 1.75) + + def test_fraction(self): + scoreatperc = stats.scoreatpercentile + + # Test defaults + assert_equal(scoreatperc(list(range(10)), 50), 4.5) + assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5) + assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5) + assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55) + assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5) + + # explicitly specify interpolation_method 'fraction' (the default) + assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'), + 4.5) + assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7), + interpolation_method='fraction'), + 4.5) + assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8), + interpolation_method='fraction'), + 4.5) + assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100), + interpolation_method='fraction'), + 55) + assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10), + interpolation_method='fraction'), + 5.5) + + def test_lower_higher(self): + scoreatperc = stats.scoreatpercentile + + # interpolation_method 'lower'/'higher' + assert_equal(scoreatperc(list(range(10)), 50, + interpolation_method='lower'), 4) + assert_equal(scoreatperc(list(range(10)), 50, + interpolation_method='higher'), 5) + assert_equal(scoreatperc(list(range(10)), 50, (2,7), + interpolation_method='lower'), 4) + assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7), + interpolation_method='higher'), 5) + assert_equal(scoreatperc(list(range(100)), 50, (1,8), + interpolation_method='lower'), 4) + assert_equal(scoreatperc(list(range(100)), 50, (1,8), + interpolation_method='higher'), 5) + assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100), + interpolation_method='lower'), 10) + assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100), + interpolation_method='higher'), 100) + assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10), + interpolation_method='lower'), 1) + assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10), + interpolation_method='higher'), 10) + + def test_sequence_per(self): + x = arange(8) * 0.5 + expected = np.array([0, 3.5, 1.75]) + res = stats.scoreatpercentile(x, [0, 100, 50]) + assert_allclose(res, expected) + assert_(isinstance(res, np.ndarray)) + # Test with ndarray. 
Regression test for gh-2861 + assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])), + expected) + # Also test combination of 2-D array, axis not None and array-like per + res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)), + np.array([0, 1, 100, 100]), axis=1) + expected2 = array([[0, 4, 8], + [0.03, 4.03, 8.03], + [3, 7, 11], + [3, 7, 11]]) + assert_allclose(res2, expected2) + + def test_axis(self): + scoreatperc = stats.scoreatpercentile + x = arange(12).reshape(3, 4) + + assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0]) + + r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] + assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0) + + r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]] + assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1) + + x = array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + score = stats.scoreatpercentile(x, 50) + assert_equal(score.shape, ()) + assert_equal(score, 1.0) + score = stats.scoreatpercentile(x, 50, axis=0) + assert_equal(score.shape, (3,)) + assert_equal(score, [1, 1, 1]) + + def test_exception(self): + assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56, + interpolation_method='foobar') + assert_raises(ValueError, stats.scoreatpercentile, [1], 101) + assert_raises(ValueError, stats.scoreatpercentile, [1], -1) + + def test_empty(self): + assert_equal(stats.scoreatpercentile([], 50), np.nan) + assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan) + assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan]) + + +class TestItemfreq(object): + a = [5, 7, 1, 2, 1, 5, 7] * 10 + b = [1, 2, 5, 7] + + def test_numeric_types(self): + # Check itemfreq works for all dtypes (adapted from np.unique tests) + def _check_itemfreq(dt): + a = np.array(self.a, dt) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + v = stats.itemfreq(a) + assert_array_equal(v[:, 0], [1, 2, 5, 7]) + assert_array_equal(v[:, 1], np.array([20, 10, 20, 20], dtype=dt)) + + dtypes = [np.int32, np.int64, np.float32, np.float64, + np.complex64, np.complex128] + for dt in dtypes: + _check_itemfreq(dt) + + def test_object_arrays(self): + a, b = self.a, self.b + dt = 'O' + aa = np.empty(len(a), dt) + aa[:] = a + bb = np.empty(len(b), dt) + bb[:] = b + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + v = stats.itemfreq(aa) + assert_array_equal(v[:, 0], bb) + + def test_structured_arrays(self): + a, b = self.a, self.b + dt = [('', 'i'), ('', 'i')] + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + v = stats.itemfreq(aa) + # Arrays don't compare equal because v[:,0] is object array + assert_equal(tuple(v[2, 0]), tuple(bb[2])) + + +class TestMode(object): + def test_empty(self): + vals, counts = stats.mode([]) + assert_equal(vals, np.array([])) + assert_equal(counts, np.array([])) + + def test_scalar(self): + vals, counts = stats.mode(4.) 
+ assert_equal(vals, np.array([4.])) + assert_equal(counts, np.array([1])) + + def test_basic(self): + data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6] + vals = stats.mode(data1) + assert_equal(vals[0][0], 6) + assert_equal(vals[1][0], 3) + + def test_axes(self): + data1 = [10, 10, 30, 40] + data2 = [10, 10, 10, 10] + data3 = [20, 10, 20, 20] + data4 = [30, 30, 30, 30] + data5 = [40, 30, 30, 30] + arr = np.array([data1, data2, data3, data4, data5]) + + vals = stats.mode(arr, axis=None) + assert_equal(vals[0], np.array([30])) + assert_equal(vals[1], np.array([8])) + + vals = stats.mode(arr, axis=0) + assert_equal(vals[0], np.array([[10, 10, 30, 30]])) + assert_equal(vals[1], np.array([[2, 3, 3, 2]])) + + vals = stats.mode(arr, axis=1) + assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]])) + assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]])) + + def test_strings(self): + data1 = ['rain', 'showers', 'showers'] + + with suppress_warnings() as sup: + r = sup.record(RuntimeWarning, ".*checked for nan values") + vals = stats.mode(data1) + assert_equal(len(r), 1) + + assert_equal(vals[0][0], 'showers') + assert_equal(vals[1][0], 2) + + def test_mixed_objects(self): + objects = [10, True, np.nan, 'hello', 10] + arr = np.empty((5,), dtype=object) + arr[:] = objects + with suppress_warnings() as sup: + r = sup.record(RuntimeWarning, ".*checked for nan values") + vals = stats.mode(arr) + assert_equal(len(r), 1) + assert_equal(vals[0][0], 10) + assert_equal(vals[1][0], 2) + + def test_objects(self): + # Python objects must be sortable (le + eq) and have ne defined + # for np.unique to work. hash is for set. + class Point(object): + def __init__(self, x): + self.x = x + + def __eq__(self, other): + return self.x == other.x + + def __ne__(self, other): + return self.x != other.x + + def __lt__(self, other): + return self.x < other.x + + def __hash__(self): + return hash(self.x) + + points = [Point(x) for x in [1, 2, 3, 4, 3, 2, 2, 2]] + arr = np.empty((8,), dtype=object) + arr[:] = points + assert_(len(set(points)) == 4) + assert_equal(np.unique(arr).shape, (4,)) + with suppress_warnings() as sup: + r = sup.record(RuntimeWarning, ".*checked for nan values") + vals = stats.mode(arr) + assert_equal(len(r), 1) + + assert_equal(vals[0][0], Point(2)) + assert_equal(vals[1][0], 4) + + def test_mode_result_attributes(self): + data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6] + data2 = [] + actual = stats.mode(data1) + attributes = ('mode', 'count') + check_named_results(actual, attributes) + actual2 = stats.mode(data2) + check_named_results(actual2, attributes) + + def test_mode_nan(self): + data1 = [3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6] + actual = stats.mode(data1) + assert_equal(actual, (6, 3)) + + actual = stats.mode(data1, nan_policy='omit') + assert_equal(actual, (6, 3)) + assert_raises(ValueError, stats.mode, data1, nan_policy='raise') + assert_raises(ValueError, stats.mode, data1, nan_policy='foobar') + + @pytest.mark.parametrize("data", [ + [3, 5, 1, 1, 3], + [3, np.nan, 5, 1, 1, 3], + [3, 5, 1], + [3, np.nan, 5, 1], + ]) + def test_smallest_equal(self, data): + result = stats.mode(data, nan_policy='omit') + assert_equal(result[0][0], 1) + + +class TestVariability(object): + + testcase = [1,2,3,4] + scalar_testcase = 4. 
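+
+    # A clarifying note, not in the original file: the reference numbers in
+    # the comments below are hand-computed.  R's var() uses ddof=1, so
+    # sqrt(var(testcase) * 3/4) converts it to the ddof=0 standard deviation
+    # that zscore/zmap use, and sqrt(var(testcase)*3/4)/sqrt(3) equals
+    # std(ddof=1)/sqrt(n), scipy's default sem, for n = 4.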
+ + def test_sem(self): + # This is not in R, so used: + # sqrt(var(testcase)*3/4)/sqrt(3) + + # y = stats.sem(self.shoes[0]) + # assert_approx_equal(y,0.775177399) + with suppress_warnings() as sup, np.errstate(invalid="ignore"): + sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") + y = stats.sem(self.scalar_testcase) + assert_(np.isnan(y)) + + y = stats.sem(self.testcase) + assert_approx_equal(y, 0.6454972244) + n = len(self.testcase) + assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)), + stats.sem(self.testcase, ddof=2)) + + x = np.arange(10.) + x[9] = np.nan + assert_equal(stats.sem(x), np.nan) + assert_equal(stats.sem(x, nan_policy='omit'), 0.9128709291752769) + assert_raises(ValueError, stats.sem, x, nan_policy='raise') + assert_raises(ValueError, stats.sem, x, nan_policy='foobar') + + def test_zmap(self): + # not in R, so tested by using: + # (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4) + y = stats.zmap(self.testcase,self.testcase) + desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999]) + assert_array_almost_equal(desired,y,decimal=12) + + def test_zmap_axis(self): + # Test use of 'axis' keyword in zmap. + x = np.array([[0.0, 0.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 2.0], + [2.0, 0.0, 2.0, 0.0]]) + + t1 = 1.0/np.sqrt(2.0/3) + t2 = np.sqrt(3.)/3 + t3 = np.sqrt(2.) + + z0 = stats.zmap(x, x, axis=0) + z1 = stats.zmap(x, x, axis=1) + + z0_expected = [[-t1, -t3/2, -t3/2, 0.0], + [0.0, t3, -t3/2, t1], + [t1, -t3/2, t3, -t1]] + z1_expected = [[-1.0, -1.0, 1.0, 1.0], + [-t2, -t2, -t2, np.sqrt(3.)], + [1.0, -1.0, 1.0, -1.0]] + + assert_array_almost_equal(z0, z0_expected) + assert_array_almost_equal(z1, z1_expected) + + def test_zmap_ddof(self): + # Test use of 'ddof' keyword in zmap. + x = np.array([[0.0, 0.0, 1.0, 1.0], + [0.0, 1.0, 2.0, 3.0]]) + + z = stats.zmap(x, x, axis=1, ddof=1) + + z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3)) + z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3)) + assert_array_almost_equal(z[0], z0_expected) + assert_array_almost_equal(z[1], z1_expected) + + def test_zscore(self): + # not in R, so tested by using: + # (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4) + y = stats.zscore(self.testcase) + desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999]) + assert_array_almost_equal(desired,y,decimal=12) + + def test_zscore_axis(self): + # Test use of 'axis' keyword in zscore. + x = np.array([[0.0, 0.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 2.0], + [2.0, 0.0, 2.0, 0.0]]) + + t1 = 1.0/np.sqrt(2.0/3) + t2 = np.sqrt(3.)/3 + t3 = np.sqrt(2.) + + z0 = stats.zscore(x, axis=0) + z1 = stats.zscore(x, axis=1) + + z0_expected = [[-t1, -t3/2, -t3/2, 0.0], + [0.0, t3, -t3/2, t1], + [t1, -t3/2, t3, -t1]] + z1_expected = [[-1.0, -1.0, 1.0, 1.0], + [-t2, -t2, -t2, np.sqrt(3.)], + [1.0, -1.0, 1.0, -1.0]] + + assert_array_almost_equal(z0, z0_expected) + assert_array_almost_equal(z1, z1_expected) + + def test_zscore_ddof(self): + # Test use of 'ddof' keyword in zscore. 
+        x = np.array([[0.0, 0.0, 1.0, 1.0],
+                      [0.0, 1.0, 2.0, 3.0]])
+
+        z = stats.zscore(x, axis=1, ddof=1)
+
+        z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
+        z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
+        assert_array_almost_equal(z[0], z0_expected)
+        assert_array_almost_equal(z[1], z1_expected)
+
+
+class _numpy_version_warn_context_mgr(object):
+    """
+    A simple context manager class to avoid retyping the same code for
+    different versions of numpy when the only difference is that older
+    versions raise warnings.
+
+    This manager does not apply for cases where the old code returns
+    different values.
+    """
+    def __init__(self, min_numpy_version, warning_type, num_warnings):
+        if NumpyVersion(np.__version__) < min_numpy_version:
+            self.numpy_is_old = True
+            self.warning_type = warning_type
+            self.num_warnings = num_warnings
+            self.delegate = warnings.catch_warnings(record=True)
+        else:
+            self.numpy_is_old = False
+
+    def __enter__(self):
+        if self.numpy_is_old:
+            self.warn_list = self.delegate.__enter__()
+            warnings.simplefilter("always")
+        return None
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self.numpy_is_old:
+            self.delegate.__exit__(exc_type, exc_value, traceback)
+            _check_warnings(self.warn_list, self.warning_type, self.num_warnings)
+
+
+def _check_warnings(warn_list, expected_type, expected_len):
+    """
+    Checks that all of the warnings from a list returned by
+    `warnings.catch_warnings(record=True)` are of the required type and that
+    the list contains the expected number of warnings.
+    """
+    assert_equal(len(warn_list), expected_len, "number of warnings")
+    for warn_ in warn_list:
+        assert_(warn_.category is expected_type)
+
+
+class TestIQR(object):
+
+    def test_basic(self):
+        x = np.arange(8) * 0.5
+        np.random.shuffle(x)
+        assert_equal(stats.iqr(x), 1.75)
+
+    def test_api(self):
+        d = np.ones((5, 5))
+        stats.iqr(d)
+        stats.iqr(d, None)
+        stats.iqr(d, 1)
+        stats.iqr(d, (0, 1))
+        stats.iqr(d, None, (10, 90))
+        stats.iqr(d, None, (30, 20), 'raw')
+        stats.iqr(d, None, (25, 75), 1.5, 'propagate')
+        if NumpyVersion(np.__version__) >= '1.9.0a':
+            stats.iqr(d, None, (50, 50), 'normal', 'raise', 'linear')
+            stats.iqr(d, None, (25, 75), -0.4, 'omit', 'lower', True)
+
+    def test_empty(self):
+        assert_equal(stats.iqr([]), np.nan)
+        assert_equal(stats.iqr(np.arange(0)), np.nan)
+
+    def test_constant(self):
+        # Constant array always gives 0
+        x = np.ones((7, 4))
+        assert_equal(stats.iqr(x), 0.0)
+        assert_array_equal(stats.iqr(x, axis=0), np.zeros(4))
+        assert_array_equal(stats.iqr(x, axis=1), np.zeros(7))
+        # Even for older versions, 'linear' does not raise a warning
+        with _numpy_version_warn_context_mgr('1.9.0a', RuntimeWarning, 4):
+            assert_equal(stats.iqr(x, interpolation='linear'), 0.0)
+            assert_equal(stats.iqr(x, interpolation='midpoint'), 0.0)
+            assert_equal(stats.iqr(x, interpolation='nearest'), 0.0)
+            assert_equal(stats.iqr(x, interpolation='lower'), 0.0)
+            assert_equal(stats.iqr(x, interpolation='higher'), 0.0)
+
+        # 0 only along constant dimensions
+        # This also tests much of `axis`
+        y = np.ones((4, 5, 6)) * np.arange(6)
+        assert_array_equal(stats.iqr(y, axis=0), np.zeros((5, 6)))
+        assert_array_equal(stats.iqr(y, axis=1), np.zeros((4, 6)))
+        assert_array_equal(stats.iqr(y, axis=2), 2.5 * np.ones((4, 5)))
+        assert_array_equal(stats.iqr(y, axis=(0, 1)), np.zeros(6))
+        assert_array_equal(stats.iqr(y, axis=(0, 2)), 3. * np.ones(5))
+        assert_array_equal(stats.iqr(y, axis=(1, 2)), 3.
* np.ones(4)) + + def test_scalarlike(self): + x = np.arange(1) + 7.0 + assert_equal(stats.iqr(x[0]), 0.0) + assert_equal(stats.iqr(x), 0.0) + if NumpyVersion(np.__version__) >= '1.9.0a': + assert_array_equal(stats.iqr(x, keepdims=True), [0.0]) + else: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + assert_array_equal(stats.iqr(x, keepdims=True), 0.0) + _check_warnings(w, RuntimeWarning, 1) + + def test_2D(self): + x = np.arange(15).reshape((3, 5)) + assert_equal(stats.iqr(x), 7.0) + assert_array_equal(stats.iqr(x, axis=0), 5. * np.ones(5)) + assert_array_equal(stats.iqr(x, axis=1), 2. * np.ones(3)) + assert_array_equal(stats.iqr(x, axis=(0, 1)), 7.0) + assert_array_equal(stats.iqr(x, axis=(1, 0)), 7.0) + + def test_axis(self): + # The `axis` keyword is also put through its paces in `test_keepdims`. + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) # x.shape = (71, 23, 10) + q = stats.iqr(o) + + assert_equal(stats.iqr(x, axis=(0, 1)), q) + x = np.rollaxis(x, -1, 0) # x.shape = (10, 71, 23) + assert_equal(stats.iqr(x, axis=(2, 1)), q) + x = x.swapaxes(0, 1) # x.shape = (71, 10, 23) + assert_equal(stats.iqr(x, axis=(0, 2)), q) + x = x.swapaxes(0, 1) # x.shape = (10, 71, 23) + + assert_equal(stats.iqr(x, axis=(0, 1, 2)), + stats.iqr(x, axis=None)) + assert_equal(stats.iqr(x, axis=(0,)), + stats.iqr(x, axis=0)) + + d = np.arange(3 * 5 * 7 * 11) + # Older versions of numpy only shuffle along axis=0. + # Not sure about newer, don't care. + np.random.shuffle(d) + d = d.reshape((3, 5, 7, 11)) + assert_equal(stats.iqr(d, axis=(0, 1, 2))[0], + stats.iqr(d[:,:,:, 0].ravel())) + assert_equal(stats.iqr(d, axis=(0, 1, 3))[1], + stats.iqr(d[:,:, 1,:].ravel())) + assert_equal(stats.iqr(d, axis=(3, 1, -4))[2], + stats.iqr(d[:,:, 2,:].ravel())) + assert_equal(stats.iqr(d, axis=(3, 1, 2))[2], + stats.iqr(d[2,:,:,:].ravel())) + assert_equal(stats.iqr(d, axis=(3, 2))[2, 1], + stats.iqr(d[2, 1,:,:].ravel())) + assert_equal(stats.iqr(d, axis=(1, -2))[2, 1], + stats.iqr(d[2, :, :, 1].ravel())) + assert_equal(stats.iqr(d, axis=(1, 3))[2, 2], + stats.iqr(d[2, :, 2,:].ravel())) + + if NumpyVersion(np.__version__) >= '1.9.0a': + assert_raises(IndexError, stats.iqr, d, axis=4) + else: + assert_raises(ValueError, stats.iqr, d, axis=4) + assert_raises(ValueError, stats.iqr, d, axis=(0, 0)) + + def test_rng(self): + x = np.arange(5) + assert_equal(stats.iqr(x), 2) + assert_equal(stats.iqr(x, rng=(25, 87.5)), 2.5) + assert_equal(stats.iqr(x, rng=(12.5, 75)), 2.5) + assert_almost_equal(stats.iqr(x, rng=(10, 50)), 1.6) # 3-1.4 + + assert_raises(ValueError, stats.iqr, x, rng=(0, 101)) + assert_raises(ValueError, stats.iqr, x, rng=(np.nan, 25)) + assert_raises(TypeError, stats.iqr, x, rng=(0, 50, 60)) + + def test_interpolation(self): + x = np.arange(5) + y = np.arange(4) + # Default + assert_equal(stats.iqr(x), 2) + assert_equal(stats.iqr(y), 1.5) + if NumpyVersion(np.__version__) >= '1.9.0a': + # Linear + assert_equal(stats.iqr(x, interpolation='linear'), 2) + assert_equal(stats.iqr(y, interpolation='linear'), 1.5) + # Higher + assert_equal(stats.iqr(x, interpolation='higher'), 2) + assert_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 3) + assert_equal(stats.iqr(y, interpolation='higher'), 2) + # Lower (will generally, but not always be the same as higher) + assert_equal(stats.iqr(x, interpolation='lower'), 2) + assert_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2) + assert_equal(stats.iqr(y, interpolation='lower'), 2) + # Nearest + 
assert_equal(stats.iqr(x, interpolation='nearest'), 2) + assert_equal(stats.iqr(y, interpolation='nearest'), 1) + # Midpoint + if NumpyVersion(np.__version__) >= '1.11.0a': + assert_equal(stats.iqr(x, interpolation='midpoint'), 2) + assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.5) + assert_equal(stats.iqr(y, interpolation='midpoint'), 2) + else: + # midpoint did not work correctly before numpy 1.11.0 + assert_equal(stats.iqr(x, interpolation='midpoint'), 2) + assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2) + assert_equal(stats.iqr(y, interpolation='midpoint'), 2) + else: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + # Linear + assert_equal(stats.iqr(x, interpolation='linear'), 2) + assert_equal(stats.iqr(y, interpolation='linear'), 1.5) + # Higher + assert_equal(stats.iqr(x, interpolation='higher'), 2) + assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 2.2) + assert_equal(stats.iqr(y, interpolation='higher'), 1.5) + # Lower + assert_equal(stats.iqr(x, interpolation='lower'), 2) + assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2.2) + assert_equal(stats.iqr(y, interpolation='lower'), 1.5) + # Nearest + assert_equal(stats.iqr(x, interpolation='nearest'), 2) + assert_equal(stats.iqr(y, interpolation='nearest'), 1.5) + # Midpoint + assert_equal(stats.iqr(x, interpolation='midpoint'), 2) + assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.2) + assert_equal(stats.iqr(y, interpolation='midpoint'), 1.5) + _check_warnings(w, RuntimeWarning, 11) + + if NumpyVersion(np.__version__) >= '1.9.0a': + assert_raises(ValueError, stats.iqr, x, interpolation='foobar') + else: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + assert_equal(stats.iqr(x, interpolation='foobar'), 2) + _check_warnings(w, RuntimeWarning, 1) + + def test_keepdims(self): + numpy_version = NumpyVersion(np.__version__) + + # Also tests most of `axis` + x = np.ones((3, 5, 7, 11)) + assert_equal(stats.iqr(x, axis=None, keepdims=False).shape, ()) + assert_equal(stats.iqr(x, axis=2, keepdims=False).shape, (3, 5, 11)) + assert_equal(stats.iqr(x, axis=(0, 1), keepdims=False).shape, (7, 11)) + assert_equal(stats.iqr(x, axis=(0, 3), keepdims=False).shape, (5, 7)) + assert_equal(stats.iqr(x, axis=(1,), keepdims=False).shape, (3, 7, 11)) + assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=False).shape, ()) + assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=False).shape, (7,)) + + if numpy_version >= '1.9.0a': + assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, (1, 1, 1, 1)) + assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 1, 11)) + assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11)) + assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1)) + assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 1, 7, 11)) + assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1)) + assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1)) + else: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, ()) + assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 11)) + assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (7, 11)) + assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (5, 7)) + assert_equal(stats.iqr(x, axis=(1,), 
keepdims=True).shape, (3, 7, 11)) + assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, ()) + assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (7,)) + _check_warnings(w, RuntimeWarning, 7) + + def test_nanpolicy(self): + numpy_version = NumpyVersion(np.__version__) + x = np.arange(15.0).reshape((3, 5)) + + # No NaNs + assert_equal(stats.iqr(x, nan_policy='propagate'), 7) + assert_equal(stats.iqr(x, nan_policy='omit'), 7) + assert_equal(stats.iqr(x, nan_policy='raise'), 7) + + # Yes NaNs + x[1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + if numpy_version < '1.10.0a': + # Fails over to mishmash of omit/propagate, but mostly omit + # The first case showcases the "incorrect" behavior of np.percentile + assert_equal(stats.iqr(x, nan_policy='propagate'), 8) + assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5]) + if numpy_version < '1.9.0a': + assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2]) + else: + # some fixes to percentile nan handling in 1.9 + assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2]) + _check_warnings(w, RuntimeWarning, 3) + else: + assert_equal(stats.iqr(x, nan_policy='propagate'), np.nan) + assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5]) + assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2]) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + if numpy_version < '1.9.0a': + # Fails over to mishmash of omit/propagate, but mostly omit + assert_equal(stats.iqr(x, nan_policy='omit'), 8) + assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), [5, 5, np.nan, 5, 5]) + assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 3, 2]) + _check_warnings(w, RuntimeWarning, 3) + else: + assert_equal(stats.iqr(x, nan_policy='omit'), 7.5) + assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), 5 * np.ones(5)) + assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 2.5, 2]) + + assert_raises(ValueError, stats.iqr, x, nan_policy='raise') + assert_raises(ValueError, stats.iqr, x, axis=0, nan_policy='raise') + assert_raises(ValueError, stats.iqr, x, axis=1, nan_policy='raise') + + # Bad policy + assert_raises(ValueError, stats.iqr, x, nan_policy='barfood') + + def test_scale(self): + numpy_version = NumpyVersion(np.__version__) + x = np.arange(15.0).reshape((3, 5)) + + # No NaNs + assert_equal(stats.iqr(x, scale='raw'), 7) + assert_almost_equal(stats.iqr(x, scale='normal'), 7 / 1.3489795) + assert_equal(stats.iqr(x, scale=2.0), 3.5) + + # Yes NaNs + x[1, 2] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + if numpy_version < '1.10.0a': + # Fails over to mishmash of omit/propagate, but mostly omit + assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), 8) + assert_almost_equal(stats.iqr(x, scale='normal', + nan_policy='propagate'), + 8 / 1.3489795) + assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), 4) + # axis=1 chosen to show behavior with both nans and without + if numpy_version < '1.9.0a': + assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2]) + assert_almost_equal(stats.iqr(x, axis=1, scale='normal', + nan_policy='propagate'), + np.array([2, 3, 2]) / 1.3489795) + assert_equal(stats.iqr(x, axis=1, scale=2.0, + nan_policy='propagate'), [1, 1.5, 1]) + else: + # some fixes to percentile nan handling in 1.9 + assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 
np.nan, 2])
+            assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
+                                          nan_policy='propagate'),
+                                np.array([2, np.nan, 2]) / 1.3489795)
+            assert_equal(stats.iqr(x, axis=1, scale=2.0,
+                                   nan_policy='propagate'), [1, np.nan, 1])
+                _check_warnings(w, RuntimeWarning, 6)
+            else:
+                assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), np.nan)
+                assert_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), np.nan)
+                assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), np.nan)
+                # axis=1 chosen to show behavior with both nans and without
+                assert_equal(stats.iqr(x, axis=1, scale='raw',
+                                       nan_policy='propagate'), [2, np.nan, 2])
+                assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
+                                              nan_policy='propagate'),
+                                    np.array([2, np.nan, 2]) / 1.3489795)
+                assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'),
+                             [1, np.nan, 1])
+                _check_warnings(w, RuntimeWarning, 6)
+
+        if numpy_version < '1.9.0a':
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter("always")
+                # Fails over to mishmash of omit/propagate, but mostly omit
+                assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 8)
+                assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
+                                    8 / 1.3489795)
+                assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 4)
+                _check_warnings(w, RuntimeWarning, 3)
+        else:
+            assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 7.5)
+            assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
+                                7.5 / 1.3489795)
+            assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 3.75)
+
+        # Bad scale
+        assert_raises(ValueError, stats.iqr, x, scale='foobar')
+
+
+class TestMoments(object):
+    """
+        Comparison numbers are found using R v.1.5.1.
+        Note that length(testcase) = 4.
+        testmathworks comes from documentation for the
+        Statistics Toolbox for Matlab and can be found at both
+        https://www.mathworks.com/help/stats/kurtosis.html
+        https://www.mathworks.com/help/stats/skewness.html
+        Note that both test cases came from here.
+    """
+    testcase = [1,2,3,4]
+    scalar_testcase = 4.
+    np.random.seed(1234)
+    testcase_moment_accuracy = np.random.rand(42)
+    testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965]
+
+    def test_moment(self):
+        # Reference: mean((testcase - mean(testcase))**power, axis=0)
+        y = stats.moment(self.scalar_testcase)
+        assert_approx_equal(y, 0.0)
+        y = stats.moment(self.testcase, 0)
+        assert_approx_equal(y, 1.0)
+        y = stats.moment(self.testcase, 1)
+        assert_approx_equal(y, 0.0, 10)
+        y = stats.moment(self.testcase, 2)
+        assert_approx_equal(y, 1.25)
+        y = stats.moment(self.testcase, 3)
+        assert_approx_equal(y, 0.0)
+        y = stats.moment(self.testcase, 4)
+        assert_approx_equal(y, 2.5625)
+
+        # check array_like input for moment
+        y = stats.moment(self.testcase, [1, 2, 3, 4])
+        assert_allclose(y, [0, 1.25, 0, 2.5625])
+
+        # check moment input consists only of integers
+        y = stats.moment(self.testcase, 0.0)
+        assert_approx_equal(y, 1.0)
+        assert_raises(ValueError, stats.moment, self.testcase, 1.2)
+        y = stats.moment(self.testcase, [1.0, 2, 3, 4.0])
+        assert_allclose(y, [0, 1.25, 0, 2.5625])
+
+        # test empty input
+        y = stats.moment([])
+        assert_equal(y, np.nan)
+
+        x = np.arange(10.)
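# [Editor's note] A minimal sanity check, not part of the test suite: the
# expected central moments above follow by hand from testcase = [1, 2, 3, 4]
# (mean 2.5, deviations +-1.5 and +-0.5), and the 1.3489795 divisor used in
# the iqr scale='normal' tests is the IQR of a standard normal distribution.
def _demo_moment_constants():
    data = np.array([1.0, 2.0, 3.0, 4.0])
    dev = data - data.mean()                   # [-1.5, -0.5, 0.5, 1.5]
    assert_equal(np.mean(dev**2), 1.25)        # (2.25+0.25+0.25+2.25)/4
    assert_equal(np.mean(dev**4), 2.5625)      # (5.0625+0.0625+0.0625+5.0625)/4
    norm_iqr = stats.norm.ppf(0.75) - stats.norm.ppf(0.25)
    assert_almost_equal(norm_iqr, 1.3489795, 7)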
+ x[9] = np.nan + assert_equal(stats.moment(x, 2), np.nan) + assert_almost_equal(stats.moment(x, nan_policy='omit'), 0.0) + assert_raises(ValueError, stats.moment, x, nan_policy='raise') + assert_raises(ValueError, stats.moment, x, nan_policy='foobar') + + def test_moment_propagate_nan(self): + # Check that the shape of the result is the same for inputs + # with and without nans, cf gh-5817 + a = np.arange(8).reshape(2, -1).astype(float) + a[1, 0] = np.nan + mm = stats.moment(a, 2, axis=1, nan_policy="propagate") + np.testing.assert_allclose(mm, [1.25, np.nan], atol=1e-15) + + def test_variation(self): + # variation = samplestd / mean + y = stats.variation(self.scalar_testcase) + assert_approx_equal(y, 0.0) + y = stats.variation(self.testcase) + assert_approx_equal(y, 0.44721359549996, 10) + + x = np.arange(10.) + x[9] = np.nan + assert_equal(stats.variation(x), np.nan) + assert_almost_equal(stats.variation(x, nan_policy='omit'), + 0.6454972243679028) + assert_raises(ValueError, stats.variation, x, nan_policy='raise') + assert_raises(ValueError, stats.variation, x, nan_policy='foobar') + + def test_variation_propagate_nan(self): + # Check that the shape of the result is the same for inputs + # with and without nans, cf gh-5817 + a = np.arange(8).reshape(2, -1).astype(float) + a[1, 0] = np.nan + vv = stats.variation(a, axis=1, nan_policy="propagate") + np.testing.assert_allclose(vv, [0.7453559924999299, np.nan], atol=1e-15) + + def test_skewness(self): + # Scalar test case + y = stats.skew(self.scalar_testcase) + assert_approx_equal(y, 0.0) + # sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) / + # ((sqrt(var(testmathworks)*4/5))**3)/5 + y = stats.skew(self.testmathworks) + assert_approx_equal(y, -0.29322304336607, 10) + y = stats.skew(self.testmathworks, bias=0) + assert_approx_equal(y, -0.437111105023940, 10) + y = stats.skew(self.testcase) + assert_approx_equal(y, 0.0, 10) + + x = np.arange(10.) + x[9] = np.nan + with np.errstate(invalid='ignore'): + assert_equal(stats.skew(x), np.nan) + assert_equal(stats.skew(x, nan_policy='omit'), 0.) 
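# [Editor's note] The biased skewness expected above can be reproduced from
# its definition g1 = m3 / m2**1.5 (third central moment over the second
# central moment to the 3/2 power); a sketch using the same testmathworks data:
def _demo_biased_skewness():
    data = np.array([1.165, 0.6268, 0.0751, 0.3516, -0.6965])
    dev = data - data.mean()
    g1 = np.mean(dev**3) / np.mean(dev**2)**1.5
    assert_approx_equal(g1, -0.29322304336607, 10)   # matches stats.skew above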
+ assert_raises(ValueError, stats.skew, x, nan_policy='raise') + assert_raises(ValueError, stats.skew, x, nan_policy='foobar') + + def test_skewness_scalar(self): + # `skew` must return a scalar for 1-dim input + assert_equal(stats.skew(arange(10)), 0.0) + + def test_skew_propagate_nan(self): + # Check that the shape of the result is the same for inputs + # with and without nans, cf gh-5817 + a = np.arange(8).reshape(2, -1).astype(float) + a[1, 0] = np.nan + with np.errstate(invalid='ignore'): + s = stats.skew(a, axis=1, nan_policy="propagate") + np.testing.assert_allclose(s, [0, np.nan], atol=1e-15) + + def test_kurtosis(self): + # Scalar test case + y = stats.kurtosis(self.scalar_testcase) + assert_approx_equal(y, -3.0) + # sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4 + # sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5 + # Set flags for axis = 0 and + # fisher=0 (Pearson's defn of kurtosis for compatibility with Matlab) + y = stats.kurtosis(self.testmathworks, 0, fisher=0, bias=1) + assert_approx_equal(y, 2.1658856802973, 10) + + # Note that MATLAB has confusing docs for the following case + # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness + # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3) + # The MATLAB docs imply that both should give Fisher's + y = stats.kurtosis(self.testmathworks, fisher=0, bias=0) + assert_approx_equal(y, 3.663542721189047, 10) + y = stats.kurtosis(self.testcase, 0, 0) + assert_approx_equal(y, 1.64) + + x = np.arange(10.) + x[9] = np.nan + assert_equal(stats.kurtosis(x), np.nan) + assert_almost_equal(stats.kurtosis(x, nan_policy='omit'), -1.230000) + assert_raises(ValueError, stats.kurtosis, x, nan_policy='raise') + assert_raises(ValueError, stats.kurtosis, x, nan_policy='foobar') + + def test_kurtosis_array_scalar(self): + assert_equal(type(stats.kurtosis([1,2,3])), float) + + def test_kurtosis_propagate_nan(self): + # Check that the shape of the result is the same for inputs + # with and without nans, cf gh-5817 + a = np.arange(8).reshape(2, -1).astype(float) + a[1, 0] = np.nan + k = stats.kurtosis(a, axis=1, nan_policy="propagate") + np.testing.assert_allclose(k, [-1.36, np.nan], atol=1e-15) + + def test_moment_accuracy(self): + # 'moment' must have a small enough error compared to the slower + # but very accurate numpy.power() implementation. + tc_no_mean = self.testcase_moment_accuracy - \ + np.mean(self.testcase_moment_accuracy) + assert_allclose(np.power(tc_no_mean, 42).mean(), + stats.moment(self.testcase_moment_accuracy, 42)) + + +class TestStudentTest(object): + X1 = np.array([-1, 0, 1]) + X2 = np.array([0, 1, 2]) + T1_0 = 0 + P1_0 = 1 + T1_1 = -1.732051 + P1_1 = 0.2254033 + T1_2 = -3.464102 + P1_2 = 0.0741799 + T2_0 = 1.732051 + P2_0 = 0.2254033 + + def test_onesample(self): + with suppress_warnings() as sup, np.errstate(invalid="ignore"): + sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") + t, p = stats.ttest_1samp(4., 3.) 
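# [Editor's note] The TestStudentTest constants follow from the one-sample t
# statistic t = (mean - popmean) / (s / sqrt(n)), with s the ddof=1 standard
# deviation. A minimal sketch for X1 = [-1, 0, 1] against popmean = 1:
def _demo_ttest_1samp_by_hand():
    x = np.array([-1.0, 0.0, 1.0])
    t = (x.mean() - 1.0) / (x.std(ddof=1) / np.sqrt(len(x)))
    assert_almost_equal(t, -1.732051, 6)          # T1_1 above, i.e. -sqrt(3)
    p = 2 * stats.t.sf(np.abs(t), len(x) - 1)     # two-sided p-value, df = 2
    assert_almost_equal(p, 0.2254033, 6)          # P1_1 above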
+ assert_(np.isnan(t)) + assert_(np.isnan(p)) + + t, p = stats.ttest_1samp(self.X1, 0) + + assert_array_almost_equal(t, self.T1_0) + assert_array_almost_equal(p, self.P1_0) + + res = stats.ttest_1samp(self.X1, 0) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + t, p = stats.ttest_1samp(self.X2, 0) + + assert_array_almost_equal(t, self.T2_0) + assert_array_almost_equal(p, self.P2_0) + + t, p = stats.ttest_1samp(self.X1, 1) + + assert_array_almost_equal(t, self.T1_1) + assert_array_almost_equal(p, self.P1_1) + + t, p = stats.ttest_1samp(self.X1, 2) + + assert_array_almost_equal(t, self.T1_2) + assert_array_almost_equal(p, self.P1_2) + + # check nan policy + np.random.seed(7654567) + x = stats.norm.rvs(loc=5, scale=10, size=51) + x[50] = np.nan + with np.errstate(invalid="ignore"): + assert_array_equal(stats.ttest_1samp(x, 5.0), (np.nan, np.nan)) + + assert_array_almost_equal(stats.ttest_1samp(x, 5.0, nan_policy='omit'), + (-1.6412624074367159, 0.107147027334048005)) + assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='raise') + assert_raises(ValueError, stats.ttest_1samp, x, 5.0, + nan_policy='foobar') + + +def test_percentileofscore(): + pcos = stats.percentileofscore + + assert_equal(pcos([1,2,3,4,5,6,7,8,9,10],4), 40.0) + + for (kind, result) in [('mean', 35.0), + ('strict', 30.0), + ('weak', 40.0)]: + assert_equal(pcos(np.arange(10) + 1, 4, kind=kind), result) + + # multiple - 2 + for (kind, result) in [('rank', 45.0), + ('strict', 30.0), + ('weak', 50.0), + ('mean', 40.0)]: + assert_equal(pcos([1,2,3,4,4,5,6,7,8,9], 4, kind=kind), result) + + # multiple - 3 + assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4), 50.0) + for (kind, result) in [('rank', 50.0), + ('mean', 45.0), + ('strict', 30.0), + ('weak', 60.0)]: + + assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4, kind=kind), result) + + # missing + for kind in ('rank', 'mean', 'strict', 'weak'): + assert_equal(pcos([1,2,3,5,6,7,8,9,10,11], 4, kind=kind), 30) + + # larger numbers + for (kind, result) in [('mean', 35.0), + ('strict', 30.0), + ('weak', 40.0)]: + assert_equal( + pcos([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 40, + kind=kind), result) + + for (kind, result) in [('mean', 45.0), + ('strict', 30.0), + ('weak', 60.0)]: + assert_equal( + pcos([10, 20, 30, 40, 40, 40, 50, 60, 70, 80], + 40, kind=kind), result) + + for kind in ('rank', 'mean', 'strict', 'weak'): + assert_equal( + pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], + 40, kind=kind), 30.0) + + # boundaries + for (kind, result) in [('rank', 10.0), + ('mean', 5.0), + ('strict', 0.0), + ('weak', 10.0)]: + assert_equal( + pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], + 10, kind=kind), result) + + for (kind, result) in [('rank', 100.0), + ('mean', 95.0), + ('strict', 90.0), + ('weak', 100.0)]: + assert_equal( + pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], + 110, kind=kind), result) + + # out of bounds + for (kind, score, result) in [('rank', 200, 100.0), + ('mean', 200, 100.0), + ('mean', 0, 0.0)]: + assert_equal( + pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], + score, kind=kind), result) + + assert_raises(ValueError, pcos, [1, 2, 3, 3, 4], 3, kind='unrecognized') + + +PowerDivCase = namedtuple('Case', ['f_obs', 'f_exp', 'ddof', 'axis', + 'chi2', # Pearson's + 'log', # G-test (log-likelihood) + 'mod_log', # Modified log-likelihood + 'cr', # Cressie-Read (lambda=2/3) + ]) + +# The details of the first two elements in power_div_1d_cases are used +# in a test in TestPowerDivergence. 
Check that code before making +# any changes here. +power_div_1d_cases = [ + # Use the default f_exp. + PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None, + chi2=4, + log=2*(4*np.log(4/8) + 12*np.log(12/8)), + mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)), + cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)), + # Give a non-uniform f_exp. + PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None, + chi2=24, + log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)), + mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)), + cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) + + 8*((8/2)**(2/3) - 1))/(5/9)), + # f_exp is a scalar. + PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None, + chi2=4, + log=2*(4*np.log(4/8) + 12*np.log(12/8)), + mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)), + cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)), + # f_exp equal to f_obs. + PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0, + chi2=0, log=0, mod_log=0, cr=0), +] + + +power_div_empty_cases = [ + # Shape is (0,)--a data set with length 0. The computed + # test statistic should be 0. + PowerDivCase(f_obs=[], + f_exp=None, ddof=0, axis=0, + chi2=0, log=0, mod_log=0, cr=0), + # Shape is (0, 3). This is 3 data sets, but each data set has + # length 0, so the computed test statistic should be [0, 0, 0]. + PowerDivCase(f_obs=np.array([[],[],[]]).T, + f_exp=None, ddof=0, axis=0, + chi2=[0, 0, 0], + log=[0, 0, 0], + mod_log=[0, 0, 0], + cr=[0, 0, 0]), + # Shape is (3, 0). This represents an empty collection of + # data sets in which each data set has length 3. The test + # statistic should be an empty array. + PowerDivCase(f_obs=np.array([[],[],[]]), + f_exp=None, ddof=0, axis=0, + chi2=[], + log=[], + mod_log=[], + cr=[]), +] + + +class TestPowerDivergence(object): + + def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_, + expected_stat): + f_obs = np.asarray(f_obs) + if axis is None: + num_obs = f_obs.size + else: + b = np.broadcast(f_obs, f_exp) + num_obs = b.shape[axis] + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice") + stat, p = stats.power_divergence( + f_obs=f_obs, f_exp=f_exp, ddof=ddof, + axis=axis, lambda_=lambda_) + assert_allclose(stat, expected_stat) + + if lambda_ == 1 or lambda_ == "pearson": + # Also test stats.chisquare. 
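# [Editor's note] A hedged aside on where the expected statistics in
# power_div_1d_cases come from: for lambda not in {0, -1} the power-divergence
# statistic is 2 / (lambda*(lambda+1)) * sum(f_obs * ((f_obs/f_exp)**lambda - 1)),
# and lambda=1 reduces to Pearson's chi-square. For the first case above
# (f_obs = [4, 8, 12, 8], uniform f_exp = 8) both give
# (16 + 0 + 16 + 0) / 8 = 4, the recorded chi2. A sketch:
def _demo_power_divergence_formula():
    f_obs = np.array([4.0, 8.0, 12.0, 8.0])
    f_exp = np.full_like(f_obs, f_obs.mean())     # uniform expected counts
    lambda_ = 1.0
    stat = 2.0 / (lambda_ * (lambda_ + 1)) * np.sum(
        f_obs * ((f_obs / f_exp)**lambda_ - 1))
    assert_allclose(stat, 4.0)
    assert_allclose(np.sum((f_obs - f_exp)**2 / f_exp), 4.0)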
+ stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof, + axis=axis) + assert_allclose(stat, expected_stat) + + ddof = np.asarray(ddof) + expected_p = stats.distributions.chi2.sf(expected_stat, + num_obs - 1 - ddof) + assert_allclose(p, expected_p) + + def test_basic(self): + for case in power_div_1d_cases: + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + None, case.chi2) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "pearson", case.chi2) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + 1, case.chi2) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "log-likelihood", case.log) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "mod-log-likelihood", case.mod_log) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "cressie-read", case.cr) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + 2/3, case.cr) + + def test_basic_masked(self): + for case in power_div_1d_cases: + mobs = np.ma.array(case.f_obs) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + None, case.chi2) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + "pearson", case.chi2) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + 1, case.chi2) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + "log-likelihood", case.log) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + "mod-log-likelihood", case.mod_log) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + "cressie-read", case.cr) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + 2/3, case.cr) + + def test_axis(self): + case0 = power_div_1d_cases[0] + case1 = power_div_1d_cases[1] + f_obs = np.vstack((case0.f_obs, case1.f_obs)) + f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs), + case1.f_exp)) + # Check the four computational code paths in power_divergence + # using a 2D array with axis=1. + self.check_power_divergence( + f_obs, f_exp, 0, 1, + "pearson", [case0.chi2, case1.chi2]) + self.check_power_divergence( + f_obs, f_exp, 0, 1, + "log-likelihood", [case0.log, case1.log]) + self.check_power_divergence( + f_obs, f_exp, 0, 1, + "mod-log-likelihood", [case0.mod_log, case1.mod_log]) + self.check_power_divergence( + f_obs, f_exp, 0, 1, + "cressie-read", [case0.cr, case1.cr]) + # Reshape case0.f_obs to shape (2,2), and use axis=None. + # The result should be the same. + self.check_power_divergence( + np.array(case0.f_obs).reshape(2, 2), None, 0, None, + "pearson", case0.chi2) + + def test_ddof_broadcasting(self): + # Test that ddof broadcasts correctly. + # ddof does not affect the test statistic. It is broadcast + # with the computed test statistic for the computation of + # the p value. + + case0 = power_div_1d_cases[0] + case1 = power_div_1d_cases[1] + # Create 4x2 arrays of observed and expected frequencies. + f_obs = np.vstack((case0.f_obs, case1.f_obs)).T + f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs), + case1.f_exp)).T + + expected_chi2 = [case0.chi2, case1.chi2] + + # ddof has shape (2, 1). This is broadcast with the computed + # statistic, so p will have shape (2,2). 
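# [Editor's note] The broadcasting described above is plain numpy semantics:
# a (2,)-shaped statistic against a (2, 1)-shaped ddof broadcasts to a (2, 2)
# array of p-values inside chi2.sf. A minimal sketch with made-up statistics:
def _demo_ddof_broadcasting():
    stat = np.array([4.0, 24.0])                  # one statistic per data set
    ddof = np.array([[0], [1]])                   # shape (2, 1)
    p = stats.distributions.chi2.sf(stat, 4 - 1 - ddof)
    assert_equal(p.shape, (2, 2))                 # rows: ddof values; columns: data sets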
+ ddof = np.array([[0], [1]]) + + stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof) + assert_allclose(stat, expected_chi2) + + # Compute the p values separately, passing in scalars for ddof. + stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0,0]) + stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1,0]) + + assert_array_equal(p, np.vstack((p0, p1))) + + def test_empty_cases(self): + with warnings.catch_warnings(): + for case in power_div_empty_cases: + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "pearson", case.chi2) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "log-likelihood", case.log) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "mod-log-likelihood", case.mod_log) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "cressie-read", case.cr) + + def test_power_divergence_result_attributes(self): + f_obs = power_div_1d_cases[0].f_obs + f_exp = power_div_1d_cases[0].f_exp + ddof = power_div_1d_cases[0].ddof + axis = power_div_1d_cases[0].axis + + res = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof, + axis=axis, lambda_="pearson") + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + +def test_chisquare_masked_arrays(): + # Test masked arrays. + obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T + mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T + mobs = np.ma.masked_array(obs, mask) + expected_chisq = np.array([24.0, 0.5]) + expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)), + 2*(3*np.log(0.75) + 5*np.log(1.25))]) + + chi2 = stats.distributions.chi2 + + chisq, p = stats.chisquare(mobs) + mat.assert_array_equal(chisq, expected_chisq) + mat.assert_array_almost_equal(p, chi2.sf(expected_chisq, + mobs.count(axis=0) - 1)) + + g, p = stats.power_divergence(mobs, lambda_='log-likelihood') + mat.assert_array_almost_equal(g, expected_g, decimal=15) + mat.assert_array_almost_equal(p, chi2.sf(expected_g, + mobs.count(axis=0) - 1)) + + chisq, p = stats.chisquare(mobs.T, axis=1) + mat.assert_array_equal(chisq, expected_chisq) + mat.assert_array_almost_equal(p, chi2.sf(expected_chisq, + mobs.T.count(axis=1) - 1)) + g, p = stats.power_divergence(mobs.T, axis=1, lambda_="log-likelihood") + mat.assert_array_almost_equal(g, expected_g, decimal=15) + mat.assert_array_almost_equal(p, chi2.sf(expected_g, + mobs.count(axis=0) - 1)) + + obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0]) + exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1]) + chi2, p = stats.chisquare(obs1, f_exp=exp1) + # Because of the mask at index 3 of obs1 and at index 4 of exp1, + # only the first three elements are included in the calculation + # of the statistic. + mat.assert_array_equal(chi2, 1/2 + 1/4 + 4/8) + + # When axis=None, the two values should have type np.float64. + chisq, p = stats.chisquare(np.ma.array([1,2,3]), axis=None) + assert_(isinstance(chisq, np.float64)) + assert_(isinstance(p, np.float64)) + assert_equal(chisq, 1.0) + assert_almost_equal(p, stats.distributions.chi2.sf(1.0, 2)) + + # Empty arrays: + # A data set with length 0 returns a masked scalar. 
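# [Editor's note] The expected values used earlier in this test can be checked
# by hand: the unmasked entries of the first column are [8, 8, 16, 32] (mean
# 16), giving (64 + 64 + 0 + 256) / 16 = 24.0, and the second column keeps
# [3, 4, 5] (mean 4), giving (1 + 0 + 1) / 4 = 0.5. A sketch, assuming the
# default uniform f_exp:
def _demo_masked_chisquare_values():
    for col, expected in [(np.array([8.0, 8.0, 16.0, 32.0]), 24.0),
                          (np.array([3.0, 4.0, 5.0]), 0.5)]:
        e = col.mean()
        assert_equal(np.sum((col - e)**2 / e), expected)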
+    with np.errstate(invalid='ignore'):
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "Mean of empty slice")
+            chisq, p = stats.chisquare(np.ma.array([]))
+    assert_(isinstance(chisq, np.ma.MaskedArray))
+    assert_equal(chisq.shape, ())
+    assert_(chisq.mask)
+
+    empty3 = np.ma.array([[],[],[]])
+
+    # empty3 is a collection of 0 data sets (whose lengths would be 3, if
+    # there were any), so the return value is an array with length 0.
+    chisq, p = stats.chisquare(empty3)
+    assert_(isinstance(chisq, np.ma.MaskedArray))
+    mat.assert_array_equal(chisq, [])
+
+    # empty3.T is an array containing 3 data sets, each with length 0,
+    # so an array of size (3,) is returned, with all values masked.
+    with np.errstate(invalid='ignore'):
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "Mean of empty slice")
+            chisq, p = stats.chisquare(empty3.T)
+
+    assert_(isinstance(chisq, np.ma.MaskedArray))
+    assert_equal(chisq.shape, (3,))
+    assert_(np.all(chisq.mask))
+
+
+def test_power_divergence_against_cressie_read_data():
+    # Test stats.power_divergence against tables 4 and 5 from
+    # Cressie and Read, "Multinomial Goodness-of-Fit Tests",
+    # J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464.
+    # This tests the calculation for several values of lambda.
+
+    # `table4` holds just the second and third columns from Table 4.
+    table4 = np.array([
+        # observed, expected,
+        15, 15.171,
+        11, 13.952,
+        14, 12.831,
+        17, 11.800,
+        5, 10.852,
+        11, 9.9796,
+        10, 9.1777,
+        4, 8.4402,
+        8, 7.7620,
+        10, 7.1383,
+        7, 6.5647,
+        9, 6.0371,
+        11, 5.5520,
+        3, 5.1059,
+        6, 4.6956,
+        1, 4.3183,
+        1, 3.9713,
+        4, 3.6522,
+        ]).reshape(-1, 2)
+    table5 = np.array([
+        # lambda, statistic
+        -10.0, 72.2e3,
+        -5.0, 28.9e1,
+        -3.0, 65.6,
+        -2.0, 40.6,
+        -1.5, 34.0,
+        -1.0, 29.5,
+        -0.5, 26.5,
+        0.0, 24.6,
+        0.5, 23.4,
+        0.67, 23.1,
+        1.0, 22.7,
+        1.5, 22.6,
+        2.0, 22.9,
+        3.0, 24.8,
+        5.0, 35.5,
+        10.0, 21.4e1,
+        ]).reshape(-1, 2)
+
+    for lambda_, expected_stat in table5:
+        stat, p = stats.power_divergence(table4[:,0], table4[:,1],
+                                         lambda_=lambda_)
+        assert_allclose(stat, expected_stat, rtol=5e-3)
+
+
+def test_friedmanchisquare():
+    # see ticket:113
+    # verified with matlab and R
+    # From Demsar "Statistical Comparisons of Classifiers over Multiple Data Sets"
+    # 2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28)
+    x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
+                 0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
+          array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
+                 0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
+          array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
+                 0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
+          array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
+                 0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]
+
+    # From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001:
+    x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),
+          array([2,2,1,2,3,1,2,3,2,1,1,3]),
+          array([2,4,3,3,4,3,3,4,4,1,2,1]),
+          array([3,5,4,3,4,4,3,3,3,4,4,4])]
+
+    # From Jerrold H. Zar, "Biostatistical Analysis" (example 12.6),
+    # Xf=10.68, 0.005 < p < 0.01:
+    # Probability from this example is inexact using the chi-square
+    # approximation of the Friedman statistic.
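# [Editor's note] The Xf=10.68 value can be reproduced from the textbook
# formula chi2_F = 12 / (n*k*(k+1)) * sum(R_j**2) - 3*n*(k+1), where R_j are
# the per-treatment rank sums over n blocks. A sketch using the x3 data
# defined just below (n = 5 blocks, k = 4 treatments):
def _demo_friedman_statistic():
    blocks = np.array([[7.0, 5.3, 4.9, 8.8],
                       [9.9, 5.7, 7.6, 8.9],
                       [8.5, 4.7, 5.5, 8.1],
                       [5.1, 3.5, 2.8, 3.3],
                       [10.3, 7.7, 8.4, 9.1]])  # rows: blocks, columns: treatments
    n, k = blocks.shape
    ranks = np.array([stats.rankdata(row) for row in blocks])
    R = ranks.sum(axis=0)                        # rank sums: [19, 8, 8, 15]
    chisq = 12.0 / (n * k * (k + 1)) * np.sum(R**2) - 3 * n * (k + 1)
    assert_almost_equal(chisq, 10.68, 7)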
+    x3 = [array([7.0,9.9,8.5,5.1,10.3]),
+          array([5.3,5.7,4.7,3.5,7.7]),
+          array([4.9,7.6,5.5,2.8,8.4]),
+          array([8.8,8.9,8.1,3.3,9.1])]
+
+    assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),
+                              (10.2283464566929, 0.0167215803284414))
+    assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
+                              (18.9428571428571, 0.000280938375189499))
+    assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),
+                              (10.68, 0.0135882729582176))
+    assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1])
+
+    # test for namedtuple attribute results
+    attributes = ('statistic', 'pvalue')
+    res = stats.friedmanchisquare(*x1)
+    check_named_results(res, attributes)
+
+    # test using mstats
+    assert_array_almost_equal(mstats.friedmanchisquare(x1[0], x1[1],
+                                                       x1[2], x1[3]),
+                              (10.2283464566929, 0.0167215803284414))
+    # the following fails
+    # assert_array_almost_equal(mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
+    #                           (18.9428571428571, 0.000280938375189499))
+    assert_array_almost_equal(mstats.friedmanchisquare(x3[0], x3[1],
+                                                       x3[2], x3[3]),
+                              (10.68, 0.0135882729582176))
+    assert_raises(ValueError, mstats.friedmanchisquare,x3[0],x3[1])
+
+
+def test_kstest():
+    # from numpy.testing import assert_almost_equal
+
+    # comparing with values from R
+    x = np.linspace(-1,1,9)
+    D,p = stats.kstest(x,'norm')
+    assert_almost_equal(D, 0.15865525393145705, 12)
+    assert_almost_equal(p, 0.95164069201518386, 1)
+
+    x = np.linspace(-15,15,9)
+    D,p = stats.kstest(x,'norm')
+    assert_almost_equal(D, 0.44435602715924361, 15)
+    assert_almost_equal(p, 0.038850140086788665, 8)
+
+    # test for namedtuple attribute results
+    attributes = ('statistic', 'pvalue')
+    res = stats.kstest(x, 'norm')
+    check_named_results(res, attributes)
+
+    # the following tests rely on deterministically replicated rvs
+    np.random.seed(987654321)
+    x = stats.norm.rvs(loc=0.2, size=100)
+    D,p = stats.kstest(x, 'norm', mode='asymp')
+    assert_almost_equal(D, 0.12464329735846891, 15)
+    assert_almost_equal(p, 0.089444888711820769, 15)
+    assert_almost_equal(np.array(stats.kstest(x, 'norm', mode='asymp')),
+                        np.array((0.12464329735846891, 0.089444888711820769)), 15)
+    assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='less')),
+                        np.array((0.12464329735846891, 0.040989164077641749)), 15)
+    # this 'greater' test fails with precision of decimal=14
+    assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='greater')),
+                        np.array((0.0072115233216310994, 0.98531158590396228)), 12)
+
+    # missing: no test that uses *args
+
+
+def test_ks_2samp():
+    # exact small sample solution
+    data1 = np.array([1.0,2.0])
+    data2 = np.array([1.0,2.0,3.0])
+    assert_almost_equal(np.array(stats.ks_2samp(data1+0.01,data2)),
+                        np.array((0.33333333333333337, 0.99062316386915694)))
+    assert_almost_equal(np.array(stats.ks_2samp(data1-0.01,data2)),
+                        np.array((0.66666666666666674, 0.42490954988801982)))
+    # these can also be verified graphically
+    assert_almost_equal(
+        np.array(stats.ks_2samp(np.linspace(1,100,100),
+                                np.linspace(1,100,100)+2+0.1)),
+        np.array((0.030000000000000027, 0.99999999996005062)))
+    assert_almost_equal(
+        np.array(stats.ks_2samp(np.linspace(1,100,100),
+                                np.linspace(1,100,100)+2-0.1)),
+        np.array((0.020000000000000018, 0.99999999999999933)))
+    # these are just regression tests
+    assert_almost_equal(
+        np.array(stats.ks_2samp(np.linspace(1,100,100),
+                                np.linspace(1,100,110)+20.1)),
+        np.array((0.21090909090909091, 0.015880386730710221)))
+    assert_almost_equal(
+        np.array(stats.ks_2samp(np.linspace(1,100,100),
+                                np.linspace(1,100,110)+20-0.1)),
+        np.array((0.20818181818181825, 0.017981441789762638)))
+
+    # test for namedtuple attribute results
+    attributes = ('statistic', 'pvalue')
+    res = stats.ks_2samp(data1 - 0.01, data2)
+    check_named_results(res, attributes)
+
+
+def test_ttest_rel():
+    # regression test
+    tr,pr = 0.81248591389165692, 0.41846234511362157
+    tpr = ([tr,-tr],[pr,pr])
+
+    rvs1 = np.linspace(1,100,100)
+    rvs2 = np.linspace(1.01,99.989,100)
+    rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])
+    rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])
+
+    t,p = stats.ttest_rel(rvs1, rvs2, axis=0)
+    assert_array_almost_equal([t,p],(tr,pr))
+    t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)
+    assert_array_almost_equal([t,p],tpr)
+    t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)
+    assert_array_almost_equal([t,p],tpr)
+
+    # test scalars
+    with suppress_warnings() as sup, np.errstate(invalid="ignore"):
+        sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
+        t, p = stats.ttest_rel(4., 3.)
+    assert_(np.isnan(t))
+    assert_(np.isnan(p))
+
+    # test for namedtuple attribute results
+    attributes = ('statistic', 'pvalue')
+    res = stats.ttest_rel(rvs1, rvs2, axis=0)
+    check_named_results(res, attributes)
+
+    # test on 3 dimensions
+    rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
+    rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
+    t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)
+    assert_array_almost_equal(np.abs(t), tr)
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (2, 3))
+
+    t,p = stats.ttest_rel(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
+    assert_array_almost_equal(np.abs(t), tr)
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (3, 2))
+
+    # check nan policy
+    np.random.seed(12345678)
+    x = stats.norm.rvs(loc=5, scale=10, size=501)
+    x[500] = np.nan
+    y = (stats.norm.rvs(loc=5, scale=10, size=501) +
+         stats.norm.rvs(scale=0.2, size=501))
+    y[500] = np.nan
+
+    with np.errstate(invalid="ignore"):
+        assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan))
+
+    assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'),
+                              (0.25299925303978066, 0.8003729814201519))
+    assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise')
+    assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar')
+
+    # test zero division problem
+    t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1])
+    assert_equal((np.abs(t), p), (np.inf, 0))
+    with np.errstate(invalid="ignore"):
+        assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
+
+    # check that nans in the input arrays result in nan output
+    anan = np.array([[1, np.nan], [-1, 1]])
+    assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))),
+                 ([0, np.nan], [1, np.nan]))
+
+    # test that an incorrect input shape raises an error
+    x = np.arange(24)
+    assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)),
+                  x.reshape((2, 3, 4)))
+
+
+def test_ttest_rel_nan_2nd_arg():
+    # regression test for gh-6134: nans in the second arg were not handled
+    x = [np.nan, 2.0, 3.0, 4.0]
+    y = [1.0, 2.0, 1.0, 2.0]
+
+    r1 = stats.ttest_rel(x, y, nan_policy='omit')
+    r2 = stats.ttest_rel(y, x, nan_policy='omit')
+    assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
+    assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
+
+    # NB: arguments are paired when NaNs are dropped
+    r3 = stats.ttest_rel(y[1:], x[1:])
+    assert_allclose(r2, r3, atol=1e-15)
+
+    # .. and this is consistent with R.
R code: + # x = c(NA, 2.0, 3.0, 4.0) + # y = c(1.0, 2.0, 1.0, 2.0) + # t.test(x, y, paired=TRUE) + assert_allclose(r2, (-2, 0.1835), atol=1e-4) + + +def _desc_stats(x1, x2, axis=0): + def _stats(x, axis=0): + x = np.asarray(x) + mu = np.mean(x, axis=axis) + std = np.std(x, axis=axis, ddof=1) + nobs = x.shape[axis] + return mu, std, nobs + return _stats(x1, axis) + _stats(x2, axis) + + +def test_ttest_ind(): + # regression test + tr = 1.0912746897927283 + pr = 0.27647818616351882 + tpr = ([tr,-tr],[pr,pr]) + + rvs2 = np.linspace(1,100,100) + rvs1 = np.linspace(5,105,100) + rvs1_2D = np.array([rvs1, rvs2]) + rvs2_2D = np.array([rvs2, rvs1]) + + t,p = stats.ttest_ind(rvs1, rvs2, axis=0) + assert_array_almost_equal([t,p],(tr,pr)) + # test from_stats API + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1, + rvs2)), + [t, p]) + t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0) + assert_array_almost_equal([t,p],tpr) + args = _desc_stats(rvs1_2D.T, rvs2_2D.T) + assert_array_almost_equal(stats.ttest_ind_from_stats(*args), + [t, p]) + t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1) + assert_array_almost_equal([t,p],tpr) + args = _desc_stats(rvs1_2D, rvs2_2D, axis=1) + assert_array_almost_equal(stats.ttest_ind_from_stats(*args), + [t, p]) + + # test scalars + with suppress_warnings() as sup, np.errstate(invalid="ignore"): + sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") + t, p = stats.ttest_ind(4., 3.) + assert_(np.isnan(t)) + assert_(np.isnan(p)) + + # test on 3 dimensions + rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) + rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) + t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1) + assert_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (2, 3)) + + t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2) + assert_array_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (3, 2)) + + # check nan policy + np.random.seed(12345678) + x = stats.norm.rvs(loc=5, scale=10, size=501) + x[500] = np.nan + y = stats.norm.rvs(loc=5, scale=10, size=500) + + with np.errstate(invalid="ignore"): + assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan)) + + assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'), + (0.24779670949091914, 0.80434267337517906)) + assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise') + assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar') + + # test zero division problem + t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1]) + assert_equal((np.abs(t), p), (np.inf, 0)) + + with np.errstate(invalid="ignore"): + assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan)) + + # check that nan in input array result in nan output + anan = np.array([[1, np.nan], [-1, 1]]) + assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))), + ([0, np.nan], [1, np.nan])) + + +def test_ttest_ind_with_uneq_var(): + # check vs. 
R + a = (1, 2, 3) + b = (1.1, 2.9, 4.2) + pr = 0.53619490753126731 + tr = -0.68649512735572582 + t, p = stats.ttest_ind(a, b, equal_var=False) + assert_array_almost_equal([t,p], [tr, pr]) + # test from desc stats API + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b), + equal_var=False), + [t, p]) + + a = (1, 2, 3, 4) + pr = 0.84354139131608286 + tr = -0.2108663315950719 + t, p = stats.ttest_ind(a, b, equal_var=False) + assert_array_almost_equal([t,p], [tr, pr]) + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b), + equal_var=False), + [t, p]) + + # regression test + tr = 1.0912746897927283 + tr_uneq_n = 0.66745638708050492 + pr = 0.27647831993021388 + pr_uneq_n = 0.50873585065616544 + tpr = ([tr,-tr],[pr,pr]) + + rvs3 = np.linspace(1,100, 25) + rvs2 = np.linspace(1,100,100) + rvs1 = np.linspace(5,105,100) + rvs1_2D = np.array([rvs1, rvs2]) + rvs2_2D = np.array([rvs2, rvs1]) + + t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False) + assert_array_almost_equal([t,p],(tr,pr)) + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1, + rvs2), + equal_var=False), + (t, p)) + + t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False) + assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n)) + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1, + rvs3), + equal_var=False), + (t, p)) + + t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False) + assert_array_almost_equal([t,p],tpr) + args = _desc_stats(rvs1_2D.T, rvs2_2D.T) + assert_array_almost_equal(stats.ttest_ind_from_stats(*args, + equal_var=False), + (t, p)) + + t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False) + assert_array_almost_equal([t,p],tpr) + args = _desc_stats(rvs1_2D, rvs2_2D, axis=1) + assert_array_almost_equal(stats.ttest_ind_from_stats(*args, + equal_var=False), + (t, p)) + + # test for namedtuple attribute results + attributes = ('statistic', 'pvalue') + res = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False) + check_named_results(res, attributes) + + # test on 3 dimensions + rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) + rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) + t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False) + assert_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (2, 3)) + args = _desc_stats(rvs1_3D, rvs2_3D, axis=1) + t, p = stats.ttest_ind_from_stats(*args, equal_var=False) + assert_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (2, 3)) + + t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), + axis=2, equal_var=False) + assert_array_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (3, 2)) + args = _desc_stats(np.rollaxis(rvs1_3D, 2), + np.rollaxis(rvs2_3D, 2), axis=2) + t, p = stats.ttest_ind_from_stats(*args, equal_var=False) + assert_array_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (3, 2)) + + # test zero division problem + t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False) + assert_equal((np.abs(t), p), (np.inf, 0)) + with np.errstate(all='ignore'): + assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0], equal_var=False), + (np.nan, np.nan)) + + # check that nan in input array result in nan output + anan = np.array([[1, np.nan], [-1, 1]]) + assert_equal(stats.ttest_ind(anan, np.zeros((2, 2)), equal_var=False), + ([0, np.nan], [1, 
np.nan])) + + +def test_ttest_ind_nan_2nd_arg(): + # regression test for gh-6134: nans in the second arg were not handled + x = [np.nan, 2.0, 3.0, 4.0] + y = [1.0, 2.0, 1.0, 2.0] + + r1 = stats.ttest_ind(x, y, nan_policy='omit') + r2 = stats.ttest_ind(y, x, nan_policy='omit') + assert_allclose(r2.statistic, -r1.statistic, atol=1e-15) + assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15) + + # NB: arguments are not paired when NaNs are dropped + r3 = stats.ttest_ind(y, x[1:]) + assert_allclose(r2, r3, atol=1e-15) + + # .. and this is consistent with R. R code: + # x = c(NA, 2.0, 3.0, 4.0) + # y = c(1.0, 2.0, 1.0, 2.0) + # t.test(x, y, var.equal=TRUE) + assert_allclose(r2, (-2.5354627641855498, 0.052181400457057901), atol=1e-15) + + +def test_gh5686(): + mean1, mean2 = np.array([1, 2]), np.array([3, 4]) + std1, std2 = np.array([5, 3]), np.array([4, 5]) + nobs1, nobs2 = np.array([130, 140]), np.array([100, 150]) + # This will raise a TypeError unless gh-5686 is fixed. + stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2) + + +def test_ttest_1samp_new(): + n1, n2, n3 = (10,15,20) + rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3)) + + # check multidimensional array and correct axis handling + # deterministic rvn1 and rvn2 would be better as in test_ttest_rel + t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0) + t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0) + t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1) + assert_array_almost_equal(t1,t2, decimal=14) + assert_almost_equal(t1[0,0],t3, decimal=14) + assert_equal(t1.shape, (n2,n3)) + + t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n3)),axis=1) + t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1) + t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1) + assert_array_almost_equal(t1,t2, decimal=14) + assert_almost_equal(t1[0,0],t3, decimal=14) + assert_equal(t1.shape, (n1,n3)) + + t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2)),axis=2) + t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2) + t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1) + assert_array_almost_equal(t1,t2, decimal=14) + assert_almost_equal(t1[0,0],t3, decimal=14) + assert_equal(t1.shape, (n1,n2)) + + # test zero division problem + t, p = stats.ttest_1samp([0, 0, 0], 1) + assert_equal((np.abs(t), p), (np.inf, 0)) + + with np.errstate(all='ignore'): + assert_equal(stats.ttest_1samp([0, 0, 0], 0), (np.nan, np.nan)) + + # check that nan in input array result in nan output + anan = np.array([[1, np.nan],[-1, 1]]) + assert_equal(stats.ttest_1samp(anan, 0), ([0, np.nan], [1, np.nan])) + + +class TestDescribe(object): + def test_describe_scalar(self): + with suppress_warnings() as sup, np.errstate(invalid="ignore"): + sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") + n, mm, m, v, sk, kurt = stats.describe(4.) 
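# [Editor's note] describe() returns the namedtuple
# (nobs, minmax, mean, variance, skewness, kurtosis). For a scalar input
# nobs == 1, so the ddof=1 sample variance divides by zero and comes out as
# nan, which is exactly what the assertions below expect.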
+ assert_equal(n, 1) + assert_equal(mm, (4.0, 4.0)) + assert_equal(m, 4.0) + assert_(np.isnan(v)) + assert_array_almost_equal(sk, 0.0, decimal=13) + assert_array_almost_equal(kurt, -3.0, decimal=13) + + def test_describe_numbers(self): + x = np.vstack((np.ones((3,4)), 2 * np.ones((2,4)))) + nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.])) + mc = np.array([1.4, 1.4, 1.4, 1.4]) + vc = np.array([0.3, 0.3, 0.3, 0.3]) + skc = [0.40824829046386357] * 4 + kurtc = [-1.833333333333333] * 4 + n, mm, m, v, sk, kurt = stats.describe(x) + assert_equal(n, nc) + assert_equal(mm, mmc) + assert_equal(m, mc) + assert_equal(v, vc) + assert_array_almost_equal(sk, skc, decimal=13) + assert_array_almost_equal(kurt, kurtc, decimal=13) + n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1) + assert_equal(n, nc) + assert_equal(mm, mmc) + assert_equal(m, mc) + assert_equal(v, vc) + assert_array_almost_equal(sk, skc, decimal=13) + assert_array_almost_equal(kurt, kurtc, decimal=13) + + x = np.arange(10.) + x[9] = np.nan + + nc, mmc = (9, (0.0, 8.0)) + mc = 4.0 + vc = 7.5 + skc = 0.0 + kurtc = -1.2300000000000002 + n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit') + assert_equal(n, nc) + assert_equal(mm, mmc) + assert_equal(m, mc) + assert_equal(v, vc) + assert_array_almost_equal(sk, skc) + assert_array_almost_equal(kurt, kurtc, decimal=13) + + assert_raises(ValueError, stats.describe, x, nan_policy='raise') + assert_raises(ValueError, stats.describe, x, nan_policy='foobar') + + def test_describe_result_attributes(self): + actual = stats.describe(np.arange(5)) + attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness', + 'kurtosis') + check_named_results(actual, attributes) + + def test_describe_ddof(self): + x = np.vstack((np.ones((3, 4)), 2 * np.ones((2, 4)))) + nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.])) + mc = np.array([1.4, 1.4, 1.4, 1.4]) + vc = np.array([0.24, 0.24, 0.24, 0.24]) + skc = [0.40824829046386357] * 4 + kurtc = [-1.833333333333333] * 4 + n, mm, m, v, sk, kurt = stats.describe(x, ddof=0) + assert_equal(n, nc) + assert_allclose(mm, mmc, rtol=1e-15) + assert_allclose(m, mc, rtol=1e-15) + assert_allclose(v, vc, rtol=1e-15) + assert_array_almost_equal(sk, skc, decimal=13) + assert_array_almost_equal(kurt, kurtc, decimal=13) + + def test_describe_axis_none(self): + x = np.vstack((np.ones((3, 4)), 2 * np.ones((2, 4)))) + + # expected values + e_nobs, e_minmax = (20, (1.0, 2.0)) + e_mean = 1.3999999999999999 + e_var = 0.25263157894736848 + e_skew = 0.4082482904638634 + e_kurt = -1.8333333333333333 + + # actual values + a = stats.describe(x, axis=None) + + assert_equal(a.nobs, e_nobs) + assert_almost_equal(a.minmax, e_minmax) + assert_almost_equal(a.mean, e_mean) + assert_almost_equal(a.variance, e_var) + assert_array_almost_equal(a.skewness, e_skew, decimal=13) + assert_array_almost_equal(a.kurtosis, e_kurt, decimal=13) + + def test_describe_empty(self): + assert_raises(ValueError, stats.describe, []) + + +def test_normalitytests(): + assert_raises(ValueError, stats.skewtest, 4.) + assert_raises(ValueError, stats.kurtosistest, 4.) + assert_raises(ValueError, stats.normaltest, 4.) 
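# [Editor's note] D'Agostino-Pearson's normaltest combines the two component
# z statistics as k2 = z_skew**2 + z_kurt**2, referred to a chi-square
# distribution with 2 degrees of freedom. The constants defined just below
# are consistent with that, as this sketch checks:
def _demo_normaltest_composition():
    z_skew, z_kurt = 1.98078826, -0.01403734      # st_skew, st_kurt below
    k2 = z_skew**2 + z_kurt**2
    assert_almost_equal(k2, 3.92371918, 6)        # st_normal below
    assert_almost_equal(stats.distributions.chi2.sf(k2, 2), 0.14059673, 6)  # pv_normal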
+
+    # numbers verified with R: dagoTest in package fBasics
+    st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734)
+    pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019)
+    x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+    attributes = ('statistic', 'pvalue')
+
+    assert_array_almost_equal(stats.normaltest(x), (st_normal, pv_normal))
+    check_named_results(stats.normaltest(x), attributes)
+    assert_array_almost_equal(stats.skewtest(x), (st_skew, pv_skew))
+    check_named_results(stats.skewtest(x), attributes)
+    assert_array_almost_equal(stats.kurtosistest(x), (st_kurt, pv_kurt))
+    check_named_results(stats.kurtosistest(x), attributes)
+
+    # Test axis=None (equal to axis=0 for 1-D input)
+    assert_array_almost_equal(stats.normaltest(x, axis=None),
+                              (st_normal, pv_normal))
+    assert_array_almost_equal(stats.skewtest(x, axis=None),
+                              (st_skew, pv_skew))
+    assert_array_almost_equal(stats.kurtosistest(x, axis=None),
+                              (st_kurt, pv_kurt))
+
+    x = np.arange(10.)
+    x[9] = np.nan
+    with np.errstate(invalid="ignore"):
+        assert_array_equal(stats.skewtest(x), (np.nan, np.nan))
+
+    expected = (1.0184643553962129, 0.30845733195153502)
+    assert_array_almost_equal(stats.skewtest(x, nan_policy='omit'), expected)
+
+    with np.errstate(all='ignore'):
+        assert_raises(ValueError, stats.skewtest, x, nan_policy='raise')
+    assert_raises(ValueError, stats.skewtest, x, nan_policy='foobar')
+
+    x = np.arange(30.)
+    x[29] = np.nan
+    with np.errstate(all='ignore'):
+        assert_array_equal(stats.kurtosistest(x), (np.nan, np.nan))
+
+    expected = (-2.2683547379505273, 0.023307594135872967)
+    assert_array_almost_equal(stats.kurtosistest(x, nan_policy='omit'),
+                              expected)
+
+    assert_raises(ValueError, stats.kurtosistest, x, nan_policy='raise')
+    assert_raises(ValueError, stats.kurtosistest, x, nan_policy='foobar')
+
+    with np.errstate(all='ignore'):
+        assert_array_equal(stats.normaltest(x), (np.nan, np.nan))
+
+    expected = (6.2260409514287449, 0.04446644248650191)
+    assert_array_almost_equal(stats.normaltest(x, nan_policy='omit'), expected)
+
+    assert_raises(ValueError, stats.normaltest, x, nan_policy='raise')
+    assert_raises(ValueError, stats.normaltest, x, nan_policy='foobar')
+
+    # regression test for issue gh-9033: x is clearly non-normal, but the
+    # power of a negative denom needs to be handled correctly to reject
+    # normality
+    counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
+    x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
+    assert_equal(stats.kurtosistest(x)[1] < 0.01, True)
+
+
+class TestRankSums(object):
+    def test_ranksums_result_attributes(self):
+        res = stats.ranksums(np.arange(5), np.arange(25))
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+
+class TestJarqueBera(object):
+    def test_jarque_bera_stats(self):
+        np.random.seed(987654321)
+        x = np.random.normal(0, 1, 100000)
+        y = np.random.chisquare(10000, 100000)
+        z = np.random.rayleigh(1, 100000)
+
+        assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1])
+        assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1])
+        assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1])
+
+    def test_jarque_bera_array_like(self):
+        np.random.seed(987654321)
+        x = np.random.normal(0, 1, 100000)
+
+        JB1, p1 = stats.jarque_bera(list(x))
+        JB2, p2 = stats.jarque_bera(tuple(x))
+        JB3, p3 = stats.jarque_bera(x.reshape(2, 50000))
+
+        assert_(JB1 == JB2 == JB3)
+        assert_(p1 == p2 == p3)
+
+    def test_jarque_bera_size(self):
+        assert_raises(ValueError, stats.jarque_bera, [])
+
+
+def test_skewtest_too_few_samples():
+    # Regression
test for ticket #1492. + # skewtest requires at least 8 samples; 7 should raise a ValueError. + x = np.arange(7.0) + assert_raises(ValueError, stats.skewtest, x) + + +def test_kurtosistest_too_few_samples(): + # Regression test for ticket #1425. + # kurtosistest requires at least 5 samples; 4 should raise a ValueError. + x = np.arange(4.0) + assert_raises(ValueError, stats.kurtosistest, x) + + +class TestMannWhitneyU(object): + X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589, + 20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105, + 19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953, + 20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274, + 20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021, + 19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892, + 17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179, + 20.4970638083542, 19.5567594734914] + + Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575, + 19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655, + 19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841, + 18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636, + 19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356] + + significant = 14 + + def test_mannwhitneyu_one_sided(self): + u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less') + u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater') + u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater') + u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less') + + assert_equal(p1, p2) + assert_equal(p3, p4) + assert_(p1 != p3) + assert_equal(u1, 498) + assert_equal(u2, 102) + assert_equal(u3, 498) + assert_equal(u4, 102) + assert_approx_equal(p1, 0.999957683256589, significant=self.significant) + assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant) + + def test_mannwhitneyu_two_sided(self): + u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided') + u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided') + + assert_equal(p1, p2) + assert_equal(u1, 498) + assert_equal(u2, 102) + assert_approx_equal(p1, 9.188326533255e-05, + significant=self.significant) + + def test_mannwhitneyu_default(self): + # The default value for alternative is None + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, + "Calling `mannwhitneyu` without .*`alternative`") + u1, p1 = stats.mannwhitneyu(self.X, self.Y) + u2, p2 = stats.mannwhitneyu(self.Y, self.X) + u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative=None) + + assert_equal(p1, p2) + assert_equal(p1, p3) + assert_equal(u1, 102) + assert_equal(u2, 102) + assert_equal(u3, 102) + assert_approx_equal(p1, 4.5941632666275e-05, + significant=self.significant) + + def test_mannwhitneyu_no_correct_one_sided(self): + u1, p1 = stats.mannwhitneyu(self.X, self.Y, False, + alternative='less') + u2, p2 = stats.mannwhitneyu(self.Y, self.X, False, + alternative='greater') + u3, p3 = stats.mannwhitneyu(self.X, self.Y, False, + alternative='greater') + u4, p4 = stats.mannwhitneyu(self.Y, self.X, False, + alternative='less') + + assert_equal(p1, p2) + assert_equal(p3, p4) + assert_(p1 != p3) + assert_equal(u1, 498) + assert_equal(u2, 102) + assert_equal(u3, 498) + assert_equal(u4, 102) + assert_approx_equal(p1, 0.999955905990004, significant=self.significant) + assert_approx_equal(p3, 
4.40940099958089e-05, significant=self.significant) + + def test_mannwhitneyu_no_correct_two_sided(self): + u1, p1 = stats.mannwhitneyu(self.X, self.Y, False, + alternative='two-sided') + u2, p2 = stats.mannwhitneyu(self.Y, self.X, False, + alternative='two-sided') + + assert_equal(p1, p2) + assert_equal(u1, 498) + assert_equal(u2, 102) + assert_approx_equal(p1, 8.81880199916178e-05, + significant=self.significant) + + def test_mannwhitneyu_no_correct_default(self): + # The default value for alternative is None + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, + "Calling `mannwhitneyu` without .*`alternative`") + u1, p1 = stats.mannwhitneyu(self.X, self.Y, False) + u2, p2 = stats.mannwhitneyu(self.Y, self.X, False) + u3, p3 = stats.mannwhitneyu(self.X, self.Y, False, + alternative=None) + + assert_equal(p1, p2) + assert_equal(p1, p3) + assert_equal(u1, 102) + assert_equal(u2, 102) + assert_equal(u3, 102) + assert_approx_equal(p1, 4.40940099958089e-05, + significant=self.significant) + + def test_mannwhitneyu_ones(self): + x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1.]) + + y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1., + 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., + 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., + 2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2., + 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., + 1., 1., 1., 1.]) + + # p-value verified with matlab and R to 5 significant digits + assert_array_almost_equal(stats.stats.mannwhitneyu(x, y, + alternative='less'), + (16980.5, 2.8214327656317373e-005), + decimal=12) + + def test_mannwhitneyu_result_attributes(self): + # test for namedtuple attribute results + attributes = ('statistic', 'pvalue') + res = stats.mannwhitneyu(self.X, self.Y, alternative="less") + check_named_results(res, attributes) + + +def test_pointbiserial(): + # same as mstats test except for the nan + # Test data: https://web.archive.org/web/20060504220742/https://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output + x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0, + 0,0,0,0,1] + y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0, + 
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1, + 0.8,0.7,0.6,0.5,0.2,0.2,0.1] + assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5) + + # test for namedtuple attribute results + attributes = ('correlation', 'pvalue') + res = stats.pointbiserialr(x, y) + check_named_results(res, attributes) + + +def test_obrientransform(): + # A couple tests calculated by hand. + x1 = np.array([0, 2, 4]) + t1 = stats.obrientransform(x1) + expected = [7, -2, 7] + assert_allclose(t1[0], expected) + + x2 = np.array([0, 3, 6, 9]) + t2 = stats.obrientransform(x2) + expected = np.array([30, 0, 0, 30]) + assert_allclose(t2[0], expected) + + # Test two arguments. + a, b = stats.obrientransform(x1, x2) + assert_equal(a, t1[0]) + assert_equal(b, t2[0]) + + # Test three arguments. + a, b, c = stats.obrientransform(x1, x2, x1) + assert_equal(a, t1[0]) + assert_equal(b, t2[0]) + assert_equal(c, t1[0]) + + # This is a regression test to check np.var replacement. + # The author of this test didn't separately verify the numbers. + x1 = np.arange(5) + result = np.array( + [[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667], + [21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]]) + assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8) + + # Example from "O'Brien Test for Homogeneity of Variance" + # by Herve Abdi. + values = range(5, 11) + reps = np.array([5, 11, 9, 3, 2, 2]) + data = np.repeat(values, reps) + transformed_values = np.array([3.1828, 0.5591, 0.0344, + 1.6086, 5.2817, 11.0538]) + expected = np.repeat(transformed_values, reps) + result = stats.obrientransform(data) + assert_array_almost_equal(result[0], expected, decimal=4) + + +class HarMeanTestCase: + def test_1dlist(self): + # Test a 1d list + a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] + b = 34.1417152147 + self.do(a, b) + + def test_1darray(self): + # Test a 1d array + a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + b = 34.1417152147 + self.do(a, b) + + def test_1dma(self): + # Test a 1d masked array + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + b = 34.1417152147 + self.do(a, b) + + def test_1dmavalue(self): + # Test a 1d masked array with a masked value + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], + mask=[0,0,0,0,0,0,0,0,0,1]) + b = 31.8137186141 + self.do(a, b) + + # Note the next tests use axis=None as default, not axis=0 + def test_2dlist(self): + # Test a 2d list + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = 38.6696271841 + self.do(a, b) + + def test_2darray(self): + # Test a 2d array + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = 38.6696271841 + self.do(np.array(a), b) + + def test_2dma(self): + # Test a 2d masked array + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = 38.6696271841 + self.do(np.ma.array(a), b) + + def test_2daxis0(self): + # Test a 2d list with axis=0 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545]) + self.do(a, b, axis=0) + + def test_2daxis1(self): + # Test a 2d list with axis=1 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = np.array([19.2, 63.03939962, 103.80078637]) + self.do(a, b, axis=1) + + def test_2dmatrixdaxis0(self): + # Test a 2d list with axis=0 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = np.matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]]) + self.do(np.matrix(a), b, axis=0) + + def 
test_2dmatrixaxis1(self): + # Test a 2d list with axis=1 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = np.matrix([[19.2, 63.03939962, 103.80078637]]).T + self.do(np.matrix(a), b, axis=1) + + +class TestHarMean(HarMeanTestCase): + def do(self, a, b, axis=None, dtype=None): + x = stats.hmean(a, axis=axis, dtype=dtype) + assert_almost_equal(b, x) + assert_equal(x.dtype, dtype) + + +class GeoMeanTestCase: + def test_1dlist(self): + # Test a 1d list + a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] + b = 45.2872868812 + self.do(a, b) + + def test_1darray(self): + # Test a 1d array + a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + b = 45.2872868812 + self.do(a, b) + + def test_1dma(self): + # Test a 1d masked array + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + b = 45.2872868812 + self.do(a, b) + + def test_1dmavalue(self): + # Test a 1d masked array with a masked value + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1]) + b = 41.4716627439 + self.do(a, b) + + # Note the next tests use axis=None as default, not axis=0 + def test_2dlist(self): + # Test a 2d list + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = 52.8885199 + self.do(a, b) + + def test_2darray(self): + # Test a 2d array + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = 52.8885199 + self.do(np.array(a), b) + + def test_2dma(self): + # Test a 2d masked array + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = 52.8885199 + self.do(np.ma.array(a), b) + + def test_2daxis0(self): + # Test a 2d list with axis=0 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371]) + self.do(a, b, axis=0) + + def test_2daxis1(self): + # Test a 2d list with axis=1 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = np.array([22.13363839, 64.02171746, 104.40086817]) + self.do(a, b, axis=1) + + def test_2dmatrixdaxis0(self): + # Test a 2d list with axis=0 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]]) + self.do(np.matrix(a), b, axis=0) + + def test_2dmatrixaxis1(self): + # Test a 2d list with axis=1 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T + self.do(np.matrix(a), b, axis=1) + + def test_1dlist0(self): + # Test a 1d list with zero element + a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0] + b = 0.0 # due to exp(-inf)=0 + olderr = np.seterr(all='ignore') + try: + self.do(a, b) + finally: + np.seterr(**olderr) + + def test_1darray0(self): + # Test a 1d array with zero element + a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) + b = 0.0 # due to exp(-inf)=0 + olderr = np.seterr(all='ignore') + try: + self.do(a, b) + finally: + np.seterr(**olderr) + + def test_1dma0(self): + # Test a 1d masked array with zero element + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) + b = 41.4716627439 + olderr = np.seterr(all='ignore') + try: + self.do(a, b) + finally: + np.seterr(**olderr) + + def test_1dmainf(self): + # Test a 1d masked array with negative element + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1]) + b = 41.4716627439 + olderr = np.seterr(all='ignore') + try: + self.do(a, b) + finally: + np.seterr(**olderr) + + +class TestGeoMean(GeoMeanTestCase): + def do(self, a, b, axis=None, dtype=None): + # Note this doesn't test when axis is not 
specified + x = stats.gmean(a, axis=axis, dtype=dtype) + assert_almost_equal(b, x) + assert_equal(x.dtype, dtype) + + +def test_binomtest(): + # precision tests compared to R for ticket:986 + pp = np.concatenate((np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5), + np.linspace(0.85,0.95,5))) + n = 501 + x = 450 + results = [0.0, 0.0, 1.0159969301994141e-304, + 2.9752418572150531e-275, 7.7668382922535275e-250, + 2.3381250925167094e-099, 7.8284591587323951e-081, + 9.9155947819961383e-065, 2.8729390725176308e-050, + 1.7175066298388421e-037, 0.0021070691951093692, + 0.12044570587262322, 0.88154763174802508, 0.027120993063129286, + 2.6102587134694721e-006] + + for p, res in zip(pp,results): + assert_approx_equal(stats.binom_test(x, n, p), res, + significant=12, err_msg='fail forp=%f' % p) + + assert_approx_equal(stats.binom_test(50,100,0.1), 5.8320387857343647e-024, + significant=12, err_msg='fail forp=%f' % p) + + +def test_binomtest2(): + # test added for issue #2384 + res2 = [ + [1.0, 1.0], + [0.5,1.0,0.5], + [0.25,1.00,1.00,0.25], + [0.125,0.625,1.000,0.625,0.125], + [0.0625,0.3750,1.0000,1.0000,0.3750,0.0625], + [0.03125,0.21875,0.68750,1.00000,0.68750,0.21875,0.03125], + [0.015625,0.125000,0.453125,1.000000,1.000000,0.453125,0.125000,0.015625], + [0.0078125,0.0703125,0.2890625,0.7265625,1.0000000,0.7265625,0.2890625, + 0.0703125,0.0078125], + [0.00390625,0.03906250,0.17968750,0.50781250,1.00000000,1.00000000, + 0.50781250,0.17968750,0.03906250,0.00390625], + [0.001953125,0.021484375,0.109375000,0.343750000,0.753906250,1.000000000, + 0.753906250,0.343750000,0.109375000,0.021484375,0.001953125] + ] + + for k in range(1, 11): + res1 = [stats.binom_test(v, k, 0.5) for v in range(k + 1)] + assert_almost_equal(res1, res2[k-1], decimal=10) + + +def test_binomtest3(): + # test added for issue #2384 + # test when x == n*p and neighbors + res3 = [stats.binom_test(v, v*k, 1./k) for v in range(1, 11) + for k in range(2, 11)] + assert_equal(res3, np.ones(len(res3), int)) + + #> bt=c() + #> for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}} + binom_testm1 = np.array([ + 0.5, 0.5555555555555556, 0.578125, 0.5904000000000003, + 0.5981224279835393, 0.603430543396034, 0.607304096221924, + 0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115, + 0.68853759765625, 0.6980101120000006, 0.703906431368616, + 0.70793209416498, 0.7108561134173507, 0.713076544331419, + 0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174, + 0.74986110468096, 0.7548015520398076, 0.7581671424768577, + 0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625, + 0.761553963657302, 0.774800934828818, 0.7818005980538996, + 0.78613491480358, 0.789084353140195, 0.7912217659828884, + 0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176, + 0.7976688481430754, 0.8039848974727624, 0.807891868948366, + 0.8105487660137676, 0.812473307174702, 0.8139318233591120, + 0.815075399104785, 0.7744140625, 0.8037322594985427, + 0.814742863657656, 0.8205425178645808, 0.8241275984172285, + 0.8265645374416, 0.8283292196088257, 0.829666291102775, + 0.8307144686362666, 0.7905273437499996, 0.8178712053954738, + 0.828116983756619, 0.833508948940494, 0.8368403871552892, + 0.839104213210105, 0.840743186196171, 0.84198481438049, + 0.8429580531563676, 0.803619384765625, 0.829338573944648, + 0.8389591907548646, 0.84401876783902, 0.84714369697889, + 0.8492667010581667, 0.850803474598719, 0.851967542858308, + 0.8528799045949524, 0.8145294189453126, 
0.838881732845347, + 0.847979024541911, 0.852760894015685, 0.8557134656773457, + 0.8577190131799202, 0.85917058278431, 0.860270010472127, + 0.861131648404582, 0.823802947998047, 0.846984756807511, + 0.855635653643743, 0.860180994825685, 0.86298688573253, + 0.864892525675245, 0.866271647085603, 0.867316125625004, + 0.8681346531755114 + ]) + + # > bt=c() + # > for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}} + + binom_testp1 = np.array([ + 0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551, + 0.2635138663069203, 0.2636951804161073, 0.2638162407564354, + 0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875, + 0.4295746560000003, 0.43473045988554, 0.4383309503172684, + 0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875, + 0.4927602499618962, 0.5096031427383425, 0.5189636628480, + 0.5249280070771274, 0.5290623300865124, 0.5320974248125793, + 0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808, + 0.5669248746708034, 0.576436455045805, 0.5824538812831795, + 0.5866053321547824, 0.589642781414643, 0.5919618019300193, + 0.593790427805202, 0.75390625, 0.590868349763505, 0.607983393277209, + 0.617303847446822, 0.623172512167948, 0.627208862156123, + 0.6301556891501057, 0.632401894928977, 0.6341708982290303, + 0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579, + 0.65392850011132, 0.657816519817211, 0.660650782947676, + 0.662808780346311, 0.6645068560246006, 0.7905273437499996, + 0.6478843304312477, 0.6640468318879372, 0.6727589686071775, + 0.6782129857784873, 0.681950188903695, 0.684671508668418, + 0.686741824999918, 0.688369886732168, 0.803619384765625, + 0.668716055304315, 0.684360013879534, 0.6927642396829181, + 0.6980155964704895, 0.701609591890657, 0.7042244320992127, + 0.7062125081341817, 0.707775152962577, 0.8145294189453126, + 0.686243374488305, 0.7013873696358975, 0.709501223328243, + 0.714563595144314, 0.718024953392931, 0.7205416252126137, + 0.722454130389843, 0.723956813292035, 0.823802947998047, + 0.701255953767043, 0.715928221686075, 0.723772209289768, + 0.7286603031173616, 0.7319999279787631, 0.7344267920995765, + 0.736270323773157, 0.737718376096348 + ]) + + res4_p1 = [stats.binom_test(v+1, v*k, 1./k) for v in range(1, 11) + for k in range(2, 11)] + res4_m1 = [stats.binom_test(v-1, v*k, 1./k) for v in range(1, 11) + for k in range(2, 11)] + + assert_almost_equal(res4_p1, binom_testp1, decimal=13) + assert_almost_equal(res4_m1, binom_testm1, decimal=13) + + +class TestTrim(object): + # test trim functions + def test_trim1(self): + a = np.arange(11) + assert_equal(np.sort(stats.trim1(a, 0.1)), np.arange(10)) + assert_equal(np.sort(stats.trim1(a, 0.2)), np.arange(9)) + assert_equal(np.sort(stats.trim1(a, 0.2, tail='left')), + np.arange(2, 11)) + assert_equal(np.sort(stats.trim1(a, 3/11., tail='left')), + np.arange(3, 11)) + assert_equal(stats.trim1(a, 1.0), []) + assert_equal(stats.trim1(a, 1.0, tail='left'), []) + + # empty input + assert_equal(stats.trim1([], 0.1), []) + assert_equal(stats.trim1([], 3/11., tail='left'), []) + assert_equal(stats.trim1([], 4/6.), []) + + def test_trimboth(self): + a = np.arange(11) + assert_equal(np.sort(stats.trimboth(a, 3/11.)), np.arange(3, 8)) + assert_equal(np.sort(stats.trimboth(a, 0.2)), + np.array([2, 3, 4, 5, 6, 7, 8])) + assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(6, 4), 0.2)), + np.arange(4, 20).reshape(4, 4)) + assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(4, 6).T, + 
2/6.)), + np.array([[2, 8, 14, 20], [3, 9, 15, 21]])) + assert_raises(ValueError, stats.trimboth, + np.arange(24).reshape(4, 6).T, 4/6.) + + # empty input + assert_equal(stats.trimboth([], 0.1), []) + assert_equal(stats.trimboth([], 3/11.), []) + assert_equal(stats.trimboth([], 4/6.), []) + + def test_trim_mean(self): + # don't use pre-sorted arrays + a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6]) + idx = np.array([3, 5, 0, 1, 2, 4]) + a2 = np.arange(24).reshape(6, 4)[idx, :] + a3 = np.arange(24).reshape(6, 4, order='F')[idx, :] + assert_equal(stats.trim_mean(a3, 2/6.), + np.array([2.5, 8.5, 14.5, 20.5])) + assert_equal(stats.trim_mean(a2, 2/6.), + np.array([10., 11., 12., 13.])) + idx4 = np.array([1, 0, 3, 2]) + a4 = np.arange(24).reshape(4, 6)[idx4, :] + assert_equal(stats.trim_mean(a4, 2/6.), + np.array([9., 10., 11., 12., 13., 14.])) + # shuffled arange(24) as array_like + a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23, + 20, 2, 14, 4, 13, 8, 3] + assert_equal(stats.trim_mean(a, 2/6.), 11.5) + assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5) + + # check axis argument + np.random.seed(1234) + a = np.random.randint(20, size=(5, 6, 4, 7)) + for axis in [0, 1, 2, 3, -1]: + res1 = stats.trim_mean(a, 2/6., axis=axis) + res2 = stats.trim_mean(np.rollaxis(a, axis), 2/6.) + assert_equal(res1, res2) + + res1 = stats.trim_mean(a, 2/6., axis=None) + res2 = stats.trim_mean(a.ravel(), 2/6.) + assert_equal(res1, res2) + + assert_raises(ValueError, stats.trim_mean, a, 0.6) + + # empty input + assert_equal(stats.trim_mean([], 0.0), np.nan) + assert_equal(stats.trim_mean([], 0.6), np.nan) + + +class TestSigmaClip(object): + def test_sigmaclip1(self): + a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5))) + fact = 4 # default + c, low, upp = stats.sigmaclip(a) + assert_(c.min() > low) + assert_(c.max() < upp) + assert_equal(low, c.mean() - fact*c.std()) + assert_equal(upp, c.mean() + fact*c.std()) + assert_equal(c.size, a.size) + + def test_sigmaclip2(self): + a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5))) + fact = 1.5 + c, low, upp = stats.sigmaclip(a, fact, fact) + assert_(c.min() > low) + assert_(c.max() < upp) + assert_equal(low, c.mean() - fact*c.std()) + assert_equal(upp, c.mean() + fact*c.std()) + assert_equal(c.size, 4) + assert_equal(a.size, 36) # check original array unchanged + + def test_sigmaclip3(self): + a = np.concatenate((np.linspace(9.5, 10.5, 11), + np.linspace(-100, -50, 3))) + fact = 1.8 + c, low, upp = stats.sigmaclip(a, fact, fact) + assert_(c.min() > low) + assert_(c.max() < upp) + assert_equal(low, c.mean() - fact*c.std()) + assert_equal(upp, c.mean() + fact*c.std()) + assert_equal(c, np.linspace(9.5, 10.5, 11)) + + def test_sigmaclip_result_attributes(self): + a = np.concatenate((np.linspace(9.5, 10.5, 11), + np.linspace(-100, -50, 3))) + fact = 1.8 + res = stats.sigmaclip(a, fact, fact) + attributes = ('clipped', 'lower', 'upper') + check_named_results(res, attributes) + + def test_std_zero(self): + # regression test #8632 + x = np.ones(10) + assert_equal(stats.sigmaclip(x)[0], x) + + +class TestFOneWay(object): + def test_trivial(self): + # A trivial test of stats.f_oneway, with F=0. + F, p = stats.f_oneway([0,2], [0,2]) + assert_equal(F, 0.0) + + def test_basic(self): + # Despite being a floating point calculation, this data should + # result in F being exactly 2.0. 
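+        # (A hand-worked sketch of why: the group means are 1 and 3 and the
+        # grand mean is 2, so SS_between = 2*(1-2)**2 + 2*(3-2)**2 = 4 with
+        # 1 degree of freedom, while SS_within = (0-1)**2 + (2-1)**2 +
+        # (2-3)**2 + (4-3)**2 = 4 with 2 degrees of freedom, giving
+        # F = (4/1) / (4/2) = 2.0 exactly, with no rounding involved.)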
+ F, p = stats.f_oneway([0,2], [2,4]) + assert_equal(F, 2.0) + + def test_large_integer_array(self): + a = np.array([655, 788], dtype=np.uint16) + b = np.array([789, 772], dtype=np.uint16) + F, p = stats.f_oneway(a, b) + assert_almost_equal(F, 0.77450216931805538) + + def test_result_attributes(self): + a = np.array([655, 788], dtype=np.uint16) + b = np.array([789, 772], dtype=np.uint16) + res = stats.f_oneway(a, b) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + def test_nist(self): + # These are the nist ANOVA files. They can be found at: + # https://www.itl.nist.gov/div898/strd/anova/anova.html + filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat', + 'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat', + 'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat'] + + for test_case in filenames: + rtol = 1e-7 + fname = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'data/nist_anova', test_case)) + with open(fname, 'r') as f: + content = f.read().split('\n') + certified = [line.split() for line in content[40:48] + if line.strip()] + dataf = np.loadtxt(fname, skiprows=60) + y, x = dataf.T + y = y.astype(int) + caty = np.unique(y) + f = float(certified[0][-1]) + + xlist = [x[y == i] for i in caty] + res = stats.f_oneway(*xlist) + + # With the hard test cases we relax the tolerance a bit. + hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat') + if test_case in hard_tc: + rtol = 1e-4 + + assert_allclose(res[0], f, rtol=rtol, + err_msg='Failing testcase: %s' % test_case) + + +class TestKruskal(object): + def test_simple(self): + x = [1] + y = [2] + h, p = stats.kruskal(x, y) + assert_equal(h, 1.0) + assert_approx_equal(p, stats.distributions.chi2.sf(h, 1)) + h, p = stats.kruskal(np.array(x), np.array(y)) + assert_equal(h, 1.0) + assert_approx_equal(p, stats.distributions.chi2.sf(h, 1)) + + def test_basic(self): + x = [1, 3, 5, 7, 9] + y = [2, 4, 6, 8, 10] + h, p = stats.kruskal(x, y) + assert_approx_equal(h, 3./11, significant=10) + assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1)) + h, p = stats.kruskal(np.array(x), np.array(y)) + assert_approx_equal(h, 3./11, significant=10) + assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1)) + + def test_simple_tie(self): + x = [1] + y = [1, 2] + h_uncorr = 1.5**2 + 2*2.25**2 - 12 + corr = 0.75 + expected = h_uncorr / corr # 0.5 + h, p = stats.kruskal(x, y) + # Since the expression is simple and the exact answer is 0.5, it + # should be safe to use assert_equal(). + assert_equal(h, expected) + + def test_another_tie(self): + x = [1, 1, 1, 2] + y = [2, 2, 2, 2] + h_uncorr = (12. / 8. / 9.) * 4 * (3**2 + 6**2) - 3 * 9 + corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8) + expected = h_uncorr / corr + h, p = stats.kruskal(x, y) + assert_approx_equal(h, expected) + + def test_three_groups(self): + # A test of stats.kruskal with three groups, with ties. + x = [1, 1, 1] + y = [2, 2, 2] + z = [2, 2] + h_uncorr = (12. / 8. / 9.) * (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9 # 5.0 + corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8) + expected = h_uncorr / corr # 7.0 + h, p = stats.kruskal(x, y, z) + assert_approx_equal(h, expected) + assert_approx_equal(p, stats.distributions.chi2.sf(h, 2)) + + def test_empty(self): + # A test of stats.kruskal with three groups, with ties. 
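+        # (Despite the comment above, this case actually exercises an empty
+        # third group: with z = [], both the statistic and the p-value come
+        # back as nan.)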
+ x = [1, 1, 1] + y = [2, 2, 2] + z = [] + assert_equal(stats.kruskal(x, y, z), (np.nan, np.nan)) + + def test_kruskal_result_attributes(self): + x = [1, 3, 5, 7, 9] + y = [2, 4, 6, 8, 10] + res = stats.kruskal(x, y) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + def test_nan_policy(self): + x = np.arange(10.) + x[9] = np.nan + assert_equal(stats.kruskal(x, x), (np.nan, np.nan)) + assert_almost_equal(stats.kruskal(x, x, nan_policy='omit'), (0.0, 1.0)) + assert_raises(ValueError, stats.kruskal, x, x, nan_policy='raise') + assert_raises(ValueError, stats.kruskal, x, x, nan_policy='foobar') + + +class TestCombinePvalues(object): + + def test_fisher(self): + # Example taken from https://en.wikipedia.org/wiki/Fisher%27s_exact_test#Example + xsq, p = stats.combine_pvalues([.01, .2, .3], method='fisher') + assert_approx_equal(p, 0.02156, significant=4) + + def test_stouffer(self): + Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer') + assert_approx_equal(p, 0.01651, significant=4) + + def test_stouffer2(self): + Z, p = stats.combine_pvalues([.5, .5, .5], method='stouffer') + assert_approx_equal(p, 0.5, significant=4) + + def test_weighted_stouffer(self): + Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer', + weights=np.ones(3)) + assert_approx_equal(p, 0.01651, significant=4) + + def test_weighted_stouffer2(self): + Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer', + weights=np.array((1, 4, 9))) + assert_approx_equal(p, 0.1464, significant=4) + + +class TestCdfDistanceValidation(object): + """ + Test that _cdf_distance() (via wasserstein_distance()) raises ValueErrors + for bad inputs. + """ + + def test_distinct_value_and_weight_lengths(self): + # When the number of weights does not match the number of values, + # a ValueError should be raised. + assert_raises(ValueError, stats.wasserstein_distance, + [1], [2], [4], [3, 1]) + assert_raises(ValueError, stats.wasserstein_distance, [1], [2], [1, 0]) + + def test_zero_weight(self): + # When a distribution is given zero weight, a ValueError should be + # raised. + assert_raises(ValueError, stats.wasserstein_distance, + [0, 1], [2], [0, 0]) + assert_raises(ValueError, stats.wasserstein_distance, + [0, 1], [2], [3, 1], [0]) + + def test_negative_weights(self): + # A ValueError should be raised if there are any negative weights. + assert_raises(ValueError, stats.wasserstein_distance, + [0, 1], [2, 2], [1, 1], [3, -1]) + + def test_empty_distribution(self): + # A ValueError should be raised when trying to measure the distance + # between something and nothing. + assert_raises(ValueError, stats.wasserstein_distance, [], [2, 2]) + assert_raises(ValueError, stats.wasserstein_distance, [1], []) + + def test_inf_weight(self): + # An inf weight is not valid. + assert_raises(ValueError, stats.wasserstein_distance, + [1, 2, 1], [1, 1], [1, np.inf, 1], [1, 1]) + + +class TestWassersteinDistance(object): + """ Tests for wasserstein_distance() output values. + """ + + def test_simple(self): + # For basic distributions, the value of the Wasserstein distance is + # straightforward. 
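+        # (For instance, in the first case below u = [0, 1] with weights
+        # [1, 1] is matched against v = [0]: the two empirical CDFs differ
+        # by 1/2 on [0, 1) and agree elsewhere, so the distance, the
+        # integral of |F_u - F_v|, is 0.5.)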
+ assert_almost_equal( + stats.wasserstein_distance([0, 1], [0], [1, 1], [1]), + .5) + assert_almost_equal(stats.wasserstein_distance( + [0, 1], [0], [3, 1], [1]), + .25) + assert_almost_equal(stats.wasserstein_distance( + [0, 2], [0], [1, 1], [1]), + 1) + assert_almost_equal(stats.wasserstein_distance( + [0, 1, 2], [1, 2, 3]), + 1) + + def test_same_distribution(self): + # Any distribution moved to itself should have a Wasserstein distance of + # zero. + assert_equal(stats.wasserstein_distance([1, 2, 3], [2, 1, 3]), 0) + assert_equal( + stats.wasserstein_distance([1, 1, 1, 4], [4, 1], + [1, 1, 1, 1], [1, 3]), + 0) + + def test_shift(self): + # If the whole distribution is shifted by x, then the Wasserstein + # distance should be x. + assert_almost_equal(stats.wasserstein_distance([0], [1]), 1) + assert_almost_equal(stats.wasserstein_distance([-5], [5]), 10) + assert_almost_equal( + stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]), + 10) + assert_almost_equal( + stats.wasserstein_distance([4.5, 6.7, 2.1], [4.6, 7, 9.2], + [3, 1, 1], [1, 3, 1]), + 2.5) + + def test_combine_weights(self): + # Assigning a weight w to a value is equivalent to including that value + # w times in the value array with weight of 1. + assert_almost_equal( + stats.wasserstein_distance( + [0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4], + [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]), + stats.wasserstein_distance([5, 0, 1], [0, 4, 3], + [1, 2, 4], [1, 2, 4])) + + def test_collapse(self): + # Collapsing a distribution to a point distribution at zero is + # equivalent to taking the average of the absolute values of the values. + u = np.arange(-10, 30, 0.3) + v = np.zeros_like(u) + assert_almost_equal( + stats.wasserstein_distance(u, v), + np.mean(np.abs(u))) + + u_weights = np.arange(len(u)) + v_weights = u_weights[::-1] + assert_almost_equal( + stats.wasserstein_distance(u, v, u_weights, v_weights), + np.average(np.abs(u), weights=u_weights)) + + def test_zero_weight(self): + # Values with zero weight have no impact on the Wasserstein distance. + assert_almost_equal( + stats.wasserstein_distance([1, 2, 100000], [1, 1], + [1, 1, 0], [1, 1]), + stats.wasserstein_distance([1, 2], [1, 1], [1, 1], [1, 1])) + + def test_inf_values(self): + # Inf values can lead to an inf distance or trigger a RuntimeWarning + # (and return NaN) if the distance is undefined. + assert_equal( + stats.wasserstein_distance([1, 2, np.inf], [1, 1]), + np.inf) + assert_equal( + stats.wasserstein_distance([1, 2, np.inf], [-np.inf, 1]), + np.inf) + assert_equal( + stats.wasserstein_distance([1, -np.inf, np.inf], [1, 1]), + np.inf) + with suppress_warnings() as sup: + r = sup.record(RuntimeWarning, "invalid value*") + assert_equal( + stats.wasserstein_distance([1, 2, np.inf], [np.inf, 1]), + np.nan) + + +class TestEnergyDistance(object): + """ Tests for energy_distance() output values. + """ + + def test_simple(self): + # For basic distributions, the value of the energy distance is + # straightforward. + assert_almost_equal( + stats.energy_distance([0, 1], [0], [1, 1], [1]), + np.sqrt(2) * .5) + assert_almost_equal(stats.energy_distance( + [0, 1], [0], [3, 1], [1]), + np.sqrt(2) * .25) + assert_almost_equal(stats.energy_distance( + [0, 2], [0], [1, 1], [1]), + 2 * .5) + assert_almost_equal( + stats.energy_distance([0, 1, 2], [1, 2, 3]), + np.sqrt(2) * (3*(1./3**2))**.5) + + def test_same_distribution(self): + # Any distribution moved to itself should have a energy distance of + # zero. 
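+        # (In the second case below, [1, 1, 1, 4] with unit weights and
+        # [4, 1] with weights [1, 3] both put mass 3/4 on the value 1 and
+        # mass 1/4 on the value 4, i.e. they are the same distribution.)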
+ assert_equal(stats.energy_distance([1, 2, 3], [2, 1, 3]), 0) + assert_equal( + stats.energy_distance([1, 1, 1, 4], [4, 1], [1, 1, 1, 1], [1, 3]), + 0) + + def test_shift(self): + # If a single-point distribution is shifted by x, then the energy + # distance should be sqrt(2) * sqrt(x). + assert_almost_equal(stats.energy_distance([0], [1]), np.sqrt(2)) + assert_almost_equal( + stats.energy_distance([-5], [5]), + np.sqrt(2) * 10**.5) + + def test_combine_weights(self): + # Assigning a weight w to a value is equivalent to including that value + # w times in the value array with weight of 1. + assert_almost_equal( + stats.energy_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4], + [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]), + stats.energy_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4])) + + def test_zero_weight(self): + # Values with zero weight have no impact on the energy distance. + assert_almost_equal( + stats.energy_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]), + stats.energy_distance([1, 2], [1, 1], [1, 1], [1, 1])) + + def test_inf_values(self): + # Inf values can lead to an inf distance or trigger a RuntimeWarning + # (and return NaN) if the distance is undefined. + assert_equal(stats.energy_distance([1, 2, np.inf], [1, 1]), np.inf) + assert_equal( + stats.energy_distance([1, 2, np.inf], [-np.inf, 1]), + np.inf) + assert_equal( + stats.energy_distance([1, -np.inf, np.inf], [1, 1]), + np.inf) + with suppress_warnings() as sup: + r = sup.record(RuntimeWarning, "invalid value*") + assert_equal( + stats.energy_distance([1, 2, np.inf], [np.inf, 1]), + np.nan) + + +class TestBrunnerMunzel(object): + # Data from (Lumley, 1996) + X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1] + Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + significant = 13 + + def test_brunnermunzel_one_sided(self): + # Results are compared with R's lawstat package. + u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='less') + u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='greater') + u3, p3 = stats.brunnermunzel(self.X, self.Y, alternative='greater') + u4, p4 = stats.brunnermunzel(self.Y, self.X, alternative='less') + + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(p3, p4, significant=self.significant) + assert_(p1 != p3) + assert_approx_equal(u1, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(u3, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u4, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0028931043330757342, + significant=self.significant) + assert_approx_equal(p3, 0.99710689566692423, + significant=self.significant) + + def test_brunnermunzel_two_sided(self): + # Results are compared with R's lawstat package. 
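+        # (The null hypothesis here is stochastic equality,
+        # P(X < Y) + 0.5 * P(X = Y) = 0.5, so swapping X and Y simply
+        # flips the sign of the statistic while the two-sided p-value is
+        # unchanged, which is what the symmetry checks below verify.)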
+ u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='two-sided') + u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='two-sided') + + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(u1, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0057862086661515377, + significant=self.significant) + + def test_brunnermunzel_default(self): + # The default value for alternative is two-sided + u1, p1 = stats.brunnermunzel(self.X, self.Y) + u2, p2 = stats.brunnermunzel(self.Y, self.X) + + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(u1, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0057862086661515377, + significant=self.significant) + + def test_brunnermunzel_alternative_error(self): + alternative = "error" + distribution = "t" + nan_policy = "propagate" + assert_(alternative not in ["two-sided", "greater", "less"]) + assert_raises(ValueError, + stats.brunnermunzel, + self.X, + self.Y, + alternative, + distribution, + nan_policy) + + def test_brunnermunzel_distribution_norm(self): + u1, p1 = stats.brunnermunzel(self.X, self.Y, distribution="normal") + u2, p2 = stats.brunnermunzel(self.Y, self.X, distribution="normal") + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(u1, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0017041417600383024, + significant=self.significant) + + def test_brunnermunzel_distribution_error(self): + alternative = "two-sided" + distribution = "error" + nan_policy = "propagate" + assert_(alternative not in ["t", "normal"]) + assert_raises(ValueError, + stats.brunnermunzel, + self.X, + self.Y, + alternative, + distribution, + nan_policy) + + def test_brunnermunzel_empty_imput(self): + u1, p1 = stats.brunnermunzel(self.X, []) + u2, p2 = stats.brunnermunzel([], self.Y) + u3, p3 = stats.brunnermunzel([], []) + + assert_equal(u1, np.nan) + assert_equal(p1, np.nan) + assert_equal(u2, np.nan) + assert_equal(p2, np.nan) + assert_equal(u3, np.nan) + assert_equal(p3, np.nan) + + def test_brunnermunzel_nan_input_propagate(self): + X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan] + Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + u1, p1 = stats.brunnermunzel(X, Y, nan_policy="propagate") + u2, p2 = stats.brunnermunzel(Y, X, nan_policy="propagate") + + assert_equal(u1, np.nan) + assert_equal(p1, np.nan) + assert_equal(u2, np.nan) + assert_equal(p2, np.nan) + + def test_brunnermunzel_nan_input_raise(self): + X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan] + Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + alternative = "two-sided" + distribution = "t" + nan_policy = "raise" + + assert_raises(ValueError, + stats.brunnermunzel, + X, + Y, + alternative, + distribution, + nan_policy) + assert_raises(ValueError, + stats.brunnermunzel, + Y, + X, + alternative, + distribution, + nan_policy) + + def test_brunnermunzel_nan_input_omit(self): + X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan] + Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + u1, p1 = stats.brunnermunzel(X, Y, nan_policy="omit") + u2, p2 = stats.brunnermunzel(Y, X, nan_policy="omit") + + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(u1, 3.1374674823029505, + 
significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0057862086661515377, + significant=self.significant) + + +class TestRatioUniforms(object): + """ Tests for rvs_ratio_uniforms. + """ + def test_rv_generation(self): + # use KS test to check distribution of rvs + # normal distribution + f = stats.norm.pdf + v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound + rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500, + random_state=12345) + assert_equal(stats.kstest(rvs, 'norm')[1] > 0.25, True) + + # exponential distribution + rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1, + vmin=0, vmax=2*np.exp(-1), + size=1000, random_state=12345) + assert_equal(stats.kstest(rvs, 'expon')[1] > 0.25, True) + + def test_shape(self): + # test shape of return value depending on size parameter + f = stats.norm.pdf + v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound + + r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=3, + random_state=1234) + r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3,), + random_state=1234) + r3 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 1), + random_state=1234) + assert_equal(r1, r2) + assert_equal(r2, r3.flatten()) + assert_equal(r1.shape, (3,)) + assert_equal(r3.shape, (3, 1)) + + r4 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 3, 3), + random_state=12) + r5 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=27, + random_state=12) + assert_equal(r4.flatten(), r5) + assert_equal(r4.shape, (3, 3, 3)) + + r6 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, random_state=1234) + r7 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=1, + random_state=1234) + r8 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(1, ), + random_state=1234) + assert_equal(r6, r7) + assert_equal(r7, r8) + + def test_random_state(self): + f = stats.norm.pdf + v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound + np.random.seed(1234) + r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4)) + r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4), + random_state=1234) + assert_equal(r1, r2) + + def test_exceptions(self): + f = stats.norm.pdf + # need vmin < vmax + assert_raises(ValueError, + stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=3, vmax=1) + assert_raises(ValueError, + stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=1, vmax=1) + # need umax > 0 + assert_raises(ValueError, + stats.rvs_ratio_uniforms, pdf=f, umax=-1, vmin=1, vmax=1) + assert_raises(ValueError, + stats.rvs_ratio_uniforms, pdf=f, umax=0, vmin=1, vmax=1) + + def test_gig(self): + # test generalized inverse gaussian distribution + p, b = 0.5, 0.75 + + def gig_mode(p, b): + return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p) + + def gig_pdf(x, p, b): + c = 1/(2 * kv(p, b)) + return c * x**(p - 1) * np.exp(- b * (x + 1/x) / 2) + + def gig_cdf(x, p, b): + x = np.atleast_1d(x) + cdf = [quad(gig_pdf, 0, xi, args=(p, b))[0] for xi in x] + return np.array(cdf) + + s = kv(p+2, b) / kv(p, b) + vmax = np.sqrt(gig_pdf(gig_mode(p + 2, b), p + 2, b) * s) + umax = np.sqrt(gig_pdf(gig_mode(p, b), p, b)) + + rvs = stats.rvs_ratio_uniforms(lambda x: gig_pdf(x, p, b), umax, + 0, vmax, random_state=1234, size=1500) + + assert_equal(stats.kstest(rvs, lambda x: gig_cdf(x, p, b))[1] > 0.25, + True) diff --git 
a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_stats.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_stats.pyc new file mode 100644 index 0000000..a02118e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_stats.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_tukeylambda_stats.py b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_tukeylambda_stats.py new file mode 100644 index 0000000..229d584 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_tukeylambda_stats.py @@ -0,0 +1,88 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +from numpy.testing import assert_allclose, assert_equal + +from scipy.stats._tukeylambda_stats import (tukeylambda_variance, + tukeylambda_kurtosis) + + +def test_tukeylambda_stats_known_exact(): + """Compare results with some known exact formulas.""" + # Some exact values of the Tukey Lambda variance and kurtosis: + # lambda var kurtosis + # 0 pi**2/3 6/5 (logistic distribution) + # 0.5 4 - pi (5/3 - pi/2)/(pi/4 - 1)**2 - 3 + # 1 1/3 -6/5 (uniform distribution on (-1,1)) + # 2 1/12 -6/5 (uniform distribution on (-1/2, 1/2)) + + # lambda = 0 + var = tukeylambda_variance(0) + assert_allclose(var, np.pi**2 / 3, atol=1e-12) + kurt = tukeylambda_kurtosis(0) + assert_allclose(kurt, 1.2, atol=1e-10) + + # lambda = 0.5 + var = tukeylambda_variance(0.5) + assert_allclose(var, 4 - np.pi, atol=1e-12) + kurt = tukeylambda_kurtosis(0.5) + desired = (5./3 - np.pi/2) / (np.pi/4 - 1)**2 - 3 + assert_allclose(kurt, desired, atol=1e-10) + + # lambda = 1 + var = tukeylambda_variance(1) + assert_allclose(var, 1.0 / 3, atol=1e-12) + kurt = tukeylambda_kurtosis(1) + assert_allclose(kurt, -1.2, atol=1e-10) + + # lambda = 2 + var = tukeylambda_variance(2) + assert_allclose(var, 1.0 / 12, atol=1e-12) + kurt = tukeylambda_kurtosis(2) + assert_allclose(kurt, -1.2, atol=1e-10) + + +def test_tukeylambda_stats_mpmath(): + """Compare results with some values that were computed using mpmath.""" + a10 = dict(atol=1e-10, rtol=0) + a12 = dict(atol=1e-12, rtol=0) + data = [ + # lambda variance kurtosis + [-0.1, 4.78050217874253547, 3.78559520346454510], + [-0.0649, 4.16428023599895777, 2.52019675947435718], + [-0.05, 3.93672267890775277, 2.13129793057777277], + [-0.001, 3.30128380390964882, 1.21452460083542988], + [0.001, 3.27850775649572176, 1.18560634779287585], + [0.03125, 2.95927803254615800, 0.804487555161819980], + [0.05, 2.78281053405464501, 0.611604043886644327], + [0.0649, 2.65282386754100551, 0.476834119532774540], + [1.2, 0.242153920578588346, -1.23428047169049726], + [10.0, 0.00095237579757703597, 2.37810697355144933], + [20.0, 0.00012195121951131043, 7.37654321002709531], + ] + + for lam, var_expected, kurt_expected in data: + var = tukeylambda_variance(lam) + assert_allclose(var, var_expected, **a12) + kurt = tukeylambda_kurtosis(lam) + assert_allclose(kurt, kurt_expected, **a10) + + # Test with vector arguments (most of the other tests are for single + # values). 
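+    # (zip(*data) transposes the table above into three tuples: the
+    # lambdas, the expected variances and the expected kurtoses, so the
+    # same reference values also exercise the vectorized code path.)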
+ lam, var_expected, kurt_expected = zip(*data) + var = tukeylambda_variance(lam) + assert_allclose(var, var_expected, **a12) + kurt = tukeylambda_kurtosis(lam) + assert_allclose(kurt, kurt_expected, **a10) + + +def test_tukeylambda_stats_invalid(): + """Test values of lambda outside the domains of the functions.""" + lam = [-1.0, -0.5] + var = tukeylambda_variance(lam) + assert_equal(var, np.array([np.nan, np.inf])) + + lam = [-1.0, -0.25] + kurt = tukeylambda_kurtosis(lam) + assert_equal(kurt, np.array([np.nan, np.inf])) + diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_tukeylambda_stats.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_tukeylambda_stats.pyc new file mode 100644 index 0000000..ebc0468 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/tests/test_tukeylambda_stats.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/vonmises.py b/project/venv/lib/python2.7/site-packages/scipy/stats/vonmises.py new file mode 100644 index 0000000..59eec8c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/stats/vonmises.py @@ -0,0 +1,46 @@ +from __future__ import division, print_function, absolute_import + +import numpy as np +import scipy.stats +from scipy.special import i0 + + +def von_mises_cdf_series(k,x,p): + x = float(x) + s = np.sin(x) + c = np.cos(x) + sn = np.sin(p*x) + cn = np.cos(p*x) + R = 0 + V = 0 + for n in range(p-1,0,-1): + sn, cn = sn*c - cn*s, cn*c + sn*s + R = 1./(2*n/k + R) + V = R*(sn/n+V) + + return 0.5+x/(2*np.pi) + V/np.pi + + +def von_mises_cdf_normalapprox(k, x): + b = np.sqrt(2/np.pi)*np.exp(k)/i0(k) + z = b*np.sin(x/2.) + return scipy.stats.norm.cdf(z) + + +def von_mises_cdf(k,x): + ix = 2*np.pi*np.round(x/(2*np.pi)) + x = x-ix + k = float(k) + + # These values should give 12 decimal digits + CK = 50 + a = [28., 0.5, 100., 5.0] + + if k < CK: + p = int(np.ceil(a[0]+a[1]*k-a[2]/(k+a[3]))) + + F = np.clip(von_mises_cdf_series(k,x,p),0,1) + else: + F = von_mises_cdf_normalapprox(k, x) + + return F+ix diff --git a/project/venv/lib/python2.7/site-packages/scipy/stats/vonmises.pyc b/project/venv/lib/python2.7/site-packages/scipy/stats/vonmises.pyc new file mode 100644 index 0000000..655b17d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/stats/vonmises.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/scipy/version.py b/project/venv/lib/python2.7/site-packages/scipy/version.py new file mode 100644 index 0000000..4929407 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/scipy/version.py @@ -0,0 +1,10 @@ + +# THIS FILE IS GENERATED FROM SCIPY SETUP.PY +short_version = '1.2.1' +version = '1.2.1' +full_version = '1.2.1' +git_revision = 'c3fa90dcfcaef71658744c73578e9e7d915c81e9' +release = True + +if not release: + version = full_version diff --git a/project/venv/lib/python2.7/site-packages/scipy/version.pyc b/project/venv/lib/python2.7/site-packages/scipy/version.pyc new file mode 100644 index 0000000..7680580 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/scipy/version.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/INSTALLER b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git 
a/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/LICENSE b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/LICENSE new file mode 100644 index 0000000..6e0693b --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/LICENSE @@ -0,0 +1,19 @@ +Copyright (C) 2016 Jason R Coombs <jaraco@jaraco.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/METADATA new file mode 100644 index 0000000..a0eee33 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/METADATA @@ -0,0 +1,77 @@ +Metadata-Version: 2.1 +Name: setuptools +Version: 41.0.0 +Summary: Easily download, build, install, upgrade, and uninstall Python packages +Home-page: https://github.com/pypa/setuptools +Author: Python Packaging Authority +Author-email: distutils-sig@python.org +License: UNKNOWN +Project-URL: Documentation, https://setuptools.readthedocs.io/ +Keywords: CPAN PyPI distutils eggs package management +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Archiving :: Packaging +Classifier: Topic :: System :: Systems Administration +Classifier: Topic :: Utilities +Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* +Description-Content-Type: text/x-rst; charset=UTF-8 +Provides-Extra: certs +Requires-Dist: certifi (==2016.9.26) ; extra == 'certs' +Provides-Extra: ssl +Requires-Dist: wincertstore (==0.2) ; (sys_platform=='win32') and extra == 'ssl' + +.. image:: https://img.shields.io/pypi/v/setuptools.svg + :target: https://pypi.org/project/setuptools + +.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg + :target: https://setuptools.readthedocs.io + +.. 
image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20CI&logo=travis&logoColor=white + :target: https://travis-ci.org/pypa/setuptools + +.. image:: https://img.shields.io/appveyor/ci/pypa/setuptools/master.svg?label=Windows%20CI&logo=appveyor&logoColor=white + :target: https://ci.appveyor.com/project/pypa/setuptools/branch/master + +.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white + :target: https://codecov.io/gh/pypa/setuptools + +.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat + :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme + +.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg + +See the `Installation Instructions +<https://packaging.python.org/installing/>`_ in the Python Packaging +User's Guide for instructions on installing, upgrading, and uninstalling +Setuptools. + +Questions and comments should be directed to the `distutils-sig +mailing list <http://mail.python.org/pipermail/distutils-sig/>`_. +Bug reports and especially tested patches may be +submitted directly to the `bug tracker +<https://github.com/pypa/setuptools/issues>`_. + +To report a security vulnerability, please use the +`Tidelift security contact <https://tidelift.com/security>`_. +Tidelift will coordinate the fix and disclosure. + + +Code of Conduct +--------------- + +Everyone interacting in the setuptools project's codebases, issue trackers, +chat rooms, and mailing lists is expected to follow the +`PyPA Code of Conduct <https://www.pypa.io/en/latest/code-of-conduct/>`_. + + diff --git a/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/RECORD new file mode 100644 index 0000000..8a25f9c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/RECORD @@ -0,0 +1,186 @@ +../../../bin/easy_install,sha256=zO3j6u_gu2A9yJ63JZ_G1Do0Cqx0tNavip7fV1BEk-0,300 +../../../bin/easy_install-2.7,sha256=zO3j6u_gu2A9yJ63JZ_G1Do0Cqx0tNavip7fV1BEk-0,300 +easy_install.py,sha256=MDC9vt5AxDsXX5qcKlBz2TnW6Tpuv_AobnfhCJ9X3PM,126 +easy_install.pyc,, +pkg_resources/__init__.py,sha256=pp8b7Asoaheso-q8lIMS1tpQp88xjAkYgCIRX-JsPlE,107982 +pkg_resources/__init__.pyc,, +pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pkg_resources/_vendor/__init__.pyc,, +pkg_resources/_vendor/appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701 +pkg_resources/_vendor/appdirs.pyc,, +pkg_resources/_vendor/packaging/__about__.py,sha256=zkcCPTN_6TcLW0Nrlg0176-R1QQ_WVPTm8sz1R4-HjM,720 +pkg_resources/_vendor/packaging/__about__.pyc,, +pkg_resources/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513 +pkg_resources/_vendor/packaging/__init__.pyc,, +pkg_resources/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860 +pkg_resources/_vendor/packaging/_compat.pyc,, +pkg_resources/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416 +pkg_resources/_vendor/packaging/_structures.pyc,, +pkg_resources/_vendor/packaging/markers.py,sha256=uEcBBtGvzqltgnArqb9c4RrcInXezDLos14zbBHhWJo,8248 +pkg_resources/_vendor/packaging/markers.pyc,, +pkg_resources/_vendor/packaging/requirements.py,sha256=SikL2UynbsT0qtY9ltqngndha_sfo0w6XGFhAhoSoaQ,4355 +pkg_resources/_vendor/packaging/requirements.pyc,, 
+pkg_resources/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025 +pkg_resources/_vendor/packaging/specifiers.pyc,, +pkg_resources/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421 +pkg_resources/_vendor/packaging/utils.pyc,, +pkg_resources/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556 +pkg_resources/_vendor/packaging/version.pyc,, +pkg_resources/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055 +pkg_resources/_vendor/pyparsing.pyc,, +pkg_resources/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 +pkg_resources/_vendor/six.pyc,, +pkg_resources/extern/__init__.py,sha256=cHiEfHuLmm6rs5Ve_ztBfMI7Lr31vss-D4wkqF5xzlI,2498 +pkg_resources/extern/__init__.pyc,, +pkg_resources/py31compat.py,sha256=-WQ0e4c3RG_acdhwC3gLiXhP_lg4G5q7XYkZkQg0gxU,558 +pkg_resources/py31compat.pyc,, +setuptools-41.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +setuptools-41.0.0.dist-info/LICENSE,sha256=wyo6w5WvYyHv0ovnPQagDw22q4h9HCHU_sRhKNIFbVo,1078 +setuptools-41.0.0.dist-info/METADATA,sha256=G8l1-Bj-bEaQybjeIKADQaocUIsM4JBoxn9OreadXho,3303 +setuptools-41.0.0.dist-info/RECORD,, +setuptools-41.0.0.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110 +setuptools-41.0.0.dist-info/dependency_links.txt,sha256=HlkCFkoK5TbZ5EMLbLKYhLcY_E31kBWD8TqW2EgmatQ,239 +setuptools-41.0.0.dist-info/entry_points.txt,sha256=jBqCYDlVjl__sjYFGXo1JQGIMAYFJE-prYWUtnMZEew,2990 +setuptools-41.0.0.dist-info/top_level.txt,sha256=2HUXVVwA4Pff1xgTFr3GsTXXKaPaO6vlG6oNJ_4u4Tg,38 +setuptools-41.0.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +setuptools/__init__.py,sha256=WBpCcn2lvdckotabeae1TTYonPOcgCIF3raD2zRWzBc,7283 +setuptools/__init__.pyc,, +setuptools/_deprecation_warning.py,sha256=jU9-dtfv6cKmtQJOXN8nP1mm7gONw5kKEtiPtbwnZyI,218 +setuptools/_deprecation_warning.pyc,, +setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +setuptools/_vendor/__init__.pyc,, +setuptools/_vendor/packaging/__about__.py,sha256=zkcCPTN_6TcLW0Nrlg0176-R1QQ_WVPTm8sz1R4-HjM,720 +setuptools/_vendor/packaging/__about__.pyc,, +setuptools/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513 +setuptools/_vendor/packaging/__init__.pyc,, +setuptools/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860 +setuptools/_vendor/packaging/_compat.pyc,, +setuptools/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416 +setuptools/_vendor/packaging/_structures.pyc,, +setuptools/_vendor/packaging/markers.py,sha256=Gvpk9EY20yKaMTiKgQZ8yFEEpodqVgVYtfekoic1Yts,8239 +setuptools/_vendor/packaging/markers.pyc,, +setuptools/_vendor/packaging/requirements.py,sha256=t44M2HVWtr8phIz2OhnILzuGT3rTATaovctV1dpnVIg,4343 +setuptools/_vendor/packaging/requirements.pyc,, +setuptools/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025 +setuptools/_vendor/packaging/specifiers.pyc,, +setuptools/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421 +setuptools/_vendor/packaging/utils.pyc,, +setuptools/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556 +setuptools/_vendor/packaging/version.pyc,, +setuptools/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055 +setuptools/_vendor/pyparsing.pyc,, 
+setuptools/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 +setuptools/_vendor/six.pyc,, +setuptools/archive_util.py,sha256=kw8Ib_lKjCcnPKNbS7h8HztRVK0d5RacU3r_KRdVnmM,6592 +setuptools/archive_util.pyc,, +setuptools/build_meta.py,sha256=dBHAVqIekvB7PkID80JkAQRM-o7JFPpxdYonS2TC83U,8911 +setuptools/build_meta.pyc,, +setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 +setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752 +setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 +setuptools/command/__init__.py,sha256=NWzJ0A1BEengZpVeqUyWLNm2bk4P3F4iL5QUErHy7kA,594 +setuptools/command/__init__.pyc,, +setuptools/command/alias.py,sha256=KjpE0sz_SDIHv3fpZcIQK-sCkJz-SrC6Gmug6b9Nkc8,2426 +setuptools/command/alias.pyc,, +setuptools/command/bdist_egg.py,sha256=be-IBpr1zhS9i6GjKANJgzkbH3ChImdWY7S-j0r2BK8,18167 +setuptools/command/bdist_egg.pyc,, +setuptools/command/bdist_rpm.py,sha256=B7l0TnzCGb-0nLlm6rS00jWLkojASwVmdhW2w5Qz_Ak,1508 +setuptools/command/bdist_rpm.pyc,, +setuptools/command/bdist_wininst.py,sha256=_6dz3lpB1tY200LxKPLM7qgwTCceOMgaWFF-jW2-pm0,637 +setuptools/command/bdist_wininst.pyc,, +setuptools/command/build_clib.py,sha256=bQ9aBr-5ZSO-9fGsGsDLz0mnnFteHUZnftVLkhvHDq0,4484 +setuptools/command/build_clib.pyc,, +setuptools/command/build_ext.py,sha256=81CTgsqjBjNl_HOgCJ1lQ5vv1NIM3RBpcoVGpqT4N1M,12897 +setuptools/command/build_ext.pyc,, +setuptools/command/build_py.py,sha256=yWyYaaS9F3o9JbIczn064A5g1C5_UiKRDxGaTqYbtLE,9596 +setuptools/command/build_py.pyc,, +setuptools/command/develop.py,sha256=MQlnGS6uP19erK2JCNOyQYoYyquk3PADrqrrinqqLtA,8184 +setuptools/command/develop.pyc,, +setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960 +setuptools/command/dist_info.pyc,, +setuptools/command/easy_install.py,sha256=telww7CuPsoTtvlpY-ktnZGT85cZ6xGCGZa0vHvFJ-Q,87273 +setuptools/command/easy_install.pyc,, +setuptools/command/egg_info.py,sha256=w73EdxYSOk2gsaAiHGL2dZrCldoPiuRr2eTfqcFvCds,25570 +setuptools/command/egg_info.pyc,, +setuptools/command/install.py,sha256=a0EZpL_A866KEdhicTGbuyD_TYl1sykfzdrri-zazT4,4683 +setuptools/command/install.pyc,, +setuptools/command/install_egg_info.py,sha256=bMgeIeRiXzQ4DAGPV1328kcjwQjHjOWU4FngAWLV78Q,2203 +setuptools/command/install_egg_info.pyc,, +setuptools/command/install_lib.py,sha256=11mxf0Ch12NsuYwS8PHwXBRvyh671QAM4cTRh7epzG0,3840 +setuptools/command/install_lib.pyc,, +setuptools/command/install_scripts.py,sha256=UD0rEZ6861mTYhIdzcsqKnUl8PozocXWl9VBQ1VTWnc,2439 +setuptools/command/install_scripts.pyc,, +setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628 +setuptools/command/py36compat.py,sha256=SzjZcOxF7zdFUT47Zv2n7AM3H8koDys_0OpS-n9gIfc,4986 +setuptools/command/py36compat.pyc,, +setuptools/command/register.py,sha256=LO3MvYKPE8dN1m-KkrBRHC68ZFoPvA_vI8Xgp7vv6zI,534 +setuptools/command/register.pyc,, +setuptools/command/rotate.py,sha256=co5C1EkI7P0GGT6Tqz-T2SIj2LBJTZXYELpmao6d4KQ,2164 +setuptools/command/rotate.pyc,, +setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658 +setuptools/command/saveopts.pyc,, +setuptools/command/sdist.py,sha256=gr5hFrDzUtGfp_0tu0sllzIyr3jMQegIkFmlDauQJxw,7388 +setuptools/command/sdist.pyc,, +setuptools/command/setopt.py,sha256=NTWDyx-gjDF-txf4dO577s7LOzHVoKR0Mq33rFxaRr8,5085 +setuptools/command/setopt.pyc,, +setuptools/command/test.py,sha256=fSl5OsZWSmFR3QJRvyy2OxbcYkuIkPvykWNOhFvAcUA,9228 +setuptools/command/test.pyc,, 
+setuptools/command/upload.py,sha256=GxtNkIl7SA0r8mACkbDcSCN1m2_WPppK9gZXJmQSiow,6811 +setuptools/command/upload.pyc,, +setuptools/command/upload_docs.py,sha256=oXiGplM_cUKLwE4CWWw98RzCufAu8tBhMC97GegFcms,7311 +setuptools/command/upload_docs.pyc,, +setuptools/config.py,sha256=lz19l1AtoHctpp1_tbYZv176nrEj4Gpf7ykNIYTIkAQ,20425 +setuptools/config.pyc,, +setuptools/dep_util.py,sha256=fgixvC1R7sH3r13ktyf7N0FALoqEXL1cBarmNpSEoWg,935 +setuptools/dep_util.pyc,, +setuptools/depends.py,sha256=hC8QIDcM3VDpRXvRVA6OfL9AaQfxvhxHcN_w6sAyNq8,5837 +setuptools/depends.pyc,, +setuptools/dist.py,sha256=qYPmmVlLPWCLHrILR0J74bqoYgTSZh5ocLeyRKqnVyU,49913 +setuptools/dist.pyc,, +setuptools/extension.py,sha256=uc6nHI-MxwmNCNPbUiBnybSyqhpJqjbhvOQ-emdvt_E,1729 +setuptools/extension.pyc,, +setuptools/extern/__init__.py,sha256=TxeNKFMSfBMzBpBDiHx8Dh3RzsdVmvWaXhtZ03DZMs0,2499 +setuptools/extern/__init__.pyc,, +setuptools/glibc.py,sha256=X64VvGPL2AbURKwYRsWJOXXGAYOiF_v2qixeTkAULuU,3146 +setuptools/glibc.pyc,, +setuptools/glob.py,sha256=o75cHrOxYsvn854thSxE0x9k8JrKDuhP_rRXlVB00Q4,5084 +setuptools/glob.pyc,, +setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 +setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264 +setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 +setuptools/launch.py,sha256=sd7ejwhBocCDx_wG9rIs0OaZ8HtmmFU8ZC6IR_S0Lvg,787 +setuptools/launch.pyc,, +setuptools/lib2to3_ex.py,sha256=t5e12hbR2pi9V4ezWDTB4JM-AISUnGOkmcnYHek3xjg,2013 +setuptools/lib2to3_ex.pyc,, +setuptools/monkey.py,sha256=FGc9fffh7gAxMLFmJs2DW_OYWpBjkdbNS2n14UAK4NA,5264 +setuptools/monkey.pyc,, +setuptools/msvc.py,sha256=uuRFaZzjJt5Fv3ZmyKUUuLtjx12_8G9RILigGec4irI,40838 +setuptools/msvc.pyc,, +setuptools/namespaces.py,sha256=F0Nrbv8KCT2OrO7rwa03om4N4GZKAlnce-rr-cgDQa8,3199 +setuptools/namespaces.pyc,, +setuptools/package_index.py,sha256=zuL2nOfEYssSp9zvn2zjla0MAW5Om_0abbB5zKH72h4,40594 +setuptools/package_index.pyc,, +setuptools/pep425tags.py,sha256=o_D_WVeWcXZiI2xjPSg7pouGOvaWRgGRxEDK9DzAXIA,10861 +setuptools/pep425tags.pyc,, +setuptools/py27compat.py,sha256=3mwxRMDk5Q5O1rSXOERbQDXhFqwDJhhUitfMW_qpUCo,536 +setuptools/py27compat.pyc,, +setuptools/py31compat.py,sha256=REvrUBibUHgqI9S-ww0C9bhU-n8PyaQ8Slr1_NRxaaE,820 +setuptools/py31compat.pyc,, +setuptools/py33compat.py,sha256=OubjldHJH1KGE1CKt1kRU-Q55keftHT3ea1YoL0ZSco,1195 +setuptools/py33compat.pyc,, +setuptools/sandbox.py,sha256=9UbwfEL5QY436oMI1LtFWohhoZ-UzwHvGyZjUH_qhkw,14276 +setuptools/sandbox.pyc,, +setuptools/script (dev).tmpl,sha256=RUzQzCQUaXtwdLtYHWYbIQmOaES5Brqq1FvUA_tu-5I,218 +setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138 +setuptools/site-patch.py,sha256=OumkIHMuoSenRSW1382kKWI1VAwxNE86E5W8iDd34FY,2302 +setuptools/site-patch.pyc,, +setuptools/ssl_support.py,sha256=nLjPUBBw7RTTx6O4RJZ5eAMGgjJG8beiDbkFXDZpLuM,8493 +setuptools/ssl_support.pyc,, +setuptools/unicode_utils.py,sha256=NOiZ_5hD72A6w-4wVj8awHFM3n51Kmw1Ic_vx15XFqw,996 +setuptools/unicode_utils.pyc,, +setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144 +setuptools/version.pyc,, +setuptools/wheel.py,sha256=94uqXsOaKt91d9hW5z6ZppZmNSs_nO66R4uiwhcr4V0,8094 +setuptools/wheel.pyc,, +setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714 +setuptools/windows_support.pyc,, diff --git a/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/WHEEL new file mode 100644 index 
0000000..c8240f0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/dependency_links.txt b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/dependency_links.txt new file mode 100644 index 0000000..e87d021 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/dependency_links.txt @@ -0,0 +1,2 @@ +https://files.pythonhosted.org/packages/source/c/certifi/certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d +https://files.pythonhosted.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2 diff --git a/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/entry_points.txt b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/entry_points.txt new file mode 100644 index 0000000..4159fd0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/entry_points.txt @@ -0,0 +1,65 @@ +[console_scripts] +easy_install = setuptools.command.easy_install:main +easy_install-3.6 = setuptools.command.easy_install:main + +[distutils.commands] +alias = setuptools.command.alias:alias +bdist_egg = setuptools.command.bdist_egg:bdist_egg +bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm +bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst +build_clib = setuptools.command.build_clib:build_clib +build_ext = setuptools.command.build_ext:build_ext +build_py = setuptools.command.build_py:build_py +develop = setuptools.command.develop:develop +dist_info = setuptools.command.dist_info:dist_info +easy_install = setuptools.command.easy_install:easy_install +egg_info = setuptools.command.egg_info:egg_info +install = setuptools.command.install:install +install_egg_info = setuptools.command.install_egg_info:install_egg_info +install_lib = setuptools.command.install_lib:install_lib +install_scripts = setuptools.command.install_scripts:install_scripts +register = setuptools.command.register:register +rotate = setuptools.command.rotate:rotate +saveopts = setuptools.command.saveopts:saveopts +sdist = setuptools.command.sdist:sdist +setopt = setuptools.command.setopt:setopt +test = setuptools.command.test:test +upload = setuptools.command.upload:upload +upload_docs = setuptools.command.upload_docs:upload_docs + +[distutils.setup_keywords] +convert_2to3_doctests = setuptools.dist:assert_string_list +dependency_links = setuptools.dist:assert_string_list +eager_resources = setuptools.dist:assert_string_list +entry_points = setuptools.dist:check_entry_points +exclude_package_data = setuptools.dist:check_package_data +extras_require = setuptools.dist:check_extras +include_package_data = setuptools.dist:assert_bool +install_requires = setuptools.dist:check_requirements +namespace_packages = setuptools.dist:check_nsp +package_data = setuptools.dist:check_package_data +packages = setuptools.dist:check_packages +python_requires = setuptools.dist:check_specifier +setup_requires = setuptools.dist:check_requirements +test_loader = setuptools.dist:check_importable +test_runner = setuptools.dist:check_importable +test_suite = setuptools.dist:check_test_suite +tests_require = setuptools.dist:check_requirements +use_2to3 = setuptools.dist:assert_bool +use_2to3_exclude_fixers = setuptools.dist:assert_string_list 
+use_2to3_fixers = setuptools.dist:assert_string_list +zip_safe = setuptools.dist:assert_bool + +[egg_info.writers] +PKG-INFO = setuptools.command.egg_info:write_pkg_info +dependency_links.txt = setuptools.command.egg_info:overwrite_arg +depends.txt = setuptools.command.egg_info:warn_depends_obsolete +eager_resources.txt = setuptools.command.egg_info:overwrite_arg +entry_points.txt = setuptools.command.egg_info:write_entries +namespace_packages.txt = setuptools.command.egg_info:overwrite_arg +requires.txt = setuptools.command.egg_info:write_requirements +top_level.txt = setuptools.command.egg_info:write_toplevel_names + +[setuptools.installation] +eggsecutable = setuptools.command.easy_install:bootstrap + diff --git a/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/top_level.txt new file mode 100644 index 0000000..4577c6a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/top_level.txt @@ -0,0 +1,3 @@ +easy_install +pkg_resources +setuptools diff --git a/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/zip-safe b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools-41.0.0.dist-info/zip-safe @@ -0,0 +1 @@ + diff --git a/project/venv/lib/python2.7/site-packages/setuptools/__init__.py b/project/venv/lib/python2.7/site-packages/setuptools/__init__.py new file mode 100644 index 0000000..a71b2bb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/__init__.py @@ -0,0 +1,228 @@ +"""Extensions to the 'distutils' for large or complex distributions""" + +import os +import sys +import functools +import distutils.core +import distutils.filelist +import re +from distutils.errors import DistutilsOptionError +from distutils.util import convert_path +from fnmatch import fnmatchcase + +from ._deprecation_warning import SetuptoolsDeprecationWarning + +from setuptools.extern.six import PY3, string_types +from setuptools.extern.six.moves import filter, map + +import setuptools.version +from setuptools.extension import Extension +from setuptools.dist import Distribution, Feature +from setuptools.depends import Require +from . import monkey + +__metaclass__ = type + + +__all__ = [ + 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require', + 'SetuptoolsDeprecationWarning', + 'find_packages' +] + +if PY3: + __all__.append('find_namespace_packages') + +__version__ = setuptools.version.__version__ + +bootstrap_install_from = None + +# If we run 2to3 on .py files, should we also convert docstrings? +# Default: yes; assume that we can detect doctests reliably +run_2to3_on_doctests = True +# Standard package names for fixer packages +lib2to3_fixer_packages = ['lib2to3.fixes'] + + +class PackageFinder: + """ + Generate a list of all Python packages found within a directory + """ + + @classmethod + def find(cls, where='.', exclude=(), include=('*',)): + """Return a list all Python packages found within directory 'where' + + 'where' is the root directory which will be searched for packages. It + should be supplied as a "cross-platform" (i.e. URL-style) path; it will + be converted to the appropriate local path syntax. 
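# A usage sketch of the finder documented here (an aside, not part of the
# vendored file): given a tree containing pkg/__init__.py and
# pkg/tests/__init__.py,
#
#     from setuptools import find_packages
#     find_packages(where=".", exclude=("pkg.tests",))   # -> ['pkg']
#
# Wildcards follow fnmatch, so exclude=("pkg.*",) would drop every
# subpackage of pkg while keeping 'pkg' itself.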
+ + 'exclude' is a sequence of package names to exclude; '*' can be used + as a wildcard in the names, such that 'foo.*' will exclude all + subpackages of 'foo' (but not 'foo' itself). + + 'include' is a sequence of package names to include. If it's + specified, only the named packages will be included. If it's not + specified, all found packages will be included. 'include' can contain + shell style wildcard patterns just like 'exclude'. + """ + + return list(cls._find_packages_iter( + convert_path(where), + cls._build_filter('ez_setup', '*__pycache__', *exclude), + cls._build_filter(*include))) + + @classmethod + def _find_packages_iter(cls, where, exclude, include): + """ + All the packages found in 'where' that pass the 'include' filter, but + not the 'exclude' filter. + """ + for root, dirs, files in os.walk(where, followlinks=True): + # Copy dirs to iterate over it, then empty dirs. + all_dirs = dirs[:] + dirs[:] = [] + + for dir in all_dirs: + full_path = os.path.join(root, dir) + rel_path = os.path.relpath(full_path, where) + package = rel_path.replace(os.path.sep, '.') + + # Skip directory trees that are not valid packages + if ('.' in dir or not cls._looks_like_package(full_path)): + continue + + # Should this package be included? + if include(package) and not exclude(package): + yield package + + # Keep searching subdirectories, as there may be more packages + # down there, even if the parent was excluded. + dirs.append(dir) + + @staticmethod + def _looks_like_package(path): + """Does a directory look like a package?""" + return os.path.isfile(os.path.join(path, '__init__.py')) + + @staticmethod + def _build_filter(*patterns): + """ + Given a list of patterns, return a callable that will be true only if + the input matches at least one of the patterns. + """ + return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns) + + +class PEP420PackageFinder(PackageFinder): + @staticmethod + def _looks_like_package(path): + return True + + +find_packages = PackageFinder.find + +if PY3: + find_namespace_packages = PEP420PackageFinder.find + + +def _install_setup_requires(attrs): + # Note: do not use `setuptools.Distribution` directly, as + # our PEP 517 backend patch `distutils.core.Distribution`. + dist = distutils.core.Distribution(dict( + (k, v) for k, v in attrs.items() + if k in ('dependency_links', 'setup_requires') + )) + # Honor setup.cfg's options. + dist.parse_config_files(ignore_option_errors=True) + if dist.setup_requires: + dist.fetch_build_eggs(dist.setup_requires) + + +def setup(**attrs): + # Make sure we have any requirements needed to interpret 'attrs'. + _install_setup_requires(attrs) + return distutils.core.setup(**attrs) + +setup.__doc__ = distutils.core.setup.__doc__ + + +_Command = monkey.get_unpatched(distutils.core.Command) + + +class Command(_Command): + __doc__ = _Command.__doc__ + + command_consumes_arguments = False + + def __init__(self, dist, **kw): + """ + Construct the command for dist, updating + vars(self) with any keyword parameters. + """ + _Command.__init__(self, dist) + vars(self).update(kw) + + def _ensure_stringlike(self, option, what, default=None): + val = getattr(self, option) + if val is None: + setattr(self, option, default) + return default + elif not isinstance(val, string_types): + raise DistutilsOptionError("'%s' must be a %s (got `%s`)" + % (option, what, val)) + return val + + def ensure_string_list(self, option): + r"""Ensure that 'option' is a list of strings. 
If 'option' is + currently a string, we split it either on /,\s*/ or /\s+/, so + "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become + ["foo", "bar", "baz"]. + """ + val = getattr(self, option) + if val is None: + return + elif isinstance(val, string_types): + setattr(self, option, re.split(r',\s*|\s+', val)) + else: + if isinstance(val, list): + ok = all(isinstance(v, string_types) for v in val) + else: + ok = False + if not ok: + raise DistutilsOptionError( + "'%s' must be a list of strings (got %r)" + % (option, val)) + + def reinitialize_command(self, command, reinit_subcommands=0, **kw): + cmd = _Command.reinitialize_command(self, command, reinit_subcommands) + vars(cmd).update(kw) + return cmd + + +def _find_all_simple(path): + """ + Find all files under 'path' + """ + results = ( + os.path.join(base, file) + for base, dirs, files in os.walk(path, followlinks=True) + for file in files + ) + return filter(os.path.isfile, results) + + +def findall(dir=os.curdir): + """ + Find all files under 'dir' and return the list of full filenames. + Unless dir is '.', return full filenames with dir prepended. + """ + files = _find_all_simple(dir) + if dir == os.curdir: + make_rel = functools.partial(os.path.relpath, start=dir) + files = map(make_rel, files) + return list(files) + + +# Apply monkey patches +monkey.patch_all() diff --git a/project/venv/lib/python2.7/site-packages/setuptools/__init__.pyc b/project/venv/lib/python2.7/site-packages/setuptools/__init__.pyc new file mode 100644 index 0000000..e64269c Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_deprecation_warning.py b/project/venv/lib/python2.7/site-packages/setuptools/_deprecation_warning.py new file mode 100644 index 0000000..086b64d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/_deprecation_warning.py @@ -0,0 +1,7 @@ +class SetuptoolsDeprecationWarning(Warning): + """ + Base class for warning deprecations in ``setuptools`` + + This class is not derived from ``DeprecationWarning``, and as such is + visible by default. + """ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_deprecation_warning.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_deprecation_warning.pyc new file mode 100644 index 0000000..f4bfac8 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_deprecation_warning.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/__init__.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/__init__.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/__init__.pyc new file mode 100644 index 0000000..5413399 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__about__.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__about__.py new file mode 100644 index 0000000..95d330e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__about__.py @@ -0,0 +1,21 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
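# A sketch of the split rule Command.ensure_string_list applies above: a
# single comma- or whitespace-separated string becomes a list of strings,
# per the /,\s*|\s+/ pattern in the source.
import re

def split_string_list(val):
    return re.split(r',\s*|\s+', val)

assert split_string_list("foo bar baz") == ["foo", "bar", "baz"]
assert split_string_list("foo,bar,baz") == ["foo", "bar", "baz"]
assert split_string_list("foo, bar baz") == ["foo", "bar", "baz"]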
+from __future__ import absolute_import, division, print_function + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "16.8" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD or Apache License, Version 2.0" +__copyright__ = "Copyright 2014-2016 %s" % __author__ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__about__.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__about__.pyc new file mode 100644 index 0000000..e2956aa Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__about__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__init__.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__init__.py new file mode 100644 index 0000000..5ee6220 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__init__.py @@ -0,0 +1,14 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +from .__about__ import ( + __author__, __copyright__, __email__, __license__, __summary__, __title__, + __uri__, __version__ +) + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__init__.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__init__.pyc new file mode 100644 index 0000000..242116b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_compat.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_compat.py new file mode 100644 index 0000000..210bb80 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_compat.py @@ -0,0 +1,30 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import sys + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +# flake8: noqa + +if PY3: + string_types = str, +else: + string_types = basestring, + + +def with_metaclass(meta, *bases): + """ + Create a base class with a metaclass. + """ + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
+ class metaclass(meta): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_compat.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_compat.pyc new file mode 100644 index 0000000..74de9c1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_structures.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_structures.py new file mode 100644 index 0000000..ccc2786 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_structures.py @@ -0,0 +1,68 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + + +class Infinity(object): + + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + +Infinity = Infinity() + + +class NegativeInfinity(object): + + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + +NegativeInfinity = NegativeInfinity() diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_structures.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_structures.pyc new file mode 100644 index 0000000..a6c40b7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/_structures.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/markers.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/markers.py new file mode 100644 index 0000000..031332a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/markers.py @@ -0,0 +1,301 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
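# A sketch of why _structures.py above defines the Infinity and
# NegativeInfinity singletons: they compare greater (respectively less)
# than any other value, so version-comparison keys can be padded where a
# segment is missing. Minimal re-creation; the names here are
# illustrative, not the vendored API.
class _Inf(object):
    def __lt__(self, other): return False
    def __gt__(self, other): return True

INF = _Inf()
# A tuple padded with the sentinel sorts after any finite segment:
assert sorted([(1, INF), (1, 2)]) == [(1, 2), (1, INF)]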
+from __future__ import absolute_import, division, print_function + +import operator +import os +import platform +import sys + +from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd +from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString +from setuptools.extern.pyparsing import Literal as L # noqa + +from ._compat import string_types +from .specifiers import Specifier, InvalidSpecifier + + +__all__ = [ + "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName", + "Marker", "default_environment", +] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. + """ + + +class Node(object): + + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + def __repr__(self): + return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) + + def serialize(self): + raise NotImplementedError + + +class Variable(Node): + + def serialize(self): + return str(self) + + +class Value(Node): + + def serialize(self): + return '"{0}"'.format(self) + + +class Op(Node): + + def serialize(self): + return str(self) + + +VARIABLE = ( + L("implementation_version") | + L("platform_python_implementation") | + L("implementation_name") | + L("python_full_version") | + L("platform_release") | + L("platform_version") | + L("platform_machine") | + L("platform_system") | + L("python_version") | + L("sys_platform") | + L("os_name") | + L("os.name") | # PEP-345 + L("sys.platform") | # PEP-345 + L("platform.version") | # PEP-345 + L("platform.machine") | # PEP-345 + L("platform.python_implementation") | # PEP-345 + L("python_implementation") | # undocumented setuptools legacy + L("extra") +) +ALIASES = { + 'os.name': 'os_name', + 'sys.platform': 'sys_platform', + 'platform.version': 'platform_version', + 'platform.machine': 'platform_machine', + 'platform.python_implementation': 'platform_python_implementation', + 'python_implementation': 'platform_python_implementation' +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) + +VERSION_CMP = ( + L("===") | + L("==") | + L(">=") | + L("<=") | + L("!=") | + L("~=") | + L(">") | + L("<") +) + +MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) + +MARKER_VALUE = QuotedString("'") | QuotedString('"') +MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) + +BOOLOP = L("and") | L("or") + +MARKER_VAR = VARIABLE | MARKER_VALUE + +MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) +MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) + +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() + +MARKER_EXPR = Forward() +MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) +MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) + +MARKER = stringStart + MARKER_EXPR + stringEnd + + +def _coerce_parse_result(results): + if isinstance(results, ParseResults): + return [_coerce_parse_result(i) for i in results] + else: + return results + + +def _format_marker(marker, first=True): + assert isinstance(marker, (list, tuple, string_types)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own 
list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. + if (isinstance(marker, list) and len(marker) == 1 and + isinstance(marker[0], (list, tuple))): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs, op, rhs): + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs) + + oper = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison( + "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) + ) + + return oper(lhs, rhs) + + +_undefined = object() + + +def _get_env(environment, name): + value = environment.get(name, _undefined) + + if value is _undefined: + raise UndefinedEnvironmentName( + "{0!r} does not exist in evaluation environment.".format(name) + ) + + return value + + +def _evaluate_markers(markers, environment): + groups = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, string_types)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + lhs_value = _get_env(environment, lhs.value) + rhs_value = rhs.value + else: + lhs_value = lhs.value + rhs_value = _get_env(environment, rhs.value) + + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info): + version = '{0.major}.{0.minor}.{0.micro}'.format(info) + kind = info.releaselevel + if kind != 'final': + version += kind[0] + str(info.serial) + return version + + +def default_environment(): + if hasattr(sys, 'implementation'): + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + else: + iver = '0' + implementation_name = '' + + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": platform.python_version()[:3], + "sys_platform": sys.platform, + } + + +class Marker(object): + + def __init__(self, marker): + try: + self._markers = _coerce_parse_result(MARKER.parseString(marker)) + except ParseException as e: + err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( + marker, marker[e.loc:e.loc + 8]) + raise InvalidMarker(err_str) + + def __str__(self): + return _format_marker(self._markers) + + def __repr__(self): + return "<Marker({0!r})>".format(str(self)) + + def evaluate(self, environment=None): + """Evaluate a marker. + + Return the boolean from evaluating the given marker against the + environment. 
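# A doctest-style sketch of the Marker class defined here, via the
# standalone `packaging` distribution (assumed to behave like this
# vendored copy; results shown for this project's posix/Python 2.7 venv):
#
#     >>> from packaging.markers import Marker
#     >>> m = Marker('python_version >= "2.7" and os_name == "posix"')
#     >>> m.evaluate()                   # against the running interpreter
#     True
#     >>> m.evaluate({"os_name": "nt"})  # override part of the environment
#     False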
environment is an optional argument to override all or + part of the determined environment. + + The environment is determined from the current Python process. + """ + current_environment = default_environment() + if environment is not None: + current_environment.update(environment) + + return _evaluate_markers(self._markers, current_environment) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/markers.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/markers.pyc new file mode 100644 index 0000000..12a8f9d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/markers.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/requirements.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/requirements.py new file mode 100644 index 0000000..5b49341 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/requirements.py @@ -0,0 +1,127 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import string +import re + +from setuptools.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException +from setuptools.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine +from setuptools.extern.pyparsing import Literal as L # noqa +from setuptools.extern.six.moves.urllib import parse as urlparse + +from .markers import MARKER_EXPR, Marker +from .specifiers import LegacySpecifier, Specifier, SpecifierSet + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. 
+ """ + + +ALPHANUM = Word(string.ascii_letters + string.digits) + +LBRACKET = L("[").suppress() +RBRACKET = L("]").suppress() +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() +COMMA = L(",").suppress() +SEMICOLON = L(";").suppress() +AT = L("@").suppress() + +PUNCTUATION = Word("-_.") +IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) +IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) + +NAME = IDENTIFIER("name") +EXTRA = IDENTIFIER + +URI = Regex(r'[^ ]+')("url") +URL = (AT + URI) + +EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) +EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") + +VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) +VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) + +VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY +VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), + joinString=",", adjacent=False)("_raw_spec") +_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '') + +VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") +VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) + +MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") +MARKER_EXPR.setParseAction( + lambda s, l, t: Marker(s[t._original_start:t._original_end]) +) +MARKER_SEPERATOR = SEMICOLON +MARKER = MARKER_SEPERATOR + MARKER_EXPR + +VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) +URL_AND_MARKER = URL + Optional(MARKER) + +NAMED_REQUIREMENT = \ + NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) + +REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd + + +class Requirement(object): + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? 
+ + def __init__(self, requirement_string): + try: + req = REQUIREMENT.parseString(requirement_string) + except ParseException as e: + raise InvalidRequirement( + "Invalid requirement, parse error at \"{0!r}\"".format( + requirement_string[e.loc:e.loc + 8])) + + self.name = req.name + if req.url: + parsed_url = urlparse.urlparse(req.url) + if not (parsed_url.scheme and parsed_url.netloc) or ( + not parsed_url.scheme and not parsed_url.netloc): + raise InvalidRequirement("Invalid URL given") + self.url = req.url + else: + self.url = None + self.extras = set(req.extras.asList() if req.extras else []) + self.specifier = SpecifierSet(req.specifier) + self.marker = req.marker if req.marker else None + + def __str__(self): + parts = [self.name] + + if self.extras: + parts.append("[{0}]".format(",".join(sorted(self.extras)))) + + if self.specifier: + parts.append(str(self.specifier)) + + if self.url: + parts.append("@ {0}".format(self.url)) + + if self.marker: + parts.append("; {0}".format(self.marker)) + + return "".join(parts) + + def __repr__(self): + return "<Requirement({0!r})>".format(str(self)) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/requirements.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/requirements.pyc new file mode 100644 index 0000000..f0e520d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/requirements.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/specifiers.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/specifiers.py new file mode 100644 index 0000000..7f5a76c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/specifiers.py @@ -0,0 +1,774 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import abc +import functools +import itertools +import re + +from ._compat import string_types, with_metaclass +from .version import Version, LegacyVersion, parse + + +class InvalidSpecifier(ValueError): + """ + An invalid specifier was found, users should refer to PEP 440. + """ + + +class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): + + @abc.abstractmethod + def __str__(self): + """ + Returns the str representation of this Specifier like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self): + """ + Returns a hash value for this Specifier like object. + """ + + @abc.abstractmethod + def __eq__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are equal. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are not equal. + """ + + @abc.abstractproperty + def prereleases(self): + """ + Returns whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @prereleases.setter + def prereleases(self, value): + """ + Sets whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @abc.abstractmethod + def contains(self, item, prereleases=None): + """ + Determines if the given item is contained within this specifier. 
+ """ + + @abc.abstractmethod + def filter(self, iterable, prereleases=None): + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators = {} + + def __init__(self, spec="", prereleases=None): + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) + + self._spec = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "<{0}({1!r}{2})>".format( + self.__class__.__name__, + str(self), + pre, + ) + + def __str__(self): + return "{0}{1}".format(*self._spec) + + def __hash__(self): + return hash(self._spec) + + def __eq__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec == other._spec + + def __ne__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + + def _get_operator(self, op): + return getattr(self, "_compare_{0}".format(self._operators[op])) + + def _coerce_version(self, version): + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self): + return self._spec[0] + + @property + def version(self): + return self._spec[1] + + @property + def prereleases(self): + return self._prereleases + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version or LegacyVersion, this allows us to have + # a shortcut for ``"2.0" in Specifier(">=2") + item = self._coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + return self._get_operator(self.operator)(item, self.version) + + def filter(self, iterable, prereleases=None): + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = self._coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later incase nothing + # else matches this specifier. 
+ if (parsed_version.is_prerelease and not + (prereleases or self.prereleases)): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the begining. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +class LegacySpecifier(_IndividualSpecifier): + + _regex_str = ( + r""" + (?P<operator>(==|!=|<=|>=|<|>)) + \s* + (?P<version> + [^,;\s)]* # Since this is a "legacy" specifier, and the version + # string can be just about anything, we match everything + # except for whitespace, a semi-colon for marker support, + # a closing paren since versions can be enclosed in + # them, and a comma since it's a version separator. + ) + """ + ) + + _regex = re.compile( + r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + } + + def _coerce_version(self, version): + if not isinstance(version, LegacyVersion): + version = LegacyVersion(str(version)) + return version + + def _compare_equal(self, prospective, spec): + return prospective == self._coerce_version(spec) + + def _compare_not_equal(self, prospective, spec): + return prospective != self._coerce_version(spec) + + def _compare_less_than_equal(self, prospective, spec): + return prospective <= self._coerce_version(spec) + + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= self._coerce_version(spec) + + def _compare_less_than(self, prospective, spec): + return prospective < self._coerce_version(spec) + + def _compare_greater_than(self, prospective, spec): + return prospective > self._coerce_version(spec) + + +def _require_version_compare(fn): + @functools.wraps(fn) + def wrapped(self, prospective, spec): + if not isinstance(prospective, Version): + return False + return fn(self, prospective, spec) + return wrapped + + +class Specifier(_IndividualSpecifier): + + _regex_str = ( + r""" + (?P<operator>(~=|==|!=|<=|>=|<|>|===)) + (?P<version> + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s]* # We just match everything, except for whitespace + # since we are only testing for strict identity. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + + # You cannot use a wild card and a dev or local version + # together so group them with a | and make them optional. + (?: + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + | + \.\* # Wild card syntax of .* + )? 
+ ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?<!==|!=|~=) # We have special cases for these + # operators so we want to make sure they + # don't match here. + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + ) + """ + ) + + _regex = re.compile( + r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "~=": "compatible", + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + @_require_version_compare + def _compare_compatible(self, prospective, spec): + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore post and dev releases and we want to treat the pre-release as + # it's own separate segment. + prefix = ".".join( + list( + itertools.takewhile( + lambda x: (not x.startswith("post") and not + x.startswith("dev")), + _version_split(spec), + ) + )[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return (self._get_operator(">=")(prospective, spec) and + self._get_operator("==")(prospective, prefix)) + + @_require_version_compare + def _compare_equal(self, prospective, spec): + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + prospective = Version(prospective.public) + # Split the spec out by dots, and pretend that there is an implicit + # dot in between a release segment and a pre-release segment. + spec = _version_split(spec[:-2]) # Remove the trailing .* + + # Split the prospective version out by dots, and pretend that there + # is an implicit dot in between a release segment and a pre-release + # segment. + prospective = _version_split(str(prospective)) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + prospective = prospective[:len(spec)] + + # Pad out our two sides with zeros so that they both equal the same + # length. + spec, prospective = _pad_version(spec, prospective) + else: + # Convert our spec string into a Version + spec = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. 
+ if not spec.local: + prospective = Version(prospective.public) + + return prospective == spec + + @_require_version_compare + def _compare_not_equal(self, prospective, spec): + return not self._compare_equal(prospective, spec) + + @_require_version_compare + def _compare_less_than_equal(self, prospective, spec): + return prospective <= Version(spec) + + @_require_version_compare + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= Version(spec) + + @_require_version_compare + def _compare_less_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is techincally greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective, spec): + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self): + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. 
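# A doctest-style sketch of the "==X.Y.*" prefix matching handled by
# _compare_equal above (standalone `packaging` assumed equivalent):
#
#     >>> from packaging.specifiers import Specifier
#     >>> s = Specifier("==2.8.*")
#     >>> s.contains("2.8.1")
#     True
#     >>> s.contains("2.8")    # padded with zeros before comparing
#     True
#     >>> s.contains("2.9")
#     False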
+ if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if parse(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version): + result = [] + for item in version.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _pad_version(left, right): + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]):]) + right_split.append(right[len(right_split[0]):]) + + # Insert our padding + left_split.insert( + 1, + ["0"] * max(0, len(right_split[0]) - len(left_split[0])), + ) + right_split.insert( + 1, + ["0"] * max(0, len(left_split[0]) - len(right_split[0])), + ) + + return ( + list(itertools.chain(*left_split)), + list(itertools.chain(*right_split)), + ) + + +class SpecifierSet(BaseSpecifier): + + def __init__(self, specifiers="", prereleases=None): + # Split on , to break each indidivual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. + specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Parsed each individual specifier, attempting first to make it a + # Specifier and falling back to a LegacySpecifier. + parsed = set() + for specifier in specifiers: + try: + parsed.add(Specifier(specifier)) + except InvalidSpecifier: + parsed.add(LegacySpecifier(specifier)) + + # Turn our parsed specifiers into a frozen set and save them for later. + self._specs = frozenset(parsed) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "<SpecifierSet({0!r}{1})>".format(str(self), pre) + + def __str__(self): + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self): + return hash(self._specs) + + def __and__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." 
+ ) + + return specifier + + def __eq__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __ne__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + + def __len__(self): + return len(self._specs) + + def __iter__(self): + return iter(self._specs) + + @property + def prereleases(self): + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all( + s.contains(item, prereleases=prereleases) + for s in self._specs + ) + + def filter(self, iterable, prereleases=None): + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iterable + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases, and which will filter out LegacyVersion in general. 
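# A doctest-style sketch of the SpecifierSet behaviour defined in this
# class (standalone `packaging` assumed equivalent): comma-separated
# clauses are AND-ed together, and two sets combine with `&`:
#
#     >>> from packaging.specifiers import SpecifierSet
#     >>> combined = SpecifierSet(">=1.0") & SpecifierSet("<2.0,!=1.5")
#     >>> str(combined)
#     '!=1.5,<2.0,>=1.0'
#     >>> "1.4" in combined
#     True
#     >>> "1.5" in combined
#     False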
+        else:
+            filtered = []
+            found_prereleases = []
+
+            for item in iterable:
+                # Ensure that we have some kind of Version class for this item.
+                if not isinstance(item, (LegacyVersion, Version)):
+                    parsed_version = parse(item)
+                else:
+                    parsed_version = item
+
+                # Filter out any item which is parsed as a LegacyVersion
+                if isinstance(parsed_version, LegacyVersion):
+                    continue
+
+                # Store any item which is a pre-release for later unless we've
+                # already found a final version or we are accepting prereleases
+                if parsed_version.is_prerelease and not prereleases:
+                    if not filtered:
+                        found_prereleases.append(item)
+                else:
+                    filtered.append(item)
+
+            # If we've found no items except for pre-releases, then we'll go
+            # ahead and use the pre-releases
+            if not filtered and found_prereleases and prereleases is None:
+                return found_prereleases
+
+            return filtered
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/specifiers.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/specifiers.pyc
new file mode 100644
index 0000000..a209182
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/specifiers.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/utils.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/utils.py
new file mode 100644
index 0000000..942387c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/utils.py
@@ -0,0 +1,14 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+
+
+def canonicalize_name(name):
+    # This is taken from PEP 503.
+    return _canonicalize_regex.sub("-", name).lower()
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/utils.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/utils.pyc
new file mode 100644
index 0000000..2d6b725
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/utils.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/version.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/version.py
new file mode 100644
index 0000000..83b5ee8
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/version.py
@@ -0,0 +1,393 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity
+
+
+__all__ = [
+    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
+]
+
+
+_Version = collections.namedtuple(
+    "_Version",
+    ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse(version):
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object, depending on whether the given version
+    is a valid PEP 440 version or a legacy version.
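+
+    Illustration (an editor's sketch, not part of the upstream file)::
+
+        parse("1.4.post2")     # -> <Version('1.4.post2')>
+        parse("french toast")  # -> <LegacyVersion('french toast')>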
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+    """
+
+
+class _BaseVersion(object):
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return "<LegacyVersion({0})>".format(repr(str(self)))
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This will effectively put the LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version in setuptools prior
+    # to its adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?
# local version +""" + + +class Version(_BaseVersion): + + _regex = re.compile( + r"^\s*" + VERSION_PATTERN + r"\s*$", + re.VERBOSE | re.IGNORECASE, + ) + + def __init__(self, version): + # Validate the version and parse it into pieces + match = self._regex.search(version) + if not match: + raise InvalidVersion("Invalid version: '{0}'".format(version)) + + # Store the parsed out pieces of the version + self._version = _Version( + epoch=int(match.group("epoch")) if match.group("epoch") else 0, + release=tuple(int(i) for i in match.group("release").split(".")), + pre=_parse_letter_version( + match.group("pre_l"), + match.group("pre_n"), + ), + post=_parse_letter_version( + match.group("post_l"), + match.group("post_n1") or match.group("post_n2"), + ), + dev=_parse_letter_version( + match.group("dev_l"), + match.group("dev_n"), + ), + local=_parse_local_version(match.group("local")), + ) + + # Generate a key which will be used for sorting + self._key = _cmpkey( + self._version.epoch, + self._version.release, + self._version.pre, + self._version.post, + self._version.dev, + self._version.local, + ) + + def __repr__(self): + return "<Version({0})>".format(repr(str(self))) + + def __str__(self): + parts = [] + + # Epoch + if self._version.epoch != 0: + parts.append("{0}!".format(self._version.epoch)) + + # Release segment + parts.append(".".join(str(x) for x in self._version.release)) + + # Pre-release + if self._version.pre is not None: + parts.append("".join(str(x) for x in self._version.pre)) + + # Post-release + if self._version.post is not None: + parts.append(".post{0}".format(self._version.post[1])) + + # Development release + if self._version.dev is not None: + parts.append(".dev{0}".format(self._version.dev[1])) + + # Local version segment + if self._version.local is not None: + parts.append( + "+{0}".format(".".join(str(x) for x in self._version.local)) + ) + + return "".join(parts) + + @property + def public(self): + return str(self).split("+", 1)[0] + + @property + def base_version(self): + parts = [] + + # Epoch + if self._version.epoch != 0: + parts.append("{0}!".format(self._version.epoch)) + + # Release segment + parts.append(".".join(str(x) for x in self._version.release)) + + return "".join(parts) + + @property + def local(self): + version_string = str(self) + if "+" in version_string: + return version_string.split("+", 1)[1] + + @property + def is_prerelease(self): + return bool(self._version.dev or self._version.pre) + + @property + def is_postrelease(self): + return bool(self._version.post) + + +def _parse_letter_version(letter, number): + if letter: + # We consider there to be an implicit 0 in a pre-release if there is + # not a numeral associated with it. + if number is None: + number = 0 + + # We normalize any letters to their lower case form + letter = letter.lower() + + # We consider some words to be alternate spellings of other words and + # in those cases we want to normalize the spellings to our preferred + # spelling. + if letter == "alpha": + letter = "a" + elif letter == "beta": + letter = "b" + elif letter in ["c", "pre", "preview"]: + letter = "rc" + elif letter in ["rev", "r"]: + letter = "post" + + return letter, int(number) + if not letter and number: + # We assume if we are given a number, but we are not given a letter + # then this is using the implicit post release syntax (e.g. 
1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now
+    # leading zeros until we come to something non-zero, then re-reverse the
+    # rest back into the correct order, make it a tuple, and use that for our
+    # sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP 440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/version.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/version.pyc
new file mode 100644
index 0000000..2ca3e33
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/packaging/version.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.py
new file mode 100644
index 0000000..cf75e1e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.py
@@ -0,0 +1,5742 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2018 Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +__doc__ = \ +""" +pyparsing module - Classes and methods to define and execute parsing grammars +============================================================================= + +The pyparsing module is an alternative approach to creating and executing simple grammars, +vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you +don't need to learn a new syntax for defining grammars or matching expressions - the parsing module +provides a library of classes that you use to construct the grammar directly in Python. + +Here is a program to parse "Hello, World!" (or any greeting of the form +C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements +(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to +L{Literal} expressions):: + + from pyparsing import Word, alphas + + # define grammar of a greeting + greet = Word(alphas) + "," + Word(alphas) + "!" + + hello = "Hello, World!" + print (hello, "->", greet.parseString(hello)) + +The program outputs the following:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + +The Python representation of the grammar is quite readable, owing to the self-explanatory +class names, and the use of '+', '|' and '^' operators. + +The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an +object with named attributes. + +The pyparsing module handles some of the problems that are typically vexing when writing text parsers: + - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) + - quoted strings + - embedded comments + + +Getting Started - +----------------- +Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing +classes inherit from. 
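+
+A second, slightly richer illustration (an editor's sketch using the same
+public API as the example above)::
+
+    from pyparsing import Word, nums, delimitedList
+
+    # a comma-separated list of integers; the ',' delimiters are suppressed
+    int_list = delimitedList(Word(nums))
+    print(int_list.parseString("1, 2, 3"))  # -> ['1', '2', '3']
+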
Use the docstrings for examples of how to: + - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes + - construct character word-group expressions using the L{Word} class + - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes + - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones + - associate names with your parsed results using L{ParserElement.setResultsName} + - find some helpful expression short-cuts like L{delimitedList} and L{oneOf} + - find more useful common expressions in the L{pyparsing_common} namespace class +""" + +__version__ = "2.2.1" +__versionTime__ = "18 Sep 2018 00:49 UTC" +__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" + +import string +from weakref import ref as wkref +import copy +import sys +import warnings +import re +import sre_constants +import collections +import pprint +import traceback +import types +from datetime import datetime + +try: + from _thread import RLock +except ImportError: + from threading import RLock + +try: + # Python 3 + from collections.abc import Iterable + from collections.abc import MutableMapping +except ImportError: + # Python 2.7 + from collections import Iterable + from collections import MutableMapping + +try: + from collections import OrderedDict as _OrderedDict +except ImportError: + try: + from ordereddict import OrderedDict as _OrderedDict + except ImportError: + _OrderedDict = None + +#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) + +__all__ = [ +'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', +'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', +'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', +'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', +'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', +'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', +'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', +'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', +'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', +'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', +'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', +'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', +'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', +'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', +'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', +'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', +'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', +'CloseMatch', 'tokenMap', 'pyparsing_common', +] + +system_version = tuple(sys.version_info)[:3] +PY_3 = system_version[0] == 3 +if PY_3: + _MAX_INT = sys.maxsize + basestring = str + unichr = chr + _ustr = str + + # build list of single arg builtins, that can be used as parse actions + singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] + +else: + _MAX_INT = sys.maxint + 
range = xrange + + def _ustr(obj): + """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries + str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It + then < returns the unicode object | encodes it with the default encoding | ... >. + """ + if isinstance(obj,unicode): + return obj + + try: + # If this works, then _ustr(obj) has the same behaviour as str(obj), so + # it won't break any existing code. + return str(obj) + + except UnicodeEncodeError: + # Else encode it + ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') + xmlcharref = Regex(r'&#\d+;') + xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) + return xmlcharref.transformString(ret) + + # build list of single arg builtins, tolerant of Python version, that can be used as parse actions + singleArgBuiltins = [] + import __builtin__ + for fname in "sum len sorted reversed list tuple set any all min max".split(): + try: + singleArgBuiltins.append(getattr(__builtin__,fname)) + except AttributeError: + continue + +_generatorType = type((y for y in range(1))) + +def _xml_escape(data): + """Escape &, <, >, ", ', etc. in a string of data.""" + + # ampersand must be replaced first + from_symbols = '&><"\'' + to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) + for from_,to_ in zip(from_symbols, to_symbols): + data = data.replace(from_, to_) + return data + +class _Constants(object): + pass + +alphas = string.ascii_uppercase + string.ascii_lowercase +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +_bslash = chr(92) +printables = "".join(c for c in string.printable if c not in string.whitespace) + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( self, pstr, loc=0, msg=None, elem=None ): + self.loc = loc + if msg is None: + self.msg = pstr + self.pstr = "" + else: + self.msg = msg + self.pstr = pstr + self.parserElement = elem + self.args = (pstr, loc, msg) + + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) + + def __getattr__( self, aname ): + """supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + """ + if( aname == "lineno" ): + return lineno( self.loc, self.pstr ) + elif( aname in ("col", "column") ): + return col( self.loc, self.pstr ) + elif( aname == "line" ): + return line( self.loc, self.pstr ) + else: + raise AttributeError(aname) + + def __str__( self ): + return "%s (at char %d), (line:%d, col:%d)" % \ + ( self.msg, self.loc, self.lineno, self.column ) + def __repr__( self ): + return _ustr(self) + def markInputline( self, markerString = ">!<" ): + """Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. 
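+
+           Example (an editor's illustration, assuming a digits-only
+           grammar)::
+
+               try:
+                   Word(nums).parseString("abc")
+               except ParseException as pe:
+                   print(pe.markInputline())  # -> '>!<abc'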
+ """ + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = "".join((line_str[:line_column], + markerString, line_str[line_column:])) + return line_str.strip() + def __dir__(self): + return "lineno col line".split() + dir(type(self)) + +class ParseException(ParseBaseException): + """ + Exception thrown when parse expressions don't match class; + supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + + Example:: + try: + Word(nums).setName("integer").parseString("ABC") + except ParseException as pe: + print(pe) + print("column: {}".format(pe.col)) + + prints:: + Expected integer (at char 0), (line:1, col:1) + column: 1 + """ + pass + +class ParseFatalException(ParseBaseException): + """user-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately""" + pass + +class ParseSyntaxException(ParseFatalException): + """just like L{ParseFatalException}, but thrown internally when an + L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop + immediately because an unbacktrackable syntax error has been found""" + pass + +#~ class ReparseException(ParseBaseException): + #~ """Experimental class - parse actions can raise this exception to cause + #~ pyparsing to reparse the input string: + #~ - with a modified input string, and/or + #~ - with a modified start location + #~ Set the values of the ReparseException in the constructor, and raise the + #~ exception in a parse action to cause pyparsing to use the new string/location. + #~ Setting the values as None causes no change to be made. + #~ """ + #~ def __init_( self, newstring, restartLoc ): + #~ self.newParseText = newstring + #~ self.reparseLoc = restartLoc + +class RecursiveGrammarException(Exception): + """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive""" + def __init__( self, parseElementList ): + self.parseElementTrace = parseElementList + + def __str__( self ): + return "RecursiveGrammarException: %s" % self.parseElementTrace + +class _ParseResultsWithOffset(object): + def __init__(self,p1,p2): + self.tup = (p1,p2) + def __getitem__(self,i): + return self.tup[i] + def __repr__(self): + return repr(self.tup[0]) + def setOffset(self,i): + self.tup = (self.tup[0],i) + +class ParseResults(object): + """ + Structured parse results, to provide multiple means of access to the parsed data: + - as a list (C{len(results)}) + - by list index (C{results[0], results[1]}, etc.) 
+ - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName}) + + Example:: + integer = Word(nums) + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + # equivalent form: + # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + # parseString returns a ParseResults object + result = date_str.parseString("1999/12/31") + + def test(s, fn=repr): + print("%s -> %s" % (s, fn(eval(s)))) + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + prints:: + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: 31 + - month: 12 + - year: 1999 + """ + def __new__(cls, toklist=None, name=None, asList=True, modal=True ): + if isinstance(toklist, cls): + return toklist + retobj = object.__new__(cls) + retobj.__doinit = True + return retobj + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): + if self.__doinit: + self.__doinit = False + self.__name = None + self.__parent = None + self.__accumNames = {} + self.__asList = asList + self.__modal = modal + if toklist is None: + toklist = [] + if isinstance(toklist, list): + self.__toklist = toklist[:] + elif isinstance(toklist, _generatorType): + self.__toklist = list(toklist) + else: + self.__toklist = [toklist] + self.__tokdict = dict() + + if name is not None and name: + if not modal: + self.__accumNames[name] = 0 + if isinstance(name,int): + name = _ustr(name) # will always return a str, but use _ustr for consistency + self.__name = name + if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): + if isinstance(toklist,basestring): + toklist = [ toklist ] + if asList: + if isinstance(toklist,ParseResults): + self[name] = _ParseResultsWithOffset(toklist.copy(),0) + else: + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) + self[name].__name = name + else: + try: + self[name] = toklist[0] + except (KeyError,TypeError,IndexError): + self[name] = toklist + + def __getitem__( self, i ): + if isinstance( i, (int,slice) ): + return self.__toklist[i] + else: + if i not in self.__accumNames: + return self.__tokdict[i][-1][0] + else: + return ParseResults([ v[0] for v in self.__tokdict[i] ]) + + def __setitem__( self, k, v, isinstance=isinstance ): + if isinstance(v,_ParseResultsWithOffset): + self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] + sub = v[0] + elif isinstance(k,(int,slice)): + self.__toklist[k] = v + sub = v + else: + self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] + sub = v + if isinstance(sub,ParseResults): + sub.__parent = wkref(self) + + def __delitem__( self, i ): + if isinstance(i,(int,slice)): + mylen = len( self.__toklist ) + del self.__toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i+1) + # get removed indices + removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for name,occurrences in self.__tokdict.items(): + for j in removed: + for k, (value, position) in 
enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) + else: + del self.__tokdict[i] + + def __contains__( self, k ): + return k in self.__tokdict + + def __len__( self ): return len( self.__toklist ) + def __bool__(self): return ( not not self.__toklist ) + __nonzero__ = __bool__ + def __iter__( self ): return iter( self.__toklist ) + def __reversed__( self ): return iter( self.__toklist[::-1] ) + def _iterkeys( self ): + if hasattr(self.__tokdict, "iterkeys"): + return self.__tokdict.iterkeys() + else: + return iter(self.__tokdict) + + def _itervalues( self ): + return (self[k] for k in self._iterkeys()) + + def _iteritems( self ): + return ((k, self[k]) for k in self._iterkeys()) + + if PY_3: + keys = _iterkeys + """Returns an iterator of all named result keys (Python 3.x only).""" + + values = _itervalues + """Returns an iterator of all named result values (Python 3.x only).""" + + items = _iteritems + """Returns an iterator of all named result key-value tuples (Python 3.x only).""" + + else: + iterkeys = _iterkeys + """Returns an iterator of all named result keys (Python 2.x only).""" + + itervalues = _itervalues + """Returns an iterator of all named result values (Python 2.x only).""" + + iteritems = _iteritems + """Returns an iterator of all named result key-value tuples (Python 2.x only).""" + + def keys( self ): + """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" + return list(self.iterkeys()) + + def values( self ): + """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" + return list(self.itervalues()) + + def items( self ): + """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" + return list(self.iteritems()) + + def haskeys( self ): + """Since keys() returns an iterator, this method is helpful in bypassing + code that looks for the existence of any defined results names.""" + return bool(self.__tokdict) + + def pop( self, *args, **kwargs): + """ + Removes and returns item at specified index (default=C{last}). + Supports both C{list} and C{dict} semantics for C{pop()}. If passed no + argument or an integer argument, it will use C{list} semantics + and pop tokens from the list of parsed tokens. If passed a + non-integer argument (most likely a string), it will use C{dict} + semantics and pop the corresponding value from any defined + results names. A second default return value argument is + supported, just as in C{dict.pop()}. 
+ + Example:: + def remove_first(tokens): + tokens.pop(0) + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + OneOrMore(Word(nums)) + print(patt.parseString("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.addParseAction(remove_LABEL) + print(patt.parseString("AAB 123 321").dump()) + prints:: + ['AAB', '123', '321'] + - LABEL: AAB + + ['AAB', '123', '321'] + """ + if not args: + args = [-1] + for k,v in kwargs.items(): + if k == 'default': + args = (args[0], v) + else: + raise TypeError("pop() got an unexpected keyword argument '%s'" % k) + if (isinstance(args[0], int) or + len(args) == 1 or + args[0] in self): + index = args[0] + ret = self[index] + del self[index] + return ret + else: + defaultvalue = args[1] + return defaultvalue + + def get(self, key, defaultValue=None): + """ + Returns named result matching the given key, or if there is no + such name, then returns the given C{defaultValue} or C{None} if no + C{defaultValue} is specified. + + Similar to C{dict.get()}. + + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ + if key in self: + return self[key] + else: + return defaultValue + + def insert( self, index, insStr ): + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to C{list.insert()}. + + Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] + """ + self.__toklist.insert(index, insStr) + # fixup indices in token dictionary + for name,occurrences in self.__tokdict.items(): + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) + + def append( self, item ): + """ + Add single element to end of ParseResults list of elements. + + Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] + """ + self.__toklist.append(item) + + def extend( self, itemseq ): + """ + Add sequence of elements to end of ParseResults list of elements. 
+ + Example:: + patt = OneOrMore(Word(alphas)) + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ + if isinstance(itemseq, ParseResults): + self += itemseq + else: + self.__toklist.extend(itemseq) + + def clear( self ): + """ + Clear all elements and results names. + """ + del self.__toklist[:] + self.__tokdict.clear() + + def __getattr__( self, name ): + try: + return self[name] + except KeyError: + return "" + + if name in self.__tokdict: + if name not in self.__accumNames: + return self.__tokdict[name][-1][0] + else: + return ParseResults([ v[0] for v in self.__tokdict[name] ]) + else: + return "" + + def __add__( self, other ): + ret = self.copy() + ret += other + return ret + + def __iadd__( self, other ): + if other.__tokdict: + offset = len(self.__toklist) + addoffset = lambda a: offset if a<0 else a+offset + otheritems = other.__tokdict.items() + otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) + for (k,vlist) in otheritems for v in vlist] + for k,v in otherdictitems: + self[k] = v + if isinstance(v[0],ParseResults): + v[0].__parent = wkref(self) + + self.__toklist += other.__toklist + self.__accumNames.update( other.__accumNames ) + return self + + def __radd__(self, other): + if isinstance(other,int) and other == 0: + # useful for merging many ParseResults using sum() builtin + return self.copy() + else: + # this may raise a TypeError - so be it + return other + self + + def __repr__( self ): + return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) + + def __str__( self ): + return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' + + def _asStringList( self, sep='' ): + out = [] + for item in self.__toklist: + if out and sep: + out.append(sep) + if isinstance( item, ParseResults ): + out += item._asStringList() + else: + out.append( _ustr(item) ) + return out + + def asList( self ): + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. + + Example:: + patt = OneOrMore(Word(alphas)) + result = patt.parseString("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] + + # Use asList() to create an actual list + result_list = result.asList() + print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] + """ + return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] + + def asDict( self ): + """ + Returns the named parse results as a nested dictionary. 
+ + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.asDict() + print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ + if PY_3: + item_fn = self.items + else: + item_fn = self.iteritems + + def toItem(obj): + if isinstance(obj, ParseResults): + if obj.haskeys(): + return obj.asDict() + else: + return [toItem(v) for v in obj] + else: + return obj + + return dict((k,toItem(v)) for k,v in item_fn()) + + def copy( self ): + """ + Returns a new copy of a C{ParseResults} object. + """ + ret = ParseResults( self.__toklist ) + ret.__tokdict = self.__tokdict.copy() + ret.__parent = self.__parent + ret.__accumNames.update( self.__accumNames ) + ret.__name = self.__name + return ret + + def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): + """ + (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. + """ + nl = "\n" + out = [] + namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() + for v in vlist) + nextLevelIndent = indent + " " + + # collapse out indents if formatting is not desired + if not formatted: + indent = "" + nextLevelIndent = "" + nl = "" + + selfTag = None + if doctag is not None: + selfTag = doctag + else: + if self.__name: + selfTag = self.__name + + if not selfTag: + if namedItemsOnly: + return "" + else: + selfTag = "ITEM" + + out += [ nl, indent, "<", selfTag, ">" ] + + for i,res in enumerate(self.__toklist): + if isinstance(res,ParseResults): + if i in namedItems: + out += [ res.asXML(namedItems[i], + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + out += [ res.asXML(None, + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + # individual token, see if there is a name for it + resTag = None + if i in namedItems: + resTag = namedItems[i] + if not resTag: + if namedItemsOnly: + continue + else: + resTag = "ITEM" + xmlBodyText = _xml_escape(_ustr(res)) + out += [ nl, nextLevelIndent, "<", resTag, ">", + xmlBodyText, + "</", resTag, ">" ] + + out += [ nl, indent, "</", selfTag, ">" ] + return "".join(out) + + def __lookup(self,sub): + for k,vlist in self.__tokdict.items(): + for v,loc in vlist: + if sub is v: + return k + return None + + def getName(self): + r""" + Returns the results name for this token expression. Useful when several + different expressions might match at a particular location. 
+ + Example:: + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = OneOrMore(user_data) + + result = user_info.parseString("22 111-22-3333 #221B") + for item in result: + print(item.getName(), ':', item[0]) + prints:: + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ + if self.__name: + return self.__name + elif self.__parent: + par = self.__parent() + if par: + return par.__lookup(self) + else: + return None + elif (len(self) == 1 and + len(self.__tokdict) == 1 and + next(iter(self.__tokdict.values()))[0][1] in (0,-1)): + return next(iter(self.__tokdict.keys())) + else: + return None + + def dump(self, indent='', depth=0, full=True): + """ + Diagnostic method for listing out the contents of a C{ParseResults}. + Accepts an optional C{indent} argument so that this string can be embedded + in a nested display of other data. + + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(result.dump()) + prints:: + ['12', '/', '31', '/', '1999'] + - day: 1999 + - month: 31 + - year: 12 + """ + out = [] + NL = '\n' + out.append( indent+_ustr(self.asList()) ) + if full: + if self.haskeys(): + items = sorted((str(k), v) for k,v in self.items()) + for k,v in items: + if out: + out.append(NL) + out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) + if isinstance(v,ParseResults): + if v: + out.append( v.dump(indent,depth+1) ) + else: + out.append(_ustr(v)) + else: + out.append(repr(v)) + elif any(isinstance(vv,ParseResults) for vv in self): + v = self + for i,vv in enumerate(v): + if isinstance(vv,ParseResults): + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) + else: + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) + + return "".join(out) + + def pprint(self, *args, **kwargs): + """ + Pretty-printer for parsed results as a list, using the C{pprint} module. + Accepts additional positional or keyword args as defined for the + C{pprint.pprint} method. 
(U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) + + Example:: + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(delimitedList(term))) + result = func.parseString("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + prints:: + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ + pprint.pprint(self.asList(), *args, **kwargs) + + # add support for pickle protocol + def __getstate__(self): + return ( self.__toklist, + ( self.__tokdict.copy(), + self.__parent is not None and self.__parent() or None, + self.__accumNames, + self.__name ) ) + + def __setstate__(self,state): + self.__toklist = state[0] + (self.__tokdict, + par, + inAccumNames, + self.__name) = state[1] + self.__accumNames = {} + self.__accumNames.update(inAccumNames) + if par is not None: + self.__parent = wkref(par) + else: + self.__parent = None + + def __getnewargs__(self): + return self.__toklist, self.__name, self.__asList, self.__modal + + def __dir__(self): + return (dir(type(self)) + list(self.keys())) + +MutableMapping.register(ParseResults) + +def col (loc,strg): + """Returns current column within a string, counting newlines as line separators. + The first column is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information + on parsing strings containing C{<TAB>}s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + """ + s = strg + return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) + +def lineno(loc,strg): + """Returns current line number within a string, counting newlines as line separators. + The first line is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information + on parsing strings containing C{<TAB>}s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + """ + return strg.count("\n",0,loc) + 1 + +def line( loc, strg ): + """Returns the line of text containing loc within a string, counting newlines as line separators. 
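+
+       Example (editor's sketch)::
+
+           line(4, "ab\ncd\nef")  # -> 'cd'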
+ """ + lastCR = strg.rfind("\n", 0, loc) + nextCR = strg.find("\n", loc) + if nextCR >= 0: + return strg[lastCR+1:nextCR] + else: + return strg[lastCR+1:] + +def _defaultStartDebugAction( instring, loc, expr ): + print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))) + +def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): + print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) + +def _defaultExceptionDebugAction( instring, loc, expr, exc ): + print ("Exception raised:" + _ustr(exc)) + +def nullDebugAction(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + pass + +# Only works on Python 3.x - nonlocal is toxic to Python 2 installs +#~ 'decorator to trim function calls to match the arity of the target' +#~ def _trim_arity(func, maxargs=3): + #~ if func in singleArgBuiltins: + #~ return lambda s,l,t: func(t) + #~ limit = 0 + #~ foundArity = False + #~ def wrapper(*args): + #~ nonlocal limit,foundArity + #~ while 1: + #~ try: + #~ ret = func(*args[limit:]) + #~ foundArity = True + #~ return ret + #~ except TypeError: + #~ if limit == maxargs or foundArity: + #~ raise + #~ limit += 1 + #~ continue + #~ return wrapper + +# this version is Python 2.x-3.x cross-compatible +'decorator to trim function calls to match the arity of the target' +def _trim_arity(func, maxargs=2): + if func in singleArgBuiltins: + return lambda s,l,t: func(t) + limit = [0] + foundArity = [False] + + # traceback return data structure changed in Py3.5 - normalize back to plain tuples + if system_version[:2] >= (3,5): + def extract_stack(limit=0): + # special handling for Python 3.5.0 - extra deep call stack by 1 + offset = -3 if system_version == (3,5,0) else -2 + frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] + return [frame_summary[:2]] + def extract_tb(tb, limit=0): + frames = traceback.extract_tb(tb, limit=limit) + frame_summary = frames[-1] + return [frame_summary[:2]] + else: + extract_stack = traceback.extract_stack + extract_tb = traceback.extract_tb + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + LINE_DIFF = 6 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
+ this_line = extract_stack(limit=2)[-1] + pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) + + def wrapper(*args): + while 1: + try: + ret = func(*args[limit[0]:]) + foundArity[0] = True + return ret + except TypeError: + # re-raise TypeErrors if they did not come from our arity testing + if foundArity[0]: + raise + else: + try: + tb = sys.exc_info()[-1] + if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: + raise + finally: + del tb + + if limit[0] <= maxargs: + limit[0] += 1 + continue + raise + + # copy func name to wrapper for sensible debug output + func_name = "<parse action>" + try: + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + wrapper.__name__ = func_name + + return wrapper + +class ParserElement(object): + """Abstract base level parser element class.""" + DEFAULT_WHITE_CHARS = " \n\t\r" + verbose_stacktrace = False + + @staticmethod + def setDefaultWhitespaceChars( chars ): + r""" + Overrides the default whitespace chars + + Example:: + # default whitespace chars are space, <TAB> and newline + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.setDefaultWhitespaceChars(" \t") + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + @staticmethod + def inlineLiteralsUsing(cls): + """ + Set class to be used for inclusion of string literals into a parser. + + Example:: + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inlineLiteralsUsing(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] + """ + ParserElement._literalStringClass = cls + + def __init__( self, savelist=False ): + self.parseAction = list() + self.failAction = None + #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall + self.strRepr = None + self.resultsName = None + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS + self.copyDefaultWhiteChars = True + self.mayReturnEmpty = False # used when checking for left-recursion + self.keepTabs = False + self.ignoreExprs = list() + self.debug = False + self.streamlined = False + self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index + self.errmsg = "" + self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) + self.debugActions = ( None, None, None ) #custom debug actions + self.re = None + self.callPreparse = True # used to avoid redundant calls to preParse + self.callDuringTry = False + + def copy( self ): + """ + Make a copy of this C{ParserElement}. Useful for defining different parse actions + for the same parsing pattern, using copies of the original parse element. 
+ + Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") + integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + + print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) + prints:: + [5120, 100, 655360, 268435456] + Equivalent form of C{expr.copy()} is just C{expr()}:: + integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + """ + cpy = copy.copy( self ) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS + return cpy + + def setName( self, name ): + """ + Define name for this expression, makes debugging and exception messages clearer. + + Example:: + Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) + Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + """ + self.name = name + self.errmsg = "Expected " + self.name + if hasattr(self,"exception"): + self.exception.msg = self.errmsg + return self + + def setResultsName( self, name, listAllMatches=False ): + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. + NOTE: this returns a *copy* of the original C{ParserElement} object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + C{expr("name")} in place of C{expr.setResultsName("name")} - + see L{I{__call__}<__call__>}. + + Example:: + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + listAllMatches=True + newself.resultsName = name + newself.modalResults = not listAllMatches + return newself + + def setBreak(self,breakFlag = True): + """Method to invoke the Python pdb debugger when this element is + about to be parsed. Set C{breakFlag} to True to enable, False to + disable. + """ + if breakFlag: + _parseMethod = self._parse + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + pdb.set_trace() + return _parseMethod( instring, loc, doActions, callPreParse ) + breaker._originalParseMethod = _parseMethod + self._parse = breaker + else: + if hasattr(self._parse,"_originalParseMethod"): + self._parse = self._parse._originalParseMethod + return self + + def setParseAction( self, *fns, **kwargs ): + """ + Define one or more actions to perform when successfully matching parse element definition. + Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, + C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object + If the functions in fns modify the tokens, they can return them as the return + value from fn, and the modified list of tokens will replace the original. + Otherwise, fn does not need to return any value. 
+ + Optional keyword arguments: + - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{parseString}<parseString>} for more information + on parsing strings containing C{<TAB>}s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + + Example:: + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + # use parse action to convert to ints at parse time + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + date_str = integer + '/' + integer + '/' + integer + + # note that integer fields are now ints, not strings + date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] + """ + self.parseAction = list(map(_trim_arity, list(fns))) + self.callDuringTry = kwargs.get("callDuringTry", False) + return self + + def addParseAction( self, *fns, **kwargs ): + """ + Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}. + + See examples in L{I{copy}<copy>}. + """ + self.parseAction += list(map(_trim_arity, list(fns))) + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) + return self + + def addCondition(self, *fns, **kwargs): + """Add a boolean predicate function to expression's list of parse actions. See + L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, + functions passed to C{addCondition} need to return boolean success/fail of the condition. + + Optional keyword arguments: + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException + + Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) + """ + msg = kwargs.get("message", "failed user-defined condition") + exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException + for fn in fns: + def pa(s,l,t): + if not bool(_trim_arity(fn)(s,l,t)): + raise exc_type(s,l,msg) + self.parseAction.append(pa) + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) + return self + + def setFailAction( self, fn ): + """Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + C{fn(s,loc,expr,err)} where: + - s = string being parsed + - loc = location where expression match was attempted and failed + - expr = the parse expression that failed + - err = the exception thrown + The function returns no value. 
It may throw C{L{ParseFatalException}} + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def _skipIgnorables( self, instring, loc ): + exprsFound = True + while exprsFound: + exprsFound = False + for e in self.ignoreExprs: + try: + while 1: + loc,dummy = e._parse( instring, loc ) + exprsFound = True + except ParseException: + pass + return loc + + def preParse( self, instring, loc ): + if self.ignoreExprs: + loc = self._skipIgnorables( instring, loc ) + + if self.skipWhitespace: + wt = self.whiteChars + instrlen = len(instring) + while loc < instrlen and instring[loc] in wt: + loc += 1 + + return loc + + def parseImpl( self, instring, loc, doActions=True ): + return loc, [] + + def postParse( self, instring, loc, tokenlist ): + return tokenlist + + #~ @profile + def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): + debugging = ( self.debug ) #and doActions ) + + if debugging or self.failAction: + #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) + if (self.debugActions[0] ): + self.debugActions[0]( instring, loc, self ) + if callPreParse and self.callPreparse: + preloc = self.preParse( instring, loc ) + else: + preloc = loc + tokensStart = preloc + try: + try: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + except IndexError: + raise ParseException( instring, len(instring), self.errmsg, self ) + except ParseBaseException as err: + #~ print ("Exception raised:", err) + if self.debugActions[2]: + self.debugActions[2]( instring, tokensStart, self, err ) + if self.failAction: + self.failAction( instring, tokensStart, self, err ) + raise + else: + if callPreParse and self.callPreparse: + preloc = self.preParse( instring, loc ) + else: + preloc = loc + tokensStart = preloc + if self.mayIndexError or preloc >= len(instring): + try: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + except IndexError: + raise ParseException( instring, len(instring), self.errmsg, self ) + else: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + + tokens = self.postParse( instring, loc, tokens ) + + retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) + if self.parseAction and (doActions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + tokens = fn( instring, tokensStart, retTokens ) + if tokens is not None: + retTokens = ParseResults( tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), + modal=self.modalResults ) + except ParseBaseException as err: + #~ print "Exception raised in user parse action:", err + if (self.debugActions[2] ): + self.debugActions[2]( instring, tokensStart, self, err ) + raise + else: + for fn in self.parseAction: + tokens = fn( instring, tokensStart, retTokens ) + if tokens is not None: + retTokens = ParseResults( tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), + modal=self.modalResults ) + if debugging: + #~ print ("Matched",self,"->",retTokens.asList()) + if (self.debugActions[1] ): + self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) + + return loc, retTokens + + def tryParse( self, instring, loc ): + try: + return self._parse( instring, loc, doActions=False )[0] + except ParseFatalException: + raise ParseException( instring, loc, self.errmsg, self) + + def canParseNext(self, instring, loc): + try: + self.tryParse(instring, loc) + except (ParseException, 
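+                # an IndexError escaping parseImpl is treated the same as a
+                # ParseException here - either way, the expression does not match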
IndexError): + return False + else: + return True + + class _UnboundedCache(object): + def __init__(self): + cache = {} + self.not_in_cache = not_in_cache = object() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + if _OrderedDict is not None: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = _OrderedDict() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(cache) > size: + try: + cache.popitem(False) + except KeyError: + pass + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + else: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = {} + key_fifo = collections.deque([], size) + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(key_fifo) > size: + cache.pop(key_fifo.popleft(), None) + key_fifo.append(key) + + def clear(self): + cache.clear() + key_fifo.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + # argument cache for optimizing repeated calls when backtracking through recursive expressions + packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail + packrat_cache_lock = RLock() + packrat_cache_stats = [0, 0] + + # this method gets repeatedly called during backtracking with the same arguments - + # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression + def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): + HIT, MISS = 0, 1 + lookup = (self, instring, loc, callPreParse, doActions) + with ParserElement.packrat_cache_lock: + cache = ParserElement.packrat_cache + value = cache.get(lookup) + if value is cache.not_in_cache: + ParserElement.packrat_cache_stats[MISS] += 1 + try: + value = self._parseNoCache(instring, loc, doActions, callPreParse) + except ParseBaseException as pe: + # cache a copy of the exception, without the traceback + cache.set(lookup, pe.__class__(*pe.args)) + raise + else: + cache.set(lookup, (value[0], value[1].copy())) + return value + else: + ParserElement.packrat_cache_stats[HIT] += 1 + if isinstance(value, Exception): + raise value + return (value[0], value[1].copy()) + + _parse = _parseNoCache + + @staticmethod + def resetCache(): + ParserElement.packrat_cache.clear() + ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) + + _packratEnabled = False + @staticmethod + def enablePackrat(cache_size_limit=128): + """Enables "packrat" parsing, which adds memoizing to the parsing logic. 
+
+        Repeated parse attempts at the same string location (which happens
+        often in many complex grammars) can immediately return a cached value,
+        instead of re-executing parsing/validating code.  Both valid results
+        and parsing exceptions are memoized.
+
+        Parameters:
+         - cache_size_limit - (default=C{128}) - if an integer value is provided,
+           it will limit the size of the packrat cache; if None is passed, then
+           the cache size will be unbounded; if 0 is passed, the cache will
+           be effectively disabled.
+
+        This speedup may break existing programs that use parse actions that
+        have side-effects.  For this reason, packrat parsing is disabled when
+        you first import pyparsing.  To activate the packrat feature, your
+        program must call the class method C{ParserElement.enablePackrat()}.  If
+        your program uses C{psyco} to "compile as you go", you must call
+        C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
+        Python will crash.  For best results, call C{enablePackrat()} immediately
+        after importing pyparsing.
+
+        Example::
+            import pyparsing
+            pyparsing.ParserElement.enablePackrat()
+        """
+        if not ParserElement._packratEnabled:
+            ParserElement._packratEnabled = True
+            if cache_size_limit is None:
+                ParserElement.packrat_cache = ParserElement._UnboundedCache()
+            else:
+                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+            ParserElement._parse = ParserElement._parseCache
+
+    def parseString( self, instring, parseAll=False ):
+        """
+        Execute the parse expression with the given string.
+        This is the main interface to the client code, once the complete
+        expression has been built.
+
+        If you want the grammar to require that the entire input string be
+        successfully parsed, then set C{parseAll} to True (equivalent to ending
+        the grammar with C{L{StringEnd()}}).
+
+        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+        in order to report proper column numbers in parse actions.
+        If the input string contains tabs and
+        the grammar uses parse actions that use the C{loc} argument to index into the
+        string being parsed, you can ensure you have a consistent view of the input
+        string by:
+         - calling C{parseWithTabs} on your grammar before calling C{parseString}
+           (see L{I{parseWithTabs}<parseWithTabs>})
+         - defining your parse action using the full C{(s,loc,toks)} signature, and
+           referencing the input string using the parse action's C{s} argument
+         - explicitly expanding the tabs in your input string before calling
+           C{parseString}
+
+        Example::
+            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
+            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
+        """
+        ParserElement.resetCache()
+        if not self.streamlined:
+            self.streamline()
+            #~ self.saveAsList = True
+        for e in self.ignoreExprs:
+            e.streamline()
+        if not self.keepTabs:
+            instring = instring.expandtabs()
+        try:
+            loc, tokens = self._parse( instring, 0 )
+            if parseAll:
+                loc = self.preParse( instring, loc )
+                se = Empty() + StringEnd()
+                se._parse( instring, loc )
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+        else:
+            return tokens
+
+    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+        """
+        Scan the input string for expression matches.  Each match will return the
+        matching tokens, start location, and end location.
May be called with optional + C{maxMatches} argument, to clip scanning after 'n' matches are found. If + C{overlap} is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See L{I{parseString}<parseString>} for more information on parsing + strings with embedded tabs. + + Example:: + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens,start,end in Word(alphas).scanString(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = _ustr(instring).expandtabs() + instrlen = len(instring) + loc = 0 + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + try: + while loc <= instrlen and matches < maxMatches: + try: + preloc = preparseFn( instring, loc ) + nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) + except ParseException: + loc = preloc+1 + else: + if nextLoc > loc: + matches += 1 + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn( instring, loc ) + if nextloc > loc: + loc = nextLoc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc+1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def transformString( self, instring ): + """ + Extension to C{L{scanString}}, to modify matching text with modified tokens that may + be returned from a parse action. To use C{transformString}, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking C{transformString()} on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. C{transformString()} returns the resulting transformed string. + + Example:: + wd = Word(alphas) + wd.setParseAction(lambda toks: toks[0].title()) + + print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) + Prints:: + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ + out = [] + lastE = 0 + # force preservation of <TAB>s, to minimize unwanted transformation of string, and to + # keep string locs straight between transformString and scanString + self.keepTabs = True + try: + for t,s,e in self.scanString( instring ): + out.append( instring[lastE:s] ) + if t: + if isinstance(t,ParseResults): + out += t.asList() + elif isinstance(t,list): + out += t + else: + out.append(t) + lastE = e + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join(map(_ustr,_flatten(out))) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def searchString( self, instring, maxMatches=_MAX_INT ): + """ + Another extension to C{L{scanString}}, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + C{maxMatches} argument, to clip searching after 'n' matches are found. 
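+
+        Note that only the matched tokens are returned; the text between matches is
+        discarded, and the start/end locations are dropped.  Use C{L{scanString}}
+        directly if those locations are needed.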
+ + Example:: + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) + prints:: + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + try: + return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): + """ + Generator method to split a string using the given expression as a separator. + May be called with optional C{maxsplit} argument, to limit the number of splits; + and the optional C{includeSeparators} argument (default=C{False}), if the separating + matching text should be included in the split results. + + Example:: + punc = oneOf(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + prints:: + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + splits = 0 + last = 0 + for t,s,e in self.scanString(instring, maxMatches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other ): + """ + Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement + converts them to L{Literal}s by default. + + Example:: + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print (hello, "->", greet.parseString(hello)) + Prints:: + Hello, World! 
-> ['Hello', ',', 'World', '!']
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, other ] )
+
+    def __radd__(self, other ):
+        """
+        Implementation of + operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other + self
+
+    def __sub__(self, other):
+        """
+        Implementation of - operator, returns C{L{And}} with error stop
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return self + And._ErrorStop() + other
+
+    def __rsub__(self, other ):
+        """
+        Implementation of - operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other - self
+
+    def __mul__(self,other):
+        """
+        Implementation of * operator, allows use of C{expr * 3} in place of
+        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
+        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
+        may also include C{None} as in:
+         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+              to C{expr*n + L{ZeroOrMore}(expr)}
+              (read as "at least n instances of C{expr}")
+         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+              (read as "0 to n instances of C{expr}")
+         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+        Note that C{expr*(None,n)} does not raise an exception if
+        more than n exprs exist in the input stream; that is,
+        C{expr*(None,n)} does not enforce a maximum number of expr
+        occurrences.
If this behavior is desired, then write + C{expr*(None,n) + ~expr} + """ + if isinstance(other,int): + minElements, optElements = other,0 + elif isinstance(other,tuple): + other = (other + (None, None))[:2] + if other[0] is None: + other = (0, other[1]) + if isinstance(other[0],int) and other[1] is None: + if other[0] == 0: + return ZeroOrMore(self) + if other[0] == 1: + return OneOrMore(self) + else: + return self*other[0] + ZeroOrMore(self) + elif isinstance(other[0],int) and isinstance(other[1],int): + minElements, optElements = other + optElements -= minElements + else: + raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) + else: + raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) + + if minElements < 0: + raise ValueError("cannot multiply ParserElement by negative value") + if optElements < 0: + raise ValueError("second tuple value must be greater or equal to first tuple value") + if minElements == optElements == 0: + raise ValueError("cannot multiply ParserElement by 0 or (0,0)") + + if (optElements): + def makeOptionalList(n): + if n>1: + return Optional(self + makeOptionalList(n-1)) + else: + return Optional(self) + if minElements: + if minElements == 1: + ret = self + makeOptionalList(optElements) + else: + ret = And([self]*minElements) + makeOptionalList(optElements) + else: + ret = makeOptionalList(optElements) + else: + if minElements == 1: + ret = self + else: + ret = And([self]*minElements) + return ret + + def __rmul__(self, other): + return self.__mul__(other) + + def __or__(self, other ): + """ + Implementation of | operator - returns C{L{MatchFirst}} + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return MatchFirst( [ self, other ] ) + + def __ror__(self, other ): + """ + Implementation of | operator when left operand is not a C{L{ParserElement}} + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other | self + + def __xor__(self, other ): + """ + Implementation of ^ operator - returns C{L{Or}} + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return Or( [ self, other ] ) + + def __rxor__(self, other ): + """ + Implementation of ^ operator when left operand is not a C{L{ParserElement}} + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other ^ self + + def __and__(self, other ): + """ + Implementation of & operator - returns C{L{Each}} + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + 
return None + return Each( [ self, other ] ) + + def __rand__(self, other ): + """ + Implementation of & operator when left operand is not a C{L{ParserElement}} + """ + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other & self + + def __invert__( self ): + """ + Implementation of ~ operator - returns C{L{NotAny}} + """ + return NotAny( self ) + + def __call__(self, name=None): + """ + Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. + + If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be + passed as C{True}. + + If C{name} is omitted, same as calling C{L{copy}}. + + Example:: + # these are equivalent + userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") + userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + """ + if name is not None: + return self.setResultsName(name) + else: + return self.copy() + + def suppress( self ): + """ + Suppresses the output of this C{ParserElement}; useful to keep punctuation from + cluttering up returned output. + """ + return Suppress( self ) + + def leaveWhitespace( self ): + """ + Disables the skipping of whitespace before matching the characters in the + C{ParserElement}'s defined pattern. This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars. + """ + self.skipWhitespace = False + return self + + def setWhitespaceChars( self, chars ): + """ + Overrides the default whitespace chars + """ + self.skipWhitespace = True + self.whiteChars = chars + self.copyDefaultWhiteChars = False + return self + + def parseWithTabs( self ): + """ + Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string. + Must be called before C{parseString} when the input grammar contains elements that + match C{<TAB>} characters. + """ + self.keepTabs = True + return self + + def ignore( self, other ): + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + + Example:: + patt = OneOrMore(Word(alphas)) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] + + patt.ignore(cStyleComment) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] + """ + if isinstance(other, basestring): + other = Suppress(other) + + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + self.ignoreExprs.append(other) + else: + self.ignoreExprs.append( Suppress( other.copy() ) ) + return self + + def setDebugActions( self, startAction, successAction, exceptionAction ): + """ + Enable display of debugging messages while doing pattern matching. + """ + self.debugActions = (startAction or _defaultStartDebugAction, + successAction or _defaultSuccessDebugAction, + exceptionAction or _defaultExceptionDebugAction) + self.debug = True + return self + + def setDebug( self, flag=True ): + """ + Enable display of debugging messages while doing pattern matching. + Set C{flag} to True to enable, False to disable. 
+ + Example:: + wd = Word(alphas).setName("alphaword") + integer = Word(nums).setName("numword") + term = wd | integer + + # turn on debugging for wd + wd.setDebug() + + OneOrMore(term).parseString("abc 123 xyz 890") + + prints:: + Match alphaword at loc 0(1,1) + Matched alphaword -> ['abc'] + Match alphaword at loc 3(1,4) + Exception raised:Expected alphaword (at char 4), (line:1, col:5) + Match alphaword at loc 7(1,8) + Matched alphaword -> ['xyz'] + Match alphaword at loc 11(1,12) + Exception raised:Expected alphaword (at char 12), (line:1, col:13) + Match alphaword at loc 15(1,16) + Exception raised:Expected alphaword (at char 15), (line:1, col:16) + + The output shown is that produced by the default debug actions - custom debug actions can be + specified using L{setDebugActions}. Prior to attempting + to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"} + is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} + message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, + which makes debugging and exception messages easier to understand - for instance, the default + name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. + """ + if flag: + self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) + else: + self.debug = False + return self + + def __str__( self ): + return self.name + + def __repr__( self ): + return _ustr(self) + + def streamline( self ): + self.streamlined = True + self.strRepr = None + return self + + def checkRecursion( self, parseElementList ): + pass + + def validate( self, validateTrace=[] ): + """ + Check defined expressions for valid structure, check for infinite recursive definitions. + """ + self.checkRecursion( [] ) + + def parseFile( self, file_or_filename, parseAll=False ): + """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. + """ + try: + file_contents = file_or_filename.read() + except AttributeError: + with open(file_or_filename, "r") as f: + file_contents = f.read() + try: + return self.parseString(file_contents, parseAll) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc + + def __eq__(self,other): + if isinstance(other, ParserElement): + return self is other or vars(self) == vars(other) + elif isinstance(other, basestring): + return self.matches(other) + else: + return super(ParserElement,self)==other + + def __ne__(self,other): + return not (self == other) + + def __hash__(self): + return hash(id(self)) + + def __req__(self,other): + return self == other + + def __rne__(self,other): + return not (self == other) + + def matches(self, testString, parseAll=True): + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. 
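+
+        Any C{ParseBaseException} raised during the attempt is swallowed, so this
+        method only ever reports True or False; use C{L{parseString}} when the
+        failure details matter.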
+ + Parameters: + - testString - to test against this expression for a match + - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests + + Example:: + expr = Word(nums) + assert expr.matches("100") + """ + try: + self.parseString(_ustr(testString), parseAll=parseAll) + return True + except ParseBaseException: + return False + + def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False): + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. + + Parameters: + - tests - a list of separate test strings, or a multiline string of test strings + - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests + - comment - (default=C{'#'}) - expression for indicating embedded comments in the test + string; pass None to disable comment filtering + - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; + if False, only dump nested list + - printResults - (default=C{True}) prints test output to stdout + - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing + + Returns: a (success, results) tuple, where success indicates that all tests succeeded + (or failed if C{failureTests} is True), and the results contain a list of lines of each + test's output + + Example:: + number_expr = pyparsing_common.number.copy() + + result = number_expr.runTests(''' + # unsigned integer + 100 + # negative integer + -100 + # float with scientific notation + 6.02e23 + # integer with scientific notation + 1e-12 + ''') + print("Success" if result[0] else "Failed!") + + result = number_expr.runTests(''' + # stray character + 100Z + # missing leading digit before '.' + -.100 + # too many '.' + 3.14.159 + ''', failureTests=True) + print("Success" if result[0] else "Failed!") + prints:: + # unsigned integer + 100 + [100] + + # negative integer + -100 + [-100] + + # float with scientific notation + 6.02e23 + [6.02e+23] + + # integer with scientific notation + 1e-12 + [1e-12] + + Success + + # stray character + 100Z + ^ + FAIL: Expected end of text (at char 3), (line:1, col:4) + + # missing leading digit before '.' + -.100 + ^ + FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) + + # too many '.' + 3.14.159 + ^ + FAIL: Expected end of text (at char 4), (line:1, col:5) + + Success + + Each test string must be on a single line. If you want to test a string that spans multiple + lines, create a test like this:: + + expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") + + (Note that this is a raw string literal, you must include the leading 'r'.) 
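+
+        To use the return value programmatically, an illustrative sketch
+        (C{printResults=False} suppresses the printed report)::
+            success, results = Word(nums).runTests('''
+                100
+                200
+                ''', printResults=False)
+            assert success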
+ """ + if isinstance(tests, basestring): + tests = list(map(str.strip, tests.rstrip().splitlines())) + if isinstance(comment, basestring): + comment = Literal(comment) + allResults = [] + comments = [] + success = True + for t in tests: + if comment is not None and comment.matches(t, False) or comments and not t: + comments.append(t) + continue + if not t: + continue + out = ['\n'.join(comments), t] + comments = [] + try: + t = t.replace(r'\n','\n') + result = self.parseString(t, parseAll=parseAll) + out.append(result.dump(full=fullDump)) + success = success and not failureTests + except ParseBaseException as pe: + fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" + if '\n' in t: + out.append(line(pe.loc, t)) + out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) + else: + out.append(' '*pe.loc + '^' + fatal) + out.append("FAIL: " + str(pe)) + success = success and failureTests + result = pe + except Exception as exc: + out.append("FAIL-EXCEPTION: " + str(exc)) + success = success and failureTests + result = exc + + if printResults: + if fullDump: + out.append('') + print('\n'.join(out)) + + allResults.append((t, result)) + + return success, allResults + + +class Token(ParserElement): + """ + Abstract C{ParserElement} subclass, for defining atomic matching patterns. + """ + def __init__( self ): + super(Token,self).__init__( savelist=False ) + + +class Empty(Token): + """ + An empty token, will always match. + """ + def __init__( self ): + super(Empty,self).__init__() + self.name = "Empty" + self.mayReturnEmpty = True + self.mayIndexError = False + + +class NoMatch(Token): + """ + A token that will never match. + """ + def __init__( self ): + super(NoMatch,self).__init__() + self.name = "NoMatch" + self.mayReturnEmpty = True + self.mayIndexError = False + self.errmsg = "Unmatchable token" + + def parseImpl( self, instring, loc, doActions=True ): + raise ParseException(instring, loc, self.errmsg, self) + + +class Literal(Token): + """ + Token to exactly match a specified string. + + Example:: + Literal('blah').parseString('blah') # -> ['blah'] + Literal('blah').parseString('blahfooblah') # -> ['blah'] + Literal('blah').parseString('bla') # -> Exception: Expected "blah" + + For case-insensitive matching, use L{CaselessLiteral}. + + For keyword matching (force word break before and after the matched string), + use L{Keyword} or L{CaselessKeyword}. 
+ """ + def __init__( self, matchString ): + super(Literal,self).__init__() + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Literal; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.__class__ = Empty + self.name = '"%s"' % _ustr(self.match) + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + + # Performance tuning: this routine gets called a *lot* + # if this is a single character match string and the first character matches, + # short-circuit as quickly as possible, and avoid calling startswith + #~ @profile + def parseImpl( self, instring, loc, doActions=True ): + if (instring[loc] == self.firstMatchChar and + (self.matchLen==1 or instring.startswith(self.match,loc)) ): + return loc+self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) +_L = Literal +ParserElement._literalStringClass = Literal + +class Keyword(Token): + """ + Token to exactly match a specified string as a keyword, that is, it must be + immediately followed by a non-keyword character. Compare with C{L{Literal}}: + - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. + - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} + Accepts two optional constructor arguments in addition to the keyword string: + - C{identChars} is a string of characters that would be valid identifier characters, + defaulting to all alphanumerics + "_" and "$" + - C{caseless} allows case-insensitive matching, default is C{False}. + + Example:: + Keyword("start").parseString("start") # -> ['start'] + Keyword("start").parseString("starting") # -> Exception + + For case-insensitive matching, use L{CaselessKeyword}. 
+ """ + DEFAULT_KEYWORD_CHARS = alphanums+"_$" + + def __init__( self, matchString, identChars=None, caseless=False ): + super(Keyword,self).__init__() + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Keyword; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.name = '"%s"' % self.match + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = matchString.upper() + identChars = identChars.upper() + self.identChars = set(identChars) + + def parseImpl( self, instring, loc, doActions=True ): + if self.caseless: + if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and + (loc == 0 or instring[loc-1].upper() not in self.identChars) ): + return loc+self.matchLen, self.match + else: + if (instring[loc] == self.firstMatchChar and + (self.matchLen==1 or instring.startswith(self.match,loc)) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and + (loc == 0 or instring[loc-1] not in self.identChars) ): + return loc+self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + + def copy(self): + c = super(Keyword,self).copy() + c.identChars = Keyword.DEFAULT_KEYWORD_CHARS + return c + + @staticmethod + def setDefaultKeywordChars( chars ): + """Overrides the default Keyword chars + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + +class CaselessLiteral(Literal): + """ + Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for L{CaselessKeyword}.) + """ + def __init__( self, matchString ): + super(CaselessLiteral,self).__init__( matchString.upper() ) + # Preserve the defining literal. + self.returnString = matchString + self.name = "'%s'" % self.returnString + self.errmsg = "Expected " + self.name + + def parseImpl( self, instring, loc, doActions=True ): + if instring[ loc:loc+self.matchLen ].upper() == self.match: + return loc+self.matchLen, self.returnString + raise ParseException(instring, loc, self.errmsg, self) + +class CaselessKeyword(Keyword): + """ + Caseless version of L{Keyword}. + + Example:: + OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] + + (Contrast with example for L{CaselessLiteral}.) + """ + def __init__( self, matchString, identChars=None ): + super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) + + def parseImpl( self, instring, loc, doActions=True ): + if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): + return loc+self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + +class CloseMatch(Token): + """ + A variation on L{Literal} which matches "close" matches, that is, + strings with at most 'n' mismatching characters. 
C{CloseMatch} takes parameters: + - C{match_string} - string to be matched + - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match + + The results from a successful parse will contain the matched text from the input string and the following named results: + - C{mismatches} - a list of the positions within the match_string where mismatches were found + - C{original} - the original match_string used to compare against the input string + + If C{mismatches} is an empty list, then the match was an exact match. + + Example:: + patt = CloseMatch("ATCATCGAATGGA") + patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) + patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) + + # exact match + patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) + + # close match allowing up to 2 mismatches + patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) + patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) + """ + def __init__(self, match_string, maxMismatches=1): + super(CloseMatch,self).__init__() + self.name = match_string + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) + self.mayIndexError = False + self.mayReturnEmpty = False + + def parseImpl( self, instring, loc, doActions=True ): + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): + src,mat = s_m + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = match_stringloc + 1 + results = ParseResults([instring[start:loc]]) + results['original'] = self.match_string + results['mismatches'] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + +class Word(Token): + """ + Token for matching words composed of allowed character sets. + Defined with string containing all allowed initial characters, + an optional string containing allowed body characters (if omitted, + defaults to the initial character set), and an optional minimum, + maximum, and/or exact length. The default value for C{min} is 1 (a + minimum value < 1 is not valid); the default values for C{max} and C{exact} + are 0, meaning no maximum or exact length restriction. An optional + C{excludeChars} parameter can list characters that might be found in + the input C{bodyChars} string; useful to define a word of all printables + except for one or two characters, for instance. + + L{srange} is useful for defining custom character set strings for defining + C{Word} expressions, using range notation from regular expression character sets. + + A common mistake is to use C{Word} to match a specific literal string, as in + C{Word("Address")}. Remember that C{Word} uses the string argument to define + I{sets} of matchable characters. This expression would match "Add", "AAA", + "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. + To match an exact literal string, use L{Literal} or L{Keyword}. 
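+
+        For example (an illustrative sketch), C{srange("[a-z0-9_]")} expands to the
+        string of all lowercase letters, digits, and underscore, ready to pass as a
+        C{Word} argument.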
+ + pyparsing includes helper strings for building Words: + - L{alphas} + - L{nums} + - L{alphanums} + - L{hexnums} + - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) + - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) + - L{printables} (any non-whitespace character) + + Example:: + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capital_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums+'-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, excludeChars=",") + """ + def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): + super(Word,self).__init__() + if excludeChars: + initChars = ''.join(c for c in initChars if c not in excludeChars) + if bodyChars: + bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) + self.initCharsOrig = initChars + self.initChars = set(initChars) + if bodyChars : + self.bodyCharsOrig = bodyChars + self.bodyChars = set(bodyChars) + else: + self.bodyCharsOrig = initChars + self.bodyChars = set(initChars) + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.asKeyword = asKeyword + + if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): + if self.bodyCharsOrig == self.initCharsOrig: + self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) + elif len(self.initCharsOrig) == 1: + self.reString = "%s[%s]*" % \ + (re.escape(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) + else: + self.reString = "[%s][%s]*" % \ + (_escapeRegexRangeChars(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) + if self.asKeyword: + self.reString = r"\b"+self.reString+r"\b" + try: + self.re = re.compile( self.reString ) + except Exception: + self.re = None + + def parseImpl( self, instring, loc, doActions=True ): + if self.re: + result = self.re.match(instring,loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + + if not(instring[ loc ] in self.initChars): + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + instrlen = len(instring) + bodychars = self.bodyChars + maxloc = start + self.maxLen + maxloc = min( maxloc, instrlen ) + while loc < maxloc and instring[loc] in bodychars: + loc += 1 + + throwException = False + if loc - start < self.minLen: + throwException = True + if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: + throwException = True + if self.asKeyword: + if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars): + throwException = True + + if throwException: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, 
instring[start:loc] + + def __str__( self ): + try: + return super(Word,self).__str__() + except Exception: + pass + + + if self.strRepr is None: + + def charsAsStr(s): + if len(s)>4: + return s[:4]+"..." + else: + return s + + if ( self.initCharsOrig != self.bodyCharsOrig ): + self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) + else: + self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) + + return self.strRepr + + +class Regex(Token): + r""" + Token for matching strings that match a given regular expression. + Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. + If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as + named parse results. + + Example:: + realnum = Regex(r"[+-]?\d+\.\d*") + date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') + # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + """ + compiledREtype = type(re.compile("[A-Z]")) + def __init__( self, pattern, flags=0): + """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.""" + super(Regex,self).__init__() + + if isinstance(pattern, basestring): + if not pattern: + warnings.warn("null string passed to Regex; use Empty() instead", + SyntaxWarning, stacklevel=2) + + self.pattern = pattern + self.flags = flags + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + except sre_constants.error: + warnings.warn("invalid pattern (%s) passed to Regex" % pattern, + SyntaxWarning, stacklevel=2) + raise + + elif isinstance(pattern, Regex.compiledREtype): + self.re = pattern + self.pattern = \ + self.reString = str(pattern) + self.flags = flags + + else: + raise ValueError("Regex may only be constructed with a string or a compiled RE object") + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + result = self.re.match(instring,loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + d = result.groupdict() + ret = ParseResults(result.group()) + if d: + for k in d: + ret[k] = d[k] + return loc,ret + + def __str__( self ): + try: + return super(Regex,self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "Re:(%s)" % repr(self.pattern) + + return self.strRepr + + +class QuotedString(Token): + r""" + Token for matching strings that are delimited by quoting characters. 
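+
+        Both the opening and closing delimiters may be multi-character strings; by
+        default the quote marks (and any escape sequences) are removed from the
+        returned text.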
+
+        Defined with the following parameters:
+         - quoteChar - string of one or more characters defining the quote delimiting string
+         - escChar - character to escape quotes, typically backslash (default=C{None})
+         - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
+         - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
+         - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
+         - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
+         - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
+
+        Example::
+            qs = QuotedString('"')
+            print(qs.searchString('lsjdf "This is the quote" sldjf'))
+            complex_qs = QuotedString('{{', endQuoteChar='}}')
+            print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+            sql_qs = QuotedString('"', escQuote='""')
+            print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+        prints::
+            [['This is the quote']]
+            [['This is the "quote"']]
+            [['This is the quote with "embedded" quotes']]
+        """
+    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+        super(QuotedString,self).__init__()
+
+        # remove white space from quote chars - won't work anyway
+        quoteChar = quoteChar.strip()
+        if not quoteChar:
+            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+            raise SyntaxError()
+
+        if endQuoteChar is None:
+            endQuoteChar = quoteChar
+        else:
+            endQuoteChar = endQuoteChar.strip()
+            if not endQuoteChar:
+                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+                raise SyntaxError()
+
+        self.quoteChar = quoteChar
+        self.quoteCharLen = len(quoteChar)
+        self.firstQuoteChar = quoteChar[0]
+        self.endQuoteChar = endQuoteChar
+        self.endQuoteCharLen = len(endQuoteChar)
+        self.escChar = escChar
+        self.escQuote = escQuote
+        self.unquoteResults = unquoteResults
+        self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+        if multiline:
+            self.flags = re.MULTILINE | re.DOTALL
+            self.pattern = r'%s(?:[^%s%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        else:
+            self.flags = 0
+            self.pattern = r'%s(?:[^%s\n\r%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        if len(self.endQuoteChar) > 1:
+            self.pattern += (
+                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
+                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+                )
+        if escQuote:
+            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+        if escChar:
+            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+        try:
+            self.re = re.compile(self.pattern, self.flags)
+            self.reString = self.pattern
+        except sre_constants.error:
+            warnings.warn("invalid pattern (%s) passed to QuotedString" % self.pattern,
+                SyntaxWarning, stacklevel=2)
+            raise
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def
parseImpl( self, instring, loc, doActions=True ): + result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.group() + + if self.unquoteResults: + + # strip off quotes + ret = ret[self.quoteCharLen:-self.endQuoteCharLen] + + if isinstance(ret,basestring): + # replace escaped whitespace + if '\\' in ret and self.convertWhitespaceEscapes: + ws_map = { + r'\t' : '\t', + r'\n' : '\n', + r'\f' : '\f', + r'\r' : '\r', + } + for wslit,wschar in ws_map.items(): + ret = ret.replace(wslit, wschar) + + # replace escaped characters + if self.escChar: + ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) + + # replace escaped quotes + if self.escQuote: + ret = ret.replace(self.escQuote, self.endQuoteChar) + + return loc, ret + + def __str__( self ): + try: + return super(QuotedString,self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) + + return self.strRepr + + +class CharsNotIn(Token): + """ + Token for matching words composed of characters I{not} in a given set (will + include whitespace in matched characters if not listed in the provided exclusion set - see example). + Defined with string containing all disallowed characters, and an optional + minimum, maximum, and/or exact length. The default value for C{min} is 1 (a + minimum value < 1 is not valid); the default values for C{max} and C{exact} + are 0, meaning no maximum or exact length restriction. + + Example:: + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) + prints:: + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + def __init__( self, notChars, min=1, max=0, exact=0 ): + super(CharsNotIn,self).__init__() + self.skipWhitespace = False + self.notChars = notChars + + if min < 1: + raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = ( self.minLen == 0 ) + self.mayIndexError = False + + def parseImpl( self, instring, loc, doActions=True ): + if instring[loc] in self.notChars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + notchars = self.notChars + maxlen = min( start+self.maxLen, len(instring) ) + while loc < maxlen and \ + (instring[loc] not in notchars): + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + def __str__( self ): + try: + return super(CharsNotIn, self).__str__() + except Exception: + pass + + if self.strRepr is None: + if len(self.notChars) > 4: + self.strRepr = "!W:(%s...)" % self.notChars[:4] + else: + self.strRepr = "!W:(%s)" % self.notChars + + return self.strRepr + +class White(Token): + """ + Special matching class for matching whitespace. Normally, whitespace is ignored + by pyparsing grammars. This class is included when some whitespace structures + are significant. Define with a string containing the whitespace characters to be + matched; default is C{" \\t\\r\\n"}. 
Also takes optional C{min}, C{max}, and C{exact} arguments, + as defined for the C{L{Word}} class. + """ + whiteStrs = { + " " : "<SPC>", + "\t": "<TAB>", + "\n": "<LF>", + "\r": "<CR>", + "\f": "<FF>", + } + def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): + super(White,self).__init__() + self.matchWhite = ws + self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) + #~ self.leaveWhitespace() + self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) + self.mayReturnEmpty = True + self.errmsg = "Expected " + self.name + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + def parseImpl( self, instring, loc, doActions=True ): + if not(instring[ loc ] in self.matchWhite): + raise ParseException(instring, loc, self.errmsg, self) + start = loc + loc += 1 + maxloc = start + self.maxLen + maxloc = min( maxloc, len(instring) ) + while loc < maxloc and instring[loc] in self.matchWhite: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class _PositionToken(Token): + def __init__( self ): + super(_PositionToken,self).__init__() + self.name=self.__class__.__name__ + self.mayReturnEmpty = True + self.mayIndexError = False + +class GoToColumn(_PositionToken): + """ + Token to advance to a specific column of input text; useful for tabular report scraping. + """ + def __init__( self, colno ): + super(GoToColumn,self).__init__() + self.col = colno + + def preParse( self, instring, loc ): + if col(loc,instring) != self.col: + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables( instring, loc ) + while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : + loc += 1 + return loc + + def parseImpl( self, instring, loc, doActions=True ): + thiscol = col( loc, instring ) + if thiscol > self.col: + raise ParseException( instring, loc, "Text not in expected column", self ) + newloc = loc + self.col - thiscol + ret = instring[ loc: newloc ] + return newloc, ret + + +class LineStart(_PositionToken): + """ + Matches if current position is at the beginning of a line within the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (LineStart() + 'AAA' + restOfLine).searchString(test): + print(t) + + Prints:: + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + def __init__( self ): + super(LineStart,self).__init__() + self.errmsg = "Expected start of line" + + def parseImpl( self, instring, loc, doActions=True ): + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) + +class LineEnd(_PositionToken): + """ + Matches if current position is at the end of a line within the parse string + """ + def __init__( self ): + super(LineEnd,self).__init__() + self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) + self.errmsg = "Expected end of line" + + def parseImpl( self, instring, loc, doActions=True ): + if loc<len(instring): + if instring[loc] == "\n": + return loc+1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc+1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + +class StringStart(_PositionToken): + """ + Matches if current position is at the beginning of 
the parse string + """ + def __init__( self ): + super(StringStart,self).__init__() + self.errmsg = "Expected start of text" + + def parseImpl( self, instring, loc, doActions=True ): + if loc != 0: + # see if entire string up to here is just whitespace and ignoreables + if loc != self.preParse( instring, 0 ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + +class StringEnd(_PositionToken): + """ + Matches if current position is at the end of the parse string + """ + def __init__( self ): + super(StringEnd,self).__init__() + self.errmsg = "Expected end of text" + + def parseImpl( self, instring, loc, doActions=True ): + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc+1, [] + elif loc > len(instring): + return loc, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + +class WordStart(_PositionToken): + """ + Matches if the current position is at the beginning of a Word, and + is not preceded by any character in a given set of C{wordChars} + (default=C{printables}). To emulate the C{\b} behavior of regular expressions, + use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of + the string being parsed, or at the beginning of a line. + """ + def __init__(self, wordChars = printables): + super(WordStart,self).__init__() + self.wordChars = set(wordChars) + self.errmsg = "Not at the start of a word" + + def parseImpl(self, instring, loc, doActions=True ): + if loc != 0: + if (instring[loc-1] in self.wordChars or + instring[loc] not in self.wordChars): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + +class WordEnd(_PositionToken): + """ + Matches if the current position is at the end of a Word, and + is not followed by any character in a given set of C{wordChars} + (default=C{printables}). To emulate the C{\b} behavior of regular expressions, + use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of + the string being parsed, or at the end of a line. + """ + def __init__(self, wordChars = printables): + super(WordEnd,self).__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.errmsg = "Not at the end of a word" + + def parseImpl(self, instring, loc, doActions=True ): + instrlen = len(instring) + if instrlen>0 and loc<instrlen: + if (instring[loc] in self.wordChars or + instring[loc-1] not in self.wordChars): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class ParseExpression(ParserElement): + """ + Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
+ """ + def __init__( self, exprs, savelist = False ): + super(ParseExpression,self).__init__(savelist) + if isinstance( exprs, _generatorType ): + exprs = list(exprs) + + if isinstance( exprs, basestring ): + self.exprs = [ ParserElement._literalStringClass( exprs ) ] + elif isinstance( exprs, Iterable ): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if all(isinstance(expr, basestring) for expr in exprs): + exprs = map(ParserElement._literalStringClass, exprs) + self.exprs = list(exprs) + else: + try: + self.exprs = list( exprs ) + except TypeError: + self.exprs = [ exprs ] + self.callPreparse = False + + def __getitem__( self, i ): + return self.exprs[i] + + def append( self, other ): + self.exprs.append( other ) + self.strRepr = None + return self + + def leaveWhitespace( self ): + """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on + all contained expressions.""" + self.skipWhitespace = False + self.exprs = [ e.copy() for e in self.exprs ] + for e in self.exprs: + e.leaveWhitespace() + return self + + def ignore( self, other ): + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + super( ParseExpression, self).ignore( other ) + for e in self.exprs: + e.ignore( self.ignoreExprs[-1] ) + else: + super( ParseExpression, self).ignore( other ) + for e in self.exprs: + e.ignore( self.ignoreExprs[-1] ) + return self + + def __str__( self ): + try: + return super(ParseExpression,self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) + return self.strRepr + + def streamline( self ): + super(ParseExpression,self).streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for Or's and MatchFirst's) + if ( len(self.exprs) == 2 ): + other = self.exprs[0] + if ( isinstance( other, self.__class__ ) and + not(other.parseAction) and + other.resultsName is None and + not other.debug ): + self.exprs = other.exprs[:] + [ self.exprs[1] ] + self.strRepr = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if ( isinstance( other, self.__class__ ) and + not(other.parseAction) and + other.resultsName is None and + not other.debug ): + self.exprs = self.exprs[:-1] + other.exprs[:] + self.strRepr = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = "Expected " + _ustr(self) + + return self + + def setResultsName( self, name, listAllMatches=False ): + ret = super(ParseExpression,self).setResultsName(name,listAllMatches) + return ret + + def validate( self, validateTrace=[] ): + tmp = validateTrace[:]+[self] + for e in self.exprs: + e.validate(tmp) + self.checkRecursion( [] ) + + def copy(self): + ret = super(ParseExpression,self).copy() + ret.exprs = [e.copy() for e in self.exprs] + return ret + +class And(ParseExpression): + """ + Requires all given C{ParseExpression}s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the C{'+'} operator. + May also be constructed using the C{'-'} operator, which will suppress backtracking. 
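+
+    As an illustrative sketch (editor's addition, using only names defined in
+    this module), the C{'-'} operator locks in a partial match so that later
+    failures raise C{ParseSyntaxException} instead of backtracking::
+        ident = Word(alphas)
+        # once '(' is matched, a missing ')' is reported as a syntax error
+        paren_group = Literal("(") - ident - Literal(")")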
+ + Example:: + integer = Word(nums) + name_expr = OneOrMore(Word(alphas)) + + expr = And([integer("id"),name_expr("name"),integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs): + super(And._ErrorStop,self).__init__(*args, **kwargs) + self.name = '-' + self.leaveWhitespace() + + def __init__( self, exprs, savelist = True ): + super(And,self).__init__(exprs, savelist) + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + self.setWhitespaceChars( self.exprs[0].whiteChars ) + self.skipWhitespace = self.exprs[0].skipWhitespace + self.callPreparse = True + + def parseImpl( self, instring, loc, doActions=True ): + # pass False as last arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) + errorStop = False + for e in self.exprs[1:]: + if isinstance(e, And._ErrorStop): + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse( instring, loc, doActions ) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException(instring, len(instring), self.errmsg, self) + else: + loc, exprtokens = e._parse( instring, loc, doActions ) + if exprtokens or exprtokens.haskeys(): + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + return self.append( other ) #And( [ self, other ] ) + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + if not e.mayReturnEmpty: + break + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + +class Or(ParseExpression): + """ + Requires that at least one C{ParseExpression} is found. + If two expressions match, the expression that matches the longest string will be used. + May be constructed using the C{'^'} operator. + + Example:: + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) + print(number.searchString("123 3.1416 789")) + prints:: + [['123'], ['3.1416'], ['789']] + """ + def __init__( self, exprs, savelist = False ): + super(Or,self).__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + maxExcLoc = -1 + maxException = None + matches = [] + for e in self.exprs: + try: + loc2 = e.tryParse( instring, loc ) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException(instring,len(instring),e.errmsg,self) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + matches.sort(key=lambda x: -x[0]) + for _,e in matches: + try: + return e._parse( instring, loc, doActions ) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException(instring, loc, "no defined alternatives to match", self) + + + def __ixor__(self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + return self.append( other ) #Or( [ self, other ] ) + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + + +class MatchFirst(ParseExpression): + """ + Requires that at least one C{ParseExpression} is found. + If two expressions match, the first one listed is the one that will match. + May be constructed using the C{'|'} operator. + + Example:: + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) + print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] + """ + def __init__( self, exprs, savelist = False ): + super(MatchFirst,self).__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + maxExcLoc = -1 + maxException = None + for e in self.exprs: + try: + ret = e._parse( instring, loc, doActions ) + return ret + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException(instring,len(instring),e.errmsg,self) + maxExcLoc = len(instring) + + # only got here if no expression matched, raise exception for match that made it the furthest + else: + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ior__(self, other ): + if isinstance( other, basestring ): + other = ParserElement._literalStringClass( other ) + return self.append( other ) #MatchFirst( [ self, other ] ) + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + + +class Each(ParseExpression): + """ + Requires all given C{ParseExpression}s to be found, but in any order. + Expressions may be separated by whitespace. + May be constructed using the C{'&'} operator. 
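+
+    A compact sketch (editor's addition) of the C{'&'} operator, ahead of the
+    fuller example below::
+        key_expr = Literal("key:") + Word(alphas)
+        value_expr = Literal("value:") + Word(nums)
+        # matches the two parts in either order
+        pair = key_expr & value_expr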
+ + Example:: + color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) + + shape_spec.runTests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + prints:: + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 + """ + def __init__( self, exprs, savelist = True ): + super(Each,self).__init__(exprs, savelist) + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = True + self.initExprGroups = True + + def parseImpl( self, instring, loc, doActions=True ): + if self.initExprGroups: + self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) + opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] + opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)] + self.optionals = opt1 + opt2 + self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] + self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] + self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] + self.required += self.multirequired + self.initExprGroups = False + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + matchOrder = [] + + keepMatching = True + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired + failed = [] + for e in tmpExprs: + try: + tmpLoc = e.tryParse( instring, tmpLoc ) + except ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e),e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + if tmpReqd: + missing = ", ".join(_ustr(e) for e in tmpReqd) + raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) + + # add any unmatched Optionals, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] + + resultlist = [] + for e in matchOrder: + loc,results = e._parse(instring,loc,doActions) + resultlist.append(results) + + finalResults = sum(resultlist, ParseResults([])) + return loc, finalResults + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " & 
".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion( self, parseElementList ): + subRecCheckList = parseElementList[:] + [ self ] + for e in self.exprs: + e.checkRecursion( subRecCheckList ) + + +class ParseElementEnhance(ParserElement): + """ + Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. + """ + def __init__( self, expr, savelist=False ): + super(ParseElementEnhance,self).__init__(savelist) + if isinstance( expr, basestring ): + if issubclass(ParserElement._literalStringClass, Token): + expr = ParserElement._literalStringClass(expr) + else: + expr = ParserElement._literalStringClass(Literal(expr)) + self.expr = expr + self.strRepr = None + if expr is not None: + self.mayIndexError = expr.mayIndexError + self.mayReturnEmpty = expr.mayReturnEmpty + self.setWhitespaceChars( expr.whiteChars ) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def parseImpl( self, instring, loc, doActions=True ): + if self.expr is not None: + return self.expr._parse( instring, loc, doActions, callPreParse=False ) + else: + raise ParseException("",loc,self.errmsg,self) + + def leaveWhitespace( self ): + self.skipWhitespace = False + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.leaveWhitespace() + return self + + def ignore( self, other ): + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + super( ParseElementEnhance, self).ignore( other ) + if self.expr is not None: + self.expr.ignore( self.ignoreExprs[-1] ) + else: + super( ParseElementEnhance, self).ignore( other ) + if self.expr is not None: + self.expr.ignore( self.ignoreExprs[-1] ) + return self + + def streamline( self ): + super(ParseElementEnhance,self).streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def checkRecursion( self, parseElementList ): + if self in parseElementList: + raise RecursiveGrammarException( parseElementList+[self] ) + subRecCheckList = parseElementList[:] + [ self ] + if self.expr is not None: + self.expr.checkRecursion( subRecCheckList ) + + def validate( self, validateTrace=[] ): + tmp = validateTrace[:]+[self] + if self.expr is not None: + self.expr.validate(tmp) + self.checkRecursion( [] ) + + def __str__( self ): + try: + return super(ParseElementEnhance,self).__str__() + except Exception: + pass + + if self.strRepr is None and self.expr is not None: + self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) + return self.strRepr + + +class FollowedBy(ParseElementEnhance): + """ + Lookahead matching of the given parse expression. C{FollowedBy} + does I{not} advance the parsing position within the input string, it only + verifies that the specified parse expression matches at the current + position. C{FollowedBy} always returns a null token list. 
+
+    Example::
+        # use FollowedBy to match a label only if it is followed by a ':'
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+    prints::
+        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+    """
+    def __init__( self, expr ):
+        super(FollowedBy,self).__init__(expr)
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self.expr.tryParse( instring, loc )
+        return loc, []
+
+
+class NotAny(ParseElementEnhance):
+    """
+    Lookahead to disallow matching with the given parse expression.  C{NotAny}
+    does I{not} advance the parsing position within the input string, it only
+    verifies that the specified parse expression does I{not} match at the current
+    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
+    always returns a null token list.  May be constructed using the '~' operator.
+
+    Example::
+        # a minimal sketch (editor's addition): match a name only if it is
+        # not one of the listed keywords
+        keyword = Literal("if") | Literal("else")
+        identifier = ~keyword + Word(alphas)
+
+    """
+    def __init__( self, expr ):
+        super(NotAny,self).__init__(expr)
+        #~ self.leaveWhitespace()
+        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+        self.mayReturnEmpty = True
+        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr.canParseNext(instring, loc):
+            raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+        return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+    def __init__( self, expr, stopOn=None):
+        super(_MultipleMatch, self).__init__(expr)
+        self.saveAsList = True
+        ender = stopOn
+        if isinstance(ender, basestring):
+            ender = ParserElement._literalStringClass(ender)
+        self.not_ender = ~ender if ender is not None else None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self_expr_parse = self.expr._parse
+        self_skip_ignorables = self._skipIgnorables
+        check_ender = self.not_ender is not None
+        if check_ender:
+            try_not_ender = self.not_ender.tryParse
+
+        # must be at least one (but first see if we are the stopOn sentinel;
+        # if so, fail)
+        if check_ender:
+            try_not_ender(instring, loc)
+        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
+        try:
+            hasIgnoreExprs = (not not self.ignoreExprs)
+            while 1:
+                if check_ender:
+                    try_not_ender(instring, loc)
+                if hasIgnoreExprs:
+                    preloc = self_skip_ignorables( instring, loc )
+                else:
+                    preloc = loc
+                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
+                if tmptokens or tmptokens.haskeys():
+                    tokens += tmptokens
+        except (ParseException,IndexError):
+            pass
+
+        return loc, tokens
+
+class OneOrMore(_MultipleMatch):
+    """
+    Repetition of one or more of the given expression.
+
+    Parameters:
+     - expr - expression that must match one or more times
+     - stopOn - (default=C{None}) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition
+          expression)
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: BLACK"
+        OneOrMore(attr_expr).parseString(text).pprint() # Fail!
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + + # use stopOn attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parseString(text).pprint() + """ + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + _ustr(self.expr) + "}..." + + return self.strRepr + +class ZeroOrMore(_MultipleMatch): + """ + Optional repetition of zero or more of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - stopOn - (default=C{None}) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example: similar to L{OneOrMore} + """ + def __init__( self, expr, stopOn=None): + super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + try: + return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) + except (ParseException,IndexError): + return loc, [] + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "[" + _ustr(self.expr) + "]..." + + return self.strRepr + +class _NullToken(object): + def __bool__(self): + return False + __nonzero__ = __bool__ + def __str__(self): + return "" + +_optionalNotMatched = _NullToken() +class Optional(ParseElementEnhance): + """ + Optional matching of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - default (optional) - value to be returned if the optional expression is not found. + + Example:: + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) + zip.runTests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + prints:: + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) + """ + def __init__( self, expr, default=_optionalNotMatched ): + super(Optional,self).__init__( expr, savelist=False ) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + try: + loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) + except (ParseException,IndexError): + if self.defaultValue is not _optionalNotMatched: + if self.expr.resultsName: + tokens = ParseResults([ self.defaultValue ]) + tokens[self.expr.resultsName] = self.defaultValue + else: + tokens = [ self.defaultValue ] + else: + tokens = [] + return loc, tokens + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "[" + _ustr(self.expr) + "]" + + return self.strRepr + +class SkipTo(ParseElementEnhance): + """ + Token for skipping over all undefined text until the matched expression is found. 
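+
+    A minimal sketch (editor's addition): collect free text up to, but not
+    including, the next semicolon::
+        stmt_body = SkipTo(';')
+        stmt = stmt_body('body') + ';'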
+ + Parameters: + - expr - target expression marking the end of the data to be skipped + - include - (default=C{False}) if True, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element list). + - ignore - (default=C{None}) used to define grammars (typically quoted strings and + comments) that might contain false matches to the target expression + - failOn - (default=C{None}) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, + the SkipTo is not a match + + Example:: + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quotedString) + string_data.setParseAction(tokenMap(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.searchString(report): + print tkt.dump() + prints:: + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: 6 + - desc: Intermittent system crash + - issue_num: 101 + - sev: Critical + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: 14 + - desc: Spelling error on Login ('log|n') + - issue_num: 94 + - sev: Cosmetic + ['79', 'Minor', 'System slow when running too many reports', '47'] + - days_open: 47 + - desc: System slow when running too many reports + - issue_num: 79 + - sev: Minor + """ + def __init__( self, other, include=False, ignore=None, failOn=None ): + super( SkipTo, self ).__init__( other ) + self.ignoreExpr = ignore + self.mayReturnEmpty = True + self.mayIndexError = False + self.includeMatch = include + self.asList = False + if isinstance(failOn, basestring): + self.failOn = ParserElement._literalStringClass(failOn) + else: + self.failOn = failOn + self.errmsg = "No match found for "+_ustr(self.expr) + + def parseImpl( self, instring, loc, doActions=True ): + startloc = loc + instrlen = len(instring) + expr = self.expr + expr_parse = self.expr._parse + self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None + self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if self_ignoreExpr_tryParse is not None: + # advance past ignore expressions + while 1: + try: + tmploc = self_ignoreExpr_tryParse(instring, tmploc) + except ParseBaseException: + break + + try: + expr_parse(instring, tmploc, doActions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return 
values
+        loc = tmploc
+        skiptext = instring[startloc:loc]
+        skipresult = ParseResults(skiptext)
+
+        if self.includeMatch:
+            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
+            skipresult += mat
+
+        return loc, skipresult
+
+class Forward(ParseElementEnhance):
+    """
+    Forward declaration of an expression to be defined later -
+    used for recursive grammars, such as algebraic infix notation.
+    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
+
+    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
+    Specifically, '|' has a lower precedence than '<<', so that::
+        fwdExpr << a | b | c
+    will actually be evaluated as::
+        (fwdExpr << a) | b | c
+    thereby leaving b and c out as parseable alternatives.  It is recommended that you
+    explicitly group the values inserted into the C{Forward}::
+        fwdExpr << (a | b | c)
+    Converting to use the '<<=' operator instead will avoid this problem.
+
+    See L{ParseResults.pprint} for an example of a recursive parser created using
+    C{Forward}.
+    """
+    def __init__( self, other=None ):
+        super(Forward,self).__init__( other, savelist=False )
+
+    def __lshift__( self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass(other)
+        self.expr = other
+        self.strRepr = None
+        self.mayIndexError = self.expr.mayIndexError
+        self.mayReturnEmpty = self.expr.mayReturnEmpty
+        self.setWhitespaceChars( self.expr.whiteChars )
+        self.skipWhitespace = self.expr.skipWhitespace
+        self.saveAsList = self.expr.saveAsList
+        self.ignoreExprs.extend(self.expr.ignoreExprs)
+        return self
+
+    def __ilshift__(self, other):
+        return self << other
+
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        return self
+
+    def streamline( self ):
+        if not self.streamlined:
+            self.streamlined = True
+            if self.expr is not None:
+                self.expr.streamline()
+        return self
+
+    def validate( self, validateTrace=[] ):
+        if self not in validateTrace:
+            tmp = validateTrace[:]+[self]
+            if self.expr is not None:
+                self.expr.validate(tmp)
+        self.checkRecursion([])
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+        return self.__class__.__name__ + ": ..."
+
+        # stubbed out for now - creates awful memory and perf issues
+        self._revertClass = self.__class__
+        self.__class__ = _ForwardNoRecurse
+        try:
+            if self.expr is not None:
+                retString = _ustr(self.expr)
+            else:
+                retString = "None"
+        finally:
+            self.__class__ = self._revertClass
+        return self.__class__.__name__ + ": " + retString
+
+    def copy(self):
+        if self.expr is not None:
+            return super(Forward,self).copy()
+        else:
+            ret = Forward()
+            ret <<= self
+            return ret
+
+class _ForwardNoRecurse(Forward):
+    def __str__( self ):
+        return "..."
+
+class TokenConverter(ParseElementEnhance):
+    """
+    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(TokenConverter,self).__init__( expr )#, savelist )
+        self.saveAsList = False
+
+class Combine(TokenConverter):
+    """
+    Converter to concatenate all matching tokens to a single string.
+    By default, the matching patterns must also be contiguous in the input string;
+    this can be disabled by specifying C{'adjacent=False'} in the constructor.
+
+    Example::
+        real = Word(nums) + '.' + Word(nums)
+        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+        # will also erroneously match the following
+        print(real.parseString('3.
1416')) # -> ['3', '.', '1416']
+
+        real = Combine(Word(nums) + '.' + Word(nums))
+        print(real.parseString('3.1416')) # -> ['3.1416']
+        # no match when there are internal spaces
+        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+    """
+    def __init__( self, expr, joinString="", adjacent=True ):
+        super(Combine,self).__init__( expr )
+        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+        if adjacent:
+            self.leaveWhitespace()
+        self.adjacent = adjacent
+        self.skipWhitespace = True
+        self.joinString = joinString
+        self.callPreparse = True
+
+    def ignore( self, other ):
+        if self.adjacent:
+            ParserElement.ignore(self, other)
+        else:
+            super( Combine, self).ignore( other )
+        return self
+
+    def postParse( self, instring, loc, tokenlist ):
+        retToks = tokenlist.copy()
+        del retToks[:]
+        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+        if self.resultsName and retToks.haskeys():
+            return [ retToks ]
+        else:
+            return retToks
+
+class Group(TokenConverter):
+    """
+    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
+
+    Example::
+        ident = Word(alphas)
+        num = Word(nums)
+        term = ident | num
+        func = ident + Optional(delimitedList(term))
+        print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
+
+        func = ident + Group(Optional(delimitedList(term)))
+        print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
+    """
+    def __init__( self, expr ):
+        super(Group,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        return [ tokenlist ]
+
+class Dict(TokenConverter):
+    """
+    Converter to return a repetitive expression as a list, but also as a dictionary.
+    Each element can also be referenced using the first token in the expression as its key.
+    Useful for tabular report scraping when the first column can be used as an item key.
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+        # print attributes as plain groups
+        print(OneOrMore(attr_expr).parseString(text).dump())
+
+        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+        print(result.dump())
+
+        # access named fields as dict entries, or output as dict
+        print(result['shape'])
+        print(result.asDict())
+    prints::
+        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+    See more examples at L{ParseResults} of accessing fields by results name.
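+
+    A further access sketch (editor's addition): the auto-assigned names also
+    support C{get}-style lookups on the returned C{ParseResults}::
+        result.get('color', 'unknown')  # -> 'light blue'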
+ """ + def __init__( self, expr ): + super(Dict,self).__init__( expr ) + self.saveAsList = True + + def postParse( self, instring, loc, tokenlist ): + for i,tok in enumerate(tokenlist): + if len(tok) == 0: + continue + ikey = tok[0] + if isinstance(ikey,int): + ikey = _ustr(tok[0]).strip() + if len(tok)==1: + tokenlist[ikey] = _ParseResultsWithOffset("",i) + elif len(tok)==2 and not isinstance(tok[1],ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) + else: + dictvalue = tok.copy() #ParseResults(i) + del dictvalue[0] + if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) + else: + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) + + if self.resultsName: + return [ tokenlist ] + else: + return tokenlist + + +class Suppress(TokenConverter): + """ + Converter for ignoring the results of a parsed expression. + + Example:: + source = "a, b, c,d" + wd = Word(alphas) + wd_list1 = wd + ZeroOrMore(',' + wd) + print(wd_list1.parseString(source)) + + # often, delimiters that are useful during parsing are just in the + # way afterward - use Suppress to keep them out of the parsed output + wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) + print(wd_list2.parseString(source)) + prints:: + ['a', ',', 'b', ',', 'c', ',', 'd'] + ['a', 'b', 'c', 'd'] + (See also L{delimitedList}.) + """ + def postParse( self, instring, loc, tokenlist ): + return [] + + def suppress( self ): + return self + + +class OnlyOnce(object): + """ + Wrapper for parse actions, to ensure they are only called once. + """ + def __init__(self, methodCall): + self.callable = _trim_arity(methodCall) + self.called = False + def __call__(self,s,l,t): + if not self.called: + results = self.callable(s,l,t) + self.called = True + return results + raise ParseException(s,l,"") + def reset(self): + self.called = False + +def traceParseAction(f): + """ + Decorator for debugging parse actions. + + When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} + When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. + + Example:: + wd = Word(alphas) + + @traceParseAction + def remove_duplicate_chars(tokens): + return ''.join(sorted(set(''.join(tokens)))) + + wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) + print(wds.parseString("slkdjs sld sldd sdlf sdljf")) + prints:: + >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) + <<leaving remove_duplicate_chars (ret: 'dfjkls') + ['dfjkls'] + """ + f = _trim_arity(f) + def z(*paArgs): + thisFunc = f.__name__ + s,l,t = paArgs[-3:] + if len(paArgs)>3: + thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc + sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) + try: + ret = f(*paArgs) + except Exception as exc: + sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) + raise + sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) + return ret + try: + z.__name__ = f.__name__ + except AttributeError: + pass + return z + +# +# global helpers +# +def delimitedList( expr, delim=",", combine=False ): + """ + Helper to define a delimited list of expressions - the delimiter defaults to ','. 
+ By default, the list elements and delimiters can have intervening whitespace, and + comments, but this can be overridden by passing C{combine=True} in the constructor. + If C{combine} is set to C{True}, the matching tokens are returned as a single token + string, with the delimiters included; otherwise, the matching tokens are returned + as a list of tokens, with the delimiters suppressed. + + Example:: + delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] + delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] + """ + dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." + if combine: + return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName) + else: + return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) + +def countedArray( expr, intExpr=None ): + """ + Helper to define a counted list of expressions. + This helper defines a pattern of the form:: + integer expr expr expr... + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. + + If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. + + Example:: + countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] + + # in this parser, the leading integer value is given in binary, + # '10' indicating that 2 values are in the array + binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) + countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] + """ + arrayExpr = Forward() + def countFieldParseAction(s,l,t): + n = t[0] + arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) + return [] + if intExpr is None: + intExpr = Word(nums).setParseAction(lambda t:int(t[0])) + else: + intExpr = intExpr.copy() + intExpr.setName("arrayLen") + intExpr.addParseAction(countFieldParseAction, callDuringTry=True) + return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') + +def _flatten(L): + ret = [] + for i in L: + if isinstance(i,list): + ret.extend(_flatten(i)) + else: + ret.append(i) + return ret + +def matchPreviousLiteral(expr): + """ + Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks + for a 'repeat' of a previous expression. For example:: + first = Word(nums) + second = matchPreviousLiteral(first) + matchExpr = first + ":" + second + will match C{"1:1"}, but not C{"1:2"}. Because this matches a + previous literal, will also match the leading C{"1:1"} in C{"1:10"}. + If this is not desired, use C{matchPreviousExpr}. + Do I{not} use with packrat parsing enabled. + """ + rep = Forward() + def copyTokenToRepeater(s,l,t): + if t: + if len(t) == 1: + rep << t[0] + else: + # flatten t tokens + tflat = _flatten(t.asList()) + rep << And(Literal(tt) for tt in tflat) + else: + rep << Empty() + expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) + return rep + +def matchPreviousExpr(expr): + """ + Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks + for a 'repeat' of a previous expression. For example:: + first = Word(nums) + second = matchPreviousExpr(first) + matchExpr = first + ":" + second + will match C{"1:1"}, but not C{"1:2"}. 
Because this matches by + expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; + the expressions are evaluated first, and then compared, so + C{"1"} is compared with C{"10"}. + Do I{not} use with packrat parsing enabled. + """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + def copyTokenToRepeater(s,l,t): + matchTokens = _flatten(t.asList()) + def mustMatchTheseTokens(s,l,t): + theseTokens = _flatten(t.asList()) + if theseTokens != matchTokens: + raise ParseException("",0,"") + rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) + expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) + return rep + +def _escapeRegexRangeChars(s): + #~ escape these chars: ^-] + for c in r"\^-]": + s = s.replace(c,_bslash+c) + s = s.replace("\n",r"\n") + s = s.replace("\t",r"\t") + return _ustr(s) + +def oneOf( strs, caseless=False, useRegex=True ): + """ + Helper to quickly define a set of alternative Literals, and makes sure to do + longest-first testing when there is a conflict, regardless of the input order, + but returns a C{L{MatchFirst}} for best performance. + + Parameters: + - strs - a string of space-delimited literals, or a collection of string literals + - caseless - (default=C{False}) - treat all literals as caseless + - useRegex - (default=C{True}) - as an optimization, will generate a Regex + object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or + if creating a C{Regex} raises an exception) + + Example:: + comp_oper = oneOf("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) + prints:: + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + if caseless: + isequal = ( lambda a,b: a.upper() == b.upper() ) + masks = ( lambda a,b: b.upper().startswith(a.upper()) ) + parseElementClass = CaselessLiteral + else: + isequal = ( lambda a,b: a == b ) + masks = ( lambda a,b: b.startswith(a) ) + parseElementClass = Literal + + symbols = [] + if isinstance(strs,basestring): + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + warnings.warn("Invalid argument to oneOf, expected string or iterable", + SyntaxWarning, stacklevel=2) + if not symbols: + return NoMatch() + + i = 0 + while i < len(symbols)-1: + cur = symbols[i] + for j,other in enumerate(symbols[i+1:]): + if ( isequal(other, cur) ): + del symbols[i+j+1] + break + elif ( masks(cur, other) ): + del symbols[i+j+1] + symbols.insert(i,other) + cur = other + break + else: + i += 1 + + if not caseless and useRegex: + #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) + try: + if len(symbols)==len("".join(symbols)): + return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) + else: + return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) + except Exception: + warnings.warn("Exception creating Regex for oneOf, building MatchFirst", + SyntaxWarning, stacklevel=2) + + + # last resort, just use MatchFirst + return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) + +def dictOf( key, value ): + """ + Helper to easily and clearly define a dictionary by specifying the respective patterns + for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens + in the proper order. 
The key pattern can include delimiting markers or punctuation,
+    as long as they are suppressed, thereby leaving the significant key text.  The value
+    pattern can include named results, so that the C{Dict} results can include named token
+    fields.
+
+    Example::
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        print(OneOrMore(attr_expr).parseString(text).dump())
+
+        attr_label = label
+        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+        # similar to Dict, but simpler call format
+        result = dictOf(attr_label, attr_value).parseString(text)
+        print(result.dump())
+        print(result['shape'])
+        print(result.shape)  # object attribute access works too
+        print(result.asDict())
+    prints::
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        SQUARE
+        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+    """
+    return Dict( ZeroOrMore( Group ( key + value ) ) )
+
+def originalTextFor(expr, asString=True):
+    """
+    Helper to return the original, untokenized text for a given expression.  Useful to
+    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+    revert separate tokens with intervening whitespace back to the original matching
+    input text. By default, returns a string containing the original parsed text.
+
+    If the optional C{asString} argument is passed as C{False}, then the return value is a
+    C{L{ParseResults}} containing any results names that were originally matched, and a
+    single token containing the original matched text from the input string.  So if
+    the expression passed to C{L{originalTextFor}} contains expressions with defined
+    results names, you must set C{asString} to C{False} if you want to preserve those
+    results name values.
+
+    Example::
+        src = "this is test <b> bold <i>text</i> </b> normal text "
+        for tag in ("b","i"):
+            opener,closer = makeHTMLTags(tag)
+            patt = originalTextFor(opener + SkipTo(closer) + closer)
+            print(patt.searchString(src)[0])
+    prints::
+        ['<b> bold <i>text</i> </b>']
+        ['<i>text</i>']
+    """
+    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+    endlocMarker = locMarker.copy()
+    endlocMarker.callPreparse = False
+    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+    if asString:
+        extractText = lambda s,l,t: s[t._original_start:t._original_end]
+    else:
+        def extractText(s,l,t):
+            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+    matchExpr.setParseAction(extractText)
+    matchExpr.ignoreExprs = expr.ignoreExprs
+    return matchExpr
+
+def ungroup(expr):
+    """
+    Helper to undo pyparsing's default grouping of And expressions, even
+    if all but one are non-empty.
+    """
+    return TokenConverter(expr).setParseAction(lambda t:t[0])
+
+def locatedExpr(expr):
+    """
+    Helper to decorate a returned token with its starting and ending locations in the input string.
+ This helper adds the following results names: + - locn_start = location where matched expression begins + - locn_end = location where matched expression ends + - value = the actual parsed results + + Be careful if the input text contains C{<TAB>} characters, you may want to call + C{L{ParserElement.parseWithTabs}} + + Example:: + wd = Word(alphas) + for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): + print(match) + prints:: + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] + """ + locator = Empty().setParseAction(lambda s,l,t: l) + return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) + + +# convenience constants for positional expressions +empty = Empty().setName("empty") +lineStart = LineStart().setName("lineStart") +lineEnd = LineEnd().setName("lineEnd") +stringStart = StringStart().setName("stringStart") +stringEnd = StringEnd().setName("stringEnd") + +_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) +_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) +_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) +_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) +_charRange = Group(_singleChar + Suppress("-") + _singleChar) +_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" + +def srange(s): + r""" + Helper to easily define string ranges for use in Word construction. Borrows + syntax from regexp '[]' string range definitions:: + srange("[0-9]") -> "0123456789" + srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" + srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" + The input string must be enclosed in []'s, and the returned string is the expanded + character set joined into a single string. + The values enclosed in the []'s may be: + - a single character + - an escaped character with a leading backslash (such as C{\-} or C{\]}) + - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) + (C{\0x##} is also supported for backwards compatibility) + - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) + - a range of any of the above, separated by a dash (C{'a-z'}, etc.) + - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) + """ + _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) + try: + return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) + except Exception: + return "" + +def matchOnlyAtCol(n): + """ + Helper method for defining parse actions that require matching at a specific + column in the input text. + """ + def verifyCol(strg,locn,toks): + if col(locn,strg) != n: + raise ParseException(strg,locn,"matched token not at column %d" % n) + return verifyCol + +def replaceWith(replStr): + """ + Helper method for common parse actions that simply return a literal value. Especially + useful when used with C{L{transformString<ParserElement.transformString>}()}. 
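+
+    For instance (editor's sketch), combined with C{transformString} it can
+    rewrite every match in place::
+        oneOf("N/A NA").setParseAction(replaceWith("0")).transformString("NA 12 N/A")  # -> '0 12 0'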
+
+    Example::
+        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+        term = na | num
+
+        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+    """
+    return lambda s,l,t: [replStr]
+
+def removeQuotes(s,l,t):
+    """
+    Helper parse action for removing quotation marks from parsed quoted strings.
+
+    Example::
+        # by default, quotation marks are included in parsed results
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use removeQuotes to strip quotation marks from parsed results
+        quotedString.setParseAction(removeQuotes)
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+def tokenMap(func, *args):
+    """
+    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
+    args are passed, they are forwarded to the given function as additional arguments after
+    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
+    parsed data to an integer using base 16.
+
+    Example (compare the last example to the one in L{ParserElement.transformString})::
+        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+        hex_ints.runTests('''
+            00 11 22 aa FF 0a 0d 1a
+            ''')
+
+        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+        OneOrMore(upperword).runTests('''
+            my kingdom for a horse
+            ''')
+
+        wd = Word(alphas).setParseAction(tokenMap(str.title))
+        OneOrMore(wd).setParseAction(' '.join).runTests('''
+            now is the winter of our discontent made glorious summer by this sun of york
+            ''')
+    prints::
+        00 11 22 aa FF 0a 0d 1a
+        [0, 17, 34, 170, 255, 10, 13, 26]
+
+        my kingdom for a horse
+        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+        now is the winter of our discontent made glorious summer by this sun of york
+        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+    """
+    def pa(s,l,t):
+        return [func(tokn, *args) for tokn in t]
+
+    try:
+        func_name = getattr(func, '__name__',
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    pa.__name__ = func_name
+
+    return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case.
Deprecated in favor of L{pyparsing_common.downcaseTokens}""" + +def _makeTags(tagStr, xml): + """Internal helper to construct opening and closing tag expressions, given a tag name""" + if isinstance(tagStr,basestring): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas,alphanums+"_-:") + if (xml): + tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) + openTag = Suppress("<") + tagStr("tag") + \ + Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ + Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") + else: + printablesLessRAbrack = "".join(c for c in printables if c not in ">") + tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) + openTag = Suppress("<") + tagStr("tag") + \ + Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ + Optional( Suppress("=") + tagAttrValue ) ))) + \ + Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") + closeTag = Combine(_L("</") + tagStr + ">") + + openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) + closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) + openTag.tag = resname + closeTag.tag = resname + return openTag, closeTag + +def makeHTMLTags(tagStr): + """ + Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches + tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. + + Example:: + text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' + # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple + a,a_end = makeHTMLTags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.searchString(text): + # attributes in the <A> tag (like "href" shown here) are also accessible as named results + print(link.link_text, '->', link.href) + prints:: + pyparsing -> http://pyparsing.wikispaces.com + """ + return _makeTags( tagStr, False ) + +def makeXMLTags(tagStr): + """ + Helper to construct opening and closing tag expressions for XML, given a tag name. Matches + tags only in the given upper/lower case. + + Example: similar to L{makeHTMLTags} + """ + return _makeTags( tagStr, True ) + +def withAttribute(*args,**attrDict): + """ + Helper to create a validating parse action to be used with start tags created + with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag + with a required attribute value, to avoid false matches on common tags such as + C{<TD>} or C{<DIV>}. + + Call C{withAttribute} with a series of attribute names and values. Specify the list + of filter attributes names and values as: + - keyword arguments, as in C{(align="right")}, or + - as an explicit dict with C{**} operator, when an attribute name is also a Python + reserved word, as in C{**{"class":"Customer", "align":"right"}} + - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) + For attribute names with a namespace prefix, you must use the second form. Attribute + names are matched insensitive to upper/lower case. + + If just testing for C{class} (with or without a namespace), use C{L{withClass}}. 
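+
+    A small sketch (editor's addition) of the name-value tuple form::
+        div_cust = div().setParseAction(withAttribute(("class", "Customer")))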
+ + To verify that the attribute exists, but without specifying a value, pass + C{withAttribute.ANY_VALUE} as the value. + + Example:: + html = ''' + <div> + Some text + <div type="grid">1 4 0 1 0</div> + <div type="graph">1,3 2,3 1,1</div> + <div>this has no type</div> + </div> + + ''' + div,div_end = makeHTMLTags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().setParseAction(withAttribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attrDict.items() + attrs = [(k,v) for k,v in attrs] + def pa(s,l,tokens): + for attrName,attrValue in attrs: + if attrName not in tokens: + raise ParseException(s,l,"no matching attribute " + attrName) + if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % + (attrName, tokens[attrName], attrValue)) + return pa +withAttribute.ANY_VALUE = object() + +def withClass(classname, namespace=''): + """ + Simplified version of C{L{withAttribute}} when matching on a div class - made + difficult because C{class} is a reserved word in Python. + + Example:: + html = ''' + <div> + Some text + <div class="grid">1 4 0 1 0</div> + <div class="graph">1,3 2,3 1,1</div> + <div>this <div> has no class</div> + </div> + + ''' + div,div_end = makeHTMLTags("div") + div_grid = div().setParseAction(withClass("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "%s:class" % namespace if namespace else "class" + return withAttribute(**{classattr : classname}) + +opAssoc = _Constants() +opAssoc.LEFT = object() +opAssoc.RIGHT = object() + +def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): + """ + Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary or + binary, left- or right-associative. Parse actions can also be attached + to operator expressions. The generated parser will also recognize the use + of parentheses to override operator precedences (see example below). + + Note: if you define a deep operator list, you may see performance issues + when using infixNotation. See L{ParserElement.enablePackrat} for a + mechanism to potentially improve your parser performance. 
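+    A common pattern (a sketch, not a requirement) is to enable packrat right
+    after importing pyparsing, before any grammar is built::
+        import pyparsing
+        pyparsing.ParserElement.enablePackrat()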
+ + Parameters: + - baseExpr - expression representing the most basic element for the nested + - opList - list of tuples, one for each operator precedence level in the + expression grammar; each tuple is of the form + (opExpr, numTerms, rightLeftAssoc, parseAction), where: + - opExpr is the pyparsing expression for the operator; + may also be a string, which will be converted to a Literal; + if numTerms is 3, opExpr is a tuple of two expressions, for the + two operators separating the 3 terms + - numTerms is the number of terms for this operator (must + be 1, 2, or 3) + - rightLeftAssoc is the indicator whether the operator is + right or left associative, using the pyparsing-defined + constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. + - parseAction is the parse action to be associated with + expressions matching this operator expression (the + parse action tuple member may be omitted); if the parse action + is passed a tuple or list of functions, this is equivalent to + calling C{setParseAction(*fn)} (L{ParserElement.setParseAction}) + - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) + - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) + + Example:: + # simple example of four-function arithmetic with ints and variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infixNotation(integer | varname, + [ + ('-', 1, opAssoc.RIGHT), + (oneOf('* /'), 2, opAssoc.LEFT), + (oneOf('+ -'), 2, opAssoc.LEFT), + ]) + + arith_expr.runTests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', fullDump=False) + prints:: + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + ret = Forward() + lastExpr = baseExpr | ( lpar + ret + rpar ) + for i,operDef in enumerate(opList): + opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] + termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr + if arity == 3: + if opExpr is None or len(opExpr) != 2: + raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") + opExpr1, opExpr2 = opExpr + thisExpr = Forward().setName(termName) + if rightLeftAssoc == opAssoc.LEFT: + if arity == 1: + matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) + else: + matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ + Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + elif rightLeftAssoc == opAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Optional): + opExpr = Optional(opExpr) + matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) + else: + matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ + Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or 
ternary (3)") + else: + raise ValueError("operator must indicate right or left associativity") + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.setParseAction(*pa) + else: + matchExpr.setParseAction(pa) + thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) + lastExpr = thisExpr + ret <<= lastExpr + return ret + +operatorPrecedence = infixNotation +"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" + +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") +quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") + +def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): + """ + Helper method for defining nested lists enclosed in opening and closing + delimiters ("(" and ")" are the default). + + Parameters: + - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression + - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression + - content - expression for items within the nested lists (default=C{None}) + - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) + + If an expression is not provided for the content argument, the nested + expression will capture all whitespace-delimited content between delimiters + as a list of separate values. + + Use the C{ignoreExpr} argument to define expressions that may contain + opening or closing characters that should not be treated as opening + or closing characters for nesting, such as quotedString or a comment + expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. + The default is L{quotedString}, but if no expressions are to be ignored, + then pass C{None} for this argument. 
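+
+    With every argument left at its default, for instance (an illustrative
+    round trip)::
+        nestedExpr().parseString("(a (b c) d)") # -> [['a', ['b', 'c'], 'd']]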
+ + Example:: + data_type = oneOf("void int short long char float double") + decl_data_type = Combine(data_type + Optional(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR,RPAR = map(Suppress, "()") + + code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(cStyleComment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.searchString(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + prints:: + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener,basestring) and isinstance(closer,basestring): + if len(opener) == 1 and len(closer)==1: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS + ).setParseAction(lambda t:t[0].strip())) + else: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + ~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + raise ValueError("opening and closing arguments must be strings if no content expression is given") + ret = Forward() + if ignoreExpr is not None: + ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) + else: + ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) + ret.setName('nested %s%s expression' % (opener,closer)) + return ret + +def indentedBlock(blockStatementExpr, indentStack, indent=True): + """ + Helper method for defining space-delimited indentation blocks, such as + those used to define block statements in Python source code. + + Parameters: + - blockStatementExpr - expression defining syntax of statement that + is repeated within the indented block + - indentStack - list created by caller to manage indentation stack + (multiple statementWithIndentedBlock expressions within a single grammar + should share a common indentStack) + - indent - boolean indicating whether block must be indented beyond the + the current level; set to False for block of left-most statements + (default=C{True}) + + A valid block must contain at least one C{blockStatement}. 
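+    (Note how the example below seeds the stack with C{indentStack = [1]},
+    i.e. column 1, the leftmost position; each nested block then pushes its
+    own starting column onto the stack.)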
+ + Example:: + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group( funcDecl + func_body ) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << ( funcDef | assignment | identifier ) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + prints:: + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + def checkPeerIndent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseFatalException(s,l,"illegal nesting") + raise ParseException(s,l,"not a peer entry") + + def checkSubIndent(s,l,t): + curCol = col(l,s) + if curCol > indentStack[-1]: + indentStack.append( curCol ) + else: + raise ParseException(s,l,"not a subentry") + + def checkUnindent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): + raise ParseException(s,l,"not an unindent") + indentStack.pop() + + NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) + INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') + PEER = Empty().setParseAction(checkPeerIndent).setName('') + UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') + if indent: + smExpr = Group( Optional(NL) + + #~ FollowedBy(blockStatementExpr) + + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) + else: + smExpr = Group( Optional(NL) + + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.setName('indented block') + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") +def replaceHTMLEntity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + +# it's easy to get these comment structures wrong - they're very common, so may as well make them available +cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") +"Comment of the form C{/* ... */}" + +htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") +"Comment of the form C{<!-- ... -->}" + +restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") +dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") +"Comment of the form C{// ... 
(to end of line)}" + +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" + +javaStyleComment = cppStyleComment +"Same as C{L{cppStyleComment}}" + +pythonStyleComment = Regex(r"#.*").setName("Python style comment") +"Comment of the form C{# ... (to end of line)}" + +_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + + Optional( Word(" \t") + + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") +commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. + This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """ + Here are some common low-level expressions that may be useful in jump-starting parser development: + - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>}) + - common L{programming identifiers<identifier>} + - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>}) + - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>} + - L{UUID<uuid>} + - L{comma-separated list<comma_separated_list>} + Parse actions: + - C{L{convertToInteger}} + - C{L{convertToFloat}} + - C{L{convertToDate}} + - C{L{convertToDatetime}} + - C{L{stripHTMLTags}} + - C{L{upcaseTokens}} + - C{L{downcaseTokens}} + + Example:: + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + prints:: + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convertToInteger = tokenMap(int) + """ + Parse action for converting parsed integers to Python int + """ + + convertToFloat = tokenMap(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).setName("integer").setParseAction(convertToInteger) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) + """expression that 
parses a hexadecimal integer, returns an int""" + + signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.addParseAction(lambda t: t[0]/t[-1]) + + mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.addParseAction(sum) + + real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) + """expression that parses a floating point number and returns a float""" + + sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + """expression that parses a floating point number with optional scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) + """any int or real number, returned as float""" + + identifier = Word(alphas+'_', alphanums+'_').setName("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") + "IPv4 address (C{0.0.0.0 - 255.255.255.255})" + + _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") + _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") + ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convertToDate(fmt="%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) + + Example:: + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + prints:: + [datetime.date(1999, 12, 31)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt).date() + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + @staticmethod + def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): + """ + Helper to create a parse action for converting parsed datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) + + Example:: + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + prints:: + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") + "ISO8601 date (C{yyyy-mm-dd})" + + iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") + "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" + + uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") + "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" + + _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() + @staticmethod + def stripHTMLTags(s, l, tokens): + """ + Parse action to remove HTML tags from web page HTML source + + Example:: + # strip HTML links from normal text + text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' + td,td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + + print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' + """ + return pyparsing_common._html_stripper.transformString(tokens[0]) + + _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + + Optional( White(" \t") ) ) ).streamline().setName("commaItem") + comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) + """Parse action to convert tokens to upper case.""" + + downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) + """Parse action to convert tokens to lower case.""" + + +if __name__ == "__main__": + + selectToken = CaselessLiteral("select") + fromToken = CaselessLiteral("from") + + ident = Word(alphas, alphanums + "_$") + + columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + columnNameList = Group(delimitedList(columnName)).setName("columns") + columnSpec 
= ('*' | columnNameList) + + tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + tableNameList = Group(delimitedList(tableName)).setName("tables") + + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") + + # demo runTests method, including embedded comments in test string + simpleSQL.runTests(""" + # '*' as column list and dotted table name + select * from SYS.XYZZY + + # caseless match on "SELECT", and casts back to "select" + SELECT * from XYZZY, ABC + + # list of column names, and mixed case SELECT keyword + Select AA,BB,CC from Sys.dual + + # multiple tables + Select A, B, C from Sys.dual, Table2 + + # invalid SELECT keyword - should fail + Xelect A, B, C from Sys.dual + + # incomplete command - should fail + Select + + # invalid column name - should fail + Select ^^^ frox Sys.dual + + """) + + pyparsing_common.number.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + # any int or real number, returned as float + pyparsing_common.fnumber.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + pyparsing_common.hex_integer.runTests(""" + 100 + FF + """) + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(""" + 12345678-1234-5678-1234-567812345678 + """) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.pyc new file mode 100644 index 0000000..6c1e250 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/six.py b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/six.py new file mode 100644 index 0000000..190c023 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/six.py @@ -0,0 +1,868 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2015 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson <benjamin@python.org>" +__version__ = "1.10.0" + + +# Useful for very coarse version differentiation. 
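+# (Illustrative use, not part of upstream six: downstream code can branch with
+# "if PY2: ..." / "if PY3: ..." instead of re-testing sys.version_info at each
+# call site.)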
+PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." 
+ fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", 
"http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." 
+ attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", 
"urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = 
_importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack 
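+    # e.g. int2byte(65) == b'A': ">B" packs a single unsigned byte
+    # (illustrative note; struct itself is deleted again just below)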
+ del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
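+            # (canonical case: a tty-attached sys.stdout, whose built-in
+            # file object reports a non-None .encoding; an illustrative
+            # note, other encoded file objects qualify as well)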
+ if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. 
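+# (A module object carrying a __path__ is treated as a package by the import
+# machinery; together with the meta path importer appended below, this is what
+# lets "from six.moves import urllib" and friends resolve.)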
+__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/_vendor/six.pyc b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/six.pyc new file mode 100644 index 0000000..3132784 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/_vendor/six.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/archive_util.py b/project/venv/lib/python2.7/site-packages/setuptools/archive_util.py new file mode 100644 index 0000000..8143604 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/archive_util.py @@ -0,0 +1,173 @@ +"""Utilities for extracting common archive formats""" + +import zipfile +import tarfile +import os +import shutil +import posixpath +import contextlib +from distutils.errors import DistutilsError + +from pkg_resources import ensure_directory + +__all__ = [ + "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", + "UnrecognizedFormat", "extraction_drivers", "unpack_directory", +] + + +class UnrecognizedFormat(DistutilsError): + """Couldn't recognize the archive type""" + + +def default_filter(src, dst): + """The default progress/filter callback; returns True for all files""" + return dst + + +def unpack_archive(filename, extract_dir, progress_filter=default_filter, + drivers=None): + """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` + + `progress_filter` is a function taking two arguments: a source path + internal to the archive ('/'-separated), and a filesystem path where it + will be extracted. The callback must return the desired extract path + (which may be the same as the one passed in), or else ``None`` to skip + that file or directory. The callback can thus be used to report on the + progress of the extraction, as well as to filter the items extracted or + alter their extraction paths. + + `drivers`, if supplied, must be a non-empty sequence of functions with the + same signature as this function (minus the `drivers` argument), that raise + ``UnrecognizedFormat`` if they do not support extracting the designated + archive type. The `drivers` are tried in sequence until one is found that + does not raise an error, or until all are exhausted (in which case + ``UnrecognizedFormat`` is raised). If you do not supply a sequence of + drivers, the module's ``extraction_drivers`` constant will be used, which + means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that + order. 
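+
+    A minimal call, with illustrative paths::
+
+        unpack_archive("dist/pkg-1.0.tar.gz", "build/unpacked")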
+ """ + for driver in drivers or extraction_drivers: + try: + driver(filename, extract_dir, progress_filter) + except UnrecognizedFormat: + continue + else: + return + else: + raise UnrecognizedFormat( + "Not a recognized archive type: %s" % filename + ) + + +def unpack_directory(filename, extract_dir, progress_filter=default_filter): + """"Unpack" a directory, using the same interface as for archives + + Raises ``UnrecognizedFormat`` if `filename` is not a directory + """ + if not os.path.isdir(filename): + raise UnrecognizedFormat("%s is not a directory" % filename) + + paths = { + filename: ('', extract_dir), + } + for base, dirs, files in os.walk(filename): + src, dst = paths[base] + for d in dirs: + paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d) + for f in files: + target = os.path.join(dst, f) + target = progress_filter(src + f, target) + if not target: + # skip non-files + continue + ensure_directory(target) + f = os.path.join(base, f) + shutil.copyfile(f, target) + shutil.copystat(f, target) + + +def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): + """Unpack zip `filename` to `extract_dir` + + Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined + by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation + of the `progress_filter` argument. + """ + + if not zipfile.is_zipfile(filename): + raise UnrecognizedFormat("%s is not a zip file" % (filename,)) + + with zipfile.ZipFile(filename) as z: + for info in z.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' in name.split('/'): + continue + + target = os.path.join(extract_dir, *name.split('/')) + target = progress_filter(name, target) + if not target: + continue + if name.endswith('/'): + # directory + ensure_directory(target) + else: + # file + ensure_directory(target) + data = z.read(info.filename) + with open(target, 'wb') as f: + f.write(data) + unix_attributes = info.external_attr >> 16 + if unix_attributes: + os.chmod(target, unix_attributes) + + +def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + + Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined + by ``tarfile.open()``). See ``unpack_archive()`` for an explanation + of the `progress_filter` argument. + """ + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise UnrecognizedFormat( + "%s is not a compressed or uncompressed tar file" % (filename,) + ) + with contextlib.closing(tarobj): + # don't do any chowning! + tarobj.chown = lambda *args: None + for member in tarobj: + name = member.name + # don't extract absolute paths or ones with .. in them + if not name.startswith('/') and '..' 
not in name.split('/'):
+                prelim_dst = os.path.join(extract_dir, *name.split('/'))
+
+                # resolve any links and extract the link targets as normal
+                # files
+                while member is not None and (member.islnk() or member.issym()):
+                    linkpath = member.linkname
+                    if member.issym():
+                        base = posixpath.dirname(member.name)
+                        linkpath = posixpath.join(base, linkpath)
+                        linkpath = posixpath.normpath(linkpath)
+                    member = tarobj._getmember(linkpath)
+
+                if member is not None and (member.isfile() or member.isdir()):
+                    final_dst = progress_filter(name, prelim_dst)
+                    if final_dst:
+                        if final_dst.endswith(os.sep):
+                            final_dst = final_dst[:-1]
+                        try:
+                            # XXX Ugh
+                            tarobj._extract_member(member, final_dst)
+                        except tarfile.ExtractError:
+                            # chown/chmod/mkfifo/mknode/makedev failed
+                            pass
+        return True
+
+
+extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/archive_util.pyc b/project/venv/lib/python2.7/site-packages/setuptools/archive_util.pyc
new file mode 100644
index 0000000..cbd7bd7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/archive_util.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/build_meta.py b/project/venv/lib/python2.7/site-packages/setuptools/build_meta.py
new file mode 100644
index 0000000..47cbcbf
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/build_meta.py
@@ -0,0 +1,245 @@
+"""A PEP 517 interface to setuptools
+
+Previously, when a user or a command line tool (let's call it a "frontend")
+needed to make a request of setuptools to take a certain action, for
+example, generating a list of installation requirements, the frontend would
+call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
+
+PEP 517 defines a different method of interfacing with setuptools. Rather
+than calling "setup.py" directly, the frontend should:
+
+  1. Set the current directory to the directory with a setup.py file
+  2. Import this module into a safe python interpreter (one in which
+     setuptools can potentially set global variables or crash hard).
+  3. Call one of the functions defined in PEP 517.
+
+What each function does is defined in PEP 517. However, here is a "casual"
+definition of the functions (this definition should not be relied on for
+bug reports or API stability):
+
+  - `build_wheel`: build a wheel in the folder and return the basename
+  - `get_requires_for_build_wheel`: get the `setup_requires` to build
+  - `prepare_metadata_for_build_wheel`: get the `install_requires`
+  - `build_sdist`: build an sdist in the folder and return the basename
+  - `get_requires_for_build_sdist`: get the `setup_requires` to build
+
+Again, this is not a formal definition! Just a "taste" of the module.
+""" + +import io +import os +import sys +import tokenize +import shutil +import contextlib + +import setuptools +import distutils + +from pkg_resources import parse_requirements + +__all__ = ['get_requires_for_build_sdist', + 'get_requires_for_build_wheel', + 'prepare_metadata_for_build_wheel', + 'build_wheel', + 'build_sdist', + '__legacy__', + 'SetupRequirementsError'] + +class SetupRequirementsError(BaseException): + def __init__(self, specifiers): + self.specifiers = specifiers + + +class Distribution(setuptools.dist.Distribution): + def fetch_build_eggs(self, specifiers): + specifier_list = list(map(str, parse_requirements(specifiers))) + + raise SetupRequirementsError(specifier_list) + + @classmethod + @contextlib.contextmanager + def patch(cls): + """ + Replace + distutils.dist.Distribution with this class + for the duration of this context. + """ + orig = distutils.core.Distribution + distutils.core.Distribution = cls + try: + yield + finally: + distutils.core.Distribution = orig + + +def _to_str(s): + """ + Convert a filename to a string (on Python 2, explicitly + a byte string, not Unicode) as distutils checks for the + exact type str. + """ + if sys.version_info[0] == 2 and not isinstance(s, str): + # Assume it's Unicode, as that's what the PEP says + # should be provided. + return s.encode(sys.getfilesystemencoding()) + return s + + +def _get_immediate_subdirectories(a_dir): + return [name for name in os.listdir(a_dir) + if os.path.isdir(os.path.join(a_dir, name))] + + +def _file_with_extension(directory, extension): + matching = ( + f for f in os.listdir(directory) + if f.endswith(extension) + ) + file, = matching + return file + + +def _open_setup_script(setup_script): + if not os.path.exists(setup_script): + # Supply a default setup.py + return io.StringIO(u"from setuptools import setup; setup()") + + return getattr(tokenize, 'open', open)(setup_script) + + +class _BuildMetaBackend(object): + + def _fix_config(self, config_settings): + config_settings = config_settings or {} + config_settings.setdefault('--global-option', []) + return config_settings + + def _get_build_requires(self, config_settings, requirements): + config_settings = self._fix_config(config_settings) + + sys.argv = sys.argv[:1] + ['egg_info'] + \ + config_settings["--global-option"] + try: + with Distribution.patch(): + self.run_setup() + except SetupRequirementsError as e: + requirements += e.specifiers + + return requirements + + def run_setup(self, setup_script='setup.py'): + # Note that we can reuse our build directory between calls + # Correctness comes first, then optimization later + __file__ = setup_script + __name__ = '__main__' + + with _open_setup_script(__file__) as f: + code = f.read().replace(r'\r\n', r'\n') + + exec(compile(code, __file__, 'exec'), locals()) + + def get_requires_for_build_wheel(self, config_settings=None): + config_settings = self._fix_config(config_settings) + return self._get_build_requires(config_settings, requirements=['wheel']) + + def get_requires_for_build_sdist(self, config_settings=None): + config_settings = self._fix_config(config_settings) + return self._get_build_requires(config_settings, requirements=[]) + + def prepare_metadata_for_build_wheel(self, metadata_directory, + config_settings=None): + sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', + _to_str(metadata_directory)] + self.run_setup() + + dist_info_directory = metadata_directory + while True: + dist_infos = [f for f in os.listdir(dist_info_directory) + if f.endswith('.dist-info')] + + if 
(len(dist_infos) == 0 and + len(_get_immediate_subdirectories(dist_info_directory)) == 1): + + dist_info_directory = os.path.join( + dist_info_directory, os.listdir(dist_info_directory)[0]) + continue + + assert len(dist_infos) == 1 + break + + # PEP 517 requires that the .dist-info directory be placed in the + # metadata_directory. To comply, we MUST copy the directory to the root + if dist_info_directory != metadata_directory: + shutil.move( + os.path.join(dist_info_directory, dist_infos[0]), + metadata_directory) + shutil.rmtree(dist_info_directory, ignore_errors=True) + + return dist_infos[0] + + def build_wheel(self, wheel_directory, config_settings=None, + metadata_directory=None): + config_settings = self._fix_config(config_settings) + wheel_directory = os.path.abspath(wheel_directory) + sys.argv = sys.argv[:1] + ['bdist_wheel'] + \ + config_settings["--global-option"] + self.run_setup() + if wheel_directory != 'dist': + shutil.rmtree(wheel_directory) + shutil.copytree('dist', wheel_directory) + + return _file_with_extension(wheel_directory, '.whl') + + def build_sdist(self, sdist_directory, config_settings=None): + config_settings = self._fix_config(config_settings) + sdist_directory = os.path.abspath(sdist_directory) + sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \ + config_settings["--global-option"] + \ + ["--dist-dir", sdist_directory] + self.run_setup() + + return _file_with_extension(sdist_directory, '.tar.gz') + + +class _BuildMetaLegacyBackend(_BuildMetaBackend): + """Compatibility backend for setuptools + + This is a version of setuptools.build_meta that endeavors to maintain backwards + compatibility with pre-PEP 517 modes of invocation. It exists as a temporary + bridge between the old packaging mechanism and the new packaging mechanism, + and will eventually be removed. + """ + def run_setup(self, setup_script='setup.py'): + # In order to maintain compatibility with scripts assuming that + # the setup.py script is in a directory on the PYTHONPATH, inject + # '' into sys.path. (pypa/setuptools#1642) + sys_path = list(sys.path) # Save the original path + + script_dir = os.path.dirname(os.path.abspath(setup_script)) + if script_dir not in sys.path: + sys.path.insert(0, script_dir) + + try: + super(_BuildMetaLegacyBackend, + self).run_setup(setup_script=setup_script) + finally: + # While PEP 517 frontends should be calling each hook in a fresh + # subprocess according to the standard (and thus it should not be + # strictly necessary to restore the old sys.path), we'll restore + # the original path so that the path manipulation does not persist + # within the hook after run_setup is called. 
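# Illustrative sketch, not part of setuptools: a PEP 517 frontend drives the
# hooks this module exports roughly like this, from the directory holding
# setup.py (each hook normally runs in a fresh subprocess):
#
#     import setuptools.build_meta as backend
#     reqs = backend.get_requires_for_build_wheel()   # e.g. ['wheel']
#     whl = backend.build_wheel('dist')               # basename of the .whl
#     sdist = backend.build_sdist('dist')             # basename of the .tar.gz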
+ sys.path[:] = sys_path + +# The primary backend +_BACKEND = _BuildMetaBackend() + +get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel +get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist +prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel +build_wheel = _BACKEND.build_wheel +build_sdist = _BACKEND.build_sdist + + +# The legacy backend +__legacy__ = _BuildMetaLegacyBackend() diff --git a/project/venv/lib/python2.7/site-packages/setuptools/build_meta.pyc b/project/venv/lib/python2.7/site-packages/setuptools/build_meta.pyc new file mode 100644 index 0000000..c3fcc2d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/build_meta.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/cli-32.exe b/project/venv/lib/python2.7/site-packages/setuptools/cli-32.exe new file mode 100644 index 0000000..b1487b7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/cli-32.exe differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/cli-64.exe b/project/venv/lib/python2.7/site-packages/setuptools/cli-64.exe new file mode 100644 index 0000000..675e6bf Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/cli-64.exe differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/cli.exe b/project/venv/lib/python2.7/site-packages/setuptools/cli.exe new file mode 100644 index 0000000..b1487b7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/cli.exe differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/__init__.py b/project/venv/lib/python2.7/site-packages/setuptools/command/__init__.py new file mode 100644 index 0000000..fe619e2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/__init__.py @@ -0,0 +1,18 @@ +__all__ = [ + 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop', + 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts', + 'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts', + 'register', 'bdist_wininst', 'upload_docs', 'upload', 'build_clib', + 'dist_info', +] + +from distutils.command.bdist import bdist +import sys + +from setuptools.command import install_scripts + +if 'egg' not in bdist.format_commands: + bdist.format_command['egg'] = ('bdist_egg', "Python .egg file") + bdist.format_commands.append('egg') + +del bdist, sys diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/__init__.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/__init__.pyc new file mode 100644 index 0000000..84dd2c6 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/alias.py b/project/venv/lib/python2.7/site-packages/setuptools/command/alias.py new file mode 100644 index 0000000..4532b1c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/alias.py @@ -0,0 +1,80 @@ +from distutils.errors import DistutilsOptionError + +from setuptools.extern.six.moves import map + +from setuptools.command.setopt import edit_config, option_base, config_file + + +def shquote(arg): + """Quote an argument for later parsing by shlex.split()""" + for c in '"', "'", "\\", "#": + if c in arg: + return repr(arg) + if arg.split() != [arg]: + return repr(arg) + return arg + + +class alias(option_base): + """Define a shortcut that invokes 
one or more commands""" + + description = "define a shortcut to invoke one or more commands" + command_consumes_arguments = True + + user_options = [ + ('remove', 'r', 'remove (unset) the alias'), + ] + option_base.user_options + + boolean_options = option_base.boolean_options + ['remove'] + + def initialize_options(self): + option_base.initialize_options(self) + self.args = None + self.remove = None + + def finalize_options(self): + option_base.finalize_options(self) + if self.remove and len(self.args) != 1: + raise DistutilsOptionError( + "Must specify exactly one argument (the alias name) when " + "using --remove" + ) + + def run(self): + aliases = self.distribution.get_option_dict('aliases') + + if not self.args: + print("Command Aliases") + print("---------------") + for alias in aliases: + print("setup.py alias", format_alias(alias, aliases)) + return + + elif len(self.args) == 1: + alias, = self.args + if self.remove: + command = None + elif alias in aliases: + print("setup.py alias", format_alias(alias, aliases)) + return + else: + print("No alias definition found for %r" % alias) + return + else: + alias = self.args[0] + command = ' '.join(map(shquote, self.args[1:])) + + edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run) + + +def format_alias(name, aliases): + source, command = aliases[name] + if source == config_file('global'): + source = '--global-config ' + elif source == config_file('user'): + source = '--user-config ' + elif source == config_file('local'): + source = '' + else: + source = '--filename=%r' % source + return source + name + ' ' + command diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/alias.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/alias.pyc new file mode 100644 index 0000000..fd1d88f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/alias.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py new file mode 100644 index 0000000..9f8df91 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py @@ -0,0 +1,502 @@ +"""setuptools.command.bdist_egg + +Build .egg distributions""" + +from distutils.errors import DistutilsSetupError +from distutils.dir_util import remove_tree, mkpath +from distutils import log +from types import CodeType +import sys +import os +import re +import textwrap +import marshal + +from setuptools.extern import six + +from pkg_resources import get_build_platform, Distribution, ensure_directory +from pkg_resources import EntryPoint +from setuptools.extension import Library +from setuptools import Command + +try: + # Python 2.7 or >=3.2 + from sysconfig import get_path, get_python_version + + def _get_purelib(): + return get_path("purelib") +except ImportError: + from distutils.sysconfig import get_python_lib, get_python_version + + def _get_purelib(): + return get_python_lib(False) + + +def strip_module(filename): + if '.' 
in filename: + filename = os.path.splitext(filename)[0] + if filename.endswith('module'): + filename = filename[:-6] + return filename + + +def sorted_walk(dir): + """Do os.walk in a reproducible way, + independent of indeterministic filesystem readdir order + """ + for base, dirs, files in os.walk(dir): + dirs.sort() + files.sort() + yield base, dirs, files + + +def write_stub(resource, pyfile): + _stub_template = textwrap.dedent(""" + def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, %r) + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) + __bootstrap__() + """).lstrip() + with open(pyfile, 'w') as f: + f.write(_stub_template % resource) + + +class bdist_egg(Command): + description = "create an \"egg\" distribution" + + user_options = [ + ('bdist-dir=', 'b', + "temporary directory for creating the distribution"), + ('plat-name=', 'p', "platform name to embed in generated filenames " + "(default: %s)" % get_build_platform()), + ('exclude-source-files', None, + "remove all .py files from the generated egg"), + ('keep-temp', 'k', + "keep the pseudo-installation tree around after " + + "creating the distribution archive"), + ('dist-dir=', 'd', + "directory to put final built distributions in"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + ] + + boolean_options = [ + 'keep-temp', 'skip-build', 'exclude-source-files' + ] + + def initialize_options(self): + self.bdist_dir = None + self.plat_name = None + self.keep_temp = 0 + self.dist_dir = None + self.skip_build = 0 + self.egg_output = None + self.exclude_source_files = None + + def finalize_options(self): + ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") + self.egg_info = ei_cmd.egg_info + + if self.bdist_dir is None: + bdist_base = self.get_finalized_command('bdist').bdist_base + self.bdist_dir = os.path.join(bdist_base, 'egg') + + if self.plat_name is None: + self.plat_name = get_build_platform() + + self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) + + if self.egg_output is None: + + # Compute filename of the output egg + basename = Distribution( + None, None, ei_cmd.egg_name, ei_cmd.egg_version, + get_python_version(), + self.distribution.has_ext_modules() and self.plat_name + ).egg_name() + + self.egg_output = os.path.join(self.dist_dir, basename + '.egg') + + def do_install_data(self): + # Hack for packages that install data to install's --install-lib + self.get_finalized_command('install').install_lib = self.bdist_dir + + site_packages = os.path.normcase(os.path.realpath(_get_purelib())) + old, self.distribution.data_files = self.distribution.data_files, [] + + for item in old: + if isinstance(item, tuple) and len(item) == 2: + if os.path.isabs(item[0]): + realpath = os.path.realpath(item[0]) + normalized = os.path.normcase(realpath) + if normalized == site_packages or normalized.startswith( + site_packages + os.sep + ): + item = realpath[len(site_packages) + 1:], item[1] + # XXX else: raise ??? 
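# Illustrative sketch, not part of setuptools: sorted_walk() above is a
# drop-in replacement for os.walk() when output must be reproducible:
#
#     for base, dirs, files in sorted_walk(egg_dir):
#         ...   # dirs and files arrive alphabetically sorted on every run
#
# which is why this command uses it wherever archive contents are generated.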
+ self.distribution.data_files.append(item) + + try: + log.info("installing package data to %s", self.bdist_dir) + self.call_command('install_data', force=0, root=None) + finally: + self.distribution.data_files = old + + def get_outputs(self): + return [self.egg_output] + + def call_command(self, cmdname, **kw): + """Invoke reinitialized command `cmdname` with keyword args""" + for dirname in INSTALL_DIRECTORY_ATTRS: + kw.setdefault(dirname, self.bdist_dir) + kw.setdefault('skip_build', self.skip_build) + kw.setdefault('dry_run', self.dry_run) + cmd = self.reinitialize_command(cmdname, **kw) + self.run_command(cmdname) + return cmd + + def run(self): + # Generate metadata first + self.run_command("egg_info") + # We run install_lib before install_data, because some data hacks + # pull their data path from the install_lib command. + log.info("installing library code to %s", self.bdist_dir) + instcmd = self.get_finalized_command('install') + old_root = instcmd.root + instcmd.root = None + if self.distribution.has_c_libraries() and not self.skip_build: + self.run_command('build_clib') + cmd = self.call_command('install_lib', warn_dir=0) + instcmd.root = old_root + + all_outputs, ext_outputs = self.get_ext_outputs() + self.stubs = [] + to_compile = [] + for (p, ext_name) in enumerate(ext_outputs): + filename, ext = os.path.splitext(ext_name) + pyfile = os.path.join(self.bdist_dir, strip_module(filename) + + '.py') + self.stubs.append(pyfile) + log.info("creating stub loader for %s", ext_name) + if not self.dry_run: + write_stub(os.path.basename(ext_name), pyfile) + to_compile.append(pyfile) + ext_outputs[p] = ext_name.replace(os.sep, '/') + + if to_compile: + cmd.byte_compile(to_compile) + if self.distribution.data_files: + self.do_install_data() + + # Make the EGG-INFO directory + archive_root = self.bdist_dir + egg_info = os.path.join(archive_root, 'EGG-INFO') + self.mkpath(egg_info) + if self.distribution.scripts: + script_dir = os.path.join(egg_info, 'scripts') + log.info("installing scripts to %s", script_dir) + self.call_command('install_scripts', install_dir=script_dir, + no_ep=1) + + self.copy_metadata_to(egg_info) + native_libs = os.path.join(egg_info, "native_libs.txt") + if all_outputs: + log.info("writing %s", native_libs) + if not self.dry_run: + ensure_directory(native_libs) + libs_file = open(native_libs, 'wt') + libs_file.write('\n'.join(all_outputs)) + libs_file.write('\n') + libs_file.close() + elif os.path.isfile(native_libs): + log.info("removing %s", native_libs) + if not self.dry_run: + os.unlink(native_libs) + + write_safety_flag( + os.path.join(archive_root, 'EGG-INFO'), self.zip_safe() + ) + + if os.path.exists(os.path.join(self.egg_info, 'depends.txt')): + log.warn( + "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" + "Use the install_requires/extras_require setup() args instead." 
+ ) + + if self.exclude_source_files: + self.zap_pyfiles() + + # Make the archive + make_zipfile(self.egg_output, archive_root, verbose=self.verbose, + dry_run=self.dry_run, mode=self.gen_header()) + if not self.keep_temp: + remove_tree(self.bdist_dir, dry_run=self.dry_run) + + # Add to 'Distribution.dist_files' so that the "upload" command works + getattr(self.distribution, 'dist_files', []).append( + ('bdist_egg', get_python_version(), self.egg_output)) + + def zap_pyfiles(self): + log.info("Removing .py files from temporary directory") + for base, dirs, files in walk_egg(self.bdist_dir): + for name in files: + path = os.path.join(base, name) + + if name.endswith('.py'): + log.debug("Deleting %s", path) + os.unlink(path) + + if base.endswith('__pycache__'): + path_old = path + + pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc' + m = re.match(pattern, name) + path_new = os.path.join( + base, os.pardir, m.group('name') + '.pyc') + log.info( + "Renaming file from [%s] to [%s]" + % (path_old, path_new)) + try: + os.remove(path_new) + except OSError: + pass + os.rename(path_old, path_new) + + def zip_safe(self): + safe = getattr(self.distribution, 'zip_safe', None) + if safe is not None: + return safe + log.warn("zip_safe flag not set; analyzing archive contents...") + return analyze_egg(self.bdist_dir, self.stubs) + + def gen_header(self): + epm = EntryPoint.parse_map(self.distribution.entry_points or '') + ep = epm.get('setuptools.installation', {}).get('eggsecutable') + if ep is None: + return 'w' # not an eggsecutable, do it the usual way. + + if not ep.attrs or ep.extras: + raise DistutilsSetupError( + "eggsecutable entry point (%r) cannot have 'extras' " + "or refer to a module" % (ep,) + ) + + pyver = sys.version[:3] + pkg = ep.module_name + full = '.'.join(ep.attrs) + base = ep.attrs[0] + basename = os.path.basename(self.egg_output) + + header = ( + "#!/bin/sh\n" + 'if [ `basename $0` = "%(basename)s" ]\n' + 'then exec python%(pyver)s -c "' + "import sys, os; sys.path.insert(0, os.path.abspath('$0')); " + "from %(pkg)s import %(base)s; sys.exit(%(full)s())" + '" "$@"\n' + 'else\n' + ' echo $0 is not the correct name for this egg file.\n' + ' echo Please rename it back to %(basename)s and try again.\n' + ' exec false\n' + 'fi\n' + ) % locals() + + if not self.dry_run: + mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run) + f = open(self.egg_output, 'w') + f.write(header) + f.close() + return 'a' + + def copy_metadata_to(self, target_dir): + "Copy metadata (egg info) to the target_dir" + # normalize the path (so that a forward-slash in egg_info will + # match using startswith below) + norm_egg_info = os.path.normpath(self.egg_info) + prefix = os.path.join(norm_egg_info, '') + for path in self.ei_cmd.filelist.files: + if path.startswith(prefix): + target = os.path.join(target_dir, path[len(prefix):]) + ensure_directory(target) + self.copy_file(path, target) + + def get_ext_outputs(self): + """Get a list of relative paths to C extensions in the output distro""" + + all_outputs = [] + ext_outputs = [] + + paths = {self.bdist_dir: ''} + for base, dirs, files in sorted_walk(self.bdist_dir): + for filename in files: + if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: + all_outputs.append(paths[base] + filename) + for filename in dirs: + paths[os.path.join(base, filename)] = (paths[base] + + filename + '/') + + if self.distribution.has_ext_modules(): + build_cmd = self.get_finalized_command('build_ext') + for ext in build_cmd.extensions: + if isinstance(ext, Library): + 
continue
+                fullname = build_cmd.get_ext_fullname(ext.name)
+                filename = build_cmd.get_ext_filename(fullname)
+                if not os.path.basename(filename).startswith('dl-'):
+                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
+                        ext_outputs.append(filename)
+
+        return all_outputs, ext_outputs
+
+
+NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
+
+
+def walk_egg(egg_dir):
+    """Walk an unpacked egg's contents, skipping the metadata directory"""
+    walker = sorted_walk(egg_dir)
+    base, dirs, files = next(walker)
+    if 'EGG-INFO' in dirs:
+        dirs.remove('EGG-INFO')
+    yield base, dirs, files
+    for bdf in walker:
+        yield bdf
+
+
+def analyze_egg(egg_dir, stubs):
+    # check for existing flag in EGG-INFO
+    for flag, fn in safety_flags.items():
+        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
+            return flag
+    if not can_scan():
+        return False
+    safe = True
+    for base, dirs, files in walk_egg(egg_dir):
+        for name in files:
+            if name.endswith('.py') or name.endswith('.pyw'):
+                continue
+            elif name.endswith('.pyc') or name.endswith('.pyo'):
+                # always scan, even if we already know we're not safe
+                safe = scan_module(egg_dir, base, name, stubs) and safe
+    return safe
+
+
+def write_safety_flag(egg_dir, safe):
+    # Write or remove zip safety flag file(s)
+    for flag, fn in safety_flags.items():
+        fn = os.path.join(egg_dir, fn)
+        if os.path.exists(fn):
+            if safe is None or bool(safe) != flag:
+                os.unlink(fn)
+        elif safe is not None and bool(safe) == flag:
+            f = open(fn, 'wt')
+            f.write('\n')
+            f.close()
+
+
+safety_flags = {
+    True: 'zip-safe',
+    False: 'not-zip-safe',
+}
+
+
+def scan_module(egg_dir, base, name, stubs):
+    """Check whether module possibly uses unsafe-for-zipfile stuff"""
+
+    filename = os.path.join(base, name)
+    if filename[:-1] in stubs:
+        return True  # Extension module
+    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
+    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
+    if six.PY2:
+        skip = 8  # skip magic & date
+    elif sys.version_info < (3, 7):
+        skip = 12  # skip magic & date & file size
+    else:
+        skip = 16  # skip magic & reserved? & date & file size
+    f = open(filename, 'rb')
+    f.read(skip)
+    code = marshal.load(f)
+    f.close()
+    safe = True
+    symbols = dict.fromkeys(iter_symbols(code))
+    for bad in ['__file__', '__path__']:
+        if bad in symbols:
+            log.warn("%s: module references %s", module, bad)
+            safe = False
+    if 'inspect' in symbols:
+        for bad in [
+            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
+            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
+            'getinnerframes', 'getouterframes', 'stack', 'trace'
+        ]:
+            if bad in symbols:
+                log.warn("%s: module MAY be using inspect.%s", module, bad)
+                safe = False
+    return safe
+
+
+def iter_symbols(code):
+    """Yield names and strings used by `code` and its nested code objects"""
+    for name in code.co_names:
+        yield name
+    for const in code.co_consts:
+        if isinstance(const, six.string_types):
+            yield const
+        elif isinstance(const, CodeType):
+            for name in iter_symbols(const):
+                yield name
+
+
+def can_scan():
+    if not sys.platform.startswith('java') and sys.platform != 'cli':
+        # CPython, PyPy, etc.
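# Illustrative sketch, not part of setuptools: what iter_symbols() above
# yields for a small compiled module ('<demo>' is a hypothetical filename):
#
#     code = compile("import inspect\nprint(__file__)", "<demo>", "exec")
#     set(iter_symbols(code))   # includes 'inspect' and '__file__'
#
# scan_module() greps exactly this symbol stream for zip-unsafe references.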
+        return True
+    log.warn("Unable to analyze compiled code on this platform.")
+    log.warn("Please ask the author to include a 'zip_safe'"
+             " setting (either True or False) in the package's setup.py")
+
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+
+INSTALL_DIRECTORY_ATTRS = [
+    'install_lib', 'install_dir', 'install_data', 'install_base'
+]
+
+
+def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
+                 mode='w'):
+    """Create a zip file from all the files under 'base_dir'.  The output
+    is written to 'zip_filename', using the stdlib "zipfile" module.
+    Returns the name of the output zip file.
+    """
+    import zipfile
+
+    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
+
+    def visit(z, dirname, names):
+        for name in names:
+            path = os.path.normpath(os.path.join(dirname, name))
+            if os.path.isfile(path):
+                p = path[len(base_dir) + 1:]
+                if not dry_run:
+                    z.write(path, p)
+                log.debug("adding '%s'", p)
+
+    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
+    if not dry_run:
+        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
+        for dirname, dirs, files in sorted_walk(base_dir):
+            visit(z, dirname, files)
+        z.close()
+    else:
+        for dirname, dirs, files in sorted_walk(base_dir):
+            visit(None, dirname, files)
+    return zip_filename
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.pyc
new file mode 100644
index 0000000..3c423e5
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_rpm.py b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_rpm.py
new file mode 100644
index 0000000..7073092
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_rpm.py
@@ -0,0 +1,43 @@
+import distutils.command.bdist_rpm as orig
+
+
+class bdist_rpm(orig.bdist_rpm):
+    """
+    Override the default bdist_rpm behavior to do the following:
+
+    1. Run egg_info to ensure the name and version are properly calculated.
+    2. Always run 'install' using --single-version-externally-managed to
+       disable eggs in RPM distributions.
+    3. Replace dash with underscore in the version numbers for better RPM
+       compatibility.
+ """ + + def run(self): + # ensure distro name is up-to-date + self.run_command('egg_info') + + orig.bdist_rpm.run(self) + + def _make_spec_file(self): + version = self.distribution.get_version() + rpmversion = version.replace('-', '_') + spec = orig.bdist_rpm._make_spec_file(self) + line23 = '%define version ' + version + line24 = '%define version ' + rpmversion + spec = [ + line.replace( + "Source0: %{name}-%{version}.tar", + "Source0: %{name}-%{unmangled_version}.tar" + ).replace( + "setup.py install ", + "setup.py install --single-version-externally-managed " + ).replace( + "%setup", + "%setup -n %{name}-%{unmangled_version}" + ).replace(line23, line24) + for line in spec + ] + insert_loc = spec.index(line24) + 1 + unmangled_version = "%define unmangled_version " + version + spec.insert(insert_loc, unmangled_version) + return spec diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_rpm.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_rpm.pyc new file mode 100644 index 0000000..3718042 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_rpm.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_wininst.py b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_wininst.py new file mode 100644 index 0000000..073de97 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_wininst.py @@ -0,0 +1,21 @@ +import distutils.command.bdist_wininst as orig + + +class bdist_wininst(orig.bdist_wininst): + def reinitialize_command(self, command, reinit_subcommands=0): + """ + Supplement reinitialize_command to work around + http://bugs.python.org/issue20819 + """ + cmd = self.distribution.reinitialize_command( + command, reinit_subcommands) + if command in ('install', 'install_lib'): + cmd.install_lib = None + return cmd + + def run(self): + self._is_running = True + try: + orig.bdist_wininst.run(self) + finally: + self._is_running = False diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_wininst.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_wininst.pyc new file mode 100644 index 0000000..35a2d86 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/bdist_wininst.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/build_clib.py b/project/venv/lib/python2.7/site-packages/setuptools/command/build_clib.py new file mode 100644 index 0000000..09caff6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/build_clib.py @@ -0,0 +1,98 @@ +import distutils.command.build_clib as orig +from distutils.errors import DistutilsSetupError +from distutils import log +from setuptools.dep_util import newer_pairwise_group + + +class build_clib(orig.build_clib): + """ + Override the default build_clib behaviour to do the following: + + 1. Implement a rudimentary timestamp-based dependency system + so 'compile()' doesn't run every time. + 2. Add more keys to the 'build_info' dictionary: + * obj_deps - specify dependencies for each object compiled. + this should be a dictionary mapping a key + with the source filename to a list of + dependencies. Use an empty string for global + dependencies. + * cflags - specify a list of additional flags to pass to + the compiler. 
+ """ + + def build_libraries(self, libraries): + for (lib_name, build_info) in libraries: + sources = build_info.get('sources') + if sources is None or not isinstance(sources, (list, tuple)): + raise DistutilsSetupError( + "in 'libraries' option (library '%s'), " + "'sources' must be present and must be " + "a list of source filenames" % lib_name) + sources = list(sources) + + log.info("building '%s' library", lib_name) + + # Make sure everything is the correct type. + # obj_deps should be a dictionary of keys as sources + # and a list/tuple of files that are its dependencies. + obj_deps = build_info.get('obj_deps', dict()) + if not isinstance(obj_deps, dict): + raise DistutilsSetupError( + "in 'libraries' option (library '%s'), " + "'obj_deps' must be a dictionary of " + "type 'source: list'" % lib_name) + dependencies = [] + + # Get the global dependencies that are specified by the '' key. + # These will go into every source's dependency list. + global_deps = obj_deps.get('', list()) + if not isinstance(global_deps, (list, tuple)): + raise DistutilsSetupError( + "in 'libraries' option (library '%s'), " + "'obj_deps' must be a dictionary of " + "type 'source: list'" % lib_name) + + # Build the list to be used by newer_pairwise_group + # each source will be auto-added to its dependencies. + for source in sources: + src_deps = [source] + src_deps.extend(global_deps) + extra_deps = obj_deps.get(source, list()) + if not isinstance(extra_deps, (list, tuple)): + raise DistutilsSetupError( + "in 'libraries' option (library '%s'), " + "'obj_deps' must be a dictionary of " + "type 'source: list'" % lib_name) + src_deps.extend(extra_deps) + dependencies.append(src_deps) + + expected_objects = self.compiler.object_filenames( + sources, + output_dir=self.build_temp + ) + + if newer_pairwise_group(dependencies, expected_objects) != ([], []): + # First, compile the source code to object files in the library + # directory. (This should probably change to putting object + # files in a temporary build directory.) + macros = build_info.get('macros') + include_dirs = build_info.get('include_dirs') + cflags = build_info.get('cflags') + objects = self.compiler.compile( + sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + extra_postargs=cflags, + debug=self.debug + ) + + # Now "link" the object files together into a static library. + # (On Unix at least, this isn't really linking -- it just + # builds an archive. Whatever.) 
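# Illustrative sketch, not part of setuptools: the extended build_info keys
# handled above are supplied through setup(); every name below is
# hypothetical:
#
#     setup(libraries=[('foo', {
#         'sources': ['src/foo.c'],
#         'obj_deps': {'': ['src/common.h'],          # deps for every source
#                      'src/foo.c': ['src/foo.h']},   # per-source deps
#         'cflags': ['-O2'],
#     })])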
+ self.compiler.create_static_lib( + expected_objects, + lib_name, + output_dir=self.build_clib, + debug=self.debug + ) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/build_clib.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/build_clib.pyc new file mode 100644 index 0000000..3f0f762 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/build_clib.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/build_ext.py b/project/venv/lib/python2.7/site-packages/setuptools/command/build_ext.py new file mode 100644 index 0000000..60a8a32 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/build_ext.py @@ -0,0 +1,321 @@ +import os +import sys +import itertools +import imp +from distutils.command.build_ext import build_ext as _du_build_ext +from distutils.file_util import copy_file +from distutils.ccompiler import new_compiler +from distutils.sysconfig import customize_compiler, get_config_var +from distutils.errors import DistutilsError +from distutils import log + +from setuptools.extension import Library +from setuptools.extern import six + +try: + # Attempt to use Cython for building extensions, if available + from Cython.Distutils.build_ext import build_ext as _build_ext + # Additionally, assert that the compiler module will load + # also. Ref #1229. + __import__('Cython.Compiler.Main') +except ImportError: + _build_ext = _du_build_ext + +# make sure _config_vars is initialized +get_config_var("LDSHARED") +from distutils.sysconfig import _config_vars as _CONFIG_VARS + + +def _customize_compiler_for_shlib(compiler): + if sys.platform == "darwin": + # building .dylib requires additional compiler flags on OSX; here we + # temporarily substitute the pyconfig.h variables so that distutils' + # 'customize_compiler' uses them before we build the shared libraries. + tmp = _CONFIG_VARS.copy() + try: + # XXX Help! I don't have any idea whether these are right... 
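# Illustrative sketch, not part of setuptools: the try/finally below is the
# usual patch-and-restore idiom for a shared config dict, shown here with
# hypothetical names (shared_config, overrides, do_work):
#
#     saved = shared_config.copy()
#     try:
#         shared_config.update(overrides)   # build with substituted values
#         do_work()
#     finally:
#         shared_config.clear()
#         shared_config.update(saved)       # process-wide state is restored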
+ _CONFIG_VARS['LDSHARED'] = ( + "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup") + _CONFIG_VARS['CCSHARED'] = " -dynamiclib" + _CONFIG_VARS['SO'] = ".dylib" + customize_compiler(compiler) + finally: + _CONFIG_VARS.clear() + _CONFIG_VARS.update(tmp) + else: + customize_compiler(compiler) + + +have_rtld = False +use_stubs = False +libtype = 'shared' + +if sys.platform == "darwin": + use_stubs = True +elif os.name != 'nt': + try: + import dl + use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW') + except ImportError: + pass + +if_dl = lambda s: s if have_rtld else '' + + +def get_abi3_suffix(): + """Return the file extension for an abi3-compliant Extension()""" + for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION): + if '.abi3' in suffix: # Unix + return suffix + elif suffix == '.pyd': # Windows + return suffix + + +class build_ext(_build_ext): + def run(self): + """Build extensions in build directory, then copy if --inplace""" + old_inplace, self.inplace = self.inplace, 0 + _build_ext.run(self) + self.inplace = old_inplace + if old_inplace: + self.copy_extensions_to_source() + + def copy_extensions_to_source(self): + build_py = self.get_finalized_command('build_py') + for ext in self.extensions: + fullname = self.get_ext_fullname(ext.name) + filename = self.get_ext_filename(fullname) + modpath = fullname.split('.') + package = '.'.join(modpath[:-1]) + package_dir = build_py.get_package_dir(package) + dest_filename = os.path.join(package_dir, + os.path.basename(filename)) + src_filename = os.path.join(self.build_lib, filename) + + # Always copy, even if source is older than destination, to ensure + # that the right extensions for the current Python/platform are + # used. + copy_file( + src_filename, dest_filename, verbose=self.verbose, + dry_run=self.dry_run + ) + if ext._needs_stub: + self.write_stub(package_dir or os.curdir, ext, True) + + def get_ext_filename(self, fullname): + filename = _build_ext.get_ext_filename(self, fullname) + if fullname in self.ext_map: + ext = self.ext_map[fullname] + use_abi3 = ( + six.PY3 + and getattr(ext, 'py_limited_api') + and get_abi3_suffix() + ) + if use_abi3: + so_ext = get_config_var('EXT_SUFFIX') + filename = filename[:-len(so_ext)] + filename = filename + get_abi3_suffix() + if isinstance(ext, Library): + fn, ext = os.path.splitext(filename) + return self.shlib_compiler.library_filename(fn, libtype) + elif use_stubs and ext._links_to_dynamic: + d, fn = os.path.split(filename) + return os.path.join(d, 'dl-' + fn) + return filename + + def initialize_options(self): + _build_ext.initialize_options(self) + self.shlib_compiler = None + self.shlibs = [] + self.ext_map = {} + + def finalize_options(self): + _build_ext.finalize_options(self) + self.extensions = self.extensions or [] + self.check_extensions_list(self.extensions) + self.shlibs = [ext for ext in self.extensions + if isinstance(ext, Library)] + if self.shlibs: + self.setup_shlib_compiler() + for ext in self.extensions: + ext._full_name = self.get_ext_fullname(ext.name) + for ext in self.extensions: + fullname = ext._full_name + self.ext_map[fullname] = ext + + # distutils 3.1 will also ask for module names + # XXX what to do with conflicts? 
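# Illustrative sketch, not part of setuptools: the shlib bookkeeping above
# serves projects that declare a shared Library next to an Extension; all
# names below are hypothetical:
#
#     from setuptools import setup
#     from setuptools.extension import Extension, Library
#     setup(name='demo', ext_modules=[
#         Library('demo.libfast', sources=['src/libfast.c']),
#         Extension('demo._core', sources=['src/core.c'],
#                   libraries=['libfast']),
#     ])
#
# links_to_dynamic() then pairs 'demo._core' with the 'demo.libfast' Library.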
+ self.ext_map[fullname.split('.')[-1]] = ext + + ltd = self.shlibs and self.links_to_dynamic(ext) or False + ns = ltd and use_stubs and not isinstance(ext, Library) + ext._links_to_dynamic = ltd + ext._needs_stub = ns + filename = ext._file_name = self.get_ext_filename(fullname) + libdir = os.path.dirname(os.path.join(self.build_lib, filename)) + if ltd and libdir not in ext.library_dirs: + ext.library_dirs.append(libdir) + if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: + ext.runtime_library_dirs.append(os.curdir) + + def setup_shlib_compiler(self): + compiler = self.shlib_compiler = new_compiler( + compiler=self.compiler, dry_run=self.dry_run, force=self.force + ) + _customize_compiler_for_shlib(compiler) + + if self.include_dirs is not None: + compiler.set_include_dirs(self.include_dirs) + if self.define is not None: + # 'define' option is a list of (name,value) tuples + for (name, value) in self.define: + compiler.define_macro(name, value) + if self.undef is not None: + for macro in self.undef: + compiler.undefine_macro(macro) + if self.libraries is not None: + compiler.set_libraries(self.libraries) + if self.library_dirs is not None: + compiler.set_library_dirs(self.library_dirs) + if self.rpath is not None: + compiler.set_runtime_library_dirs(self.rpath) + if self.link_objects is not None: + compiler.set_link_objects(self.link_objects) + + # hack so distutils' build_extension() builds a library instead + compiler.link_shared_object = link_shared_object.__get__(compiler) + + def get_export_symbols(self, ext): + if isinstance(ext, Library): + return ext.export_symbols + return _build_ext.get_export_symbols(self, ext) + + def build_extension(self, ext): + ext._convert_pyx_sources_to_lang() + _compiler = self.compiler + try: + if isinstance(ext, Library): + self.compiler = self.shlib_compiler + _build_ext.build_extension(self, ext) + if ext._needs_stub: + cmd = self.get_finalized_command('build_py').build_lib + self.write_stub(cmd, ext) + finally: + self.compiler = _compiler + + def links_to_dynamic(self, ext): + """Return true if 'ext' links to a dynamic lib in the same package""" + # XXX this should check to ensure the lib is actually being built + # XXX as dynamic, and not just using a locally-found version or a + # XXX static-compiled version + libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) + pkg = '.'.join(ext._full_name.split('.')[:-1] + ['']) + return any(pkg + libname in libnames for libname in ext.libraries) + + def get_outputs(self): + return _build_ext.get_outputs(self) + self.__get_stubs_outputs() + + def __get_stubs_outputs(self): + # assemble the base name for each extension that needs a stub + ns_ext_bases = ( + os.path.join(self.build_lib, *ext._full_name.split('.')) + for ext in self.extensions + if ext._needs_stub + ) + # pair each base with the extension + pairs = itertools.product(ns_ext_bases, self.__get_output_extensions()) + return list(base + fnext for base, fnext in pairs) + + def __get_output_extensions(self): + yield '.py' + yield '.pyc' + if self.get_finalized_command('build_py').optimize: + yield '.pyo' + + def write_stub(self, output_dir, ext, compile=False): + log.info("writing stub loader for %s to %s", ext._full_name, + output_dir) + stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) + + '.py') + if compile and os.path.exists(stub_file): + raise DistutilsError(stub_file + " already exists! 
Please delete.") + if not self.dry_run: + f = open(stub_file, 'w') + f.write( + '\n'.join([ + "def __bootstrap__():", + " global __bootstrap__, __file__, __loader__", + " import sys, os, pkg_resources, imp" + if_dl(", dl"), + " __file__ = pkg_resources.resource_filename" + "(__name__,%r)" + % os.path.basename(ext._file_name), + " del __bootstrap__", + " if '__loader__' in globals():", + " del __loader__", + if_dl(" old_flags = sys.getdlopenflags()"), + " old_dir = os.getcwd()", + " try:", + " os.chdir(os.path.dirname(__file__))", + if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"), + " imp.load_dynamic(__name__,__file__)", + " finally:", + if_dl(" sys.setdlopenflags(old_flags)"), + " os.chdir(old_dir)", + "__bootstrap__()", + "" # terminal \n + ]) + ) + f.close() + if compile: + from distutils.util import byte_compile + + byte_compile([stub_file], optimize=0, + force=True, dry_run=self.dry_run) + optimize = self.get_finalized_command('install_lib').optimize + if optimize > 0: + byte_compile([stub_file], optimize=optimize, + force=True, dry_run=self.dry_run) + if os.path.exists(stub_file) and not self.dry_run: + os.unlink(stub_file) + + +if use_stubs or os.name == 'nt': + # Build shared libraries + # + def link_shared_object( + self, objects, output_libname, output_dir=None, libraries=None, + library_dirs=None, runtime_library_dirs=None, export_symbols=None, + debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, + target_lang=None): + self.link( + self.SHARED_LIBRARY, objects, output_libname, + output_dir, libraries, library_dirs, runtime_library_dirs, + export_symbols, debug, extra_preargs, extra_postargs, + build_temp, target_lang + ) +else: + # Build static libraries everywhere else + libtype = 'static' + + def link_shared_object( + self, objects, output_libname, output_dir=None, libraries=None, + library_dirs=None, runtime_library_dirs=None, export_symbols=None, + debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, + target_lang=None): + # XXX we need to either disallow these attrs on Library instances, + # or warn/abort here if set, or something... 
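# Illustrative sketch, not part of setuptools: setup_shlib_compiler() above
# grafts whichever link_shared_object variant was selected onto a compiler
# instance through the descriptor protocol:
#
#     compiler.link_shared_object = link_shared_object.__get__(compiler)
#
# fn.__get__(obj) returns a bound method, so no compiler subclass is needed.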
+ # libraries=None, library_dirs=None, runtime_library_dirs=None, + # export_symbols=None, extra_preargs=None, extra_postargs=None, + # build_temp=None + + assert output_dir is None # distutils build_ext doesn't pass this + output_dir, filename = os.path.split(output_libname) + basename, ext = os.path.splitext(filename) + if self.library_filename("x").startswith('lib'): + # strip 'lib' prefix; this is kludgy if some platform uses + # a different prefix + basename = basename[3:] + + self.create_static_lib( + objects, basename, output_dir, debug, target_lang + ) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/build_ext.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/build_ext.pyc new file mode 100644 index 0000000..1418e8e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/build_ext.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/build_py.py b/project/venv/lib/python2.7/site-packages/setuptools/command/build_py.py new file mode 100644 index 0000000..b0314fd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/build_py.py @@ -0,0 +1,270 @@ +from glob import glob +from distutils.util import convert_path +import distutils.command.build_py as orig +import os +import fnmatch +import textwrap +import io +import distutils.errors +import itertools + +from setuptools.extern import six +from setuptools.extern.six.moves import map, filter, filterfalse + +try: + from setuptools.lib2to3_ex import Mixin2to3 +except ImportError: + + class Mixin2to3: + def run_2to3(self, files, doctests=True): + "do nothing" + + +class build_py(orig.build_py, Mixin2to3): + """Enhanced 'build_py' command that includes data files with packages + + The data files are specified via a 'package_data' argument to 'setup()'. + See 'setuptools.dist.Distribution' for more details. + + Also, this version of the 'build_py' command allows you to specify both + 'py_modules' and 'packages' in the same setup operation. + """ + + def finalize_options(self): + orig.build_py.finalize_options(self) + self.package_data = self.distribution.package_data + self.exclude_package_data = (self.distribution.exclude_package_data or + {}) + if 'data_files' in self.__dict__: + del self.__dict__['data_files'] + self.__updated_files = [] + self.__doctests_2to3 = [] + + def run(self): + """Build modules, packages, and copy data files to build directory""" + if not self.py_modules and not self.packages: + return + + if self.py_modules: + self.build_modules() + + if self.packages: + self.build_packages() + self.build_package_data() + + self.run_2to3(self.__updated_files, False) + self.run_2to3(self.__updated_files, True) + self.run_2to3(self.__doctests_2to3, True) + + # Only compile actual .py files, using our base class' idea of what our + # output files are. 
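# Illustrative sketch, not part of setuptools: the package_data this command
# consumes is declared in setup(); package and glob names are hypothetical:
#
#     setup(name='demo', packages=['demo'],
#           package_data={'': ['*.txt'],                  # every package
#                         'demo': ['templates/*.html']})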
+ self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0)) + + def __getattr__(self, attr): + "lazily compute data files" + if attr == 'data_files': + self.data_files = self._get_data_files() + return self.data_files + return orig.build_py.__getattr__(self, attr) + + def build_module(self, module, module_file, package): + if six.PY2 and isinstance(package, six.string_types): + # avoid errors on Python 2 when unicode is passed (#190) + package = package.split('.') + outfile, copied = orig.build_py.build_module(self, module, module_file, + package) + if copied: + self.__updated_files.append(outfile) + return outfile, copied + + def _get_data_files(self): + """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" + self.analyze_manifest() + return list(map(self._get_pkg_data_files, self.packages or ())) + + def _get_pkg_data_files(self, package): + # Locate package source directory + src_dir = self.get_package_dir(package) + + # Compute package build directory + build_dir = os.path.join(*([self.build_lib] + package.split('.'))) + + # Strip directory from globbed filenames + filenames = [ + os.path.relpath(file, src_dir) + for file in self.find_data_files(package, src_dir) + ] + return package, src_dir, build_dir, filenames + + def find_data_files(self, package, src_dir): + """Return filenames for package's data files in 'src_dir'""" + patterns = self._get_platform_patterns( + self.package_data, + package, + src_dir, + ) + globs_expanded = map(glob, patterns) + # flatten the expanded globs into an iterable of matches + globs_matches = itertools.chain.from_iterable(globs_expanded) + glob_files = filter(os.path.isfile, globs_matches) + files = itertools.chain( + self.manifest_files.get(package, []), + glob_files, + ) + return self.exclude_data_files(package, src_dir, files) + + def build_package_data(self): + """Copy data files into build directory""" + for package, src_dir, build_dir, filenames in self.data_files: + for filename in filenames: + target = os.path.join(build_dir, filename) + self.mkpath(os.path.dirname(target)) + srcfile = os.path.join(src_dir, filename) + outf, copied = self.copy_file(srcfile, target) + srcfile = os.path.abspath(srcfile) + if (copied and + srcfile in self.distribution.convert_2to3_doctests): + self.__doctests_2to3.append(outf) + + def analyze_manifest(self): + self.manifest_files = mf = {} + if not self.distribution.include_package_data: + return + src_dirs = {} + for package in self.packages or (): + # Locate package source directory + src_dirs[assert_relative(self.get_package_dir(package))] = package + + self.run_command('egg_info') + ei_cmd = self.get_finalized_command('egg_info') + for path in ei_cmd.filelist.files: + d, f = os.path.split(assert_relative(path)) + prev = None + oldf = f + while d and d != prev and d not in src_dirs: + prev = d + d, df = os.path.split(d) + f = os.path.join(df, f) + if d in src_dirs: + if path.endswith('.py') and f == oldf: + continue # it's a module, not data + mf.setdefault(src_dirs[d], []).append(path) + + def get_data_files(self): + pass # Lazily compute data files in _get_data_files() function. 
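# Illustrative sketch, not part of setuptools: the data-file discovery above
# reduces to a three-step glob pipeline:
#
#     patterns = self._get_platform_patterns(self.package_data, pkg, src_dir)
#     matches = itertools.chain.from_iterable(map(glob, patterns))
#     files = filter(os.path.isfile, matches)
#
# after which exclude_data_files() drops exclude_package_data matches and
# duplicates.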
+ + def check_package(self, package, package_dir): + """Check namespace packages' __init__ for declare_namespace""" + try: + return self.packages_checked[package] + except KeyError: + pass + + init_py = orig.build_py.check_package(self, package, package_dir) + self.packages_checked[package] = init_py + + if not init_py or not self.distribution.namespace_packages: + return init_py + + for pkg in self.distribution.namespace_packages: + if pkg == package or pkg.startswith(package + '.'): + break + else: + return init_py + + with io.open(init_py, 'rb') as f: + contents = f.read() + if b'declare_namespace' not in contents: + raise distutils.errors.DistutilsError( + "Namespace package problem: %s is a namespace package, but " + "its\n__init__.py does not call declare_namespace()! Please " + 'fix it.\n(See the setuptools manual under ' + '"Namespace Packages" for details.)\n"' % (package,) + ) + return init_py + + def initialize_options(self): + self.packages_checked = {} + orig.build_py.initialize_options(self) + + def get_package_dir(self, package): + res = orig.build_py.get_package_dir(self, package) + if self.distribution.src_root is not None: + return os.path.join(self.distribution.src_root, res) + return res + + def exclude_data_files(self, package, src_dir, files): + """Filter filenames for package's data files in 'src_dir'""" + files = list(files) + patterns = self._get_platform_patterns( + self.exclude_package_data, + package, + src_dir, + ) + match_groups = ( + fnmatch.filter(files, pattern) + for pattern in patterns + ) + # flatten the groups of matches into an iterable of matches + matches = itertools.chain.from_iterable(match_groups) + bad = set(matches) + keepers = ( + fn + for fn in files + if fn not in bad + ) + # ditch dupes + return list(_unique_everseen(keepers)) + + @staticmethod + def _get_platform_patterns(spec, package, src_dir): + """ + yield platform-specific path patterns (suitable for glob + or fn_match) from a glob-based spec (such as + self.package_data or self.exclude_package_data) + matching package in src_dir. + """ + raw_patterns = itertools.chain( + spec.get('', []), + spec.get(package, []), + ) + return ( + # Each pattern has to be converted to a platform-specific path + os.path.join(src_dir, convert_path(pattern)) + for pattern in raw_patterns + ) + + +# from Python docs +def _unique_everseen(iterable, key=None): + "List unique elements, preserving order. Remember all elements ever seen." + # unique_everseen('AAAABBBCCDAABBB') --> A B C D + # unique_everseen('ABBCcAD', str.lower) --> A B C D + seen = set() + seen_add = seen.add + if key is None: + for element in filterfalse(seen.__contains__, iterable): + seen_add(element) + yield element + else: + for element in iterable: + k = key(element) + if k not in seen: + seen_add(k) + yield element + + +def assert_relative(path): + if not os.path.isabs(path): + return path + from distutils.errors import DistutilsSetupError + + msg = textwrap.dedent(""" + Error: setup script specifies an absolute path: + + %s + + setup() arguments must *always* be /-separated paths relative to the + setup.py directory, *never* absolute paths. 
+ """).lstrip() % path + raise DistutilsSetupError(msg) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/build_py.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/build_py.pyc new file mode 100644 index 0000000..50c7413 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/build_py.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/develop.py b/project/venv/lib/python2.7/site-packages/setuptools/command/develop.py new file mode 100644 index 0000000..009e4f9 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/develop.py @@ -0,0 +1,221 @@ +from distutils.util import convert_path +from distutils import log +from distutils.errors import DistutilsError, DistutilsOptionError +import os +import glob +import io + +from setuptools.extern import six + +import pkg_resources +from setuptools.command.easy_install import easy_install +from setuptools import namespaces +import setuptools + +__metaclass__ = type + + +class develop(namespaces.DevelopInstaller, easy_install): + """Set up package for development""" + + description = "install package in 'development mode'" + + user_options = easy_install.user_options + [ + ("uninstall", "u", "Uninstall this source package"), + ("egg-path=", None, "Set the path to be used in the .egg-link file"), + ] + + boolean_options = easy_install.boolean_options + ['uninstall'] + + command_consumes_arguments = False # override base + + def run(self): + if self.uninstall: + self.multi_version = True + self.uninstall_link() + self.uninstall_namespaces() + else: + self.install_for_development() + self.warn_deprecated_options() + + def initialize_options(self): + self.uninstall = None + self.egg_path = None + easy_install.initialize_options(self) + self.setup_path = None + self.always_copy_from = '.' # always copy eggs installed in curdir + + def finalize_options(self): + ei = self.get_finalized_command("egg_info") + if ei.broken_egg_info: + template = "Please rename %r to %r before using 'develop'" + args = ei.egg_info, ei.broken_egg_info + raise DistutilsError(template % args) + self.args = [ei.egg_name] + + easy_install.finalize_options(self) + self.expand_basedirs() + self.expand_dirs() + # pick up setup-dir .egg files only: no .egg-info + self.package_index.scan(glob.glob('*.egg')) + + egg_link_fn = ei.egg_name + '.egg-link' + self.egg_link = os.path.join(self.install_dir, egg_link_fn) + self.egg_base = ei.egg_base + if self.egg_path is None: + self.egg_path = os.path.abspath(ei.egg_base) + + target = pkg_resources.normalize_path(self.egg_base) + egg_path = pkg_resources.normalize_path( + os.path.join(self.install_dir, self.egg_path)) + if egg_path != target: + raise DistutilsOptionError( + "--egg-path must be a relative path from the install" + " directory to " + target + ) + + # Make a distribution for the package's source + self.dist = pkg_resources.Distribution( + target, + pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)), + project_name=ei.egg_name + ) + + self.setup_path = self._resolve_setup_path( + self.egg_base, + self.install_dir, + self.egg_path, + ) + + @staticmethod + def _resolve_setup_path(egg_base, install_dir, egg_path): + """ + Generate a path from egg_base back to '.' where the + setup script resides and ensure that path points to the + setup path from $install_dir/$egg_path. 
+ """ + path_to_setup = egg_base.replace(os.sep, '/').rstrip('/') + if path_to_setup != os.curdir: + path_to_setup = '../' * (path_to_setup.count('/') + 1) + resolved = pkg_resources.normalize_path( + os.path.join(install_dir, egg_path, path_to_setup) + ) + if resolved != pkg_resources.normalize_path(os.curdir): + raise DistutilsOptionError( + "Can't get a consistent path to setup script from" + " installation directory", resolved, + pkg_resources.normalize_path(os.curdir)) + return path_to_setup + + def install_for_development(self): + if six.PY3 and getattr(self.distribution, 'use_2to3', False): + # If we run 2to3 we can not do this inplace: + + # Ensure metadata is up-to-date + self.reinitialize_command('build_py', inplace=0) + self.run_command('build_py') + bpy_cmd = self.get_finalized_command("build_py") + build_path = pkg_resources.normalize_path(bpy_cmd.build_lib) + + # Build extensions + self.reinitialize_command('egg_info', egg_base=build_path) + self.run_command('egg_info') + + self.reinitialize_command('build_ext', inplace=0) + self.run_command('build_ext') + + # Fixup egg-link and easy-install.pth + ei_cmd = self.get_finalized_command("egg_info") + self.egg_path = build_path + self.dist.location = build_path + # XXX + self.dist._provider = pkg_resources.PathMetadata( + build_path, ei_cmd.egg_info) + else: + # Without 2to3 inplace works fine: + self.run_command('egg_info') + + # Build extensions in-place + self.reinitialize_command('build_ext', inplace=1) + self.run_command('build_ext') + + self.install_site_py() # ensure that target dir is site-safe + if setuptools.bootstrap_install_from: + self.easy_install(setuptools.bootstrap_install_from) + setuptools.bootstrap_install_from = None + + self.install_namespaces() + + # create an .egg-link in the installation dir, pointing to our egg + log.info("Creating %s (link to %s)", self.egg_link, self.egg_base) + if not self.dry_run: + with open(self.egg_link, "w") as f: + f.write(self.egg_path + "\n" + self.setup_path) + # postprocess the installed distro, fixing up .pth, installing scripts, + # and handling requirements + self.process_distribution(None, self.dist, not self.no_deps) + + def uninstall_link(self): + if os.path.exists(self.egg_link): + log.info("Removing %s (link to %s)", self.egg_link, self.egg_base) + egg_link_file = open(self.egg_link) + contents = [line.rstrip() for line in egg_link_file] + egg_link_file.close() + if contents not in ([self.egg_path], + [self.egg_path, self.setup_path]): + log.warn("Link points to %s: uninstall aborted", contents) + return + if not self.dry_run: + os.unlink(self.egg_link) + if not self.dry_run: + self.update_pth(self.dist) # remove any .pth link to us + if self.distribution.scripts: + # XXX should also check for entry point scripts! + log.warn("Note: you must uninstall or replace scripts manually!") + + def install_egg_scripts(self, dist): + if dist is not self.dist: + # Installing a dependency, so fall back to normal behavior + return easy_install.install_egg_scripts(self, dist) + + # create wrapper scripts in the script dir, pointing to dist.scripts + + # new-style... 
+ self.install_wrapper_scripts(dist) + + # ...and old-style + for script_name in self.distribution.scripts or []: + script_path = os.path.abspath(convert_path(script_name)) + script_name = os.path.basename(script_path) + with io.open(script_path) as strm: + script_text = strm.read() + self.install_script(dist, script_name, script_text, script_path) + + def install_wrapper_scripts(self, dist): + dist = VersionlessRequirement(dist) + return easy_install.install_wrapper_scripts(self, dist) + + +class VersionlessRequirement: + """ + Adapt a pkg_resources.Distribution to simply return the project + name as the 'requirement' so that scripts will work across + multiple versions. + + >>> from pkg_resources import Distribution + >>> dist = Distribution(project_name='foo', version='1.0') + >>> str(dist.as_requirement()) + 'foo==1.0' + >>> adapted_dist = VersionlessRequirement(dist) + >>> str(adapted_dist.as_requirement()) + 'foo' + """ + + def __init__(self, dist): + self.__dist = dist + + def __getattr__(self, name): + return getattr(self.__dist, name) + + def as_requirement(self): + return self.project_name diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/develop.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/develop.pyc new file mode 100644 index 0000000..2f130bd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/develop.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/dist_info.py b/project/venv/lib/python2.7/site-packages/setuptools/command/dist_info.py new file mode 100644 index 0000000..c45258f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/dist_info.py @@ -0,0 +1,36 @@ +""" +Create a dist_info directory +As defined in the wheel specification +""" + +import os + +from distutils.core import Command +from distutils import log + + +class dist_info(Command): + + description = 'create a .dist-info directory' + + user_options = [ + ('egg-base=', 'e', "directory containing .egg-info directories" + " (default: top of the source tree)"), + ] + + def initialize_options(self): + self.egg_base = None + + def finalize_options(self): + pass + + def run(self): + egg_info = self.get_finalized_command('egg_info') + egg_info.egg_base = self.egg_base + egg_info.finalize_options() + egg_info.run() + dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info' + log.info("creating '{}'".format(os.path.abspath(dist_info_dir))) + + bdist_wheel = self.get_finalized_command('bdist_wheel') + bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/dist_info.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/dist_info.pyc new file mode 100644 index 0000000..444c508 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/dist_info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/easy_install.py b/project/venv/lib/python2.7/site-packages/setuptools/command/easy_install.py new file mode 100644 index 0000000..06c9827 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/easy_install.py @@ -0,0 +1,2342 @@ +#!/usr/bin/env python +""" +Easy Install +------------ + +A tool for doing automatic download/extract/build of distutils-based Python +packages. For detailed documentation, see the accompanying EasyInstall.txt +file, or visit the `EasyInstall home page`__. 
+ +__ https://setuptools.readthedocs.io/en/latest/easy_install.html + +""" + +from glob import glob +from distutils.util import get_platform +from distutils.util import convert_path, subst_vars +from distutils.errors import ( + DistutilsArgError, DistutilsOptionError, + DistutilsError, DistutilsPlatformError, +) +from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS +from distutils import log, dir_util +from distutils.command.build_scripts import first_line_re +from distutils.spawn import find_executable +import sys +import os +import zipimport +import shutil +import tempfile +import zipfile +import re +import stat +import random +import textwrap +import warnings +import site +import struct +import contextlib +import subprocess +import shlex +import io + + +from sysconfig import get_config_vars, get_path + +from setuptools import SetuptoolsDeprecationWarning + +from setuptools.extern import six +from setuptools.extern.six.moves import configparser, map + +from setuptools import Command +from setuptools.sandbox import run_setup +from setuptools.py27compat import rmtree_safe +from setuptools.command import setopt +from setuptools.archive_util import unpack_archive +from setuptools.package_index import ( + PackageIndex, parse_requirement_arg, URL_SCHEME, +) +from setuptools.command import bdist_egg, egg_info +from setuptools.wheel import Wheel +from pkg_resources import ( + yield_lines, normalize_path, resource_string, ensure_directory, + get_distribution, find_distributions, Environment, Requirement, + Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, + VersionConflict, DEVELOP_DIST, +) +import pkg_resources.py31compat + +__metaclass__ = type + +# Turn on PEP440Warnings +warnings.filterwarnings("default", category=pkg_resources.PEP440Warning) + +__all__ = [ + 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg', + 'main', 'get_exe_prefixes', +] + + +def is_64bit(): + return struct.calcsize("P") == 8 + + +def samefile(p1, p2): + """ + Determine if two paths reference the same file. + + Augments os.path.samefile to work on Windows and + suppresses errors if the path doesn't exist. 
+ """ + both_exist = os.path.exists(p1) and os.path.exists(p2) + use_samefile = hasattr(os.path, 'samefile') and both_exist + if use_samefile: + return os.path.samefile(p1, p2) + norm_p1 = os.path.normpath(os.path.normcase(p1)) + norm_p2 = os.path.normpath(os.path.normcase(p2)) + return norm_p1 == norm_p2 + + +if six.PY2: + + def _to_bytes(s): + return s + + def isascii(s): + try: + six.text_type(s, 'ascii') + return True + except UnicodeError: + return False +else: + + def _to_bytes(s): + return s.encode('utf8') + + def isascii(s): + try: + s.encode('ascii') + return True + except UnicodeError: + return False + + +_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ') + + +class easy_install(Command): + """Manage a download/build/install process""" + description = "Find/get/install Python packages" + command_consumes_arguments = True + + user_options = [ + ('prefix=', None, "installation prefix"), + ("zip-ok", "z", "install package as a zipfile"), + ("multi-version", "m", "make apps have to require() a version"), + ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"), + ("install-dir=", "d", "install package to DIR"), + ("script-dir=", "s", "install scripts to DIR"), + ("exclude-scripts", "x", "Don't install scripts"), + ("always-copy", "a", "Copy all needed packages to install dir"), + ("index-url=", "i", "base URL of Python Package Index"), + ("find-links=", "f", "additional URL(s) to search for packages"), + ("build-directory=", "b", + "download/extract/build in DIR; keep the results"), + ('optimize=', 'O', + "also compile with optimization: -O1 for \"python -O\", " + "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), + ('record=', None, + "filename in which to record list of installed files"), + ('always-unzip', 'Z', "don't install as a zipfile, no matter what"), + ('site-dirs=', 'S', "list of directories where .pth files work"), + ('editable', 'e', "Install specified packages in editable form"), + ('no-deps', 'N', "don't install dependencies"), + ('allow-hosts=', 'H', "pattern(s) that hostnames must match"), + ('local-snapshots-ok', 'l', + "allow building eggs from local checkouts"), + ('version', None, "print version information and exit"), + ('no-find-links', None, + "Don't load find-links defined in packages being installed") + ] + boolean_options = [ + 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy', + 'editable', + 'no-deps', 'local-snapshots-ok', 'version' + ] + + if site.ENABLE_USER_SITE: + help_msg = "install in user site-package '%s'" % site.USER_SITE + user_options.append(('user', None, help_msg)) + boolean_options.append('user') + + negative_opt = {'always-unzip': 'zip-ok'} + create_index = PackageIndex + + def initialize_options(self): + # the --user option seems to be an opt-in one, + # so the default should be False. 
+ self.user = 0 + self.zip_ok = self.local_snapshots_ok = None + self.install_dir = self.script_dir = self.exclude_scripts = None + self.index_url = None + self.find_links = None + self.build_directory = None + self.args = None + self.optimize = self.record = None + self.upgrade = self.always_copy = self.multi_version = None + self.editable = self.no_deps = self.allow_hosts = None + self.root = self.prefix = self.no_report = None + self.version = None + self.install_purelib = None # for pure module distributions + self.install_platlib = None # non-pure (dists w/ extensions) + self.install_headers = None # for C/C++ headers + self.install_lib = None # set to either purelib or platlib + self.install_scripts = None + self.install_data = None + self.install_base = None + self.install_platbase = None + if site.ENABLE_USER_SITE: + self.install_userbase = site.USER_BASE + self.install_usersite = site.USER_SITE + else: + self.install_userbase = None + self.install_usersite = None + self.no_find_links = None + + # Options not specifiable via command line + self.package_index = None + self.pth_file = self.always_copy_from = None + self.site_dirs = None + self.installed_projects = {} + self.sitepy_installed = False + # Always read easy_install options, even if we are subclassed, or have + # an independent instance created. This ensures that defaults will + # always come from the standard configuration file(s)' "easy_install" + # section, even if this is a "develop" or "install" command, or some + # other embedding. + self._dry_run = None + self.verbose = self.distribution.verbose + self.distribution._set_command_options( + self, self.distribution.get_option_dict('easy_install') + ) + + def delete_blockers(self, blockers): + extant_blockers = ( + filename for filename in blockers + if os.path.exists(filename) or os.path.islink(filename) + ) + list(map(self._delete_path, extant_blockers)) + + def _delete_path(self, path): + log.info("Deleting %s", path) + if self.dry_run: + return + + is_tree = os.path.isdir(path) and not os.path.islink(path) + remover = rmtree if is_tree else os.unlink + remover(path) + + @staticmethod + def _render_version(): + """ + Render the Setuptools version and installation details, then exit. + """ + ver = sys.version[:3] + dist = get_distribution('setuptools') + tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})' + print(tmpl.format(**locals())) + raise SystemExit() + + def finalize_options(self): + self.version and self._render_version() + + py_version = sys.version.split()[0] + prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix') + + self.config_vars = { + 'dist_name': self.distribution.get_name(), + 'dist_version': self.distribution.get_version(), + 'dist_fullname': self.distribution.get_fullname(), + 'py_version': py_version, + 'py_version_short': py_version[0:3], + 'py_version_nodot': py_version[0] + py_version[2], + 'sys_prefix': prefix, + 'prefix': prefix, + 'sys_exec_prefix': exec_prefix, + 'exec_prefix': exec_prefix, + # Only python 3.2+ has abiflags + 'abiflags': getattr(sys, 'abiflags', ''), + } + + if site.ENABLE_USER_SITE: + self.config_vars['userbase'] = self.install_userbase + self.config_vars['usersite'] = self.install_usersite + + self._fix_install_dir_for_user_site() + + self.expand_basedirs() + self.expand_dirs() + + self._expand( + 'install_dir', 'script_dir', 'build_directory', + 'site_dirs', + ) + # If a non-default installation directory was specified, default the + # script directory to match it. 
+ if self.script_dir is None: + self.script_dir = self.install_dir + + if self.no_find_links is None: + self.no_find_links = False + + # Let install_dir get set by install_lib command, which in turn + # gets its info from the install command, and takes into account + # --prefix and --home and all that other crud. + self.set_undefined_options( + 'install_lib', ('install_dir', 'install_dir') + ) + # Likewise, set default script_dir from 'install_scripts.install_dir' + self.set_undefined_options( + 'install_scripts', ('install_dir', 'script_dir') + ) + + if self.user and self.install_purelib: + self.install_dir = self.install_purelib + self.script_dir = self.install_scripts + # default --record from the install command + self.set_undefined_options('install', ('record', 'record')) + # Should this be moved to the if statement below? It's not used + # elsewhere + normpath = map(normalize_path, sys.path) + self.all_site_dirs = get_site_dirs() + if self.site_dirs is not None: + site_dirs = [ + os.path.expanduser(s.strip()) for s in + self.site_dirs.split(',') + ] + for d in site_dirs: + if not os.path.isdir(d): + log.warn("%s (in --site-dirs) does not exist", d) + elif normalize_path(d) not in normpath: + raise DistutilsOptionError( + d + " (in --site-dirs) is not on sys.path" + ) + else: + self.all_site_dirs.append(normalize_path(d)) + if not self.editable: + self.check_site_dir() + self.index_url = self.index_url or "https://pypi.org/simple/" + self.shadow_path = self.all_site_dirs[:] + for path_item in self.install_dir, normalize_path(self.script_dir): + if path_item not in self.shadow_path: + self.shadow_path.insert(0, path_item) + + if self.allow_hosts is not None: + hosts = [s.strip() for s in self.allow_hosts.split(',')] + else: + hosts = ['*'] + if self.package_index is None: + self.package_index = self.create_index( + self.index_url, search_path=self.shadow_path, hosts=hosts, + ) + self.local_index = Environment(self.shadow_path + sys.path) + + if self.find_links is not None: + if isinstance(self.find_links, six.string_types): + self.find_links = self.find_links.split() + else: + self.find_links = [] + if self.local_snapshots_ok: + self.package_index.scan_egg_links(self.shadow_path + sys.path) + if not self.no_find_links: + self.package_index.add_find_links(self.find_links) + self.set_undefined_options('install_lib', ('optimize', 'optimize')) + if not isinstance(self.optimize, int): + try: + self.optimize = int(self.optimize) + if not (0 <= self.optimize <= 2): + raise ValueError + except ValueError: + raise DistutilsOptionError("--optimize must be 0, 1, or 2") + + if self.editable and not self.build_directory: + raise DistutilsArgError( + "Must specify a build directory (-b) when using --editable" + ) + if not self.args: + raise DistutilsArgError( + "No urls, filenames, or requirements specified (see --help)") + + self.outputs = [] + + def _fix_install_dir_for_user_site(self): + """ + Fix the install_dir if "--user" was used. 
+ """ + if not self.user or not site.ENABLE_USER_SITE: + return + + self.create_home_path() + if self.install_userbase is None: + msg = "User base directory is not specified" + raise DistutilsPlatformError(msg) + self.install_base = self.install_platbase = self.install_userbase + scheme_name = os.name.replace('posix', 'unix') + '_user' + self.select_scheme(scheme_name) + + def _expand_attrs(self, attrs): + for attr in attrs: + val = getattr(self, attr) + if val is not None: + if os.name == 'posix' or os.name == 'nt': + val = os.path.expanduser(val) + val = subst_vars(val, self.config_vars) + setattr(self, attr, val) + + def expand_basedirs(self): + """Calls `os.path.expanduser` on install_base, install_platbase and + root.""" + self._expand_attrs(['install_base', 'install_platbase', 'root']) + + def expand_dirs(self): + """Calls `os.path.expanduser` on install dirs.""" + dirs = [ + 'install_purelib', + 'install_platlib', + 'install_lib', + 'install_headers', + 'install_scripts', + 'install_data', + ] + self._expand_attrs(dirs) + + def run(self): + if self.verbose != self.distribution.verbose: + log.set_verbosity(self.verbose) + try: + for spec in self.args: + self.easy_install(spec, not self.no_deps) + if self.record: + outputs = self.outputs + if self.root: # strip any package prefix + root_len = len(self.root) + for counter in range(len(outputs)): + outputs[counter] = outputs[counter][root_len:] + from distutils import file_util + + self.execute( + file_util.write_file, (self.record, outputs), + "writing list of installed files to '%s'" % + self.record + ) + self.warn_deprecated_options() + finally: + log.set_verbosity(self.distribution.verbose) + + def pseudo_tempname(self): + """Return a pseudo-tempname base in the install directory. + This code is intentionally naive; if a malicious party can write to + the target directory you're already in deep doodoo. + """ + try: + pid = os.getpid() + except Exception: + pid = random.randint(0, sys.maxsize) + return os.path.join(self.install_dir, "test-easy-install-%s" % pid) + + def warn_deprecated_options(self): + pass + + def check_site_dir(self): + """Verify that self.install_dir is .pth-capable dir, if needed""" + + instdir = normalize_path(self.install_dir) + pth_file = os.path.join(instdir, 'easy-install.pth') + + # Is it a configured, PYTHONPATH, implicit, or explicit site dir? + is_site_dir = instdir in self.all_site_dirs + + if not is_site_dir and not self.multi_version: + # No? 
Then directly test whether it does .pth file processing + is_site_dir = self.check_pth_processing() + else: + # make sure we can write to target dir + testfile = self.pseudo_tempname() + '.write-test' + test_exists = os.path.exists(testfile) + try: + if test_exists: + os.unlink(testfile) + open(testfile, 'w').close() + os.unlink(testfile) + except (OSError, IOError): + self.cant_write_to_target() + + if not is_site_dir and not self.multi_version: + # Can't install non-multi to non-site dir + raise DistutilsError(self.no_default_version_msg()) + + if is_site_dir: + if self.pth_file is None: + self.pth_file = PthDistributions(pth_file, self.all_site_dirs) + else: + self.pth_file = None + + if instdir not in map(normalize_path, _pythonpath()): + # only PYTHONPATH dirs need a site.py, so pretend it's there + self.sitepy_installed = True + elif self.multi_version and not os.path.exists(pth_file): + self.sitepy_installed = True # don't need site.py in this case + self.pth_file = None # and don't create a .pth file + self.install_dir = instdir + + __cant_write_msg = textwrap.dedent(""" + can't create or remove files in install directory + + The following error occurred while trying to add or remove files in the + installation directory: + + %s + + The installation directory you specified (via --install-dir, --prefix, or + the distutils default setting) was: + + %s + """).lstrip() + + __not_exists_id = textwrap.dedent(""" + This directory does not currently exist. Please create it and try again, or + choose a different installation directory (using the -d or --install-dir + option). + """).lstrip() + + __access_msg = textwrap.dedent(""" + Perhaps your account does not have write access to this directory? If the + installation directory is a system-owned directory, you may need to sign in + as the administrator or "root" account. If you do not have administrative + access to this machine, you may wish to choose a different installation + directory, preferably one that is listed in your PYTHONPATH environment + variable. + + For information on other options, you may wish to consult the + documentation at: + + https://setuptools.readthedocs.io/en/latest/easy_install.html + + Please make the appropriate changes for your system and try again. + """).lstrip() + + def cant_write_to_target(self): + msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,) + + if not os.path.exists(self.install_dir): + msg += '\n' + self.__not_exists_id + else: + msg += '\n' + self.__access_msg + raise DistutilsError(msg) + + def check_pth_processing(self): + """Empirically verify whether .pth files are supported in inst. 
dir""" + instdir = self.install_dir + log.info("Checking .pth file support in %s", instdir) + pth_file = self.pseudo_tempname() + ".pth" + ok_file = pth_file + '.ok' + ok_exists = os.path.exists(ok_file) + tmpl = _one_liner(""" + import os + f = open({ok_file!r}, 'w') + f.write('OK') + f.close() + """) + '\n' + try: + if ok_exists: + os.unlink(ok_file) + dirname = os.path.dirname(ok_file) + pkg_resources.py31compat.makedirs(dirname, exist_ok=True) + f = open(pth_file, 'w') + except (OSError, IOError): + self.cant_write_to_target() + else: + try: + f.write(tmpl.format(**locals())) + f.close() + f = None + executable = sys.executable + if os.name == 'nt': + dirname, basename = os.path.split(executable) + alt = os.path.join(dirname, 'pythonw.exe') + use_alt = ( + basename.lower() == 'python.exe' and + os.path.exists(alt) + ) + if use_alt: + # use pythonw.exe to avoid opening a console window + executable = alt + + from distutils.spawn import spawn + + spawn([executable, '-E', '-c', 'pass'], 0) + + if os.path.exists(ok_file): + log.info( + "TEST PASSED: %s appears to support .pth files", + instdir + ) + return True + finally: + if f: + f.close() + if os.path.exists(ok_file): + os.unlink(ok_file) + if os.path.exists(pth_file): + os.unlink(pth_file) + if not self.multi_version: + log.warn("TEST FAILED: %s does NOT support .pth files", instdir) + return False + + def install_egg_scripts(self, dist): + """Write all the scripts for `dist`, unless scripts are excluded""" + if not self.exclude_scripts and dist.metadata_isdir('scripts'): + for script_name in dist.metadata_listdir('scripts'): + if dist.metadata_isdir('scripts/' + script_name): + # The "script" is a directory, likely a Python 3 + # __pycache__ directory, so skip it. + continue + self.install_script( + dist, script_name, + dist.get_metadata('scripts/' + script_name) + ) + self.install_wrapper_scripts(dist) + + def add_output(self, path): + if os.path.isdir(path): + for base, dirs, files in os.walk(path): + for filename in files: + self.outputs.append(os.path.join(base, filename)) + else: + self.outputs.append(path) + + def not_editable(self, spec): + if self.editable: + raise DistutilsArgError( + "Invalid argument %r: you can't use filenames or URLs " + "with --editable (except via the --find-links option)." 
+ % (spec,) + ) + + def check_editable(self, spec): + if not self.editable: + return + + if os.path.exists(os.path.join(self.build_directory, spec.key)): + raise DistutilsArgError( + "%r already exists in %s; can't do a checkout there" % + (spec.key, self.build_directory) + ) + + @contextlib.contextmanager + def _tmpdir(self): + tmpdir = tempfile.mkdtemp(prefix=u"easy_install-") + try: + # cast to str as workaround for #709 and #710 and #712 + yield str(tmpdir) + finally: + os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir)) + + def easy_install(self, spec, deps=False): + if not self.editable: + self.install_site_py() + + with self._tmpdir() as tmpdir: + if not isinstance(spec, Requirement): + if URL_SCHEME(spec): + # It's a url, download it to tmpdir and process + self.not_editable(spec) + dl = self.package_index.download(spec, tmpdir) + return self.install_item(None, dl, tmpdir, deps, True) + + elif os.path.exists(spec): + # Existing file or directory, just process it directly + self.not_editable(spec) + return self.install_item(None, spec, tmpdir, deps, True) + else: + spec = parse_requirement_arg(spec) + + self.check_editable(spec) + dist = self.package_index.fetch_distribution( + spec, tmpdir, self.upgrade, self.editable, + not self.always_copy, self.local_index + ) + if dist is None: + msg = "Could not find suitable distribution for %r" % spec + if self.always_copy: + msg += " (--always-copy skips system and development eggs)" + raise DistutilsError(msg) + elif dist.precedence == DEVELOP_DIST: + # .egg-info dists don't need installing, just process deps + self.process_distribution(spec, dist, deps, "Using") + return dist + else: + return self.install_item(spec, dist.location, tmpdir, deps) + + def install_item(self, spec, download, tmpdir, deps, install_needed=False): + + # Installation is also needed if file in tmpdir or is not an egg + install_needed = install_needed or self.always_copy + install_needed = install_needed or os.path.dirname(download) == tmpdir + install_needed = install_needed or not download.endswith('.egg') + install_needed = install_needed or ( + self.always_copy_from is not None and + os.path.dirname(normalize_path(download)) == + normalize_path(self.always_copy_from) + ) + + if spec and not install_needed: + # at this point, we know it's a local .egg, we just don't know if + # it's already installed. + for dist in self.local_index[spec.project_name]: + if dist.location == download: + break + else: + install_needed = True # it's not in the local index + + log.info("Processing %s", os.path.basename(download)) + + if install_needed: + dists = self.install_eggs(spec, download, tmpdir) + for dist in dists: + self.process_distribution(spec, dist, deps) + else: + dists = [self.egg_distribution(download)] + self.process_distribution(spec, dists[0], deps, "Using") + + if spec is not None: + for dist in dists: + if dist in spec: + return dist + + def select_scheme(self, name): + """Sets the install directories by applying the install schemes.""" + # it's the caller's problem if they supply a bad name! 
+ scheme = INSTALL_SCHEMES[name] + for key in SCHEME_KEYS: + attrname = 'install_' + key + if getattr(self, attrname) is None: + setattr(self, attrname, scheme[key]) + + def process_distribution(self, requirement, dist, deps=True, *info): + self.update_pth(dist) + self.package_index.add(dist) + if dist in self.local_index[dist.key]: + self.local_index.remove(dist) + self.local_index.add(dist) + self.install_egg_scripts(dist) + self.installed_projects[dist.key] = dist + log.info(self.installation_report(requirement, dist, *info)) + if (dist.has_metadata('dependency_links.txt') and + not self.no_find_links): + self.package_index.add_find_links( + dist.get_metadata_lines('dependency_links.txt') + ) + if not deps and not self.always_copy: + return + elif requirement is not None and dist.key != requirement.key: + log.warn("Skipping dependencies for %s", dist) + return # XXX this is not the distribution we were looking for + elif requirement is None or dist not in requirement: + # if we wound up with a different version, resolve what we've got + distreq = dist.as_requirement() + requirement = Requirement(str(distreq)) + log.info("Processing dependencies for %s", requirement) + try: + distros = WorkingSet([]).resolve( + [requirement], self.local_index, self.easy_install + ) + except DistributionNotFound as e: + raise DistutilsError(str(e)) + except VersionConflict as e: + raise DistutilsError(e.report()) + if self.always_copy or self.always_copy_from: + # Force all the relevant distros to be copied or activated + for dist in distros: + if dist.key not in self.installed_projects: + self.easy_install(dist.as_requirement()) + log.info("Finished processing dependencies for %s", requirement) + + def should_unzip(self, dist): + if self.zip_ok is not None: + return not self.zip_ok + if dist.has_metadata('not-zip-safe'): + return True + if not dist.has_metadata('zip-safe'): + return True + return False + + def maybe_move(self, spec, dist_filename, setup_base): + dst = os.path.join(self.build_directory, spec.key) + if os.path.exists(dst): + msg = ( + "%r already exists in %s; build directory %s will not be kept" + ) + log.warn(msg, spec.key, self.build_directory, setup_base) + return setup_base + if os.path.isdir(dist_filename): + setup_base = dist_filename + else: + if os.path.dirname(dist_filename) == setup_base: + os.unlink(dist_filename) # get it out of the tmp dir + contents = os.listdir(setup_base) + if len(contents) == 1: + dist_filename = os.path.join(setup_base, contents[0]) + if os.path.isdir(dist_filename): + # if the only thing there is a directory, move it instead + setup_base = dist_filename + ensure_directory(dst) + shutil.move(setup_base, dst) + return dst + + def install_wrapper_scripts(self, dist): + if self.exclude_scripts: + return + for args in ScriptWriter.best().get_args(dist): + self.write_script(*args) + + def install_script(self, dist, script_name, script_text, dev_path=None): + """Generate a legacy script wrapper and install it""" + spec = str(dist.as_requirement()) + is_script = is_python_script(script_text, script_name) + + if is_script: + body = self._load_template(dev_path) % locals() + script_text = ScriptWriter.get_header(script_text) + body + self.write_script(script_name, _to_bytes(script_text), 'b') + + @staticmethod + def _load_template(dev_path): + """ + There are a couple of template scripts in the package. This + function loads one of them and prepares it for use. 
+ """ + # See https://github.com/pypa/setuptools/issues/134 for info + # on script file naming and downstream issues with SVR4 + name = 'script.tmpl' + if dev_path: + name = name.replace('.tmpl', ' (dev).tmpl') + + raw_bytes = resource_string('setuptools', name) + return raw_bytes.decode('utf-8') + + def write_script(self, script_name, contents, mode="t", blockers=()): + """Write an executable file to the scripts directory""" + self.delete_blockers( # clean up old .py/.pyw w/o a script + [os.path.join(self.script_dir, x) for x in blockers] + ) + log.info("Installing %s script to %s", script_name, self.script_dir) + target = os.path.join(self.script_dir, script_name) + self.add_output(target) + + if self.dry_run: + return + + mask = current_umask() + ensure_directory(target) + if os.path.exists(target): + os.unlink(target) + with open(target, "w" + mode) as f: + f.write(contents) + chmod(target, 0o777 - mask) + + def install_eggs(self, spec, dist_filename, tmpdir): + # .egg dirs or files are already built, so just return them + if dist_filename.lower().endswith('.egg'): + return [self.install_egg(dist_filename, tmpdir)] + elif dist_filename.lower().endswith('.exe'): + return [self.install_exe(dist_filename, tmpdir)] + elif dist_filename.lower().endswith('.whl'): + return [self.install_wheel(dist_filename, tmpdir)] + + # Anything else, try to extract and build + setup_base = tmpdir + if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'): + unpack_archive(dist_filename, tmpdir, self.unpack_progress) + elif os.path.isdir(dist_filename): + setup_base = os.path.abspath(dist_filename) + + if (setup_base.startswith(tmpdir) # something we downloaded + and self.build_directory and spec is not None): + setup_base = self.maybe_move(spec, dist_filename, setup_base) + + # Find the setup.py file + setup_script = os.path.join(setup_base, 'setup.py') + + if not os.path.exists(setup_script): + setups = glob(os.path.join(setup_base, '*', 'setup.py')) + if not setups: + raise DistutilsError( + "Couldn't find a setup script in %s" % + os.path.abspath(dist_filename) + ) + if len(setups) > 1: + raise DistutilsError( + "Multiple setup scripts in %s" % + os.path.abspath(dist_filename) + ) + setup_script = setups[0] + + # Now run it, and return the result + if self.editable: + log.info(self.report_editable(spec, setup_script)) + return [] + else: + return self.build_and_install(setup_script, setup_base) + + def egg_distribution(self, egg_path): + if os.path.isdir(egg_path): + metadata = PathMetadata(egg_path, os.path.join(egg_path, + 'EGG-INFO')) + else: + metadata = EggMetadata(zipimport.zipimporter(egg_path)) + return Distribution.from_filename(egg_path, metadata=metadata) + + def install_egg(self, egg_path, tmpdir): + destination = os.path.join( + self.install_dir, + os.path.basename(egg_path), + ) + destination = os.path.abspath(destination) + if not self.dry_run: + ensure_directory(destination) + + dist = self.egg_distribution(egg_path) + if not samefile(egg_path, destination): + if os.path.isdir(destination) and not os.path.islink(destination): + dir_util.remove_tree(destination, dry_run=self.dry_run) + elif os.path.exists(destination): + self.execute( + os.unlink, + (destination,), + "Removing " + destination, + ) + try: + new_dist_is_zipped = False + if os.path.isdir(egg_path): + if egg_path.startswith(tmpdir): + f, m = shutil.move, "Moving" + else: + f, m = shutil.copytree, "Copying" + elif self.should_unzip(dist): + self.mkpath(destination) + f, m = self.unpack_and_compile, "Extracting" + 
else: + new_dist_is_zipped = True + if egg_path.startswith(tmpdir): + f, m = shutil.move, "Moving" + else: + f, m = shutil.copy2, "Copying" + self.execute( + f, + (egg_path, destination), + (m + " %s to %s") % ( + os.path.basename(egg_path), + os.path.dirname(destination) + ), + ) + update_dist_caches( + destination, + fix_zipimporter_caches=new_dist_is_zipped, + ) + except Exception: + update_dist_caches(destination, fix_zipimporter_caches=False) + raise + + self.add_output(destination) + return self.egg_distribution(destination) + + def install_exe(self, dist_filename, tmpdir): + # See if it's valid, get data + cfg = extract_wininst_cfg(dist_filename) + if cfg is None: + raise DistutilsError( + "%s is not a valid distutils Windows .exe" % dist_filename + ) + # Create a dummy distribution object until we build the real distro + dist = Distribution( + None, + project_name=cfg.get('metadata', 'name'), + version=cfg.get('metadata', 'version'), platform=get_platform(), + ) + + # Convert the .exe to an unpacked egg + egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg') + dist.location = egg_path + egg_tmp = egg_path + '.tmp' + _egg_info = os.path.join(egg_tmp, 'EGG-INFO') + pkg_inf = os.path.join(_egg_info, 'PKG-INFO') + ensure_directory(pkg_inf) # make sure EGG-INFO dir exists + dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX + self.exe_to_egg(dist_filename, egg_tmp) + + # Write EGG-INFO/PKG-INFO + if not os.path.exists(pkg_inf): + f = open(pkg_inf, 'w') + f.write('Metadata-Version: 1.0\n') + for k, v in cfg.items('metadata'): + if k != 'target_version': + f.write('%s: %s\n' % (k.replace('_', '-').title(), v)) + f.close() + script_dir = os.path.join(_egg_info, 'scripts') + # delete entry-point scripts to avoid duping + self.delete_blockers([ + os.path.join(script_dir, args[0]) + for args in ScriptWriter.get_args(dist) + ]) + # Build .egg file from tmpdir + bdist_egg.make_zipfile( + egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run, + ) + # install the .egg + return self.install_egg(egg_path, tmpdir) + + def exe_to_egg(self, dist_filename, egg_tmp): + """Extract a bdist_wininst to the directories an egg would use""" + # Check for .pth file and set up prefix translations + prefixes = get_exe_prefixes(dist_filename) + to_compile = [] + native_libs = [] + top_level = {} + + def process(src, dst): + s = src.lower() + for old, new in prefixes: + if s.startswith(old): + src = new + src[len(old):] + parts = src.split('/') + dst = os.path.join(egg_tmp, *parts) + dl = dst.lower() + if dl.endswith('.pyd') or dl.endswith('.dll'): + parts[-1] = bdist_egg.strip_module(parts[-1]) + top_level[os.path.splitext(parts[0])[0]] = 1 + native_libs.append(src) + elif dl.endswith('.py') and old != 'SCRIPTS/': + top_level[os.path.splitext(parts[0])[0]] = 1 + to_compile.append(dst) + return dst + if not src.endswith('.pth'): + log.warn("WARNING: can't process %s", src) + return None + + # extract, tracking .pyd/.dll->native_libs and .py -> to_compile + unpack_archive(dist_filename, egg_tmp, process) + stubs = [] + for res in native_libs: + if res.lower().endswith('.pyd'): # create stubs for .pyd's + parts = res.split('/') + resource = parts[-1] + parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py' + pyfile = os.path.join(egg_tmp, *parts) + to_compile.append(pyfile) + stubs.append(pyfile) + bdist_egg.write_stub(resource, pyfile) + self.byte_compile(to_compile) # compile .py's + bdist_egg.write_safety_flag( + os.path.join(egg_tmp, 'EGG-INFO'), + bdist_egg.analyze_egg(egg_tmp, stubs)) # write 
zip-safety flag + + for name in 'top_level', 'native_libs': + if locals()[name]: + txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt') + if not os.path.exists(txt): + f = open(txt, 'w') + f.write('\n'.join(locals()[name]) + '\n') + f.close() + + def install_wheel(self, wheel_path, tmpdir): + wheel = Wheel(wheel_path) + assert wheel.is_compatible() + destination = os.path.join(self.install_dir, wheel.egg_name()) + destination = os.path.abspath(destination) + if not self.dry_run: + ensure_directory(destination) + if os.path.isdir(destination) and not os.path.islink(destination): + dir_util.remove_tree(destination, dry_run=self.dry_run) + elif os.path.exists(destination): + self.execute( + os.unlink, + (destination,), + "Removing " + destination, + ) + try: + self.execute( + wheel.install_as_egg, + (destination,), + ("Installing %s to %s") % ( + os.path.basename(wheel_path), + os.path.dirname(destination) + ), + ) + finally: + update_dist_caches(destination, fix_zipimporter_caches=False) + self.add_output(destination) + return self.egg_distribution(destination) + + __mv_warning = textwrap.dedent(""" + Because this distribution was installed --multi-version, before you can + import modules from this package in an application, you will need to + 'import pkg_resources' and then use a 'require()' call similar to one of + these examples, in order to select the desired version: + + pkg_resources.require("%(name)s") # latest installed version + pkg_resources.require("%(name)s==%(version)s") # this exact version + pkg_resources.require("%(name)s>=%(version)s") # this version or higher + """).lstrip() + + __id_warning = textwrap.dedent(""" + Note also that the installation directory must be on sys.path at runtime for + this to work. (e.g. by being the application's script directory, by being on + PYTHONPATH, or by being added to sys.path by your code.) + """) + + def installation_report(self, req, dist, what="Installed"): + """Helpful installation message for display to package users""" + msg = "\n%(what)s %(eggloc)s%(extras)s" + if self.multi_version and not self.no_report: + msg += '\n' + self.__mv_warning + if self.install_dir not in map(normalize_path, sys.path): + msg += '\n' + self.__id_warning + + eggloc = dist.location + name = dist.project_name + version = dist.version + extras = '' # TODO: self.report_extras(req, dist) + return msg % locals() + + __editable_msg = textwrap.dedent(""" + Extracted editable version of %(spec)s to %(dirname)s + + If it uses setuptools in its setup script, you can activate it in + "development" mode by going to that directory and running:: + + %(python)s setup.py develop + + See the setuptools documentation for the "develop" command for more info. 
+ """).lstrip() + + def report_editable(self, spec, setup_script): + dirname = os.path.dirname(setup_script) + python = sys.executable + return '\n' + self.__editable_msg % locals() + + def run_setup(self, setup_script, setup_base, args): + sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg) + sys.modules.setdefault('distutils.command.egg_info', egg_info) + + args = list(args) + if self.verbose > 2: + v = 'v' * (self.verbose - 1) + args.insert(0, '-' + v) + elif self.verbose < 2: + args.insert(0, '-q') + if self.dry_run: + args.insert(0, '-n') + log.info( + "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args) + ) + try: + run_setup(setup_script, args) + except SystemExit as v: + raise DistutilsError("Setup script exited with %s" % (v.args[0],)) + + def build_and_install(self, setup_script, setup_base): + args = ['bdist_egg', '--dist-dir'] + + dist_dir = tempfile.mkdtemp( + prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script) + ) + try: + self._set_fetcher_options(os.path.dirname(setup_script)) + args.append(dist_dir) + + self.run_setup(setup_script, setup_base, args) + all_eggs = Environment([dist_dir]) + eggs = [] + for key in all_eggs: + for dist in all_eggs[key]: + eggs.append(self.install_egg(dist.location, setup_base)) + if not eggs and not self.dry_run: + log.warn("No eggs found in %s (setup script problem?)", + dist_dir) + return eggs + finally: + rmtree(dist_dir) + log.set_verbosity(self.verbose) # restore our log verbosity + + def _set_fetcher_options(self, base): + """ + When easy_install is about to run bdist_egg on a source dist, that + source dist might have 'setup_requires' directives, requiring + additional fetching. Ensure the fetcher options given to easy_install + are available to that command as well. + """ + # find the fetch options from easy_install and write them out + # to the setup.cfg file. + ei_opts = self.distribution.get_option_dict('easy_install').copy() + fetch_directives = ( + 'find_links', 'site_dirs', 'index_url', 'optimize', + 'site_dirs', 'allow_hosts', + ) + fetch_options = {} + for key, val in ei_opts.items(): + if key not in fetch_directives: + continue + fetch_options[key.replace('_', '-')] = val[1] + # create a settings dictionary suitable for `edit_config` + settings = dict(easy_install=fetch_options) + cfg_filename = os.path.join(base, 'setup.cfg') + setopt.edit_config(cfg_filename, settings) + + def update_pth(self, dist): + if self.pth_file is None: + return + + for d in self.pth_file[dist.key]: # drop old entries + if self.multi_version or d.location != dist.location: + log.info("Removing %s from easy-install.pth file", d) + self.pth_file.remove(d) + if d.location in self.shadow_path: + self.shadow_path.remove(d.location) + + if not self.multi_version: + if dist.location in self.pth_file.paths: + log.info( + "%s is already the active version in easy-install.pth", + dist, + ) + else: + log.info("Adding %s to easy-install.pth file", dist) + self.pth_file.add(dist) # add new entry + if dist.location not in self.shadow_path: + self.shadow_path.append(dist.location) + + if not self.dry_run: + + self.pth_file.save() + + if dist.key == 'setuptools': + # Ensure that setuptools itself never becomes unavailable! + # XXX should this check for latest version? 
+ filename = os.path.join(self.install_dir, 'setuptools.pth') + if os.path.islink(filename): + os.unlink(filename) + f = open(filename, 'wt') + f.write(self.pth_file.make_relative(dist.location) + '\n') + f.close() + + def unpack_progress(self, src, dst): + # Progress filter for unpacking + log.debug("Unpacking %s to %s", src, dst) + return dst # only unpack-and-compile skips files for dry run + + def unpack_and_compile(self, egg_path, destination): + to_compile = [] + to_chmod = [] + + def pf(src, dst): + if dst.endswith('.py') and not src.startswith('EGG-INFO/'): + to_compile.append(dst) + elif dst.endswith('.dll') or dst.endswith('.so'): + to_chmod.append(dst) + self.unpack_progress(src, dst) + return not self.dry_run and dst or None + + unpack_archive(egg_path, destination, pf) + self.byte_compile(to_compile) + if not self.dry_run: + for f in to_chmod: + mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755 + chmod(f, mode) + + def byte_compile(self, to_compile): + if sys.dont_write_bytecode: + return + + from distutils.util import byte_compile + + try: + # try to make the byte compile messages quieter + log.set_verbosity(self.verbose - 1) + + byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run) + if self.optimize: + byte_compile( + to_compile, optimize=self.optimize, force=1, + dry_run=self.dry_run, + ) + finally: + log.set_verbosity(self.verbose) # restore original verbosity + + __no_default_msg = textwrap.dedent(""" + bad install directory or PYTHONPATH + + You are attempting to install a package to a directory that is not + on PYTHONPATH and which Python does not read ".pth" files from. The + installation directory you specified (via --install-dir, --prefix, or + the distutils default setting) was: + + %s + + and your PYTHONPATH environment variable currently contains: + + %r + + Here are some of your options for correcting the problem: + + * You can choose a different installation directory, i.e., one that is + on PYTHONPATH or supports .pth files + + * You can add the installation directory to the PYTHONPATH environment + variable. (It must then also be on PYTHONPATH whenever you run + Python and want to use the package(s) you are installing.) + + * You can set up the installation directory to support ".pth" files by + using one of the approaches described here: + + https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations + + + Please make the appropriate changes for your system and try again.""").lstrip() + + def no_default_version_msg(self): + template = self.__no_default_msg + return template % (self.install_dir, os.environ.get('PYTHONPATH', '')) + + def install_site_py(self): + """Make sure there's a site.py in the target dir, if needed""" + + if self.sitepy_installed: + return # already did it, or don't need to + + sitepy = os.path.join(self.install_dir, "site.py") + source = resource_string("setuptools", "site-patch.py") + source = source.decode('utf-8') + current = "" + + if os.path.exists(sitepy): + log.debug("Checking existing site.py in %s", self.install_dir) + with io.open(sitepy) as strm: + current = strm.read() + + if not current.startswith('def __boot():'): + raise DistutilsError( + "%s is not a setuptools-generated site.py; please" + " remove it." 
% sitepy + ) + + if current != source: + log.info("Creating %s", sitepy) + if not self.dry_run: + ensure_directory(sitepy) + with io.open(sitepy, 'w', encoding='utf-8') as strm: + strm.write(source) + self.byte_compile([sitepy]) + + self.sitepy_installed = True + + def create_home_path(self): + """Create directories under ~.""" + if not self.user: + return + home = convert_path(os.path.expanduser("~")) + for name, path in six.iteritems(self.config_vars): + if path.startswith(home) and not os.path.isdir(path): + self.debug_print("os.makedirs('%s', 0o700)" % path) + os.makedirs(path, 0o700) + + INSTALL_SCHEMES = dict( + posix=dict( + install_dir='$base/lib/python$py_version_short/site-packages', + script_dir='$base/bin', + ), + ) + + DEFAULT_SCHEME = dict( + install_dir='$base/Lib/site-packages', + script_dir='$base/Scripts', + ) + + def _expand(self, *attrs): + config_vars = self.get_finalized_command('install').config_vars + + if self.prefix: + # Set default install_dir/scripts from --prefix + config_vars = config_vars.copy() + config_vars['base'] = self.prefix + scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME) + for attr, val in scheme.items(): + if getattr(self, attr, None) is None: + setattr(self, attr, val) + + from distutils.util import subst_vars + + for attr in attrs: + val = getattr(self, attr) + if val is not None: + val = subst_vars(val, config_vars) + if os.name == 'posix': + val = os.path.expanduser(val) + setattr(self, attr, val) + + +def _pythonpath(): + items = os.environ.get('PYTHONPATH', '').split(os.pathsep) + return filter(None, items) + + +def get_site_dirs(): + """ + Return a list of 'site' dirs + """ + + sitedirs = [] + + # start with PYTHONPATH + sitedirs.extend(_pythonpath()) + + prefixes = [sys.prefix] + if sys.exec_prefix != sys.prefix: + prefixes.append(sys.exec_prefix) + for prefix in prefixes: + if prefix: + if sys.platform in ('os2emx', 'riscos'): + sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) + elif os.sep == '/': + sitedirs.extend([ + os.path.join( + prefix, + "lib", + "python" + sys.version[:3], + "site-packages", + ), + os.path.join(prefix, "lib", "site-python"), + ]) + else: + sitedirs.extend([ + prefix, + os.path.join(prefix, "lib", "site-packages"), + ]) + if sys.platform == 'darwin': + # for framework builds *only* we add the standard Apple + # locations. 
Currently only per-user, but /Library and + # /Network/Library could be added too + if 'Python.framework' in prefix: + home = os.environ.get('HOME') + if home: + home_sp = os.path.join( + home, + 'Library', + 'Python', + sys.version[:3], + 'site-packages', + ) + sitedirs.append(home_sp) + lib_paths = get_path('purelib'), get_path('platlib') + for site_lib in lib_paths: + if site_lib not in sitedirs: + sitedirs.append(site_lib) + + if site.ENABLE_USER_SITE: + sitedirs.append(site.USER_SITE) + + try: + sitedirs.extend(site.getsitepackages()) + except AttributeError: + pass + + sitedirs = list(map(normalize_path, sitedirs)) + + return sitedirs + + +def expand_paths(inputs): + """Yield sys.path directories that might contain "old-style" packages""" + + seen = {} + + for dirname in inputs: + dirname = normalize_path(dirname) + if dirname in seen: + continue + + seen[dirname] = 1 + if not os.path.isdir(dirname): + continue + + files = os.listdir(dirname) + yield dirname, files + + for name in files: + if not name.endswith('.pth'): + # We only care about the .pth files + continue + if name in ('easy-install.pth', 'setuptools.pth'): + # Ignore .pth files that we control + continue + + # Read the .pth file + f = open(os.path.join(dirname, name)) + lines = list(yield_lines(f)) + f.close() + + # Yield existing non-dupe, non-import directory lines from it + for line in lines: + if not line.startswith("import"): + line = normalize_path(line.rstrip()) + if line not in seen: + seen[line] = 1 + if not os.path.isdir(line): + continue + yield line, os.listdir(line) + + +def extract_wininst_cfg(dist_filename): + """Extract configuration data from a bdist_wininst .exe + + Returns a configparser.RawConfigParser, or None + """ + f = open(dist_filename, 'rb') + try: + endrec = zipfile._EndRecData(f) + if endrec is None: + return None + + prepended = (endrec[9] - endrec[5]) - endrec[6] + if prepended < 12: # no wininst data here + return None + f.seek(prepended - 12) + + tag, cfglen, bmlen = struct.unpack("<iii", f.read(12)) + if tag not in (0x1234567A, 0x1234567B): + return None # not a valid tag + + f.seek(prepended - (12 + cfglen)) + init = {'version': '', 'target_version': ''} + cfg = configparser.RawConfigParser(init) + try: + part = f.read(cfglen) + # Read up to the first null byte. + config = part.split(b'\0', 1)[0] + # Now the config is in bytes, but for RawConfigParser, it should + # be text, so decode it. 
+ config = config.decode(sys.getfilesystemencoding()) + cfg.readfp(six.StringIO(config)) + except configparser.Error: + return None + if not cfg.has_section('metadata') or not cfg.has_section('Setup'): + return None + return cfg + + finally: + f.close() + + +def get_exe_prefixes(exe_filename): + """Get exe->egg path translations for a given .exe file""" + + prefixes = [ + ('PURELIB/', ''), + ('PLATLIB/pywin32_system32', ''), + ('PLATLIB/', ''), + ('SCRIPTS/', 'EGG-INFO/scripts/'), + ('DATA/lib/site-packages', ''), + ] + z = zipfile.ZipFile(exe_filename) + try: + for info in z.infolist(): + name = info.filename + parts = name.split('/') + if len(parts) == 3 and parts[2] == 'PKG-INFO': + if parts[1].endswith('.egg-info'): + prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/')) + break + if len(parts) != 2 or not name.endswith('.pth'): + continue + if name.endswith('-nspkg.pth'): + continue + if parts[0].upper() in ('PURELIB', 'PLATLIB'): + contents = z.read(name) + if six.PY3: + contents = contents.decode() + for pth in yield_lines(contents): + pth = pth.strip().replace('\\', '/') + if not pth.startswith('import'): + prefixes.append((('%s/%s/' % (parts[0], pth)), '')) + finally: + z.close() + prefixes = [(x.lower(), y) for x, y in prefixes] + prefixes.sort() + prefixes.reverse() + return prefixes + + +class PthDistributions(Environment): + """A .pth file with Distribution paths in it""" + + dirty = False + + def __init__(self, filename, sitedirs=()): + self.filename = filename + self.sitedirs = list(map(normalize_path, sitedirs)) + self.basedir = normalize_path(os.path.dirname(self.filename)) + self._load() + Environment.__init__(self, [], None, None) + for path in yield_lines(self.paths): + list(map(self.add, find_distributions(path, True))) + + def _load(self): + self.paths = [] + saw_import = False + seen = dict.fromkeys(self.sitedirs) + if os.path.isfile(self.filename): + f = open(self.filename, 'rt') + for line in f: + if line.startswith('import'): + saw_import = True + continue + path = line.rstrip() + self.paths.append(path) + if not path.strip() or path.strip().startswith('#'): + continue + # skip non-existent paths, in case somebody deleted a package + # manually, and duplicate paths as well + path = self.paths[-1] = normalize_path( + os.path.join(self.basedir, path) + ) + if not os.path.exists(path) or path in seen: + self.paths.pop() # skip it + self.dirty = True # we cleaned up, so we're dirty now :) + continue + seen[path] = 1 + f.close() + + if self.paths and not saw_import: + self.dirty = True # ensure anything we touch has import wrappers + while self.paths and not self.paths[-1].strip(): + self.paths.pop() + + def save(self): + """Write changed .pth file back to disk""" + if not self.dirty: + return + + rel_paths = list(map(self.make_relative, self.paths)) + if rel_paths: + log.debug("Saving %s", self.filename) + lines = self._wrap_lines(rel_paths) + data = '\n'.join(lines) + '\n' + + if os.path.islink(self.filename): + os.unlink(self.filename) + with open(self.filename, 'wt') as f: + f.write(data) + + elif os.path.exists(self.filename): + log.debug("Deleting empty %s", self.filename) + os.unlink(self.filename) + + self.dirty = False + + @staticmethod + def _wrap_lines(lines): + return lines + + def add(self, dist): + """Add `dist` to the distribution map""" + new_path = ( + dist.location not in self.paths and ( + dist.location not in self.sitedirs or + # account for '.' 
being in PYTHONPATH + dist.location == os.getcwd() + ) + ) + if new_path: + self.paths.append(dist.location) + self.dirty = True + Environment.add(self, dist) + + def remove(self, dist): + """Remove `dist` from the distribution map""" + while dist.location in self.paths: + self.paths.remove(dist.location) + self.dirty = True + Environment.remove(self, dist) + + def make_relative(self, path): + npath, last = os.path.split(normalize_path(path)) + baselen = len(self.basedir) + parts = [last] + sep = os.altsep == '/' and '/' or os.sep + while len(npath) >= baselen: + if npath == self.basedir: + parts.append(os.curdir) + parts.reverse() + return sep.join(parts) + npath, last = os.path.split(npath) + parts.append(last) + else: + return path + + +class RewritePthDistributions(PthDistributions): + @classmethod + def _wrap_lines(cls, lines): + yield cls.prelude + for line in lines: + yield line + yield cls.postlude + + prelude = _one_liner(""" + import sys + sys.__plen = len(sys.path) + """) + postlude = _one_liner(""" + import sys + new = sys.path[sys.__plen:] + del sys.path[sys.__plen:] + p = getattr(sys, '__egginsert', 0) + sys.path[p:p] = new + sys.__egginsert = p + len(new) + """) + + +if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': + PthDistributions = RewritePthDistributions + + +def _first_line_re(): + """ + Return a regular expression based on first_line_re suitable for matching + strings. + """ + if isinstance(first_line_re.pattern, str): + return first_line_re + + # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern. + return re.compile(first_line_re.pattern.decode()) + + +def auto_chmod(func, arg, exc): + if func in [os.unlink, os.remove] and os.name == 'nt': + chmod(arg, stat.S_IWRITE) + return func(arg) + et, ev, _ = sys.exc_info() + six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg)))) + + +def update_dist_caches(dist_path, fix_zipimporter_caches): + """ + Fix any globally cached `dist_path` related data + + `dist_path` should be a path of a newly installed egg distribution (zipped + or unzipped). + + sys.path_importer_cache contains finder objects that have been cached when + importing data from the original distribution. Any such finders need to be + cleared since the replacement distribution might be packaged differently, + e.g. a zipped egg distribution might get replaced with an unzipped egg + folder or vice versa. Having the old finders cached may then cause Python + to attempt loading modules from the replacement distribution using an + incorrect loader. + + zipimport.zipimporter objects are Python loaders charged with importing + data packaged inside zip archives. If stale loaders referencing the + original distribution, are left behind, they can fail to load modules from + the replacement distribution. E.g. if an old zipimport.zipimporter instance + is used to load data from a new zipped egg archive, it may cause the + operation to attempt to locate the requested data in the wrong location - + one indicated by the original distribution's zip archive directory + information. Such an operation may then fail outright, e.g. report having + read a 'bad local file header', or even worse, it may fail silently & + return invalid data. + + zipimport._zip_directory_cache contains cached zip archive directory + information for all existing zipimport.zipimporter instances and all such + instances connected to the same archive share the same cached directory + information. 
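+
+    For example, after replacing a zipped egg on disk with a new build at
+    the same path, a caller would typically invoke::
+
+        update_dist_caches('/path/to/foo-1.0-py2.7.egg',
+                           fix_zipimporter_caches=True)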
+ + If asked, and the underlying Python implementation allows it, we can fix + all existing zipimport.zipimporter instances instead of having to track + them down and remove them one by one, by updating their shared cached zip + archive directory information. This, of course, assumes that the + replacement distribution is packaged as a zipped egg. + + If not asked to fix existing zipimport.zipimporter instances, we still do + our best to clear any remaining zipimport.zipimporter related cached data + that might somehow later get used when attempting to load data from the new + distribution and thus cause such load operations to fail. Note that when + tracking down such remaining stale data, we can not catch every conceivable + usage from here, and we clear only those that we know of and have found to + cause problems if left alive. Any remaining caches should be updated by + whomever is in charge of maintaining them, i.e. they should be ready to + handle us replacing their zip archives with new distributions at runtime. + + """ + # There are several other known sources of stale zipimport.zipimporter + # instances that we do not clear here, but might if ever given a reason to + # do so: + # * Global setuptools pkg_resources.working_set (a.k.a. 'master working + # set') may contain distributions which may in turn contain their + # zipimport.zipimporter loaders. + # * Several zipimport.zipimporter loaders held by local variables further + # up the function call stack when running the setuptools installation. + # * Already loaded modules may have their __loader__ attribute set to the + # exact loader instance used when importing them. Python 3.4 docs state + # that this information is intended mostly for introspection and so is + # not expected to cause us problems. + normalized_path = normalize_path(dist_path) + _uncache(normalized_path, sys.path_importer_cache) + if fix_zipimporter_caches: + _replace_zip_directory_cache_data(normalized_path) + else: + # Here, even though we do not want to fix existing and now stale + # zipimporter cache information, we still want to remove it. Related to + # Python's zip archive directory information cache, we clear each of + # its stale entries in two phases: + # 1. Clear the entry so attempting to access zip archive information + # via any existing stale zipimport.zipimporter instances fails. + # 2. Remove the entry from the cache so any newly constructed + # zipimport.zipimporter instances do not end up using old stale + # zip archive directory information. + # This whole stale data removal step does not seem strictly necessary, + # but has been left in because it was done before we started replacing + # the zip archive directory information cache content if possible, and + # there are no relevant unit tests that we can depend on to tell us if + # this is really needed. + _remove_and_clear_zip_directory_cache_data(normalized_path) + + +def _collect_zipimporter_cache_entries(normalized_path, cache): + """ + Return zipimporter cache entry keys related to a given normalized path. + + Alternative path spellings (e.g. those using different character case or + those using alternative path separators) related to the same path are + included. Any sub-path entries are included as well, i.e. those + corresponding to zip archives embedded in other zip archives. 
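+
+    For example, given a normalized_path of '/x/a.egg', cache keys such as
+    '/x/a.egg' and '/x/a.egg/nested.egg' are matched, while a sibling such
+    as '/x/a.egg-info' is not, because the character following the prefix
+    must be a path separator (or nothing at all).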
+ + """ + result = [] + prefix_len = len(normalized_path) + for p in cache: + np = normalize_path(p) + if (np.startswith(normalized_path) and + np[prefix_len:prefix_len + 1] in (os.sep, '')): + result.append(p) + return result + + +def _update_zipimporter_cache(normalized_path, cache, updater=None): + """ + Update zipimporter cache data for a given normalized path. + + Any sub-path entries are processed as well, i.e. those corresponding to zip + archives embedded in other zip archives. + + Given updater is a callable taking a cache entry key and the original entry + (after already removing the entry from the cache), and expected to update + the entry and possibly return a new one to be inserted in its place. + Returning None indicates that the entry should not be replaced with a new + one. If no updater is given, the cache entries are simply removed without + any additional processing, the same as if the updater simply returned None. + + """ + for p in _collect_zipimporter_cache_entries(normalized_path, cache): + # N.B. pypy's custom zipimport._zip_directory_cache implementation does + # not support the complete dict interface: + # * Does not support item assignment, thus not allowing this function + # to be used only for removing existing cache entries. + # * Does not support the dict.pop() method, forcing us to use the + # get/del patterns instead. For more detailed information see the + # following links: + # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420 + # http://bit.ly/2h9itJX + old_entry = cache[p] + del cache[p] + new_entry = updater and updater(p, old_entry) + if new_entry is not None: + cache[p] = new_entry + + +def _uncache(normalized_path, cache): + _update_zipimporter_cache(normalized_path, cache) + + +def _remove_and_clear_zip_directory_cache_data(normalized_path): + def clear_and_remove_cached_zip_archive_directory_data(path, old_entry): + old_entry.clear() + + _update_zipimporter_cache( + normalized_path, zipimport._zip_directory_cache, + updater=clear_and_remove_cached_zip_archive_directory_data) + + +# PyPy Python implementation does not allow directly writing to the +# zipimport._zip_directory_cache and so prevents us from attempting to correct +# its content. The best we can do there is clear the problematic cache content +# and have PyPy repopulate it as needed. The downside is that if there are any +# stale zipimport.zipimporter instances laying around, attempting to use them +# will fail due to not having its zip archive directory information available +# instead of being automatically corrected to use the new correct zip archive +# directory information. +if '__pypy__' in sys.builtin_module_names: + _replace_zip_directory_cache_data = \ + _remove_and_clear_zip_directory_cache_data +else: + + def _replace_zip_directory_cache_data(normalized_path): + def replace_cached_zip_archive_directory_data(path, old_entry): + # N.B. In theory, we could load the zip directory information just + # once for all updated path spellings, and then copy it locally and + # update its contained path strings to contain the correct + # spelling, but that seems like a way too invasive move (this cache + # structure is not officially documented anywhere and could in + # theory change with new Python releases) for no significant + # benefit. 
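+            # The in-place update below therefore proceeds in three steps:
+            # wipe the stale directory data, construct a fresh
+            # zipimport.zipimporter for the same path (which re-reads the
+            # archive and repopulates zipimport._zip_directory_cache), and
+            # copy the fresh data back into the dict that all existing
+            # zipimporter instances for this archive still share.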
+ old_entry.clear() + zipimport.zipimporter(path) + old_entry.update(zipimport._zip_directory_cache[path]) + return old_entry + + _update_zipimporter_cache( + normalized_path, zipimport._zip_directory_cache, + updater=replace_cached_zip_archive_directory_data) + + +def is_python(text, filename='<string>'): + "Is this string a valid Python script?" + try: + compile(text, filename, 'exec') + except (SyntaxError, TypeError): + return False + else: + return True + + +def is_sh(executable): + """Determine if the specified executable is a .sh (contains a #! line)""" + try: + with io.open(executable, encoding='latin-1') as fp: + magic = fp.read(2) + except (OSError, IOError): + return executable + return magic == '#!' + + +def nt_quote_arg(arg): + """Quote a command line argument according to Windows parsing rules""" + return subprocess.list2cmdline([arg]) + + +def is_python_script(script_text, filename): + """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc. + """ + if filename.endswith('.py') or filename.endswith('.pyw'): + return True # extension says it's Python + if is_python(script_text, filename): + return True # it's syntactically valid Python + if script_text.startswith('#!'): + # It begins with a '#!' line, so check if 'python' is in it somewhere + return 'python' in script_text.splitlines()[0].lower() + + return False # Not any Python I can recognize + + +try: + from os import chmod as _chmod +except ImportError: + # Jython compatibility + def _chmod(*args): + pass + + +def chmod(path, mode): + log.debug("changing mode of %s to %o", path, mode) + try: + _chmod(path, mode) + except os.error as e: + log.debug("chmod failed: %s", e) + + +class CommandSpec(list): + """ + A command spec for a #! header, specified as a list of arguments akin to + those passed to Popen. + """ + + options = [] + split_args = dict() + + @classmethod + def best(cls): + """ + Choose the best CommandSpec class based on environmental conditions. + """ + return cls + + @classmethod + def _sys_executable(cls): + _default = os.path.normpath(sys.executable) + return os.environ.get('__PYVENV_LAUNCHER__', _default) + + @classmethod + def from_param(cls, param): + """ + Construct a CommandSpec from a parameter to build_scripts, which may + be None. + """ + if isinstance(param, cls): + return param + if isinstance(param, list): + return cls(param) + if param is None: + return cls.from_environment() + # otherwise, assume it's a string. + return cls.from_string(param) + + @classmethod + def from_environment(cls): + return cls([cls._sys_executable()]) + + @classmethod + def from_string(cls, string): + """ + Construct a command spec from a simple string representing a command + line parseable by shlex.split. + """ + items = shlex.split(string, **cls.split_args) + return cls(items) + + def install_options(self, script_text): + self.options = shlex.split(self._extract_options(script_text)) + cmdline = subprocess.list2cmdline(self) + if not isascii(cmdline): + self.options[:0] = ['-x'] + + @staticmethod + def _extract_options(orig_script): + """ + Extract any options from the first line of the script. 
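+
+        For example, a script whose first line is '#!python -x' yields
+        '-x', while a script with no '#!' interpreter line yields ''.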
+ """ + first = (orig_script + '\n').splitlines()[0] + match = _first_line_re().match(first) + options = match.group(1) or '' if match else '' + return options.strip() + + def as_header(self): + return self._render(self + list(self.options)) + + @staticmethod + def _strip_quotes(item): + _QUOTES = '"\'' + for q in _QUOTES: + if item.startswith(q) and item.endswith(q): + return item[1:-1] + return item + + @staticmethod + def _render(items): + cmdline = subprocess.list2cmdline( + CommandSpec._strip_quotes(item.strip()) for item in items) + return '#!' + cmdline + '\n' + + +# For pbr compat; will be removed in a future version. +sys_executable = CommandSpec._sys_executable() + + +class WindowsCommandSpec(CommandSpec): + split_args = dict(posix=False) + + +class ScriptWriter: + """ + Encapsulates behavior around writing entry point scripts for console and + gui apps. + """ + + template = textwrap.dedent(r""" + # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r + __requires__ = %(spec)r + import re + import sys + from pkg_resources import load_entry_point + + if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit( + load_entry_point(%(spec)r, %(group)r, %(name)r)() + ) + """).lstrip() + + command_spec_class = CommandSpec + + @classmethod + def get_script_args(cls, dist, executable=None, wininst=False): + # for backward compatibility + warnings.warn("Use get_args", EasyInstallDeprecationWarning) + writer = (WindowsScriptWriter if wininst else ScriptWriter).best() + header = cls.get_script_header("", executable, wininst) + return writer.get_args(dist, header) + + @classmethod + def get_script_header(cls, script_text, executable=None, wininst=False): + # for backward compatibility + warnings.warn("Use get_header", EasyInstallDeprecationWarning, stacklevel=2) + if wininst: + executable = "python.exe" + return cls.get_header(script_text, executable) + + @classmethod + def get_args(cls, dist, header=None): + """ + Yield write_script() argument tuples for a distribution's + console_scripts and gui_scripts entry points. + """ + if header is None: + header = cls.get_header() + spec = str(dist.as_requirement()) + for type_ in 'console', 'gui': + group = type_ + '_scripts' + for name, ep in dist.get_entry_map(group).items(): + cls._ensure_safe_name(name) + script_text = cls.template % locals() + args = cls._get_script_args(type_, name, header, script_text) + for res in args: + yield res + + @staticmethod + def _ensure_safe_name(name): + """ + Prevent paths in *_scripts entry point names. + """ + has_path_sep = re.search(r'[\\/]', name) + if has_path_sep: + raise ValueError("Path separators not allowed in script names") + + @classmethod + def get_writer(cls, force_windows): + # for backward compatibility + warnings.warn("Use best", EasyInstallDeprecationWarning) + return WindowsScriptWriter.best() if force_windows else cls.best() + + @classmethod + def best(cls): + """ + Select the best ScriptWriter for this environment. + """ + if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'): + return WindowsScriptWriter.best() + else: + return cls + + @classmethod + def _get_script_args(cls, type_, name, header, script_text): + # Simply write the stub with no extension. + yield (name, header + script_text) + + @classmethod + def get_header(cls, script_text="", executable=None): + """Create a #! 
line, getting options (if any) from script_text""" + cmd = cls.command_spec_class.best().from_param(executable) + cmd.install_options(script_text) + return cmd.as_header() + + +class WindowsScriptWriter(ScriptWriter): + command_spec_class = WindowsCommandSpec + + @classmethod + def get_writer(cls): + # for backward compatibility + warnings.warn("Use best", EasyInstallDeprecationWarning) + return cls.best() + + @classmethod + def best(cls): + """ + Select the best ScriptWriter suitable for Windows + """ + writer_lookup = dict( + executable=WindowsExecutableLauncherWriter, + natural=cls, + ) + # for compatibility, use the executable launcher by default + launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable') + return writer_lookup[launcher] + + @classmethod + def _get_script_args(cls, type_, name, header, script_text): + "For Windows, add a .py extension" + ext = dict(console='.pya', gui='.pyw')[type_] + if ext not in os.environ['PATHEXT'].lower().split(';'): + msg = ( + "{ext} not listed in PATHEXT; scripts will not be " + "recognized as executables." + ).format(**locals()) + warnings.warn(msg, UserWarning) + old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe'] + old.remove(ext) + header = cls._adjust_header(type_, header) + blockers = [name + x for x in old] + yield name + ext, header + script_text, 't', blockers + + @classmethod + def _adjust_header(cls, type_, orig_header): + """ + Make sure 'pythonw' is used for gui and and 'python' is used for + console (regardless of what sys.executable is). + """ + pattern = 'pythonw.exe' + repl = 'python.exe' + if type_ == 'gui': + pattern, repl = repl, pattern + pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE) + new_header = pattern_ob.sub(string=orig_header, repl=repl) + return new_header if cls._use_header(new_header) else orig_header + + @staticmethod + def _use_header(new_header): + """ + Should _adjust_header use the replaced header? + + On non-windows systems, always use. On + Windows systems, only use the replaced header if it resolves + to an executable on the system. + """ + clean_header = new_header[2:-1].strip('"') + return sys.platform != 'win32' or find_executable(clean_header) + + +class WindowsExecutableLauncherWriter(WindowsScriptWriter): + @classmethod + def _get_script_args(cls, type_, name, header, script_text): + """ + For Windows, add a .py extension and an .exe launcher + """ + if type_ == 'gui': + launcher_type = 'gui' + ext = '-script.pyw' + old = ['.pyw'] + else: + launcher_type = 'cli' + ext = '-script.py' + old = ['.py', '.pyc', '.pyo'] + hdr = cls._adjust_header(type_, header) + blockers = [name + x for x in old] + yield (name + ext, hdr + script_text, 't', blockers) + yield ( + name + '.exe', get_win_launcher(launcher_type), + 'b' # write in binary mode + ) + if not is_64bit(): + # install a manifest for the launcher to prevent Windows + # from detecting it as an installer (which it will for + # launchers like easy_install.exe). Consider only + # adding a manifest for launchers detected as installers. + # See Distribute #143 for details. + m_name = name + '.exe.manifest' + yield (m_name, load_launcher_manifest(name), 't') + + +# for backward-compatibility +get_script_args = ScriptWriter.get_script_args +get_script_header = ScriptWriter.get_script_header + + +def get_win_launcher(type): + """ + Load the Windows launcher (executable) suitable for launching a script. + + `type` should be either 'cli' or 'gui' + + Returns the executable as a byte string. 
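+
+    For example, get_win_launcher('cli') returns the bytes of the bundled
+    'cli-32.exe' or 'cli-64.exe' launcher, depending on the platform.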
+ """ + launcher_fn = '%s.exe' % type + if is_64bit(): + launcher_fn = launcher_fn.replace(".", "-64.") + else: + launcher_fn = launcher_fn.replace(".", "-32.") + return resource_string('setuptools', launcher_fn) + + +def load_launcher_manifest(name): + manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml') + if six.PY2: + return manifest % vars() + else: + return manifest.decode('utf-8') % vars() + + +def rmtree(path, ignore_errors=False, onerror=auto_chmod): + return shutil.rmtree(path, ignore_errors, onerror) + + +def current_umask(): + tmp = os.umask(0o022) + os.umask(tmp) + return tmp + + +def bootstrap(): + # This function is called when setuptools*.egg is run using /bin/sh + import setuptools + + argv0 = os.path.dirname(setuptools.__path__[0]) + sys.argv[0] = argv0 + sys.argv.append(argv0) + main() + + +def main(argv=None, **kw): + from setuptools import setup + from setuptools.dist import Distribution + + class DistributionWithoutHelpCommands(Distribution): + common_usage = "" + + def _show_help(self, *args, **kw): + with _patch_usage(): + Distribution._show_help(self, *args, **kw) + + if argv is None: + argv = sys.argv[1:] + + with _patch_usage(): + setup( + script_args=['-q', 'easy_install', '-v'] + argv, + script_name=sys.argv[0] or 'easy_install', + distclass=DistributionWithoutHelpCommands, + **kw + ) + + +@contextlib.contextmanager +def _patch_usage(): + import distutils.core + USAGE = textwrap.dedent(""" + usage: %(script)s [options] requirement_or_url ... + or: %(script)s --help + """).lstrip() + + def gen_usage(script_name): + return USAGE % dict( + script=os.path.basename(script_name), + ) + + saved = distutils.core.gen_usage + distutils.core.gen_usage = gen_usage + try: + yield + finally: + distutils.core.gen_usage = saved + +class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning): + """Class for warning about deprecations in EasyInstall in SetupTools. 
Not ignored by default, unlike DeprecationWarning.""" + diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/easy_install.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/easy_install.pyc new file mode 100644 index 0000000..7d635f4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/easy_install.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/egg_info.py b/project/venv/lib/python2.7/site-packages/setuptools/command/egg_info.py new file mode 100644 index 0000000..5d8f451 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/egg_info.py @@ -0,0 +1,717 @@ +"""setuptools.command.egg_info + +Create a distribution's .egg-info directory and contents""" + +from distutils.filelist import FileList as _FileList +from distutils.errors import DistutilsInternalError +from distutils.util import convert_path +from distutils import log +import distutils.errors +import distutils.filelist +import os +import re +import sys +import io +import warnings +import time +import collections + +from setuptools.extern import six +from setuptools.extern.six.moves import map + +from setuptools import Command +from setuptools.command.sdist import sdist +from setuptools.command.sdist import walk_revctrl +from setuptools.command.setopt import edit_config +from setuptools.command import bdist_egg +from pkg_resources import ( + parse_requirements, safe_name, parse_version, + safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) +import setuptools.unicode_utils as unicode_utils +from setuptools.glob import glob + +from setuptools.extern import packaging +from setuptools import SetuptoolsDeprecationWarning + +def translate_pattern(glob): + """ + Translate a file path glob like '*.txt' in to a regular expression. + This differs from fnmatch.translate which allows wildcards to match + directory separators. It also knows about '**/' which matches any number of + directories. + """ + pat = '' + + # This will split on '/' within [character classes]. This is deliberate. + chunks = glob.split(os.path.sep) + + sep = re.escape(os.sep) + valid_char = '[^%s]' % (sep,) + + for c, chunk in enumerate(chunks): + last_chunk = c == len(chunks) - 1 + + # Chunks that are a literal ** are globstars. They match anything. 
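+        # (illustrative) e.g. 'docs/**/*.txt' matches both 'docs/a.txt' and
+        # 'docs/sub/dir/b.txt', but never escapes the 'docs/' prefix.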
+ if chunk == '**': + if last_chunk: + # Match anything if this is the last component + pat += '.*' + else: + # Match '(name/)*' + pat += '(?:%s+%s)*' % (valid_char, sep) + continue # Break here as the whole path component has been handled + + # Find any special characters in the remainder + i = 0 + chunk_len = len(chunk) + while i < chunk_len: + char = chunk[i] + if char == '*': + # Match any number of name characters + pat += valid_char + '*' + elif char == '?': + # Match a name character + pat += valid_char + elif char == '[': + # Character class + inner_i = i + 1 + # Skip initial !/] chars + if inner_i < chunk_len and chunk[inner_i] == '!': + inner_i = inner_i + 1 + if inner_i < chunk_len and chunk[inner_i] == ']': + inner_i = inner_i + 1 + + # Loop till the closing ] is found + while inner_i < chunk_len and chunk[inner_i] != ']': + inner_i = inner_i + 1 + + if inner_i >= chunk_len: + # Got to the end of the string without finding a closing ] + # Do not treat this as a matching group, but as a literal [ + pat += re.escape(char) + else: + # Grab the insides of the [brackets] + inner = chunk[i + 1:inner_i] + char_class = '' + + # Class negation + if inner[0] == '!': + char_class = '^' + inner = inner[1:] + + char_class += re.escape(inner) + pat += '[%s]' % (char_class,) + + # Skip to the end ] + i = inner_i + else: + pat += re.escape(char) + i += 1 + + # Join each chunk with the dir separator + if not last_chunk: + pat += sep + + pat += r'\Z' + return re.compile(pat, flags=re.MULTILINE|re.DOTALL) + + +class InfoCommon: + tag_build = None + tag_date = None + + @property + def name(self): + return safe_name(self.distribution.get_name()) + + def tagged_version(self): + version = self.distribution.get_version() + # egg_info may be called more than once for a distribution, + # in which case the version string already contains all tags. + if self.vtags and version.endswith(self.vtags): + return safe_version(version) + return safe_version(version + self.vtags) + + def tags(self): + version = '' + if self.tag_build: + version += self.tag_build + if self.tag_date: + version += time.strftime("-%Y%m%d") + return version + vtags = property(tags) + + +class egg_info(InfoCommon, Command): + description = "create a distribution's .egg-info directory" + + user_options = [ + ('egg-base=', 'e', "directory containing .egg-info directories" + " (default: top of the source tree)"), + ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"), + ('tag-build=', 'b', "Specify explicit tag to add to version number"), + ('no-date', 'D', "Don't include date stamp [default]"), + ] + + boolean_options = ['tag-date'] + negative_opt = { + 'no-date': 'tag-date', + } + + def initialize_options(self): + self.egg_base = None + self.egg_name = None + self.egg_info = None + self.egg_version = None + self.broken_egg_info = False + + #################################### + # allow the 'tag_svn_revision' to be detected and + # set, supporting sdists built on older Setuptools. + @property + def tag_svn_revision(self): + pass + + @tag_svn_revision.setter + def tag_svn_revision(self, value): + pass + #################################### + + def save_version_info(self, filename): + """ + Materialize the value of date into the + build tag. Install build keys in a deterministic order + to avoid arbitrary reordering on subsequent builds. 
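+
+        For example, with no build tag configured, the target file
+        (typically setup.cfg) ends up with a section like::
+
+            [egg_info]
+            tag_build =
+            tag_date = 0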
+ """ + egg_info = collections.OrderedDict() + # follow the order these keys would have been added + # when PYTHONHASHSEED=0 + egg_info['tag_build'] = self.tags() + egg_info['tag_date'] = 0 + edit_config(filename, dict(egg_info=egg_info)) + + def finalize_options(self): + # Note: we need to capture the current value returned + # by `self.tagged_version()`, so we can later update + # `self.distribution.metadata.version` without + # repercussions. + self.egg_name = self.name + self.egg_version = self.tagged_version() + parsed_version = parse_version(self.egg_version) + + try: + is_version = isinstance(parsed_version, packaging.version.Version) + spec = ( + "%s==%s" if is_version else "%s===%s" + ) + list( + parse_requirements(spec % (self.egg_name, self.egg_version)) + ) + except ValueError: + raise distutils.errors.DistutilsOptionError( + "Invalid distribution name or version syntax: %s-%s" % + (self.egg_name, self.egg_version) + ) + + if self.egg_base is None: + dirs = self.distribution.package_dir + self.egg_base = (dirs or {}).get('', os.curdir) + + self.ensure_dirname('egg_base') + self.egg_info = to_filename(self.egg_name) + '.egg-info' + if self.egg_base != os.curdir: + self.egg_info = os.path.join(self.egg_base, self.egg_info) + if '-' in self.egg_name: + self.check_broken_egg_info() + + # Set package version for the benefit of dumber commands + # (e.g. sdist, bdist_wininst, etc.) + # + self.distribution.metadata.version = self.egg_version + + # If we bootstrapped around the lack of a PKG-INFO, as might be the + # case in a fresh checkout, make sure that any special tags get added + # to the version info + # + pd = self.distribution._patched_dist + if pd is not None and pd.key == self.egg_name.lower(): + pd._version = self.egg_version + pd._parsed_version = parse_version(self.egg_version) + self.distribution._patched_dist = None + + def write_or_delete_file(self, what, filename, data, force=False): + """Write `data` to `filename` or delete if empty + + If `data` is non-empty, this routine is the same as ``write_file()``. + If `data` is empty but not ``None``, this is the same as calling + ``delete_file(filename)`. If `data` is ``None``, then this is a no-op + unless `filename` exists, in which case a warning is issued about the + orphaned file (if `force` is false), or deleted (if `force` is true). + """ + if data: + self.write_file(what, filename, data) + elif os.path.exists(filename): + if data is None and not force: + log.warn( + "%s not set in setup(), but %s exists", what, filename + ) + return + else: + self.delete_file(filename) + + def write_file(self, what, filename, data): + """Write `data` to `filename` (if not a dry run) after announcing it + + `what` is used in a log message to identify what is being written + to the file. 
+ """ + log.info("writing %s to %s", what, filename) + if six.PY3: + data = data.encode("utf-8") + if not self.dry_run: + f = open(filename, 'wb') + f.write(data) + f.close() + + def delete_file(self, filename): + """Delete `filename` (if not a dry run) after announcing it""" + log.info("deleting %s", filename) + if not self.dry_run: + os.unlink(filename) + + def run(self): + self.mkpath(self.egg_info) + os.utime(self.egg_info, None) + installer = self.distribution.fetch_build_egg + for ep in iter_entry_points('egg_info.writers'): + ep.require(installer=installer) + writer = ep.resolve() + writer(self, ep.name, os.path.join(self.egg_info, ep.name)) + + # Get rid of native_libs.txt if it was put there by older bdist_egg + nl = os.path.join(self.egg_info, "native_libs.txt") + if os.path.exists(nl): + self.delete_file(nl) + + self.find_sources() + + def find_sources(self): + """Generate SOURCES.txt manifest file""" + manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") + mm = manifest_maker(self.distribution) + mm.manifest = manifest_filename + mm.run() + self.filelist = mm.filelist + + def check_broken_egg_info(self): + bei = self.egg_name + '.egg-info' + if self.egg_base != os.curdir: + bei = os.path.join(self.egg_base, bei) + if os.path.exists(bei): + log.warn( + "-" * 78 + '\n' + "Note: Your current .egg-info directory has a '-' in its name;" + '\nthis will not work correctly with "setup.py develop".\n\n' + 'Please rename %s to %s to correct this problem.\n' + '-' * 78, + bei, self.egg_info + ) + self.broken_egg_info = self.egg_info + self.egg_info = bei # make it work for now + + +class FileList(_FileList): + # Implementations of the various MANIFEST.in commands + + def process_template_line(self, line): + # Parse the line: split it up, make sure the right number of words + # is there, and return the relevant words. 'action' is always + # defined: it's the first word of the line. Which of the other + # three are defined depends on the action; it'll be either + # patterns, (dir and patterns), or (dir_pattern). + (action, patterns, dir, dir_pattern) = self._parse_template_line(line) + + # OK, now we know that the action is valid and we have the + # right number of words on the line for that action -- so we + # can proceed with minimal error-checking. 
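+        # (illustrative) typical MANIFEST.in lines dispatched here include:
+        #   include README.rst
+        #   recursive-include docs *.rst *.txt
+        #   graft examples
+        #   prune build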
+ if action == 'include': + self.debug_print("include " + ' '.join(patterns)) + for pattern in patterns: + if not self.include(pattern): + log.warn("warning: no files found matching '%s'", pattern) + + elif action == 'exclude': + self.debug_print("exclude " + ' '.join(patterns)) + for pattern in patterns: + if not self.exclude(pattern): + log.warn(("warning: no previously-included files " + "found matching '%s'"), pattern) + + elif action == 'global-include': + self.debug_print("global-include " + ' '.join(patterns)) + for pattern in patterns: + if not self.global_include(pattern): + log.warn(("warning: no files found matching '%s' " + "anywhere in distribution"), pattern) + + elif action == 'global-exclude': + self.debug_print("global-exclude " + ' '.join(patterns)) + for pattern in patterns: + if not self.global_exclude(pattern): + log.warn(("warning: no previously-included files matching " + "'%s' found anywhere in distribution"), + pattern) + + elif action == 'recursive-include': + self.debug_print("recursive-include %s %s" % + (dir, ' '.join(patterns))) + for pattern in patterns: + if not self.recursive_include(dir, pattern): + log.warn(("warning: no files found matching '%s' " + "under directory '%s'"), + pattern, dir) + + elif action == 'recursive-exclude': + self.debug_print("recursive-exclude %s %s" % + (dir, ' '.join(patterns))) + for pattern in patterns: + if not self.recursive_exclude(dir, pattern): + log.warn(("warning: no previously-included files matching " + "'%s' found under directory '%s'"), + pattern, dir) + + elif action == 'graft': + self.debug_print("graft " + dir_pattern) + if not self.graft(dir_pattern): + log.warn("warning: no directories found matching '%s'", + dir_pattern) + + elif action == 'prune': + self.debug_print("prune " + dir_pattern) + if not self.prune(dir_pattern): + log.warn(("no previously-included directories found " + "matching '%s'"), dir_pattern) + + else: + raise DistutilsInternalError( + "this cannot happen: invalid action '%s'" % action) + + def _remove_files(self, predicate): + """ + Remove all files from the file list that match the predicate. + Return True if any matching files were removed + """ + found = False + for i in range(len(self.files) - 1, -1, -1): + if predicate(self.files[i]): + self.debug_print(" removing " + self.files[i]) + del self.files[i] + found = True + return found + + def include(self, pattern): + """Include files that match 'pattern'.""" + found = [f for f in glob(pattern) if not os.path.isdir(f)] + self.extend(found) + return bool(found) + + def exclude(self, pattern): + """Exclude files that match 'pattern'.""" + match = translate_pattern(pattern) + return self._remove_files(match.match) + + def recursive_include(self, dir, pattern): + """ + Include all files anywhere in 'dir/' that match the pattern. + """ + full_pattern = os.path.join(dir, '**', pattern) + found = [f for f in glob(full_pattern, recursive=True) + if not os.path.isdir(f)] + self.extend(found) + return bool(found) + + def recursive_exclude(self, dir, pattern): + """ + Exclude any file anywhere in 'dir/' that match the pattern. 
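+
+        For example, recursive_exclude('docs', '*.pyc') drops both
+        'docs/a.pyc' and 'docs/sub/b.pyc' from the file list.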
+ """ + match = translate_pattern(os.path.join(dir, '**', pattern)) + return self._remove_files(match.match) + + def graft(self, dir): + """Include all files from 'dir/'.""" + found = [ + item + for match_dir in glob(dir) + for item in distutils.filelist.findall(match_dir) + ] + self.extend(found) + return bool(found) + + def prune(self, dir): + """Filter out files from 'dir/'.""" + match = translate_pattern(os.path.join(dir, '**')) + return self._remove_files(match.match) + + def global_include(self, pattern): + """ + Include all files anywhere in the current directory that match the + pattern. This is very inefficient on large file trees. + """ + if self.allfiles is None: + self.findall() + match = translate_pattern(os.path.join('**', pattern)) + found = [f for f in self.allfiles if match.match(f)] + self.extend(found) + return bool(found) + + def global_exclude(self, pattern): + """ + Exclude all files anywhere that match the pattern. + """ + match = translate_pattern(os.path.join('**', pattern)) + return self._remove_files(match.match) + + def append(self, item): + if item.endswith('\r'): # Fix older sdists built on Windows + item = item[:-1] + path = convert_path(item) + + if self._safe_path(path): + self.files.append(path) + + def extend(self, paths): + self.files.extend(filter(self._safe_path, paths)) + + def _repair(self): + """ + Replace self.files with only safe paths + + Because some owners of FileList manipulate the underlying + ``files`` attribute directly, this method must be called to + repair those paths. + """ + self.files = list(filter(self._safe_path, self.files)) + + def _safe_path(self, path): + enc_warn = "'%s' not %s encodable -- skipping" + + # To avoid accidental trans-codings errors, first to unicode + u_path = unicode_utils.filesys_decode(path) + if u_path is None: + log.warn("'%s' in unexpected encoding -- skipping" % path) + return False + + # Must ensure utf-8 encodability + utf8_path = unicode_utils.try_encode(u_path, "utf-8") + if utf8_path is None: + log.warn(enc_warn, path, 'utf-8') + return False + + try: + # accept is either way checks out + if os.path.exists(u_path) or os.path.exists(utf8_path): + return True + # this will catch any encode errors decoding u_path + except UnicodeEncodeError: + log.warn(enc_warn, path, sys.getfilesystemencoding()) + + +class manifest_maker(sdist): + template = "MANIFEST.in" + + def initialize_options(self): + self.use_defaults = 1 + self.prune = 1 + self.manifest_only = 1 + self.force_manifest = 1 + + def finalize_options(self): + pass + + def run(self): + self.filelist = FileList() + if not os.path.exists(self.manifest): + self.write_manifest() # it must exist so it'll get in the list + self.add_defaults() + if os.path.exists(self.template): + self.read_template() + self.prune_file_list() + self.filelist.sort() + self.filelist.remove_duplicates() + self.write_manifest() + + def _manifest_normalize(self, path): + path = unicode_utils.filesys_decode(path) + return path.replace(os.sep, '/') + + def write_manifest(self): + """ + Write the file list in 'self.filelist' to the manifest file + named by 'self.manifest'. 
+ """ + self.filelist._repair() + + # Now _repairs should encodability, but not unicode + files = [self._manifest_normalize(f) for f in self.filelist.files] + msg = "writing manifest file '%s'" % self.manifest + self.execute(write_file, (self.manifest, files), msg) + + def warn(self, msg): + if not self._should_suppress_warning(msg): + sdist.warn(self, msg) + + @staticmethod + def _should_suppress_warning(msg): + """ + suppress missing-file warnings from sdist + """ + return re.match(r"standard file .*not found", msg) + + def add_defaults(self): + sdist.add_defaults(self) + self.check_license() + self.filelist.append(self.template) + self.filelist.append(self.manifest) + rcfiles = list(walk_revctrl()) + if rcfiles: + self.filelist.extend(rcfiles) + elif os.path.exists(self.manifest): + self.read_manifest() + + if os.path.exists("setup.py"): + # setup.py should be included by default, even if it's not + # the script called to create the sdist + self.filelist.append("setup.py") + + ei_cmd = self.get_finalized_command('egg_info') + self.filelist.graft(ei_cmd.egg_info) + + def prune_file_list(self): + build = self.get_finalized_command('build') + base_dir = self.distribution.get_fullname() + self.filelist.prune(build.build_base) + self.filelist.prune(base_dir) + sep = re.escape(os.sep) + self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep, + is_regex=1) + + +def write_file(filename, contents): + """Create a file with the specified name and write 'contents' (a + sequence of strings without line terminators) to it. + """ + contents = "\n".join(contents) + + # assuming the contents has been vetted for utf-8 encoding + contents = contents.encode("utf-8") + + with open(filename, "wb") as f: # always write POSIX-style manifest + f.write(contents) + + +def write_pkg_info(cmd, basename, filename): + log.info("writing %s", filename) + if not cmd.dry_run: + metadata = cmd.distribution.metadata + metadata.version, oldver = cmd.egg_version, metadata.version + metadata.name, oldname = cmd.egg_name, metadata.name + + try: + # write unescaped data to PKG-INFO, so older pkg_resources + # can still parse it + metadata.write_pkg_info(cmd.egg_info) + finally: + metadata.name, metadata.version = oldname, oldver + + safe = getattr(cmd.distribution, 'zip_safe', None) + + bdist_egg.write_safety_flag(cmd.egg_info, safe) + + +def warn_depends_obsolete(cmd, basename, filename): + if os.path.exists(filename): + log.warn( + "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" + "Use the install_requires/extras_require setup() args instead." 
+ ) + + +def _write_requirements(stream, reqs): + lines = yield_lines(reqs or ()) + append_cr = lambda line: line + '\n' + lines = map(append_cr, lines) + stream.writelines(lines) + + +def write_requirements(cmd, basename, filename): + dist = cmd.distribution + data = six.StringIO() + _write_requirements(data, dist.install_requires) + extras_require = dist.extras_require or {} + for extra in sorted(extras_require): + data.write('\n[{extra}]\n'.format(**vars())) + _write_requirements(data, extras_require[extra]) + cmd.write_or_delete_file("requirements", filename, data.getvalue()) + + +def write_setup_requirements(cmd, basename, filename): + data = io.StringIO() + _write_requirements(data, cmd.distribution.setup_requires) + cmd.write_or_delete_file("setup-requirements", filename, data.getvalue()) + + +def write_toplevel_names(cmd, basename, filename): + pkgs = dict.fromkeys( + [ + k.split('.', 1)[0] + for k in cmd.distribution.iter_distribution_names() + ] + ) + cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n') + + +def overwrite_arg(cmd, basename, filename): + write_arg(cmd, basename, filename, True) + + +def write_arg(cmd, basename, filename, force=False): + argname = os.path.splitext(basename)[0] + value = getattr(cmd.distribution, argname, None) + if value is not None: + value = '\n'.join(value) + '\n' + cmd.write_or_delete_file(argname, filename, value, force) + + +def write_entries(cmd, basename, filename): + ep = cmd.distribution.entry_points + + if isinstance(ep, six.string_types) or ep is None: + data = ep + elif ep is not None: + data = [] + for section, contents in sorted(ep.items()): + if not isinstance(contents, six.string_types): + contents = EntryPoint.parse_group(section, contents) + contents = '\n'.join(sorted(map(str, contents.values()))) + data.append('[%s]\n%s\n\n' % (section, contents)) + data = ''.join(data) + + cmd.write_or_delete_file('entry points', filename, data, True) + + +def get_pkg_info_revision(): + """ + Get a -r### off of PKG-INFO Version in case this is an sdist of + a subversion revision. + """ + warnings.warn("get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning) + if os.path.exists('PKG-INFO'): + with io.open('PKG-INFO') as f: + for line in f: + match = re.match(r"Version:.*-r(\d+)\s*$", line) + if match: + return int(match.group(1)) + return 0 + + +class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning): + """Class for warning about deprecations in eggInfo in setupTools. Not ignored by default, unlike DeprecationWarning.""" diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/egg_info.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/egg_info.pyc new file mode 100644 index 0000000..d903dec Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/egg_info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/install.py b/project/venv/lib/python2.7/site-packages/setuptools/command/install.py new file mode 100644 index 0000000..31a5ddb --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/install.py @@ -0,0 +1,125 @@ +from distutils.errors import DistutilsArgError +import inspect +import glob +import warnings +import platform +import distutils.command.install as orig + +import setuptools + +# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for +# now. 
See https://github.com/pypa/setuptools/issues/199/ +_install = orig.install + + +class install(orig.install): + """Use easy_install to install the package, w/dependencies""" + + user_options = orig.install.user_options + [ + ('old-and-unmanageable', None, "Try not to use this!"), + ('single-version-externally-managed', None, + "used by system package builders to create 'flat' eggs"), + ] + boolean_options = orig.install.boolean_options + [ + 'old-and-unmanageable', 'single-version-externally-managed', + ] + new_commands = [ + ('install_egg_info', lambda self: True), + ('install_scripts', lambda self: True), + ] + _nc = dict(new_commands) + + def initialize_options(self): + orig.install.initialize_options(self) + self.old_and_unmanageable = None + self.single_version_externally_managed = None + + def finalize_options(self): + orig.install.finalize_options(self) + if self.root: + self.single_version_externally_managed = True + elif self.single_version_externally_managed: + if not self.root and not self.record: + raise DistutilsArgError( + "You must specify --record or --root when building system" + " packages" + ) + + def handle_extra_path(self): + if self.root or self.single_version_externally_managed: + # explicit backward-compatibility mode, allow extra_path to work + return orig.install.handle_extra_path(self) + + # Ignore extra_path when installing an egg (or being run by another + # command without --root or --single-version-externally-managed + self.path_file = None + self.extra_dirs = '' + + def run(self): + # Explicit request for old-style install? Just do it + if self.old_and_unmanageable or self.single_version_externally_managed: + return orig.install.run(self) + + if not self._called_from_setup(inspect.currentframe()): + # Run in backward-compatibility mode to support bdist_* commands. + orig.install.run(self) + else: + self.do_egg_install() + + @staticmethod + def _called_from_setup(run_frame): + """ + Attempt to detect whether run() was called from setup() or by another + command. If called by setup(), the parent caller will be the + 'run_command' method in 'distutils.dist', and *its* caller will be + the 'run_commands' method. If called any other way, the + immediate caller *might* be 'run_command', but it won't have been + called by 'run_commands'. Return True in that case or if a call stack + is unavailable. Return False otherwise. + """ + if run_frame is None: + msg = "Call stack not available. bdist_* commands may fail." + warnings.warn(msg) + if platform.python_implementation() == 'IronPython': + msg = "For best results, pass -X:Frames to enable call stack." + warnings.warn(msg) + return True + res = inspect.getouterframes(run_frame)[2] + caller, = res[:1] + info = inspect.getframeinfo(caller) + caller_module = caller.f_globals.get('__name__', '') + return ( + caller_module == 'distutils.dist' + and info.function == 'run_commands' + ) + + def do_egg_install(self): + + easy_install = self.distribution.get_command_class('easy_install') + + cmd = easy_install( + self.distribution, args="x", root=self.root, record=self.record, + ) + cmd.ensure_finalized() # finalize before bdist_egg munges install cmd + cmd.always_copy_from = '.' 
# make sure local-dir eggs get installed + + # pick up setup-dir .egg files only: no .egg-info + cmd.package_index.scan(glob.glob('*.egg')) + + self.run_command('bdist_egg') + args = [self.distribution.get_command_obj('bdist_egg').egg_output] + + if setuptools.bootstrap_install_from: + # Bootstrap self-installation of setuptools + args.insert(0, setuptools.bootstrap_install_from) + + cmd.args = args + cmd.run() + setuptools.bootstrap_install_from = None + + +# XXX Python 3.1 doesn't see _nc if this is inside the class +install.sub_commands = ( + [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] + + install.new_commands +) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/install.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/install.pyc new file mode 100644 index 0000000..c68efc5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/install.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/install_egg_info.py b/project/venv/lib/python2.7/site-packages/setuptools/command/install_egg_info.py new file mode 100644 index 0000000..edc4718 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/install_egg_info.py @@ -0,0 +1,62 @@ +from distutils import log, dir_util +import os + +from setuptools import Command +from setuptools import namespaces +from setuptools.archive_util import unpack_archive +import pkg_resources + + +class install_egg_info(namespaces.Installer, Command): + """Install an .egg-info directory for the package""" + + description = "Install an .egg-info directory for the package" + + user_options = [ + ('install-dir=', 'd', "directory to install to"), + ] + + def initialize_options(self): + self.install_dir = None + + def finalize_options(self): + self.set_undefined_options('install_lib', + ('install_dir', 'install_dir')) + ei_cmd = self.get_finalized_command("egg_info") + basename = pkg_resources.Distribution( + None, None, ei_cmd.egg_name, ei_cmd.egg_version + ).egg_name() + '.egg-info' + self.source = ei_cmd.egg_info + self.target = os.path.join(self.install_dir, basename) + self.outputs = [] + + def run(self): + self.run_command('egg_info') + if os.path.isdir(self.target) and not os.path.islink(self.target): + dir_util.remove_tree(self.target, dry_run=self.dry_run) + elif os.path.exists(self.target): + self.execute(os.unlink, (self.target,), "Removing " + self.target) + if not self.dry_run: + pkg_resources.ensure_directory(self.target) + self.execute( + self.copytree, (), "Copying %s to %s" % (self.source, self.target) + ) + self.install_namespaces() + + def get_outputs(self): + return self.outputs + + def copytree(self): + # Copy the .egg-info tree to site-packages + def skimmer(src, dst): + # filter out source-control directories; note that 'src' is always + # a '/'-separated path, regardless of platform. 'dst' is a + # platform-specific path. 
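+            # (illustrative) e.g. skimmer('CVS/Entries', dst) returns None,
+            # skipping the file, while skimmer('PKG-INFO', dst) records dst
+            # in self.outputs and returns it so the file gets copied.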
+ for skip in '.svn/', 'CVS/': + if src.startswith(skip) or '/' + skip in src: + return None + self.outputs.append(dst) + log.debug("Copying %s to %s", src, dst) + return dst + + unpack_archive(self.source, self.target, skimmer) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/install_egg_info.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/install_egg_info.pyc new file mode 100644 index 0000000..bdf2b29 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/install_egg_info.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/install_lib.py b/project/venv/lib/python2.7/site-packages/setuptools/command/install_lib.py new file mode 100644 index 0000000..2b31c3e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/install_lib.py @@ -0,0 +1,121 @@ +import os +import imp +from itertools import product, starmap +import distutils.command.install_lib as orig + + +class install_lib(orig.install_lib): + """Don't add compiled flags to filenames of non-Python files""" + + def run(self): + self.build() + outfiles = self.install() + if outfiles is not None: + # always compile, in case we have any extension stubs to deal with + self.byte_compile(outfiles) + + def get_exclusions(self): + """ + Return a collections.Sized collections.Container of paths to be + excluded for single_version_externally_managed installations. + """ + all_packages = ( + pkg + for ns_pkg in self._get_SVEM_NSPs() + for pkg in self._all_packages(ns_pkg) + ) + + excl_specs = product(all_packages, self._gen_exclusion_paths()) + return set(starmap(self._exclude_pkg_path, excl_specs)) + + def _exclude_pkg_path(self, pkg, exclusion_path): + """ + Given a package name and exclusion path within that package, + compute the full exclusion path. + """ + parts = pkg.split('.') + [exclusion_path] + return os.path.join(self.install_dir, *parts) + + @staticmethod + def _all_packages(pkg_name): + """ + >>> list(install_lib._all_packages('foo.bar.baz')) + ['foo.bar.baz', 'foo.bar', 'foo'] + """ + while pkg_name: + yield pkg_name + pkg_name, sep, child = pkg_name.rpartition('.') + + def _get_SVEM_NSPs(self): + """ + Get namespace packages (list) but only for + single_version_externally_managed installations and empty otherwise. + """ + # TODO: is it necessary to short-circuit here? i.e. what's the cost + # if get_finalized_command is called even when namespace_packages is + # False? + if not self.distribution.namespace_packages: + return [] + + install_cmd = self.get_finalized_command('install') + svem = install_cmd.single_version_externally_managed + + return self.distribution.namespace_packages if svem else [] + + @staticmethod + def _gen_exclusion_paths(): + """ + Generate file paths to be excluded for namespace packages (bytecode + cache files). + """ + # always exclude the package module itself + yield '__init__.py' + + yield '__init__.pyc' + yield '__init__.pyo' + + if not hasattr(imp, 'get_tag'): + return + + base = os.path.join('__pycache__', '__init__.' 
+ imp.get_tag()) + yield base + '.pyc' + yield base + '.pyo' + yield base + '.opt-1.pyc' + yield base + '.opt-2.pyc' + + def copy_tree( + self, infile, outfile, + preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1 + ): + assert preserve_mode and preserve_times and not preserve_symlinks + exclude = self.get_exclusions() + + if not exclude: + return orig.install_lib.copy_tree(self, infile, outfile) + + # Exclude namespace package __init__.py* files from the output + + from setuptools.archive_util import unpack_directory + from distutils import log + + outfiles = [] + + def pf(src, dst): + if dst in exclude: + log.warn("Skipping installation of %s (namespace package)", + dst) + return False + + log.info("copying %s -> %s", src, os.path.dirname(dst)) + outfiles.append(dst) + return dst + + unpack_directory(infile, outfile, pf) + return outfiles + + def get_outputs(self): + outputs = orig.install_lib.get_outputs(self) + exclude = self.get_exclusions() + if exclude: + return [f for f in outputs if f not in exclude] + return outputs diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/install_lib.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/install_lib.pyc new file mode 100644 index 0000000..001fe1a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/install_lib.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/install_scripts.py b/project/venv/lib/python2.7/site-packages/setuptools/command/install_scripts.py new file mode 100644 index 0000000..1623427 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/install_scripts.py @@ -0,0 +1,65 @@ +from distutils import log +import distutils.command.install_scripts as orig +import os +import sys + +from pkg_resources import Distribution, PathMetadata, ensure_directory + + +class install_scripts(orig.install_scripts): + """Do normal script install, plus any egg_info wrapper scripts""" + + def initialize_options(self): + orig.install_scripts.initialize_options(self) + self.no_ep = False + + def run(self): + import setuptools.command.easy_install as ei + + self.run_command("egg_info") + if self.distribution.scripts: + orig.install_scripts.run(self) # run first to set up self.outfiles + else: + self.outfiles = [] + if self.no_ep: + # don't install entry point scripts into .egg file! + return + + ei_cmd = self.get_finalized_command("egg_info") + dist = Distribution( + ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info), + ei_cmd.egg_name, ei_cmd.egg_version, + ) + bs_cmd = self.get_finalized_command('build_scripts') + exec_param = getattr(bs_cmd, 'executable', None) + bw_cmd = self.get_finalized_command("bdist_wininst") + is_wininst = getattr(bw_cmd, '_is_running', False) + writer = ei.ScriptWriter + if is_wininst: + exec_param = "python.exe" + writer = ei.WindowsScriptWriter + if exec_param == sys.executable: + # In case the path to the Python executable contains a space, wrap + # it so it's not split up. 
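+            # (illustrative) e.g. 'C:\Program Files\Python27\python.exe'
+            # becomes a one-element list, which CommandSpec.from_param()
+            # accepts as an already-split argv instead of re-splitting it.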
+ exec_param = [exec_param] + # resolve the writer to the environment + writer = writer.best() + cmd = writer.command_spec_class.best().from_param(exec_param) + for args in writer.get_args(dist, cmd.as_header()): + self.write_script(*args) + + def write_script(self, script_name, contents, mode="t", *ignored): + """Write an executable file to the scripts directory""" + from setuptools.command.easy_install import chmod, current_umask + + log.info("Installing %s script to %s", script_name, self.install_dir) + target = os.path.join(self.install_dir, script_name) + self.outfiles.append(target) + + mask = current_umask() + if not self.dry_run: + ensure_directory(target) + f = open(target, "w" + mode) + f.write(contents) + f.close() + chmod(target, 0o777 - mask) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/install_scripts.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/install_scripts.pyc new file mode 100644 index 0000000..e068541 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/install_scripts.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/launcher manifest.xml b/project/venv/lib/python2.7/site-packages/setuptools/command/launcher manifest.xml new file mode 100644 index 0000000..5972a96 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/launcher manifest.xml @@ -0,0 +1,15 @@ +<?xml version="1.0" encoding="UTF-8" standalone="yes"?> +<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> + <assemblyIdentity version="1.0.0.0" + processorArchitecture="X86" + name="%(name)s" + type="win32"/> + <!-- Identify the application security requirements. --> + <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> + <security> + <requestedPrivileges> + <requestedExecutionLevel level="asInvoker" uiAccess="false"/> + </requestedPrivileges> + </security> + </trustInfo> +</assembly> diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/py36compat.py b/project/venv/lib/python2.7/site-packages/setuptools/command/py36compat.py new file mode 100644 index 0000000..61063e7 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/py36compat.py @@ -0,0 +1,136 @@ +import os +from glob import glob +from distutils.util import convert_path +from distutils.command import sdist + +from setuptools.extern.six.moves import filter + + +class sdist_add_defaults: + """ + Mix-in providing forward-compatibility for functionality as found in + distutils on Python 3.7. + + Do not edit the code in this class except to update functionality + as implemented in distutils. Instead, override in the subclass. + """ + + def add_defaults(self): + """Add all the default files to self.filelist: + - README or README.txt + - setup.py + - test/test*.py + - all pure Python modules mentioned in setup script + - all files pointed by package_data (build_py) + - all files defined in data_files. + - all files defined as scripts. + - all C sources listed as part of extensions or C libraries + in the setup script (doesn't catch C headers!) + Warns if (README or README.txt) or setup.py are missing; everything + else is optional. 
+ """ + self._add_defaults_standards() + self._add_defaults_optional() + self._add_defaults_python() + self._add_defaults_data_files() + self._add_defaults_ext() + self._add_defaults_c_libs() + self._add_defaults_scripts() + + @staticmethod + def _cs_path_exists(fspath): + """ + Case-sensitive path existence check + + >>> sdist_add_defaults._cs_path_exists(__file__) + True + >>> sdist_add_defaults._cs_path_exists(__file__.upper()) + False + """ + if not os.path.exists(fspath): + return False + # make absolute so we always have a directory + abspath = os.path.abspath(fspath) + directory, filename = os.path.split(abspath) + return filename in os.listdir(directory) + + def _add_defaults_standards(self): + standards = [self.READMES, self.distribution.script_name] + for fn in standards: + if isinstance(fn, tuple): + alts = fn + got_it = False + for fn in alts: + if self._cs_path_exists(fn): + got_it = True + self.filelist.append(fn) + break + + if not got_it: + self.warn("standard file not found: should have one of " + + ', '.join(alts)) + else: + if self._cs_path_exists(fn): + self.filelist.append(fn) + else: + self.warn("standard file '%s' not found" % fn) + + def _add_defaults_optional(self): + optional = ['test/test*.py', 'setup.cfg'] + for pattern in optional: + files = filter(os.path.isfile, glob(pattern)) + self.filelist.extend(files) + + def _add_defaults_python(self): + # build_py is used to get: + # - python modules + # - files defined in package_data + build_py = self.get_finalized_command('build_py') + + # getting python files + if self.distribution.has_pure_modules(): + self.filelist.extend(build_py.get_source_files()) + + # getting package_data files + # (computed in build_py.data_files by build_py.finalize_options) + for pkg, src_dir, build_dir, filenames in build_py.data_files: + for filename in filenames: + self.filelist.append(os.path.join(src_dir, filename)) + + def _add_defaults_data_files(self): + # getting distribution.data_files + if self.distribution.has_data_files(): + for item in self.distribution.data_files: + if isinstance(item, str): + # plain file + item = convert_path(item) + if os.path.isfile(item): + self.filelist.append(item) + else: + # a (dirname, filenames) tuple + dirname, filenames = item + for f in filenames: + f = convert_path(f) + if os.path.isfile(f): + self.filelist.append(f) + + def _add_defaults_ext(self): + if self.distribution.has_ext_modules(): + build_ext = self.get_finalized_command('build_ext') + self.filelist.extend(build_ext.get_source_files()) + + def _add_defaults_c_libs(self): + if self.distribution.has_c_libraries(): + build_clib = self.get_finalized_command('build_clib') + self.filelist.extend(build_clib.get_source_files()) + + def _add_defaults_scripts(self): + if self.distribution.has_scripts(): + build_scripts = self.get_finalized_command('build_scripts') + self.filelist.extend(build_scripts.get_source_files()) + + +if hasattr(sdist.sdist, '_add_defaults_standards'): + # disable the functionality already available upstream + class sdist_add_defaults: + pass diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/py36compat.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/py36compat.pyc new file mode 100644 index 0000000..13d3249 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/py36compat.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/register.py b/project/venv/lib/python2.7/site-packages/setuptools/command/register.py new 
file mode 100644 index 0000000..98bc015 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/register.py @@ -0,0 +1,18 @@ +from distutils import log +import distutils.command.register as orig + + +class register(orig.register): + __doc__ = orig.register.__doc__ + + def run(self): + try: + # Make sure that we are using valid current name/version info + self.run_command('egg_info') + orig.register.run(self) + finally: + self.announce( + "WARNING: Registering is deprecated, use twine to " + "upload instead (https://pypi.org/p/twine/)", + log.WARN + ) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/register.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/register.pyc new file mode 100644 index 0000000..a70deb3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/register.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/rotate.py b/project/venv/lib/python2.7/site-packages/setuptools/command/rotate.py new file mode 100644 index 0000000..b89353f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/rotate.py @@ -0,0 +1,66 @@ +from distutils.util import convert_path +from distutils import log +from distutils.errors import DistutilsOptionError +import os +import shutil + +from setuptools.extern import six + +from setuptools import Command + + +class rotate(Command): + """Delete older distributions""" + + description = "delete older distributions, keeping N newest files" + user_options = [ + ('match=', 'm', "patterns to match (required)"), + ('dist-dir=', 'd', "directory where the distributions are"), + ('keep=', 'k', "number of matching distributions to keep"), + ] + + boolean_options = [] + + def initialize_options(self): + self.match = None + self.dist_dir = None + self.keep = None + + def finalize_options(self): + if self.match is None: + raise DistutilsOptionError( + "Must specify one or more (comma-separated) match patterns " + "(e.g. 
'*.zip' or '*.egg')"
+            )
+        if self.keep is None:
+            raise DistutilsOptionError("Must specify number of files to keep")
+        try:
+            self.keep = int(self.keep)
+        except ValueError:
+            raise DistutilsOptionError("--keep must be an integer")
+        if isinstance(self.match, six.string_types):
+            self.match = [
+                convert_path(p.strip()) for p in self.match.split(',')
+            ]
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+    def run(self):
+        self.run_command("egg_info")
+        from glob import glob
+
+        for pattern in self.match:
+            pattern = self.distribution.get_name() + '*' + pattern
+            files = glob(os.path.join(self.dist_dir, pattern))
+            files = [(os.path.getmtime(f), f) for f in files]
+            files.sort()
+            files.reverse()
+
+            log.info("%d file(s) matching %s", len(files), pattern)
+            files = files[self.keep:]
+            for (t, f) in files:
+                log.info("Deleting %s", f)
+                if not self.dry_run:
+                    if os.path.isdir(f):
+                        shutil.rmtree(f)
+                    else:
+                        os.unlink(f)
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/rotate.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/rotate.pyc
new file mode 100644
index 0000000..d11d517
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/rotate.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/saveopts.py b/project/venv/lib/python2.7/site-packages/setuptools/command/saveopts.py
new file mode 100644
index 0000000..611cec5
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/command/saveopts.py
@@ -0,0 +1,22 @@
+from setuptools.command.setopt import edit_config, option_base
+
+
+class saveopts(option_base):
+    """Save command-line options to a file"""
+
+    description = "save supplied options to setup.cfg or other config file"
+
+    def run(self):
+        dist = self.distribution
+        settings = {}
+
+        for cmd in dist.command_options:
+
+            if cmd == 'saveopts':
+                continue  # don't save our own options!
+ + for opt, (src, val) in dist.get_option_dict(cmd).items(): + if src == "command line": + settings.setdefault(cmd, {})[opt] = val + + edit_config(self.filename, settings, self.dry_run) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/saveopts.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/saveopts.pyc new file mode 100644 index 0000000..2cf50bd Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/saveopts.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/sdist.py b/project/venv/lib/python2.7/site-packages/setuptools/command/sdist.py new file mode 100644 index 0000000..dc25398 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/sdist.py @@ -0,0 +1,221 @@ +from distutils import log +import distutils.command.sdist as orig +import os +import sys +import io +import contextlib + +from setuptools.extern import six + +from .py36compat import sdist_add_defaults + +import pkg_resources + +_default_revctrl = list + + +def walk_revctrl(dirname=''): + """Find all files under revision control""" + for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): + for item in ep.load()(dirname): + yield item + + +class sdist(sdist_add_defaults, orig.sdist): + """Smart sdist that finds anything supported by revision control""" + + user_options = [ + ('formats=', None, + "formats for source distribution (comma-separated list)"), + ('keep-temp', 'k', + "keep the distribution tree around after creating " + + "archive file(s)"), + ('dist-dir=', 'd', + "directory to put the source distribution archive(s) in " + "[default: dist]"), + ] + + negative_opt = {} + + README_EXTENSIONS = ['', '.rst', '.txt', '.md'] + READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS) + + def run(self): + self.run_command('egg_info') + ei_cmd = self.get_finalized_command('egg_info') + self.filelist = ei_cmd.filelist + self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt')) + self.check_readme() + + # Run sub commands + for cmd_name in self.get_sub_commands(): + self.run_command(cmd_name) + + self.make_distribution() + + dist_files = getattr(self.distribution, 'dist_files', []) + for file in self.archive_files: + data = ('sdist', '', file) + if data not in dist_files: + dist_files.append(data) + + def initialize_options(self): + orig.sdist.initialize_options(self) + + self._default_to_gztar() + + def _default_to_gztar(self): + # only needed on Python prior to 3.6. + if sys.version_info >= (3, 6, 0, 'beta', 1): + return + self.formats = ['gztar'] + + def make_distribution(self): + """ + Workaround for #516 + """ + with self._remove_os_link(): + orig.sdist.make_distribution(self) + + @staticmethod + @contextlib.contextmanager + def _remove_os_link(): + """ + In a context, remove and restore os.link if it exists + """ + + class NoValue: + pass + + orig_val = getattr(os, 'link', NoValue) + try: + del os.link + except Exception: + pass + try: + yield + finally: + if orig_val is not NoValue: + setattr(os, 'link', orig_val) + + def __read_template_hack(self): + # This grody hack closes the template file (MANIFEST.in) if an + # exception occurs during read_template. + # Doing so prevents an error when easy_install attempts to delete the + # file. 
+        try:
+            orig.sdist.read_template(self)
+        except Exception:
+            _, _, tb = sys.exc_info()
+            tb.tb_next.tb_frame.f_locals['template'].close()
+            raise
+
+    # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
+    # has been fixed, so only override the method if we're using an earlier
+    # Python.
+    has_leaky_handle = (
+        sys.version_info < (2, 7, 2)
+        or (3, 0) <= sys.version_info < (3, 1, 4)
+        or (3, 2) <= sys.version_info < (3, 2, 1)
+    )
+    if has_leaky_handle:
+        read_template = __read_template_hack
+
+    def _add_defaults_python(self):
+        """getting python files"""
+        if self.distribution.has_pure_modules():
+            build_py = self.get_finalized_command('build_py')
+            self.filelist.extend(build_py.get_source_files())
+            # This functionality is incompatible with include_package_data, and
+            # will in fact create an infinite recursion if include_package_data
+            # is True.  Use of include_package_data will imply that
+            # distutils-style automatic handling of package_data is disabled
+            if not self.distribution.include_package_data:
+                for _, src_dir, _, filenames in build_py.data_files:
+                    self.filelist.extend([os.path.join(src_dir, filename)
+                                          for filename in filenames])
+
+    def _add_defaults_data_files(self):
+        try:
+            if six.PY2:
+                sdist_add_defaults._add_defaults_data_files(self)
+            else:
+                super()._add_defaults_data_files()
+        except TypeError:
+            log.warn("data_files contains unexpected objects")
+
+    def check_readme(self):
+        for f in self.READMES:
+            if os.path.exists(f):
+                return
+        else:
+            self.warn(
+                "standard file not found: should have one of " +
+                ', '.join(self.READMES)
+            )
+
+    def make_release_tree(self, base_dir, files):
+        orig.sdist.make_release_tree(self, base_dir, files)
+
+        # Save any egg_info command line options used to create this sdist
+        dest = os.path.join(base_dir, 'setup.cfg')
+        if hasattr(os, 'link') and os.path.exists(dest):
+            # unlink and re-copy, since it might be hard-linked, and
+            # we don't want to change the source version
+            os.unlink(dest)
+            self.copy_file('setup.cfg', dest)
+
+        self.get_finalized_command('egg_info').save_version_info(dest)
+
+    def _manifest_is_not_generated(self):
+        # check for special comment used in 2.7.1 and higher
+        if not os.path.isfile(self.manifest):
+            return False
+
+        with io.open(self.manifest, 'rb') as fp:
+            first_line = fp.readline()
+        return (first_line !=
+                '# file GENERATED by distutils, do NOT edit\n'.encode())
+
+    def read_manifest(self):
+        """Read the manifest file (named by 'self.manifest') and use it to
+        fill in 'self.filelist', the list of files to include in the source
+        distribution.
+        """
+        log.info("reading manifest file '%s'", self.manifest)
+        manifest = open(self.manifest, 'rb')
+        for line in manifest:
+            # The manifest must contain UTF-8. See #303.
+            if six.PY3:
+                try:
+                    line = line.decode('UTF-8')
+                except UnicodeDecodeError:
+                    log.warn("%r not UTF-8 decodable -- skipping" % line)
+                    continue
+            # ignore comments and blank lines
+            line = line.strip()
+            if line.startswith('#') or not line:
+                continue
+            self.filelist.append(line)
+        manifest.close()
+
+    def check_license(self):
+        """Checks if 'license_file' is configured and adds it to
+        'self.filelist' if the value contains a valid path.
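The option consulted here would come from the `[metadata]` section of setup.cfg (e.g. a `license_file = LICENSE` line). A small sketch of the `(source, value)` pair shape that the lookup below unpacks, with invented values:

    # get_option_dict() maps option names to (source, value) pairs,
    # so the second element is the configured path.
    opts = {'license_file': ('setup.cfg', 'LICENSE')}   # illustrative stand-in
    _, license_file = opts.get('license_file', (None, None))
    assert license_file == 'LICENSE'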
+ """ + + opts = self.distribution.get_option_dict('metadata') + + # ignore the source of the value + _, license_file = opts.get('license_file', (None, None)) + + if license_file is None: + log.debug("'license_file' option was not specified") + return + + if not os.path.exists(license_file): + log.warn("warning: Failed to find the configured license file '%s'", + license_file) + return + + self.filelist.append(license_file) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/sdist.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/sdist.pyc new file mode 100644 index 0000000..1f61a60 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/sdist.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/setopt.py b/project/venv/lib/python2.7/site-packages/setuptools/command/setopt.py new file mode 100644 index 0000000..7e57cc0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/setopt.py @@ -0,0 +1,149 @@ +from distutils.util import convert_path +from distutils import log +from distutils.errors import DistutilsOptionError +import distutils +import os + +from setuptools.extern.six.moves import configparser + +from setuptools import Command + +__all__ = ['config_file', 'edit_config', 'option_base', 'setopt'] + + +def config_file(kind="local"): + """Get the filename of the distutils, local, global, or per-user config + + `kind` must be one of "local", "global", or "user" + """ + if kind == 'local': + return 'setup.cfg' + if kind == 'global': + return os.path.join( + os.path.dirname(distutils.__file__), 'distutils.cfg' + ) + if kind == 'user': + dot = os.name == 'posix' and '.' or '' + return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot)) + raise ValueError( + "config_file() type must be 'local', 'global', or 'user'", kind + ) + + +def edit_config(filename, settings, dry_run=False): + """Edit a configuration file to include `settings` + + `settings` is a dictionary of dictionaries or ``None`` values, keyed by + command/section name. A ``None`` value means to delete the entire section, + while a dictionary lists settings to be changed or deleted in that section. + A setting of ``None`` means to delete that setting. 
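A short sketch of the `settings` shape the docstring above describes (file name and values invented; `dry_run=True` keeps it side-effect free):

    from setuptools.command.setopt import edit_config

    edit_config('setup.cfg', {
        'metadata': {'description': 'demo',   # set metadata.description
                     'author': None},         # delete metadata.author
        'bdist_wheel': None,                  # delete the whole section
    }, dry_run=True)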
+ """ + log.debug("Reading configuration from %s", filename) + opts = configparser.RawConfigParser() + opts.read([filename]) + for section, options in settings.items(): + if options is None: + log.info("Deleting section [%s] from %s", section, filename) + opts.remove_section(section) + else: + if not opts.has_section(section): + log.debug("Adding new section [%s] to %s", section, filename) + opts.add_section(section) + for option, value in options.items(): + if value is None: + log.debug( + "Deleting %s.%s from %s", + section, option, filename + ) + opts.remove_option(section, option) + if not opts.options(section): + log.info("Deleting empty [%s] section from %s", + section, filename) + opts.remove_section(section) + else: + log.debug( + "Setting %s.%s to %r in %s", + section, option, value, filename + ) + opts.set(section, option, value) + + log.info("Writing %s", filename) + if not dry_run: + with open(filename, 'w') as f: + opts.write(f) + + +class option_base(Command): + """Abstract base class for commands that mess with config files""" + + user_options = [ + ('global-config', 'g', + "save options to the site-wide distutils.cfg file"), + ('user-config', 'u', + "save options to the current user's pydistutils.cfg file"), + ('filename=', 'f', + "configuration file to use (default=setup.cfg)"), + ] + + boolean_options = [ + 'global-config', 'user-config', + ] + + def initialize_options(self): + self.global_config = None + self.user_config = None + self.filename = None + + def finalize_options(self): + filenames = [] + if self.global_config: + filenames.append(config_file('global')) + if self.user_config: + filenames.append(config_file('user')) + if self.filename is not None: + filenames.append(self.filename) + if not filenames: + filenames.append(config_file('local')) + if len(filenames) > 1: + raise DistutilsOptionError( + "Must specify only one configuration file option", + filenames + ) + self.filename, = filenames + + +class setopt(option_base): + """Save command-line options to a file""" + + description = "set an option in setup.cfg or another config file" + + user_options = [ + ('command=', 'c', 'command to set an option for'), + ('option=', 'o', 'option to set'), + ('set-value=', 's', 'value of the option'), + ('remove', 'r', 'remove (unset) the value'), + ] + option_base.user_options + + boolean_options = option_base.boolean_options + ['remove'] + + def initialize_options(self): + option_base.initialize_options(self) + self.command = None + self.option = None + self.set_value = None + self.remove = None + + def finalize_options(self): + option_base.finalize_options(self) + if self.command is None or self.option is None: + raise DistutilsOptionError("Must specify --command *and* --option") + if self.set_value is None and not self.remove: + raise DistutilsOptionError("Must specify --set-value or --remove") + + def run(self): + edit_config( + self.filename, { + self.command: {self.option.replace('-', '_'): self.set_value} + }, + self.dry_run + ) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/setopt.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/setopt.pyc new file mode 100644 index 0000000..cb05ce3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/setopt.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/test.py b/project/venv/lib/python2.7/site-packages/setuptools/command/test.py new file mode 100644 index 0000000..dde0118 --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/setuptools/command/test.py @@ -0,0 +1,270 @@ +import os +import operator +import sys +import contextlib +import itertools +import unittest +from distutils.errors import DistutilsError, DistutilsOptionError +from distutils import log +from unittest import TestLoader + +from setuptools.extern import six +from setuptools.extern.six.moves import map, filter + +from pkg_resources import (resource_listdir, resource_exists, normalize_path, + working_set, _namespace_packages, evaluate_marker, + add_activation_listener, require, EntryPoint) +from setuptools import Command + +__metaclass__ = type + + +class ScanningLoader(TestLoader): + + def __init__(self): + TestLoader.__init__(self) + self._visited = set() + + def loadTestsFromModule(self, module, pattern=None): + """Return a suite of all tests cases contained in the given module + + If the module is a package, load tests from all the modules in it. + If the module has an ``additional_tests`` function, call it and add + the return value to the tests. + """ + if module in self._visited: + return None + self._visited.add(module) + + tests = [] + tests.append(TestLoader.loadTestsFromModule(self, module)) + + if hasattr(module, "additional_tests"): + tests.append(module.additional_tests()) + + if hasattr(module, '__path__'): + for file in resource_listdir(module.__name__, ''): + if file.endswith('.py') and file != '__init__.py': + submodule = module.__name__ + '.' + file[:-3] + else: + if resource_exists(module.__name__, file + '/__init__.py'): + submodule = module.__name__ + '.' + file + else: + continue + tests.append(self.loadTestsFromName(submodule)) + + if len(tests) != 1: + return self.suiteClass(tests) + else: + return tests[0] # don't create a nested suite for only one return + + +# adapted from jaraco.classes.properties:NonDataProperty +class NonDataProperty: + def __init__(self, fget): + self.fget = fget + + def __get__(self, obj, objtype=None): + if obj is None: + return self + return self.fget(obj) + + +class test(Command): + """Command to run unit tests after in-place build""" + + description = "run unit tests after in-place build" + + user_options = [ + ('test-module=', 'm', "Run 'test_suite' in specified module"), + ('test-suite=', 's', + "Run single test, case or suite (e.g. 'module.test_suite')"), + ('test-runner=', 'r', "Test runner to use"), + ] + + def initialize_options(self): + self.test_suite = None + self.test_module = None + self.test_loader = None + self.test_runner = None + + def finalize_options(self): + + if self.test_suite and self.test_module: + msg = "You may specify a module or a suite, but not both" + raise DistutilsOptionError(msg) + + if self.test_suite is None: + if self.test_module is None: + self.test_suite = self.distribution.test_suite + else: + self.test_suite = self.test_module + ".test_suite" + + if self.test_loader is None: + self.test_loader = getattr(self.distribution, 'test_loader', None) + if self.test_loader is None: + self.test_loader = "setuptools.command.test:ScanningLoader" + if self.test_runner is None: + self.test_runner = getattr(self.distribution, 'test_runner', None) + + @NonDataProperty + def test_args(self): + return list(self._test_args()) + + def _test_args(self): + if not self.test_suite and sys.version_info >= (2, 7): + yield 'discover' + if self.verbose: + yield '--verbose' + if self.test_suite: + yield self.test_suite + + def with_project_on_sys_path(self, func): + """ + Backward compatibility for project_on_sys_path context. 
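For orientation, this is roughly how a project opts in to this command from its setup.py; the project name and dependency are invented, but `test_suite` and `tests_require` are the keywords that finalize_options() and install_dists() below actually read:

    from setuptools import setup

    setup(
        name='demo',
        test_suite='tests',         # consumed by test.finalize_options()
        tests_require=['mock'],     # fetched by test.install_dists() before the run
    )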
+ """ + with self.project_on_sys_path(): + func() + + @contextlib.contextmanager + def project_on_sys_path(self, include_dists=[]): + with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False) + + if with_2to3: + # If we run 2to3 we can not do this inplace: + + # Ensure metadata is up-to-date + self.reinitialize_command('build_py', inplace=0) + self.run_command('build_py') + bpy_cmd = self.get_finalized_command("build_py") + build_path = normalize_path(bpy_cmd.build_lib) + + # Build extensions + self.reinitialize_command('egg_info', egg_base=build_path) + self.run_command('egg_info') + + self.reinitialize_command('build_ext', inplace=0) + self.run_command('build_ext') + else: + # Without 2to3 inplace works fine: + self.run_command('egg_info') + + # Build extensions in-place + self.reinitialize_command('build_ext', inplace=1) + self.run_command('build_ext') + + ei_cmd = self.get_finalized_command("egg_info") + + old_path = sys.path[:] + old_modules = sys.modules.copy() + + try: + project_path = normalize_path(ei_cmd.egg_base) + sys.path.insert(0, project_path) + working_set.__init__() + add_activation_listener(lambda dist: dist.activate()) + require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version)) + with self.paths_on_pythonpath([project_path]): + yield + finally: + sys.path[:] = old_path + sys.modules.clear() + sys.modules.update(old_modules) + working_set.__init__() + + @staticmethod + @contextlib.contextmanager + def paths_on_pythonpath(paths): + """ + Add the indicated paths to the head of the PYTHONPATH environment + variable so that subprocesses will also see the packages at + these paths. + + Do this in a context that restores the value on exit. + """ + nothing = object() + orig_pythonpath = os.environ.get('PYTHONPATH', nothing) + current_pythonpath = os.environ.get('PYTHONPATH', '') + try: + prefix = os.pathsep.join(paths) + to_join = filter(None, [prefix, current_pythonpath]) + new_path = os.pathsep.join(to_join) + if new_path: + os.environ['PYTHONPATH'] = new_path + yield + finally: + if orig_pythonpath is nothing: + os.environ.pop('PYTHONPATH', None) + else: + os.environ['PYTHONPATH'] = orig_pythonpath + + @staticmethod + def install_dists(dist): + """ + Install the requirements indicated by self.distribution and + return an iterable of the dists that were built. + """ + ir_d = dist.fetch_build_eggs(dist.install_requires) + tr_d = dist.fetch_build_eggs(dist.tests_require or []) + er_d = dist.fetch_build_eggs( + v for k, v in dist.extras_require.items() + if k.startswith(':') and evaluate_marker(k[1:]) + ) + return itertools.chain(ir_d, tr_d, er_d) + + def run(self): + installed_dists = self.install_dists(self.distribution) + + cmd = ' '.join(self._argv) + if self.dry_run: + self.announce('skipping "%s" (dry run)' % cmd) + return + + self.announce('running "%s"' % cmd) + + paths = map(operator.attrgetter('location'), installed_dists) + with self.paths_on_pythonpath(paths): + with self.project_on_sys_path(): + self.run_tests() + + def run_tests(self): + # Purge modules under test from sys.modules. The test loader will + # re-import them from the build location. Required when 2to3 is used + # with namespace packages. + if six.PY3 and getattr(self.distribution, 'use_2to3', False): + module = self.test_suite.split('.')[0] + if module in _namespace_packages: + del_modules = [] + if module in sys.modules: + del_modules.append(module) + module += '.' 
+                for name in sys.modules:
+                    if name.startswith(module):
+                        del_modules.append(name)
+                list(map(sys.modules.__delitem__, del_modules))
+
+        test = unittest.main(
+            None, None, self._argv,
+            testLoader=self._resolve_as_ep(self.test_loader),
+            testRunner=self._resolve_as_ep(self.test_runner),
+            exit=False,
+        )
+        if not test.result.wasSuccessful():
+            msg = 'Test failed: %s' % test.result
+            self.announce(msg, log.ERROR)
+            raise DistutilsError(msg)
+
+    @property
+    def _argv(self):
+        return ['unittest'] + self.test_args
+
+    @staticmethod
+    def _resolve_as_ep(val):
+        """
+        Load the indicated attribute value, called, as if it were
+        specified as an entry point.
+        """
+        if val is None:
+            return
+        parsed = EntryPoint.parse("x=" + val)
+        return parsed.resolve()()
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/test.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/test.pyc
new file mode 100644
index 0000000..a9767dd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/test.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/upload.py b/project/venv/lib/python2.7/site-packages/setuptools/command/upload.py
new file mode 100644
index 0000000..6db8888
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/command/upload.py
@@ -0,0 +1,196 @@
+import io
+import os
+import hashlib
+import getpass
+
+from base64 import standard_b64encode
+
+from distutils import log
+from distutils.command import upload as orig
+from distutils.spawn import spawn
+
+from distutils.errors import DistutilsError
+
+from setuptools.extern.six.moves.urllib.request import urlopen, Request
+from setuptools.extern.six.moves.urllib.error import HTTPError
+from setuptools.extern.six.moves.urllib.parse import urlparse
+
+
+class upload(orig.upload):
+    """
+    Override default upload behavior to obtain password
+    in a variety of different ways.
+    """
+    def run(self):
+        try:
+            orig.upload.run(self)
+        finally:
+            self.announce(
+                "WARNING: Uploading via this command is deprecated, use twine "
+                "to upload instead (https://pypi.org/p/twine/)",
+                log.WARN
+            )
+
+    def finalize_options(self):
+        orig.upload.finalize_options(self)
+        self.username = (
+            self.username or
+            getpass.getuser()
+        )
+        # Attempt to obtain password. Short circuit evaluation at the first
+        # sign of success.
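The "short circuit evaluation" mentioned above is plain `or` chaining, which stops at the first truthy value; a self-contained sketch with invented values:

    configured = None          # e.g. nothing supplied on the command line / .pypirc
    from_keyring = None        # e.g. the keyring lookup returned nothing
    prompted = 's3cret'        # e.g. what the tty prompt returned
    password = configured or from_keyring or prompted
    assert password == 's3cret'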
+ self.password = ( + self.password or + self._load_password_from_keyring() or + self._prompt_for_password() + ) + + def upload_file(self, command, pyversion, filename): + # Makes sure the repository URL is compliant + schema, netloc, url, params, query, fragments = \ + urlparse(self.repository) + if params or query or fragments: + raise AssertionError("Incompatible url %s" % self.repository) + + if schema not in ('http', 'https'): + raise AssertionError("unsupported schema " + schema) + + # Sign if requested + if self.sign: + gpg_args = ["gpg", "--detach-sign", "-a", filename] + if self.identity: + gpg_args[2:2] = ["--local-user", self.identity] + spawn(gpg_args, + dry_run=self.dry_run) + + # Fill in the data - send all the meta-data in case we need to + # register a new release + with open(filename, 'rb') as f: + content = f.read() + + meta = self.distribution.metadata + + data = { + # action + ':action': 'file_upload', + 'protocol_version': '1', + + # identify release + 'name': meta.get_name(), + 'version': meta.get_version(), + + # file content + 'content': (os.path.basename(filename), content), + 'filetype': command, + 'pyversion': pyversion, + 'md5_digest': hashlib.md5(content).hexdigest(), + + # additional meta-data + 'metadata_version': str(meta.get_metadata_version()), + 'summary': meta.get_description(), + 'home_page': meta.get_url(), + 'author': meta.get_contact(), + 'author_email': meta.get_contact_email(), + 'license': meta.get_licence(), + 'description': meta.get_long_description(), + 'keywords': meta.get_keywords(), + 'platform': meta.get_platforms(), + 'classifiers': meta.get_classifiers(), + 'download_url': meta.get_download_url(), + # PEP 314 + 'provides': meta.get_provides(), + 'requires': meta.get_requires(), + 'obsoletes': meta.get_obsoletes(), + } + + data['comment'] = '' + + if self.sign: + data['gpg_signature'] = (os.path.basename(filename) + ".asc", + open(filename+".asc", "rb").read()) + + # set up the authentication + user_pass = (self.username + ":" + self.password).encode('ascii') + # The exact encoding of the authentication string is debated. + # Anyway PyPI only accepts ascii for both username or password. 
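For reference, the header assembled on the next line comes out like this (credentials invented):

    >>> from base64 import standard_b64encode
    >>> 'Basic ' + standard_b64encode(b'alice:s3cret').decode('ascii')
    'Basic YWxpY2U6czNjcmV0'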
+ auth = "Basic " + standard_b64encode(user_pass).decode('ascii') + + # Build up the MIME payload for the POST data + boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' + sep_boundary = b'\r\n--' + boundary.encode('ascii') + end_boundary = sep_boundary + b'--\r\n' + body = io.BytesIO() + for key, value in data.items(): + title = '\r\nContent-Disposition: form-data; name="%s"' % key + # handle multiple entries for the same name + if not isinstance(value, list): + value = [value] + for value in value: + if type(value) is tuple: + title += '; filename="%s"' % value[0] + value = value[1] + else: + value = str(value).encode('utf-8') + body.write(sep_boundary) + body.write(title.encode('utf-8')) + body.write(b"\r\n\r\n") + body.write(value) + body.write(end_boundary) + body = body.getvalue() + + msg = "Submitting %s to %s" % (filename, self.repository) + self.announce(msg, log.INFO) + + # build the Request + headers = { + 'Content-type': 'multipart/form-data; boundary=%s' % boundary, + 'Content-length': str(len(body)), + 'Authorization': auth, + } + + request = Request(self.repository, data=body, + headers=headers) + # send the data + try: + result = urlopen(request) + status = result.getcode() + reason = result.msg + except HTTPError as e: + status = e.code + reason = e.msg + except OSError as e: + self.announce(str(e), log.ERROR) + raise + + if status == 200: + self.announce('Server response (%s): %s' % (status, reason), + log.INFO) + if self.show_response: + text = getattr(self, '_read_pypi_response', + lambda x: None)(result) + if text is not None: + msg = '\n'.join(('-' * 75, text, '-' * 75)) + self.announce(msg, log.INFO) + else: + msg = 'Upload failed (%s): %s' % (status, reason) + self.announce(msg, log.ERROR) + raise DistutilsError(msg) + + def _load_password_from_keyring(self): + """ + Attempt to load password from keyring. Suppress Exceptions. + """ + try: + keyring = __import__('keyring') + return keyring.get_password(self.repository, self.username) + except Exception: + pass + + def _prompt_for_password(self): + """ + Prompt for a password on the tty. Suppress Exceptions. + """ + try: + return getpass.getpass() + except (Exception, KeyboardInterrupt): + pass diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/upload.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/upload.pyc new file mode 100644 index 0000000..fb2c94e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/upload.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/upload_docs.py b/project/venv/lib/python2.7/site-packages/setuptools/command/upload_docs.py new file mode 100644 index 0000000..07aa564 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/command/upload_docs.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- +"""upload_docs + +Implements a Distutils 'upload_docs' subcommand (upload documentation to +PyPI's pythonhosted.org). 
+""" + +from base64 import standard_b64encode +from distutils import log +from distutils.errors import DistutilsOptionError +import os +import socket +import zipfile +import tempfile +import shutil +import itertools +import functools + +from setuptools.extern import six +from setuptools.extern.six.moves import http_client, urllib + +from pkg_resources import iter_entry_points +from .upload import upload + + +def _encode(s): + errors = 'surrogateescape' if six.PY3 else 'strict' + return s.encode('utf-8', errors) + + +class upload_docs(upload): + # override the default repository as upload_docs isn't + # supported by Warehouse (and won't be). + DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/' + + description = 'Upload documentation to PyPI' + + user_options = [ + ('repository=', 'r', + "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY), + ('show-response', None, + 'display full response text from server'), + ('upload-dir=', None, 'directory to upload'), + ] + boolean_options = upload.boolean_options + + def has_sphinx(self): + if self.upload_dir is None: + for ep in iter_entry_points('distutils.commands', 'build_sphinx'): + return True + + sub_commands = [('build_sphinx', has_sphinx)] + + def initialize_options(self): + upload.initialize_options(self) + self.upload_dir = None + self.target_dir = None + + def finalize_options(self): + upload.finalize_options(self) + if self.upload_dir is None: + if self.has_sphinx(): + build_sphinx = self.get_finalized_command('build_sphinx') + self.target_dir = build_sphinx.builder_target_dir + else: + build = self.get_finalized_command('build') + self.target_dir = os.path.join(build.build_base, 'docs') + else: + self.ensure_dirname('upload_dir') + self.target_dir = self.upload_dir + if 'pypi.python.org' in self.repository: + log.warn("Upload_docs command is deprecated. 
Use RTD instead.") + self.announce('Using upload directory %s' % self.target_dir) + + def create_zipfile(self, filename): + zip_file = zipfile.ZipFile(filename, "w") + try: + self.mkpath(self.target_dir) # just in case + for root, dirs, files in os.walk(self.target_dir): + if root == self.target_dir and not files: + tmpl = "no files found in upload directory '%s'" + raise DistutilsOptionError(tmpl % self.target_dir) + for name in files: + full = os.path.join(root, name) + relative = root[len(self.target_dir):].lstrip(os.path.sep) + dest = os.path.join(relative, name) + zip_file.write(full, dest) + finally: + zip_file.close() + + def run(self): + # Run sub commands + for cmd_name in self.get_sub_commands(): + self.run_command(cmd_name) + + tmp_dir = tempfile.mkdtemp() + name = self.distribution.metadata.get_name() + zip_file = os.path.join(tmp_dir, "%s.zip" % name) + try: + self.create_zipfile(zip_file) + self.upload_file(zip_file) + finally: + shutil.rmtree(tmp_dir) + + @staticmethod + def _build_part(item, sep_boundary): + key, values = item + title = '\nContent-Disposition: form-data; name="%s"' % key + # handle multiple entries for the same name + if not isinstance(values, list): + values = [values] + for value in values: + if isinstance(value, tuple): + title += '; filename="%s"' % value[0] + value = value[1] + else: + value = _encode(value) + yield sep_boundary + yield _encode(title) + yield b"\n\n" + yield value + if value and value[-1:] == b'\r': + yield b'\n' # write an extra newline (lurve Macs) + + @classmethod + def _build_multipart(cls, data): + """ + Build up the MIME payload for the POST data + """ + boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' + sep_boundary = b'\n--' + boundary + end_boundary = sep_boundary + b'--' + end_items = end_boundary, b"\n", + builder = functools.partial( + cls._build_part, + sep_boundary=sep_boundary, + ) + part_groups = map(builder, data.items()) + parts = itertools.chain.from_iterable(part_groups) + body_items = itertools.chain(parts, end_items) + content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii') + return b''.join(body_items), content_type + + def upload_file(self, filename): + with open(filename, 'rb') as f: + content = f.read() + meta = self.distribution.metadata + data = { + ':action': 'doc_upload', + 'name': meta.get_name(), + 'content': (os.path.basename(filename), content), + } + # set up the authentication + credentials = _encode(self.username + ':' + self.password) + credentials = standard_b64encode(credentials) + if six.PY3: + credentials = credentials.decode('ascii') + auth = "Basic " + credentials + + body, ct = self._build_multipart(data) + + msg = "Submitting documentation to %s" % (self.repository) + self.announce(msg, log.INFO) + + # build the Request + # We can't use urllib2 since we need to send the Basic + # auth right with the first request + schema, netloc, url, params, query, fragments = \ + urllib.parse.urlparse(self.repository) + assert not params and not query and not fragments + if schema == 'http': + conn = http_client.HTTPConnection(netloc) + elif schema == 'https': + conn = http_client.HTTPSConnection(netloc) + else: + raise AssertionError("unsupported schema " + schema) + + data = '' + try: + conn.connect() + conn.putrequest("POST", url) + content_type = ct + conn.putheader('Content-type', content_type) + conn.putheader('Content-length', str(len(body))) + conn.putheader('Authorization', auth) + conn.endheaders() + conn.send(body) + except socket.error as e: + 
self.announce(str(e), log.ERROR)
+            return
+
+        r = conn.getresponse()
+        if r.status == 200:
+            msg = 'Server response (%s): %s' % (r.status, r.reason)
+            self.announce(msg, log.INFO)
+        elif r.status == 301:
+            location = r.getheader('Location')
+            if location is None:
+                location = 'https://pythonhosted.org/%s/' % meta.get_name()
+            msg = 'Upload successful. Visit %s' % location
+            self.announce(msg, log.INFO)
+        else:
+            msg = 'Upload failed (%s): %s' % (r.status, r.reason)
+            self.announce(msg, log.ERROR)
+        if self.show_response:
+            print('-' * 75, r.read(), '-' * 75)
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/command/upload_docs.pyc b/project/venv/lib/python2.7/site-packages/setuptools/command/upload_docs.pyc
new file mode 100644
index 0000000..ce2a459
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/command/upload_docs.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/config.py b/project/venv/lib/python2.7/site-packages/setuptools/config.py
new file mode 100644
index 0000000..b662604
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/config.py
@@ -0,0 +1,656 @@
+from __future__ import absolute_import, unicode_literals
+import io
+import os
+import sys
+
+import warnings
+import functools
+from collections import defaultdict
+from functools import partial
+from functools import wraps
+from importlib import import_module
+
+from distutils.errors import DistutilsOptionError, DistutilsFileError
+from setuptools.extern.packaging.version import LegacyVersion, parse
+from setuptools.extern.six import string_types, PY3
+
+
+__metaclass__ = type
+
+
+def read_configuration(
+        filepath, find_others=False, ignore_option_errors=False):
+    """Read given configuration file and returns options from it as a dict.
+
+    :param str|unicode filepath: Path to configuration file
+        to get options from.
+
+    :param bool find_others: Whether to search for other configuration files
+        which could be in various places.
+
+    :param bool ignore_option_errors: Whether to silently ignore
+        options, values of which could not be resolved (e.g. due to exceptions
+        in directives such as file:, attr:, etc.).
+        If False exceptions are propagated as expected.
+
+    :rtype: dict
+    """
+    from setuptools.dist import Distribution, _Distribution
+
+    filepath = os.path.abspath(filepath)
+
+    if not os.path.isfile(filepath):
+        raise DistutilsFileError(
+            'Configuration file %s does not exist.' % filepath)
+
+    current_directory = os.getcwd()
+    os.chdir(os.path.dirname(filepath))
+
+    try:
+        dist = Distribution()
+
+        filenames = dist.find_config_files() if find_others else []
+        if filepath not in filenames:
+            filenames.append(filepath)
+
+        _Distribution.parse_config_files(dist, filenames=filenames)
+
+        handlers = parse_configuration(
+            dist, dist.command_options,
+            ignore_option_errors=ignore_option_errors)
+
+    finally:
+        os.chdir(current_directory)
+
+    return configuration_to_dict(handlers)
+
+
+def _get_option(target_obj, key):
+    """
+    Given a target object and option key, get that option from
+    the target object, either through a get_{key} method or
+    from an attribute directly.
+    """
+    getter_name = 'get_{key}'.format(**locals())
+    by_attribute = functools.partial(getattr, target_obj, key)
+    getter = getattr(target_obj, getter_name, by_attribute)
+    return getter()
+
+
+def configuration_to_dict(handlers):
+    """Returns configuration data gathered by given handlers as a dict.
+ + :param list[ConfigHandler] handlers: Handlers list, + usually from parse_configuration() + + :rtype: dict + """ + config_dict = defaultdict(dict) + + for handler in handlers: + for option in handler.set_options: + value = _get_option(handler.target_obj, option) + config_dict[handler.section_prefix][option] = value + + return config_dict + + +def parse_configuration( + distribution, command_options, ignore_option_errors=False): + """Performs additional parsing of configuration options + for a distribution. + + Returns a list of used option handlers. + + :param Distribution distribution: + :param dict command_options: + :param bool ignore_option_errors: Whether to silently ignore + options, values of which could not be resolved (e.g. due to exceptions + in directives such as file:, attr:, etc.). + If False exceptions are propagated as expected. + :rtype: list + """ + options = ConfigOptionsHandler( + distribution, command_options, ignore_option_errors) + options.parse() + + meta = ConfigMetadataHandler( + distribution.metadata, command_options, ignore_option_errors, + distribution.package_dir) + meta.parse() + + return meta, options + + +class ConfigHandler: + """Handles metadata supplied in configuration files.""" + + section_prefix = None + """Prefix for config sections handled by this handler. + Must be provided by class heirs. + + """ + + aliases = {} + """Options aliases. + For compatibility with various packages. E.g.: d2to1 and pbr. + Note: `-` in keys is replaced with `_` by config parser. + + """ + + def __init__(self, target_obj, options, ignore_option_errors=False): + sections = {} + + section_prefix = self.section_prefix + for section_name, section_options in options.items(): + if not section_name.startswith(section_prefix): + continue + + section_name = section_name.replace(section_prefix, '').strip('.') + sections[section_name] = section_options + + self.ignore_option_errors = ignore_option_errors + self.target_obj = target_obj + self.sections = sections + self.set_options = [] + + @property + def parsers(self): + """Metadata item name to parser function mapping.""" + raise NotImplementedError( + '%s must provide .parsers property' % self.__class__.__name__) + + def __setitem__(self, option_name, value): + unknown = tuple() + target_obj = self.target_obj + + # Translate alias into real name. + option_name = self.aliases.get(option_name, option_name) + + current_value = getattr(target_obj, option_name, unknown) + + if current_value is unknown: + raise KeyError(option_name) + + if current_value: + # Already inhabited. Skipping. + return + + skip_option = False + parser = self.parsers.get(option_name) + if parser: + try: + value = parser(value) + + except Exception: + skip_option = True + if not self.ignore_option_errors: + raise + + if skip_option: + return + + setter = getattr(target_obj, 'set_%s' % option_name, None) + if setter is None: + setattr(target_obj, option_name, value) + else: + setter(value) + + self.set_options.append(option_name) + + @classmethod + def _parse_list(cls, value, separator=','): + """Represents value as a list. + + Value is split either by separator (defaults to comma) or by lines. + + :param value: + :param separator: List items separator character. 
+ :rtype: list + """ + if isinstance(value, list): # _get_parser_compound case + return value + + if '\n' in value: + value = value.splitlines() + else: + value = value.split(separator) + + return [chunk.strip() for chunk in value if chunk.strip()] + + @classmethod + def _parse_dict(cls, value): + """Represents value as a dict. + + :param value: + :rtype: dict + """ + separator = '=' + result = {} + for line in cls._parse_list(value): + key, sep, val = line.partition(separator) + if sep != separator: + raise DistutilsOptionError( + 'Unable to parse option value to dict: %s' % value) + result[key.strip()] = val.strip() + + return result + + @classmethod + def _parse_bool(cls, value): + """Represents value as boolean. + + :param value: + :rtype: bool + """ + value = value.lower() + return value in ('1', 'true', 'yes') + + @classmethod + def _exclude_files_parser(cls, key): + """Returns a parser function to make sure field inputs + are not files. + + Parses a value after getting the key so error messages are + more informative. + + :param key: + :rtype: callable + """ + def parser(value): + exclude_directive = 'file:' + if value.startswith(exclude_directive): + raise ValueError( + 'Only strings are accepted for the {0} field, ' + 'files are not accepted'.format(key)) + return value + return parser + + @classmethod + def _parse_file(cls, value): + """Represents value as a string, allowing including text + from nearest files using `file:` directive. + + Directive is sandboxed and won't reach anything outside + directory with setup.py. + + Examples: + file: README.rst, CHANGELOG.md, src/file.txt + + :param str value: + :rtype: str + """ + include_directive = 'file:' + + if not isinstance(value, string_types): + return value + + if not value.startswith(include_directive): + return value + + spec = value[len(include_directive):] + filepaths = (os.path.abspath(path.strip()) for path in spec.split(',')) + return '\n'.join( + cls._read_file(path) + for path in filepaths + if (cls._assert_local(path) or True) + and os.path.isfile(path) + ) + + @staticmethod + def _assert_local(filepath): + if not filepath.startswith(os.getcwd()): + raise DistutilsOptionError( + '`file:` directive can not access %s' % filepath) + + @staticmethod + def _read_file(filepath): + with io.open(filepath, encoding='utf-8') as f: + return f.read() + + @classmethod + def _parse_attr(cls, value, package_dir=None): + """Represents value as a module attribute. 
+ + Examples: + attr: package.attr + attr: package.module.attr + + :param str value: + :rtype: str + """ + attr_directive = 'attr:' + if not value.startswith(attr_directive): + return value + + attrs_path = value.replace(attr_directive, '').strip().split('.') + attr_name = attrs_path.pop() + + module_name = '.'.join(attrs_path) + module_name = module_name or '__init__' + + parent_path = os.getcwd() + if package_dir: + if attrs_path[0] in package_dir: + # A custom path was specified for the module we want to import + custom_path = package_dir[attrs_path[0]] + parts = custom_path.rsplit('/', 1) + if len(parts) > 1: + parent_path = os.path.join(os.getcwd(), parts[0]) + module_name = parts[1] + else: + module_name = custom_path + elif '' in package_dir: + # A custom parent directory was specified for all root modules + parent_path = os.path.join(os.getcwd(), package_dir['']) + sys.path.insert(0, parent_path) + try: + module = import_module(module_name) + value = getattr(module, attr_name) + + finally: + sys.path = sys.path[1:] + + return value + + @classmethod + def _get_parser_compound(cls, *parse_methods): + """Returns parser function to represents value as a list. + + Parses a value applying given methods one after another. + + :param parse_methods: + :rtype: callable + """ + def parse(value): + parsed = value + + for method in parse_methods: + parsed = method(parsed) + + return parsed + + return parse + + @classmethod + def _parse_section_to_dict(cls, section_options, values_parser=None): + """Parses section options into a dictionary. + + Optionally applies a given parser to values. + + :param dict section_options: + :param callable values_parser: + :rtype: dict + """ + value = {} + values_parser = values_parser or (lambda val: val) + for key, (_, val) in section_options.items(): + value[key] = values_parser(val) + return value + + def parse_section(self, section_options): + """Parses configuration file section. + + :param dict section_options: + """ + for (name, (_, value)) in section_options.items(): + try: + self[name] = value + + except KeyError: + pass # Keep silent for a new option may appear anytime. + + def parse(self): + """Parses configuration file items from one + or more related sections. + + """ + for section_name, section_options in self.sections.items(): + + method_postfix = '' + if section_name: # [section.option] variant + method_postfix = '_%s' % section_name + + section_parser_method = getattr( + self, + # Dots in section names are translated into dunderscores. 
+ ('parse_section%s' % method_postfix).replace('.', '__'), + None) + + if section_parser_method is None: + raise DistutilsOptionError( + 'Unsupported distribution option section: [%s.%s]' % ( + self.section_prefix, section_name)) + + section_parser_method(section_options) + + def _deprecated_config_handler(self, func, msg, warning_class): + """ this function will wrap around parameters that are deprecated + + :param msg: deprecation message + :param warning_class: class of warning exception to be raised + :param func: function to be wrapped around + """ + @wraps(func) + def config_handler(*args, **kwargs): + warnings.warn(msg, warning_class) + return func(*args, **kwargs) + + return config_handler + + +class ConfigMetadataHandler(ConfigHandler): + + section_prefix = 'metadata' + + aliases = { + 'home_page': 'url', + 'summary': 'description', + 'classifier': 'classifiers', + 'platform': 'platforms', + } + + strict_mode = False + """We need to keep it loose, to be partially compatible with + `pbr` and `d2to1` packages which also uses `metadata` section. + + """ + + def __init__(self, target_obj, options, ignore_option_errors=False, + package_dir=None): + super(ConfigMetadataHandler, self).__init__(target_obj, options, + ignore_option_errors) + self.package_dir = package_dir + + @property + def parsers(self): + """Metadata item name to parser function mapping.""" + parse_list = self._parse_list + parse_file = self._parse_file + parse_dict = self._parse_dict + exclude_files_parser = self._exclude_files_parser + + return { + 'platforms': parse_list, + 'keywords': parse_list, + 'provides': parse_list, + 'requires': self._deprecated_config_handler( + parse_list, + "The requires parameter is deprecated, please use " + "install_requires for runtime dependencies.", + DeprecationWarning), + 'obsoletes': parse_list, + 'classifiers': self._get_parser_compound(parse_file, parse_list), + 'license': exclude_files_parser('license'), + 'description': parse_file, + 'long_description': parse_file, + 'version': self._parse_version, + 'project_urls': parse_dict, + } + + def _parse_version(self, value): + """Parses `version` option value. 
+ + :param value: + :rtype: str + + """ + version = self._parse_file(value) + + if version != value: + version = version.strip() + # Be strict about versions loaded from file because it's easy to + # accidentally include newlines and other unintended content + if isinstance(parse(version), LegacyVersion): + tmpl = ( + 'Version loaded from {value} does not ' + 'comply with PEP 440: {version}' + ) + raise DistutilsOptionError(tmpl.format(**locals())) + + return version + + version = self._parse_attr(value, self.package_dir) + + if callable(version): + version = version() + + if not isinstance(version, string_types): + if hasattr(version, '__iter__'): + version = '.'.join(map(str, version)) + else: + version = '%s' % version + + return version + + +class ConfigOptionsHandler(ConfigHandler): + + section_prefix = 'options' + + @property + def parsers(self): + """Metadata item name to parser function mapping.""" + parse_list = self._parse_list + parse_list_semicolon = partial(self._parse_list, separator=';') + parse_bool = self._parse_bool + parse_dict = self._parse_dict + + return { + 'zip_safe': parse_bool, + 'use_2to3': parse_bool, + 'include_package_data': parse_bool, + 'package_dir': parse_dict, + 'use_2to3_fixers': parse_list, + 'use_2to3_exclude_fixers': parse_list, + 'convert_2to3_doctests': parse_list, + 'scripts': parse_list, + 'eager_resources': parse_list, + 'dependency_links': parse_list, + 'namespace_packages': parse_list, + 'install_requires': parse_list_semicolon, + 'setup_requires': parse_list_semicolon, + 'tests_require': parse_list_semicolon, + 'packages': self._parse_packages, + 'entry_points': self._parse_file, + 'py_modules': parse_list, + } + + def _parse_packages(self, value): + """Parses `packages` option value. + + :param value: + :rtype: list + """ + find_directives = ['find:', 'find_namespace:'] + trimmed_value = value.strip() + + if trimmed_value not in find_directives: + return self._parse_list(value) + + findns = trimmed_value == find_directives[1] + if findns and not PY3: + raise DistutilsOptionError( + 'find_namespace: directive is unsupported on Python < 3.3') + + # Read function arguments from a dedicated section. + find_kwargs = self.parse_section_packages__find( + self.sections.get('packages.find', {})) + + if findns: + from setuptools import find_namespace_packages as find_packages + else: + from setuptools import find_packages + + return find_packages(**find_kwargs) + + def parse_section_packages__find(self, section_options): + """Parses `packages.find` configuration file section. + + To be used in conjunction with _parse_packages(). + + :param dict section_options: + """ + section_data = self._parse_section_to_dict( + section_options, self._parse_list) + + valid_keys = ['where', 'include', 'exclude'] + + find_kwargs = dict( + [(k, v) for k, v in section_data.items() if k in valid_keys and v]) + + where = find_kwargs.get('where') + if where is not None: + find_kwargs['where'] = where[0] # cast list to single val + + return find_kwargs + + def parse_section_entry_points(self, section_options): + """Parses `entry_points` configuration file section. 
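+
+        An illustrative setup.cfg fragment handled here (hypothetical
+        names):
+
+            [options.entry_points]
+            console_scripts =
+                mycmd = mypkg.cli:main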
+
+        :param dict section_options:
+        """
+        parsed = self._parse_section_to_dict(section_options, self._parse_list)
+        self['entry_points'] = parsed
+
+    def _parse_package_data(self, section_options):
+        parsed = self._parse_section_to_dict(section_options, self._parse_list)
+
+        root = parsed.get('*')
+        if root:
+            parsed[''] = root
+            del parsed['*']
+
+        return parsed
+
+    def parse_section_package_data(self, section_options):
+        """Parses `package_data` configuration file section.
+
+        :param dict section_options:
+        """
+        self['package_data'] = self._parse_package_data(section_options)
+
+    def parse_section_exclude_package_data(self, section_options):
+        """Parses `exclude_package_data` configuration file section.
+
+        :param dict section_options:
+        """
+        self['exclude_package_data'] = self._parse_package_data(
+            section_options)
+
+    def parse_section_extras_require(self, section_options):
+        """Parses `extras_require` configuration file section.
+
+        :param dict section_options:
+        """
+        parse_list = partial(self._parse_list, separator=';')
+        self['extras_require'] = self._parse_section_to_dict(
+            section_options, parse_list)
+
+    def parse_section_data_files(self, section_options):
+        """Parses `data_files` configuration file section.
+
+        :param dict section_options:
+        """
+        parsed = self._parse_section_to_dict(section_options, self._parse_list)
+        self['data_files'] = [(k, v) for k, v in parsed.items()]
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/config.pyc b/project/venv/lib/python2.7/site-packages/setuptools/config.pyc
new file mode 100644
index 0000000..c7ae30e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/config.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/dep_util.py b/project/venv/lib/python2.7/site-packages/setuptools/dep_util.py
new file mode 100644
index 0000000..2931c13
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/dep_util.py
@@ -0,0 +1,23 @@
+from distutils.dep_util import newer_group
+
+# yes, this was almost entirely copy-pasted from
+# 'newer_pairwise()'; this is just another convenience
+# function.
+def newer_pairwise_group(sources_groups, targets):
+    """Walk both arguments in parallel, testing if each source group is newer
+    than its corresponding target. Returns a pair of lists (sources_groups,
+    targets) where each source group is newer than its corresponding target,
+    according to the semantics of 'newer_group()'.
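+
+    Illustrative call (assuming 'a.c' is newer than 'a.o' while 'b.c'
+    is not newer than 'b.o'):
+
+        newer_pairwise_group([['a.c'], ['b.c']], ['a.o', 'b.o'])
+        # -> ([['a.c']], ['a.o'])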
+    """
+    if len(sources_groups) != len(targets):
+        raise ValueError("'sources_groups' and 'targets' must be the same length")
+
+    # build a pair of lists (sources_groups, targets) where the source group
+    # is newer
+    n_sources = []
+    n_targets = []
+    for i in range(len(sources_groups)):
+        if newer_group(sources_groups[i], targets[i]):
+            n_sources.append(sources_groups[i])
+            n_targets.append(targets[i])
+
+    return n_sources, n_targets
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/dep_util.pyc b/project/venv/lib/python2.7/site-packages/setuptools/dep_util.pyc
new file mode 100644
index 0000000..4d91d08
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/dep_util.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/depends.py b/project/venv/lib/python2.7/site-packages/setuptools/depends.py
new file mode 100644
index 0000000..45e7052
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/depends.py
@@ -0,0 +1,186 @@
+import sys
+import imp
+import marshal
+from distutils.version import StrictVersion
+from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
+
+from .py33compat import Bytecode
+
+
+__all__ = [
+    'Require', 'find_module', 'get_module_constant', 'extract_constant'
+]
+
+
+class Require:
+    """A prerequisite to building or installing a distribution"""
+
+    def __init__(self, name, requested_version, module, homepage='',
+                 attribute=None, format=None):
+
+        if format is None and requested_version is not None:
+            format = StrictVersion
+
+        if format is not None:
+            requested_version = format(requested_version)
+            if attribute is None:
+                attribute = '__version__'
+
+        self.__dict__.update(locals())
+        del self.self
+
+    def full_name(self):
+        """Return full package/distribution name, w/version"""
+        if self.requested_version is not None:
+            return '%s-%s' % (self.name, self.requested_version)
+        return self.name
+
+    def version_ok(self, version):
+        """Is 'version' sufficiently up-to-date?"""
+        return self.attribute is None or self.format is None or \
+            str(version) != "unknown" and version >= self.requested_version
+
+    def get_version(self, paths=None, default="unknown"):
+        """Get version number of installed module, 'None', or 'default'
+
+        Search 'paths' for module.  If not found, return 'None'.  If found,
+        return the extracted version attribute, or 'default' if no version
+        attribute was specified, or the value cannot be determined without
+        importing the module.  The version is formatted according to the
+        requirement's version format (if any), unless it is 'None' or the
+        supplied 'default'.
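+
+        Illustrative use (assumes an importable module 'mymod' that
+        defines '__version__'):
+
+            Require('MyMod', '1.0', 'mymod').get_version()  # e.g. '1.0'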
+ """ + + if self.attribute is None: + try: + f, p, i = find_module(self.module, paths) + if f: + f.close() + return default + except ImportError: + return None + + v = get_module_constant(self.module, self.attribute, default, paths) + + if v is not None and v is not default and self.format is not None: + return self.format(v) + + return v + + def is_present(self, paths=None): + """Return true if dependency is present on 'paths'""" + return self.get_version(paths) is not None + + def is_current(self, paths=None): + """Return true if dependency is present and up-to-date on 'paths'""" + version = self.get_version(paths) + if version is None: + return False + return self.version_ok(version) + + +def find_module(module, paths=None): + """Just like 'imp.find_module()', but with package support""" + + parts = module.split('.') + + while parts: + part = parts.pop(0) + f, path, (suffix, mode, kind) = info = imp.find_module(part, paths) + + if kind == PKG_DIRECTORY: + parts = parts or ['__init__'] + paths = [path] + + elif parts: + raise ImportError("Can't find %r in %s" % (parts, module)) + + return info + + +def get_module_constant(module, symbol, default=-1, paths=None): + """Find 'module' by searching 'paths', and extract 'symbol' + + Return 'None' if 'module' does not exist on 'paths', or it does not define + 'symbol'. If the module defines 'symbol' as a constant, return the + constant. Otherwise, return 'default'.""" + + try: + f, path, (suffix, mode, kind) = find_module(module, paths) + except ImportError: + # Module doesn't exist + return None + + try: + if kind == PY_COMPILED: + f.read(8) # skip magic & date + code = marshal.load(f) + elif kind == PY_FROZEN: + code = imp.get_frozen_object(module) + elif kind == PY_SOURCE: + code = compile(f.read(), path, 'exec') + else: + # Not something we can parse; we'll have to import it. :( + if module not in sys.modules: + imp.load_module(module, f, path, (suffix, mode, kind)) + return getattr(sys.modules[module], symbol, None) + + finally: + if f: + f.close() + + return extract_constant(code, symbol, default) + + +def extract_constant(code, symbol, default=-1): + """Extract the constant value of 'symbol' from 'code' + + If the name 'symbol' is bound to a constant value by the Python code + object 'code', return that value. If 'symbol' is bound to an expression, + return 'default'. Otherwise, return 'None'. + + Return value is based on the first assignment to 'symbol'. 'symbol' must + be a global, or at least a non-"fast" local in the code block. That is, + only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' + must be present in 'code.co_names'. + """ + if symbol not in code.co_names: + # name's not there, can't possibly be an assignment + return None + + name_idx = list(code.co_names).index(symbol) + + STORE_NAME = 90 + STORE_GLOBAL = 97 + LOAD_CONST = 100 + + const = default + + for byte_code in Bytecode(code): + op = byte_code.opcode + arg = byte_code.arg + + if op == LOAD_CONST: + const = code.co_consts[arg] + elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL): + return const + else: + const = default + + +def _update_globals(): + """ + Patch the globals to remove the objects not available on some platforms. + + XXX it'd be better to test assertions about bytecode instead. 
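+
+    For instance, Jython (sys.platform starts with 'java') and
+    IronPython ('cli') expose no CPython bytecode, so the
+    bytecode-inspecting helpers 'extract_constant' and
+    'get_module_constant' are deleted on those platforms.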
+ """ + + if not sys.platform.startswith('java') and sys.platform != 'cli': + return + incompatible = 'extract_constant', 'get_module_constant' + for name in incompatible: + del globals()[name] + __all__.remove(name) + + +_update_globals() diff --git a/project/venv/lib/python2.7/site-packages/setuptools/depends.pyc b/project/venv/lib/python2.7/site-packages/setuptools/depends.pyc new file mode 100644 index 0000000..f4fb5ed Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/depends.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/dist.py b/project/venv/lib/python2.7/site-packages/setuptools/dist.py new file mode 100644 index 0000000..9a165de --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/dist.py @@ -0,0 +1,1278 @@ +# -*- coding: utf-8 -*- +__all__ = ['Distribution'] + +import io +import sys +import re +import os +import warnings +import numbers +import distutils.log +import distutils.core +import distutils.cmd +import distutils.dist +from distutils.util import strtobool +from distutils.debug import DEBUG +from distutils.fancy_getopt import translate_longopt +import itertools + +from collections import defaultdict +from email import message_from_file + +from distutils.errors import ( + DistutilsOptionError, DistutilsPlatformError, DistutilsSetupError, +) +from distutils.util import rfc822_escape +from distutils.version import StrictVersion + +from setuptools.extern import six +from setuptools.extern import packaging +from setuptools.extern.six.moves import map, filter, filterfalse + +from . import SetuptoolsDeprecationWarning + +from setuptools.depends import Require +from setuptools import windows_support +from setuptools.monkey import get_unpatched +from setuptools.config import parse_configuration +import pkg_resources + +__import__('setuptools.extern.packaging.specifiers') +__import__('setuptools.extern.packaging.version') + + +def _get_unpatched(cls): + warnings.warn("Do not call this function", DistDeprecationWarning) + return get_unpatched(cls) + + +def get_metadata_version(self): + mv = getattr(self, 'metadata_version', None) + + if mv is None: + if self.long_description_content_type or self.provides_extras: + mv = StrictVersion('2.1') + elif (self.maintainer is not None or + self.maintainer_email is not None or + getattr(self, 'python_requires', None) is not None): + mv = StrictVersion('1.2') + elif (self.provides or self.requires or self.obsoletes or + self.classifiers or self.download_url): + mv = StrictVersion('1.1') + else: + mv = StrictVersion('1.0') + + self.metadata_version = mv + + return mv + + +def read_pkg_file(self, file): + """Reads the metadata values from a file object.""" + msg = message_from_file(file) + + def _read_field(name): + value = msg[name] + if value == 'UNKNOWN': + return None + return value + + def _read_list(name): + values = msg.get_all(name, None) + if values == []: + return None + return values + + self.metadata_version = StrictVersion(msg['metadata-version']) + self.name = _read_field('name') + self.version = _read_field('version') + self.description = _read_field('summary') + # we are filling author only. 
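+    # An illustrative PKG-INFO fragment parsed here (a sketch; fields
+    # follow RFC 822-style headers):
+    #
+    #     Metadata-Version: 1.1
+    #     Name: example-dist
+    #     Version: 0.1
+    #     Summary: An example distribution
+    #     Author: Jane Doe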
+ self.author = _read_field('author') + self.maintainer = None + self.author_email = _read_field('author-email') + self.maintainer_email = None + self.url = _read_field('home-page') + self.license = _read_field('license') + + if 'download-url' in msg: + self.download_url = _read_field('download-url') + else: + self.download_url = None + + self.long_description = _read_field('description') + self.description = _read_field('summary') + + if 'keywords' in msg: + self.keywords = _read_field('keywords').split(',') + + self.platforms = _read_list('platform') + self.classifiers = _read_list('classifier') + + # PEP 314 - these fields only exist in 1.1 + if self.metadata_version == StrictVersion('1.1'): + self.requires = _read_list('requires') + self.provides = _read_list('provides') + self.obsoletes = _read_list('obsoletes') + else: + self.requires = None + self.provides = None + self.obsoletes = None + + +# Based on Python 3.5 version +def write_pkg_file(self, file): + """Write the PKG-INFO format data to a file object. + """ + version = self.get_metadata_version() + + if six.PY2: + def write_field(key, value): + file.write("%s: %s\n" % (key, self._encode_field(value))) + else: + def write_field(key, value): + file.write("%s: %s\n" % (key, value)) + + write_field('Metadata-Version', str(version)) + write_field('Name', self.get_name()) + write_field('Version', self.get_version()) + write_field('Summary', self.get_description()) + write_field('Home-page', self.get_url()) + + if version < StrictVersion('1.2'): + write_field('Author', self.get_contact()) + write_field('Author-email', self.get_contact_email()) + else: + optional_fields = ( + ('Author', 'author'), + ('Author-email', 'author_email'), + ('Maintainer', 'maintainer'), + ('Maintainer-email', 'maintainer_email'), + ) + + for field, attr in optional_fields: + attr_val = getattr(self, attr) + + if attr_val is not None: + write_field(field, attr_val) + + write_field('License', self.get_license()) + if self.download_url: + write_field('Download-URL', self.download_url) + for project_url in self.project_urls.items(): + write_field('Project-URL', '%s, %s' % project_url) + + long_desc = rfc822_escape(self.get_long_description()) + write_field('Description', long_desc) + + keywords = ','.join(self.get_keywords()) + if keywords: + write_field('Keywords', keywords) + + if version >= StrictVersion('1.2'): + for platform in self.get_platforms(): + write_field('Platform', platform) + else: + self._write_list(file, 'Platform', self.get_platforms()) + + self._write_list(file, 'Classifier', self.get_classifiers()) + + # PEP 314 + self._write_list(file, 'Requires', self.get_requires()) + self._write_list(file, 'Provides', self.get_provides()) + self._write_list(file, 'Obsoletes', self.get_obsoletes()) + + # Setuptools specific for PEP 345 + if hasattr(self, 'python_requires'): + write_field('Requires-Python', self.python_requires) + + # PEP 566 + if self.long_description_content_type: + write_field( + 'Description-Content-Type', + self.long_description_content_type + ) + if self.provides_extras: + for extra in self.provides_extras: + write_field('Provides-Extra', extra) + + +sequence = tuple, list + + +def check_importable(dist, attr, value): + try: + ep = pkg_resources.EntryPoint.parse('x=' + value) + assert not ep.extras + except (TypeError, ValueError, AttributeError, AssertionError): + raise DistutilsSetupError( + "%r must be importable 'module:attrs' string (got %r)" + % (attr, value) + ) + + +def assert_string_list(dist, attr, value): + """Verify that 
value is a string list or None""" + try: + assert ''.join(value) != value + except (TypeError, ValueError, AttributeError, AssertionError): + raise DistutilsSetupError( + "%r must be a list of strings (got %r)" % (attr, value) + ) + + +def check_nsp(dist, attr, value): + """Verify that namespace packages are valid""" + ns_packages = value + assert_string_list(dist, attr, ns_packages) + for nsp in ns_packages: + if not dist.has_contents_for(nsp): + raise DistutilsSetupError( + "Distribution contains no modules or packages for " + + "namespace package %r" % nsp + ) + parent, sep, child = nsp.rpartition('.') + if parent and parent not in ns_packages: + distutils.log.warn( + "WARNING: %r is declared as a package namespace, but %r" + " is not: please correct this in setup.py", nsp, parent + ) + + +def check_extras(dist, attr, value): + """Verify that extras_require mapping is valid""" + try: + list(itertools.starmap(_check_extra, value.items())) + except (TypeError, ValueError, AttributeError): + raise DistutilsSetupError( + "'extras_require' must be a dictionary whose values are " + "strings or lists of strings containing valid project/version " + "requirement specifiers." + ) + + +def _check_extra(extra, reqs): + name, sep, marker = extra.partition(':') + if marker and pkg_resources.invalid_marker(marker): + raise DistutilsSetupError("Invalid environment marker: " + marker) + list(pkg_resources.parse_requirements(reqs)) + + +def assert_bool(dist, attr, value): + """Verify that value is True, False, 0, or 1""" + if bool(value) != value: + tmpl = "{attr!r} must be a boolean value (got {value!r})" + raise DistutilsSetupError(tmpl.format(attr=attr, value=value)) + + +def check_requirements(dist, attr, value): + """Verify that install_requires is a valid requirements list""" + try: + list(pkg_resources.parse_requirements(value)) + if isinstance(value, (dict, set)): + raise TypeError("Unordered types are not allowed") + except (TypeError, ValueError) as error: + tmpl = ( + "{attr!r} must be a string or list of strings " + "containing valid project/version requirement specifiers; {error}" + ) + raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) + + +def check_specifier(dist, attr, value): + """Verify that value is a valid version specifier""" + try: + packaging.specifiers.SpecifierSet(value) + except packaging.specifiers.InvalidSpecifier as error: + tmpl = ( + "{attr!r} must be a string " + "containing valid version specifiers; {error}" + ) + raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) + + +def check_entry_points(dist, attr, value): + """Verify that entry_points map is parseable""" + try: + pkg_resources.EntryPoint.parse_map(value) + except ValueError as e: + raise DistutilsSetupError(e) + + +def check_test_suite(dist, attr, value): + if not isinstance(value, six.string_types): + raise DistutilsSetupError("test_suite must be a string") + + +def check_package_data(dist, attr, value): + """Verify that value is a dictionary of package names to glob lists""" + if isinstance(value, dict): + for k, v in value.items(): + if not isinstance(k, str): + break + try: + iter(v) + except TypeError: + break + else: + return + raise DistutilsSetupError( + attr + " must be a dictionary mapping package names to lists of " + "wildcard patterns" + ) + + +def check_packages(dist, attr, value): + for pkgname in value: + if not re.match(r'\w+(\.\w+)*', pkgname): + distutils.log.warn( + "WARNING: %r not a valid package name; please use only " + ".-separated package names in setup.py", pkgname 
+ ) + + +_Distribution = get_unpatched(distutils.core.Distribution) + + +class Distribution(_Distribution): + """Distribution with support for features, tests, and package data + + This is an enhanced version of 'distutils.dist.Distribution' that + effectively adds the following new optional keyword arguments to 'setup()': + + 'install_requires' -- a string or sequence of strings specifying project + versions that the distribution requires when installed, in the format + used by 'pkg_resources.require()'. They will be installed + automatically when the package is installed. If you wish to use + packages that are not available in PyPI, or want to give your users an + alternate download location, you can add a 'find_links' option to the + '[easy_install]' section of your project's 'setup.cfg' file, and then + setuptools will scan the listed web pages for links that satisfy the + requirements. + + 'extras_require' -- a dictionary mapping names of optional "extras" to the + additional requirement(s) that using those extras incurs. For example, + this:: + + extras_require = dict(reST = ["docutils>=0.3", "reSTedit"]) + + indicates that the distribution can optionally provide an extra + capability called "reST", but it can only be used if docutils and + reSTedit are installed. If the user installs your package using + EasyInstall and requests one of your extras, the corresponding + additional requirements will be installed if needed. + + 'features' **deprecated** -- a dictionary mapping option names to + 'setuptools.Feature' + objects. Features are a portion of the distribution that can be + included or excluded based on user options, inter-feature dependencies, + and availability on the current system. Excluded features are omitted + from all setup commands, including source and binary distributions, so + you can create multiple distributions from the same source tree. + Feature names should be valid Python identifiers, except that they may + contain the '-' (minus) sign. Features can be included or excluded + via the command line options '--with-X' and '--without-X', where 'X' is + the name of the feature. Whether a feature is included by default, and + whether you are allowed to control this from the command line, is + determined by the Feature object. See the 'Feature' class for more + information. + + 'test_suite' -- the name of a test suite to run for the 'test' command. + If the user runs 'python setup.py test', the package will be installed, + and the named test suite will be run. The format is the same as + would be used on a 'unittest.py' command line. That is, it is the + dotted name of an object to import and call to generate a test suite. + + 'package_data' -- a dictionary mapping package names to lists of filenames + or globs to use to find data files contained in the named packages. + If the dictionary has filenames or globs listed under '""' (the empty + string), those names will be searched for in every package, in addition + to any names for the specific package. Data files found using these + names/globs will be installed along with the package, in the same + location as the package. Note that globs are allowed to reference + the contents of non-package subdirectories, as long as you use '/' as + a path separator. (Globs are automatically converted to + platform-specific paths at runtime.) + + In addition to these new keywords, this class also has several new methods + for manipulating the distribution's contents. 
For example, the 'include()'
+    and 'exclude()' methods can be thought of as in-place add and subtract
+    commands that add or remove packages, modules, extensions, and so on from
+    the distribution.  They are used by the feature subsystem to configure the
+    distribution for the included and excluded features.
+    """
+
+    _DISTUTILS_UNSUPPORTED_METADATA = {
+        'long_description_content_type': None,
+        'project_urls': dict,
+        'provides_extras': set,
+    }
+
+    _patched_dist = None
+
+    def patch_missing_pkg_info(self, attrs):
+        # Fake up a replacement for the data that would normally come from
+        # PKG-INFO, but which might not yet be built if this is a fresh
+        # checkout.
+        #
+        if not attrs or 'name' not in attrs or 'version' not in attrs:
+            return
+        key = pkg_resources.safe_name(str(attrs['name'])).lower()
+        dist = pkg_resources.working_set.by_key.get(key)
+        if dist is not None and not dist.has_metadata('PKG-INFO'):
+            dist._version = pkg_resources.safe_version(str(attrs['version']))
+            self._patched_dist = dist
+
+    def __init__(self, attrs=None):
+        have_package_data = hasattr(self, "package_data")
+        if not have_package_data:
+            self.package_data = {}
+        attrs = attrs or {}
+        if 'features' in attrs or 'require_features' in attrs:
+            Feature.warn_deprecated()
+        self.require_features = []
+        self.features = {}
+        self.dist_files = []
+        # Filter out setuptools-specific options.
+        self.src_root = attrs.pop("src_root", None)
+        self.patch_missing_pkg_info(attrs)
+        self.dependency_links = attrs.pop('dependency_links', [])
+        self.setup_requires = attrs.pop('setup_requires', [])
+        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+            vars(self).setdefault(ep.name, None)
+        _Distribution.__init__(self, {
+            k: v for k, v in attrs.items()
+            if k not in self._DISTUTILS_UNSUPPORTED_METADATA
+        })
+
+        # Fill in missing metadata fields not supported by distutils.
+        # Note some fields may have been set by other tools (e.g. pbr)
+        # above; they are taken in preference to setup() arguments
+        for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():
+            for source in self.metadata.__dict__, attrs:
+                if option in source:
+                    value = source[option]
+                    break
+            else:
+                value = default() if default else None
+            setattr(self.metadata, option, value)
+
+        if isinstance(self.metadata.version, numbers.Number):
+            # Some people apparently take "version number" too literally :)
+            self.metadata.version = str(self.metadata.version)
+
+        if self.metadata.version is not None:
+            try:
+                ver = packaging.version.Version(self.metadata.version)
+                normalized_version = str(ver)
+                if self.metadata.version != normalized_version:
+                    warnings.warn(
+                        "Normalizing '%s' to '%s'" % (
+                            self.metadata.version,
+                            normalized_version,
+                        )
+                    )
+                    self.metadata.version = normalized_version
+            except (packaging.version.InvalidVersion, TypeError):
+                warnings.warn(
+                    "The version specified (%r) is an invalid version, this "
+                    "may not work as expected with newer versions of "
+                    "setuptools, pip, and PyPI. Please see PEP 440 for more "
+                    "details." % self.metadata.version
+                )
+        self._finalize_requires()
+
+    def _finalize_requires(self):
+        """
+        Set `metadata.python_requires` and fix environment markers
+        in `install_requires` and `extras_require`.
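+
+        Illustrative effect of the marker move (a sketch):
+
+            install_requires=['bar; python_version<"3"']
+            # becomes
+            extras_require={':python_version<"3"': ['bar']}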
+ """ + if getattr(self, 'python_requires', None): + self.metadata.python_requires = self.python_requires + + if getattr(self, 'extras_require', None): + for extra in self.extras_require.keys(): + # Since this gets called multiple times at points where the + # keys have become 'converted' extras, ensure that we are only + # truly adding extras we haven't seen before here. + extra = extra.split(':')[0] + if extra: + self.metadata.provides_extras.add(extra) + + self._convert_extras_requirements() + self._move_install_requirements_markers() + + def _convert_extras_requirements(self): + """ + Convert requirements in `extras_require` of the form + `"extra": ["barbazquux; {marker}"]` to + `"extra:{marker}": ["barbazquux"]`. + """ + spec_ext_reqs = getattr(self, 'extras_require', None) or {} + self._tmp_extras_require = defaultdict(list) + for section, v in spec_ext_reqs.items(): + # Do not strip empty sections. + self._tmp_extras_require[section] + for r in pkg_resources.parse_requirements(v): + suffix = self._suffix_for(r) + self._tmp_extras_require[section + suffix].append(r) + + @staticmethod + def _suffix_for(req): + """ + For a requirement, return the 'extras_require' suffix for + that requirement. + """ + return ':' + str(req.marker) if req.marker else '' + + def _move_install_requirements_markers(self): + """ + Move requirements in `install_requires` that are using environment + markers `extras_require`. + """ + + # divide the install_requires into two sets, simple ones still + # handled by install_requires and more complex ones handled + # by extras_require. + + def is_simple_req(req): + return not req.marker + + spec_inst_reqs = getattr(self, 'install_requires', None) or () + inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs)) + simple_reqs = filter(is_simple_req, inst_reqs) + complex_reqs = filterfalse(is_simple_req, inst_reqs) + self.install_requires = list(map(str, simple_reqs)) + + for r in complex_reqs: + self._tmp_extras_require[':' + str(r.marker)].append(r) + self.extras_require = dict( + (k, [str(r) for r in map(self._clean_req, v)]) + for k, v in self._tmp_extras_require.items() + ) + + def _clean_req(self, req): + """ + Given a Requirement, remove environment markers and return it. + """ + req.marker = None + return req + + def _parse_config_files(self, filenames=None): + """ + Adapted from distutils.dist.Distribution.parse_config_files, + this method provides the same functionality in subtly-improved + ways. 
+ """ + from setuptools.extern.six.moves.configparser import ConfigParser + + # Ignore install directory options if we have a venv + if six.PY3 and sys.prefix != sys.base_prefix: + ignore_options = [ + 'install-base', 'install-platbase', 'install-lib', + 'install-platlib', 'install-purelib', 'install-headers', + 'install-scripts', 'install-data', 'prefix', 'exec-prefix', + 'home', 'user', 'root'] + else: + ignore_options = [] + + ignore_options = frozenset(ignore_options) + + if filenames is None: + filenames = self.find_config_files() + + if DEBUG: + self.announce("Distribution.parse_config_files():") + + parser = ConfigParser() + for filename in filenames: + with io.open(filename, encoding='utf-8') as reader: + if DEBUG: + self.announce(" reading {filename}".format(**locals())) + (parser.read_file if six.PY3 else parser.readfp)(reader) + for section in parser.sections(): + options = parser.options(section) + opt_dict = self.get_option_dict(section) + + for opt in options: + if opt != '__name__' and opt not in ignore_options: + val = self._try_str(parser.get(section, opt)) + opt = opt.replace('-', '_') + opt_dict[opt] = (filename, val) + + # Make the ConfigParser forget everything (so we retain + # the original filenames that options come from) + parser.__init__() + + # If there was a "global" section in the config file, use it + # to set Distribution options. + + if 'global' in self.command_options: + for (opt, (src, val)) in self.command_options['global'].items(): + alias = self.negative_opt.get(opt) + try: + if alias: + setattr(self, alias, not strtobool(val)) + elif opt in ('verbose', 'dry_run'): # ugh! + setattr(self, opt, strtobool(val)) + else: + setattr(self, opt, val) + except ValueError as msg: + raise DistutilsOptionError(msg) + + @staticmethod + def _try_str(val): + """ + On Python 2, much of distutils relies on string values being of + type 'str' (bytes) and not unicode text. If the value can be safely + encoded to bytes using the default encoding, prefer that. + + Why the default encoding? Because that value can be implicitly + decoded back to text if needed. + + Ref #1653 + """ + if six.PY3: + return val + try: + return val.encode() + except UnicodeEncodeError: + pass + return val + + def _set_command_options(self, command_obj, option_dict=None): + """ + Set the options for 'command_obj' from 'option_dict'. Basically + this means copying elements of a dictionary ('option_dict') to + attributes of an instance ('command'). + + 'command_obj' must be a Command instance. If 'option_dict' is not + supplied, uses the standard option dictionary for this command + (from 'self.command_options'). 
+ + (Adopted from distutils.dist.Distribution._set_command_options) + """ + command_name = command_obj.get_command_name() + if option_dict is None: + option_dict = self.get_option_dict(command_name) + + if DEBUG: + self.announce(" setting options for '%s' command:" % command_name) + for (option, (source, value)) in option_dict.items(): + if DEBUG: + self.announce(" %s = %s (from %s)" % (option, value, + source)) + try: + bool_opts = [translate_longopt(o) + for o in command_obj.boolean_options] + except AttributeError: + bool_opts = [] + try: + neg_opt = command_obj.negative_opt + except AttributeError: + neg_opt = {} + + try: + is_string = isinstance(value, six.string_types) + if option in neg_opt and is_string: + setattr(command_obj, neg_opt[option], not strtobool(value)) + elif option in bool_opts and is_string: + setattr(command_obj, option, strtobool(value)) + elif hasattr(command_obj, option): + setattr(command_obj, option, value) + else: + raise DistutilsOptionError( + "error in %s: command '%s' has no such option '%s'" + % (source, command_name, option)) + except ValueError as msg: + raise DistutilsOptionError(msg) + + def parse_config_files(self, filenames=None, ignore_option_errors=False): + """Parses configuration files from various levels + and loads configuration. + + """ + self._parse_config_files(filenames=filenames) + + parse_configuration(self, self.command_options, + ignore_option_errors=ignore_option_errors) + self._finalize_requires() + + def parse_command_line(self): + """Process features after parsing command line options""" + result = _Distribution.parse_command_line(self) + if self.features: + self._finalize_features() + return result + + def _feature_attrname(self, name): + """Convert feature name to corresponding option attribute name""" + return 'with_' + name.replace('-', '_') + + def fetch_build_eggs(self, requires): + """Resolve pre-setup requirements""" + resolved_dists = pkg_resources.working_set.resolve( + pkg_resources.parse_requirements(requires), + installer=self.fetch_build_egg, + replace_conflicting=True, + ) + for dist in resolved_dists: + pkg_resources.working_set.add(dist, replace=True) + return resolved_dists + + def finalize_options(self): + _Distribution.finalize_options(self) + if self.features: + self._set_global_opts_from_features() + + for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): + value = getattr(self, ep.name, None) + if value is not None: + ep.require(installer=self.fetch_build_egg) + ep.load()(self, ep.name, value) + if getattr(self, 'convert_2to3_doctests', None): + # XXX may convert to set here when we can rely on set being builtin + self.convert_2to3_doctests = [ + os.path.abspath(p) + for p in self.convert_2to3_doctests + ] + else: + self.convert_2to3_doctests = [] + + def get_egg_cache_dir(self): + egg_cache_dir = os.path.join(os.curdir, '.eggs') + if not os.path.exists(egg_cache_dir): + os.mkdir(egg_cache_dir) + windows_support.hide_file(egg_cache_dir) + readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt') + with open(readme_txt_filename, 'w') as f: + f.write('This directory contains eggs that were downloaded ' + 'by setuptools to build, test, and run plug-ins.\n\n') + f.write('This directory caches those eggs to prevent ' + 'repeated downloads.\n\n') + f.write('However, it is safe to delete this directory.\n\n') + + return egg_cache_dir + + def fetch_build_egg(self, req): + """Fetch an egg needed for building""" + from setuptools.command.easy_install import easy_install + dist = 
self.__class__({'script_args': ['easy_install']}) + opts = dist.get_option_dict('easy_install') + opts.clear() + opts.update( + (k, v) + for k, v in self.get_option_dict('easy_install').items() + if k in ( + # don't use any other settings + 'find_links', 'site_dirs', 'index_url', + 'optimize', 'site_dirs', 'allow_hosts', + )) + if self.dependency_links: + links = self.dependency_links[:] + if 'find_links' in opts: + links = opts['find_links'][1] + links + opts['find_links'] = ('setup', links) + install_dir = self.get_egg_cache_dir() + cmd = easy_install( + dist, args=["x"], install_dir=install_dir, + exclude_scripts=True, + always_copy=False, build_directory=None, editable=False, + upgrade=False, multi_version=True, no_report=True, user=False + ) + cmd.ensure_finalized() + return cmd.easy_install(req) + + def _set_global_opts_from_features(self): + """Add --with-X/--without-X options based on optional features""" + + go = [] + no = self.negative_opt.copy() + + for name, feature in self.features.items(): + self._set_feature(name, None) + feature.validate(self) + + if feature.optional: + descr = feature.description + incdef = ' (default)' + excdef = '' + if not feature.include_by_default(): + excdef, incdef = incdef, excdef + + new = ( + ('with-' + name, None, 'include ' + descr + incdef), + ('without-' + name, None, 'exclude ' + descr + excdef), + ) + go.extend(new) + no['without-' + name] = 'with-' + name + + self.global_options = self.feature_options = go + self.global_options + self.negative_opt = self.feature_negopt = no + + def _finalize_features(self): + """Add/remove features and resolve dependencies between them""" + + # First, flag all the enabled items (and thus their dependencies) + for name, feature in self.features.items(): + enabled = self.feature_is_included(name) + if enabled or (enabled is None and feature.include_by_default()): + feature.include_in(self) + self._set_feature(name, 1) + + # Then disable the rest, so that off-by-default features don't + # get flagged as errors when they're required by an enabled feature + for name, feature in self.features.items(): + if not self.feature_is_included(name): + feature.exclude_from(self) + self._set_feature(name, 0) + + def get_command_class(self, command): + """Pluggable version of get_command_class()""" + if command in self.cmdclass: + return self.cmdclass[command] + + eps = pkg_resources.iter_entry_points('distutils.commands', command) + for ep in eps: + ep.require(installer=self.fetch_build_egg) + self.cmdclass[command] = cmdclass = ep.load() + return cmdclass + else: + return _Distribution.get_command_class(self, command) + + def print_commands(self): + for ep in pkg_resources.iter_entry_points('distutils.commands'): + if ep.name not in self.cmdclass: + # don't require extras as the commands won't be invoked + cmdclass = ep.resolve() + self.cmdclass[ep.name] = cmdclass + return _Distribution.print_commands(self) + + def get_command_list(self): + for ep in pkg_resources.iter_entry_points('distutils.commands'): + if ep.name not in self.cmdclass: + # don't require extras as the commands won't be invoked + cmdclass = ep.resolve() + self.cmdclass[ep.name] = cmdclass + return _Distribution.get_command_list(self) + + def _set_feature(self, name, status): + """Set feature's inclusion status""" + setattr(self, self._feature_attrname(name), status) + + def feature_is_included(self, name): + """Return 1 if feature is included, 0 if excluded, 'None' if unknown""" + return getattr(self, self._feature_attrname(name)) + + def 
include_feature(self, name): + """Request inclusion of feature named 'name'""" + + if self.feature_is_included(name) == 0: + descr = self.features[name].description + raise DistutilsOptionError( + descr + " is required, but was excluded or is not available" + ) + self.features[name].include_in(self) + self._set_feature(name, 1) + + def include(self, **attrs): + """Add items to distribution that are named in keyword arguments + + For example, 'dist.include(py_modules=["x"])' would add 'x' to + the distribution's 'py_modules' attribute, if it was not already + there. + + Currently, this method only supports inclusion for attributes that are + lists or tuples. If you need to add support for adding to other + attributes in this or a subclass, you can add an '_include_X' method, + where 'X' is the name of the attribute. The method will be called with + the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})' + will try to call 'dist._include_foo({"bar":"baz"})', which can then + handle whatever special inclusion logic is needed. + """ + for k, v in attrs.items(): + include = getattr(self, '_include_' + k, None) + if include: + include(v) + else: + self._include_misc(k, v) + + def exclude_package(self, package): + """Remove packages, modules, and extensions in named package""" + + pfx = package + '.' + if self.packages: + self.packages = [ + p for p in self.packages + if p != package and not p.startswith(pfx) + ] + + if self.py_modules: + self.py_modules = [ + p for p in self.py_modules + if p != package and not p.startswith(pfx) + ] + + if self.ext_modules: + self.ext_modules = [ + p for p in self.ext_modules + if p.name != package and not p.name.startswith(pfx) + ] + + def has_contents_for(self, package): + """Return true if 'exclude_package(package)' would do something""" + + pfx = package + '.' + + for p in self.iter_distribution_names(): + if p == package or p.startswith(pfx): + return True + + def _exclude_misc(self, name, value): + """Handle 'exclude()' for list/tuple attrs without a special handler""" + if not isinstance(value, sequence): + raise DistutilsSetupError( + "%s: setting must be a list or tuple (%r)" % (name, value) + ) + try: + old = getattr(self, name) + except AttributeError: + raise DistutilsSetupError( + "%s: No such distribution setting" % name + ) + if old is not None and not isinstance(old, sequence): + raise DistutilsSetupError( + name + ": this setting cannot be changed via include/exclude" + ) + elif old: + setattr(self, name, [item for item in old if item not in value]) + + def _include_misc(self, name, value): + """Handle 'include()' for list/tuple attrs without a special handler""" + + if not isinstance(value, sequence): + raise DistutilsSetupError( + "%s: setting must be a list (%r)" % (name, value) + ) + try: + old = getattr(self, name) + except AttributeError: + raise DistutilsSetupError( + "%s: No such distribution setting" % name + ) + if old is None: + setattr(self, name, value) + elif not isinstance(old, sequence): + raise DistutilsSetupError( + name + ": this setting cannot be changed via include/exclude" + ) + else: + new = [item for item in value if item not in old] + setattr(self, name, old + new) + + def exclude(self, **attrs): + """Remove items from distribution that are named in keyword arguments + + For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from + the distribution's 'py_modules' attribute. 
Excluding packages uses + the 'exclude_package()' method, so all of the package's contained + packages, modules, and extensions are also excluded. + + Currently, this method only supports exclusion from attributes that are + lists or tuples. If you need to add support for excluding from other + attributes in this or a subclass, you can add an '_exclude_X' method, + where 'X' is the name of the attribute. The method will be called with + the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})' + will try to call 'dist._exclude_foo({"bar":"baz"})', which can then + handle whatever special exclusion logic is needed. + """ + for k, v in attrs.items(): + exclude = getattr(self, '_exclude_' + k, None) + if exclude: + exclude(v) + else: + self._exclude_misc(k, v) + + def _exclude_packages(self, packages): + if not isinstance(packages, sequence): + raise DistutilsSetupError( + "packages: setting must be a list or tuple (%r)" % (packages,) + ) + list(map(self.exclude_package, packages)) + + def _parse_command_opts(self, parser, args): + # Remove --with-X/--without-X options when processing command args + self.global_options = self.__class__.global_options + self.negative_opt = self.__class__.negative_opt + + # First, expand any aliases + command = args[0] + aliases = self.get_option_dict('aliases') + while command in aliases: + src, alias = aliases[command] + del aliases[command] # ensure each alias can expand only once! + import shlex + args[:1] = shlex.split(alias, True) + command = args[0] + + nargs = _Distribution._parse_command_opts(self, parser, args) + + # Handle commands that want to consume all remaining arguments + cmd_class = self.get_command_class(command) + if getattr(cmd_class, 'command_consumes_arguments', None): + self.get_option_dict(command)['args'] = ("command line", nargs) + if nargs is not None: + return [] + + return nargs + + def get_cmdline_options(self): + """Return a '{cmd: {opt:val}}' map of all command-line options + + Option names are all long, but do not include the leading '--', and + contain dashes rather than underscores. If the option doesn't take + an argument (e.g. '--quiet'), the 'val' is 'None'. + + Note that options provided by config files are intentionally excluded. + """ + + d = {} + + for cmd, opts in self.command_options.items(): + + for opt, (src, val) in opts.items(): + + if src != "command line": + continue + + opt = opt.replace('_', '-') + + if val == 0: + cmdobj = self.get_command_obj(cmd) + neg_opt = self.negative_opt.copy() + neg_opt.update(getattr(cmdobj, 'negative_opt', {})) + for neg, pos in neg_opt.items(): + if pos == opt: + opt = neg + val = None + break + else: + raise AssertionError("Shouldn't be able to get here") + + elif val == 1: + val = None + + d.setdefault(cmd, {})[opt] = val + + return d + + def iter_distribution_names(self): + """Yield all packages, modules, and extension names in distribution""" + + for pkg in self.packages or (): + yield pkg + + for module in self.py_modules or (): + yield module + + for ext in self.ext_modules or (): + if isinstance(ext, tuple): + name, buildinfo = ext + else: + name = ext.name + if name.endswith('module'): + name = name[:-6] + yield name + + def handle_display_options(self, option_order): + """If there were any non-global "display-only" options + (--help-commands or the metadata display options) on the command + line, display the requested info and return true; else return + false. 
+ """ + import sys + + if six.PY2 or self.help_commands: + return _Distribution.handle_display_options(self, option_order) + + # Stdout may be StringIO (e.g. in tests) + if not isinstance(sys.stdout, io.TextIOWrapper): + return _Distribution.handle_display_options(self, option_order) + + # Don't wrap stdout if utf-8 is already the encoding. Provides + # workaround for #334. + if sys.stdout.encoding.lower() in ('utf-8', 'utf8'): + return _Distribution.handle_display_options(self, option_order) + + # Print metadata in UTF-8 no matter the platform + encoding = sys.stdout.encoding + errors = sys.stdout.errors + newline = sys.platform != 'win32' and '\n' or None + line_buffering = sys.stdout.line_buffering + + sys.stdout = io.TextIOWrapper( + sys.stdout.detach(), 'utf-8', errors, newline, line_buffering) + try: + return _Distribution.handle_display_options(self, option_order) + finally: + sys.stdout = io.TextIOWrapper( + sys.stdout.detach(), encoding, errors, newline, line_buffering) + + +class Feature: + """ + **deprecated** -- The `Feature` facility was never completely implemented + or supported, `has reported issues + <https://github.com/pypa/setuptools/issues/58>`_ and will be removed in + a future version. + + A subset of the distribution that can be excluded if unneeded/wanted + + Features are created using these keyword arguments: + + 'description' -- a short, human readable description of the feature, to + be used in error messages, and option help messages. + + 'standard' -- if true, the feature is included by default if it is + available on the current system. Otherwise, the feature is only + included if requested via a command line '--with-X' option, or if + another included feature requires it. The default setting is 'False'. + + 'available' -- if true, the feature is available for installation on the + current system. The default setting is 'True'. + + 'optional' -- if true, the feature's inclusion can be controlled from the + command line, using the '--with-X' or '--without-X' options. If + false, the feature's inclusion status is determined automatically, + based on 'availabile', 'standard', and whether any other feature + requires it. The default setting is 'True'. + + 'require_features' -- a string or sequence of strings naming features + that should also be included if this feature is included. Defaults to + empty list. May also contain 'Require' objects that should be + added/removed from the distribution. + + 'remove' -- a string or list of strings naming packages to be removed + from the distribution if this feature is *not* included. If the + feature *is* included, this argument is ignored. This argument exists + to support removing features that "crosscut" a distribution, such as + defining a 'tests' feature that removes all the 'tests' subpackages + provided by other features. The default for this argument is an empty + list. (Note: the named package(s) or modules must exist in the base + distribution when the 'setup()' function is initially called.) + + other keywords -- any other keyword arguments are saved, and passed to + the distribution's 'include()' and 'exclude()' methods when the + feature is included or excluded, respectively. So, for example, you + could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be + added or removed from the distribution as appropriate. + + A feature must include at least one 'requires', 'remove', or other + keyword argument. Otherwise, it can't affect the distribution in any way. 
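+
+    An illustrative declaration (hypothetical names):
+
+        Feature(
+            "optional C speedups", standard=True,
+            ext_modules=[Extension('pkg._speedups', ['pkg/_speedups.c'])],
+        )
+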
+ Note also that you can subclass 'Feature' to create your own specialized + feature types that modify the distribution in other ways when included or + excluded. See the docstrings for the various methods here for more detail. + Aside from the methods, the only feature attributes that distributions look + at are 'description' and 'optional'. + """ + + @staticmethod + def warn_deprecated(): + msg = ( + "Features are deprecated and will be removed in a future " + "version. See https://github.com/pypa/setuptools/issues/65." + ) + warnings.warn(msg, DistDeprecationWarning, stacklevel=3) + + def __init__( + self, description, standard=False, available=True, + optional=True, require_features=(), remove=(), **extras): + self.warn_deprecated() + + self.description = description + self.standard = standard + self.available = available + self.optional = optional + if isinstance(require_features, (str, Require)): + require_features = require_features, + + self.require_features = [ + r for r in require_features if isinstance(r, str) + ] + er = [r for r in require_features if not isinstance(r, str)] + if er: + extras['require_features'] = er + + if isinstance(remove, str): + remove = remove, + self.remove = remove + self.extras = extras + + if not remove and not require_features and not extras: + raise DistutilsSetupError( + "Feature %s: must define 'require_features', 'remove', or " + "at least one of 'packages', 'py_modules', etc." + ) + + def include_by_default(self): + """Should this feature be included by default?""" + return self.available and self.standard + + def include_in(self, dist): + """Ensure feature and its requirements are included in distribution + + You may override this in a subclass to perform additional operations on + the distribution. Note that this method may be called more than once + per feature, and so should be idempotent. + + """ + + if not self.available: + raise DistutilsPlatformError( + self.description + " is required, " + "but is not available on this platform" + ) + + dist.include(**self.extras) + + for f in self.require_features: + dist.include_feature(f) + + def exclude_from(self, dist): + """Ensure feature is excluded from distribution + + You may override this in a subclass to perform additional operations on + the distribution. This method will be called at most once per + feature, and only after all included features have been asked to + include themselves. + """ + + dist.exclude(**self.extras) + + if self.remove: + for item in self.remove: + dist.exclude_package(item) + + def validate(self, dist): + """Verify that feature makes sense in context of distribution + + This method is called by the distribution just before it parses its + command line. It checks to ensure that the 'remove' attribute, if any, + contains only valid package/module names that are present in the base + distribution when 'setup()' is called. You may override it in a + subclass to perform any other required validation of the feature + against a target distribution. + """ + + for item in self.remove: + if not dist.has_contents_for(item): + raise DistutilsSetupError( + "%s wants to be able to remove %s, but the distribution" + " doesn't contain any packages or modules under %s" + % (self.description, item, item) + ) + + +class DistDeprecationWarning(SetuptoolsDeprecationWarning): + """Class for warning about deprecations in dist in + setuptools. 
Not ignored by default, unlike DeprecationWarning.""" diff --git a/project/venv/lib/python2.7/site-packages/setuptools/dist.pyc b/project/venv/lib/python2.7/site-packages/setuptools/dist.pyc new file mode 100644 index 0000000..79f93bc Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/dist.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/extension.py b/project/venv/lib/python2.7/site-packages/setuptools/extension.py new file mode 100644 index 0000000..2946889 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/extension.py @@ -0,0 +1,57 @@ +import re +import functools +import distutils.core +import distutils.errors +import distutils.extension + +from setuptools.extern.six.moves import map + +from .monkey import get_unpatched + + +def _have_cython(): + """ + Return True if Cython can be imported. + """ + cython_impl = 'Cython.Distutils.build_ext' + try: + # from (cython_impl) import build_ext + __import__(cython_impl, fromlist=['build_ext']).build_ext + return True + except Exception: + pass + return False + + +# for compatibility +have_pyrex = _have_cython + +_Extension = get_unpatched(distutils.core.Extension) + + +class Extension(_Extension): + """Extension that uses '.c' files in place of '.pyx' files""" + + def __init__(self, name, sources, *args, **kw): + # The *args is needed for compatibility as calls may use positional + # arguments. py_limited_api may be set only via keyword. + self.py_limited_api = kw.pop("py_limited_api", False) + _Extension.__init__(self, name, sources, *args, **kw) + + def _convert_pyx_sources_to_lang(self): + """ + Replace sources with .pyx extensions to sources with the target + language extension. This mechanism allows language authors to supply + pre-converted sources but to prefer the .pyx sources. + """ + if _have_cython(): + # the build has Cython, so allow it to compile the .pyx files + return + lang = self.language or '' + target_ext = '.cpp' if lang.lower() == 'c++' else '.c' + sub = functools.partial(re.sub, '.pyx$', target_ext) + self.sources = list(map(sub, self.sources)) + + +class Library(Extension): + """Just like a regular Extension, but built as a library instead""" diff --git a/project/venv/lib/python2.7/site-packages/setuptools/extension.pyc b/project/venv/lib/python2.7/site-packages/setuptools/extension.pyc new file mode 100644 index 0000000..e87d54d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/extension.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/extern/__init__.py b/project/venv/lib/python2.7/site-packages/setuptools/extern/__init__.py new file mode 100644 index 0000000..cb2fa32 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/extern/__init__.py @@ -0,0 +1,73 @@ +import sys + + +class VendorImporter: + """ + A PEP 302 meta path importer for finding optionally-vendored + or otherwise naturally-installed packages from root_name. + """ + + def __init__(self, root_name, vendored_names=(), vendor_pkg=None): + self.root_name = root_name + self.vendored_names = set(vendored_names) + self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') + + @property + def search_path(self): + """ + Search first the vendor package then as a natural package. + """ + yield self.vendor_pkg + '.' + yield '' + + def find_module(self, fullname, path=None): + """ + Return self when fullname starts with root_name and the + target module is one vendored through this importer. 
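+
+        For example (illustrative), with the names registered at the
+        bottom of this file, 'setuptools.extern.six' resolves to the
+        vendored 'setuptools._vendor.six' when that copy is present.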
+ """ + root, base, target = fullname.partition(self.root_name + '.') + if root: + return + if not any(map(target.startswith, self.vendored_names)): + return + return self + + def load_module(self, fullname): + """ + Iterate over the search path to locate and load fullname. + """ + root, base, target = fullname.partition(self.root_name + '.') + for prefix in self.search_path: + try: + extant = prefix + target + __import__(extant) + mod = sys.modules[extant] + sys.modules[fullname] = mod + # mysterious hack: + # Remove the reference to the extant package/module + # on later Python versions to cause relative imports + # in the vendor package to resolve the same modules + # as those going through this importer. + if sys.version_info >= (3, ): + del sys.modules[extant] + return mod + except ImportError: + pass + else: + raise ImportError( + "The '{target}' package is required; " + "normally this is bundled with this package so if you get " + "this warning, consult the packager of your " + "distribution.".format(**locals()) + ) + + def install(self): + """ + Install this importer into sys.meta_path if not already present. + """ + if self not in sys.meta_path: + sys.meta_path.append(self) + + +names = 'six', 'packaging', 'pyparsing', +VendorImporter(__name__, names, 'setuptools._vendor').install() diff --git a/project/venv/lib/python2.7/site-packages/setuptools/extern/__init__.pyc b/project/venv/lib/python2.7/site-packages/setuptools/extern/__init__.pyc new file mode 100644 index 0000000..8a51af1 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/extern/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/glibc.py b/project/venv/lib/python2.7/site-packages/setuptools/glibc.py new file mode 100644 index 0000000..a134591 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/glibc.py @@ -0,0 +1,86 @@ +# This file originally from pip: +# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/utils/glibc.py +from __future__ import absolute_import + +import ctypes +import re +import warnings + + +def glibc_version_string(): + "Returns glibc version string, or None if not using glibc." + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + process_namespace = ctypes.CDLL(None) + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +# Separated out from have_compatible_glibc for easier unit testing +def check_glibc_version(version_str, required_major, minimum_minor): + # Parse string and check against requested version. + # + # We use a regexp instead of str.split because we want to discard any + # random junk that might come after the minor version -- this might happen + # in patched/forked versions of glibc (e.g. Linaro's version of glibc + # uses version strings like "2.20-2014.11"). See gh-3588. 
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) + if not m: + warnings.warn("Expected glibc version with 2 components major.minor," + " got: %s" % version_str, RuntimeWarning) + return False + return (int(m.group("major")) == required_major and + int(m.group("minor")) >= minimum_minor) + + +def have_compatible_glibc(required_major, minimum_minor): + version_str = glibc_version_string() + if version_str is None: + return False + return check_glibc_version(version_str, required_major, minimum_minor) + + +# platform.libc_ver regularly returns completely nonsensical glibc +# versions. E.g. on my computer, platform says: +# +# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.7') +# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.9') +# +# But the truth is: +# +# ~$ ldd --version +# ldd (Debian GLIBC 2.22-11) 2.22 +# +# This is unfortunate, because it means that the linehaul data on libc +# versions that was generated by pip 8.1.2 and earlier is useless and +# misleading. Solution: instead of using platform, use our code that actually +# works. +def libc_ver(): + """Try to determine the glibc version + + Returns a tuple of strings (lib, version) which default to empty strings + in case the lookup fails. + """ + glibc_version = glibc_version_string() + if glibc_version is None: + return ("", "") + else: + return ("glibc", glibc_version) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/glibc.pyc b/project/venv/lib/python2.7/site-packages/setuptools/glibc.pyc new file mode 100644 index 0000000..ed0850f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/glibc.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/glob.py b/project/venv/lib/python2.7/site-packages/setuptools/glob.py new file mode 100644 index 0000000..9d7cbc5 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/glob.py @@ -0,0 +1,174 @@ +""" +Filename globbing utility. Mostly a copy of `glob` from Python 3.5. + +Changes include: + * `yield from` and PEP3102 `*` removed. + * Hidden files are not ignored. +""" + +import os +import re +import fnmatch + +__all__ = ["glob", "iglob", "escape"] + + +def glob(pathname, recursive=False): + """Return a list of paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + """ + return list(iglob(pathname, recursive=recursive)) + + +def iglob(pathname, recursive=False): + """Return an iterator which yields the paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. 
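+
+    Note: when the whole pattern is simply '**', the underlying generator
+    yields an empty string first; this wrapper consumes it before handing
+    the iterator back to the caller.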
+ """ + it = _iglob(pathname, recursive) + if recursive and _isrecursive(pathname): + s = next(it) # skip empty string + assert not s + return it + + +def _iglob(pathname, recursive): + dirname, basename = os.path.split(pathname) + if not has_magic(pathname): + if basename: + if os.path.lexists(pathname): + yield pathname + else: + # Patterns ending with a slash should match only directories + if os.path.isdir(dirname): + yield pathname + return + if not dirname: + if recursive and _isrecursive(basename): + for x in glob2(dirname, basename): + yield x + else: + for x in glob1(dirname, basename): + yield x + return + # `os.path.split()` returns the argument itself as a dirname if it is a + # drive or UNC path. Prevent an infinite recursion if a drive or UNC path + # contains magic characters (i.e. r'\\?\C:'). + if dirname != pathname and has_magic(dirname): + dirs = _iglob(dirname, recursive) + else: + dirs = [dirname] + if has_magic(basename): + if recursive and _isrecursive(basename): + glob_in_dir = glob2 + else: + glob_in_dir = glob1 + else: + glob_in_dir = glob0 + for dirname in dirs: + for name in glob_in_dir(dirname, basename): + yield os.path.join(dirname, name) + + +# These 2 helper functions non-recursively glob inside a literal directory. +# They return a list of basenames. `glob1` accepts a pattern while `glob0` +# takes a literal basename (so it only has to check for its existence). + + +def glob1(dirname, pattern): + if not dirname: + if isinstance(pattern, bytes): + dirname = os.curdir.encode('ASCII') + else: + dirname = os.curdir + try: + names = os.listdir(dirname) + except OSError: + return [] + return fnmatch.filter(names, pattern) + + +def glob0(dirname, basename): + if not basename: + # `os.path.split()` returns an empty basename for paths ending with a + # directory separator. 'q*x/' should match only directories. + if os.path.isdir(dirname): + return [basename] + else: + if os.path.lexists(os.path.join(dirname, basename)): + return [basename] + return [] + + +# This helper function recursively yields relative pathnames inside a literal +# directory. + + +def glob2(dirname, pattern): + assert _isrecursive(pattern) + yield pattern[:0] + for x in _rlistdir(dirname): + yield x + + +# Recursively yields relative pathnames inside a literal directory. +def _rlistdir(dirname): + if not dirname: + if isinstance(dirname, bytes): + dirname = os.curdir.encode('ASCII') + else: + dirname = os.curdir + try: + names = os.listdir(dirname) + except os.error: + return + for x in names: + yield x + path = os.path.join(dirname, x) if dirname else x + for y in _rlistdir(path): + yield os.path.join(x, y) + + +magic_check = re.compile('([*?[])') +magic_check_bytes = re.compile(b'([*?[])') + + +def has_magic(s): + if isinstance(s, bytes): + match = magic_check_bytes.search(s) + else: + match = magic_check.search(s) + return match is not None + + +def _isrecursive(pattern): + if isinstance(pattern, bytes): + return pattern == b'**' + else: + return pattern == '**' + + +def escape(pathname): + """Escape all special characters. + """ + # Escaping is done by wrapping any of "*?[" between square brackets. + # Metacharacters do not work in the drive part and shouldn't be escaped. 
+ drive, pathname = os.path.splitdrive(pathname) + if isinstance(pathname, bytes): + pathname = magic_check_bytes.sub(br'[\1]', pathname) + else: + pathname = magic_check.sub(r'[\1]', pathname) + return drive + pathname diff --git a/project/venv/lib/python2.7/site-packages/setuptools/glob.pyc b/project/venv/lib/python2.7/site-packages/setuptools/glob.pyc new file mode 100644 index 0000000..fb210ff Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/glob.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/gui-32.exe b/project/venv/lib/python2.7/site-packages/setuptools/gui-32.exe new file mode 100644 index 0000000..f8d3509 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/gui-32.exe differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/gui-64.exe b/project/venv/lib/python2.7/site-packages/setuptools/gui-64.exe new file mode 100644 index 0000000..330c51a Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/gui-64.exe differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/gui.exe b/project/venv/lib/python2.7/site-packages/setuptools/gui.exe new file mode 100644 index 0000000..f8d3509 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/gui.exe differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/launch.py b/project/venv/lib/python2.7/site-packages/setuptools/launch.py new file mode 100644 index 0000000..308283e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/launch.py @@ -0,0 +1,35 @@ +""" +Launch the Python script on the command line after +setuptools is bootstrapped via import. +""" + +# Note that setuptools gets imported implicitly by the +# invocation of this script using python -m setuptools.launch + +import tokenize +import sys + + +def run(): + """ + Run the script in sys.argv[1] as if it had + been invoked naturally. + """ + __builtins__ + script_name = sys.argv[1] + namespace = dict( + __file__=script_name, + __name__='__main__', + __doc__=None, + ) + sys.argv[:] = sys.argv[1:] + + open_ = getattr(tokenize, 'open', open) + script = open_(script_name).read() + norm_script = script.replace('\\r\\n', '\\n') + code = compile(norm_script, script_name, 'exec') + exec(code, namespace) + + +if __name__ == '__main__': + run() diff --git a/project/venv/lib/python2.7/site-packages/setuptools/launch.pyc b/project/venv/lib/python2.7/site-packages/setuptools/launch.pyc new file mode 100644 index 0000000..c59e61e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/launch.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/lib2to3_ex.py b/project/venv/lib/python2.7/site-packages/setuptools/lib2to3_ex.py new file mode 100644 index 0000000..4b1a73f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/lib2to3_ex.py @@ -0,0 +1,62 @@ +""" +Customized Mixin2to3 support: + + - adds support for converting doctests + + +This module raises an ImportError on Python 2. 
+""" + +from distutils.util import Mixin2to3 as _Mixin2to3 +from distutils import log +from lib2to3.refactor import RefactoringTool, get_fixers_from_package + +import setuptools + + +class DistutilsRefactoringTool(RefactoringTool): + def log_error(self, msg, *args, **kw): + log.error(msg, *args) + + def log_message(self, msg, *args): + log.info(msg, *args) + + def log_debug(self, msg, *args): + log.debug(msg, *args) + + +class Mixin2to3(_Mixin2to3): + def run_2to3(self, files, doctests=False): + # See of the distribution option has been set, otherwise check the + # setuptools default. + if self.distribution.use_2to3 is not True: + return + if not files: + return + log.info("Fixing " + " ".join(files)) + self.__build_fixer_names() + self.__exclude_fixers() + if doctests: + if setuptools.run_2to3_on_doctests: + r = DistutilsRefactoringTool(self.fixer_names) + r.refactor(files, write=True, doctests_only=True) + else: + _Mixin2to3.run_2to3(self, files) + + def __build_fixer_names(self): + if self.fixer_names: + return + self.fixer_names = [] + for p in setuptools.lib2to3_fixer_packages: + self.fixer_names.extend(get_fixers_from_package(p)) + if self.distribution.use_2to3_fixers is not None: + for p in self.distribution.use_2to3_fixers: + self.fixer_names.extend(get_fixers_from_package(p)) + + def __exclude_fixers(self): + excluded_fixers = getattr(self, 'exclude_fixers', []) + if self.distribution.use_2to3_exclude_fixers is not None: + excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers) + for fixer_name in excluded_fixers: + if fixer_name in self.fixer_names: + self.fixer_names.remove(fixer_name) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/lib2to3_ex.pyc b/project/venv/lib/python2.7/site-packages/setuptools/lib2to3_ex.pyc new file mode 100644 index 0000000..6fd0b77 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/lib2to3_ex.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/monkey.py b/project/venv/lib/python2.7/site-packages/setuptools/monkey.py new file mode 100644 index 0000000..3c77f8c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/monkey.py @@ -0,0 +1,179 @@ +""" +Monkey patching of distutils. +""" + +import sys +import distutils.filelist +import platform +import types +import functools +from importlib import import_module +import inspect + +from setuptools.extern import six + +import setuptools + +__all__ = [] +""" +Everything is private. Contact the project team +if you think you need this functionality. +""" + + +def _get_mro(cls): + """ + Returns the bases classes for cls sorted by the MRO. + + Works around an issue on Jython where inspect.getmro will not return all + base classes if multiple classes share the same name. Instead, this + function will return a tuple containing the class itself, and the contents + of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024. + """ + if platform.python_implementation() == "Jython": + return (cls,) + cls.__bases__ + return inspect.getmro(cls) + + +def get_unpatched(item): + lookup = ( + get_unpatched_class if isinstance(item, six.class_types) else + get_unpatched_function if isinstance(item, types.FunctionType) else + lambda item: None + ) + return lookup(item) + + +def get_unpatched_class(cls): + """Protect against re-patching the distutils if reloaded + + Also ensures that no other distutils extension monkeypatched the distutils + first. 
+ """ + external_bases = ( + cls + for cls in _get_mro(cls) + if not cls.__module__.startswith('setuptools') + ) + base = next(external_bases) + if not base.__module__.startswith('distutils'): + msg = "distutils has already been patched by %r" % cls + raise AssertionError(msg) + return base + + +def patch_all(): + # we can't patch distutils.cmd, alas + distutils.core.Command = setuptools.Command + + has_issue_12885 = sys.version_info <= (3, 5, 3) + + if has_issue_12885: + # fix findall bug in distutils (http://bugs.python.org/issue12885) + distutils.filelist.findall = setuptools.findall + + needs_warehouse = ( + sys.version_info < (2, 7, 13) + or + (3, 4) < sys.version_info < (3, 4, 6) + or + (3, 5) < sys.version_info <= (3, 5, 3) + ) + + if needs_warehouse: + warehouse = 'https://upload.pypi.org/legacy/' + distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse + + _patch_distribution_metadata() + + # Install Distribution throughout the distutils + for module in distutils.dist, distutils.core, distutils.cmd: + module.Distribution = setuptools.dist.Distribution + + # Install the patched Extension + distutils.core.Extension = setuptools.extension.Extension + distutils.extension.Extension = setuptools.extension.Extension + if 'distutils.command.build_ext' in sys.modules: + sys.modules['distutils.command.build_ext'].Extension = ( + setuptools.extension.Extension + ) + + patch_for_msvc_specialized_compiler() + + +def _patch_distribution_metadata(): + """Patch write_pkg_file and read_pkg_file for higher metadata standards""" + for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'): + new_val = getattr(setuptools.dist, attr) + setattr(distutils.dist.DistributionMetadata, attr, new_val) + + +def patch_func(replacement, target_mod, func_name): + """ + Patch func_name in target_mod with replacement + + Important - original must be resolved by name to avoid + patching an already patched function. + """ + original = getattr(target_mod, func_name) + + # set the 'unpatched' attribute on the replacement to + # point to the original. + vars(replacement).setdefault('unpatched', original) + + # replace the function in the original module + setattr(target_mod, func_name, replacement) + + +def get_unpatched_function(candidate): + return getattr(candidate, 'unpatched') + + +def patch_for_msvc_specialized_compiler(): + """ + Patch functions in distutils to use standalone Microsoft Visual C++ + compilers. + """ + # import late to avoid circular imports on Python < 3.5 + msvc = import_module('setuptools.msvc') + + if platform.system() != 'Windows': + # Compilers only availables on Microsoft Windows + return + + def patch_params(mod_name, func_name): + """ + Prepare the parameters for patch_func to patch indicated function. 
+ """ + repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_' + repl_name = repl_prefix + func_name.lstrip('_') + repl = getattr(msvc, repl_name) + mod = import_module(mod_name) + if not hasattr(mod, func_name): + raise ImportError(func_name) + return repl, mod, func_name + + # Python 2.7 to 3.4 + msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler') + + # Python 3.5+ + msvc14 = functools.partial(patch_params, 'distutils._msvccompiler') + + try: + # Patch distutils.msvc9compiler + patch_func(*msvc9('find_vcvarsall')) + patch_func(*msvc9('query_vcvarsall')) + except ImportError: + pass + + try: + # Patch distutils._msvccompiler._get_vc_env + patch_func(*msvc14('_get_vc_env')) + except ImportError: + pass + + try: + # Patch distutils._msvccompiler.gen_lib_options for Numpy + patch_func(*msvc14('gen_lib_options')) + except ImportError: + pass diff --git a/project/venv/lib/python2.7/site-packages/setuptools/monkey.pyc b/project/venv/lib/python2.7/site-packages/setuptools/monkey.pyc new file mode 100644 index 0000000..bc9aef3 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/monkey.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/msvc.py b/project/venv/lib/python2.7/site-packages/setuptools/msvc.py new file mode 100644 index 0000000..b9c472f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/msvc.py @@ -0,0 +1,1301 @@ +""" +Improved support for Microsoft Visual C++ compilers. + +Known supported compilers: +-------------------------- +Microsoft Visual C++ 9.0: + Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) + Microsoft Windows SDK 6.1 (x86, x64, ia64) + Microsoft Windows SDK 7.0 (x86, x64, ia64) + +Microsoft Visual C++ 10.0: + Microsoft Windows SDK 7.1 (x86, x64, ia64) + +Microsoft Visual C++ 14.0: + Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) + Microsoft Visual Studio 2017 (x86, x64, arm, arm64) + Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) +""" + +import os +import sys +import platform +import itertools +import distutils.errors +from setuptools.extern.packaging.version import LegacyVersion + +from setuptools.extern.six.moves import filterfalse + +from .monkey import get_unpatched + +if platform.system() == 'Windows': + from setuptools.extern.six.moves import winreg + safe_env = os.environ +else: + """ + Mock winreg and environ so the module can be imported + on this platform. + """ + + class winreg: + HKEY_USERS = None + HKEY_CURRENT_USER = None + HKEY_LOCAL_MACHINE = None + HKEY_CLASSES_ROOT = None + + safe_env = dict() + +_msvc9_suppress_errors = ( + # msvc9compiler isn't available on some platforms + ImportError, + + # msvc9compiler raises DistutilsPlatformError in some + # environments. See #1118. + distutils.errors.DistutilsPlatformError, +) + +try: + from distutils.msvc9compiler import Reg +except _msvc9_suppress_errors: + pass + + +def msvc9_find_vcvarsall(version): + """ + Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone + compiler build for Python (VCForPython). Fall back to original behavior + when the standalone compiler is not available. + + Redirect the path of "vcvarsall.bat". + + Known supported compilers + ------------------------- + Microsoft Visual C++ 9.0: + Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) + + Parameters + ---------- + version: float + Required Microsoft Visual C++ version. 
+
+    Return
+    ------
+    vcvarsall.bat path: str
+    """
+    VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
+    key = VC_BASE % ('', version)
+    try:
+        # Per-user installs register the compiler path here
+        productdir = Reg.get_value(key, "installdir")
+    except KeyError:
+        try:
+            # All-user installs on a 64-bit system register here
+            key = VC_BASE % ('Wow6432Node\\', version)
+            productdir = Reg.get_value(key, "installdir")
+        except KeyError:
+            productdir = None
+
+    if productdir:
+        vcvarsall = os.path.join(productdir, "vcvarsall.bat")
+        if os.path.isfile(vcvarsall):
+            return vcvarsall
+
+    return get_unpatched(msvc9_find_vcvarsall)(version)
+
+
+def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
+    """
+    Patched "distutils.msvc9compiler.query_vcvarsall" to support extra
+    compilers.
+
+    Set environment without use of "vcvarsall.bat".
+
+    Known supported compilers
+    -------------------------
+    Microsoft Visual C++ 9.0:
+        Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
+        Microsoft Windows SDK 6.1 (x86, x64, ia64)
+        Microsoft Windows SDK 7.0 (x86, x64, ia64)
+
+    Microsoft Visual C++ 10.0:
+        Microsoft Windows SDK 7.1 (x86, x64, ia64)
+
+    Parameters
+    ----------
+    ver: float
+        Required Microsoft Visual C++ version.
+    arch: str
+        Target architecture.
+
+    Return
+    ------
+    environment: dict
+    """
+    # Try to get the environment from vcvarsall.bat (the classical way)
+    try:
+        orig = get_unpatched(msvc9_query_vcvarsall)
+        return orig(ver, arch, *args, **kwargs)
+    except distutils.errors.DistutilsPlatformError:
+        # Pass error if vcvarsall.bat is missing
+        pass
+    except ValueError:
+        # Pass error if environment not set after executing vcvarsall.bat
+        pass
+
+    # If error, try to set environment directly
+    try:
+        return EnvironmentInfo(arch, ver).return_env()
+    except distutils.errors.DistutilsPlatformError as exc:
+        _augment_exception(exc, ver, arch)
+        raise
+
+
+def msvc14_get_vc_env(plat_spec):
+    """
+    Patched "distutils._msvccompiler._get_vc_env" to support extra
+    compilers.
+
+    Set environment without use of "vcvarsall.bat".
+
+    Known supported compilers
+    -------------------------
+    Microsoft Visual C++ 14.0:
+        Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
+        Microsoft Visual Studio 2017 (x86, x64, arm, arm64)
+        Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
+
+    Parameters
+    ----------
+    plat_spec: str
+        Target architecture.
+
+    Return
+    ------
+    environment: dict
+    """
+    # Try to get the environment from vcvarsall.bat (the classical way)
+    try:
+        return get_unpatched(msvc14_get_vc_env)(plat_spec)
+    except distutils.errors.DistutilsPlatformError:
+        # Pass error if vcvarsall.bat is missing
+        pass
+
+    # If error, try to set environment directly
+    try:
+        return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env()
+    except distutils.errors.DistutilsPlatformError as exc:
+        _augment_exception(exc, 14.0)
+        raise
+
+
+def msvc14_gen_lib_options(*args, **kwargs):
+    """
+    Patched "distutils._msvccompiler.gen_lib_options" to fix
+    compatibility between "numpy.distutils" and "distutils._msvccompiler"
+    (for Numpy < 1.11.2)
+    """
+    if "numpy.distutils" in sys.modules:
+        import numpy as np
+        if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
+            return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
+    return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
+
+
+def _augment_exception(exc, version, arch=''):
+    """
+    Add details to the exception message to help guide the user
+    as to what action will resolve it.
+ """ + # Error if MSVC++ directory not found or environment not set + message = exc.args[0] + + if "vcvarsall" in message.lower() or "visual c" in message.lower(): + # Special error message if MSVC++ not installed + tmpl = 'Microsoft Visual C++ {version:0.1f} is required.' + message = tmpl.format(**locals()) + msdownload = 'www.microsoft.com/download/details.aspx?id=%d' + if version == 9.0: + if arch.lower().find('ia64') > -1: + # For VC++ 9.0, if IA64 support is needed, redirect user + # to Windows SDK 7.0 + message += ' Get it with "Microsoft Windows SDK 7.0": ' + message += msdownload % 3138 + else: + # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : + # This redirection link is maintained by Microsoft. + # Contact vspython@microsoft.com if it needs updating. + message += ' Get it from http://aka.ms/vcpython27' + elif version == 10.0: + # For VC++ 10.0 Redirect user to Windows SDK 7.1 + message += ' Get it with "Microsoft Windows SDK 7.1": ' + message += msdownload % 8279 + elif version >= 14.0: + # For VC++ 14.0 Redirect user to Visual C++ Build Tools + message += (' Get it with "Microsoft Visual C++ Build Tools": ' + r'https://visualstudio.microsoft.com/downloads/') + + exc.args = (message, ) + + +class PlatformInfo: + """ + Current and Target Architectures informations. + + Parameters + ---------- + arch: str + Target architecture. + """ + current_cpu = safe_env.get('processor_architecture', '').lower() + + def __init__(self, arch): + self.arch = arch.lower().replace('x64', 'amd64') + + @property + def target_cpu(self): + return self.arch[self.arch.find('_') + 1:] + + def target_is_x86(self): + return self.target_cpu == 'x86' + + def current_is_x86(self): + return self.current_cpu == 'x86' + + def current_dir(self, hidex86=False, x64=False): + """ + Current platform specific subfolder. + + Parameters + ---------- + hidex86: bool + return '' and not '\x86' if architecture is x86. + x64: bool + return '\x64' and not '\amd64' if architecture is amd64. + + Return + ------ + subfolder: str + '\target', or '' (see hidex86 parameter) + """ + return ( + '' if (self.current_cpu == 'x86' and hidex86) else + r'\x64' if (self.current_cpu == 'amd64' and x64) else + r'\%s' % self.current_cpu + ) + + def target_dir(self, hidex86=False, x64=False): + r""" + Target platform specific subfolder. + + Parameters + ---------- + hidex86: bool + return '' and not '\x86' if architecture is x86. + x64: bool + return '\x64' and not '\amd64' if architecture is amd64. + + Return + ------ + subfolder: str + '\current', or '' (see hidex86 parameter) + """ + return ( + '' if (self.target_cpu == 'x86' and hidex86) else + r'\x64' if (self.target_cpu == 'amd64' and x64) else + r'\%s' % self.target_cpu + ) + + def cross_dir(self, forcex86=False): + r""" + Cross platform specific subfolder. + + Parameters + ---------- + forcex86: bool + Use 'x86' as current architecture even if current acritecture is + not x86. + + Return + ------ + subfolder: str + '' if target architecture is current architecture, + '\current_target' if not. + """ + current = 'x86' if forcex86 else self.current_cpu + return ( + '' if self.target_cpu == current else + self.target_dir().replace('\\', '\\%s_' % current) + ) + + +class RegistryInfo: + """ + Microsoft Visual Studio related registry informations. + + Parameters + ---------- + platform_info: PlatformInfo + "PlatformInfo" instance. 
+ """ + HKEYS = (winreg.HKEY_USERS, + winreg.HKEY_CURRENT_USER, + winreg.HKEY_LOCAL_MACHINE, + winreg.HKEY_CLASSES_ROOT) + + def __init__(self, platform_info): + self.pi = platform_info + + @property + def visualstudio(self): + """ + Microsoft Visual Studio root registry key. + """ + return 'VisualStudio' + + @property + def sxs(self): + """ + Microsoft Visual Studio SxS registry key. + """ + return os.path.join(self.visualstudio, 'SxS') + + @property + def vc(self): + """ + Microsoft Visual C++ VC7 registry key. + """ + return os.path.join(self.sxs, 'VC7') + + @property + def vs(self): + """ + Microsoft Visual Studio VS7 registry key. + """ + return os.path.join(self.sxs, 'VS7') + + @property + def vc_for_python(self): + """ + Microsoft Visual C++ for Python registry key. + """ + return r'DevDiv\VCForPython' + + @property + def microsoft_sdk(self): + """ + Microsoft SDK registry key. + """ + return 'Microsoft SDKs' + + @property + def windows_sdk(self): + """ + Microsoft Windows/Platform SDK registry key. + """ + return os.path.join(self.microsoft_sdk, 'Windows') + + @property + def netfx_sdk(self): + """ + Microsoft .NET Framework SDK registry key. + """ + return os.path.join(self.microsoft_sdk, 'NETFXSDK') + + @property + def windows_kits_roots(self): + """ + Microsoft Windows Kits Roots registry key. + """ + return r'Windows Kits\Installed Roots' + + def microsoft(self, key, x86=False): + """ + Return key in Microsoft software registry. + + Parameters + ---------- + key: str + Registry key path where look. + x86: str + Force x86 software registry. + + Return + ------ + str: value + """ + node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node' + return os.path.join('Software', node64, 'Microsoft', key) + + def lookup(self, key, name): + """ + Look for values in registry in Microsoft software registry. + + Parameters + ---------- + key: str + Registry key path where look. + name: str + Value name to find. + + Return + ------ + str: value + """ + KEY_READ = winreg.KEY_READ + openkey = winreg.OpenKey + ms = self.microsoft + for hkey in self.HKEYS: + try: + bkey = openkey(hkey, ms(key), 0, KEY_READ) + except (OSError, IOError): + if not self.pi.current_is_x86(): + try: + bkey = openkey(hkey, ms(key, True), 0, KEY_READ) + except (OSError, IOError): + continue + else: + continue + try: + return winreg.QueryValueEx(bkey, name)[0] + except (OSError, IOError): + pass + + +class SystemInfo: + """ + Microsoft Windows and Visual Studio related system inormations. + + Parameters + ---------- + registry_info: RegistryInfo + "RegistryInfo" instance. + vc_ver: float + Required Microsoft Visual C++ version. + """ + + # Variables and properties in this class use originals CamelCase variables + # names from Microsoft source files for more easy comparaison. + WinDir = safe_env.get('WinDir', '') + ProgramFiles = safe_env.get('ProgramFiles', '') + ProgramFilesx86 = safe_env.get('ProgramFiles(x86)', ProgramFiles) + + def __init__(self, registry_info, vc_ver=None): + self.ri = registry_info + self.pi = self.ri.pi + self.vc_ver = vc_ver or self._find_latest_available_vc_ver() + + def _find_latest_available_vc_ver(self): + try: + return self.find_available_vc_vers()[-1] + except IndexError: + err = 'No Microsoft Visual C++ version found' + raise distutils.errors.DistutilsPlatformError(err) + + def find_available_vc_vers(self): + """ + Find all available Microsoft Visual C++ versions. 
+ """ + ms = self.ri.microsoft + vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs) + vc_vers = [] + for hkey in self.ri.HKEYS: + for key in vckeys: + try: + bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ) + except (OSError, IOError): + continue + subkeys, values, _ = winreg.QueryInfoKey(bkey) + for i in range(values): + try: + ver = float(winreg.EnumValue(bkey, i)[0]) + if ver not in vc_vers: + vc_vers.append(ver) + except ValueError: + pass + for i in range(subkeys): + try: + ver = float(winreg.EnumKey(bkey, i)) + if ver not in vc_vers: + vc_vers.append(ver) + except ValueError: + pass + return sorted(vc_vers) + + @property + def VSInstallDir(self): + """ + Microsoft Visual Studio directory. + """ + # Default path + name = 'Microsoft Visual Studio %0.1f' % self.vc_ver + default = os.path.join(self.ProgramFilesx86, name) + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vs, '%0.1f' % self.vc_ver) or default + + @property + def VCInstallDir(self): + """ + Microsoft Visual C++ directory. + """ + self.VSInstallDir + + guess_vc = self._guess_vc() or self._guess_vc_legacy() + + # Try to get "VC++ for Python" path from registry as default path + reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) + python_vc = self.ri.lookup(reg_path, 'installdir') + default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc + + # Try to get path from registry, if fail use default path + path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc + + if not os.path.isdir(path): + msg = 'Microsoft Visual C++ directory not found' + raise distutils.errors.DistutilsPlatformError(msg) + + return path + + def _guess_vc(self): + """ + Locate Visual C for 2017 + """ + if self.vc_ver <= 14.0: + return + + default = r'VC\Tools\MSVC' + guess_vc = os.path.join(self.VSInstallDir, default) + # Subdir with VC exact version as name + try: + vc_exact_ver = os.listdir(guess_vc)[-1] + return os.path.join(guess_vc, vc_exact_ver) + except (OSError, IOError, IndexError): + pass + + def _guess_vc_legacy(self): + """ + Locate Visual C for versions prior to 2017 + """ + default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver + return os.path.join(self.ProgramFilesx86, default) + + @property + def WindowsSdkVersion(self): + """ + Microsoft Windows SDK versions for specified MSVC++ version. + """ + if self.vc_ver <= 9.0: + return ('7.0', '6.1', '6.0a') + elif self.vc_ver == 10.0: + return ('7.1', '7.0a') + elif self.vc_ver == 11.0: + return ('8.0', '8.0a') + elif self.vc_ver == 12.0: + return ('8.1', '8.1a') + elif self.vc_ver >= 14.0: + return ('10.0', '8.1') + + @property + def WindowsSdkLastVersion(self): + """ + Microsoft Windows SDK last version + """ + return self._use_last_dir_name(os.path.join( + self.WindowsSdkDir, 'lib')) + + @property + def WindowsSdkDir(self): + """ + Microsoft Windows SDK directory. 
+ """ + sdkdir = '' + for ver in self.WindowsSdkVersion: + # Try to get it from registry + loc = os.path.join(self.ri.windows_sdk, 'v%s' % ver) + sdkdir = self.ri.lookup(loc, 'installationfolder') + if sdkdir: + break + if not sdkdir or not os.path.isdir(sdkdir): + # Try to get "VC++ for Python" version from registry + path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) + install_base = self.ri.lookup(path, 'installdir') + if install_base: + sdkdir = os.path.join(install_base, 'WinSDK') + if not sdkdir or not os.path.isdir(sdkdir): + # If fail, use default new path + for ver in self.WindowsSdkVersion: + intver = ver[:ver.rfind('.')] + path = r'Microsoft SDKs\Windows Kits\%s' % (intver) + d = os.path.join(self.ProgramFiles, path) + if os.path.isdir(d): + sdkdir = d + if not sdkdir or not os.path.isdir(sdkdir): + # If fail, use default old path + for ver in self.WindowsSdkVersion: + path = r'Microsoft SDKs\Windows\v%s' % ver + d = os.path.join(self.ProgramFiles, path) + if os.path.isdir(d): + sdkdir = d + if not sdkdir: + # If fail, use Platform SDK + sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK') + return sdkdir + + @property + def WindowsSDKExecutablePath(self): + """ + Microsoft Windows SDK executable directory. + """ + # Find WinSDK NetFx Tools registry dir name + if self.vc_ver <= 11.0: + netfxver = 35 + arch = '' + else: + netfxver = 40 + hidex86 = True if self.vc_ver <= 12.0 else False + arch = self.pi.current_dir(x64=True, hidex86=hidex86) + fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-')) + + # liste all possibles registry paths + regpaths = [] + if self.vc_ver >= 14.0: + for ver in self.NetFxSdkVersion: + regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)] + + for ver in self.WindowsSdkVersion: + regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)] + + # Return installation folder from the more recent path + for path in regpaths: + execpath = self.ri.lookup(path, 'installationfolder') + if execpath: + break + return execpath + + @property + def FSharpInstallDir(self): + """ + Microsoft Visual F# directory. + """ + path = r'%0.1f\Setup\F#' % self.vc_ver + path = os.path.join(self.ri.visualstudio, path) + return self.ri.lookup(path, 'productdir') or '' + + @property + def UniversalCRTSdkDir(self): + """ + Microsoft Universal CRT SDK directory. + """ + # Set Kit Roots versions for specified MSVC++ version + if self.vc_ver >= 14.0: + vers = ('10', '81') + else: + vers = () + + # Find path of the more recent Kit + for ver in vers: + sdkdir = self.ri.lookup(self.ri.windows_kits_roots, + 'kitsroot%s' % ver) + if sdkdir: + break + return sdkdir or '' + + @property + def UniversalCRTSdkLastVersion(self): + """ + Microsoft Universal C Runtime SDK last version + """ + return self._use_last_dir_name(os.path.join( + self.UniversalCRTSdkDir, 'lib')) + + @property + def NetFxSdkVersion(self): + """ + Microsoft .NET Framework SDK versions. + """ + # Set FxSdk versions for specified MSVC++ version + if self.vc_ver >= 14.0: + return ('4.6.1', '4.6') + else: + return () + + @property + def NetFxSdkDir(self): + """ + Microsoft .NET Framework SDK directory. + """ + for ver in self.NetFxSdkVersion: + loc = os.path.join(self.ri.netfx_sdk, ver) + sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder') + if sdkdir: + break + return sdkdir or '' + + @property + def FrameworkDir32(self): + """ + Microsoft .NET Framework 32bit directory. 
+ """ + # Default path + guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework') + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw + + @property + def FrameworkDir64(self): + """ + Microsoft .NET Framework 64bit directory. + """ + # Default path + guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64') + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw + + @property + def FrameworkVersion32(self): + """ + Microsoft .NET Framework 32bit versions. + """ + return self._find_dot_net_versions(32) + + @property + def FrameworkVersion64(self): + """ + Microsoft .NET Framework 64bit versions. + """ + return self._find_dot_net_versions(64) + + def _find_dot_net_versions(self, bits): + """ + Find Microsoft .NET Framework versions. + + Parameters + ---------- + bits: int + Platform number of bits: 32 or 64. + """ + # Find actual .NET version in registry + reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) + dot_net_dir = getattr(self, 'FrameworkDir%d' % bits) + ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or '' + + # Set .NET versions for specified MSVC++ version + if self.vc_ver >= 12.0: + frameworkver = (ver, 'v4.0') + elif self.vc_ver >= 10.0: + frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver, + 'v3.5') + elif self.vc_ver == 9.0: + frameworkver = ('v3.5', 'v2.0.50727') + if self.vc_ver == 8.0: + frameworkver = ('v3.0', 'v2.0.50727') + return frameworkver + + def _use_last_dir_name(self, path, prefix=''): + """ + Return name of the last dir in path or '' if no dir found. + + Parameters + ---------- + path: str + Use dirs in this path + prefix: str + Use only dirs startings by this prefix + """ + matching_dirs = ( + dir_name + for dir_name in reversed(os.listdir(path)) + if os.path.isdir(os.path.join(path, dir_name)) and + dir_name.startswith(prefix) + ) + return next(matching_dirs, None) or '' + + +class EnvironmentInfo: + """ + Return environment variables for specified Microsoft Visual C++ version + and platform : Lib, Include, Path and libpath. + + This function is compatible with Microsoft Visual C++ 9.0 to 14.0. + + Script created by analysing Microsoft environment configuration files like + "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ... + + Parameters + ---------- + arch: str + Target architecture. + vc_ver: float + Required Microsoft Visual C++ version. If not set, autodetect the last + version. + vc_min_ver: float + Minimum Microsoft Visual C++ version. + """ + + # Variables and properties in this class use originals CamelCase variables + # names from Microsoft source files for more easy comparaison. + + def __init__(self, arch, vc_ver=None, vc_min_ver=0): + self.pi = PlatformInfo(arch) + self.ri = RegistryInfo(self.pi) + self.si = SystemInfo(self.ri, vc_ver) + + if self.vc_ver < vc_min_ver: + err = 'No suitable Microsoft Visual C++ version found' + raise distutils.errors.DistutilsPlatformError(err) + + @property + def vc_ver(self): + """ + Microsoft Visual C++ version. 
+ """ + return self.si.vc_ver + + @property + def VSTools(self): + """ + Microsoft Visual Studio Tools + """ + paths = [r'Common7\IDE', r'Common7\Tools'] + + if self.vc_ver >= 14.0: + arch_subdir = self.pi.current_dir(hidex86=True, x64=True) + paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow'] + paths += [r'Team Tools\Performance Tools'] + paths += [r'Team Tools\Performance Tools%s' % arch_subdir] + + return [os.path.join(self.si.VSInstallDir, path) for path in paths] + + @property + def VCIncludes(self): + """ + Microsoft Visual C++ & Microsoft Foundation Class Includes + """ + return [os.path.join(self.si.VCInstallDir, 'Include'), + os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')] + + @property + def VCLibraries(self): + """ + Microsoft Visual C++ & Microsoft Foundation Class Libraries + """ + if self.vc_ver >= 15.0: + arch_subdir = self.pi.target_dir(x64=True) + else: + arch_subdir = self.pi.target_dir(hidex86=True) + paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir] + + if self.vc_ver >= 14.0: + paths += [r'Lib\store%s' % arch_subdir] + + return [os.path.join(self.si.VCInstallDir, path) for path in paths] + + @property + def VCStoreRefs(self): + """ + Microsoft Visual C++ store references Libraries + """ + if self.vc_ver < 14.0: + return [] + return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')] + + @property + def VCTools(self): + """ + Microsoft Visual C++ Tools + """ + si = self.si + tools = [os.path.join(si.VCInstallDir, 'VCPackages')] + + forcex86 = True if self.vc_ver <= 10.0 else False + arch_subdir = self.pi.cross_dir(forcex86) + if arch_subdir: + tools += [os.path.join(si.VCInstallDir, 'Bin%s' % arch_subdir)] + + if self.vc_ver == 14.0: + path = 'Bin%s' % self.pi.current_dir(hidex86=True) + tools += [os.path.join(si.VCInstallDir, path)] + + elif self.vc_ver >= 15.0: + host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else + r'bin\HostX64%s') + tools += [os.path.join( + si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))] + + if self.pi.current_cpu != self.pi.target_cpu: + tools += [os.path.join( + si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))] + + else: + tools += [os.path.join(si.VCInstallDir, 'Bin')] + + return tools + + @property + def OSLibraries(self): + """ + Microsoft Windows SDK Libraries + """ + if self.vc_ver <= 10.0: + arch_subdir = self.pi.target_dir(hidex86=True, x64=True) + return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)] + + else: + arch_subdir = self.pi.target_dir(x64=True) + lib = os.path.join(self.si.WindowsSdkDir, 'lib') + libver = self._sdk_subdir + return [os.path.join(lib, '%sum%s' % (libver , arch_subdir))] + + @property + def OSIncludes(self): + """ + Microsoft Windows SDK Include + """ + include = os.path.join(self.si.WindowsSdkDir, 'include') + + if self.vc_ver <= 10.0: + return [include, os.path.join(include, 'gl')] + + else: + if self.vc_ver >= 14.0: + sdkver = self._sdk_subdir + else: + sdkver = '' + return [os.path.join(include, '%sshared' % sdkver), + os.path.join(include, '%sum' % sdkver), + os.path.join(include, '%swinrt' % sdkver)] + + @property + def OSLibpath(self): + """ + Microsoft Windows SDK Libraries Paths + """ + ref = os.path.join(self.si.WindowsSdkDir, 'References') + libpath = [] + + if self.vc_ver <= 9.0: + libpath += self.OSLibraries + + if self.vc_ver >= 11.0: + libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')] + + if self.vc_ver >= 14.0: + libpath += [ + ref, + os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'), + 
os.path.join( + ref, + 'Windows.Foundation.UniversalApiContract', + '1.0.0.0', + ), + os.path.join( + ref, + 'Windows.Foundation.FoundationContract', + '1.0.0.0', + ), + os.path.join( + ref, + 'Windows.Networking.Connectivity.WwanContract', + '1.0.0.0', + ), + os.path.join( + self.si.WindowsSdkDir, + 'ExtensionSDKs', + 'Microsoft.VCLibs', + '%0.1f' % self.vc_ver, + 'References', + 'CommonConfiguration', + 'neutral', + ), + ] + return libpath + + @property + def SdkTools(self): + """ + Microsoft Windows SDK Tools + """ + return list(self._sdk_tools()) + + def _sdk_tools(self): + """ + Microsoft Windows SDK Tools paths generator + """ + if self.vc_ver < 15.0: + bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86' + yield os.path.join(self.si.WindowsSdkDir, bin_dir) + + if not self.pi.current_is_x86(): + arch_subdir = self.pi.current_dir(x64=True) + path = 'Bin%s' % arch_subdir + yield os.path.join(self.si.WindowsSdkDir, path) + + if self.vc_ver == 10.0 or self.vc_ver == 11.0: + if self.pi.target_is_x86(): + arch_subdir = '' + else: + arch_subdir = self.pi.current_dir(hidex86=True, x64=True) + path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir + yield os.path.join(self.si.WindowsSdkDir, path) + + elif self.vc_ver >= 15.0: + path = os.path.join(self.si.WindowsSdkDir, 'Bin') + arch_subdir = self.pi.current_dir(x64=True) + sdkver = self.si.WindowsSdkLastVersion + yield os.path.join(path, '%s%s' % (sdkver, arch_subdir)) + + if self.si.WindowsSDKExecutablePath: + yield self.si.WindowsSDKExecutablePath + + @property + def _sdk_subdir(self): + """ + Microsoft Windows SDK version subdir + """ + ucrtver = self.si.WindowsSdkLastVersion + return ('%s\\' % ucrtver) if ucrtver else '' + + @property + def SdkSetup(self): + """ + Microsoft Windows SDK Setup + """ + if self.vc_ver > 9.0: + return [] + + return [os.path.join(self.si.WindowsSdkDir, 'Setup')] + + @property + def FxTools(self): + """ + Microsoft .NET Framework Tools + """ + pi = self.pi + si = self.si + + if self.vc_ver <= 10.0: + include32 = True + include64 = not pi.target_is_x86() and not pi.current_is_x86() + else: + include32 = pi.target_is_x86() or pi.current_is_x86() + include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64' + + tools = [] + if include32: + tools += [os.path.join(si.FrameworkDir32, ver) + for ver in si.FrameworkVersion32] + if include64: + tools += [os.path.join(si.FrameworkDir64, ver) + for ver in si.FrameworkVersion64] + return tools + + @property + def NetFxSDKLibraries(self): + """ + Microsoft .Net Framework SDK Libraries + """ + if self.vc_ver < 14.0 or not self.si.NetFxSdkDir: + return [] + + arch_subdir = self.pi.target_dir(x64=True) + return [os.path.join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)] + + @property + def NetFxSDKIncludes(self): + """ + Microsoft .Net Framework SDK Includes + """ + if self.vc_ver < 14.0 or not self.si.NetFxSdkDir: + return [] + + return [os.path.join(self.si.NetFxSdkDir, r'include\um')] + + @property + def VsTDb(self): + """ + Microsoft Visual Studio Team System Database + """ + return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')] + + @property + def MSBuild(self): + """ + Microsoft Build Engine + """ + if self.vc_ver < 12.0: + return [] + elif self.vc_ver < 15.0: + base_path = self.si.ProgramFilesx86 + arch_subdir = self.pi.current_dir(hidex86=True) + else: + base_path = self.si.VSInstallDir + arch_subdir = '' + + path = r'MSBuild\%0.1f\bin%s' % (self.vc_ver, arch_subdir) + build = [os.path.join(base_path, path)] + + if self.vc_ver >= 15.0: + # Add Roslyn C# & 
Visual Basic Compiler
+            build += [os.path.join(base_path, path, 'Roslyn')]
+
+        return build
+
+    @property
+    def HTMLHelpWorkshop(self):
+        """
+        Microsoft HTML Help Workshop
+        """
+        if self.vc_ver < 11.0:
+            return []
+
+        return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
+
+    @property
+    def UCRTLibraries(self):
+        """
+        Microsoft Universal C Runtime SDK Libraries
+        """
+        if self.vc_ver < 14.0:
+            return []
+
+        arch_subdir = self.pi.target_dir(x64=True)
+        lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib')
+        ucrtver = self._ucrt_subdir
+        return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
+
+    @property
+    def UCRTIncludes(self):
+        """
+        Microsoft Universal C Runtime SDK Include
+        """
+        if self.vc_ver < 14.0:
+            return []
+
+        include = os.path.join(self.si.UniversalCRTSdkDir, 'include')
+        return [os.path.join(include, '%sucrt' % self._ucrt_subdir)]
+
+    @property
+    def _ucrt_subdir(self):
+        """
+        Microsoft Universal C Runtime SDK version subdir
+        """
+        ucrtver = self.si.UniversalCRTSdkLastVersion
+        return ('%s\\' % ucrtver) if ucrtver else ''
+
+    @property
+    def FSharp(self):
+        """
+        Microsoft Visual F#
+        """
+        if not (11.0 <= self.vc_ver <= 12.0):
+            return []
+
+        return self.si.FSharpInstallDir
+
+    @property
+    def VCRuntimeRedist(self):
+        """
+        Microsoft Visual C++ runtime redistributable DLL
+        """
+        arch_subdir = self.pi.target_dir(x64=True)
+        if self.vc_ver < 15:
+            redist_path = self.si.VCInstallDir
+            vcruntime = 'redist%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll'
+        else:
+            redist_path = self.si.VCInstallDir.replace('\\Tools', '\\Redist')
+            vcruntime = 'onecore%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll'
+
+        # Visual Studio 2017 is still Visual C++ 14.0
+        dll_ver = 14.0 if self.vc_ver == 15 else self.vc_ver
+
+        vcruntime = vcruntime % (arch_subdir, self.vc_ver, dll_ver)
+        return os.path.join(redist_path, vcruntime)
+
+    def return_env(self, exists=True):
+        """
+        Return environment dict.
+
+        Parameters
+        ----------
+        exists: bool
+            If True, only return existing paths.
+        """
+        env = dict(
+            include=self._build_paths('include',
+                                      [self.VCIncludes,
+                                       self.OSIncludes,
+                                       self.UCRTIncludes,
+                                       self.NetFxSDKIncludes],
+                                      exists),
+            lib=self._build_paths('lib',
+                                  [self.VCLibraries,
+                                   self.OSLibraries,
+                                   self.FxTools,
+                                   self.UCRTLibraries,
+                                   self.NetFxSDKLibraries],
+                                  exists),
+            libpath=self._build_paths('libpath',
+                                      [self.VCLibraries,
+                                       self.FxTools,
+                                       self.VCStoreRefs,
+                                       self.OSLibpath],
+                                      exists),
+            path=self._build_paths('path',
+                                   [self.VCTools,
+                                    self.VSTools,
+                                    self.VsTDb,
+                                    self.SdkTools,
+                                    self.SdkSetup,
+                                    self.FxTools,
+                                    self.MSBuild,
+                                    self.HTMLHelpWorkshop,
+                                    self.FSharp],
+                                   exists),
+        )
+        if self.vc_ver >= 14 and os.path.isfile(self.VCRuntimeRedist):
+            env['py_vcruntime_redist'] = self.VCRuntimeRedist
+        return env
+
+    def _build_paths(self, name, spec_path_lists, exists):
+        """
+        Given an environment variable name and specified paths,
+        return a pathsep-separated string of paths containing
+        unique, extant, directories from those paths and from
+        the environment variable. Raise an error if no paths
+        are resolved.
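+
+        For example, if name='lib' resolves no existing directories and
+        the LIB environment variable is empty, a DistutilsPlatformError
+        ("LIB environment variable is empty") is raised.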
[project/venv/ continued: the commit also adds the vendored setuptools files msvc.pyc, namespaces.py, namespaces.pyc, package_index.py, package_index.pyc, pep425tags.py, pep425tags.pyc, py27compat.py, py27compat.pyc, py31compat.py, py31compat.pyc, py33compat.py, py33compat.pyc, sandbox.py, sandbox.pyc, script (dev).tmpl, script.tmpl, site-patch.py, site-patch.pyc, and ssl_support.py under project/venv/lib/python2.7/site-packages/setuptools/ — unmodified upstream library code and compiled .pyc binaries belonging to the committed virtualenv, omitted here.]
dn: + return False + + # Ported from python3-syntax: + # leftmost, *remainder = dn.split(r'.') + parts = dn.split(r'.') + leftmost = parts[0] + remainder = parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survey of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. 
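+                # A decoded certificate, as returned by getpeercert(), is a
+                # dict of nested tuples; an illustrative (hypothetical) shape:
+                #   {'subject': ((('commonName', 'example.com'),),),
+                #    'subjectAltName': (('DNS', 'example.com'),)}
+                # This commonName fallback only runs when the loop above found
+                # no 'DNS' entries in subjectAltName (RFC 2818, sec. 3.1).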
+ if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + + +class VerifyingHTTPSHandler(HTTPSHandler): + """Simple verifying handler: no auth, subclasses, timeouts, etc.""" + + def __init__(self, ca_bundle): + self.ca_bundle = ca_bundle + HTTPSHandler.__init__(self) + + def https_open(self, req): + return self.do_open( + lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req + ) + + +class VerifyingHTTPSConn(HTTPSConnection): + """Simple verifying connection: no auth, subclasses, timeouts, etc.""" + + def __init__(self, host, ca_bundle, **kw): + HTTPSConnection.__init__(self, host, **kw) + self.ca_bundle = ca_bundle + + def connect(self): + sock = socket.create_connection( + (self.host, self.port), getattr(self, 'source_address', None) + ) + + # Handle the socket if a (proxy) tunnel is present + if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None): + self.sock = sock + self._tunnel() + # http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7 + # change self.host to mean the proxy server host when tunneling is + # being used. Adapt, since we are interested in the destination + # host for the match_hostname() comparison. + actual_host = self._tunnel_host + else: + actual_host = self.host + + if hasattr(ssl, 'create_default_context'): + ctx = ssl.create_default_context(cafile=self.ca_bundle) + self.sock = ctx.wrap_socket(sock, server_hostname=actual_host) + else: + # This is for python < 2.7.9 and < 3.4? 
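+            # ssl.wrap_socket() verifies the certificate chain against
+            # ca_bundle (CERT_REQUIRED) but, unlike create_default_context()
+            # in the branch above, it performs no hostname check and sends no
+            # SNI, hence the explicit match_hostname() call that follows.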
+ self.sock = ssl.wrap_socket( + sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle + ) + try: + match_hostname(self.sock.getpeercert(), actual_host) + except CertificateError: + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + + +def opener_for(ca_bundle=None): + """Get a urlopen() replacement that uses ca_bundle for verification""" + return urllib.request.build_opener( + VerifyingHTTPSHandler(ca_bundle or find_ca_bundle()) + ).open + + +# from jaraco.functools +def once(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + if not hasattr(func, 'always_returns'): + func.always_returns = func(*args, **kwargs) + return func.always_returns + return wrapper + + +@once +def get_win_certfile(): + try: + import wincertstore + except ImportError: + return None + + class CertFile(wincertstore.CertFile): + def __init__(self): + super(CertFile, self).__init__() + atexit.register(self.close) + + def close(self): + try: + super(CertFile, self).close() + except OSError: + pass + + _wincerts = CertFile() + _wincerts.addstore('CA') + _wincerts.addstore('ROOT') + return _wincerts.name + + +def find_ca_bundle(): + """Return an existing CA bundle path, or None""" + extant_cert_paths = filter(os.path.isfile, cert_paths) + return ( + get_win_certfile() + or next(extant_cert_paths, None) + or _certifi_where() + ) + + +def _certifi_where(): + try: + return __import__('certifi').where() + except (ImportError, ResolutionError, ExtractionError): + pass diff --git a/project/venv/lib/python2.7/site-packages/setuptools/ssl_support.pyc b/project/venv/lib/python2.7/site-packages/setuptools/ssl_support.pyc new file mode 100644 index 0000000..3c9b795 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/ssl_support.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/unicode_utils.py b/project/venv/lib/python2.7/site-packages/setuptools/unicode_utils.py new file mode 100644 index 0000000..7c63efd --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/unicode_utils.py @@ -0,0 +1,44 @@ +import unicodedata +import sys + +from setuptools.extern import six + + +# HFS Plus uses decomposed UTF-8 +def decompose(path): + if isinstance(path, six.text_type): + return unicodedata.normalize('NFD', path) + try: + path = path.decode('utf-8') + path = unicodedata.normalize('NFD', path) + path = path.encode('utf-8') + except UnicodeError: + pass # Not UTF-8 + return path + + +def filesys_decode(path): + """ + Ensure that the given path is decoded, + NONE when no expected encoding works + """ + + if isinstance(path, six.text_type): + return path + + fs_enc = sys.getfilesystemencoding() or 'utf-8' + candidates = fs_enc, 'utf-8' + + for enc in candidates: + try: + return path.decode(enc) + except UnicodeDecodeError: + continue + + +def try_encode(string, enc): + "turn unicode encoding into a functional routine" + try: + return string.encode(enc) + except UnicodeEncodeError: + return None diff --git a/project/venv/lib/python2.7/site-packages/setuptools/unicode_utils.pyc b/project/venv/lib/python2.7/site-packages/setuptools/unicode_utils.pyc new file mode 100644 index 0000000..56ee061 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/unicode_utils.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/version.py b/project/venv/lib/python2.7/site-packages/setuptools/version.py new file mode 100644 index 0000000..95e1869 --- /dev/null +++ 
b/project/venv/lib/python2.7/site-packages/setuptools/version.py
@@ -0,0 +1,6 @@
+import pkg_resources
+
+try:
+    __version__ = pkg_resources.get_distribution('setuptools').version
+except Exception:
+    __version__ = 'unknown'
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/version.pyc b/project/venv/lib/python2.7/site-packages/setuptools/version.pyc
new file mode 100644
index 0000000..eead94b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/version.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/setuptools/wheel.py b/project/venv/lib/python2.7/site-packages/setuptools/wheel.py
new file mode 100644
index 0000000..e11f0a1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/setuptools/wheel.py
@@ -0,0 +1,211 @@
+"""Wheels support."""
+
+from distutils.util import get_platform
+import email
+import itertools
+import os
+import posixpath
+import re
+import zipfile
+
+import pkg_resources
+import setuptools
+from pkg_resources import parse_version
+from setuptools.extern.packaging.utils import canonicalize_name
+from setuptools.extern.six import PY3
+from setuptools import pep425tags
+from setuptools.command.egg_info import write_requirements
+
+
+__metaclass__ = type
+
+
+WHEEL_NAME = re.compile(
+    r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
+    ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
+    )\.whl$""",
+    re.VERBOSE).match
+
+NAMESPACE_PACKAGE_INIT = '''\
+try:
+    __import__('pkg_resources').declare_namespace(__name__)
+except ImportError:
+    __path__ = __import__('pkgutil').extend_path(__path__, __name__)
+'''
+
+
+def unpack(src_dir, dst_dir):
+    '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
+    for dirpath, dirnames, filenames in os.walk(src_dir):
+        subdir = os.path.relpath(dirpath, src_dir)
+        for f in filenames:
+            src = os.path.join(dirpath, f)
+            dst = os.path.join(dst_dir, subdir, f)
+            os.renames(src, dst)
+        for n, d in reversed(list(enumerate(dirnames))):
+            src = os.path.join(dirpath, d)
+            dst = os.path.join(dst_dir, subdir, d)
+            if not os.path.exists(dst):
+                # Directory does not exist in destination,
+                # rename it and prune it from os.walk list.
+                os.renames(src, dst)
+                del dirnames[n]
+    # Cleanup.
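+    # os.renames() above prunes any source directories it empties (it calls
+    # removedirs() on the old path), so the walk below only has to delete the
+    # empty directory skeleton left under src_dir; the assert documents that
+    # no files can remain at this point.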
+ for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True): + assert not filenames + os.rmdir(dirpath) + + +class Wheel: + + def __init__(self, filename): + match = WHEEL_NAME(os.path.basename(filename)) + if match is None: + raise ValueError('invalid wheel name: %r' % filename) + self.filename = filename + for k, v in match.groupdict().items(): + setattr(self, k, v) + + def tags(self): + '''List tags (py_version, abi, platform) supported by this wheel.''' + return itertools.product( + self.py_version.split('.'), + self.abi.split('.'), + self.platform.split('.'), + ) + + def is_compatible(self): + '''Is the wheel is compatible with the current platform?''' + supported_tags = pep425tags.get_supported() + return next((True for t in self.tags() if t in supported_tags), False) + + def egg_name(self): + return pkg_resources.Distribution( + project_name=self.project_name, version=self.version, + platform=(None if self.platform == 'any' else get_platform()), + ).egg_name() + '.egg' + + def get_dist_info(self, zf): + # find the correct name of the .dist-info dir in the wheel file + for member in zf.namelist(): + dirname = posixpath.dirname(member) + if (dirname.endswith('.dist-info') and + canonicalize_name(dirname).startswith( + canonicalize_name(self.project_name))): + return dirname + raise ValueError("unsupported wheel format. .dist-info not found") + + def install_as_egg(self, destination_eggdir): + '''Install wheel as an egg directory.''' + with zipfile.ZipFile(self.filename) as zf: + self._install_as_egg(destination_eggdir, zf) + + def _install_as_egg(self, destination_eggdir, zf): + dist_basename = '%s-%s' % (self.project_name, self.version) + dist_info = self.get_dist_info(zf) + dist_data = '%s.data' % dist_basename + egg_info = os.path.join(destination_eggdir, 'EGG-INFO') + + self._convert_metadata(zf, destination_eggdir, dist_info, egg_info) + self._move_data_entries(destination_eggdir, dist_data) + self._fix_namespace_packages(egg_info, destination_eggdir) + + @staticmethod + def _convert_metadata(zf, destination_eggdir, dist_info, egg_info): + def get_metadata(name): + with zf.open(posixpath.join(dist_info, name)) as fp: + value = fp.read().decode('utf-8') if PY3 else fp.read() + return email.parser.Parser().parsestr(value) + + wheel_metadata = get_metadata('WHEEL') + # Check wheel format version is supported. + wheel_version = parse_version(wheel_metadata.get('Wheel-Version')) + wheel_v1 = ( + parse_version('1.0') <= wheel_version < parse_version('2.0dev0') + ) + if not wheel_v1: + raise ValueError( + 'unsupported wheel format version: %s' % wheel_version) + # Extract to target directory. + os.mkdir(destination_eggdir) + zf.extractall(destination_eggdir) + # Convert metadata. 
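+        # What follows rewrites the wheel's .dist-info into egg-style
+        # EGG-INFO: environment markers are evaluated and stripped from the
+        # requirements, METADATA is renamed to PKG-INFO, and requires.txt is
+        # regenerated from the reconstructed install_requires/extras_require.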
+ dist_info = os.path.join(destination_eggdir, dist_info) + dist = pkg_resources.Distribution.from_location( + destination_eggdir, dist_info, + metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info), + ) + + # Note: Evaluate and strip markers now, + # as it's difficult to convert back from the syntax: + # foobar; "linux" in sys_platform and extra == 'test' + def raw_req(req): + req.marker = None + return str(req) + install_requires = list(sorted(map(raw_req, dist.requires()))) + extras_require = { + extra: sorted( + req + for req in map(raw_req, dist.requires((extra,))) + if req not in install_requires + ) + for extra in dist.extras + } + os.rename(dist_info, egg_info) + os.rename( + os.path.join(egg_info, 'METADATA'), + os.path.join(egg_info, 'PKG-INFO'), + ) + setup_dist = setuptools.Distribution( + attrs=dict( + install_requires=install_requires, + extras_require=extras_require, + ), + ) + write_requirements( + setup_dist.get_command_obj('egg_info'), + None, + os.path.join(egg_info, 'requires.txt'), + ) + + @staticmethod + def _move_data_entries(destination_eggdir, dist_data): + """Move data entries to their correct location.""" + dist_data = os.path.join(destination_eggdir, dist_data) + dist_data_scripts = os.path.join(dist_data, 'scripts') + if os.path.exists(dist_data_scripts): + egg_info_scripts = os.path.join( + destination_eggdir, 'EGG-INFO', 'scripts') + os.mkdir(egg_info_scripts) + for entry in os.listdir(dist_data_scripts): + # Remove bytecode, as it's not properly handled + # during easy_install scripts install phase. + if entry.endswith('.pyc'): + os.unlink(os.path.join(dist_data_scripts, entry)) + else: + os.rename( + os.path.join(dist_data_scripts, entry), + os.path.join(egg_info_scripts, entry), + ) + os.rmdir(dist_data_scripts) + for subdir in filter(os.path.exists, ( + os.path.join(dist_data, d) + for d in ('data', 'headers', 'purelib', 'platlib') + )): + unpack(subdir, destination_eggdir) + if os.path.exists(dist_data): + os.rmdir(dist_data) + + @staticmethod + def _fix_namespace_packages(egg_info, destination_eggdir): + namespace_packages = os.path.join( + egg_info, 'namespace_packages.txt') + if os.path.exists(namespace_packages): + with open(namespace_packages) as fp: + namespace_packages = fp.read().split() + for mod in namespace_packages: + mod_dir = os.path.join(destination_eggdir, *mod.split('.')) + mod_init = os.path.join(mod_dir, '__init__.py') + if os.path.exists(mod_dir) and not os.path.exists(mod_init): + with open(mod_init, 'w') as fp: + fp.write(NAMESPACE_PACKAGE_INIT) diff --git a/project/venv/lib/python2.7/site-packages/setuptools/wheel.pyc b/project/venv/lib/python2.7/site-packages/setuptools/wheel.pyc new file mode 100644 index 0000000..8f69006 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/wheel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/setuptools/windows_support.py b/project/venv/lib/python2.7/site-packages/setuptools/windows_support.py new file mode 100644 index 0000000..cb977cf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/setuptools/windows_support.py @@ -0,0 +1,29 @@ +import platform +import ctypes + + +def windows_only(func): + if platform.system() != 'Windows': + return lambda *args, **kwargs: None + return func + + +@windows_only +def hide_file(path): + """ + Set the hidden attribute on a file or directory. + + From http://stackoverflow.com/questions/19622133/ + + `path` must be text. 
+ """ + __import__('ctypes.wintypes') + SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW + SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD + SetFileAttributes.restype = ctypes.wintypes.BOOL + + FILE_ATTRIBUTE_HIDDEN = 0x02 + + ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN) + if not ret: + raise ctypes.WinError() diff --git a/project/venv/lib/python2.7/site-packages/setuptools/windows_support.pyc b/project/venv/lib/python2.7/site-packages/setuptools/windows_support.pyc new file mode 100644 index 0000000..e25db5f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/setuptools/windows_support.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/textgrid/__init__.py b/project/venv/lib/python2.7/site-packages/textgrid/__init__.py new file mode 100644 index 0000000..51f205f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/textgrid/__init__.py @@ -0,0 +1 @@ +from .textgrid import TextGrid, MLF, IntervalTier, PointTier, Interval, Point diff --git a/project/venv/lib/python2.7/site-packages/textgrid/__init__.pyc b/project/venv/lib/python2.7/site-packages/textgrid/__init__.pyc new file mode 100644 index 0000000..8cd4771 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/textgrid/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/textgrid/exceptions.py b/project/venv/lib/python2.7/site-packages/textgrid/exceptions.py new file mode 100644 index 0000000..0d9b731 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/textgrid/exceptions.py @@ -0,0 +1,3 @@ + +class TextGridError(Exception): + pass \ No newline at end of file diff --git a/project/venv/lib/python2.7/site-packages/textgrid/exceptions.pyc b/project/venv/lib/python2.7/site-packages/textgrid/exceptions.pyc new file mode 100644 index 0000000..2b6e29d Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/textgrid/exceptions.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/textgrid/textgrid.py b/project/venv/lib/python2.7/site-packages/textgrid/textgrid.py new file mode 100644 index 0000000..dfa217f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/textgrid/textgrid.py @@ -0,0 +1,874 @@ +#!/usr/bin/env python -O +# +# Copyright (c) 2011-2016 Kyle Gorman, Max Bane, Morgan Sonderegger +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# +# textgrid.py: classes for Praat TextGrid and HTK mlf files +# +# Max Bane +# Kyle Gorman +# Morgan Sonderegger + +from __future__ import print_function + +import re +import codecs +import os.path + +from sys import stderr +from bisect import bisect_left +from decimal import Decimal + +from .exceptions import TextGridError + +DEFAULT_TEXTGRID_PRECISION = 5 +DEFAULT_MLF_PRECISION = 5 + + +def _getMark(text, short): + """ + Return the mark or text entry on a line. Praat escapes double-quotes + by doubling them, so doubled double-quotes are read as single + double-quotes. Newlines within an entry are allowed. + """ + + line = text.readline() + + # check that the line begins with a valid entry type + if not short and not re.match(r'^\s*(text|mark) = "', line): + raise ValueError('Bad entry: ' + line) + + # read until the number of double-quotes is even + while line.count('"') % 2: + next_line = text.readline() + + if not next_line: + raise EOFError('Bad entry: ' + line[:20] + '...') + + line += next_line + if short: + pattern = r'^"(.*?)"\s*$' + else: + pattern = r'^\s*(text|mark) = "(.*?)"\s*$' + entry = re.match(pattern, line, re.DOTALL) + + return entry.groups()[-1].replace('""', '"') + + +def _formatMark(text): + return text.replace('"', '""') + + +def detectEncoding(f): + """ + This helper method returns the file encoding corresponding to path f. + This handles UTF-8, which is itself an ASCII extension, so also ASCII. + """ + encoding = 'ascii' + try: + with codecs.open(f, 'r', encoding='utf-16') as source: + source.readline() # Read one line to ensure correct encoding + except UnicodeError: + try: + with codecs.open(f, 'r', encoding='utf-8') as source: + source.readline() # Read one line to ensure correct encoding + except UnicodeError: + with codecs.open(f, 'r', encoding='ascii') as source: + source.readline() # Read one line to ensure correct encoding + else: + encoding = 'utf-8' + else: + encoding = 'utf-16' + + return encoding + + +class Point(object): + """ + Represents a point in time with an associated textual mark, as stored + in a PointTier. 
+ + """ + + def __init__(self, time, mark): + self.time = time + self.mark = mark + + def __repr__(self): + return 'Point({0}, {1})'.format(self.time, + self.mark if self.mark else None) + + def __lt__(self, other): + if hasattr(other, 'time'): + return self.time < other.time + elif hasattr(other, 'minTime'): + return self.time < other.minTime + else: + return self.time < other + + def __gt__(self, other): + if hasattr(other, 'time'): + return self.time > other.time + elif hasattr(other, 'maxTime'): + return self.time > other.maxTime + else: + return self.time > other + + def __eq__(self, other): + if isinstance(other, Point): + return self.time == other.time + elif isinstance(other, Interval): + return other.minTime < self.time < other.maxTime + else: + return self.time == other + + def __gte__(self, other): + return self > other or self == other + + def __lte__(self, other): + return self < other or self == other + + def __cmp__(self, other): + """ + In addition to the obvious semantics, Point/Interval comparison is + 0 iff the point is inside the interval (non-inclusively), if you + need inclusive membership, use Interval.__contains__ + """ + if hasattr(other, 'time'): + return cmp(self.time, other.time) + elif hasattr(other, 'minTime') and hasattr(other, 'maxTime'): + return cmp(self.time, other.minTime) + \ + cmp(self.time, other.maxTime) + else: # hopefully numerical + return cmp(self.time, other) + + def __iadd__(self, other): + self.time += other + + def __isub__(self, other): + self.time -= other + + +def decode(string): + """ + Decode HTK's mangling of UTF-8 strings into something useful + """ + # print(string) + return string + return string.decode('string_escape').decode('UTF-8') + + +class Interval(object): + """ + Represents an interval of time, with an associated textual mark, as + stored in an IntervalTier. + + """ + + def __init__(self, minTime, maxTime, mark): + if minTime >= maxTime: + # Praat does not support intervals with duration <= 0 + raise ValueError(minTime, maxTime) + self.minTime = minTime + self.maxTime = maxTime + self.mark = mark + self.strict =True + + def __repr__(self): + return 'Interval({0}, {1}, {2})'.format(self.minTime, self.maxTime, + self.mark if self.mark else None) + + def duration(self): + """ + Returns the duration of the interval in seconds. 
+ """ + return self.maxTime - self.minTime + + def __lt__(self, other): + if hasattr(other, 'minTime'): + if self.strict and self.overlaps(other): + raise (ValueError(self, other)) + return self.minTime < other.minTime + elif hasattr(other, 'time'): + return self.maxTime < other.time + else: + return self.maxTime < other + + def __gt__(self, other): + if hasattr(other, 'maxTime'): + if self.strict and self.overlaps(other): + raise (ValueError(self, other)) + return self.maxTime > other.maxTime + elif hasattr(other, 'time'): + return self.minTime > other.time + else: + return self.minTime > other + + def __gte__(self, other): + return self > other or self == other + + def __lte__(self, other): + return self < other or self == other + + def __cmp__(self, other): + if hasattr(other, 'minTime') and hasattr(other, 'maxTime'): + if self.overlaps(other): + raise ValueError(self, other) + # this returns the two intervals, so user can patch things + # up if s/he so chooses + return cmp(self.minTime, other.minTime) + elif hasattr(other, 'time'): # comparing Intervals and Points + return cmp(self.minTime, other.time) + \ + cmp(self.maxTime, other.time) + else: + return cmp(self.minTime, other) + cmp(self.maxTime, other) + + def __eq__(self, other): + """ + This might seem superfluous but not that a ValueError will be + raised if you compare two intervals to each other...not anymore + """ + if hasattr(other, 'minTime') and hasattr(other, 'maxTime'): + if self.minTime == other.minTime: + if self.maxTime == other.maxTime: + return True + elif hasattr(other, 'time'): + return self.minTime < other.time < self.maxTime + else: + return False + + def __iadd__(self, other): + self.minTime += other + self.maxTime += other + + def __isub__(self, other): + self.minTime -= other + self.maxTime -= other + + def overlaps(self, other): + """ + Tests whether self overlaps with the given interval. Symmetric. + See: http://www.rgrjr.com/emacs/overlap.html + """ + return other.minTime < self.maxTime and \ + self.minTime < other.maxTime + + def __contains__(self, other): + """ + Tests whether the given time point is contained in this interval, + either a numeric type or a Point object. + """ + if hasattr(other, 'minTime') and hasattr(other, 'maxTime'): + return self.minTime <= other.minTime and \ + other.maxTime <= self.maxTime + elif hasattr(other, 'time'): + return self.minTime <= other.time <= self.maxTime + else: + return self.minTime <= other <= self.maxTime + + def bounds(self): + return (self.minTime, self.maxTime) + + +class PointTier(object): + """ + Represents Praat PointTiers (also called TextTiers) as list of Points + (e.g., for point in pointtier). A PointTier is used much like a Python + set in that it has add/remove methods, not append/extend methods. 
+ + """ + + def __init__(self, name=None, minTime=0., maxTime=None): + self.name = name + self.minTime = minTime + self.maxTime = maxTime + self.points = [] + + def __str__(self): + return ''.format(self.name, len(self)) + + def __repr__(self): + return 'PointTier({0}, {1})'.format(self.name, self.points) + + def __iter__(self): + return iter(self.points) + + def __len__(self): + return len(self.points) + + def __getitem__(self, i): + return self.points[i] + + def add(self, time, mark): + """ + constructs a Point and adds it to the PointTier, maintaining order + """ + self.addPoint(Point(time, mark)) + + def addPoint(self, point): + if point < self.minTime: + raise ValueError(self.minTime) # too early + if self.maxTime and point > self.maxTime: + raise ValueError(self.maxTime) # too late + i = bisect_left(self.points, point) + if i < len(self.points) and self.points[i].time == point.time: + raise ValueError(point) # we already got one right there + self.points.insert(i, point) + + def remove(self, time, mark): + """ + removes a constructed Point i from the PointTier + """ + self.removePoint(Point(time, mark)) + + def removePoint(self, point): + self.points.remove(point) + + def read(self, f, round_digits=DEFAULT_TEXTGRID_PRECISION): + """ + Read the Points contained in the Praat-formated PointTier/TextTier + file indicated by string f + """ + to_round = Decimal('.{}1'.format('0' * (round_digits - 1))) + encoding = detectEncoding(f) + with codecs.open(f, 'r', encoding=encoding) as source: + file_type, short = parse_header(source) + if file_type != 'TextTier': + raise TextGridError('The file could not be parsed as a PointTier as it is lacking a proper header.') + + self.minTime = parse_line(source.readline(), short, to_round) + self.maxTime = parse_line(source.readline(), short, to_round) + n = int(parse_line(source.readline(), short, to_round)) + for i in range(n): + source.readline().rstrip() # header + itim = parse_line(source.readline(), short, to_round) + imrk = _getMark(source, short) + self.points.append(Point(itim, imrk)) + + def write(self, f): + """ + Write the current state into a Praat-format PointTier/TextTier + file. f may be a file object to write to, or a string naming a + path for writing + """ + sink = f if hasattr(f, 'write') else codecs.open(f, 'w', 'UTF-8') + print('File type = "ooTextFile"', file=sink) + print('Object class = "TextTier"\n', file=sink) + + print('xmin = {0}'.format(self.minTime), file=sink) + print('xmax = {0}'.format(self.maxTime if self.maxTime \ + else self.points[-1].time), file=sink) + print('points: size = {0}'.format(len(self)), file=sink) + for (i, point) in enumerate(self.points, 1): + print('points [{0}]:'.format(i), file=sink) + print('\ttime = {0}'.format(point.time), file=sink) + mark = _formatMark(point.mark) + print('\tmark = "{0}"'.format(mark), file=sink) + sink.close() + + def bounds(self): + return (self.minTime, self.maxTime or self.points[-1].time) + + # alternative constructor + + @classmethod + def fromFile(cls, f, name=None): + pt = cls(name=name) + pt.read(f) + return pt + + +class IntervalTier(object): + """ + Represents Praat IntervalTiers as list of sequence types of Intervals + (e.g., for interval in intervaltier). An IntervalTier is used much like a + Python set in that it has add/remove methods, not append/extend methods. 
+ + """ + + def __init__(self, name=None, minTime=0., maxTime=None): + self.name = name + self.minTime = minTime + self.maxTime = maxTime + self.intervals = [] + self.strict = True + + def __str__(self): + return ''.format(self.name, + len(self)) + + def __repr__(self): + return 'IntervalTier({0}, {1})'.format(self.name, self.intervals) + + def __iter__(self): + return iter(self.intervals) + + def __len__(self): + return len(self.intervals) + + def __getitem__(self, i): + return self.intervals[i] + + def add(self, minTime, maxTime, mark): + interval = Interval(minTime, maxTime, mark) + interval.strict = self.strict + self.addInterval(interval) + + def addInterval(self, interval): + if interval.minTime < self.minTime: # too early + raise ValueError(self.minTime) + if self.maxTime and interval.maxTime > self.maxTime: # too late + # raise ValueError, self.maxTime + raise ValueError(self.maxTime) + i = bisect_left(self.intervals, interval) + if i != len(self.intervals) and self.intervals[i] == interval: + raise ValueError(self.intervals[i]) + interval.strict = self.strict + self.intervals.insert(i, interval) + + def remove(self, minTime, maxTime, mark): + self.removeInterval(Interval(minTime, maxTime, mark)) + + def removeInterval(self, interval): + self.intervals.remove(interval) + + def indexContaining(self, time): + """ + Returns the index of the interval containing the given time point, + or None if the time point is outside the bounds of this tier. The + argument can be a numeric type, or a Point object. + """ + i = bisect_left(self.intervals, time) + if i != len(self.intervals): + if self.intervals[i].minTime <= time <= \ + self.intervals[i].maxTime: + return i + + def intervalContaining(self, time): + """ + Returns the interval containing the given time point, or None if + the time point is outside the bounds of this tier. The argument + can be a numeric type, or a Point object. + """ + i = self.indexContaining(time) + if i: + return self.intervals[i] + + def read(self, f, round_digits=DEFAULT_TEXTGRID_PRECISION): + """ + Read the Intervals contained in the Praat-formated IntervalTier + file indicated by string f + """ + to_round = Decimal('.{}1'.format('0' * (round_digits - 1))) + encoding = detectEncoding(f) + with codecs.open(f, 'r', encoding=encoding) as source: + file_type, short = parse_header(source) + if file_type != 'IntervalTier': + raise TextGridError('The file could not be parsed as a IntervalTier as it is lacking a proper header.') + + self.minTime = parse_line(source.readline(), short, to_round) + self.maxTime = parse_line(source.readline(), short, to_round) + n = int(parse_line(source.readline(), short, to_round)) + for i in range(n): + source.readline().rstrip() # header + imin = parse_line(source.readline(), short, to_round) + imax = parse_line(source.readline(), short, to_round) + imrk = _getMark(source, short) + self.intervals.append(Interval(imin, imax, imrk)) + + def _fillInTheGaps(self, null): + """ + Returns a pseudo-IntervalTier with the temporal gaps filled in + """ + prev_t = self.minTime + output = [] + for interval in self.intervals: + if prev_t < interval.minTime: + output.append(Interval(prev_t, interval.minTime, null)) + output.append(interval) + prev_t = interval.maxTime + # last interval + if self.maxTime is not None and prev_t < self.maxTime: # also false if maxTime isn't defined + output.append(Interval(prev_t, self.maxTime, null)) + return output + + def write(self, f, null=''): + """ + Write the current state into a Praat-format IntervalTier file. 
+        may be a file object to write to, or a string naming a path for
+        writing
+        """
+        sink = f if hasattr(f, 'write') else open(f, 'w')
+        print('File type = "ooTextFile"', file=sink)
+        print('Object class = "IntervalTier"\n', file=sink)
+        print('xmin = {0}'.format(self.minTime), file=sink)
+        print('xmax = {0}'.format(self.maxTime if self.maxTime \
+              else self.intervals[-1].maxTime), file=sink)
+        # compute the number of intervals and make the empty ones
+        output = self._fillInTheGaps(null)
+        # write it all out
+        print('intervals: size = {0}'.format(len(output)), file=sink)
+        for (i, interval) in enumerate(output, 1):
+            print('intervals [{0}]'.format(i), file=sink)
+            print('\txmin = {0}'.format(interval.minTime), file=sink)
+            print('\txmax = {0}'.format(interval.maxTime), file=sink)
+            mark = _formatMark(interval.mark)
+            print('\ttext = "{0}"'.format(mark), file=sink)
+        sink.close()
+
+    def bounds(self):
+        return self.minTime, self.maxTime or self.intervals[-1].maxTime
+
+    # alternative constructor
+
+    @classmethod
+    def fromFile(cls, f, name=None):
+        it = cls(name=name)
+        it.intervals = []
+        it.read(f)
+        return it
+
+
+def parse_line(line, short, to_round):
+    line = line.strip()
+    if short:
+        if '"' in line:
+            return line[1:-1]
+        return Decimal(line).quantize(to_round)
+    if '"' in line:
+        m = re.match(r'.+? = "(.*)"', line)
+        return m.groups()[0]
+    m = re.match(r'.+? = (.*)', line)
+    return Decimal(m.groups()[0]).quantize(to_round)
+
+
+def parse_header(source):
+    header = source.readline()  # header junk
+    m = re.match('File type = "([\w ]+)"', header)
+    if m is None or not m.groups()[0].startswith('ooTextFile'):
+        raise TextGridError('The file could not be parsed as a Praat text file as it is lacking a proper header.')
+
+    short = 'short' in m.groups()[0]
+    file_type = parse_line(source.readline(), short, '')  # header junk
+    t = source.readline()  # header junk
+    return file_type, short
+
+
+class TextGrid(object):
+    """
+    Represents Praat TextGrids as list of sequence types of tiers (e.g.,
+    for tier in textgrid), and as map from names to tiers (e.g.,
+    textgrid['tierName']). Whereas the *Tier classes that make up a
+    TextGrid impose a strict ordering on Points/Intervals, a TextGrid
+    instance is given order by the user. Like a true Python list, there
+    are append/extend methods for a TextGrid.
+
+    """
+
+    def __init__(self, name=None, minTime=0., maxTime=None, strict = True):
+        """
+        Construct a TextGrid instance with the given (optional) name
+        (which is only relevant for MLF stuff). If file is given, it is a
+        string naming the location of a Praat-format TextGrid file from
+        which to populate this instance.
+        """
+        self.name = name
+        self.minTime = minTime
+        self.maxTime = maxTime
+        self.tiers = []
+        self.strict = strict
+
+    def __str__(self):
+        return '<TextGrid {0}, {1} Tiers>'.format(self.name, len(self))
+
+    def __repr__(self):
+        return 'TextGrid({0}, {1})'.format(self.name, self.tiers)
+
+    def __iter__(self):
+        return iter(self.tiers)
+
+    def __len__(self):
+        return len(self.tiers)
+
+    def __getitem__(self, i):
+        """
+        Return the ith tier
+        """
+        return self.tiers[i]
+
+    def getFirst(self, tierName):
+        """
+        Return the first tier with the given name.
+        """
+        for t in self.tiers:
+            if t.name == tierName:
+                return t
+
+    def getList(self, tierName):
+        """
+        Return a list of all tiers with the given name.
+ """ + tiers = [] + for t in self.tiers: + if t.name == tierName: + tiers.append(t) + return tiers + + def getNames(self): + """ + return a list of the names of the intervals contained in this + TextGrid + """ + return [tier.name for tier in self.tiers] + + def append(self, tier): + if self.maxTime is not None and tier.maxTime is not None and tier.maxTime > self.maxTime: + raise ValueError(self.maxTime) # too late + tier.strict = self.strict + for i in tier: + i.strict = self.strict + self.tiers.append(tier) + + def extend(self, tiers): + if min([t.minTime for t in tiers]) < self.minTime: + raise ValueError(self.minTime) # too early + if self.maxTime and max([t.minTime for t in tiers]) > self.maxTime: + raise ValueError(self.maxTime) # too late + self.tiers.extend(tiers) + + def pop(self, i=None): + """ + Remove and return tier at index i (default last). Will raise + IndexError if TextGrid is empty or index is out of range. + """ + return (self.tiers.pop(i) if i else self.tiers.pop()) + + def read(self, f, round_digits=DEFAULT_TEXTGRID_PRECISION): + """ + Read the tiers contained in the Praat-formatted TextGrid file + indicated by string f. Times are rounded to the specified precision. + """ + to_round = Decimal('.{}1'.format('0' * (round_digits - 1))) + encoding = detectEncoding(f) + with codecs.open(f, 'r', encoding=encoding) as source: + file_type, short = parse_header(source) + if file_type != 'TextGrid': + raise TextGridError('The file could not be parsed as a TextGrid as it is lacking a proper header.') + self.minTime = parse_line(source.readline(), short, to_round) + self.maxTime = parse_line(source.readline(), short, to_round) + source.readline() # more header junk + if short: + m = int(source.readline().strip()) # will be self.n + else: + m = int(source.readline().strip().split()[2]) # will be self.n + if not short: + source.readline() + for i in range(m): # loop over grids + if not short: + source.readline() + if parse_line(source.readline(), short, to_round) == 'IntervalTier': + inam = parse_line(source.readline(), short, to_round) + imin = parse_line(source.readline(), short, to_round) + imax = parse_line(source.readline(), short, to_round) + itie = IntervalTier(inam, imin, imax) + itie.strict = self.strict + n = int(parse_line(source.readline(), short, to_round)) + for j in range(n): + if not short: + source.readline().rstrip().split() # header junk + jmin = parse_line(source.readline(), short, to_round) + jmax = parse_line(source.readline(), short, to_round) + jmrk = _getMark(source, short) + if jmin < jmax: # non-null + itie.addInterval(Interval(jmin, jmax, jmrk)) + self.append(itie) + else: # pointTier + inam = parse_line(source.readline(), short, to_round) + imin = parse_line(source.readline(), short, to_round) + imax = parse_line(source.readline(), short, to_round) + itie = PointTier(inam) + n = int(parse_line(source.readline(), short, to_round)) + for j in range(n): + source.readline().rstrip() # header junk + jtim = parse_line(source.readline(), short, to_round) + jmrk = _getMark(source, short) + itie.addPoint(Point(jtim, jmrk)) + self.append(itie) + + def write(self, f, null=''): + """ + Write the current state into a Praat-format TextGrid file. f may + be a file object to write to, or a string naming a path to open + for writing. 
+ """ + sink = f if hasattr(f, 'write') else codecs.open(f, 'w', 'UTF-8') + print('File type = "ooTextFile"', file=sink) + print('Object class = "TextGrid"\n', file=sink) + print('xmin = {0}'.format(self.minTime), file=sink) + # compute max time + maxT = self.maxTime + if not maxT: + maxT = max([t.maxTime if t.maxTime else t[-1].maxTime \ + for t in self.tiers]) + print('xmax = {0}'.format(maxT), file=sink) + print('tiers? ', file=sink) + print('size = {0}'.format(len(self)), file=sink) + print('item []:', file=sink) + for (i, tier) in enumerate(self.tiers, 1): + print('\titem [{0}]:'.format(i), file=sink) + if tier.__class__ == IntervalTier: + print('\t\tclass = "IntervalTier"', file=sink) + print('\t\tname = "{0}"'.format(tier.name), file=sink) + print('\t\txmin = {0}'.format(tier.minTime), file=sink) + print('\t\txmax = {0}'.format(maxT), file=sink) + # compute the number of intervals and make the empty ones + output = tier._fillInTheGaps(null) + print('\t\tintervals: size = {0}'.format( + len(output)), file=sink) + for (j, interval) in enumerate(output, 1): + print('\t\t\tintervals [{0}]:'.format(j), file=sink) + print('\t\t\t\txmin = {0}'.format( + interval.minTime), file=sink) + print('\t\t\t\txmax = {0}'.format( + interval.maxTime), file=sink) + mark = _formatMark(interval.mark) + print('\t\t\t\ttext = "{0}"'.format(mark), file=sink) + elif tier.__class__ == PointTier: # PointTier + print('\t\tclass = "TextTier"', file=sink) + print('\t\tname = "{0}"'.format(tier.name), file=sink) + print('\t\txmin = {0}'.format(tier.minTime), file=sink) + print('\t\txmax = {0}'.format(maxT), file=sink) + print('\t\tpoints: size = {0}'.format(len(tier)), file=sink) + for (k, point) in enumerate(tier, 1): + print('\t\t\tpoints [{0}]:'.format(k), file=sink) + print('\t\t\t\ttime = {0}'.format(point.time), file=sink) + mark = _formatMark(point.mark) + print('\t\t\t\tmark = "{0}"'.format(mark), file=sink) + sink.close() + + # alternative constructor + + @classmethod + def fromFile(cls, f, name=None): + tg = cls(name=name) + tg.read(f) + return tg + + +class MLF(object): + """ + Read in a HTK .mlf file generated with HVite -o SM and turn it into a + list of TextGrids. The resulting class can be iterated over to give + one TextGrid at a time, or the write(prefix='') class method can be + used to write all the resulting TextGrids into separate files. + + Unlike other classes, this is always initialized from a text file. + """ + + def __init__(self, f, samplerate=10e6): + self.grids = [] + self.read(f, samplerate) + + def __iter__(self): + return iter(self.grids) + + def __str__(self): + return ''.format(len(self)) + + def __repr__(self): + return 'MLF({0})'.format(self.grids) + + def __len__(self): + return len(self.grids) + + def __getitem__(self, i): + """ + Return the ith TextGrid + """ + return self.grids[i] + + def read(self, f, samplerate, round_digits=DEFAULT_MLF_PRECISION): + source = open(f, 'r') # HTK returns ostensible ASCII + samplerate = Decimal(samplerate) + source.readline() # header + to_round = Decimal('.{}1'.format('0' * (round_digits - 1))) + while True: # loop over text + name = re.match('\"(.*)\"', source.readline().rstrip()) + if name: + name = name.groups()[0] + grid = TextGrid(name) + phon = IntervalTier(name='phones') + word = IntervalTier(name='words') + wmrk = '' + wsrt = 0. + wend = 0. 
+ while 1: # loop over the lines in each grid + line = source.readline().rstrip().split() + if len(line) == 4: # word on this baby + pmin = Decimal(line[0]).quantize(to_round) / samplerate + pmax = Decimal(line[1]).quantize(to_round) / samplerate + if pmin == pmax: + raise ValueError('null duration interval') + phon.add(pmin, pmax, line[2]) + if wmrk: + word.add(wsrt, wend, wmrk) + wmrk = decode(line[3]) + wsrt = pmin + wend = pmax + elif len(line) == 3: # just phone + pmin = Decimal(line[0]).quantize(to_round) / samplerate + pmax = Decimal(line[1]).quantize(to_round) / samplerate + if line[2] == 'sp' and pmin != pmax: + if wmrk: + word.add(wsrt, wend, wmrk) + wmrk = decode(line[2]) + wsrt = pmin + wend = pmax + elif pmin != pmax: + phon.add(pmin, pmax, line[2]) + wend = pmax + else: # it's a period + word.add(wsrt, wend, wmrk) + self.grids.append(grid) + break + grid.append(phon) + grid.append(word) + else: + source.close() + break + + def write(self, prefix=''): + """ + Write the current state into Praat-formatted TextGrids. The + filenames that the output is stored in are taken from the HTK + label files. If a string argument is given, then the any prefix in + the name of the label file (e.g., "mfc/myLabFile.lab"), it is + truncated and files are written to the directory given by the + prefix. An IOError will result if the folder does not exist. + + The number of TextGrids is returned. + """ + for grid in self.grids: + (junk, tail) = os.path.split(grid.name) + (root, junk) = os.path.splitext(tail) + my_path = os.path.join(prefix, root + '.TextGrid') + grid.write(codecs.open(my_path, 'w', 'UTF-8')) + return len(self.grids) diff --git a/project/venv/lib/python2.7/site-packages/textgrid/textgrid.pyc b/project/venv/lib/python2.7/site-packages/textgrid/textgrid.pyc new file mode 100644 index 0000000..8f73dce Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/textgrid/textgrid.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/INSTALLER b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/LICENSE.txt b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/LICENSE.txt new file mode 100644 index 0000000..c3441e6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/LICENSE.txt @@ -0,0 +1,22 @@ +"wheel" copyright (c) 2012-2014 Daniel Holth and +contributors. + +The MIT License + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/METADATA new file mode 100644 index 0000000..2354dd4 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/METADATA @@ -0,0 +1,60 @@ +Metadata-Version: 2.1 +Name: wheel +Version: 0.33.1 +Summary: A built-package format for Python. +Home-page: https://github.com/pypa/wheel +Author: Daniel Holth +Author-email: dholth@fastmail.fm +Maintainer: Alex Grönholm +Maintainer-email: alex.gronholm@nextday.fi +License: MIT +Project-URL: Issue Tracker, https://github.com/pypa/wheel/issues +Project-URL: Documentation, https://wheel.readthedocs.io/ +Project-URL: Changelog, https://wheel.readthedocs.io/en/stable/news.html +Keywords: wheel,packaging +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Topic :: System :: Archiving :: Packaging +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* +Provides-Extra: test +Requires-Dist: pytest (>=3.0.0) ; extra == 'test' +Requires-Dist: pytest-cov ; extra == 'test' + +wheel +===== + +This library is the reference implementation of the Python wheel packaging +standard, as defined in `PEP 427`_. + +It has two different roles: + +#. A setuptools_ extension for building wheels that provides the + ``bdist_wheel`` setuptools command +#. A command line tool for working with wheel files + +It should be noted that wheel is **not** intended to be used as a library, and +as such there is no stable, public API. + +.. _PEP 427: https://www.python.org/dev/peps/pep-0427/ +.. _setuptools: https://pypi.org/project/setuptools/ + + +Code of Conduct +--------------- + +Everyone interacting in the wheel project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. + +.. 
_PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ + + diff --git a/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/RECORD new file mode 100644 index 0000000..ddd03fa --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/RECORD @@ -0,0 +1,32 @@ +../../../bin/wheel,sha256=ffNKfZicvj9Ihb3MLe_n5ar8hy_RdGtxigoE-BN3Ov4,278 +wheel-0.33.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +wheel-0.33.1.dist-info/LICENSE.txt,sha256=zKniDGrx_Pv2lAjzd3aShsvuvN7TNhAMm0o_NfvmNeQ,1125 +wheel-0.33.1.dist-info/METADATA,sha256=ZKo4q8P_vXXg-OGjDZGlaIf4gafusgkeCd6jDxbrJLA,2082 +wheel-0.33.1.dist-info/RECORD,, +wheel-0.33.1.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110 +wheel-0.33.1.dist-info/entry_points.txt,sha256=N8HbYFST3yrNQYeB2wXWBEPUhFsEtKNRPaCFGJPyqyc,108 +wheel-0.33.1.dist-info/top_level.txt,sha256=HxSBIbgEstMPe4eFawhA66Mq-QYHMopXVoAncfjb_1c,6 +wheel/__init__.py,sha256=OyOAcqlvJaBEASYRGijJyAkY_HL_5HR3alvjO5BEwKg,96 +wheel/__init__.pyc,, +wheel/__main__.py,sha256=lF-YLO4hdQmoWuh4eWZd8YL1U95RSdm76sNLBXa0vjE,417 +wheel/__main__.pyc,, +wheel/bdist_wheel.py,sha256=x2beC81u8AzTNVLwD36EPkVX5AxAYs2q70N09BxvMN0,14756 +wheel/bdist_wheel.pyc,, +wheel/cli/__init__.py,sha256=GWSoGUpRabTf8bk3FsNTPrc5Fsr8YOv2dX55iY2W7eY,2572 +wheel/cli/__init__.pyc,, +wheel/cli/convert.py,sha256=me0l6G4gSw-EBVhzjSr7yWYWBp9spMz7mnXlyJTiXso,9497 +wheel/cli/convert.pyc,, +wheel/cli/pack.py,sha256=vkvZc4-rRZyWiwc6sHjpqIjzwDRMEF5u3JUNU9NY_jA,2263 +wheel/cli/pack.pyc,, +wheel/cli/unpack.py,sha256=0VWzT7U_xyenTPwEVavxqvdee93GPvAFHnR3Uu91aRc,673 +wheel/cli/unpack.pyc,, +wheel/metadata.py,sha256=uBv2aOz4U2sERF834C8DeNo235drcsp3ypTzT7MTWEA,4699 +wheel/metadata.pyc,, +wheel/pep425tags.py,sha256=Jdjbnq17kqwPRKJCMb2E1VccNgnC3H6iQL7VGaxkPao,5908 +wheel/pep425tags.pyc,, +wheel/pkginfo.py,sha256=GR76kupQzn1x9sKDaXuE6B6FsZ4OkfRtG7pndlXPvQ4,1257 +wheel/pkginfo.pyc,, +wheel/util.py,sha256=zwVIk-9qWVQLRMgkgQTMp4TRE4HY03-tCUxLrtCpsfU,924 +wheel/util.pyc,, +wheel/wheelfile.py,sha256=U_13q1XpVt02704XXkFRzmUbz_0R9-GgNxMOZFP3tOs,7168 +wheel/wheelfile.pyc,, diff --git a/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/WHEEL new file mode 100644 index 0000000..c8240f0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/entry_points.txt b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/entry_points.txt new file mode 100644 index 0000000..b27acad --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/entry_points.txt @@ -0,0 +1,6 @@ +[console_scripts] +wheel = wheel.cli:main + +[distutils.commands] +bdist_wheel = wheel.bdist_wheel:bdist_wheel + diff --git a/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/top_level.txt new file mode 100644 index 0000000..2309722 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel-0.33.1.dist-info/top_level.txt @@ -0,0 +1 @@ +wheel diff --git a/project/venv/lib/python2.7/site-packages/wheel/__init__.py b/project/venv/lib/python2.7/site-packages/wheel/__init__.py new 
file mode 100644 index 0000000..745cefc --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/__init__.py @@ -0,0 +1,2 @@ +# __variables__ with double-quoted values will be available in setup.py: +__version__ = "0.33.1" diff --git a/project/venv/lib/python2.7/site-packages/wheel/__init__.pyc b/project/venv/lib/python2.7/site-packages/wheel/__init__.pyc new file mode 100644 index 0000000..a3e6c68 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/__main__.py b/project/venv/lib/python2.7/site-packages/wheel/__main__.py new file mode 100644 index 0000000..b3773a2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/__main__.py @@ -0,0 +1,19 @@ +""" +Wheel command line tool (enable python -m wheel syntax) +""" + +import sys + + +def main(): # needed for console script + if __package__ == '': + # To be able to run 'python wheel-0.9.whl/wheel': + import os.path + path = os.path.dirname(os.path.dirname(__file__)) + sys.path[0:0] = [path] + import wheel.cli + sys.exit(wheel.cli.main()) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/project/venv/lib/python2.7/site-packages/wheel/__main__.pyc b/project/venv/lib/python2.7/site-packages/wheel/__main__.pyc new file mode 100644 index 0000000..4975808 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/__main__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/bdist_wheel.py b/project/venv/lib/python2.7/site-packages/wheel/bdist_wheel.py new file mode 100644 index 0000000..4f06d36 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/bdist_wheel.py @@ -0,0 +1,372 @@ +""" +Create a wheel (.whl) distribution. + +A wheel is a built archive format. +""" + +import os +import shutil +import sys +import re +from email.generator import Generator +from distutils.core import Command +from distutils.sysconfig import get_python_version +from distutils import log as logger +from glob import iglob +from shutil import rmtree +from warnings import warn + +import pkg_resources + +from .pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag, get_platform +from .pkginfo import write_pkg_info +from .metadata import pkginfo_to_metadata +from .wheelfile import WheelFile +from . import pep425tags +from . 
import __version__ as wheel_version + + +safe_name = pkg_resources.safe_name +safe_version = pkg_resources.safe_version + +PY_LIMITED_API_PATTERN = r'cp3\d' + + +def safer_name(name): + return safe_name(name).replace('-', '_') + + +def safer_version(version): + return safe_version(version).replace('-', '_') + + +class bdist_wheel(Command): + + description = 'create a wheel distribution' + + user_options = [('bdist-dir=', 'b', + "temporary directory for creating the distribution"), + ('plat-name=', 'p', + "platform name to embed in generated filenames " + "(default: %s)" % get_platform()), + ('keep-temp', 'k', + "keep the pseudo-installation tree around after " + + "creating the distribution archive"), + ('dist-dir=', 'd', + "directory to put final built distributions in"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + ('relative', None, + "build the archive using relative paths" + "(default: false)"), + ('owner=', 'u', + "Owner name used when creating a tar file" + " [default: current user]"), + ('group=', 'g', + "Group name used when creating a tar file" + " [default: current group]"), + ('universal', None, + "make a universal wheel" + " (default: false)"), + ('python-tag=', None, + "Python implementation compatibility tag" + " (default: py%s)" % get_impl_ver()[0]), + ('build-number=', None, + "Build number for this particular version. " + "As specified in PEP-0427, this must start with a digit. " + "[default: None]"), + ('py-limited-api=', None, + "Python tag (cp32|cp33|cpNN) for abi3 wheel tag" + " (default: false)"), + ] + + boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal'] + + def initialize_options(self): + self.bdist_dir = None + self.data_dir = None + self.plat_name = None + self.plat_tag = None + self.format = 'zip' + self.keep_temp = False + self.dist_dir = None + self.egginfo_dir = None + self.root_is_pure = None + self.skip_build = None + self.relative = False + self.owner = None + self.group = None + self.universal = False + self.python_tag = 'py' + get_impl_ver()[0] + self.build_number = None + self.py_limited_api = False + self.plat_name_supplied = False + + def finalize_options(self): + if self.bdist_dir is None: + bdist_base = self.get_finalized_command('bdist').bdist_base + self.bdist_dir = os.path.join(bdist_base, 'wheel') + + self.data_dir = self.wheel_dist_name + '.data' + self.plat_name_supplied = self.plat_name is not None + + need_options = ('dist_dir', 'plat_name', 'skip_build') + + self.set_undefined_options('bdist', + *zip(need_options, need_options)) + + self.root_is_pure = not (self.distribution.has_ext_modules() + or self.distribution.has_c_libraries()) + + if self.py_limited_api and not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api): + raise ValueError("py-limited-api must match '%s'" % PY_LIMITED_API_PATTERN) + + # Support legacy [wheel] section for setting universal + wheel = self.distribution.get_option_dict('wheel') + if 'universal' in wheel: + # please don't define this in your global configs + logger.warn('The [wheel] section is deprecated. 
Use [bdist_wheel] instead.') + val = wheel['universal'][1].strip() + if val.lower() in ('1', 'true', 'yes'): + self.universal = True + + if self.build_number is not None and not self.build_number[:1].isdigit(): + raise ValueError("Build tag (build-number) must start with a digit.") + + @property + def wheel_dist_name(self): + """Return distribution full name with - replaced with _""" + components = (safer_name(self.distribution.get_name()), + safer_version(self.distribution.get_version())) + if self.build_number: + components += (self.build_number,) + return '-'.join(components) + + def get_tag(self): + # bdist sets self.plat_name if unset, we should only use it for purepy + # wheels if the user supplied it. + if self.plat_name_supplied: + plat_name = self.plat_name + elif self.root_is_pure: + plat_name = 'any' + else: + plat_name = self.plat_name or get_platform() + if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647: + plat_name = 'linux_i686' + plat_name = plat_name.replace('-', '_').replace('.', '_') + + if self.root_is_pure: + if self.universal: + impl = 'py2.py3' + else: + impl = self.python_tag + tag = (impl, 'none', plat_name) + else: + impl_name = get_abbr_impl() + impl_ver = get_impl_ver() + impl = impl_name + impl_ver + # We don't work on CPython 3.1, 3.0. + if self.py_limited_api and (impl_name + impl_ver).startswith('cp3'): + impl = self.py_limited_api + abi_tag = 'abi3' + else: + abi_tag = str(get_abi_tag()).lower() + tag = (impl, abi_tag, plat_name) + supported_tags = pep425tags.get_supported( + supplied_platform=plat_name if self.plat_name_supplied else None) + # XXX switch to this alternate implementation for non-pure: + if not self.py_limited_api: + assert tag == supported_tags[0], "%s != %s" % (tag, supported_tags[0]) + assert tag in supported_tags, "would build wheel with unsupported tag {}".format(tag) + return tag + + def run(self): + build_scripts = self.reinitialize_command('build_scripts') + build_scripts.executable = 'python' + build_scripts.force = True + + build_ext = self.reinitialize_command('build_ext') + build_ext.inplace = False + + if not self.skip_build: + self.run_command('build') + + install = self.reinitialize_command('install', + reinit_subcommands=True) + install.root = self.bdist_dir + install.compile = False + install.skip_build = self.skip_build + install.warn_dir = False + + # A wheel without setuptools scripts is more cross-platform. + # Use the (undocumented) `no_ep` option to setuptools' + # install_scripts command to avoid creating entry point scripts. + install_scripts = self.reinitialize_command('install_scripts') + install_scripts.no_ep = True + + # Use a custom scheme for the archive, because we have to decide + # at installation time which scheme to use. + for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'): + setattr(install, + 'install_' + key, + os.path.join(self.data_dir, key)) + + basedir_observed = '' + + if os.name == 'nt': + # win32 barfs if any of these are ''; could be '.'? 
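get_tag() above reduces a build to a (python tag, abi tag, platform tag) triple, and PEP 427 fixes how that triple joins into the wheel filename. A minimal sketch of the assembly, with sample values assumed rather than computed from a real build:

def wheel_filename(name, version, tag, build=None):
    # PEP 427: name-version(-build)-pythontag-abitag-platformtag.whl
    impl, abi, plat = tag
    parts = [name, version] + ([build] if build else []) + [impl, abi, plat]
    return '-'.join(parts) + '.whl'

# A pure, universal distribution:
# wheel_filename('wheel', '0.33.1', ('py2.py3', 'none', 'any'))
# -> 'wheel-0.33.1-py2.py3-none-any.whl'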
+ # (distutils.command.install:change_roots bug) + basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..')) + self.install_libbase = self.install_lib = basedir_observed + + setattr(install, + 'install_purelib' if self.root_is_pure else 'install_platlib', + basedir_observed) + + logger.info("installing to %s", self.bdist_dir) + + self.run_command('install') + + impl_tag, abi_tag, plat_tag = self.get_tag() + archive_basename = "{}-{}-{}-{}".format(self.wheel_dist_name, impl_tag, abi_tag, plat_tag) + if not self.relative: + archive_root = self.bdist_dir + else: + archive_root = os.path.join( + self.bdist_dir, + self._ensure_relative(install.install_base)) + + self.set_undefined_options('install_egg_info', ('target', 'egginfo_dir')) + distinfo_dirname = '{}-{}.dist-info'.format( + safer_name(self.distribution.get_name()), + safer_version(self.distribution.get_version())) + distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname) + self.egg2dist(self.egginfo_dir, distinfo_dir) + + self.write_wheelfile(distinfo_dir) + + # Make the archive + if not os.path.exists(self.dist_dir): + os.makedirs(self.dist_dir) + + wheel_path = os.path.join(self.dist_dir, archive_basename + '.whl') + with WheelFile(wheel_path, 'w') as wf: + wf.write_files(archive_root) + + # Add to 'Distribution.dist_files' so that the "upload" command works + getattr(self.distribution, 'dist_files', []).append( + ('bdist_wheel', get_python_version(), wheel_path)) + + if not self.keep_temp: + logger.info('removing %s', self.bdist_dir) + if not self.dry_run: + rmtree(self.bdist_dir) + + def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'): + from email.message import Message + msg = Message() + msg['Wheel-Version'] = '1.0' # of the spec + msg['Generator'] = generator + msg['Root-Is-Purelib'] = str(self.root_is_pure).lower() + if self.build_number is not None: + msg['Build'] = self.build_number + + # Doesn't work for bdist_wininst + impl_tag, abi_tag, plat_tag = self.get_tag() + for impl in impl_tag.split('.'): + for abi in abi_tag.split('.'): + for plat in plat_tag.split('.'): + msg['Tag'] = '-'.join((impl, abi, plat)) + + wheelfile_path = os.path.join(wheelfile_base, 'WHEEL') + logger.info('creating %s', wheelfile_path) + with open(wheelfile_path, 'w') as f: + Generator(f, maxheaderlen=0).flatten(msg) + + def _ensure_relative(self, path): + # copied from dir_util, deleted + drive, path = os.path.splitdrive(path) + if path[0:1] == os.sep: + path = drive + path[1:] + return path + + @property + def license_paths(self): + metadata = self.distribution.get_option_dict('metadata') + files = set() + patterns = sorted({ + option for option in metadata.get('license_files', ('', ''))[1].split() + }) + + if 'license_file' in metadata: + warn('The "license_file" option is deprecated. 
Use "license_files" instead.', + DeprecationWarning) + files.add(metadata['license_file'][1]) + + if 'license_file' not in metadata and 'license_files' not in metadata: + patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*') + + for pattern in patterns: + for path in iglob(pattern): + if path not in files and os.path.isfile(path): + logger.info('adding license file "%s" (matched pattern "%s")', path, pattern) + files.add(path) + + return files + + def egg2dist(self, egginfo_path, distinfo_path): + """Convert an .egg-info directory into a .dist-info directory""" + def adios(p): + """Appropriately delete directory, file or link.""" + if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p): + shutil.rmtree(p) + elif os.path.exists(p): + os.unlink(p) + + adios(distinfo_path) + + if not os.path.exists(egginfo_path): + # There is no egg-info. This is probably because the egg-info + # file/directory is not named matching the distribution name used + # to name the archive file. Check for this case and report + # accordingly. + import glob + pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info') + possible = glob.glob(pat) + err = "Egg metadata expected at %s but not found" % (egginfo_path,) + if possible: + alt = os.path.basename(possible[0]) + err += " (%s found - possible misnamed archive file?)" % (alt,) + + raise ValueError(err) + + if os.path.isfile(egginfo_path): + # .egg-info is a single file + pkginfo_path = egginfo_path + pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path) + os.mkdir(distinfo_path) + else: + # .egg-info is a directory + pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO') + pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path) + + # ignore common egg metadata that is useless to wheel + shutil.copytree(egginfo_path, distinfo_path, + ignore=lambda x, y: {'PKG-INFO', 'requires.txt', 'SOURCES.txt', + 'not-zip-safe'} + ) + + # delete dependency_links if it is only whitespace + dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt') + with open(dependency_links_path, 'r') as dependency_links_file: + dependency_links = dependency_links_file.read().strip() + if not dependency_links: + adios(dependency_links_path) + + write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info) + + for license_path in self.license_paths: + filename = os.path.basename(license_path) + shutil.copy(license_path, os.path.join(distinfo_path, filename)) + + adios(egginfo_path) diff --git a/project/venv/lib/python2.7/site-packages/wheel/bdist_wheel.pyc b/project/venv/lib/python2.7/site-packages/wheel/bdist_wheel.pyc new file mode 100644 index 0000000..30d5e71 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/bdist_wheel.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/cli/__init__.py b/project/venv/lib/python2.7/site-packages/wheel/cli/__init__.py new file mode 100644 index 0000000..95740bf --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/cli/__init__.py @@ -0,0 +1,88 @@ +""" +Wheel command-line utility. 
+""" + +from __future__ import print_function + +import argparse +import os +import sys + + +def require_pkgresources(name): + try: + import pkg_resources # noqa: F401 + except ImportError: + raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name)) + + +class WheelError(Exception): + pass + + +def unpack_f(args): + from .unpack import unpack + unpack(args.wheelfile, args.dest) + + +def pack_f(args): + from .pack import pack + pack(args.directory, args.dest_dir, args.build_number) + + +def convert_f(args): + from .convert import convert + convert(args.files, args.dest_dir, args.verbose) + + +def version_f(args): + from .. import __version__ + print("wheel %s" % __version__) + + +def parser(): + p = argparse.ArgumentParser() + s = p.add_subparsers(help="commands") + + unpack_parser = s.add_parser('unpack', help='Unpack wheel') + unpack_parser.add_argument('--dest', '-d', help='Destination directory', + default='.') + unpack_parser.add_argument('wheelfile', help='Wheel file') + unpack_parser.set_defaults(func=unpack_f) + + repack_parser = s.add_parser('pack', help='Repack wheel') + repack_parser.add_argument('directory', help='Root directory of the unpacked wheel') + repack_parser.add_argument('--dest-dir', '-d', default=os.path.curdir, + help="Directory to store the wheel (default %(default)s)") + repack_parser.add_argument('--build-number', help="Build tag to use in the wheel name") + repack_parser.set_defaults(func=pack_f) + + convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel') + convert_parser.add_argument('files', nargs='*', help='Files to convert') + convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir, + help="Directory to store wheels (default %(default)s)") + convert_parser.add_argument('--verbose', '-v', action='store_true') + convert_parser.set_defaults(func=convert_f) + + version_parser = s.add_parser('version', help='Print version and exit') + version_parser.set_defaults(func=version_f) + + help_parser = s.add_parser('help', help='Show this help') + help_parser.set_defaults(func=lambda args: p.print_help()) + + return p + + +def main(): + p = parser() + args = p.parse_args() + if not hasattr(args, 'func'): + p.print_help() + else: + try: + args.func(args) + return 0 + except WheelError as e: + print(e, file=sys.stderr) + + return 1 diff --git a/project/venv/lib/python2.7/site-packages/wheel/cli/__init__.pyc b/project/venv/lib/python2.7/site-packages/wheel/cli/__init__.pyc new file mode 100644 index 0000000..69cda10 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/cli/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/cli/convert.py b/project/venv/lib/python2.7/site-packages/wheel/cli/convert.py new file mode 100644 index 0000000..f1a793a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/cli/convert.py @@ -0,0 +1,269 @@ +import os.path +import re +import shutil +import sys +import tempfile +import zipfile +from distutils import dist +from glob import iglob + +from ..bdist_wheel import bdist_wheel +from ..wheelfile import WheelFile +from . import WheelError, require_pkgresources + +egg_info_re = re.compile(r''' + (?P.+?)-(?P.+?) + (-(?Ppy\d\.\d) + (-(?P.+?))? + )?.egg$''', re.VERBOSE) + + +class _bdist_wheel_tag(bdist_wheel): + # allow the client to override the default generated wheel tag + # The default bdist_wheel implementation uses python and abi tags + # of the running python process. 
This is not suitable for + # generating/repackaging prebuild binaries. + + full_tag_supplied = False + full_tag = None # None or a (pytag, soabitag, plattag) triple + + def get_tag(self): + if self.full_tag_supplied and self.full_tag is not None: + return self.full_tag + else: + return bdist_wheel.get_tag(self) + + +def egg2wheel(egg_path, dest_dir): + filename = os.path.basename(egg_path) + match = egg_info_re.match(filename) + if not match: + raise WheelError('Invalid egg file name: {}'.format(filename)) + + egg_info = match.groupdict() + dir = tempfile.mkdtemp(suffix="_e2w") + if os.path.isfile(egg_path): + # assume we have a bdist_egg otherwise + with zipfile.ZipFile(egg_path) as egg: + egg.extractall(dir) + else: + # support buildout-style installed eggs directories + for pth in os.listdir(egg_path): + src = os.path.join(egg_path, pth) + if os.path.isfile(src): + shutil.copy2(src, dir) + else: + shutil.copytree(src, os.path.join(dir, pth)) + + pyver = egg_info['pyver'] + if pyver: + pyver = egg_info['pyver'] = pyver.replace('.', '') + + arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_') + + # assume all binary eggs are for CPython + abi = 'cp' + pyver[2:] if arch != 'any' else 'none' + + root_is_purelib = egg_info['arch'] is None + if root_is_purelib: + bw = bdist_wheel(dist.Distribution()) + else: + bw = _bdist_wheel_tag(dist.Distribution()) + + bw.root_is_pure = root_is_purelib + bw.python_tag = pyver + bw.plat_name_supplied = True + bw.plat_name = egg_info['arch'] or 'any' + if not root_is_purelib: + bw.full_tag_supplied = True + bw.full_tag = (pyver, abi, arch) + + dist_info_dir = os.path.join(dir, '{name}-{ver}.dist-info'.format(**egg_info)) + bw.egg2dist(os.path.join(dir, 'EGG-INFO'), dist_info_dir) + bw.write_wheelfile(dist_info_dir, generator='egg2wheel') + wheel_name = '{name}-{ver}-{pyver}-{}-{}.whl'.format(abi, arch, **egg_info) + with WheelFile(os.path.join(dest_dir, wheel_name), 'w') as wf: + wf.write_files(dir) + + shutil.rmtree(dir) + + +def parse_wininst_info(wininfo_name, egginfo_name): + """Extract metadata from filenames. + + Extracts the 4 metadataitems needed (name, version, pyversion, arch) from + the installer filename and the name of the egg-info directory embedded in + the zipfile (if any). + + The egginfo filename has the format:: + + name-ver(-pyver)(-arch).egg-info + + The installer filename has the format:: + + name-ver.arch(-pyver).exe + + Some things to note: + + 1. The installer filename is not definitive. An installer can be renamed + and work perfectly well as an installer. So more reliable data should + be used whenever possible. + 2. The egg-info data should be preferred for the name and version, because + these come straight from the distutils metadata, and are mandatory. + 3. The pyver from the egg-info data should be ignored, as it is + constructed from the version of Python used to build the installer, + which is irrelevant - the installer filename is correct here (even to + the point that when it's not there, any version is implied). + 4. The architecture must be taken from the installer filename, as it is + not included in the egg-info data. + 5. Architecture-neutral installers still have an architecture because the + installer format itself (being executable) is architecture-specific. We + should therefore ignore the architecture if the content is pure-python. 
+ """ + + egginfo = None + if egginfo_name: + egginfo = egg_info_re.search(egginfo_name) + if not egginfo: + raise ValueError("Egg info filename %s is not valid" % (egginfo_name,)) + + # Parse the wininst filename + # 1. Distribution name (up to the first '-') + w_name, sep, rest = wininfo_name.partition('-') + if not sep: + raise ValueError("Installer filename %s is not valid" % (wininfo_name,)) + + # Strip '.exe' + rest = rest[:-4] + # 2. Python version (from the last '-', must start with 'py') + rest2, sep, w_pyver = rest.rpartition('-') + if sep and w_pyver.startswith('py'): + rest = rest2 + w_pyver = w_pyver.replace('.', '') + else: + # Not version specific - use py2.py3. While it is possible that + # pure-Python code is not compatible with both Python 2 and 3, there + # is no way of knowing from the wininst format, so we assume the best + # here (the user can always manually rename the wheel to be more + # restrictive if needed). + w_pyver = 'py2.py3' + # 3. Version and architecture + w_ver, sep, w_arch = rest.rpartition('.') + if not sep: + raise ValueError("Installer filename %s is not valid" % (wininfo_name,)) + + if egginfo: + w_name = egginfo.group('name') + w_ver = egginfo.group('ver') + + return {'name': w_name, 'ver': w_ver, 'arch': w_arch, 'pyver': w_pyver} + + +def wininst2wheel(path, dest_dir): + with zipfile.ZipFile(path) as bdw: + # Search for egg-info in the archive + egginfo_name = None + for filename in bdw.namelist(): + if '.egg-info' in filename: + egginfo_name = filename + break + + info = parse_wininst_info(os.path.basename(path), egginfo_name) + + root_is_purelib = True + for zipinfo in bdw.infolist(): + if zipinfo.filename.startswith('PLATLIB'): + root_is_purelib = False + break + if root_is_purelib: + paths = {'purelib': ''} + else: + paths = {'platlib': ''} + + dist_info = "%(name)s-%(ver)s" % info + datadir = "%s.data/" % dist_info + + # rewrite paths to trick ZipFile into extracting an egg + # XXX grab wininst .ini - between .exe, padding, and first zip file. + members = [] + egginfo_name = '' + for zipinfo in bdw.infolist(): + key, basename = zipinfo.filename.split('/', 1) + key = key.lower() + basepath = paths.get(key, None) + if basepath is None: + basepath = datadir + key.lower() + '/' + oldname = zipinfo.filename + newname = basepath + basename + zipinfo.filename = newname + del bdw.NameToInfo[oldname] + bdw.NameToInfo[newname] = zipinfo + # Collect member names, but omit '' (from an entry like "PLATLIB/" + if newname: + members.append(newname) + # Remember egg-info name for the egg2dist call below + if not egginfo_name: + if newname.endswith('.egg-info'): + egginfo_name = newname + elif '.egg-info/' in newname: + egginfo_name, sep, _ = newname.rpartition('/') + dir = tempfile.mkdtemp(suffix="_b2w") + bdw.extractall(dir, members) + + # egg2wheel + abi = 'none' + pyver = info['pyver'] + arch = (info['arch'] or 'any').replace('.', '_').replace('-', '_') + # Wininst installers always have arch even if they are not + # architecture-specific (because the format itself is). + # So, assume the content is architecture-neutral if root is purelib. + if root_is_purelib: + arch = 'any' + # If the installer is architecture-specific, it's almost certainly also + # CPython-specific. 
+ if arch != 'any': + pyver = pyver.replace('py', 'cp') + wheel_name = '-'.join((dist_info, pyver, abi, arch)) + if root_is_purelib: + bw = bdist_wheel(dist.Distribution()) + else: + bw = _bdist_wheel_tag(dist.Distribution()) + + bw.root_is_pure = root_is_purelib + bw.python_tag = pyver + bw.plat_name_supplied = True + bw.plat_name = info['arch'] or 'any' + + if not root_is_purelib: + bw.full_tag_supplied = True + bw.full_tag = (pyver, abi, arch) + + dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info) + bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir) + bw.write_wheelfile(dist_info_dir, generator='wininst2wheel') + + wheel_path = os.path.join(dest_dir, wheel_name) + with WheelFile(wheel_path, 'w') as wf: + wf.write_files(dir) + + shutil.rmtree(dir) + + +def convert(files, dest_dir, verbose): + # Only support wheel convert if pkg_resources is present + require_pkgresources('wheel convert') + + for pat in files: + for installer in iglob(pat): + if os.path.splitext(installer)[1] == '.egg': + conv = egg2wheel + else: + conv = wininst2wheel + + if verbose: + print("{}... ".format(installer)) + sys.stdout.flush() + + conv(installer, dest_dir) + if verbose: + print("OK") diff --git a/project/venv/lib/python2.7/site-packages/wheel/cli/convert.pyc b/project/venv/lib/python2.7/site-packages/wheel/cli/convert.pyc new file mode 100644 index 0000000..1340e40 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/cli/convert.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/cli/pack.py b/project/venv/lib/python2.7/site-packages/wheel/cli/pack.py new file mode 100644 index 0000000..af6e81c --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/cli/pack.py @@ -0,0 +1,58 @@ +from __future__ import print_function + +import os.path +import re +import sys + +from wheel.cli import WheelError +from wheel.wheelfile import WheelFile + +DIST_INFO_RE = re.compile(r"^(?P(?P.+?)-(?P\d.*?))\.dist-info$") + + +def pack(directory, dest_dir, build_number): + """Repack a previously unpacked wheel directory into a new wheel file. + + The .dist-info/WHEEL file must contain one or more tags so that the target + wheel file name can be determined. 
+ + :param directory: The unpacked wheel directory + :param dest_dir: Destination directory (defaults to the current directory) + """ + # Find the .dist-info directory + dist_info_dirs = [fn for fn in os.listdir(directory) + if os.path.isdir(os.path.join(directory, fn)) and DIST_INFO_RE.match(fn)] + if len(dist_info_dirs) > 1: + raise WheelError('Multiple .dist-info directories found in {}'.format(directory)) + elif not dist_info_dirs: + raise WheelError('No .dist-info directories found in {}'.format(directory)) + + # Determine the target wheel filename + dist_info_dir = dist_info_dirs[0] + name_version = DIST_INFO_RE.match(dist_info_dir).group('namever') + + # Add the build number if specified + if build_number: + name_version += '-' + build_number + + # Read the tags from .dist-info/WHEEL + with open(os.path.join(directory, dist_info_dir, 'WHEEL')) as f: + tags = [line.split(' ')[1].rstrip() for line in f if line.startswith('Tag: ')] + if not tags: + raise WheelError('No tags present in {}/WHEEL; cannot determine target wheel filename' + .format(dist_info_dir)) + + # Reassemble the tags for the wheel file + impls = sorted({tag.split('-')[0] for tag in tags}) + abivers = sorted({tag.split('-')[1] for tag in tags}) + platforms = sorted({tag.split('-')[2] for tag in tags}) + tagline = '-'.join(['.'.join(impls), '.'.join(abivers), '.'.join(platforms)]) + + # Repack the wheel + wheel_path = os.path.join(dest_dir, '{}-{}.whl'.format(name_version, tagline)) + with WheelFile(wheel_path, 'w') as wf: + print("Repacking wheel as {}...".format(wheel_path), end='') + sys.stdout.flush() + wf.write_files(directory) + + print('OK') diff --git a/project/venv/lib/python2.7/site-packages/wheel/cli/pack.pyc b/project/venv/lib/python2.7/site-packages/wheel/cli/pack.pyc new file mode 100644 index 0000000..5b44d88 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/cli/pack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/cli/unpack.py b/project/venv/lib/python2.7/site-packages/wheel/cli/unpack.py new file mode 100644 index 0000000..2e9857a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/cli/unpack.py @@ -0,0 +1,25 @@ +from __future__ import print_function + +import os.path +import sys + +from ..wheelfile import WheelFile + + +def unpack(path, dest='.'): + """Unpack a wheel. + + Wheel content will be unpacked to {dest}/{name}-{ver}, where {name} + is the package name and {ver} its version. + + :param path: The path to the wheel. + :param dest: Destination directory (defaults to the current directory). + """ + with WheelFile(path) as wf: + namever = wf.parsed_filename.group('namever') + destination = os.path.join(dest, namever) + print("Unpacking to: {}...".format(destination), end='') + sys.stdout.flush() + wf.extractall(destination) + + print('OK') diff --git a/project/venv/lib/python2.7/site-packages/wheel/cli/unpack.pyc b/project/venv/lib/python2.7/site-packages/wheel/cli/unpack.pyc new file mode 100644 index 0000000..dff2bb5 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/cli/unpack.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/metadata.py b/project/venv/lib/python2.7/site-packages/wheel/metadata.py new file mode 100644 index 0000000..ab0c07e --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/metadata.py @@ -0,0 +1,141 @@ +""" +Tools for converting old- to new-style metadata.
+""" + +import os.path +import re +import textwrap + +import pkg_resources + +from .pkginfo import read_pkg_info + +# Wheel itself is probably the only program that uses non-extras markers +# in METADATA/PKG-INFO. Support its syntax with the extra at the end only. +EXTRA_RE = re.compile( + r"""^(?P.*?)(;\s*(?P.*?)(extra == '(?P.*?)')?)$""") + + +def requires_to_requires_dist(requirement): + """Return the version specifier for a requirement in PEP 345/566 fashion.""" + if getattr(requirement, 'url', None): + return " @ " + requirement.url + + requires_dist = [] + for op, ver in requirement.specs: + requires_dist.append(op + ver) + if not requires_dist: + return '' + return " (%s)" % ','.join(sorted(requires_dist)) + + +def convert_requirements(requirements): + """Yield Requires-Dist: strings for parsed requirements strings.""" + for req in requirements: + parsed_requirement = pkg_resources.Requirement.parse(req) + spec = requires_to_requires_dist(parsed_requirement) + extras = ",".join(sorted(parsed_requirement.extras)) + if extras: + extras = "[%s]" % extras + yield (parsed_requirement.project_name + extras + spec) + + +def generate_requirements(extras_require): + """ + Convert requirements from a setup()-style dictionary to ('Requires-Dist', 'requirement') + and ('Provides-Extra', 'extra') tuples. + + extras_require is a dictionary of {extra: [requirements]} as passed to setup(), + using the empty extra {'': [requirements]} to hold install_requires. + """ + for extra, depends in extras_require.items(): + condition = '' + extra = extra or '' + if ':' in extra: # setuptools extra:condition syntax + extra, condition = extra.split(':', 1) + + extra = pkg_resources.safe_extra(extra) + if extra: + yield 'Provides-Extra', extra + if condition: + condition = "(" + condition + ") and " + condition += "extra == '%s'" % extra + + if condition: + condition = ' ; ' + condition + + for new_req in convert_requirements(depends): + yield 'Requires-Dist', new_req + condition + + +def pkginfo_to_metadata(egg_info_path, pkginfo_path): + """ + Convert .egg-info directory with PKG-INFO to the Metadata 2.1 format + """ + pkg_info = read_pkg_info(pkginfo_path) + pkg_info.replace_header('Metadata-Version', '2.1') + # Those will be regenerated from `requires.txt`. + del pkg_info['Provides-Extra'] + del pkg_info['Requires-Dist'] + requires_path = os.path.join(egg_info_path, 'requires.txt') + if os.path.exists(requires_path): + with open(requires_path) as requires_file: + requires = requires_file.read() + + parsed_requirements = sorted(pkg_resources.split_sections(requires), + key=lambda x: x[0] or '') + for extra, reqs in parsed_requirements: + for key, value in generate_requirements({extra: reqs}): + if (key, value) not in pkg_info.items(): + pkg_info[key] = value + + description = pkg_info['Description'] + if description: + pkg_info.set_payload(dedent_description(pkg_info)) + del pkg_info['Description'] + + return pkg_info + + +def pkginfo_unicode(pkg_info, field): + """Hack to coax Unicode out of an email Message() - Python 3.3+""" + text = pkg_info[field] + field = field.lower() + if not isinstance(text, str): + if not hasattr(pkg_info, 'raw_items'): # Python 3.2 + return str(text) + for item in pkg_info.raw_items(): + if item[0].lower() == field: + text = item[1].encode('ascii', 'surrogateescape') \ + .decode('utf-8') + break + + return text + + +def dedent_description(pkg_info): + """ + Dedent and convert pkg_info['Description'] to Unicode. 
+ """ + description = pkg_info['Description'] + + # Python 3 Unicode handling, sorta. + surrogates = False + if not isinstance(description, str): + surrogates = True + description = pkginfo_unicode(pkg_info, 'Description') + + description_lines = description.splitlines() + description_dedent = '\n'.join( + # if the first line of long_description is blank, + # the first line here will be indented. + (description_lines[0].lstrip(), + textwrap.dedent('\n'.join(description_lines[1:])), + '\n')) + + if surrogates: + description_dedent = description_dedent \ + .encode("utf8") \ + .decode("ascii", "surrogateescape") + + return description_dedent diff --git a/project/venv/lib/python2.7/site-packages/wheel/metadata.pyc b/project/venv/lib/python2.7/site-packages/wheel/metadata.pyc new file mode 100644 index 0000000..c8501e7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/metadata.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/pep425tags.py b/project/venv/lib/python2.7/site-packages/wheel/pep425tags.py new file mode 100644 index 0000000..13b5073 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/pep425tags.py @@ -0,0 +1,185 @@ +"""Generate and work with PEP 425 Compatibility Tags.""" + +import distutils.util +import platform +import sys +import sysconfig +import warnings + +try: + from importlib.machinery import get_all_suffixes +except ImportError: + from imp import get_suffixes as get_all_suffixes + + +def get_config_var(var): + try: + return sysconfig.get_config_var(var) + except IOError as e: # pip Issue #1074 + warnings.warn("{0}".format(e), RuntimeWarning) + return None + + +def get_abbr_impl(): + """Return abbreviated implementation name.""" + impl = platform.python_implementation() + if impl == 'PyPy': + return 'pp' + elif impl == 'Jython': + return 'jy' + elif impl == 'IronPython': + return 'ip' + elif impl == 'CPython': + return 'cp' + + raise LookupError('Unknown Python implementation: ' + impl) + + +def get_impl_ver(): + """Return implementation version.""" + impl_ver = get_config_var("py_version_nodot") + if not impl_ver or get_abbr_impl() == 'pp': + impl_ver = ''.join(map(str, get_impl_version_info())) + return impl_ver + + +def get_impl_version_info(): + """Return sys.version_info-like tuple for use in decrementing the minor + version.""" + if get_abbr_impl() == 'pp': + # as per https://github.com/pypa/pip/issues/2882 + return (sys.version_info[0], sys.pypy_version_info.major, + sys.pypy_version_info.minor) + else: + return sys.version_info[0], sys.version_info[1] + + +def get_flag(var, fallback, expected=True, warn=True): + """Use a fallback method for determining SOABI flags if the needed config + var is unset or unavailable.""" + val = get_config_var(var) + if val is None: + if warn: + warnings.warn("Config variable '{0}' is unset, Python ABI tag may " + "be incorrect".format(var), RuntimeWarning, 2) + return fallback() + return val == expected + + +def get_abi_tag(): + """Return the ABI tag based on SOABI (if available) or emulate SOABI + (CPython 2, PyPy).""" + soabi = get_config_var('SOABI') + impl = get_abbr_impl() + if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'): + d = '' + m = '' + u = '' + if get_flag('Py_DEBUG', + lambda: hasattr(sys, 'gettotalrefcount'), + warn=(impl == 'cp')): + d = 'd' + if get_flag('WITH_PYMALLOC', + lambda: impl == 'cp', + warn=(impl == 'cp')): + m = 'm' + if get_flag('Py_UNICODE_SIZE', + lambda: sys.maxunicode == 0x10ffff, + expected=4, + warn=(impl == 'cp' and + 
sys.version_info < (3, 3))) \ + and sys.version_info < (3, 3): + u = 'u' + abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) + elif soabi and soabi.startswith('cpython-'): + abi = 'cp' + soabi.split('-')[1] + elif soabi: + abi = soabi.replace('.', '_').replace('-', '_') + else: + abi = None + return abi + + +def get_platform(): + """Return our platform name 'win32', 'linux_x86_64'""" + # XXX remove distutils dependency + result = distutils.util.get_platform().replace('.', '_').replace('-', '_') + if result == "linux_x86_64" and sys.maxsize == 2147483647: + # pip pull request #3497 + result = "linux_i686" + return result + + +def get_supported(versions=None, supplied_platform=None): + """Return a list of supported tags for each version specified in + `versions`. + + :param versions: a list of string versions, of the form ["33", "32"], + or None. The first version will be assumed to support our ABI. + """ + supported = [] + + # Versions must be given with respect to the preference + if versions is None: + versions = [] + version_info = get_impl_version_info() + major = version_info[:-1] + # Support all previous minor Python versions. + for minor in range(version_info[-1], -1, -1): + versions.append(''.join(map(str, major + (minor,)))) + + impl = get_abbr_impl() + + abis = [] + + abi = get_abi_tag() + if abi: + abis[0:0] = [abi] + + abi3s = set() + for suffix in get_all_suffixes(): + if suffix[0].startswith('.abi'): + abi3s.add(suffix[0].split('.', 2)[1]) + + abis.extend(sorted(list(abi3s))) + + abis.append('none') + + platforms = [] + if supplied_platform: + platforms.append(supplied_platform) + platforms.append(get_platform()) + + # Current version, current API (built specifically for our Python): + for abi in abis: + for arch in platforms: + supported.append(('%s%s' % (impl, versions[0]), abi, arch)) + + # abi3 modules compatible with older version of Python + for version in versions[1:]: + # abi3 was introduced in Python 3.2 + if version in ('31', '30'): + break + for abi in abi3s: # empty set if not Python 3 + for arch in platforms: + supported.append(("%s%s" % (impl, version), abi, arch)) + + # No abi / arch, but requires our implementation: + for i, version in enumerate(versions): + supported.append(('%s%s' % (impl, version), 'none', 'any')) + if i == 0: + # Tagged specifically as being cross-version compatible + # (with just the major version specified) + supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) + + # Major Python version + platform; e.g. 
binaries not using the Python API + for arch in platforms: + supported.append(('py%s' % (versions[0][0]), 'none', arch)) + + # No abi / arch, generic Python + for i, version in enumerate(versions): + supported.append(('py%s' % (version,), 'none', 'any')) + if i == 0: + supported.append(('py%s' % (version[0]), 'none', 'any')) + + return supported diff --git a/project/venv/lib/python2.7/site-packages/wheel/pep425tags.pyc b/project/venv/lib/python2.7/site-packages/wheel/pep425tags.pyc new file mode 100644 index 0000000..186cb1f Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/pep425tags.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/pkginfo.py b/project/venv/lib/python2.7/site-packages/wheel/pkginfo.py new file mode 100644 index 0000000..115be45 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/pkginfo.py @@ -0,0 +1,43 @@ +"""Tools for reading and writing PKG-INFO / METADATA without caring +about the encoding.""" + +from email.parser import Parser + +try: + unicode + _PY3 = False +except NameError: + _PY3 = True + +if not _PY3: + from email.generator import Generator + + def read_pkg_info_bytes(bytestr): + return Parser().parsestr(bytestr) + + def read_pkg_info(path): + with open(path, "r") as headers: + message = Parser().parse(headers) + return message + + def write_pkg_info(path, message): + with open(path, 'w') as metadata: + Generator(metadata, mangle_from_=False, maxheaderlen=0).flatten(message) +else: + from email.generator import BytesGenerator + + def read_pkg_info_bytes(bytestr): + headers = bytestr.decode(encoding="ascii", errors="surrogateescape") + message = Parser().parsestr(headers) + return message + + def read_pkg_info(path): + with open(path, "r", + encoding="ascii", + errors="surrogateescape") as headers: + message = Parser().parse(headers) + return message + + def write_pkg_info(path, message): + with open(path, "wb") as out: + BytesGenerator(out, mangle_from_=False, maxheaderlen=0).flatten(message) diff --git a/project/venv/lib/python2.7/site-packages/wheel/pkginfo.pyc b/project/venv/lib/python2.7/site-packages/wheel/pkginfo.pyc new file mode 100644 index 0000000..d34c07e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/pkginfo.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/util.py b/project/venv/lib/python2.7/site-packages/wheel/util.py new file mode 100644 index 0000000..0afb54a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/util.py @@ -0,0 +1,46 @@ +import base64 +import io +import sys + + +if sys.version_info[0] < 3: + text_type = unicode # noqa: F821 + + StringIO = io.BytesIO + + def native(s, encoding='utf-8'): + if isinstance(s, unicode): + return s.encode(encoding) + return s +else: + text_type = str + + StringIO = io.StringIO + + def native(s, encoding='utf-8'): + if isinstance(s, bytes): + return s.decode(encoding) + return s + + +def urlsafe_b64encode(data): + """urlsafe_b64encode without padding""" + return base64.urlsafe_b64encode(data).rstrip(b'=') + + +def urlsafe_b64decode(data): + """urlsafe_b64decode without padding""" + pad = b'=' * (4 - (len(data) & 3)) + return base64.urlsafe_b64decode(data + pad) + + +def as_unicode(s): + if isinstance(s, bytes): + return s.decode('utf-8') + return s + + +def as_bytes(s): + if isinstance(s, text_type): + return s.encode('utf-8') + return s diff --git a/project/venv/lib/python2.7/site-packages/wheel/util.pyc b/project/venv/lib/python2.7/site-packages/wheel/util.pyc new file mode 
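The two base64 helpers above are deliberately asymmetric: encoding strips the '=' padding, which is the form RECORD stores, so decoding has to restore it first. A round-trip check that assumes nothing beyond those helpers:

from wheel.util import urlsafe_b64encode, urlsafe_b64decode

payload = b'any binary payload'
encoded = urlsafe_b64encode(payload)
assert b'=' not in encoded                      # padding stripped on the way out
assert urlsafe_b64decode(encoded) == payload    # and restored on the way back in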
100644 index 0000000..a40c5ee Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/util.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/wheel/wheelfile.py b/project/venv/lib/python2.7/site-packages/wheel/wheelfile.py new file mode 100644 index 0000000..9a1c8d2 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/wheel/wheelfile.py @@ -0,0 +1,168 @@ +from __future__ import print_function + +import csv +import hashlib +import os.path +import re +import time +from collections import OrderedDict +from distutils import log as logger +from zipfile import ZIP_DEFLATED, ZipInfo, ZipFile + +from wheel.cli import WheelError +from wheel.util import urlsafe_b64decode, as_unicode, native, urlsafe_b64encode, as_bytes, StringIO + +# Non-greedy matching of an optional build number may be too clever (more +# invalid wheel filenames will match). Separate regex for .dist-info? +WHEEL_INFO_RE = re.compile( + r"""^(?P(?P.+?)-(?P.+?))(-(?P\d[^-]*))? + -(?P.+?)-(?P.+?)-(?P.+?)\.whl$""", + re.VERBOSE) + + +def get_zipinfo_datetime(timestamp=None): + # Some applications need reproducible .whl files, but they can't do this without forcing + # the timestamp of the individual ZipInfo objects. See issue #143. + timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', timestamp or time.time())) + return time.gmtime(timestamp)[0:6] + + +class WheelFile(ZipFile): + """A ZipFile derivative class that also reads SHA-256 hashes from + .dist-info/RECORD and checks any read files against those. + """ + + _default_algorithm = hashlib.sha256 + + def __init__(self, file, mode='r'): + basename = os.path.basename(file) + self.parsed_filename = WHEEL_INFO_RE.match(basename) + if not basename.endswith('.whl') or self.parsed_filename is None: + raise WheelError("Bad wheel filename {!r}".format(basename)) + + ZipFile.__init__(self, file, mode, compression=ZIP_DEFLATED, allowZip64=True) + + self.dist_info_path = '{}.dist-info'.format(self.parsed_filename.group('namever')) + self.record_path = self.dist_info_path + '/RECORD' + self._file_hashes = OrderedDict() + self._file_sizes = {} + if mode == 'r': + # Ignore RECORD and any embedded wheel signatures + self._file_hashes[self.record_path] = None, None + self._file_hashes[self.record_path + '.jws'] = None, None + self._file_hashes[self.record_path + '.p7s'] = None, None + + # Fill in the expected hashes by reading them from RECORD + try: + record = self.open(self.record_path) + except KeyError: + raise WheelError('Missing {} file'.format(self.record_path)) + + with record: + for line in record: + line = line.decode('utf-8') + path, hash_sum, size = line.rsplit(u',', 2) + if hash_sum: + algorithm, hash_sum = hash_sum.split(u'=') + try: + hashlib.new(algorithm) + except ValueError: + raise WheelError('Unsupported hash algorithm: {}'.format(algorithm)) + + if algorithm.lower() in {'md5', 'sha1'}: + raise WheelError( + 'Weak hash algorithm ({}) is not permitted by PEP 427' + .format(algorithm)) + + self._file_hashes[path] = ( + algorithm, urlsafe_b64decode(hash_sum.encode('ascii'))) + + def open(self, name_or_info, mode="r", pwd=None): + def _update_crc(newdata, eof=None): + if eof is None: + eof = ef._eof + update_crc_orig(newdata) + else: # Python 2 + update_crc_orig(newdata, eof) + + running_hash.update(newdata) + if eof and running_hash.digest() != expected_hash: + raise WheelError("Hash mismatch for file '{}'".format(native(ef_name))) + + ef = ZipFile.open(self, name_or_info, mode, pwd) + ef_name = as_unicode(name_or_info.filename if 
isinstance(name_or_info, ZipInfo) + else name_or_info) + if mode == 'r' and not ef_name.endswith('/'): + if ef_name not in self._file_hashes: + raise WheelError("No hash found for file '{}'".format(native(ef_name))) + + algorithm, expected_hash = self._file_hashes[ef_name] + if expected_hash is not None: + # Monkey patch the _update_crc method to also check for the hash from RECORD + running_hash = hashlib.new(algorithm) + update_crc_orig, ef._update_crc = ef._update_crc, _update_crc + + return ef + + def write_files(self, base_dir): + logger.info("creating '%s' and adding '%s' to it", self.filename, base_dir) + deferred = [] + for root, dirnames, filenames in os.walk(base_dir): + # Sort the directory names so that `os.walk` will walk them in a + # defined order on the next iteration. + dirnames.sort() + for name in sorted(filenames): + path = os.path.normpath(os.path.join(root, name)) + if os.path.isfile(path): + arcname = os.path.relpath(path, base_dir) + if arcname == self.record_path: + pass + elif root.endswith('.dist-info'): + deferred.append((path, arcname)) + else: + self.write(path, arcname) + + deferred.sort() + for path, arcname in deferred: + self.write(path, arcname) + + def write(self, filename, arcname=None, compress_type=None): + with open(filename, 'rb') as f: + st = os.fstat(f.fileno()) + data = f.read() + + zinfo = ZipInfo(arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime)) + zinfo.external_attr = st.st_mode << 16 + zinfo.compress_type = ZIP_DEFLATED + self.writestr(zinfo, data, compress_type) + + def writestr(self, zinfo_or_arcname, bytes, compress_type=None): + ZipFile.writestr(self, zinfo_or_arcname, bytes, compress_type) + fname = (zinfo_or_arcname.filename if isinstance(zinfo_or_arcname, ZipInfo) + else zinfo_or_arcname) + logger.info("adding '%s'", fname) + if fname != self.record_path: + hash_ = self._default_algorithm(bytes) + self._file_hashes[fname] = hash_.name, native(urlsafe_b64encode(hash_.digest())) + self._file_sizes[fname] = len(bytes) + + def close(self): + # Write RECORD + if self.fp is not None and self.mode == 'w' and self._file_hashes: + data = StringIO() + writer = csv.writer(data, delimiter=',', quotechar='"', lineterminator='\n') + writer.writerows(( + ( + fname, + algorithm + "=" + hash_, + self._file_sizes[fname] + ) + for fname, (algorithm, hash_) in self._file_hashes.items() + )) + writer.writerow((format(self.record_path), "", "")) + zinfo = ZipInfo(native(self.record_path), date_time=get_zipinfo_datetime()) + zinfo.compress_type = ZIP_DEFLATED + zinfo.external_attr = 0o664 << 16 + self.writestr(zinfo, as_bytes(data.getvalue())) + + ZipFile.close(self) diff --git a/project/venv/lib/python2.7/site-packages/wheel/wheelfile.pyc b/project/venv/lib/python2.7/site-packages/wheel/wheelfile.pyc new file mode 100644 index 0000000..51d3531 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/wheel/wheelfile.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/__init__.py b/project/venv/lib/python2.7/site-packages/yaml/__init__.py new file mode 100644 index 0000000..e7a419d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/__init__.py @@ -0,0 +1,406 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '5.1' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + + 
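The try/except around cyaml above is the usual libyaml feature test, and consumers mirror it when parsing speed matters. A typical sketch (CSafeLoader is only importable when PyYAML was built against libyaml):

import yaml

try:
    from yaml import CSafeLoader as SafeLoader   # libyaml-backed, fast
except ImportError:
    from yaml import SafeLoader                  # pure-Python fallback

print(yaml.__with_libyaml__)
data = yaml.load('a: 1', Loader=SafeLoader)      # explicit Loader, as the
                                                 # warning machinery below expects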
+#------------------------------------------------------------------------------ +# Warnings control +#------------------------------------------------------------------------------ + +# 'Global' warnings state: +_warnings_enabled = { + 'YAMLLoadWarning': True, +} + +# Get or set global warnings' state +def warnings(settings=None): + if settings is None: + return _warnings_enabled + + if type(settings) is dict: + for key in settings: + if key in _warnings_enabled: + _warnings_enabled[key] = settings[key] + +# Warn when load() is called without Loader=... +class YAMLLoadWarning(RuntimeWarning): + pass + +def load_warning(method): + if _warnings_enabled['YAMLLoadWarning'] is False: + return + + import warnings + + message = ( + "calling yaml.%s() without Loader=... is deprecated, as the " + "default Loader is unsafe. Please read " + "https://msg.pyyaml.org/load for full details." + ) % method + + warnings.warn(message, YAMLLoadWarning, stacklevel=3) + +#------------------------------------------------------------------------------ +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=None): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + if Loader is None: + load_warning('load') + Loader = FullLoader + + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=None): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + """ + if Loader is None: + load_warning('load_all') + Loader = FullLoader + + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def full_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + + Resolve all tags except those known to be + unsafe on untrusted input. + """ + return load(stream, FullLoader) + +def full_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + + Resolve all tags except those known to be + unsafe on untrusted input. + """ + return load_all(stream, FullLoader) + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + + Resolve only basic YAML tags. This is known + to be safe for untrusted input. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + + Resolve only basic YAML tags. This is known + to be safe for untrusted input. 
+ """ + return load_all(stream, SafeLoader) + +def unsafe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + + Resolve all tags, even those known to be + unsafe on untrusted input. + """ + return load(stream, UnsafeLoader) + +def unsafe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + + Resolve all tags, even those known to be + unsafe on untrusted input. + """ + return load_all(stream, UnsafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. 
+ If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. 
+ """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. + """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/project/venv/lib/python2.7/site-packages/yaml/__init__.pyc b/project/venv/lib/python2.7/site-packages/yaml/__init__.pyc new file mode 100644 index 0000000..f48912b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/__init__.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/composer.py b/project/venv/lib/python2.7/site-packages/yaml/composer.py new file mode 100644 index 0000000..df85ef6 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurrence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurrence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/project/venv/lib/python2.7/site-packages/yaml/composer.pyc b/project/venv/lib/python2.7/site-packages/yaml/composer.pyc new file mode 100644 index 0000000..98e9337 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/composer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/constructor.py b/project/venv/lib/python2.7/site-packages/yaml/constructor.py new file mode 100644 index 0000000..516dad1 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/constructor.py @@ -0,0 +1,709 @@ + +__all__ = [ + 'BaseConstructor', + 'SafeConstructor', + 'FullConstructor', + 'UnsafeConstructor', + 'Constructor', + 'ConstructorError' +] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + 
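
The functions in the package's top-level module above are the whole public surface of this vendored copy: every load/dump variant simply selects a Loader or Dumper class and delegates. As a minimal sketch of how it would be driven from the project's own scripts (the document string below is a made-up example; the tree is vendored for Python 2.7, but these calls are identical on Python 3):

    import yaml

    doc = "a: 1\nb: [two, three]\n"

    data = yaml.safe_load(doc)                     # basic tags only; safe for untrusted input
    same = yaml.load(doc, Loader=yaml.SafeLoader)  # equivalent, with the Loader named explicitly
    lax  = yaml.load(doc)                          # works, but triggers the YAMLLoadWarning defined above
    text = yaml.safe_dump(data)                    # serialize back to a YAML string

safe_load is the variant the warning machinery steers callers toward; full_load and unsafe_load differ only in which tags the chosen Loader is willing to resolve.
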
+class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. + node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", 
node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + 
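+            # YAML 1.1 allows sexagesimal (base-60) integers, e.g.
+            # "3:25:45" == 3*3600 + 25*60 + 45 == 12345; the loop below
+            # accumulates the reversed digit groups with a base-60
+            # place value.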
digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) + (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
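+        # Like construct_yaml_omap, this is a generator constructor: the
+        # empty list is yielded before it is filled, so construct_object
+        # can register it (and point recursive aliases at it) first;
+        # construct_document drains the pending generators afterwards.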
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class FullConstructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return 
tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark, unsafe=False): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + if unsafe: + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + if not name in sys.modules: + raise ConstructorError("while constructing a Python module", mark, + "module %r is not imported" % name.encode('utf-8'), mark) + return sys.modules[name] + + def find_python_name(self, name, mark, unsafe=False): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + if unsafe: + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + if not module_name in sys.modules: + raise ConstructorError("while constructing a Python object", mark, + "module %r is not imported" % module_name.encode('utf-8'), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False, unsafe=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if not (unsafe or isinstance(cls, type) or isinstance(cls, type(self.classobj))): + raise ConstructorError("while constructing a Python instance", node.start_mark, + "expected a class, but found %r" % type(cls), + node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... 
} + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. + if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/none', + FullConstructor.construct_yaml_null) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + FullConstructor.construct_yaml_bool) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/str', + FullConstructor.construct_python_str) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + FullConstructor.construct_python_unicode) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/int', + FullConstructor.construct_yaml_int) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/long', + FullConstructor.construct_python_long) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/float', + FullConstructor.construct_yaml_float) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + FullConstructor.construct_python_complex) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/list', + FullConstructor.construct_yaml_seq) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + FullConstructor.construct_python_tuple) + +FullConstructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + FullConstructor.construct_yaml_map) + +FullConstructor.add_multi_constructor( + u'tag:yaml.org,2002:python/name:', + FullConstructor.construct_python_name) + +FullConstructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + FullConstructor.construct_python_module) + +FullConstructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + FullConstructor.construct_python_object) + +FullConstructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + FullConstructor.construct_python_object_apply) + +FullConstructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + FullConstructor.construct_python_object_new) + +class UnsafeConstructor(FullConstructor): + + def find_python_module(self, name, mark): + return super(UnsafeConstructor, 
self).find_python_module(name, mark, unsafe=True) + + def find_python_name(self, name, mark): + return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True) + + def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): + return super(UnsafeConstructor, self).make_python_instance( + suffix, node, args, kwds, newobj, unsafe=True) + +# Constructor is same as UnsafeConstructor. Need to leave this in place in case +# people have extended it directly. +class Constructor(UnsafeConstructor): + pass diff --git a/project/venv/lib/python2.7/site-packages/yaml/constructor.pyc b/project/venv/lib/python2.7/site-packages/yaml/constructor.pyc new file mode 100644 index 0000000..adb6ede Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/constructor.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/cyaml.py b/project/venv/lib/python2.7/site-packages/yaml/cyaml.py new file mode 100644 index 0000000..ebb8959 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/cyaml.py @@ -0,0 +1,101 @@ + +__all__ = [ + 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper' +] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CFullLoader(CParser, FullConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + FullConstructor.__init__(self) + Resolver.__init__(self) + +class CUnsafeLoader(CParser, UnsafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + UnsafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, 
tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + diff --git a/project/venv/lib/python2.7/site-packages/yaml/cyaml.pyc b/project/venv/lib/python2.7/site-packages/yaml/cyaml.pyc new file mode 100644 index 0000000..6423e02 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/cyaml.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/dumper.py b/project/venv/lib/python2.7/site-packages/yaml/dumper.py new file mode 100644 index 0000000..f9cd49f --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + 
Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + diff --git a/project/venv/lib/python2.7/site-packages/yaml/dumper.pyc b/project/venv/lib/python2.7/site-packages/yaml/dumper.pyc new file mode 100644 index 0000000..95e591b Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/dumper.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/emitter.py b/project/venv/lib/python2.7/site-packages/yaml/emitter.py new file mode 100644 index 0000000..9561a82 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/emitter.py @@ -0,0 +1,1144 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +import sys + +from error import YAMLError +from events import * + +has_ucs4 = sys.maxunicode > 0xffff + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overridden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. + self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. 
+ self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. + + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
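+    # Each expect_* handler below consumes self.event, writes any output,
+    # and sets self.state directly or via the self.states stack.  The
+    # leading '---' is omitted only for a first document that nothing
+    # (directives, canonical mode, emptiness) forces to be explicit;
+    # '...' closes an open-ended document.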
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. + + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
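+    # Flow style renders a collection inline ('[a, b]' / '{k: v}').
+    # expect_node above selects it when the emitter is already inside a
+    # flow collection, in canonical mode, when the event requests it, or
+    # when the collection is empty; otherwise block style is used.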
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. + + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
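+    # A block sequence that is the value of a mapping key is emitted
+    # "indentless", i.e. its '-' markers sit at the key's own indent
+    # level; that is what the mapping_context test below arranges.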
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. + + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
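+    # Scalars go through a small pipeline: analyze_scalar works out which
+    # styles the text can safely use, choose_scalar_style picks one
+    # (plain, single- or double-quoted, literal or folded), and the
+    # matching write_* method renders it.  Prepared anchors and tags are
+    # cached in prepared_anchor / prepared_tag between check_simple_key
+    # and the process_* calls.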
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD' + or ((not has_ucs4) or (u'\U00010000' <= ch < u'\U0010ffff'))) and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. + if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. + + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or 
(self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + 
self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 diff --git a/project/venv/lib/python2.7/site-packages/yaml/emitter.pyc b/project/venv/lib/python2.7/site-packages/yaml/emitter.pyc new file mode 100644 index 0000000..a8fc08e Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/emitter.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/error.py b/project/venv/lib/python2.7/site-packages/yaml/error.py new file mode 100644 index 0000000..577686d --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/project/venv/lib/python2.7/site-packages/yaml/error.pyc b/project/venv/lib/python2.7/site-packages/yaml/error.pyc new file mode 100644 index 0000000..9bf89f4 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/error.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/events.py b/project/venv/lib/python2.7/site-packages/yaml/events.py new file mode 100644 index 0000000..f79ad38 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. + +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. 
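+#
+# For orientation (an illustrative note, not part of the upstream source):
+# parsing the document u'- 1\n- 2\n' produces the event sequence
+#
+#     StreamStartEvent, DocumentStartEvent, SequenceStartEvent,
+#     ScalarEvent(u'1'), ScalarEvent(u'2'), SequenceEndEvent,
+#     DocumentEndEvent, StreamEndEvent
+#
+# which can be inspected with `list(yaml.parse(u'- 1\n- 2\n'))`.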
+
+class StreamStartEvent(Event):
+    def __init__(self, start_mark=None, end_mark=None, encoding=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.encoding = encoding
+
+class StreamEndEvent(Event):
+    pass
+
+class DocumentStartEvent(Event):
+    def __init__(self, start_mark=None, end_mark=None,
+            explicit=None, version=None, tags=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.explicit = explicit
+        self.version = version
+        self.tags = tags
+
+class DocumentEndEvent(Event):
+    def __init__(self, start_mark=None, end_mark=None,
+            explicit=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+    pass
+
+class ScalarEvent(NodeEvent):
+    def __init__(self, anchor, tag, implicit, value,
+            start_mark=None, end_mark=None, style=None):
+        self.anchor = anchor
+        self.tag = tag
+        self.implicit = implicit
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+    pass
+
+class SequenceEndEvent(CollectionEndEvent):
+    pass
+
+class MappingStartEvent(CollectionStartEvent):
+    pass
+
+class MappingEndEvent(CollectionEndEvent):
+    pass
+
diff --git a/project/venv/lib/python2.7/site-packages/yaml/events.pyc b/project/venv/lib/python2.7/site-packages/yaml/events.pyc
new file mode 100644
index 0000000..3160376
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/events.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/yaml/loader.py b/project/venv/lib/python2.7/site-packages/yaml/loader.py
new file mode 100644
index 0000000..a79182e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/yaml/loader.py
@@ -0,0 +1,63 @@
+
+__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        BaseConstructor.__init__(self)
+        BaseResolver.__init__(self)
+
+class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        FullConstructor.__init__(self)
+        Resolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        SafeConstructor.__init__(self)
+        Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        Constructor.__init__(self)
+        Resolver.__init__(self)
+
+# UnsafeLoader is the same as Loader (which is and was always unsafe on
+# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
+# FullLoader should be able to load almost all YAML safely. Loader is left
+# intact to ensure backwards compatibility.
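+#
+# A minimal usage sketch (illustrative, not part of the upstream file): the
+# loader classes below are normally reached through the top-level helpers,
+# so callers pick the trust level explicitly, e.g.
+#
+#     import yaml
+#     data = yaml.load(stream, Loader=yaml.SafeLoader)    # plain YAML types only
+#     data = yaml.load(stream, Loader=yaml.FullLoader)    # most YAML, still safe
+#     data = yaml.load(stream, Loader=yaml.UnsafeLoader)  # trusted input only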
+class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        Constructor.__init__(self)
+        Resolver.__init__(self)
diff --git a/project/venv/lib/python2.7/site-packages/yaml/loader.pyc b/project/venv/lib/python2.7/site-packages/yaml/loader.pyc
new file mode 100644
index 0000000..9b69e9a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/loader.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/yaml/nodes.py b/project/venv/lib/python2.7/site-packages/yaml/nodes.py
new file mode 100644
index 0000000..c4f070c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+    def __init__(self, tag, value, start_mark, end_mark):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+    def __repr__(self):
+        value = self.value
+        #if isinstance(value, list):
+        #    if len(value) == 0:
+        #        value = '<empty>'
+        #    elif len(value) == 1:
+        #        value = '<1 item>'
+        #    else:
+        #        value = '<%d items>' % len(value)
+        #else:
+        #    if len(value) > 75:
+        #        value = repr(value[:70]+u' ... ')
+        #    else:
+        #        value = repr(value)
+        value = repr(value)
+        return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+    id = 'scalar'
+    def __init__(self, tag, value,
+            start_mark=None, end_mark=None, style=None):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+
+class CollectionNode(Node):
+    def __init__(self, tag, value,
+            start_mark=None, end_mark=None, flow_style=None):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+    id = 'sequence'
+
+class MappingNode(CollectionNode):
+    id = 'mapping'
+
diff --git a/project/venv/lib/python2.7/site-packages/yaml/nodes.pyc b/project/venv/lib/python2.7/site-packages/yaml/nodes.pyc
new file mode 100644
index 0000000..a63b2dc
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/nodes.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/yaml/parser.py b/project/venv/lib/python2.7/site-packages/yaml/parser.py
new file mode 100644
index 0000000..f9e3057
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream            ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+#                       ALIAS
+#                       | properties (block_content | indentless_block_sequence)?
+#                       | block_content
+#                       | indentless_block_sequence
+# block_node        ::= ALIAS
+#                       | properties block_content?
+#                       | block_content
+# flow_node         ::= ALIAS
+#                       | properties flow_content?
+#                       | flow_content
+# properties        ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content     ::= block_collection | flow_collection | SCALAR
+# flow_content      ::= flow_collection | SCALAR
+# block_collection  ::= block_sequence | block_mapping
+# flow_collection   ::= flow_sequence | flow_mapping
+# block_sequence    ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence   ::= (BLOCK-ENTRY block_node?)+
+# block_mapping     ::= BLOCK-MAPPING-START
+#                       ((KEY block_node_or_indentless_sequence?)?
+#                       (VALUE block_node_or_indentless_sequence?)?)*
+#                       BLOCK-END
+# flow_sequence     ::= FLOW-SEQUENCE-START
+#                       (flow_sequence_entry FLOW-ENTRY)*
+#                       flow_sequence_entry?
+#                       FLOW-SEQUENCE-END
+# flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping      ::= FLOW-MAPPING-START
+#                       (flow_mapping_entry FLOW-ENTRY)*
+#                       flow_mapping_entry?
+#                       FLOW-MAPPING-END
+# flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+    pass
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+    # do not give many comments here.
+
+    DEFAULT_TAGS = {
+        u'!':   u'!',
+        u'!!':  u'tag:yaml.org,2002:',
+    }
+
+    def __init__(self):
+        self.current_event = None
+        self.yaml_version = None
+        self.tag_handles = {}
+        self.states = []
+        self.marks = []
+        self.state = self.parse_stream_start
+
+    def dispose(self):
+        # Reset the state attributes (to clear self-references)
+        self.states = []
+        self.state = None
+
+    def check_event(self, *choices):
+        # Check the type of the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        if self.current_event is not None:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.current_event, choice):
+                    return True
+        return False
+
+    def peek_event(self):
+        # Get the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        return self.current_event
+
+    def get_event(self):
+        # Get the next event and proceed further.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        value = self.current_event
+        self.current_event = None
+        return value
+
+    # stream    ::= STREAM-START implicit_document? explicit_document* STREAM-END
+    # implicit_document ::= block_node DOCUMENT-END*
+    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+    def parse_stream_start(self):
+
+        # Parse the stream start.
+        token = self.get_token()
+        event = StreamStartEvent(token.start_mark, token.end_mark,
+                encoding=token.encoding)
+
+        # Prepare the next state.
+        self.state = self.parse_implicit_document_start
+
+        return event
+
+    def parse_implicit_document_start(self):
+
+        # Parse an implicit document.
+        if not self.check_token(DirectiveToken, DocumentStartToken,
+                StreamEndToken):
+            self.tag_handles = self.DEFAULT_TAGS
+            token = self.peek_token()
+            start_mark = end_mark = token.start_mark
+            event = DocumentStartEvent(start_mark, end_mark,
+                    explicit=False)
+
+            # Prepare the next state.
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_block_node
+
+            return event
+
+        else:
+            return self.parse_document_start()
+
+    def parse_document_start(self):
+
+        # Parse any extra document end indicators.
+        while self.check_token(DocumentEndToken):
+            self.get_token()
+
+        # Parse an explicit document.
+        if not self.check_token(StreamEndToken):
+            token = self.peek_token()
+            start_mark = token.start_mark
+            version, tags = self.process_directives()
+            if not self.check_token(DocumentStartToken):
+                raise ParserError(None, None,
+                        "expected '<document start>', but found %r"
+                        % self.peek_token().id,
+                        self.peek_token().start_mark)
+            token = self.get_token()
+            end_mark = token.end_mark
+            event = DocumentStartEvent(start_mark, end_mark,
+                    explicit=True, version=version, tags=tags)
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_document_content
+        else:
+            # Parse the end of the stream.
+            token = self.get_token()
+            event = StreamEndEvent(token.start_mark, token.end_mark)
+            assert not self.states
+            assert not self.marks
+            self.state = None
+        return event
+
+    def parse_document_end(self):
+
+        # Parse the document end.
+        token = self.peek_token()
+        start_mark = end_mark = token.start_mark
+        explicit = False
+        if self.check_token(DocumentEndToken):
+            token = self.get_token()
+            end_mark = token.end_mark
+            explicit = True
+        event = DocumentEndEvent(start_mark, end_mark,
+                explicit=explicit)
+
+        # Prepare the next state.
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? + # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + 
start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+                event = ScalarEvent(anchor, tag, (implicit, False), u'',
+                        start_mark, end_mark)
+                self.state = self.states.pop()
+            else:
+                if block:
+                    node = 'block'
+                else:
+                    node = 'flow'
+                token = self.peek_token()
+                raise ParserError("while parsing a %s node" % node, start_mark,
+                        "expected the node content, but found %r" % token.id,
+                        token.start_mark)
+        return event
+
+    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+    def parse_block_sequence_first_entry(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_sequence_entry()
+
+    def parse_block_sequence_entry(self):
+        if self.check_token(BlockEntryToken):
+            token = self.get_token()
+            if not self.check_token(BlockEntryToken, BlockEndToken):
+                self.states.append(self.parse_block_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_block_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        if not self.check_token(BlockEndToken):
+            token = self.peek_token()
+            raise ParserError("while parsing a block collection", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+        token = self.get_token()
+        event = SequenceEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+    def parse_indentless_sequence_entry(self):
+        if self.check_token(BlockEntryToken):
+            token = self.get_token()
+            if not self.check_token(BlockEntryToken,
+                    KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_indentless_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_indentless_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        token = self.peek_token()
+        event = SequenceEndEvent(token.start_mark, token.start_mark)
+        self.state = self.states.pop()
+        return event
+
+    # block_mapping     ::= BLOCK-MAPPING-START
+    #                       ((KEY block_node_or_indentless_sequence?)?
+    #                       (VALUE block_node_or_indentless_sequence?)?)*
+    #                       BLOCK-END
+
+    def parse_block_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_mapping_key()
+
+    def parse_block_mapping_key(self):
+        if self.check_token(KeyToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_value)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_value
+                return self.process_empty_scalar(token.end_mark)
+        if not self.check_token(BlockEndToken):
+            token = self.peek_token()
+            raise ParserError("while parsing a block mapping", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_block_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_key)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_block_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    # flow_sequence     ::= FLOW-SEQUENCE-START
+    #                       (flow_sequence_entry FLOW-ENTRY)*
+    #                       flow_sequence_entry?
+    #                       FLOW-SEQUENCE-END
+    # flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+    #
+    # Note that while production rules for both flow_sequence_entry and
+    # flow_mapping_entry are equal, their interpretations are different.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax); see the example below.
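+    #
+    # A worked example (illustrative, not in the upstream source): the flow
+    # sequence
+    #
+    #     [one, two: 2, three]
+    #
+    # is parsed as a three-item sequence whose second item is the single-pair
+    # mapping {two: 2}; the KEY branch below wraps that pair in an implicit
+    # MappingStartEvent/MappingEndEvent.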
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
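+    #
+    # An illustrative note (not in the upstream source): entries with omitted
+    # values are filled in with empty scalars, so
+    #
+    #     {a: 1, b}
+    #
+    # gives the key b an empty scalar value (which the resolver later reads
+    # as null); the missing value comes from process_empty_scalar() below.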
+
+    def parse_flow_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_mapping_key(first=True)
+
+    def parse_flow_mapping_key(self, first=False):
+        if not self.check_token(FlowMappingEndToken):
+            if not first:
+                if self.check_token(FlowEntryToken):
+                    self.get_token()
+                else:
+                    token = self.peek_token()
+                    raise ParserError("while parsing a flow mapping", self.marks[-1],
+                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
+            if self.check_token(KeyToken):
+                token = self.get_token()
+                if not self.check_token(ValueToken,
+                        FlowEntryToken, FlowMappingEndToken):
+                    self.states.append(self.parse_flow_mapping_value)
+                    return self.parse_flow_node()
+                else:
+                    self.state = self.parse_flow_mapping_value
+                    return self.process_empty_scalar(token.end_mark)
+            elif not self.check_token(FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_empty_value)
+                return self.parse_flow_node()
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_key)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_mapping_empty_value(self):
+        self.state = self.parse_flow_mapping_key
+        return self.process_empty_scalar(self.peek_token().start_mark)
+
+    def process_empty_scalar(self, mark):
+        return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
diff --git a/project/venv/lib/python2.7/site-packages/yaml/parser.pyc b/project/venv/lib/python2.7/site-packages/yaml/parser.pyc
new file mode 100644
index 0000000..adba4ad
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/parser.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/yaml/reader.py b/project/venv/lib/python2.7/site-packages/yaml/reader.py
new file mode 100644
index 0000000..b2f10b0
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/yaml/reader.py
@@ -0,0 +1,188 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further, there is no pretty code here.
+#
+# We define two classes here.
+#
+#   Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+#   Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+#   reader.peek(length=1) - return the next `length` characters
+#   reader.forward(length=1) - move the current position `length` characters forward
+#   reader.index - the number of the current character
+#   reader.line, reader.column - the line and the column of the current character
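+#
+# A small usage sketch (illustrative, not part of the upstream file):
+#
+#     reader = Reader(u'hello: world\n')
+#     reader.peek()      # -> u'h', does not advance
+#     reader.forward(6)  # consume 'hello:'
+#     reader.peek()      # -> u' '
+#     reader.get_mark()  # Mark suitable for error messages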
+
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re, sys
+
+has_ucs4 = sys.maxunicode > 0xffff
+
+class ReaderError(YAMLError):
+
+    def __init__(self, name, position, character, encoding, reason):
+        self.name = name
+        self.character = character
+        self.position = position
+        self.encoding = encoding
+        self.reason = reason
+
+    def __str__(self):
+        if isinstance(self.character, str):
+            return "'%s' codec can't decode byte #x%02x: %s\n" \
+                    "  in \"%s\", position %d" \
+                    % (self.encoding, ord(self.character), self.reason,
+                            self.name, self.position)
+        else:
+            return "unacceptable character #x%04x: %s\n" \
+                    "  in \"%s\", position %d" \
+                    % (self.character, self.reason,
+                            self.name, self.position)
+
+class Reader(object):
+    # Reader:
+    # - determines the data encoding and converts it to unicode,
+    # - checks if characters are in allowed range,
+    # - adds '\0' to the end.
+
+    # Reader accepts
+    #  - a `str` object,
+    #  - a `unicode` object,
+    #  - a file-like object with its `read` method returning `str`,
+    #  - a file-like object with its `read` method returning `unicode`.
+
+    # Yeah, it's ugly and slow.
+
+    def __init__(self, stream):
+        self.name = None
+        self.stream = None
+        self.stream_pointer = 0
+        self.eof = True
+        self.buffer = u''
+        self.pointer = 0
+        self.raw_buffer = None
+        self.raw_decode = None
+        self.encoding = None
+        self.index = 0
+        self.line = 0
+        self.column = 0
+        if isinstance(stream, unicode):
+            self.name = "<unicode string>"
+            self.check_printable(stream)
+            self.buffer = stream+u'\0'
+        elif isinstance(stream, str):
+            self.name = "<string>"
+            self.raw_buffer = stream
+            self.determine_encoding()
+        else:
+            self.stream = stream
+            self.name = getattr(stream, 'name', "<file>")
+            self.eof = False
+            self.raw_buffer = ''
+            self.determine_encoding()
+
+    def peek(self, index=0):
+        try:
+            return self.buffer[self.pointer+index]
+        except IndexError:
+            self.update(index+1)
+            return self.buffer[self.pointer+index]
+
+    def prefix(self, length=1):
+        if self.pointer+length >= len(self.buffer):
+            self.update(length)
+        return self.buffer[self.pointer:self.pointer+length]
+
+    def forward(self, length=1):
+        if self.pointer+length+1 >= len(self.buffer):
+            self.update(length+1)
+        while length:
+            ch = self.buffer[self.pointer]
+            self.pointer += 1
+            self.index += 1
+            if ch in u'\n\x85\u2028\u2029' \
+                    or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+                self.line += 1
+                self.column = 0
+            elif ch != u'\uFEFF':
+                self.column += 1
+            length -= 1
+
+    def get_mark(self):
+        if self.stream is None:
+            return Mark(self.name, self.index, self.line, self.column,
+                    self.buffer, self.pointer)
+        else:
+            return Mark(self.name, self.index, self.line, self.column,
+                    None, None)
+
+    def determine_encoding(self):
+        while not self.eof and len(self.raw_buffer) < 2:
+            self.update_raw()
+        if not isinstance(self.raw_buffer, unicode):
+            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+                self.raw_decode = codecs.utf_16_le_decode
+                self.encoding = 'utf-16-le'
+            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+                self.raw_decode = codecs.utf_16_be_decode
+                self.encoding = 'utf-16-be'
+            else:
+                self.raw_decode = codecs.utf_8_decode
+                self.encoding = 'utf-8'
+        self.update(1)
+
+    if has_ucs4:
+        NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
+    else:
+        NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+    def check_printable(self, data):
+        match = self.NON_PRINTABLE.search(data)
+        if match:
+            character = match.group()
+
position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True diff --git a/project/venv/lib/python2.7/site-packages/yaml/reader.pyc b/project/venv/lib/python2.7/site-packages/yaml/reader.pyc new file mode 100644 index 0000000..92961a7 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/reader.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/representer.py b/project/venv/lib/python2.7/site-packages/yaml/representer.py new file mode 100644 index 0000000..9dca41a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/representer.py @@ -0,0 +1,488 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=False, sort_keys=True): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.sort_keys = sort_keys + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = 
self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + if self.sort_keys: + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', 
value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object", data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + +SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def 
represent_str(self, data):
+        tag = None
+        style = None
+        try:
+            data = unicode(data, 'ascii')
+            tag = u'tag:yaml.org,2002:str'
+        except UnicodeDecodeError:
+            try:
+                data = unicode(data, 'utf-8')
+                tag = u'tag:yaml.org,2002:python/str'
+            except UnicodeDecodeError:
+                data = data.encode('base64')
+                tag = u'tag:yaml.org,2002:binary'
+                style = '|'
+        return self.represent_scalar(tag, data, style=style)
+
+    def represent_unicode(self, data):
+        tag = None
+        try:
+            data.encode('ascii')
+            tag = u'tag:yaml.org,2002:python/unicode'
+        except UnicodeEncodeError:
+            tag = u'tag:yaml.org,2002:str'
+        return self.represent_scalar(tag, data)
+
+    def represent_long(self, data):
+        tag = u'tag:yaml.org,2002:int'
+        if int(data) is not data:
+            tag = u'tag:yaml.org,2002:python/long'
+        return self.represent_scalar(tag, unicode(data))
+
+    def represent_complex(self, data):
+        if data.imag == 0.0:
+            data = u'%r' % data.real
+        elif data.real == 0.0:
+            data = u'%rj' % data.imag
+        elif data.imag > 0:
+            data = u'%r+%rj' % (data.real, data.imag)
+        else:
+            data = u'%r%rj' % (data.real, data.imag)
+        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+    def represent_tuple(self, data):
+        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+    def represent_name(self, data):
+        name = u'%s.%s' % (data.__module__, data.__name__)
+        return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+    def represent_module(self, data):
+        return self.represent_scalar(
+            u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+    def represent_instance(self, data):
+        # For instances of classic classes, we use __getinitargs__ and
+        # __getstate__ to serialize the data.
+
+        # If data.__getinitargs__ exists, the object must be reconstructed by
+        # calling cls(*args), where args is a tuple returned by
+        # __getinitargs__. Otherwise, the cls.__init__ method should never be
+        # called and the class instance is created by instantiating a trivial
+        # class and assigning to the instance's __class__ variable.
+
+        # If data.__getstate__ exists, it returns the state of the object.
+        # Otherwise, the state of the object is data.__dict__.
+
+        # We produce either a !!python/object or !!python/object/new node.
+        # If data.__getinitargs__ does not exist and state is a dictionary, we
+        # produce a !!python/object node. Otherwise we produce a
+        # !!python/object/new node.
+
+        cls = data.__class__
+        class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+        args = None
+        state = None
+        if hasattr(data, '__getinitargs__'):
+            args = list(data.__getinitargs__())
+        if hasattr(data, '__getstate__'):
+            state = data.__getstate__()
+        else:
+            state = data.__dict__
+        if args is None and isinstance(state, dict):
+            return self.represent_mapping(
+                u'tag:yaml.org,2002:python/object:'+class_name, state)
+        if isinstance(state, dict) and not state:
+            return self.represent_sequence(
+                u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+        value = {}
+        if args:
+            value['args'] = args
+        value['state'] = state
+        return self.represent_mapping(
+            u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+    def represent_object(self, data):
+        # We use __reduce__ API to save the data. data.__reduce__ returns
+        # a tuple of length 2-5:
+        #   (function, args, state, listitems, dictitems)
+
+        # For reconstructing, we call function(*args), then set its state,
+        # listitems, and dictitems if they are not None.
+
+        # A special case is when function.__name__ == '__newobj__'.
In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent an object", data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) + +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/project/venv/lib/python2.7/site-packages/yaml/representer.pyc b/project/venv/lib/python2.7/site-packages/yaml/representer.pyc new file mode 100644 index 0000000..e68e814 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/representer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/resolver.py b/project/venv/lib/python2.7/site-packages/yaml/resolver.py new file mode 100644 index 0000000..528fbc0 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} 
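+    # Implicit resolution maps an untagged plain scalar to a tag by matching
+    # it against registered patterns (see the add_implicit_resolver calls at
+    # the bottom of this file). A rough illustration, assuming the stock
+    # Resolver and default loaders:
+    #   >>> import yaml
+    #   >>> yaml.safe_load('12')       # matched by the int resolver
+    #   12
+    #   >>> yaml.safe_load('hello')    # no match, falls back to !!str
+    #   'hello'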
+ + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. + if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + 
self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver(
+        u'tag:yaml.org,2002:yaml',
+        re.compile(ur'^(?:!|&|\*)$'),
+        list(u'!&*'))
+
diff --git a/project/venv/lib/python2.7/site-packages/yaml/resolver.pyc b/project/venv/lib/python2.7/site-packages/yaml/resolver.pyc
new file mode 100644
index 0000000..0c6d746
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/resolver.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/yaml/scanner.py b/project/venv/lib/python2.7/site-packages/yaml/scanner.py
new file mode 100644
index 0000000..5126cf0
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/yaml/scanner.py
@@ -0,0 +1,1444 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+    pass
+
+class SimpleKey(object):
+    # See the simple keys treatment below.
+
+    def __init__(self, token_number, required, index, line, column, mark):
+        self.token_number = token_number
+        self.required = required
+        self.index = index
+        self.line = line
+        self.column = column
+        self.mark = mark
+
+class Scanner(object):
+
+    def __init__(self):
+        """Initialize the scanner."""
+        # It is assumed that Scanner and Reader will have a common descendant.
+        # Reader does the dirty work of checking for BOM and converting the
+        # input data to Unicode. It also adds NUL to the end.
+        #
+        # Reader supports the following methods
+        #   self.peek(i=0)    # peek the next i-th character
+        #   self.prefix(l=1)  # peek the next l characters
+        #   self.forward(l=1) # read the next l characters and move the pointer.
+
+        # Have we reached the end of the stream?
+        self.done = False
+
+        # The number of unclosed '{' and '['. `flow_level == 0` means block
+        # context.
+        self.flow_level = 0
+
+        # List of processed tokens that are not yet emitted.
+        self.tokens = []
+
+        # Add the STREAM-START token.
+        self.fetch_stream_start()
+
+        # Number of tokens that were emitted through the `get_token` method.
+        self.tokens_taken = 0
+
+        # The current indentation level.
+        self.indent = -1
+
+        # Past indentation levels.
+        self.indents = []
+
+        # Variables related to simple keys treatment.
+
+        # A simple key is a key that is not denoted by the '?' indicator.
+        # Example of simple keys:
+        #   ---
+        #   block simple key: value
+        #   ? not a simple key:
+        #   : { flow simple key: value }
+        # We emit the KEY token before all keys, so when we find a potential
+        # simple key, we try to locate the corresponding ':' indicator.
+        # Simple keys should be limited to a single line and 1024 characters.
+
+        # Can a simple key start at the current position? A simple key may
+        # start:
+        # - at the beginning of the line, not counting indentation spaces
+        #       (in block context),
+        # - after '{', '[', ',' (in the flow context),
+        # - after '?', ':', '-' (in the block context).
+        # In the block context, this flag also signifies if a block collection
+        # may start at the current position.
+        self.allow_simple_key = True
+
+        # Keep track of possible simple keys. This is a dictionary.
The key
+        # is `flow_level`; there can be no more than one possible simple key
+        # for each level. The value is a SimpleKey record:
+        #   (token_number, required, index, line, column, mark)
+        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+        # '[', or '{' tokens.
+        self.possible_simple_keys = {}
+
+    # Public methods.
+
+    def check_token(self, *choices):
+        # Check if the next token is one of the given types.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.tokens[0], choice):
+                    return True
+        return False
+
+    def peek_token(self):
+        # Return the next token, but do not delete it from the queue.
+        # Return None if no more tokens.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            return self.tokens[0]
+        else:
+            return None
+
+    def get_token(self):
+        # Return the next token.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            self.tokens_taken += 1
+            return self.tokens.pop(0)
+
+    # Private methods.
+
+    def need_more_tokens(self):
+        if self.done:
+            return False
+        if not self.tokens:
+            return True
+        # The current token may be a potential simple key, so we
+        # need to look further.
+        self.stale_possible_simple_keys()
+        if self.next_possible_simple_key() == self.tokens_taken:
+            return True
+
+    def fetch_more_tokens(self):
+
+        # Eat whitespaces and comments until we reach the next token.
+        self.scan_to_next_token()
+
+        # Remove obsolete possible simple keys.
+        self.stale_possible_simple_keys()
+
+        # Compare the current indentation and column. It may add some tokens
+        # and decrease the current indentation level.
+        self.unwind_indent(self.column)
+
+        # Peek the next character.
+        ch = self.peek()
+
+        # Is it the end of stream?
+        if ch == u'\0':
+            return self.fetch_stream_end()
+
+        # Is it a directive?
+        if ch == u'%' and self.check_directive():
+            return self.fetch_directive()
+
+        # Is it the document start?
+        if ch == u'-' and self.check_document_start():
+            return self.fetch_document_start()
+
+        # Is it the document end?
+        if ch == u'.' and self.check_document_end():
+            return self.fetch_document_end()
+
+        # TODO: support for BOM within a stream.
+        #if ch == u'\uFEFF':
+        #    return self.fetch_bom()    <-- issue BOMToken
+
+        # Note: the order of the following checks is NOT significant.
+
+        # Is it the flow sequence start indicator?
+        if ch == u'[':
+            return self.fetch_flow_sequence_start()
+
+        # Is it the flow mapping start indicator?
+        if ch == u'{':
+            return self.fetch_flow_mapping_start()
+
+        # Is it the flow sequence end indicator?
+        if ch == u']':
+            return self.fetch_flow_sequence_end()
+
+        # Is it the flow mapping end indicator?
+        if ch == u'}':
+            return self.fetch_flow_mapping_end()
+
+        # Is it the flow entry indicator?
+        if ch == u',':
+            return self.fetch_flow_entry()
+
+        # Is it the block entry indicator?
+        if ch == u'-' and self.check_block_entry():
+            return self.fetch_block_entry()
+
+        # Is it the key indicator?
+        if ch == u'?' and self.check_key():
+            return self.fetch_key()
+
+        # Is it the value indicator?
+        if ch == u':' and self.check_value():
+            return self.fetch_value()
+
+        # Is it an alias?
+        if ch == u'*':
+            return self.fetch_alias()
+
+        # Is it an anchor?
+        if ch == u'&':
+            return self.fetch_anchor()
+
+        # Is it a tag?
+        if ch == u'!':
+            return self.fetch_tag()
+
+        # Is it a literal scalar?
+        if ch == u'|' and not self.flow_level:
+            return self.fetch_literal()
+
+        # Is it a folded scalar?
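+        # (Folded scalars join their lines with spaces, while the literal '|'
+        # style above preserves line breaks. A rough illustration, assuming
+        # the default loader:
+        #   >>> yaml.safe_load('>\n  a\n  b\n')
+        #   'a b\n'
+        #   >>> yaml.safe_load('|\n  a\n  b\n')
+        #   'a\nb\n'
+        # )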
+        if ch == u'>' and not self.flow_level:
+            return self.fetch_folded()
+
+        # Is it a single quoted scalar?
+        if ch == u'\'':
+            return self.fetch_single()
+
+        # Is it a double quoted scalar?
+        if ch == u'\"':
+            return self.fetch_double()
+
+        # It must be a plain scalar then.
+        if self.check_plain():
+            return self.fetch_plain()
+
+        # No? It's an error. Let's produce a nice error message.
+        raise ScannerError("while scanning for the next token", None,
+                "found character %r that cannot start any token"
+                % ch.encode('utf-8'), self.get_mark())
+
+    # Simple keys treatment.
+
+    def next_possible_simple_key(self):
+        # Return the number of the nearest possible simple key. Actually we
+        # don't need to loop through the whole dictionary. We may replace it
+        # with the following code:
+        #   if not self.possible_simple_keys:
+        #       return None
+        #   return self.possible_simple_keys[
+        #           min(self.possible_simple_keys.keys())].token_number
+        min_token_number = None
+        for level in self.possible_simple_keys:
+            key = self.possible_simple_keys[level]
+            if min_token_number is None or key.token_number < min_token_number:
+                min_token_number = key.token_number
+        return min_token_number
+
+    def stale_possible_simple_keys(self):
+        # Remove entries that are no longer possible simple keys. According to
+        # the YAML specification, simple keys
+        # - should be limited to a single line,
+        # - should be no longer than 1024 characters.
+        # Disabling this procedure will allow simple keys of any length and
+        # height (may cause problems if indentation is broken though).
+        for level in self.possible_simple_keys.keys():
+            key = self.possible_simple_keys[level]
+            if key.line != self.line  \
+                    or self.index-key.index > 1024:
+                if key.required:
+                    raise ScannerError("while scanning a simple key", key.mark,
+                            "could not find expected ':'", self.get_mark())
+                del self.possible_simple_keys[level]
+
+    def save_possible_simple_key(self):
+        # The next token may start a simple key. We check if it's possible
+        # and save its position. This function is called for
+        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+        # Check if a simple key is required at the current position.
+        required = not self.flow_level and self.indent == self.column
+
+        # The next token might be a simple key. Let's save its number and
+        # position.
+        if self.allow_simple_key:
+            self.remove_possible_simple_key()
+            token_number = self.tokens_taken+len(self.tokens)
+            key = SimpleKey(token_number, required,
+                    self.index, self.line, self.column, self.get_mark())
+            self.possible_simple_keys[self.flow_level] = key
+
+    def remove_possible_simple_key(self):
+        # Remove the saved possible key position at the current flow level.
+        if self.flow_level in self.possible_simple_keys:
+            key = self.possible_simple_keys[self.flow_level]
+
+            if key.required:
+                raise ScannerError("while scanning a simple key", key.mark,
+                        "could not find expected ':'", self.get_mark())
+
+            del self.possible_simple_keys[self.flow_level]
+
+    # Indentation functions.
+
+    def unwind_indent(self, column):
+
+        ## In flow context, tokens should respect indentation.
+        ## Actually the condition should be `self.indent >= column` according to
+        ## the spec. But this condition will prohibit intuitively correct
+        ## constructions such as
+        ## key : {
+        ## }
+        #if self.flow_level and self.indent > column:
+        #    raise ScannerError(None, None,
+        #            "invalid indentation or unclosed '[' or '{'",
+        #            self.get_mark())
+
+        # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+        if self.flow_level:
+            return
+
+        # In block context, we may need to issue the BLOCK-END tokens.
+        while self.indent > column:
+            mark = self.get_mark()
+            self.indent = self.indents.pop()
+            self.tokens.append(BlockEndToken(mark, mark))
+
+    def add_indent(self, column):
+        # Check if we need to increase indentation.
+        if self.indent < column:
+            self.indents.append(self.indent)
+            self.indent = column
+            return True
+        return False
+
+    # Fetchers.
+
+    def fetch_stream_start(self):
+        # We always add STREAM-START as the first token and STREAM-END as the
+        # last token.
+
+        # Read the token.
+        mark = self.get_mark()
+
+        # Add STREAM-START.
+        self.tokens.append(StreamStartToken(mark, mark,
+            encoding=self.encoding))
+
+    def fetch_stream_end(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+        self.possible_simple_keys = {}
+
+        # Read the token.
+        mark = self.get_mark()
+
+        # Add STREAM-END.
+        self.tokens.append(StreamEndToken(mark, mark))
+
+        # The stream is finished.
+        self.done = True
+
+    def fetch_directive(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Scan and add DIRECTIVE.
+        self.tokens.append(self.scan_directive())
+
+    def fetch_document_start(self):
+        self.fetch_document_indicator(DocumentStartToken)
+
+    def fetch_document_end(self):
+        self.fetch_document_indicator(DocumentEndToken)
+
+    def fetch_document_indicator(self, TokenClass):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys. Note that there cannot be a block collection
+        # after '---'.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Add DOCUMENT-START or DOCUMENT-END.
+        start_mark = self.get_mark()
+        self.forward(3)
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_start(self):
+        self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+    def fetch_flow_mapping_start(self):
+        self.fetch_flow_collection_start(FlowMappingStartToken)
+
+    def fetch_flow_collection_start(self, TokenClass):
+
+        # '[' and '{' may start a simple key.
+        self.save_possible_simple_key()
+
+        # Increase the flow level.
+        self.flow_level += 1
+
+        # Simple keys are allowed after '[' and '{'.
+        self.allow_simple_key = True
+
+        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_end(self):
+        self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+    def fetch_flow_mapping_end(self):
+        self.fetch_flow_collection_end(FlowMappingEndToken)
+
+    def fetch_flow_collection_end(self, TokenClass):
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Decrease the flow level.
+        self.flow_level -= 1
+
+        # No simple keys after ']' or '}'.
+        self.allow_simple_key = False
+
+        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_entry(self):
+
+        # Simple keys are allowed after ','.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+ self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not necessary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be caught by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. 
+        #
+        # Note that we limit the last rule to the block context (except the
+        # '-' character) because we want the flow context to be space
+        # independent.
+        ch = self.peek()
+        return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
+                or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+                        and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
+    # Scanners.
+
+    def scan_to_next_token(self):
+        # We ignore spaces, line breaks and comments.
+        # If we find a line break in the block context, we set the flag
+        # `allow_simple_key` on.
+        # The byte order mark is stripped if it's the first character in the
+        # stream. We do not yet support BOM inside the stream as the
+        # specification requires. Any such mark will be considered as a part
+        # of the document.
+        #
+        # TODO: We need to make tab handling rules more sane. A good rule is:
+        #   Tabs cannot precede tokens
+        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+        #   KEY(block), VALUE(block), BLOCK-ENTRY
+        # So the checking code is
+        #   if <TAB>:
+        #       self.allow_simple_keys = False
+        # We also need to add the check for `allow_simple_keys == True` to
+        # `unwind_indent` before issuing BLOCK-END.
+        # Scanners for block, flow, and plain scalars need to be modified.
+
+        if self.index == 0 and self.peek() == u'\uFEFF':
+            self.forward()
+        found = False
+        while not found:
+            while self.peek() == u' ':
+                self.forward()
+            if self.peek() == u'#':
+                while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+                    self.forward()
+            if self.scan_line_break():
+                if not self.flow_level:
+                    self.allow_simple_key = True
+            else:
+                found = True
+
+    def scan_directive(self):
+        # See the specification for details.
+        start_mark = self.get_mark()
+        self.forward()
+        name = self.scan_directive_name(start_mark)
+        value = None
+        if name == u'YAML':
+            value = self.scan_yaml_directive_value(start_mark)
+            end_mark = self.get_mark()
+        elif name == u'TAG':
+            value = self.scan_tag_directive_value(start_mark)
+            end_mark = self.get_mark()
+        else:
+            end_mark = self.get_mark()
+            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+                self.forward()
+        self.scan_directive_ignored_line(start_mark)
+        return DirectiveToken(name, value, start_mark, end_mark)
+
+    def scan_directive_name(self, start_mark):
+        # See the specification for details.
+        length = 0
+        ch = self.peek(length)
+        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'  \
+                or ch in u'-_':
+            length += 1
+            ch = self.peek(length)
+        if not length:
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch.encode('utf-8'), self.get_mark())
+        value = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch not in u'\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch.encode('utf-8'), self.get_mark())
+        return value
+
+    def scan_yaml_directive_value(self, start_mark):
+        # See the specification for details.
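+        # (For example, the directive line '%YAML 1.1' yields the version
+        # tuple (1, 1); the '%' indicator and the directive name have already
+        # been consumed by scan_directive above.)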
+ while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpreted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
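+        # A rough round-trip illustration with the default loader:
+        #   >>> yaml.safe_load('[&a 1, *a]')
+        #   [1, 1]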
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' + self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+                #
+                # This is the folding according to the specification:
+
+                if folded and line_break == u'\n'   \
+                        and leading_non_space and self.peek() not in u' \t':
+                    if not breaks:
+                        chunks.append(u' ')
+                else:
+                    chunks.append(line_break)
+
+                # This is Clark Evans's interpretation (also in the spec
+                # examples):
+                #
+                #if folded and line_break == u'\n':
+                #    if not breaks:
+                #        if self.peek() not in ' \t':
+                #            chunks.append(u' ')
+                #        else:
+                #            chunks.append(line_break)
+                #else:
+                #    chunks.append(line_break)
+            else:
+                break
+
+        # Chomp the tail.
+        if chomping is not False:
+            chunks.append(line_break)
+        if chomping is True:
+            chunks.extend(breaks)
+
+        # We are done.
+        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+                style)
+
+    def scan_block_scalar_indicators(self, start_mark):
+        # See the specification for details.
+        chomping = None
+        increment = None
+        ch = self.peek()
+        if ch in u'+-':
+            if ch == '+':
+                chomping = True
+            else:
+                chomping = False
+            self.forward()
+            ch = self.peek()
+            if ch in u'0123456789':
+                increment = int(ch)
+                if increment == 0:
+                    raise ScannerError("while scanning a block scalar", start_mark,
+                            "expected indentation indicator in the range 1-9, but found 0",
+                            self.get_mark())
+                self.forward()
+        elif ch in u'0123456789':
+            increment = int(ch)
+            if increment == 0:
+                raise ScannerError("while scanning a block scalar", start_mark,
+                        "expected indentation indicator in the range 1-9, but found 0",
+                        self.get_mark())
+            self.forward()
+            ch = self.peek()
+            if ch in u'+-':
+                if ch == '+':
+                    chomping = True
+                else:
+                    chomping = False
+                self.forward()
+        ch = self.peek()
+        if ch not in u'\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a block scalar", start_mark,
+                    "expected chomping or indentation indicators, but found %r"
+                    % ch.encode('utf-8'), self.get_mark())
+        return chomping, increment
+
+    def scan_block_scalar_ignored_line(self, start_mark):
+        # See the specification for details.
+        while self.peek() == u' ':
+            self.forward()
+        if self.peek() == u'#':
+            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+                self.forward()
+        ch = self.peek()
+        if ch not in u'\0\r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a block scalar", start_mark,
+                    "expected a comment or a line break, but found %r"
+                    % ch.encode('utf-8'), self.get_mark())
+        self.scan_line_break()
+
+    def scan_block_scalar_indentation(self):
+        # See the specification for details.
+        chunks = []
+        max_indent = 0
+        end_mark = self.get_mark()
+        while self.peek() in u' \r\n\x85\u2028\u2029':
+            if self.peek() != u' ':
+                chunks.append(self.scan_line_break())
+                end_mark = self.get_mark()
+            else:
+                self.forward()
+                if self.column > max_indent:
+                    max_indent = self.column
+        return chunks, max_indent, end_mark
+
+    def scan_block_scalar_breaks(self, indent):
+        # See the specification for details.
+        chunks = []
+        end_mark = self.get_mark()
+        while self.column < indent and self.peek() == u' ':
+            self.forward()
+        while self.peek() in u'\r\n\x85\u2028\u2029':
+            chunks.append(self.scan_line_break())
+            end_mark = self.get_mark()
+            while self.column < indent and self.peek() == u' ':
+                self.forward()
+        return chunks, end_mark
+
+    def scan_flow_scalar(self, style):
+        # See the specification for details.
+        # Note that we loosen the indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark the beginning and the end of them. Therefore we are less
+        # restrictive than the specification requires.
We only need to check + # that document separators are not included in scalars. + if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'/': u'/', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. 
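+            # (E.g. a line consisting of '---' inside an unterminated quoted
+            # scalar would otherwise read as the start of a new document; the
+            # check below reports it as an error instead.)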
+            prefix = self.prefix(3)
+            if (prefix == u'---' or prefix == u'...')   \
+                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+                raise ScannerError("while scanning a quoted scalar", start_mark,
+                        "found unexpected document separator", self.get_mark())
+            while self.peek() in u' \t':
+                self.forward()
+            if self.peek() in u'\r\n\x85\u2028\u2029':
+                chunks.append(self.scan_line_break())
+            else:
+                return chunks
+
+    def scan_plain(self):
+        # See the specification for details.
+        # We add an additional restriction for the flow context:
+        #   plain scalars in the flow context cannot contain ',' or '?'.
+        # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+        chunks = []
+        start_mark = self.get_mark()
+        end_mark = start_mark
+        indent = self.indent+1
+        # We allow zero indentation for scalars, but then we need to check for
+        # document separators at the beginning of the line.
+        #if indent == 0:
+        #    indent = 1
+        spaces = []
+        while True:
+            length = 0
+            if self.peek() == u'#':
+                break
+            while True:
+                ch = self.peek(length)
+                if ch in u'\0 \t\r\n\x85\u2028\u2029'   \
+                        or (ch == u':' and
+                            self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029'
+                                  + (u',[]{}' if self.flow_level else u''))\
+                        or (self.flow_level and ch in u',?[]{}'):
+                    break
+                length += 1
+            if length == 0:
+                break
+            self.allow_simple_key = False
+            chunks.extend(spaces)
+            chunks.append(self.prefix(length))
+            self.forward(length)
+            end_mark = self.get_mark()
+            spaces = self.scan_plain_spaces(indent, start_mark)
+            if not spaces or self.peek() == u'#' \
+                    or (not self.flow_level and self.column < indent):
+                break
+        return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+    def scan_plain_spaces(self, indent, start_mark):
+        # See the specification for details.
+        # The specification is really confusing about tabs in plain scalars.
+        # We just forbid them completely. Do not use tabs in YAML!
+        chunks = []
+        length = 0
+        while self.peek(length) in u' ':
+            length += 1
+        whitespaces = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch in u'\r\n\x85\u2028\u2029':
+            line_break = self.scan_line_break()
+            self.allow_simple_key = True
+            prefix = self.prefix(3)
+            if (prefix == u'---' or prefix == u'...')   \
+                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+                return
+            breaks = []
+            while self.peek() in u' \r\n\x85\u2028\u2029':
+                if self.peek() == ' ':
+                    self.forward()
+                else:
+                    breaks.append(self.scan_line_break())
+                    prefix = self.prefix(3)
+                    if (prefix == u'---' or prefix == u'...')   \
+                            and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+                        return
+            if line_break != u'\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(u' ')
+            chunks.extend(breaks)
+        elif whitespaces:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_tag_handle(self, name, start_mark):
+        # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+        # tag handles. I have allowed it anyway.
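+        # (For the shorthand tag '!!str', for instance, this returns the
+        # handle u'!!'; the trailing 'str' is the suffix, scanned separately
+        # by scan_tag_uri.)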
+ ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. + chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' diff --git a/project/venv/lib/python2.7/site-packages/yaml/scanner.pyc b/project/venv/lib/python2.7/site-packages/yaml/scanner.pyc new file mode 100644 index 0000000..369e3e2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/scanner.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/serializer.py b/project/venv/lib/python2.7/site-packages/yaml/serializer.py new file mode 100644 index 0000000..0bf1e96 --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/serializer.py @@ -0,0 +1,111 @@ + +__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + 
self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/project/venv/lib/python2.7/site-packages/yaml/serializer.pyc b/project/venv/lib/python2.7/site-packages/yaml/serializer.pyc new file mode 100644 index 0000000..8de34ce Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/serializer.pyc differ diff --git a/project/venv/lib/python2.7/site-packages/yaml/tokens.py b/project/venv/lib/python2.7/site-packages/yaml/tokens.py new file mode 100644 index 0000000..4d0b48a --- /dev/null +++ b/project/venv/lib/python2.7/site-packages/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', 
'.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/project/venv/lib/python2.7/site-packages/yaml/tokens.pyc b/project/venv/lib/python2.7/site-packages/yaml/tokens.pyc new file mode 100644 index 0000000..17188d2 Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/yaml/tokens.pyc differ diff --git a/project/venv/lib/python2.7/site.py b/project/venv/lib/python2.7/site.py new file mode 100644 index 0000000..5fac5ee --- /dev/null +++ b/project/venv/lib/python2.7/site.py @@ -0,0 +1,851 @@ +"""Append module search paths for third-party packages to sys.path. + +**************************************************************** +* This module is automatically imported during initialization. * +**************************************************************** + +In earlier versions of Python (up to 1.5a3), scripts or modules that +needed to use site-specific modules would place ``import site'' +somewhere near the top of their code. Because of the automatic +import, this is no longer necessary (but code that does it still +works). + +This will append site-specific paths to the module search path. On +Unix, it starts with sys.prefix and sys.exec_prefix (if different) and +appends lib/python/site-packages as well as lib/site-python. +It also supports the Debian convention of +lib/python/dist-packages. On other platforms (mainly Mac and +Windows), it uses just sys.prefix (and sys.exec_prefix, if different, +but this is unlikely). The resulting directories, if they exist, are +appended to sys.path, and also inspected for path configuration files. + +FOR DEBIAN, this sys.path is augmented with directories in /usr/local. 
+Local addons go into /usr/local/lib/python/site-packages +(resp. /usr/local/lib/site-python), Debian addons install into +/usr/{lib,share}/python/dist-packages. + +A path configuration file is a file whose name has the form +.pth; its contents are additional directories (one per line) +to be added to sys.path. Non-existing directories (or +non-directories) are never added to sys.path; no directory is added to +sys.path more than once. Blank lines and lines beginning with +'#' are skipped. Lines starting with 'import' are executed. + +For example, suppose sys.prefix and sys.exec_prefix are set to +/usr/local and there is a directory /usr/local/lib/python2.X/site-packages +with three subdirectories, foo, bar and spam, and two path +configuration files, foo.pth and bar.pth. Assume foo.pth contains the +following: + + # foo package configuration + foo + bar + bletch + +and bar.pth contains: + + # bar package configuration + bar + +Then the following directories are added to sys.path, in this order: + + /usr/local/lib/python2.X/site-packages/bar + /usr/local/lib/python2.X/site-packages/foo + +Note that bletch is omitted because it doesn't exist; bar precedes foo +because bar.pth comes alphabetically before foo.pth; and spam is +omitted because it is not mentioned in either path configuration file. + +After these path manipulations, an attempt is made to import a module +named sitecustomize, which can perform arbitrary additional +site-specific customizations. If this import fails with an +ImportError exception, it is silently ignored. + +""" + +import os +import sys + +try: + import __builtin__ as builtins +except ImportError: + import builtins +try: + set +except NameError: + from sets import Set as set + +# Prefixes for site-packages; add additional prefixes like /usr/local here +PREFIXES = [sys.prefix, sys.exec_prefix] +# Enable per user site-packages directory +# set it to False to disable the feature or True to force the feature +ENABLE_USER_SITE = None +# for distutils.commands.install +USER_SITE = None +USER_BASE = None + +_is_64bit = (getattr(sys, "maxsize", None) or getattr(sys, "maxint")) > 2 ** 32 +_is_pypy = hasattr(sys, "pypy_version_info") +_is_jython = sys.platform[:4] == "java" +if _is_jython: + ModuleType = type(os) + + +def makepath(*paths): + dir = os.path.join(*paths) + if _is_jython and (dir == "__classpath__" or dir.startswith("__pyclasspath__")): + return dir, dir + dir = os.path.abspath(dir) + return dir, os.path.normcase(dir) + + +def abs__file__(): + """Set all module' __file__ attribute to an absolute path""" + for m in sys.modules.values(): + if (_is_jython and not isinstance(m, ModuleType)) or hasattr(m, "__loader__"): + # only modules need the abspath in Jython. and don't mess + # with a PEP 302-supplied __file__ + continue + f = getattr(m, "__file__", None) + if f is None: + continue + m.__file__ = os.path.abspath(f) + + +def removeduppaths(): + """ Remove duplicate entries from sys.path along with making them + absolute""" + # This ensures that the initial path provided by the interpreter contains + # only absolute pathnames, even if we're running from the build directory. + L = [] + known_paths = set() + for dir in sys.path: + # Filter out duplicate paths (on case-insensitive file systems also + # if they only differ in case); turn relative paths into absolute + # paths. 
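# --- illustrative sketch (not part of the committed files): the
# dedup-and-absolutise step removeduppaths() performs below, restated
# standalone with hypothetical input paths.
import os

def dedupe(paths):
    seen, kept = set(), []
    for p in paths:
        p = os.path.abspath(p)       # relative entries become absolute
        key = os.path.normcase(p)    # case-insensitive filesystems compare folded
        if key not in seen:
            seen.add(key)
            kept.append(p)
    return kept

print(dedupe(["/usr/lib", ".", "/usr/lib"]))
# -> ['/usr/lib', '/current/working/dir']  (duplicate dropped, '.' made absolute)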
+ dir, dircase = makepath(dir) + if not dircase in known_paths: + L.append(dir) + known_paths.add(dircase) + sys.path[:] = L + return known_paths + + +# XXX This should not be part of site.py, since it is needed even when +# using the -S option for Python. See http://www.python.org/sf/586680 +def addbuilddir(): + """Append ./build/lib. in case we're running in the build dir + (especially for Guido :-)""" + from distutils.util import get_platform + + s = "build/lib.{}-{:.3}".format(get_platform(), sys.version) + if hasattr(sys, "gettotalrefcount"): + s += "-pydebug" + s = os.path.join(os.path.dirname(sys.path[-1]), s) + sys.path.append(s) + + +def _init_pathinfo(): + """Return a set containing all existing directory entries from sys.path""" + d = set() + for dir in sys.path: + try: + if os.path.isdir(dir): + dir, dircase = makepath(dir) + d.add(dircase) + except TypeError: + continue + return d + + +def addpackage(sitedir, name, known_paths): + """Add a new path to known_paths by combining sitedir and 'name' or execute + sitedir if it starts with 'import'""" + if known_paths is None: + _init_pathinfo() + reset = 1 + else: + reset = 0 + fullname = os.path.join(sitedir, name) + try: + f = open(fullname, "r") + except IOError: + return + try: + for line in f: + if line.startswith("#"): + continue + if line.startswith("import"): + exec(line) + continue + line = line.rstrip() + dir, dircase = makepath(sitedir, line) + if not dircase in known_paths and os.path.exists(dir): + sys.path.append(dir) + known_paths.add(dircase) + finally: + f.close() + if reset: + known_paths = None + return known_paths + + +def addsitedir(sitedir, known_paths=None): + """Add 'sitedir' argument to sys.path if missing and handle .pth files in + 'sitedir'""" + if known_paths is None: + known_paths = _init_pathinfo() + reset = 1 + else: + reset = 0 + sitedir, sitedircase = makepath(sitedir) + if not sitedircase in known_paths: + sys.path.append(sitedir) # Add path component + try: + names = os.listdir(sitedir) + except os.error: + return + names.sort() + for name in names: + if name.endswith(os.extsep + "pth"): + addpackage(sitedir, name, known_paths) + if reset: + known_paths = None + return known_paths + + +def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix): + """Add site-packages (and possibly site-python) to sys.path""" + prefixes = [os.path.join(sys_prefix, "local"), sys_prefix] + if exec_prefix != sys_prefix: + prefixes.append(os.path.join(exec_prefix, "local")) + + for prefix in prefixes: + if prefix: + if sys.platform in ("os2emx", "riscos") or _is_jython: + sitedirs = [os.path.join(prefix, "Lib", "site-packages")] + elif _is_pypy: + sitedirs = [os.path.join(prefix, "site-packages")] + elif sys.platform == "darwin" and prefix == sys_prefix: + + if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python + + sitedirs = [ + os.path.join("/Library/Python", sys.version[:3], "site-packages"), + os.path.join(prefix, "Extras", "lib", "python"), + ] + + else: # any other Python distros on OSX work this way + sitedirs = [os.path.join(prefix, "lib", "python" + sys.version[:3], "site-packages")] + + elif os.sep == "/": + sitedirs = [ + os.path.join(prefix, "lib", "python" + sys.version[:3], "site-packages"), + os.path.join(prefix, "lib", "site-python"), + os.path.join(prefix, "python" + sys.version[:3], "lib-dynload"), + ] + lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages") + if os.path.exists(lib64_dir) and os.path.realpath(lib64_dir) not in [ 
+ os.path.realpath(p) for p in sitedirs + ]: + if _is_64bit: + sitedirs.insert(0, lib64_dir) + else: + sitedirs.append(lib64_dir) + try: + # sys.getobjects only available in --with-pydebug build + sys.getobjects + sitedirs.insert(0, os.path.join(sitedirs[0], "debug")) + except AttributeError: + pass + # Debian-specific dist-packages directories: + sitedirs.append(os.path.join(prefix, "local/lib", "python" + sys.version[:3], "dist-packages")) + if sys.version[0] == "2": + sitedirs.append(os.path.join(prefix, "lib", "python" + sys.version[:3], "dist-packages")) + else: + sitedirs.append(os.path.join(prefix, "lib", "python" + sys.version[0], "dist-packages")) + sitedirs.append(os.path.join(prefix, "lib", "dist-python")) + else: + sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")] + if sys.platform == "darwin": + # for framework builds *only* we add the standard Apple + # locations. Currently only per-user, but /Library and + # /Network/Library could be added too + if "Python.framework" in prefix: + home = os.environ.get("HOME") + if home: + sitedirs.append(os.path.join(home, "Library", "Python", sys.version[:3], "site-packages")) + for sitedir in sitedirs: + if os.path.isdir(sitedir): + addsitedir(sitedir, known_paths) + return None + + +def check_enableusersite(): + """Check if user site directory is safe for inclusion + + The function tests for the command line flag (including environment var), + process uid/gid equal to effective uid/gid. + + None: Disabled for security reasons + False: Disabled by user (command line option) + True: Safe and enabled + """ + if hasattr(sys, "flags") and getattr(sys.flags, "no_user_site", False): + return False + + if hasattr(os, "getuid") and hasattr(os, "geteuid"): + # check process uid == effective uid + if os.geteuid() != os.getuid(): + return None + if hasattr(os, "getgid") and hasattr(os, "getegid"): + # check process gid == effective gid + if os.getegid() != os.getgid(): + return None + + return True + + +def addusersitepackages(known_paths): + """Add a per user site-package to sys.path + + Each user has its own python directory with site-packages in the + home directory. + + USER_BASE is the root directory for all Python versions + + USER_SITE is the user specific site-packages directory + + USER_SITE/.. can be used for data. 
+ """ + global USER_BASE, USER_SITE, ENABLE_USER_SITE + env_base = os.environ.get("PYTHONUSERBASE", None) + + def joinuser(*args): + return os.path.expanduser(os.path.join(*args)) + + # if sys.platform in ('os2emx', 'riscos'): + # # Don't know what to put here + # USER_BASE = '' + # USER_SITE = '' + if os.name == "nt": + base = os.environ.get("APPDATA") or "~" + if env_base: + USER_BASE = env_base + else: + USER_BASE = joinuser(base, "Python") + USER_SITE = os.path.join(USER_BASE, "Python" + sys.version[0] + sys.version[2], "site-packages") + else: + if env_base: + USER_BASE = env_base + else: + USER_BASE = joinuser("~", ".local") + USER_SITE = os.path.join(USER_BASE, "lib", "python" + sys.version[:3], "site-packages") + + if ENABLE_USER_SITE and os.path.isdir(USER_SITE): + addsitedir(USER_SITE, known_paths) + if ENABLE_USER_SITE: + for dist_libdir in ("lib", "local/lib"): + user_site = os.path.join(USER_BASE, dist_libdir, "python" + sys.version[:3], "dist-packages") + if os.path.isdir(user_site): + addsitedir(user_site, known_paths) + return known_paths + + +def setBEGINLIBPATH(): + """The OS/2 EMX port has optional extension modules that do double duty + as DLLs (and must use the .DLL file extension) for other extensions. + The library search path needs to be amended so these will be found + during module import. Use BEGINLIBPATH so that these are at the start + of the library search path. + + """ + dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload") + libpath = os.environ["BEGINLIBPATH"].split(";") + if libpath[-1]: + libpath.append(dllpath) + else: + libpath[-1] = dllpath + os.environ["BEGINLIBPATH"] = ";".join(libpath) + + +def setquit(): + """Define new built-ins 'quit' and 'exit'. + These are simply strings that display a hint on how to exit. + + """ + if os.sep == ":": + eof = "Cmd-Q" + elif os.sep == "\\": + eof = "Ctrl-Z plus Return" + else: + eof = "Ctrl-D (i.e. EOF)" + + class Quitter(object): + def __init__(self, name): + self.name = name + + def __repr__(self): + return "Use {}() or {} to exit".format(self.name, eof) + + def __call__(self, code=None): + # Shells like IDLE catch the SystemExit, but listen when their + # stdin wrapper is closed. 
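# --- illustrative sketch (not part of the committed files): the per-user
# paths addusersitepackages() above computes on Linux, mirroring the
# file's own "python" + sys.version[:3] idiom; PYTHONUSERBASE overrides
# the base directory.
import os, sys

user_base = os.environ.get("PYTHONUSERBASE") or os.path.expanduser(os.path.join("~", ".local"))
user_site = os.path.join(user_base, "lib", "python" + sys.version[:3], "site-packages")
print(user_base, user_site)
# e.g. /home/angeliki/.local  /home/angeliki/.local/lib/python2.7/site-packages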
+ try: + sys.stdin.close() + except: + pass + raise SystemExit(code) + + builtins.quit = Quitter("quit") + builtins.exit = Quitter("exit") + + +class _Printer(object): + """interactive prompt objects for printing the license text, a list of + contributors and the copyright notice.""" + + MAXLINES = 23 + + def __init__(self, name, data, files=(), dirs=()): + self.__name = name + self.__data = data + self.__files = files + self.__dirs = dirs + self.__lines = None + + def __setup(self): + if self.__lines: + return + data = None + for dir in self.__dirs: + for filename in self.__files: + filename = os.path.join(dir, filename) + try: + fp = open(filename, "r") + data = fp.read() + fp.close() + break + except IOError: + pass + if data: + break + if not data: + data = self.__data + self.__lines = data.split("\n") + self.__linecnt = len(self.__lines) + + def __repr__(self): + self.__setup() + if len(self.__lines) <= self.MAXLINES: + return "\n".join(self.__lines) + else: + return "Type %s() to see the full %s text" % ((self.__name,) * 2) + + def __call__(self): + self.__setup() + prompt = "Hit Return for more, or q (and Return) to quit: " + lineno = 0 + while 1: + try: + for i in range(lineno, lineno + self.MAXLINES): + print(self.__lines[i]) + except IndexError: + break + else: + lineno += self.MAXLINES + key = None + while key is None: + try: + key = raw_input(prompt) + except NameError: + key = input(prompt) + if key not in ("", "q"): + key = None + if key == "q": + break + + +def setcopyright(): + """Set 'copyright' and 'credits' in __builtin__""" + builtins.copyright = _Printer("copyright", sys.copyright) + if _is_jython: + builtins.credits = _Printer("credits", "Jython is maintained by the Jython developers (www.jython.org).") + elif _is_pypy: + builtins.credits = _Printer("credits", "PyPy is maintained by the PyPy developers: http://pypy.org/") + else: + builtins.credits = _Printer( + "credits", + """\ + Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands + for supporting Python development. See www.python.org for more information.""", + ) + here = os.path.dirname(os.__file__) + builtins.license = _Printer( + "license", + "See https://www.python.org/psf/license/", + ["LICENSE.txt", "LICENSE"], + [os.path.join(here, os.pardir), here, os.curdir], + ) + + +class _Helper(object): + """Define the built-in 'help'. + This is a wrapper around pydoc.help (with a twist). + + """ + + def __repr__(self): + return "Type help() for interactive help, " "or help(object) for help about object." + + def __call__(self, *args, **kwds): + import pydoc + + return pydoc.help(*args, **kwds) + + +def sethelper(): + builtins.help = _Helper() + + +def aliasmbcs(): + """On Windows, some default encodings are not provided by Python, + while they are always available as "mbcs" in each locale. Make + them usable by aliasing to "mbcs" in such a case.""" + if sys.platform == "win32": + import locale, codecs + + enc = locale.getdefaultlocale()[1] + if enc.startswith("cp"): # "cp***" ? + try: + codecs.lookup(enc) + except LookupError: + import encodings + + encodings._cache[enc] = encodings._unknown + encodings.aliases.aliases[enc] = "mbcs" + + +def setencoding(): + """Set the string encoding used by the Unicode implementation. The + default is 'ascii', but if you're willing to experiment, you can + change this.""" + encoding = "ascii" # Default value set by _PyUnicode_Init() + if 0: + # Enable to support locale aware default string encodings. 
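# --- illustrative sketch (not part of the committed files): setquit() and
# sethelper() above install these objects into the builtins; their reprs
# are the hint strings defined in this file.
print(repr(quit))  # Use quit() or Ctrl-D (i.e. EOF) to exit   (when os.sep == '/')
print(repr(help))  # Type help() for interactive help, or help(object) for help about object.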
+ import locale + + loc = locale.getdefaultlocale() + if loc[1]: + encoding = loc[1] + if 0: + # Enable to switch off string to Unicode coercion and implicit + # Unicode to string conversion. + encoding = "undefined" + if encoding != "ascii": + # On Non-Unicode builds this will raise an AttributeError... + sys.setdefaultencoding(encoding) # Needs Python Unicode build ! + + +def execsitecustomize(): + """Run custom site specific code, if available.""" + try: + import sitecustomize + except ImportError: + pass + + +def virtual_install_main_packages(): + f = open(os.path.join(os.path.dirname(__file__), "orig-prefix.txt")) + sys.real_prefix = f.read().strip() + f.close() + pos = 2 + hardcoded_relative_dirs = [] + if sys.path[0] == "": + pos += 1 + if _is_jython: + paths = [os.path.join(sys.real_prefix, "Lib")] + elif _is_pypy: + if sys.version_info > (3, 2): + cpyver = "%d" % sys.version_info[0] + elif sys.pypy_version_info >= (1, 5): + cpyver = "%d.%d" % sys.version_info[:2] + else: + cpyver = "%d.%d.%d" % sys.version_info[:3] + paths = [os.path.join(sys.real_prefix, "lib_pypy"), os.path.join(sys.real_prefix, "lib-python", cpyver)] + if sys.pypy_version_info < (1, 9): + paths.insert(1, os.path.join(sys.real_prefix, "lib-python", "modified-%s" % cpyver)) + hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below + # + # This is hardcoded in the Python executable, but relative to sys.prefix: + for path in paths[:]: + plat_path = os.path.join(path, "plat-%s" % sys.platform) + if os.path.exists(plat_path): + paths.append(plat_path) + elif sys.platform == "win32": + paths = [os.path.join(sys.real_prefix, "Lib"), os.path.join(sys.real_prefix, "DLLs")] + else: + paths = [os.path.join(sys.real_prefix, "lib", "python" + sys.version[:3])] + hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below + lib64_path = os.path.join(sys.real_prefix, "lib64", "python" + sys.version[:3]) + if os.path.exists(lib64_path): + if _is_64bit: + paths.insert(0, lib64_path) + else: + paths.append(lib64_path) + # This is hardcoded in the Python executable, but relative to + # sys.prefix. Debian change: we need to add the multiarch triplet + # here, which is where the real stuff lives. As per PEP 421, in + # Python 3.3+, this lives in sys.implementation, while in Python 2.7 + # it lives in sys. + try: + arch = getattr(sys, "implementation", sys)._multiarch + except AttributeError: + # This is a non-multiarch aware Python. Fallback to the old way. + arch = sys.platform + plat_path = os.path.join(sys.real_prefix, "lib", "python" + sys.version[:3], "plat-%s" % arch) + if os.path.exists(plat_path): + paths.append(plat_path) + # This is hardcoded in the Python executable, but + # relative to sys.prefix, so we have to fix up: + for path in list(paths): + tk_dir = os.path.join(path, "lib-tk") + if os.path.exists(tk_dir): + paths.append(tk_dir) + + # These are hardcoded in the Apple's Python executable, + # but relative to sys.prefix, so we have to fix them up: + if sys.platform == "darwin": + hardcoded_paths = [ + os.path.join(relative_dir, module) + for relative_dir in hardcoded_relative_dirs + for module in ("plat-darwin", "plat-mac", "plat-mac/lib-scriptpackages") + ] + + for path in hardcoded_paths: + if os.path.exists(path): + paths.append(path) + + sys.path.extend(paths) + + +def force_global_eggs_after_local_site_packages(): + """ + Force easy_installed eggs in the global environment to get placed + in sys.path after all packages inside the virtualenv. 
This + maintains the "least surprise" result that packages in the + virtualenv always mask global packages, never the other way + around. + + """ + egginsert = getattr(sys, "__egginsert", 0) + for i, path in enumerate(sys.path): + if i > egginsert and path.startswith(sys.prefix): + egginsert = i + sys.__egginsert = egginsert + 1 + + +def virtual_addsitepackages(known_paths): + force_global_eggs_after_local_site_packages() + return addsitepackages(known_paths, sys_prefix=sys.real_prefix) + + +def fixclasspath(): + """Adjust the special classpath sys.path entries for Jython. These + entries should follow the base virtualenv lib directories. + """ + paths = [] + classpaths = [] + for path in sys.path: + if path == "__classpath__" or path.startswith("__pyclasspath__"): + classpaths.append(path) + else: + paths.append(path) + sys.path = paths + sys.path.extend(classpaths) + + +def execusercustomize(): + """Run custom user specific code, if available.""" + try: + import usercustomize + except ImportError: + pass + + +def enablerlcompleter(): + """Enable default readline configuration on interactive prompts, by + registering a sys.__interactivehook__. + If the readline module can be imported, the hook will set the Tab key + as completion key and register ~/.python_history as history file. + This can be overridden in the sitecustomize or usercustomize module, + or in a PYTHONSTARTUP file. + """ + + def register_readline(): + import atexit + + try: + import readline + import rlcompleter + except ImportError: + return + + # Reading the initialization (config) file may not be enough to set a + # completion key, so we set one first and then read the file. + readline_doc = getattr(readline, "__doc__", "") + if readline_doc is not None and "libedit" in readline_doc: + readline.parse_and_bind("bind ^I rl_complete") + else: + readline.parse_and_bind("tab: complete") + + try: + readline.read_init_file() + except OSError: + # An OSError here could have many causes, but the most likely one + # is that there's no .inputrc file (or .editrc file in the case of + # Mac OS X + libedit) in the expected location. In that case, we + # want to ignore the exception. + pass + + if readline.get_current_history_length() == 0: + # If no history was loaded, default to .python_history. + # The guard is necessary to avoid doubling history size at + # each interpreter exit when readline was already configured + # through a PYTHONSTARTUP hook, see: + # http://bugs.python.org/issue5845#msg198636 + history = os.path.join(os.path.expanduser("~"), ".python_history") + try: + readline.read_history_file(history) + except OSError: + pass + + def write_history(): + try: + readline.write_history_file(history) + except (FileNotFoundError, PermissionError): + # home directory does not exist or is not writable + # https://bugs.python.org/issue19891 + pass + + atexit.register(write_history) + + sys.__interactivehook__ = register_readline + + +if _is_pypy: + + def import_builtin_stuff(): + """PyPy specific: some built-in modules should be pre-imported because + some programs expect them to be in sys.modules on startup. This is ported + from PyPy's site.py. 
+ """ + import encodings + + if "exceptions" in sys.builtin_module_names: + import exceptions + + if "zipimport" in sys.builtin_module_names: + import zipimport + + +def main(): + global ENABLE_USER_SITE + virtual_install_main_packages() + if _is_pypy: + import_builtin_stuff() + abs__file__() + paths_in_sys = removeduppaths() + if os.name == "posix" and sys.path and os.path.basename(sys.path[-1]) == "Modules": + addbuilddir() + if _is_jython: + fixclasspath() + GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), "no-global-site-packages.txt")) + if not GLOBAL_SITE_PACKAGES: + ENABLE_USER_SITE = False + if ENABLE_USER_SITE is None: + ENABLE_USER_SITE = check_enableusersite() + paths_in_sys = addsitepackages(paths_in_sys) + paths_in_sys = addusersitepackages(paths_in_sys) + if GLOBAL_SITE_PACKAGES: + paths_in_sys = virtual_addsitepackages(paths_in_sys) + if sys.platform == "os2emx": + setBEGINLIBPATH() + setquit() + setcopyright() + sethelper() + if sys.version_info[0] == 3: + enablerlcompleter() + aliasmbcs() + setencoding() + execsitecustomize() + if ENABLE_USER_SITE: + execusercustomize() + # Remove sys.setdefaultencoding() so that users cannot change the + # encoding after initialization. The test for presence is needed when + # this module is run as a script, because this code is executed twice. + if hasattr(sys, "setdefaultencoding"): + del sys.setdefaultencoding + + +main() + + +def _script(): + help = """\ + %s [--user-base] [--user-site] + + Without arguments print some useful information + With arguments print the value of USER_BASE and/or USER_SITE separated + by '%s'. + + Exit codes with --user-base or --user-site: + 0 - user site directory is enabled + 1 - user site directory is disabled by user + 2 - uses site directory is disabled by super user + or for security reasons + >2 - unknown error + """ + args = sys.argv[1:] + if not args: + print("sys.path = [") + for dir in sys.path: + print(" {!r},".format(dir)) + print("]") + + def exists(path): + if os.path.isdir(path): + return "exists" + else: + return "doesn't exist" + + print("USER_BASE: {!r} ({})".format(USER_BASE, exists(USER_BASE))) + print("USER_SITE: {!r} ({})".format(USER_SITE, exists(USER_SITE))) + print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE) + sys.exit(0) + + buffer = [] + if "--user-base" in args: + buffer.append(USER_BASE) + if "--user-site" in args: + buffer.append(USER_SITE) + + if buffer: + print(os.pathsep.join(buffer)) + if ENABLE_USER_SITE: + sys.exit(0) + elif ENABLE_USER_SITE is False: + sys.exit(1) + elif ENABLE_USER_SITE is None: + sys.exit(2) + else: + sys.exit(3) + else: + import textwrap + + print(textwrap.dedent(help % (sys.argv[0], os.pathsep))) + sys.exit(10) + + +if __name__ == "__main__": + _script() diff --git a/project/venv/lib/python2.7/site.pyc b/project/venv/lib/python2.7/site.pyc new file mode 100644 index 0000000..19cab5b Binary files /dev/null and b/project/venv/lib/python2.7/site.pyc differ diff --git a/project/venv/lib/python2.7/sre.py b/project/venv/lib/python2.7/sre.py new file mode 120000 index 0000000..27f81b9 --- /dev/null +++ b/project/venv/lib/python2.7/sre.py @@ -0,0 +1 @@ +/usr/lib/python2.7/sre.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/sre_compile.py b/project/venv/lib/python2.7/sre_compile.py new file mode 120000 index 0000000..dce5da4 --- /dev/null +++ b/project/venv/lib/python2.7/sre_compile.py @@ -0,0 +1 @@ +/usr/lib/python2.7/sre_compile.py \ No newline at end of file diff --git 
a/project/venv/lib/python2.7/sre_compile.pyc b/project/venv/lib/python2.7/sre_compile.pyc new file mode 100644 index 0000000..281bfcd Binary files /dev/null and b/project/venv/lib/python2.7/sre_compile.pyc differ diff --git a/project/venv/lib/python2.7/sre_constants.py b/project/venv/lib/python2.7/sre_constants.py new file mode 120000 index 0000000..b9c9797 --- /dev/null +++ b/project/venv/lib/python2.7/sre_constants.py @@ -0,0 +1 @@ +/usr/lib/python2.7/sre_constants.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/sre_constants.pyc b/project/venv/lib/python2.7/sre_constants.pyc new file mode 100644 index 0000000..d247ef2 Binary files /dev/null and b/project/venv/lib/python2.7/sre_constants.pyc differ diff --git a/project/venv/lib/python2.7/sre_parse.py b/project/venv/lib/python2.7/sre_parse.py new file mode 120000 index 0000000..f33a572 --- /dev/null +++ b/project/venv/lib/python2.7/sre_parse.py @@ -0,0 +1 @@ +/usr/lib/python2.7/sre_parse.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/sre_parse.pyc b/project/venv/lib/python2.7/sre_parse.pyc new file mode 100644 index 0000000..d10a3f4 Binary files /dev/null and b/project/venv/lib/python2.7/sre_parse.pyc differ diff --git a/project/venv/lib/python2.7/stat.py b/project/venv/lib/python2.7/stat.py new file mode 120000 index 0000000..c1d654c --- /dev/null +++ b/project/venv/lib/python2.7/stat.py @@ -0,0 +1 @@ +/usr/lib/python2.7/stat.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/stat.pyc b/project/venv/lib/python2.7/stat.pyc new file mode 100644 index 0000000..cb9397d Binary files /dev/null and b/project/venv/lib/python2.7/stat.pyc differ diff --git a/project/venv/lib/python2.7/types.py b/project/venv/lib/python2.7/types.py new file mode 120000 index 0000000..5546478 --- /dev/null +++ b/project/venv/lib/python2.7/types.py @@ -0,0 +1 @@ +/usr/lib/python2.7/types.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/types.pyc b/project/venv/lib/python2.7/types.pyc new file mode 100644 index 0000000..6fe97a4 Binary files /dev/null and b/project/venv/lib/python2.7/types.pyc differ diff --git a/project/venv/lib/python2.7/warnings.py b/project/venv/lib/python2.7/warnings.py new file mode 120000 index 0000000..a9c4730 --- /dev/null +++ b/project/venv/lib/python2.7/warnings.py @@ -0,0 +1 @@ +/usr/lib/python2.7/warnings.py \ No newline at end of file diff --git a/project/venv/lib/python2.7/warnings.pyc b/project/venv/lib/python2.7/warnings.pyc new file mode 100644 index 0000000..0075747 Binary files /dev/null and b/project/venv/lib/python2.7/warnings.pyc differ diff --git a/project/venv/local/bin b/project/venv/local/bin new file mode 120000 index 0000000..3ab46e9 --- /dev/null +++ b/project/venv/local/bin @@ -0,0 +1 @@ +/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin \ No newline at end of file diff --git a/project/venv/local/include b/project/venv/local/include new file mode 120000 index 0000000..f7552ea --- /dev/null +++ b/project/venv/local/include @@ -0,0 +1 @@ +/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/include \ No newline at end of file diff --git a/project/venv/local/lib b/project/venv/local/lib new file mode 120000 index 0000000..f87cb6c --- /dev/null +++ b/project/venv/local/lib @@ -0,0 +1 @@ +/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/lib \ No newline at end of file diff --git a/sound-.wav b/sound-.wav new file mode 100644 index 0000000..d1fb0c1 Binary files /dev/null 
and b/sound-.wav differ diff --git a/sound-monitor.wav b/sound-monitor.wav new file mode 100644 index 0000000..5e3aa27 Binary files /dev/null and b/sound-monitor.wav differ
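# --- illustrative sketch (not part of the committed files): the
# 120000-mode entries above record symlinks — virtualenv fills
# venv/lib/python2.7 by linking modules out of the system standard
# library rather than copying them. Checking one recorded target
# (run from the repository root; path taken from the diff):
import os
print(os.readlink("project/venv/lib/python2.7/types.py"))
# -> /usr/lib/python2.7/types.py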